author    André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-10-20 00:10:27 -0300
committer André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-10-20 00:10:27 -0300
commit    d0b2f91bede3bd5e3d24dd6803e56eee959c1797
tree      7fee4ab0509879c373c4f2cbd5b8a5be5b4041ee
parent    e914f8eb445e8f74b00303c19c2ffceaedd16a05
Linux-libre 4.8.2-gnu (tag: pck-4.8.2-gnu)
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Makefile | 8
-rw-r--r--  drivers/acpi/Kconfig | 60
-rw-r--r--  drivers/acpi/Makefile | 8
-rw-r--r--  drivers/acpi/acpi_cmos_rtc.c | 2
-rw-r--r--  drivers/acpi/acpi_configfs.c | 267
-rw-r--r--  drivers/acpi/acpi_lpat.c | 4
-rw-r--r--  drivers/acpi/acpi_lpss.c | 5
-rw-r--r--  drivers/acpi/acpi_video.c | 3
-rw-r--r--  drivers/acpi/apei/Makefile | 2
-rw-r--r--  drivers/acpi/apei/apei-internal.h | 2
-rw-r--r--  drivers/acpi/apei/bert.c | 150
-rw-r--r--  drivers/acpi/apei/einj.c | 57
-rw-r--r--  drivers/acpi/apei/erst.c | 7
-rw-r--r--  drivers/acpi/bus.c | 99
-rw-r--r--  drivers/acpi/button.c | 153
-rw-r--r--  drivers/acpi/dock.c | 7
-rw-r--r--  drivers/acpi/dptf/Kconfig | 15
-rw-r--r--  drivers/acpi/dptf/Makefile | 4
-rw-r--r--  drivers/acpi/dptf/dptf_power.c | 128
-rw-r--r--  drivers/acpi/dptf/int340x_thermal.c | 53
-rw-r--r--  drivers/acpi/ec.c | 106
-rw-r--r--  drivers/acpi/internal.h | 3
-rw-r--r--  drivers/acpi/nfit/Kconfig | 26
-rw-r--r--  drivers/acpi/nfit/Makefile | 3
-rw-r--r--  drivers/acpi/nfit/core.c | 2793
-rw-r--r--  drivers/acpi/nfit/mce.c | 89
-rw-r--r--  drivers/acpi/nfit/nfit.h | 227
-rw-r--r--  drivers/acpi/numa.c | 226
-rw-r--r--  drivers/acpi/osl.c | 5
-rw-r--r--  drivers/acpi/pci_mcfg.c | 92
-rw-r--r--  drivers/acpi/pci_root.c | 35
-rw-r--r--  drivers/acpi/pci_slot.c | 43
-rw-r--r--  drivers/acpi/pmic/intel_pmic.c | 84
-rw-r--r--  drivers/acpi/pmic/intel_pmic.h | 4
-rw-r--r--  drivers/acpi/pmic/intel_pmic_bxtwc.c | 420
-rw-r--r--  drivers/acpi/pmic/intel_pmic_crc.c | 5
-rw-r--r--  drivers/acpi/pmic/intel_pmic_xpower.c | 7
-rw-r--r--  drivers/acpi/processor_core.c | 26
-rw-r--r--  drivers/acpi/processor_driver.c | 13
-rw-r--r--  drivers/acpi/processor_idle.c | 542
-rw-r--r--  drivers/acpi/scan.c | 155
-rw-r--r--  drivers/acpi/sleep.c | 29
-rw-r--r--  drivers/acpi/sysfs.c | 6
-rw-r--r--  drivers/acpi/tables.c | 23
-rw-r--r--  drivers/acpi/thermal.c | 3
-rw-r--r--  drivers/acpi/video_detect.c | 8
-rw-r--r--  drivers/ata/Kconfig | 8
-rw-r--r--  drivers/ata/Makefile | 2
-rw-r--r--  drivers/ata/ahci.c | 2
-rw-r--r--  drivers/ata/ahci_brcm.c | 406
-rw-r--r--  drivers/ata/libahci.c | 10
-rw-r--r--  drivers/ata/libata-core.c | 18
-rw-r--r--  drivers/ata/libata-eh.c | 8
-rw-r--r--  drivers/ata/libata-scsi.c | 84
-rw-r--r--  drivers/ata/libata-transport.c | 9
-rw-r--r--  drivers/ata/pata_arasan_cf.c | 2
-rw-r--r--  drivers/ata/pata_atiixp.c | 4
-rw-r--r--  drivers/ata/pata_hpt366.c | 2
-rw-r--r--  drivers/ata/pata_marvell.c | 2
-rw-r--r--  drivers/ata/pata_ninja32.c | 2
-rw-r--r--  drivers/ata/sata_dwc_460ex.c | 14
-rw-r--r--  drivers/atm/horizon.c | 4
-rw-r--r--  drivers/atm/nicstar.c | 3
-rw-r--r--  drivers/base/firmware_class.c | 183
-rw-r--r--  drivers/base/memory.c | 28
-rw-r--r--  drivers/base/node.c | 85
-rw-r--r--  drivers/base/power/clock_ops.c | 45
-rw-r--r--  drivers/base/power/domain.c | 301
-rw-r--r--  drivers/base/power/opp/core.c | 31
-rw-r--r--  drivers/base/power/runtime.c | 27
-rw-r--r--  drivers/base/power/trace.c | 6
-rw-r--r--  drivers/base/power/wakeup.c | 18
-rw-r--r--  drivers/base/property.c | 28
-rw-r--r--  drivers/base/regmap/regcache-rbtree.c | 38
-rw-r--r--  drivers/base/regmap/regcache.c | 5
-rw-r--r--  drivers/base/regmap/regmap-i2c.c | 2
-rw-r--r--  drivers/base/regmap/regmap-irq.c | 15
-rw-r--r--  drivers/base/regmap/regmap.c | 37
-rw-r--r--  drivers/base/topology.c | 13
-rw-r--r--  drivers/bcma/Kconfig | 11
-rw-r--r--  drivers/bcma/driver_chipcommon_b.c | 9
-rw-r--r--  drivers/block/brd.c | 31
-rw-r--r--  drivers/block/cciss.c | 3
-rw-r--r--  drivers/block/drbd/drbd_actlog.c | 62
-rw-r--r--  drivers/block/drbd/drbd_bitmap.c | 92
-rw-r--r--  drivers/block/drbd/drbd_debugfs.c | 17
-rw-r--r--  drivers/block/drbd/drbd_int.h | 58
-rw-r--r--  drivers/block/drbd/drbd_interval.h | 14
-rw-r--r--  drivers/block/drbd/drbd_main.c | 135
-rw-r--r--  drivers/block/drbd/drbd_nl.c | 282
-rw-r--r--  drivers/block/drbd/drbd_proc.c | 30
-rw-r--r--  drivers/block/drbd/drbd_protocol.h | 79
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 571
-rw-r--r--  drivers/block/drbd/drbd_req.c | 120
-rw-r--r--  drivers/block/drbd/drbd_req.h | 5
-rw-r--r--  drivers/block/drbd/drbd_state.c | 61
-rw-r--r--  drivers/block/drbd/drbd_state.h | 2
-rw-r--r--  drivers/block/drbd/drbd_strings.c | 8
-rw-r--r--  drivers/block/drbd/drbd_worker.c | 118
-rw-r--r--  drivers/block/floppy.c | 6
-rw-r--r--  drivers/block/loop.c | 72
-rw-r--r--  drivers/block/mg_disk.c | 9
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c | 7
-rw-r--r--  drivers/block/nbd.c | 16
-rw-r--r--  drivers/block/null_blk.c | 2
-rw-r--r--  drivers/block/osdblk.c | 2
-rw-r--r--  drivers/block/pktcdvd.c | 6
-rw-r--r--  drivers/block/ps3disk.c | 7
-rw-r--r--  drivers/block/ps3vram.c | 3
-rw-r--r--  drivers/block/rbd.c | 29
-rw-r--r--  drivers/block/rsxx/dev.c | 4
-rw-r--r--  drivers/block/rsxx/dma.c | 2
-rw-r--r--  drivers/block/skd_main.c | 10
-rw-r--r--  drivers/block/sunvdc.c | 3
-rw-r--r--  drivers/block/umem.c | 10
-rw-r--r--  drivers/block/virtio_blk.c | 52
-rw-r--r--  drivers/block/xen-blkback/blkback.c | 27
-rw-r--r--  drivers/block/xen-blkback/xenbus.c | 22
-rw-r--r--  drivers/block/xen-blkfront.c | 212
-rw-r--r--  drivers/block/zram/Kconfig | 15
-rw-r--r--  drivers/block/zram/Makefile | 4
-rw-r--r--  drivers/block/zram/zcomp.c | 150
-rw-r--r--  drivers/block/zram/zcomp.h | 36
-rw-r--r--  drivers/block/zram/zram_drv.c | 79
-rw-r--r--  drivers/block/zram/zram_drv.h | 5
-rw-r--r--  drivers/bluetooth/bpa10x.c | 2
-rw-r--r--  drivers/bluetooth/btmrvl_main.c | 2
-rw-r--r--  drivers/bluetooth/btmrvl_sdio.c | 15
-rw-r--r--  drivers/bluetooth/btsdio.c | 2
-rw-r--r--  drivers/bluetooth/btusb.c | 16
-rw-r--r--  drivers/bluetooth/btwilink.c | 4
-rw-r--r--  drivers/bluetooth/hci_intel.c | 28
-rw-r--r--  drivers/bluetooth/hci_ldisc.c | 2
-rw-r--r--  drivers/bluetooth/hci_vhci.c | 6
-rw-r--r--  drivers/bus/Kconfig | 13
-rw-r--r--  drivers/bus/Makefile | 1
-rw-r--r--  drivers/bus/arm-cci.c | 55
-rw-r--r--  drivers/bus/arm-ccn.c | 142
-rw-r--r--  drivers/bus/imx-weim.c | 5
-rw-r--r--  drivers/bus/mvebu-mbus.c | 10
-rw-r--r--  drivers/bus/tegra-aconnect.c | 112
-rw-r--r--  drivers/bus/uniphier-system-bus.c | 3
-rw-r--r--  drivers/bus/vexpress-config.c | 1
-rw-r--r--  drivers/cdrom/cdrom.c | 28
-rw-r--r--  drivers/char/Kconfig | 42
-rw-r--r--  drivers/char/Makefile | 2
-rw-r--r--  drivers/char/agp/intel-gtt.c | 8
-rw-r--r--  drivers/char/dsp56k.c | 2
-rw-r--r--  drivers/char/hw_random/Kconfig | 18
-rw-r--r--  drivers/char/hw_random/Makefile | 1
-rw-r--r--  drivers/char/hw_random/bcm2835-rng.c | 47
-rw-r--r--  drivers/char/hw_random/exynos-rng.c | 4
-rw-r--r--  drivers/char/hw_random/meson-rng.c | 131
-rw-r--r--  drivers/char/hw_random/omap-rng.c | 16
-rw-r--r--  drivers/char/hw_random/stm32-rng.c | 10
-rw-r--r--  drivers/char/ipmi/Kconfig | 12
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 4
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 73
-rw-r--r--  drivers/char/ipmi/ipmi_ssif.c | 6
-rw-r--r--  drivers/char/mem.c | 30
-rw-r--r--  drivers/char/powernv-op-panel.c | 223
-rw-r--r--  drivers/char/random.c | 474
-rw-r--r--  drivers/char/tpm/Kconfig | 30
-rw-r--r--  drivers/char/tpm/Makefile | 3
-rw-r--r--  drivers/char/tpm/st33zp24/Kconfig | 11
-rw-r--r--  drivers/char/tpm/st33zp24/i2c.c | 70
-rw-r--r--  drivers/char/tpm/st33zp24/spi.c | 184
-rw-r--r--  drivers/char/tpm/st33zp24/st33zp24.c | 129
-rw-r--r--  drivers/char/tpm/st33zp24/st33zp24.h | 14
-rw-r--r--  drivers/char/tpm/tpm-chip.c | 299
-rw-r--r--  drivers/char/tpm/tpm-dev.c | 17
-rw-r--r--  drivers/char/tpm/tpm-interface.c | 183
-rw-r--r--  drivers/char/tpm/tpm-sysfs.c | 80
-rw-r--r--  drivers/char/tpm/tpm.h | 94
-rw-r--r--  drivers/char/tpm/tpm2-cmd.c | 159
-rw-r--r--  drivers/char/tpm/tpm_atmel.c | 63
-rw-r--r--  drivers/char/tpm/tpm_atmel.h | 16
-rw-r--r--  drivers/char/tpm/tpm_crb.c | 90
-rw-r--r--  drivers/char/tpm/tpm_eventlog.c | 2
-rw-r--r--  drivers/char/tpm/tpm_eventlog.h | 4
-rw-r--r--  drivers/char/tpm/tpm_i2c_atmel.c | 45
-rw-r--r--  drivers/char/tpm/tpm_i2c_infineon.c | 59
-rw-r--r--  drivers/char/tpm/tpm_i2c_nuvoton.c | 131
-rw-r--r--  drivers/char/tpm/tpm_ibmvtpm.c | 38
-rw-r--r--  drivers/char/tpm/tpm_infineon.c | 22
-rw-r--r--  drivers/char/tpm/tpm_nsc.c | 84
-rw-r--r--  drivers/char/tpm/tpm_tis.c | 829
-rw-r--r--  drivers/char/tpm/tpm_tis_core.c | 835
-rw-r--r--  drivers/char/tpm/tpm_tis_core.h | 156
-rw-r--r--  drivers/char/tpm/tpm_tis_spi.c | 272
-rw-r--r--  drivers/char/tpm/tpm_vtpm_proxy.c | 637
-rw-r--r--  drivers/char/tpm/xen-tpmfront.c | 36
-rw-r--r--  drivers/char/virtio_console.c | 23
-rw-r--r--  drivers/clk/Kconfig | 7
-rw-r--r--  drivers/clk/Makefile | 21
-rw-r--r--  drivers/clk/at91/clk-generated.c | 2
-rw-r--r--  drivers/clk/bcm/clk-iproc-armpll.c | 11
-rw-r--r--  drivers/clk/bcm/clk-iproc-asiu.c | 27
-rw-r--r--  drivers/clk/bcm/clk-iproc-pll.c | 32
-rw-r--r--  drivers/clk/clk-clps711x.c | 2
-rw-r--r--  drivers/clk/clk-conf.c | 2
-rw-r--r--  drivers/clk/clk-fixed-factor.c | 11
-rw-r--r--  drivers/clk/clk-fixed-rate.c | 11
-rw-r--r--  drivers/clk/clk-highbank.c | 9
-rw-r--r--  drivers/clk/clk-multiplier.c | 20
-rw-r--r--  drivers/clk/clk-nomadik.c | 48
-rw-r--r--  drivers/clk/clk-oxnas.c | 15
-rw-r--r--  drivers/clk/clk-s2mps11.c | 21
-rw-r--r--  drivers/clk/clk-stm32f4.c | 14
-rw-r--r--  drivers/clk/clk-u300.c | 59
-rw-r--r--  drivers/clk/clk-vt8500.c | 99
-rw-r--r--  drivers/clk/clk.c | 347
-rw-r--r--  drivers/clk/clkdev.c | 4
-rw-r--r--  drivers/clk/hisilicon/clk-hi3519.c | 116
-rw-r--r--  drivers/clk/hisilicon/clk-hi6220.c | 6
-rw-r--r--  drivers/clk/hisilicon/clk.c | 89
-rw-r--r--  drivers/clk/hisilicon/clk.h | 34
-rw-r--r--  drivers/clk/hisilicon/clkdivider-hi6220.c | 2
-rw-r--r--  drivers/clk/hisilicon/reset.c | 19
-rw-r--r--  drivers/clk/hisilicon/reset.h | 5
-rw-r--r--  drivers/clk/imx/clk-imx6q.c | 14
-rw-r--r--  drivers/clk/imx/clk-imx6sl.c | 14
-rw-r--r--  drivers/clk/imx/clk-imx6sx.c | 14
-rw-r--r--  drivers/clk/imx/clk-imx6ul.c | 18
-rw-r--r--  drivers/clk/imx/clk-imx7d.c | 755
-rw-r--r--  drivers/clk/imx/clk-pllv3.c | 28
-rw-r--r--  drivers/clk/imx/clk-vf610.c | 12
-rw-r--r--  drivers/clk/imx/clk.h | 90
-rw-r--r--  drivers/clk/meson/Kconfig | 19
-rw-r--r--  drivers/clk/meson/Makefile | 5
-rw-r--r--  drivers/clk/meson/clk-cpu.c | 73
-rw-r--r--  drivers/clk/meson/clk-mpll.c | 94
-rw-r--r--  drivers/clk/meson/clk-pll.c | 104
-rw-r--r--  drivers/clk/meson/clkc.h | 179
-rw-r--r--  drivers/clk/meson/gxbb.c | 944
-rw-r--r--  drivers/clk/meson/gxbb.h | 271
-rw-r--r--  drivers/clk/meson/meson8b-clkc.c | 425
-rw-r--r--  drivers/clk/nxp/clk-lpc32xx.c | 3
-rw-r--r--  drivers/clk/qcom/gcc-msm8660.c | 28
-rw-r--r--  drivers/clk/qcom/gcc-msm8996.c | 16
-rw-r--r--  drivers/clk/renesas/Kconfig | 2
-rw-r--r--  drivers/clk/renesas/Makefile | 4
-rw-r--r--  drivers/clk/renesas/r8a7795-cpg-mssr.c | 374
-rw-r--r--  drivers/clk/renesas/r8a7796-cpg-mssr.c | 192
-rw-r--r--  drivers/clk/renesas/rcar-gen3-cpg.c | 359
-rw-r--r--  drivers/clk/renesas/rcar-gen3-cpg.h | 43
-rw-r--r--  drivers/clk/renesas/renesas-cpg-mssr.c | 6
-rw-r--r--  drivers/clk/renesas/renesas-cpg-mssr.h | 1
-rw-r--r--  drivers/clk/rockchip/clk-rk3228.c | 125
-rw-r--r--  drivers/clk/rockchip/clk-rk3399.c | 20
-rw-r--r--  drivers/clk/samsung/Kconfig | 9
-rw-r--r--  drivers/clk/samsung/Makefile | 2
-rw-r--r--  drivers/clk/samsung/clk-cpu.c | 131
-rw-r--r--  drivers/clk/samsung/clk-cpu.h | 4
-rw-r--r--  drivers/clk/samsung/clk-exynos-audss.c | 12
-rw-r--r--  drivers/clk/samsung/clk-exynos-clkout.c | 2
-rw-r--r--  drivers/clk/samsung/clk-exynos3250.c | 41
-rw-r--r--  drivers/clk/samsung/clk-exynos4.c | 64
-rw-r--r--  drivers/clk/samsung/clk-exynos4415.c | 30
-rw-r--r--  drivers/clk/samsung/clk-exynos5250.c | 27
-rw-r--r--  drivers/clk/samsung/clk-exynos5260.c | 114
-rw-r--r--  drivers/clk/samsung/clk-exynos5410.c | 58
-rw-r--r--  drivers/clk/samsung/clk-exynos5420.c | 40
-rw-r--r--  drivers/clk/samsung/clk-exynos5433.c | 437
-rw-r--r--  drivers/clk/samsung/clk-exynos5440.c | 12
-rw-r--r--  drivers/clk/samsung/clk-exynos7.c | 119
-rw-r--r--  drivers/clk/samsung/clk-pll.c | 122
-rw-r--r--  drivers/clk/samsung/clk-s3c2410-dclk.c | 5
-rw-r--r--  drivers/clk/samsung/clk-s3c2410.c | 2
-rw-r--r--  drivers/clk/samsung/clk-s3c2412.c | 2
-rw-r--r--  drivers/clk/samsung/clk-s3c2443.c | 2
-rw-r--r--  drivers/clk/samsung/clk-s3c64xx.c | 2
-rw-r--r--  drivers/clk/samsung/clk-s5pv210-audss.c | 29
-rw-r--r--  drivers/clk/samsung/clk-s5pv210.c | 2
-rw-r--r--  drivers/clk/samsung/clk.c | 14
-rw-r--r--  drivers/clk/samsung/clk.h | 22
-rw-r--r--  drivers/clk/st/clk-flexgen.c | 4
-rw-r--r--  drivers/clk/st/clkgen-fsyn.c | 10
-rw-r--r--  drivers/clk/st/clkgen-pll.c | 27
-rw-r--r--  drivers/clk/sunxi-ng/Kconfig | 65
-rw-r--r--  drivers/clk/sunxi-ng/Makefile | 20
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-h3.c | 826
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-h3.h | 62
-rw-r--r--  drivers/clk/sunxi-ng/ccu_common.c | 90
-rw-r--r--  drivers/clk/sunxi-ng/ccu_common.h | 85
-rw-r--r--  drivers/clk/sunxi-ng/ccu_div.c | 136
-rw-r--r--  drivers/clk/sunxi-ng/ccu_div.h | 133
-rw-r--r--  drivers/clk/sunxi-ng/ccu_frac.c | 110
-rw-r--r--  drivers/clk/sunxi-ng/ccu_frac.h | 53
-rw-r--r--  drivers/clk/sunxi-ng/ccu_gate.c | 82
-rw-r--r--  drivers/clk/sunxi-ng/ccu_gate.h | 52
-rw-r--r--  drivers/clk/sunxi-ng/ccu_mp.c | 158
-rw-r--r--  drivers/clk/sunxi-ng/ccu_mp.h | 77
-rw-r--r--  drivers/clk/sunxi-ng/ccu_mult.h | 15
-rw-r--r--  drivers/clk/sunxi-ng/ccu_mux.c | 187
-rw-r--r--  drivers/clk/sunxi-ng/ccu_mux.h | 91
-rw-r--r--  drivers/clk/sunxi-ng/ccu_nk.c | 147
-rw-r--r--  drivers/clk/sunxi-ng/ccu_nk.h | 71
-rw-r--r--  drivers/clk/sunxi-ng/ccu_nkm.c | 153
-rw-r--r--  drivers/clk/sunxi-ng/ccu_nkm.h | 68
-rw-r--r--  drivers/clk/sunxi-ng/ccu_nkmp.c | 167
-rw-r--r--  drivers/clk/sunxi-ng/ccu_nkmp.h | 71
-rw-r--r--  drivers/clk/sunxi-ng/ccu_nm.c | 114
-rw-r--r--  drivers/clk/sunxi-ng/ccu_nm.h | 91
-rw-r--r--  drivers/clk/sunxi-ng/ccu_phase.c | 126
-rw-r--r--  drivers/clk/sunxi-ng/ccu_phase.h | 50
-rw-r--r--  drivers/clk/sunxi-ng/ccu_reset.c | 55
-rw-r--r--  drivers/clk/sunxi-ng/ccu_reset.h | 40
-rw-r--r--  drivers/clk/sunxi/clk-a10-pll2.c | 4
-rw-r--r--  drivers/clk/sunxi/clk-factors.c | 1
-rw-r--r--  drivers/clk/sunxi/clk-sun6i-apb0-gates.c | 9
-rw-r--r--  drivers/clk/sunxi/clk-sun6i-apb0.c | 9
-rw-r--r--  drivers/clk/sunxi/clk-sun6i-ar100.c | 21
-rw-r--r--  drivers/clk/sunxi/clk-sun8i-apb0.c | 9
-rw-r--r--  drivers/clk/sunxi/clk-sun8i-mbus.c | 2
-rw-r--r--  drivers/clk/sunxi/clk-sun9i-mmc.c | 28
-rw-r--r--  drivers/clk/tegra/clk-id.h | 1
-rw-r--r--  drivers/clk/tegra/clk-pll.c | 505
-rw-r--r--  drivers/clk/tegra/clk-tegra-periph.c | 25
-rw-r--r--  drivers/clk/tegra/clk-tegra114.c | 160
-rw-r--r--  drivers/clk/tegra/clk-tegra124.c | 156
-rw-r--r--  drivers/clk/tegra/clk-tegra210.c | 295
-rw-r--r--  drivers/clk/tegra/clk-tegra30.c | 113
-rw-r--r--  drivers/clk/tegra/clk.h | 17
-rw-r--r--  drivers/clk/ti/clk-33xx.c | 3
-rw-r--r--  drivers/clk/ti/clk-43xx.c | 7
-rw-r--r--  drivers/clk/ux500/u8500_of_clk.c | 16
-rw-r--r--  drivers/clk/ux500/u8540_clk.c | 16
-rw-r--r--  drivers/clk/ux500/u9540_clk.c | 4
-rw-r--r--  drivers/clocksource/Kconfig | 116
-rw-r--r--  drivers/clocksource/Makefile | 23
-rw-r--r--  drivers/clocksource/arm_arch_timer.c | 136
-rw-r--r--  drivers/clocksource/arm_global_timer.c | 59
-rw-r--r--  drivers/clocksource/armv7m_systick.c | 17
-rw-r--r--  drivers/clocksource/asm9260_timer.c | 22
-rw-r--r--  drivers/clocksource/bcm2835_timer.c | 38
-rw-r--r--  drivers/clocksource/bcm_kona_timer.c | 28
-rw-r--r--  drivers/clocksource/cadence_ttc_timer.c | 74
-rw-r--r--  drivers/clocksource/clksrc-dbx500-prcmu.c | 4
-rw-r--r--  drivers/clocksource/clksrc-probe.c | 14
-rw-r--r--  drivers/clocksource/clksrc_st_lpc.c | 20
-rw-r--r--  drivers/clocksource/clps711x-timer.c | 12
-rw-r--r--  drivers/clocksource/dummy_timer.c | 36
-rw-r--r--  drivers/clocksource/dw_apb_timer_of.c | 4
-rw-r--r--  drivers/clocksource/exynos_mct.c | 78
-rw-r--r--  drivers/clocksource/fsl_ftm_timer.c | 20
-rw-r--r--  drivers/clocksource/h8300_timer16.c | 12
-rw-r--r--  drivers/clocksource/h8300_timer8.c | 11
-rw-r--r--  drivers/clocksource/h8300_tpu.c | 10
-rw-r--r--  drivers/clocksource/meson6_timer.c | 19
-rw-r--r--  drivers/clocksource/metag_generic.c | 33
-rw-r--r--  drivers/clocksource/mips-gic-timer.c | 64
-rw-r--r--  drivers/clocksource/moxart_timer.c | 39
-rw-r--r--  drivers/clocksource/mps2-timer.c | 8
-rw-r--r--  drivers/clocksource/mtk_timer.c | 8
-rw-r--r--  drivers/clocksource/mxs_timer.c | 26
-rw-r--r--  drivers/clocksource/nomadik-mtu.c | 43
-rw-r--r--  drivers/clocksource/pxa_timer.c | 46
-rw-r--r--  drivers/clocksource/qcom-timer.c | 64
-rw-r--r--  drivers/clocksource/rockchip_timer.c | 53
-rw-r--r--  drivers/clocksource/samsung_pwm_timer.c | 70
-rw-r--r--  drivers/clocksource/sun4i_timer.c | 43
-rw-r--r--  drivers/clocksource/tango_xtal.c | 10
-rw-r--r--  drivers/clocksource/tegra20_timer.c | 24
-rw-r--r--  drivers/clocksource/time-armada-370-xp.c | 134
-rw-r--r--  drivers/clocksource/time-efm32.c | 17
-rw-r--r--  drivers/clocksource/time-lpc32xx.c | 10
-rw-r--r--  drivers/clocksource/time-orion.c | 50
-rw-r--r--  drivers/clocksource/time-pistachio.c | 26
-rw-r--r--  drivers/clocksource/timer-atlas7.c | 69
-rw-r--r--  drivers/clocksource/timer-atmel-pit.c | 48
-rw-r--r--  drivers/clocksource/timer-atmel-st.c | 42
-rw-r--r--  drivers/clocksource/timer-digicolor.c | 16
-rw-r--r--  drivers/clocksource/timer-imx-gpt.c | 51
-rw-r--r--  drivers/clocksource/timer-integrator-ap.c | 57
-rw-r--r--  drivers/clocksource/timer-keystone.c | 13
-rw-r--r--  drivers/clocksource/timer-nps.c | 14
-rw-r--r--  drivers/clocksource/timer-oxnas-rps.c | 297
-rw-r--r--  drivers/clocksource/timer-prima2.c | 42
-rw-r--r--  drivers/clocksource/timer-sp804.c | 86
-rw-r--r--  drivers/clocksource/timer-stm32.c | 8
-rw-r--r--  drivers/clocksource/timer-sun5i.c | 33
-rw-r--r--  drivers/clocksource/timer-ti-32k.c | 8
-rw-r--r--  drivers/clocksource/timer-u300.c | 36
-rw-r--r--  drivers/clocksource/versatile.c | 6
-rw-r--r--  drivers/clocksource/vf_pit_timer.c | 25
-rw-r--r--  drivers/clocksource/vt8500_timer.c | 24
-rw-r--r--  drivers/clocksource/zevio-timer.c | 4
-rw-r--r--  drivers/connector/cn_proc.c | 4
-rw-r--r--  drivers/cpufreq/Kconfig | 14
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c | 17
-rw-r--r--  drivers/cpufreq/amd_freq_sensitivity.c | 10
-rw-r--r--  drivers/cpufreq/cpufreq.c | 223
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 88
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c | 73
-rw-r--r--  drivers/cpufreq/cpufreq_governor.h | 24
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 38
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.h | 1
-rw-r--r--  drivers/cpufreq/cpufreq_performance.c | 19
-rw-r--r--  drivers/cpufreq/cpufreq_powersave.c | 19
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c | 157
-rw-r--r--  drivers/cpufreq/cpufreq_userspace.c | 104
-rw-r--r--  drivers/cpufreq/davinci-cpufreq.c | 22
-rw-r--r--  drivers/cpufreq/freq_table.c | 106
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 111
-rw-r--r--  drivers/cpufreq/mvebu-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/powernv-cpufreq.c | 205
-rw-r--r--  drivers/cpufreq/ppc_cbe_cpufreq_pmi.c | 3
-rw-r--r--  drivers/cpufreq/s3c24xx-cpufreq.c | 33
-rw-r--r--  drivers/cpufreq/s5pv210-cpufreq.c | 75
-rw-r--r--  drivers/cpuidle/cpuidle-arm.c | 27
-rw-r--r--  drivers/cpuidle/cpuidle-powernv.c | 101
-rw-r--r--  drivers/crypto/Kconfig | 13
-rw-r--r--  drivers/crypto/bfin_crc.c | 5
-rw-r--r--  drivers/crypto/caam/Kconfig | 18
-rw-r--r--  drivers/crypto/caam/Makefile | 4
-rw-r--r--  drivers/crypto/caam/caamhash.c | 5
-rw-r--r--  drivers/crypto/caam/caampkc.c | 607
-rw-r--r--  drivers/crypto/caam/caampkc.h | 70
-rw-r--r--  drivers/crypto/caam/compat.h | 3
-rw-r--r--  drivers/crypto/caam/ctrl.c | 125
-rw-r--r--  drivers/crypto/caam/desc.h | 11
-rw-r--r--  drivers/crypto/caam/desc_constr.h | 51
-rw-r--r--  drivers/crypto/caam/jr.c | 22
-rw-r--r--  drivers/crypto/caam/pdb.h | 188
-rw-r--r--  drivers/crypto/caam/pkc_desc.c | 36
-rw-r--r--  drivers/crypto/caam/regs.h | 151
-rw-r--r--  drivers/crypto/caam/sg_sw_sec4.h | 17
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-xts.c | 43
-rw-r--r--  drivers/crypto/ccp/ccp-crypto.h | 3
-rw-r--r--  drivers/crypto/marvell/cesa.c | 143
-rw-r--r--  drivers/crypto/marvell/cesa.h | 120
-rw-r--r--  drivers/crypto/marvell/cipher.c | 170
-rw-r--r--  drivers/crypto/marvell/hash.c | 162
-rw-r--r--  drivers/crypto/marvell/tdma.c | 130
-rw-r--r--  drivers/crypto/mxs-dcp.c | 47
-rw-r--r--  drivers/crypto/omap-aes.c | 36
-rw-r--r--  drivers/crypto/omap-des.c | 14
-rw-r--r--  drivers/crypto/omap-sham.c | 47
-rw-r--r--  drivers/crypto/picoxcell_crypto.c | 60
-rw-r--r--  drivers/crypto/qat/Kconfig | 3
-rw-r--r--  drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c | 1
-rw-r--r--  drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c | 1
-rw-r--r--  drivers/crypto/qat/qat_common/Makefile | 11
-rw-r--r--  drivers/crypto/qat/qat_common/adf_accel_devices.h | 1
-rw-r--r--  drivers/crypto/qat/qat_common/adf_aer.c | 49
-rw-r--r--  drivers/crypto/qat/qat_common/adf_common_drv.h | 2
-rw-r--r--  drivers/crypto/qat/qat_common/adf_sriov.c | 2
-rw-r--r--  drivers/crypto/qat/qat_common/adf_vf_isr.c | 2
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c | 8
-rw-r--r--  drivers/crypto/qat/qat_common/qat_asym_algs.c | 872
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 1
-rw-r--r--  drivers/crypto/qce/ablkcipher.c | 27
-rw-r--r--  drivers/crypto/qce/cipher.h | 2
-rw-r--r--  drivers/crypto/s5p-sss.c | 80
-rw-r--r--  drivers/crypto/sahara.c | 112
-rw-r--r--  drivers/crypto/talitos.c | 672
-rw-r--r--  drivers/crypto/ux500/cryp/Makefile | 6
-rw-r--r--  drivers/crypto/ux500/hash/Makefile | 2
-rw-r--r--  drivers/crypto/vmx/.gitignore | 2
-rw-r--r--  drivers/crypto/vmx/Kconfig | 2
-rw-r--r--  drivers/crypto/vmx/Makefile | 2
-rw-r--r--  drivers/crypto/vmx/aes_xts.c | 190
-rw-r--r--  drivers/crypto/vmx/aesp8-ppc.h | 4
-rw-r--r--  drivers/crypto/vmx/aesp8-ppc.pl | 1863
-rw-r--r--  drivers/crypto/vmx/vmx.c | 8
-rw-r--r--  drivers/dax/dax.c | 8
-rw-r--r--  drivers/dax/pmem.c | 14
-rw-r--r--  drivers/devfreq/Kconfig | 2
-rw-r--r--  drivers/devfreq/devfreq-event.c | 12
-rw-r--r--  drivers/devfreq/devfreq.c | 15
-rw-r--r--  drivers/devfreq/event/Kconfig | 4
-rw-r--r--  drivers/devfreq/event/exynos-ppmu.c | 3
-rw-r--r--  drivers/devfreq/exynos-bus.c | 11
-rw-r--r--  drivers/dma-buf/Kconfig | 15
-rw-r--r--  drivers/dma-buf/Makefile | 2
-rw-r--r--  drivers/dma-buf/dma-buf.c | 59
-rw-r--r--  drivers/dma-buf/fence-array.c | 144
-rw-r--r--  drivers/dma-buf/fence.c | 8
-rw-r--r--  drivers/dma-buf/sync_file.c | 2
-rw-r--r--  drivers/dma/Kconfig | 32
-rw-r--r--  drivers/dma/Makefile | 1
-rw-r--r--  drivers/dma/amba-pl08x.c | 10
-rw-r--r--  drivers/dma/at_xdmac.c | 12
-rw-r--r--  drivers/dma/bcm2835-dma.c | 7
-rw-r--r--  drivers/dma/bestcomm/bestcomm.c | 2
-rw-r--r--  drivers/dma/coh901318.c | 32
-rw-r--r--  drivers/dma/cppi41.c | 3
-rw-r--r--  drivers/dma/dma-axi-dmac.c | 8
-rw-r--r--  drivers/dma/dma-jz4740.c | 14
-rw-r--r--  drivers/dma/dmatest.c | 43
-rw-r--r--  drivers/dma/edma.c | 52
-rw-r--r--  drivers/dma/fsl-edma.c | 49
-rw-r--r--  drivers/dma/fsl_raid.c | 10
-rw-r--r--  drivers/dma/fsldma.c | 2
-rw-r--r--  drivers/dma/hsu/hsu.c | 90
-rw-r--r--  drivers/dma/hsu/pci.c | 11
-rw-r--r--  drivers/dma/img-mdc-dma.c | 4
-rw-r--r--  drivers/dma/imx-dma.c | 31
-rw-r--r--  drivers/dma/imx-sdma.c | 32
-rw-r--r--  drivers/dma/ioat/init.c | 2
-rw-r--r--  drivers/dma/k3dma.c | 19
-rw-r--r--  drivers/dma/mmp_pdma.c | 19
-rw-r--r--  drivers/dma/mmp_tdma.c | 9
-rw-r--r--  drivers/dma/moxart-dma.c | 8
-rw-r--r--  drivers/dma/mpc512x_dma.c | 1
-rw-r--r--  drivers/dma/mv_xor.c | 2
-rw-r--r--  drivers/dma/mv_xor_v2.c | 878
-rw-r--r--  drivers/dma/nbpfaxi.c | 18
-rw-r--r--  drivers/dma/omap-dma.c | 100
-rw-r--r--  drivers/dma/pl330.c | 11
-rw-r--r--  drivers/dma/ppc4xx/adma.c | 2
-rw-r--r--  drivers/dma/pxa_dma.c | 27
-rw-r--r--  drivers/dma/qcom/bam_dma.c | 109
-rw-r--r--  drivers/dma/qcom/hidma.c | 1
-rw-r--r--  drivers/dma/qcom/hidma_ll.c | 1
-rw-r--r--  drivers/dma/qcom/hidma_mgmt.c | 7
-rw-r--r--  drivers/dma/s3c24xx-dma.c | 29
-rw-r--r--  drivers/dma/sh/rcar-dmac.c | 41
-rw-r--r--  drivers/dma/sh/shdmac.c | 9
-rw-r--r--  drivers/dma/sh/sudmac.c | 9
-rw-r--r--  drivers/dma/sirf-dma.c | 12
-rw-r--r--  drivers/dma/ste_dma40.c | 6
-rw-r--r--  drivers/dma/ste_dma40_ll.c | 2
-rw-r--r--  drivers/dma/sun6i-dma.c | 2
-rw-r--r--  drivers/dma/tegra20-apb-dma.c | 49
-rw-r--r--  drivers/dma/ti-dma-crossbar.c | 2
-rw-r--r--  drivers/dma/timb_dma.c | 8
-rw-r--r--  drivers/dma/txx9dmac.c | 9
-rw-r--r--  drivers/dma/xilinx/Makefile | 3
-rw-r--r--  drivers/dma/xilinx/xilinx_dma.c | 2689
-rw-r--r--  drivers/dma/xilinx/zynqmp_dma.c | 1151
-rw-r--r--  drivers/edac/Kconfig | 15
-rw-r--r--  drivers/edac/Makefile | 1
-rw-r--r--  drivers/edac/altera_edac.c | 492
-rw-r--r--  drivers/edac/altera_edac.h | 17
-rw-r--r--  drivers/edac/amd64_edac.c | 4
-rw-r--r--  drivers/edac/skx_edac.c | 1121
-rw-r--r--  drivers/extcon/Makefile | 3
-rw-r--r--  drivers/extcon/devres.c | 216
-rw-r--r--  drivers/extcon/extcon-adc-jack.c | 34
-rw-r--r--  drivers/extcon/extcon-usb-gpio.c | 32
-rw-r--r--  drivers/extcon/extcon.c | 344
-rw-r--r--  drivers/firmware/Kconfig | 12
-rw-r--r--  drivers/firmware/Makefile | 1
-rw-r--r--  drivers/firmware/arm_scpi.c | 47
-rw-r--r--  drivers/firmware/broadcom/bcm47xx_sprom.c | 2
-rw-r--r--  drivers/firmware/dmi-id.c | 8
-rw-r--r--  drivers/firmware/efi/arm-runtime.c | 5
-rw-r--r--  drivers/firmware/efi/efi-pstore.c | 13
-rw-r--r--  drivers/firmware/efi/efi.c | 180
-rw-r--r--  drivers/firmware/efi/efibc.c | 4
-rw-r--r--  drivers/firmware/efi/runtime-wrappers.c | 53
-rw-r--r--  drivers/firmware/psci.c | 66
-rw-r--r--  drivers/firmware/qcom_scm-32.c | 327
-rw-r--r--  drivers/firmware/qcom_scm-64.c | 307
-rw-r--r--  drivers/firmware/qcom_scm.c | 353
-rw-r--r--  drivers/firmware/qcom_scm.h | 47
-rw-r--r--  drivers/firmware/scpi_pm_domain.c | 163
-rw-r--r--  drivers/fpga/Kconfig | 1
-rw-r--r--  drivers/gpio/Kconfig | 35
-rw-r--r--  drivers/gpio/Makefile | 2
-rw-r--r--  drivers/gpio/gpio-74x164.c | 9
-rw-r--r--  drivers/gpio/gpio-clps711x.c | 10
-rw-r--r--  drivers/gpio/gpio-dwapb.c | 1
-rw-r--r--  drivers/gpio/gpio-f7188x.c | 22
-rw-r--r--  drivers/gpio/gpio-intel-mid.c | 16
-rw-r--r--  drivers/gpio/gpio-lynxpoint.c | 1
-rw-r--r--  drivers/gpio/gpio-max77620.c | 315
-rw-r--r--  drivers/gpio/gpio-mcp23s08.c | 2
-rw-r--r--  drivers/gpio/gpio-menz127.c | 1
-rw-r--r--  drivers/gpio/gpio-merrifield.c | 444
-rw-r--r--  drivers/gpio/gpio-mmio.c | 51
-rw-r--r--  drivers/gpio/gpio-palmas.c | 1
-rw-r--r--  drivers/gpio/gpio-pca953x.c | 21
-rw-r--r--  drivers/gpio/gpio-pcf857x.c | 9
-rw-r--r--  drivers/gpio/gpio-rcar.c | 3
-rw-r--r--  drivers/gpio/gpio-rdc321x.c | 1
-rw-r--r--  drivers/gpio/gpio-sa1100.c | 2
-rw-r--r--  drivers/gpio/gpio-sch311x.c | 1
-rw-r--r--  drivers/gpio/gpio-stmpe.c | 18
-rw-r--r--  drivers/gpio/gpio-syscon.c | 4
-rw-r--r--  drivers/gpio/gpio-tc3589x.c | 1
-rw-r--r--  drivers/gpio/gpio-tps65218.c | 7
-rw-r--r--  drivers/gpio/gpio-tps6586x.c | 1
-rw-r--r--  drivers/gpio/gpio-tps65910.c | 1
-rw-r--r--  drivers/gpio/gpio-viperboard.c | 1
-rw-r--r--  drivers/gpio/gpio-wm831x.c | 1
-rw-r--r--  drivers/gpio/gpio-wm8350.c | 1
-rw-r--r--  drivers/gpio/gpio-wm8994.c | 1
-rw-r--r--  drivers/gpio/gpio-xilinx.c | 48
-rw-r--r--  drivers/gpio/gpio-xlp.c | 52
-rw-r--r--  drivers/gpio/gpiolib-acpi.c | 1
-rw-r--r--  drivers/gpio/gpiolib-of.c | 128
-rw-r--r--  drivers/gpio/gpiolib.c | 511
-rw-r--r--  drivers/gpu/drm/Makefile | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 112
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 54
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 57
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 271
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 44
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 80
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 264
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 179
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 230
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 90
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 115
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 99
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 170
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 339
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 215
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ci_dpm.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 50
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_dpm.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 83
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/fiji_smc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 137
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 480
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 54
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_smc.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_smum.h | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ppsmc.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 77
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 48
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_smc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 77
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 76
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 182
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 160
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 57
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 4
-rw-r--r--  drivers/gpu/drm/amd/include/amd_pcie.h | 14
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h | 11
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h | 1
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h | 3
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_sh_mask.h | 108
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/cgs_common.h | 19
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 153
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c | 17
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 23
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c | 404
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c | 74
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h | 15
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c | 9
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 26
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 10
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c | 20
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c | 464
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c | 590
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h | 26
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c | 299
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h | 165
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c | 33
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h | 17
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c | 272
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c | 27
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h | 7
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 10
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 29
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h | 4
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 190
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 60
-rw-r--r--  drivers/gpu/drm/amd/scheduler/sched_fence.c | 81
-rw-r--r--  drivers/gpu/drm/arc/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/arc/Makefile | 2
-rw-r--r--  drivers/gpu/drm/arc/arcpgu.h | 2
-rw-r--r--  drivers/gpu/drm/arc/arcpgu_crtc.c | 16
-rw-r--r--  drivers/gpu/drm/arc/arcpgu_drv.c | 64
-rw-r--r--  drivers/gpu/drm/arc/arcpgu_hdmi.c | 18
-rw-r--r--  drivers/gpu/drm/arc/arcpgu_sim.c | 128
-rw-r--r--  drivers/gpu/drm/arm/Kconfig | 17
-rw-r--r--  drivers/gpu/drm/arm/Makefile | 2
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_crtc.c | 19
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_drv.c | 13
-rw-r--r--  drivers/gpu/drm/arm/malidp_crtc.c | 216
-rw-r--r--  drivers/gpu/drm/arm/malidp_drv.c | 519
-rw-r--r--  drivers/gpu/drm/arm/malidp_drv.h | 54
-rw-r--r--  drivers/gpu/drm/arm/malidp_hw.c | 691
-rw-r--r--  drivers/gpu/drm/arm/malidp_hw.h | 241
-rw-r--r--  drivers/gpu/drm/arm/malidp_planes.c | 298
-rw-r--r--  drivers/gpu/drm/arm/malidp_regs.h | 172
-rw-r--r--  drivers/gpu/drm/armada/Kconfig | 4
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.c | 16
-rw-r--r--  drivers/gpu/drm/armada/armada_drv.c | 3
-rw-r--r--  drivers/gpu/drm/armada/armada_gem.c | 4
-rw-r--r--  drivers/gpu/drm/armada/armada_overlay.c | 1
-rw-r--r--  drivers/gpu/drm/ast/Kconfig | 4
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_fb.c | 3
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 3
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 10
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c | 13
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 8
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 19
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c | 12
-rw-r--r--  drivers/gpu/drm/bochs/Kconfig | 4
-rw-r--r--  drivers/gpu/drm/bochs/bochs_drv.c | 2
-rw-r--r--  drivers/gpu/drm/bochs/bochs_mm.c | 15
-rw-r--r--  drivers/gpu/drm/bridge/Kconfig | 19
-rw-r--r--  drivers/gpu/drm/bridge/Makefile | 3
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/Kconfig | 15
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/Makefile | 3
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7511.h | 392
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 1124
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7533.c | 265
-rw-r--r--  drivers/gpu/drm/bridge/analogix-anx78xx.c | 8
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix_dp_core.h | 8
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c | 12
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h | 5
-rw-r--r--  drivers/gpu/drm/bridge/dw-hdmi.c | 30
-rw-r--r--  drivers/gpu/drm/bridge/nxp-ptn3460.c | 8
-rw-r--r--  drivers/gpu/drm/bridge/parade-ps8622.c | 14
-rw-r--r--  drivers/gpu/drm/bridge/sii902x.c | 467
-rw-r--r--  drivers/gpu/drm/bridge/tc358767.c | 1413
-rw-r--r--  drivers/gpu/drm/cirrus/Kconfig | 4
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.c | 2
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_main.c | 17
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_mode.c | 11
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_ttm.c | 13
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 109
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 573
-rw-r--r--  drivers/gpu/drm/drm_auth.c | 285
-rw-r--r--  drivers/gpu/drm/drm_blend.c | 238
-rw-r--r--  drivers/gpu/drm/drm_bridge.c | 2
-rw-r--r--  drivers/gpu/drm/drm_bufs.c | 8
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 725
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 36
-rw-r--r--  drivers/gpu/drm/drm_crtc_internal.h | 92
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c | 3
-rw-r--r--  drivers/gpu/drm/drm_dp_aux_dev.c | 3
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c | 88
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 14
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 245
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 14
-rw-r--r--  drivers/gpu/drm/drm_edid_load.c | 2
-rw-r--r--  drivers/gpu/drm/drm_fb_cma_helper.c | 43
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 56
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 149
-rw-r--r--  drivers/gpu/drm/drm_fourcc.c | 320
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 6
-rw-r--r--  drivers/gpu/drm/drm_info.c | 117
-rw-r--r--  drivers/gpu/drm/drm_internal.h | 21
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 176
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 243
-rw-r--r--  drivers/gpu/drm/drm_legacy.h | 8
-rw-r--r--  drivers/gpu/drm/drm_lock.c | 240
-rw-r--r--  drivers/gpu/drm/drm_memory.c | 2
-rw-r--r--  drivers/gpu/drm/drm_mipi_dsi.c | 38
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 4
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 4
-rw-r--r--  drivers/gpu/drm/drm_modeset_lock.c | 13
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 51
-rw-r--r--  drivers/gpu/drm/drm_plane_helper.c | 38
-rw-r--r--  drivers/gpu/drm/drm_platform.c | 18
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 10
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c | 21
-rw-r--r--  drivers/gpu/drm/drm_scatter.c | 2
-rw-r--r--  drivers/gpu/drm/drm_simple_kms_helper.c | 206
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 71
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 58
-rw-r--r--  drivers/gpu/drm/drm_vma_manager.c | 3
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c | 5
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c | 18
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 64
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 2
-rw-r--r--  drivers/gpu/drm/etnaviv/state_hi.xml.h | 7
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dpi.c | 9
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 18
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c | 9
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c | 29
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 41
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 20
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c | 35
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_iommu.c | 77
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_iommu.h | 91
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.c | 67
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.c | 26
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 8
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 10
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 6
-rw-r--r--  drivers/gpu/drm/fsl-dcu/Kconfig | 5
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c | 44
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 49
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h | 2
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c | 15
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h | 3
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c | 16
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h | 1
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c | 87
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_tcon.c | 1
-rw-r--r--  drivers/gpu/drm/gma500/Kconfig | 4
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_hdmi.c | 3
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_lvds.c | 9
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 9
-rw-r--r--  drivers/gpu/drm/gma500/gma_display.c | 11
-rw-r--r--  drivers/gpu/drm/gma500/gma_display.h | 4
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c | 6
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_display.c | 7
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_lvds.c | 9
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c | 34
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c | 11
-rw-r--r--  drivers/gpu/drm/i2c/Kconfig | 6
-rw-r--r--  drivers/gpu/drm/i2c/Makefile | 2
-rw-r--r--  drivers/gpu/drm/i2c/ch7006_drv.c | 9
-rw-r--r--  drivers/gpu/drm/i915/Kconfig | 22
-rw-r--r--  drivers/gpu/drm/i915/Kconfig.debug | 3
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 12
-rw-r--r--  drivers/gpu/drm/i915/gvt/Makefile | 5
-rw-r--r--  drivers/gpu/drm/i915/gvt/debug.h | 34
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c | 145
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 69
-rw-r--r--  drivers/gpu/drm/i915/gvt/hypercall.h | 38
-rw-r--r--  drivers/gpu/drm/i915/gvt/mpt.h | 49
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 53
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 570
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 1958
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 789
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 1276
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_batch_pool.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 515
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.h | 45
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 55
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 55
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence.c | 38
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 395
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 84
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.c | 45
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 58
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 203
-rw-r--r--  drivers/gpu/drm/i915/i915_guc_reg.h | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_guc_submission.c | 538
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 807
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 23
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 503
-rw-r--r--  drivers/gpu/drm/i915/i915_pvinfo.h | 113
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 78
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 40
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 54
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.c | 45
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.h | 92
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 50
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 23
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c | 595
-rw-r--r--  drivers/gpu/drm/i915/intel_color.c | 23
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 50
-rw-r--r--  drivers/gpu/drm/i915/intel_csr.c | 27
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 274
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 388
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 2820
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 1087
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_aux_backlight.c | 172
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 25
-rw-r--r--  drivers/gpu/drm/i915/intel_dpio_phy.c | 470
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.c | 63
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 354
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 135
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.h | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c | 179
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_panel_vbt.c | 90
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_pll.c | 42
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 42
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 136
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 141
-rw-r--r--  drivers/gpu/drm/i915/intel_fifo_underrun.c | 30
-rw-r--r--  drivers/gpu/drm/i915/intel_guc.h | 50
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_fwif.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_loader.c | 220
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.c | 104
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.h | 45
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 429
-rw-r--r--  drivers/gpu/drm/i915/intel_hotplug.c | 21
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 22
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 964
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h | 24
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 58
-rw-r--r--  drivers/gpu/drm/i915/intel_mocs.c | 100
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 210
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 151
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 48
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 1773
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 123
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 1317
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 176
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 275
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 87
-rw-r--r--  drivers/gpu/drm/i915/intel_sideband.c | 32
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 81
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 328
-rw-r--r--  drivers/gpu/drm/i915/intel_vbt_defs.h | 6
-rw-r--r--  drivers/gpu/drm/imx/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/imx/dw_hdmi-imx.c | 32
-rw-r--r--  drivers/gpu/drm/imx/imx-drm-core.c | 145
-rw-r--r--  drivers/gpu/drm/imx/imx-drm.h | 21
-rw-r--r--  drivers/gpu/drm/imx/imx-ldb.c | 189
-rw-r--r--  drivers/gpu/drm/imx/imx-tve.c | 97
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-crtc.c | 402
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-plane.c | 555
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-plane.h | 16
-rw-r--r--  drivers/gpu/drm/imx/parallel-display.c | 149
-rw-r--r--  drivers/gpu/drm/mediatek/Kconfig | 12
-rw-r--r--  drivers/gpu/drm/mediatek/Makefile | 7
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_cec.c | 265
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_cec.h | 26
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.c | 13
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_gem.c | 13
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_gem.h | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_plane.c | 1
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dsi.c | 9
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_hdmi.c | 1828
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_hdmi.h | 23
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c | 358
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_hdmi_regs.h | 238
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_mipi_tx.c | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c | 515
-rw-r--r--  drivers/gpu/drm/mgag200/Kconfig | 4
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c | 2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_main.c | 4
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 9
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c | 13
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/msm/Makefile | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 17
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.c | 2
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_cfg.c | 8
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_cfg.h | 2
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c | 69
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 32
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.h | 2
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c | 4
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c | 4
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c | 2
-rw-r--r--  drivers/gpu/drm/msm/edp/edp_connector.c | 10
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.c | 117
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.h | 14
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_connector.c | 8
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c | 2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c | 31
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 44
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h | 2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c | 9
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h | 203
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c | 113
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c | 14
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 16
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c | 26
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | 10
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c | 125
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 339
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 16
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c | 235
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c | 22
-rw-r--r--  drivers/gpu/drm/msm/msm_atomic.c | 39
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 283
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 24
-rw-r--r--  drivers/gpu/drm/msm/msm_fb.c | 12
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c | 17
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 139
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h | 23
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_prime.c | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_shrinker.c | 168
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 27
-rw-r--r--  drivers/gpu/drm/msm/msm_iommu.c | 6
-rw-r--r--  drivers/gpu/drm/msm/msm_kms.h | 8
-rw-r--r--  drivers/gpu/drm/msm/msm_perf.c | 7
-rw-r--r--  drivers/gpu/drm/msm/msm_rd.c | 71
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/Kconfig | 6
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/crtc.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/disp.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/tvnv17.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/cl0080.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/class.h | 10
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/device.h | 17
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h | 10
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h | 14
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h | 9
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 116
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 76
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_chan.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 27
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 25
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hwmon.c | 36
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_usif.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/subdev.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c | 102
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c | 44
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 72
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/user.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c | 38
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c | 38
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c | 78
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c | 66
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c | 55
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c | 81
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c | 136
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c | 38
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c | 58
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c | 58
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c | 67
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogp100.c | 34
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c | 179
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c | 171
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c | 59
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c | 20
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c | 20
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c | 394
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h | 96
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c | 896
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c | 69
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c | 43
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c | 146
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c | 75
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c | 118
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c | 103
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c | 44
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c | 72
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c | 54
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c | 39
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c | 27
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c | 40
-rw-r--r--  drivers/gpu/drm/omapdrm/Kconfig | 5
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/Kconfig | 28
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/Makefile | 28
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c | 11
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/connector-dvi.c | 5
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/connector-hdmi.c | 4
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/encoder-opa362.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-dpi.c | 26
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c | 7
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c | 22
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c | 4
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/core.c | 5
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dispc.c | 471
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dispc.h | 5
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dispc_coefs.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/display.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dpi.c | 136
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dsi.c | 57
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss-of.c | 10
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss.c | 255
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss.h | 45
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss_features.c | 46
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss_features.h | 1
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi.h | 6
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi4.c | 11
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi5.c | 11
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi_common.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi_phy.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi_pll.c | 78
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi_wp.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/omapdss.h | 871
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/output.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/pll.c | 129
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/rfbi.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/sdi.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/venc.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/video-pll.c | 9
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_connector.c | 10
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_crtc.c | 56
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.c | 16
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.h | 14
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fb.c | 20
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fbdev.c | 8
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c | 17
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 166
-rw-r--r--  drivers/gpu/drm/qxl/Kconfig | 5
-rw-r--r--  drivers/gpu/drm/qxl/qxl_cmd.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 11
-rw-r--r--  drivers/gpu/drm/qxl/qxl_draw.c | 3
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h | 1
-rw-r--r--  drivers/gpu/drm/qxl/qxl_fb.c | 8
-rw-r--r--  drivers/gpu/drm/qxl/qxl_kms.c | 10
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_acpi.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c | 53
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 26
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 40
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 6
-rw-r--r--  drivers/gpu/drm/rcar-du/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/rcar-du/Makefile | 4
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_drv.c | 17
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_drv.h | 1
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_encoder.c | 15
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_encoder.h | 10
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c | 69
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_kms.c | 15
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c | 1
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_plane.c | 29
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_plane.h | 2
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_regs.h | 5
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_vgacon.c | 3
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_vsp.c | 43
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_vsp.h | 2
-rw-r--r--  drivers/gpu/drm/rockchip/Kconfig | 5
-rw-r--r--  drivers/gpu/drm/rockchip/analogix_dp-rockchip.c | 189
-rw-r--r--  drivers/gpu/drm/rockchip/dw-mipi-dsi.c | 9
-rw-r--r--  drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c | 1
-rw-r--r--  drivers/gpu/drm/rockchip/inno_hdmi.c | 9
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 210
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.h | 12
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_fb.c | 81
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c | 5
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 19
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_gem.h | 2
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 77
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_vop_reg.c | 7
-rw-r--r--  drivers/gpu/drm/shmobile/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_crtc.c | 4
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c3
-rw-r--r--drivers/gpu/drm/sis/sis_mm.c2
-rw-r--r--drivers/gpu/drm/sti/Kconfig1
-rw-r--r--drivers/gpu/drm/sti/sti_awg_utils.c4
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.c26
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.h3
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.c71
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c39
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c148
-rw-r--r--drivers/gpu/drm/sti/sti_drv.h1
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c43
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c46
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c43
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c350
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.h13
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c40
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.c21
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.h2
-rw-r--r--drivers/gpu/drm/sti/sti_plane.c112
-rw-r--r--drivers/gpu/drm/sti/sti_plane.h8
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c43
-rw-r--r--drivers/gpu/drm/sti/sti_vid.c12
-rw-r--r--drivers/gpu/drm/sti/sti_vid.h2
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c3
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_crtc.c12
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c13
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_framebuffer.c3
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c10
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tv.c9
-rw-r--r--drivers/gpu/drm/tegra/dc.c176
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c245
-rw-r--r--drivers/gpu/drm/tegra/drm.c4
-rw-r--r--drivers/gpu/drm/tegra/drm.h2
-rw-r--r--drivers/gpu/drm/tegra/dsi.c277
-rw-r--r--drivers/gpu/drm/tegra/fb.c2
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c508
-rw-r--r--drivers/gpu/drm/tegra/hdmi.h21
-rw-r--r--drivers/gpu/drm/tegra/output.c9
-rw-r--r--drivers/gpu/drm/tegra/rgb.c1
-rw-r--r--drivers/gpu/drm/tegra/sor.c717
-rw-r--r--drivers/gpu/drm/tegra/sor.h3
-rw-r--r--drivers/gpu/drm/tilcdc/Kconfig1
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c168
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c121
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c19
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c12
-rw-r--r--drivers/gpu/drm/udl/Kconfig5
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c1
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c6
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c183
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c23
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c70
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h12
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c25
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c22
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c15
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c13
-rw-r--r--drivers/gpu/drm/vc4/vc4_qpu_defines.h17
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h22
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate.c13
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate_shaders.c455
-rw-r--r--drivers/gpu/drm/vgem/Makefile2
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c291
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.h20
-rw-r--r--drivers/gpu/drm/vgem/vgem_fence.c283
-rw-r--r--drivers/gpu/drm/via/via_mm.c2
-rw-r--r--drivers/gpu/drm/virtio/Kconfig4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c187
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drm_bus.c10
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c150
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ttm.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h4
-rw-r--r--drivers/gpu/host1x/cdma.c42
-rw-r--r--drivers/gpu/host1x/channel.c5
-rw-r--r--drivers/gpu/host1x/debug.c38
-rw-r--r--drivers/gpu/host1x/dev.c16
-rw-r--r--drivers/gpu/host1x/dev.h38
-rw-r--r--drivers/gpu/host1x/hw/cdma_hw.c23
-rw-r--r--drivers/gpu/host1x/hw/channel_hw.c5
-rw-r--r--drivers/gpu/host1x/hw/debug_hw.c36
-rw-r--r--drivers/gpu/host1x/hw/intr_hw.c30
-rw-r--r--drivers/gpu/host1x/hw/syncpt_hw.c10
-rw-r--r--drivers/gpu/host1x/intr.c16
-rw-r--r--drivers/gpu/host1x/intr.h4
-rw-r--r--drivers/gpu/host1x/job.c8
-rw-r--r--drivers/gpu/host1x/mipi.c63
-rw-r--r--drivers/gpu/host1x/syncpt.c58
-rw-r--r--drivers/gpu/host1x/syncpt.h8
-rw-r--r--drivers/gpu/ipu-v3/ipu-dc.c9
-rw-r--r--drivers/gpu/ipu-v3/ipu-di.c3
-rw-r--r--drivers/gpu/ipu-v3/ipu-dmfc.c213
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c62
-rw-r--r--drivers/hid/Kconfig31
-rw-r--r--drivers/hid/Makefile3
-rw-r--r--drivers/hid/hid-alps.c506
-rw-r--r--drivers/hid/hid-apple.c2
-rw-r--r--drivers/hid/hid-core.c10
-rw-r--r--drivers/hid/hid-ids.h10
-rw-r--r--drivers/hid/hid-led.c523
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c11
-rw-r--r--drivers/hsi/Makefile3
-rw-r--r--drivers/hsi/clients/cmt_speech.c4
-rw-r--r--drivers/hsi/clients/ssi_protocol.c110
-rw-r--r--drivers/hsi/controllers/omap_ssi.h18
-rw-r--r--drivers/hsi/controllers/omap_ssi_core.c46
-rw-r--r--drivers/hsi/controllers/omap_ssi_port.c226
-rw-r--r--drivers/hsi/hsi_core.c781
-rw-r--r--drivers/hwmon/Kconfig42
-rw-r--r--drivers/hwmon/Makefile3
-rw-r--r--drivers/hwmon/ad7314.c48
-rw-r--r--drivers/hwmon/ads7871.c65
-rw-r--r--drivers/hwmon/adt7411.c47
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c41
-rw-r--r--drivers/hwmon/emc6w201.c2
-rw-r--r--drivers/hwmon/ftsteutates.c819
-rw-r--r--drivers/hwmon/ina3221.c445
-rw-r--r--drivers/hwmon/it87.c2
-rw-r--r--drivers/hwmon/jc42.c16
-rw-r--r--drivers/hwmon/jz4740-hwmon.c65
-rw-r--r--drivers/hwmon/lm75.c243
-rw-r--r--drivers/hwmon/lm90.c455
-rw-r--r--drivers/hwmon/sht3x.c775
-rw-r--r--drivers/hwmon/tmp102.c249
-rw-r--r--drivers/hwmon/tmp401.c35
-rw-r--r--drivers/hwspinlock/qcom_hwspinlock.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x.c90
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c87
-rw-r--r--drivers/hwtracing/intel_th/core.c89
-rw-r--r--drivers/hwtracing/intel_th/gth.c18
-rw-r--r--drivers/hwtracing/intel_th/intel_th.h6
-rw-r--r--drivers/hwtracing/intel_th/pci.c5
-rw-r--r--drivers/hwtracing/stm/core.c52
-rw-r--r--drivers/i2c/Kconfig4
-rw-r--r--drivers/i2c/busses/Kconfig11
-rw-r--r--drivers/i2c/busses/i2c-at91.c24
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c2
-rw-r--r--drivers/i2c/busses/i2c-bcm-kona.c4
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c3
-rw-r--r--drivers/i2c/busses/i2c-brcmstb.c5
-rw-r--r--drivers/i2c/busses/i2c-cadence.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c18
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h1
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c143
-rw-r--r--drivers/i2c/busses/i2c-elektor.c14
-rw-r--r--drivers/i2c/busses/i2c-i801.c152
-rw-r--r--drivers/i2c/busses/i2c-jz4780.c4
-rw-r--r--drivers/i2c/busses/i2c-meson.c6
-rw-r--r--drivers/i2c/busses/i2c-ocores.c14
-rw-r--r--drivers/i2c/busses/i2c-opal.c2
-rw-r--r--drivers/i2c/busses/i2c-pca-isa.c15
-rw-r--r--drivers/i2c/busses/i2c-qup.c155
-rw-r--r--drivers/i2c/busses/i2c-rcar.c2
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c512
-rw-r--r--drivers/i2c/busses/i2c-robotfuzz-osif.c2
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c2
-rw-r--r--drivers/i2c/busses/i2c-versatile.c46
-rw-r--r--drivers/i2c/busses/i2c-xlp9xx.c13
-rw-r--r--drivers/i2c/i2c-core.c340
-rw-r--r--drivers/i2c/i2c-dev.c7
-rw-r--r--drivers/i2c/i2c-smbus.c112
-rw-r--r--drivers/i2c/muxes/i2c-demux-pinctrl.c15
-rw-r--r--drivers/ide/cmd640.c2
-rw-r--r--drivers/ide/hpt366.c2
-rw-r--r--drivers/ide/ide-cd.c3
-rw-r--r--drivers/ide/ide-cd_ioctl.c3
-rw-r--r--drivers/ide/ide-disk.c4
-rw-r--r--drivers/ide/ide-floppy.c2
-rw-r--r--drivers/ide/ide-gd.c3
-rw-r--r--drivers/ide/ide-tape.c6
-rw-r--r--drivers/ide/pmac.c1
-rw-r--r--drivers/idle/intel_idle.c178
-rw-r--r--drivers/iio/Kconfig8
-rw-r--r--drivers/iio/Makefile1
-rw-r--r--drivers/iio/accel/Kconfig29
-rw-r--r--drivers/iio/accel/Makefile3
-rw-r--r--drivers/iio/accel/bma180.c2
-rw-r--r--drivers/iio/accel/bma220_spi.c338
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c4
-rw-r--r--drivers/iio/accel/kxcjk-1013.c2
-rw-r--r--drivers/iio/accel/mma7455_core.c3
-rw-r--r--drivers/iio/accel/mma7660.c277
-rw-r--r--drivers/iio/accel/mma8452.c223
-rw-r--r--drivers/iio/accel/mma9551.c2
-rw-r--r--drivers/iio/accel/mma9553.c2
-rw-r--r--drivers/iio/accel/st_accel.h1
-rw-r--r--drivers/iio/accel/st_accel_core.c76
-rw-r--r--drivers/iio/accel/st_accel_i2c.c5
-rw-r--r--drivers/iio/accel/st_accel_spi.c1
-rw-r--r--drivers/iio/adc/Kconfig12
-rw-r--r--drivers/iio/adc/Makefile1
-rw-r--r--drivers/iio/adc/ad7266.c8
-rw-r--r--drivers/iio/adc/ad7291.c3
-rw-r--r--drivers/iio/adc/ad7298.c3
-rw-r--r--drivers/iio/adc/ad7476.c14
-rw-r--r--drivers/iio/adc/ad7791.c37
-rw-r--r--drivers/iio/adc/ad7793.c33
-rw-r--r--drivers/iio/adc/ad7887.c14
-rw-r--r--drivers/iio/adc/ad7923.c14
-rw-r--r--drivers/iio/adc/ad799x.c29
-rw-r--r--drivers/iio/adc/bcm_iproc_adc.c644
-rw-r--r--drivers/iio/adc/cc10001_adc.c2
-rw-r--r--drivers/iio/adc/hi8435.c3
-rw-r--r--drivers/iio/adc/ina2xx-adc.c7
-rw-r--r--drivers/iio/adc/max1027.c1
-rw-r--r--drivers/iio/adc/max1363.c67
-rw-r--r--drivers/iio/adc/mcp320x.c1
-rw-r--r--drivers/iio/adc/mcp3422.c1
-rw-r--r--drivers/iio/adc/mxs-lradc.c34
-rw-r--r--drivers/iio/adc/nau7802.c20
-rw-r--r--drivers/iio/adc/ti-adc081c.c30
-rw-r--r--drivers/iio/adc/ti-adc0832.c1
-rw-r--r--drivers/iio/adc/ti-adc128s052.c1
-rw-r--r--drivers/iio/adc/ti-ads1015.c132
-rw-r--r--drivers/iio/adc/ti-ads8688.c1
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c22
-rw-r--r--drivers/iio/adc/vf610_adc.c3
-rw-r--r--drivers/iio/adc/xilinx-xadc-events.c4
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dma.c4
-rw-r--r--drivers/iio/chemical/Kconfig8
-rw-r--r--drivers/iio/chemical/atlas-ph-sensor.c269
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_buffer.c49
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c57
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_i2c.c4
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_trigger.c156
-rw-r--r--drivers/iio/dac/Kconfig9
-rw-r--r--drivers/iio/dac/ad5421.c6
-rw-r--r--drivers/iio/dac/ad5504.c2
-rw-r--r--drivers/iio/dac/ad5755.c188
-rw-r--r--drivers/iio/dac/stx104.c145
-rw-r--r--drivers/iio/dummy/Kconfig1
-rw-r--r--drivers/iio/dummy/iio_simple_dummy.c102
-rw-r--r--drivers/iio/dummy/iio_simple_dummy_buffer.c3
-rw-r--r--drivers/iio/dummy/iio_simple_dummy_events.c2
-rw-r--r--drivers/iio/gyro/bmg160_core.c138
-rw-r--r--drivers/iio/gyro/st_gyro_core.c12
-rw-r--r--drivers/iio/health/afe4403.c299
-rw-r--r--drivers/iio/health/afe4404.c308
-rw-r--r--drivers/iio/health/afe440x.h48
-rw-r--r--drivers/iio/humidity/Kconfig2
-rw-r--r--drivers/iio/humidity/am2315.c1
-rw-r--r--drivers/iio/humidity/htu21.c1
-rw-r--r--drivers/iio/iio_core.h3
-rw-r--r--drivers/iio/imu/bmi160/bmi160_core.c30
-rw-r--r--drivers/iio/imu/inv_mpu6050/Kconfig8
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c6
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c1
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h2
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c2
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c1
-rw-r--r--drivers/iio/industrialio-core.c180
-rw-r--r--drivers/iio/industrialio-event.c19
-rw-r--r--drivers/iio/industrialio-sw-device.c182
-rw-r--r--drivers/iio/industrialio-trigger.c37
-rw-r--r--drivers/iio/light/Kconfig3
-rw-r--r--drivers/iio/light/acpi-als.c2
-rw-r--r--drivers/iio/light/adjd_s311.c2
-rw-r--r--drivers/iio/light/apds9300.c2
-rw-r--r--drivers/iio/light/apds9960.c4
-rw-r--r--drivers/iio/light/cm36651.c2
-rw-r--r--drivers/iio/light/gp2ap020a00f.c26
-rw-r--r--drivers/iio/light/isl29125.c23
-rw-r--r--drivers/iio/light/jsa1212.c3
-rw-r--r--drivers/iio/light/lm3533-als.c2
-rw-r--r--drivers/iio/light/ltr501.c7
-rw-r--r--drivers/iio/light/max44000.c3
-rw-r--r--drivers/iio/light/opt3001.c4
-rw-r--r--drivers/iio/light/stk3310.c2
-rw-r--r--drivers/iio/light/tcs3414.c14
-rw-r--r--drivers/iio/light/tcs3472.c15
-rw-r--r--drivers/iio/light/tsl2563.c2
-rw-r--r--drivers/iio/light/us5182d.c2
-rw-r--r--drivers/iio/magnetometer/Kconfig2
-rw-r--r--drivers/iio/magnetometer/ak8975.c154
-rw-r--r--drivers/iio/magnetometer/bmc150_magn_i2c.c3
-rw-r--r--drivers/iio/magnetometer/bmc150_magn_spi.c3
-rw-r--r--drivers/iio/magnetometer/hmc5843_core.c2
-rw-r--r--drivers/iio/magnetometer/mag3110.c2
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c12
-rw-r--r--drivers/iio/potentiometer/Kconfig23
-rw-r--r--drivers/iio/potentiometer/Makefile1
-rw-r--r--drivers/iio/potentiometer/max5487.c161
-rw-r--r--drivers/iio/potentiometer/mcp4531.c159
-rw-r--r--drivers/iio/potentiometer/tpl0102.c4
-rw-r--r--drivers/iio/pressure/Kconfig31
-rw-r--r--drivers/iio/pressure/Makefile3
-rw-r--r--drivers/iio/pressure/bmp280-core.c1119
-rw-r--r--drivers/iio/pressure/bmp280-i2c.c91
-rw-r--r--drivers/iio/pressure/bmp280-regmap.c84
-rw-r--r--drivers/iio/pressure/bmp280-spi.c125
-rw-r--r--drivers/iio/pressure/bmp280.h112
-rw-r--r--drivers/iio/pressure/hp206c.c1
-rw-r--r--drivers/iio/pressure/mpl3115.c2
-rw-r--r--drivers/iio/pressure/ms5611_core.c3
-rw-r--r--drivers/iio/pressure/ms5637.c13
-rw-r--r--drivers/iio/pressure/st_pressure.h1
-rw-r--r--drivers/iio/pressure/st_pressure_core.c252
-rw-r--r--drivers/iio/pressure/st_pressure_i2c.c4
-rw-r--r--drivers/iio/pressure/st_pressure_spi.c1
-rw-r--r--drivers/iio/proximity/as3935.c12
-rw-r--r--drivers/iio/proximity/pulsedlight-lidar-lite-v2.c16
-rw-r--r--drivers/iio/proximity/sx9500.c4
-rw-r--r--drivers/iio/temperature/tsys02d.c1
-rw-r--r--drivers/iio/trigger/Kconfig12
-rw-r--r--drivers/iio/trigger/Makefile1
-rw-r--r--drivers/iio/trigger/iio-trig-loop.c143
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/core/cma.c116
-rw-r--r--drivers/infiniband/core/device.c9
-rw-r--r--drivers/infiniband/core/iwcm.c54
-rw-r--r--drivers/infiniband/core/iwcm.h2
-rw-r--r--drivers/infiniband/core/iwpm_util.c3
-rw-r--r--drivers/infiniband/core/multicast.c25
-rw-r--r--drivers/infiniband/core/netlink.c6
-rw-r--r--drivers/infiniband/core/sa_query.c41
-rw-r--r--drivers/infiniband/core/sysfs.c15
-rw-r--r--drivers/infiniband/core/ucma.c18
-rw-r--r--drivers/infiniband/core/umem.c7
-rw-r--r--drivers/infiniband/core/uverbs.h13
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c535
-rw-r--r--drivers/infiniband/core/uverbs_main.c38
-rw-r--r--drivers/infiniband/core/verbs.c163
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c4
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c27
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c193
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c52
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c7
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h25
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c127
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c31
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c40
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h5
-rw-r--r--drivers/infiniband/hw/hfi1/Kconfig3
-rw-r--r--drivers/infiniband/hw/hfi1/Makefile2
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c572
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.h38
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c388
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h6
-rw-r--r--drivers/infiniband/hw/hfi1/chip_registers.h4
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.c132
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c63
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c97
-rw-r--r--drivers/infiniband/hw/hfi1/firmware.c125
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h147
-rw-r--r--drivers/infiniband/hw/hfi1/init.c56
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c86
-rw-r--r--drivers/infiniband/hw/hfi1/mad.h7
-rw-r--r--drivers/infiniband/hw/hfi1/mmu_rb.c254
-rw-r--r--drivers/infiniband/hw/hfi1/mmu_rb.h37
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c68
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c21
-rw-r--r--drivers/infiniband/hw/hfi1/pio_copy.c12
-rw-r--r--drivers/infiniband/hw/hfi1/platform.c14
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c68
-rw-r--r--drivers/infiniband/hw/hfi1/qp.h4
-rw-r--r--drivers/infiniband/hw/hfi1/qsfp.c441
-rw-r--r--drivers/infiniband/hw/hfi1/qsfp.h6
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c90
-rw-r--r--drivers/infiniband/hw/hfi1/ruc.c57
-rw-r--r--drivers/infiniband/hw/hfi1/sysfs.c25
-rw-r--r--drivers/infiniband/hw/hfi1/trace.h1333
-rw-r--r--drivers/infiniband/hw/hfi1/trace_ctxts.h141
-rw-r--r--drivers/infiniband/hw/hfi1/trace_dbg.h155
-rw-r--r--drivers/infiniband/hw/hfi1/trace_ibhdrs.h209
-rw-r--r--drivers/infiniband/hw/hfi1/trace_misc.h81
-rw-r--r--drivers/infiniband/hw/hfi1/trace_rc.h123
-rw-r--r--drivers/infiniband/hw/hfi1/trace_rx.h322
-rw-r--r--drivers/infiniband/hw/hfi1/trace_tx.h642
-rw-r--r--drivers/infiniband/hw/hfi1/uc.c61
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c86
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c124
-rw-r--r--drivers/infiniband/hw/hfi1/user_pages.c19
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c306
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.h8
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c89
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.h8
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.h2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw.h4
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c30
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_d.h3
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_hw.c1
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_main.c12
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_puda.c4
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_type.h2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_uk.c29
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_user.h2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_utils.c5
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c80
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c44
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c23
-rw-r--r--drivers/infiniband/hw/mlx4/main.c225
-rw-r--r--drivers/infiniband/hw/mlx4/mcg.c14
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h11
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c48
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c109
-rw-r--r--drivers/infiniband/hw/mlx5/main.c468
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c6
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h75
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c4
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c704
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c112
-rw-r--r--drivers/infiniband/hw/mlx5/user.h88
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c24
-rw-r--r--drivers/infiniband/hw/mthca/mthca_reset.c42
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c33
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c14
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c19
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_sli.h12
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c26
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c43
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_main.c19
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_sysfs.c17
-rw-r--r--drivers/infiniband/sw/Makefile1
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c126
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.h2
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c249
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c10
-rw-r--r--drivers/infiniband/sw/rxe/Kconfig24
-rw-r--r--drivers/infiniband/sw/rxe/Makefile24
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c405
-rw-r--r--drivers/infiniband/sw/rxe/rxe.h77
-rw-r--r--drivers/infiniband/sw/rxe/rxe_av.c98
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c747
-rw-r--r--drivers/infiniband/sw/rxe/rxe_cq.c165
-rw-r--r--drivers/infiniband/sw/rxe/rxe_dma.c166
-rw-r--r--drivers/infiniband/sw/rxe/rxe_hdr.h952
-rw-r--r--drivers/infiniband/sw/rxe/rxe_icrc.c96
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h286
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mcast.c190
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mmap.c173
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c643
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c703
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.h56
-rw-r--r--drivers/infiniband/sw/rxe/rxe_opcode.c961
-rw-r--r--drivers/infiniband/sw/rxe/rxe_opcode.h129
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h172
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c502
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.h163
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c851
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.c217
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.h178
-rw-r--r--drivers/infiniband/sw/rxe/rxe_recv.c420
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c757
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c1381
-rw-r--r--drivers/infiniband/sw/rxe/rxe_srq.c193
-rw-r--r--drivers/infiniband/sw/rxe/rxe_sysfs.c157
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.c154
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.h95
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c1330
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h480
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c16
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c6
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c9
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c5
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c10
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c27
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h3
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c16
-rw-r--r--drivers/input/input-mt.c17
-rw-r--r--drivers/input/input.c7
-rw-r--r--drivers/input/joydev.c6
-rw-r--r--drivers/input/joystick/xpad.c43
-rw-r--r--drivers/input/keyboard/clps711x-keypad.c4
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c19
-rw-r--r--drivers/input/keyboard/tc3589x-keypad.c2
-rw-r--r--drivers/input/keyboard/tegra-kbc.c2
-rw-r--r--drivers/input/misc/Kconfig23
-rw-r--r--drivers/input/misc/Makefile2
-rw-r--r--drivers/input/misc/apanel.c2
-rw-r--r--drivers/input/misc/atmel_captouch.c290
-rw-r--r--drivers/input/misc/hisi_powerkey.c142
-rw-r--r--drivers/input/misc/regulator-haptic.c2
-rw-r--r--drivers/input/misc/rotary_encoder.c23
-rw-r--r--drivers/input/misc/xen-kbdfront.c8
-rw-r--r--drivers/input/mouse/elantech.c10
-rw-r--r--drivers/input/mouse/lifebook.c2
-rw-r--r--drivers/input/rmi4/rmi_bus.c5
-rw-r--r--drivers/input/rmi4/rmi_f01.c22
-rw-r--r--drivers/input/rmi4/rmi_f11.c9
-rw-r--r--drivers/input/rmi4/rmi_f12.c1
-rw-r--r--drivers/input/rmi4/rmi_i2c.c46
-rw-r--r--drivers/input/serio/ams_delta_serio.c2
-rw-r--r--drivers/input/tablet/Kconfig15
-rw-r--r--drivers/input/tablet/Makefile1
-rw-r--r--drivers/input/tablet/pegasus_notetaker.c450
-rw-r--r--drivers/input/touchscreen/Kconfig53
-rw-r--r--drivers/input/touchscreen/Makefile4
-rw-r--r--drivers/input/touchscreen/ad7879.c2
-rw-r--r--drivers/input/touchscreen/ads7846.c1
-rw-r--r--drivers/input/touchscreen/chipone_icn8318.c61
-rw-r--r--drivers/input/touchscreen/cyttsp_core.c2
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c7
-rw-r--r--drivers/input/touchscreen/ili210x.c2
-rw-r--r--drivers/input/touchscreen/migor_ts.c6
-rw-r--r--drivers/input/touchscreen/of_touchscreen.c81
-rw-r--r--drivers/input/touchscreen/pixcir_i2c_ts.c53
-rw-r--r--drivers/input/touchscreen/raydium_i2c_ts.c1238
-rw-r--r--drivers/input/touchscreen/silead.c566
-rw-r--r--drivers/input/touchscreen/sis_i2c.c413
-rw-r--r--drivers/input/touchscreen/sur40.c16
-rw-r--r--drivers/input/touchscreen/surface3_spi.c427
-rw-r--r--drivers/input/touchscreen/ti_am335x_tsc.c2
-rw-r--r--drivers/input/touchscreen/tsc200x-core.c2
-rw-r--r--drivers/input/touchscreen/wacom_w8001.c10
-rw-r--r--drivers/iommu/Kconfig21
-rw-r--r--drivers/iommu/Makefile3
-rw-r--r--drivers/iommu/amd_iommu.c1014
-rw-r--r--drivers/iommu/amd_iommu_types.h1
-rw-r--r--drivers/iommu/amd_iommu_v2.c5
-rw-r--r--drivers/iommu/arm-smmu-v3.c2
-rw-r--r--drivers/iommu/arm-smmu.c28
-rw-r--r--drivers/iommu/dma-iommu.c19
-rw-r--r--drivers/iommu/dmar.c21
-rw-r--r--drivers/iommu/exynos-iommu.c106
-rw-r--r--drivers/iommu/intel-iommu.c16
-rw-r--r--drivers/iommu/intel-svm.c2
-rw-r--r--drivers/iommu/iommu.c32
-rw-r--r--drivers/iommu/msm_iommu.c870
-rw-r--r--drivers/iommu/msm_iommu.h73
-rw-r--r--drivers/iommu/mtk_iommu.c49
-rw-r--r--drivers/iommu/mtk_iommu.h77
-rw-r--r--drivers/iommu/mtk_iommu_v1.c727
-rw-r--r--drivers/iommu/of_iommu.c5
-rw-r--r--drivers/iommu/rockchip-iommu.c181
-rw-r--r--drivers/irqchip/Kconfig24
-rw-r--r--drivers/irqchip/Makefile2
-rw-r--r--drivers/irqchip/exynos-combiner.c14
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c46
-rw-r--r--drivers/irqchip/irq-aspeed-vic.c230
-rw-r--r--drivers/irqchip/irq-bcm2835.c3
-rw-r--r--drivers/irqchip/irq-bcm2836.c40
-rw-r--r--drivers/irqchip/irq-bcm7120-l2.c10
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c4
-rw-r--r--drivers/irqchip/irq-clps711x.c2
-rw-r--r--drivers/irqchip/irq-gic-common.c4
-rw-r--r--drivers/irqchip/irq-gic-pm.c184
-rw-r--r--drivers/irqchip/irq-gic-v2m.c1
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c411
-rw-r--r--drivers/irqchip/irq-gic-v3.c40
-rw-r--r--drivers/irqchip/irq-gic.c164
-rw-r--r--drivers/irqchip/irq-hip04.c25
-rw-r--r--drivers/irqchip/irq-mips-gic.c5
-rw-r--r--drivers/irqchip/irq-omap-intc.c2
-rw-r--r--drivers/irqchip/irq-s3c24xx.c36
-rw-r--r--drivers/irqchip/irq-sirfsoc.c11
-rw-r--r--drivers/irqchip/irq-tegra.c4
-rw-r--r--drivers/irqchip/irq-vic.c5
-rw-r--r--drivers/isdn/hardware/eicon/divasmain.c12
-rw-r--r--drivers/isdn/hardware/eicon/platform.h6
-rw-r--r--drivers/leds/Kconfig14
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/led-triggers.c2
-rw-r--r--drivers/leds/leds-gpio.c5
-rw-r--r--drivers/leds/leds-hp6xx.c2
-rw-r--r--drivers/leds/leds-is31fl32xx.c24
-rw-r--r--drivers/leds/leds-lp3952.c301
-rw-r--r--drivers/leds/leds-pca9532.c75
-rw-r--r--drivers/leds/leds-powernv.c2
-rw-r--r--drivers/leds/trigger/Kconfig8
-rw-r--r--drivers/leds/trigger/Makefile2
-rw-r--r--drivers/leds/trigger/ledtrig-cpu.c32
-rw-r--r--drivers/leds/trigger/ledtrig-disk.c41
-rw-r--r--drivers/lightnvm/Kconfig10
-rw-r--r--drivers/lightnvm/core.c242
-rw-r--r--drivers/lightnvm/gennvm.c385
-rw-r--r--drivers/lightnvm/gennvm.h10
-rw-r--r--drivers/lightnvm/rrpc.c155
-rw-r--r--drivers/lightnvm/rrpc.h13
-rw-r--r--drivers/lightnvm/sysblk.c8
-rw-r--r--drivers/macintosh/Kconfig13
-rw-r--r--drivers/macintosh/ams/ams-i2c.c1
-rw-r--r--drivers/macintosh/smu.c9
-rw-r--r--drivers/macintosh/via-pmu-led.c4
-rw-r--r--drivers/macintosh/windfarm_pm112.c1
-rw-r--r--drivers/macintosh/windfarm_pm72.c1
-rw-r--r--drivers/macintosh/windfarm_rm31.c1
-rw-r--r--drivers/mailbox/Kconfig10
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/bcm-pdc-mailbox.c1532
-rw-r--r--drivers/mailbox/mailbox-test.c1
-rw-r--r--drivers/mailbox/pl320-ipc.c46
-rw-r--r--drivers/md/Makefile3
-rw-r--r--drivers/md/bcache/btree.c4
-rw-r--r--drivers/md/bcache/closure.c2
-rw-r--r--drivers/md/bcache/closure.h3
-rw-r--r--drivers/md/bcache/debug.c6
-rw-r--r--drivers/md/bcache/io.c3
-rw-r--r--drivers/md/bcache/journal.c9
-rw-r--r--drivers/md/bcache/movinggc.c2
-rw-r--r--drivers/md/bcache/request.c34
-rw-r--r--drivers/md/bcache/super.c48
-rw-r--r--drivers/md/bcache/writeback.c4
-rw-r--r--drivers/md/bcache/writeback.h2
-rw-r--r--drivers/md/bitmap.c53
-rw-r--r--drivers/md/dm-bufio.c11
-rw-r--r--drivers/md/dm-builtin.c2
-rw-r--r--drivers/md/dm-cache-target.c18
-rw-r--r--drivers/md/dm-core.h149
-rw-r--r--drivers/md/dm-crypt.c17
-rw-r--r--drivers/md/dm-era-target.c4
-rw-r--r--drivers/md/dm-flakey.c6
-rw-r--r--drivers/md/dm-io.c65
-rw-r--r--drivers/md/dm-ioctl.c31
-rw-r--r--drivers/md/dm-kcopyd.c13
-rw-r--r--drivers/md/dm-linear.c21
-rw-r--r--drivers/md/dm-log-writes.c19
-rw-r--r--drivers/md/dm-log.c14
-rw-r--r--drivers/md/dm-mpath.c374
-rw-r--r--drivers/md/dm-raid.c3038
-rw-r--r--drivers/md/dm-raid1.c32
-rw-r--r--drivers/md/dm-region-hash.c6
-rw-r--r--drivers/md/dm-rq.c988
-rw-r--r--drivers/md/dm-rq.h64
-rw-r--r--drivers/md/dm-snap-persistent.c24
-rw-r--r--drivers/md/dm-snap.c27
-rw-r--r--drivers/md/dm-stats.c11
-rw-r--r--drivers/md/dm-stripe.c34
-rw-r--r--drivers/md/dm-sysfs.c3
-rw-r--r--drivers/md/dm-table.c114
-rw-r--r--drivers/md/dm-target.c11
-rw-r--r--drivers/md/dm-thin-metadata.c30
-rw-r--r--drivers/md/dm-thin-metadata.h3
-rw-r--r--drivers/md/dm-thin.c124
-rw-r--r--drivers/md/dm-zero.c15
-rw-r--r--drivers/md/dm.c1272
-rw-r--r--drivers/md/dm.h36
-rw-r--r--drivers/md/linear.c4
-rw-r--r--drivers/md/md-cluster.c12
-rw-r--r--drivers/md/md.c122
-rw-r--r--drivers/md/md.h15
-rw-r--r--drivers/md/multipath.c37
-rw-r--r--drivers/md/persistent-data/dm-btree.c9
-rw-r--r--drivers/md/raid0.c4
-rw-r--r--drivers/md/raid1.c173
-rw-r--r--drivers/md/raid10.c320
-rw-r--r--drivers/md/raid10.h3
-rw-r--r--drivers/md/raid5-cache.c79
-rw-r--r--drivers/md/raid5.c183
-rw-r--r--drivers/media/Kconfig3
-rw-r--r--drivers/media/Makefile4
-rw-r--r--drivers/media/cec-edid.c171
-rw-r--r--drivers/media/common/v4l2-tpg/v4l2-tpg-core.c4
-rw-r--r--drivers/media/dvb-core/demux.h167
-rw-r--r--drivers/media/dvb-core/dmxdev.c2
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.c39
-rw-r--r--drivers/media/dvb-core/dvb_demux.c17
-rw-r--r--drivers/media/dvb-core/dvb_demux.h4
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c33
-rw-r--r--drivers/media/dvb-core/dvb_frontend.h53
-rw-r--r--drivers/media/dvb-core/dvb_math.h7
-rw-r--r--drivers/media/dvb-core/dvb_net.c2
-rw-r--r--drivers/media/dvb-core/dvb_ringbuffer.h15
-rw-r--r--drivers/media/dvb-frontends/Kconfig15
-rw-r--r--drivers/media/dvb-frontends/Makefile2
-rw-r--r--drivers/media/dvb-frontends/af9033.c327
-rw-r--r--drivers/media/dvb-frontends/ascot2e.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.c1937
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.h24
-rw-r--r--drivers/media/dvb-frontends/cxd2841er_priv.h1
-rw-r--r--drivers/media/dvb-frontends/dib0090.c6
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c3
-rw-r--r--drivers/media/dvb-frontends/ds3000.c9
-rw-r--r--drivers/media/dvb-frontends/helene.c1042
-rw-r--r--drivers/media/dvb-frontends/helene.h79
-rw-r--r--drivers/media/dvb-frontends/horus3a.c26
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c144
-rw-r--r--drivers/media/dvb-frontends/m88ds3103_priv.h3
-rw-r--r--drivers/media/dvb-frontends/m88rs2000.c2
-rw-r--r--drivers/media/dvb-frontends/mb86a20s.c3
-rw-r--r--drivers/media/dvb-frontends/mn88472.c613
-rw-r--r--drivers/media/dvb-frontends/mn88472.h45
-rw-r--r--drivers/media/dvb-frontends/mn88472_priv.h38
-rw-r--r--drivers/media/dvb-frontends/mn88473.c7
-rw-r--r--drivers/media/dvb-frontends/rtl2830.c203
-rw-r--r--drivers/media/dvb-frontends/rtl2830_priv.h2
-rw-r--r--drivers/media/dvb-frontends/rtl2832.c26
-rw-r--r--drivers/media/dvb-frontends/rtl2832_priv.h1
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.c2
-rw-r--r--drivers/media/dvb-frontends/si2168.c127
-rw-r--r--drivers/media/dvb-frontends/si2168_priv.h8
-rw-r--r--drivers/media/i2c/Kconfig24
-rw-r--r--drivers/media/i2c/adv7180.c18
-rw-r--r--drivers/media/i2c/adv7511.c465
-rw-r--r--drivers/media/i2c/adv7604.c393
-rw-r--r--drivers/media/i2c/adv7842.c413
-rw-r--r--drivers/media/i2c/cs53l32a.c7
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c7
-rw-r--r--drivers/media/i2c/msp3400-driver.c7
-rw-r--r--drivers/media/i2c/mt9t001.c17
-rw-r--r--drivers/media/i2c/mt9v032.c279
-rw-r--r--drivers/media/i2c/saa7115.c7
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c4
-rw-r--r--drivers/media/i2c/tc358743.c15
-rw-r--r--drivers/media/i2c/tvaudio.c7
-rw-r--r--drivers/media/i2c/wm8775.c7
-rw-r--r--drivers/media/media-device.c47
-rw-r--r--drivers/media/media-devnode.c149
-rw-r--r--drivers/media/pci/bt8xx/dst_ca.c2
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.c11
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.h1
-rw-r--r--drivers/media/pci/cobalt/cobalt-v4l2.c4
-rw-r--r--drivers/media/pci/cx18/cx18-alsa-mixer.c6
-rw-r--r--drivers/media/pci/cx18/cx18-driver.c2
-rw-r--r--drivers/media/pci/cx18/cx18-driver.h6
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.c2
-rw-r--r--drivers/media/pci/cx18/cx18-streams.c12
-rw-r--r--drivers/media/pci/cx18/cx18-vbi.c6
-rw-r--r--drivers/media/pci/cx23885/cx23885-417.c4
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c59
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c10
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c104
-rw-r--r--drivers/media/pci/cx23885/cx23885-vbi.c3
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c5
-rw-r--r--drivers/media/pci/cx23885/cx23885.h2
-rw-r--r--drivers/media/pci/cx25821/cx25821-alsa.c2
-rw-r--r--drivers/media/pci/cx25821/cx25821-core.c10
-rw-r--r--drivers/media/pci/cx25821/cx25821-video.c5
-rw-r--r--drivers/media/pci/cx25821/cx25821.h1
-rw-r--r--drivers/media/pci/cx88/cx88-alsa.c8
-rw-r--r--drivers/media/pci/cx88/cx88-blackbird.c4
-rw-r--r--drivers/media/pci/cx88/cx88-dvb.c4
-rw-r--r--drivers/media/pci/cx88/cx88-mpeg.c10
-rw-r--r--drivers/media/pci/cx88/cx88-vbi.c3
-rw-r--r--drivers/media/pci/cx88/cx88-video.c13
-rw-r--r--drivers/media/pci/cx88/cx88.h2
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-core.c3
-rw-r--r--drivers/media/pci/dt3155/dt3155.c15
-rw-r--r--drivers/media/pci/dt3155/dt3155.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-mixer.c6
-rw-r--r--drivers/media/pci/netup_unidvb/Kconfig7
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb.h10
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_ci.c4
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_core.c174
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c22
-rw-r--r--drivers/media/pci/saa7134/saa7134-dvb.c1
-rw-r--r--drivers/media/pci/saa7134/saa7134-empress.c1
-rw-r--r--drivers/media/pci/saa7134/saa7134-ts.c3
-rw-r--r--drivers/media/pci/saa7134/saa7134-vbi.c3
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c5
-rw-r--r--drivers/media/pci/saa7134/saa7134.h3
-rw-r--r--drivers/media/pci/saa7164/saa7164-encoder.c6
-rw-r--r--drivers/media/pci/saa7164/saa7164.h4
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c15
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2.c44
-rw-r--r--drivers/media/pci/solo6x10/solo6x10.h2
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c20
-rw-r--r--drivers/media/pci/tw68/tw68-core.c15
-rw-r--r--drivers/media/pci/tw68/tw68-video.c4
-rw-r--r--drivers/media/pci/tw68/tw68.h1
-rw-r--r--drivers/media/pci/tw686x/Kconfig2
-rw-r--r--drivers/media/pci/tw686x/tw686x-audio.c92
-rw-r--r--drivers/media/pci/tw686x/tw686x-core.c56
-rw-r--r--drivers/media/pci/tw686x/tw686x-regs.h9
-rw-r--r--drivers/media/pci/tw686x/tw686x-video.c595
-rw-r--r--drivers/media/pci/tw686x/tw686x.h42
-rw-r--r--drivers/media/pci/zoran/zr36016.c4
-rw-r--r--drivers/media/platform/Kconfig45
-rw-r--r--drivers/media/platform/Makefile7
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c14
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.h2
-rw-r--r--drivers/media/platform/blackfin/bfin_capture.c17
-rw-r--r--drivers/media/platform/coda/coda-common.c20
-rw-r--r--drivers/media/platform/coda/coda.h1
-rw-r--r--drivers/media/platform/davinci/ccdc_hw_device.h7
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c14
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c15
-rw-r--r--drivers/media/platform/davinci/vpif_capture.h2
-rw-r--r--drivers/media/platform/davinci/vpif_display.c15
-rw-r--r--drivers/media/platform/davinci/vpif_display.h2
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.c12
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.h2
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c8
-rw-r--r--drivers/media/platform/exynos4-is/fimc-capture.c9
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.c12
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.h3
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.c15
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.h2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp-video.c11
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp.h2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.c22
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.h2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-m2m.c32
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c17
-rw-r--r--drivers/media/platform/m2m-deinterlace.c17
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c28
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.h2
-rw-r--r--drivers/media/platform/mtk-vcodec/Makefile19
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h334
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c1302
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.h58
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c439
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c137
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.h26
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c54
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h26
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c94
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h87
-rw-r--r--drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c679
-rw-r--r--drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c484
-rw-r--r--drivers/media/platform/mtk-vcodec/venc_drv_base.h62
-rw-r--r--drivers/media/platform/mtk-vcodec/venc_drv_if.c113
-rw-r--r--drivers/media/platform/mtk-vcodec/venc_drv_if.h163
-rw-r--r--drivers/media/platform/mtk-vcodec/venc_ipi_msg.h210
-rw-r--r--drivers/media/platform/mtk-vcodec/venc_vpu_if.c238
-rw-r--r--drivers/media/platform/mtk-vcodec/venc_vpu_if.h61
-rw-r--r--drivers/media/platform/mtk-vpu/Makefile3
-rw-r--r--drivers/media/platform/mtk-vpu/mtk_vpu.c946
-rw-r--r--drivers/media/platform/mtk-vpu/mtk_vpu.h162
-rw-r--r--drivers/media/platform/mx2_emmaprp.c19
-rw-r--r--drivers/media/platform/omap/omap_vout.c111
-rw-r--r--drivers/media/platform/omap/omap_voutdef.h7
-rw-r--r--drivers/media/platform/omap/omap_voutlib.c2
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c14
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.h1
-rw-r--r--drivers/media/platform/rcar-fcp.c187
-rw-r--r--drivers/media/platform/rcar-vin/Kconfig11
-rw-r--r--drivers/media/platform/rcar-vin/Makefile3
-rw-r--r--drivers/media/platform/rcar-vin/rcar-core.c334
-rw-r--r--drivers/media/platform/rcar-vin/rcar-dma.c1187
-rw-r--r--drivers/media/platform/rcar-vin/rcar-v4l2.c874
-rw-r--r--drivers/media/platform/rcar-vin/rcar-vin.h163
-rw-r--r--drivers/media/platform/rcar_jpu.c24
-rw-r--r--drivers/media/platform/s3c-camif/camif-capture.c5
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.c11
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.h2
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c17
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.h1
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c21
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.h2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c230
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_common.h4
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_dec.c28
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c35
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_iommu.h79
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_pm.c13
-rw-r--r--drivers/media/platform/s5p-tv/mixer.h2
-rw-r--r--drivers/media/platform/s5p-tv/mixer_video.c19
-rw-r--r--drivers/media/platform/sh_veu.c19
-rw-r--r--drivers/media/platform/sh_vou.c16
-rw-r--r--drivers/media/platform/soc_camera/Kconfig4
-rw-r--r--drivers/media/platform/soc_camera/Makefile2
-rw-r--r--drivers/media/platform/soc_camera/atmel-isi.c15
-rw-r--r--drivers/media/platform/soc_camera/rcar_vin.c14
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c17
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-filter.h304
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-hw.c331
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c18
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp.h2
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c1
-rw-r--r--drivers/media/platform/ti-vpe/cal.c17
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c22
-rw-r--r--drivers/media/platform/via-camera.c2
-rw-r--r--drivers/media/platform/vim2m.c22
-rw-r--r--drivers/media/platform/vivid/Kconfig8
-rw-r--r--drivers/media/platform/vivid/Makefile4
-rw-r--r--drivers/media/platform/vivid/vivid-cec.c231
-rw-r--r--drivers/media/platform/vivid/vivid-cec.h33
-rw-r--r--drivers/media/platform/vivid/vivid-core.c118
-rw-r--r--drivers/media/platform/vivid/vivid-core.h27
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-cap.c13
-rw-r--r--drivers/media/platform/vivid/vivid-sdr-cap.c4
-rw-r--r--drivers/media/platform/vivid/vivid-vbi-cap.c2
-rw-r--r--drivers/media/platform/vivid/vivid-vbi-out.c2
-rw-r--r--drivers/media/platform/vivid/vivid-vid-cap.c34
-rw-r--r--drivers/media/platform/vivid/vivid-vid-common.c7
-rw-r--r--drivers/media/platform/vivid/vivid-vid-out.c7
-rw-r--r--drivers/media/platform/vsp1/Makefile3
-rw-r--r--drivers/media/platform/vsp1/vsp1.h11
-rw-r--r--drivers/media/platform/vsp1/vsp1_bru.c12
-rw-r--r--drivers/media/platform/vsp1/vsp1_clu.c292
-rw-r--r--drivers/media/platform/vsp1/vsp1_clu.h48
-rw-r--r--drivers/media/platform/vsp1/vsp1_dl.c72
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.c74
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c191
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.c92
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.h17
-rw-r--r--drivers/media/platform/vsp1/vsp1_hsit.c14
-rw-r--r--drivers/media/platform/vsp1/vsp1_lif.c16
-rw-r--r--drivers/media/platform/vsp1/vsp1_lut.c101
-rw-r--r--drivers/media/platform/vsp1/vsp1_lut.h7
-rw-r--r--drivers/media/platform/vsp1/vsp1_pipe.c58
-rw-r--r--drivers/media/platform/vsp1/vsp1_pipe.h8
-rw-r--r--drivers/media/platform/vsp1/vsp1_regs.h24
-rw-r--r--drivers/media/platform/vsp1/vsp1_rpf.c38
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.c6
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.h14
-rw-r--r--drivers/media/platform/vsp1/vsp1_sru.c14
-rw-r--r--drivers/media/platform/vsp1/vsp1_uds.c16
-rw-r--r--drivers/media/platform/vsp1/vsp1_uds.h2
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c40
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.h2
-rw-r--r--drivers/media/platform/vsp1/vsp1_wpf.c161
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.c13
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.h2
-rw-r--r--drivers/media/radio/radio-aztech.c1
-rw-r--r--drivers/media/radio/radio-maxiradio.c1
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.c2
-rw-r--r--drivers/media/rc/Kconfig2
-rw-r--r--drivers/media/rc/ene_ir.c2
-rw-r--r--drivers/media/rc/iguanair.c2
-rw-r--r--drivers/media/rc/ir-lirc-codec.c5
-rw-r--r--drivers/media/rc/ir-rx51.c229
-rw-r--r--drivers/media/rc/keymaps/Makefile2
-rw-r--r--drivers/media/rc/keymaps/rc-cec.c182
-rw-r--r--drivers/media/rc/keymaps/rc-dtt200u.c59
-rw-r--r--drivers/media/rc/lirc_dev.c299
-rw-r--r--drivers/media/rc/mceusb.c8
-rw-r--r--drivers/media/rc/nuvoton-cir.c137
-rw-r--r--drivers/media/rc/nuvoton-cir.h25
-rw-r--r--drivers/media/rc/rc-main.c11
-rw-r--r--drivers/media/rc/redrat3.c84
-rw-r--r--drivers/media/rc/winbond-cir.c4
-rw-r--r--drivers/media/tuners/it913x.c1
-rw-r--r--drivers/media/tuners/mt2063.c30
-rw-r--r--drivers/media/tuners/r820t.c29
-rw-r--r--drivers/media/tuners/si2157.c3
-rw-r--r--drivers/media/usb/airspy/airspy.c2
-rw-r--r--drivers/media/usb/au0828/au0828-core.c4
-rw-r--r--drivers/media/usb/au0828/au0828-vbi.c2
-rw-r--r--drivers/media/usb/au0828/au0828-video.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/Kconfig13
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c275
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.h3
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c2
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u.c74
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-dvb.c2
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c48
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c11
-rw-r--r--drivers/media/usb/em28xx/em28xx-i2c.c5
-rw-r--r--drivers/media/usb/em28xx/em28xx-vbi.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c2
-rw-r--r--drivers/media/usb/go7007/go7007-v4l2.c2
-rw-r--r--drivers/media/usb/gspca/cpia1.c2
-rw-r--r--drivers/media/usb/gspca/gspca.c29
-rw-r--r--drivers/media/usb/gspca/konica.c2
-rw-r--r--drivers/media/usb/gspca/m5602/m5602_bridge.h15
-rw-r--r--drivers/media/usb/gspca/m5602/m5602_core.c15
-rw-r--r--drivers/media/usb/gspca/m5602/m5602_mt9m111.c144
-rw-r--r--drivers/media/usb/gspca/m5602/m5602_mt9m111.h144
-rw-r--r--drivers/media/usb/gspca/m5602/m5602_ov7660.c153
-rw-r--r--drivers/media/usb/gspca/m5602/m5602_ov7660.h153
-rw-r--r--drivers/media/usb/gspca/m5602/m5602_ov9650.c152
-rw-r--r--drivers/media/usb/gspca/m5602/m5602_ov9650.h150
-rw-r--r--drivers/media/usb/gspca/m5602/m5602_po1030.c104
-rw-r--r--drivers/media/usb/gspca/m5602/m5602_po1030.h104
-rw-r--r--drivers/media/usb/gspca/m5602/m5602_s5k4aa.c199
-rw-r--r--drivers/media/usb/gspca/m5602/m5602_s5k4aa.h197
-rw-r--r--drivers/media/usb/gspca/m5602/m5602_s5k83a.c124
-rw-r--r--drivers/media/usb/gspca/m5602/m5602_s5k83a.h124
-rw-r--r--drivers/media/usb/gspca/ov534.c7
-rw-r--r--drivers/media/usb/gspca/sn9c20x.c14
-rw-r--r--drivers/media/usb/gspca/t613.c2
-rw-r--r--drivers/media/usb/gspca/topro.c6
-rw-r--r--drivers/media/usb/gspca/zc3xx.c13
-rw-r--r--drivers/media/usb/hackrf/hackrf.c2
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-core.c10
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-video.c6
-rw-r--r--drivers/media/usb/hdpvr/hdpvr.h2
-rw-r--r--drivers/media/usb/msi2500/msi2500.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c6
-rw-r--r--drivers/media/usb/pwc/pwc-if.c4
-rw-r--r--drivers/media/usb/s2255/s2255drv.c2
-rw-r--r--drivers/media/usb/stk1160/stk1160-v4l.c5
-rw-r--r--drivers/media/usb/usbtv/usbtv-audio.c28
-rw-r--r--drivers/media/usb/usbtv/usbtv-core.c40
-rw-r--r--drivers/media/usb/usbtv/usbtv-video.c61
-rw-r--r--drivers/media/usb/usbtv/usbtv.h22
-rw-r--r--drivers/media/usb/usbvision/usbvision-core.c5
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c40
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c2
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c2
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c19
-rw-r--r--drivers/media/usb/uvc/uvc_video.c1
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c45
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c34
-rw-r--r--drivers/media/v4l2-core/v4l2-flash-led-class.c9
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c10
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c30
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-contig.c112
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-sg.c64
-rw-r--r--drivers/media/v4l2-core/videobuf2-v4l2.c47
-rw-r--r--drivers/media/v4l2-core/videobuf2-vmalloc.c9
-rw-r--r--drivers/memory/Kconfig13
-rw-r--r--drivers/memory/Makefile1
-rw-r--r--drivers/memory/atmel-ebi.c766
-rw-r--r--drivers/memory/atmel-sdramc.c11
-rw-r--r--drivers/memory/fsl_ifc.c4
-rw-r--r--drivers/memory/mtk-smi.c167
-rw-r--r--drivers/memory/omap-gpmc.c139
-rw-r--r--drivers/memory/samsung/exynos-srom.c41
-rw-r--r--drivers/memory/tegra/mc.c10
-rw-r--r--drivers/memory/tegra/tegra124-emc.c8
-rw-r--r--drivers/memstick/core/ms_block.c23
-rw-r--r--drivers/memstick/core/mspro_block.c6
-rw-r--r--drivers/mfd/Kconfig25
-rw-r--r--drivers/mfd/Makefile2
-rw-r--r--drivers/mfd/ab8500-core.c4
-rw-r--r--drivers/mfd/ab8500-sysctrl.c34
-rw-r--r--drivers/mfd/altera-a10sr.c169
-rw-r--r--drivers/mfd/arizona-core.c4
-rw-r--r--drivers/mfd/arizona-irq.c16
-rw-r--r--drivers/mfd/atmel-hlcdc.c5
-rw-r--r--drivers/mfd/axp20x.c17
-rw-r--r--drivers/mfd/db8500-prcmu.c10
-rw-r--r--drivers/mfd/dm355evm_msp.c7
-rw-r--r--drivers/mfd/hi655x-pmic.c59
-rw-r--r--drivers/mfd/kempld-core.c16
-rw-r--r--drivers/mfd/max14577.c2
-rw-r--r--drivers/mfd/max77620.c75
-rw-r--r--drivers/mfd/max77843.c24
-rw-r--r--drivers/mfd/max8925-i2c.c14
-rw-r--r--drivers/mfd/max8997.c30
-rw-r--r--drivers/mfd/max8998.c27
-rw-r--r--drivers/mfd/omap-usb-tll.c2
-rw-r--r--drivers/mfd/qcom_rpm.c2
-rw-r--r--drivers/mfd/rn5t618.c70
-rw-r--r--drivers/mfd/rtsx_usb.c10
-rw-r--r--drivers/mfd/si476x-i2c.c2
-rw-r--r--drivers/mfd/smsc-ece1099.c11
-rw-r--r--drivers/mfd/stmpe.c40
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c135
-rw-r--r--drivers/mfd/tps6507x.c4
-rw-r--r--drivers/mfd/twl-core.c28
-rw-r--r--drivers/mfd/twl6040.c41
-rw-r--r--drivers/misc/Kconfig10
-rw-r--r--drivers/misc/Makefile16
-rw-r--r--drivers/misc/cxl/Kconfig17
-rw-r--r--drivers/misc/cxl/Makefile2
-rw-r--r--drivers/misc/cxl/api.c168
-rw-r--r--drivers/misc/cxl/base.c104
-rw-r--r--drivers/misc/cxl/context.c5
-rw-r--r--drivers/misc/cxl/cxl.h87
-rw-r--r--drivers/misc/cxl/debugfs.c35
-rw-r--r--drivers/misc/cxl/file.c64
-rw-r--r--drivers/misc/cxl/flash.c4
-rw-r--r--drivers/misc/cxl/guest.c26
-rw-r--r--drivers/misc/cxl/irq.c32
-rw-r--r--drivers/misc/cxl/main.c5
-rw-r--r--drivers/misc/cxl/native.c209
-rw-r--r--drivers/misc/cxl/pci.c492
-rw-r--r--drivers/misc/cxl/phb.c44
-rw-r--r--drivers/misc/cxl/vphb.c90
-rw-r--r--drivers/misc/eeprom/at24.c498
-rw-r--r--drivers/misc/genwqe/card_base.c18
-rw-r--r--drivers/misc/lkdtm.h60
-rw-r--r--drivers/misc/lkdtm_bugs.c148
-rw-r--r--drivers/misc/lkdtm_core.c544
-rw-r--r--drivers/misc/lkdtm_heap.c142
-rw-r--r--drivers/misc/lkdtm_perms.c199
-rw-r--r--drivers/misc/lkdtm_rodata.c10
-rw-r--r--drivers/misc/lkdtm_usercopy.c322
-rw-r--r--drivers/misc/mei/hbm.c137
-rw-r--r--drivers/misc/mei/mei_dev.h10
-rw-r--r--drivers/misc/mic/Kconfig4
-rw-r--r--drivers/misc/mic/host/mic_boot.c20
-rw-r--r--drivers/misc/ti-st/st_core.c2
-rw-r--r--drivers/mmc/card/block.c42
-rw-r--r--drivers/mmc/card/queue.c11
-rw-r--r--drivers/mmc/card/queue.h8
-rw-r--r--drivers/mmc/core/bus.c3
-rw-r--r--drivers/mmc/core/core.c95
-rw-r--r--drivers/mmc/core/debugfs.c3
-rw-r--r--drivers/mmc/core/host.c8
-rw-r--r--drivers/mmc/core/mmc.c253
-rw-r--r--drivers/mmc/core/mmc_ops.c27
-rw-r--r--drivers/mmc/core/quirks.c2
-rw-r--r--drivers/mmc/core/sd.c19
-rw-r--r--drivers/mmc/host/Kconfig22
-rw-r--r--drivers/mmc/host/Makefile2
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c2
-rw-r--r--drivers/mmc/host/dw_mmc-k3.c7
-rw-r--r--drivers/mmc/host/dw_mmc-rockchip.c11
-rw-r--r--drivers/mmc/host/dw_mmc.c118
-rw-r--r--drivers/mmc/host/dw_mmc.h11
-rw-r--r--drivers/mmc/host/jz4740_mmc.c2
-rw-r--r--drivers/mmc/host/mtk-sd.c70
-rw-r--r-- drivers/mmc/host/mxcmmc.c | 2
-rw-r--r-- drivers/mmc/host/omap.c | 18
-rw-r--r-- drivers/mmc/host/omap_hsmmc.c | 16
-rw-r--r-- drivers/mmc/host/rtsx_pci_sdmmc.c | 16
-rw-r--r-- drivers/mmc/host/s3cmci.c | 2
-rw-r--r-- drivers/mmc/host/s3cmci.h | 2
-rw-r--r-- drivers/mmc/host/sdhci-acpi.c | 11
-rw-r--r-- drivers/mmc/host/sdhci-bcm-kona.c | 8
-rw-r--r-- drivers/mmc/host/sdhci-brcmstb.c | 143
-rw-r--r-- drivers/mmc/host/sdhci-cns3xxx.c | 2
-rw-r--r-- drivers/mmc/host/sdhci-dove.c | 2
-rw-r--r-- drivers/mmc/host/sdhci-esdhc-imx.c | 153
-rw-r--r-- drivers/mmc/host/sdhci-iproc.c | 15
-rw-r--r-- drivers/mmc/host/sdhci-msm.c | 117
-rw-r--r-- drivers/mmc/host/sdhci-of-arasan.c | 332
-rw-r--r-- drivers/mmc/host/sdhci-of-at91.c | 2
-rw-r--r-- drivers/mmc/host/sdhci-of-esdhc.c | 16
-rw-r--r-- drivers/mmc/host/sdhci-of-hlwd.c | 2
-rw-r--r-- drivers/mmc/host/sdhci-pci-core.c | 83
-rw-r--r-- drivers/mmc/host/sdhci-pci.h | 2
-rw-r--r-- drivers/mmc/host/sdhci-pltfm.c | 13
-rw-r--r-- drivers/mmc/host/sdhci-pltfm.h | 7
-rw-r--r-- drivers/mmc/host/sdhci-pxav2.c | 2
-rw-r--r-- drivers/mmc/host/sdhci-pxav3.c | 9
-rw-r--r-- drivers/mmc/host/sdhci-s3c.c | 9
-rw-r--r-- drivers/mmc/host/sdhci-sirf.c | 4
-rw-r--r-- drivers/mmc/host/sdhci-st.c | 2
-rw-r--r-- drivers/mmc/host/sdhci-tegra.c | 51
-rw-r--r-- drivers/mmc/host/sdhci.c | 745
-rw-r--r-- drivers/mmc/host/sdhci.h | 30
-rw-r--r-- drivers/mmc/host/sdhci_f_sdh30.c | 2
-rw-r--r-- drivers/mmc/host/sh_mmcif.c | 53
-rw-r--r-- drivers/mmc/host/sh_mobile_sdhi.c | 14
-rw-r--r-- drivers/mmc/host/tmio_mmc.h | 2
-rw-r--r-- drivers/mmc/host/tmio_mmc_pio.c | 2
-rw-r--r-- drivers/mtd/bcm47xxpart.c | 2
-rw-r--r-- drivers/mtd/chips/cfi_cmdset_0020.c | 2
-rw-r--r-- drivers/mtd/devices/Kconfig | 18
-rw-r--r-- drivers/mtd/devices/m25p80.c | 37
-rw-r--r-- drivers/mtd/devices/powernv_flash.c | 2
-rw-r--r-- drivers/mtd/maps/physmap_of.c | 2
-rw-r--r-- drivers/mtd/mtd_blkdevs.c | 8
-rw-r--r-- drivers/mtd/nand/Kconfig | 10
-rw-r--r-- drivers/mtd/nand/Makefile | 1
-rw-r--r-- drivers/mtd/nand/brcmnand/brcmnand.c | 173
-rw-r--r-- drivers/mtd/nand/davinci_nand.c | 3
-rw-r--r-- drivers/mtd/nand/jz4780_bch.c | 2
-rw-r--r-- drivers/mtd/nand/jz4780_nand.c | 2
-rw-r--r-- drivers/mtd/nand/mtk_ecc.c | 534
-rw-r--r-- drivers/mtd/nand/mtk_ecc.h | 50
-rw-r--r-- drivers/mtd/nand/mtk_nand.c | 1529
-rw-r--r-- drivers/mtd/nand/nand_ids.c | 1
-rw-r--r-- drivers/mtd/nand/omap2.c | 13
-rw-r--r-- drivers/mtd/nand/sunxi_nand.c | 397
-rw-r--r-- drivers/mtd/nand/xway_nand.c | 231
-rw-r--r-- drivers/mtd/onenand/onenand_base.c | 4
-rw-r--r-- drivers/mtd/spi-nor/Kconfig | 27
-rw-r--r-- drivers/mtd/spi-nor/Makefile | 3
-rw-r--r-- drivers/mtd/spi-nor/atmel-quadspi.c | 732
-rw-r--r-- drivers/mtd/spi-nor/cadence-quadspi.c | 1299
-rw-r--r-- drivers/mtd/spi-nor/fsl-quadspi.c | 29
-rw-r--r-- drivers/mtd/spi-nor/hisi-sfc.c | 489
-rw-r--r-- drivers/mtd/spi-nor/mtk-quadspi.c | 43
-rw-r--r-- drivers/mtd/spi-nor/nxp-spifi.c | 25
-rw-r--r-- drivers/mtd/spi-nor/spi-nor.c | 125
-rw-r--r-- drivers/mtd/ssfdc.c | 3
-rw-r--r-- drivers/mtd/tests/nandbiterrs.c | 2
-rw-r--r-- drivers/mtd/ubi/attach.c | 141
-rw-r--r-- drivers/mtd/ubi/fastmap.c | 65
-rw-r--r-- drivers/mtd/ubi/gluebi.c | 5
-rw-r--r-- drivers/mtd/ubi/io.c | 2
-rw-r--r-- drivers/mtd/ubi/ubi.h | 46
-rw-r--r-- drivers/mtd/ubi/wl.c | 41
-rw-r--r-- drivers/net/bonding/bond_main.c | 26
-rw-r--r-- drivers/net/caif/Kconfig | 2
-rw-r--r-- drivers/net/caif/caif_hsi.c | 5
-rw-r--r-- drivers/net/caif/caif_spi.c | 4
-rw-r--r-- drivers/net/can/Kconfig | 11
-rw-r--r-- drivers/net/can/Makefile | 2
-rw-r--r-- drivers/net/can/dev.c | 167
-rw-r--r-- drivers/net/can/rcar/Kconfig | 21
-rw-r--r-- drivers/net/can/rcar/Makefile | 6
-rw-r--r-- drivers/net/can/rcar/rcar_can.c | 929
-rw-r--r-- drivers/net/can/rcar/rcar_canfd.c | 1858
-rw-r--r-- drivers/net/can/sja1000/tscan1.c | 12
-rw-r--r-- drivers/net/can/slcan.c | 4
-rw-r--r-- drivers/net/can/spi/mcp251x.c | 7
-rw-r--r-- drivers/net/can/usb/gs_usb.c | 141
-rw-r--r-- drivers/net/dsa/Kconfig | 12
-rw-r--r-- drivers/net/dsa/Makefile | 4
-rw-r--r-- drivers/net/dsa/b53/Kconfig | 33
-rw-r--r-- drivers/net/dsa/b53/Makefile | 6
-rw-r--r-- drivers/net/dsa/b53/b53_common.c | 1799
-rw-r--r-- drivers/net/dsa/b53/b53_mdio.c | 392
-rw-r--r-- drivers/net/dsa/b53/b53_mmap.c | 273
-rw-r--r-- drivers/net/dsa/b53/b53_priv.h | 388
-rw-r--r-- drivers/net/dsa/b53/b53_regs.h | 434
-rw-r--r-- drivers/net/dsa/b53/b53_spi.c | 331
-rw-r--r-- drivers/net/dsa/b53/b53_srab.c | 442
-rw-r--r-- drivers/net/dsa/bcm_sf2.c | 702
-rw-r--r-- drivers/net/dsa/bcm_sf2.h | 16
-rw-r--r-- drivers/net/dsa/bcm_sf2_regs.h | 70
-rw-r--r-- drivers/net/dsa/mv88e6xxx/Kconfig | 7
-rw-r--r-- drivers/net/dsa/mv88e6xxx/Makefile | 1
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.c | 4093
-rw-r--r-- drivers/net/dsa/mv88e6xxx/mv88e6xxx.h | 678
-rw-r--r-- drivers/net/ethernet/8390/ax88796.c | 43
-rw-r--r-- drivers/net/ethernet/adi/bfin_mac.c | 48
-rw-r--r-- drivers/net/ethernet/adi/bfin_mac.h | 1
-rw-r--r-- drivers/net/ethernet/agere/et131x.c | 60
-rw-r--r-- drivers/net/ethernet/allwinner/sun4i-emac.c | 54
-rw-r--r-- drivers/net/ethernet/altera/altera_tse.h | 1
-rw-r--r-- drivers/net/ethernet/altera/altera_tse_ethtool.c | 26
-rw-r--r-- drivers/net/ethernet/altera/altera_tse_main.c | 17
-rw-r--r-- drivers/net/ethernet/amd/au1000_eth.c | 55
-rw-r--r-- drivers/net/ethernet/amd/au1000_eth.h | 1
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 2
-rw-r--r-- drivers/net/ethernet/apm/xgene/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c | 22
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 257
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_hw.h | 11
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 215
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 33
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c | 239
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h | 8
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c | 66
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h | 3
-rw-r--r-- drivers/net/ethernet/arc/emac.h | 1
-rw-r--r-- drivers/net/ethernet/arc/emac_main.c | 86
-rw-r--r-- drivers/net/ethernet/atheros/alx/main.c | 14
-rw-r--r-- drivers/net/ethernet/atheros/alx/reg.h | 1
-rw-r--r-- drivers/net/ethernet/aurora/nb8800.c | 73
-rw-r--r-- drivers/net/ethernet/aurora/nb8800.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/Kconfig | 44
-rw-r--r-- drivers/net/ethernet/broadcom/Makefile | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 49
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c | 266
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac-bcma.c | 315
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac-platform.c | 185
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.c | 808
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.h | 117
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2.c | 12
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 113
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 743
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 90
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 265
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 87
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 3
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 26
-rw-r--r-- drivers/net/ethernet/brocade/bna/bnad_debugfs.c | 6
-rw-r--r-- drivers/net/ethernet/brocade/bna/bnad_ethtool.c | 57
-rw-r--r-- drivers/net/ethernet/cadence/macb.c | 77
-rw-r--r-- drivers/net/ethernet/cadence/macb.h | 1
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/cn66xx_device.c | 61
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/cn66xx_device.h | 5
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/cn68xx_device.c | 13
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/cn68xx_device.h | 1
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h | 1
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 1009
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_main.c | 1421
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/liquidio_common.h | 408
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_config.h | 16
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_console.c | 50
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_device.c | 262
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_device.h | 52
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_droq.c | 213
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_droq.h | 41
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_iq.h | 85
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_main.h | 25
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c | 24
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_network.h | 252
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_nic.c | 67
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_nic.h | 154
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/request_manager.c | 313
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/response_manager.c | 30
-rw-r--r-- drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 106
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nic.h | 1
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nic_main.c | 11
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 20
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 64
-rw-r--r-- drivers/net/ethernet/chelsio/Kconfig | 16
-rw-r--r-- drivers/net/ethernet/chelsio/Makefile | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/Makefile | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 5
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 375
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 97
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sge.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 9
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 30
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/adapter.h | 8
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 286
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | 16
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 67
-rw-r--r-- drivers/net/ethernet/chelsio/libcxgb/Makefile | 3
-rw-r--r-- drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c | 498
-rw-r--r-- drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h | 334
-rw-r--r-- drivers/net/ethernet/cirrus/cs89x0.c | 12
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_ethtool.c | 28
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_main.c | 4
-rw-r--r-- drivers/net/ethernet/davicom/dm9000.c | 8
-rw-r--r-- drivers/net/ethernet/dec/tulip/de4x5.c | 2
-rw-r--r-- drivers/net/ethernet/dnet.c | 48
-rw-r--r-- drivers/net/ethernet/dnet.h | 1
-rw-r--r-- drivers/net/ethernet/emulex/benet/Kconfig | 8
-rw-r--r-- drivers/net/ethernet/emulex/benet/be.h | 58
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.c | 160
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.h | 16
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_ethtool.c | 66
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 334
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_roce.c | 2
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_roce.h | 2
-rw-r--r-- drivers/net/ethernet/ethoc.c | 40
-rw-r--r-- drivers/net/ethernet/ezchip/nps_enet.c | 27
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.c | 271
-rw-r--r-- drivers/net/ethernet/freescale/fec.h | 4
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 61
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 22
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.h | 3
-rw-r--r-- drivers/net/ethernet/hisilicon/Kconfig | 14
-rw-r--r-- drivers/net/ethernet/hisilicon/Makefile | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hisi_femac.c | 1007
-rw-r--r-- drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 44
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hnae.c | 19
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hnae.h | 20
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | 60
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | 8
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 291
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | 5
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 334
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | 45
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 302
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h | 7
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 16
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 7
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 21
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c | 10
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 160
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_enet.h | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 63
-rw-r--r-- drivers/net/ethernet/hisilicon/hns_mdio.c | 204
-rw-r--r-- drivers/net/ethernet/ibm/emac/core.c | 34
-rw-r--r-- drivers/net/ethernet/intel/Kconfig | 43
-rw-r--r-- drivers/net/ethernet/intel/e1000e/82571.c | 6
-rw-r--r-- drivers/net/ethernet/intel/e1000e/e1000.h | 1
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ich8lan.c | 3
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 71
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k.h | 4
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_common.c | 6
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 7
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 19
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_mbx.h | 2
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 40
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 333
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 38
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_type.h | 2
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_vf.c | 12
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e.h | 15
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_client.c | 45
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_common.c | 61
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 16
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_devids.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 52
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 998
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_prototype.h | 2
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 4
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 6
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_common.c | 1
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_devids.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 3
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 8
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 8
-rw-r--r-- drivers/net/ethernet/intel/igb/igb.h | 7
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 22
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ptp.c | 92
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 3
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 89
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 12
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 192
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_model.h | 4
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 1
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/defines.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 24
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/vf.c | 96
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/vf.h | 3
-rw-r--r-- drivers/net/ethernet/lantiq_etop.c | 37
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 1
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2.c | 50
-rw-r--r-- drivers/net/ethernet/marvell/pxa168_eth.c | 72
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 308
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 38
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/Kconfig | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c | 280
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 34
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 190
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 126
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 282
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/eq.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/fw.c | 41
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/fw.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/intf.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mcg.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 63
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mr.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/pd.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/port.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 22
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 201
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 82
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 161
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 30
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 640
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 101
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 586
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 951
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 431
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 41
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c | 335
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 31
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 153
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 117
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 207
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 86
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 646
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 67
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 307
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 150
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fw.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/health.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 36
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/port.c | 48
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/rl.c | 209
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/srq.c | 265
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/transobj.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/Makefile | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/cmd.h | 75
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.c | 30
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.h | 17
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/pci.c | 78
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/port.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/reg.h | 1397
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 2446
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 268
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c | 91
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 1863
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 512
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/trap.h | 9
-rw-r--r-- drivers/net/ethernet/neterion/s2io.c | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net.h | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 50
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c | 11
-rw-r--r-- drivers/net/ethernet/nxp/lpc_eth.c | 65
-rw-r--r-- drivers/net/ethernet/pasemi/pasemi_mac.c | 28
-rw-r--r-- drivers/net/ethernet/pasemi/pasemi_mac.h | 1
-rw-r--r-- drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c | 30
-rw-r--r-- drivers/net/ethernet/qlogic/Kconfig | 30
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed.h | 28
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 1347
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_cxt.h | 24
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 1814
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dcbx.h | 28
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dev.c | 649
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 55
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 10956
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_hw.c | 55
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_hw.h | 12
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 184
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_init_ops.c | 9
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_int.c | 75
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_int.h | 3
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_l2.c | 128
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_main.c | 68
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 61
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 3
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 43
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sp.h | 26
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 26
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_spq.c | 40
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 508
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sriov.h | 9
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_vf.c | 99
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_vf.h | 13
-rw-r--r-- drivers/net/ethernet/qlogic/qede/Makefile | 1
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede.h | 9
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_dcbnl.c | 348
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 104
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_main.c | 295
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 6
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 1
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h | 1
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 30
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h | 9
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 95
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge_main.c | 1
-rw-r--r-- drivers/net/ethernet/rdc/r6040.c | 91
-rw-r--r-- drivers/net/ethernet/realtek/8139cp.c | 2
-rw-r--r-- drivers/net/ethernet/realtek/8139too.c | 12
-rw-r--r-- drivers/net/ethernet/realtek/r8169.c | 37
-rw-r--r-- drivers/net/ethernet/renesas/ravb_main.c | 10
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.c | 9
-rw-r--r-- drivers/net/ethernet/rocker/rocker_main.c | 3
-rw-r--r-- drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h | 1
-rw-r--r-- drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c | 31
-rw-r--r-- drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 32
-rw-r--r-- drivers/net/ethernet/sfc/ef10.c | 747
-rw-r--r-- drivers/net/ethernet/sfc/ef10_sriov.c | 44
-rw-r--r-- drivers/net/ethernet/sfc/ef10_sriov.h | 3
-rw-r--r-- drivers/net/ethernet/sfc/efx.c | 66
-rw-r--r-- drivers/net/ethernet/sfc/efx.h | 9
-rw-r--r-- drivers/net/ethernet/sfc/mcdi_pcol.h | 1327
-rw-r--r-- drivers/net/ethernet/sfc/net_driver.h | 19
-rw-r--r-- drivers/net/ethernet/sfc/nic.h | 5
-rw-r--r-- drivers/net/ethernet/smsc/smc91x.c | 13
-rw-r--r-- drivers/net/ethernet/smsc/smc91x.h | 2
-rw-r--r-- drivers/net/ethernet/smsc/smsc911x.c | 287
-rw-r--r-- drivers/net/ethernet/smsc/smsc9420.c | 60
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/Kconfig | 14
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/Makefile | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c | 274
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h | 36
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/common.h | 19
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 165
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 147
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000.h | 86
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 149
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 43
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 98
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 1
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 60
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 51
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h | 159
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 17
-rw-r--r-- drivers/net/ethernet/synopsys/dwc_eth_qos.c | 128
-rw-r--r-- drivers/net/ethernet/tehuti/tehuti.c | 2
-rw-r--r-- drivers/net/ethernet/ti/Kconfig | 3
-rw-r--r-- drivers/net/ethernet/ti/cpmac.c | 70
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 118
-rw-r--r-- drivers/net/ethernet/ti/cpsw.h | 1
-rw-r--r-- drivers/net/ethernet/ti/davinci_cpdma.c | 261
-rw-r--r-- drivers/net/ethernet/ti/davinci_cpdma.h | 3
-rw-r--r-- drivers/net/ethernet/ti/davinci_emac.c | 189
-rw-r--r-- drivers/net/ethernet/ti/davinci_mdio.c | 169
-rw-r--r-- drivers/net/ethernet/ti/tlan.c | 1
-rw-r--r-- drivers/net/ethernet/tile/tilepro.c | 4
-rw-r--r-- drivers/net/ethernet/toshiba/tc35815.c | 65
-rw-r--r-- drivers/net/ethernet/tundra/tsi108_eth.c | 2
-rw-r--r-- drivers/net/ethernet/wiznet/w5100.c | 3
-rw-r--r-- drivers/net/ethernet/xilinx/ll_temac.h | 1
-rw-r--r-- drivers/net/ethernet/xilinx/ll_temac_main.c | 47
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet.h | 2
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 82
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 8
-rw-r--r-- drivers/net/ethernet/xircom/xirc2ps_cs.c | 4
-rw-r--r-- drivers/net/ethernet/xscale/ixp4xx_eth.c | 46
-rw-r--r-- drivers/net/fjes/fjes_main.c | 5
-rw-r--r-- drivers/net/geneve.c | 61
-rw-r--r-- drivers/net/gtp.c | 1
-rw-r--r-- drivers/net/hamradio/baycom_par.c | 6
-rw-r--r-- drivers/net/hyperv/hyperv_net.h | 43
-rw-r--r-- drivers/net/hyperv/netvsc.c | 130
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 123
-rw-r--r-- drivers/net/hyperv/rndis_filter.c | 159
-rw-r--r-- drivers/net/ieee802154/atusb.c | 6
-rw-r--r-- drivers/net/ieee802154/fakelb.c | 8
-rw-r--r-- drivers/net/ieee802154/mrf24j40.c | 2
-rw-r--r-- drivers/net/ipvlan/ipvlan_core.c | 39
-rw-r--r-- drivers/net/ipvlan/ipvlan_main.c | 22
-rw-r--r-- drivers/net/loopback.c | 5
-rw-r--r-- drivers/net/macsec.c | 176
-rw-r--r-- drivers/net/macvlan.c | 61
-rw-r--r-- drivers/net/macvtap.c | 178
-rw-r--r-- drivers/net/phy/Kconfig | 37
-rw-r--r-- drivers/net/phy/Makefile | 7
-rw-r--r-- drivers/net/phy/fixed_phy.c | 153
-rw-r--r-- drivers/net/phy/intel-xway.c | 376
-rw-r--r-- drivers/net/phy/marvell.c | 346
-rw-r--r-- drivers/net/phy/mdio-hisi-femac.c | 166
-rw-r--r-- drivers/net/phy/mdio-mux-bcm-iproc.c | 248
-rw-r--r-- drivers/net/phy/mdio-mux-gpio.c | 2
-rw-r--r-- drivers/net/phy/mdio-mux-mmioreg.c | 2
-rw-r--r-- drivers/net/phy/mdio-mux.c | 26
-rw-r--r-- drivers/net/phy/mdio-xgene.c | 473
-rw-r--r-- drivers/net/phy/mdio-xgene.h | 143
-rw-r--r-- drivers/net/phy/micrel.c | 67
-rw-r--r-- drivers/net/phy/swphy.c | 179
-rw-r--r-- drivers/net/phy/swphy.h | 9
-rw-r--r-- drivers/net/ppp/ppp_generic.c | 3
-rw-r--r-- drivers/net/team/team.c | 21
-rw-r--r-- drivers/net/team/team_mode_loadbalance.c | 15
-rw-r--r-- drivers/net/tun.c | 240
-rw-r--r-- drivers/net/usb/ax88172a.c | 22
-rw-r--r-- drivers/net/usb/cdc_ether.c | 51
-rw-r--r-- drivers/net/usb/kaweth.c | 10
-rw-r--r-- drivers/net/usb/r8152.c | 367
-rw-r--r-- drivers/net/usb/rndis_host.c | 6
-rw-r--r-- drivers/net/usb/usbnet.c | 138
-rw-r--r-- drivers/net/virtio_net.c | 103
-rw-r--r-- drivers/net/vmxnet3/Makefile | 4
-rw-r--r-- drivers/net/vmxnet3/upt1_defs.h | 4
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_defs.h | 105
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_drv.c | 289
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_ethtool.c | 215
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_int.h | 54
-rw-r--r-- drivers/net/vrf.c | 582
-rw-r--r-- drivers/net/vxlan.c | 157
-rw-r--r-- drivers/net/wan/Kconfig | 22
-rw-r--r-- drivers/net/wan/Makefile | 2
-rw-r--r-- drivers/net/wan/fsl_ucc_hdlc.c | 1178
-rw-r--r-- drivers/net/wan/fsl_ucc_hdlc.h | 147
-rw-r--r-- drivers/net/wan/slic_ds26522.c | 255
-rw-r--r-- drivers/net/wan/slic_ds26522.h | 134
-rw-r--r-- drivers/net/wireless/ath/ath10k/ahb.c | 11
-rw-r--r-- drivers/net/wireless/ath/ath10k/core.c | 133
-rw-r--r-- drivers/net/wireless/ath/ath10k/core.h | 34
-rw-r--r-- drivers/net/wireless/ath/ath10k/debug.c | 63
-rw-r--r-- drivers/net/wireless/ath/ath10k/debug.h | 11
-rw-r--r-- drivers/net/wireless/ath/ath10k/debugfs_sta.c | 74
-rw-r--r-- drivers/net/wireless/ath/ath10k/hif.h | 14
-rw-r--r-- drivers/net/wireless/ath/ath10k/htc.h | 1
-rw-r--r-- drivers/net/wireless/ath/ath10k/htt.h | 6
-rw-r--r-- drivers/net/wireless/ath/ath10k/htt_rx.c | 54
-rw-r--r-- drivers/net/wireless/ath/ath10k/htt_tx.c | 22
-rw-r--r-- drivers/net/wireless/ath/ath10k/hw.c | 39
-rw-r--r-- drivers/net/wireless/ath/ath10k/hw.h | 106
-rw-r--r-- drivers/net/wireless/ath/ath10k/mac.c | 150
-rw-r--r-- drivers/net/wireless/ath/ath10k/pci.c | 266
-rw-r--r-- drivers/net/wireless/ath/ath10k/pci.h | 6
-rw-r--r-- drivers/net/wireless/ath/ath10k/rx_desc.h | 87
-rw-r--r-- drivers/net/wireless/ath/ath10k/spectral.c | 4
-rw-r--r-- drivers/net/wireless/ath/ath10k/targaddrs.h | 3
-rw-r--r-- drivers/net/wireless/ath/ath10k/txrx.c | 9
-rw-r--r-- drivers/net/wireless/ath/ath10k/wmi.c | 73
-rw-r--r-- drivers/net/wireless/ath/ath10k/wmi.h | 18
-rw-r--r-- drivers/net/wireless/ath/ath5k/pcu.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath6kl/cfg80211.c | 25
-rw-r--r-- drivers/net/wireless/ath/ath6kl/core.h | 2
-rw-r--r-- drivers/net/wireless/ath/ath6kl/txrx.c | 9
-rw-r--r-- drivers/net/wireless/ath/ath6kl/wmi.c | 3
-rw-r--r-- drivers/net/wireless/ath/ath9k/ahb.c | 18
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9002_phy.c | 32
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9002_phy.h | 5
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9003_calib.c | 128
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 5
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9003_phy.h | 25
-rw-r--r-- drivers/net/wireless/ath/ath9k/ath9k.h | 7
-rw-r--r-- drivers/net/wireless/ath/ath9k/beacon.c | 240
-rw-r--r-- drivers/net/wireless/ath/ath9k/channel.c | 5
-rw-r--r-- drivers/net/wireless/ath/ath9k/common-spectral.c | 6
-rw-r--r-- drivers/net/wireless/ath/ath9k/common.h | 1
-rw-r--r-- drivers/net/wireless/ath/ath9k/dynack.c | 4
-rw-r--r-- drivers/net/wireless/ath/ath9k/eeprom.c | 33
-rw-r--r-- drivers/net/wireless/ath/ath9k/htc_drv_beacon.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/htc_drv_init.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/hw.c | 58
-rw-r--r-- drivers/net/wireless/ath/ath9k/hw.h | 1
-rw-r--r-- drivers/net/wireless/ath/ath9k/init.c | 56
-rw-r--r-- drivers/net/wireless/ath/ath9k/mac.h | 4
-rw-r--r-- drivers/net/wireless/ath/ath9k/main.c | 86
-rw-r--r-- drivers/net/wireless/ath/ath9k/pci.c | 41
-rw-r--r-- drivers/net/wireless/ath/ath9k/recv.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/tx99.c | 3
-rw-r--r-- drivers/net/wireless/ath/carl9170/Kconfig | 8
-rw-r--r-- drivers/net/wireless/ath/dfs_pattern_detector.c | 2
-rw-r--r-- drivers/net/wireless/ath/regd.c | 4
-rw-r--r-- drivers/net/wireless/ath/wcn36xx/dxe.c | 31
-rw-r--r-- drivers/net/wireless/ath/wcn36xx/dxe.h | 7
-rw-r--r-- drivers/net/wireless/ath/wcn36xx/hal.h | 4
-rw-r--r-- drivers/net/wireless/ath/wcn36xx/main.c | 67
-rw-r--r-- drivers/net/wireless/ath/wcn36xx/smd.c | 44
-rw-r--r-- drivers/net/wireless/ath/wcn36xx/smd.h | 4
-rw-r--r-- drivers/net/wireless/ath/wcn36xx/wcn36xx.h | 10
-rw-r--r-- drivers/net/wireless/ath/wil6210/cfg80211.c | 20
-rw-r--r-- drivers/net/wireless/ath/wil6210/debug.c | 46
-rw-r--r-- drivers/net/wireless/ath/wil6210/main.c | 12
-rw-r--r-- drivers/net/wireless/ath/wil6210/p2p.c | 12
-rw-r--r-- drivers/net/wireless/ath/wil6210/pcie_bus.c | 68
-rw-r--r-- drivers/net/wireless/ath/wil6210/pm.c | 25
-rw-r--r-- drivers/net/wireless/ath/wil6210/txrx.c | 42
-rw-r--r-- drivers/net/wireless/ath/wil6210/wil6210.h | 6
-rw-r--r-- drivers/net/wireless/ath/wil6210/wil_platform.h | 4
-rw-r--r-- drivers/net/wireless/ath/wil6210/wmi.c | 8
-rw-r--r-- drivers/net/wireless/atmel/at76c50x-usb.c | 5
-rw-r--r-- drivers/net/wireless/broadcom/b43/Makefile | 2
-rw-r--r-- drivers/net/wireless/broadcom/b43/leds.c | 8
-rw-r--r-- drivers/net/wireless/broadcom/b43/main.c | 31
-rw-r--r-- drivers/net/wireless/broadcom/b43/phy_a.h | 22
-rw-r--r-- drivers/net/wireless/broadcom/b43/phy_common.h | 3
-rw-r--r-- drivers/net/wireless/broadcom/b43/phy_g.c | 25
-rw-r--r-- drivers/net/wireless/broadcom/b43/wa.c | 283
-rw-r--r-- drivers/net/wireless/broadcom/b43/xmit.c | 30
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c | 51
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 285
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h | 11
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | 59
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h | 5
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c | 10
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c | 32
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | 6
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 12
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h | 3
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c | 4
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c | 4
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c | 18
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h | 22
-rw-r--r-- drivers/net/wireless/cisco/airo.c | 4
-rw-r--r-- drivers/net/wireless/intel/ipw2x00/ipw2200.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlegacy/common.c | 6
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/Makefile | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/dvm/lib.c | 17
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/dvm/main.c | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/dvm/rxon.c | 5
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/dvm/scan.c | 6
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-7000.c | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-8000.c | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-9000.c | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-a000.c | 131
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-config.h | 13
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-csr.h | 6
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-debug.h | 1
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h | 35
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h | 25
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 49
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h | 1
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-fh.h | 99
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h | 13
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-fw.h | 17
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-io.c | 142
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-io.h | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-modparams.h | 10
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 7
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-prph.h | 1
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-trans.h | 33
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/coex.c | 6
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h | 222
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h | 22
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h | 56
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h | 8
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h | 24
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h | 58
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h | 26
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c | 6
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 217
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 108
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 73
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 49
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/nvm.c | 45
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 61
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/power.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 94
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/rs.h | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/rx.c | 44
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 103
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 55
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/sf.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 539
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 8
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 106
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 186
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 17
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 49
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 129
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 154
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 188
-rw-r--r-- drivers/net/wireless/intersil/orinoco/scan.c | 12
-rw-r--r-- drivers/net/wireless/mac80211_hwsim.c | 225
-rw-r--r-- drivers/net/wireless/marvell/libertas/cfg.c | 15
-rw-r--r-- drivers/net/wireless/marvell/libertas/cmdresp.c | 4
-rw-r--r-- drivers/net/wireless/marvell/libertas/if_sdio.c | 3
-rw-r--r-- drivers/net/wireless/marvell/libertas/if_spi.c | 4
-rw-r--r-- drivers/net/wireless/marvell/libertas_tf/main.c | 9
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/11n_aggr.c | 5
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 105
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/cmdevt.c | 40
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/fw.h | 7
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/init.c | 5
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/ioctl.h | 12
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/join.c | 14
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/main.c | 12
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/main.h | 6
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/pcie.c | 86
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/pcie.h | 4
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/scan.c | 48
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sdio.c | 64
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sta_cmd.c | 78
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c | 52
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sta_event.c | 18
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sta_ioctl.c | 25
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/uap_cmd.c | 30
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/uap_txrx.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 18
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c | 11
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c | 11
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c | 9
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 315
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 14
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/core.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/debug.c | 25
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/debug.h | 17
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/efuse.c | 78
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/efuse.h | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/ps.c | 25
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/ps.h | 3
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rc.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c | 6
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c | 74
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c | 6
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c | 6
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c | 10
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c | 76
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c | 10
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c | 57
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c | 6
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c | 73
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c | 10
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c | 6
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c | 71
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c | 29
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c | 6
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c | 22
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c | 6
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c | 302
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c | 6
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c | 6
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c | 68
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c | 12
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c | 6
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h | 8
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c | 12
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c | 79
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c | 62
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c | 16
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h | 10
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/stats.c | 6
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/stats.h | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/wifi.h | 68
-rw-r--r-- drivers/net/wireless/rndis_wlan.c | 10
-rw-r--r-- drivers/net/wireless/rsi/rsi_91x_mgmt.c | 2
-rw-r--r-- drivers/net/wireless/st/cw1200/scan.c | 6
-rw-r--r-- drivers/net/wireless/ti/wl1251/event.c | 6
-rw-r--r-- drivers/net/wireless/ti/wl1251/main.c | 6
-rw-r--r-- drivers/net/wireless/ti/wl18xx/event.c | 26
-rw-r--r-- drivers/net/wireless/ti/wl18xx/event.h | 19
-rw-r--r-- drivers/net/wireless/ti/wl18xx/main.c | 19
-rw-r--r-- drivers/net/wireless/ti/wl18xx/tx.c | 22
-rw-r--r-- drivers/net/wireless/ti/wl18xx/wl18xx.h | 8
-rw-r--r-- drivers/net/wireless/ti/wlcore/acx.h | 1
-rw-r--r-- drivers/net/wireless/ti/wlcore/boot.c | 2
-rw-r--r-- drivers/net/wireless/ti/wlcore/cmd.c | 20
-rw-r--r-- drivers/net/wireless/ti/wlcore/main.c | 61
-rw-r--r-- drivers/net/wireless/ti/wlcore/rx.c | 7
-rw-r--r-- drivers/net/wireless/ti/wlcore/scan.c | 5
-rw-r--r-- drivers/net/wireless/ti/wlcore/sdio.c | 1
-rw-r--r-- drivers/net/wireless/ti/wlcore/spi.c | 124
-rw-r--r-- drivers/net/wireless/ti/wlcore/wlcore_i.h | 14
-rw-r--r-- drivers/net/wireless/wl3501_cs.c | 31
-rw-r--r-- drivers/net/xen-netback/xenbus.c | 46
-rw-r--r-- drivers/nfc/Kconfig | 1
-rw-r--r-- drivers/nfc/fdp/fdp.c | 6
-rw-r--r-- drivers/nfc/nfcsim.c | 643
-rw-r--r-- drivers/nfc/nfcwilink.c | 4
-rw-r--r-- drivers/nfc/pn533/usb.c | 9
-rw-r--r-- drivers/nfc/port100.c | 82
-rw-r--r-- drivers/nfc/trf7970a.c | 4
-rw-r--r-- drivers/ntb/hw/intel/ntb_hw_intel.c | 49
-rw-r--r-- drivers/ntb/ntb_transport.c | 38
-rw-r--r-- drivers/ntb/test/ntb_perf.c | 240
-rw-r--r-- drivers/ntb/test/ntb_pingpong.c | 62
-rw-r--r-- drivers/ntb/test/ntb_tool.c | 459
-rw-r--r-- drivers/nvdimm/Kconfig | 2
-rw-r--r-- drivers/nvdimm/blk.c | 14
-rw-r--r-- drivers/nvdimm/btt.c | 22
-rw-r--r-- drivers/nvdimm/btt_devs.c | 23
-rw-r--r-- drivers/nvdimm/bus.c | 220
-rw-r--r-- drivers/nvdimm/claim.c | 7
-rw-r--r-- drivers/nvdimm/core.c | 259
-rw-r--r-- drivers/nvdimm/dimm_devs.c | 5
-rw-r--r-- drivers/nvdimm/e820.c | 1
-rw-r--r-- drivers/nvdimm/nd-core.h | 5
-rw-r--r-- drivers/nvdimm/nd.h | 29
-rw-r--r-- drivers/nvdimm/pmem.c | 101
-rw-r--r-- drivers/nvdimm/pmem.h | 24
-rw-r--r-- drivers/nvdimm/region.c | 19
-rw-r--r-- drivers/nvdimm/region_devs.c | 158
-rw-r--r-- drivers/nvme/Kconfig | 1
-rw-r--r-- drivers/nvme/Makefile | 1
-rw-r--r-- drivers/nvme/host/Kconfig | 19
-rw-r--r-- drivers/nvme/host/Makefile | 6
-rw-r--r-- drivers/nvme/host/core.c | 333
-rw-r--r-- drivers/nvme/host/fabrics.c | 961
-rw-r--r-- drivers/nvme/host/fabrics.h | 132
-rw-r--r-- drivers/nvme/host/lightnvm.c | 4
-rw-r--r-- drivers/nvme/host/nvme.h | 52
-rw-r--r-- drivers/nvme/host/pci.c | 110
-rw-r--r-- drivers/nvme/host/rdma.c | 2033
-rw-r--r-- drivers/nvme/target/Kconfig | 36
-rw-r--r-- drivers/nvme/target/Makefile | 9
-rw-r--r-- drivers/nvme/target/admin-cmd.c | 461
-rw-r--r-- drivers/nvme/target/configfs.c | 917
-rw-r--r-- drivers/nvme/target/core.c | 968
-rw-r--r-- drivers/nvme/target/discovery.c | 221
-rw-r--r-- drivers/nvme/target/fabrics-cmd.c | 240
-rw-r--r-- drivers/nvme/target/io-cmd.c | 215
-rw-r--r-- drivers/nvme/target/loop.c | 752
-rw-r--r-- drivers/nvme/target/nvmet.h | 332
-rw-r--r-- drivers/nvme/target/rdma.c | 1497
-rw-r--r-- drivers/nvmem/Kconfig | 4
-rw-r--r-- drivers/nvmem/imx-ocotp.c | 17
-rw-r--r-- drivers/nvmem/mtk-efuse.c | 47
-rw-r--r-- drivers/nvmem/mxs-ocotp.c | 83
-rw-r--r-- drivers/of/Kconfig | 1
-rw-r--r-- drivers/of/address.c | 49
-rw-r--r-- drivers/of/base.c | 36
-rw-r--r-- drivers/of/dynamic.c | 47
-rw-r--r-- drivers/of/fdt.c | 38
-rw-r--r-- drivers/of/fdt_address.c | 35
-rw-r--r-- drivers/of/irq.c | 5
-rw-r--r-- drivers/of/of_mdio.c | 38
-rw-r--r-- drivers/of/of_numa.c | 4
-rw-r--r-- drivers/of/of_pci.c | 6
-rw-r--r-- drivers/of/of_reserved_mem.c | 105
-rw-r--r-- drivers/of/overlay.c | 43
-rw-r--r-- drivers/of/platform.c | 55
-rw-r--r-- drivers/of/resolver.c | 8
-rw-r--r-- drivers/of/unittest.c | 5
-rw-r--r-- drivers/parisc/ccio-dma.c | 16
-rw-r--r-- drivers/parisc/sba_iommu.c | 16
-rw-r--r-- drivers/pci/Kconfig | 2
-rw-r--r-- drivers/pci/Makefile | 3
-rw-r--r-- drivers/pci/bus.c | 31
-rw-r--r-- drivers/pci/ecam.c | 8
-rw-r--r-- drivers/pci/host-bridge.c | 1
-rw-r--r-- drivers/pci/host/Kconfig | 49
-rw-r--r-- drivers/pci/host/Makefile | 2
-rw-r--r-- drivers/pci/host/pci-aardvark.c | 1001
-rw-r--r-- drivers/pci/host/pci-dra7xx.c | 4
-rw-r--r-- drivers/pci/host/pci-host-common.c | 44
-rw-r--r-- drivers/pci/host/pci-host-generic.c | 13
-rw-r--r-- drivers/pci/host/pci-hyperv.c | 30
-rw-r--r-- drivers/pci/host/pci-keystone.c | 10
-rw-r--r-- drivers/pci/host/pci-layerscape.c | 10
-rw-r--r-- drivers/pci/host/pci-mvebu.c | 28
-rw-r--r-- drivers/pci/host/pci-rcar-gen2.c | 27
-rw-r--r-- drivers/pci/host/pci-tegra.c | 99
-rw-r--r-- drivers/pci/host/pci-thunder-ecam.c | 11
-rw-r--r-- drivers/pci/host/pci-thunder-pem.c | 14
-rw-r--r-- drivers/pci/host/pci-versatile.c | 29
-rw-r--r-- drivers/pci/host/pci-xgene.c | 24
-rw-r--r-- drivers/pci/host/pcie-altera.c | 83
-rw-r--r-- drivers/pci/host/pcie-armada8k.c | 14
-rw-r--r-- drivers/pci/host/pcie-artpec6.c | 280
-rw-r--r-- drivers/pci/host/pcie-designware-plat.c | 10
-rw-r--r-- drivers/pci/host/pcie-designware.c | 34
-rw-r--r-- drivers/pci/host/pcie-hisi.c | 13
-rw-r--r-- drivers/pci/host/pcie-iproc.c | 4
-rw-r--r-- drivers/pci/host/pcie-rcar.c | 44
-rw-r--r-- drivers/pci/host/pcie-xilinx-nwl.c | 20
-rw-r--r-- drivers/pci/host/pcie-xilinx.c | 22
-rw-r--r-- drivers/pci/hotplug/Kconfig | 13
-rw-r--r-- drivers/pci/hotplug/Makefile | 3
-rw-r--r-- drivers/pci/hotplug/acpiphp_glue.c | 6
-rw-r--r-- drivers/pci/hotplug/pciehp_hpc.c | 4
-rw-r--r-- drivers/pci/hotplug/pnv_php.c | 711
-rw-r--r-- drivers/pci/hotplug/rpaphp_slot.c | 17
-rw-r--r-- drivers/pci/msi.c | 268
-rw-r--r-- drivers/pci/pci-driver.c | 5
-rw-r--r-- drivers/pci/pci-mid.c | 82
-rw-r--r-- drivers/pci/pci-sysfs.c | 5
-rw-r--r-- drivers/pci/pci.c | 285
-rw-r--r-- drivers/pci/pci.h | 11
-rw-r--r-- drivers/pci/pcie/Kconfig | 5
-rw-r--r-- drivers/pci/pcie/aspm.c | 2
-rw-r--r-- drivers/pci/pcie/pcie-dpc.c | 19
-rw-r--r-- drivers/pci/pcie/portdrv_core.c | 3
-rw-r--r-- drivers/pci/pcie/portdrv_pci.c | 52
-rw-r--r-- drivers/pci/probe.c | 22
-rw-r--r-- drivers/pci/proc.c | 9
-rw-r--r-- drivers/pci/quirks.c | 14
-rw-r--r-- drivers/pci/remove.c | 1
-rw-r--r-- drivers/pci/setup-bus.c | 73
-rw-r--r-- drivers/pci/xen-pcifront.c | 2
-rw-r--r-- drivers/pcmcia/ds.c | 12
-rw-r--r-- drivers/pcmcia/pxa2xx_base.c | 9
-rw-r--r-- drivers/pcmcia/pxa2xx_base.h | 2
-rw-r--r-- drivers/pcmcia/sa1111_badge4.c | 22
-rw-r--r-- drivers/pcmcia/sa1111_generic.c | 22
-rw-r--r-- drivers/pcmcia/sa1111_jornada720.c | 25
-rw-r--r-- drivers/pcmcia/sa1111_lubbock.c | 32
-rw-r--r-- drivers/pcmcia/sa1111_neponset.c | 26
-rw-r--r-- drivers/pcmcia/sa11xx_base.c | 8
-rw-r--r-- drivers/pcmcia/soc_common.c | 2
-rw-r--r-- drivers/perf/arm_pmu.c | 88
-rw-r--r-- drivers/phy/Kconfig | 19
-rw-r--r-- drivers/phy/Makefile | 3
-rw-r--r-- drivers/phy/phy-bcm-ns2-pcie.c | 115
-rw-r--r-- drivers/phy/phy-brcm-sata.c | 83
-rw-r--r-- drivers/phy/phy-core.c | 15
-rw-r--r-- drivers/phy/phy-da8xx-usb.c | 245
-rw-r--r-- drivers/phy/phy-qcom-ufs-qmp-14nm.c | 1
-rw-r--r-- drivers/phy/phy-qcom-ufs-qmp-20nm.c | 1
-rw-r--r-- drivers/phy/phy-rockchip-emmc.c | 241
-rw-r--r-- drivers/phy/phy-rockchip-usb.c | 23
-rw-r--r-- drivers/phy/phy-sun4i-usb.c | 113
-rw-r--r-- drivers/phy/phy-sun9i-usb.c | 4
-rw-r--r-- drivers/phy/phy-xgene.c | 4
-rw-r--r-- drivers/pinctrl/Kconfig | 26
-rw-r--r-- drivers/pinctrl/Makefile | 4
-rw-r--r-- drivers/pinctrl/bcm/Kconfig | 15
-rw-r--r-- drivers/pinctrl/bcm/Makefile | 1
-rw-r--r-- drivers/pinctrl/bcm/pinctrl-iproc-gpio.c | 118
-rw-r--r-- drivers/pinctrl/bcm/pinctrl-ns2-mux.c | 12
-rw-r--r-- drivers/pinctrl/bcm/pinctrl-nsp-gpio.c | 6
-rw-r--r-- drivers/pinctrl/bcm/pinctrl-nsp-mux.c | 642
-rw-r--r-- drivers/pinctrl/core.c | 25
-rw-r--r-- drivers/pinctrl/core.h | 2
-rw-r--r-- drivers/pinctrl/devicetree.c | 7
-rw-r--r-- drivers/pinctrl/freescale/pinctrl-imx.c | 29
-rw-r--r-- drivers/pinctrl/freescale/pinctrl-imx1-core.c | 3
-rw-r--r-- drivers/pinctrl/freescale/pinctrl-imx1.c | 9
-rw-r--r-- drivers/pinctrl/freescale/pinctrl-imx21.c | 9
-rw-r--r-- drivers/pinctrl/freescale/pinctrl-imx23.c | 17
-rw-r--r-- drivers/pinctrl/freescale/pinctrl-imx25.c | 10
-rw-r--r-- drivers/pinctrl/freescale/pinctrl-imx27.c | 10
-rw-r--r-- drivers/pinctrl/freescale/pinctrl-imx28.c | 17
-rw-r--r-- drivers/pinctrl/freescale/pinctrl-imx35.c | 10
-rw-r--r-- drivers/pinctrl/freescale/pinctrl-imx50.c | 9
-rw-r--r-- drivers/pinctrl/freescale/pinctrl-imx51.c | 10
-rw-r--r-- drivers/pinctrl/freescale/pinctrl-imx53.c | 10
-rw-r--r-- drivers/pinctrl/freescale/pinctrl-imx6dl.c | 14
-rw-r--r-- drivers/pinctrl/freescale/pinctrl-imx6q.c | 10
-rw-r--r-- drivers/pinctrl/freescale/pinctrl-imx6sl.c | 15
-rw-r--r-- drivers/pinctrl/freescale/pinctrl-imx6sx.c | 14
-rw-r--r-- drivers/pinctrl/freescale/pinctrl-imx6ul.c | 14
-rw-r--r-- drivers/pinctrl/freescale/pinctrl-imx7d.c | 14
-rw-r--r-- drivers/pinctrl/freescale/pinctrl-mxs.c | 12
-rw-r--r-- drivers/pinctrl/freescale/pinctrl-mxs.h | 1
-rw-r--r-- drivers/pinctrl/freescale/pinctrl-vf610.c | 10
-rw-r--r-- drivers/pinctrl/intel/Kconfig | 11
-rw-r--r-- drivers/pinctrl/intel/Makefile | 1
-rw-r--r-- drivers/pinctrl/intel/pinctrl-baytrail.c | 25
-rw-r--r-- drivers/pinctrl/intel/pinctrl-broxton.c | 43
-rw-r--r-- drivers/pinctrl/intel/pinctrl-cherryview.c | 80
-rw-r--r-- drivers/pinctrl/intel/pinctrl-intel.c | 52
-rw-r--r-- drivers/pinctrl/intel/pinctrl-merrifield.c | 912
-rw-r--r-- drivers/pinctrl/mediatek/pinctrl-mtk-common.c | 4
-rw-r--r-- drivers/pinctrl/meson/pinctrl-meson-gxbb.c | 163
-rw-r--r-- drivers/pinctrl/mvebu/pinctrl-kirkwood.c | 85
-rw-r--r-- drivers/pinctrl/nomadik/pinctrl-nomadik.c | 96
-rw-r--r-- drivers/pinctrl/pinconf-generic.c | 22
-rw-r--r-- drivers/pinctrl/pinconf.c | 8
-rw-r--r-- drivers/pinctrl/pinctrl-at91-pio4.c | 26
-rw-r--r-- drivers/pinctrl/pinctrl-at91.c | 13
-rw-r--r-- drivers/pinctrl/pinctrl-digicolor.c | 16
-rw-r--r-- drivers/pinctrl/pinctrl-lpc18xx.c | 20
-rw-r--r-- drivers/pinctrl/pinctrl-max77620.c | 673
-rw-r--r-- drivers/pinctrl/pinctrl-oxnas.c | 846
-rw-r--r-- drivers/pinctrl/pinctrl-pistachio.c | 9
-rw-r--r-- drivers/pinctrl/pinctrl-rockchip.c | 4
-rw-r--r-- drivers/pinctrl/pinctrl-st.c | 2
-rw-r--r-- drivers/pinctrl/pinctrl-u300.c | 2
-rw-r--r-- drivers/pinctrl/pinctrl-xway.c | 77
-rw-r--r-- drivers/pinctrl/pinctrl-zynq.c | 13
-rw-r--r-- drivers/pinctrl/pinmux.c | 16
-rw-r--r-- drivers/pinctrl/qcom/Kconfig | 8
-rw-r--r-- drivers/pinctrl/qcom/Makefile | 1
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-mdm9615.c | 483
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-msm.c | 6
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-msm8660.c | 114
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-msm8x74.c | 43
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c | 1
-rw-r--r-- drivers/pinctrl/samsung/pinctrl-exynos5440.c | 1
-rw-r--r-- drivers/pinctrl/samsung/pinctrl-samsung.c | 1
-rw-r--r-- drivers/pinctrl/sh-pfc/core.c | 10
-rw-r--r-- drivers/pinctrl/sh-pfc/core.h | 58
-rw-r--r-- drivers/pinctrl/sh-pfc/gpio.c | 13
-rw-r--r-- drivers/pinctrl/sh-pfc/pfc-r8a73a4.c | 1
-rw-r--r-- drivers/pinctrl/sh-pfc/pfc-r8a7740.c | 1
-rw-r--r-- drivers/pinctrl/sh-pfc/pfc-r8a7778.c | 2
-rw-r--r-- drivers/pinctrl/sh-pfc/pfc-r8a7790.c | 59
-rw-r--r-- drivers/pinctrl/sh-pfc/pfc-r8a7791.c | 1
-rw-r--r-- drivers/pinctrl/sh-pfc/pfc-r8a7795.c | 327
-rw-r--r-- drivers/pinctrl/sh-pfc/pfc-sh7757.c | 1
-rw-r--r-- drivers/pinctrl/sh-pfc/pinctrl.c | 46
-rw-r--r-- drivers/pinctrl/sh-pfc/sh_pfc.h | 55
-rw-r--r-- drivers/pinctrl/sirf/pinctrl-atlas7.c | 4
-rw-r--r-- drivers/pinctrl/stm32/Kconfig | 6
-rw-r--r-- drivers/pinctrl/stm32/Makefile | 1
-rw-r--r-- drivers/pinctrl/stm32/pinctrl-stm32.c | 31
-rw-r--r-- drivers/pinctrl/stm32/pinctrl-stm32f746.c | 1681
-rw-r--r-- drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c | 6
-rw-r--r-- drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c | 6
-rw-r--r-- drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c | 6
-rw-r--r-- drivers/pinctrl/tegra/pinctrl-tegra.c | 8
-rw-r--r-- drivers/pinctrl/tegra/pinctrl-tegra.h | 6
-rw-r--r-- drivers/pinctrl/tegra/pinctrl-tegra114.c | 4
-rw-r--r-- drivers/pinctrl/tegra/pinctrl-tegra124.c | 4
-rw-r--r-- drivers/pinctrl/tegra/pinctrl-tegra20.c | 6
-rw-r--r-- drivers/pinctrl/tegra/pinctrl-tegra210.c | 4
-rw-r--r-- drivers/pinctrl/tegra/pinctrl-tegra30.c | 4
-rw-r--r-- drivers/pinctrl/uniphier/Kconfig | 20
-rw-r--r-- drivers/pinctrl/uniphier/Makefile | 2
-rw-r--r-- drivers/pinctrl/uniphier/pinctrl-uniphier-core.c | 298
-rw-r--r-- drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c | 952
-rw-r--r-- drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c | 1050
-rw-r--r-- drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c | 494
-rw-r--r-- drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c | 609
-rw-r--r-- drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c | 819
-rw-r--r-- drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c | 653
-rw-r--r-- drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c | 603
-rw-r--r-- drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c | 455
-rw-r--r-- drivers/pinctrl/uniphier/pinctrl-uniphier.h | 59
-rw-r--r-- drivers/platform/olpc/olpc-ec.c | 8
-rw-r--r-- drivers/platform/x86/Kconfig | 15
-rw-r--r-- drivers/platform/x86/Makefile | 1
-rw-r--r-- drivers/platform/x86/apple-gmux.c | 55
-rw-r--r-- drivers/platform/x86/asus-nb-wmi.c | 50
-rw-r--r-- drivers/platform/x86/asus-wireless.c | 91
-rw-r--r-- drivers/platform/x86/asus-wmi.c | 8
-rw-r--r-- drivers/platform/x86/asus-wmi.h | 1
-rw-r--r-- drivers/platform/x86/dell-wmi.c | 293
-rw-r--r-- drivers/platform/x86/fujitsu-laptop.c | 81
-rw-r--r-- drivers/platform/x86/intel-hid.c | 5
-rw-r--r-- drivers/platform/x86/intel-vbtn.c | 188
-rw-r--r-- drivers/platform/x86/intel_pmc_core.c | 54
-rw-r--r-- drivers/platform/x86/intel_pmc_core.h | 3
-rw-r--r-- drivers/platform/x86/intel_pmc_ipc.c | 10
-rw-r--r-- drivers/platform/x86/intel_pmic_gpio.c | 8
-rw-r--r-- drivers/platform/x86/intel_telemetry_debugfs.c | 5
-rw-r--r-- drivers/platform/x86/intel_telemetry_pltdrv.c | 5
-rw-r--r-- drivers/platform/x86/toshiba_acpi.c | 136
-rw-r--r-- drivers/pnp/isapnp/proc.c | 2
-rw-r--r-- drivers/pnp/pnpbios/core.c | 4
-rw-r--r-- drivers/power/Kconfig | 2
-rw-r--r-- drivers/power/axp20x_usb_power.c | 92
-rw-r--r-- drivers/power/axp288_charger.c | 77
-rw-r--r-- drivers/power/bq27xxx_battery.c | 289
-rw-r--r-- drivers/power/bq27xxx_battery_i2c.c | 2
-rw-r--r-- drivers/power/max8903_charger.c | 239
-rw-r--r-- drivers/power/power_supply_core.c | 6
-rw-r--r-- drivers/power/power_supply_sysfs.c | 2
-rw-r--r-- drivers/power/qcom_smbb.c | 21
-rw-r--r-- drivers/power/reset/Kconfig | 27
-rw-r--r-- drivers/power/reset/Makefile | 3
-rw-r--r-- drivers/power/reset/brcm-kona-reset.c | 73
-rw-r--r-- drivers/power/reset/reboot-mode.c | 140
-rw-r--r-- drivers/power/reset/reboot-mode.h | 14
-rw-r--r-- drivers/power/reset/syscon-poweroff.c | 2
-rw-r--r-- drivers/power/reset/syscon-reboot-mode.c | 99
-rw-r--r-- drivers/power/reset/vexpress-poweroff.c | 4
-rw-r--r-- drivers/powercap/intel_rapl.c | 157
-rw-r--r-- drivers/pwm/Kconfig | 26
-rw-r--r-- drivers/pwm/Makefile | 3
-rw-r--r-- drivers/pwm/core.c | 27
-rw-r--r-- drivers/pwm/pwm-atmel.c | 30
-rw-r--r-- drivers/pwm/pwm-bcm-iproc.c | 277
-rw-r--r-- drivers/pwm/pwm-clps711x.c | 2
-rw-r--r-- drivers/pwm/pwm-cros-ec.c | 260
-rw-r--r-- drivers/pwm/pwm-lpc32xx.c | 7
-rw-r--r-- drivers/pwm/pwm-lpss-pci.c | 1
-rw-r--r-- drivers/pwm/pwm-lpss.c | 26
-rw-r--r-- drivers/pwm/pwm-omap-dmtimer.c | 12
-rw-r--r-- drivers/pwm/pwm-rockchip.c | 178
-rw-r--r-- drivers/pwm/pwm-stmpe.c | 319
-rw-r--r-- drivers/pwm/pwm-tegra.c | 69
-rw-r--r-- drivers/pwm/pwm-tiecap.c | 37
-rw-r--r-- drivers/pwm/pwm-tiehrpwm.c | 38
-rw-r--r-- drivers/pwm/pwm-tipwmss.c | 49
-rw-r--r-- drivers/pwm/sysfs.c | 17
-rw-r--r-- drivers/rapidio/Kconfig | 9
-rw-r--r-- drivers/rapidio/Makefile | 1
-rw-r--r-- drivers/rapidio/devices/rio_mport_cdev.c | 6
-rw-r--r-- drivers/rapidio/devices/tsi721.c | 57
-rw-r--r-- drivers/rapidio/devices/tsi721.h | 2
-rw-r--r-- drivers/rapidio/devices/tsi721_dma.c | 27
-rw-r--r-- drivers/rapidio/rio-scan.c | 74
-rw-r--r-- drivers/rapidio/rio.c | 212
-rw-r--r-- drivers/rapidio/rio.h | 2
-rw-r--r-- drivers/rapidio/rio_cm.c | 2381
-rw-r--r-- drivers/rapidio/switches/Kconfig | 6
-rw-r--r-- drivers/rapidio/switches/Makefile | 1
-rw-r--r-- drivers/rapidio/switches/idt_gen2.c | 7
-rw-r--r-- drivers/rapidio/switches/idt_gen3.c | 382
-rw-r--r-- drivers/rapidio/switches/tsi57x.c | 26
-rw-r--r-- drivers/regulator/Kconfig | 18
-rw-r--r-- drivers/regulator/Makefile | 1
-rw-r--r-- drivers/regulator/ab8500-ext.c | 465
-rw-r--r-- drivers/regulator/act8865-regulator.c | 12
-rw-r--r-- drivers/regulator/axp20x-regulator.c | 147
-rw-r--r-- drivers/regulator/core.c | 27
-rw-r--r-- drivers/regulator/da9052-regulator.c | 6
-rw-r--r-- drivers/regulator/da9210-regulator.c | 21
-rw-r--r-- drivers/regulator/da9211-regulator.c | 13
-rw-r--r-- drivers/regulator/da9211-regulator.h | 3
-rw-r--r-- drivers/regulator/fixed.c | 14
-rw-r--r-- drivers/regulator/lp873x-regulator.c | 14
-rw-r--r-- drivers/regulator/max14577-regulator.c | 4
-rw-r--r-- drivers/regulator/max77693-regulator.c | 4
-rw-r--r-- drivers/regulator/max8973-regulator.c | 16
-rw-r--r-- drivers/regulator/mt6323-regulator.c | 425
-rw-r--r-- drivers/regulator/mt6397-regulator.c | 95
-rw-r--r-- drivers/regulator/of_regulator.c | 3
-rw-r--r-- drivers/regulator/pfuze100-regulator.c | 15
-rw-r--r-- drivers/regulator/pv88060-regulator.c | 3
-rw-r--r-- drivers/regulator/pv88080-regulator.c | 3
-rw-r--r-- drivers/regulator/pv88090-regulator.c | 3
-rw-r--r-- drivers/regulator/pwm-regulator.c | 196
-rw-r--r-- drivers/regulator/qcom_smd-regulator.c | 32
-rw-r--r-- drivers/regulator/qcom_spmi-regulator.c | 7
-rw-r--r-- drivers/regulator/rn5t618-regulator.c | 40
-rw-r--r-- drivers/regulator/tps65217-regulator.c | 69
-rw-r--r-- drivers/regulator/tps65218-regulator.c | 76
-rw-r--r-- drivers/regulator/twl-regulator.c | 2
-rw-r--r-- drivers/remoteproc/Kconfig | 14
-rw-r--r-- drivers/remoteproc/Makefile | 2
-rw-r--r-- drivers/remoteproc/qcom_mdt_loader.c | 179
-rw-r--r-- drivers/remoteproc/qcom_mdt_loader.h | 13
-rw-r--r-- drivers/remoteproc/qcom_q6v5_pil.c | 907
-rw-r--r-- drivers/reset/Kconfig | 14
-rw-r--r-- drivers/reset/Makefile | 2
-rw-r--r-- drivers/reset/core.c | 37
-rw-r--r-- drivers/reset/hisilicon/hi6220_reset.c | 122
-rw-r--r-- drivers/reset/reset-ath79.c | 3
-rw-r--r-- drivers/reset/reset-meson.c | 136
-rw-r--r-- drivers/reset/reset-oxnas.c | 12
-rw-r--r-- drivers/reset/reset-pistachio.c | 12
-rw-r--r-- drivers/reset/reset-socfpga.c | 12
-rw-r--r-- drivers/reset/reset-sunxi.c | 12
-rw-r--r-- drivers/reset/reset-ti-syscon.c | 237
-rw-r--r-- drivers/reset/reset-zynq.c | 12
-rw-r--r-- drivers/reset/sti/Kconfig | 1
-rw-r--r-- drivers/rtc/Kconfig | 26
-rw-r--r-- drivers/rtc/Makefile | 2
-rw-r--r-- drivers/rtc/interface.c | 32
-rw-r--r-- drivers/rtc/rtc-abx80x.c | 12
-rw-r--r-- drivers/rtc/rtc-asm9260.c | 1
-rw-r--r-- drivers/rtc/rtc-at91sam9.c | 1
-rw-r--r-- drivers/rtc/rtc-cmos.c | 17
-rw-r--r-- drivers/rtc/rtc-da9052.c | 1
-rw-r--r-- drivers/rtc/rtc-da9055.c | 1
-rw-r--r-- drivers/rtc/rtc-davinci.c | 2
-rw-r--r-- drivers/rtc/rtc-ds1286.c | 2
-rw-r--r-- drivers/rtc/rtc-ds1305.c | 7
-rw-r--r-- drivers/rtc/rtc-ds1307.c | 5
-rw-r--r-- drivers/rtc/rtc-ds1343.c | 6
-rw-r--r-- drivers/rtc/rtc-ds1685.c | 53
-rw-r--r-- drivers/rtc/rtc-ds2404.c | 2
-rw-r--r-- drivers/rtc/rtc-ds3232.c | 6
-rw-r--r-- drivers/rtc/rtc-efi.c | 6
-rw-r--r-- drivers/rtc/rtc-generic.c | 36
-rw-r--r-- drivers/rtc/rtc-hym8563.c | 5
-rw-r--r-- drivers/rtc/rtc-isl12057.c | 33
-rw-r--r-- drivers/rtc/rtc-m41t80.c | 34
-rw-r--r-- drivers/rtc/rtc-m48t86.c | 2
-rw-r--r-- drivers/rtc/rtc-max6916.c | 164
-rw-r--r-- drivers/rtc/rtc-mc146818-lib.c | 198
-rw-r--r-- drivers/rtc/rtc-mrst.c | 10
-rw-r--r-- drivers/rtc/rtc-opal.c | 4
-rw-r--r-- drivers/rtc/rtc-pcf2123.c | 4
-rw-r--r-- drivers/rtc/rtc-pcf85063.c | 59
-rw-r--r-- drivers/rtc/rtc-pcf8563.c | 5
-rw-r--r-- drivers/rtc/rtc-rc5t583.c | 1
-rw-r--r-- drivers/rtc/rtc-rs5c372.c | 6
-rw-r--r-- drivers/rtc/rtc-rv8803.c | 205
-rw-r--r-- drivers/rtc/rtc-rx8010.c | 8
-rw-r--r-- drivers/rtc/rtc-rx8025.c | 5
-rw-r--r-- drivers/rtc/rtc-s35390a.c | 157
-rw-r--r-- drivers/rtc/rtc-s3c.c | 14
-rw-r--r-- drivers/rtc/rtc-sh.c | 42
-rw-r--r-- drivers/rtc/rtc-tegra.c | 6
-rw-r--r-- drivers/rtc/rtc-v3020.c | 2
-rw-r--r-- drivers/s390/block/dasd_eckd.c | 14
-rw-r--r-- drivers/s390/block/dasd_genhd.c | 3
-rw-r--r-- drivers/s390/block/dcssblk.c | 10
-rw-r--r-- drivers/s390/block/scm_blk.c | 3
-rw-r--r-- drivers/s390/char/keyboard.c | 15
-rw-r--r-- drivers/s390/char/sclp_con.c | 3
-rw-r--r-- drivers/s390/char/sclp_config.c | 2
-rw-r--r-- drivers/s390/char/sclp_early.c | 12
-rw-r--r-- drivers/s390/char/sclp_ocf.c | 23
-rw-r--r-- drivers/s390/char/zcore.c | 2
-rw-r--r-- drivers/s390/cio/chp.c | 82
-rw-r--r-- drivers/s390/cio/chp.h | 2
-rw-r--r-- drivers/s390/cio/chsc.c | 25
-rw-r--r-- drivers/s390/cio/chsc.h | 5
-rw-r--r-- drivers/s390/cio/chsc_sch.c | 2
-rw-r--r-- drivers/s390/cio/cmf.c | 15
-rw-r--r-- drivers/s390/cio/device.c | 2
-rw-r--r-- drivers/s390/cio/device_ops.c | 22
-rw-r--r-- drivers/s390/cio/device_status.c | 7
-rw-r--r-- drivers/s390/cio/idset.h | 2
-rw-r--r-- drivers/s390/cio/io_sch.h | 1
-rw-r--r-- drivers/s390/cio/ioasm.c | 91
-rw-r--r-- drivers/s390/cio/qdio_main.c | 113
-rw-r--r-- drivers/s390/crypto/ap_bus.c | 127
-rw-r--r-- drivers/s390/crypto/ap_bus.h | 1
-rw-r--r-- drivers/s390/crypto/zcrypt_cex2a.c | 2
-rw-r--r-- drivers/s390/crypto/zcrypt_cex4.c | 2
-rw-r--r-- drivers/s390/crypto/zcrypt_pcixcc.c | 2
-rw-r--r-- drivers/s390/net/qeth_core.h | 46
-rw-r--r-- drivers/s390/net/qeth_core_main.c | 263
-rw-r--r-- drivers/s390/net/qeth_core_sys.c | 4
-rw-r--r-- drivers/s390/net/qeth_l2.h | 7
-rw-r--r-- drivers/s390/net/qeth_l2_main.c | 105
-rw-r--r-- drivers/s390/net/qeth_l3.h | 31
-rw-r--r-- drivers/s390/net/qeth_l3_main.c | 1031
-rw-r--r-- drivers/s390/net/qeth_l3_sys.c | 83
-rw-r--r-- drivers/s390/virtio/Makefile | 6
-rw-r--r-- drivers/s390/virtio/kvm_virtio.c | 4
-rw-r--r-- drivers/scsi/53c700.c | 10
-rw-r--r-- drivers/scsi/53c700.h | 15
-rw-r--r-- drivers/scsi/Kconfig | 19
-rw-r--r-- drivers/scsi/Makefile | 1
-rw-r--r-- drivers/scsi/aacraid/commctrl.c | 7
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 14
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_io.c | 2
-rw-r--r-- drivers/scsi/bnx2i/bnx2i_hwi.c | 2
-rw-r--r-- drivers/scsi/cxgbi/Makefile | 2
-rw-r--r-- drivers/scsi/cxgbi/cxgb3i/Kbuild | 1
-rw-r--r-- drivers/scsi/cxgbi/cxgb3i/Kconfig | 1
-rw-r--r-- drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | 164
-rw-r--r-- drivers/scsi/cxgbi/cxgb4i/Kbuild | 1
-rw-r--r-- drivers/scsi/cxgbi/cxgb4i/Kconfig | 1
-rw-r--r-- drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 203
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c734
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h188
-rw-r--r--drivers/scsi/cxlflash/main.c106
-rw-r--r--drivers/scsi/cxlflash/main.h6
-rw-r--r--drivers/scsi/cxlflash/sislite.h6
-rw-r--r--drivers/scsi/fcoe/fcoe.c374
-rw-r--r--drivers/scsi/fcoe/fcoe.h1
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c245
-rw-r--r--drivers/scsi/fcoe/fcoe_sysfs.c39
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c4
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c14
-rw-r--r--drivers/scsi/fnic/fnic_fip.h8
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c73
-rw-r--r--drivers/scsi/hosts.c2
-rw-r--r--drivers/scsi/hpsa.c83
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.h2
-rw-r--r--drivers/scsi/ibmvscsi_tgt/Makefile3
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c4087
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h346
-rw-r--r--drivers/scsi/ibmvscsi_tgt/libsrp.c427
-rw-r--r--drivers/scsi/ibmvscsi_tgt/libsrp.h123
-rw-r--r--drivers/scsi/ipr.c26
-rw-r--r--drivers/scsi/ipr.h4
-rw-r--r--drivers/scsi/libfc/fc_exch.c10
-rw-r--r--drivers/scsi/libfc/fc_lport.c24
-rw-r--r--drivers/scsi/libfc/fc_rport.c49
-rw-r--r--drivers/scsi/libsas/sas_ata.c12
-rw-r--r--drivers/scsi/lpfc/lpfc.h27
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c237
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.h116
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h7
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c290
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h36
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h41
-rw-r--r--drivers/scsi/lpfc/lpfc_ids.h122
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c292
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c14
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c116
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c27
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c5
-rw-r--r--drivers/scsi/osd/osd_initiator.c37
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c170
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c93
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.h13
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c50
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h12
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c39
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c31
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c123
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c127
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c16
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/scsi_debug.c93
-rw-r--r--drivers/scsi/scsi_devinfo.c4
-rw-r--r--drivers/scsi/scsi_priv.h1
-rw-r--r--drivers/scsi/scsi_transport_sas.c16
-rw-r--r--drivers/scsi/sd.c26
-rw-r--r--drivers/scsi/ses.c5
-rw-r--r--drivers/scsi/snic/snic_disc.c4
-rw-r--r--drivers/scsi/snic/snic_fwint.h2
-rw-r--r--drivers/scsi/sr.c3
-rw-r--r--drivers/scsi/storvsc_drv.c2
-rw-r--r--drivers/scsi/ufs/Kconfig16
-rw-r--r--drivers/scsi/ufs/Makefile2
-rw-r--r--drivers/scsi/ufs/tc-dwc-g210-pci.c181
-rw-r--r--drivers/scsi/ufs/tc-dwc-g210-pltfrm.c113
-rw-r--r--drivers/scsi/ufs/tc-dwc-g210.c319
-rw-r--r--drivers/scsi/ufs/tc-dwc-g210.h19
-rw-r--r--drivers/scsi/ufs/ufshcd-dwc.c154
-rw-r--r--drivers/scsi/ufs/ufshcd-dwc.h26
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c2
-rw-r--r--drivers/scsi/ufs/ufshcd.c92
-rw-r--r--drivers/scsi/ufs/ufshcd.h7
-rw-r--r--drivers/scsi/ufs/ufshci-dwc.h36
-rw-r--r--drivers/scsi/ufs/ufshci.h11
-rw-r--r--drivers/scsi/ufs/unipro.h39
-rw-r--r--drivers/scsi/vmw_pvscsi.c2
-rw-r--r--drivers/scsi/vmw_pvscsi.h2
-rw-r--r--drivers/scsi/wd7000.c6
-rw-r--r--drivers/scsi/wd719x.c2
-rw-r--r--drivers/sh/pm_runtime.c9
-rw-r--r--drivers/soc/Kconfig2
-rw-r--r--drivers/soc/Makefile2
-rw-r--r--drivers/soc/bcm/Kconfig18
-rw-r--r--drivers/soc/bcm/Makefile1
-rw-r--r--drivers/soc/bcm/brcmstb/Makefile1
-rw-r--r--drivers/soc/bcm/brcmstb/biuctrl.c117
-rw-r--r--drivers/soc/bcm/brcmstb/common.c99
-rw-r--r--drivers/soc/fsl/qe/Kconfig6
-rw-r--r--drivers/soc/fsl/qe/Makefile1
-rw-r--r--drivers/soc/fsl/qe/qe.c6
-rw-r--r--drivers/soc/fsl/qe/qe_tdm.c276
-rw-r--r--drivers/soc/fsl/qe/ucc.c450
-rw-r--r--drivers/soc/fsl/qe/ucc_fast.c36
-rw-r--r--drivers/soc/qcom/smem_state.c12
-rw-r--r--drivers/soc/qcom/smp2p.c4
-rw-r--r--drivers/soc/qcom/smsm.c2
-rw-r--r--drivers/soc/qcom/wcnss_ctrl.c125
-rw-r--r--drivers/soc/renesas/Makefile2
-rw-r--r--drivers/soc/renesas/r8a7792-sysc.c34
-rw-r--r--drivers/soc/renesas/r8a7796-sysc.c48
-rw-r--r--drivers/soc/renesas/rcar-sysc.c45
-rw-r--r--drivers/soc/renesas/rcar-sysc.h2
-rw-r--r--drivers/soc/samsung/Kconfig4
-rw-r--r--drivers/soc/samsung/Makefile1
-rw-r--r--drivers/soc/samsung/exynos3250-pmu.c2
-rw-r--r--drivers/soc/samsung/exynos5420-pmu.c2
-rw-r--r--drivers/soc/samsung/pm_domains.c245
-rw-r--r--drivers/soc/tegra/pmc.c141
-rw-r--r--drivers/soc/ux500/Kconfig7
-rw-r--r--drivers/soc/ux500/Makefile1
-rw-r--r--drivers/soc/ux500/ux500-soc-id.c222
-rw-r--r--drivers/spi/Kconfig1
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/spi-bfin-sport.c15
-rw-r--r--drivers/spi/spi-bfin5xx.c15
-rw-r--r--drivers/spi/spi-cavium-octeon.c104
-rw-r--r--drivers/spi/spi-cavium.c151
-rw-r--r--drivers/spi/spi-cavium.h329
-rw-r--r--drivers/spi/spi-clps711x.c69
-rw-r--r--drivers/spi/spi-img-spfi.c2
-rw-r--r--drivers/spi/spi-imx.c191
-rw-r--r--drivers/spi/spi-loopback-test.c2
-rw-r--r--drivers/spi/spi-mpc52xx-psc.c17
-rw-r--r--drivers/spi/spi-mt65xx.c1
-rw-r--r--drivers/spi/spi-omap2-mcspi.c145
-rw-r--r--drivers/spi/spi-orion.c88
-rw-r--r--drivers/spi/spi-pic32-sqi.c7
-rw-r--r--drivers/spi/spi-pic32.c5
-rw-r--r--drivers/spi/spi-pxa2xx-dma.c170
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c129
-rw-r--r--drivers/spi/spi-pxa2xx.c46
-rw-r--r--drivers/spi/spi-pxa2xx.h9
-rw-r--r--drivers/spi/spi-qup.c1
-rw-r--r--drivers/spi/spi-rockchip.c20
-rw-r--r--drivers/spi/spi-s3c64xx.c206
-rw-r--r--drivers/spi/spi-sh-msiof.c21
-rw-r--r--drivers/spi/spi-sh.c16
-rw-r--r--drivers/spi/spi-st-ssc4.c36
-rw-r--r--drivers/spi/spi-sun4i.c8
-rw-r--r--drivers/spi/spi-sun6i.c7
-rw-r--r--drivers/spi/spi-ti-qspi.c2
-rw-r--r--drivers/spi/spi-topcliff-pch.c26
-rw-r--r--drivers/spi/spi-txx9.c11
-rw-r--r--drivers/spi/spi-xilinx.c8
-rw-r--r--drivers/spi/spi.c176
-rw-r--r--drivers/spi/spidev.c41
-rw-r--r--drivers/ssb/driver_gpio.c22
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/android/Kconfig17
-rw-r--r--drivers/staging/android/Makefile3
-rw-r--r--drivers/staging/android/ashmem.c2
-rw-r--r--drivers/staging/android/lowmemorykiller.c12
-rw-r--r--drivers/staging/android/sw_sync.c341
-rw-r--r--drivers/staging/android/sync_debug.c165
-rw-r--r--drivers/staging/android/sync_debug.h84
-rw-r--r--drivers/staging/android/trace/sync.h14
-rw-r--r--drivers/staging/comedi/comedi.h2
-rw-r--r--drivers/staging/comedi/comedi_fops.c21
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1564.c305
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9118.c8
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1760.c1
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas64.c209
-rw-r--r--drivers/staging/comedi/drivers/comedi_bond.c10
-rw-r--r--drivers/staging/comedi/drivers/daqboard2000.c380
-rw-r--r--drivers/staging/comedi/drivers/das16.c39
-rw-r--r--drivers/staging/comedi/drivers/das16m1.c482
-rw-r--r--drivers/staging/comedi/drivers/das6402.c74
-rw-r--r--drivers/staging/comedi/drivers/das800.c106
-rw-r--r--drivers/staging/comedi/drivers/dmm32at.c98
-rw-r--r--drivers/staging/comedi/drivers/dt2801.c95
-rw-r--r--drivers/staging/comedi/drivers/dt2811.c852
-rw-r--r--drivers/staging/comedi/drivers/dt2814.c72
-rw-r--r--drivers/staging/comedi/drivers/dt2815.c140
-rw-r--r--drivers/staging/comedi/drivers/dt2817.c64
-rw-r--r--drivers/staging/comedi/drivers/gsc_hpdi.c87
-rw-r--r--drivers/staging/comedi/drivers/jr3_pci.c36
-rw-r--r--drivers/staging/comedi/drivers/me_daq.c2
-rw-r--r--drivers/staging/comedi/drivers/mpc624.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_65xx.c18
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c8
-rw-r--r--drivers/staging/comedi/drivers/pcmmio.c40
-rw-r--r--drivers/staging/comedi/drivers/pcmuio.c2
-rw-r--r--drivers/staging/comedi/drivers/plx9080.h957
-rw-r--r--drivers/staging/comedi/drivers/quatech_daqp_cs.c2
-rw-r--r--drivers/staging/comedi/drivers/rtd520.c15
-rw-r--r--drivers/staging/comedi/drivers/s626.c8
-rw-r--r--drivers/staging/comedi/drivers/s626.h356
-rw-r--r--drivers/staging/comedi/drivers/serial2002.c7
-rw-r--r--drivers/staging/emxx_udc/Kconfig2
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c36
-rw-r--r--drivers/staging/fbtft/fbtft-core.c4
-rw-r--r--drivers/staging/fsl-mc/bus/dpbp.c132
-rw-r--r--drivers/staging/fsl-mc/bus/dpmcp-cmd.h86
-rw-r--r--drivers/staging/fsl-mc/bus/dpmcp.c89
-rw-r--r--drivers/staging/fsl-mc/bus/dpmng-cmd.h12
-rw-r--r--drivers/staging/fsl-mc/bus/dpmng.c15
-rw-r--r--drivers/staging/fsl-mc/bus/dprc-cmd.h379
-rw-r--r--drivers/staging/fsl-mc/bus/dprc-driver.c20
-rw-r--r--drivers/staging/fsl-mc/bus/dprc.c715
-rw-r--r--drivers/staging/fsl-mc/bus/mc-allocator.c2
-rw-r--r--drivers/staging/fsl-mc/bus/mc-bus.c71
-rw-r--r--drivers/staging/fsl-mc/bus/mc-msi.c17
-rw-r--r--drivers/staging/fsl-mc/bus/mc-sys.c46
-rw-r--r--drivers/staging/fsl-mc/include/dpbp-cmd.h125
-rw-r--r--drivers/staging/fsl-mc/include/mc-cmd.h91
-rw-r--r--drivers/staging/fsl-mc/include/mc.h21
-rw-r--r--drivers/staging/iio/accel/Kconfig14
-rw-r--r--drivers/staging/iio/accel/Makefile4
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c4
-rw-r--r--drivers/staging/iio/adc/ad7280a.c8
-rw-r--r--drivers/staging/iio/adc/ad7606_ring.c3
-rw-r--r--drivers/staging/iio/adc/ad7816.c3
-rw-r--r--drivers/staging/iio/addac/adt7316.c4
-rw-r--r--drivers/staging/iio/cdc/ad7150.c2
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c2
-rw-r--r--drivers/staging/iio/light/tsl2x7x_core.c2
-rw-r--r--drivers/staging/ks7010/Kconfig10
-rw-r--r--drivers/staging/ks7010/Makefile4
-rw-r--r--drivers/staging/ks7010/TODO36
-rw-r--r--drivers/staging/ks7010/eap_packet.h129
-rw-r--r--drivers/staging/ks7010/ks7010_sdio.c1236
-rw-r--r--drivers/staging/ks7010/ks7010_sdio.h147
-rw-r--r--drivers/staging/ks7010/ks_hostif.c2760
-rw-r--r--drivers/staging/ks7010/ks_hostif.h644
-rw-r--r--drivers/staging/ks7010/ks_wlan.h505
-rw-r--r--drivers/staging/ks7010/ks_wlan_ioctl.h67
-rw-r--r--drivers/staging/ks7010/ks_wlan_net.c3528
-rw-r--r--drivers/staging/ks7010/michael_mic.c139
-rw-r--r--drivers/staging/ks7010/michael_mic.h26
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/curproc.h6
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs.h6
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h6
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h4
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h6
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h6
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h6
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h6
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_string.h6
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_time.h6
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h6
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h6
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h6
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-dlc.h2
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-types.h3
-rw-r--r--drivers/staging/lustre/include/linux/lnet/types.h4
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c354
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h307
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c384
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c8
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c308
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h209
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c204
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c40
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c2
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c74
-rw-r--r--drivers/staging/lustre/lnet/libcfs/debug.c8
-rw-r--r--drivers/staging/lustre/lnet/libcfs/fail.c4
-rw-r--r--drivers/staging/lustre/lnet/libcfs/hash.c6
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_string.c6
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c6
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c6
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-module.c6
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c6
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c6
-rw-r--r--drivers/staging/lustre/lnet/libcfs/module.c6
-rw-r--r--drivers/staging/lustre/lnet/libcfs/prng.c6
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.c6
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.h6
-rw-r--r--drivers/staging/lustre/lnet/libcfs/workitem.c6
-rw-r--r--drivers/staging/lustre/lnet/lnet/acceptor.c6
-rw-r--r--drivers/staging/lustre/lnet/lnet/api-ni.c8
-rw-r--r--drivers/staging/lustre/lnet/lnet/config.c6
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-eq.c6
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-md.c6
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-me.c6
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c6
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-msg.c6
-rw-r--r--drivers/staging/lustre/lnet/lnet/lo.c6
-rw-r--r--drivers/staging/lustre/lnet/lnet/module.c8
-rw-r--r--drivers/staging/lustre/lnet/lnet/net_fault.c4
-rw-r--r--drivers/staging/lustre/lnet/lnet/nidstrings.c6
-rw-r--r--drivers/staging/lustre/lnet/lnet/peer.c6
-rw-r--r--drivers/staging/lustre/lnet/lnet/router.c9
-rw-r--r--drivers/staging/lustre/lnet/selftest/brw_test.c6
-rw-r--r--drivers/staging/lustre/lnet/selftest/conctl.c6
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c6
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.h6
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.c6
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.h6
-rw-r--r--drivers/staging/lustre/lnet/selftest/framework.c6
-rw-r--r--drivers/staging/lustre/lnet/selftest/module.c6
-rw-r--r--drivers/staging/lustre/lnet/selftest/ping_test.c6
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.c6
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.h6
-rw-r--r--drivers/staging/lustre/lnet/selftest/selftest.h7
-rw-r--r--drivers/staging/lustre/lnet/selftest/timer.c6
-rw-r--r--drivers/staging/lustre/lnet/selftest/timer.h6
-rw-r--r--drivers/staging/lustre/lustre/Kconfig6
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_internal.h6
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_lib.c6
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_request.c12
-rw-r--r--drivers/staging/lustre/lustre/fid/lproc_fid.c6
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_cache.c6
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_internal.h6
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_request.c6
-rw-r--r--drivers/staging/lustre/lustre/fld/lproc_fld.c6
-rw-r--r--drivers/staging/lustre/lustre/include/cl_object.h16
-rw-r--r--drivers/staging/lustre/lustre/include/interval_tree.h6
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_compat25.h6
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_lite.h6
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h6
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_user.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lprocfs_status.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lu_object.h8
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_idl.h50
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_user.h21
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_acl.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_cfg.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_debug.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_disk.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm.h24
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_eacl.h17
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_export.h19
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fid.h8
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fld.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_ha.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_handles.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_import.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_intent.h36
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lib.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lite.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_log.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mdc.h9
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mds.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h421
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_param.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_req_layout.h8
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_sec.h12
-rw-r--r--drivers/staging/lustre/lustre/include/obd.h17
-rw-r--r--drivers/staging/lustre/lustre/include/obd_cksum.h6
-rw-r--r--drivers/staging/lustre/lustre/include/obd_class.h16
-rw-r--r--drivers/staging/lustre/lustre/include/obd_support.h9
-rw-r--r--drivers/staging/lustre/lustre/ldlm/interval_tree.c6
-rw-r--r--drivers/staging/lustre/lustre/ldlm/l_lock.c6
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_extent.c6
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_flock.c15
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c6
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_internal.h6
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lib.c9
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lock.c22
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c19
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_plain.c6
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_pool.c6
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c12
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_resource.c8
-rw-r--r--drivers/staging/lustre/lustre/llite/Makefile6
-rw-r--r--drivers/staging/lustre/lustre/llite/dcache.c49
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c55
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c97
-rw-r--r--drivers/staging/lustre/lustre/llite/glimpse.c6
-rw-r--r--drivers/staging/lustre/lustre/llite/lcommon_cl.c6
-rw-r--r--drivers/staging/lustre/lustre/llite/lcommon_misc.c14
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_close.c6
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h132
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c125
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_mmap.c18
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_nfs.c24
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c18
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c34
-rw-r--r--drivers/staging/lustre/lustre/llite/rw.c151
-rw-r--r--drivers/staging/lustre/lustre/llite/rw26.c23
-rw-r--r--drivers/staging/lustre/lustre/llite/statahead.c28
-rw-r--r--drivers/staging/lustre/lustre/llite/super25.c25
-rw-r--r--drivers/staging/lustre/lustre/llite/symlink.c6
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_dev.c16
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_internal.h6
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c15
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_lock.c6
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_object.c6
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_page.c6
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_req.c12
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr.c103
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_cache.c16
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_fld.c6
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_intent.c32
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_internal.h6
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c39
-rw-r--r--drivers/staging/lustre/lustre/lmv/lproc_lmv.c6
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_cl_internal.h6
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_dev.c6
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_ea.c6
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_internal.h6
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_io.c6
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_lock.c6
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_merge.c6
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_obd.c22
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_object.c10
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_offset.c8
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pack.c6
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_page.c6
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pool.c8
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_request.c6
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_dev.c6
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_io.c6
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_lock.c6
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_object.c6
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_page.c6
-rw-r--r--drivers/staging/lustre/lustre/lov/lproc_lov.c6
-rw-r--r--drivers/staging/lustre/lustre/mdc/lproc_mdc.c6
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_internal.h6
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_lib.c18
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_locks.c120
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_reint.c8
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c90
-rw-r--r--drivers/staging/lustre/lustre/mgc/lproc_mgc.c6
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_internal.h6
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_request.c14
-rw-r--r--drivers/staging/lustre/lustre/obdclass/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_internal.h6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_io.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_lock.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_object.c12
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_page.c52
-rw-r--r--drivers/staging/lustre/lustre/obdclass/class_obd.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/debug.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/genops.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/kernelcomm.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-module.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog.c12
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_cat.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_internal.h6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_obd.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_swab.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_status.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_ref.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lustre_handles.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lustre_peer.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_config.c12
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_mount.c8
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obdo.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/statfs_pack.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/uuid.c6
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c6
-rw-r--r--drivers/staging/lustre/lustre/osc/lproc_osc.c6
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c27
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cl_internal.h13
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_dev.c6
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_internal.h6
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_io.c18
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_lock.c27
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_object.c6
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_page.c36
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c24
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c167
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/connection.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/events.c36
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/import.c12
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/layout.c18
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_client.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_net.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c10
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/niobuf.c26
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pack_generic.c19
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pers.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pinger.c9
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h49
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c10
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/recover.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec.c22
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c8
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_config.c10
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_gc.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_null.c13
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_plain.c24
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/wiretest.c54
-rw-r--r--drivers/staging/lustre/sysfs-fs-lustre8
-rw-r--r--drivers/staging/media/Kconfig16
-rw-r--r--drivers/staging/media/Makefile8
-rw-r--r--drivers/staging/media/cec/Kconfig15
-rw-r--r--drivers/staging/media/cec/Makefile5
-rw-r--r--drivers/staging/media/cec/TODO32
-rw-r--r--drivers/staging/media/cec/cec-adap.c1664
-rw-r--r--drivers/staging/media/cec/cec-api.c579
-rw-r--r--drivers/staging/media/cec/cec-core.c412
-rw-r--r--drivers/staging/media/cec/cec-priv.h56
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c14
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.h2
-rw-r--r--drivers/staging/media/lirc/lirc_parallel.c8
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c12
-rw-r--r--drivers/staging/media/omap4iss/iss_video.h1
-rw-r--r--drivers/staging/media/pulse8-cec/Kconfig10
-rw-r--r--drivers/staging/media/pulse8-cec/Makefile1
-rw-r--r--drivers/staging/media/pulse8-cec/TODO52
-rw-r--r--drivers/staging/media/pulse8-cec/pulse8-cec.c505
-rw-r--r--drivers/staging/media/s5p-cec/Kconfig9
-rw-r--r--drivers/staging/media/s5p-cec/Makefile2
-rw-r--r--drivers/staging/media/s5p-cec/TODO7
-rw-r--r--drivers/staging/media/s5p-cec/exynos_hdmi_cec.h38
-rw-r--r--drivers/staging/media/s5p-cec/exynos_hdmi_cecctrl.c209
-rw-r--r--drivers/staging/media/s5p-cec/regs-cec.h96
-rw-r--r--drivers/staging/media/s5p-cec/s5p_cec.c294
-rw-r--r--drivers/staging/media/s5p-cec/s5p_cec.h76
-rw-r--r--drivers/staging/media/tw686x-kh/tw686x-kh-video.c12
-rw-r--r--drivers/staging/media/tw686x-kh/tw686x-kh.h1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c54
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c34
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c6
-rw-r--r--drivers/staging/rtl8192u/r8180_93cx6.c30
-rw-r--r--drivers/staging/rtl8192u/r8180_93cx6.h2
-rw-r--r--drivers/staging/rtl8192u/r8192U.h11
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c198
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.c80
-rw-r--r--drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c11
-rw-r--r--drivers/staging/unisys/visorbus/iovmcall_gnuc.h4
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_main.c12
-rw-r--r--drivers/staging/unisys/visorbus/visorchipset.c2
-rw-r--r--drivers/staging/unisys/visorhba/visorhba_main.c394
-rw-r--r--drivers/staging/unisys/visorinput/visorinput.c2
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c73
-rw-r--r--drivers/staging/wilc1000/Makefile1
-rw-r--r--drivers/staging/wilc1000/TODO5
-rw-r--r--drivers/staging/wilc1000/host_interface.c477
-rw-r--r--drivers/staging/wilc1000/host_interface.h4
-rw-r--r--drivers/staging/wilc1000/linux_wlan.c37
-rw-r--r--drivers/staging/wilc1000/wilc_sdio.c2
-rw-r--r--drivers/staging/wilc1000/wilc_spi.c2
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.c19
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_netdevice.h10
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.c34
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c5
-rw-r--r--drivers/target/iscsi/cxgbit/Kconfig2
-rw-r--r--drivers/target/iscsi/cxgbit/Makefile1
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit.h2
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_main.c2
-rw-r--r--drivers/target/target_core_file.c2
-rw-r--r--drivers/target/target_core_iblock.c41
-rw-r--r--drivers/target/target_core_pscsi.c89
-rw-r--r--drivers/target/target_core_transport.c16
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c2
-rw-r--r--drivers/thermal/clock_cooling.c1
-rw-r--r--drivers/thermal/cpu_cooling.c45
-rw-r--r--drivers/thermal/fair_share.c2
-rw-r--r--drivers/thermal/gov_bang_bang.c2
-rw-r--r--drivers/thermal/imx_thermal.c4
-rw-r--r--drivers/thermal/int340x_thermal/int3406_thermal.c1
-rw-r--r--drivers/thermal/intel_pch_thermal.c60
-rw-r--r--drivers/thermal/intel_powerclamp.c11
-rw-r--r--drivers/thermal/intel_soc_dts_thermal.c4
-rw-r--r--drivers/thermal/power_allocator.c2
-rw-r--r--drivers/thermal/rcar_thermal.c1
-rw-r--r--drivers/thermal/step_wise.c2
-rw-r--r--drivers/thermal/thermal_core.c10
-rw-r--r--drivers/thermal/thermal_hwmon.c2
-rw-r--r--drivers/thunderbolt/nhi.c6
-rw-r--r--drivers/thunderbolt/switch.c4
-rw-r--r--drivers/tty/cyclades.c4
-rw-r--r--drivers/tty/hvc/hvc_console.h1
-rw-r--r--drivers/tty/hvc/hvc_irq.c9
-rw-r--r--drivers/tty/hvc/hvc_opal.c11
-rw-r--r--drivers/tty/ipwireless/tty.c11
-rw-r--r--drivers/tty/metag_da.c4
-rw-r--r--drivers/tty/mips_ejtag_fdc.c4
-rw-r--r--drivers/tty/mxser.c1
-rw-r--r--drivers/tty/serial/8250/8250.h18
-rw-r--r--drivers/tty/serial/8250/8250_core.c10
-rw-r--r--drivers/tty/serial/8250/8250_dma.c1
-rw-r--r--drivers/tty/serial/8250/8250_early.c1
-rw-r--r--drivers/tty/serial/8250/8250_fintek.c36
-rw-r--r--drivers/tty/serial/8250/8250_ingenic.c2
-rw-r--r--drivers/tty/serial/8250/8250_mid.c24
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c2
-rw-r--r--drivers/tty/serial/8250/8250_omap.c4
-rw-r--r--drivers/tty/serial/8250/8250_pci.c28
-rw-r--r--drivers/tty/serial/8250/8250_port.c64
-rw-r--r--drivers/tty/serial/8250/8250_uniphier.c2
-rw-r--r--drivers/tty/serial/8250/Kconfig5
-rw-r--r--drivers/tty/serial/Kconfig3
-rw-r--r--drivers/tty/serial/amba-pl011.c8
-rw-r--r--drivers/tty/serial/ar933x_uart.c4
-rw-r--r--drivers/tty/serial/atmel_serial.c129
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c8
-rw-r--r--drivers/tty/serial/clps711x.c2
-rw-r--r--drivers/tty/serial/fsl_lpuart.c8
-rw-r--r--drivers/tty/serial/m32r_sio.c23
-rw-r--r--drivers/tty/serial/max310x.c176
-rw-r--r--drivers/tty/serial/mps2-uart.c29
-rw-r--r--drivers/tty/serial/msm_serial.c195
-rw-r--r--drivers/tty/serial/pic32_uart.c1
-rw-r--r--drivers/tty/serial/pmac_zilog.c2
-rw-r--r--drivers/tty/serial/pxa.c30
-rw-r--r--drivers/tty/serial/samsung.c20
-rw-r--r--drivers/tty/serial/samsung.h38
-rw-r--r--drivers/tty/serial/serial-tegra.c7
-rw-r--r--drivers/tty/serial/serial_core.c8
-rw-r--r--drivers/tty/serial/serial_mctrl_gpio.c36
-rw-r--r--drivers/tty/serial/serial_mctrl_gpio.h15
-rw-r--r--drivers/tty/serial/sh-sci.c196
-rw-r--r--drivers/tty/serial/sh-sci.h25
-rw-r--r--drivers/tty/serial/sirfsoc_uart.h4
-rw-r--r--drivers/tty/serial/sunhv.c6
-rw-r--r--drivers/tty/serial/vt8500_serial.c30
-rw-r--r--drivers/tty/serial/xilinx_uartps.c30
-rw-r--r--drivers/tty/sysrq.c1
-rw-r--r--drivers/tty/vt/consolemap.c13
-rw-r--r--drivers/tty/vt/keyboard.c16
-rw-r--r--drivers/tty/vt/vt.c431
-rw-r--r--drivers/tty/vt/vt_ioctl.c8
-rw-r--r--drivers/usb/chipidea/Kconfig5
-rw-r--r--drivers/usb/class/cdc-acm.c121
-rw-r--r--drivers/usb/class/cdc-wdm.c30
-rw-r--r--drivers/usb/class/usbtmc.c3
-rw-r--r--drivers/usb/common/common.c26
-rw-r--r--drivers/usb/core/devio.c4
-rw-r--r--drivers/usb/core/message.c153
-rw-r--r--drivers/usb/dwc2/Kconfig1
-rw-r--r--drivers/usb/dwc2/core.h9
-rw-r--r--drivers/usb/dwc2/gadget.c574
-rw-r--r--drivers/usb/dwc2/hcd_queue.c3
-rw-r--r--drivers/usb/dwc2/hw.h14
-rw-r--r--drivers/usb/dwc2/platform.c22
-rw-r--r--drivers/usb/dwc3/core.c389
-rw-r--r--drivers/usb/dwc3/core.h59
-rw-r--r--drivers/usb/dwc3/debug.h140
-rw-r--r--drivers/usb/dwc3/debugfs.c191
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c1
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c53
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c170
-rw-r--r--drivers/usb/dwc3/ep0.c26
-rw-r--r--drivers/usb/dwc3/gadget.c869
-rw-r--r--drivers/usb/dwc3/gadget.h4
-rw-r--r--drivers/usb/dwc3/host.c61
-rw-r--r--drivers/usb/dwc3/io.h7
-rw-r--r--drivers/usb/dwc3/trace.h96
-rw-r--r--drivers/usb/early/ehci-dbgp.c4
-rw-r--r--drivers/usb/gadget/Kconfig2
-rw-r--r--drivers/usb/gadget/composite.c6
-rw-r--r--drivers/usb/gadget/config.c2
-rw-r--r--drivers/usb/gadget/configfs.c2
-rw-r--r--drivers/usb/gadget/function/f_eem.c12
-rw-r--r--drivers/usb/gadget/function/f_fs.c192
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c22
-rw-r--r--drivers/usb/gadget/function/f_rndis.c3
-rw-r--r--drivers/usb/gadget/function/rndis.c6
-rw-r--r--drivers/usb/gadget/function/u_ether.c3
-rw-r--r--drivers/usb/gadget/function/u_serial.c10
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c2
-rw-r--r--drivers/usb/gadget/function/uvc_queue.c2
-rw-r--r--drivers/usb/gadget/legacy/g_ffs.c15
-rw-r--r--drivers/usb/gadget/legacy/inode.c2
-rw-r--r--drivers/usb/gadget/udc/Kconfig4
-rw-r--r--drivers/usb/gadget/udc/Makefile5
-rw-r--r--drivers/usb/gadget/udc/amd5536udc.c9
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_cmd.c3
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_ep.c6
-rw-r--r--drivers/usb/gadget/udc/core.c1526
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c5
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.c7
-rw-r--r--drivers/usb/gadget/udc/m66592-udc.c24
-rw-r--r--drivers/usb/gadget/udc/mv_u3d_core.c23
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c9
-rw-r--r--drivers/usb/gadget/udc/net2272.c4
-rw-r--r--drivers/usb/gadget/udc/net2280.c51
-rw-r--r--drivers/usb/gadget/udc/net2280.h1
-rw-r--r--drivers/usb/gadget/udc/pch_udc.c20
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.c9
-rw-r--r--drivers/usb/gadget/udc/r8a66597-udc.c24
-rw-r--r--drivers/usb/gadget/udc/trace.c18
-rw-r--r--drivers/usb/gadget/udc/trace.h298
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c3
-rw-r--r--drivers/usb/host/Kconfig2
-rw-r--r--drivers/usb/host/ehci-platform.c37
-rw-r--r--drivers/usb/host/max3421-hcd.c2
-rw-r--r--drivers/usb/host/ohci-hcd.c1
-rw-r--r--drivers/usb/host/ohci-platform.c43
-rw-r--r--drivers/usb/host/xhci-mem.c74
-rw-r--r--drivers/usb/host/xhci-pci.c2
-rw-r--r--drivers/usb/host/xhci-plat.c8
-rw-r--r--drivers/usb/host/xhci-ring.c459
-rw-r--r--drivers/usb/host/xhci.c7
-rw-r--r--drivers/usb/host/xhci.h10
-rw-r--r--drivers/usb/image/microtek.h6
-rw-r--r--drivers/usb/misc/Kconfig20
-rw-r--r--drivers/usb/misc/Makefile1
-rw-r--r--drivers/usb/misc/chaoskey.c21
-rw-r--r--drivers/usb/misc/ftdi-elan.c10
-rw-r--r--drivers/usb/misc/legousbtower.c35
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c28
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_con.c87
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_init.h2
-rw-r--r--drivers/usb/misc/usb3503.c25
-rw-r--r--drivers/usb/musb/Kconfig2
-rw-r--r--drivers/usb/musb/Makefile5
-rw-r--r--drivers/usb/musb/cppi_dma.c51
-rw-r--r--drivers/usb/musb/cppi_dma.h31
-rw-r--r--drivers/usb/musb/musb_core.c87
-rw-r--r--drivers/usb/musb/musb_cppi41.c47
-rw-r--r--drivers/usb/musb/musb_debug.h2
-rw-r--r--drivers/usb/musb/musb_dsps.c112
-rw-r--r--drivers/usb/musb/musb_gadget.c122
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c22
-rw-r--r--drivers/usb/musb/musb_host.c133
-rw-r--r--drivers/usb/musb/musb_trace.c33
-rw-r--r--drivers/usb/musb/musb_trace.h371
-rw-r--r--drivers/usb/musb/musb_virthub.c31
-rw-r--r--drivers/usb/musb/musbhsdma.c10
-rw-r--r--drivers/usb/musb/sunxi.c74
-rw-r--r--drivers/usb/phy/Kconfig11
-rw-r--r--drivers/usb/phy/phy-am335x.c2
-rw-r--r--drivers/usb/phy/phy-generic.c8
-rw-r--r--drivers/usb/phy/phy-msm-usb.c178
-rw-r--r--drivers/usb/phy/phy-omap-otg.c4
-rw-r--r--drivers/usb/renesas_usbhs/common.c2
-rw-r--r--drivers/usb/renesas_usbhs/rcar3.c2
-rw-r--r--drivers/usb/serial/cp210x.c5
-rw-r--r--drivers/usb/serial/generic.c18
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c271
-rw-r--r--drivers/usb/storage/usb.c6
-rw-r--r--drivers/usb/usbip/usbip_common.h2
-rw-r--r--drivers/usb/usbip/vudc_rx.c2
-rw-r--r--drivers/usb/usbip/vudc_sysfs.c2
-rw-r--r--drivers/vfio/pci/vfio_pci.c88
-rw-r--r--drivers/vfio/pci/vfio_pci_private.h8
-rw-r--r--drivers/vfio/platform/vfio_amba.c1
-rw-r--r--drivers/vfio/platform/vfio_platform.c5
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c198
-rw-r--r--drivers/vfio/platform/vfio_platform_private.h9
-rw-r--r--drivers/vfio/vfio.c2
-rw-r--r--drivers/vhost/Kconfig18
-rw-r--r--drivers/vhost/Kconfig.vringh5
-rw-r--r--drivers/vhost/Makefile4
-rw-r--r--drivers/vhost/net.c147
-rw-r--r--drivers/vhost/test.c8
-rw-r--r--drivers/vhost/vhost.c927
-rw-r--r--drivers/vhost/vhost.h64
-rw-r--r--drivers/vhost/vsock.c723
-rw-r--r--drivers/video/backlight/lp855x_bl.c29
-rw-r--r--drivers/video/console/dummycon.c3
-rw-r--r--drivers/video/console/fbcon.c53
-rw-r--r--drivers/video/console/mdacon.c45
-rw-r--r--drivers/video/console/newport_con.c42
-rw-r--r--drivers/video/console/sticon.c29
-rw-r--r--drivers/video/console/vgacon.c29
-rw-r--r--drivers/video/fbdev/bfin_adv7393fb.c2
-rw-r--r--drivers/video/fbdev/bfin_adv7393fb.h2
-rw-r--r--drivers/video/fbdev/clps711x-fb.c4
-rw-r--r--drivers/video/fbdev/core/fbmon.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c10
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c60
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c44
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c46
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c54
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c58
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c47
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c83
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c45
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c46
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/apply.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/core.c4
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/display.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dpi.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dsi.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss-of.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss.h11
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss_features.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi.h3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/manager.c3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/output.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/overlay.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/pll.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/rfbi.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/sdi.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/venc.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/video-pll.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-main.c14
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb.h5
-rw-r--r--drivers/video/logo/logo.c4
-rw-r--r--drivers/virtio/virtio_balloon.c52
-rw-r--r--drivers/virtio/virtio_ring.c18
-rw-r--r--drivers/w1/slaves/w1_ds2406.c14
-rw-r--r--drivers/w1/slaves/w1_ds2408.c14
-rw-r--r--drivers/w1/slaves/w1_ds2413.c14
-rw-r--r--drivers/w1/slaves/w1_ds2423.c14
-rw-r--r--drivers/w1/slaves/w1_ds2431.c14
-rw-r--r--drivers/w1/slaves/w1_ds2433.c14
-rw-r--r--drivers/w1/slaves/w1_ds2760.c43
-rw-r--r--drivers/w1/slaves/w1_ds2780.c39
-rw-r--r--drivers/w1/slaves/w1_ds2781.c40
-rw-r--r--drivers/w1/slaves/w1_ds28e04.c14
-rw-r--r--drivers/w1/w1_family.h12
-rw-r--r--drivers/watchdog/Kconfig33
-rw-r--r--drivers/watchdog/Makefile3
-rw-r--r--drivers/watchdog/aspeed_wdt.c212
-rw-r--r--drivers/watchdog/bcm2835_wdt.c11
-rw-r--r--drivers/watchdog/da9063_wdt.c2
-rw-r--r--drivers/watchdog/f71808e_wdt.c28
-rw-r--r--drivers/watchdog/gpio_wdt.c2
-rw-r--r--drivers/watchdog/iTCO_wdt.c2
-rw-r--r--drivers/watchdog/max77620_wdt.c227
-rw-r--r--drivers/watchdog/meson_gxbb_wdt.c270
-rw-r--r--drivers/watchdog/nv_tco.c2
-rw-r--r--drivers/watchdog/pcwd.c14
-rw-r--r--drivers/watchdog/pic32-dmt.c5
-rw-r--r--drivers/watchdog/pic32-wdt.c9
-rw-r--r--drivers/watchdog/qcom-wdt.c69
-rw-r--r--drivers/watchdog/sbsa_gwdt.c16
-rw-r--r--drivers/watchdog/sirfsoc_wdt.c15
-rw-r--r--drivers/watchdog/softdog.c92
-rw-r--r--drivers/watchdog/tangox_wdt.c4
-rw-r--r--drivers/watchdog/watchdog_core.c39
-rw-r--r--drivers/watchdog/watchdog_dev.c61
-rw-r--r--drivers/watchdog/ziirave_wdt.c2
-rw-r--r--drivers/xen/Kconfig2
-rw-r--r--drivers/xen/Makefile1
-rw-r--r--drivers/xen/arm-device.c196
-rw-r--r--drivers/xen/efi.c173
-rw-r--r--drivers/xen/events/events_base.c13
-rw-r--r--drivers/xen/events/events_fifo.c2
-rw-r--r--drivers/xen/evtchn.c43
-rw-r--r--drivers/xen/gntalloc.c2
-rw-r--r--drivers/xen/gntdev.c2
-rw-r--r--drivers/xen/privcmd.c2
-rw-r--r--drivers/xen/swiotlb-xen.c14
-rw-r--r--drivers/xen/time.c50
-rw-r--r--drivers/xen/xen-pciback/conf_space.c22
-rw-r--r--drivers/xen/xen-pciback/conf_space_header.c57
-rw-r--r--drivers/xen/xen-pciback/pciback.h1
-rw-r--r--drivers/xen/xen-pciback/pciback_ops.c2
-rw-r--r--drivers/xen/xen-pciback/xenbus.c10
-rw-r--r--drivers/xen/xen-selfballoon.c4
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c15
-rw-r--r--drivers/xen/xlate_mmu.c77
4533 files changed, 307002 insertions, 88688 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 0b6f3d601..53abb4a5f 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -12,7 +12,7 @@ obj-$(CONFIG_GENERIC_PHY) += phy/
# GPIO must come after pinctrl as gpios may need to mux pins etc
obj-$(CONFIG_PINCTRL) += pinctrl/
-obj-y += gpio/
+obj-$(CONFIG_GPIOLIB) += gpio/
obj-y += pwm/
obj-$(CONFIG_PCI) += pci/
obj-$(CONFIG_PARISC) += parisc/
@@ -78,7 +78,7 @@ obj-$(CONFIG_TARGET_CORE) += target/
obj-$(CONFIG_MTD) += mtd/
obj-$(CONFIG_SPI) += spi/
obj-$(CONFIG_SPMI) += spmi/
-obj-y += hsi/
+obj-$(CONFIG_HSI) += hsi/
obj-y += net/
obj-$(CONFIG_ATM) += atm/
obj-$(CONFIG_FUSION) += message/
@@ -122,13 +122,12 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_CPU_IDLE) += cpuidle/
obj-y += mmc/
obj-$(CONFIG_MEMSTICK) += memstick/
-obj-y += leds/
+obj-$(CONFIG_NEW_LEDS) += leds/
obj-$(CONFIG_INFINIBAND) += infiniband/
obj-$(CONFIG_SGI_SN) += sn/
obj-y += firmware/
obj-$(CONFIG_CRYPTO) += crypto/
obj-$(CONFIG_SUPERH) += sh/
-obj-$(CONFIG_ARCH_SHMOBILE) += sh/
ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
obj-y += clocksource/
endif
@@ -139,6 +138,7 @@ obj-$(CONFIG_OF) += of/
obj-$(CONFIG_SSB) += ssb/
obj-$(CONFIG_BCMA) += bcma/
obj-$(CONFIG_VHOST_RING) += vhost/
+obj-$(CONFIG_VHOST) += vhost/
obj-$(CONFIG_VLYNQ) += vlynq/
obj-$(CONFIG_STAGING) += staging/
obj-y += platform/
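The first hunk above makes linking gpio/ conditional on GPIOLIB (and likewise hsi/ on HSI and leds/ on NEW_LEDS). For reference, a kbuild line of the form obj-$(CONFIG_GPIOLIB) += gpio/ expands to obj-y only when the option is set; the fragment below illustrates the equivalent effect and is not part of the patch:

	# hypothetical equivalent, for illustration only
	ifeq ($(CONFIG_GPIOLIB),y)
	obj-y += gpio/
	endif

Directories never added to obj-y are simply not descended into, so a gpiolib-less configuration no longer builds drivers/gpio/ at all.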
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index b7e2e7763..445ce2847 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -213,10 +213,17 @@ config ACPI_CPU_FREQ_PSS
bool
select THERMAL
+config ACPI_PROCESSOR_CSTATE
+ def_bool y
+ depends on IA64 || X86
+
config ACPI_PROCESSOR_IDLE
bool
select CPU_IDLE
+config ACPI_MCFG
+ bool
+
config ACPI_CPPC_LIB
bool
depends on ACPI_PROCESSOR
@@ -234,7 +241,7 @@ config ACPI_CPPC_LIB
config ACPI_PROCESSOR
tristate "Processor"
depends on X86 || IA64 || ARM64
- select ACPI_PROCESSOR_IDLE if X86 || IA64
+ select ACPI_PROCESSOR_IDLE
select ACPI_CPU_FREQ_PSS if X86 || IA64
default y
help
@@ -291,8 +298,8 @@ config ACPI_THERMAL
config ACPI_NUMA
bool "NUMA support"
depends on NUMA
- depends on (X86 || IA64)
- default y if IA64_GENERIC || IA64_SGI_SN2
+ depends on (X86 || IA64 || ARM64)
+ default y if IA64_GENERIC || IA64_SGI_SN2 || ARM64
config ACPI_CUSTOM_DSDT_FILE
string "Custom DSDT Table file to include"
@@ -311,9 +318,12 @@ config ACPI_CUSTOM_DSDT
bool
default ACPI_CUSTOM_DSDT_FILE != ""
+config ARCH_HAS_ACPI_TABLE_UPGRADE
+ def_bool n
+
config ACPI_TABLE_UPGRADE
bool "Allow upgrading ACPI tables via initrd"
- depends on BLK_DEV_INITRD && X86
+ depends on BLK_DEV_INITRD && ARCH_HAS_ACPI_TABLE_UPGRADE
default y
help
This option provides functionality to upgrade arbitrary ACPI tables
@@ -447,34 +457,10 @@ config ACPI_REDUCED_HARDWARE_ONLY
If you are unsure what to do, do not enable this option.
-config ACPI_NFIT
- tristate "ACPI NVDIMM Firmware Interface Table (NFIT)"
- depends on PHYS_ADDR_T_64BIT
- depends on BLK_DEV
- depends on ARCH_HAS_MMIO_FLUSH
- select LIBNVDIMM
- help
- Infrastructure to probe ACPI 6 compliant platforms for
- NVDIMMs (NFIT) and register a libnvdimm device tree. In
- addition to storage devices this also enables libnvdimm to pass
- ACPI._DSM messages for platform/dimm configuration.
-
- To compile this driver as a module, choose M here:
- the module will be called nfit.
-
-config ACPI_NFIT_DEBUG
- bool "NFIT DSM debug"
- depends on ACPI_NFIT
- depends on DYNAMIC_DEBUG
- default n
- help
- Enabling this option causes the nfit driver to dump the
- input and output buffers of _DSM operations on the ACPI0012
- device and its children. This can be very verbose, so leave
- it disabled unless you are debugging a hardware / firmware
- issue.
+source "drivers/acpi/nfit/Kconfig"
source "drivers/acpi/apei/Kconfig"
+source "drivers/acpi/dptf/Kconfig"
config ACPI_EXTLOG
tristate "Extended Error Log support"
@@ -519,6 +505,20 @@ config XPOWER_PMIC_OPREGION
help
This config adds ACPI operation region support for XPower AXP288 PMIC.
+config BXT_WC_PMIC_OPREGION
+ bool "ACPI operation region support for BXT WhiskeyCove PMIC"
+ depends on INTEL_SOC_PMIC
+ help
+ This config adds ACPI operation region support for BXT WhiskeyCove PMIC.
+
endif
+config ACPI_CONFIGFS
+ tristate "ACPI configfs support"
+ select CONFIGFS_FS
+ help
+ Select this option to enable support for ACPI configuration from
+ userspace. The configurable ACPI groups will be visible under
+ /config/acpi, assuming configfs is mounted under /config.
+
endif # ACPI
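ACPI_TABLE_UPGRADE now depends on the new ARCH_HAS_ACPI_TABLE_UPGRADE gate instead of hard-coding X86, so each architecture opts in from its own Kconfig. A minimal sketch of such an opt-in, following the usual select-from-arch convention (the fragment below is illustrative, not part of this diff):

	# hypothetical fragment of arch/<arch>/Kconfig
	config ARM64
		...
		select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI

With the select in place, ACPI_TABLE_UPGRADE keeps its previous default-y behaviour on that architecture while remaining unavailable everywhere else.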
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 251ce85a6..5ae9d85c5 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -40,11 +40,11 @@ acpi-$(CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC) += processor_pdc.o
acpi-y += ec.o
acpi-$(CONFIG_ACPI_DOCK) += dock.o
acpi-y += pci_root.o pci_link.o pci_irq.o
+obj-$(CONFIG_ACPI_MCFG) += pci_mcfg.o
acpi-y += acpi_lpss.o acpi_apd.o
acpi-y += acpi_platform.o
acpi-y += acpi_pnp.o
acpi-$(CONFIG_ARM_AMBA) += acpi_amba.o
-acpi-y += int340x_thermal.o
acpi-y += power.o
acpi-y += event.o
acpi-$(CONFIG_ACPI_REDUCED_HARDWARE_ONLY) += evged.o
@@ -70,7 +70,7 @@ obj-$(CONFIG_ACPI_PCI_SLOT) += pci_slot.o
obj-$(CONFIG_ACPI_PROCESSOR) += processor.o
obj-$(CONFIG_ACPI) += container.o
obj-$(CONFIG_ACPI_THERMAL) += thermal.o
-obj-$(CONFIG_ACPI_NFIT) += nfit.o
+obj-$(CONFIG_ACPI_NFIT) += nfit/
obj-$(CONFIG_ACPI) += acpi_memhotplug.o
obj-$(CONFIG_ACPI_HOTPLUG_IOAPIC) += ioapic.o
obj-$(CONFIG_ACPI_BATTERY) += battery.o
@@ -99,5 +99,9 @@ obj-$(CONFIG_ACPI_EXTLOG) += acpi_extlog.o
obj-$(CONFIG_PMIC_OPREGION) += pmic/intel_pmic.o
obj-$(CONFIG_CRC_PMIC_OPREGION) += pmic/intel_pmic_crc.o
obj-$(CONFIG_XPOWER_PMIC_OPREGION) += pmic/intel_pmic_xpower.o
+obj-$(CONFIG_BXT_WC_PMIC_OPREGION) += pmic/intel_pmic_bxtwc.o
+
+obj-$(CONFIG_ACPI_CONFIGFS) += acpi_configfs.o
video-objs += acpi_video.o video_detect.o
+obj-y += dptf/
diff --git a/drivers/acpi/acpi_cmos_rtc.c b/drivers/acpi/acpi_cmos_rtc.c
index 81dc75033..0980a1339 100644
--- a/drivers/acpi/acpi_cmos_rtc.c
+++ b/drivers/acpi/acpi_cmos_rtc.c
@@ -14,7 +14,7 @@
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <asm-generic/rtc.h>
+#include <linux/mc146818rtc.h>
#include "internal.h"
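The asm-generic/rtc.h include gives way to linux/mc146818rtc.h, which in this release fronts the shared CMOS helpers factored out into drivers/rtc/rtc-mc146818-lib.c (visible in the diffstat above). A minimal kernel-side sketch of the replacement calls — the helper names come from that library, everything else here is hypothetical and the exact signatures should be checked against the header:

	#include <linux/mc146818rtc.h>
	#include <linux/rtc.h>

	/* hypothetical caller; replaces the old get_rtc_time() pattern */
	static int example_read_cmos(struct rtc_time *tm)
	{
		mc146818_get_time(tm);		/* read the CMOS clock */
		return rtc_valid_tm(tm);	/* reject nonsense values */
	}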
diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c
new file mode 100644
index 000000000..146a77fb7
--- /dev/null
+++ b/drivers/acpi/acpi_configfs.c
@@ -0,0 +1,267 @@
+/*
+ * ACPI configfs support
+ *
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "ACPI configfs: " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/configfs.h>
+#include <linux/acpi.h>
+
+static struct config_group *acpi_table_group;
+
+struct acpi_table {
+ struct config_item cfg;
+ struct acpi_table_header *header;
+};
+
+static ssize_t acpi_table_aml_write(struct config_item *cfg,
+ const void *data, size_t size)
+{
+ const struct acpi_table_header *header = data;
+ struct acpi_table *table;
+ int ret;
+
+ table = container_of(cfg, struct acpi_table, cfg);
+
+ if (table->header) {
+ pr_err("table already loaded\n");
+ return -EBUSY;
+ }
+
+ if (header->length != size) {
+ pr_err("invalid table length\n");
+ return -EINVAL;
+ }
+
+ if (memcmp(header->signature, ACPI_SIG_SSDT, 4)) {
+ pr_err("invalid table signature\n");
+ return -EINVAL;
+ }
+
+ table = container_of(cfg, struct acpi_table, cfg);
+
+ table->header = kmemdup(header, header->length, GFP_KERNEL);
+ if (!table->header)
+ return -ENOMEM;
+
+ ret = acpi_load_table(table->header);
+ if (ret) {
+ kfree(table->header);
+ table->header = NULL;
+ }
+
+ return ret;
+}
+
+static inline struct acpi_table_header *get_header(struct config_item *cfg)
+{
+ struct acpi_table *table = container_of(cfg, struct acpi_table, cfg);
+
+ if (!table->header)
+ pr_err("table not loaded\n");
+
+ return table->header;
+}
+
+static ssize_t acpi_table_aml_read(struct config_item *cfg,
+ void *data, size_t size)
+{
+ struct acpi_table_header *h = get_header(cfg);
+
+ if (!h)
+ return -EINVAL;
+
+ if (data)
+ memcpy(data, h, h->length);
+
+ return h->length;
+}
+
+#define MAX_ACPI_TABLE_SIZE (128 * 1024)
+
+CONFIGFS_BIN_ATTR(acpi_table_, aml, NULL, MAX_ACPI_TABLE_SIZE);
+
+struct configfs_bin_attribute *acpi_table_bin_attrs[] = {
+ &acpi_table_attr_aml,
+ NULL,
+};
+
+ssize_t acpi_table_signature_show(struct config_item *cfg, char *str)
+{
+ struct acpi_table_header *h = get_header(cfg);
+
+ if (!h)
+ return -EINVAL;
+
+ return sprintf(str, "%.*s\n", ACPI_NAME_SIZE, h->signature);
+}
+
+ssize_t acpi_table_length_show(struct config_item *cfg, char *str)
+{
+ struct acpi_table_header *h = get_header(cfg);
+
+ if (!h)
+ return -EINVAL;
+
+ return sprintf(str, "%d\n", h->length);
+}
+
+ssize_t acpi_table_revision_show(struct config_item *cfg, char *str)
+{
+ struct acpi_table_header *h = get_header(cfg);
+
+ if (!h)
+ return -EINVAL;
+
+ return sprintf(str, "%d\n", h->revision);
+}
+
+ssize_t acpi_table_oem_id_show(struct config_item *cfg, char *str)
+{
+ struct acpi_table_header *h = get_header(cfg);
+
+ if (!h)
+ return -EINVAL;
+
+ return sprintf(str, "%.*s\n", ACPI_OEM_ID_SIZE, h->oem_id);
+}
+
+ssize_t acpi_table_oem_table_id_show(struct config_item *cfg, char *str)
+{
+ struct acpi_table_header *h = get_header(cfg);
+
+ if (!h)
+ return -EINVAL;
+
+ return sprintf(str, "%.*s\n", ACPI_OEM_TABLE_ID_SIZE, h->oem_table_id);
+}
+
+ssize_t acpi_table_oem_revision_show(struct config_item *cfg, char *str)
+{
+ struct acpi_table_header *h = get_header(cfg);
+
+ if (!h)
+ return -EINVAL;
+
+ return sprintf(str, "%d\n", h->oem_revision);
+}
+
+ssize_t acpi_table_asl_compiler_id_show(struct config_item *cfg, char *str)
+{
+ struct acpi_table_header *h = get_header(cfg);
+
+ if (!h)
+ return -EINVAL;
+
+ return sprintf(str, "%.*s\n", ACPI_NAME_SIZE, h->asl_compiler_id);
+}
+
+ssize_t acpi_table_asl_compiler_revision_show(struct config_item *cfg,
+ char *str)
+{
+ struct acpi_table_header *h = get_header(cfg);
+
+ if (!h)
+ return -EINVAL;
+
+ return sprintf(str, "%d\n", h->asl_compiler_revision);
+}
+
+CONFIGFS_ATTR_RO(acpi_table_, signature);
+CONFIGFS_ATTR_RO(acpi_table_, length);
+CONFIGFS_ATTR_RO(acpi_table_, revision);
+CONFIGFS_ATTR_RO(acpi_table_, oem_id);
+CONFIGFS_ATTR_RO(acpi_table_, oem_table_id);
+CONFIGFS_ATTR_RO(acpi_table_, oem_revision);
+CONFIGFS_ATTR_RO(acpi_table_, asl_compiler_id);
+CONFIGFS_ATTR_RO(acpi_table_, asl_compiler_revision);
+
+struct configfs_attribute *acpi_table_attrs[] = {
+ &acpi_table_attr_signature,
+ &acpi_table_attr_length,
+ &acpi_table_attr_revision,
+ &acpi_table_attr_oem_id,
+ &acpi_table_attr_oem_table_id,
+ &acpi_table_attr_oem_revision,
+ &acpi_table_attr_asl_compiler_id,
+ &acpi_table_attr_asl_compiler_revision,
+ NULL,
+};
+
+static struct config_item_type acpi_table_type = {
+ .ct_owner = THIS_MODULE,
+ .ct_bin_attrs = acpi_table_bin_attrs,
+ .ct_attrs = acpi_table_attrs,
+};
+
+static struct config_item *acpi_table_make_item(struct config_group *group,
+ const char *name)
+{
+ struct acpi_table *table;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return ERR_PTR(-ENOMEM);
+
+ config_item_init_type_name(&table->cfg, name, &acpi_table_type);
+ return &table->cfg;
+}
+
+struct configfs_group_operations acpi_table_group_ops = {
+ .make_item = acpi_table_make_item,
+};
+
+static struct config_item_type acpi_tables_type = {
+ .ct_owner = THIS_MODULE,
+ .ct_group_ops = &acpi_table_group_ops,
+};
+
+static struct config_item_type acpi_root_group_type = {
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_subsystem acpi_configfs = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "acpi",
+ .ci_type = &acpi_root_group_type,
+ },
+ },
+ .su_mutex = __MUTEX_INITIALIZER(acpi_configfs.su_mutex),
+};
+
+static int __init acpi_configfs_init(void)
+{
+ int ret;
+ struct config_group *root = &acpi_configfs.su_group;
+
+ config_group_init(root);
+
+ ret = configfs_register_subsystem(&acpi_configfs);
+ if (ret)
+ return ret;
+
+ acpi_table_group = configfs_register_default_group(root, "table",
+ &acpi_tables_type);
+ return PTR_ERR_OR_ZERO(acpi_table_group);
+}
+module_init(acpi_configfs_init);
+
+static void __exit acpi_configfs_exit(void)
+{
+ configfs_unregister_default_group(acpi_table_group);
+ configfs_unregister_subsystem(&acpi_configfs);
+}
+module_exit(acpi_configfs_exit);
+
+MODULE_AUTHOR("Octavian Purdila <octavian.purdila@intel.com>");
+MODULE_DESCRIPTION("ACPI configfs support");
+MODULE_LICENSE("GPL v2");
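
The interface above is driven entirely from user space: a mkdir under the "table" group creates an item, and writing AML to its "aml" attribute loads the SSDT. A minimal sketch of a loader, assuming configfs is mounted at /sys/kernel/config and using the hypothetical item name "my_ssdt" and input file "my_ssdt.aml":

/* Minimal sketch: load an SSDT through the configfs interface above.
 * Assumptions: configfs mounted at /sys/kernel/config; item and file
 * names are examples. Error handling is abbreviated.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
    static char buf[128 * 1024];    /* MAX_ACPI_TABLE_SIZE above */
    const char *dir = "/sys/kernel/config/acpi/table/my_ssdt";
    char path[128];
    ssize_t len;
    int fd;

    if (mkdir(dir, 0755)) {         /* triggers acpi_table_make_item() */
        perror("mkdir");
        return 1;
    }

    fd = open("my_ssdt.aml", O_RDONLY);
    if (fd < 0 || (len = read(fd, buf, sizeof(buf))) <= 0) {
        perror("read aml");
        return 1;
    }
    close(fd);

    snprintf(path, sizeof(path), "%s/aml", dir);
    fd = open(path, O_WRONLY);
    if (fd < 0 || write(fd, buf, len) != len) { /* acpi_table_aml_write() */
        perror("load table");
        return 1;
    }
    close(fd);
    return 0;
}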
diff --git a/drivers/acpi/acpi_lpat.c b/drivers/acpi/acpi_lpat.c
index feb61c163..c1c4877ca 100644
--- a/drivers/acpi/acpi_lpat.c
+++ b/drivers/acpi/acpi_lpat.c
@@ -13,7 +13,7 @@
* GNU General Public License for more details.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/acpi.h>
#include <acpi/acpi_lpat.h>
@@ -157,5 +157,3 @@ void acpi_lpat_free_conversion_table(struct acpi_lpat_conversion_table
}
}
EXPORT_SYMBOL_GPL(acpi_lpat_free_conversion_table);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 0872d5fec..357a0b8f8 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -29,6 +29,7 @@ ACPI_MODULE_NAME("acpi_lpss");
#ifdef CONFIG_X86_INTEL_LPSS
#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
#include <asm/iosf_mbi.h>
#include <asm/pmc_atom.h>
@@ -229,8 +230,8 @@ static const struct lpss_device_desc bsw_spi_dev_desc = {
#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
static const struct x86_cpu_id lpss_cpu_ids[] = {
- ICPU(0x37), /* Valleyview, Bay Trail */
- ICPU(0x4c), /* Braswell, Cherry Trail */
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT1), /* Valleyview, Bay Trail */
+ ICPU(INTEL_FAM6_ATOM_AIRMONT), /* Braswell, Cherry Trail */
{}
};
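
The INTEL_FAM6_* constants from <asm/intel-family.h> only replace the raw model numbers; matching still goes through x86_match_cpu(). A sketch of the pattern under the same ICPU() convention (the example_* names are hypothetical):

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }

static const struct x86_cpu_id example_ids[] = {
    ICPU(INTEL_FAM6_ATOM_SILVERMONT1),  /* was the raw model 0x37 */
    ICPU(INTEL_FAM6_ATOM_AIRMONT),      /* was the raw model 0x4c */
    {}
};

static bool example_cpu_supported(void)
{
    /* x86_match_cpu() returns the matching entry or NULL */
    return x86_match_cpu(example_ids) != NULL;
}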
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index c1d138e12..c5557d070 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -1246,6 +1246,9 @@ static int acpi_video_device_enumerate(struct acpi_video_bus *video)
union acpi_object *dod = NULL;
union acpi_object *obj;
+ if (!video->cap._DOD)
+ return AE_NOT_EXIST;
+
status = acpi_evaluate_object(video->device->handle, "_DOD", NULL, &buffer);
if (!ACPI_SUCCESS(status)) {
ACPI_EXCEPTION((AE_INFO, status, "Evaluating _DOD"));
diff --git a/drivers/acpi/apei/Makefile b/drivers/acpi/apei/Makefile
index 5d575a955..e50573de2 100644
--- a/drivers/acpi/apei/Makefile
+++ b/drivers/acpi/apei/Makefile
@@ -3,4 +3,4 @@ obj-$(CONFIG_ACPI_APEI_GHES) += ghes.o
obj-$(CONFIG_ACPI_APEI_EINJ) += einj.o
obj-$(CONFIG_ACPI_APEI_ERST_DEBUG) += erst-dbg.o
-apei-y := apei-base.o hest.o erst.o
+apei-y := apei-base.o hest.o erst.o bert.o
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index 16129c78b..6e9f14c0a 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -1,6 +1,6 @@
/*
* apei-internal.h - ACPI Platform Error Interface internal
- * definations.
+ * definitions.
*/
#ifndef APEI_INTERNAL_H
diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c
new file mode 100644
index 000000000..a05b5c0cf
--- /dev/null
+++ b/drivers/acpi/apei/bert.c
@@ -0,0 +1,150 @@
+/*
+ * APEI Boot Error Record Table (BERT) support
+ *
+ * Copyright 2011 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * Under normal circumstances, when a hardware error occurs, the error
+ * handler receives control and processes the error. This gives OSPM a
+ * chance to process the error condition, report it, and optionally attempt
+ * recovery. In some cases, the system is unable to process an error.
+ * For example, system firmware or a management controller may choose to
+ * reset the system or the system might experience an uncontrolled crash
+ * or reset. The boot error source is used to report unhandled errors that
+ * occurred in a previous boot. This mechanism is described in the BERT
+ * table.
+ *
+ * For more information about BERT, please refer to ACPI Specification
+ * version 4.0, section 17.3.1
+ *
+ * This file is licensed under GPLv2.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/io.h>
+
+#include "apei-internal.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) "BERT: " fmt
+
+static int bert_disable;
+
+static void __init bert_print_all(struct acpi_bert_region *region,
+ unsigned int region_len)
+{
+ struct acpi_hest_generic_status *estatus =
+ (struct acpi_hest_generic_status *)region;
+ int remain = region_len;
+ u32 estatus_len;
+
+ if (!estatus->block_status)
+ return;
+
+ while (remain > sizeof(struct acpi_bert_region)) {
+ if (cper_estatus_check(estatus)) {
+ pr_err(FW_BUG "Invalid error record.\n");
+ return;
+ }
+
+ estatus_len = cper_estatus_len(estatus);
+ if (remain < estatus_len) {
+ pr_err(FW_BUG "Truncated status block (length: %u).\n",
+ estatus_len);
+ return;
+ }
+
+ pr_info_once("Error records from previous boot:\n");
+
+ cper_estatus_print(KERN_INFO HW_ERR, estatus);
+
+ /*
+ * Because the boot error source is "one-time polled" type,
+ * clear Block Status of current Generic Error Status Block,
+ * once it's printed.
+ */
+ estatus->block_status = 0;
+
+ estatus = (void *)estatus + estatus_len;
+ /* No more error records. */
+ if (!estatus->block_status)
+ return;
+
+ remain -= estatus_len;
+ }
+}
+
+static int __init setup_bert_disable(char *str)
+{
+ bert_disable = 1;
+
+ return 0;
+}
+__setup("bert_disable", setup_bert_disable);
+
+static int __init bert_check_table(struct acpi_table_bert *bert_tab)
+{
+ if (bert_tab->header.length < sizeof(struct acpi_table_bert) ||
+ bert_tab->region_length < sizeof(struct acpi_bert_region))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __init bert_init(void)
+{
+ struct acpi_bert_region *boot_error_region;
+ struct acpi_table_bert *bert_tab;
+ unsigned int region_len;
+ acpi_status status;
+ int rc = 0;
+
+ if (acpi_disabled)
+ return 0;
+
+ if (bert_disable) {
+ pr_info("Boot Error Record Table support is disabled.\n");
+ return 0;
+ }
+
+ status = acpi_get_table(ACPI_SIG_BERT, 0, (struct acpi_table_header **)&bert_tab);
+ if (status == AE_NOT_FOUND)
+ return 0;
+
+ if (ACPI_FAILURE(status)) {
+ pr_err("get table failed, %s.\n", acpi_format_exception(status));
+ return -EINVAL;
+ }
+
+ rc = bert_check_table(bert_tab);
+ if (rc) {
+ pr_err(FW_BUG "table invalid.\n");
+ return rc;
+ }
+
+ region_len = bert_tab->region_length;
+ if (!request_mem_region(bert_tab->address, region_len, "APEI BERT")) {
+ pr_err("Can't request iomem region <%016llx-%016llx>.\n",
+ (unsigned long long)bert_tab->address,
+ (unsigned long long)bert_tab->address + region_len - 1);
+ return -EIO;
+ }
+
+ boot_error_region = ioremap_cache(bert_tab->address, region_len);
+ if (boot_error_region) {
+ bert_print_all(boot_error_region, region_len);
+ iounmap(boot_error_region);
+ } else {
+ rc = -ENOMEM;
+ }
+
+ release_mem_region(bert_tab->address, region_len);
+
+ return rc;
+}
+
+late_initcall(bert_init);
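
Each Generic Error Status Block in the BERT region carries its own length, which is what lets bert_print_all() advance through the region. A sketch of the arithmetic for the common case, assuming no trailing raw data (the real cper_estatus_len() returns raw_data_offset + raw_data_length whenever a raw data section is attached):

#include <linux/acpi.h>

/* Sketch only: per-block length in the simple case without raw data. */
static u32 example_estatus_len(struct acpi_hest_generic_status *estatus)
{
    /* fixed header plus the generic error data entries behind it */
    return sizeof(*estatus) + estatus->data_length;
}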
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 559c1173d..eebb7e39c 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -33,7 +33,8 @@
#include "apei-internal.h"
-#define EINJ_PFX "EINJ: "
+#undef pr_fmt
+#define pr_fmt(fmt) "EINJ: " fmt
#define SPIN_UNIT 100 /* 100ns */
/* Firmware should respond within 1 millisecond */
@@ -179,8 +180,7 @@ static int einj_get_available_error_type(u32 *type)
static int einj_timedout(u64 *t)
{
if ((s64)*t < SPIN_UNIT) {
- pr_warning(FW_WARN EINJ_PFX
- "Firmware does not respond in time\n");
+ pr_warning(FW_WARN "Firmware does not respond in time\n");
return 1;
}
*t -= SPIN_UNIT;
@@ -307,8 +307,7 @@ static int __einj_error_trigger(u64 trigger_paddr, u32 type,
r = request_mem_region(trigger_paddr, sizeof(*trigger_tab),
"APEI EINJ Trigger Table");
if (!r) {
- pr_err(EINJ_PFX
- "Can not request [mem %#010llx-%#010llx] for Trigger table\n",
+ pr_err("Can not request [mem %#010llx-%#010llx] for Trigger table\n",
(unsigned long long)trigger_paddr,
(unsigned long long)trigger_paddr +
sizeof(*trigger_tab) - 1);
@@ -316,13 +315,12 @@ static int __einj_error_trigger(u64 trigger_paddr, u32 type,
}
trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab));
if (!trigger_tab) {
- pr_err(EINJ_PFX "Failed to map trigger table!\n");
+ pr_err("Failed to map trigger table!\n");
goto out_rel_header;
}
rc = einj_check_trigger_header(trigger_tab);
if (rc) {
- pr_warning(FW_BUG EINJ_PFX
- "The trigger error action table is invalid\n");
+ pr_warning(FW_BUG "Invalid trigger error action table.\n");
goto out_rel_header;
}
@@ -336,8 +334,7 @@ static int __einj_error_trigger(u64 trigger_paddr, u32 type,
table_size - sizeof(*trigger_tab),
"APEI EINJ Trigger Table");
if (!r) {
- pr_err(EINJ_PFX
-"Can not request [mem %#010llx-%#010llx] for Trigger Table Entry\n",
+ pr_err("Can not request [mem %#010llx-%#010llx] for Trigger Table Entry\n",
(unsigned long long)trigger_paddr + sizeof(*trigger_tab),
(unsigned long long)trigger_paddr + table_size - 1);
goto out_rel_header;
@@ -345,7 +342,7 @@ static int __einj_error_trigger(u64 trigger_paddr, u32 type,
iounmap(trigger_tab);
trigger_tab = ioremap_cache(trigger_paddr, table_size);
if (!trigger_tab) {
- pr_err(EINJ_PFX "Failed to map trigger table!\n");
+ pr_err("Failed to map trigger table!\n");
goto out_rel_entry;
}
trigger_entry = (struct acpi_whea_header *)
@@ -695,34 +692,42 @@ static int __init einj_init(void)
struct dentry *fentry;
struct apei_exec_context ctx;
- if (acpi_disabled)
+ if (acpi_disabled) {
+ pr_warn("ACPI disabled.\n");
return -ENODEV;
+ }
status = acpi_get_table(ACPI_SIG_EINJ, 0,
(struct acpi_table_header **)&einj_tab);
- if (status == AE_NOT_FOUND)
+ if (status == AE_NOT_FOUND) {
+ pr_warn("EINJ table not found.\n");
return -ENODEV;
+ }
else if (ACPI_FAILURE(status)) {
- const char *msg = acpi_format_exception(status);
- pr_err(EINJ_PFX "Failed to get table, %s\n", msg);
+ pr_err("Failed to get EINJ table: %s\n",
+ acpi_format_exception(status));
return -EINVAL;
}
rc = einj_check_table(einj_tab);
if (rc) {
- pr_warning(FW_BUG EINJ_PFX "EINJ table is invalid\n");
+ pr_warn(FW_BUG "Invalid EINJ table.\n");
return -EINVAL;
}
rc = -ENOMEM;
einj_debug_dir = debugfs_create_dir("einj", apei_get_debugfs_dir());
- if (!einj_debug_dir)
+ if (!einj_debug_dir) {
+ pr_err("Error creating debugfs node.\n");
goto err_cleanup;
+ }
+
fentry = debugfs_create_file("available_error_type", S_IRUSR,
einj_debug_dir, NULL,
&available_error_type_fops);
if (!fentry)
goto err_cleanup;
+
fentry = debugfs_create_file("error_type", S_IRUSR | S_IWUSR,
einj_debug_dir, NULL, &error_type_fops);
if (!fentry)
@@ -735,14 +740,22 @@ static int __init einj_init(void)
apei_resources_init(&einj_resources);
einj_exec_ctx_init(&ctx);
rc = apei_exec_collect_resources(&ctx, &einj_resources);
- if (rc)
+ if (rc) {
+ pr_err("Error collecting EINJ resources.\n");
goto err_fini;
+ }
+
rc = apei_resources_request(&einj_resources, "APEI EINJ");
- if (rc)
+ if (rc) {
+ pr_err("Error requesting memory/port resources.\n");
goto err_fini;
+ }
+
rc = apei_exec_pre_map_gars(&ctx);
- if (rc)
+ if (rc) {
+ pr_err("Error pre-mapping GARs.\n");
goto err_release;
+ }
rc = -ENOMEM;
einj_param = einj_get_parameter_address();
@@ -787,7 +800,7 @@ static int __init einj_init(void)
goto err_unmap;
}
- pr_info(EINJ_PFX "Error INJection is initialized.\n");
+ pr_info("Error INJection is initialized.\n");
return 0;
@@ -798,6 +811,7 @@ err_unmap:
sizeof(struct einj_parameter);
acpi_os_unmap_iomem(einj_param, size);
+ pr_err("Error creating param extension debugfs nodes.\n");
}
apei_exec_post_unmap_gars(&ctx);
err_release:
@@ -805,6 +819,7 @@ err_release:
err_fini:
apei_resources_fini(&einj_resources);
err_cleanup:
+ pr_err("Error creating primary debugfs nodes.\n");
debugfs_remove_recursive(einj_debug_dir);
return rc;
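
With the added pr_err() breadcrumbs, a failed einj_init() now reports which stage broke. At run time the injector is driven through debugfs; a sketch of a reader for the file created above, assuming debugfs is mounted at /sys/kernel/debug:

#include <stdio.h>

int main(void)
{
    /* Path assumes debugfs at /sys/kernel/debug; the file is created
     * by einj_init() above and lists the injectable error types.
     */
    char line[128];
    FILE *f = fopen("/sys/kernel/debug/apei/einj/available_error_type", "r");

    if (!f) {
        perror("available_error_type");
        return 1;
    }
    while (fgets(line, sizeof(line), f))
        fputs(line, stdout);
    fclose(f);
    return 0;
}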
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 006c3894c..f096ab3cb 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -927,7 +927,8 @@ static int erst_open_pstore(struct pstore_info *psi);
static int erst_close_pstore(struct pstore_info *psi);
static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, int *count,
struct timespec *time, char **buf,
- bool *compressed, struct pstore_info *psi);
+ bool *compressed, ssize_t *ecc_notice_size,
+ struct pstore_info *psi);
static int erst_writer(enum pstore_type_id type, enum kmsg_dump_reason reason,
u64 *id, unsigned int part, int count, bool compressed,
size_t size, struct pstore_info *psi);
@@ -987,7 +988,8 @@ static int erst_close_pstore(struct pstore_info *psi)
static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, int *count,
struct timespec *time, char **buf,
- bool *compressed, struct pstore_info *psi)
+ bool *compressed, ssize_t *ecc_notice_size,
+ struct pstore_info *psi)
{
int rc;
ssize_t len = 0;
@@ -1033,6 +1035,7 @@ skip:
memcpy(*buf, rcd->data, len - sizeof(*rcd));
*id = record_id;
*compressed = false;
+ *ecc_notice_size = 0;
if (uuid_le_cmp(rcd->sec_hdr.section_type,
CPER_SECTION_TYPE_DMESG_Z) == 0) {
*type = PSTORE_TYPE_DMESG;
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 262ca31b8..85b7d07fe 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -30,6 +30,9 @@
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/regulator/machine.h>
+#include <linux/workqueue.h>
+#include <linux/reboot.h>
+#include <linux/delay.h>
#ifdef CONFIG_X86
#include <asm/mpspec.h>
#endif
@@ -174,22 +177,17 @@ void acpi_bus_detach_private_data(acpi_handle handle)
EXPORT_SYMBOL_GPL(acpi_bus_detach_private_data);
static void acpi_print_osc_error(acpi_handle handle,
- struct acpi_osc_context *context, char *error)
+ struct acpi_osc_context *context, char *error)
{
- struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER};
int i;
- if (ACPI_FAILURE(acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer)))
- printk(KERN_DEBUG "%s: %s\n", context->uuid_str, error);
- else {
- printk(KERN_DEBUG "%s (%s): %s\n",
- (char *)buffer.pointer, context->uuid_str, error);
- kfree(buffer.pointer);
- }
- printk(KERN_DEBUG "_OSC request data:");
+ acpi_handle_debug(handle, "(%s): %s\n", context->uuid_str, error);
+
+ pr_debug("_OSC request data:");
for (i = 0; i < context->cap.length; i += sizeof(u32))
- printk(" %x", *((u32 *)(context->cap.pointer + i)));
- printk("\n");
+ pr_debug(" %x", *((u32 *)(context->cap.pointer + i)));
+
+ pr_debug("\n");
}
acpi_status acpi_str_to_uuid(char *str, u8 *uuid)
@@ -302,6 +300,14 @@ out_kfree:
EXPORT_SYMBOL(acpi_run_osc);
bool osc_sb_apei_support_acked;
+
+/*
+ * ACPI 6.0 Section 8.4.4.2 Idle State Coordination
+ * OSPM supports platform coordinated low power idle(LPI) states
+ */
+bool osc_pc_lpi_support_confirmed;
+EXPORT_SYMBOL_GPL(osc_pc_lpi_support_confirmed);
+
static u8 sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48";
static void acpi_bus_osc_support(void)
{
@@ -322,6 +328,7 @@ static void acpi_bus_osc_support(void)
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT;
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT;
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PCLPI_SUPPORT;
if (!ghes_disable)
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_APEI_SUPPORT;
@@ -329,9 +336,12 @@ static void acpi_bus_osc_support(void)
return;
if (ACPI_SUCCESS(acpi_run_osc(handle, &context))) {
u32 *capbuf_ret = context.ret.pointer;
- if (context.ret.length > OSC_SUPPORT_DWORD)
+ if (context.ret.length > OSC_SUPPORT_DWORD) {
osc_sb_apei_support_acked =
capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
+ osc_pc_lpi_support_confirmed =
+ capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
+ }
kfree(context.ret.pointer);
}
/* do we need to check other returned cap? Sounds no */
@@ -475,6 +485,56 @@ static void acpi_device_remove_notify_handler(struct acpi_device *device)
acpi_device_notify);
}
+/* Handle events targeting \_SB device (at present only graceful shutdown) */
+
+#define ACPI_SB_NOTIFY_SHUTDOWN_REQUEST 0x81
+#define ACPI_SB_INDICATE_INTERVAL 10000
+
+static void sb_notify_work(struct work_struct *dummy)
+{
+ acpi_handle sb_handle;
+
+ orderly_poweroff(true);
+
+ /*
+ * After initiating graceful shutdown, the ACPI spec requires OSPM
+ * to evaluate the _OST method once every 10 seconds to indicate that
+ * the shutdown is in progress
+ */
+ acpi_get_handle(NULL, "\\_SB", &sb_handle);
+ while (1) {
+ pr_info("Graceful shutdown in progress.\n");
+ acpi_evaluate_ost(sb_handle, ACPI_OST_EC_OSPM_SHUTDOWN,
+ ACPI_OST_SC_OS_SHUTDOWN_IN_PROGRESS, NULL);
+ msleep(ACPI_SB_INDICATE_INTERVAL);
+ }
+}
+
+static void acpi_sb_notify(acpi_handle handle, u32 event, void *data)
+{
+ static DECLARE_WORK(acpi_sb_work, sb_notify_work);
+
+ if (event == ACPI_SB_NOTIFY_SHUTDOWN_REQUEST) {
+ if (!work_busy(&acpi_sb_work))
+ schedule_work(&acpi_sb_work);
+ } else
+ pr_warn("event %x is not supported by \\_SB device\n", event);
+}
+
+static int __init acpi_setup_sb_notify_handler(void)
+{
+ acpi_handle sb_handle;
+
+ if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &sb_handle)))
+ return -ENXIO;
+
+ if (ACPI_FAILURE(acpi_install_notify_handler(sb_handle, ACPI_DEVICE_NOTIFY,
+ acpi_sb_notify, NULL)))
+ return -EINVAL;
+
+ return 0;
+}
+
/* --------------------------------------------------------------------------
Device Matching
-------------------------------------------------------------------------- */
@@ -961,8 +1021,7 @@ void __init acpi_early_init(void)
/**
* acpi_subsystem_init - Finalize the early initialization of ACPI.
*
- * Switch over the platform to the ACPI mode (if possible), initialize the
- * handling of ACPI events, install the interrupt and global lock handlers.
+ * Switch over the platform to the ACPI mode (if possible).
*
* Doing this too early is generally unsafe, but at the same time it needs to be
* done before all things that really depend on ACPI. The right spot appears to
@@ -990,6 +1049,13 @@ void __init acpi_subsystem_init(void)
}
}
+static acpi_status acpi_bus_table_handler(u32 event, void *table, void *context)
+{
+ acpi_scan_table_handler(event, table, context);
+
+ return acpi_sysfs_table_handler(event, table, context);
+}
+
static int __init acpi_bus_init(void)
{
int result;
@@ -1043,6 +1109,8 @@ static int __init acpi_bus_init(void)
* _PDC control method may load dynamic SSDT tables,
* and we need to install the table handler before that.
*/
+ status = acpi_install_table_handler(acpi_bus_table_handler, NULL);
+
acpi_sysfs_init();
acpi_early_processor_set_pdc();
@@ -1124,6 +1192,7 @@ static int __init acpi_init(void)
acpi_sleep_proc_init();
acpi_wakeup_device_init();
acpi_debugger_init();
+ acpi_setup_sb_notify_handler();
return 0;
}
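
osc_pc_lpi_support_confirmed is exported so that idle-management code elsewhere in the kernel can key off the platform's _OSC answer. A hypothetical consumer is a one-liner:

#include <linux/acpi.h>

/* Hypothetical consumer: honor platform-coordinated LPI only when the
 * platform acknowledged OSC_SB_PCLPI_SUPPORT during _OSC negotiation.
 */
static bool example_use_lpi(void)
{
    return osc_pc_lpi_support_confirmed;
}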
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 5c3b0918d..31abb0bdd 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -53,6 +53,10 @@
#define ACPI_BUTTON_DEVICE_NAME_LID "Lid Switch"
#define ACPI_BUTTON_TYPE_LID 0x05
+#define ACPI_BUTTON_LID_INIT_IGNORE 0x00
+#define ACPI_BUTTON_LID_INIT_OPEN 0x01
+#define ACPI_BUTTON_LID_INIT_METHOD 0x02
+
#define _COMPONENT ACPI_BUTTON_COMPONENT
ACPI_MODULE_NAME("button");
@@ -105,6 +109,7 @@ struct acpi_button {
static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
static struct acpi_device *lid_device;
+static u8 lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;
/* --------------------------------------------------------------------------
FS Interface (/proc)
@@ -113,16 +118,52 @@ static struct acpi_device *lid_device;
static struct proc_dir_entry *acpi_button_dir;
static struct proc_dir_entry *acpi_lid_dir;
+static int acpi_lid_evaluate_state(struct acpi_device *device)
+{
+ unsigned long long lid_state;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(device->handle, "_LID", NULL, &lid_state);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ return lid_state ? 1 : 0;
+}
+
+static int acpi_lid_notify_state(struct acpi_device *device, int state)
+{
+ struct acpi_button *button = acpi_driver_data(device);
+ int ret;
+
+ /* input layer checks if event is redundant */
+ input_report_switch(button->input, SW_LID, !state);
+ input_sync(button->input);
+
+ if (state)
+ pm_wakeup_event(&device->dev, 0);
+
+ ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
+ if (ret == NOTIFY_DONE)
+ ret = blocking_notifier_call_chain(&acpi_lid_notifier, state,
+ device);
+ if (ret == NOTIFY_DONE || ret == NOTIFY_OK) {
+ /*
+ * It is also regarded as success if the notifier_chain
+ * returns NOTIFY_OK or NOTIFY_DONE.
+ */
+ ret = 0;
+ }
+ return ret;
+}
+
static int acpi_button_state_seq_show(struct seq_file *seq, void *offset)
{
struct acpi_device *device = seq->private;
- acpi_status status;
- unsigned long long state;
+ int state;
- status = acpi_evaluate_integer(device->handle, "_LID", NULL, &state);
+ state = acpi_lid_evaluate_state(device);
seq_printf(seq, "state: %s\n",
- ACPI_FAILURE(status) ? "unsupported" :
- (state ? "open" : "closed"));
+ state < 0 ? "unsupported" : (state ? "open" : "closed"));
return 0;
}
@@ -191,8 +232,10 @@ remove_dev_dir:
acpi_device_dir(device) = NULL;
remove_lid_dir:
remove_proc_entry(ACPI_BUTTON_SUBCLASS_LID, acpi_button_dir);
+ acpi_lid_dir = NULL;
remove_button_dir:
remove_proc_entry(ACPI_BUTTON_CLASS, acpi_root_dir);
+ acpi_button_dir = NULL;
goto done;
}
@@ -209,7 +252,9 @@ static int acpi_button_remove_fs(struct acpi_device *device)
acpi_lid_dir);
acpi_device_dir(device) = NULL;
remove_proc_entry(ACPI_BUTTON_SUBCLASS_LID, acpi_button_dir);
+ acpi_lid_dir = NULL;
remove_proc_entry(ACPI_BUTTON_CLASS, acpi_root_dir);
+ acpi_button_dir = NULL;
return 0;
}
@@ -231,51 +276,37 @@ EXPORT_SYMBOL(acpi_lid_notifier_unregister);
int acpi_lid_open(void)
{
- acpi_status status;
- unsigned long long state;
-
if (!lid_device)
return -ENODEV;
- status = acpi_evaluate_integer(lid_device->handle, "_LID", NULL,
- &state);
- if (ACPI_FAILURE(status))
- return -ENODEV;
-
- return !!state;
+ return acpi_lid_evaluate_state(lid_device);
}
EXPORT_SYMBOL(acpi_lid_open);
-static int acpi_lid_send_state(struct acpi_device *device)
+static int acpi_lid_update_state(struct acpi_device *device)
{
- struct acpi_button *button = acpi_driver_data(device);
- unsigned long long state;
- acpi_status status;
- int ret;
-
- status = acpi_evaluate_integer(device->handle, "_LID", NULL, &state);
- if (ACPI_FAILURE(status))
- return -ENODEV;
+ int state;
- /* input layer checks if event is redundant */
- input_report_switch(button->input, SW_LID, !state);
- input_sync(button->input);
+ state = acpi_lid_evaluate_state(device);
+ if (state < 0)
+ return state;
- if (state)
- pm_wakeup_event(&device->dev, 0);
+ return acpi_lid_notify_state(device, state);
+}
- ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
- if (ret == NOTIFY_DONE)
- ret = blocking_notifier_call_chain(&acpi_lid_notifier, state,
- device);
- if (ret == NOTIFY_DONE || ret == NOTIFY_OK) {
- /*
- * It is also regarded as success if the notifier_chain
- * returns NOTIFY_OK or NOTIFY_DONE.
- */
- ret = 0;
+static void acpi_lid_initialize_state(struct acpi_device *device)
+{
+ switch (lid_init_state) {
+ case ACPI_BUTTON_LID_INIT_OPEN:
+ (void)acpi_lid_notify_state(device, 1);
+ break;
+ case ACPI_BUTTON_LID_INIT_METHOD:
+ (void)acpi_lid_update_state(device);
+ break;
+ case ACPI_BUTTON_LID_INIT_IGNORE:
+ default:
+ break;
}
- return ret;
}
static void acpi_button_notify(struct acpi_device *device, u32 event)
@@ -290,7 +321,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
case ACPI_BUTTON_NOTIFY_STATUS:
input = button->input;
if (button->type == ACPI_BUTTON_TYPE_LID) {
- acpi_lid_send_state(device);
+ acpi_lid_update_state(device);
} else {
int keycode;
@@ -335,7 +366,7 @@ static int acpi_button_resume(struct device *dev)
button->suspended = false;
if (button->type == ACPI_BUTTON_TYPE_LID)
- return acpi_lid_send_state(device);
+ acpi_lid_initialize_state(device);
return 0;
}
#endif
@@ -416,7 +447,7 @@ static int acpi_button_add(struct acpi_device *device)
if (error)
goto err_remove_fs;
if (button->type == ACPI_BUTTON_TYPE_LID) {
- acpi_lid_send_state(device);
+ acpi_lid_initialize_state(device);
/*
* This assumes there's only one lid device, or if there are
* more we only care about the last one...
@@ -446,4 +477,42 @@ static int acpi_button_remove(struct acpi_device *device)
return 0;
}
+static int param_set_lid_init_state(const char *val, struct kernel_param *kp)
+{
+ int result = 0;
+
+ if (!strncmp(val, "open", sizeof("open") - 1)) {
+ lid_init_state = ACPI_BUTTON_LID_INIT_OPEN;
+ pr_info("Notify initial lid state as open\n");
+ } else if (!strncmp(val, "method", sizeof("method") - 1)) {
+ lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;
+ pr_info("Notify initial lid state with _LID return value\n");
+ } else if (!strncmp(val, "ignore", sizeof("ignore") - 1)) {
+ lid_init_state = ACPI_BUTTON_LID_INIT_IGNORE;
+ pr_info("Do not notify initial lid state\n");
+ } else
+ result = -EINVAL;
+ return result;
+}
+
+static int param_get_lid_init_state(char *buffer, struct kernel_param *kp)
+{
+ switch (lid_init_state) {
+ case ACPI_BUTTON_LID_INIT_OPEN:
+ return sprintf(buffer, "open");
+ case ACPI_BUTTON_LID_INIT_METHOD:
+ return sprintf(buffer, "method");
+ case ACPI_BUTTON_LID_INIT_IGNORE:
+ return sprintf(buffer, "ignore");
+ default:
+ return sprintf(buffer, "invalid");
+ }
+ return 0;
+}
+
+module_param_call(lid_init_state,
+ param_set_lid_init_state, param_get_lid_init_state,
+ NULL, 0644);
+MODULE_PARM_DESC(lid_init_state, "Behavior for reporting LID initial state");
+
module_acpi_driver(acpi_button_driver);
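
The parameter can be set at boot (button.lid_init_state=open) or changed later through sysfs. A sketch of the latter, assuming button.c is built as the "button" module so the file appears under /sys/module/button/parameters:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    /* Path is an assumption: it holds when button.c is built as the
     * "button" module. Accepted values: open, method, ignore.
     */
    const char *path = "/sys/module/button/parameters/lid_init_state";
    int fd = open(path, O_WRONLY);

    if (fd < 0) {
        perror(path);
        return 1;
    }
    if (write(fd, "open", strlen("open")) < 0)
        perror("write");
    close(fd);
    return 0;
}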
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index e8e128ded..0c00208b4 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -21,7 +21,7 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -33,12 +33,7 @@
#include "internal.h"
-#define ACPI_DOCK_DRIVER_DESCRIPTION "ACPI Dock Station Driver"
-
ACPI_MODULE_NAME("dock");
-MODULE_AUTHOR("Kristen Carlson Accardi");
-MODULE_DESCRIPTION(ACPI_DOCK_DRIVER_DESCRIPTION);
-MODULE_LICENSE("GPL");
static bool immediate_undock = 1;
module_param(immediate_undock, bool, 0644);
diff --git a/drivers/acpi/dptf/Kconfig b/drivers/acpi/dptf/Kconfig
new file mode 100644
index 000000000..ac0a6ed0c
--- /dev/null
+++ b/drivers/acpi/dptf/Kconfig
@@ -0,0 +1,15 @@
+config DPTF_POWER
+ tristate "DPTF Platform Power Participant"
+ depends on X86
+ help
+ This driver adds support for the Dynamic Platform and Thermal
+ Framework (DPTF) Platform Power Participant device (INT3407).
+ This participant is responsible for exposing platform telemetry:
+ max_platform_power
+ platform_power_source
+ adapter_rating
+ battery_steady_power
+ charger_type
+
+ To compile this driver as a module, choose M here:
+ the module will be called dptf_power.
diff --git a/drivers/acpi/dptf/Makefile b/drivers/acpi/dptf/Makefile
new file mode 100644
index 000000000..06ea88095
--- /dev/null
+++ b/drivers/acpi/dptf/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_ACPI) += int340x_thermal.o
+obj-$(CONFIG_DPTF_POWER) += dptf_power.o
+
+ccflags-y += -Idrivers/acpi
diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c
new file mode 100644
index 000000000..734642dc5
--- /dev/null
+++ b/drivers/acpi/dptf/dptf_power.c
@@ -0,0 +1,128 @@
+/*
+ * dptf_power: DPTF platform power driver
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/platform_device.h>
+
+/*
+ * Attributes defined for INT3407:
+ * PMAX : Maximum platform power
+ * PSRC : Platform power source
+ * ARTG : Adapter rating
+ * CTYP : Charger type
+ * PBSS : Battery steady power
+ */
+#define DPTF_POWER_SHOW(name, object) \
+static ssize_t name##_show(struct device *dev,\
+ struct device_attribute *attr,\
+ char *buf)\
+{\
+ struct platform_device *pdev = to_platform_device(dev);\
+ struct acpi_device *acpi_dev = platform_get_drvdata(pdev);\
+ unsigned long long val;\
+ acpi_status status;\
+\
+ status = acpi_evaluate_integer(acpi_dev->handle, #object,\
+ NULL, &val);\
+ if (ACPI_SUCCESS(status))\
+ return sprintf(buf, "%d\n", (int)val);\
+ else \
+ return -EINVAL;\
+}
+
+DPTF_POWER_SHOW(max_platform_power_mw, PMAX)
+DPTF_POWER_SHOW(platform_power_source, PSRC)
+DPTF_POWER_SHOW(adapter_rating_mw, ARTG)
+DPTF_POWER_SHOW(battery_steady_power_mw, PBSS)
+DPTF_POWER_SHOW(charger_type, CTYP)
+
+static DEVICE_ATTR_RO(max_platform_power_mw);
+static DEVICE_ATTR_RO(platform_power_source);
+static DEVICE_ATTR_RO(adapter_rating_mw);
+static DEVICE_ATTR_RO(battery_steady_power_mw);
+static DEVICE_ATTR_RO(charger_type);
+
+static struct attribute *dptf_power_attrs[] = {
+ &dev_attr_max_platform_power_mw.attr,
+ &dev_attr_platform_power_source.attr,
+ &dev_attr_adapter_rating_mw.attr,
+ &dev_attr_battery_steady_power_mw.attr,
+ &dev_attr_charger_type.attr,
+ NULL
+};
+
+static struct attribute_group dptf_power_attribute_group = {
+ .attrs = dptf_power_attrs,
+ .name = "dptf_power"
+};
+
+static int dptf_power_add(struct platform_device *pdev)
+{
+ struct acpi_device *acpi_dev;
+ acpi_status status;
+ unsigned long long ptype;
+ int result;
+
+ acpi_dev = ACPI_COMPANION(&(pdev->dev));
+ if (!acpi_dev)
+ return -ENODEV;
+
+ status = acpi_evaluate_integer(acpi_dev->handle, "PTYP", NULL, &ptype);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ if (ptype != 0x11)
+ return -ENODEV;
+
+ result = sysfs_create_group(&pdev->dev.kobj,
+ &dptf_power_attribute_group);
+ if (result)
+ return result;
+
+ platform_set_drvdata(pdev, acpi_dev);
+
+ return 0;
+}
+
+static int dptf_power_remove(struct platform_device *pdev)
+{
+
+ sysfs_remove_group(&pdev->dev.kobj, &dptf_power_attribute_group);
+
+ return 0;
+}
+
+static const struct acpi_device_id int3407_device_ids[] = {
+ {"INT3407", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, int3407_device_ids);
+
+static struct platform_driver dptf_power_driver = {
+ .probe = dptf_power_add,
+ .remove = dptf_power_remove,
+ .driver = {
+ .name = "DPTF Platform Power",
+ .acpi_match_table = int3407_device_ids,
+ },
+};
+
+module_platform_driver(dptf_power_driver);
+
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ACPI DPTF platform power driver");
diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c
new file mode 100644
index 000000000..33505c651
--- /dev/null
+++ b/drivers/acpi/dptf/int340x_thermal.c
@@ -0,0 +1,53 @@
+/*
+ * ACPI support for int340x thermal drivers
+ *
+ * Copyright (C) 2014, Intel Corporation
+ * Authors: Zhang Rui <rui.zhang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+
+#include "internal.h"
+
+#define INT3401_DEVICE 0x01
+static const struct acpi_device_id int340x_thermal_device_ids[] = {
+ {"INT3400"},
+ {"INT3401", INT3401_DEVICE},
+ {"INT3402"},
+ {"INT3403"},
+ {"INT3404"},
+ {"INT3406"},
+ {"INT3407"},
+ {"INT3408"},
+ {"INT3409"},
+ {"INT340A"},
+ {"INT340B"},
+ {""},
+};
+
+static int int340x_thermal_handler_attach(struct acpi_device *adev,
+ const struct acpi_device_id *id)
+{
+ if (IS_ENABLED(CONFIG_INT340X_THERMAL))
+ acpi_create_platform_device(adev);
+ /* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */
+ else if (IS_ENABLED(CONFIG_INTEL_SOC_DTS_THERMAL) &&
+ id->driver_data == INT3401_DEVICE)
+ acpi_create_platform_device(adev);
+ return 1;
+}
+
+static struct acpi_scan_handler int340x_thermal_handler = {
+ .ids = int340x_thermal_device_ids,
+ .attach = int340x_thermal_handler_attach,
+};
+
+void __init acpi_int340x_thermal_init(void)
+{
+ acpi_scan_add_handler(&int340x_thermal_handler);
+}
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index f4218df00..e7bd57cc5 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1365,13 +1365,9 @@ static void ec_remove_handlers(struct acpi_ec *ec)
}
}
-static int acpi_ec_add(struct acpi_device *device)
+static struct acpi_ec *acpi_ec_alloc(void)
{
- struct acpi_ec *ec = NULL;
- int ret;
-
- strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_EC_CLASS);
+ struct acpi_ec *ec;
/* Check for boot EC */
if (boot_ec) {
@@ -1382,9 +1378,21 @@ static int acpi_ec_add(struct acpi_device *device)
first_ec = NULL;
} else {
ec = make_acpi_ec();
- if (!ec)
- return -ENOMEM;
}
+ return ec;
+}
+
+static int acpi_ec_add(struct acpi_device *device)
+{
+ struct acpi_ec *ec = NULL;
+ int ret;
+
+ strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ACPI_EC_CLASS);
+
+ ec = acpi_ec_alloc();
+ if (!ec)
+ return -ENOMEM;
if (ec_parse_device(device->handle, 0, ec, NULL) !=
AE_CTRL_TERMINATE) {
kfree(ec);
@@ -1471,27 +1479,31 @@ static const struct acpi_device_id ec_device_ids[] = {
int __init acpi_ec_dsdt_probe(void)
{
acpi_status status;
+ struct acpi_ec *ec;
+ int ret;
- if (boot_ec)
- return 0;
-
+ ec = acpi_ec_alloc();
+ if (!ec)
+ return -ENOMEM;
/*
* Finding EC from DSDT if there is no ECDT EC available. When this
* function is invoked, ACPI tables have been fully loaded, we can
* walk namespace now.
*/
- boot_ec = make_acpi_ec();
- if (!boot_ec)
- return -ENOMEM;
status = acpi_get_devices(ec_device_ids[0].id,
- ec_parse_device, boot_ec, NULL);
- if (ACPI_FAILURE(status) || !boot_ec->handle)
- return -ENODEV;
- if (!ec_install_handlers(boot_ec)) {
- first_ec = boot_ec;
- return 0;
+ ec_parse_device, ec, NULL);
+ if (ACPI_FAILURE(status) || !ec->handle) {
+ ret = -ENODEV;
+ goto error;
}
- return -EFAULT;
+ ret = ec_install_handlers(ec);
+
+error:
+ if (ret)
+ kfree(ec);
+ else
+ first_ec = boot_ec = ec;
+ return ret;
}
#if 0
@@ -1535,6 +1547,11 @@ static int ec_clear_on_resume(const struct dmi_system_id *id)
return 0;
}
+/*
+ * Some ECDTs contain wrong register addresses.
+ * MSI MS-171F
+ * https://bugzilla.kernel.org/show_bug.cgi?id=12461
+ */
static int ec_correct_ecdt(const struct dmi_system_id *id)
{
pr_debug("Detected system needing ECDT address correction.\n");
@@ -1544,16 +1561,6 @@ static int ec_correct_ecdt(const struct dmi_system_id *id)
static struct dmi_system_id ec_dmi_table[] __initdata = {
{
- ec_correct_ecdt, "Asus L4R", {
- DMI_MATCH(DMI_BIOS_VERSION, "1008.006"),
- DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),
- DMI_MATCH(DMI_BOARD_NAME, "L4R") }, NULL},
- {
- ec_correct_ecdt, "Asus M6R", {
- DMI_MATCH(DMI_BIOS_VERSION, "0207"),
- DMI_MATCH(DMI_PRODUCT_NAME, "M6R"),
- DMI_MATCH(DMI_BOARD_NAME, "M6R") }, NULL},
- {
ec_correct_ecdt, "MSI MS-171F", {
DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL},
@@ -1565,12 +1572,13 @@ static struct dmi_system_id ec_dmi_table[] __initdata = {
int __init acpi_ec_ecdt_probe(void)
{
- int ret = 0;
+ int ret;
acpi_status status;
struct acpi_table_ecdt *ecdt_ptr;
+ struct acpi_ec *ec;
- boot_ec = make_acpi_ec();
- if (!boot_ec)
+ ec = acpi_ec_alloc();
+ if (!ec)
return -ENOMEM;
/*
* Generate a boot ec context
@@ -1594,28 +1602,20 @@ int __init acpi_ec_ecdt_probe(void)
pr_info("EC description table is found, configuring boot EC\n");
if (EC_FLAGS_CORRECT_ECDT) {
- /*
- * Asus L4R, Asus M6R
- * https://bugzilla.kernel.org/show_bug.cgi?id=9399
- * MSI MS-171F
- * https://bugzilla.kernel.org/show_bug.cgi?id=12461
- */
- boot_ec->command_addr = ecdt_ptr->data.address;
- boot_ec->data_addr = ecdt_ptr->control.address;
+ ec->command_addr = ecdt_ptr->data.address;
+ ec->data_addr = ecdt_ptr->control.address;
} else {
- boot_ec->command_addr = ecdt_ptr->control.address;
- boot_ec->data_addr = ecdt_ptr->data.address;
+ ec->command_addr = ecdt_ptr->control.address;
+ ec->data_addr = ecdt_ptr->data.address;
}
- boot_ec->gpe = ecdt_ptr->gpe;
- boot_ec->handle = ACPI_ROOT_OBJECT;
- ret = ec_install_handlers(boot_ec);
- if (!ret)
- first_ec = boot_ec;
+ ec->gpe = ecdt_ptr->gpe;
+ ec->handle = ACPI_ROOT_OBJECT;
+ ret = ec_install_handlers(ec);
error:
- if (ret) {
- kfree(boot_ec);
- boot_ec = NULL;
- }
+ if (ret)
+ kfree(ec);
+ else
+ first_ec = boot_ec = ec;
return ret;
}
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 27cc7feab..940218ff0 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -87,6 +87,9 @@ bool acpi_queue_hotplug_work(struct work_struct *work);
void acpi_device_hotplug(struct acpi_device *adev, u32 src);
bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent);
+acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context);
+void acpi_scan_table_handler(u32 event, void *table, void *context);
+
/* --------------------------------------------------------------------------
Device Node Initialization / Removal
-------------------------------------------------------------------------- */
diff --git a/drivers/acpi/nfit/Kconfig b/drivers/acpi/nfit/Kconfig
new file mode 100644
index 000000000..dd0d53c52
--- /dev/null
+++ b/drivers/acpi/nfit/Kconfig
@@ -0,0 +1,26 @@
+config ACPI_NFIT
+ tristate "ACPI NVDIMM Firmware Interface Table (NFIT)"
+ depends on PHYS_ADDR_T_64BIT
+ depends on BLK_DEV
+ depends on ARCH_HAS_MMIO_FLUSH
+ select LIBNVDIMM
+ help
+ Infrastructure to probe ACPI 6 compliant platforms for
+ NVDIMMs (NFIT) and register a libnvdimm device tree. In
+ addition to storage devices, this also enables libnvdimm to pass
+ ACPI._DSM messages for platform/dimm configuration.
+
+ To compile this driver as a module, choose M here:
+ the module will be called nfit.
+
+config ACPI_NFIT_DEBUG
+ bool "NFIT DSM debug"
+ depends on ACPI_NFIT
+ depends on DYNAMIC_DEBUG
+ default n
+ help
+ Enabling this option causes the nfit driver to dump the
+ input and output buffers of _DSM operations on the ACPI0012
+ device and its children. This can be very verbose, so leave
+ it disabled unless you are debugging a hardware / firmware
+ issue.
diff --git a/drivers/acpi/nfit/Makefile b/drivers/acpi/nfit/Makefile
new file mode 100644
index 000000000..a407e769f
--- /dev/null
+++ b/drivers/acpi/nfit/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_ACPI_NFIT) := nfit.o
+nfit-y := core.o
+nfit-$(CONFIG_X86_MCE) += mce.o
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
new file mode 100644
index 000000000..e1d5ea6d5
--- /dev/null
+++ b/drivers/acpi/nfit/core.c
@@ -0,0 +1,2793 @@
+/*
+ * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include <linux/list_sort.h>
+#include <linux/libnvdimm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/ndctl.h>
+#include <linux/sysfs.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/acpi.h>
+#include <linux/sort.h>
+#include <linux/pmem.h>
+#include <linux/io.h>
+#include <linux/nd.h>
+#include <asm/cacheflush.h>
+#include "nfit.h"
+
+/*
+ * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
+ * irrelevant.
+ */
+#include <linux/io-64-nonatomic-hi-lo.h>
+
+static bool force_enable_dimms;
+module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");
+
+static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT;
+module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds");
+
+/* after three payloads of overflow, it's dead jim */
+static unsigned int scrub_overflow_abort = 3;
+module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(scrub_overflow_abort,
+ "Number of times we overflow ARS results before abort");
+
+static bool disable_vendor_specific;
+module_param(disable_vendor_specific, bool, S_IRUGO);
+MODULE_PARM_DESC(disable_vendor_specific,
+ "Limit commands to the publicly specified set\n");
+
+LIST_HEAD(acpi_descs);
+DEFINE_MUTEX(acpi_desc_lock);
+
+static struct workqueue_struct *nfit_wq;
+
+struct nfit_table_prev {
+ struct list_head spas;
+ struct list_head memdevs;
+ struct list_head dcrs;
+ struct list_head bdws;
+ struct list_head idts;
+ struct list_head flushes;
+};
+
+static u8 nfit_uuid[NFIT_UUID_MAX][16];
+
+const u8 *to_nfit_uuid(enum nfit_uuids id)
+{
+ return nfit_uuid[id];
+}
+EXPORT_SYMBOL(to_nfit_uuid);
+
+static struct acpi_nfit_desc *to_acpi_nfit_desc(
+ struct nvdimm_bus_descriptor *nd_desc)
+{
+ return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
+}
+
+static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
+{
+ struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
+
+ /*
+ * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
+ * acpi_device.
+ */
+ if (!nd_desc->provider_name
+ || strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
+ return NULL;
+
+ return to_acpi_device(acpi_desc->dev);
+}
+
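+/*
+ * Translate the firmware status word returned for ARS and clear-error
+ * commands into an errno; the upper 16 bits carry the extended status.
+ */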
+static int xlat_status(void *buf, unsigned int cmd, u32 status)
+{
+ struct nd_cmd_clear_error *clear_err;
+ struct nd_cmd_ars_status *ars_status;
+ u16 flags;
+
+ switch (cmd) {
+ case ND_CMD_ARS_CAP:
+ if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
+ return -ENOTTY;
+
+ /* Command failed */
+ if (status & 0xffff)
+ return -EIO;
+
+ /* No supported scan types for this range */
+ flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
+ if ((status >> 16 & flags) == 0)
+ return -ENOTTY;
+ break;
+ case ND_CMD_ARS_START:
+ /* ARS is in progress */
+ if ((status & 0xffff) == NFIT_ARS_START_BUSY)
+ return -EBUSY;
+
+ /* Command failed */
+ if (status & 0xffff)
+ return -EIO;
+ break;
+ case ND_CMD_ARS_STATUS:
+ ars_status = buf;
+ /* Command failed */
+ if (status & 0xffff)
+ return -EIO;
+ /* Check extended status (Upper two bytes) */
+ if (status == NFIT_ARS_STATUS_DONE)
+ return 0;
+
+ /* ARS is in progress */
+ if (status == NFIT_ARS_STATUS_BUSY)
+ return -EBUSY;
+
+ /* No ARS performed for the current boot */
+ if (status == NFIT_ARS_STATUS_NONE)
+ return -EAGAIN;
+
+ /*
+ * ARS interrupted, either we overflowed or some other
+ * agent wants the scan to stop. If we didn't overflow
+ * then just continue with the returned results.
+ */
+ if (status == NFIT_ARS_STATUS_INTR) {
+ if (ars_status->flags & NFIT_ARS_F_OVERFLOW)
+ return -ENOSPC;
+ return 0;
+ }
+
+ /* Unknown status */
+ if (status >> 16)
+ return -EIO;
+ break;
+ case ND_CMD_CLEAR_ERROR:
+ clear_err = buf;
+ if (status & 0xffff)
+ return -EIO;
+ if (!clear_err->cleared)
+ return -EIO;
+ if (clear_err->length > clear_err->cleared)
+ return clear_err->cleared;
+ break;
+ default:
+ break;
+ }
+
+ /* all other non-zero status results in an error */
+ if (status)
+ return -EIO;
+ return 0;
+}
+
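+/*
+ * Marshal a bus- or dimm-scoped command into a _DSM input package,
+ * evaluate it, and unpack the returned buffer into the caller's
+ * envelope, recording the firmware status for xlat_status().
+ */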
+static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
+ struct nvdimm *nvdimm, unsigned int cmd, void *buf,
+ unsigned int buf_len, int *cmd_rc)
+{
+ struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
+ union acpi_object in_obj, in_buf, *out_obj;
+ const struct nd_cmd_desc *desc = NULL;
+ struct device *dev = acpi_desc->dev;
+ struct nd_cmd_pkg *call_pkg = NULL;
+ const char *cmd_name, *dimm_name;
+ unsigned long cmd_mask, dsm_mask;
+ u32 offset, fw_status = 0;
+ acpi_handle handle;
+ unsigned int func;
+ const u8 *uuid;
+ int rc, i;
+
+ func = cmd;
+ if (cmd == ND_CMD_CALL) {
+ call_pkg = buf;
+ func = call_pkg->nd_command;
+ }
+
+ if (nvdimm) {
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+ struct acpi_device *adev = nfit_mem->adev;
+
+ if (!adev)
+ return -ENOTTY;
+ if (call_pkg && nfit_mem->family != call_pkg->nd_family)
+ return -ENOTTY;
+
+ dimm_name = nvdimm_name(nvdimm);
+ cmd_name = nvdimm_cmd_name(cmd);
+ cmd_mask = nvdimm_cmd_mask(nvdimm);
+ dsm_mask = nfit_mem->dsm_mask;
+ desc = nd_cmd_dimm_desc(cmd);
+ uuid = to_nfit_uuid(nfit_mem->family);
+ handle = adev->handle;
+ } else {
+ struct acpi_device *adev = to_acpi_dev(acpi_desc);
+
+ cmd_name = nvdimm_bus_cmd_name(cmd);
+ cmd_mask = nd_desc->cmd_mask;
+ dsm_mask = cmd_mask;
+ desc = nd_cmd_bus_desc(cmd);
+ uuid = to_nfit_uuid(NFIT_DEV_BUS);
+ handle = adev->handle;
+ dimm_name = "bus";
+ }
+
+ if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
+ return -ENOTTY;
+
+ if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
+ return -ENOTTY;
+
+ in_obj.type = ACPI_TYPE_PACKAGE;
+ in_obj.package.count = 1;
+ in_obj.package.elements = &in_buf;
+ in_buf.type = ACPI_TYPE_BUFFER;
+ in_buf.buffer.pointer = buf;
+ in_buf.buffer.length = 0;
+
+ /* libnvdimm has already validated the input envelope */
+ for (i = 0; i < desc->in_num; i++)
+ in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
+ i, buf);
+
+ if (call_pkg) {
+ /* skip over package wrapper */
+ in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
+ in_buf.buffer.length = call_pkg->nd_size_in;
+ }
+
+ if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
+ dev_dbg(dev, "%s:%s cmd: %d: func: %d input length: %d\n",
+ __func__, dimm_name, cmd, func,
+ in_buf.buffer.length);
+ print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4,
+ in_buf.buffer.pointer,
+ min_t(u32, 256, in_buf.buffer.length), true);
+ }
+
+ out_obj = acpi_evaluate_dsm(handle, uuid, 1, func, &in_obj);
+ if (!out_obj) {
+ dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
+ cmd_name);
+ return -EINVAL;
+ }
+
+ if (call_pkg) {
+ call_pkg->nd_fw_size = out_obj->buffer.length;
+ memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
+ out_obj->buffer.pointer,
+ min(call_pkg->nd_fw_size, call_pkg->nd_size_out));
+
+ ACPI_FREE(out_obj);
+ /*
+ * Need to support FW function w/o known size in advance.
+ * Caller can determine required size based upon nd_fw_size.
+ * If we return an error (like elsewhere) then caller wouldn't
+ * be able to rely upon data returned to make calculation.
+ */
+ return 0;
+ }
+
+ if (out_obj->package.type != ACPI_TYPE_BUFFER) {
+ dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
+ __func__, dimm_name, cmd_name, out_obj->type);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
+ dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__,
+ dimm_name, cmd_name, out_obj->buffer.length);
+ print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
+ 4, out_obj->buffer.pointer, min_t(u32, 128,
+ out_obj->buffer.length), true);
+ }
+
+ for (i = 0, offset = 0; i < desc->out_num; i++) {
+ u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
+ (u32 *) out_obj->buffer.pointer);
+
+ if (offset + out_size > out_obj->buffer.length) {
+ dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
+ __func__, dimm_name, cmd_name, i);
+ break;
+ }
+
+ if (in_buf.buffer.length + offset + out_size > buf_len) {
+ dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n",
+ __func__, dimm_name, cmd_name, i);
+ rc = -ENXIO;
+ goto out;
+ }
+ memcpy(buf + in_buf.buffer.length + offset,
+ out_obj->buffer.pointer + offset, out_size);
+ offset += out_size;
+ }
+
+ /*
+ * Set fw_status for all the commands with a known format to be
+ * later interpreted by xlat_status().
+ */
+ if (i >= 1 && ((cmd >= ND_CMD_ARS_CAP && cmd <= ND_CMD_CLEAR_ERROR)
+ || (cmd >= ND_CMD_SMART && cmd <= ND_CMD_VENDOR)))
+ fw_status = *(u32 *) out_obj->buffer.pointer;
+
+ if (offset + in_buf.buffer.length < buf_len) {
+ if (i >= 1) {
+ /*
+ * status valid, return the number of bytes left
+ * unfilled in the output buffer
+ */
+ rc = buf_len - offset - in_buf.buffer.length;
+ if (cmd_rc)
+ *cmd_rc = xlat_status(buf, cmd, fw_status);
+ } else {
+ dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
+ __func__, dimm_name, cmd_name, buf_len,
+ offset);
+ rc = -ENXIO;
+ }
+ } else {
+ rc = 0;
+ if (cmd_rc)
+ *cmd_rc = xlat_status(buf, cmd, fw_status);
+ }
+
+ out:
+ ACPI_FREE(out_obj);
+
+ return rc;
+}
+
+static const char *spa_type_name(u16 type)
+{
+ static const char *to_name[] = {
+ [NFIT_SPA_VOLATILE] = "volatile",
+ [NFIT_SPA_PM] = "pmem",
+ [NFIT_SPA_DCR] = "dimm-control-region",
+ [NFIT_SPA_BDW] = "block-data-window",
+ [NFIT_SPA_VDISK] = "volatile-disk",
+ [NFIT_SPA_VCD] = "volatile-cd",
+ [NFIT_SPA_PDISK] = "persistent-disk",
+ [NFIT_SPA_PCD] = "persistent-cd",
+
+ };
+
+ if (type > NFIT_SPA_PCD)
+ return "unknown";
+
+ return to_name[type];
+}
+
+int nfit_spa_type(struct acpi_nfit_system_address *spa)
+{
+ int i;
+
+ for (i = 0; i < NFIT_UUID_MAX; i++)
+ if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0)
+ return i;
+ return -1;
+}
+
+static bool add_spa(struct acpi_nfit_desc *acpi_desc,
+ struct nfit_table_prev *prev,
+ struct acpi_nfit_system_address *spa)
+{
+ struct device *dev = acpi_desc->dev;
+ struct nfit_spa *nfit_spa;
+
+ if (spa->header.length != sizeof(*spa))
+ return false;
+
+ list_for_each_entry(nfit_spa, &prev->spas, list) {
+ if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
+ list_move_tail(&nfit_spa->list, &acpi_desc->spas);
+ return true;
+ }
+ }
+
+ nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa),
+ GFP_KERNEL);
+ if (!nfit_spa)
+ return false;
+ INIT_LIST_HEAD(&nfit_spa->list);
+ memcpy(nfit_spa->spa, spa, sizeof(*spa));
+ list_add_tail(&nfit_spa->list, &acpi_desc->spas);
+ dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__,
+ spa->range_index,
+ spa_type_name(nfit_spa_type(spa)));
+ return true;
+}
+
+static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
+ struct nfit_table_prev *prev,
+ struct acpi_nfit_memory_map *memdev)
+{
+ struct device *dev = acpi_desc->dev;
+ struct nfit_memdev *nfit_memdev;
+
+ if (memdev->header.length != sizeof(*memdev))
+ return false;
+
+ list_for_each_entry(nfit_memdev, &prev->memdevs, list)
+ if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
+ list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
+ return true;
+ }
+
+ nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev),
+ GFP_KERNEL);
+ if (!nfit_memdev)
+ return false;
+ INIT_LIST_HEAD(&nfit_memdev->list);
+ memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev));
+ list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
+ dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n",
+ __func__, memdev->device_handle, memdev->range_index,
+ memdev->region_index);
+ return true;
+}
+
+/*
+ * An implementation may provide a truncated control region if no block windows
+ * are defined.
+ */
+static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr)
+{
+ if (dcr->header.length < offsetof(struct acpi_nfit_control_region,
+ window_size))
+ return 0;
+ if (dcr->windows)
+ return sizeof(*dcr);
+ return offsetof(struct acpi_nfit_control_region, window_size);
+}
+
+static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
+ struct nfit_table_prev *prev,
+ struct acpi_nfit_control_region *dcr)
+{
+ struct device *dev = acpi_desc->dev;
+ struct nfit_dcr *nfit_dcr;
+
+ if (!sizeof_dcr(dcr))
+ return false;
+
+ list_for_each_entry(nfit_dcr, &prev->dcrs, list)
+ if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) {
+ list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
+ return true;
+ }
+
+ nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr),
+ GFP_KERNEL);
+ if (!nfit_dcr)
+ return false;
+ INIT_LIST_HEAD(&nfit_dcr->list);
+ memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr));
+ list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
+ dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__,
+ dcr->region_index, dcr->windows);
+ return true;
+}
+
+static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
+ struct nfit_table_prev *prev,
+ struct acpi_nfit_data_region *bdw)
+{
+ struct device *dev = acpi_desc->dev;
+ struct nfit_bdw *nfit_bdw;
+
+ if (bdw->header.length != sizeof(*bdw))
+ return false;
+ list_for_each_entry(nfit_bdw, &prev->bdws, list)
+ if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
+ list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
+ return true;
+ }
+
+ nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw),
+ GFP_KERNEL);
+ if (!nfit_bdw)
+ return false;
+ INIT_LIST_HEAD(&nfit_bdw->list);
+ memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw));
+ list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
+ dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__,
+ bdw->region_index, bdw->windows);
+ return true;
+}
+
+static size_t sizeof_idt(struct acpi_nfit_interleave *idt)
+{
+ if (idt->header.length < sizeof(*idt))
+ return 0;
+ return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1);
+}
+
+static bool add_idt(struct acpi_nfit_desc *acpi_desc,
+ struct nfit_table_prev *prev,
+ struct acpi_nfit_interleave *idt)
+{
+ struct device *dev = acpi_desc->dev;
+ struct nfit_idt *nfit_idt;
+
+ if (!sizeof_idt(idt))
+ return false;
+
+ list_for_each_entry(nfit_idt, &prev->idts, list) {
+ if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt))
+ continue;
+
+ if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) {
+ list_move_tail(&nfit_idt->list, &acpi_desc->idts);
+ return true;
+ }
+ }
+
+ nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt),
+ GFP_KERNEL);
+ if (!nfit_idt)
+ return false;
+ INIT_LIST_HEAD(&nfit_idt->list);
+ memcpy(nfit_idt->idt, idt, sizeof_idt(idt));
+ list_add_tail(&nfit_idt->list, &acpi_desc->idts);
+ dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__,
+ idt->interleave_index, idt->line_count);
+ return true;
+}
+
+static size_t sizeof_flush(struct acpi_nfit_flush_address *flush)
+{
+ if (flush->header.length < sizeof(*flush))
+ return 0;
+ return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1);
+}
+
+static bool add_flush(struct acpi_nfit_desc *acpi_desc,
+ struct nfit_table_prev *prev,
+ struct acpi_nfit_flush_address *flush)
+{
+ struct device *dev = acpi_desc->dev;
+ struct nfit_flush *nfit_flush;
+
+ if (!sizeof_flush(flush))
+ return false;
+
+ list_for_each_entry(nfit_flush, &prev->flushes, list) {
+ if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush))
+ continue;
+
+ if (memcmp(nfit_flush->flush, flush,
+ sizeof_flush(flush)) == 0) {
+ list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
+ return true;
+ }
+ }
+
+ nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush)
+ + sizeof_flush(flush), GFP_KERNEL);
+ if (!nfit_flush)
+ return false;
+ INIT_LIST_HEAD(&nfit_flush->list);
+ memcpy(nfit_flush->flush, flush, sizeof_flush(flush));
+ list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
+ dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
+ flush->device_handle, flush->hint_count);
+ return true;
+}
+
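+/*
+ * Parse one sub-table at @table: dispatch to the type-specific add_*
+ * helper and return a pointer to the next sub-table.  Returns NULL at
+ * @end (or on a zero-length table) and ERR_PTR(-ENOMEM) on failure.
+ */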
+static void *add_table(struct acpi_nfit_desc *acpi_desc,
+ struct nfit_table_prev *prev, void *table, const void *end)
+{
+ struct device *dev = acpi_desc->dev;
+ struct acpi_nfit_header *hdr;
+ void *err = ERR_PTR(-ENOMEM);
+
+ if (table >= end)
+ return NULL;
+
+ hdr = table;
+ if (!hdr->length) {
+ dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
+ hdr->type);
+ return NULL;
+ }
+
+ switch (hdr->type) {
+ case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
+ if (!add_spa(acpi_desc, prev, table))
+ return err;
+ break;
+ case ACPI_NFIT_TYPE_MEMORY_MAP:
+ if (!add_memdev(acpi_desc, prev, table))
+ return err;
+ break;
+ case ACPI_NFIT_TYPE_CONTROL_REGION:
+ if (!add_dcr(acpi_desc, prev, table))
+ return err;
+ break;
+ case ACPI_NFIT_TYPE_DATA_REGION:
+ if (!add_bdw(acpi_desc, prev, table))
+ return err;
+ break;
+ case ACPI_NFIT_TYPE_INTERLEAVE:
+ if (!add_idt(acpi_desc, prev, table))
+ return err;
+ break;
+ case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
+ if (!add_flush(acpi_desc, prev, table))
+ return err;
+ break;
+ case ACPI_NFIT_TYPE_SMBIOS:
+ dev_dbg(dev, "%s: smbios\n", __func__);
+ break;
+ default:
+ dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
+ break;
+ }
+
+ return table + hdr->length;
+}
+
+static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
+ struct nfit_mem *nfit_mem)
+{
+ u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
+ u16 dcr = nfit_mem->dcr->region_index;
+ struct nfit_spa *nfit_spa;
+
+ list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
+ u16 range_index = nfit_spa->spa->range_index;
+ int type = nfit_spa_type(nfit_spa->spa);
+ struct nfit_memdev *nfit_memdev;
+
+ if (type != NFIT_SPA_BDW)
+ continue;
+
+ list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
+ if (nfit_memdev->memdev->range_index != range_index)
+ continue;
+ if (nfit_memdev->memdev->device_handle != device_handle)
+ continue;
+ if (nfit_memdev->memdev->region_index != dcr)
+ continue;
+
+ nfit_mem->spa_bdw = nfit_spa->spa;
+ return;
+ }
+ }
+
+ dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
+ nfit_mem->spa_dcr->range_index);
+ nfit_mem->bdw = NULL;
+}
+
+static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
+ struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
+{
+ u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
+ struct nfit_memdev *nfit_memdev;
+ struct nfit_bdw *nfit_bdw;
+ struct nfit_idt *nfit_idt;
+ u16 idt_idx, range_index;
+
+ list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
+ if (nfit_bdw->bdw->region_index != dcr)
+ continue;
+ nfit_mem->bdw = nfit_bdw->bdw;
+ break;
+ }
+
+ if (!nfit_mem->bdw)
+ return;
+
+ nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);
+
+ if (!nfit_mem->spa_bdw)
+ return;
+
+ range_index = nfit_mem->spa_bdw->range_index;
+ list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
+ if (nfit_memdev->memdev->range_index != range_index ||
+ nfit_memdev->memdev->region_index != dcr)
+ continue;
+ nfit_mem->memdev_bdw = nfit_memdev->memdev;
+ idt_idx = nfit_memdev->memdev->interleave_index;
+ list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
+ if (nfit_idt->idt->interleave_index != idt_idx)
+ continue;
+ nfit_mem->idt_bdw = nfit_idt->idt;
+ break;
+ }
+ break;
+ }
+}
+
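+/*
+ * For each memdev that references @spa, find or create the nfit_mem
+ * entry for its DIMM (keyed by device_handle) and associate the
+ * control region, flush hint resources, and, for SPA-DCR ranges, the
+ * interleave and block-window descriptors.
+ */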
+static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
+ struct acpi_nfit_system_address *spa)
+{
+ struct nfit_mem *nfit_mem, *found;
+ struct nfit_memdev *nfit_memdev;
+ int type = nfit_spa_type(spa);
+
+ switch (type) {
+ case NFIT_SPA_DCR:
+ case NFIT_SPA_PM:
+ break;
+ default:
+ return 0;
+ }
+
+ list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
+ struct nfit_flush *nfit_flush;
+ struct nfit_dcr *nfit_dcr;
+ u32 device_handle;
+ u16 dcr;
+
+ if (nfit_memdev->memdev->range_index != spa->range_index)
+ continue;
+ found = NULL;
+ dcr = nfit_memdev->memdev->region_index;
+ device_handle = nfit_memdev->memdev->device_handle;
+ list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
+ if (__to_nfit_memdev(nfit_mem)->device_handle
+ == device_handle) {
+ found = nfit_mem;
+ break;
+ }
+
+ if (found)
+ nfit_mem = found;
+ else {
+ nfit_mem = devm_kzalloc(acpi_desc->dev,
+ sizeof(*nfit_mem), GFP_KERNEL);
+ if (!nfit_mem)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&nfit_mem->list);
+ nfit_mem->acpi_desc = acpi_desc;
+ list_add(&nfit_mem->list, &acpi_desc->dimms);
+ }
+
+ list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
+ if (nfit_dcr->dcr->region_index != dcr)
+ continue;
+ /*
+ * Record the control region for the dimm. For
+ * the ACPI 6.1 case, where there are separate
+ * control regions for the pmem vs blk
+ * interfaces, be sure to record the extended
+ * blk details.
+ */
+ if (!nfit_mem->dcr)
+ nfit_mem->dcr = nfit_dcr->dcr;
+ else if (nfit_mem->dcr->windows == 0
+ && nfit_dcr->dcr->windows)
+ nfit_mem->dcr = nfit_dcr->dcr;
+ break;
+ }
+
+ list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
+ struct acpi_nfit_flush_address *flush;
+ u16 i;
+
+ if (nfit_flush->flush->device_handle != device_handle)
+ continue;
+ nfit_mem->nfit_flush = nfit_flush;
+ flush = nfit_flush->flush;
+ nfit_mem->flush_wpq = devm_kzalloc(acpi_desc->dev,
+ flush->hint_count
+ * sizeof(struct resource), GFP_KERNEL);
+ if (!nfit_mem->flush_wpq)
+ return -ENOMEM;
+ for (i = 0; i < flush->hint_count; i++) {
+ struct resource *res = &nfit_mem->flush_wpq[i];
+
+ res->start = flush->hint_address[i];
+ res->end = res->start + 8 - 1;
+ }
+ break;
+ }
+
+ if (dcr && !nfit_mem->dcr) {
+ dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
+ spa->range_index, dcr);
+ return -ENODEV;
+ }
+
+ if (type == NFIT_SPA_DCR) {
+ struct nfit_idt *nfit_idt;
+ u16 idt_idx;
+
+ /* multiple dimms may share a SPA when interleaved */
+ nfit_mem->spa_dcr = spa;
+ nfit_mem->memdev_dcr = nfit_memdev->memdev;
+ idt_idx = nfit_memdev->memdev->interleave_index;
+ list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
+ if (nfit_idt->idt->interleave_index != idt_idx)
+ continue;
+ nfit_mem->idt_dcr = nfit_idt->idt;
+ break;
+ }
+ nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
+ } else {
+ /*
+ * A single dimm may belong to multiple SPA-PM
+			 * ranges; record at least one in addition to
+ * any SPA-DCR range.
+ */
+ nfit_mem->memdev_pmem = nfit_memdev->memdev;
+ }
+ }
+
+ return 0;
+}
+
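+/* Sort DIMMs by device handle so that registration order is deterministic */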
+static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
+{
+ struct nfit_mem *a = container_of(_a, typeof(*a), list);
+ struct nfit_mem *b = container_of(_b, typeof(*b), list);
+ u32 handleA, handleB;
+
+ handleA = __to_nfit_memdev(a)->device_handle;
+ handleB = __to_nfit_memdev(b)->device_handle;
+ if (handleA < handleB)
+ return -1;
+ else if (handleA > handleB)
+ return 1;
+ return 0;
+}
+
+static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
+{
+ struct nfit_spa *nfit_spa;
+
+ /*
+ * For each SPA-DCR or SPA-PMEM address range find its
+ * corresponding MEMDEV(s). From each MEMDEV find the
+ * corresponding DCR. Then, if we're operating on a SPA-DCR,
+ * try to find a SPA-BDW and a corresponding BDW that references
+	 * the DCR. Throw it all into an nfit_mem object. Note that
+ * BDWs are optional.
+ */
+ list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
+ int rc;
+
+ rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa);
+ if (rc)
+ return rc;
+ }
+
+ list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);
+
+ return 0;
+}
+
+static ssize_t revision_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
+ struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+
+ return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
+}
+static DEVICE_ATTR_RO(revision);
+
+/*
+ * This shows the number of full Address Range Scrubs that have been
+ * completed since driver load time. Userspace can wait on this using
+ * select/poll etc. A '+' at the end indicates an ARS is in progress.
+ */
+static ssize_t scrub_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm_bus_descriptor *nd_desc;
+ ssize_t rc = -ENXIO;
+
+ device_lock(dev);
+ nd_desc = dev_get_drvdata(dev);
+ if (nd_desc) {
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+
+ rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
+ (work_busy(&acpi_desc->work)) ? "+\n" : "\n");
+ }
+ device_unlock(dev);
+ return rc;
+}
+
+static ssize_t scrub_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct nvdimm_bus_descriptor *nd_desc;
+ ssize_t rc;
+ long val;
+
+ rc = kstrtol(buf, 0, &val);
+ if (rc)
+ return rc;
+ if (val != 1)
+ return -EINVAL;
+
+ device_lock(dev);
+ nd_desc = dev_get_drvdata(dev);
+ if (nd_desc) {
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+
+ rc = acpi_nfit_ars_rescan(acpi_desc);
+ }
+ device_unlock(dev);
+ if (rc)
+ return rc;
+ return size;
+}
+static DEVICE_ATTR_RW(scrub);
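+/*
+ * Example usage (hypothetical paths; the actual bus device name
+ * depends on enumeration order):
+ *
+ *   # cat /sys/bus/nd/devices/ndbus0/nfit/scrub
+ *   1+			<- one ARS completed, another in flight
+ *   # echo 1 > /sys/bus/nd/devices/ndbus0/nfit/scrub
+ *				<- request an on-demand rescan
+ */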
+
+static bool ars_supported(struct nvdimm_bus *nvdimm_bus)
+{
+ struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
+ const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START
+ | 1 << ND_CMD_ARS_STATUS;
+
+ return (nd_desc->cmd_mask & mask) == mask;
+}
+
+static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
+
+ if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus))
+ return 0;
+ return a->mode;
+}
+
+static struct attribute *acpi_nfit_attributes[] = {
+ &dev_attr_revision.attr,
+ &dev_attr_scrub.attr,
+ NULL,
+};
+
+static struct attribute_group acpi_nfit_attribute_group = {
+ .name = "nfit",
+ .attrs = acpi_nfit_attributes,
+ .is_visible = nfit_visible,
+};
+
+static const struct attribute_group *acpi_nfit_attribute_groups[] = {
+ &nvdimm_bus_attribute_group,
+ &acpi_nfit_attribute_group,
+ NULL,
+};
+
+static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+
+ return __to_nfit_memdev(nfit_mem);
+}
+
+static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+
+ return nfit_mem->dcr;
+}
+
+static ssize_t handle_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);
+
+ return sprintf(buf, "%#x\n", memdev->device_handle);
+}
+static DEVICE_ATTR_RO(handle);
+
+static ssize_t phys_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);
+
+ return sprintf(buf, "%#x\n", memdev->physical_id);
+}
+static DEVICE_ATTR_RO(phys_id);
+
+static ssize_t vendor_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
+
+ return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
+}
+static DEVICE_ATTR_RO(vendor);
+
+static ssize_t rev_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
+
+ return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
+}
+static DEVICE_ATTR_RO(rev_id);
+
+static ssize_t device_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
+
+ return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
+}
+static DEVICE_ATTR_RO(device);
+
+static ssize_t subsystem_vendor_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
+
+ return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
+}
+static DEVICE_ATTR_RO(subsystem_vendor);
+
+static ssize_t subsystem_rev_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
+
+ return sprintf(buf, "0x%04x\n",
+ be16_to_cpu(dcr->subsystem_revision_id));
+}
+static DEVICE_ATTR_RO(subsystem_rev_id);
+
+static ssize_t subsystem_device_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
+
+ return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
+}
+static DEVICE_ATTR_RO(subsystem_device);
+
+static int num_nvdimm_formats(struct nvdimm *nvdimm)
+{
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+ int formats = 0;
+
+ if (nfit_mem->memdev_pmem)
+ formats++;
+ if (nfit_mem->memdev_bdw)
+ formats++;
+ return formats;
+}
+
+static ssize_t format_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
+
+ return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
+}
+static DEVICE_ATTR_RO(format);
+
+static ssize_t format1_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 handle;
+ ssize_t rc = -ENXIO;
+ struct nfit_mem *nfit_mem;
+ struct nfit_memdev *nfit_memdev;
+ struct acpi_nfit_desc *acpi_desc;
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
+
+ nfit_mem = nvdimm_provider_data(nvdimm);
+ acpi_desc = nfit_mem->acpi_desc;
+ handle = to_nfit_memdev(dev)->device_handle;
+
+ /* assumes DIMMs have at most 2 published interface codes */
+ mutex_lock(&acpi_desc->init_mutex);
+ list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
+ struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
+ struct nfit_dcr *nfit_dcr;
+
+ if (memdev->device_handle != handle)
+ continue;
+
+ list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
+ if (nfit_dcr->dcr->region_index != memdev->region_index)
+ continue;
+ if (nfit_dcr->dcr->code == dcr->code)
+ continue;
+ rc = sprintf(buf, "0x%04x\n",
+ le16_to_cpu(nfit_dcr->dcr->code));
+ break;
+ }
+		if (rc != -ENXIO)
+ break;
+ }
+ mutex_unlock(&acpi_desc->init_mutex);
+ return rc;
+}
+static DEVICE_ATTR_RO(format1);
+
+static ssize_t formats_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+
+ return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
+}
+static DEVICE_ATTR_RO(formats);
+
+static ssize_t serial_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
+
+ return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
+}
+static DEVICE_ATTR_RO(serial);
+
+static ssize_t family_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+
+ if (nfit_mem->family < 0)
+ return -ENXIO;
+ return sprintf(buf, "%d\n", nfit_mem->family);
+}
+static DEVICE_ATTR_RO(family);
+
+static ssize_t dsm_mask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+
+ if (nfit_mem->family < 0)
+ return -ENXIO;
+ return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
+}
+static DEVICE_ATTR_RO(dsm_mask);
+
+static ssize_t flags_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u16 flags = to_nfit_memdev(dev)->flags;
+
+ return sprintf(buf, "%s%s%s%s%s\n",
+ flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
+ flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
+ flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
+ flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
+ flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "");
+}
+static DEVICE_ATTR_RO(flags);
+
+static ssize_t id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
+
+ if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
+ return sprintf(buf, "%04x-%02x-%04x-%08x\n",
+ be16_to_cpu(dcr->vendor_id),
+ dcr->manufacturing_location,
+ be16_to_cpu(dcr->manufacturing_date),
+ be32_to_cpu(dcr->serial_number));
+ else
+ return sprintf(buf, "%04x-%08x\n",
+ be16_to_cpu(dcr->vendor_id),
+ be32_to_cpu(dcr->serial_number));
+}
+static DEVICE_ATTR_RO(id);
+
+static struct attribute *acpi_nfit_dimm_attributes[] = {
+ &dev_attr_handle.attr,
+ &dev_attr_phys_id.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_device.attr,
+ &dev_attr_rev_id.attr,
+ &dev_attr_subsystem_vendor.attr,
+ &dev_attr_subsystem_device.attr,
+ &dev_attr_subsystem_rev_id.attr,
+ &dev_attr_format.attr,
+ &dev_attr_formats.attr,
+ &dev_attr_format1.attr,
+ &dev_attr_serial.attr,
+ &dev_attr_flags.attr,
+ &dev_attr_id.attr,
+ &dev_attr_family.attr,
+ &dev_attr_dsm_mask.attr,
+ NULL,
+};
+
+static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+
+ if (!to_nfit_dcr(dev))
+ return 0;
+ if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
+ return 0;
+ return a->mode;
+}
+
+static struct attribute_group acpi_nfit_dimm_attribute_group = {
+ .name = "nfit",
+ .attrs = acpi_nfit_dimm_attributes,
+ .is_visible = acpi_nfit_dimm_attr_visible,
+};
+
+static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
+ &nvdimm_attribute_group,
+ &nd_device_attribute_group,
+ &acpi_nfit_dimm_attribute_group,
+ NULL,
+};
+
+static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
+ u32 device_handle)
+{
+ struct nfit_mem *nfit_mem;
+
+ list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
+ if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
+ return nfit_mem->nvdimm;
+
+ return NULL;
+}
+
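+/*
+ * Probe which _DSM family the DIMM implements and record the
+ * supported function set.  The dsm_mask literals below are bitmaps of
+ * allowed function indices, e.g. 0x3fe admits functions 1-9 for the
+ * Intel family.
+ */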
+static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
+ struct nfit_mem *nfit_mem, u32 device_handle)
+{
+ struct acpi_device *adev, *adev_dimm;
+ struct device *dev = acpi_desc->dev;
+ unsigned long dsm_mask;
+ const u8 *uuid;
+ int i;
+
+ /* nfit test assumes 1:1 relationship between commands and dsms */
+ nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
+ nfit_mem->family = NVDIMM_FAMILY_INTEL;
+ adev = to_acpi_dev(acpi_desc);
+ if (!adev)
+ return 0;
+
+ adev_dimm = acpi_find_child_device(adev, device_handle, false);
+ nfit_mem->adev = adev_dimm;
+ if (!adev_dimm) {
+ dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
+ device_handle);
+ return force_enable_dimms ? 0 : -ENODEV;
+ }
+
+ /*
+ * Until standardization materializes we need to consider 4
+	 * different command sets. Note that checking for function0 (bit0)
+ * tells us if any commands are reachable through this uuid.
+ */
+ for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_MSFT; i++)
+ if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
+ break;
+
+ /* limit the supported commands to those that are publicly documented */
+ nfit_mem->family = i;
+ if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
+ dsm_mask = 0x3fe;
+ if (disable_vendor_specific)
+ dsm_mask &= ~(1 << ND_CMD_VENDOR);
+ } else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) {
+ dsm_mask = 0x1c3c76;
+ } else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) {
+ dsm_mask = 0x1fe;
+ if (disable_vendor_specific)
+ dsm_mask &= ~(1 << 8);
+ } else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) {
+ dsm_mask = 0xffffffff;
+ } else {
+ dev_dbg(dev, "unknown dimm command family\n");
+ nfit_mem->family = -1;
+ /* DSMs are optional, continue loading the driver... */
+ return 0;
+ }
+
+ uuid = to_nfit_uuid(nfit_mem->family);
+ for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
+ if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
+ set_bit(i, &nfit_mem->dsm_mask);
+
+ return 0;
+}
+
+static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
+{
+ struct nfit_mem *nfit_mem;
+ int dimm_count = 0;
+
+ list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
+ struct acpi_nfit_flush_address *flush;
+ unsigned long flags = 0, cmd_mask;
+ struct nvdimm *nvdimm;
+ u32 device_handle;
+ u16 mem_flags;
+ int rc;
+
+ device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
+ nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
+ if (nvdimm) {
+ dimm_count++;
+ continue;
+ }
+
+ if (nfit_mem->bdw && nfit_mem->memdev_pmem)
+ flags |= NDD_ALIASING;
+
+ mem_flags = __to_nfit_memdev(nfit_mem)->flags;
+ if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
+ flags |= NDD_UNARMED;
+
+ rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
+ if (rc)
+ continue;
+
+ /*
+ * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
+ * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
+ * userspace interface.
+ */
+ cmd_mask = 1UL << ND_CMD_CALL;
+ if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
+ cmd_mask |= nfit_mem->dsm_mask;
+
+ flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush
+ : NULL;
+ nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
+ acpi_nfit_dimm_attribute_groups,
+ flags, cmd_mask, flush ? flush->hint_count : 0,
+ nfit_mem->flush_wpq);
+ if (!nvdimm)
+ return -ENOMEM;
+
+ nfit_mem->nvdimm = nvdimm;
+ dimm_count++;
+
+ if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
+ continue;
+
+ dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n",
+ nvdimm_name(nvdimm),
+ mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
+ mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
+ mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
+ mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "");
+
+ }
+
+ return nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
+}
+
+static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
+{
+ struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
+ const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS);
+ struct acpi_device *adev;
+ int i;
+
+ nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
+ adev = to_acpi_dev(acpi_desc);
+ if (!adev)
+ return;
+
+ for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
+ if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
+ set_bit(i, &nd_desc->cmd_mask);
+}
+
+static ssize_t range_index_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nd_region *nd_region = to_nd_region(dev);
+ struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);
+
+ return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
+}
+static DEVICE_ATTR_RO(range_index);
+
+static struct attribute *acpi_nfit_region_attributes[] = {
+ &dev_attr_range_index.attr,
+ NULL,
+};
+
+static struct attribute_group acpi_nfit_region_attribute_group = {
+ .name = "nfit",
+ .attrs = acpi_nfit_region_attributes,
+};
+
+static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
+ &nd_region_attribute_group,
+ &nd_mapping_attribute_group,
+ &nd_device_attribute_group,
+ &nd_numa_attribute_group,
+ &acpi_nfit_region_attribute_group,
+ NULL,
+};
+
+/* enough info to uniquely specify an interleave set */
+struct nfit_set_info {
+ struct nfit_set_info_map {
+ u64 region_offset;
+ u32 serial_number;
+ u32 pad;
+ } mapping[0];
+};
+
+static size_t sizeof_nfit_set_info(int num_mappings)
+{
+ return sizeof(struct nfit_set_info)
+ + num_mappings * sizeof(struct nfit_set_info_map);
+}
+
+static int cmp_map(const void *m0, const void *m1)
+{
+ const struct nfit_set_info_map *map0 = m0;
+ const struct nfit_set_info_map *map1 = m1;
+
+ return memcmp(&map0->region_offset, &map1->region_offset,
+ sizeof(u64));
+}
+
+/* Retrieve the nth entry referencing this spa */
+static struct acpi_nfit_memory_map *memdev_from_spa(
+ struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
+{
+ struct nfit_memdev *nfit_memdev;
+
+ list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
+ if (nfit_memdev->memdev->range_index == range_index)
+ if (n-- == 0)
+ return nfit_memdev->memdev;
+ return NULL;
+}
+
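+/*
+ * The interleave-set cookie is a fletcher64 checksum over the
+ * (region_offset, serial_number) tuple of every mapping, sorted by
+ * region_offset, so the result is stable across boots regardless of
+ * memdev enumeration order.
+ */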
+static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
+ struct nd_region_desc *ndr_desc,
+ struct acpi_nfit_system_address *spa)
+{
+ int i, spa_type = nfit_spa_type(spa);
+ struct device *dev = acpi_desc->dev;
+ struct nd_interleave_set *nd_set;
+ u16 nr = ndr_desc->num_mappings;
+ struct nfit_set_info *info;
+
+ if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE)
+ /* pass */;
+ else
+ return 0;
+
+ nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
+ if (!nd_set)
+ return -ENOMEM;
+
+ info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+ for (i = 0; i < nr; i++) {
+ struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
+ struct nfit_set_info_map *map = &info->mapping[i];
+ struct nvdimm *nvdimm = nd_mapping->nvdimm;
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+ struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
+ spa->range_index, i);
+
+ if (!memdev || !nfit_mem->dcr) {
+ dev_err(dev, "%s: failed to find DCR\n", __func__);
+ return -ENODEV;
+ }
+
+ map->region_offset = memdev->region_offset;
+ map->serial_number = nfit_mem->dcr->serial_number;
+ }
+
+ sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
+ cmp_map, NULL);
+ nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
+ ndr_desc->nd_set = nd_set;
+ devm_kfree(dev, info);
+
+ return 0;
+}
+
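+/*
+ * Translate a linear aperture offset into an offset in the
+ * interleaved system-physical-address space.  The aperture is chopped
+ * into lines of @line_size bytes that rotate across the DIMMs in the
+ * set; after @num_lines lines the pattern repeats @table_size bytes
+ * further on.  For example, with line_size = 256 and num_lines = 2,
+ * offset 0x300 is line 3 => line_index 1, table_skip_count 1, i.e.
+ * base_offset + line_offset[1] * 256 + table_size.
+ */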
+static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
+{
+ struct acpi_nfit_interleave *idt = mmio->idt;
+ u32 sub_line_offset, line_index, line_offset;
+ u64 line_no, table_skip_count, table_offset;
+
+ line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
+ table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
+ line_offset = idt->line_offset[line_index]
+ * mmio->line_size;
+ table_offset = table_skip_count * mmio->table_size;
+
+ return mmio->base_offset + line_offset + table_offset + sub_line_offset;
+}
+
+static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
+{
+ struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
+ u64 offset = nfit_blk->stat_offset + mmio->size * bw;
+ const u32 STATUS_MASK = 0x80000037;
+
+ if (mmio->num_lines)
+ offset = to_interleave_offset(offset, mmio);
+
+ return readl(mmio->addr.base + offset) & STATUS_MASK;
+}
+
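+/*
+ * Program the block-control window: the command register packs the
+ * cacheline-aligned DPA into bits 0-47, the transfer length (in
+ * cachelines) into bits 48-55, and the write flag into bit 56, per
+ * the enum below.
+ */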
+static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
+ resource_size_t dpa, unsigned int len, unsigned int write)
+{
+ u64 cmd, offset;
+ struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
+
+ enum {
+ BCW_OFFSET_MASK = (1ULL << 48)-1,
+ BCW_LEN_SHIFT = 48,
+ BCW_LEN_MASK = (1ULL << 8) - 1,
+ BCW_CMD_SHIFT = 56,
+ };
+
+ cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
+ len = len >> L1_CACHE_SHIFT;
+ cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
+ cmd |= ((u64) write) << BCW_CMD_SHIFT;
+
+ offset = nfit_blk->cmd_offset + mmio->size * bw;
+ if (mmio->num_lines)
+ offset = to_interleave_offset(offset, mmio);
+
+ writeq(cmd, mmio->addr.base + offset);
+ nvdimm_flush(nfit_blk->nd_region);
+
+ if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
+ readq(mmio->addr.base + offset);
+}
+
+static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
+ resource_size_t dpa, void *iobuf, size_t len, int rw,
+ unsigned int lane)
+{
+ struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
+ unsigned int copied = 0;
+ u64 base_offset;
+ int rc;
+
+ base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
+ + lane * mmio->size;
+ write_blk_ctl(nfit_blk, lane, dpa, len, rw);
+ while (len) {
+ unsigned int c;
+ u64 offset;
+
+ if (mmio->num_lines) {
+ u32 line_offset;
+
+ offset = to_interleave_offset(base_offset + copied,
+ mmio);
+ div_u64_rem(offset, mmio->line_size, &line_offset);
+ c = min_t(size_t, len, mmio->line_size - line_offset);
+ } else {
+ offset = base_offset + nfit_blk->bdw_offset;
+ c = len;
+ }
+
+ if (rw)
+ memcpy_to_pmem(mmio->addr.aperture + offset,
+ iobuf + copied, c);
+ else {
+ if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
+ mmio_flush_range((void __force *)
+ mmio->addr.aperture + offset, c);
+
+ memcpy_from_pmem(iobuf + copied,
+ mmio->addr.aperture + offset, c);
+ }
+
+ copied += c;
+ len -= c;
+ }
+
+ if (rw)
+ nvdimm_flush(nfit_blk->nd_region);
+
+ rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
+ return rc;
+}
+
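+/*
+ * Split a BLK I/O request into aperture-sized chunks, all issued
+ * through a single lane so the per-lane command/status registers are
+ * not shared with concurrent callers.
+ */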
+static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
+ resource_size_t dpa, void *iobuf, u64 len, int rw)
+{
+ struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
+ struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
+ struct nd_region *nd_region = nfit_blk->nd_region;
+ unsigned int lane, copied = 0;
+ int rc = 0;
+
+ lane = nd_region_acquire_lane(nd_region);
+ while (len) {
+ u64 c = min(len, mmio->size);
+
+ rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
+ iobuf + copied, c, rw, lane);
+ if (rc)
+ break;
+
+ copied += c;
+ len -= c;
+ }
+ nd_region_release_lane(nd_region, lane);
+
+ return rc;
+}
+
+static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
+ struct acpi_nfit_interleave *idt, u16 interleave_ways)
+{
+ if (idt) {
+ mmio->num_lines = idt->line_count;
+ mmio->line_size = idt->line_size;
+ if (interleave_ways == 0)
+ return -ENXIO;
+ mmio->table_size = mmio->num_lines * interleave_ways
+ * mmio->line_size;
+ }
+
+ return 0;
+}
+
+static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
+ struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
+{
+ struct nd_cmd_dimm_flags flags;
+ int rc;
+
+ memset(&flags, 0, sizeof(flags));
+ rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
+ sizeof(flags), NULL);
+
+ if (rc >= 0 && flags.status == 0)
+ nfit_blk->dimm_flags = flags.flags;
+ else if (rc == -ENOTTY) {
+ /* fall back to a conservative default */
+ nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
+ rc = 0;
+ } else
+ rc = -ENXIO;
+
+ return rc;
+}
+
+static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
+ struct device *dev)
+{
+ struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
+ struct nd_blk_region *ndbr = to_nd_blk_region(dev);
+ struct nfit_blk_mmio *mmio;
+ struct nfit_blk *nfit_blk;
+ struct nfit_mem *nfit_mem;
+ struct nvdimm *nvdimm;
+ int rc;
+
+ nvdimm = nd_blk_region_to_dimm(ndbr);
+ nfit_mem = nvdimm_provider_data(nvdimm);
+ if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
+ dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
+ nfit_mem ? "" : " nfit_mem",
+ (nfit_mem && nfit_mem->dcr) ? "" : " dcr",
+ (nfit_mem && nfit_mem->bdw) ? "" : " bdw");
+ return -ENXIO;
+ }
+
+ nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
+ if (!nfit_blk)
+ return -ENOMEM;
+ nd_blk_region_set_provider_data(ndbr, nfit_blk);
+ nfit_blk->nd_region = to_nd_region(dev);
+
+ /* map block aperture memory */
+ nfit_blk->bdw_offset = nfit_mem->bdw->offset;
+ mmio = &nfit_blk->mmio[BDW];
+ mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address,
+ nfit_mem->spa_bdw->length, ARCH_MEMREMAP_PMEM);
+ if (!mmio->addr.base) {
+ dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
+ nvdimm_name(nvdimm));
+ return -ENOMEM;
+ }
+ mmio->size = nfit_mem->bdw->size;
+ mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
+ mmio->idt = nfit_mem->idt_bdw;
+ mmio->spa = nfit_mem->spa_bdw;
+ rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
+ nfit_mem->memdev_bdw->interleave_ways);
+ if (rc) {
+ dev_dbg(dev, "%s: %s failed to init bdw interleave\n",
+ __func__, nvdimm_name(nvdimm));
+ return rc;
+ }
+
+ /* map block control memory */
+ nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
+ nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
+ mmio = &nfit_blk->mmio[DCR];
+ mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address,
+ nfit_mem->spa_dcr->length);
+ if (!mmio->addr.base) {
+ dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
+ nvdimm_name(nvdimm));
+ return -ENOMEM;
+ }
+ mmio->size = nfit_mem->dcr->window_size;
+ mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
+ mmio->idt = nfit_mem->idt_dcr;
+ mmio->spa = nfit_mem->spa_dcr;
+ rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
+ nfit_mem->memdev_dcr->interleave_ways);
+ if (rc) {
+ dev_dbg(dev, "%s: %s failed to init dcr interleave\n",
+ __func__, nvdimm_name(nvdimm));
+ return rc;
+ }
+
+ rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
+ if (rc < 0) {
+ dev_dbg(dev, "%s: %s failed get DIMM flags\n",
+ __func__, nvdimm_name(nvdimm));
+ return rc;
+ }
+
+ if (nvdimm_has_flush(nfit_blk->nd_region) < 0)
+ dev_warn(dev, "unable to guarantee persistence of writes\n");
+
+ if (mmio->line_size == 0)
+ return 0;
+
+ if ((u32) nfit_blk->cmd_offset % mmio->line_size
+ + 8 > mmio->line_size) {
+ dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
+ return -ENXIO;
+ } else if ((u32) nfit_blk->stat_offset % mmio->line_size
+ + 8 > mmio->line_size) {
+ dev_dbg(dev, "stat_offset crosses interleave boundary\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
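+/*
+ * Address Range Scrub helpers: ars_get_cap() sizes the status buffer,
+ * ars_start() kicks off a scrub of one SPA range, ars_continue()
+ * resumes from the restart address after a status-buffer overflow,
+ * and ars_get_status() retrieves the current record set.
+ */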
+static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
+ struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
+{
+ struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
+ struct acpi_nfit_system_address *spa = nfit_spa->spa;
+ int cmd_rc, rc;
+
+ cmd->address = spa->address;
+ cmd->length = spa->length;
+ rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
+ sizeof(*cmd), &cmd_rc);
+ if (rc < 0)
+ return rc;
+ return cmd_rc;
+}
+
+static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa)
+{
+ int rc;
+ int cmd_rc;
+ struct nd_cmd_ars_start ars_start;
+ struct acpi_nfit_system_address *spa = nfit_spa->spa;
+ struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
+
+ memset(&ars_start, 0, sizeof(ars_start));
+ ars_start.address = spa->address;
+ ars_start.length = spa->length;
+ if (nfit_spa_type(spa) == NFIT_SPA_PM)
+ ars_start.type = ND_ARS_PERSISTENT;
+ else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
+ ars_start.type = ND_ARS_VOLATILE;
+ else
+ return -ENOTTY;
+
+ rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
+ sizeof(ars_start), &cmd_rc);
+
+ if (rc < 0)
+ return rc;
+ return cmd_rc;
+}
+
+static int ars_continue(struct acpi_nfit_desc *acpi_desc)
+{
+ int rc, cmd_rc;
+ struct nd_cmd_ars_start ars_start;
+ struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
+ struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
+
+ memset(&ars_start, 0, sizeof(ars_start));
+ ars_start.address = ars_status->restart_address;
+ ars_start.length = ars_status->restart_length;
+ ars_start.type = ars_status->type;
+ rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
+ sizeof(ars_start), &cmd_rc);
+ if (rc < 0)
+ return rc;
+ return cmd_rc;
+}
+
+static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
+{
+ struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
+ struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
+ int rc, cmd_rc;
+
+ rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
+ acpi_desc->ars_status_size, &cmd_rc);
+ if (rc < 0)
+ return rc;
+ return cmd_rc;
+}
+
+static int ars_status_process_records(struct nvdimm_bus *nvdimm_bus,
+ struct nd_cmd_ars_status *ars_status)
+{
+ int rc;
+ u32 i;
+
+ for (i = 0; i < ars_status->num_records; i++) {
+ rc = nvdimm_bus_add_poison(nvdimm_bus,
+ ars_status->records[i].err_address,
+ ars_status->records[i].length);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static void acpi_nfit_remove_resource(void *data)
+{
+ struct resource *res = data;
+
+ remove_resource(res);
+}
+
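+/*
+ * Ensure the pmem range appears as "Persistent Memory" in the iomem
+ * resource tree.  Skip the insertion if firmware already described
+ * the region, and tear it down automatically via a devm action.
+ */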
+static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
+ struct nd_region_desc *ndr_desc)
+{
+ struct resource *res, *nd_res = ndr_desc->res;
+ int is_pmem, ret;
+
+ /* No operation if the region is already registered as PMEM */
+ is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
+ IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
+ if (is_pmem == REGION_INTERSECTS)
+ return 0;
+
+ res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ res->name = "Persistent Memory";
+ res->start = nd_res->start;
+ res->end = nd_res->end;
+ res->flags = IORESOURCE_MEM;
+ res->desc = IORES_DESC_PERSISTENT_MEMORY;
+
+ ret = insert_resource(&iomem_resource, res);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(acpi_desc->dev,
+ acpi_nfit_remove_resource,
+ res);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
+ struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc,
+ struct acpi_nfit_memory_map *memdev,
+ struct nfit_spa *nfit_spa)
+{
+ struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
+ memdev->device_handle);
+ struct acpi_nfit_system_address *spa = nfit_spa->spa;
+ struct nd_blk_region_desc *ndbr_desc;
+ struct nfit_mem *nfit_mem;
+ int blk_valid = 0;
+
+ if (!nvdimm) {
+ dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
+ spa->range_index, memdev->device_handle);
+ return -ENODEV;
+ }
+
+ nd_mapping->nvdimm = nvdimm;
+ switch (nfit_spa_type(spa)) {
+ case NFIT_SPA_PM:
+ case NFIT_SPA_VOLATILE:
+ nd_mapping->start = memdev->address;
+ nd_mapping->size = memdev->region_size;
+ break;
+ case NFIT_SPA_DCR:
+ nfit_mem = nvdimm_provider_data(nvdimm);
+ if (!nfit_mem || !nfit_mem->bdw) {
+ dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
+ spa->range_index, nvdimm_name(nvdimm));
+ } else {
+ nd_mapping->size = nfit_mem->bdw->capacity;
+ nd_mapping->start = nfit_mem->bdw->start_address;
+ ndr_desc->num_lanes = nfit_mem->bdw->windows;
+ blk_valid = 1;
+ }
+
+ ndr_desc->nd_mapping = nd_mapping;
+ ndr_desc->num_mappings = blk_valid;
+ ndbr_desc = to_blk_region_desc(ndr_desc);
+ ndbr_desc->enable = acpi_nfit_blk_region_enable;
+ ndbr_desc->do_io = acpi_desc->blk_do_io;
+ nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
+ ndr_desc);
+ if (!nfit_spa->nd_region)
+ return -ENOMEM;
+ break;
+ }
+
+ return 0;
+}
+
+static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa)
+{
+ return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
+ nfit_spa_type(spa) == NFIT_SPA_VCD ||
+ nfit_spa_type(spa) == NFIT_SPA_PDISK ||
+ nfit_spa_type(spa) == NFIT_SPA_PCD);
+}
+
+static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
+ struct nfit_spa *nfit_spa)
+{
+ static struct nd_mapping nd_mappings[ND_MAX_MAPPINGS];
+ struct acpi_nfit_system_address *spa = nfit_spa->spa;
+ struct nd_blk_region_desc ndbr_desc;
+ struct nd_region_desc *ndr_desc;
+ struct nfit_memdev *nfit_memdev;
+ struct nvdimm_bus *nvdimm_bus;
+ struct resource res;
+ int count = 0, rc;
+
+ if (nfit_spa->nd_region)
+ return 0;
+
+ if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) {
+ dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n",
+ __func__);
+ return 0;
+ }
+
+ memset(&res, 0, sizeof(res));
+ memset(&nd_mappings, 0, sizeof(nd_mappings));
+ memset(&ndbr_desc, 0, sizeof(ndbr_desc));
+ res.start = spa->address;
+ res.end = res.start + spa->length - 1;
+ ndr_desc = &ndbr_desc.ndr_desc;
+ ndr_desc->res = &res;
+ ndr_desc->provider_data = nfit_spa;
+ ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
+ if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
+ ndr_desc->numa_node = acpi_map_pxm_to_online_node(
+ spa->proximity_domain);
+ else
+ ndr_desc->numa_node = NUMA_NO_NODE;
+
+ list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
+ struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
+ struct nd_mapping *nd_mapping;
+
+ if (memdev->range_index != spa->range_index)
+ continue;
+ if (count >= ND_MAX_MAPPINGS) {
+ dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
+ spa->range_index, ND_MAX_MAPPINGS);
+ return -ENXIO;
+ }
+ nd_mapping = &nd_mappings[count++];
+ rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc,
+ memdev, nfit_spa);
+ if (rc)
+ goto out;
+ }
+
+ ndr_desc->nd_mapping = nd_mappings;
+ ndr_desc->num_mappings = count;
+ rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
+ if (rc)
+ goto out;
+
+ nvdimm_bus = acpi_desc->nvdimm_bus;
+ if (nfit_spa_type(spa) == NFIT_SPA_PM) {
+ rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
+ if (rc) {
+ dev_warn(acpi_desc->dev,
+ "failed to insert pmem resource to iomem: %d\n",
+ rc);
+ goto out;
+ }
+
+ nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
+ ndr_desc);
+ if (!nfit_spa->nd_region)
+ rc = -ENOMEM;
+ } else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) {
+ nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
+ ndr_desc);
+ if (!nfit_spa->nd_region)
+ rc = -ENOMEM;
+ } else if (nfit_spa_is_virtual(spa)) {
+ nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
+ ndr_desc);
+ if (!nfit_spa->nd_region)
+ rc = -ENOMEM;
+ }
+
+ out:
+ if (rc)
+ dev_err(acpi_desc->dev, "failed to register spa range %d\n",
+ nfit_spa->spa->range_index);
+ return rc;
+}
+
+static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc,
+ u32 max_ars)
+{
+ struct device *dev = acpi_desc->dev;
+ struct nd_cmd_ars_status *ars_status;
+
+ if (acpi_desc->ars_status && acpi_desc->ars_status_size >= max_ars) {
+ memset(acpi_desc->ars_status, 0, acpi_desc->ars_status_size);
+ return 0;
+ }
+
+ if (acpi_desc->ars_status)
+ devm_kfree(dev, acpi_desc->ars_status);
+ acpi_desc->ars_status = NULL;
+ ars_status = devm_kzalloc(dev, max_ars, GFP_KERNEL);
+ if (!ars_status)
+ return -ENOMEM;
+ acpi_desc->ars_status = ars_status;
+ acpi_desc->ars_status_size = max_ars;
+ return 0;
+}
+
+static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc,
+ struct nfit_spa *nfit_spa)
+{
+ struct acpi_nfit_system_address *spa = nfit_spa->spa;
+ int rc;
+
+ if (!nfit_spa->max_ars) {
+ struct nd_cmd_ars_cap ars_cap;
+
+ memset(&ars_cap, 0, sizeof(ars_cap));
+ rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
+ if (rc < 0)
+ return rc;
+ nfit_spa->max_ars = ars_cap.max_ars_out;
+ nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
+ /* check that the supported scrub types match the spa type */
+ if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE &&
+ ((ars_cap.status >> 16) & ND_ARS_VOLATILE) == 0)
+ return -ENOTTY;
+ else if (nfit_spa_type(spa) == NFIT_SPA_PM &&
+ ((ars_cap.status >> 16) & ND_ARS_PERSISTENT) == 0)
+ return -ENOTTY;
+ }
+
+ if (ars_status_alloc(acpi_desc, nfit_spa->max_ars))
+ return -ENOMEM;
+
+ rc = ars_get_status(acpi_desc);
+ if (rc < 0 && rc != -ENOSPC)
+ return rc;
+
+ if (ars_status_process_records(acpi_desc->nvdimm_bus,
+ acpi_desc->ars_status))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc,
+ struct nfit_spa *nfit_spa)
+{
+ struct acpi_nfit_system_address *spa = nfit_spa->spa;
+ unsigned int overflow_retry = scrub_overflow_abort;
+ u64 init_ars_start = 0, init_ars_len = 0;
+ struct device *dev = acpi_desc->dev;
+ unsigned int tmo = scrub_timeout;
+ int rc;
+
+ if (!nfit_spa->ars_required || !nfit_spa->nd_region)
+ return;
+
+ rc = ars_start(acpi_desc, nfit_spa);
+ /*
+ * If we timed out the initial scan we'll still be busy here,
+ * and will wait another timeout before giving up permanently.
+ */
+ if (rc < 0 && rc != -EBUSY)
+ return;
+
+ do {
+ u64 ars_start, ars_len;
+
+ if (acpi_desc->cancel)
+ break;
+ rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
+ if (rc == -ENOTTY)
+ break;
+ if (rc == -EBUSY && !tmo) {
+ dev_warn(dev, "range %d ars timeout, aborting\n",
+ spa->range_index);
+ break;
+ }
+
+ if (rc == -EBUSY) {
+ /*
+ * Note, entries may be appended to the list
+ * while the lock is dropped, but the workqueue
+ * being active prevents entries being deleted /
+ * freed.
+ */
+ mutex_unlock(&acpi_desc->init_mutex);
+ ssleep(1);
+ tmo--;
+ mutex_lock(&acpi_desc->init_mutex);
+ continue;
+ }
+
+ /* we got some results, but there are more pending... */
+ if (rc == -ENOSPC && overflow_retry--) {
+ if (!init_ars_len) {
+ init_ars_len = acpi_desc->ars_status->length;
+ init_ars_start = acpi_desc->ars_status->address;
+ }
+ rc = ars_continue(acpi_desc);
+ }
+
+ if (rc < 0) {
+ dev_warn(dev, "range %d ars continuation failed\n",
+ spa->range_index);
+ break;
+ }
+
+ if (init_ars_len) {
+ ars_start = init_ars_start;
+ ars_len = init_ars_len;
+ } else {
+ ars_start = acpi_desc->ars_status->address;
+ ars_len = acpi_desc->ars_status->length;
+ }
+ dev_dbg(dev, "spa range: %d ars from %#llx + %#llx complete\n",
+ spa->range_index, ars_start, ars_len);
+ /* notify the region about new poison entries */
+ nvdimm_region_notify(nfit_spa->nd_region,
+ NVDIMM_REVALIDATE_POISON);
+ break;
+ } while (1);
+}
+
+static void acpi_nfit_scrub(struct work_struct *work)
+{
+ struct device *dev;
+ u64 init_scrub_length = 0;
+ struct nfit_spa *nfit_spa;
+ u64 init_scrub_address = 0;
+ bool init_ars_done = false;
+ struct acpi_nfit_desc *acpi_desc;
+ unsigned int tmo = scrub_timeout;
+ unsigned int overflow_retry = scrub_overflow_abort;
+
+ acpi_desc = container_of(work, typeof(*acpi_desc), work);
+ dev = acpi_desc->dev;
+
+ /*
+ * We scrub in 2 phases. The first phase waits for any platform
+ * firmware initiated scrubs to complete and then we go search for the
+ * affected spa regions to mark them scanned. In the second phase we
+ * initiate a directed scrub for every range that was not scrubbed in
+ * phase 1. If we're called for a 'rescan', we harmlessly pass through
+ * the first phase, but really only care about running phase 2, where
+ * regions can be notified of new poison.
+ */
+
+ /* process platform firmware initiated scrubs */
+ retry:
+ mutex_lock(&acpi_desc->init_mutex);
+ list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
+ struct nd_cmd_ars_status *ars_status;
+ struct acpi_nfit_system_address *spa;
+ u64 ars_start, ars_len;
+ int rc;
+
+ if (acpi_desc->cancel)
+ break;
+
+ if (nfit_spa->nd_region)
+ continue;
+
+ if (init_ars_done) {
+ /*
+ * No need to re-query, we're now just
+ * reconciling all the ranges covered by the
+ * initial scrub
+ */
+ rc = 0;
+ } else
+ rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
+
+ if (rc == -ENOTTY) {
+ /* no ars capability, just register spa and move on */
+ acpi_nfit_register_region(acpi_desc, nfit_spa);
+ continue;
+ }
+
+ if (rc == -EBUSY && !tmo) {
+ /* fallthrough to directed scrub in phase 2 */
+ dev_warn(dev, "timeout awaiting ars results, continuing...\n");
+ break;
+ } else if (rc == -EBUSY) {
+ mutex_unlock(&acpi_desc->init_mutex);
+ ssleep(1);
+ tmo--;
+ goto retry;
+ }
+
+ /* we got some results, but there are more pending... */
+ if (rc == -ENOSPC && overflow_retry--) {
+ ars_status = acpi_desc->ars_status;
+ /*
+ * Record the original scrub range, so that we
+ * can recall all the ranges impacted by the
+ * initial scrub.
+ */
+ if (!init_scrub_length) {
+ init_scrub_length = ars_status->length;
+ init_scrub_address = ars_status->address;
+ }
+ rc = ars_continue(acpi_desc);
+ if (rc == 0) {
+ mutex_unlock(&acpi_desc->init_mutex);
+ goto retry;
+ }
+ }
+
+ if (rc < 0) {
+ /*
+ * Initial scrub failed, we'll give it one more
+ * try below...
+ */
+ break;
+ }
+
+ /* We got some final results, record completed ranges */
+ ars_status = acpi_desc->ars_status;
+ if (init_scrub_length) {
+ ars_start = init_scrub_address;
+			ars_len = init_scrub_length;
+ } else {
+ ars_start = ars_status->address;
+ ars_len = ars_status->length;
+ }
+ spa = nfit_spa->spa;
+
+ if (!init_ars_done) {
+ init_ars_done = true;
+ dev_dbg(dev, "init scrub %#llx + %#llx complete\n",
+ ars_start, ars_len);
+ }
+ if (ars_start <= spa->address && ars_start + ars_len
+ >= spa->address + spa->length)
+ acpi_nfit_register_region(acpi_desc, nfit_spa);
+ }
+
+ /*
+ * For all the ranges not covered by an initial scrub we still
+ * want to see if there are errors, but it's ok to discover them
+ * asynchronously.
+ */
+ list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
+ /*
+ * Flag all the ranges that still need scrubbing, but
+ * register them now to make data available.
+ */
+ if (!nfit_spa->nd_region) {
+ nfit_spa->ars_required = 1;
+ acpi_nfit_register_region(acpi_desc, nfit_spa);
+ }
+ }
+
+ list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
+ acpi_nfit_async_scrub(acpi_desc, nfit_spa);
+ acpi_desc->scrub_count++;
+ if (acpi_desc->scrub_count_state)
+ sysfs_notify_dirent(acpi_desc->scrub_count_state);
+ mutex_unlock(&acpi_desc->init_mutex);
+}
+
+static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
+{
+ struct nfit_spa *nfit_spa;
+ int rc;
+
+ list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
+ if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) {
+ /* BLK regions don't need to wait for ars results */
+ rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
+ if (rc)
+ return rc;
+ }
+
+ queue_work(nfit_wq, &acpi_desc->work);
+ return 0;
+}
+
+static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
+ struct nfit_table_prev *prev)
+{
+ struct device *dev = acpi_desc->dev;
+
+ if (!list_empty(&prev->spas) ||
+ !list_empty(&prev->memdevs) ||
+ !list_empty(&prev->dcrs) ||
+ !list_empty(&prev->bdws) ||
+ !list_empty(&prev->idts) ||
+ !list_empty(&prev->flushes)) {
+ dev_err(dev, "new nfit deletes entries (unsupported)\n");
+ return -ENXIO;
+ }
+ return 0;
+}
+
+static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc)
+{
+ struct device *dev = acpi_desc->dev;
+ struct kernfs_node *nfit;
+ struct device *bus_dev;
+
+ if (!ars_supported(acpi_desc->nvdimm_bus))
+ return 0;
+
+ bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
+ nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit");
+ if (!nfit) {
+ dev_err(dev, "sysfs_get_dirent 'nfit' failed\n");
+ return -ENODEV;
+ }
+ acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub");
+ sysfs_put(nfit);
+ if (!acpi_desc->scrub_count_state) {
+ dev_err(dev, "sysfs_get_dirent 'scrub' failed\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void acpi_nfit_destruct(void *data)
+{
+ struct acpi_nfit_desc *acpi_desc = data;
+ struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
+
+ /*
+ * Destruct under acpi_desc_lock so that nfit_handle_mce does not
+ * race teardown
+ */
+ mutex_lock(&acpi_desc_lock);
+ acpi_desc->cancel = 1;
+ /*
+ * Bounce the nvdimm bus lock to make sure any in-flight
+ * acpi_nfit_ars_rescan() submissions have had a chance to
+ * either submit or see ->cancel set.
+ */
+ device_lock(bus_dev);
+ device_unlock(bus_dev);
+
+ flush_workqueue(nfit_wq);
+ if (acpi_desc->scrub_count_state)
+ sysfs_put(acpi_desc->scrub_count_state);
+ nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
+ acpi_desc->nvdimm_bus = NULL;
+ list_del(&acpi_desc->list);
+ mutex_unlock(&acpi_desc_lock);
+}
+
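+/*
+ * (Re-)parse an NFIT / _FIT payload.  All currently known sub-tables
+ * are first cut over to @prev; add_table() then moves each table that
+ * is still present back onto the live lists.  Anything left on @prev
+ * afterwards was deleted by firmware, which is unsupported and fails
+ * the update in acpi_nfit_check_deletions().
+ */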
+int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
+{
+ struct device *dev = acpi_desc->dev;
+ struct nfit_table_prev prev;
+ const void *end;
+ int rc;
+
+ if (!acpi_desc->nvdimm_bus) {
+ acpi_nfit_init_dsms(acpi_desc);
+
+ acpi_desc->nvdimm_bus = nvdimm_bus_register(dev,
+ &acpi_desc->nd_desc);
+ if (!acpi_desc->nvdimm_bus)
+ return -ENOMEM;
+
+ rc = devm_add_action_or_reset(dev, acpi_nfit_destruct,
+ acpi_desc);
+ if (rc)
+ return rc;
+
+ rc = acpi_nfit_desc_init_scrub_attr(acpi_desc);
+ if (rc)
+ return rc;
+
+ /* register this acpi_desc for mce notifications */
+ mutex_lock(&acpi_desc_lock);
+ list_add_tail(&acpi_desc->list, &acpi_descs);
+ mutex_unlock(&acpi_desc_lock);
+ }
+
+ mutex_lock(&acpi_desc->init_mutex);
+
+ INIT_LIST_HEAD(&prev.spas);
+ INIT_LIST_HEAD(&prev.memdevs);
+ INIT_LIST_HEAD(&prev.dcrs);
+ INIT_LIST_HEAD(&prev.bdws);
+ INIT_LIST_HEAD(&prev.idts);
+ INIT_LIST_HEAD(&prev.flushes);
+
+ list_cut_position(&prev.spas, &acpi_desc->spas,
+ acpi_desc->spas.prev);
+ list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
+ acpi_desc->memdevs.prev);
+ list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
+ acpi_desc->dcrs.prev);
+ list_cut_position(&prev.bdws, &acpi_desc->bdws,
+ acpi_desc->bdws.prev);
+ list_cut_position(&prev.idts, &acpi_desc->idts,
+ acpi_desc->idts.prev);
+ list_cut_position(&prev.flushes, &acpi_desc->flushes,
+ acpi_desc->flushes.prev);
+
+ end = data + sz;
+ while (!IS_ERR_OR_NULL(data))
+ data = add_table(acpi_desc, &prev, data, end);
+
+ if (IS_ERR(data)) {
+ dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__,
+ PTR_ERR(data));
+ rc = PTR_ERR(data);
+ goto out_unlock;
+ }
+
+ rc = acpi_nfit_check_deletions(acpi_desc, &prev);
+ if (rc)
+ goto out_unlock;
+
+ rc = nfit_mem_init(acpi_desc);
+ if (rc)
+ goto out_unlock;
+
+ rc = acpi_nfit_register_dimms(acpi_desc);
+ if (rc)
+ goto out_unlock;
+
+ rc = acpi_nfit_register_regions(acpi_desc);
+
+ out_unlock:
+ mutex_unlock(&acpi_desc->init_mutex);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(acpi_nfit_init);
+
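+/*
+ * acpi_nfit_flush_probe() synchronizes with the scrub workqueue by
+ * queueing a token work item on nfit_wq and waiting interruptibly for
+ * it to run, so userspace can give up rather than block in an
+ * uninterruptible flush_workqueue().
+ */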
+struct acpi_nfit_flush_work {
+ struct work_struct work;
+ struct completion cmp;
+};
+
+static void flush_probe(struct work_struct *work)
+{
+ struct acpi_nfit_flush_work *flush;
+
+ flush = container_of(work, typeof(*flush), work);
+ complete(&flush->cmp);
+}
+
+static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
+{
+ struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
+ struct device *dev = acpi_desc->dev;
+ struct acpi_nfit_flush_work flush;
+
+ /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
+ device_lock(dev);
+ device_unlock(dev);
+
+ /*
+	 * Scrub work could take tens of seconds; userspace may give up,
+	 * so we need to be interruptible while waiting.
+ */
+ INIT_WORK_ONSTACK(&flush.work, flush_probe);
+	init_completion(&flush.cmp);
+ queue_work(nfit_wq, &flush.work);
+ return wait_for_completion_interruptible(&flush.cmp);
+}
+
+static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
+ struct nvdimm *nvdimm, unsigned int cmd)
+{
+ struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
+
+ if (nvdimm)
+ return 0;
+ if (cmd != ND_CMD_ARS_START)
+ return 0;
+
+ /*
+ * The kernel and userspace may race to initiate a scrub, but
+ * the scrub thread is prepared to lose that initial race. It
+ * just needs guarantees that any ars it initiates are not
+	 * interrupted by any intervening start requests from userspace.
+ */
+ if (work_busy(&acpi_desc->work))
+ return -EBUSY;
+
+ return 0;
+}
+
+int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc)
+{
+ struct device *dev = acpi_desc->dev;
+ struct nfit_spa *nfit_spa;
+
+ if (work_busy(&acpi_desc->work))
+ return -EBUSY;
+
+ if (acpi_desc->cancel)
+ return 0;
+
+ mutex_lock(&acpi_desc->init_mutex);
+ list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
+ struct acpi_nfit_system_address *spa = nfit_spa->spa;
+
+ if (nfit_spa_type(spa) != NFIT_SPA_PM)
+ continue;
+
+ nfit_spa->ars_required = 1;
+ }
+ queue_work(nfit_wq, &acpi_desc->work);
+ dev_dbg(dev, "%s: ars_scan triggered\n", __func__);
+ mutex_unlock(&acpi_desc->init_mutex);
+
+ return 0;
+}
+
+void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
+{
+ struct nvdimm_bus_descriptor *nd_desc;
+
+ dev_set_drvdata(dev, acpi_desc);
+ acpi_desc->dev = dev;
+ acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
+ nd_desc = &acpi_desc->nd_desc;
+ nd_desc->provider_name = "ACPI.NFIT";
+ nd_desc->module = THIS_MODULE;
+ nd_desc->ndctl = acpi_nfit_ctl;
+ nd_desc->flush_probe = acpi_nfit_flush_probe;
+ nd_desc->clear_to_send = acpi_nfit_clear_to_send;
+ nd_desc->attr_groups = acpi_nfit_attribute_groups;
+
+ INIT_LIST_HEAD(&acpi_desc->spas);
+ INIT_LIST_HEAD(&acpi_desc->dcrs);
+ INIT_LIST_HEAD(&acpi_desc->bdws);
+ INIT_LIST_HEAD(&acpi_desc->idts);
+ INIT_LIST_HEAD(&acpi_desc->flushes);
+ INIT_LIST_HEAD(&acpi_desc->memdevs);
+ INIT_LIST_HEAD(&acpi_desc->dimms);
+ INIT_LIST_HEAD(&acpi_desc->list);
+ mutex_init(&acpi_desc->init_mutex);
+ INIT_WORK(&acpi_desc->work, acpi_nfit_scrub);
+}
+EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);
+
+static int acpi_nfit_add(struct acpi_device *adev)
+{
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_nfit_desc *acpi_desc;
+ struct device *dev = &adev->dev;
+ struct acpi_table_header *tbl;
+ acpi_status status = AE_OK;
+ acpi_size sz;
+ int rc = 0;
+
+ status = acpi_get_table_with_size(ACPI_SIG_NFIT, 0, &tbl, &sz);
+ if (ACPI_FAILURE(status)) {
+ /* This is ok, we could have an nvdimm hotplugged later */
+ dev_dbg(dev, "failed to find NFIT at startup\n");
+ return 0;
+ }
+
+ acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
+ if (!acpi_desc)
+ return -ENOMEM;
+ acpi_nfit_desc_init(acpi_desc, &adev->dev);
+
+ /* Save the acpi header for exporting the revision via sysfs */
+ acpi_desc->acpi_header = *tbl;
+
+ /* Evaluate _FIT and override with that if present */
+ status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
+ if (ACPI_SUCCESS(status) && buf.length > 0) {
+ union acpi_object *obj = buf.pointer;
+
+ if (obj->type == ACPI_TYPE_BUFFER)
+ rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
+ obj->buffer.length);
+ else
+ dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
+ __func__, (int) obj->type);
+ kfree(buf.pointer);
+ } else {
+ /* skip over the lead-in header table */
+ rc = acpi_nfit_init(acpi_desc, (void *) tbl
+ + sizeof(struct acpi_table_nfit),
+ sz - sizeof(struct acpi_table_nfit));
+ }
+ return rc;
+}
+
+static int acpi_nfit_remove(struct acpi_device *adev)
+{
+ /* see acpi_nfit_destruct */
+ return 0;
+}
+
+static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
+{
+ struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct device *dev = &adev->dev;
+ union acpi_object *obj;
+ acpi_status status;
+ int ret;
+
+ dev_dbg(dev, "%s: event: %d\n", __func__, event);
+
+ device_lock(dev);
+ if (!dev->driver) {
+ /* dev->driver may be null if we're being removed */
+ dev_dbg(dev, "%s: no driver found for dev\n", __func__);
+ goto out_unlock;
+ }
+
+ if (!acpi_desc) {
+ acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
+ if (!acpi_desc)
+ goto out_unlock;
+ acpi_nfit_desc_init(acpi_desc, &adev->dev);
+ } else {
+ /*
+ * Finish previous registration before considering new
+ * regions.
+ */
+ flush_workqueue(nfit_wq);
+ }
+
+ /* Evaluate _FIT */
+ status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "failed to evaluate _FIT\n");
+ goto out_unlock;
+ }
+
+ obj = buf.pointer;
+ if (obj->type == ACPI_TYPE_BUFFER) {
+ ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
+ obj->buffer.length);
+ if (ret)
+ dev_err(dev, "failed to merge updated NFIT\n");
+ } else {
+ dev_err(dev, "Invalid _FIT\n");
+ }
+ kfree(buf.pointer);
+
+ out_unlock:
+ device_unlock(dev);
+}
+
+static const struct acpi_device_id acpi_nfit_ids[] = {
+ { "ACPI0012", 0 },
+ { "", 0 },
+};
+MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);
+
+static struct acpi_driver acpi_nfit_driver = {
+ .name = KBUILD_MODNAME,
+ .ids = acpi_nfit_ids,
+ .ops = {
+ .add = acpi_nfit_add,
+ .remove = acpi_nfit_remove,
+ .notify = acpi_nfit_notify,
+ },
+};
+
+static __init int nfit_init(void)
+{
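+ /* sanity check the sizes of the ACPI-defined NFIT sub-tables */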
+ BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
+ BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
+ BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
+ BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
+ BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
+ BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
+ BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);
+
+ acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]);
+ acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]);
+ acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]);
+ acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]);
+ acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]);
+ acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]);
+ acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]);
+ acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]);
+ acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
+ acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);
+ acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE1, nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
+ acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE2, nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
+ acpi_str_to_uuid(UUID_NFIT_DIMM_N_MSFT, nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
+
+ nfit_wq = create_singlethread_workqueue("nfit");
+ if (!nfit_wq)
+ return -ENOMEM;
+
+ nfit_mce_register();
+
+ return acpi_bus_register_driver(&acpi_nfit_driver);
+}
+
+static __exit void nfit_exit(void)
+{
+ nfit_mce_unregister();
+ acpi_bus_unregister_driver(&acpi_nfit_driver);
+ destroy_workqueue(nfit_wq);
+ WARN_ON(!list_empty(&acpi_descs));
+}
+
+module_init(nfit_init);
+module_exit(nfit_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Intel Corporation");
diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c
new file mode 100644
index 000000000..161f91539
--- /dev/null
+++ b/drivers/acpi/nfit/mce.c
@@ -0,0 +1,89 @@
+/*
+ * NFIT - Machine Check Handler
+ *
+ * Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include <linux/notifier.h>
+#include <linux/acpi.h>
+#include <asm/mce.h>
+#include "nfit.h"
+
+static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct mce *mce = (struct mce *)data;
+ struct acpi_nfit_desc *acpi_desc;
+ struct nfit_spa *nfit_spa;
+
+ /* We only care about memory errors */
+ if (!(mce->status & MCACOD))
+ return NOTIFY_DONE;
+
+ /*
+ * mce->addr contains the physical address that was being accessed
+ * when the machine check fired. Walk the list of NFITs and, if any
+ * of them covers that address, start a scrub.
+ */
+ mutex_lock(&acpi_desc_lock);
+ list_for_each_entry(acpi_desc, &acpi_descs, list) {
+ struct device *dev = acpi_desc->dev;
+ int found_match = 0;
+
+ mutex_lock(&acpi_desc->init_mutex);
+ list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
+ struct acpi_nfit_system_address *spa = nfit_spa->spa;
+
+ if (nfit_spa_type(spa) != NFIT_SPA_PM)
+ continue;
+ /* find the spa that covers the mce addr */
+ if (spa->address > mce->addr)
+ continue;
+ if ((spa->address + spa->length - 1) < mce->addr)
+ continue;
+ found_match = 1;
+ dev_dbg(dev, "%s: addr in SPA %d (0x%llx, 0x%llx)\n",
+ __func__, spa->range_index, spa->address,
+ spa->length);
+ /*
+ * We can break at the first match because we're going
+ * to rescan all the SPA ranges. There shouldn't be any
+ * aliasing anyway.
+ */
+ break;
+ }
+ mutex_unlock(&acpi_desc->init_mutex);
+
+ /*
+ * We can ignore an -EBUSY here; if an ARS is already in
+ * progress, just let that be the last authoritative one.
+ */
+ if (found_match)
+ acpi_nfit_ars_rescan(acpi_desc);
+ }
+
+ mutex_unlock(&acpi_desc_lock);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block nfit_mce_dec = {
+ .notifier_call = nfit_handle_mce,
+};
+
+void nfit_mce_register(void)
+{
+ mce_register_decode_chain(&nfit_mce_dec);
+}
+
+void nfit_mce_unregister(void)
+{
+ mce_unregister_decode_chain(&nfit_mce_dec);
+}
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
new file mode 100644
index 000000000..e894ded24
--- /dev/null
+++ b/drivers/acpi/nfit/nfit.h
@@ -0,0 +1,227 @@
+/*
+ * NVDIMM Firmware Interface Table - NFIT
+ *
+ * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef __NFIT_H__
+#define __NFIT_H__
+#include <linux/workqueue.h>
+#include <linux/libnvdimm.h>
+#include <linux/ndctl.h>
+#include <linux/types.h>
+#include <linux/uuid.h>
+#include <linux/acpi.h>
+#include <acpi/acuuid.h>
+
+/* ACPI 6.1 */
+#define UUID_NFIT_BUS "2f10e7a4-9e91-11e4-89d3-123b93f75cba"
+
+/* http://pmem.io/documents/NVDIMM_DSM_Interface_Example.pdf */
+#define UUID_NFIT_DIMM "4309ac30-0d11-11e4-9191-0800200c9a66"
+
+/* https://github.com/HewlettPackard/hpe-nvm/blob/master/Documentation/ */
+#define UUID_NFIT_DIMM_N_HPE1 "9002c334-acf3-4c0e-9642-a235f0d53bc6"
+#define UUID_NFIT_DIMM_N_HPE2 "5008664b-b758-41a0-a03c-27c2f2d04f7e"
+
+/* https://msdn.microsoft.com/library/windows/hardware/mt604741 */
+#define UUID_NFIT_DIMM_N_MSFT "1ee68b36-d4bd-4a1a-9a16-4f8e53d46e05"
+
+#define ACPI_NFIT_MEM_FAILED_MASK (ACPI_NFIT_MEM_SAVE_FAILED \
+ | ACPI_NFIT_MEM_RESTORE_FAILED | ACPI_NFIT_MEM_FLUSH_FAILED \
+ | ACPI_NFIT_MEM_NOT_ARMED)
+
+enum nfit_uuids {
+ /* for simplicity alias the uuid index with the family id */
+ NFIT_DEV_DIMM = NVDIMM_FAMILY_INTEL,
+ NFIT_DEV_DIMM_N_HPE1 = NVDIMM_FAMILY_HPE1,
+ NFIT_DEV_DIMM_N_HPE2 = NVDIMM_FAMILY_HPE2,
+ NFIT_DEV_DIMM_N_MSFT = NVDIMM_FAMILY_MSFT,
+ NFIT_SPA_VOLATILE,
+ NFIT_SPA_PM,
+ NFIT_SPA_DCR,
+ NFIT_SPA_BDW,
+ NFIT_SPA_VDISK,
+ NFIT_SPA_VCD,
+ NFIT_SPA_PDISK,
+ NFIT_SPA_PCD,
+ NFIT_DEV_BUS,
+ NFIT_UUID_MAX,
+};
+
+/*
+ * Region format interface codes are stored with the interface as the
+ * LSB and the function as the MSB.
+ */
+#define NFIT_FIC_BYTE cpu_to_le16(0x101) /* byte-addressable energy backed */
+#define NFIT_FIC_BLK cpu_to_le16(0x201) /* block-addressable non-energy backed */
+#define NFIT_FIC_BYTEN cpu_to_le16(0x301) /* byte-addressable non-energy backed */
+
+enum {
+ NFIT_BLK_READ_FLUSH = 1,
+ NFIT_BLK_DCR_LATCH = 2,
+ NFIT_ARS_STATUS_DONE = 0,
+ NFIT_ARS_STATUS_BUSY = 1 << 16,
+ NFIT_ARS_STATUS_NONE = 2 << 16,
+ NFIT_ARS_STATUS_INTR = 3 << 16,
+ NFIT_ARS_START_BUSY = 6,
+ NFIT_ARS_CAP_NONE = 1,
+ NFIT_ARS_F_OVERFLOW = 1,
+ NFIT_ARS_TIMEOUT = 90,
+};
+
+struct nfit_spa {
+ struct list_head list;
+ struct nd_region *nd_region;
+ unsigned int ars_required:1;
+ u32 clear_err_unit;
+ u32 max_ars;
+ struct acpi_nfit_system_address spa[0];
+};
+
+struct nfit_dcr {
+ struct list_head list;
+ struct acpi_nfit_control_region dcr[0];
+};
+
+struct nfit_bdw {
+ struct list_head list;
+ struct acpi_nfit_data_region bdw[0];
+};
+
+struct nfit_idt {
+ struct list_head list;
+ struct acpi_nfit_interleave idt[0];
+};
+
+struct nfit_flush {
+ struct list_head list;
+ struct acpi_nfit_flush_address flush[0];
+};
+
+struct nfit_memdev {
+ struct list_head list;
+ struct acpi_nfit_memory_map memdev[0];
+};
+
+/* assembled tables for a given dimm/memory-device */
+struct nfit_mem {
+ struct nvdimm *nvdimm;
+ struct acpi_nfit_memory_map *memdev_dcr;
+ struct acpi_nfit_memory_map *memdev_pmem;
+ struct acpi_nfit_memory_map *memdev_bdw;
+ struct acpi_nfit_control_region *dcr;
+ struct acpi_nfit_data_region *bdw;
+ struct acpi_nfit_system_address *spa_dcr;
+ struct acpi_nfit_system_address *spa_bdw;
+ struct acpi_nfit_interleave *idt_dcr;
+ struct acpi_nfit_interleave *idt_bdw;
+ struct nfit_flush *nfit_flush;
+ struct list_head list;
+ struct acpi_device *adev;
+ struct acpi_nfit_desc *acpi_desc;
+ struct resource *flush_wpq;
+ unsigned long dsm_mask;
+ int family;
+};
+
+struct acpi_nfit_desc {
+ struct nvdimm_bus_descriptor nd_desc;
+ struct acpi_table_header acpi_header;
+ struct mutex init_mutex;
+ struct list_head memdevs;
+ struct list_head flushes;
+ struct list_head dimms;
+ struct list_head spas;
+ struct list_head dcrs;
+ struct list_head bdws;
+ struct list_head idts;
+ struct nvdimm_bus *nvdimm_bus;
+ struct device *dev;
+ struct nd_cmd_ars_status *ars_status;
+ size_t ars_status_size;
+ struct work_struct work;
+ struct list_head list;
+ struct kernfs_node *scrub_count_state;
+ unsigned int scrub_count;
+ unsigned int cancel:1;
+ unsigned long dimm_cmd_force_en;
+ unsigned long bus_cmd_force_en;
+ int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
+ void *iobuf, u64 len, int rw);
+};
+
+enum nd_blk_mmio_selector {
+ BDW,
+ DCR,
+};
+
+struct nd_blk_addr {
+ union {
+ void __iomem *base;
+ void *aperture;
+ };
+};
+
+struct nfit_blk {
+ struct nfit_blk_mmio {
+ struct nd_blk_addr addr;
+ u64 size;
+ u64 base_offset;
+ u32 line_size;
+ u32 num_lines;
+ u32 table_size;
+ struct acpi_nfit_interleave *idt;
+ struct acpi_nfit_system_address *spa;
+ } mmio[2];
+ struct nd_region *nd_region;
+ u64 bdw_offset; /* post interleave offset */
+ u64 stat_offset;
+ u64 cmd_offset;
+ u32 dimm_flags;
+};
+
+extern struct list_head acpi_descs;
+extern struct mutex acpi_desc_lock;
+int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc);
+
+#ifdef CONFIG_X86_MCE
+void nfit_mce_register(void);
+void nfit_mce_unregister(void);
+#else
+static inline void nfit_mce_register(void)
+{
+}
+static inline void nfit_mce_unregister(void)
+{
+}
+#endif
+
+int nfit_spa_type(struct acpi_nfit_system_address *spa);
+
+static inline struct acpi_nfit_memory_map *__to_nfit_memdev(
+ struct nfit_mem *nfit_mem)
+{
+ if (nfit_mem->memdev_dcr)
+ return nfit_mem->memdev_dcr;
+ return nfit_mem->memdev_pmem;
+}
+
+static inline struct acpi_nfit_desc *to_acpi_desc(
+ struct nvdimm_bus_descriptor *nd_desc)
+{
+ return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
+}
+
+const u8 *to_nfit_uuid(enum nfit_uuids id);
+int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *nfit, acpi_size sz);
+void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev);
+#endif /* __NFIT_H__ */
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index d176e0ece..ce3a7a16f 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -18,22 +18,21 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
*/
+
+#define pr_fmt(fmt) "ACPI: " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/acpi.h>
+#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/numa.h>
#include <linux/nodemask.h>
#include <linux/topology.h>
-#define PREFIX "ACPI: "
-
-#define ACPI_NUMA 0x80000000
-#define _COMPONENT ACPI_NUMA
-ACPI_MODULE_NAME("numa");
-
static nodemask_t nodes_found_map = NODE_MASK_NONE;
/* maps to convert between proximity domain and logical node ID */
@@ -43,6 +42,7 @@ static int node_to_pxm_map[MAX_NUMNODES]
= { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
unsigned char acpi_srat_revision __initdata;
+int acpi_numa __initdata;
int pxm_to_node(int pxm)
{
@@ -128,68 +128,63 @@ EXPORT_SYMBOL(acpi_map_pxm_to_online_node);
static void __init
acpi_table_print_srat_entry(struct acpi_subtable_header *header)
{
-
- ACPI_FUNCTION_NAME("acpi_table_print_srat_entry");
-
- if (!header)
- return;
-
switch (header->type) {
-
case ACPI_SRAT_TYPE_CPU_AFFINITY:
-#ifdef ACPI_DEBUG_OUTPUT
{
struct acpi_srat_cpu_affinity *p =
(struct acpi_srat_cpu_affinity *)header;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n",
- p->apic_id, p->local_sapic_eid,
- p->proximity_domain_lo,
- (p->flags & ACPI_SRAT_CPU_ENABLED)?
- "enabled" : "disabled"));
+ pr_debug("SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n",
+ p->apic_id, p->local_sapic_eid,
+ p->proximity_domain_lo,
+ (p->flags & ACPI_SRAT_CPU_ENABLED) ?
+ "enabled" : "disabled");
}
-#endif /* ACPI_DEBUG_OUTPUT */
break;
case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
-#ifdef ACPI_DEBUG_OUTPUT
{
struct acpi_srat_mem_affinity *p =
(struct acpi_srat_mem_affinity *)header;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n",
- (unsigned long)p->base_address,
- (unsigned long)p->length,
- p->proximity_domain,
- (p->flags & ACPI_SRAT_MEM_ENABLED)?
- "enabled" : "disabled",
- (p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)?
- " hot-pluggable" : "",
- (p->flags & ACPI_SRAT_MEM_NON_VOLATILE)?
- " non-volatile" : ""));
+ pr_debug("SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n",
+ (unsigned long)p->base_address,
+ (unsigned long)p->length,
+ p->proximity_domain,
+ (p->flags & ACPI_SRAT_MEM_ENABLED) ?
+ "enabled" : "disabled",
+ (p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) ?
+ " hot-pluggable" : "",
+ (p->flags & ACPI_SRAT_MEM_NON_VOLATILE) ?
+ " non-volatile" : "");
}
-#endif /* ACPI_DEBUG_OUTPUT */
break;
case ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY:
-#ifdef ACPI_DEBUG_OUTPUT
{
struct acpi_srat_x2apic_cpu_affinity *p =
(struct acpi_srat_x2apic_cpu_affinity *)header;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "SRAT Processor (x2apicid[0x%08x]) in"
- " proximity domain %d %s\n",
- p->apic_id,
- p->proximity_domain,
- (p->flags & ACPI_SRAT_CPU_ENABLED) ?
- "enabled" : "disabled"));
+ pr_debug("SRAT Processor (x2apicid[0x%08x]) in proximity domain %d %s\n",
+ p->apic_id,
+ p->proximity_domain,
+ (p->flags & ACPI_SRAT_CPU_ENABLED) ?
+ "enabled" : "disabled");
}
-#endif /* ACPI_DEBUG_OUTPUT */
break;
+
+ case ACPI_SRAT_TYPE_GICC_AFFINITY:
+ {
+ struct acpi_srat_gicc_affinity *p =
+ (struct acpi_srat_gicc_affinity *)header;
+ pr_debug("SRAT Processor (acpi id[0x%04x]) in proximity domain %d %s\n",
+ p->acpi_processor_uid,
+ p->proximity_domain,
+ (p->flags & ACPI_SRAT_GICC_ENABLED) ?
+ "enabled" : "disabled");
+ }
+ break;
+
default:
- printk(KERN_WARNING PREFIX
- "Found unsupported SRAT entry (type = 0x%x)\n",
- header->type);
+ pr_warn("Found unsupported SRAT entry (type = 0x%x)\n",
+ header->type);
break;
}
}
@@ -217,12 +212,117 @@ static int __init slit_valid(struct acpi_table_slit *slit)
return 1;
}
+void __init bad_srat(void)
+{
+ pr_err("SRAT: SRAT not used.\n");
+ acpi_numa = -1;
+}
+
+int __init srat_disabled(void)
+{
+ return acpi_numa < 0;
+}
+
+#if defined(CONFIG_X86) || defined(CONFIG_ARM64)
+/*
+ * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
+ * I/O localities since SRAT does not list them. I/O localities are
+ * not supported at this point.
+ */
+void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
+{
+ int i, j;
+
+ for (i = 0; i < slit->locality_count; i++) {
+ const int from_node = pxm_to_node(i);
+
+ if (from_node == NUMA_NO_NODE)
+ continue;
+
+ for (j = 0; j < slit->locality_count; j++) {
+ const int to_node = pxm_to_node(j);
+
+ if (to_node == NUMA_NO_NODE)
+ continue;
+
+ numa_set_distance(from_node, to_node,
+ slit->entry[slit->locality_count * i + j]);
+ }
+ }
+}
+
+/*
+ * Default callback for parsing of the Proximity Domain <-> Memory
+ * Area mappings
+ */
+int __init
+acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
+{
+ u64 start, end;
+ u32 hotpluggable;
+ int node, pxm;
+
+ if (srat_disabled())
+ goto out_err;
+ if (ma->header.length < sizeof(struct acpi_srat_mem_affinity)) {
+ pr_err("SRAT: Unexpected header length: %d\n",
+ ma->header.length);
+ goto out_err_bad_srat;
+ }
+ if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
+ goto out_err;
+ hotpluggable = ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE;
+ if (hotpluggable && !IS_ENABLED(CONFIG_MEMORY_HOTPLUG))
+ goto out_err;
+
+ start = ma->base_address;
+ end = start + ma->length;
+ pxm = ma->proximity_domain;
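+ /* SRAT revisions earlier than 2 define only an 8-bit proximity domain */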
+ if (acpi_srat_revision <= 1)
+ pxm &= 0xff;
+
+ node = acpi_map_pxm_to_node(pxm);
+ if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
+ pr_err("SRAT: Too many proximity domains.\n");
+ goto out_err_bad_srat;
+ }
+
+ if (numa_add_memblk(node, start, end) < 0) {
+ pr_err("SRAT: Failed to add memblk to node %u [mem %#010Lx-%#010Lx]\n",
+ node, (unsigned long long) start,
+ (unsigned long long) end - 1);
+ goto out_err_bad_srat;
+ }
+
+ node_set(node, numa_nodes_parsed);
+
+ pr_info("SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]%s%s\n",
+ node, pxm,
+ (unsigned long long) start, (unsigned long long) end - 1,
+ hotpluggable ? " hotplug" : "",
+ ma->flags & ACPI_SRAT_MEM_NON_VOLATILE ? " non-volatile" : "");
+
+ /* Mark hotplug range in memblock. */
+ if (hotpluggable && memblock_mark_hotplug(start, ma->length))
+ pr_warn("SRAT: Failed to mark hotplug range [mem %#010Lx-%#010Lx] in memblock\n",
+ (unsigned long long)start, (unsigned long long)end - 1);
+
+ max_possible_pfn = max(max_possible_pfn, PFN_UP(end - 1));
+
+ return 0;
+out_err_bad_srat:
+ bad_srat();
+out_err:
+ return -EINVAL;
+}
+#endif /* defined(CONFIG_X86) || defined(CONFIG_ARM64) */
+
static int __init acpi_parse_slit(struct acpi_table_header *table)
{
struct acpi_table_slit *slit = (struct acpi_table_slit *)table;
if (!slit_valid(slit)) {
- printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n");
+ pr_info("SLIT table looks invalid. Not used.\n");
return -EINVAL;
}
acpi_numa_slit_init(slit);
@@ -233,12 +333,9 @@ static int __init acpi_parse_slit(struct acpi_table_header *table)
void __init __weak
acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
{
- printk(KERN_WARNING PREFIX
- "Found unsupported x2apic [0x%08x] SRAT entry\n", pa->apic_id);
- return;
+ pr_warn("Found unsupported x2apic [0x%08x] SRAT entry\n", pa->apic_id);
}
-
static int __init
acpi_parse_x2apic_affinity(struct acpi_subtable_header *header,
const unsigned long end)
@@ -275,6 +372,24 @@ acpi_parse_processor_affinity(struct acpi_subtable_header *header,
return 0;
}
+static int __init
+acpi_parse_gicc_affinity(struct acpi_subtable_header *header,
+ const unsigned long end)
+{
+ struct acpi_srat_gicc_affinity *processor_affinity;
+
+ processor_affinity = (struct acpi_srat_gicc_affinity *)header;
+ if (!processor_affinity)
+ return -EINVAL;
+
+ acpi_table_print_srat_entry(header);
+
+ /* let the architecture-dependent part do it */
+ acpi_numa_gicc_affinity_init(processor_affinity);
+
+ return 0;
+}
+
static int __initdata parsed_numa_memblks;
static int __init
@@ -319,6 +434,9 @@ int __init acpi_numa_init(void)
{
int cnt = 0;
+ if (acpi_disabled)
+ return -EINVAL;
+
/*
* Should not limit number with cpu num that is from NR_CPUS or nr_cpus=
* SRAT cpu entries could have different order with that in MADT.
@@ -327,13 +445,15 @@ int __init acpi_numa_init(void)
/* SRAT: Static Resource Affinity Table */
if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
- struct acpi_subtable_proc srat_proc[2];
+ struct acpi_subtable_proc srat_proc[3];
memset(srat_proc, 0, sizeof(srat_proc));
srat_proc[0].id = ACPI_SRAT_TYPE_CPU_AFFINITY;
srat_proc[0].handler = acpi_parse_processor_affinity;
srat_proc[1].id = ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY;
srat_proc[1].handler = acpi_parse_x2apic_affinity;
+ srat_proc[2].id = ACPI_SRAT_TYPE_GICC_AFFINITY;
+ srat_proc[2].handler = acpi_parse_gicc_affinity;
acpi_table_parse_entries_array(ACPI_SIG_SRAT,
sizeof(struct acpi_table_srat),
@@ -347,8 +467,6 @@ int __init acpi_numa_init(void)
/* SLIT: System Locality Information Table */
acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);
- acpi_numa_arch_fixup();
-
if (cnt < 0)
return cnt;
else if (!parsed_numa_memblks)
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index b108f1358..4305ee9db 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -309,7 +309,7 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
* During early init (when acpi_gbl_permanent_mmap has not been set yet) this
* routine simply calls __acpi_map_table() to get the job done.
*/
-void __iomem *__init_refok
+void __iomem *__ref
acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
struct acpi_ioremap *map;
@@ -362,8 +362,7 @@ out:
}
EXPORT_SYMBOL_GPL(acpi_os_map_iomem);
-void *__init_refok
-acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
+void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
return (void *)acpi_os_map_iomem(phys, size);
}
diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
new file mode 100644
index 000000000..b5b376e08
--- /dev/null
+++ b/drivers/acpi/pci_mcfg.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2016 Broadcom
+ * Author: Jayachandran C <jchandra@broadcom.com>
+ * Copyright (C) 2016 Semihalf
+ * Author: Tomasz Nowicki <tn@semihalf.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation (the "GPL").
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 (GPLv2) for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 (GPLv2) along with this source code.
+ */
+
+#define pr_fmt(fmt) "ACPI: " fmt
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+
+/* Structure to hold entries from the MCFG table */
+struct mcfg_entry {
+ struct list_head list;
+ phys_addr_t addr;
+ u16 segment;
+ u8 bus_start;
+ u8 bus_end;
+};
+
+/* List to save MCFG entries */
+static LIST_HEAD(pci_mcfg_list);
+
+phys_addr_t pci_mcfg_lookup(u16 seg, struct resource *bus_res)
+{
+ struct mcfg_entry *e;
+
+ /*
+ * We expect an exact match, unless the MCFG entry's end bus number
+ * covers more than the caller specified.
+ */
+ list_for_each_entry(e, &pci_mcfg_list, list) {
+ if (e->segment == seg && e->bus_start == bus_res->start &&
+ e->bus_end >= bus_res->end)
+ return e->addr;
+ }
+
+ return 0;
+}
+
+static __init int pci_mcfg_parse(struct acpi_table_header *header)
+{
+ struct acpi_table_mcfg *mcfg;
+ struct acpi_mcfg_allocation *mptr;
+ struct mcfg_entry *e, *arr;
+ int i, n;
+
+ if (header->length < sizeof(struct acpi_table_mcfg))
+ return -EINVAL;
+
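+ /* the allocation entries immediately follow the fixed MCFG header */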
+ n = (header->length - sizeof(struct acpi_table_mcfg)) /
+ sizeof(struct acpi_mcfg_allocation);
+ mcfg = (struct acpi_table_mcfg *)header;
+ mptr = (struct acpi_mcfg_allocation *) &mcfg[1];
+
+ arr = kcalloc(n, sizeof(*arr), GFP_KERNEL);
+ if (!arr)
+ return -ENOMEM;
+
+ for (i = 0, e = arr; i < n; i++, mptr++, e++) {
+ e->segment = mptr->pci_segment;
+ e->addr = mptr->address;
+ e->bus_start = mptr->start_bus_number;
+ e->bus_end = mptr->end_bus_number;
+ list_add(&e->list, &pci_mcfg_list);
+ }
+
+ pr_info("MCFG table detected, %d entries\n", n);
+ return 0;
+}
+
+/* Interface called by ACPI - parse and save MCFG table */
+void __init pci_mmcfg_late_init(void)
+{
+ int err = acpi_table_parse(ACPI_SIG_MCFG, pci_mcfg_parse);
+
+ if (err)
+ pr_err("Failed to parse MCFG (%d)\n", err);
+}
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index ae3fe4e64..d144168d4 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -720,6 +720,36 @@ next:
}
}
+static void acpi_pci_root_remap_iospace(struct resource_entry *entry)
+{
+#ifdef PCI_IOBASE
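+ /*
+ * Translate this I/O window from the CPU physical address space
+ * into the logical PIO space: register the range, pick a logical
+ * port number, and remap the window there.
+ */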
+ struct resource *res = entry->res;
+ resource_size_t cpu_addr = res->start;
+ resource_size_t pci_addr = cpu_addr - entry->offset;
+ resource_size_t length = resource_size(res);
+ unsigned long port;
+
+ if (pci_register_io_range(cpu_addr, length))
+ goto err;
+
+ port = pci_address_to_pio(cpu_addr);
+ if (port == (unsigned long)-1)
+ goto err;
+
+ res->start = port;
+ res->end = port + length - 1;
+ entry->offset = port - pci_addr;
+
+ if (pci_remap_iospace(res, cpu_addr) < 0)
+ goto err;
+
+ pr_info("Remapped I/O %pa to %pR\n", &cpu_addr, res);
+ return;
+err:
+ res->flags |= IORESOURCE_DISABLED;
+#endif
+}
+
int acpi_pci_probe_root_resources(struct acpi_pci_root_info *info)
{
int ret;
@@ -740,6 +770,9 @@ int acpi_pci_probe_root_resources(struct acpi_pci_root_info *info)
"no IO and memory resources present in _CRS\n");
else {
resource_list_for_each_entry_safe(entry, tmp, list) {
+ if (entry->res->flags & IORESOURCE_IO)
+ acpi_pci_root_remap_iospace(entry);
+
if (entry->res->flags & IORESOURCE_DISABLED)
resource_list_destroy_entry(entry);
else
@@ -811,6 +844,8 @@ static void acpi_pci_root_release_info(struct pci_host_bridge *bridge)
resource_list_for_each_entry(entry, &bridge->windows) {
res = entry->res;
+ if (res->flags & IORESOURCE_IO)
+ pci_unmap_iospace(res);
if (res->parent &&
(res->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
release_resource(res);
diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
index 7188e53b6..f62c68e24 100644
--- a/drivers/acpi/pci_slot.c
+++ b/drivers/acpi/pci_slot.c
@@ -22,8 +22,9 @@
* General Public License for more details.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -33,30 +34,11 @@
#include <linux/dmi.h>
#include <linux/pci-acpi.h>
-static bool debug;
static int check_sta_before_sun;
-#define DRIVER_VERSION "0.1"
-#define DRIVER_AUTHOR "Alex Chiang <achiang@hp.com>"
-#define DRIVER_DESC "ACPI PCI Slot Detection Driver"
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
-MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
-module_param(debug, bool, 0644);
-
#define _COMPONENT ACPI_PCI_COMPONENT
ACPI_MODULE_NAME("pci_slot");
-#define MY_NAME "pci_slot"
-#define err(format, arg...) pr_err("%s: " format , MY_NAME , ## arg)
-#define info(format, arg...) pr_info("%s: " format , MY_NAME , ## arg)
-#define dbg(format, arg...) \
- do { \
- if (debug) \
- pr_debug("%s: " format, MY_NAME , ## arg); \
- } while (0)
-
#define SLOT_NAME_SIZE 21 /* Inspired by #define in acpiphp.h */
struct acpi_pci_slot {
@@ -76,7 +58,7 @@ check_slot(acpi_handle handle, unsigned long long *sun)
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
- dbg("Checking slot on path: %s\n", (char *)buffer.pointer);
+ pr_debug("Checking slot on path: %s\n", (char *)buffer.pointer);
if (check_sta_before_sun) {
/* If SxFy doesn't have _STA, we just assume it's there */
@@ -87,14 +69,16 @@ check_slot(acpi_handle handle, unsigned long long *sun)
status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
if (ACPI_FAILURE(status)) {
- dbg("_ADR returned %d on %s\n", status, (char *)buffer.pointer);
+ pr_debug("_ADR returned %d on %s\n",
+ status, (char *)buffer.pointer);
goto out;
}
/* No _SUN == not a slot == bail */
status = acpi_evaluate_integer(handle, "_SUN", NULL, sun);
if (ACPI_FAILURE(status)) {
- dbg("_SUN returned %d on %s\n", status, (char *)buffer.pointer);
+ pr_debug("_SUN returned %d on %s\n",
+ status, (char *)buffer.pointer);
goto out;
}
@@ -132,15 +116,13 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
}
slot = kmalloc(sizeof(*slot), GFP_KERNEL);
- if (!slot) {
- err("%s: cannot allocate memory\n", __func__);
+ if (!slot)
return AE_OK;
- }
snprintf(name, sizeof(name), "%llu", sun);
pci_slot = pci_create_slot(pci_bus, device, name, NULL);
if (IS_ERR(pci_slot)) {
- err("pci_create_slot returned %ld\n", PTR_ERR(pci_slot));
+ pr_err("pci_create_slot returned %ld\n", PTR_ERR(pci_slot));
kfree(slot);
return AE_OK;
}
@@ -150,8 +132,8 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
get_device(&pci_bus->dev);
- dbg("pci_slot: %p, pci_bus: %x, device: %d, name: %s\n",
- pci_slot, pci_bus->number, device, name);
+ pr_debug("%p, pci_bus: %x, device: %d, name: %s\n",
+ pci_slot, pci_bus->number, device, name);
return AE_OK;
}
@@ -186,7 +168,8 @@ void acpi_pci_slot_remove(struct pci_bus *bus)
static int do_sta_before_sun(const struct dmi_system_id *d)
{
- info("%s detected: will evaluate _STA before calling _SUN\n", d->ident);
+ pr_info("%s detected: will evaluate _STA before calling _SUN\n",
+ d->ident);
check_sta_before_sun = 1;
return 0;
}
diff --git a/drivers/acpi/pmic/intel_pmic.c b/drivers/acpi/pmic/intel_pmic.c
index bd772cd56..ca18e0d23 100644
--- a/drivers/acpi/pmic/intel_pmic.c
+++ b/drivers/acpi/pmic/intel_pmic.c
@@ -13,7 +13,7 @@
* GNU General Public License for more details.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/acpi.h>
#include <linux/regmap.h>
#include <acpi/acpi_lpat.h>
@@ -21,12 +21,19 @@
#define PMIC_POWER_OPREGION_ID 0x8d
#define PMIC_THERMAL_OPREGION_ID 0x8c
+#define PMIC_REGS_OPREGION_ID 0x8f
+
+struct intel_pmic_regs_handler_ctx {
+ unsigned int val;
+ u16 addr;
+};
struct intel_pmic_opregion {
struct mutex lock;
struct acpi_lpat_conversion_table *lpat_table;
struct regmap *regmap;
struct intel_pmic_opregion_data *data;
+ struct intel_pmic_regs_handler_ctx ctx;
};
static int pmic_get_reg_bit(int address, struct pmic_table *table,
@@ -131,7 +138,7 @@ static int pmic_thermal_aux(struct intel_pmic_opregion *opregion, int reg,
}
static int pmic_thermal_pen(struct intel_pmic_opregion *opregion, int reg,
- u32 function, u64 *value)
+ int bit, u32 function, u64 *value)
{
struct intel_pmic_opregion_data *d = opregion->data;
struct regmap *regmap = opregion->regmap;
@@ -140,12 +147,12 @@ static int pmic_thermal_pen(struct intel_pmic_opregion *opregion, int reg,
return -ENXIO;
if (function == ACPI_READ)
- return d->get_policy(regmap, reg, value);
+ return d->get_policy(regmap, reg, bit, value);
if (*value != 0 && *value != 1)
return -EINVAL;
- return d->update_policy(regmap, reg, *value);
+ return d->update_policy(regmap, reg, bit, *value);
}
static bool pmic_thermal_is_temp(int address)
@@ -170,13 +177,13 @@ static acpi_status intel_pmic_thermal_handler(u32 function,
{
struct intel_pmic_opregion *opregion = region_context;
struct intel_pmic_opregion_data *d = opregion->data;
- int reg, result;
+ int reg, bit, result;
if (bits != 32 || !value64)
return AE_BAD_PARAMETER;
result = pmic_get_reg_bit(address, d->thermal_table,
- d->thermal_table_count, &reg, NULL);
+ d->thermal_table_count, &reg, &bit);
if (result == -ENOENT)
return AE_BAD_PARAMETER;
@@ -187,7 +194,8 @@ static acpi_status intel_pmic_thermal_handler(u32 function,
else if (pmic_thermal_is_aux(address))
result = pmic_thermal_aux(opregion, reg, function, value64);
else if (pmic_thermal_is_pen(address))
- result = pmic_thermal_pen(opregion, reg, function, value64);
+ result = pmic_thermal_pen(opregion, reg, bit,
+ function, value64);
else
result = -EINVAL;
@@ -203,6 +211,48 @@ static acpi_status intel_pmic_thermal_handler(u32 function,
return AE_OK;
}
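+/*
+ * Virtual "regs" operation region: offset 0 is ignored, offsets 1 and 2
+ * latch the high and low bytes of the target register address, offset 3
+ * latches the value, and a write to offset 4 performs the access
+ * (non-zero selects a regmap write, zero a regmap read) and then clears
+ * the latched state.
+ */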
+static acpi_status intel_pmic_regs_handler(u32 function,
+ acpi_physical_address address, u32 bits, u64 *value64,
+ void *handler_context, void *region_context)
+{
+ struct intel_pmic_opregion *opregion = region_context;
+ int result = 0;
+
+ switch (address) {
+ case 0:
+ return AE_OK;
+ case 1:
+ opregion->ctx.addr |= (*value64 & 0xff) << 8;
+ return AE_OK;
+ case 2:
+ opregion->ctx.addr |= *value64 & 0xff;
+ return AE_OK;
+ case 3:
+ opregion->ctx.val = *value64 & 0xff;
+ return AE_OK;
+ case 4:
+ if (*value64) {
+ result = regmap_write(opregion->regmap, opregion->ctx.addr,
+ opregion->ctx.val);
+ } else {
+ result = regmap_read(opregion->regmap, opregion->ctx.addr,
+ &opregion->ctx.val);
+ if (result == 0)
+ *value64 = opregion->ctx.val;
+ }
+ memset(&opregion->ctx, 0x00, sizeof(opregion->ctx));
+ }
+
+ if (result < 0) {
+ if (result == -EINVAL)
+ return AE_BAD_PARAMETER;
+ else
+ return AE_ERROR;
+ }
+
+ return AE_OK;
+}
+
int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
struct regmap *regmap,
struct intel_pmic_opregion_data *d)
@@ -242,16 +292,30 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
acpi_remove_address_space_handler(handle, PMIC_POWER_OPREGION_ID,
intel_pmic_power_handler);
ret = -ENODEV;
- goto out_error;
+ goto out_remove_power_handler;
+ }
+
+ status = acpi_install_address_space_handler(handle,
+ PMIC_REGS_OPREGION_ID, intel_pmic_regs_handler, NULL,
+ opregion);
+ if (ACPI_FAILURE(status)) {
+ ret = -ENODEV;
+ goto out_remove_thermal_handler;
}
opregion->data = d;
return 0;
+out_remove_thermal_handler:
+ acpi_remove_address_space_handler(handle, PMIC_THERMAL_OPREGION_ID,
+ intel_pmic_thermal_handler);
+
+out_remove_power_handler:
+ acpi_remove_address_space_handler(handle, PMIC_POWER_OPREGION_ID,
+ intel_pmic_power_handler);
+
out_error:
acpi_lpat_free_conversion_table(opregion->lpat_table);
return ret;
}
EXPORT_SYMBOL_GPL(intel_pmic_install_opregion_handler);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/pmic/intel_pmic.h b/drivers/acpi/pmic/intel_pmic.h
index d4e90af8f..e8bfa7b86 100644
--- a/drivers/acpi/pmic/intel_pmic.h
+++ b/drivers/acpi/pmic/intel_pmic.h
@@ -12,8 +12,8 @@ struct intel_pmic_opregion_data {
int (*update_power)(struct regmap *r, int reg, int bit, bool on);
int (*get_raw_temp)(struct regmap *r, int reg);
int (*update_aux)(struct regmap *r, int reg, int raw_temp);
- int (*get_policy)(struct regmap *r, int reg, u64 *value);
- int (*update_policy)(struct regmap *r, int reg, int enable);
+ int (*get_policy)(struct regmap *r, int reg, int bit, u64 *value);
+ int (*update_policy)(struct regmap *r, int reg, int bit, int enable);
struct pmic_table *power_table;
int power_table_count;
struct pmic_table *thermal_table;
diff --git a/drivers/acpi/pmic/intel_pmic_bxtwc.c b/drivers/acpi/pmic/intel_pmic_bxtwc.c
new file mode 100644
index 000000000..90011aad4
--- /dev/null
+++ b/drivers/acpi/pmic/intel_pmic_bxtwc.c
@@ -0,0 +1,420 @@
+/*
+ * intel_pmic_bxtwc.c - Intel BXT WhiskeyCove PMIC operation region driver
+ *
+ * Copyright (C) 2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+#include "intel_pmic.h"
+
+#define WHISKEY_COVE_ALRT_HIGH_BIT_MASK 0x0F
+#define WHISKEY_COVE_ADC_HIGH_BIT(x) (((x) & 0x0F) << 8)
+#define WHISKEY_COVE_ADC_CURSRC(x) (((x) & 0xF0) >> 4)
+#define VR_MODE_DISABLED 0
+#define VR_MODE_AUTO BIT(0)
+#define VR_MODE_NORMAL BIT(1)
+#define VR_MODE_SWITCH BIT(2)
+#define VR_MODE_ECO (BIT(0)|BIT(1))
+#define VSWITCH2_OUTPUT BIT(5)
+#define VSWITCH1_OUTPUT BIT(4)
+#define VUSBPHY_CHARGE BIT(1)
+
+static struct pmic_table power_table[] = {
+ {
+ .address = 0x0,
+ .reg = 0x63,
+ .bit = VR_MODE_AUTO,
+ }, /* VDD1 -> VDD1CNT */
+ {
+ .address = 0x04,
+ .reg = 0x65,
+ .bit = VR_MODE_AUTO,
+ }, /* VDD2 -> VDD2CNT */
+ {
+ .address = 0x08,
+ .reg = 0x67,
+ .bit = VR_MODE_AUTO,
+ }, /* VDD3 -> VDD3CNT */
+ {
+ .address = 0x0c,
+ .reg = 0x6d,
+ .bit = VR_MODE_AUTO,
+ }, /* VLFX -> VFLEXCNT */
+ {
+ .address = 0x10,
+ .reg = 0x6f,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP1A -> VPROG1ACNT */
+ {
+ .address = 0x14,
+ .reg = 0x70,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP1B -> VPROG1BCNT */
+ {
+ .address = 0x18,
+ .reg = 0x71,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP1C -> VPROG1CCNT */
+ {
+ .address = 0x1c,
+ .reg = 0x72,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP1D -> VPROG1DCNT */
+ {
+ .address = 0x20,
+ .reg = 0x73,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP2A -> VPROG2ACNT */
+ {
+ .address = 0x24,
+ .reg = 0x74,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP2B -> VPROG2BCNT */
+ {
+ .address = 0x28,
+ .reg = 0x75,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP2C -> VPROG2CCNT */
+ {
+ .address = 0x2c,
+ .reg = 0x76,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP3A -> VPROG3ACNT */
+ {
+ .address = 0x30,
+ .reg = 0x77,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP3B -> VPROG3BCNT */
+ {
+ .address = 0x34,
+ .reg = 0x78,
+ .bit = VSWITCH2_OUTPUT,
+ }, /* VSW2 -> VLD0CNT Bit 5 */
+ {
+ .address = 0x38,
+ .reg = 0x78,
+ .bit = VSWITCH1_OUTPUT,
+ }, /* VSW1 -> VLD0CNT Bit 4 */
+ {
+ .address = 0x3c,
+ .reg = 0x78,
+ .bit = VUSBPHY_CHARGE,
+ }, /* VUPY -> VLDOCNT Bit 1 */
+ {
+ .address = 0x40,
+ .reg = 0x7b,
+ .bit = VR_MODE_NORMAL,
+ }, /* VRSO -> VREFSOCCNT */
+ {
+ .address = 0x44,
+ .reg = 0xA0,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP1E -> VPROG1ECNT */
+ {
+ .address = 0x48,
+ .reg = 0xA1,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP1F -> VPROG1FCNT */
+ {
+ .address = 0x4c,
+ .reg = 0xA2,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP2D -> VPROG2DCNT */
+ {
+ .address = 0x50,
+ .reg = 0xA3,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP4A -> VPROG4ACNT */
+ {
+ .address = 0x54,
+ .reg = 0xA4,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP4B -> VPROG4BCNT */
+ {
+ .address = 0x58,
+ .reg = 0xA5,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP4C -> VPROG4CCNT */
+ {
+ .address = 0x5c,
+ .reg = 0xA6,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP4D -> VPROG4DCNT */
+ {
+ .address = 0x60,
+ .reg = 0xA7,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP5A -> VPROG5ACNT */
+ {
+ .address = 0x64,
+ .reg = 0xA8,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP5B -> VPROG5BCNT */
+ {
+ .address = 0x68,
+ .reg = 0xA9,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP6A -> VPROG6ACNT */
+ {
+ .address = 0x6c,
+ .reg = 0xAA,
+ .bit = VR_MODE_NORMAL,
+ }, /* VP6B -> VPROG6BCNT */
+ {
+ .address = 0x70,
+ .reg = 0x36,
+ .bit = BIT(2),
+ }, /* SDWN_N -> MODEMCTRL Bit 2 */
+ {
+ .address = 0x74,
+ .reg = 0x36,
+ .bit = BIT(0),
+ } /* MOFF -> MODEMCTRL Bit 0 */
+};
+
+static struct pmic_table thermal_table[] = {
+ {
+ .address = 0x00,
+ .reg = 0x4F39
+ },
+ {
+ .address = 0x04,
+ .reg = 0x4F24
+ },
+ {
+ .address = 0x08,
+ .reg = 0x4F26
+ },
+ {
+ .address = 0x0c,
+ .reg = 0x4F3B
+ },
+ {
+ .address = 0x10,
+ .reg = 0x4F28
+ },
+ {
+ .address = 0x14,
+ .reg = 0x4F2A
+ },
+ {
+ .address = 0x18,
+ .reg = 0x4F3D
+ },
+ {
+ .address = 0x1c,
+ .reg = 0x4F2C
+ },
+ {
+ .address = 0x20,
+ .reg = 0x4F2E
+ },
+ {
+ .address = 0x24,
+ .reg = 0x4F3F
+ },
+ {
+ .address = 0x28,
+ .reg = 0x4F30
+ },
+ {
+ .address = 0x30,
+ .reg = 0x4F41
+ },
+ {
+ .address = 0x34,
+ .reg = 0x4F32
+ },
+ {
+ .address = 0x3c,
+ .reg = 0x4F43
+ },
+ {
+ .address = 0x40,
+ .reg = 0x4F34
+ },
+ {
+ .address = 0x48,
+ .reg = 0x4F6A,
+ .bit = 0,
+ },
+ {
+ .address = 0x4C,
+ .reg = 0x4F6A,
+ .bit = 1
+ },
+ {
+ .address = 0x50,
+ .reg = 0x4F6A,
+ .bit = 2
+ },
+ {
+ .address = 0x54,
+ .reg = 0x4F6A,
+ .bit = 4
+ },
+ {
+ .address = 0x58,
+ .reg = 0x4F6A,
+ .bit = 5
+ },
+ {
+ .address = 0x5C,
+ .reg = 0x4F6A,
+ .bit = 3
+ },
+};
+
+static int intel_bxtwc_pmic_get_power(struct regmap *regmap, int reg,
+ int bit, u64 *value)
+{
+ int data;
+
+ if (regmap_read(regmap, reg, &data))
+ return -EIO;
+
+ *value = (data & bit) ? 1 : 0;
+ return 0;
+}
+
+static int intel_bxtwc_pmic_update_power(struct regmap *regmap, int reg,
+ int bit, bool on)
+{
+ u8 val, mask = bit;
+
+ if (on)
+ val = 0xFF;
+ else
+ val = 0x0;
+
+ return regmap_update_bits(regmap, reg, mask, val);
+}
+
+static int intel_bxtwc_pmic_get_raw_temp(struct regmap *regmap, int reg)
+{
+ unsigned int val, adc_val, reg_val;
+ u8 temp_l, temp_h, cursrc;
+ unsigned long rlsb;
+ static const unsigned long rlsb_array[] = {
+ 0, 260420, 130210, 65100, 32550, 16280,
+ 8140, 4070, 2030, 0, 260420, 130210 };
+
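+ /*
+ * The raw result is split across two registers: reg holds the low
+ * byte, reg - 1 the high bits plus the current-source select that
+ * picks the scaling factor (rlsb) applied below.
+ */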
+ if (regmap_read(regmap, reg, &val))
+ return -EIO;
+ temp_l = (u8) val;
+
+ if (regmap_read(regmap, (reg - 1), &val))
+ return -EIO;
+ temp_h = (u8) val;
+
+ reg_val = temp_l | WHISKEY_COVE_ADC_HIGH_BIT(temp_h);
+ cursrc = WHISKEY_COVE_ADC_CURSRC(temp_h);
+ rlsb = rlsb_array[cursrc];
+ adc_val = reg_val * rlsb / 1000;
+
+ return adc_val;
+}
+
+static int
+intel_bxtwc_pmic_update_aux(struct regmap *regmap, int reg, int raw)
+{
+ u32 bsr_num;
+ u16 resi_val, count = 0, thrsh = 0;
+ u8 alrt_h, alrt_l, cursel = 0;
+
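+ /*
+ * Derive a current-source select (cursel) from the magnitude of
+ * the raw alert value, scale the value into a threshold for that
+ * range, then split the packed result across the two alert
+ * registers.
+ */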
+ bsr_num = raw;
+ bsr_num /= (1 << 5);
+
+ count = fls(bsr_num) - 1;
+
+ cursel = clamp_t(s8, (count - 7), 0, 7);
+ thrsh = raw / (1 << (4 + cursel));
+
+ resi_val = (cursel << 9) | thrsh;
+ alrt_h = (resi_val >> 8) & WHISKEY_COVE_ALRT_HIGH_BIT_MASK;
+ if (regmap_update_bits(regmap,
+ reg - 1,
+ WHISKEY_COVE_ALRT_HIGH_BIT_MASK,
+ alrt_h))
+ return -EIO;
+
+ alrt_l = (u8)resi_val;
+ return regmap_write(regmap, reg, alrt_l);
+}
+
+static int
+intel_bxtwc_pmic_get_policy(struct regmap *regmap, int reg, int bit, u64 *value)
+{
+ u8 mask = BIT(bit);
+ unsigned int val;
+
+ if (regmap_read(regmap, reg, &val))
+ return -EIO;
+
+ *value = (val & mask) >> bit;
+ return 0;
+}
+
+static int
+intel_bxtwc_pmic_update_policy(struct regmap *regmap,
+ int reg, int bit, int enable)
+{
+ u8 mask = BIT(bit), val = enable << bit;
+
+ return regmap_update_bits(regmap, reg, mask, val);
+}
+
+static struct intel_pmic_opregion_data intel_bxtwc_pmic_opregion_data = {
+ .get_power = intel_bxtwc_pmic_get_power,
+ .update_power = intel_bxtwc_pmic_update_power,
+ .get_raw_temp = intel_bxtwc_pmic_get_raw_temp,
+ .update_aux = intel_bxtwc_pmic_update_aux,
+ .get_policy = intel_bxtwc_pmic_get_policy,
+ .update_policy = intel_bxtwc_pmic_update_policy,
+ .power_table = power_table,
+ .power_table_count = ARRAY_SIZE(power_table),
+ .thermal_table = thermal_table,
+ .thermal_table_count = ARRAY_SIZE(thermal_table),
+};
+
+static int intel_bxtwc_pmic_opregion_probe(struct platform_device *pdev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
+
+ return intel_pmic_install_opregion_handler(&pdev->dev,
+ ACPI_HANDLE(pdev->dev.parent),
+ pmic->regmap,
+ &intel_bxtwc_pmic_opregion_data);
+}
+
+static struct platform_device_id bxt_wc_opregion_id_table[] = {
+ { .name = "bxt_wcove_region" },
+ {},
+};
+
+static struct platform_driver intel_bxtwc_pmic_opregion_driver = {
+ .probe = intel_bxtwc_pmic_opregion_probe,
+ .driver = {
+ .name = "bxt_whiskey_cove_pmic",
+ },
+ .id_table = bxt_wc_opregion_id_table,
+};
+
+static int __init intel_bxtwc_pmic_opregion_driver_init(void)
+{
+ return platform_driver_register(&intel_bxtwc_pmic_opregion_driver);
+}
+device_initcall(intel_bxtwc_pmic_opregion_driver_init);
diff --git a/drivers/acpi/pmic/intel_pmic_crc.c b/drivers/acpi/pmic/intel_pmic_crc.c
index fcd1852dc..d7f1761ab 100644
--- a/drivers/acpi/pmic/intel_pmic_crc.c
+++ b/drivers/acpi/pmic/intel_pmic_crc.c
@@ -141,7 +141,8 @@ static int intel_crc_pmic_update_aux(struct regmap *regmap, int reg, int raw)
regmap_update_bits(regmap, reg - 1, 0x3, raw >> 8) ? -EIO : 0;
}
-static int intel_crc_pmic_get_policy(struct regmap *regmap, int reg, u64 *value)
+static int intel_crc_pmic_get_policy(struct regmap *regmap,
+ int reg, int bit, u64 *value)
{
int pen;
@@ -152,7 +153,7 @@ static int intel_crc_pmic_get_policy(struct regmap *regmap, int reg, u64 *value)
}
static int intel_crc_pmic_update_policy(struct regmap *regmap,
- int reg, int enable)
+ int reg, int bit, int enable)
{
int alert0;
diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
index 6a082d4de..e6e991ac2 100644
--- a/drivers/acpi/pmic/intel_pmic_xpower.c
+++ b/drivers/acpi/pmic/intel_pmic_xpower.c
@@ -13,7 +13,7 @@
* GNU General Public License for more details.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/mfd/axp20x.h>
#include <linux/regmap.h>
@@ -262,7 +262,4 @@ static int __init intel_xpower_pmic_opregion_driver_init(void)
{
return platform_driver_register(&intel_xpower_pmic_opregion_driver);
}
-module_init(intel_xpower_pmic_opregion_driver_init);
-
-MODULE_DESCRIPTION("XPower AXP288 ACPI operation region driver");
-MODULE_LICENSE("GPL");
+device_initcall(intel_xpower_pmic_opregion_driver_init);
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 33a38d604..9125d7d96 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -108,13 +108,12 @@ static int map_gicc_mpidr(struct acpi_subtable_header *entry,
return -EINVAL;
}
-static phys_cpuid_t map_madt_entry(int type, u32 acpi_id)
+static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
+ int type, u32 acpi_id)
{
unsigned long madt_end, entry;
phys_cpuid_t phys_id = PHYS_CPUID_INVALID; /* CPU hardware ID */
- struct acpi_table_madt *madt;
- madt = get_madt_table();
if (!madt)
return phys_id;
@@ -145,6 +144,25 @@ static phys_cpuid_t map_madt_entry(int type, u32 acpi_id)
return phys_id;
}
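+/*
+ * Early-boot variant of map_madt_entry(): maps the MADT itself and
+ * unmaps it again after the lookup, since the cached table from
+ * get_madt_table() may not be available yet.
+ */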
+phys_cpuid_t __init acpi_map_madt_entry(u32 acpi_id)
+{
+ struct acpi_table_madt *madt = NULL;
+ acpi_size tbl_size;
+ phys_cpuid_t rv;
+
+ acpi_get_table_with_size(ACPI_SIG_MADT, 0,
+ (struct acpi_table_header **)&madt,
+ &tbl_size);
+ if (!madt)
+ return PHYS_CPUID_INVALID;
+
+ rv = map_madt_entry(madt, 1, acpi_id);
+
+ early_acpi_os_unmap_memory(madt, tbl_size);
+
+ return rv;
+}
+
static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -185,7 +203,7 @@ phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
phys_id = map_mat_entry(handle, type, acpi_id);
if (invalid_phys_cpuid(phys_id))
- phys_id = map_madt_entry(type, acpi_id);
+ phys_id = map_madt_entry(get_madt_table(), type, acpi_id);
return phys_id;
}
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index d2fa8cb82..0553aeebb 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -90,7 +90,7 @@ static void acpi_processor_notify(acpi_handle handle, u32 event, void *data)
pr->performance_platform_limit);
break;
case ACPI_PROCESSOR_NOTIFY_POWER:
- acpi_processor_cst_has_changed(pr);
+ acpi_processor_power_state_has_changed(pr);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
break;
@@ -118,12 +118,13 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
struct acpi_device *device;
action &= ~CPU_TASKS_FROZEN;
- /*
- * CPU_STARTING and CPU_DYING must not sleep. Return here since
- * acpi_bus_get_device() may sleep.
- */
- if (action == CPU_STARTING || action == CPU_DYING)
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_DEAD:
+ break;
+ default:
return NOTIFY_DONE;
+ }
if (!pr || acpi_bus_get_device(pr->handle, &device))
return NOTIFY_DONE;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 444e3745c..cea52528a 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -59,6 +59,12 @@ module_param(latency_factor, uint, 0644);
static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);
+struct cpuidle_driver acpi_idle_driver = {
+ .name = "acpi_idle",
+ .owner = THIS_MODULE,
+};
+
+#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
static
DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);
@@ -297,7 +303,6 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *cst;
-
if (nocst)
return -ENODEV;
@@ -570,7 +575,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
return (working);
}
-static int acpi_processor_get_power_info(struct acpi_processor *pr)
+static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
unsigned int i;
int result;
@@ -804,36 +809,12 @@ static void acpi_idle_enter_freeze(struct cpuidle_device *dev,
acpi_idle_do_entry(cx);
}
-struct cpuidle_driver acpi_idle_driver = {
- .name = "acpi_idle",
- .owner = THIS_MODULE,
-};
-
-/**
- * acpi_processor_setup_cpuidle_cx - prepares and configures CPUIDLE
- * device i.e. per-cpu data
- *
- * @pr: the ACPI processor
- * @dev : the cpuidle device
- */
static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
struct cpuidle_device *dev)
{
int i, count = CPUIDLE_DRIVER_STATE_START;
struct acpi_processor_cx *cx;
- if (!pr->flags.power_setup_done)
- return -EINVAL;
-
- if (pr->flags.power == 0) {
- return -EINVAL;
- }
-
- if (!dev)
- return -EINVAL;
-
- dev->cpu = pr->id;
-
if (max_cstate == 0)
max_cstate = 1;
@@ -856,31 +837,13 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
return 0;
}
-/**
- * acpi_processor_setup_cpuidle states- prepares and configures cpuidle
- * global state data i.e. idle routines
- *
- * @pr: the ACPI processor
- */
-static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
+static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
int i, count = CPUIDLE_DRIVER_STATE_START;
struct acpi_processor_cx *cx;
struct cpuidle_state *state;
struct cpuidle_driver *drv = &acpi_idle_driver;
- if (!pr->flags.power_setup_done)
- return -EINVAL;
-
- if (pr->flags.power == 0)
- return -EINVAL;
-
- drv->safe_state_index = -1;
- for (i = CPUIDLE_DRIVER_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
- drv->states[i].name[0] = '\0';
- drv->states[i].desc[0] = '\0';
- }
-
if (max_cstate == 0)
max_cstate = 1;
@@ -892,7 +855,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
state = &drv->states[count];
snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
- strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
+ strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
state->exit_latency = cx->latency;
state->target_residency = cx->latency * latency_factor;
state->enter = acpi_idle_enter;
@@ -925,6 +888,450 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
return 0;
}
+static inline void acpi_processor_cstate_first_run_checks(void)
+{
+ acpi_status status;
+ static int first_run;
+
+ if (first_run)
+ return;
+ dmi_check_system(processor_power_dmi_table);
+ max_cstate = acpi_processor_cstate_check(max_cstate);
+ if (max_cstate < ACPI_C_STATES_MAX)
+ pr_notice("ACPI: processor limited to max C-state %d\n",
+ max_cstate);
+ first_run++;
+
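+ /* tell the platform firmware, via SMI, that the OS supports _CST */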
+ if (acpi_gbl_FADT.cst_control && !nocst) {
+ status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
+ acpi_gbl_FADT.cst_control, 8);
+ if (ACPI_FAILURE(status))
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Notifying BIOS of _CST ability failed"));
+ }
+}
+#else
+
+static inline int disabled_by_idle_boot_param(void) { return 0; }
+static inline void acpi_processor_cstate_first_run_checks(void) { }
+static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
+{
+ return -ENODEV;
+}
+
+static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
+ struct cpuidle_device *dev)
+{
+ return -EINVAL;
+}
+
+static int acpi_processor_setup_cstates(struct acpi_processor *pr)
+{
+ return -EINVAL;
+}
+
+#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */
+
+struct acpi_lpi_states_array {
+ unsigned int size;
+ unsigned int composite_states_size;
+ struct acpi_lpi_state *entries;
+ struct acpi_lpi_state *composite_states[ACPI_PROCESSOR_MAX_POWER];
+};
+
+static int obj_get_integer(union acpi_object *obj, u32 *value)
+{
+ if (obj->type != ACPI_TYPE_INTEGER)
+ return -EINVAL;
+
+ *value = obj->integer.value;
+ return 0;
+}
+
+static int acpi_processor_evaluate_lpi(acpi_handle handle,
+ struct acpi_lpi_states_array *info)
+{
+ acpi_status status;
+ int ret = 0;
+ int pkg_count, state_idx = 1, loop;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *lpi_data;
+ struct acpi_lpi_state *lpi_state;
+
+ status = acpi_evaluate_object(handle, "_LPI", NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _LPI, giving up\n"));
+ return -ENODEV;
+ }
+
+ lpi_data = buffer.pointer;
+
+ /* There must be at least 4 elements = 3 elements + 1 package */
+ if (!lpi_data || lpi_data->type != ACPI_TYPE_PACKAGE ||
+ lpi_data->package.count < 4) {
+ pr_debug("not enough elements in _LPI\n");
+ ret = -ENODATA;
+ goto end;
+ }
+
+ pkg_count = lpi_data->package.elements[2].integer.value;
+
+ /* Validate number of power states. */
+ if (pkg_count < 1 || pkg_count != lpi_data->package.count - 3) {
+ pr_debug("count given by _LPI is not valid\n");
+ ret = -ENODATA;
+ goto end;
+ }
+
+ lpi_state = kcalloc(pkg_count, sizeof(*lpi_state), GFP_KERNEL);
+ if (!lpi_state) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ info->size = pkg_count;
+ info->entries = lpi_state;
+
+ /* LPI States start at index 3 */
+ for (loop = 3; state_idx <= pkg_count; loop++, state_idx++, lpi_state++) {
+ union acpi_object *element, *pkg_elem, *obj;
+
+ element = &lpi_data->package.elements[loop];
+ if (element->type != ACPI_TYPE_PACKAGE || element->package.count < 7)
+ continue;
+
+ pkg_elem = element->package.elements;
+
+ obj = pkg_elem + 6;
+ if (obj->type == ACPI_TYPE_BUFFER) {
+ struct acpi_power_register *reg;
+
+ reg = (struct acpi_power_register *)obj->buffer.pointer;
+ if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
+ reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)
+ continue;
+
+ lpi_state->address = reg->address;
+ lpi_state->entry_method =
+ reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE ?
+ ACPI_CSTATE_FFH : ACPI_CSTATE_SYSTEMIO;
+ } else if (obj->type == ACPI_TYPE_INTEGER) {
+ lpi_state->entry_method = ACPI_CSTATE_INTEGER;
+ lpi_state->address = obj->integer.value;
+ } else {
+ continue;
+ }
+
+ /* elements[7,8] skipped for now, i.e. Residency/Usage counter */
+
+ obj = pkg_elem + 9;
+ if (obj->type == ACPI_TYPE_STRING)
+ strlcpy(lpi_state->desc, obj->string.pointer,
+ ACPI_CX_DESC_LEN);
+
+ lpi_state->index = state_idx;
+ if (obj_get_integer(pkg_elem + 0, &lpi_state->min_residency)) {
+ pr_debug("No min. residency found, assuming 10 us\n");
+ lpi_state->min_residency = 10;
+ }
+
+ if (obj_get_integer(pkg_elem + 1, &lpi_state->wake_latency)) {
+ pr_debug("No wakeup residency found, assuming 10 us\n");
+ lpi_state->wake_latency = 10;
+ }
+
+ if (obj_get_integer(pkg_elem + 2, &lpi_state->flags))
+ lpi_state->flags = 0;
+
+ if (obj_get_integer(pkg_elem + 3, &lpi_state->arch_flags))
+ lpi_state->arch_flags = 0;
+
+ if (obj_get_integer(pkg_elem + 4, &lpi_state->res_cnt_freq))
+ lpi_state->res_cnt_freq = 1;
+
+ if (obj_get_integer(pkg_elem + 5, &lpi_state->enable_parent_state))
+ lpi_state->enable_parent_state = 0;
+ }
+
+ acpi_handle_debug(handle, "Found %d power states\n", state_idx - 1);
+end:
+ kfree(buffer.pointer);
+ return ret;
+}
+
+/*
+ * flat_state_cnt - the number of composite LPI states after flattening
+ */
+static int flat_state_cnt;
+
+/**
+ * combine_lpi_states - combine local and parent LPI states to form a composite LPI state
+ *
+ * @local: local LPI state
+ * @parent: parent LPI state
+ * @result: composite LPI state
+ */
+static bool combine_lpi_states(struct acpi_lpi_state *local,
+ struct acpi_lpi_state *parent,
+ struct acpi_lpi_state *result)
+{
+ if (parent->entry_method == ACPI_CSTATE_INTEGER) {
+ if (!parent->address) /* 0 means autopromotable */
+ return false;
+ result->address = local->address + parent->address;
+ } else {
+ result->address = parent->address;
+ }
+
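+ /*
+ * Both levels must be resident for the composite state, so take
+ * the larger minimum residency and the sum of the wakeup latencies.
+ */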
+ result->min_residency = max(local->min_residency, parent->min_residency);
+ result->wake_latency = local->wake_latency + parent->wake_latency;
+ result->enable_parent_state = parent->enable_parent_state;
+ result->entry_method = local->entry_method;
+
+ result->flags = parent->flags;
+ result->arch_flags = parent->arch_flags;
+ result->index = parent->index;
+
+ strlcpy(result->desc, local->desc, ACPI_CX_DESC_LEN);
+ strlcat(result->desc, "+", ACPI_CX_DESC_LEN);
+ strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN);
+ return true;
+}
+
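+/* Bit 0 of an LPI state's flags indicates whether the state is enabled */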
+#define ACPI_LPI_STATE_FLAGS_ENABLED BIT(0)
+
+static void stash_composite_state(struct acpi_lpi_states_array *curr_level,
+ struct acpi_lpi_state *t)
+{
+ curr_level->composite_states[curr_level->composite_states_size++] = t;
+}
+
+static int flatten_lpi_states(struct acpi_processor *pr,
+ struct acpi_lpi_states_array *curr_level,
+ struct acpi_lpi_states_array *prev_level)
+{
+ int i, j, state_count = curr_level->size;
+ struct acpi_lpi_state *p, *t = curr_level->entries;
+
+ curr_level->composite_states_size = 0;
+ for (j = 0; j < state_count; j++, t++) {
+ struct acpi_lpi_state *flpi;
+
+ if (!(t->flags & ACPI_LPI_STATE_FLAGS_ENABLED))
+ continue;
+
+ if (flat_state_cnt >= ACPI_PROCESSOR_MAX_POWER) {
+ pr_warn("Limiting number of LPI states to max (%d)\n",
+ ACPI_PROCESSOR_MAX_POWER);
+ pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
+ break;
+ }
+
+ flpi = &pr->power.lpi_states[flat_state_cnt];
+
+ if (!prev_level) { /* leaf/processor node */
+ memcpy(flpi, t, sizeof(*t));
+ stash_composite_state(curr_level, flpi);
+ flat_state_cnt++;
+ continue;
+ }
+
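+ /*
+ * Combine this state with each composite state from the previous
+ * level whose enable_parent_state allows it.
+ */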
+ for (i = 0; i < prev_level->composite_states_size; i++) {
+ p = prev_level->composite_states[i];
+ if (t->index <= p->enable_parent_state &&
+ combine_lpi_states(p, t, flpi)) {
+ stash_composite_state(curr_level, flpi);
+ flat_state_cnt++;
+ flpi++;
+ }
+ }
+ }
+
+ kfree(curr_level->entries);
+ return 0;
+}
+
+static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
+{
+ int ret, i;
+ acpi_status status;
+ acpi_handle handle = pr->handle, pr_ahandle;
+ struct acpi_device *d = NULL;
+ struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;
+
+ if (!osc_pc_lpi_support_confirmed)
+ return -EOPNOTSUPP;
+
+ if (!acpi_has_method(handle, "_LPI"))
+ return -EINVAL;
+
+ flat_state_cnt = 0;
+ prev = &info[0];
+ curr = &info[1];
+ handle = pr->handle;
+ ret = acpi_processor_evaluate_lpi(handle, prev);
+ if (ret)
+ return ret;
+ flatten_lpi_states(pr, prev, NULL);
+
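+ /* Walk up the processor-container hierarchy, flattening each level */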
+ status = acpi_get_parent(handle, &pr_ahandle);
+ while (ACPI_SUCCESS(status)) {
+ acpi_bus_get_device(pr_ahandle, &d);
+ handle = pr_ahandle;
+
+ if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID))
+ break;
+
+ /* can _LPI be optional here? */
+ if (!acpi_has_method(handle, "_LPI"))
+ break;
+
+ ret = acpi_processor_evaluate_lpi(handle, curr);
+ if (ret)
+ break;
+
+ /* flatten all the LPI states in this level of the hierarchy */
+ flatten_lpi_states(pr, curr, prev);
+
+ tmp = prev, prev = curr, curr = tmp;
+
+ status = acpi_get_parent(handle, &pr_ahandle);
+ }
+
+ pr->power.count = flat_state_cnt;
+ /* reset the index after flattening */
+ for (i = 0; i < pr->power.count; i++)
+ pr->power.lpi_states[i].index = i;
+
+ /* Tell driver that _LPI is supported. */
+ pr->flags.has_lpi = 1;
+ pr->flags.power = 1;
+
+ return 0;
+}
+
+int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
+{
+ return -ENODEV;
+}
+
+int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
+{
+ return -ENODEV;
+}
+
+/**
+ * acpi_idle_lpi_enter - enters an ACPI LPI state
+ * @dev: the target CPU
+ * @drv: cpuidle driver containing cpuidle state info
+ * @index: index of target state
+ *
+ * Return: 0 for success or negative value for error
+ */
+static int acpi_idle_lpi_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ struct acpi_processor *pr;
+ struct acpi_lpi_state *lpi;
+
+ pr = __this_cpu_read(processors);
+
+ if (unlikely(!pr))
+ return -EINVAL;
+
+ lpi = &pr->power.lpi_states[index];
+ if (lpi->entry_method == ACPI_CSTATE_FFH)
+ return acpi_processor_ffh_lpi_enter(lpi);
+
+ return -EINVAL;
+}
+
+static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
+{
+ int i;
+ struct acpi_lpi_state *lpi;
+ struct cpuidle_state *state;
+ struct cpuidle_driver *drv = &acpi_idle_driver;
+
+ if (!pr->flags.has_lpi)
+ return -EOPNOTSUPP;
+
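+ /* Expose one cpuidle state per flattened composite LPI state */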
+ for (i = 0; i < pr->power.count && i < CPUIDLE_STATE_MAX; i++) {
+ lpi = &pr->power.lpi_states[i];
+
+ state = &drv->states[i];
+ snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i);
+ strlcpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN);
+ state->exit_latency = lpi->wake_latency;
+ state->target_residency = lpi->min_residency;
+ if (lpi->arch_flags)
+ state->flags |= CPUIDLE_FLAG_TIMER_STOP;
+ state->enter = acpi_idle_lpi_enter;
+ drv->safe_state_index = i;
+ }
+
+ drv->state_count = i;
+
+ return 0;
+}
+
+/**
+ * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
+ * global state data, i.e. idle routines
+ *
+ * @pr: the ACPI processor
+ */
+static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
+{
+ int i;
+ struct cpuidle_driver *drv = &acpi_idle_driver;
+
+ if (!pr->flags.power_setup_done || !pr->flags.power)
+ return -EINVAL;
+
+ drv->safe_state_index = -1;
+ for (i = CPUIDLE_DRIVER_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
+ drv->states[i].name[0] = '\0';
+ drv->states[i].desc[0] = '\0';
+ }
+
+ if (pr->flags.has_lpi)
+ return acpi_processor_setup_lpi_states(pr);
+
+ return acpi_processor_setup_cstates(pr);
+}
+
+/**
+ * acpi_processor_setup_cpuidle_dev - prepares and configures CPUIDLE
+ * device, i.e. per-cpu data
+ *
+ * @pr: the ACPI processor
+ * @dev: the cpuidle device
+ */
+static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr,
+ struct cpuidle_device *dev)
+{
+ if (!pr->flags.power_setup_done || !pr->flags.power || !dev)
+ return -EINVAL;
+
+ dev->cpu = pr->id;
+ if (pr->flags.has_lpi)
+ return acpi_processor_ffh_lpi_probe(pr->id);
+
+ return acpi_processor_setup_cpuidle_cx(pr, dev);
+}
+
+static int acpi_processor_get_power_info(struct acpi_processor *pr)
+{
+ int ret;
+
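+ /* Prefer _LPI; fall back to the traditional _CST C-states */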
+ ret = acpi_processor_get_lpi_info(pr);
+ if (ret)
+ ret = acpi_processor_get_cstate_info(pr);
+
+ return ret;
+}
+
int acpi_processor_hotplug(struct acpi_processor *pr)
{
int ret = 0;
@@ -933,18 +1340,15 @@ int acpi_processor_hotplug(struct acpi_processor *pr)
if (disabled_by_idle_boot_param())
return 0;
- if (nocst)
- return -ENODEV;
-
if (!pr->flags.power_setup_done)
return -ENODEV;
dev = per_cpu(acpi_cpuidle_device, pr->id);
cpuidle_pause_and_lock();
cpuidle_disable_device(dev);
- acpi_processor_get_power_info(pr);
- if (pr->flags.power) {
- acpi_processor_setup_cpuidle_cx(pr, dev);
+ ret = acpi_processor_get_power_info(pr);
+ if (!ret && pr->flags.power) {
+ acpi_processor_setup_cpuidle_dev(pr, dev);
ret = cpuidle_enable_device(dev);
}
cpuidle_resume_and_unlock();
@@ -952,7 +1356,7 @@ int acpi_processor_hotplug(struct acpi_processor *pr)
return ret;
}
-int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
{
int cpu;
struct acpi_processor *_pr;
@@ -961,9 +1365,6 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
if (disabled_by_idle_boot_param())
return 0;
- if (nocst)
- return -ENODEV;
-
if (!pr->flags.power_setup_done)
return -ENODEV;
@@ -1000,7 +1401,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
acpi_processor_get_power_info(_pr);
if (_pr->flags.power) {
dev = per_cpu(acpi_cpuidle_device, cpu);
- acpi_processor_setup_cpuidle_cx(_pr, dev);
+ acpi_processor_setup_cpuidle_dev(_pr, dev);
cpuidle_enable_device(dev);
}
}
@@ -1015,35 +1416,16 @@ static int acpi_processor_registered;
int acpi_processor_power_init(struct acpi_processor *pr)
{
- acpi_status status;
int retval;
struct cpuidle_device *dev;
- static int first_run;
if (disabled_by_idle_boot_param())
return 0;
- if (!first_run) {
- dmi_check_system(processor_power_dmi_table);
- max_cstate = acpi_processor_cstate_check(max_cstate);
- if (max_cstate < ACPI_C_STATES_MAX)
- printk(KERN_NOTICE
- "ACPI: processor limited to max C-state %d\n",
- max_cstate);
- first_run++;
- }
-
- if (acpi_gbl_FADT.cst_control && !nocst) {
- status =
- acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Notifying BIOS of _CST ability failed"));
- }
- }
+ acpi_processor_cstate_first_run_checks();
- acpi_processor_get_power_info(pr);
- pr->flags.power_setup_done = 1;
+ if (!acpi_processor_get_power_info(pr))
+ pr->flags.power_setup_done = 1;
/*
* Install the idle handler if processor power management is supported.
@@ -1066,7 +1448,7 @@ int acpi_processor_power_init(struct acpi_processor *pr)
return -ENOMEM;
per_cpu(acpi_cpuidle_device, pr->id) = dev;
- acpi_processor_setup_cpuidle_cx(pr, dev);
+ acpi_processor_setup_cpuidle_dev(pr, dev);
/* Register per-cpu cpuidle_device. Cpuidle driver
* must already be registered before registering device
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index f3c022292..e878fc799 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -46,6 +46,13 @@ DEFINE_MUTEX(acpi_device_lock);
LIST_HEAD(acpi_wakeup_device_list);
static DEFINE_MUTEX(acpi_hp_context_lock);
+/*
+ * The UART device described by the SPCR table is the only object which needs
+ * special-casing. Everything else is covered by ACPI namespace paths in the
+ * STAO table.
+ */
+static u64 spcr_uart_addr;
+
struct acpi_dep_data {
struct list_head node;
acpi_handle master;
@@ -494,6 +501,8 @@ static void acpi_device_del(struct acpi_device *device)
device_del(&device->dev);
}
+static BLOCKING_NOTIFIER_HEAD(acpi_reconfig_chain);
+
static LIST_HEAD(acpi_device_del_list);
static DEFINE_MUTEX(acpi_device_del_lock);
@@ -514,6 +523,9 @@ static void acpi_device_del_work_fn(struct work_struct *work_not_used)
mutex_unlock(&acpi_device_del_lock);
+ blocking_notifier_call_chain(&acpi_reconfig_chain,
+ ACPI_RECONFIG_DEVICE_REMOVE, adev);
+
acpi_device_del(adev);
/*
* Drop references to all power resources that might have been
@@ -1406,7 +1418,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
acpi_bus_get_flags(device);
device->flags.match_driver = false;
device->flags.initialized = true;
- device->flags.visited = false;
+ acpi_device_clear_enumerated(device);
device_initialize(&device->dev);
dev_set_uevent_suppress(&device->dev, true);
acpi_init_coherency(device);
@@ -1453,6 +1465,41 @@ static int acpi_add_single_object(struct acpi_device **child,
return 0;
}
+static acpi_status acpi_get_resource_memory(struct acpi_resource *ares,
+ void *context)
+{
+ struct resource *res = context;
+
+ if (acpi_dev_resource_memory(ares, res))
+ return AE_CTRL_TERMINATE;
+
+ return AE_OK;
+}
+
+static bool acpi_device_should_be_hidden(acpi_handle handle)
+{
+ acpi_status status;
+ struct resource res;
+
+ /* Check whether the UART device should be ignored */
+ if (!(spcr_uart_addr && acpi_has_method(handle, METHOD_NAME__CRS)))
+ return false;
+
+ /*
+ * The UART device described in the SPCR table is assumed to have only
+ * one memory resource, so we only look for the first one here.
+ */
+ status = acpi_walk_resources(handle, METHOD_NAME__CRS,
+ acpi_get_resource_memory, &res);
+ if (ACPI_FAILURE(status) || res.start != spcr_uart_addr)
+ return false;
+
+ acpi_handle_info(handle, "The UART device @%pa in SPCR table will be hidden\n",
+ &res.start);
+
+ return true;
+}
+
static int acpi_bus_type_and_status(acpi_handle handle, int *type,
unsigned long long *sta)
{
@@ -1466,6 +1513,9 @@ static int acpi_bus_type_and_status(acpi_handle handle, int *type,
switch (acpi_type) {
case ACPI_TYPE_ANY: /* for ACPI_ROOT_OBJECT */
case ACPI_TYPE_DEVICE:
+ if (acpi_device_should_be_hidden(handle))
+ return -ENODEV;
+
*type = ACPI_BUS_TYPE_DEVICE;
status = acpi_bus_get_status_handle(handle, sta);
if (ACPI_FAILURE(status))
@@ -1676,15 +1726,20 @@ static void acpi_default_enumeration(struct acpi_device *device)
bool is_spi_i2c_slave = false;
/*
- * Do not enemerate SPI/I2C slaves as they will be enuerated by their
+ * Do not enumerate SPI/I2C slaves as they will be enumerated by their
* respective parents.
*/
INIT_LIST_HEAD(&resource_list);
acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
&is_spi_i2c_slave);
acpi_dev_free_resource_list(&resource_list);
- if (!is_spi_i2c_slave)
+ if (!is_spi_i2c_slave) {
acpi_create_platform_device(device);
+ acpi_device_set_enumerated(device);
+ } else {
+ blocking_notifier_call_chain(&acpi_reconfig_chain,
+ ACPI_RECONFIG_DEVICE_ADD, device);
+ }
}
static const struct acpi_device_id generic_device_ids[] = {
@@ -1751,7 +1806,7 @@ static void acpi_bus_attach(struct acpi_device *device)
acpi_bus_get_status(device);
/* Skip devices that are not present. */
if (!acpi_device_is_present(device)) {
- device->flags.visited = false;
+ acpi_device_clear_enumerated(device);
device->flags.power_manageable = 0;
return;
}
@@ -1766,7 +1821,7 @@ static void acpi_bus_attach(struct acpi_device *device)
device->flags.initialized = true;
}
- device->flags.visited = false;
+
ret = acpi_scan_attach_handler(device);
if (ret < 0)
return;
@@ -1780,7 +1835,6 @@ static void acpi_bus_attach(struct acpi_device *device)
if (!ret && device->pnp.type.platform_id)
acpi_default_enumeration(device);
}
- device->flags.visited = true;
ok:
list_for_each_entry(child, &device->children, node)
@@ -1872,7 +1926,7 @@ void acpi_bus_trim(struct acpi_device *adev)
*/
acpi_device_set_power(adev, ACPI_STATE_D3_COLD);
adev->flags.initialized = false;
- adev->flags.visited = false;
+ acpi_device_clear_enumerated(adev);
}
EXPORT_SYMBOL_GPL(acpi_bus_trim);
@@ -1916,9 +1970,26 @@ static int acpi_bus_scan_fixed(void)
return result < 0 ? result : 0;
}
+static void __init acpi_get_spcr_uart_addr(void)
+{
+ acpi_status status;
+ struct acpi_table_spcr *spcr_ptr;
+
+ status = acpi_get_table(ACPI_SIG_SPCR, 0,
+ (struct acpi_table_header **)&spcr_ptr);
+ if (ACPI_SUCCESS(status))
+ spcr_uart_addr = spcr_ptr->serial_port.address;
+ else
+ printk(KERN_WARNING PREFIX "STAO table present, but SPCR is missing\n");
+}
+
+static bool acpi_scan_initialized;
+
int __init acpi_scan_init(void)
{
int result;
+ acpi_status status;
+ struct acpi_table_stao *stao_ptr;
acpi_pci_root_init();
acpi_pci_link_init();
@@ -1934,6 +2005,20 @@ int __init acpi_scan_init(void)
acpi_scan_add_handler(&generic_device_handler);
+ /*
+ * If there is an STAO table, check whether it asks us to ignore the
+ * UART device described by the SPCR table.
+ */
+ status = acpi_get_table(ACPI_SIG_STAO, 0,
+ (struct acpi_table_header **)&stao_ptr);
+ if (ACPI_SUCCESS(status)) {
+ if (stao_ptr->header.length > sizeof(struct acpi_table_stao))
+ printk(KERN_INFO PREFIX "STAO Name List not yet supported.");
+
+ if (stao_ptr->ignore_uart)
+ acpi_get_spcr_uart_addr();
+ }
+
mutex_lock(&acpi_scan_lock);
/*
* Enumerate devices in the ACPI namespace.
@@ -1960,6 +2045,8 @@ int __init acpi_scan_init(void)
acpi_update_all_gpes();
+ acpi_scan_initialized = true;
+
out:
mutex_unlock(&acpi_scan_lock);
return result;
@@ -2003,3 +2090,57 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
return count;
}
+
+struct acpi_table_events_work {
+ struct work_struct work;
+ void *table;
+ u32 event;
+};
+
+static void acpi_table_events_fn(struct work_struct *work)
+{
+ struct acpi_table_events_work *tew;
+
+ tew = container_of(work, struct acpi_table_events_work, work);
+
+ if (tew->event == ACPI_TABLE_EVENT_LOAD) {
+ acpi_scan_lock_acquire();
+ acpi_bus_scan(ACPI_ROOT_OBJECT);
+ acpi_scan_lock_release();
+ }
+
+ kfree(tew);
+}
+
+void acpi_scan_table_handler(u32 event, void *table, void *context)
+{
+ struct acpi_table_events_work *tew;
+
+ if (!acpi_scan_initialized)
+ return;
+
+ if (event != ACPI_TABLE_EVENT_LOAD)
+ return;
+
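+ /* Defer the namespace rescan for the newly loaded table to process context */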
+ tew = kmalloc(sizeof(*tew), GFP_KERNEL);
+ if (!tew)
+ return;
+
+ INIT_WORK(&tew->work, acpi_table_events_fn);
+ tew->table = table;
+ tew->event = event;
+
+ schedule_work(&tew->work);
+}
+
+int acpi_reconfig_notifier_register(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&acpi_reconfig_chain, nb);
+}
+EXPORT_SYMBOL(acpi_reconfig_notifier_register);
+
+int acpi_reconfig_notifier_unregister(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&acpi_reconfig_chain, nb);
+}
+EXPORT_SYMBOL(acpi_reconfig_notifier_unregister);
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 7a2e4d45b..2b38c1bb0 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -47,15 +47,32 @@ static void acpi_sleep_tts_switch(u32 acpi_state)
}
}
-static int tts_notify_reboot(struct notifier_block *this,
+static void acpi_sleep_pts_switch(u32 acpi_state)
+{
+ acpi_status status;
+
+ status = acpi_execute_simple_method(NULL, "\\_PTS", acpi_state);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ /*
+ * The OS failed to evaluate the _PTS object. Print a warning,
+ * but carry on; this is not fatal.
+ */
+ printk(KERN_NOTICE "Failure in evaluating _PTS object\n");
+ }
+}
+
+static int sleep_notify_reboot(struct notifier_block *this,
unsigned long code, void *x)
{
acpi_sleep_tts_switch(ACPI_STATE_S5);
+
+ acpi_sleep_pts_switch(ACPI_STATE_S5);
+
return NOTIFY_DONE;
}
-static struct notifier_block tts_notifier = {
- .notifier_call = tts_notify_reboot,
+static struct notifier_block sleep_notifier = {
+ .notifier_call = sleep_notify_reboot,
.next = NULL,
.priority = 0,
};
@@ -899,9 +916,9 @@ int __init acpi_sleep_init(void)
pr_info(PREFIX "(supports%s)\n", supported);
/*
- * Register the tts_notifier to reboot notifier list so that the _TTS
- * object can also be evaluated when the system enters S5.
+ * Register the sleep_notifier on the reboot notifier list so that the
+ * _TTS and _PTS objects can also be evaluated when the system enters S5.
*/
- register_reboot_notifier(&tts_notifier);
+ register_reboot_notifier(&sleep_notifier);
return 0;
}
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 4b3a9e27f..358165e9f 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -378,8 +378,7 @@ static void acpi_table_attr_init(struct acpi_table_attr *table_attr,
return;
}
-static acpi_status
-acpi_sysfs_table_handler(u32 event, void *table, void *context)
+acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
{
struct acpi_table_attr *table_attr;
@@ -452,9 +451,8 @@ static int acpi_tables_sysfs_init(void)
kobject_uevent(tables_kobj, KOBJ_ADD);
kobject_uevent(dynamic_tables_kobj, KOBJ_ADD);
- status = acpi_install_table_handler(acpi_sysfs_table_handler, NULL);
- return ACPI_FAILURE(status) ? -EINVAL : 0;
+ return 0;
err_dynamic_tables:
kobject_put(tables_kobj);
err:
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index a372f9eaa..9f0ad6ebb 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -34,6 +34,8 @@
#include <linux/bootmem.h>
#include <linux/earlycpio.h>
#include <linux/memblock.h>
+#include <linux/initrd.h>
+#include <linux/acpi.h>
#include "internal.h"
#ifdef CONFIG_ACPI_CUSTOM_DSDT
@@ -481,8 +483,10 @@ static DECLARE_BITMAP(acpi_initrd_installed, NR_ACPI_INITRD_TABLES);
#define MAP_CHUNK_SIZE (NR_FIX_BTMAPS << PAGE_SHIFT)
-static void __init acpi_table_initrd_init(void *data, size_t size)
+void __init acpi_table_upgrade(void)
{
+ void *data = (void *)initrd_start;
+ size_t size = initrd_end - initrd_start;
int sig, no, table_nr = 0, total_offset = 0;
long offset = 0;
struct acpi_table_header *table;
@@ -540,7 +544,7 @@ static void __init acpi_table_initrd_init(void *data, size_t size)
return;
acpi_tables_addr =
- memblock_find_in_range(0, max_low_pfn_mapped << PAGE_SHIFT,
+ memblock_find_in_range(0, ACPI_TABLE_UPGRADE_MAX_PHYS,
all_tables_size, PAGE_SIZE);
if (!acpi_tables_addr) {
WARN_ON(1);
@@ -578,10 +582,10 @@ static void __init acpi_table_initrd_init(void *data, size_t size)
clen = size;
if (clen > MAP_CHUNK_SIZE - slop)
clen = MAP_CHUNK_SIZE - slop;
- dest_p = early_ioremap(dest_addr & PAGE_MASK,
- clen + slop);
+ dest_p = early_memremap(dest_addr & PAGE_MASK,
+ clen + slop);
memcpy(dest_p + slop, src_p, clen);
- early_iounmap(dest_p, clen + slop);
+ early_memunmap(dest_p, clen + slop);
src_p += clen;
dest_addr += clen;
size -= clen;
@@ -696,10 +700,6 @@ next_table:
}
}
#else
-static void __init acpi_table_initrd_init(void *data, size_t size)
-{
-}
-
static acpi_status
acpi_table_initrd_override(struct acpi_table_header *existing_table,
acpi_physical_address *address,
@@ -742,11 +742,6 @@ acpi_os_table_override(struct acpi_table_header *existing_table,
return AE_OK;
}
-void __init early_acpi_table_init(void *data, size_t size)
-{
- acpi_table_initrd_init(data, size);
-}
-
/*
* acpi_table_init()
*
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 82707f982..f4ebe3953 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -1259,7 +1259,8 @@ static int __init acpi_thermal_init(void)
return -ENODEV;
}
- acpi_thermal_pm_queue = create_workqueue("acpi_thermal_pm");
+ acpi_thermal_pm_queue = alloc_workqueue("acpi_thermal_pm",
+ WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
if (!acpi_thermal_pm_queue)
return -ENODEV;
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 3d1327615..a6b36fc53 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -167,6 +167,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
},
},
+ {
+ .callback = video_detect_force_video,
+ .ident = "ThinkPad X201T",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201T"),
+ },
+ },
/* The native backlight controls do not work on some older machines */
{
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index e2dc4c045..2c8be74f4 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -98,12 +98,12 @@ config SATA_AHCI_PLATFORM
If unsure, say N.
-config AHCI_BRCMSTB
- tristate "Broadcom STB AHCI SATA support"
- depends on ARCH_BRCMSTB || BMIPS_GENERIC
+config AHCI_BRCM
+ tristate "Broadcom AHCI SATA support"
+ depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_NSP
help
This option enables support for the AHCI SATA3 controller found on
- STB SoC's.
+ Broadcom SoCs.
If unsure, say N.
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 0b2afb7e5..a46e6b784 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
obj-$(CONFIG_SATA_SIL24) += sata_sil24.o
obj-$(CONFIG_SATA_DWC) += sata_dwc_460ex.o
obj-$(CONFIG_SATA_HIGHBANK) += sata_highbank.o libahci.o
-obj-$(CONFIG_AHCI_BRCMSTB) += ahci_brcmstb.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_BRCM) += ahci_brcm.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_CEVA) += ahci_ceva.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_DA850) += ahci_da850.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_IMX) += ahci_imx.o libahci.o libahci_platform.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index a83bbcc58..90eabaf81 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -580,7 +580,7 @@ static struct pci_driver ahci_pci_driver = {
},
};
-#if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
+#if IS_ENABLED(CONFIG_PATA_MARVELL)
static int marvell_enable;
#else
static int marvell_enable = 1;
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
new file mode 100644
index 000000000..6f8a7341f
--- /dev/null
+++ b/drivers/ata/ahci_brcm.c
@@ -0,0 +1,406 @@
+/*
+ * Broadcom SATA3 AHCI Controller Driver
+ *
+ * Copyright © 2009-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ahci_platform.h>
+#include <linux/compiler.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/libata.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+
+#include "ahci.h"
+
+#define DRV_NAME "brcm-ahci"
+
+#define SATA_TOP_CTRL_VERSION 0x0
+#define SATA_TOP_CTRL_BUS_CTRL 0x4
+ #define MMIO_ENDIAN_SHIFT 0 /* CPU->AHCI */
+ #define DMADESC_ENDIAN_SHIFT 2 /* AHCI->DDR */
+ #define DMADATA_ENDIAN_SHIFT 4 /* AHCI->DDR */
+ #define PIODATA_ENDIAN_SHIFT 6
+ #define ENDIAN_SWAP_NONE 0
+ #define ENDIAN_SWAP_FULL 2
+ #define OVERRIDE_HWINIT BIT(16)
+#define SATA_TOP_CTRL_TP_CTRL 0x8
+#define SATA_TOP_CTRL_PHY_CTRL 0xc
+ #define SATA_TOP_CTRL_PHY_CTRL_1 0x0
+ #define SATA_TOP_CTRL_1_PHY_DEFAULT_POWER_STATE BIT(14)
+ #define SATA_TOP_CTRL_PHY_CTRL_2 0x4
+ #define SATA_TOP_CTRL_2_SW_RST_MDIOREG BIT(0)
+ #define SATA_TOP_CTRL_2_SW_RST_OOB BIT(1)
+ #define SATA_TOP_CTRL_2_SW_RST_RX BIT(2)
+ #define SATA_TOP_CTRL_2_SW_RST_TX BIT(3)
+ #define SATA_TOP_CTRL_2_PHY_GLOBAL_RESET BIT(14)
+ #define SATA_TOP_CTRL_PHY_OFFS 0x8
+ #define SATA_TOP_MAX_PHYS 2
+
+#define SATA_FIRST_PORT_CTRL 0x700
+#define SATA_NEXT_PORT_CTRL_OFFSET 0x80
+#define SATA_PORT_PCTRL6(reg_base) (reg_base + 0x18)
+
+/* On big-endian MIPS, buses are reversed to big endian, so switch them back */
+#if defined(CONFIG_MIPS) && defined(__BIG_ENDIAN)
+#define DATA_ENDIAN 2 /* AHCI->DDR inbound accesses */
+#define MMIO_ENDIAN 2 /* CPU->AHCI outbound accesses */
+#else
+#define DATA_ENDIAN 0
+#define MMIO_ENDIAN 0
+#endif
+
+#define BUS_CTRL_ENDIAN_CONF \
+ ((DATA_ENDIAN << DMADATA_ENDIAN_SHIFT) | \
+ (DATA_ENDIAN << DMADESC_ENDIAN_SHIFT) | \
+ (MMIO_ENDIAN << MMIO_ENDIAN_SHIFT))
+
+enum brcm_ahci_version {
+ BRCM_SATA_BCM7425 = 1,
+ BRCM_SATA_BCM7445,
+ BRCM_SATA_NSP,
+};
+
+enum brcm_ahci_quirks {
+ BRCM_AHCI_QUIRK_NO_NCQ = BIT(0),
+ BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE = BIT(1),
+};
+
+struct brcm_ahci_priv {
+ struct device *dev;
+ void __iomem *top_ctrl;
+ u32 port_mask;
+ u32 quirks;
+ enum brcm_ahci_version version;
+};
+
+static const struct ata_port_info ahci_brcm_port_info = {
+ .flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM,
+ .link_flags = ATA_LFLAG_NO_DB_DELAY,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_platform_ops,
+};
+
+static inline u32 brcm_sata_readreg(void __iomem *addr)
+{
+ /*
+ * MIPS endianness is configured by the boot strap, which also reverses
+ * all bus endianness (i.e., big-endian CPU + big-endian bus ==> native
+ * endian I/O).
+ *
+ * Other architectures (e.g., ARM) either do not support big endian, or
+ * else leave I/O in little endian mode.
+ */
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ return __raw_readl(addr);
+ else
+ return readl_relaxed(addr);
+}
+
+static inline void brcm_sata_writereg(u32 val, void __iomem *addr)
+{
+ /* See brcm_sata_readreg() comments */
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ __raw_writel(val, addr);
+ else
+ writel_relaxed(val, addr);
+}
+
+static void brcm_sata_alpm_init(struct ahci_host_priv *hpriv)
+{
+ struct brcm_ahci_priv *priv = hpriv->plat_data;
+ u32 bus_ctrl, port_ctrl, host_caps;
+ int i;
+
+ /* Enable support for ALPM */
+ bus_ctrl = brcm_sata_readreg(priv->top_ctrl +
+ SATA_TOP_CTRL_BUS_CTRL);
+ brcm_sata_writereg(bus_ctrl | OVERRIDE_HWINIT,
+ priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL);
+ host_caps = readl(hpriv->mmio + HOST_CAP);
+ writel(host_caps | HOST_CAP_ALPM, hpriv->mmio);
+ brcm_sata_writereg(bus_ctrl, priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL);
+
+ /*
+ * Adjust timeout to allow PLL sufficient time to lock while waking
+ * up from slumber mode.
+ */
+ for (i = 0, port_ctrl = SATA_FIRST_PORT_CTRL;
+ i < SATA_TOP_MAX_PHYS;
+ i++, port_ctrl += SATA_NEXT_PORT_CTRL_OFFSET) {
+ if (priv->port_mask & BIT(i))
+ writel(0xff1003fc,
+ hpriv->mmio + SATA_PORT_PCTRL6(port_ctrl));
+ }
+}
+
+static void brcm_sata_phy_enable(struct brcm_ahci_priv *priv, int port)
+{
+ void __iomem *phyctrl = priv->top_ctrl + SATA_TOP_CTRL_PHY_CTRL +
+ (port * SATA_TOP_CTRL_PHY_OFFS);
+ void __iomem *p;
+ u32 reg;
+
+ if (priv->quirks & BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE)
+ return;
+
+ /* clear PHY_DEFAULT_POWER_STATE */
+ p = phyctrl + SATA_TOP_CTRL_PHY_CTRL_1;
+ reg = brcm_sata_readreg(p);
+ reg &= ~SATA_TOP_CTRL_1_PHY_DEFAULT_POWER_STATE;
+ brcm_sata_writereg(reg, p);
+
+ /* reset the PHY digital logic */
+ p = phyctrl + SATA_TOP_CTRL_PHY_CTRL_2;
+ reg = brcm_sata_readreg(p);
+ reg &= ~(SATA_TOP_CTRL_2_SW_RST_MDIOREG | SATA_TOP_CTRL_2_SW_RST_OOB |
+ SATA_TOP_CTRL_2_SW_RST_RX);
+ reg |= SATA_TOP_CTRL_2_SW_RST_TX;
+ brcm_sata_writereg(reg, p);
+ reg = brcm_sata_readreg(p);
+ reg |= SATA_TOP_CTRL_2_PHY_GLOBAL_RESET;
+ brcm_sata_writereg(reg, p);
+ reg = brcm_sata_readreg(p);
+ reg &= ~SATA_TOP_CTRL_2_PHY_GLOBAL_RESET;
+ brcm_sata_writereg(reg, p);
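+ /* dummy read back to ensure the reset writes have posted */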
+ (void)brcm_sata_readreg(p);
+}
+
+static void brcm_sata_phy_disable(struct brcm_ahci_priv *priv, int port)
+{
+ void __iomem *phyctrl = priv->top_ctrl + SATA_TOP_CTRL_PHY_CTRL +
+ (port * SATA_TOP_CTRL_PHY_OFFS);
+ void __iomem *p;
+ u32 reg;
+
+ if (priv->quirks & BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE)
+ return;
+
+ /* power-off the PHY digital logic */
+ p = phyctrl + SATA_TOP_CTRL_PHY_CTRL_2;
+ reg = brcm_sata_readreg(p);
+ reg |= (SATA_TOP_CTRL_2_SW_RST_MDIOREG | SATA_TOP_CTRL_2_SW_RST_OOB |
+ SATA_TOP_CTRL_2_SW_RST_RX | SATA_TOP_CTRL_2_SW_RST_TX |
+ SATA_TOP_CTRL_2_PHY_GLOBAL_RESET);
+ brcm_sata_writereg(reg, p);
+
+ /* set PHY_DEFAULT_POWER_STATE */
+ p = phyctrl + SATA_TOP_CTRL_PHY_CTRL_1;
+ reg = brcm_sata_readreg(p);
+ reg |= SATA_TOP_CTRL_1_PHY_DEFAULT_POWER_STATE;
+ brcm_sata_writereg(reg, p);
+}
+
+static void brcm_sata_phys_enable(struct brcm_ahci_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < SATA_TOP_MAX_PHYS; i++)
+ if (priv->port_mask & BIT(i))
+ brcm_sata_phy_enable(priv, i);
+}
+
+static void brcm_sata_phys_disable(struct brcm_ahci_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < SATA_TOP_MAX_PHYS; i++)
+ if (priv->port_mask & BIT(i))
+ brcm_sata_phy_disable(priv, i);
+}
+
+static u32 brcm_ahci_get_portmask(struct platform_device *pdev,
+ struct brcm_ahci_priv *priv)
+{
+ void __iomem *ahci;
+ struct resource *res;
+ u32 impl;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ahci");
+ ahci = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ahci))
+ return 0;
+
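+ /* HOST_PORTS_IMPL is a bitmap of the ports implemented by this core */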
+ impl = readl(ahci + HOST_PORTS_IMPL);
+
+ if (fls(impl) > SATA_TOP_MAX_PHYS)
+ dev_warn(priv->dev, "warning: more ports than PHYs (%#x)\n",
+ impl);
+ else if (!impl)
+ dev_info(priv->dev, "no ports found\n");
+
+ devm_iounmap(&pdev->dev, ahci);
+ devm_release_mem_region(&pdev->dev, res->start, resource_size(res));
+
+ return impl;
+}
+
+static void brcm_sata_init(struct brcm_ahci_priv *priv)
+{
+ void __iomem *ctrl = priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL;
+
+ /* Configure endianness */
+ if (priv->version == BRCM_SATA_NSP) {
+ u32 data = brcm_sata_readreg(ctrl);
+
+ data &= ~((0x03 << DMADATA_ENDIAN_SHIFT) |
+ (0x03 << DMADESC_ENDIAN_SHIFT));
+ data |= (0x02 << DMADATA_ENDIAN_SHIFT) |
+ (0x02 << DMADESC_ENDIAN_SHIFT);
+ brcm_sata_writereg(data, ctrl);
+ } else
+ brcm_sata_writereg(BUS_CTRL_ENDIAN_CONF, ctrl);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int brcm_ahci_suspend(struct device *dev)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ struct brcm_ahci_priv *priv = hpriv->plat_data;
+ int ret;
+
+ ret = ahci_platform_suspend(dev);
+ brcm_sata_phys_disable(priv);
+ return ret;
+}
+
+static int brcm_ahci_resume(struct device *dev)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ struct brcm_ahci_priv *priv = hpriv->plat_data;
+
+ brcm_sata_init(priv);
+ brcm_sata_phys_enable(priv);
+ brcm_sata_alpm_init(hpriv);
+ return ahci_platform_resume(dev);
+}
+#endif
+
+static struct scsi_host_template ahci_platform_sht = {
+ AHCI_SHT(DRV_NAME),
+};
+
+static const struct of_device_id ahci_of_match[] = {
+ {.compatible = "brcm,bcm7425-ahci", .data = (void *)BRCM_SATA_BCM7425},
+ {.compatible = "brcm,bcm7445-ahci", .data = (void *)BRCM_SATA_BCM7445},
+ {.compatible = "brcm,bcm-nsp-ahci", .data = (void *)BRCM_SATA_NSP},
+ {},
+};
+MODULE_DEVICE_TABLE(of, ahci_of_match);
+
+static int brcm_ahci_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id;
+ struct device *dev = &pdev->dev;
+ struct brcm_ahci_priv *priv;
+ struct ahci_host_priv *hpriv;
+ struct resource *res;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ of_id = of_match_node(ahci_of_match, pdev->dev.of_node);
+ if (!of_id)
+ return -ENODEV;
+
+ priv->version = (enum brcm_ahci_version)of_id->data;
+ priv->dev = dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "top-ctrl");
+ priv->top_ctrl = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->top_ctrl))
+ return PTR_ERR(priv->top_ctrl);
+
+ if ((priv->version == BRCM_SATA_BCM7425) ||
+ (priv->version == BRCM_SATA_NSP)) {
+ priv->quirks |= BRCM_AHCI_QUIRK_NO_NCQ;
+ priv->quirks |= BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE;
+ }
+
+ brcm_sata_init(priv);
+
+ priv->port_mask = brcm_ahci_get_portmask(pdev, priv);
+ if (!priv->port_mask)
+ return -ENODEV;
+
+ brcm_sata_phys_enable(priv);
+
+ hpriv = ahci_platform_get_resources(pdev);
+ if (IS_ERR(hpriv))
+ return PTR_ERR(hpriv);
+ hpriv->plat_data = priv;
+ hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP;
+
+ brcm_sata_alpm_init(hpriv);
+
+ ret = ahci_platform_enable_resources(hpriv);
+ if (ret)
+ return ret;
+
+ if (priv->quirks & BRCM_AHCI_QUIRK_NO_NCQ)
+ hpriv->flags |= AHCI_HFLAG_NO_NCQ;
+
+ ret = ahci_platform_init_host(pdev, hpriv, &ahci_brcm_port_info,
+ &ahci_platform_sht);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "Broadcom AHCI SATA3 registered\n");
+
+ return 0;
+}
+
+static int brcm_ahci_remove(struct platform_device *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ struct brcm_ahci_priv *priv = hpriv->plat_data;
+ int ret;
+
+ ret = ata_platform_remove_one(pdev);
+ if (ret)
+ return ret;
+
+ brcm_sata_phys_disable(priv);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ahci_brcm_pm_ops, brcm_ahci_suspend, brcm_ahci_resume);
+
+static struct platform_driver brcm_ahci_driver = {
+ .probe = brcm_ahci_probe,
+ .remove = brcm_ahci_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = ahci_of_match,
+ .pm = &ahci_brcm_pm_ops,
+ },
+};
+module_platform_driver(brcm_ahci_driver);
+
+MODULE_DESCRIPTION("Broadcom SATA3 AHCI Controller Driver");
+MODULE_AUTHOR("Brian Norris");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:sata-brcmstb");
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index ccd8cc47c..dcf2c724f 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1975,7 +1975,7 @@ unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
*/
pp->active_link = qc->dev->link;
- if (qc->tf.protocol == ATA_PROT_NCQ)
+ if (ata_is_ncq(qc->tf.protocol))
writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
@@ -2392,12 +2392,20 @@ static int ahci_port_start(struct ata_port *ap)
static void ahci_port_stop(struct ata_port *ap)
{
const char *emsg = NULL;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *host_mmio = hpriv->mmio;
int rc;
/* de-initialize port */
rc = ahci_deinit_port(ap, &emsg);
if (rc)
ata_port_warn(ap, "%s (%d)\n", emsg, rc);
+
+ /*
+ * Clear GHC.IS to prevent stuck INTx after disabling MSI and
+ * re-enabling INTx.
+ */
+ writel(1 << ap->port_no, host_mmio + HOST_IRQ_STAT);
}
void ahci_print_info(struct ata_host *host, const char *scc_s)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 0506c49da..9f8b07029 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -69,6 +69,7 @@
#include <asm/unaligned.h>
#include <linux/cdrom.h>
#include <linux/ratelimit.h>
+#include <linux/leds.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
@@ -1238,7 +1239,7 @@ static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
} else
tf.command = ATA_CMD_READ_NATIVE_MAX;
- tf.protocol |= ATA_PROT_NODATA;
+ tf.protocol = ATA_PROT_NODATA;
tf.device |= ATA_LBA;
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
@@ -1297,7 +1298,7 @@ static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
tf.device |= (new_sectors >> 24) & 0xf;
}
- tf.protocol |= ATA_PROT_NODATA;
+ tf.protocol = ATA_PROT_NODATA;
tf.device |= ATA_LBA;
tf.lbal = (new_sectors >> 0) & 0xff;
@@ -4852,7 +4853,7 @@ int ata_std_qc_defer(struct ata_queued_cmd *qc)
{
struct ata_link *link = qc->dev->link;
- if (qc->tf.protocol == ATA_PROT_NCQ) {
+ if (ata_is_ncq(qc->tf.protocol)) {
if (!ata_tag_valid(link->active_tag))
return 0;
} else {
@@ -5017,7 +5018,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
ata_sg_clean(qc);
/* command should be marked inactive atomically with qc completion */
- if (qc->tf.protocol == ATA_PROT_NCQ) {
+ if (ata_is_ncq(qc->tf.protocol)) {
link->sactive &= ~(1 << qc->tag);
if (!link->sactive)
ap->nr_active_links--;
@@ -5054,7 +5055,7 @@ static void ata_verify_xfer(struct ata_queued_cmd *qc)
{
struct ata_device *dev = qc->dev;
- if (ata_is_nodata(qc->tf.protocol))
+ if (!ata_is_data(qc->tf.protocol))
return;
if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
@@ -5082,6 +5083,9 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
+ /* Trigger the LED (if available) */
+ ledtrig_disk_activity();
+
/* XXX: New EH and old EH use different mechanisms to
* synchronize EH with regular execution path.
*
@@ -5137,7 +5141,9 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
switch (qc->tf.command) {
case ATA_CMD_SET_FEATURES:
if (qc->tf.feature != SETFEATURES_WC_ON &&
- qc->tf.feature != SETFEATURES_WC_OFF)
+ qc->tf.feature != SETFEATURES_WC_OFF &&
+ qc->tf.feature != SETFEATURES_RA_ON &&
+ qc->tf.feature != SETFEATURES_RA_OFF)
break;
/* fall through */
case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index c6f017458..0e1ec3707 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2607,9 +2607,13 @@ static void ata_eh_link_report(struct ata_link *link)
[DMA_FROM_DEVICE] = "in",
};
static const char *prot_str[] = {
+ [ATA_PROT_UNKNOWN] = "unknown",
+ [ATA_PROT_NODATA] = "nodata",
[ATA_PROT_PIO] = "pio",
[ATA_PROT_DMA] = "dma",
- [ATA_PROT_NCQ] = "ncq",
+ [ATA_PROT_NCQ] = "ncq dma",
+ [ATA_PROT_NCQ_NODATA] = "ncq nodata",
+ [ATAPI_PROT_NODATA] = "nodata",
[ATAPI_PROT_PIO] = "pio",
[ATAPI_PROT_DMA] = "dma",
};
@@ -3177,7 +3181,7 @@ static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
}
tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
- tf.protocol |= ATA_PROT_NODATA;
+ tf.protocol = ATA_PROT_NODATA;
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
if (park && (err_mask || tf.lbal != 0xc4)) {
ata_dev_err(dev, "head unload failed!\n");
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index bfec66fb2..e207b33e4 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -304,7 +304,7 @@ static void ata_scsi_set_invalid_field(struct ata_device *dev,
struct scsi_cmnd *cmd, u16 field, u8 bit)
{
ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x24, 0x0);
- /* "Invalid field in cbd" */
+ /* "Invalid field in CDB" */
scsi_set_sense_field_pointer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
field, bit, 1);
}
@@ -1190,7 +1190,7 @@ static int atapi_drain_needed(struct request *rq)
if (likely(rq->cmd_type != REQ_TYPE_BLOCK_PC))
return 0;
- if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_WRITE))
+ if (!blk_rq_bytes(rq) || op_is_write(req_op(rq)))
return 0;
return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
@@ -2075,8 +2075,8 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
0x03,
0x20, /* SBC-2 (no version claimed) */
- 0x02,
- 0x60 /* SPC-3 (no version claimed) */
+ 0x03,
+ 0x00 /* SPC-3 (no version claimed) */
};
const u8 versions_zbc[] = {
0x00,
@@ -2097,7 +2097,10 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
0,
0x5, /* claim SPC-3 version compatibility */
2,
- 95 - 4
+ 95 - 4,
+ 0,
+ 0,
+ 2
};
VPRINTK("ENTER\n");
@@ -2109,8 +2112,10 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
(args->dev->link->ap->pflags & ATA_PFLAG_EXTERNAL))
hdr[1] |= (1 << 7);
- if (args->dev->class == ATA_DEV_ZAC)
+ if (args->dev->class == ATA_DEV_ZAC) {
hdr[0] = TYPE_ZBC;
+ hdr[2] = 0x7; /* claim SPC-5 version compatibility */
+ }
memcpy(rbuf, hdr, sizeof(hdr));
memcpy(&rbuf[8], "ATA ", 8);
@@ -2314,7 +2319,7 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
* with the unmap bit set.
*/
if (ata_id_has_trim(args->id)) {
- put_unaligned_be64(65535 * 512 / 8, &rbuf[36]);
+ put_unaligned_be64(65535 * ATA_MAX_TRIM_RNUM, &rbuf[36]);
put_unaligned_be32(1, &rbuf[28]);
}
@@ -2424,15 +2429,17 @@ static void modecpy(u8 *dest, const u8 *src, int n, bool changeable)
static unsigned int ata_msense_caching(u16 *id, u8 *buf, bool changeable)
{
modecpy(buf, def_cache_mpage, sizeof(def_cache_mpage), changeable);
- if (changeable || ata_id_wcache_enabled(id))
- buf[2] |= (1 << 2); /* write cache enable */
- if (!changeable && !ata_id_rahead_enabled(id))
- buf[12] |= (1 << 5); /* disable read ahead */
+ if (changeable) {
+ buf[2] |= (1 << 2); /* ata_mselect_caching() */
+ } else {
+ buf[2] |= (ata_id_wcache_enabled(id) << 2); /* write cache enable */
+ buf[12] |= (!ata_id_rahead_enabled(id) << 5); /* disable read ahead */
+ }
return sizeof(def_cache_mpage);
}
/**
- * ata_msense_ctl_mode - Simulate MODE SENSE control mode page
+ * ata_msense_control - Simulate MODE SENSE control mode page
* @dev: ATA device of interest
* @buf: output buffer
* @changeable: whether changeable parameters are requested
@@ -2442,12 +2449,17 @@ static unsigned int ata_msense_caching(u16 *id, u8 *buf, bool changeable)
* LOCKING:
* None.
*/
-static unsigned int ata_msense_ctl_mode(struct ata_device *dev, u8 *buf,
+static unsigned int ata_msense_control(struct ata_device *dev, u8 *buf,
bool changeable)
{
modecpy(buf, def_control_mpage, sizeof(def_control_mpage), changeable);
- if (changeable && (dev->flags & ATA_DFLAG_D_SENSE))
- buf[2] |= (1 << 2); /* Descriptor sense requested */
+ if (changeable) {
+ buf[2] |= (1 << 2); /* ata_mselect_control() */
+ } else {
+ bool d_sense = (dev->flags & ATA_DFLAG_D_SENSE);
+
+ buf[2] |= (d_sense << 2); /* descriptor format sense data */
+ }
return sizeof(def_control_mpage);
}
@@ -2566,13 +2578,13 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
break;
case CONTROL_MPAGE:
- p += ata_msense_ctl_mode(args->dev, p, page_control == 1);
+ p += ata_msense_control(args->dev, p, page_control == 1);
break;
case ALL_MPAGES:
p += ata_msense_rw_recovery(p, page_control == 1);
p += ata_msense_caching(args->id, p, page_control == 1);
- p += ata_msense_ctl_mode(args->dev, p, page_control == 1);
+ p += ata_msense_control(args->dev, p, page_control == 1);
break;
default: /* invalid page code */
@@ -3077,6 +3089,9 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
goto invalid_fld;
}
+ if (ata_is_ncq(tf->protocol) && (cdb[2] & 0x3) == 0)
+ tf->protocol = ATA_PROT_NCQ_NODATA;
+
/* enable LBA */
tf->flags |= ATA_TFLAG_LBA;
@@ -3125,8 +3140,8 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
tf->command = cdb[9];
}
- /* For NCQ commands with FPDMA protocol, copy the tag value */
- if (tf->protocol == ATA_PROT_NCQ)
+ /* For NCQ commands copy the tag value */
+ if (ata_is_ncq(tf->protocol))
tf->nsect = qc->tag << 3;
/* enforce correct master/slave bit */
@@ -3305,7 +3320,13 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
goto invalid_param_len;
buf = page_address(sg_page(scsi_sglist(scmd)));
- size = ata_set_lba_range_entries(buf, 512, block, n_block);
+
+ if (n_block <= 65535 * ATA_MAX_TRIM_RNUM) {
+ size = ata_set_lba_range_entries(buf, ATA_MAX_TRIM_RNUM, block, n_block);
+ } else {
+ fp = 2;
+ goto invalid_fld;
+ }
if (ata_ncq_enabled(dev) && ata_fpdma_dsm_supported(dev)) {
/* Newer devices support queued TRIM commands */
@@ -3454,7 +3475,7 @@ static unsigned int ata_scsi_zbc_in_xlat(struct ata_queued_cmd *qc)
goto invalid_param_len;
}
sect = n_block / 512;
- options = cdb[14];
+ options = cdb[14] & 0xbf;
if (ata_ncq_enabled(qc->dev) &&
ata_fpdma_zac_mgmt_in_supported(qc->dev)) {
@@ -3464,7 +3485,7 @@ static unsigned int ata_scsi_zbc_in_xlat(struct ata_queued_cmd *qc)
tf->nsect = qc->tag << 3;
tf->feature = sect & 0xff;
tf->hob_feature = (sect >> 8) & 0xff;
- tf->auxiliary = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES;
+ tf->auxiliary = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES | (options << 8);
} else {
tf->command = ATA_CMD_ZAC_MGMT_IN;
tf->feature = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES;
@@ -3506,7 +3527,7 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc)
struct scsi_cmnd *scmd = qc->scsicmd;
struct ata_device *dev = qc->dev;
const u8 *cdb = scmd->cmnd;
- u8 reset_all, sa;
+ u8 all, sa;
u64 block;
u32 n_block;
u16 fp = (u16)-1;
@@ -3533,20 +3554,20 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc)
if (block > dev->n_sectors)
goto out_of_range;
- reset_all = cdb[14] & 0x1;
+ all = cdb[14] & 0x1;
if (ata_ncq_enabled(qc->dev) &&
ata_fpdma_zac_mgmt_out_supported(qc->dev)) {
- tf->protocol = ATA_PROT_NCQ;
+ tf->protocol = ATA_PROT_NCQ_NODATA;
tf->command = ATA_CMD_NCQ_NON_DATA;
- tf->hob_nsect = ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT;
+ tf->feature = ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT;
tf->nsect = qc->tag << 3;
- tf->auxiliary = sa | (reset_all & 0x1) << 8;
+ tf->auxiliary = sa | ((u16)all << 8);
} else {
tf->protocol = ATA_PROT_NODATA;
tf->command = ATA_CMD_ZAC_MGMT_OUT;
tf->feature = sa;
- tf->hob_feature = reset_all & 0x1;
+ tf->hob_feature = all;
}
tf->lbah = (block >> 16) & 0xff;
tf->lbam = (block >> 8) & 0xff;
@@ -3667,7 +3688,7 @@ static int ata_mselect_control(struct ata_queued_cmd *qc,
/*
* Check that read-only bits are not modified.
*/
- ata_msense_ctl_mode(dev, mpage, false);
+ ata_msense_control(dev, mpage, false);
for (i = 0; i < CONTROL_MPAGE_LEN - 2; i++) {
if (i == 0)
continue;
@@ -4039,11 +4060,6 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
args.done = cmd->scsi_done;
switch(scsicmd[0]) {
- /* TODO: worth improving? */
- case FORMAT_UNIT:
- ata_scsi_invalid_field(dev, cmd, 0);
- break;
-
case INQUIRY:
if (scsicmd[1] & 2) /* is CmdDt set? */
ata_scsi_invalid_field(dev, cmd, 1);
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index e2d949729..7ef16c085 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -495,12 +495,13 @@ struct ata_show_ering_arg {
static int ata_show_ering(struct ata_ering_entry *ent, void *void_arg)
{
struct ata_show_ering_arg* arg = void_arg;
- struct timespec time;
+ u64 seconds;
+ u32 rem;
- jiffies_to_timespec(ent->timestamp,&time);
+ seconds = div_u64_rem(ent->timestamp, HZ, &rem);
arg->written += sprintf(arg->buf + arg->written,
- "[%5lu.%06lu]",
- time.tv_sec, time.tv_nsec);
+ "[%5llu.%09lu]", seconds,
+ rem * NSEC_PER_SEC / HZ);
arg->written += get_ata_err_names(ent->err_mask,
arg->buf + arg->written);
return 0;
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 80fe0f6fe..b4d54771c 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -565,7 +565,7 @@ chan_request_fail:
qc->ap->hsm_task_state = HSM_ST_ERR;
cf_ctrl_reset(acdev);
- spin_unlock_irqrestore(qc->ap->lock, flags);
+ spin_unlock_irqrestore(&acdev->host->lock, flags);
sff_intr:
dma_complete(acdev);
}
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 970f7767e..49d705c9f 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -183,8 +183,8 @@ static void atiixp_set_dmamode(struct ata_port *ap, struct ata_device *adev)
* We must now look at the PIO mode situation. We may need to
* adjust the PIO mode to keep the timings acceptable
*/
- if (adev->dma_mode >= XFER_MW_DMA_2)
- wanted_pio = 4;
+ if (adev->dma_mode >= XFER_MW_DMA_2)
+ wanted_pio = 4;
else if (adev->dma_mode == XFER_MW_DMA_1)
wanted_pio = 3;
else if (adev->dma_mode == XFER_MW_DMA_0)
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index e5fb7525a..a219a503c 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -368,7 +368,7 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
/* PCI clocking determines the ATA timing values to use */
/* info_hpt366 is safe against re-entry so we can scribble on it */
- switch ((reg1 & 0x700) >> 8) {
+ switch ((reg1 & 0xf00) >> 8) {
case 9:
hpriv = &hpt366_40;
break;
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index ae9feb1ba..ff468a6fd 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -146,7 +146,7 @@ static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *i
if (pdev->device == 0x6101)
ppi[1] = &ata_dummy_port_info;
-#if defined(CONFIG_SATA_AHCI) || defined(CONFIG_SATA_AHCI_MODULE)
+#if IS_ENABLED(CONFIG_SATA_AHCI)
if (!marvell_pata_active(pdev)) {
printk(KERN_INFO DRV_NAME ": PATA port not active, deferring to AHCI driver.\n");
return -ENODEV;
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
index 633aa2934..44f97ad3c 100644
--- a/drivers/ata/pata_ninja32.c
+++ b/drivers/ata/pata_ninja32.c
@@ -144,7 +144,7 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
ap->ioaddr.altstatus_addr = base + 0x1E;
ap->ioaddr.bmdma_addr = base;
ata_sff_std_ports(&ap->ioaddr);
- ap->pflags = ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
+ ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
ninja32_program(base);
/* FIXME: Should we disable them at remove ? */
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 00c2af1d2..e0939bd5e 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -259,11 +259,8 @@ static int sata_dwc_dma_init_old(struct platform_device *pdev,
/* Get physical SATA DMA register base address */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
hsdev->dma->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(hsdev->dma->regs)) {
- dev_err(&pdev->dev,
- "ioremap failed for AHBDMA register address\n");
+ if (IS_ERR(hsdev->dma->regs))
return PTR_ERR(hsdev->dma->regs);
- }
/* Initialize AHB DMAC */
return dw_dma_probe(hsdev->dma);
@@ -281,7 +278,7 @@ static void sata_dwc_dma_exit_old(struct sata_dwc_device *hsdev)
static const char *get_prot_descript(u8 protocol)
{
- switch ((enum ata_tf_protocols)protocol) {
+ switch (protocol) {
case ATA_PROT_NODATA:
return "ATA no data";
case ATA_PROT_PIO:
@@ -290,6 +287,8 @@ static const char *get_prot_descript(u8 protocol)
return "ATA DMA";
case ATA_PROT_NCQ:
return "ATA NCQ";
+ case ATA_PROT_NCQ_NODATA:
+ return "ATA NCQ no data";
case ATAPI_PROT_NODATA:
return "ATAPI no data";
case ATAPI_PROT_PIO:
@@ -1225,11 +1224,8 @@ static int sata_dwc_probe(struct platform_device *ofdev)
/* Ioremap SATA registers */
res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&ofdev->dev, res);
- if (IS_ERR(base)) {
- dev_err(&ofdev->dev,
- "ioremap failed for SATA register address\n");
+ if (IS_ERR(base))
return PTR_ERR(base);
- }
dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n");
/* Synopsys DWC SATA specific Registers */
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 527bbd595..5fc81e240 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -2795,9 +2795,7 @@ static int hrz_probe(struct pci_dev *pci_dev,
dev->atm_dev->ci_range.vpi_bits = vpi_bits;
dev->atm_dev->ci_range.vci_bits = 10-vpi_bits;
- init_timer(&dev->housekeeping);
- dev->housekeeping.function = do_housekeeping;
- dev->housekeeping.data = (unsigned long) dev;
+ setup_timer(&dev->housekeeping, do_housekeeping, (unsigned long) dev);
mod_timer(&dev->housekeeping, jiffies);
out:
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index ddc4ceb85..700ed15c2 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -874,7 +874,8 @@ static scq_info *get_scq(ns_dev *card, int size, u32 scd)
scq->skb = kmalloc(sizeof(struct sk_buff *) *
(size / NS_SCQE_SIZE), GFP_KERNEL);
if (!scq->skb) {
- kfree(scq->org);
+ dma_free_coherent(&card->pcidev->dev,
+ 2 * size, scq->org, scq->dma);
kfree(scq);
return NULL;
}
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 11f768c70..acf228e9a 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -46,7 +46,8 @@ MODULE_LICENSE("GPL");
extern struct builtin_fw __start_builtin_fw[];
extern struct builtin_fw __end_builtin_fw[];
-static bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
+static bool fw_get_builtin_firmware(struct firmware *fw, const char *name,
+ void *buf, size_t size)
{
struct builtin_fw *b_fw;
@@ -54,6 +55,9 @@ static bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
if (strcmp(name, b_fw->name) == 0) {
fw->size = b_fw->size;
fw->data = b_fw->data;
+
+ if (buf && fw->size <= size)
+ memcpy(buf, fw->data, fw->size);
return true;
}
}
@@ -74,7 +78,9 @@ static bool fw_is_builtin_firmware(const struct firmware *fw)
#else /* Module case - no builtin firmware support */
-static inline bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
+static inline bool fw_get_builtin_firmware(struct firmware *fw,
+ const char *name, void *buf,
+ size_t size)
{
return false;
}
@@ -112,6 +118,7 @@ static inline long firmware_loading_timeout(void)
#define FW_OPT_FALLBACK 0
#endif
#define FW_OPT_NO_WARN (1U << 3)
+#define FW_OPT_NOCACHE (1U << 4)
struct firmware_cache {
/* firmware_buf instance will be added into the below list */
@@ -143,6 +150,7 @@ struct firmware_buf {
unsigned long status;
void *data;
size_t size;
+ size_t allocated_size;
#ifdef CONFIG_FW_LOADER_USER_HELPER
bool is_paged_buf;
bool need_uevent;
@@ -178,7 +186,8 @@ static DEFINE_MUTEX(fw_lock);
static struct firmware_cache fw_cache;
static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
- struct firmware_cache *fwc)
+ struct firmware_cache *fwc,
+ void *dbuf, size_t size)
{
struct firmware_buf *buf;
@@ -194,6 +203,8 @@ static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
kref_init(&buf->ref);
buf->fwc = fwc;
+ buf->data = dbuf;
+ buf->allocated_size = size;
init_completion(&buf->completion);
#ifdef CONFIG_FW_LOADER_USER_HELPER
INIT_LIST_HEAD(&buf->pending_list);
@@ -217,7 +228,8 @@ static struct firmware_buf *__fw_lookup_buf(const char *fw_name)
static int fw_lookup_and_allocate_buf(const char *fw_name,
struct firmware_cache *fwc,
- struct firmware_buf **buf)
+ struct firmware_buf **buf, void *dbuf,
+ size_t size)
{
struct firmware_buf *tmp;
@@ -229,7 +241,7 @@ static int fw_lookup_and_allocate_buf(const char *fw_name,
*buf = tmp;
return 1;
}
- tmp = __allocate_fw_buf(fw_name, fwc);
+ tmp = __allocate_fw_buf(fw_name, fwc, dbuf, size);
if (tmp)
list_add(&tmp->list, &fwc->head);
spin_unlock(&fwc->lock);
@@ -261,6 +273,7 @@ static void __fw_free_buf(struct kref *ref)
vfree(buf->pages);
} else
#endif
+ if (!buf->allocated_size)
vfree(buf->data);
kfree_const(buf->fw_id);
kfree(buf);
@@ -301,13 +314,21 @@ static void fw_finish_direct_load(struct device *device,
mutex_unlock(&fw_lock);
}
-static int fw_get_filesystem_firmware(struct device *device,
- struct firmware_buf *buf)
+static int
+fw_get_filesystem_firmware(struct device *device, struct firmware_buf *buf)
{
loff_t size;
int i, len;
int rc = -ENOENT;
char *path;
+ enum kernel_read_file_id id = READING_FIRMWARE;
+ size_t msize = INT_MAX;
+
+ /* An already-populated data member means we're loading into a buffer */
+ if (buf->data) {
+ id = READING_FIRMWARE_PREALLOC_BUFFER;
+ msize = buf->allocated_size;
+ }
path = __getname();
if (!path)
@@ -326,8 +347,8 @@ static int fw_get_filesystem_firmware(struct device *device,
}
buf->size = 0;
- rc = kernel_read_file_from_path(path, &buf->data, &size,
- INT_MAX, READING_FIRMWARE);
+ rc = kernel_read_file_from_path(path, &buf->data, &size, msize,
+ id);
if (rc) {
if (rc == -ENOENT)
dev_dbg(device, "loading %s failed with error %d\n",
@@ -691,6 +712,38 @@ out:
static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
+static void firmware_rw_buf(struct firmware_buf *buf, char *buffer,
+ loff_t offset, size_t count, bool read)
+{
+ if (read)
+ memcpy(buffer, buf->data + offset, count);
+ else
+ memcpy(buf->data + offset, buffer, count);
+}
+
+static void firmware_rw(struct firmware_buf *buf, char *buffer,
+ loff_t offset, size_t count, bool read)
+{
+ while (count) {
+ void *page_data;
+ int page_nr = offset >> PAGE_SHIFT;
+ int page_ofs = offset & (PAGE_SIZE-1);
+ int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
+
+ page_data = kmap(buf->pages[page_nr]);
+
+ if (read)
+ memcpy(buffer, page_data + page_ofs, page_cnt);
+ else
+ memcpy(page_data + page_ofs, buffer, page_cnt);
+
+ kunmap(buf->pages[page_nr]);
+ buffer += page_cnt;
+ offset += page_cnt;
+ count -= page_cnt;
+ }
+}
+
static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t offset, size_t count)
@@ -715,21 +768,11 @@ static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
ret_count = count;
- while (count) {
- void *page_data;
- int page_nr = offset >> PAGE_SHIFT;
- int page_ofs = offset & (PAGE_SIZE-1);
- int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
-
- page_data = kmap(buf->pages[page_nr]);
-
- memcpy(buffer, page_data + page_ofs, page_cnt);
+ if (buf->data)
+ firmware_rw_buf(buf, buffer, offset, count, true);
+ else
+ firmware_rw(buf, buffer, offset, count, true);
- kunmap(buf->pages[page_nr]);
- buffer += page_cnt;
- offset += page_cnt;
- count -= page_cnt;
- }
out:
mutex_unlock(&fw_lock);
return ret_count;
@@ -804,29 +847,23 @@ static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
goto out;
}
- retval = fw_realloc_buffer(fw_priv, offset + count);
- if (retval)
- goto out;
-
- retval = count;
-
- while (count) {
- void *page_data;
- int page_nr = offset >> PAGE_SHIFT;
- int page_ofs = offset & (PAGE_SIZE - 1);
- int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
-
- page_data = kmap(buf->pages[page_nr]);
-
- memcpy(page_data + page_ofs, buffer, page_cnt);
+ if (buf->data) {
+ if (offset + count > buf->allocated_size) {
+ retval = -ENOMEM;
+ goto out;
+ }
+ firmware_rw_buf(buf, buffer, offset, count, false);
+ retval = count;
+ } else {
+ retval = fw_realloc_buffer(fw_priv, offset + count);
+ if (retval)
+ goto out;
- kunmap(buf->pages[page_nr]);
- buffer += page_cnt;
- offset += page_cnt;
- count -= page_cnt;
+ retval = count;
+ firmware_rw(buf, buffer, offset, count, false);
}
- buf->size = max_t(size_t, offset, buf->size);
+ buf->size = max_t(size_t, offset + count, buf->size);
out:
mutex_unlock(&fw_lock);
return retval;
@@ -894,7 +931,8 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
struct firmware_buf *buf = fw_priv->buf;
/* fall back on userspace loading */
- buf->is_paged_buf = true;
+ if (!buf->data)
+ buf->is_paged_buf = true;
dev_set_uevent_suppress(f_dev, true);
@@ -929,7 +967,7 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
if (is_fw_load_aborted(buf))
retval = -EAGAIN;
- else if (!buf->data)
+ else if (buf->is_paged_buf && !buf->data)
retval = -ENOMEM;
device_del(f_dev);
@@ -1012,7 +1050,7 @@ static int sync_cached_firmware_buf(struct firmware_buf *buf)
*/
static int
_request_firmware_prepare(struct firmware **firmware_p, const char *name,
- struct device *device)
+ struct device *device, void *dbuf, size_t size)
{
struct firmware *firmware;
struct firmware_buf *buf;
@@ -1025,12 +1063,12 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
return -ENOMEM;
}
- if (fw_get_builtin_firmware(firmware, name)) {
+ if (fw_get_builtin_firmware(firmware, name, dbuf, size)) {
dev_dbg(device, "using built-in %s\n", name);
return 0; /* assigned */
}
- ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf);
+ ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf, dbuf, size);
/*
* bind with 'buf' now to avoid warning in failure path
@@ -1070,14 +1108,16 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device,
* should be fixed in devres or driver core.
*/
/* don't cache firmware handled without uevent */
- if (device && (opt_flags & FW_OPT_UEVENT))
+ if (device && (opt_flags & FW_OPT_UEVENT) &&
+ !(opt_flags & FW_OPT_NOCACHE))
fw_add_devm_name(device, buf->fw_id);
/*
* After caching firmware image is started, let it piggyback
* on request firmware.
*/
- if (buf->fwc->state == FW_LOADER_START_CACHE) {
+ if (!(opt_flags & FW_OPT_NOCACHE) &&
+ buf->fwc->state == FW_LOADER_START_CACHE) {
if (fw_cache_piggyback_on_request(buf->fw_id))
kref_get(&buf->ref);
}
@@ -1091,7 +1131,8 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device,
/* called from request_firmware() and request_firmware_work_func() */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
- struct device *device, unsigned int opt_flags)
+ struct device *device, void *buf, size_t size,
+ unsigned int opt_flags)
{
struct firmware *fw = NULL;
long timeout;
@@ -1105,7 +1146,7 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
goto out;
}
- ret = _request_firmware_prepare(&fw, name, device);
+ ret = _request_firmware_prepare(&fw, name, device, buf, size);
if (ret <= 0) /* error or already assigned */
goto out;
@@ -1184,7 +1225,7 @@ request_firmware(const struct firmware **firmware_p, const char *name,
/* Need to pin this module until return */
__module_get(THIS_MODULE);
- ret = _request_firmware(firmware_p, name, device,
+ ret = _request_firmware(firmware_p, name, device, NULL, 0,
FW_OPT_UEVENT | FW_OPT_FALLBACK);
module_put(THIS_MODULE);
return ret;
@@ -1208,7 +1249,7 @@ int request_firmware_direct(const struct firmware **firmware_p,
int ret;
__module_get(THIS_MODULE);
- ret = _request_firmware(firmware_p, name, device,
+ ret = _request_firmware(firmware_p, name, device, NULL, 0,
FW_OPT_UEVENT | FW_OPT_NO_WARN);
module_put(THIS_MODULE);
return ret;
@@ -1216,6 +1257,36 @@ int request_firmware_direct(const struct firmware **firmware_p,
EXPORT_SYMBOL_GPL(request_firmware_direct);
/**
+ * request_firmware_into_buf - load firmware into a previously allocated buffer
+ * @firmware_p: pointer to firmware image
+ * @name: name of firmware file
+ * @device: device for which firmware is being loaded and DMA region allocated
+ * @buf: address of buffer to load firmware into
+ * @size: size of buffer
+ *
+ * This function works much like request_firmware(), but it doesn't
+ * allocate a buffer to hold the firmware data. Instead, the firmware
+ * is loaded directly into the buffer pointed to by @buf and the @firmware_p
+ * data member is pointed at @buf.
+ *
+ * This function doesn't cache firmware either.
+ */
+int
+request_firmware_into_buf(const struct firmware **firmware_p, const char *name,
+ struct device *device, void *buf, size_t size)
+{
+ int ret;
+
+ __module_get(THIS_MODULE);
+ ret = _request_firmware(firmware_p, name, device, buf, size,
+ FW_OPT_UEVENT | FW_OPT_FALLBACK |
+ FW_OPT_NOCACHE);
+ module_put(THIS_MODULE);
+ return ret;
+}
+EXPORT_SYMBOL(request_firmware_into_buf);
+
+/**
* release_firmware: - release the resource associated with a firmware image
* @fw: firmware resource to release
**/
@@ -1247,7 +1318,7 @@ static void request_firmware_work_func(struct work_struct *work)
fw_work = container_of(work, struct firmware_work, work);
- _request_firmware(&fw, fw_work->name, fw_work->device,
+ _request_firmware(&fw, fw_work->name, fw_work->device, NULL, 0,
fw_work->opt_flags);
fw_work->cont(fw, fw_work->context);
put_device(fw_work->device); /* taken in request_firmware_nowait() */
@@ -1380,7 +1451,7 @@ static int uncache_firmware(const char *fw_name)
pr_debug("%s: %s\n", __func__, fw_name);
- if (fw_get_builtin_firmware(&fw, fw_name))
+ if (fw_get_builtin_firmware(&fw, fw_name, NULL, 0))
return 0;
buf = fw_lookup_buf(fw_name);
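
Taken together, the firmware_class changes let a driver load an image straight into memory it already owns (for example a DMA-capable region), bypassing the firmware cache. A hypothetical caller; the firmware name and the wrapper function are invented for illustration:

	static int example_load(struct device *dev, void *dma_buf, size_t dma_len)
	{
		const struct firmware *fw;
		int ret;

		ret = request_firmware_into_buf(&fw, "example/image.bin", dev,
						dma_buf, dma_len);
		if (ret)
			return ret;

		/* For non-built-in images fw->data now aliases dma_buf and
		 * fw->size <= dma_len; an image larger than the buffer fails
		 * the load instead of being truncated. */

		release_firmware(fw);	/* dma_buf itself stays owned by the caller */
		return 0;
	}
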
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index f46dba8b7..dc75de905 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -391,6 +391,7 @@ static ssize_t show_valid_zones(struct device *dev,
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
struct page *first_page;
struct zone *zone;
+ int zone_shift = 0;
start_pfn = section_nr_to_pfn(mem->start_section_nr);
end_pfn = start_pfn + nr_pages;
@@ -402,21 +403,26 @@ static ssize_t show_valid_zones(struct device *dev,
zone = page_zone(first_page);
- if (zone_idx(zone) == ZONE_MOVABLE - 1) {
- /*The mem block is the last memoryblock of this zone.*/
- if (end_pfn == zone_end_pfn(zone))
- return sprintf(buf, "%s %s\n",
- zone->name, (zone + 1)->name);
+ /* MMOP_ONLINE_KEEP */
+ sprintf(buf, "%s", zone->name);
+
+ /* MMOP_ONLINE_KERNEL */
+ zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL);
+ if (zone_shift) {
+ strcat(buf, " ");
+ strcat(buf, (zone + zone_shift)->name);
}
- if (zone_idx(zone) == ZONE_MOVABLE) {
- /*The mem block is the first memoryblock of ZONE_MOVABLE.*/
- if (start_pfn == zone->zone_start_pfn)
- return sprintf(buf, "%s %s\n",
- zone->name, (zone - 1)->name);
+ /* MMOP_ONLINE_MOVABLE */
+ zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE);
+ if (zone_shift) {
+ strcat(buf, " ");
+ strcat(buf, (zone + zone_shift)->name);
}
- return sprintf(buf, "%s\n", zone->name);
+ strcat(buf, "\n");
+
+ return strlen(buf);
}
static DEVICE_ATTR(valid_zones, 0444, show_valid_zones, NULL);
#endif
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 560751bad..5548f9686 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -56,6 +56,7 @@ static ssize_t node_read_meminfo(struct device *dev,
{
int n;
int nid = dev->id;
+ struct pglist_data *pgdat = NODE_DATA(nid);
struct sysinfo i;
si_meminfo_node(&i, nid);
@@ -74,16 +75,16 @@ static ssize_t node_read_meminfo(struct device *dev,
nid, K(i.totalram),
nid, K(i.freeram),
nid, K(i.totalram - i.freeram),
- nid, K(node_page_state(nid, NR_ACTIVE_ANON) +
- node_page_state(nid, NR_ACTIVE_FILE)),
- nid, K(node_page_state(nid, NR_INACTIVE_ANON) +
- node_page_state(nid, NR_INACTIVE_FILE)),
- nid, K(node_page_state(nid, NR_ACTIVE_ANON)),
- nid, K(node_page_state(nid, NR_INACTIVE_ANON)),
- nid, K(node_page_state(nid, NR_ACTIVE_FILE)),
- nid, K(node_page_state(nid, NR_INACTIVE_FILE)),
- nid, K(node_page_state(nid, NR_UNEVICTABLE)),
- nid, K(node_page_state(nid, NR_MLOCK)));
+ nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) +
+ node_page_state(pgdat, NR_ACTIVE_FILE)),
+ nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) +
+ node_page_state(pgdat, NR_INACTIVE_FILE)),
+ nid, K(node_page_state(pgdat, NR_ACTIVE_ANON)),
+ nid, K(node_page_state(pgdat, NR_INACTIVE_ANON)),
+ nid, K(node_page_state(pgdat, NR_ACTIVE_FILE)),
+ nid, K(node_page_state(pgdat, NR_INACTIVE_FILE)),
+ nid, K(node_page_state(pgdat, NR_UNEVICTABLE)),
+ nid, K(sum_zone_node_page_state(nid, NR_MLOCK)));
#ifdef CONFIG_HIGHMEM
n += sprintf(buf + n,
@@ -113,30 +114,34 @@ static ssize_t node_read_meminfo(struct device *dev,
"Node %d SUnreclaim: %8lu kB\n"
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
"Node %d AnonHugePages: %8lu kB\n"
+ "Node %d ShmemHugePages: %8lu kB\n"
+ "Node %d ShmemPmdMapped: %8lu kB\n"
#endif
,
- nid, K(node_page_state(nid, NR_FILE_DIRTY)),
- nid, K(node_page_state(nid, NR_WRITEBACK)),
- nid, K(node_page_state(nid, NR_FILE_PAGES)),
- nid, K(node_page_state(nid, NR_FILE_MAPPED)),
- nid, K(node_page_state(nid, NR_ANON_PAGES)),
+ nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
+ nid, K(node_page_state(pgdat, NR_WRITEBACK)),
+ nid, K(node_page_state(pgdat, NR_FILE_PAGES)),
+ nid, K(node_page_state(pgdat, NR_FILE_MAPPED)),
+ nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
nid, K(i.sharedram),
- nid, node_page_state(nid, NR_KERNEL_STACK) *
- THREAD_SIZE / 1024,
- nid, K(node_page_state(nid, NR_PAGETABLE)),
- nid, K(node_page_state(nid, NR_UNSTABLE_NFS)),
- nid, K(node_page_state(nid, NR_BOUNCE)),
- nid, K(node_page_state(nid, NR_WRITEBACK_TEMP)),
- nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) +
- node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
- nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)),
+ nid, sum_zone_node_page_state(nid, NR_KERNEL_STACK_KB),
+ nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)),
+ nid, K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
+ nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
+ nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
+ nid, K(sum_zone_node_page_state(nid, NR_SLAB_RECLAIMABLE) +
+ sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
+ nid, K(sum_zone_node_page_state(nid, NR_SLAB_RECLAIMABLE)),
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))
- , nid,
- K(node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
- HPAGE_PMD_NR));
+ nid, K(sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
+ nid, K(node_page_state(pgdat, NR_ANON_THPS) *
+ HPAGE_PMD_NR),
+ nid, K(node_page_state(pgdat, NR_SHMEM_THPS) *
+ HPAGE_PMD_NR),
+ nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) *
+ HPAGE_PMD_NR));
#else
- nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)));
+ nid, K(sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)));
#endif
n += hugetlb_report_node_meminfo(nid, buf + n);
return n;
@@ -155,12 +160,12 @@ static ssize_t node_read_numastat(struct device *dev,
"interleave_hit %lu\n"
"local_node %lu\n"
"other_node %lu\n",
- node_page_state(dev->id, NUMA_HIT),
- node_page_state(dev->id, NUMA_MISS),
- node_page_state(dev->id, NUMA_FOREIGN),
- node_page_state(dev->id, NUMA_INTERLEAVE_HIT),
- node_page_state(dev->id, NUMA_LOCAL),
- node_page_state(dev->id, NUMA_OTHER));
+ sum_zone_node_page_state(dev->id, NUMA_HIT),
+ sum_zone_node_page_state(dev->id, NUMA_MISS),
+ sum_zone_node_page_state(dev->id, NUMA_FOREIGN),
+ sum_zone_node_page_state(dev->id, NUMA_INTERLEAVE_HIT),
+ sum_zone_node_page_state(dev->id, NUMA_LOCAL),
+ sum_zone_node_page_state(dev->id, NUMA_OTHER));
}
static DEVICE_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);
@@ -168,12 +173,18 @@ static ssize_t node_read_vmstat(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nid = dev->id;
+ struct pglist_data *pgdat = NODE_DATA(nid);
int i;
int n = 0;
for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
n += sprintf(buf+n, "%s %lu\n", vmstat_text[i],
- node_page_state(nid, i));
+ sum_zone_node_page_state(nid, i));
+
+ for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
+ n += sprintf(buf+n, "%s %lu\n",
+ vmstat_text[i + NR_VM_ZONE_STAT_ITEMS],
+ node_page_state(pgdat, i));
return n;
}
@@ -359,7 +370,7 @@ int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
#define page_initialized(page) (page->lru.next)
-static int __init_refok get_nid_for_pfn(unsigned long pfn)
+static int __ref get_nid_for_pfn(unsigned long pfn)
{
struct page *page;
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 3657ac1cb..8e2e4757a 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -121,6 +121,7 @@ int pm_clk_add(struct device *dev, const char *con_id)
{
return __pm_clk_add(dev, con_id, NULL);
}
+EXPORT_SYMBOL_GPL(pm_clk_add);
/**
* pm_clk_add_clk - Start using a device clock for power management.
@@ -136,9 +137,42 @@ int pm_clk_add_clk(struct device *dev, struct clk *clk)
{
return __pm_clk_add(dev, NULL, clk);
}
+EXPORT_SYMBOL_GPL(pm_clk_add_clk);
/**
+ * of_pm_clk_add_clk - Start using a device clock for power management.
+ * @dev: Device whose clock is going to be used for power management.
+ * @name: Name of clock that is going to be used for power management.
+ *
+ * Add the clock described in the 'clocks' device-tree node that matches
+ * the 'name' provided, to the list of clocks used for the power
+ * management of @dev. On success, returns 0. Returns a negative error
+ * code if the clock is not found or cannot be added.
+ */
+int of_pm_clk_add_clk(struct device *dev, const char *name)
+{
+ struct clk *clk;
+ int ret;
+
+ if (!dev || !dev->of_node || !name)
+ return -EINVAL;
+
+ clk = of_clk_get_by_name(dev->of_node, name);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = pm_clk_add_clk(dev, clk);
+ if (ret) {
+ clk_put(clk);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_pm_clk_add_clk);
+
+/**
* of_pm_clk_add_clks - Start using device clock(s) for power management.
* @dev: Device whose clock(s) is going to be used for power management.
*
@@ -192,6 +226,7 @@ error:
return ret;
}
+EXPORT_SYMBOL_GPL(of_pm_clk_add_clks);
/**
* __pm_clk_remove - Destroy PM clock entry.
@@ -252,6 +287,7 @@ void pm_clk_remove(struct device *dev, const char *con_id)
__pm_clk_remove(ce);
}
+EXPORT_SYMBOL_GPL(pm_clk_remove);
/**
* pm_clk_remove_clk - Stop using a device clock for power management.
@@ -285,6 +321,7 @@ void pm_clk_remove_clk(struct device *dev, struct clk *clk)
__pm_clk_remove(ce);
}
+EXPORT_SYMBOL_GPL(pm_clk_remove_clk);
/**
* pm_clk_init - Initialize a device's list of power management clocks.
@@ -299,6 +336,7 @@ void pm_clk_init(struct device *dev)
if (psd)
INIT_LIST_HEAD(&psd->clock_list);
}
+EXPORT_SYMBOL_GPL(pm_clk_init);
/**
* pm_clk_create - Create and initialize a device's list of PM clocks.
@@ -311,6 +349,7 @@ int pm_clk_create(struct device *dev)
{
return dev_pm_get_subsys_data(dev);
}
+EXPORT_SYMBOL_GPL(pm_clk_create);
/**
* pm_clk_destroy - Destroy a device's list of power management clocks.
@@ -345,6 +384,7 @@ void pm_clk_destroy(struct device *dev)
__pm_clk_remove(ce);
}
}
+EXPORT_SYMBOL_GPL(pm_clk_destroy);
/**
* pm_clk_suspend - Disable clocks in a device's PM clock list.
@@ -375,6 +415,7 @@ int pm_clk_suspend(struct device *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(pm_clk_suspend);
/**
* pm_clk_resume - Enable clocks in a device's PM clock list.
@@ -400,6 +441,7 @@ int pm_clk_resume(struct device *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(pm_clk_resume);
/**
* pm_clk_notify - Notify routine for device addition and removal.
@@ -480,6 +522,7 @@ int pm_clk_runtime_suspend(struct device *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(pm_clk_runtime_suspend);
int pm_clk_runtime_resume(struct device *dev)
{
@@ -495,6 +538,7 @@ int pm_clk_runtime_resume(struct device *dev)
return pm_generic_runtime_resume(dev);
}
+EXPORT_SYMBOL_GPL(pm_clk_runtime_resume);
#else /* !CONFIG_PM_CLK */
@@ -598,3 +642,4 @@ void pm_clk_add_notifier(struct bus_type *bus,
clknb->nb.notifier_call = pm_clk_notify;
bus_register_notifier(bus, &clknb->nb);
}
+EXPORT_SYMBOL_GPL(pm_clk_add_notifier);
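
With the newly exported helpers, a module can manage a single named clock from its 'clocks'/'clock-names' properties without open-coding the lookup. A hypothetical probe path; the "bus" clock name is illustrative:

	static int example_probe(struct platform_device *pdev)
	{
		int ret;

		ret = pm_clk_create(&pdev->dev);	/* allocate the PM clock list */
		if (ret)
			return ret;

		/* Resolves "bus" via of_clk_get_by_name() and adds it. */
		ret = of_pm_clk_add_clk(&pdev->dev, "bus");
		if (ret)
			pm_clk_destroy(&pdev->dev);

		return ret;
	}
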
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index de23b648f..a1f2aff33 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -187,8 +187,7 @@ static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
struct gpd_link *link;
int ret = 0;
- if (genpd->status == GPD_STATE_ACTIVE
- || (genpd->prepared_count > 0 && genpd->suspend_power_off))
+ if (genpd->status == GPD_STATE_ACTIVE)
return 0;
/*
@@ -735,82 +734,24 @@ static int pm_genpd_prepare(struct device *dev)
mutex_lock(&genpd->lock);
- if (genpd->prepared_count++ == 0) {
+ if (genpd->prepared_count++ == 0)
genpd->suspended_count = 0;
- genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
- }
mutex_unlock(&genpd->lock);
- if (genpd->suspend_power_off)
- return 0;
-
- /*
- * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
- * so genpd_poweron() will return immediately, but if the device
- * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
- * to make it operational.
- */
- pm_runtime_resume(dev);
- __pm_runtime_disable(dev, false);
-
ret = pm_generic_prepare(dev);
if (ret) {
mutex_lock(&genpd->lock);
- if (--genpd->prepared_count == 0)
- genpd->suspend_power_off = false;
+ genpd->prepared_count--;
mutex_unlock(&genpd->lock);
- pm_runtime_enable(dev);
}
return ret;
}
/**
- * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
- * @dev: Device to suspend.
- *
- * Suspend a device under the assumption that its pm_domain field points to the
- * domain member of an object of type struct generic_pm_domain representing
- * a PM domain consisting of I/O devices.
- */
-static int pm_genpd_suspend(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
-}
-
-/**
- * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
- * @dev: Device to suspend.
- *
- * Carry out a late suspend of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a PM domain consisting of I/O devices.
- */
-static int pm_genpd_suspend_late(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev);
-}
-
-/**
* pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
* @dev: Device to suspend.
*
@@ -820,6 +761,7 @@ static int pm_genpd_suspend_late(struct device *dev)
static int pm_genpd_suspend_noirq(struct device *dev)
{
struct generic_pm_domain *genpd;
+ int ret;
dev_dbg(dev, "%s()\n", __func__);
@@ -827,11 +769,14 @@ static int pm_genpd_suspend_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- if (genpd->suspend_power_off
- || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
+ if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
return 0;
- genpd_stop_dev(genpd, dev);
+ if (genpd->dev_ops.stop && genpd->dev_ops.start) {
+ ret = pm_runtime_force_suspend(dev);
+ if (ret)
+ return ret;
+ }
/*
* Since all of the "noirq" callbacks are executed sequentially, it is
@@ -853,6 +798,7 @@ static int pm_genpd_suspend_noirq(struct device *dev)
static int pm_genpd_resume_noirq(struct device *dev)
{
struct generic_pm_domain *genpd;
+ int ret = 0;
dev_dbg(dev, "%s()\n", __func__);
@@ -860,8 +806,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- if (genpd->suspend_power_off
- || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
+ if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
return 0;
/*
@@ -872,93 +817,10 @@ static int pm_genpd_resume_noirq(struct device *dev)
pm_genpd_sync_poweron(genpd, true);
genpd->suspended_count--;
- return genpd_start_dev(genpd, dev);
-}
-
-/**
- * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
- * @dev: Device to resume.
- *
- * Carry out an early resume of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a power domain consisting of I/O
- * devices.
- */
-static int pm_genpd_resume_early(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev);
-}
-
-/**
- * pm_genpd_resume - Resume of device in an I/O PM domain.
- * @dev: Device to resume.
- *
- * Resume a device under the assumption that its pm_domain field points to the
- * domain member of an object of type struct generic_pm_domain representing
- * a power domain consisting of I/O devices.
- */
-static int pm_genpd_resume(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
-}
-
-/**
- * pm_genpd_freeze - Freezing a device in an I/O PM domain.
- * @dev: Device to freeze.
- *
- * Freeze a device under the assumption that its pm_domain field points to the
- * domain member of an object of type struct generic_pm_domain representing
- * a power domain consisting of I/O devices.
- */
-static int pm_genpd_freeze(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
-}
+ if (genpd->dev_ops.stop && genpd->dev_ops.start)
+ ret = pm_runtime_force_resume(dev);
-/**
- * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
- * @dev: Device to freeze.
- *
- * Carry out a late freeze of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a power domain consisting of I/O
- * devices.
- */
-static int pm_genpd_freeze_late(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev);
+ return ret;
}
/**
@@ -973,6 +835,7 @@ static int pm_genpd_freeze_late(struct device *dev)
static int pm_genpd_freeze_noirq(struct device *dev)
{
struct generic_pm_domain *genpd;
+ int ret = 0;
dev_dbg(dev, "%s()\n", __func__);
@@ -980,7 +843,10 @@ static int pm_genpd_freeze_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
+ if (genpd->dev_ops.stop && genpd->dev_ops.start)
+ ret = pm_runtime_force_suspend(dev);
+
+ return ret;
}
/**
@@ -993,6 +859,7 @@ static int pm_genpd_freeze_noirq(struct device *dev)
static int pm_genpd_thaw_noirq(struct device *dev)
{
struct generic_pm_domain *genpd;
+ int ret = 0;
dev_dbg(dev, "%s()\n", __func__);
@@ -1000,51 +867,10 @@ static int pm_genpd_thaw_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ?
- 0 : genpd_start_dev(genpd, dev);
-}
-
-/**
- * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
- * @dev: Device to thaw.
- *
- * Carry out an early thaw of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a power domain consisting of I/O
- * devices.
- */
-static int pm_genpd_thaw_early(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev);
-}
-
-/**
- * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
- * @dev: Device to thaw.
- *
- * Thaw a device under the assumption that its pm_domain field points to the
- * domain member of an object of type struct generic_pm_domain representing
- * a power domain consisting of I/O devices.
- */
-static int pm_genpd_thaw(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
+ if (genpd->dev_ops.stop && genpd->dev_ops.start)
+ ret = pm_runtime_force_resume(dev);
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
+ return ret;
}
/**
@@ -1057,6 +883,7 @@ static int pm_genpd_thaw(struct device *dev)
static int pm_genpd_restore_noirq(struct device *dev)
{
struct generic_pm_domain *genpd;
+ int ret = 0;
dev_dbg(dev, "%s()\n", __func__);
@@ -1072,30 +899,20 @@ static int pm_genpd_restore_noirq(struct device *dev)
* At this point suspended_count == 0 means we are being run for the
* first time for the given domain in the present cycle.
*/
- if (genpd->suspended_count++ == 0) {
+ if (genpd->suspended_count++ == 0)
/*
* The boot kernel might put the domain into an arbitrary state,
* so make it appear as powered off to pm_genpd_sync_poweron(),
* so that it tries to power it on in case it was really off.
*/
genpd->status = GPD_STATE_POWER_OFF;
- if (genpd->suspend_power_off) {
- /*
- * If the domain was off before the hibernation, make
- * sure it will be off going forward.
- */
- genpd_power_off(genpd, true);
-
- return 0;
- }
- }
-
- if (genpd->suspend_power_off)
- return 0;
pm_genpd_sync_poweron(genpd, true);
- return genpd_start_dev(genpd, dev);
+ if (genpd->dev_ops.stop && genpd->dev_ops.start)
+ ret = pm_runtime_force_resume(dev);
+
+ return ret;
}
/**
@@ -1110,7 +927,6 @@ static int pm_genpd_restore_noirq(struct device *dev)
static void pm_genpd_complete(struct device *dev)
{
struct generic_pm_domain *genpd;
- bool run_complete;
dev_dbg(dev, "%s()\n", __func__);
@@ -1118,20 +934,15 @@ static void pm_genpd_complete(struct device *dev)
if (IS_ERR(genpd))
return;
+ pm_generic_complete(dev);
+
mutex_lock(&genpd->lock);
- run_complete = !genpd->suspend_power_off;
- if (--genpd->prepared_count == 0)
- genpd->suspend_power_off = false;
+ genpd->prepared_count--;
+ if (!genpd->prepared_count)
+ genpd_queue_power_off_work(genpd);
mutex_unlock(&genpd->lock);
-
- if (run_complete) {
- pm_generic_complete(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- pm_request_idle(dev);
- }
}
/**
@@ -1173,18 +984,10 @@ EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
#else /* !CONFIG_PM_SLEEP */
#define pm_genpd_prepare NULL
-#define pm_genpd_suspend NULL
-#define pm_genpd_suspend_late NULL
#define pm_genpd_suspend_noirq NULL
-#define pm_genpd_resume_early NULL
#define pm_genpd_resume_noirq NULL
-#define pm_genpd_resume NULL
-#define pm_genpd_freeze NULL
-#define pm_genpd_freeze_late NULL
#define pm_genpd_freeze_noirq NULL
-#define pm_genpd_thaw_early NULL
#define pm_genpd_thaw_noirq NULL
-#define pm_genpd_thaw NULL
#define pm_genpd_restore_noirq NULL
#define pm_genpd_complete NULL
@@ -1455,12 +1258,14 @@ EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
* @genpd: PM domain object to initialize.
* @gov: PM domain governor to associate with the domain (may be NULL).
* @is_off: Initial value of the domain's power_is_off field.
+ *
+ * Returns 0 on successful initialization, else a negative error code.
*/
-void pm_genpd_init(struct generic_pm_domain *genpd,
- struct dev_power_governor *gov, bool is_off)
+int pm_genpd_init(struct generic_pm_domain *genpd,
+ struct dev_power_governor *gov, bool is_off)
{
if (IS_ERR_OR_NULL(genpd))
- return;
+ return -EINVAL;
INIT_LIST_HEAD(&genpd->master_links);
INIT_LIST_HEAD(&genpd->slave_links);
@@ -1476,24 +1281,24 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
genpd->domain.ops.runtime_resume = genpd_runtime_resume;
genpd->domain.ops.prepare = pm_genpd_prepare;
- genpd->domain.ops.suspend = pm_genpd_suspend;
- genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
+ genpd->domain.ops.suspend = pm_generic_suspend;
+ genpd->domain.ops.suspend_late = pm_generic_suspend_late;
genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
- genpd->domain.ops.resume_early = pm_genpd_resume_early;
- genpd->domain.ops.resume = pm_genpd_resume;
- genpd->domain.ops.freeze = pm_genpd_freeze;
- genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
+ genpd->domain.ops.resume_early = pm_generic_resume_early;
+ genpd->domain.ops.resume = pm_generic_resume;
+ genpd->domain.ops.freeze = pm_generic_freeze;
+ genpd->domain.ops.freeze_late = pm_generic_freeze_late;
genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
- genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
- genpd->domain.ops.thaw = pm_genpd_thaw;
- genpd->domain.ops.poweroff = pm_genpd_suspend;
- genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
+ genpd->domain.ops.thaw_early = pm_generic_thaw_early;
+ genpd->domain.ops.thaw = pm_generic_thaw;
+ genpd->domain.ops.poweroff = pm_generic_poweroff;
+ genpd->domain.ops.poweroff_late = pm_generic_poweroff_late;
genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
- genpd->domain.ops.restore_early = pm_genpd_resume_early;
- genpd->domain.ops.restore = pm_genpd_resume;
+ genpd->domain.ops.restore_early = pm_generic_restore_early;
+ genpd->domain.ops.restore = pm_generic_restore;
genpd->domain.ops.complete = pm_genpd_complete;
if (genpd->flags & GENPD_FLAG_PM_CLK) {
@@ -1518,6 +1323,8 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
mutex_lock(&gpd_list_lock);
list_add(&genpd->gpd_list_node, &gpd_list);
mutex_unlock(&gpd_list_lock);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(pm_genpd_init);
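
Since pm_genpd_init() now reports failure instead of returning void, genpd providers are expected to check its result. A minimal sketch, assuming a provider that embeds struct generic_pm_domain in its own state (pd->genpd is illustrative):

	ret = pm_genpd_init(&pd->genpd, NULL, true);	/* true: start powered off */
	if (ret)
		return ret;
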
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index 7c04c8773..df0c70963 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -402,6 +402,22 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
+static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
+ unsigned long *freq)
+{
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+
+ list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
+ if (temp_opp->available && temp_opp->rate >= *freq) {
+ opp = temp_opp;
+ *freq = opp->rate;
+ break;
+ }
+ }
+
+ return opp;
+}
+
/**
* dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
* @dev: device for which we do this operation
@@ -427,7 +443,6 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
unsigned long *freq)
{
struct opp_table *opp_table;
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
opp_rcu_lockdep_assert();
@@ -440,15 +455,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
if (IS_ERR(opp_table))
return ERR_CAST(opp_table);
- list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available && temp_opp->rate >= *freq) {
- opp = temp_opp;
- *freq = opp->rate;
- break;
- }
- }
-
- return opp;
+ return _find_freq_ceil(opp_table, freq);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
@@ -612,7 +619,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
return PTR_ERR(opp_table);
}
- old_opp = dev_pm_opp_find_freq_ceil(dev, &old_freq);
+ old_opp = _find_freq_ceil(opp_table, &old_freq);
if (!IS_ERR(old_opp)) {
ou_volt = old_opp->u_volt;
ou_volt_min = old_opp->u_volt_min;
@@ -622,7 +629,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
__func__, old_freq, PTR_ERR(old_opp));
}
- opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+ opp = _find_freq_ceil(opp_table, &freq);
if (IS_ERR(opp)) {
ret = PTR_ERR(opp);
dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index b74690418..82a081ea4 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -301,7 +301,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
int (*callback)(struct device *);
int retval;
- trace_rpm_idle(dev, rpmflags);
+ trace_rpm_idle_rcuidle(dev, rpmflags);
retval = rpm_check_suspend_allowed(dev);
if (retval < 0)
; /* Conditions are wrong. */
@@ -337,7 +337,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
dev->power.request_pending = true;
queue_work(pm_wq, &dev->power.work);
}
- trace_rpm_return_int(dev, _THIS_IP_, 0);
+ trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
return 0;
}
@@ -352,7 +352,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
wake_up_all(&dev->power.wait_queue);
out:
- trace_rpm_return_int(dev, _THIS_IP_, retval);
+ trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}
@@ -419,7 +419,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
struct device *parent = NULL;
int retval;
- trace_rpm_suspend(dev, rpmflags);
+ trace_rpm_suspend_rcuidle(dev, rpmflags);
repeat:
retval = rpm_check_suspend_allowed(dev);
@@ -549,7 +549,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
}
out:
- trace_rpm_return_int(dev, _THIS_IP_, retval);
+ trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
return retval;
@@ -601,7 +601,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
struct device *parent = NULL;
int retval = 0;
- trace_rpm_resume(dev, rpmflags);
+ trace_rpm_resume_rcuidle(dev, rpmflags);
repeat:
if (dev->power.runtime_error)
@@ -764,7 +764,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
spin_lock_irq(&dev->power.lock);
}
- trace_rpm_return_int(dev, _THIS_IP_, retval);
+ trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
return retval;
}
@@ -1045,10 +1045,14 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
*/
if (!parent->power.disable_depth
&& !parent->power.ignore_children
- && parent->power.runtime_status != RPM_ACTIVE)
+ && parent->power.runtime_status != RPM_ACTIVE) {
+ dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
+ dev_name(dev),
+ dev_name(parent));
error = -EBUSY;
- else if (dev->power.runtime_status == RPM_SUSPENDED)
+ } else if (dev->power.runtime_status == RPM_SUSPENDED) {
atomic_inc(&parent->power.child_count);
+ }
spin_unlock(&parent->power.lock);
@@ -1256,7 +1260,7 @@ void pm_runtime_allow(struct device *dev)
dev->power.runtime_auto = true;
if (atomic_dec_and_test(&dev->power.usage_count))
- rpm_idle(dev, RPM_AUTO);
+ rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
out:
spin_unlock_irq(&dev->power.lock);
@@ -1506,6 +1510,9 @@ int pm_runtime_force_resume(struct device *dev)
goto out;
}
+ if (!pm_runtime_status_suspended(dev))
+ goto out;
+
ret = pm_runtime_set_active(dev);
if (ret)
goto out;
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index a6975795e..efec10b49 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -11,7 +11,7 @@
#include <linux/export.h>
#include <linux/rtc.h>
-#include <asm/rtc.h>
+#include <linux/mc146818rtc.h>
#include "power.h"
@@ -103,7 +103,7 @@ static int set_magic_time(unsigned int user, unsigned int file, unsigned int dev
n /= 24;
time.tm_min = (n % 20) * 3;
n /= 20;
- set_rtc_time(&time);
+ mc146818_set_time(&time);
return n ? -1 : 0;
}
@@ -112,7 +112,7 @@ static unsigned int read_magic_time(void)
struct rtc_time time;
unsigned int val;
- get_rtc_time(&time);
+ mc146818_get_time(&time);
pr_info("RTC time: %2d:%02d:%02d, date: %02d/%02d/%02d\n",
time.tm_hour, time.tm_min, time.tm_sec,
time.tm_mon + 1, time.tm_mday, time.tm_year % 100);
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 5fb7718f2..62e4de2aa 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -334,10 +334,9 @@ void device_wakeup_arm_wake_irqs(void)
struct wakeup_source *ws;
rcu_read_lock();
- list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
- if (ws->wakeirq)
- dev_pm_arm_wake_irq(ws->wakeirq);
- }
+ list_for_each_entry_rcu(ws, &wakeup_sources, entry)
+ dev_pm_arm_wake_irq(ws->wakeirq);
+
rcu_read_unlock();
}
@@ -351,10 +350,9 @@ void device_wakeup_disarm_wake_irqs(void)
struct wakeup_source *ws;
rcu_read_lock();
- list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
- if (ws->wakeirq)
- dev_pm_disarm_wake_irq(ws->wakeirq);
- }
+ list_for_each_entry_rcu(ws, &wakeup_sources, entry)
+ dev_pm_disarm_wake_irq(ws->wakeirq);
+
rcu_read_unlock();
}
@@ -390,9 +388,7 @@ int device_wakeup_disable(struct device *dev)
return -EINVAL;
ws = device_wakeup_detach(dev);
- if (ws)
- wakeup_source_unregister(ws);
-
+ wakeup_source_unregister(ws);
return 0;
}
EXPORT_SYMBOL_GPL(device_wakeup_disable);
diff --git a/drivers/base/property.c b/drivers/base/property.c
index f38c21de2..43a36d68c 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -888,6 +888,34 @@ struct fwnode_handle *device_get_next_child_node(struct device *dev,
EXPORT_SYMBOL_GPL(device_get_next_child_node);
/**
+ * device_get_named_child_node - Return first matching named child node handle
+ * @dev: Device to find the named child node for.
+ * @childname: String to match child node name against.
+ */
+struct fwnode_handle *device_get_named_child_node(struct device *dev,
+ const char *childname)
+{
+ struct fwnode_handle *child;
+
+ /*
+ * Find first matching named child node of this device.
+ * For ACPI this will be a data-only sub-node.
+ */
+ device_for_each_child_node(dev, child) {
+ if (is_of_node(child)) {
+ if (!of_node_cmp(to_of_node(child)->name, childname))
+ return child;
+ } else if (is_acpi_data_node(child)) {
+ if (acpi_data_node_match(child, childname))
+ return child;
+ }
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(device_get_named_child_node);
+
+/**
* fwnode_handle_put - Drop reference to a device node
* @fwnode: Pointer to the device node to drop the reference to.
*
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index aa56af87d..b11af3f2c 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -404,6 +404,7 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
unsigned int new_base_reg, new_top_reg;
unsigned int min, max;
unsigned int max_dist;
+ unsigned int dist, best_dist = UINT_MAX;
max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
map->cache_word_size;
@@ -423,24 +424,41 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
&base_reg, &top_reg);
if (base_reg <= max && top_reg >= min) {
- new_base_reg = min(reg, base_reg);
- new_top_reg = max(reg, top_reg);
- } else {
- if (max < base_reg)
- node = node->rb_left;
+ if (reg < base_reg)
+ dist = base_reg - reg;
+ else if (reg > top_reg)
+ dist = reg - top_reg;
else
- node = node->rb_right;
-
- continue;
+ dist = 0;
+ if (dist < best_dist) {
+ rbnode = rbnode_tmp;
+ best_dist = dist;
+ new_base_reg = min(reg, base_reg);
+ new_top_reg = max(reg, top_reg);
+ }
}
- ret = regcache_rbtree_insert_to_block(map, rbnode_tmp,
+ /*
+ * Keep looking; we want to choose the closest block,
+ * otherwise we might end up creating overlapping
+ * blocks, which breaks the rbtree.
+ */
+ if (reg < base_reg)
+ node = node->rb_left;
+ else if (reg > top_reg)
+ node = node->rb_right;
+ else
+ break;
+ }
+
+ if (rbnode) {
+ ret = regcache_rbtree_insert_to_block(map, rbnode,
new_base_reg,
new_top_reg, reg,
value);
if (ret)
return ret;
- rbtree_ctx->cached_rbnode = rbnode_tmp;
+ rbtree_ctx->cached_rbnode = rbnode;
return 0;
}
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index df7ff7290..4e582561e 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -38,10 +38,11 @@ static int regcache_hw_init(struct regmap *map)
/* calculate the size of reg_defaults */
for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++)
- if (!regmap_volatile(map, i * map->reg_stride))
+ if (regmap_readable(map, i * map->reg_stride) &&
+ !regmap_volatile(map, i * map->reg_stride))
count++;
- /* all registers are volatile, so just bypass */
+ /* all registers are unreadable or volatile, so just bypass */
if (!count) {
map->cache_bypass = true;
return 0;
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index 1a8ec3b2b..4735318f4 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -259,7 +259,7 @@ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
{
if (i2c_check_functionality(i2c->adapter, I2C_FUNC_I2C))
return &regmap_i2c;
- else if (config->reg_bits == 8 &&
+ else if (config->val_bits == 8 && config->reg_bits == 8 &&
i2c_check_functionality(i2c->adapter,
I2C_FUNC_SMBUS_I2C_BLOCK))
return &regmap_i2c_smbus_i2c_block;
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 26f799e71..ec262476d 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -268,13 +268,16 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
bool handled = false;
u32 reg;
+ if (chip->handle_pre_irq)
+ chip->handle_pre_irq(chip->irq_drv_data);
+
if (chip->runtime_pm) {
ret = pm_runtime_get_sync(map->dev);
if (ret < 0) {
dev_err(map->dev, "IRQ thread failed to resume: %d\n",
ret);
pm_runtime_put(map->dev);
- return IRQ_NONE;
+ goto exit;
}
}
@@ -296,7 +299,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
if (ret != 0) {
dev_err(map->dev, "Failed to read IRQ status: %d\n",
ret);
- return IRQ_NONE;
+ goto exit;
}
for (i = 0; i < data->chip->num_regs; i++) {
@@ -312,7 +315,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
break;
default:
BUG();
- return IRQ_NONE;
+ goto exit;
}
}
@@ -329,7 +332,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
ret);
if (chip->runtime_pm)
pm_runtime_put(map->dev);
- return IRQ_NONE;
+ goto exit;
}
}
}
@@ -365,6 +368,10 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
if (chip->runtime_pm)
pm_runtime_put(map->dev);
+exit:
+ if (chip->handle_post_irq)
+ chip->handle_post_irq(chip->irq_drv_data);
+
if (handled)
return IRQ_HANDLED;
else
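
The new hooks bracket the whole threaded handler: handle_pre_irq() runs before any status register is read, and handle_post_irq() runs on every exit path, including the early error returns that previously skipped cleanup. A hypothetical chip description; the pause/resume helpers are invented for illustration:

	static int example_pre_irq(void *irq_drv_data)
	{
		/* e.g. quiesce the device before status registers are read */
		return example_pmic_pause(irq_drv_data);
	}

	static int example_post_irq(void *irq_drv_data)
	{
		/* runs even when the handler bails out early */
		return example_pmic_resume(irq_drv_data);
	}

	static struct regmap_irq_chip example_irq_chip = {
		.name		 = "example-pmic",
		.handle_pre_irq	 = example_pre_irq,
		.handle_post_irq = example_post_irq,
		/* .irq_drv_data is filled in by the driver before registration */
	};
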
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index df2d2ef5d..e964d0688 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1474,6 +1474,12 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
ret = map->bus->write(map->bus_context, buf, len);
kfree(buf);
+ } else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
+ /* regcache_drop_region() takes a lock that we already have,
+ * so call map->cache_ops->drop() directly
+ */
+ if (map->cache_ops && map->cache_ops->drop)
+ map->cache_ops->drop(map, reg, reg + 1);
}
trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
@@ -1777,8 +1783,6 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
size_t val_bytes = map->format.val_bytes;
size_t total_size = val_bytes * val_count;
- if (map->bus && !map->format.parse_inplace)
- return -EINVAL;
if (!IS_ALIGNED(reg, map->reg_stride))
return -EINVAL;
@@ -1789,7 +1793,8 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
*
* The first if block is used for memory mapped io. It does not allow
* val_bytes of 3 for example.
- * The second one is used for busses which do not have this limitation
+ * The second one is for busses that do not provide raw I/O.
+ * The third one is used for busses which do not have these limitations
* and can write arbitrary value lengths.
*/
if (!map->bus) {
@@ -1825,6 +1830,32 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
}
out:
map->unlock(map->lock_arg);
+ } else if (map->bus && !map->format.parse_inplace) {
+ const u8 *u8 = val;
+ const u16 *u16 = val;
+ const u32 *u32 = val;
+ unsigned int ival;
+
+ for (i = 0; i < val_count; i++) {
+ switch (map->format.val_bytes) {
+ case 4:
+ ival = u32[i];
+ break;
+ case 2:
+ ival = u16[i];
+ break;
+ case 1:
+ ival = u8[i];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_write(map, reg + (i * map->reg_stride),
+ ival);
+ if (ret)
+ return ret;
+ }
} else if (map->use_single_write ||
(map->max_raw_write && map->max_raw_write < total_size)) {
int chunk_stride = map->reg_stride;
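
The new branch above makes regmap_bulk_write() usable on buses that lack a parse_inplace formatter: instead of returning -EINVAL, it decomposes the buffer into individual regmap_write() calls. A hypothetical caller, with the register base and data invented for illustration:

	static const u16 coefs[4] = { 0x0001, 0x0203, 0x0405, 0x0607 };

	/* Previously -EINVAL on such buses; now issues four single writes. */
	ret = regmap_bulk_write(map, EXAMPLE_COEF_BASE, coefs, ARRAY_SIZE(coefs));
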
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 8b7d7f8e5..df3c97cb4 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -77,6 +77,14 @@ static DEVICE_ATTR_RO(book_siblings);
static DEVICE_ATTR_RO(book_siblings_list);
#endif
+#ifdef CONFIG_SCHED_DRAWER
+define_id_show_func(drawer_id);
+static DEVICE_ATTR_RO(drawer_id);
+define_siblings_show_func(drawer_siblings, drawer_cpumask);
+static DEVICE_ATTR_RO(drawer_siblings);
+static DEVICE_ATTR_RO(drawer_siblings_list);
+#endif
+
static struct attribute *default_attrs[] = {
&dev_attr_physical_package_id.attr,
&dev_attr_core_id.attr,
@@ -89,6 +97,11 @@ static struct attribute *default_attrs[] = {
&dev_attr_book_siblings.attr,
&dev_attr_book_siblings_list.attr,
#endif
+#ifdef CONFIG_SCHED_DRAWER
+ &dev_attr_drawer_id.attr,
+ &dev_attr_drawer_siblings.attr,
+ &dev_attr_drawer_siblings_list.attr,
+#endif
NULL
};
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index efdc2ae84..b5c48a8d4 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -76,9 +76,16 @@ config BCMA_PFLASH
default y
config BCMA_SFLASH
- bool
- depends on BCMA_DRIVER_MIPS
+ bool "ChipCommon-attached serial flash support"
+ depends on BCMA_HOST_SOC
default y
+ help
+ Some cheap devices have serial flash connected to the ChipCommon
+ instead of an independent SPI controller. It requires a separate
+ driver that implements the ChipCommon-specific interface communication.
+
+ Enabling this symbol will let bcma recognize serial flash and register
+ it as a platform device.
config BCMA_NFLASH
bool
diff --git a/drivers/bcma/driver_chipcommon_b.c b/drivers/bcma/driver_chipcommon_b.c
index c20b5f4ff..57f10b58b 100644
--- a/drivers/bcma/driver_chipcommon_b.c
+++ b/drivers/bcma/driver_chipcommon_b.c
@@ -33,11 +33,12 @@ static bool bcma_wait_reg(struct bcma_bus *bus, void __iomem *addr, u32 mask,
void bcma_chipco_b_mii_write(struct bcma_drv_cc_b *ccb, u32 offset, u32 value)
{
struct bcma_bus *bus = ccb->core->bus;
+ void __iomem *mii = ccb->mii;
- writel(offset, ccb->mii + 0x00);
- bcma_wait_reg(bus, ccb->mii + 0x00, 0x0100, 0x0000, 100);
- writel(value, ccb->mii + 0x04);
- bcma_wait_reg(bus, ccb->mii + 0x00, 0x0100, 0x0000, 100);
+ writel(offset, mii + BCMA_CCB_MII_MNG_CTL);
+ bcma_wait_reg(bus, mii + BCMA_CCB_MII_MNG_CTL, 0x0100, 0x0000, 100);
+ writel(value, mii + BCMA_CCB_MII_MNG_CMD_DATA);
+ bcma_wait_reg(bus, mii + BCMA_CCB_MII_MNG_CTL, 0x0100, 0x0000, 100);
}
EXPORT_SYMBOL_GPL(bcma_chipco_b_mii_write);
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index c04bd9bc3..0c76d4016 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -300,20 +300,20 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
* Process a single bvec of a bio.
*/
static int brd_do_bvec(struct brd_device *brd, struct page *page,
- unsigned int len, unsigned int off, int rw,
+ unsigned int len, unsigned int off, bool is_write,
sector_t sector)
{
void *mem;
int err = 0;
- if (rw != READ) {
+ if (is_write) {
err = copy_to_brd_setup(brd, sector, len);
if (err)
goto out;
}
mem = kmap_atomic(page);
- if (rw == READ) {
+ if (!is_write) {
copy_from_brd(mem + off, brd, sector, len);
flush_dcache_page(page);
} else {
@@ -330,7 +330,6 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
{
struct block_device *bdev = bio->bi_bdev;
struct brd_device *brd = bdev->bd_disk->private_data;
- int rw;
struct bio_vec bvec;
sector_t sector;
struct bvec_iter iter;
@@ -339,7 +338,7 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
goto io_error;
- if (unlikely(bio->bi_rw & REQ_DISCARD)) {
+ if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) ||
bio->bi_iter.bi_size & ~PAGE_MASK)
goto io_error;
@@ -347,16 +346,12 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
goto out;
}
- rw = bio_rw(bio);
- if (rw == READA)
- rw = READ;
-
bio_for_each_segment(bvec, bio, iter) {
unsigned int len = bvec.bv_len;
int err;
- err = brd_do_bvec(brd, bvec.bv_page, len,
- bvec.bv_offset, rw, sector);
+ err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
+ op_is_write(bio_op(bio)), sector);
if (err)
goto io_error;
sector += len >> SECTOR_SHIFT;
@@ -371,17 +366,17 @@ io_error:
}
static int brd_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, int rw)
+ struct page *page, bool is_write)
{
struct brd_device *brd = bdev->bd_disk->private_data;
- int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, rw, sector);
- page_endio(page, rw & WRITE, err);
+ int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, is_write, sector);
+ page_endio(page, is_write, err);
return err;
}
#ifdef CONFIG_BLK_DEV_RAM_DAX
static long brd_direct_access(struct block_device *bdev, sector_t sector,
- void __pmem **kaddr, pfn_t *pfn, long size)
+ void **kaddr, pfn_t *pfn, long size)
{
struct brd_device *brd = bdev->bd_disk->private_data;
struct page *page;
@@ -391,7 +386,7 @@ static long brd_direct_access(struct block_device *bdev, sector_t sector,
page = brd_insert_page(brd, sector);
if (!page)
return -ENOSPC;
- *kaddr = (void __pmem *)page_address(page);
+ *kaddr = page_address(page);
*pfn = page_to_pfn_t(page);
return PAGE_SIZE;
@@ -509,7 +504,9 @@ static struct brd_device *brd_alloc(int i)
blk_queue_max_discard_sectors(brd->brd_queue, UINT_MAX);
brd->brd_queue->limits.discard_zeroes_data = 1;
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);
-
+#ifdef CONFIG_BLK_DEV_RAM_DAX
+ queue_flag_set_unlocked(QUEUE_FLAG_DAX, brd->brd_queue);
+#endif
disk = brd->brd_disk = alloc_disk(max_part);
if (!disk)
goto out_free_queue;
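
The brd conversion tracks the 4.8 block-layer split of a request's operation from its flags: direction is now derived from the REQ_OP_* value rather than from a bare rw integer. A minimal sketch of the new decoding, with the wrapper name invented for illustration:

	static int example_dispatch(struct brd_device *brd, struct bio *bio,
				    struct bio_vec *bvec, sector_t sector)
	{
		/* true for write-type ops (REQ_OP_WRITE, ...), false for reads */
		bool is_write = op_is_write(bio_op(bio));

		return brd_do_bvec(brd, bvec->bv_page, bvec->bv_len,
				   bvec->bv_offset, is_write, sector);
	}
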
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 63c206468..db9d6bb63 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1951,7 +1951,6 @@ static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
if (cciss_create_ld_sysfs_entry(h, drv_index))
goto cleanup_queue;
disk->private_data = h->drv[drv_index];
- disk->driverfs_dev = &h->drv[drv_index]->dev;
/* Set up queue information */
blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
@@ -1973,7 +1972,7 @@ static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
/* allows the interrupt handler to start the queue */
wmb();
h->drv[drv_index]->queue = disk->queue;
- add_disk(disk);
+ device_add_disk(&h->drv[drv_index]->dev, disk);
return 0;
cleanup_queue:
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 10459a145..2d3d50ab7 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -27,7 +27,6 @@
#include <linux/crc32c.h>
#include <linux/drbd.h>
#include <linux/drbd_limits.h>
-#include <linux/dynamic_debug.h>
#include "drbd_int.h"
@@ -137,19 +136,19 @@ void wait_until_done_or_force_detached(struct drbd_device *device, struct drbd_b
static int _drbd_md_sync_page_io(struct drbd_device *device,
struct drbd_backing_dev *bdev,
- sector_t sector, int rw)
+ sector_t sector, int op)
{
struct bio *bio;
/* we do all our meta data IO in aligned 4k blocks. */
const int size = 4096;
- int err;
+ int err, op_flags = 0;
device->md_io.done = 0;
device->md_io.error = -ENODEV;
- if ((rw & WRITE) && !test_bit(MD_NO_FUA, &device->flags))
- rw |= REQ_FUA | REQ_FLUSH;
- rw |= REQ_SYNC | REQ_NOIDLE;
+ if ((op == REQ_OP_WRITE) && !test_bit(MD_NO_FUA, &device->flags))
+ op_flags |= REQ_FUA | REQ_PREFLUSH;
+ op_flags |= REQ_SYNC | REQ_NOIDLE;
bio = bio_alloc_drbd(GFP_NOIO);
bio->bi_bdev = bdev->md_bdev;
@@ -159,9 +158,9 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
goto out;
bio->bi_private = device;
bio->bi_end_io = drbd_md_endio;
- bio->bi_rw = rw;
+ bio_set_op_attrs(bio, op, op_flags);
- if (!(rw & WRITE) && device->state.disk == D_DISKLESS && device->ldev == NULL)
+ if (op != REQ_OP_WRITE && device->state.disk == D_DISKLESS && device->ldev == NULL)
/* special case, drbd_md_read() during drbd_adm_attach(): no get_ldev */
;
else if (!get_ldev_if_state(device, D_ATTACHING)) {
@@ -174,10 +173,10 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
bio_get(bio); /* one bio_put() is in the completion handler */
atomic_inc(&device->md_io.in_use); /* drbd_md_put_buffer() is in the completion handler */
device->md_io.submit_jif = jiffies;
- if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
+ if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
bio_io_error(bio);
else
- submit_bio(rw, bio);
+ submit_bio(bio);
wait_until_done_or_force_detached(device, bdev, &device->md_io.done);
if (!bio->bi_error)
err = device->md_io.error;
@@ -188,7 +187,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
}
int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev,
- sector_t sector, int rw)
+ sector_t sector, int op)
{
int err;
D_ASSERT(device, atomic_read(&device->md_io.in_use) == 1);
@@ -197,19 +196,21 @@ int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bd
dynamic_drbd_dbg(device, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n",
current->comm, current->pid, __func__,
- (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ",
+ (unsigned long long)sector, (op == REQ_OP_WRITE) ? "WRITE" : "READ",
(void*)_RET_IP_ );
if (sector < drbd_md_first_sector(bdev) ||
sector + 7 > drbd_md_last_sector(bdev))
drbd_alert(device, "%s [%d]:%s(,%llus,%s) out of range md access!\n",
current->comm, current->pid, __func__,
- (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
+ (unsigned long long)sector,
+ (op == REQ_OP_WRITE) ? "WRITE" : "READ");
- err = _drbd_md_sync_page_io(device, bdev, sector, rw);
+ err = _drbd_md_sync_page_io(device, bdev, sector, op);
if (err) {
drbd_err(device, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n",
- (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", err);
+ (unsigned long long)sector,
+ (op == REQ_OP_WRITE) ? "WRITE" : "READ", err);
}
return err;
}
@@ -256,7 +257,7 @@ bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval
unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
- D_ASSERT(device, (unsigned)(last - first) <= 1);
+ D_ASSERT(device, first <= last);
D_ASSERT(device, atomic_read(&device->local_cnt) > 0);
/* FIXME figure out a fast path for bios crossing AL extent boundaries */
@@ -339,6 +340,8 @@ static int __al_write_transaction(struct drbd_device *device, struct al_transact
i = 0;
+ drbd_bm_reset_al_hints(device);
+
/* Even though no one can start to change this list
* once we set the LC_LOCKED -- from drbd_al_begin_io(),
* lc_try_lock_for_transaction() --, someone may still
@@ -768,10 +771,18 @@ static bool lazy_bitmap_update_due(struct drbd_device *device)
static void maybe_schedule_on_disk_bitmap_update(struct drbd_device *device, bool rs_done)
{
- if (rs_done)
- set_bit(RS_DONE, &device->flags);
- /* and also set RS_PROGRESS below */
- else if (!lazy_bitmap_update_due(device))
+ if (rs_done) {
+ struct drbd_connection *connection = first_peer_device(device)->connection;
+ if (connection->agreed_pro_version <= 95 ||
+ is_sync_target_state(device->state.conn))
+ set_bit(RS_DONE, &device->flags);
+ /* and also set RS_PROGRESS below */
+
+ /* Else: rather wait for explicit notification via receive_state,
+ * to avoid uuids-rotated-too-fast causing full resync
+ * in next handshake, in case the replication link breaks
+ * at the most unfortunate time... */
+ } else if (!lazy_bitmap_update_due(device))
return;
drbd_device_post_work(device, RS_PROGRESS);
@@ -830,6 +841,13 @@ static int update_sync_bits(struct drbd_device *device,
return count;
}
+static bool plausible_request_size(int size)
+{
+ return size > 0
+ && size <= DRBD_MAX_BATCH_BIO_SIZE
+ && IS_ALIGNED(size, 512);
+}
+
/* clear the bit corresponding to the piece of storage in question:
* size bytes of data starting from sector. Only clear bits of the affected
* one or more _aligned_ BM_BLOCK_SIZE blocks.
@@ -845,11 +863,11 @@ int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
unsigned long count = 0;
sector_t esector, nr_sectors;
- /* This would be an empty REQ_FLUSH, be silent. */
+ /* This would be an empty REQ_PREFLUSH, be silent. */
if ((mode == SET_OUT_OF_SYNC) && size == 0)
return 0;
- if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_DISCARD_SIZE) {
+ if (!plausible_request_size(size)) {
drbd_err(device, "%s: sector=%llus size=%d nonsense!\n",
drbd_change_sync_fname[mode],
(unsigned long long)sector, size);
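The drbd_actlog changes above follow the 4.8 block-layer split of the old combined rw argument into a REQ_OP_* opcode plus separate request flags. A condensed sketch of the new submission idiom, assuming a bio that is already populated:

#include <linux/bio.h>

static void sketch_submit(struct bio *bio, int op, bool use_fua)
{
        int op_flags = REQ_SYNC | REQ_NOIDLE;

        if (op == REQ_OP_WRITE && use_fua)
                op_flags |= REQ_FUA | REQ_PREFLUSH;     /* REQ_FLUSH was renamed */
        bio_set_op_attrs(bio, op, op_flags);            /* fills bio->bi_opf */
        submit_bio(bio);                                /* no rw argument anymore */
}

Tests like (rw & WRITE) correspondingly become op == REQ_OP_WRITE, as in _drbd_md_sync_page_io() above.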
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 92d6fc020..ab62b81c2 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -96,6 +96,13 @@ struct drbd_bitmap {
struct page **bm_pages;
spinlock_t bm_lock;
+ /* exclusively to be used by __al_write_transaction(),
+ * drbd_bm_mark_for_writeout() and
+ * drbd_bm_write_hinted() -> bm_rw() called from there.
+ */
+ unsigned int n_bitmap_hints;
+ unsigned int al_bitmap_hints[AL_UPDATES_PER_TRANSACTION];
+
/* see LIMITATIONS: above */
unsigned long bm_set; /* nr of set bits; THINK maybe atomic_t? */
@@ -242,6 +249,11 @@ static void bm_set_page_need_writeout(struct page *page)
set_bit(BM_PAGE_NEED_WRITEOUT, &page_private(page));
}
+void drbd_bm_reset_al_hints(struct drbd_device *device)
+{
+ device->bitmap->n_bitmap_hints = 0;
+}
+
/**
* drbd_bm_mark_for_writeout() - mark a page with a "hint" to be considered for writeout
* @device: DRBD device.
@@ -253,6 +265,7 @@ static void bm_set_page_need_writeout(struct page *page)
*/
void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr)
{
+ struct drbd_bitmap *b = device->bitmap;
struct page *page;
if (page_nr >= device->bitmap->bm_number_of_pages) {
drbd_warn(device, "BAD: page_nr: %u, number_of_pages: %u\n",
@@ -260,7 +273,9 @@ void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr)
return;
}
page = device->bitmap->bm_pages[page_nr];
- set_bit(BM_PAGE_HINT_WRITEOUT, &page_private(page));
+ BUG_ON(b->n_bitmap_hints >= ARRAY_SIZE(b->al_bitmap_hints));
+ if (!test_and_set_bit(BM_PAGE_HINT_WRITEOUT, &page_private(page)))
+ b->al_bitmap_hints[b->n_bitmap_hints++] = page_nr;
}
static int bm_test_page_unchanged(struct page *page)
@@ -427,8 +442,7 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
}
/*
- * called on driver init only. TODO call when a device is created.
- * allocates the drbd_bitmap, and stores it in device->bitmap.
+ * allocates the drbd_bitmap and stores it in device->bitmap.
*/
int drbd_bm_init(struct drbd_device *device)
{
@@ -633,7 +647,8 @@ int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bi
unsigned long bits, words, owords, obits;
unsigned long want, have, onpages; /* number of pages */
struct page **npages, **opages = NULL;
- int err = 0, growing;
+ int err = 0;
+ bool growing;
if (!expect(b))
return -ENOMEM;
@@ -980,7 +995,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
struct drbd_bitmap *b = device->bitmap;
struct page *page;
unsigned int len;
- unsigned int rw = (ctx->flags & BM_AIO_READ) ? READ : WRITE;
+ unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE;
sector_t on_disk_sector =
device->ldev->md.md_offset + device->ldev->md.bm_offset;
@@ -1011,12 +1026,12 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
bio_add_page(bio, page, len, 0);
bio->bi_private = ctx;
bio->bi_end_io = drbd_bm_endio;
+ bio_set_op_attrs(bio, op, 0);
- if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
- bio->bi_rw |= rw;
+ if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
bio_io_error(bio);
} else {
- submit_bio(rw, bio);
+ submit_bio(bio);
/* this should not count as user activity and cause the
* resync to throttle -- see drbd_rs_should_slow_down(). */
atomic_add(len >> 9, &device->rs_sect_ev);
@@ -1030,7 +1045,7 @@ static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned
{
struct drbd_bm_aio_ctx *ctx;
struct drbd_bitmap *b = device->bitmap;
- int num_pages, i, count = 0;
+ unsigned int num_pages, i, count = 0;
unsigned long now;
char ppb[10];
int err = 0;
@@ -1078,16 +1093,37 @@ static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned
now = jiffies;
/* let the layers below us try to merge these bios... */
- for (i = 0; i < num_pages; i++) {
- /* ignore completely unchanged pages */
- if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
- break;
- if (!(flags & BM_AIO_READ)) {
- if ((flags & BM_AIO_WRITE_HINTED) &&
- !test_and_clear_bit(BM_PAGE_HINT_WRITEOUT,
- &page_private(b->bm_pages[i])))
- continue;
+ if (flags & BM_AIO_READ) {
+ for (i = 0; i < num_pages; i++) {
+ atomic_inc(&ctx->in_flight);
+ bm_page_io_async(ctx, i);
+ ++count;
+ cond_resched();
+ }
+ } else if (flags & BM_AIO_WRITE_HINTED) {
+ /* ASSERT: BM_AIO_WRITE_ALL_PAGES is not set. */
+ unsigned int hint;
+ for (hint = 0; hint < b->n_bitmap_hints; hint++) {
+ i = b->al_bitmap_hints[hint];
+ if (i >= num_pages) /* == -1U: no hint here. */
+ continue;
+ /* Several AL-extents may point to the same page. */
+ if (!test_and_clear_bit(BM_PAGE_HINT_WRITEOUT,
+ &page_private(b->bm_pages[i])))
+ continue;
+ /* Has it even changed? */
+ if (bm_test_page_unchanged(b->bm_pages[i]))
+ continue;
+ atomic_inc(&ctx->in_flight);
+ bm_page_io_async(ctx, i);
+ ++count;
+ }
+ } else {
+ for (i = 0; i < num_pages; i++) {
+ /* ignore completely unchanged pages */
+ if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
+ break;
if (!(flags & BM_AIO_WRITE_ALL_PAGES) &&
bm_test_page_unchanged(b->bm_pages[i])) {
dynamic_drbd_dbg(device, "skipped bm write for idx %u\n", i);
@@ -1100,11 +1136,11 @@ static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned
dynamic_drbd_dbg(device, "skipped bm lazy write for idx %u\n", i);
continue;
}
+ atomic_inc(&ctx->in_flight);
+ bm_page_io_async(ctx, i);
+ ++count;
+ cond_resched();
}
- atomic_inc(&ctx->in_flight);
- bm_page_io_async(ctx, i);
- ++count;
- cond_resched();
}
/*
@@ -1121,10 +1157,14 @@ static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned
kref_put(&ctx->kref, &drbd_bm_aio_ctx_destroy);
/* summary for global bitmap IO */
- if (flags == 0)
- drbd_info(device, "bitmap %s of %u pages took %lu jiffies\n",
- (flags & BM_AIO_READ) ? "READ" : "WRITE",
- count, jiffies - now);
+ if (flags == 0) {
+ unsigned int ms = jiffies_to_msecs(jiffies - now);
+ if (ms > 5) {
+ drbd_info(device, "bitmap %s of %u pages took %u ms\n",
+ (flags & BM_AIO_READ) ? "READ" : "WRITE",
+ count, ms);
+ }
+ }
if (ctx->error) {
drbd_alert(device, "we had at least one MD IO ERROR during bitmap IO\n");
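The bitmap hunks above replace a full page scan for BM_PAGE_HINT_WRITEOUT with an explicit hint list: marking records each page number at most once, and the BM_AIO_WRITE_HINTED path replays only that list. A small userspace model of the bookkeeping (locking and bounds checks omitted; the real code uses test_and_set_bit() and a BUG_ON()):

#include <stdbool.h>
#include <stdio.h>

#define N_PAGES   8
#define MAX_HINTS 4

static unsigned int hints[MAX_HINTS], n_hints;
static bool hinted[N_PAGES];    /* models BM_PAGE_HINT_WRITEOUT */

static void mark_for_writeout(unsigned int page_nr)
{
        if (hinted[page_nr])    /* record each page at most once */
                return;
        hinted[page_nr] = true;
        hints[n_hints++] = page_nr;
}

int main(void)
{
        unsigned int i;

        mark_for_writeout(3);
        mark_for_writeout(3);   /* duplicate: several AL extents, same page */
        mark_for_writeout(5);

        for (i = 0; i < n_hints; i++)   /* replay instead of scanning all pages */
                printf("write out page %u\n", hints[i]);
        return 0;
}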
diff --git a/drivers/block/drbd/drbd_debugfs.c b/drivers/block/drbd/drbd_debugfs.c
index 4de95bbff..de5c3ee8a 100644
--- a/drivers/block/drbd/drbd_debugfs.c
+++ b/drivers/block/drbd/drbd_debugfs.c
@@ -237,14 +237,9 @@ static void seq_print_peer_request_flags(struct seq_file *m, struct drbd_peer_re
seq_print_rq_state_bit(m, f & EE_SEND_WRITE_ACK, &sep, "C");
seq_print_rq_state_bit(m, f & EE_MAY_SET_IN_SYNC, &sep, "set-in-sync");
- if (f & EE_IS_TRIM) {
- seq_putc(m, sep);
- sep = '|';
- if (f & EE_IS_TRIM_USE_ZEROOUT)
- seq_puts(m, "zero-out");
- else
- seq_puts(m, "trim");
- }
+ if (f & EE_IS_TRIM)
+ __seq_print_rq_state_bit(m, f & EE_IS_TRIM_USE_ZEROOUT, &sep, "zero-out", "trim");
+ seq_print_rq_state_bit(m, f & EE_WRITE_SAME, &sep, "write-same");
seq_putc(m, '\n');
}
@@ -430,9 +425,6 @@ static int drbd_single_open(struct file *file, int (*show)(struct seq_file *, vo
/* Are we still linked,
* or has debugfs_remove() already been called? */
parent = file->f_path.dentry->d_parent;
- /* not sure if this can happen: */
- if (!parent || d_really_is_negative(parent))
- goto out;
/* serialize with d_delete() */
inode_lock(d_inode(parent));
/* Make sure the object is still alive */
@@ -445,7 +437,6 @@ static int drbd_single_open(struct file *file, int (*show)(struct seq_file *, vo
if (ret)
kref_put(kref, release);
}
-out:
return ret;
}
@@ -908,7 +899,7 @@ static int drbd_version_open(struct inode *inode, struct file *file)
return single_open(file, drbd_version_show, NULL);
}
-static struct file_operations drbd_version_fops = {
+static const struct file_operations drbd_version_fops = {
.owner = THIS_MODULE,
.open = drbd_version_open,
.llseek = seq_lseek,
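Constifying drbd_version_fops above lets the operations table live in read-only memory; nothing in the seq_file machinery writes to it. The same pattern for any read-only single_open file, sketched with illustrative names:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static int sketch_show(struct seq_file *m, void *v)
{
        seq_puts(m, "ok\n");
        return 0;
}

static int sketch_open(struct inode *inode, struct file *file)
{
        return single_open(file, sketch_show, NULL);
}

static const struct file_operations sketch_fops = {
        .owner   = THIS_MODULE,
        .open    = sketch_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};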
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 7a1cf7eaa..4cb8f21ff 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -41,6 +41,7 @@
#include <linux/backing-dev.h>
#include <linux/genhd.h>
#include <linux/idr.h>
+#include <linux/dynamic_debug.h>
#include <net/tcp.h>
#include <linux/lru_cache.h>
#include <linux/prefetch.h>
@@ -468,9 +469,15 @@ enum {
/* this is/was a write request */
__EE_WRITE,
+ /* this is/was a write same request */
+ __EE_WRITE_SAME,
+
/* this originates from application on peer
* (not some resync or verify or other DRBD internal request) */
__EE_APPLICATION,
+
+ /* If it contains only 0 bytes, send back P_RS_DEALLOCATED */
+ __EE_RS_THIN_REQ,
};
#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
#define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC)
@@ -484,7 +491,9 @@ enum {
#define EE_IN_INTERVAL_TREE (1<<__EE_IN_INTERVAL_TREE)
#define EE_SUBMITTED (1<<__EE_SUBMITTED)
#define EE_WRITE (1<<__EE_WRITE)
+#define EE_WRITE_SAME (1<<__EE_WRITE_SAME)
#define EE_APPLICATION (1<<__EE_APPLICATION)
+#define EE_RS_THIN_REQ (1<<__EE_RS_THIN_REQ)
/* flag bits per device */
enum {
@@ -1123,6 +1132,7 @@ extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int
extern int drbd_send_bitmap(struct drbd_device *device);
extern void drbd_send_sr_reply(struct drbd_peer_device *, enum drbd_state_rv retcode);
extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode);
+extern int drbd_send_rs_deallocated(struct drbd_peer_device *, struct drbd_peer_request *);
extern void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev);
extern void drbd_device_cleanup(struct drbd_device *device);
void drbd_print_uuids(struct drbd_device *device, const char *text);
@@ -1327,14 +1337,14 @@ struct bm_extent {
#endif
#endif
-/* BIO_MAX_SIZE is 256 * PAGE_SIZE,
+/* Estimate max bio size as 256 * PAGE_SIZE,
* so for a typical PAGE_SIZE of 4k, that is (1<<20) bytes.
* Since we may live in a mixed-platform cluster,
* we limit us to a platform agnostic constant here for now.
* A followup commit may allow even bigger BIO sizes,
* once we thought that through. */
#define DRBD_MAX_BIO_SIZE (1U << 20)
-#if DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
+#if DRBD_MAX_BIO_SIZE > (BIO_MAX_PAGES << PAGE_SHIFT)
#error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_PAGES * PAGE_SIZE
#endif
#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12) /* Works always = 4k */
@@ -1342,11 +1352,11 @@ struct bm_extent {
#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* Header 80 only allows packets up to 32KiB data */
#define DRBD_MAX_BIO_SIZE_P95 (1U << 17) /* Protocol 95 to 99 allows bios up to 128KiB */
-/* For now, don't allow more than one activity log extent worth of data
- * to be discarded in one go. We may need to rework drbd_al_begin_io()
- * to allow for even larger discard ranges */
-#define DRBD_MAX_DISCARD_SIZE AL_EXTENT_SIZE
-#define DRBD_MAX_DISCARD_SECTORS (DRBD_MAX_DISCARD_SIZE >> 9)
+/* For now, don't allow more than half of what we can "activate" in one
+ * activity log transaction to be discarded in one go. We may need to rework
+ * drbd_al_begin_io() to allow for even larger discard ranges */
+#define DRBD_MAX_BATCH_BIO_SIZE (AL_UPDATES_PER_TRANSACTION/2*AL_EXTENT_SIZE)
+#define DRBD_MAX_BBIO_SECTORS (DRBD_MAX_BATCH_BIO_SIZE >> 9)
extern int drbd_bm_init(struct drbd_device *device);
extern int drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits);
@@ -1369,6 +1379,7 @@ extern int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr);
extern int drbd_bm_read(struct drbd_device *device) __must_hold(local);
extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr);
extern int drbd_bm_write(struct drbd_device *device) __must_hold(local);
+extern void drbd_bm_reset_al_hints(struct drbd_device *device) __must_hold(local);
extern int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local);
extern int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local);
extern int drbd_bm_write_all(struct drbd_device *device) __must_hold(local);
@@ -1483,12 +1494,14 @@ enum determine_dev_size {
extern enum determine_dev_size
drbd_determine_dev_size(struct drbd_device *, enum dds_flags, struct resize_parms *) __must_hold(local);
extern void resync_after_online_grow(struct drbd_device *);
-extern void drbd_reconsider_max_bio_size(struct drbd_device *device, struct drbd_backing_dev *bdev);
+extern void drbd_reconsider_queue_parameters(struct drbd_device *device,
+ struct drbd_backing_dev *bdev, struct o_qlim *o);
extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
enum drbd_role new_role,
int force);
extern bool conn_try_outdate_peer(struct drbd_connection *connection);
extern void conn_try_outdate_peer_async(struct drbd_connection *connection);
+extern enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd);
extern int drbd_khelper(struct drbd_device *device, char *cmd);
/* drbd_worker.c */
@@ -1507,7 +1520,7 @@ extern int drbd_resync_finished(struct drbd_device *device);
extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
extern void drbd_md_put_buffer(struct drbd_device *device);
extern int drbd_md_sync_page_io(struct drbd_device *device,
- struct drbd_backing_dev *bdev, sector_t sector, int rw);
+ struct drbd_backing_dev *bdev, sector_t sector, int op);
extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int);
extern void wait_until_done_or_force_detached(struct drbd_device *device,
struct drbd_backing_dev *bdev, unsigned int *done);
@@ -1548,6 +1561,8 @@ extern void start_resync_timer_fn(unsigned long data);
extern void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req);
/* drbd_receiver.c */
+extern int drbd_issue_discard_or_zero_out(struct drbd_device *device,
+ sector_t start, unsigned int nr_sectors, bool discard);
extern int drbd_receiver(struct drbd_thread *thi);
extern int drbd_ack_receiver(struct drbd_thread *thi);
extern void drbd_send_ping_wf(struct work_struct *ws);
@@ -1557,11 +1572,11 @@ extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector
bool throttle_if_app_is_waiting);
extern int drbd_submit_peer_request(struct drbd_device *,
struct drbd_peer_request *, const unsigned,
- const int);
+ const unsigned, const int);
extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64,
sector_t, unsigned int,
- bool,
+ unsigned int,
gfp_t) __must_hold(local);
extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
int);
@@ -1635,8 +1650,6 @@ void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backin
/* drbd_proc.c */
extern struct proc_dir_entry *drbd_proc;
extern const struct file_operations drbd_proc_fops;
-extern const char *drbd_conn_str(enum drbd_conns s);
-extern const char *drbd_role_str(enum drbd_role s);
/* drbd_actlog.c */
extern bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i);
@@ -2095,13 +2108,22 @@ static inline void _sub_unacked(struct drbd_device *device, int n, const char *f
ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
}
+static inline bool is_sync_target_state(enum drbd_conns connection_state)
+{
+ return connection_state == C_SYNC_TARGET ||
+ connection_state == C_PAUSED_SYNC_T;
+}
+
+static inline bool is_sync_source_state(enum drbd_conns connection_state)
+{
+ return connection_state == C_SYNC_SOURCE ||
+ connection_state == C_PAUSED_SYNC_S;
+}
+
static inline bool is_sync_state(enum drbd_conns connection_state)
{
- return
- (connection_state == C_SYNC_SOURCE
- || connection_state == C_SYNC_TARGET
- || connection_state == C_PAUSED_SYNC_S
- || connection_state == C_PAUSED_SYNC_T);
+ return is_sync_source_state(connection_state) ||
+ is_sync_target_state(connection_state);
}
/**
diff --git a/drivers/block/drbd/drbd_interval.h b/drivers/block/drbd/drbd_interval.h
index f210543f0..23c5a9442 100644
--- a/drivers/block/drbd/drbd_interval.h
+++ b/drivers/block/drbd/drbd_interval.h
@@ -6,13 +6,13 @@
struct drbd_interval {
struct rb_node rb;
- sector_t sector; /* start sector of the interval */
- unsigned int size; /* size in bytes */
- sector_t end; /* highest interval end in subtree */
- int local:1 /* local or remote request? */;
- int waiting:1; /* someone is waiting for this to complete */
- int completed:1; /* this has been completed already;
- * ignore for conflict detection */
+ sector_t sector; /* start sector of the interval */
+ unsigned int size; /* size in bytes */
+ sector_t end; /* highest interval end in subtree */
+ unsigned int local:1 /* local or remote request? */;
+ unsigned int waiting:1; /* someone is waiting for completion */
+ unsigned int completed:1; /* this has been completed already;
+ * ignore for conflict detection */
};
static inline void drbd_clear_interval(struct drbd_interval *i)
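The drbd_interval change above is more than cosmetic: a plain int 1-bit bitfield is signed on typical compilers and can only hold 0 and -1, so assigning 1 reads back as -1 and breaks naive == 1 comparisons. A userspace demonstration:

#include <stdio.h>

struct flags_signed   { int local:1; };
struct flags_unsigned { unsigned int local:1; };

int main(void)
{
        struct flags_signed   s = { .local = 1 };   /* compilers often warn here */
        struct flags_unsigned u = { .local = 1 };

        printf("%d %u\n", s.local, u.local);        /* typically prints: -1 1 */
        return 0;
}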
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 2ba1494b2..100be556e 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -31,7 +31,7 @@
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/drbd.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/types.h>
#include <net/sock.h>
#include <linux/ctype.h>
@@ -920,6 +920,31 @@ void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *peer_device)
}
}
+/* communicated if (agreed_features & DRBD_FF_WSAME) */
+void assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p, struct request_queue *q)
+{
+ if (q) {
+ p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
+ p->qlim->logical_block_size = cpu_to_be32(queue_logical_block_size(q));
+ p->qlim->alignment_offset = cpu_to_be32(queue_alignment_offset(q));
+ p->qlim->io_min = cpu_to_be32(queue_io_min(q));
+ p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
+ p->qlim->discard_enabled = blk_queue_discard(q);
+ p->qlim->discard_zeroes_data = queue_discard_zeroes_data(q);
+ p->qlim->write_same_capable = !!q->limits.max_write_same_sectors;
+ } else {
+ q = device->rq_queue;
+ p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
+ p->qlim->logical_block_size = cpu_to_be32(queue_logical_block_size(q));
+ p->qlim->alignment_offset = 0;
+ p->qlim->io_min = cpu_to_be32(queue_io_min(q));
+ p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
+ p->qlim->discard_enabled = 0;
+ p->qlim->discard_zeroes_data = 0;
+ p->qlim->write_same_capable = 0;
+ }
+}
+
int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enum dds_flags flags)
{
struct drbd_device *device = peer_device->device;
@@ -928,29 +953,37 @@ int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enu
sector_t d_size, u_size;
int q_order_type;
unsigned int max_bio_size;
+ unsigned int packet_size;
+
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
+ if (!p)
+ return -EIO;
+ packet_size = sizeof(*p);
+ if (peer_device->connection->agreed_features & DRBD_FF_WSAME)
+ packet_size += sizeof(p->qlim[0]);
+
+ memset(p, 0, packet_size);
if (get_ldev_if_state(device, D_NEGOTIATING)) {
- D_ASSERT(device, device->ldev->backing_bdev);
+ struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev);
d_size = drbd_get_max_capacity(device->ldev);
rcu_read_lock();
u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
rcu_read_unlock();
q_order_type = drbd_queue_order_type(device);
- max_bio_size = queue_max_hw_sectors(device->ldev->backing_bdev->bd_disk->queue) << 9;
+ max_bio_size = queue_max_hw_sectors(q) << 9;
max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
+ assign_p_sizes_qlim(device, p, q);
put_ldev(device);
} else {
d_size = 0;
u_size = 0;
q_order_type = QUEUE_ORDERED_NONE;
max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
+ assign_p_sizes_qlim(device, p, NULL);
}
- sock = &peer_device->connection->data;
- p = drbd_prepare_command(peer_device, sock);
- if (!p)
- return -EIO;
-
if (peer_device->connection->agreed_pro_version <= 94)
max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
else if (peer_device->connection->agreed_pro_version < 100)
@@ -962,7 +995,8 @@ int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enu
p->max_bio_size = cpu_to_be32(max_bio_size);
p->queue_order_type = cpu_to_be16(q_order_type);
p->dds_flags = cpu_to_be16(flags);
- return drbd_send_command(peer_device, sock, P_SIZES, sizeof(*p), NULL, 0);
+
+ return drbd_send_command(peer_device, sock, P_SIZES, packet_size, NULL, 0);
}
/**
@@ -1377,6 +1411,22 @@ int drbd_send_ack_ex(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
cpu_to_be64(block_id));
}
+int drbd_send_rs_deallocated(struct drbd_peer_device *peer_device,
+ struct drbd_peer_request *peer_req)
+{
+ struct drbd_socket *sock;
+ struct p_block_desc *p;
+
+ sock = &peer_device->connection->data;
+ p = drbd_prepare_command(peer_device, sock);
+ if (!p)
+ return -EIO;
+ p->sector = cpu_to_be64(peer_req->i.sector);
+ p->blksize = cpu_to_be32(peer_req->i.size);
+ p->pad = 0;
+ return drbd_send_command(peer_device, sock, P_RS_DEALLOCATED, sizeof(*p), NULL, 0);
+}
+
int drbd_send_drequest(struct drbd_peer_device *peer_device, int cmd,
sector_t sector, int size, u64 block_id)
{
@@ -1561,6 +1611,9 @@ static int _drbd_send_bio(struct drbd_peer_device *peer_device, struct bio *bio)
? 0 : MSG_MORE);
if (err)
return err;
+ /* REQ_OP_WRITE_SAME has only one segment */
+ if (bio_op(bio) == REQ_OP_WRITE_SAME)
+ break;
}
return 0;
}
@@ -1579,6 +1632,9 @@ static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *b
bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
if (err)
return err;
+ /* REQ_OP_WRITE_SAME has only one segment */
+ if (bio_op(bio) == REQ_OP_WRITE_SAME)
+ break;
}
return 0;
}
@@ -1603,15 +1659,17 @@ static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
return 0;
}
-static u32 bio_flags_to_wire(struct drbd_connection *connection, unsigned long bi_rw)
+static u32 bio_flags_to_wire(struct drbd_connection *connection,
+ struct bio *bio)
{
if (connection->agreed_pro_version >= 95)
- return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
- (bi_rw & REQ_FUA ? DP_FUA : 0) |
- (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
- (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
+ return (bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0) |
+ (bio->bi_opf & REQ_FUA ? DP_FUA : 0) |
+ (bio->bi_opf & REQ_PREFLUSH ? DP_FLUSH : 0) |
+ (bio_op(bio) == REQ_OP_WRITE_SAME ? DP_WSAME : 0) |
+ (bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0);
else
- return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
+ return bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0;
}
/* Used to send write or TRIM aka REQ_DISCARD requests
@@ -1622,6 +1680,8 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
struct drbd_device *device = peer_device->device;
struct drbd_socket *sock;
struct p_data *p;
+ struct p_wsame *wsame = NULL;
+ void *digest_out;
unsigned int dp_flags = 0;
int digest_size;
int err;
@@ -1636,7 +1696,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
p->sector = cpu_to_be64(req->i.sector);
p->block_id = (unsigned long)req;
p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
- dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
+ dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio);
if (device->state.conn >= C_SYNC_SOURCE &&
device->state.conn <= C_PAUSED_SYNC_T)
dp_flags |= DP_MAY_SET_IN_SYNC;
@@ -1657,12 +1717,29 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
err = __send_command(peer_device->connection, device->vnr, sock, P_TRIM, sizeof(*t), NULL, 0);
goto out;
}
+ if (dp_flags & DP_WSAME) {
+ /* this will only work if DRBD_FF_WSAME is set AND the
+ * handshake agreed that all nodes and backend devices are
+ * WRITE_SAME capable and agree on logical_block_size */
+ wsame = (struct p_wsame*)p;
+ digest_out = wsame + 1;
+ wsame->size = cpu_to_be32(req->i.size);
+ } else
+ digest_out = p + 1;
/* our digest is still only over the payload.
* TRIM does not carry any payload. */
if (digest_size)
- drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, p + 1);
- err = __send_command(peer_device->connection, device->vnr, sock, P_DATA, sizeof(*p) + digest_size, NULL, req->i.size);
+ drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest_out);
+ if (wsame) {
+ err =
+ __send_command(peer_device->connection, device->vnr, sock, P_WSAME,
+ sizeof(*wsame) + digest_size, NULL,
+ bio_iovec(req->master_bio).bv_len);
+ } else
+ err =
+ __send_command(peer_device->connection, device->vnr, sock, P_DATA,
+ sizeof(*p) + digest_size, NULL, req->i.size);
if (!err) {
/* For protocol A, we have to memcpy the payload into
* socket buffers, as we may complete right away
@@ -3061,7 +3138,7 @@ void drbd_md_write(struct drbd_device *device, void *b)
D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
sector = device->ldev->md.md_offset;
- if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) {
+ if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) {
/* this was a try anyways ... */
drbd_err(device, "meta data update failed!\n");
drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
@@ -3263,7 +3340,8 @@ int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
* Affects the paranoia out-of-range access check in drbd_md_sync_page_io(). */
bdev->md.md_size_sect = 8;
- if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset, READ)) {
+ if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset,
+ REQ_OP_READ)) {
/* NOTE: can't do normal error processing here as this is
called BEFORE disk is attached */
drbd_err(device, "Error while reading metadata.\n");
@@ -3505,7 +3583,12 @@ static int w_bitmap_io(struct drbd_work *w, int unused)
struct bm_io_work *work = &device->bm_io_work;
int rv = -EIO;
- D_ASSERT(device, atomic_read(&device->ap_bio_cnt) == 0);
+ if (work->flags != BM_LOCKED_CHANGE_ALLOWED) {
+ int cnt = atomic_read(&device->ap_bio_cnt);
+ if (cnt)
+ drbd_err(device, "FIXME: ap_bio_cnt %d, expected 0; queued for '%s'\n",
+ cnt, work->why);
+ }
if (get_ldev(device)) {
drbd_bm_lock(device, work->why, work->flags);
@@ -3585,18 +3668,20 @@ void drbd_queue_bitmap_io(struct drbd_device *device,
int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *),
char *why, enum bm_flag flags)
{
+ /* Only suspend io, if some operation is supposed to be locked out */
+ const bool do_suspend_io = flags & (BM_DONT_CLEAR|BM_DONT_SET|BM_DONT_TEST);
int rv;
D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
- if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
+ if (do_suspend_io)
drbd_suspend_io(device);
drbd_bm_lock(device, why, flags);
rv = io_fn(device);
drbd_bm_unlock(device);
- if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
+ if (do_suspend_io)
drbd_resume_io(device);
return rv;
@@ -3635,6 +3720,8 @@ const char *cmdname(enum drbd_packet cmd)
* one PRO_VERSION */
static const char *cmdnames[] = {
[P_DATA] = "Data",
+ [P_WSAME] = "WriteSame",
+ [P_TRIM] = "Trim",
[P_DATA_REPLY] = "DataReply",
[P_RS_DATA_REPLY] = "RSDataReply",
[P_BARRIER] = "Barrier",
@@ -3679,6 +3766,8 @@ const char *cmdname(enum drbd_packet cmd)
[P_CONN_ST_CHG_REPLY] = "conn_st_chg_reply",
[P_RETRY_WRITE] = "retry_write",
[P_PROTOCOL_UPDATE] = "protocol_update",
+ [P_RS_THIN_REQ] = "rs_thin_req",
+ [P_RS_DEALLOCATED] = "rs_deallocated",
/* enum drbd_packet, but not commands - obsoleted flags:
* P_MAY_IGNORE
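drbd_send_sizes() above now sizes the P_SIZES packet dynamically: the trailing struct o_qlim is appended only when DRBD_FF_WSAME was negotiated, and the qlim[0] member adds nothing to sizeof(struct p_sizes). A userspace model of the length computation (field layout simplified, no __packed):

#include <stdint.h>
#include <stdio.h>

#define DRBD_FF_WSAME 4

struct o_qlim_model {
        uint32_t physical_block_size, logical_block_size;
        uint32_t alignment_offset, io_min, io_opt;
        uint8_t  discard_enabled, discard_zeroes_data, write_same_capable, pad;
};

struct p_sizes_model {
        uint64_t d_size, u_size, c_size;
        uint32_t max_bio_size;
        uint16_t queue_order_type, dds_flags;
        struct o_qlim_model qlim[];     /* flexible tail: contributes no size */
};

int main(void)
{
        uint32_t agreed_features = DRBD_FF_WSAME;
        size_t len = sizeof(struct p_sizes_model);

        if (agreed_features & DRBD_FF_WSAME)
                len += sizeof(struct o_qlim_model);
        printf("P_SIZES payload: %zu bytes\n", len);
        return 0;
}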
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 0bac9c824..f35db29ca 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -343,7 +343,7 @@ int drbd_khelper(struct drbd_device *device, char *cmd)
(char[20]) { }, /* address family */
(char[60]) { }, /* address */
NULL };
- char mb[12];
+ char mb[14];
char *argv[] = {usermode_helper, cmd, mb, NULL };
struct drbd_connection *connection = first_peer_device(device)->connection;
struct sib_info sib;
@@ -352,7 +352,7 @@ int drbd_khelper(struct drbd_device *device, char *cmd)
if (current == connection->worker.task)
set_bit(CALLBACK_PENDING, &connection->flags);
- snprintf(mb, 12, "minor-%d", device_to_minor(device));
+ snprintf(mb, 14, "minor-%d", device_to_minor(device));
setup_khelper_env(connection, envp);
/* The helper may take some time.
@@ -387,7 +387,7 @@ int drbd_khelper(struct drbd_device *device, char *cmd)
return ret;
}
-static int conn_khelper(struct drbd_connection *connection, char *cmd)
+enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd)
{
char *envp[] = { "HOME=/",
"TERM=linux",
@@ -442,19 +442,17 @@ static enum drbd_fencing_p highest_fencing_policy(struct drbd_connection *connec
}
rcu_read_unlock();
- if (fp == FP_NOT_AVAIL) {
- /* IO Suspending works on the whole resource.
- Do it only for one device. */
- vnr = 0;
- peer_device = idr_get_next(&connection->peer_devices, &vnr);
- drbd_change_state(peer_device->device, CS_VERBOSE | CS_HARD, NS(susp_fen, 0));
- }
-
return fp;
}
+static bool resource_is_suspended(struct drbd_resource *resource)
+{
+ return resource->susp || resource->susp_fen || resource->susp_nod;
+}
+
bool conn_try_outdate_peer(struct drbd_connection *connection)
{
+ struct drbd_resource * const resource = connection->resource;
unsigned int connect_cnt;
union drbd_state mask = { };
union drbd_state val = { };
@@ -462,21 +460,41 @@ bool conn_try_outdate_peer(struct drbd_connection *connection)
char *ex_to_string;
int r;
- spin_lock_irq(&connection->resource->req_lock);
+ spin_lock_irq(&resource->req_lock);
if (connection->cstate >= C_WF_REPORT_PARAMS) {
drbd_err(connection, "Expected cstate < C_WF_REPORT_PARAMS\n");
- spin_unlock_irq(&connection->resource->req_lock);
+ spin_unlock_irq(&resource->req_lock);
return false;
}
connect_cnt = connection->connect_cnt;
- spin_unlock_irq(&connection->resource->req_lock);
+ spin_unlock_irq(&resource->req_lock);
fp = highest_fencing_policy(connection);
switch (fp) {
case FP_NOT_AVAIL:
drbd_warn(connection, "Not fencing peer, I'm not even Consistent myself.\n");
- goto out;
+ spin_lock_irq(&resource->req_lock);
+ if (connection->cstate < C_WF_REPORT_PARAMS) {
+ _conn_request_state(connection,
+ (union drbd_state) { { .susp_fen = 1 } },
+ (union drbd_state) { { .susp_fen = 0 } },
+ CS_VERBOSE | CS_HARD | CS_DC_SUSP);
+ /* We are no longer suspended due to the fencing policy.
+ * We may still be suspended due to the on-no-data-accessible policy.
+ * If that was OND_IO_ERROR, fail pending requests. */
+ if (!resource_is_suspended(resource))
+ _tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
+ }
+ /* Else: in case we raced with a connection handshake,
+ * let the handshake figure out if we maybe can RESEND,
+ * and do not resume/fail pending requests here.
+ * Worst case is we stay suspended for now, which may be
+ * resolved by either re-establishing the replication link, or
+ * the next link failure, or eventually the administrator. */
+ spin_unlock_irq(&resource->req_lock);
+ return false;
+
case FP_DONT_CARE:
return true;
default: ;
@@ -485,17 +503,17 @@ bool conn_try_outdate_peer(struct drbd_connection *connection)
r = conn_khelper(connection, "fence-peer");
switch ((r>>8) & 0xff) {
- case 3: /* peer is inconsistent */
+ case P_INCONSISTENT: /* peer is inconsistent */
ex_to_string = "peer is inconsistent or worse";
mask.pdsk = D_MASK;
val.pdsk = D_INCONSISTENT;
break;
- case 4: /* peer got outdated, or was already outdated */
+ case P_OUTDATED: /* peer got outdated, or was already outdated */
ex_to_string = "peer was fenced";
mask.pdsk = D_MASK;
val.pdsk = D_OUTDATED;
break;
- case 5: /* peer was down */
+ case P_DOWN: /* peer was down */
if (conn_highest_disk(connection) == D_UP_TO_DATE) {
/* we will(have) create(d) a new UUID anyways... */
ex_to_string = "peer is unreachable, assumed to be dead";
@@ -505,7 +523,7 @@ bool conn_try_outdate_peer(struct drbd_connection *connection)
ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
}
break;
- case 6: /* Peer is primary, voluntarily outdate myself.
+ case P_PRIMARY: /* Peer is primary, voluntarily outdate myself.
* This is useful when an unconnected R_SECONDARY is asked to
* become R_PRIMARY, but finds the other peer being active. */
ex_to_string = "peer is active";
@@ -513,7 +531,9 @@ bool conn_try_outdate_peer(struct drbd_connection *connection)
mask.disk = D_MASK;
val.disk = D_OUTDATED;
break;
- case 7:
+ case P_FENCING:
+ /* THINK: do we need to handle this
+ * like P_OUTDATED, or more like P_DOWN? */
if (fp != FP_STONITH)
drbd_err(connection, "fence-peer() = 7 && fencing != Stonith !!!\n");
ex_to_string = "peer was stonithed";
@@ -529,13 +549,11 @@ bool conn_try_outdate_peer(struct drbd_connection *connection)
drbd_info(connection, "fence-peer helper returned %d (%s)\n",
(r>>8) & 0xff, ex_to_string);
- out:
-
/* Not using
conn_request_state(connection, mask, val, CS_VERBOSE);
here, because we might have been able to re-establish the connection in the
meantime. */
- spin_lock_irq(&connection->resource->req_lock);
+ spin_lock_irq(&resource->req_lock);
if (connection->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &connection->flags)) {
if (connection->connect_cnt != connect_cnt)
/* In case the connection was established and dropped
 while the fence-peer handler was running, ignore it */
 drbd_info(connection, "Ignoring fence-peer helper exit code\n");
@@ -544,7 +562,7 @@ bool conn_try_outdate_peer(struct drbd_connection *connection)
else
_conn_request_state(connection, mask, val, CS_VERBOSE);
}
- spin_unlock_irq(&connection->resource->req_lock);
+ spin_unlock_irq(&resource->req_lock);
return conn_highest_pdsk(connection) <= D_OUTDATED;
}
@@ -1154,51 +1172,160 @@ static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc)
return 0;
}
+static void blk_queue_discard_granularity(struct request_queue *q, unsigned int granularity)
+{
+ q->limits.discard_granularity = granularity;
+}
+
+static unsigned int drbd_max_discard_sectors(struct drbd_connection *connection)
+{
+ /* when we introduced REQ_WRITE_SAME support, we also bumped
+ * our maximum supported batch bio size used for discards. */
+ if (connection->agreed_features & DRBD_FF_WSAME)
+ return DRBD_MAX_BBIO_SECTORS;
+ /* before, with DRBD <= 8.4.6, we only allowed up to one AL_EXTENT_SIZE. */
+ return AL_EXTENT_SIZE >> 9;
+}
+
+static void decide_on_discard_support(struct drbd_device *device,
+ struct request_queue *q,
+ struct request_queue *b,
+ bool discard_zeroes_if_aligned)
+{
+ /* q = drbd device queue (device->rq_queue)
+ * b = backing device queue (device->ldev->backing_bdev->bd_disk->queue),
+ * or NULL if diskless
+ */
+ struct drbd_connection *connection = first_peer_device(device)->connection;
+ bool can_do = b ? blk_queue_discard(b) : true;
+
+ if (can_do && b && !b->limits.discard_zeroes_data && !discard_zeroes_if_aligned) {
+ can_do = false;
+ drbd_info(device, "discard_zeroes_data=0 and discard_zeroes_if_aligned=no: disabling discards\n");
+ }
+ if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_TRIM)) {
+ can_do = false;
+ drbd_info(connection, "peer DRBD too old, does not support TRIM: disabling discards\n");
+ }
+ if (can_do) {
+ /* We don't care for the granularity, really.
+ * Stacking limits below should fix it for the local
+ * device. Whether or not it is a suitable granularity
+ * on the remote device is not our problem, really. If
+ * you care, you need to use devices with similar
+ * topology on all peers. */
+ blk_queue_discard_granularity(q, 512);
+ q->limits.max_discard_sectors = drbd_max_discard_sectors(connection);
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+ } else {
+ queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+ blk_queue_discard_granularity(q, 0);
+ q->limits.max_discard_sectors = 0;
+ }
+}
+
+static void fixup_discard_if_not_supported(struct request_queue *q)
+{
+ /* To avoid confusion, if this queue does not support discard, clear
+ * max_discard_sectors, which is what lsblk -D reports to the user.
+ * Older kernels got this wrong in "stack limits".
+ */
+ if (!blk_queue_discard(q)) {
+ blk_queue_max_discard_sectors(q, 0);
+ blk_queue_discard_granularity(q, 0);
+ }
+}
+
+static void decide_on_write_same_support(struct drbd_device *device,
+ struct request_queue *q,
+ struct request_queue *b, struct o_qlim *o)
+{
+ struct drbd_peer_device *peer_device = first_peer_device(device);
+ struct drbd_connection *connection = peer_device->connection;
+ bool can_do = b ? b->limits.max_write_same_sectors : true;
+
+ if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_WSAME)) {
+ can_do = false;
+ drbd_info(peer_device, "peer does not support WRITE_SAME\n");
+ }
+
+ if (o) {
+ /* logical block size; queue_logical_block_size(NULL) is 512 */
+ unsigned int peer_lbs = be32_to_cpu(o->logical_block_size);
+ unsigned int me_lbs_b = queue_logical_block_size(b);
+ unsigned int me_lbs = queue_logical_block_size(q);
+
+ if (me_lbs_b != me_lbs) {
+ drbd_warn(device,
+ "logical block size of local backend does not match (drbd:%u, backend:%u); was this a late attach?\n",
+ me_lbs, me_lbs_b);
+ /* rather disable write same than trigger some BUG_ON later in the scsi layer. */
+ can_do = false;
+ }
+ if (me_lbs_b != peer_lbs) {
+ drbd_warn(peer_device, "logical block sizes do not match (me:%u, peer:%u); this may cause problems.\n",
+ me_lbs, peer_lbs);
+ if (can_do) {
+ drbd_dbg(peer_device, "logical block size mismatch: WRITE_SAME disabled.\n");
+ can_do = false;
+ }
+ me_lbs = max(me_lbs, me_lbs_b);
+ /* We cannot change the logical block size of an in-use queue.
+ * We can only hope that access happens to be properly aligned.
+ * If not, the peer will likely produce an IO error, and detach. */
+ if (peer_lbs > me_lbs) {
+ if (device->state.role != R_PRIMARY) {
+ blk_queue_logical_block_size(q, peer_lbs);
+ drbd_warn(peer_device, "logical block size set to %u\n", peer_lbs);
+ } else {
+ drbd_warn(peer_device,
+ "current Primary must NOT adjust logical block size (%u -> %u); hope for the best.\n",
+ me_lbs, peer_lbs);
+ }
+ }
+ }
+ if (can_do && !o->write_same_capable) {
+ /* If we introduce an open-coded write-same loop on the receiving side,
+ * the peer would present itself as "capable". */
+ drbd_dbg(peer_device, "WRITE_SAME disabled (peer device not capable)\n");
+ can_do = false;
+ }
+ }
+
+ blk_queue_max_write_same_sectors(q, can_do ? DRBD_MAX_BBIO_SECTORS : 0);
+}
+
static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev,
- unsigned int max_bio_size)
+ unsigned int max_bio_size, struct o_qlim *o)
{
struct request_queue * const q = device->rq_queue;
unsigned int max_hw_sectors = max_bio_size >> 9;
unsigned int max_segments = 0;
struct request_queue *b = NULL;
+ struct disk_conf *dc;
+ bool discard_zeroes_if_aligned = true;
if (bdev) {
b = bdev->backing_bdev->bd_disk->queue;
max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
rcu_read_lock();
- max_segments = rcu_dereference(device->ldev->disk_conf)->max_bio_bvecs;
+ dc = rcu_dereference(device->ldev->disk_conf);
+ max_segments = dc->max_bio_bvecs;
+ discard_zeroes_if_aligned = dc->discard_zeroes_if_aligned;
rcu_read_unlock();
blk_set_stacking_limits(&q->limits);
- blk_queue_max_write_same_sectors(q, 0);
}
- blk_queue_logical_block_size(q, 512);
blk_queue_max_hw_sectors(q, max_hw_sectors);
/* This is the workaround for "bio would need to, but cannot, be split" */
blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
blk_queue_segment_boundary(q, PAGE_SIZE-1);
+ decide_on_discard_support(device, q, b, discard_zeroes_if_aligned);
+ decide_on_write_same_support(device, q, b, o);
if (b) {
- struct drbd_connection *connection = first_peer_device(device)->connection;
-
- blk_queue_max_discard_sectors(q, DRBD_MAX_DISCARD_SECTORS);
-
- if (blk_queue_discard(b) &&
- (connection->cstate < C_CONNECTED || connection->agreed_features & FF_TRIM)) {
- /* We don't care, stacking below should fix it for the local device.
- * Whether or not it is a suitable granularity on the remote device
- * is not our problem, really. If you care, you need to
- * use devices with similar topology on all peers. */
- q->limits.discard_granularity = 512;
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
- } else {
- blk_queue_max_discard_sectors(q, 0);
- queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
- q->limits.discard_granularity = 0;
- }
-
blk_queue_stack_limits(q, b);
if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
@@ -1208,15 +1335,10 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
}
}
- /* To avoid confusion, if this queue does not support discard, clear
- * max_discard_sectors, which is what lsblk -D reports to the user. */
- if (!blk_queue_discard(q)) {
- blk_queue_max_discard_sectors(q, 0);
- q->limits.discard_granularity = 0;
- }
+ fixup_discard_if_not_supported(q);
}
-void drbd_reconsider_max_bio_size(struct drbd_device *device, struct drbd_backing_dev *bdev)
+void drbd_reconsider_queue_parameters(struct drbd_device *device, struct drbd_backing_dev *bdev, struct o_qlim *o)
{
unsigned int now, new, local, peer;
@@ -1259,7 +1381,7 @@ void drbd_reconsider_max_bio_size(struct drbd_device *device, struct drbd_backin
if (new != now)
drbd_info(device, "max BIO size = %u\n", new);
- drbd_setup_queue_param(device, bdev, new);
+ drbd_setup_queue_param(device, bdev, new, o);
}
/* Starts the worker thread */
@@ -1348,6 +1470,43 @@ static bool write_ordering_changed(struct disk_conf *a, struct disk_conf *b)
a->disk_drain != b->disk_drain;
}
+static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *disk_conf,
+ struct drbd_backing_dev *nbc)
+{
+ struct request_queue * const q = nbc->backing_bdev->bd_disk->queue;
+
+ if (disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
+ disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
+ if (disk_conf->al_extents > drbd_al_extents_max(nbc))
+ disk_conf->al_extents = drbd_al_extents_max(nbc);
+
+ if (!blk_queue_discard(q)
+ || (!q->limits.discard_zeroes_data && !disk_conf->discard_zeroes_if_aligned)) {
+ if (disk_conf->rs_discard_granularity) {
+ disk_conf->rs_discard_granularity = 0; /* disable feature */
+ drbd_info(device, "rs_discard_granularity feature disabled\n");
+ }
+ }
+
+ if (disk_conf->rs_discard_granularity) {
+ int orig_value = disk_conf->rs_discard_granularity;
+ int remainder;
+
+ if (q->limits.discard_granularity > disk_conf->rs_discard_granularity)
+ disk_conf->rs_discard_granularity = q->limits.discard_granularity;
+
+ remainder = disk_conf->rs_discard_granularity % q->limits.discard_granularity;
+ disk_conf->rs_discard_granularity += remainder;
+
+ if (disk_conf->rs_discard_granularity > q->limits.max_discard_sectors << 9)
+ disk_conf->rs_discard_granularity = q->limits.max_discard_sectors << 9;
+
+ if (disk_conf->rs_discard_granularity != orig_value)
+ drbd_info(device, "rs_discard_granularity changed to %d\n",
+ disk_conf->rs_discard_granularity);
+ }
+}
+
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_config_context adm_ctx;
@@ -1395,10 +1554,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
if (!expect(new_disk_conf->resync_rate >= 1))
new_disk_conf->resync_rate = 1;
- if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
- new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
- if (new_disk_conf->al_extents > drbd_al_extents_max(device->ldev))
- new_disk_conf->al_extents = drbd_al_extents_max(device->ldev);
+ sanitize_disk_conf(device, new_disk_conf, device->ldev);
if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
@@ -1457,6 +1613,9 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
if (write_ordering_changed(old_disk_conf, new_disk_conf))
drbd_bump_write_ordering(device->resource, NULL, WO_BDEV_FLUSH);
+ if (old_disk_conf->discard_zeroes_if_aligned != new_disk_conf->discard_zeroes_if_aligned)
+ drbd_reconsider_queue_parameters(device, device->ldev, NULL);
+
drbd_md_sync(device);
if (device->state.conn >= C_CONNECTED) {
@@ -1693,10 +1852,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto fail;
- if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
- new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
- if (new_disk_conf->al_extents > drbd_al_extents_max(nbc))
- new_disk_conf->al_extents = drbd_al_extents_max(nbc);
+ sanitize_disk_conf(device, new_disk_conf, nbc);
if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
drbd_err(device, "max capacity %llu smaller than disk size %llu\n",
@@ -1838,7 +1994,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
device->read_cnt = 0;
device->writ_cnt = 0;
- drbd_reconsider_max_bio_size(device, device->ldev);
+ drbd_reconsider_queue_parameters(device, device->ldev, NULL);
/* If I am currently not R_PRIMARY,
* but meta data primary indicator is set,
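decide_on_discard_support() above gates discards on three independent conditions: backend capability (diskless counts as capable), acceptable zeroing semantics, and a peer that advertised DRBD_FF_TRIM. A condensed userspace sketch, with the queue plumbing stripped away and the connected-state test folded into the last flag:

#include <stdbool.h>

struct backing {        /* stand-in for the backing request_queue */
        bool supports_discard;
        bool discard_zeroes_data;
};

static bool sketch_discard_ok(const struct backing *b,
                              bool discard_zeroes_if_aligned,
                              bool peer_has_trim)
{
        bool can_do = b ? b->supports_discard : true;   /* NULL == diskless */

        if (can_do && b && !b->discard_zeroes_data && !discard_zeroes_if_aligned)
                can_do = false; /* zeroing semantics not guaranteed */
        if (can_do && !peer_has_trim)
                can_do = false; /* peer too old for DRBD_FF_TRIM */
        return can_do;
}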
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index 6537b25db..be2b93fd2 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -25,7 +25,7 @@
#include <linux/module.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
@@ -122,18 +122,18 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se
x = res/50;
y = 20-x;
- seq_printf(seq, "\t[");
+ seq_puts(seq, "\t[");
for (i = 1; i < x; i++)
- seq_printf(seq, "=");
- seq_printf(seq, ">");
+ seq_putc(seq, '=');
+ seq_putc(seq, '>');
for (i = 0; i < y; i++)
seq_printf(seq, ".");
- seq_printf(seq, "] ");
+ seq_puts(seq, "] ");
if (state.conn == C_VERIFY_S || state.conn == C_VERIFY_T)
- seq_printf(seq, "verified:");
+ seq_puts(seq, "verified:");
else
- seq_printf(seq, "sync'ed:");
+ seq_puts(seq, "sync'ed:");
seq_printf(seq, "%3u.%u%% ", res / 10, res % 10);
/* if more than a few GB, display in MB */
@@ -146,7 +146,7 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se
(unsigned long) Bit2KB(rs_left),
(unsigned long) Bit2KB(rs_total));
- seq_printf(seq, "\n\t");
+ seq_puts(seq, "\n\t");
/* see drivers/md/md.c
* We do not want to overflow, so the order of operands and
@@ -175,9 +175,9 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se
rt / 3600, (rt % 3600) / 60, rt % 60);
dbdt = Bit2KB(db/dt);
- seq_printf(seq, " speed: ");
+ seq_puts(seq, " speed: ");
seq_printf_with_thousands_grouping(seq, dbdt);
- seq_printf(seq, " (");
+ seq_puts(seq, " (");
/* ------------------------- ~3s average ------------------------ */
if (proc_details >= 1) {
/* this is what drbd_rs_should_slow_down() uses */
@@ -188,7 +188,7 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se
db = device->rs_mark_left[i] - rs_left;
dbdt = Bit2KB(db/dt);
seq_printf_with_thousands_grouping(seq, dbdt);
- seq_printf(seq, " -- ");
+ seq_puts(seq, " -- ");
}
/* --------------------- long term average ---------------------- */
@@ -200,11 +200,11 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se
db = rs_total - rs_left;
dbdt = Bit2KB(db/dt);
seq_printf_with_thousands_grouping(seq, dbdt);
- seq_printf(seq, ")");
+ seq_putc(seq, ')');
if (state.conn == C_SYNC_TARGET ||
state.conn == C_VERIFY_S) {
- seq_printf(seq, " want: ");
+ seq_puts(seq, " want: ");
seq_printf_with_thousands_grouping(seq, device->c_sync_rate);
}
seq_printf(seq, " K/sec%s\n", stalled ? " (stalled)" : "");
@@ -231,7 +231,7 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se
(unsigned long long)bm_bits * BM_SECT_PER_BIT);
if (stop_sector != 0 && stop_sector != ULLONG_MAX)
seq_printf(seq, " stop sector: %llu", stop_sector);
- seq_printf(seq, "\n");
+ seq_putc(seq, '\n');
}
}
@@ -276,7 +276,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
rcu_read_lock();
idr_for_each_entry(&drbd_devices, device, i) {
if (prev_i != i - 1)
- seq_printf(seq, "\n");
+ seq_putc(seq, '\n');
prev_i = i;
state = device->state;
diff --git a/drivers/block/drbd/drbd_protocol.h b/drivers/block/drbd/drbd_protocol.h
index ef9245363..4d296800f 100644
--- a/drivers/block/drbd/drbd_protocol.h
+++ b/drivers/block/drbd/drbd_protocol.h
@@ -60,6 +60,15 @@ enum drbd_packet {
* which is why I chose TRIM here, to disambiguate. */
P_TRIM = 0x31,
+ /* Only use these two if both support FF_THIN_RESYNC */
+ P_RS_THIN_REQ = 0x32, /* Request a block for resync or reply P_RS_DEALLOCATED */
+ P_RS_DEALLOCATED = 0x33, /* Contains only zeros on sync source node */
+
+ /* REQ_WRITE_SAME.
+ * On a receiving side without REQ_WRITE_SAME,
+ * we may fall back to an open-coded loop instead. */
+ P_WSAME = 0x34,
+
P_MAY_IGNORE = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */
P_MAX_OPT_CMD = 0x101,
@@ -106,16 +115,20 @@ struct p_header100 {
u32 pad;
} __packed;
-/* these defines must not be changed without changing the protocol version */
-#define DP_HARDBARRIER 1 /* depricated */
+/* These defines must not be changed without changing the protocol version.
+ * New defines may only be introduced together with a protocol version bump or
+ * new protocol feature flags.
+ */
+#define DP_HARDBARRIER 1 /* no longer used */
#define DP_RW_SYNC 2 /* equals REQ_SYNC */
#define DP_MAY_SET_IN_SYNC 4
#define DP_UNPLUG 8 /* not used anymore */
#define DP_FUA 16 /* equals REQ_FUA */
-#define DP_FLUSH 32 /* equals REQ_FLUSH */
+#define DP_FLUSH 32 /* equals REQ_PREFLUSH */
#define DP_DISCARD 64 /* equals REQ_DISCARD */
#define DP_SEND_RECEIVE_ACK 128 /* This is a proto B write request */
#define DP_SEND_WRITE_ACK 256 /* This is a proto C write request */
+#define DP_WSAME 512 /* equiv. REQ_WRITE_SAME */
struct p_data {
u64 sector; /* 64 bits sector number */
@@ -129,6 +142,11 @@ struct p_trim {
u32 size; /* == bio->bi_size */
} __packed;
+struct p_wsame {
+ struct p_data p_data;
+ u32 size; /* == bio->bi_size */
+} __packed;
+
/*
* commands which share a struct:
* p_block_ack:
@@ -160,7 +178,23 @@ struct p_block_req {
* ReportParams
*/
-#define FF_TRIM 1
+/* supports TRIM/DISCARD on the "wire" protocol */
+#define DRBD_FF_TRIM 1
+
+/* Detect all-zeros during resync, and rather TRIM/UNMAP/DISCARD those blocks
+ * instead of fully allocating a supposedly thin volume on initial resync */
+#define DRBD_FF_THIN_RESYNC 2
+
+/* supports REQ_WRITE_SAME on the "wire" protocol.
+ * Note: this flag is overloaded,
+ * its presence also
+ * - indicates support for 128 MiB "batch bios",
+ * max discard size of 128 MiB
+ * instead of 4 MiB before that.
+ * - indicates that we exchange additional settings in p_sizes
+ * drbd_send_sizes()/receive_sizes()
+ */
+#define DRBD_FF_WSAME 4
struct p_connection_features {
u32 protocol_min;
@@ -235,6 +269,40 @@ struct p_rs_uuid {
u64 uuid;
} __packed;
+/* optional queue_limits if (agreed_features & DRBD_FF_WSAME)
+ * see also struct queue_limits, as of late 2015 */
+struct o_qlim {
+ /* we don't need it yet, but we may as well communicate it now */
+ u32 physical_block_size;
+
+ /* so the original in struct queue_limits is unsigned short,
+ * but I'd have to put in padding anyways. */
+ u32 logical_block_size;
+
+ /* One incoming bio becomes one DRBD request,
+ * which may be translated to several bios on the receiving side.
+ * We don't need to communicate chunk/boundary/segment ... limits.
+ */
+
+ /* various IO hints may be useful with "diskless client" setups */
+ u32 alignment_offset;
+ u32 io_min;
+ u32 io_opt;
+
+ /* We may need to communicate integrity stuff at some point,
+ * but let's not get ahead of ourselves. */
+
+ /* Backend discard capabilities.
+ * Receiving side uses "blkdev_issue_discard()", no need to communicate
+ * more specifics. If the backend cannot do discards, the DRBD peer
+ * may fall back to blkdev_issue_zeroout().
+ */
+ u8 discard_enabled;
+ u8 discard_zeroes_data;
+ u8 write_same_capable;
+ u8 _pad;
+} __packed;
+
struct p_sizes {
u64 d_size; /* size of disk */
u64 u_size; /* user requested size */
@@ -242,6 +310,9 @@ struct p_sizes {
u32 max_bio_size; /* Maximal size of a BIO */
u16 queue_order_type; /* not yet implemented in DRBD*/
u16 dds_flags; /* use enum dds_flags here. */
+
+ /* optional queue_limits if (agreed_features & DRBD_FF_WSAME) */
+ struct o_qlim qlim[0];
} __packed;
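
Since qlim[] is a zero-length trailing array, the o_qlim block only travels on the wire when both sides agreed on DRBD_FF_WSAME. A receiver-side sketch of how the expected packet size grows (the real check lives in drbdd(), further down):

	unsigned int shs = sizeof(struct p_sizes);	/* fixed part */

	if (pi.cmd == P_SIZES && (connection->agreed_features & DRBD_FF_WSAME))
		shs += sizeof(struct o_qlim);		/* optional tail */
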
struct p_state {
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 050aaa1c0..942384f34 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -25,7 +25,7 @@
#include <linux/module.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <net/sock.h>
#include <linux/drbd.h>
@@ -48,7 +48,7 @@
#include "drbd_req.h"
#include "drbd_vli.h"
-#define PRO_FEATURES (FF_TRIM)
+#define PRO_FEATURES (DRBD_FF_TRIM|DRBD_FF_THIN_RESYNC|DRBD_FF_WSAME)
struct packet_info {
enum drbd_packet cmd;
@@ -361,14 +361,17 @@ You must not have the req_lock:
drbd_wait_ee_list_empty()
*/
+/* normal: payload_size == request size (bi_size)
+ * w_same: payload_size == logical_block_size
+ * trim: payload_size == 0 */
struct drbd_peer_request *
drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
- unsigned int data_size, bool has_payload, gfp_t gfp_mask) __must_hold(local)
+ unsigned int request_size, unsigned int payload_size, gfp_t gfp_mask) __must_hold(local)
{
struct drbd_device *device = peer_device->device;
struct drbd_peer_request *peer_req;
struct page *page = NULL;
- unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
+ unsigned nr_pages = (payload_size + PAGE_SIZE -1) >> PAGE_SHIFT;
if (drbd_insert_fault(device, DRBD_FAULT_AL_EE))
return NULL;
@@ -380,7 +383,7 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto
return NULL;
}
- if (has_payload && data_size) {
+ if (nr_pages) {
page = drbd_alloc_pages(peer_device, nr_pages,
gfpflags_allow_blocking(gfp_mask));
if (!page)
@@ -390,7 +393,7 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto
memset(peer_req, 0, sizeof(*peer_req));
INIT_LIST_HEAD(&peer_req->w.list);
drbd_clear_interval(&peer_req->i);
- peer_req->i.size = data_size;
+ peer_req->i.size = request_size;
peer_req->i.sector = sector;
peer_req->submit_jif = jiffies;
peer_req->peer_device = peer_device;
@@ -1204,13 +1207,84 @@ static int drbd_recv_header(struct drbd_connection *connection, struct packet_in
return err;
}
-static void drbd_flush(struct drbd_connection *connection)
+/* This is blkdev_issue_flush, but asynchronous.
+ * We want to submit to all component volumes in parallel,
+ * then wait for all completions.
+ */
+struct issue_flush_context {
+ atomic_t pending;
+ int error;
+ struct completion done;
+};
+struct one_flush_context {
+ struct drbd_device *device;
+ struct issue_flush_context *ctx;
+};
+
+void one_flush_endio(struct bio *bio)
{
- int rv;
- struct drbd_peer_device *peer_device;
- int vnr;
+ struct one_flush_context *octx = bio->bi_private;
+ struct drbd_device *device = octx->device;
+ struct issue_flush_context *ctx = octx->ctx;
+
+ if (bio->bi_error) {
+ ctx->error = bio->bi_error;
+ drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_error);
+ }
+ kfree(octx);
+ bio_put(bio);
+
+ clear_bit(FLUSH_PENDING, &device->flags);
+ put_ldev(device);
+ kref_put(&device->kref, drbd_destroy_device);
+
+ if (atomic_dec_and_test(&ctx->pending))
+ complete(&ctx->done);
+}
+
+static void submit_one_flush(struct drbd_device *device, struct issue_flush_context *ctx)
+{
+ struct bio *bio = bio_alloc(GFP_NOIO, 0);
+ struct one_flush_context *octx = kmalloc(sizeof(*octx), GFP_NOIO);
+ if (!bio || !octx) {
+ drbd_warn(device, "Could not allocate a bio, CANNOT ISSUE FLUSH\n");
+ /* FIXME: what else can I do now? disconnecting or detaching
+ * really does not help to improve the state of the world, either.
+ */
+ kfree(octx);
+ if (bio)
+ bio_put(bio);
+
+ ctx->error = -ENOMEM;
+ put_ldev(device);
+ kref_put(&device->kref, drbd_destroy_device);
+ return;
+ }
+ octx->device = device;
+ octx->ctx = ctx;
+ bio->bi_bdev = device->ldev->backing_bdev;
+ bio->bi_private = octx;
+ bio->bi_end_io = one_flush_endio;
+ bio_set_op_attrs(bio, REQ_OP_FLUSH, WRITE_FLUSH);
+
+ device->flush_jif = jiffies;
+ set_bit(FLUSH_PENDING, &device->flags);
+ atomic_inc(&ctx->pending);
+ submit_bio(bio);
+}
+
+static void drbd_flush(struct drbd_connection *connection)
+{
if (connection->resource->write_ordering >= WO_BDEV_FLUSH) {
+ struct drbd_peer_device *peer_device;
+ struct issue_flush_context ctx;
+ int vnr;
+
+ atomic_set(&ctx.pending, 1);
+ ctx.error = 0;
+ init_completion(&ctx.done);
+
rcu_read_lock();
idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
struct drbd_device *device = peer_device->device;
@@ -1220,31 +1294,24 @@ static void drbd_flush(struct drbd_connection *connection)
kref_get(&device->kref);
rcu_read_unlock();
- /* Right now, we have only this one synchronous code path
- * for flushes between request epochs.
- * We may want to make those asynchronous,
- * or at least parallelize the flushes to the volume devices.
- */
- device->flush_jif = jiffies;
- set_bit(FLUSH_PENDING, &device->flags);
- rv = blkdev_issue_flush(device->ldev->backing_bdev,
- GFP_NOIO, NULL);
- clear_bit(FLUSH_PENDING, &device->flags);
- if (rv) {
- drbd_info(device, "local disk flush failed with status %d\n", rv);
- /* would rather check on EOPNOTSUPP, but that is not reliable.
- * don't try again for ANY return value != 0
- * if (rv == -EOPNOTSUPP) */
- drbd_bump_write_ordering(connection->resource, NULL, WO_DRAIN_IO);
- }
- put_ldev(device);
- kref_put(&device->kref, drbd_destroy_device);
+ submit_one_flush(device, &ctx);
rcu_read_lock();
- if (rv)
- break;
}
rcu_read_unlock();
+
+ /* Do we want to add a timeout,
+ * if disk-timeout is set? */
+ if (!atomic_dec_and_test(&ctx.pending))
+ wait_for_completion(&ctx.done);
+
+ if (ctx.error) {
+ /* Any error is already reported by the bio_endio callback.
+ * We would rather check for EOPNOTSUPP, but that is not reliable;
+ * don't retry for ANY error. */
+ drbd_bump_write_ordering(connection->resource, NULL, WO_DRAIN_IO);
+ }
}
}
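
The atomic_set(&ctx.pending, 1) above is the usual completion-count bias: starting at one keeps the last endio from completing ctx.done while submissions are still in flight; the submitter drops its own reference only after the loop. In generic form (sketch; for_each_volume is a made-up placeholder for the idr walk above):

	atomic_set(&ctx.pending, 1);		/* submitter's bias */
	for_each_volume(device)
		submit_one_flush(device, &ctx);	/* does atomic_inc(&ctx->pending) */
	if (!atomic_dec_and_test(&ctx.pending))	/* drop the bias */
		wait_for_completion(&ctx.done);	/* endios dec_and_test too */
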
@@ -1379,11 +1446,125 @@ void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backin
drbd_info(resource, "Method to ensure write ordering: %s\n", write_ordering_str[resource->write_ordering]);
}
+/*
+ * We *may* ignore the discard-zeroes-data setting, if so configured.
+ *
+ * The assumption is that "discard_zeroes_data=0" is only reported because
+ * the backend may ignore partial unaligned discards.
+ *
+ * LVM/DM thin as of at least
+ * LVM version: 2.02.115(2)-RHEL7 (2015-01-28)
+ * Library version: 1.02.93-RHEL7 (2015-01-28)
+ * Driver version: 4.29.0
+ * still behaves this way.
+ *
+ * For unaligned (wrt. alignment and granularity) or too small discards,
+ * we zero out the initial and/or trailing unaligned partial chunks,
+ * but discard all the aligned full chunks.
+ *
+ * At least for LVM/DM thin, the result is effectively "discard_zeroes_data=1".
+ */
+int drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, unsigned int nr_sectors, bool discard)
+{
+ struct block_device *bdev = device->ldev->backing_bdev;
+ struct request_queue *q = bdev_get_queue(bdev);
+ sector_t tmp, nr;
+ unsigned int max_discard_sectors, granularity;
+ int alignment;
+ int err = 0;
+
+ if (!discard)
+ goto zero_out;
+
+ /* Zero-sector (unknown) and one-sector granularities are the same. */
+ granularity = max(q->limits.discard_granularity >> 9, 1U);
+ alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
+
+ max_discard_sectors = min(q->limits.max_discard_sectors, (1U << 22));
+ max_discard_sectors -= max_discard_sectors % granularity;
+ if (unlikely(!max_discard_sectors))
+ goto zero_out;
+
+ if (nr_sectors < granularity)
+ goto zero_out;
+
+ tmp = start;
+ if (sector_div(tmp, granularity) != alignment) {
+ if (nr_sectors < 2*granularity)
+ goto zero_out;
+ /* start + gran - (start + gran - align) % gran */
+ tmp = start + granularity - alignment;
+ tmp = start + granularity - sector_div(tmp, granularity);
+
+ nr = tmp - start;
+ err |= blkdev_issue_zeroout(bdev, start, nr, GFP_NOIO, 0);
+ nr_sectors -= nr;
+ start = tmp;
+ }
+ while (nr_sectors >= granularity) {
+ nr = min_t(sector_t, nr_sectors, max_discard_sectors);
+ err |= blkdev_issue_discard(bdev, start, nr, GFP_NOIO, 0);
+ nr_sectors -= nr;
+ start += nr;
+ }
+ zero_out:
+ if (nr_sectors) {
+ err |= blkdev_issue_zeroout(bdev, start, nr_sectors, GFP_NOIO, 0);
+ }
+ return err != 0;
+}
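
To make the head/body/tail arithmetic concrete, here is a self-contained userspace restatement with made-up example values (illustration only; the too-small bailouts and the max_discard_sectors clamp are omitted, and the kernel uses sector_div() because sector_t may be 64 bit on 32-bit builds):

	#include <stdio.h>

	static void split_discard(unsigned long long start, unsigned long long nr,
				  unsigned int granularity, unsigned int alignment)
	{
		unsigned long long head = 0, tail, boundary;

		if (start % granularity != alignment) {
			/* first sector > start with sector % granularity == alignment */
			boundary = start + granularity
				 - ((start + granularity - alignment) % granularity);
			head = boundary - start;	/* zeroed out */
			start = boundary;
			nr -= head;
		}
		tail = nr % granularity;		/* zeroed out */
		printf("zero %llu, discard %llu, zero %llu sectors\n",
		       head, nr - tail, tail);
	}

	int main(void)
	{
		split_discard(10, 100, 8, 0);	/* -> zero 6, discard 88, zero 6 */
		return 0;
	}
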
+
+static bool can_do_reliable_discards(struct drbd_device *device)
+{
+ struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev);
+ struct disk_conf *dc;
+ bool can_do;
+
+ if (!blk_queue_discard(q))
+ return false;
+
+ if (q->limits.discard_zeroes_data)
+ return true;
+
+ rcu_read_lock();
+ dc = rcu_dereference(device->ldev->disk_conf);
+ can_do = dc->discard_zeroes_if_aligned;
+ rcu_read_unlock();
+ return can_do;
+}
+
+static void drbd_issue_peer_discard(struct drbd_device *device, struct drbd_peer_request *peer_req)
+{
+ /* If the backend cannot discard, or does not guarantee
+ * read-back zeroes in discarded ranges, we fall back to
+ * zero-out, unless the configuration specifically requests
+ * otherwise. */
+ if (!can_do_reliable_discards(device))
+ peer_req->flags |= EE_IS_TRIM_USE_ZEROOUT;
+
+ if (drbd_issue_discard_or_zero_out(device, peer_req->i.sector,
+ peer_req->i.size >> 9, !(peer_req->flags & EE_IS_TRIM_USE_ZEROOUT)))
+ peer_req->flags |= EE_WAS_ERROR;
+ drbd_endio_write_sec_final(peer_req);
+}
+
+static void drbd_issue_peer_wsame(struct drbd_device *device,
+ struct drbd_peer_request *peer_req)
+{
+ struct block_device *bdev = device->ldev->backing_bdev;
+ sector_t s = peer_req->i.sector;
+ sector_t nr = peer_req->i.size >> 9;
+ if (blkdev_issue_write_same(bdev, s, nr, GFP_NOIO, peer_req->pages))
+ peer_req->flags |= EE_WAS_ERROR;
+ drbd_endio_write_sec_final(peer_req);
+}
+
+
/**
* drbd_submit_peer_request()
* @device: DRBD device.
* @peer_req: peer request
- * @rw: flag field, see bio->bi_rw
+ * @rw: flag field, see bio->bi_opf
*
* May spread the pages to multiple bios,
* depending on bio_add_page restrictions.
@@ -1398,7 +1579,8 @@ void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backin
/* TODO allocate from our own bio_set. */
int drbd_submit_peer_request(struct drbd_device *device,
struct drbd_peer_request *peer_req,
- const unsigned rw, const int fault_type)
+ const unsigned op, const unsigned op_flags,
+ const int fault_type)
{
struct bio *bios = NULL;
struct bio *bio;
@@ -1409,7 +1591,13 @@ int drbd_submit_peer_request(struct drbd_device *device,
unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
int err = -ENOMEM;
- if (peer_req->flags & EE_IS_TRIM_USE_ZEROOUT) {
+ /* TRIM/DISCARD: for now, always use the helper function
+ * blkdev_issue_zeroout(..., discard=true).
+ * It's synchronous, but it does the right thing wrt. bio splitting.
+ * Correctness first, performance later. Next step is to code an
+ * asynchronous variant of the same.
+ */
+ if (peer_req->flags & (EE_IS_TRIM|EE_WRITE_SAME)) {
/* wait for all pending IO completions, before we start
* zeroing things out. */
conn_wait_active_ee_empty(peer_req->peer_device->connection);
@@ -1417,22 +1605,22 @@ int drbd_submit_peer_request(struct drbd_device *device,
* so we can find it to present it in debugfs */
peer_req->submit_jif = jiffies;
peer_req->flags |= EE_SUBMITTED;
- spin_lock_irq(&device->resource->req_lock);
- list_add_tail(&peer_req->w.list, &device->active_ee);
- spin_unlock_irq(&device->resource->req_lock);
- if (blkdev_issue_zeroout(device->ldev->backing_bdev,
- sector, data_size >> 9, GFP_NOIO, false))
- peer_req->flags |= EE_WAS_ERROR;
- drbd_endio_write_sec_final(peer_req);
+
+ /* If this was a resync request from receive_rs_deallocated(),
+ * it is already on the sync_ee list */
+ if (list_empty(&peer_req->w.list)) {
+ spin_lock_irq(&device->resource->req_lock);
+ list_add_tail(&peer_req->w.list, &device->active_ee);
+ spin_unlock_irq(&device->resource->req_lock);
+ }
+
+ if (peer_req->flags & EE_IS_TRIM)
+ drbd_issue_peer_discard(device, peer_req);
+ else /* EE_WRITE_SAME */
+ drbd_issue_peer_wsame(device, peer_req);
return 0;
}
- /* Discards don't have any payload.
- * But the scsi layer still expects a bio_vec it can use internally,
- * see sd_setup_discard_cmnd() and blk_add_request_payload(). */
- if (peer_req->flags & EE_IS_TRIM)
- nr_pages = 1;
-
/* In most cases, we will only need one bio. But in case the lower
* level restrictions happen to be different at this offset on this
* side than those of the sending peer, we may need to submit the
@@ -1450,7 +1638,7 @@ next_bio:
/* > peer_req->i.sector, unless this is the first bio */
bio->bi_iter.bi_sector = sector;
bio->bi_bdev = device->ldev->backing_bdev;
- bio->bi_rw = rw;
+ bio_set_op_attrs(bio, op, op_flags);
bio->bi_private = peer_req;
bio->bi_end_io = drbd_peer_request_endio;
@@ -1458,11 +1646,6 @@ next_bio:
bios = bio;
++n_bios;
- if (rw & REQ_DISCARD) {
- bio->bi_iter.bi_size = data_size;
- goto submit;
- }
-
page_chain_for_each(page) {
unsigned len = min_t(unsigned, data_size, PAGE_SIZE);
if (!bio_add_page(bio, page, len, 0)) {
@@ -1484,7 +1667,6 @@ next_bio:
--nr_pages;
}
D_ASSERT(device, data_size == 0);
-submit:
D_ASSERT(device, page == NULL);
atomic_set(&peer_req->pending_bios, n_bios);
@@ -1608,8 +1790,26 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
return 0;
}
+/* quick wrapper in case payload size != request_size (write same) */
+static void drbd_csum_ee_size(struct crypto_ahash *h,
+ struct drbd_peer_request *r, void *d,
+ unsigned int payload_size)
+{
+ unsigned int tmp = r->i.size;
+ r->i.size = payload_size;
+ drbd_csum_ee(h, r, d);
+ r->i.size = tmp;
+}
+
/* used from receive_RSDataReply (recv_resync_read)
- * and from receive_Data */
+ * and from receive_Data.
+ * data_size: actual payload ("data in")
+ * for normal writes that is bi_size.
+ * for discards, that is zero.
+ * for write same, it is logical_block_size.
+ * both trim and write same have the bi_size ("data len to be affected")
+ * as an extra argument in the packet header.
+ */
static struct drbd_peer_request *
read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
struct packet_info *pi) __must_hold(local)
@@ -1624,6 +1824,7 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
void *dig_vv = peer_device->connection->int_dig_vv;
unsigned long *data;
struct p_trim *trim = (pi->cmd == P_TRIM) ? pi->data : NULL;
+ struct p_trim *wsame = (pi->cmd == P_WSAME) ? pi->data : NULL;
digest_size = 0;
if (!trim && peer_device->connection->peer_integrity_tfm) {
@@ -1638,38 +1839,60 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
data_size -= digest_size;
}
+ /* assume request_size == data_size, but special case trim and wsame. */
+ ds = data_size;
if (trim) {
- D_ASSERT(peer_device, data_size == 0);
- data_size = be32_to_cpu(trim->size);
+ if (!expect(data_size == 0))
+ return NULL;
+ ds = be32_to_cpu(trim->size);
+ } else if (wsame) {
+ if (data_size != queue_logical_block_size(device->rq_queue)) {
+ drbd_err(peer_device, "data size (%u) != drbd logical block size (%u)\n",
+ data_size, queue_logical_block_size(device->rq_queue));
+ return NULL;
+ }
+ if (data_size != bdev_logical_block_size(device->ldev->backing_bdev)) {
+ drbd_err(peer_device, "data size (%u) != backend logical block size (%u)\n",
+ data_size, bdev_logical_block_size(device->ldev->backing_bdev));
+ return NULL;
+ }
+ ds = be32_to_cpu(wsame->size);
}
- if (!expect(IS_ALIGNED(data_size, 512)))
+ if (!expect(IS_ALIGNED(ds, 512)))
return NULL;
- /* prepare for larger trim requests. */
- if (!trim && !expect(data_size <= DRBD_MAX_BIO_SIZE))
+ if (trim || wsame) {
+ if (!expect(ds <= (DRBD_MAX_BBIO_SECTORS << 9)))
+ return NULL;
+ } else if (!expect(ds <= DRBD_MAX_BIO_SIZE))
return NULL;
/* even though we trust our peer,
* we sometimes have to double check. */
- if (sector + (data_size>>9) > capacity) {
+ if (sector + (ds>>9) > capacity) {
drbd_err(device, "request from peer beyond end of local disk: "
"capacity: %llus < sector: %llus + size: %u\n",
(unsigned long long)capacity,
- (unsigned long long)sector, data_size);
+ (unsigned long long)sector, ds);
return NULL;
}
/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
- peer_req = drbd_alloc_peer_req(peer_device, id, sector, data_size, trim == NULL, GFP_NOIO);
+ peer_req = drbd_alloc_peer_req(peer_device, id, sector, ds, data_size, GFP_NOIO);
if (!peer_req)
return NULL;
peer_req->flags |= EE_WRITE;
- if (trim)
+ if (trim) {
+ peer_req->flags |= EE_IS_TRIM;
return peer_req;
+ }
+ if (wsame)
+ peer_req->flags |= EE_WRITE_SAME;
+ /* receive payload size bytes into page chain */
ds = data_size;
page = peer_req->pages;
page_chain_for_each(page) {
@@ -1689,7 +1912,7 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
}
if (digest_size) {
- drbd_csum_ee(peer_device->connection->peer_integrity_tfm, peer_req, dig_vv);
+ drbd_csum_ee_size(peer_device->connection->peer_integrity_tfm, peer_req, dig_vv, data_size);
if (memcmp(dig_in, dig_vv, digest_size)) {
drbd_err(device, "Digest integrity check FAILED: %llus +%u\n",
(unsigned long long)sector, data_size);
@@ -1830,7 +2053,8 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
spin_unlock_irq(&device->resource->req_lock);
atomic_add(pi->size >> 9, &device->rs_sect_ev);
- if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
+ if (drbd_submit_peer_request(device, peer_req, REQ_OP_WRITE, 0,
+ DRBD_FAULT_RS_WR) == 0)
return 0;
/* don't care for the reason here */
@@ -2065,13 +2289,13 @@ static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
static bool overlapping_resync_write(struct drbd_device *device, struct drbd_peer_request *peer_req)
{
struct drbd_peer_request *rs_req;
- bool rv = 0;
+ bool rv = false;
spin_lock_irq(&device->resource->req_lock);
list_for_each_entry(rs_req, &device->sync_ee, w.list) {
if (overlaps(peer_req->i.sector, peer_req->i.size,
rs_req->i.sector, rs_req->i.size)) {
- rv = 1;
+ rv = true;
break;
}
}
@@ -2152,12 +2376,19 @@ static int wait_for_and_update_peer_seq(struct drbd_peer_device *peer_device, co
/* see also bio_flags_to_wire()
* DRBD_REQ_*, because we need to semantically map the flags to data packet
* flags and back. We may replicate to other kernel versions. */
-static unsigned long wire_flags_to_bio(u32 dpf)
+static unsigned long wire_flags_to_bio_flags(u32 dpf)
{
return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
(dpf & DP_FUA ? REQ_FUA : 0) |
- (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
- (dpf & DP_DISCARD ? REQ_DISCARD : 0);
+ (dpf & DP_FLUSH ? REQ_PREFLUSH : 0);
+}
+
+static unsigned long wire_flags_to_bio_op(u32 dpf)
+{
+ if (dpf & DP_DISCARD)
+ return REQ_OP_DISCARD;
+ else
+ return REQ_OP_WRITE;
}
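
The sending side maps in the opposite direction; an illustrative sketch of that counterpart (the real one is bio_flags_to_wire() in drbd_main.c, which this approximates rather than copies):

	static u32 bio_flags_to_wire_sketch(struct bio *bio)
	{
		return (bio->bi_opf & REQ_SYNC     ? DP_RW_SYNC : 0) |
		       (bio->bi_opf & REQ_FUA      ? DP_FUA     : 0) |
		       (bio->bi_opf & REQ_PREFLUSH ? DP_FLUSH   : 0) |
		       (bio_op(bio) == REQ_OP_DISCARD    ? DP_DISCARD : 0) |
		       (bio_op(bio) == REQ_OP_WRITE_SAME ? DP_WSAME   : 0);
	}
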
static void fail_postponed_requests(struct drbd_device *device, sector_t sector,
@@ -2303,7 +2534,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
struct drbd_peer_request *peer_req;
struct p_data *p = pi->data;
u32 peer_seq = be32_to_cpu(p->seq_num);
- int rw = WRITE;
+ int op, op_flags;
u32 dp_flags;
int err, tp;
@@ -2342,14 +2573,11 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
peer_req->flags |= EE_APPLICATION;
dp_flags = be32_to_cpu(p->dp_flags);
- rw |= wire_flags_to_bio(dp_flags);
+ op = wire_flags_to_bio_op(dp_flags);
+ op_flags = wire_flags_to_bio_flags(dp_flags);
if (pi->cmd == P_TRIM) {
- struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev);
- peer_req->flags |= EE_IS_TRIM;
- if (!blk_queue_discard(q))
- peer_req->flags |= EE_IS_TRIM_USE_ZEROOUT;
D_ASSERT(peer_device, peer_req->i.size > 0);
- D_ASSERT(peer_device, rw & REQ_DISCARD);
+ D_ASSERT(peer_device, op == REQ_OP_DISCARD);
D_ASSERT(peer_device, peer_req->pages == NULL);
} else if (peer_req->pages == NULL) {
D_ASSERT(device, peer_req->i.size == 0);
@@ -2414,11 +2642,11 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
update_peer_seq(peer_device, peer_seq);
spin_lock_irq(&device->resource->req_lock);
}
- /* if we use the zeroout fallback code, we process synchronously
- * and we wait for all pending requests, respectively wait for
+ /* TRIM and WRITE_SAME are processed synchronously,
+ * we wait for all pending requests, respectively wait for
* active_ee to become empty in drbd_submit_peer_request();
* better not add ourselves here. */
- if ((peer_req->flags & EE_IS_TRIM_USE_ZEROOUT) == 0)
+ if ((peer_req->flags & (EE_IS_TRIM|EE_WRITE_SAME)) == 0)
list_add_tail(&peer_req->w.list, &device->active_ee);
spin_unlock_irq(&device->resource->req_lock);
@@ -2433,7 +2661,8 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
}
- err = drbd_submit_peer_request(device, peer_req, rw, DRBD_FAULT_DT_WR);
+ err = drbd_submit_peer_request(device, peer_req, op, op_flags,
+ DRBD_FAULT_DT_WR);
if (!err)
return 0;
@@ -2449,7 +2678,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
}
out_interrupted:
- drbd_may_finish_epoch(connection, peer_req->epoch, EV_PUT + EV_CLEANUP);
+ drbd_may_finish_epoch(connection, peer_req->epoch, EV_PUT | EV_CLEANUP);
put_ldev(device);
drbd_free_peer_req(device, peer_req);
return err;
@@ -2574,6 +2803,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
case P_DATA_REQUEST:
drbd_send_ack_rp(peer_device, P_NEG_DREPLY, p);
break;
+ case P_RS_THIN_REQ:
case P_RS_DATA_REQUEST:
case P_CSUM_RS_REQUEST:
case P_OV_REQUEST:
@@ -2599,7 +2829,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
peer_req = drbd_alloc_peer_req(peer_device, p->block_id, sector, size,
- true /* has real payload */, GFP_NOIO);
+ size, GFP_NOIO);
if (!peer_req) {
put_ldev(device);
return -ENOMEM;
@@ -2613,6 +2843,12 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
peer_req->flags |= EE_APPLICATION;
goto submit;
+ case P_RS_THIN_REQ:
+ /* If at some point in the future we have a smart way to
+ find out if this data block is completely deallocated,
+ then we would do something smarter here than reading
+ the block... */
+ peer_req->flags |= EE_RS_THIN_REQ;
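+ /* fall through: a thin request is served like a normal resync read */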
case P_RS_DATA_REQUEST:
peer_req->w.cb = w_e_end_rsdata_req;
fault_type = DRBD_FAULT_RS_RD;
@@ -2723,7 +2959,8 @@ submit_for_resync:
submit:
update_receiver_timing_details(connection, drbd_submit_peer_request);
inc_unacked(device);
- if (drbd_submit_peer_request(device, peer_req, READ, fault_type) == 0)
+ if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0,
+ fault_type) == 0)
return 0;
/* don't care for the reason here */
@@ -2957,7 +3194,8 @@ static void drbd_uuid_dump(struct drbd_device *device, char *text, u64 *uuid,
-1091 requires proto 91
-1096 requires proto 96
*/
-static int drbd_uuid_compare(struct drbd_device *const device, int *rule_nr) __must_hold(local)
+
+static int drbd_uuid_compare(struct drbd_device *const device, enum drbd_role const peer_role, int *rule_nr) __must_hold(local)
{
struct drbd_peer_device *const peer_device = first_peer_device(device);
struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
@@ -3037,8 +3275,39 @@ static int drbd_uuid_compare(struct drbd_device *const device, int *rule_nr) __m
* next bit (weight 2) is set when peer was primary */
*rule_nr = 40;
+ /* Neither has the "crashed primary" flag set,
+ * only a replication link hiccup. */
+ if (rct == 0)
+ return 0;
+
+ /* Current UUID equal and no bitmap uuid; does not necessarily
+ * mean this was a "simultaneous hard crash", maybe IO was
+ * frozen, so no UUID-bump happened.
+ * This is a protocol change, overload DRBD_FF_WSAME as flag
+ * for "new-enough" peer DRBD version. */
+ if (device->state.role == R_PRIMARY || peer_role == R_PRIMARY) {
+ *rule_nr = 41;
+ if (!(connection->agreed_features & DRBD_FF_WSAME)) {
+ drbd_warn(peer_device, "Equivalent unrotated UUIDs, but current primary present.\n");
+ return -(0x10000 | PRO_VERSION_MAX | (DRBD_FF_WSAME << 8));
+ }
+ if (device->state.role == R_PRIMARY && peer_role == R_PRIMARY) {
+ /* At least one has the "crashed primary" bit set,
+ * both are primary now, but neither has rotated its UUIDs?
+ * "Can not happen." */
+ drbd_err(peer_device, "Equivalent unrotated UUIDs, but both are primary. Can not resolve this.\n");
+ return -100;
+ }
+ if (device->state.role == R_PRIMARY)
+ return 1;
+ return -1;
+ }
+
+ /* Both are secondary.
+ * Really looks like recovery from simultaneous hard crash.
+ * Check which had been primary before, and arbitrate. */
switch (rct) {
- case 0: /* !self_pri && !peer_pri */ return 0;
+ case 0: /* !self_pri && !peer_pri */ return 0; /* already handled */
case 1: /* self_pri && !peer_pri */ return 1;
case 2: /* !self_pri && peer_pri */ return -1;
case 3: /* self_pri && peer_pri */
@@ -3165,7 +3434,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
drbd_uuid_dump(device, "peer", device->p_uuid,
device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
- hg = drbd_uuid_compare(device, &rule_nr);
+ hg = drbd_uuid_compare(device, peer_role, &rule_nr);
spin_unlock_irq(&device->ldev->md.uuid_lock);
drbd_info(device, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
@@ -3174,6 +3443,15 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
drbd_alert(device, "Unrelated data, aborting!\n");
return C_MASK;
}
+ if (hg < -0x10000) {
+ int proto, fflags;
+ hg = -hg;
+ proto = hg & 0xff;
+ fflags = (hg >> 8) & 0xff;
+ drbd_alert(device, "To resolve this both sides have to support at least protocol %d and feature flags 0x%x\n",
+ proto, fflags);
+ return C_MASK;
+ }
if (hg < -1000) {
drbd_alert(device, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
return C_MASK;
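
Return values below -0x10000 pack the requirement into the magnitude of hg: the low byte carries the needed protocol version, the next byte the needed feature flags, exactly as unpacked above. A standalone illustration (assuming PRO_VERSION_MAX == 101 just for the example):

	#include <stdio.h>

	int main(void)
	{
		int hg = -(0x10000 | 101 | (4 /* DRBD_FF_WSAME */ << 8));
		int v = -hg;

		printf("need protocol %d, feature flags 0x%x\n",
		       v & 0xff, (v >> 8) & 0xff);	/* -> 101, 0x4 */
		return 0;
	}
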
@@ -3403,7 +3681,8 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
*/
peer_integrity_tfm = crypto_alloc_ahash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
- if (!peer_integrity_tfm) {
+ if (IS_ERR(peer_integrity_tfm)) {
+ peer_integrity_tfm = NULL;
drbd_err(connection, "peer data-integrity-alg %s not supported\n",
integrity_alg);
goto disconnect;
@@ -3754,6 +4033,7 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
struct drbd_peer_device *peer_device;
struct drbd_device *device;
struct p_sizes *p = pi->data;
+ struct o_qlim *o = (connection->agreed_features & DRBD_FF_WSAME) ? p->qlim : NULL;
enum determine_dev_size dd = DS_UNCHANGED;
sector_t p_size, p_usize, p_csize, my_usize;
int ldsc = 0; /* local disk size changed */
@@ -3773,6 +4053,7 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
device->p_size = p_size;
if (get_ldev(device)) {
+ sector_t new_size, cur_size;
rcu_read_lock();
my_usize = rcu_dereference(device->ldev->disk_conf)->disk_size;
rcu_read_unlock();
@@ -3789,11 +4070,13 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
/* Never shrink a device with usable data during connect.
But allow online shrinking if we are connected. */
- if (drbd_new_dev_size(device, device->ldev, p_usize, 0) <
- drbd_get_capacity(device->this_bdev) &&
+ new_size = drbd_new_dev_size(device, device->ldev, p_usize, 0);
+ cur_size = drbd_get_capacity(device->this_bdev);
+ if (new_size < cur_size &&
device->state.disk >= D_OUTDATED &&
device->state.conn < C_CONNECTED) {
- drbd_err(device, "The peer's disk size is too small!\n");
+ drbd_err(device, "The peer's disk size is too small! (%llu < %llu sectors)\n",
+ (unsigned long long)new_size, (unsigned long long)cur_size);
conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
put_ldev(device);
return -EIO;
@@ -3827,14 +4110,14 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
}
device->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
- /* Leave drbd_reconsider_max_bio_size() before drbd_determine_dev_size().
+ /* Leave drbd_reconsider_queue_parameters() before drbd_determine_dev_size().
In case we cleared the QUEUE_FLAG_DISCARD from our queue in
- drbd_reconsider_max_bio_size(), we can be sure that after
+ drbd_reconsider_queue_parameters(), we can be sure that after
drbd_determine_dev_size() no REQ_DISCARDs are in the queue. */
ddsf = be16_to_cpu(p->dds_flags);
if (get_ldev(device)) {
- drbd_reconsider_max_bio_size(device, device->ldev);
+ drbd_reconsider_queue_parameters(device, device->ldev, o);
dd = drbd_determine_dev_size(device, ddsf, NULL);
put_ldev(device);
if (dd == DS_ERROR)
@@ -3854,7 +4137,7 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
* However, if he sends a zero current size,
* take his (user-capped or) backing disk size anyways.
*/
- drbd_reconsider_max_bio_size(device, NULL);
+ drbd_reconsider_queue_parameters(device, NULL, o);
drbd_set_my_capacity(device, p_csize ?: p_usize ?: p_size);
}
@@ -4587,9 +4870,75 @@ static int receive_out_of_sync(struct drbd_connection *connection, struct packet
return 0;
}
+static int receive_rs_deallocated(struct drbd_connection *connection, struct packet_info *pi)
+{
+ struct drbd_peer_device *peer_device;
+ struct p_block_desc *p = pi->data;
+ struct drbd_device *device;
+ sector_t sector;
+ int size, err = 0;
+
+ peer_device = conn_peer_device(connection, pi->vnr);
+ if (!peer_device)
+ return -EIO;
+ device = peer_device->device;
+
+ sector = be64_to_cpu(p->sector);
+ size = be32_to_cpu(p->blksize);
+
+ dec_rs_pending(device);
+
+ if (get_ldev(device)) {
+ struct drbd_peer_request *peer_req;
+ const int op = REQ_OP_DISCARD;
+
+ peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER, sector,
+ size, 0, GFP_NOIO);
+ if (!peer_req) {
+ put_ldev(device);
+ return -ENOMEM;
+ }
+
+ peer_req->w.cb = e_end_resync_block;
+ peer_req->submit_jif = jiffies;
+ peer_req->flags |= EE_IS_TRIM;
+
+ spin_lock_irq(&device->resource->req_lock);
+ list_add_tail(&peer_req->w.list, &device->sync_ee);
+ spin_unlock_irq(&device->resource->req_lock);
+
+ atomic_add(pi->size >> 9, &device->rs_sect_ev);
+ err = drbd_submit_peer_request(device, peer_req, op, 0, DRBD_FAULT_RS_WR);
+
+ if (err) {
+ spin_lock_irq(&device->resource->req_lock);
+ list_del(&peer_req->w.list);
+ spin_unlock_irq(&device->resource->req_lock);
+
+ drbd_free_peer_req(device, peer_req);
+ put_ldev(device);
+ err = 0;
+ goto fail;
+ }
+
+ inc_unacked(device);
+
+ /* No put_ldev() here. Gets called in drbd_endio_write_sec_final(),
+ as well as drbd_rs_complete_io() */
+ } else {
+ fail:
+ drbd_rs_complete_io(device, sector);
+ drbd_send_ack_ex(peer_device, P_NEG_ACK, sector, size, ID_SYNCER);
+ }
+
+ atomic_add(size >> 9, &device->rs_sect_in);
+
+ return err;
+}
+
struct data_cmd {
int expect_payload;
- size_t pkt_size;
+ unsigned int pkt_size;
int (*fn)(struct drbd_connection *, struct packet_info *);
};
@@ -4614,11 +4963,14 @@ static struct data_cmd drbd_cmd_handler[] = {
[P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
[P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
[P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
+ [P_RS_THIN_REQ] = { 0, sizeof(struct p_block_req), receive_DataRequest },
[P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
[P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
[P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
[P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
[P_TRIM] = { 0, sizeof(struct p_trim), receive_Data },
+ [P_RS_DEALLOCATED] = { 0, sizeof(struct p_block_desc), receive_rs_deallocated },
+ [P_WSAME] = { 1, sizeof(struct p_wsame), receive_Data },
};
static void drbdd(struct drbd_connection *connection)
@@ -4628,7 +4980,7 @@ static void drbdd(struct drbd_connection *connection)
int err;
while (get_t_state(&connection->receiver) == RUNNING) {
- struct data_cmd *cmd;
+ struct data_cmd const *cmd;
drbd_thread_current_set_cpu(&connection->receiver);
update_receiver_timing_details(connection, drbd_recv_header);
@@ -4643,11 +4995,18 @@ static void drbdd(struct drbd_connection *connection)
}
shs = cmd->pkt_size;
+ if (pi.cmd == P_SIZES && connection->agreed_features & DRBD_FF_WSAME)
+ shs += sizeof(struct o_qlim);
if (pi.size > shs && !cmd->expect_payload) {
drbd_err(connection, "No payload expected %s l:%d\n",
cmdname(pi.cmd), pi.size);
goto err_out;
}
+ if (pi.size < shs) {
+ drbd_err(connection, "%s: unexpected packet size, expected:%d received:%d\n",
+ cmdname(pi.cmd), (int)shs, pi.size);
+ goto err_out;
+ }
if (shs) {
update_receiver_timing_details(connection, drbd_recv_all_warn);
@@ -4783,9 +5142,11 @@ static int drbd_disconnected(struct drbd_peer_device *peer_device)
drbd_md_sync(device);
- /* serialize with bitmap writeout triggered by the state change,
- * if any. */
- wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
+ if (get_ldev(device)) {
+ drbd_bitmap_io(device, &drbd_bm_write_copy_pages,
+ "write from disconnected", BM_LOCKED_CHANGE_ALLOWED);
+ put_ldev(device);
+ }
/* tcp_close and release of sendpage pages can be deferred. I don't
* want to use SO_LINGER, because apparently it can be deferred for
@@ -4892,8 +5253,12 @@ static int drbd_do_features(struct drbd_connection *connection)
drbd_info(connection, "Handshake successful: "
"Agreed network protocol version %d\n", connection->agreed_pro_version);
- drbd_info(connection, "Agreed to%ssupport TRIM on protocol level\n",
- connection->agreed_features & FF_TRIM ? " " : " not ");
+ drbd_info(connection, "Feature flags enabled on protocol level: 0x%x%s%s%s.\n",
+ connection->agreed_features,
+ connection->agreed_features & DRBD_FF_TRIM ? " TRIM" : "",
+ connection->agreed_features & DRBD_FF_THIN_RESYNC ? " THIN_RESYNC" : "",
+ connection->agreed_features & DRBD_FF_WSAME ? " WRITE_SAME" :
+ connection->agreed_features ? "" : " none");
return 1;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 2255dcfeb..de279fe4e 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -47,8 +47,7 @@ static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *r
&device->vdisk->part0, req->start_jif);
}
-static struct drbd_request *drbd_req_new(struct drbd_device *device,
- struct bio *bio_src)
+static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio *bio_src)
{
struct drbd_request *req;
@@ -58,10 +57,12 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device,
memset(req, 0, sizeof(*req));
drbd_req_make_private_bio(req, bio_src);
- req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
- req->device = device;
- req->master_bio = bio_src;
- req->epoch = 0;
+ req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0)
+ | (bio_op(bio_src) == REQ_OP_WRITE_SAME ? RQ_WSAME : 0)
+ | (bio_op(bio_src) == REQ_OP_DISCARD ? RQ_UNMAP : 0);
+ req->device = device;
+ req->master_bio = bio_src;
+ req->epoch = 0;
drbd_clear_interval(&req->i);
req->i.sector = bio_src->bi_iter.bi_sector;
@@ -218,7 +219,6 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
{
const unsigned s = req->rq_state;
struct drbd_device *device = req->device;
- int rw;
int error, ok;
/* we must not complete the master bio, while it is
@@ -242,8 +242,6 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
return;
}
- rw = bio_rw(req->master_bio);
-
/*
* figure out whether to report success or failure.
*
@@ -267,7 +265,7 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
* epoch number. If they match, increase the current_tle_nr,
* and reset the transfer log epoch write_cnt.
*/
- if (rw == WRITE &&
+ if (op_is_write(bio_op(req->master_bio)) &&
req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr))
start_new_tl_epoch(first_peer_device(device)->connection);
@@ -284,11 +282,14 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
* because no path was available, in which case
* it was not even added to the transfer_log.
*
- * READA may fail, and will not be retried.
+ * read-ahead may fail, and will not be retried.
*
* WRITE should have used all available paths already.
*/
- if (!ok && rw == READ && !list_empty(&req->tl_requests))
+ if (!ok &&
+ bio_op(req->master_bio) == REQ_OP_READ &&
+ !(req->master_bio->bi_opf & REQ_RAHEAD) &&
+ !list_empty(&req->tl_requests))
req->rq_state |= RQ_POSTPONED;
if (!(req->rq_state & RQ_POSTPONED)) {
@@ -644,7 +645,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
__drbd_chk_io_error(device, DRBD_READ_ERROR);
/* fall through. */
case READ_AHEAD_COMPLETED_WITH_ERROR:
- /* it is legal to fail READA, no __drbd_chk_io_error in that case. */
+ /* it is legal to fail read-ahead, no __drbd_chk_io_error in that case. */
mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
break;
@@ -656,7 +657,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
break;
case QUEUE_FOR_NET_READ:
- /* READ or READA, and
+ /* READ, and
* no local disk,
* or target area marked as invalid,
* or just got an io-error. */
@@ -977,16 +978,20 @@ static void complete_conflicting_writes(struct drbd_request *req)
sector_t sector = req->i.sector;
int size = req->i.size;
- i = drbd_find_overlap(&device->write_requests, sector, size);
- if (!i)
- return;
-
for (;;) {
- prepare_to_wait(&device->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
- i = drbd_find_overlap(&device->write_requests, sector, size);
- if (!i)
+ drbd_for_each_overlap(i, &device->write_requests, sector, size) {
+ /* Ignore, if already completed to upper layers. */
+ if (i->completed)
+ continue;
+ /* Handle the first found overlap. After the schedule
+ * we have to restart the tree walk. */
+ break;
+ }
+ if (!i) /* no uncompleted overlap found */
break;
+
/* Indicate to wake up device->misc_wait on progress. */
+ prepare_to_wait(&device->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
i->waiting = true;
spin_unlock_irq(&device->resource->req_lock);
schedule();
@@ -995,7 +1000,7 @@ static void complete_conflicting_writes(struct drbd_request *req)
finish_wait(&device->misc_wait, &wait);
}
-/* called within req_lock and rcu_read_lock() */
+/* called within req_lock */
static void maybe_pull_ahead(struct drbd_device *device)
{
struct drbd_connection *connection = first_peer_device(device)->connection;
@@ -1132,7 +1137,7 @@ static int drbd_process_write_request(struct drbd_request *req)
* replicating, in which case there is no point. */
if (unlikely(req->i.size == 0)) {
/* The only size==0 bios we expect are empty flushes. */
- D_ASSERT(device, req->master_bio->bi_rw & REQ_FLUSH);
+ D_ASSERT(device, req->master_bio->bi_opf & REQ_PREFLUSH);
if (remote)
_req_mod(req, QUEUE_AS_DRBD_BARRIER);
return remote;
@@ -1152,12 +1157,29 @@ static int drbd_process_write_request(struct drbd_request *req)
return remote;
}
+static void drbd_process_discard_req(struct drbd_request *req)
+{
+ int err = drbd_issue_discard_or_zero_out(req->device,
+ req->i.sector, req->i.size >> 9, true);
+
+ if (err)
+ req->private_bio->bi_error = -EIO;
+ bio_endio(req->private_bio);
+}
+
static void
drbd_submit_req_private_bio(struct drbd_request *req)
{
struct drbd_device *device = req->device;
struct bio *bio = req->private_bio;
- const int rw = bio_rw(bio);
+ unsigned int type;
+
+ if (bio_op(bio) != REQ_OP_READ)
+ type = DRBD_FAULT_DT_WR;
+ else if (bio->bi_opf & REQ_RAHEAD)
+ type = DRBD_FAULT_DT_RA;
+ else
+ type = DRBD_FAULT_DT_RD;
bio->bi_bdev = device->ldev->backing_bdev;
@@ -1167,11 +1189,10 @@ drbd_submit_req_private_bio(struct drbd_request *req)
* stable storage, and this is a WRITE, we may not even submit
* this bio. */
if (get_ldev(device)) {
- if (drbd_insert_fault(device,
- rw == WRITE ? DRBD_FAULT_DT_WR
- : rw == READ ? DRBD_FAULT_DT_RD
- : DRBD_FAULT_DT_RA))
+ if (drbd_insert_fault(device, type))
bio_io_error(bio);
+ else if (bio_op(bio) == REQ_OP_DISCARD)
+ drbd_process_discard_req(req);
else
generic_make_request(bio);
put_ldev(device);
@@ -1223,24 +1244,45 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long
/* Update disk stats */
_drbd_start_io_acct(device, req);
+ /* process discards always from our submitter thread */
+ if (bio_op(bio) == REQ_OP_DISCARD)
+ goto queue_for_submitter_thread;
+
if (rw == WRITE && req->private_bio && req->i.size
&& !test_bit(AL_SUSPENDED, &device->flags)) {
- if (!drbd_al_begin_io_fastpath(device, &req->i)) {
- atomic_inc(&device->ap_actlog_cnt);
- drbd_queue_write(device, req);
- return NULL;
- }
+ if (!drbd_al_begin_io_fastpath(device, &req->i))
+ goto queue_for_submitter_thread;
req->rq_state |= RQ_IN_ACT_LOG;
req->in_actlog_jif = jiffies;
}
-
return req;
+
+ queue_for_submitter_thread:
+ atomic_inc(&device->ap_actlog_cnt);
+ drbd_queue_write(device, req);
+ return NULL;
+}
+
+/* Require at least one path to current data.
+ * We don't want to allow writes on C_STANDALONE D_INCONSISTENT:
+ * we would not be able to read back what was written,
+ * we would not have bumped the data generation uuids,
+ * and we would cause data divergence for all the wrong reasons.
+ *
+ * If we don't see at least one D_UP_TO_DATE, we will fail this request,
+ * which either returns EIO, or, if OND_SUSPEND_IO is set, suspends IO,
+ * and queues for retry later.
+ */
+static bool may_do_writes(struct drbd_device *device)
+{
+ const union drbd_dev_state s = device->state;
+ return s.disk == D_UP_TO_DATE || s.pdsk == D_UP_TO_DATE;
}
static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
{
struct drbd_resource *resource = device->resource;
- const int rw = bio_rw(req->master_bio);
+ const int rw = bio_data_dir(req->master_bio);
struct bio_and_error m = { NULL, };
bool no_remote = false;
bool submit_private_bio = false;
@@ -1270,7 +1312,7 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request
goto out;
}
- /* We fail READ/READA early, if we can not serve it.
+ /* We fail READ early, if we can not serve it.
* We must do this before req is registered on any lists.
* Otherwise, drbd_req_complete() will queue failed READ for retry. */
if (rw != WRITE) {
@@ -1291,6 +1333,12 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request
}
if (rw == WRITE) {
+ if (req->private_bio && !may_do_writes(device)) {
+ bio_put(req->private_bio);
+ req->private_bio = NULL;
+ put_ldev(device);
+ goto nodata;
+ }
if (!drbd_process_write_request(req))
no_remote = true;
} else {
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index bb2ef7816..eb49e7f2d 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -206,6 +206,8 @@ enum drbd_req_state_bits {
/* Set when this is a write, clear for a read */
__RQ_WRITE,
+ __RQ_WSAME,
+ __RQ_UNMAP,
/* Should call drbd_al_complete_io() for this request... */
__RQ_IN_ACT_LOG,
@@ -241,10 +243,11 @@ enum drbd_req_state_bits {
#define RQ_NET_OK (1UL << __RQ_NET_OK)
#define RQ_NET_SIS (1UL << __RQ_NET_SIS)
-/* 0x1f8 */
#define RQ_NET_MASK (((1UL << __RQ_NET_MAX)-1) & ~RQ_LOCAL_MASK)
#define RQ_WRITE (1UL << __RQ_WRITE)
+#define RQ_WSAME (1UL << __RQ_WSAME)
+#define RQ_UNMAP (1UL << __RQ_UNMAP)
#define RQ_IN_ACT_LOG (1UL << __RQ_IN_ACT_LOG)
#define RQ_POSTPONED (1UL << __RQ_POSTPONED)
#define RQ_COMPLETION_SUSP (1UL << __RQ_COMPLETION_SUSP)
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index 5a7ef7873..eea0c4aec 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -814,7 +814,7 @@ is_valid_state(struct drbd_device *device, union drbd_state ns)
}
if (rv <= 0)
- /* already found a reason to abort */;
+ goto out; /* already found a reason to abort */
else if (ns.role == R_SECONDARY && device->open_cnt)
rv = SS_DEVICE_IN_USE;
@@ -862,6 +862,7 @@ is_valid_state(struct drbd_device *device, union drbd_state ns)
else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
rv = SS_CONNECTED_OUTDATES;
+out:
rcu_read_unlock();
return rv;
@@ -906,6 +907,15 @@ is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_c
(ns.conn >= C_CONNECTED && os.conn == C_WF_REPORT_PARAMS)))
rv = SS_IN_TRANSIENT_STATE;
+ /* Do not promote during resync handshake triggered by "force primary".
+ * This is a hack. It should really be rejected by the peer during the
+ * cluster wide state change request. */
+ if (os.role != R_PRIMARY && ns.role == R_PRIMARY
+ && ns.pdsk == D_UP_TO_DATE
+ && ns.disk != D_UP_TO_DATE && ns.disk != D_DISKLESS
+ && (ns.conn <= C_WF_SYNC_UUID || ns.conn != os.conn))
+ rv = SS_IN_TRANSIENT_STATE;
+
if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
rv = SS_NEED_CONNECTION;
@@ -1628,6 +1638,26 @@ static void broadcast_state_change(struct drbd_state_change *state_change)
#undef REMEMBER_STATE_CHANGE
}
+/* takes old and new peer disk state */
+static bool lost_contact_to_peer_data(enum drbd_disk_state os, enum drbd_disk_state ns)
+{
+ if ((os >= D_INCONSISTENT && os != D_UNKNOWN && os != D_OUTDATED)
+ && (ns < D_INCONSISTENT || ns == D_UNKNOWN || ns == D_OUTDATED))
+ return true;
+
+ /* Scenario, starting with normal operation
+ * Connected Primary/Secondary UpToDate/UpToDate
+ * NetworkFailure Primary/Unknown UpToDate/DUnknown (frozen)
+ * ...
+ * Connected Primary/Secondary UpToDate/Diskless (resumed; needs to bump uuid!)
+ */
+ if (os == D_UNKNOWN
+ && (ns == D_DISKLESS || ns == D_FAILED || ns == D_OUTDATED))
+ return true;
+
+ return false;
+}
+
/**
* after_state_ch() - Perform after state change actions that may sleep
* @device: DRBD device.
@@ -1675,7 +1705,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
what = RESEND;
if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
- conn_lowest_disk(connection) > D_NEGOTIATING)
+ conn_lowest_disk(connection) == D_UP_TO_DATE)
what = RESTART_FROZEN_DISK_IO;
if (resource->susp_nod && what != NOTHING) {
@@ -1699,6 +1729,13 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
clear_bit(NEW_CUR_UUID, &peer_device->device->flags);
rcu_read_unlock();
+
+ /* We should actively create a new uuid, _before_
+ * we resume/resend, if the peer is diskless
+ * (recovery from a multiple error scenario).
+ * Currently, this happens with a slight delay
+ * below when checking lost_contact_to_peer_data() ...
+ */
_tl_restart(connection, RESEND);
_conn_request_state(connection,
(union drbd_state) { { .susp_fen = 1 } },
@@ -1742,12 +1779,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
BM_LOCKED_TEST_ALLOWED);
/* Lost contact to peer's copy of the data */
- if ((os.pdsk >= D_INCONSISTENT &&
- os.pdsk != D_UNKNOWN &&
- os.pdsk != D_OUTDATED)
- && (ns.pdsk < D_INCONSISTENT ||
- ns.pdsk == D_UNKNOWN ||
- ns.pdsk == D_OUTDATED)) {
+ if (lost_contact_to_peer_data(os.pdsk, ns.pdsk)) {
if (get_ldev(device)) {
if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
device->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
@@ -1934,12 +1966,17 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
/* This triggers bitmap writeout of potentially still unwritten pages
* if the resync finished cleanly, or aborted because of peer disk
- * failure, or because of connection loss.
+ * failure, or on transition from resync back to AHEAD/BEHIND.
+ *
+ * Connection loss is handled in drbd_disconnected() by the receiver.
+ *
* For resync aborted because of local disk failure, we cannot do
* any bitmap writeout anymore.
+ *
* No harm done if some bits change during this phase.
*/
- if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(device)) {
+ if ((os.conn > C_CONNECTED && os.conn < C_AHEAD) &&
+ (ns.conn == C_CONNECTED || ns.conn >= C_AHEAD) && get_ldev(device)) {
drbd_queue_bitmap_io(device, &drbd_bm_write_copy_pages, NULL,
"write from resync_finished", BM_LOCKED_CHANGE_ALLOWED);
put_ldev(device);
@@ -2160,9 +2197,7 @@ conn_set_state(struct drbd_connection *connection, union drbd_state mask, union
ns.disk = os.disk;
rv = _drbd_set_state(device, ns, flags, NULL);
- if (rv < SS_SUCCESS)
- BUG();
-
+ BUG_ON(rv < SS_SUCCESS);
ns.i = device->state.i;
ns_max.role = max_role(ns.role, ns_max.role);
ns_max.peer = max_role(ns.peer, ns_max.peer);
diff --git a/drivers/block/drbd/drbd_state.h b/drivers/block/drbd/drbd_state.h
index bd989536f..6c9d5d4a8 100644
--- a/drivers/block/drbd/drbd_state.h
+++ b/drivers/block/drbd/drbd_state.h
@@ -140,7 +140,7 @@ extern void drbd_resume_al(struct drbd_device *device);
extern bool conn_all_vols_unconf(struct drbd_connection *connection);
/**
- * drbd_request_state() - Reqest a state change
+ * drbd_request_state() - Request a state change
* @device: DRBD device.
* @mask: mask of state bits to change.
* @val: value of new state bits.
diff --git a/drivers/block/drbd/drbd_strings.c b/drivers/block/drbd/drbd_strings.c
index 80b0f63c7..0eeab1477 100644
--- a/drivers/block/drbd/drbd_strings.c
+++ b/drivers/block/drbd/drbd_strings.c
@@ -26,7 +26,7 @@
#include <linux/drbd.h>
#include "drbd_strings.h"
-static const char *drbd_conn_s_names[] = {
+static const char * const drbd_conn_s_names[] = {
[C_STANDALONE] = "StandAlone",
[C_DISCONNECTING] = "Disconnecting",
[C_UNCONNECTED] = "Unconnected",
@@ -53,13 +53,13 @@ static const char *drbd_conn_s_names[] = {
[C_BEHIND] = "Behind",
};
-static const char *drbd_role_s_names[] = {
+static const char * const drbd_role_s_names[] = {
[R_PRIMARY] = "Primary",
[R_SECONDARY] = "Secondary",
[R_UNKNOWN] = "Unknown"
};
-static const char *drbd_disk_s_names[] = {
+static const char * const drbd_disk_s_names[] = {
[D_DISKLESS] = "Diskless",
[D_ATTACHING] = "Attaching",
[D_FAILED] = "Failed",
@@ -71,7 +71,7 @@ static const char *drbd_disk_s_names[] = {
[D_UP_TO_DATE] = "UpToDate",
};
-static const char *drbd_state_sw_errors[] = {
+static const char * const drbd_state_sw_errors[] = {
[-SS_TWO_PRIMARIES] = "Multiple primaries not allowed by config",
[-SS_NO_UP_TO_DATE_DISK] = "Need access to UpToDate data",
[-SS_NO_LOCAL_DISK] = "Can not resync without local disk",
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 4d87499f0..c6755c9a0 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -173,8 +173,8 @@ void drbd_peer_request_endio(struct bio *bio)
{
struct drbd_peer_request *peer_req = bio->bi_private;
struct drbd_device *device = peer_req->peer_device->device;
- int is_write = bio_data_dir(bio) == WRITE;
- int is_discard = !!(bio->bi_rw & REQ_DISCARD);
+ bool is_write = bio_data_dir(bio) == WRITE;
+ bool is_discard = bio_op(bio) == REQ_OP_DISCARD;
if (bio->bi_error && __ratelimit(&drbd_ratelimit_state))
drbd_warn(device, "%s: error=%d s=%llus\n",
@@ -248,18 +248,26 @@ void drbd_request_endio(struct bio *bio)
/* to avoid recursion in __req_mod */
if (unlikely(bio->bi_error)) {
- if (bio->bi_rw & REQ_DISCARD)
- what = (bio->bi_error == -EOPNOTSUPP)
- ? DISCARD_COMPLETED_NOTSUPP
- : DISCARD_COMPLETED_WITH_ERROR;
- else
- what = (bio_data_dir(bio) == WRITE)
- ? WRITE_COMPLETED_WITH_ERROR
- : (bio_rw(bio) == READ)
- ? READ_COMPLETED_WITH_ERROR
- : READ_AHEAD_COMPLETED_WITH_ERROR;
- } else
+ switch (bio_op(bio)) {
+ case REQ_OP_DISCARD:
+ if (bio->bi_error == -EOPNOTSUPP)
+ what = DISCARD_COMPLETED_NOTSUPP;
+ else
+ what = DISCARD_COMPLETED_WITH_ERROR;
+ break;
+ case REQ_OP_READ:
+ if (bio->bi_opf & REQ_RAHEAD)
+ what = READ_AHEAD_COMPLETED_WITH_ERROR;
+ else
+ what = READ_COMPLETED_WITH_ERROR;
+ break;
+ default:
+ what = WRITE_COMPLETED_WITH_ERROR;
+ break;
+ }
+ } else {
what = COMPLETED_OK;
+ }
bio_put(req->private_bio);
req->private_bio = ERR_PTR(bio->bi_error);
@@ -320,6 +328,10 @@ void drbd_csum_bio(struct crypto_ahash *tfm, struct bio *bio, void *digest)
sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
ahash_request_set_crypt(req, &sg, NULL, sg.length);
crypto_ahash_update(req);
+ /* REQ_OP_WRITE_SAME has only one segment;
+ * checksum the payload only once. */
+ if (bio_op(bio) == REQ_OP_WRITE_SAME)
+ break;
}
ahash_request_set_crypt(req, NULL, digest, 0);
crypto_ahash_final(req);
@@ -387,7 +399,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
/* GFP_TRY, because if there is no memory available right now, this may
* be rescheduled for later. It is "only" background resync, after all. */
peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER /* unused */, sector,
- size, true /* has real payload */, GFP_TRY);
+ size, size, GFP_TRY);
if (!peer_req)
goto defer;
@@ -397,7 +409,8 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
spin_unlock_irq(&device->resource->req_lock);
atomic_add(size >> 9, &device->rs_sect_ev);
- if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
+ if (drbd_submit_peer_request(device, peer_req, REQ_OP_READ, 0,
+ DRBD_FAULT_RS_RD) == 0)
return 0;
/* If it failed because of ENOMEM, retry should help. If it failed
@@ -582,6 +595,7 @@ static int make_resync_request(struct drbd_device *const device, int cancel)
int number, rollback_i, size;
int align, requeue = 0;
int i = 0;
+ int discard_granularity = 0;
if (unlikely(cancel))
return 0;
@@ -601,6 +615,12 @@ static int make_resync_request(struct drbd_device *const device, int cancel)
return 0;
}
+ if (connection->agreed_features & DRBD_FF_THIN_RESYNC) {
+ rcu_read_lock();
+ discard_granularity = rcu_dereference(device->ldev->disk_conf)->rs_discard_granularity;
+ rcu_read_unlock();
+ }
+
max_bio_size = queue_max_hw_sectors(device->rq_queue) << 9;
number = drbd_rs_number_requests(device);
if (number <= 0)
@@ -665,6 +685,9 @@ next_sector:
if (sector & ((1<<(align+3))-1))
break;
+ if (discard_granularity && size == discard_granularity)
+ break;
+
/* do not cross extent boundaries */
if (((bit+1) & BM_BLOCKS_PER_BM_EXT_MASK) == 0)
break;
@@ -711,7 +734,8 @@ next_sector:
int err;
inc_rs_pending(device);
- err = drbd_send_drequest(peer_device, P_RS_DATA_REQUEST,
+ err = drbd_send_drequest(peer_device,
+ size == discard_granularity ? P_RS_THIN_REQ : P_RS_DATA_REQUEST,
sector, size, ID_SYNCER);
if (err) {
drbd_err(device, "drbd_send_drequest() failed, aborting...\n");
@@ -828,6 +852,7 @@ static void ping_peer(struct drbd_device *device)
int drbd_resync_finished(struct drbd_device *device)
{
+ struct drbd_connection *connection = first_peer_device(device)->connection;
unsigned long db, dt, dbdt;
unsigned long n_oos;
union drbd_state os, ns;
@@ -849,8 +874,7 @@ int drbd_resync_finished(struct drbd_device *device)
if (dw) {
dw->w.cb = w_resync_finished;
dw->device = device;
- drbd_queue_work(&first_peer_device(device)->connection->sender_work,
- &dw->w);
+ drbd_queue_work(&connection->sender_work, &dw->w);
return 1;
}
drbd_err(device, "Warn failed to drbd_rs_del_all() and to kmalloc(dw).\n");
@@ -963,6 +987,30 @@ int drbd_resync_finished(struct drbd_device *device)
_drbd_set_state(device, ns, CS_VERBOSE, NULL);
out_unlock:
spin_unlock_irq(&device->resource->req_lock);
+
+ /* If we have been sync source, and have an effective fencing-policy,
+ * once *all* volumes are back in sync, call "unfence". */
+ if (os.conn == C_SYNC_SOURCE) {
+ enum drbd_disk_state disk_state = D_MASK;
+ enum drbd_disk_state pdsk_state = D_MASK;
+ enum drbd_fencing_p fp = FP_DONT_CARE;
+
+ rcu_read_lock();
+ fp = rcu_dereference(device->ldev->disk_conf)->fencing;
+ if (fp != FP_DONT_CARE) {
+ struct drbd_peer_device *peer_device;
+ int vnr;
+ idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ struct drbd_device *device = peer_device->device;
+ disk_state = min_t(enum drbd_disk_state, disk_state, device->state.disk);
+ pdsk_state = min_t(enum drbd_disk_state, pdsk_state, device->state.pdsk);
+ }
+ }
+ rcu_read_unlock();
+ if (disk_state == D_UP_TO_DATE && pdsk_state == D_UP_TO_DATE)
+ conn_khelper(connection, "unfence-peer");
+ }
+
put_ldev(device);
out:
device->rs_total = 0;
@@ -999,7 +1047,6 @@ static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_
/**
* w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
- * @device: DRBD device.
* @w: work object.
* @cancel: The connection will be closed anyways
*/
@@ -1035,6 +1082,30 @@ int w_e_end_data_req(struct drbd_work *w, int cancel)
return err;
}
+static bool all_zero(struct drbd_peer_request *peer_req)
+{
+ struct page *page = peer_req->pages;
+ unsigned int len = peer_req->i.size;
+
+ page_chain_for_each(page) {
+ unsigned int l = min_t(unsigned int, len, PAGE_SIZE);
+ unsigned int i, words = l / sizeof(long);
+ unsigned long *d;
+
+ d = kmap_atomic(page);
+ for (i = 0; i < words; i++) {
+ if (d[i]) {
+ kunmap_atomic(d);
+ return false;
+ }
+ }
+ kunmap_atomic(d);
+ len -= l;
+ }
+
+ return true;
+}
+
/**
* w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUEST
* @w: work object.
@@ -1063,7 +1134,10 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
} else if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
if (likely(device->state.pdsk >= D_INCONSISTENT)) {
inc_rs_pending(device);
- err = drbd_send_block(peer_device, P_RS_DATA_REPLY, peer_req);
+ if (peer_req->flags & EE_RS_THIN_REQ && all_zero(peer_req))
+ err = drbd_send_rs_deallocated(peer_device, peer_req);
+ else
+ err = drbd_send_block(peer_device, P_RS_DATA_REPLY, peer_req);
} else {
if (__ratelimit(&drbd_ratelimit_state))
drbd_err(device, "Not sending RSDataReply, "
@@ -1633,7 +1707,7 @@ static bool use_checksum_based_resync(struct drbd_connection *connection, struct
rcu_read_unlock();
return connection->agreed_pro_version >= 89 && /* supported? */
connection->csums_tfm && /* configured? */
- (csums_after_crash_only == 0 /* use for each resync? */
+ (csums_after_crash_only == false /* use for each resync? */
|| test_bit(CRASHED_PRIMARY, &device->flags)); /* or only after Primary crash? */
}
@@ -1768,7 +1842,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
device->bm_resync_fo = 0;
device->use_csums = use_checksum_based_resync(connection, device);
} else {
- device->use_csums = 0;
+ device->use_csums = false;
}
/* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid
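
The DRBD hunks above (the rs_discard_granularity read and the fencing-policy check) both follow the same pattern: disk_conf lives behind an RCU-protected pointer, so readers enter an RCU read-side critical section, copy out the fields they need, and only act on the copies after dropping the lock. A minimal sketch of that read side, with hypothetical struct and field names standing in for DRBD's:

#include <linux/rcupdate.h>

struct my_conf {
        unsigned int rs_discard_granularity;
};

struct my_dev {
        struct my_conf __rcu *conf;     /* writers swap this via rcu_assign_pointer() */
};

static unsigned int read_discard_granularity(struct my_dev *dev)
{
        unsigned int granularity;

        rcu_read_lock();
        /* The dereferenced pointer is only stable inside the read-side
         * critical section, so copy the value out before unlocking. */
        granularity = rcu_dereference(dev->conf)->rs_discard_granularity;
        rcu_read_unlock();

        return granularity;
}
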
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index b206115d7..e3d8e4ced 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3818,8 +3818,9 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive)
bio.bi_flags |= (1 << BIO_QUIET);
bio.bi_private = &cbdata;
bio.bi_end_io = floppy_rb0_cb;
+ bio_set_op_attrs(&bio, REQ_OP_READ, 0);
- submit_bio(READ, &bio);
+ submit_bio(&bio);
process_fd_request();
init_completion(&cbdata.complete);
@@ -4345,8 +4346,7 @@ static int __init do_floppy_init(void)
/* to be cleaned up... */
disks[drive]->private_data = (void *)(long)drive;
disks[drive]->flags |= GENHD_FL_REMOVABLE;
- disks[drive]->driverfs_dev = &floppy_device[drive].dev;
- add_disk(disks[drive]);
+ device_add_disk(&floppy_device[drive].dev, disks[drive]);
}
return 0;
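
The floppy hunk is the simplest instance of the interface change running through this whole patch: submit_bio() no longer takes a read/write argument; the operation is set on the bio beforehand with bio_set_op_attrs(). A minimal sketch of submitting a read under the new API (completion handling trimmed; read_one_sector() is an illustrative name, not a kernel helper):

#include <linux/bio.h>
#include <linux/blk_types.h>

static int read_one_sector(struct block_device *bdev, struct page *page,
                           sector_t sector, bio_end_io_t *done)
{
        struct bio *bio = bio_alloc(GFP_NOIO, 1);

        if (!bio)
                return -ENOMEM;
        bio->bi_bdev = bdev;
        bio->bi_iter.bi_sector = sector;
        bio->bi_end_io = done;          /* completion callback */
        bio_add_page(bio, page, 512, 0);
        /* The op and its flags now travel in the bio itself. */
        bio_set_op_attrs(bio, REQ_OP_READ, 0);
        submit_bio(bio);
        return 0;
}
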
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 7339e65f6..005e2921a 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -447,7 +447,7 @@ static int lo_req_flush(struct loop_device *lo, struct request *rq)
static inline void handle_partial_read(struct loop_cmd *cmd, long bytes)
{
- if (bytes < 0 || (cmd->rq->cmd_flags & REQ_WRITE))
+ if (bytes < 0 || op_is_write(req_op(cmd->rq)))
return;
if (unlikely(bytes < blk_rq_bytes(cmd->rq))) {
@@ -510,14 +510,10 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
return 0;
}
-
-static inline int lo_rw_simple(struct loop_device *lo,
- struct request *rq, loff_t pos, bool rw)
+static int do_req_filebacked(struct loop_device *lo, struct request *rq)
{
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
-
- if (cmd->use_aio)
- return lo_rw_aio(lo, cmd, pos, rw);
+ loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
/*
* lo_write_simple and lo_read_simple should have been covered
@@ -528,37 +524,30 @@ static inline int lo_rw_simple(struct loop_device *lo,
* of the req at one time. And direct read IO doesn't need to
* run flush_dcache_page().
*/
- if (rw == WRITE)
- return lo_write_simple(lo, rq, pos);
- else
- return lo_read_simple(lo, rq, pos);
-}
-
-static int do_req_filebacked(struct loop_device *lo, struct request *rq)
-{
- loff_t pos;
- int ret;
-
- pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
-
- if (rq->cmd_flags & REQ_WRITE) {
- if (rq->cmd_flags & REQ_FLUSH)
- ret = lo_req_flush(lo, rq);
- else if (rq->cmd_flags & REQ_DISCARD)
- ret = lo_discard(lo, rq, pos);
- else if (lo->transfer)
- ret = lo_write_transfer(lo, rq, pos);
+ switch (req_op(rq)) {
+ case REQ_OP_FLUSH:
+ return lo_req_flush(lo, rq);
+ case REQ_OP_DISCARD:
+ return lo_discard(lo, rq, pos);
+ case REQ_OP_WRITE:
+ if (lo->transfer)
+ return lo_write_transfer(lo, rq, pos);
+ else if (cmd->use_aio)
+ return lo_rw_aio(lo, cmd, pos, WRITE);
else
- ret = lo_rw_simple(lo, rq, pos, WRITE);
-
- } else {
+ return lo_write_simple(lo, rq, pos);
+ case REQ_OP_READ:
if (lo->transfer)
- ret = lo_read_transfer(lo, rq, pos);
+ return lo_read_transfer(lo, rq, pos);
+ else if (cmd->use_aio)
+ return lo_rw_aio(lo, cmd, pos, READ);
else
- ret = lo_rw_simple(lo, rq, pos, READ);
+ return lo_read_simple(lo, rq, pos);
+ default:
+ WARN_ON_ONCE(1);
+ return -EIO;
}
-
- return ret;
}
struct switch_request {
@@ -1677,11 +1666,15 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
if (lo->lo_state != Lo_bound)
return -EIO;
- if (lo->use_dio && !(cmd->rq->cmd_flags & (REQ_FLUSH |
- REQ_DISCARD)))
- cmd->use_aio = true;
- else
+ switch (req_op(cmd->rq)) {
+ case REQ_OP_FLUSH:
+ case REQ_OP_DISCARD:
cmd->use_aio = false;
+ break;
+ default:
+ cmd->use_aio = lo->use_dio;
+ break;
+ }
queue_kthread_work(&lo->worker, &cmd->work);
@@ -1690,7 +1683,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
static void loop_handle_cmd(struct loop_cmd *cmd)
{
- const bool write = cmd->rq->cmd_flags & REQ_WRITE;
+ const bool write = op_is_write(req_op(cmd->rq));
struct loop_device *lo = cmd->rq->q->queuedata;
int ret = 0;
@@ -1783,6 +1776,7 @@ static int loop_add(struct loop_device **l, int i)
*/
queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);
+ err = -ENOMEM;
disk = lo->lo_disk = alloc_disk(1 << part_shift);
if (!disk)
goto out_free_queue;
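
The do_req_filebacked() rewrite shows the shape most drivers in this patch converge on: instead of testing REQ_WRITE/REQ_FLUSH/REQ_DISCARD bits in cmd_flags, dispatch on req_op(). A condensed sketch of that dispatch, with placeholder handlers:

#include <linux/blkdev.h>

/* Placeholder handlers; a real driver supplies these. */
static int handle_flush(struct request *rq) { return 0; }
static int handle_discard(struct request *rq) { return 0; }
static int handle_rw(struct request *rq, bool is_write) { return 0; }

static int dispatch_request(struct request *rq)
{
        switch (req_op(rq)) {
        case REQ_OP_FLUSH:
                return handle_flush(rq);
        case REQ_OP_DISCARD:
                return handle_discard(rq);
        case REQ_OP_READ:
        case REQ_OP_WRITE:
                return handle_rw(rq, op_is_write(req_op(rq)));
        default:
                /* New op types must be handled explicitly. */
                WARN_ON_ONCE(1);
                return -EIO;
        }
}
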
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index 145ce2aa2..e937fcf71 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -687,15 +687,13 @@ static unsigned int mg_issue_req(struct request *req,
unsigned int sect_num,
unsigned int sect_cnt)
{
- switch (rq_data_dir(req)) {
- case READ:
+ if (rq_data_dir(req) == READ) {
if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr)
!= MG_ERR_NONE) {
mg_bad_rw_intr(host);
return host->error;
}
- break;
- case WRITE:
+ } else {
/* TODO : handler */
outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr)
@@ -714,7 +712,6 @@ static unsigned int mg_issue_req(struct request *req,
mod_timer(&host->timer, jiffies + 3 * HZ);
outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
MG_REG_COMMAND);
- break;
}
return MG_ERR_NONE;
}
@@ -1018,7 +1015,7 @@ probe_err_7:
probe_err_6:
blk_cleanup_queue(host->breq);
probe_err_5:
- unregister_blkdev(MG_DISK_MAJ, MG_DISK_NAME);
+ unregister_blkdev(host->major, MG_DISK_NAME);
probe_err_4:
if (!prv_data->use_polling)
free_irq(host->irq, host);
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 6053e4659..2aca98e8e 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3765,7 +3765,7 @@ static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
return -ENODATA;
}
- if (rq->cmd_flags & REQ_DISCARD) {
+ if (req_op(rq) == REQ_OP_DISCARD) {
int err;
err = mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq));
@@ -3956,7 +3956,6 @@ static int mtip_block_initialize(struct driver_data *dd)
if (rv)
goto disk_index_error;
- dd->disk->driverfs_dev = &dd->pdev->dev;
dd->disk->major = dd->major;
dd->disk->first_minor = index * MTIP_MAX_MINORS;
dd->disk->minors = MTIP_MAX_MINORS;
@@ -4008,7 +4007,7 @@ skip_create_disk:
/*
* if rebuild pending, start the service thread, and delay the block
- * queue creation and add_disk()
+ * queue creation and device_add_disk()
*/
if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
goto start_service_thread;
@@ -4042,7 +4041,7 @@ skip_create_disk:
set_capacity(dd->disk, capacity);
/* Enable the block device and add it to /dev */
- add_disk(dd->disk);
+ device_add_disk(&dd->pdev->dev, dd->disk);
dd->bdev = bdget_disk(dd->disk, 0);
/*
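
All of the driverfs_dev removals in this patch are the same two-lines-to-one conversion: struct gendisk no longer carries a driverfs_dev field, and the parent device is passed to device_add_disk() instead. Shown as a sketch rather than driver code:

#include <linux/genhd.h>

static void publish_disk(struct device *parent, struct gendisk *disk)
{
        /* Pre-4.8:
         *      disk->driverfs_dev = parent;
         *      add_disk(disk);
         * From 4.8 on, one call records the parent and publishes the disk:
         */
        device_add_disk(parent, disk);
}
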
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 6a48ed419..a9e398019 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -282,9 +282,9 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
if (req->cmd_type == REQ_TYPE_DRV_PRIV)
type = NBD_CMD_DISC;
- else if (req->cmd_flags & REQ_DISCARD)
+ else if (req_op(req) == REQ_OP_DISCARD)
type = NBD_CMD_TRIM;
- else if (req->cmd_flags & REQ_FLUSH)
+ else if (req_op(req) == REQ_OP_FLUSH)
type = NBD_CMD_FLUSH;
else if (rq_data_dir(req) == WRITE)
type = NBD_CMD_WRITE;
@@ -451,14 +451,9 @@ static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev)
sk_set_memalloc(nbd->sock->sk);
- nbd->task_recv = current;
-
ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
if (ret) {
dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
-
- nbd->task_recv = NULL;
-
return ret;
}
@@ -477,9 +472,6 @@ static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev)
nbd_size_clear(nbd, bdev);
device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
-
- nbd->task_recv = NULL;
-
return ret;
}
@@ -788,6 +780,8 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
if (!nbd->sock)
return -EINVAL;
+ /* We have to claim the device under the lock */
+ nbd->task_recv = current;
mutex_unlock(&nbd->tx_lock);
nbd_parse_flags(nbd, bdev);
@@ -796,6 +790,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
nbd_name(nbd));
if (IS_ERR(thread)) {
mutex_lock(&nbd->tx_lock);
+ nbd->task_recv = NULL;
return PTR_ERR(thread);
}
@@ -805,6 +800,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
kthread_stop(thread);
mutex_lock(&nbd->tx_lock);
+ nbd->task_recv = NULL;
sock_shutdown(nbd);
nbd_clear_que(nbd);
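
The nbd change moves the nbd->task_recv assignment from the receive path into the ioctl, so the device is claimed while tx_lock is held and released under the same lock on both the error and teardown paths. The generic shape of that claim/release pattern, using hypothetical names:

#include <linux/mutex.h>
#include <linux/sched.h>

struct claimable {
        struct mutex lock;              /* plays the role of tx_lock */
        struct task_struct *owner;      /* plays the role of task_recv */
};

static int claim_device(struct claimable *c)
{
        mutex_lock(&c->lock);
        if (c->owner) {
                mutex_unlock(&c->lock);
                return -EBUSY;          /* already claimed */
        }
        c->owner = current;             /* claimed under the lock */
        mutex_unlock(&c->lock);
        return 0;
}

static void release_device(struct claimable *c)
{
        mutex_lock(&c->lock);
        c->owner = NULL;                /* released under the same lock */
        mutex_unlock(&c->lock);
}
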
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index cab97593b..75a7f88d6 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -448,7 +448,7 @@ static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
struct request *rq;
struct bio *bio = rqd->bio;
- rq = blk_mq_alloc_request(q, bio_rw(bio), 0);
+ rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
if (IS_ERR(rq))
return -ENOMEM;
diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c
index c2854a2bf..92900f5f0 100644
--- a/drivers/block/osdblk.c
+++ b/drivers/block/osdblk.c
@@ -321,7 +321,7 @@ static void osdblk_rq_fn(struct request_queue *q)
* driver-specific, etc.
*/
- do_flush = rq->cmd_flags & REQ_FLUSH;
+ do_flush = (req_op(rq) == REQ_OP_FLUSH);
do_write = (rq_data_dir(rq) == WRITE);
if (!do_flush) { /* osd_flush does not use a bio */
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index d06c62ecc..90fa4ac14 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1074,7 +1074,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
BUG();
atomic_inc(&pkt->io_wait);
- bio->bi_rw = READ;
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
pkt_queue_bio(pd, bio);
frames_read++;
}
@@ -1157,7 +1157,7 @@ static int pkt_start_recovery(struct packet_data *pkt)
bio_reset(pkt->bio);
pkt->bio->bi_bdev = pd->bdev;
- pkt->bio->bi_rw = REQ_WRITE;
+ bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0);
pkt->bio->bi_iter.bi_sector = new_sector;
pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
pkt->bio->bi_vcnt = pkt->frames;
@@ -1336,7 +1336,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
/* Start the write request */
atomic_set(&pkt->io_wait, 1);
- pkt->w_bio->bi_rw = WRITE;
+ bio_set_op_attrs(pkt->w_bio, REQ_OP_WRITE, 0);
pkt_queue_bio(pd, pkt->w_bio);
}
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 4b7e40583..76f33c84c 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -196,7 +196,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
while ((req = blk_fetch_request(q))) {
- if (req->cmd_flags & REQ_FLUSH) {
+ if (req_op(req) == REQ_OP_FLUSH) {
if (ps3disk_submit_flush_request(dev, req))
break;
} else if (req->cmd_type == REQ_TYPE_FS) {
@@ -256,7 +256,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
return IRQ_HANDLED;
}
- if (req->cmd_flags & REQ_FLUSH) {
+ if (req_op(req) == REQ_OP_FLUSH) {
read = 0;
op = "flush";
} else {
@@ -487,7 +487,6 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
gendisk->fops = &ps3disk_fops;
gendisk->queue = queue;
gendisk->private_data = dev;
- gendisk->driverfs_dev = &dev->sbd.core;
snprintf(gendisk->disk_name, sizeof(gendisk->disk_name), PS3DISK_NAME,
devidx+'a');
priv->blocking_factor = dev->blk_size >> 9;
@@ -499,7 +498,7 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
gendisk->disk_name, priv->model, priv->raw_capacity >> 11,
get_capacity(gendisk) >> 11);
- add_disk(gendisk);
+ device_add_disk(&dev->sbd.core, gendisk);
return 0;
fail_cleanup_queue:
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 56847fcda..456b4fe21 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -773,14 +773,13 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
gendisk->fops = &ps3vram_fops;
gendisk->queue = queue;
gendisk->private_data = dev;
- gendisk->driverfs_dev = &dev->core;
strlcpy(gendisk->disk_name, DEVICE_NAME, sizeof(gendisk->disk_name));
set_capacity(gendisk, priv->size >> 9);
dev_info(&dev->core, "%s: Using %lu MiB of GPU memory\n",
gendisk->disk_name, get_capacity(gendisk) >> 11);
- add_disk(gendisk);
+ device_add_disk(&dev->core, gendisk);
return 0;
fail_cleanup_queue:
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 81666a564..6c6519f64 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1937,7 +1937,7 @@ static struct ceph_osd_request *rbd_osd_req_create(
osd_req->r_callback = rbd_osd_req_callback;
osd_req->r_priv = obj_request;
- osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
+ osd_req->r_base_oloc.pool = rbd_dev->layout.pool_id;
if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
obj_request->object_name))
goto fail;
@@ -1991,7 +1991,7 @@ rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
osd_req->r_callback = rbd_osd_req_callback;
osd_req->r_priv = obj_request;
- osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
+ osd_req->r_base_oloc.pool = rbd_dev->layout.pool_id;
if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
obj_request->object_name))
goto fail;
@@ -3286,9 +3286,9 @@ static void rbd_queue_workfn(struct work_struct *work)
goto err;
}
- if (rq->cmd_flags & REQ_DISCARD)
+ if (req_op(rq) == REQ_OP_DISCARD)
op_type = OBJ_OP_DISCARD;
- else if (rq->cmd_flags & REQ_WRITE)
+ else if (req_op(rq) == REQ_OP_WRITE)
op_type = OBJ_OP_WRITE;
else
op_type = OBJ_OP_READ;
@@ -3950,6 +3950,7 @@ static void rbd_dev_release(struct device *dev)
bool need_put = !!rbd_dev->opts;
ceph_oid_destroy(&rbd_dev->header_oid);
+ ceph_oloc_destroy(&rbd_dev->header_oloc);
rbd_put_client(rbd_dev->rbd_client);
rbd_spec_put(rbd_dev->spec);
@@ -3995,10 +3996,11 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
/* Initialize the layout used for all rbd requests */
- rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
- rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
- rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
- rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
+ rbd_dev->layout.stripe_unit = 1 << RBD_MAX_OBJ_ORDER;
+ rbd_dev->layout.stripe_count = 1;
+ rbd_dev->layout.object_size = 1 << RBD_MAX_OBJ_ORDER;
+ rbd_dev->layout.pool_id = spec->pool_id;
+ RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
/*
* If this is a mapping rbd_dev (as opposed to a parent one),
@@ -5187,7 +5189,7 @@ static int rbd_dev_header_name(struct rbd_device *rbd_dev)
rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
- rbd_dev->header_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
+ rbd_dev->header_oloc.pool = rbd_dev->layout.pool_id;
if (rbd_dev->image_format == 1)
ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
spec->image_name, RBD_SUFFIX);
@@ -5335,15 +5337,6 @@ static ssize_t do_rbd_add(struct bus_type *bus,
}
spec->pool_id = (u64)rc;
- /* The ceph file layout needs to fit pool id in 32 bits */
-
- if (spec->pool_id > (u64)U32_MAX) {
- rbd_warn(NULL, "pool id too large (%llu > %u)",
- (unsigned long long)spec->pool_id, U32_MAX);
- rc = -EIO;
- goto err_out_client;
- }
-
rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
if (!rbd_dev) {
rc = -ENOMEM;
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index e1b8b7061..f81d70b39 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -230,8 +230,7 @@ int rsxx_attach_dev(struct rsxx_cardinfo *card)
set_capacity(card->gendisk, card->size8 >> 9);
else
set_capacity(card->gendisk, 0);
- add_disk(card->gendisk);
-
+ device_add_disk(CARD_TO_DEV(card), card->gendisk);
card->bdev_attached = 1;
}
@@ -308,7 +307,6 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card)
snprintf(card->gendisk->disk_name, sizeof(card->gendisk->disk_name),
"rsxx%d", card->disk_id);
- card->gendisk->driverfs_dev = &card->dev->dev;
card->gendisk->major = card->major;
card->gendisk->first_minor = 0;
card->gendisk->fops = &rsxx_fops;
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index cf8cd293a..5a20385f8 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -705,7 +705,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
dma_cnt[i] = 0;
}
- if (bio->bi_rw & REQ_DISCARD) {
+ if (bio_op(bio) == REQ_OP_DISCARD) {
bv_len = bio->bi_iter.bi_size;
while (bv_len > 0) {
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 910e06591..3822eae10 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -597,7 +597,7 @@ static void skd_request_fn(struct request_queue *q)
data_dir = rq_data_dir(req);
io_flags = req->cmd_flags;
- if (io_flags & REQ_FLUSH)
+ if (req_op(req) == REQ_OP_FLUSH)
flush++;
if (io_flags & REQ_FUA)
@@ -4690,10 +4690,10 @@ static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return -EIO;
}
-static int skd_bdev_attach(struct skd_device *skdev)
+static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
{
pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__);
- add_disk(skdev->disk);
+ device_add_disk(parent, skdev->disk);
return 0;
}
@@ -4812,8 +4812,6 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, skdev);
- skdev->disk->driverfs_dev = &pdev->dev;
-
for (i = 0; i < SKD_MAX_BARS; i++) {
skdev->mem_phys[i] = pci_resource_start(pdev, i);
skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
@@ -4851,7 +4849,7 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
(SKD_START_WAIT_SECONDS * HZ));
if (skdev->gendisk_on > 0) {
/* device came on-line after reset */
- skd_bdev_attach(skdev);
+ skd_bdev_attach(&pdev->dev, skdev);
rc = 0;
} else {
/* we timed out, something is wrong with the device,
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 4b911ed96..cab157331 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -804,7 +804,6 @@ static int probe_disk(struct vdc_port *port)
g->fops = &vdc_fops;
g->queue = q;
g->private_data = port;
- g->driverfs_dev = &port->vio.vdev->dev;
set_capacity(g, port->vdisk_size);
@@ -835,7 +834,7 @@ static int probe_disk(struct vdc_port *port)
port->vdisk_size, (port->vdisk_size >> (20 - 9)),
port->vio.ver.major, port->vio.ver.minor);
- add_disk(g);
+ device_add_disk(&port->vio.vdev->dev, g);
return 0;
}
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 7939b9f87..be90e1585 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -344,7 +344,6 @@ static int add_bio(struct cardinfo *card)
int offset;
struct bio *bio;
struct bio_vec vec;
- int rw;
bio = card->currentbio;
if (!bio && card->bio) {
@@ -359,7 +358,6 @@ static int add_bio(struct cardinfo *card)
if (!bio)
return 0;
- rw = bio_rw(bio);
if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE)
return 0;
@@ -369,7 +367,7 @@ static int add_bio(struct cardinfo *card)
vec.bv_page,
vec.bv_offset,
vec.bv_len,
- (rw == READ) ?
+ bio_op(bio) == REQ_OP_READ ?
PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
p = &card->mm_pages[card->Ready];
@@ -398,7 +396,7 @@ static int add_bio(struct cardinfo *card)
DMASCR_CHAIN_EN |
DMASCR_SEM_EN |
pci_cmds);
- if (rw == WRITE)
+ if (bio_op(bio) == REQ_OP_WRITE)
desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ);
desc->sem_control_bits = desc->control_bits;
@@ -462,7 +460,7 @@ static void process_page(unsigned long data)
le32_to_cpu(desc->local_addr)>>9,
le32_to_cpu(desc->transfer_size));
dump_dmastat(card, control);
- } else if ((bio->bi_rw & REQ_WRITE) &&
+ } else if (op_is_write(bio_op(bio)) &&
le32_to_cpu(desc->local_addr) >> 9 ==
card->init_size) {
card->init_size += le32_to_cpu(desc->transfer_size) >> 9;
@@ -537,7 +535,7 @@ static blk_qc_t mm_make_request(struct request_queue *q, struct bio *bio)
*card->biotail = bio;
bio->bi_next = NULL;
card->biotail = &bio->bi_next;
- if (bio->bi_rw & REQ_SYNC || !mm_check_plugged(card))
+ if (bio->bi_opf & REQ_SYNC || !mm_check_plugged(card))
activate(card);
spin_unlock_irq(&card->lock);
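
The umem hunks also pick up the bi_rw to bi_opf rename: the operation is extracted with bio_op() and classified with op_is_write(), while the remaining REQ_* flags are still tested directly on bi_opf. A one-function sketch:

#include <linux/bio.h>

static bool is_sync_write(struct bio *bio)
{
        /* bio_op() reads the op encoded in bi_opf; flag bits such as
         * REQ_SYNC are still tested on bi_opf itself. */
        return op_is_write(bio_op(bio)) && (bio->bi_opf & REQ_SYNC);
}
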
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 42758b527..93b1aaa5b 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -172,7 +172,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
vbr->req = req;
- if (req->cmd_flags & REQ_FLUSH) {
+ if (req_op(req) == REQ_OP_FLUSH) {
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_FLUSH);
vbr->out_hdr.sector = 0;
vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
@@ -236,25 +236,22 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
struct virtio_blk *vblk = disk->private_data;
+ struct request_queue *q = vblk->disk->queue;
struct request *req;
- struct bio *bio;
int err;
- bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES,
- GFP_KERNEL);
- if (IS_ERR(bio))
- return PTR_ERR(bio);
-
- req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL);
- if (IS_ERR(req)) {
- bio_put(bio);
+ req = blk_get_request(q, READ, GFP_KERNEL);
+ if (IS_ERR(req))
return PTR_ERR(req);
- }
-
req->cmd_type = REQ_TYPE_DRV_PRIV;
+
+ err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
+ if (err)
+ goto out;
+
err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
+out:
blk_put_request(req);
-
return err;
}
@@ -394,22 +391,16 @@ static int init_vq(struct virtio_blk *vblk)
num_vqs = 1;
vblk->vqs = kmalloc(sizeof(*vblk->vqs) * num_vqs, GFP_KERNEL);
- if (!vblk->vqs) {
- err = -ENOMEM;
- goto out;
- }
+ if (!vblk->vqs)
+ return -ENOMEM;
names = kmalloc(sizeof(*names) * num_vqs, GFP_KERNEL);
- if (!names)
- goto err_names;
-
callbacks = kmalloc(sizeof(*callbacks) * num_vqs, GFP_KERNEL);
- if (!callbacks)
- goto err_callbacks;
-
vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL);
- if (!vqs)
- goto err_vqs;
+ if (!names || !callbacks || !vqs) {
+ err = -ENOMEM;
+ goto out;
+ }
for (i = 0; i < num_vqs; i++) {
callbacks[i] = virtblk_done;
@@ -420,7 +411,7 @@ static int init_vq(struct virtio_blk *vblk)
/* Discover virtqueues and write information to configuration. */
err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names);
if (err)
- goto err_find_vqs;
+ goto out;
for (i = 0; i < num_vqs; i++) {
spin_lock_init(&vblk->vqs[i].lock);
@@ -428,16 +419,12 @@ static int init_vq(struct virtio_blk *vblk)
}
vblk->num_vqs = num_vqs;
- err_find_vqs:
+out:
kfree(vqs);
- err_vqs:
kfree(callbacks);
- err_callbacks:
kfree(names);
- err_names:
if (err)
kfree(vblk->vqs);
- out:
return err;
}
@@ -656,7 +643,6 @@ static int virtblk_probe(struct virtio_device *vdev)
vblk->disk->first_minor = index_to_minor(index);
vblk->disk->private_data = vblk;
vblk->disk->fops = &virtblk_fops;
- vblk->disk->driverfs_dev = &vdev->dev;
vblk->disk->flags |= GENHD_FL_EXT_DEVT;
vblk->index = index;
@@ -733,7 +719,7 @@ static int virtblk_probe(struct virtio_device *vdev)
virtio_device_ready(vdev);
- add_disk(vblk->disk);
+ device_add_disk(&vdev->dev, vblk->disk);
err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
if (err)
goto out_del_disk;
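
The virtblk_get_id() rewrite replaces the hand-rolled bio_map_kern()/blk_make_request() pair with the request-based helpers. A minimal sketch of issuing a driver-private request backed by a kernel buffer under that model (exec_kern_rq() is an illustrative name; buf must be a kmalloc'ed buffer of len bytes):

#include <linux/blkdev.h>

static int exec_kern_rq(struct request_queue *q, struct gendisk *disk,
                        void *buf, unsigned int len)
{
        struct request *req;
        int err;

        req = blk_get_request(q, READ, GFP_KERNEL);
        if (IS_ERR(req))
                return PTR_ERR(req);
        req->cmd_type = REQ_TYPE_DRV_PRIV;

        /* Map the kernel buffer into the request instead of building
         * a bio by hand. */
        err = blk_rq_map_kern(q, req, buf, len, GFP_KERNEL);
        if (!err)
                err = blk_execute_rq(q, disk, req, false);

        blk_put_request(req);
        return err;
}
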
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 4809c1501..4a80ee752 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -501,7 +501,7 @@ static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
struct xen_vbd *vbd = &blkif->vbd;
int rc = -EACCES;
- if ((operation != READ) && vbd->readonly)
+ if ((operation != REQ_OP_READ) && vbd->readonly)
goto out;
if (likely(req->nr_sects)) {
@@ -1014,7 +1014,7 @@ static int dispatch_discard_io(struct xen_blkif_ring *ring,
preq.sector_number = req->u.discard.sector_number;
preq.nr_sects = req->u.discard.nr_sectors;
- err = xen_vbd_translate(&preq, blkif, WRITE);
+ err = xen_vbd_translate(&preq, blkif, REQ_OP_WRITE);
if (err) {
pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
preq.sector_number,
@@ -1229,6 +1229,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
struct bio **biolist = pending_req->biolist;
int i, nbio = 0;
int operation;
+ int operation_flags = 0;
struct blk_plug plug;
bool drain = false;
struct grant_page **pages = pending_req->segments;
@@ -1247,17 +1248,19 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
switch (req_operation) {
case BLKIF_OP_READ:
ring->st_rd_req++;
- operation = READ;
+ operation = REQ_OP_READ;
break;
case BLKIF_OP_WRITE:
ring->st_wr_req++;
- operation = WRITE_ODIRECT;
+ operation = REQ_OP_WRITE;
+ operation_flags = WRITE_ODIRECT;
break;
case BLKIF_OP_WRITE_BARRIER:
drain = true;
case BLKIF_OP_FLUSH_DISKCACHE:
ring->st_f_req++;
- operation = WRITE_FLUSH;
+ operation = REQ_OP_WRITE;
+ operation_flags = WRITE_FLUSH;
break;
default:
operation = 0; /* make gcc happy */
@@ -1269,7 +1272,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
nseg = req->operation == BLKIF_OP_INDIRECT ?
req->u.indirect.nr_segments : req->u.rw.nr_segments;
- if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
+ if (unlikely(nseg == 0 && operation_flags != WRITE_FLUSH) ||
unlikely((req->operation != BLKIF_OP_INDIRECT) &&
(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
unlikely((req->operation == BLKIF_OP_INDIRECT) &&
@@ -1310,7 +1313,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) {
pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
- operation == READ ? "read" : "write",
+ operation == REQ_OP_READ ? "read" : "write",
preq.sector_number,
preq.sector_number + preq.nr_sects,
ring->blkif->vbd.pdevice);
@@ -1369,6 +1372,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
bio->bi_private = pending_req;
bio->bi_end_io = end_block_io_op;
bio->bi_iter.bi_sector = preq.sector_number;
+ bio_set_op_attrs(bio, operation, operation_flags);
}
preq.sector_number += seg[i].nsec;
@@ -1376,7 +1380,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
/* This will be hit if the operation was a flush or discard. */
if (!bio) {
- BUG_ON(operation != WRITE_FLUSH);
+ BUG_ON(operation_flags != WRITE_FLUSH);
bio = bio_alloc(GFP_KERNEL, 0);
if (unlikely(bio == NULL))
@@ -1386,20 +1390,21 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
bio->bi_bdev = preq.bdev;
bio->bi_private = pending_req;
bio->bi_end_io = end_block_io_op;
+ bio_set_op_attrs(bio, operation, operation_flags);
}
atomic_set(&pending_req->pendcnt, nbio);
blk_start_plug(&plug);
for (i = 0; i < nbio; i++)
- submit_bio(operation, biolist[i]);
+ submit_bio(biolist[i]);
/* Let the I/Os go.. */
blk_finish_plug(&plug);
- if (operation == READ)
+ if (operation == REQ_OP_READ)
ring->st_rd_sect += preq.nr_sects;
- else if (operation & WRITE)
+ else if (operation == REQ_OP_WRITE)
ring->st_wr_sect += preq.nr_sects;
return 0;
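
dispatch_rw_block_io() now tracks the block op and its flags separately and stamps both onto each bio with bio_set_op_attrs(). A condensed restatement of the mapping the hunk introduces (a sketch; error handling and the barrier drain are omitted):

#include <linux/blk_types.h>
#include <linux/fs.h>
#include <xen/interface/io/blkif.h>

static void blkif_req_to_op(u8 blkif_op, int *op, int *op_flags)
{
        switch (blkif_op) {
        case BLKIF_OP_READ:
                *op = REQ_OP_READ;
                *op_flags = 0;
                break;
        case BLKIF_OP_WRITE:
                *op = REQ_OP_WRITE;
                *op_flags = WRITE_ODIRECT;
                break;
        case BLKIF_OP_WRITE_BARRIER:    /* drains, then shares the flush path */
        case BLKIF_OP_FLUSH_DISKCACHE:
                *op = REQ_OP_WRITE;
                *op_flags = WRITE_FLUSH;
                break;
        default:
                *op = 0;
                *op_flags = 0;
                break;
        }
}
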
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 3355f1cdd..3cc6d1d86 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -379,7 +379,7 @@ static struct attribute *xen_vbdstat_attrs[] = {
NULL
};
-static struct attribute_group xen_vbdstat_group = {
+static const struct attribute_group xen_vbdstat_group = {
.name = "statistics",
.attrs = xen_vbdstat_attrs,
};
@@ -480,7 +480,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
if (q && test_bit(QUEUE_FLAG_WC, &q->queue_flags))
vbd->flush_support = true;
- if (q && blk_queue_secdiscard(q))
+ if (q && blk_queue_secure_erase(q))
vbd->discard_secure = true;
pr_debug("Successful creation of handle=%04x (dom=%u)\n",
@@ -715,8 +715,11 @@ static void backend_changed(struct xenbus_watch *watch,
/* Front end dir is a number, which is used as the handle. */
err = kstrtoul(strrchr(dev->otherend, '/') + 1, 0, &handle);
- if (err)
+ if (err) {
+ kfree(be->mode);
+ be->mode = NULL;
return;
+ }
be->major = major;
be->minor = minor;
@@ -1022,9 +1025,9 @@ static int connect_ring(struct backend_info *be)
pr_debug("%s %s\n", __func__, dev->otherend);
be->blkif->blk_protocol = BLKIF_PROTOCOL_DEFAULT;
- err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
- "%63s", protocol, NULL);
- if (err)
+ err = xenbus_scanf(XBT_NIL, dev->otherend, "protocol",
+ "%63s", protocol);
+ if (err <= 0)
strcpy(protocol, "unspecified, assuming default");
else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
@@ -1036,10 +1039,9 @@ static int connect_ring(struct backend_info *be)
xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
return -ENOSYS;
}
- err = xenbus_gather(XBT_NIL, dev->otherend,
- "feature-persistent", "%u",
- &pers_grants, NULL);
- if (err)
+ err = xenbus_scanf(XBT_NIL, dev->otherend,
+ "feature-persistent", "%u", &pers_grants);
+ if (err <= 0)
pers_grants = 0;
be->blkif->vbd.feature_gnt_persistent = pers_grants;
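
Both xenbus files switch from xenbus_gather() to xenbus_scanf() for single values, which changes the error check: xenbus_scanf() returns the number of values parsed (positive on success) or a negative errno, so `err <= 0` is the failure case. A small sketch of the resulting read-with-default idiom (read_feature() is an illustrative helper):

#include <xen/xenbus.h>

static unsigned int read_feature(struct xenbus_device *dev,
                                 const char *node, unsigned int def)
{
        unsigned int val;
        int err;

        err = xenbus_scanf(XBT_NIL, dev->otherend, node, "%u", &val);
        /* <= 0 covers both "node absent" and "parse failed". */
        return err <= 0 ? def : val;
}
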
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index fcc5b4e0a..88ef6d472 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -189,6 +189,8 @@ struct blkfront_info
struct mutex mutex;
struct xenbus_device *xbdev;
struct gendisk *gd;
+ u16 sector_size;
+ unsigned int physical_sector_size;
int vdevice;
blkif_vdev_t handle;
enum blkif_state connected;
@@ -196,6 +198,7 @@ struct blkfront_info
unsigned int nr_ring_pages;
struct request_queue *rq;
unsigned int feature_flush;
+ unsigned int feature_fua;
unsigned int feature_discard:1;
unsigned int feature_secdiscard:1;
unsigned int discard_granularity;
@@ -547,7 +550,7 @@ static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_inf
ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
ring_req->u.discard.id = id;
ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
- if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
+ if (req_op(req) == REQ_OP_SECURE_ERASE && info->feature_secdiscard)
ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
else
ring_req->u.discard.flag = 0;
@@ -746,7 +749,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
* The indirect operation can only be a BLKIF_OP_READ or
* BLKIF_OP_WRITE
*/
- BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA));
+ BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA);
ring_req->operation = BLKIF_OP_INDIRECT;
ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
BLKIF_OP_WRITE : BLKIF_OP_READ;
@@ -758,7 +761,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
ring_req->u.rw.handle = info->handle;
ring_req->operation = rq_data_dir(req) ?
BLKIF_OP_WRITE : BLKIF_OP_READ;
- if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+ if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) {
/*
* Ideally we can do an unordered flush-to-disk.
* In case the backend only supports barriers, use that.
@@ -766,19 +769,14 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
* implement it the same way. (It's also a FLUSH+FUA,
* since it is guaranteed ordered WRT previous writes.)
*/
- switch (info->feature_flush &
- ((REQ_FLUSH|REQ_FUA))) {
- case REQ_FLUSH|REQ_FUA:
+ if (info->feature_flush && info->feature_fua)
ring_req->operation =
BLKIF_OP_WRITE_BARRIER;
- break;
- case REQ_FLUSH:
+ else if (info->feature_flush)
ring_req->operation =
BLKIF_OP_FLUSH_DISKCACHE;
- break;
- default:
+ else
ring_req->operation = 0;
- }
}
ring_req->u.rw.nr_segments = num_grant;
if (unlikely(require_extra_req)) {
@@ -847,7 +845,8 @@ static int blkif_queue_request(struct request *req, struct blkfront_ring_info *r
if (unlikely(rinfo->dev_info->connected != BLKIF_STATE_CONNECTED))
return 1;
- if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE)))
+ if (unlikely(req_op(req) == REQ_OP_DISCARD ||
+ req_op(req) == REQ_OP_SECURE_ERASE))
return blkif_queue_discard_req(req, rinfo);
else
return blkif_queue_rw_req(req, rinfo);
@@ -867,10 +866,10 @@ static inline bool blkif_request_flush_invalid(struct request *req,
struct blkfront_info *info)
{
return ((req->cmd_type != REQ_TYPE_FS) ||
- ((req->cmd_flags & REQ_FLUSH) &&
- !(info->feature_flush & REQ_FLUSH)) ||
+ ((req_op(req) == REQ_OP_FLUSH) &&
+ !info->feature_flush) ||
((req->cmd_flags & REQ_FUA) &&
- !(info->feature_flush & REQ_FUA)));
+ !info->feature_fua));
}
static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -913,9 +912,45 @@ static struct blk_mq_ops blkfront_mq_ops = {
.map_queue = blk_mq_map_queue,
};
+static void blkif_set_queue_limits(struct blkfront_info *info)
+{
+ struct request_queue *rq = info->rq;
+ struct gendisk *gd = info->gd;
+ unsigned int segments = info->max_indirect_segments ? :
+ BLKIF_MAX_SEGMENTS_PER_REQUEST;
+
+ queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
+
+ if (info->feature_discard) {
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
+ blk_queue_max_discard_sectors(rq, get_capacity(gd));
+ rq->limits.discard_granularity = info->discard_granularity;
+ rq->limits.discard_alignment = info->discard_alignment;
+ if (info->feature_secdiscard)
+ queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
+ }
+
+ /* Hard sector size and max sectors impersonate the equiv. hardware. */
+ blk_queue_logical_block_size(rq, info->sector_size);
+ blk_queue_physical_block_size(rq, info->physical_sector_size);
+ blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
+
+ /* Each segment in a request is up to an aligned page in size. */
+ blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
+ blk_queue_max_segment_size(rq, PAGE_SIZE);
+
+ /* Ensure a merged request will fit in a single I/O ring slot. */
+ blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
+
+ /* Make sure buffer addresses are sector-aligned. */
+ blk_queue_dma_alignment(rq, 511);
+
+ /* Make sure we don't use bounce buffers. */
+ blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
+}
+
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
- unsigned int physical_sector_size,
- unsigned int segments)
+ unsigned int physical_sector_size)
{
struct request_queue *rq;
struct blkfront_info *info = gd->private_data;
@@ -947,58 +982,31 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
}
rq->queuedata = info;
- queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
-
- if (info->feature_discard) {
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
- blk_queue_max_discard_sectors(rq, get_capacity(gd));
- rq->limits.discard_granularity = info->discard_granularity;
- rq->limits.discard_alignment = info->discard_alignment;
- if (info->feature_secdiscard)
- queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq);
- }
-
- /* Hard sector size and max sectors impersonate the equiv. hardware. */
- blk_queue_logical_block_size(rq, sector_size);
- blk_queue_physical_block_size(rq, physical_sector_size);
- blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
-
- /* Each segment in a request is up to an aligned page in size. */
- blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
- blk_queue_max_segment_size(rq, PAGE_SIZE);
-
- /* Ensure a merged request will fit in a single I/O ring slot. */
- blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
-
- /* Make sure buffer addresses are sector-aligned. */
- blk_queue_dma_alignment(rq, 511);
-
- /* Make sure we don't use bounce buffers. */
- blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
-
- gd->queue = rq;
+ info->rq = gd->queue = rq;
+ info->gd = gd;
+ info->sector_size = sector_size;
+ info->physical_sector_size = physical_sector_size;
+ blkif_set_queue_limits(info);
return 0;
}
-static const char *flush_info(unsigned int feature_flush)
+static const char *flush_info(struct blkfront_info *info)
{
- switch (feature_flush & ((REQ_FLUSH | REQ_FUA))) {
- case REQ_FLUSH|REQ_FUA:
+ if (info->feature_flush && info->feature_fua)
return "barrier: enabled;";
- case REQ_FLUSH:
+ else if (info->feature_flush)
return "flush diskcache: enabled;";
- default:
+ else
return "barrier or flush: disabled;";
- }
}
static void xlvbd_flush(struct blkfront_info *info)
{
- blk_queue_write_cache(info->rq, info->feature_flush & REQ_FLUSH,
- info->feature_flush & REQ_FUA);
+ blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
+ info->feature_fua ? true : false);
pr_info("blkfront: %s: %s %s %s %s %s\n",
- info->gd->disk_name, flush_info(info->feature_flush),
+ info->gd->disk_name, flush_info(info),
"persistent grants:", info->feature_persistent ?
"enabled;" : "disabled;", "indirect descriptors:",
info->max_indirect_segments ? "enabled;" : "disabled;");
@@ -1139,19 +1147,13 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
gd->first_minor = minor;
gd->fops = &xlvbd_block_fops;
gd->private_data = info;
- gd->driverfs_dev = &(info->xbdev->dev);
set_capacity(gd, capacity);
- if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size,
- info->max_indirect_segments ? :
- BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
+ if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size)) {
del_gendisk(gd);
goto release;
}
- info->rq = gd->queue;
- info->gd = gd;
-
xlvbd_flush(info);
if (vdisk_info & VDISK_READONLY)
@@ -1321,7 +1323,7 @@ free_shadow:
rinfo->ring_ref[i] = GRANT_INVALID_REF;
}
}
- free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * PAGE_SIZE));
+ free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE));
rinfo->ring.sring = NULL;
if (rinfo->irq)
@@ -1597,7 +1599,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
info->feature_discard = 0;
info->feature_secdiscard = 0;
queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
- queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
+ queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
}
blk_mq_complete_request(req, error);
break;
@@ -1617,6 +1619,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
if (unlikely(error)) {
if (error == -EOPNOTSUPP)
error = 0;
+ info->feature_fua = 0;
info->feature_flush = 0;
xlvbd_flush(info);
}
@@ -2012,8 +2015,10 @@ static int blkif_recover(struct blkfront_info *info)
struct split_bio *split_bio;
blkfront_gather_backend_features(info);
+ /* Reset limits changed by blk_mq_update_nr_hw_queues(). */
+ blkif_set_queue_limits(info);
segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
- blk_queue_max_segments(info->rq, segs);
+ blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
for (r_index = 0; r_index < info->nr_rings; r_index++) {
struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
@@ -2064,7 +2069,7 @@ static int blkif_recover(struct blkfront_info *info)
bio_trim(cloned_bio, offset, size);
cloned_bio->bi_private = split_bio;
cloned_bio->bi_end_io = split_bio_end;
- submit_bio(cloned_bio->bi_rw, cloned_bio);
+ submit_bio(cloned_bio);
}
/*
* Now we have to wait for all those smaller bios to
@@ -2073,7 +2078,7 @@ static int blkif_recover(struct blkfront_info *info)
continue;
}
/* We don't need to split this bio */
- submit_bio(bio->bi_rw, bio);
+ submit_bio(bio);
}
return 0;
@@ -2108,11 +2113,16 @@ static int blkfront_resume(struct xenbus_device *dev)
/*
* Get the bios in the request so we can re-queue them.
*/
- if (shadow[j].request->cmd_flags &
- (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
+ if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
+ req_op(shadow[j].request) == REQ_OP_DISCARD ||
+ req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
+ shadow[j].request->cmd_flags & REQ_FUA) {
/*
* Flush operations don't contain bios, so
* we need to requeue the whole request
+ *
+ * XXX: but this doesn't make any sense for a
+ * write with the FUA flag set.
*/
list_add(&shadow[j].request->queuelist, &info->requests);
continue;
@@ -2197,10 +2207,9 @@ static void blkfront_setup_discard(struct blkfront_info *info)
info->discard_granularity = discard_granularity;
info->discard_alignment = discard_alignment;
}
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "discard-secure", "%d", &discard_secure,
- NULL);
- if (!err)
+ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+ "discard-secure", "%u", &discard_secure);
+ if (err > 0)
info->feature_secdiscard = !!discard_secure;
}
@@ -2298,10 +2307,10 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
unsigned int indirect_segments;
info->feature_flush = 0;
+ info->feature_fua = 0;
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "feature-barrier", "%d", &barrier,
- NULL);
+ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+ "feature-barrier", "%d", &barrier);
/*
* If there's no "feature-barrier" defined, then it means
@@ -2310,38 +2319,40 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
*
* If there are barriers, then we use flush.
*/
- if (!err && barrier)
- info->feature_flush = REQ_FLUSH | REQ_FUA;
+ if (err > 0 && barrier) {
+ info->feature_flush = 1;
+ info->feature_fua = 1;
+ }
+
/*
* And if there is "feature-flush-cache" use that above
* barriers.
*/
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "feature-flush-cache", "%d", &flush,
- NULL);
+ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+ "feature-flush-cache", "%d", &flush);
- if (!err && flush)
- info->feature_flush = REQ_FLUSH;
+ if (err > 0 && flush) {
+ info->feature_flush = 1;
+ info->feature_fua = 0;
+ }
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "feature-discard", "%d", &discard,
- NULL);
+ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+ "feature-discard", "%d", &discard);
- if (!err && discard)
+ if (err > 0 && discard)
blkfront_setup_discard(info);
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "feature-persistent", "%u", &persistent,
- NULL);
- if (err)
+ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+ "feature-persistent", "%d", &persistent);
+ if (err <= 0)
info->feature_persistent = 0;
else
info->feature_persistent = persistent;
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "feature-max-indirect-segments", "%u", &indirect_segments,
- NULL);
- if (err)
+ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+ "feature-max-indirect-segments", "%u",
+ &indirect_segments);
+ if (err <= 0)
info->max_indirect_segments = 0;
else
info->max_indirect_segments = min(indirect_segments,
@@ -2431,7 +2442,7 @@ static void blkfront_connect(struct blkfront_info *info)
if (err) {
xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
info->xbdev->otherend);
- return;
+ goto fail;
}
xenbus_switch_state(info->xbdev, XenbusStateConnected);
@@ -2441,9 +2452,14 @@ static void blkfront_connect(struct blkfront_info *info)
for (i = 0; i < info->nr_rings; i++)
kick_pending_request_queues(&info->rinfo[i]);
- add_disk(info->gd);
+ device_add_disk(&info->xbdev->dev, info->gd);
info->is_ready = 1;
+ return;
+
+fail:
+ blkif_free(info, 0);
+ return;
}
/**
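
blkfront now keeps feature_flush and feature_fua as two independent booleans: a backend advertising feature-barrier yields FLUSH+FUA, feature-flush-cache overrides that to FLUSH only, and both feed blk_queue_write_cache(). A sketch of that decision ladder (a hypothetical helper mirroring blkfront_gather_backend_features() and xlvbd_flush()):

#include <linux/blkdev.h>
#include <linux/types.h>

static void apply_cache_features(struct request_queue *q,
                                 bool barrier, bool flush_cache)
{
        bool feature_flush = false, feature_fua = false;

        if (barrier) {                  /* feature-barrier => FLUSH + FUA */
                feature_flush = true;
                feature_fua = true;
        }
        if (flush_cache) {              /* feature-flush-cache wins: FLUSH only */
                feature_flush = true;
                feature_fua = false;
        }
        blk_queue_write_cache(q, feature_flush, feature_fua);
}
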
diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig
index 386ba3d1a..b8ecba6dc 100644
--- a/drivers/block/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -1,8 +1,7 @@
config ZRAM
tristate "Compressed RAM block device support"
- depends on BLOCK && SYSFS && ZSMALLOC
- select LZO_COMPRESS
- select LZO_DECOMPRESS
+ depends on BLOCK && SYSFS && ZSMALLOC && CRYPTO
+ select CRYPTO_LZO
default n
help
Creates virtual block devices called /dev/zramX (X = 0, 1, ...).
@@ -14,13 +13,3 @@ config ZRAM
disks and maybe many more.
See zram.txt for more information.
-
-config ZRAM_LZ4_COMPRESS
- bool "Enable LZ4 algorithm support"
- depends on ZRAM
- select LZ4_COMPRESS
- select LZ4_DECOMPRESS
- default n
- help
- This option enables LZ4 compression algorithm support. Compression
- algorithm can be changed using `comp_algorithm' device attribute. \ No newline at end of file
diff --git a/drivers/block/zram/Makefile b/drivers/block/zram/Makefile
index be0763ff5..9e2b79e9a 100644
--- a/drivers/block/zram/Makefile
+++ b/drivers/block/zram/Makefile
@@ -1,5 +1,3 @@
-zram-y := zcomp_lzo.o zcomp.o zram_drv.o
-
-zram-$(CONFIG_ZRAM_LZ4_COMPRESS) += zcomp_lz4.o
+zram-y := zcomp.o zram_drv.o
obj-$(CONFIG_ZRAM) += zram.o
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index b51a816d7..4b5cd3a7b 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -14,108 +14,150 @@
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/cpu.h>
+#include <linux/crypto.h>
#include "zcomp.h"
-#include "zcomp_lzo.h"
-#ifdef CONFIG_ZRAM_LZ4_COMPRESS
-#include "zcomp_lz4.h"
-#endif
-static struct zcomp_backend *backends[] = {
- &zcomp_lzo,
-#ifdef CONFIG_ZRAM_LZ4_COMPRESS
- &zcomp_lz4,
+static const char * const backends[] = {
+ "lzo",
+#if IS_ENABLED(CONFIG_CRYPTO_LZ4)
+ "lz4",
+#endif
+#if IS_ENABLED(CONFIG_CRYPTO_DEFLATE)
+ "deflate",
+#endif
+#if IS_ENABLED(CONFIG_CRYPTO_LZ4HC)
+ "lz4hc",
+#endif
+#if IS_ENABLED(CONFIG_CRYPTO_842)
+ "842",
#endif
NULL
};
-static struct zcomp_backend *find_backend(const char *compress)
-{
- int i = 0;
- while (backends[i]) {
- if (sysfs_streq(compress, backends[i]->name))
- break;
- i++;
- }
- return backends[i];
-}
-
-static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm)
+static void zcomp_strm_free(struct zcomp_strm *zstrm)
{
- if (zstrm->private)
- comp->backend->destroy(zstrm->private);
+ if (!IS_ERR_OR_NULL(zstrm->tfm))
+ crypto_free_comp(zstrm->tfm);
free_pages((unsigned long)zstrm->buffer, 1);
kfree(zstrm);
}
/*
- * allocate new zcomp_strm structure with ->private initialized by
+ * allocate new zcomp_strm structure with ->tfm initialized by
* backend, return NULL on error
*/
-static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp, gfp_t flags)
+static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp)
{
- struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), flags);
+ struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), GFP_KERNEL);
if (!zstrm)
return NULL;
- zstrm->private = comp->backend->create(flags);
+ zstrm->tfm = crypto_alloc_comp(comp->name, 0, 0);
/*
* allocate 2 pages. 1 for compressed data, plus 1 extra for the
* case when compressed size is larger than the original one
*/
- zstrm->buffer = (void *)__get_free_pages(flags | __GFP_ZERO, 1);
- if (!zstrm->private || !zstrm->buffer) {
- zcomp_strm_free(comp, zstrm);
+ zstrm->buffer = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
+ if (IS_ERR_OR_NULL(zstrm->tfm) || !zstrm->buffer) {
+ zcomp_strm_free(zstrm);
zstrm = NULL;
}
return zstrm;
}
+bool zcomp_available_algorithm(const char *comp)
+{
+ int i = 0;
+
+ while (backends[i]) {
+ if (sysfs_streq(comp, backends[i]))
+ return true;
+ i++;
+ }
+
+ /*
+ * The crypto API does not ignore a trailing newline,
+ * so make sure you don't supply a string containing
+ * one.
+ * This also means that we permit zcomp initialisation
+ * with any compression algorithm known to the crypto API.
+ */
+ return crypto_has_comp(comp, 0, 0) == 1;
+}
+
/* show available compressors */
ssize_t zcomp_available_show(const char *comp, char *buf)
{
+ bool known_algorithm = false;
ssize_t sz = 0;
int i = 0;
- while (backends[i]) {
- if (!strcmp(comp, backends[i]->name))
+ for (; backends[i]; i++) {
+ if (!strcmp(comp, backends[i])) {
+ known_algorithm = true;
sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
- "[%s] ", backends[i]->name);
- else
+ "[%s] ", backends[i]);
+ } else {
sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
- "%s ", backends[i]->name);
- i++;
+ "%s ", backends[i]);
+ }
}
+
+ /*
+ * An out-of-tree module known to the crypto API, or a
+ * missing entry in `backends'.
+ */
+ if (!known_algorithm && crypto_has_comp(comp, 0, 0) == 1)
+ sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
+ "[%s] ", comp);
+
sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n");
return sz;
}
-bool zcomp_available_algorithm(const char *comp)
-{
- return find_backend(comp) != NULL;
-}
-
-struct zcomp_strm *zcomp_strm_find(struct zcomp *comp)
+struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
{
return *get_cpu_ptr(comp->stream);
}
-void zcomp_strm_release(struct zcomp *comp, struct zcomp_strm *zstrm)
+void zcomp_stream_put(struct zcomp *comp)
{
put_cpu_ptr(comp->stream);
}
-int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
- const unsigned char *src, size_t *dst_len)
+int zcomp_compress(struct zcomp_strm *zstrm,
+ const void *src, unsigned int *dst_len)
{
- return comp->backend->compress(src, zstrm->buffer, dst_len,
- zstrm->private);
+ /*
+ * Our dst memory (zstrm->buffer) is always `2 * PAGE_SIZE' sized
+ * because we can end up with compressed data larger than the
+ * original: for example, compression algorithms tend to add some
+ * padding to the compressed buffer. Speaking of padding, the
+ * `842' algorithm pads the compressed length to a multiple of 8
+ * and returns -ENOSPC when the dst memory is not big enough, which
+ * is not something that ZRAM wants to see. We can handle the
+ * `compressed_size > PAGE_SIZE' case easily in ZRAM, but when we
+ * receive an -ERRNO from the compression backend we can't help it
+ * anymore. To make `842' happy we need to tell it the exact size of
+ * the dst buffer; zram_drv will take care of the case where the
+ * compressed buffer is too big.
+ */
+ *dst_len = PAGE_SIZE * 2;
+
+ return crypto_comp_compress(zstrm->tfm,
+ src, PAGE_SIZE,
+ zstrm->buffer, dst_len);
}
-int zcomp_decompress(struct zcomp *comp, const unsigned char *src,
- size_t src_len, unsigned char *dst)
+int zcomp_decompress(struct zcomp_strm *zstrm,
+ const void *src, unsigned int src_len, void *dst)
{
- return comp->backend->decompress(src, src_len, dst);
+ unsigned int dst_len = PAGE_SIZE;
+
+ return crypto_comp_decompress(zstrm->tfm,
+ src, src_len,
+ dst, &dst_len);
}
static int __zcomp_cpu_notifier(struct zcomp *comp,
@@ -127,7 +169,7 @@ static int __zcomp_cpu_notifier(struct zcomp *comp,
case CPU_UP_PREPARE:
if (WARN_ON(*per_cpu_ptr(comp->stream, cpu)))
break;
- zstrm = zcomp_strm_alloc(comp, GFP_KERNEL);
+ zstrm = zcomp_strm_alloc(comp);
if (IS_ERR_OR_NULL(zstrm)) {
pr_err("Can't allocate a compression stream\n");
return NOTIFY_BAD;
@@ -138,7 +180,7 @@ static int __zcomp_cpu_notifier(struct zcomp *comp,
case CPU_UP_CANCELED:
zstrm = *per_cpu_ptr(comp->stream, cpu);
if (!IS_ERR_OR_NULL(zstrm))
- zcomp_strm_free(comp, zstrm);
+ zcomp_strm_free(zstrm);
*per_cpu_ptr(comp->stream, cpu) = NULL;
break;
default:
@@ -209,18 +251,16 @@ void zcomp_destroy(struct zcomp *comp)
struct zcomp *zcomp_create(const char *compress)
{
struct zcomp *comp;
- struct zcomp_backend *backend;
int error;
- backend = find_backend(compress);
- if (!backend)
+ if (!zcomp_available_algorithm(compress))
return ERR_PTR(-EINVAL);
comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL);
if (!comp)
return ERR_PTR(-ENOMEM);
- comp->backend = backend;
+ comp->name = compress;
error = zcomp_init(comp);
if (error) {
kfree(comp);
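
The zcomp rewrite drops the private backend vtable in favour of the kernel crypto API: algorithms are looked up by name with crypto_has_comp(), instantiated with crypto_alloc_comp(), and driven via crypto_comp_compress()/crypto_comp_decompress(). A self-contained sketch of one compression round under that API (compress_page() is an illustrative name; src must be PAGE_SIZE bytes and dst at least 2 * PAGE_SIZE, per the comment above):

#include <linux/crypto.h>
#include <linux/mm.h>

static int compress_page(const char *alg, const void *src, void *dst,
                         unsigned int *dst_len)
{
        struct crypto_comp *tfm;
        int ret;

        if (crypto_has_comp(alg, 0, 0) != 1)
                return -EINVAL;         /* unknown algorithm name */

        tfm = crypto_alloc_comp(alg, 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        *dst_len = 2 * PAGE_SIZE;       /* tell the backend the real dst size */
        ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, dst_len);
        crypto_free_comp(tfm);
        return ret;
}
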
diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
index ffd88cb74..478cac2ed 100644
--- a/drivers/block/zram/zcomp.h
+++ b/drivers/block/zram/zcomp.h
@@ -13,33 +13,15 @@
struct zcomp_strm {
/* compression/decompression buffer */
void *buffer;
- /*
- * The private data of the compression stream, only compression
- * stream backend can touch this (e.g. compression algorithm
- * working memory)
- */
- void *private;
-};
-
-/* static compression backend */
-struct zcomp_backend {
- int (*compress)(const unsigned char *src, unsigned char *dst,
- size_t *dst_len, void *private);
-
- int (*decompress)(const unsigned char *src, size_t src_len,
- unsigned char *dst);
-
- void *(*create)(gfp_t flags);
- void (*destroy)(void *private);
-
- const char *name;
+ struct crypto_comp *tfm;
};
/* dynamic per-device compression frontend */
struct zcomp {
struct zcomp_strm * __percpu *stream;
- struct zcomp_backend *backend;
struct notifier_block notifier;
+
+ const char *name;
};
ssize_t zcomp_available_show(const char *comp, char *buf);
@@ -48,14 +30,14 @@ bool zcomp_available_algorithm(const char *comp);
struct zcomp *zcomp_create(const char *comp);
void zcomp_destroy(struct zcomp *comp);
-struct zcomp_strm *zcomp_strm_find(struct zcomp *comp);
-void zcomp_strm_release(struct zcomp *comp, struct zcomp_strm *zstrm);
+struct zcomp_strm *zcomp_stream_get(struct zcomp *comp);
+void zcomp_stream_put(struct zcomp *comp);
-int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
- const unsigned char *src, size_t *dst_len);
+int zcomp_compress(struct zcomp_strm *zstrm,
+ const void *src, unsigned int *dst_len);
-int zcomp_decompress(struct zcomp *comp, const unsigned char *src,
- size_t src_len, unsigned char *dst);
+int zcomp_decompress(struct zcomp_strm *zstrm,
+ const void *src, unsigned int src_len, void *dst);
bool zcomp_set_max_streams(struct zcomp *comp, int num_strm);
#endif /* _ZCOMP_H_ */
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 8fcad8b76..04365b17e 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -342,9 +342,16 @@ static ssize_t comp_algorithm_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct zram *zram = dev_to_zram(dev);
+ char compressor[CRYPTO_MAX_ALG_NAME];
size_t sz;
- if (!zcomp_available_algorithm(buf))
+ strlcpy(compressor, buf, sizeof(compressor));
+ /* ignore trailing newline */
+ sz = strlen(compressor);
+ if (sz > 0 && compressor[sz - 1] == '\n')
+ compressor[sz - 1] = 0x00;
+
+ if (!zcomp_available_algorithm(compressor))
return -EINVAL;
down_write(&zram->init_lock);
@@ -353,13 +360,8 @@ static ssize_t comp_algorithm_store(struct device *dev,
pr_info("Can't change algorithm for initialized device\n");
return -EBUSY;
}
- strlcpy(zram->compressor, buf, sizeof(zram->compressor));
-
- /* ignore trailing newline */
- sz = strlen(zram->compressor);
- if (sz > 0 && zram->compressor[sz - 1] == '\n')
- zram->compressor[sz - 1] = 0x00;
+ strlcpy(zram->compressor, compressor, sizeof(zram->compressor));
up_write(&zram->init_lock);
return len;
}
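
comp_algorithm_store() now sanitizes the sysfs input in a local buffer before validating it, because crypto API name matching is exact and sysfs writes usually carry a trailing newline. The sanitizing step on its own, as a sketch:

#include <linux/string.h>

static void strip_trailing_newline(char *s)
{
        size_t sz = strlen(s);

        /* echo appends '\n'; the crypto API would treat "lzo\n" as an
         * unknown algorithm, so drop it before the lookup. */
        if (sz > 0 && s[sz - 1] == '\n')
                s[sz - 1] = 0x00;
}
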
@@ -563,7 +565,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
unsigned char *cmem;
struct zram_meta *meta = zram->meta;
unsigned long handle;
- size_t size;
+ unsigned int size;
bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
handle = meta->table[index].handle;
@@ -576,10 +578,14 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
}
cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
- if (size == PAGE_SIZE)
+ if (size == PAGE_SIZE) {
copy_page(mem, cmem);
- else
- ret = zcomp_decompress(zram->comp, cmem, size, mem);
+ } else {
+ struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
+
+ ret = zcomp_decompress(zstrm, cmem, size, mem);
+ zcomp_stream_put(zram->comp);
+ }
zs_unmap_object(meta->mem_pool, handle);
bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
@@ -646,7 +652,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
int offset)
{
int ret = 0;
- size_t clen;
+ unsigned int clen;
unsigned long handle = 0;
struct page *page;
unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
@@ -695,8 +701,8 @@ compress_again:
goto out;
}
- zstrm = zcomp_strm_find(zram->comp);
- ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
+ zstrm = zcomp_stream_get(zram->comp);
+ ret = zcomp_compress(zstrm, uncmem, &clen);
if (!is_partial_io(bvec)) {
kunmap_atomic(user_mem);
user_mem = NULL;
@@ -732,19 +738,21 @@ compress_again:
handle = zs_malloc(meta->mem_pool, clen,
__GFP_KSWAPD_RECLAIM |
__GFP_NOWARN |
- __GFP_HIGHMEM);
+ __GFP_HIGHMEM |
+ __GFP_MOVABLE);
if (!handle) {
- zcomp_strm_release(zram->comp, zstrm);
+ zcomp_stream_put(zram->comp);
zstrm = NULL;
atomic64_inc(&zram->stats.writestall);
handle = zs_malloc(meta->mem_pool, clen,
- GFP_NOIO | __GFP_HIGHMEM);
+ GFP_NOIO | __GFP_HIGHMEM |
+ __GFP_MOVABLE);
if (handle)
goto compress_again;
- pr_err("Error allocating memory for compressed page: %u, size=%zu\n",
+ pr_err("Error allocating memory for compressed page: %u, size=%u\n",
index, clen);
ret = -ENOMEM;
goto out;
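
The hunk above makes the handle allocation two-phase: a fast attempt that may only wake kswapd, then a sleepable GFP_NOIO retry taken only after the per-CPU stream is released. A condensed sketch of the pattern:

/* Sketch of the two-phase zs_malloc() pattern used above. */
handle = zs_malloc(meta->mem_pool, clen, __GFP_KSWAPD_RECLAIM |
		   __GFP_NOWARN | __GFP_HIGHMEM | __GFP_MOVABLE);
if (!handle) {
	zcomp_stream_put(zram->comp);	/* can't sleep while holding the stream */
	handle = zs_malloc(meta->mem_pool, clen,
			   GFP_NOIO | __GFP_HIGHMEM | __GFP_MOVABLE);
	if (handle)
		goto compress_again;	/* stream buffer is gone; redo compression */
}

The retry jumps back to compress_again because putting the stream invalidates its output buffer, so the page must be recompressed into a freshly acquired stream.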
@@ -769,7 +777,7 @@ compress_again:
memcpy(cmem, src, clen);
}
- zcomp_strm_release(zram->comp, zstrm);
+ zcomp_stream_put(zram->comp);
zstrm = NULL;
zs_unmap_object(meta->mem_pool, handle);
@@ -789,7 +797,7 @@ compress_again:
atomic64_inc(&zram->stats.pages_stored);
out:
if (zstrm)
- zcomp_strm_release(zram->comp, zstrm);
+ zcomp_stream_put(zram->comp);
if (is_partial_io(bvec))
kfree(uncmem);
return ret;
@@ -835,15 +843,16 @@ static void zram_bio_discard(struct zram *zram, u32 index,
}
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
- int offset, int rw)
+ int offset, bool is_write)
{
unsigned long start_time = jiffies;
+ int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;
int ret;
- generic_start_io_acct(rw, bvec->bv_len >> SECTOR_SHIFT,
+ generic_start_io_acct(rw_acct, bvec->bv_len >> SECTOR_SHIFT,
&zram->disk->part0);
- if (rw == READ) {
+ if (!is_write) {
atomic64_inc(&zram->stats.num_reads);
ret = zram_bvec_read(zram, bvec, index, offset);
} else {
@@ -851,10 +860,10 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
ret = zram_bvec_write(zram, bvec, index, offset);
}
- generic_end_io_acct(rw, &zram->disk->part0, start_time);
+ generic_end_io_acct(rw_acct, &zram->disk->part0, start_time);
if (unlikely(ret)) {
- if (rw == READ)
+ if (!is_write)
atomic64_inc(&zram->stats.failed_reads);
else
atomic64_inc(&zram->stats.failed_writes);
@@ -865,7 +874,7 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
- int offset, rw;
+ int offset;
u32 index;
struct bio_vec bvec;
struct bvec_iter iter;
@@ -874,13 +883,12 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
offset = (bio->bi_iter.bi_sector &
(SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
- if (unlikely(bio->bi_rw & REQ_DISCARD)) {
+ if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
zram_bio_discard(zram, index, offset, bio);
bio_endio(bio);
return;
}
- rw = bio_data_dir(bio);
bio_for_each_segment(bvec, bio, iter) {
int max_transfer_size = PAGE_SIZE - offset;
@@ -895,15 +903,18 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
bv.bv_len = max_transfer_size;
bv.bv_offset = bvec.bv_offset;
- if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0)
+ if (zram_bvec_rw(zram, &bv, index, offset,
+ op_is_write(bio_op(bio))) < 0)
goto out;
bv.bv_len = bvec.bv_len - max_transfer_size;
bv.bv_offset += max_transfer_size;
- if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0)
+ if (zram_bvec_rw(zram, &bv, index + 1, 0,
+ op_is_write(bio_op(bio))) < 0)
goto out;
} else
- if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0)
+ if (zram_bvec_rw(zram, &bvec, index, offset,
+ op_is_write(bio_op(bio))) < 0)
goto out;
update_position(&index, &offset, &bvec);
@@ -960,7 +971,7 @@ static void zram_slot_free_notify(struct block_device *bdev,
}
static int zram_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, int rw)
+ struct page *page, bool is_write)
{
int offset, err = -EIO;
u32 index;
@@ -984,7 +995,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
bv.bv_len = PAGE_SIZE;
bv.bv_offset = 0;
- err = zram_bvec_rw(zram, &bv, index, offset, rw);
+ err = zram_bvec_rw(zram, &bv, index, offset, is_write);
put_zram:
zram_meta_put(zram);
out:
@@ -997,7 +1008,7 @@ out:
* (e.g., SetPageError, set_page_dirty and extra works).
*/
if (err == 0)
- page_endio(page, rw, 0);
+ page_endio(page, is_write, 0);
return err;
}
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 3f5bf66a2..74fcf10da 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -15,8 +15,9 @@
#ifndef _ZRAM_DRV_H_
#define _ZRAM_DRV_H_
-#include <linux/spinlock.h>
+#include <linux/rwsem.h>
#include <linux/zsmalloc.h>
+#include <linux/crypto.h>
#include "zcomp.h"
@@ -113,7 +114,7 @@ struct zram {
* we can store in a disk.
*/
u64 disksize; /* bytes */
- char compressor[10];
+ char compressor[CRYPTO_MAX_ALG_NAME];
/*
* zram is claimed so open request will be failed
*/
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index fd6b53e9b..a9932fe57 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -274,6 +274,8 @@ static int bpa10x_setup(struct hci_dev *hdev)
BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1));
+ hci_set_fw_info(hdev, "%s", skb->data + 1);
+
kfree_skb(skb);
return 0;
}
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 7ad8d61c0..e6a85f0e6 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -138,7 +138,7 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
if (event->length > 3 && event->data[3])
priv->btmrvl_dev.dev_type = HCI_AMP;
else
- priv->btmrvl_dev.dev_type = HCI_BREDR;
+ priv->btmrvl_dev.dev_type = HCI_PRIMARY;
BT_DBG("dev_type: %d", priv->btmrvl_dev.dev_type);
} else if (priv->btmrvl_dev.sendcmdflag &&
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index e569ff003..73eea4f21 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -1071,7 +1071,6 @@ static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv,
{
struct btmrvl_sdio_card *card = priv->btmrvl_dev.card;
int ret = 0;
- int buf_block_len;
int blksz;
int i = 0;
u8 *buf = NULL;
@@ -1083,9 +1082,13 @@ static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv,
return -EINVAL;
}
+ blksz = DIV_ROUND_UP(nb, SDIO_BLOCK_SIZE) * SDIO_BLOCK_SIZE;
+
buf = payload;
- if ((unsigned long) payload & (BTSDIO_DMA_ALIGN - 1)) {
- tmpbufsz = ALIGN_SZ(nb, BTSDIO_DMA_ALIGN);
+ if ((unsigned long) payload & (BTSDIO_DMA_ALIGN - 1) ||
+ nb < blksz) {
+ tmpbufsz = ALIGN_SZ(blksz, BTSDIO_DMA_ALIGN) +
+ BTSDIO_DMA_ALIGN;
tmpbuf = kzalloc(tmpbufsz, GFP_KERNEL);
if (!tmpbuf)
return -ENOMEM;
@@ -1093,15 +1096,12 @@ static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv,
memcpy(buf, payload, nb);
}
- blksz = SDIO_BLOCK_SIZE;
- buf_block_len = DIV_ROUND_UP(nb, blksz);
-
sdio_claim_host(card->func);
do {
/* Transfer data to card */
ret = sdio_writesb(card->func, card->ioport, buf,
- buf_block_len * blksz);
+ blksz);
if (ret < 0) {
i++;
BT_ERR("i=%d writesb failed: %d", i, ret);
@@ -1625,6 +1625,7 @@ static int btmrvl_sdio_suspend(struct device *dev)
if (priv->adapter->hs_state != HS_ACTIVATED) {
if (btmrvl_enable_hs(priv)) {
BT_ERR("HS not actived, suspend failed!");
+ priv->adapter->is_suspending = false;
return -EBUSY;
}
}
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index 2b05661e3..1cb958e19 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -311,7 +311,7 @@ static int btsdio_probe(struct sdio_func *func,
if (id->class == SDIO_CLASS_BT_AMP)
hdev->dev_type = HCI_AMP;
else
- hdev->dev_type = HCI_BREDR;
+ hdev->dev_type = HCI_PRIMARY;
data->hdev = hdev;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 0e09dec8e..77dd700fd 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -250,6 +250,8 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x0489, 0xe092), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME },
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
@@ -315,6 +317,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
{ USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW },
+ { USB_DEVICE(0x8087, 0x0aa7), .driver_info = BTUSB_INTEL },
/* Other Intel Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01),
@@ -2104,10 +2107,14 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
/* With this Intel bootloader only the hardware variant and device
* revision information are used to select the right firmware.
*
- * Currently this bootloader support is limited to hardware variant
- * iBT 3.0 (LnP/SfP) which is identified by the value 11 (0x0b).
+ * The firmware filename is ibt-<hw_variant>-<dev_revid>.sfi.
+ *
+ * Currently the supported hardware variants are:
+ * 11 (0x0b) for iBT3.0 (LnP/SfP)
+ * 12 (0x0c) for iBT3.5 (WsP)
*/
snprintf(fwname, sizeof(fwname), "/*(DEBLOBBED)*/",
+ le16_to_cpu(ver.hw_variant),
le16_to_cpu(params->dev_revid));
err = reject_firmware(&fw, fwname, &hdev->dev);
@@ -2123,7 +2130,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
/* Save the DDC file name for later use to apply once the firmware
* downloading is done.
*/
- snprintf(fwname, sizeof(fwname), "intel/ibt-11-%u.ddc",
+ snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc",
+ le16_to_cpu(ver.hw_variant),
le16_to_cpu(params->dev_revid));
kfree_skb(skb);
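
The SFI request just above is deblobbed in this tree; going by the comment's naming scheme and the DDC line that follows, the upstream format string plausibly looks like the sketch below (an assumption, not taken from this diff):

/* Hypothetical reconstruction; the real literal is elided here. */
snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi",
	 le16_to_cpu(ver.hw_variant),
	 le16_to_cpu(params->dev_revid));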
@@ -2826,7 +2834,7 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_AMP)
hdev->dev_type = HCI_AMP;
else
- hdev->dev_type = HCI_BREDR;
+ hdev->dev_type = HCI_PRIMARY;
data->hdev = hdev;
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index 24a652f92..485281b3f 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -51,7 +51,7 @@
*/
struct ti_st {
struct hci_dev *hdev;
- char reg_status;
+ int reg_status;
long (*st_write) (struct sk_buff *);
struct completion wait_reg_completion;
};
@@ -83,7 +83,7 @@ static inline void ti_st_tx_complete(struct ti_st *hst, int pkt_type)
 * status. ti_st_open() function will wait for signal from this
* API when st_register() function returns ST_PENDING.
*/
-static void st_reg_completion_cb(void *priv_data, char data)
+static void st_reg_completion_cb(void *priv_data, int data)
{
struct ti_st *lhst = priv_data;
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 4124269aa..affc8e4fa 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -537,9 +537,7 @@ static int intel_setup(struct hci_uart *hu)
{
static const u8 reset_param[] = { 0x00, 0x01, 0x00, 0x01,
0x00, 0x08, 0x04, 0x00 };
- static const u8 lpm_param[] = { 0x03, 0x07, 0x01, 0x0b };
struct intel_data *intel = hu->priv;
- struct intel_device *idev = NULL;
struct hci_dev *hdev = hu->hdev;
struct sk_buff *skb;
struct intel_version ver;
@@ -884,35 +882,23 @@ done:
bt_dev_info(hdev, "Device booted in %llu usecs", duration);
- /* Enable LPM if matching pdev with wakeup enabled */
+ /* Enable LPM if a matching pdev has wakeup enabled; keep TX active
+ * until a further LPM TX notification.
+ */
mutex_lock(&intel_device_list_lock);
list_for_each(p, &intel_device_list) {
struct intel_device *dev = list_entry(p, struct intel_device,
list);
if (hu->tty->dev->parent == dev->pdev->dev.parent) {
- if (device_may_wakeup(&dev->pdev->dev))
- idev = dev;
+ if (device_may_wakeup(&dev->pdev->dev)) {
+ set_bit(STATE_LPM_ENABLED, &intel->flags);
+ set_bit(STATE_TX_ACTIVE, &intel->flags);
+ }
break;
}
}
mutex_unlock(&intel_device_list_lock);
- if (!idev)
- goto no_lpm;
-
- bt_dev_info(hdev, "Enabling LPM");
-
- skb = __hci_cmd_sync(hdev, 0xfc8b, sizeof(lpm_param), lpm_param,
- HCI_CMD_TIMEOUT);
- if (IS_ERR(skb)) {
- bt_dev_err(hdev, "Failed to enable LPM");
- goto no_lpm;
- }
- kfree_skb(skb);
-
- set_bit(STATE_LPM_ENABLED, &intel->flags);
-
-no_lpm:
/* Ignore errors, device can work without DDC parameters */
btintel_load_ddc_config(hdev, fwname);
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 49b3e1e2d..dda97398c 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -609,7 +609,7 @@ static int hci_uart_register_dev(struct hci_uart *hu)
if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags))
hdev->dev_type = HCI_AMP;
else
- hdev->dev_type = HCI_BREDR;
+ hdev->dev_type = HCI_PRIMARY;
if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags))
return 0;
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index aba31210c..3ff229b2e 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -97,10 +97,10 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
if (data->hdev)
return -EBADFD;
- /* bits 0-1 are dev_type (BR/EDR or AMP) */
+ /* bits 0-1 are dev_type (Primary or AMP) */
dev_type = opcode & 0x03;
- if (dev_type != HCI_BREDR && dev_type != HCI_AMP)
+ if (dev_type != HCI_PRIMARY && dev_type != HCI_AMP)
return -EINVAL;
/* bits 2-5 are reserved (must be zero) */
@@ -316,7 +316,7 @@ static void vhci_open_timeout(struct work_struct *work)
struct vhci_data *data = container_of(work, struct vhci_data,
open_timeout.work);
- vhci_create_device(data, amp ? HCI_AMP : HCI_BREDR);
+ vhci_create_device(data, amp ? HCI_AMP : HCI_PRIMARY);
}
static int vhci_open(struct inode *inode, struct file *file)
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index c5a7de9bc..3b205e212 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -132,6 +132,19 @@ config SUNXI_RSB
with various RSB based devices, such as AXP223, AXP8XX PMICs,
and AC100/AC200 ICs.
+# TODO: This uses pm_clk_*() symbols that aren't exported in v4.7 and hence
+# the driver will fail to build as a module. However there are patches to
+# address that queued for v4.8, so this can be turned into a tristate symbol
+# after v4.8-rc1.
+config TEGRA_ACONNECT
+ bool "Tegra ACONNECT Bus Driver"
+ depends on ARCH_TEGRA_210_SOC
+ depends on OF && PM
+ select PM_CLK
+ help
+ Driver for the Tegra ACONNECT bus which is used to interface with
+ the devices inside the Audio Processing Engine (APE) for Tegra210.
+
config UNIPHIER_SYSTEM_BUS
tristate "UniPhier System Bus driver"
depends on ARCH_UNIPHIER && OF
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index ccff007ee..ac84cc434 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -17,5 +17,6 @@ obj-$(CONFIG_OMAP_INTERCONNECT) += omap_l3_smx.o omap_l3_noc.o
obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o
obj-$(CONFIG_SUNXI_RSB) += sunxi-rsb.o
obj-$(CONFIG_SIMPLE_PM_BUS) += simple-pm-bus.o
+obj-$(CONFIG_TEGRA_ACONNECT) += tegra-aconnect.o
obj-$(CONFIG_UNIPHIER_SYSTEM_BUS) += uniphier-system-bus.o
obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index a49b28378..ffa7c9dcb 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -144,12 +144,15 @@ struct cci_pmu {
int num_cntrs;
atomic_t active_events;
struct mutex reserve_mutex;
- struct notifier_block cpu_nb;
+ struct list_head entry;
cpumask_t cpus;
};
#define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu))
+static DEFINE_MUTEX(cci_pmu_mutex);
+static LIST_HEAD(cci_pmu_list);
+
enum cci_models {
#ifdef CONFIG_ARM_CCI400_PMU
CCI400_R0,
@@ -548,7 +551,7 @@ static struct attribute *cci5xx_pmu_event_attrs[] = {
CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_wrq, 0xB),
CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_cd_hs, 0xC),
CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_rq_stall_addr_hazard, 0xD),
- CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snopp_rq_stall_tt_full, 0xE),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_stall_tt_full, 0xE),
CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_tzmp1_prot, 0xF),
NULL
};
@@ -1503,31 +1506,26 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
return perf_pmu_register(&cci_pmu->pmu, name, -1);
}
-static int cci_pmu_cpu_notifier(struct notifier_block *self,
- unsigned long action, void *hcpu)
+static int cci_pmu_offline_cpu(unsigned int cpu)
{
- struct cci_pmu *cci_pmu = container_of(self,
- struct cci_pmu, cpu_nb);
- unsigned int cpu = (long)hcpu;
+ struct cci_pmu *cci_pmu;
unsigned int target;
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_DOWN_PREPARE:
+ mutex_lock(&cci_pmu_mutex);
+ list_for_each_entry(cci_pmu, &cci_pmu_list, entry) {
if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus))
- break;
+ continue;
target = cpumask_any_but(cpu_online_mask, cpu);
- if (target >= nr_cpu_ids) // UP, last CPU
- break;
+ if (target >= nr_cpu_ids)
+ continue;
/*
* TODO: migrate context once core races on event->ctx have
* been fixed.
*/
cpumask_set_cpu(target, &cci_pmu->cpus);
- default:
- break;
}
-
- return NOTIFY_OK;
+ mutex_unlock(&cci_pmu_mutex);
+ return 0;
}
static struct cci_pmu_model cci_pmu_models[] = {
@@ -1766,24 +1764,13 @@ static int cci_pmu_probe(struct platform_device *pdev)
atomic_set(&cci_pmu->active_events, 0);
cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus);
- cci_pmu->cpu_nb = (struct notifier_block) {
- .notifier_call = cci_pmu_cpu_notifier,
- /*
- * to migrate uncore events, our notifier should be executed
- * before perf core's notifier.
- */
- .priority = CPU_PRI_PERF + 1,
- };
-
- ret = register_cpu_notifier(&cci_pmu->cpu_nb);
+ ret = cci_pmu_init(cci_pmu, pdev);
if (ret)
return ret;
- ret = cci_pmu_init(cci_pmu, pdev);
- if (ret) {
- unregister_cpu_notifier(&cci_pmu->cpu_nb);
- return ret;
- }
+ mutex_lock(&cci_pmu_mutex);
+ list_add(&cci_pmu->entry, &cci_pmu_list);
+ mutex_unlock(&cci_pmu_mutex);
pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
return 0;
@@ -1817,6 +1804,12 @@ static int __init cci_platform_init(void)
{
int ret;
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
+ "AP_PERF_ARM_CCI_ONLINE", NULL,
+ cci_pmu_offline_cpu);
+ if (ret)
+ return ret;
+
ret = platform_driver_register(&cci_pmu_driver);
if (ret)
return ret;
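
Both PMU drivers in this patch drop raw CPU notifiers for the hotplug state machine. Since a cpuhp teardown callback receives only the CPU number, per-device state is reached through a driver-global list; a sketch with hypothetical foo_* names:

/* Sketch: teardown-only cpuhp callback walking a driver-global list,
 * registered via cpuhp_setup_state_nocalls(..., NULL, foo_pmu_offline_cpu).
 */
static int foo_pmu_offline_cpu(unsigned int cpu)
{
	struct foo_pmu *pmu;

	mutex_lock(&foo_pmu_mutex);
	list_for_each_entry(pmu, &foo_pmu_list, entry) {
		if (!cpumask_test_and_clear_cpu(cpu, &pmu->cpus))
			continue;
		/* pick another online CPU and migrate events to it */
	}
	mutex_unlock(&foo_pmu_mutex);
	return 0;
}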
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
index d88372b79..884c0305e 100644
--- a/drivers/bus/arm-ccn.c
+++ b/drivers/bus/arm-ccn.c
@@ -167,7 +167,7 @@ struct arm_ccn_dt {
struct hrtimer hrtimer;
cpumask_t cpu;
- struct notifier_block cpu_nb;
+ struct list_head entry;
struct pmu pmu;
};
@@ -190,6 +190,8 @@ struct arm_ccn {
int mn_id;
};
+static DEFINE_MUTEX(arm_ccn_mutex);
+static LIST_HEAD(arm_ccn_list);
static int arm_ccn_node_to_xp(int node)
{
@@ -211,6 +213,7 @@ static int arm_ccn_node_to_xp_port(int node)
#define CCN_CONFIG_TYPE(_config) (((_config) >> 8) & 0xff)
#define CCN_CONFIG_EVENT(_config) (((_config) >> 16) & 0xff)
#define CCN_CONFIG_PORT(_config) (((_config) >> 24) & 0x3)
+#define CCN_CONFIG_BUS(_config) (((_config) >> 24) & 0x3)
#define CCN_CONFIG_VC(_config) (((_config) >> 26) & 0x7)
#define CCN_CONFIG_DIR(_config) (((_config) >> 29) & 0x1)
#define CCN_CONFIG_MASK(_config) (((_config) >> 30) & 0xf)
@@ -240,6 +243,7 @@ static CCN_FORMAT_ATTR(xp, "config:0-7");
static CCN_FORMAT_ATTR(type, "config:8-15");
static CCN_FORMAT_ATTR(event, "config:16-23");
static CCN_FORMAT_ATTR(port, "config:24-25");
+static CCN_FORMAT_ATTR(bus, "config:24-25");
static CCN_FORMAT_ATTR(vc, "config:26-28");
static CCN_FORMAT_ATTR(dir, "config:29-29");
static CCN_FORMAT_ATTR(mask, "config:30-33");
@@ -252,6 +256,7 @@ static struct attribute *arm_ccn_pmu_format_attrs[] = {
&arm_ccn_pmu_format_attr_type.attr.attr,
&arm_ccn_pmu_format_attr_event.attr.attr,
&arm_ccn_pmu_format_attr_port.attr.attr,
+ &arm_ccn_pmu_format_attr_bus.attr.attr,
&arm_ccn_pmu_format_attr_vc.attr.attr,
&arm_ccn_pmu_format_attr_dir.attr.attr,
&arm_ccn_pmu_format_attr_mask.attr.attr,
@@ -349,10 +354,14 @@ static ssize_t arm_ccn_pmu_event_show(struct device *dev,
break;
case CCN_TYPE_XP:
res += snprintf(buf + res, PAGE_SIZE - res,
- ",xp=?,port=?,vc=?,dir=?");
+ ",xp=?,vc=?");
if (event->event == CCN_EVENT_WATCHPOINT)
res += snprintf(buf + res, PAGE_SIZE - res,
- ",cmp_l=?,cmp_h=?,mask=?");
+ ",port=?,dir=?,cmp_l=?,cmp_h=?,mask=?");
+ else
+ res += snprintf(buf + res, PAGE_SIZE - res,
+ ",bus=?");
+
break;
case CCN_TYPE_MN:
res += snprintf(buf + res, PAGE_SIZE - res, ",node=%d", ccn->mn_id);
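
The new "bus" attribute deliberately aliases bits 24-25 of "port", so the same config word decodes either way depending on whether the XP event is a watchpoint; an illustrative decode using the macros above:

/* Illustrative values only. */
u64 config = (1ULL << 24) | (4ULL << 26);	/* port/bus = 1, vc = 4 */

unsigned int bus  = CCN_CONFIG_BUS(config);	/* == 1 */
unsigned int port = CCN_CONFIG_PORT(config);	/* also == 1: same bits */
unsigned int vc   = CCN_CONFIG_VC(config);	/* == 4 */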
@@ -736,9 +745,10 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
if (has_branch_stack(event) || event->attr.exclude_user ||
event->attr.exclude_kernel || event->attr.exclude_hv ||
- event->attr.exclude_idle) {
+ event->attr.exclude_idle || event->attr.exclude_host ||
+ event->attr.exclude_guest) {
dev_warn(ccn->dev, "Can't exclude execution levels!\n");
- return -EOPNOTSUPP;
+ return -EINVAL;
}
if (event->cpu < 0) {
@@ -930,38 +940,17 @@ static void arm_ccn_pmu_event_start(struct perf_event *event, int flags)
arm_ccn_pmu_read_counter(ccn, hw->idx));
hw->state = 0;
- /*
- * Pin the timer, so that the overflows are handled by the chosen
- * event->cpu (this is the same one as presented in "cpumask"
- * attribute).
- */
- if (!ccn->irq)
- hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(),
- HRTIMER_MODE_REL_PINNED);
-
/* Set the DT bus input, engaging the counter */
arm_ccn_pmu_xp_dt_config(event, 1);
}
static void arm_ccn_pmu_event_stop(struct perf_event *event, int flags)
{
- struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
struct hw_perf_event *hw = &event->hw;
- u64 timeout;
/* Disable counting, setting the DT bus to pass-through mode */
arm_ccn_pmu_xp_dt_config(event, 0);
- if (!ccn->irq)
- hrtimer_cancel(&ccn->dt.hrtimer);
-
- /* Let the DT bus drain */
- timeout = arm_ccn_pmu_read_counter(ccn, CCN_IDX_PMU_CYCLE_COUNTER) +
- ccn->num_xps;
- while (arm_ccn_pmu_read_counter(ccn, CCN_IDX_PMU_CYCLE_COUNTER) <
- timeout)
- cpu_relax();
-
if (flags & PERF_EF_UPDATE)
arm_ccn_pmu_event_update(event);
@@ -1027,7 +1016,7 @@ static void arm_ccn_pmu_xp_event_config(struct perf_event *event)
hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(hw->config_base);
id = (CCN_CONFIG_VC(event->attr.config) << 4) |
- (CCN_CONFIG_PORT(event->attr.config) << 3) |
+ (CCN_CONFIG_BUS(event->attr.config) << 3) |
(CCN_CONFIG_EVENT(event->attr.config) << 0);
val = readl(source->base + CCN_XP_PMU_EVENT_SEL);
@@ -1112,15 +1101,31 @@ static void arm_ccn_pmu_event_config(struct perf_event *event)
spin_unlock(&ccn->dt.config_lock);
}
+static int arm_ccn_pmu_active_counters(struct arm_ccn *ccn)
+{
+ return bitmap_weight(ccn->dt.pmu_counters_mask,
+ CCN_NUM_PMU_EVENT_COUNTERS + 1);
+}
+
static int arm_ccn_pmu_event_add(struct perf_event *event, int flags)
{
int err;
struct hw_perf_event *hw = &event->hw;
+ struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
err = arm_ccn_pmu_event_alloc(event);
if (err)
return err;
+ /*
+ * Pin the timer, so that the overflows are handled by the chosen
+ * event->cpu (this is the same one as presented in "cpumask"
+ * attribute).
+ */
+ if (!ccn->irq && arm_ccn_pmu_active_counters(ccn) == 1)
+ hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(),
+ HRTIMER_MODE_REL_PINNED);
+
arm_ccn_pmu_event_config(event);
hw->state = PERF_HES_STOPPED;
@@ -1133,9 +1138,14 @@ static int arm_ccn_pmu_event_add(struct perf_event *event, int flags)
static void arm_ccn_pmu_event_del(struct perf_event *event, int flags)
{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+
arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE);
arm_ccn_pmu_event_release(event);
+
+ if (!ccn->irq && arm_ccn_pmu_active_counters(ccn) == 0)
+ hrtimer_cancel(&ccn->dt.hrtimer);
}
static void arm_ccn_pmu_event_read(struct perf_event *event)
@@ -1143,6 +1153,24 @@ static void arm_ccn_pmu_event_read(struct perf_event *event)
arm_ccn_pmu_event_update(event);
}
+static void arm_ccn_pmu_enable(struct pmu *pmu)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(pmu);
+
+ u32 val = readl(ccn->dt.base + CCN_DT_PMCR);
+ val |= CCN_DT_PMCR__PMU_EN;
+ writel(val, ccn->dt.base + CCN_DT_PMCR);
+}
+
+static void arm_ccn_pmu_disable(struct pmu *pmu)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(pmu);
+
+ u32 val = readl(ccn->dt.base + CCN_DT_PMCR);
+ val &= ~CCN_DT_PMCR__PMU_EN;
+ writel(val, ccn->dt.base + CCN_DT_PMCR);
+}
+
static irqreturn_t arm_ccn_pmu_overflow_handler(struct arm_ccn_dt *dt)
{
u32 pmovsr = readl(dt->base + CCN_DT_PMOVSR);
@@ -1186,30 +1214,27 @@ static enum hrtimer_restart arm_ccn_pmu_timer_handler(struct hrtimer *hrtimer)
}
-static int arm_ccn_pmu_cpu_notifier(struct notifier_block *nb,
- unsigned long action, void *hcpu)
+static int arm_ccn_pmu_offline_cpu(unsigned int cpu)
{
- struct arm_ccn_dt *dt = container_of(nb, struct arm_ccn_dt, cpu_nb);
- struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt);
- unsigned int cpu = (long)hcpu; /* for (long) see kernel/cpu.c */
+ struct arm_ccn_dt *dt;
unsigned int target;
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_DOWN_PREPARE:
+ mutex_lock(&arm_ccn_mutex);
+ list_for_each_entry(dt, &arm_ccn_list, entry) {
+ struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt);
+
if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu))
- break;
+ continue;
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
- break;
+ continue;
perf_pmu_migrate_context(&dt->pmu, cpu, target);
cpumask_set_cpu(target, &dt->cpu);
if (ccn->irq)
WARN_ON(irq_set_affinity_hint(ccn->irq, &dt->cpu) != 0);
- default:
- break;
}
-
- return NOTIFY_OK;
+ mutex_unlock(&arm_ccn_mutex);
+ return 0;
}
@@ -1268,6 +1293,8 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
.start = arm_ccn_pmu_event_start,
.stop = arm_ccn_pmu_event_stop,
.read = arm_ccn_pmu_event_read,
+ .pmu_enable = arm_ccn_pmu_enable,
+ .pmu_disable = arm_ccn_pmu_disable,
};
/* No overflow interrupt? Have to use a timer instead. */
@@ -1281,16 +1308,6 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
/* Pick one CPU which we will use to collect data from CCN... */
cpumask_set_cpu(smp_processor_id(), &ccn->dt.cpu);
- /*
- * ... and change the selection when it goes offline. Priority is
- * picked to have a chance to migrate events before perf is notified.
- */
- ccn->dt.cpu_nb.notifier_call = arm_ccn_pmu_cpu_notifier;
- ccn->dt.cpu_nb.priority = CPU_PRI_PERF + 1,
- err = register_cpu_notifier(&ccn->dt.cpu_nb);
- if (err)
- goto error_cpu_notifier;
-
/* Also make sure that the overflow interrupt is handled by this CPU */
if (ccn->irq) {
err = irq_set_affinity_hint(ccn->irq, &ccn->dt.cpu);
@@ -1304,12 +1321,13 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
if (err)
goto error_pmu_register;
+ mutex_lock(&arm_ccn_mutex);
+ list_add(&ccn->dt.entry, &arm_ccn_list);
+ mutex_unlock(&arm_ccn_mutex);
return 0;
error_pmu_register:
error_set_affinity:
- unregister_cpu_notifier(&ccn->dt.cpu_nb);
-error_cpu_notifier:
ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
for (i = 0; i < ccn->num_xps; i++)
writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
@@ -1321,9 +1339,12 @@ static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn)
{
int i;
+ mutex_lock(&arm_ccn_mutex);
+ list_del(&ccn->dt.entry);
+ mutex_unlock(&arm_ccn_mutex);
+
if (ccn->irq)
irq_set_affinity_hint(ccn->irq, NULL);
- unregister_cpu_notifier(&ccn->dt.cpu_nb);
for (i = 0; i < ccn->num_xps; i++)
writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
writel(0, ccn->dt.base + CCN_DT_PMCR);
@@ -1331,7 +1352,6 @@ static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn)
ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
}
-
static int arm_ccn_for_each_valid_region(struct arm_ccn *ccn,
int (*callback)(struct arm_ccn *ccn, int region,
void __iomem *base, u32 type, u32 id))
@@ -1496,8 +1516,9 @@ static int arm_ccn_probe(struct platform_device *pdev)
/* Can set 'disable' bits, so can acknowledge interrupts */
writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE,
ccn->base + CCN_MN_ERRINT_STATUS);
- err = devm_request_irq(ccn->dev, irq, arm_ccn_irq_handler, 0,
- dev_name(ccn->dev), ccn);
+ err = devm_request_irq(ccn->dev, irq, arm_ccn_irq_handler,
+ IRQF_NOBALANCING | IRQF_NO_THREAD,
+ dev_name(ccn->dev), ccn);
if (err)
return err;
@@ -1550,7 +1571,13 @@ static struct platform_driver arm_ccn_driver = {
static int __init arm_ccn_init(void)
{
- int i;
+ int i, ret;
+
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
+ "AP_PERF_ARM_CCN_ONLINE", NULL,
+ arm_ccn_pmu_offline_cpu);
+ if (ret)
+ return ret;
for (i = 0; i < ARRAY_SIZE(arm_ccn_pmu_events); i++)
arm_ccn_pmu_events_attrs[i] = &arm_ccn_pmu_events[i].attr.attr;
@@ -1560,6 +1587,7 @@ static int __init arm_ccn_init(void)
static void __exit arm_ccn_exit(void)
{
+ cpuhp_remove_state_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE);
platform_driver_unregister(&arm_ccn_driver);
}
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index 1827fc4d1..4bd361d64 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -163,9 +163,8 @@ static int __init weim_parse_dt(struct platform_device *pdev,
}
if (have_child)
- ret = of_platform_populate(pdev->dev.of_node,
- of_default_bus_match_table,
- NULL, &pdev->dev);
+ ret = of_platform_default_populate(pdev->dev.of_node,
+ NULL, &pdev->dev);
if (ret)
dev_err(&pdev->dev, "%s fail to create devices.\n",
pdev->dev.of_node->full_name);
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index ce54a0160..c7f396903 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -117,7 +117,7 @@ struct mvebu_mbus_soc_data {
unsigned int (*win_remap_offset)(const int win);
void (*setup_cpu_target)(struct mvebu_mbus_state *s);
int (*save_cpu_target)(struct mvebu_mbus_state *s,
- u32 *store_addr);
+ u32 __iomem *store_addr);
int (*show_cpu_target)(struct mvebu_mbus_state *s,
struct seq_file *seq, void *v);
};
@@ -728,7 +728,7 @@ mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
static int
mvebu_mbus_default_save_cpu_target(struct mvebu_mbus_state *mbus,
- u32 *store_addr)
+ u32 __iomem *store_addr)
{
int i;
@@ -780,7 +780,7 @@ mvebu_mbus_dove_setup_cpu_target(struct mvebu_mbus_state *mbus)
static int
mvebu_mbus_dove_save_cpu_target(struct mvebu_mbus_state *mbus,
- u32 *store_addr)
+ u32 __iomem *store_addr)
{
int i;
@@ -796,7 +796,7 @@ mvebu_mbus_dove_save_cpu_target(struct mvebu_mbus_state *mbus,
return 4;
}
-int mvebu_mbus_save_cpu_target(u32 *store_addr)
+int mvebu_mbus_save_cpu_target(u32 __iomem *store_addr)
{
return mbus_state.soc->save_cpu_target(&mbus_state, store_addr);
}
@@ -1089,7 +1089,7 @@ static void mvebu_mbus_resume(void)
}
}
-struct syscore_ops mvebu_mbus_syscore_ops = {
+static struct syscore_ops mvebu_mbus_syscore_ops = {
.suspend = mvebu_mbus_suspend,
.resume = mvebu_mbus_resume,
};
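
The __iomem annotations above are sparse fixes: the store address points into device-mapped memory, so it must go through the MMIO accessors rather than being dereferenced directly. A minimal sketch:

/* Sketch: writing through an __iomem pointer. */
static void save_word(u32 __iomem *store_addr, u32 val)
{
	writel(val, store_addr);	/* *store_addr = val would trip sparse */
}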
diff --git a/drivers/bus/tegra-aconnect.c b/drivers/bus/tegra-aconnect.c
new file mode 100644
index 000000000..7e4104b74
--- /dev/null
+++ b/drivers/bus/tegra-aconnect.c
@@ -0,0 +1,112 @@
+/*
+ * Tegra ACONNECT Bus Driver
+ *
+ * Copyright (C) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+
+static int tegra_aconnect_add_clock(struct device *dev, char *name)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = clk_get(dev, name);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "%s clock not found\n", name);
+ return PTR_ERR(clk);
+ }
+
+ ret = pm_clk_add_clk(dev, clk);
+ if (ret)
+ clk_put(clk);
+
+ return ret;
+}
+
+static int tegra_aconnect_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ if (!pdev->dev.of_node)
+ return -EINVAL;
+
+ ret = pm_clk_create(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = tegra_aconnect_add_clock(&pdev->dev, "ape");
+ if (ret)
+ goto clk_destroy;
+
+ ret = tegra_aconnect_add_clock(&pdev->dev, "apb2ape");
+ if (ret)
+ goto clk_destroy;
+
+ pm_runtime_enable(&pdev->dev);
+
+ of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+
+ dev_info(&pdev->dev, "Tegra ACONNECT bus registered\n");
+
+ return 0;
+
+clk_destroy:
+ pm_clk_destroy(&pdev->dev);
+
+ return ret;
+}
+
+static int tegra_aconnect_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+
+ pm_clk_destroy(&pdev->dev);
+
+ return 0;
+}
+
+static int tegra_aconnect_runtime_resume(struct device *dev)
+{
+ return pm_clk_resume(dev);
+}
+
+static int tegra_aconnect_runtime_suspend(struct device *dev)
+{
+ return pm_clk_suspend(dev);
+}
+
+static const struct dev_pm_ops tegra_aconnect_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_aconnect_runtime_suspend,
+ tegra_aconnect_runtime_resume, NULL)
+};
+
+static const struct of_device_id tegra_aconnect_of_match[] = {
+ { .compatible = "nvidia,tegra210-aconnect", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra_aconnect_of_match);
+
+static struct platform_driver tegra_aconnect_driver = {
+ .probe = tegra_aconnect_probe,
+ .remove = tegra_aconnect_remove,
+ .driver = {
+ .name = "tegra-aconnect",
+ .of_match_table = tegra_aconnect_of_match,
+ .pm = &tegra_aconnect_pm_ops,
+ },
+};
+module_platform_driver(tegra_aconnect_driver);
+
+MODULE_DESCRIPTION("NVIDIA Tegra ACONNECT Bus Driver");
+MODULE_AUTHOR("Jon Hunter <jonathanh@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/uniphier-system-bus.c b/drivers/bus/uniphier-system-bus.c
index 350b7309c..1e6e0269e 100644
--- a/drivers/bus/uniphier-system-bus.c
+++ b/drivers/bus/uniphier-system-bus.c
@@ -257,8 +257,7 @@ static int uniphier_system_bus_probe(struct platform_device *pdev)
uniphier_system_bus_set_reg(priv);
/* Now, the bus is configured. Populate platform_devices below it */
- return of_platform_populate(dev->of_node, of_default_bus_match_table,
- NULL, dev);
+ return of_platform_default_populate(dev->of_node, NULL, dev);
}
static const struct of_device_id uniphier_system_bus_match[] = {
diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c
index c3cb76b36..9efdf1de4 100644
--- a/drivers/bus/vexpress-config.c
+++ b/drivers/bus/vexpress-config.c
@@ -178,6 +178,7 @@ static int vexpress_config_populate(struct device_node *node)
parent = class_find_device(vexpress_config_class, NULL, bridge,
vexpress_config_node_match);
+ of_node_put(bridge);
if (WARN_ON(!parent))
return -ENODEV;
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 1b257ea97..5d475b3a0 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2032,7 +2032,7 @@ static int cdrom_read_subchannel(struct cdrom_device_info *cdi,
init_cdrom_command(&cgc, buffer, 16, CGC_DATA_READ);
cgc.cmd[0] = GPCMD_READ_SUBCHANNEL;
- cgc.cmd[1] = 2; /* MSF addressing */
+ cgc.cmd[1] = subchnl->cdsc_format; /* MSF or LBA addressing */
cgc.cmd[2] = 0x40; /* request subQ data */
cgc.cmd[3] = mcn ? 2 : 1;
cgc.cmd[8] = 16;
@@ -2041,17 +2041,27 @@ static int cdrom_read_subchannel(struct cdrom_device_info *cdi,
return ret;
subchnl->cdsc_audiostatus = cgc.buffer[1];
- subchnl->cdsc_format = CDROM_MSF;
subchnl->cdsc_ctrl = cgc.buffer[5] & 0xf;
subchnl->cdsc_trk = cgc.buffer[6];
subchnl->cdsc_ind = cgc.buffer[7];
- subchnl->cdsc_reladdr.msf.minute = cgc.buffer[13];
- subchnl->cdsc_reladdr.msf.second = cgc.buffer[14];
- subchnl->cdsc_reladdr.msf.frame = cgc.buffer[15];
- subchnl->cdsc_absaddr.msf.minute = cgc.buffer[9];
- subchnl->cdsc_absaddr.msf.second = cgc.buffer[10];
- subchnl->cdsc_absaddr.msf.frame = cgc.buffer[11];
+ if (subchnl->cdsc_format == CDROM_LBA) {
+ subchnl->cdsc_absaddr.lba = ((cgc.buffer[8] << 24) |
+ (cgc.buffer[9] << 16) |
+ (cgc.buffer[10] << 8) |
+ (cgc.buffer[11]));
+ subchnl->cdsc_reladdr.lba = ((cgc.buffer[12] << 24) |
+ (cgc.buffer[13] << 16) |
+ (cgc.buffer[14] << 8) |
+ (cgc.buffer[15]));
+ } else {
+ subchnl->cdsc_reladdr.msf.minute = cgc.buffer[13];
+ subchnl->cdsc_reladdr.msf.second = cgc.buffer[14];
+ subchnl->cdsc_reladdr.msf.frame = cgc.buffer[15];
+ subchnl->cdsc_absaddr.msf.minute = cgc.buffer[9];
+ subchnl->cdsc_absaddr.msf.second = cgc.buffer[10];
+ subchnl->cdsc_absaddr.msf.frame = cgc.buffer[11];
+ }
return 0;
}
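
The shift chains above assemble big-endian 32-bit LBA fields from the subchannel reply; assuming the <asm/unaligned.h> helpers, an equivalent spelling is:

#include <asm/unaligned.h>

subchnl->cdsc_absaddr.lba = get_unaligned_be32(&cgc.buffer[8]);
subchnl->cdsc_reladdr.lba = get_unaligned_be32(&cgc.buffer[12]);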
@@ -3022,7 +3032,7 @@ static noinline int mmc_ioctl_cdrom_subchannel(struct cdrom_device_info *cdi,
if (!((requested == CDROM_MSF) ||
(requested == CDROM_LBA)))
return -EINVAL;
- q.cdsc_format = CDROM_MSF;
+
ret = cdrom_read_subchannel(cdi, &q, 0);
if (ret)
return ret;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 601f64fcc..dcc09739a 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -178,6 +178,20 @@ config IBM_BSR
of threads across a large system, which avoids bouncing a cacheline
between several cores on a system.
+config POWERNV_OP_PANEL
+ tristate "IBM POWERNV Operator Panel Display support"
+ depends on PPC_POWERNV
+ default m
+ help
+ If you say Y here, a special character device node, /dev/op_panel,
+ will be created which exposes the operator panel display on IBM
+ Power Systems machines with FSPs.
+
+ If you don't require access to the operator panel display from user
+ space, say N.
+
+ If unsure, say M here to build it as a module called powernv-op-panel.
+
source "drivers/char/ipmi/Kconfig"
config DS1620
@@ -279,7 +293,7 @@ if RTC_LIB=n
config RTC
tristate "Enhanced Real Time Clock Support (legacy PC RTC driver)"
- depends on ALPHA || (MIPS && MACH_LOONGSON64) || MN10300
+ depends on ALPHA || (MIPS && MACH_LOONGSON64)
---help---
If you say Y here and create a character special file /dev/rtc with
major number 10 and minor number 135 using mknod ("man mknod"), you
@@ -325,32 +339,6 @@ config JS_RTC
To compile this driver as a module, choose M here: the
module will be called js-rtc.
-config GEN_RTC
- tristate "Generic /dev/rtc emulation"
- depends on RTC!=y
- depends on ALPHA || M68K || MN10300 || PARISC || PPC || X86
- ---help---
- If you say Y here and create a character special file /dev/rtc with
- major number 10 and minor number 135 using mknod ("man mknod"), you
- will get access to the real time clock (or hardware clock) built
- into your computer.
-
- It reports status information via the file /proc/driver/rtc and its
- behaviour is set by various ioctls on /dev/rtc. If you enable the
- "extended RTC operation" below it will also provide an emulation
- for RTC_UIE which is required by some programs and may improve
- precision in some cases.
-
- To compile this driver as a module, choose M here: the
- module will be called genrtc.
-
-config GEN_RTC_X
- bool "Extended RTC operation"
- depends on GEN_RTC
- help
- Provides an emulation for RTC_UIE which is required by some programs
- and may improve precision of the generic RTC support in some cases.
-
config EFI_RTC
bool "EFI Real Time Clock Services"
depends on IA64
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index d8a757930..6e6c244a6 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -25,7 +25,6 @@ obj-$(CONFIG_APPLICOM) += applicom.o
obj-$(CONFIG_SONYPI) += sonypi.o
obj-$(CONFIG_RTC) += rtc.o
obj-$(CONFIG_HPET) += hpet.o
-obj-$(CONFIG_GEN_RTC) += genrtc.o
obj-$(CONFIG_EFI_RTC) += efirtc.o
obj-$(CONFIG_DS1302) += ds1302.o
obj-$(CONFIG_XILINX_HWICAP) += xilinx_hwicap/
@@ -60,3 +59,4 @@ js-rtc-y = rtc.o
obj-$(CONFIG_TILE_SROM) += tile-srom.o
obj-$(CONFIG_XILLYBUS) += xillybus/
+obj-$(CONFIG_POWERNV_OP_PANEL) += powernv-op-panel.o
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index aef87fdbd..44311296e 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -840,6 +840,14 @@ static bool i830_check_flags(unsigned int flags)
return false;
}
+void intel_gtt_insert_page(dma_addr_t addr,
+ unsigned int pg,
+ unsigned int flags)
+{
+ intel_private.driver->write_entry(addr, pg, flags);
+}
+EXPORT_SYMBOL(intel_gtt_insert_page);
+
void intel_gtt_insert_sg_entries(struct sg_table *st,
unsigned int pg_start,
unsigned int flags)
diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c
index 8bf70e8c3..50aa9ba91 100644
--- a/drivers/char/dsp56k.c
+++ b/drivers/char/dsp56k.c
@@ -325,7 +325,7 @@ static long dsp56k_ioctl(struct file *file, unsigned int cmd,
if(get_user(bin, &binary->bin) < 0)
return -EFAULT;
- if (len == 0) {
+ if (len <= 0) {
return -EINVAL; /* nothing to upload?!? */
}
if (len > DSP56K_MAX_BINARY_LENGTH) {
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index ac51149e9..8c0770bf8 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -90,7 +90,7 @@ config HW_RANDOM_BCM63XX
config HW_RANDOM_BCM2835
tristate "Broadcom BCM2835 Random Number Generator support"
- depends on ARCH_BCM2835
+ depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
@@ -244,7 +244,7 @@ config HW_RANDOM_TX4939
config HW_RANDOM_MXC_RNGA
tristate "Freescale i.MX RNGA Random Number Generator"
- depends on ARCH_HAS_RNGA
+ depends on SOC_IMX31
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
@@ -396,6 +396,20 @@ config HW_RANDOM_PIC32
If unsure, say Y.
+config HW_RANDOM_MESON
+ tristate "Amlogic Meson Random Number Generator support"
+ depends on HW_RANDOM
+ depends on ARCH_MESON || COMPILE_TEST
+ default y
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Amlogic Meson SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called meson-rng.
+
+ If unsure, say Y.
+
endif # HW_RANDOM
config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 63022b49f..04bb0b033 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -34,3 +34,4 @@ obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o
obj-$(CONFIG_HW_RANDOM_PIC32) += pic32-rng.o
+obj-$(CONFIG_HW_RANDOM_MESON) += meson-rng.o
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index 7192ec25f..af2149273 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -19,6 +19,7 @@
#define RNG_CTRL 0x0
#define RNG_STATUS 0x4
#define RNG_DATA 0x8
+#define RNG_INT_MASK 0x10
/* enable rng */
#define RNG_RBGEN 0x1
@@ -26,10 +27,24 @@
/* the initial numbers generated are "less random" so will be discarded */
#define RNG_WARMUP_COUNT 0x40000
+#define RNG_INT_OFF 0x1
+
+static void __init nsp_rng_init(void __iomem *base)
+{
+ u32 val;
+
+ /* mask the interrupt */
+ val = readl(base + RNG_INT_MASK);
+ val |= RNG_INT_OFF;
+ writel(val, base + RNG_INT_MASK);
+}
+
static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max,
bool wait)
{
void __iomem *rng_base = (void __iomem *)rng->priv;
+ u32 max_words = max / sizeof(u32);
+ u32 num_words, count;
while ((__raw_readl(rng_base + RNG_STATUS) >> 24) == 0) {
if (!wait)
@@ -37,8 +52,14 @@ static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max,
cpu_relax();
}
- *(u32 *)buf = __raw_readl(rng_base + RNG_DATA);
- return sizeof(u32);
+ num_words = readl(rng_base + RNG_STATUS) >> 24;
+ if (num_words > max_words)
+ num_words = max_words;
+
+ for (count = 0; count < num_words; count++)
+ ((u32 *)buf)[count] = readl(rng_base + RNG_DATA);
+
+ return num_words * sizeof(u32);
}
static struct hwrng bcm2835_rng_ops = {
@@ -46,10 +67,19 @@ static struct hwrng bcm2835_rng_ops = {
.read = bcm2835_rng_read,
};
+static const struct of_device_id bcm2835_rng_of_match[] = {
+ { .compatible = "brcm,bcm2835-rng"},
+ { .compatible = "brcm,bcm-nsp-rng", .data = nsp_rng_init},
+ { .compatible = "brcm,bcm5301x-rng", .data = nsp_rng_init},
+ {},
+};
+
static int bcm2835_rng_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
+ void (*rng_setup)(void __iomem *base);
+ const struct of_device_id *rng_id;
void __iomem *rng_base;
int err;
@@ -61,6 +91,15 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
}
bcm2835_rng_ops.priv = (unsigned long)rng_base;
+ rng_id = of_match_node(bcm2835_rng_of_match, np);
+ if (!rng_id)
+ return -EINVAL;
+
+ /* Check for rng init function, execute it */
+ rng_setup = rng_id->data;
+ if (rng_setup)
+ rng_setup(rng_base);
+
/* set warm-up count & enable */
__raw_writel(RNG_WARMUP_COUNT, rng_base + RNG_STATUS);
__raw_writel(RNG_RBGEN, rng_base + RNG_CTRL);
@@ -90,10 +129,6 @@ static int bcm2835_rng_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id bcm2835_rng_of_match[] = {
- { .compatible = "brcm,bcm2835-rng", },
- {},
-};
MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match);
static struct platform_driver bcm2835_rng_driver = {
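
The reshuffled match table now carries an optional per-compatible init hook in .data, which probe dispatches like so (condensed from the hunk above):

/* Sketch: per-compatible setup via of_device_id.data. */
const struct of_device_id *id = of_match_node(bcm2835_rng_of_match, np);

if (id && id->data) {
	void (*setup)(void __iomem *) = id->data;
	setup(rng_base);	/* e.g. nsp_rng_init() masks the RNG interrupt */
}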
diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
index ed44561ea..23d358553 100644
--- a/drivers/char/hw_random/exynos-rng.c
+++ b/drivers/char/hw_random/exynos-rng.c
@@ -45,12 +45,12 @@ struct exynos_rng {
static u32 exynos_rng_readl(struct exynos_rng *rng, u32 offset)
{
- return __raw_readl(rng->mem + offset);
+ return readl_relaxed(rng->mem + offset);
}
static void exynos_rng_writel(struct exynos_rng *rng, u32 val, u32 offset)
{
- __raw_writel(val, rng->mem + offset);
+ writel_relaxed(val, rng->mem + offset);
}
static int exynos_rng_configure(struct exynos_rng *exynos_rng)
diff --git a/drivers/char/hw_random/meson-rng.c b/drivers/char/hw_random/meson-rng.c
new file mode 100644
index 000000000..0cfd81bca
--- /dev/null
+++ b/drivers/char/hw_random/meson-rng.c
@@ -0,0 +1,131 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2014 Amlogic, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2014 Amlogic, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/of.h>
+
+#define RNG_DATA 0x00
+
+struct meson_rng_data {
+ void __iomem *base;
+ struct platform_device *pdev;
+ struct hwrng rng;
+};
+
+static int meson_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct meson_rng_data *data =
+ container_of(rng, struct meson_rng_data, rng);
+
+ if (max < sizeof(u32))
+ return 0;
+
+ *(u32 *)buf = readl_relaxed(data->base + RNG_DATA);
+
+ return sizeof(u32);
+}
+
+static int meson_rng_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct meson_rng_data *data;
+ struct resource *res;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->pdev = pdev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(data->base))
+ return PTR_ERR(data->base);
+
+ data->rng.name = pdev->name;
+ data->rng.read = meson_rng_read;
+
+ platform_set_drvdata(pdev, data);
+
+ return devm_hwrng_register(dev, &data->rng);
+}
+
+static const struct of_device_id meson_rng_of_match[] = {
+ { .compatible = "amlogic,meson-rng", },
+ {},
+};
+
+static struct platform_driver meson_rng_driver = {
+ .probe = meson_rng_probe,
+ .driver = {
+ .name = "meson-rng",
+ .of_match_table = meson_rng_of_match,
+ },
+};
+
+module_platform_driver(meson_rng_driver);
+
+MODULE_ALIAS("platform:meson-rng");
+MODULE_DESCRIPTION("Meson H/W Random Number Generator driver");
+MODULE_AUTHOR("Lawrence Mok <lawrence.mok@amlogic.com>");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 8a1432e8b..01d4be2c3 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -384,7 +384,12 @@ static int omap_rng_probe(struct platform_device *pdev)
}
pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to runtime_get device: %d\n", ret);
+ pm_runtime_put_noidle(&pdev->dev);
+ goto err_ioremap;
+ }
ret = (dev->of_node) ? of_get_omap_rng_device_details(priv, pdev) :
get_omap_rng_device_details(priv);
@@ -435,8 +440,15 @@ static int __maybe_unused omap_rng_suspend(struct device *dev)
static int __maybe_unused omap_rng_resume(struct device *dev)
{
struct omap_rng_dev *priv = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret) {
+ dev_err(dev, "Failed to runtime_get device: %d\n", ret);
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
- pm_runtime_get_sync(dev);
priv->pdata->init(priv);
return 0;
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
index 92a810648..63d84e6f1 100644
--- a/drivers/char/hw_random/stm32-rng.c
+++ b/drivers/char/hw_random/stm32-rng.c
@@ -69,8 +69,12 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
}
/* If error detected or data not ready... */
- if (sr != RNG_SR_DRDY)
+ if (sr != RNG_SR_DRDY) {
+ if (WARN_ONCE(sr & (RNG_SR_SEIS | RNG_SR_CEIS),
+ "bad RNG status - %x\n", sr))
+ writel_relaxed(0, priv->base + RNG_SR);
break;
+ }
*(u32 *)data = readl_relaxed(priv->base + RNG_DR);
@@ -79,10 +83,6 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
max -= sizeof(u32);
}
- if (WARN_ONCE(sr & (RNG_SR_SEIS | RNG_SR_CEIS),
- "bad RNG status - %x\n", sr))
- writel_relaxed(0, priv->base + RNG_SR);
-
pm_runtime_mark_last_busy((struct device *) priv->rng.priv);
pm_runtime_put_sync_autosuspend((struct device *) priv->rng.priv);
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index 6ed9e9fe5..5a9350b10 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -50,18 +50,6 @@ config IPMI_SI
Currently, only KCS and SMIC are supported. If
you are using IPMI, you should probably say "y" here.
-config IPMI_SI_PROBE_DEFAULTS
- bool 'Probe for all possible IPMI system interfaces by default'
- default n
- depends on IPMI_SI
- help
- Modern systems will usually expose IPMI interfaces via a discoverable
- firmware mechanism such as ACPI or DMI. Older systems do not, and so
- the driver is forced to probe hardware manually. This may cause boot
- delays. Say "n" here to disable this manual probing. IPMI will then
- only be available on older systems if the "ipmi_si_intf.trydefaults=1"
- boot argument is passed.
-
config IPMI_SSIF
tristate 'IPMI SMBus handler (SSIF)'
select I2C
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 44b1bd6ba..d8619998c 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -474,12 +474,12 @@ static DEFINE_MUTEX(smi_watchers_mutex);
static const char * const addr_src_to_str[] = {
"invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
- "device-tree", "default"
+ "device-tree"
};
const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
{
- if (src > SI_DEFAULT)
+ if (src >= SI_LAST)
src = 0; /* Invalid */
return addr_src_to_str[src];
}
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 7b1c412b4..a112c0146 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1322,7 +1322,6 @@ static bool si_tryplatform = true;
#ifdef CONFIG_PCI
static bool si_trypci = true;
#endif
-static bool si_trydefaults = IS_ENABLED(CONFIG_IPMI_SI_PROBE_DEFAULTS);
static char *si_type[SI_MAX_PARMS];
#define MAX_SI_TYPE_STR 30
static char si_type_str[MAX_SI_TYPE_STR];
@@ -1371,10 +1370,6 @@ module_param_named(trypci, si_trypci, bool, 0);
MODULE_PARM_DESC(trypci, "Setting this to zero will disable the"
" default scan of the interfaces identified via pci");
#endif
-module_param_named(trydefaults, si_trydefaults, bool, 0);
-MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
- " default scan of the KCS and SMIC interface at the standard"
- " address");
module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
MODULE_PARM_DESC(type, "Defines the type of each interface, each"
" interface separated by commas. The types are 'kcs',"
@@ -3461,62 +3456,6 @@ static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
del_timer_sync(&smi_info->si_timer);
}
-static const struct ipmi_default_vals
-{
- const int type;
- const int port;
-} ipmi_defaults[] =
-{
- { .type = SI_KCS, .port = 0xca2 },
- { .type = SI_SMIC, .port = 0xca9 },
- { .type = SI_BT, .port = 0xe4 },
- { .port = 0 }
-};
-
-static void default_find_bmc(void)
-{
- struct smi_info *info;
- int i;
-
- for (i = 0; ; i++) {
- if (!ipmi_defaults[i].port)
- break;
-#ifdef CONFIG_PPC
- if (check_legacy_ioport(ipmi_defaults[i].port))
- continue;
-#endif
- info = smi_info_alloc();
- if (!info)
- return;
-
- info->addr_source = SI_DEFAULT;
-
- info->si_type = ipmi_defaults[i].type;
- info->io_setup = port_setup;
- info->io.addr_data = ipmi_defaults[i].port;
- info->io.addr_type = IPMI_IO_ADDR_SPACE;
-
- info->io.addr = NULL;
- info->io.regspacing = DEFAULT_REGSPACING;
- info->io.regsize = DEFAULT_REGSPACING;
- info->io.regshift = 0;
-
- if (add_smi(info) == 0) {
- if ((try_smi_init(info)) == 0) {
- /* Found one... */
- printk(KERN_INFO PFX "Found default %s"
- " state machine at %s address 0x%lx\n",
- si_to_str[info->si_type],
- addr_space_to_str[info->io.addr_type],
- info->io.addr_data);
- } else
- cleanup_one_si(info);
- } else {
- kfree(info);
- }
- }
-}
-
static int is_new_interface(struct smi_info *info)
{
struct smi_info *e;
@@ -3844,8 +3783,6 @@ static int init_ipmi_si(void)
#ifdef CONFIG_PARISC
register_parisc_driver(&ipmi_parisc_driver);
parisc_registered = true;
- /* poking PC IO addresses will crash machine, don't do it */
- si_trydefaults = 0;
#endif
/* We prefer devices with interrupts, but in the case of a machine
@@ -3885,16 +3822,6 @@ static int init_ipmi_si(void)
if (type)
return 0;
- if (si_trydefaults) {
- mutex_lock(&smi_infos_lock);
- if (list_empty(&smi_infos)) {
- /* No BMC was found, try defaults. */
- mutex_unlock(&smi_infos_lock);
- default_find_bmc();
- } else
- mutex_unlock(&smi_infos_lock);
- }
-
mutex_lock(&smi_infos_lock);
if (unload_when_empty && list_empty(&smi_infos)) {
mutex_unlock(&smi_infos_lock);
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 097c86898..5673ffff0 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -568,12 +568,16 @@ static void retry_timeout(unsigned long data)
}
-static void ssif_alert(struct i2c_client *client, unsigned int data)
+static void ssif_alert(struct i2c_client *client, enum i2c_alert_protocol type,
+ unsigned int data)
{
struct ssif_info *ssif_info = i2c_get_clientdata(client);
unsigned long oflags, *flags;
bool do_get = false;
+ if (type != I2C_PROTOCOL_SMBUS_ALERT)
+ return;
+
ssif_inc_stat(ssif_info, alerts);
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
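The widened .alert callback lets a single handler receive several alert protocols, so drivers now filter on the protocol argument exactly as ssif_alert does above. A minimal sketch of a conforming handler for a hypothetical I2C driver:

static void my_alert(struct i2c_client *client,
		     enum i2c_alert_protocol protocol, unsigned int data)
{
	if (protocol != I2C_PROTOCOL_SMBUS_ALERT)
		return;	/* not an SMBus alert; ignore */
	dev_info(&client->dev, "SMBus alert received, data=0x%x\n", data);
}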
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 71025c2f6..a33163dbb 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -22,6 +22,7 @@
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
+#include <linux/shmem_fs.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
@@ -66,12 +67,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
u64 cursor = from;
while (cursor < to) {
- if (!devmem_is_allowed(pfn)) {
- printk(KERN_INFO
- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
- current->comm, from, to);
+ if (!devmem_is_allowed(pfn))
return 0;
- }
cursor += PAGE_SIZE;
pfn++;
}
@@ -661,6 +658,28 @@ static int mmap_zero(struct file *file, struct vm_area_struct *vma)
return 0;
}
+static unsigned long get_unmapped_area_zero(struct file *file,
+ unsigned long addr, unsigned long len,
+ unsigned long pgoff, unsigned long flags)
+{
+#ifdef CONFIG_MMU
+ if (flags & MAP_SHARED) {
+ /*
+ * mmap_zero() will call shmem_zero_setup() to create a file,
+ * so use shmem's get_unmapped_area in case it can be huge;
+ * and pass NULL for file as in mmap.c's get_unmapped_area(),
+ * so as not to confuse shmem with our handle on "/dev/zero".
+ */
+ return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
+ }
+
+ /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
+ return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+#else
+ return -ENOSYS;
+#endif
+}
+
static ssize_t write_full(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
@@ -768,6 +787,7 @@ static const struct file_operations zero_fops = {
.read_iter = read_iter_zero,
.write_iter = write_iter_zero,
.mmap = mmap_zero,
+ .get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
.mmap_capabilities = zero_mmap_capabilities,
#endif
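With get_unmapped_area_zero in place, a MAP_SHARED mapping of /dev/zero is placed by shmem and can therefore land on huge-page-aligned addresses. A userspace sketch of the mapping this code path serves (error handling abbreviated):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/zero", O_RDWR);
	/* MAP_SHARED: mmap_zero() backs this with a shmem object */
	char *p = mmap(NULL, 4 << 20, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, 0);

	if (fd < 0 || p == MAP_FAILED)
		return 1;
	p[0] = 42;	/* touch the shared zero mapping */
	printf("mapped at %p\n", p);
	munmap(p, 4 << 20);
	close(fd);
	return 0;
}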
diff --git a/drivers/char/powernv-op-panel.c b/drivers/char/powernv-op-panel.c
new file mode 100644
index 000000000..a45dabcc8
--- /dev/null
+++ b/drivers/char/powernv-op-panel.c
@@ -0,0 +1,223 @@
+/*
+ * OPAL Operator Panel Display Driver
+ *
+ * Copyright 2016, Suraj Jitindar Singh, IBM Corporation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+
+#include <asm/opal.h>
+
+/*
+ * This driver creates a character device (/dev/op_panel) which exposes the
+ * operator panel (character LCD display) on IBM Power Systems machines
+ * with FSPs.
+ * A character buffer written to the device will be displayed on the
+ * operator panel.
+ */
+
+static DEFINE_MUTEX(oppanel_mutex);
+
+static u32 num_lines, oppanel_size;
+static oppanel_line_t *oppanel_lines;
+static char *oppanel_data;
+
+static loff_t oppanel_llseek(struct file *filp, loff_t offset, int whence)
+{
+ return fixed_size_llseek(filp, offset, whence, oppanel_size);
+}
+
+static ssize_t oppanel_read(struct file *filp, char __user *userbuf, size_t len,
+ loff_t *f_pos)
+{
+ return simple_read_from_buffer(userbuf, len, f_pos, oppanel_data,
+ oppanel_size);
+}
+
+static int __op_panel_update_display(void)
+{
+ struct opal_msg msg;
+ int rc, token;
+
+ token = opal_async_get_token_interruptible();
+ if (token < 0) {
+ if (token != -ERESTARTSYS)
+ pr_debug("Couldn't get OPAL async token [token=%d]\n",
+ token);
+ return token;
+ }
+
+ rc = opal_write_oppanel_async(token, oppanel_lines, num_lines);
+ switch (rc) {
+ case OPAL_ASYNC_COMPLETION:
+ rc = opal_async_wait_response(token, &msg);
+ if (rc) {
+ pr_debug("Failed to wait for async response [rc=%d]\n",
+ rc);
+ break;
+ }
+ rc = opal_get_async_rc(msg);
+ if (rc != OPAL_SUCCESS) {
+ pr_debug("OPAL async call returned failed [rc=%d]\n",
+ rc);
+ break;
+ }
+ case OPAL_SUCCESS:
+ break;
+ default:
+ pr_debug("OPAL write op-panel call failed [rc=%d]\n", rc);
+ }
+
+ opal_async_release_token(token);
+ return rc;
+}
+
+static ssize_t oppanel_write(struct file *filp, const char __user *userbuf,
+ size_t len, loff_t *f_pos)
+{
+ loff_t f_pos_prev = *f_pos;
+ ssize_t ret;
+ int rc;
+
+ if (!*f_pos)
+ memset(oppanel_data, ' ', oppanel_size);
+ else if (*f_pos >= oppanel_size)
+ return -EFBIG;
+
+ ret = simple_write_to_buffer(oppanel_data, oppanel_size, f_pos, userbuf,
+ len);
+ if (ret > 0) {
+ rc = __op_panel_update_display();
+ if (rc != OPAL_SUCCESS) {
+ pr_err_ratelimited("OPAL call failed to write to op panel display [rc=%d]\n",
+ rc);
+ *f_pos = f_pos_prev;
+ return -EIO;
+ }
+ }
+ return ret;
+}
+
+static int oppanel_open(struct inode *inode, struct file *filp)
+{
+ if (!mutex_trylock(&oppanel_mutex)) {
+ pr_debug("Device Busy\n");
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int oppanel_release(struct inode *inode, struct file *filp)
+{
+ mutex_unlock(&oppanel_mutex);
+ return 0;
+}
+
+static const struct file_operations oppanel_fops = {
+ .owner = THIS_MODULE,
+ .llseek = oppanel_llseek,
+ .read = oppanel_read,
+ .write = oppanel_write,
+ .open = oppanel_open,
+ .release = oppanel_release
+};
+
+static struct miscdevice oppanel_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "op_panel",
+ .fops = &oppanel_fops
+};
+
+static int oppanel_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ u32 line_len;
+ int rc, i;
+
+ rc = of_property_read_u32(np, "#length", &line_len);
+ if (rc) {
+ pr_err_ratelimited("Operator panel length property not found\n");
+ return rc;
+ }
+ rc = of_property_read_u32(np, "#lines", &num_lines);
+ if (rc) {
+ pr_err_ratelimited("Operator panel lines property not found\n");
+ return rc;
+ }
+ oppanel_size = line_len * num_lines;
+
+ pr_devel("Operator panel of size %u found with %u lines of length %u\n",
+ oppanel_size, num_lines, line_len);
+
+ oppanel_data = kcalloc(oppanel_size, sizeof(*oppanel_data), GFP_KERNEL);
+ if (!oppanel_data)
+ return -ENOMEM;
+
+ oppanel_lines = kcalloc(num_lines, sizeof(oppanel_line_t), GFP_KERNEL);
+ if (!oppanel_lines) {
+ rc = -ENOMEM;
+ goto free_oppanel_data;
+ }
+
+ memset(oppanel_data, ' ', oppanel_size);
+ for (i = 0; i < num_lines; i++) {
+ oppanel_lines[i].line_len = cpu_to_be64(line_len);
+ oppanel_lines[i].line = cpu_to_be64(__pa(&oppanel_data[i *
+ line_len]));
+ }
+
+ rc = misc_register(&oppanel_dev);
+ if (rc) {
+ pr_err_ratelimited("Failed to register as misc device\n");
+ goto free_oppanel;
+ }
+
+ return 0;
+
+free_oppanel:
+ kfree(oppanel_lines);
+free_oppanel_data:
+ kfree(oppanel_data);
+ return rc;
+}
+
+static int oppanel_remove(struct platform_device *pdev)
+{
+ misc_deregister(&oppanel_dev);
+ kfree(oppanel_lines);
+ kfree(oppanel_data);
+ return 0;
+}
+
+static const struct of_device_id oppanel_match[] = {
+ { .compatible = "ibm,opal-oppanel" },
+ { },
+};
+
+static struct platform_driver oppanel_driver = {
+ .driver = {
+ .name = "powernv-op-panel",
+ .of_match_table = oppanel_match,
+ },
+ .probe = oppanel_probe,
+ .remove = oppanel_remove,
+};
+
+module_platform_driver(oppanel_driver);
+
+MODULE_DEVICE_TABLE(of, oppanel_match);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PowerNV Operator Panel LCD Display Driver");
+MODULE_AUTHOR("Suraj Jitindar Singh <sjitindarsingh@gmail.com>");
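A userspace sketch of driving the new device: /dev/op_panel takes a plain character buffer, and a write starting at offset 0 first blanks the panel, then pushes the buffer out through the async OPAL call shown above:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char msg[] = "system booting";
	int fd = open("/dev/op_panel", O_WRONLY);

	if (fd < 0)
		return 1;
	write(fd, msg, strlen(msg));	/* displayed on the operator panel */
	close(fd);
	return 0;
}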
diff --git a/drivers/char/random.c b/drivers/char/random.c
index d72c6d14a..3efb3bf0a 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -249,6 +249,7 @@
#include <linux/genhd.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
+#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
@@ -261,6 +262,7 @@
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
+#include <crypto/chacha20.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
@@ -413,6 +415,34 @@ static struct fasync_struct *fasync;
static DEFINE_SPINLOCK(random_ready_list_lock);
static LIST_HEAD(random_ready_list);
+struct crng_state {
+ __u32 state[16];
+ unsigned long init_time;
+ spinlock_t lock;
+};
+
+struct crng_state primary_crng = {
+ .lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock),
+};
+
+/*
+ * crng_init = 0 --> Uninitialized
+ * 1 --> Initialized
+ * 2 --> Initialized from input_pool
+ *
+ * crng_init is protected by primary_crng.lock, and only increases
+ * its value (from 0->1->2).
+ */
+static int crng_init = 0;
+#define crng_ready() (likely(crng_init > 0))
+static int crng_init_cnt = 0;
+#define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE)
+static void _extract_crng(struct crng_state *crng,
+ __u8 out[CHACHA20_BLOCK_SIZE]);
+static void _crng_backtrack_protect(struct crng_state *crng,
+ __u8 tmp[CHACHA20_BLOCK_SIZE], int used);
+static void process_random_ready_list(void);
+
/**********************************************************************
*
* OS independent entropy store. Here are the functions which handle
@@ -442,10 +472,15 @@ struct entropy_store {
__u8 last_data[EXTRACT_SIZE];
};
+static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+ size_t nbytes, int min, int rsvd);
+static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
+ size_t nbytes, int fips);
+
+static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
static void push_to_pool(struct work_struct *work);
static __u32 input_pool_data[INPUT_POOL_WORDS];
static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
-static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
static struct entropy_store input_pool = {
.poolinfo = &poolinfo_table[0],
@@ -466,16 +501,6 @@ static struct entropy_store blocking_pool = {
push_to_pool),
};
-static struct entropy_store nonblocking_pool = {
- .poolinfo = &poolinfo_table[1],
- .name = "nonblocking",
- .pull = &input_pool,
- .lock = __SPIN_LOCK_UNLOCKED(nonblocking_pool.lock),
- .pool = nonblocking_pool_data,
- .push_work = __WORK_INITIALIZER(nonblocking_pool.push_work,
- push_to_pool),
-};
-
static __u32 const twist_table[8] = {
0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
@@ -678,12 +703,6 @@ retry:
if (!r->initialized && r->entropy_total > 128) {
r->initialized = 1;
r->entropy_total = 0;
- if (r == &nonblocking_pool) {
- prandom_reseed_late();
- process_random_ready_list();
- wake_up_all(&urandom_init_wait);
- pr_notice("random: %s pool is initialized\n", r->name);
- }
}
trace_credit_entropy_bits(r->name, nbits,
@@ -693,30 +712,27 @@ retry:
if (r == &input_pool) {
int entropy_bits = entropy_count >> ENTROPY_SHIFT;
+ if (crng_init < 2 && entropy_bits >= 128) {
+ crng_reseed(&primary_crng, r);
+ entropy_bits = r->entropy_count >> ENTROPY_SHIFT;
+ }
+
/* should we wake readers? */
if (entropy_bits >= random_read_wakeup_bits) {
wake_up_interruptible(&random_read_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
}
/* If the input pool is getting full, send some
- * entropy to the two output pools, flipping back and
- * forth between them, until the output pools are 75%
- * full.
+ * entropy to the blocking pool until it is 75% full.
*/
if (entropy_bits > random_write_wakeup_bits &&
r->initialized &&
r->entropy_total >= 2*random_read_wakeup_bits) {
- static struct entropy_store *last = &blocking_pool;
struct entropy_store *other = &blocking_pool;
- if (last == &blocking_pool)
- other = &nonblocking_pool;
if (other->entropy_count <=
- 3 * other->poolinfo->poolfracbits / 4)
- last = other;
- if (last->entropy_count <=
- 3 * last->poolinfo->poolfracbits / 4) {
- schedule_work(&last->push_work);
+ 3 * other->poolinfo->poolfracbits / 4) {
+ schedule_work(&other->push_work);
r->entropy_total = 0;
}
}
@@ -739,6 +755,223 @@ static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
/*********************************************************************
*
+ * CRNG using CHACHA20
+ *
+ *********************************************************************/
+
+#define CRNG_RESEED_INTERVAL (300*HZ)
+
+static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
+
+#ifdef CONFIG_NUMA
+/*
+ * Hack to deal with crazy userspace programs when they are all trying
+ * to access /dev/urandom in parallel. The programs are almost
+ * certainly doing something terribly wrong, but we'll work around
+ * their brain damage.
+ */
+static struct crng_state **crng_node_pool __read_mostly;
+#endif
+
+static void crng_initialize(struct crng_state *crng)
+{
+ int i;
+ unsigned long rv;
+
+ memcpy(&crng->state[0], "expand 32-byte k", 16);
+ if (crng == &primary_crng)
+ _extract_entropy(&input_pool, &crng->state[4],
+ sizeof(__u32) * 12, 0);
+ else
+ get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
+ for (i = 4; i < 16; i++) {
+ if (!arch_get_random_seed_long(&rv) &&
+ !arch_get_random_long(&rv))
+ rv = random_get_entropy();
+ crng->state[i] ^= rv;
+ }
+ crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
+}
+
+static int crng_fast_load(const char *cp, size_t len)
+{
+ unsigned long flags;
+ char *p;
+
+ if (!spin_trylock_irqsave(&primary_crng.lock, flags))
+ return 0;
+ if (crng_ready()) {
+ spin_unlock_irqrestore(&primary_crng.lock, flags);
+ return 0;
+ }
+ p = (unsigned char *) &primary_crng.state[4];
+ while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
+ p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp;
+ cp++; crng_init_cnt++; len--;
+ }
+ if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
+ crng_init = 1;
+ wake_up_interruptible(&crng_init_wait);
+ pr_notice("random: fast init done\n");
+ }
+ spin_unlock_irqrestore(&primary_crng.lock, flags);
+ return 1;
+}
+
+static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
+{
+ unsigned long flags;
+ int i, num;
+ union {
+ __u8 block[CHACHA20_BLOCK_SIZE];
+ __u32 key[8];
+ } buf;
+
+ if (r) {
+ num = extract_entropy(r, &buf, 32, 16, 0);
+ if (num == 0)
+ return;
+ } else {
+ _extract_crng(&primary_crng, buf.block);
+ _crng_backtrack_protect(&primary_crng, buf.block,
+ CHACHA20_KEY_SIZE);
+ }
+ spin_lock_irqsave(&primary_crng.lock, flags);
+ for (i = 0; i < 8; i++) {
+ unsigned long rv;
+ if (!arch_get_random_seed_long(&rv) &&
+ !arch_get_random_long(&rv))
+ rv = random_get_entropy();
+ crng->state[i+4] ^= buf.key[i] ^ rv;
+ }
+ memzero_explicit(&buf, sizeof(buf));
+ crng->init_time = jiffies;
+ if (crng == &primary_crng && crng_init < 2) {
+ crng_init = 2;
+ process_random_ready_list();
+ wake_up_interruptible(&crng_init_wait);
+ pr_notice("random: crng init done\n");
+ }
+ spin_unlock_irqrestore(&primary_crng.lock, flags);
+}
+
+static inline void maybe_reseed_primary_crng(void)
+{
+ if (crng_init > 2 &&
+ time_after(jiffies, primary_crng.init_time + CRNG_RESEED_INTERVAL))
+ crng_reseed(&primary_crng, &input_pool);
+}
+
+static inline void crng_wait_ready(void)
+{
+ wait_event_interruptible(crng_init_wait, crng_ready());
+}
+
+static void _extract_crng(struct crng_state *crng,
+ __u8 out[CHACHA20_BLOCK_SIZE])
+{
+ unsigned long v, flags;
+
+ if (crng_init > 1 &&
+ time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL))
+ crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);
+ spin_lock_irqsave(&crng->lock, flags);
+ if (arch_get_random_long(&v))
+ crng->state[14] ^= v;
+ chacha20_block(&crng->state[0], out);
+ if (crng->state[12] == 0)
+ crng->state[13]++;
+ spin_unlock_irqrestore(&crng->lock, flags);
+}
+
+static void extract_crng(__u8 out[CHACHA20_BLOCK_SIZE])
+{
+ struct crng_state *crng = NULL;
+
+#ifdef CONFIG_NUMA
+ if (crng_node_pool)
+ crng = crng_node_pool[numa_node_id()];
+ if (crng == NULL)
+#endif
+ crng = &primary_crng;
+ _extract_crng(crng, out);
+}
+
+/*
+ * Use the leftover bytes from the CRNG block output (if there is
+ * enough) to mutate the CRNG key to provide backtracking protection.
+ */
+static void _crng_backtrack_protect(struct crng_state *crng,
+ __u8 tmp[CHACHA20_BLOCK_SIZE], int used)
+{
+ unsigned long flags;
+ __u32 *s, *d;
+ int i;
+
+ used = round_up(used, sizeof(__u32));
+ if (used + CHACHA20_KEY_SIZE > CHACHA20_BLOCK_SIZE) {
+ extract_crng(tmp);
+ used = 0;
+ }
+ spin_lock_irqsave(&crng->lock, flags);
+ s = (__u32 *) &tmp[used];
+ d = &crng->state[4];
+ for (i=0; i < 8; i++)
+ *d++ ^= *s++;
+ spin_unlock_irqrestore(&crng->lock, flags);
+}
+
+static void crng_backtrack_protect(__u8 tmp[CHACHA20_BLOCK_SIZE], int used)
+{
+ struct crng_state *crng = NULL;
+
+#ifdef CONFIG_NUMA
+ if (crng_node_pool)
+ crng = crng_node_pool[numa_node_id()];
+ if (crng == NULL)
+#endif
+ crng = &primary_crng;
+ _crng_backtrack_protect(crng, tmp, used);
+}
+
+static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
+{
+ ssize_t ret = 0, i = CHACHA20_BLOCK_SIZE;
+ __u8 tmp[CHACHA20_BLOCK_SIZE];
+ int large_request = (nbytes > 256);
+
+ while (nbytes) {
+ if (large_request && need_resched()) {
+ if (signal_pending(current)) {
+ if (ret == 0)
+ ret = -ERESTARTSYS;
+ break;
+ }
+ schedule();
+ }
+
+ extract_crng(tmp);
+ i = min_t(int, nbytes, CHACHA20_BLOCK_SIZE);
+ if (copy_to_user(buf, tmp, i)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ nbytes -= i;
+ buf += i;
+ ret += i;
+ }
+ crng_backtrack_protect(tmp, i);
+
+ /* Wipe data just written to memory */
+ memzero_explicit(tmp, sizeof(tmp));
+
+ return ret;
+}
+
+
+/*********************************************************************
+ *
* Entropy input management
*
*********************************************************************/
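_crng_backtrack_protect above folds leftover output words back into the ChaCha20 key, so a later compromise of the generator state cannot be rolled back to reveal already-delivered output. A schematic of the idea in plain C, with chacha20_block() as an external stand-in and the kernel's locking omitted:

#define BLOCK_BYTES	64	/* CHACHA20_BLOCK_SIZE */
#define KEY_WORDS	8	/* CHACHA20_KEY_SIZE / 4 */

/* stand-in: fills out[] from state[] and advances the block counter */
extern void chacha20_block(unsigned int state[16],
			   unsigned char out[BLOCK_BYTES]);

static unsigned int state[16];	/* words 4..11 hold the key */

static void backtrack_protect(unsigned char buf[BLOCK_BYTES], int used)
{
	unsigned int *s, *d = &state[4];
	int i;

	used = (used + 3) & ~3;		/* round up to whole 32-bit words */
	if (used + KEY_WORDS * 4 > BLOCK_BYTES) {
		chacha20_block(state, buf);	/* too few leftovers: draw a fresh block */
		used = 0;
	}
	s = (unsigned int *)&buf[used];
	for (i = 0; i < KEY_WORDS; i++)
		*d++ ^= *s++;		/* the old key is now unrecoverable */
}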
@@ -753,12 +986,12 @@ struct timer_rand_state {
#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, };
/*
- * Add device- or boot-specific data to the input and nonblocking
- * pools to help initialize them to unique values.
+ * Add device- or boot-specific data to the input pool to help
+ * initialize it.
*
- * None of this adds any entropy, it is meant to avoid the
- * problem of the nonblocking pool having similar initial state
- * across largely identical devices.
+ * None of this adds any entropy; it is meant to avoid the problem of
+ * the entropy pool having similar initial state across largely
+ * identical devices.
*/
void add_device_randomness(const void *buf, unsigned int size)
{
@@ -770,11 +1003,6 @@ void add_device_randomness(const void *buf, unsigned int size)
_mix_pool_bytes(&input_pool, buf, size);
_mix_pool_bytes(&input_pool, &time, sizeof(time));
spin_unlock_irqrestore(&input_pool.lock, flags);
-
- spin_lock_irqsave(&nonblocking_pool.lock, flags);
- _mix_pool_bytes(&nonblocking_pool, buf, size);
- _mix_pool_bytes(&nonblocking_pool, &time, sizeof(time));
- spin_unlock_irqrestore(&nonblocking_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);
@@ -805,7 +1033,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
sample.jiffies = jiffies;
sample.cycles = random_get_entropy();
sample.num = num;
- r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
+ r = &input_pool;
mix_pool_bytes(r, &sample, sizeof(sample));
/*
@@ -921,11 +1149,21 @@ void add_interrupt_randomness(int irq, int irq_flags)
fast_mix(fast_pool);
add_interrupt_bench(cycles);
+ if (!crng_ready()) {
+ if ((fast_pool->count >= 64) &&
+ crng_fast_load((char *) fast_pool->pool,
+ sizeof(fast_pool->pool))) {
+ fast_pool->count = 0;
+ fast_pool->last = now;
+ }
+ return;
+ }
+
if ((fast_pool->count < 64) &&
!time_after(now, fast_pool->last + HZ))
return;
- r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
+ r = &input_pool;
if (!spin_trylock(&r->lock))
return;
@@ -969,9 +1207,6 @@ EXPORT_SYMBOL_GPL(add_disk_randomness);
*
*********************************************************************/
-static ssize_t extract_entropy(struct entropy_store *r, void *buf,
- size_t nbytes, int min, int rsvd);
-
/*
* This utility inline function is responsible for transferring entropy
* from the primary pool to the secondary extraction pool. We make
@@ -1146,6 +1381,36 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
memzero_explicit(&hash, sizeof(hash));
}
+static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
+ size_t nbytes, int fips)
+{
+ ssize_t ret = 0, i;
+ __u8 tmp[EXTRACT_SIZE];
+ unsigned long flags;
+
+ while (nbytes) {
+ extract_buf(r, tmp);
+
+ if (fips) {
+ spin_lock_irqsave(&r->lock, flags);
+ if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
+ panic("Hardware RNG duplicated output!\n");
+ memcpy(r->last_data, tmp, EXTRACT_SIZE);
+ spin_unlock_irqrestore(&r->lock, flags);
+ }
+ i = min_t(int, nbytes, EXTRACT_SIZE);
+ memcpy(buf, tmp, i);
+ nbytes -= i;
+ buf += i;
+ ret += i;
+ }
+
+ /* Wipe data just returned from memory */
+ memzero_explicit(tmp, sizeof(tmp));
+
+ return ret;
+}
+
/*
* This function extracts randomness from the "entropy pool", and
* returns it in a buffer.
@@ -1158,7 +1423,6 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
static ssize_t extract_entropy(struct entropy_store *r, void *buf,
size_t nbytes, int min, int reserved)
{
- ssize_t ret = 0, i;
__u8 tmp[EXTRACT_SIZE];
unsigned long flags;
@@ -1182,27 +1446,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
xfer_secondary_pool(r, nbytes);
nbytes = account(r, nbytes, min, reserved);
- while (nbytes) {
- extract_buf(r, tmp);
-
- if (fips_enabled) {
- spin_lock_irqsave(&r->lock, flags);
- if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
- panic("Hardware RNG duplicated output!\n");
- memcpy(r->last_data, tmp, EXTRACT_SIZE);
- spin_unlock_irqrestore(&r->lock, flags);
- }
- i = min_t(int, nbytes, EXTRACT_SIZE);
- memcpy(buf, tmp, i);
- nbytes -= i;
- buf += i;
- ret += i;
- }
-
- /* Wipe data just returned from memory */
- memzero_explicit(tmp, sizeof(tmp));
-
- return ret;
+ return _extract_entropy(r, buf, nbytes, fips_enabled);
}
/*
@@ -1257,15 +1501,28 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
*/
void get_random_bytes(void *buf, int nbytes)
{
+ __u8 tmp[CHACHA20_BLOCK_SIZE];
+
#if DEBUG_RANDOM_BOOT > 0
- if (unlikely(nonblocking_pool.initialized == 0))
+ if (!crng_ready())
printk(KERN_NOTICE "random: %pF get_random_bytes called "
- "with %d bits of entropy available\n",
- (void *) _RET_IP_,
- nonblocking_pool.entropy_total);
+ "with crng_init = %d\n", (void *) _RET_IP_, crng_init);
#endif
trace_get_random_bytes(nbytes, _RET_IP_);
- extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
+
+ while (nbytes >= CHACHA20_BLOCK_SIZE) {
+ extract_crng(buf);
+ buf += CHACHA20_BLOCK_SIZE;
+ nbytes -= CHACHA20_BLOCK_SIZE;
+ }
+
+ if (nbytes > 0) {
+ extract_crng(tmp);
+ memcpy(buf, tmp, nbytes);
+ crng_backtrack_protect(tmp, nbytes);
+ } else
+ crng_backtrack_protect(tmp, CHACHA20_BLOCK_SIZE);
+ memzero_explicit(tmp, sizeof(tmp));
}
EXPORT_SYMBOL(get_random_bytes);
@@ -1283,7 +1540,7 @@ int add_random_ready_callback(struct random_ready_callback *rdy)
unsigned long flags;
int err = -EALREADY;
- if (likely(nonblocking_pool.initialized))
+ if (crng_ready())
return err;
owner = rdy->owner;
@@ -1291,7 +1548,7 @@ int add_random_ready_callback(struct random_ready_callback *rdy)
return -ENOENT;
spin_lock_irqsave(&random_ready_list_lock, flags);
- if (nonblocking_pool.initialized)
+ if (crng_ready())
goto out;
owner = NULL;
@@ -1355,7 +1612,7 @@ void get_random_bytes_arch(void *buf, int nbytes)
}
if (nbytes)
- extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
+ get_random_bytes(p, nbytes);
}
EXPORT_SYMBOL(get_random_bytes_arch);
@@ -1398,9 +1655,28 @@ static void init_std_data(struct entropy_store *r)
*/
static int rand_initialize(void)
{
+#ifdef CONFIG_NUMA
+ int i;
+ struct crng_state *crng;
+ struct crng_state **pool;
+#endif
+
init_std_data(&input_pool);
init_std_data(&blocking_pool);
- init_std_data(&nonblocking_pool);
+ crng_initialize(&primary_crng);
+
+#ifdef CONFIG_NUMA
+ pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
+ for_each_online_node(i) {
+ crng = kmalloc_node(sizeof(struct crng_state),
+ GFP_KERNEL | __GFP_NOFAIL, i);
+ spin_lock_init(&crng->lock);
+ crng_initialize(crng);
+ pool[i] = crng;
+ }
+ mb();
+ crng_node_pool = pool;
+#endif
return 0;
}
early_initcall(rand_initialize);
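The NUMA branch follows the usual publish-after-initialize pattern: build and seed every per-node object first, then issue a write barrier before storing the array pointer, so a reader that observes crng_node_pool != NULL also observes fully initialized entries. A condensed sketch of that pattern for a hypothetical per-node object (struct obj and obj_init are placeholders):

static struct obj **node_pool;	/* readers check this for NULL */

static int init_node_pool(void)
{
	struct obj **pool = kcalloc(nr_node_ids, sizeof(*pool),
				    GFP_KERNEL | __GFP_NOFAIL);
	int i;

	for_each_online_node(i) {
		struct obj *o = kmalloc_node(sizeof(*o),
					     GFP_KERNEL | __GFP_NOFAIL, i);
		obj_init(o);	/* fully initialize before publishing */
		pool[i] = o;
	}
	mb();			/* order initialization before the publish */
	node_pool = pool;
	return 0;
}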
@@ -1462,22 +1738,22 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
static ssize_t
urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
+ unsigned long flags;
static int maxwarn = 10;
int ret;
- if (unlikely(nonblocking_pool.initialized == 0) &&
- maxwarn > 0) {
+ if (!crng_ready() && maxwarn > 0) {
maxwarn--;
printk(KERN_NOTICE "random: %s: uninitialized urandom read "
- "(%zd bytes read, %d bits of entropy available)\n",
- current->comm, nbytes, nonblocking_pool.entropy_total);
+ "(%zd bytes read)\n",
+ current->comm, nbytes);
+ spin_lock_irqsave(&primary_crng.lock, flags);
+ crng_init_cnt = 0;
+ spin_unlock_irqrestore(&primary_crng.lock, flags);
}
-
nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
- ret = extract_entropy_user(&nonblocking_pool, buf, nbytes);
-
- trace_urandom_read(8 * nbytes, ENTROPY_BITS(&nonblocking_pool),
- ENTROPY_BITS(&input_pool));
+ ret = extract_crng_user(buf, nbytes);
+ trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool));
return ret;
}
@@ -1523,10 +1799,7 @@ static ssize_t random_write(struct file *file, const char __user *buffer,
{
size_t ret;
- ret = write_pool(&blocking_pool, buffer, count);
- if (ret)
- return ret;
- ret = write_pool(&nonblocking_pool, buffer, count);
+ ret = write_pool(&input_pool, buffer, count);
if (ret)
return ret;
@@ -1575,7 +1848,6 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
input_pool.entropy_count = 0;
- nonblocking_pool.entropy_count = 0;
blocking_pool.entropy_count = 0;
return 0;
default:
@@ -1617,11 +1889,10 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
if (flags & GRND_RANDOM)
return _random_read(flags & GRND_NONBLOCK, buf, count);
- if (unlikely(nonblocking_pool.initialized == 0)) {
+ if (!crng_ready()) {
if (flags & GRND_NONBLOCK)
return -EAGAIN;
- wait_event_interruptible(urandom_init_wait,
- nonblocking_pool.initialized);
+ crng_wait_ready();
if (signal_pending(current))
return -ERESTARTSYS;
}
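After this change, getrandom(2) without GRND_RANDOM blocks until the CRNG reports ready rather than until the old nonblocking pool initialized; the userspace contract is unchanged. For reference, a typical caller via the raw syscall (assuming kernel headers that define SYS_getrandom):

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	/* flags = 0: may block at early boot until the CRNG is ready;
	 * GRND_NONBLOCK would return -EAGAIN instead */
	long n = syscall(SYS_getrandom, buf, sizeof(buf), 0);

	printf("got %ld random bytes\n", n);
	return n == (long)sizeof(buf) ? 0 : 1;
}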
@@ -1857,18 +2128,17 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
{
struct entropy_store *poolp = &input_pool;
- if (unlikely(nonblocking_pool.initialized == 0))
- poolp = &nonblocking_pool;
- else {
- /* Suspend writing if we're above the trickle
- * threshold. We'll be woken up again once below
- * random_write_wakeup_thresh, or when the calling
- * thread is about to terminate.
- */
- wait_event_interruptible(random_write_wait,
- kthread_should_stop() ||
- ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
+ if (!crng_ready()) {
+ crng_fast_load(buffer, count);
+ return;
}
+
+ /* Suspend writing if we're above the trickle threshold.
+ * We'll be woken up again once below random_write_wakeup_thresh,
+ * or when the calling thread is about to terminate.
+ */
+ wait_event_interruptible(random_write_wait, kthread_should_stop() ||
+ ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
mix_pool_bytes(poolp, buffer, count);
credit_entropy_bits(poolp, entropy);
}
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 3b84a8b1b..9faa0b1e7 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -24,9 +24,16 @@ menuconfig TCG_TPM
if TCG_TPM
+config TCG_TIS_CORE
+ tristate
+ ---help---
+ TCG TIS TPM core driver. It implements the TPM TCG TIS logic and hooks
+ into the TPM kernel APIs. Physical layers will register against it.
+
config TCG_TIS
tristate "TPM Interface Specification 1.2 Interface / TPM 2.0 FIFO Interface"
depends on X86
+ select TCG_TIS_CORE
---help---
If you have a TPM security chip that is compliant with the
TCG TIS 1.2 TPM specification (TPM1.2) or the TCG PTP FIFO
@@ -34,6 +41,18 @@ config TCG_TIS
within Linux. To compile this driver as a module, choose M here;
the module will be called tpm_tis.
+config TCG_TIS_SPI
+ tristate "TPM Interface Specification 1.3 Interface / TPM 2.0 FIFO Interface - (SPI)"
+ depends on SPI
+ select TCG_TIS_CORE
+ ---help---
+ If you have a TPM security chip which is connected to a regular,
+ non-tcg SPI master (i.e. most embedded platforms) that is compliant with the
+ TCG TIS 1.3 TPM specification (TPM1.2) or the TCG PTP FIFO
+ specification (TPM2.0) say Yes and it will be accessible from
+ within Linux. To compile this driver as a module, choose M here;
+ the module will be called tpm_tis_spi.
+
config TCG_TIS_I2C_ATMEL
tristate "TPM Interface Specification 1.2 Interface (I2C - Atmel)"
depends on I2C
@@ -122,5 +141,16 @@ config TCG_CRB
from within Linux. To compile this driver as a module, choose
M here; the module will be called tpm_crb.
+config TCG_VTPM_PROXY
+ tristate "VTPM Proxy Interface"
+ depends on TCG_TPM
+ select ANON_INODES
+ ---help---
+ This driver proxies for an emulated TPM (vTPM) running in userspace.
+ A device /dev/vtpmx is provided that creates a device pair
+ /dev/vtpmX and a server-side file descriptor on which the vTPM
+ can receive commands.
+
source "drivers/char/tpm/st33zp24/Kconfig"
endif # TCG_TPM
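For completeness, a .config fragment exercising the new options; TCG_TIS_CORE is selected implicitly by TCG_TIS and TCG_TIS_SPI and is never set by hand:

CONFIG_TCG_TPM=m
CONFIG_TCG_TIS_SPI=m
CONFIG_TCG_VTPM_PROXY=m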
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 56e8f1f3d..a385fb8c1 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -12,7 +12,9 @@ ifdef CONFIG_TCG_IBMVTPM
tpm-y += tpm_eventlog.o tpm_of.o
endif
endif
+obj-$(CONFIG_TCG_TIS_CORE) += tpm_tis_core.o
obj-$(CONFIG_TCG_TIS) += tpm_tis.o
+obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi.o
obj-$(CONFIG_TCG_TIS_I2C_ATMEL) += tpm_i2c_atmel.o
obj-$(CONFIG_TCG_TIS_I2C_INFINEON) += tpm_i2c_infineon.o
obj-$(CONFIG_TCG_TIS_I2C_NUVOTON) += tpm_i2c_nuvoton.o
@@ -23,3 +25,4 @@ obj-$(CONFIG_TCG_IBMVTPM) += tpm_ibmvtpm.o
obj-$(CONFIG_TCG_TIS_ST33ZP24) += st33zp24/
obj-$(CONFIG_TCG_XEN) += xen-tpmfront.o
obj-$(CONFIG_TCG_CRB) += tpm_crb.o
+obj-$(CONFIG_TCG_VTPM_PROXY) += tpm_vtpm_proxy.o
diff --git a/drivers/char/tpm/st33zp24/Kconfig b/drivers/char/tpm/st33zp24/Kconfig
index 19c007461..e74c6f29f 100644
--- a/drivers/char/tpm/st33zp24/Kconfig
+++ b/drivers/char/tpm/st33zp24/Kconfig
@@ -1,6 +1,5 @@
config TCG_TIS_ST33ZP24
- tristate "STMicroelectronics TPM Interface Specification 1.2 Interface"
- depends on GPIOLIB || COMPILE_TEST
+ tristate
---help---
STMicroelectronics ST33ZP24 core driver. It implements the core
TPM1.2 logic and hooks into the TPM kernel APIs. Physical layers will
@@ -10,9 +9,9 @@ config TCG_TIS_ST33ZP24
tpm_st33zp24.
config TCG_TIS_ST33ZP24_I2C
- tristate "TPM 1.2 ST33ZP24 I2C support"
- depends on TCG_TIS_ST33ZP24
+ tristate "STMicroelectronics TPM Interface Specification 1.2 Interface (I2C)"
depends on I2C
+ select TCG_TIS_ST33ZP24
---help---
This module adds support for the STMicroelectronics TPM security chip
ST33ZP24 with i2c interface.
@@ -20,9 +19,9 @@ config TCG_TIS_ST33ZP24_I2C
called tpm_st33zp24_i2c.
config TCG_TIS_ST33ZP24_SPI
- tristate "TPM 1.2 ST33ZP24 SPI support"
- depends on TCG_TIS_ST33ZP24
+ tristate "STMicroelectronics TPM Interface Specification 1.2 Interface (SPI)"
depends on SPI
+ select TCG_TIS_ST33ZP24
---help---
This module adds support for the STMicroelectronics TPM security chip
ST33ZP24 with spi interface.
diff --git a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c
index 309d2767c..028a9cd76 100644
--- a/drivers/char/tpm/st33zp24/i2c.c
+++ b/drivers/char/tpm/st33zp24/i2c.c
@@ -1,6 +1,6 @@
/*
* STMicroelectronics TPM I2C Linux driver for TPM ST33ZP24
- * Copyright (C) 2009 - 2015 STMicroelectronics
+ * Copyright (C) 2009 - 2016 STMicroelectronics
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,11 +19,14 @@
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/of_irq.h>
#include <linux/of_gpio.h>
+#include <linux/acpi.h>
#include <linux/tpm.h>
#include <linux/platform_data/st33zp24.h>
+#include "../tpm.h"
#include "st33zp24.h"
#define TPM_DUMMY_BYTE 0xAA
@@ -108,11 +111,40 @@ static const struct st33zp24_phy_ops i2c_phy_ops = {
.recv = st33zp24_i2c_recv,
};
-#ifdef CONFIG_OF
-static int st33zp24_i2c_of_request_resources(struct st33zp24_i2c_phy *phy)
+static int st33zp24_i2c_acpi_request_resources(struct i2c_client *client)
{
+ struct tpm_chip *chip = i2c_get_clientdata(client);
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+ struct st33zp24_i2c_phy *phy = tpm_dev->phy_id;
+ struct gpio_desc *gpiod_lpcpd;
+ struct device *dev = &client->dev;
+
+ /* Get LPCPD GPIO from ACPI */
+ gpiod_lpcpd = devm_gpiod_get_index(dev, "TPM IO LPCPD", 1,
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(gpiod_lpcpd)) {
+ dev_err(&client->dev,
+ "Failed to retrieve lpcpd-gpios from acpi.\n");
+ phy->io_lpcpd = -1;
+ /*
+ * lpcpd pin is not specified. This is not an issue as
+ * power management can also be managed by TPM specific
+ * commands. So leave with a success status code.
+ */
+ return 0;
+ }
+
+ phy->io_lpcpd = desc_to_gpio(gpiod_lpcpd);
+
+ return 0;
+}
+
+static int st33zp24_i2c_of_request_resources(struct i2c_client *client)
+{
+ struct tpm_chip *chip = i2c_get_clientdata(client);
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+ struct st33zp24_i2c_phy *phy = tpm_dev->phy_id;
struct device_node *pp;
- struct i2c_client *client = phy->client;
int gpio;
int ret;
@@ -146,16 +178,12 @@ static int st33zp24_i2c_of_request_resources(struct st33zp24_i2c_phy *phy)
return 0;
}
-#else
-static int st33zp24_i2c_of_request_resources(struct st33zp24_i2c_phy *phy)
-{
- return -ENODEV;
-}
-#endif
-static int st33zp24_i2c_request_resources(struct i2c_client *client,
- struct st33zp24_i2c_phy *phy)
+static int st33zp24_i2c_request_resources(struct i2c_client *client)
{
+ struct tpm_chip *chip = i2c_get_clientdata(client);
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+ struct st33zp24_i2c_phy *phy = tpm_dev->phy_id;
struct st33zp24_platform_data *pdata;
int ret;
@@ -212,13 +240,18 @@ static int st33zp24_i2c_probe(struct i2c_client *client,
return -ENOMEM;
phy->client = client;
+
pdata = client->dev.platform_data;
if (!pdata && client->dev.of_node) {
- ret = st33zp24_i2c_of_request_resources(phy);
+ ret = st33zp24_i2c_of_request_resources(client);
if (ret)
return ret;
} else if (pdata) {
- ret = st33zp24_i2c_request_resources(client, phy);
+ ret = st33zp24_i2c_request_resources(client);
+ if (ret)
+ return ret;
+ } else if (ACPI_HANDLE(&client->dev)) {
+ ret = st33zp24_i2c_acpi_request_resources(client);
if (ret)
return ret;
}
@@ -245,13 +278,17 @@ static const struct i2c_device_id st33zp24_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, st33zp24_i2c_id);
-#ifdef CONFIG_OF
static const struct of_device_id of_st33zp24_i2c_match[] = {
{ .compatible = "st,st33zp24-i2c", },
{}
};
MODULE_DEVICE_TABLE(of, of_st33zp24_i2c_match);
-#endif
+
+static const struct acpi_device_id st33zp24_i2c_acpi_match[] = {
+ {"SMO3324"},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, st33zp24_i2c_acpi_match);
static SIMPLE_DEV_PM_OPS(st33zp24_i2c_ops, st33zp24_pm_suspend,
st33zp24_pm_resume);
@@ -261,6 +298,7 @@ static struct i2c_driver st33zp24_i2c_driver = {
.name = TPM_ST33_I2C,
.pm = &st33zp24_i2c_ops,
.of_match_table = of_match_ptr(of_st33zp24_i2c_match),
+ .acpi_match_table = ACPI_PTR(st33zp24_i2c_acpi_match),
},
.probe = st33zp24_i2c_probe,
.remove = st33zp24_i2c_remove,
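With the CONFIG_OF guards dropped, the match tables are built unconditionally and the driver advertises both firmware interfaces; of_match_ptr() and ACPI_PTR() still compile the references down to NULL when CONFIG_OF or CONFIG_ACPI is disabled. The same wiring for a hypothetical I2C driver:

static const struct of_device_id my_of_match[] = {
	{ .compatible = "vendor,my-chip" },
	{ }
};
MODULE_DEVICE_TABLE(of, my_of_match);

static const struct acpi_device_id my_acpi_match[] = {
	{ "ABCD0001" },
	{ }
};
MODULE_DEVICE_TABLE(acpi, my_acpi_match);

static struct i2c_driver my_driver = {
	.driver = {
		.name = "my-chip",
		.of_match_table = of_match_ptr(my_of_match),
		.acpi_match_table = ACPI_PTR(my_acpi_match),
	},
	/* .probe / .remove / .id_table as usual */
};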
diff --git a/drivers/char/tpm/st33zp24/spi.c b/drivers/char/tpm/st33zp24/spi.c
index f974c945c..9f5a01170 100644
--- a/drivers/char/tpm/st33zp24/spi.c
+++ b/drivers/char/tpm/st33zp24/spi.c
@@ -1,6 +1,6 @@
/*
* STMicroelectronics TPM SPI Linux driver for TPM ST33ZP24
- * Copyright (C) 2009 - 2015 STMicroelectronics
+ * Copyright (C) 2009 - 2016 STMicroelectronics
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,11 +19,14 @@
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/of_irq.h>
#include <linux/of_gpio.h>
+#include <linux/acpi.h>
#include <linux/tpm.h>
#include <linux/platform_data/st33zp24.h>
+#include "../tpm.h"
#include "st33zp24.h"
#define TPM_DATA_FIFO 0x24
@@ -66,7 +69,7 @@
struct st33zp24_spi_phy {
struct spi_device *spi_device;
- struct spi_transfer spi_xfer;
+
u8 tx_buf[ST33ZP24_SPI_BUFFER_SIZE];
u8 rx_buf[ST33ZP24_SPI_BUFFER_SIZE];
@@ -110,43 +113,39 @@ static int st33zp24_status_to_errno(u8 code)
static int st33zp24_spi_send(void *phy_id, u8 tpm_register, u8 *tpm_data,
int tpm_size)
{
- u8 data = 0;
- int total_length = 0, nbr_dummy_bytes = 0, ret = 0;
+ int total_length = 0, ret = 0;
struct st33zp24_spi_phy *phy = phy_id;
struct spi_device *dev = phy->spi_device;
- u8 *tx_buf = (u8 *)phy->spi_xfer.tx_buf;
- u8 *rx_buf = phy->spi_xfer.rx_buf;
+ struct spi_transfer spi_xfer = {
+ .tx_buf = phy->tx_buf,
+ .rx_buf = phy->rx_buf,
+ };
/* Pre-Header */
- data = TPM_WRITE_DIRECTION | LOCALITY0;
- memcpy(tx_buf + total_length, &data, sizeof(data));
- total_length++;
- data = tpm_register;
- memcpy(tx_buf + total_length, &data, sizeof(data));
- total_length++;
+ phy->tx_buf[total_length++] = TPM_WRITE_DIRECTION | LOCALITY0;
+ phy->tx_buf[total_length++] = tpm_register;
if (tpm_size > 0 && tpm_register == TPM_DATA_FIFO) {
- tx_buf[total_length++] = tpm_size >> 8;
- tx_buf[total_length++] = tpm_size;
+ phy->tx_buf[total_length++] = tpm_size >> 8;
+ phy->tx_buf[total_length++] = tpm_size;
}
- memcpy(&tx_buf[total_length], tpm_data, tpm_size);
+ memcpy(&phy->tx_buf[total_length], tpm_data, tpm_size);
total_length += tpm_size;
- nbr_dummy_bytes = phy->latency;
- memset(&tx_buf[total_length], TPM_DUMMY_BYTE, nbr_dummy_bytes);
+ memset(&phy->tx_buf[total_length], TPM_DUMMY_BYTE, phy->latency);
- phy->spi_xfer.len = total_length + nbr_dummy_bytes;
+ spi_xfer.len = total_length + phy->latency;
- ret = spi_sync_transfer(dev, &phy->spi_xfer, 1);
+ ret = spi_sync_transfer(dev, &spi_xfer, 1);
if (ret == 0)
- ret = rx_buf[total_length + nbr_dummy_bytes - 1];
+ ret = phy->rx_buf[total_length + phy->latency - 1];
return st33zp24_status_to_errno(ret);
} /* st33zp24_spi_send() */
/*
- * read8_recv
+ * st33zp24_spi_read8_reg
* Recv byte from the TIS register according to the ST33ZP24 SPI protocol.
* @param: phy_id, the phy description
* @param: tpm_register, the tpm tis register where the data should be read
@@ -154,40 +153,37 @@ static int st33zp24_spi_send(void *phy_id, u8 tpm_register, u8 *tpm_data,
* @param: tpm_size, tpm TPM response size to read.
* @return: should be zero if success else a negative error code.
*/
-static int read8_reg(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size)
+static int st33zp24_spi_read8_reg(void *phy_id, u8 tpm_register, u8 *tpm_data,
+ int tpm_size)
{
- u8 data = 0;
- int total_length = 0, nbr_dummy_bytes, ret;
+ int total_length = 0, ret;
struct st33zp24_spi_phy *phy = phy_id;
struct spi_device *dev = phy->spi_device;
- u8 *tx_buf = (u8 *)phy->spi_xfer.tx_buf;
- u8 *rx_buf = phy->spi_xfer.rx_buf;
+ struct spi_transfer spi_xfer = {
+ .tx_buf = phy->tx_buf,
+ .rx_buf = phy->rx_buf,
+ };
/* Pre-Header */
- data = LOCALITY0;
- memcpy(tx_buf + total_length, &data, sizeof(data));
- total_length++;
- data = tpm_register;
- memcpy(tx_buf + total_length, &data, sizeof(data));
- total_length++;
+ phy->tx_buf[total_length++] = LOCALITY0;
+ phy->tx_buf[total_length++] = tpm_register;
- nbr_dummy_bytes = phy->latency;
- memset(&tx_buf[total_length], TPM_DUMMY_BYTE,
- nbr_dummy_bytes + tpm_size);
+ memset(&phy->tx_buf[total_length], TPM_DUMMY_BYTE,
+ phy->latency + tpm_size);
- phy->spi_xfer.len = total_length + nbr_dummy_bytes + tpm_size;
+ spi_xfer.len = total_length + phy->latency + tpm_size;
/* header + status byte + size of the data + status byte */
- ret = spi_sync_transfer(dev, &phy->spi_xfer, 1);
+ ret = spi_sync_transfer(dev, &spi_xfer, 1);
if (tpm_size > 0 && ret == 0) {
- ret = rx_buf[total_length + nbr_dummy_bytes - 1];
+ ret = phy->rx_buf[total_length + phy->latency - 1];
- memcpy(tpm_data, rx_buf + total_length + nbr_dummy_bytes,
+ memcpy(tpm_data, phy->rx_buf + total_length + phy->latency,
tpm_size);
}
return ret;
-} /* read8_reg() */
+} /* st33zp24_spi_read8_reg() */
/*
* st33zp24_spi_recv
@@ -203,13 +199,13 @@ static int st33zp24_spi_recv(void *phy_id, u8 tpm_register, u8 *tpm_data,
{
int ret;
- ret = read8_reg(phy_id, tpm_register, tpm_data, tpm_size);
+ ret = st33zp24_spi_read8_reg(phy_id, tpm_register, tpm_data, tpm_size);
if (!st33zp24_status_to_errno(ret))
return tpm_size;
return ret;
} /* st33zp24_spi_recv() */
-static int evaluate_latency(void *phy_id)
+static int st33zp24_spi_evaluate_latency(void *phy_id)
{
struct st33zp24_spi_phy *phy = phy_id;
int latency = 1, status = 0;
@@ -217,9 +213,15 @@ static int evaluate_latency(void *phy_id)
while (!status && latency < MAX_SPI_LATENCY) {
phy->latency = latency;
- status = read8_reg(phy_id, TPM_INTF_CAPABILITY, &data, 1);
+ status = st33zp24_spi_read8_reg(phy_id, TPM_INTF_CAPABILITY,
+ &data, 1);
latency++;
}
+ if (status < 0)
+ return status;
+ if (latency == MAX_SPI_LATENCY)
+ return -ENODEV;
+
return latency - 1;
} /* evaluate_latency() */
@@ -228,24 +230,52 @@ static const struct st33zp24_phy_ops spi_phy_ops = {
.recv = st33zp24_spi_recv,
};
-#ifdef CONFIG_OF
-static int tpm_stm_spi_of_request_resources(struct st33zp24_spi_phy *phy)
+static int st33zp24_spi_acpi_request_resources(struct spi_device *spi_dev)
{
+ struct tpm_chip *chip = spi_get_drvdata(spi_dev);
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+ struct st33zp24_spi_phy *phy = tpm_dev->phy_id;
+ struct gpio_desc *gpiod_lpcpd;
+ struct device *dev = &spi_dev->dev;
+
+ /* Get LPCPD GPIO from ACPI */
+ gpiod_lpcpd = devm_gpiod_get_index(dev, "TPM IO LPCPD", 1,
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(gpiod_lpcpd)) {
+ dev_err(dev, "Failed to retrieve lpcpd-gpios from acpi.\n");
+ phy->io_lpcpd = -1;
+ /*
+ * lpcpd pin is not specified. This is not an issue as
+ * power management can also be managed by TPM specific
+ * commands. So leave with a success status code.
+ */
+ return 0;
+ }
+
+ phy->io_lpcpd = desc_to_gpio(gpiod_lpcpd);
+
+ return 0;
+}
+
+static int st33zp24_spi_of_request_resources(struct spi_device *spi_dev)
+{
+ struct tpm_chip *chip = spi_get_drvdata(spi_dev);
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+ struct st33zp24_spi_phy *phy = tpm_dev->phy_id;
struct device_node *pp;
- struct spi_device *dev = phy->spi_device;
int gpio;
int ret;
- pp = dev->dev.of_node;
+ pp = spi_dev->dev.of_node;
if (!pp) {
- dev_err(&dev->dev, "No platform data\n");
+ dev_err(&spi_dev->dev, "No platform data\n");
return -ENODEV;
}
/* Get GPIO from device tree */
gpio = of_get_named_gpio(pp, "lpcpd-gpios", 0);
if (gpio < 0) {
- dev_err(&dev->dev,
+ dev_err(&spi_dev->dev,
"Failed to retrieve lpcpd-gpios from dts.\n");
phy->io_lpcpd = -1;
/*
@@ -256,26 +286,22 @@ static int tpm_stm_spi_of_request_resources(struct st33zp24_spi_phy *phy)
return 0;
}
/* GPIO request and configuration */
- ret = devm_gpio_request_one(&dev->dev, gpio,
+ ret = devm_gpio_request_one(&spi_dev->dev, gpio,
GPIOF_OUT_INIT_HIGH, "TPM IO LPCPD");
if (ret) {
- dev_err(&dev->dev, "Failed to request lpcpd pin\n");
+ dev_err(&spi_dev->dev, "Failed to request lpcpd pin\n");
return -ENODEV;
}
phy->io_lpcpd = gpio;
return 0;
}
-#else
-static int tpm_stm_spi_of_request_resources(struct st33zp24_spi_phy *phy)
-{
- return -ENODEV;
-}
-#endif
-static int tpm_stm_spi_request_resources(struct spi_device *dev,
- struct st33zp24_spi_phy *phy)
+static int st33zp24_spi_request_resources(struct spi_device *dev)
{
+ struct tpm_chip *chip = spi_get_drvdata(dev);
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
+ struct st33zp24_spi_phy *phy = tpm_dev->phy_id;
struct st33zp24_platform_data *pdata;
int ret;
@@ -303,13 +329,12 @@ static int tpm_stm_spi_request_resources(struct spi_device *dev,
}
/*
- * tpm_st33_spi_probe initialize the TPM device
+ * st33zp24_spi_probe initializes the TPM device
* @param: dev, the spi_device description (TPM SPI description).
* @return: 0 in case of success.
* or a negative value describing the error.
*/
-static int
-tpm_st33_spi_probe(struct spi_device *dev)
+static int st33zp24_spi_probe(struct spi_device *dev)
{
int ret;
struct st33zp24_platform_data *pdata;
@@ -328,21 +353,23 @@ tpm_st33_spi_probe(struct spi_device *dev)
return -ENOMEM;
phy->spi_device = dev;
+
pdata = dev->dev.platform_data;
if (!pdata && dev->dev.of_node) {
- ret = tpm_stm_spi_of_request_resources(phy);
+ ret = st33zp24_spi_of_request_resources(dev);
if (ret)
return ret;
} else if (pdata) {
- ret = tpm_stm_spi_request_resources(dev, phy);
+ ret = st33zp24_spi_request_resources(dev);
+ if (ret)
+ return ret;
+ } else if (ACPI_HANDLE(&dev->dev)) {
+ ret = st33zp24_spi_acpi_request_resources(dev);
if (ret)
return ret;
}
- phy->spi_xfer.tx_buf = phy->tx_buf;
- phy->spi_xfer.rx_buf = phy->rx_buf;
-
- phy->latency = evaluate_latency(phy);
+ phy->latency = st33zp24_spi_evaluate_latency(phy);
if (phy->latency <= 0)
return -ENODEV;
@@ -351,11 +378,11 @@ tpm_st33_spi_probe(struct spi_device *dev)
}
/*
- * tpm_st33_spi_remove remove the TPM device
+ * st33zp24_spi_remove removes the TPM device
* @param: client, the spi_device description (TPM SPI description).
* @return: 0 in case of success.
*/
-static int tpm_st33_spi_remove(struct spi_device *dev)
+static int st33zp24_spi_remove(struct spi_device *dev)
{
struct tpm_chip *chip = spi_get_drvdata(dev);
@@ -368,29 +395,34 @@ static const struct spi_device_id st33zp24_spi_id[] = {
};
MODULE_DEVICE_TABLE(spi, st33zp24_spi_id);
-#ifdef CONFIG_OF
static const struct of_device_id of_st33zp24_spi_match[] = {
{ .compatible = "st,st33zp24-spi", },
{}
};
MODULE_DEVICE_TABLE(of, of_st33zp24_spi_match);
-#endif
+
+static const struct acpi_device_id st33zp24_spi_acpi_match[] = {
+ {"SMO3324"},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, st33zp24_spi_acpi_match);
static SIMPLE_DEV_PM_OPS(st33zp24_spi_ops, st33zp24_pm_suspend,
st33zp24_pm_resume);
-static struct spi_driver tpm_st33_spi_driver = {
+static struct spi_driver st33zp24_spi_driver = {
.driver = {
.name = TPM_ST33_SPI,
.pm = &st33zp24_spi_ops,
.of_match_table = of_match_ptr(of_st33zp24_spi_match),
+ .acpi_match_table = ACPI_PTR(st33zp24_spi_acpi_match),
},
- .probe = tpm_st33_spi_probe,
- .remove = tpm_st33_spi_remove,
+ .probe = st33zp24_spi_probe,
+ .remove = st33zp24_spi_remove,
.id_table = st33zp24_spi_id,
};
-module_spi_driver(tpm_st33_spi_driver);
+module_spi_driver(st33zp24_spi_driver);
MODULE_AUTHOR("TPM support (TPMsupport@list.st.com)");
MODULE_DESCRIPTION("STM TPM 1.2 SPI ST33 Driver");
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
index 8d626784c..c2ee30451 100644
--- a/drivers/char/tpm/st33zp24/st33zp24.c
+++ b/drivers/char/tpm/st33zp24/st33zp24.c
@@ -1,6 +1,6 @@
/*
* STMicroelectronics TPM Linux driver for TPM ST33ZP24
- * Copyright (C) 2009 - 2015 STMicroelectronics
+ * Copyright (C) 2009 - 2016 STMicroelectronics
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -73,14 +73,6 @@ enum tis_defaults {
TIS_LONG_TIMEOUT = 2000,
};
-struct st33zp24_dev {
- struct tpm_chip *chip;
- void *phy_id;
- const struct st33zp24_phy_ops *ops;
- u32 intrs;
- int io_lpcpd;
-};
-
/*
* clear_interruption clear the pending interrupt.
* @param: tpm_dev, the tpm device device.
@@ -102,11 +94,9 @@ static u8 clear_interruption(struct st33zp24_dev *tpm_dev)
*/
static void st33zp24_cancel(struct tpm_chip *chip)
{
- struct st33zp24_dev *tpm_dev;
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
u8 data;
- tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip);
-
data = TPM_STS_COMMAND_READY;
tpm_dev->ops->send(tpm_dev->phy_id, TPM_STS, &data, 1);
} /* st33zp24_cancel() */
@@ -118,11 +108,9 @@ static void st33zp24_cancel(struct tpm_chip *chip)
*/
static u8 st33zp24_status(struct tpm_chip *chip)
{
- struct st33zp24_dev *tpm_dev;
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
u8 data;
- tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip);
-
tpm_dev->ops->recv(tpm_dev->phy_id, TPM_STS, &data, 1);
return data;
} /* st33zp24_status() */
@@ -134,17 +122,15 @@ static u8 st33zp24_status(struct tpm_chip *chip)
*/
static int check_locality(struct tpm_chip *chip)
{
- struct st33zp24_dev *tpm_dev;
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
u8 data;
u8 status;
- tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip);
-
status = tpm_dev->ops->recv(tpm_dev->phy_id, TPM_ACCESS, &data, 1);
if (status && (data &
(TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
(TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
- return chip->vendor.locality;
+ return tpm_dev->locality;
return -EACCES;
} /* check_locality() */
@@ -156,27 +142,25 @@ static int check_locality(struct tpm_chip *chip)
*/
static int request_locality(struct tpm_chip *chip)
{
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
unsigned long stop;
long ret;
- struct st33zp24_dev *tpm_dev;
u8 data;
- if (check_locality(chip) == chip->vendor.locality)
- return chip->vendor.locality;
-
- tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip);
+ if (check_locality(chip) == tpm_dev->locality)
+ return tpm_dev->locality;
data = TPM_ACCESS_REQUEST_USE;
ret = tpm_dev->ops->send(tpm_dev->phy_id, TPM_ACCESS, &data, 1);
if (ret < 0)
return ret;
- stop = jiffies + chip->vendor.timeout_a;
+ stop = jiffies + chip->timeout_a;
/* Request locality is usually effective after the request */
do {
if (check_locality(chip) >= 0)
- return chip->vendor.locality;
+ return tpm_dev->locality;
msleep(TPM_TIMEOUT);
} while (time_before(jiffies, stop));
@@ -190,10 +174,9 @@ static int request_locality(struct tpm_chip *chip)
*/
static void release_locality(struct tpm_chip *chip)
{
- struct st33zp24_dev *tpm_dev;
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
u8 data;
- tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip);
data = TPM_ACCESS_ACTIVE_LOCALITY;
tpm_dev->ops->send(tpm_dev->phy_id, TPM_ACCESS, &data, 1);
@@ -206,23 +189,21 @@ static void release_locality(struct tpm_chip *chip)
*/
static int get_burstcount(struct tpm_chip *chip)
{
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
unsigned long stop;
int burstcnt, status;
- u8 tpm_reg, temp;
- struct st33zp24_dev *tpm_dev;
-
- tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip);
+ u8 temp;
- stop = jiffies + chip->vendor.timeout_d;
+ stop = jiffies + chip->timeout_d;
do {
- tpm_reg = TPM_STS + 1;
- status = tpm_dev->ops->recv(tpm_dev->phy_id, tpm_reg, &temp, 1);
+ status = tpm_dev->ops->recv(tpm_dev->phy_id, TPM_STS + 1,
+ &temp, 1);
if (status < 0)
return -EBUSY;
- tpm_reg = TPM_STS + 2;
burstcnt = temp;
- status = tpm_dev->ops->recv(tpm_dev->phy_id, tpm_reg, &temp, 1);
+ status = tpm_dev->ops->recv(tpm_dev->phy_id, TPM_STS + 2,
+ &temp, 1);
if (status < 0)
return -EBUSY;
@@ -271,15 +252,13 @@ static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
wait_queue_head_t *queue, bool check_cancel)
{
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
unsigned long stop;
int ret = 0;
bool canceled = false;
bool condition;
u32 cur_intrs;
u8 status;
- struct st33zp24_dev *tpm_dev;
-
- tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip);
/* check current status */
status = st33zp24_status(chip);
@@ -288,10 +267,10 @@ static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
stop = jiffies + timeout;
- if (chip->vendor.irq) {
+ if (chip->flags & TPM_CHIP_FLAG_IRQ) {
cur_intrs = tpm_dev->intrs;
clear_interruption(tpm_dev);
- enable_irq(chip->vendor.irq);
+ enable_irq(tpm_dev->irq);
do {
if (ret == -ERESTARTSYS && freezing(current))
@@ -314,7 +293,7 @@ static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
}
} while (ret == -ERESTARTSYS && freezing(current));
- disable_irq_nosync(chip->vendor.irq);
+ disable_irq_nosync(tpm_dev->irq);
} else {
do {
@@ -337,16 +316,14 @@ static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
*/
static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
{
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
int size = 0, burstcnt, len, ret;
- struct st33zp24_dev *tpm_dev;
-
- tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip);
while (size < count &&
wait_for_stat(chip,
TPM_STS_DATA_AVAIL | TPM_STS_VALID,
- chip->vendor.timeout_c,
- &chip->vendor.read_queue, true) == 0) {
+ chip->timeout_c,
+ &tpm_dev->read_queue, true) == 0) {
burstcnt = get_burstcount(chip);
if (burstcnt < 0)
return burstcnt;
@@ -370,13 +347,11 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
static irqreturn_t tpm_ioserirq_handler(int irq, void *dev_id)
{
struct tpm_chip *chip = dev_id;
- struct st33zp24_dev *tpm_dev;
-
- tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip);
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
tpm_dev->intrs++;
- wake_up_interruptible(&chip->vendor.read_queue);
- disable_irq_nosync(chip->vendor.irq);
+ wake_up_interruptible(&tpm_dev->read_queue);
+ disable_irq_nosync(tpm_dev->irq);
return IRQ_HANDLED;
} /* tpm_ioserirq_handler() */
@@ -393,19 +368,17 @@ static irqreturn_t tpm_ioserirq_handler(int irq, void *dev_id)
static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf,
size_t len)
{
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
u32 status, i, size, ordinal;
int burstcnt = 0;
int ret;
u8 data;
- struct st33zp24_dev *tpm_dev;
if (!chip)
return -EBUSY;
if (len < TPM_HEADER_SIZE)
return -EBUSY;
- tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip);
-
ret = request_locality(chip);
if (ret < 0)
return ret;
@@ -414,8 +387,8 @@ static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf,
if ((status & TPM_STS_COMMAND_READY) == 0) {
st33zp24_cancel(chip);
if (wait_for_stat
- (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
- &chip->vendor.read_queue, false) < 0) {
+ (chip, TPM_STS_COMMAND_READY, chip->timeout_b,
+ &tpm_dev->read_queue, false) < 0) {
ret = -ETIME;
goto out_err;
}
@@ -456,12 +429,12 @@ static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf,
if (ret < 0)
goto out_err;
- if (chip->vendor.irq) {
+ if (chip->flags & TPM_CHIP_FLAG_IRQ) {
ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
ret = wait_for_stat(chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
tpm_calc_ordinal_duration(chip, ordinal),
- &chip->vendor.read_queue, false);
+ &tpm_dev->read_queue, false);
if (ret < 0)
goto out_err;
}
@@ -532,6 +505,7 @@ static bool st33zp24_req_canceled(struct tpm_chip *chip, u8 status)
}
static const struct tpm_class_ops st33zp24_tpm = {
+ .flags = TPM_OPS_AUTO_STARTUP,
.send = st33zp24_send,
.recv = st33zp24_recv,
.cancel = st33zp24_cancel,
@@ -565,20 +539,20 @@ int st33zp24_probe(void *phy_id, const struct st33zp24_phy_ops *ops,
if (!tpm_dev)
return -ENOMEM;
- TPM_VPRIV(chip) = tpm_dev;
tpm_dev->phy_id = phy_id;
tpm_dev->ops = ops;
+ dev_set_drvdata(&chip->dev, tpm_dev);
- chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
- chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
- chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
- chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
+ chip->timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
- chip->vendor.locality = LOCALITY0;
+ tpm_dev->locality = LOCALITY0;
if (irq) {
/* INTERRUPT Setup */
- init_waitqueue_head(&chip->vendor.read_queue);
+ init_waitqueue_head(&tpm_dev->read_queue);
tpm_dev->intrs = 0;
if (request_locality(chip) != LOCALITY0) {
@@ -611,16 +585,14 @@ int st33zp24_probe(void *phy_id, const struct st33zp24_phy_ops *ops,
if (ret < 0)
goto _tpm_clean_answer;
- chip->vendor.irq = irq;
+ tpm_dev->irq = irq;
+ chip->flags |= TPM_CHIP_FLAG_IRQ;
- disable_irq_nosync(chip->vendor.irq);
+ disable_irq_nosync(tpm_dev->irq);
tpm_gen_interrupt(chip);
}
- tpm_get_timeouts(chip);
- tpm_do_selftest(chip);
-
return tpm_chip_register(chip);
_tpm_clean_answer:
dev_info(&chip->dev, "TPM initialization fail\n");
@@ -650,10 +622,9 @@ EXPORT_SYMBOL(st33zp24_remove);
int st33zp24_pm_suspend(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
- struct st33zp24_dev *tpm_dev;
- int ret = 0;
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
- tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip);
+ int ret = 0;
if (gpio_is_valid(tpm_dev->io_lpcpd))
gpio_set_value(tpm_dev->io_lpcpd, 0);
@@ -672,16 +643,14 @@ EXPORT_SYMBOL(st33zp24_pm_suspend);
int st33zp24_pm_resume(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
- struct st33zp24_dev *tpm_dev;
+ struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
int ret = 0;
- tpm_dev = (struct st33zp24_dev *)TPM_VPRIV(chip);
-
if (gpio_is_valid(tpm_dev->io_lpcpd)) {
gpio_set_value(tpm_dev->io_lpcpd, 1);
ret = wait_for_stat(chip,
- TPM_STS_VALID, chip->vendor.timeout_b,
- &chip->vendor.read_queue, false);
+ TPM_STS_VALID, chip->timeout_b,
+ &tpm_dev->read_queue, false);
} else {
ret = tpm_pm_resume(dev);
if (!ret)
diff --git a/drivers/char/tpm/st33zp24/st33zp24.h b/drivers/char/tpm/st33zp24/st33zp24.h
index c207cebf6..6f4a4198a 100644
--- a/drivers/char/tpm/st33zp24/st33zp24.h
+++ b/drivers/char/tpm/st33zp24/st33zp24.h
@@ -1,6 +1,6 @@
/*
* STMicroelectronics TPM Linux driver for TPM ST33ZP24
- * Copyright (C) 2009 - 2015 STMicroelectronics
+ * Copyright (C) 2009 - 2016 STMicroelectronics
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -21,6 +21,18 @@
#define TPM_WRITE_DIRECTION 0x80
#define TPM_BUFSIZE 2048
+struct st33zp24_dev {
+ struct tpm_chip *chip;
+ void *phy_id;
+ const struct st33zp24_phy_ops *ops;
+ int locality;
+ int irq;
+ u32 intrs;
+ int io_lpcpd;
+ wait_queue_head_t read_queue;
+};
+
struct st33zp24_phy_ops {
int (*send)(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size);
int (*recv)(void *phy_id, u8 tpm_register, u8 *tpm_data, int tpm_size);
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 274dd0123..e5950131b 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -29,33 +29,88 @@
#include "tpm.h"
#include "tpm_eventlog.h"
-static DECLARE_BITMAP(dev_mask, TPM_NUM_DEVICES);
-static LIST_HEAD(tpm_chip_list);
-static DEFINE_SPINLOCK(driver_lock);
+DEFINE_IDR(dev_nums_idr);
+static DEFINE_MUTEX(idr_lock);
struct class *tpm_class;
dev_t tpm_devt;
-/*
- * tpm_chip_find_get - return tpm_chip for a given chip number
- * @chip_num the device number for the chip
+/**
+ * tpm_try_get_ops() - Get a ref to the tpm_chip
+ * @chip: Chip to ref
+ *
+ * The caller must already have some kind of locking to ensure that chip is
+ * valid. This function will lock the chip so that the ops member can be
+ * accessed safely. The locking prevents tpm_chip_unregister from
+ * completing, so it should not be held for long periods.
+ *
+ * Returns -ERRNO if a reference to the chip could not be obtained.
*/
-struct tpm_chip *tpm_chip_find_get(int chip_num)
+int tpm_try_get_ops(struct tpm_chip *chip)
{
- struct tpm_chip *pos, *chip = NULL;
+ int rc = -EIO;
- rcu_read_lock();
- list_for_each_entry_rcu(pos, &tpm_chip_list, list) {
- if (chip_num != TPM_ANY_NUM && chip_num != pos->dev_num)
- continue;
+ get_device(&chip->dev);
- if (try_module_get(pos->pdev->driver->owner)) {
- chip = pos;
- break;
- }
+ down_read(&chip->ops_sem);
+ if (!chip->ops)
+ goto out_lock;
+
+ return 0;
+out_lock:
+ up_read(&chip->ops_sem);
+ put_device(&chip->dev);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_try_get_ops);
+
+/**
+ * tpm_put_ops() - Release a ref to the tpm_chip
+ * @chip: Chip to put
+ *
+ * This is the opposite pair to tpm_try_get_ops(). After this returns, the
+ * chip may be kfree'd.
+ */
+void tpm_put_ops(struct tpm_chip *chip)
+{
+ up_read(&chip->ops_sem);
+ put_device(&chip->dev);
+}
+EXPORT_SYMBOL_GPL(tpm_put_ops);
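A minimal sketch of how a caller is expected to bracket driver callbacks with this pair; example_read_status() is a hypothetical function, not part of the patch:

    static int example_read_status(struct tpm_chip *chip, u8 *status)
    {
            int rc;

            rc = tpm_try_get_ops(chip);     /* read-locks ops_sem, refs the dev */
            if (rc)
                    return rc;              /* chip is being unregistered */

            *status = chip->ops->status(chip);

            tpm_put_ops(chip);              /* drops the lock and the ref */
            return 0;
    }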
+
+/**
+ * tpm_chip_find_get() - return tpm_chip for a given chip number
+ * @chip_num: id to find
+ *
+ * The returned chip has already been through tpm_try_get_ops() and must be
+ * released via tpm_put_ops().
+ */
+struct tpm_chip *tpm_chip_find_get(int chip_num)
+{
+ struct tpm_chip *chip, *res = NULL;
+ int chip_prev;
+
+ mutex_lock(&idr_lock);
+
+ if (chip_num == TPM_ANY_NUM) {
+ chip_num = 0;
+ do {
+ chip_prev = chip_num;
+ chip = idr_get_next(&dev_nums_idr, &chip_num);
+ if (chip && !tpm_try_get_ops(chip)) {
+ res = chip;
+ break;
+ }
+ } while (chip_prev != chip_num);
+ } else {
+ chip = idr_find_slowpath(&dev_nums_idr, chip_num);
+ if (chip && !tpm_try_get_ops(chip))
+ res = chip;
}
- rcu_read_unlock();
- return chip;
+
+ mutex_unlock(&idr_lock);
+
+ return res;
}
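Callers of the reworked lookup follow the pattern visible in the exported helpers later in this patch (tpm_is_tpm2(), tpm_pcr_read() and friends); a sketch:

    struct tpm_chip *chip;

    chip = tpm_chip_find_get(TPM_ANY_NUM); /* already tpm_try_get_ops'd */
    if (chip == NULL)
            return -ENODEV;

    /* chip->ops can be used safely here */

    tpm_put_ops(chip);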
/**
@@ -68,24 +123,25 @@ static void tpm_dev_release(struct device *dev)
{
struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev);
- spin_lock(&driver_lock);
- clear_bit(chip->dev_num, dev_mask);
- spin_unlock(&driver_lock);
+ mutex_lock(&idr_lock);
+ idr_remove(&dev_nums_idr, chip->dev_num);
+ mutex_unlock(&idr_lock);
+
kfree(chip);
}
/**
- * tpmm_chip_alloc() - allocate a new struct tpm_chip instance
- * @dev: device to which the chip is associated
+ * tpm_chip_alloc() - allocate a new struct tpm_chip instance
+ * @dev: device to which the chip is associated
+ * At this point dev must be initialized, but does not have to
+ * be registered
* @ops: struct tpm_class_ops instance
*
* Allocates a new struct tpm_chip instance and assigns a free
- * device number for it. Caller does not have to worry about
- * freeing the allocated resources. When the devices is removed
- * devres calls tpmm_chip_remove() to do the job.
+ * device number for it. Must be paired with put_device(&chip->dev).
*/
-struct tpm_chip *tpmm_chip_alloc(struct device *dev,
- const struct tpm_class_ops *ops)
+struct tpm_chip *tpm_chip_alloc(struct device *dev,
+ const struct tpm_class_ops *ops)
{
struct tpm_chip *chip;
int rc;
@@ -95,53 +151,75 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
return ERR_PTR(-ENOMEM);
mutex_init(&chip->tpm_mutex);
- INIT_LIST_HEAD(&chip->list);
+ init_rwsem(&chip->ops_sem);
chip->ops = ops;
- spin_lock(&driver_lock);
- chip->dev_num = find_first_zero_bit(dev_mask, TPM_NUM_DEVICES);
- spin_unlock(&driver_lock);
-
- if (chip->dev_num >= TPM_NUM_DEVICES) {
+ mutex_lock(&idr_lock);
+ rc = idr_alloc(&dev_nums_idr, NULL, 0, TPM_NUM_DEVICES, GFP_KERNEL);
+ mutex_unlock(&idr_lock);
+ if (rc < 0) {
dev_err(dev, "No available tpm device numbers\n");
kfree(chip);
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(rc);
}
+ chip->dev_num = rc;
- set_bit(chip->dev_num, dev_mask);
-
- scnprintf(chip->devname, sizeof(chip->devname), "tpm%d", chip->dev_num);
-
- chip->pdev = dev;
-
- dev_set_drvdata(dev, chip);
+ device_initialize(&chip->dev);
chip->dev.class = tpm_class;
chip->dev.release = tpm_dev_release;
- chip->dev.parent = chip->pdev;
-#ifdef CONFIG_ACPI
+ chip->dev.parent = dev;
chip->dev.groups = chip->groups;
-#endif
if (chip->dev_num == 0)
chip->dev.devt = MKDEV(MISC_MAJOR, TPM_MINOR);
else
chip->dev.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num);
- dev_set_name(&chip->dev, "%s", chip->devname);
+ rc = dev_set_name(&chip->dev, "tpm%d", chip->dev_num);
+ if (rc)
+ goto out;
- device_initialize(&chip->dev);
+ if (!dev)
+ chip->flags |= TPM_CHIP_FLAG_VIRTUAL;
cdev_init(&chip->cdev, &tpm_fops);
- chip->cdev.owner = chip->pdev->driver->owner;
+ chip->cdev.owner = THIS_MODULE;
chip->cdev.kobj.parent = &chip->dev.kobj;
- rc = devm_add_action(dev, (void (*)(void *)) put_device, &chip->dev);
- if (rc) {
- put_device(&chip->dev);
+ return chip;
+
+out:
+ put_device(&chip->dev);
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(tpm_chip_alloc);
+
+/**
+ * tpmm_chip_alloc() - allocate a new struct tpm_chip instance
+ * @pdev: parent device to which the chip is associated
+ * @ops: struct tpm_class_ops instance
+ *
+ * Same as tpm_chip_alloc except devm is used to do the put_device
+ */
+struct tpm_chip *tpmm_chip_alloc(struct device *pdev,
+ const struct tpm_class_ops *ops)
+{
+ struct tpm_chip *chip;
+ int rc;
+
+ chip = tpm_chip_alloc(pdev, ops);
+ if (IS_ERR(chip))
+ return chip;
+
+ rc = devm_add_action_or_reset(pdev,
+ (void (*)(void *)) put_device,
+ &chip->dev);
+ if (rc)
return ERR_PTR(rc);
- }
+
+ dev_set_drvdata(pdev, chip);
return chip;
}
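For a driver author the devm variant keeps the old call shape; a hypothetical probe, assuming example_tpm_ops and example_priv are defined elsewhere:

    static int example_tpm_probe(struct platform_device *pdev)
    {
            struct tpm_chip *chip;

            chip = tpmm_chip_alloc(&pdev->dev, &example_tpm_ops);
            if (IS_ERR(chip))
                    return PTR_ERR(chip);

            /* driver-private state now lives in the chip's own drvdata */
            dev_set_drvdata(&chip->dev, example_priv);

            return tpm_chip_register(chip);
    }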
@@ -155,7 +233,7 @@ static int tpm_add_char_device(struct tpm_chip *chip)
if (rc) {
dev_err(&chip->dev,
"unable to cdev_add() %s, major %d, minor %d, err=%d\n",
- chip->devname, MAJOR(chip->dev.devt),
+ dev_name(&chip->dev), MAJOR(chip->dev.devt),
MINOR(chip->dev.devt), rc);
return rc;
@@ -165,13 +243,18 @@ static int tpm_add_char_device(struct tpm_chip *chip)
if (rc) {
dev_err(&chip->dev,
"unable to device_register() %s, major %d, minor %d, err=%d\n",
- chip->devname, MAJOR(chip->dev.devt),
+ dev_name(&chip->dev), MAJOR(chip->dev.devt),
MINOR(chip->dev.devt), rc);
cdev_del(&chip->cdev);
return rc;
}
+ /* Make the chip available. */
+ mutex_lock(&idr_lock);
+ idr_replace(&dev_nums_idr, chip, chip->dev_num);
+ mutex_unlock(&idr_lock);
+
return rc;
}
@@ -179,20 +262,28 @@ static void tpm_del_char_device(struct tpm_chip *chip)
{
cdev_del(&chip->cdev);
device_del(&chip->dev);
+
+ /* Make the chip unavailable. */
+ mutex_lock(&idr_lock);
+ idr_replace(&dev_nums_idr, NULL, chip->dev_num);
+ mutex_unlock(&idr_lock);
+
+ /* Make the driver uncallable. */
+ down_write(&chip->ops_sem);
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ tpm2_shutdown(chip, TPM2_SU_CLEAR);
+ chip->ops = NULL;
+ up_write(&chip->ops_sem);
}
static int tpm1_chip_register(struct tpm_chip *chip)
{
- int rc;
-
if (chip->flags & TPM_CHIP_FLAG_TPM2)
return 0;
- rc = tpm_sysfs_add_device(chip);
- if (rc)
- return rc;
+ tpm_sysfs_add_device(chip);
- chip->bios_dir = tpm_bios_log_setup(chip->devname);
+ chip->bios_dir = tpm_bios_log_setup(dev_name(&chip->dev));
return 0;
}
@@ -204,10 +295,50 @@ static void tpm1_chip_unregister(struct tpm_chip *chip)
if (chip->bios_dir)
tpm_bios_log_teardown(chip->bios_dir);
+}
+
+static void tpm_del_legacy_sysfs(struct tpm_chip *chip)
+{
+ struct attribute **i;
+
+ if (chip->flags & (TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_VIRTUAL))
+ return;
+
+ sysfs_remove_link(&chip->dev.parent->kobj, "ppi");
- tpm_sysfs_del_device(chip);
+ for (i = chip->groups[0]->attrs; *i != NULL; ++i)
+ sysfs_remove_link(&chip->dev.parent->kobj, (*i)->name);
}
+/* For compatibility with legacy sysfs paths we provide symlinks from the
+ * parent dev directory to selected names within the tpm chip directory. Old
+ * kernel versions created these files directly under the parent.
+ */
+static int tpm_add_legacy_sysfs(struct tpm_chip *chip)
+{
+ struct attribute **i;
+ int rc;
+
+ if (chip->flags & (TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_VIRTUAL))
+ return 0;
+
+ rc = __compat_only_sysfs_link_entry_to_kobj(
+ &chip->dev.parent->kobj, &chip->dev.kobj, "ppi");
+ if (rc && rc != -ENOENT)
+ return rc;
+
+ /* All the names from tpm-sysfs */
+ for (i = chip->groups[0]->attrs; *i != NULL; ++i) {
+ rc = __compat_only_sysfs_link_entry_to_kobj(
+ &chip->dev.parent->kobj, &chip->dev.kobj, (*i)->name);
+ if (rc) {
+ tpm_del_legacy_sysfs(chip);
+ return rc;
+ }
+ }
+
+ return 0;
+}
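The net effect for a single attribute, sketched here with "pcrs" (one of the names in chip->groups[0]; the paths are illustrative): the real file lives under the chip's own device, and the parent directory only carries a compatibility symlink.

    /* create: <parent>/pcrs becomes a symlink into the chip's directory */
    rc = __compat_only_sysfs_link_entry_to_kobj(&chip->dev.parent->kobj,
                                                &chip->dev.kobj, "pcrs");

    /* and on teardown */
    sysfs_remove_link(&chip->dev.parent->kobj, "pcrs");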
/*
* tpm_chip_register() - create a character device for the TPM chip
* @chip: TPM chip to use.
@@ -223,6 +354,15 @@ int tpm_chip_register(struct tpm_chip *chip)
{
int rc;
+ if (chip->ops->flags & TPM_OPS_AUTO_STARTUP) {
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ rc = tpm2_auto_startup(chip);
+ else
+ rc = tpm1_auto_startup(chip);
+ if (rc)
+ return rc;
+ }
+
rc = tpm1_chip_register(chip);
if (rc)
return rc;
@@ -230,30 +370,20 @@ int tpm_chip_register(struct tpm_chip *chip)
tpm_add_ppi(chip);
rc = tpm_add_char_device(chip);
- if (rc)
- goto out_err;
-
- /* Make the chip available. */
- spin_lock(&driver_lock);
- list_add_tail_rcu(&chip->list, &tpm_chip_list);
- spin_unlock(&driver_lock);
+ if (rc) {
+ tpm1_chip_unregister(chip);
+ return rc;
+ }
chip->flags |= TPM_CHIP_FLAG_REGISTERED;
- if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
- rc = __compat_only_sysfs_link_entry_to_kobj(&chip->pdev->kobj,
- &chip->dev.kobj,
- "ppi");
- if (rc && rc != -ENOENT) {
- tpm_chip_unregister(chip);
- return rc;
- }
+ rc = tpm_add_legacy_sysfs(chip);
+ if (rc) {
+ tpm_chip_unregister(chip);
+ return rc;
}
return 0;
-out_err:
- tpm1_chip_unregister(chip);
- return rc;
}
EXPORT_SYMBOL_GPL(tpm_chip_register);
@@ -264,6 +394,9 @@ EXPORT_SYMBOL_GPL(tpm_chip_register);
* Takes the chip first away from the list of available TPM chips and then
* cleans up all the resources reserved by tpm_chip_register().
*
+ * Once this function returns, the driver callbacks in 'ops' will no longer be
+ * running and can no longer be started.
+ *
* NOTE: This function should be only called before deinitializing chip
* resources.
*/
@@ -272,13 +405,7 @@ void tpm_chip_unregister(struct tpm_chip *chip)
if (!(chip->flags & TPM_CHIP_FLAG_REGISTERED))
return;
- spin_lock(&driver_lock);
- list_del_rcu(&chip->list);
- spin_unlock(&driver_lock);
- synchronize_rcu();
-
- if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
- sysfs_remove_link(&chip->pdev->kobj, "ppi");
+ tpm_del_legacy_sysfs(chip);
tpm1_chip_unregister(chip);
tpm_del_char_device(chip);
diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c
index de0337ebd..912ad30be 100644
--- a/drivers/char/tpm/tpm-dev.c
+++ b/drivers/char/tpm/tpm-dev.c
@@ -61,7 +61,7 @@ static int tpm_open(struct inode *inode, struct file *file)
* by the check of the is_open variable, which is protected
* by driver_lock. */
if (test_and_set_bit(0, &chip->is_open)) {
- dev_dbg(chip->pdev, "Another process owns this TPM\n");
+ dev_dbg(&chip->dev, "Another process owns this TPM\n");
return -EBUSY;
}
@@ -79,7 +79,6 @@ static int tpm_open(struct inode *inode, struct file *file)
INIT_WORK(&priv->work, timeout_work);
file->private_data = priv;
- get_device(chip->pdev);
return 0;
}
@@ -137,9 +136,18 @@ static ssize_t tpm_write(struct file *file, const char __user *buf,
return -EFAULT;
}
- /* atomic tpm command send and result receive */
+ /* atomic tpm command send and result receive. We only hold the ops
+ * lock during this period so that the tpm can be unregistered even if
+ * the char dev is held open.
+ */
+ if (tpm_try_get_ops(priv->chip)) {
+ mutex_unlock(&priv->buffer_mutex);
+ return -EPIPE;
+ }
out_size = tpm_transmit(priv->chip, priv->data_buffer,
- sizeof(priv->data_buffer));
+ sizeof(priv->data_buffer), 0);
+
+ tpm_put_ops(priv->chip);
if (out_size < 0) {
mutex_unlock(&priv->buffer_mutex);
return out_size;
@@ -166,7 +174,6 @@ static int tpm_release(struct inode *inode, struct file *file)
file->private_data = NULL;
atomic_set(&priv->data_pending, 0);
clear_bit(0, &priv->chip->is_open);
- put_device(priv->chip->pdev);
kfree(priv);
return 0;
}
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index e2fa89c88..aef20ee23 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -319,7 +319,7 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
duration_idx = tpm_ordinal_duration[ordinal];
if (duration_idx != TPM_UNDEFINED)
- duration = chip->vendor.duration[duration_idx];
+ duration = chip->duration[duration_idx];
if (duration <= 0)
return 2 * 60 * HZ;
else
@@ -330,8 +330,8 @@ EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
/*
* Internal kernel interface to transmit TPM commands
*/
-ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
- size_t bufsiz)
+ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz,
+ unsigned int flags)
{
ssize_t rc;
u32 count, ordinal;
@@ -345,21 +345,22 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
if (count == 0)
return -ENODATA;
if (count > bufsiz) {
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"invalid count value %x %zx\n", count, bufsiz);
return -E2BIG;
}
- mutex_lock(&chip->tpm_mutex);
+ if (!(flags & TPM_TRANSMIT_UNLOCKED))
+ mutex_lock(&chip->tpm_mutex);
rc = chip->ops->send(chip, (u8 *) buf, count);
if (rc < 0) {
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"tpm_transmit: tpm_send: error %zd\n", rc);
goto out;
}
- if (chip->vendor.irq)
+ if (chip->flags & TPM_CHIP_FLAG_IRQ)
goto out_recv;
if (chip->flags & TPM_CHIP_FLAG_TPM2)
@@ -373,7 +374,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
goto out_recv;
if (chip->ops->req_canceled(chip, status)) {
- dev_err(chip->pdev, "Operation Canceled\n");
+ dev_err(&chip->dev, "Operation Canceled\n");
rc = -ECANCELED;
goto out;
}
@@ -383,30 +384,31 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
} while (time_before(jiffies, stop));
chip->ops->cancel(chip);
- dev_err(chip->pdev, "Operation Timed out\n");
+ dev_err(&chip->dev, "Operation Timed out\n");
rc = -ETIME;
goto out;
out_recv:
rc = chip->ops->recv(chip, (u8 *) buf, bufsiz);
if (rc < 0)
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"tpm_transmit: tpm_recv: error %zd\n", rc);
out:
- mutex_unlock(&chip->tpm_mutex);
+ if (!(flags & TPM_TRANSMIT_UNLOCKED))
+ mutex_unlock(&chip->tpm_mutex);
return rc;
}
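TPM_TRANSMIT_UNLOCKED exists so that a caller can hold tpm_mutex across a multi-command sequence, exactly as tpm2_unseal_trusted() does later in this patch. The shape, sketched with hypothetical cmd1/cmd2 buffers:

    mutex_lock(&chip->tpm_mutex);
    rc = tpm_transmit(chip, cmd1, sizeof(cmd1), TPM_TRANSMIT_UNLOCKED);
    if (rc >= 0)
            rc = tpm_transmit(chip, cmd2, sizeof(cmd2), TPM_TRANSMIT_UNLOCKED);
    mutex_unlock(&chip->tpm_mutex);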
#define TPM_DIGEST_SIZE 20
#define TPM_RET_CODE_IDX 6
-ssize_t tpm_transmit_cmd(struct tpm_chip *chip, void *cmd,
- int len, const char *desc)
+ssize_t tpm_transmit_cmd(struct tpm_chip *chip, const void *cmd,
+ int len, unsigned int flags, const char *desc)
{
- struct tpm_output_header *header;
+ const struct tpm_output_header *header;
int err;
- len = tpm_transmit(chip, (u8 *) cmd, len);
+ len = tpm_transmit(chip, (const u8 *)cmd, len, flags);
if (len < 0)
return len;
else if (len < TPM_HEADER_SIZE)
@@ -416,7 +418,7 @@ ssize_t tpm_transmit_cmd(struct tpm_chip *chip, void *cmd,
err = be32_to_cpu(header->return_code);
if (err != 0 && desc)
- dev_err(chip->pdev, "A TPM error (%d) occurred %s\n", err,
+ dev_err(&chip->dev, "A TPM error (%d) occurred %s\n", err,
desc);
return err;
@@ -432,12 +434,11 @@ static const struct tpm_input_header tpm_getcap_header = {
.ordinal = TPM_ORD_GET_CAP
};
-ssize_t tpm_getcap(struct device *dev, __be32 subcap_id, cap_t *cap,
+ssize_t tpm_getcap(struct tpm_chip *chip, __be32 subcap_id, cap_t *cap,
const char *desc)
{
struct tpm_cmd_t tpm_cmd;
int rc;
- struct tpm_chip *chip = dev_get_drvdata(dev);
tpm_cmd.header.in = tpm_getcap_header;
if (subcap_id == CAP_VERSION_1_1 || subcap_id == CAP_VERSION_1_2) {
@@ -454,7 +455,8 @@ ssize_t tpm_getcap(struct device *dev, __be32 subcap_id, cap_t *cap,
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = subcap_id;
}
- rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, desc);
+ rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, 0,
+ desc);
if (!rc)
*cap = tpm_cmd.params.getcap_out.cap;
return rc;
@@ -470,7 +472,7 @@ void tpm_gen_interrupt(struct tpm_chip *chip)
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
- rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
+ rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, 0,
"attempting to determine the timeouts");
}
EXPORT_SYMBOL_GPL(tpm_gen_interrupt);
@@ -491,7 +493,7 @@ static int tpm_startup(struct tpm_chip *chip, __be16 startup_type)
start_cmd.header.in = tpm_startup_header;
start_cmd.params.startup_in.startup_type = startup_type;
- return tpm_transmit_cmd(chip, &start_cmd, TPM_INTERNAL_RESULT_SIZE,
+ return tpm_transmit_cmd(chip, &start_cmd, TPM_INTERNAL_RESULT_SIZE, 0,
"attempting to start the TPM");
}
@@ -505,15 +507,15 @@ int tpm_get_timeouts(struct tpm_chip *chip)
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
/* Fixed timeouts for TPM2 */
- chip->vendor.timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A);
- chip->vendor.timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B);
- chip->vendor.timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C);
- chip->vendor.timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D);
- chip->vendor.duration[TPM_SHORT] =
+ chip->timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A);
+ chip->timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B);
+ chip->timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C);
+ chip->timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D);
+ chip->duration[TPM_SHORT] =
msecs_to_jiffies(TPM2_DURATION_SHORT);
- chip->vendor.duration[TPM_MEDIUM] =
+ chip->duration[TPM_MEDIUM] =
msecs_to_jiffies(TPM2_DURATION_MEDIUM);
- chip->vendor.duration[TPM_LONG] =
+ chip->duration[TPM_LONG] =
msecs_to_jiffies(TPM2_DURATION_LONG);
return 0;
}
@@ -522,12 +524,13 @@ int tpm_get_timeouts(struct tpm_chip *chip)
tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
- rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, NULL);
+ rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, 0,
+ NULL);
if (rc == TPM_ERR_INVALID_POSTINIT) {
/* The TPM is not started; we are the first to talk to it.
Execute a startup command. */
- dev_info(chip->pdev, "Issuing TPM_STARTUP");
+ dev_info(&chip->dev, "Issuing TPM_STARTUP");
if (tpm_startup(chip, TPM_ST_CLEAR))
return rc;
@@ -536,10 +539,10 @@ int tpm_get_timeouts(struct tpm_chip *chip)
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
- NULL);
+ 0, NULL);
}
if (rc) {
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"A TPM error (%zd) occurred attempting to determine the timeouts\n",
rc);
goto duration;
@@ -561,10 +564,10 @@ int tpm_get_timeouts(struct tpm_chip *chip)
* of misreporting.
*/
if (chip->ops->update_timeouts != NULL)
- chip->vendor.timeout_adjusted =
+ chip->timeout_adjusted =
chip->ops->update_timeouts(chip, new_timeout);
- if (!chip->vendor.timeout_adjusted) {
+ if (!chip->timeout_adjusted) {
/* Don't overwrite default if value is 0 */
if (new_timeout[0] != 0 && new_timeout[0] < 1000) {
int i;
@@ -572,13 +575,13 @@ int tpm_get_timeouts(struct tpm_chip *chip)
/* timeouts in msec rather than usec */
for (i = 0; i != ARRAY_SIZE(new_timeout); i++)
new_timeout[i] *= 1000;
- chip->vendor.timeout_adjusted = true;
+ chip->timeout_adjusted = true;
}
}
/* Report adjusted timeouts */
- if (chip->vendor.timeout_adjusted) {
- dev_info(chip->pdev,
+ if (chip->timeout_adjusted) {
+ dev_info(&chip->dev,
HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n",
old_timeout[0], new_timeout[0],
old_timeout[1], new_timeout[1],
@@ -586,10 +589,10 @@ int tpm_get_timeouts(struct tpm_chip *chip)
old_timeout[3], new_timeout[3]);
}
- chip->vendor.timeout_a = usecs_to_jiffies(new_timeout[0]);
- chip->vendor.timeout_b = usecs_to_jiffies(new_timeout[1]);
- chip->vendor.timeout_c = usecs_to_jiffies(new_timeout[2]);
- chip->vendor.timeout_d = usecs_to_jiffies(new_timeout[3]);
+ chip->timeout_a = usecs_to_jiffies(new_timeout[0]);
+ chip->timeout_b = usecs_to_jiffies(new_timeout[1]);
+ chip->timeout_c = usecs_to_jiffies(new_timeout[2]);
+ chip->timeout_d = usecs_to_jiffies(new_timeout[3]);
duration:
tpm_cmd.header.in = tpm_getcap_header;
@@ -597,7 +600,7 @@ duration:
tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_DURATION;
- rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
+ rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, 0,
"attempting to determine the durations");
if (rc)
return rc;
@@ -608,11 +611,11 @@ duration:
return -EINVAL;
duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
- chip->vendor.duration[TPM_SHORT] =
+ chip->duration[TPM_SHORT] =
usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short));
- chip->vendor.duration[TPM_MEDIUM] =
+ chip->duration[TPM_MEDIUM] =
usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_medium));
- chip->vendor.duration[TPM_LONG] =
+ chip->duration[TPM_LONG] =
usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_long));
/* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above
@@ -620,12 +623,12 @@ duration:
* fix up the resulting too-small TPM_SHORT value to make things work.
* We also scale the TPM_MEDIUM and -_LONG values by 1000.
*/
- if (chip->vendor.duration[TPM_SHORT] < (HZ / 100)) {
- chip->vendor.duration[TPM_SHORT] = HZ;
- chip->vendor.duration[TPM_MEDIUM] *= 1000;
- chip->vendor.duration[TPM_LONG] *= 1000;
- chip->vendor.duration_adjusted = true;
- dev_info(chip->pdev, "Adjusting TPM timeout parameters.");
+ if (chip->duration[TPM_SHORT] < (HZ / 100)) {
+ chip->duration[TPM_SHORT] = HZ;
+ chip->duration[TPM_MEDIUM] *= 1000;
+ chip->duration[TPM_LONG] *= 1000;
+ chip->duration_adjusted = true;
+ dev_info(&chip->dev, "Adjusting TPM timeout parameters.");
}
return 0;
}
@@ -653,7 +656,7 @@ static int tpm_continue_selftest(struct tpm_chip *chip)
struct tpm_cmd_t cmd;
cmd.header.in = continue_selftest_header;
- rc = tpm_transmit_cmd(chip, &cmd, CONTINUE_SELFTEST_RESULT_SIZE,
+ rc = tpm_transmit_cmd(chip, &cmd, CONTINUE_SELFTEST_RESULT_SIZE, 0,
"continue selftest");
return rc;
}
@@ -673,7 +676,7 @@ int tpm_pcr_read_dev(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
cmd.header.in = pcrread_header;
cmd.params.pcrread_in.pcr_idx = cpu_to_be32(pcr_idx);
- rc = tpm_transmit_cmd(chip, &cmd, READ_PCR_RESULT_SIZE,
+ rc = tpm_transmit_cmd(chip, &cmd, READ_PCR_RESULT_SIZE, 0,
"attempting to read a pcr value");
if (rc == 0)
@@ -700,7 +703,7 @@ int tpm_is_tpm2(u32 chip_num)
rc = (chip->flags & TPM_CHIP_FLAG_TPM2) != 0;
- tpm_chip_put(chip);
+ tpm_put_ops(chip);
return rc;
}
@@ -729,7 +732,7 @@ int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf)
rc = tpm2_pcr_read(chip, pcr_idx, res_buf);
else
rc = tpm_pcr_read_dev(chip, pcr_idx, res_buf);
- tpm_chip_put(chip);
+ tpm_put_ops(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_pcr_read);
@@ -764,17 +767,17 @@ int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
rc = tpm2_pcr_extend(chip, pcr_idx, hash);
- tpm_chip_put(chip);
+ tpm_put_ops(chip);
return rc;
}
cmd.header.in = pcrextend_header;
cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(pcr_idx);
memcpy(cmd.params.pcrextend_in.hash, hash, TPM_DIGEST_SIZE);
- rc = tpm_transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE,
+ rc = tpm_transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE, 0,
"attempting extend a PCR value");
- tpm_chip_put(chip);
+ tpm_put_ops(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_pcr_extend);
@@ -810,12 +813,14 @@ int tpm_do_selftest(struct tpm_chip *chip)
/* Attempt to read a PCR value */
cmd.header.in = pcrread_header;
cmd.params.pcrread_in.pcr_idx = cpu_to_be32(0);
- rc = tpm_transmit(chip, (u8 *) &cmd, READ_PCR_RESULT_SIZE);
+ rc = tpm_transmit(chip, (u8 *) &cmd, READ_PCR_RESULT_SIZE, 0);
/* Some buggy TPMs will not respond to tpm_tis_ready() for
* around 300ms while the self test is ongoing; keep trying
* until the self test duration expires. */
if (rc == -ETIME) {
- dev_info(chip->pdev, HW_ERR "TPM command timed out during continue self test");
+ dev_info(
+ &chip->dev, HW_ERR
+ "TPM command timed out during continue self test");
msleep(delay_msec);
continue;
}
@@ -825,7 +830,7 @@ int tpm_do_selftest(struct tpm_chip *chip)
rc = be32_to_cpu(cmd.header.out.return_code);
if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) {
- dev_info(chip->pdev,
+ dev_info(&chip->dev,
"TPM is disabled/deactivated (0x%X)\n", rc);
/* TPM is disabled and/or deactivated; driver can
* proceed and TPM does handle commands for
@@ -842,6 +847,33 @@ int tpm_do_selftest(struct tpm_chip *chip)
}
EXPORT_SYMBOL_GPL(tpm_do_selftest);
+/**
+ * tpm1_auto_startup - Perform the standard automatic TPM initialization
+ * sequence
+ * @chip: TPM chip to use
+ *
+ * Returns 0 on success, < 0 in case of fatal error.
+ */
+int tpm1_auto_startup(struct tpm_chip *chip)
+{
+ int rc;
+
+ rc = tpm_get_timeouts(chip);
+ if (rc)
+ goto out;
+ rc = tpm_do_selftest(chip);
+ if (rc) {
+ dev_err(&chip->dev, "TPM self test failed\n");
+ goto out;
+ }
+
+ return rc;
+out:
+ if (rc > 0)
+ rc = -ENODEV;
+ return rc;
+}
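This helper is what lets drivers drop their open-coded tpm_get_timeouts()/tpm_do_selftest() calls, as st33zp24 does earlier in this patch; opting in is a single flag in the ops table. A sketch with hypothetical callbacks:

    static const struct tpm_class_ops example_tpm_ops = {
            .flags = TPM_OPS_AUTO_STARTUP,  /* core runs tpm*_auto_startup() */
            .send = example_send,
            .recv = example_recv,
            .cancel = example_cancel,
            .status = example_status,
    };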
+
int tpm_send(u32 chip_num, void *cmd, size_t buflen)
{
struct tpm_chip *chip;
@@ -851,9 +883,9 @@ int tpm_send(u32 chip_num, void *cmd, size_t buflen)
if (chip == NULL)
return -ENODEV;
- rc = tpm_transmit_cmd(chip, cmd, buflen, "attempting tpm_cmd");
+ rc = tpm_transmit_cmd(chip, cmd, buflen, 0, "attempting tpm_cmd");
- tpm_chip_put(chip);
+ tpm_put_ops(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_send);
@@ -888,7 +920,7 @@ int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
stop = jiffies + timeout;
- if (chip->vendor.irq) {
+ if (chip->flags & TPM_CHIP_FLAG_IRQ) {
again:
timeout = stop - jiffies;
if ((long)timeout <= 0)
@@ -953,14 +985,15 @@ int tpm_pm_suspend(struct device *dev)
cmd.params.pcrextend_in.pcr_idx = cpu_to_be32(tpm_suspend_pcr);
memcpy(cmd.params.pcrextend_in.hash, dummy_hash,
TPM_DIGEST_SIZE);
- rc = tpm_transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE,
+ rc = tpm_transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE, 0,
"extending dummy pcr before suspend");
}
/* now do the actual savestate */
for (try = 0; try < TPM_RETRY; try++) {
cmd.header.in = savestate_header;
- rc = tpm_transmit_cmd(chip, &cmd, SAVESTATE_RESULT_SIZE, NULL);
+ rc = tpm_transmit_cmd(chip, &cmd, SAVESTATE_RESULT_SIZE, 0,
+ NULL);
/*
* If the TPM indicates that it is too busy to respond to
@@ -978,10 +1011,10 @@ int tpm_pm_suspend(struct device *dev)
}
if (rc)
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"Error (%d) sending savestate before suspend\n", rc);
else if (try > 0)
- dev_warn(chip->pdev, "TPM savestate took %dms\n",
+ dev_warn(&chip->dev, "TPM savestate took %dms\n",
try * TPM_TIMEOUT_RETRY);
return rc;
@@ -1035,7 +1068,7 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
err = tpm2_get_random(chip, out, max);
- tpm_chip_put(chip);
+ tpm_put_ops(chip);
return err;
}
@@ -1044,8 +1077,8 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
tpm_cmd.params.getrandom_in.num_bytes = cpu_to_be32(num_bytes);
err = tpm_transmit_cmd(chip, &tpm_cmd,
- TPM_GETRANDOM_RESULT_SIZE + num_bytes,
- "attempting get random");
+ TPM_GETRANDOM_RESULT_SIZE + num_bytes,
+ 0, "attempting get random");
if (err)
break;
@@ -1057,7 +1090,7 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
num_bytes -= recd;
} while (retries-- && total < max);
- tpm_chip_put(chip);
+ tpm_put_ops(chip);
return total ? total : -EIO;
}
EXPORT_SYMBOL_GPL(tpm_get_random);
@@ -1083,7 +1116,7 @@ int tpm_seal_trusted(u32 chip_num, struct trusted_key_payload *payload,
rc = tpm2_seal_trusted(chip, payload, options);
- tpm_chip_put(chip);
+ tpm_put_ops(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_seal_trusted);
@@ -1109,7 +1142,8 @@ int tpm_unseal_trusted(u32 chip_num, struct trusted_key_payload *payload,
rc = tpm2_unseal_trusted(chip, payload, options);
- tpm_chip_put(chip);
+ tpm_put_ops(chip);
+
return rc;
}
EXPORT_SYMBOL_GPL(tpm_unseal_trusted);
@@ -1136,6 +1170,7 @@ static int __init tpm_init(void)
static void __exit tpm_exit(void)
{
+ idr_destroy(&dev_nums_idr);
class_destroy(tpm_class);
unregister_chrdev_region(tpm_devt, TPM_NUM_DEVICES);
}
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index ee66fd467..e1f7236c1 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -36,10 +36,10 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
int i, rc;
char *str = buf;
- struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct tpm_chip *chip = to_tpm_chip(dev);
tpm_cmd.header.in = tpm_readpubek_header;
- err = tpm_transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
+ err = tpm_transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE, 0,
"attempting to read the PUBEK");
if (err)
goto out;
@@ -92,9 +92,9 @@ static ssize_t pcrs_show(struct device *dev, struct device_attribute *attr,
ssize_t rc;
int i, j, num_pcrs;
char *str = buf;
- struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct tpm_chip *chip = to_tpm_chip(dev);
- rc = tpm_getcap(dev, TPM_CAP_PROP_PCR, &cap,
+ rc = tpm_getcap(chip, TPM_CAP_PROP_PCR, &cap,
"attempting to determine the number of PCRS");
if (rc)
return 0;
@@ -119,8 +119,8 @@ static ssize_t enabled_show(struct device *dev, struct device_attribute *attr,
cap_t cap;
ssize_t rc;
- rc = tpm_getcap(dev, TPM_CAP_FLAG_PERM, &cap,
- "attempting to determine the permanent enabled state");
+ rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_PERM, &cap,
+ "attempting to determine the permanent enabled state");
if (rc)
return 0;
@@ -135,8 +135,8 @@ static ssize_t active_show(struct device *dev, struct device_attribute *attr,
cap_t cap;
ssize_t rc;
- rc = tpm_getcap(dev, TPM_CAP_FLAG_PERM, &cap,
- "attempting to determine the permanent active state");
+ rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_PERM, &cap,
+ "attempting to determine the permanent active state");
if (rc)
return 0;
@@ -151,8 +151,8 @@ static ssize_t owned_show(struct device *dev, struct device_attribute *attr,
cap_t cap;
ssize_t rc;
- rc = tpm_getcap(dev, TPM_CAP_PROP_OWNER, &cap,
- "attempting to determine the owner state");
+ rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_PROP_OWNER, &cap,
+ "attempting to determine the owner state");
if (rc)
return 0;
@@ -167,8 +167,8 @@ static ssize_t temp_deactivated_show(struct device *dev,
cap_t cap;
ssize_t rc;
- rc = tpm_getcap(dev, TPM_CAP_FLAG_VOL, &cap,
- "attempting to determine the temporary state");
+ rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_VOL, &cap,
+ "attempting to determine the temporary state");
if (rc)
return 0;
@@ -180,11 +180,12 @@ static DEVICE_ATTR_RO(temp_deactivated);
static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
+ struct tpm_chip *chip = to_tpm_chip(dev);
cap_t cap;
ssize_t rc;
char *str = buf;
- rc = tpm_getcap(dev, TPM_CAP_PROP_MANUFACTURER, &cap,
+ rc = tpm_getcap(chip, TPM_CAP_PROP_MANUFACTURER, &cap,
"attempting to determine the manufacturer");
if (rc)
return 0;
@@ -192,8 +193,8 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
be32_to_cpu(cap.manufacturer_id));
/* Try to get a TPM version 1.2 TPM_CAP_VERSION_INFO */
- rc = tpm_getcap(dev, CAP_VERSION_1_2, &cap,
- "attempting to determine the 1.2 version");
+ rc = tpm_getcap(chip, CAP_VERSION_1_2, &cap,
+ "attempting to determine the 1.2 version");
if (!rc) {
str += sprintf(str,
"TCG version: %d.%d\nFirmware version: %d.%d\n",
@@ -203,7 +204,7 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
cap.tpm_version_1_2.revMinor);
} else {
/* Otherwise just use TPM_STRUCT_VER */
- rc = tpm_getcap(dev, CAP_VERSION_1_1, &cap,
+ rc = tpm_getcap(chip, CAP_VERSION_1_1, &cap,
"attempting to determine the 1.1 version");
if (rc)
return 0;
@@ -222,7 +223,7 @@ static DEVICE_ATTR_RO(caps);
static ssize_t cancel_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct tpm_chip *chip = to_tpm_chip(dev);
if (chip == NULL)
return 0;
@@ -234,16 +235,16 @@ static DEVICE_ATTR_WO(cancel);
static ssize_t durations_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct tpm_chip *chip = to_tpm_chip(dev);
- if (chip->vendor.duration[TPM_LONG] == 0)
+ if (chip->duration[TPM_LONG] == 0)
return 0;
return sprintf(buf, "%d %d %d [%s]\n",
- jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]),
- jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]),
- jiffies_to_usecs(chip->vendor.duration[TPM_LONG]),
- chip->vendor.duration_adjusted
+ jiffies_to_usecs(chip->duration[TPM_SHORT]),
+ jiffies_to_usecs(chip->duration[TPM_MEDIUM]),
+ jiffies_to_usecs(chip->duration[TPM_LONG]),
+ chip->duration_adjusted
? "adjusted" : "original");
}
static DEVICE_ATTR_RO(durations);
@@ -251,14 +252,14 @@ static DEVICE_ATTR_RO(durations);
static ssize_t timeouts_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct tpm_chip *chip = to_tpm_chip(dev);
return sprintf(buf, "%d %d %d %d [%s]\n",
- jiffies_to_usecs(chip->vendor.timeout_a),
- jiffies_to_usecs(chip->vendor.timeout_b),
- jiffies_to_usecs(chip->vendor.timeout_c),
- jiffies_to_usecs(chip->vendor.timeout_d),
- chip->vendor.timeout_adjusted
+ jiffies_to_usecs(chip->timeout_a),
+ jiffies_to_usecs(chip->timeout_b),
+ jiffies_to_usecs(chip->timeout_c),
+ jiffies_to_usecs(chip->timeout_d),
+ chip->timeout_adjusted
? "adjusted" : "original");
}
static DEVICE_ATTR_RO(timeouts);
@@ -281,19 +282,12 @@ static const struct attribute_group tpm_dev_group = {
.attrs = tpm_dev_attrs,
};
-int tpm_sysfs_add_device(struct tpm_chip *chip)
+void tpm_sysfs_add_device(struct tpm_chip *chip)
{
- int err;
- err = sysfs_create_group(&chip->pdev->kobj,
- &tpm_dev_group);
-
- if (err)
- dev_err(chip->pdev,
- "failed to create sysfs attributes, %d\n", err);
- return err;
-}
-
-void tpm_sysfs_del_device(struct tpm_chip *chip)
-{
- sysfs_remove_group(&chip->pdev->kobj, &tpm_dev_group);
+ /* The sysfs routines rely on an implicit tpm_try_get_ops; device_del
+ * is called before ops is nulled, and the sysfs core synchronizes the
+ * removal so that no callbacks are running or can run again.
+ */
+ WARN_ON(chip->groups_cnt != 0);
+ chip->groups[chip->groups_cnt++] = &tpm_dev_group;
}
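The attribute routines above now recover the chip with to_tpm_chip() (the container_of() helper in tpm.h) instead of dev_get_drvdata(), since the attributes hang off the chip's own struct device. The before/after for a show routine, with example_show as a hypothetical name:

    static ssize_t example_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
            /* old: the chip hung off the parent's drvdata */
            /* struct tpm_chip *chip = dev_get_drvdata(dev); */

            /* new: the attribute sits on the chip's own device */
            struct tpm_chip *chip = to_tpm_chip(dev);

            return sprintf(buf, "%d\n", chip->dev_num);
    }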
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 28b477e8d..b0585e99d 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -19,6 +19,10 @@
* License.
*
*/
+
+#ifndef __TPM_H__
+#define __TPM_H__
+
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/fs.h>
@@ -34,7 +38,7 @@
enum tpm_const {
TPM_MINOR = 224, /* officially assigned */
TPM_BUFSIZE = 4096,
- TPM_NUM_DEVICES = 256,
+ TPM_NUM_DEVICES = 65536,
TPM_RETRY = 50, /* 5 seconds */
};
@@ -128,33 +132,6 @@ enum tpm2_startup_types {
TPM2_SU_STATE = 0x0001,
};
-struct tpm_chip;
-
-struct tpm_vendor_specific {
- void __iomem *iobase; /* ioremapped address */
- unsigned long base; /* TPM base address */
-
- int irq;
-
- int region_size;
- int have_region;
-
- struct list_head list;
- int locality;
- unsigned long timeout_a, timeout_b, timeout_c, timeout_d; /* jiffies */
- bool timeout_adjusted;
- unsigned long duration[3]; /* jiffies */
- bool duration_adjusted;
- void *priv;
-
- wait_queue_head_t read_queue;
- wait_queue_head_t int_queue;
-
- u16 manufacturer_id;
-};
-
-#define TPM_VPRIV(c) ((c)->vendor.priv)
-
#define TPM_VID_INTEL 0x8086
#define TPM_VID_WINBOND 0x1050
#define TPM_VID_STM 0x104A
@@ -164,44 +141,48 @@ struct tpm_vendor_specific {
enum tpm_chip_flags {
TPM_CHIP_FLAG_REGISTERED = BIT(0),
TPM_CHIP_FLAG_TPM2 = BIT(1),
+ TPM_CHIP_FLAG_IRQ = BIT(2),
+ TPM_CHIP_FLAG_VIRTUAL = BIT(3),
};
struct tpm_chip {
- struct device *pdev; /* Device stuff */
struct device dev;
struct cdev cdev;
+ /* A driver callback under ops cannot be run unless ops_sem is held
+ * (sometimes implicitly, eg for the sysfs code). ops becomes null
+ * when the driver is unregistered, see tpm_try_get_ops.
+ */
+ struct rw_semaphore ops_sem;
const struct tpm_class_ops *ops;
+
unsigned int flags;
int dev_num; /* /dev/tpm# */
- char devname[7];
unsigned long is_open; /* only one allowed */
- int time_expired;
struct mutex tpm_mutex; /* tpm is processing */
- struct tpm_vendor_specific vendor;
+ unsigned long timeout_a; /* jiffies */
+ unsigned long timeout_b; /* jiffies */
+ unsigned long timeout_c; /* jiffies */
+ unsigned long timeout_d; /* jiffies */
+ bool timeout_adjusted;
+ unsigned long duration[3]; /* jiffies */
+ bool duration_adjusted;
struct dentry **bios_dir;
-#ifdef CONFIG_ACPI
- const struct attribute_group *groups[2];
+ const struct attribute_group *groups[3];
unsigned int groups_cnt;
+#ifdef CONFIG_ACPI
acpi_handle acpi_dev_handle;
char ppi_version[TPM_PPI_VERSION_LEN + 1];
#endif /* CONFIG_ACPI */
-
- struct list_head list;
};
#define to_tpm_chip(d) container_of(d, struct tpm_chip, dev)
-static inline void tpm_chip_put(struct tpm_chip *chip)
-{
- module_put(chip->pdev->driver->owner);
-}
-
static inline int tpm_read_index(int base, int index)
{
outb(index, base);
@@ -493,14 +474,21 @@ static inline void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value)
extern struct class *tpm_class;
extern dev_t tpm_devt;
extern const struct file_operations tpm_fops;
+extern struct idr dev_nums_idr;
+
+enum tpm_transmit_flags {
+ TPM_TRANSMIT_UNLOCKED = BIT(0),
+};
-ssize_t tpm_getcap(struct device *, __be32, cap_t *, const char *);
-ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
- size_t bufsiz);
-ssize_t tpm_transmit_cmd(struct tpm_chip *chip, void *cmd, int len,
- const char *desc);
+ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz,
+ unsigned int flags);
+ssize_t tpm_transmit_cmd(struct tpm_chip *chip, const void *cmd, int len,
+ unsigned int flags, const char *desc);
+ssize_t tpm_getcap(struct tpm_chip *chip, __be32 subcap_id, cap_t *cap,
+ const char *desc);
extern int tpm_get_timeouts(struct tpm_chip *);
extern void tpm_gen_interrupt(struct tpm_chip *);
+int tpm1_auto_startup(struct tpm_chip *chip);
extern int tpm_do_selftest(struct tpm_chip *);
extern unsigned long tpm_calc_ordinal_duration(struct tpm_chip *, u32);
extern int tpm_pm_suspend(struct device *);
@@ -509,13 +497,17 @@ extern int wait_for_tpm_stat(struct tpm_chip *, u8, unsigned long,
wait_queue_head_t *, bool);
struct tpm_chip *tpm_chip_find_get(int chip_num);
-extern struct tpm_chip *tpmm_chip_alloc(struct device *dev,
+__must_check int tpm_try_get_ops(struct tpm_chip *chip);
+void tpm_put_ops(struct tpm_chip *chip);
+
+extern struct tpm_chip *tpm_chip_alloc(struct device *dev,
+ const struct tpm_class_ops *ops);
+extern struct tpm_chip *tpmm_chip_alloc(struct device *pdev,
const struct tpm_class_ops *ops);
extern int tpm_chip_register(struct tpm_chip *chip);
extern void tpm_chip_unregister(struct tpm_chip *chip);
-int tpm_sysfs_add_device(struct tpm_chip *chip);
-void tpm_sysfs_del_device(struct tpm_chip *chip);
+void tpm_sysfs_add_device(struct tpm_chip *chip);
int tpm_pcr_read_dev(struct tpm_chip *chip, int pcr_idx, u8 *res_buf);
@@ -539,9 +531,9 @@ int tpm2_unseal_trusted(struct tpm_chip *chip,
ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id,
u32 *value, const char *desc);
-extern int tpm2_startup(struct tpm_chip *chip, u16 startup_type);
+int tpm2_auto_startup(struct tpm_chip *chip);
extern void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type);
extern unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *, u32);
-extern int tpm2_do_selftest(struct tpm_chip *chip);
extern int tpm2_gen_interrupt(struct tpm_chip *chip);
extern int tpm2_probe(struct tpm_chip *chip);
+#endif
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index b28e4da3d..ef5a58b98 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -282,7 +282,7 @@ int tpm2_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
sizeof(cmd.params.pcrread_in.pcr_select));
cmd.params.pcrread_in.pcr_select[pcr_idx >> 3] = 1 << (pcr_idx & 0x7);
- rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd),
+ rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0,
"attempting to read a pcr value");
if (rc == 0) {
buf = cmd.params.pcrread_out.digest;
@@ -330,7 +330,7 @@ int tpm2_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash)
cmd.params.pcrextend_in.hash_alg = cpu_to_be16(TPM2_ALG_SHA1);
memcpy(cmd.params.pcrextend_in.digest, hash, TPM_DIGEST_SIZE);
- rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd),
+ rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0,
"attempting extend a PCR value");
return rc;
@@ -376,7 +376,7 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *out, size_t max)
cmd.header.in = tpm2_getrandom_header;
cmd.params.getrandom_in.size = cpu_to_be16(num_bytes);
- err = tpm_transmit_cmd(chip, &cmd, sizeof(cmd),
+ err = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0,
"attempting get random");
if (err)
break;
@@ -434,12 +434,12 @@ static void tpm2_buf_append_auth(struct tpm_buf *buf, u32 session_handle,
}
/**
- * tpm2_seal_trusted() - seal a trusted key
- * @chip_num: A specific chip number for the request or TPM_ANY_NUM
- * @options: authentication values and other options
+ * tpm2_seal_trusted() - seal the payload of a trusted key
+ * @chip_num: TPM chip to use
* @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
*
- * Returns < 0 on error and 0 on success.
+ * Return: < 0 on error and 0 on success.
*/
int tpm2_seal_trusted(struct tpm_chip *chip,
struct trusted_key_payload *payload,
@@ -512,7 +512,7 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
goto out;
}
- rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, "sealing data");
+ rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, 0, "sealing data");
if (rc)
goto out;
@@ -538,10 +538,18 @@ out:
return rc;
}
-static int tpm2_load(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options,
- u32 *blob_handle)
+/**
+ * tpm2_load_cmd() - execute a TPM2_Load command
+ * @chip: TPM chip to use
+ * @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
+ * @blob_handle: the loaded TPM handle, returned on success
+ * @flags: tpm transmit flags
+ *
+ * Return: same as with tpm_transmit_cmd
+ */
+static int tpm2_load_cmd(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options,
+ u32 *blob_handle, unsigned int flags)
{
struct tpm_buf buf;
unsigned int private_len;
@@ -576,7 +584,7 @@ static int tpm2_load(struct tpm_chip *chip,
goto out;
}
- rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, "loading blob");
+ rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, flags, "loading blob");
if (!rc)
*blob_handle = be32_to_cpup(
(__be32 *) &buf.data[TPM_HEADER_SIZE]);
@@ -590,32 +598,50 @@ out:
return rc;
}
-static void tpm2_flush_context(struct tpm_chip *chip, u32 handle)
+/**
+ * tpm2_flush_context_cmd() - execute a TPM2_FlushContext command
+ * @chip: TPM chip to use
+ * @handle: the context handle to flush
+ * @flags: tpm transmit flags
+ *
+ * Flush failures are logged with dev_warn(); the function returns nothing.
+ */
+static void tpm2_flush_context_cmd(struct tpm_chip *chip, u32 handle,
+ unsigned int flags)
{
struct tpm_buf buf;
int rc;
rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_FLUSH_CONTEXT);
if (rc) {
- dev_warn(chip->pdev, "0x%08x was not flushed, out of memory\n",
+ dev_warn(&chip->dev, "0x%08x was not flushed, out of memory\n",
handle);
return;
}
tpm_buf_append_u32(&buf, handle);
- rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, "flushing context");
+ rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, flags,
+ "flushing context");
if (rc)
- dev_warn(chip->pdev, "0x%08x was not flushed, rc=%d\n", handle,
+ dev_warn(&chip->dev, "0x%08x was not flushed, rc=%d\n", handle,
rc);
tpm_buf_destroy(&buf);
}
-static int tpm2_unseal(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options,
- u32 blob_handle)
+/**
+ * tpm2_unseal_cmd() - execute a TPM2_Unseal command
+ * @chip: TPM chip to use
+ * @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
+ * @blob_handle: the handle of the loaded blob to unseal
+ * @flags: tpm transmit flags
+ *
+ * Return: same as with tpm_transmit_cmd
+ */
+static int tpm2_unseal_cmd(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options,
+ u32 blob_handle, unsigned int flags)
{
struct tpm_buf buf;
u16 data_len;
@@ -635,7 +661,7 @@ static int tpm2_unseal(struct tpm_chip *chip,
options->blobauth /* hmac */,
TPM_DIGEST_SIZE);
- rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, "unsealing");
+ rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, flags, "unsealing");
if (rc > 0)
rc = -EPERM;
@@ -654,12 +680,12 @@ static int tpm2_unseal(struct tpm_chip *chip,
}
/**
- * tpm_unseal_trusted() - unseal a trusted key
- * @chip_num: A specific chip number for the request or TPM_ANY_NUM
- * @options: authentication values and other options
+ * tpm_unseal_trusted() - unseal the payload of a trusted key
+ * @chip_num: TPM chip to use
* @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
*
- * Returns < 0 on error and 0 on success.
+ * Return: < 0 on error and 0 on success.
*/
int tpm2_unseal_trusted(struct tpm_chip *chip,
struct trusted_key_payload *payload,
@@ -668,14 +694,17 @@ int tpm2_unseal_trusted(struct tpm_chip *chip,
u32 blob_handle;
int rc;
- rc = tpm2_load(chip, payload, options, &blob_handle);
+ mutex_lock(&chip->tpm_mutex);
+ rc = tpm2_load_cmd(chip, payload, options, &blob_handle,
+ TPM_TRANSMIT_UNLOCKED);
if (rc)
- return rc;
-
- rc = tpm2_unseal(chip, payload, options, blob_handle);
-
- tpm2_flush_context(chip, blob_handle);
+ goto out;
+ rc = tpm2_unseal_cmd(chip, payload, options, blob_handle,
+ TPM_TRANSMIT_UNLOCKED);
+ tpm2_flush_context_cmd(chip, blob_handle, TPM_TRANSMIT_UNLOCKED);
+out:
+ mutex_unlock(&chip->tpm_mutex);
return rc;
}
@@ -701,9 +730,9 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value,
cmd.params.get_tpm_pt_in.property_id = cpu_to_be32(property_id);
cmd.params.get_tpm_pt_in.property_cnt = cpu_to_be32(1);
- rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), desc);
+ rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0, desc);
if (!rc)
- *value = cmd.params.get_tpm_pt_out.value;
+ *value = be32_to_cpu(cmd.params.get_tpm_pt_out.value);
return rc;
}
@@ -728,17 +757,16 @@ static const struct tpm_input_header tpm2_startup_header = {
* returned it denotes a POSIX error code. If a positive number is returned
* it denotes a TPM error.
*/
-int tpm2_startup(struct tpm_chip *chip, u16 startup_type)
+static int tpm2_startup(struct tpm_chip *chip, u16 startup_type)
{
struct tpm2_cmd cmd;
cmd.header.in = tpm2_startup_header;
cmd.params.startup_in.startup_type = cpu_to_be16(startup_type);
- return tpm_transmit_cmd(chip, &cmd, sizeof(cmd),
+ return tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0,
"attempting to start the TPM");
}
-EXPORT_SYMBOL_GPL(tpm2_startup);
#define TPM2_SHUTDOWN_IN_SIZE \
(sizeof(struct tpm_input_header) + \
@@ -764,16 +792,15 @@ void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type)
cmd.header.in = tpm2_shutdown_header;
cmd.params.startup_in.startup_type = cpu_to_be16(shutdown_type);
- rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), "stopping the TPM");
+ rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0, "stopping the TPM");
/* In places where the shutdown command is sent there's not much we can do
* except print the error code on a system failure.
*/
if (rc < 0)
- dev_warn(chip->pdev, "transmit returned %d while stopping the TPM",
+ dev_warn(&chip->dev, "transmit returned %d while stopping the TPM",
rc);
}
-EXPORT_SYMBOL_GPL(tpm2_shutdown);
/*
* tpm2_calc_ordinal_duration() - maximum duration for a command
@@ -793,7 +820,7 @@ unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal)
index = tpm2_ordinal_duration[ordinal - TPM2_CC_FIRST];
if (index != TPM_UNDEFINED)
- duration = chip->vendor.duration[index];
+ duration = chip->duration[index];
if (duration <= 0)
duration = 2 * 60 * HZ;
@@ -830,14 +857,14 @@ static int tpm2_start_selftest(struct tpm_chip *chip, bool full)
cmd.header.in = tpm2_selftest_header;
cmd.params.selftest_in.full_test = full;
- rc = tpm_transmit_cmd(chip, &cmd, TPM2_SELF_TEST_IN_SIZE,
+ rc = tpm_transmit_cmd(chip, &cmd, TPM2_SELF_TEST_IN_SIZE, 0,
"continue selftest");
/* At least some prototype chips seem to give RC_TESTING error
* immediately. This is a workaround for that.
*/
if (rc == TPM2_RC_TESTING) {
- dev_warn(chip->pdev, "Got RC_TESTING, ignoring\n");
+ dev_warn(&chip->dev, "Got RC_TESTING, ignoring\n");
rc = 0;
}
@@ -855,7 +882,7 @@ static int tpm2_start_selftest(struct tpm_chip *chip, bool full)
* returned it denotes a POSIX error code. If a positive number is returned
* it denotes a TPM error.
*/
-int tpm2_do_selftest(struct tpm_chip *chip)
+static int tpm2_do_selftest(struct tpm_chip *chip)
{
int rc;
unsigned int loops;
@@ -882,7 +909,7 @@ int tpm2_do_selftest(struct tpm_chip *chip)
cmd.params.pcrread_in.pcr_select[1] = 0x00;
cmd.params.pcrread_in.pcr_select[2] = 0x00;
- rc = tpm_transmit_cmd(chip, (u8 *) &cmd, sizeof(cmd), NULL);
+ rc = tpm_transmit_cmd(chip, &cmd, sizeof(cmd), 0, NULL);
if (rc < 0)
break;
@@ -895,7 +922,6 @@ int tpm2_do_selftest(struct tpm_chip *chip)
return rc;
}
-EXPORT_SYMBOL_GPL(tpm2_do_selftest);
/**
* tpm2_gen_interrupt() - generate an interrupt
@@ -931,7 +957,7 @@ int tpm2_probe(struct tpm_chip *chip)
cmd.params.get_tpm_pt_in.property_id = cpu_to_be32(0x100);
cmd.params.get_tpm_pt_in.property_cnt = cpu_to_be32(1);
- rc = tpm_transmit(chip, (const char *) &cmd, sizeof(cmd));
+ rc = tpm_transmit(chip, (const u8 *)&cmd, sizeof(cmd), 0);
if (rc < 0)
return rc;
else if (rc < TPM_HEADER_SIZE)
@@ -943,3 +969,42 @@ int tpm2_probe(struct tpm_chip *chip)
return 0;
}
EXPORT_SYMBOL_GPL(tpm2_probe);
+
+/**
+ * tpm2_auto_startup - Perform the standard automatic TPM initialization
+ * sequence
+ * @chip: TPM chip to use
+ *
+ * Returns 0 on success, < 0 in case of fatal error.
+ */
+int tpm2_auto_startup(struct tpm_chip *chip)
+{
+ int rc;
+
+ rc = tpm_get_timeouts(chip);
+ if (rc)
+ goto out;
+
+ rc = tpm2_do_selftest(chip);
+ if (rc != 0 && rc != TPM2_RC_INITIALIZE) {
+ dev_err(&chip->dev, "TPM self test failed\n");
+ goto out;
+ }
+
+ if (rc == TPM2_RC_INITIALIZE) {
+ rc = tpm2_startup(chip, TPM2_SU_CLEAR);
+ if (rc)
+ goto out;
+
+ rc = tpm2_do_selftest(chip);
+ if (rc) {
+ dev_err(&chip->dev, "TPM self test failed\n");
+ goto out;
+ }
+ }
+
+out:
+ if (rc > 0)
+ rc = -ENODEV;
+ return rc;
+}
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
index dfadad091..0d322ab11 100644
--- a/drivers/char/tpm/tpm_atmel.c
+++ b/drivers/char/tpm/tpm_atmel.c
@@ -37,6 +37,7 @@ enum tpm_atmel_read_status {
static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
+ struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev);
u8 status, *hdr = buf;
u32 size;
int i;
@@ -47,12 +48,12 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count)
return -EIO;
for (i = 0; i < 6; i++) {
- status = ioread8(chip->vendor.iobase + 1);
+ status = ioread8(priv->iobase + 1);
if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
- dev_err(chip->pdev, "error reading header\n");
+ dev_err(&chip->dev, "error reading header\n");
return -EIO;
}
- *buf++ = ioread8(chip->vendor.iobase);
+ *buf++ = ioread8(priv->iobase);
}
/* size of the data received */
@@ -60,12 +61,12 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count)
size = be32_to_cpu(*native_size);
if (count < size) {
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"Recv size(%d) less than available space\n", size);
for (; i < size; i++) { /* clear the waiting data anyway */
- status = ioread8(chip->vendor.iobase + 1);
+ status = ioread8(priv->iobase + 1);
if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
- dev_err(chip->pdev, "error reading data\n");
+ dev_err(&chip->dev, "error reading data\n");
return -EIO;
}
}
@@ -74,19 +75,19 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count)
/* read all the data available */
for (; i < size; i++) {
- status = ioread8(chip->vendor.iobase + 1);
+ status = ioread8(priv->iobase + 1);
if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
- dev_err(chip->pdev, "error reading data\n");
+ dev_err(&chip->dev, "error reading data\n");
return -EIO;
}
- *buf++ = ioread8(chip->vendor.iobase);
+ *buf++ = ioread8(priv->iobase);
}
/* make sure data available is gone */
- status = ioread8(chip->vendor.iobase + 1);
+ status = ioread8(priv->iobase + 1);
if (status & ATML_STATUS_DATA_AVAIL) {
- dev_err(chip->pdev, "data available is stuck\n");
+ dev_err(&chip->dev, "data available is stuck\n");
return -EIO;
}
@@ -95,12 +96,13 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count)
static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
+ struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev);
int i;
- dev_dbg(chip->pdev, "tpm_atml_send:\n");
+ dev_dbg(&chip->dev, "tpm_atml_send:\n");
for (i = 0; i < count; i++) {
- dev_dbg(chip->pdev, "%d 0x%x(%d)\n", i, buf[i], buf[i]);
- iowrite8(buf[i], chip->vendor.iobase);
+ dev_dbg(&chip->dev, "%d 0x%x(%d)\n", i, buf[i], buf[i]);
+ iowrite8(buf[i], priv->iobase);
}
return count;
@@ -108,12 +110,16 @@ static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count)
static void tpm_atml_cancel(struct tpm_chip *chip)
{
- iowrite8(ATML_STATUS_ABORT, chip->vendor.iobase + 1);
+ struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev);
+
+ iowrite8(ATML_STATUS_ABORT, priv->iobase + 1);
}
static u8 tpm_atml_status(struct tpm_chip *chip)
{
- return ioread8(chip->vendor.iobase + 1);
+ struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev);
+
+ return ioread8(priv->iobase + 1);
}
static bool tpm_atml_req_canceled(struct tpm_chip *chip, u8 status)
@@ -136,13 +142,13 @@ static struct platform_device *pdev;
static void atml_plat_remove(void)
{
struct tpm_chip *chip = dev_get_drvdata(&pdev->dev);
+ struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev);
if (chip) {
tpm_chip_unregister(chip);
- if (chip->vendor.have_region)
- atmel_release_region(chip->vendor.base,
- chip->vendor.region_size);
- atmel_put_base_addr(chip->vendor.iobase);
+ if (priv->have_region)
+ atmel_release_region(priv->base, priv->region_size);
+ atmel_put_base_addr(priv->iobase);
platform_device_unregister(pdev);
}
}
@@ -163,6 +169,7 @@ static int __init init_atmel(void)
int have_region, region_size;
unsigned long base;
struct tpm_chip *chip;
+ struct tpm_atmel_priv *priv;
rc = platform_driver_register(&atml_drv);
if (rc)
@@ -183,16 +190,24 @@ static int __init init_atmel(void)
goto err_rel_reg;
}
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ rc = -ENOMEM;
+ goto err_unreg_dev;
+ }
+
+ priv->iobase = iobase;
+ priv->base = base;
+ priv->have_region = have_region;
+ priv->region_size = region_size;
+
chip = tpmm_chip_alloc(&pdev->dev, &tpm_atmel);
if (IS_ERR(chip)) {
rc = PTR_ERR(chip);
goto err_unreg_dev;
}
- chip->vendor.iobase = iobase;
- chip->vendor.base = base;
- chip->vendor.have_region = have_region;
- chip->vendor.region_size = region_size;
+ dev_set_drvdata(&chip->dev, priv);
rc = tpm_chip_register(chip);
if (rc)
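
[Editor's note] The tpm_atmel.c hunks above all make the same substitution: the removed chip->vendor fields become a tpm_atmel_priv instance hung off the chip device's driver data. A minimal sketch of the pattern, with an accessor name that is illustrative rather than taken from the patch:

	static inline struct tpm_atmel_priv *tpm_atmel_priv_of(struct tpm_chip *chip)
	{
		/* init_atmel() stored the private struct on the chip device
		 * created by tpmm_chip_alloc(), via dev_set_drvdata(). */
		return dev_get_drvdata(&chip->dev);
	}

Each callback (recv, send, cancel, status) then starts from the chip pointer it is handed and no longer touches shared vendor state.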
diff --git a/drivers/char/tpm/tpm_atmel.h b/drivers/char/tpm/tpm_atmel.h
index 6c831f946..4f96d80cd 100644
--- a/drivers/char/tpm/tpm_atmel.h
+++ b/drivers/char/tpm/tpm_atmel.h
@@ -22,12 +22,19 @@
*
*/
+struct tpm_atmel_priv {
+ int region_size;
+ int have_region;
+ unsigned long base;
+ void __iomem *iobase;
+};
+
#ifdef CONFIG_PPC64
#include <asm/prom.h>
-#define atmel_getb(chip, offset) readb(chip->vendor->iobase + offset);
-#define atmel_putb(val, chip, offset) writeb(val, chip->vendor->iobase + offset)
+#define atmel_getb(priv, offset) readb(priv->iobase + offset)
+#define atmel_putb(val, priv, offset) writeb(val, priv->iobase + offset)
#define atmel_request_region request_mem_region
#define atmel_release_region release_mem_region
@@ -78,8 +85,9 @@ static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size)
return ioremap(*base, *region_size);
}
#else
-#define atmel_getb(chip, offset) inb(chip->vendor->base + offset)
-#define atmel_putb(val, chip, offset) outb(val, chip->vendor->base + offset)
+#define atmel_getb(chip, offset) inb(atmel_get_priv(chip)->base + offset)
+#define atmel_putb(val, chip, offset) \
+ outb(val, atmel_get_priv(chip)->base + offset)
#define atmel_request_region request_region
#define atmel_release_region release_region
/* Atmel definitions */
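
[Editor's note] The non-PPC64 atmel_getb()/atmel_putb() macros now go through atmel_get_priv(), a helper whose definition is not visible in the quoted hunks. A plausible definition consistent with the rest of the patch (assumed, not quoted):

	static inline struct tpm_atmel_priv *atmel_get_priv(struct tpm_chip *chip)
	{
		return dev_get_drvdata(&chip->dev);
	}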
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index e9fd1d83f..1801f3823 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -77,7 +77,6 @@ enum crb_flags {
struct crb_priv {
unsigned int flags;
- struct resource res;
void __iomem *iobase;
struct crb_control_area __iomem *cca;
u8 __iomem *cmd;
@@ -88,7 +87,7 @@ static SIMPLE_DEV_PM_OPS(crb_pm, tpm_pm_suspend, tpm_pm_resume);
static u8 crb_status(struct tpm_chip *chip)
{
- struct crb_priv *priv = chip->vendor.priv;
+ struct crb_priv *priv = dev_get_drvdata(&chip->dev);
u8 sts = 0;
if ((ioread32(&priv->cca->start) & CRB_START_INVOKE) !=
@@ -100,7 +99,7 @@ static u8 crb_status(struct tpm_chip *chip)
static int crb_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
- struct crb_priv *priv = chip->vendor.priv;
+ struct crb_priv *priv = dev_get_drvdata(&chip->dev);
unsigned int expected;
/* sanity check */
@@ -140,9 +139,14 @@ static int crb_do_acpi_start(struct tpm_chip *chip)
static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
{
- struct crb_priv *priv = chip->vendor.priv;
+ struct crb_priv *priv = dev_get_drvdata(&chip->dev);
int rc = 0;
+ /* Zero the cancel register so that the next command will not get
+ * canceled.
+ */
+ iowrite32(0, &priv->cca->cancel);
+
if (len > ioread32(&priv->cca->cmd_size)) {
dev_err(&chip->dev,
"invalid command count value %x %zx\n",
@@ -167,7 +171,7 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
static void crb_cancel(struct tpm_chip *chip)
{
- struct crb_priv *priv = chip->vendor.priv;
+ struct crb_priv *priv = dev_get_drvdata(&chip->dev);
iowrite32(cpu_to_le32(CRB_CANCEL_INVOKE), &priv->cca->cancel);
@@ -176,19 +180,18 @@ static void crb_cancel(struct tpm_chip *chip)
if ((priv->flags & CRB_FL_ACPI_START) && crb_do_acpi_start(chip))
dev_err(&chip->dev, "ACPI Start failed\n");
-
- iowrite32(0, &priv->cca->cancel);
}
static bool crb_req_canceled(struct tpm_chip *chip, u8 status)
{
- struct crb_priv *priv = chip->vendor.priv;
+ struct crb_priv *priv = dev_get_drvdata(&chip->dev);
u32 cancel = ioread32(&priv->cca->cancel);
return (cancel & CRB_CANCEL_INVOKE) == CRB_CANCEL_INVOKE;
}
static const struct tpm_class_ops tpm_crb = {
+ .flags = TPM_OPS_AUTO_STARTUP,
.status = crb_status,
.recv = crb_recv,
.send = crb_send,
@@ -201,42 +204,33 @@ static const struct tpm_class_ops tpm_crb = {
static int crb_init(struct acpi_device *device, struct crb_priv *priv)
{
struct tpm_chip *chip;
- int rc;
chip = tpmm_chip_alloc(&device->dev, &tpm_crb);
if (IS_ERR(chip))
return PTR_ERR(chip);
- chip->vendor.priv = priv;
+ dev_set_drvdata(&chip->dev, priv);
chip->acpi_dev_handle = device->handle;
chip->flags = TPM_CHIP_FLAG_TPM2;
- rc = tpm_get_timeouts(chip);
- if (rc)
- return rc;
-
- rc = tpm2_do_selftest(chip);
- if (rc)
- return rc;
-
return tpm_chip_register(chip);
}
static int crb_check_resource(struct acpi_resource *ares, void *data)
{
- struct crb_priv *priv = data;
+ struct resource *io_res = data;
struct resource res;
if (acpi_dev_resource_memory(ares, &res)) {
- priv->res = res;
- priv->res.name = NULL;
+ *io_res = res;
+ io_res->name = NULL;
}
return 1;
}
static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv,
- u64 start, u32 size)
+ struct resource *io_res, u64 start, u32 size)
{
struct resource new_res = {
.start = start,
@@ -248,51 +242,72 @@ static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv,
if (start != new_res.start)
return (void __iomem *) ERR_PTR(-EINVAL);
- if (!resource_contains(&priv->res, &new_res))
+ if (!resource_contains(io_res, &new_res))
return devm_ioremap_resource(dev, &new_res);
- return priv->iobase + (new_res.start - priv->res.start);
+ return priv->iobase + (new_res.start - io_res->start);
}
static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
struct acpi_table_tpm2 *buf)
{
struct list_head resources;
+ struct resource io_res;
struct device *dev = &device->dev;
- u64 pa;
+ u64 cmd_pa;
+ u32 cmd_size;
+ u64 rsp_pa;
+ u32 rsp_size;
int ret;
INIT_LIST_HEAD(&resources);
ret = acpi_dev_get_resources(device, &resources, crb_check_resource,
- priv);
+ &io_res);
if (ret < 0)
return ret;
acpi_dev_free_resource_list(&resources);
- if (resource_type(&priv->res) != IORESOURCE_MEM) {
+ if (resource_type(&io_res) != IORESOURCE_MEM) {
dev_err(dev,
FW_BUG "TPM2 ACPI table does not define a memory resource\n");
return -EINVAL;
}
- priv->iobase = devm_ioremap_resource(dev, &priv->res);
+ priv->iobase = devm_ioremap_resource(dev, &io_res);
if (IS_ERR(priv->iobase))
return PTR_ERR(priv->iobase);
- priv->cca = crb_map_res(dev, priv, buf->control_address, 0x1000);
+ priv->cca = crb_map_res(dev, priv, &io_res, buf->control_address,
+ sizeof(struct crb_control_area));
if (IS_ERR(priv->cca))
return PTR_ERR(priv->cca);
- pa = ((u64) ioread32(&priv->cca->cmd_pa_high) << 32) |
- (u64) ioread32(&priv->cca->cmd_pa_low);
- priv->cmd = crb_map_res(dev, priv, pa, ioread32(&priv->cca->cmd_size));
+ cmd_pa = ((u64) ioread32(&priv->cca->cmd_pa_high) << 32) |
+ (u64) ioread32(&priv->cca->cmd_pa_low);
+ cmd_size = ioread32(&priv->cca->cmd_size);
+ priv->cmd = crb_map_res(dev, priv, &io_res, cmd_pa, cmd_size);
if (IS_ERR(priv->cmd))
return PTR_ERR(priv->cmd);
- memcpy_fromio(&pa, &priv->cca->rsp_pa, 8);
- pa = le64_to_cpu(pa);
- priv->rsp = crb_map_res(dev, priv, pa, ioread32(&priv->cca->rsp_size));
- return PTR_ERR_OR_ZERO(priv->rsp);
+ memcpy_fromio(&rsp_pa, &priv->cca->rsp_pa, 8);
+ rsp_pa = le64_to_cpu(rsp_pa);
+ rsp_size = ioread32(&priv->cca->rsp_size);
+
+ if (cmd_pa != rsp_pa) {
+ priv->rsp = crb_map_res(dev, priv, &io_res, rsp_pa, rsp_size);
+ return PTR_ERR_OR_ZERO(priv->rsp);
+ }
+
+ /* According to the PTP specification, overlapping command and response
+ * buffer sizes must be identical.
+ */
+ if (cmd_size != rsp_size) {
+ dev_err(dev, FW_BUG "overlapping command and response buffer sizes are not identical");
+ return -EINVAL;
+ }
+
+ priv->rsp = priv->cmd;
+ return 0;
}
static int crb_acpi_add(struct acpi_device *device)
@@ -344,9 +359,6 @@ static int crb_acpi_remove(struct acpi_device *device)
struct device *dev = &device->dev;
struct tpm_chip *chip = dev_get_drvdata(dev);
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- tpm2_shutdown(chip, TPM2_SU_CLEAR);
-
tpm_chip_unregister(chip);
return 0;
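
[Editor's note] Two behavioral points in the CRB hunks are worth spelling out. First, clearing the cancel register moves from the end of crb_cancel() to the top of crb_send(), so a cancellation can no longer be erased before the TPM has acted on it, and a stale CRB_CANCEL_INVOKE cannot abort the next command. Second, crb_map_io() now tolerates firmware that reports one shared buffer; a sketch of the rule the new code enforces:

	/* If the command and response buffers overlap, the PTP spec
	 * requires their sizes to match; reuse the single mapping. */
	if (cmd_pa == rsp_pa) {
		if (cmd_size != rsp_size)
			return -EINVAL;		/* firmware bug */
		priv->rsp = priv->cmd;
	}

The dropped tpm_get_timeouts()/tpm2_do_selftest() calls are not lost: the TPM_OPS_AUTO_STARTUP flag added to tpm_crb tells the core to run the startup sequence (timeouts, self test) during tpm_chip_register().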
diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
index 4e6940acf..e72288632 100644
--- a/drivers/char/tpm/tpm_eventlog.c
+++ b/drivers/char/tpm/tpm_eventlog.c
@@ -403,7 +403,7 @@ static int is_bad(void *p)
return 0;
}
-struct dentry **tpm_bios_log_setup(char *name)
+struct dentry **tpm_bios_log_setup(const char *name)
{
struct dentry **ret = NULL, *tpm_dir, *bin_file, *ascii_file;
diff --git a/drivers/char/tpm/tpm_eventlog.h b/drivers/char/tpm/tpm_eventlog.h
index 267bfbd1b..8de62b09b 100644
--- a/drivers/char/tpm/tpm_eventlog.h
+++ b/drivers/char/tpm/tpm_eventlog.h
@@ -77,10 +77,10 @@ int read_log(struct tpm_bios_log *log);
#if defined(CONFIG_TCG_IBMVTPM) || defined(CONFIG_TCG_IBMVTPM_MODULE) || \
defined(CONFIG_ACPI)
-extern struct dentry **tpm_bios_log_setup(char *);
+extern struct dentry **tpm_bios_log_setup(const char *);
extern void tpm_bios_log_teardown(struct dentry **);
#else
-static inline struct dentry **tpm_bios_log_setup(char *name)
+static inline struct dentry **tpm_bios_log_setup(const char *name)
{
return NULL;
}
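
[Editor's note] Constifying the name parameter is a small API cleanup: tpm_bios_log_setup() only reads the string, so callers can now pass const data such as the device name directly. A hedged sketch of the kind of call site this enables (assumed, not part of the quoted hunks):

	chip->bios_dir = tpm_bios_log_setup(dev_name(&chip->dev));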
diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
index 8dfb88b97..95ce2e9cc 100644
--- a/drivers/char/tpm/tpm_i2c_atmel.c
+++ b/drivers/char/tpm/tpm_i2c_atmel.c
@@ -51,8 +51,8 @@ struct priv_data {
static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
{
- struct priv_data *priv = chip->vendor.priv;
- struct i2c_client *client = to_i2c_client(chip->pdev);
+ struct priv_data *priv = dev_get_drvdata(&chip->dev);
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
s32 status;
priv->len = 0;
@@ -62,7 +62,7 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
status = i2c_master_send(client, buf, len);
- dev_dbg(chip->pdev,
+ dev_dbg(&chip->dev,
"%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__,
(int)min_t(size_t, 64, len), buf, len, status);
return status;
@@ -70,8 +70,8 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
- struct priv_data *priv = chip->vendor.priv;
- struct i2c_client *client = to_i2c_client(chip->pdev);
+ struct priv_data *priv = dev_get_drvdata(&chip->dev);
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
struct tpm_output_header *hdr =
(struct tpm_output_header *)priv->buffer;
u32 expected_len;
@@ -88,7 +88,7 @@ static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
return -ENOMEM;
if (priv->len >= expected_len) {
- dev_dbg(chip->pdev,
+ dev_dbg(&chip->dev,
"%s early(buf=%*ph count=%0zx) -> ret=%d\n", __func__,
(int)min_t(size_t, 64, expected_len), buf, count,
expected_len);
@@ -97,7 +97,7 @@ static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
}
rc = i2c_master_recv(client, buf, expected_len);
- dev_dbg(chip->pdev,
+ dev_dbg(&chip->dev,
"%s reread(buf=%*ph count=%0zx) -> ret=%d\n", __func__,
(int)min_t(size_t, 64, expected_len), buf, count,
expected_len);
@@ -106,13 +106,13 @@ static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
static void i2c_atmel_cancel(struct tpm_chip *chip)
{
- dev_err(chip->pdev, "TPM operation cancellation was requested, but is not supported");
+ dev_err(&chip->dev, "TPM operation cancellation was requested, but is not supported");
}
static u8 i2c_atmel_read_status(struct tpm_chip *chip)
{
- struct priv_data *priv = chip->vendor.priv;
- struct i2c_client *client = to_i2c_client(chip->pdev);
+ struct priv_data *priv = dev_get_drvdata(&chip->dev);
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
int rc;
/* The TPM fails the I2C read until it is ready, so we do the entire
@@ -125,7 +125,7 @@ static u8 i2c_atmel_read_status(struct tpm_chip *chip)
/* Once the TPM has completed the command the command remains readable
* until another command is issued. */
rc = i2c_master_recv(client, priv->buffer, sizeof(priv->buffer));
- dev_dbg(chip->pdev,
+ dev_dbg(&chip->dev,
"%s: sts=%d", __func__, rc);
if (rc <= 0)
return 0;
@@ -141,6 +141,7 @@ static bool i2c_atmel_req_canceled(struct tpm_chip *chip, u8 status)
}
static const struct tpm_class_ops i2c_atmel = {
+ .flags = TPM_OPS_AUTO_STARTUP,
.status = i2c_atmel_read_status,
.recv = i2c_atmel_recv,
.send = i2c_atmel_send,
@@ -155,6 +156,7 @@ static int i2c_atmel_probe(struct i2c_client *client,
{
struct tpm_chip *chip;
struct device *dev = &client->dev;
+ struct priv_data *priv;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
@@ -163,26 +165,21 @@ static int i2c_atmel_probe(struct i2c_client *client,
if (IS_ERR(chip))
return PTR_ERR(chip);
- chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data),
- GFP_KERNEL);
- if (!chip->vendor.priv)
+ priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
/* Default timeouts */
- chip->vendor.timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
- chip->vendor.timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT);
- chip->vendor.timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
- chip->vendor.timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
- chip->vendor.irq = 0;
+ chip->timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+ chip->timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT);
+ chip->timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+ chip->timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+
+ dev_set_drvdata(&chip->dev, priv);
/* There is no known way to probe for this device, and all version
* information seems to be read via TPM commands. Thus we rely on the
* TPM startup process in the common code to detect the device. */
- if (tpm_get_timeouts(chip))
- return -ENODEV;
-
- if (tpm_do_selftest(chip))
- return -ENODEV;
return tpm_chip_register(chip);
}
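
[Editor's note] As with the CRB driver, the Atmel I2C probe stops calling tpm_get_timeouts() and tpm_do_selftest() by hand and instead sets TPM_OPS_AUTO_STARTUP, letting the core perform startup when the chip registers. The other recurring substitution is reaching the bus device through the chip: chip->pdev is gone, and the parent pointer set up by tpmm_chip_alloc() is used instead. A one-line sketch of the idiom:

	struct i2c_client *client = to_i2c_client(chip->dev.parent);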
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index 63d5d22e9..62ee44e57 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -66,6 +66,7 @@ enum i2c_chip_type {
/* Structure to store I2C TPM specific stuff */
struct tpm_inf_dev {
struct i2c_client *client;
+ int locality;
u8 buf[TPM_BUFSIZE + sizeof(u8)]; /* max. buffer size + addr */
struct tpm_chip *chip;
enum i2c_chip_type chip_type;
@@ -288,7 +289,7 @@ static int check_locality(struct tpm_chip *chip, int loc)
if ((buf & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
(TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) {
- chip->vendor.locality = loc;
+ tpm_dev.locality = loc;
return loc;
}
@@ -320,7 +321,7 @@ static int request_locality(struct tpm_chip *chip, int loc)
iic_tpm_write(TPM_ACCESS(loc), &buf, 1);
/* wait for burstcount */
- stop = jiffies + chip->vendor.timeout_a;
+ stop = jiffies + chip->timeout_a;
do {
if (check_locality(chip, loc) >= 0)
return loc;
@@ -337,7 +338,7 @@ static u8 tpm_tis_i2c_status(struct tpm_chip *chip)
u8 i = 0;
do {
- if (iic_tpm_read(TPM_STS(chip->vendor.locality), &buf, 1) < 0)
+ if (iic_tpm_read(TPM_STS(tpm_dev.locality), &buf, 1) < 0)
return 0;
i++;
@@ -351,7 +352,7 @@ static void tpm_tis_i2c_ready(struct tpm_chip *chip)
{
/* this causes the current command to be aborted */
u8 buf = TPM_STS_COMMAND_READY;
- iic_tpm_write_long(TPM_STS(chip->vendor.locality), &buf, 1);
+ iic_tpm_write_long(TPM_STS(tpm_dev.locality), &buf, 1);
}
static ssize_t get_burstcount(struct tpm_chip *chip)
@@ -362,10 +363,10 @@ static ssize_t get_burstcount(struct tpm_chip *chip)
/* wait for burstcount */
/* which timeout value, spec has 2 answers (c & d) */
- stop = jiffies + chip->vendor.timeout_d;
+ stop = jiffies + chip->timeout_d;
do {
/* Note: STS is little endian */
- if (iic_tpm_read(TPM_STS(chip->vendor.locality)+1, buf, 3) < 0)
+ if (iic_tpm_read(TPM_STS(tpm_dev.locality)+1, buf, 3) < 0)
burstcnt = 0;
else
burstcnt = (buf[2] << 16) + (buf[1] << 8) + buf[0];
@@ -419,7 +420,7 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
if (burstcnt > (count - size))
burstcnt = count - size;
- rc = iic_tpm_read(TPM_DATA_FIFO(chip->vendor.locality),
+ rc = iic_tpm_read(TPM_DATA_FIFO(tpm_dev.locality),
&(buf[size]), burstcnt);
if (rc == 0)
size += burstcnt;
@@ -446,7 +447,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
/* read first 10 bytes, including tag, paramsize, and result */
size = recv_data(chip, buf, TPM_HEADER_SIZE);
if (size < TPM_HEADER_SIZE) {
- dev_err(chip->pdev, "Unable to read header\n");
+ dev_err(&chip->dev, "Unable to read header\n");
goto out;
}
@@ -459,14 +460,14 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
size += recv_data(chip, &buf[TPM_HEADER_SIZE],
expected - TPM_HEADER_SIZE);
if (size < expected) {
- dev_err(chip->pdev, "Unable to read remainder of result\n");
+ dev_err(&chip->dev, "Unable to read remainder of result\n");
size = -ETIME;
goto out;
}
- wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, &status);
+ wait_for_stat(chip, TPM_STS_VALID, chip->timeout_c, &status);
if (status & TPM_STS_DATA_AVAIL) { /* retry? */
- dev_err(chip->pdev, "Error left over data\n");
+ dev_err(&chip->dev, "Error left over data\n");
size = -EIO;
goto out;
}
@@ -477,7 +478,7 @@ out:
* so we sleep rather than keeping the bus busy
*/
usleep_range(SLEEP_DURATION_RESET_LOW, SLEEP_DURATION_RESET_HI);
- release_locality(chip, chip->vendor.locality, 0);
+ release_locality(chip, tpm_dev.locality, 0);
return size;
}
@@ -500,7 +501,7 @@ static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len)
tpm_tis_i2c_ready(chip);
if (wait_for_stat
(chip, TPM_STS_COMMAND_READY,
- chip->vendor.timeout_b, &status) < 0) {
+ chip->timeout_b, &status) < 0) {
rc = -ETIME;
goto out_err;
}
@@ -516,7 +517,7 @@ static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len)
if (burstcnt > (len - 1 - count))
burstcnt = len - 1 - count;
- rc = iic_tpm_write(TPM_DATA_FIFO(chip->vendor.locality),
+ rc = iic_tpm_write(TPM_DATA_FIFO(tpm_dev.locality),
&(buf[count]), burstcnt);
if (rc == 0)
count += burstcnt;
@@ -530,7 +531,7 @@ static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len)
}
wait_for_stat(chip, TPM_STS_VALID,
- chip->vendor.timeout_c, &status);
+ chip->timeout_c, &status);
if ((status & TPM_STS_DATA_EXPECT) == 0) {
rc = -EIO;
@@ -539,15 +540,15 @@ static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len)
}
/* write last byte */
- iic_tpm_write(TPM_DATA_FIFO(chip->vendor.locality), &(buf[count]), 1);
- wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, &status);
+ iic_tpm_write(TPM_DATA_FIFO(tpm_dev.locality), &(buf[count]), 1);
+ wait_for_stat(chip, TPM_STS_VALID, chip->timeout_c, &status);
if ((status & TPM_STS_DATA_EXPECT) != 0) {
rc = -EIO;
goto out_err;
}
/* go and do it */
- iic_tpm_write(TPM_STS(chip->vendor.locality), &sts, 1);
+ iic_tpm_write(TPM_STS(tpm_dev.locality), &sts, 1);
return len;
out_err:
@@ -556,7 +557,7 @@ out_err:
* so we sleep rather than keeping the bus busy
*/
usleep_range(SLEEP_DURATION_RESET_LOW, SLEEP_DURATION_RESET_HI);
- release_locality(chip, chip->vendor.locality, 0);
+ release_locality(chip, tpm_dev.locality, 0);
return rc;
}
@@ -566,6 +567,7 @@ static bool tpm_tis_i2c_req_canceled(struct tpm_chip *chip, u8 status)
}
static const struct tpm_class_ops tpm_tis_i2c = {
+ .flags = TPM_OPS_AUTO_STARTUP,
.status = tpm_tis_i2c_status,
.recv = tpm_tis_i2c_recv,
.send = tpm_tis_i2c_send,
@@ -585,14 +587,11 @@ static int tpm_tis_i2c_init(struct device *dev)
if (IS_ERR(chip))
return PTR_ERR(chip);
- /* Disable interrupts */
- chip->vendor.irq = 0;
-
/* Default timeouts */
- chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
- chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
- chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
- chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
+ chip->timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
+ chip->timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
if (request_locality(chip, 0) != 0) {
dev_err(dev, "could not request locality\n");
@@ -619,15 +618,11 @@ static int tpm_tis_i2c_init(struct device *dev)
dev_info(dev, "1.2 TPM (device-id 0x%X)\n", vendor >> 16);
- INIT_LIST_HEAD(&chip->vendor.list);
tpm_dev.chip = chip;
- tpm_get_timeouts(chip);
- tpm_do_selftest(chip);
-
return tpm_chip_register(chip);
out_release:
- release_locality(chip, chip->vendor.locality, 1);
+ release_locality(chip, tpm_dev.locality, 1);
tpm_dev.client = NULL;
out_err:
return rc;
@@ -699,7 +694,7 @@ static int tpm_tis_i2c_remove(struct i2c_client *client)
struct tpm_chip *chip = tpm_dev.chip;
tpm_chip_unregister(chip);
- release_locality(chip, chip->vendor.locality, 1);
+ release_locality(chip, tpm_dev.locality, 1);
tpm_dev.client = NULL;
return 0;
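
[Editor's note] The Infineon I2C driver keeps no per-chip private struct: it supports a single instance whose state lives in the file-scope tpm_dev, so the active locality simply moves from the deleted chip->vendor.locality into tpm_dev.locality, while the generic timeouts move to the fields now living directly in struct tpm_chip. A sketch of the resulting access pattern (names as in the patch):

	/* single-instance driver: locality is module state,
	 * timeouts are generic chip state */
	stop = jiffies + chip->timeout_a;
	rc = iic_tpm_read(TPM_STS(tpm_dev.locality), &buf, 1);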
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index 847f1597f..e3a9155ee 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -1,5 +1,5 @@
-/******************************************************************************
- * Nuvoton TPM I2C Device Driver Interface for WPCT301/NPCT501,
+ /******************************************************************************
+ * Nuvoton TPM I2C Device Driver Interface for WPCT301/NPCT501/NPCT6XX,
* based on the TCG TPM Interface Spec version 1.2.
* Specifications at www.trustedcomputinggroup.org
*
@@ -31,6 +31,7 @@
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/i2c.h>
+#include <linux/of_device.h>
#include "tpm.h"
/* I2C interface offsets */
@@ -52,10 +53,13 @@
#define TPM_I2C_RETRY_DELAY_SHORT 2 /* msec */
#define TPM_I2C_RETRY_DELAY_LONG 10 /* msec */
-#define I2C_DRIVER_NAME "tpm_i2c_nuvoton"
+#define OF_IS_TPM2 ((void *)1)
+#define I2C_IS_TPM2 1
struct priv_data {
+ int irq;
unsigned int intrs;
+ wait_queue_head_t read_queue;
};
static s32 i2c_nuvoton_read_buf(struct i2c_client *client, u8 offset, u8 size,
@@ -96,13 +100,13 @@ static s32 i2c_nuvoton_write_buf(struct i2c_client *client, u8 offset, u8 size,
/* read TPM_STS register */
static u8 i2c_nuvoton_read_status(struct tpm_chip *chip)
{
- struct i2c_client *client = to_i2c_client(chip->pdev);
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
s32 status;
u8 data;
status = i2c_nuvoton_read_buf(client, TPM_STS, 1, &data);
if (status <= 0) {
- dev_err(chip->pdev, "%s() error return %d\n", __func__,
+ dev_err(&chip->dev, "%s() error return %d\n", __func__,
status);
data = TPM_STS_ERR_VAL;
}
@@ -127,13 +131,13 @@ static s32 i2c_nuvoton_write_status(struct i2c_client *client, u8 data)
/* write commandReady to TPM_STS register */
static void i2c_nuvoton_ready(struct tpm_chip *chip)
{
- struct i2c_client *client = to_i2c_client(chip->pdev);
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
s32 status;
/* this causes the current command to be aborted */
status = i2c_nuvoton_write_status(client, TPM_STS_COMMAND_READY);
if (status < 0)
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"%s() fail to write TPM_STS.commandReady\n", __func__);
}
@@ -142,7 +146,7 @@ static void i2c_nuvoton_ready(struct tpm_chip *chip)
static int i2c_nuvoton_get_burstcount(struct i2c_client *client,
struct tpm_chip *chip)
{
- unsigned long stop = jiffies + chip->vendor.timeout_d;
+ unsigned long stop = jiffies + chip->timeout_d;
s32 status;
int burst_count = -1;
u8 data;
@@ -163,7 +167,7 @@ static int i2c_nuvoton_get_burstcount(struct i2c_client *client,
}
/*
- * WPCT301/NPCT501 SINT# supports only dataAvail
+ * WPCT301/NPCT501/NPCT6XX SINT# supports only dataAvail
* any call to this function which is not waiting for dataAvail will
* set queue to NULL to avoid waiting for interrupt
*/
@@ -176,12 +180,12 @@ static bool i2c_nuvoton_check_status(struct tpm_chip *chip, u8 mask, u8 value)
static int i2c_nuvoton_wait_for_stat(struct tpm_chip *chip, u8 mask, u8 value,
u32 timeout, wait_queue_head_t *queue)
{
- if (chip->vendor.irq && queue) {
+ if ((chip->flags & TPM_CHIP_FLAG_IRQ) && queue) {
s32 rc;
- struct priv_data *priv = chip->vendor.priv;
+ struct priv_data *priv = dev_get_drvdata(&chip->dev);
unsigned int cur_intrs = priv->intrs;
- enable_irq(chip->vendor.irq);
+ enable_irq(priv->irq);
rc = wait_event_interruptible_timeout(*queue,
cur_intrs != priv->intrs,
timeout);
@@ -212,7 +216,7 @@ static int i2c_nuvoton_wait_for_stat(struct tpm_chip *chip, u8 mask, u8 value,
return 0;
} while (time_before(jiffies, stop));
}
- dev_err(chip->pdev, "%s(%02x, %02x) -> timeout\n", __func__, mask,
+ dev_err(&chip->dev, "%s(%02x, %02x) -> timeout\n", __func__, mask,
value);
return -ETIMEDOUT;
}
@@ -231,16 +235,17 @@ static int i2c_nuvoton_wait_for_data_avail(struct tpm_chip *chip, u32 timeout,
static int i2c_nuvoton_recv_data(struct i2c_client *client,
struct tpm_chip *chip, u8 *buf, size_t count)
{
+ struct priv_data *priv = dev_get_drvdata(&chip->dev);
s32 rc;
int burst_count, bytes2read, size = 0;
while (size < count &&
i2c_nuvoton_wait_for_data_avail(chip,
- chip->vendor.timeout_c,
- &chip->vendor.read_queue) == 0) {
+ chip->timeout_c,
+ &priv->read_queue) == 0) {
burst_count = i2c_nuvoton_get_burstcount(client, chip);
if (burst_count < 0) {
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"%s() fail to read burstCount=%d\n", __func__,
burst_count);
return -EIO;
@@ -249,12 +254,12 @@ static int i2c_nuvoton_recv_data(struct i2c_client *client,
rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_R,
bytes2read, &buf[size]);
if (rc < 0) {
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"%s() fail on i2c_nuvoton_read_buf()=%d\n",
__func__, rc);
return -EIO;
}
- dev_dbg(chip->pdev, "%s(%d):", __func__, bytes2read);
+ dev_dbg(&chip->dev, "%s(%d):", __func__, bytes2read);
size += bytes2read;
}
@@ -264,7 +269,8 @@ static int i2c_nuvoton_recv_data(struct i2c_client *client,
/* Read TPM command results */
static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
- struct device *dev = chip->pdev;
+ struct priv_data *priv = dev_get_drvdata(&chip->dev);
+ struct device *dev = chip->dev.parent;
struct i2c_client *client = to_i2c_client(dev);
s32 rc;
int expected, status, burst_count, retries, size = 0;
@@ -285,7 +291,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
* tag, paramsize, and result
*/
status = i2c_nuvoton_wait_for_data_avail(
- chip, chip->vendor.timeout_c, &chip->vendor.read_queue);
+ chip, chip->timeout_c, &priv->read_queue);
if (status != 0) {
dev_err(dev, "%s() timeout on dataAvail\n", __func__);
size = -ETIMEDOUT;
@@ -325,7 +331,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
}
if (i2c_nuvoton_wait_for_stat(
chip, TPM_STS_VALID | TPM_STS_DATA_AVAIL,
- TPM_STS_VALID, chip->vendor.timeout_c,
+ TPM_STS_VALID, chip->timeout_c,
NULL)) {
dev_err(dev, "%s() error left over data\n", __func__);
size = -ETIMEDOUT;
@@ -334,7 +340,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
break;
}
i2c_nuvoton_ready(chip);
- dev_dbg(chip->pdev, "%s() -> %d\n", __func__, size);
+ dev_dbg(&chip->dev, "%s() -> %d\n", __func__, size);
return size;
}
@@ -347,7 +353,8 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
*/
static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
{
- struct device *dev = chip->pdev;
+ struct priv_data *priv = dev_get_drvdata(&chip->dev);
+ struct device *dev = chip->dev.parent;
struct i2c_client *client = to_i2c_client(dev);
u32 ordinal;
size_t count = 0;
@@ -357,7 +364,7 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
i2c_nuvoton_ready(chip);
if (i2c_nuvoton_wait_for_stat(chip, TPM_STS_COMMAND_READY,
TPM_STS_COMMAND_READY,
- chip->vendor.timeout_b, NULL)) {
+ chip->timeout_b, NULL)) {
dev_err(dev, "%s() timeout on commandReady\n",
__func__);
rc = -EIO;
@@ -389,7 +396,7 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
TPM_STS_EXPECT,
TPM_STS_VALID |
TPM_STS_EXPECT,
- chip->vendor.timeout_c,
+ chip->timeout_c,
NULL);
if (rc < 0) {
dev_err(dev, "%s() timeout on Expect\n",
@@ -414,7 +421,7 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
rc = i2c_nuvoton_wait_for_stat(chip,
TPM_STS_VALID | TPM_STS_EXPECT,
TPM_STS_VALID,
- chip->vendor.timeout_c, NULL);
+ chip->timeout_c, NULL);
if (rc) {
dev_err(dev, "%s() timeout on Expect to clear\n",
__func__);
@@ -439,7 +446,7 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
rc = i2c_nuvoton_wait_for_data_avail(chip,
tpm_calc_ordinal_duration(chip,
ordinal),
- &chip->vendor.read_queue);
+ &priv->read_queue);
if (rc) {
dev_err(dev, "%s() timeout command duration\n", __func__);
i2c_nuvoton_ready(chip);
@@ -456,6 +463,7 @@ static bool i2c_nuvoton_req_canceled(struct tpm_chip *chip, u8 status)
}
static const struct tpm_class_ops tpm_i2c = {
+ .flags = TPM_OPS_AUTO_STARTUP,
.status = i2c_nuvoton_read_status,
.recv = i2c_nuvoton_recv,
.send = i2c_nuvoton_send,
@@ -473,11 +481,11 @@ static const struct tpm_class_ops tpm_i2c = {
static irqreturn_t i2c_nuvoton_int_handler(int dummy, void *dev_id)
{
struct tpm_chip *chip = dev_id;
- struct priv_data *priv = chip->vendor.priv;
+ struct priv_data *priv = dev_get_drvdata(&chip->dev);
priv->intrs++;
- wake_up(&chip->vendor.read_queue);
- disable_irq_nosync(chip->vendor.irq);
+ wake_up(&priv->read_queue);
+ disable_irq_nosync(priv->irq);
return IRQ_HANDLED;
}
@@ -521,6 +529,7 @@ static int i2c_nuvoton_probe(struct i2c_client *client,
int rc;
struct tpm_chip *chip;
struct device *dev = &client->dev;
+ struct priv_data *priv;
u32 vid = 0;
rc = get_vid(client, &vid);
@@ -534,46 +543,56 @@ static int i2c_nuvoton_probe(struct i2c_client *client,
if (IS_ERR(chip))
return PTR_ERR(chip);
- chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data),
- GFP_KERNEL);
- if (!chip->vendor.priv)
+ priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- init_waitqueue_head(&chip->vendor.read_queue);
- init_waitqueue_head(&chip->vendor.int_queue);
+ if (dev->of_node) {
+ const struct of_device_id *of_id;
+
+ of_id = of_match_device(dev->driver->of_match_table, dev);
+ if (of_id && of_id->data == OF_IS_TPM2)
+ chip->flags |= TPM_CHIP_FLAG_TPM2;
+ } else if (id->driver_data == I2C_IS_TPM2)
+ chip->flags |= TPM_CHIP_FLAG_TPM2;
+
+ init_waitqueue_head(&priv->read_queue);
/* Default timeouts */
- chip->vendor.timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
- chip->vendor.timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT);
- chip->vendor.timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
- chip->vendor.timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+ chip->timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+ chip->timeout_b = msecs_to_jiffies(TPM_I2C_LONG_TIMEOUT);
+ chip->timeout_c = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+ chip->timeout_d = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
+
+ dev_set_drvdata(&chip->dev, priv);
/*
* I2C intfcaps (interrupt capabilities) in the chip are hard coded to:
* TPM_INTF_INT_LEVEL_LOW | TPM_INTF_DATA_AVAIL_INT
* The IRQ should be set in the i2c_board_info (which is done
* automatically in of_i2c_register_devices, for device tree users */
- chip->vendor.irq = client->irq;
-
- if (chip->vendor.irq) {
- dev_dbg(dev, "%s() chip-vendor.irq\n", __func__);
- rc = devm_request_irq(dev, chip->vendor.irq,
+ priv->irq = client->irq;
+ if (client->irq) {
+ dev_dbg(dev, "%s() priv->irq\n", __func__);
+ rc = devm_request_irq(dev, client->irq,
i2c_nuvoton_int_handler,
IRQF_TRIGGER_LOW,
- chip->devname,
+ dev_name(&chip->dev),
chip);
if (rc) {
dev_err(dev, "%s() Unable to request irq: %d for use\n",
- __func__, chip->vendor.irq);
- chip->vendor.irq = 0;
+ __func__, priv->irq);
+ priv->irq = 0;
} else {
+ chip->flags |= TPM_CHIP_FLAG_IRQ;
/* Clear any pending interrupt */
i2c_nuvoton_ready(chip);
/* - wait for TPM_STS==0xA0 (stsValid, commandReady) */
rc = i2c_nuvoton_wait_for_stat(chip,
TPM_STS_COMMAND_READY,
TPM_STS_COMMAND_READY,
- chip->vendor.timeout_b,
+ chip->timeout_b,
NULL);
if (rc == 0) {
/*
@@ -601,25 +620,20 @@ static int i2c_nuvoton_probe(struct i2c_client *client,
}
}
- if (tpm_get_timeouts(chip))
- return -ENODEV;
-
- if (tpm_do_selftest(chip))
- return -ENODEV;
-
return tpm_chip_register(chip);
}
static int i2c_nuvoton_remove(struct i2c_client *client)
{
- struct device *dev = &(client->dev);
- struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct tpm_chip *chip = i2c_get_clientdata(client);
+
tpm_chip_unregister(chip);
return 0;
}
static const struct i2c_device_id i2c_nuvoton_id[] = {
- {I2C_DRIVER_NAME, 0},
+ {"tpm_i2c_nuvoton"},
+ {"tpm2_i2c_nuvoton", .driver_data = I2C_IS_TPM2},
{}
};
MODULE_DEVICE_TABLE(i2c, i2c_nuvoton_id);
@@ -628,6 +642,7 @@ MODULE_DEVICE_TABLE(i2c, i2c_nuvoton_id);
static const struct of_device_id i2c_nuvoton_of_match[] = {
{.compatible = "nuvoton,npct501"},
{.compatible = "winbond,wpct301"},
+ {.compatible = "nuvoton,npct601", .data = OF_IS_TPM2},
{},
};
MODULE_DEVICE_TABLE(of, i2c_nuvoton_of_match);
@@ -640,7 +655,7 @@ static struct i2c_driver i2c_nuvoton_driver = {
.probe = i2c_nuvoton_probe,
.remove = i2c_nuvoton_remove,
.driver = {
- .name = I2C_DRIVER_NAME,
+ .name = "tpm_i2c_nuvoton",
.pm = &i2c_nuvoton_pm_ops,
.of_match_table = of_match_ptr(i2c_nuvoton_of_match),
},
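
[Editor's note] TPM 2.0 detection in the Nuvoton driver is table driven: device-tree probes match "nuvoton,npct601", whose .data carries OF_IS_TPM2, while board-info probes use the "tpm2_i2c_nuvoton" id, whose driver_data carries I2C_IS_TPM2; either path sets TPM_CHIP_FLAG_TPM2 before registration. An illustrative board-info entry that would take the TPM 2.0 path (the I2C address is hypothetical):

	static struct i2c_board_info tpm_board_info __initdata = {
		I2C_BOARD_INFO("tpm2_i2c_nuvoton", 0x57),
	};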
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index b0a9a9e34..946025a74 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -54,21 +54,6 @@ static int ibmvtpm_send_crq(struct vio_dev *vdev, u64 w1, u64 w2)
}
/**
- * ibmvtpm_get_data - Retrieve ibm vtpm data
- * @dev: device struct
- *
- * Return value:
- * vtpm device struct
- */
-static struct ibmvtpm_dev *ibmvtpm_get_data(const struct device *dev)
-{
- struct tpm_chip *chip = dev_get_drvdata(dev);
- if (chip)
- return (struct ibmvtpm_dev *)TPM_VPRIV(chip);
- return NULL;
-}
-
-/**
* tpm_ibmvtpm_recv - Receive data after send
* @chip: tpm chip struct
* @buf: buffer to read
@@ -79,12 +64,10 @@ static struct ibmvtpm_dev *ibmvtpm_get_data(const struct device *dev)
*/
static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
- struct ibmvtpm_dev *ibmvtpm;
+ struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
u16 len;
int sig;
- ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip);
-
if (!ibmvtpm->rtce_buf) {
dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
return 0;
@@ -122,13 +105,11 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
*/
static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
- struct ibmvtpm_dev *ibmvtpm;
+ struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
struct ibmvtpm_crq crq;
__be64 *word = (__be64 *)&crq;
int rc, sig;
- ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip);
-
if (!ibmvtpm->rtce_buf) {
dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
return 0;
@@ -289,8 +270,8 @@ static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
*/
static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
{
- struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev);
- struct tpm_chip *chip = dev_get_drvdata(ibmvtpm->dev);
+ struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
+ struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
int rc = 0;
tpm_chip_unregister(chip);
@@ -327,7 +308,8 @@ static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
*/
static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
{
- struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev);
+ struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
+ struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
/* ibmvtpm initializes at probe time, so the data we are
* asking for may not be set yet. Estimate that 4K required
@@ -348,7 +330,8 @@ static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
*/
static int tpm_ibmvtpm_suspend(struct device *dev)
{
- struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(dev);
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
struct ibmvtpm_crq crq;
u64 *buf = (u64 *) &crq;
int rc = 0;
@@ -400,7 +383,8 @@ static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
*/
static int tpm_ibmvtpm_resume(struct device *dev)
{
- struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(dev);
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
int rc = 0;
do {
@@ -643,7 +627,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
crq_q->index = 0;
- TPM_VPRIV(chip) = (void *)ibmvtpm;
+ dev_set_drvdata(&chip->dev, ibmvtpm);
spin_lock_init(&ibmvtpm->rtce_lock);
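
[Editor's note] Deleting ibmvtpm_get_data() leaves a two-level drvdata chain that every entry point now walks explicitly: the vio device's drvdata holds the tpm_chip (set by tpmm_chip_alloc() on the parent), and the chip device's drvdata holds the ibmvtpm state. A minimal sketch of the chain:

	struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);
	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);

This also removes a NULL-returning helper whose result was dereferenced unchecked in the old remove path.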
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index 6c488e635..e3cf9f354 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -195,9 +195,9 @@ static int wait(struct tpm_chip *chip, int wait_for_bit)
}
if (i == TPM_MAX_TRIES) { /* timeout occurs */
if (wait_for_bit == STAT_XFE)
- dev_err(chip->pdev, "Timeout in wait(STAT_XFE)\n");
+ dev_err(&chip->dev, "Timeout in wait(STAT_XFE)\n");
if (wait_for_bit == STAT_RDA)
- dev_err(chip->pdev, "Timeout in wait(STAT_RDA)\n");
+ dev_err(&chip->dev, "Timeout in wait(STAT_RDA)\n");
return -EIO;
}
return 0;
@@ -220,7 +220,7 @@ static void wait_and_send(struct tpm_chip *chip, u8 sendbyte)
static void tpm_wtx(struct tpm_chip *chip)
{
number_of_wtx++;
- dev_info(chip->pdev, "Granting WTX (%02d / %02d)\n",
+ dev_info(&chip->dev, "Granting WTX (%02d / %02d)\n",
number_of_wtx, TPM_MAX_WTX_PACKAGES);
wait_and_send(chip, TPM_VL_VER);
wait_and_send(chip, TPM_CTRL_WTX);
@@ -231,7 +231,7 @@ static void tpm_wtx(struct tpm_chip *chip)
static void tpm_wtx_abort(struct tpm_chip *chip)
{
- dev_info(chip->pdev, "Aborting WTX\n");
+ dev_info(&chip->dev, "Aborting WTX\n");
wait_and_send(chip, TPM_VL_VER);
wait_and_send(chip, TPM_CTRL_WTX_ABORT);
wait_and_send(chip, 0x00);
@@ -257,7 +257,7 @@ recv_begin:
}
if (buf[0] != TPM_VL_VER) {
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"Wrong transport protocol implementation!\n");
return -EIO;
}
@@ -272,7 +272,7 @@ recv_begin:
}
if ((size == 0x6D00) && (buf[1] == 0x80)) {
- dev_err(chip->pdev, "Error handling on vendor layer!\n");
+ dev_err(&chip->dev, "Error handling on vendor layer!\n");
return -EIO;
}
@@ -284,7 +284,7 @@ recv_begin:
}
if (buf[1] == TPM_CTRL_WTX) {
- dev_info(chip->pdev, "WTX-package received\n");
+ dev_info(&chip->dev, "WTX-package received\n");
if (number_of_wtx < TPM_MAX_WTX_PACKAGES) {
tpm_wtx(chip);
goto recv_begin;
@@ -295,14 +295,14 @@ recv_begin:
}
if (buf[1] == TPM_CTRL_WTX_ABORT_ACK) {
- dev_info(chip->pdev, "WTX-abort acknowledged\n");
+ dev_info(&chip->dev, "WTX-abort acknowledged\n");
return size;
}
if (buf[1] == TPM_CTRL_ERROR) {
- dev_err(chip->pdev, "ERROR-package received:\n");
+ dev_err(&chip->dev, "ERROR-package received:\n");
if (buf[4] == TPM_INF_NAK)
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"-> Negative acknowledgement"
" - retransmit command!\n");
return -EIO;
@@ -321,7 +321,7 @@ static int tpm_inf_send(struct tpm_chip *chip, u8 * buf, size_t count)
ret = empty_fifo(chip, 1);
if (ret) {
- dev_err(chip->pdev, "Timeout while clearing FIFO\n");
+ dev_err(&chip->dev, "Timeout while clearing FIFO\n");
return -EIO;
}
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index 289389ece..9ff0e072c 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -64,15 +64,21 @@ enum tpm_nsc_cmd_mode {
NSC_COMMAND_EOC = 0x03,
NSC_COMMAND_CANCEL = 0x22
};
+
+struct tpm_nsc_priv {
+ unsigned long base;
+};
+
/*
* Wait for a certain status to appear
*/
static int wait_for_stat(struct tpm_chip *chip, u8 mask, u8 val, u8 * data)
{
+ struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);
unsigned long stop;
/* status immediately available check */
- *data = inb(chip->vendor.base + NSC_STATUS);
+ *data = inb(priv->base + NSC_STATUS);
if ((*data & mask) == val)
return 0;
@@ -80,7 +86,7 @@ static int wait_for_stat(struct tpm_chip *chip, u8 mask, u8 val, u8 * data)
stop = jiffies + 10 * HZ;
do {
msleep(TPM_TIMEOUT);
- *data = inb(chip->vendor.base + 1);
+ *data = inb(priv->base + 1);
if ((*data & mask) == val)
return 0;
}
@@ -91,13 +97,14 @@ static int wait_for_stat(struct tpm_chip *chip, u8 mask, u8 val, u8 * data)
static int nsc_wait_for_ready(struct tpm_chip *chip)
{
+ struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);
int status;
unsigned long stop;
/* status immediately available check */
- status = inb(chip->vendor.base + NSC_STATUS);
+ status = inb(priv->base + NSC_STATUS);
if (status & NSC_STATUS_OBF)
- status = inb(chip->vendor.base + NSC_DATA);
+ status = inb(priv->base + NSC_DATA);
if (status & NSC_STATUS_RDY)
return 0;
@@ -105,21 +112,22 @@ static int nsc_wait_for_ready(struct tpm_chip *chip)
stop = jiffies + 100;
do {
msleep(TPM_TIMEOUT);
- status = inb(chip->vendor.base + NSC_STATUS);
+ status = inb(priv->base + NSC_STATUS);
if (status & NSC_STATUS_OBF)
- status = inb(chip->vendor.base + NSC_DATA);
+ status = inb(priv->base + NSC_DATA);
if (status & NSC_STATUS_RDY)
return 0;
}
while (time_before(jiffies, stop));
- dev_info(chip->pdev, "wait for ready failed\n");
+ dev_info(&chip->dev, "wait for ready failed\n");
return -EBUSY;
}
static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count)
{
+ struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);
u8 *buffer = buf;
u8 data, *p;
u32 size;
@@ -129,12 +137,13 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count)
return -EIO;
if (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0) {
- dev_err(chip->pdev, "F0 timeout\n");
+ dev_err(&chip->dev, "F0 timeout\n");
return -EIO;
}
- if ((data =
- inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_NORMAL) {
- dev_err(chip->pdev, "not in normal mode (0x%x)\n",
+
+ data = inb(priv->base + NSC_DATA);
+ if (data != NSC_COMMAND_NORMAL) {
+ dev_err(&chip->dev, "not in normal mode (0x%x)\n",
data);
return -EIO;
}
@@ -143,22 +152,24 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count)
for (p = buffer; p < &buffer[count]; p++) {
if (wait_for_stat
(chip, NSC_STATUS_OBF, NSC_STATUS_OBF, &data) < 0) {
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"OBF timeout (while reading data)\n");
return -EIO;
}
if (data & NSC_STATUS_F0)
break;
- *p = inb(chip->vendor.base + NSC_DATA);
+ *p = inb(priv->base + NSC_DATA);
}
if ((data & NSC_STATUS_F0) == 0 &&
(wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0)) {
- dev_err(chip->pdev, "F0 not set\n");
+ dev_err(&chip->dev, "F0 not set\n");
return -EIO;
}
- if ((data = inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_EOC) {
- dev_err(chip->pdev,
+
+ data = inb(priv->base + NSC_DATA);
+ if (data != NSC_COMMAND_EOC) {
+ dev_err(&chip->dev,
"expected end of command(0x%x)\n", data);
return -EIO;
}
@@ -174,6 +185,7 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count)
static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count)
{
+ struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);
u8 data;
int i;
@@ -183,48 +195,52 @@ static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count)
* fix it. Not sure why this is needed, we followed the flow
* chart in the manual to the letter.
*/
- outb(NSC_COMMAND_CANCEL, chip->vendor.base + NSC_COMMAND);
+ outb(NSC_COMMAND_CANCEL, priv->base + NSC_COMMAND);
if (nsc_wait_for_ready(chip) != 0)
return -EIO;
if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
- dev_err(chip->pdev, "IBF timeout\n");
+ dev_err(&chip->dev, "IBF timeout\n");
return -EIO;
}
- outb(NSC_COMMAND_NORMAL, chip->vendor.base + NSC_COMMAND);
+ outb(NSC_COMMAND_NORMAL, priv->base + NSC_COMMAND);
if (wait_for_stat(chip, NSC_STATUS_IBR, NSC_STATUS_IBR, &data) < 0) {
- dev_err(chip->pdev, "IBR timeout\n");
+ dev_err(&chip->dev, "IBR timeout\n");
return -EIO;
}
for (i = 0; i < count; i++) {
if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"IBF timeout (while writing data)\n");
return -EIO;
}
- outb(buf[i], chip->vendor.base + NSC_DATA);
+ outb(buf[i], priv->base + NSC_DATA);
}
if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
- dev_err(chip->pdev, "IBF timeout\n");
+ dev_err(&chip->dev, "IBF timeout\n");
return -EIO;
}
- outb(NSC_COMMAND_EOC, chip->vendor.base + NSC_COMMAND);
+ outb(NSC_COMMAND_EOC, priv->base + NSC_COMMAND);
return count;
}
static void tpm_nsc_cancel(struct tpm_chip *chip)
{
- outb(NSC_COMMAND_CANCEL, chip->vendor.base + NSC_COMMAND);
+ struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);
+
+ outb(NSC_COMMAND_CANCEL, priv->base + NSC_COMMAND);
}
static u8 tpm_nsc_status(struct tpm_chip *chip)
{
- return inb(chip->vendor.base + NSC_STATUS);
+ struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);
+
+ return inb(priv->base + NSC_STATUS);
}
static bool tpm_nsc_req_canceled(struct tpm_chip *chip, u8 status)
@@ -247,9 +263,10 @@ static struct platform_device *pdev = NULL;
static void tpm_nsc_remove(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);
tpm_chip_unregister(chip);
- release_region(chip->vendor.base, 2);
+ release_region(priv->base, 2);
}
static SIMPLE_DEV_PM_OPS(tpm_nsc_pm, tpm_pm_suspend, tpm_pm_resume);
@@ -268,6 +285,7 @@ static int __init init_nsc(void)
int nscAddrBase = TPM_ADDR;
struct tpm_chip *chip;
unsigned long base;
+ struct tpm_nsc_priv *priv;
/* verify that it is a National part (SID) */
if (tpm_read_index(TPM_ADDR, NSC_SID_INDEX) != 0xEF) {
@@ -301,6 +319,14 @@ static int __init init_nsc(void)
if ((rc = platform_device_add(pdev)) < 0)
goto err_put_dev;
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ rc = -ENOMEM;
+ goto err_del_dev;
+ }
+
+ priv->base = base;
+
if (request_region(base, 2, "tpm_nsc0") == NULL ) {
rc = -EBUSY;
goto err_del_dev;
@@ -312,6 +338,8 @@ static int __init init_nsc(void)
goto err_rel_reg;
}
+ dev_set_drvdata(&chip->dev, priv);
+
rc = tpm_chip_register(chip);
if (rc)
goto err_rel_reg;
@@ -349,8 +377,6 @@ static int __init init_nsc(void)
"NSC TPM revision %d\n",
tpm_read_index(nscAddrBase, 0x27) & 0x1F);
- chip->vendor.base = base;
-
return 0;
err_rel_reg:
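
[Editor's note] Beyond the same drvdata conversion, the NSC hunks fix an ordering problem: the old code assigned chip->vendor.base only after tpm_chip_register() had succeeded, so the status helpers could in principle run before the base port was recorded; now priv->base is populated and drvdata is set before the chip registers. The hunks also unpick assignments buried in conditions, a pure readability change:

	/* before: assignment buried in the condition */
	if ((data = inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_EOC)
		return -EIO;

	/* after: read, then test */
	data = inb(priv->base + NSC_DATA);
	if (data != NSC_COMMAND_EOC)
		return -EIO;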
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index a50700672..eaf5730d7 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -29,40 +29,7 @@
#include <linux/acpi.h>
#include <linux/freezer.h>
#include "tpm.h"
-
-enum tis_access {
- TPM_ACCESS_VALID = 0x80,
- TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
- TPM_ACCESS_REQUEST_PENDING = 0x04,
- TPM_ACCESS_REQUEST_USE = 0x02,
-};
-
-enum tis_status {
- TPM_STS_VALID = 0x80,
- TPM_STS_COMMAND_READY = 0x40,
- TPM_STS_GO = 0x20,
- TPM_STS_DATA_AVAIL = 0x10,
- TPM_STS_DATA_EXPECT = 0x08,
-};
-
-enum tis_int_flags {
- TPM_GLOBAL_INT_ENABLE = 0x80000000,
- TPM_INTF_BURST_COUNT_STATIC = 0x100,
- TPM_INTF_CMD_READY_INT = 0x080,
- TPM_INTF_INT_EDGE_FALLING = 0x040,
- TPM_INTF_INT_EDGE_RISING = 0x020,
- TPM_INTF_INT_LEVEL_LOW = 0x010,
- TPM_INTF_INT_LEVEL_HIGH = 0x008,
- TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
- TPM_INTF_STS_VALID_INT = 0x002,
- TPM_INTF_DATA_AVAIL_INT = 0x001,
-};
-
-enum tis_defaults {
- TIS_MEM_LEN = 0x5000,
- TIS_SHORT_TIMEOUT = 750, /* ms */
- TIS_LONG_TIMEOUT = 2000, /* 2 sec */
-};
+#include "tpm_tis_core.h"
struct tpm_info {
struct resource res;
@@ -73,30 +40,30 @@ struct tpm_info {
int irq;
};
-/* Some timeout values are needed before it is known whether the chip is
- * TPM 1.0 or TPM 2.0.
- */
-#define TIS_TIMEOUT_A_MAX max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_A)
-#define TIS_TIMEOUT_B_MAX max(TIS_LONG_TIMEOUT, TPM2_TIMEOUT_B)
-#define TIS_TIMEOUT_C_MAX max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_C)
-#define TIS_TIMEOUT_D_MAX max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_D)
-
-#define TPM_ACCESS(l) (0x0000 | ((l) << 12))
-#define TPM_INT_ENABLE(l) (0x0008 | ((l) << 12))
-#define TPM_INT_VECTOR(l) (0x000C | ((l) << 12))
-#define TPM_INT_STATUS(l) (0x0010 | ((l) << 12))
-#define TPM_INTF_CAPS(l) (0x0014 | ((l) << 12))
-#define TPM_STS(l) (0x0018 | ((l) << 12))
-#define TPM_STS3(l) (0x001b | ((l) << 12))
-#define TPM_DATA_FIFO(l) (0x0024 | ((l) << 12))
-
-#define TPM_DID_VID(l) (0x0F00 | ((l) << 12))
-#define TPM_RID(l) (0x0F04 | ((l) << 12))
-
-struct priv_data {
- bool irq_tested;
+struct tpm_tis_tcg_phy {
+ struct tpm_tis_data priv;
+ void __iomem *iobase;
};
+static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *data)
+{
+ return container_of(data, struct tpm_tis_tcg_phy, priv);
+}
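
[Editor's note] The large tpm_tis.c rewrite is the start of a core/phy split: the generic TIS protocol moves behind tpm_tis_core.h, and this file keeps only the memory-mapped (TCG-specified) transport. struct tpm_tis_tcg_phy embeds the generic tpm_tis_data so the phy callbacks, which receive the generic pointer, can recover their wrapper with container_of(). The same embedding would let any other transport reuse the core; a hypothetical sketch (names invented for illustration):

	struct tpm_tis_example_phy {
		struct tpm_tis_data priv;	/* embedded generic state */
		struct spi_device *spi;		/* transport-specific handle */
	};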
+
+static bool interrupts = true;
+module_param(interrupts, bool, 0444);
+MODULE_PARM_DESC(interrupts, "Enable interrupts");
+
+static bool itpm;
+module_param(itpm, bool, 0444);
+MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");
+
+static bool force;
+#ifdef CONFIG_X86
+module_param(force, bool, 0444);
+MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
+#endif
+
#if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
static int has_hid(struct acpi_device *dev, const char *hid)
{
@@ -120,744 +87,82 @@ static inline int is_itpm(struct acpi_device *dev)
}
#endif
-/* Before we attempt to access the TPM we must see that the valid bit is set.
- * The specification says that this bit is 0 at reset and remains 0 until the
- * 'TPM has gone through its self test and initialization and has established
- * correct values in the other bits.' */
-static int wait_startup(struct tpm_chip *chip, int l)
-{
- unsigned long stop = jiffies + chip->vendor.timeout_a;
- do {
- if (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
- TPM_ACCESS_VALID)
- return 0;
- msleep(TPM_TIMEOUT);
- } while (time_before(jiffies, stop));
- return -1;
-}
-
-static int check_locality(struct tpm_chip *chip, int l)
-{
- if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
- (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
- (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
- return chip->vendor.locality = l;
-
- return -1;
-}
-
-static void release_locality(struct tpm_chip *chip, int l, int force)
-{
- if (force || (ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
- (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
- (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID))
- iowrite8(TPM_ACCESS_ACTIVE_LOCALITY,
- chip->vendor.iobase + TPM_ACCESS(l));
-}
-
-static int request_locality(struct tpm_chip *chip, int l)
-{
- unsigned long stop, timeout;
- long rc;
-
- if (check_locality(chip, l) >= 0)
- return l;
-
- iowrite8(TPM_ACCESS_REQUEST_USE,
- chip->vendor.iobase + TPM_ACCESS(l));
-
- stop = jiffies + chip->vendor.timeout_a;
-
- if (chip->vendor.irq) {
-again:
- timeout = stop - jiffies;
- if ((long)timeout <= 0)
- return -1;
- rc = wait_event_interruptible_timeout(chip->vendor.int_queue,
- (check_locality
- (chip, l) >= 0),
- timeout);
- if (rc > 0)
- return l;
- if (rc == -ERESTARTSYS && freezing(current)) {
- clear_thread_flag(TIF_SIGPENDING);
- goto again;
- }
- } else {
- /* wait for burstcount */
- do {
- if (check_locality(chip, l) >= 0)
- return l;
- msleep(TPM_TIMEOUT);
- }
- while (time_before(jiffies, stop));
- }
- return -1;
-}
-
-static u8 tpm_tis_status(struct tpm_chip *chip)
-{
- return ioread8(chip->vendor.iobase +
- TPM_STS(chip->vendor.locality));
-}
-
-static void tpm_tis_ready(struct tpm_chip *chip)
-{
- /* this causes the current command to be aborted */
- iowrite8(TPM_STS_COMMAND_READY,
- chip->vendor.iobase + TPM_STS(chip->vendor.locality));
-}
-
-static int get_burstcount(struct tpm_chip *chip)
-{
- unsigned long stop;
- int burstcnt;
-
- /* wait for burstcount */
- /* which timeout value, spec has 2 answers (c & d) */
- stop = jiffies + chip->vendor.timeout_d;
- do {
- burstcnt = ioread8(chip->vendor.iobase +
- TPM_STS(chip->vendor.locality) + 1);
- burstcnt += ioread8(chip->vendor.iobase +
- TPM_STS(chip->vendor.locality) +
- 2) << 8;
- if (burstcnt)
- return burstcnt;
- msleep(TPM_TIMEOUT);
- } while (time_before(jiffies, stop));
- return -EBUSY;
-}
-
-static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
+static int tpm_tcg_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *result)
{
- int size = 0, burstcnt;
- while (size < count &&
- wait_for_tpm_stat(chip,
- TPM_STS_DATA_AVAIL | TPM_STS_VALID,
- chip->vendor.timeout_c,
- &chip->vendor.read_queue, true)
- == 0) {
- burstcnt = get_burstcount(chip);
- for (; burstcnt > 0 && size < count; burstcnt--)
- buf[size++] = ioread8(chip->vendor.iobase +
- TPM_DATA_FIFO(chip->vendor.
- locality));
- }
- return size;
-}
-
-static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
-{
- int size = 0;
- int expected, status;
-
- if (count < TPM_HEADER_SIZE) {
- size = -EIO;
- goto out;
- }
-
- /* read first 10 bytes, including tag, paramsize, and result */
- if ((size =
- recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) {
- dev_err(chip->pdev, "Unable to read header\n");
- goto out;
- }
-
- expected = be32_to_cpu(*(__be32 *) (buf + 2));
- if (expected > count) {
- size = -EIO;
- goto out;
- }
-
- if ((size +=
- recv_data(chip, &buf[TPM_HEADER_SIZE],
- expected - TPM_HEADER_SIZE)) < expected) {
- dev_err(chip->pdev, "Unable to read remainder of result\n");
- size = -ETIME;
- goto out;
- }
-
- wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
- &chip->vendor.int_queue, false);
- status = tpm_tis_status(chip);
- if (status & TPM_STS_DATA_AVAIL) { /* retry? */
- dev_err(chip->pdev, "Error left over data\n");
- size = -EIO;
- goto out;
- }
-
-out:
- tpm_tis_ready(chip);
- release_locality(chip, chip->vendor.locality, 0);
- return size;
-}
-
-static bool itpm;
-module_param(itpm, bool, 0444);
-MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");
-
-/*
- * If interrupts are used (signaled by an irq set in the vendor structure)
- * tpm.c can skip polling for the data to be available as the interrupt is
- * waited for here
- */
-static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
-{
- int rc, status, burstcnt;
- size_t count = 0;
-
- if (request_locality(chip, 0) < 0)
- return -EBUSY;
-
- status = tpm_tis_status(chip);
- if ((status & TPM_STS_COMMAND_READY) == 0) {
- tpm_tis_ready(chip);
- if (wait_for_tpm_stat
- (chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
- &chip->vendor.int_queue, false) < 0) {
- rc = -ETIME;
- goto out_err;
- }
- }
-
- while (count < len - 1) {
- burstcnt = get_burstcount(chip);
- for (; burstcnt > 0 && count < len - 1; burstcnt--) {
- iowrite8(buf[count], chip->vendor.iobase +
- TPM_DATA_FIFO(chip->vendor.locality));
- count++;
- }
-
- wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
- &chip->vendor.int_queue, false);
- status = tpm_tis_status(chip);
- if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
- rc = -EIO;
- goto out_err;
- }
- }
-
- /* write last byte */
- iowrite8(buf[count],
- chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality));
- wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
- &chip->vendor.int_queue, false);
- status = tpm_tis_status(chip);
- if ((status & TPM_STS_DATA_EXPECT) != 0) {
- rc = -EIO;
- goto out_err;
- }
+ struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
+ while (len--)
+ *result++ = ioread8(phy->iobase + addr);
return 0;
-
-out_err:
- tpm_tis_ready(chip);
- release_locality(chip, chip->vendor.locality, 0);
- return rc;
-}
-
-static void disable_interrupts(struct tpm_chip *chip)
-{
- u32 intmask;
-
- intmask =
- ioread32(chip->vendor.iobase +
- TPM_INT_ENABLE(chip->vendor.locality));
- intmask &= ~TPM_GLOBAL_INT_ENABLE;
- iowrite32(intmask,
- chip->vendor.iobase +
- TPM_INT_ENABLE(chip->vendor.locality));
- devm_free_irq(chip->pdev, chip->vendor.irq, chip);
- chip->vendor.irq = 0;
-}
-
-/*
- * If interrupts are used (signaled by an irq set in the vendor structure)
- * tpm.c can skip polling for the data to be available as the interrupt is
- * waited for here
- */
-static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len)
-{
- int rc;
- u32 ordinal;
- unsigned long dur;
-
- rc = tpm_tis_send_data(chip, buf, len);
- if (rc < 0)
- return rc;
-
- /* go and do it */
- iowrite8(TPM_STS_GO,
- chip->vendor.iobase + TPM_STS(chip->vendor.locality));
-
- if (chip->vendor.irq) {
- ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
-
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- dur = tpm2_calc_ordinal_duration(chip, ordinal);
- else
- dur = tpm_calc_ordinal_duration(chip, ordinal);
-
- if (wait_for_tpm_stat
- (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID, dur,
- &chip->vendor.read_queue, false) < 0) {
- rc = -ETIME;
- goto out_err;
- }
- }
- return len;
-out_err:
- tpm_tis_ready(chip);
- release_locality(chip, chip->vendor.locality, 0);
- return rc;
-}
-
-static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
-{
- int rc, irq;
- struct priv_data *priv = chip->vendor.priv;
-
- if (!chip->vendor.irq || priv->irq_tested)
- return tpm_tis_send_main(chip, buf, len);
-
- /* Verify receipt of the expected IRQ */
- irq = chip->vendor.irq;
- chip->vendor.irq = 0;
- rc = tpm_tis_send_main(chip, buf, len);
- chip->vendor.irq = irq;
- if (!priv->irq_tested)
- msleep(1);
- if (!priv->irq_tested)
- disable_interrupts(chip);
- priv->irq_tested = true;
- return rc;
}
-struct tis_vendor_timeout_override {
- u32 did_vid;
- unsigned long timeout_us[4];
-};
-
-static const struct tis_vendor_timeout_override vendor_timeout_overrides[] = {
- /* Atmel 3204 */
- { 0x32041114, { (TIS_SHORT_TIMEOUT*1000), (TIS_LONG_TIMEOUT*1000),
- (TIS_SHORT_TIMEOUT*1000), (TIS_SHORT_TIMEOUT*1000) } },
-};
-
-static bool tpm_tis_update_timeouts(struct tpm_chip *chip,
- unsigned long *timeout_cap)
+static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *value)
{
- int i;
- u32 did_vid;
+ struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
+
- did_vid = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
-
- for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) {
- if (vendor_timeout_overrides[i].did_vid != did_vid)
- continue;
- memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us,
- sizeof(vendor_timeout_overrides[i].timeout_us));
- return true;
- }
-
- return false;
+ while (len--)
+ iowrite8(*value++, phy->iobase + addr);
+ return 0;
}
-/*
- * Early probing for iTPM with STS_DATA_EXPECT flaw.
- * Try sending command without itpm flag set and if that
- * fails, repeat with itpm flag set.
- */
-static int probe_itpm(struct tpm_chip *chip)
+static int tpm_tcg_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
{
- int rc = 0;
- u8 cmd_getticks[] = {
- 0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a,
- 0x00, 0x00, 0x00, 0xf1
- };
- size_t len = sizeof(cmd_getticks);
- bool rem_itpm = itpm;
- u16 vendor = ioread16(chip->vendor.iobase + TPM_DID_VID(0));
-
- /* probe only iTPMS */
- if (vendor != TPM_VID_INTEL)
- return 0;
-
- itpm = false;
-
- rc = tpm_tis_send_data(chip, cmd_getticks, len);
- if (rc == 0)
- goto out;
-
- tpm_tis_ready(chip);
- release_locality(chip, chip->vendor.locality, 0);
+ struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
+
- itpm = true;
-
- rc = tpm_tis_send_data(chip, cmd_getticks, len);
- if (rc == 0) {
- dev_info(chip->pdev, "Detected an iTPM.\n");
- rc = 1;
- } else
- rc = -EFAULT;
-
-out:
- itpm = rem_itpm;
- tpm_tis_ready(chip);
- release_locality(chip, chip->vendor.locality, 0);
-
- return rc;
+ *result = ioread16(phy->iobase + addr);
+ return 0;
}
-static bool tpm_tis_req_canceled(struct tpm_chip *chip, u8 status)
+static int tpm_tcg_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
{
- switch (chip->vendor.manufacturer_id) {
- case TPM_VID_WINBOND:
- return ((status == TPM_STS_VALID) ||
- (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY)));
- case TPM_VID_STM:
- return (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY));
- default:
- return (status == TPM_STS_COMMAND_READY);
- }
-}
-
-static const struct tpm_class_ops tpm_tis = {
- .status = tpm_tis_status,
- .recv = tpm_tis_recv,
- .send = tpm_tis_send,
- .cancel = tpm_tis_ready,
- .update_timeouts = tpm_tis_update_timeouts,
- .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
- .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
- .req_canceled = tpm_tis_req_canceled,
-};
+ struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
+
-static irqreturn_t tis_int_handler(int dummy, void *dev_id)
-{
- struct tpm_chip *chip = dev_id;
- u32 interrupt;
- int i;
-
- interrupt = ioread32(chip->vendor.iobase +
- TPM_INT_STATUS(chip->vendor.locality));
-
- if (interrupt == 0)
- return IRQ_NONE;
-
- ((struct priv_data *)chip->vendor.priv)->irq_tested = true;
- if (interrupt & TPM_INTF_DATA_AVAIL_INT)
- wake_up_interruptible(&chip->vendor.read_queue);
- if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
- for (i = 0; i < 5; i++)
- if (check_locality(chip, i) >= 0)
- break;
- if (interrupt &
- (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT |
- TPM_INTF_CMD_READY_INT))
- wake_up_interruptible(&chip->vendor.int_queue);
-
- /* Clear interrupts handled with TPM_EOI */
- iowrite32(interrupt,
- chip->vendor.iobase +
- TPM_INT_STATUS(chip->vendor.locality));
- ioread32(chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality));
- return IRQ_HANDLED;
+ *result = ioread32(phy->iobase + addr);
+ return 0;
}
-/* Register the IRQ and issue a command that will cause an interrupt. If an
- * irq is seen then leave the chip setup for IRQ operation, otherwise reverse
- * everything and leave in polling mode. Returns 0 on success.
- */
-static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
- int flags, int irq)
+static int tpm_tcg_write32(struct tpm_tis_data *data, u32 addr, u32 value)
{
- struct priv_data *priv = chip->vendor.priv;
- u8 original_int_vec;
-
- if (devm_request_irq(chip->pdev, irq, tis_int_handler, flags,
- chip->devname, chip) != 0) {
- dev_info(chip->pdev, "Unable to request irq: %d for probe\n",
- irq);
- return -1;
- }
- chip->vendor.irq = irq;
-
- original_int_vec = ioread8(chip->vendor.iobase +
- TPM_INT_VECTOR(chip->vendor.locality));
- iowrite8(irq,
- chip->vendor.iobase + TPM_INT_VECTOR(chip->vendor.locality));
-
- /* Clear all existing */
- iowrite32(ioread32(chip->vendor.iobase +
- TPM_INT_STATUS(chip->vendor.locality)),
- chip->vendor.iobase + TPM_INT_STATUS(chip->vendor.locality));
-
- /* Turn on */
- iowrite32(intmask | TPM_GLOBAL_INT_ENABLE,
- chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
-
- priv->irq_tested = false;
-
- /* Generate an interrupt by having the core call through to
- * tpm_tis_send
- */
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- tpm2_gen_interrupt(chip);
- else
- tpm_gen_interrupt(chip);
-
- /* tpm_tis_send will either confirm the interrupt is working or it
- * will call disable_irq which undoes all of the above.
- */
- if (!chip->vendor.irq) {
- iowrite8(original_int_vec,
- chip->vendor.iobase +
- TPM_INT_VECTOR(chip->vendor.locality));
- return 1;
- }
+ struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
+
+ iowrite32(value, phy->iobase + addr);
return 0;
}
-/* Try to find the IRQ the TPM is using. This is for legacy x86 systems that
- * do not have ACPI/etc. We typically expect the interrupt to be declared if
- * present.
- */
-static void tpm_tis_probe_irq(struct tpm_chip *chip, u32 intmask)
-{
- u8 original_int_vec;
- int i;
-
- original_int_vec = ioread8(chip->vendor.iobase +
- TPM_INT_VECTOR(chip->vendor.locality));
-
- if (!original_int_vec) {
- if (IS_ENABLED(CONFIG_X86))
- for (i = 3; i <= 15; i++)
- if (!tpm_tis_probe_irq_single(chip, intmask, 0,
- i))
- return;
- } else if (!tpm_tis_probe_irq_single(chip, intmask, 0,
- original_int_vec))
- return;
-}
-
-static bool interrupts = true;
-module_param(interrupts, bool, 0444);
-MODULE_PARM_DESC(interrupts, "Enable interrupts");
-
-static void tpm_tis_remove(struct tpm_chip *chip)
-{
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- tpm2_shutdown(chip, TPM2_SU_CLEAR);
-
- iowrite32(~TPM_GLOBAL_INT_ENABLE &
- ioread32(chip->vendor.iobase +
- TPM_INT_ENABLE(chip->vendor.
- locality)),
- chip->vendor.iobase +
- TPM_INT_ENABLE(chip->vendor.locality));
- release_locality(chip, chip->vendor.locality, 1);
-}
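+/*
+ * tpm_tcg is the memory-mapped (TCG TIS) phy backend; the protocol state
+ * machine lives in tpm_tis_core.c and reaches the hardware only through
+ * these five accessors.
+ */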
+static const struct tpm_tis_phy_ops tpm_tcg = {
+ .read_bytes = tpm_tcg_read_bytes,
+ .write_bytes = tpm_tcg_write_bytes,
+ .read16 = tpm_tcg_read16,
+ .read32 = tpm_tcg_read32,
+ .write32 = tpm_tcg_write32,
+};
static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info,
acpi_handle acpi_dev_handle)
{
- u32 vendor, intfcaps, intmask;
- int rc, probe;
- struct tpm_chip *chip;
- struct priv_data *priv;
+ struct tpm_tis_tcg_phy *phy;
+ int irq = -1;
- priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL);
- if (priv == NULL)
+ phy = devm_kzalloc(dev, sizeof(struct tpm_tis_tcg_phy), GFP_KERNEL);
+ if (phy == NULL)
return -ENOMEM;
- chip = tpmm_chip_alloc(dev, &tpm_tis);
- if (IS_ERR(chip))
- return PTR_ERR(chip);
-
- chip->vendor.priv = priv;
-#ifdef CONFIG_ACPI
- chip->acpi_dev_handle = acpi_dev_handle;
-#endif
+ phy->iobase = devm_ioremap_resource(dev, &tpm_info->res);
+ if (IS_ERR(phy->iobase))
+ return PTR_ERR(phy->iobase);
- chip->vendor.iobase = devm_ioremap_resource(dev, &tpm_info->res);
- if (IS_ERR(chip->vendor.iobase))
- return PTR_ERR(chip->vendor.iobase);
-
- /* Maximum timeouts */
- chip->vendor.timeout_a = TIS_TIMEOUT_A_MAX;
- chip->vendor.timeout_b = TIS_TIMEOUT_B_MAX;
- chip->vendor.timeout_c = TIS_TIMEOUT_C_MAX;
- chip->vendor.timeout_d = TIS_TIMEOUT_D_MAX;
-
- if (wait_startup(chip, 0) != 0) {
- rc = -ENODEV;
- goto out_err;
- }
-
- /* Take control of the TPM's interrupt hardware and shut it off */
- intmask = ioread32(chip->vendor.iobase +
- TPM_INT_ENABLE(chip->vendor.locality));
- intmask |= TPM_INTF_CMD_READY_INT | TPM_INTF_LOCALITY_CHANGE_INT |
- TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT;
- intmask &= ~TPM_GLOBAL_INT_ENABLE;
- iowrite32(intmask,
- chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
-
- if (request_locality(chip, 0) != 0) {
- rc = -ENODEV;
- goto out_err;
- }
-
- rc = tpm2_probe(chip);
- if (rc)
- goto out_err;
-
- vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
- chip->vendor.manufacturer_id = vendor;
-
- dev_info(dev, "%s TPM (device-id 0x%X, rev-id %d)\n",
- (chip->flags & TPM_CHIP_FLAG_TPM2) ? "2.0" : "1.2",
- vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
-
- if (!itpm) {
- probe = probe_itpm(chip);
- if (probe < 0) {
- rc = -ENODEV;
- goto out_err;
- }
- itpm = !!probe;
- }
+ if (interrupts)
+ irq = tpm_info->irq;
if (itpm)
- dev_info(dev, "Intel iTPM workaround enabled\n");
-
-
- /* Figure out the capabilities */
- intfcaps =
- ioread32(chip->vendor.iobase +
- TPM_INTF_CAPS(chip->vendor.locality));
- dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
- intfcaps);
- if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
- dev_dbg(dev, "\tBurst Count Static\n");
- if (intfcaps & TPM_INTF_CMD_READY_INT)
- dev_dbg(dev, "\tCommand Ready Int Support\n");
- if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
- dev_dbg(dev, "\tInterrupt Edge Falling\n");
- if (intfcaps & TPM_INTF_INT_EDGE_RISING)
- dev_dbg(dev, "\tInterrupt Edge Rising\n");
- if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
- dev_dbg(dev, "\tInterrupt Level Low\n");
- if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
- dev_dbg(dev, "\tInterrupt Level High\n");
- if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
- dev_dbg(dev, "\tLocality Change Int Support\n");
- if (intfcaps & TPM_INTF_STS_VALID_INT)
- dev_dbg(dev, "\tSts Valid Int Support\n");
- if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
- dev_dbg(dev, "\tData Avail Int Support\n");
-
- /* Very early on issue a command to the TPM in polling mode to make
- * sure it works. May as well use that command to set the proper
- * timeouts for the driver.
- */
- if (tpm_get_timeouts(chip)) {
- dev_err(dev, "Could not get TPM timeouts and durations\n");
- rc = -ENODEV;
- goto out_err;
- }
-
- /* INTERRUPT Setup */
- init_waitqueue_head(&chip->vendor.read_queue);
- init_waitqueue_head(&chip->vendor.int_queue);
- if (interrupts && tpm_info->irq != -1) {
- if (tpm_info->irq) {
- tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
- tpm_info->irq);
- if (!chip->vendor.irq)
- dev_err(chip->pdev, FW_BUG
- "TPM interrupt not working, polling instead\n");
- } else
- tpm_tis_probe_irq(chip, intmask);
- }
+ phy->priv.flags |= TPM_TIS_ITPM_POSSIBLE;
- if (chip->flags & TPM_CHIP_FLAG_TPM2) {
- rc = tpm2_do_selftest(chip);
- if (rc == TPM2_RC_INITIALIZE) {
- dev_warn(dev, "Firmware has not started TPM\n");
- rc = tpm2_startup(chip, TPM2_SU_CLEAR);
- if (!rc)
- rc = tpm2_do_selftest(chip);
- }
-
- if (rc) {
- dev_err(dev, "TPM self test failed\n");
- if (rc > 0)
- rc = -ENODEV;
- goto out_err;
- }
- } else {
- if (tpm_do_selftest(chip)) {
- dev_err(dev, "TPM self test failed\n");
- rc = -ENODEV;
- goto out_err;
- }
- }
-
- return tpm_chip_register(chip);
-out_err:
- tpm_tis_remove(chip);
- return rc;
+ return tpm_tis_core_init(dev, &phy->priv, irq, &tpm_tcg,
+ acpi_dev_handle);
}
-#ifdef CONFIG_PM_SLEEP
-static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
-{
- u32 intmask;
-
- /* reenable interrupts that device may have lost or
- BIOS/firmware may have disabled */
- iowrite8(chip->vendor.irq, chip->vendor.iobase +
- TPM_INT_VECTOR(chip->vendor.locality));
-
- intmask =
- ioread32(chip->vendor.iobase +
- TPM_INT_ENABLE(chip->vendor.locality));
-
- intmask |= TPM_INTF_CMD_READY_INT
- | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
- | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;
-
- iowrite32(intmask,
- chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
-}
-
-static int tpm_tis_resume(struct device *dev)
-{
- struct tpm_chip *chip = dev_get_drvdata(dev);
- int ret;
-
- if (chip->vendor.irq)
- tpm_tis_reenable_interrupts(chip);
-
- ret = tpm_pm_resume(dev);
- if (ret)
- return ret;
-
- /* TPM 1.2 requires self-test on resume. This function actually returns
- * an error code but for unknown reason it isn't handled.
- */
- if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
- tpm_do_selftest(chip);
-
- return 0;
-}
-#endif
-
static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
@@ -1058,12 +363,6 @@ static struct platform_driver tis_drv = {
},
};
-static bool force;
-#ifdef CONFIG_X86
-module_param(force, bool, 0444);
-MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
-#endif
-
static int tpm_tis_force_device(void)
{
struct platform_device *pdev;
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
new file mode 100644
index 000000000..d66f51b36
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -0,0 +1,835 @@
+/*
+ * Copyright (C) 2005, 2006 IBM Corporation
+ * Copyright (C) 2014, 2015 Intel Corporation
+ *
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This device driver implements the TPM interface as defined in
+ * the TCG TPM Interface Spec version 1.2, revision 1.0.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pnp.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/acpi.h>
+#include <linux/freezer.h>
+#include "tpm.h"
+#include "tpm_tis_core.h"
+
+/* Before we attempt to access the TPM we must see that the valid bit is set.
+ * The specification says that this bit is 0 at reset and remains 0 until the
+ * 'TPM has gone through its self test and initialization and has established
+ * correct values in the other bits.'
+ */
+static int wait_startup(struct tpm_chip *chip, int l)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ unsigned long stop = jiffies + chip->timeout_a;
+
+ do {
+ int rc;
+ u8 access;
+
+ rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access);
+ if (rc < 0)
+ return rc;
+
+ if (access & TPM_ACCESS_VALID)
+ return 0;
+ msleep(TPM_TIMEOUT);
+ } while (time_before(jiffies, stop));
+ return -1;
+}
+
+static int check_locality(struct tpm_chip *chip, int l)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ int rc;
+ u8 access;
+
+ rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access);
+ if (rc < 0)
+ return rc;
+
+ if ((access & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
+ (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID))
+ return priv->locality = l;
+
+ return -1;
+}
+
+static void release_locality(struct tpm_chip *chip, int l, int force)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ int rc;
+ u8 access;
+
+ rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access);
+ if (rc < 0)
+ return;
+
+ if (force || (access &
+ (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
+ (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID))
+ tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
+}
+
+static int request_locality(struct tpm_chip *chip, int l)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ unsigned long stop, timeout;
+ long rc;
+
+ if (check_locality(chip, l) >= 0)
+ return l;
+
+ rc = tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_REQUEST_USE);
+ if (rc < 0)
+ return rc;
+
+ stop = jiffies + chip->timeout_a;
+
+ if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+again:
+ timeout = stop - jiffies;
+ if ((long)timeout <= 0)
+ return -1;
+ rc = wait_event_interruptible_timeout(priv->int_queue,
+ (check_locality
+ (chip, l) >= 0),
+ timeout);
+ if (rc > 0)
+ return l;
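+ /* the freezer's fake signal interrupted the wait; clear it
+ * and retry instead of failing the locality request
+ */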
+ if (rc == -ERESTARTSYS && freezing(current)) {
+ clear_thread_flag(TIF_SIGPENDING);
+ goto again;
+ }
+ } else {
+ /* poll for the locality to be granted */
+ do {
+ if (check_locality(chip, l) >= 0)
+ return l;
+ msleep(TPM_TIMEOUT);
+ } while (time_before(jiffies, stop));
+ }
+ return -1;
+}
+
+static u8 tpm_tis_status(struct tpm_chip *chip)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ int rc;
+ u8 status;
+
+ rc = tpm_tis_read8(priv, TPM_STS(priv->locality), &status);
+ if (rc < 0)
+ return 0;
+
+ return status;
+}
+
+static void tpm_tis_ready(struct tpm_chip *chip)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+
+ /* this causes the current command to be aborted */
+ tpm_tis_write8(priv, TPM_STS(priv->locality), TPM_STS_COMMAND_READY);
+}
+
+static int get_burstcount(struct tpm_chip *chip)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ unsigned long stop;
+ int burstcnt, rc;
+ u32 value;
+
+ /* wait for burstcount; the spec allows either timeout c or d,
+ * timeout_d is used here
+ */
+ stop = jiffies + chip->timeout_d;
+ do {
+ rc = tpm_tis_read32(priv, TPM_STS(priv->locality), &value);
+ if (rc < 0)
+ return rc;
+
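+ /* TPM_STS bits 8..23 hold the burst count; e.g. a status value
+ * of 0x00009190 permits 0x91 (145) FIFO bytes before the next
+ * status poll
+ */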
+ burstcnt = (value >> 8) & 0xFFFF;
+ if (burstcnt)
+ return burstcnt;
+ msleep(TPM_TIMEOUT);
+ } while (time_before(jiffies, stop));
+ return -EBUSY;
+}
+
+static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ int size = 0, burstcnt, rc;
+
+ while (size < count &&
+ wait_for_tpm_stat(chip,
+ TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ chip->timeout_c,
+ &priv->read_queue, true) == 0) {
+ burstcnt = get_burstcount(chip);
+ if (burstcnt < 0)
+ return burstcnt;
+ burstcnt = min_t(int, burstcnt, count - size);
+
+ rc = tpm_tis_read_bytes(priv, TPM_DATA_FIFO(priv->locality),
+ burstcnt, buf + size);
+ if (rc < 0)
+ return rc;
+
+ size += burstcnt;
+ }
+ return size;
+}
+
+static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ int size = 0;
+ int expected, status;
+
+ if (count < TPM_HEADER_SIZE) {
+ size = -EIO;
+ goto out;
+ }
+
+ size = recv_data(chip, buf, TPM_HEADER_SIZE);
+ /* read first 10 bytes, including tag, paramsize, and result */
+ if (size < TPM_HEADER_SIZE) {
+ dev_err(&chip->dev, "Unable to read header\n");
+ goto out;
+ }
+
+ expected = be32_to_cpu(*(__be32 *) (buf + 2));
+ if (expected > count) {
+ size = -EIO;
+ goto out;
+ }
+
+ size += recv_data(chip, &buf[TPM_HEADER_SIZE],
+ expected - TPM_HEADER_SIZE);
+ if (size < expected) {
+ dev_err(&chip->dev, "Unable to read remainder of result\n");
+ size = -ETIME;
+ goto out;
+ }
+
+ wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
+ &priv->int_queue, false);
+ status = tpm_tis_status(chip);
+ if (status & TPM_STS_DATA_AVAIL) { /* retry? */
+ dev_err(&chip->dev, "Error left over data\n");
+ size = -EIO;
+ goto out;
+ }
+
+out:
+ tpm_tis_ready(chip);
+ release_locality(chip, priv->locality, 0);
+ return size;
+}
+
+/*
+ * If interrupts are used (signaled by TPM_CHIP_FLAG_IRQ in chip->flags),
+ * tpm.c can skip polling for the data to be available as the interrupt is
+ * waited for here
+ */
+static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ int rc, status, burstcnt;
+ size_t count = 0;
+ bool itpm = priv->flags & TPM_TIS_ITPM_POSSIBLE;
+
+ if (request_locality(chip, 0) < 0)
+ return -EBUSY;
+
+ status = tpm_tis_status(chip);
+ if ((status & TPM_STS_COMMAND_READY) == 0) {
+ tpm_tis_ready(chip);
+ if (wait_for_tpm_stat
+ (chip, TPM_STS_COMMAND_READY, chip->timeout_b,
+ &priv->int_queue, false) < 0) {
+ rc = -ETIME;
+ goto out_err;
+ }
+ }
+
+ while (count < len - 1) {
+ burstcnt = get_burstcount(chip);
+ if (burstcnt < 0) {
+ rc = burstcnt;
+ goto out_err;
+ }
+ burstcnt = min_t(int, burstcnt, len - count - 1);
+ rc = tpm_tis_write_bytes(priv, TPM_DATA_FIFO(priv->locality),
+ burstcnt, buf + count);
+ if (rc < 0)
+ goto out_err;
+
+ count += burstcnt;
+
+ wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
+ &priv->int_queue, false);
+ status = tpm_tis_status(chip);
+ if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
+ rc = -EIO;
+ goto out_err;
+ }
+ }
+
+ /* write last byte */
+ rc = tpm_tis_write8(priv, TPM_DATA_FIFO(priv->locality), buf[count]);
+ if (rc < 0)
+ goto out_err;
+
+ wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
+ &priv->int_queue, false);
+ status = tpm_tis_status(chip);
+ if (!itpm && (status & TPM_STS_DATA_EXPECT) != 0) {
+ rc = -EIO;
+ goto out_err;
+ }
+
+ return 0;
+
+out_err:
+ tpm_tis_ready(chip);
+ release_locality(chip, priv->locality, 0);
+ return rc;
+}
+
+static void disable_interrupts(struct tpm_chip *chip)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ u32 intmask;
+ int rc;
+
+ rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
+ if (rc < 0)
+ intmask = 0;
+
+ intmask &= ~TPM_GLOBAL_INT_ENABLE;
+ rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
+
+ devm_free_irq(chip->dev.parent, priv->irq, chip);
+ priv->irq = 0;
+ chip->flags &= ~TPM_CHIP_FLAG_IRQ;
+}
+
+/*
+ * If interrupts are used (signaled by TPM_CHIP_FLAG_IRQ in chip->flags),
+ * tpm.c can skip polling for the data to be available as the interrupt is
+ * waited for here
+ */
+static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ int rc;
+ u32 ordinal;
+ unsigned long dur;
+
+ rc = tpm_tis_send_data(chip, buf, len);
+ if (rc < 0)
+ return rc;
+
+ /* go and do it */
+ rc = tpm_tis_write8(priv, TPM_STS(priv->locality), TPM_STS_GO);
+ if (rc < 0)
+ goto out_err;
+
+ if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+ ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ dur = tpm2_calc_ordinal_duration(chip, ordinal);
+ else
+ dur = tpm_calc_ordinal_duration(chip, ordinal);
+
+ if (wait_for_tpm_stat
+ (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID, dur,
+ &priv->read_queue, false) < 0) {
+ rc = -ETIME;
+ goto out_err;
+ }
+ }
+ return len;
+out_err:
+ tpm_tis_ready(chip);
+ release_locality(chip, priv->locality, 0);
+ return rc;
+}
+
+static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ int rc, irq;
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+
+ if (!(chip->flags & TPM_CHIP_FLAG_IRQ) || priv->irq_tested)
+ return tpm_tis_send_main(chip, buf, len);
+
+ /* Verify receipt of the expected IRQ */
+ irq = priv->irq;
+ priv->irq = 0;
+ chip->flags &= ~TPM_CHIP_FLAG_IRQ;
+ rc = tpm_tis_send_main(chip, buf, len);
+ priv->irq = irq;
+ chip->flags |= TPM_CHIP_FLAG_IRQ;
+ if (!priv->irq_tested)
+ msleep(1);
+ if (!priv->irq_tested)
+ disable_interrupts(chip);
+ priv->irq_tested = true;
+ return rc;
+}
+
+struct tis_vendor_timeout_override {
+ u32 did_vid;
+ unsigned long timeout_us[4];
+};
+
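+/*
+ * did_vid mirrors the raw TPM_DID_VID(0) register: device ID in the upper
+ * 16 bits, vendor ID in the lower 16, so 0x32041114 is Atmel device 0x3204
+ * (VID 0x1114).
+ */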
+static const struct tis_vendor_timeout_override vendor_timeout_overrides[] = {
+ /* Atmel 3204 */
+ { 0x32041114, { (TIS_SHORT_TIMEOUT*1000), (TIS_LONG_TIMEOUT*1000),
+ (TIS_SHORT_TIMEOUT*1000), (TIS_SHORT_TIMEOUT*1000) } },
+};
+
+static bool tpm_tis_update_timeouts(struct tpm_chip *chip,
+ unsigned long *timeout_cap)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ int i, rc;
+ u32 did_vid;
+
+ rc = tpm_tis_read32(priv, TPM_DID_VID(0), &did_vid);
+ if (rc < 0)
+ return false;
+
+ for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) {
+ if (vendor_timeout_overrides[i].did_vid != did_vid)
+ continue;
+ memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us,
+ sizeof(vendor_timeout_overrides[i].timeout_us));
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Early probing for iTPM with STS_DATA_EXPECT flaw.
+ * Try sending command without itpm flag set and if that
+ * fails, repeat with itpm flag set.
+ */
+static int probe_itpm(struct tpm_chip *chip)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ int rc = 0;
+ u8 cmd_getticks[] = {
+ 0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a,
+ 0x00, 0x00, 0x00, 0xf1
+ };
+ size_t len = sizeof(cmd_getticks);
+ u16 vendor;
+
+ rc = tpm_tis_read16(priv, TPM_DID_VID(0), &vendor);
+ if (rc < 0)
+ return rc;
+
+ /* probe only iTPMS */
+ if (vendor != TPM_VID_INTEL)
+ return 0;
+
+ rc = tpm_tis_send_data(chip, cmd_getticks, len);
+ if (rc == 0)
+ goto out;
+
+ tpm_tis_ready(chip);
+ release_locality(chip, priv->locality, 0);
+
+ priv->flags |= TPM_TIS_ITPM_POSSIBLE;
+
+ rc = tpm_tis_send_data(chip, cmd_getticks, len);
+ if (rc == 0) {
+ dev_info(&chip->dev, "Detected an iTPM.\n");
+ rc = 1;
+ } else {
+ priv->flags &= ~TPM_TIS_ITPM_POSSIBLE;
+ rc = -EFAULT;
+ }
+
+out:
+ tpm_tis_ready(chip);
+ release_locality(chip, priv->locality, 0);
+
+ return rc;
+}
+
+static bool tpm_tis_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+
+ switch (priv->manufacturer_id) {
+ case TPM_VID_WINBOND:
+ return ((status == TPM_STS_VALID) ||
+ (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY)));
+ case TPM_VID_STM:
+ return (status == (TPM_STS_VALID | TPM_STS_COMMAND_READY));
+ default:
+ return (status == TPM_STS_COMMAND_READY);
+ }
+}
+
+static irqreturn_t tis_int_handler(int dummy, void *dev_id)
+{
+ struct tpm_chip *chip = dev_id;
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ u32 interrupt;
+ int i, rc;
+
+ rc = tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &interrupt);
+ if (rc < 0)
+ return IRQ_NONE;
+
+ if (interrupt == 0)
+ return IRQ_NONE;
+
+ priv->irq_tested = true;
+ if (interrupt & TPM_INTF_DATA_AVAIL_INT)
+ wake_up_interruptible(&priv->read_queue);
+ if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
+ for (i = 0; i < 5; i++)
+ if (check_locality(chip, i) >= 0)
+ break;
+ if (interrupt &
+ (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT |
+ TPM_INTF_CMD_READY_INT))
+ wake_up_interruptible(&priv->int_queue);
+
+ /* Clear interrupts handled with TPM_EOI */
+ rc = tpm_tis_write32(priv, TPM_INT_STATUS(priv->locality), interrupt);
+ if (rc < 0)
+ return IRQ_NONE;
+
+ tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &interrupt);
+ return IRQ_HANDLED;
+}
+
+/* Register the IRQ and issue a command that will cause an interrupt. If an
+ * irq is seen then leave the chip setup for IRQ operation, otherwise reverse
+ * everything and leave in polling mode. Returns 0 on success.
+ */
+static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
+ int flags, int irq)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ u8 original_int_vec;
+ int rc;
+ u32 int_status;
+
+ if (devm_request_irq(chip->dev.parent, irq, tis_int_handler, flags,
+ dev_name(&chip->dev), chip) != 0) {
+ dev_info(&chip->dev, "Unable to request irq: %d for probe\n",
+ irq);
+ return -1;
+ }
+ priv->irq = irq;
+
+ rc = tpm_tis_read8(priv, TPM_INT_VECTOR(priv->locality),
+ &original_int_vec);
+ if (rc < 0)
+ return rc;
+
+ rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), irq);
+ if (rc < 0)
+ return rc;
+
+ rc = tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &int_status);
+ if (rc < 0)
+ return rc;
+
+ /* Clear all existing */
+ rc = tpm_tis_write32(priv, TPM_INT_STATUS(priv->locality), int_status);
+ if (rc < 0)
+ return rc;
+
+ /* Turn on */
+ rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality),
+ intmask | TPM_GLOBAL_INT_ENABLE);
+ if (rc < 0)
+ return rc;
+
+ priv->irq_tested = false;
+
+ /* Generate an interrupt by having the core call through to
+ * tpm_tis_send
+ */
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ tpm2_gen_interrupt(chip);
+ else
+ tpm_gen_interrupt(chip);
+
+ /* tpm_tis_send will either confirm the interrupt is working or it
+ * will call disable_irq which undoes all of the above.
+ */
+ if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
+ rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality),
+ original_int_vec);
+ if (rc < 0)
+ return rc;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Try to find the IRQ the TPM is using. This is for legacy x86 systems that
+ * do not have ACPI/etc. We typically expect the interrupt to be declared if
+ * present.
+ */
+static void tpm_tis_probe_irq(struct tpm_chip *chip, u32 intmask)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ u8 original_int_vec;
+ int i, rc;
+
+ rc = tpm_tis_read8(priv, TPM_INT_VECTOR(priv->locality),
+ &original_int_vec);
+ if (rc < 0)
+ return;
+
+ if (!original_int_vec) {
+ if (IS_ENABLED(CONFIG_X86))
+ for (i = 3; i <= 15; i++)
+ if (!tpm_tis_probe_irq_single(chip, intmask, 0,
+ i))
+ return;
+ } else if (!tpm_tis_probe_irq_single(chip, intmask, 0,
+ original_int_vec))
+ return;
+}
+
+void tpm_tis_remove(struct tpm_chip *chip)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ u32 reg = TPM_INT_ENABLE(priv->locality);
+ u32 interrupt;
+ int rc;
+
+ rc = tpm_tis_read32(priv, reg, &interrupt);
+ if (rc < 0)
+ interrupt = 0;
+
+ tpm_tis_write32(priv, reg, ~TPM_GLOBAL_INT_ENABLE & interrupt);
+ release_locality(chip, priv->locality, 1);
+}
+EXPORT_SYMBOL_GPL(tpm_tis_remove);
+
+static const struct tpm_class_ops tpm_tis = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .status = tpm_tis_status,
+ .recv = tpm_tis_recv,
+ .send = tpm_tis_send,
+ .cancel = tpm_tis_ready,
+ .update_timeouts = tpm_tis_update_timeouts,
+ .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ .req_canceled = tpm_tis_req_canceled,
+};
+
+int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+ const struct tpm_tis_phy_ops *phy_ops,
+ acpi_handle acpi_dev_handle)
+{
+ u32 vendor, intfcaps, intmask;
+ u8 rid;
+ int rc, probe;
+ struct tpm_chip *chip;
+
+ chip = tpmm_chip_alloc(dev, &tpm_tis);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+#ifdef CONFIG_ACPI
+ chip->acpi_dev_handle = acpi_dev_handle;
+#endif
+
+ /* Maximum timeouts */
+ chip->timeout_a = msecs_to_jiffies(TIS_TIMEOUT_A_MAX);
+ chip->timeout_b = msecs_to_jiffies(TIS_TIMEOUT_B_MAX);
+ chip->timeout_c = msecs_to_jiffies(TIS_TIMEOUT_C_MAX);
+ chip->timeout_d = msecs_to_jiffies(TIS_TIMEOUT_D_MAX);
+ priv->phy_ops = phy_ops;
+ dev_set_drvdata(&chip->dev, priv);
+
+ if (wait_startup(chip, 0) != 0) {
+ rc = -ENODEV;
+ goto out_err;
+ }
+
+ /* Take control of the TPM's interrupt hardware and shut it off */
+ rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
+ if (rc < 0)
+ goto out_err;
+
+ intmask |= TPM_INTF_CMD_READY_INT | TPM_INTF_LOCALITY_CHANGE_INT |
+ TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT;
+ intmask &= ~TPM_GLOBAL_INT_ENABLE;
+ tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
+
+ if (request_locality(chip, 0) != 0) {
+ rc = -ENODEV;
+ goto out_err;
+ }
+
+ rc = tpm2_probe(chip);
+ if (rc)
+ goto out_err;
+
+ rc = tpm_tis_read32(priv, TPM_DID_VID(0), &vendor);
+ if (rc < 0)
+ goto out_err;
+
+ priv->manufacturer_id = vendor;
+
+ rc = tpm_tis_read8(priv, TPM_RID(0), &rid);
+ if (rc < 0)
+ goto out_err;
+
+ dev_info(dev, "%s TPM (device-id 0x%X, rev-id %d)\n",
+ (chip->flags & TPM_CHIP_FLAG_TPM2) ? "2.0" : "1.2",
+ vendor >> 16, rid);
+
+ if (!(priv->flags & TPM_TIS_ITPM_POSSIBLE)) {
+ probe = probe_itpm(chip);
+ if (probe < 0) {
+ rc = -ENODEV;
+ goto out_err;
+ }
+
+ if (probe)
+ priv->flags |= TPM_TIS_ITPM_POSSIBLE;
+ }
+
+ /* Figure out the capabilities */
+ rc = tpm_tis_read32(priv, TPM_INTF_CAPS(priv->locality), &intfcaps);
+ if (rc < 0)
+ goto out_err;
+
+ dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
+ intfcaps);
+ if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
+ dev_dbg(dev, "\tBurst Count Static\n");
+ if (intfcaps & TPM_INTF_CMD_READY_INT)
+ dev_dbg(dev, "\tCommand Ready Int Support\n");
+ if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
+ dev_dbg(dev, "\tInterrupt Edge Falling\n");
+ if (intfcaps & TPM_INTF_INT_EDGE_RISING)
+ dev_dbg(dev, "\tInterrupt Edge Rising\n");
+ if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
+ dev_dbg(dev, "\tInterrupt Level Low\n");
+ if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
+ dev_dbg(dev, "\tInterrupt Level High\n");
+ if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
+ dev_dbg(dev, "\tLocality Change Int Support\n");
+ if (intfcaps & TPM_INTF_STS_VALID_INT)
+ dev_dbg(dev, "\tSts Valid Int Support\n");
+ if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
+ dev_dbg(dev, "\tData Avail Int Support\n");
+
+ /* Very early on issue a command to the TPM in polling mode to make
+ * sure it works. May as well use that command to set the proper
+ * timeouts for the driver.
+ */
+ if (tpm_get_timeouts(chip)) {
+ dev_err(dev, "Could not get TPM timeouts and durations\n");
+ rc = -ENODEV;
+ goto out_err;
+ }
+
+ /* INTERRUPT Setup */
+ init_waitqueue_head(&priv->read_queue);
+ init_waitqueue_head(&priv->int_queue);
+ if (irq != -1) {
+ if (irq) {
+ tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
+ irq);
+ if (!(chip->flags & TPM_CHIP_FLAG_IRQ))
+ dev_err(&chip->dev, FW_BUG
+ "TPM interrupt not working, polling instead\n");
+ } else {
+ tpm_tis_probe_irq(chip, intmask);
+ }
+ }
+
+ return tpm_chip_register(chip);
+out_err:
+ tpm_tis_remove(chip);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_tis_core_init);
+
+#ifdef CONFIG_PM_SLEEP
+static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ u32 intmask;
+ int rc;
+
+ /* reenable interrupts that device may have lost or
+ * BIOS/firmware may have disabled
+ */
+ rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), priv->irq);
+ if (rc < 0)
+ return;
+
+ rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
+ if (rc < 0)
+ return;
+
+ intmask |= TPM_INTF_CMD_READY_INT
+ | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
+ | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;
+
+ tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
+}
+
+int tpm_tis_resume(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ int ret;
+
+ if (chip->flags & TPM_CHIP_FLAG_IRQ)
+ tpm_tis_reenable_interrupts(chip);
+
+ ret = tpm_pm_resume(dev);
+ if (ret)
+ return ret;
+
+ /* TPM 1.2 requires a self-test on resume. tpm_do_selftest() does
+ * return an error code, but it has historically been ignored here.
+ */
+ if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
+ tpm_do_selftest(chip);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tpm_tis_resume);
+#endif
+
+MODULE_AUTHOR("Leendert van Doorn (leendert@watson.ibm.com)");
+MODULE_DESCRIPTION("TPM Driver");
+MODULE_VERSION("2.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
new file mode 100644
index 000000000..9191aabbf
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2005, 2006 IBM Corporation
+ * Copyright (C) 2014, 2015 Intel Corporation
+ *
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This device driver implements the TPM interface as defined in
+ * the TCG TPM Interface Spec version 1.2, revision 1.0.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __TPM_TIS_CORE_H__
+#define __TPM_TIS_CORE_H__
+
+#include "tpm.h"
+
+enum tis_access {
+ TPM_ACCESS_VALID = 0x80,
+ TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
+ TPM_ACCESS_REQUEST_PENDING = 0x04,
+ TPM_ACCESS_REQUEST_USE = 0x02,
+};
+
+enum tis_status {
+ TPM_STS_VALID = 0x80,
+ TPM_STS_COMMAND_READY = 0x40,
+ TPM_STS_GO = 0x20,
+ TPM_STS_DATA_AVAIL = 0x10,
+ TPM_STS_DATA_EXPECT = 0x08,
+};
+
+enum tis_int_flags {
+ TPM_GLOBAL_INT_ENABLE = 0x80000000,
+ TPM_INTF_BURST_COUNT_STATIC = 0x100,
+ TPM_INTF_CMD_READY_INT = 0x080,
+ TPM_INTF_INT_EDGE_FALLING = 0x040,
+ TPM_INTF_INT_EDGE_RISING = 0x020,
+ TPM_INTF_INT_LEVEL_LOW = 0x010,
+ TPM_INTF_INT_LEVEL_HIGH = 0x008,
+ TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
+ TPM_INTF_STS_VALID_INT = 0x002,
+ TPM_INTF_DATA_AVAIL_INT = 0x001,
+};
+
+enum tis_defaults {
+ TIS_MEM_LEN = 0x5000,
+ TIS_SHORT_TIMEOUT = 750, /* ms */
+ TIS_LONG_TIMEOUT = 2000, /* 2 sec */
+};
+
+/* Some timeout values are needed before it is known whether the chip is
+ * TPM 1.0 or TPM 2.0.
+ */
+#define TIS_TIMEOUT_A_MAX max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_A)
+#define TIS_TIMEOUT_B_MAX max(TIS_LONG_TIMEOUT, TPM2_TIMEOUT_B)
+#define TIS_TIMEOUT_C_MAX max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_C)
+#define TIS_TIMEOUT_D_MAX max(TIS_SHORT_TIMEOUT, TPM2_TIMEOUT_D)
+
+#define TPM_ACCESS(l) (0x0000 | ((l) << 12))
+#define TPM_INT_ENABLE(l) (0x0008 | ((l) << 12))
+#define TPM_INT_VECTOR(l) (0x000C | ((l) << 12))
+#define TPM_INT_STATUS(l) (0x0010 | ((l) << 12))
+#define TPM_INTF_CAPS(l) (0x0014 | ((l) << 12))
+#define TPM_STS(l) (0x0018 | ((l) << 12))
+#define TPM_STS3(l) (0x001b | ((l) << 12))
+#define TPM_DATA_FIFO(l) (0x0024 | ((l) << 12))
+
+#define TPM_DID_VID(l) (0x0F00 | ((l) << 12))
+#define TPM_RID(l) (0x0F04 | ((l) << 12))
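+
+/* Each locality owns a 4 KiB register window ((l) << 12), so e.g.
+ * TPM_STS(0) is 0x0018 while TPM_STS(2) is 0x2018.
+ */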
+
+enum tpm_tis_flags {
+ TPM_TIS_ITPM_POSSIBLE = BIT(0),
+};
+
+struct tpm_tis_data {
+ u16 manufacturer_id;
+ int locality;
+ int irq;
+ bool irq_tested;
+ unsigned int flags;
+ wait_queue_head_t int_queue;
+ wait_queue_head_t read_queue;
+ const struct tpm_tis_phy_ops *phy_ops;
+};
+
+struct tpm_tis_phy_ops {
+ int (*read_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *result);
+ int (*write_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *value);
+ int (*read16)(struct tpm_tis_data *data, u32 addr, u16 *result);
+ int (*read32)(struct tpm_tis_data *data, u32 addr, u32 *result);
+ int (*write32)(struct tpm_tis_data *data, u32 addr, u32 src);
+};
+
+static inline int tpm_tis_read_bytes(struct tpm_tis_data *data, u32 addr,
+ u16 len, u8 *result)
+{
+ return data->phy_ops->read_bytes(data, addr, len, result);
+}
+
+static inline int tpm_tis_read8(struct tpm_tis_data *data, u32 addr, u8 *result)
+{
+ return data->phy_ops->read_bytes(data, addr, 1, result);
+}
+
+static inline int tpm_tis_read16(struct tpm_tis_data *data, u32 addr,
+ u16 *result)
+{
+ return data->phy_ops->read16(data, addr, result);
+}
+
+static inline int tpm_tis_read32(struct tpm_tis_data *data, u32 addr,
+ u32 *result)
+{
+ return data->phy_ops->read32(data, addr, result);
+}
+
+static inline int tpm_tis_write_bytes(struct tpm_tis_data *data, u32 addr,
+ u16 len, u8 *value)
+{
+ return data->phy_ops->write_bytes(data, addr, len, value);
+}
+
+static inline int tpm_tis_write8(struct tpm_tis_data *data, u32 addr, u8 value)
+{
+ return data->phy_ops->write_bytes(data, addr, 1, &value);
+}
+
+static inline int tpm_tis_write32(struct tpm_tis_data *data, u32 addr,
+ u32 value)
+{
+ return data->phy_ops->write32(data, addr, value);
+}
+
+void tpm_tis_remove(struct tpm_chip *chip);
+int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+ const struct tpm_tis_phy_ops *phy_ops,
+ acpi_handle acpi_dev_handle);
+
+#ifdef CONFIG_PM_SLEEP
+int tpm_tis_resume(struct device *dev);
+#endif
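+
+/*
+ * A minimal sketch of how an additional phy backend plugs into the core,
+ * assuming a hypothetical memory-mapped platform device; every tpm_hypo_*
+ * name is illustrative, not part of this interface. Each backend embeds
+ * struct tpm_tis_data as its first member so container_of() can recover
+ * the private state, fills in all five tpm_tis_phy_ops callbacks, and
+ * hands the result to tpm_tis_core_init(); irq == -1 selects polling mode.
+ *
+ * struct tpm_hypo_phy {
+ * struct tpm_tis_data priv;
+ * void __iomem *regs;
+ * };
+ *
+ * static int tpm_hypo_read_bytes(struct tpm_tis_data *data, u32 addr,
+ * u16 len, u8 *result)
+ * {
+ * struct tpm_hypo_phy *phy =
+ * container_of(data, struct tpm_hypo_phy, priv);
+ *
+ * while (len--)
+ * *result++ = ioread8(phy->regs + addr);
+ * return 0;
+ * }
+ *
+ * ...write_bytes, read16, read32 and write32 mirror the tpm_tcg_*
+ * helpers in tpm_tis.c, then:
+ *
+ * static const struct tpm_tis_phy_ops tpm_hypo_ops = {
+ * .read_bytes = tpm_hypo_read_bytes,
+ * .write_bytes = tpm_hypo_write_bytes,
+ * .read16 = tpm_hypo_read16,
+ * .read32 = tpm_hypo_read32,
+ * .write32 = tpm_hypo_write32,
+ * };
+ *
+ * static int tpm_hypo_probe(struct platform_device *pdev)
+ * {
+ * struct tpm_hypo_phy *phy;
+ * struct resource *res;
+ *
+ * phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
+ * if (!phy)
+ * return -ENOMEM;
+ *
+ * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ * phy->regs = devm_ioremap_resource(&pdev->dev, res);
+ * if (IS_ERR(phy->regs))
+ * return PTR_ERR(phy->regs);
+ *
+ * return tpm_tis_core_init(&pdev->dev, &phy->priv, -1,
+ * &tpm_hypo_ops, NULL);
+ * }
+ */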
+
+#endif
diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c
new file mode 100644
index 000000000..dbaad9c68
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_spi.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright (C) 2015 Infineon Technologies AG
+ * Copyright (C) 2016 STMicroelectronics SAS
+ *
+ * Authors:
+ * Peter Huewe <peter.huewe@infineon.com>
+ * Christophe Ricard <christophe-h.ricard@st.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This device driver implements the TPM interface as defined in
+ * the TCG TPM Interface Spec version 1.3, revision 27 via _raw/native
+ * SPI access_.
+ *
+ * It is based on the original tpm_tis device driver by Leendert van
+ * Doorn, Kylene Hall, and Jarkko Sakkinen.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/acpi.h>
+#include <linux/freezer.h>
+
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/tpm.h>
+#include "tpm.h"
+#include "tpm_tis_core.h"
+
+#define MAX_SPI_FRAMESIZE 64
+
+struct tpm_tis_spi_phy {
+ struct tpm_tis_data priv;
+ struct spi_device *spi_device;
+
+ u8 tx_buf[MAX_SPI_FRAMESIZE + 4];
+ u8 rx_buf[MAX_SPI_FRAMESIZE + 4];
+};
+
+static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *data)
+{
+ return container_of(data, struct tpm_tis_spi_phy, priv);
+}
+
+static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
+ u16 len, u8 *result)
+{
+ struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+ int ret, i;
+ struct spi_message m;
+ struct spi_transfer spi_xfer = {
+ .tx_buf = phy->tx_buf,
+ .rx_buf = phy->rx_buf,
+ .len = 4,
+ };
+
+ if (len > MAX_SPI_FRAMESIZE)
+ return -ENOMEM;
+
+ phy->tx_buf[0] = 0x80 | (len - 1);
+ phy->tx_buf[1] = 0xd4;
+ phy->tx_buf[2] = (addr >> 8) & 0xFF;
+ phy->tx_buf[3] = addr & 0xFF;
+
+ spi_xfer.cs_change = 1;
+ spi_message_init(&m);
+ spi_message_add_tail(&spi_xfer, &m);
+
+ spi_bus_lock(phy->spi_device->master);
+ ret = spi_sync_locked(phy->spi_device, &m);
+ if (ret < 0)
+ goto exit;
+
+ memset(phy->tx_buf, 0, len);
+
+ /* According to TCG PTP specification, if there is no TPM present at
+ * all, then the design has a weak pull-up on MISO. If a TPM is not
+ * present, a pull-up on MISO means that the SB controller sees a 1,
+ * and will latch in 0xFF on the read.
+ */
+ for (i = 0; (phy->rx_buf[0] & 0x01) == 0 && i < TPM_RETRY; i++) {
+ spi_xfer.len = 1;
+ spi_message_init(&m);
+ spi_message_add_tail(&spi_xfer, &m);
+ ret = spi_sync_locked(phy->spi_device, &m);
+ if (ret < 0)
+ goto exit;
+ }
+
+ spi_xfer.cs_change = 0;
+ spi_xfer.len = len;
+ spi_xfer.rx_buf = result;
+
+ spi_message_init(&m);
+ spi_message_add_tail(&spi_xfer, &m);
+ ret = spi_sync_locked(phy->spi_device, &m);
+
+exit:
+ spi_bus_unlock(phy->spi_device->master);
+ return ret;
+}
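+
+/* Worked example of the framing above: a 4-byte read of TPM_DID_VID(0)
+ * (TIS address 0x0f00) sends the header { 0x83, 0xd4, 0x0f, 0x00 } --
+ * bit 7 of byte 0 marks a read, its low bits carry len - 1, and 0xd4 is
+ * the fixed top byte of the TIS register space. A TPM inserting wait
+ * states holds bit 0 of its response byte low, which is what the
+ * TPM_RETRY loop polls for.
+ */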
+
+static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
+ u16 len, u8 *value)
+{
+ struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+ int ret, i;
+ struct spi_message m;
+ struct spi_transfer spi_xfer = {
+ .tx_buf = phy->tx_buf,
+ .rx_buf = phy->rx_buf,
+ .len = 4,
+ };
+
+ if (len > MAX_SPI_FRAMESIZE)
+ return -ENOMEM;
+
+ phy->tx_buf[0] = len - 1;
+ phy->tx_buf[1] = 0xd4;
+ phy->tx_buf[2] = (addr >> 8) & 0xFF;
+ phy->tx_buf[3] = addr & 0xFF;
+
+ spi_xfer.cs_change = 1;
+ spi_message_init(&m);
+ spi_message_add_tail(&spi_xfer, &m);
+
+ spi_bus_lock(phy->spi_device->master);
+ ret = spi_sync_locked(phy->spi_device, &m);
+ if (ret < 0)
+ goto exit;
+
+ memset(phy->tx_buf, 0, len);
+
+ /* According to TCG PTP specification, if there is no TPM present at
+ * all, then the design has a weak pull-up on MISO. If a TPM is not
+ * present, a pull-up on MISO means that the SB controller sees a 1,
+ * and will latch in 0xFF on the read.
+ */
+ for (i = 0; (phy->rx_buf[0] & 0x01) == 0 && i < TPM_RETRY; i++) {
+ spi_xfer.len = 1;
+ spi_message_init(&m);
+ spi_message_add_tail(&spi_xfer, &m);
+ ret = spi_sync_locked(phy->spi_device, &m);
+ if (ret < 0)
+ goto exit;
+ }
+
+ spi_xfer.len = len;
+ spi_xfer.tx_buf = value;
+ spi_xfer.cs_change = 0;
+ spi_message_init(&m);
+ spi_message_add_tail(&spi_xfer, &m);
+ ret = spi_sync_locked(phy->spi_device, &m);
+
+exit:
+ spi_bus_unlock(phy->spi_device->master);
+ return ret;
+}
+
+static int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
+{
+ int rc;
+
+ rc = data->phy_ops->read_bytes(data, addr, sizeof(u16), (u8 *)result);
+ if (!rc)
+ *result = le16_to_cpu(*result);
+ return rc;
+}
+
+static int tpm_tis_spi_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
+{
+ int rc;
+
+ rc = data->phy_ops->read_bytes(data, addr, sizeof(u32), (u8 *)result);
+ if (!rc)
+ *result = le32_to_cpu(*result);
+ return rc;
+}
+
+static int tpm_tis_spi_write32(struct tpm_tis_data *data, u32 addr, u32 value)
+{
+ value = cpu_to_le32(value);
+ return data->phy_ops->write_bytes(data, addr, sizeof(u32),
+ (u8 *)&value);
+}
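+
+/* TIS registers are little-endian on the wire, hence the explicit
+ * le16_to_cpu()/le32_to_cpu()/cpu_to_le32() conversions around the raw
+ * byte transfers above; the memory-mapped backend gets the same
+ * conversion implicitly from ioread16()/ioread32().
+ */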
+
+static const struct tpm_tis_phy_ops tpm_spi_phy_ops = {
+ .read_bytes = tpm_tis_spi_read_bytes,
+ .write_bytes = tpm_tis_spi_write_bytes,
+ .read16 = tpm_tis_spi_read16,
+ .read32 = tpm_tis_spi_read32,
+ .write32 = tpm_tis_spi_write32,
+};
+
+static int tpm_tis_spi_probe(struct spi_device *dev)
+{
+ struct tpm_tis_spi_phy *phy;
+
+ phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_spi_phy),
+ GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->spi_device = dev;
+
+ return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_spi_phy_ops,
+ NULL);
+}
+
+static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
+
+static int tpm_tis_spi_remove(struct spi_device *dev)
+{
+ struct tpm_chip *chip = spi_get_drvdata(dev);
+
+ tpm_chip_unregister(chip);
+ tpm_tis_remove(chip);
+ return 0;
+}
+
+static const struct spi_device_id tpm_tis_spi_id[] = {
+ {"tpm_tis_spi", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, tpm_tis_spi_id);
+
+static const struct of_device_id of_tis_spi_match[] = {
+ { .compatible = "st,st33htpm-spi", },
+ { .compatible = "infineon,slb9670", },
+ { .compatible = "tcg,tpm_tis-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_tis_spi_match);
+
+static const struct acpi_device_id acpi_tis_spi_match[] = {
+ {"SMO0768", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, acpi_tis_spi_match);
+
+static struct spi_driver tpm_tis_spi_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tpm_tis_spi",
+ .pm = &tpm_tis_pm,
+ .of_match_table = of_match_ptr(of_tis_spi_match),
+ .acpi_match_table = ACPI_PTR(acpi_tis_spi_match),
+ },
+ .probe = tpm_tis_spi_probe,
+ .remove = tpm_tis_spi_remove,
+ .id_table = tpm_tis_spi_id,
+};
+module_spi_driver(tpm_tis_spi_driver);
+
+MODULE_DESCRIPTION("TPM Driver for native SPI access");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
new file mode 100644
index 000000000..9a940332c
--- /dev/null
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
@@ -0,0 +1,637 @@
+/*
+ * Copyright (C) 2015, 2016 IBM Corporation
+ *
+ * Author: Stefan Berger <stefanb@us.ibm.com>
+ *
+ * Maintained by: <tpmdd-devel@lists.sourceforge.net>
+ *
+ * Device driver for vTPM (vTPM proxy driver)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/miscdevice.h>
+#include <linux/vtpm_proxy.h>
+#include <linux/file.h>
+#include <linux/anon_inodes.h>
+#include <linux/poll.h>
+#include <linux/compat.h>
+
+#include "tpm.h"
+
+#define VTPM_PROXY_REQ_COMPLETE_FLAG BIT(0)
+
+struct proxy_dev {
+ struct tpm_chip *chip;
+
+ u32 flags; /* public API flags */
+
+ wait_queue_head_t wq;
+
+ struct mutex buf_lock; /* protect buffer and flags */
+
+ long state; /* internal state */
+#define STATE_OPENED_FLAG BIT(0)
+#define STATE_WAIT_RESPONSE_FLAG BIT(1) /* waiting for emulator response */
+
+ size_t req_len; /* length of queued TPM request */
+ size_t resp_len; /* length of queued TPM response */
+ u8 buffer[TPM_BUFSIZE]; /* request/response buffer */
+
+ struct work_struct work; /* task that retrieves TPM timeouts */
+};
+
+/* all supported flags */
+#define VTPM_PROXY_FLAGS_ALL (VTPM_PROXY_FLAG_TPM2)
+
+static struct workqueue_struct *workqueue;
+
+static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev);
+
+/*
+ * Functions related to 'server side'
+ */
+
+/**
+ * vtpm_proxy_fops_read - Read TPM commands on 'server side'
+ *
+ * Return value:
+ * Number of bytes read or negative error code
+ */
+static ssize_t vtpm_proxy_fops_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *off)
+{
+ struct proxy_dev *proxy_dev = filp->private_data;
+ size_t len;
+ int sig, rc;
+
+ sig = wait_event_interruptible(proxy_dev->wq,
+ proxy_dev->req_len != 0 ||
+ !(proxy_dev->state & STATE_OPENED_FLAG));
+ if (sig)
+ return -EINTR;
+
+ mutex_lock(&proxy_dev->buf_lock);
+
+ if (!(proxy_dev->state & STATE_OPENED_FLAG)) {
+ mutex_unlock(&proxy_dev->buf_lock);
+ return -EPIPE;
+ }
+
+ len = proxy_dev->req_len;
+
+ if (count < len) {
+ mutex_unlock(&proxy_dev->buf_lock);
+ pr_debug("Invalid size in recv: count=%zd, req_len=%zd\n",
+ count, len);
+ return -EIO;
+ }
+
+ rc = copy_to_user(buf, proxy_dev->buffer, len);
+ memset(proxy_dev->buffer, 0, len);
+ proxy_dev->req_len = 0;
+
+ if (!rc)
+ proxy_dev->state |= STATE_WAIT_RESPONSE_FLAG;
+
+ mutex_unlock(&proxy_dev->buf_lock);
+
+ if (rc)
+ return -EFAULT;
+
+ return len;
+}
+
+/**
+ * vtpm_proxy_fops_write - Write TPM responses on 'server side'
+ *
+ * Return value:
+ * Number of bytes written or a negative error value
+ */
+static ssize_t vtpm_proxy_fops_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ struct proxy_dev *proxy_dev = filp->private_data;
+
+ mutex_lock(&proxy_dev->buf_lock);
+
+ if (!(proxy_dev->state & STATE_OPENED_FLAG)) {
+ mutex_unlock(&proxy_dev->buf_lock);
+ return -EPIPE;
+ }
+
+ if (count > sizeof(proxy_dev->buffer) ||
+ !(proxy_dev->state & STATE_WAIT_RESPONSE_FLAG)) {
+ mutex_unlock(&proxy_dev->buf_lock);
+ return -EIO;
+ }
+
+ proxy_dev->state &= ~STATE_WAIT_RESPONSE_FLAG;
+
+ proxy_dev->req_len = 0;
+
+ if (copy_from_user(proxy_dev->buffer, buf, count)) {
+ mutex_unlock(&proxy_dev->buf_lock);
+ return -EFAULT;
+ }
+
+ proxy_dev->resp_len = count;
+
+ mutex_unlock(&proxy_dev->buf_lock);
+
+ wake_up_interruptible(&proxy_dev->wq);
+
+ return count;
+}
+
+/*
+ * vtpm_proxy_fops_poll: Poll status on 'server side'
+ *
+ * Return value:
+ * Poll flags
+ */
+static unsigned int vtpm_proxy_fops_poll(struct file *filp, poll_table *wait)
+{
+ struct proxy_dev *proxy_dev = filp->private_data;
+ unsigned ret;
+
+ poll_wait(filp, &proxy_dev->wq, wait);
+
+ ret = POLLOUT;
+
+ mutex_lock(&proxy_dev->buf_lock);
+
+ if (proxy_dev->req_len)
+ ret |= POLLIN | POLLRDNORM;
+
+ if (!(proxy_dev->state & STATE_OPENED_FLAG))
+ ret |= POLLHUP;
+
+ mutex_unlock(&proxy_dev->buf_lock);
+
+ return ret;
+}
+
+/*
+ * vtpm_proxy_fops_open - Open vTPM device on 'server side'
+ *
+ * Called when setting up the anonymous file descriptor
+ */
+static void vtpm_proxy_fops_open(struct file *filp)
+{
+ struct proxy_dev *proxy_dev = filp->private_data;
+
+ proxy_dev->state |= STATE_OPENED_FLAG;
+}
+
+/**
+ * vtpm_proxy_fops_undo_open - counterpart to vtpm_proxy_fops_open
+ *
+ * Call to undo vtpm_proxy_fops_open
+ */
+static void vtpm_proxy_fops_undo_open(struct proxy_dev *proxy_dev)
+{
+ mutex_lock(&proxy_dev->buf_lock);
+
+ proxy_dev->state &= ~STATE_OPENED_FLAG;
+
+ mutex_unlock(&proxy_dev->buf_lock);
+
+ /* no more TPM responses -- wake up anyone waiting for them */
+ wake_up_interruptible(&proxy_dev->wq);
+}
+
+/*
+ * vtpm_proxy_fops_release: Close 'server side'
+ *
+ * Return value:
+ * Always returns 0.
+ */
+static int vtpm_proxy_fops_release(struct inode *inode, struct file *filp)
+{
+ struct proxy_dev *proxy_dev = filp->private_data;
+
+ filp->private_data = NULL;
+
+ vtpm_proxy_delete_device(proxy_dev);
+
+ return 0;
+}
+
+static const struct file_operations vtpm_proxy_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .read = vtpm_proxy_fops_read,
+ .write = vtpm_proxy_fops_write,
+ .poll = vtpm_proxy_fops_poll,
+ .release = vtpm_proxy_fops_release,
+};
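+
+/*
+ * A minimal user-space sketch of the 'server side' loop these fops serve,
+ * assuming the VTPM_PROXY_IOC_NEW_DEV ioctl and struct vtpm_proxy_new_dev
+ * from <linux/vtpm_proxy.h>; emulate_command() is a placeholder for the
+ * actual TPM emulator and all error handling is elided.
+ *
+ * #include <fcntl.h>
+ * #include <stdio.h>
+ * #include <unistd.h>
+ * #include <sys/ioctl.h>
+ * #include <linux/vtpm_proxy.h>
+ *
+ * int main(void)
+ * {
+ * unsigned char buf[4096];
+ * struct vtpm_proxy_new_dev new_dev = { .flags = 0 };
+ * int ctrl = open("/dev/vtpmx", O_RDWR);
+ * ssize_t len;
+ *
+ * ioctl(ctrl, VTPM_PROXY_IOC_NEW_DEV, &new_dev);
+ * printf("client side is /dev/tpm%u\n", new_dev.tpm_num);
+ *
+ * for (;;) {
+ * len = read(new_dev.fd, buf, sizeof(buf)); (blocks for a request)
+ * if (len <= 0)
+ * break; (POLLHUP: the device pair went away)
+ * len = emulate_command(buf, len, sizeof(buf)); (placeholder)
+ * write(new_dev.fd, buf, len); (queue the TPM response)
+ * }
+ * return 0;
+ * }
+ */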
+
+/*
+ * Functions invoked by the core TPM driver to send TPM commands to
+ * 'server side' and receive responses from there.
+ */
+
+/*
+ * Called when core TPM driver reads TPM responses from 'server side'
+ *
+ * Return value:
+ * Number of TPM response bytes read, negative error value otherwise
+ */
+static int vtpm_proxy_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
+ size_t len;
+
+ /* process gone ? */
+ mutex_lock(&proxy_dev->buf_lock);
+
+ if (!(proxy_dev->state & STATE_OPENED_FLAG)) {
+ mutex_unlock(&proxy_dev->buf_lock);
+ return -EPIPE;
+ }
+
+ len = proxy_dev->resp_len;
+ if (count < len) {
+ dev_err(&chip->dev,
+ "Invalid size in recv: count=%zd, resp_len=%zd\n",
+ count, len);
+ len = -EIO;
+ goto out;
+ }
+
+ memcpy(buf, proxy_dev->buffer, len);
+ proxy_dev->resp_len = 0;
+
+out:
+ mutex_unlock(&proxy_dev->buf_lock);
+
+ return len;
+}
+
+/*
+ * Called when core TPM driver forwards TPM requests to 'server side'.
+ *
+ * Return value:
+ * 0 in case of success, negative error value otherwise.
+ */
+static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
+ int rc = 0;
+
+ if (count > sizeof(proxy_dev->buffer)) {
+ dev_err(&chip->dev,
+ "Invalid size in send: count=%zd, buffer size=%zd\n",
+ count, sizeof(proxy_dev->buffer));
+ return -EIO;
+ }
+
+ mutex_lock(&proxy_dev->buf_lock);
+
+ if (!(proxy_dev->state & STATE_OPENED_FLAG)) {
+ mutex_unlock(&proxy_dev->buf_lock);
+ return -EPIPE;
+ }
+
+ proxy_dev->resp_len = 0;
+
+ proxy_dev->req_len = count;
+ memcpy(proxy_dev->buffer, buf, count);
+
+ proxy_dev->state &= ~STATE_WAIT_RESPONSE_FLAG;
+
+ mutex_unlock(&proxy_dev->buf_lock);
+
+ wake_up_interruptible(&proxy_dev->wq);
+
+ return rc;
+}
+
+static void vtpm_proxy_tpm_op_cancel(struct tpm_chip *chip)
+{
+ /* not supported */
+}
+
+static u8 vtpm_proxy_tpm_op_status(struct tpm_chip *chip)
+{
+ struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
+
+ if (proxy_dev->resp_len)
+ return VTPM_PROXY_REQ_COMPLETE_FLAG;
+
+ return 0;
+}
+
+static bool vtpm_proxy_tpm_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
+ bool ret;
+
+ mutex_lock(&proxy_dev->buf_lock);
+
+ ret = !(proxy_dev->state & STATE_OPENED_FLAG);
+
+ mutex_unlock(&proxy_dev->buf_lock);
+
+ return ret;
+}
+
+static const struct tpm_class_ops vtpm_proxy_tpm_ops = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .recv = vtpm_proxy_tpm_op_recv,
+ .send = vtpm_proxy_tpm_op_send,
+ .cancel = vtpm_proxy_tpm_op_cancel,
+ .status = vtpm_proxy_tpm_op_status,
+ .req_complete_mask = VTPM_PROXY_REQ_COMPLETE_FLAG,
+ .req_complete_val = VTPM_PROXY_REQ_COMPLETE_FLAG,
+ .req_canceled = vtpm_proxy_tpm_req_canceled,
+};
+
+/*
+ * Code related to the startup of TPM 2.0 and TPM 1.2 and to the
+ * retrieval of timeouts and durations.
+ */
+
+static void vtpm_proxy_work(struct work_struct *work)
+{
+ struct proxy_dev *proxy_dev = container_of(work, struct proxy_dev,
+ work);
+ int rc;
+
+ rc = tpm_chip_register(proxy_dev->chip);
+ if (rc)
+ goto err;
+
+ return;
+
+err:
+ vtpm_proxy_fops_undo_open(proxy_dev);
+}
+
+/*
+ * vtpm_proxy_work_stop: make sure the work has finished
+ *
+ * This function is useful when user space has closed the fd
+ * while the driver is still determining timeouts.
+ */
+static void vtpm_proxy_work_stop(struct proxy_dev *proxy_dev)
+{
+ vtpm_proxy_fops_undo_open(proxy_dev);
+ flush_work(&proxy_dev->work);
+}
+
+/*
+ * vtpm_proxy_work_start: Schedule the work for TPM 1.2 & 2 initialization
+ */
+static inline void vtpm_proxy_work_start(struct proxy_dev *proxy_dev)
+{
+ queue_work(workqueue, &proxy_dev->work);
+}
+
+/*
+ * Code related to creation and deletion of device pairs
+ */
+static struct proxy_dev *vtpm_proxy_create_proxy_dev(void)
+{
+ struct proxy_dev *proxy_dev;
+ struct tpm_chip *chip;
+ int err;
+
+ proxy_dev = kzalloc(sizeof(*proxy_dev), GFP_KERNEL);
+ if (proxy_dev == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ init_waitqueue_head(&proxy_dev->wq);
+ mutex_init(&proxy_dev->buf_lock);
+ INIT_WORK(&proxy_dev->work, vtpm_proxy_work);
+
+ chip = tpm_chip_alloc(NULL, &vtpm_proxy_tpm_ops);
+ if (IS_ERR(chip)) {
+ err = PTR_ERR(chip);
+ goto err_proxy_dev_free;
+ }
+ dev_set_drvdata(&chip->dev, proxy_dev);
+
+ proxy_dev->chip = chip;
+
+ return proxy_dev;
+
+err_proxy_dev_free:
+ kfree(proxy_dev);
+
+ return ERR_PTR(err);
+}
+
+/*
+ * Undo what has been done in vtpm_proxy_create_proxy_dev()
+ */
+static inline void vtpm_proxy_delete_proxy_dev(struct proxy_dev *proxy_dev)
+{
+ put_device(&proxy_dev->chip->dev); /* frees chip */
+ kfree(proxy_dev);
+}
+
+/*
+ * Create a /dev/tpm%d and 'server side' file descriptor pair
+ *
+ * Return value:
+ * Returns a file pointer on success, an error pointer otherwise
+ */
+static struct file *vtpm_proxy_create_device(
+ struct vtpm_proxy_new_dev *vtpm_new_dev)
+{
+ struct proxy_dev *proxy_dev;
+ int rc, fd;
+ struct file *file;
+
+ if (vtpm_new_dev->flags & ~VTPM_PROXY_FLAGS_ALL)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ proxy_dev = vtpm_proxy_create_proxy_dev();
+ if (IS_ERR(proxy_dev))
+ return ERR_CAST(proxy_dev);
+
+ proxy_dev->flags = vtpm_new_dev->flags;
+
+ /* setup an anonymous file for the server-side */
+ fd = get_unused_fd_flags(O_RDWR);
+ if (fd < 0) {
+ rc = fd;
+ goto err_delete_proxy_dev;
+ }
+
+ file = anon_inode_getfile("[vtpms]", &vtpm_proxy_fops, proxy_dev,
+ O_RDWR);
+ if (IS_ERR(file)) {
+ rc = PTR_ERR(file);
+ goto err_put_unused_fd;
+ }
+
+ /* from now on we can unwind with put_unused_fd() + fput() */
+ /* simulate an open() on the server side */
+ vtpm_proxy_fops_open(file);
+
+ if (proxy_dev->flags & VTPM_PROXY_FLAG_TPM2)
+ proxy_dev->chip->flags |= TPM_CHIP_FLAG_TPM2;
+
+ vtpm_proxy_work_start(proxy_dev);
+
+ vtpm_new_dev->fd = fd;
+ vtpm_new_dev->major = MAJOR(proxy_dev->chip->dev.devt);
+ vtpm_new_dev->minor = MINOR(proxy_dev->chip->dev.devt);
+ vtpm_new_dev->tpm_num = proxy_dev->chip->dev_num;
+
+ return file;
+
+err_put_unused_fd:
+ put_unused_fd(fd);
+
+err_delete_proxy_dev:
+ vtpm_proxy_delete_proxy_dev(proxy_dev);
+
+ return ERR_PTR(rc);
+}
+
+/*
+ * Counterpart to vtpm_proxy_create_device().
+ */
+static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev)
+{
+ vtpm_proxy_work_stop(proxy_dev);
+
+ /*
+ * A client may hold the 'ops' lock, so let it know that the server
+ * side is shutting down before we try to grab the 'ops' lock when
+ * unregistering the chip.
+ */
+ vtpm_proxy_fops_undo_open(proxy_dev);
+
+ tpm_chip_unregister(proxy_dev->chip);
+
+ vtpm_proxy_delete_proxy_dev(proxy_dev);
+}
+
+/*
+ * Code related to the control device /dev/vtpmx
+ */
+
+/*
+ * vtpmx_fops_ioctl: ioctl on /dev/vtpmx
+ *
+ * Return value:
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static long vtpmx_fops_ioctl(struct file *f, unsigned int ioctl,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ struct vtpm_proxy_new_dev __user *vtpm_new_dev_p;
+ struct vtpm_proxy_new_dev vtpm_new_dev;
+ struct file *file;
+
+ switch (ioctl) {
+ case VTPM_PROXY_IOC_NEW_DEV:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ vtpm_new_dev_p = argp;
+ if (copy_from_user(&vtpm_new_dev, vtpm_new_dev_p,
+ sizeof(vtpm_new_dev)))
+ return -EFAULT;
+ file = vtpm_proxy_create_device(&vtpm_new_dev);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+ if (copy_to_user(vtpm_new_dev_p, &vtpm_new_dev,
+ sizeof(vtpm_new_dev))) {
+ put_unused_fd(vtpm_new_dev.fd);
+ fput(file);
+ return -EFAULT;
+ }
+
+ fd_install(vtpm_new_dev.fd, file);
+ return 0;
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
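The ordering in VTPM_PROXY_IOC_NEW_DEV follows the usual reserve-then-install pattern for handing a new descriptor to user space: fd_install() cannot be undone, so it runs only after the last step that may still fail (the copy_to_user() of the result). A minimal sketch of the pattern, with error checks elided:

	fd = get_unused_fd_flags(O_RDWR);	/* reserve an fd number */
	file = anon_inode_getfile("[vtpms]", &vtpm_proxy_fops, proxy_dev, O_RDWR);
	/* anything that can still fail goes here;
	 * unwind with put_unused_fd(fd) + fput(file) */
	fd_install(fd, file);			/* publish; no undo after this */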
+
+#ifdef CONFIG_COMPAT
+static long vtpmx_fops_compat_ioctl(struct file *f, unsigned int ioctl,
+ unsigned long arg)
+{
+ return vtpmx_fops_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static const struct file_operations vtpmx_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = vtpmx_fops_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = vtpmx_fops_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice vtpmx_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "vtpmx",
+ .fops = &vtpmx_fops,
+};
+
+static int vtpmx_init(void)
+{
+ return misc_register(&vtpmx_miscdev);
+}
+
+static void vtpmx_cleanup(void)
+{
+ misc_deregister(&vtpmx_miscdev);
+}
+
+static int __init vtpm_module_init(void)
+{
+ int rc;
+
+ rc = vtpmx_init();
+ if (rc) {
+ pr_err("couldn't create vtpmx device\n");
+ return rc;
+ }
+
+ workqueue = create_workqueue("tpm-vtpm");
+ if (!workqueue) {
+ pr_err("couldn't create workqueue\n");
+ rc = -ENOMEM;
+ goto err_vtpmx_cleanup;
+ }
+
+ return 0;
+
+err_vtpmx_cleanup:
+ vtpmx_cleanup();
+
+ return rc;
+}
+
+static void __exit vtpm_module_exit(void)
+{
+ destroy_workqueue(workqueue);
+ vtpmx_cleanup();
+}
+
+module_init(vtpm_module_init);
+module_exit(vtpm_module_exit);
+
+MODULE_AUTHOR("Stefan Berger (stefanb@us.ibm.com)");
+MODULE_DESCRIPTION("vTPM Driver");
+MODULE_VERSION("0.1");
+MODULE_LICENSE("GPL");
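For reference, a minimal user-space sketch of driving the control device; the structure and ioctl come from include/uapi/linux/vtpm_proxy.h, and error handling is omitted:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/vtpm_proxy.h>

	int main(void)
	{
		struct vtpm_proxy_new_dev new_dev = { .flags = 0 };
		int ctrl = open("/dev/vtpmx", O_RDWR);

		if (ctrl < 0 || ioctl(ctrl, VTPM_PROXY_IOC_NEW_DEV, &new_dev) < 0)
			return 1;

		/* new_dev.fd is the 'server side': read requests, write responses */
		printf("created /dev/tpm%u, server fd %u\n",
		       new_dev.tpm_num, new_dev.fd);
		return 0;
	}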
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 3111f2778..62028f483 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -28,6 +28,8 @@ struct tpm_private {
unsigned int evtchn;
int ring_ref;
domid_t backend_id;
+ int irq;
+ wait_queue_head_t read_queue;
};
enum status_bits {
@@ -39,7 +41,7 @@ enum status_bits {
static u8 vtpm_status(struct tpm_chip *chip)
{
- struct tpm_private *priv = TPM_VPRIV(chip);
+ struct tpm_private *priv = dev_get_drvdata(&chip->dev);
switch (priv->shr->state) {
case VTPM_STATE_IDLE:
return VTPM_STATUS_IDLE | VTPM_STATUS_CANCELED;
@@ -60,7 +62,7 @@ static bool vtpm_req_canceled(struct tpm_chip *chip, u8 status)
static void vtpm_cancel(struct tpm_chip *chip)
{
- struct tpm_private *priv = TPM_VPRIV(chip);
+ struct tpm_private *priv = dev_get_drvdata(&chip->dev);
priv->shr->state = VTPM_STATE_CANCEL;
wmb();
notify_remote_via_evtchn(priv->evtchn);
@@ -73,7 +75,7 @@ static unsigned int shr_data_offset(struct vtpm_shared_page *shr)
static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
- struct tpm_private *priv = TPM_VPRIV(chip);
+ struct tpm_private *priv = dev_get_drvdata(&chip->dev);
struct vtpm_shared_page *shr = priv->shr;
unsigned int offset = shr_data_offset(shr);
@@ -87,8 +89,8 @@ static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
return -EINVAL;
/* Wait for completion of any existing command or cancellation */
- if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, chip->vendor.timeout_c,
- &chip->vendor.read_queue, true) < 0) {
+ if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, chip->timeout_c,
+ &priv->read_queue, true) < 0) {
vtpm_cancel(chip);
return -ETIME;
}
@@ -104,7 +106,7 @@ static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
duration = tpm_calc_ordinal_duration(chip, ordinal);
if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, duration,
- &chip->vendor.read_queue, true) < 0) {
+ &priv->read_queue, true) < 0) {
/* got a signal or timeout, try to cancel */
vtpm_cancel(chip);
return -ETIME;
@@ -115,7 +117,7 @@ static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
- struct tpm_private *priv = TPM_VPRIV(chip);
+ struct tpm_private *priv = dev_get_drvdata(&chip->dev);
struct vtpm_shared_page *shr = priv->shr;
unsigned int offset = shr_data_offset(shr);
size_t length = shr->length;
@@ -124,8 +126,8 @@ static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
return -ECANCELED;
/* In theory the wait at the end of _send makes this one unnecessary */
- if (wait_for_tpm_stat(chip, VTPM_STATUS_RESULT, chip->vendor.timeout_c,
- &chip->vendor.read_queue, true) < 0) {
+ if (wait_for_tpm_stat(chip, VTPM_STATUS_RESULT, chip->timeout_c,
+ &priv->read_queue, true) < 0) {
vtpm_cancel(chip);
return -ETIME;
}
@@ -161,7 +163,7 @@ static irqreturn_t tpmif_interrupt(int dummy, void *dev_id)
switch (priv->shr->state) {
case VTPM_STATE_IDLE:
case VTPM_STATE_FINISH:
- wake_up_interruptible(&priv->chip->vendor.read_queue);
+ wake_up_interruptible(&priv->read_queue);
break;
case VTPM_STATE_SUBMIT:
case VTPM_STATE_CANCEL:
@@ -179,10 +181,10 @@ static int setup_chip(struct device *dev, struct tpm_private *priv)
if (IS_ERR(chip))
return PTR_ERR(chip);
- init_waitqueue_head(&chip->vendor.read_queue);
+ init_waitqueue_head(&priv->read_queue);
priv->chip = chip;
- TPM_VPRIV(chip) = priv;
+ dev_set_drvdata(&chip->dev, priv);
return 0;
}
@@ -217,7 +219,7 @@ static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
xenbus_dev_fatal(dev, rv, "allocating TPM irq");
return rv;
}
- priv->chip->vendor.irq = rv;
+ priv->irq = rv;
again:
rv = xenbus_transaction_start(&xbt);
@@ -277,8 +279,8 @@ static void ring_free(struct tpm_private *priv)
else
free_page((unsigned long)priv->shr);
- if (priv->chip && priv->chip->vendor.irq)
- unbind_from_irqhandler(priv->chip->vendor.irq, priv);
+ if (priv->irq)
+ unbind_from_irqhandler(priv->irq, priv);
kfree(priv);
}
@@ -318,10 +320,10 @@ static int tpmfront_probe(struct xenbus_device *dev,
static int tpmfront_remove(struct xenbus_device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
- struct tpm_private *priv = TPM_VPRIV(chip);
+ struct tpm_private *priv = dev_get_drvdata(&chip->dev);
tpm_chip_unregister(chip);
ring_free(priv);
- TPM_VPRIV(chip) = NULL;
+ dev_set_drvdata(&chip->dev, NULL);
return 0;
}
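The xen-tpmfront conversion follows the same pattern as the vtpm proxy driver above: the removed chip->vendor fields (irq, read_queue, the private pointer behind TPM_VPRIV()) move into the driver's own tpm_private, reached through the generic drvdata accessors on the chip device:

	/* at setup time */
	dev_set_drvdata(&chip->dev, priv);

	/* in any callback handed the chip */
	struct tpm_private *priv = dev_get_drvdata(&chip->dev);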
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index d2406fe25..5da47e26a 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -165,6 +165,12 @@ struct ports_device {
*/
struct virtqueue *c_ivq, *c_ovq;
+ /*
+ * A control packet buffer for guest->host requests, protected
+ * by c_ovq_lock.
+ */
+ struct virtio_console_control cpkt;
+
/* Array of per-port IO virtqueues */
struct virtqueue **in_vqs, **out_vqs;
@@ -560,28 +566,29 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
unsigned int event, unsigned int value)
{
struct scatterlist sg[1];
- struct virtio_console_control cpkt;
struct virtqueue *vq;
unsigned int len;
if (!use_multiport(portdev))
return 0;
- cpkt.id = cpu_to_virtio32(portdev->vdev, port_id);
- cpkt.event = cpu_to_virtio16(portdev->vdev, event);
- cpkt.value = cpu_to_virtio16(portdev->vdev, value);
-
vq = portdev->c_ovq;
- sg_init_one(sg, &cpkt, sizeof(cpkt));
-
spin_lock(&portdev->c_ovq_lock);
- if (virtqueue_add_outbuf(vq, sg, 1, &cpkt, GFP_ATOMIC) == 0) {
+
+ portdev->cpkt.id = cpu_to_virtio32(portdev->vdev, port_id);
+ portdev->cpkt.event = cpu_to_virtio16(portdev->vdev, event);
+ portdev->cpkt.value = cpu_to_virtio16(portdev->vdev, value);
+
+ sg_init_one(sg, &portdev->cpkt, sizeof(struct virtio_console_control));
+
+ if (virtqueue_add_outbuf(vq, sg, 1, &portdev->cpkt, GFP_ATOMIC) == 0) {
virtqueue_kick(vq);
while (!virtqueue_get_buf(vq, &len)
&& !virtqueue_is_broken(vq))
cpu_relax();
}
+
spin_unlock(&portdev->c_ovq_lock);
return 0;
}
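Moving cpkt from the stack into ports_device is safe because __send_control_msg() adds the buffer, kicks the queue, and busy-waits for completion all while holding c_ovq_lock, so the single per-device buffer can never be in flight twice. A plausible motivation, not stated in the hunk, is that sg_init_one() needs an address in the kernel's linear mapping, which a stack variable is not once virtually mapped stacks are in use.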
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 98efbfcdb..e2d9bd760 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -49,10 +49,10 @@ config COMMON_CLK_MAX77802
This driver supports Maxim 77802 crystal oscillator clock.
config COMMON_CLK_RK808
- tristate "Clock driver for RK808"
+ tristate "Clock driver for RK808/RK818"
depends on MFD_RK808
---help---
- This driver supports RK808 crystal oscillator clock. These
+ This driver supports the RK808 and RK818 crystal oscillator clocks. These
multi-function devices have two fixed-rate oscillators,
clocked at 32KHz each. Clkout1 is always on; Clkout2 can be
switched off by a control register.
@@ -203,16 +203,19 @@ config COMMON_CLK_PIC32
config COMMON_CLK_OXNAS
bool "Clock driver for the OXNAS SoC Family"
+ depends on ARCH_OXNAS || COMPILE_TEST
select MFD_SYSCON
---help---
Support for the OXNAS SoC Family clocks.
source "drivers/clk/bcm/Kconfig"
source "drivers/clk/hisilicon/Kconfig"
+source "drivers/clk/meson/Kconfig"
source "drivers/clk/mvebu/Kconfig"
source "drivers/clk/qcom/Kconfig"
source "drivers/clk/renesas/Kconfig"
source "drivers/clk/samsung/Kconfig"
+source "drivers/clk/sunxi-ng/Kconfig"
source "drivers/clk/tegra/Kconfig"
source "drivers/clk/ti/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index dcc5e698f..3b6f9cf34 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -16,13 +16,14 @@ obj-$(CONFIG_COMMON_CLK) += clk-conf.o
endif
# hardware specific clock types
-# please keep this section sorted lexicographically by file/directory path name
+# please keep this section sorted lexicographically by file path name
obj-$(CONFIG_MACH_ASM9260) += clk-asm9260.o
obj-$(CONFIG_COMMON_CLK_AXI_CLKGEN) += clk-axi-clkgen.o
obj-$(CONFIG_ARCH_AXXIA) += clk-axm5516.o
obj-$(CONFIG_COMMON_CLK_CDCE706) += clk-cdce706.o
-obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o
+obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o
obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o
+obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o
obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o
obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
obj-$(CONFIG_MACH_LOONGSON32) += clk-ls1x.o
@@ -35,6 +36,7 @@ obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o
obj-$(CONFIG_COMMON_CLK_OXNAS) += clk-oxnas.o
obj-$(CONFIG_COMMON_CLK_PALMAS) += clk-palmas.o
+obj-$(CONFIG_COMMON_CLK_PWM) += clk-pwm.o
obj-$(CONFIG_CLK_QORIQ) += clk-qoriq.o
obj-$(CONFIG_COMMON_CLK_RK808) += clk-rk808.o
obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o
@@ -42,7 +44,6 @@ obj-$(CONFIG_COMMON_CLK_SCPI) += clk-scpi.o
obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o
obj-$(CONFIG_COMMON_CLK_SI514) += clk-si514.o
obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o
-obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o
obj-$(CONFIG_ARCH_STM32) += clk-stm32f4.o
obj-$(CONFIG_ARCH_TANGO) += clk-tango4.o
obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o
@@ -50,35 +51,39 @@ obj-$(CONFIG_ARCH_U300) += clk-u300.o
obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o
obj-$(CONFIG_COMMON_CLK_XGENE) += clk-xgene.o
-obj-$(CONFIG_COMMON_CLK_PWM) += clk-pwm.o
+
+# please keep this section sorted lexicographically by directory path name
obj-$(CONFIG_COMMON_CLK_AT91) += at91/
obj-$(CONFIG_ARCH_ARTPEC) += axis/
+obj-$(CONFIG_ARC_PLAT_AXS10X) += axs10x/
obj-y += bcm/
obj-$(CONFIG_ARCH_BERLIN) += berlin/
+obj-$(CONFIG_H8300) += h8300/
obj-$(CONFIG_ARCH_HISI) += hisilicon/
obj-$(CONFIG_ARCH_MXC) += imx/
obj-$(CONFIG_MACH_INGENIC) += ingenic/
obj-$(CONFIG_COMMON_CLK_KEYSTONE) += keystone/
obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/
+obj-$(CONFIG_COMMON_CLK_AMLOGIC) += meson/
obj-$(CONFIG_MACH_PIC32) += microchip/
ifeq ($(CONFIG_COMMON_CLK), y)
obj-$(CONFIG_ARCH_MMP) += mmp/
endif
obj-y += mvebu/
-obj-$(CONFIG_ARCH_MESON) += meson/
obj-$(CONFIG_ARCH_MXS) += mxs/
-obj-$(CONFIG_MACH_PISTACHIO) += pistachio/
obj-$(CONFIG_COMMON_CLK_NXP) += nxp/
+obj-$(CONFIG_MACH_PISTACHIO) += pistachio/
obj-$(CONFIG_COMMON_CLK_PXA) += pxa/
obj-$(CONFIG_COMMON_CLK_QCOM) += qcom/
+obj-$(CONFIG_ARCH_RENESAS) += renesas/
obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
obj-$(CONFIG_COMMON_CLK_SAMSUNG) += samsung/
-obj-$(CONFIG_ARCH_RENESAS) += renesas/
obj-$(CONFIG_ARCH_SIRF) += sirf/
obj-$(CONFIG_ARCH_SOCFPGA) += socfpga/
obj-$(CONFIG_PLAT_SPEAR) += spear/
obj-$(CONFIG_ARCH_STI) += st/
obj-$(CONFIG_ARCH_SUNXI) += sunxi/
+obj-$(CONFIG_ARCH_SUNXI) += sunxi-ng/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-y += ti/
obj-$(CONFIG_ARCH_U8500) += ux500/
@@ -86,5 +91,3 @@ obj-$(CONFIG_COMMON_CLK_VERSATILE) += versatile/
obj-$(CONFIG_X86) += x86/
obj-$(CONFIG_ARCH_ZX) += zte/
obj-$(CONFIG_ARCH_ZYNQ) += zynq/
-obj-$(CONFIG_H8300) += h8300/
-obj-$(CONFIG_ARC_PLAT_AXS10X) += axs10x/
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index e1aa210dd..7f6bec883 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -267,7 +267,7 @@ at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock, const char
return clk;
}
-void __init of_sama5d2_clk_generated_setup(struct device_node *np)
+static void __init of_sama5d2_clk_generated_setup(struct device_node *np)
{
int num;
u32 id;
diff --git a/drivers/clk/bcm/clk-iproc-armpll.c b/drivers/clk/bcm/clk-iproc-armpll.c
index a196ee28a..d7d628214 100644
--- a/drivers/clk/bcm/clk-iproc-armpll.c
+++ b/drivers/clk/bcm/clk-iproc-armpll.c
@@ -20,6 +20,8 @@
#include <linux/clkdev.h>
#include <linux/of_address.h>
+#include "clk-iproc.h"
+
#define IPROC_CLK_MAX_FREQ_POLICY 0x3
#define IPROC_CLK_POLICY_FREQ_OFFSET 0x008
#define IPROC_CLK_POLICY_FREQ_POLICY_FREQ_SHIFT 8
@@ -242,7 +244,6 @@ static const struct clk_ops iproc_arm_pll_ops = {
void __init iproc_armpll_setup(struct device_node *node)
{
int ret;
- struct clk *clk;
struct iproc_arm_pll *pll;
struct clk_init_data init;
const char *parent_name;
@@ -263,18 +264,18 @@ void __init iproc_armpll_setup(struct device_node *node)
init.num_parents = (parent_name ? 1 : 0);
pll->hw.init = &init;
- clk = clk_register(NULL, &pll->hw);
- if (WARN_ON(IS_ERR(clk)))
+ ret = clk_hw_register(NULL, &pll->hw);
+ if (WARN_ON(ret))
goto err_iounmap;
- ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &pll->hw);
if (WARN_ON(ret))
goto err_clk_unregister;
return;
err_clk_unregister:
- clk_unregister(clk);
+ clk_hw_unregister(&pll->hw);
err_iounmap:
iounmap(pll->base);
err_free_pll:
diff --git a/drivers/clk/bcm/clk-iproc-asiu.c b/drivers/clk/bcm/clk-iproc-asiu.c
index f630e1bbd..4360e4813 100644
--- a/drivers/clk/bcm/clk-iproc-asiu.c
+++ b/drivers/clk/bcm/clk-iproc-asiu.c
@@ -37,7 +37,7 @@ struct iproc_asiu {
void __iomem *div_base;
void __iomem *gate_base;
- struct clk_onecell_data clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct iproc_asiu_clk *clks;
};
@@ -197,11 +197,11 @@ void __init iproc_asiu_setup(struct device_node *node,
if (WARN_ON(!asiu))
return;
- asiu->clk_data.clk_num = num_clks;
- asiu->clk_data.clks = kcalloc(num_clks, sizeof(*asiu->clk_data.clks),
- GFP_KERNEL);
- if (WARN_ON(!asiu->clk_data.clks))
+ asiu->clk_data = kzalloc(sizeof(*asiu->clk_data->hws) * num_clks +
+ sizeof(*asiu->clk_data), GFP_KERNEL);
+ if (WARN_ON(!asiu->clk_data))
goto err_clks;
+ asiu->clk_data->num = num_clks;
asiu->clks = kcalloc(num_clks, sizeof(*asiu->clks), GFP_KERNEL);
if (WARN_ON(!asiu->clks))
@@ -217,7 +217,6 @@ void __init iproc_asiu_setup(struct device_node *node,
for (i = 0; i < num_clks; i++) {
struct clk_init_data init;
- struct clk *clk;
const char *parent_name;
struct iproc_asiu_clk *asiu_clk;
const char *clk_name;
@@ -240,22 +239,22 @@ void __init iproc_asiu_setup(struct device_node *node,
init.num_parents = (parent_name ? 1 : 0);
asiu_clk->hw.init = &init;
- clk = clk_register(NULL, &asiu_clk->hw);
- if (WARN_ON(IS_ERR(clk)))
+ ret = clk_hw_register(NULL, &asiu_clk->hw);
+ if (WARN_ON(ret))
goto err_clk_register;
- asiu->clk_data.clks[i] = clk;
+ asiu->clk_data->hws[i] = &asiu_clk->hw;
}
- ret = of_clk_add_provider(node, of_clk_src_onecell_get,
- &asiu->clk_data);
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
+ asiu->clk_data);
if (WARN_ON(ret))
goto err_clk_register;
return;
err_clk_register:
- for (i = 0; i < num_clks; i++)
- clk_unregister(asiu->clk_data.clks[i]);
+ while (--i >= 0)
+ clk_hw_unregister(asiu->clk_data->hws[i]);
iounmap(asiu->gate_base);
err_iomap_gate:
@@ -265,7 +264,7 @@ err_iomap_div:
kfree(asiu->clks);
err_asiu_clks:
- kfree(asiu->clk_data.clks);
+ kfree(asiu->clk_data);
err_clks:
kfree(asiu);
diff --git a/drivers/clk/bcm/clk-iproc-pll.c b/drivers/clk/bcm/clk-iproc-pll.c
index fd492a5da..e04634c46 100644
--- a/drivers/clk/bcm/clk-iproc-pll.c
+++ b/drivers/clk/bcm/clk-iproc-pll.c
@@ -89,7 +89,7 @@ struct iproc_pll {
const struct iproc_pll_vco_param *vco_param;
unsigned int num_vco_entries;
- struct clk_onecell_data clk_data;
+ struct clk_hw_onecell_data *clk_data;
struct iproc_clk *clks;
};
@@ -625,7 +625,6 @@ void __init iproc_pll_clk_setup(struct device_node *node,
unsigned int num_clks)
{
int i, ret;
- struct clk *clk;
struct iproc_pll *pll;
struct iproc_clk *iclk;
struct clk_init_data init;
@@ -638,11 +637,11 @@ void __init iproc_pll_clk_setup(struct device_node *node,
if (WARN_ON(!pll))
return;
- pll->clk_data.clk_num = num_clks;
- pll->clk_data.clks = kcalloc(num_clks, sizeof(*pll->clk_data.clks),
- GFP_KERNEL);
- if (WARN_ON(!pll->clk_data.clks))
+ pll->clk_data = kzalloc(sizeof(*pll->clk_data->hws) * num_clks +
+ sizeof(*pll->clk_data), GFP_KERNEL);
+ if (WARN_ON(!pll->clk_data))
goto err_clk_data;
+ pll->clk_data->num = num_clks;
pll->clks = kcalloc(num_clks, sizeof(*pll->clks), GFP_KERNEL);
if (WARN_ON(!pll->clks))
@@ -694,11 +693,11 @@ void __init iproc_pll_clk_setup(struct device_node *node,
iproc_pll_sw_cfg(pll);
- clk = clk_register(NULL, &iclk->hw);
- if (WARN_ON(IS_ERR(clk)))
+ ret = clk_hw_register(NULL, &iclk->hw);
+ if (WARN_ON(ret))
goto err_pll_register;
- pll->clk_data.clks[0] = clk;
+ pll->clk_data->hws[0] = &iclk->hw;
/* now initialize and register all leaf clocks */
for (i = 1; i < num_clks; i++) {
@@ -724,22 +723,23 @@ void __init iproc_pll_clk_setup(struct device_node *node,
init.num_parents = (parent_name ? 1 : 0);
iclk->hw.init = &init;
- clk = clk_register(NULL, &iclk->hw);
- if (WARN_ON(IS_ERR(clk)))
+ ret = clk_hw_register(NULL, &iclk->hw);
+ if (WARN_ON(ret))
goto err_clk_register;
- pll->clk_data.clks[i] = clk;
+ pll->clk_data->hws[i] = &iclk->hw;
}
- ret = of_clk_add_provider(node, of_clk_src_onecell_get, &pll->clk_data);
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
+ pll->clk_data);
if (WARN_ON(ret))
goto err_clk_register;
return;
err_clk_register:
- for (i = 0; i < num_clks; i++)
- clk_unregister(pll->clk_data.clks[i]);
+ while (--i >= 0)
+ clk_hw_unregister(pll->clk_data->hws[i]);
err_pll_register:
if (pll->status_base != pll->control_base)
@@ -759,7 +759,7 @@ err_pll_iomap:
kfree(pll->clks);
err_clks:
- kfree(pll->clk_data.clks);
+ kfree(pll->clk_data);
err_clk_data:
kfree(pll);
diff --git a/drivers/clk/clk-clps711x.c b/drivers/clk/clk-clps711x.c
index 1f60b0241..adaf109f2 100644
--- a/drivers/clk/clk-clps711x.c
+++ b/drivers/clk/clk-clps711x.c
@@ -184,5 +184,5 @@ static void __init clps711x_clk_init_dt(struct device_node *np)
of_clk_add_provider(np, of_clk_src_onecell_get,
&clps711x_clk->clk_data);
}
-CLK_OF_DECLARE(clps711x, "cirrus,clps711x-clk", clps711x_clk_init_dt);
+CLK_OF_DECLARE(clps711x, "cirrus,ep7209-clk", clps711x_clk_init_dt);
#endif
diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c
index 43a218f35..674785d96 100644
--- a/drivers/clk/clk-conf.c
+++ b/drivers/clk/clk-conf.c
@@ -55,7 +55,7 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier)
}
clk = of_clk_get_from_provider(&clkspec);
if (IS_ERR(clk)) {
- pr_warn("clk: couldn't get parent clock %d for %s\n",
+ pr_warn("clk: couldn't get assigned clock %d for %s\n",
index, node->full_name);
rc = PTR_ERR(clk);
goto err;
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index 75cd6c792..4db3be214 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -142,6 +142,11 @@ void clk_hw_unregister_fixed_factor(struct clk_hw *hw)
EXPORT_SYMBOL_GPL(clk_hw_unregister_fixed_factor);
#ifdef CONFIG_OF
+static const struct of_device_id set_rate_parent_matches[] = {
+ { .compatible = "allwinner,sun4i-a10-pll3-2x-clk" },
+ { /* Sentinel */ },
+};
+
/**
* of_fixed_factor_clk_setup() - Setup function for simple fixed factor clock
*/
@@ -150,6 +155,7 @@ void __init of_fixed_factor_clk_setup(struct device_node *node)
struct clk *clk;
const char *clk_name = node->name;
const char *parent_name;
+ unsigned long flags = 0;
u32 div, mult;
if (of_property_read_u32(node, "clock-div", &div)) {
@@ -167,7 +173,10 @@ void __init of_fixed_factor_clk_setup(struct device_node *node)
of_property_read_string(node, "clock-output-names", &clk_name);
parent_name = of_clk_get_parent_name(node, 0);
- clk = clk_register_fixed_factor(NULL, clk_name, parent_name, 0,
+ if (of_match_node(set_rate_parent_matches, node))
+ flags |= CLK_SET_RATE_PARENT;
+
+ clk = clk_register_fixed_factor(NULL, clk_name, parent_name, flags,
mult, div);
if (!IS_ERR(clk))
of_clk_add_provider(node, of_clk_src_simple_get, clk);
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index 8e4453eb5..2edb39342 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -145,6 +145,17 @@ void clk_unregister_fixed_rate(struct clk *clk)
}
EXPORT_SYMBOL_GPL(clk_unregister_fixed_rate);
+void clk_hw_unregister_fixed_rate(struct clk_hw *hw)
+{
+ struct clk_fixed_rate *fixed;
+
+ fixed = to_clk_fixed_rate(hw);
+
+ clk_hw_unregister(hw);
+ kfree(fixed);
+}
+EXPORT_SYMBOL_GPL(clk_hw_unregister_fixed_rate);
+
#ifdef CONFIG_OF
/**
* of_fixed_clk_setup() - Setup function for simple fixed rate clock
diff --git a/drivers/clk/clk-highbank.c b/drivers/clk/clk-highbank.c
index be3a21abb..727ed8e1b 100644
--- a/drivers/clk/clk-highbank.c
+++ b/drivers/clk/clk-highbank.c
@@ -275,7 +275,6 @@ static const struct clk_ops periclk_ops = {
static __init struct clk *hb_clk_init(struct device_node *node, const struct clk_ops *ops)
{
u32 reg;
- struct clk *clk;
struct hb_clk *hb_clk;
const char *clk_name = node->name;
const char *parent_name;
@@ -308,13 +307,13 @@ static __init struct clk *hb_clk_init(struct device_node *node, const struct clk
hb_clk->hw.init = &init;
- clk = clk_register(NULL, &hb_clk->hw);
- if (WARN_ON(IS_ERR(clk))) {
+ rc = clk_hw_register(NULL, &hb_clk->hw);
+ if (WARN_ON(rc)) {
kfree(hb_clk);
return NULL;
}
- rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
- return clk;
+ rc = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &hb_clk->hw);
+ return hb_clk->hw.clk;
}
static void __init hb_pll_init(struct device_node *node)
diff --git a/drivers/clk/clk-multiplier.c b/drivers/clk/clk-multiplier.c
index 9e449c7b7..dc037c957 100644
--- a/drivers/clk/clk-multiplier.c
+++ b/drivers/clk/clk-multiplier.c
@@ -52,14 +52,28 @@ static unsigned long __bestmult(struct clk_hw *hw, unsigned long rate,
unsigned long *best_parent_rate,
u8 width, unsigned long flags)
{
+ struct clk_multiplier *mult = to_clk_multiplier(hw);
unsigned long orig_parent_rate = *best_parent_rate;
unsigned long parent_rate, current_rate, best_rate = ~0;
unsigned int i, bestmult = 0;
+ unsigned int maxmult = (1 << width) - 1;
+
+ if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
+ bestmult = rate / orig_parent_rate;
+
+ /* Make sure we don't end up with a 0 multiplier */
+ if ((bestmult == 0) &&
+ !(mult->flags & CLK_MULTIPLIER_ZERO_BYPASS))
+ bestmult = 1;
- if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT))
- return rate / *best_parent_rate;
+ /* Make sure we don't overflow the multiplier */
+ if (bestmult > maxmult)
+ bestmult = maxmult;
+
+ return bestmult;
+ }
- for (i = 1; i < ((1 << width) - 1); i++) {
+ for (i = 1; i < maxmult; i++) {
if (rate == orig_parent_rate * i) {
/*
* This is the best case for us if we have a
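The effect of the rewritten non-CLK_SET_RATE_PARENT path above: the computed multiplier is clamped to the field's range [1, (1 << width) - 1], with 0 permitted only when CLK_MULTIPLIER_ZERO_BYPASS is set, so requests far below or above the parent rate now saturate instead of programming an out-of-range value.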
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
index e4d8a991c..71677eb12 100644
--- a/drivers/clk/clk-nomadik.c
+++ b/drivers/clk/clk-nomadik.c
@@ -253,11 +253,11 @@ static const struct clk_ops pll_clk_ops = {
.recalc_rate = pll_clk_recalc_rate,
};
-static struct clk * __init
+static struct clk_hw * __init
pll_clk_register(struct device *dev, const char *name,
const char *parent_name, u32 id)
{
- struct clk *clk;
+ int ret;
struct clk_pll *pll;
struct clk_init_data init;
@@ -281,11 +281,13 @@ pll_clk_register(struct device *dev, const char *name,
pr_debug("register PLL1 clock \"%s\"\n", name);
- clk = clk_register(dev, &pll->hw);
- if (IS_ERR(clk))
+ ret = clk_hw_register(dev, &pll->hw);
+ if (ret) {
kfree(pll);
+ return ERR_PTR(ret);
+ }
- return clk;
+ return &pll->hw;
}
/*
@@ -345,11 +347,11 @@ static const struct clk_ops src_clk_ops = {
.recalc_rate = src_clk_recalc_rate,
};
-static struct clk * __init
+static struct clk_hw * __init
src_clk_register(struct device *dev, const char *name,
const char *parent_name, u8 id)
{
- struct clk *clk;
+ int ret;
struct clk_src *sclk;
struct clk_init_data init;
@@ -376,11 +378,13 @@ src_clk_register(struct device *dev, const char *name,
pr_debug("register clock \"%s\" ID: %d group: %d bits: %08x\n",
name, id, sclk->group1, sclk->clkbit);
- clk = clk_register(dev, &sclk->hw);
- if (IS_ERR(clk))
+ ret = clk_hw_register(dev, &sclk->hw);
+ if (ret) {
kfree(sclk);
+ return ERR_PTR(ret);
+ }
- return clk;
+ return &sclk->hw;
}
#ifdef CONFIG_DEBUG_FS
@@ -508,7 +512,7 @@ device_initcall(nomadik_src_clk_init_debugfs);
static void __init of_nomadik_pll_setup(struct device_node *np)
{
- struct clk *clk = ERR_PTR(-EINVAL);
+ struct clk_hw *hw;
const char *clk_name = np->name;
const char *parent_name;
u32 pll_id;
@@ -522,16 +526,16 @@ static void __init of_nomadik_pll_setup(struct device_node *np)
return;
}
parent_name = of_clk_get_parent_name(np, 0);
- clk = pll_clk_register(NULL, clk_name, parent_name, pll_id);
- if (!IS_ERR(clk))
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ hw = pll_clk_register(NULL, clk_name, parent_name, pll_id);
+ if (!IS_ERR(hw))
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
}
CLK_OF_DECLARE(nomadik_pll_clk,
"st,nomadik-pll-clock", of_nomadik_pll_setup);
static void __init of_nomadik_hclk_setup(struct device_node *np)
{
- struct clk *clk = ERR_PTR(-EINVAL);
+ struct clk_hw *hw;
const char *clk_name = np->name;
const char *parent_name;
@@ -542,20 +546,20 @@ static void __init of_nomadik_hclk_setup(struct device_node *np)
/*
* The HCLK divides PLL1 with 1 (passthru), 2, 3 or 4.
*/
- clk = clk_register_divider(NULL, clk_name, parent_name,
+ hw = clk_hw_register_divider(NULL, clk_name, parent_name,
0, src_base + SRC_CR,
13, 2,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
&src_lock);
- if (!IS_ERR(clk))
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ if (!IS_ERR(hw))
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
}
CLK_OF_DECLARE(nomadik_hclk_clk,
"st,nomadik-hclk-clock", of_nomadik_hclk_setup);
static void __init of_nomadik_src_clk_setup(struct device_node *np)
{
- struct clk *clk = ERR_PTR(-EINVAL);
+ struct clk_hw *hw;
const char *clk_name = np->name;
const char *parent_name;
u32 clk_id;
@@ -569,9 +573,9 @@ static void __init of_nomadik_src_clk_setup(struct device_node *np)
return;
}
parent_name = of_clk_get_parent_name(np, 0);
- clk = src_clk_register(NULL, clk_name, parent_name, clk_id);
- if (!IS_ERR(clk))
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ hw = src_clk_register(NULL, clk_name, parent_name, clk_id);
+ if (!IS_ERR(hw))
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
}
CLK_OF_DECLARE(nomadik_src_clk,
"st,nomadik-src-clock", of_nomadik_src_clk_setup);
diff --git a/drivers/clk/clk-oxnas.c b/drivers/clk/clk-oxnas.c
index 79bcb2e42..47649ac5d 100644
--- a/drivers/clk/clk-oxnas.c
+++ b/drivers/clk/clk-oxnas.c
@@ -18,7 +18,7 @@
#include <linux/clk-provider.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stringify.h>
@@ -170,26 +170,17 @@ static int oxnas_stdclk_probe(struct platform_device *pdev)
clk_oxnas->onecell_data);
}
-static int oxnas_stdclk_remove(struct platform_device *pdev)
-{
- of_clk_del_provider(pdev->dev.of_node);
-
- return 0;
-}
-
static const struct of_device_id oxnas_stdclk_dt_ids[] = {
{ .compatible = "oxsemi,ox810se-stdclk" },
{ }
};
-MODULE_DEVICE_TABLE(of, oxnas_stdclk_dt_ids);
static struct platform_driver oxnas_stdclk_driver = {
.probe = oxnas_stdclk_probe,
- .remove = oxnas_stdclk_remove,
.driver = {
.name = "oxnas-stdclk",
+ .suppress_bind_attrs = true,
.of_match_table = oxnas_stdclk_dt_ids,
},
};
-
-module_platform_driver(oxnas_stdclk_driver);
+builtin_platform_driver(oxnas_stdclk_driver);
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index f8c83977c..fbaa84a33 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -137,7 +137,7 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
{
struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct s2mps11_clk *s2mps11_clks;
- struct clk_onecell_data *clk_data;
+ struct clk_hw_onecell_data *clk_data;
unsigned int s2mps11_reg;
int i, ret = 0;
enum sec_device_type hwid = platform_get_device_id(pdev)->driver_data;
@@ -147,15 +147,12 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
if (!s2mps11_clks)
return -ENOMEM;
- clk_data = devm_kzalloc(&pdev->dev, sizeof(*clk_data), GFP_KERNEL);
+ clk_data = devm_kzalloc(&pdev->dev, sizeof(*clk_data) +
+ sizeof(*clk_data->hws) * S2MPS11_CLKS_NUM,
+ GFP_KERNEL);
if (!clk_data)
return -ENOMEM;
- clk_data->clks = devm_kcalloc(&pdev->dev, S2MPS11_CLKS_NUM,
- sizeof(struct clk *), GFP_KERNEL);
- if (!clk_data->clks)
- return -ENOMEM;
-
switch (hwid) {
case S2MPS11X:
s2mps11_reg = S2MPS11_REG_RTC_CTRL;
@@ -196,18 +193,18 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
goto err_reg;
}
- s2mps11_clks[i].lookup = clkdev_create(s2mps11_clks[i].clk,
+ s2mps11_clks[i].lookup = clkdev_hw_create(&s2mps11_clks[i].hw,
s2mps11_clks_init[i].name, NULL);
if (!s2mps11_clks[i].lookup) {
ret = -ENOMEM;
goto err_reg;
}
- clk_data->clks[i] = s2mps11_clks[i].clk;
+ clk_data->hws[i] = &s2mps11_clks[i].hw;
}
- clk_data->clk_num = S2MPS11_CLKS_NUM;
- of_clk_add_provider(s2mps11_clks->clk_np, of_clk_src_onecell_get,
- clk_data);
+ clk_data->num = S2MPS11_CLKS_NUM;
+ of_clk_add_hw_provider(s2mps11_clks->clk_np, of_clk_hw_onecell_get,
+ clk_data);
platform_set_drvdata(pdev, s2mps11_clks);
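The s2mps11 hunk shows the clk_hw_onecell_data idiom that recurs throughout this release: hws[] is a flexible array member, so the provider data and the clock table come from one allocation and are registered with the _hw_ provider helpers. In outline, assuming a driver with num_clks clocks each embedding a struct clk_hw (my_clks is hypothetical):

	struct clk_hw_onecell_data *clk_data;

	clk_data = kzalloc(sizeof(*clk_data) +
			   sizeof(*clk_data->hws) * num_clks, GFP_KERNEL);
	if (!clk_data)
		return -ENOMEM;
	clk_data->num = num_clks;
	/* after each successful clk_hw_register(): */
	clk_data->hws[i] = &my_clks[i].hw;
	of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);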
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index fd89e7711..02d681008 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -136,7 +136,7 @@ static const u64 stm32f42xx_gate_map[] = { 0x000000f17ef417ffull,
0x0000000000000001ull,
0x04777f33f6fec9ffull };
-static struct clk *clks[MAX_CLKS];
+static struct clk_hw *clks[MAX_CLKS];
static DEFINE_SPINLOCK(stm32f4_clk_lock);
static void __iomem *base;
@@ -281,7 +281,7 @@ static int stm32f4_rcc_lookup_clk_idx(u8 primary, u8 secondary)
(BIT_ULL_WORD(secondary) >= 2 ? hweight64(table[2]) : 0);
}
-static struct clk *
+static struct clk_hw *
stm32f4_rcc_lookup_clk(struct of_phandle_args *clkspec, void *data)
{
int i = stm32f4_rcc_lookup_clk_idx(clkspec->args[0], clkspec->args[1]);
@@ -346,9 +346,9 @@ static void __init stm32f4_rcc_init(struct device_node *np)
clk_register_apb_mul(NULL, "apb2_mul", "apb2_div",
CLK_SET_RATE_PARENT, 15);
- clks[SYSTICK] = clk_register_fixed_factor(NULL, "systick", "ahb_div",
+ clks[SYSTICK] = clk_hw_register_fixed_factor(NULL, "systick", "ahb_div",
0, 1, 8);
- clks[FCLK] = clk_register_fixed_factor(NULL, "fclk", "ahb_div",
+ clks[FCLK] = clk_hw_register_fixed_factor(NULL, "fclk", "ahb_div",
0, 1, 1);
for (n = 0; n < ARRAY_SIZE(stm32f4_gates); n++) {
@@ -360,18 +360,18 @@ static void __init stm32f4_rcc_init(struct device_node *np)
if (idx < 0)
goto fail;
- clks[idx] = clk_register_gate(
+ clks[idx] = clk_hw_register_gate(
NULL, gd->name, gd->parent_name, gd->flags,
base + gd->offset, gd->bit_idx, 0, &stm32f4_clk_lock);
- if (IS_ERR(clks[n])) {
+ if (IS_ERR(clks[idx])) {
pr_err("%s: Unable to register leaf clock %s\n",
np->full_name, gd->name);
goto fail;
}
}
- of_clk_add_provider(np, stm32f4_rcc_lookup_clk, NULL);
+ of_clk_add_hw_provider(np, stm32f4_rcc_lookup_clk, NULL);
return;
fail:
iounmap(base);
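Besides the clk_hw conversion, the stm32f4 hunk fixes a latent bug: the error check after registering each gate tested clks[n] (the index into stm32f4_gates) instead of clks[idx] (the slot just written), so a failed gate registration could be missed.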
diff --git a/drivers/clk/clk-u300.c b/drivers/clk/clk-u300.c
index 95d1742da..ec8aafda6 100644
--- a/drivers/clk/clk-u300.c
+++ b/drivers/clk/clk-u300.c
@@ -689,7 +689,7 @@ static const struct clk_ops syscon_clk_ops = {
.set_rate = syscon_clk_set_rate,
};
-static struct clk * __init
+static struct clk_hw * __init
syscon_clk_register(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
bool hw_ctrld,
@@ -697,9 +697,10 @@ syscon_clk_register(struct device *dev, const char *name,
void __iomem *en_reg, u8 en_bit,
u16 clk_val)
{
- struct clk *clk;
+ struct clk_hw *hw;
struct clk_syscon *sclk;
struct clk_init_data init;
+ int ret;
sclk = kzalloc(sizeof(struct clk_syscon), GFP_KERNEL);
if (!sclk) {
@@ -722,11 +723,14 @@ syscon_clk_register(struct device *dev, const char *name,
sclk->en_bit = en_bit;
sclk->clk_val = clk_val;
- clk = clk_register(dev, &sclk->hw);
- if (IS_ERR(clk))
+ hw = &sclk->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
kfree(sclk);
+ hw = ERR_PTR(ret);
+ }
- return clk;
+ return hw;
}
#define U300_CLK_TYPE_SLOW 0
@@ -868,7 +872,7 @@ static struct u300_clock const u300_clk_lookup[] __initconst = {
static void __init of_u300_syscon_clk_init(struct device_node *np)
{
- struct clk *clk = ERR_PTR(-EINVAL);
+ struct clk_hw *hw = ERR_PTR(-EINVAL);
const char *clk_name = np->name;
const char *parent_name;
void __iomem *res_reg;
@@ -911,16 +915,15 @@ static void __init of_u300_syscon_clk_init(struct device_node *np)
const struct u300_clock *u3clk = &u300_clk_lookup[i];
if (u3clk->type == clk_type && u3clk->id == clk_id)
- clk = syscon_clk_register(NULL,
- clk_name, parent_name,
- 0, u3clk->hw_ctrld,
- res_reg, u3clk->id,
- en_reg, u3clk->id,
- u3clk->clk_val);
+ hw = syscon_clk_register(NULL, clk_name, parent_name,
+ 0, u3clk->hw_ctrld,
+ res_reg, u3clk->id,
+ en_reg, u3clk->id,
+ u3clk->clk_val);
}
- if (!IS_ERR(clk)) {
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ if (!IS_ERR(hw)) {
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
/*
* Some few system clocks - device tree does not
@@ -928,11 +931,11 @@ static void __init of_u300_syscon_clk_init(struct device_node *np)
* for now we add these three clocks here.
*/
if (clk_type == U300_CLK_TYPE_REST && clk_id == 5)
- clk_register_clkdev(clk, NULL, "pl172");
+ clk_hw_register_clkdev(hw, NULL, "pl172");
if (clk_type == U300_CLK_TYPE_REST && clk_id == 9)
- clk_register_clkdev(clk, NULL, "semi");
+ clk_hw_register_clkdev(hw, NULL, "semi");
if (clk_type == U300_CLK_TYPE_REST && clk_id == 12)
- clk_register_clkdev(clk, NULL, "intcon");
+ clk_hw_register_clkdev(hw, NULL, "intcon");
}
}
@@ -1111,13 +1114,14 @@ static const struct clk_ops mclk_ops = {
.set_rate = mclk_clk_set_rate,
};
-static struct clk * __init
+static struct clk_hw * __init
mclk_clk_register(struct device *dev, const char *name,
const char *parent_name, bool is_mspro)
{
- struct clk *clk;
+ struct clk_hw *hw;
struct clk_mclk *mclk;
struct clk_init_data init;
+ int ret;
mclk = kzalloc(sizeof(struct clk_mclk), GFP_KERNEL);
if (!mclk) {
@@ -1133,23 +1137,26 @@ mclk_clk_register(struct device *dev, const char *name,
mclk->hw.init = &init;
mclk->is_mspro = is_mspro;
- clk = clk_register(dev, &mclk->hw);
- if (IS_ERR(clk))
+ hw = &mclk->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
kfree(mclk);
+ hw = ERR_PTR(ret);
+ }
- return clk;
+ return hw;
}
static void __init of_u300_syscon_mclk_init(struct device_node *np)
{
- struct clk *clk = ERR_PTR(-EINVAL);
+ struct clk_hw *hw;
const char *clk_name = np->name;
const char *parent_name;
parent_name = of_clk_get_parent_name(np, 0);
- clk = mclk_clk_register(NULL, clk_name, parent_name, false);
- if (!IS_ERR(clk))
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ hw = mclk_clk_register(NULL, clk_name, parent_name, false);
+ if (!IS_ERR(hw))
+ of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
}
static const struct of_device_id u300_clk_match[] __initconst = {
diff --git a/drivers/clk/clk-vt8500.c b/drivers/clk/clk-vt8500.c
index b0f76a84f..37368a399 100644
--- a/drivers/clk/clk-vt8500.c
+++ b/drivers/clk/clk-vt8500.c
@@ -383,51 +383,49 @@ static int vt8500_find_pll_bits(unsigned long rate, unsigned long parent_rate,
return 0;
}
-static int wm8650_find_pll_bits(unsigned long rate, unsigned long parent_rate,
- u32 *multiplier, u32 *divisor1, u32 *divisor2)
+/*
+ * M * parent [O1] => / P [O2] => / D [O3]
+ * Where O1 is 900MHz...3GHz;
+ * O2 is 600MHz >= (M * parent) / P >= 300MHz;
+ * M is 36...120 [25MHz parent]; D is 1 or 2 or 4 or 8.
+ * Possible ranges (O3):
+ * D = 8: 37.5MHz...75MHz
+ * D = 4: 75MHz...150MHz
+ * D = 2: 150MHz...300MHz
+ * D = 1: 300MHz...600MHz
+ */
+static int wm8650_find_pll_bits(unsigned long rate,
+ unsigned long parent_rate, u32 *multiplier, u32 *divisor1,
+ u32 *divisor2)
{
- u32 mul, div1;
- int div2;
- u32 best_mul, best_div1, best_div2;
- unsigned long tclk, rate_err, best_err;
-
- best_err = (unsigned long)-1;
+ unsigned long O1, min_err, rate_err;
- /* Find the closest match (lower or equal to requested) */
- for (div1 = 5; div1 >= 3; div1--)
- for (div2 = 3; div2 >= 0; div2--)
- for (mul = 3; mul <= 1023; mul++) {
- tclk = parent_rate * mul / (div1 * (1 << div2));
- if (tclk > rate)
- continue;
- /* error will always be +ve */
- rate_err = rate - tclk;
- if (rate_err == 0) {
- *multiplier = mul;
- *divisor1 = div1;
- *divisor2 = div2;
- return 0;
- }
+ if (!parent_rate || (rate < 37500000) || (rate > 600000000))
+ return -EINVAL;
- if (rate_err < best_err) {
- best_err = rate_err;
- best_mul = mul;
- best_div1 = div1;
- best_div2 = div2;
- }
- }
+ *divisor2 = rate <= 75000000 ? 3 : rate <= 150000000 ? 2 :
+ rate <= 300000000 ? 1 : 0;
+ /*
+ * Divisor P cannot be derived directly. Test all divisors and find
+ * the one for which M comes closest to the requested rate.
+ */
+ min_err = ULONG_MAX;
+ for (*divisor1 = 5; *divisor1 >= 3; (*divisor1)--) {
+ O1 = rate * *divisor1 * (1 << (*divisor2));
+ rate_err = O1 % parent_rate;
+ if (rate_err < min_err) {
+ *multiplier = O1 / parent_rate;
+ if (rate_err == 0)
+ return 0;
+
+ min_err = rate_err;
+ }
+ }
- if (best_err == (unsigned long)-1) {
- pr_warn("%s: impossible rate %lu\n", __func__, rate);
+ if ((*multiplier < 3) || (*multiplier > 1023))
return -EINVAL;
- }
- /* if we got here, it wasn't an exact match */
- pr_warn("%s: requested rate %lu, found rate %lu\n", __func__, rate,
- rate - best_err);
- *multiplier = best_mul;
- *divisor1 = best_div1;
- *divisor2 = best_div2;
+ pr_warn("%s: rate error is %lu\n", __func__, min_err);
return 0;
}
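A worked example of the new search, taking the 25MHz parent from the comment block: for rate = 100MHz, divisor2 becomes 2 (D = 4); the first iteration, divisor1 = 5, gives O1 = 100MHz * 5 * 4 = 2GHz, which parent_rate divides exactly, so the function returns multiplier = 80 and indeed 25MHz * 80 / 5 / 4 = 100MHz, with every intermediate value in the documented ranges (O1 in 900MHz...3GHz, O2 = 400MHz, M = 80). Note the non-exact path, though: the loop exits with *divisor1 already decremented to 2 while *multiplier was saved on an earlier iteration, so only an exact match yields a self-consistent (M, P, D) triple.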
@@ -464,7 +462,6 @@ static int wm8750_find_pll_bits(unsigned long rate, unsigned long parent_rate,
{
u32 mul;
int div1, div2;
- u32 best_mul, best_div1, best_div2;
unsigned long tclk, rate_err, best_err;
best_err = (unsigned long)-1;
@@ -488,9 +485,9 @@ static int wm8750_find_pll_bits(unsigned long rate, unsigned long parent_rate,
if (rate_err < best_err) {
best_err = rate_err;
- best_mul = mul;
- best_div1 = div1;
- best_div2 = div2;
+ *multiplier = mul;
+ *divisor1 = div1;
+ *divisor2 = div2;
}
}
@@ -503,10 +500,7 @@ static int wm8750_find_pll_bits(unsigned long rate, unsigned long parent_rate,
pr_warn("%s: requested rate %lu, found rate %lu\n", __func__, rate,
rate - best_err);
- *filter = wm8750_get_filter(parent_rate, best_div1);
- *multiplier = best_mul;
- *divisor1 = best_div1;
- *divisor2 = best_div2;
+ *filter = wm8750_get_filter(parent_rate, *divisor1);
return 0;
}
@@ -516,7 +510,6 @@ static int wm8850_find_pll_bits(unsigned long rate, unsigned long parent_rate,
{
u32 mul;
int div1, div2;
- u32 best_mul, best_div1, best_div2;
unsigned long tclk, rate_err, best_err;
best_err = (unsigned long)-1;
@@ -540,9 +533,9 @@ static int wm8850_find_pll_bits(unsigned long rate, unsigned long parent_rate,
if (rate_err < best_err) {
best_err = rate_err;
- best_mul = mul;
- best_div1 = div1;
- best_div2 = div2;
+ *multiplier = mul;
+ *divisor1 = div1;
+ *divisor2 = div2;
}
}
@@ -555,10 +548,6 @@ static int wm8850_find_pll_bits(unsigned long rate, unsigned long parent_rate,
pr_warn("%s: requested rate %lu, found rate %lu\n", __func__, rate,
rate - best_err);
- *multiplier = best_mul;
- *divisor1 = best_div1;
- *divisor2 = best_div2;
-
return 0;
}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index d584004f7..820a939fb 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -172,104 +172,6 @@ static bool clk_core_is_enabled(struct clk_core *core)
return core->ops->is_enabled(core->hw);
}
-static void clk_unprepare_unused_subtree(struct clk_core *core)
-{
- struct clk_core *child;
-
- lockdep_assert_held(&prepare_lock);
-
- hlist_for_each_entry(child, &core->children, child_node)
- clk_unprepare_unused_subtree(child);
-
- if (core->prepare_count)
- return;
-
- if (core->flags & CLK_IGNORE_UNUSED)
- return;
-
- if (clk_core_is_prepared(core)) {
- trace_clk_unprepare(core);
- if (core->ops->unprepare_unused)
- core->ops->unprepare_unused(core->hw);
- else if (core->ops->unprepare)
- core->ops->unprepare(core->hw);
- trace_clk_unprepare_complete(core);
- }
-}
-
-static void clk_disable_unused_subtree(struct clk_core *core)
-{
- struct clk_core *child;
- unsigned long flags;
-
- lockdep_assert_held(&prepare_lock);
-
- hlist_for_each_entry(child, &core->children, child_node)
- clk_disable_unused_subtree(child);
-
- flags = clk_enable_lock();
-
- if (core->enable_count)
- goto unlock_out;
-
- if (core->flags & CLK_IGNORE_UNUSED)
- goto unlock_out;
-
- /*
- * some gate clocks have special needs during the disable-unused
- * sequence. call .disable_unused if available, otherwise fall
- * back to .disable
- */
- if (clk_core_is_enabled(core)) {
- trace_clk_disable(core);
- if (core->ops->disable_unused)
- core->ops->disable_unused(core->hw);
- else if (core->ops->disable)
- core->ops->disable(core->hw);
- trace_clk_disable_complete(core);
- }
-
-unlock_out:
- clk_enable_unlock(flags);
-}
-
-static bool clk_ignore_unused;
-static int __init clk_ignore_unused_setup(char *__unused)
-{
- clk_ignore_unused = true;
- return 1;
-}
-__setup("clk_ignore_unused", clk_ignore_unused_setup);
-
-static int clk_disable_unused(void)
-{
- struct clk_core *core;
-
- if (clk_ignore_unused) {
- pr_warn("clk: Not disabling unused clocks\n");
- return 0;
- }
-
- clk_prepare_lock();
-
- hlist_for_each_entry(core, &clk_root_list, child_node)
- clk_disable_unused_subtree(core);
-
- hlist_for_each_entry(core, &clk_orphan_list, child_node)
- clk_disable_unused_subtree(core);
-
- hlist_for_each_entry(core, &clk_root_list, child_node)
- clk_unprepare_unused_subtree(core);
-
- hlist_for_each_entry(core, &clk_orphan_list, child_node)
- clk_unprepare_unused_subtree(core);
-
- clk_prepare_unlock();
-
- return 0;
-}
-late_initcall_sync(clk_disable_unused);
-
/*** helper functions ***/
const char *__clk_get_name(const struct clk *clk)
@@ -591,6 +493,13 @@ static void clk_core_unprepare(struct clk_core *core)
clk_core_unprepare(core->parent);
}
+static void clk_core_unprepare_lock(struct clk_core *core)
+{
+ clk_prepare_lock();
+ clk_core_unprepare(core);
+ clk_prepare_unlock();
+}
+
/**
* clk_unprepare - undo preparation of a clock source
* @clk: the clk being unprepared
@@ -607,9 +516,7 @@ void clk_unprepare(struct clk *clk)
if (IS_ERR_OR_NULL(clk))
return;
- clk_prepare_lock();
- clk_core_unprepare(clk->core);
- clk_prepare_unlock();
+ clk_core_unprepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_unprepare);
@@ -645,6 +552,17 @@ static int clk_core_prepare(struct clk_core *core)
return 0;
}
+static int clk_core_prepare_lock(struct clk_core *core)
+{
+ int ret;
+
+ clk_prepare_lock();
+ ret = clk_core_prepare(core);
+ clk_prepare_unlock();
+
+ return ret;
+}
+
/**
* clk_prepare - prepare a clock source
* @clk: the clk being prepared
@@ -659,16 +577,10 @@ static int clk_core_prepare(struct clk_core *core)
*/
int clk_prepare(struct clk *clk)
{
- int ret;
-
if (!clk)
return 0;
- clk_prepare_lock();
- ret = clk_core_prepare(clk->core);
- clk_prepare_unlock();
-
- return ret;
+ return clk_core_prepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_prepare);
@@ -688,16 +600,25 @@ static void clk_core_disable(struct clk_core *core)
if (--core->enable_count > 0)
return;
- trace_clk_disable(core);
+ trace_clk_disable_rcuidle(core);
if (core->ops->disable)
core->ops->disable(core->hw);
- trace_clk_disable_complete(core);
+ trace_clk_disable_complete_rcuidle(core);
clk_core_disable(core->parent);
}
+static void clk_core_disable_lock(struct clk_core *core)
+{
+ unsigned long flags;
+
+ flags = clk_enable_lock();
+ clk_core_disable(core);
+ clk_enable_unlock(flags);
+}
+
/**
* clk_disable - gate a clock
* @clk: the clk being gated
@@ -712,14 +633,10 @@ static void clk_core_disable(struct clk_core *core)
*/
void clk_disable(struct clk *clk)
{
- unsigned long flags;
-
if (IS_ERR_OR_NULL(clk))
return;
- flags = clk_enable_lock();
- clk_core_disable(clk->core);
- clk_enable_unlock(flags);
+ clk_core_disable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_disable);
@@ -741,12 +658,12 @@ static int clk_core_enable(struct clk_core *core)
if (ret)
return ret;
- trace_clk_enable(core);
+ trace_clk_enable_rcuidle(core);
if (core->ops->enable)
ret = core->ops->enable(core->hw);
- trace_clk_enable_complete(core);
+ trace_clk_enable_complete_rcuidle(core);
if (ret) {
clk_core_disable(core->parent);
@@ -758,6 +675,18 @@ static int clk_core_enable(struct clk_core *core)
return 0;
}
+static int clk_core_enable_lock(struct clk_core *core)
+{
+ unsigned long flags;
+ int ret;
+
+ flags = clk_enable_lock();
+ ret = clk_core_enable(core);
+ clk_enable_unlock(flags);
+
+ return ret;
+}
+
/**
* clk_enable - ungate a clock
* @clk: the clk being ungated
@@ -773,19 +702,136 @@ static int clk_core_enable(struct clk_core *core)
*/
int clk_enable(struct clk *clk)
{
- unsigned long flags;
- int ret;
-
if (!clk)
return 0;
+ return clk_core_enable_lock(clk->core);
+}
+EXPORT_SYMBOL_GPL(clk_enable);
+
+static int clk_core_prepare_enable(struct clk_core *core)
+{
+ int ret;
+
+ ret = clk_core_prepare_lock(core);
+ if (ret)
+ return ret;
+
+ ret = clk_core_enable_lock(core);
+ if (ret)
+ clk_core_unprepare_lock(core);
+
+ return ret;
+}
+
+static void clk_core_disable_unprepare(struct clk_core *core)
+{
+ clk_core_disable_lock(core);
+ clk_core_unprepare_lock(core);
+}
+
+static void clk_unprepare_unused_subtree(struct clk_core *core)
+{
+ struct clk_core *child;
+
+ lockdep_assert_held(&prepare_lock);
+
+ hlist_for_each_entry(child, &core->children, child_node)
+ clk_unprepare_unused_subtree(child);
+
+ if (core->prepare_count)
+ return;
+
+ if (core->flags & CLK_IGNORE_UNUSED)
+ return;
+
+ if (clk_core_is_prepared(core)) {
+ trace_clk_unprepare(core);
+ if (core->ops->unprepare_unused)
+ core->ops->unprepare_unused(core->hw);
+ else if (core->ops->unprepare)
+ core->ops->unprepare(core->hw);
+ trace_clk_unprepare_complete(core);
+ }
+}
+
+static void clk_disable_unused_subtree(struct clk_core *core)
+{
+ struct clk_core *child;
+ unsigned long flags;
+
+ lockdep_assert_held(&prepare_lock);
+
+ hlist_for_each_entry(child, &core->children, child_node)
+ clk_disable_unused_subtree(child);
+
+ if (core->flags & CLK_OPS_PARENT_ENABLE)
+ clk_core_prepare_enable(core->parent);
+
flags = clk_enable_lock();
- ret = clk_core_enable(clk->core);
+
+ if (core->enable_count)
+ goto unlock_out;
+
+ if (core->flags & CLK_IGNORE_UNUSED)
+ goto unlock_out;
+
+ /*
+ * some gate clocks have special needs during the disable-unused
+ * sequence. call .disable_unused if available, otherwise fall
+ * back to .disable
+ */
+ if (clk_core_is_enabled(core)) {
+ trace_clk_disable(core);
+ if (core->ops->disable_unused)
+ core->ops->disable_unused(core->hw);
+ else if (core->ops->disable)
+ core->ops->disable(core->hw);
+ trace_clk_disable_complete(core);
+ }
+
+unlock_out:
clk_enable_unlock(flags);
+ if (core->flags & CLK_OPS_PARENT_ENABLE)
+ clk_core_disable_unprepare(core->parent);
+}
- return ret;
+static bool clk_ignore_unused;
+static int __init clk_ignore_unused_setup(char *__unused)
+{
+ clk_ignore_unused = true;
+ return 1;
}
-EXPORT_SYMBOL_GPL(clk_enable);
+__setup("clk_ignore_unused", clk_ignore_unused_setup);
+
+static int clk_disable_unused(void)
+{
+ struct clk_core *core;
+
+ if (clk_ignore_unused) {
+ pr_warn("clk: Not disabling unused clocks\n");
+ return 0;
+ }
+
+ clk_prepare_lock();
+
+ hlist_for_each_entry(core, &clk_root_list, child_node)
+ clk_disable_unused_subtree(core);
+
+ hlist_for_each_entry(core, &clk_orphan_list, child_node)
+ clk_disable_unused_subtree(core);
+
+ hlist_for_each_entry(core, &clk_root_list, child_node)
+ clk_unprepare_unused_subtree(core);
+
+ hlist_for_each_entry(core, &clk_orphan_list, child_node)
+ clk_unprepare_unused_subtree(core);
+
+ clk_prepare_unlock();
+
+ return 0;
+}
+late_initcall_sync(clk_disable_unused);
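CLK_OPS_PARENT_ENABLE, which the reworked helpers above service, is a new framework flag for clocks whose register accesses only work while the parent is running: the core now brackets disable-unused, reparenting and rate changes with a prepare/enable of the parent. A driver opts in through clk_init_data; a sketch with hypothetical names:

	static const char * const foo_parents[] = { "foo_pll" };

	struct clk_init_data init = {
		.name		= "foo_div",
		.ops		= &foo_div_ops,
		.parent_names	= foo_parents,
		.num_parents	= ARRAY_SIZE(foo_parents),
		.flags		= CLK_OPS_PARENT_ENABLE,
	};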
static int clk_core_round_rate_nolock(struct clk_core *core,
struct clk_rate_request *req)
@@ -828,9 +874,7 @@ static int clk_core_round_rate_nolock(struct clk_core *core,
/**
* __clk_determine_rate - get the closest rate actually supported by a clock
* @hw: determine the rate of this clock
- * @rate: target rate
- * @min_rate: returned rate must be greater than this rate
- * @max_rate: returned rate must be less than this rate
+ * @req: target rate request
*
* Useful for clk_ops such as .set_rate and .determine_rate.
*/
@@ -1128,7 +1172,9 @@ static struct clk_core *__clk_set_parent_before(struct clk_core *core,
struct clk_core *old_parent = core->parent;
/*
- * Migrate prepare state between parents and prevent race with
+ * 1. Enable parents for CLK_OPS_PARENT_ENABLE clocks
+ *
+ * 2. Migrate prepare state between parents and prevent race with
* clk_enable().
*
* If the clock is not prepared, then a race with
@@ -1144,12 +1190,17 @@ static struct clk_core *__clk_set_parent_before(struct clk_core *core,
*
* See also: Comment for clk_set_parent() below.
*/
+
+ /* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */
+ if (core->flags & CLK_OPS_PARENT_ENABLE) {
+ clk_core_prepare_enable(old_parent);
+ clk_core_prepare_enable(parent);
+ }
+
+ /* migrate prepare count if > 0 */
if (core->prepare_count) {
- clk_core_prepare(parent);
- flags = clk_enable_lock();
- clk_core_enable(parent);
- clk_core_enable(core);
- clk_enable_unlock(flags);
+ clk_core_prepare_enable(parent);
+ clk_core_enable_lock(core);
}
/* update the clk tree topology */
@@ -1164,18 +1215,19 @@ static void __clk_set_parent_after(struct clk_core *core,
struct clk_core *parent,
struct clk_core *old_parent)
{
- unsigned long flags;
-
/*
* Finish the migration of prepare state and undo the changes done
* for preventing a race with clk_enable().
*/
if (core->prepare_count) {
- flags = clk_enable_lock();
- clk_core_disable(core);
- clk_core_disable(old_parent);
- clk_enable_unlock(flags);
- clk_core_unprepare(old_parent);
+ clk_core_disable_lock(core);
+ clk_core_disable_unprepare(old_parent);
+ }
+
+ /* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */
+ if (core->flags & CLK_OPS_PARENT_ENABLE) {
+ clk_core_disable_unprepare(parent);
+ clk_core_disable_unprepare(old_parent);
}
}
@@ -1422,13 +1474,17 @@ static void clk_change_rate(struct clk_core *core)
unsigned long best_parent_rate = 0;
bool skip_set_rate = false;
struct clk_core *old_parent;
+ struct clk_core *parent = NULL;
old_rate = core->rate;
- if (core->new_parent)
+ if (core->new_parent) {
+ parent = core->new_parent;
best_parent_rate = core->new_parent->rate;
- else if (core->parent)
+ } else if (core->parent) {
+ parent = core->parent;
best_parent_rate = core->parent->rate;
+ }
if (core->flags & CLK_SET_RATE_UNGATE) {
unsigned long flags;
@@ -1456,6 +1512,9 @@ static void clk_change_rate(struct clk_core *core)
__clk_set_parent_after(core, core->new_parent, old_parent);
}
+ if (core->flags & CLK_OPS_PARENT_ENABLE)
+ clk_core_prepare_enable(parent);
+
trace_clk_set_rate(core, core->new_rate);
if (!skip_set_rate && core->ops->set_rate)
@@ -1474,6 +1533,9 @@ static void clk_change_rate(struct clk_core *core)
clk_core_unprepare(core);
}
+ if (core->flags & CLK_OPS_PARENT_ENABLE)
+ clk_core_disable_unprepare(parent);
+
if (core->notifier_count && old_rate != core->rate)
__clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);
@@ -1501,7 +1563,6 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
{
struct clk_core *top, *fail_clk;
unsigned long rate = req_rate;
- int ret = 0;
if (!core)
return 0;
@@ -1532,7 +1593,7 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
core->req_rate = req_rate;
- return ret;
+ return 0;
}
/**
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 89cc700fb..97ae60fa1 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -250,7 +250,7 @@ struct clk_lookup_alloc {
char con_id[MAX_CON_ID];
};
-static struct clk_lookup * __init_refok
+static struct clk_lookup * __ref
vclkdev_alloc(struct clk_hw *hw, const char *con_id, const char *dev_fmt,
va_list ap)
{
@@ -287,7 +287,7 @@ vclkdev_create(struct clk_hw *hw, const char *con_id, const char *dev_fmt,
return cl;
}
-struct clk_lookup * __init_refok
+struct clk_lookup * __ref
clkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt, ...)
{
struct clk_lookup *cl;
diff --git a/drivers/clk/hisilicon/clk-hi3519.c b/drivers/clk/hisilicon/clk-hi3519.c
index 715c7301a..51b173ef1 100644
--- a/drivers/clk/hisilicon/clk-hi3519.c
+++ b/drivers/clk/hisilicon/clk-hi3519.c
@@ -38,6 +38,11 @@
#define HI3519_NR_CLKS 128
+struct hi3519_crg_data {
+ struct hisi_clock_data *clk_data;
+ struct hisi_reset_controller *rstc;
+};
+
static const struct hisi_fixed_rate_clock hi3519_fixed_rate_clks[] = {
{ HI3519_FIXED_24M, "24m", NULL, 0, 24000000, },
{ HI3519_FIXED_50M, "50m", NULL, 0, 50000000, },
@@ -80,33 +85,105 @@ static const struct hisi_gate_clock hi3519_gate_clks[] = {
CLK_SET_RATE_PARENT, 0xe4, 18, 0, },
};
-static int hi3519_clk_probe(struct platform_device *pdev)
+static struct hisi_clock_data *hi3519_clk_register(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
struct hisi_clock_data *clk_data;
- struct hisi_reset_controller *rstc;
+ int ret;
- rstc = hisi_reset_init(np);
- if (!rstc)
+ clk_data = hisi_clk_alloc(pdev, HI3519_NR_CLKS);
+ if (!clk_data)
+ return ERR_PTR(-ENOMEM);
+
+ ret = hisi_clk_register_fixed_rate(hi3519_fixed_rate_clks,
+ ARRAY_SIZE(hi3519_fixed_rate_clks),
+ clk_data);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = hisi_clk_register_mux(hi3519_mux_clks,
+ ARRAY_SIZE(hi3519_mux_clks),
+ clk_data);
+ if (ret)
+ goto unregister_fixed_rate;
+
+ ret = hisi_clk_register_gate(hi3519_gate_clks,
+ ARRAY_SIZE(hi3519_gate_clks),
+ clk_data);
+ if (ret)
+ goto unregister_mux;
+
+ ret = of_clk_add_provider(pdev->dev.of_node,
+ of_clk_src_onecell_get, &clk_data->clk_data);
+ if (ret)
+ goto unregister_gate;
+
+ return clk_data;
+
+unregister_gate:
+ hisi_clk_unregister_gate(hi3519_gate_clks,
+ ARRAY_SIZE(hi3519_gate_clks),
+ clk_data);
+
+unregister_mux:
+ hisi_clk_unregister_mux(hi3519_mux_clks,
+ ARRAY_SIZE(hi3519_mux_clks),
+ clk_data);
+unregister_fixed_rate:
+ hisi_clk_unregister_fixed_rate(hi3519_fixed_rate_clks,
+ ARRAY_SIZE(hi3519_fixed_rate_clks),
+ clk_data);
+ return ERR_PTR(ret);
+}
+
+static void hi3519_clk_unregister(struct platform_device *pdev)
+{
+ struct hi3519_crg_data *crg = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(pdev->dev.of_node);
+
+ hisi_clk_unregister_gate(hi3519_gate_clks,
+ ARRAY_SIZE(hi3519_gate_clks),
+ crg->clk_data);
+ hisi_clk_unregister_mux(hi3519_mux_clks,
+ ARRAY_SIZE(hi3519_mux_clks),
+ crg->clk_data);
+ hisi_clk_unregister_fixed_rate(hi3519_fixed_rate_clks,
+ ARRAY_SIZE(hi3519_fixed_rate_clks),
+ crg->clk_data);
+}
+
+static int hi3519_clk_probe(struct platform_device *pdev)
+{
+ struct hi3519_crg_data *crg;
+
+ crg = devm_kmalloc(&pdev->dev, sizeof(*crg), GFP_KERNEL);
+ if (!crg)
+ return -ENOMEM;
+
+ crg->rstc = hisi_reset_init(pdev);
+ if (!crg->rstc)
return -ENOMEM;
- clk_data = hisi_clk_init(np, HI3519_NR_CLKS);
- if (!clk_data) {
- hisi_reset_exit(rstc);
- return -ENODEV;
+ crg->clk_data = hi3519_clk_register(pdev);
+ if (IS_ERR(crg->clk_data)) {
+ hisi_reset_exit(crg->rstc);
+ return PTR_ERR(crg->clk_data);
}
- hisi_clk_register_fixed_rate(hi3519_fixed_rate_clks,
- ARRAY_SIZE(hi3519_fixed_rate_clks),
- clk_data);
- hisi_clk_register_mux(hi3519_mux_clks, ARRAY_SIZE(hi3519_mux_clks),
- clk_data);
- hisi_clk_register_gate(hi3519_gate_clks,
- ARRAY_SIZE(hi3519_gate_clks), clk_data);
+ platform_set_drvdata(pdev, crg);
+ return 0;
+}
+
+static int hi3519_clk_remove(struct platform_device *pdev)
+{
+ struct hi3519_crg_data *crg = platform_get_drvdata(pdev);
+ hisi_reset_exit(crg->rstc);
+ hi3519_clk_unregister(pdev);
return 0;
}
+
static const struct of_device_id hi3519_clk_match_table[] = {
{ .compatible = "hisilicon,hi3519-crg" },
{ }
@@ -115,6 +192,7 @@ MODULE_DEVICE_TABLE(of, hi3519_clk_match_table);
static struct platform_driver hi3519_clk_driver = {
.probe = hi3519_clk_probe,
+ .remove = hi3519_clk_remove,
.driver = {
.name = "hi3519-clk",
.of_match_table = hi3519_clk_match_table,
@@ -127,5 +205,11 @@ static int __init hi3519_clk_init(void)
}
core_initcall(hi3519_clk_init);
+static void __exit hi3519_clk_exit(void)
+{
+ platform_driver_unregister(&hi3519_clk_driver);
+}
+module_exit(hi3519_clk_exit);
+
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("HiSilicon Hi3519 Clock Driver");
diff --git a/drivers/clk/hisilicon/clk-hi6220.c b/drivers/clk/hisilicon/clk-hi6220.c
index f02cb41d4..fe364e63f 100644
--- a/drivers/clk/hisilicon/clk-hi6220.c
+++ b/drivers/clk/hisilicon/clk-hi6220.c
@@ -34,8 +34,8 @@ static struct hisi_fixed_rate_clock hi6220_fixed_rate_clks[] __initdata = {
{ HI6220_PLL_BBP, "bbppll0", NULL, 0, 245760000, },
{ HI6220_PLL_GPU, "gpupll", NULL, 0, 1000000000,},
{ HI6220_PLL1_DDR, "ddrpll1", NULL, 0, 1066000000,},
- { HI6220_PLL_SYS, "syspll", NULL, 0, 1200000000,},
- { HI6220_PLL_SYS_MEDIA, "media_syspll", NULL, 0, 1200000000,},
+ { HI6220_PLL_SYS, "syspll", NULL, 0, 1190400000,},
+ { HI6220_PLL_SYS_MEDIA, "media_syspll", NULL, 0, 1190400000,},
{ HI6220_DDR_SRC, "ddr_sel_src", NULL, 0, 1200000000,},
{ HI6220_PLL_MEDIA, "media_pll", NULL, 0, 1440000000,},
{ HI6220_PLL_DDR, "ddrpll0", NULL, 0, 1600000000,},
@@ -68,6 +68,8 @@ static struct hisi_gate_clock hi6220_separated_gate_clks_ao[] __initdata = {
{ HI6220_TIMER7_PCLK, "timer7_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 22, 0, },
{ HI6220_TIMER8_PCLK, "timer8_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 23, 0, },
{ HI6220_UART0_PCLK, "uart0_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 24, 0, },
+ { HI6220_RTC0_PCLK, "rtc0_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 25, 0, },
+ { HI6220_RTC1_PCLK, "rtc1_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 26, 0, },
};
static void __init hi6220_clk_ao_init(struct device_node *np)
diff --git a/drivers/clk/hisilicon/clk.c b/drivers/clk/hisilicon/clk.c
index 9b15adbfc..9ba2d91f4 100644
--- a/drivers/clk/hisilicon/clk.c
+++ b/drivers/clk/hisilicon/clk.c
@@ -37,6 +37,35 @@
static DEFINE_SPINLOCK(hisi_clk_lock);
+struct hisi_clock_data *hisi_clk_alloc(struct platform_device *pdev,
+ int nr_clks)
+{
+ struct hisi_clock_data *clk_data;
+ struct resource *res;
+ struct clk **clk_table;
+
+ clk_data = devm_kmalloc(&pdev->dev, sizeof(*clk_data), GFP_KERNEL);
+ if (!clk_data)
+ return NULL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ clk_data->base = devm_ioremap(&pdev->dev,
+ res->start, resource_size(res));
+ if (!clk_data->base)
+ return NULL;
+
+ clk_table = devm_kmalloc(&pdev->dev, sizeof(struct clk *) * nr_clks,
+ GFP_KERNEL);
+ if (!clk_table)
+ return NULL;
+
+ clk_data->clk_data.clks = clk_table;
+ clk_data->clk_data.clk_num = nr_clks;
+
+ return clk_data;
+}
+EXPORT_SYMBOL_GPL(hisi_clk_alloc);
+
struct hisi_clock_data *hisi_clk_init(struct device_node *np,
int nr_clks)
{
@@ -73,7 +102,7 @@ err:
}
EXPORT_SYMBOL_GPL(hisi_clk_init);
-void hisi_clk_register_fixed_rate(const struct hisi_fixed_rate_clock *clks,
+int hisi_clk_register_fixed_rate(const struct hisi_fixed_rate_clock *clks,
int nums, struct hisi_clock_data *data)
{
struct clk *clk;
@@ -87,14 +116,22 @@ void hisi_clk_register_fixed_rate(const struct hisi_fixed_rate_clock *clks,
if (IS_ERR(clk)) {
pr_err("%s: failed to register clock %s\n",
__func__, clks[i].name);
- continue;
+ goto err;
}
data->clk_data.clks[clks[i].id] = clk;
}
+
+ return 0;
+
+err:
+ while (i--)
+ clk_unregister_fixed_rate(data->clk_data.clks[clks[i].id]);
+
+ return PTR_ERR(clk);
}
EXPORT_SYMBOL_GPL(hisi_clk_register_fixed_rate);
-void hisi_clk_register_fixed_factor(const struct hisi_fixed_factor_clock *clks,
+int hisi_clk_register_fixed_factor(const struct hisi_fixed_factor_clock *clks,
int nums,
struct hisi_clock_data *data)
{
@@ -109,14 +146,22 @@ void hisi_clk_register_fixed_factor(const struct hisi_fixed_factor_clock *clks,
if (IS_ERR(clk)) {
pr_err("%s: failed to register clock %s\n",
__func__, clks[i].name);
- continue;
+ goto err;
}
data->clk_data.clks[clks[i].id] = clk;
}
+
+ return 0;
+
+err:
+ while (i--)
+ clk_unregister_fixed_factor(data->clk_data.clks[clks[i].id]);
+
+ return PTR_ERR(clk);
}
EXPORT_SYMBOL_GPL(hisi_clk_register_fixed_factor);
-void hisi_clk_register_mux(const struct hisi_mux_clock *clks,
+int hisi_clk_register_mux(const struct hisi_mux_clock *clks,
int nums, struct hisi_clock_data *data)
{
struct clk *clk;
@@ -135,7 +180,7 @@ void hisi_clk_register_mux(const struct hisi_mux_clock *clks,
if (IS_ERR(clk)) {
pr_err("%s: failed to register clock %s\n",
__func__, clks[i].name);
- continue;
+ goto err;
}
if (clks[i].alias)
@@ -143,10 +188,18 @@ void hisi_clk_register_mux(const struct hisi_mux_clock *clks,
data->clk_data.clks[clks[i].id] = clk;
}
+
+ return 0;
+
+err:
+ while (i--)
+ clk_unregister_mux(data->clk_data.clks[clks[i].id]);
+
+ return PTR_ERR(clk);
}
EXPORT_SYMBOL_GPL(hisi_clk_register_mux);
-void hisi_clk_register_divider(const struct hisi_divider_clock *clks,
+int hisi_clk_register_divider(const struct hisi_divider_clock *clks,
int nums, struct hisi_clock_data *data)
{
struct clk *clk;
@@ -165,7 +218,7 @@ void hisi_clk_register_divider(const struct hisi_divider_clock *clks,
if (IS_ERR(clk)) {
pr_err("%s: failed to register clock %s\n",
__func__, clks[i].name);
- continue;
+ goto err;
}
if (clks[i].alias)
@@ -173,10 +226,18 @@ void hisi_clk_register_divider(const struct hisi_divider_clock *clks,
data->clk_data.clks[clks[i].id] = clk;
}
+
+ return 0;
+
+err:
+ while (i--)
+ clk_unregister_divider(data->clk_data.clks[clks[i].id]);
+
+ return PTR_ERR(clk);
}
EXPORT_SYMBOL_GPL(hisi_clk_register_divider);
-void hisi_clk_register_gate(const struct hisi_gate_clock *clks,
+int hisi_clk_register_gate(const struct hisi_gate_clock *clks,
int nums, struct hisi_clock_data *data)
{
struct clk *clk;
@@ -194,7 +255,7 @@ void hisi_clk_register_gate(const struct hisi_gate_clock *clks,
if (IS_ERR(clk)) {
pr_err("%s: failed to register clock %s\n",
__func__, clks[i].name);
- continue;
+ goto err;
}
if (clks[i].alias)
@@ -202,6 +263,14 @@ void hisi_clk_register_gate(const struct hisi_gate_clock *clks,
data->clk_data.clks[clks[i].id] = clk;
}
+
+ return 0;
+
+err:
+ while (i--)
+ clk_unregister_gate(data->clk_data.clks[clks[i].id]);
+
+ return PTR_ERR(clk);
}
EXPORT_SYMBOL_GPL(hisi_clk_register_gate);
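
Each register helper above is converted from void to int, and on the first clk_register_*() failure it rolls back only the clocks registered so far with a "while (i--)" loop. A stand-alone sketch of that rollback idiom (invented register_one()/unregister_one() helpers):

    #include <stdio.h>

    #define N 5

    static int register_one(int i) { return i == 3 ? -1 : 0; } /* fail at 3 */
    static void unregister_one(int i) { printf("unregister %d\n", i); }

    int main(void)
    {
        int i;

        for (i = 0; i < N; i++) {
            if (register_one(i))
                goto err;
        }
        return 0;

    err:
        /* unwind only entries 0..i-1, in reverse: prints 2, 1, 0 */
        while (i--)
            unregister_one(i);
        return 1;
    }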
diff --git a/drivers/clk/hisilicon/clk.h b/drivers/clk/hisilicon/clk.h
index 20d64afe4..4e1d1affc 100644
--- a/drivers/clk/hisilicon/clk.h
+++ b/drivers/clk/hisilicon/clk.h
@@ -30,6 +30,8 @@
#include <linux/io.h>
#include <linux/spinlock.h>
+struct platform_device;
+
struct hisi_clock_data {
struct clk_onecell_data clk_data;
void __iomem *base;
@@ -110,19 +112,41 @@ struct clk *hi6220_register_clkdiv(struct device *dev, const char *name,
const char *parent_name, unsigned long flags, void __iomem *reg,
u8 shift, u8 width, u32 mask_bit, spinlock_t *lock);
+struct hisi_clock_data *hisi_clk_alloc(struct platform_device *, int);
struct hisi_clock_data *hisi_clk_init(struct device_node *, int);
-void hisi_clk_register_fixed_rate(const struct hisi_fixed_rate_clock *,
+int hisi_clk_register_fixed_rate(const struct hisi_fixed_rate_clock *,
int, struct hisi_clock_data *);
-void hisi_clk_register_fixed_factor(const struct hisi_fixed_factor_clock *,
+int hisi_clk_register_fixed_factor(const struct hisi_fixed_factor_clock *,
int, struct hisi_clock_data *);
-void hisi_clk_register_mux(const struct hisi_mux_clock *, int,
+int hisi_clk_register_mux(const struct hisi_mux_clock *, int,
struct hisi_clock_data *);
-void hisi_clk_register_divider(const struct hisi_divider_clock *,
+int hisi_clk_register_divider(const struct hisi_divider_clock *,
int, struct hisi_clock_data *);
-void hisi_clk_register_gate(const struct hisi_gate_clock *,
+int hisi_clk_register_gate(const struct hisi_gate_clock *,
int, struct hisi_clock_data *);
void hisi_clk_register_gate_sep(const struct hisi_gate_clock *,
int, struct hisi_clock_data *);
void hi6220_clk_register_divider(const struct hi6220_divider_clock *,
int, struct hisi_clock_data *);
+
+#define hisi_clk_unregister(type) \
+static inline \
+void hisi_clk_unregister_##type(const struct hisi_##type##_clock *clks, \
+ int nums, struct hisi_clock_data *data) \
+{ \
+ struct clk **clocks = data->clk_data.clks; \
+ int i; \
+ for (i = 0; i < nums; i++) { \
+ int id = clks[i].id; \
+ if (clocks[id]) \
+ clk_unregister_##type(clocks[id]); \
+ } \
+}
+
+hisi_clk_unregister(fixed_rate)
+hisi_clk_unregister(fixed_factor)
+hisi_clk_unregister(mux)
+hisi_clk_unregister(divider)
+hisi_clk_unregister(gate)
+
#endif /* __HISI_CLK_H */
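
The hisi_clk_unregister() macro above stamps out one inline unregister helper per clock type via token pasting, so each new type needs only a one-line instantiation. A compilable stand-alone illustration of the same trick (toy names, not the hisi API):

    #include <stdio.h>

    /* One expansion per type generates unregister_<type>(int id). */
    #define make_unregister(type) \
    static inline void unregister_##type(int id) \
    { \
        printf("unregister " #type " clock %d\n", id); \
    }

    make_unregister(mux)
    make_unregister(gate)

    int main(void)
    {
        unregister_mux(7);
        unregister_gate(9);
        return 0;
    }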
diff --git a/drivers/clk/hisilicon/clkdivider-hi6220.c b/drivers/clk/hisilicon/clkdivider-hi6220.c
index 113eee8ed..a1c1f684a 100644
--- a/drivers/clk/hisilicon/clkdivider-hi6220.c
+++ b/drivers/clk/hisilicon/clkdivider-hi6220.c
@@ -18,6 +18,8 @@
#include <linux/err.h>
#include <linux/spinlock.h>
+#include "clk.h"
+
#define div_mask(width) ((1 << (width)) - 1)
/**
diff --git a/drivers/clk/hisilicon/reset.c b/drivers/clk/hisilicon/reset.c
index 6aa49c220..2a5015c73 100644
--- a/drivers/clk/hisilicon/reset.c
+++ b/drivers/clk/hisilicon/reset.c
@@ -19,6 +19,7 @@
#include <linux/io.h>
#include <linux/of_address.h>
+#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -98,25 +99,25 @@ static const struct reset_control_ops hisi_reset_ops = {
.deassert = hisi_reset_deassert,
};
-struct hisi_reset_controller *hisi_reset_init(struct device_node *np)
+struct hisi_reset_controller *hisi_reset_init(struct platform_device *pdev)
{
struct hisi_reset_controller *rstc;
+ struct resource *res;
- rstc = kzalloc(sizeof(*rstc), GFP_KERNEL);
+ rstc = devm_kmalloc(&pdev->dev, sizeof(*rstc), GFP_KERNEL);
if (!rstc)
return NULL;
- rstc->membase = of_iomap(np, 0);
- if (!rstc->membase) {
- kfree(rstc);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rstc->membase = devm_ioremap(&pdev->dev,
+ res->start, resource_size(res));
+ if (!rstc->membase)
return NULL;
- }
spin_lock_init(&rstc->lock);
-
rstc->rcdev.owner = THIS_MODULE;
rstc->rcdev.ops = &hisi_reset_ops;
- rstc->rcdev.of_node = np;
+ rstc->rcdev.of_node = pdev->dev.of_node;
rstc->rcdev.of_reset_n_cells = 2;
rstc->rcdev.of_xlate = hisi_reset_of_xlate;
reset_controller_register(&rstc->rcdev);
@@ -128,7 +129,5 @@ EXPORT_SYMBOL_GPL(hisi_reset_init);
void hisi_reset_exit(struct hisi_reset_controller *rstc)
{
reset_controller_unregister(&rstc->rcdev);
- iounmap(rstc->membase);
- kfree(rstc);
}
EXPORT_SYMBOL_GPL(hisi_reset_exit);
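
Because hisi_reset_init() now allocates and maps through devm_*, hisi_reset_exit() can drop its explicit iounmap()/kfree(): the device core releases managed resources in reverse order when the device is unbound. A toy user-space model of that lifetime rule (the struct dev and helpers here are invented, not the kernel's devres API):

    #include <stdlib.h>

    struct dev { void *res[8]; int n; };

    /* Record each allocation against the owning device. */
    static void *devm_alloc(struct dev *d, size_t sz)
    {
        void *p = calloc(1, sz);
        if (p && d->n < 8)
            d->res[d->n++] = p;
        return p;
    }

    /* On teardown, free everything in reverse order, like devres. */
    static void dev_release(struct dev *d)
    {
        while (d->n--)
            free(d->res[d->n]);
    }

    int main(void)
    {
        struct dev d = { .n = 0 };

        devm_alloc(&d, 64);     /* e.g. the reset controller struct */
        devm_alloc(&d, 128);    /* e.g. its mapped register window */
        dev_release(&d);        /* nothing left to free by hand */
        return 0;
    }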
diff --git a/drivers/clk/hisilicon/reset.h b/drivers/clk/hisilicon/reset.h
index 677d773ed..9a69374a0 100644
--- a/drivers/clk/hisilicon/reset.h
+++ b/drivers/clk/hisilicon/reset.h
@@ -22,10 +22,11 @@ struct device_node;
struct hisi_reset_controller;
#ifdef CONFIG_RESET_CONTROLLER
-struct hisi_reset_controller *hisi_reset_init(struct device_node *np);
+struct hisi_reset_controller *hisi_reset_init(struct platform_device *pdev);
void hisi_reset_exit(struct hisi_reset_controller *rstc);
#else
-static inline hisi_reset_controller *hisi_reset_init(struct device_node *np)
+static inline
+struct hisi_reset_controller *hisi_reset_init(struct platform_device *pdev)
{
return 0;
}
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index 2beb396fe..ba1c1ae72 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -192,13 +192,13 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_PLL7_BYPASS_SRC] = imx_clk_mux("pll7_bypass_src", base + 0x20, 14, 2, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
/* type name parent_name base div_mask */
- clk[IMX6QDL_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1", "pll1_bypass_src", base + 0x00, 0x7f);
- clk[IMX6QDL_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "pll2_bypass_src", base + 0x30, 0x1);
- clk[IMX6QDL_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3", "pll3_bypass_src", base + 0x10, 0x3);
- clk[IMX6QDL_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "pll4_bypass_src", base + 0x70, 0x7f);
- clk[IMX6QDL_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5", "pll5_bypass_src", base + 0xa0, 0x7f);
- clk[IMX6QDL_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6", "pll6_bypass_src", base + 0xe0, 0x3);
- clk[IMX6QDL_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7", "pll7_bypass_src", base + 0x20, 0x3);
+ clk[IMX6QDL_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1", "osc", base + 0x00, 0x7f);
+ clk[IMX6QDL_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "osc", base + 0x30, 0x1);
+ clk[IMX6QDL_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3", "osc", base + 0x10, 0x3);
+ clk[IMX6QDL_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "osc", base + 0x70, 0x7f);
+ clk[IMX6QDL_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5", "osc", base + 0xa0, 0x7f);
+ clk[IMX6QDL_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6", "osc", base + 0xe0, 0x3);
+ clk[IMX6QDL_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7", "osc", base + 0x20, 0x3);
clk[IMX6QDL_PLL1_BYPASS] = imx_clk_mux_flags("pll1_bypass", base + 0x00, 16, 1, pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), CLK_SET_RATE_PARENT);
clk[IMX6QDL_PLL2_BYPASS] = imx_clk_mux_flags("pll2_bypass", base + 0x30, 16, 1, pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT);
diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c
index 1be6230a0..5fd4ddac1 100644
--- a/drivers/clk/imx/clk-imx6sl.c
+++ b/drivers/clk/imx/clk-imx6sl.c
@@ -218,13 +218,13 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
clks[IMX6SL_PLL7_BYPASS_SRC] = imx_clk_mux("pll7_bypass_src", base + 0x20, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
/* type name parent_name base div_mask */
- clks[IMX6SL_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1", "pll1_bypass_src", base + 0x00, 0x7f);
- clks[IMX6SL_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "pll2_bypass_src", base + 0x30, 0x1);
- clks[IMX6SL_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3", "pll3_bypass_src", base + 0x10, 0x3);
- clks[IMX6SL_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "pll4_bypass_src", base + 0x70, 0x7f);
- clks[IMX6SL_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5", "pll5_bypass_src", base + 0xa0, 0x7f);
- clks[IMX6SL_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6", "pll6_bypass_src", base + 0xe0, 0x3);
- clks[IMX6SL_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7", "pll7_bypass_src", base + 0x20, 0x3);
+ clks[IMX6SL_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1", "osc", base + 0x00, 0x7f);
+ clks[IMX6SL_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "osc", base + 0x30, 0x1);
+ clks[IMX6SL_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3", "osc", base + 0x10, 0x3);
+ clks[IMX6SL_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "osc", base + 0x70, 0x7f);
+ clks[IMX6SL_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5", "osc", base + 0xa0, 0x7f);
+ clks[IMX6SL_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6", "osc", base + 0xe0, 0x3);
+ clks[IMX6SL_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7", "osc", base + 0x20, 0x3);
clks[IMX6SL_PLL1_BYPASS] = imx_clk_mux_flags("pll1_bypass", base + 0x00, 16, 1, pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), CLK_SET_RATE_PARENT);
clks[IMX6SL_PLL2_BYPASS] = imx_clk_mux_flags("pll2_bypass", base + 0x30, 16, 1, pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT);
diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
index 97e742a8b..b5c96de41 100644
--- a/drivers/clk/imx/clk-imx6sx.c
+++ b/drivers/clk/imx/clk-imx6sx.c
@@ -174,13 +174,13 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clks[IMX6SX_PLL7_BYPASS_SRC] = imx_clk_mux("pll7_bypass_src", base + 0x20, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
/* type name parent_name base div_mask */
- clks[IMX6SX_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1", "pll1_bypass_src", base + 0x00, 0x7f);
- clks[IMX6SX_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "pll2_bypass_src", base + 0x30, 0x1);
- clks[IMX6SX_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3", "pll3_bypass_src", base + 0x10, 0x3);
- clks[IMX6SX_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "pll4_bypass_src", base + 0x70, 0x7f);
- clks[IMX6SX_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5", "pll5_bypass_src", base + 0xa0, 0x7f);
- clks[IMX6SX_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6", "pll6_bypass_src", base + 0xe0, 0x3);
- clks[IMX6SX_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7", "pll7_bypass_src", base + 0x20, 0x3);
+ clks[IMX6SX_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1", "osc", base + 0x00, 0x7f);
+ clks[IMX6SX_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "osc", base + 0x30, 0x1);
+ clks[IMX6SX_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3", "osc", base + 0x10, 0x3);
+ clks[IMX6SX_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "osc", base + 0x70, 0x7f);
+ clks[IMX6SX_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5", "osc", base + 0xa0, 0x7f);
+ clks[IMX6SX_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6", "osc", base + 0xe0, 0x3);
+ clks[IMX6SX_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7", "osc", base + 0x20, 0x3);
clks[IMX6SX_PLL1_BYPASS] = imx_clk_mux_flags("pll1_bypass", base + 0x00, 16, 1, pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), CLK_SET_RATE_PARENT);
clks[IMX6SX_PLL2_BYPASS] = imx_clk_mux_flags("pll2_bypass", base + 0x30, 16, 1, pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT);
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
index 0f1f17a8f..d1d7787ce 100644
--- a/drivers/clk/imx/clk-imx6ul.c
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -130,13 +130,13 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_PLL6_BYPASS_SRC] = imx_clk_mux("pll6_bypass_src", base + 0xe0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
clks[IMX6UL_PLL7_BYPASS_SRC] = imx_clk_mux("pll7_bypass_src", base + 0x20, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
- clks[IMX6UL_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1", "pll1_bypass_src", base + 0x00, 0x7f);
- clks[IMX6UL_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "pll2_bypass_src", base + 0x30, 0x1);
- clks[IMX6UL_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3", "pll3_bypass_src", base + 0x10, 0x3);
- clks[IMX6UL_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "pll4_bypass_src", base + 0x70, 0x7f);
- clks[IMX6UL_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5", "pll5_bypass_src", base + 0xa0, 0x7f);
- clks[IMX6UL_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6", "pll6_bypass_src", base + 0xe0, 0x3);
- clks[IMX6UL_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7", "pll7_bypass_src", base + 0x20, 0x3);
+ clks[IMX6UL_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1", "osc", base + 0x00, 0x7f);
+ clks[IMX6UL_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "osc", base + 0x30, 0x1);
+ clks[IMX6UL_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3", "osc", base + 0x10, 0x3);
+ clks[IMX6UL_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "osc", base + 0x70, 0x7f);
+ clks[IMX6UL_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5", "osc", base + 0xa0, 0x7f);
+ clks[IMX6UL_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6", "osc", base + 0xe0, 0x3);
+ clks[IMX6UL_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7", "osc", base + 0x20, 0x3);
clks[IMX6UL_PLL1_BYPASS] = imx_clk_mux_flags("pll1_bypass", base + 0x00, 16, 1, pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), CLK_SET_RATE_PARENT);
clks[IMX6UL_PLL2_BYPASS] = imx_clk_mux_flags("pll2_bypass", base + 0x30, 16, 1, pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT);
@@ -305,8 +305,8 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_CAN1_SERIAL] = imx_clk_gate2("can1_serial", "can_podf", base + 0x68, 16);
clks[IMX6UL_CLK_CAN2_IPG] = imx_clk_gate2("can2_ipg", "ipg", base + 0x68, 18);
clks[IMX6UL_CLK_CAN2_SERIAL] = imx_clk_gate2("can2_serial", "can_podf", base + 0x68, 20);
- clks[IMX6UL_CLK_GPT2_BUS] = imx_clk_gate2("gpt_bus", "perclk", base + 0x68, 24);
- clks[IMX6UL_CLK_GPT2_SERIAL] = imx_clk_gate2("gpt_serial", "perclk", base + 0x68, 26);
+ clks[IMX6UL_CLK_GPT2_BUS] = imx_clk_gate2("gpt2_bus", "perclk", base + 0x68, 24);
+ clks[IMX6UL_CLK_GPT2_SERIAL] = imx_clk_gate2("gpt2_serial", "perclk", base + 0x68, 26);
clks[IMX6UL_CLK_UART2_IPG] = imx_clk_gate2("uart2_ipg", "ipg", base + 0x68, 28);
clks[IMX6UL_CLK_UART2_SERIAL] = imx_clk_gate2("uart2_serial", "uart_podf", base + 0x68, 28);
clks[IMX6UL_CLK_AIPSTZ3] = imx_clk_gate2("aips_tz3", "ahb", base + 0x68, 30);
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index 522996800..6ed4f8fa0 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -65,7 +65,7 @@ static const char *dram_phym_sel[] = { "pll_dram_main_clk",
"dram_phym_alt_clk", };
static const char *dram_sel[] = { "pll_dram_main_clk",
- "dram_alt_clk", };
+ "dram_alt_root_clk", };
static const char *dram_phym_alt_sel[] = { "osc", "pll_dram_533m_clk",
"pll_sys_main_clk", "pll_enet_500m_clk",
@@ -361,6 +361,14 @@ static const char *pll_enet_bypass_sel[] = { "pll_enet_main", "pll_enet_main_src
static const char *pll_audio_bypass_sel[] = { "pll_audio_main", "pll_audio_main_src", };
static const char *pll_video_bypass_sel[] = { "pll_video_main", "pll_video_main_src", };
+static int const clks_init_on[] __initconst = {
+ IMX7D_ARM_A7_ROOT_CLK, IMX7D_MAIN_AXI_ROOT_CLK,
+ IMX7D_PLL_SYS_MAIN_480M_CLK, IMX7D_NAND_USDHC_BUS_ROOT_CLK,
+ IMX7D_DRAM_PHYM_ROOT_CLK, IMX7D_DRAM_ROOT_CLK,
+ IMX7D_DRAM_PHYM_ALT_ROOT_CLK, IMX7D_DRAM_ALT_ROOT_CLK,
+ IMX7D_AHB_CHANNEL_ROOT_CLK,
+};
+
static struct clk_onecell_data clk_data;
static struct clk ** const uart_clks[] __initconst = {
@@ -395,12 +403,12 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_PLL_AUDIO_MAIN_SRC] = imx_clk_mux("pll_audio_main_src", base + 0xf0, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel));
clks[IMX7D_PLL_VIDEO_MAIN_SRC] = imx_clk_mux("pll_video_main_src", base + 0x130, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel));
- clks[IMX7D_PLL_ARM_MAIN] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll_arm_main", "pll_arm_main_src", base + 0x60, 0x7f);
- clks[IMX7D_PLL_DRAM_MAIN] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll_dram_main", "pll_dram_main_src", base + 0x70, 0x7f);
- clks[IMX7D_PLL_SYS_MAIN] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll_sys_main", "pll_sys_main_src", base + 0xb0, 0x1);
- clks[IMX7D_PLL_ENET_MAIN] = imx_clk_pllv3(IMX_PLLV3_ENET_IMX7, "pll_enet_main", "pll_enet_main_src", base + 0xe0, 0x0);
- clks[IMX7D_PLL_AUDIO_MAIN] = imx_clk_pllv3(IMX_PLLV3_AV, "pll_audio_main", "pll_audio_main_src", base + 0xf0, 0x7f);
- clks[IMX7D_PLL_VIDEO_MAIN] = imx_clk_pllv3(IMX_PLLV3_AV, "pll_video_main", "pll_video_main_src", base + 0x130, 0x7f);
+ clks[IMX7D_PLL_ARM_MAIN] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll_arm_main", "osc", base + 0x60, 0x7f);
+ clks[IMX7D_PLL_DRAM_MAIN] = imx_clk_pllv3(IMX_PLLV3_AV, "pll_dram_main", "osc", base + 0x70, 0x7f);
+ clks[IMX7D_PLL_SYS_MAIN] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll_sys_main", "osc", base + 0xb0, 0x1);
+ clks[IMX7D_PLL_ENET_MAIN] = imx_clk_pllv3(IMX_PLLV3_ENET_IMX7, "pll_enet_main", "osc", base + 0xe0, 0x0);
+ clks[IMX7D_PLL_AUDIO_MAIN] = imx_clk_pllv3(IMX_PLLV3_AV, "pll_audio_main", "osc", base + 0xf0, 0x7f);
+ clks[IMX7D_PLL_VIDEO_MAIN] = imx_clk_pllv3(IMX_PLLV3_AV, "pll_video_main", "osc", base + 0x130, 0x7f);
clks[IMX7D_PLL_ARM_MAIN_BYPASS] = imx_clk_mux_flags("pll_arm_main_bypass", base + 0x60, 16, 1, pll_arm_bypass_sel, ARRAY_SIZE(pll_arm_bypass_sel), CLK_SET_RATE_PARENT);
clks[IMX7D_PLL_DRAM_MAIN_BYPASS] = imx_clk_mux_flags("pll_dram_main_bypass", base + 0x70, 16, 1, pll_dram_bypass_sel, ARRAY_SIZE(pll_dram_bypass_sel), CLK_SET_RATE_PARENT);
@@ -474,363 +482,363 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
base = of_iomap(np, 0);
WARN_ON(!base);
- clks[IMX7D_ARM_A7_ROOT_SRC] = imx_clk_mux("arm_a7_src", base + 0x8000, 24, 3, arm_a7_sel, ARRAY_SIZE(arm_a7_sel));
- clks[IMX7D_ARM_M4_ROOT_SRC] = imx_clk_mux("arm_m4_src", base + 0x8080, 24, 3, arm_m4_sel, ARRAY_SIZE(arm_m4_sel));
- clks[IMX7D_ARM_M0_ROOT_SRC] = imx_clk_mux("arm_m0_src", base + 0x8100, 24, 3, arm_m0_sel, ARRAY_SIZE(arm_m0_sel));
- clks[IMX7D_MAIN_AXI_ROOT_SRC] = imx_clk_mux("axi_src", base + 0x8800, 24, 3, axi_sel, ARRAY_SIZE(axi_sel));
- clks[IMX7D_DISP_AXI_ROOT_SRC] = imx_clk_mux("disp_axi_src", base + 0x8880, 24, 3, disp_axi_sel, ARRAY_SIZE(disp_axi_sel));
- clks[IMX7D_ENET_AXI_ROOT_SRC] = imx_clk_mux("enet_axi_src", base + 0x8900, 24, 3, enet_axi_sel, ARRAY_SIZE(enet_axi_sel));
- clks[IMX7D_NAND_USDHC_BUS_ROOT_SRC] = imx_clk_mux("nand_usdhc_src", base + 0x8980, 24, 3, nand_usdhc_bus_sel, ARRAY_SIZE(nand_usdhc_bus_sel));
- clks[IMX7D_AHB_CHANNEL_ROOT_SRC] = imx_clk_mux("ahb_src", base + 0x9000, 24, 3, ahb_channel_sel, ARRAY_SIZE(ahb_channel_sel));
- clks[IMX7D_DRAM_PHYM_ROOT_SRC] = imx_clk_mux("dram_phym_src", base + 0x9800, 24, 1, dram_phym_sel, ARRAY_SIZE(dram_phym_sel));
- clks[IMX7D_DRAM_ROOT_SRC] = imx_clk_mux("dram_src", base + 0x9880, 24, 1, dram_sel, ARRAY_SIZE(dram_sel));
- clks[IMX7D_DRAM_PHYM_ALT_ROOT_SRC] = imx_clk_mux("dram_phym_alt_src", base + 0xa000, 24, 3, dram_phym_alt_sel, ARRAY_SIZE(dram_phym_alt_sel));
- clks[IMX7D_DRAM_ALT_ROOT_SRC] = imx_clk_mux("dram_alt_src", base + 0xa080, 24, 3, dram_alt_sel, ARRAY_SIZE(dram_alt_sel));
- clks[IMX7D_USB_HSIC_ROOT_SRC] = imx_clk_mux("usb_hsic_src", base + 0xa100, 24, 3, usb_hsic_sel, ARRAY_SIZE(usb_hsic_sel));
- clks[IMX7D_PCIE_CTRL_ROOT_SRC] = imx_clk_mux("pcie_ctrl_src", base + 0xa180, 24, 3, pcie_ctrl_sel, ARRAY_SIZE(pcie_ctrl_sel));
- clks[IMX7D_PCIE_PHY_ROOT_SRC] = imx_clk_mux("pcie_phy_src", base + 0xa200, 24, 3, pcie_phy_sel, ARRAY_SIZE(pcie_phy_sel));
- clks[IMX7D_EPDC_PIXEL_ROOT_SRC] = imx_clk_mux("epdc_pixel_src", base + 0xa280, 24, 3, epdc_pixel_sel, ARRAY_SIZE(epdc_pixel_sel));
- clks[IMX7D_LCDIF_PIXEL_ROOT_SRC] = imx_clk_mux("lcdif_pixel_src", base + 0xa300, 24, 3, lcdif_pixel_sel, ARRAY_SIZE(lcdif_pixel_sel));
- clks[IMX7D_MIPI_DSI_ROOT_SRC] = imx_clk_mux("mipi_dsi_src", base + 0xa380, 24, 3, mipi_dsi_sel, ARRAY_SIZE(mipi_dsi_sel));
- clks[IMX7D_MIPI_CSI_ROOT_SRC] = imx_clk_mux("mipi_csi_src", base + 0xa400, 24, 3, mipi_csi_sel, ARRAY_SIZE(mipi_csi_sel));
- clks[IMX7D_MIPI_DPHY_ROOT_SRC] = imx_clk_mux("mipi_dphy_src", base + 0xa480, 24, 3, mipi_dphy_sel, ARRAY_SIZE(mipi_dphy_sel));
- clks[IMX7D_SAI1_ROOT_SRC] = imx_clk_mux("sai1_src", base + 0xa500, 24, 3, sai1_sel, ARRAY_SIZE(sai1_sel));
- clks[IMX7D_SAI2_ROOT_SRC] = imx_clk_mux("sai2_src", base + 0xa580, 24, 3, sai2_sel, ARRAY_SIZE(sai2_sel));
- clks[IMX7D_SAI3_ROOT_SRC] = imx_clk_mux("sai3_src", base + 0xa600, 24, 3, sai3_sel, ARRAY_SIZE(sai3_sel));
- clks[IMX7D_SPDIF_ROOT_SRC] = imx_clk_mux("spdif_src", base + 0xa680, 24, 3, spdif_sel, ARRAY_SIZE(spdif_sel));
- clks[IMX7D_ENET1_REF_ROOT_SRC] = imx_clk_mux("enet1_ref_src", base + 0xa700, 24, 3, enet1_ref_sel, ARRAY_SIZE(enet1_ref_sel));
- clks[IMX7D_ENET1_TIME_ROOT_SRC] = imx_clk_mux("enet1_time_src", base + 0xa780, 24, 3, enet1_time_sel, ARRAY_SIZE(enet1_time_sel));
- clks[IMX7D_ENET2_REF_ROOT_SRC] = imx_clk_mux("enet2_ref_src", base + 0xa800, 24, 3, enet2_ref_sel, ARRAY_SIZE(enet2_ref_sel));
- clks[IMX7D_ENET2_TIME_ROOT_SRC] = imx_clk_mux("enet2_time_src", base + 0xa880, 24, 3, enet2_time_sel, ARRAY_SIZE(enet2_time_sel));
- clks[IMX7D_ENET_PHY_REF_ROOT_SRC] = imx_clk_mux("enet_phy_ref_src", base + 0xa900, 24, 3, enet_phy_ref_sel, ARRAY_SIZE(enet_phy_ref_sel));
- clks[IMX7D_EIM_ROOT_SRC] = imx_clk_mux("eim_src", base + 0xa980, 24, 3, eim_sel, ARRAY_SIZE(eim_sel));
- clks[IMX7D_NAND_ROOT_SRC] = imx_clk_mux("nand_src", base + 0xaa00, 24, 3, nand_sel, ARRAY_SIZE(nand_sel));
- clks[IMX7D_QSPI_ROOT_SRC] = imx_clk_mux("qspi_src", base + 0xaa80, 24, 3, qspi_sel, ARRAY_SIZE(qspi_sel));
- clks[IMX7D_USDHC1_ROOT_SRC] = imx_clk_mux("usdhc1_src", base + 0xab00, 24, 3, usdhc1_sel, ARRAY_SIZE(usdhc1_sel));
- clks[IMX7D_USDHC2_ROOT_SRC] = imx_clk_mux("usdhc2_src", base + 0xab80, 24, 3, usdhc2_sel, ARRAY_SIZE(usdhc2_sel));
- clks[IMX7D_USDHC3_ROOT_SRC] = imx_clk_mux("usdhc3_src", base + 0xac00, 24, 3, usdhc3_sel, ARRAY_SIZE(usdhc3_sel));
- clks[IMX7D_CAN1_ROOT_SRC] = imx_clk_mux("can1_src", base + 0xac80, 24, 3, can1_sel, ARRAY_SIZE(can1_sel));
- clks[IMX7D_CAN2_ROOT_SRC] = imx_clk_mux("can2_src", base + 0xad00, 24, 3, can2_sel, ARRAY_SIZE(can2_sel));
- clks[IMX7D_I2C1_ROOT_SRC] = imx_clk_mux("i2c1_src", base + 0xad80, 24, 3, i2c1_sel, ARRAY_SIZE(i2c1_sel));
- clks[IMX7D_I2C2_ROOT_SRC] = imx_clk_mux("i2c2_src", base + 0xae00, 24, 3, i2c2_sel, ARRAY_SIZE(i2c2_sel));
- clks[IMX7D_I2C3_ROOT_SRC] = imx_clk_mux("i2c3_src", base + 0xae80, 24, 3, i2c3_sel, ARRAY_SIZE(i2c3_sel));
- clks[IMX7D_I2C4_ROOT_SRC] = imx_clk_mux("i2c4_src", base + 0xaf00, 24, 3, i2c4_sel, ARRAY_SIZE(i2c4_sel));
- clks[IMX7D_UART1_ROOT_SRC] = imx_clk_mux("uart1_src", base + 0xaf80, 24, 3, uart1_sel, ARRAY_SIZE(uart1_sel));
- clks[IMX7D_UART2_ROOT_SRC] = imx_clk_mux("uart2_src", base + 0xb000, 24, 3, uart2_sel, ARRAY_SIZE(uart2_sel));
- clks[IMX7D_UART3_ROOT_SRC] = imx_clk_mux("uart3_src", base + 0xb080, 24, 3, uart3_sel, ARRAY_SIZE(uart3_sel));
- clks[IMX7D_UART4_ROOT_SRC] = imx_clk_mux("uart4_src", base + 0xb100, 24, 3, uart4_sel, ARRAY_SIZE(uart4_sel));
- clks[IMX7D_UART5_ROOT_SRC] = imx_clk_mux("uart5_src", base + 0xb180, 24, 3, uart5_sel, ARRAY_SIZE(uart5_sel));
- clks[IMX7D_UART6_ROOT_SRC] = imx_clk_mux("uart6_src", base + 0xb200, 24, 3, uart6_sel, ARRAY_SIZE(uart6_sel));
- clks[IMX7D_UART7_ROOT_SRC] = imx_clk_mux("uart7_src", base + 0xb280, 24, 3, uart7_sel, ARRAY_SIZE(uart7_sel));
- clks[IMX7D_ECSPI1_ROOT_SRC] = imx_clk_mux("ecspi1_src", base + 0xb300, 24, 3, ecspi1_sel, ARRAY_SIZE(ecspi1_sel));
- clks[IMX7D_ECSPI2_ROOT_SRC] = imx_clk_mux("ecspi2_src", base + 0xb380, 24, 3, ecspi2_sel, ARRAY_SIZE(ecspi2_sel));
- clks[IMX7D_ECSPI3_ROOT_SRC] = imx_clk_mux("ecspi3_src", base + 0xb400, 24, 3, ecspi3_sel, ARRAY_SIZE(ecspi3_sel));
- clks[IMX7D_ECSPI4_ROOT_SRC] = imx_clk_mux("ecspi4_src", base + 0xb480, 24, 3, ecspi4_sel, ARRAY_SIZE(ecspi4_sel));
- clks[IMX7D_PWM1_ROOT_SRC] = imx_clk_mux("pwm1_src", base + 0xb500, 24, 3, pwm1_sel, ARRAY_SIZE(pwm1_sel));
- clks[IMX7D_PWM2_ROOT_SRC] = imx_clk_mux("pwm2_src", base + 0xb580, 24, 3, pwm2_sel, ARRAY_SIZE(pwm2_sel));
- clks[IMX7D_PWM3_ROOT_SRC] = imx_clk_mux("pwm3_src", base + 0xb600, 24, 3, pwm3_sel, ARRAY_SIZE(pwm3_sel));
- clks[IMX7D_PWM4_ROOT_SRC] = imx_clk_mux("pwm4_src", base + 0xb680, 24, 3, pwm4_sel, ARRAY_SIZE(pwm4_sel));
- clks[IMX7D_FLEXTIMER1_ROOT_SRC] = imx_clk_mux("flextimer1_src", base + 0xb700, 24, 3, flextimer1_sel, ARRAY_SIZE(flextimer1_sel));
- clks[IMX7D_FLEXTIMER2_ROOT_SRC] = imx_clk_mux("flextimer2_src", base + 0xb780, 24, 3, flextimer2_sel, ARRAY_SIZE(flextimer2_sel));
- clks[IMX7D_SIM1_ROOT_SRC] = imx_clk_mux("sim1_src", base + 0xb800, 24, 3, sim1_sel, ARRAY_SIZE(sim1_sel));
- clks[IMX7D_SIM2_ROOT_SRC] = imx_clk_mux("sim2_src", base + 0xb880, 24, 3, sim2_sel, ARRAY_SIZE(sim2_sel));
- clks[IMX7D_GPT1_ROOT_SRC] = imx_clk_mux("gpt1_src", base + 0xb900, 24, 3, gpt1_sel, ARRAY_SIZE(gpt1_sel));
- clks[IMX7D_GPT2_ROOT_SRC] = imx_clk_mux("gpt2_src", base + 0xb980, 24, 3, gpt2_sel, ARRAY_SIZE(gpt2_sel));
- clks[IMX7D_GPT3_ROOT_SRC] = imx_clk_mux("gpt3_src", base + 0xba00, 24, 3, gpt3_sel, ARRAY_SIZE(gpt3_sel));
- clks[IMX7D_GPT4_ROOT_SRC] = imx_clk_mux("gpt4_src", base + 0xba80, 24, 3, gpt4_sel, ARRAY_SIZE(gpt4_sel));
- clks[IMX7D_TRACE_ROOT_SRC] = imx_clk_mux("trace_src", base + 0xbb00, 24, 3, trace_sel, ARRAY_SIZE(trace_sel));
- clks[IMX7D_WDOG_ROOT_SRC] = imx_clk_mux("wdog_src", base + 0xbb80, 24, 3, wdog_sel, ARRAY_SIZE(wdog_sel));
- clks[IMX7D_CSI_MCLK_ROOT_SRC] = imx_clk_mux("csi_mclk_src", base + 0xbc00, 24, 3, csi_mclk_sel, ARRAY_SIZE(csi_mclk_sel));
- clks[IMX7D_AUDIO_MCLK_ROOT_SRC] = imx_clk_mux("audio_mclk_src", base + 0xbc80, 24, 3, audio_mclk_sel, ARRAY_SIZE(audio_mclk_sel));
- clks[IMX7D_WRCLK_ROOT_SRC] = imx_clk_mux("wrclk_src", base + 0xbd00, 24, 3, wrclk_sel, ARRAY_SIZE(wrclk_sel));
- clks[IMX7D_CLKO1_ROOT_SRC] = imx_clk_mux("clko1_src", base + 0xbd80, 24, 3, clko1_sel, ARRAY_SIZE(clko1_sel));
- clks[IMX7D_CLKO2_ROOT_SRC] = imx_clk_mux("clko2_src", base + 0xbe00, 24, 3, clko2_sel, ARRAY_SIZE(clko2_sel));
-
- clks[IMX7D_ARM_A7_ROOT_CG] = imx_clk_gate("arm_a7_cg", "arm_a7_src", base + 0x8000, 28);
- clks[IMX7D_ARM_M4_ROOT_CG] = imx_clk_gate("arm_m4_cg", "arm_m4_src", base + 0x8080, 28);
- clks[IMX7D_ARM_M0_ROOT_CG] = imx_clk_gate("arm_m0_cg", "arm_m0_src", base + 0x8100, 28);
- clks[IMX7D_MAIN_AXI_ROOT_CG] = imx_clk_gate("axi_cg", "axi_src", base + 0x8800, 28);
- clks[IMX7D_DISP_AXI_ROOT_CG] = imx_clk_gate("disp_axi_cg", "disp_axi_src", base + 0x8880, 28);
- clks[IMX7D_ENET_AXI_ROOT_CG] = imx_clk_gate("enet_axi_cg", "enet_axi_src", base + 0x8900, 28);
- clks[IMX7D_NAND_USDHC_BUS_ROOT_CG] = imx_clk_gate("nand_usdhc_cg", "nand_usdhc_src", base + 0x8980, 28);
- clks[IMX7D_AHB_CHANNEL_ROOT_CG] = imx_clk_gate("ahb_cg", "ahb_src", base + 0x9000, 28);
- clks[IMX7D_DRAM_PHYM_ROOT_CG] = imx_clk_gate("dram_phym_cg", "dram_phym_src", base + 0x9800, 28);
- clks[IMX7D_DRAM_ROOT_CG] = imx_clk_gate("dram_cg", "dram_src", base + 0x9880, 28);
- clks[IMX7D_DRAM_PHYM_ALT_ROOT_CG] = imx_clk_gate("dram_phym_alt_cg", "dram_phym_alt_src", base + 0xa000, 28);
- clks[IMX7D_DRAM_ALT_ROOT_CG] = imx_clk_gate("dram_alt_cg", "dram_alt_src", base + 0xa080, 28);
- clks[IMX7D_USB_HSIC_ROOT_CG] = imx_clk_gate("usb_hsic_cg", "usb_hsic_src", base + 0xa100, 28);
- clks[IMX7D_PCIE_CTRL_ROOT_CG] = imx_clk_gate("pcie_ctrl_cg", "pcie_ctrl_src", base + 0xa180, 28);
- clks[IMX7D_PCIE_PHY_ROOT_CG] = imx_clk_gate("pcie_phy_cg", "pcie_phy_src", base + 0xa200, 28);
- clks[IMX7D_EPDC_PIXEL_ROOT_CG] = imx_clk_gate("epdc_pixel_cg", "epdc_pixel_src", base + 0xa280, 28);
- clks[IMX7D_LCDIF_PIXEL_ROOT_CG] = imx_clk_gate("lcdif_pixel_cg", "lcdif_pixel_src", base + 0xa300, 28);
- clks[IMX7D_MIPI_DSI_ROOT_CG] = imx_clk_gate("mipi_dsi_cg", "mipi_dsi_src", base + 0xa380, 28);
- clks[IMX7D_MIPI_CSI_ROOT_CG] = imx_clk_gate("mipi_csi_cg", "mipi_csi_src", base + 0xa400, 28);
- clks[IMX7D_MIPI_DPHY_ROOT_CG] = imx_clk_gate("mipi_dphy_cg", "mipi_dphy_src", base + 0xa480, 28);
- clks[IMX7D_SAI1_ROOT_CG] = imx_clk_gate("sai1_cg", "sai1_src", base + 0xa500, 28);
- clks[IMX7D_SAI2_ROOT_CG] = imx_clk_gate("sai2_cg", "sai2_src", base + 0xa580, 28);
- clks[IMX7D_SAI3_ROOT_CG] = imx_clk_gate("sai3_cg", "sai3_src", base + 0xa600, 28);
- clks[IMX7D_SPDIF_ROOT_CG] = imx_clk_gate("spdif_cg", "spdif_src", base + 0xa680, 28);
- clks[IMX7D_ENET1_REF_ROOT_CG] = imx_clk_gate("enet1_ref_cg", "enet1_ref_src", base + 0xa700, 28);
- clks[IMX7D_ENET1_TIME_ROOT_CG] = imx_clk_gate("enet1_time_cg", "enet1_time_src", base + 0xa780, 28);
- clks[IMX7D_ENET2_REF_ROOT_CG] = imx_clk_gate("enet2_ref_cg", "enet2_ref_src", base + 0xa800, 28);
- clks[IMX7D_ENET2_TIME_ROOT_CG] = imx_clk_gate("enet2_time_cg", "enet2_time_src", base + 0xa880, 28);
- clks[IMX7D_ENET_PHY_REF_ROOT_CG] = imx_clk_gate("enet_phy_ref_cg", "enet_phy_ref_src", base + 0xa900, 28);
- clks[IMX7D_EIM_ROOT_CG] = imx_clk_gate("eim_cg", "eim_src", base + 0xa980, 28);
- clks[IMX7D_NAND_ROOT_CG] = imx_clk_gate("nand_cg", "nand_src", base + 0xaa00, 28);
- clks[IMX7D_QSPI_ROOT_CG] = imx_clk_gate("qspi_cg", "qspi_src", base + 0xaa80, 28);
- clks[IMX7D_USDHC1_ROOT_CG] = imx_clk_gate("usdhc1_cg", "usdhc1_src", base + 0xab00, 28);
- clks[IMX7D_USDHC2_ROOT_CG] = imx_clk_gate("usdhc2_cg", "usdhc2_src", base + 0xab80, 28);
- clks[IMX7D_USDHC3_ROOT_CG] = imx_clk_gate("usdhc3_cg", "usdhc3_src", base + 0xac00, 28);
- clks[IMX7D_CAN1_ROOT_CG] = imx_clk_gate("can1_cg", "can1_src", base + 0xac80, 28);
- clks[IMX7D_CAN2_ROOT_CG] = imx_clk_gate("can2_cg", "can2_src", base + 0xad00, 28);
- clks[IMX7D_I2C1_ROOT_CG] = imx_clk_gate("i2c1_cg", "i2c1_src", base + 0xad80, 28);
- clks[IMX7D_I2C2_ROOT_CG] = imx_clk_gate("i2c2_cg", "i2c2_src", base + 0xae00, 28);
- clks[IMX7D_I2C3_ROOT_CG] = imx_clk_gate("i2c3_cg", "i2c3_src", base + 0xae80, 28);
- clks[IMX7D_I2C4_ROOT_CG] = imx_clk_gate("i2c4_cg", "i2c4_src", base + 0xaf00, 28);
- clks[IMX7D_UART1_ROOT_CG] = imx_clk_gate("uart1_cg", "uart1_src", base + 0xaf80, 28);
- clks[IMX7D_UART2_ROOT_CG] = imx_clk_gate("uart2_cg", "uart2_src", base + 0xb000, 28);
- clks[IMX7D_UART3_ROOT_CG] = imx_clk_gate("uart3_cg", "uart3_src", base + 0xb080, 28);
- clks[IMX7D_UART4_ROOT_CG] = imx_clk_gate("uart4_cg", "uart4_src", base + 0xb100, 28);
- clks[IMX7D_UART5_ROOT_CG] = imx_clk_gate("uart5_cg", "uart5_src", base + 0xb180, 28);
- clks[IMX7D_UART6_ROOT_CG] = imx_clk_gate("uart6_cg", "uart6_src", base + 0xb200, 28);
- clks[IMX7D_UART7_ROOT_CG] = imx_clk_gate("uart7_cg", "uart7_src", base + 0xb280, 28);
- clks[IMX7D_ECSPI1_ROOT_CG] = imx_clk_gate("ecspi1_cg", "ecspi1_src", base + 0xb300, 28);
- clks[IMX7D_ECSPI2_ROOT_CG] = imx_clk_gate("ecspi2_cg", "ecspi2_src", base + 0xb380, 28);
- clks[IMX7D_ECSPI3_ROOT_CG] = imx_clk_gate("ecspi3_cg", "ecspi3_src", base + 0xb400, 28);
- clks[IMX7D_ECSPI4_ROOT_CG] = imx_clk_gate("ecspi4_cg", "ecspi4_src", base + 0xb480, 28);
- clks[IMX7D_PWM1_ROOT_CG] = imx_clk_gate("pwm1_cg", "pwm1_src", base + 0xb500, 28);
- clks[IMX7D_PWM2_ROOT_CG] = imx_clk_gate("pwm2_cg", "pwm2_src", base + 0xb580, 28);
- clks[IMX7D_PWM3_ROOT_CG] = imx_clk_gate("pwm3_cg", "pwm3_src", base + 0xb600, 28);
- clks[IMX7D_PWM4_ROOT_CG] = imx_clk_gate("pwm4_cg", "pwm4_src", base + 0xb680, 28);
- clks[IMX7D_FLEXTIMER1_ROOT_CG] = imx_clk_gate("flextimer1_cg", "flextimer1_src", base + 0xb700, 28);
- clks[IMX7D_FLEXTIMER2_ROOT_CG] = imx_clk_gate("flextimer2_cg", "flextimer2_src", base + 0xb780, 28);
- clks[IMX7D_SIM1_ROOT_CG] = imx_clk_gate("sim1_cg", "sim1_src", base + 0xb800, 28);
- clks[IMX7D_SIM2_ROOT_CG] = imx_clk_gate("sim2_cg", "sim2_src", base + 0xb880, 28);
- clks[IMX7D_GPT1_ROOT_CG] = imx_clk_gate("gpt1_cg", "gpt1_src", base + 0xb900, 28);
- clks[IMX7D_GPT2_ROOT_CG] = imx_clk_gate("gpt2_cg", "gpt2_src", base + 0xb980, 28);
- clks[IMX7D_GPT3_ROOT_CG] = imx_clk_gate("gpt3_cg", "gpt3_src", base + 0xbA00, 28);
- clks[IMX7D_GPT4_ROOT_CG] = imx_clk_gate("gpt4_cg", "gpt4_src", base + 0xbA80, 28);
- clks[IMX7D_TRACE_ROOT_CG] = imx_clk_gate("trace_cg", "trace_src", base + 0xbb00, 28);
- clks[IMX7D_WDOG_ROOT_CG] = imx_clk_gate("wdog_cg", "wdog_src", base + 0xbb80, 28);
- clks[IMX7D_CSI_MCLK_ROOT_CG] = imx_clk_gate("csi_mclk_cg", "csi_mclk_src", base + 0xbc00, 28);
- clks[IMX7D_AUDIO_MCLK_ROOT_CG] = imx_clk_gate("audio_mclk_cg", "audio_mclk_src", base + 0xbc80, 28);
- clks[IMX7D_WRCLK_ROOT_CG] = imx_clk_gate("wrclk_cg", "wrclk_src", base + 0xbd00, 28);
- clks[IMX7D_CLKO1_ROOT_CG] = imx_clk_gate("clko1_cg", "clko1_src", base + 0xbd80, 28);
- clks[IMX7D_CLKO2_ROOT_CG] = imx_clk_gate("clko2_cg", "clko2_src", base + 0xbe00, 28);
-
- clks[IMX7D_MAIN_AXI_ROOT_PRE_DIV] = imx_clk_divider("axi_pre_div", "axi_cg", base + 0x8800, 16, 3);
- clks[IMX7D_DISP_AXI_ROOT_PRE_DIV] = imx_clk_divider("disp_axi_pre_div", "disp_axi_cg", base + 0x8880, 16, 3);
- clks[IMX7D_ENET_AXI_ROOT_PRE_DIV] = imx_clk_divider("enet_axi_pre_div", "enet_axi_cg", base + 0x8900, 16, 3);
- clks[IMX7D_NAND_USDHC_BUS_ROOT_PRE_DIV] = imx_clk_divider("nand_usdhc_pre_div", "nand_usdhc_cg", base + 0x8980, 16, 3);
- clks[IMX7D_AHB_CHANNEL_ROOT_PRE_DIV] = imx_clk_divider("ahb_pre_div", "ahb_cg", base + 0x9000, 16, 3);
- clks[IMX7D_DRAM_PHYM_ALT_ROOT_PRE_DIV] = imx_clk_divider("dram_phym_alt_pre_div", "dram_phym_alt_cg", base + 0xa000, 16, 3);
- clks[IMX7D_DRAM_ALT_ROOT_PRE_DIV] = imx_clk_divider("dram_alt_pre_div", "dram_alt_cg", base + 0xa080, 16, 3);
- clks[IMX7D_USB_HSIC_ROOT_PRE_DIV] = imx_clk_divider("usb_hsic_pre_div", "usb_hsic_cg", base + 0xa100, 16, 3);
- clks[IMX7D_PCIE_CTRL_ROOT_PRE_DIV] = imx_clk_divider("pcie_ctrl_pre_div", "pcie_ctrl_cg", base + 0xa180, 16, 3);
- clks[IMX7D_PCIE_PHY_ROOT_PRE_DIV] = imx_clk_divider("pcie_phy_pre_div", "pcie_phy_cg", base + 0xa200, 16, 3);
- clks[IMX7D_EPDC_PIXEL_ROOT_PRE_DIV] = imx_clk_divider("epdc_pixel_pre_div", "epdc_pixel_cg", base + 0xa280, 16, 3);
- clks[IMX7D_LCDIF_PIXEL_ROOT_PRE_DIV] = imx_clk_divider("lcdif_pixel_pre_div", "lcdif_pixel_cg", base + 0xa300, 16, 3);
- clks[IMX7D_MIPI_DSI_ROOT_PRE_DIV] = imx_clk_divider("mipi_dsi_pre_div", "mipi_dsi_cg", base + 0xa380, 16, 3);
- clks[IMX7D_MIPI_CSI_ROOT_PRE_DIV] = imx_clk_divider("mipi_csi_pre_div", "mipi_csi_cg", base + 0xa400, 16, 3);
- clks[IMX7D_MIPI_DPHY_ROOT_PRE_DIV] = imx_clk_divider("mipi_dphy_pre_div", "mipi_dphy_cg", base + 0xa480, 16, 3);
- clks[IMX7D_SAI1_ROOT_PRE_DIV] = imx_clk_divider("sai1_pre_div", "sai1_cg", base + 0xa500, 16, 3);
- clks[IMX7D_SAI2_ROOT_PRE_DIV] = imx_clk_divider("sai2_pre_div", "sai2_cg", base + 0xa580, 16, 3);
- clks[IMX7D_SAI3_ROOT_PRE_DIV] = imx_clk_divider("sai3_pre_div", "sai3_cg", base + 0xa600, 16, 3);
- clks[IMX7D_SPDIF_ROOT_PRE_DIV] = imx_clk_divider("spdif_pre_div", "spdif_cg", base + 0xa680, 16, 3);
- clks[IMX7D_ENET1_REF_ROOT_PRE_DIV] = imx_clk_divider("enet1_ref_pre_div", "enet1_ref_cg", base + 0xa700, 16, 3);
- clks[IMX7D_ENET1_TIME_ROOT_PRE_DIV] = imx_clk_divider("enet1_time_pre_div", "enet1_time_cg", base + 0xa780, 16, 3);
- clks[IMX7D_ENET2_REF_ROOT_PRE_DIV] = imx_clk_divider("enet2_ref_pre_div", "enet2_ref_cg", base + 0xa800, 16, 3);
- clks[IMX7D_ENET2_TIME_ROOT_PRE_DIV] = imx_clk_divider("enet2_time_pre_div", "enet2_time_cg", base + 0xa880, 16, 3);
- clks[IMX7D_ENET_PHY_REF_ROOT_PRE_DIV] = imx_clk_divider("enet_phy_ref_pre_div", "enet_phy_ref_cg", base + 0xa900, 16, 3);
- clks[IMX7D_EIM_ROOT_PRE_DIV] = imx_clk_divider("eim_pre_div", "eim_cg", base + 0xa980, 16, 3);
- clks[IMX7D_NAND_ROOT_PRE_DIV] = imx_clk_divider("nand_pre_div", "nand_cg", base + 0xaa00, 16, 3);
- clks[IMX7D_QSPI_ROOT_PRE_DIV] = imx_clk_divider("qspi_pre_div", "qspi_cg", base + 0xaa80, 16, 3);
- clks[IMX7D_USDHC1_ROOT_PRE_DIV] = imx_clk_divider("usdhc1_pre_div", "usdhc1_cg", base + 0xab00, 16, 3);
- clks[IMX7D_USDHC2_ROOT_PRE_DIV] = imx_clk_divider("usdhc2_pre_div", "usdhc2_cg", base + 0xab80, 16, 3);
- clks[IMX7D_USDHC3_ROOT_PRE_DIV] = imx_clk_divider("usdhc3_pre_div", "usdhc3_cg", base + 0xac00, 16, 3);
- clks[IMX7D_CAN1_ROOT_PRE_DIV] = imx_clk_divider("can1_pre_div", "can1_cg", base + 0xac80, 16, 3);
- clks[IMX7D_CAN2_ROOT_PRE_DIV] = imx_clk_divider("can2_pre_div", "can2_cg", base + 0xad00, 16, 3);
- clks[IMX7D_I2C1_ROOT_PRE_DIV] = imx_clk_divider("i2c1_pre_div", "i2c1_cg", base + 0xad80, 16, 3);
- clks[IMX7D_I2C2_ROOT_PRE_DIV] = imx_clk_divider("i2c2_pre_div", "i2c2_cg", base + 0xae00, 16, 3);
- clks[IMX7D_I2C3_ROOT_PRE_DIV] = imx_clk_divider("i2c3_pre_div", "i2c3_cg", base + 0xae80, 16, 3);
- clks[IMX7D_I2C4_ROOT_PRE_DIV] = imx_clk_divider("i2c4_pre_div", "i2c4_cg", base + 0xaf00, 16, 3);
- clks[IMX7D_UART1_ROOT_PRE_DIV] = imx_clk_divider("uart1_pre_div", "uart1_cg", base + 0xaf80, 16, 3);
- clks[IMX7D_UART2_ROOT_PRE_DIV] = imx_clk_divider("uart2_pre_div", "uart2_cg", base + 0xb000, 16, 3);
- clks[IMX7D_UART3_ROOT_PRE_DIV] = imx_clk_divider("uart3_pre_div", "uart3_cg", base + 0xb080, 16, 3);
- clks[IMX7D_UART4_ROOT_PRE_DIV] = imx_clk_divider("uart4_pre_div", "uart4_cg", base + 0xb100, 16, 3);
- clks[IMX7D_UART5_ROOT_PRE_DIV] = imx_clk_divider("uart5_pre_div", "uart5_cg", base + 0xb180, 16, 3);
- clks[IMX7D_UART6_ROOT_PRE_DIV] = imx_clk_divider("uart6_pre_div", "uart6_cg", base + 0xb200, 16, 3);
- clks[IMX7D_UART7_ROOT_PRE_DIV] = imx_clk_divider("uart7_pre_div", "uart7_cg", base + 0xb280, 16, 3);
- clks[IMX7D_ECSPI1_ROOT_PRE_DIV] = imx_clk_divider("ecspi1_pre_div", "ecspi1_cg", base + 0xb300, 16, 3);
- clks[IMX7D_ECSPI2_ROOT_PRE_DIV] = imx_clk_divider("ecspi2_pre_div", "ecspi2_cg", base + 0xb380, 16, 3);
- clks[IMX7D_ECSPI3_ROOT_PRE_DIV] = imx_clk_divider("ecspi3_pre_div", "ecspi3_cg", base + 0xb400, 16, 3);
- clks[IMX7D_ECSPI4_ROOT_PRE_DIV] = imx_clk_divider("ecspi4_pre_div", "ecspi4_cg", base + 0xb480, 16, 3);
- clks[IMX7D_PWM1_ROOT_PRE_DIV] = imx_clk_divider("pwm1_pre_div", "pwm1_cg", base + 0xb500, 16, 3);
- clks[IMX7D_PWM2_ROOT_PRE_DIV] = imx_clk_divider("pwm2_pre_div", "pwm2_cg", base + 0xb580, 16, 3);
- clks[IMX7D_PWM3_ROOT_PRE_DIV] = imx_clk_divider("pwm3_pre_div", "pwm3_cg", base + 0xb600, 16, 3);
- clks[IMX7D_PWM4_ROOT_PRE_DIV] = imx_clk_divider("pwm4_pre_div", "pwm4_cg", base + 0xb680, 16, 3);
- clks[IMX7D_FLEXTIMER1_ROOT_PRE_DIV] = imx_clk_divider("flextimer1_pre_div", "flextimer1_cg", base + 0xb700, 16, 3);
- clks[IMX7D_FLEXTIMER2_ROOT_PRE_DIV] = imx_clk_divider("flextimer2_pre_div", "flextimer2_cg", base + 0xb780, 16, 3);
- clks[IMX7D_SIM1_ROOT_PRE_DIV] = imx_clk_divider("sim1_pre_div", "sim1_cg", base + 0xb800, 16, 3);
- clks[IMX7D_SIM2_ROOT_PRE_DIV] = imx_clk_divider("sim2_pre_div", "sim2_cg", base + 0xb880, 16, 3);
- clks[IMX7D_GPT1_ROOT_PRE_DIV] = imx_clk_divider("gpt1_pre_div", "gpt1_cg", base + 0xb900, 16, 3);
- clks[IMX7D_GPT2_ROOT_PRE_DIV] = imx_clk_divider("gpt2_pre_div", "gpt2_cg", base + 0xb980, 16, 3);
- clks[IMX7D_GPT3_ROOT_PRE_DIV] = imx_clk_divider("gpt3_pre_div", "gpt3_cg", base + 0xba00, 16, 3);
- clks[IMX7D_GPT4_ROOT_PRE_DIV] = imx_clk_divider("gpt4_pre_div", "gpt4_cg", base + 0xba80, 16, 3);
- clks[IMX7D_TRACE_ROOT_PRE_DIV] = imx_clk_divider("trace_pre_div", "trace_cg", base + 0xbb00, 16, 3);
- clks[IMX7D_WDOG_ROOT_PRE_DIV] = imx_clk_divider("wdog_pre_div", "wdog_cg", base + 0xbb80, 16, 3);
- clks[IMX7D_CSI_MCLK_ROOT_PRE_DIV] = imx_clk_divider("csi_mclk_pre_div", "csi_mclk_cg", base + 0xbc00, 16, 3);
- clks[IMX7D_AUDIO_MCLK_ROOT_PRE_DIV] = imx_clk_divider("audio_mclk_pre_div", "audio_mclk_cg", base + 0xbc80, 16, 3);
- clks[IMX7D_WRCLK_ROOT_PRE_DIV] = imx_clk_divider("wrclk_pre_div", "wrclk_cg", base + 0xbd00, 16, 3);
- clks[IMX7D_CLKO1_ROOT_PRE_DIV] = imx_clk_divider("clko1_pre_div", "clko1_cg", base + 0xbd80, 16, 3);
- clks[IMX7D_CLKO2_ROOT_PRE_DIV] = imx_clk_divider("clko2_pre_div", "clko2_cg", base + 0xbe00, 16, 3);
-
- clks[IMX7D_ARM_A7_ROOT_DIV] = imx_clk_divider("arm_a7_div", "arm_a7_cg", base + 0x8000, 0, 3);
- clks[IMX7D_ARM_M4_ROOT_DIV] = imx_clk_divider("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3);
- clks[IMX7D_ARM_M0_ROOT_DIV] = imx_clk_divider("arm_m0_div", "arm_m0_cg", base + 0x8100, 0, 3);
- clks[IMX7D_MAIN_AXI_ROOT_DIV] = imx_clk_divider("axi_post_div", "axi_pre_div", base + 0x8800, 0, 6);
- clks[IMX7D_DISP_AXI_ROOT_DIV] = imx_clk_divider("disp_axi_post_div", "disp_axi_pre_div", base + 0x8880, 0, 6);
- clks[IMX7D_ENET_AXI_ROOT_DIV] = imx_clk_divider("enet_axi_post_div", "enet_axi_pre_div", base + 0x8900, 0, 6);
- clks[IMX7D_NAND_USDHC_BUS_ROOT_DIV] = imx_clk_divider("nand_usdhc_post_div", "nand_usdhc_pre_div", base + 0x8980, 0, 6);
- clks[IMX7D_AHB_CHANNEL_ROOT_DIV] = imx_clk_divider("ahb_post_div", "ahb_pre_div", base + 0x9000, 0, 6);
- clks[IMX7D_DRAM_ROOT_DIV] = imx_clk_divider("dram_post_div", "dram_cg", base + 0x9880, 0, 3);
- clks[IMX7D_DRAM_PHYM_ALT_ROOT_DIV] = imx_clk_divider("dram_phym_alt_post_div", "dram_phym_alt_pre_div", base + 0xa000, 0, 3);
- clks[IMX7D_DRAM_ALT_ROOT_DIV] = imx_clk_divider("dram_alt_post_div", "dram_alt_pre_div", base + 0xa080, 0, 3);
- clks[IMX7D_USB_HSIC_ROOT_DIV] = imx_clk_divider("usb_hsic_post_div", "usb_hsic_pre_div", base + 0xa100, 0, 6);
- clks[IMX7D_PCIE_CTRL_ROOT_DIV] = imx_clk_divider("pcie_ctrl_post_div", "pcie_ctrl_pre_div", base + 0xa180, 0, 6);
- clks[IMX7D_PCIE_PHY_ROOT_DIV] = imx_clk_divider("pcie_phy_post_div", "pcie_phy_pre_div", base + 0xa200, 0, 6);
- clks[IMX7D_EPDC_PIXEL_ROOT_DIV] = imx_clk_divider("epdc_pixel_post_div", "epdc_pixel_pre_div", base + 0xa280, 0, 6);
- clks[IMX7D_LCDIF_PIXEL_ROOT_DIV] = imx_clk_divider("lcdif_pixel_post_div", "lcdif_pixel_pre_div", base + 0xa300, 0, 6);
- clks[IMX7D_MIPI_DSI_ROOT_DIV] = imx_clk_divider("mipi_dsi_post_div", "mipi_dsi_pre_div", base + 0xa380, 0, 6);
- clks[IMX7D_MIPI_CSI_ROOT_DIV] = imx_clk_divider("mipi_csi_post_div", "mipi_csi_pre_div", base + 0xa400, 0, 6);
- clks[IMX7D_MIPI_DPHY_ROOT_DIV] = imx_clk_divider("mipi_dphy_post_div", "mipi_csi_dphy_div", base + 0xa480, 0, 6);
- clks[IMX7D_SAI1_ROOT_DIV] = imx_clk_divider("sai1_post_div", "sai1_pre_div", base + 0xa500, 0, 6);
- clks[IMX7D_SAI2_ROOT_DIV] = imx_clk_divider("sai2_post_div", "sai2_pre_div", base + 0xa580, 0, 6);
- clks[IMX7D_SAI3_ROOT_DIV] = imx_clk_divider("sai3_post_div", "sai3_pre_div", base + 0xa600, 0, 6);
- clks[IMX7D_SPDIF_ROOT_DIV] = imx_clk_divider("spdif_post_div", "spdif_pre_div", base + 0xa680, 0, 6);
- clks[IMX7D_ENET1_REF_ROOT_DIV] = imx_clk_divider("enet1_ref_post_div", "enet1_ref_pre_div", base + 0xa700, 0, 6);
- clks[IMX7D_ENET1_TIME_ROOT_DIV] = imx_clk_divider("enet1_time_post_div", "enet1_time_pre_div", base + 0xa780, 0, 6);
- clks[IMX7D_ENET2_REF_ROOT_DIV] = imx_clk_divider("enet2_ref_post_div", "enet2_ref_pre_div", base + 0xa800, 0, 6);
- clks[IMX7D_ENET2_TIME_ROOT_DIV] = imx_clk_divider("enet2_time_post_div", "enet2_time_pre_div", base + 0xa880, 0, 6);
- clks[IMX7D_ENET_PHY_REF_ROOT_DIV] = imx_clk_divider("enet_phy_ref_post_div", "enet_phy_ref_pre_div", base + 0xa900, 0, 6);
- clks[IMX7D_EIM_ROOT_DIV] = imx_clk_divider("eim_post_div", "eim_pre_div", base + 0xa980, 0, 6);
- clks[IMX7D_NAND_ROOT_DIV] = imx_clk_divider("nand_post_div", "nand_pre_div", base + 0xaa00, 0, 6);
- clks[IMX7D_QSPI_ROOT_DIV] = imx_clk_divider("qspi_post_div", "qspi_pre_div", base + 0xaa80, 0, 6);
- clks[IMX7D_USDHC1_ROOT_DIV] = imx_clk_divider("usdhc1_post_div", "usdhc1_pre_div", base + 0xab00, 0, 6);
- clks[IMX7D_USDHC2_ROOT_DIV] = imx_clk_divider("usdhc2_post_div", "usdhc2_pre_div", base + 0xab80, 0, 6);
- clks[IMX7D_USDHC3_ROOT_DIV] = imx_clk_divider("usdhc3_post_div", "usdhc3_pre_div", base + 0xac00, 0, 6);
- clks[IMX7D_CAN1_ROOT_DIV] = imx_clk_divider("can1_post_div", "can1_pre_div", base + 0xac80, 0, 6);
- clks[IMX7D_CAN2_ROOT_DIV] = imx_clk_divider("can2_post_div", "can2_pre_div", base + 0xad00, 0, 6);
- clks[IMX7D_I2C1_ROOT_DIV] = imx_clk_divider("i2c1_post_div", "i2c1_pre_div", base + 0xad80, 0, 6);
- clks[IMX7D_I2C2_ROOT_DIV] = imx_clk_divider("i2c2_post_div", "i2c2_pre_div", base + 0xae00, 0, 6);
- clks[IMX7D_I2C3_ROOT_DIV] = imx_clk_divider("i2c3_post_div", "i2c3_pre_div", base + 0xae80, 0, 6);
- clks[IMX7D_I2C4_ROOT_DIV] = imx_clk_divider("i2c4_post_div", "i2c4_pre_div", base + 0xaf00, 0, 6);
- clks[IMX7D_UART1_ROOT_DIV] = imx_clk_divider("uart1_post_div", "uart1_pre_div", base + 0xaf80, 0, 6);
- clks[IMX7D_UART2_ROOT_DIV] = imx_clk_divider("uart2_post_div", "uart2_pre_div", base + 0xb000, 0, 6);
- clks[IMX7D_UART3_ROOT_DIV] = imx_clk_divider("uart3_post_div", "uart3_pre_div", base + 0xb080, 0, 6);
- clks[IMX7D_UART4_ROOT_DIV] = imx_clk_divider("uart4_post_div", "uart4_pre_div", base + 0xb100, 0, 6);
- clks[IMX7D_UART5_ROOT_DIV] = imx_clk_divider("uart5_post_div", "uart5_pre_div", base + 0xb180, 0, 6);
- clks[IMX7D_UART6_ROOT_DIV] = imx_clk_divider("uart6_post_div", "uart6_pre_div", base + 0xb200, 0, 6);
- clks[IMX7D_UART7_ROOT_DIV] = imx_clk_divider("uart7_post_div", "uart7_pre_div", base + 0xb280, 0, 6);
- clks[IMX7D_ECSPI1_ROOT_DIV] = imx_clk_divider("ecspi1_post_div", "ecspi1_pre_div", base + 0xb300, 0, 6);
- clks[IMX7D_ECSPI2_ROOT_DIV] = imx_clk_divider("ecspi2_post_div", "ecspi2_pre_div", base + 0xb380, 0, 6);
- clks[IMX7D_ECSPI3_ROOT_DIV] = imx_clk_divider("ecspi3_post_div", "ecspi3_pre_div", base + 0xb400, 0, 6);
- clks[IMX7D_ECSPI4_ROOT_DIV] = imx_clk_divider("ecspi4_post_div", "ecspi4_pre_div", base + 0xb480, 0, 6);
- clks[IMX7D_PWM1_ROOT_DIV] = imx_clk_divider("pwm1_post_div", "pwm1_pre_div", base + 0xb500, 0, 6);
- clks[IMX7D_PWM2_ROOT_DIV] = imx_clk_divider("pwm2_post_div", "pwm2_pre_div", base + 0xb580, 0, 6);
- clks[IMX7D_PWM3_ROOT_DIV] = imx_clk_divider("pwm3_post_div", "pwm3_pre_div", base + 0xb600, 0, 6);
- clks[IMX7D_PWM4_ROOT_DIV] = imx_clk_divider("pwm4_post_div", "pwm4_pre_div", base + 0xb680, 0, 6);
- clks[IMX7D_FLEXTIMER1_ROOT_DIV] = imx_clk_divider("flextimer1_post_div", "flextimer1_pre_div", base + 0xb700, 0, 6);
- clks[IMX7D_FLEXTIMER2_ROOT_DIV] = imx_clk_divider("flextimer2_post_div", "flextimer2_pre_div", base + 0xb780, 0, 6);
- clks[IMX7D_SIM1_ROOT_DIV] = imx_clk_divider("sim1_post_div", "sim1_pre_div", base + 0xb800, 0, 6);
- clks[IMX7D_SIM2_ROOT_DIV] = imx_clk_divider("sim2_post_div", "sim2_pre_div", base + 0xb880, 0, 6);
- clks[IMX7D_GPT1_ROOT_DIV] = imx_clk_divider("gpt1_post_div", "gpt1_pre_div", base + 0xb900, 0, 6);
- clks[IMX7D_GPT2_ROOT_DIV] = imx_clk_divider("gpt2_post_div", "gpt2_pre_div", base + 0xb980, 0, 6);
- clks[IMX7D_GPT3_ROOT_DIV] = imx_clk_divider("gpt3_post_div", "gpt3_pre_div", base + 0xba00, 0, 6);
- clks[IMX7D_GPT4_ROOT_DIV] = imx_clk_divider("gpt4_post_div", "gpt4_pre_div", base + 0xba80, 0, 6);
- clks[IMX7D_TRACE_ROOT_DIV] = imx_clk_divider("trace_post_div", "trace_pre_div", base + 0xbb00, 0, 6);
- clks[IMX7D_WDOG_ROOT_DIV] = imx_clk_divider("wdog_post_div", "wdog_pre_div", base + 0xbb80, 0, 6);
- clks[IMX7D_CSI_MCLK_ROOT_DIV] = imx_clk_divider("csi_mclk_post_div", "csi_mclk_pre_div", base + 0xbc00, 0, 6);
- clks[IMX7D_AUDIO_MCLK_ROOT_DIV] = imx_clk_divider("audio_mclk_post_div", "audio_mclk_pre_div", base + 0xbc80, 0, 6);
- clks[IMX7D_WRCLK_ROOT_DIV] = imx_clk_divider("wrclk_post_div", "wrclk_pre_div", base + 0xbd00, 0, 6);
- clks[IMX7D_CLKO1_ROOT_DIV] = imx_clk_divider("clko1_post_div", "clko1_pre_div", base + 0xbd80, 0, 6);
- clks[IMX7D_CLKO2_ROOT_DIV] = imx_clk_divider("clko2_post_div", "clko2_pre_div", base + 0xbe00, 0, 6);
-
- clks[IMX7D_ARM_A7_ROOT_CLK] = imx_clk_gate2("arm_a7_root_clk", "arm_a7_div", base + 0x4000, 0);
- clks[IMX7D_ARM_M4_ROOT_CLK] = imx_clk_gate2("arm_m4_root_clk", "arm_m4_div", base + 0x4010, 0);
- clks[IMX7D_ARM_M0_ROOT_CLK] = imx_clk_gate2("arm_m0_root_clk", "arm_m0_div", base + 0x4020, 0);
- clks[IMX7D_MAIN_AXI_ROOT_CLK] = imx_clk_gate2("main_axi_root_clk", "axi_post_div", base + 0x4040, 0);
- clks[IMX7D_DISP_AXI_ROOT_CLK] = imx_clk_gate2("disp_axi_root_clk", "disp_axi_post_div", base + 0x4050, 0);
- clks[IMX7D_ENET_AXI_ROOT_CLK] = imx_clk_gate2("enet_axi_root_clk", "enet_axi_post_div", base + 0x4060, 0);
- clks[IMX7D_OCRAM_CLK] = imx_clk_gate2("ocram_clk", "axi_post_div", base + 0x4110, 0);
- clks[IMX7D_OCRAM_S_CLK] = imx_clk_gate2("ocram_s_clk", "ahb_post_div", base + 0x4120, 0);
- clks[IMX7D_NAND_USDHC_BUS_ROOT_CLK] = imx_clk_gate2("nand_usdhc_root_clk", "nand_usdhc_post_div", base + 0x4130, 0);
- clks[IMX7D_AHB_CHANNEL_ROOT_CLK] = imx_clk_gate2("ahb_root_clk", "ahb_post_div", base + 0x4200, 0);
- clks[IMX7D_DRAM_ROOT_CLK] = imx_clk_gate2("dram_root_clk", "dram_post_div", base + 0x4130, 0);
- clks[IMX7D_DRAM_PHYM_ROOT_CLK] = imx_clk_gate2("dram_phym_root_clk", "dram_phym_cg", base + 0x4130, 0);
- clks[IMX7D_DRAM_PHYM_ALT_ROOT_CLK] = imx_clk_gate2("dram_phym_alt_root_clk", "dram_phym_alt_post_div", base + 0x4130, 0);
- clks[IMX7D_DRAM_ALT_ROOT_CLK] = imx_clk_gate2("dram_alt_root_clk", "dram_alt_post_div", base + 0x4130, 0);
- clks[IMX7D_USB_HSIC_ROOT_CLK] = imx_clk_gate2("usb_hsic_root_clk", "usb_hsic_post_div", base + 0x4420, 0);
- clks[IMX7D_PCIE_CTRL_ROOT_CLK] = imx_clk_gate2("pcie_ctrl_root_clk", "pcie_ctrl_post_div", base + 0x4600, 0);
- clks[IMX7D_PCIE_PHY_ROOT_CLK] = imx_clk_gate2("pcie_phy_root_clk", "pcie_phy_post_div", base + 0x4600, 0);
- clks[IMX7D_EPDC_PIXEL_ROOT_CLK] = imx_clk_gate2("epdc_pixel_root_clk", "epdc_pixel_post_div", base + 0x44a0, 0);
- clks[IMX7D_LCDIF_PIXEL_ROOT_CLK] = imx_clk_gate2("lcdif_pixel_root_clk", "lcdif_pixel_post_div", base + 0x44b0, 0);
- clks[IMX7D_MIPI_DSI_ROOT_CLK] = imx_clk_gate2("mipi_dsi_root_clk", "mipi_dsi_post_div", base + 0x4650, 0);
- clks[IMX7D_MIPI_CSI_ROOT_CLK] = imx_clk_gate2("mipi_csi_root_clk", "mipi_csi_post_div", base + 0x4640, 0);
- clks[IMX7D_MIPI_DPHY_ROOT_CLK] = imx_clk_gate2("mipi_dphy_root_clk", "mipi_dphy_post_div", base + 0x4660, 0);
- clks[IMX7D_SAI1_ROOT_CLK] = imx_clk_gate2("sai1_root_clk", "sai1_post_div", base + 0x48c0, 0);
- clks[IMX7D_SAI2_ROOT_CLK] = imx_clk_gate2("sai2_root_clk", "sai2_post_div", base + 0x48d0, 0);
- clks[IMX7D_SAI3_ROOT_CLK] = imx_clk_gate2("sai3_root_clk", "sai3_post_div", base + 0x48e0, 0);
- clks[IMX7D_SPDIF_ROOT_CLK] = imx_clk_gate2("spdif_root_clk", "spdif_post_div", base + 0x44d0, 0);
- clks[IMX7D_ENET1_REF_ROOT_CLK] = imx_clk_gate2("enet1_ref_root_clk", "enet1_ref_post_div", base + 0x44e0, 0);
- clks[IMX7D_ENET1_TIME_ROOT_CLK] = imx_clk_gate2("enet1_time_root_clk", "enet1_time_post_div", base + 0x44f0, 0);
- clks[IMX7D_ENET2_REF_ROOT_CLK] = imx_clk_gate2("enet2_ref_root_clk", "enet2_ref_post_div", base + 0x4500, 0);
- clks[IMX7D_ENET2_TIME_ROOT_CLK] = imx_clk_gate2("enet2_time_root_clk", "enet2_time_post_div", base + 0x4510, 0);
- clks[IMX7D_ENET_PHY_REF_ROOT_CLK] = imx_clk_gate2("enet_phy_ref_root_clk", "enet_phy_ref_post_div", base + 0x4520, 0);
- clks[IMX7D_EIM_ROOT_CLK] = imx_clk_gate2("eim_root_clk", "eim_post_div", base + 0x4160, 0);
- clks[IMX7D_NAND_ROOT_CLK] = imx_clk_gate2("nand_root_clk", "nand_post_div", base + 0x4140, 0);
- clks[IMX7D_QSPI_ROOT_CLK] = imx_clk_gate2("qspi_root_clk", "qspi_post_div", base + 0x4150, 0);
- clks[IMX7D_USDHC1_ROOT_CLK] = imx_clk_gate2("usdhc1_root_clk", "usdhc1_post_div", base + 0x46c0, 0);
- clks[IMX7D_USDHC2_ROOT_CLK] = imx_clk_gate2("usdhc2_root_clk", "usdhc2_post_div", base + 0x46d0, 0);
- clks[IMX7D_USDHC3_ROOT_CLK] = imx_clk_gate2("usdhc3_root_clk", "usdhc3_post_div", base + 0x46e0, 0);
- clks[IMX7D_CAN1_ROOT_CLK] = imx_clk_gate2("can1_root_clk", "can1_post_div", base + 0x4740, 0);
- clks[IMX7D_CAN2_ROOT_CLK] = imx_clk_gate2("can2_root_clk", "can2_post_div", base + 0x4750, 0);
- clks[IMX7D_I2C1_ROOT_CLK] = imx_clk_gate2("i2c1_root_clk", "i2c1_post_div", base + 0x4880, 0);
- clks[IMX7D_I2C2_ROOT_CLK] = imx_clk_gate2("i2c2_root_clk", "i2c2_post_div", base + 0x4890, 0);
- clks[IMX7D_I2C3_ROOT_CLK] = imx_clk_gate2("i2c3_root_clk", "i2c3_post_div", base + 0x48a0, 0);
- clks[IMX7D_I2C4_ROOT_CLK] = imx_clk_gate2("i2c4_root_clk", "i2c4_post_div", base + 0x48b0, 0);
- clks[IMX7D_UART1_ROOT_CLK] = imx_clk_gate2("uart1_root_clk", "uart1_post_div", base + 0x4940, 0);
- clks[IMX7D_UART2_ROOT_CLK] = imx_clk_gate2("uart2_root_clk", "uart2_post_div", base + 0x4950, 0);
- clks[IMX7D_UART3_ROOT_CLK] = imx_clk_gate2("uart3_root_clk", "uart3_post_div", base + 0x4960, 0);
- clks[IMX7D_UART4_ROOT_CLK] = imx_clk_gate2("uart4_root_clk", "uart4_post_div", base + 0x4970, 0);
- clks[IMX7D_UART5_ROOT_CLK] = imx_clk_gate2("uart5_root_clk", "uart5_post_div", base + 0x4980, 0);
- clks[IMX7D_UART6_ROOT_CLK] = imx_clk_gate2("uart6_root_clk", "uart6_post_div", base + 0x4990, 0);
- clks[IMX7D_UART7_ROOT_CLK] = imx_clk_gate2("uart7_root_clk", "uart7_post_div", base + 0x49a0, 0);
- clks[IMX7D_ECSPI1_ROOT_CLK] = imx_clk_gate2("ecspi1_root_clk", "ecspi1_post_div", base + 0x4780, 0);
- clks[IMX7D_ECSPI2_ROOT_CLK] = imx_clk_gate2("ecspi2_root_clk", "ecspi2_post_div", base + 0x4790, 0);
- clks[IMX7D_ECSPI3_ROOT_CLK] = imx_clk_gate2("ecspi3_root_clk", "ecspi3_post_div", base + 0x47a0, 0);
- clks[IMX7D_ECSPI4_ROOT_CLK] = imx_clk_gate2("ecspi4_root_clk", "ecspi4_post_div", base + 0x47b0, 0);
- clks[IMX7D_PWM1_ROOT_CLK] = imx_clk_gate2("pwm1_root_clk", "pwm1_post_div", base + 0x4840, 0);
- clks[IMX7D_PWM2_ROOT_CLK] = imx_clk_gate2("pwm2_root_clk", "pwm2_post_div", base + 0x4850, 0);
- clks[IMX7D_PWM3_ROOT_CLK] = imx_clk_gate2("pwm3_root_clk", "pwm3_post_div", base + 0x4860, 0);
- clks[IMX7D_PWM4_ROOT_CLK] = imx_clk_gate2("pwm4_root_clk", "pwm4_post_div", base + 0x4870, 0);
- clks[IMX7D_FLEXTIMER1_ROOT_CLK] = imx_clk_gate2("flextimer1_root_clk", "flextimer1_post_div", base + 0x4800, 0);
- clks[IMX7D_FLEXTIMER2_ROOT_CLK] = imx_clk_gate2("flextimer2_root_clk", "flextimer2_post_div", base + 0x4810, 0);
- clks[IMX7D_SIM1_ROOT_CLK] = imx_clk_gate2("sim1_root_clk", "sim1_post_div", base + 0x4900, 0);
- clks[IMX7D_SIM2_ROOT_CLK] = imx_clk_gate2("sim2_root_clk", "sim2_post_div", base + 0x4910, 0);
- clks[IMX7D_GPT1_ROOT_CLK] = imx_clk_gate2("gpt1_root_clk", "gpt1_post_div", base + 0x47c0, 0);
- clks[IMX7D_GPT2_ROOT_CLK] = imx_clk_gate2("gpt2_root_clk", "gpt2_post_div", base + 0x47d0, 0);
- clks[IMX7D_GPT3_ROOT_CLK] = imx_clk_gate2("gpt3_root_clk", "gpt3_post_div", base + 0x47e0, 0);
- clks[IMX7D_GPT4_ROOT_CLK] = imx_clk_gate2("gpt4_root_clk", "gpt4_post_div", base + 0x47f0, 0);
- clks[IMX7D_TRACE_ROOT_CLK] = imx_clk_gate2("trace_root_clk", "trace_post_div", base + 0x4300, 0);
- clks[IMX7D_WDOG1_ROOT_CLK] = imx_clk_gate2("wdog1_root_clk", "wdog_post_div", base + 0x49c0, 0);
- clks[IMX7D_WDOG2_ROOT_CLK] = imx_clk_gate2("wdog2_root_clk", "wdog_post_div", base + 0x49d0, 0);
- clks[IMX7D_WDOG3_ROOT_CLK] = imx_clk_gate2("wdog3_root_clk", "wdog_post_div", base + 0x49e0, 0);
- clks[IMX7D_WDOG4_ROOT_CLK] = imx_clk_gate2("wdog4_root_clk", "wdog_post_div", base + 0x49f0, 0);
- clks[IMX7D_CSI_MCLK_ROOT_CLK] = imx_clk_gate2("csi_mclk_root_clk", "csi_mclk_post_div", base + 0x4490, 0);
- clks[IMX7D_AUDIO_MCLK_ROOT_CLK] = imx_clk_gate2("audio_mclk_root_clk", "audio_mclk_post_div", base + 0x4790, 0);
- clks[IMX7D_WRCLK_ROOT_CLK] = imx_clk_gate2("wrclk_root_clk", "wrclk_post_div", base + 0x47a0, 0);
- clks[IMX7D_ADC_ROOT_CLK] = imx_clk_gate2("adc_root_clk", "ipg_root_clk", base + 0x4200, 0);
+ clks[IMX7D_ARM_A7_ROOT_SRC] = imx_clk_mux2("arm_a7_src", base + 0x8000, 24, 3, arm_a7_sel, ARRAY_SIZE(arm_a7_sel));
+ clks[IMX7D_ARM_M4_ROOT_SRC] = imx_clk_mux2("arm_m4_src", base + 0x8080, 24, 3, arm_m4_sel, ARRAY_SIZE(arm_m4_sel));
+ clks[IMX7D_ARM_M0_ROOT_SRC] = imx_clk_mux2("arm_m0_src", base + 0x8100, 24, 3, arm_m0_sel, ARRAY_SIZE(arm_m0_sel));
+ clks[IMX7D_MAIN_AXI_ROOT_SRC] = imx_clk_mux2("axi_src", base + 0x8800, 24, 3, axi_sel, ARRAY_SIZE(axi_sel));
+ clks[IMX7D_DISP_AXI_ROOT_SRC] = imx_clk_mux2("disp_axi_src", base + 0x8880, 24, 3, disp_axi_sel, ARRAY_SIZE(disp_axi_sel));
+ clks[IMX7D_ENET_AXI_ROOT_SRC] = imx_clk_mux2("enet_axi_src", base + 0x8900, 24, 3, enet_axi_sel, ARRAY_SIZE(enet_axi_sel));
+ clks[IMX7D_NAND_USDHC_BUS_ROOT_SRC] = imx_clk_mux2("nand_usdhc_src", base + 0x8980, 24, 3, nand_usdhc_bus_sel, ARRAY_SIZE(nand_usdhc_bus_sel));
+ clks[IMX7D_AHB_CHANNEL_ROOT_SRC] = imx_clk_mux2("ahb_src", base + 0x9000, 24, 3, ahb_channel_sel, ARRAY_SIZE(ahb_channel_sel));
+ clks[IMX7D_DRAM_PHYM_ROOT_SRC] = imx_clk_mux2("dram_phym_src", base + 0x9800, 24, 1, dram_phym_sel, ARRAY_SIZE(dram_phym_sel));
+ clks[IMX7D_DRAM_ROOT_SRC] = imx_clk_mux2("dram_src", base + 0x9880, 24, 1, dram_sel, ARRAY_SIZE(dram_sel));
+ clks[IMX7D_DRAM_PHYM_ALT_ROOT_SRC] = imx_clk_mux2("dram_phym_alt_src", base + 0xa000, 24, 3, dram_phym_alt_sel, ARRAY_SIZE(dram_phym_alt_sel));
+ clks[IMX7D_DRAM_ALT_ROOT_SRC] = imx_clk_mux2("dram_alt_src", base + 0xa080, 24, 3, dram_alt_sel, ARRAY_SIZE(dram_alt_sel));
+ clks[IMX7D_USB_HSIC_ROOT_SRC] = imx_clk_mux2("usb_hsic_src", base + 0xa100, 24, 3, usb_hsic_sel, ARRAY_SIZE(usb_hsic_sel));
+ clks[IMX7D_PCIE_CTRL_ROOT_SRC] = imx_clk_mux2("pcie_ctrl_src", base + 0xa180, 24, 3, pcie_ctrl_sel, ARRAY_SIZE(pcie_ctrl_sel));
+ clks[IMX7D_PCIE_PHY_ROOT_SRC] = imx_clk_mux2("pcie_phy_src", base + 0xa200, 24, 3, pcie_phy_sel, ARRAY_SIZE(pcie_phy_sel));
+ clks[IMX7D_EPDC_PIXEL_ROOT_SRC] = imx_clk_mux2("epdc_pixel_src", base + 0xa280, 24, 3, epdc_pixel_sel, ARRAY_SIZE(epdc_pixel_sel));
+ clks[IMX7D_LCDIF_PIXEL_ROOT_SRC] = imx_clk_mux2("lcdif_pixel_src", base + 0xa300, 24, 3, lcdif_pixel_sel, ARRAY_SIZE(lcdif_pixel_sel));
+ clks[IMX7D_MIPI_DSI_ROOT_SRC] = imx_clk_mux2("mipi_dsi_src", base + 0xa380, 24, 3, mipi_dsi_sel, ARRAY_SIZE(mipi_dsi_sel));
+ clks[IMX7D_MIPI_CSI_ROOT_SRC] = imx_clk_mux2("mipi_csi_src", base + 0xa400, 24, 3, mipi_csi_sel, ARRAY_SIZE(mipi_csi_sel));
+ clks[IMX7D_MIPI_DPHY_ROOT_SRC] = imx_clk_mux2("mipi_dphy_src", base + 0xa480, 24, 3, mipi_dphy_sel, ARRAY_SIZE(mipi_dphy_sel));
+ clks[IMX7D_SAI1_ROOT_SRC] = imx_clk_mux2("sai1_src", base + 0xa500, 24, 3, sai1_sel, ARRAY_SIZE(sai1_sel));
+ clks[IMX7D_SAI2_ROOT_SRC] = imx_clk_mux2("sai2_src", base + 0xa580, 24, 3, sai2_sel, ARRAY_SIZE(sai2_sel));
+ clks[IMX7D_SAI3_ROOT_SRC] = imx_clk_mux2("sai3_src", base + 0xa600, 24, 3, sai3_sel, ARRAY_SIZE(sai3_sel));
+ clks[IMX7D_SPDIF_ROOT_SRC] = imx_clk_mux2("spdif_src", base + 0xa680, 24, 3, spdif_sel, ARRAY_SIZE(spdif_sel));
+ clks[IMX7D_ENET1_REF_ROOT_SRC] = imx_clk_mux2("enet1_ref_src", base + 0xa700, 24, 3, enet1_ref_sel, ARRAY_SIZE(enet1_ref_sel));
+ clks[IMX7D_ENET1_TIME_ROOT_SRC] = imx_clk_mux2("enet1_time_src", base + 0xa780, 24, 3, enet1_time_sel, ARRAY_SIZE(enet1_time_sel));
+ clks[IMX7D_ENET2_REF_ROOT_SRC] = imx_clk_mux2("enet2_ref_src", base + 0xa800, 24, 3, enet2_ref_sel, ARRAY_SIZE(enet2_ref_sel));
+ clks[IMX7D_ENET2_TIME_ROOT_SRC] = imx_clk_mux2("enet2_time_src", base + 0xa880, 24, 3, enet2_time_sel, ARRAY_SIZE(enet2_time_sel));
+ clks[IMX7D_ENET_PHY_REF_ROOT_SRC] = imx_clk_mux2("enet_phy_ref_src", base + 0xa900, 24, 3, enet_phy_ref_sel, ARRAY_SIZE(enet_phy_ref_sel));
+ clks[IMX7D_EIM_ROOT_SRC] = imx_clk_mux2("eim_src", base + 0xa980, 24, 3, eim_sel, ARRAY_SIZE(eim_sel));
+ clks[IMX7D_NAND_ROOT_SRC] = imx_clk_mux2("nand_src", base + 0xaa00, 24, 3, nand_sel, ARRAY_SIZE(nand_sel));
+ clks[IMX7D_QSPI_ROOT_SRC] = imx_clk_mux2("qspi_src", base + 0xaa80, 24, 3, qspi_sel, ARRAY_SIZE(qspi_sel));
+ clks[IMX7D_USDHC1_ROOT_SRC] = imx_clk_mux2("usdhc1_src", base + 0xab00, 24, 3, usdhc1_sel, ARRAY_SIZE(usdhc1_sel));
+ clks[IMX7D_USDHC2_ROOT_SRC] = imx_clk_mux2("usdhc2_src", base + 0xab80, 24, 3, usdhc2_sel, ARRAY_SIZE(usdhc2_sel));
+ clks[IMX7D_USDHC3_ROOT_SRC] = imx_clk_mux2("usdhc3_src", base + 0xac00, 24, 3, usdhc3_sel, ARRAY_SIZE(usdhc3_sel));
+ clks[IMX7D_CAN1_ROOT_SRC] = imx_clk_mux2("can1_src", base + 0xac80, 24, 3, can1_sel, ARRAY_SIZE(can1_sel));
+ clks[IMX7D_CAN2_ROOT_SRC] = imx_clk_mux2("can2_src", base + 0xad00, 24, 3, can2_sel, ARRAY_SIZE(can2_sel));
+ clks[IMX7D_I2C1_ROOT_SRC] = imx_clk_mux2("i2c1_src", base + 0xad80, 24, 3, i2c1_sel, ARRAY_SIZE(i2c1_sel));
+ clks[IMX7D_I2C2_ROOT_SRC] = imx_clk_mux2("i2c2_src", base + 0xae00, 24, 3, i2c2_sel, ARRAY_SIZE(i2c2_sel));
+ clks[IMX7D_I2C3_ROOT_SRC] = imx_clk_mux2("i2c3_src", base + 0xae80, 24, 3, i2c3_sel, ARRAY_SIZE(i2c3_sel));
+ clks[IMX7D_I2C4_ROOT_SRC] = imx_clk_mux2("i2c4_src", base + 0xaf00, 24, 3, i2c4_sel, ARRAY_SIZE(i2c4_sel));
+ clks[IMX7D_UART1_ROOT_SRC] = imx_clk_mux2("uart1_src", base + 0xaf80, 24, 3, uart1_sel, ARRAY_SIZE(uart1_sel));
+ clks[IMX7D_UART2_ROOT_SRC] = imx_clk_mux2("uart2_src", base + 0xb000, 24, 3, uart2_sel, ARRAY_SIZE(uart2_sel));
+ clks[IMX7D_UART3_ROOT_SRC] = imx_clk_mux2("uart3_src", base + 0xb080, 24, 3, uart3_sel, ARRAY_SIZE(uart3_sel));
+ clks[IMX7D_UART4_ROOT_SRC] = imx_clk_mux2("uart4_src", base + 0xb100, 24, 3, uart4_sel, ARRAY_SIZE(uart4_sel));
+ clks[IMX7D_UART5_ROOT_SRC] = imx_clk_mux2("uart5_src", base + 0xb180, 24, 3, uart5_sel, ARRAY_SIZE(uart5_sel));
+ clks[IMX7D_UART6_ROOT_SRC] = imx_clk_mux2("uart6_src", base + 0xb200, 24, 3, uart6_sel, ARRAY_SIZE(uart6_sel));
+ clks[IMX7D_UART7_ROOT_SRC] = imx_clk_mux2("uart7_src", base + 0xb280, 24, 3, uart7_sel, ARRAY_SIZE(uart7_sel));
+ clks[IMX7D_ECSPI1_ROOT_SRC] = imx_clk_mux2("ecspi1_src", base + 0xb300, 24, 3, ecspi1_sel, ARRAY_SIZE(ecspi1_sel));
+ clks[IMX7D_ECSPI2_ROOT_SRC] = imx_clk_mux2("ecspi2_src", base + 0xb380, 24, 3, ecspi2_sel, ARRAY_SIZE(ecspi2_sel));
+ clks[IMX7D_ECSPI3_ROOT_SRC] = imx_clk_mux2("ecspi3_src", base + 0xb400, 24, 3, ecspi3_sel, ARRAY_SIZE(ecspi3_sel));
+ clks[IMX7D_ECSPI4_ROOT_SRC] = imx_clk_mux2("ecspi4_src", base + 0xb480, 24, 3, ecspi4_sel, ARRAY_SIZE(ecspi4_sel));
+ clks[IMX7D_PWM1_ROOT_SRC] = imx_clk_mux2("pwm1_src", base + 0xb500, 24, 3, pwm1_sel, ARRAY_SIZE(pwm1_sel));
+ clks[IMX7D_PWM2_ROOT_SRC] = imx_clk_mux2("pwm2_src", base + 0xb580, 24, 3, pwm2_sel, ARRAY_SIZE(pwm2_sel));
+ clks[IMX7D_PWM3_ROOT_SRC] = imx_clk_mux2("pwm3_src", base + 0xb600, 24, 3, pwm3_sel, ARRAY_SIZE(pwm3_sel));
+ clks[IMX7D_PWM4_ROOT_SRC] = imx_clk_mux2("pwm4_src", base + 0xb680, 24, 3, pwm4_sel, ARRAY_SIZE(pwm4_sel));
+ clks[IMX7D_FLEXTIMER1_ROOT_SRC] = imx_clk_mux2("flextimer1_src", base + 0xb700, 24, 3, flextimer1_sel, ARRAY_SIZE(flextimer1_sel));
+ clks[IMX7D_FLEXTIMER2_ROOT_SRC] = imx_clk_mux2("flextimer2_src", base + 0xb780, 24, 3, flextimer2_sel, ARRAY_SIZE(flextimer2_sel));
+ clks[IMX7D_SIM1_ROOT_SRC] = imx_clk_mux2("sim1_src", base + 0xb800, 24, 3, sim1_sel, ARRAY_SIZE(sim1_sel));
+ clks[IMX7D_SIM2_ROOT_SRC] = imx_clk_mux2("sim2_src", base + 0xb880, 24, 3, sim2_sel, ARRAY_SIZE(sim2_sel));
+ clks[IMX7D_GPT1_ROOT_SRC] = imx_clk_mux2("gpt1_src", base + 0xb900, 24, 3, gpt1_sel, ARRAY_SIZE(gpt1_sel));
+ clks[IMX7D_GPT2_ROOT_SRC] = imx_clk_mux2("gpt2_src", base + 0xb980, 24, 3, gpt2_sel, ARRAY_SIZE(gpt2_sel));
+ clks[IMX7D_GPT3_ROOT_SRC] = imx_clk_mux2("gpt3_src", base + 0xba00, 24, 3, gpt3_sel, ARRAY_SIZE(gpt3_sel));
+ clks[IMX7D_GPT4_ROOT_SRC] = imx_clk_mux2("gpt4_src", base + 0xba80, 24, 3, gpt4_sel, ARRAY_SIZE(gpt4_sel));
+ clks[IMX7D_TRACE_ROOT_SRC] = imx_clk_mux2("trace_src", base + 0xbb00, 24, 3, trace_sel, ARRAY_SIZE(trace_sel));
+ clks[IMX7D_WDOG_ROOT_SRC] = imx_clk_mux2("wdog_src", base + 0xbb80, 24, 3, wdog_sel, ARRAY_SIZE(wdog_sel));
+ clks[IMX7D_CSI_MCLK_ROOT_SRC] = imx_clk_mux2("csi_mclk_src", base + 0xbc00, 24, 3, csi_mclk_sel, ARRAY_SIZE(csi_mclk_sel));
+ clks[IMX7D_AUDIO_MCLK_ROOT_SRC] = imx_clk_mux2("audio_mclk_src", base + 0xbc80, 24, 3, audio_mclk_sel, ARRAY_SIZE(audio_mclk_sel));
+ clks[IMX7D_WRCLK_ROOT_SRC] = imx_clk_mux2("wrclk_src", base + 0xbd00, 24, 3, wrclk_sel, ARRAY_SIZE(wrclk_sel));
+ clks[IMX7D_CLKO1_ROOT_SRC] = imx_clk_mux2("clko1_src", base + 0xbd80, 24, 3, clko1_sel, ARRAY_SIZE(clko1_sel));
+ clks[IMX7D_CLKO2_ROOT_SRC] = imx_clk_mux2("clko2_src", base + 0xbe00, 24, 3, clko2_sel, ARRAY_SIZE(clko2_sel));
+
+ clks[IMX7D_ARM_A7_ROOT_CG] = imx_clk_gate3("arm_a7_cg", "arm_a7_src", base + 0x8000, 28);
+ clks[IMX7D_ARM_M4_ROOT_CG] = imx_clk_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28);
+ clks[IMX7D_ARM_M0_ROOT_CG] = imx_clk_gate3("arm_m0_cg", "arm_m0_src", base + 0x8100, 28);
+ clks[IMX7D_MAIN_AXI_ROOT_CG] = imx_clk_gate3("axi_cg", "axi_src", base + 0x8800, 28);
+ clks[IMX7D_DISP_AXI_ROOT_CG] = imx_clk_gate3("disp_axi_cg", "disp_axi_src", base + 0x8880, 28);
+ clks[IMX7D_ENET_AXI_ROOT_CG] = imx_clk_gate3("enet_axi_cg", "enet_axi_src", base + 0x8900, 28);
+ clks[IMX7D_NAND_USDHC_BUS_ROOT_CG] = imx_clk_gate3("nand_usdhc_cg", "nand_usdhc_src", base + 0x8980, 28);
+ clks[IMX7D_AHB_CHANNEL_ROOT_CG] = imx_clk_gate3("ahb_cg", "ahb_src", base + 0x9000, 28);
+ clks[IMX7D_DRAM_PHYM_ROOT_CG] = imx_clk_gate3("dram_phym_cg", "dram_phym_src", base + 0x9800, 28);
+ clks[IMX7D_DRAM_ROOT_CG] = imx_clk_gate3("dram_cg", "dram_src", base + 0x9880, 28);
+ clks[IMX7D_DRAM_PHYM_ALT_ROOT_CG] = imx_clk_gate3("dram_phym_alt_cg", "dram_phym_alt_src", base + 0xa000, 28);
+ clks[IMX7D_DRAM_ALT_ROOT_CG] = imx_clk_gate3("dram_alt_cg", "dram_alt_src", base + 0xa080, 28);
+ clks[IMX7D_USB_HSIC_ROOT_CG] = imx_clk_gate3("usb_hsic_cg", "usb_hsic_src", base + 0xa100, 28);
+ clks[IMX7D_PCIE_CTRL_ROOT_CG] = imx_clk_gate3("pcie_ctrl_cg", "pcie_ctrl_src", base + 0xa180, 28);
+ clks[IMX7D_PCIE_PHY_ROOT_CG] = imx_clk_gate3("pcie_phy_cg", "pcie_phy_src", base + 0xa200, 28);
+ clks[IMX7D_EPDC_PIXEL_ROOT_CG] = imx_clk_gate3("epdc_pixel_cg", "epdc_pixel_src", base + 0xa280, 28);
+ clks[IMX7D_LCDIF_PIXEL_ROOT_CG] = imx_clk_gate3("lcdif_pixel_cg", "lcdif_pixel_src", base + 0xa300, 28);
+ clks[IMX7D_MIPI_DSI_ROOT_CG] = imx_clk_gate3("mipi_dsi_cg", "mipi_dsi_src", base + 0xa380, 28);
+ clks[IMX7D_MIPI_CSI_ROOT_CG] = imx_clk_gate3("mipi_csi_cg", "mipi_csi_src", base + 0xa400, 28);
+ clks[IMX7D_MIPI_DPHY_ROOT_CG] = imx_clk_gate3("mipi_dphy_cg", "mipi_dphy_src", base + 0xa480, 28);
+ clks[IMX7D_SAI1_ROOT_CG] = imx_clk_gate3("sai1_cg", "sai1_src", base + 0xa500, 28);
+ clks[IMX7D_SAI2_ROOT_CG] = imx_clk_gate3("sai2_cg", "sai2_src", base + 0xa580, 28);
+ clks[IMX7D_SAI3_ROOT_CG] = imx_clk_gate3("sai3_cg", "sai3_src", base + 0xa600, 28);
+ clks[IMX7D_SPDIF_ROOT_CG] = imx_clk_gate3("spdif_cg", "spdif_src", base + 0xa680, 28);
+ clks[IMX7D_ENET1_REF_ROOT_CG] = imx_clk_gate3("enet1_ref_cg", "enet1_ref_src", base + 0xa700, 28);
+ clks[IMX7D_ENET1_TIME_ROOT_CG] = imx_clk_gate3("enet1_time_cg", "enet1_time_src", base + 0xa780, 28);
+ clks[IMX7D_ENET2_REF_ROOT_CG] = imx_clk_gate3("enet2_ref_cg", "enet2_ref_src", base + 0xa800, 28);
+ clks[IMX7D_ENET2_TIME_ROOT_CG] = imx_clk_gate3("enet2_time_cg", "enet2_time_src", base + 0xa880, 28);
+ clks[IMX7D_ENET_PHY_REF_ROOT_CG] = imx_clk_gate3("enet_phy_ref_cg", "enet_phy_ref_src", base + 0xa900, 28);
+ clks[IMX7D_EIM_ROOT_CG] = imx_clk_gate3("eim_cg", "eim_src", base + 0xa980, 28);
+ clks[IMX7D_NAND_ROOT_CG] = imx_clk_gate3("nand_cg", "nand_src", base + 0xaa00, 28);
+ clks[IMX7D_QSPI_ROOT_CG] = imx_clk_gate3("qspi_cg", "qspi_src", base + 0xaa80, 28);
+ clks[IMX7D_USDHC1_ROOT_CG] = imx_clk_gate3("usdhc1_cg", "usdhc1_src", base + 0xab00, 28);
+ clks[IMX7D_USDHC2_ROOT_CG] = imx_clk_gate3("usdhc2_cg", "usdhc2_src", base + 0xab80, 28);
+ clks[IMX7D_USDHC3_ROOT_CG] = imx_clk_gate3("usdhc3_cg", "usdhc3_src", base + 0xac00, 28);
+ clks[IMX7D_CAN1_ROOT_CG] = imx_clk_gate3("can1_cg", "can1_src", base + 0xac80, 28);
+ clks[IMX7D_CAN2_ROOT_CG] = imx_clk_gate3("can2_cg", "can2_src", base + 0xad00, 28);
+ clks[IMX7D_I2C1_ROOT_CG] = imx_clk_gate3("i2c1_cg", "i2c1_src", base + 0xad80, 28);
+ clks[IMX7D_I2C2_ROOT_CG] = imx_clk_gate3("i2c2_cg", "i2c2_src", base + 0xae00, 28);
+ clks[IMX7D_I2C3_ROOT_CG] = imx_clk_gate3("i2c3_cg", "i2c3_src", base + 0xae80, 28);
+ clks[IMX7D_I2C4_ROOT_CG] = imx_clk_gate3("i2c4_cg", "i2c4_src", base + 0xaf00, 28);
+ clks[IMX7D_UART1_ROOT_CG] = imx_clk_gate3("uart1_cg", "uart1_src", base + 0xaf80, 28);
+ clks[IMX7D_UART2_ROOT_CG] = imx_clk_gate3("uart2_cg", "uart2_src", base + 0xb000, 28);
+ clks[IMX7D_UART3_ROOT_CG] = imx_clk_gate3("uart3_cg", "uart3_src", base + 0xb080, 28);
+ clks[IMX7D_UART4_ROOT_CG] = imx_clk_gate3("uart4_cg", "uart4_src", base + 0xb100, 28);
+ clks[IMX7D_UART5_ROOT_CG] = imx_clk_gate3("uart5_cg", "uart5_src", base + 0xb180, 28);
+ clks[IMX7D_UART6_ROOT_CG] = imx_clk_gate3("uart6_cg", "uart6_src", base + 0xb200, 28);
+ clks[IMX7D_UART7_ROOT_CG] = imx_clk_gate3("uart7_cg", "uart7_src", base + 0xb280, 28);
+ clks[IMX7D_ECSPI1_ROOT_CG] = imx_clk_gate3("ecspi1_cg", "ecspi1_src", base + 0xb300, 28);
+ clks[IMX7D_ECSPI2_ROOT_CG] = imx_clk_gate3("ecspi2_cg", "ecspi2_src", base + 0xb380, 28);
+ clks[IMX7D_ECSPI3_ROOT_CG] = imx_clk_gate3("ecspi3_cg", "ecspi3_src", base + 0xb400, 28);
+ clks[IMX7D_ECSPI4_ROOT_CG] = imx_clk_gate3("ecspi4_cg", "ecspi4_src", base + 0xb480, 28);
+ clks[IMX7D_PWM1_ROOT_CG] = imx_clk_gate3("pwm1_cg", "pwm1_src", base + 0xb500, 28);
+ clks[IMX7D_PWM2_ROOT_CG] = imx_clk_gate3("pwm2_cg", "pwm2_src", base + 0xb580, 28);
+ clks[IMX7D_PWM3_ROOT_CG] = imx_clk_gate3("pwm3_cg", "pwm3_src", base + 0xb600, 28);
+ clks[IMX7D_PWM4_ROOT_CG] = imx_clk_gate3("pwm4_cg", "pwm4_src", base + 0xb680, 28);
+ clks[IMX7D_FLEXTIMER1_ROOT_CG] = imx_clk_gate3("flextimer1_cg", "flextimer1_src", base + 0xb700, 28);
+ clks[IMX7D_FLEXTIMER2_ROOT_CG] = imx_clk_gate3("flextimer2_cg", "flextimer2_src", base + 0xb780, 28);
+ clks[IMX7D_SIM1_ROOT_CG] = imx_clk_gate3("sim1_cg", "sim1_src", base + 0xb800, 28);
+ clks[IMX7D_SIM2_ROOT_CG] = imx_clk_gate3("sim2_cg", "sim2_src", base + 0xb880, 28);
+ clks[IMX7D_GPT1_ROOT_CG] = imx_clk_gate3("gpt1_cg", "gpt1_src", base + 0xb900, 28);
+ clks[IMX7D_GPT2_ROOT_CG] = imx_clk_gate3("gpt2_cg", "gpt2_src", base + 0xb980, 28);
+ clks[IMX7D_GPT3_ROOT_CG] = imx_clk_gate3("gpt3_cg", "gpt3_src", base + 0xba00, 28);
+ clks[IMX7D_GPT4_ROOT_CG] = imx_clk_gate3("gpt4_cg", "gpt4_src", base + 0xba80, 28);
+ clks[IMX7D_TRACE_ROOT_CG] = imx_clk_gate3("trace_cg", "trace_src", base + 0xbb00, 28);
+ clks[IMX7D_WDOG_ROOT_CG] = imx_clk_gate3("wdog_cg", "wdog_src", base + 0xbb80, 28);
+ clks[IMX7D_CSI_MCLK_ROOT_CG] = imx_clk_gate3("csi_mclk_cg", "csi_mclk_src", base + 0xbc00, 28);
+ clks[IMX7D_AUDIO_MCLK_ROOT_CG] = imx_clk_gate3("audio_mclk_cg", "audio_mclk_src", base + 0xbc80, 28);
+ clks[IMX7D_WRCLK_ROOT_CG] = imx_clk_gate3("wrclk_cg", "wrclk_src", base + 0xbd00, 28);
+ clks[IMX7D_CLKO1_ROOT_CG] = imx_clk_gate3("clko1_cg", "clko1_src", base + 0xbd80, 28);
+ clks[IMX7D_CLKO2_ROOT_CG] = imx_clk_gate3("clko2_cg", "clko2_src", base + 0xbe00, 28);
+
+ clks[IMX7D_MAIN_AXI_ROOT_PRE_DIV] = imx_clk_divider2("axi_pre_div", "axi_cg", base + 0x8800, 16, 3);
+ clks[IMX7D_DISP_AXI_ROOT_PRE_DIV] = imx_clk_divider2("disp_axi_pre_div", "disp_axi_cg", base + 0x8880, 16, 3);
+ clks[IMX7D_ENET_AXI_ROOT_PRE_DIV] = imx_clk_divider2("enet_axi_pre_div", "enet_axi_cg", base + 0x8900, 16, 3);
+ clks[IMX7D_NAND_USDHC_BUS_ROOT_PRE_DIV] = imx_clk_divider2("nand_usdhc_pre_div", "nand_usdhc_cg", base + 0x8980, 16, 3);
+ clks[IMX7D_AHB_CHANNEL_ROOT_PRE_DIV] = imx_clk_divider2("ahb_pre_div", "ahb_cg", base + 0x9000, 16, 3);
+ clks[IMX7D_DRAM_PHYM_ALT_ROOT_PRE_DIV] = imx_clk_divider2("dram_phym_alt_pre_div", "dram_phym_alt_cg", base + 0xa000, 16, 3);
+ clks[IMX7D_DRAM_ALT_ROOT_PRE_DIV] = imx_clk_divider2("dram_alt_pre_div", "dram_alt_cg", base + 0xa080, 16, 3);
+ clks[IMX7D_USB_HSIC_ROOT_PRE_DIV] = imx_clk_divider2("usb_hsic_pre_div", "usb_hsic_cg", base + 0xa100, 16, 3);
+ clks[IMX7D_PCIE_CTRL_ROOT_PRE_DIV] = imx_clk_divider2("pcie_ctrl_pre_div", "pcie_ctrl_cg", base + 0xa180, 16, 3);
+ clks[IMX7D_PCIE_PHY_ROOT_PRE_DIV] = imx_clk_divider2("pcie_phy_pre_div", "pcie_phy_cg", base + 0xa200, 16, 3);
+ clks[IMX7D_EPDC_PIXEL_ROOT_PRE_DIV] = imx_clk_divider2("epdc_pixel_pre_div", "epdc_pixel_cg", base + 0xa280, 16, 3);
+ clks[IMX7D_LCDIF_PIXEL_ROOT_PRE_DIV] = imx_clk_divider2("lcdif_pixel_pre_div", "lcdif_pixel_cg", base + 0xa300, 16, 3);
+ clks[IMX7D_MIPI_DSI_ROOT_PRE_DIV] = imx_clk_divider2("mipi_dsi_pre_div", "mipi_dsi_cg", base + 0xa380, 16, 3);
+ clks[IMX7D_MIPI_CSI_ROOT_PRE_DIV] = imx_clk_divider2("mipi_csi_pre_div", "mipi_csi_cg", base + 0xa400, 16, 3);
+ clks[IMX7D_MIPI_DPHY_ROOT_PRE_DIV] = imx_clk_divider2("mipi_dphy_pre_div", "mipi_dphy_cg", base + 0xa480, 16, 3);
+ clks[IMX7D_SAI1_ROOT_PRE_DIV] = imx_clk_divider2("sai1_pre_div", "sai1_cg", base + 0xa500, 16, 3);
+ clks[IMX7D_SAI2_ROOT_PRE_DIV] = imx_clk_divider2("sai2_pre_div", "sai2_cg", base + 0xa580, 16, 3);
+ clks[IMX7D_SAI3_ROOT_PRE_DIV] = imx_clk_divider2("sai3_pre_div", "sai3_cg", base + 0xa600, 16, 3);
+ clks[IMX7D_SPDIF_ROOT_PRE_DIV] = imx_clk_divider2("spdif_pre_div", "spdif_cg", base + 0xa680, 16, 3);
+ clks[IMX7D_ENET1_REF_ROOT_PRE_DIV] = imx_clk_divider2("enet1_ref_pre_div", "enet1_ref_cg", base + 0xa700, 16, 3);
+ clks[IMX7D_ENET1_TIME_ROOT_PRE_DIV] = imx_clk_divider2("enet1_time_pre_div", "enet1_time_cg", base + 0xa780, 16, 3);
+ clks[IMX7D_ENET2_REF_ROOT_PRE_DIV] = imx_clk_divider2("enet2_ref_pre_div", "enet2_ref_cg", base + 0xa800, 16, 3);
+ clks[IMX7D_ENET2_TIME_ROOT_PRE_DIV] = imx_clk_divider2("enet2_time_pre_div", "enet2_time_cg", base + 0xa880, 16, 3);
+ clks[IMX7D_ENET_PHY_REF_ROOT_PRE_DIV] = imx_clk_divider2("enet_phy_ref_pre_div", "enet_phy_ref_cg", base + 0xa900, 16, 3);
+ clks[IMX7D_EIM_ROOT_PRE_DIV] = imx_clk_divider2("eim_pre_div", "eim_cg", base + 0xa980, 16, 3);
+ clks[IMX7D_NAND_ROOT_PRE_DIV] = imx_clk_divider2("nand_pre_div", "nand_cg", base + 0xaa00, 16, 3);
+ clks[IMX7D_QSPI_ROOT_PRE_DIV] = imx_clk_divider2("qspi_pre_div", "qspi_cg", base + 0xaa80, 16, 3);
+ clks[IMX7D_USDHC1_ROOT_PRE_DIV] = imx_clk_divider2("usdhc1_pre_div", "usdhc1_cg", base + 0xab00, 16, 3);
+ clks[IMX7D_USDHC2_ROOT_PRE_DIV] = imx_clk_divider2("usdhc2_pre_div", "usdhc2_cg", base + 0xab80, 16, 3);
+ clks[IMX7D_USDHC3_ROOT_PRE_DIV] = imx_clk_divider2("usdhc3_pre_div", "usdhc3_cg", base + 0xac00, 16, 3);
+ clks[IMX7D_CAN1_ROOT_PRE_DIV] = imx_clk_divider2("can1_pre_div", "can1_cg", base + 0xac80, 16, 3);
+ clks[IMX7D_CAN2_ROOT_PRE_DIV] = imx_clk_divider2("can2_pre_div", "can2_cg", base + 0xad00, 16, 3);
+ clks[IMX7D_I2C1_ROOT_PRE_DIV] = imx_clk_divider2("i2c1_pre_div", "i2c1_cg", base + 0xad80, 16, 3);
+ clks[IMX7D_I2C2_ROOT_PRE_DIV] = imx_clk_divider2("i2c2_pre_div", "i2c2_cg", base + 0xae00, 16, 3);
+ clks[IMX7D_I2C3_ROOT_PRE_DIV] = imx_clk_divider2("i2c3_pre_div", "i2c3_cg", base + 0xae80, 16, 3);
+ clks[IMX7D_I2C4_ROOT_PRE_DIV] = imx_clk_divider2("i2c4_pre_div", "i2c4_cg", base + 0xaf00, 16, 3);
+ clks[IMX7D_UART1_ROOT_PRE_DIV] = imx_clk_divider2("uart1_pre_div", "uart1_cg", base + 0xaf80, 16, 3);
+ clks[IMX7D_UART2_ROOT_PRE_DIV] = imx_clk_divider2("uart2_pre_div", "uart2_cg", base + 0xb000, 16, 3);
+ clks[IMX7D_UART3_ROOT_PRE_DIV] = imx_clk_divider2("uart3_pre_div", "uart3_cg", base + 0xb080, 16, 3);
+ clks[IMX7D_UART4_ROOT_PRE_DIV] = imx_clk_divider2("uart4_pre_div", "uart4_cg", base + 0xb100, 16, 3);
+ clks[IMX7D_UART5_ROOT_PRE_DIV] = imx_clk_divider2("uart5_pre_div", "uart5_cg", base + 0xb180, 16, 3);
+ clks[IMX7D_UART6_ROOT_PRE_DIV] = imx_clk_divider2("uart6_pre_div", "uart6_cg", base + 0xb200, 16, 3);
+ clks[IMX7D_UART7_ROOT_PRE_DIV] = imx_clk_divider2("uart7_pre_div", "uart7_cg", base + 0xb280, 16, 3);
+ clks[IMX7D_ECSPI1_ROOT_PRE_DIV] = imx_clk_divider2("ecspi1_pre_div", "ecspi1_cg", base + 0xb300, 16, 3);
+ clks[IMX7D_ECSPI2_ROOT_PRE_DIV] = imx_clk_divider2("ecspi2_pre_div", "ecspi2_cg", base + 0xb380, 16, 3);
+ clks[IMX7D_ECSPI3_ROOT_PRE_DIV] = imx_clk_divider2("ecspi3_pre_div", "ecspi3_cg", base + 0xb400, 16, 3);
+ clks[IMX7D_ECSPI4_ROOT_PRE_DIV] = imx_clk_divider2("ecspi4_pre_div", "ecspi4_cg", base + 0xb480, 16, 3);
+ clks[IMX7D_PWM1_ROOT_PRE_DIV] = imx_clk_divider2("pwm1_pre_div", "pwm1_cg", base + 0xb500, 16, 3);
+ clks[IMX7D_PWM2_ROOT_PRE_DIV] = imx_clk_divider2("pwm2_pre_div", "pwm2_cg", base + 0xb580, 16, 3);
+ clks[IMX7D_PWM3_ROOT_PRE_DIV] = imx_clk_divider2("pwm3_pre_div", "pwm3_cg", base + 0xb600, 16, 3);
+ clks[IMX7D_PWM4_ROOT_PRE_DIV] = imx_clk_divider2("pwm4_pre_div", "pwm4_cg", base + 0xb680, 16, 3);
+ clks[IMX7D_FLEXTIMER1_ROOT_PRE_DIV] = imx_clk_divider2("flextimer1_pre_div", "flextimer1_cg", base + 0xb700, 16, 3);
+ clks[IMX7D_FLEXTIMER2_ROOT_PRE_DIV] = imx_clk_divider2("flextimer2_pre_div", "flextimer2_cg", base + 0xb780, 16, 3);
+ clks[IMX7D_SIM1_ROOT_PRE_DIV] = imx_clk_divider2("sim1_pre_div", "sim1_cg", base + 0xb800, 16, 3);
+ clks[IMX7D_SIM2_ROOT_PRE_DIV] = imx_clk_divider2("sim2_pre_div", "sim2_cg", base + 0xb880, 16, 3);
+ clks[IMX7D_GPT1_ROOT_PRE_DIV] = imx_clk_divider2("gpt1_pre_div", "gpt1_cg", base + 0xb900, 16, 3);
+ clks[IMX7D_GPT2_ROOT_PRE_DIV] = imx_clk_divider2("gpt2_pre_div", "gpt2_cg", base + 0xb980, 16, 3);
+ clks[IMX7D_GPT3_ROOT_PRE_DIV] = imx_clk_divider2("gpt3_pre_div", "gpt3_cg", base + 0xba00, 16, 3);
+ clks[IMX7D_GPT4_ROOT_PRE_DIV] = imx_clk_divider2("gpt4_pre_div", "gpt4_cg", base + 0xba80, 16, 3);
+ clks[IMX7D_TRACE_ROOT_PRE_DIV] = imx_clk_divider2("trace_pre_div", "trace_cg", base + 0xbb00, 16, 3);
+ clks[IMX7D_WDOG_ROOT_PRE_DIV] = imx_clk_divider2("wdog_pre_div", "wdog_cg", base + 0xbb80, 16, 3);
+ clks[IMX7D_CSI_MCLK_ROOT_PRE_DIV] = imx_clk_divider2("csi_mclk_pre_div", "csi_mclk_cg", base + 0xbc00, 16, 3);
+ clks[IMX7D_AUDIO_MCLK_ROOT_PRE_DIV] = imx_clk_divider2("audio_mclk_pre_div", "audio_mclk_cg", base + 0xbc80, 16, 3);
+ clks[IMX7D_WRCLK_ROOT_PRE_DIV] = imx_clk_divider2("wrclk_pre_div", "wrclk_cg", base + 0xbd00, 16, 3);
+ clks[IMX7D_CLKO1_ROOT_PRE_DIV] = imx_clk_divider2("clko1_pre_div", "clko1_cg", base + 0xbd80, 16, 3);
+ clks[IMX7D_CLKO2_ROOT_PRE_DIV] = imx_clk_divider2("clko2_pre_div", "clko2_cg", base + 0xbe00, 16, 3);
+
+ clks[IMX7D_ARM_A7_ROOT_DIV] = imx_clk_divider2("arm_a7_div", "arm_a7_cg", base + 0x8000, 0, 3);
+ clks[IMX7D_ARM_M4_ROOT_DIV] = imx_clk_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3);
+ clks[IMX7D_ARM_M0_ROOT_DIV] = imx_clk_divider2("arm_m0_div", "arm_m0_cg", base + 0x8100, 0, 3);
+ clks[IMX7D_MAIN_AXI_ROOT_DIV] = imx_clk_divider2("axi_post_div", "axi_pre_div", base + 0x8800, 0, 6);
+ clks[IMX7D_DISP_AXI_ROOT_DIV] = imx_clk_divider2("disp_axi_post_div", "disp_axi_pre_div", base + 0x8880, 0, 6);
+ clks[IMX7D_ENET_AXI_ROOT_DIV] = imx_clk_divider2("enet_axi_post_div", "enet_axi_pre_div", base + 0x8900, 0, 6);
+ clks[IMX7D_NAND_USDHC_BUS_ROOT_DIV] = imx_clk_divider2("nand_usdhc_post_div", "nand_usdhc_pre_div", base + 0x8980, 0, 6);
+ clks[IMX7D_AHB_CHANNEL_ROOT_DIV] = imx_clk_divider2("ahb_post_div", "ahb_pre_div", base + 0x9000, 0, 6);
+ clks[IMX7D_DRAM_ROOT_DIV] = imx_clk_divider2("dram_post_div", "dram_cg", base + 0x9880, 0, 3);
+ clks[IMX7D_DRAM_PHYM_ALT_ROOT_DIV] = imx_clk_divider2("dram_phym_alt_post_div", "dram_phym_alt_pre_div", base + 0xa000, 0, 3);
+ clks[IMX7D_DRAM_ALT_ROOT_DIV] = imx_clk_divider2("dram_alt_post_div", "dram_alt_pre_div", base + 0xa080, 0, 3);
+ clks[IMX7D_USB_HSIC_ROOT_DIV] = imx_clk_divider2("usb_hsic_post_div", "usb_hsic_pre_div", base + 0xa100, 0, 6);
+ clks[IMX7D_PCIE_CTRL_ROOT_DIV] = imx_clk_divider2("pcie_ctrl_post_div", "pcie_ctrl_pre_div", base + 0xa180, 0, 6);
+ clks[IMX7D_PCIE_PHY_ROOT_DIV] = imx_clk_divider2("pcie_phy_post_div", "pcie_phy_pre_div", base + 0xa200, 0, 6);
+ clks[IMX7D_EPDC_PIXEL_ROOT_DIV] = imx_clk_divider2("epdc_pixel_post_div", "epdc_pixel_pre_div", base + 0xa280, 0, 6);
+ clks[IMX7D_LCDIF_PIXEL_ROOT_DIV] = imx_clk_divider2("lcdif_pixel_post_div", "lcdif_pixel_pre_div", base + 0xa300, 0, 6);
+ clks[IMX7D_MIPI_DSI_ROOT_DIV] = imx_clk_divider2("mipi_dsi_post_div", "mipi_dsi_pre_div", base + 0xa380, 0, 6);
+ clks[IMX7D_MIPI_CSI_ROOT_DIV] = imx_clk_divider2("mipi_csi_post_div", "mipi_csi_pre_div", base + 0xa400, 0, 6);
+ clks[IMX7D_MIPI_DPHY_ROOT_DIV] = imx_clk_divider2("mipi_dphy_post_div", "mipi_dphy_pre_div", base + 0xa480, 0, 6);
+ clks[IMX7D_SAI1_ROOT_DIV] = imx_clk_divider2("sai1_post_div", "sai1_pre_div", base + 0xa500, 0, 6);
+ clks[IMX7D_SAI2_ROOT_DIV] = imx_clk_divider2("sai2_post_div", "sai2_pre_div", base + 0xa580, 0, 6);
+ clks[IMX7D_SAI3_ROOT_DIV] = imx_clk_divider2("sai3_post_div", "sai3_pre_div", base + 0xa600, 0, 6);
+ clks[IMX7D_SPDIF_ROOT_DIV] = imx_clk_divider2("spdif_post_div", "spdif_pre_div", base + 0xa680, 0, 6);
+ clks[IMX7D_ENET1_REF_ROOT_DIV] = imx_clk_divider2("enet1_ref_post_div", "enet1_ref_pre_div", base + 0xa700, 0, 6);
+ clks[IMX7D_ENET1_TIME_ROOT_DIV] = imx_clk_divider2("enet1_time_post_div", "enet1_time_pre_div", base + 0xa780, 0, 6);
+ clks[IMX7D_ENET2_REF_ROOT_DIV] = imx_clk_divider2("enet2_ref_post_div", "enet2_ref_pre_div", base + 0xa800, 0, 6);
+ clks[IMX7D_ENET2_TIME_ROOT_DIV] = imx_clk_divider2("enet2_time_post_div", "enet2_time_pre_div", base + 0xa880, 0, 6);
+ clks[IMX7D_ENET_PHY_REF_ROOT_DIV] = imx_clk_divider2("enet_phy_ref_post_div", "enet_phy_ref_pre_div", base + 0xa900, 0, 6);
+ clks[IMX7D_EIM_ROOT_DIV] = imx_clk_divider2("eim_post_div", "eim_pre_div", base + 0xa980, 0, 6);
+ clks[IMX7D_NAND_ROOT_DIV] = imx_clk_divider2("nand_post_div", "nand_pre_div", base + 0xaa00, 0, 6);
+ clks[IMX7D_QSPI_ROOT_DIV] = imx_clk_divider2("qspi_post_div", "qspi_pre_div", base + 0xaa80, 0, 6);
+ clks[IMX7D_USDHC1_ROOT_DIV] = imx_clk_divider2("usdhc1_post_div", "usdhc1_pre_div", base + 0xab00, 0, 6);
+ clks[IMX7D_USDHC2_ROOT_DIV] = imx_clk_divider2("usdhc2_post_div", "usdhc2_pre_div", base + 0xab80, 0, 6);
+ clks[IMX7D_USDHC3_ROOT_DIV] = imx_clk_divider2("usdhc3_post_div", "usdhc3_pre_div", base + 0xac00, 0, 6);
+ clks[IMX7D_CAN1_ROOT_DIV] = imx_clk_divider2("can1_post_div", "can1_pre_div", base + 0xac80, 0, 6);
+ clks[IMX7D_CAN2_ROOT_DIV] = imx_clk_divider2("can2_post_div", "can2_pre_div", base + 0xad00, 0, 6);
+ clks[IMX7D_I2C1_ROOT_DIV] = imx_clk_divider2("i2c1_post_div", "i2c1_pre_div", base + 0xad80, 0, 6);
+ clks[IMX7D_I2C2_ROOT_DIV] = imx_clk_divider2("i2c2_post_div", "i2c2_pre_div", base + 0xae00, 0, 6);
+ clks[IMX7D_I2C3_ROOT_DIV] = imx_clk_divider2("i2c3_post_div", "i2c3_pre_div", base + 0xae80, 0, 6);
+ clks[IMX7D_I2C4_ROOT_DIV] = imx_clk_divider2("i2c4_post_div", "i2c4_pre_div", base + 0xaf00, 0, 6);
+ clks[IMX7D_UART1_ROOT_DIV] = imx_clk_divider2("uart1_post_div", "uart1_pre_div", base + 0xaf80, 0, 6);
+ clks[IMX7D_UART2_ROOT_DIV] = imx_clk_divider2("uart2_post_div", "uart2_pre_div", base + 0xb000, 0, 6);
+ clks[IMX7D_UART3_ROOT_DIV] = imx_clk_divider2("uart3_post_div", "uart3_pre_div", base + 0xb080, 0, 6);
+ clks[IMX7D_UART4_ROOT_DIV] = imx_clk_divider2("uart4_post_div", "uart4_pre_div", base + 0xb100, 0, 6);
+ clks[IMX7D_UART5_ROOT_DIV] = imx_clk_divider2("uart5_post_div", "uart5_pre_div", base + 0xb180, 0, 6);
+ clks[IMX7D_UART6_ROOT_DIV] = imx_clk_divider2("uart6_post_div", "uart6_pre_div", base + 0xb200, 0, 6);
+ clks[IMX7D_UART7_ROOT_DIV] = imx_clk_divider2("uart7_post_div", "uart7_pre_div", base + 0xb280, 0, 6);
+ clks[IMX7D_ECSPI1_ROOT_DIV] = imx_clk_divider2("ecspi1_post_div", "ecspi1_pre_div", base + 0xb300, 0, 6);
+ clks[IMX7D_ECSPI2_ROOT_DIV] = imx_clk_divider2("ecspi2_post_div", "ecspi2_pre_div", base + 0xb380, 0, 6);
+ clks[IMX7D_ECSPI3_ROOT_DIV] = imx_clk_divider2("ecspi3_post_div", "ecspi3_pre_div", base + 0xb400, 0, 6);
+ clks[IMX7D_ECSPI4_ROOT_DIV] = imx_clk_divider2("ecspi4_post_div", "ecspi4_pre_div", base + 0xb480, 0, 6);
+ clks[IMX7D_PWM1_ROOT_DIV] = imx_clk_divider2("pwm1_post_div", "pwm1_pre_div", base + 0xb500, 0, 6);
+ clks[IMX7D_PWM2_ROOT_DIV] = imx_clk_divider2("pwm2_post_div", "pwm2_pre_div", base + 0xb580, 0, 6);
+ clks[IMX7D_PWM3_ROOT_DIV] = imx_clk_divider2("pwm3_post_div", "pwm3_pre_div", base + 0xb600, 0, 6);
+ clks[IMX7D_PWM4_ROOT_DIV] = imx_clk_divider2("pwm4_post_div", "pwm4_pre_div", base + 0xb680, 0, 6);
+ clks[IMX7D_FLEXTIMER1_ROOT_DIV] = imx_clk_divider2("flextimer1_post_div", "flextimer1_pre_div", base + 0xb700, 0, 6);
+ clks[IMX7D_FLEXTIMER2_ROOT_DIV] = imx_clk_divider2("flextimer2_post_div", "flextimer2_pre_div", base + 0xb780, 0, 6);
+ clks[IMX7D_SIM1_ROOT_DIV] = imx_clk_divider2("sim1_post_div", "sim1_pre_div", base + 0xb800, 0, 6);
+ clks[IMX7D_SIM2_ROOT_DIV] = imx_clk_divider2("sim2_post_div", "sim2_pre_div", base + 0xb880, 0, 6);
+ clks[IMX7D_GPT1_ROOT_DIV] = imx_clk_divider2("gpt1_post_div", "gpt1_pre_div", base + 0xb900, 0, 6);
+ clks[IMX7D_GPT2_ROOT_DIV] = imx_clk_divider2("gpt2_post_div", "gpt2_pre_div", base + 0xb980, 0, 6);
+ clks[IMX7D_GPT3_ROOT_DIV] = imx_clk_divider2("gpt3_post_div", "gpt3_pre_div", base + 0xba00, 0, 6);
+ clks[IMX7D_GPT4_ROOT_DIV] = imx_clk_divider2("gpt4_post_div", "gpt4_pre_div", base + 0xba80, 0, 6);
+ clks[IMX7D_TRACE_ROOT_DIV] = imx_clk_divider2("trace_post_div", "trace_pre_div", base + 0xbb00, 0, 6);
+ clks[IMX7D_WDOG_ROOT_DIV] = imx_clk_divider2("wdog_post_div", "wdog_pre_div", base + 0xbb80, 0, 6);
+ clks[IMX7D_CSI_MCLK_ROOT_DIV] = imx_clk_divider2("csi_mclk_post_div", "csi_mclk_pre_div", base + 0xbc00, 0, 6);
+ clks[IMX7D_AUDIO_MCLK_ROOT_DIV] = imx_clk_divider2("audio_mclk_post_div", "audio_mclk_pre_div", base + 0xbc80, 0, 6);
+ clks[IMX7D_WRCLK_ROOT_DIV] = imx_clk_divider2("wrclk_post_div", "wrclk_pre_div", base + 0xbd00, 0, 6);
+ clks[IMX7D_CLKO1_ROOT_DIV] = imx_clk_divider2("clko1_post_div", "clko1_pre_div", base + 0xbd80, 0, 6);
+ clks[IMX7D_CLKO2_ROOT_DIV] = imx_clk_divider2("clko2_post_div", "clko2_pre_div", base + 0xbe00, 0, 6);
+
+ clks[IMX7D_ARM_A7_ROOT_CLK] = imx_clk_gate4("arm_a7_root_clk", "arm_a7_div", base + 0x4000, 0);
+ clks[IMX7D_ARM_M4_ROOT_CLK] = imx_clk_gate4("arm_m4_root_clk", "arm_m4_div", base + 0x4010, 0);
+ clks[IMX7D_ARM_M0_ROOT_CLK] = imx_clk_gate4("arm_m0_root_clk", "arm_m0_div", base + 0x4020, 0);
+ clks[IMX7D_MAIN_AXI_ROOT_CLK] = imx_clk_gate4("main_axi_root_clk", "axi_post_div", base + 0x4040, 0);
+ clks[IMX7D_DISP_AXI_ROOT_CLK] = imx_clk_gate4("disp_axi_root_clk", "disp_axi_post_div", base + 0x4050, 0);
+ clks[IMX7D_ENET_AXI_ROOT_CLK] = imx_clk_gate4("enet_axi_root_clk", "enet_axi_post_div", base + 0x4060, 0);
+ clks[IMX7D_OCRAM_CLK] = imx_clk_gate4("ocram_clk", "axi_post_div", base + 0x4110, 0);
+ clks[IMX7D_OCRAM_S_CLK] = imx_clk_gate4("ocram_s_clk", "ahb_post_div", base + 0x4120, 0);
+ clks[IMX7D_NAND_USDHC_BUS_ROOT_CLK] = imx_clk_gate4("nand_usdhc_root_clk", "nand_usdhc_post_div", base + 0x4130, 0);
+ clks[IMX7D_AHB_CHANNEL_ROOT_CLK] = imx_clk_gate4("ahb_root_clk", "ahb_post_div", base + 0x4200, 0);
+ clks[IMX7D_DRAM_ROOT_CLK] = imx_clk_gate4("dram_root_clk", "dram_post_div", base + 0x4130, 0);
+ clks[IMX7D_DRAM_PHYM_ROOT_CLK] = imx_clk_gate4("dram_phym_root_clk", "dram_phym_cg", base + 0x4130, 0);
+ clks[IMX7D_DRAM_PHYM_ALT_ROOT_CLK] = imx_clk_gate4("dram_phym_alt_root_clk", "dram_phym_alt_post_div", base + 0x4130, 0);
+ clks[IMX7D_DRAM_ALT_ROOT_CLK] = imx_clk_gate4("dram_alt_root_clk", "dram_alt_post_div", base + 0x4130, 0);
+ clks[IMX7D_USB_HSIC_ROOT_CLK] = imx_clk_gate4("usb_hsic_root_clk", "usb_hsic_post_div", base + 0x4420, 0);
+ clks[IMX7D_PCIE_CTRL_ROOT_CLK] = imx_clk_gate4("pcie_ctrl_root_clk", "pcie_ctrl_post_div", base + 0x4600, 0);
+ clks[IMX7D_PCIE_PHY_ROOT_CLK] = imx_clk_gate4("pcie_phy_root_clk", "pcie_phy_post_div", base + 0x4600, 0);
+ clks[IMX7D_EPDC_PIXEL_ROOT_CLK] = imx_clk_gate4("epdc_pixel_root_clk", "epdc_pixel_post_div", base + 0x44a0, 0);
+ clks[IMX7D_LCDIF_PIXEL_ROOT_CLK] = imx_clk_gate4("lcdif_pixel_root_clk", "lcdif_pixel_post_div", base + 0x44b0, 0);
+ clks[IMX7D_MIPI_DSI_ROOT_CLK] = imx_clk_gate4("mipi_dsi_root_clk", "mipi_dsi_post_div", base + 0x4650, 0);
+ clks[IMX7D_MIPI_CSI_ROOT_CLK] = imx_clk_gate4("mipi_csi_root_clk", "mipi_csi_post_div", base + 0x4640, 0);
+ clks[IMX7D_MIPI_DPHY_ROOT_CLK] = imx_clk_gate4("mipi_dphy_root_clk", "mipi_dphy_post_div", base + 0x4660, 0);
+ clks[IMX7D_SAI1_ROOT_CLK] = imx_clk_gate4("sai1_root_clk", "sai1_post_div", base + 0x48c0, 0);
+ clks[IMX7D_SAI2_ROOT_CLK] = imx_clk_gate4("sai2_root_clk", "sai2_post_div", base + 0x48d0, 0);
+ clks[IMX7D_SAI3_ROOT_CLK] = imx_clk_gate4("sai3_root_clk", "sai3_post_div", base + 0x48e0, 0);
+ clks[IMX7D_SPDIF_ROOT_CLK] = imx_clk_gate4("spdif_root_clk", "spdif_post_div", base + 0x44d0, 0);
+ clks[IMX7D_ENET1_REF_ROOT_CLK] = imx_clk_gate4("enet1_ref_root_clk", "enet1_ref_post_div", base + 0x44e0, 0);
+ clks[IMX7D_ENET1_TIME_ROOT_CLK] = imx_clk_gate4("enet1_time_root_clk", "enet1_time_post_div", base + 0x44f0, 0);
+ clks[IMX7D_ENET2_REF_ROOT_CLK] = imx_clk_gate4("enet2_ref_root_clk", "enet2_ref_post_div", base + 0x4500, 0);
+ clks[IMX7D_ENET2_TIME_ROOT_CLK] = imx_clk_gate4("enet2_time_root_clk", "enet2_time_post_div", base + 0x4510, 0);
+ clks[IMX7D_ENET_PHY_REF_ROOT_CLK] = imx_clk_gate4("enet_phy_ref_root_clk", "enet_phy_ref_post_div", base + 0x4520, 0);
+ clks[IMX7D_EIM_ROOT_CLK] = imx_clk_gate4("eim_root_clk", "eim_post_div", base + 0x4160, 0);
+ clks[IMX7D_NAND_ROOT_CLK] = imx_clk_gate4("nand_root_clk", "nand_post_div", base + 0x4140, 0);
+ clks[IMX7D_QSPI_ROOT_CLK] = imx_clk_gate4("qspi_root_clk", "qspi_post_div", base + 0x4150, 0);
+ clks[IMX7D_USDHC1_ROOT_CLK] = imx_clk_gate4("usdhc1_root_clk", "usdhc1_post_div", base + 0x46c0, 0);
+ clks[IMX7D_USDHC2_ROOT_CLK] = imx_clk_gate4("usdhc2_root_clk", "usdhc2_post_div", base + 0x46d0, 0);
+ clks[IMX7D_USDHC3_ROOT_CLK] = imx_clk_gate4("usdhc3_root_clk", "usdhc3_post_div", base + 0x46e0, 0);
+ clks[IMX7D_CAN1_ROOT_CLK] = imx_clk_gate4("can1_root_clk", "can1_post_div", base + 0x4740, 0);
+ clks[IMX7D_CAN2_ROOT_CLK] = imx_clk_gate4("can2_root_clk", "can2_post_div", base + 0x4750, 0);
+ clks[IMX7D_I2C1_ROOT_CLK] = imx_clk_gate4("i2c1_root_clk", "i2c1_post_div", base + 0x4880, 0);
+ clks[IMX7D_I2C2_ROOT_CLK] = imx_clk_gate4("i2c2_root_clk", "i2c2_post_div", base + 0x4890, 0);
+ clks[IMX7D_I2C3_ROOT_CLK] = imx_clk_gate4("i2c3_root_clk", "i2c3_post_div", base + 0x48a0, 0);
+ clks[IMX7D_I2C4_ROOT_CLK] = imx_clk_gate4("i2c4_root_clk", "i2c4_post_div", base + 0x48b0, 0);
+ clks[IMX7D_UART1_ROOT_CLK] = imx_clk_gate4("uart1_root_clk", "uart1_post_div", base + 0x4940, 0);
+ clks[IMX7D_UART2_ROOT_CLK] = imx_clk_gate4("uart2_root_clk", "uart2_post_div", base + 0x4950, 0);
+ clks[IMX7D_UART3_ROOT_CLK] = imx_clk_gate4("uart3_root_clk", "uart3_post_div", base + 0x4960, 0);
+ clks[IMX7D_UART4_ROOT_CLK] = imx_clk_gate4("uart4_root_clk", "uart4_post_div", base + 0x4970, 0);
+ clks[IMX7D_UART5_ROOT_CLK] = imx_clk_gate4("uart5_root_clk", "uart5_post_div", base + 0x4980, 0);
+ clks[IMX7D_UART6_ROOT_CLK] = imx_clk_gate4("uart6_root_clk", "uart6_post_div", base + 0x4990, 0);
+ clks[IMX7D_UART7_ROOT_CLK] = imx_clk_gate4("uart7_root_clk", "uart7_post_div", base + 0x49a0, 0);
+ clks[IMX7D_ECSPI1_ROOT_CLK] = imx_clk_gate4("ecspi1_root_clk", "ecspi1_post_div", base + 0x4780, 0);
+ clks[IMX7D_ECSPI2_ROOT_CLK] = imx_clk_gate4("ecspi2_root_clk", "ecspi2_post_div", base + 0x4790, 0);
+ clks[IMX7D_ECSPI3_ROOT_CLK] = imx_clk_gate4("ecspi3_root_clk", "ecspi3_post_div", base + 0x47a0, 0);
+ clks[IMX7D_ECSPI4_ROOT_CLK] = imx_clk_gate4("ecspi4_root_clk", "ecspi4_post_div", base + 0x47b0, 0);
+ clks[IMX7D_PWM1_ROOT_CLK] = imx_clk_gate4("pwm1_root_clk", "pwm1_post_div", base + 0x4840, 0);
+ clks[IMX7D_PWM2_ROOT_CLK] = imx_clk_gate4("pwm2_root_clk", "pwm2_post_div", base + 0x4850, 0);
+ clks[IMX7D_PWM3_ROOT_CLK] = imx_clk_gate4("pwm3_root_clk", "pwm3_post_div", base + 0x4860, 0);
+ clks[IMX7D_PWM4_ROOT_CLK] = imx_clk_gate4("pwm4_root_clk", "pwm4_post_div", base + 0x4870, 0);
+ clks[IMX7D_FLEXTIMER1_ROOT_CLK] = imx_clk_gate4("flextimer1_root_clk", "flextimer1_post_div", base + 0x4800, 0);
+ clks[IMX7D_FLEXTIMER2_ROOT_CLK] = imx_clk_gate4("flextimer2_root_clk", "flextimer2_post_div", base + 0x4810, 0);
+ clks[IMX7D_SIM1_ROOT_CLK] = imx_clk_gate4("sim1_root_clk", "sim1_post_div", base + 0x4900, 0);
+ clks[IMX7D_SIM2_ROOT_CLK] = imx_clk_gate4("sim2_root_clk", "sim2_post_div", base + 0x4910, 0);
+ clks[IMX7D_GPT1_ROOT_CLK] = imx_clk_gate4("gpt1_root_clk", "gpt1_post_div", base + 0x47c0, 0);
+ clks[IMX7D_GPT2_ROOT_CLK] = imx_clk_gate4("gpt2_root_clk", "gpt2_post_div", base + 0x47d0, 0);
+ clks[IMX7D_GPT3_ROOT_CLK] = imx_clk_gate4("gpt3_root_clk", "gpt3_post_div", base + 0x47e0, 0);
+ clks[IMX7D_GPT4_ROOT_CLK] = imx_clk_gate4("gpt4_root_clk", "gpt4_post_div", base + 0x47f0, 0);
+ clks[IMX7D_TRACE_ROOT_CLK] = imx_clk_gate4("trace_root_clk", "trace_post_div", base + 0x4300, 0);
+ clks[IMX7D_WDOG1_ROOT_CLK] = imx_clk_gate4("wdog1_root_clk", "wdog_post_div", base + 0x49c0, 0);
+ clks[IMX7D_WDOG2_ROOT_CLK] = imx_clk_gate4("wdog2_root_clk", "wdog_post_div", base + 0x49d0, 0);
+ clks[IMX7D_WDOG3_ROOT_CLK] = imx_clk_gate4("wdog3_root_clk", "wdog_post_div", base + 0x49e0, 0);
+ clks[IMX7D_WDOG4_ROOT_CLK] = imx_clk_gate4("wdog4_root_clk", "wdog_post_div", base + 0x49f0, 0);
+ clks[IMX7D_CSI_MCLK_ROOT_CLK] = imx_clk_gate4("csi_mclk_root_clk", "csi_mclk_post_div", base + 0x4490, 0);
+ clks[IMX7D_AUDIO_MCLK_ROOT_CLK] = imx_clk_gate4("audio_mclk_root_clk", "audio_mclk_post_div", base + 0x4790, 0);
+ clks[IMX7D_WRCLK_ROOT_CLK] = imx_clk_gate4("wrclk_root_clk", "wrclk_post_div", base + 0x47a0, 0);
+ clks[IMX7D_ADC_ROOT_CLK] = imx_clk_gate4("adc_root_clk", "ipg_root_clk", base + 0x4200, 0);
clks[IMX7D_GPT_3M_CLK] = imx_clk_fixed_factor("gpt_3m", "osc", 1, 8);
@@ -846,28 +854,13 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clk_data.clk_num = ARRAY_SIZE(clks);
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
- /* TO BE FIXED LATER
- * Enable all clock to bring up imx7, otherwise system will be halt and block
- * the other part upstream Because imx7d clock design changed, clock framework
- * need do a little modify.
- * Dong Aisheng is working on this. After that, this part need be changed.
- */
- for (i = 0; i < IMX7D_CLK_END; i++)
- clk_prepare_enable(clks[i]);
+ for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
+ clk_prepare_enable(clks[clks_init_on[i]]);
 /* keep the old GPT clock setting: the gpt1 root clock must be twice the GPT counter frequency */
clk_set_parent(clks[IMX7D_GPT1_ROOT_SRC], clks[IMX7D_OSC_24M_CLK]);
- /*
- * init enet clock source:
- * AXI clock source is 250MHz
- * Phy refrence clock is 25MHz
- * 1588 time clock source is 100MHz
- */
clk_set_parent(clks[IMX7D_ENET_AXI_ROOT_SRC], clks[IMX7D_PLL_ENET_MAIN_250M_CLK]);
- clk_set_parent(clks[IMX7D_ENET_PHY_REF_ROOT_SRC], clks[IMX7D_PLL_ENET_MAIN_25M_CLK]);
- clk_set_parent(clks[IMX7D_ENET1_TIME_ROOT_SRC], clks[IMX7D_PLL_ENET_MAIN_100M_CLK]);
- clk_set_parent(clks[IMX7D_ENET2_TIME_ROOT_SRC], clks[IMX7D_PLL_ENET_MAIN_100M_CLK]);
 /* set the uart module clock's parent clock source, which must be greater than 80MHz */
clk_set_parent(clks[IMX7D_UART1_ROOT_SRC], clks[IMX7D_OSC_24M_CLK]);
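
The indiscriminate clk_prepare_enable() of every clock is gone; only clocks that have no driver to manage them stay on. A minimal sketch of the whitelist pattern, assuming a clks_init_on array defined earlier in clk-imx7d.c — the entries below are illustrative, not the actual upstream list:

	/* Sketch only: the real clks_init_on lives in clk-imx7d.c;
	 * these entries are illustrative, not the upstream list. */
	static const int clks_init_on[] __initconst = {
		IMX7D_ARM_A7_ROOT_CLK,		/* CPU must never be gated */
		IMX7D_MAIN_AXI_ROOT_CLK,	/* main bus fabric */
		IMX7D_DRAM_ROOT_CLK,		/* DDR controller */
	};

	static void __init imx7d_enable_critical_clks(struct clk *clks[])
	{
		int i;

		/* Keep only ownerless, always-needed clocks on, instead
		 * of enabling all IMX7D_CLK_END clocks as before. */
		for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
			clk_prepare_enable(clks[clks_init_on[i]]);
	}
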
diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c
index 4826b3c9e..19f9b6229 100644
--- a/drivers/clk/imx/clk-pllv3.c
+++ b/drivers/clk/imx/clk-pllv3.c
@@ -29,8 +29,8 @@
* struct clk_pllv3 - IMX PLL clock version 3
* @clk_hw: clock source
* @base: base address of PLL registers
- * @powerup_set: set POWER bit to power up the PLL
- * @powerdown: pll powerdown offset bit
+ * @power_bit: pll power bit mask
+ * @powerup_set: set power_bit to power up the PLL
* @div_mask: mask of divider bits
* @div_shift: shift of divider bits
*
@@ -40,8 +40,8 @@
struct clk_pllv3 {
struct clk_hw hw;
void __iomem *base;
+ u32 power_bit;
bool powerup_set;
- u32 powerdown;
u32 div_mask;
u32 div_shift;
unsigned long ref_clock;
@@ -52,7 +52,7 @@ struct clk_pllv3 {
static int clk_pllv3_wait_lock(struct clk_pllv3 *pll)
{
unsigned long timeout = jiffies + msecs_to_jiffies(10);
- u32 val = readl_relaxed(pll->base) & pll->powerdown;
+ u32 val = readl_relaxed(pll->base) & pll->power_bit;
/* No need to wait for lock when pll is not powered up */
if ((pll->powerup_set && !val) || (!pll->powerup_set && val))
@@ -77,9 +77,9 @@ static int clk_pllv3_prepare(struct clk_hw *hw)
val = readl_relaxed(pll->base);
if (pll->powerup_set)
- val |= BM_PLL_POWER;
+ val |= pll->power_bit;
else
- val &= ~BM_PLL_POWER;
+ val &= ~pll->power_bit;
writel_relaxed(val, pll->base);
return clk_pllv3_wait_lock(pll);
@@ -92,9 +92,9 @@ static void clk_pllv3_unprepare(struct clk_hw *hw)
val = readl_relaxed(pll->base);
if (pll->powerup_set)
- val &= ~BM_PLL_POWER;
+ val &= ~pll->power_bit;
else
- val |= BM_PLL_POWER;
+ val |= pll->power_bit;
writel_relaxed(val, pll->base);
}
@@ -218,8 +218,12 @@ static unsigned long clk_pllv3_av_recalc_rate(struct clk_hw *hw,
u32 mfn = readl_relaxed(pll->base + PLL_NUM_OFFSET);
u32 mfd = readl_relaxed(pll->base + PLL_DENOM_OFFSET);
u32 div = readl_relaxed(pll->base) & pll->div_mask;
+ u64 temp64 = (u64)parent_rate;
- return (parent_rate * div) + ((parent_rate / mfd) * mfn);
+ temp64 *= mfn;
+ do_div(temp64, mfd);
+
+ return (parent_rate * div) + (u32)temp64;
}
static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
@@ -243,7 +247,7 @@ static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
do_div(temp64, parent_rate);
mfn = temp64;
- return parent_rate * div + parent_rate / mfd * mfn;
+ return parent_rate * div + parent_rate * mfn / mfd;
}
static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate,
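
Both fractional-PLL fixes above address the same truncation: the old code computed parent_rate / mfd * mfn, discarding the remainder before multiplying. The recalc path now does (rate * mfn) / mfd in 64 bits via do_div(), and the round path reorders to parent_rate * mfn / mfd so the division happens last. A self-contained sketch of the difference, with illustrative mfn/mfd values (not taken from any real board):

	#include <linux/math64.h>

	static unsigned long frac_part_old(unsigned long parent_rate,
					   u32 mfn, u32 mfd)
	{
		/* 24 MHz parent, mfn = 3, mfd = 30000000:
		 * 24000000 / 30000000 = 0, so the result is 0 Hz. */
		return parent_rate / mfd * mfn;
	}

	static unsigned long frac_part_new(unsigned long parent_rate,
					   u32 mfn, u32 mfd)
	{
		u64 temp64 = (u64)parent_rate * mfn;

		/* same inputs: 72000000 / 30000000 = 2 Hz, the
		 * correct full-precision (rate * mfn) / mfd */
		do_div(temp64, mfd);
		return (unsigned long)temp64;
	}
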
@@ -312,7 +316,7 @@ struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name,
if (!pll)
return ERR_PTR(-ENOMEM);
- pll->powerdown = BM_PLL_POWER;
+ pll->power_bit = BM_PLL_POWER;
switch (type) {
case IMX_PLLV3_SYS:
@@ -328,7 +332,7 @@ struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name,
ops = &clk_pllv3_av_ops;
break;
case IMX_PLLV3_ENET_IMX7:
- pll->powerdown = IMX7_ENET_PLL_POWER;
+ pll->power_bit = IMX7_ENET_PLL_POWER;
pll->ref_clock = 1000000000;
ops = &clk_pllv3_enet_ops;
break;
diff --git a/drivers/clk/imx/clk-vf610.c b/drivers/clk/imx/clk-vf610.c
index 3a1f24475..0476353ab 100644
--- a/drivers/clk/imx/clk-vf610.c
+++ b/drivers/clk/imx/clk-vf610.c
@@ -315,12 +315,12 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
clk[VF610_CLK_PIT] = imx_clk_gate2("pit", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(7));
- clk[VF610_CLK_UART0] = imx_clk_gate2("uart0", "ipg_bus", CCM_CCGR0, CCM_CCGRx_CGn(7));
- clk[VF610_CLK_UART1] = imx_clk_gate2("uart1", "ipg_bus", CCM_CCGR0, CCM_CCGRx_CGn(8));
- clk[VF610_CLK_UART2] = imx_clk_gate2("uart2", "ipg_bus", CCM_CCGR0, CCM_CCGRx_CGn(9));
- clk[VF610_CLK_UART3] = imx_clk_gate2("uart3", "ipg_bus", CCM_CCGR0, CCM_CCGRx_CGn(10));
- clk[VF610_CLK_UART4] = imx_clk_gate2("uart4", "ipg_bus", CCM_CCGR6, CCM_CCGRx_CGn(9));
- clk[VF610_CLK_UART5] = imx_clk_gate2("uart5", "ipg_bus", CCM_CCGR6, CCM_CCGRx_CGn(10));
+ clk[VF610_CLK_UART0] = imx_clk_gate2_cgr("uart0", "ipg_bus", CCM_CCGR0, CCM_CCGRx_CGn(7), 0x2);
+ clk[VF610_CLK_UART1] = imx_clk_gate2_cgr("uart1", "ipg_bus", CCM_CCGR0, CCM_CCGRx_CGn(8), 0x2);
+ clk[VF610_CLK_UART2] = imx_clk_gate2_cgr("uart2", "ipg_bus", CCM_CCGR0, CCM_CCGRx_CGn(9), 0x2);
+ clk[VF610_CLK_UART3] = imx_clk_gate2_cgr("uart3", "ipg_bus", CCM_CCGR0, CCM_CCGRx_CGn(10), 0x2);
+ clk[VF610_CLK_UART4] = imx_clk_gate2_cgr("uart4", "ipg_bus", CCM_CCGR6, CCM_CCGRx_CGn(9), 0x2);
+ clk[VF610_CLK_UART5] = imx_clk_gate2_cgr("uart5", "ipg_bus", CCM_CCGR6, CCM_CCGRx_CGn(10), 0x2);
clk[VF610_CLK_I2C0] = imx_clk_gate2("i2c0", "ipg_bus", CCM_CCGR4, CCM_CCGRx_CGn(6));
clk[VF610_CLK_I2C1] = imx_clk_gate2("i2c1", "ipg_bus", CCM_CCGR4, CCM_CCGRx_CGn(7));
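
The UART gates switch from imx_clk_gate2(), which hard-codes a CCGR enable value of 0x3, to imx_clk_gate2_cgr() with an explicit 0x2. Per the clk.h hunk further down, the two helpers are otherwise identical, so the entire change is the value written to the CCGR field on enable; 0x2 presumably keeps the clock on in run mode only (an assumption from the CCGR field encoding, not confirmed by this hunk). A sketch of the equivalence:

	/* Per clk.h below, imx_clk_gate2(name, parent, reg, shift) is
	 * exactly imx_clk_gate2_cgr(name, parent, reg, shift, 0x3). */
	static struct clk * __init vf610_uart0_gate_sketch(void)
	{
		/* old: wrote 0x3 to CCGR0[CGn(7)] when enabled;
		 * new: writes 0x2, assumed to mean "on in run mode
		 * only" per the CCGR field encoding */
		return imx_clk_gate2_cgr("uart0", "ipg_bus",
					 CCM_CCGR0, CCM_CCGRx_CGn(7), 0x2);
	}
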
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index 508d0fad8..a81c0385e 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -51,28 +51,6 @@ struct clk * imx_obtain_fixed_clock(
struct clk *imx_clk_gate_exclusive(const char *name, const char *parent,
void __iomem *reg, u8 shift, u32 exclusive_mask);
-static inline struct clk *imx_clk_gate2(const char *name, const char *parent,
- void __iomem *reg, u8 shift)
-{
- return clk_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
- shift, 0x3, 0, &imx_ccm_lock, NULL);
-}
-
-static inline struct clk *imx_clk_gate2_shared(const char *name,
- const char *parent, void __iomem *reg, u8 shift,
- unsigned int *share_count)
-{
- return clk_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
- shift, 0x3, 0, &imx_ccm_lock, share_count);
-}
-
-static inline struct clk *imx_clk_gate2_cgr(const char *name, const char *parent,
- void __iomem *reg, u8 shift, u8 cgr_val)
-{
- return clk_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
- shift, cgr_val, 0, &imx_ccm_lock, NULL);
-}
-
struct clk *imx_clk_pfd(const char *name, const char *parent_name,
void __iomem *reg, u8 idx);
@@ -97,6 +75,13 @@ static inline struct clk *imx_clk_fixed(const char *name, int rate)
return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
}
+static inline struct clk *imx_clk_fixed_factor(const char *name,
+ const char *parent, unsigned int mult, unsigned int div)
+{
+ return clk_register_fixed_factor(NULL, name, parent,
+ CLK_SET_RATE_PARENT, mult, div);
+}
+
static inline struct clk *imx_clk_divider(const char *name, const char *parent,
void __iomem *reg, u8 shift, u8 width)
{
@@ -112,6 +97,14 @@ static inline struct clk *imx_clk_divider_flags(const char *name,
reg, shift, width, 0, &imx_ccm_lock);
}
+static inline struct clk *imx_clk_divider2(const char *name, const char *parent,
+ void __iomem *reg, u8 shift, u8 width)
+{
+ return clk_register_divider(NULL, name, parent,
+ CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ reg, shift, width, 0, &imx_ccm_lock);
+}
+
static inline struct clk *imx_clk_gate(const char *name, const char *parent,
void __iomem *reg, u8 shift)
{
@@ -126,6 +119,44 @@ static inline struct clk *imx_clk_gate_dis(const char *name, const char *parent,
shift, CLK_GATE_SET_TO_DISABLE, &imx_ccm_lock);
}
+static inline struct clk *imx_clk_gate2(const char *name, const char *parent,
+ void __iomem *reg, u8 shift)
+{
+ return clk_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
+ shift, 0x3, 0, &imx_ccm_lock, NULL);
+}
+
+static inline struct clk *imx_clk_gate2_shared(const char *name,
+ const char *parent, void __iomem *reg, u8 shift,
+ unsigned int *share_count)
+{
+ return clk_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
+ shift, 0x3, 0, &imx_ccm_lock, share_count);
+}
+
+static inline struct clk *imx_clk_gate2_cgr(const char *name,
+ const char *parent, void __iomem *reg, u8 shift, u8 cgr_val)
+{
+ return clk_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
+ shift, cgr_val, 0, &imx_ccm_lock, NULL);
+}
+
+static inline struct clk *imx_clk_gate3(const char *name, const char *parent,
+ void __iomem *reg, u8 shift)
+{
+ return clk_register_gate(NULL, name, parent,
+ CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ reg, shift, 0, &imx_ccm_lock);
+}
+
+static inline struct clk *imx_clk_gate4(const char *name, const char *parent,
+ void __iomem *reg, u8 shift)
+{
+ return clk_register_gate2(NULL, name, parent,
+ CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ reg, shift, 0x3, 0, &imx_ccm_lock, NULL);
+}
+
static inline struct clk *imx_clk_mux(const char *name, void __iomem *reg,
u8 shift, u8 width, const char **parents, int num_parents)
{
@@ -134,6 +165,14 @@ static inline struct clk *imx_clk_mux(const char *name, void __iomem *reg,
width, 0, &imx_ccm_lock);
}
+static inline struct clk *imx_clk_mux2(const char *name, void __iomem *reg,
+ u8 shift, u8 width, const char **parents, int num_parents)
+{
+ return clk_register_mux(NULL, name, parents, num_parents,
+ CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE,
+ reg, shift, width, 0, &imx_ccm_lock);
+}
+
static inline struct clk *imx_clk_mux_flags(const char *name,
void __iomem *reg, u8 shift, u8 width, const char **parents,
int num_parents, unsigned long flags)
@@ -143,13 +182,6 @@ static inline struct clk *imx_clk_mux_flags(const char *name,
&imx_ccm_lock);
}
-static inline struct clk *imx_clk_fixed_factor(const char *name,
- const char *parent, unsigned int mult, unsigned int div)
-{
- return clk_register_fixed_factor(NULL, name, parent,
- CLK_SET_RATE_PARENT, mult, div);
-}
-
struct clk *imx_clk_cpu(const char *name, const char *parent_name,
struct clk *div, struct clk *mux, struct clk *pll,
struct clk *step);
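
Everything new in this header (imx_clk_mux2/divider2/gate3/gate4) differs from its existing counterpart only by CLK_OPS_PARENT_ENABLE, which tells the clock core to enable the parent around rate, reparent, and gate operations — this is what lets the imx7d init code stop force-enabling every clock. A sketch of how one i.MX7D root slice is assembled from these helpers; the name "demo" and offset 0xc000 are hypothetical, but the bit layout (mux at 24, gate at 28, pre-divider at 16, post-divider at 0, all in one register) mirrors the clk-imx7d.c hunk above:

	/* Hypothetical root slice "demo" at hypothetical offset 0xc000;
	 * the field layout copies the real imx7d root slices above. */
	static struct clk * __init imx7d_demo_root_sketch(void __iomem *base,
			const char **sels, int num_sels)
	{
		imx_clk_mux2("demo_src", base + 0xc000, 24, 3, sels, num_sels);
		imx_clk_gate3("demo_cg", "demo_src", base + 0xc000, 28);
		imx_clk_divider2("demo_pre_div", "demo_cg", base + 0xc000, 16, 3);

		/* the post divider is the slice output handed to consumers */
		return imx_clk_divider2("demo_post_div", "demo_pre_div",
					base + 0xc000, 0, 6);
	}
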
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
new file mode 100644
index 000000000..19480bcc7
--- /dev/null
+++ b/drivers/clk/meson/Kconfig
@@ -0,0 +1,19 @@
+config COMMON_CLK_AMLOGIC
+ bool
+ depends on OF
+ depends on ARCH_MESON || COMPILE_TEST
+
+config COMMON_CLK_MESON8B
+ bool
+ depends on COMMON_CLK_AMLOGIC
+ help
+ Support for the clock controller on AmLogic S805 devices, aka
+ meson8b. Say Y if you want peripherals and CPU frequency scaling to
+ work.
+
+config COMMON_CLK_GXBB
+ bool
+ depends on COMMON_CLK_AMLOGIC
+ help
+ Support for the clock controller on AmLogic S905 devices, aka gxbb.
+ Say Y if you want peripherals and CPU frequency scaling to work.
diff --git a/drivers/clk/meson/Makefile b/drivers/clk/meson/Makefile
index 6d45531df..197e40175 100644
--- a/drivers/clk/meson/Makefile
+++ b/drivers/clk/meson/Makefile
@@ -2,5 +2,6 @@
# Makefile for Meson specific clk
#
-obj-y += clkc.o clk-pll.o clk-cpu.o
-obj-y += meson8b-clkc.o
+obj-$(CONFIG_COMMON_CLK_AMLOGIC) += clk-pll.o clk-cpu.o clk-mpll.o
+obj-$(CONFIG_COMMON_CLK_MESON8B) += meson8b-clkc.o
+obj-$(CONFIG_COMMON_CLK_GXBB) += gxbb.o
diff --git a/drivers/clk/meson/clk-cpu.c b/drivers/clk/meson/clk-cpu.c
index f7c30ea54..f8b2b7efd 100644
--- a/drivers/clk/meson/clk-cpu.c
+++ b/drivers/clk/meson/clk-cpu.c
@@ -51,13 +51,6 @@
#include "clkc.h"
-struct meson_clk_cpu {
- struct notifier_block clk_nb;
- const struct clk_div_table *div_table;
- struct clk_hw hw;
- void __iomem *base;
- u16 reg_off;
-};
#define to_meson_clk_cpu_hw(_hw) container_of(_hw, struct meson_clk_cpu, hw)
#define to_meson_clk_cpu_nb(_nb) container_of(_nb, struct meson_clk_cpu, clk_nb)
@@ -119,6 +112,7 @@ static unsigned long meson_clk_cpu_recalc_rate(struct clk_hw *hw,
return parent_rate / div;
}
+/* FIXME MUX1 & MUX2 should be struct clk_hw objects */
static int meson_clk_cpu_pre_rate_change(struct meson_clk_cpu *clk_cpu,
struct clk_notifier_data *ndata)
{
@@ -140,6 +134,7 @@ static int meson_clk_cpu_pre_rate_change(struct meson_clk_cpu *clk_cpu,
return 0;
}
+/* FIXME MUX1 & MUX2 should be struct clk_hw objects */
static int meson_clk_cpu_post_rate_change(struct meson_clk_cpu *clk_cpu,
struct clk_notifier_data *ndata)
{
@@ -161,7 +156,7 @@ static int meson_clk_cpu_post_rate_change(struct meson_clk_cpu *clk_cpu,
* PLL clock is to be changed. We use the xtal input as temporary parent
* while the PLL frequency is stabilized.
*/
-static int meson_clk_cpu_notifier_cb(struct notifier_block *nb,
+int meson_clk_cpu_notifier_cb(struct notifier_block *nb,
unsigned long event, void *data)
{
struct clk_notifier_data *ndata = data;
@@ -176,68 +171,8 @@ static int meson_clk_cpu_notifier_cb(struct notifier_block *nb,
return notifier_from_errno(ret);
}
-static const struct clk_ops meson_clk_cpu_ops = {
+const struct clk_ops meson_clk_cpu_ops = {
.recalc_rate = meson_clk_cpu_recalc_rate,
.round_rate = meson_clk_cpu_round_rate,
.set_rate = meson_clk_cpu_set_rate,
};
-
-struct clk *meson_clk_register_cpu(const struct clk_conf *clk_conf,
- void __iomem *reg_base,
- spinlock_t *lock)
-{
- struct clk *clk;
- struct clk *pclk;
- struct meson_clk_cpu *clk_cpu;
- struct clk_init_data init;
- int ret;
-
- clk_cpu = kzalloc(sizeof(*clk_cpu), GFP_KERNEL);
- if (!clk_cpu)
- return ERR_PTR(-ENOMEM);
-
- clk_cpu->base = reg_base;
- clk_cpu->reg_off = clk_conf->reg_off;
- clk_cpu->div_table = clk_conf->conf.div_table;
- clk_cpu->clk_nb.notifier_call = meson_clk_cpu_notifier_cb;
-
- init.name = clk_conf->clk_name;
- init.ops = &meson_clk_cpu_ops;
- init.flags = clk_conf->flags | CLK_GET_RATE_NOCACHE;
- init.flags |= CLK_SET_RATE_PARENT;
- init.parent_names = clk_conf->clks_parent;
- init.num_parents = 1;
-
- clk_cpu->hw.init = &init;
-
- pclk = __clk_lookup(clk_conf->clks_parent[0]);
- if (!pclk) {
- pr_err("%s: could not lookup parent clock %s\n",
- __func__, clk_conf->clks_parent[0]);
- ret = -EINVAL;
- goto free_clk;
- }
-
- ret = clk_notifier_register(pclk, &clk_cpu->clk_nb);
- if (ret) {
- pr_err("%s: failed to register clock notifier for %s\n",
- __func__, clk_conf->clk_name);
- goto free_clk;
- }
-
- clk = clk_register(NULL, &clk_cpu->hw);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- goto unregister_clk_nb;
- }
-
- return clk;
-
-unregister_clk_nb:
- clk_notifier_unregister(pclk, &clk_cpu->clk_nb);
-free_clk:
- kfree(clk_cpu);
-
- return ERR_PTR(ret);
-}
-
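
With meson_clk_register_cpu() removed, the notifier callback and the ops are exported and each SoC driver now declares its CPU clock statically and wires up the notifier itself. A sketch of the new pattern (the gxbb probe further down does exactly this; the 0x15c offset and cpu_div_table are illustrative and assumed to be provided by the SoC driver):

static struct meson_clk_cpu example_cpu_clk = {
	.reg_off = 0x15c,			/* illustrative offset */
	.div_table = cpu_div_table,		/* assumed defined by the SoC driver */
	.clk_nb.notifier_call = meson_clk_cpu_notifier_cb,
	.hw.init = &(struct clk_init_data){
		.name = "cpu_clk",
		.ops = &meson_clk_cpu_ops,
		.parent_names = (const char *[]){ "sys_pll" },
		.num_parents = 1,
	},
};

static int example_register_cpu_notifier(void)
{
	/* hook the rate-change notifier onto the parent PLL */
	struct clk *parent = clk_hw_get_parent(&example_cpu_clk.hw)->clk;

	return clk_notifier_register(parent, &example_cpu_clk.clk_nb);
}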
diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c
new file mode 100644
index 000000000..03af79005
--- /dev/null
+++ b/drivers/clk/meson/clk-mpll.c
@@ -0,0 +1,94 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (c) 2016 AmLogic, Inc.
+ * Author: Michael Turquette <mturquette@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING
+ *
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 AmLogic, Inc.
+ * Author: Michael Turquette <mturquette@baylibre.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * MultiPhase Locked Loops are outputs from a PLL with additional frequency
+ * scaling capabilities. MPLL rates are calculated as:
+ *
+ * f(N2_integer, SDM_IN) = 2.0G/(N2_integer + SDM_IN/16384)
+ */
+
+#include <linux/clk-provider.h>
+#include "clkc.h"
+
+#define SDM_MAX 16384
+
+#define to_meson_clk_mpll(_hw) container_of(_hw, struct meson_clk_mpll, hw)
+
+static unsigned long mpll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct meson_clk_mpll *mpll = to_meson_clk_mpll(hw);
+ struct parm *p;
+ unsigned long rate = 0;
+ unsigned long reg, sdm, n2;
+
+ p = &mpll->sdm;
+ reg = readl(mpll->base + p->reg_off);
+ sdm = PARM_GET(p->width, p->shift, reg);
+
+ p = &mpll->n2;
+ reg = readl(mpll->base + p->reg_off);
+ n2 = PARM_GET(p->width, p->shift, reg);
+
+ rate = (parent_rate * SDM_MAX) / ((SDM_MAX * n2) + sdm);
+
+ return rate;
+}
+
+const struct clk_ops meson_clk_mpll_ro_ops = {
+ .recalc_rate = mpll_recalc_rate,
+};
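
To make the formula in the comment concrete, a worked example with illustrative field values; the 2 GHz parent comes from the comment above, and the arithmetic assumes a 64-bit unsigned long, since the intermediate multiplication overflows 32 bits:

static unsigned long example_mpll_rate(void)
{
	unsigned long parent_rate = 2000000000UL;	/* fixed_pll output */
	unsigned long n2 = 12, sdm = 8192;		/* illustrative values */

	/* 2.0G / (12 + 8192/16384) = 2.0G / 12.5 = 160 MHz */
	return (parent_rate * SDM_MAX) / ((SDM_MAX * n2) + sdm);
}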
diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c
index 664edf070..4adc1e892 100644
--- a/drivers/clk/meson/clk-pll.c
+++ b/drivers/clk/meson/clk-pll.c
@@ -44,13 +44,6 @@
#define MESON_PLL_RESET BIT(29)
#define MESON_PLL_LOCK BIT(31)
-struct meson_clk_pll {
- struct clk_hw hw;
- void __iomem *base;
- struct pll_conf *conf;
- unsigned int rate_count;
- spinlock_t *lock;
-};
#define to_meson_clk_pll(_hw) container_of(_hw, struct meson_clk_pll, hw)
static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw,
@@ -60,22 +53,36 @@ static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw,
struct parm *p;
unsigned long parent_rate_mhz = parent_rate / 1000000;
unsigned long rate_mhz;
- u16 n, m, od;
+ u16 n, m, frac = 0, od, od2 = 0;
u32 reg;
- p = &pll->conf->n;
+ p = &pll->n;
reg = readl(pll->base + p->reg_off);
n = PARM_GET(p->width, p->shift, reg);
- p = &pll->conf->m;
+ p = &pll->m;
reg = readl(pll->base + p->reg_off);
m = PARM_GET(p->width, p->shift, reg);
- p = &pll->conf->od;
+ p = &pll->od;
reg = readl(pll->base + p->reg_off);
od = PARM_GET(p->width, p->shift, reg);
- rate_mhz = (parent_rate_mhz * m / n) >> od;
+ p = &pll->od2;
+ if (p->width) {
+ reg = readl(pll->base + p->reg_off);
+ od2 = PARM_GET(p->width, p->shift, reg);
+ }
+
+ p = &pll->frac;
+ if (p->width) {
+ reg = readl(pll->base + p->reg_off);
+ frac = PARM_GET(p->width, p->shift, reg);
+		rate_mhz = (parent_rate_mhz * m +
+			    (parent_rate_mhz * frac >> 12)) * 2 / n;
+		rate_mhz = rate_mhz >> od >> od2;
+	} else {
+		rate_mhz = (parent_rate_mhz * m / n) >> od >> od2;
+	}
return rate_mhz * 1000000;
}
@@ -84,7 +91,7 @@ static long meson_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct meson_clk_pll *pll = to_meson_clk_pll(hw);
- const struct pll_rate_table *rate_table = pll->conf->rate_table;
+ const struct pll_rate_table *rate_table = pll->rate_table;
int i;
for (i = 0; i < pll->rate_count; i++) {
@@ -99,7 +106,7 @@ static long meson_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
static const struct pll_rate_table *meson_clk_get_pll_settings(struct meson_clk_pll *pll,
unsigned long rate)
{
- const struct pll_rate_table *rate_table = pll->conf->rate_table;
+ const struct pll_rate_table *rate_table = pll->rate_table;
int i;
for (i = 0; i < pll->rate_count; i++) {
@@ -145,24 +152,38 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return -EINVAL;
/* PLL reset */
- p = &pll->conf->n;
+ p = &pll->n;
reg = readl(pll->base + p->reg_off);
writel(reg | MESON_PLL_RESET, pll->base + p->reg_off);
reg = PARM_SET(p->width, p->shift, reg, rate_set->n);
writel(reg, pll->base + p->reg_off);
- p = &pll->conf->m;
+ p = &pll->m;
reg = readl(pll->base + p->reg_off);
reg = PARM_SET(p->width, p->shift, reg, rate_set->m);
writel(reg, pll->base + p->reg_off);
- p = &pll->conf->od;
+ p = &pll->od;
reg = readl(pll->base + p->reg_off);
reg = PARM_SET(p->width, p->shift, reg, rate_set->od);
writel(reg, pll->base + p->reg_off);
- p = &pll->conf->n;
+ p = &pll->od2;
+ if (p->width) {
+ reg = readl(pll->base + p->reg_off);
+ reg = PARM_SET(p->width, p->shift, reg, rate_set->od2);
+ writel(reg, pll->base + p->reg_off);
+ }
+
+ p = &pll->frac;
+ if (p->width) {
+ reg = readl(pll->base + p->reg_off);
+ reg = PARM_SET(p->width, p->shift, reg, rate_set->frac);
+ writel(reg, pll->base + p->reg_off);
+ }
+
+ p = &pll->n;
ret = meson_clk_pll_wait_lock(pll, p);
if (ret) {
pr_warn("%s: pll did not lock, trying to restore old rate %lu\n",
@@ -173,55 +194,12 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return ret;
}
-static const struct clk_ops meson_clk_pll_ops = {
+const struct clk_ops meson_clk_pll_ops = {
.recalc_rate = meson_clk_pll_recalc_rate,
.round_rate = meson_clk_pll_round_rate,
.set_rate = meson_clk_pll_set_rate,
};
-static const struct clk_ops meson_clk_pll_ro_ops = {
+const struct clk_ops meson_clk_pll_ro_ops = {
.recalc_rate = meson_clk_pll_recalc_rate,
};
-
-struct clk *meson_clk_register_pll(const struct clk_conf *clk_conf,
- void __iomem *reg_base,
- spinlock_t *lock)
-{
- struct clk *clk;
- struct meson_clk_pll *clk_pll;
- struct clk_init_data init;
-
- clk_pll = kzalloc(sizeof(*clk_pll), GFP_KERNEL);
- if (!clk_pll)
- return ERR_PTR(-ENOMEM);
-
- clk_pll->base = reg_base + clk_conf->reg_off;
- clk_pll->lock = lock;
- clk_pll->conf = clk_conf->conf.pll;
-
- init.name = clk_conf->clk_name;
- init.flags = clk_conf->flags | CLK_GET_RATE_NOCACHE;
-
- init.parent_names = &clk_conf->clks_parent[0];
- init.num_parents = 1;
- init.ops = &meson_clk_pll_ro_ops;
-
- /* If no rate_table is specified we assume the PLL is read-only */
- if (clk_pll->conf->rate_table) {
- int len;
-
- for (len = 0; clk_pll->conf->rate_table[len].rate != 0; )
- len++;
-
- clk_pll->rate_count = len;
- init.ops = &meson_clk_pll_ops;
- }
-
- clk_pll->hw.init = &init;
-
- clk = clk_register(NULL, &clk_pll->hw);
- if (IS_ERR(clk))
- kfree(clk_pll);
-
- return clk;
-}
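
The fractional path added to meson_clk_pll_recalc_rate() is easiest to follow with numbers; a worked example using made-up field values:

static unsigned long example_frac_pll_mhz(void)
{
	unsigned long parent_rate_mhz = 24;		/* xtal */
	unsigned long m = 40, frac = 2048, n = 1;	/* illustrative values */
	unsigned long od = 1, od2 = 1;
	unsigned long rate_mhz;

	/* (24 * 40 + ((24 * 2048) >> 12)) * 2 / 1 = (960 + 12) * 2 = 1944 */
	rate_mhz = (parent_rate_mhz * m +
		    (parent_rate_mhz * frac >> 12)) * 2 / n;

	/* each output divider halves the rate: 1944 >> 1 >> 1 = 486 MHz */
	return rate_mhz >> od >> od2;
}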
diff --git a/drivers/clk/meson/clkc.h b/drivers/clk/meson/clkc.h
index 609ae92cc..53326c32e 100644
--- a/drivers/clk/meson/clkc.h
+++ b/drivers/clk/meson/clkc.h
@@ -34,19 +34,16 @@ struct parm {
u8 shift;
u8 width;
};
-#define PARM(_r, _s, _w) \
- { \
- .reg_off = (_r), \
- .shift = (_s), \
- .width = (_w), \
- } \
struct pll_rate_table {
unsigned long rate;
u16 m;
u16 n;
u16 od;
+ u16 od2;
+ u16 frac;
};
+
#define PLL_RATE(_r, _m, _n, _od) \
{ \
.rate = (_r), \
@@ -55,133 +52,69 @@ struct pll_rate_table {
.od = (_od), \
} \
-struct pll_conf {
- const struct pll_rate_table *rate_table;
- struct parm m;
- struct parm n;
- struct parm od;
-};
+#define PLL_FRAC_RATE(_r, _m, _n, _od, _od2, _frac) \
+ { \
+ .rate = (_r), \
+ .m = (_m), \
+ .n = (_n), \
+ .od = (_od), \
+ .od2 = (_od2), \
+ .frac = (_frac), \
+ } \
-struct fixed_fact_conf {
- unsigned int div;
- unsigned int mult;
- struct parm div_parm;
- struct parm mult_parm;
+struct meson_clk_pll {
+ struct clk_hw hw;
+ void __iomem *base;
+ struct parm m;
+ struct parm n;
+ struct parm frac;
+ struct parm od;
+ struct parm od2;
+ const struct pll_rate_table *rate_table;
+ unsigned int rate_count;
+ spinlock_t *lock;
};
-struct fixed_rate_conf {
- unsigned long rate;
- struct parm rate_parm;
-};
+#define to_meson_clk_pll(_hw) container_of(_hw, struct meson_clk_pll, hw)
-struct composite_conf {
- struct parm mux_parm;
- struct parm div_parm;
- struct parm gate_parm;
- struct clk_div_table *div_table;
- u32 *mux_table;
- u8 mux_flags;
- u8 div_flags;
- u8 gate_flags;
+struct meson_clk_cpu {
+ struct clk_hw hw;
+ void __iomem *base;
+ u16 reg_off;
+ struct notifier_block clk_nb;
+ const struct clk_div_table *div_table;
};
-#define PNAME(x) static const char *x[]
+int meson_clk_cpu_notifier_cb(struct notifier_block *nb, unsigned long event,
+ void *data);
-enum clk_type {
- CLK_FIXED_FACTOR,
- CLK_FIXED_RATE,
- CLK_COMPOSITE,
- CLK_CPU,
- CLK_PLL,
+struct meson_clk_mpll {
+ struct clk_hw hw;
+ void __iomem *base;
+ struct parm sdm;
+ struct parm n2;
+ /* FIXME ssen gate control? */
+ spinlock_t *lock;
};
-struct clk_conf {
- u16 reg_off;
- enum clk_type clk_type;
- unsigned int clk_id;
- const char *clk_name;
- const char **clks_parent;
- int num_parents;
- unsigned long flags;
- union {
- struct fixed_fact_conf fixed_fact;
- struct fixed_rate_conf fixed_rate;
- const struct composite_conf *composite;
- struct pll_conf *pll;
- const struct clk_div_table *div_table;
- } conf;
+#define MESON_GATE(_name, _reg, _bit) \
+struct clk_gate gxbb_##_name = { \
+ .reg = (void __iomem *) _reg, \
+ .bit_idx = (_bit), \
+ .lock = &clk_lock, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = #_name, \
+ .ops = &clk_gate_ops, \
+ .parent_names = (const char *[]){ "clk81" }, \
+ .num_parents = 1, \
+ .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED), \
+ }, \
};
-#define FIXED_RATE_P(_ro, _ci, _cn, _f, _c) \
- { \
- .reg_off = (_ro), \
- .clk_type = CLK_FIXED_RATE, \
- .clk_id = (_ci), \
- .clk_name = (_cn), \
- .flags = (_f), \
- .conf.fixed_rate.rate_parm = _c, \
- } \
-
-#define FIXED_RATE(_ci, _cn, _f, _r) \
- { \
- .clk_type = CLK_FIXED_RATE, \
- .clk_id = (_ci), \
- .clk_name = (_cn), \
- .flags = (_f), \
- .conf.fixed_rate.rate = (_r), \
- } \
-
-#define PLL(_ro, _ci, _cn, _cp, _f, _c) \
- { \
- .reg_off = (_ro), \
- .clk_type = CLK_PLL, \
- .clk_id = (_ci), \
- .clk_name = (_cn), \
- .clks_parent = (_cp), \
- .num_parents = ARRAY_SIZE(_cp), \
- .flags = (_f), \
- .conf.pll = (_c), \
- } \
-
-#define FIXED_FACTOR_DIV(_ci, _cn, _cp, _f, _d) \
- { \
- .clk_type = CLK_FIXED_FACTOR, \
- .clk_id = (_ci), \
- .clk_name = (_cn), \
- .clks_parent = (_cp), \
- .num_parents = ARRAY_SIZE(_cp), \
- .conf.fixed_fact.div = (_d), \
- } \
-
-#define CPU(_ro, _ci, _cn, _cp, _dt) \
- { \
- .reg_off = (_ro), \
- .clk_type = CLK_CPU, \
- .clk_id = (_ci), \
- .clk_name = (_cn), \
- .clks_parent = (_cp), \
- .num_parents = ARRAY_SIZE(_cp), \
- .conf.div_table = (_dt), \
- } \
-
-#define COMPOSITE(_ro, _ci, _cn, _cp, _f, _c) \
- { \
- .reg_off = (_ro), \
- .clk_type = CLK_COMPOSITE, \
- .clk_id = (_ci), \
- .clk_name = (_cn), \
- .clks_parent = (_cp), \
- .num_parents = ARRAY_SIZE(_cp), \
- .flags = (_f), \
- .conf.composite = (_c), \
- } \
-
-struct clk **meson_clk_init(struct device_node *np, unsigned long nr_clks);
-void meson_clk_register_clks(const struct clk_conf *clk_confs,
- unsigned int nr_confs, void __iomem *clk_base);
-struct clk *meson_clk_register_cpu(const struct clk_conf *clk_conf,
- void __iomem *reg_base, spinlock_t *lock);
-struct clk *meson_clk_register_pll(const struct clk_conf *clk_conf,
- void __iomem *reg_base, spinlock_t *lock);
+/* clk_ops */
+extern const struct clk_ops meson_clk_pll_ro_ops;
+extern const struct clk_ops meson_clk_pll_ops;
+extern const struct clk_ops meson_clk_cpu_ops;
+extern const struct clk_ops meson_clk_mpll_ro_ops;
#endif /* __CLKC_H */
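
Since the new MESON_GATE() macro is instantiated dozens of times in gxbb.c below, it helps to see one invocation written out. This is the expansion of MESON_GATE(spi, HHI_GCLK_MPEG0, 30), modulo whitespace; clk_lock is the spinlock the including file is expected to define:

struct clk_gate gxbb_spi = {
	.reg = (void __iomem *) HHI_GCLK_MPEG0,	/* rewritten to base + offset in .probe */
	.bit_idx = 30,
	.lock = &clk_lock,
	.hw.init = &(struct clk_init_data) {
		.name = "spi",
		.ops = &clk_gate_ops,
		.parent_names = (const char *[]){ "clk81" },
		.num_parents = 1,
		.flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED),
	},
};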
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
new file mode 100644
index 000000000..a4c6684b3
--- /dev/null
+++ b/drivers/clk/meson/gxbb.c
@@ -0,0 +1,944 @@
+/*
+ * AmLogic S905 / GXBB Clock Controller Driver
+ *
+ * Copyright (c) 2016 AmLogic, Inc.
+ * Michael Turquette <mturquette@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+
+#include "clkc.h"
+#include "gxbb.h"
+
+static DEFINE_SPINLOCK(clk_lock);
+
+static const struct pll_rate_table sys_pll_rate_table[] = {
+ PLL_RATE(24000000, 56, 1, 2),
+ PLL_RATE(48000000, 64, 1, 2),
+ PLL_RATE(72000000, 72, 1, 2),
+ PLL_RATE(96000000, 64, 1, 2),
+ PLL_RATE(120000000, 80, 1, 2),
+ PLL_RATE(144000000, 96, 1, 2),
+ PLL_RATE(168000000, 56, 1, 1),
+ PLL_RATE(192000000, 64, 1, 1),
+ PLL_RATE(216000000, 72, 1, 1),
+ PLL_RATE(240000000, 80, 1, 1),
+ PLL_RATE(264000000, 88, 1, 1),
+ PLL_RATE(288000000, 96, 1, 1),
+ PLL_RATE(312000000, 52, 1, 2),
+ PLL_RATE(336000000, 56, 1, 2),
+ PLL_RATE(360000000, 60, 1, 2),
+ PLL_RATE(384000000, 64, 1, 2),
+ PLL_RATE(408000000, 68, 1, 2),
+ PLL_RATE(432000000, 72, 1, 2),
+ PLL_RATE(456000000, 76, 1, 2),
+ PLL_RATE(480000000, 80, 1, 2),
+ PLL_RATE(504000000, 84, 1, 2),
+ PLL_RATE(528000000, 88, 1, 2),
+ PLL_RATE(552000000, 92, 1, 2),
+ PLL_RATE(576000000, 96, 1, 2),
+ PLL_RATE(600000000, 50, 1, 1),
+ PLL_RATE(624000000, 52, 1, 1),
+ PLL_RATE(648000000, 54, 1, 1),
+ PLL_RATE(672000000, 56, 1, 1),
+ PLL_RATE(696000000, 58, 1, 1),
+ PLL_RATE(720000000, 60, 1, 1),
+ PLL_RATE(744000000, 62, 1, 1),
+ PLL_RATE(768000000, 64, 1, 1),
+ PLL_RATE(792000000, 66, 1, 1),
+ PLL_RATE(816000000, 68, 1, 1),
+ PLL_RATE(840000000, 70, 1, 1),
+ PLL_RATE(864000000, 72, 1, 1),
+ PLL_RATE(888000000, 74, 1, 1),
+ PLL_RATE(912000000, 76, 1, 1),
+ PLL_RATE(936000000, 78, 1, 1),
+ PLL_RATE(960000000, 80, 1, 1),
+ PLL_RATE(984000000, 82, 1, 1),
+ PLL_RATE(1008000000, 84, 1, 1),
+ PLL_RATE(1032000000, 86, 1, 1),
+ PLL_RATE(1056000000, 88, 1, 1),
+ PLL_RATE(1080000000, 90, 1, 1),
+ PLL_RATE(1104000000, 92, 1, 1),
+ PLL_RATE(1128000000, 94, 1, 1),
+ PLL_RATE(1152000000, 96, 1, 1),
+ PLL_RATE(1176000000, 98, 1, 1),
+ PLL_RATE(1200000000, 50, 1, 0),
+ PLL_RATE(1224000000, 51, 1, 0),
+ PLL_RATE(1248000000, 52, 1, 0),
+ PLL_RATE(1272000000, 53, 1, 0),
+ PLL_RATE(1296000000, 54, 1, 0),
+ PLL_RATE(1320000000, 55, 1, 0),
+ PLL_RATE(1344000000, 56, 1, 0),
+ PLL_RATE(1368000000, 57, 1, 0),
+ PLL_RATE(1392000000, 58, 1, 0),
+ PLL_RATE(1416000000, 59, 1, 0),
+ PLL_RATE(1440000000, 60, 1, 0),
+ PLL_RATE(1464000000, 61, 1, 0),
+ PLL_RATE(1488000000, 62, 1, 0),
+ PLL_RATE(1512000000, 63, 1, 0),
+ PLL_RATE(1536000000, 64, 1, 0),
+ PLL_RATE(1560000000, 65, 1, 0),
+ PLL_RATE(1584000000, 66, 1, 0),
+ PLL_RATE(1608000000, 67, 1, 0),
+ PLL_RATE(1632000000, 68, 1, 0),
+ PLL_RATE(1656000000, 68, 1, 0),
+ PLL_RATE(1680000000, 68, 1, 0),
+ PLL_RATE(1704000000, 68, 1, 0),
+ PLL_RATE(1728000000, 69, 1, 0),
+ PLL_RATE(1752000000, 69, 1, 0),
+ PLL_RATE(1776000000, 69, 1, 0),
+ PLL_RATE(1800000000, 69, 1, 0),
+ PLL_RATE(1824000000, 70, 1, 0),
+ PLL_RATE(1848000000, 70, 1, 0),
+ PLL_RATE(1872000000, 70, 1, 0),
+ PLL_RATE(1896000000, 70, 1, 0),
+ PLL_RATE(1920000000, 71, 1, 0),
+ PLL_RATE(1944000000, 71, 1, 0),
+ PLL_RATE(1968000000, 71, 1, 0),
+ PLL_RATE(1992000000, 71, 1, 0),
+ PLL_RATE(2016000000, 72, 1, 0),
+ PLL_RATE(2040000000, 72, 1, 0),
+ PLL_RATE(2064000000, 72, 1, 0),
+ PLL_RATE(2088000000, 72, 1, 0),
+ PLL_RATE(2112000000, 73, 1, 0),
+ { /* sentinel */ },
+};
+
+static const struct pll_rate_table gp0_pll_rate_table[] = {
+ PLL_RATE(96000000, 32, 1, 3),
+ PLL_RATE(99000000, 33, 1, 3),
+ PLL_RATE(102000000, 34, 1, 3),
+ PLL_RATE(105000000, 35, 1, 3),
+ PLL_RATE(108000000, 36, 1, 3),
+ PLL_RATE(111000000, 37, 1, 3),
+ PLL_RATE(114000000, 38, 1, 3),
+ PLL_RATE(117000000, 39, 1, 3),
+ PLL_RATE(120000000, 40, 1, 3),
+ PLL_RATE(123000000, 41, 1, 3),
+ PLL_RATE(126000000, 42, 1, 3),
+ PLL_RATE(129000000, 43, 1, 3),
+ PLL_RATE(132000000, 44, 1, 3),
+ PLL_RATE(135000000, 45, 1, 3),
+ PLL_RATE(138000000, 46, 1, 3),
+ PLL_RATE(141000000, 47, 1, 3),
+ PLL_RATE(144000000, 48, 1, 3),
+ PLL_RATE(147000000, 49, 1, 3),
+ PLL_RATE(150000000, 50, 1, 3),
+ PLL_RATE(153000000, 51, 1, 3),
+ PLL_RATE(156000000, 52, 1, 3),
+ PLL_RATE(159000000, 53, 1, 3),
+ PLL_RATE(162000000, 54, 1, 3),
+ PLL_RATE(165000000, 55, 1, 3),
+ PLL_RATE(168000000, 56, 1, 3),
+ PLL_RATE(171000000, 57, 1, 3),
+ PLL_RATE(174000000, 58, 1, 3),
+ PLL_RATE(177000000, 59, 1, 3),
+ PLL_RATE(180000000, 60, 1, 3),
+ PLL_RATE(183000000, 61, 1, 3),
+ PLL_RATE(186000000, 62, 1, 3),
+ PLL_RATE(192000000, 32, 1, 2),
+ PLL_RATE(198000000, 33, 1, 2),
+ PLL_RATE(204000000, 34, 1, 2),
+ PLL_RATE(210000000, 35, 1, 2),
+ PLL_RATE(216000000, 36, 1, 2),
+ PLL_RATE(222000000, 37, 1, 2),
+ PLL_RATE(228000000, 38, 1, 2),
+ PLL_RATE(234000000, 39, 1, 2),
+ PLL_RATE(240000000, 40, 1, 2),
+ PLL_RATE(246000000, 41, 1, 2),
+ PLL_RATE(252000000, 42, 1, 2),
+ PLL_RATE(258000000, 43, 1, 2),
+ PLL_RATE(264000000, 44, 1, 2),
+ PLL_RATE(270000000, 45, 1, 2),
+ PLL_RATE(276000000, 46, 1, 2),
+ PLL_RATE(282000000, 47, 1, 2),
+ PLL_RATE(288000000, 48, 1, 2),
+ PLL_RATE(294000000, 49, 1, 2),
+ PLL_RATE(300000000, 50, 1, 2),
+ PLL_RATE(306000000, 51, 1, 2),
+ PLL_RATE(312000000, 52, 1, 2),
+ PLL_RATE(318000000, 53, 1, 2),
+ PLL_RATE(324000000, 54, 1, 2),
+ PLL_RATE(330000000, 55, 1, 2),
+ PLL_RATE(336000000, 56, 1, 2),
+ PLL_RATE(342000000, 57, 1, 2),
+ PLL_RATE(348000000, 58, 1, 2),
+ PLL_RATE(354000000, 59, 1, 2),
+ PLL_RATE(360000000, 60, 1, 2),
+ PLL_RATE(366000000, 61, 1, 2),
+ PLL_RATE(372000000, 62, 1, 2),
+ PLL_RATE(384000000, 32, 1, 1),
+ PLL_RATE(396000000, 33, 1, 1),
+ PLL_RATE(408000000, 34, 1, 1),
+ PLL_RATE(420000000, 35, 1, 1),
+ PLL_RATE(432000000, 36, 1, 1),
+ PLL_RATE(444000000, 37, 1, 1),
+ PLL_RATE(456000000, 38, 1, 1),
+ PLL_RATE(468000000, 39, 1, 1),
+ PLL_RATE(480000000, 40, 1, 1),
+ PLL_RATE(492000000, 41, 1, 1),
+ PLL_RATE(504000000, 42, 1, 1),
+ PLL_RATE(516000000, 43, 1, 1),
+ PLL_RATE(528000000, 44, 1, 1),
+ PLL_RATE(540000000, 45, 1, 1),
+ PLL_RATE(552000000, 46, 1, 1),
+ PLL_RATE(564000000, 47, 1, 1),
+ PLL_RATE(576000000, 48, 1, 1),
+ PLL_RATE(588000000, 49, 1, 1),
+ PLL_RATE(600000000, 50, 1, 1),
+ PLL_RATE(612000000, 51, 1, 1),
+ PLL_RATE(624000000, 52, 1, 1),
+ PLL_RATE(636000000, 53, 1, 1),
+ PLL_RATE(648000000, 54, 1, 1),
+ PLL_RATE(660000000, 55, 1, 1),
+ PLL_RATE(672000000, 56, 1, 1),
+ PLL_RATE(684000000, 57, 1, 1),
+ PLL_RATE(696000000, 58, 1, 1),
+ PLL_RATE(708000000, 59, 1, 1),
+ PLL_RATE(720000000, 60, 1, 1),
+ PLL_RATE(732000000, 61, 1, 1),
+ PLL_RATE(744000000, 62, 1, 1),
+ PLL_RATE(768000000, 32, 1, 0),
+ PLL_RATE(792000000, 33, 1, 0),
+ PLL_RATE(816000000, 34, 1, 0),
+ PLL_RATE(840000000, 35, 1, 0),
+ PLL_RATE(864000000, 36, 1, 0),
+ PLL_RATE(888000000, 37, 1, 0),
+ PLL_RATE(912000000, 38, 1, 0),
+ PLL_RATE(936000000, 39, 1, 0),
+ PLL_RATE(960000000, 40, 1, 0),
+ PLL_RATE(984000000, 41, 1, 0),
+ PLL_RATE(1008000000, 42, 1, 0),
+ PLL_RATE(1032000000, 43, 1, 0),
+ PLL_RATE(1056000000, 44, 1, 0),
+ PLL_RATE(1080000000, 45, 1, 0),
+ PLL_RATE(1104000000, 46, 1, 0),
+ PLL_RATE(1128000000, 47, 1, 0),
+ PLL_RATE(1152000000, 48, 1, 0),
+ PLL_RATE(1176000000, 49, 1, 0),
+ PLL_RATE(1200000000, 50, 1, 0),
+ PLL_RATE(1224000000, 51, 1, 0),
+ PLL_RATE(1248000000, 52, 1, 0),
+ PLL_RATE(1272000000, 53, 1, 0),
+ PLL_RATE(1296000000, 54, 1, 0),
+ PLL_RATE(1320000000, 55, 1, 0),
+ PLL_RATE(1344000000, 56, 1, 0),
+ PLL_RATE(1368000000, 57, 1, 0),
+ PLL_RATE(1392000000, 58, 1, 0),
+ PLL_RATE(1416000000, 59, 1, 0),
+ PLL_RATE(1440000000, 60, 1, 0),
+ PLL_RATE(1464000000, 61, 1, 0),
+ PLL_RATE(1488000000, 62, 1, 0),
+ { /* sentinel */ },
+};
+
+static const struct clk_div_table cpu_div_table[] = {
+ { .val = 1, .div = 1 },
+ { .val = 2, .div = 2 },
+ { .val = 3, .div = 3 },
+ { .val = 2, .div = 4 },
+ { .val = 3, .div = 6 },
+ { .val = 4, .div = 8 },
+ { .val = 5, .div = 10 },
+ { .val = 6, .div = 12 },
+ { .val = 7, .div = 14 },
+ { .val = 8, .div = 16 },
+ { /* sentinel */ },
+};
+
+static struct meson_clk_pll gxbb_fixed_pll = {
+ .m = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .od = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ },
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "fixed_pll",
+ .ops = &meson_clk_pll_ro_ops,
+ .parent_names = (const char *[]){ "xtal" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct meson_clk_pll gxbb_hdmi_pll = {
+ .m = {
+ .reg_off = HHI_HDMI_PLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_HDMI_PLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_HDMI_PLL_CNTL2,
+ .shift = 0,
+ .width = 12,
+ },
+ .od = {
+ .reg_off = HHI_HDMI_PLL_CNTL2,
+ .shift = 16,
+ .width = 2,
+ },
+ .od2 = {
+ .reg_off = HHI_HDMI_PLL_CNTL2,
+ .shift = 22,
+ .width = 2,
+ },
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll",
+ .ops = &meson_clk_pll_ro_ops,
+ .parent_names = (const char *[]){ "xtal" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct meson_clk_pll gxbb_sys_pll = {
+ .m = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .od = {
+ .reg_off = HHI_SYS_PLL_CNTL,
+ .shift = 10,
+ .width = 2,
+ },
+ .rate_table = sys_pll_rate_table,
+ .rate_count = ARRAY_SIZE(sys_pll_rate_table),
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "sys_pll",
+ .ops = &meson_clk_pll_ro_ops,
+ .parent_names = (const char *[]){ "xtal" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct meson_clk_pll gxbb_gp0_pll = {
+ .m = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .od = {
+ .reg_off = HHI_GP0_PLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ },
+ .rate_table = gp0_pll_rate_table,
+ .rate_count = ARRAY_SIZE(gp0_pll_rate_table),
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "gp0_pll",
+ .ops = &meson_clk_pll_ops,
+ .parent_names = (const char *[]){ "xtal" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_fixed_factor gxbb_fclk_div2 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div2",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "fixed_pll" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor gxbb_fclk_div3 = {
+ .mult = 1,
+ .div = 3,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div3",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "fixed_pll" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor gxbb_fclk_div4 = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div4",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "fixed_pll" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor gxbb_fclk_div5 = {
+ .mult = 1,
+ .div = 5,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div5",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "fixed_pll" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor gxbb_fclk_div7 = {
+ .mult = 1,
+ .div = 7,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div7",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "fixed_pll" },
+ .num_parents = 1,
+ },
+};
+
+static struct meson_clk_mpll gxbb_mpll0 = {
+ .sdm = {
+ .reg_off = HHI_MPLL_CNTL7,
+ .shift = 0,
+ .width = 14,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL_CNTL7,
+ .shift = 16,
+ .width = 9,
+ },
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll0",
+ .ops = &meson_clk_mpll_ro_ops,
+ .parent_names = (const char *[]){ "fixed_pll" },
+ .num_parents = 1,
+ },
+};
+
+static struct meson_clk_mpll gxbb_mpll1 = {
+ .sdm = {
+ .reg_off = HHI_MPLL_CNTL8,
+ .shift = 0,
+ .width = 14,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL_CNTL8,
+ .shift = 16,
+ .width = 9,
+ },
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll1",
+ .ops = &meson_clk_mpll_ro_ops,
+ .parent_names = (const char *[]){ "fixed_pll" },
+ .num_parents = 1,
+ },
+};
+
+static struct meson_clk_mpll gxbb_mpll2 = {
+ .sdm = {
+ .reg_off = HHI_MPLL_CNTL9,
+ .shift = 0,
+ .width = 14,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL_CNTL9,
+ .shift = 16,
+ .width = 9,
+ },
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll2",
+ .ops = &meson_clk_mpll_ro_ops,
+ .parent_names = (const char *[]){ "fixed_pll" },
+ .num_parents = 1,
+ },
+};
+
+/*
+ * FIXME cpu clocks and the legacy composite clocks (e.g. clk81) are both PLL
+ * post-dividers and should be modeled with their respective PLLs via the
+ * forthcoming coordinated clock rates feature
+ */
+static struct meson_clk_cpu gxbb_cpu_clk = {
+ .reg_off = HHI_SYS_CPU_CLK_CNTL1,
+ .div_table = cpu_div_table,
+ .clk_nb.notifier_call = meson_clk_cpu_notifier_cb,
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk",
+ .ops = &meson_clk_cpu_ops,
+ .parent_names = (const char *[]){ "sys_pll" },
+ .num_parents = 1,
+ },
+};
+
+static u32 mux_table_clk81[] = { 6, 5, 7 };
+
+static struct clk_mux gxbb_mpeg_clk_sel = {
+ .reg = (void *)HHI_MPEG_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 12,
+ .flags = CLK_MUX_READ_ONLY,
+ .table = mux_table_clk81,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpeg_clk_sel",
+ .ops = &clk_mux_ro_ops,
+ /*
+		 * FIXME bits 14:12 select from 8 possible parents:
+ * xtal, 1'b0 (wtf), fclk_div7, mpll_clkout1, mpll_clkout2,
+ * fclk_div4, fclk_div3, fclk_div5
+ */
+ .parent_names = (const char *[]){ "fclk_div3", "fclk_div4",
+ "fclk_div5" },
+ .num_parents = 3,
+ .flags = (CLK_SET_RATE_NO_REPARENT | CLK_IGNORE_UNUSED),
+ },
+};
+
+static struct clk_divider gxbb_mpeg_clk_div = {
+ .reg = (void *)HHI_MPEG_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpeg_clk_div",
+ .ops = &clk_divider_ops,
+ .parent_names = (const char *[]){ "mpeg_clk_sel" },
+ .num_parents = 1,
+ .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED),
+ },
+};
+
+/* the mother of dragons^W gates */
+static struct clk_gate gxbb_clk81 = {
+ .reg = (void *)HHI_MPEG_CLK_CNTL,
+ .bit_idx = 7,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "clk81",
+ .ops = &clk_gate_ops,
+ .parent_names = (const char *[]){ "mpeg_clk_div" },
+ .num_parents = 1,
+ .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED | CLK_IS_CRITICAL),
+ },
+};
+
+/* Everything Else (EE) domain gates */
+static MESON_GATE(ddr, HHI_GCLK_MPEG0, 0);
+static MESON_GATE(dos, HHI_GCLK_MPEG0, 1);
+static MESON_GATE(isa, HHI_GCLK_MPEG0, 5);
+static MESON_GATE(pl301, HHI_GCLK_MPEG0, 6);
+static MESON_GATE(periphs, HHI_GCLK_MPEG0, 7);
+static MESON_GATE(spicc, HHI_GCLK_MPEG0, 8);
+static MESON_GATE(i2c, HHI_GCLK_MPEG0, 9);
+static MESON_GATE(sar_adc, HHI_GCLK_MPEG0, 10);
+static MESON_GATE(smart_card, HHI_GCLK_MPEG0, 11);
+static MESON_GATE(rng0, HHI_GCLK_MPEG0, 12);
+static MESON_GATE(uart0, HHI_GCLK_MPEG0, 13);
+static MESON_GATE(sdhc, HHI_GCLK_MPEG0, 14);
+static MESON_GATE(stream, HHI_GCLK_MPEG0, 15);
+static MESON_GATE(async_fifo, HHI_GCLK_MPEG0, 16);
+static MESON_GATE(sdio, HHI_GCLK_MPEG0, 17);
+static MESON_GATE(abuf, HHI_GCLK_MPEG0, 18);
+static MESON_GATE(hiu_iface, HHI_GCLK_MPEG0, 19);
+static MESON_GATE(assist_misc, HHI_GCLK_MPEG0, 23);
+static MESON_GATE(spi, HHI_GCLK_MPEG0, 30);
+
+static MESON_GATE(i2s_spdif, HHI_GCLK_MPEG1, 2);
+static MESON_GATE(eth, HHI_GCLK_MPEG1, 3);
+static MESON_GATE(demux, HHI_GCLK_MPEG1, 4);
+static MESON_GATE(aiu_glue, HHI_GCLK_MPEG1, 6);
+static MESON_GATE(iec958, HHI_GCLK_MPEG1, 7);
+static MESON_GATE(i2s_out, HHI_GCLK_MPEG1, 8);
+static MESON_GATE(amclk, HHI_GCLK_MPEG1, 9);
+static MESON_GATE(aififo2, HHI_GCLK_MPEG1, 10);
+static MESON_GATE(mixer, HHI_GCLK_MPEG1, 11);
+static MESON_GATE(mixer_iface, HHI_GCLK_MPEG1, 12);
+static MESON_GATE(adc, HHI_GCLK_MPEG1, 13);
+static MESON_GATE(blkmv, HHI_GCLK_MPEG1, 14);
+static MESON_GATE(aiu, HHI_GCLK_MPEG1, 15);
+static MESON_GATE(uart1, HHI_GCLK_MPEG1, 16);
+static MESON_GATE(g2d, HHI_GCLK_MPEG1, 20);
+static MESON_GATE(usb0, HHI_GCLK_MPEG1, 21);
+static MESON_GATE(usb1, HHI_GCLK_MPEG1, 22);
+static MESON_GATE(reset, HHI_GCLK_MPEG1, 23);
+static MESON_GATE(nand, HHI_GCLK_MPEG1, 24);
+static MESON_GATE(dos_parser, HHI_GCLK_MPEG1, 25);
+static MESON_GATE(usb, HHI_GCLK_MPEG1, 26);
+static MESON_GATE(vdin1, HHI_GCLK_MPEG1, 28);
+static MESON_GATE(ahb_arb0, HHI_GCLK_MPEG1, 29);
+static MESON_GATE(efuse, HHI_GCLK_MPEG1, 30);
+static MESON_GATE(boot_rom, HHI_GCLK_MPEG1, 31);
+
+static MESON_GATE(ahb_data_bus, HHI_GCLK_MPEG2, 1);
+static MESON_GATE(ahb_ctrl_bus, HHI_GCLK_MPEG2, 2);
+static MESON_GATE(hdmi_intr_sync, HHI_GCLK_MPEG2, 3);
+static MESON_GATE(hdmi_pclk, HHI_GCLK_MPEG2, 4);
+static MESON_GATE(usb1_ddr_bridge, HHI_GCLK_MPEG2, 8);
+static MESON_GATE(usb0_ddr_bridge, HHI_GCLK_MPEG2, 9);
+static MESON_GATE(mmc_pclk, HHI_GCLK_MPEG2, 11);
+static MESON_GATE(dvin, HHI_GCLK_MPEG2, 12);
+static MESON_GATE(uart2, HHI_GCLK_MPEG2, 15);
+static MESON_GATE(sana, HHI_GCLK_MPEG2, 22);
+static MESON_GATE(vpu_intr, HHI_GCLK_MPEG2, 25);
+static MESON_GATE(sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26);
+static MESON_GATE(clk81_a53, HHI_GCLK_MPEG2, 29);
+
+static MESON_GATE(vclk2_venci0, HHI_GCLK_OTHER, 1);
+static MESON_GATE(vclk2_venci1, HHI_GCLK_OTHER, 2);
+static MESON_GATE(vclk2_vencp0, HHI_GCLK_OTHER, 3);
+static MESON_GATE(vclk2_vencp1, HHI_GCLK_OTHER, 4);
+static MESON_GATE(gclk_venci_int0, HHI_GCLK_OTHER, 8);
+static MESON_GATE(gclk_vencp_int, HHI_GCLK_OTHER, 9);
+static MESON_GATE(dac_clk, HHI_GCLK_OTHER, 10);
+static MESON_GATE(aoclk_gate, HHI_GCLK_OTHER, 14);
+static MESON_GATE(iec958_gate, HHI_GCLK_OTHER, 16);
+static MESON_GATE(enc480p, HHI_GCLK_OTHER, 20);
+static MESON_GATE(rng1, HHI_GCLK_OTHER, 21);
+static MESON_GATE(gclk_venci_int1, HHI_GCLK_OTHER, 22);
+static MESON_GATE(vclk2_venclmcc, HHI_GCLK_OTHER, 24);
+static MESON_GATE(vclk2_vencl, HHI_GCLK_OTHER, 25);
+static MESON_GATE(vclk_other, HHI_GCLK_OTHER, 26);
+static MESON_GATE(edp, HHI_GCLK_OTHER, 31);
+
+/* Always On (AO) domain gates */
+
+static MESON_GATE(ao_media_cpu, HHI_GCLK_AO, 0);
+static MESON_GATE(ao_ahb_sram, HHI_GCLK_AO, 1);
+static MESON_GATE(ao_ahb_bus, HHI_GCLK_AO, 2);
+static MESON_GATE(ao_iface, HHI_GCLK_AO, 3);
+static MESON_GATE(ao_i2c, HHI_GCLK_AO, 4);
+
+/* Array of all clocks provided by this provider */
+
+static struct clk_hw_onecell_data gxbb_hw_onecell_data = {
+ .hws = {
+ [CLKID_SYS_PLL] = &gxbb_sys_pll.hw,
+ [CLKID_CPUCLK] = &gxbb_cpu_clk.hw,
+ [CLKID_HDMI_PLL] = &gxbb_hdmi_pll.hw,
+ [CLKID_FIXED_PLL] = &gxbb_fixed_pll.hw,
+ [CLKID_FCLK_DIV2] = &gxbb_fclk_div2.hw,
+ [CLKID_FCLK_DIV3] = &gxbb_fclk_div3.hw,
+ [CLKID_FCLK_DIV4] = &gxbb_fclk_div4.hw,
+ [CLKID_FCLK_DIV5] = &gxbb_fclk_div5.hw,
+ [CLKID_FCLK_DIV7] = &gxbb_fclk_div7.hw,
+ [CLKID_GP0_PLL] = &gxbb_gp0_pll.hw,
+ [CLKID_MPEG_SEL] = &gxbb_mpeg_clk_sel.hw,
+ [CLKID_MPEG_DIV] = &gxbb_mpeg_clk_div.hw,
+ [CLKID_CLK81] = &gxbb_clk81.hw,
+ [CLKID_MPLL0] = &gxbb_mpll0.hw,
+ [CLKID_MPLL1] = &gxbb_mpll1.hw,
+ [CLKID_MPLL2] = &gxbb_mpll2.hw,
+ [CLKID_DDR] = &gxbb_ddr.hw,
+ [CLKID_DOS] = &gxbb_dos.hw,
+ [CLKID_ISA] = &gxbb_isa.hw,
+ [CLKID_PL301] = &gxbb_pl301.hw,
+ [CLKID_PERIPHS] = &gxbb_periphs.hw,
+ [CLKID_SPICC] = &gxbb_spicc.hw,
+ [CLKID_I2C] = &gxbb_i2c.hw,
+ [CLKID_SAR_ADC] = &gxbb_sar_adc.hw,
+ [CLKID_SMART_CARD] = &gxbb_smart_card.hw,
+ [CLKID_RNG0] = &gxbb_rng0.hw,
+ [CLKID_UART0] = &gxbb_uart0.hw,
+ [CLKID_SDHC] = &gxbb_sdhc.hw,
+ [CLKID_STREAM] = &gxbb_stream.hw,
+ [CLKID_ASYNC_FIFO] = &gxbb_async_fifo.hw,
+ [CLKID_SDIO] = &gxbb_sdio.hw,
+ [CLKID_ABUF] = &gxbb_abuf.hw,
+ [CLKID_HIU_IFACE] = &gxbb_hiu_iface.hw,
+ [CLKID_ASSIST_MISC] = &gxbb_assist_misc.hw,
+ [CLKID_SPI] = &gxbb_spi.hw,
+ [CLKID_I2S_SPDIF] = &gxbb_i2s_spdif.hw,
+ [CLKID_ETH] = &gxbb_eth.hw,
+ [CLKID_DEMUX] = &gxbb_demux.hw,
+ [CLKID_AIU_GLUE] = &gxbb_aiu_glue.hw,
+ [CLKID_IEC958] = &gxbb_iec958.hw,
+ [CLKID_I2S_OUT] = &gxbb_i2s_out.hw,
+ [CLKID_AMCLK] = &gxbb_amclk.hw,
+ [CLKID_AIFIFO2] = &gxbb_aififo2.hw,
+ [CLKID_MIXER] = &gxbb_mixer.hw,
+ [CLKID_MIXER_IFACE] = &gxbb_mixer_iface.hw,
+ [CLKID_ADC] = &gxbb_adc.hw,
+ [CLKID_BLKMV] = &gxbb_blkmv.hw,
+ [CLKID_AIU] = &gxbb_aiu.hw,
+ [CLKID_UART1] = &gxbb_uart1.hw,
+ [CLKID_G2D] = &gxbb_g2d.hw,
+ [CLKID_USB0] = &gxbb_usb0.hw,
+ [CLKID_USB1] = &gxbb_usb1.hw,
+ [CLKID_RESET] = &gxbb_reset.hw,
+ [CLKID_NAND] = &gxbb_nand.hw,
+ [CLKID_DOS_PARSER] = &gxbb_dos_parser.hw,
+ [CLKID_USB] = &gxbb_usb.hw,
+ [CLKID_VDIN1] = &gxbb_vdin1.hw,
+ [CLKID_AHB_ARB0] = &gxbb_ahb_arb0.hw,
+ [CLKID_EFUSE] = &gxbb_efuse.hw,
+ [CLKID_BOOT_ROM] = &gxbb_boot_rom.hw,
+ [CLKID_AHB_DATA_BUS] = &gxbb_ahb_data_bus.hw,
+ [CLKID_AHB_CTRL_BUS] = &gxbb_ahb_ctrl_bus.hw,
+ [CLKID_HDMI_INTR_SYNC] = &gxbb_hdmi_intr_sync.hw,
+ [CLKID_HDMI_PCLK] = &gxbb_hdmi_pclk.hw,
+ [CLKID_USB1_DDR_BRIDGE] = &gxbb_usb1_ddr_bridge.hw,
+ [CLKID_USB0_DDR_BRIDGE] = &gxbb_usb0_ddr_bridge.hw,
+ [CLKID_MMC_PCLK] = &gxbb_mmc_pclk.hw,
+ [CLKID_DVIN] = &gxbb_dvin.hw,
+ [CLKID_UART2] = &gxbb_uart2.hw,
+ [CLKID_SANA] = &gxbb_sana.hw,
+ [CLKID_VPU_INTR] = &gxbb_vpu_intr.hw,
+ [CLKID_SEC_AHB_AHB3_BRIDGE] = &gxbb_sec_ahb_ahb3_bridge.hw,
+ [CLKID_CLK81_A53] = &gxbb_clk81_a53.hw,
+ [CLKID_VCLK2_VENCI0] = &gxbb_vclk2_venci0.hw,
+ [CLKID_VCLK2_VENCI1] = &gxbb_vclk2_venci1.hw,
+ [CLKID_VCLK2_VENCP0] = &gxbb_vclk2_vencp0.hw,
+ [CLKID_VCLK2_VENCP1] = &gxbb_vclk2_vencp1.hw,
+ [CLKID_GCLK_VENCI_INT0] = &gxbb_gclk_venci_int0.hw,
+ [CLKID_GCLK_VENCI_INT] = &gxbb_gclk_vencp_int.hw,
+ [CLKID_DAC_CLK] = &gxbb_dac_clk.hw,
+ [CLKID_AOCLK_GATE] = &gxbb_aoclk_gate.hw,
+ [CLKID_IEC958_GATE] = &gxbb_iec958_gate.hw,
+ [CLKID_ENC480P] = &gxbb_enc480p.hw,
+ [CLKID_RNG1] = &gxbb_rng1.hw,
+ [CLKID_GCLK_VENCI_INT1] = &gxbb_gclk_venci_int1.hw,
+ [CLKID_VCLK2_VENCLMCC] = &gxbb_vclk2_venclmcc.hw,
+ [CLKID_VCLK2_VENCL] = &gxbb_vclk2_vencl.hw,
+ [CLKID_VCLK_OTHER] = &gxbb_vclk_other.hw,
+ [CLKID_EDP] = &gxbb_edp.hw,
+ [CLKID_AO_MEDIA_CPU] = &gxbb_ao_media_cpu.hw,
+ [CLKID_AO_AHB_SRAM] = &gxbb_ao_ahb_sram.hw,
+ [CLKID_AO_AHB_BUS] = &gxbb_ao_ahb_bus.hw,
+ [CLKID_AO_IFACE] = &gxbb_ao_iface.hw,
+ [CLKID_AO_I2C] = &gxbb_ao_i2c.hw,
+ },
+ .num = NR_CLKS,
+};
+
+/* Convenience tables to populate base addresses in .probe */
+
+static struct meson_clk_pll *const gxbb_clk_plls[] = {
+ &gxbb_fixed_pll,
+ &gxbb_hdmi_pll,
+ &gxbb_sys_pll,
+ &gxbb_gp0_pll,
+};
+
+static struct meson_clk_mpll *const gxbb_clk_mplls[] = {
+ &gxbb_mpll0,
+ &gxbb_mpll1,
+ &gxbb_mpll2,
+};
+
+static struct clk_gate *const gxbb_clk_gates[] = {
+ &gxbb_clk81,
+ &gxbb_ddr,
+ &gxbb_dos,
+ &gxbb_isa,
+ &gxbb_pl301,
+ &gxbb_periphs,
+ &gxbb_spicc,
+ &gxbb_i2c,
+ &gxbb_sar_adc,
+ &gxbb_smart_card,
+ &gxbb_rng0,
+ &gxbb_uart0,
+ &gxbb_sdhc,
+ &gxbb_stream,
+ &gxbb_async_fifo,
+ &gxbb_sdio,
+ &gxbb_abuf,
+ &gxbb_hiu_iface,
+ &gxbb_assist_misc,
+ &gxbb_spi,
+ &gxbb_i2s_spdif,
+ &gxbb_eth,
+ &gxbb_demux,
+ &gxbb_aiu_glue,
+ &gxbb_iec958,
+ &gxbb_i2s_out,
+ &gxbb_amclk,
+ &gxbb_aififo2,
+ &gxbb_mixer,
+ &gxbb_mixer_iface,
+ &gxbb_adc,
+ &gxbb_blkmv,
+ &gxbb_aiu,
+ &gxbb_uart1,
+ &gxbb_g2d,
+ &gxbb_usb0,
+ &gxbb_usb1,
+ &gxbb_reset,
+ &gxbb_nand,
+ &gxbb_dos_parser,
+ &gxbb_usb,
+ &gxbb_vdin1,
+ &gxbb_ahb_arb0,
+ &gxbb_efuse,
+ &gxbb_boot_rom,
+ &gxbb_ahb_data_bus,
+ &gxbb_ahb_ctrl_bus,
+ &gxbb_hdmi_intr_sync,
+ &gxbb_hdmi_pclk,
+ &gxbb_usb1_ddr_bridge,
+ &gxbb_usb0_ddr_bridge,
+ &gxbb_mmc_pclk,
+ &gxbb_dvin,
+ &gxbb_uart2,
+ &gxbb_sana,
+ &gxbb_vpu_intr,
+ &gxbb_sec_ahb_ahb3_bridge,
+ &gxbb_clk81_a53,
+ &gxbb_vclk2_venci0,
+ &gxbb_vclk2_venci1,
+ &gxbb_vclk2_vencp0,
+ &gxbb_vclk2_vencp1,
+ &gxbb_gclk_venci_int0,
+ &gxbb_gclk_vencp_int,
+ &gxbb_dac_clk,
+ &gxbb_aoclk_gate,
+ &gxbb_iec958_gate,
+ &gxbb_enc480p,
+ &gxbb_rng1,
+ &gxbb_gclk_venci_int1,
+ &gxbb_vclk2_venclmcc,
+ &gxbb_vclk2_vencl,
+ &gxbb_vclk_other,
+ &gxbb_edp,
+ &gxbb_ao_media_cpu,
+ &gxbb_ao_ahb_sram,
+ &gxbb_ao_ahb_bus,
+ &gxbb_ao_iface,
+ &gxbb_ao_i2c,
+};
+
+static int gxbb_clkc_probe(struct platform_device *pdev)
+{
+ void __iomem *clk_base;
+ int ret, clkid, i;
+ struct clk_hw *parent_hw;
+ struct clk *parent_clk;
+ struct device *dev = &pdev->dev;
+
+ /* Generic clocks and PLLs */
+ clk_base = of_iomap(dev->of_node, 0);
+ if (!clk_base) {
+ pr_err("%s: Unable to map clk base\n", __func__);
+ return -ENXIO;
+ }
+
+ /* Populate base address for PLLs */
+ for (i = 0; i < ARRAY_SIZE(gxbb_clk_plls); i++)
+ gxbb_clk_plls[i]->base = clk_base;
+
+ /* Populate base address for MPLLs */
+ for (i = 0; i < ARRAY_SIZE(gxbb_clk_mplls); i++)
+ gxbb_clk_mplls[i]->base = clk_base;
+
+ /* Populate the base address for CPU clk */
+ gxbb_cpu_clk.base = clk_base;
+
+ /* Populate the base address for the MPEG clks */
+ gxbb_mpeg_clk_sel.reg = clk_base + (u64)gxbb_mpeg_clk_sel.reg;
+ gxbb_mpeg_clk_div.reg = clk_base + (u64)gxbb_mpeg_clk_div.reg;
+
+ /* Populate base address for gates */
+ for (i = 0; i < ARRAY_SIZE(gxbb_clk_gates); i++)
+ gxbb_clk_gates[i]->reg = clk_base +
+ (u64)gxbb_clk_gates[i]->reg;
+
+ /*
+ * register all clks
+ */
+ for (clkid = 0; clkid < NR_CLKS; clkid++) {
+ ret = devm_clk_hw_register(dev, gxbb_hw_onecell_data.hws[clkid]);
+ if (ret)
+ goto iounmap;
+ }
+
+ /*
+ * Register CPU clk notifier
+ *
+ * FIXME this is wrong for a lot of reasons. First, the muxes should be
+ * struct clk_hw objects. Second, we shouldn't program the muxes in
+ * notifier handlers. The tricky programming sequence will be handled
+ * by the forthcoming coordinated clock rates mechanism once that
+ * feature is released.
+ *
+ * Furthermore, looking up the parent this way is terrible. At some
+ * point we will stop allocating a default struct clk when registering
+ * a new clk_hw, and this hack will no longer work. Releasing the ccr
+ * feature before that time solves the problem :-)
+ */
+ parent_hw = clk_hw_get_parent(&gxbb_cpu_clk.hw);
+ parent_clk = parent_hw->clk;
+ ret = clk_notifier_register(parent_clk, &gxbb_cpu_clk.clk_nb);
+ if (ret) {
+ pr_err("%s: failed to register clock notifier for cpu_clk\n",
+ __func__);
+ goto iounmap;
+ }
+
+ return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
+ &gxbb_hw_onecell_data);
+
+iounmap:
+ iounmap(clk_base);
+ return ret;
+}
+
+static const struct of_device_id gxbb_clkc_match_table[] = {
+ { .compatible = "amlogic,gxbb-clkc" },
+ { }
+};
+
+static struct platform_driver gxbb_driver = {
+ .probe = gxbb_clkc_probe,
+ .driver = {
+ .name = "gxbb-clkc",
+ .of_match_table = gxbb_clkc_match_table,
+ },
+};
+
+static int __init gxbb_clkc_init(void)
+{
+ return platform_driver_register(&gxbb_driver);
+}
+device_initcall(gxbb_clkc_init);
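
Once a CLKID is exported through the stable DT binding header (see the comment in gxbb.h below), peripheral drivers consume these clocks through the usual API. A minimal, hypothetical consumer, assuming a DT node carrying something like clocks = <&clkc CLKID_UART0>;:

static int example_consumer_probe(struct platform_device *pdev)
{
	/* resolved through the of_clk_hw_onecell_get() provider above */
	struct clk *clk = devm_clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return clk_prepare_enable(clk);
}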
diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h
new file mode 100644
index 000000000..a2adf3448
--- /dev/null
+++ b/drivers/clk/meson/gxbb.h
@@ -0,0 +1,271 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (c) 2016 AmLogic, Inc.
+ * Author: Michael Turquette <mturquette@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING
+ *
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 BayLibre, Inc.
+ * Author: Michael Turquette <mturquette@baylibre.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __GXBB_H
+#define __GXBB_H
+
+/*
+ * Clock controller register offsets
+ *
+ * Register offsets from the data sheet are listed in comment blocks below.
+ * Those offsets must be multiplied by 4 before adding them to the base address
+ * to get the right value.
+ */
+#define SCR 0x2C /* 0x0b offset in data sheet */
+#define TIMEOUT_VALUE 0x3c /* 0x0f offset in data sheet */
+
+#define HHI_GP0_PLL_CNTL 0x40 /* 0x10 offset in data sheet */
+#define HHI_GP0_PLL_CNTL2 0x44 /* 0x11 offset in data sheet */
+#define HHI_GP0_PLL_CNTL3 0x48 /* 0x12 offset in data sheet */
+#define HHI_GP0_PLL_CNTL4 0x4c /* 0x13 offset in data sheet */
+
+#define HHI_XTAL_DIVN_CNTL 0xbc /* 0x2f offset in data sheet */
+#define HHI_TIMER90K 0xec /* 0x3b offset in data sheet */
+
+#define HHI_MEM_PD_REG0 0x100 /* 0x40 offset in data sheet */
+#define HHI_MEM_PD_REG1 0x104 /* 0x41 offset in data sheet */
+#define HHI_VPU_MEM_PD_REG1 0x108 /* 0x42 offset in data sheet */
+#define HHI_VIID_CLK_DIV 0x128 /* 0x4a offset in data sheet */
+#define HHI_VIID_CLK_CNTL 0x12c /* 0x4b offset in data sheet */
+
+#define HHI_GCLK_MPEG0 0x140 /* 0x50 offset in data sheet */
+#define HHI_GCLK_MPEG1 0x144 /* 0x51 offset in data sheet */
+#define HHI_GCLK_MPEG2 0x148 /* 0x52 offset in data sheet */
+#define HHI_GCLK_OTHER 0x150 /* 0x54 offset in data sheet */
+#define HHI_GCLK_AO 0x154 /* 0x55 offset in data sheet */
+#define HHI_SYS_OSCIN_CNTL 0x158 /* 0x56 offset in data sheet */
+#define HHI_SYS_CPU_CLK_CNTL1 0x15c /* 0x57 offset in data sheet */
+#define HHI_SYS_CPU_RESET_CNTL 0x160 /* 0x58 offset in data sheet */
+#define HHI_VID_CLK_DIV 0x164 /* 0x59 offset in data sheet */
+
+#define HHI_MPEG_CLK_CNTL 0x174 /* 0x5d offset in data sheet */
+#define HHI_AUD_CLK_CNTL 0x178 /* 0x5e offset in data sheet */
+#define HHI_VID_CLK_CNTL 0x17c /* 0x5f offset in data sheet */
+#define HHI_AUD_CLK_CNTL2 0x190 /* 0x64 offset in data sheet */
+#define HHI_VID_CLK_CNTL2 0x194 /* 0x65 offset in data sheet */
+#define HHI_SYS_CPU_CLK_CNTL0 0x19c /* 0x67 offset in data sheet */
+#define HHI_VID_PLL_CLK_DIV 0x1a0 /* 0x68 offset in data sheet */
+#define HHI_AUD_CLK_CNTL3 0x1a4 /* 0x69 offset in data sheet */
+#define HHI_MALI_CLK_CNTL 0x1b0 /* 0x6c offset in data sheet */
+#define HHI_VPU_CLK_CNTL 0x1bC /* 0x6f offset in data sheet */
+
+#define HHI_HDMI_CLK_CNTL 0x1CC /* 0x73 offset in data sheet */
+#define HHI_VDEC_CLK_CNTL 0x1E0 /* 0x78 offset in data sheet */
+#define HHI_VDEC2_CLK_CNTL 0x1E4 /* 0x79 offset in data sheet */
+#define HHI_VDEC3_CLK_CNTL 0x1E8 /* 0x7a offset in data sheet */
+#define HHI_VDEC4_CLK_CNTL 0x1EC /* 0x7b offset in data sheet */
+#define HHI_HDCP22_CLK_CNTL 0x1F0 /* 0x7c offset in data sheet */
+#define HHI_VAPBCLK_CNTL 0x1F4 /* 0x7d offset in data sheet */
+
+#define HHI_VPU_CLKB_CNTL 0x20C /* 0x83 offset in data sheet */
+#define HHI_USB_CLK_CNTL 0x220 /* 0x88 offset in data sheet */
+#define HHI_32K_CLK_CNTL 0x224 /* 0x89 offset in data sheet */
+#define HHI_GEN_CLK_CNTL 0x228 /* 0x8a offset in data sheet */
+
+#define HHI_PCM_CLK_CNTL 0x258 /* 0x96 offset in data sheet */
+#define HHI_NAND_CLK_CNTL 0x25C /* 0x97 offset in data sheet */
+#define HHI_SD_EMMC_CLK_CNTL 0x264 /* 0x99 offset in data sheet */
+
+#define HHI_MPLL_CNTL 0x280 /* 0xa0 offset in data sheet */
+#define HHI_MPLL_CNTL2 0x284 /* 0xa1 offset in data sheet */
+#define HHI_MPLL_CNTL3 0x288 /* 0xa2 offset in data sheet */
+#define HHI_MPLL_CNTL4 0x28C /* 0xa3 offset in data sheet */
+#define HHI_MPLL_CNTL5 0x290 /* 0xa4 offset in data sheet */
+#define HHI_MPLL_CNTL6 0x294 /* 0xa5 offset in data sheet */
+#define HHI_MPLL_CNTL7 0x298 /* MP0, 0xa6 offset in data sheet */
+#define HHI_MPLL_CNTL8 0x29C /* MP1, 0xa7 offset in data sheet */
+#define HHI_MPLL_CNTL9 0x2A0 /* MP2, 0xa8 offset in data sheet */
+#define HHI_MPLL_CNTL10 0x2A4 /* MP2, 0xa9 offset in data sheet */
+
+#define HHI_MPLL3_CNTL0 0x2E0 /* 0xb8 offset in data sheet */
+#define HHI_MPLL3_CNTL1 0x2E4 /* 0xb9 offset in data sheet */
+#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
+#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
+
+#define HHI_SYS_PLL_CNTL 0x300 /* 0xc0 offset in data sheet */
+#define HHI_SYS_PLL_CNTL2 0x304 /* 0xc1 offset in data sheet */
+#define HHI_SYS_PLL_CNTL3 0x308 /* 0xc2 offset in data sheet */
+#define HHI_SYS_PLL_CNTL4 0x30c /* 0xc3 offset in data sheet */
+#define HHI_SYS_PLL_CNTL5 0x310 /* 0xc4 offset in data sheet */
+#define HHI_DPLL_TOP_I 0x318 /* 0xc6 offset in data sheet */
+#define HHI_DPLL_TOP2_I 0x31C /* 0xc7 offset in data sheet */
+#define HHI_HDMI_PLL_CNTL 0x320 /* 0xc8 offset in data sheet */
+#define HHI_HDMI_PLL_CNTL2 0x324 /* 0xc9 offset in data sheet */
+#define HHI_HDMI_PLL_CNTL3 0x328 /* 0xca offset in data sheet */
+#define HHI_HDMI_PLL_CNTL4 0x32C /* 0xcb offset in data sheet */
+#define HHI_HDMI_PLL_CNTL5 0x330 /* 0xcc offset in data sheet */
+#define HHI_HDMI_PLL_CNTL6 0x334 /* 0xcd offset in data sheet */
+#define HHI_HDMI_PLL_CNTL_I 0x338 /* 0xce offset in data sheet */
+#define HHI_HDMI_PLL_CNTL7 0x33C /* 0xcf offset in data sheet */
+
+#define HHI_HDMI_PHY_CNTL0 0x3A0 /* 0xe8 offset in data sheet */
+#define HHI_HDMI_PHY_CNTL1 0x3A4 /* 0xe9 offset in data sheet */
+#define HHI_HDMI_PHY_CNTL2 0x3A8 /* 0xea offset in data sheet */
+#define HHI_HDMI_PHY_CNTL3 0x3AC /* 0xeb offset in data sheet */
+
+#define HHI_VID_LOCK_CLK_CNTL 0x3C8 /* 0xf2 offset in data sheet */
+#define HHI_BT656_CLK_CNTL 0x3D4 /* 0xf5 offset in data sheet */
+#define HHI_SAR_CLK_CNTL 0x3D8 /* 0xf6 offset in data sheet */
+
+/*
+ * CLKID index values
+ *
+ * These indices are entirely contrived and do not map onto the hardware.
+ * Migrate them out of this header and into the DT header file when they need
+ * to be exposed to client nodes in DT: include/dt-bindings/clock/gxbb-clkc.h
+ */
+#define CLKID_SYS_PLL 0
+/* CLKID_CPUCLK */
+#define CLKID_HDMI_PLL 2
+#define CLKID_FIXED_PLL 3
+#define CLKID_FCLK_DIV2 4
+#define CLKID_FCLK_DIV3 5
+#define CLKID_FCLK_DIV4 6
+#define CLKID_FCLK_DIV5 7
+#define CLKID_FCLK_DIV7 8
+#define CLKID_GP0_PLL 9
+#define CLKID_MPEG_SEL 10
+#define CLKID_MPEG_DIV 11
+/* CLKID_CLK81 */
+#define CLKID_MPLL0 13
+#define CLKID_MPLL1 14
+#define CLKID_MPLL2 15
+#define CLKID_DDR 16
+#define CLKID_DOS 17
+#define CLKID_ISA 18
+#define CLKID_PL301 19
+#define CLKID_PERIPHS 20
+#define CLKID_SPICC 21
+#define CLKID_I2C 22
+#define CLKID_SAR_ADC 23
+#define CLKID_SMART_CARD 24
+#define CLKID_RNG0 25
+#define CLKID_UART0 26
+#define CLKID_SDHC 27
+#define CLKID_STREAM 28
+#define CLKID_ASYNC_FIFO 29
+#define CLKID_SDIO 30
+#define CLKID_ABUF 31
+#define CLKID_HIU_IFACE 32
+#define CLKID_ASSIST_MISC 33
+#define CLKID_SPI 34
+#define CLKID_I2S_SPDIF 35
+#define CLKID_ETH 36
+#define CLKID_DEMUX 37
+#define CLKID_AIU_GLUE 38
+#define CLKID_IEC958 39
+#define CLKID_I2S_OUT 40
+#define CLKID_AMCLK 41
+#define CLKID_AIFIFO2 42
+#define CLKID_MIXER 43
+#define CLKID_MIXER_IFACE 44
+#define CLKID_ADC 45
+#define CLKID_BLKMV 46
+#define CLKID_AIU 47
+#define CLKID_UART1 48
+#define CLKID_G2D 49
+#define CLKID_USB0 50
+#define CLKID_USB1 51
+#define CLKID_RESET 52
+#define CLKID_NAND 53
+#define CLKID_DOS_PARSER 54
+#define CLKID_USB 55
+#define CLKID_VDIN1 56
+#define CLKID_AHB_ARB0 57
+#define CLKID_EFUSE 58
+#define CLKID_BOOT_ROM 59
+#define CLKID_AHB_DATA_BUS 60
+#define CLKID_AHB_CTRL_BUS 61
+#define CLKID_HDMI_INTR_SYNC 62
+#define CLKID_HDMI_PCLK 63
+#define CLKID_USB1_DDR_BRIDGE 64
+#define CLKID_USB0_DDR_BRIDGE 65
+#define CLKID_MMC_PCLK 66
+#define CLKID_DVIN 67
+#define CLKID_UART2 68
+#define CLKID_SANA 69
+#define CLKID_VPU_INTR 70
+#define CLKID_SEC_AHB_AHB3_BRIDGE 71
+#define CLKID_CLK81_A53 72
+#define CLKID_VCLK2_VENCI0 73
+#define CLKID_VCLK2_VENCI1 74
+#define CLKID_VCLK2_VENCP0 75
+#define CLKID_VCLK2_VENCP1 76
+#define CLKID_GCLK_VENCI_INT0 77
+#define CLKID_GCLK_VENCI_INT 78
+#define CLKID_DAC_CLK 79
+#define CLKID_AOCLK_GATE 80
+#define CLKID_IEC958_GATE 81
+#define CLKID_ENC480P 82
+#define CLKID_RNG1 83
+#define CLKID_GCLK_VENCI_INT1 84
+#define CLKID_VCLK2_VENCLMCC 85
+#define CLKID_VCLK2_VENCL 86
+#define CLKID_VCLK_OTHER 87
+#define CLKID_EDP 88
+#define CLKID_AO_MEDIA_CPU 89
+#define CLKID_AO_AHB_SRAM 90
+#define CLKID_AO_AHB_BUS 91
+#define CLKID_AO_IFACE 92
+#define CLKID_AO_I2C 93
+
+#define NR_CLKS 94
+
+/* include the CLKIDs that have been made part of the stable DT binding */
+#include <dt-bindings/clock/gxbb-clkc.h>
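/*
 * Illustrative sketch, not part of the patch: once an ID graduates into the
 * stable binding header included above, a DT client node can reference the
 * clock by index. For example, if CLKID_ETH were moved into gxbb-clkc.h, a
 * client could write (assuming &clkc labels the GXBB clock controller node):
 *
 *	clocks = <&clkc CLKID_ETH>;
 */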
+
+#endif /* __GXBB_H */
diff --git a/drivers/clk/meson/meson8b-clkc.c b/drivers/clk/meson/meson8b-clkc.c
index 4d057b3e2..4c9413cdf 100644
--- a/drivers/clk/meson/meson8b-clkc.c
+++ b/drivers/clk/meson/meson8b-clkc.c
@@ -1,7 +1,12 @@
/*
+ * AmLogic S805 / Meson8b Clock Controller Driver
+ *
* Copyright (c) 2015 Endless Mobile, Inc.
* Author: Carlo Caione <carlo@endlessm.com>
*
+ * Copyright (c) 2016 BayLibre, Inc.
+ * Michael Turquette <mturquette@baylibre.com>
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
@@ -15,23 +20,33 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/kernel.h>
-#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/slab.h>
#include <dt-bindings/clock/meson8b-clkc.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
#include "clkc.h"
-#define MESON8B_REG_CTL0_ADDR 0x0000
-#define MESON8B_REG_SYS_CPU_CNTL1 0x015c
-#define MESON8B_REG_HHI_MPEG 0x0174
-#define MESON8B_REG_MALI 0x01b0
+/*
+ * Clock controller register offsets
+ *
+ * Register offsets from the HardKernel[0] data sheet are listed in the
+ * comment blocks below. Those offsets must be multiplied by 4 before being
+ * added to the base address to yield the final register address.
+ *
+ * [0] http://dn.odroid.com/S805/Datasheet/S805_Datasheet%20V0.8%2020150126.pdf
+ */
+#define MESON8B_REG_SYS_CPU_CNTL1 0x015c /* 0x57 offset in data sheet */
+#define MESON8B_REG_HHI_MPEG 0x0174 /* 0x5d offset in data sheet */
+#define MESON8B_REG_MALI 0x01b0 /* 0x6c offset in data sheet */
#define MESON8B_REG_PLL_FIXED 0x0280
#define MESON8B_REG_PLL_SYS 0x0300
#define MESON8B_REG_PLL_VID 0x0320
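/*
 * Illustrative sketch, not part of the patch: the HHI registers are 32 bits
 * wide, so data-sheet word index N maps to byte offset N * 4 from the mapped
 * base. The helper name below is invented for illustration only.
 */
#define MESON8B_HHI_REG(ds_idx)	((ds_idx) * 4)
/* MESON8B_HHI_REG(0x5d) == 0x174 == MESON8B_REG_HHI_MPEG */
/* MESON8B_HHI_REG(0xc8) == 0x320 == MESON8B_REG_PLL_VID  */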
+static DEFINE_SPINLOCK(clk_lock);
+
static const struct pll_rate_table sys_pll_rate_table[] = {
PLL_RATE(312000000, 52, 1, 2),
PLL_RATE(336000000, 56, 1, 2),
@@ -102,95 +117,331 @@ static const struct clk_div_table cpu_div_table[] = {
{ /* sentinel */ },
};
-PNAME(p_xtal) = { "xtal" };
-PNAME(p_fclk_div) = { "fixed_pll" };
-PNAME(p_cpu_clk) = { "sys_pll" };
-PNAME(p_clk81) = { "fclk_div3", "fclk_div4", "fclk_div5" };
-PNAME(p_mali) = { "fclk_div3", "fclk_div4", "fclk_div5",
- "fclk_div7", "zero" };
+static struct clk_fixed_rate meson8b_xtal = {
+ .fixed_rate = 24000000,
+ .hw.init = &(struct clk_init_data){
+ .name = "xtal",
+ .num_parents = 0,
+ .ops = &clk_fixed_rate_ops,
+ },
+};
+
+static struct meson_clk_pll meson8b_fixed_pll = {
+ .m = {
+ .reg_off = MESON8B_REG_PLL_FIXED,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = MESON8B_REG_PLL_FIXED,
+ .shift = 9,
+ .width = 5,
+ },
+ .od = {
+ .reg_off = MESON8B_REG_PLL_FIXED,
+ .shift = 16,
+ .width = 2,
+ },
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "fixed_pll",
+ .ops = &meson_clk_pll_ro_ops,
+ .parent_names = (const char *[]){ "xtal" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct meson_clk_pll meson8b_vid_pll = {
+ .m = {
+ .reg_off = MESON8B_REG_PLL_VID,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = MESON8B_REG_PLL_VID,
+ .shift = 9,
+ .width = 5,
+ },
+ .od = {
+ .reg_off = MESON8B_REG_PLL_VID,
+ .shift = 16,
+ .width = 2,
+ },
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "vid_pll",
+ .ops = &meson_clk_pll_ro_ops,
+ .parent_names = (const char *[]){ "xtal" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct meson_clk_pll meson8b_sys_pll = {
+ .m = {
+ .reg_off = MESON8B_REG_PLL_SYS,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = MESON8B_REG_PLL_SYS,
+ .shift = 9,
+ .width = 5,
+ },
+ .od = {
+ .reg_off = MESON8B_REG_PLL_SYS,
+ .shift = 16,
+ .width = 2,
+ },
+ .rate_table = sys_pll_rate_table,
+ .rate_count = ARRAY_SIZE(sys_pll_rate_table),
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "sys_pll",
+ .ops = &meson_clk_pll_ops,
+ .parent_names = (const char *[]){ "xtal" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_fixed_factor meson8b_fclk_div2 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div2",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "fixed_pll" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor meson8b_fclk_div3 = {
+ .mult = 1,
+ .div = 3,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div3",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "fixed_pll" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor meson8b_fclk_div4 = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div4",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "fixed_pll" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor meson8b_fclk_div5 = {
+ .mult = 1,
+ .div = 5,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div5",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "fixed_pll" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor meson8b_fclk_div7 = {
+ .mult = 1,
+ .div = 7,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div7",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "fixed_pll" },
+ .num_parents = 1,
+ },
+};
+
+/*
+ * FIXME cpu clocks and the legacy composite clocks (e.g. clk81) are both PLL
+ * post-dividers and should be modeled with their respective PLLs via the
+ * forthcoming coordinated clock rates feature.
+ */
+static struct meson_clk_cpu meson8b_cpu_clk = {
+ .reg_off = MESON8B_REG_SYS_CPU_CNTL1,
+ .div_table = cpu_div_table,
+ .clk_nb.notifier_call = meson_clk_cpu_notifier_cb,
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk",
+ .ops = &meson_clk_cpu_ops,
+ .parent_names = (const char *[]){ "sys_pll" },
+ .num_parents = 1,
+ },
+};
static u32 mux_table_clk81[] = { 6, 5, 7 };
-static u32 mux_table_mali[] = { 6, 5, 7, 4, 0 };
-
-static struct pll_conf pll_confs = {
- .m = PARM(0x00, 0, 9),
- .n = PARM(0x00, 9, 5),
- .od = PARM(0x00, 16, 2),
-};
-
-static struct pll_conf sys_pll_conf = {
- .m = PARM(0x00, 0, 9),
- .n = PARM(0x00, 9, 5),
- .od = PARM(0x00, 16, 2),
- .rate_table = sys_pll_rate_table,
-};
-
-static const struct composite_conf clk81_conf __initconst = {
- .mux_table = mux_table_clk81,
- .mux_flags = CLK_MUX_READ_ONLY,
- .mux_parm = PARM(0x00, 12, 3),
- .div_parm = PARM(0x00, 0, 7),
- .gate_parm = PARM(0x00, 7, 1),
-};
-
-static const struct composite_conf mali_conf __initconst = {
- .mux_table = mux_table_mali,
- .mux_parm = PARM(0x00, 9, 3),
- .div_parm = PARM(0x00, 0, 7),
- .gate_parm = PARM(0x00, 8, 1),
-};
-
-static const struct clk_conf meson8b_xtal_conf __initconst =
- FIXED_RATE_P(MESON8B_REG_CTL0_ADDR, CLKID_XTAL, "xtal", 0,
- PARM(0x00, 4, 7));
-
-static const struct clk_conf meson8b_clk_confs[] __initconst = {
- FIXED_RATE(CLKID_ZERO, "zero", 0, 0),
- PLL(MESON8B_REG_PLL_FIXED, CLKID_PLL_FIXED, "fixed_pll",
- p_xtal, 0, &pll_confs),
- PLL(MESON8B_REG_PLL_VID, CLKID_PLL_VID, "vid_pll",
- p_xtal, 0, &pll_confs),
- PLL(MESON8B_REG_PLL_SYS, CLKID_PLL_SYS, "sys_pll",
- p_xtal, 0, &sys_pll_conf),
- FIXED_FACTOR_DIV(CLKID_FCLK_DIV2, "fclk_div2", p_fclk_div, 0, 2),
- FIXED_FACTOR_DIV(CLKID_FCLK_DIV3, "fclk_div3", p_fclk_div, 0, 3),
- FIXED_FACTOR_DIV(CLKID_FCLK_DIV4, "fclk_div4", p_fclk_div, 0, 4),
- FIXED_FACTOR_DIV(CLKID_FCLK_DIV5, "fclk_div5", p_fclk_div, 0, 5),
- FIXED_FACTOR_DIV(CLKID_FCLK_DIV7, "fclk_div7", p_fclk_div, 0, 7),
- CPU(MESON8B_REG_SYS_CPU_CNTL1, CLKID_CPUCLK, "a5_clk", p_cpu_clk,
- cpu_div_table),
- COMPOSITE(MESON8B_REG_HHI_MPEG, CLKID_CLK81, "clk81", p_clk81,
- CLK_SET_RATE_NO_REPARENT | CLK_IGNORE_UNUSED, &clk81_conf),
- COMPOSITE(MESON8B_REG_MALI, CLKID_MALI, "mali", p_mali,
- CLK_IGNORE_UNUSED, &mali_conf),
-};
-
-static void __init meson8b_clkc_init(struct device_node *np)
-{
- void __iomem *clk_base;
- if (!meson_clk_init(np, CLK_NR_CLKS))
- return;
+struct clk_mux meson8b_mpeg_clk_sel = {
+ .reg = (void *)MESON8B_REG_HHI_MPEG,
+ .mask = 0x7,
+ .shift = 12,
+ .flags = CLK_MUX_READ_ONLY,
+ .table = mux_table_clk81,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpeg_clk_sel",
+ .ops = &clk_mux_ro_ops,
+ /*
+ * FIXME bits 14:12 select from 8 possible parents:
+ * xtal, 1'b0 (wtf), fclk_div7, mpll_clkout1, mpll_clkout2,
+ * fclk_div4, fclk_div3, fclk_div5
+ */
+ .parent_names = (const char *[]){ "fclk_div3", "fclk_div4",
+ "fclk_div5" },
+ .num_parents = 3,
+ .flags = (CLK_SET_RATE_NO_REPARENT | CLK_IGNORE_UNUSED),
+ },
+};
- /* XTAL */
- clk_base = of_iomap(np, 0);
- if (!clk_base) {
- pr_err("%s: Unable to map xtal base\n", __func__);
- return;
- }
+struct clk_divider meson8b_mpeg_clk_div = {
+ .reg = (void *)MESON8B_REG_HHI_MPEG,
+ .shift = 0,
+ .width = 7,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpeg_clk_div",
+ .ops = &clk_divider_ops,
+ .parent_names = (const char *[]){ "mpeg_clk_sel" },
+ .num_parents = 1,
+ .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED),
+ },
+};
- meson_clk_register_clks(&meson8b_xtal_conf, 1, clk_base);
- iounmap(clk_base);
+struct clk_gate meson8b_clk81 = {
+ .reg = (void *)MESON8B_REG_HHI_MPEG,
+ .bit_idx = 7,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "clk81",
+ .ops = &clk_gate_ops,
+ .parent_names = (const char *[]){ "mpeg_clk_div" },
+ .num_parents = 1,
+ .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED),
+ },
+};
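/*
 * Illustrative sketch, not part of the patch: mpeg_clk_sel, mpeg_clk_div and
 * clk81 above model one composite hardware clock (the MESON8B_REG_HHI_MPEG
 * register) as a read-only mux feeding a divider feeding a gate. A consumer
 * only ever sees the gate; the probe function and device below are invented
 * for illustration, using only the generic clk consumer API.
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_probe(struct device *dev)
{
	/* resolves clk81 via the consumer's DT "clocks" phandle */
	struct clk *clk = devm_clk_get(dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* rate recalculation walks gate -> divider -> mux -> fclk_div parent */
	dev_info(dev, "clk81 runs at %lu Hz\n", clk_get_rate(clk));
	return 0;
}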
+
+static struct clk_hw_onecell_data meson8b_hw_onecell_data = {
+ .hws = {
+ [CLKID_XTAL] = &meson8b_xtal.hw,
+ [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
+ [CLKID_PLL_VID] = &meson8b_vid_pll.hw,
+ [CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
+ [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw,
+ [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw,
+ [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw,
+ [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw,
+ [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw,
+ [CLKID_CPUCLK] = &meson8b_cpu_clk.hw,
+ [CLKID_MPEG_SEL] = &meson8b_mpeg_clk_sel.hw,
+ [CLKID_MPEG_DIV] = &meson8b_mpeg_clk_div.hw,
+ [CLKID_CLK81] = &meson8b_clk81.hw,
+ },
+ .num = CLK_NR_CLKS,
+};
+
+static struct meson_clk_pll *const meson8b_clk_plls[] = {
+ &meson8b_fixed_pll,
+ &meson8b_vid_pll,
+ &meson8b_sys_pll,
+};
+
+static int meson8b_clkc_probe(struct platform_device *pdev)
+{
+ void __iomem *clk_base;
+ int ret, clkid, i;
+ struct clk_hw *parent_hw;
+ struct clk *parent_clk;
+ struct device *dev = &pdev->dev;
/* Generic clocks and PLLs */
- clk_base = of_iomap(np, 1);
+ clk_base = of_iomap(dev->of_node, 1);
if (!clk_base) {
pr_err("%s: Unable to map clk base\n", __func__);
- return;
+ return -ENXIO;
+ }
+
+ /* Populate base address for PLLs */
+ for (i = 0; i < ARRAY_SIZE(meson8b_clk_plls); i++)
+ meson8b_clk_plls[i]->base = clk_base;
+
+ /* Populate the base address for CPU clk */
+ meson8b_cpu_clk.base = clk_base;
+
+ /* Populate the base address for the MPEG clks */
+ meson8b_mpeg_clk_sel.reg = clk_base + (u32)meson8b_mpeg_clk_sel.reg;
+ meson8b_mpeg_clk_div.reg = clk_base + (u32)meson8b_mpeg_clk_div.reg;
+ meson8b_clk81.reg = clk_base + (u32)meson8b_clk81.reg;
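/*
 * Illustrative note, not part of the patch: the three assignments above
 * rebase the MPEG clock registers. The static initializers store the bare
 * MESON8B_REG_HHI_MPEG offset cast to a pointer; probe adds the ioremapped
 * base. Spelled out for the gate, using the defines above:
 *
 *	before: meson8b_clk81.reg == (void *)0x0174   (offset only)
 *	after:  meson8b_clk81.reg == clk_base + 0x0174   (mapped register)
 */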
+
+ /*
+ * register all clks
+ * CLKID_UNUSED = 0, so skip it and start with CLKID_XTAL = 1
+ */
+ for (clkid = CLKID_XTAL; clkid < CLK_NR_CLKS; clkid++) {
+ /* array might be sparse */
+ if (!meson8b_hw_onecell_data.hws[clkid])
+ continue;
+
+ /* FIXME convert to devm_clk_register */
+ ret = devm_clk_hw_register(dev, meson8b_hw_onecell_data.hws[clkid]);
+ if (ret)
+ goto iounmap;
}
- meson_clk_register_clks(meson8b_clk_confs,
- ARRAY_SIZE(meson8b_clk_confs),
- clk_base);
+ /*
+ * Register CPU clk notifier
+ *
+ * FIXME this is wrong for a lot of reasons. First, the muxes should be
+ * struct clk_hw objects. Second, we shouldn't program the muxes in
+ * notifier handlers. The tricky programming sequence will be handled
+ * by the forthcoming coordinated clock rates mechanism once that
+ * feature is released.
+ *
+ * Furthermore, looking up the parent this way is terrible. At some
+ * point we will stop allocating a default struct clk when registering
+ * a new clk_hw, and this hack will no longer work. Releasing the ccr
+ * feature before that time solves the problem :-)
+ */
+ parent_hw = clk_hw_get_parent(&meson8b_cpu_clk.hw);
+ parent_clk = parent_hw->clk;
+ ret = clk_notifier_register(parent_clk, &meson8b_cpu_clk.clk_nb);
+ if (ret) {
+ pr_err("%s: failed to register clock notifier for cpu_clk\n",
+ __func__);
+ goto iounmap;
+ }
+
+ return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
+ &meson8b_hw_onecell_data);
+
+iounmap:
+ iounmap(clk_base);
+ return ret;
+}
+
+static const struct of_device_id meson8b_clkc_match_table[] = {
+ { .compatible = "amlogic,meson8b-clkc" },
+ { }
+};
+
+static struct platform_driver meson8b_driver = {
+ .probe = meson8b_clkc_probe,
+ .driver = {
+ .name = "meson8b-clkc",
+ .of_match_table = meson8b_clkc_match_table,
+ },
+};
+
+static int __init meson8b_clkc_init(void)
+{
+ return platform_driver_register(&meson8b_driver);
}
-CLK_OF_DECLARE(meson8b_clock, "amlogic,meson8b-clkc", meson8b_clkc_init);
+device_initcall(meson8b_clkc_init);
diff --git a/drivers/clk/nxp/clk-lpc32xx.c b/drivers/clk/nxp/clk-lpc32xx.c
index 481b2646b..90d740a2f 100644
--- a/drivers/clk/nxp/clk-lpc32xx.c
+++ b/drivers/clk/nxp/clk-lpc32xx.c
@@ -87,7 +87,7 @@ enum {
enum {
/* Start from the last defined clock in dt bindings */
- LPC32XX_CLK_ADC_DIV = LPC32XX_CLK_HCLK_PLL + 1,
+ LPC32XX_CLK_ADC_DIV = LPC32XX_CLK_PERIPH + 1,
LPC32XX_CLK_ADC_RTC,
LPC32XX_CLK_TEST1,
LPC32XX_CLK_TEST2,
@@ -99,7 +99,6 @@ enum {
LPC32XX_CLK_HCLK_DIV_PERIPH,
LPC32XX_CLK_HCLK_DIV,
LPC32XX_CLK_HCLK,
- LPC32XX_CLK_PERIPH,
LPC32XX_CLK_ARM,
LPC32XX_CLK_ARM_VFP,
diff --git a/drivers/clk/qcom/gcc-msm8660.c b/drivers/clk/qcom/gcc-msm8660.c
index 6dc558649..c347a0d44 100644
--- a/drivers/clk/qcom/gcc-msm8660.c
+++ b/drivers/clk/qcom/gcc-msm8660.c
@@ -2290,6 +2290,32 @@ static struct clk_branch sdc5_h_clk = {
},
};
+static struct clk_branch ebi2_2x_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 18,
+ .clkr = {
+ .enable_reg = 0x2660,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "ebi2_2x_clk",
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
+static struct clk_branch ebi2_clk = {
+ .halt_reg = 0x2fcc,
+ .halt_bit = 19,
+ .clkr = {
+ .enable_reg = 0x2664,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "ebi2_clk",
+ .ops = &clk_branch_ops,
+ },
+ },
+};
+
static struct clk_branch adm0_clk = {
.halt_reg = 0x2fdc,
.halt_check = BRANCH_HALT_VOTED,
@@ -2533,6 +2559,8 @@ static struct clk_regmap *gcc_msm8660_clks[] = {
[SDC3_H_CLK] = &sdc3_h_clk.clkr,
[SDC4_H_CLK] = &sdc4_h_clk.clkr,
[SDC5_H_CLK] = &sdc5_h_clk.clkr,
+ [EBI2_2X_CLK] = &ebi2_2x_clk.clkr,
+ [EBI2_CLK] = &ebi2_clk.clkr,
[ADM0_CLK] = &adm0_clk.clkr,
[ADM0_PBUS_CLK] = &adm0_pbus_clk.clkr,
[ADM1_CLK] = &adm1_clk.clkr,
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index c9b96f318..bbf732bbc 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -2891,21 +2891,6 @@ static struct clk_branch gcc_smmu_aggre0_ahb_clk = {
},
};
-static struct clk_branch gcc_aggre1_pnoc_ahb_clk = {
- .halt_reg = 0x82014,
- .clkr = {
- .enable_reg = 0x82014,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_aggre1_pnoc_ahb_clk",
- .parent_names = (const char *[]){ "periph_noc_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_aggre2_ufs_axi_clk = {
.halt_reg = 0x83014,
.clkr = {
@@ -3308,7 +3293,6 @@ static struct clk_regmap *gcc_msm8996_clocks[] = {
[GCC_AGGRE0_CNOC_AHB_CLK] = &gcc_aggre0_cnoc_ahb_clk.clkr,
[GCC_SMMU_AGGRE0_AXI_CLK] = &gcc_smmu_aggre0_axi_clk.clkr,
[GCC_SMMU_AGGRE0_AHB_CLK] = &gcc_smmu_aggre0_ahb_clk.clkr,
- [GCC_AGGRE1_PNOC_AHB_CLK] = &gcc_aggre1_pnoc_ahb_clk.clkr,
[GCC_AGGRE2_UFS_AXI_CLK] = &gcc_aggre2_ufs_axi_clk.clkr,
[GCC_AGGRE2_USB3_AXI_CLK] = &gcc_aggre2_usb3_axi_clk.clkr,
[GCC_QSPI_AHB_CLK] = &gcc_qspi_ahb_clk.clkr,
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index 2115ce410..41a12d376 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -1,6 +1,7 @@
config CLK_RENESAS_CPG_MSSR
bool
default y if ARCH_R8A7795
+ default y if ARCH_R8A7796
config CLK_RENESAS_CPG_MSTP
bool
@@ -11,6 +12,7 @@ config CLK_RENESAS_CPG_MSTP
default y if ARCH_R8A7779
default y if ARCH_R8A7790
default y if ARCH_R8A7791
+ default y if ARCH_R8A7792
default y if ARCH_R8A7793
default y if ARCH_R8A7794
default y if ARCH_SH73A0
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index ead8bb843..90dd0db7d 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -6,9 +6,11 @@ obj-$(CONFIG_ARCH_R8A7778) += clk-r8a7778.o
obj-$(CONFIG_ARCH_R8A7779) += clk-r8a7779.o
obj-$(CONFIG_ARCH_R8A7790) += clk-rcar-gen2.o clk-div6.o
obj-$(CONFIG_ARCH_R8A7791) += clk-rcar-gen2.o clk-div6.o
+obj-$(CONFIG_ARCH_R8A7792) += clk-rcar-gen2.o clk-div6.o
obj-$(CONFIG_ARCH_R8A7793) += clk-rcar-gen2.o clk-div6.o
obj-$(CONFIG_ARCH_R8A7794) += clk-rcar-gen2.o clk-div6.o
-obj-$(CONFIG_ARCH_R8A7795) += r8a7795-cpg-mssr.o
+obj-$(CONFIG_ARCH_R8A7795) += r8a7795-cpg-mssr.o rcar-gen3-cpg.o
+obj-$(CONFIG_ARCH_R8A7796) += r8a7796-cpg-mssr.o rcar-gen3-cpg.o
obj-$(CONFIG_ARCH_SH73A0) += clk-sh73a0.o clk-div6.o
obj-$(CONFIG_CLK_RENESAS_CPG_MSSR) += renesas-cpg-mssr.o clk-div6.o
diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
index e7b98c4d4..e38bf60c0 100644
--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
@@ -12,22 +12,14 @@
* the Free Software Foundation; version 2 of the License.
*/
-#include <linux/bug.h>
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
#include <linux/device.h>
-#include <linux/err.h>
#include <linux/init.h>
-#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/slab.h>
#include <dt-bindings/clock/r8a7795-cpg-mssr.h>
#include "renesas-cpg-mssr.h"
-
-#define CPG_RCKCR 0x240
+#include "rcar-gen3-cpg.h"
enum clk_ids {
/* Core Clock Outputs exported to DT */
@@ -58,20 +50,6 @@ enum clk_ids {
MOD_CLK_BASE
};
-enum r8a7795_clk_types {
- CLK_TYPE_GEN3_MAIN = CLK_TYPE_CUSTOM,
- CLK_TYPE_GEN3_PLL0,
- CLK_TYPE_GEN3_PLL1,
- CLK_TYPE_GEN3_PLL2,
- CLK_TYPE_GEN3_PLL3,
- CLK_TYPE_GEN3_PLL4,
- CLK_TYPE_GEN3_SD,
- CLK_TYPE_GEN3_R,
-};
-
-#define DEF_GEN3_SD(_name, _id, _parent, _offset) \
- DEF_BASE(_name, _id, CLK_TYPE_GEN3_SD, _parent, .offset = _offset)
-
static const struct cpg_core_clk r8a7795_core_clks[] __initconst = {
/* External Clock Inputs */
DEF_INPUT("extal", CLK_EXTAL),
@@ -130,6 +108,9 @@ static const struct cpg_core_clk r8a7795_core_clks[] __initconst = {
};
static const struct mssr_mod_clk r8a7795_mod_clks[] __initconst = {
+ DEF_MOD("fdp1-2", 117, R8A7795_CLK_S2D1),
+ DEF_MOD("fdp1-1", 118, R8A7795_CLK_S2D1),
+ DEF_MOD("fdp1-0", 119, R8A7795_CLK_S2D1),
DEF_MOD("scif5", 202, R8A7795_CLK_S3D4),
DEF_MOD("scif4", 203, R8A7795_CLK_S3D4),
DEF_MOD("scif3", 204, R8A7795_CLK_S3D4),
@@ -158,11 +139,20 @@ static const struct mssr_mod_clk r8a7795_mod_clks[] __initconst = {
DEF_MOD("intc-ap", 408, R8A7795_CLK_S3D1),
DEF_MOD("audmac0", 502, R8A7795_CLK_S3D4),
DEF_MOD("audmac1", 501, R8A7795_CLK_S3D4),
+ DEF_MOD("drif7", 508, R8A7795_CLK_S3D2),
+ DEF_MOD("drif6", 509, R8A7795_CLK_S3D2),
+ DEF_MOD("drif5", 510, R8A7795_CLK_S3D2),
+ DEF_MOD("drif4", 511, R8A7795_CLK_S3D2),
+ DEF_MOD("drif3", 512, R8A7795_CLK_S3D2),
+ DEF_MOD("drif2", 513, R8A7795_CLK_S3D2),
+ DEF_MOD("drif1", 514, R8A7795_CLK_S3D2),
+ DEF_MOD("drif0", 515, R8A7795_CLK_S3D2),
DEF_MOD("hscif4", 516, R8A7795_CLK_S3D1),
DEF_MOD("hscif3", 517, R8A7795_CLK_S3D1),
DEF_MOD("hscif2", 518, R8A7795_CLK_S3D1),
DEF_MOD("hscif1", 519, R8A7795_CLK_S3D1),
DEF_MOD("hscif0", 520, R8A7795_CLK_S3D1),
+ DEF_MOD("thermal", 522, R8A7795_CLK_CP),
DEF_MOD("pwm", 523, R8A7795_CLK_S3D4),
DEF_MOD("fcpvd3", 600, R8A7795_CLK_S2D1),
DEF_MOD("fcpvd2", 601, R8A7795_CLK_S2D1),
@@ -200,7 +190,7 @@ static const struct mssr_mod_clk r8a7795_mod_clks[] __initconst = {
DEF_MOD("du2", 722, R8A7795_CLK_S2D1),
DEF_MOD("du1", 723, R8A7795_CLK_S2D1),
DEF_MOD("du0", 724, R8A7795_CLK_S2D1),
- DEF_MOD("lvds", 727, R8A7795_CLK_S2D1),
+ DEF_MOD("lvds", 727, R8A7795_CLK_S0D4),
DEF_MOD("hdmi1", 728, R8A7795_CLK_HDMI),
DEF_MOD("hdmi0", 729, R8A7795_CLK_HDMI),
DEF_MOD("vin7", 804, R8A7795_CLK_S2D1),
@@ -263,225 +253,6 @@ static const unsigned int r8a7795_crit_mod_clks[] __initconst = {
MOD_CLK_ID(408), /* INTC-AP (GIC) */
};
-/* -----------------------------------------------------------------------------
- * SDn Clock
- *
- */
-#define CPG_SD_STP_HCK BIT(9)
-#define CPG_SD_STP_CK BIT(8)
-
-#define CPG_SD_STP_MASK (CPG_SD_STP_HCK | CPG_SD_STP_CK)
-#define CPG_SD_FC_MASK (0x7 << 2 | 0x3 << 0)
-
-#define CPG_SD_DIV_TABLE_DATA(stp_hck, stp_ck, sd_srcfc, sd_fc, sd_div) \
-{ \
- .val = ((stp_hck) ? CPG_SD_STP_HCK : 0) | \
- ((stp_ck) ? CPG_SD_STP_CK : 0) | \
- ((sd_srcfc) << 2) | \
- ((sd_fc) << 0), \
- .div = (sd_div), \
-}
-
-struct sd_div_table {
- u32 val;
- unsigned int div;
-};
-
-struct sd_clock {
- struct clk_hw hw;
- void __iomem *reg;
- const struct sd_div_table *div_table;
- unsigned int div_num;
- unsigned int div_min;
- unsigned int div_max;
-};
-
-/* SDn divider
- *          sd_srcfc   sd_fc    div
- * stp_hck  stp_ck     (div)    (div)  = sd_srcfc x sd_fc
- *-------------------------------------------------------------------
- *  0        0          0 (1)    1 (4)      4
- *  0        0          1 (2)    1 (4)      8
- *  1        0          2 (4)    1 (4)     16
- *  1        0          3 (8)    1 (4)     32
- *  1        0          4 (16)   1 (4)     64
- *  0        0          0 (1)    0 (2)      2
- *  0        0          1 (2)    0 (2)      4
- *  1        0          2 (4)    0 (2)      8
- *  1        0          3 (8)    0 (2)     16
- *  1        0          4 (16)   0 (2)     32
- */
-static const struct sd_div_table cpg_sd_div_table[] = {
-/* CPG_SD_DIV_TABLE_DATA(stp_hck, stp_ck, sd_srcfc, sd_fc, sd_div) */
- CPG_SD_DIV_TABLE_DATA(0, 0, 0, 1, 4),
- CPG_SD_DIV_TABLE_DATA(0, 0, 1, 1, 8),
- CPG_SD_DIV_TABLE_DATA(1, 0, 2, 1, 16),
- CPG_SD_DIV_TABLE_DATA(1, 0, 3, 1, 32),
- CPG_SD_DIV_TABLE_DATA(1, 0, 4, 1, 64),
- CPG_SD_DIV_TABLE_DATA(0, 0, 0, 0, 2),
- CPG_SD_DIV_TABLE_DATA(0, 0, 1, 0, 4),
- CPG_SD_DIV_TABLE_DATA(1, 0, 2, 0, 8),
- CPG_SD_DIV_TABLE_DATA(1, 0, 3, 0, 16),
- CPG_SD_DIV_TABLE_DATA(1, 0, 4, 0, 32),
-};
-
-#define to_sd_clock(_hw) container_of(_hw, struct sd_clock, hw)
-
-static int cpg_sd_clock_enable(struct clk_hw *hw)
-{
- struct sd_clock *clock = to_sd_clock(hw);
- u32 val, sd_fc;
- unsigned int i;
-
- val = clk_readl(clock->reg);
-
- sd_fc = val & CPG_SD_FC_MASK;
- for (i = 0; i < clock->div_num; i++)
- if (sd_fc == (clock->div_table[i].val & CPG_SD_FC_MASK))
- break;
-
- if (i >= clock->div_num)
- return -EINVAL;
-
- val &= ~(CPG_SD_STP_MASK);
- val |= clock->div_table[i].val & CPG_SD_STP_MASK;
-
- clk_writel(val, clock->reg);
-
- return 0;
-}
-
-static void cpg_sd_clock_disable(struct clk_hw *hw)
-{
- struct sd_clock *clock = to_sd_clock(hw);
-
- clk_writel(clk_readl(clock->reg) | CPG_SD_STP_MASK, clock->reg);
-}
-
-static int cpg_sd_clock_is_enabled(struct clk_hw *hw)
-{
- struct sd_clock *clock = to_sd_clock(hw);
-
- return !(clk_readl(clock->reg) & CPG_SD_STP_MASK);
-}
-
-static unsigned long cpg_sd_clock_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct sd_clock *clock = to_sd_clock(hw);
- unsigned long rate = parent_rate;
- u32 val, sd_fc;
- unsigned int i;
-
- val = clk_readl(clock->reg);
-
- sd_fc = val & CPG_SD_FC_MASK;
- for (i = 0; i < clock->div_num; i++)
- if (sd_fc == (clock->div_table[i].val & CPG_SD_FC_MASK))
- break;
-
- if (i >= clock->div_num)
- return -EINVAL;
-
- return DIV_ROUND_CLOSEST(rate, clock->div_table[i].div);
-}
-
-static unsigned int cpg_sd_clock_calc_div(struct sd_clock *clock,
- unsigned long rate,
- unsigned long parent_rate)
-{
- unsigned int div;
-
- if (!rate)
- rate = 1;
-
- div = DIV_ROUND_CLOSEST(parent_rate, rate);
-
- return clamp_t(unsigned int, div, clock->div_min, clock->div_max);
-}
-
-static long cpg_sd_clock_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- struct sd_clock *clock = to_sd_clock(hw);
- unsigned int div = cpg_sd_clock_calc_div(clock, rate, *parent_rate);
-
- return DIV_ROUND_CLOSEST(*parent_rate, div);
-}
-
-static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct sd_clock *clock = to_sd_clock(hw);
- unsigned int div = cpg_sd_clock_calc_div(clock, rate, parent_rate);
- u32 val;
- unsigned int i;
-
- for (i = 0; i < clock->div_num; i++)
- if (div == clock->div_table[i].div)
- break;
-
- if (i >= clock->div_num)
- return -EINVAL;
-
- val = clk_readl(clock->reg);
- val &= ~(CPG_SD_STP_MASK | CPG_SD_FC_MASK);
- val |= clock->div_table[i].val & (CPG_SD_STP_MASK | CPG_SD_FC_MASK);
- clk_writel(val, clock->reg);
-
- return 0;
-}
-
-static const struct clk_ops cpg_sd_clock_ops = {
- .enable = cpg_sd_clock_enable,
- .disable = cpg_sd_clock_disable,
- .is_enabled = cpg_sd_clock_is_enabled,
- .recalc_rate = cpg_sd_clock_recalc_rate,
- .round_rate = cpg_sd_clock_round_rate,
- .set_rate = cpg_sd_clock_set_rate,
-};
-
-static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core,
- void __iomem *base,
- const char *parent_name)
-{
- struct clk_init_data init;
- struct sd_clock *clock;
- struct clk *clk;
- unsigned int i;
-
- clock = kzalloc(sizeof(*clock), GFP_KERNEL);
- if (!clock)
- return ERR_PTR(-ENOMEM);
-
- init.name = core->name;
- init.ops = &cpg_sd_clock_ops;
- init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
- init.parent_names = &parent_name;
- init.num_parents = 1;
-
- clock->reg = base + core->offset;
- clock->hw.init = &init;
- clock->div_table = cpg_sd_div_table;
- clock->div_num = ARRAY_SIZE(cpg_sd_div_table);
-
- clock->div_max = clock->div_table[0].div;
- clock->div_min = clock->div_max;
- for (i = 1; i < clock->div_num; i++) {
- clock->div_max = max(clock->div_max, clock->div_table[i].div);
- clock->div_min = min(clock->div_min, clock->div_table[i].div);
- }
-
- clk = clk_register(NULL, &clock->hw);
- if (IS_ERR(clk))
- kfree(clock);
-
- return clk;
-}
-
-#define CPG_PLL0CR 0x00d8
-#define CPG_PLL2CR 0x002c
-#define CPG_PLL4CR 0x01f4
/*
* CPG Clock Data
@@ -513,13 +284,7 @@ static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core,
(((md) & BIT(19)) >> 18) | \
(((md) & BIT(17)) >> 17))
-struct cpg_pll_config {
- unsigned int extal_div;
- unsigned int pll1_mult;
- unsigned int pll3_mult;
-};
-
-static const struct cpg_pll_config cpg_pll_configs[16] __initconst = {
+static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = {
/* EXTAL div PLL1 mult PLL3 mult */
{ 1, 192, 192, },
{ 1, 192, 128, },
@@ -539,112 +304,9 @@ static const struct cpg_pll_config cpg_pll_configs[16] __initconst = {
{ 2, 192, 192, },
};
-static const struct cpg_pll_config *cpg_pll_config __initdata;
-
-static
-struct clk * __init r8a7795_cpg_clk_register(struct device *dev,
- const struct cpg_core_clk *core,
- const struct cpg_mssr_info *info,
- struct clk **clks,
- void __iomem *base)
-{
- const struct clk *parent;
- unsigned int mult = 1;
- unsigned int div = 1;
- u32 value;
-
- parent = clks[core->parent];
- if (IS_ERR(parent))
- return ERR_CAST(parent);
-
- switch (core->type) {
- case CLK_TYPE_GEN3_MAIN:
- div = cpg_pll_config->extal_div;
- break;
-
- case CLK_TYPE_GEN3_PLL0:
- /*
- * PLL0 is a configurable multiplier clock. Register it as a
- * fixed factor clock for now as there's no generic multiplier
- * clock implementation and we currently have no need to change
- * the multiplier value.
- */
- value = readl(base + CPG_PLL0CR);
- mult = (((value >> 24) & 0x7f) + 1) * 2;
- break;
-
- case CLK_TYPE_GEN3_PLL1:
- mult = cpg_pll_config->pll1_mult;
- break;
-
- case CLK_TYPE_GEN3_PLL2:
- /*
- * PLL2 is a configurable multiplier clock. Register it as a
- * fixed factor clock for now as there's no generic multiplier
- * clock implementation and we currently have no need to change
- * the multiplier value.
- */
- value = readl(base + CPG_PLL2CR);
- mult = (((value >> 24) & 0x7f) + 1) * 2;
- break;
-
- case CLK_TYPE_GEN3_PLL3:
- mult = cpg_pll_config->pll3_mult;
- break;
-
- case CLK_TYPE_GEN3_PLL4:
- /*
- * PLL4 is a configurable multiplier clock. Register it as a
- * fixed factor clock for now as there's no generic multiplier
- * clock implementation and we currently have no need to change
- * the multiplier value.
- */
- value = readl(base + CPG_PLL4CR);
- mult = (((value >> 24) & 0x7f) + 1) * 2;
- break;
-
- case CLK_TYPE_GEN3_SD:
- return cpg_sd_clk_register(core, base, __clk_get_name(parent));
-
- case CLK_TYPE_GEN3_R:
- /* RINT is default. Only if EXTALR is populated, we switch to it */
- value = readl(base + CPG_RCKCR) & 0x3f;
-
- if (clk_get_rate(clks[CLK_EXTALR])) {
- parent = clks[CLK_EXTALR];
- value |= BIT(15);
- }
-
- writel(value, base + CPG_RCKCR);
- break;
-
- default:
- return ERR_PTR(-EINVAL);
- }
-
- return clk_register_fixed_factor(NULL, core->name,
- __clk_get_name(parent), 0, mult, div);
-}
-
-/*
- * Reset register definitions.
- */
-#define MODEMR 0xe6160060
-
-static u32 rcar_gen3_read_mode_pins(void)
-{
- void __iomem *modemr = ioremap_nocache(MODEMR, 4);
- u32 mode;
-
- BUG_ON(!modemr);
- mode = ioread32(modemr);
- iounmap(modemr);
-
- return mode;
-}
-
static int __init r8a7795_cpg_mssr_init(struct device *dev)
{
+ const struct rcar_gen3_cpg_pll_config *cpg_pll_config;
u32 cpg_mode = rcar_gen3_read_mode_pins();
cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
@@ -653,7 +315,7 @@ static int __init r8a7795_cpg_mssr_init(struct device *dev)
return -EINVAL;
}
- return 0;
+ return rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR);
}
const struct cpg_mssr_info r8a7795_cpg_mssr_info __initconst = {
@@ -674,5 +336,5 @@ const struct cpg_mssr_info r8a7795_cpg_mssr_info __initconst = {
/* Callbacks */
.init = r8a7795_cpg_mssr_init,
- .cpg_clk_register = r8a7795_cpg_clk_register,
+ .cpg_clk_register = rcar_gen3_cpg_clk_register,
};
diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c
new file mode 100644
index 000000000..c84b549c1
--- /dev/null
+++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c
@@ -0,0 +1,192 @@
+/*
+ * r8a7796 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2016 Glider bvba
+ *
+ * Based on r8a7795-cpg-mssr.c
+ *
+ * Copyright (C) 2015 Glider bvba
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/clock/r8a7796-cpg-mssr.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen3-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R8A7796_CLK_OSC,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+ CLK_EXTALR,
+
+ /* Internal Core Clocks */
+ CLK_MAIN,
+ CLK_PLL0,
+ CLK_PLL1,
+ CLK_PLL2,
+ CLK_PLL3,
+ CLK_PLL4,
+ CLK_PLL1_DIV2,
+ CLK_PLL1_DIV4,
+ CLK_S0,
+ CLK_S1,
+ CLK_S2,
+ CLK_S3,
+ CLK_SDSRC,
+ CLK_SSPSRC,
+
+ /* Module Clocks */
+ MOD_CLK_BASE
+};
+
+static const struct cpg_core_clk r8a7796_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+ DEF_INPUT("extalr", CLK_EXTALR),
+
+ /* Internal Core Clocks */
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN3_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll0", CLK_PLL0, CLK_TYPE_GEN3_PLL0, CLK_MAIN),
+ DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN3_PLL1, CLK_MAIN),
+ DEF_BASE(".pll2", CLK_PLL2, CLK_TYPE_GEN3_PLL2, CLK_MAIN),
+ DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN3_PLL3, CLK_MAIN),
+ DEF_BASE(".pll4", CLK_PLL4, CLK_TYPE_GEN3_PLL4, CLK_MAIN),
+
+ DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
+ DEF_FIXED(".pll1_div4", CLK_PLL1_DIV4, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED(".s0", CLK_S0, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED(".s1", CLK_S1, CLK_PLL1_DIV2, 3, 1),
+ DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
+ DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
+
+ /* Core Clock Outputs */
+ DEF_FIXED("ztr", R8A7796_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
+ DEF_FIXED("ztrd2", R8A7796_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1),
+ DEF_FIXED("zt", R8A7796_CLK_ZT, CLK_PLL1_DIV2, 4, 1),
+ DEF_FIXED("zx", R8A7796_CLK_ZX, CLK_PLL1_DIV2, 2, 1),
+ DEF_FIXED("s0d1", R8A7796_CLK_S0D1, CLK_S0, 1, 1),
+ DEF_FIXED("s0d2", R8A7796_CLK_S0D2, CLK_S0, 2, 1),
+ DEF_FIXED("s0d3", R8A7796_CLK_S0D3, CLK_S0, 3, 1),
+ DEF_FIXED("s0d4", R8A7796_CLK_S0D4, CLK_S0, 4, 1),
+ DEF_FIXED("s0d6", R8A7796_CLK_S0D6, CLK_S0, 6, 1),
+ DEF_FIXED("s0d8", R8A7796_CLK_S0D8, CLK_S0, 8, 1),
+ DEF_FIXED("s0d12", R8A7796_CLK_S0D12, CLK_S0, 12, 1),
+ DEF_FIXED("s1d1", R8A7796_CLK_S1D1, CLK_S1, 1, 1),
+ DEF_FIXED("s1d2", R8A7796_CLK_S1D2, CLK_S1, 2, 1),
+ DEF_FIXED("s1d4", R8A7796_CLK_S1D4, CLK_S1, 4, 1),
+ DEF_FIXED("s2d1", R8A7796_CLK_S2D1, CLK_S2, 1, 1),
+ DEF_FIXED("s2d2", R8A7796_CLK_S2D2, CLK_S2, 2, 1),
+ DEF_FIXED("s2d4", R8A7796_CLK_S2D4, CLK_S2, 4, 1),
+ DEF_FIXED("s3d1", R8A7796_CLK_S3D1, CLK_S3, 1, 1),
+ DEF_FIXED("s3d2", R8A7796_CLK_S3D2, CLK_S3, 2, 1),
+ DEF_FIXED("s3d4", R8A7796_CLK_S3D4, CLK_S3, 4, 1),
+
+ DEF_FIXED("cl", R8A7796_CLK_CL, CLK_PLL1_DIV2, 48, 1),
+ DEF_FIXED("cp", R8A7796_CLK_CP, CLK_EXTAL, 2, 1),
+};
+
+static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
+ DEF_MOD("scif2", 310, R8A7796_CLK_S3D4),
+ DEF_MOD("intc-ap", 408, R8A7796_CLK_S3D1),
+};
+
+static const unsigned int r8a7796_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(408), /* INTC-AP (GIC) */
+};
+
+
+/*
+ * CPG Clock Data
+ */
+
+/*
+ *   MD        EXTAL       PLL0    PLL1    PLL2    PLL3    PLL4
+ * 14 13 19 17 (MHz)
+ *-------------------------------------------------------------------
+ * 0  0  0  0  16.66 x 1   x180    x192    x144    x192    x144
+ * 0  0  0  1  16.66 x 1   x180    x192    x144    x128    x144
+ * 0  0  1  0  Prohibited setting
+ * 0  0  1  1  16.66 x 1   x180    x192    x144    x192    x144
+ * 0  1  0  0  20 x 1      x150    x160    x120    x160    x120
+ * 0  1  0  1  20 x 1      x150    x160    x120    x106    x120
+ * 0  1  1  0  Prohibited setting
+ * 0  1  1  1  20 x 1      x150    x160    x120    x160    x120
+ * 1  0  0  0  25 x 1      x120    x128    x96     x128    x96
+ * 1  0  0  1  25 x 1      x120    x128    x96     x84     x96
+ * 1  0  1  0  Prohibited setting
+ * 1  0  1  1  25 x 1      x120    x128    x96     x128    x96
+ * 1  1  0  0  33.33 / 2   x180    x192    x144    x192    x144
+ * 1  1  0  1  33.33 / 2   x180    x192    x144    x128    x144
+ * 1  1  1  0  Prohibited setting
+ * 1  1  1  1  33.33 / 2   x180    x192    x144    x192    x144
+ */
+#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 11) | \
+ (((md) & BIT(13)) >> 11) | \
+ (((md) & BIT(19)) >> 18) | \
+ (((md) & BIT(17)) >> 17))
+
+static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = {
+ /* EXTAL div PLL1 mult PLL3 mult */
+ { 1, 192, 192, },
+ { 1, 192, 128, },
+ { 0, /* Prohibited setting */ },
+ { 1, 192, 192, },
+ { 1, 160, 160, },
+ { 1, 160, 106, },
+ { 0, /* Prohibited setting */ },
+ { 1, 160, 160, },
+ { 1, 128, 128, },
+ { 1, 128, 84, },
+ { 0, /* Prohibited setting */ },
+ { 1, 128, 128, },
+ { 2, 192, 192, },
+ { 2, 192, 128, },
+ { 0, /* Prohibited setting */ },
+ { 2, 192, 192, },
+};
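/*
 * Illustrative example, not part of the patch: CPG_PLL_CONFIG_INDEX() packs
 * mode pins MD14, MD13, MD19 and MD17 into bits 3..0 of the table index.
 * Assuming MD14=1, MD13=0, MD19=0, MD17=1 (the 25 MHz, PLL3 x84 row of the
 * table above):
 *
 *	(BIT(14) >> 11) | 0 | 0 | (BIT(17) >> 17) == 8 | 1 == 9
 *	cpg_pll_configs[9] == { 1, 128, 84 }
 *		-> EXTAL div 1, PLL1 mult 128, PLL3 mult 84
 */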
+
+static int __init r8a7796_cpg_mssr_init(struct device *dev)
+{
+ const struct rcar_gen3_cpg_pll_config *cpg_pll_config;
+ u32 cpg_mode = rcar_gen3_read_mode_pins();
+
+ cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+ if (!cpg_pll_config->extal_div) {
+ dev_err(dev, "Prohibited setting (cpg_mode=0x%x)\n", cpg_mode);
+ return -EINVAL;
+ }
+
+ return rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR);
+}
+
+const struct cpg_mssr_info r8a7796_cpg_mssr_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r8a7796_core_clks,
+ .num_core_clks = ARRAY_SIZE(r8a7796_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r8a7796_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r8a7796_mod_clks),
+ .num_hw_mod_clks = 12 * 32,
+
+ /* Critical Module Clocks */
+ .crit_mod_clks = r8a7796_crit_mod_clks,
+ .num_crit_mod_clks = ARRAY_SIZE(r8a7796_crit_mod_clks),
+
+ /* Callbacks */
+ .init = r8a7796_cpg_mssr_init,
+ .cpg_clk_register = rcar_gen3_cpg_clk_register,
+};
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
new file mode 100644
index 000000000..bb4f2f9a8
--- /dev/null
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -0,0 +1,359 @@
+/*
+ * R-Car Gen3 Clock Pulse Generator
+ *
+ * Copyright (C) 2015-2016 Glider bvba
+ *
+ * Based on clk-rcar-gen3.c
+ *
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bug.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen3-cpg.h"
+
+#define CPG_PLL0CR 0x00d8
+#define CPG_PLL2CR 0x002c
+#define CPG_PLL4CR 0x01f4
+
+
+/*
+ * SDn Clock
+ */
+#define CPG_SD_STP_HCK BIT(9)
+#define CPG_SD_STP_CK BIT(8)
+
+#define CPG_SD_STP_MASK (CPG_SD_STP_HCK | CPG_SD_STP_CK)
+#define CPG_SD_FC_MASK (0x7 << 2 | 0x3 << 0)
+
+#define CPG_SD_DIV_TABLE_DATA(stp_hck, stp_ck, sd_srcfc, sd_fc, sd_div) \
+{ \
+ .val = ((stp_hck) ? CPG_SD_STP_HCK : 0) | \
+ ((stp_ck) ? CPG_SD_STP_CK : 0) | \
+ ((sd_srcfc) << 2) | \
+ ((sd_fc) << 0), \
+ .div = (sd_div), \
+}
+
+struct sd_div_table {
+ u32 val;
+ unsigned int div;
+};
+
+struct sd_clock {
+ struct clk_hw hw;
+ void __iomem *reg;
+ const struct sd_div_table *div_table;
+ unsigned int div_num;
+ unsigned int div_min;
+ unsigned int div_max;
+};
+
+/* SDn divider
+ *          sd_srcfc   sd_fc    div
+ * stp_hck  stp_ck     (div)    (div)  = sd_srcfc x sd_fc
+ *-------------------------------------------------------------------
+ *  0        0          0 (1)    1 (4)      4
+ *  0        0          1 (2)    1 (4)      8
+ *  1        0          2 (4)    1 (4)     16
+ *  1        0          3 (8)    1 (4)     32
+ *  1        0          4 (16)   1 (4)     64
+ *  0        0          0 (1)    0 (2)      2
+ *  0        0          1 (2)    0 (2)      4
+ *  1        0          2 (4)    0 (2)      8
+ *  1        0          3 (8)    0 (2)     16
+ *  1        0          4 (16)   0 (2)     32
+ */
+static const struct sd_div_table cpg_sd_div_table[] = {
+/* CPG_SD_DIV_TABLE_DATA(stp_hck, stp_ck, sd_srcfc, sd_fc, sd_div) */
+ CPG_SD_DIV_TABLE_DATA(0, 0, 0, 1, 4),
+ CPG_SD_DIV_TABLE_DATA(0, 0, 1, 1, 8),
+ CPG_SD_DIV_TABLE_DATA(1, 0, 2, 1, 16),
+ CPG_SD_DIV_TABLE_DATA(1, 0, 3, 1, 32),
+ CPG_SD_DIV_TABLE_DATA(1, 0, 4, 1, 64),
+ CPG_SD_DIV_TABLE_DATA(0, 0, 0, 0, 2),
+ CPG_SD_DIV_TABLE_DATA(0, 0, 1, 0, 4),
+ CPG_SD_DIV_TABLE_DATA(1, 0, 2, 0, 8),
+ CPG_SD_DIV_TABLE_DATA(1, 0, 3, 0, 16),
+ CPG_SD_DIV_TABLE_DATA(1, 0, 4, 0, 32),
+};
+
+#define to_sd_clock(_hw) container_of(_hw, struct sd_clock, hw)
+
+static int cpg_sd_clock_enable(struct clk_hw *hw)
+{
+ struct sd_clock *clock = to_sd_clock(hw);
+ u32 val, sd_fc;
+ unsigned int i;
+
+ val = clk_readl(clock->reg);
+
+ sd_fc = val & CPG_SD_FC_MASK;
+ for (i = 0; i < clock->div_num; i++)
+ if (sd_fc == (clock->div_table[i].val & CPG_SD_FC_MASK))
+ break;
+
+ if (i >= clock->div_num)
+ return -EINVAL;
+
+ val &= ~(CPG_SD_STP_MASK);
+ val |= clock->div_table[i].val & CPG_SD_STP_MASK;
+
+ clk_writel(val, clock->reg);
+
+ return 0;
+}
+
+static void cpg_sd_clock_disable(struct clk_hw *hw)
+{
+ struct sd_clock *clock = to_sd_clock(hw);
+
+ clk_writel(clk_readl(clock->reg) | CPG_SD_STP_MASK, clock->reg);
+}
+
+static int cpg_sd_clock_is_enabled(struct clk_hw *hw)
+{
+ struct sd_clock *clock = to_sd_clock(hw);
+
+ return !(clk_readl(clock->reg) & CPG_SD_STP_MASK);
+}
+
+static unsigned long cpg_sd_clock_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct sd_clock *clock = to_sd_clock(hw);
+ unsigned long rate = parent_rate;
+ u32 val, sd_fc;
+ unsigned int i;
+
+ val = clk_readl(clock->reg);
+
+ sd_fc = val & CPG_SD_FC_MASK;
+ for (i = 0; i < clock->div_num; i++)
+ if (sd_fc == (clock->div_table[i].val & CPG_SD_FC_MASK))
+ break;
+
+ if (i >= clock->div_num)
+ return -EINVAL;
+
+ return DIV_ROUND_CLOSEST(rate, clock->div_table[i].div);
+}
+
+static unsigned int cpg_sd_clock_calc_div(struct sd_clock *clock,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ unsigned int div;
+
+ if (!rate)
+ rate = 1;
+
+ div = DIV_ROUND_CLOSEST(parent_rate, rate);
+
+ return clamp_t(unsigned int, div, clock->div_min, clock->div_max);
+}
+
+static long cpg_sd_clock_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct sd_clock *clock = to_sd_clock(hw);
+ unsigned int div = cpg_sd_clock_calc_div(clock, rate, *parent_rate);
+
+ return DIV_ROUND_CLOSEST(*parent_rate, div);
+}
+
+static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct sd_clock *clock = to_sd_clock(hw);
+ unsigned int div = cpg_sd_clock_calc_div(clock, rate, parent_rate);
+ u32 val;
+ unsigned int i;
+
+ for (i = 0; i < clock->div_num; i++)
+ if (div == clock->div_table[i].div)
+ break;
+
+ if (i >= clock->div_num)
+ return -EINVAL;
+
+ val = clk_readl(clock->reg);
+ val &= ~(CPG_SD_STP_MASK | CPG_SD_FC_MASK);
+ val |= clock->div_table[i].val & (CPG_SD_STP_MASK | CPG_SD_FC_MASK);
+ clk_writel(val, clock->reg);
+
+ return 0;
+}
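/*
 * Illustrative example, not part of the patch, assuming an 800 MHz parent
 * (figure invented for the walkthrough):
 *
 *	rate = 100 MHz: div = DIV_ROUND_CLOSEST(800M, 100M) = 8, within
 *	[div_min, div_max] = [2, 64]; cpg_sd_clock_set_rate() matches table
 *	entry CPG_SD_DIV_TABLE_DATA(0, 0, 1, 1, 8) and programs its
 *	STP/FC bits.
 *
 *	rate = 300 MHz: div = 3 survives the clamp but matches no table
 *	entry, so cpg_sd_clock_set_rate() returns -EINVAL.
 */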
+
+static const struct clk_ops cpg_sd_clock_ops = {
+ .enable = cpg_sd_clock_enable,
+ .disable = cpg_sd_clock_disable,
+ .is_enabled = cpg_sd_clock_is_enabled,
+ .recalc_rate = cpg_sd_clock_recalc_rate,
+ .round_rate = cpg_sd_clock_round_rate,
+ .set_rate = cpg_sd_clock_set_rate,
+};
+
+static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core,
+ void __iomem *base,
+ const char *parent_name)
+{
+ struct clk_init_data init;
+ struct sd_clock *clock;
+ struct clk *clk;
+ unsigned int i;
+
+ clock = kzalloc(sizeof(*clock), GFP_KERNEL);
+ if (!clock)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = core->name;
+ init.ops = &cpg_sd_clock_ops;
+ init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ clock->reg = base + core->offset;
+ clock->hw.init = &init;
+ clock->div_table = cpg_sd_div_table;
+ clock->div_num = ARRAY_SIZE(cpg_sd_div_table);
+
+ clock->div_max = clock->div_table[0].div;
+ clock->div_min = clock->div_max;
+ for (i = 1; i < clock->div_num; i++) {
+ clock->div_max = max(clock->div_max, clock->div_table[i].div);
+ clock->div_min = min(clock->div_min, clock->div_table[i].div);
+ }
+
+ clk = clk_register(NULL, &clock->hw);
+ if (IS_ERR(clk))
+ kfree(clock);
+
+ return clk;
+}
+
+
+static const struct rcar_gen3_cpg_pll_config *cpg_pll_config __initdata;
+static unsigned int cpg_clk_extalr __initdata;
+
+struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
+ const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
+ struct clk **clks, void __iomem *base)
+{
+ const struct clk *parent;
+ unsigned int mult = 1;
+ unsigned int div = 1;
+ u32 value;
+
+ parent = clks[core->parent];
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+
+ switch (core->type) {
+ case CLK_TYPE_GEN3_MAIN:
+ div = cpg_pll_config->extal_div;
+ break;
+
+ case CLK_TYPE_GEN3_PLL0:
+ /*
+ * PLL0 is a configurable multiplier clock. Register it as a
+ * fixed factor clock for now as there's no generic multiplier
+ * clock implementation and we currently have no need to change
+ * the multiplier value.
+ */
+ value = readl(base + CPG_PLL0CR);
+ mult = (((value >> 24) & 0x7f) + 1) * 2;
+ break;
+
+ case CLK_TYPE_GEN3_PLL1:
+ mult = cpg_pll_config->pll1_mult;
+ break;
+
+ case CLK_TYPE_GEN3_PLL2:
+ /*
+ * PLL2 is a configurable multiplier clock. Register it as a
+ * fixed factor clock for now as there's no generic multiplier
+ * clock implementation and we currently have no need to change
+ * the multiplier value.
+ */
+ value = readl(base + CPG_PLL2CR);
+ mult = (((value >> 24) & 0x7f) + 1) * 2;
+ break;
+
+ case CLK_TYPE_GEN3_PLL3:
+ mult = cpg_pll_config->pll3_mult;
+ break;
+
+ case CLK_TYPE_GEN3_PLL4:
+ /*
+ * PLL4 is a configurable multiplier clock. Register it as a
+ * fixed factor clock for now as there's no generic multiplier
+ * clock implementation and we currently have no need to change
+ * the multiplier value.
+ */
+ value = readl(base + CPG_PLL4CR);
+ mult = (((value >> 24) & 0x7f) + 1) * 2;
+ break;
+
+ case CLK_TYPE_GEN3_SD:
+ return cpg_sd_clk_register(core, base, __clk_get_name(parent));
+
+ case CLK_TYPE_GEN3_R:
+ /*
+ * RINT is the default; we switch to EXTALR only if it is populated.
+ */
+ value = readl(base + CPG_RCKCR) & 0x3f;
+
+ if (clk_get_rate(clks[cpg_clk_extalr])) {
+ parent = clks[cpg_clk_extalr];
+ value |= BIT(15);
+ }
+
+ writel(value, base + CPG_RCKCR);
+ break;
+
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ return clk_register_fixed_factor(NULL, core->name,
+ __clk_get_name(parent), 0, mult, div);
+}
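/*
 * Illustrative example, not part of the patch: the PLL0/PLL2/PLL4 branches
 * above decode a 7-bit multiplier field. Assuming (invented value) the boot
 * loader left 0x59 in bits 30:24 of PLL0CR:
 *
 *	(value >> 24) & 0x7f == 0x59 == 89
 *	mult = (89 + 1) * 2 == 180   ->  PLL0 = MAIN x180
 */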
+
+/*
+ * Reset register definitions.
+ */
+#define MODEMR 0xe6160060
+
+u32 __init rcar_gen3_read_mode_pins(void)
+{
+ void __iomem *modemr = ioremap_nocache(MODEMR, 4);
+ u32 mode;
+
+ BUG_ON(!modemr);
+ mode = ioread32(modemr);
+ iounmap(modemr);
+
+ return mode;
+}
+
+int __init rcar_gen3_cpg_init(const struct rcar_gen3_cpg_pll_config *config,
+ unsigned int clk_extalr)
+{
+ cpg_pll_config = config;
+ cpg_clk_extalr = clk_extalr;
+ return 0;
+}
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.h b/drivers/clk/renesas/rcar-gen3-cpg.h
new file mode 100644
index 000000000..f69908514
--- /dev/null
+++ b/drivers/clk/renesas/rcar-gen3-cpg.h
@@ -0,0 +1,43 @@
+/*
+ * R-Car Gen3 Clock Pulse Generator
+ *
+ * Copyright (C) 2015-2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#ifndef __CLK_RENESAS_RCAR_GEN3_CPG_H__
+#define __CLK_RENESAS_RCAR_GEN3_CPG_H__
+
+enum rcar_gen3_clk_types {
+ CLK_TYPE_GEN3_MAIN = CLK_TYPE_CUSTOM,
+ CLK_TYPE_GEN3_PLL0,
+ CLK_TYPE_GEN3_PLL1,
+ CLK_TYPE_GEN3_PLL2,
+ CLK_TYPE_GEN3_PLL3,
+ CLK_TYPE_GEN3_PLL4,
+ CLK_TYPE_GEN3_SD,
+ CLK_TYPE_GEN3_R,
+};
+
+#define DEF_GEN3_SD(_name, _id, _parent, _offset) \
+ DEF_BASE(_name, _id, CLK_TYPE_GEN3_SD, _parent, .offset = _offset)
+
+struct rcar_gen3_cpg_pll_config {
+ unsigned int extal_div;
+ unsigned int pll1_mult;
+ unsigned int pll3_mult;
+};
+
+#define CPG_RCKCR 0x240
+
+u32 rcar_gen3_read_mode_pins(void);
+struct clk *rcar_gen3_cpg_clk_register(struct device *dev,
+ const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
+ struct clk **clks, void __iomem *base);
+int rcar_gen3_cpg_init(const struct rcar_gen3_cpg_pll_config *config,
+ unsigned int clk_extalr);
+
+#endif
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 210cd744a..e1365e749 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -509,6 +509,12 @@ static const struct of_device_id cpg_mssr_match[] = {
.data = &r8a7795_cpg_mssr_info,
},
#endif
+#ifdef CONFIG_ARCH_R8A7796
+ {
+ .compatible = "renesas,r8a7796-cpg-mssr",
+ .data = &r8a7796_cpg_mssr_info,
+ },
+#endif
{ /* sentinel */ }
};
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
index 0d1e3e811..ee7edfaf1 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.h
+++ b/drivers/clk/renesas/renesas-cpg-mssr.h
@@ -131,4 +131,5 @@ struct cpg_mssr_info {
};
extern const struct cpg_mssr_info r8a7795_cpg_mssr_info;
+extern const struct cpg_mssr_info r8a7796_cpg_mssr_info;
#endif
diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
index 016bdb0b7..db6e5a9e6 100644
--- a/drivers/clk/rockchip/clk-rk3228.c
+++ b/drivers/clk/rockchip/clk-rk3228.c
@@ -151,8 +151,8 @@ PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" };
PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" };
PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" };
-PNAME(mux_sclk_macphy_50m_p) = { "ext_gmac", "phy_50m_out" };
-PNAME(mux_sclk_gmac_pre_p) = { "sclk_gmac_src", "sclk_macphy_50m" };
+PNAME(mux_sclk_mac_extclk_p) = { "ext_gmac", "phy_50m_out" };
+PNAME(mux_sclk_gmac_pre_p) = { "sclk_gmac_src", "sclk_mac_extclk" };
PNAME(mux_sclk_macphy_p) = { "sclk_gmac_src", "ext_gmac" };
static struct rockchip_pll_clock rk3228_pll_clks[] __initdata = {
@@ -170,6 +170,34 @@ static struct rockchip_pll_clock rk3228_pll_clks[] __initdata = {
#define DFLAGS CLK_DIVIDER_HIWORD_MASK
#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE)
+static struct rockchip_clk_branch rk3228_i2s0_fracmux __initdata =
+ MUX(0, "i2s0_pre", mux_i2s0_p, CLK_SET_RATE_PARENT,
+ RK2928_CLKSEL_CON(9), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3228_i2s1_fracmux __initdata =
+ MUX(0, "i2s1_pre", mux_i2s1_pre_p, CLK_SET_RATE_PARENT,
+ RK2928_CLKSEL_CON(3), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3228_i2s2_fracmux __initdata =
+ MUX(0, "i2s2_pre", mux_i2s2_p, CLK_SET_RATE_PARENT,
+ RK2928_CLKSEL_CON(16), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3228_spdif_fracmux __initdata =
+ MUX(SCLK_SPDIF, "sclk_spdif", mux_sclk_spdif_p, CLK_SET_RATE_PARENT,
+ RK2928_CLKSEL_CON(6), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3228_uart0_fracmux __initdata =
+ MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, CLK_SET_RATE_PARENT,
+ RK2928_CLKSEL_CON(13), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3228_uart1_fracmux __initdata =
+ MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, CLK_SET_RATE_PARENT,
+ RK2928_CLKSEL_CON(14), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3228_uart2_fracmux __initdata =
+ MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, CLK_SET_RATE_PARENT,
+ RK2928_CLKSEL_CON(15), 8, 2, MFLAGS);
+
static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
/*
* Clock-Architecture Diagram 1
@@ -335,7 +363,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
RK2928_CLKGATE_CON(2), 6, GFLAGS),
GATE(0, "sclk_hsadc", "ext_hsadc", 0,
- RK3288_CLKGATE_CON(10), 12, GFLAGS),
+ RK2928_CLKGATE_CON(10), 12, GFLAGS),
COMPOSITE(0, "sclk_wifi", mux_pll_src_cpll_gpll_usb480m_p, 0,
RK2928_CLKSEL_CON(23), 5, 2, MFLAGS, 0, 6, DFLAGS,
@@ -379,22 +407,21 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
COMPOSITE(0, "i2s0_src", mux_pll_src_2plls_p, 0,
RK2928_CLKSEL_CON(9), 15, 1, MFLAGS, 0, 7, DFLAGS,
RK2928_CLKGATE_CON(0), 3, GFLAGS),
- COMPOSITE_FRAC(0, "i2s0_frac", "i2s0_src", CLK_SET_RATE_PARENT,
- RK3288_CLKSEL_CON(8), 0,
- RK3288_CLKGATE_CON(0), 4, GFLAGS),
- COMPOSITE_NODIV(SCLK_I2S0, "sclk_i2s0", mux_i2s0_p, 0,
- RK2928_CLKSEL_CON(9), 8, 2, MFLAGS,
+ COMPOSITE_FRACMUX(0, "i2s0_frac", "i2s0_src", CLK_SET_RATE_PARENT,
+ RK2928_CLKSEL_CON(8), 0,
+ RK2928_CLKGATE_CON(0), 4, GFLAGS,
+ &rk3228_i2s0_fracmux),
+ GATE(SCLK_I2S0, "sclk_i2s0", "i2s0_pre", CLK_SET_RATE_PARENT,
RK2928_CLKGATE_CON(0), 5, GFLAGS),
COMPOSITE(0, "i2s1_src", mux_pll_src_2plls_p, 0,
RK2928_CLKSEL_CON(3), 15, 1, MFLAGS, 0, 7, DFLAGS,
RK2928_CLKGATE_CON(0), 10, GFLAGS),
- COMPOSITE_FRAC(0, "i2s1_frac", "i2s1_src", CLK_SET_RATE_PARENT,
- RK3288_CLKSEL_CON(7), 0,
- RK3288_CLKGATE_CON(0), 11, GFLAGS),
- MUX(0, "i2s1_pre", mux_i2s1_pre_p, 0,
- RK2928_CLKSEL_CON(3), 8, 2, MFLAGS),
- GATE(SCLK_I2S1, "sclk_i2s1", "i2s1_pre", 0,
+ COMPOSITE_FRACMUX(0, "i2s1_frac", "i2s1_src", CLK_SET_RATE_PARENT,
+ RK2928_CLKSEL_CON(7), 0,
+ RK2928_CLKGATE_CON(0), 11, GFLAGS,
+ &rk3228_i2s1_fracmux),
+ GATE(SCLK_I2S1, "sclk_i2s1", "i2s1_pre", CLK_SET_RATE_PARENT,
RK2928_CLKGATE_CON(0), 14, GFLAGS),
COMPOSITE_NODIV(SCLK_I2S_OUT, "i2s_out", mux_i2s_out_p, 0,
RK2928_CLKSEL_CON(3), 12, 1, MFLAGS,
@@ -403,21 +430,20 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
COMPOSITE(0, "i2s2_src", mux_pll_src_2plls_p, 0,
RK2928_CLKSEL_CON(16), 15, 1, MFLAGS, 0, 7, DFLAGS,
RK2928_CLKGATE_CON(0), 7, GFLAGS),
- COMPOSITE_FRAC(0, "i2s2_frac", "i2s2_src", CLK_SET_RATE_PARENT,
- RK3288_CLKSEL_CON(30), 0,
- RK3288_CLKGATE_CON(0), 8, GFLAGS),
- COMPOSITE_NODIV(SCLK_I2S2, "sclk_i2s2", mux_i2s2_p, 0,
- RK2928_CLKSEL_CON(16), 8, 2, MFLAGS,
+ COMPOSITE_FRACMUX(0, "i2s2_frac", "i2s2_src", CLK_SET_RATE_PARENT,
+ RK2928_CLKSEL_CON(30), 0,
+ RK2928_CLKGATE_CON(0), 8, GFLAGS,
+ &rk3228_i2s2_fracmux),
+ GATE(SCLK_I2S2, "sclk_i2s2", "i2s2_pre", CLK_SET_RATE_PARENT,
RK2928_CLKGATE_CON(0), 9, GFLAGS),
COMPOSITE(0, "sclk_spdif_src", mux_pll_src_2plls_p, 0,
RK2928_CLKSEL_CON(6), 15, 1, MFLAGS, 0, 7, DFLAGS,
RK2928_CLKGATE_CON(2), 10, GFLAGS),
- COMPOSITE_FRAC(0, "spdif_frac", "sclk_spdif_src", CLK_SET_RATE_PARENT,
- RK3288_CLKSEL_CON(20), 0,
- RK3288_CLKGATE_CON(2), 12, GFLAGS),
- MUX(SCLK_SPDIF, "sclk_spdif", mux_sclk_spdif_p, 0,
- RK2928_CLKSEL_CON(6), 8, 2, MFLAGS),
+ COMPOSITE_FRACMUX(0, "spdif_frac", "sclk_spdif_src", CLK_SET_RATE_PARENT,
+ RK2928_CLKSEL_CON(20), 0,
+ RK2928_CLKGATE_CON(2), 12, GFLAGS,
+ &rk3228_spdif_fracmux),
GATE(0, "jtag", "ext_jtag", 0,
RK2928_CLKGATE_CON(1), 3, GFLAGS),
@@ -456,45 +482,42 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
COMPOSITE(0, "uart2_src", mux_pll_src_cpll_gpll_usb480m_p,
0, RK2928_CLKSEL_CON(15), 12, 2,
MFLAGS, 0, 7, DFLAGS, RK2928_CLKGATE_CON(1), 12, GFLAGS),
- COMPOSITE_FRAC(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT,
+ COMPOSITE_FRACMUX(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(17), 0,
- RK2928_CLKGATE_CON(1), 9, GFLAGS),
- COMPOSITE_FRAC(0, "uart1_frac", "uart1_src", CLK_SET_RATE_PARENT,
+ RK2928_CLKGATE_CON(1), 9, GFLAGS,
+ &rk3228_uart0_fracmux),
+ COMPOSITE_FRACMUX(0, "uart1_frac", "uart1_src", CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(18), 0,
- RK2928_CLKGATE_CON(1), 11, GFLAGS),
- COMPOSITE_FRAC(0, "uart2_frac", "uart2_src", CLK_SET_RATE_PARENT,
+ RK2928_CLKGATE_CON(1), 11, GFLAGS,
+ &rk3228_uart1_fracmux),
+ COMPOSITE_FRACMUX(0, "uart2_frac", "uart2_src", CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(19), 0,
- RK2928_CLKGATE_CON(1), 13, GFLAGS),
- MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, CLK_SET_RATE_PARENT,
- RK2928_CLKSEL_CON(13), 8, 2, MFLAGS),
- MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, CLK_SET_RATE_PARENT,
- RK2928_CLKSEL_CON(14), 8, 2, MFLAGS),
- MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, CLK_SET_RATE_PARENT,
- RK2928_CLKSEL_CON(15), 8, 2, MFLAGS),
+ RK2928_CLKGATE_CON(1), 13, GFLAGS,
+ &rk3228_uart2_fracmux),
COMPOSITE(SCLK_NANDC, "sclk_nandc", mux_pll_src_2plls_p, 0,
RK2928_CLKSEL_CON(2), 14, 1, MFLAGS, 8, 5, DFLAGS,
RK2928_CLKGATE_CON(1), 0, GFLAGS),
- COMPOSITE(0, "sclk_gmac_src", mux_pll_src_2plls_p, 0,
+ COMPOSITE(SCLK_MAC_SRC, "sclk_gmac_src", mux_pll_src_2plls_p, 0,
RK2928_CLKSEL_CON(5), 7, 1, MFLAGS, 0, 5, DFLAGS,
RK2928_CLKGATE_CON(1), 7, GFLAGS),
- MUX(0, "sclk_macphy_50m", mux_sclk_macphy_50m_p, 0,
+ MUX(SCLK_MAC_EXTCLK, "sclk_mac_extclk", mux_sclk_mac_extclk_p, 0,
RK2928_CLKSEL_CON(29), 10, 1, MFLAGS),
- MUX(0, "sclk_gmac_pre", mux_sclk_gmac_pre_p, 0,
+ MUX(SCLK_MAC, "sclk_gmac_pre", mux_sclk_gmac_pre_p, 0,
RK2928_CLKSEL_CON(5), 5, 1, MFLAGS),
- GATE(0, "sclk_mac_refout", "sclk_gmac_pre", 0,
+ GATE(SCLK_MAC_REFOUT, "sclk_mac_refout", "sclk_gmac_pre", 0,
RK2928_CLKGATE_CON(5), 4, GFLAGS),
- GATE(0, "sclk_mac_ref", "sclk_gmac_pre", 0,
+ GATE(SCLK_MAC_REF, "sclk_mac_ref", "sclk_gmac_pre", 0,
RK2928_CLKGATE_CON(5), 3, GFLAGS),
- GATE(0, "sclk_mac_rx", "sclk_gmac_pre", 0,
+ GATE(SCLK_MAC_RX, "sclk_mac_rx", "sclk_gmac_pre", 0,
RK2928_CLKGATE_CON(5), 5, GFLAGS),
- GATE(0, "sclk_mac_tx", "sclk_gmac_pre", 0,
+ GATE(SCLK_MAC_TX, "sclk_mac_tx", "sclk_gmac_pre", 0,
RK2928_CLKGATE_CON(5), 6, GFLAGS),
- COMPOSITE(0, "sclk_macphy", mux_sclk_macphy_p, 0,
+ COMPOSITE(SCLK_MAC_PHY, "sclk_macphy", mux_sclk_macphy_p, 0,
RK2928_CLKSEL_CON(29), 12, 1, MFLAGS, 8, 2, DFLAGS,
RK2928_CLKGATE_CON(5), 7, GFLAGS),
- COMPOSITE(0, "sclk_gmac_out", mux_pll_src_2plls_p, 0,
+ COMPOSITE(SCLK_MAC_OUT, "sclk_gmac_out", mux_pll_src_2plls_p, 0,
RK2928_CLKSEL_CON(5), 15, 1, MFLAGS, 8, 5, DFLAGS,
RK2928_CLKGATE_CON(2), 2, GFLAGS),
@@ -528,7 +551,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
/* PD_PERI */
GATE(0, "aclk_peri_noc", "aclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(12), 0, GFLAGS),
- GATE(0, "aclk_gmac", "aclk_peri", 0, RK2928_CLKGATE_CON(11), 4, GFLAGS),
+ GATE(ACLK_GMAC, "aclk_gmac", "aclk_peri", 0, RK2928_CLKGATE_CON(11), 4, GFLAGS),
GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0, RK2928_CLKGATE_CON(11), 0, GFLAGS),
GATE(HCLK_SDIO, "hclk_sdio", "hclk_peri", 0, RK2928_CLKGATE_CON(11), 1, GFLAGS),
@@ -544,7 +567,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
GATE(0, "hclk_host2_arb", "hclk_peri", 0, RK2928_CLKGATE_CON(11), 14, GFLAGS),
GATE(0, "hclk_peri_noc", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(12), 1, GFLAGS),
- GATE(0, "pclk_gmac", "pclk_peri", 0, RK2928_CLKGATE_CON(11), 5, GFLAGS),
+ GATE(PCLK_GMAC, "pclk_gmac", "pclk_peri", 0, RK2928_CLKGATE_CON(11), 5, GFLAGS),
GATE(0, "pclk_peri_noc", "pclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(12), 2, GFLAGS),
/* PD_GPU */
@@ -558,10 +581,10 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
GATE(0, "aclk_bus_noc", "aclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(10), 1, GFLAGS),
GATE(0, "hclk_rom", "hclk_cpu", 0, RK2928_CLKGATE_CON(8), 3, GFLAGS),
- GATE(0, "hclk_i2s0_8ch", "hclk_cpu", 0, RK2928_CLKGATE_CON(8), 7, GFLAGS),
- GATE(0, "hclk_i2s1_8ch", "hclk_cpu", 0, RK2928_CLKGATE_CON(8), 8, GFLAGS),
- GATE(0, "hclk_i2s2_2ch", "hclk_cpu", 0, RK2928_CLKGATE_CON(8), 9, GFLAGS),
- GATE(0, "hclk_spdif_8ch", "hclk_cpu", 0, RK2928_CLKGATE_CON(8), 10, GFLAGS),
+ GATE(HCLK_I2S0_8CH, "hclk_i2s0_8ch", "hclk_cpu", 0, RK2928_CLKGATE_CON(8), 7, GFLAGS),
+ GATE(HCLK_I2S1_8CH, "hclk_i2s1_8ch", "hclk_cpu", 0, RK2928_CLKGATE_CON(8), 8, GFLAGS),
+ GATE(HCLK_I2S2_2CH, "hclk_i2s2_2ch", "hclk_cpu", 0, RK2928_CLKGATE_CON(8), 9, GFLAGS),
+ GATE(HCLK_SPDIF_8CH, "hclk_spdif_8ch", "hclk_cpu", 0, RK2928_CLKGATE_CON(8), 10, GFLAGS),
GATE(0, "hclk_tsp", "hclk_cpu", 0, RK2928_CLKGATE_CON(10), 11, GFLAGS),
GATE(0, "hclk_crypto_mst", "hclk_cpu", 0, RK2928_CLKGATE_CON(8), 11, GFLAGS),
GATE(0, "hclk_crypto_slv", "hclk_cpu", 0, RK2928_CLKGATE_CON(8), 12, GFLAGS),
diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
index 31b77f713..cdfabeb9a 100644
--- a/drivers/clk/rockchip/clk-rk3399.c
+++ b/drivers/clk/rockchip/clk-rk3399.c
@@ -833,9 +833,9 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
/* perihp */
GATE(0, "cpll_aclk_perihp_src", "cpll", CLK_IGNORE_UNUSED,
- RK3399_CLKGATE_CON(5), 0, GFLAGS),
- GATE(0, "gpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED,
RK3399_CLKGATE_CON(5), 1, GFLAGS),
+ GATE(0, "gpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(5), 0, GFLAGS),
COMPOSITE(ACLK_PERIHP, "aclk_perihp", mux_aclk_perihp_p, CLK_IGNORE_UNUSED,
RK3399_CLKSEL_CON(14), 7, 1, MFLAGS, 0, 5, DFLAGS,
RK3399_CLKGATE_CON(5), 2, GFLAGS),
@@ -923,9 +923,9 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
RK3399_CLKGATE_CON(6), 14, GFLAGS),
GATE(0, "cpll_aclk_emmc_src", "cpll", CLK_IGNORE_UNUSED,
- RK3399_CLKGATE_CON(6), 12, GFLAGS),
- GATE(0, "gpll_aclk_emmc_src", "gpll", CLK_IGNORE_UNUSED,
RK3399_CLKGATE_CON(6), 13, GFLAGS),
+ GATE(0, "gpll_aclk_emmc_src", "gpll", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(6), 12, GFLAGS),
COMPOSITE_NOGATE(ACLK_EMMC, "aclk_emmc", mux_aclk_emmc_p, CLK_IGNORE_UNUSED,
RK3399_CLKSEL_CON(21), 7, 1, MFLAGS, 0, 5, DFLAGS),
GATE(ACLK_EMMC_CORE, "aclk_emmccore", "aclk_emmc", CLK_IGNORE_UNUSED,
@@ -1071,7 +1071,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
/* vio */
COMPOSITE(ACLK_VIO, "aclk_vio", mux_pll_src_cpll_gpll_ppll_p, CLK_IGNORE_UNUSED,
RK3399_CLKSEL_CON(42), 6, 2, MFLAGS, 0, 5, DFLAGS,
- RK3399_CLKGATE_CON(11), 10, GFLAGS),
+ RK3399_CLKGATE_CON(11), 0, GFLAGS),
COMPOSITE_NOMUX(PCLK_VIO, "pclk_vio", "aclk_vio", 0,
RK3399_CLKSEL_CON(43), 0, 5, DFLAGS,
RK3399_CLKGATE_CON(11), 1, GFLAGS),
@@ -1484,6 +1484,7 @@ static const char *const rk3399_cru_critical_clocks[] __initconst = {
"hclk_perilp1",
"hclk_perilp1_noc",
"aclk_dmac0_perilp",
+ "aclk_emmc_noc",
"gpll_hclk_perilp1_src",
"gpll_aclk_perilp0_src",
"gpll_aclk_perihp_src",
@@ -1500,6 +1501,7 @@ static void __init rk3399_clk_init(struct device_node *np)
{
struct rockchip_clk_provider *ctx;
void __iomem *reg_base;
+ struct clk *clk;
reg_base = of_iomap(np, 0);
if (!reg_base) {
@@ -1514,6 +1516,14 @@ static void __init rk3399_clk_init(struct device_node *np)
return;
}
+ /* Watchdog pclk is controlled by RK3399 SECURE_GRF_SOC_CON3[8]. */
+ clk = clk_register_fixed_factor(NULL, "pclk_wdt", "pclk_alive", 0, 1, 1);
+ if (IS_ERR(clk))
+ pr_warn("%s: could not register clock pclk_wdt: %ld\n",
+ __func__, PTR_ERR(clk));
+ else
+ rockchip_clk_add_lookup(ctx, clk, PCLK_WDT);
+
rockchip_clk_register_plls(ctx, rk3399_pll_clks,
ARRAY_SIZE(rk3399_pll_clks), -1);
diff --git a/drivers/clk/samsung/Kconfig b/drivers/clk/samsung/Kconfig
index 20c5fe92a..addc65270 100644
--- a/drivers/clk/samsung/Kconfig
+++ b/drivers/clk/samsung/Kconfig
@@ -9,6 +9,15 @@ config EXYNOS_ARM64_COMMON_CLK
bool "Samsung Exynos ARMv8-family clock controller support" if COMPILE_TEST
depends on COMMON_CLK_SAMSUNG
+config EXYNOS_AUDSS_CLK_CON
+ tristate "Samsung Exynos AUDSS clock controller support"
+ depends on COMMON_CLK_SAMSUNG
+ default y if ARCH_EXYNOS
+ help
+ Support for the Audio Subsystem CLKCON clock controller present
+ on some Exynos SoC variants. Choose M or Y here if you want to
+ use audio devices such as I2S, PCM, etc.
+
# For S3C24XX platforms, select the following symbols:
config S3C2410_COMMON_CLK
bool "Samsung S3C2410 clock controller support" if COMPILE_TEST
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index fc367d4b2..57f4dc6dc 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -12,7 +12,7 @@ obj-$(CONFIG_SOC_EXYNOS5410) += clk-exynos5410.o
obj-$(CONFIG_SOC_EXYNOS5420) += clk-exynos5420.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos5433.o
obj-$(CONFIG_SOC_EXYNOS5440) += clk-exynos5440.o
-obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-audss.o
+obj-$(CONFIG_EXYNOS_AUDSS_CLK_CON) += clk-exynos-audss.o
obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-clkout.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7.o
obj-$(CONFIG_S3C2410_COMMON_CLK)+= clk-s3c2410.o
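The tristate Kconfig entry above only pays off together with the Makefile change in this hunk: obj-$(CONFIG_...) expands according to the symbol's value, so one line covers built-in, modular and disabled builds:

# Kbuild substitutes the symbol's value at build time:
obj-$(CONFIG_EXYNOS_AUDSS_CLK_CON) += clk-exynos-audss.o
#   =y     ->  obj-y += clk-exynos-audss.o   (built into the kernel)
#   =m     ->  obj-m += clk-exynos-audss.o   (clk-exynos-audss.ko)
#   unset  ->  obj-  += clk-exynos-audss.o   (not built at all)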
diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
index 813003d6c..8bf7e805f 100644
--- a/drivers/clk/samsung/clk-cpu.c
+++ b/drivers/clk/samsung/clk-cpu.c
@@ -45,6 +45,13 @@
#define E4210_DIV_STAT_CPU0 0x400
#define E4210_DIV_STAT_CPU1 0x404
+#define E5433_MUX_SEL2 0x008
+#define E5433_MUX_STAT2 0x208
+#define E5433_DIV_CPU0 0x400
+#define E5433_DIV_CPU1 0x404
+#define E5433_DIV_STAT_CPU0 0x500
+#define E5433_DIV_STAT_CPU1 0x504
+
#define E4210_DIV0_RATIO0_MASK 0x7
#define E4210_DIV1_HPM_MASK (0x7 << 4)
#define E4210_DIV1_COPY_MASK (0x7 << 0)
@@ -253,6 +260,102 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
}
/*
+ * Helper function to set the 'safe' dividers for the CPU clock. The parameters
+ * div and mask contain the divider value and the register bit mask of the
+ * dividers to be programmed.
+ */
+static void exynos5433_set_safe_div(void __iomem *base, unsigned long div,
+ unsigned long mask)
+{
+ unsigned long div0;
+
+ div0 = readl(base + E5433_DIV_CPU0);
+ div0 = (div0 & ~mask) | (div & mask);
+ writel(div0, base + E5433_DIV_CPU0);
+ wait_until_divider_stable(base + E5433_DIV_STAT_CPU0, mask);
+}
+
+/* handler for pre-rate change notification from parent clock */
+static int exynos5433_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
+ struct exynos_cpuclk *cpuclk, void __iomem *base)
+{
+ const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
+ unsigned long alt_prate = clk_get_rate(cpuclk->alt_parent);
+ unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
+ unsigned long div0, div1 = 0, mux_reg;
+ unsigned long flags;
+
+ /* find out the divider values to use for clock data */
+ while ((cfg_data->prate * 1000) != ndata->new_rate) {
+ if (cfg_data->prate == 0)
+ return -EINVAL;
+ cfg_data++;
+ }
+
+ spin_lock_irqsave(cpuclk->lock, flags);
+
+ /*
+ * For the selected PLL clock frequency, get the pre-defined divider
+ * values.
+ */
+ div0 = cfg_data->div0;
+ div1 = cfg_data->div1;
+
+ /*
+ * If the old parent clock runs slower than the alternate parent,
+ * ensure that the armclk speed never exceeds old_prate until the
+ * dividers are programmed. Also work around the dividers being set
+ * to lower values before the parent clock is switched to its new,
+ * lower speed (which would briefly overclock the armclk outputs).
+ */
+ if (alt_prate > ndata->old_rate || ndata->old_rate > ndata->new_rate) {
+ unsigned long tmp_rate = min(ndata->old_rate, ndata->new_rate);
+
+ alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1;
+ WARN_ON(alt_div >= MAX_DIV);
+
+ exynos5433_set_safe_div(base, alt_div, alt_div_mask);
+ div0 |= alt_div;
+ }
+
+ /* select the alternate parent */
+ mux_reg = readl(base + E5433_MUX_SEL2);
+ writel(mux_reg | 1, base + E5433_MUX_SEL2);
+ wait_until_mux_stable(base + E5433_MUX_STAT2, 0, 2);
+
+ /* alternate parent is active now. set the dividers */
+ writel(div0, base + E5433_DIV_CPU0);
+ wait_until_divider_stable(base + E5433_DIV_STAT_CPU0, DIV_MASK_ALL);
+
+ writel(div1, base + E5433_DIV_CPU1);
+ wait_until_divider_stable(base + E5433_DIV_STAT_CPU1, DIV_MASK_ALL);
+
+ spin_unlock_irqrestore(cpuclk->lock, flags);
+ return 0;
+}
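Plugging illustrative numbers into the pre-handler shows why the 'safe' divider keeps armclk in bounds (the rates below are made up, not taken from any cfg_data table):

/*
 * Example: alt_prate = 800 MHz, min(old_rate, new_rate) = 250 MHz.
 *   alt_div = DIV_ROUND_UP(800, 250) - 1 = 4 - 1 = 3
 * armclk = 800 MHz / (3 + 1) = 200 MHz, so it never exceeds the
 * 250 MHz bound while the mux and dividers are reprogrammed.
 */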
+
+/* handler for post-rate change notification from parent clock */
+static int exynos5433_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
+ struct exynos_cpuclk *cpuclk, void __iomem *base)
+{
+ unsigned long div = 0, div_mask = DIV_MASK;
+ unsigned long mux_reg;
+ unsigned long flags;
+
+ spin_lock_irqsave(cpuclk->lock, flags);
+
+ /* select apll as the alternate parent */
+ mux_reg = readl(base + E5433_MUX_SEL2);
+ writel(mux_reg & ~1, base + E5433_MUX_SEL2);
+ wait_until_mux_stable(base + E5433_MUX_STAT2, 0, 1);
+
+ exynos5433_set_safe_div(base, div, div_mask);
+ spin_unlock_irqrestore(cpuclk->lock, flags);
+ return 0;
+}
+
+/*
* This notifier function is called for the pre-rate and post-rate change
* notifications of the parent clock of cpuclk.
*/
@@ -275,6 +378,29 @@ static int exynos_cpuclk_notifier_cb(struct notifier_block *nb,
return notifier_from_errno(err);
}
+/*
+ * This notifier function is called for the pre-rate and post-rate change
+ * notifications of the parent clock of cpuclk.
+ */
+static int exynos5433_cpuclk_notifier_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct clk_notifier_data *ndata = data;
+ struct exynos_cpuclk *cpuclk;
+ void __iomem *base;
+ int err = 0;
+
+ cpuclk = container_of(nb, struct exynos_cpuclk, clk_nb);
+ base = cpuclk->ctrl_base;
+
+ if (event == PRE_RATE_CHANGE)
+ err = exynos5433_cpuclk_pre_rate_change(ndata, cpuclk, base);
+ else if (event == POST_RATE_CHANGE)
+ err = exynos5433_cpuclk_post_rate_change(ndata, cpuclk, base);
+
+ return notifier_from_errno(err);
+}
+
/* helper function to register a CPU clock */
int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
unsigned int lookup_id, const char *name, const char *parent,
@@ -301,7 +427,10 @@ int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
cpuclk->ctrl_base = ctx->reg_base + offset;
cpuclk->lock = &ctx->lock;
cpuclk->flags = flags;
- cpuclk->clk_nb.notifier_call = exynos_cpuclk_notifier_cb;
+ if (flags & CLK_CPU_HAS_E5433_REGS_LAYOUT)
+ cpuclk->clk_nb.notifier_call = exynos5433_cpuclk_notifier_cb;
+ else
+ cpuclk->clk_nb.notifier_call = exynos_cpuclk_notifier_cb;
cpuclk->alt_parent = __clk_lookup(alt_parent);
if (!cpuclk->alt_parent) {
diff --git a/drivers/clk/samsung/clk-cpu.h b/drivers/clk/samsung/clk-cpu.h
index 37874d3c3..d4b6b517f 100644
--- a/drivers/clk/samsung/clk-cpu.h
+++ b/drivers/clk/samsung/clk-cpu.h
@@ -57,10 +57,12 @@ struct exynos_cpuclk {
struct notifier_block clk_nb;
unsigned long flags;
-/* The CPU clock registers has DIV1 configuration register */
+/* The CPU clock registers have DIV1 configuration register */
#define CLK_CPU_HAS_DIV1 (1 << 0)
/* When ALT parent is active, debug clocks need safe divider values */
#define CLK_CPU_NEEDS_DEBUG_ALT_DIV (1 << 1)
+/* The CPU clock registers have Exynos5433-compatible layout */
+#define CLK_CPU_HAS_E5433_REGS_LAYOUT (1 << 2)
};
extern int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index 4e9584d79..bdf8b971f 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -273,17 +273,7 @@ static struct platform_driver exynos_audss_clk_driver = {
.remove = exynos_audss_clk_remove,
};
-static int __init exynos_audss_clk_init(void)
-{
- return platform_driver_register(&exynos_audss_clk_driver);
-}
-core_initcall(exynos_audss_clk_init);
-
-static void __exit exynos_audss_clk_exit(void)
-{
- platform_driver_unregister(&exynos_audss_clk_driver);
-}
-module_exit(exynos_audss_clk_exit);
+module_platform_driver(exynos_audss_clk_driver);
MODULE_AUTHOR("Padmavathi Venna <padma.v@samsung.com>");
MODULE_DESCRIPTION("Exynos Audio Subsystem Clock Controller");
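module_platform_driver() generates exactly the boilerplate deleted above; expanded by hand it is roughly:

static int __init exynos_audss_clk_driver_init(void)
{
	return platform_driver_register(&exynos_audss_clk_driver);
}
module_init(exynos_audss_clk_driver_init);

static void __exit exynos_audss_clk_driver_exit(void)
{
	platform_driver_unregister(&exynos_audss_clk_driver);
}
module_exit(exynos_audss_clk_driver_exit);

One subtlety of this hunk: the old code registered at core_initcall time, while module_init() maps to device_initcall() for built-in code, so when built in the driver now registers later in boot.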
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
index 7cd02ff37..96fab6cfb 100644
--- a/drivers/clk/samsung/clk-exynos-clkout.c
+++ b/drivers/clk/samsung/clk-exynos-clkout.c
@@ -151,6 +151,8 @@ static void __init exynos5_clkout_init(struct device_node *node)
}
CLK_OF_DECLARE(exynos5250_clkout, "samsung,exynos5250-pmu",
exynos5_clkout_init);
+CLK_OF_DECLARE(exynos5410_clkout, "samsung,exynos5410-pmu",
+ exynos5_clkout_init);
CLK_OF_DECLARE(exynos5420_clkout, "samsung,exynos5420-pmu",
exynos5_clkout_init);
CLK_OF_DECLARE(exynos5433_clkout, "samsung,exynos5433-pmu",
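Adding the exynos5410-pmu compatible simply reuses the existing init routine; CLK_OF_DECLARE() records a (compatible, init-fn) pair for the early clock pass. Its definition in this kernel era is, roughly:

/* include/linux/clk-provider.h (abridged) */
#define CLK_OF_DECLARE(name, compat, fn) OF_DECLARE_1(clk, name, compat, fn)

OF_DECLARE_1 places an of_device_id in the __clk_of_table section, which of_clk_init(NULL) walks during early boot, calling fn on every matching device-tree node.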
diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
index 16575ee87..1b81e283f 100644
--- a/drivers/clk/samsung/clk-exynos3250.c
+++ b/drivers/clk/samsung/clk-exynos3250.c
@@ -103,7 +103,7 @@
#define PWR_CTRL1_USE_CORE1_WFI (1 << 1)
#define PWR_CTRL1_USE_CORE0_WFI (1 << 0)
-static unsigned long exynos3250_cmu_clk_regs[] __initdata = {
+static const unsigned long exynos3250_cmu_clk_regs[] __initconst = {
SRC_LEFTBUS,
DIV_LEFTBUS,
GATE_IP_LEFTBUS,
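The __initdata to __initconst churn that runs through the rest of this file (and the other Samsung CMU drivers below) is not cosmetic: const objects belong in .init.rodata, and tagging a const array with __initdata, which targets the writable .init.data section, can trigger a gcc "section type conflict" error. A minimal sketch with hypothetical contents:

/* wrong: const object forced into the writable .init.data section */
static const unsigned long regs[] __initdata  = { 0x100, 0x104 };

/* right: const init data goes to .init.rodata */
static const unsigned long regs[] __initconst = { 0x100, 0x104 };

Either way the memory is released after boot; only the section placement, and hence write protection, differs.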
@@ -226,7 +226,7 @@ PNAME(group_sclk_fimd0_p) = { "xxti", "xusbxti",
PNAME(mout_mfc_p) = { "mout_mfc_0", "mout_mfc_1" };
PNAME(mout_g3d_p) = { "mout_g3d_0", "mout_g3d_1" };
-static struct samsung_fixed_factor_clock fixed_factor_clks[] __initdata = {
+static const struct samsung_fixed_factor_clock fixed_factor_clks[] __initconst = {
FFACTOR(0, "sclk_mpll_1600", "mout_mpll", 1, 1, 0),
FFACTOR(0, "sclk_mpll_mif", "mout_mpll", 1, 2, 0),
FFACTOR(0, "sclk_bpll", "fout_bpll", 1, 2, 0),
@@ -237,7 +237,7 @@ static struct samsung_fixed_factor_clock fixed_factor_clks[] __initdata = {
FFACTOR(CLK_FIN_PLL, "fin_pll", "xusbxti", 1, 1, 0),
};
-static struct samsung_mux_clock mux_clks[] __initdata = {
+static const struct samsung_mux_clock mux_clks[] __initconst = {
/*
* NOTE: Following table is sorted by register address in ascending
* order and then bitfield shift in descending order, as it is done
@@ -326,7 +326,7 @@ static struct samsung_mux_clock mux_clks[] __initdata = {
CLK_SET_RATE_PARENT, 0),
};
-static struct samsung_div_clock div_clks[] __initdata = {
+static const struct samsung_div_clock div_clks[] __initconst = {
/*
* NOTE: Following table is sorted by register address in ascending
* order and then bitfield shift in descending order, as it is done
@@ -429,7 +429,7 @@ static struct samsung_div_clock div_clks[] __initdata = {
DIV(CLK_DIV_COPY, "div_copy", "mout_hpm", DIV_CPU1, 0, 3),
};
-static struct samsung_gate_clock gate_clks[] __initdata = {
+static const struct samsung_gate_clock gate_clks[] __initconst = {
/*
* NOTE: Following table is sorted by register address in ascending
* order and then bitfield shift in descending order, as it is done
@@ -669,7 +669,7 @@ static struct samsung_gate_clock gate_clks[] __initdata = {
};
/* APLL & MPLL & BPLL & UPLL */
-static struct samsung_pll_rate_table exynos3250_pll_rates[] = {
+static const struct samsung_pll_rate_table exynos3250_pll_rates[] __initconst = {
PLL_35XX_RATE(1200000000, 400, 4, 1),
PLL_35XX_RATE(1100000000, 275, 3, 1),
PLL_35XX_RATE(1066000000, 533, 6, 1),
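The rate column in these tables can be checked against the PMS values: for the pll_35xx type, FOUT = MDIV * FIN / (PDIV * 2^SDIV), with FIN = 24 MHz on these platforms:

/*
 * PLL_35XX_RATE(1200000000, 400, 4, 1):
 *   24 MHz * 400 / (4 * 2^1) = 1200 MHz  -- matches the rate field
 * PLL_35XX_RATE(1066000000, 533, 6, 1):
 *   24 MHz * 533 / (6 * 2^1) = 1066 MHz
 */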
@@ -691,7 +691,7 @@ static struct samsung_pll_rate_table exynos3250_pll_rates[] = {
};
/* EPLL */
-static struct samsung_pll_rate_table exynos3250_epll_rates[] = {
+static const struct samsung_pll_rate_table exynos3250_epll_rates[] __initconst = {
PLL_36XX_RATE(800000000, 200, 3, 1, 0),
PLL_36XX_RATE(288000000, 96, 2, 2, 0),
PLL_36XX_RATE(192000000, 128, 2, 3, 0),
@@ -710,7 +710,7 @@ static struct samsung_pll_rate_table exynos3250_epll_rates[] = {
};
/* VPLL */
-static struct samsung_pll_rate_table exynos3250_vpll_rates[] = {
+static const struct samsung_pll_rate_table exynos3250_vpll_rates[] __initconst = {
PLL_36XX_RATE(600000000, 100, 2, 1, 0),
PLL_36XX_RATE(533000000, 266, 3, 2, 32768),
PLL_36XX_RATE(519230987, 173, 2, 2, 5046),
@@ -740,7 +740,7 @@ static struct samsung_pll_rate_table exynos3250_vpll_rates[] = {
{ /* sentinel */ }
};
-static struct samsung_pll_clock exynos3250_plls[] __initdata = {
+static const struct samsung_pll_clock exynos3250_plls[] __initconst = {
PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
APLL_LOCK, APLL_CON0, exynos3250_pll_rates),
PLL(pll_35xx, CLK_FOUT_MPLL, "fout_mpll", "fin_pll",
@@ -772,7 +772,7 @@ static void __init exynos3_core_down_clock(void __iomem *reg_base)
__raw_writel(0x0, reg_base + PWR_CTRL2);
}
-static struct samsung_cmu_info cmu_info __initdata = {
+static const struct samsung_cmu_info cmu_info __initconst = {
.pll_clks = exynos3250_plls,
.nr_pll_clks = ARRAY_SIZE(exynos3250_plls),
.mux_clks = mux_clks,
@@ -848,7 +848,7 @@ CLK_OF_DECLARE(exynos3250_cmu, "samsung,exynos3250-cmu", exynos3250_cmu_init);
#define EPLL_CON2 0x111c
#define SRC_EPLL 0x1120
-static unsigned long exynos3250_cmu_dmc_clk_regs[] __initdata = {
+static const unsigned long exynos3250_cmu_dmc_clk_regs[] __initconst = {
BPLL_LOCK,
BPLL_CON0,
BPLL_CON1,
@@ -874,7 +874,7 @@ PNAME(mout_bpll_p) = { "fin_pll", "fout_bpll", };
PNAME(mout_mpll_mif_p) = { "fin_pll", "sclk_mpll_mif", };
PNAME(mout_dphy_p) = { "mout_mpll_mif", "mout_bpll", };
-static struct samsung_mux_clock dmc_mux_clks[] __initdata = {
+static const struct samsung_mux_clock dmc_mux_clks[] __initconst = {
/*
* NOTE: Following table is sorted by register address in ascending
* order and then bitfield shift in descending order, as it is done
@@ -893,7 +893,7 @@ static struct samsung_mux_clock dmc_mux_clks[] __initdata = {
MUX(CLK_MOUT_EPLL, "mout_epll", mout_epll_p, SRC_EPLL, 4, 1),
};
-static struct samsung_div_clock dmc_div_clks[] __initdata = {
+static const struct samsung_div_clock dmc_div_clks[] __initconst = {
/*
* NOTE: Following table is sorted by register address in ascending
* order and then bitfield shift in descending order, as it is done
@@ -910,14 +910,14 @@ static struct samsung_div_clock dmc_div_clks[] __initdata = {
DIV(CLK_DIV_DMCD, "div_dmcd", "div_dmc", DIV_DMC1, 11, 3),
};
-static struct samsung_pll_clock exynos3250_dmc_plls[] __initdata = {
+static const struct samsung_pll_clock exynos3250_dmc_plls[] __initconst = {
PLL(pll_35xx, CLK_FOUT_BPLL, "fout_bpll", "fin_pll",
BPLL_LOCK, BPLL_CON0, exynos3250_pll_rates),
PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
EPLL_LOCK, EPLL_CON0, exynos3250_epll_rates),
};
-static struct samsung_cmu_info dmc_cmu_info __initdata = {
+static const struct samsung_cmu_info dmc_cmu_info __initconst = {
.pll_clks = exynos3250_dmc_plls,
.nr_pll_clks = ARRAY_SIZE(exynos3250_dmc_plls),
.mux_clks = dmc_mux_clks,
@@ -947,7 +947,7 @@ CLK_OF_DECLARE(exynos3250_cmu_dmc, "samsung,exynos3250-cmu-dmc",
#define GATE_IP_ISP1 0x804
#define GATE_SCLK_ISP 0x900
-static struct samsung_div_clock isp_div_clks[] __initdata = {
+static const struct samsung_div_clock isp_div_clks[] __initconst = {
/*
* NOTE: Following table is sorted by register address in ascending
* order and then bitfield shift in descending order, as it is done
@@ -967,7 +967,7 @@ static struct samsung_div_clock isp_div_clks[] __initdata = {
DIV(CLK_DIV_MPWM, "div_mpwm", "div_isp1", DIV_ISP1, 0, 3),
};
-static struct samsung_gate_clock isp_gate_clks[] __initdata = {
+static const struct samsung_gate_clock isp_gate_clks[] __initconst = {
/*
* NOTE: Following table is sorted by register address in ascending
* order and then bitfield shift in descending order, as it is done
@@ -1063,7 +1063,7 @@ static struct samsung_gate_clock isp_gate_clks[] __initdata = {
GATE_SCLK_ISP, 0, CLK_IGNORE_UNUSED, 0),
};
-static struct samsung_cmu_info isp_cmu_info __initdata = {
+static const struct samsung_cmu_info isp_cmu_info __initconst = {
.div_clks = isp_div_clks,
.nr_div_clks = ARRAY_SIZE(isp_div_clks),
.gate_clks = isp_gate_clks,
@@ -1079,14 +1079,15 @@ static int __init exynos3250_cmu_isp_probe(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id exynos3250_cmu_isp_of_match[] = {
+static const struct of_device_id exynos3250_cmu_isp_of_match[] __initconst = {
{ .compatible = "samsung,exynos3250-cmu-isp", },
{ /* sentinel */ }
};
-static struct platform_driver exynos3250_cmu_isp_driver = {
+static struct platform_driver exynos3250_cmu_isp_driver __initdata = {
.driver = {
.name = "exynos3250-cmu-isp",
+ .suppress_bind_attrs = true,
.of_match_table = exynos3250_cmu_isp_of_match,
},
};
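A platform_driver normally has to outlive init, so marking this one __initdata is only safe because it is registered exactly once during boot and never rebound; .suppress_bind_attrs removes the sysfs bind/unbind files that could otherwise reach the freed structure after init memory is discarded. The matching one-shot registration used by these CMU sub-drivers looks roughly like this (a sketch; the initcall level is assumed):

static int __init exynos3250_cmu_isp_init(void)
{
	return platform_driver_probe(&exynos3250_cmu_isp_driver,
				     exynos3250_cmu_isp_probe);
}
core_initcall(exynos3250_cmu_isp_init);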
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 7b3d0f975..faab9b31b 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -169,7 +169,7 @@ static struct samsung_clk_reg_dump *exynos4_save_pll;
* list of controller registers to be saved and restored during a
* suspend/resume cycle.
*/
-static unsigned long exynos4210_clk_save[] __initdata = {
+static const unsigned long exynos4210_clk_save[] __initconst = {
E4210_SRC_IMAGE,
E4210_SRC_LCD1,
E4210_SRC_MASK_LCD1,
@@ -181,7 +181,7 @@ static unsigned long exynos4210_clk_save[] __initdata = {
PWR_CTRL1,
};
-static unsigned long exynos4x12_clk_save[] __initdata = {
+static const unsigned long exynos4x12_clk_save[] __initconst = {
E4X12_GATE_IP_IMAGE,
E4X12_GATE_IP_PERIR,
E4X12_SRC_CAM1,
@@ -192,7 +192,7 @@ static unsigned long exynos4x12_clk_save[] __initdata = {
E4X12_PWR_CTRL2,
};
-static unsigned long exynos4_clk_pll_regs[] __initdata = {
+static const unsigned long exynos4_clk_pll_regs[] __initconst = {
EPLL_LOCK,
VPLL_LOCK,
EPLL_CON0,
@@ -203,7 +203,7 @@ static unsigned long exynos4_clk_pll_regs[] __initdata = {
VPLL_CON2,
};
-static unsigned long exynos4_clk_regs[] __initdata = {
+static const unsigned long exynos4_clk_regs[] __initconst = {
SRC_LEFTBUS,
DIV_LEFTBUS,
GATE_IP_LEFTBUS,
@@ -505,28 +505,28 @@ static struct samsung_fixed_rate_clock exynos4_fixed_rate_ext_clks[] __initdata
};
/* fixed rate clocks generated inside the soc */
-static struct samsung_fixed_rate_clock exynos4_fixed_rate_clks[] __initdata = {
+static const struct samsung_fixed_rate_clock exynos4_fixed_rate_clks[] __initconst = {
FRATE(0, "sclk_hdmi24m", NULL, 0, 24000000),
FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", "hdmi", 0, 27000000),
FRATE(0, "sclk_usbphy0", NULL, 0, 48000000),
};
-static struct samsung_fixed_rate_clock exynos4210_fixed_rate_clks[] __initdata = {
+static const struct samsung_fixed_rate_clock exynos4210_fixed_rate_clks[] __initconst = {
FRATE(0, "sclk_usbphy1", NULL, 0, 48000000),
};
-static struct samsung_fixed_factor_clock exynos4_fixed_factor_clks[] __initdata = {
+static const struct samsung_fixed_factor_clock exynos4_fixed_factor_clks[] __initconst = {
FFACTOR(0, "sclk_apll_div_2", "sclk_apll", 1, 2, 0),
FFACTOR(0, "fout_mpll_div_2", "fout_mpll", 1, 2, 0),
FFACTOR(0, "fout_apll_div_2", "fout_apll", 1, 2, 0),
FFACTOR(0, "arm_clk_div_2", "div_core2", 1, 2, 0),
};
-static struct samsung_fixed_factor_clock exynos4210_fixed_factor_clks[] __initdata = {
+static const struct samsung_fixed_factor_clock exynos4210_fixed_factor_clks[] __initconst = {
FFACTOR(0, "sclk_mpll_div_2", "sclk_mpll", 1, 2, 0),
};
-static struct samsung_fixed_factor_clock exynos4x12_fixed_factor_clks[] __initdata = {
+static const struct samsung_fixed_factor_clock exynos4x12_fixed_factor_clks[] __initconst = {
FFACTOR(0, "sclk_mpll_user_l_div_2", "mout_mpll_user_l", 1, 2, 0),
FFACTOR(0, "sclk_mpll_user_r_div_2", "mout_mpll_user_r", 1, 2, 0),
FFACTOR(0, "sclk_mpll_user_t_div_2", "mout_mpll_user_t", 1, 2, 0),
@@ -534,7 +534,7 @@ static struct samsung_fixed_factor_clock exynos4x12_fixed_factor_clks[] __initda
};
/* list of mux clocks supported in all exynos4 soc's */
-static struct samsung_mux_clock exynos4_mux_clks[] __initdata = {
+static const struct samsung_mux_clock exynos4_mux_clks[] __initconst = {
MUX_FA(CLK_MOUT_APLL, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
CLK_SET_RATE_PARENT | CLK_RECALC_NEW_RATES, 0,
"mout_apll"),
@@ -555,11 +555,11 @@ static struct samsung_mux_clock exynos4_mux_clks[] __initdata = {
};
/* list of mux clocks supported in exynos4210 soc */
-static struct samsung_mux_clock exynos4210_mux_early[] __initdata = {
+static const struct samsung_mux_clock exynos4210_mux_early[] __initconst = {
MUX(0, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP1, 0, 1),
};
-static struct samsung_mux_clock exynos4210_mux_clks[] __initdata = {
+static const struct samsung_mux_clock exynos4210_mux_clks[] __initconst = {
MUX(0, "mout_gdl", sclk_ampll_p4210, SRC_LEFTBUS, 0, 1),
MUX(0, "mout_clkout_leftbus", clkout_left_p4210,
CLKOUT_CMU_LEFTBUS, 0, 5),
@@ -622,7 +622,7 @@ static struct samsung_mux_clock exynos4210_mux_clks[] __initdata = {
};
/* list of mux clocks supported in exynos4x12 soc */
-static struct samsung_mux_clock exynos4x12_mux_clks[] __initdata = {
+static const struct samsung_mux_clock exynos4x12_mux_clks[] __initconst = {
MUX(0, "mout_mpll_user_l", mout_mpll_p, SRC_LEFTBUS, 4, 1),
MUX(0, "mout_gdl", mout_gdl_p4x12, SRC_LEFTBUS, 0, 1),
MUX(0, "mout_clkout_leftbus", clkout_left_p4x12,
@@ -705,7 +705,7 @@ static struct samsung_mux_clock exynos4x12_mux_clks[] __initdata = {
};
/* list of divider clocks supported in all exynos4 soc's */
-static struct samsung_div_clock exynos4_div_clks[] __initdata = {
+static const struct samsung_div_clock exynos4_div_clks[] __initconst = {
DIV(CLK_DIV_GDL, "div_gdl", "mout_gdl", DIV_LEFTBUS, 0, 3),
DIV(0, "div_gpl", "div_gdl", DIV_LEFTBUS, 4, 3),
DIV(0, "div_clkout_leftbus", "mout_clkout_leftbus",
@@ -795,7 +795,7 @@ static struct samsung_div_clock exynos4_div_clks[] __initdata = {
};
/* list of divider clocks supported in exynos4210 soc */
-static struct samsung_div_clock exynos4210_div_clks[] __initdata = {
+static const struct samsung_div_clock exynos4210_div_clks[] __initconst = {
DIV(CLK_ACLK200, "aclk200", "mout_aclk200", DIV_TOP, 0, 3),
DIV(CLK_SCLK_FIMG2D, "sclk_fimg2d", "mout_g2d", DIV_IMAGE, 0, 4),
DIV(0, "div_fimd1", "mout_fimd1", E4210_DIV_LCD1, 0, 4),
@@ -806,7 +806,7 @@ static struct samsung_div_clock exynos4210_div_clks[] __initdata = {
};
/* list of divider clocks supported in exynos4x12 soc */
-static struct samsung_div_clock exynos4x12_div_clks[] __initdata = {
+static const struct samsung_div_clock exynos4x12_div_clks[] __initconst = {
DIV(0, "div_mdnie0", "mout_mdnie0", DIV_LCD0, 4, 4),
DIV(0, "div_mdnie_pwm0", "mout_mdnie_pwm0", DIV_LCD0, 8, 4),
DIV(0, "div_mdnie_pwm_pre0", "div_mdnie_pwm0", DIV_LCD0, 12, 4),
@@ -837,7 +837,7 @@ static struct samsung_div_clock exynos4x12_div_clks[] __initdata = {
};
/* list of gate clocks supported in all exynos4 soc's */
-static struct samsung_gate_clock exynos4_gate_clks[] __initdata = {
+static const struct samsung_gate_clock exynos4_gate_clks[] __initconst = {
/*
* After all Exynos4 based platforms are migrated to use device tree,
* the device name and clock alias names specified below for some
@@ -1043,7 +1043,7 @@ static struct samsung_gate_clock exynos4_gate_clks[] __initdata = {
};
/* list of gate clocks supported in exynos4210 soc */
-static struct samsung_gate_clock exynos4210_gate_clks[] __initdata = {
+static const struct samsung_gate_clock exynos4210_gate_clks[] __initconst = {
GATE(CLK_TVENC, "tvenc", "aclk160", GATE_IP_TV, 2, 0, 0),
GATE(CLK_G2D, "g2d", "aclk200", E4210_GATE_IP_IMAGE, 0, 0, 0),
GATE(CLK_ROTATOR, "rotator", "aclk200", E4210_GATE_IP_IMAGE, 1, 0, 0),
@@ -1090,7 +1090,7 @@ static struct samsung_gate_clock exynos4210_gate_clks[] __initdata = {
};
/* list of gate clocks supported in exynos4x12 soc */
-static struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
+static const struct samsung_gate_clock exynos4x12_gate_clks[] __initconst = {
GATE(CLK_AUDSS, "audss", "sclk_epll", E4X12_GATE_IP_MAUDIO, 0, 0, 0),
GATE(CLK_MDNIE0, "mdnie0", "aclk160", GATE_IP_LCD0, 2, 0, 0),
GATE(CLK_ROTATOR, "rotator", "aclk200", E4X12_GATE_IP_IMAGE, 1, 0, 0),
@@ -1190,17 +1190,17 @@ static struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
0),
};
-static struct samsung_clock_alias exynos4_aliases[] __initdata = {
+static const struct samsung_clock_alias exynos4_aliases[] __initconst = {
ALIAS(CLK_MOUT_CORE, NULL, "moutcore"),
ALIAS(CLK_ARM_CLK, NULL, "armclk"),
ALIAS(CLK_SCLK_APLL, NULL, "mout_apll"),
};
-static struct samsung_clock_alias exynos4210_aliases[] __initdata = {
+static const struct samsung_clock_alias exynos4210_aliases[] __initconst = {
ALIAS(CLK_SCLK_MPLL, NULL, "mout_mpll"),
};
-static struct samsung_clock_alias exynos4x12_aliases[] __initdata = {
+static const struct samsung_clock_alias exynos4x12_aliases[] __initconst = {
ALIAS(CLK_MOUT_MPLL_USER_C, NULL, "mout_mpll"),
};
@@ -1211,7 +1211,7 @@ static struct samsung_clock_alias exynos4x12_aliases[] __initdata = {
* controller is first remapped and the value of XOM[0] bit is read to
* determine the parent clock.
*/
-static unsigned long exynos4_get_xom(void)
+static unsigned long __init exynos4_get_xom(void)
{
unsigned long xom = 0;
void __iomem *chipid_base;
@@ -1264,7 +1264,7 @@ static const struct of_device_id ext_clk_match[] __initconst = {
};
/* PLLs PMS values */
-static struct samsung_pll_rate_table exynos4210_apll_rates[] __initdata = {
+static const struct samsung_pll_rate_table exynos4210_apll_rates[] __initconst = {
PLL_45XX_RATE(1200000000, 150, 3, 1, 28),
PLL_45XX_RATE(1000000000, 250, 6, 1, 28),
PLL_45XX_RATE( 800000000, 200, 6, 1, 28),
@@ -1277,7 +1277,7 @@ static struct samsung_pll_rate_table exynos4210_apll_rates[] __initdata = {
{ /* sentinel */ }
};
-static struct samsung_pll_rate_table exynos4210_epll_rates[] __initdata = {
+static const struct samsung_pll_rate_table exynos4210_epll_rates[] __initconst = {
PLL_4600_RATE(192000000, 48, 3, 1, 0, 0),
PLL_4600_RATE(180633605, 45, 3, 1, 10381, 0),
PLL_4600_RATE(180000000, 45, 3, 1, 0, 0),
@@ -1288,7 +1288,7 @@ static struct samsung_pll_rate_table exynos4210_epll_rates[] __initdata = {
{ /* sentinel */ }
};
-static struct samsung_pll_rate_table exynos4210_vpll_rates[] __initdata = {
+static const struct samsung_pll_rate_table exynos4210_vpll_rates[] __initconst = {
PLL_4650_RATE(360000000, 44, 3, 0, 1024, 0, 14, 0),
PLL_4650_RATE(324000000, 53, 2, 1, 1024, 1, 1, 1),
PLL_4650_RATE(259617187, 63, 3, 1, 1950, 0, 20, 1),
@@ -1297,7 +1297,7 @@ static struct samsung_pll_rate_table exynos4210_vpll_rates[] __initdata = {
{ /* sentinel */ }
};
-static struct samsung_pll_rate_table exynos4x12_apll_rates[] __initdata = {
+static const struct samsung_pll_rate_table exynos4x12_apll_rates[] __initconst = {
PLL_35XX_RATE(1500000000, 250, 4, 0),
PLL_35XX_RATE(1400000000, 175, 3, 0),
PLL_35XX_RATE(1300000000, 325, 6, 0),
@@ -1315,7 +1315,7 @@ static struct samsung_pll_rate_table exynos4x12_apll_rates[] __initdata = {
{ /* sentinel */ }
};
-static struct samsung_pll_rate_table exynos4x12_epll_rates[] __initdata = {
+static const struct samsung_pll_rate_table exynos4x12_epll_rates[] __initconst = {
PLL_36XX_RATE(192000000, 48, 3, 1, 0),
PLL_36XX_RATE(180633605, 45, 3, 1, 10381),
PLL_36XX_RATE(180000000, 45, 3, 1, 0),
@@ -1326,7 +1326,7 @@ static struct samsung_pll_rate_table exynos4x12_epll_rates[] __initdata = {
{ /* sentinel */ }
};
-static struct samsung_pll_rate_table exynos4x12_vpll_rates[] __initdata = {
+static const struct samsung_pll_rate_table exynos4x12_vpll_rates[] __initconst = {
PLL_36XX_RATE(533000000, 133, 3, 1, 16384),
PLL_36XX_RATE(440000000, 110, 3, 1, 0),
PLL_36XX_RATE(350000000, 175, 3, 2, 0),
@@ -1375,12 +1375,12 @@ static void __init exynos4x12_core_down_clock(void)
if (num_possible_cpus() == 4)
tmp |= PWR_CTRL1_USE_CORE3_WFE | PWR_CTRL1_USE_CORE2_WFE |
PWR_CTRL1_USE_CORE3_WFI | PWR_CTRL1_USE_CORE2_WFI;
- __raw_writel(tmp, reg_base + PWR_CTRL1);
+ writel_relaxed(tmp, reg_base + PWR_CTRL1);
/*
* Disable the clock up feature in case it was enabled by bootloader.
*/
- __raw_writel(0x0, reg_base + E4X12_PWR_CTRL2);
+ writel_relaxed(0x0, reg_base + E4X12_PWR_CTRL2);
}
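The __raw_writel to writel_relaxed swap above is an endianness fix, not a performance tweak:

/*
 * __raw_writel() skips both the barrier and the byte-swap that
 * writel() performs, so it silently breaks on big-endian kernels.
 * writel_relaxed() keeps the little-endian conversion and only omits
 * the barrier, which is fine for these PMU power-control registers.
 */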
#define E4210_CPU_DIV0(apll, pclk_dbg, atb, periph, corem1, corem0) \
@@ -1450,8 +1450,6 @@ static void __init exynos4_clk_init(struct device_node *np,
panic("%s: failed to map registers\n", __func__);
ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
- if (!ctx)
- panic("%s: unable to allocate context.\n", __func__);
samsung_clk_of_register_fixed_ext(ctx, exynos4_fixed_rate_ext_clks,
ARRAY_SIZE(exynos4_fixed_rate_ext_clks),
diff --git a/drivers/clk/samsung/clk-exynos4415.c b/drivers/clk/samsung/clk-exynos4415.c
index 86ee06b22..6c9063159 100644
--- a/drivers/clk/samsung/clk-exynos4415.c
+++ b/drivers/clk/samsung/clk-exynos4415.c
@@ -111,7 +111,7 @@
#define DIV_CPU0 0x14500
#define DIV_CPU1 0x14504
-static unsigned long exynos4415_cmu_clk_regs[] __initdata = {
+static const unsigned long exynos4415_cmu_clk_regs[] __initconst = {
SRC_LEFTBUS,
DIV_LEFTBUS,
GATE_IP_LEFTBUS,
@@ -268,16 +268,16 @@ PNAME(group_aclk_isp0_300_user_p) = { "fin_pll", "mout_aclk_isp0_300" };
PNAME(group_aclk_isp1_300_user_p) = { "fin_pll", "mout_aclk_isp1_300" };
PNAME(group_mout_mpll_user_t_p) = { "mout_mpll_user_t" };
-static struct samsung_fixed_factor_clock exynos4415_fixed_factor_clks[] __initdata = {
+static const struct samsung_fixed_factor_clock exynos4415_fixed_factor_clks[] __initconst = {
/* HACK: fin_pll hardcoded to xusbxti until detection is implemented. */
FFACTOR(CLK_FIN_PLL, "fin_pll", "xusbxti", 1, 1, 0),
};
-static struct samsung_fixed_rate_clock exynos4415_fixed_rate_clks[] __initdata = {
+static const struct samsung_fixed_rate_clock exynos4415_fixed_rate_clks[] __initconst = {
FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, 0, 27000000),
};
-static struct samsung_mux_clock exynos4415_mux_clks[] __initdata = {
+static const struct samsung_mux_clock exynos4415_mux_clks[] __initconst = {
/*
* NOTE: Following table is sorted by register address in ascending
* order and then bitfield shift in descending order, as it is done
@@ -427,7 +427,7 @@ static struct samsung_mux_clock exynos4415_mux_clks[] __initdata = {
group_aclk_isp1_300_user_p, SRC_TOP_ISP1, 0, 1),
};
-static struct samsung_div_clock exynos4415_div_clks[] __initdata = {
+static const struct samsung_div_clock exynos4415_div_clks[] __initconst = {
/*
* NOTE: Following table is sorted by register address in ascending
* order and then bitfield shift in descending order, as it is done
@@ -566,7 +566,7 @@ static struct samsung_div_clock exynos4415_div_clks[] __initdata = {
DIV(CLK_DIV_COPY, "div_copy", "mout_hpm", DIV_CPU1, 0, 3),
};
-static struct samsung_gate_clock exynos4415_gate_clks[] __initdata = {
+static const struct samsung_gate_clock exynos4415_gate_clks[] __initconst = {
/*
* NOTE: Following table is sorted by register address in ascending
* order and then bitfield shift in descending order, as it is done
@@ -859,7 +859,7 @@ static struct samsung_gate_clock exynos4415_gate_clks[] __initdata = {
/*
* APLL & MPLL & BPLL & ISP_PLL & DISP_PLL & G3D_PLL
*/
-static struct samsung_pll_rate_table exynos4415_pll_rates[] = {
+static const struct samsung_pll_rate_table exynos4415_pll_rates[] __initconst = {
PLL_35XX_RATE(1600000000, 400, 3, 1),
PLL_35XX_RATE(1500000000, 250, 2, 1),
PLL_35XX_RATE(1400000000, 175, 3, 0),
@@ -891,7 +891,7 @@ static struct samsung_pll_rate_table exynos4415_pll_rates[] = {
};
/* EPLL */
-static struct samsung_pll_rate_table exynos4415_epll_rates[] = {
+static const struct samsung_pll_rate_table exynos4415_epll_rates[] __initconst = {
PLL_36XX_RATE(800000000, 200, 3, 1, 0),
PLL_36XX_RATE(288000000, 96, 2, 2, 0),
PLL_36XX_RATE(192000000, 128, 2, 3, 0),
@@ -909,7 +909,7 @@ static struct samsung_pll_rate_table exynos4415_epll_rates[] = {
{ /* sentinel */ }
};
-static struct samsung_pll_clock exynos4415_plls[] __initdata = {
+static const struct samsung_pll_clock exynos4415_plls[] __initconst = {
PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
APLL_LOCK, APLL_CON0, exynos4415_pll_rates),
PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
@@ -922,7 +922,7 @@ static struct samsung_pll_clock exynos4415_plls[] __initdata = {
"fin_pll", DISP_PLL_LOCK, DISP_PLL_CON0, exynos4415_pll_rates),
};
-static struct samsung_cmu_info cmu_info __initdata = {
+static const struct samsung_cmu_info cmu_info __initconst = {
.pll_clks = exynos4415_plls,
.nr_pll_clks = ARRAY_SIZE(exynos4415_plls),
.mux_clks = exynos4415_mux_clks,
@@ -961,7 +961,7 @@ CLK_OF_DECLARE(exynos4415_cmu, "samsung,exynos4415-cmu", exynos4415_cmu_init);
#define SRC_DMC 0x300
#define DIV_DMC1 0x504
-static unsigned long exynos4415_cmu_dmc_clk_regs[] __initdata = {
+static const unsigned long exynos4415_cmu_dmc_clk_regs[] __initconst = {
MPLL_LOCK,
MPLL_CON0,
MPLL_CON1,
@@ -978,14 +978,14 @@ PNAME(mout_mpll_p) = { "fin_pll", "fout_mpll", };
PNAME(mout_bpll_p) = { "fin_pll", "fout_bpll", };
PNAME(mbpll_p) = { "mout_mpll", "mout_bpll", };
-static struct samsung_mux_clock exynos4415_dmc_mux_clks[] __initdata = {
+static const struct samsung_mux_clock exynos4415_dmc_mux_clks[] __initconst = {
MUX(CLK_DMC_MOUT_MPLL, "mout_mpll", mout_mpll_p, SRC_DMC, 12, 1),
MUX(CLK_DMC_MOUT_BPLL, "mout_bpll", mout_bpll_p, SRC_DMC, 10, 1),
MUX(CLK_DMC_MOUT_DPHY, "mout_dphy", mbpll_p, SRC_DMC, 8, 1),
MUX(CLK_DMC_MOUT_DMC_BUS, "mout_dmc_bus", mbpll_p, SRC_DMC, 4, 1),
};
-static struct samsung_div_clock exynos4415_dmc_div_clks[] __initdata = {
+static const struct samsung_div_clock exynos4415_dmc_div_clks[] __initconst = {
DIV(CLK_DMC_DIV_DMC, "div_dmc", "div_dmc_pre", DIV_DMC1, 27, 3),
DIV(CLK_DMC_DIV_DPHY, "div_dphy", "mout_dphy", DIV_DMC1, 23, 3),
DIV(CLK_DMC_DIV_DMC_PRE, "div_dmc_pre", "mout_dmc_bus",
@@ -995,14 +995,14 @@ static struct samsung_div_clock exynos4415_dmc_div_clks[] __initdata = {
DIV(CLK_DMC_DIV_MPLL_PRE, "div_mpll_pre", "mout_mpll", DIV_DMC1, 8, 2),
};
-static struct samsung_pll_clock exynos4415_dmc_plls[] __initdata = {
+static const struct samsung_pll_clock exynos4415_dmc_plls[] __initconst = {
PLL(pll_35xx, CLK_DMC_FOUT_MPLL, "fout_mpll", "fin_pll",
MPLL_LOCK, MPLL_CON0, exynos4415_pll_rates),
PLL(pll_35xx, CLK_DMC_FOUT_BPLL, "fout_bpll", "fin_pll",
BPLL_LOCK, BPLL_CON0, exynos4415_pll_rates),
};
-static struct samsung_cmu_info cmu_dmc_info __initdata = {
+static const struct samsung_cmu_info cmu_dmc_info __initconst = {
.pll_clks = exynos4415_dmc_plls,
.nr_pll_clks = ARRAY_SIZE(exynos4415_dmc_plls),
.mux_clks = exynos4415_dmc_mux_clks,
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 837197db4..27a227d66 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -117,7 +117,7 @@ static struct samsung_clk_reg_dump *exynos5250_save;
* list of controller registers to be saved and restored during a
* suspend/resume cycle.
*/
-static unsigned long exynos5250_clk_regs[] __initdata = {
+static const unsigned long exynos5250_clk_regs[] __initconst = {
SRC_CPU,
DIV_CPU0,
PWR_CTRL1,
@@ -190,7 +190,7 @@ static struct syscore_ops exynos5250_clk_syscore_ops = {
.resume = exynos5250_clk_resume,
};
-static void exynos5250_clk_sleep_init(void)
+static void __init exynos5250_clk_sleep_init(void)
{
exynos5250_save = samsung_clk_alloc_reg_dump(exynos5250_clk_regs,
ARRAY_SIZE(exynos5250_clk_regs));
@@ -203,7 +203,7 @@ static void exynos5250_clk_sleep_init(void)
register_syscore_ops(&exynos5250_clk_syscore_ops);
}
#else
-static void exynos5250_clk_sleep_init(void) {}
+static void __init exynos5250_clk_sleep_init(void) {}
#endif
/* list of all parent clock list */
@@ -266,23 +266,23 @@ static struct samsung_fixed_rate_clock exynos5250_fixed_rate_ext_clks[] __initda
};
/* fixed rate clocks generated inside the soc */
-static struct samsung_fixed_rate_clock exynos5250_fixed_rate_clks[] __initdata = {
+static const struct samsung_fixed_rate_clock exynos5250_fixed_rate_clks[] __initconst = {
FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, 0, 24000000),
FRATE(0, "sclk_hdmi27m", NULL, 0, 27000000),
FRATE(0, "sclk_dptxphy", NULL, 0, 24000000),
FRATE(0, "sclk_uhostphy", NULL, 0, 48000000),
};
-static struct samsung_fixed_factor_clock exynos5250_fixed_factor_clks[] __initdata = {
+static const struct samsung_fixed_factor_clock exynos5250_fixed_factor_clks[] __initconst = {
FFACTOR(0, "fout_mplldiv2", "fout_mpll", 1, 2, 0),
FFACTOR(0, "fout_bplldiv2", "fout_bpll", 1, 2, 0),
};
-static struct samsung_mux_clock exynos5250_pll_pmux_clks[] __initdata = {
+static const struct samsung_mux_clock exynos5250_pll_pmux_clks[] __initconst = {
MUX(0, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP2, 0, 1),
};
-static struct samsung_mux_clock exynos5250_mux_clks[] __initdata = {
+static const struct samsung_mux_clock exynos5250_mux_clks[] __initconst = {
/*
* NOTE: Following table is sorted by (clock domain, register address,
* bitfield shift) triplet in ascending order. When adding new entries,
@@ -378,7 +378,7 @@ static struct samsung_mux_clock exynos5250_mux_clks[] __initdata = {
MUX(0, "mout_bpll_fout", mout_bpll_fout_p, PLL_DIV2_SEL, 0, 1),
};
-static struct samsung_div_clock exynos5250_div_clks[] __initdata = {
+static const struct samsung_div_clock exynos5250_div_clks[] __initconst = {
/*
* NOTE: Following table is sorted by (clock domain, register address,
* bitfield shift) triplet in ascending order. When adding new entries,
@@ -470,7 +470,7 @@ static struct samsung_div_clock exynos5250_div_clks[] __initdata = {
DIV(CLK_DIV_I2S2, "div_i2s2", "sclk_audio2", DIV_PERIC5, 8, 6),
};
-static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
+static const struct samsung_gate_clock exynos5250_gate_clks[] __initconst = {
/*
* NOTE: Following table is sorted by (clock domain, register address,
* bitfield shift) triplet in ascending order. When adding new entries,
@@ -698,7 +698,7 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
GATE_IP_ISP1, 7, 0, 0),
};
-static struct samsung_pll_rate_table vpll_24mhz_tbl[] __initdata = {
+static const struct samsung_pll_rate_table vpll_24mhz_tbl[] __initconst = {
/* sorted in descending order */
/* PLL_36XX_RATE(rate, m, p, s, k) */
PLL_36XX_RATE(266000000, 266, 3, 3, 0),
@@ -707,7 +707,7 @@ static struct samsung_pll_rate_table vpll_24mhz_tbl[] __initdata = {
{ },
};
-static struct samsung_pll_rate_table epll_24mhz_tbl[] __initdata = {
+static const struct samsung_pll_rate_table epll_24mhz_tbl[] __initconst = {
/* sorted in descending order */
/* PLL_36XX_RATE(rate, m, p, s, k) */
PLL_36XX_RATE(192000000, 64, 2, 2, 0),
@@ -721,7 +721,7 @@ static struct samsung_pll_rate_table epll_24mhz_tbl[] __initdata = {
{ },
};
-static struct samsung_pll_rate_table apll_24mhz_tbl[] __initdata = {
+static const struct samsung_pll_rate_table apll_24mhz_tbl[] __initconst = {
/* sorted in descending order */
/* PLL_35XX_RATE(rate, m, p, s) */
PLL_35XX_RATE(1700000000, 425, 6, 0),
@@ -805,8 +805,7 @@ static void __init exynos5250_clk_init(struct device_node *np)
}
ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
- if (!ctx)
- panic("%s: unable to allocate context.\n", __func__);
+
samsung_clk_of_register_fixed_ext(ctx, exynos5250_fixed_rate_ext_clks,
ARRAY_SIZE(exynos5250_fixed_rate_ext_clks),
ext_clk_match);
diff --git a/drivers/clk/samsung/clk-exynos5260.c b/drivers/clk/samsung/clk-exynos5260.c
index 7a7ed075a..a43642c36 100644
--- a/drivers/clk/samsung/clk-exynos5260.c
+++ b/drivers/clk/samsung/clk-exynos5260.c
@@ -22,7 +22,7 @@
* Applicable for all 2550 Type PLLS for Exynos5260, listed below
* DISP_PLL, EGL_PLL, KFC_PLL, MEM_PLL, BUS_PLL, MEDIA_PLL, G3D_PLL.
*/
-static struct samsung_pll_rate_table pll2550_24mhz_tbl[] __initdata = {
+static const struct samsung_pll_rate_table pll2550_24mhz_tbl[] __initconst = {
PLL_35XX_RATE(1700000000, 425, 6, 0),
PLL_35XX_RATE(1600000000, 200, 3, 0),
PLL_35XX_RATE(1500000000, 250, 4, 0),
@@ -55,7 +55,7 @@ static struct samsung_pll_rate_table pll2550_24mhz_tbl[] __initdata = {
/*
* Applicable for 2650 Type PLL for AUD_PLL.
*/
-static struct samsung_pll_rate_table pll2650_24mhz_tbl[] __initdata = {
+static const struct samsung_pll_rate_table pll2650_24mhz_tbl[] __initconst = {
PLL_36XX_RATE(1600000000, 200, 3, 0, 0),
PLL_36XX_RATE(1200000000, 100, 2, 0, 0),
PLL_36XX_RATE(1000000000, 250, 3, 1, 0),
@@ -78,7 +78,7 @@ static struct samsung_pll_rate_table pll2650_24mhz_tbl[] __initdata = {
/* CMU_AUD */
-static unsigned long aud_clk_regs[] __initdata = {
+static const unsigned long aud_clk_regs[] __initconst = {
MUX_SEL_AUD,
DIV_AUD0,
DIV_AUD1,
@@ -92,7 +92,7 @@ PNAME(mout_aud_pll_user_p) = {"fin_pll", "fout_aud_pll"};
PNAME(mout_sclk_aud_i2s_p) = {"mout_aud_pll_user", "ioclk_i2s_cdclk"};
PNAME(mout_sclk_aud_pcm_p) = {"mout_aud_pll_user", "ioclk_pcm_extclk"};
-static struct samsung_mux_clock aud_mux_clks[] __initdata = {
+static const struct samsung_mux_clock aud_mux_clks[] __initconst = {
MUX(AUD_MOUT_AUD_PLL_USER, "mout_aud_pll_user", mout_aud_pll_user_p,
MUX_SEL_AUD, 0, 1),
MUX(AUD_MOUT_SCLK_AUD_I2S, "mout_sclk_aud_i2s", mout_sclk_aud_i2s_p,
@@ -101,7 +101,7 @@ static struct samsung_mux_clock aud_mux_clks[] __initdata = {
MUX_SEL_AUD, 8, 1),
};
-static struct samsung_div_clock aud_div_clks[] __initdata = {
+static const struct samsung_div_clock aud_div_clks[] __initconst = {
DIV(AUD_DOUT_ACLK_AUD_131, "dout_aclk_aud_131", "mout_aud_pll_user",
DIV_AUD0, 0, 4),
@@ -113,7 +113,7 @@ static struct samsung_div_clock aud_div_clks[] __initdata = {
DIV_AUD1, 12, 4),
};
-static struct samsung_gate_clock aud_gate_clks[] __initdata = {
+static const struct samsung_gate_clock aud_gate_clks[] __initconst = {
GATE(AUD_SCLK_I2S, "sclk_aud_i2s", "dout_sclk_aud_i2s",
EN_SCLK_AUD, 0, CLK_SET_RATE_PARENT, 0),
GATE(AUD_SCLK_PCM, "sclk_aud_pcm", "dout_sclk_aud_pcm",
@@ -154,7 +154,7 @@ CLK_OF_DECLARE(exynos5260_clk_aud, "samsung,exynos5260-clock-aud",
/* CMU_DISP */
-static unsigned long disp_clk_regs[] __initdata = {
+static const unsigned long disp_clk_regs[] __initconst = {
MUX_SEL_DISP0,
MUX_SEL_DISP1,
MUX_SEL_DISP2,
@@ -201,7 +201,7 @@ PNAME(mout_phyclk_mipi_dphy_4lmrxclk_esc0_user_p) = {"fin_pll",
PNAME(mout_sclk_hdmi_spdif_p) = {"fin_pll", "ioclk_spdif_extclk",
"dout_aclk_peri_aud", "phyclk_hdmi_phy_ref_cko"};
-static struct samsung_mux_clock disp_mux_clks[] __initdata = {
+static const struct samsung_mux_clock disp_mux_clks[] __initconst = {
MUX(DISP_MOUT_ACLK_DISP_333_USER, "mout_aclk_disp_333_user",
mout_aclk_disp_333_user_p,
MUX_SEL_DISP0, 0, 1),
@@ -270,7 +270,7 @@ static struct samsung_mux_clock disp_mux_clks[] __initdata = {
MUX_SEL_DISP4, 4, 2),
};
-static struct samsung_div_clock disp_div_clks[] __initdata = {
+static const struct samsung_div_clock disp_div_clks[] __initconst = {
DIV(DISP_DOUT_PCLK_DISP_111, "dout_pclk_disp_111",
"mout_aclk_disp_222_user",
DIV_DISP, 8, 4),
@@ -283,7 +283,7 @@ static struct samsung_div_clock disp_div_clks[] __initdata = {
DIV_DISP, 16, 4),
};
-static struct samsung_gate_clock disp_gate_clks[] __initdata = {
+static const struct samsung_gate_clock disp_gate_clks[] __initconst = {
GATE(DISP_MOUT_HDMI_PHY_PIXEL_USER, "sclk_hdmi_link_i_pixel",
"mout_phyclk_hdmi_phy_pixel_clko_user",
EN_SCLK_DISP0, 26, CLK_SET_RATE_PARENT, 0),
@@ -344,7 +344,7 @@ CLK_OF_DECLARE(exynos5260_clk_disp, "samsung,exynos5260-clock-disp",
/* CMU_EGL */
-static unsigned long egl_clk_regs[] __initdata = {
+static const unsigned long egl_clk_regs[] __initconst = {
EGL_PLL_LOCK,
EGL_PLL_CON0,
EGL_PLL_CON1,
@@ -361,13 +361,13 @@ static unsigned long egl_clk_regs[] __initdata = {
PNAME(mout_egl_b_p) = {"mout_egl_pll", "dout_bus_pll"};
PNAME(mout_egl_pll_p) = {"fin_pll", "fout_egl_pll"};
-static struct samsung_mux_clock egl_mux_clks[] __initdata = {
+static const struct samsung_mux_clock egl_mux_clks[] __initconst = {
MUX(EGL_MOUT_EGL_PLL, "mout_egl_pll", mout_egl_pll_p,
MUX_SEL_EGL, 4, 1),
MUX(EGL_MOUT_EGL_B, "mout_egl_b", mout_egl_b_p, MUX_SEL_EGL, 16, 1),
};
-static struct samsung_div_clock egl_div_clks[] __initdata = {
+static const struct samsung_div_clock egl_div_clks[] __initconst = {
DIV(EGL_DOUT_EGL1, "dout_egl1", "mout_egl_b", DIV_EGL, 0, 3),
DIV(EGL_DOUT_EGL2, "dout_egl2", "dout_egl1", DIV_EGL, 4, 3),
DIV(EGL_DOUT_ACLK_EGL, "dout_aclk_egl", "dout_egl2", DIV_EGL, 8, 3),
@@ -379,7 +379,7 @@ static struct samsung_div_clock egl_div_clks[] __initdata = {
DIV(EGL_DOUT_EGL_PLL, "dout_egl_pll", "mout_egl_b", DIV_EGL, 24, 3),
};
-static struct samsung_pll_clock egl_pll_clks[] __initdata = {
+static const struct samsung_pll_clock egl_pll_clks[] __initconst = {
PLL(pll_2550xx, EGL_FOUT_EGL_PLL, "fout_egl_pll", "fin_pll",
EGL_PLL_LOCK, EGL_PLL_CON0,
pll2550_24mhz_tbl),
@@ -408,7 +408,7 @@ CLK_OF_DECLARE(exynos5260_clk_egl, "samsung,exynos5260-clock-egl",
/* CMU_FSYS */
-static unsigned long fsys_clk_regs[] __initdata = {
+static const unsigned long fsys_clk_regs[] __initconst = {
MUX_SEL_FSYS0,
MUX_SEL_FSYS1,
EN_ACLK_FSYS,
@@ -431,7 +431,7 @@ PNAME(mout_phyclk_usbdrd30_pipe_pclk_user_p) = {"fin_pll",
PNAME(mout_phyclk_usbdrd30_phyclock_user_p) = {"fin_pll",
"phyclk_usbdrd30_udrd30_phyclock"};
-static struct samsung_mux_clock fsys_mux_clks[] __initdata = {
+static const struct samsung_mux_clock fsys_mux_clks[] __initconst = {
MUX(FSYS_MOUT_PHYCLK_USBDRD30_PHYCLOCK_USER,
"mout_phyclk_usbdrd30_phyclock_user",
mout_phyclk_usbdrd30_phyclock_user_p,
@@ -454,7 +454,7 @@ static struct samsung_mux_clock fsys_mux_clks[] __initdata = {
MUX_SEL_FSYS1, 16, 1),
};
-static struct samsung_gate_clock fsys_gate_clks[] __initdata = {
+static const struct samsung_gate_clock fsys_gate_clks[] __initconst = {
GATE(FSYS_PHYCLK_USBHOST20, "phyclk_usbhost20_phyclock",
"mout_phyclk_usbdrd30_phyclock_user",
EN_SCLK_FSYS, 1, 0, 0),
@@ -508,7 +508,7 @@ CLK_OF_DECLARE(exynos5260_clk_fsys, "samsung,exynos5260-clock-fsys",
/* CMU_G2D */
-static unsigned long g2d_clk_regs[] __initdata = {
+static const unsigned long g2d_clk_regs[] __initconst = {
MUX_SEL_G2D,
MUX_STAT_G2D,
DIV_G2D,
@@ -535,18 +535,18 @@ static unsigned long g2d_clk_regs[] __initdata = {
PNAME(mout_aclk_g2d_333_user_p) = {"fin_pll", "dout_aclk_g2d_333"};
-static struct samsung_mux_clock g2d_mux_clks[] __initdata = {
+static const struct samsung_mux_clock g2d_mux_clks[] __initconst = {
MUX(G2D_MOUT_ACLK_G2D_333_USER, "mout_aclk_g2d_333_user",
mout_aclk_g2d_333_user_p,
MUX_SEL_G2D, 0, 1),
};
-static struct samsung_div_clock g2d_div_clks[] __initdata = {
+static const struct samsung_div_clock g2d_div_clks[] __initconst = {
DIV(G2D_DOUT_PCLK_G2D_83, "dout_pclk_g2d_83", "mout_aclk_g2d_333_user",
DIV_G2D, 0, 3),
};
-static struct samsung_gate_clock g2d_gate_clks[] __initdata = {
+static const struct samsung_gate_clock g2d_gate_clks[] __initconst = {
GATE(G2D_CLK_G2D, "clk_g2d", "mout_aclk_g2d_333_user",
EN_IP_G2D, 4, 0, 0),
GATE(G2D_CLK_JPEG, "clk_jpeg", "mout_aclk_g2d_333_user",
@@ -599,7 +599,7 @@ CLK_OF_DECLARE(exynos5260_clk_g2d, "samsung,exynos5260-clock-g2d",
/* CMU_G3D */
-static unsigned long g3d_clk_regs[] __initdata = {
+static const unsigned long g3d_clk_regs[] __initconst = {
G3D_PLL_LOCK,
G3D_PLL_CON0,
G3D_PLL_CON1,
@@ -615,23 +615,23 @@ static unsigned long g3d_clk_regs[] __initdata = {
PNAME(mout_g3d_pll_p) = {"fin_pll", "fout_g3d_pll"};
-static struct samsung_mux_clock g3d_mux_clks[] __initdata = {
+static const struct samsung_mux_clock g3d_mux_clks[] __initconst = {
MUX(G3D_MOUT_G3D_PLL, "mout_g3d_pll", mout_g3d_pll_p,
MUX_SEL_G3D, 0, 1),
};
-static struct samsung_div_clock g3d_div_clks[] __initdata = {
+static const struct samsung_div_clock g3d_div_clks[] __initconst = {
DIV(G3D_DOUT_PCLK_G3D, "dout_pclk_g3d", "dout_aclk_g3d", DIV_G3D, 0, 3),
DIV(G3D_DOUT_ACLK_G3D, "dout_aclk_g3d", "mout_g3d_pll", DIV_G3D, 4, 3),
};
-static struct samsung_gate_clock g3d_gate_clks[] __initdata = {
+static const struct samsung_gate_clock g3d_gate_clks[] __initconst = {
GATE(G3D_CLK_G3D, "clk_g3d", "dout_aclk_g3d", EN_IP_G3D, 2, 0, 0),
GATE(G3D_CLK_G3D_HPM, "clk_g3d_hpm", "dout_aclk_g3d",
EN_IP_G3D, 3, 0, 0),
};
-static struct samsung_pll_clock g3d_pll_clks[] __initdata = {
+static const struct samsung_pll_clock g3d_pll_clks[] __initconst = {
PLL(pll_2550, G3D_FOUT_G3D_PLL, "fout_g3d_pll", "fin_pll",
G3D_PLL_LOCK, G3D_PLL_CON0,
pll2550_24mhz_tbl),
@@ -662,7 +662,7 @@ CLK_OF_DECLARE(exynos5260_clk_g3d, "samsung,exynos5260-clock-g3d",
/* CMU_GSCL */
-static unsigned long gscl_clk_regs[] __initdata = {
+static const unsigned long gscl_clk_regs[] __initconst = {
MUX_SEL_GSCL,
DIV_GSCL,
EN_ACLK_GSCL,
@@ -692,7 +692,7 @@ PNAME(mout_aclk_m2m_400_user_p) = {"fin_pll", "dout_aclk_gscl_400"};
PNAME(mout_aclk_gscl_fimc_user_p) = {"fin_pll", "dout_aclk_gscl_400"};
PNAME(mout_aclk_csis_p) = {"dout_aclk_csis_200", "mout_aclk_gscl_fimc_user"};
-static struct samsung_mux_clock gscl_mux_clks[] __initdata = {
+static const struct samsung_mux_clock gscl_mux_clks[] __initconst = {
MUX(GSCL_MOUT_ACLK_GSCL_333_USER, "mout_aclk_gscl_333_user",
mout_aclk_gscl_333_user_p,
MUX_SEL_GSCL, 0, 1),
@@ -706,7 +706,7 @@ static struct samsung_mux_clock gscl_mux_clks[] __initdata = {
MUX_SEL_GSCL, 24, 1),
};
-static struct samsung_div_clock gscl_div_clks[] __initdata = {
+static const struct samsung_div_clock gscl_div_clks[] __initconst = {
DIV(GSCL_DOUT_PCLK_M2M_100, "dout_pclk_m2m_100",
"mout_aclk_m2m_400_user",
DIV_GSCL, 0, 3),
@@ -715,7 +715,7 @@ static struct samsung_div_clock gscl_div_clks[] __initdata = {
DIV_GSCL, 4, 3),
};
-static struct samsung_gate_clock gscl_gate_clks[] __initdata = {
+static const struct samsung_gate_clock gscl_gate_clks[] __initconst = {
GATE(GSCL_SCLK_CSIS0_WRAP, "sclk_csis0_wrap", "dout_aclk_csis_200",
EN_SCLK_GSCL_FIMC, 0, CLK_SET_RATE_PARENT, 0),
GATE(GSCL_SCLK_CSIS1_WRAP, "sclk_csis1_wrap", "dout_aclk_csis_200",
@@ -795,7 +795,7 @@ CLK_OF_DECLARE(exynos5260_clk_gscl, "samsung,exynos5260-clock-gscl",
/* CMU_ISP */
-static unsigned long isp_clk_regs[] __initdata = {
+static const unsigned long isp_clk_regs[] __initconst = {
MUX_SEL_ISP0,
MUX_SEL_ISP1,
DIV_ISP,
@@ -811,14 +811,14 @@ static unsigned long isp_clk_regs[] __initdata = {
PNAME(mout_isp_400_user_p) = {"fin_pll", "dout_aclk_isp1_400"};
PNAME(mout_isp_266_user_p) = {"fin_pll", "dout_aclk_isp1_266"};
-static struct samsung_mux_clock isp_mux_clks[] __initdata = {
+static const struct samsung_mux_clock isp_mux_clks[] __initconst = {
MUX(ISP_MOUT_ISP_266_USER, "mout_isp_266_user", mout_isp_266_user_p,
MUX_SEL_ISP0, 0, 1),
MUX(ISP_MOUT_ISP_400_USER, "mout_isp_400_user", mout_isp_400_user_p,
MUX_SEL_ISP0, 4, 1),
};
-static struct samsung_div_clock isp_div_clks[] __initdata = {
+static const struct samsung_div_clock isp_div_clks[] __initconst = {
DIV(ISP_DOUT_PCLK_ISP_66, "dout_pclk_isp_66", "mout_kfc",
DIV_ISP, 0, 3),
DIV(ISP_DOUT_PCLK_ISP_133, "dout_pclk_isp_133", "mout_kfc",
@@ -830,7 +830,7 @@ static struct samsung_div_clock isp_div_clks[] __initdata = {
DIV(ISP_DOUT_SCLK_MPWM, "dout_sclk_mpwm", "mout_kfc", DIV_ISP, 20, 2),
};
-static struct samsung_gate_clock isp_gate_clks[] __initdata = {
+static const struct samsung_gate_clock isp_gate_clks[] __initconst = {
GATE(ISP_CLK_GIC, "clk_isp_gic", "mout_aclk_isp1_266",
EN_IP_ISP0, 15, 0, 0),
@@ -914,7 +914,7 @@ CLK_OF_DECLARE(exynos5260_clk_isp, "samsung,exynos5260-clock-isp",
/* CMU_KFC */
-static unsigned long kfc_clk_regs[] __initdata = {
+static const unsigned long kfc_clk_regs[] __initconst = {
KFC_PLL_LOCK,
KFC_PLL_CON0,
KFC_PLL_CON1,
@@ -932,13 +932,13 @@ static unsigned long kfc_clk_regs[] __initdata = {
PNAME(mout_kfc_pll_p) = {"fin_pll", "fout_kfc_pll"};
PNAME(mout_kfc_p) = {"mout_kfc_pll", "dout_media_pll"};
-static struct samsung_mux_clock kfc_mux_clks[] __initdata = {
+static const struct samsung_mux_clock kfc_mux_clks[] __initconst = {
MUX(KFC_MOUT_KFC_PLL, "mout_kfc_pll", mout_kfc_pll_p,
MUX_SEL_KFC0, 0, 1),
MUX(KFC_MOUT_KFC, "mout_kfc", mout_kfc_p, MUX_SEL_KFC2, 0, 1),
};
-static struct samsung_div_clock kfc_div_clks[] __initdata = {
+static const struct samsung_div_clock kfc_div_clks[] __initconst = {
DIV(KFC_DOUT_KFC1, "dout_kfc1", "mout_kfc", DIV_KFC, 0, 3),
DIV(KFC_DOUT_KFC2, "dout_kfc2", "dout_kfc1", DIV_KFC, 4, 3),
DIV(KFC_DOUT_KFC_ATCLK, "dout_kfc_atclk", "dout_kfc2", DIV_KFC, 8, 3),
@@ -949,7 +949,7 @@ static struct samsung_div_clock kfc_div_clks[] __initdata = {
DIV(KFC_DOUT_KFC_PLL, "dout_kfc_pll", "mout_kfc", DIV_KFC, 24, 3),
};
-static struct samsung_pll_clock kfc_pll_clks[] __initdata = {
+static const struct samsung_pll_clock kfc_pll_clks[] __initconst = {
PLL(pll_2550xx, KFC_FOUT_KFC_PLL, "fout_kfc_pll", "fin_pll",
KFC_PLL_LOCK, KFC_PLL_CON0,
pll2550_24mhz_tbl),
@@ -978,7 +978,7 @@ CLK_OF_DECLARE(exynos5260_clk_kfc, "samsung,exynos5260-clock-kfc",
/* CMU_MFC */
-static unsigned long mfc_clk_regs[] __initdata = {
+static const unsigned long mfc_clk_regs[] __initconst = {
MUX_SEL_MFC,
DIV_MFC,
EN_ACLK_MFC,
@@ -991,18 +991,18 @@ static unsigned long mfc_clk_regs[] __initdata = {
PNAME(mout_aclk_mfc_333_user_p) = {"fin_pll", "dout_aclk_mfc_333"};
-static struct samsung_mux_clock mfc_mux_clks[] __initdata = {
+static const struct samsung_mux_clock mfc_mux_clks[] __initconst = {
MUX(MFC_MOUT_ACLK_MFC_333_USER, "mout_aclk_mfc_333_user",
mout_aclk_mfc_333_user_p,
MUX_SEL_MFC, 0, 1),
};
-static struct samsung_div_clock mfc_div_clks[] __initdata = {
+static const struct samsung_div_clock mfc_div_clks[] __initconst = {
DIV(MFC_DOUT_PCLK_MFC_83, "dout_pclk_mfc_83", "mout_aclk_mfc_333_user",
DIV_MFC, 0, 3),
};
-static struct samsung_gate_clock mfc_gate_clks[] __initdata = {
+static const struct samsung_gate_clock mfc_gate_clks[] __initconst = {
GATE(MFC_CLK_MFC, "clk_mfc", "mout_aclk_mfc_333_user",
EN_IP_MFC, 1, 0, 0),
GATE(MFC_CLK_SMMU2_MFCM0, "clk_smmu2_mfcm0", "mout_aclk_mfc_333_user",
@@ -1034,7 +1034,7 @@ CLK_OF_DECLARE(exynos5260_clk_mfc, "samsung,exynos5260-clock-mfc",
/* CMU_MIF */
-static unsigned long mif_clk_regs[] __initdata = {
+static const unsigned long mif_clk_regs[] __initconst = {
MEM_PLL_LOCK,
BUS_PLL_LOCK,
MEDIA_PLL_LOCK,
@@ -1076,7 +1076,7 @@ PNAME(mout_mif_drex2x_p) = {"dout_mem_pll", "dout_bus_pll"};
PNAME(mout_clkm_phy_p) = {"mout_mif_drex", "dout_media_pll"};
PNAME(mout_clk2x_phy_p) = {"mout_mif_drex2x", "dout_media_pll"};
-static struct samsung_mux_clock mif_mux_clks[] __initdata = {
+static const struct samsung_mux_clock mif_mux_clks[] __initconst = {
MUX(MIF_MOUT_MEM_PLL, "mout_mem_pll", mout_mem_pll_p,
MUX_SEL_MIF, 0, 1),
MUX(MIF_MOUT_BUS_PLL, "mout_bus_pll", mout_bus_pll_p,
@@ -1093,7 +1093,7 @@ static struct samsung_mux_clock mif_mux_clks[] __initdata = {
MUX_SEL_MIF, 24, 1),
};
-static struct samsung_div_clock mif_div_clks[] __initdata = {
+static const struct samsung_div_clock mif_div_clks[] __initconst = {
DIV(MIF_DOUT_MEDIA_PLL, "dout_media_pll", "mout_media_pll",
DIV_MIF, 0, 3),
DIV(MIF_DOUT_MEM_PLL, "dout_mem_pll", "mout_mem_pll",
@@ -1112,7 +1112,7 @@ static struct samsung_div_clock mif_div_clks[] __initdata = {
DIV_MIF, 28, 4),
};
-static struct samsung_gate_clock mif_gate_clks[] __initdata = {
+static const struct samsung_gate_clock mif_gate_clks[] __initconst = {
GATE(MIF_CLK_LPDDR3PHY_WRAP0, "clk_lpddr3phy_wrap0", "dout_clk2x_phy",
EN_IP_MIF, 12, CLK_IGNORE_UNUSED, 0),
GATE(MIF_CLK_LPDDR3PHY_WRAP1, "clk_lpddr3phy_wrap1", "dout_clk2x_phy",
@@ -1146,7 +1146,7 @@ static struct samsung_gate_clock mif_gate_clks[] __initdata = {
CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
};
-static struct samsung_pll_clock mif_pll_clks[] __initdata = {
+static const struct samsung_pll_clock mif_pll_clks[] __initconst = {
PLL(pll_2550xx, MIF_FOUT_MEM_PLL, "fout_mem_pll", "fin_pll",
MEM_PLL_LOCK, MEM_PLL_CON0,
pll2550_24mhz_tbl),
@@ -1183,7 +1183,7 @@ CLK_OF_DECLARE(exynos5260_clk_mif, "samsung,exynos5260-clock-mif",
/* CMU_PERI */
-static unsigned long peri_clk_regs[] __initdata = {
+static const unsigned long peri_clk_regs[] __initconst = {
MUX_SEL_PERI,
MUX_SEL_PERI1,
DIV_PERI,
@@ -1219,7 +1219,7 @@ PNAME(mout_sclk_i2scod_p) = {"ioclk_i2s_cdclk", "fin_pll", "dout_aclk_peri_aud",
PNAME(mout_sclk_spdif_p) = {"ioclk_spdif_extclk", "fin_pll",
"dout_aclk_peri_aud", "phyclk_hdmi_phy_ref_cko"};
-static struct samsung_mux_clock peri_mux_clks[] __initdata = {
+static const struct samsung_mux_clock peri_mux_clks[] __initconst = {
MUX(PERI_MOUT_SCLK_PCM, "mout_sclk_pcm", mout_sclk_pcm_p,
MUX_SEL_PERI1, 4, 2),
MUX(PERI_MOUT_SCLK_I2SCOD, "mout_sclk_i2scod", mout_sclk_i2scod_p,
@@ -1228,12 +1228,12 @@ static struct samsung_mux_clock peri_mux_clks[] __initdata = {
MUX_SEL_PERI1, 20, 2),
};
-static struct samsung_div_clock peri_div_clks[] __initdata = {
+static const struct samsung_div_clock peri_div_clks[] __initconst = {
DIV(PERI_DOUT_PCM, "dout_pcm", "mout_sclk_pcm", DIV_PERI, 0, 8),
DIV(PERI_DOUT_I2S, "dout_i2s", "mout_sclk_i2scod", DIV_PERI, 8, 6),
};
-static struct samsung_gate_clock peri_gate_clks[] __initdata = {
+static const struct samsung_gate_clock peri_gate_clks[] __initconst = {
GATE(PERI_SCLK_PCM1, "sclk_pcm1", "dout_pcm", EN_SCLK_PERI, 0,
CLK_SET_RATE_PARENT, 0),
GATE(PERI_SCLK_I2S, "sclk_i2s", "dout_i2s", EN_SCLK_PERI, 1,
@@ -1389,7 +1389,7 @@ CLK_OF_DECLARE(exynos5260_clk_peri, "samsung,exynos5260-clock-peri",
/* CMU_TOP */
-static unsigned long top_clk_regs[] __initdata = {
+static const unsigned long top_clk_regs[] __initconst = {
DISP_PLL_LOCK,
AUD_PLL_LOCK,
DISP_PLL_CON0,
@@ -1430,7 +1430,7 @@ static unsigned long top_clk_regs[] __initdata = {
};
/* fixed rate clocks generated inside the soc */
-static struct samsung_fixed_rate_clock fixed_rate_clks[] __initdata = {
+static const struct samsung_fixed_rate_clock fixed_rate_clks[] __initconst = {
FRATE(PHYCLK_DPTX_PHY_CH3_TXD_CLK, "phyclk_dptx_phy_ch3_txd_clk", NULL,
0, 270000000),
FRATE(PHYCLK_DPTX_PHY_CH2_TXD_CLK, "phyclk_dptx_phy_ch2_txd_clk", NULL,
@@ -1513,7 +1513,7 @@ PNAME(mout_sclk_fsys_mmc1_sdclkin_b_p) = {"mout_sclk_fsys_mmc1_sdclkin_a",
PNAME(mout_sclk_fsys_mmc2_sdclkin_b_p) = {"mout_sclk_fsys_mmc2_sdclkin_a",
"mout_mediatop_pll_user"};
-static struct samsung_mux_clock top_mux_clks[] __initdata = {
+static const struct samsung_mux_clock top_mux_clks[] __initconst = {
MUX(TOP_MOUT_MEDIATOP_PLL_USER, "mout_mediatop_pll_user",
mout_mediatop_pll_user_p,
MUX_SEL_TOP_PLL0, 0, 1),
@@ -1673,7 +1673,7 @@ static struct samsung_mux_clock top_mux_clks[] __initdata = {
MUX_SEL_TOP_GSCL, 20, 1),
};
-static struct samsung_div_clock top_div_clks[] __initdata = {
+static const struct samsung_div_clock top_div_clks[] __initconst = {
DIV(TOP_DOUT_ACLK_G2D_333, "dout_aclk_g2d_333", "mout_aclk_g2d_333",
DIV_TOP_G2D_MFC, 0, 3),
DIV(TOP_DOUT_ACLK_MFC_333, "dout_aclk_mfc_333", "mout_aclk_mfc_333",
@@ -1794,7 +1794,7 @@ static struct samsung_div_clock top_div_clks[] __initdata = {
};
-static struct samsung_gate_clock top_gate_clks[] __initdata = {
+static const struct samsung_gate_clock top_gate_clks[] __initconst = {
GATE(TOP_SCLK_MMC0, "sclk_fsys_mmc0_sdclkin",
"dout_sclk_fsys_mmc0_sdclkin_b",
EN_SCLK_TOP, 7, CLK_SET_RATE_PARENT, 0),
@@ -1809,7 +1809,7 @@ static struct samsung_gate_clock top_gate_clks[] __initdata = {
CLK_SET_RATE_PARENT, 0),
};
-static struct samsung_pll_clock top_pll_clks[] __initdata = {
+static const struct samsung_pll_clock top_pll_clks[] __initconst = {
PLL(pll_2550xx, TOP_FOUT_DISP_PLL, "fout_disp_pll", "fin_pll",
DISP_PLL_LOCK, DISP_PLL_CON0,
pll2550_24mhz_tbl),
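Every hunk in this file follows one conversion: clock description tables that are only consumed while the driver initializes become const and move from __initdata to __initconst. A minimal sketch of why the annotation must change together with the qualifier (hypothetical names, not part of this patch):

#include <linux/init.h>

/*
 * __initdata places an object in .init.data, __initconst in
 * .init.rodata; both sections are freed once boot completes.
 * Tagging a const object with __initdata would mix const and
 * non-const data in one section and the build fails with a
 * "section type conflict", so const tables use __initconst.
 */
static const unsigned long example_clk_regs[] __initconst = {
	0x0200,		/* hypothetical register offset */
	0x0500,		/* hypothetical register offset */
};

static int __init example_init(void)
{
	/* only __init code may reference __initconst data */
	return example_clk_regs[0] ? 0 : -EINVAL;
}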
diff --git a/drivers/clk/samsung/clk-exynos5410.c b/drivers/clk/samsung/clk-exynos5410.c
index d5d5dcabc..54ec486a5 100644
--- a/drivers/clk/samsung/clk-exynos5410.c
+++ b/drivers/clk/samsung/clk-exynos5410.c
@@ -31,11 +31,14 @@
#define SRC_CPU 0x200
#define DIV_CPU0 0x500
#define SRC_CPERI1 0x4204
+#define GATE_IP_G2D 0x8800
#define DIV_TOP0 0x10510
#define DIV_TOP1 0x10514
+#define DIV_FSYS0 0x10548
#define DIV_FSYS1 0x1054c
#define DIV_FSYS2 0x10550
#define DIV_PERIC0 0x10558
+#define DIV_PERIC3 0x10564
#define SRC_TOP0 0x10210
#define SRC_TOP1 0x10214
#define SRC_TOP2 0x10218
@@ -44,6 +47,8 @@
#define SRC_MASK_FSYS 0x10340
#define SRC_MASK_PERIC0 0x10350
#define GATE_BUS_FSYS0 0x10740
+#define GATE_TOP_SCLK_FSYS 0x10840
+#define GATE_TOP_SCLK_PERIC 0x10850
#define GATE_IP_FSYS 0x10944
#define GATE_IP_PERIC 0x10950
#define GATE_IP_PERIS 0x10960
@@ -71,12 +76,13 @@ PNAME(mout_kfc_p) = { "mout_kpll", "sclk_mpll", };
PNAME(mpll_user_p) = { "fin_pll", "sclk_mpll", };
PNAME(bpll_user_p) = { "fin_pll", "sclk_bpll", };
PNAME(mpll_bpll_p) = { "sclk_mpll_muxed", "sclk_bpll_muxed", };
+PNAME(sclk_mpll_bpll_p) = { "sclk_mpll_bpll", "fin_pll", };
PNAME(group2_p) = { "fin_pll", "fin_pll", "none", "none",
"none", "none", "sclk_mpll_bpll",
"none", "none", "sclk_cpll" };
-static struct samsung_mux_clock exynos5410_mux_clks[] __initdata = {
+static const struct samsung_mux_clock exynos5410_mux_clks[] __initconst = {
MUX(0, "mout_apll", apll_p, SRC_CPU, 0, 1),
MUX(0, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1),
@@ -96,16 +102,20 @@ static struct samsung_mux_clock exynos5410_mux_clks[] __initdata = {
MUX(0, "mout_mmc0", group2_p, SRC_FSYS, 0, 4),
MUX(0, "mout_mmc1", group2_p, SRC_FSYS, 4, 4),
MUX(0, "mout_mmc2", group2_p, SRC_FSYS, 8, 4),
+ MUX(0, "mout_usbd300", sclk_mpll_bpll_p, SRC_FSYS, 28, 1),
+ MUX(0, "mout_usbd301", sclk_mpll_bpll_p, SRC_FSYS, 29, 1),
MUX(0, "mout_uart0", group2_p, SRC_PERIC0, 0, 4),
MUX(0, "mout_uart1", group2_p, SRC_PERIC0, 4, 4),
MUX(0, "mout_uart2", group2_p, SRC_PERIC0, 8, 4),
+ MUX(0, "mout_uart3", group2_p, SRC_PERIC0, 12, 4),
+ MUX(0, "mout_pwm", group2_p, SRC_PERIC0, 24, 4),
MUX(0, "mout_aclk200", mpll_bpll_p, SRC_TOP0, 12, 1),
MUX(0, "mout_aclk400", mpll_bpll_p, SRC_TOP0, 20, 1),
};
-static struct samsung_div_clock exynos5410_div_clks[] __initdata = {
+static const struct samsung_div_clock exynos5410_div_clks[] __initconst = {
DIV(0, "div_arm", "mout_cpu", DIV_CPU0, 0, 3),
DIV(0, "div_arm2", "div_arm", DIV_CPU0, 28, 3),
@@ -121,6 +131,11 @@ static struct samsung_div_clock exynos5410_div_clks[] __initdata = {
DIV(0, "aclk66_pre", "sclk_mpll_muxed", DIV_TOP1, 24, 3),
DIV(0, "aclk66", "aclk66_pre", DIV_TOP0, 0, 3),
+ DIV(0, "dout_usbphy300", "mout_usbd300", DIV_FSYS0, 16, 4),
+ DIV(0, "dout_usbphy301", "mout_usbd301", DIV_FSYS0, 20, 4),
+ DIV(0, "dout_usbd300", "mout_usbd300", DIV_FSYS0, 24, 4),
+ DIV(0, "dout_usbd301", "mout_usbd301", DIV_FSYS0, 28, 4),
+
DIV(0, "div_mmc0", "mout_mmc0", DIV_FSYS1, 0, 4),
DIV(0, "div_mmc1", "mout_mmc1", DIV_FSYS1, 16, 4),
DIV(0, "div_mmc2", "mout_mmc2", DIV_FSYS2, 0, 4),
@@ -137,12 +152,19 @@ static struct samsung_div_clock exynos5410_div_clks[] __initdata = {
DIV(0, "div_uart2", "mout_uart2", DIV_PERIC0, 8, 4),
DIV(0, "div_uart3", "mout_uart3", DIV_PERIC0, 12, 4),
+ DIV(0, "dout_pwm", "mout_pwm", DIV_PERIC3, 0, 4),
+
DIV(0, "aclk200", "mout_aclk200", DIV_TOP0, 12, 3),
+ DIV(0, "aclk266", "mpll_user_p", DIV_TOP0, 16, 3),
DIV(0, "aclk400", "mout_aclk400", DIV_TOP0, 24, 3),
};
-static struct samsung_gate_clock exynos5410_gate_clks[] __initdata = {
+static const struct samsung_gate_clock exynos5410_gate_clks[] __initconst = {
+ GATE(CLK_SSS, "sss", "aclk266", GATE_IP_G2D, 2, 0, 0),
GATE(CLK_MCT, "mct", "aclk66", GATE_IP_PERIS, 18, 0, 0),
+ GATE(CLK_WDT, "wdt", "aclk66", GATE_IP_PERIS, 19, 0, 0),
+ GATE(CLK_RTC, "rtc", "aclk66", GATE_IP_PERIS, 20, 0, 0),
+ GATE(CLK_TMU, "tmu", "aclk66", GATE_IP_PERIS, 21, 0, 0),
GATE(CLK_SCLK_MMC0, "sclk_mmc0", "div_mmc_pre0",
SRC_MASK_FSYS, 0, CLK_SET_RATE_PARENT, 0),
@@ -155,9 +177,31 @@ static struct samsung_gate_clock exynos5410_gate_clks[] __initdata = {
GATE(CLK_MMC1, "sdmmc1", "aclk200", GATE_BUS_FSYS0, 13, 0, 0),
GATE(CLK_MMC2, "sdmmc2", "aclk200", GATE_BUS_FSYS0, 14, 0, 0),
+ GATE(CLK_SCLK_USBPHY301, "sclk_usbphy301", "dout_usbphy301",
+ GATE_TOP_SCLK_FSYS, 7, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_USBPHY300, "sclk_usbphy300", "dout_usbphy300",
+ GATE_TOP_SCLK_FSYS, 8, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_USBD300, "sclk_usbd300", "dout_usbd300",
+ GATE_TOP_SCLK_FSYS, 9, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_USBD301, "sclk_usbd301", "dout_usbd301",
+ GATE_TOP_SCLK_FSYS, 10, CLK_SET_RATE_PARENT, 0),
+
+ GATE(CLK_SCLK_PWM, "sclk_pwm", "dout_pwm",
+ GATE_TOP_SCLK_PERIC, 11, CLK_SET_RATE_PARENT, 0),
+
GATE(CLK_UART0, "uart0", "aclk66", GATE_IP_PERIC, 0, 0, 0),
GATE(CLK_UART1, "uart1", "aclk66", GATE_IP_PERIC, 1, 0, 0),
GATE(CLK_UART2, "uart2", "aclk66", GATE_IP_PERIC, 2, 0, 0),
+ GATE(CLK_UART3, "uart3", "aclk66", GATE_IP_PERIC, 3, 0, 0),
+ GATE(CLK_I2C0, "i2c0", "aclk66", GATE_IP_PERIC, 6, 0, 0),
+ GATE(CLK_I2C1, "i2c1", "aclk66", GATE_IP_PERIC, 7, 0, 0),
+ GATE(CLK_I2C2, "i2c2", "aclk66", GATE_IP_PERIC, 8, 0, 0),
+ GATE(CLK_I2C3, "i2c3", "aclk66", GATE_IP_PERIC, 9, 0, 0),
+ GATE(CLK_USI0, "usi0", "aclk66", GATE_IP_PERIC, 10, 0, 0),
+ GATE(CLK_USI1, "usi1", "aclk66", GATE_IP_PERIC, 11, 0, 0),
+ GATE(CLK_USI2, "usi2", "aclk66", GATE_IP_PERIC, 12, 0, 0),
+ GATE(CLK_USI3, "usi3", "aclk66", GATE_IP_PERIC, 13, 0, 0),
+ GATE(CLK_PWM, "pwm", "aclk66", GATE_IP_PERIC, 24, 0, 0),
GATE(CLK_SCLK_UART0, "sclk_uart0", "div_uart0",
SRC_MASK_PERIC0, 0, CLK_SET_RATE_PARENT, 0),
@@ -165,9 +209,15 @@ static struct samsung_gate_clock exynos5410_gate_clks[] __initdata = {
SRC_MASK_PERIC0, 4, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_UART2, "sclk_uart2", "div_uart2",
SRC_MASK_PERIC0, 8, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_UART3, "sclk_uart3", "div_uart3",
+ SRC_MASK_PERIC0, 12, CLK_SET_RATE_PARENT, 0),
+
+ GATE(CLK_USBH20, "usbh20", "aclk200_fsys", GATE_IP_FSYS, 18, 0, 0),
+ GATE(CLK_USBD300, "usbd300", "aclk200_fsys", GATE_IP_FSYS, 19, 0, 0),
+ GATE(CLK_USBD301, "usbd301", "aclk200_fsys", GATE_IP_FSYS, 20, 0, 0),
};
-static struct samsung_pll_clock exynos5410_plls[nr_plls] __initdata = {
+static const struct samsung_pll_clock exynos5410_plls[nr_plls] __initconst = {
[apll] = PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll", APLL_LOCK,
APLL_CON0, NULL),
[cpll] = PLL(pll_35xx, CLK_FOUT_CPLL, "fout_cpll", "fin_pll", CPLL_LOCK,
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 92382cef9..bb196ca21 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -160,7 +160,7 @@ static struct samsung_clk_reg_dump *exynos5800_save;
* list of controller registers to be saved and restored during a
* suspend/resume cycle.
*/
-static unsigned long exynos5x_clk_regs[] __initdata = {
+static const unsigned long exynos5x_clk_regs[] __initconst = {
SRC_CPU,
DIV_CPU0,
DIV_CPU1,
@@ -248,7 +248,7 @@ static unsigned long exynos5x_clk_regs[] __initdata = {
DIV_KFC0,
};
-static unsigned long exynos5800_clk_regs[] __initdata = {
+static const unsigned long exynos5800_clk_regs[] __initconst = {
SRC_TOP8,
SRC_TOP9,
SRC_CAM,
@@ -306,7 +306,7 @@ static struct syscore_ops exynos5420_clk_syscore_ops = {
.resume = exynos5420_clk_resume,
};
-static void exynos5420_clk_sleep_init(void)
+static void __init exynos5420_clk_sleep_init(void)
{
exynos5x_save = samsung_clk_alloc_reg_dump(exynos5x_clk_regs,
ARRAY_SIZE(exynos5x_clk_regs));
@@ -333,7 +333,7 @@ err_soc:
return;
}
#else
-static void exynos5420_clk_sleep_init(void) {}
+static void __init exynos5420_clk_sleep_init(void) {}
#endif
/* list of all parent clocks */
@@ -484,7 +484,7 @@ static struct samsung_fixed_rate_clock
};
/* fixed rate clocks generated inside the soc */
-static struct samsung_fixed_rate_clock exynos5x_fixed_rate_clks[] __initdata = {
+static const struct samsung_fixed_rate_clock exynos5x_fixed_rate_clks[] __initconst = {
FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, 0, 24000000),
FRATE(0, "sclk_pwi", NULL, 0, 24000000),
FRATE(0, "sclk_usbh20", NULL, 0, 48000000),
@@ -492,19 +492,19 @@ static struct samsung_fixed_rate_clock exynos5x_fixed_rate_clks[] __initdata = {
FRATE(0, "sclk_usbh20_scan_clk", NULL, 0, 480000000),
};
-static struct samsung_fixed_factor_clock
- exynos5x_fixed_factor_clks[] __initdata = {
+static const struct samsung_fixed_factor_clock
+ exynos5x_fixed_factor_clks[] __initconst = {
FFACTOR(0, "ff_hsic_12m", "fin_pll", 1, 2, 0),
FFACTOR(0, "ff_sw_aclk66", "mout_sw_aclk66", 1, 2, 0),
};
-static struct samsung_fixed_factor_clock
- exynos5800_fixed_factor_clks[] __initdata = {
+static const struct samsung_fixed_factor_clock
+ exynos5800_fixed_factor_clks[] __initconst = {
FFACTOR(0, "ff_dout_epll2", "mout_sclk_epll", 1, 2, 0),
FFACTOR(0, "ff_dout_spll2", "mout_sclk_spll", 1, 2, 0),
};
-static struct samsung_mux_clock exynos5800_mux_clks[] __initdata = {
+static const struct samsung_mux_clock exynos5800_mux_clks[] __initconst = {
MUX(0, "mout_aclk400_isp", mout_group3_5800_p, SRC_TOP0, 0, 3),
MUX(0, "mout_aclk400_mscl", mout_group3_5800_p, SRC_TOP0, 4, 3),
MUX(0, "mout_aclk400_wcore", mout_group2_5800_p, SRC_TOP0, 16, 3),
@@ -553,7 +553,7 @@ static struct samsung_mux_clock exynos5800_mux_clks[] __initdata = {
MUX(0, "mout_fimd1", mout_group2_p, SRC_DISP10, 4, 3),
};
-static struct samsung_div_clock exynos5800_div_clks[] __initdata = {
+static const struct samsung_div_clock exynos5800_div_clks[] __initconst = {
DIV(CLK_DOUT_ACLK400_WCORE, "dout_aclk400_wcore",
"mout_aclk400_wcore", DIV_TOP0, 16, 3),
DIV(0, "dout_aclk550_cam", "mout_aclk550_cam",
@@ -569,14 +569,14 @@ static struct samsung_div_clock exynos5800_div_clks[] __initdata = {
DIV(0, "dout_sclk_sw", "sclk_spll", DIV_TOP9, 24, 6),
};
-static struct samsung_gate_clock exynos5800_gate_clks[] __initdata = {
+static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = {
GATE(CLK_ACLK550_CAM, "aclk550_cam", "mout_user_aclk550_cam",
GATE_BUS_TOP, 24, 0, 0),
GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
GATE_BUS_TOP, 27, 0, 0),
};
-static struct samsung_mux_clock exynos5420_mux_clks[] __initdata = {
+static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
MUX(0, "sclk_bpll", mout_bpll_p, TOP_SPARE2, 0, 1),
MUX(0, "mout_aclk400_wcore_bpll", mout_aclk400_wcore_bpll_p,
TOP_SPARE2, 4, 1),
@@ -606,12 +606,12 @@ static struct samsung_mux_clock exynos5420_mux_clks[] __initdata = {
MUX(0, "mout_fimd1", mout_group3_p, SRC_DISP10, 4, 1),
};
-static struct samsung_div_clock exynos5420_div_clks[] __initdata = {
+static const struct samsung_div_clock exynos5420_div_clks[] __initconst = {
DIV(CLK_DOUT_ACLK400_WCORE, "dout_aclk400_wcore",
"mout_aclk400_wcore_bpll", DIV_TOP0, 16, 3),
};
-static struct samsung_mux_clock exynos5x_mux_clks[] __initdata = {
+static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = {
MUX(0, "mout_user_pclk66_gpio", mout_user_pclk66_gpio_p,
SRC_TOP7, 4, 1),
MUX(0, "mout_mspll_kfc", mout_mspll_cpu_p, SRC_TOP7, 8, 2),
@@ -778,7 +778,7 @@ static struct samsung_mux_clock exynos5x_mux_clks[] __initdata = {
MUX(0, "mout_isp_sensor", mout_group2_p, SRC_ISP, 28, 3),
};
-static struct samsung_div_clock exynos5x_div_clks[] __initdata = {
+static const struct samsung_div_clock exynos5x_div_clks[] __initconst = {
DIV(0, "div_arm", "mout_cpu", DIV_CPU0, 0, 3),
DIV(0, "sclk_apll", "mout_apll", DIV_CPU0, 24, 3),
DIV(0, "armclk2", "div_arm", DIV_CPU0, 28, 3),
@@ -911,7 +911,7 @@ static struct samsung_div_clock exynos5x_div_clks[] __initdata = {
CLK_SET_RATE_PARENT, 0),
};
-static struct samsung_gate_clock exynos5x_gate_clks[] __initdata = {
+static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
/* G2D */
GATE(CLK_MDMA0, "mdma0", "aclk266_g2d", GATE_IP_G2D, 1, 0, 0),
GATE(CLK_SSS, "sss", "aclk266_g2d", GATE_IP_G2D, 2, 0, 0),
@@ -946,7 +946,7 @@ static struct samsung_gate_clock exynos5x_gate_clks[] __initdata = {
GATE_BUS_TOP, 13, 0, 0),
GATE(0, "aclk166", "mout_user_aclk166",
GATE_BUS_TOP, 14, CLK_IGNORE_UNUSED, 0),
- GATE(0, "aclk333", "mout_user_aclk333",
+ GATE(CLK_ACLK333, "aclk333", "mout_user_aclk333",
GATE_BUS_TOP, 15, CLK_IGNORE_UNUSED, 0),
GATE(0, "aclk400_isp", "mout_user_aclk400_isp",
GATE_BUS_TOP, 16, 0, 0),
@@ -1219,7 +1219,7 @@ static struct samsung_gate_clock exynos5x_gate_clks[] __initdata = {
GATE(CLK_G3D, "g3d", "mout_user_aclk_g3d", GATE_IP_G3D, 9, 0, 0),
};
-static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] = {
+static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] __initconst = {
PLL_35XX_RATE(2000000000, 250, 3, 0),
PLL_35XX_RATE(1900000000, 475, 6, 0),
PLL_35XX_RATE(1800000000, 225, 3, 0),
@@ -1356,8 +1356,6 @@ static void __init exynos5x_clk_init(struct device_node *np,
exynos5x_soc = soc;
ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
- if (!ctx)
- panic("%s: unable to allocate context.\n", __func__);
samsung_clk_of_register_fixed_ext(ctx, exynos5x_fixed_rate_ext_clks,
ARRAY_SIZE(exynos5x_fixed_rate_ext_clks),
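The NULL check removed in the hunk above was dead code: samsung_clk_init() handles allocation failure itself by panicking, so it can never return NULL to the caller. Paraphrased from the helper in drivers/clk/samsung/clk.c (details elided):

struct samsung_clk_provider *samsung_clk_init(struct device_node *np,
			void __iomem *base, unsigned long nr_clks)
{
	struct samsung_clk_provider *ctx = kzalloc(..., GFP_KERNEL);

	if (!ctx)
		panic("could not allocate clock provider context.\n");
	...
	return ctx;	/* never NULL */
}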
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index 128527b8f..ea1608682 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -11,10 +11,12 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <dt-bindings/clock/exynos5433.h>
#include "clk.h"
+#include "clk-cpu.h"
#include "clk-pll.h"
/*
@@ -108,7 +110,7 @@
#define ENABLE_CMU_TOP 0x0c00
#define ENABLE_CMU_TOP_DIV_STAT 0x0c04
-static unsigned long top_clk_regs[] __initdata = {
+static const unsigned long top_clk_regs[] __initconst = {
ISP_PLL_LOCK,
AUD_PLL_LOCK,
ISP_PLL_CON0,
@@ -218,11 +220,11 @@ PNAME(mout_sclk_audio0_p) = { "ioclk_audiocdclk0", "oscclk",
PNAME(mout_sclk_hdmi_spdif_p) = { "sclk_audio1", "ioclk_spdif_extclk", };
-static struct samsung_fixed_factor_clock top_fixed_factor_clks[] __initdata = {
+static const struct samsung_fixed_factor_clock top_fixed_factor_clks[] __initconst = {
FFACTOR(0, "oscclk_efuse_common", "oscclk", 1, 1, 0),
};
-static struct samsung_fixed_rate_clock top_fixed_clks[] __initdata = {
+static const struct samsung_fixed_rate_clock top_fixed_clks[] __initconst = {
/* Xi2s{0|1}CDCLK input clock for I2S/PCM */
FRATE(0, "ioclk_audiocdclk1", NULL, 0, 100000000),
FRATE(0, "ioclk_audiocdclk0", NULL, 0, 100000000),
@@ -238,7 +240,7 @@ static struct samsung_fixed_rate_clock top_fixed_clks[] __initdata = {
FRATE(0, "ioclk_i2s1_bclk_in", NULL, 0, 12288000),
};
-static struct samsung_mux_clock top_mux_clks[] __initdata = {
+static const struct samsung_mux_clock top_mux_clks[] __initconst = {
/* MUX_SEL_TOP0 */
MUX(CLK_MOUT_AUD_PLL, "mout_aud_pll", mout_aud_pll_p, MUX_SEL_TOP0,
4, 1),
@@ -374,7 +376,7 @@ static struct samsung_mux_clock top_mux_clks[] __initdata = {
mout_sclk_hdmi_spdif_p, MUX_SEL_TOP_DISP, 0, 1),
};
-static struct samsung_div_clock top_div_clks[] __initdata = {
+static const struct samsung_div_clock top_div_clks[] __initconst = {
/* DIV_TOP0 */
DIV(CLK_DIV_ACLK_CAM1_333, "div_aclk_cam1_333", "mout_aclk_cam1_333",
DIV_TOP0, 28, 3),
@@ -538,7 +540,7 @@ static struct samsung_div_clock top_div_clks[] __initdata = {
DIV_TOP_PERIC4, 0, 4),
};
-static struct samsung_gate_clock top_gate_clks[] __initdata = {
+static const struct samsung_gate_clock top_gate_clks[] __initconst = {
/* ENABLE_ACLK_TOP */
GATE(CLK_ACLK_G3D_400, "aclk_g3d_400", "div_aclk_g3d_400",
ENABLE_ACLK_TOP, 30, 0, 0),
@@ -639,7 +641,7 @@ static struct samsung_gate_clock top_gate_clks[] __initdata = {
/* ENABLE_SCLK_TOP_FSYS */
GATE(CLK_SCLK_PCIE_100_FSYS, "sclk_pcie_100_fsys", "div_sclk_pcie_100",
- ENABLE_SCLK_TOP_FSYS, 7, 0, 0),
+ ENABLE_SCLK_TOP_FSYS, 7, CLK_IGNORE_UNUSED, 0),
GATE(CLK_SCLK_MMC2_FSYS, "sclk_mmc2_fsys", "div_sclk_mmc2_b",
ENABLE_SCLK_TOP_FSYS, 6, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_MMC1_FSYS, "sclk_mmc1_fsys", "div_sclk_mmc1_b",
@@ -668,11 +670,14 @@ static struct samsung_gate_clock top_gate_clks[] __initdata = {
GATE(CLK_SCLK_PCM1_PERIC, "sclk_pcm1_peric", "div_sclk_pcm1",
ENABLE_SCLK_TOP_PERIC, 7, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_UART2_PERIC, "sclk_uart2_peric", "div_sclk_uart2",
- ENABLE_SCLK_TOP_PERIC, 5, CLK_SET_RATE_PARENT, 0),
+ ENABLE_SCLK_TOP_PERIC, 5, CLK_SET_RATE_PARENT |
+ CLK_IGNORE_UNUSED, 0),
GATE(CLK_SCLK_UART1_PERIC, "sclk_uart1_peric", "div_sclk_uart1",
- ENABLE_SCLK_TOP_PERIC, 4, CLK_SET_RATE_PARENT, 0),
+ ENABLE_SCLK_TOP_PERIC, 4, CLK_SET_RATE_PARENT |
+ CLK_IGNORE_UNUSED, 0),
GATE(CLK_SCLK_UART0_PERIC, "sclk_uart0_peric", "div_sclk_uart0",
- ENABLE_SCLK_TOP_PERIC, 3, CLK_SET_RATE_PARENT, 0),
+ ENABLE_SCLK_TOP_PERIC, 3, CLK_SET_RATE_PARENT |
+ CLK_IGNORE_UNUSED, 0),
GATE(CLK_SCLK_SPI2_PERIC, "sclk_spi2_peric", "div_sclk_spi2_b",
ENABLE_SCLK_TOP_PERIC, 2, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_SPI1_PERIC, "sclk_spi1_peric", "div_sclk_spi1_b",
@@ -693,7 +698,7 @@ static struct samsung_gate_clock top_gate_clks[] __initdata = {
* ATLAS_PLL & APOLLO_PLL & MEM0_PLL & MEM1_PLL & BUS_PLL & MFC_PLL
* & MPHY_PLL & G3D_PLL & DISP_PLL & ISP_PLL
*/
-static struct samsung_pll_rate_table exynos5443_pll_rates[] = {
+static const struct samsung_pll_rate_table exynos5443_pll_rates[] __initconst = {
PLL_35XX_RATE(2500000000U, 625, 6, 0),
PLL_35XX_RATE(2400000000U, 500, 5, 0),
PLL_35XX_RATE(2300000000U, 575, 6, 0),
@@ -744,7 +749,7 @@ static struct samsung_pll_rate_table exynos5443_pll_rates[] = {
};
/* AUD_PLL */
-static struct samsung_pll_rate_table exynos5443_aud_pll_rates[] = {
+static const struct samsung_pll_rate_table exynos5443_aud_pll_rates[] __initconst = {
PLL_36XX_RATE(400000000U, 200, 3, 2, 0),
PLL_36XX_RATE(393216000U, 197, 3, 2, -25690),
PLL_36XX_RATE(384000000U, 128, 2, 2, 0),
@@ -757,14 +762,14 @@ static struct samsung_pll_rate_table exynos5443_aud_pll_rates[] = {
{ /* sentinel */ }
};
-static struct samsung_pll_clock top_pll_clks[] __initdata = {
+static const struct samsung_pll_clock top_pll_clks[] __initconst = {
PLL(pll_35xx, CLK_FOUT_ISP_PLL, "fout_isp_pll", "oscclk",
ISP_PLL_LOCK, ISP_PLL_CON0, exynos5443_pll_rates),
PLL(pll_36xx, CLK_FOUT_AUD_PLL, "fout_aud_pll", "oscclk",
AUD_PLL_LOCK, AUD_PLL_CON0, exynos5443_aud_pll_rates),
};
-static struct samsung_cmu_info top_cmu_info __initdata = {
+static const struct samsung_cmu_info top_cmu_info __initconst = {
.pll_clks = top_pll_clks,
.nr_pll_clks = ARRAY_SIZE(top_pll_clks),
.mux_clks = top_mux_clks,
@@ -800,7 +805,7 @@ CLK_OF_DECLARE(exynos5433_cmu_top, "samsung,exynos5433-cmu-top",
#define DIV_CPIF 0x0600
#define ENABLE_SCLK_CPIF 0x0a00
-static unsigned long cpif_clk_regs[] __initdata = {
+static const unsigned long cpif_clk_regs[] __initconst = {
MPHY_PLL_LOCK,
MPHY_PLL_CON0,
MPHY_PLL_CON1,
@@ -813,32 +818,32 @@ static unsigned long cpif_clk_regs[] __initdata = {
/* list of all parent clocks */
PNAME(mout_mphy_pll_p) = { "oscclk", "fout_mphy_pll", };
-static struct samsung_pll_clock cpif_pll_clks[] __initdata = {
+static const struct samsung_pll_clock cpif_pll_clks[] __initconst = {
PLL(pll_35xx, CLK_FOUT_MPHY_PLL, "fout_mphy_pll", "oscclk",
MPHY_PLL_LOCK, MPHY_PLL_CON0, exynos5443_pll_rates),
};
-static struct samsung_mux_clock cpif_mux_clks[] __initdata = {
+static const struct samsung_mux_clock cpif_mux_clks[] __initconst = {
/* MUX_SEL_CPIF0 */
MUX(CLK_MOUT_MPHY_PLL, "mout_mphy_pll", mout_mphy_pll_p, MUX_SEL_CPIF0,
0, 1),
};
-static struct samsung_div_clock cpif_div_clks[] __initdata = {
+static const struct samsung_div_clock cpif_div_clks[] __initconst = {
/* DIV_CPIF */
DIV(CLK_DIV_SCLK_MPHY, "div_sclk_mphy", "mout_mphy_pll", DIV_CPIF,
0, 6),
};
-static struct samsung_gate_clock cpif_gate_clks[] __initdata = {
+static const struct samsung_gate_clock cpif_gate_clks[] __initconst = {
/* ENABLE_SCLK_CPIF */
GATE(CLK_SCLK_MPHY_PLL, "sclk_mphy_pll", "mout_mphy_pll",
- ENABLE_SCLK_CPIF, 9, 0, 0),
+ ENABLE_SCLK_CPIF, 9, CLK_IGNORE_UNUSED, 0),
GATE(CLK_SCLK_UFS_MPHY, "sclk_ufs_mphy", "div_sclk_mphy",
ENABLE_SCLK_CPIF, 4, 0, 0),
};
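A recurring change in this file is adding CLK_IGNORE_UNUSED, as on sclk_mphy_pll above. The flag only affects the framework's late-boot cleanup; a short note on the semantics (common clock framework behavior, not introduced by this patch):

/*
 * At late_initcall time clk_disable_unused() gates every clock
 * that is enabled in hardware but has no Linux consumer. A clock
 * that must keep running although no in-kernel driver claims it
 * yet (the PHY, secure-block and debug clocks in these hunks)
 * opts out of that sweep:
 */
GATE(CLK_SCLK_MPHY_PLL, "sclk_mphy_pll", "mout_mphy_pll",
		ENABLE_SCLK_CPIF, 9, CLK_IGNORE_UNUSED, 0),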
-static struct samsung_cmu_info cpif_cmu_info __initdata = {
+static const struct samsung_cmu_info cpif_cmu_info __initconst = {
.pll_clks = cpif_pll_clks,
.nr_pll_clks = ARRAY_SIZE(cpif_pll_clks),
.mux_clks = cpif_mux_clks,
@@ -939,7 +944,7 @@ CLK_OF_DECLARE(exynos5433_cmu_cpif, "samsung,exynos5433-cmu-cpif",
#define PAUSE 0x1008
#define DDRPHY_LOCK_CTRL 0x100c
-static unsigned long mif_clk_regs[] __initdata = {
+static const unsigned long mif_clk_regs[] __initconst = {
MEM0_PLL_LOCK,
MEM1_PLL_LOCK,
BUS_PLL_LOCK,
@@ -1004,7 +1009,7 @@ static unsigned long mif_clk_regs[] __initdata = {
DDRPHY_LOCK_CTRL,
};
-static struct samsung_pll_clock mif_pll_clks[] __initdata = {
+static const struct samsung_pll_clock mif_pll_clks[] __initconst = {
PLL(pll_35xx, CLK_FOUT_MEM0_PLL, "fout_mem0_pll", "oscclk",
MEM0_PLL_LOCK, MEM0_PLL_CON0, exynos5443_pll_rates),
PLL(pll_35xx, CLK_FOUT_MEM1_PLL, "fout_mem1_pll", "oscclk",
@@ -1065,7 +1070,7 @@ PNAME(mout_sclk_decon_tv_vclk_b_p) = { "mout_sclk_decon_tv_vclk_a",
PNAME(mout_sclk_dsim1_c_p) = { "mout_sclk_dsim1_b", "sclk_mphy_pll", };
PNAME(mout_sclk_dsim1_b_p) = { "mout_sclk_dsim1_a", "mout_mfc_pll_div2",};
-static struct samsung_fixed_factor_clock mif_fixed_factor_clks[] __initdata = {
+static const struct samsung_fixed_factor_clock mif_fixed_factor_clks[] __initconst = {
/* dout_{mfc|bus|mem1|mem0}_pll is half fixed rate from parent mux */
FFACTOR(CLK_DOUT_MFC_PLL, "dout_mfc_pll", "mout_mfc_pll", 1, 1, 0),
FFACTOR(CLK_DOUT_BUS_PLL, "dout_bus_pll", "mout_bus_pll", 1, 1, 0),
@@ -1073,7 +1078,7 @@ static struct samsung_fixed_factor_clock mif_fixed_factor_clks[] __initdata = {
FFACTOR(CLK_DOUT_MEM0_PLL, "dout_mem0_pll", "mout_mem0_pll", 1, 1, 0),
};
-static struct samsung_mux_clock mif_mux_clks[] __initdata = {
+static const struct samsung_mux_clock mif_mux_clks[] __initconst = {
/* MUX_SEL_MIF0 */
MUX(CLK_MOUT_MFC_PLL_DIV2, "mout_mfc_pll_div2", mout_mfc_pll_div2_p,
MUX_SEL_MIF0, 28, 1),
@@ -1169,7 +1174,7 @@ static struct samsung_mux_clock mif_mux_clks[] __initdata = {
MUX_SEL_MIF7, 0, 1),
};
-static struct samsung_div_clock mif_div_clks[] __initdata = {
+static const struct samsung_div_clock mif_div_clks[] __initconst = {
/* DIV_MIF1 */
DIV(CLK_DIV_SCLK_HPM_MIF, "div_sclk_hpm_mif", "div_clk2x_phy",
DIV_MIF1, 16, 2),
@@ -1223,7 +1228,7 @@ static struct samsung_div_clock mif_div_clks[] __initdata = {
0, 3),
};
-static struct samsung_gate_clock mif_gate_clks[] __initdata = {
+static const struct samsung_gate_clock mif_gate_clks[] __initconst = {
/* ENABLE_ACLK_MIF0 */
	GATE(CLK_CLK2X_PHY1, "clk2x_phy1", "div_clk2x_phy", ENABLE_ACLK_MIF0,
19, CLK_IGNORE_UNUSED, 0),
@@ -1440,11 +1445,13 @@ static struct samsung_gate_clock mif_gate_clks[] __initdata = {
/* ENABLE_PCLK_MIF_SECURE_DREX0_TZ */
GATE(CLK_PCLK_DREX0_TZ, "pclk_drex0_tz", "div_aclk_mif_133",
- ENABLE_PCLK_MIF_SECURE_DREX0_TZ, 0, 0, 0),
+ ENABLE_PCLK_MIF_SECURE_DREX0_TZ, 0,
+ CLK_IGNORE_UNUSED, 0),
/* ENABLE_PCLK_MIF_SECURE_DREX1_TZ */
GATE(CLK_PCLK_DREX1_TZ, "pclk_drex1_tz", "div_aclk_mif_133",
- ENABLE_PCLK_MIF_SECURE_DREX1_TZ, 0, 0, 0),
+ ENABLE_PCLK_MIF_SECURE_DREX1_TZ, 0,
+ CLK_IGNORE_UNUSED, 0),
/* ENABLE_PCLK_MIF_SECURE_MONOTONIC_CNT */
GATE(CLK_PCLK_MONOTONIC_CNT, "pclk_monotonic_cnt", "div_aclk_mif_133",
@@ -1486,7 +1493,7 @@ static struct samsung_gate_clock mif_gate_clks[] __initdata = {
ENABLE_SCLK_MIF, 0, CLK_IGNORE_UNUSED, 0),
};
-static struct samsung_cmu_info mif_cmu_info __initdata = {
+static const struct samsung_cmu_info mif_cmu_info __initconst = {
.pll_clks = mif_pll_clks,
.nr_pll_clks = ARRAY_SIZE(mif_pll_clks),
.mux_clks = mif_mux_clks,
@@ -1522,7 +1529,7 @@ CLK_OF_DECLARE(exynos5433_cmu_mif, "samsung,exynos5433-cmu-mif",
#define ENABLE_IP_PERIC1 0x0B04
#define ENABLE_IP_PERIC2 0x0B08
-static unsigned long peric_clk_regs[] __initdata = {
+static const unsigned long peric_clk_regs[] __initconst = {
DIV_PERIC,
ENABLE_ACLK_PERIC,
ENABLE_PCLK_PERIC0,
@@ -1533,13 +1540,13 @@ static unsigned long peric_clk_regs[] __initdata = {
ENABLE_IP_PERIC2,
};
-static struct samsung_div_clock peric_div_clks[] __initdata = {
+static const struct samsung_div_clock peric_div_clks[] __initconst = {
/* DIV_PERIC */
DIV(CLK_DIV_SCLK_SCI, "div_sclk_sci", "oscclk", DIV_PERIC, 4, 4),
DIV(CLK_DIV_SCLK_SC_IN, "div_sclk_sc_in", "oscclk", DIV_PERIC, 0, 4),
};
-static struct samsung_gate_clock peric_gate_clks[] __initdata = {
+static const struct samsung_gate_clock peric_gate_clks[] __initconst = {
/* ENABLE_ACLK_PERIC */
GATE(CLK_ACLK_AHB2APB_PERIC2P, "aclk_ahb2apb_peric2p", "aclk_peric_66",
ENABLE_ACLK_PERIC, 3, CLK_IGNORE_UNUSED, 0),
@@ -1654,8 +1661,7 @@ static struct samsung_gate_clock peric_gate_clks[] __initdata = {
GATE(CLK_SCLK_IOCLK_SPI2, "sclk_ioclk_spi2", "ioclk_spi2_clk_in",
ENABLE_SCLK_PERIC, 13, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_IOCLK_SPI1, "sclk_ioclk_spi1", "ioclk_spi1_clk_in",
- ENABLE_SCLK_PERIC, 12,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
+ ENABLE_SCLK_PERIC, 12, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_IOCLK_SPI0, "sclk_ioclk_spi0", "ioclk_spi0_clk_in",
ENABLE_SCLK_PERIC, 11, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_IOCLK_I2S1_BCLK, "sclk_ioclk_i2s1_bclk",
@@ -1670,18 +1676,21 @@ static struct samsung_gate_clock peric_gate_clks[] __initdata = {
GATE(CLK_SCLK_SPI2, "sclk_spi2", "sclk_spi2_peric", ENABLE_SCLK_PERIC,
5, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_SPI1, "sclk_spi1", "sclk_spi1_peric", ENABLE_SCLK_PERIC,
- 4, CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
+ 4, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_SPI0, "sclk_spi0", "sclk_spi0_peric", ENABLE_SCLK_PERIC,
3, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_UART2, "sclk_uart2", "sclk_uart2_peric",
- ENABLE_SCLK_PERIC, 2, CLK_SET_RATE_PARENT, 0),
+ ENABLE_SCLK_PERIC, 2,
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
GATE(CLK_SCLK_UART1, "sclk_uart1", "sclk_uart1_peric",
- ENABLE_SCLK_PERIC, 1, CLK_SET_RATE_PARENT, 0),
+ ENABLE_SCLK_PERIC, 1,
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
GATE(CLK_SCLK_UART0, "sclk_uart0", "sclk_uart0_peric",
- ENABLE_SCLK_PERIC, 0, CLK_SET_RATE_PARENT, 0),
+ ENABLE_SCLK_PERIC, 0,
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
};
-static struct samsung_cmu_info peric_cmu_info __initdata = {
+static const struct samsung_cmu_info peric_cmu_info __initconst = {
.div_clks = peric_div_clks,
.nr_div_clks = ARRAY_SIZE(peric_div_clks),
.gate_clks = peric_gate_clks,
@@ -1728,7 +1737,7 @@ CLK_OF_DECLARE(exynos5433_cmu_peric, "samsung,exynos5433-cmu-peric",
#define ENABLE_IP_PERIS_SECURE_ANTIBRK_CNT 0x0b1c
#define ENABLE_IP_PERIS_SECURE_OTP_CON 0x0b20
-static unsigned long peris_clk_regs[] __initdata = {
+static const unsigned long peris_clk_regs[] __initconst = {
ENABLE_ACLK_PERIS,
ENABLE_PCLK_PERIS,
ENABLE_PCLK_PERIS_SECURE_TZPC,
@@ -1756,7 +1765,7 @@ static unsigned long peris_clk_regs[] __initdata = {
ENABLE_IP_PERIS_SECURE_OTP_CON,
};
-static struct samsung_gate_clock peris_gate_clks[] __initdata = {
+static const struct samsung_gate_clock peris_gate_clks[] __initconst = {
/* ENABLE_ACLK_PERIS */
GATE(CLK_ACLK_AHB2APB_PERIS1P, "aclk_ahb2apb_peris1p", "aclk_peris_66",
ENABLE_ACLK_PERIS, 2, CLK_IGNORE_UNUSED, 0),
@@ -1875,7 +1884,7 @@ static struct samsung_gate_clock peris_gate_clks[] __initdata = {
ENABLE_SCLK_PERIS_SECURE_OTP_CON, 0, 0, 0),
};
-static struct samsung_cmu_info peris_cmu_info __initdata = {
+static const struct samsung_cmu_info peris_cmu_info __initconst = {
.gate_clks = peris_gate_clks,
.nr_gate_clks = ARRAY_SIZE(peris_gate_clks),
.nr_clk_ids = PERIS_NR_CLK,
@@ -1959,7 +1968,7 @@ PNAME(mout_sclk_mphy_p)
= { "mout_sclk_ufs_mphy_user",
"mout_phyclk_lli_mphy_to_ufs_user", };
-static unsigned long fsys_clk_regs[] __initdata = {
+static const unsigned long fsys_clk_regs[] __initconst = {
MUX_SEL_FSYS0,
MUX_SEL_FSYS1,
MUX_SEL_FSYS2,
@@ -1980,7 +1989,7 @@ static unsigned long fsys_clk_regs[] __initdata = {
ENABLE_IP_FSYS1,
};
-static struct samsung_fixed_rate_clock fsys_fixed_clks[] __initdata = {
+static const struct samsung_fixed_rate_clock fsys_fixed_clks[] __initconst = {
/* PHY clocks from USBDRD30_PHY */
FRATE(CLK_PHYCLK_USBDRD30_UDRD30_PHYCLOCK_PHY,
"phyclk_usbdrd30_udrd30_phyclock_phy", NULL,
@@ -2020,7 +2029,7 @@ static struct samsung_fixed_rate_clock fsys_fixed_clks[] __initdata = {
NULL, 0, 26000000),
};
-static struct samsung_mux_clock fsys_mux_clks[] __initdata = {
+static const struct samsung_mux_clock fsys_mux_clks[] __initconst = {
/* MUX_SEL_FSYS0 */
MUX(CLK_MOUT_SCLK_UFS_MPHY_USER, "mout_sclk_ufs_mphy_user",
mout_sclk_ufs_mphy_user_p, MUX_SEL_FSYS0, 4, 1),
@@ -2104,7 +2113,7 @@ static struct samsung_mux_clock fsys_mux_clks[] __initdata = {
MUX_SEL_FSYS4, 0, 1),
};
-static struct samsung_gate_clock fsys_gate_clks[] __initdata = {
+static const struct samsung_gate_clock fsys_gate_clks[] __initconst = {
/* ENABLE_ACLK_FSYS0 */
GATE(CLK_ACLK_PCIE, "aclk_pcie", "mout_aclk_fsys_200_user",
ENABLE_ACLK_FSYS0, 13, CLK_IGNORE_UNUSED, 0),
@@ -2138,7 +2147,7 @@ static struct samsung_gate_clock fsys_gate_clks[] __initdata = {
GATE(CLK_ACLK_SMMU_PDMA1, "aclk_smmu_pdma1", "mout_aclk_fsys_200_user",
ENABLE_ACLK_FSYS1, 25, CLK_IGNORE_UNUSED, 0),
GATE(CLK_ACLK_BTS_PCIE, "aclk_bts_pcie", "mout_aclk_fsys_200_user",
- ENABLE_ACLK_FSYS1, 24, 0, 0),
+ ENABLE_ACLK_FSYS1, 24, CLK_IGNORE_UNUSED, 0),
GATE(CLK_ACLK_AXIUS_PDMA1, "aclk_axius_pdma1",
"mout_aclk_fsys_200_user", ENABLE_ACLK_FSYS1,
22, CLK_IGNORE_UNUSED, 0),
@@ -2185,13 +2194,13 @@ static struct samsung_gate_clock fsys_gate_clks[] __initdata = {
/* ENABLE_PCLK_FSYS */
GATE(CLK_PCLK_PCIE_CTRL, "pclk_pcie_ctrl", "mout_aclk_fsys_200_user",
- ENABLE_PCLK_FSYS, 17, 0, 0),
+ ENABLE_PCLK_FSYS, 17, CLK_IGNORE_UNUSED, 0),
GATE(CLK_PCLK_SMMU_PDMA1, "pclk_smmu_pdma1", "mout_aclk_fsys_200_user",
ENABLE_PCLK_FSYS, 16, CLK_IGNORE_UNUSED, 0),
GATE(CLK_PCLK_PCIE_PHY, "pclk_pcie_phy", "mout_aclk_fsys_200_user",
- ENABLE_PCLK_FSYS, 14, 0, 0),
+ ENABLE_PCLK_FSYS, 14, CLK_IGNORE_UNUSED, 0),
GATE(CLK_PCLK_BTS_PCIE, "pclk_bts_pcie", "mout_aclk_fsys_200_user",
- ENABLE_PCLK_FSYS, 13, 0, 0),
+ ENABLE_PCLK_FSYS, 13, CLK_IGNORE_UNUSED, 0),
GATE(CLK_PCLK_SMMU_PDMA0, "pclk_smmu_pdma0", "mout_aclk_fsys_200_user",
ENABLE_PCLK_FSYS, 8, CLK_IGNORE_UNUSED, 0),
GATE(CLK_PCLK_BTS_UFS, "pclk_bts_ufs", "mout_aclk_fsys_200_user",
@@ -2270,11 +2279,12 @@ static struct samsung_gate_clock fsys_gate_clks[] __initdata = {
ENABLE_SCLK_FSYS, 0, 0, 0),
/* ENABLE_IP_FSYS0 */
+ GATE(CLK_PCIE, "pcie", "sclk_pcie_100", ENABLE_IP_FSYS0, 17, 0, 0),
GATE(CLK_PDMA1, "pdma1", "aclk_pdma1", ENABLE_IP_FSYS0, 15, 0, 0),
GATE(CLK_PDMA0, "pdma0", "aclk_pdma0", ENABLE_IP_FSYS0, 0, 0, 0),
};
-static struct samsung_cmu_info fsys_cmu_info __initdata = {
+static const struct samsung_cmu_info fsys_cmu_info __initconst = {
.mux_clks = fsys_mux_clks,
.nr_mux_clks = ARRAY_SIZE(fsys_mux_clks),
.gate_clks = fsys_gate_clks,
@@ -2310,7 +2320,7 @@ CLK_OF_DECLARE(exynos5433_cmu_fsys, "samsung,exynos5433-cmu-fsys",
#define DIV_ENABLE_IP_G2D1 0x0b04
#define DIV_ENABLE_IP_G2D_SECURE_SMMU_G2D 0x0b08
-static unsigned long g2d_clk_regs[] __initdata = {
+static const unsigned long g2d_clk_regs[] __initconst = {
MUX_SEL_G2D0,
MUX_SEL_ENABLE_G2D0,
DIV_G2D,
@@ -2327,7 +2337,7 @@ static unsigned long g2d_clk_regs[] __initdata = {
PNAME(mout_aclk_g2d_266_user_p) = { "oscclk", "aclk_g2d_266", };
PNAME(mout_aclk_g2d_400_user_p) = { "oscclk", "aclk_g2d_400", };
-static struct samsung_mux_clock g2d_mux_clks[] __initdata = {
+static const struct samsung_mux_clock g2d_mux_clks[] __initconst = {
/* MUX_SEL_G2D0 */
MUX(CLK_MUX_ACLK_G2D_266_USER, "mout_aclk_g2d_266_user",
mout_aclk_g2d_266_user_p, MUX_SEL_G2D0, 4, 1),
@@ -2335,13 +2345,13 @@ static struct samsung_mux_clock g2d_mux_clks[] __initdata = {
mout_aclk_g2d_400_user_p, MUX_SEL_G2D0, 0, 1),
};
-static struct samsung_div_clock g2d_div_clks[] __initdata = {
+static const struct samsung_div_clock g2d_div_clks[] __initconst = {
/* DIV_G2D */
DIV(CLK_DIV_PCLK_G2D, "div_pclk_g2d", "mout_aclk_g2d_266_user",
DIV_G2D, 0, 2),
};
-static struct samsung_gate_clock g2d_gate_clks[] __initdata = {
+static const struct samsung_gate_clock g2d_gate_clks[] __initconst = {
/* DIV_ENABLE_ACLK_G2D */
GATE(CLK_ACLK_SMMU_MDMA1, "aclk_smmu_mdma1", "mout_aclk_g2d_266_user",
DIV_ENABLE_ACLK_G2D, 12, 0, 0),
@@ -2398,7 +2408,7 @@ static struct samsung_gate_clock g2d_gate_clks[] __initdata = {
DIV_ENABLE_PCLK_G2D_SECURE_SMMU_G2D, 0, 0, 0),
};
-static struct samsung_cmu_info g2d_cmu_info __initdata = {
+static const struct samsung_cmu_info g2d_cmu_info __initconst = {
.mux_clks = g2d_mux_clks,
.nr_mux_clks = ARRAY_SIZE(g2d_mux_clks),
.div_clks = g2d_div_clks,
@@ -2454,7 +2464,7 @@ CLK_OF_DECLARE(exynos5433_cmu_g2d, "samsung,exynos5433-cmu-g2d",
#define CLKOUT_CMU_DISP 0x0c00
#define CLKOUT_CMU_DISP_DIV_STAT 0x0c04
-static unsigned long disp_clk_regs[] __initdata = {
+static const unsigned long disp_clk_regs[] __initconst = {
DISP_PLL_LOCK,
DISP_PLL_CON0,
DISP_PLL_CON1,
@@ -2527,12 +2537,12 @@ PNAME(mout_sclk_decon_tv_vclk_c_disp_p) = {
PNAME(mout_sclk_decon_tv_vclk_b_disp_p) = { "mout_sclk_decon_tv_vclk_a_disp",
"mout_sclk_decon_tv_vclk_user", };
-static struct samsung_pll_clock disp_pll_clks[] __initdata = {
+static const struct samsung_pll_clock disp_pll_clks[] __initconst = {
PLL(pll_35xx, CLK_FOUT_DISP_PLL, "fout_disp_pll", "oscclk",
DISP_PLL_LOCK, DISP_PLL_CON0, exynos5443_pll_rates),
};
-static struct samsung_fixed_factor_clock disp_fixed_factor_clks[] __initdata = {
+static const struct samsung_fixed_factor_clock disp_fixed_factor_clks[] __initconst = {
/*
* sclk_rgb_{vclk|tv_vclk} is half clock of sclk_decon_{vclk|tv_vclk}.
* The divider has fixed value (2) between sclk_rgb_{vclk|tv_vclk}
@@ -2544,7 +2554,7 @@ static struct samsung_fixed_factor_clock disp_fixed_factor_clks[] __initdata = {
1, 2, 0),
};
-static struct samsung_fixed_rate_clock disp_fixed_clks[] __initdata = {
+static const struct samsung_fixed_rate_clock disp_fixed_clks[] __initconst = {
/* PHY clocks from MIPI_DPHY1 */
FRATE(0, "phyclk_mipidphy1_bitclkdiv8_phy", NULL, 0, 188000000),
FRATE(0, "phyclk_mipidphy1_rxclkesc0_phy", NULL, 0, 100000000),
@@ -2558,7 +2568,7 @@ static struct samsung_fixed_rate_clock disp_fixed_clks[] __initdata = {
NULL, 0, 166000000),
};
-static struct samsung_mux_clock disp_mux_clks[] __initdata = {
+static const struct samsung_mux_clock disp_mux_clks[] __initconst = {
/* MUX_SEL_DISP0 */
MUX(CLK_MOUT_DISP_PLL, "mout_disp_pll", mout_disp_pll_p, MUX_SEL_DISP0,
0, 1),
@@ -2633,7 +2643,7 @@ static struct samsung_mux_clock disp_mux_clks[] __initdata = {
mout_sclk_decon_vclk_p, MUX_SEL_DISP4, 0, 1),
};
-static struct samsung_div_clock disp_div_clks[] __initdata = {
+static const struct samsung_div_clock disp_div_clks[] __initconst = {
/* DIV_DISP */
DIV(CLK_DIV_SCLK_DSIM1_DISP, "div_sclk_dsim1_disp",
"mout_sclk_dsim1_b_disp", DIV_DISP, 24, 3),
@@ -2651,7 +2661,7 @@ static struct samsung_div_clock disp_div_clks[] __initdata = {
DIV_DISP, 0, 2),
};
-static struct samsung_gate_clock disp_gate_clks[] __initdata = {
+static const struct samsung_gate_clock disp_gate_clks[] __initconst = {
/* ENABLE_ACLK_DISP0 */
GATE(CLK_ACLK_DECON_TV, "aclk_decon_tv", "mout_aclk_disp_333_user",
ENABLE_ACLK_DISP0, 2, 0, 0),
@@ -2811,7 +2821,7 @@ static struct samsung_gate_clock disp_gate_clks[] __initdata = {
"div_sclk_decon_eclk_disp", ENABLE_SCLK_DISP, 2, 0, 0),
};
-static struct samsung_cmu_info disp_cmu_info __initdata = {
+static const struct samsung_cmu_info disp_cmu_info __initconst = {
.pll_clks = disp_pll_clks,
.nr_pll_clks = ARRAY_SIZE(disp_pll_clks),
.mux_clks = disp_mux_clks,
@@ -2856,7 +2866,7 @@ CLK_OF_DECLARE(exynos5433_cmu_disp, "samsung,exynos5433-cmu-disp",
#define ENABLE_IP_AUD0 0x0b00
#define ENABLE_IP_AUD1 0x0b04
-static unsigned long aud_clk_regs[] __initdata = {
+static const unsigned long aud_clk_regs[] __initconst = {
MUX_SEL_AUD0,
MUX_SEL_AUD1,
MUX_ENABLE_AUD0,
@@ -2875,13 +2885,13 @@ static unsigned long aud_clk_regs[] __initdata = {
PNAME(mout_aud_pll_user_aud_p) = { "oscclk", "fout_aud_pll", };
PNAME(mout_sclk_aud_pcm_p) = { "mout_aud_pll_user", "ioclk_audiocdclk0",};
-static struct samsung_fixed_rate_clock aud_fixed_clks[] __initdata = {
+static const struct samsung_fixed_rate_clock aud_fixed_clks[] __initconst = {
FRATE(0, "ioclk_jtag_tclk", NULL, 0, 33000000),
FRATE(0, "ioclk_slimbus_clk", NULL, 0, 25000000),
FRATE(0, "ioclk_i2s_bclk", NULL, 0, 50000000),
};
-static struct samsung_mux_clock aud_mux_clks[] __initdata = {
+static const struct samsung_mux_clock aud_mux_clks[] __initconst = {
/* MUX_SEL_AUD0 */
MUX(CLK_MOUT_AUD_PLL_USER, "mout_aud_pll_user",
mout_aud_pll_user_aud_p, MUX_SEL_AUD0, 0, 1),
@@ -2893,7 +2903,7 @@ static struct samsung_mux_clock aud_mux_clks[] __initdata = {
MUX_SEL_AUD1, 0, 1),
};
-static struct samsung_div_clock aud_div_clks[] __initdata = {
+static const struct samsung_div_clock aud_div_clks[] __initconst = {
/* DIV_AUD0 */
DIV(CLK_DIV_ATCLK_AUD, "div_atclk_aud", "div_aud_ca5", DIV_AUD0,
12, 4),
@@ -2915,7 +2925,7 @@ static struct samsung_div_clock aud_div_clks[] __initdata = {
DIV_AUD1, 0, 4),
};
-static struct samsung_gate_clock aud_gate_clks[] __initdata = {
+static const struct samsung_gate_clock aud_gate_clks[] __initconst = {
/* ENABLE_ACLK_AUD */
GATE(CLK_ACLK_INTR_CTRL, "aclk_intr_ctrl", "div_aclk_aud",
ENABLE_ACLK_AUD, 12, 0, 0),
@@ -2962,7 +2972,7 @@ static struct samsung_gate_clock aud_gate_clks[] __initdata = {
/* ENABLE_SCLK_AUD0 */
GATE(CLK_ATCLK_AUD, "atclk_aud", "div_atclk_aud", ENABLE_SCLK_AUD0,
- 2, 0, 0),
+ 2, CLK_IGNORE_UNUSED, 0),
GATE(CLK_PCLK_DBG_AUD, "pclk_dbg_aud", "div_pclk_dbg_aud",
ENABLE_SCLK_AUD0, 1, 0, 0),
GATE(CLK_SCLK_AUD_CA5, "sclk_aud_ca5", "div_aud_ca5", ENABLE_SCLK_AUD0,
@@ -2976,7 +2986,7 @@ static struct samsung_gate_clock aud_gate_clks[] __initdata = {
GATE(CLK_SCLK_AUD_SLIMBUS, "sclk_aud_slimbus", "div_sclk_aud_slimbus",
ENABLE_SCLK_AUD1, 4, 0, 0),
GATE(CLK_SCLK_AUD_UART, "sclk_aud_uart", "div_sclk_aud_uart",
- ENABLE_SCLK_AUD1, 3, 0, 0),
+ ENABLE_SCLK_AUD1, 3, CLK_IGNORE_UNUSED, 0),
GATE(CLK_SCLK_AUD_PCM, "sclk_aud_pcm", "div_sclk_aud_pcm",
ENABLE_SCLK_AUD1, 2, 0, 0),
GATE(CLK_SCLK_I2S_BCLK, "sclk_i2s_bclk", "ioclk_i2s_bclk",
@@ -2985,7 +2995,7 @@ static struct samsung_gate_clock aud_gate_clks[] __initdata = {
ENABLE_SCLK_AUD1, 0, CLK_IGNORE_UNUSED, 0),
};
-static struct samsung_cmu_info aud_cmu_info __initdata = {
+static const struct samsung_cmu_info aud_cmu_info __initconst = {
.mux_clks = aud_mux_clks,
.nr_mux_clks = ARRAY_SIZE(aud_mux_clks),
.div_clks = aud_div_clks,
@@ -3031,24 +3041,24 @@ PNAME(mout_aclk_bus2_400_p) = { "oscclk", "aclk_bus2_400", };
ENABLE_IP_BUS0, \
ENABLE_IP_BUS1
-static unsigned long bus01_clk_regs[] __initdata = {
+static const unsigned long bus01_clk_regs[] __initconst = {
CMU_BUS_COMMON_CLK_REGS,
};
-static unsigned long bus2_clk_regs[] __initdata = {
+static const unsigned long bus2_clk_regs[] __initconst = {
MUX_SEL_BUS2,
MUX_ENABLE_BUS2,
CMU_BUS_COMMON_CLK_REGS,
};
-static struct samsung_div_clock bus0_div_clks[] __initdata = {
+static const struct samsung_div_clock bus0_div_clks[] __initconst = {
/* DIV_BUS0 */
DIV(CLK_DIV_PCLK_BUS_133, "div_pclk_bus0_133", "aclk_bus0_400",
DIV_BUS, 0, 3),
};
/* CMU_BUS0 clocks */
-static struct samsung_gate_clock bus0_gate_clks[] __initdata = {
+static const struct samsung_gate_clock bus0_gate_clks[] __initconst = {
/* ENABLE_ACLK_BUS0 */
GATE(CLK_ACLK_AHB2APB_BUSP, "aclk_ahb2apb_bus0p", "div_pclk_bus0_133",
ENABLE_ACLK_BUS, 4, CLK_IGNORE_UNUSED, 0),
@@ -3067,13 +3077,13 @@ static struct samsung_gate_clock bus0_gate_clks[] __initdata = {
};
/* CMU_BUS1 clocks */
-static struct samsung_div_clock bus1_div_clks[] __initdata = {
+static const struct samsung_div_clock bus1_div_clks[] __initconst = {
/* DIV_BUS1 */
DIV(CLK_DIV_PCLK_BUS_133, "div_pclk_bus1_133", "aclk_bus1_400",
DIV_BUS, 0, 3),
};
-static struct samsung_gate_clock bus1_gate_clks[] __initdata = {
+static const struct samsung_gate_clock bus1_gate_clks[] __initconst = {
/* ENABLE_ACLK_BUS1 */
GATE(CLK_ACLK_AHB2APB_BUSP, "aclk_ahb2apb_bus1p", "div_pclk_bus1_133",
ENABLE_ACLK_BUS, 4, CLK_IGNORE_UNUSED, 0),
@@ -3092,19 +3102,19 @@ static struct samsung_gate_clock bus1_gate_clks[] __initdata = {
};
/* CMU_BUS2 clocks */
-static struct samsung_mux_clock bus2_mux_clks[] __initdata = {
+static const struct samsung_mux_clock bus2_mux_clks[] __initconst = {
/* MUX_SEL_BUS2 */
MUX(CLK_MOUT_ACLK_BUS2_400_USER, "mout_aclk_bus2_400_user",
mout_aclk_bus2_400_p, MUX_SEL_BUS2, 0, 1),
};
-static struct samsung_div_clock bus2_div_clks[] __initdata = {
+static const struct samsung_div_clock bus2_div_clks[] __initconst = {
/* DIV_BUS2 */
DIV(CLK_DIV_PCLK_BUS_133, "div_pclk_bus2_133",
"mout_aclk_bus2_400_user", DIV_BUS, 0, 3),
};
-static struct samsung_gate_clock bus2_gate_clks[] __initdata = {
+static const struct samsung_gate_clock bus2_gate_clks[] __initconst = {
/* ENABLE_ACLK_BUS2 */
GATE(CLK_ACLK_AHB2APB_BUSP, "aclk_ahb2apb_bus2p", "div_pclk_bus2_133",
ENABLE_ACLK_BUS, 3, CLK_IGNORE_UNUSED, 0),
@@ -3133,19 +3143,19 @@ static struct samsung_gate_clock bus2_gate_clks[] __initdata = {
.nr_gate_clks = ARRAY_SIZE(bus##id##_gate_clks), \
.nr_clk_ids = BUSx_NR_CLK
-static struct samsung_cmu_info bus0_cmu_info __initdata = {
+static const struct samsung_cmu_info bus0_cmu_info __initconst = {
CMU_BUS_INFO_CLKS(0),
.clk_regs = bus01_clk_regs,
.nr_clk_regs = ARRAY_SIZE(bus01_clk_regs),
};
-static struct samsung_cmu_info bus1_cmu_info __initdata = {
+static const struct samsung_cmu_info bus1_cmu_info __initconst = {
CMU_BUS_INFO_CLKS(1),
.clk_regs = bus01_clk_regs,
.nr_clk_regs = ARRAY_SIZE(bus01_clk_regs),
};
-static struct samsung_cmu_info bus2_cmu_info __initdata = {
+static const struct samsung_cmu_info bus2_cmu_info __initconst = {
CMU_BUS_INFO_CLKS(2),
.mux_clks = bus2_mux_clks,
.nr_mux_clks = ARRAY_SIZE(bus2_mux_clks),
@@ -3189,7 +3199,7 @@ exynos5433_cmu_bus_init(2);
#define CLKOUT_CMU_G3D_DIV_STAT 0x0c04
#define CLK_STOPCTRL 0x1000
-static unsigned long g3d_clk_regs[] __initdata = {
+static const unsigned long g3d_clk_regs[] __initconst = {
G3D_PLL_LOCK,
G3D_PLL_CON0,
G3D_PLL_CON1,
@@ -3212,12 +3222,12 @@ static unsigned long g3d_clk_regs[] __initdata = {
PNAME(mout_aclk_g3d_400_p) = { "mout_g3d_pll", "aclk_g3d_400", };
PNAME(mout_g3d_pll_p) = { "oscclk", "fout_g3d_pll", };
-static struct samsung_pll_clock g3d_pll_clks[] __initdata = {
+static const struct samsung_pll_clock g3d_pll_clks[] __initconst = {
PLL(pll_35xx, CLK_FOUT_G3D_PLL, "fout_g3d_pll", "oscclk",
G3D_PLL_LOCK, G3D_PLL_CON0, exynos5443_pll_rates),
};
-static struct samsung_mux_clock g3d_mux_clks[] __initdata = {
+static const struct samsung_mux_clock g3d_mux_clks[] __initconst = {
/* MUX_SEL_G3D */
MUX_F(CLK_MOUT_ACLK_G3D_400, "mout_aclk_g3d_400", mout_aclk_g3d_400_p,
MUX_SEL_G3D, 8, 1, CLK_SET_RATE_PARENT, 0),
@@ -3225,7 +3235,7 @@ static struct samsung_mux_clock g3d_mux_clks[] __initdata = {
MUX_SEL_G3D, 0, 1, CLK_SET_RATE_PARENT, 0),
};
-static struct samsung_div_clock g3d_div_clks[] __initdata = {
+static const struct samsung_div_clock g3d_div_clks[] __initconst = {
/* DIV_G3D */
DIV(CLK_DIV_SCLK_HPM_G3D, "div_sclk_hpm_g3d", "mout_g3d_pll", DIV_G3D,
8, 2),
@@ -3235,7 +3245,7 @@ static struct samsung_div_clock g3d_div_clks[] __initdata = {
0, 3, CLK_SET_RATE_PARENT, 0),
};
-static struct samsung_gate_clock g3d_gate_clks[] __initdata = {
+static const struct samsung_gate_clock g3d_gate_clks[] __initconst = {
/* ENABLE_ACLK_G3D */
GATE(CLK_ACLK_BTS_G3D1, "aclk_bts_g3d1", "div_aclk_g3d",
ENABLE_ACLK_G3D, 7, 0, 0),
@@ -3269,7 +3279,7 @@ static struct samsung_gate_clock g3d_gate_clks[] __initdata = {
ENABLE_SCLK_G3D, 0, 0, 0),
};
-static struct samsung_cmu_info g3d_cmu_info __initdata = {
+static const struct samsung_cmu_info g3d_cmu_info __initconst = {
.pll_clks = g3d_pll_clks,
.nr_pll_clks = ARRAY_SIZE(g3d_pll_clks),
.mux_clks = g3d_mux_clks,
@@ -3310,7 +3320,7 @@ CLK_OF_DECLARE(exynos5433_cmu_g3d, "samsung,exynos5433-cmu-g3d",
#define ENABLE_IP_GSCL_SECURE_SMMU_GSCL1 0x0b0c
#define ENABLE_IP_GSCL_SECURE_SMMU_GSCL2 0x0b10
-static unsigned long gscl_clk_regs[] __initdata = {
+static const unsigned long gscl_clk_regs[] __initconst = {
MUX_SEL_GSCL,
MUX_ENABLE_GSCL,
ENABLE_ACLK_GSCL,
@@ -3332,7 +3342,7 @@ static unsigned long gscl_clk_regs[] __initdata = {
PNAME(aclk_gscl_111_user_p) = { "oscclk", "aclk_gscl_111", };
PNAME(aclk_gscl_333_user_p) = { "oscclk", "aclk_gscl_333", };
-static struct samsung_mux_clock gscl_mux_clks[] __initdata = {
+static const struct samsung_mux_clock gscl_mux_clks[] __initconst = {
/* MUX_SEL_GSCL */
MUX(CLK_MOUT_ACLK_GSCL_111_USER, "mout_aclk_gscl_111_user",
aclk_gscl_111_user_p, MUX_SEL_GSCL, 4, 1),
@@ -3340,7 +3350,7 @@ static struct samsung_mux_clock gscl_mux_clks[] __initdata = {
aclk_gscl_333_user_p, MUX_SEL_GSCL, 0, 1),
};
-static struct samsung_gate_clock gscl_gate_clks[] __initdata = {
+static const struct samsung_gate_clock gscl_gate_clks[] __initconst = {
/* ENABLE_ACLK_GSCL */
GATE(CLK_ACLK_BTS_GSCL2, "aclk_bts_gscl2", "mout_aclk_gscl_333_user",
ENABLE_ACLK_GSCL, 11, 0, 0),
@@ -3356,9 +3366,11 @@ static struct samsung_gate_clock gscl_gate_clks[] __initdata = {
GATE(CLK_ACLK_GSCLNP_111, "aclk_gsclnp_111", "mout_aclk_gscl_111_user",
ENABLE_ACLK_GSCL, 6, CLK_IGNORE_UNUSED, 0),
GATE(CLK_ACLK_GSCLRTND_333, "aclk_gsclrtnd_333",
- "mout_aclk_gscl_333_user", ENABLE_ACLK_GSCL, 5, 0, 0),
+ "mout_aclk_gscl_333_user", ENABLE_ACLK_GSCL, 5,
+ CLK_IGNORE_UNUSED, 0),
GATE(CLK_ACLK_GSCLBEND_333, "aclk_gsclbend_333",
- "mout_aclk_gscl_333_user", ENABLE_ACLK_GSCL, 4, 0, 0),
+ "mout_aclk_gscl_333_user", ENABLE_ACLK_GSCL, 4,
+ CLK_IGNORE_UNUSED, 0),
GATE(CLK_ACLK_GSD, "aclk_gsd", "mout_aclk_gscl_333_user",
ENABLE_ACLK_GSCL, 3, 0, 0),
GATE(CLK_ACLK_GSCL2, "aclk_gscl2", "mout_aclk_gscl_333_user",
@@ -3412,7 +3424,7 @@ static struct samsung_gate_clock gscl_gate_clks[] __initdata = {
ENABLE_PCLK_GSCL_SECURE_SMMU_GSCL2, 0, 0, 0),
};
-static struct samsung_cmu_info gscl_cmu_info __initdata = {
+static const struct samsung_cmu_info gscl_cmu_info __initconst = {
.mux_clks = gscl_mux_clks,
.nr_mux_clks = ARRAY_SIZE(gscl_mux_clks),
.gate_clks = gscl_gate_clks,
@@ -3465,7 +3477,7 @@ CLK_OF_DECLARE(exynos5433_cmu_gscl, "samsung,exynos5433-cmu-gscl",
#define APOLLO_INTR_SPREAD_USE_STANDBYWFI 0x1084
#define APOLLO_INTR_SPREAD_BLOCKING_DURATION 0x1088
-static unsigned long apollo_clk_regs[] __initdata = {
+static const unsigned long apollo_clk_regs[] __initconst = {
APOLLO_PLL_LOCK,
APOLLO_PLL_CON0,
APOLLO_PLL_CON1,
@@ -3500,15 +3512,16 @@ PNAME(mout_bus_pll_apollo_user_p) = { "oscclk", "sclk_bus_pll_apollo", };
PNAME(mout_apollo_p) = { "mout_apollo_pll",
"mout_bus_pll_apollo_user", };
-static struct samsung_pll_clock apollo_pll_clks[] __initdata = {
+static const struct samsung_pll_clock apollo_pll_clks[] __initconst = {
PLL(pll_35xx, CLK_FOUT_APOLLO_PLL, "fout_apollo_pll", "oscclk",
APOLLO_PLL_LOCK, APOLLO_PLL_CON0, exynos5443_pll_rates),
};
-static struct samsung_mux_clock apollo_mux_clks[] __initdata = {
+static const struct samsung_mux_clock apollo_mux_clks[] __initconst = {
/* MUX_SEL_APOLLO0 */
MUX_F(CLK_MOUT_APOLLO_PLL, "mout_apollo_pll", mout_apollo_pll_p,
- MUX_SEL_APOLLO0, 0, 1, CLK_SET_RATE_PARENT, 0),
+ MUX_SEL_APOLLO0, 0, 1, CLK_SET_RATE_PARENT |
+ CLK_RECALC_NEW_RATES, 0),
/* MUX_SEL_APOLLO1 */
MUX(CLK_MOUT_BUS_PLL_APOLLO_USER, "mout_bus_pll_apollo_user",
@@ -3519,7 +3532,7 @@ static struct samsung_mux_clock apollo_mux_clks[] __initdata = {
0, 1, CLK_SET_RATE_PARENT, 0),
};
-static struct samsung_div_clock apollo_div_clks[] __initdata = {
+static const struct samsung_div_clock apollo_div_clks[] __initconst = {
/* DIV_APOLLO0 */
DIV_F(CLK_DIV_CNTCLK_APOLLO, "div_cntclk_apollo", "div_apollo2",
DIV_APOLLO0, 24, 3, CLK_GET_RATE_NOCACHE,
@@ -3550,7 +3563,7 @@ static struct samsung_div_clock apollo_div_clks[] __initdata = {
CLK_DIVIDER_READ_ONLY),
};
-static struct samsung_gate_clock apollo_gate_clks[] __initdata = {
+static const struct samsung_gate_clock apollo_gate_clks[] __initconst = {
/* ENABLE_ACLK_APOLLO */
GATE(CLK_ACLK_ASATBSLV_APOLLO_3_CSSYS, "aclk_asatbslv_apollo_3_cssys",
"div_atclk_apollo", ENABLE_ACLK_APOLLO,
@@ -3589,28 +3602,64 @@ static struct samsung_gate_clock apollo_gate_clks[] __initdata = {
ENABLE_SCLK_APOLLO, 3, CLK_IGNORE_UNUSED, 0),
GATE(CLK_SCLK_HPM_APOLLO, "sclk_hpm_apollo", "div_sclk_hpm_apollo",
ENABLE_SCLK_APOLLO, 1, CLK_IGNORE_UNUSED, 0),
- GATE(CLK_SCLK_APOLLO, "sclk_apollo", "div_apollo2",
- ENABLE_SCLK_APOLLO, 0,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
};
-static struct samsung_cmu_info apollo_cmu_info __initdata = {
- .pll_clks = apollo_pll_clks,
- .nr_pll_clks = ARRAY_SIZE(apollo_pll_clks),
- .mux_clks = apollo_mux_clks,
- .nr_mux_clks = ARRAY_SIZE(apollo_mux_clks),
- .div_clks = apollo_div_clks,
- .nr_div_clks = ARRAY_SIZE(apollo_div_clks),
- .gate_clks = apollo_gate_clks,
- .nr_gate_clks = ARRAY_SIZE(apollo_gate_clks),
- .nr_clk_ids = APOLLO_NR_CLK,
- .clk_regs = apollo_clk_regs,
- .nr_clk_regs = ARRAY_SIZE(apollo_clk_regs),
+#define E5433_APOLLO_DIV0(cntclk, pclk_dbg, atclk, pclk, aclk) \
+ (((cntclk) << 24) | ((pclk_dbg) << 20) | ((atclk) << 16) | \
+ ((pclk) << 12) | ((aclk) << 8))
+
+#define E5433_APOLLO_DIV1(hpm, copy) \
+ (((hpm) << 4) | ((copy) << 0))
+
+static const struct exynos_cpuclk_cfg_data exynos5433_apolloclk_d[] __initconst = {
+ { 1300000, E5433_APOLLO_DIV0(3, 7, 7, 7, 2), E5433_APOLLO_DIV1(7, 1), },
+ { 1200000, E5433_APOLLO_DIV0(3, 7, 7, 7, 2), E5433_APOLLO_DIV1(7, 1), },
+ { 1100000, E5433_APOLLO_DIV0(3, 7, 7, 7, 2), E5433_APOLLO_DIV1(7, 1), },
+ { 1000000, E5433_APOLLO_DIV0(3, 7, 7, 7, 2), E5433_APOLLO_DIV1(7, 1), },
+ { 900000, E5433_APOLLO_DIV0(3, 7, 7, 7, 2), E5433_APOLLO_DIV1(7, 1), },
+ { 800000, E5433_APOLLO_DIV0(3, 7, 7, 7, 2), E5433_APOLLO_DIV1(7, 1), },
+ { 700000, E5433_APOLLO_DIV0(3, 7, 7, 7, 2), E5433_APOLLO_DIV1(7, 1), },
+ { 600000, E5433_APOLLO_DIV0(3, 7, 7, 7, 1), E5433_APOLLO_DIV1(7, 1), },
+ { 500000, E5433_APOLLO_DIV0(3, 7, 7, 7, 1), E5433_APOLLO_DIV1(7, 1), },
+ { 400000, E5433_APOLLO_DIV0(3, 7, 7, 7, 1), E5433_APOLLO_DIV1(7, 1), },
+ { 0 },
};
static void __init exynos5433_cmu_apollo_init(struct device_node *np)
{
- samsung_cmu_register_one(np, &apollo_cmu_info);
+ void __iomem *reg_base;
+ struct samsung_clk_provider *ctx;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+ panic("%s: failed to map registers\n", __func__);
+ return;
+ }
+
+ ctx = samsung_clk_init(np, reg_base, APOLLO_NR_CLK);
+ if (!ctx) {
+ panic("%s: unable to allocate ctx\n", __func__);
+ return;
+ }
+
+ samsung_clk_register_pll(ctx, apollo_pll_clks,
+ ARRAY_SIZE(apollo_pll_clks), reg_base);
+ samsung_clk_register_mux(ctx, apollo_mux_clks,
+ ARRAY_SIZE(apollo_mux_clks));
+ samsung_clk_register_div(ctx, apollo_div_clks,
+ ARRAY_SIZE(apollo_div_clks));
+ samsung_clk_register_gate(ctx, apollo_gate_clks,
+ ARRAY_SIZE(apollo_gate_clks));
+
+ exynos_register_cpu_clock(ctx, CLK_SCLK_APOLLO, "apolloclk",
+ mout_apollo_p[0], mout_apollo_p[1], 0x200,
+ exynos5433_apolloclk_d, ARRAY_SIZE(exynos5433_apolloclk_d),
+ CLK_CPU_HAS_E5433_REGS_LAYOUT);
+
+ samsung_clk_sleep_init(reg_base, apollo_clk_regs,
+ ARRAY_SIZE(apollo_clk_regs));
+
+ samsung_clk_of_add_provider(np, ctx);
}
CLK_OF_DECLARE(exynos5433_cmu_apollo, "samsung,exynos5433-cmu-apollo",
exynos5433_cmu_apollo_init);
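
The init-path rework above is the heart of this hunk: the one-shot samsung_cmu_register_one() call is open-coded so that exynos_register_cpu_clock() can be slotted in after the gates and before the suspend/resume register list and the OF provider are set up. The CMU is still registered early: CLK_OF_DECLARE hooks the function into of_clk_init(), which runs before any device probing. A minimal sketch of the pattern, placeholder names throughout:

	#include <linux/kernel.h>
	#include <linux/of.h>
	#include <linux/of_address.h>
	#include <linux/clk-provider.h>

	static void __init example_cmu_init(struct device_node *np)
	{
		void __iomem *reg_base = of_iomap(np, 0);

		if (!reg_base)
			panic("%s: failed to map registers\n", __func__);
		/* ... register PLLs/muxes/dividers/gates against reg_base ... */
	}
	/* matched against the DT compatible string at of_clk_init() time */
	CLK_OF_DECLARE(example_cmu, "vendor,example-cmu", example_cmu_init);
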
@@ -3651,7 +3700,7 @@ CLK_OF_DECLARE(exynos5433_cmu_apollo, "samsung,exynos5433-cmu-apollo",
#define ATLAS_INTR_SPREAD_USE_STANDBYWFI 0x1084
#define ATLAS_INTR_SPREAD_BLOCKING_DURATION 0x1088
-static unsigned long atlas_clk_regs[] __initdata = {
+static const unsigned long atlas_clk_regs[] __initconst = {
ATLAS_PLL_LOCK,
ATLAS_PLL_CON0,
ATLAS_PLL_CON1,
@@ -3686,15 +3735,16 @@ PNAME(mout_bus_pll_atlas_user_p) = { "oscclk", "sclk_bus_pll_atlas", };
PNAME(mout_atlas_p) = { "mout_atlas_pll",
"mout_bus_pll_atlas_user", };
-static struct samsung_pll_clock atlas_pll_clks[] __initdata = {
+static const struct samsung_pll_clock atlas_pll_clks[] __initconst = {
PLL(pll_35xx, CLK_FOUT_ATLAS_PLL, "fout_atlas_pll", "oscclk",
ATLAS_PLL_LOCK, ATLAS_PLL_CON0, exynos5443_pll_rates),
};
-static struct samsung_mux_clock atlas_mux_clks[] __initdata = {
+static const struct samsung_mux_clock atlas_mux_clks[] __initconst = {
/* MUX_SEL_ATLAS0 */
MUX_F(CLK_MOUT_ATLAS_PLL, "mout_atlas_pll", mout_atlas_pll_p,
- MUX_SEL_ATLAS0, 0, 1, CLK_SET_RATE_PARENT, 0),
+ MUX_SEL_ATLAS0, 0, 1, CLK_SET_RATE_PARENT |
+ CLK_RECALC_NEW_RATES, 0),
/* MUX_SEL_ATLAS1 */
MUX(CLK_MOUT_BUS_PLL_ATLAS_USER, "mout_bus_pll_atlas_user",
@@ -3705,7 +3755,7 @@ static struct samsung_mux_clock atlas_mux_clks[] __initdata = {
0, 1, CLK_SET_RATE_PARENT, 0),
};
-static struct samsung_div_clock atlas_div_clks[] __initdata = {
+static const struct samsung_div_clock atlas_div_clks[] __initconst = {
/* DIV_ATLAS0 */
DIV_F(CLK_DIV_CNTCLK_ATLAS, "div_cntclk_atlas", "div_atlas2",
DIV_ATLAS0, 24, 3, CLK_GET_RATE_NOCACHE,
@@ -3736,7 +3786,7 @@ static struct samsung_div_clock atlas_div_clks[] __initdata = {
CLK_DIVIDER_READ_ONLY),
};
-static struct samsung_gate_clock atlas_gate_clks[] __initdata = {
+static const struct samsung_gate_clock atlas_gate_clks[] __initconst = {
/* ENABLE_ACLK_ATLAS */
GATE(CLK_ACLK_ATB_AUD_CSSYS, "aclk_atb_aud_cssys",
"div_atclk_atlas", ENABLE_ACLK_ATLAS,
@@ -3801,28 +3851,69 @@ static struct samsung_gate_clock atlas_gate_clks[] __initdata = {
ENABLE_SCLK_ATLAS, 2, CLK_IGNORE_UNUSED, 0),
GATE(CLK_ATCLK, "atclk", "div_atclk_atlas",
ENABLE_SCLK_ATLAS, 1, CLK_IGNORE_UNUSED, 0),
- GATE(CLK_SCLK_ATLAS, "sclk_atlas", "div_atlas2",
- ENABLE_SCLK_ATLAS, 0,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
};
-static struct samsung_cmu_info atlas_cmu_info __initdata = {
- .pll_clks = atlas_pll_clks,
- .nr_pll_clks = ARRAY_SIZE(atlas_pll_clks),
- .mux_clks = atlas_mux_clks,
- .nr_mux_clks = ARRAY_SIZE(atlas_mux_clks),
- .div_clks = atlas_div_clks,
- .nr_div_clks = ARRAY_SIZE(atlas_div_clks),
- .gate_clks = atlas_gate_clks,
- .nr_gate_clks = ARRAY_SIZE(atlas_gate_clks),
- .nr_clk_ids = ATLAS_NR_CLK,
- .clk_regs = atlas_clk_regs,
- .nr_clk_regs = ARRAY_SIZE(atlas_clk_regs),
+#define E5433_ATLAS_DIV0(cntclk, pclk_dbg, atclk, pclk, aclk) \
+ (((cntclk) << 24) | ((pclk_dbg) << 20) | ((atclk) << 16) | \
+ ((pclk) << 12) | ((aclk) << 8))
+
+#define E5433_ATLAS_DIV1(hpm, copy) \
+ (((hpm) << 4) | ((copy) << 0))
+
+static const struct exynos_cpuclk_cfg_data exynos5433_atlasclk_d[] __initconst = {
+ { 1900000, E5433_ATLAS_DIV0(7, 7, 7, 7, 4), E5433_ATLAS_DIV1(7, 1), },
+ { 1800000, E5433_ATLAS_DIV0(7, 7, 7, 7, 4), E5433_ATLAS_DIV1(7, 1), },
+ { 1700000, E5433_ATLAS_DIV0(7, 7, 7, 7, 4), E5433_ATLAS_DIV1(7, 1), },
+ { 1600000, E5433_ATLAS_DIV0(7, 7, 7, 7, 4), E5433_ATLAS_DIV1(7, 1), },
+ { 1500000, E5433_ATLAS_DIV0(7, 7, 7, 7, 3), E5433_ATLAS_DIV1(7, 1), },
+ { 1400000, E5433_ATLAS_DIV0(7, 7, 7, 7, 3), E5433_ATLAS_DIV1(7, 1), },
+ { 1300000, E5433_ATLAS_DIV0(7, 7, 7, 7, 3), E5433_ATLAS_DIV1(7, 1), },
+ { 1200000, E5433_ATLAS_DIV0(7, 7, 7, 7, 3), E5433_ATLAS_DIV1(7, 1), },
+ { 1100000, E5433_ATLAS_DIV0(7, 7, 7, 7, 3), E5433_ATLAS_DIV1(7, 1), },
+ { 1000000, E5433_ATLAS_DIV0(7, 7, 7, 7, 3), E5433_ATLAS_DIV1(7, 1), },
+ { 900000, E5433_ATLAS_DIV0(7, 7, 7, 7, 2), E5433_ATLAS_DIV1(7, 1), },
+ { 800000, E5433_ATLAS_DIV0(7, 7, 7, 7, 2), E5433_ATLAS_DIV1(7, 1), },
+ { 700000, E5433_ATLAS_DIV0(7, 7, 7, 7, 2), E5433_ATLAS_DIV1(7, 1), },
+ { 600000, E5433_ATLAS_DIV0(7, 7, 7, 7, 2), E5433_ATLAS_DIV1(7, 1), },
+ { 500000, E5433_ATLAS_DIV0(7, 7, 7, 7, 2), E5433_ATLAS_DIV1(7, 1), },
+ { 0 },
};
static void __init exynos5433_cmu_atlas_init(struct device_node *np)
{
- samsung_cmu_register_one(np, &atlas_cmu_info);
+ void __iomem *reg_base;
+ struct samsung_clk_provider *ctx;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+ panic("%s: failed to map registers\n", __func__);
+ return;
+ }
+
+ ctx = samsung_clk_init(np, reg_base, ATLAS_NR_CLK);
+ if (!ctx) {
+ panic("%s: unable to allocate ctx\n", __func__);
+ return;
+ }
+
+ samsung_clk_register_pll(ctx, atlas_pll_clks,
+ ARRAY_SIZE(atlas_pll_clks), reg_base);
+ samsung_clk_register_mux(ctx, atlas_mux_clks,
+ ARRAY_SIZE(atlas_mux_clks));
+ samsung_clk_register_div(ctx, atlas_div_clks,
+ ARRAY_SIZE(atlas_div_clks));
+ samsung_clk_register_gate(ctx, atlas_gate_clks,
+ ARRAY_SIZE(atlas_gate_clks));
+
+ exynos_register_cpu_clock(ctx, CLK_SCLK_ATLAS, "atlasclk",
+ mout_atlas_p[0], mout_atlas_p[1], 0x200,
+ exynos5433_atlasclk_d, ARRAY_SIZE(exynos5433_atlasclk_d),
+ CLK_CPU_HAS_E5433_REGS_LAYOUT);
+
+ samsung_clk_sleep_init(reg_base, atlas_clk_regs,
+ ARRAY_SIZE(atlas_clk_regs));
+
+ samsung_clk_of_add_provider(np, ctx);
}
CLK_OF_DECLARE(exynos5433_cmu_atlas, "samsung,exynos5433-cmu-atlas",
exynos5433_cmu_atlas_init);
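
The Atlas (Cortex-A57) cluster gets the same treatment as Apollo (Cortex-A53): the plain sclk_atlas leaf gate is dropped and its CLK_SCLK_ATLAS ID is reused for a composite CPU clock that sequences the divider and mux changes from the cfg_data table during DVFS, with CLK_CPU_HAS_E5433_REGS_LAYOUT selecting the Exynos5433-specific register layout. The helper lives in clk-cpu.c; reconstructed from the call sites above, its prototype is roughly:

	/* approximate prototype, drivers/clk/samsung/clk-cpu.h in this series */
	int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
			unsigned int lookup_id, const char *name,
			const char *parent, const char *alt_parent,
			unsigned long offset,	/* 0x200 register block here */
			const struct exynos_cpuclk_cfg_data *cfg,
			unsigned long num_cfgs, unsigned long flags);
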
@@ -3853,7 +3944,7 @@ CLK_OF_DECLARE(exynos5433_cmu_atlas, "samsung,exynos5433-cmu-atlas",
#define ENABLE_IP_MSCL_SECURE_SMMU_M2MSCALER1 0x0b0c
#define ENABLE_IP_MSCL_SECURE_SMMU_JPEG 0x0b10
-static unsigned long mscl_clk_regs[] __initdata = {
+static const unsigned long mscl_clk_regs[] __initconst = {
MUX_SEL_MSCL0,
MUX_SEL_MSCL1,
MUX_ENABLE_MSCL0,
@@ -3881,7 +3972,7 @@ PNAME(mout_aclk_mscl_400_user_p) = { "oscclk", "aclk_mscl_400", };
PNAME(mout_sclk_jpeg_p) = { "mout_sclk_jpeg_user",
"mout_aclk_mscl_400_user", };
-static struct samsung_mux_clock mscl_mux_clks[] __initdata = {
+static const struct samsung_mux_clock mscl_mux_clks[] __initconst = {
/* MUX_SEL_MSCL0 */
MUX(CLK_MOUT_SCLK_JPEG_USER, "mout_sclk_jpeg_user",
mout_sclk_jpeg_user_p, MUX_SEL_MSCL0, 4, 1),
@@ -3893,13 +3984,13 @@ static struct samsung_mux_clock mscl_mux_clks[] __initdata = {
MUX_SEL_MSCL1, 0, 1),
};
-static struct samsung_div_clock mscl_div_clks[] __initdata = {
+static const struct samsung_div_clock mscl_div_clks[] __initconst = {
/* DIV_MSCL */
DIV(CLK_DIV_PCLK_MSCL, "div_pclk_mscl", "mout_aclk_mscl_400_user",
DIV_MSCL, 0, 3),
};
-static struct samsung_gate_clock mscl_gate_clks[] __initdata = {
+static const struct samsung_gate_clock mscl_gate_clks[] __initconst = {
/* ENABLE_ACLK_MSCL */
GATE(CLK_ACLK_BTS_JPEG, "aclk_bts_jpeg", "mout_aclk_mscl_400_user",
ENABLE_ACLK_MSCL, 9, 0, 0),
@@ -3977,7 +4068,7 @@ static struct samsung_gate_clock mscl_gate_clks[] __initdata = {
CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0),
};
-static struct samsung_cmu_info mscl_cmu_info __initdata = {
+static const struct samsung_cmu_info mscl_cmu_info __initconst = {
.mux_clks = mscl_mux_clks,
.nr_mux_clks = ARRAY_SIZE(mscl_mux_clks),
.div_clks = mscl_div_clks,
@@ -4012,7 +4103,7 @@ CLK_OF_DECLARE(exynos5433_cmu_mscl, "samsung,exynos5433-cmu-mscl",
#define ENABLE_IP_MFC1 0x0b04
#define ENABLE_IP_MFC_SECURE_SMMU_MFC 0x0b08
-static unsigned long mfc_clk_regs[] __initdata = {
+static const unsigned long mfc_clk_regs[] __initconst = {
MUX_SEL_MFC,
MUX_ENABLE_MFC,
DIV_MFC,
@@ -4027,19 +4118,19 @@ static unsigned long mfc_clk_regs[] __initdata = {
PNAME(mout_aclk_mfc_400_user_p) = { "oscclk", "aclk_mfc_400", };
-static struct samsung_mux_clock mfc_mux_clks[] __initdata = {
+static const struct samsung_mux_clock mfc_mux_clks[] __initconst = {
/* MUX_SEL_MFC */
MUX(CLK_MOUT_ACLK_MFC_400_USER, "mout_aclk_mfc_400_user",
mout_aclk_mfc_400_user_p, MUX_SEL_MFC, 0, 0),
};
-static struct samsung_div_clock mfc_div_clks[] __initdata = {
+static const struct samsung_div_clock mfc_div_clks[] __initconst = {
/* DIV_MFC */
DIV(CLK_DIV_PCLK_MFC, "div_pclk_mfc", "mout_aclk_mfc_400_user",
DIV_MFC, 0, 2),
};
-static struct samsung_gate_clock mfc_gate_clks[] __initdata = {
+static const struct samsung_gate_clock mfc_gate_clks[] __initconst = {
/* ENABLE_ACLK_MFC */
GATE(CLK_ACLK_BTS_MFC_1, "aclk_bts_mfc_1", "mout_aclk_mfc_400_user",
ENABLE_ACLK_MFC, 6, 0, 0),
@@ -4085,7 +4176,7 @@ static struct samsung_gate_clock mfc_gate_clks[] __initdata = {
0, CLK_IGNORE_UNUSED, 0),
};
-static struct samsung_cmu_info mfc_cmu_info __initdata = {
+static const struct samsung_cmu_info mfc_cmu_info __initconst = {
.mux_clks = mfc_mux_clks,
.nr_mux_clks = ARRAY_SIZE(mfc_mux_clks),
.div_clks = mfc_div_clks,
@@ -4120,7 +4211,7 @@ CLK_OF_DECLARE(exynos5433_cmu_mfc, "samsung,exynos5433-cmu-mfc",
#define ENABLE_IP_HEVC1 0x0b04
#define ENABLE_IP_HEVC_SECURE_SMMU_HEVC 0x0b08
-static unsigned long hevc_clk_regs[] __initdata = {
+static const unsigned long hevc_clk_regs[] __initconst = {
MUX_SEL_HEVC,
MUX_ENABLE_HEVC,
DIV_HEVC,
@@ -4135,19 +4226,19 @@ static unsigned long hevc_clk_regs[] __initdata = {
PNAME(mout_aclk_hevc_400_user_p) = { "oscclk", "aclk_hevc_400", };
-static struct samsung_mux_clock hevc_mux_clks[] __initdata = {
+static const struct samsung_mux_clock hevc_mux_clks[] __initconst = {
/* MUX_SEL_HEVC */
MUX(CLK_MOUT_ACLK_HEVC_400_USER, "mout_aclk_hevc_400_user",
mout_aclk_hevc_400_user_p, MUX_SEL_HEVC, 0, 0),
};
-static struct samsung_div_clock hevc_div_clks[] __initdata = {
+static const struct samsung_div_clock hevc_div_clks[] __initconst = {
/* DIV_HEVC */
DIV(CLK_DIV_PCLK_HEVC, "div_pclk_hevc", "mout_aclk_hevc_400_user",
DIV_HEVC, 0, 2),
};
-static struct samsung_gate_clock hevc_gate_clks[] __initdata = {
+static const struct samsung_gate_clock hevc_gate_clks[] __initconst = {
/* ENABLE_ACLK_HEVC */
GATE(CLK_ACLK_BTS_HEVC_1, "aclk_bts_hevc_1", "mout_aclk_hevc_400_user",
ENABLE_ACLK_HEVC, 6, 0, 0),
@@ -4195,7 +4286,7 @@ static struct samsung_gate_clock hevc_gate_clks[] __initdata = {
0, CLK_IGNORE_UNUSED, 0),
};
-static struct samsung_cmu_info hevc_cmu_info __initdata = {
+static const struct samsung_cmu_info hevc_cmu_info __initconst = {
.mux_clks = hevc_mux_clks,
.nr_mux_clks = ARRAY_SIZE(hevc_mux_clks),
.div_clks = hevc_div_clks,
@@ -4232,7 +4323,7 @@ CLK_OF_DECLARE(exynos5433_cmu_hevc, "samsung,exynos5433-cmu-hevc",
#define ENABLE_IP_ISP2 0x0b08
#define ENABLE_IP_ISP3 0x0b0c
-static unsigned long isp_clk_regs[] __initdata = {
+static const unsigned long isp_clk_regs[] __initconst = {
MUX_SEL_ISP,
MUX_ENABLE_ISP,
DIV_ISP,
@@ -4250,7 +4341,7 @@ static unsigned long isp_clk_regs[] __initdata = {
PNAME(mout_aclk_isp_dis_400_user_p) = { "oscclk", "aclk_isp_dis_400", };
PNAME(mout_aclk_isp_400_user_p) = { "oscclk", "aclk_isp_400", };
-static struct samsung_mux_clock isp_mux_clks[] __initdata = {
+static const struct samsung_mux_clock isp_mux_clks[] __initconst = {
/* MUX_SEL_ISP */
MUX(CLK_MOUT_ACLK_ISP_DIS_400_USER, "mout_aclk_isp_dis_400_user",
mout_aclk_isp_dis_400_user_p, MUX_SEL_ISP, 4, 0),
@@ -4258,7 +4349,7 @@ static struct samsung_mux_clock isp_mux_clks[] __initdata = {
mout_aclk_isp_400_user_p, MUX_SEL_ISP, 0, 0),
};
-static struct samsung_div_clock isp_div_clks[] __initdata = {
+static const struct samsung_div_clock isp_div_clks[] __initconst = {
/* DIV_ISP */
DIV(CLK_DIV_PCLK_ISP_DIS, "div_pclk_isp_dis",
"mout_aclk_isp_dis_400_user", DIV_ISP, 12, 3),
@@ -4270,7 +4361,7 @@ static struct samsung_div_clock isp_div_clks[] __initdata = {
"mout_aclk_isp_400_user", DIV_ISP, 0, 3),
};
-static struct samsung_gate_clock isp_gate_clks[] __initdata = {
+static const struct samsung_gate_clock isp_gate_clks[] __initconst = {
/* ENABLE_ACLK_ISP0 */
GATE(CLK_ACLK_ISP_D_GLUE, "aclk_isp_d_glue", "mout_aclk_isp_400_user",
ENABLE_ACLK_ISP0, 6, CLK_IGNORE_UNUSED, 0),
@@ -4448,7 +4539,7 @@ static struct samsung_gate_clock isp_gate_clks[] __initdata = {
0, CLK_IGNORE_UNUSED, 0),
};
-static struct samsung_cmu_info isp_cmu_info __initdata = {
+static const struct samsung_cmu_info isp_cmu_info __initconst = {
.mux_clks = isp_mux_clks,
.nr_mux_clks = ARRAY_SIZE(isp_mux_clks),
.div_clks = isp_div_clks,
@@ -4504,7 +4595,7 @@ CLK_OF_DECLARE(exynos5433_cmu_isp, "samsung,exynos5433-cmu-isp",
#define ENABLE_IP_CAM02 0X0b08
#define ENABLE_IP_CAM03 0X0b0C
-static unsigned long cam0_clk_regs[] __initdata = {
+static const unsigned long cam0_clk_regs[] __initconst = {
MUX_SEL_CAM00,
MUX_SEL_CAM01,
MUX_SEL_CAM02,
@@ -4588,14 +4679,14 @@ PNAME(mout_sclk_pixelasync_lite_c_init_a_p) = {
"mout_aclk_cam0_552_user",
"mout_aclk_cam0_400_user", };
-static struct samsung_fixed_rate_clock cam0_fixed_clks[] __initdata = {
+static const struct samsung_fixed_rate_clock cam0_fixed_clks[] __initconst = {
FRATE(CLK_PHYCLK_RXBYTEECLKHS0_S4_PHY, "phyclk_rxbyteclkhs0_s4_phy",
NULL, 0, 100000000),
FRATE(CLK_PHYCLK_RXBYTEECLKHS0_S2A_PHY, "phyclk_rxbyteclkhs0_s2a_phy",
NULL, 0, 100000000),
};
-static struct samsung_mux_clock cam0_mux_clks[] __initdata = {
+static const struct samsung_mux_clock cam0_mux_clks[] __initconst = {
/* MUX_SEL_CAM00 */
MUX(CLK_MOUT_ACLK_CAM0_333_USER, "mout_aclk_cam0_333_user",
mout_aclk_cam0_333_user_p, MUX_SEL_CAM00, 8, 1),
@@ -4669,7 +4760,7 @@ static struct samsung_mux_clock cam0_mux_clks[] __initdata = {
MUX_SEL_CAM04, 0, 1),
};
-static struct samsung_div_clock cam0_div_clks[] __initdata = {
+static const struct samsung_div_clock cam0_div_clks[] __initconst = {
/* DIV_CAM00 */
DIV(CLK_DIV_PCLK_CAM0_50, "div_pclk_cam0_50", "div_aclk_cam0_200",
DIV_CAM00, 8, 2),
@@ -4716,7 +4807,7 @@ static struct samsung_div_clock cam0_div_clks[] __initdata = {
"mout_sclk_pixelasync_lite_c_init_b", DIV_CAM03, 0, 3),
};
-static struct samsung_gate_clock cam0_gate_clks[] __initdata = {
+static const struct samsung_gate_clock cam0_gate_clks[] __initconst = {
/* ENABLE_ACLK_CAM00 */
GATE(CLK_ACLK_CSIS1, "aclk_csis1", "div_aclk_csis1", ENABLE_ACLK_CAM00,
6, 0, 0),
@@ -4923,7 +5014,7 @@ static struct samsung_gate_clock cam0_gate_clks[] __initdata = {
ENABLE_SCLK_CAM0, 0, 0, 0),
};
-static struct samsung_cmu_info cam0_cmu_info __initdata = {
+static const struct samsung_cmu_info cam0_cmu_info __initconst = {
.mux_clks = cam0_mux_clks,
.nr_mux_clks = ARRAY_SIZE(cam0_mux_clks),
.div_clks = cam0_div_clks,
@@ -4970,7 +5061,7 @@ CLK_OF_DECLARE(exynos5433_cmu_cam0, "samsung,exynos5433-cmu-cam0",
#define ENABLE_IP_CAM11 0X0b04
#define ENABLE_IP_CAM12 0X0b08
-static unsigned long cam1_clk_regs[] __initdata = {
+static const unsigned long cam1_clk_regs[] __initconst = {
MUX_SEL_CAM10,
MUX_SEL_CAM11,
MUX_SEL_CAM12,
@@ -5016,12 +5107,12 @@ PNAME(mout_aclk_lite_c_b_p) = { "mout_aclk_lite_c_a",
PNAME(mout_aclk_lite_c_a_p) = { "mout_aclk_cam1_552_user",
"mout_aclk_cam1_400_user", };
-static struct samsung_fixed_rate_clock cam1_fixed_clks[] __initdata = {
+static const struct samsung_fixed_rate_clock cam1_fixed_clks[] __initconst = {
FRATE(CLK_PHYCLK_RXBYTEECLKHS0_S2B, "phyclk_rxbyteclkhs0_s2b_phy", NULL,
0, 100000000),
};
-static struct samsung_mux_clock cam1_mux_clks[] __initdata = {
+static const struct samsung_mux_clock cam1_mux_clks[] __initconst = {
/* MUX_SEL_CAM10 */
MUX(CLK_MOUT_SCLK_ISP_UART_USER, "mout_sclk_isp_uart_user",
mout_sclk_isp_uart_user_p, MUX_SEL_CAM10, 20, 1),
@@ -5057,7 +5148,7 @@ static struct samsung_mux_clock cam1_mux_clks[] __initdata = {
MUX_SEL_CAM12, 0, 1),
};
-static struct samsung_div_clock cam1_div_clks[] __initdata = {
+static const struct samsung_div_clock cam1_div_clks[] __initconst = {
/* DIV_CAM10 */
DIV(CLK_DIV_SCLK_ISP_MPWM, "div_sclk_isp_mpwm",
"div_pclk_cam1_83", DIV_CAM10, 16, 2),
@@ -5081,7 +5172,7 @@ static struct samsung_div_clock cam1_div_clks[] __initdata = {
DIV_CAM11, 0, 3),
};
-static struct samsung_gate_clock cam1_gate_clks[] __initdata = {
+static const struct samsung_gate_clock cam1_gate_clks[] __initconst = {
/* ENABLE_ACLK_CAM10 */
GATE(CLK_ACLK_ISP_GIC, "aclk_isp_gic", "mout_aclk_cam1_333_user",
ENABLE_ACLK_CAM10, 4, 0, 0),
@@ -5296,7 +5387,7 @@ static struct samsung_gate_clock cam1_gate_clks[] __initdata = {
ENABLE_SCLK_CAM1, 0, 0, 0),
};
-static struct samsung_cmu_info cam1_cmu_info __initdata = {
+static const struct samsung_cmu_info cam1_cmu_info __initconst = {
.mux_clks = cam1_mux_clks,
.nr_mux_clks = ARRAY_SIZE(cam1_mux_clks),
.div_clks = cam1_div_clks,
diff --git a/drivers/clk/samsung/clk-exynos5440.c b/drivers/clk/samsung/clk-exynos5440.c
index c57cff1e1..a57d01b99 100644
--- a/drivers/clk/samsung/clk-exynos5440.c
+++ b/drivers/clk/samsung/clk-exynos5440.c
@@ -35,7 +35,7 @@ static struct samsung_fixed_rate_clock exynos5440_fixed_rate_ext_clks[] __initda
};
/* fixed rate clocks */
-static struct samsung_fixed_rate_clock exynos5440_fixed_rate_clks[] __initdata = {
+static const struct samsung_fixed_rate_clock exynos5440_fixed_rate_clks[] __initconst = {
FRATE(0, "ppll", NULL, 0, 1000000000),
FRATE(0, "usb_phy0", NULL, 0, 60000000),
FRATE(0, "usb_phy1", NULL, 0, 60000000),
@@ -44,26 +44,26 @@ static struct samsung_fixed_rate_clock exynos5440_fixed_rate_clks[] __initdata =
};
/* fixed factor clocks */
-static struct samsung_fixed_factor_clock exynos5440_fixed_factor_clks[] __initdata = {
+static const struct samsung_fixed_factor_clock exynos5440_fixed_factor_clks[] __initconst = {
FFACTOR(0, "div250", "ppll", 1, 4, 0),
FFACTOR(0, "div200", "ppll", 1, 5, 0),
FFACTOR(0, "div125", "div250", 1, 2, 0),
};
/* mux clocks */
-static struct samsung_mux_clock exynos5440_mux_clks[] __initdata = {
+static const struct samsung_mux_clock exynos5440_mux_clks[] __initconst = {
MUX(0, "mout_spi", mout_spi_p, MISC_DOUT1, 5, 1),
MUX_A(CLK_ARM_CLK, "arm_clk", mout_armclk_p,
CPU_CLK_STATUS, 0, 1, "armclk"),
};
/* divider clocks */
-static struct samsung_div_clock exynos5440_div_clks[] __initdata = {
+static const struct samsung_div_clock exynos5440_div_clks[] __initconst = {
DIV(CLK_SPI_BAUD, "div_spi", "mout_spi", MISC_DOUT1, 3, 2),
};
/* gate clocks */
-static struct samsung_gate_clock exynos5440_gate_clks[] __initdata = {
+static const struct samsung_gate_clock exynos5440_gate_clks[] __initconst = {
GATE(CLK_PB0_250, "pb0_250", "div250", CLKEN_OV_VAL, 3, 0, 0),
GATE(CLK_PR0_250, "pr0_250", "div250", CLKEN_OV_VAL, 4, 0, 0),
GATE(CLK_PR1_250, "pr1_250", "div250", CLKEN_OV_VAL, 5, 0, 0),
@@ -125,8 +125,6 @@ static void __init exynos5440_clk_init(struct device_node *np)
}
ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
- if (!ctx)
- panic("%s: unable to allocate context.\n", __func__);
samsung_clk_of_register_fixed_ext(ctx, exynos5440_fixed_rate_ext_clks,
ARRAY_SIZE(exynos5440_fixed_rate_ext_clks), ext_clk_match);
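
The dropped NULL check here (and in the s3c2410/s3c2412/s3c2443, s3c64xx and s5pv210 hunks further down) is not a behavior change: in this series samsung_clk_init() owns the failure path and panics itself when allocation fails, so it can never return NULL to a caller. Abridged sketch of the centralized check, assuming the clk.c of the same series:

	/* drivers/clk/samsung/clk.c (abridged) */
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		panic("could not allocate clock provider context.\n");
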
diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c
index ad68d463b..5931a4140 100644
--- a/drivers/clk/samsung/clk-exynos7.c
+++ b/drivers/clk/samsung/clk-exynos7.c
@@ -36,7 +36,7 @@
#define ENABLE_ACLK_TOPC1 0x0804
#define ENABLE_SCLK_TOPC1 0x0A04
-static struct samsung_fixed_factor_clock topc_fixed_factor_clks[] __initdata = {
+static const struct samsung_fixed_factor_clock topc_fixed_factor_clks[] __initconst = {
FFACTOR(0, "ffac_topc_bus0_pll_div2", "mout_topc_bus0_pll", 1, 2, 0),
FFACTOR(0, "ffac_topc_bus0_pll_div4",
"ffac_topc_bus0_pll_div2", 1, 2, 0),
@@ -69,7 +69,7 @@ PNAME(mout_topc_mfc_pll_half_p) = { "mout_topc_mfc_pll",
PNAME(mout_topc_bus0_pll_out_p) = {"mout_topc_bus0_pll",
"ffac_topc_bus0_pll_div2"};
-static unsigned long topc_clk_regs[] __initdata = {
+static const unsigned long topc_clk_regs[] __initconst = {
CC_PLL_LOCK,
BUS0_PLL_LOCK,
BUS1_DPLL_LOCK,
@@ -89,7 +89,7 @@ static unsigned long topc_clk_regs[] __initdata = {
DIV_TOPC3,
};
-static struct samsung_mux_clock topc_mux_clks[] __initdata = {
+static const struct samsung_mux_clock topc_mux_clks[] __initconst = {
MUX(0, "mout_topc_bus0_pll", mout_topc_bus0_pll_ctrl_p,
MUX_SEL_TOPC0, 0, 1),
MUX(0, "mout_topc_bus1_pll", mout_topc_bus1_pll_ctrl_p,
@@ -118,7 +118,7 @@ static struct samsung_mux_clock topc_mux_clks[] __initdata = {
MUX(0, "mout_aclk_peris_66", mout_topc_group2, MUX_SEL_TOPC3, 24, 2),
};
-static struct samsung_div_clock topc_div_clks[] __initdata = {
+static const struct samsung_div_clock topc_div_clks[] __initconst = {
DIV(DOUT_ACLK_CCORE_133, "dout_aclk_ccore_133", "mout_aclk_ccore_133",
DIV_TOPC0, 4, 4),
@@ -139,14 +139,14 @@ static struct samsung_div_clock topc_div_clks[] __initdata = {
DIV_TOPC3, 28, 4),
};
-static struct samsung_pll_rate_table pll1460x_24mhz_tbl[] __initdata = {
+static const struct samsung_pll_rate_table pll1460x_24mhz_tbl[] __initconst = {
PLL_36XX_RATE(491520000, 20, 1, 0, 31457),
{},
};
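
PLL_36XX_RATE(491520000, 20, 1, 0, 31457) only records the target rate and the P/M/S/K settings; the pll_1460x recalc path (a 46xx-style PLL with a 16-bit fractional K field, see clk-pll.c below) derives fout = fin * (m + k / 2^16) / (p * 2^s). A hypothetical userspace check of that arithmetic for the 24 MHz fin_pll:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t fin = 24000000, m = 20, p = 1, s = 0, k = 31457;
		/* same integer steps as the kernel: scale by 2^16, unscale */
		uint64_t fout = (fin * ((m << 16) + k) / (p << s)) >> 16;

		/* prints 491519897, i.e. the nominal 491.52 MHz entry */
		printf("%llu\n", (unsigned long long)fout);
		return 0;
	}
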
-static struct samsung_gate_clock topc_gate_clks[] __initdata = {
+static const struct samsung_gate_clock topc_gate_clks[] __initconst = {
GATE(ACLK_CCORE_133, "aclk_ccore_133", "dout_aclk_ccore_133",
- ENABLE_ACLK_TOPC0, 4, 0, 0),
+ ENABLE_ACLK_TOPC0, 4, CLK_IS_CRITICAL, 0),
GATE(ACLK_MSCL_532, "aclk_mscl_532", "dout_aclk_mscl_532",
ENABLE_ACLK_TOPC1, 20, 0, 0),
@@ -174,7 +174,7 @@ static struct samsung_gate_clock topc_gate_clks[] __initdata = {
ENABLE_SCLK_TOPC1, 0, 0, 0),
};
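
This is one of two gates in the file promoted to CLK_IS_CRITICAL (the other is aclk_fsys0_200 below): the framework enables a critical clock at registration and holds its own reference so the enable count never reaches zero, a more explicit contract than keeping an always-needed bus clock alive through CLK_IGNORE_UNUSED consumers. Consumer-side sketch, with a hypothetical lookup:

	struct clk *c = clk_get(NULL, "aclk_ccore_133"); /* hypothetical lookup */

	clk_prepare_enable(c);
	clk_disable_unprepare(c); /* our count drops to zero, but the core's
				   * own reference keeps the bus clock running */
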
-static struct samsung_pll_clock topc_pll_clks[] __initdata = {
+static const struct samsung_pll_clock topc_pll_clks[] __initconst = {
PLL(pll_1451x, 0, "fout_bus0_pll", "fin_pll", BUS0_PLL_LOCK,
BUS0_PLL_CON0, NULL),
PLL(pll_1452x, 0, "fout_cc_pll", "fin_pll", CC_PLL_LOCK,
@@ -187,7 +187,7 @@ static struct samsung_pll_clock topc_pll_clks[] __initdata = {
AUD_PLL_CON0, pll1460x_24mhz_tbl),
};
-static struct samsung_cmu_info topc_cmu_info __initdata = {
+static const struct samsung_cmu_info topc_cmu_info __initconst = {
.pll_clks = topc_pll_clks,
.nr_pll_clks = ARRAY_SIZE(topc_pll_clks),
.mux_clks = topc_mux_clks,
@@ -256,7 +256,7 @@ PNAME(mout_top0_group3) = {"ioclk_audiocdclk0",
PNAME(mout_top0_group4) = {"ioclk_audiocdclk1", "mout_top0_aud_pll_user",
"mout_top0_bus0_pll_half", "mout_top0_bus1_pll_half"};
-static unsigned long top0_clk_regs[] __initdata = {
+static const unsigned long top0_clk_regs[] __initconst = {
MUX_SEL_TOP00,
MUX_SEL_TOP01,
MUX_SEL_TOP03,
@@ -275,7 +275,7 @@ static unsigned long top0_clk_regs[] __initdata = {
ENABLE_SCLK_TOP0_PERIC3,
};
-static struct samsung_mux_clock top0_mux_clks[] __initdata = {
+static const struct samsung_mux_clock top0_mux_clks[] __initconst = {
MUX(0, "mout_top0_aud_pll_user", mout_top0_aud_pll_user_p,
MUX_SEL_TOP00, 0, 1),
MUX(0, "mout_top0_mfc_pll_user", mout_top0_mfc_pll_user_p,
@@ -315,7 +315,7 @@ static struct samsung_mux_clock top0_mux_clks[] __initdata = {
MUX(0, "mout_sclk_spi4", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 20, 2),
};
-static struct samsung_div_clock top0_div_clks[] __initdata = {
+static const struct samsung_div_clock top0_div_clks[] __initconst = {
DIV(DOUT_ACLK_PERIC1, "dout_aclk_peric1_66", "mout_aclk_peric1_66",
DIV_TOP03, 12, 6),
DIV(DOUT_ACLK_PERIC0, "dout_aclk_peric0_66", "mout_aclk_peric0_66",
@@ -338,7 +338,7 @@ static struct samsung_div_clock top0_div_clks[] __initdata = {
DIV(0, "dout_sclk_spi4", "mout_sclk_spi4", DIV_TOP0_PERIC3, 20, 12),
};
-static struct samsung_gate_clock top0_gate_clks[] __initdata = {
+static const struct samsung_gate_clock top0_gate_clks[] __initconst = {
GATE(CLK_ACLK_PERIC0_66, "aclk_peric0_66", "dout_aclk_peric0_66",
ENABLE_ACLK_TOP03, 20, CLK_SET_RATE_PARENT, 0),
GATE(CLK_ACLK_PERIC1_66, "aclk_peric1_66", "dout_aclk_peric1_66",
@@ -372,7 +372,7 @@ static struct samsung_gate_clock top0_gate_clks[] __initdata = {
ENABLE_SCLK_TOP0_PERIC3, 20, CLK_SET_RATE_PARENT, 0),
};
-static struct samsung_fixed_factor_clock top0_fixed_factor_clks[] __initdata = {
+static const struct samsung_fixed_factor_clock top0_fixed_factor_clks[] __initconst = {
FFACTOR(0, "ffac_top0_bus0_pll_div2", "mout_top0_bus0_pll_user",
1, 2, 0),
FFACTOR(0, "ffac_top0_bus1_pll_div2", "mout_top0_bus1_pll_user",
@@ -381,7 +381,7 @@ static struct samsung_fixed_factor_clock top0_fixed_factor_clks[] __initdata = {
FFACTOR(0, "ffac_top0_mfc_pll_div2", "mout_top0_mfc_pll_user", 1, 2, 0),
};
-static struct samsung_cmu_info top0_cmu_info __initdata = {
+static const struct samsung_cmu_info top0_cmu_info __initconst = {
.mux_clks = top0_mux_clks,
.nr_mux_clks = ARRAY_SIZE(top0_mux_clks),
.div_clks = top0_div_clks,
@@ -438,7 +438,7 @@ PNAME(mout_top1_group1) = {"mout_top1_bus0_pll_half",
"mout_top1_bus1_pll_half", "mout_top1_cc_pll_half",
"mout_top1_mfc_pll_half"};
-static unsigned long top1_clk_regs[] __initdata = {
+static const unsigned long top1_clk_regs[] __initconst = {
MUX_SEL_TOP10,
MUX_SEL_TOP11,
MUX_SEL_TOP13,
@@ -455,7 +455,7 @@ static unsigned long top1_clk_regs[] __initdata = {
ENABLE_SCLK_TOP1_FSYS11,
};
-static struct samsung_mux_clock top1_mux_clks[] __initdata = {
+static const struct samsung_mux_clock top1_mux_clks[] __initconst = {
MUX(0, "mout_top1_mfc_pll_user", mout_top1_mfc_pll_user_p,
MUX_SEL_TOP10, 4, 1),
MUX(0, "mout_top1_cc_pll_user", mout_top1_cc_pll_user_p,
@@ -494,7 +494,7 @@ static struct samsung_mux_clock top1_mux_clks[] __initdata = {
MUX_SEL_TOP1_FSYS11, 24, 2),
};
-static struct samsung_div_clock top1_div_clks[] __initdata = {
+static const struct samsung_div_clock top1_div_clks[] __initconst = {
DIV(DOUT_ACLK_FSYS1_200, "dout_aclk_fsys1_200", "mout_aclk_fsys1_200",
DIV_TOP13, 24, 4),
DIV(DOUT_ACLK_FSYS0_200, "dout_aclk_fsys0_200", "mout_aclk_fsys0_200",
@@ -521,7 +521,7 @@ static struct samsung_div_clock top1_div_clks[] __initdata = {
"mout_sclk_phy_fsys1_26m", DIV_TOP1_FSYS11, 24, 6),
};
-static struct samsung_gate_clock top1_gate_clks[] __initdata = {
+static const struct samsung_gate_clock top1_gate_clks[] __initconst = {
GATE(CLK_SCLK_MMC2, "sclk_mmc2", "dout_sclk_mmc2",
ENABLE_SCLK_TOP1_FSYS0, 16, CLK_SET_RATE_PARENT, 0),
GATE(0, "sclk_usbdrd300", "dout_sclk_usbdrd300",
@@ -539,7 +539,8 @@ static struct samsung_gate_clock top1_gate_clks[] __initdata = {
ENABLE_SCLK_TOP1_FSYS11, 12, CLK_SET_RATE_PARENT, 0),
GATE(CLK_ACLK_FSYS0_200, "aclk_fsys0_200", "dout_aclk_fsys0_200",
- ENABLE_ACLK_TOP13, 28, CLK_SET_RATE_PARENT, 0),
+ ENABLE_ACLK_TOP13, 28, CLK_SET_RATE_PARENT |
+ CLK_IS_CRITICAL, 0),
GATE(CLK_ACLK_FSYS1_200, "aclk_fsys1_200", "dout_aclk_fsys1_200",
ENABLE_ACLK_TOP13, 24, CLK_SET_RATE_PARENT, 0),
@@ -548,7 +549,7 @@ static struct samsung_gate_clock top1_gate_clks[] __initdata = {
24, CLK_SET_RATE_PARENT, 0),
};
-static struct samsung_fixed_factor_clock top1_fixed_factor_clks[] __initdata = {
+static const struct samsung_fixed_factor_clock top1_fixed_factor_clks[] __initconst = {
FFACTOR(0, "ffac_top1_bus0_pll_div2", "mout_top1_bus0_pll_user",
1, 2, 0),
FFACTOR(0, "ffac_top1_bus1_pll_div2", "mout_top1_bus1_pll_user",
@@ -557,7 +558,7 @@ static struct samsung_fixed_factor_clock top1_fixed_factor_clks[] __initdata = {
FFACTOR(0, "ffac_top1_mfc_pll_div2", "mout_top1_mfc_pll_user", 1, 2, 0),
};
-static struct samsung_cmu_info top1_cmu_info __initdata = {
+static const struct samsung_cmu_info top1_cmu_info __initconst = {
.mux_clks = top1_mux_clks,
.nr_mux_clks = ARRAY_SIZE(top1_mux_clks),
.div_clks = top1_div_clks,
@@ -591,22 +592,22 @@ CLK_OF_DECLARE(exynos7_clk_top1, "samsung,exynos7-clock-top1",
*/
PNAME(mout_aclk_ccore_133_user_p) = { "fin_pll", "aclk_ccore_133" };
-static unsigned long ccore_clk_regs[] __initdata = {
+static const unsigned long ccore_clk_regs[] __initconst = {
MUX_SEL_CCORE,
ENABLE_PCLK_CCORE,
};
-static struct samsung_mux_clock ccore_mux_clks[] __initdata = {
+static const struct samsung_mux_clock ccore_mux_clks[] __initconst = {
MUX(0, "mout_aclk_ccore_133_user", mout_aclk_ccore_133_user_p,
MUX_SEL_CCORE, 1, 1),
};
-static struct samsung_gate_clock ccore_gate_clks[] __initdata = {
+static const struct samsung_gate_clock ccore_gate_clks[] __initconst = {
GATE(PCLK_RTC, "pclk_rtc", "mout_aclk_ccore_133_user",
ENABLE_PCLK_CCORE, 8, 0, 0),
};
-static struct samsung_cmu_info ccore_cmu_info __initdata = {
+static const struct samsung_cmu_info ccore_cmu_info __initconst = {
.mux_clks = ccore_mux_clks,
.nr_mux_clks = ARRAY_SIZE(ccore_mux_clks),
.gate_clks = ccore_gate_clks,
@@ -633,20 +634,20 @@ CLK_OF_DECLARE(exynos7_clk_ccore, "samsung,exynos7-clock-ccore",
PNAME(mout_aclk_peric0_66_user_p) = { "fin_pll", "aclk_peric0_66" };
PNAME(mout_sclk_uart0_user_p) = { "fin_pll", "sclk_uart0" };
-static unsigned long peric0_clk_regs[] __initdata = {
+static const unsigned long peric0_clk_regs[] __initconst = {
MUX_SEL_PERIC0,
ENABLE_PCLK_PERIC0,
ENABLE_SCLK_PERIC0,
};
-static struct samsung_mux_clock peric0_mux_clks[] __initdata = {
+static const struct samsung_mux_clock peric0_mux_clks[] __initconst = {
MUX(0, "mout_aclk_peric0_66_user", mout_aclk_peric0_66_user_p,
MUX_SEL_PERIC0, 0, 1),
MUX(0, "mout_sclk_uart0_user", mout_sclk_uart0_user_p,
MUX_SEL_PERIC0, 16, 1),
};
-static struct samsung_gate_clock peric0_gate_clks[] __initdata = {
+static const struct samsung_gate_clock peric0_gate_clks[] __initconst = {
GATE(PCLK_HSI2C0, "pclk_hsi2c0", "mout_aclk_peric0_66_user",
ENABLE_PCLK_PERIC0, 8, 0, 0),
GATE(PCLK_HSI2C1, "pclk_hsi2c1", "mout_aclk_peric0_66_user",
@@ -673,7 +674,7 @@ static struct samsung_gate_clock peric0_gate_clks[] __initdata = {
GATE(SCLK_PWM, "sclk_pwm", "fin_pll", ENABLE_SCLK_PERIC0, 21, 0, 0),
};
-static struct samsung_cmu_info peric0_cmu_info __initdata = {
+static const struct samsung_cmu_info peric0_cmu_info __initconst = {
.mux_clks = peric0_mux_clks,
.nr_mux_clks = ARRAY_SIZE(peric0_mux_clks),
.gate_clks = peric0_gate_clks,
@@ -709,7 +710,7 @@ PNAME(mout_sclk_spi2_user_p) = { "fin_pll", "sclk_spi2" };
PNAME(mout_sclk_spi3_user_p) = { "fin_pll", "sclk_spi3" };
PNAME(mout_sclk_spi4_user_p) = { "fin_pll", "sclk_spi4" };
-static unsigned long peric1_clk_regs[] __initdata = {
+static const unsigned long peric1_clk_regs[] __initconst = {
MUX_SEL_PERIC10,
MUX_SEL_PERIC11,
MUX_SEL_PERIC12,
@@ -717,7 +718,7 @@ static unsigned long peric1_clk_regs[] __initdata = {
ENABLE_SCLK_PERIC10,
};
-static struct samsung_mux_clock peric1_mux_clks[] __initdata = {
+static const struct samsung_mux_clock peric1_mux_clks[] __initconst = {
MUX(0, "mout_aclk_peric1_66_user", mout_aclk_peric1_66_user_p,
MUX_SEL_PERIC10, 0, 1),
@@ -739,7 +740,7 @@ static struct samsung_mux_clock peric1_mux_clks[] __initdata = {
MUX_SEL_PERIC11, 28, 1),
};
-static struct samsung_gate_clock peric1_gate_clks[] __initdata = {
+static const struct samsung_gate_clock peric1_gate_clks[] __initconst = {
GATE(PCLK_HSI2C2, "pclk_hsi2c2", "mout_aclk_peric1_66_user",
ENABLE_PCLK_PERIC1, 4, 0, 0),
GATE(PCLK_HSI2C3, "pclk_hsi2c3", "mout_aclk_peric1_66_user",
@@ -797,7 +798,7 @@ static struct samsung_gate_clock peric1_gate_clks[] __initdata = {
ENABLE_SCLK_PERIC10, 19, CLK_SET_RATE_PARENT, 0),
};
-static struct samsung_cmu_info peric1_cmu_info __initdata = {
+static const struct samsung_cmu_info peric1_cmu_info __initconst = {
.mux_clks = peric1_mux_clks,
.nr_mux_clks = ARRAY_SIZE(peric1_mux_clks),
.gate_clks = peric1_gate_clks,
@@ -825,7 +826,7 @@ CLK_OF_DECLARE(exynos7_clk_peric1, "samsung,exynos7-clock-peric1",
/* List of parent clocks for Muxes in CMU_PERIS */
PNAME(mout_aclk_peris_66_user_p) = { "fin_pll", "aclk_peris_66" };
-static unsigned long peris_clk_regs[] __initdata = {
+static const unsigned long peris_clk_regs[] __initconst = {
MUX_SEL_PERIS,
ENABLE_PCLK_PERIS,
ENABLE_PCLK_PERIS_SECURE_CHIPID,
@@ -833,12 +834,12 @@ static unsigned long peris_clk_regs[] __initdata = {
ENABLE_SCLK_PERIS_SECURE_CHIPID,
};
-static struct samsung_mux_clock peris_mux_clks[] __initdata = {
+static const struct samsung_mux_clock peris_mux_clks[] __initconst = {
MUX(0, "mout_aclk_peris_66_user",
mout_aclk_peris_66_user_p, MUX_SEL_PERIS, 0, 1),
};
-static struct samsung_gate_clock peris_gate_clks[] __initdata = {
+static const struct samsung_gate_clock peris_gate_clks[] __initconst = {
GATE(PCLK_WDT, "pclk_wdt", "mout_aclk_peris_66_user",
ENABLE_PCLK_PERIS, 6, 0, 0),
GATE(PCLK_TMU, "pclk_tmu_apbif", "mout_aclk_peris_66_user",
@@ -852,7 +853,7 @@ static struct samsung_gate_clock peris_gate_clks[] __initdata = {
GATE(SCLK_TMU, "sclk_tmu", "fin_pll", ENABLE_SCLK_PERIS, 10, 0, 0),
};
-static struct samsung_cmu_info peris_cmu_info __initdata = {
+static const struct samsung_cmu_info peris_cmu_info __initconst = {
.mux_clks = peris_mux_clks,
.nr_mux_clks = ARRAY_SIZE(peris_mux_clks),
.gate_clks = peris_gate_clks,
@@ -893,12 +894,12 @@ PNAME(mout_phyclk_usbdrd300_udrd30_pipe_pclk_user_p) = { "fin_pll",
"phyclk_usbdrd300_udrd30_pipe_pclk" };
/* fixed rate clocks used in the FSYS0 block */
-static struct samsung_fixed_rate_clock fixed_rate_clks_fsys0[] __initdata = {
+static const struct samsung_fixed_rate_clock fixed_rate_clks_fsys0[] __initconst = {
FRATE(0, "phyclk_usbdrd300_udrd30_phyclock", NULL, 0, 60000000),
FRATE(0, "phyclk_usbdrd300_udrd30_pipe_pclk", NULL, 0, 125000000),
};
-static unsigned long fsys0_clk_regs[] __initdata = {
+static const unsigned long fsys0_clk_regs[] __initconst = {
MUX_SEL_FSYS00,
MUX_SEL_FSYS01,
MUX_SEL_FSYS02,
@@ -909,7 +910,7 @@ static unsigned long fsys0_clk_regs[] __initdata = {
ENABLE_SCLK_FSYS04,
};
-static struct samsung_mux_clock fsys0_mux_clks[] __initdata = {
+static const struct samsung_mux_clock fsys0_mux_clks[] __initconst = {
MUX(0, "mout_aclk_fsys0_200_user", mout_aclk_fsys0_200_user_p,
MUX_SEL_FSYS00, 24, 1),
@@ -926,7 +927,7 @@ static struct samsung_mux_clock fsys0_mux_clks[] __initdata = {
MUX_SEL_FSYS02, 28, 1),
};
-static struct samsung_gate_clock fsys0_gate_clks[] __initdata = {
+static const struct samsung_gate_clock fsys0_gate_clks[] __initconst = {
GATE(ACLK_PDMA1, "aclk_pdma1", "mout_aclk_fsys0_200_user",
ENABLE_ACLK_FSYS00, 3, 0, 0),
GATE(ACLK_PDMA0, "aclk_pdma0", "mout_aclk_fsys0_200_user",
@@ -960,7 +961,7 @@ static struct samsung_gate_clock fsys0_gate_clks[] __initdata = {
ENABLE_SCLK_FSYS04, 28, 0, 0),
};
-static struct samsung_cmu_info fsys0_cmu_info __initdata = {
+static const struct samsung_cmu_info fsys0_cmu_info __initconst = {
.fixed_clks = fixed_rate_clks_fsys0,
.nr_fixed_clks = ARRAY_SIZE(fixed_rate_clks_fsys0),
.mux_clks = fsys0_mux_clks,
@@ -1005,7 +1006,7 @@ PNAME(mout_phyclk_ufs20_rx0_user_p) = { "fin_pll", "phyclk_ufs20_rx0_symbol" };
PNAME(mout_phyclk_ufs20_rx1_user_p) = { "fin_pll", "phyclk_ufs20_rx1_symbol" };
/* fixed rate clocks used in the FSYS1 block */
-static struct samsung_fixed_rate_clock fixed_rate_clks_fsys1[] __initdata = {
+static const struct samsung_fixed_rate_clock fixed_rate_clks_fsys1[] __initconst = {
FRATE(PHYCLK_UFS20_TX0_SYMBOL, "phyclk_ufs20_tx0_symbol", NULL,
0, 300000000),
FRATE(PHYCLK_UFS20_RX0_SYMBOL, "phyclk_ufs20_rx0_symbol", NULL,
@@ -1014,7 +1015,7 @@ static struct samsung_fixed_rate_clock fixed_rate_clks_fsys1[] __initdata = {
0, 300000000),
};
-static unsigned long fsys1_clk_regs[] __initdata = {
+static const unsigned long fsys1_clk_regs[] __initconst = {
MUX_SEL_FSYS10,
MUX_SEL_FSYS11,
MUX_SEL_FSYS12,
@@ -1026,7 +1027,7 @@ static unsigned long fsys1_clk_regs[] __initdata = {
ENABLE_SCLK_FSYS13,
};
-static struct samsung_mux_clock fsys1_mux_clks[] __initdata = {
+static const struct samsung_mux_clock fsys1_mux_clks[] __initconst = {
MUX(MOUT_FSYS1_PHYCLK_SEL1, "mout_fsys1_phyclk_sel1",
mout_fsys1_group_p, MUX_SEL_FSYS10, 16, 2),
MUX(0, "mout_fsys1_phyclk_sel0", mout_fsys1_group_p,
@@ -1049,12 +1050,12 @@ static struct samsung_mux_clock fsys1_mux_clks[] __initdata = {
mout_phyclk_ufs20_tx0_user_p, MUX_SEL_FSYS12, 28, 1),
};
-static struct samsung_div_clock fsys1_div_clks[] __initdata = {
+static const struct samsung_div_clock fsys1_div_clks[] __initconst = {
DIV(DOUT_PCLK_FSYS1, "dout_pclk_fsys1", "mout_aclk_fsys1_200_user",
DIV_FSYS1, 0, 2),
};
-static struct samsung_gate_clock fsys1_gate_clks[] __initdata = {
+static const struct samsung_gate_clock fsys1_gate_clks[] __initconst = {
GATE(SCLK_UFSUNIPRO20_USER, "sclk_ufsunipro20_user",
"mout_sclk_ufsunipro20_user",
ENABLE_SCLK_FSYS11, 20, 0, 0),
@@ -1089,7 +1090,7 @@ static struct samsung_gate_clock fsys1_gate_clks[] __initdata = {
ENABLE_SCLK_FSYS13, 24, CLK_IGNORE_UNUSED, 0),
};
-static struct samsung_cmu_info fsys1_cmu_info __initdata = {
+static const struct samsung_cmu_info fsys1_cmu_info __initconst = {
.fixed_clks = fixed_rate_clks_fsys1,
.nr_fixed_clks = ARRAY_SIZE(fixed_rate_clks_fsys1),
.mux_clks = fsys1_mux_clks,
@@ -1119,22 +1120,22 @@ CLK_OF_DECLARE(exynos7_clk_fsys1, "samsung,exynos7-clock-fsys1",
/* List of parent clocks for Muxes in CMU_MSCL */
PNAME(mout_aclk_mscl_532_user_p) = { "fin_pll", "aclk_mscl_532" };
-static unsigned long mscl_clk_regs[] __initdata = {
+static const unsigned long mscl_clk_regs[] __initconst = {
MUX_SEL_MSCL,
DIV_MSCL,
ENABLE_ACLK_MSCL,
ENABLE_PCLK_MSCL,
};
-static struct samsung_mux_clock mscl_mux_clks[] __initdata = {
+static const struct samsung_mux_clock mscl_mux_clks[] __initconst = {
MUX(USERMUX_ACLK_MSCL_532, "usermux_aclk_mscl_532",
mout_aclk_mscl_532_user_p, MUX_SEL_MSCL, 0, 1),
};
-static struct samsung_div_clock mscl_div_clks[] __initdata = {
+static const struct samsung_div_clock mscl_div_clks[] __initconst = {
DIV(DOUT_PCLK_MSCL, "dout_pclk_mscl", "usermux_aclk_mscl_532",
DIV_MSCL, 0, 3),
};
-static struct samsung_gate_clock mscl_gate_clks[] __initdata = {
+static const struct samsung_gate_clock mscl_gate_clks[] __initconst = {
GATE(ACLK_MSCL_0, "aclk_mscl_0", "usermux_aclk_mscl_532",
ENABLE_ACLK_MSCL, 31, 0, 0),
@@ -1204,7 +1205,7 @@ static struct samsung_gate_clock mscl_gate_clks[] __initdata = {
ENABLE_PCLK_MSCL, 20, 0, 0),
};
-static struct samsung_cmu_info mscl_cmu_info __initdata = {
+static const struct samsung_cmu_info mscl_cmu_info __initconst = {
.mux_clks = mscl_mux_clks,
.nr_mux_clks = ARRAY_SIZE(mscl_mux_clks),
.div_clks = mscl_div_clks,
@@ -1238,7 +1239,7 @@ CLK_OF_DECLARE(exynos7_clk_mscl, "samsung,exynos7-clock-mscl",
PNAME(mout_aud_pll_user_p) = { "fin_pll", "fout_aud_pll" };
PNAME(mout_aud_group_p) = { "dout_aud_cdclk", "ioclk_audiocdclk0" };
-static unsigned long aud_clk_regs[] __initdata = {
+static const unsigned long aud_clk_regs[] __initconst = {
MUX_SEL_AUD,
DIV_AUD0,
DIV_AUD1,
@@ -1247,13 +1248,13 @@ static unsigned long aud_clk_regs[] __initdata = {
ENABLE_SCLK_AUD,
};
-static struct samsung_mux_clock aud_mux_clks[] __initdata = {
+static const struct samsung_mux_clock aud_mux_clks[] __initconst = {
MUX(0, "mout_sclk_i2s", mout_aud_group_p, MUX_SEL_AUD, 12, 1),
MUX(0, "mout_sclk_pcm", mout_aud_group_p, MUX_SEL_AUD, 16, 1),
MUX(0, "mout_aud_pll_user", mout_aud_pll_user_p, MUX_SEL_AUD, 20, 1),
};
-static struct samsung_div_clock aud_div_clks[] __initdata = {
+static const struct samsung_div_clock aud_div_clks[] __initconst = {
DIV(0, "dout_aud_ca5", "mout_aud_pll_user", DIV_AUD0, 0, 4),
DIV(0, "dout_aclk_aud", "dout_aud_ca5", DIV_AUD0, 4, 4),
DIV(0, "dout_aud_pclk_dbg", "dout_aud_ca5", DIV_AUD0, 8, 4),
@@ -1265,7 +1266,7 @@ static struct samsung_div_clock aud_div_clks[] __initdata = {
DIV(0, "dout_aud_cdclk", "mout_aud_pll_user", DIV_AUD1, 24, 4),
};
-static struct samsung_gate_clock aud_gate_clks[] __initdata = {
+static const struct samsung_gate_clock aud_gate_clks[] __initconst = {
GATE(SCLK_PCM, "sclk_pcm", "dout_sclk_pcm",
ENABLE_SCLK_AUD, 27, CLK_SET_RATE_PARENT, 0),
GATE(SCLK_I2S, "sclk_i2s", "dout_sclk_i2s",
@@ -1293,7 +1294,7 @@ static struct samsung_gate_clock aud_gate_clks[] __initdata = {
GATE(ACLK_ADMA, "aclk_dmac", "dout_aclk_aud", ENABLE_ACLK_AUD, 31, 0, 0),
};
-static struct samsung_cmu_info aud_cmu_info __initdata = {
+static const struct samsung_cmu_info aud_cmu_info __initconst = {
.mux_clks = aud_mux_clks,
.nr_mux_clks = ARRAY_SIZE(aud_mux_clks),
.div_clks = aud_div_clks,
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index b7dd39610..48139bd51 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -79,7 +79,7 @@ static unsigned long samsung_pll2126_recalc_rate(struct clk_hw *hw,
u32 pll_con, mdiv, pdiv, sdiv;
u64 fvco = parent_rate;
- pll_con = __raw_readl(pll->con_reg);
+ pll_con = readl_relaxed(pll->con_reg);
mdiv = (pll_con >> PLL2126_MDIV_SHIFT) & PLL2126_MDIV_MASK;
pdiv = (pll_con >> PLL2126_PDIV_SHIFT) & PLL2126_PDIV_MASK;
sdiv = (pll_con >> PLL2126_SDIV_SHIFT) & PLL2126_SDIV_MASK;
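
From here to the end of clk-pll.c the patch converts every __raw_readl()/__raw_writel() to readl_relaxed()/writel_relaxed(). Both variants skip the memory barriers of readl()/writel(), but the relaxed accessors add the little-endian conversion, so the PLL register accesses stay correct on a big-endian kernel. Rough equivalences on ARM (a sketch, not part of the patch):

	/* __raw_readl(a):   raw 32-bit load, no byte swap, no barrier   */
	/* readl_relaxed(a): le32_to_cpu(__raw_readl(a)),   no barrier   */
	/* readl(a):         readl_relaxed(a) + a read barrier           */
	u32 pll_con = readl_relaxed(pll->con_reg); /* LE device register */
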
@@ -112,7 +112,7 @@ static unsigned long samsung_pll3000_recalc_rate(struct clk_hw *hw,
u32 pll_con, mdiv, pdiv, sdiv;
u64 fvco = parent_rate;
- pll_con = __raw_readl(pll->con_reg);
+ pll_con = readl_relaxed(pll->con_reg);
mdiv = (pll_con >> PLL3000_MDIV_SHIFT) & PLL3000_MDIV_MASK;
pdiv = (pll_con >> PLL3000_PDIV_SHIFT) & PLL3000_PDIV_MASK;
sdiv = (pll_con >> PLL3000_SDIV_SHIFT) & PLL3000_SDIV_MASK;
@@ -149,7 +149,7 @@ static unsigned long samsung_pll35xx_recalc_rate(struct clk_hw *hw,
u32 mdiv, pdiv, sdiv, pll_con;
u64 fvco = parent_rate;
- pll_con = __raw_readl(pll->con_reg);
+ pll_con = readl_relaxed(pll->con_reg);
mdiv = (pll_con >> PLL35XX_MDIV_SHIFT) & PLL35XX_MDIV_MASK;
pdiv = (pll_con >> PLL35XX_PDIV_SHIFT) & PLL35XX_PDIV_MASK;
sdiv = (pll_con >> PLL35XX_SDIV_SHIFT) & PLL35XX_SDIV_MASK;
@@ -186,19 +186,19 @@ static int samsung_pll35xx_set_rate(struct clk_hw *hw, unsigned long drate,
return -EINVAL;
}
- tmp = __raw_readl(pll->con_reg);
+ tmp = readl_relaxed(pll->con_reg);
if (!(samsung_pll35xx_mp_change(rate, tmp))) {
/* If only s change, change just s value only*/
tmp &= ~(PLL35XX_SDIV_MASK << PLL35XX_SDIV_SHIFT);
tmp |= rate->sdiv << PLL35XX_SDIV_SHIFT;
- __raw_writel(tmp, pll->con_reg);
+ writel_relaxed(tmp, pll->con_reg);
return 0;
}
/* Set PLL lock time. */
- __raw_writel(rate->pdiv * PLL35XX_LOCK_FACTOR,
+ writel_relaxed(rate->pdiv * PLL35XX_LOCK_FACTOR,
pll->lock_reg);
/* Change PLL PMS values */
@@ -208,12 +208,12 @@ static int samsung_pll35xx_set_rate(struct clk_hw *hw, unsigned long drate,
tmp |= (rate->mdiv << PLL35XX_MDIV_SHIFT) |
(rate->pdiv << PLL35XX_PDIV_SHIFT) |
(rate->sdiv << PLL35XX_SDIV_SHIFT);
- __raw_writel(tmp, pll->con_reg);
+ writel_relaxed(tmp, pll->con_reg);
/* wait_lock_time */
do {
cpu_relax();
- tmp = __raw_readl(pll->con_reg);
+ tmp = readl_relaxed(pll->con_reg);
} while (!(tmp & (PLL35XX_LOCK_STAT_MASK
<< PLL35XX_LOCK_STAT_SHIFT)));
return 0;
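
The function above shows the standard Samsung PMS reprogramming protocol that the rest of the file repeats: an S-only (post-scaler) change does not drop PLL lock and is written in place; otherwise the worst-case lock time is programmed first, the new P/M/S word is written, and the code spins on the hardware lock-status bit. Condensed idiom, helper names hypothetical:

	if (only_sdiv_changed) {
		rewrite_sdiv_field();			/* PLL keeps its lock */
	} else {
		writel_relaxed(pdiv * LOCK_FACTOR, pll->lock_reg);
		writel_relaxed(new_pms, pll->con_reg);
		do {
			cpu_relax();	/* busy-wait until hardware lock */
		} while (!(readl_relaxed(pll->con_reg) & LOCK_STAT));
	}
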
@@ -253,8 +253,8 @@ static unsigned long samsung_pll36xx_recalc_rate(struct clk_hw *hw,
s16 kdiv;
u64 fvco = parent_rate;
- pll_con0 = __raw_readl(pll->con_reg);
- pll_con1 = __raw_readl(pll->con_reg + 4);
+ pll_con0 = readl_relaxed(pll->con_reg);
+ pll_con1 = readl_relaxed(pll->con_reg + 4);
mdiv = (pll_con0 >> PLL36XX_MDIV_SHIFT) & PLL36XX_MDIV_MASK;
pdiv = (pll_con0 >> PLL36XX_PDIV_SHIFT) & PLL36XX_PDIV_MASK;
sdiv = (pll_con0 >> PLL36XX_SDIV_SHIFT) & PLL36XX_SDIV_MASK;
@@ -294,20 +294,20 @@ static int samsung_pll36xx_set_rate(struct clk_hw *hw, unsigned long drate,
return -EINVAL;
}
- pll_con0 = __raw_readl(pll->con_reg);
- pll_con1 = __raw_readl(pll->con_reg + 4);
+ pll_con0 = readl_relaxed(pll->con_reg);
+ pll_con1 = readl_relaxed(pll->con_reg + 4);
if (!(samsung_pll36xx_mpk_change(rate, pll_con0, pll_con1))) {
/* If only s change, change just s value only*/
pll_con0 &= ~(PLL36XX_SDIV_MASK << PLL36XX_SDIV_SHIFT);
pll_con0 |= (rate->sdiv << PLL36XX_SDIV_SHIFT);
- __raw_writel(pll_con0, pll->con_reg);
+ writel_relaxed(pll_con0, pll->con_reg);
return 0;
}
/* Set PLL lock time. */
- __raw_writel(rate->pdiv * PLL36XX_LOCK_FACTOR, pll->lock_reg);
+ writel_relaxed(rate->pdiv * PLL36XX_LOCK_FACTOR, pll->lock_reg);
/* Change PLL PMS values */
pll_con0 &= ~((PLL36XX_MDIV_MASK << PLL36XX_MDIV_SHIFT) |
@@ -316,16 +316,16 @@ static int samsung_pll36xx_set_rate(struct clk_hw *hw, unsigned long drate,
pll_con0 |= (rate->mdiv << PLL36XX_MDIV_SHIFT) |
(rate->pdiv << PLL36XX_PDIV_SHIFT) |
(rate->sdiv << PLL36XX_SDIV_SHIFT);
- __raw_writel(pll_con0, pll->con_reg);
+ writel_relaxed(pll_con0, pll->con_reg);
pll_con1 &= ~(PLL36XX_KDIV_MASK << PLL36XX_KDIV_SHIFT);
pll_con1 |= rate->kdiv << PLL36XX_KDIV_SHIFT;
- __raw_writel(pll_con1, pll->con_reg + 4);
+ writel_relaxed(pll_con1, pll->con_reg + 4);
/* wait_lock_time */
do {
cpu_relax();
- tmp = __raw_readl(pll->con_reg);
+ tmp = readl_relaxed(pll->con_reg);
} while (!(tmp & (1 << PLL36XX_LOCK_STAT_SHIFT)));
return 0;
@@ -366,7 +366,7 @@ static unsigned long samsung_pll45xx_recalc_rate(struct clk_hw *hw,
u32 mdiv, pdiv, sdiv, pll_con;
u64 fvco = parent_rate;
- pll_con = __raw_readl(pll->con_reg);
+ pll_con = readl_relaxed(pll->con_reg);
mdiv = (pll_con >> PLL45XX_MDIV_SHIFT) & PLL45XX_MDIV_MASK;
pdiv = (pll_con >> PLL45XX_PDIV_SHIFT) & PLL45XX_PDIV_MASK;
sdiv = (pll_con >> PLL45XX_SDIV_SHIFT) & PLL45XX_SDIV_MASK;
@@ -409,14 +409,14 @@ static int samsung_pll45xx_set_rate(struct clk_hw *hw, unsigned long drate,
return -EINVAL;
}
- con0 = __raw_readl(pll->con_reg);
- con1 = __raw_readl(pll->con_reg + 0x4);
+ con0 = readl_relaxed(pll->con_reg);
+ con1 = readl_relaxed(pll->con_reg + 0x4);
if (!(samsung_pll45xx_mp_change(con0, con1, rate))) {
/* If only s change, change just s value only*/
con0 &= ~(PLL45XX_SDIV_MASK << PLL45XX_SDIV_SHIFT);
con0 |= rate->sdiv << PLL45XX_SDIV_SHIFT;
- __raw_writel(con0, pll->con_reg);
+ writel_relaxed(con0, pll->con_reg);
return 0;
}
@@ -430,29 +430,29 @@ static int samsung_pll45xx_set_rate(struct clk_hw *hw, unsigned long drate,
(rate->sdiv << PLL45XX_SDIV_SHIFT);
/* Set PLL AFC value. */
- con1 = __raw_readl(pll->con_reg + 0x4);
+ con1 = readl_relaxed(pll->con_reg + 0x4);
con1 &= ~(PLL45XX_AFC_MASK << PLL45XX_AFC_SHIFT);
con1 |= (rate->afc << PLL45XX_AFC_SHIFT);
/* Set PLL lock time. */
switch (pll->type) {
case pll_4502:
- __raw_writel(rate->pdiv * PLL4502_LOCK_FACTOR, pll->lock_reg);
+ writel_relaxed(rate->pdiv * PLL4502_LOCK_FACTOR, pll->lock_reg);
break;
case pll_4508:
- __raw_writel(rate->pdiv * PLL4508_LOCK_FACTOR, pll->lock_reg);
+ writel_relaxed(rate->pdiv * PLL4508_LOCK_FACTOR, pll->lock_reg);
break;
default:
break;
}
/* Set new configuration. */
- __raw_writel(con1, pll->con_reg + 0x4);
- __raw_writel(con0, pll->con_reg);
+ writel_relaxed(con1, pll->con_reg + 0x4);
+ writel_relaxed(con0, pll->con_reg);
/* Wait for locking. */
start = ktime_get();
- while (!(__raw_readl(pll->con_reg) & PLL45XX_LOCKED)) {
+ while (!(readl_relaxed(pll->con_reg) & PLL45XX_LOCKED)) {
ktime_t delta = ktime_sub(ktime_get(), start);
if (ktime_to_ms(delta) > PLL_TIMEOUT_MS) {
@@ -513,8 +513,8 @@ static unsigned long samsung_pll46xx_recalc_rate(struct clk_hw *hw,
u32 mdiv, pdiv, sdiv, kdiv, pll_con0, pll_con1, shift;
u64 fvco = parent_rate;
- pll_con0 = __raw_readl(pll->con_reg);
- pll_con1 = __raw_readl(pll->con_reg + 4);
+ pll_con0 = readl_relaxed(pll->con_reg);
+ pll_con1 = readl_relaxed(pll->con_reg + 4);
mdiv = (pll_con0 >> PLL46XX_MDIV_SHIFT) & ((pll->type == pll_1460x) ?
PLL1460X_MDIV_MASK : PLL46XX_MDIV_MASK);
pdiv = (pll_con0 >> PLL46XX_PDIV_SHIFT) & PLL46XX_PDIV_MASK;
@@ -560,14 +560,14 @@ static int samsung_pll46xx_set_rate(struct clk_hw *hw, unsigned long drate,
return -EINVAL;
}
- con0 = __raw_readl(pll->con_reg);
- con1 = __raw_readl(pll->con_reg + 0x4);
+ con0 = readl_relaxed(pll->con_reg);
+ con1 = readl_relaxed(pll->con_reg + 0x4);
if (!(samsung_pll46xx_mpk_change(con0, con1, rate))) {
/* If only s change, change just s value only*/
con0 &= ~(PLL46XX_SDIV_MASK << PLL46XX_SDIV_SHIFT);
con0 |= rate->sdiv << PLL46XX_SDIV_SHIFT;
- __raw_writel(con0, pll->con_reg);
+ writel_relaxed(con0, pll->con_reg);
return 0;
}
@@ -596,7 +596,7 @@ static int samsung_pll46xx_set_rate(struct clk_hw *hw, unsigned long drate,
(rate->sdiv << PLL46XX_SDIV_SHIFT);
/* Set PLL K, MFR and MRR values. */
- con1 = __raw_readl(pll->con_reg + 0x4);
+ con1 = readl_relaxed(pll->con_reg + 0x4);
con1 &= ~((PLL46XX_KDIV_MASK << PLL46XX_KDIV_SHIFT) |
(PLL46XX_MFR_MASK << PLL46XX_MFR_SHIFT) |
(PLL46XX_MRR_MASK << PLL46XX_MRR_SHIFT));
@@ -605,13 +605,13 @@ static int samsung_pll46xx_set_rate(struct clk_hw *hw, unsigned long drate,
(rate->mrr << PLL46XX_MRR_SHIFT);
/* Write configuration to PLL */
- __raw_writel(lock, pll->lock_reg);
- __raw_writel(con0, pll->con_reg);
- __raw_writel(con1, pll->con_reg + 0x4);
+ writel_relaxed(lock, pll->lock_reg);
+ writel_relaxed(con0, pll->con_reg);
+ writel_relaxed(con1, pll->con_reg + 0x4);
/* Wait for locking. */
start = ktime_get();
- while (!(__raw_readl(pll->con_reg) & PLL46XX_LOCKED)) {
+ while (!(readl_relaxed(pll->con_reg) & PLL46XX_LOCKED)) {
ktime_t delta = ktime_sub(ktime_get(), start);
if (ktime_to_ms(delta) > PLL_TIMEOUT_MS) {
@@ -656,7 +656,7 @@ static unsigned long samsung_pll6552_recalc_rate(struct clk_hw *hw,
u32 mdiv, pdiv, sdiv, pll_con;
u64 fvco = parent_rate;
- pll_con = __raw_readl(pll->con_reg);
+ pll_con = readl_relaxed(pll->con_reg);
if (pll->type == pll_6552_s3c2416) {
mdiv = (pll_con >> PLL6552_MDIV_SHIFT_2416) & PLL6552_MDIV_MASK;
pdiv = (pll_con >> PLL6552_PDIV_SHIFT_2416) & PLL6552_PDIV_MASK;
@@ -696,8 +696,8 @@ static unsigned long samsung_pll6553_recalc_rate(struct clk_hw *hw,
u32 mdiv, pdiv, sdiv, kdiv, pll_con0, pll_con1;
u64 fvco = parent_rate;
- pll_con0 = __raw_readl(pll->con_reg);
- pll_con1 = __raw_readl(pll->con_reg + 0x4);
+ pll_con0 = readl_relaxed(pll->con_reg);
+ pll_con1 = readl_relaxed(pll->con_reg + 0x4);
mdiv = (pll_con0 >> PLL6553_MDIV_SHIFT) & PLL6553_MDIV_MASK;
pdiv = (pll_con0 >> PLL6553_PDIV_SHIFT) & PLL6553_PDIV_MASK;
sdiv = (pll_con0 >> PLL6553_SDIV_SHIFT) & PLL6553_SDIV_MASK;
@@ -734,7 +734,7 @@ static unsigned long samsung_s3c2410_pll_recalc_rate(struct clk_hw *hw,
u32 pll_con, mdiv, pdiv, sdiv;
u64 fvco = parent_rate;
- pll_con = __raw_readl(pll->con_reg);
+ pll_con = readl_relaxed(pll->con_reg);
mdiv = (pll_con >> PLLS3C2410_MDIV_SHIFT) & PLLS3C2410_MDIV_MASK;
pdiv = (pll_con >> PLLS3C2410_PDIV_SHIFT) & PLLS3C2410_PDIV_MASK;
sdiv = (pll_con >> PLLS3C2410_SDIV_SHIFT) & PLLS3C2410_SDIV_MASK;
@@ -752,7 +752,7 @@ static unsigned long samsung_s3c2440_mpll_recalc_rate(struct clk_hw *hw,
u32 pll_con, mdiv, pdiv, sdiv;
u64 fvco = parent_rate;
- pll_con = __raw_readl(pll->con_reg);
+ pll_con = readl_relaxed(pll->con_reg);
mdiv = (pll_con >> PLLS3C2410_MDIV_SHIFT) & PLLS3C2410_MDIV_MASK;
pdiv = (pll_con >> PLLS3C2410_PDIV_SHIFT) & PLLS3C2410_PDIV_MASK;
sdiv = (pll_con >> PLLS3C2410_SDIV_SHIFT) & PLLS3C2410_SDIV_MASK;
@@ -778,7 +778,7 @@ static int samsung_s3c2410_pll_set_rate(struct clk_hw *hw, unsigned long drate,
return -EINVAL;
}
- tmp = __raw_readl(pll->con_reg);
+ tmp = readl_relaxed(pll->con_reg);
/* Change PLL PMS values */
tmp &= ~((PLLS3C2410_MDIV_MASK << PLLS3C2410_MDIV_SHIFT) |
@@ -787,7 +787,7 @@ static int samsung_s3c2410_pll_set_rate(struct clk_hw *hw, unsigned long drate,
tmp |= (rate->mdiv << PLLS3C2410_MDIV_SHIFT) |
(rate->pdiv << PLLS3C2410_PDIV_SHIFT) |
(rate->sdiv << PLLS3C2410_SDIV_SHIFT);
- __raw_writel(tmp, pll->con_reg);
+ writel_relaxed(tmp, pll->con_reg);
/* Time to settle according to the manual */
udelay(300);
@@ -798,7 +798,7 @@ static int samsung_s3c2410_pll_set_rate(struct clk_hw *hw, unsigned long drate,
static int samsung_s3c2410_pll_enable(struct clk_hw *hw, int bit, bool enable)
{
struct samsung_clk_pll *pll = to_clk_pll(hw);
- u32 pll_en = __raw_readl(pll->lock_reg + PLLS3C2410_ENABLE_REG_OFFSET);
+ u32 pll_en = readl_relaxed(pll->lock_reg + PLLS3C2410_ENABLE_REG_OFFSET);
u32 pll_en_orig = pll_en;
if (enable)
@@ -806,7 +806,7 @@ static int samsung_s3c2410_pll_enable(struct clk_hw *hw, int bit, bool enable)
else
pll_en |= BIT(bit);
- __raw_writel(pll_en, pll->lock_reg + PLLS3C2410_ENABLE_REG_OFFSET);
+ writel_relaxed(pll_en, pll->lock_reg + PLLS3C2410_ENABLE_REG_OFFSET);
/* if we started the UPLL, then allow to settle */
if (enable && (pll_en_orig & BIT(bit)))
@@ -905,7 +905,7 @@ static unsigned long samsung_pll2550x_recalc_rate(struct clk_hw *hw,
u32 r, p, m, s, pll_stat;
u64 fvco = parent_rate;
- pll_stat = __raw_readl(pll->reg_base + pll->offset * 3);
+ pll_stat = readl_relaxed(pll->reg_base + pll->offset * 3);
r = (pll_stat >> PLL2550X_R_SHIFT) & PLL2550X_R_MASK;
if (!r)
return 0;
@@ -983,7 +983,7 @@ static unsigned long samsung_pll2550xx_recalc_rate(struct clk_hw *hw,
u32 mdiv, pdiv, sdiv, pll_con;
u64 fvco = parent_rate;
- pll_con = __raw_readl(pll->con_reg);
+ pll_con = readl_relaxed(pll->con_reg);
mdiv = (pll_con >> PLL2550XX_M_SHIFT) & PLL2550XX_M_MASK;
pdiv = (pll_con >> PLL2550XX_P_SHIFT) & PLL2550XX_P_MASK;
sdiv = (pll_con >> PLL2550XX_S_SHIFT) & PLL2550XX_S_MASK;
@@ -1019,19 +1019,19 @@ static int samsung_pll2550xx_set_rate(struct clk_hw *hw, unsigned long drate,
return -EINVAL;
}
- tmp = __raw_readl(pll->con_reg);
+ tmp = readl_relaxed(pll->con_reg);
if (!(samsung_pll2550xx_mp_change(rate->mdiv, rate->pdiv, tmp))) {
/* If only s change, change just s value only*/
tmp &= ~(PLL2550XX_S_MASK << PLL2550XX_S_SHIFT);
tmp |= rate->sdiv << PLL2550XX_S_SHIFT;
- __raw_writel(tmp, pll->con_reg);
+ writel_relaxed(tmp, pll->con_reg);
return 0;
}
/* Set PLL lock time. */
- __raw_writel(rate->pdiv * PLL2550XX_LOCK_FACTOR, pll->lock_reg);
+ writel_relaxed(rate->pdiv * PLL2550XX_LOCK_FACTOR, pll->lock_reg);
/* Change PLL PMS values */
tmp &= ~((PLL2550XX_M_MASK << PLL2550XX_M_SHIFT) |
@@ -1040,12 +1040,12 @@ static int samsung_pll2550xx_set_rate(struct clk_hw *hw, unsigned long drate,
tmp |= (rate->mdiv << PLL2550XX_M_SHIFT) |
(rate->pdiv << PLL2550XX_P_SHIFT) |
(rate->sdiv << PLL2550XX_S_SHIFT);
- __raw_writel(tmp, pll->con_reg);
+ writel_relaxed(tmp, pll->con_reg);
/* wait_lock_time */
do {
cpu_relax();
- tmp = __raw_readl(pll->con_reg);
+ tmp = readl_relaxed(pll->con_reg);
} while (!(tmp & (PLL2550XX_LOCK_STAT_MASK
<< PLL2550XX_LOCK_STAT_SHIFT)));
@@ -1089,8 +1089,8 @@ static unsigned long samsung_pll2650xx_recalc_rate(struct clk_hw *hw,
s16 kdiv;
u64 fvco = parent_rate;
- pll_con0 = __raw_readl(pll->con_reg);
- pll_con2 = __raw_readl(pll->con_reg + 8);
+ pll_con0 = readl_relaxed(pll->con_reg);
+ pll_con2 = readl_relaxed(pll->con_reg + 8);
mdiv = (pll_con0 >> PLL2650XX_MDIV_SHIFT) & PLL2650XX_MDIV_MASK;
pdiv = (pll_con0 >> PLL2650XX_PDIV_SHIFT) & PLL2650XX_PDIV_MASK;
sdiv = (pll_con0 >> PLL2650XX_SDIV_SHIFT) & PLL2650XX_SDIV_MASK;
@@ -1117,8 +1117,8 @@ static int samsung_pll2650xx_set_rate(struct clk_hw *hw, unsigned long drate,
return -EINVAL;
}
- pll_con0 = __raw_readl(pll->con_reg);
- pll_con2 = __raw_readl(pll->con_reg + 8);
+ pll_con0 = readl_relaxed(pll->con_reg);
+ pll_con2 = readl_relaxed(pll->con_reg + 8);
/* Change PLL PMS values */
pll_con0 &= ~(PLL2650XX_MDIV_MASK << PLL2650XX_MDIV_SHIFT |
@@ -1135,13 +1135,13 @@ static int samsung_pll2650xx_set_rate(struct clk_hw *hw, unsigned long drate,
<< PLL2650XX_KDIV_SHIFT;
/* Set PLL lock time. */
- __raw_writel(PLL2650XX_LOCK_FACTOR * rate->pdiv, pll->lock_reg);
+ writel_relaxed(PLL2650XX_LOCK_FACTOR * rate->pdiv, pll->lock_reg);
- __raw_writel(pll_con0, pll->con_reg);
- __raw_writel(pll_con2, pll->con_reg + 8);
+ writel_relaxed(pll_con0, pll->con_reg);
+ writel_relaxed(pll_con2, pll->con_reg + 8);
do {
- tmp = __raw_readl(pll->con_reg);
+ tmp = readl_relaxed(pll->con_reg);
} while (!(tmp & (0x1 << PLL2650XX_PLL_LOCKTIME_SHIFT)));
return 0;
diff --git a/drivers/clk/samsung/clk-s3c2410-dclk.c b/drivers/clk/samsung/clk-s3c2410-dclk.c
index ec6fb14d9..ae9a595c7 100644
--- a/drivers/clk/samsung/clk-s3c2410-dclk.c
+++ b/drivers/clk/samsung/clk-s3c2410-dclk.c
@@ -428,8 +428,9 @@ MODULE_DEVICE_TABLE(platform, s3c24xx_dclk_driver_ids);
static struct platform_driver s3c24xx_dclk_driver = {
.driver = {
- .name = "s3c24xx-dclk",
- .pm = &s3c24xx_dclk_pm_ops,
+ .name = "s3c24xx-dclk",
+ .pm = &s3c24xx_dclk_pm_ops,
+ .suppress_bind_attrs = true,
},
.probe = s3c24xx_dclk_probe,
.remove = s3c24xx_dclk_remove,
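
Setting .suppress_bind_attrs removes the driver's sysfs bind/unbind attributes, so user space can no longer detach a live clock provider from its device with something like (hypothetical device id):

	/* previously possible from user space:
	 *   echo <device-id> > /sys/bus/platform/drivers/s3c24xx-dclk/unbind
	 * which would tear down clocks that consumers still hold */

The whitespace churn in the .name/.pm lines is only realignment for the new, longer member.
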
diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c
index d7b011c1f..d7a1e772d 100644
--- a/drivers/clk/samsung/clk-s3c2410.c
+++ b/drivers/clk/samsung/clk-s3c2410.c
@@ -374,8 +374,6 @@ void __init s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f,
}
ctx = samsung_clk_init(np, reg_base, NR_CLKS);
- if (!ctx)
- panic("%s: unable to allocate context.\n", __func__);
/* Register external clocks only in non-dt cases */
if (!np)
diff --git a/drivers/clk/samsung/clk-s3c2412.c b/drivers/clk/samsung/clk-s3c2412.c
index effe3736e..ec873ee15 100644
--- a/drivers/clk/samsung/clk-s3c2412.c
+++ b/drivers/clk/samsung/clk-s3c2412.c
@@ -265,8 +265,6 @@ void __init s3c2412_common_clk_init(struct device_node *np, unsigned long xti_f,
}
ctx = samsung_clk_init(np, reg_base, NR_CLKS);
- if (!ctx)
- panic("%s: unable to allocate context.\n", __func__);
/* Register external clocks only in non-dt cases */
if (!np)
diff --git a/drivers/clk/samsung/clk-s3c2443.c b/drivers/clk/samsung/clk-s3c2443.c
index 37562783b..5e24a17e1 100644
--- a/drivers/clk/samsung/clk-s3c2443.c
+++ b/drivers/clk/samsung/clk-s3c2443.c
@@ -400,8 +400,6 @@ void __init s3c2443_common_clk_init(struct device_node *np, unsigned long xti_f,
}
ctx = samsung_clk_init(np, reg_base, NR_CLKS);
- if (!ctx)
- panic("%s: unable to allocate context.\n", __func__);
/* Register external clocks only in non-dt cases */
if (!np)
diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c
index 60aa775bd..a48bd5f17 100644
--- a/drivers/clk/samsung/clk-s3c64xx.c
+++ b/drivers/clk/samsung/clk-s3c64xx.c
@@ -471,8 +471,6 @@ void __init s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f,
}
ctx = samsung_clk_init(np, reg_base, NR_CLKS);
- if (!ctx)
- panic("%s: unable to allocate context.\n", __func__);
/* Register external clocks. */
if (!np)
diff --git a/drivers/clk/samsung/clk-s5pv210-audss.c b/drivers/clk/samsung/clk-s5pv210-audss.c
index eefb84b22..c66ed2d14 100644
--- a/drivers/clk/samsung/clk-s5pv210-audss.c
+++ b/drivers/clk/samsung/clk-s5pv210-audss.c
@@ -18,7 +18,7 @@
#include <linux/clk-provider.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <dt-bindings/clock/s5pv210-audss.h>
@@ -194,20 +194,6 @@ unregister:
return ret;
}
-static int s5pv210_audss_clk_remove(struct platform_device *pdev)
-{
- int i;
-
- of_clk_del_provider(pdev->dev.of_node);
-
- for (i = 0; i < clk_data.clk_num; i++) {
- if (!IS_ERR(clk_table[i]))
- clk_unregister(clk_table[i]);
- }
-
- return 0;
-}
-
static const struct of_device_id s5pv210_audss_clk_of_match[] = {
{ .compatible = "samsung,s5pv210-audss-clock", },
{},
@@ -216,10 +202,10 @@ static const struct of_device_id s5pv210_audss_clk_of_match[] = {
static struct platform_driver s5pv210_audss_clk_driver = {
.driver = {
.name = "s5pv210-audss-clk",
+ .suppress_bind_attrs = true,
.of_match_table = s5pv210_audss_clk_of_match,
},
.probe = s5pv210_audss_clk_probe,
- .remove = s5pv210_audss_clk_remove,
};
static int __init s5pv210_audss_clk_init(void)
@@ -227,14 +213,3 @@ static int __init s5pv210_audss_clk_init(void)
return platform_driver_register(&s5pv210_audss_clk_driver);
}
core_initcall(s5pv210_audss_clk_init);
-
-static void __exit s5pv210_audss_clk_exit(void)
-{
- platform_driver_unregister(&s5pv210_audss_clk_driver);
-}
-module_exit(s5pv210_audss_clk_exit);
-
-MODULE_AUTHOR("Tomasz Figa <t.figa@samsung.com>");
-MODULE_DESCRIPTION("S5PV210 Audio Subsystem Clock Controller");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:s5pv210-audss-clk");
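Note: this driver is built-in only (a bool Kconfig option), so the module macros, the __exit unregister path and the .remove callback could never run; <linux/module.h> is narrowed to <linux/init.h> to match. Built-in platform drivers that do not need the early core_initcall ordering used here can collapse the remaining boilerplate further — a sketch:

	/* registers the driver at device_initcall time */
	builtin_platform_driver(s5pv210_audss_clk_driver);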
diff --git a/drivers/clk/samsung/clk-s5pv210.c b/drivers/clk/samsung/clk-s5pv210.c
index 523022620..fd2725710 100644
--- a/drivers/clk/samsung/clk-s5pv210.c
+++ b/drivers/clk/samsung/clk-s5pv210.c
@@ -784,8 +784,6 @@ static void __init __s5pv210_clk_init(struct device_node *np,
struct samsung_clk_provider *ctx;
ctx = samsung_clk_init(np, reg_base, NR_CLKS);
- if (!ctx)
- panic("%s: unable to allocate context.\n", __func__);
samsung_clk_register_mux(ctx, early_mux_clks,
ARRAY_SIZE(early_mux_clks));
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index f38a6c49f..b7d87d6db 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -346,9 +346,9 @@ static struct syscore_ops samsung_clk_syscore_ops = {
.resume = samsung_clk_resume,
};
-static void samsung_clk_sleep_init(void __iomem *reg_base,
- const unsigned long *rdump,
- unsigned long nr_rdump)
+void samsung_clk_sleep_init(void __iomem *reg_base,
+ const unsigned long *rdump,
+ unsigned long nr_rdump)
{
struct samsung_clock_reg_cache *reg_cache;
@@ -370,9 +370,9 @@ static void samsung_clk_sleep_init(void __iomem *reg_base,
}
#else
-static void samsung_clk_sleep_init(void __iomem *reg_base,
- const unsigned long *rdump,
- unsigned long nr_rdump) {}
+void samsung_clk_sleep_init(void __iomem *reg_base,
+ const unsigned long *rdump,
+ unsigned long nr_rdump) {}
#endif
/*
@@ -381,7 +381,7 @@ static void samsung_clk_sleep_init(void __iomem *reg_base,
*/
struct samsung_clk_provider * __init samsung_cmu_register_one(
struct device_node *np,
- struct samsung_cmu_info *cmu)
+ const struct samsung_cmu_info *cmu)
{
void __iomem *reg_base;
struct samsung_clk_provider *ctx;
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index aa872d2c5..da3bdebab 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
@@ -261,7 +261,7 @@ struct samsung_gate_clock {
#define GATE_DA(_id, dname, cname, pname, o, b, f, gf, a) \
__GATE(_id, dname, cname, pname, o, b, f, gf, a)
-#define PNAME(x) static const char *x[] __initdata
+#define PNAME(x) static const char * const x[] __initconst
/**
* struct samsung_clk_reg_dump: register dump of clock controller registers.
@@ -330,28 +330,28 @@ struct samsung_clock_reg_cache {
struct samsung_cmu_info {
/* list of pll clocks and respective count */
- struct samsung_pll_clock *pll_clks;
+ const struct samsung_pll_clock *pll_clks;
unsigned int nr_pll_clks;
/* list of mux clocks and respective count */
- struct samsung_mux_clock *mux_clks;
+ const struct samsung_mux_clock *mux_clks;
unsigned int nr_mux_clks;
/* list of div clocks and respective count */
- struct samsung_div_clock *div_clks;
+ const struct samsung_div_clock *div_clks;
unsigned int nr_div_clks;
/* list of gate clocks and respective count */
- struct samsung_gate_clock *gate_clks;
+ const struct samsung_gate_clock *gate_clks;
unsigned int nr_gate_clks;
/* list of fixed clocks and respective count */
- struct samsung_fixed_rate_clock *fixed_clks;
+ const struct samsung_fixed_rate_clock *fixed_clks;
unsigned int nr_fixed_clks;
/* list of fixed factor clocks and respective count */
- struct samsung_fixed_factor_clock *fixed_factor_clks;
+ const struct samsung_fixed_factor_clock *fixed_factor_clks;
unsigned int nr_fixed_factor_clks;
/* total number of clocks with IDs assigned */
unsigned int nr_clk_ids;
/* list and number of clocks registers */
- unsigned long *clk_regs;
+ const unsigned long *clk_regs;
unsigned int nr_clk_regs;
};
@@ -395,10 +395,14 @@ extern void __init samsung_clk_register_pll(struct samsung_clk_provider *ctx,
extern struct samsung_clk_provider __init *samsung_cmu_register_one(
struct device_node *,
- struct samsung_cmu_info *);
+ const struct samsung_cmu_info *);
extern unsigned long _get_rate(const char *clk_name);
+extern void samsung_clk_sleep_init(void __iomem *reg_base,
+ const unsigned long *rdump,
+ unsigned long nr_rdump);
+
extern void samsung_clk_save(void __iomem *base,
struct samsung_clk_reg_dump *rd,
unsigned int num_regs);
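Note: the PNAME() change const-qualifies both the array and the strings it points to, so the parent-name tables can live in .init.rodata via __initconst; a const array tagged __initdata lands in .init.data and triggers a section type conflict with modern compilers. Definition sites are unchanged — for example (clock names illustrative):

	PNAME(mout_apll_p) = { "fin_pll", "fout_apll" };
	/* expands to:
	 * static const char * const mout_apll_p[] __initconst =
	 *	{ "fin_pll", "fout_apll" };
	 */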
diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c
index 627267c7e..546bd79c8 100644
--- a/drivers/clk/st/clk-flexgen.c
+++ b/drivers/clk/st/clk-flexgen.c
@@ -267,7 +267,6 @@ static void __init st_of_flexgen_setup(struct device_node *np)
const char **parents;
int num_parents, i;
spinlock_t *rlock = NULL;
- unsigned long flex_flags = 0;
int ret;
pnode = of_get_parent(np);
@@ -308,12 +307,15 @@ static void __init st_of_flexgen_setup(struct device_node *np)
for (i = 0; i < clk_data->clk_num; i++) {
struct clk *clk;
const char *clk_name;
+ unsigned long flex_flags = 0;
if (of_property_read_string_index(np, "clock-output-names",
i, &clk_name)) {
break;
}
+ of_clk_detect_critical(np, i, &flex_flags);
+
/*
* If we read an empty clock name then the output is unused
*/
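Note: of_clk_detect_critical() checks the node's "clock-critical" DT property and, when the given output index is listed there, ORs CLK_IS_CRITICAL into the flags so the core keeps that clock running even with no registered consumers. The DT side, as a sketch (node name and address illustrative):

	flexgen: flexgen@fd345000 {
		compatible = "st,flexgen";
		#clock-cells = <1>;
		clock-output-names = "clk-a", "clk-b";
		clock-critical = <0>;	/* keep output 0 ("clk-a") always on */
	};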
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
index dec4eaaec..09afeb851 100644
--- a/drivers/clk/st/clkgen-fsyn.c
+++ b/drivers/clk/st/clkgen-fsyn.c
@@ -1027,7 +1027,7 @@ static const struct clk_ops st_quadfs_ops = {
static struct clk * __init st_clk_register_quadfs_fsynth(
const char *name, const char *parent_name,
struct clkgen_quadfs_data *quadfs, void __iomem *reg, u32 chan,
- spinlock_t *lock)
+ unsigned long flags, spinlock_t *lock)
{
struct st_clk_quadfs_fsynth *fs;
struct clk *clk;
@@ -1045,7 +1045,7 @@ static struct clk * __init st_clk_register_quadfs_fsynth(
init.name = name;
init.ops = &st_quadfs_ops;
- init.flags = CLK_GET_RATE_NOCACHE | CLK_IS_BASIC;
+ init.flags = flags | CLK_GET_RATE_NOCACHE | CLK_IS_BASIC;
init.parent_names = &parent_name;
init.num_parents = 1;
@@ -1115,6 +1115,7 @@ static void __init st_of_create_quadfs_fsynths(
for (fschan = 0; fschan < QUADFS_MAX_CHAN; fschan++) {
struct clk *clk;
const char *clk_name;
+ unsigned long flags = 0;
if (of_property_read_string_index(np, "clock-output-names",
fschan, &clk_name)) {
@@ -1127,8 +1128,11 @@ static void __init st_of_create_quadfs_fsynths(
if (*clk_name == '\0')
continue;
+ of_clk_detect_critical(np, fschan, &flags);
+
clk = st_clk_register_quadfs_fsynth(clk_name, pll_name,
- quadfs, reg, fschan, lock);
+ quadfs, reg, fschan,
+ flags, lock);
/*
* If there was an error registering this clock output, clean
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c
index 38f6f3a90..0b5990e82 100644
--- a/drivers/clk/st/clkgen-pll.c
+++ b/drivers/clk/st/clkgen-pll.c
@@ -840,7 +840,7 @@ static const struct clk_ops stm_pll4600c28_ops = {
static struct clk * __init clkgen_pll_register(const char *parent_name,
struct clkgen_pll_data *pll_data,
- void __iomem *reg,
+ void __iomem *reg, unsigned long pll_flags,
const char *clk_name, spinlock_t *lock)
{
struct clkgen_pll *pll;
@@ -854,7 +854,7 @@ static struct clk * __init clkgen_pll_register(const char *parent_name,
init.name = clk_name;
init.ops = pll_data->ops;
- init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE;
+ init.flags = pll_flags | CLK_IS_BASIC | CLK_GET_RATE_NOCACHE;
init.parent_names = &parent_name;
init.num_parents = 1;
@@ -948,7 +948,7 @@ static void __init clkgena_c65_pll_setup(struct device_node *np)
*/
clk_data->clks[0] = clkgen_pll_register(parent_name,
(struct clkgen_pll_data *) &st_pll1600c65_ax,
- reg + CLKGENAx_PLL0_OFFSET, clk_name, NULL);
+ reg + CLKGENAx_PLL0_OFFSET, 0, clk_name, NULL);
if (IS_ERR(clk_data->clks[0]))
goto err;
@@ -977,7 +977,7 @@ static void __init clkgena_c65_pll_setup(struct device_node *np)
*/
clk_data->clks[2] = clkgen_pll_register(parent_name,
(struct clkgen_pll_data *) &st_pll800c65_ax,
- reg + CLKGENAx_PLL1_OFFSET, clk_name, NULL);
+ reg + CLKGENAx_PLL1_OFFSET, 0, clk_name, NULL);
if (IS_ERR(clk_data->clks[2]))
goto err;
@@ -995,7 +995,7 @@ CLK_OF_DECLARE(clkgena_c65_plls,
static struct clk * __init clkgen_odf_register(const char *parent_name,
void __iomem *reg,
struct clkgen_pll_data *pll_data,
- int odf,
+ unsigned long pll_flags, int odf,
spinlock_t *odf_lock,
const char *odf_name)
{
@@ -1004,7 +1004,7 @@ static struct clk * __init clkgen_odf_register(const char *parent_name,
struct clk_gate *gate;
struct clk_divider *div;
- flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT;
+ flags = pll_flags | CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT;
gate = kzalloc(sizeof(*gate), GFP_KERNEL);
if (!gate)
@@ -1099,6 +1099,7 @@ static void __init clkgen_c32_pll_setup(struct device_node *np)
int num_odfs, odf;
struct clk_onecell_data *clk_data;
struct clkgen_pll_data *data;
+ unsigned long pll_flags = 0;
match = of_match_node(c32_pll_of_match, np);
if (!match) {
@@ -1116,8 +1117,10 @@ static void __init clkgen_c32_pll_setup(struct device_node *np)
if (!pll_base)
return;
- clk = clkgen_pll_register(parent_name, data, pll_base, np->name,
- data->lock);
+ of_clk_detect_critical(np, 0, &pll_flags);
+
+ clk = clkgen_pll_register(parent_name, data, pll_base, pll_flags,
+ np->name, data->lock);
if (IS_ERR(clk))
return;
@@ -1139,12 +1142,15 @@ static void __init clkgen_c32_pll_setup(struct device_node *np)
for (odf = 0; odf < num_odfs; odf++) {
struct clk *clk;
const char *clk_name;
+ unsigned long odf_flags = 0;
if (of_property_read_string_index(np, "clock-output-names",
odf, &clk_name))
return;
- clk = clkgen_odf_register(pll_name, pll_base, data,
+ of_clk_detect_critical(np, odf, &odf_flags);
+
+ clk = clkgen_odf_register(pll_name, pll_base, data, odf_flags,
odf, &clkgena_c32_odf_lock, clk_name);
if (IS_ERR(clk))
goto err;
@@ -1206,7 +1212,8 @@ static void __init clkgengpu_c32_pll_setup(struct device_node *np)
/*
* PLL 1200MHz output
*/
- clk = clkgen_pll_register(parent_name, data, reg, clk_name, data->lock);
+ clk = clkgen_pll_register(parent_name, data, reg,
+ 0, clk_name, data->lock);
if (!IS_ERR(clk))
of_clk_add_provider(np, of_clk_src_simple_get, clk);
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
new file mode 100644
index 000000000..2afcbd39e
--- /dev/null
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -0,0 +1,65 @@
+config SUNXI_CCU
+ bool "Clock support for Allwinner SoCs"
+ default ARCH_SUNXI
+
+if SUNXI_CCU
+
+# Base clock types
+
+config SUNXI_CCU_DIV
+ bool
+ select SUNXI_CCU_MUX
+
+config SUNXI_CCU_FRAC
+ bool
+
+config SUNXI_CCU_GATE
+ bool
+
+config SUNXI_CCU_MUX
+ bool
+
+config SUNXI_CCU_PHASE
+ bool
+
+# Multi-factor clocks
+
+config SUNXI_CCU_NK
+ bool
+ select SUNXI_CCU_GATE
+
+config SUNXI_CCU_NKM
+ bool
+ select RATIONAL
+ select SUNXI_CCU_GATE
+
+config SUNXI_CCU_NKMP
+ bool
+ select RATIONAL
+ select SUNXI_CCU_GATE
+
+config SUNXI_CCU_NM
+ bool
+ select RATIONAL
+ select SUNXI_CCU_FRAC
+ select SUNXI_CCU_GATE
+
+config SUNXI_CCU_MP
+ bool
+ select SUNXI_CCU_GATE
+ select SUNXI_CCU_MUX
+
+# SoC Drivers
+
+config SUN8I_H3_CCU
+ bool "Support for the Allwinner H3 CCU"
+ select SUNXI_CCU_DIV
+ select SUNXI_CCU_NK
+ select SUNXI_CCU_NKM
+ select SUNXI_CCU_NKMP
+ select SUNXI_CCU_NM
+ select SUNXI_CCU_MP
+ select SUNXI_CCU_PHASE
+ default MACH_SUN8I
+
+endif
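Note: the base clock types are hidden bool symbols pulled in via select, so each SoC option builds only the helpers it needs (plus RATIONAL for the best-rational-approximation library). A hypothetical additional SoC entry would follow the same shape:

	config SUN8I_FOO_CCU
		bool "Support for a hypothetical Allwinner FOO CCU"
		select SUNXI_CCU_DIV
		select SUNXI_CCU_NK
		default MACH_SUN8I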
diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile
new file mode 100644
index 000000000..633ce642f
--- /dev/null
+++ b/drivers/clk/sunxi-ng/Makefile
@@ -0,0 +1,20 @@
+# Common objects
+obj-$(CONFIG_SUNXI_CCU) += ccu_common.o
+obj-$(CONFIG_SUNXI_CCU) += ccu_reset.o
+
+# Base clock types
+obj-$(CONFIG_SUNXI_CCU_DIV) += ccu_div.o
+obj-$(CONFIG_SUNXI_CCU_FRAC) += ccu_frac.o
+obj-$(CONFIG_SUNXI_CCU_GATE) += ccu_gate.o
+obj-$(CONFIG_SUNXI_CCU_MUX) += ccu_mux.o
+obj-$(CONFIG_SUNXI_CCU_PHASE) += ccu_phase.o
+
+# Multi-factor clocks
+obj-$(CONFIG_SUNXI_CCU_NK) += ccu_nk.o
+obj-$(CONFIG_SUNXI_CCU_NKM) += ccu_nkm.o
+obj-$(CONFIG_SUNXI_CCU_NKMP) += ccu_nkmp.o
+obj-$(CONFIG_SUNXI_CCU_NM) += ccu_nm.o
+obj-$(CONFIG_SUNXI_CCU_MP) += ccu_mp.o
+
+# SoC support
+obj-$(CONFIG_SUN8I_H3_CCU) += ccu-sun8i-h3.o
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
new file mode 100644
index 000000000..267f99523
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -0,0 +1,826 @@
+/*
+ * Copyright (c) 2016 Maxime Ripard. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+
+#include "ccu_common.h"
+#include "ccu_reset.h"
+
+#include "ccu_div.h"
+#include "ccu_gate.h"
+#include "ccu_mp.h"
+#include "ccu_mult.h"
+#include "ccu_nk.h"
+#include "ccu_nkm.h"
+#include "ccu_nkmp.h"
+#include "ccu_nm.h"
+#include "ccu_phase.h"
+
+#include "ccu-sun8i-h3.h"
+
+static SUNXI_CCU_NKMP_WITH_GATE_LOCK(pll_cpux_clk, "pll-cpux",
+ "osc24M", 0x000,
+ 8, 5, /* N */
+ 4, 2, /* K */
+ 0, 2, /* M */
+ 16, 2, /* P */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ 0);
+
+/*
+ * The Audio PLL is supposed to have 4 outputs: 3 fixed factors from
+ * the base (2x, 4x and 8x), and one variable divider (the one true
+ * pll audio).
+ *
+ * We don't need the variable divider for now, so we just hardcode
+ * it to match the clock names.
+ */
+#define SUN8I_H3_PLL_AUDIO_REG 0x008
+
+static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
+ "osc24M", 0x008,
+ 8, 7, /* N */
+ 0, 5, /* M */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ 0);
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video_clk, "pll-video",
+ "osc24M", 0x0010,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ 0);
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_ve_clk, "pll-ve",
+ "osc24M", 0x0018,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ 0);
+
+static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_ddr_clk, "pll-ddr",
+ "osc24M", 0x020,
+ 8, 5, /* N */
+ 4, 2, /* K */
+ 0, 2, /* M */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ 0);
+
+static SUNXI_CCU_NK_WITH_GATE_LOCK_POSTDIV(pll_periph0_clk, "pll-periph0",
+ "osc24M", 0x028,
+ 8, 5, /* N */
+ 4, 2, /* K */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ 2, /* post-div */
+ 0);
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_gpu_clk, "pll-gpu",
+ "osc24M", 0x0038,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ 0);
+
+static SUNXI_CCU_NK_WITH_GATE_LOCK_POSTDIV(pll_periph1_clk, "pll-periph1",
+ "osc24M", 0x044,
+ 8, 5, /* N */
+ 4, 2, /* K */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ 2, /* post-div */
+ 0);
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_de_clk, "pll-de",
+ "osc24M", 0x0048,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ 0);
+
+static const char * const cpux_parents[] = { "osc32k", "osc24M",
+					     "pll-cpux", "pll-cpux" };
+static SUNXI_CCU_MUX(cpux_clk, "cpux", cpux_parents,
+ 0x050, 16, 2, CLK_IS_CRITICAL);
+
+static SUNXI_CCU_M(axi_clk, "axi", "cpux", 0x050, 0, 2, 0);
+
+static const char * const ahb1_parents[] = { "osc32k", "osc24M",
+					     "axi", "pll-periph0" };
+static struct ccu_div ahb1_clk = {
+ .div = _SUNXI_CCU_DIV_FLAGS(4, 2, CLK_DIVIDER_POWER_OF_TWO),
+
+ .mux = {
+ .shift = 12,
+ .width = 2,
+
+ .variable_prediv = {
+ .index = 3,
+ .shift = 6,
+ .width = 2,
+ },
+ },
+
+ .common = {
+ .reg = 0x054,
+ .features = CCU_FEATURE_VARIABLE_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("ahb1",
+ ahb1_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static struct clk_div_table apb1_div_table[] = {
+ { .val = 0, .div = 2 },
+ { .val = 1, .div = 2 },
+ { .val = 2, .div = 4 },
+ { .val = 3, .div = 8 },
+ { /* Sentinel */ },
+};
+static SUNXI_CCU_DIV_TABLE(apb1_clk, "apb1", "ahb1",
+ 0x054, 8, 2, apb1_div_table, 0);
+
+static const char * const apb2_parents[] = { "osc32k", "osc24M",
+					     "pll-periph0", "pll-periph0" };
+static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", apb2_parents, 0x058,
+ 0, 5, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+
+static const char * const ahb2_parents[] = { "ahb1", "pll-periph0" };
+static struct ccu_mux ahb2_clk = {
+ .mux = {
+ .shift = 0,
+ .width = 1,
+
+ .fixed_prediv = {
+ .index = 1,
+ .div = 2,
+ },
+ },
+
+ .common = {
+ .reg = 0x05c,
+ .features = CCU_FEATURE_FIXED_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("ahb2",
+ ahb2_parents,
+ &ccu_mux_ops,
+ 0),
+ },
+};
+
+static SUNXI_CCU_GATE(bus_ce_clk, "bus-ce", "ahb1",
+ 0x060, BIT(5), 0);
+static SUNXI_CCU_GATE(bus_dma_clk, "bus-dma", "ahb1",
+ 0x060, BIT(6), 0);
+static SUNXI_CCU_GATE(bus_mmc0_clk, "bus-mmc0", "ahb1",
+ 0x060, BIT(8), 0);
+static SUNXI_CCU_GATE(bus_mmc1_clk, "bus-mmc1", "ahb1",
+ 0x060, BIT(9), 0);
+static SUNXI_CCU_GATE(bus_mmc2_clk, "bus-mmc2", "ahb1",
+ 0x060, BIT(10), 0);
+static SUNXI_CCU_GATE(bus_nand_clk, "bus-nand", "ahb1",
+ 0x060, BIT(13), 0);
+static SUNXI_CCU_GATE(bus_dram_clk, "bus-dram", "ahb1",
+ 0x060, BIT(14), 0);
+static SUNXI_CCU_GATE(bus_emac_clk, "bus-emac", "ahb2",
+ 0x060, BIT(17), 0);
+static SUNXI_CCU_GATE(bus_ts_clk, "bus-ts", "ahb1",
+ 0x060, BIT(18), 0);
+static SUNXI_CCU_GATE(bus_hstimer_clk, "bus-hstimer", "ahb1",
+ 0x060, BIT(19), 0);
+static SUNXI_CCU_GATE(bus_spi0_clk, "bus-spi0", "ahb1",
+ 0x060, BIT(20), 0);
+static SUNXI_CCU_GATE(bus_spi1_clk, "bus-spi1", "ahb1",
+ 0x060, BIT(21), 0);
+static SUNXI_CCU_GATE(bus_otg_clk, "bus-otg", "ahb1",
+ 0x060, BIT(23), 0);
+static SUNXI_CCU_GATE(bus_ehci0_clk, "bus-ehci0", "ahb1",
+ 0x060, BIT(24), 0);
+static SUNXI_CCU_GATE(bus_ehci1_clk, "bus-ehci1", "ahb2",
+ 0x060, BIT(25), 0);
+static SUNXI_CCU_GATE(bus_ehci2_clk, "bus-ehci2", "ahb2",
+ 0x060, BIT(26), 0);
+static SUNXI_CCU_GATE(bus_ehci3_clk, "bus-ehci3", "ahb2",
+ 0x060, BIT(27), 0);
+static SUNXI_CCU_GATE(bus_ohci0_clk, "bus-ohci0", "ahb1",
+ 0x060, BIT(28), 0);
+static SUNXI_CCU_GATE(bus_ohci1_clk, "bus-ohci1", "ahb2",
+ 0x060, BIT(29), 0);
+static SUNXI_CCU_GATE(bus_ohci2_clk, "bus-ohci2", "ahb2",
+ 0x060, BIT(30), 0);
+static SUNXI_CCU_GATE(bus_ohci3_clk, "bus-ohci3", "ahb2",
+ 0x060, BIT(31), 0);
+
+static SUNXI_CCU_GATE(bus_ve_clk, "bus-ve", "ahb1",
+ 0x064, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_tcon0_clk, "bus-tcon0", "ahb1",
+ 0x064, BIT(3), 0);
+static SUNXI_CCU_GATE(bus_tcon1_clk, "bus-tcon1", "ahb1",
+ 0x064, BIT(4), 0);
+static SUNXI_CCU_GATE(bus_deinterlace_clk, "bus-deinterlace", "ahb1",
+ 0x064, BIT(5), 0);
+static SUNXI_CCU_GATE(bus_csi_clk, "bus-csi", "ahb1",
+ 0x064, BIT(8), 0);
+static SUNXI_CCU_GATE(bus_tve_clk, "bus-tve", "ahb1",
+ 0x064, BIT(9), 0);
+static SUNXI_CCU_GATE(bus_hdmi_clk, "bus-hdmi", "ahb1",
+ 0x064, BIT(11), 0);
+static SUNXI_CCU_GATE(bus_de_clk, "bus-de", "ahb1",
+ 0x064, BIT(12), 0);
+static SUNXI_CCU_GATE(bus_gpu_clk, "bus-gpu", "ahb1",
+ 0x064, BIT(20), 0);
+static SUNXI_CCU_GATE(bus_msgbox_clk, "bus-msgbox", "ahb1",
+ 0x064, BIT(21), 0);
+static SUNXI_CCU_GATE(bus_spinlock_clk, "bus-spinlock", "ahb1",
+ 0x064, BIT(22), 0);
+
+static SUNXI_CCU_GATE(bus_codec_clk, "bus-codec", "apb1",
+ 0x068, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_spdif_clk, "bus-spdif", "apb1",
+ 0x068, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_pio_clk, "bus-pio", "apb1",
+ 0x068, BIT(5), 0);
+static SUNXI_CCU_GATE(bus_ths_clk, "bus-ths", "apb1",
+ 0x068, BIT(8), 0);
+static SUNXI_CCU_GATE(bus_i2s0_clk, "bus-i2s0", "apb1",
+ 0x068, BIT(12), 0);
+static SUNXI_CCU_GATE(bus_i2s1_clk, "bus-i2s1", "apb1",
+ 0x068, BIT(13), 0);
+static SUNXI_CCU_GATE(bus_i2s2_clk, "bus-i2s2", "apb1",
+ 0x068, BIT(14), 0);
+
+static SUNXI_CCU_GATE(bus_i2c0_clk, "bus-i2c0", "apb2",
+ 0x06c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_i2c1_clk, "bus-i2c1", "apb2",
+ 0x06c, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_i2c2_clk, "bus-i2c2", "apb2",
+ 0x06c, BIT(2), 0);
+static SUNXI_CCU_GATE(bus_uart0_clk, "bus-uart0", "apb2",
+ 0x06c, BIT(16), 0);
+static SUNXI_CCU_GATE(bus_uart1_clk, "bus-uart1", "apb2",
+ 0x06c, BIT(17), 0);
+static SUNXI_CCU_GATE(bus_uart2_clk, "bus-uart2", "apb2",
+ 0x06c, BIT(18), 0);
+static SUNXI_CCU_GATE(bus_uart3_clk, "bus-uart3", "apb2",
+ 0x06c, BIT(19), 0);
+static SUNXI_CCU_GATE(bus_scr_clk, "bus-scr", "apb2",
+ 0x06c, BIT(20), 0);
+
+static SUNXI_CCU_GATE(bus_ephy_clk, "bus-ephy", "ahb1",
+ 0x070, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_dbg_clk, "bus-dbg", "ahb1",
+ 0x070, BIT(7), 0);
+
+static struct clk_div_table ths_div_table[] = {
+	{ .val = 0, .div = 1 },
+	{ .val = 1, .div = 2 },
+	{ .val = 2, .div = 4 },
+	{ .val = 3, .div = 6 },
+	{ /* Sentinel */ },
+};
+static SUNXI_CCU_DIV_TABLE_WITH_GATE(ths_clk, "ths", "osc24M",
+ 0x074, 0, 2, ths_div_table, BIT(31), 0);
+
+static const char * const mod0_default_parents[] = { "osc24M", "pll-periph0",
+ "pll-periph1" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(nand_clk, "nand", mod0_default_parents, 0x080,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc0_clk, "mmc0", mod0_default_parents, 0x088,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_PHASE(mmc0_sample_clk, "mmc0_sample", "mmc0",
+ 0x088, 20, 3, 0);
+static SUNXI_CCU_PHASE(mmc0_output_clk, "mmc0_output", "mmc0",
+ 0x088, 8, 3, 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc1_clk, "mmc1", mod0_default_parents, 0x08c,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_PHASE(mmc1_sample_clk, "mmc1_sample", "mmc1",
+ 0x08c, 20, 3, 0);
+static SUNXI_CCU_PHASE(mmc1_output_clk, "mmc1_output", "mmc1",
+ 0x08c, 8, 3, 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc2_clk, "mmc2", mod0_default_parents, 0x090,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_PHASE(mmc2_sample_clk, "mmc2_sample", "mmc2",
+ 0x090, 20, 3, 0);
+static SUNXI_CCU_PHASE(mmc2_output_clk, "mmc2_output", "mmc2",
+ 0x090, 8, 3, 0);
+
+static const char * const ts_parents[] = { "osc24M", "pll-periph0", };
+static SUNXI_CCU_MP_WITH_MUX_GATE(ts_clk, "ts", ts_parents, 0x098,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(ce_clk, "ce", mod0_default_parents, 0x09c,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi0_clk, "spi0", mod0_default_parents, 0x0a0,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi1_clk, "spi1", mod0_default_parents, 0x0a4,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const char * const i2s_parents[] = { "pll-audio-8x", "pll-audio-4x",
+ "pll-audio-2x", "pll-audio" };
+static SUNXI_CCU_MUX_WITH_GATE(i2s0_clk, "i2s0", i2s_parents,
+ 0x0b0, 16, 2, BIT(31), 0);
+
+static SUNXI_CCU_MUX_WITH_GATE(i2s1_clk, "i2s1", i2s_parents,
+ 0x0b4, 16, 2, BIT(31), 0);
+
+static SUNXI_CCU_MUX_WITH_GATE(i2s2_clk, "i2s2", i2s_parents,
+ 0x0b8, 16, 2, BIT(31), 0);
+
+static SUNXI_CCU_M_WITH_GATE(spdif_clk, "spdif", "pll-audio",
+ 0x0c0, 0, 4, BIT(31), 0);
+
+static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M",
+ 0x0cc, BIT(8), 0);
+static SUNXI_CCU_GATE(usb_phy1_clk, "usb-phy1", "osc24M",
+ 0x0cc, BIT(9), 0);
+static SUNXI_CCU_GATE(usb_phy2_clk, "usb-phy2", "osc24M",
+ 0x0cc, BIT(10), 0);
+static SUNXI_CCU_GATE(usb_phy3_clk, "usb-phy3", "osc24M",
+ 0x0cc, BIT(11), 0);
+static SUNXI_CCU_GATE(usb_ohci0_clk, "usb-ohci0", "osc24M",
+ 0x0cc, BIT(16), 0);
+static SUNXI_CCU_GATE(usb_ohci1_clk, "usb-ohci1", "osc24M",
+ 0x0cc, BIT(17), 0);
+static SUNXI_CCU_GATE(usb_ohci2_clk, "usb-ohci2", "osc24M",
+ 0x0cc, BIT(18), 0);
+static SUNXI_CCU_GATE(usb_ohci3_clk, "usb-ohci3", "osc24M",
+ 0x0cc, BIT(19), 0);
+
+static const char * const dram_parents[] = { "pll-ddr", "pll-periph0-2x" };
+static SUNXI_CCU_M_WITH_MUX(dram_clk, "dram", dram_parents,
+ 0x0f4, 0, 4, 20, 2, CLK_IS_CRITICAL);
+
+static SUNXI_CCU_GATE(dram_ve_clk, "dram-ve", "dram",
+ 0x100, BIT(0), 0);
+static SUNXI_CCU_GATE(dram_csi_clk, "dram-csi", "dram",
+ 0x100, BIT(1), 0);
+static SUNXI_CCU_GATE(dram_deinterlace_clk, "dram-deinterlace", "dram",
+ 0x100, BIT(2), 0);
+static SUNXI_CCU_GATE(dram_ts_clk, "dram-ts", "dram",
+ 0x100, BIT(3), 0);
+
+static const char * const de_parents[] = { "pll-periph0-2x", "pll-de" };
+static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents,
+ 0x104, 0, 4, 24, 3, BIT(31), 0);
+
+static const char * const tcon_parents[] = { "pll-video" };
+static SUNXI_CCU_M_WITH_MUX_GATE(tcon_clk, "tcon", tcon_parents,
+ 0x118, 0, 4, 24, 3, BIT(31), 0);
+
+static const char * const tve_parents[] = { "pll-de", "pll-periph1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(tve_clk, "tve", tve_parents,
+ 0x120, 0, 4, 24, 3, BIT(31), 0);
+
+static const char * const deinterlace_parents[] = { "pll-periph0", "pll-periph1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(deinterlace_clk, "deinterlace", deinterlace_parents,
+ 0x124, 0, 4, 24, 3, BIT(31), 0);
+
+static SUNXI_CCU_GATE(csi_misc_clk, "csi-misc", "osc24M",
+ 0x130, BIT(31), 0);
+
+static const char * const csi_sclk_parents[] = { "pll-periph0", "pll-periph1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(csi_sclk_clk, "csi-sclk", csi_sclk_parents,
+ 0x134, 16, 4, 24, 3, BIT(31), 0);
+
+static const char * const csi_mclk_parents[] = { "osc24M", "pll-video", "pll-periph0" };
+static SUNXI_CCU_M_WITH_MUX_GATE(csi_mclk_clk, "csi-mclk", csi_mclk_parents,
+ 0x134, 0, 5, 8, 3, BIT(15), 0);
+
+static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve",
+ 0x13c, 16, 3, BIT(31), 0);
+
+static SUNXI_CCU_GATE(ac_dig_clk, "ac-dig", "pll-audio",
+ 0x140, BIT(31), 0);
+static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M",
+ 0x144, BIT(31), 0);
+
+static const char * const hdmi_parents[] = { "pll-video" };
+static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", hdmi_parents,
+ 0x150, 0, 4, 24, 2, BIT(31), 0);
+
+static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M",
+ 0x154, BIT(31), 0);
+
+static const char * const mbus_parents[] = { "osc24M", "pll-periph0-2x", "pll-ddr" };
+static SUNXI_CCU_M_WITH_MUX_GATE(mbus_clk, "mbus", mbus_parents,
+ 0x15c, 0, 3, 24, 2, BIT(31), CLK_IS_CRITICAL);
+
+static SUNXI_CCU_M_WITH_GATE(gpu_clk, "gpu", "pll-gpu",
+ 0x1a0, 0, 3, BIT(31), 0);
+
+static struct ccu_common *sun8i_h3_ccu_clks[] = {
+ &pll_cpux_clk.common,
+ &pll_audio_base_clk.common,
+ &pll_video_clk.common,
+ &pll_ve_clk.common,
+ &pll_ddr_clk.common,
+ &pll_periph0_clk.common,
+ &pll_gpu_clk.common,
+ &pll_periph1_clk.common,
+ &pll_de_clk.common,
+ &cpux_clk.common,
+ &axi_clk.common,
+ &ahb1_clk.common,
+ &apb1_clk.common,
+ &apb2_clk.common,
+ &ahb2_clk.common,
+ &bus_ce_clk.common,
+ &bus_dma_clk.common,
+ &bus_mmc0_clk.common,
+ &bus_mmc1_clk.common,
+ &bus_mmc2_clk.common,
+ &bus_nand_clk.common,
+ &bus_dram_clk.common,
+ &bus_emac_clk.common,
+ &bus_ts_clk.common,
+ &bus_hstimer_clk.common,
+ &bus_spi0_clk.common,
+ &bus_spi1_clk.common,
+ &bus_otg_clk.common,
+ &bus_ehci0_clk.common,
+ &bus_ehci1_clk.common,
+ &bus_ehci2_clk.common,
+ &bus_ehci3_clk.common,
+ &bus_ohci0_clk.common,
+ &bus_ohci1_clk.common,
+ &bus_ohci2_clk.common,
+ &bus_ohci3_clk.common,
+ &bus_ve_clk.common,
+ &bus_tcon0_clk.common,
+ &bus_tcon1_clk.common,
+ &bus_deinterlace_clk.common,
+ &bus_csi_clk.common,
+ &bus_tve_clk.common,
+ &bus_hdmi_clk.common,
+ &bus_de_clk.common,
+ &bus_gpu_clk.common,
+ &bus_msgbox_clk.common,
+ &bus_spinlock_clk.common,
+ &bus_codec_clk.common,
+ &bus_spdif_clk.common,
+ &bus_pio_clk.common,
+ &bus_ths_clk.common,
+ &bus_i2s0_clk.common,
+ &bus_i2s1_clk.common,
+ &bus_i2s2_clk.common,
+ &bus_i2c0_clk.common,
+ &bus_i2c1_clk.common,
+ &bus_i2c2_clk.common,
+ &bus_uart0_clk.common,
+ &bus_uart1_clk.common,
+ &bus_uart2_clk.common,
+ &bus_uart3_clk.common,
+ &bus_scr_clk.common,
+ &bus_ephy_clk.common,
+ &bus_dbg_clk.common,
+ &ths_clk.common,
+ &nand_clk.common,
+ &mmc0_clk.common,
+ &mmc0_sample_clk.common,
+ &mmc0_output_clk.common,
+ &mmc1_clk.common,
+ &mmc1_sample_clk.common,
+ &mmc1_output_clk.common,
+ &mmc2_clk.common,
+ &mmc2_sample_clk.common,
+ &mmc2_output_clk.common,
+ &ts_clk.common,
+ &ce_clk.common,
+ &spi0_clk.common,
+ &spi1_clk.common,
+ &i2s0_clk.common,
+ &i2s1_clk.common,
+ &i2s2_clk.common,
+ &spdif_clk.common,
+ &usb_phy0_clk.common,
+ &usb_phy1_clk.common,
+ &usb_phy2_clk.common,
+ &usb_phy3_clk.common,
+ &usb_ohci0_clk.common,
+ &usb_ohci1_clk.common,
+ &usb_ohci2_clk.common,
+ &usb_ohci3_clk.common,
+ &dram_clk.common,
+ &dram_ve_clk.common,
+ &dram_csi_clk.common,
+ &dram_deinterlace_clk.common,
+ &dram_ts_clk.common,
+ &de_clk.common,
+ &tcon_clk.common,
+ &tve_clk.common,
+ &deinterlace_clk.common,
+ &csi_misc_clk.common,
+ &csi_sclk_clk.common,
+ &csi_mclk_clk.common,
+ &ve_clk.common,
+ &ac_dig_clk.common,
+ &avs_clk.common,
+ &hdmi_clk.common,
+ &hdmi_ddc_clk.common,
+ &mbus_clk.common,
+ &gpu_clk.common,
+};
+
+/* We hardcode the divider to 4 for now */
+static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
+ "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x",
+ "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x",
+ "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_audio_8x_clk, "pll-audio-8x",
+ "pll-audio-base", 1, 2, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_periph0_2x_clk, "pll-periph0-2x",
+ "pll-periph0", 1, 2, 0);
+
+static struct clk_hw_onecell_data sun8i_h3_hw_clks = {
+ .hws = {
+ [CLK_PLL_CPUX] = &pll_cpux_clk.common.hw,
+ [CLK_PLL_AUDIO_BASE] = &pll_audio_base_clk.common.hw,
+ [CLK_PLL_AUDIO] = &pll_audio_clk.hw,
+ [CLK_PLL_AUDIO_2X] = &pll_audio_2x_clk.hw,
+ [CLK_PLL_AUDIO_4X] = &pll_audio_4x_clk.hw,
+ [CLK_PLL_AUDIO_8X] = &pll_audio_8x_clk.hw,
+ [CLK_PLL_VIDEO] = &pll_video_clk.common.hw,
+ [CLK_PLL_VE] = &pll_ve_clk.common.hw,
+ [CLK_PLL_DDR] = &pll_ddr_clk.common.hw,
+ [CLK_PLL_PERIPH0] = &pll_periph0_clk.common.hw,
+ [CLK_PLL_PERIPH0_2X] = &pll_periph0_2x_clk.hw,
+ [CLK_PLL_GPU] = &pll_gpu_clk.common.hw,
+ [CLK_PLL_PERIPH1] = &pll_periph1_clk.common.hw,
+ [CLK_PLL_DE] = &pll_de_clk.common.hw,
+ [CLK_CPUX] = &cpux_clk.common.hw,
+ [CLK_AXI] = &axi_clk.common.hw,
+ [CLK_AHB1] = &ahb1_clk.common.hw,
+ [CLK_APB1] = &apb1_clk.common.hw,
+ [CLK_APB2] = &apb2_clk.common.hw,
+ [CLK_AHB2] = &ahb2_clk.common.hw,
+ [CLK_BUS_CE] = &bus_ce_clk.common.hw,
+ [CLK_BUS_DMA] = &bus_dma_clk.common.hw,
+ [CLK_BUS_MMC0] = &bus_mmc0_clk.common.hw,
+ [CLK_BUS_MMC1] = &bus_mmc1_clk.common.hw,
+ [CLK_BUS_MMC2] = &bus_mmc2_clk.common.hw,
+ [CLK_BUS_NAND] = &bus_nand_clk.common.hw,
+ [CLK_BUS_DRAM] = &bus_dram_clk.common.hw,
+ [CLK_BUS_EMAC] = &bus_emac_clk.common.hw,
+ [CLK_BUS_TS] = &bus_ts_clk.common.hw,
+ [CLK_BUS_HSTIMER] = &bus_hstimer_clk.common.hw,
+ [CLK_BUS_SPI0] = &bus_spi0_clk.common.hw,
+ [CLK_BUS_SPI1] = &bus_spi1_clk.common.hw,
+ [CLK_BUS_OTG] = &bus_otg_clk.common.hw,
+ [CLK_BUS_EHCI0] = &bus_ehci0_clk.common.hw,
+ [CLK_BUS_EHCI1] = &bus_ehci1_clk.common.hw,
+ [CLK_BUS_EHCI2] = &bus_ehci2_clk.common.hw,
+ [CLK_BUS_EHCI3] = &bus_ehci3_clk.common.hw,
+ [CLK_BUS_OHCI0] = &bus_ohci0_clk.common.hw,
+ [CLK_BUS_OHCI1] = &bus_ohci1_clk.common.hw,
+ [CLK_BUS_OHCI2] = &bus_ohci2_clk.common.hw,
+ [CLK_BUS_OHCI3] = &bus_ohci3_clk.common.hw,
+ [CLK_BUS_VE] = &bus_ve_clk.common.hw,
+ [CLK_BUS_TCON0] = &bus_tcon0_clk.common.hw,
+ [CLK_BUS_TCON1] = &bus_tcon1_clk.common.hw,
+ [CLK_BUS_DEINTERLACE] = &bus_deinterlace_clk.common.hw,
+ [CLK_BUS_CSI] = &bus_csi_clk.common.hw,
+ [CLK_BUS_TVE] = &bus_tve_clk.common.hw,
+ [CLK_BUS_HDMI] = &bus_hdmi_clk.common.hw,
+ [CLK_BUS_DE] = &bus_de_clk.common.hw,
+ [CLK_BUS_GPU] = &bus_gpu_clk.common.hw,
+ [CLK_BUS_MSGBOX] = &bus_msgbox_clk.common.hw,
+ [CLK_BUS_SPINLOCK] = &bus_spinlock_clk.common.hw,
+ [CLK_BUS_CODEC] = &bus_codec_clk.common.hw,
+ [CLK_BUS_SPDIF] = &bus_spdif_clk.common.hw,
+ [CLK_BUS_PIO] = &bus_pio_clk.common.hw,
+ [CLK_BUS_THS] = &bus_ths_clk.common.hw,
+ [CLK_BUS_I2S0] = &bus_i2s0_clk.common.hw,
+ [CLK_BUS_I2S1] = &bus_i2s1_clk.common.hw,
+ [CLK_BUS_I2S2] = &bus_i2s2_clk.common.hw,
+ [CLK_BUS_I2C0] = &bus_i2c0_clk.common.hw,
+ [CLK_BUS_I2C1] = &bus_i2c1_clk.common.hw,
+ [CLK_BUS_I2C2] = &bus_i2c2_clk.common.hw,
+ [CLK_BUS_UART0] = &bus_uart0_clk.common.hw,
+ [CLK_BUS_UART1] = &bus_uart1_clk.common.hw,
+ [CLK_BUS_UART2] = &bus_uart2_clk.common.hw,
+ [CLK_BUS_UART3] = &bus_uart3_clk.common.hw,
+ [CLK_BUS_SCR] = &bus_scr_clk.common.hw,
+ [CLK_BUS_EPHY] = &bus_ephy_clk.common.hw,
+ [CLK_BUS_DBG] = &bus_dbg_clk.common.hw,
+ [CLK_THS] = &ths_clk.common.hw,
+ [CLK_NAND] = &nand_clk.common.hw,
+ [CLK_MMC0] = &mmc0_clk.common.hw,
+ [CLK_MMC0_SAMPLE] = &mmc0_sample_clk.common.hw,
+ [CLK_MMC0_OUTPUT] = &mmc0_output_clk.common.hw,
+ [CLK_MMC1] = &mmc1_clk.common.hw,
+ [CLK_MMC1_SAMPLE] = &mmc1_sample_clk.common.hw,
+ [CLK_MMC1_OUTPUT] = &mmc1_output_clk.common.hw,
+ [CLK_MMC2] = &mmc2_clk.common.hw,
+ [CLK_MMC2_SAMPLE] = &mmc2_sample_clk.common.hw,
+ [CLK_MMC2_OUTPUT] = &mmc2_output_clk.common.hw,
+ [CLK_TS] = &ts_clk.common.hw,
+ [CLK_CE] = &ce_clk.common.hw,
+ [CLK_SPI0] = &spi0_clk.common.hw,
+ [CLK_SPI1] = &spi1_clk.common.hw,
+ [CLK_I2S0] = &i2s0_clk.common.hw,
+ [CLK_I2S1] = &i2s1_clk.common.hw,
+ [CLK_I2S2] = &i2s2_clk.common.hw,
+ [CLK_SPDIF] = &spdif_clk.common.hw,
+ [CLK_USB_PHY0] = &usb_phy0_clk.common.hw,
+ [CLK_USB_PHY1] = &usb_phy1_clk.common.hw,
+ [CLK_USB_PHY2] = &usb_phy2_clk.common.hw,
+ [CLK_USB_PHY3] = &usb_phy3_clk.common.hw,
+ [CLK_USB_OHCI0] = &usb_ohci0_clk.common.hw,
+ [CLK_USB_OHCI1] = &usb_ohci1_clk.common.hw,
+ [CLK_USB_OHCI2] = &usb_ohci2_clk.common.hw,
+ [CLK_USB_OHCI3] = &usb_ohci3_clk.common.hw,
+ [CLK_DRAM] = &dram_clk.common.hw,
+ [CLK_DRAM_VE] = &dram_ve_clk.common.hw,
+ [CLK_DRAM_CSI] = &dram_csi_clk.common.hw,
+ [CLK_DRAM_DEINTERLACE] = &dram_deinterlace_clk.common.hw,
+ [CLK_DRAM_TS] = &dram_ts_clk.common.hw,
+ [CLK_DE] = &de_clk.common.hw,
+ [CLK_TCON0] = &tcon_clk.common.hw,
+ [CLK_TVE] = &tve_clk.common.hw,
+ [CLK_DEINTERLACE] = &deinterlace_clk.common.hw,
+ [CLK_CSI_MISC] = &csi_misc_clk.common.hw,
+ [CLK_CSI_SCLK] = &csi_sclk_clk.common.hw,
+ [CLK_CSI_MCLK] = &csi_mclk_clk.common.hw,
+ [CLK_VE] = &ve_clk.common.hw,
+ [CLK_AC_DIG] = &ac_dig_clk.common.hw,
+ [CLK_AVS] = &avs_clk.common.hw,
+ [CLK_HDMI] = &hdmi_clk.common.hw,
+ [CLK_HDMI_DDC] = &hdmi_ddc_clk.common.hw,
+ [CLK_MBUS] = &mbus_clk.common.hw,
+ [CLK_GPU] = &gpu_clk.common.hw,
+ },
+ .num = CLK_NUMBER,
+};
+
+static struct ccu_reset_map sun8i_h3_ccu_resets[] = {
+ [RST_USB_PHY0] = { 0x0cc, BIT(0) },
+ [RST_USB_PHY1] = { 0x0cc, BIT(1) },
+ [RST_USB_PHY2] = { 0x0cc, BIT(2) },
+ [RST_USB_PHY3] = { 0x0cc, BIT(3) },
+
+ [RST_MBUS] = { 0x0fc, BIT(31) },
+
+ [RST_BUS_CE] = { 0x2c0, BIT(5) },
+ [RST_BUS_DMA] = { 0x2c0, BIT(6) },
+ [RST_BUS_MMC0] = { 0x2c0, BIT(8) },
+ [RST_BUS_MMC1] = { 0x2c0, BIT(9) },
+ [RST_BUS_MMC2] = { 0x2c0, BIT(10) },
+ [RST_BUS_NAND] = { 0x2c0, BIT(13) },
+ [RST_BUS_DRAM] = { 0x2c0, BIT(14) },
+ [RST_BUS_EMAC] = { 0x2c0, BIT(17) },
+ [RST_BUS_TS] = { 0x2c0, BIT(18) },
+ [RST_BUS_HSTIMER] = { 0x2c0, BIT(19) },
+ [RST_BUS_SPI0] = { 0x2c0, BIT(20) },
+ [RST_BUS_SPI1] = { 0x2c0, BIT(21) },
+ [RST_BUS_OTG] = { 0x2c0, BIT(23) },
+ [RST_BUS_EHCI0] = { 0x2c0, BIT(24) },
+ [RST_BUS_EHCI1] = { 0x2c0, BIT(25) },
+ [RST_BUS_EHCI2] = { 0x2c0, BIT(26) },
+ [RST_BUS_EHCI3] = { 0x2c0, BIT(27) },
+ [RST_BUS_OHCI0] = { 0x2c0, BIT(28) },
+ [RST_BUS_OHCI1] = { 0x2c0, BIT(29) },
+ [RST_BUS_OHCI2] = { 0x2c0, BIT(30) },
+ [RST_BUS_OHCI3] = { 0x2c0, BIT(31) },
+
+ [RST_BUS_VE] = { 0x2c4, BIT(0) },
+ [RST_BUS_TCON0] = { 0x2c4, BIT(3) },
+ [RST_BUS_TCON1] = { 0x2c4, BIT(4) },
+ [RST_BUS_DEINTERLACE] = { 0x2c4, BIT(5) },
+ [RST_BUS_CSI] = { 0x2c4, BIT(8) },
+ [RST_BUS_TVE] = { 0x2c4, BIT(9) },
+ [RST_BUS_HDMI0] = { 0x2c4, BIT(10) },
+ [RST_BUS_HDMI1] = { 0x2c4, BIT(11) },
+ [RST_BUS_DE] = { 0x2c4, BIT(12) },
+ [RST_BUS_GPU] = { 0x2c4, BIT(20) },
+ [RST_BUS_MSGBOX] = { 0x2c4, BIT(21) },
+ [RST_BUS_SPINLOCK] = { 0x2c4, BIT(22) },
+ [RST_BUS_DBG] = { 0x2c4, BIT(31) },
+
+ [RST_BUS_EPHY] = { 0x2c8, BIT(2) },
+
+ [RST_BUS_CODEC] = { 0x2d0, BIT(0) },
+ [RST_BUS_SPDIF] = { 0x2d0, BIT(1) },
+ [RST_BUS_THS] = { 0x2d0, BIT(8) },
+ [RST_BUS_I2S0] = { 0x2d0, BIT(12) },
+ [RST_BUS_I2S1] = { 0x2d0, BIT(13) },
+ [RST_BUS_I2S2] = { 0x2d0, BIT(14) },
+
+ [RST_BUS_I2C0] = { 0x2d8, BIT(0) },
+ [RST_BUS_I2C1] = { 0x2d8, BIT(1) },
+ [RST_BUS_I2C2] = { 0x2d8, BIT(2) },
+ [RST_BUS_UART0] = { 0x2d8, BIT(16) },
+ [RST_BUS_UART1] = { 0x2d8, BIT(17) },
+ [RST_BUS_UART2] = { 0x2d8, BIT(18) },
+ [RST_BUS_UART3] = { 0x2d8, BIT(19) },
+ [RST_BUS_SCR] = { 0x2d8, BIT(20) },
+};
+
+static const struct sunxi_ccu_desc sun8i_h3_ccu_desc = {
+ .ccu_clks = sun8i_h3_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_h3_ccu_clks),
+
+ .hw_clks = &sun8i_h3_hw_clks,
+
+ .resets = sun8i_h3_ccu_resets,
+ .num_resets = ARRAY_SIZE(sun8i_h3_ccu_resets),
+};
+
+static void __init sun8i_h3_ccu_setup(struct device_node *node)
+{
+ void __iomem *reg;
+ u32 val;
+
+ reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+ if (IS_ERR(reg)) {
+ pr_err("%s: Could not map the clock registers\n",
+ of_node_full_name(node));
+ return;
+ }
+
+ /* Force the PLL-Audio-1x divider to 4 */
+ val = readl(reg + SUN8I_H3_PLL_AUDIO_REG);
+ val &= ~GENMASK(19, 16);
+ writel(val | (3 << 16), reg + SUN8I_H3_PLL_AUDIO_REG);
+
+ sunxi_ccu_probe(node, reg, &sun8i_h3_ccu_desc);
+}
+CLK_OF_DECLARE(sun8i_h3_ccu, "allwinner,sun8i-h3-ccu",
+ sun8i_h3_ccu_setup);
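Note: forcing bits 19:16 of the audio PLL register to 3 fixes the 1x post-divider at 4, assuming the field encodes divider-minus-one as the "force the divider to 4" comment implies. The fixed factors registered above then resolve consistently:

	/*
	 * pll-audio    = pll-audio-base / 4
	 * pll-audio-2x = pll-audio-base / 2
	 * pll-audio-4x = pll-audio-base
	 * pll-audio-8x = pll-audio-base * 2
	 */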
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.h b/drivers/clk/sunxi-ng/ccu-sun8i-h3.h
new file mode 100644
index 000000000..78be712c7
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2016 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CCU_SUN8I_H3_H_
+#define _CCU_SUN8I_H3_H_
+
+#include <dt-bindings/clock/sun8i-h3-ccu.h>
+#include <dt-bindings/reset/sun8i-h3-ccu.h>
+
+#define CLK_PLL_CPUX 0
+#define CLK_PLL_AUDIO_BASE 1
+#define CLK_PLL_AUDIO 2
+#define CLK_PLL_AUDIO_2X 3
+#define CLK_PLL_AUDIO_4X 4
+#define CLK_PLL_AUDIO_8X 5
+#define CLK_PLL_VIDEO 6
+#define CLK_PLL_VE 7
+#define CLK_PLL_DDR 8
+#define CLK_PLL_PERIPH0 9
+#define CLK_PLL_PERIPH0_2X 10
+#define CLK_PLL_GPU 11
+#define CLK_PLL_PERIPH1 12
+#define CLK_PLL_DE 13
+
+/* The CPUX clock is exported */
+
+#define CLK_AXI 15
+#define CLK_AHB1 16
+#define CLK_APB1 17
+#define CLK_APB2 18
+#define CLK_AHB2 19
+
+/* All the bus gates are exported */
+
+/* The first bunch of module clocks are exported */
+
+#define CLK_DRAM 96
+
+/* All the DRAM gates are exported */
+
+/* Some more module clocks are exported */
+
+#define CLK_MBUS 113
+
+/* And the GPU module clock is exported */
+
+#define CLK_NUMBER (CLK_GPU + 1)
+
+#endif /* _CCU_SUN8I_H3_H_ */
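Note: indices are only #defined here for clocks that are not part of the DT binding; the gaps are filled by <dt-bindings/clock/sun8i-h3-ccu.h>, which freezes the numbers of the exported clocks as ABI. Inferring from the gaps above, that header would contain, e.g.:

	#define CLK_CPUX	14
	#define CLK_BUS_CE	20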
diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c
new file mode 100644
index 000000000..51d4bac97
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu_common.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2016 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/iopoll.h>
+#include <linux/slab.h>
+
+#include "ccu_common.h"
+#include "ccu_reset.h"
+
+static DEFINE_SPINLOCK(ccu_lock);
+
+void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock)
+{
+ u32 reg;
+
+ if (!lock)
+ return;
+
+ WARN_ON(readl_relaxed_poll_timeout(common->base + common->reg, reg,
+ reg & lock, 100, 70000));
+}
+
+int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
+ const struct sunxi_ccu_desc *desc)
+{
+ struct ccu_reset *reset;
+ int i, ret;
+
+ for (i = 0; i < desc->num_ccu_clks; i++) {
+ struct ccu_common *cclk = desc->ccu_clks[i];
+
+ if (!cclk)
+ continue;
+
+ cclk->base = reg;
+ cclk->lock = &ccu_lock;
+ }
+
+	for (i = 0; i < desc->hw_clks->num; i++) {
+ struct clk_hw *hw = desc->hw_clks->hws[i];
+
+ if (!hw)
+ continue;
+
+ ret = clk_hw_register(NULL, hw);
+ if (ret) {
+ pr_err("Couldn't register clock %s\n",
+ clk_hw_get_name(hw));
+ goto err_clk_unreg;
+ }
+ }
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
+ desc->hw_clks);
+ if (ret)
+ goto err_clk_unreg;
+
+	reset = kzalloc(sizeof(*reset), GFP_KERNEL);
+	if (!reset) {
+		ret = -ENOMEM;
+		goto err_clk_unreg;
+	}
+
+ reset->rcdev.of_node = node;
+ reset->rcdev.ops = &ccu_reset_ops;
+ reset->rcdev.owner = THIS_MODULE;
+ reset->rcdev.nr_resets = desc->num_resets;
+ reset->base = reg;
+ reset->lock = &ccu_lock;
+ reset->reset_map = desc->resets;
+
+ ret = reset_controller_register(&reset->rcdev);
+ if (ret)
+ goto err_of_clk_unreg;
+
+ return 0;
+
+err_of_clk_unreg:
+err_clk_unreg:
+ return ret;
+}
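Note: both error labels currently fall straight through to the return without undoing anything. A minimal unwind sketch, assuming clk_hw_unregister() suffices for these clocks (the err_of_clk_unreg path would additionally call of_clk_del_provider(node)):

	err_clk_unreg:
		while (--i >= 0)
			if (desc->hw_clks->hws[i])
				clk_hw_unregister(desc->hw_clks->hws[i]);
		return ret;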
diff --git a/drivers/clk/sunxi-ng/ccu_common.h b/drivers/clk/sunxi-ng/ccu_common.h
new file mode 100644
index 000000000..b3d9abfbd
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu_common.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2016 Maxime Ripard. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _COMMON_H_
+#define _COMMON_H_
+
+#include <linux/compiler.h>
+#include <linux/clk-provider.h>
+
+#define CCU_FEATURE_FRACTIONAL BIT(0)
+#define CCU_FEATURE_VARIABLE_PREDIV BIT(1)
+#define CCU_FEATURE_FIXED_PREDIV BIT(2)
+#define CCU_FEATURE_FIXED_POSTDIV BIT(3)
+
+struct device_node;
+
+#define CLK_HW_INIT(_name, _parent, _ops, _flags) \
+ &(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_names = (const char *[]) { _parent }, \
+ .num_parents = 1, \
+ .ops = _ops, \
+ }
+
+#define CLK_HW_INIT_PARENTS(_name, _parents, _ops, _flags) \
+ &(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_names = _parents, \
+ .num_parents = ARRAY_SIZE(_parents), \
+ .ops = _ops, \
+ }
+
+#define CLK_FIXED_FACTOR(_struct, _name, _parent, \
+ _div, _mult, _flags) \
+ struct clk_fixed_factor _struct = { \
+ .div = _div, \
+ .mult = _mult, \
+ .hw.init = CLK_HW_INIT(_name, \
+ _parent, \
+ &clk_fixed_factor_ops, \
+ _flags), \
+ }
+
+struct ccu_common {
+ void __iomem *base;
+ u16 reg;
+
+ unsigned long features;
+ spinlock_t *lock;
+ struct clk_hw hw;
+};
+
+static inline struct ccu_common *hw_to_ccu_common(struct clk_hw *hw)
+{
+ return container_of(hw, struct ccu_common, hw);
+}
+
+struct sunxi_ccu_desc {
+ struct ccu_common **ccu_clks;
+ unsigned long num_ccu_clks;
+
+ struct clk_hw_onecell_data *hw_clks;
+
+ struct ccu_reset_map *resets;
+ unsigned long num_resets;
+};
+
+void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock);
+
+int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
+ const struct sunxi_ccu_desc *desc);
+
+#endif /* _COMMON_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu_div.c b/drivers/clk/sunxi-ng/ccu_div.c
new file mode 100644
index 000000000..8659b4cb6
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu_div.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2016 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk-provider.h>
+
+#include "ccu_gate.h"
+#include "ccu_div.h"
+
+static unsigned long ccu_div_round_rate(struct ccu_mux_internal *mux,
+ unsigned long parent_rate,
+ unsigned long rate,
+ void *data)
+{
+ struct ccu_div *cd = data;
+ unsigned long val;
+
+ /*
+	 * We can't use divider_round_rate() here: it works against the
+	 * clock's current parent, while this callback may be invoked to
+	 * evaluate several different candidate parents.
+ */
+ val = divider_get_val(rate, parent_rate, cd->div.table, cd->div.width,
+ cd->div.flags);
+
+ return divider_recalc_rate(&cd->common.hw, parent_rate, val,
+ cd->div.table, cd->div.flags);
+}
+
+static void ccu_div_disable(struct clk_hw *hw)
+{
+ struct ccu_div *cd = hw_to_ccu_div(hw);
+
+ return ccu_gate_helper_disable(&cd->common, cd->enable);
+}
+
+static int ccu_div_enable(struct clk_hw *hw)
+{
+ struct ccu_div *cd = hw_to_ccu_div(hw);
+
+ return ccu_gate_helper_enable(&cd->common, cd->enable);
+}
+
+static int ccu_div_is_enabled(struct clk_hw *hw)
+{
+ struct ccu_div *cd = hw_to_ccu_div(hw);
+
+ return ccu_gate_helper_is_enabled(&cd->common, cd->enable);
+}
+
+static unsigned long ccu_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ccu_div *cd = hw_to_ccu_div(hw);
+ unsigned long val;
+ u32 reg;
+
+ reg = readl(cd->common.base + cd->common.reg);
+ val = reg >> cd->div.shift;
+ val &= (1 << cd->div.width) - 1;
+
+ ccu_mux_helper_adjust_parent_for_prediv(&cd->common, &cd->mux, -1,
+ &parent_rate);
+
+ return divider_recalc_rate(hw, parent_rate, val, cd->div.table,
+ cd->div.flags);
+}
+
+static int ccu_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct ccu_div *cd = hw_to_ccu_div(hw);
+
+ return ccu_mux_helper_determine_rate(&cd->common, &cd->mux,
+ req, ccu_div_round_rate, cd);
+}
+
+static int ccu_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ccu_div *cd = hw_to_ccu_div(hw);
+ unsigned long flags;
+ unsigned long val;
+ u32 reg;
+
+ ccu_mux_helper_adjust_parent_for_prediv(&cd->common, &cd->mux, -1,
+ &parent_rate);
+
+ val = divider_get_val(rate, parent_rate, cd->div.table, cd->div.width,
+ cd->div.flags);
+
+ spin_lock_irqsave(cd->common.lock, flags);
+
+ reg = readl(cd->common.base + cd->common.reg);
+ reg &= ~GENMASK(cd->div.width + cd->div.shift - 1, cd->div.shift);
+
+ writel(reg | (val << cd->div.shift),
+ cd->common.base + cd->common.reg);
+
+ spin_unlock_irqrestore(cd->common.lock, flags);
+
+ return 0;
+}
+
+static u8 ccu_div_get_parent(struct clk_hw *hw)
+{
+ struct ccu_div *cd = hw_to_ccu_div(hw);
+
+ return ccu_mux_helper_get_parent(&cd->common, &cd->mux);
+}
+
+static int ccu_div_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct ccu_div *cd = hw_to_ccu_div(hw);
+
+ return ccu_mux_helper_set_parent(&cd->common, &cd->mux, index);
+}
+
+const struct clk_ops ccu_div_ops = {
+ .disable = ccu_div_disable,
+ .enable = ccu_div_enable,
+ .is_enabled = ccu_div_is_enabled,
+
+ .get_parent = ccu_div_get_parent,
+ .set_parent = ccu_div_set_parent,
+
+ .determine_rate = ccu_div_determine_rate,
+ .recalc_rate = ccu_div_recalc_rate,
+ .set_rate = ccu_div_set_rate,
+};
diff --git a/drivers/clk/sunxi-ng/ccu_div.h b/drivers/clk/sunxi-ng/ccu_div.h
new file mode 100644
index 000000000..653ade576
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu_div.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2016 Maxime Ripard. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CCU_DIV_H_
+#define _CCU_DIV_H_
+
+#include <linux/clk-provider.h>
+
+#include "ccu_common.h"
+#include "ccu_mux.h"
+
+struct _ccu_div {
+ u8 shift;
+ u8 width;
+
+ u32 flags;
+
+ struct clk_div_table *table;
+};
+
+#define _SUNXI_CCU_DIV_TABLE_FLAGS(_shift, _width, _table, _flags) \
+ { \
+ .shift = _shift, \
+ .width = _width, \
+ .flags = _flags, \
+ .table = _table, \
+ }
+
+#define _SUNXI_CCU_DIV_FLAGS(_shift, _width, _flags) \
+ _SUNXI_CCU_DIV_TABLE_FLAGS(_shift, _width, NULL, _flags)
+
+#define _SUNXI_CCU_DIV_TABLE(_shift, _width, _table) \
+ _SUNXI_CCU_DIV_TABLE_FLAGS(_shift, _width, _table, 0)
+
+#define _SUNXI_CCU_DIV(_shift, _width) \
+ _SUNXI_CCU_DIV_TABLE_FLAGS(_shift, _width, NULL, 0)
+
+struct ccu_div {
+ u32 enable;
+
+ struct _ccu_div div;
+ struct ccu_mux_internal mux;
+ struct ccu_common common;
+};
+
+#define SUNXI_CCU_DIV_TABLE_WITH_GATE(_struct, _name, _parent, _reg, \
+ _shift, _width, \
+ _table, _gate, _flags) \
+ struct ccu_div _struct = { \
+ .div = _SUNXI_CCU_DIV_TABLE(_shift, _width, \
+ _table), \
+ .enable = _gate, \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT(_name, \
+ _parent, \
+ &ccu_div_ops, \
+ _flags), \
+ } \
+ }
+
+
+#define SUNXI_CCU_DIV_TABLE(_struct, _name, _parent, _reg, \
+ _shift, _width, \
+ _table, _flags) \
+ SUNXI_CCU_DIV_TABLE_WITH_GATE(_struct, _name, _parent, _reg, \
+ _shift, _width, _table, 0, \
+ _flags)
+
+#define SUNXI_CCU_M_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, _muxshift, _muxwidth, \
+ _gate, _flags) \
+ struct ccu_div _struct = { \
+ .enable = _gate, \
+ .div = _SUNXI_CCU_DIV(_mshift, _mwidth), \
+ .mux = SUNXI_CLK_MUX(_muxshift, _muxwidth), \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT_PARENTS(_name, \
+ _parents, \
+ &ccu_div_ops, \
+ _flags), \
+ }, \
+ }
+
+#define SUNXI_CCU_M_WITH_MUX(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, _muxshift, _muxwidth, \
+ _flags) \
+ SUNXI_CCU_M_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, _muxshift, _muxwidth, \
+ 0, _flags)
+
+
+#define SUNXI_CCU_M_WITH_GATE(_struct, _name, _parent, _reg, \
+ _mshift, _mwidth, _gate, \
+ _flags) \
+ struct ccu_div _struct = { \
+ .enable = _gate, \
+ .div = _SUNXI_CCU_DIV(_mshift, _mwidth), \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT(_name, \
+ _parent, \
+ &ccu_div_ops, \
+ _flags), \
+ }, \
+ }
+
+#define SUNXI_CCU_M(_struct, _name, _parent, _reg, _mshift, _mwidth, \
+ _flags) \
+ SUNXI_CCU_M_WITH_GATE(_struct, _name, _parent, _reg, \
+ _mshift, _mwidth, 0, _flags)
+
+static inline struct ccu_div *hw_to_ccu_div(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ return container_of(common, struct ccu_div, common);
+}
+
+extern const struct clk_ops ccu_div_ops;
+
+#endif /* _CCU_DIV_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu_frac.c b/drivers/clk/sunxi-ng/ccu_frac.c
new file mode 100644
index 000000000..5c4b10cd1
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu_frac.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2016 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+
+#include "ccu_frac.h"
+
+bool ccu_frac_helper_is_enabled(struct ccu_common *common,
+ struct _ccu_frac *cf)
+{
+ if (!(common->features & CCU_FEATURE_FRACTIONAL))
+ return false;
+
+ return !(readl(common->base + common->reg) & cf->enable);
+}
+
+void ccu_frac_helper_enable(struct ccu_common *common,
+ struct _ccu_frac *cf)
+{
+ unsigned long flags;
+ u32 reg;
+
+ if (!(common->features & CCU_FEATURE_FRACTIONAL))
+ return;
+
+ spin_lock_irqsave(common->lock, flags);
+ reg = readl(common->base + common->reg);
+ writel(reg & ~cf->enable, common->base + common->reg);
+ spin_unlock_irqrestore(common->lock, flags);
+}
+
+void ccu_frac_helper_disable(struct ccu_common *common,
+ struct _ccu_frac *cf)
+{
+ unsigned long flags;
+ u32 reg;
+
+ if (!(common->features & CCU_FEATURE_FRACTIONAL))
+ return;
+
+ spin_lock_irqsave(common->lock, flags);
+ reg = readl(common->base + common->reg);
+ writel(reg | cf->enable, common->base + common->reg);
+ spin_unlock_irqrestore(common->lock, flags);
+}
+
+bool ccu_frac_helper_has_rate(struct ccu_common *common,
+ struct _ccu_frac *cf,
+ unsigned long rate)
+{
+ if (!(common->features & CCU_FEATURE_FRACTIONAL))
+ return false;
+
+ return (cf->rates[0] == rate) || (cf->rates[1] == rate);
+}
+
+unsigned long ccu_frac_helper_read_rate(struct ccu_common *common,
+ struct _ccu_frac *cf)
+{
+ u32 reg;
+
+	pr_debug("%s: Read fractional\n", clk_hw_get_name(&common->hw));
+
+ if (!(common->features & CCU_FEATURE_FRACTIONAL))
+ return 0;
+
+	pr_debug("%s: clock is fractional (rates %lu and %lu)\n",
+		 clk_hw_get_name(&common->hw), cf->rates[0], cf->rates[1]);
+
+ reg = readl(common->base + common->reg);
+
+ printk("%s: clock reg is 0x%x (select is 0x%x)\n",
+ clk_hw_get_name(&common->hw), reg, cf->select);
+
+ return (reg & cf->select) ? cf->rates[1] : cf->rates[0];
+}
+
+int ccu_frac_helper_set_rate(struct ccu_common *common,
+ struct _ccu_frac *cf,
+ unsigned long rate)
+{
+ unsigned long flags;
+ u32 reg, sel;
+
+ if (!(common->features & CCU_FEATURE_FRACTIONAL))
+ return -EINVAL;
+
+ if (cf->rates[0] == rate)
+ sel = 0;
+ else if (cf->rates[1] == rate)
+ sel = cf->select;
+ else
+ return -EINVAL;
+
+ spin_lock_irqsave(common->lock, flags);
+ reg = readl(common->base + common->reg);
+ reg &= ~cf->select;
+ writel(reg | sel, common->base + common->reg);
+ spin_unlock_irqrestore(common->lock, flags);
+
+ return 0;
+}
diff --git a/drivers/clk/sunxi-ng/ccu_frac.h b/drivers/clk/sunxi-ng/ccu_frac.h
new file mode 100644
index 000000000..e4c670b1c
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu_frac.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2016 Maxime Ripard. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CCU_FRAC_H_
+#define _CCU_FRAC_H_
+
+#include <linux/clk-provider.h>
+
+#include "ccu_common.h"
+
+struct _ccu_frac {
+ u32 enable;
+ u32 select;
+
+ unsigned long rates[2];
+};
+
+#define _SUNXI_CCU_FRAC(_enable, _select, _rate1, _rate2) \
+ { \
+ .enable = _enable, \
+ .select = _select, \
+ .rates = { _rate1, _rate2 }, \
+ }
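+
+/*
+ * Example (hypothetical values): a PLL that produces either 270 MHz
+ * or 297 MHz in fractional mode, with the mode select bit at BIT(24)
+ * and the rate select bit at BIT(25):
+ *
+ *	.frac = _SUNXI_CCU_FRAC(BIT(24), BIT(25), 270000000, 297000000)
+ */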
+
+bool ccu_frac_helper_is_enabled(struct ccu_common *common,
+ struct _ccu_frac *cf);
+void ccu_frac_helper_enable(struct ccu_common *common,
+ struct _ccu_frac *cf);
+void ccu_frac_helper_disable(struct ccu_common *common,
+ struct _ccu_frac *cf);
+
+bool ccu_frac_helper_has_rate(struct ccu_common *common,
+ struct _ccu_frac *cf,
+ unsigned long rate);
+
+unsigned long ccu_frac_helper_read_rate(struct ccu_common *common,
+ struct _ccu_frac *cf);
+
+int ccu_frac_helper_set_rate(struct ccu_common *common,
+ struct _ccu_frac *cf,
+ unsigned long rate);
+
+#endif /* _CCU_FRAC_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu_gate.c b/drivers/clk/sunxi-ng/ccu_gate.c
new file mode 100644
index 000000000..8a81f9d4a
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu_gate.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2016 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk-provider.h>
+
+#include "ccu_gate.h"
+
+void ccu_gate_helper_disable(struct ccu_common *common, u32 gate)
+{
+ unsigned long flags;
+ u32 reg;
+
+ if (!gate)
+ return;
+
+ spin_lock_irqsave(common->lock, flags);
+
+ reg = readl(common->base + common->reg);
+ writel(reg & ~gate, common->base + common->reg);
+
+ spin_unlock_irqrestore(common->lock, flags);
+}
+
+static void ccu_gate_disable(struct clk_hw *hw)
+{
+ struct ccu_gate *cg = hw_to_ccu_gate(hw);
+
+ return ccu_gate_helper_disable(&cg->common, cg->enable);
+}
+
+int ccu_gate_helper_enable(struct ccu_common *common, u32 gate)
+{
+ unsigned long flags;
+ u32 reg;
+
+ if (!gate)
+ return 0;
+
+ spin_lock_irqsave(common->lock, flags);
+
+ reg = readl(common->base + common->reg);
+ writel(reg | gate, common->base + common->reg);
+
+ spin_unlock_irqrestore(common->lock, flags);
+
+ return 0;
+}
+
+static int ccu_gate_enable(struct clk_hw *hw)
+{
+ struct ccu_gate *cg = hw_to_ccu_gate(hw);
+
+ return ccu_gate_helper_enable(&cg->common, cg->enable);
+}
+
+int ccu_gate_helper_is_enabled(struct ccu_common *common, u32 gate)
+{
+ if (!gate)
+ return 1;
+
+ return readl(common->base + common->reg) & gate;
+}
+
+static int ccu_gate_is_enabled(struct clk_hw *hw)
+{
+ struct ccu_gate *cg = hw_to_ccu_gate(hw);
+
+ return ccu_gate_helper_is_enabled(&cg->common, cg->enable);
+}
+
+const struct clk_ops ccu_gate_ops = {
+ .disable = ccu_gate_disable,
+ .enable = ccu_gate_enable,
+ .is_enabled = ccu_gate_is_enabled,
+};
diff --git a/drivers/clk/sunxi-ng/ccu_gate.h b/drivers/clk/sunxi-ng/ccu_gate.h
new file mode 100644
index 000000000..4466169bd
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu_gate.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2016 Maxime Ripard. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CCU_GATE_H_
+#define _CCU_GATE_H_
+
+#include <linux/clk-provider.h>
+
+#include "ccu_common.h"
+
+struct ccu_gate {
+ u32 enable;
+
+ struct ccu_common common;
+};
+
+#define SUNXI_CCU_GATE(_struct, _name, _parent, _reg, _gate, _flags) \
+ struct ccu_gate _struct = { \
+ .enable = _gate, \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT(_name, \
+ _parent, \
+ &ccu_gate_ops, \
+ _flags), \
+ } \
+ }
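+
+/*
+ * Usage sketch (hypothetical values, not from this patch): a bus
+ * gate controlled by bit 16 of register 0x06c:
+ *
+ *	static SUNXI_CCU_GATE(bus_uart0_clk, "bus-uart0", "apb2",
+ *			      0x06c, BIT(16), 0);
+ */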
+
+static inline struct ccu_gate *hw_to_ccu_gate(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ return container_of(common, struct ccu_gate, common);
+}
+
+void ccu_gate_helper_disable(struct ccu_common *common, u32 gate);
+int ccu_gate_helper_enable(struct ccu_common *common, u32 gate);
+int ccu_gate_helper_is_enabled(struct ccu_common *common, u32 gate);
+
+extern const struct clk_ops ccu_gate_ops;
+
+#endif /* _CCU_GATE_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c
new file mode 100644
index 000000000..cbf33ef5f
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu_mp.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2016 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk-provider.h>
+
+#include "ccu_gate.h"
+#include "ccu_mp.h"
+
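+/*
+ * Find the M and P factors closest to the requested rate without
+ * going over it. Candidates above the target are skipped, so the
+ * unsigned subtractions in the comparison below can never wrap.
+ */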
+static void ccu_mp_find_best(unsigned long parent, unsigned long rate,
+ unsigned int max_m, unsigned int max_p,
+ unsigned int *m, unsigned int *p)
+{
+ unsigned long best_rate = 0;
+ unsigned int best_m = 0, best_p = 0;
+ unsigned int _m, _p;
+
+ for (_p = 0; _p <= max_p; _p++) {
+ for (_m = 1; _m <= max_m; _m++) {
+ unsigned long tmp_rate = (parent >> _p) / _m;
+
+ if (tmp_rate > rate)
+ continue;
+
+ if ((rate - tmp_rate) < (rate - best_rate)) {
+ best_rate = tmp_rate;
+ best_m = _m;
+ best_p = _p;
+ }
+ }
+ }
+
+ *m = best_m;
+ *p = best_p;
+}
+
+static unsigned long ccu_mp_round_rate(struct ccu_mux_internal *mux,
+ unsigned long parent_rate,
+ unsigned long rate,
+ void *data)
+{
+ struct ccu_mp *cmp = data;
+ unsigned int m, p;
+
+ ccu_mp_find_best(parent_rate, rate,
+ 1 << cmp->m.width, (1 << cmp->p.width) - 1,
+ &m, &p);
+
+ return (parent_rate >> p) / m;
+}
+
+static void ccu_mp_disable(struct clk_hw *hw)
+{
+ struct ccu_mp *cmp = hw_to_ccu_mp(hw);
+
+ return ccu_gate_helper_disable(&cmp->common, cmp->enable);
+}
+
+static int ccu_mp_enable(struct clk_hw *hw)
+{
+ struct ccu_mp *cmp = hw_to_ccu_mp(hw);
+
+ return ccu_gate_helper_enable(&cmp->common, cmp->enable);
+}
+
+static int ccu_mp_is_enabled(struct clk_hw *hw)
+{
+ struct ccu_mp *cmp = hw_to_ccu_mp(hw);
+
+ return ccu_gate_helper_is_enabled(&cmp->common, cmp->enable);
+}
+
+static unsigned long ccu_mp_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ccu_mp *cmp = hw_to_ccu_mp(hw);
+ unsigned int m, p;
+ u32 reg;
+
+ reg = readl(cmp->common.base + cmp->common.reg);
+
+ m = reg >> cmp->m.shift;
+ m &= (1 << cmp->m.width) - 1;
+
+ p = reg >> cmp->p.shift;
+ p &= (1 << cmp->p.width) - 1;
+
+ return (parent_rate >> p) / (m + 1);
+}
+
+static int ccu_mp_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct ccu_mp *cmp = hw_to_ccu_mp(hw);
+
+ return ccu_mux_helper_determine_rate(&cmp->common, &cmp->mux,
+ req, ccu_mp_round_rate, cmp);
+}
+
+static int ccu_mp_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ccu_mp *cmp = hw_to_ccu_mp(hw);
+ unsigned long flags;
+ unsigned int m, p;
+ u32 reg;
+
+ ccu_mp_find_best(parent_rate, rate,
+ 1 << cmp->m.width, (1 << cmp->p.width) - 1,
+ &m, &p);
+
+ spin_lock_irqsave(cmp->common.lock, flags);
+
+ reg = readl(cmp->common.base + cmp->common.reg);
+ reg &= ~GENMASK(cmp->m.width + cmp->m.shift - 1, cmp->m.shift);
+ reg &= ~GENMASK(cmp->p.width + cmp->p.shift - 1, cmp->p.shift);
+
+ writel(reg | (p << cmp->p.shift) | ((m - 1) << cmp->m.shift),
+ cmp->common.base + cmp->common.reg);
+
+ spin_unlock_irqrestore(cmp->common.lock, flags);
+
+ return 0;
+}
+
+static u8 ccu_mp_get_parent(struct clk_hw *hw)
+{
+ struct ccu_mp *cmp = hw_to_ccu_mp(hw);
+
+ return ccu_mux_helper_get_parent(&cmp->common, &cmp->mux);
+}
+
+static int ccu_mp_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct ccu_mp *cmp = hw_to_ccu_mp(hw);
+
+ return ccu_mux_helper_set_parent(&cmp->common, &cmp->mux, index);
+}
+
+const struct clk_ops ccu_mp_ops = {
+ .disable = ccu_mp_disable,
+ .enable = ccu_mp_enable,
+ .is_enabled = ccu_mp_is_enabled,
+
+ .get_parent = ccu_mp_get_parent,
+ .set_parent = ccu_mp_set_parent,
+
+ .determine_rate = ccu_mp_determine_rate,
+ .recalc_rate = ccu_mp_recalc_rate,
+ .set_rate = ccu_mp_set_rate,
+};
diff --git a/drivers/clk/sunxi-ng/ccu_mp.h b/drivers/clk/sunxi-ng/ccu_mp.h
new file mode 100644
index 000000000..3cf12bf95
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu_mp.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2016 Maxime Ripard. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CCU_MP_H_
+#define _CCU_MP_H_
+
+#include <linux/clk-provider.h>
+
+#include "ccu_common.h"
+#include "ccu_div.h"
+#include "ccu_mult.h"
+#include "ccu_mux.h"
+
+/*
+ * struct ccu_mp - Definition of an M-P clock
+ *
+ * Clocks based on the formula (parent >> P) / M
+ */
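+/*
+ * Worked example (hypothetical values): with a 600 MHz parent,
+ * P = 1 and M = 3 give (600000000 >> 1) / 3 = 100 MHz.
+ */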
+struct ccu_mp {
+ u32 enable;
+
+ struct _ccu_div m;
+ struct _ccu_div p;
+ struct ccu_mux_internal mux;
+ struct ccu_common common;
+};
+
+#define SUNXI_CCU_MP_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, \
+ _pshift, _pwidth, \
+ _muxshift, _muxwidth, \
+ _gate, _flags) \
+ struct ccu_mp _struct = { \
+ .enable = _gate, \
+ .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \
+ .p = _SUNXI_CCU_DIV(_pshift, _pwidth), \
+ .mux = SUNXI_CLK_MUX(_muxshift, _muxwidth), \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT_PARENTS(_name, \
+ _parents, \
+ &ccu_mp_ops, \
+ _flags), \
+ } \
+ }
+
+#define SUNXI_CCU_MP_WITH_MUX(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, \
+ _pshift, _pwidth, \
+ _muxshift, _muxwidth, \
+ _flags) \
+ SUNXI_CCU_MP_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, \
+ _pshift, _pwidth, \
+ _muxshift, _muxwidth, \
+ 0, _flags)
+
+static inline struct ccu_mp *hw_to_ccu_mp(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ return container_of(common, struct ccu_mp, common);
+}
+
+extern const struct clk_ops ccu_mp_ops;
+
+#endif /* _CCU_MP_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu_mult.h b/drivers/clk/sunxi-ng/ccu_mult.h
new file mode 100644
index 000000000..609db6610
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu_mult.h
@@ -0,0 +1,15 @@
+#ifndef _CCU_MULT_H_
+#define _CCU_MULT_H_
+
+struct _ccu_mult {
+ u8 shift;
+ u8 width;
+};
+
+#define _SUNXI_CCU_MULT(_shift, _width) \
+ { \
+ .shift = _shift, \
+ .width = _width, \
+ }
+
+#endif /* _CCU_MULT_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu_mux.c b/drivers/clk/sunxi-ng/ccu_mux.c
new file mode 100644
index 000000000..58fc36e7d
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu_mux.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2016 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk-provider.h>
+
+#include "ccu_gate.h"
+#include "ccu_mux.h"
+
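+/*
+ * Some muxes have a fixed or register-controlled pre-divider on one
+ * of their parents. This helper scales *parent_rate down so that the
+ * rest of the rate computations see the effective input rate. A
+ * negative parent_index means "use the currently selected parent".
+ */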
+void ccu_mux_helper_adjust_parent_for_prediv(struct ccu_common *common,
+ struct ccu_mux_internal *cm,
+ int parent_index,
+ unsigned long *parent_rate)
+{
+ u8 prediv = 1;
+ u32 reg;
+
+ if (!((common->features & CCU_FEATURE_FIXED_PREDIV) ||
+ (common->features & CCU_FEATURE_VARIABLE_PREDIV)))
+ return;
+
+ reg = readl(common->base + common->reg);
+ if (parent_index < 0) {
+ parent_index = reg >> cm->shift;
+ parent_index &= (1 << cm->width) - 1;
+ }
+
+ if (common->features & CCU_FEATURE_FIXED_PREDIV)
+ if (parent_index == cm->fixed_prediv.index)
+ prediv = cm->fixed_prediv.div;
+
+ if (common->features & CCU_FEATURE_VARIABLE_PREDIV)
+ if (parent_index == cm->variable_prediv.index) {
+ u8 div;
+
+ div = reg >> cm->variable_prediv.shift;
+ div &= (1 << cm->variable_prediv.width) - 1;
+ prediv = div + 1;
+ }
+
+ *parent_rate = *parent_rate / prediv;
+}
+
+int ccu_mux_helper_determine_rate(struct ccu_common *common,
+ struct ccu_mux_internal *cm,
+ struct clk_rate_request *req,
+ unsigned long (*round)(struct ccu_mux_internal *,
+ unsigned long,
+ unsigned long,
+ void *),
+ void *data)
+{
+ unsigned long best_parent_rate = 0, best_rate = 0;
+ struct clk_hw *best_parent, *hw = &common->hw;
+ unsigned int i;
+
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+ unsigned long tmp_rate, parent_rate;
+ struct clk_hw *parent;
+
+ parent = clk_hw_get_parent_by_index(hw, i);
+ if (!parent)
+ continue;
+
+ parent_rate = clk_hw_get_rate(parent);
+ ccu_mux_helper_adjust_parent_for_prediv(common, cm, i,
+ &parent_rate);
+
+ tmp_rate = round(cm, parent_rate, req->rate, data);
+ if (tmp_rate == req->rate) {
+ best_parent = parent;
+ best_parent_rate = parent_rate;
+ best_rate = tmp_rate;
+ goto out;
+ }
+
+ if ((req->rate - tmp_rate) < (req->rate - best_rate)) {
+ best_rate = tmp_rate;
+ best_parent_rate = parent_rate;
+ best_parent = parent;
+ }
+ }
+
+ if (best_rate == 0)
+ return -EINVAL;
+
+out:
+ req->best_parent_hw = best_parent;
+ req->best_parent_rate = best_parent_rate;
+ req->rate = best_rate;
+ return 0;
+}
+
+u8 ccu_mux_helper_get_parent(struct ccu_common *common,
+ struct ccu_mux_internal *cm)
+{
+ u32 reg;
+ u8 parent;
+
+ reg = readl(common->base + common->reg);
+ parent = reg >> cm->shift;
+ parent &= (1 << cm->width) - 1;
+
+ return parent;
+}
+
+int ccu_mux_helper_set_parent(struct ccu_common *common,
+ struct ccu_mux_internal *cm,
+ u8 index)
+{
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(common->lock, flags);
+
+ reg = readl(common->base + common->reg);
+ reg &= ~GENMASK(cm->width + cm->shift - 1, cm->shift);
+ writel(reg | (index << cm->shift), common->base + common->reg);
+
+ spin_unlock_irqrestore(common->lock, flags);
+
+ return 0;
+}
+
+static void ccu_mux_disable(struct clk_hw *hw)
+{
+ struct ccu_mux *cm = hw_to_ccu_mux(hw);
+
+ return ccu_gate_helper_disable(&cm->common, cm->enable);
+}
+
+static int ccu_mux_enable(struct clk_hw *hw)
+{
+ struct ccu_mux *cm = hw_to_ccu_mux(hw);
+
+ return ccu_gate_helper_enable(&cm->common, cm->enable);
+}
+
+static int ccu_mux_is_enabled(struct clk_hw *hw)
+{
+ struct ccu_mux *cm = hw_to_ccu_mux(hw);
+
+ return ccu_gate_helper_is_enabled(&cm->common, cm->enable);
+}
+
+static u8 ccu_mux_get_parent(struct clk_hw *hw)
+{
+ struct ccu_mux *cm = hw_to_ccu_mux(hw);
+
+ return ccu_mux_helper_get_parent(&cm->common, &cm->mux);
+}
+
+static int ccu_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct ccu_mux *cm = hw_to_ccu_mux(hw);
+
+ return ccu_mux_helper_set_parent(&cm->common, &cm->mux, index);
+}
+
+static unsigned long ccu_mux_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ccu_mux *cm = hw_to_ccu_mux(hw);
+
+ ccu_mux_helper_adjust_parent_for_prediv(&cm->common, &cm->mux, -1,
+ &parent_rate);
+
+ return parent_rate;
+}
+
+const struct clk_ops ccu_mux_ops = {
+ .disable = ccu_mux_disable,
+ .enable = ccu_mux_enable,
+ .is_enabled = ccu_mux_is_enabled,
+
+ .get_parent = ccu_mux_get_parent,
+ .set_parent = ccu_mux_set_parent,
+
+ .determine_rate = __clk_mux_determine_rate,
+ .recalc_rate = ccu_mux_recalc_rate,
+};
diff --git a/drivers/clk/sunxi-ng/ccu_mux.h b/drivers/clk/sunxi-ng/ccu_mux.h
new file mode 100644
index 000000000..945082631
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu_mux.h
@@ -0,0 +1,91 @@
+#ifndef _CCU_MUX_H_
+#define _CCU_MUX_H_
+
+#include <linux/clk-provider.h>
+
+#include "ccu_common.h"
+
+struct ccu_mux_internal {
+ u8 shift;
+ u8 width;
+
+ struct {
+ u8 index;
+ u8 div;
+ } fixed_prediv;
+
+ struct {
+ u8 index;
+ u8 shift;
+ u8 width;
+ } variable_prediv;
+};
+
+#define SUNXI_CLK_MUX(_shift, _width) \
+ { \
+ .shift = _shift, \
+ .width = _width, \
+ }
+
+struct ccu_mux {
+ u16 reg;
+ u32 enable;
+
+ struct ccu_mux_internal mux;
+ struct ccu_common common;
+};
+
+#define SUNXI_CCU_MUX(_struct, _name, _parents, _reg, _shift, _width, _flags) \
+ struct ccu_mux _struct = { \
+ .mux = SUNXI_CLK_MUX(_shift, _width), \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT_PARENTS(_name, \
+ _parents, \
+ &ccu_mux_ops, \
+ _flags), \
+ } \
+ }
+
+#define SUNXI_CCU_MUX_WITH_GATE(_struct, _name, _parents, _reg, \
+ _shift, _width, _gate, _flags) \
+ struct ccu_mux _struct = { \
+ .enable = _gate, \
+ .mux = SUNXI_CLK_MUX(_shift, _width), \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT_PARENTS(_name, \
+ _parents, \
+ &ccu_mux_ops, \
+ _flags), \
+ } \
+ }
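+
+/*
+ * Usage sketch (hypothetical values, not from this patch): a 2-bit
+ * mux in bits [25:24] of register 0x0cc, gated by bit 31:
+ *
+ *	static const char * const foo_parents[] = { "osc32k", "osc24M" };
+ *	static SUNXI_CCU_MUX_WITH_GATE(foo_clk, "foo", foo_parents,
+ *				       0x0cc, 24, 2, BIT(31), 0);
+ */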
+
+static inline struct ccu_mux *hw_to_ccu_mux(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ return container_of(common, struct ccu_mux, common);
+}
+
+extern const struct clk_ops ccu_mux_ops;
+
+void ccu_mux_helper_adjust_parent_for_prediv(struct ccu_common *common,
+ struct ccu_mux_internal *cm,
+ int parent_index,
+ unsigned long *parent_rate);
+int ccu_mux_helper_determine_rate(struct ccu_common *common,
+ struct ccu_mux_internal *cm,
+ struct clk_rate_request *req,
+ unsigned long (*round)(struct ccu_mux_internal *,
+ unsigned long,
+ unsigned long,
+ void *),
+ void *data);
+u8 ccu_mux_helper_get_parent(struct ccu_common *common,
+ struct ccu_mux_internal *cm);
+int ccu_mux_helper_set_parent(struct ccu_common *common,
+ struct ccu_mux_internal *cm,
+ u8 index);
+
+#endif /* _CCU_MUX_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu_nk.c b/drivers/clk/sunxi-ng/ccu_nk.c
new file mode 100644
index 000000000..d6fafb397
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu_nk.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2016 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/rational.h>
+
+#include "ccu_gate.h"
+#include "ccu_nk.h"
+
+static void ccu_nk_find_best(unsigned long parent, unsigned long rate,
+ unsigned int max_n, unsigned int max_k,
+ unsigned int *n, unsigned int *k)
+{
+ unsigned long best_rate = 0;
+ unsigned int best_k = 0, best_n = 0;
+ unsigned int _k, _n;
+
+ for (_k = 1; _k <= max_k; _k++) {
+ for (_n = 1; _n <= max_n; _n++) {
+ unsigned long tmp_rate = parent * _n * _k;
+
+ if (tmp_rate > rate)
+ continue;
+
+ if ((rate - tmp_rate) < (rate - best_rate)) {
+ best_rate = tmp_rate;
+ best_k = _k;
+ best_n = _n;
+ }
+ }
+ }
+
+ *k = best_k;
+ *n = best_n;
+}
+
+static void ccu_nk_disable(struct clk_hw *hw)
+{
+ struct ccu_nk *nk = hw_to_ccu_nk(hw);
+
+ return ccu_gate_helper_disable(&nk->common, nk->enable);
+}
+
+static int ccu_nk_enable(struct clk_hw *hw)
+{
+ struct ccu_nk *nk = hw_to_ccu_nk(hw);
+
+ return ccu_gate_helper_enable(&nk->common, nk->enable);
+}
+
+static int ccu_nk_is_enabled(struct clk_hw *hw)
+{
+ struct ccu_nk *nk = hw_to_ccu_nk(hw);
+
+ return ccu_gate_helper_is_enabled(&nk->common, nk->enable);
+}
+
+static unsigned long ccu_nk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ccu_nk *nk = hw_to_ccu_nk(hw);
+ unsigned long rate, n, k;
+ u32 reg;
+
+ reg = readl(nk->common.base + nk->common.reg);
+
+ n = reg >> nk->n.shift;
+ n &= (1 << nk->n.width) - 1;
+
+ k = reg >> nk->k.shift;
+ k &= (1 << nk->k.width) - 1;
+
+ rate = parent_rate * (n + 1) * (k + 1);
+
+ if (nk->common.features & CCU_FEATURE_FIXED_POSTDIV)
+ rate /= nk->fixed_post_div;
+
+ return rate;
+}
+
+static long ccu_nk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct ccu_nk *nk = hw_to_ccu_nk(hw);
+ unsigned int n, k;
+
+ if (nk->common.features & CCU_FEATURE_FIXED_POSTDIV)
+ rate *= nk->fixed_post_div;
+
+ ccu_nk_find_best(*parent_rate, rate,
+ 1 << nk->n.width, 1 << nk->k.width,
+ &n, &k);
+
+ rate = *parent_rate * n * k;
+ if (nk->common.features & CCU_FEATURE_FIXED_POSTDIV)
+ rate = rate / nk->fixed_post_div;
+
+ return rate;
+}
+
+static int ccu_nk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ccu_nk *nk = hw_to_ccu_nk(hw);
+ unsigned long flags;
+ unsigned int n, k;
+ u32 reg;
+
+ if (nk->common.features & CCU_FEATURE_FIXED_POSTDIV)
+ rate = rate * nk->fixed_post_div;
+
+ ccu_nk_find_best(parent_rate, rate,
+ 1 << nk->n.width, 1 << nk->k.width,
+ &n, &k);
+
+ spin_lock_irqsave(nk->common.lock, flags);
+
+ reg = readl(nk->common.base + nk->common.reg);
+ reg &= ~GENMASK(nk->n.width + nk->n.shift - 1, nk->n.shift);
+ reg &= ~GENMASK(nk->k.width + nk->k.shift - 1, nk->k.shift);
+
+ writel(reg | ((k - 1) << nk->k.shift) | ((n - 1) << nk->n.shift),
+ nk->common.base + nk->common.reg);
+
+ spin_unlock_irqrestore(nk->common.lock, flags);
+
+ ccu_helper_wait_for_lock(&nk->common, nk->lock);
+
+ return 0;
+}
+
+const struct clk_ops ccu_nk_ops = {
+ .disable = ccu_nk_disable,
+ .enable = ccu_nk_enable,
+ .is_enabled = ccu_nk_is_enabled,
+
+ .recalc_rate = ccu_nk_recalc_rate,
+ .round_rate = ccu_nk_round_rate,
+ .set_rate = ccu_nk_set_rate,
+};
diff --git a/drivers/clk/sunxi-ng/ccu_nk.h b/drivers/clk/sunxi-ng/ccu_nk.h
new file mode 100644
index 000000000..4b52da0c2
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu_nk.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2016 Maxime Ripard. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CCU_NK_H_
+#define _CCU_NK_H_
+
+#include <linux/clk-provider.h>
+
+#include "ccu_common.h"
+#include "ccu_div.h"
+#include "ccu_mult.h"
+
+/*
+ * struct ccu_nk - Definition of an N-K clock
+ *
+ * Clocks based on the formula parent * N * K
+ */
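+/*
+ * Worked example (hypothetical values): with a 24 MHz parent,
+ * N = 4 and K = 2 give 24000000 * 4 * 2 = 192 MHz, before any
+ * fixed post-divider is applied.
+ */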
+struct ccu_nk {
+ u16 reg;
+ u32 enable;
+ u32 lock;
+
+ struct _ccu_mult n;
+ struct _ccu_mult k;
+
+ unsigned int fixed_post_div;
+
+ struct ccu_common common;
+};
+
+#define SUNXI_CCU_NK_WITH_GATE_LOCK_POSTDIV(_struct, _name, _parent, _reg, \
+ _nshift, _nwidth, \
+ _kshift, _kwidth, \
+ _gate, _lock, _postdiv, \
+ _flags) \
+ struct ccu_nk _struct = { \
+ .enable = _gate, \
+ .lock = _lock, \
+ .k = _SUNXI_CCU_MULT(_kshift, _kwidth), \
+ .n = _SUNXI_CCU_MULT(_nshift, _nwidth), \
+ .fixed_post_div = _postdiv, \
+ .common = { \
+ .reg = _reg, \
+ .features = CCU_FEATURE_FIXED_POSTDIV, \
+ .hw.init = CLK_HW_INIT(_name, \
+ _parent, \
+ &ccu_nk_ops, \
+ _flags), \
+ }, \
+ }
+
+static inline struct ccu_nk *hw_to_ccu_nk(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ return container_of(common, struct ccu_nk, common);
+}
+
+extern const struct clk_ops ccu_nk_ops;
+
+#endif /* _CCU_NK_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu_nkm.c b/drivers/clk/sunxi-ng/ccu_nkm.c
new file mode 100644
index 000000000..2071822b1
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu_nkm.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2016 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/rational.h>
+
+#include "ccu_gate.h"
+#include "ccu_nkm.h"
+
+struct _ccu_nkm {
+ unsigned long n, max_n;
+ unsigned long k, max_k;
+ unsigned long m, max_m;
+};
+
+static void ccu_nkm_find_best(unsigned long parent, unsigned long rate,
+ struct _ccu_nkm *nkm)
+{
+ unsigned long best_rate = 0;
+ unsigned long best_n = 0, best_k = 0, best_m = 0;
+ unsigned long _n, _k, _m;
+
+ for (_k = 1; _k <= nkm->max_k; _k++) {
+ unsigned long tmp_rate;
+
+ rational_best_approximation(rate / _k, parent,
+ nkm->max_n, nkm->max_m, &_n, &_m);
+
+ tmp_rate = parent * _n * _k / _m;
+
+ if (tmp_rate > rate)
+ continue;
+
+ if ((rate - tmp_rate) < (rate - best_rate)) {
+ best_rate = tmp_rate;
+ best_n = _n;
+ best_k = _k;
+ best_m = _m;
+ }
+ }
+
+ nkm->n = best_n;
+ nkm->k = best_k;
+ nkm->m = best_m;
+}
+
+static void ccu_nkm_disable(struct clk_hw *hw)
+{
+ struct ccu_nkm *nkm = hw_to_ccu_nkm(hw);
+
+ return ccu_gate_helper_disable(&nkm->common, nkm->enable);
+}
+
+static int ccu_nkm_enable(struct clk_hw *hw)
+{
+ struct ccu_nkm *nkm = hw_to_ccu_nkm(hw);
+
+ return ccu_gate_helper_enable(&nkm->common, nkm->enable);
+}
+
+static int ccu_nkm_is_enabled(struct clk_hw *hw)
+{
+ struct ccu_nkm *nkm = hw_to_ccu_nkm(hw);
+
+ return ccu_gate_helper_is_enabled(&nkm->common, nkm->enable);
+}
+
+static unsigned long ccu_nkm_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ccu_nkm *nkm = hw_to_ccu_nkm(hw);
+ unsigned long n, m, k;
+ u32 reg;
+
+ reg = readl(nkm->common.base + nkm->common.reg);
+
+ n = reg >> nkm->n.shift;
+ n &= (1 << nkm->n.width) - 1;
+
+ k = reg >> nkm->k.shift;
+ k &= (1 << nkm->k.width) - 1;
+
+ m = reg >> nkm->m.shift;
+ m &= (1 << nkm->m.width) - 1;
+
+ return parent_rate * (n + 1) * (k + 1) / (m + 1);
+}
+
+static long ccu_nkm_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct ccu_nkm *nkm = hw_to_ccu_nkm(hw);
+ struct _ccu_nkm _nkm;
+
+ _nkm.max_n = 1 << nkm->n.width;
+ _nkm.max_k = 1 << nkm->k.width;
+ _nkm.max_m = 1 << nkm->m.width;
+
+ ccu_nkm_find_best(*parent_rate, rate, &_nkm);
+
+ return *parent_rate * _nkm.n * _nkm.k / _nkm.m;
+}
+
+static int ccu_nkm_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ccu_nkm *nkm = hw_to_ccu_nkm(hw);
+ struct _ccu_nkm _nkm;
+ unsigned long flags;
+ u32 reg;
+
+ _nkm.max_n = 1 << nkm->n.width;
+ _nkm.max_k = 1 << nkm->k.width;
+ _nkm.max_m = 1 << nkm->m.width;
+
+ ccu_nkm_find_best(parent_rate, rate, &_nkm);
+
+ spin_lock_irqsave(nkm->common.lock, flags);
+
+ reg = readl(nkm->common.base + nkm->common.reg);
+ reg &= ~GENMASK(nkm->n.width + nkm->n.shift - 1, nkm->n.shift);
+ reg &= ~GENMASK(nkm->k.width + nkm->k.shift - 1, nkm->k.shift);
+ reg &= ~GENMASK(nkm->m.width + nkm->m.shift - 1, nkm->m.shift);
+
+ reg |= (_nkm.n - 1) << nkm->n.shift;
+ reg |= (_nkm.k - 1) << nkm->k.shift;
+ reg |= (_nkm.m - 1) << nkm->m.shift;
+
+ writel(reg, nkm->common.base + nkm->common.reg);
+
+ spin_unlock_irqrestore(nkm->common.lock, flags);
+
+ ccu_helper_wait_for_lock(&nkm->common, nkm->lock);
+
+ return 0;
+}
+
+const struct clk_ops ccu_nkm_ops = {
+ .disable = ccu_nkm_disable,
+ .enable = ccu_nkm_enable,
+ .is_enabled = ccu_nkm_is_enabled,
+
+ .recalc_rate = ccu_nkm_recalc_rate,
+ .round_rate = ccu_nkm_round_rate,
+ .set_rate = ccu_nkm_set_rate,
+};
diff --git a/drivers/clk/sunxi-ng/ccu_nkm.h b/drivers/clk/sunxi-ng/ccu_nkm.h
new file mode 100644
index 000000000..1936ac1c6
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu_nkm.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2016 Maxime Ripard. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CCU_NKM_H_
+#define _CCU_NKM_H_
+
+#include <linux/clk-provider.h>
+
+#include "ccu_common.h"
+#include "ccu_div.h"
+#include "ccu_mult.h"
+
+/*
+ * struct ccu_nkm - Definition of an N-K-M clock
+ *
+ * Clocks based on the formula parent * N * K / M
+ */
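+/*
+ * Worked example (hypothetical values): with a 24 MHz parent,
+ * N = 11, K = 2 and M = 2 give 24000000 * 11 * 2 / 2 = 264 MHz.
+ */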
+struct ccu_nkm {
+ u32 enable;
+ u32 lock;
+
+ struct _ccu_mult n;
+ struct _ccu_mult k;
+ struct _ccu_div m;
+
+ struct ccu_common common;
+};
+
+#define SUNXI_CCU_NKM_WITH_GATE_LOCK(_struct, _name, _parent, _reg, \
+ _nshift, _nwidth, \
+ _kshift, _kwidth, \
+ _mshift, _mwidth, \
+ _gate, _lock, _flags) \
+ struct ccu_nkm _struct = { \
+ .enable = _gate, \
+ .lock = _lock, \
+ .k = _SUNXI_CCU_MULT(_kshift, _kwidth), \
+ .n = _SUNXI_CCU_MULT(_nshift, _nwidth), \
+ .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT(_name, \
+ _parent, \
+ &ccu_nkm_ops, \
+ _flags), \
+ }, \
+ }
+
+static inline struct ccu_nkm *hw_to_ccu_nkm(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ return container_of(common, struct ccu_nkm, common);
+}
+
+extern const struct clk_ops ccu_nkm_ops;
+
+#endif /* _CCU_NKM_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c
new file mode 100644
index 000000000..9f2b98e19
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu_nkmp.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2016 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/rational.h>
+
+#include "ccu_gate.h"
+#include "ccu_nkmp.h"
+
+struct _ccu_nkmp {
+ unsigned long n, max_n;
+ unsigned long k, max_k;
+ unsigned long m, max_m;
+ unsigned long p, max_p;
+};
+
+static void ccu_nkmp_find_best(unsigned long parent, unsigned long rate,
+ struct _ccu_nkmp *nkmp)
+{
+ unsigned long best_rate = 0;
+ unsigned long best_n = 0, best_k = 0, best_m = 0, best_p = 0;
+ unsigned long _n, _k, _m, _p;
+
+ for (_k = 1; _k <= nkmp->max_k; _k++) {
+ for (_p = 0; _p <= nkmp->max_p; _p++) {
+ unsigned long tmp_rate;
+
+ rational_best_approximation(rate / _k, parent >> _p,
+ nkmp->max_n, nkmp->max_m,
+ &_n, &_m);
+
+ tmp_rate = (parent * _n * _k >> _p) / _m;
+
+ if (tmp_rate > rate)
+ continue;
+
+ if ((rate - tmp_rate) < (rate - best_rate)) {
+ best_rate = tmp_rate;
+ best_n = _n;
+ best_k = _k;
+ best_m = _m;
+ best_p = _p;
+ }
+ }
+ }
+
+ nkmp->n = best_n;
+ nkmp->k = best_k;
+ nkmp->m = best_m;
+ nkmp->p = best_p;
+}
+
+static void ccu_nkmp_disable(struct clk_hw *hw)
+{
+ struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
+
+ return ccu_gate_helper_disable(&nkmp->common, nkmp->enable);
+}
+
+static int ccu_nkmp_enable(struct clk_hw *hw)
+{
+ struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
+
+ return ccu_gate_helper_enable(&nkmp->common, nkmp->enable);
+}
+
+static int ccu_nkmp_is_enabled(struct clk_hw *hw)
+{
+ struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
+
+ return ccu_gate_helper_is_enabled(&nkmp->common, nkmp->enable);
+}
+
+static unsigned long ccu_nkmp_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
+ unsigned long n, m, k, p;
+ u32 reg;
+
+ reg = readl(nkmp->common.base + nkmp->common.reg);
+
+ n = reg >> nkmp->n.shift;
+ n &= (1 << nkmp->n.width) - 1;
+
+ k = reg >> nkmp->k.shift;
+ k &= (1 << nkmp->k.width) - 1;
+
+ m = reg >> nkmp->m.shift;
+ m &= (1 << nkmp->m.width) - 1;
+
+ p = reg >> nkmp->p.shift;
+ p &= (1 << nkmp->p.width) - 1;
+
+ return (parent_rate * (n + 1) * (k + 1) >> p) / (m + 1);
+}
+
+static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
+ struct _ccu_nkmp _nkmp;
+
+ _nkmp.max_n = 1 << nkmp->n.width;
+ _nkmp.max_k = 1 << nkmp->k.width;
+ _nkmp.max_m = 1 << nkmp->m.width;
+ _nkmp.max_p = (1 << nkmp->p.width) - 1;
+
+ ccu_nkmp_find_best(*parent_rate, rate,
+ &_nkmp);
+
+ return (*parent_rate * _nkmp.n * _nkmp.k >> _nkmp.p) / _nkmp.m;
+}
+
+static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
+ struct _ccu_nkmp _nkmp;
+ unsigned long flags;
+ u32 reg;
+
+ _nkmp.max_n = 1 << nkmp->n.width;
+ _nkmp.max_k = 1 << nkmp->k.width;
+ _nkmp.max_m = 1 << nkmp->m.width;
+ _nkmp.max_p = (1 << nkmp->p.width) - 1;
+
+ ccu_nkmp_find_best(parent_rate, rate, &_nkmp);
+
+ spin_lock_irqsave(nkmp->common.lock, flags);
+
+ reg = readl(nkmp->common.base + nkmp->common.reg);
+ reg &= ~GENMASK(nkmp->n.width + nkmp->n.shift - 1, nkmp->n.shift);
+ reg &= ~GENMASK(nkmp->k.width + nkmp->k.shift - 1, nkmp->k.shift);
+ reg &= ~GENMASK(nkmp->m.width + nkmp->m.shift - 1, nkmp->m.shift);
+ reg &= ~GENMASK(nkmp->p.width + nkmp->p.shift - 1, nkmp->p.shift);
+
+ reg |= (_nkmp.n - 1) << nkmp->n.shift;
+ reg |= (_nkmp.k - 1) << nkmp->k.shift;
+ reg |= (_nkmp.m - 1) << nkmp->m.shift;
+ reg |= _nkmp.p << nkmp->p.shift;
+
+ writel(reg, nkmp->common.base + nkmp->common.reg);
+
+ spin_unlock_irqrestore(nkmp->common.lock, flags);
+
+ ccu_helper_wait_for_lock(&nkmp->common, nkmp->lock);
+
+ return 0;
+}
+
+const struct clk_ops ccu_nkmp_ops = {
+ .disable = ccu_nkmp_disable,
+ .enable = ccu_nkmp_enable,
+ .is_enabled = ccu_nkmp_is_enabled,
+
+ .recalc_rate = ccu_nkmp_recalc_rate,
+ .round_rate = ccu_nkmp_round_rate,
+ .set_rate = ccu_nkmp_set_rate,
+};
diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.h b/drivers/clk/sunxi-ng/ccu_nkmp.h
new file mode 100644
index 000000000..5adb0c92a
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu_nkmp.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2016 Maxime Ripard. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CCU_NKMP_H_
+#define _CCU_NKMP_H_
+
+#include <linux/clk-provider.h>
+
+#include "ccu_common.h"
+#include "ccu_div.h"
+#include "ccu_mult.h"
+
+/*
+ * struct ccu_nkmp - Definition of an N-K-M-P clock
+ *
+ * Clocks based on the formula (parent * N * K >> P) / M
+ */
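+/*
+ * Worked example (hypothetical values): with a 24 MHz parent,
+ * N = 25, K = 2, P = 1 and M = 1 give
+ * (24000000 * 25 * 2 >> 1) / 1 = 600 MHz.
+ */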
+struct ccu_nkmp {
+ u32 enable;
+ u32 lock;
+
+ struct _ccu_mult n;
+ struct _ccu_mult k;
+ struct _ccu_div m;
+ struct _ccu_div p;
+
+ struct ccu_common common;
+};
+
+#define SUNXI_CCU_NKMP_WITH_GATE_LOCK(_struct, _name, _parent, _reg, \
+ _nshift, _nwidth, \
+ _kshift, _kwidth, \
+ _mshift, _mwidth, \
+ _pshift, _pwidth, \
+ _gate, _lock, _flags) \
+ struct ccu_nkmp _struct = { \
+ .enable = _gate, \
+ .lock = _lock, \
+ .n = _SUNXI_CCU_MULT(_nshift, _nwidth), \
+ .k = _SUNXI_CCU_MULT(_kshift, _kwidth), \
+ .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \
+ .p = _SUNXI_CCU_DIV(_pshift, _pwidth), \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT(_name, \
+ _parent, \
+ &ccu_nkmp_ops, \
+ _flags), \
+ }, \
+ }
+
+static inline struct ccu_nkmp *hw_to_ccu_nkmp(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ return container_of(common, struct ccu_nkmp, common);
+}
+
+extern const struct clk_ops ccu_nkmp_ops;
+
+#endif /* _CCU_NKMP_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c
new file mode 100644
index 000000000..e35ddd8ee
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu_nm.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2016 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/rational.h>
+
+#include "ccu_frac.h"
+#include "ccu_gate.h"
+#include "ccu_nm.h"
+
+static void ccu_nm_disable(struct clk_hw *hw)
+{
+ struct ccu_nm *nm = hw_to_ccu_nm(hw);
+
+ return ccu_gate_helper_disable(&nm->common, nm->enable);
+}
+
+static int ccu_nm_enable(struct clk_hw *hw)
+{
+ struct ccu_nm *nm = hw_to_ccu_nm(hw);
+
+ return ccu_gate_helper_enable(&nm->common, nm->enable);
+}
+
+static int ccu_nm_is_enabled(struct clk_hw *hw)
+{
+ struct ccu_nm *nm = hw_to_ccu_nm(hw);
+
+ return ccu_gate_helper_is_enabled(&nm->common, nm->enable);
+}
+
+static unsigned long ccu_nm_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ccu_nm *nm = hw_to_ccu_nm(hw);
+ unsigned long n, m;
+ u32 reg;
+
+ if (ccu_frac_helper_is_enabled(&nm->common, &nm->frac))
+ return ccu_frac_helper_read_rate(&nm->common, &nm->frac);
+
+ reg = readl(nm->common.base + nm->common.reg);
+
+ n = reg >> nm->n.shift;
+ n &= (1 << nm->n.width) - 1;
+
+ m = reg >> nm->m.shift;
+ m &= (1 << nm->m.width) - 1;
+
+ return parent_rate * (n + 1) / (m + 1);
+}
+
+static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct ccu_nm *nm = hw_to_ccu_nm(hw);
+ unsigned long n, m;
+
+ rational_best_approximation(rate, *parent_rate,
+ 1 << nm->n.width, 1 << nm->m.width,
+ &n, &m);
+
+ return *parent_rate * n / m;
+}
+
+static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ccu_nm *nm = hw_to_ccu_nm(hw);
+ unsigned long flags;
+ unsigned long n, m;
+ u32 reg;
+
+ if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate))
+ return ccu_frac_helper_set_rate(&nm->common, &nm->frac, rate);
+ else
+ ccu_frac_helper_disable(&nm->common, &nm->frac);
+
+ rational_best_approximation(rate, parent_rate,
+ 1 << nm->n.width, 1 << nm->m.width,
+ &n, &m);
+
+ spin_lock_irqsave(nm->common.lock, flags);
+
+ reg = readl(nm->common.base + nm->common.reg);
+ reg &= ~GENMASK(nm->n.width + nm->n.shift - 1, nm->n.shift);
+ reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift);
+
+ writel(reg | ((m - 1) << nm->m.shift) | ((n - 1) << nm->n.shift),
+ nm->common.base + nm->common.reg);
+
+ spin_unlock_irqrestore(nm->common.lock, flags);
+
+ ccu_helper_wait_for_lock(&nm->common, nm->lock);
+
+ return 0;
+}
+
+const struct clk_ops ccu_nm_ops = {
+ .disable = ccu_nm_disable,
+ .enable = ccu_nm_enable,
+ .is_enabled = ccu_nm_is_enabled,
+
+ .recalc_rate = ccu_nm_recalc_rate,
+ .round_rate = ccu_nm_round_rate,
+ .set_rate = ccu_nm_set_rate,
+};
diff --git a/drivers/clk/sunxi-ng/ccu_nm.h b/drivers/clk/sunxi-ng/ccu_nm.h
new file mode 100644
index 000000000..0b7bcd33a
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu_nm.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2016 Maxime Ripard. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CCU_NM_H_
+#define _CCU_NM_H_
+
+#include <linux/clk-provider.h>
+
+#include "ccu_common.h"
+#include "ccu_div.h"
+#include "ccu_frac.h"
+#include "ccu_mult.h"
+
+/*
+ * struct ccu_nm - Definition of an N-M clock
+ *
+ * Clocks based on the formula parent * N / M
+ */
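+/*
+ * Worked example (hypothetical values): with a 24 MHz parent,
+ * N = 99 and M = 8 give 24000000 * 99 / 8 = 297 MHz.
+ */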
+struct ccu_nm {
+ u32 enable;
+ u32 lock;
+
+ struct _ccu_mult n;
+ struct _ccu_div m;
+ struct _ccu_frac frac;
+
+ struct ccu_common common;
+};
+
+#define SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(_struct, _name, _parent, _reg, \
+ _nshift, _nwidth, \
+ _mshift, _mwidth, \
+ _frac_en, _frac_sel, \
+ _frac_rate_0, _frac_rate_1, \
+ _gate, _lock, _flags) \
+ struct ccu_nm _struct = { \
+ .enable = _gate, \
+ .lock = _lock, \
+ .n = _SUNXI_CCU_MULT(_nshift, _nwidth), \
+ .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \
+ .frac = _SUNXI_CCU_FRAC(_frac_en, _frac_sel, \
+ _frac_rate_0, \
+ _frac_rate_1), \
+ .common = { \
+ .reg = _reg, \
+ .features = CCU_FEATURE_FRACTIONAL, \
+ .hw.init = CLK_HW_INIT(_name, \
+ _parent, \
+ &ccu_nm_ops, \
+ _flags), \
+ }, \
+ }
+
+#define SUNXI_CCU_NM_WITH_GATE_LOCK(_struct, _name, _parent, _reg, \
+ _nshift, _nwidth, \
+ _mshift, _mwidth, \
+ _gate, _lock, _flags) \
+ struct ccu_nm _struct = { \
+ .enable = _gate, \
+ .lock = _lock, \
+ .n = _SUNXI_CCU_MULT(_nshift, _nwidth), \
+ .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT(_name, \
+ _parent, \
+ &ccu_nm_ops, \
+ _flags), \
+ }, \
+ }
+
+static inline struct ccu_nm *hw_to_ccu_nm(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ return container_of(common, struct ccu_nm, common);
+}
+
+extern const struct clk_ops ccu_nm_ops;
+
+#endif /* _CCU_NM_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu_phase.c b/drivers/clk/sunxi-ng/ccu_phase.c
new file mode 100644
index 000000000..400c58ad7
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu_phase.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2016 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+
+#include "ccu_phase.h"
+
+static int ccu_phase_get_phase(struct clk_hw *hw)
+{
+ struct ccu_phase *phase = hw_to_ccu_phase(hw);
+ struct clk_hw *parent, *grandparent;
+ unsigned int parent_rate, grandparent_rate;
+ u16 step, parent_div;
+ u32 reg;
+ u8 delay;
+
+ reg = readl(phase->common.base + phase->common.reg);
+ delay = (reg >> phase->shift);
+ delay &= (1 << phase->width) - 1;
+
+ if (!delay)
+ return 180;
+
+ /* Get our parent clock, it's the one that can adjust its rate */
+ parent = clk_hw_get_parent(hw);
+ if (!parent)
+ return -EINVAL;
+
+ /* And its rate */
+ parent_rate = clk_hw_get_rate(parent);
+ if (!parent_rate)
+ return -EINVAL;
+
+ /* Now, get our parent's parent (most likely some PLL) */
+ grandparent = clk_hw_get_parent(parent);
+ if (!grandparent)
+ return -EINVAL;
+
+ /* And its rate */
+ grandparent_rate = clk_hw_get_rate(grandparent);
+ if (!grandparent_rate)
+ return -EINVAL;
+
+ /* Get our parent clock divider */
+ parent_div = grandparent_rate / parent_rate;
+
+ step = DIV_ROUND_CLOSEST(360, parent_div);
+ return delay * step;
+}
+
+static int ccu_phase_set_phase(struct clk_hw *hw, int degrees)
+{
+ struct ccu_phase *phase = hw_to_ccu_phase(hw);
+ struct clk_hw *parent, *grandparent;
+ unsigned int parent_rate, grandparent_rate;
+ unsigned long flags;
+ u32 reg;
+ u8 delay;
+
+ /* Get our parent clock, it's the one that can adjust its rate */
+ parent = clk_hw_get_parent(hw);
+ if (!parent)
+ return -EINVAL;
+
+ /* And its rate */
+ parent_rate = clk_hw_get_rate(parent);
+ if (!parent_rate)
+ return -EINVAL;
+
+ /* Now, get our parent's parent (most likely some PLL) */
+ grandparent = clk_hw_get_parent(parent);
+ if (!grandparent)
+ return -EINVAL;
+
+ /* And its rate */
+ grandparent_rate = clk_hw_get_rate(grandparent);
+ if (!grandparent_rate)
+ return -EINVAL;
+
+ if (degrees != 180) {
+ u16 step, parent_div;
+
+ /* Get our parent divider */
+ parent_div = grandparent_rate / parent_rate;
+
+ /*
+ * We can only outphase the clocks by a multiple of
+ * the PLL's period.
+ *
+ * Since our parent clock is only a divider, the
+ * formula for the outphasing in degrees is:
+ *
+ * deg = 360 * delta / period
+ *
+ * Simplifying this formula shows that all we care
+ * about is the number of periods we want to outphase
+ * our clock by, and the divider set by our parent
+ * clock.
+ */
+ step = DIV_ROUND_CLOSEST(360, parent_div);
+ delay = DIV_ROUND_CLOSEST(degrees, step);
+ } else {
+ delay = 0;
+ }
+
+ spin_lock_irqsave(phase->common.lock, flags);
+ reg = readl(phase->common.base + phase->common.reg);
+ reg &= ~GENMASK(phase->width + phase->shift - 1, phase->shift);
+ writel(reg | (delay << phase->shift),
+ phase->common.base + phase->common.reg);
+ spin_unlock_irqrestore(phase->common.lock, flags);
+
+ return 0;
+}
+
+const struct clk_ops ccu_phase_ops = {
+ .get_phase = ccu_phase_get_phase,
+ .set_phase = ccu_phase_set_phase,
+};
diff --git a/drivers/clk/sunxi-ng/ccu_phase.h b/drivers/clk/sunxi-ng/ccu_phase.h
new file mode 100644
index 000000000..75a091a4c
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu_phase.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2016 Maxime Ripard. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CCU_PHASE_H_
+#define _CCU_PHASE_H_
+
+#include <linux/clk-provider.h>
+
+#include "ccu_common.h"
+
+struct ccu_phase {
+ u8 shift;
+ u8 width;
+
+ struct ccu_common common;
+};
+
+#define SUNXI_CCU_PHASE(_struct, _name, _parent, _reg, _shift, _width, _flags) \
+ struct ccu_phase _struct = { \
+ .shift = _shift, \
+ .width = _width, \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT(_name, \
+ _parent, \
+ &ccu_phase_ops, \
+ _flags), \
+ } \
+ }
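+
+/*
+ * Usage sketch (hypothetical values, not from this patch): a 3-bit
+ * phase field in bits [22:20] of an MMC register at 0x088:
+ *
+ *	static SUNXI_CCU_PHASE(mmc0_output_clk, "mmc0-output", "mmc0",
+ *			       0x088, 20, 3, 0);
+ */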
+
+static inline struct ccu_phase *hw_to_ccu_phase(struct clk_hw *hw)
+{
+ struct ccu_common *common = hw_to_ccu_common(hw);
+
+ return container_of(common, struct ccu_phase, common);
+}
+
+extern const struct clk_ops ccu_phase_ops;
+
+#endif /* _CCU_PHASE_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu_reset.c b/drivers/clk/sunxi-ng/ccu_reset.c
new file mode 100644
index 000000000..6c31d4878
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu_reset.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2016 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/reset-controller.h>
+
+#include "ccu_reset.h"
+
+static int ccu_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct ccu_reset *ccu = rcdev_to_ccu_reset(rcdev);
+ const struct ccu_reset_map *map = &ccu->reset_map[id];
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(ccu->lock, flags);
+
+ reg = readl(ccu->base + map->reg);
+ writel(reg & ~map->bit, ccu->base + map->reg);
+
+ spin_unlock_irqrestore(ccu->lock, flags);
+
+ return 0;
+}
+
+static int ccu_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct ccu_reset *ccu = rcdev_to_ccu_reset(rcdev);
+ const struct ccu_reset_map *map = &ccu->reset_map[id];
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(ccu->lock, flags);
+
+ reg = readl(ccu->base + map->reg);
+ writel(reg | map->bit, ccu->base + map->reg);
+
+ spin_unlock_irqrestore(ccu->lock, flags);
+
+ return 0;
+}
+
+const struct reset_control_ops ccu_reset_ops = {
+ .assert = ccu_reset_assert,
+ .deassert = ccu_reset_deassert,
+};
diff --git a/drivers/clk/sunxi-ng/ccu_reset.h b/drivers/clk/sunxi-ng/ccu_reset.h
new file mode 100644
index 000000000..36a467921
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu_reset.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2016 Maxime Ripard. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CCU_RESET_H_
+#define _CCU_RESET_H_
+
+#include <linux/reset-controller.h>
+
+struct ccu_reset_map {
+ u16 reg;
+ u32 bit;
+};
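+
+/*
+ * Example (hypothetical values): one entry per reset line, indexed
+ * by the identifier handed to the reset framework:
+ *
+ *	static struct ccu_reset_map foo_resets[] = {
+ *		[0] = { 0x2c0, BIT(5) },
+ *		[1] = { 0x2c0, BIT(6) },
+ *	};
+ */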
+
+struct ccu_reset {
+ void __iomem *base;
+ struct ccu_reset_map *reset_map;
+ spinlock_t *lock;
+
+ struct reset_controller_dev rcdev;
+};
+
+static inline struct ccu_reset *rcdev_to_ccu_reset(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct ccu_reset, rcdev);
+}
+
+extern const struct reset_control_ops ccu_reset_ops;
+
+#endif /* _CCU_RESET_H_ */
diff --git a/drivers/clk/sunxi/clk-a10-pll2.c b/drivers/clk/sunxi/clk-a10-pll2.c
index 0ee1f363e..d8eab90ae 100644
--- a/drivers/clk/sunxi/clk-a10-pll2.c
+++ b/drivers/clk/sunxi/clk-a10-pll2.c
@@ -73,7 +73,7 @@ static void __init sun4i_pll2_setup(struct device_node *node,
SUN4I_PLL2_PRE_DIV_WIDTH,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
&sun4i_a10_pll2_lock);
- if (!prediv_clk) {
+ if (IS_ERR(prediv_clk)) {
pr_err("Couldn't register the prediv clock\n");
goto err_free_array;
}
@@ -106,7 +106,7 @@ static void __init sun4i_pll2_setup(struct device_node *node,
&mult->hw, &clk_multiplier_ops,
&gate->hw, &clk_gate_ops,
CLK_SET_RATE_PARENT);
- if (!base_clk) {
+ if (IS_ERR(base_clk)) {
pr_err("Couldn't register the base multiplier clock\n");
goto err_free_multiplier;
}
diff --git a/drivers/clk/sunxi/clk-factors.c b/drivers/clk/sunxi/clk-factors.c
index ddefe9668..dfe5e3e32 100644
--- a/drivers/clk/sunxi/clk-factors.c
+++ b/drivers/clk/sunxi/clk-factors.c
@@ -12,7 +12,6 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/string.h>
diff --git a/drivers/clk/sunxi/clk-sun6i-apb0-gates.c b/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
index 68021fa5e..09cdb9874 100644
--- a/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
+++ b/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
@@ -9,7 +9,7 @@
*/
#include <linux/clk-provider.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
@@ -33,7 +33,6 @@ static const struct of_device_id sun6i_a31_apb0_gates_clk_dt_ids[] = {
{ .compatible = "allwinner,sun8i-a23-apb0-gates-clk", .data = &sun8i_a23_apb0_gates },
{ /* sentinel */ }
};
-MODULE_DEVICE_TABLE(of, sun6i_a31_apb0_gates_clk_dt_ids);
static int sun6i_a31_apb0_gates_clk_probe(struct platform_device *pdev)
{
@@ -102,8 +101,4 @@ static struct platform_driver sun6i_a31_apb0_gates_clk_driver = {
},
.probe = sun6i_a31_apb0_gates_clk_probe,
};
-module_platform_driver(sun6i_a31_apb0_gates_clk_driver);
-
-MODULE_AUTHOR("Boris BREZILLON <boris.brezillon@free-electrons.com>");
-MODULE_DESCRIPTION("Allwinner A31 APB0 gate clocks driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(sun6i_a31_apb0_gates_clk_driver);
diff --git a/drivers/clk/sunxi/clk-sun6i-apb0.c b/drivers/clk/sunxi/clk-sun6i-apb0.c
index e703e1895..b9c8d3592 100644
--- a/drivers/clk/sunxi/clk-sun6i-apb0.c
+++ b/drivers/clk/sunxi/clk-sun6i-apb0.c
@@ -9,7 +9,7 @@
*/
#include <linux/clk-provider.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -61,7 +61,6 @@ static const struct of_device_id sun6i_a31_apb0_clk_dt_ids[] = {
{ .compatible = "allwinner,sun6i-a31-apb0-clk" },
{ /* sentinel */ }
};
-MODULE_DEVICE_TABLE(of, sun6i_a31_apb0_clk_dt_ids);
static struct platform_driver sun6i_a31_apb0_clk_driver = {
.driver = {
@@ -70,8 +69,4 @@ static struct platform_driver sun6i_a31_apb0_clk_driver = {
},
.probe = sun6i_a31_apb0_clk_probe,
};
-module_platform_driver(sun6i_a31_apb0_clk_driver);
-
-MODULE_AUTHOR("Boris BREZILLON <boris.brezillon@free-electrons.com>");
-MODULE_DESCRIPTION("Allwinner A31 APB0 clock Driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(sun6i_a31_apb0_clk_driver);
diff --git a/drivers/clk/sunxi/clk-sun6i-ar100.c b/drivers/clk/sunxi/clk-sun6i-ar100.c
index 84a187e55..64ca3e9e3 100644
--- a/drivers/clk/sunxi/clk-sun6i-ar100.c
+++ b/drivers/clk/sunxi/clk-sun6i-ar100.c
@@ -10,7 +10,7 @@
#include <linux/bitops.h>
#include <linux/clk-provider.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
@@ -91,32 +91,17 @@ static int sun6i_a31_ar100_clk_probe(struct platform_device *pdev)
return 0;
}
-static int sun6i_a31_ar100_clk_remove(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct clk *clk = platform_get_drvdata(pdev);
-
- sunxi_factors_unregister(np, clk);
-
- return 0;
-}
-
static const struct of_device_id sun6i_a31_ar100_clk_dt_ids[] = {
{ .compatible = "allwinner,sun6i-a31-ar100-clk" },
{ /* sentinel */ }
};
-MODULE_DEVICE_TABLE(of, sun6i_a31_ar100_clk_dt_ids);
static struct platform_driver sun6i_a31_ar100_clk_driver = {
.driver = {
.name = "sun6i-a31-ar100-clk",
.of_match_table = sun6i_a31_ar100_clk_dt_ids,
+ .suppress_bind_attrs = true,
},
.probe = sun6i_a31_ar100_clk_probe,
- .remove = sun6i_a31_ar100_clk_remove,
};
-module_platform_driver(sun6i_a31_ar100_clk_driver);
-
-MODULE_AUTHOR("Boris BREZILLON <boris.brezillon@free-electrons.com>");
-MODULE_DESCRIPTION("Allwinner A31 AR100 clock Driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(sun6i_a31_ar100_clk_driver);
diff --git a/drivers/clk/sunxi/clk-sun8i-apb0.c b/drivers/clk/sunxi/clk-sun8i-apb0.c
index 2ea61debf..a5666e1d0 100644
--- a/drivers/clk/sunxi/clk-sun8i-apb0.c
+++ b/drivers/clk/sunxi/clk-sun8i-apb0.c
@@ -15,7 +15,7 @@
*/
#include <linux/clk-provider.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
@@ -108,7 +108,6 @@ static const struct of_device_id sun8i_a23_apb0_clk_dt_ids[] = {
{ .compatible = "allwinner,sun8i-a23-apb0-clk" },
{ /* sentinel */ }
};
-MODULE_DEVICE_TABLE(of, sun8i_a23_apb0_clk_dt_ids);
static struct platform_driver sun8i_a23_apb0_clk_driver = {
.driver = {
@@ -117,8 +116,4 @@ static struct platform_driver sun8i_a23_apb0_clk_driver = {
},
.probe = sun8i_a23_apb0_clk_probe,
};
-module_platform_driver(sun8i_a23_apb0_clk_driver);
-
-MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
-MODULE_DESCRIPTION("Allwinner A23 APB0 clock Driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(sun8i_a23_apb0_clk_driver);
diff --git a/drivers/clk/sunxi/clk-sun8i-mbus.c b/drivers/clk/sunxi/clk-sun8i-mbus.c
index 411d3033a..b200ebf15 100644
--- a/drivers/clk/sunxi/clk-sun8i-mbus.c
+++ b/drivers/clk/sunxi/clk-sun8i-mbus.c
@@ -48,7 +48,7 @@ static void __init sun8i_a23_mbus_setup(struct device_node *node)
return;
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (!reg) {
+ if (IS_ERR(reg)) {
pr_err("Could not get registers for sun8i-mbus-clk\n");
goto err_free_parents;
}
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
index 716737388..6041bdba2 100644
--- a/drivers/clk/sunxi/clk-sun9i-mmc.c
+++ b/drivers/clk/sunxi/clk-sun9i-mmc.c
@@ -16,7 +16,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/reset.h>
@@ -183,39 +183,17 @@ err_clk_register:
return ret;
}
-static int sun9i_a80_mmc_config_clk_remove(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct sun9i_mmc_clk_data *data = platform_get_drvdata(pdev);
- struct clk_onecell_data *clk_data = &data->clk_data;
- int i;
-
- reset_controller_unregister(&data->rcdev);
- of_clk_del_provider(np);
- for (i = 0; i < clk_data->clk_num; i++)
- clk_unregister(clk_data->clks[i]);
-
- reset_control_assert(data->reset);
-
- return 0;
-}
-
static const struct of_device_id sun9i_a80_mmc_config_clk_dt_ids[] = {
{ .compatible = "allwinner,sun9i-a80-mmc-config-clk" },
{ /* sentinel */ }
};
-MODULE_DEVICE_TABLE(of, sun9i_a80_mmc_config_clk_dt_ids);
static struct platform_driver sun9i_a80_mmc_config_clk_driver = {
.driver = {
.name = "sun9i-a80-mmc-config-clk",
+ .suppress_bind_attrs = true,
.of_match_table = sun9i_a80_mmc_config_clk_dt_ids,
},
.probe = sun9i_a80_mmc_config_clk_probe,
- .remove = sun9i_a80_mmc_config_clk_remove,
};
-module_platform_driver(sun9i_a80_mmc_config_clk_driver);
-
-MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
-MODULE_DESCRIPTION("Allwinner A80 MMC clock/reset Driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(sun9i_a80_mmc_config_clk_driver);
diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
index 36c974916..5738635c5 100644
--- a/drivers/clk/tegra/clk-id.h
+++ b/drivers/clk/tegra/clk-id.h
@@ -238,7 +238,6 @@ enum clk_id {
tegra_clk_sor0,
tegra_clk_sor0_lvds,
tegra_clk_sor1,
- tegra_clk_sor1_brick,
tegra_clk_sor1_src,
tegra_clk_spdif,
tegra_clk_spdif_2x,
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index 4e194ecc8..b3855360d 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -191,6 +191,53 @@
#define PLLSS_REF_SRC_SEL_SHIFT 25
#define PLLSS_REF_SRC_SEL_MASK (3 << PLLSS_REF_SRC_SEL_SHIFT)
+#define UTMIP_PLL_CFG1 0x484
+#define UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0)
+#define UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(x) (((x) & 0x1f) << 27)
+#define UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN BIT(12)
+#define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN BIT(14)
+#define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP BIT(15)
+#define UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN BIT(16)
+#define UTMIP_PLL_CFG1_FORCE_PLLU_POWERUP BIT(17)
+
+#define UTMIP_PLL_CFG2 0x488
+#define UTMIP_PLL_CFG2_STABLE_COUNT(x) (((x) & 0xfff) << 6)
+#define UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(x) (((x) & 0x3f) << 18)
+#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN BIT(0)
+#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERUP BIT(1)
+#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN BIT(2)
+#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERUP BIT(3)
+#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN BIT(4)
+#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERUP BIT(5)
+#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_D_POWERDOWN BIT(24)
+#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_D_POWERUP BIT(25)
+#define UTMIP_PLL_CFG2_PHY_XTAL_CLOCKEN BIT(30)
+
+#define UTMIPLL_HW_PWRDN_CFG0 0x52c
+#define UTMIPLL_HW_PWRDN_CFG0_IDDQ_SWCTL BIT(0)
+#define UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE BIT(1)
+#define UTMIPLL_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL BIT(2)
+#define UTMIPLL_HW_PWRDN_CFG0_SEQ_IN_SWCTL BIT(4)
+#define UTMIPLL_HW_PWRDN_CFG0_SEQ_RESET_INPUT_VALUE BIT(5)
+#define UTMIPLL_HW_PWRDN_CFG0_USE_LOCKDET BIT(6)
+#define UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE BIT(24)
+#define UTMIPLL_HW_PWRDN_CFG0_SEQ_START_STATE BIT(25)
+
+#define PLLU_HW_PWRDN_CFG0 0x530
+#define PLLU_HW_PWRDN_CFG0_CLK_SWITCH_SWCTL BIT(0)
+#define PLLU_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL BIT(2)
+#define PLLU_HW_PWRDN_CFG0_USE_LOCKDET BIT(6)
+#define PLLU_HW_PWRDN_CFG0_USE_SWITCH_DETECT BIT(7)
+#define PLLU_HW_PWRDN_CFG0_SEQ_ENABLE BIT(24)
+#define PLLU_HW_PWRDN_CFG0_IDDQ_PD_INCLUDE BIT(28)
+
+#define XUSB_PLL_CFG0 0x534
+#define XUSB_PLL_CFG0_UTMIPLL_LOCK_DLY 0x3ff
+#define XUSB_PLL_CFG0_PLLU_LOCK_DLY (0x3ff << 14)
+
+#define PLLU_BASE_CLKENABLE_USB BIT(21)
+#define PLLU_BASE_OVERRIDE BIT(24)
+
#define pll_readl(offset, p) readl_relaxed(p->clk_base + offset)
#define pll_readl_base(p) pll_readl(p->params->base_reg, p)
#define pll_readl_misc(p) pll_readl(p->params->misc_reg, p)
@@ -973,6 +1020,133 @@ const struct clk_ops tegra_clk_plle_ops = {
.enable = clk_plle_enable,
};
+/*
+ * Structure defining the fields for the USB UTMIP clock parameters.
+ */
+struct utmi_clk_param {
+ /* Oscillator Frequency in Hz */
+ u32 osc_frequency;
+ /* UTMIP PLL Enable Delay Count */
+ u8 enable_delay_count;
+ /* UTMIP PLL Stable count */
+ u8 stable_count;
+ /* UTMIP PLL Active delay count */
+ u8 active_delay_count;
+ /* UTMIP PLL Xtal frequency count */
+ u8 xtal_freq_count;
+};
+
+static const struct utmi_clk_param utmi_parameters[] = {
+ {
+ .osc_frequency = 13000000, .enable_delay_count = 0x02,
+ .stable_count = 0x33, .active_delay_count = 0x05,
+ .xtal_freq_count = 0x7f
+ }, {
+ .osc_frequency = 19200000, .enable_delay_count = 0x03,
+ .stable_count = 0x4b, .active_delay_count = 0x06,
+ .xtal_freq_count = 0xbb
+ }, {
+ .osc_frequency = 12000000, .enable_delay_count = 0x02,
+ .stable_count = 0x2f, .active_delay_count = 0x04,
+ .xtal_freq_count = 0x76
+ }, {
+ .osc_frequency = 26000000, .enable_delay_count = 0x04,
+ .stable_count = 0x66, .active_delay_count = 0x09,
+ .xtal_freq_count = 0xfe
+ }, {
+ .osc_frequency = 16800000, .enable_delay_count = 0x03,
+ .stable_count = 0x41, .active_delay_count = 0x0a,
+ .xtal_freq_count = 0xa4
+ }, {
+ .osc_frequency = 38400000, .enable_delay_count = 0x0,
+ .stable_count = 0x0, .active_delay_count = 0x6,
+ .xtal_freq_count = 0x80
+ },
+};
+
+static int clk_pllu_enable(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ struct clk_hw *pll_ref = clk_hw_get_parent(hw);
+ struct clk_hw *osc = clk_hw_get_parent(pll_ref);
+ const struct utmi_clk_param *params = NULL;
+ unsigned long flags = 0, input_rate;
+ unsigned int i;
+ int ret = 0;
+ u32 value;
+
+ if (!osc) {
+ pr_err("%s: failed to get OSC clock\n", __func__);
+ return -EINVAL;
+ }
+
+ input_rate = clk_hw_get_rate(osc);
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ _clk_pll_enable(hw);
+
+ ret = clk_pll_wait_for_lock(pll);
+ if (ret < 0)
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(utmi_parameters); i++) {
+ if (input_rate == utmi_parameters[i].osc_frequency) {
+ params = &utmi_parameters[i];
+ break;
+ }
+ }
+
+ if (!params) {
+ pr_err("%s: unexpected input rate %lu Hz\n", __func__,
+ input_rate);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ value = pll_readl_base(pll);
+ value &= ~PLLU_BASE_OVERRIDE;
+ pll_writel_base(value, pll);
+
+ value = readl_relaxed(pll->clk_base + UTMIP_PLL_CFG2);
+ /* Program UTMIP PLL stable and active counts */
+ value &= ~UTMIP_PLL_CFG2_STABLE_COUNT(~0);
+ value |= UTMIP_PLL_CFG2_STABLE_COUNT(params->stable_count);
+ value &= ~UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(~0);
+ value |= UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(params->active_delay_count);
+ /* Remove power downs from UTMIP PLL control bits */
+ value &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN;
+ value &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN;
+ value &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN;
+ writel_relaxed(value, pll->clk_base + UTMIP_PLL_CFG2);
+
+ value = readl_relaxed(pll->clk_base + UTMIP_PLL_CFG1);
+ /* Program UTMIP PLL delay and oscillator frequency counts */
+ value &= ~UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(~0);
+ value |= UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(params->enable_delay_count);
+ value &= ~UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(~0);
+ value |= UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(params->xtal_freq_count);
+ /* Remove power downs from UTMIP PLL control bits */
+ value &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN;
+ value &= ~UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN;
+ value &= ~UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN;
+ writel_relaxed(value, pll->clk_base + UTMIP_PLL_CFG1);
+
+out:
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+
+ return ret;
+}
+
+static const struct clk_ops tegra_clk_pllu_ops = {
+ .is_enabled = clk_pll_is_enabled,
+ .enable = clk_pllu_enable,
+ .disable = clk_pll_disable,
+ .recalc_rate = clk_pll_recalc_rate,
+};
+
static int _pll_fixed_mdiv(struct tegra_clk_pll_params *pll_params,
unsigned long parent_rate)
{
@@ -1505,6 +1679,112 @@ static void clk_plle_tegra114_disable(struct clk_hw *hw)
if (pll->lock)
spin_unlock_irqrestore(pll->lock, flags);
}
+
+static int clk_pllu_tegra114_enable(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ const struct utmi_clk_param *params = NULL;
+ struct clk *osc = __clk_lookup("osc");
+ unsigned long flags = 0, input_rate;
+ unsigned int i;
+ int ret = 0;
+ u32 value;
+
+ if (!osc) {
+ pr_err("%s: failed to get OSC clock\n", __func__);
+ return -EINVAL;
+ }
+
+ input_rate = clk_hw_get_rate(__clk_get_hw(osc));
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ _clk_pll_enable(hw);
+
+ ret = clk_pll_wait_for_lock(pll);
+ if (ret < 0)
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(utmi_parameters); i++) {
+ if (input_rate == utmi_parameters[i].osc_frequency) {
+ params = &utmi_parameters[i];
+ break;
+ }
+ }
+
+ if (!params) {
+ pr_err("%s: unexpected input rate %lu Hz\n", __func__,
+ input_rate);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ value = pll_readl_base(pll);
+ value &= ~PLLU_BASE_OVERRIDE;
+ pll_writel_base(value, pll);
+
+ value = readl_relaxed(pll->clk_base + UTMIP_PLL_CFG2);
+ /* Program UTMIP PLL stable and active counts */
+ value &= ~UTMIP_PLL_CFG2_STABLE_COUNT(~0);
+ value |= UTMIP_PLL_CFG2_STABLE_COUNT(params->stable_count);
+ value &= ~UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(~0);
+ value |= UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(params->active_delay_count);
+ /* Remove power downs from UTMIP PLL control bits */
+ value &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN;
+ value &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN;
+ value &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN;
+ writel_relaxed(value, pll->clk_base + UTMIP_PLL_CFG2);
+
+ value = readl_relaxed(pll->clk_base + UTMIP_PLL_CFG1);
+ /* Program UTMIP PLL delay and oscillator frequency counts */
+ value &= ~UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(~0);
+ value |= UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(params->enable_delay_count);
+ value &= ~UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(~0);
+ value |= UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(params->xtal_freq_count);
+ /* Remove power downs from UTMIP PLL control bits */
+ value &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN;
+ value &= ~UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN;
+ value &= ~UTMIP_PLL_CFG1_FORCE_PLLU_POWERUP;
+ value &= ~UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN;
+ writel_relaxed(value, pll->clk_base + UTMIP_PLL_CFG1);
+
+ /* Setup HW control of UTMIPLL */
+ value = readl_relaxed(pll->clk_base + UTMIPLL_HW_PWRDN_CFG0);
+ value |= UTMIPLL_HW_PWRDN_CFG0_USE_LOCKDET;
+ value &= ~UTMIPLL_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL;
+ value |= UTMIPLL_HW_PWRDN_CFG0_SEQ_START_STATE;
+ writel_relaxed(value, pll->clk_base + UTMIPLL_HW_PWRDN_CFG0);
+
+ value = readl_relaxed(pll->clk_base + UTMIP_PLL_CFG1);
+ value &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP;
+ value &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN;
+ writel_relaxed(value, pll->clk_base + UTMIP_PLL_CFG1);
+
+ udelay(1);
+
+ /*
+	 * Set up the SW override of UTMIPLL, assuming the USB 2.0 ports are
+	 * assigned to USB2
+ */
+ value = readl_relaxed(pll->clk_base + UTMIPLL_HW_PWRDN_CFG0);
+ value |= UTMIPLL_HW_PWRDN_CFG0_IDDQ_SWCTL;
+ value &= ~UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE;
+ writel_relaxed(value, pll->clk_base + UTMIPLL_HW_PWRDN_CFG0);
+
+ udelay(1);
+
+ /* Enable HW control of UTMIPLL */
+ value = readl_relaxed(pll->clk_base + UTMIPLL_HW_PWRDN_CFG0);
+ value |= UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE;
+ writel_relaxed(value, pll->clk_base + UTMIPLL_HW_PWRDN_CFG0);
+
+out:
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+
+ return ret;
+}
#endif
static struct tegra_clk_pll *_tegra_init_pll(void __iomem *clk_base,
@@ -1614,6 +1894,27 @@ struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
return clk;
}
+struct clk *tegra_clk_register_pllu(const char *name, const char *parent_name,
+ void __iomem *clk_base, unsigned long flags,
+ struct tegra_clk_pll_params *pll_params, spinlock_t *lock)
+{
+ struct tegra_clk_pll *pll;
+ struct clk *clk;
+
+ pll_params->flags |= TEGRA_PLLU;
+
+ pll = _tegra_init_pll(clk_base, NULL, pll_params, lock);
+ if (IS_ERR(pll))
+ return ERR_CAST(pll);
+
+ clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
+ &tegra_clk_pllu_ops);
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
+}
+
#if defined(CONFIG_ARCH_TEGRA_114_SOC) || \
defined(CONFIG_ARCH_TEGRA_124_SOC) || \
defined(CONFIG_ARCH_TEGRA_132_SOC) || \
@@ -1652,6 +1953,12 @@ static const struct clk_ops tegra_clk_plle_tegra114_ops = {
.recalc_rate = clk_pll_recalc_rate,
};
+static const struct clk_ops tegra_clk_pllu_tegra114_ops = {
+ .is_enabled = clk_pll_is_enabled,
+ .enable = clk_pllu_tegra114_enable,
+ .disable = clk_pll_disable,
+ .recalc_rate = clk_pll_recalc_rate,
+};
struct clk *tegra_clk_register_pllxc(const char *name, const char *parent_name,
void __iomem *clk_base, void __iomem *pmc,
@@ -1919,6 +2226,29 @@ struct clk *tegra_clk_register_plle_tegra114(const char *name,
return clk;
}
+
+struct clk *
+tegra_clk_register_pllu_tegra114(const char *name, const char *parent_name,
+ void __iomem *clk_base, unsigned long flags,
+ struct tegra_clk_pll_params *pll_params,
+ spinlock_t *lock)
+{
+ struct tegra_clk_pll *pll;
+ struct clk *clk;
+
+ pll_params->flags |= TEGRA_PLLU;
+
+ pll = _tegra_init_pll(clk_base, NULL, pll_params, lock);
+ if (IS_ERR(pll))
+ return ERR_CAST(pll);
+
+ clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
+ &tegra_clk_pllu_tegra114_ops);
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
+}
#endif
#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC)
@@ -2187,6 +2517,152 @@ static int clk_plle_tegra210_is_enabled(struct clk_hw *hw)
return val & PLLE_BASE_ENABLE ? 1 : 0;
}
+static int clk_pllu_tegra210_enable(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ struct clk_hw *pll_ref = clk_hw_get_parent(hw);
+ struct clk_hw *osc = clk_hw_get_parent(pll_ref);
+ const struct utmi_clk_param *params = NULL;
+ unsigned long flags = 0, input_rate;
+ unsigned int i;
+ int ret = 0;
+ u32 value;
+
+ if (!osc) {
+ pr_err("%s: failed to get OSC clock\n", __func__);
+ return -EINVAL;
+ }
+
+ input_rate = clk_hw_get_rate(osc);
+
+ if (pll->lock)
+ spin_lock_irqsave(pll->lock, flags);
+
+ _clk_pll_enable(hw);
+
+ ret = clk_pll_wait_for_lock(pll);
+ if (ret < 0)
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(utmi_parameters); i++) {
+ if (input_rate == utmi_parameters[i].osc_frequency) {
+ params = &utmi_parameters[i];
+ break;
+ }
+ }
+
+ if (!params) {
+ pr_err("%s: unexpected input rate %lu Hz\n", __func__,
+ input_rate);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ value = pll_readl_base(pll);
+ value &= ~PLLU_BASE_OVERRIDE;
+ pll_writel_base(value, pll);
+
+ /* Put PLLU under HW control */
+ value = readl_relaxed(pll->clk_base + PLLU_HW_PWRDN_CFG0);
+ value |= PLLU_HW_PWRDN_CFG0_IDDQ_PD_INCLUDE |
+ PLLU_HW_PWRDN_CFG0_USE_SWITCH_DETECT |
+ PLLU_HW_PWRDN_CFG0_USE_LOCKDET;
+ value &= ~(PLLU_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL |
+ PLLU_HW_PWRDN_CFG0_CLK_SWITCH_SWCTL);
+ writel_relaxed(value, pll->clk_base + PLLU_HW_PWRDN_CFG0);
+
+ value = readl_relaxed(pll->clk_base + XUSB_PLL_CFG0);
+ value &= ~XUSB_PLL_CFG0_PLLU_LOCK_DLY;
+ writel_relaxed(value, pll->clk_base + XUSB_PLL_CFG0);
+
+ udelay(1);
+
+ value = readl_relaxed(pll->clk_base + PLLU_HW_PWRDN_CFG0);
+ value |= PLLU_HW_PWRDN_CFG0_SEQ_ENABLE;
+ writel_relaxed(value, pll->clk_base + PLLU_HW_PWRDN_CFG0);
+
+ udelay(1);
+
+ /* Disable PLLU clock branch to UTMIPLL since it uses OSC */
+ value = pll_readl_base(pll);
+ value &= ~PLLU_BASE_CLKENABLE_USB;
+ pll_writel_base(value, pll);
+
+ value = readl_relaxed(pll->clk_base + UTMIPLL_HW_PWRDN_CFG0);
+ if (value & UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE) {
+ pr_debug("UTMIPLL already enabled\n");
+ goto out;
+ }
+
+ value &= ~UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE;
+ writel_relaxed(value, pll->clk_base + UTMIPLL_HW_PWRDN_CFG0);
+
+ /* Program UTMIP PLL stable and active counts */
+ value = readl_relaxed(pll->clk_base + UTMIP_PLL_CFG2);
+ value &= ~UTMIP_PLL_CFG2_STABLE_COUNT(~0);
+ value |= UTMIP_PLL_CFG2_STABLE_COUNT(params->stable_count);
+ value &= ~UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(~0);
+ value |= UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(params->active_delay_count);
+ value |= UTMIP_PLL_CFG2_PHY_XTAL_CLOCKEN;
+ writel_relaxed(value, pll->clk_base + UTMIP_PLL_CFG2);
+
+ /* Program UTMIP PLL delay and oscillator frequency counts */
+ value = readl_relaxed(pll->clk_base + UTMIP_PLL_CFG1);
+ value &= ~UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(~0);
+ value |= UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(params->enable_delay_count);
+ value &= ~UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(~0);
+ value |= UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(params->xtal_freq_count);
+ writel_relaxed(value, pll->clk_base + UTMIP_PLL_CFG1);
+
+ /* Remove power downs from UTMIP PLL control bits */
+ value = readl_relaxed(pll->clk_base + UTMIP_PLL_CFG1);
+ value &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN;
+ value |= UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP;
+ writel(value, pll->clk_base + UTMIP_PLL_CFG1);
+
+ udelay(1);
+
+ /* Enable samplers for SNPS, XUSB_HOST, XUSB_DEV */
+ value = readl_relaxed(pll->clk_base + UTMIP_PLL_CFG2);
+ value |= UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERUP;
+ value |= UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERUP;
+ value |= UTMIP_PLL_CFG2_FORCE_PD_SAMP_D_POWERUP;
+ value &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN;
+ value &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN;
+ value &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_D_POWERDOWN;
+ writel_relaxed(value, pll->clk_base + UTMIP_PLL_CFG2);
+
+ /* Setup HW control of UTMIPLL */
+ value = readl_relaxed(pll->clk_base + UTMIP_PLL_CFG1);
+ value &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP;
+ value &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN;
+ writel_relaxed(value, pll->clk_base + UTMIP_PLL_CFG1);
+
+ value = readl_relaxed(pll->clk_base + UTMIPLL_HW_PWRDN_CFG0);
+ value |= UTMIPLL_HW_PWRDN_CFG0_USE_LOCKDET;
+ value &= ~UTMIPLL_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL;
+ writel_relaxed(value, pll->clk_base + UTMIPLL_HW_PWRDN_CFG0);
+
+ udelay(1);
+
+ value = readl_relaxed(pll->clk_base + XUSB_PLL_CFG0);
+ value &= ~XUSB_PLL_CFG0_UTMIPLL_LOCK_DLY;
+ writel_relaxed(value, pll->clk_base + XUSB_PLL_CFG0);
+
+ udelay(1);
+
+ /* Enable HW control of UTMIPLL */
+ value = readl_relaxed(pll->clk_base + UTMIPLL_HW_PWRDN_CFG0);
+ value |= UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE;
+ writel_relaxed(value, pll->clk_base + UTMIPLL_HW_PWRDN_CFG0);
+
+out:
+ if (pll->lock)
+ spin_unlock_irqrestore(pll->lock, flags);
+
+ return ret;
+}
+
static const struct clk_ops tegra_clk_plle_tegra210_ops = {
.is_enabled = clk_plle_tegra210_is_enabled,
.enable = clk_plle_tegra210_enable,
@@ -2194,6 +2670,13 @@ static const struct clk_ops tegra_clk_plle_tegra210_ops = {
.recalc_rate = clk_pll_recalc_rate,
};
+static const struct clk_ops tegra_clk_pllu_tegra210_ops = {
+ .is_enabled = clk_pll_is_enabled,
+ .enable = clk_pllu_tegra210_enable,
+ .disable = clk_pll_disable,
+ .recalc_rate = clk_pllre_recalc_rate,
+};
+
struct clk *tegra_clk_register_plle_tegra210(const char *name,
const char *parent_name,
void __iomem *clk_base, unsigned long flags,
@@ -2434,4 +2917,26 @@ struct clk *tegra_clk_register_pllmb(const char *name, const char *parent_name,
return clk;
}
+
+struct clk *tegra_clk_register_pllu_tegra210(const char *name,
+ const char *parent_name, void __iomem *clk_base,
+ unsigned long flags, struct tegra_clk_pll_params *pll_params,
+ spinlock_t *lock)
+{
+ struct tegra_clk_pll *pll;
+ struct clk *clk;
+
+ pll_params->flags |= TEGRA_PLLU;
+
+ pll = _tegra_init_pll(clk_base, NULL, pll_params, lock);
+ if (IS_ERR(pll))
+ return ERR_CAST(pll);
+
+ clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
+ &tegra_clk_pllu_tegra210_ops);
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
+}
#endif
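The net effect of the clk-pll.c additions: PLLU gains dedicated clk_ops per SoC generation (tegra_clk_pllu_ops, tegra_clk_pllu_tegra114_ops, tegra_clk_pllu_tegra210_ops), each running the UTMIP parameter programming from the .enable hook, plus matching tegra_clk_register_pllu*() constructors. A sketch of a call site, following the shape the Tegra114 hunk later in this patch adopts:

	clk = tegra_clk_register_pllu_tegra114("pll_u", "pll_ref", clk_base,
					       0, &pll_u_params, &pll_u_lock);
	clks[TEGRA114_CLK_PLL_U] = clk;

The first clk_prepare_enable() on pll_u (typically via a USB consumer) then executes clk_pllu_tegra114_enable(), so the UTMIP setup no longer depends on a one-shot init-time helper.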
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 29d04c663..4ce4e7fb1 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -594,15 +594,17 @@ static u32 mux_pllp_plld_plld2_clkm_idx[] = {
[0] = 0, [1] = 2, [2] = 5, [3] = 6
};
-static const char *mux_plldp_sor1_src[] = {
- "pll_dp", "clk_sor1_src"
-};
-#define mux_plldp_sor1_src_idx NULL
-
-static const char *mux_clkm_sor1_brick_sor1_src[] = {
- "clk_m", "sor1_brick", "sor1_src", "sor1_brick"
-};
-#define mux_clkm_sor1_brick_sor1_src_idx NULL
+static const char *mux_sor_safe_sor1_brick_sor1_src[] = {
+ /*
+ * Bit 0 of the mux selects sor1_brick, irrespective of bit 1, so the
+ * sor1_brick parent appears twice in the list below. This is merely
+ * to support clk_get_parent() if firmware happened to set these bits
+ * to 0b11. While not an invalid setting, code should always set the
+ * bits to 0b01 to select sor1_brick.
+ */
+ "sor_safe", "sor1_brick", "sor1_src", "sor1_brick"
+};
+#define mux_sor_safe_sor1_brick_sor1_src_idx NULL
static const char *mux_pllp_pllre_clkm[] = {
"pll_p", "pll_re_out1", "clk_m"
@@ -778,8 +780,7 @@ static struct tegra_periph_init_data periph_clks[] = {
MUX8("nvjpg", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVJPG, 195, 0, tegra_clk_nvjpg),
MUX8("ape", mux_plla_pllc4_out0_pllc_pllc4_out1_pllp_pllc4_out2_clkm, CLK_SOURCE_APE, 198, TEGRA_PERIPH_ON_APB, tegra_clk_ape),
MUX8_NOGATE_LOCK("sor1_src", mux_pllp_plld_plld2_clkm, CLK_SOURCE_SOR1, tegra_clk_sor1_src, &sor1_lock),
- NODIV("sor1_brick", mux_plldp_sor1_src, CLK_SOURCE_SOR1, 14, MASK(1), 183, 0, tegra_clk_sor1_brick, &sor1_lock),
- NODIV("sor1", mux_clkm_sor1_brick_sor1_src, CLK_SOURCE_SOR1, 15, MASK(1), 183, 0, tegra_clk_sor1, &sor1_lock),
+ NODIV("sor1", mux_sor_safe_sor1_brick_sor1_src, CLK_SOURCE_SOR1, 14, MASK(2), 183, 0, tegra_clk_sor1, &sor1_lock),
MUX8("sdmmc_legacy", mux_pllp_out3_clkm_pllp_pllc4, CLK_SOURCE_SDMMC_LEGACY, 193, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_sdmmc_legacy),
MUX8("qspi", mux_pllp_pllc_pllc_out1_pllc4_out2_pllc4_out1_clkm_pllc4_out0, CLK_SOURCE_QSPI, 211, TEGRA_PERIPH_ON_APB, tegra_clk_qspi),
I2C("vii2c", mux_pllp_pllc_clkm, CLK_SOURCE_VI_I2C, 208, tegra_clk_vi_i2c),
@@ -791,7 +792,7 @@ static struct tegra_periph_init_data periph_clks[] = {
static struct tegra_periph_init_data gate_clks[] = {
GATE("rtc", "clk_32k", 4, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_rtc, 0),
- GATE("timer", "clk_m", 5, 0, tegra_clk_timer, 0),
+ GATE("timer", "clk_m", 5, 0, tegra_clk_timer, CLK_IS_CRITICAL),
GATE("isp", "clk_m", 23, 0, tegra_clk_isp, 0),
GATE("vcp", "clk_m", 29, 0, tegra_clk_vcp, 0),
GATE("apbdma", "clk_m", 34, 0, tegra_clk_apbdma, 0),
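Retagging the timer gate with CLK_IS_CRITICAL asks the clock core to enable the clock at registration and keep it on even with no registered consumers; a flagless gate, by contrast, can be disabled by the unused-clock sweep if nothing claims it. A minimal sketch of the flag with the generic gate API (the GATE() macro here expands to something of this shape; the register offset and bit number below are illustrative only):

	clk = clk_register_gate(NULL, "timer", "clk_m", CLK_IS_CRITICAL,
				clk_base + 0x10, 5, 0, NULL);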
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index b78054fac..933b5dd69 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -113,32 +113,6 @@
#define CCLKG_BURST_POLICY 0x368
-#define UTMIP_PLL_CFG2 0x488
-#define UTMIP_PLL_CFG2_STABLE_COUNT(x) (((x) & 0xffff) << 6)
-#define UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(x) (((x) & 0x3f) << 18)
-#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN BIT(0)
-#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN BIT(2)
-#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN BIT(4)
-
-#define UTMIP_PLL_CFG1 0x484
-#define UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(x) (((x) & 0x1f) << 6)
-#define UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0)
-#define UTMIP_PLL_CFG1_FORCE_PLLU_POWERUP BIT(17)
-#define UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN BIT(16)
-#define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP BIT(15)
-#define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN BIT(14)
-#define UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN BIT(12)
-
-#define UTMIPLL_HW_PWRDN_CFG0 0x52c
-#define UTMIPLL_HW_PWRDN_CFG0_SEQ_START_STATE BIT(25)
-#define UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE BIT(24)
-#define UTMIPLL_HW_PWRDN_CFG0_USE_LOCKDET BIT(6)
-#define UTMIPLL_HW_PWRDN_CFG0_SEQ_RESET_INPUT_VALUE BIT(5)
-#define UTMIPLL_HW_PWRDN_CFG0_SEQ_IN_SWCTL BIT(4)
-#define UTMIPLL_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL BIT(2)
-#define UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE BIT(1)
-#define UTMIPLL_HW_PWRDN_CFG0_IDDQ_SWCTL BIT(0)
-
#define CLK_SOURCE_CSITE 0x1d4
#define CLK_SOURCE_EMC 0x19c
@@ -454,7 +428,7 @@ static struct tegra_clk_pll_params pll_d_params = {
.div_nmp = &pllp_nmp,
.freq_table = pll_d_freq_table,
.flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
- TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+ TEGRA_PLL_HAS_LOCK_ENABLE,
};
static struct tegra_clk_pll_params pll_d2_params = {
@@ -472,7 +446,7 @@ static struct tegra_clk_pll_params pll_d2_params = {
.div_nmp = &pllp_nmp,
.freq_table = pll_d_freq_table,
.flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
- TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+ TEGRA_PLL_HAS_LOCK_ENABLE,
};
static const struct pdiv_map pllu_p[] = {
@@ -649,43 +623,6 @@ static unsigned long tegra114_input_freq[] = {
#define MASK(x) (BIT(x) - 1)
-struct utmi_clk_param {
- /* Oscillator Frequency in KHz */
- u32 osc_frequency;
- /* UTMIP PLL Enable Delay Count */
- u8 enable_delay_count;
- /* UTMIP PLL Stable count */
- u8 stable_count;
- /* UTMIP PLL Active delay count */
- u8 active_delay_count;
- /* UTMIP PLL Xtal frequency count */
- u8 xtal_freq_count;
-};
-
-static const struct utmi_clk_param utmi_parameters[] = {
- {
- .osc_frequency = 13000000, .enable_delay_count = 0x02,
- .stable_count = 0x33, .active_delay_count = 0x05,
- .xtal_freq_count = 0x7f
- }, {
- .osc_frequency = 19200000, .enable_delay_count = 0x03,
- .stable_count = 0x4b, .active_delay_count = 0x06,
- .xtal_freq_count = 0xbb
- }, {
- .osc_frequency = 12000000, .enable_delay_count = 0x02,
- .stable_count = 0x2f, .active_delay_count = 0x04,
- .xtal_freq_count = 0x76
- }, {
- .osc_frequency = 26000000, .enable_delay_count = 0x04,
- .stable_count = 0x66, .active_delay_count = 0x09,
- .xtal_freq_count = 0xfe
- }, {
- .osc_frequency = 16800000, .enable_delay_count = 0x03,
- .stable_count = 0x41, .active_delay_count = 0x0a,
- .xtal_freq_count = 0xa4
- },
-};
-
/* peripheral mux definitions */
static const char *mux_plld_out0_plld2_out0[] = {
@@ -986,92 +923,9 @@ static void __init tegra114_fixed_clk_init(void __iomem *clk_base)
}
-static __init void tegra114_utmi_param_configure(void __iomem *clk_base)
-{
- unsigned int i;
- u32 reg;
-
- for (i = 0; i < ARRAY_SIZE(utmi_parameters); i++) {
- if (osc_freq == utmi_parameters[i].osc_frequency)
- break;
- }
-
- if (i >= ARRAY_SIZE(utmi_parameters)) {
- pr_err("%s: Unexpected oscillator freq %lu\n", __func__,
- osc_freq);
- return;
- }
-
- reg = readl_relaxed(clk_base + UTMIP_PLL_CFG2);
-
- /* Program UTMIP PLL stable and active counts */
- /* [FIXME] arclk_rst.h says WRONG! This should be 1ms -> 0x50 Check! */
- reg &= ~UTMIP_PLL_CFG2_STABLE_COUNT(~0);
- reg |= UTMIP_PLL_CFG2_STABLE_COUNT(utmi_parameters[i].stable_count);
-
- reg &= ~UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(~0);
-
- reg |= UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(utmi_parameters[i].
- active_delay_count);
-
- /* Remove power downs from UTMIP PLL control bits */
- reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN;
- reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN;
- reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN;
-
- writel_relaxed(reg, clk_base + UTMIP_PLL_CFG2);
-
- /* Program UTMIP PLL delay and oscillator frequency counts */
- reg = readl_relaxed(clk_base + UTMIP_PLL_CFG1);
- reg &= ~UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(~0);
-
- reg |= UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(utmi_parameters[i].
- enable_delay_count);
-
- reg &= ~UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(~0);
- reg |= UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(utmi_parameters[i].
- xtal_freq_count);
-
- /* Remove power downs from UTMIP PLL control bits */
- reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN;
- reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN;
- reg &= ~UTMIP_PLL_CFG1_FORCE_PLLU_POWERUP;
- reg &= ~UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN;
- writel_relaxed(reg, clk_base + UTMIP_PLL_CFG1);
-
- /* Setup HW control of UTMIPLL */
- reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0);
- reg |= UTMIPLL_HW_PWRDN_CFG0_USE_LOCKDET;
- reg &= ~UTMIPLL_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL;
- reg |= UTMIPLL_HW_PWRDN_CFG0_SEQ_START_STATE;
- writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0);
-
- reg = readl_relaxed(clk_base + UTMIP_PLL_CFG1);
- reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP;
- reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN;
- writel_relaxed(reg, clk_base + UTMIP_PLL_CFG1);
-
- udelay(1);
-
- /* Setup SW override of UTMIPLL assuming USB2.0
- ports are assigned to USB2 */
- reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0);
- reg |= UTMIPLL_HW_PWRDN_CFG0_IDDQ_SWCTL;
- reg &= ~UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE;
- writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0);
-
- udelay(1);
-
- /* Enable HW control UTMIPLL */
- reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0);
- reg |= UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE;
- writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0);
-}
-
static void __init tegra114_pll_init(void __iomem *clk_base,
void __iomem *pmc)
{
- u32 val;
struct clk *clk;
/* PLLC */
@@ -1118,16 +972,10 @@ static void __init tegra114_pll_init(void __iomem *clk_base,
CLK_SET_RATE_PARENT, 1, 1);
/* PLLU */
- val = readl(clk_base + pll_u_params.base_reg);
- val &= ~BIT(24); /* disable PLLU_OVERRIDE */
- writel(val, clk_base + pll_u_params.base_reg);
-
- clk = tegra_clk_register_pll("pll_u", "pll_ref", clk_base, pmc, 0,
- &pll_u_params, &pll_u_lock);
+ clk = tegra_clk_register_pllu_tegra114("pll_u", "pll_ref", clk_base, 0,
+ &pll_u_params, &pll_u_lock);
clks[TEGRA114_CLK_PLL_U] = clk;
- tegra114_utmi_param_configure(clk_base);
-
/* PLLU_480M */
clk = clk_register_gate(NULL, "pll_u_480M", "pll_u",
CLK_SET_RATE_PARENT, clk_base + PLLU_BASE,
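With this hunk the Tegra114 PLLU init loses both the manual PLLU_OVERRIDE clear and the tegra114_utmi_param_configure() call: the override bit is cleared and the UTMIP counts are programmed inside clk_pllu_tegra114_enable() instead, so the sequence is applied on every enable rather than once at boot. Consumers keep using the plain clk API, e.g.:

	/* programs the UTMIP PLL as a side effect of enabling pll_u */
	ret = clk_prepare_enable(clks[TEGRA114_CLK_PLL_U]);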
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index f4fbbf16a..a112d3d2b 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -99,32 +99,6 @@
#define CCLKG_BURST_POLICY 0x368
-#define UTMIP_PLL_CFG2 0x488
-#define UTMIP_PLL_CFG2_STABLE_COUNT(x) (((x) & 0xffff) << 6)
-#define UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(x) (((x) & 0x3f) << 18)
-#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN BIT(0)
-#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN BIT(2)
-#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN BIT(4)
-
-#define UTMIP_PLL_CFG1 0x484
-#define UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(x) (((x) & 0x1f) << 6)
-#define UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0)
-#define UTMIP_PLL_CFG1_FORCE_PLLU_POWERUP BIT(17)
-#define UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN BIT(16)
-#define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP BIT(15)
-#define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN BIT(14)
-#define UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN BIT(12)
-
-#define UTMIPLL_HW_PWRDN_CFG0 0x52c
-#define UTMIPLL_HW_PWRDN_CFG0_SEQ_START_STATE BIT(25)
-#define UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE BIT(24)
-#define UTMIPLL_HW_PWRDN_CFG0_USE_LOCKDET BIT(6)
-#define UTMIPLL_HW_PWRDN_CFG0_SEQ_RESET_INPUT_VALUE BIT(5)
-#define UTMIPLL_HW_PWRDN_CFG0_SEQ_IN_SWCTL BIT(4)
-#define UTMIPLL_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL BIT(2)
-#define UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE BIT(1)
-#define UTMIPLL_HW_PWRDN_CFG0_IDDQ_SWCTL BIT(0)
-
/* Tegra CPU clock and reset control regs */
#define CLK_RST_CONTROLLER_CPU_CMPLX_STATUS 0x470
@@ -764,43 +738,6 @@ static struct tegra_clk_pll_params pll_u_params = {
TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
};
-struct utmi_clk_param {
- /* Oscillator Frequency in KHz */
- u32 osc_frequency;
- /* UTMIP PLL Enable Delay Count */
- u8 enable_delay_count;
- /* UTMIP PLL Stable count */
- u8 stable_count;
- /* UTMIP PLL Active delay count */
- u8 active_delay_count;
- /* UTMIP PLL Xtal frequency count */
- u8 xtal_freq_count;
-};
-
-static const struct utmi_clk_param utmi_parameters[] = {
- {
- .osc_frequency = 13000000, .enable_delay_count = 0x02,
- .stable_count = 0x33, .active_delay_count = 0x05,
- .xtal_freq_count = 0x7f
- }, {
- .osc_frequency = 19200000, .enable_delay_count = 0x03,
- .stable_count = 0x4b, .active_delay_count = 0x06,
- .xtal_freq_count = 0xbb
- }, {
- .osc_frequency = 12000000, .enable_delay_count = 0x02,
- .stable_count = 0x2f, .active_delay_count = 0x04,
- .xtal_freq_count = 0x76
- }, {
- .osc_frequency = 26000000, .enable_delay_count = 0x04,
- .stable_count = 0x66, .active_delay_count = 0x09,
- .xtal_freq_count = 0xfe
- }, {
- .osc_frequency = 16800000, .enable_delay_count = 0x03,
- .stable_count = 0x41, .active_delay_count = 0x0a,
- .xtal_freq_count = 0xa4
- },
-};
-
static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
[tegra_clk_ispb] = { .dt_id = TEGRA124_CLK_ISPB, .present = true },
[tegra_clk_rtc] = { .dt_id = TEGRA124_CLK_RTC, .present = true },
@@ -1063,88 +1000,6 @@ static struct tegra_devclk devclks[] __initdata = {
static struct clk **clks;
-static void tegra124_utmi_param_configure(void __iomem *clk_base)
-{
- unsigned int i;
- u32 reg;
-
- for (i = 0; i < ARRAY_SIZE(utmi_parameters); i++) {
- if (osc_freq == utmi_parameters[i].osc_frequency)
- break;
- }
-
- if (i >= ARRAY_SIZE(utmi_parameters)) {
- pr_err("%s: Unexpected oscillator freq %lu\n", __func__,
- osc_freq);
- return;
- }
-
- reg = readl_relaxed(clk_base + UTMIP_PLL_CFG2);
-
- /* Program UTMIP PLL stable and active counts */
- /* [FIXME] arclk_rst.h says WRONG! This should be 1ms -> 0x50 Check! */
- reg &= ~UTMIP_PLL_CFG2_STABLE_COUNT(~0);
- reg |= UTMIP_PLL_CFG2_STABLE_COUNT(utmi_parameters[i].stable_count);
-
- reg &= ~UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(~0);
-
- reg |= UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(utmi_parameters[i].
- active_delay_count);
-
- /* Remove power downs from UTMIP PLL control bits */
- reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN;
- reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN;
- reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN;
-
- writel_relaxed(reg, clk_base + UTMIP_PLL_CFG2);
-
- /* Program UTMIP PLL delay and oscillator frequency counts */
- reg = readl_relaxed(clk_base + UTMIP_PLL_CFG1);
- reg &= ~UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(~0);
-
- reg |= UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(utmi_parameters[i].
- enable_delay_count);
-
- reg &= ~UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(~0);
- reg |= UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(utmi_parameters[i].
- xtal_freq_count);
-
- /* Remove power downs from UTMIP PLL control bits */
- reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN;
- reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN;
- reg &= ~UTMIP_PLL_CFG1_FORCE_PLLU_POWERUP;
- reg &= ~UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN;
- writel_relaxed(reg, clk_base + UTMIP_PLL_CFG1);
-
- /* Setup HW control of UTMIPLL */
- reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0);
- reg |= UTMIPLL_HW_PWRDN_CFG0_USE_LOCKDET;
- reg &= ~UTMIPLL_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL;
- reg |= UTMIPLL_HW_PWRDN_CFG0_SEQ_START_STATE;
- writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0);
-
- reg = readl_relaxed(clk_base + UTMIP_PLL_CFG1);
- reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP;
- reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN;
- writel_relaxed(reg, clk_base + UTMIP_PLL_CFG1);
-
- udelay(1);
-
- /* Setup SW override of UTMIPLL assuming USB2.0
- ports are assigned to USB2 */
- reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0);
- reg |= UTMIPLL_HW_PWRDN_CFG0_IDDQ_SWCTL;
- reg &= ~UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE;
- writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0);
-
- udelay(1);
-
- /* Enable HW control UTMIPLL */
- reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0);
- reg |= UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE;
- writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0);
-}
-
static __init void tegra124_periph_clk_init(void __iomem *clk_base,
void __iomem *pmc_base)
{
@@ -1195,7 +1050,6 @@ static __init void tegra124_periph_clk_init(void __iomem *clk_base,
static void __init tegra124_pll_init(void __iomem *clk_base,
void __iomem *pmc)
{
- u32 val;
struct clk *clk;
/* PLLC */
@@ -1256,17 +1110,11 @@ static void __init tegra124_pll_init(void __iomem *clk_base,
clks[TEGRA124_CLK_PLL_M_UD] = clk;
/* PLLU */
- val = readl(clk_base + pll_u_params.base_reg);
- val &= ~BIT(24); /* disable PLLU_OVERRIDE */
- writel(val, clk_base + pll_u_params.base_reg);
-
- clk = tegra_clk_register_pll("pll_u", "pll_ref", clk_base, pmc, 0,
- &pll_u_params, &pll_u_lock);
+ clk = tegra_clk_register_pllu_tegra114("pll_u", "pll_ref", clk_base, 0,
+ &pll_u_params, &pll_u_lock);
clk_register_clkdev(clk, "pll_u", NULL);
clks[TEGRA124_CLK_PLL_U] = clk;
- tegra124_utmi_param_configure(clk_base);
-
/* PLLU_480M */
clk = clk_register_gate(NULL, "pll_u_480M", "pll_u",
CLK_SET_RATE_PARENT, clk_base + PLLU_BASE,
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index 456cf586d..2896d2e78 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -155,27 +155,6 @@
#define PMC_PLLM_WB0_OVERRIDE 0x1dc
#define PMC_PLLM_WB0_OVERRIDE_2 0x2b0
-#define UTMIP_PLL_CFG2 0x488
-#define UTMIP_PLL_CFG2_STABLE_COUNT(x) (((x) & 0xfff) << 6)
-#define UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(x) (((x) & 0x3f) << 18)
-#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN BIT(0)
-#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERUP BIT(1)
-#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN BIT(2)
-#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERUP BIT(3)
-#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN BIT(4)
-#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERUP BIT(5)
-#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_D_POWERDOWN BIT(24)
-#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_D_POWERUP BIT(25)
-
-#define UTMIP_PLL_CFG1 0x484
-#define UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(x) (((x) & 0x1f) << 27)
-#define UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0)
-#define UTMIP_PLL_CFG1_FORCE_PLLU_POWERUP BIT(17)
-#define UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN BIT(16)
-#define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP BIT(15)
-#define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN BIT(14)
-#define UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN BIT(12)
-
#define SATA_PLL_CFG0 0x490
#define SATA_PLL_CFG0_PADPLL_RESET_SWCTL BIT(0)
#define SATA_PLL_CFG0_PADPLL_USE_LOCKDET BIT(2)
@@ -1366,9 +1345,9 @@ static u32 pll_expo_p_to_pdiv(u32 p, u32 *pdiv)
static struct tegra_clk_pll_freq_table pll_x_freq_table[] = {
/* 1 GHz */
- { 12000000, 1000000000, 166, 1, 1, 0 }, /* actual: 996.0 MHz */
- { 13000000, 1000000000, 153, 1, 1, 0 }, /* actual: 994.0 MHz */
- { 38400000, 1000000000, 156, 3, 1, 0 }, /* actual: 998.4 MHz */
+ { 12000000, 1000000000, 166, 1, 2, 0 }, /* actual: 996.0 MHz */
+ { 13000000, 1000000000, 153, 1, 2, 0 }, /* actual: 994.0 MHz */
+ { 38400000, 1000000000, 156, 3, 2, 0 }, /* actual: 998.4 MHz */
{ 0, 0, 0, 0, 0, 0 },
};
@@ -1417,9 +1396,9 @@ static struct div_nmp pllc_nmp = {
};
static struct tegra_clk_pll_freq_table pll_cx_freq_table[] = {
- { 12000000, 510000000, 85, 1, 1, 0 },
- { 13000000, 510000000, 78, 1, 1, 0 }, /* actual: 507.0 MHz */
- { 38400000, 510000000, 79, 3, 1, 0 }, /* actual: 505.6 MHz */
+ { 12000000, 510000000, 85, 1, 2, 0 },
+ { 13000000, 510000000, 78, 1, 2, 0 }, /* actual: 507.0 MHz */
+ { 38400000, 510000000, 79, 3, 2, 0 }, /* actual: 505.6 MHz */
{ 0, 0, 0, 0, 0, 0 },
};
@@ -1532,9 +1511,9 @@ static struct div_nmp pllss_nmp = {
};
static struct tegra_clk_pll_freq_table pll_c4_vco_freq_table[] = {
- { 12000000, 600000000, 50, 1, 0, 0 },
- { 13000000, 600000000, 46, 1, 0, 0 }, /* actual: 598.0 MHz */
- { 38400000, 600000000, 62, 4, 0, 0 }, /* actual: 595.2 MHz */
+ { 12000000, 600000000, 50, 1, 1, 0 },
+ { 13000000, 600000000, 46, 1, 1, 0 }, /* actual: 598.0 MHz */
+ { 38400000, 600000000, 62, 4, 1, 0 }, /* actual: 595.2 MHz */
{ 0, 0, 0, 0, 0, 0 },
};
@@ -1583,19 +1562,19 @@ static struct tegra_clk_pll_params pll_c4_vco_params = {
};
static struct tegra_clk_pll_freq_table pll_m_freq_table[] = {
- { 12000000, 800000000, 66, 1, 0, 0 }, /* actual: 792.0 MHz */
- { 13000000, 800000000, 61, 1, 0, 0 }, /* actual: 793.0 MHz */
- { 38400000, 297600000, 93, 4, 2, 0 },
- { 38400000, 400000000, 125, 4, 2, 0 },
- { 38400000, 532800000, 111, 4, 1, 0 },
- { 38400000, 665600000, 104, 3, 1, 0 },
- { 38400000, 800000000, 125, 3, 1, 0 },
- { 38400000, 931200000, 97, 4, 0, 0 },
- { 38400000, 1065600000, 111, 4, 0, 0 },
- { 38400000, 1200000000, 125, 4, 0, 0 },
- { 38400000, 1331200000, 104, 3, 0, 0 },
- { 38400000, 1459200000, 76, 2, 0, 0 },
- { 38400000, 1600000000, 125, 3, 0, 0 },
+ { 12000000, 800000000, 66, 1, 1, 0 }, /* actual: 792.0 MHz */
+ { 13000000, 800000000, 61, 1, 1, 0 }, /* actual: 793.0 MHz */
+ { 38400000, 297600000, 93, 4, 3, 0 },
+ { 38400000, 400000000, 125, 4, 3, 0 },
+ { 38400000, 532800000, 111, 4, 2, 0 },
+ { 38400000, 665600000, 104, 3, 2, 0 },
+ { 38400000, 800000000, 125, 3, 2, 0 },
+ { 38400000, 931200000, 97, 4, 1, 0 },
+ { 38400000, 1065600000, 111, 4, 1, 0 },
+ { 38400000, 1200000000, 125, 4, 1, 0 },
+ { 38400000, 1331200000, 104, 3, 1, 0 },
+ { 38400000, 1459200000, 76, 2, 1, 0 },
+ { 38400000, 1600000000, 125, 3, 1, 0 },
{ 0, 0, 0, 0, 0, 0 },
};
@@ -1705,9 +1684,9 @@ static struct tegra_clk_pll_params pll_e_params = {
};
static struct tegra_clk_pll_freq_table pll_re_vco_freq_table[] = {
- { 12000000, 672000000, 56, 1, 0, 0 },
- { 13000000, 672000000, 51, 1, 0, 0 }, /* actual: 663.0 MHz */
- { 38400000, 672000000, 70, 4, 0, 0 },
+ { 12000000, 672000000, 56, 1, 1, 0 },
+ { 13000000, 672000000, 51, 1, 1, 0 }, /* actual: 663.0 MHz */
+ { 38400000, 672000000, 70, 4, 1, 0 },
{ 0, 0, 0, 0, 0, 0 },
};
@@ -1754,8 +1733,8 @@ static struct div_nmp pllp_nmp = {
};
static struct tegra_clk_pll_freq_table pll_p_freq_table[] = {
- { 12000000, 408000000, 34, 1, 0, 0 },
- { 38400000, 408000000, 85, 8, 0, 0 }, /* cf = 4.8MHz, allowed exception */
+ { 12000000, 408000000, 34, 1, 1, 0 },
+ { 38400000, 408000000, 85, 8, 1, 0 }, /* cf = 4.8MHz, allowed exception */
{ 0, 0, 0, 0, 0, 0 },
};
@@ -1820,14 +1799,14 @@ static struct div_nmp plla_nmp = {
};
static struct tegra_clk_pll_freq_table pll_a_freq_table[] = {
- { 12000000, 282240000, 47, 1, 1, 1, 0xf148 }, /* actual: 282240234 */
- { 12000000, 368640000, 61, 1, 1, 1, 0xfe15 }, /* actual: 368640381 */
- { 12000000, 240000000, 60, 1, 2, 1, 0 },
- { 13000000, 282240000, 43, 1, 1, 1, 0xfd7d }, /* actual: 282239807 */
- { 13000000, 368640000, 56, 1, 1, 1, 0x06d8 }, /* actual: 368640137 */
- { 13000000, 240000000, 55, 1, 2, 1, 0 }, /* actual: 238.3 MHz */
- { 38400000, 282240000, 44, 3, 1, 1, 0xf333 }, /* actual: 282239844 */
- { 38400000, 368640000, 57, 3, 1, 1, 0x0333 }, /* actual: 368639844 */
+ { 12000000, 282240000, 47, 1, 2, 1, 0xf148 }, /* actual: 282240234 */
+ { 12000000, 368640000, 61, 1, 2, 1, 0xfe15 }, /* actual: 368640381 */
+ { 12000000, 240000000, 60, 1, 3, 1, 0 },
+ { 13000000, 282240000, 43, 1, 2, 1, 0xfd7d }, /* actual: 282239807 */
+ { 13000000, 368640000, 56, 1, 2, 1, 0x06d8 }, /* actual: 368640137 */
+ { 13000000, 240000000, 55, 1, 3, 1, 0 }, /* actual: 238.3 MHz */
+ { 38400000, 282240000, 44, 3, 2, 1, 0xf333 }, /* actual: 282239844 */
+ { 38400000, 368640000, 57, 3, 2, 1, 0x0333 }, /* actual: 368639844 */
{ 38400000, 240000000, 75, 3, 3, 1, 0 },
{ 0, 0, 0, 0, 0, 0, 0 },
};
@@ -1873,9 +1852,9 @@ static struct div_nmp plld_nmp = {
};
static struct tegra_clk_pll_freq_table pll_d_freq_table[] = {
- { 12000000, 594000000, 99, 1, 1, 0, 0 },
- { 13000000, 594000000, 91, 1, 1, 0, 0xfc4f }, /* actual: 594000183 */
- { 38400000, 594000000, 30, 1, 1, 0, 0x0e00 },
+ { 12000000, 594000000, 99, 1, 2, 0, 0 },
+ { 13000000, 594000000, 91, 1, 2, 0, 0xfc4f }, /* actual: 594000183 */
+ { 38400000, 594000000, 30, 1, 2, 0, 0x0e00 },
{ 0, 0, 0, 0, 0, 0, 0 },
};
@@ -1911,9 +1890,9 @@ static struct tegra_clk_pll_params pll_d_params = {
};
static struct tegra_clk_pll_freq_table tegra210_pll_d2_freq_table[] = {
- { 12000000, 594000000, 99, 1, 1, 0, 0xf000 },
- { 13000000, 594000000, 91, 1, 1, 0, 0xfc4f }, /* actual: 594000183 */
- { 38400000, 594000000, 30, 1, 1, 0, 0x0e00 },
+ { 12000000, 594000000, 99, 1, 2, 0, 0xf000 },
+ { 13000000, 594000000, 91, 1, 2, 0, 0xfc4f }, /* actual: 594000183 */
+ { 38400000, 594000000, 30, 1, 2, 0, 0x0e00 },
{ 0, 0, 0, 0, 0, 0, 0 },
};
@@ -1935,8 +1914,9 @@ static struct tegra_clk_pll_params pll_d2_params = {
.sdm_din_mask = PLLA_SDM_DIN_MASK,
.sdm_ctrl_reg = PLLD2_MISC1,
.sdm_ctrl_en_mask = PLLD2_SDM_EN_MASK,
- .ssc_ctrl_reg = PLLD2_MISC1,
- .ssc_ctrl_en_mask = PLLD2_SSC_EN_MASK,
+ /* disable spread-spectrum for pll_d2 */
+ .ssc_ctrl_reg = 0,
+ .ssc_ctrl_en_mask = 0,
.round_p_to_pdiv = pll_qlin_p_to_pdiv,
.pdiv_tohw = pll_qlin_pdiv_to_hw,
.div_nmp = &pllss_nmp,
@@ -1955,9 +1935,9 @@ static struct tegra_clk_pll_params pll_d2_params = {
};
static struct tegra_clk_pll_freq_table pll_dp_freq_table[] = {
- { 12000000, 270000000, 90, 1, 3, 0, 0xf000 },
- { 13000000, 270000000, 83, 1, 3, 0, 0xf000 }, /* actual: 269.8 MHz */
- { 38400000, 270000000, 28, 1, 3, 0, 0xf400 },
+ { 12000000, 270000000, 90, 1, 4, 0, 0xf000 },
+ { 13000000, 270000000, 83, 1, 4, 0, 0xf000 }, /* actual: 269.8 MHz */
+ { 38400000, 270000000, 28, 1, 4, 0, 0xf400 },
{ 0, 0, 0, 0, 0, 0, 0 },
};
@@ -2007,9 +1987,9 @@ static struct div_nmp pllu_nmp = {
};
static struct tegra_clk_pll_freq_table pll_u_freq_table[] = {
- { 12000000, 480000000, 40, 1, 0, 0 },
- { 13000000, 480000000, 36, 1, 0, 0 }, /* actual: 468.0 MHz */
- { 38400000, 480000000, 25, 2, 0, 0 },
+ { 12000000, 480000000, 40, 1, 1, 0 },
+ { 13000000, 480000000, 36, 1, 1, 0 }, /* actual: 468.0 MHz */
+ { 38400000, 480000000, 25, 2, 1, 0 },
{ 0, 0, 0, 0, 0, 0 },
};
@@ -2037,47 +2017,6 @@ static struct tegra_clk_pll_params pll_u_vco_params = {
.calc_rate = tegra210_pll_fixed_mdiv_cfg,
};
-struct utmi_clk_param {
- /* Oscillator Frequency in KHz */
- u32 osc_frequency;
- /* UTMIP PLL Enable Delay Count */
- u8 enable_delay_count;
- /* UTMIP PLL Stable count */
- u16 stable_count;
- /* UTMIP PLL Active delay count */
- u8 active_delay_count;
- /* UTMIP PLL Xtal frequency count */
- u16 xtal_freq_count;
-};
-
-static const struct utmi_clk_param utmi_parameters[] = {
- {
- .osc_frequency = 38400000, .enable_delay_count = 0x0,
- .stable_count = 0x0, .active_delay_count = 0x6,
- .xtal_freq_count = 0x80
- }, {
- .osc_frequency = 13000000, .enable_delay_count = 0x02,
- .stable_count = 0x33, .active_delay_count = 0x05,
- .xtal_freq_count = 0x7f
- }, {
- .osc_frequency = 19200000, .enable_delay_count = 0x03,
- .stable_count = 0x4b, .active_delay_count = 0x06,
- .xtal_freq_count = 0xbb
- }, {
- .osc_frequency = 12000000, .enable_delay_count = 0x02,
- .stable_count = 0x2f, .active_delay_count = 0x08,
- .xtal_freq_count = 0x76
- }, {
- .osc_frequency = 26000000, .enable_delay_count = 0x04,
- .stable_count = 0x66, .active_delay_count = 0x09,
- .xtal_freq_count = 0xfe
- }, {
- .osc_frequency = 16800000, .enable_delay_count = 0x03,
- .stable_count = 0x41, .active_delay_count = 0x0a,
- .xtal_freq_count = 0xa4
- },
-};
-
static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = {
[tegra_clk_ispb] = { .dt_id = TEGRA210_CLK_ISPB, .present = true },
[tegra_clk_rtc] = { .dt_id = TEGRA210_CLK_RTC, .present = true },
@@ -2154,6 +2093,8 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = {
[tegra_clk_dpaux1] = { .dt_id = TEGRA210_CLK_DPAUX1, .present = true },
[tegra_clk_sor0] = { .dt_id = TEGRA210_CLK_SOR0, .present = true },
[tegra_clk_sor0_lvds] = { .dt_id = TEGRA210_CLK_SOR0_LVDS, .present = true },
+ [tegra_clk_sor1] = { .dt_id = TEGRA210_CLK_SOR1, .present = true },
+ [tegra_clk_sor1_src] = { .dt_id = TEGRA210_CLK_SOR1_SRC, .present = true },
[tegra_clk_gpu] = { .dt_id = TEGRA210_CLK_GPU, .present = true },
[tegra_clk_pll_g_ref] = { .dt_id = TEGRA210_CLK_PLL_G_REF, .present = true, },
[tegra_clk_uartb_8] = { .dt_id = TEGRA210_CLK_UARTB, .present = true },
@@ -2345,114 +2286,6 @@ static struct tegra_audio_clk_info tegra210_audio_plls[] = {
static struct clk **clks;
-static void tegra210_utmi_param_configure(void __iomem *clk_base)
-{
- u32 reg;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(utmi_parameters); i++) {
- if (osc_freq == utmi_parameters[i].osc_frequency)
- break;
- }
-
- if (i >= ARRAY_SIZE(utmi_parameters)) {
- pr_err("%s: Unexpected oscillator freq %lu\n", __func__,
- osc_freq);
- return;
- }
-
- reg = readl_relaxed(clk_base + PLLU_HW_PWRDN_CFG0);
- reg |= PLLU_HW_PWRDN_CFG0_IDDQ_PD_INCLUDE |
- PLLU_HW_PWRDN_CFG0_USE_SWITCH_DETECT |
- PLLU_HW_PWRDN_CFG0_USE_LOCKDET;
- reg &= ~(PLLU_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL |
- PLLU_HW_PWRDN_CFG0_CLK_SWITCH_SWCTL);
- writel_relaxed(reg, clk_base + PLLU_HW_PWRDN_CFG0);
-
- reg = readl_relaxed(clk_base + PLLU_HW_PWRDN_CFG0);
- reg |= PLLU_HW_PWRDN_CFG0_SEQ_ENABLE;
- writel_relaxed(reg, clk_base + PLLU_HW_PWRDN_CFG0);
- udelay(1);
-
- reg = readl_relaxed(clk_base + PLLU_BASE);
- reg &= ~PLLU_BASE_CLKENABLE_USB;
- writel_relaxed(reg, clk_base + PLLU_BASE);
-
- reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0);
- reg &= ~UTMIPLL_HW_PWRDN_CFG0_IDDQ_OVERRIDE;
- writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0);
-
- udelay(10);
-
- reg = readl_relaxed(clk_base + UTMIP_PLL_CFG2);
-
- /* Program UTMIP PLL stable and active counts */
- /* [FIXME] arclk_rst.h says WRONG! This should be 1ms -> 0x50 Check! */
- reg &= ~UTMIP_PLL_CFG2_STABLE_COUNT(~0);
- reg |= UTMIP_PLL_CFG2_STABLE_COUNT(utmi_parameters[i].stable_count);
-
- reg &= ~UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(~0);
-
- reg |= UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(utmi_parameters[i].
- active_delay_count);
- writel_relaxed(reg, clk_base + UTMIP_PLL_CFG2);
-
- /* Program UTMIP PLL delay and oscillator frequency counts */
- reg = readl_relaxed(clk_base + UTMIP_PLL_CFG1);
- reg &= ~UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(~0);
-
- reg |= UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(utmi_parameters[i].
- enable_delay_count);
-
- reg &= ~UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(~0);
- reg |= UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(utmi_parameters[i].
- xtal_freq_count);
-
- reg |= UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN;
- writel_relaxed(reg, clk_base + UTMIP_PLL_CFG1);
-
- /* Remove power downs from UTMIP PLL control bits */
- reg = readl_relaxed(clk_base + UTMIP_PLL_CFG1);
- reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN;
- reg |= UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP;
- writel_relaxed(reg, clk_base + UTMIP_PLL_CFG1);
- udelay(1);
-
- /* Enable samplers for SNPS, XUSB_HOST, XUSB_DEV */
- reg = readl_relaxed(clk_base + UTMIP_PLL_CFG2);
- reg |= UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERUP;
- reg |= UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERUP;
- reg |= UTMIP_PLL_CFG2_FORCE_PD_SAMP_D_POWERUP;
- reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN;
- reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN;
- reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_D_POWERDOWN;
- writel_relaxed(reg, clk_base + UTMIP_PLL_CFG2);
-
- /* Setup HW control of UTMIPLL */
- reg = readl_relaxed(clk_base + UTMIP_PLL_CFG1);
- reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN;
- reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERUP;
- writel_relaxed(reg, clk_base + UTMIP_PLL_CFG1);
-
- reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0);
- reg |= UTMIPLL_HW_PWRDN_CFG0_USE_LOCKDET;
- reg &= ~UTMIPLL_HW_PWRDN_CFG0_CLK_ENABLE_SWCTL;
- writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0);
-
- udelay(1);
-
- reg = readl_relaxed(clk_base + XUSB_PLL_CFG0);
- reg &= ~XUSB_PLL_CFG0_UTMIPLL_LOCK_DLY;
- writel_relaxed(reg, clk_base + XUSB_PLL_CFG0);
-
- udelay(1);
-
- /* Enable HW control UTMIPLL */
- reg = readl_relaxed(clk_base + UTMIPLL_HW_PWRDN_CFG0);
- reg |= UTMIPLL_HW_PWRDN_CFG0_SEQ_ENABLE;
- writel_relaxed(reg, clk_base + UTMIPLL_HW_PWRDN_CFG0);
-}
-
static __init void tegra210_periph_clk_init(void __iomem *clk_base,
void __iomem *pmc_base)
{
@@ -2463,18 +2296,18 @@ static __init void tegra210_periph_clk_init(void __iomem *clk_base,
1, 2);
clks[TEGRA210_CLK_XUSB_SS_DIV2] = clk;
- clk = tegra_clk_register_periph_fixed("dpaux", "pll_p", 0, clk_base,
+ clk = tegra_clk_register_periph_fixed("sor_safe", "pll_p", 0, clk_base,
+ 1, 17, 222);
+ clks[TEGRA210_CLK_SOR_SAFE] = clk;
+
+ clk = tegra_clk_register_periph_fixed("dpaux", "sor_safe", 0, clk_base,
1, 17, 181);
clks[TEGRA210_CLK_DPAUX] = clk;
- clk = tegra_clk_register_periph_fixed("dpaux1", "pll_p", 0, clk_base,
+ clk = tegra_clk_register_periph_fixed("dpaux1", "sor_safe", 0, clk_base,
1, 17, 207);
clks[TEGRA210_CLK_DPAUX1] = clk;
- clk = tegra_clk_register_periph_fixed("sor_safe", "pll_p", 0, clk_base,
- 1, 17, 222);
- clks[TEGRA210_CLK_SOR_SAFE] = clk;
-
/* pll_d_dsi_out */
clk = clk_register_gate(NULL, "pll_d_dsi_out", "pll_d_out0", 0,
clk_base + PLLD_MISC0, 21, 0, &pll_d_lock);
@@ -2520,7 +2353,6 @@ static __init void tegra210_periph_clk_init(void __iomem *clk_base,
static void __init tegra210_pll_init(void __iomem *clk_base,
void __iomem *pmc)
{
- u32 val;
struct clk *clk;
/* PLLC */
@@ -2580,12 +2412,9 @@ static void __init tegra210_pll_init(void __iomem *clk_base,
clks[TEGRA210_CLK_PLL_M_UD] = clk;
/* PLLU_VCO */
- val = readl(clk_base + pll_u_vco_params.base_reg);
- val &= ~PLLU_BASE_OVERRIDE; /* disable PLLU_OVERRIDE */
- writel(val, clk_base + pll_u_vco_params.base_reg);
-
- clk = tegra_clk_register_pllre("pll_u_vco", "pll_ref", clk_base, pmc,
- 0, &pll_u_vco_params, &pll_u_lock, pll_ref_freq);
+ clk = tegra_clk_register_pllu_tegra210("pll_u_vco", "pll_ref",
+ clk_base, 0, &pll_u_vco_params,
+ &pll_u_lock);
clk_register_clkdev(clk, "pll_u_vco", NULL);
clks[TEGRA210_CLK_PLL_U] = clk;
@@ -2618,8 +2447,6 @@ static void __init tegra210_pll_init(void __iomem *clk_base,
clk_register_clkdev(clk, "pll_u_out2", NULL);
clks[TEGRA210_CLK_PLL_U_OUT2] = clk;
- tegra210_utmi_param_configure(clk_base);
-
/* PLLU_480M */
clk = clk_register_gate(NULL, "pll_u_480M", "pll_u_vco",
CLK_SET_RATE_PARENT, clk_base + PLLU_BASE,
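The wholesale +1 shift in the Tegra210 frequency tables reads consistently as the p column switching from the raw register encoding to the effective post-divider value, with pll_qlin_p_to_pdiv()/pll_qlin_pdiv_to_hw() translating back to hardware; under that assumption the table entries reproduce the rates in the "actual:" comments:

	fout = fin * n / (m * p)
	12000000 * 166 / (1 * 2) = 996000000	/* pll_x, "actual: 996.0 MHz" */
	38400000 * 156 / (3 * 2) = 998400000	/* pll_x, "actual: 998.4 MHz" */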
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index 9396f4930..8e2db5ead 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -118,20 +118,6 @@
#define AUDIO_SYNC_DOUBLER 0x49c
-#define UTMIP_PLL_CFG2 0x488
-#define UTMIP_PLL_CFG2_STABLE_COUNT(x) (((x) & 0xffff) << 6)
-#define UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(x) (((x) & 0x3f) << 18)
-#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN BIT(0)
-#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN BIT(2)
-#define UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN BIT(4)
-
-#define UTMIP_PLL_CFG1 0x484
-#define UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(x) (((x) & 0x1f) << 6)
-#define UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0)
-#define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN BIT(14)
-#define UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN BIT(12)
-#define UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN BIT(16)
-
/* Tegra CPU clock and reset control regs */
#define TEGRA_CLK_RST_CONTROLLER_CLK_CPU_CMPLX 0x4c
#define TEGRA_CLK_RST_CONTROLLER_RST_CPU_CMPLX_SET 0x340
@@ -207,46 +193,6 @@ static DEFINE_SPINLOCK(emc_lock);
static struct clk **clks;
-/*
- * Structure defining the fields for USB UTMI clocks Parameters.
- */
-struct utmi_clk_param {
- /* Oscillator Frequency in KHz */
- u32 osc_frequency;
- /* UTMIP PLL Enable Delay Count */
- u8 enable_delay_count;
- /* UTMIP PLL Stable count */
- u8 stable_count;
- /* UTMIP PLL Active delay count */
- u8 active_delay_count;
- /* UTMIP PLL Xtal frequency count */
- u8 xtal_freq_count;
-};
-
-static const struct utmi_clk_param utmi_parameters[] = {
- {
- .osc_frequency = 13000000, .enable_delay_count = 0x02,
- .stable_count = 0x33, .active_delay_count = 0x05,
- .xtal_freq_count = 0x7f
- }, {
- .osc_frequency = 19200000, .enable_delay_count = 0x03,
- .stable_count = 0x4b, .active_delay_count = 0x06,
- .xtal_freq_count = 0xbb
- }, {
- .osc_frequency = 12000000, .enable_delay_count = 0x02,
- .stable_count = 0x2f, .active_delay_count = 0x04,
- .xtal_freq_count = 0x76
- }, {
- .osc_frequency = 26000000, .enable_delay_count = 0x04,
- .stable_count = 0x66, .active_delay_count = 0x09,
- .xtal_freq_count = 0xfe
- }, {
- .osc_frequency = 16800000, .enable_delay_count = 0x03,
- .stable_count = 0x41, .active_delay_count = 0x0a,
- .xtal_freq_count = 0xa4
- },
-};
-
static struct tegra_clk_pll_freq_table pll_c_freq_table[] = {
{ 12000000, 1040000000, 520, 6, 1, 8 },
{ 13000000, 1040000000, 480, 6, 1, 8 },
@@ -873,59 +819,6 @@ static struct tegra_clk tegra30_clks[tegra_clk_max] __initdata = {
[tegra_clk_pll_a_out0] = { .dt_id = TEGRA30_CLK_PLL_A_OUT0, .present = true },
};
-static void tegra30_utmi_param_configure(void)
-{
- unsigned int i;
- u32 reg;
-
- for (i = 0; i < ARRAY_SIZE(utmi_parameters); i++) {
- if (input_freq == utmi_parameters[i].osc_frequency)
- break;
- }
-
- if (i >= ARRAY_SIZE(utmi_parameters)) {
- pr_err("%s: Unexpected input rate %lu\n", __func__, input_freq);
- return;
- }
-
- reg = readl_relaxed(clk_base + UTMIP_PLL_CFG2);
-
- /* Program UTMIP PLL stable and active counts */
- reg &= ~UTMIP_PLL_CFG2_STABLE_COUNT(~0);
- reg |= UTMIP_PLL_CFG2_STABLE_COUNT(
- utmi_parameters[i].stable_count);
-
- reg &= ~UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(~0);
-
- reg |= UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(
- utmi_parameters[i].active_delay_count);
-
- /* Remove power downs from UTMIP PLL control bits */
- reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_A_POWERDOWN;
- reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_B_POWERDOWN;
- reg &= ~UTMIP_PLL_CFG2_FORCE_PD_SAMP_C_POWERDOWN;
-
- writel_relaxed(reg, clk_base + UTMIP_PLL_CFG2);
-
- /* Program UTMIP PLL delay and oscillator frequency counts */
- reg = readl_relaxed(clk_base + UTMIP_PLL_CFG1);
- reg &= ~UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(~0);
-
- reg |= UTMIP_PLL_CFG1_ENABLE_DLY_COUNT(
- utmi_parameters[i].enable_delay_count);
-
- reg &= ~UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(~0);
- reg |= UTMIP_PLL_CFG1_XTAL_FREQ_COUNT(
- utmi_parameters[i].xtal_freq_count);
-
- /* Remove power downs from UTMIP PLL control bits */
- reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN;
- reg &= ~UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN;
- reg &= ~UTMIP_PLL_CFG1_FORCE_PLLU_POWERDOWN;
-
- writel_relaxed(reg, clk_base + UTMIP_PLL_CFG1);
-}
-
static const char *pll_e_parents[] = { "pll_ref", "pll_p" };
static void __init tegra30_pll_init(void)
@@ -972,12 +865,10 @@ static void __init tegra30_pll_init(void)
clks[TEGRA30_CLK_PLL_X_OUT0] = clk;
/* PLLU */
- clk = tegra_clk_register_pll("pll_u", "pll_ref", clk_base, pmc_base, 0,
- &pll_u_params, NULL);
+ clk = tegra_clk_register_pllu("pll_u", "pll_ref", clk_base, 0,
+ &pll_u_params, NULL);
clks[TEGRA30_CLK_PLL_U] = clk;
- tegra30_utmi_param_configure();
-
/* PLLD */
clk = tegra_clk_register_pll("pll_d", "pll_ref", clk_base, pmc_base, 0,
&pll_d_params, &pll_d_lock);
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index 9421f0310..6ba82ecff 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -427,6 +427,23 @@ struct clk *tegra_clk_register_pllmb(const char *name, const char *parent_name,
struct tegra_clk_pll_params *pll_params,
spinlock_t *lock);
+struct clk *tegra_clk_register_pllu(const char *name, const char *parent_name,
+ void __iomem *clk_base, unsigned long flags,
+ struct tegra_clk_pll_params *pll_params,
+ spinlock_t *lock);
+
+struct clk *tegra_clk_register_pllu_tegra114(const char *name,
+ const char *parent_name,
+ void __iomem *clk_base, unsigned long flags,
+ struct tegra_clk_pll_params *pll_params,
+ spinlock_t *lock);
+
+struct clk *tegra_clk_register_pllu_tegra210(const char *name,
+ const char *parent_name,
+ void __iomem *clk_base, unsigned long flags,
+ struct tegra_clk_pll_params *pll_params,
+ spinlock_t *lock);
+
/**
* struct tegra_clk_pll_out - PLL divider down clock
*
diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c
index ef2ec64fe..0e47d95fa 100644
--- a/drivers/clk/ti/clk-33xx.c
+++ b/drivers/clk/ti/clk-33xx.c
@@ -108,6 +108,9 @@ static struct ti_dt_clk am33xx_clks[] = {
DT_CLK("48300200.ehrpwm", "tbclk", "ehrpwm0_tbclk"),
DT_CLK("48302200.ehrpwm", "tbclk", "ehrpwm1_tbclk"),
DT_CLK("48304200.ehrpwm", "tbclk", "ehrpwm2_tbclk"),
+ DT_CLK("48300200.pwm", "tbclk", "ehrpwm0_tbclk"),
+ DT_CLK("48302200.pwm", "tbclk", "ehrpwm1_tbclk"),
+ DT_CLK("48304200.pwm", "tbclk", "ehrpwm2_tbclk"),
{ .node_name = NULL },
};
diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c
index 097fc90bf..e816a7500 100644
--- a/drivers/clk/ti/clk-43xx.c
+++ b/drivers/clk/ti/clk-43xx.c
@@ -58,6 +58,7 @@ static struct ti_dt_clk am43xx_clks[] = {
DT_CLK(NULL, "smartreflex1_fck", "smartreflex1_fck"),
DT_CLK(NULL, "sha0_fck", "sha0_fck"),
DT_CLK(NULL, "aes0_fck", "aes0_fck"),
+ DT_CLK(NULL, "rng_fck", "rng_fck"),
DT_CLK(NULL, "timer1_fck", "timer1_fck"),
DT_CLK(NULL, "timer2_fck", "timer2_fck"),
DT_CLK(NULL, "timer3_fck", "timer3_fck"),
@@ -115,6 +116,12 @@ static struct ti_dt_clk am43xx_clks[] = {
DT_CLK("48306200.ehrpwm", "tbclk", "ehrpwm3_tbclk"),
DT_CLK("48308200.ehrpwm", "tbclk", "ehrpwm4_tbclk"),
DT_CLK("4830a200.ehrpwm", "tbclk", "ehrpwm5_tbclk"),
+ DT_CLK("48300200.pwm", "tbclk", "ehrpwm0_tbclk"),
+ DT_CLK("48302200.pwm", "tbclk", "ehrpwm1_tbclk"),
+ DT_CLK("48304200.pwm", "tbclk", "ehrpwm2_tbclk"),
+ DT_CLK("48306200.pwm", "tbclk", "ehrpwm3_tbclk"),
+ DT_CLK("48308200.pwm", "tbclk", "ehrpwm4_tbclk"),
+ DT_CLK("4830a200.pwm", "tbclk", "ehrpwm5_tbclk"),
{ .node_name = NULL },
};
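
The duplicated ".pwm" entries above keep clkdev lookups working after the corresponding device tree nodes were renamed from "ehrpwm" to "pwm"; the table simply carries both device-name spellings for the same underlying clock. A minimal sketch of the consumer side (hypothetical probe code, not part of this patch):

    static int foo_pwm_probe(struct platform_device *pdev)
    {
            /* Either device name, "48300200.ehrpwm" or "48300200.pwm",
             * resolves con_id "tbclk" through the table above. */
            struct clk *tbclk = clk_get(&pdev->dev, "tbclk");

            if (IS_ERR(tbclk))
                    return PTR_ERR(tbclk);
            clk_put(tbclk);
            return 0;
    }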
diff --git a/drivers/clk/ux500/u8500_of_clk.c b/drivers/clk/ux500/u8500_of_clk.c
index 9a736d939..e960d686d 100644
--- a/drivers/clk/ux500/u8500_of_clk.c
+++ b/drivers/clk/ux500/u8500_of_clk.c
@@ -11,7 +11,6 @@
#include <linux/of_address.h>
#include <linux/clk-provider.h>
#include <linux/mfd/dbx500-prcmu.h>
-#include <linux/platform_data/clk-ux500.h>
#include "clk.h"
#define PRCC_NUM_PERIPH_CLUSTERS 6
@@ -48,11 +47,6 @@ static struct clk *ux500_twocell_get(struct of_phandle_args *clkspec,
return PRCC_SHOW(clk_data, base, bit);
}
-static const struct of_device_id u8500_clk_of_match[] = {
- { .compatible = "stericsson,u8500-clks", },
- { },
-};
-
/* CLKRST4 is missing making it hard to index things */
enum clkrst_index {
CLKRST1_INDEX = 0,
@@ -63,22 +57,15 @@ enum clkrst_index {
CLKRST_MAX,
};
-void u8500_clk_init(void)
+static void u8500_clk_init(struct device_node *np)
{
struct prcmu_fw_version *fw_version;
- struct device_node *np = NULL;
struct device_node *child = NULL;
const char *sgaclk_parent = NULL;
struct clk *clk, *rtc_clk, *twd_clk;
u32 bases[CLKRST_MAX];
int i;
- if (of_have_populated_dt())
- np = of_find_matching_node(NULL, u8500_clk_of_match);
- if (!np) {
- pr_err("Either DT or U8500 Clock node not found\n");
- return;
- }
for (i = 0; i < ARRAY_SIZE(bases); i++) {
struct resource r;
@@ -573,3 +560,4 @@ void u8500_clk_init(void)
of_clk_add_provider(child, of_clk_src_simple_get, twd_clk);
}
}
+CLK_OF_DECLARE(u8500_clks, "stericsson,u8500-clks", u8500_clk_init);
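
CLK_OF_DECLARE() records the compatible string and init callback in the __clk_of_table linker section; of_clk_init() later walks that table and invokes the callback on every matching, available node. That is why u8500_clk_init() can take the node as an argument and drop its own of_find_matching_node() scan. A minimal sketch of the pattern, with hypothetical names:

    static void __init foo_clk_init(struct device_node *np)
    {
            struct clk *clk;

            /* np is the already-matched "vendor,foo-clks" node */
            clk = clk_register_fixed_rate(NULL, "foo_osc", NULL, 0, 32768);
            of_clk_add_provider(np, of_clk_src_simple_get, clk);
    }
    CLK_OF_DECLARE(foo_clks, "vendor,foo-clks", foo_clk_init);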
diff --git a/drivers/clk/ux500/u8540_clk.c b/drivers/clk/ux500/u8540_clk.c
index 86549e59f..133859f0e 100644
--- a/drivers/clk/ux500/u8540_clk.c
+++ b/drivers/clk/ux500/u8540_clk.c
@@ -12,14 +12,8 @@
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/mfd/dbx500-prcmu.h>
-#include <linux/platform_data/clk-ux500.h>
#include "clk.h"
-static const struct of_device_id u8540_clk_of_match[] = {
- { .compatible = "stericsson,u8540-clks", },
- { }
-};
-
/* CLKRST4 is missing making it hard to index things */
enum clkrst_index {
CLKRST1_INDEX = 0,
@@ -30,19 +24,12 @@ enum clkrst_index {
CLKRST_MAX,
};
-void u8540_clk_init(void)
+static void u8540_clk_init(struct device_node *np)
{
struct clk *clk;
- struct device_node *np = NULL;
u32 bases[CLKRST_MAX];
int i;
- if (of_have_populated_dt())
- np = of_find_matching_node(NULL, u8540_clk_of_match);
- if (!np) {
- pr_err("Either DT or U8540 Clock node not found\n");
- return;
- }
for (i = 0; i < ARRAY_SIZE(bases); i++) {
struct resource r;
@@ -607,3 +594,4 @@ void u8540_clk_init(void)
bases[CLKRST6_INDEX], BIT(0), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "rng");
}
+CLK_OF_DECLARE(u8540_clks, "stericsson,u8540-clks", u8540_clk_init);
diff --git a/drivers/clk/ux500/u9540_clk.c b/drivers/clk/ux500/u9540_clk.c
index 2138a4c8c..7b6bca49c 100644
--- a/drivers/clk/ux500/u9540_clk.c
+++ b/drivers/clk/ux500/u9540_clk.c
@@ -9,10 +9,10 @@
#include <linux/clk-provider.h>
#include <linux/mfd/dbx500-prcmu.h>
-#include <linux/platform_data/clk-ux500.h>
#include "clk.h"
-void u9540_clk_init(void)
+static void u9540_clk_init(struct device_node *np)
{
/* register clocks here */
}
+CLK_OF_DECLARE(u9540_clks, "stericsson,u9540-clks", u9540_clk_init);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 47352d25c..567788664 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -27,6 +27,20 @@ config CLKBLD_I8253
config CLKSRC_MMIO
bool
+config BCM2835_TIMER
+ bool "BCM2835 timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ help
+ Enables support for the BCM2835 timer driver.
+
+config BCM_KONA_TIMER
+ bool "BCM mobile timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ help
+ Enables support for the BCM Kona mobile timer driver.
+
config DIGICOLOR_TIMER
bool "Digicolor timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
@@ -141,6 +155,72 @@ config CLKSRC_DBX500_PRCMU
help
Use the always on PRCMU Timer as clocksource
+config CLPS711X_TIMER
+ bool "Cirrus logic timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ help
+ Enables support for the Cirrus Logic CLPS711X timer.
+
+config ATLAS7_TIMER
+ bool "Atlas7 timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ help
+ Enables support for the Atlas7 timer.
+
+config MOXART_TIMER
+ bool "Moxart timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ help
+ Enables support for the Moxart timer.
+
+config MXS_TIMER
+ bool "Mxs timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ select STMP_DEVICE
+ help
+ Enables support for the Mxs timer.
+
+config PRIMA2_TIMER
+ bool "Prima2 timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ help
+ Enables support for the Prima2 timer.
+
+config U300_TIMER
+ bool "U300 timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ depends on ARM
+ select CLKSRC_MMIO
+ help
+ Enables support for the U300 timer.
+
+config NSPIRE_TIMER
+ bool "NSpire timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ help
+ Enables support for the NSpire timer.
+
+config KEYSTONE_TIMER
+ bool "Keystone timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ depends on ARM || ARM64
+ select CLKSRC_MMIO
+ help
+ Enables support for the Keystone timer.
+
+config INTEGRATOR_AP_TIMER
+ bool "Integrator-ap timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ help
+ Enables support for the Integrator-ap timer.
+
config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
bool "Clocksource PRCMU Timer sched_clock"
depends on (CLKSRC_DBX500_PRCMU && !CLKSRC_NOMADIK_MTU_SCHED_CLOCK)
@@ -208,14 +288,16 @@ config ARM_ARCH_TIMER
select CLKSRC_ACPI if ACPI
config ARM_ARCH_TIMER_EVTSTREAM
- bool "Support for ARM architected timer event stream generation"
+ bool "Enable ARM architected timer event stream generation by default"
default y if ARM_ARCH_TIMER
depends on ARM_ARCH_TIMER
help
- This option enables support for event stream generation based on
- the ARM architected timer. It is used for waking up CPUs executing
- the wfe instruction at a frequency represented as a power-of-2
- divisor of the clock rate.
+ This option enables support by default for event stream generation
+ based on the ARM architected timer. It is used for waking up CPUs
+ executing the wfe instruction at a frequency represented as a
+ power-of-2 divisor of the clock rate. The behaviour can also be
+ overridden on the command line using the
+ clocksource.arm_arch_timer.evtstrm parameter.
The main use of the event stream is wfe-based timeouts of userspace
locking implementations. It might also be useful for imposing timeout
on wfe to safeguard against any programming errors in case an expected
@@ -224,8 +306,9 @@ config ARM_ARCH_TIMER_EVTSTREAM
hardware anomalies of missing events.
config ARM_GLOBAL_TIMER
- bool
+ bool "Support for the ARM global timer" if COMPILE_TEST
select CLKSRC_OF if OF
+ depends on ARM
help
This option enables support for the ARM global timer unit
@@ -243,7 +326,7 @@ config CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
Use ARM global timer clock source as sched_clock
config ARMV7M_SYSTICK
- bool
+ bool "Support for the ARMv7M system time" if COMPILE_TEST
select CLKSRC_OF if OF
select CLKSRC_MMIO
help
@@ -254,9 +337,12 @@ config ATMEL_PIT
def_bool SOC_AT91SAM9 || SOC_SAMA5
config ATMEL_ST
- bool
+ bool "Atmel ST timer support" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
select CLKSRC_OF
select MFD_SYSCON
+ help
+ Support for the Atmel ST timer.
config CLKSRC_METAG_GENERIC
def_bool y if METAG
@@ -270,7 +356,7 @@ config CLKSRC_EXYNOS_MCT
Support for Multi Core Timer controller on Exynos SoCs.
config CLKSRC_SAMSUNG_PWM
- bool "PWM timer drvier for Samsung S3C, S5P" if COMPILE_TEST
+ bool "PWM timer driver for Samsung S3C, S5P" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
depends on HAS_IOMEM
help
@@ -293,6 +379,14 @@ config VF_PIT_TIMER
help
Support for Periodic Interrupt Timer on Freescale Vybrid Family SoCs.
+config OXNAS_RPS_TIMER
+ bool "Oxford Semiconductor OXNAS RPS Timers driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_OF
+ select CLKSRC_MMIO
+ help
+ This enables support for the Oxford Semiconductor OXNAS RPS timers.
+
config SYS_SUPPORTS_SH_CMT
bool
@@ -361,8 +455,8 @@ config CLKSRC_QCOM
Qualcomm SoCs.
config CLKSRC_VERSATILE
- bool "ARM Versatile (Express) reference platforms clock source"
- depends on PLAT_VERSATILE && GENERIC_SCHED_CLOCK && !ARCH_USES_GETTIMEOFFSET
+ bool "ARM Versatile (Express) reference platforms clock source" if COMPILE_TEST
+ depends on GENERIC_SCHED_CLOCK && !ARCH_USES_GETTIMEOFFSET
select CLKSRC_OF
default y if MFD_VEXPRESS_SYSREG
help
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 473974f95..fd9d6df0b 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -19,21 +19,21 @@ obj-$(CONFIG_CLKSRC_NOMADIK_MTU) += nomadik-mtu.o
obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o
obj-$(CONFIG_ORION_TIMER) += time-orion.o
-obj-$(CONFIG_ARCH_BCM2835) += bcm2835_timer.o
-obj-$(CONFIG_ARCH_CLPS711X) += clps711x-timer.o
-obj-$(CONFIG_ARCH_ATLAS7) += timer-atlas7.o
-obj-$(CONFIG_ARCH_MOXART) += moxart_timer.o
-obj-$(CONFIG_ARCH_MXS) += mxs_timer.o
+obj-$(CONFIG_BCM2835_TIMER) += bcm2835_timer.o
+obj-$(CONFIG_CLPS711X_TIMER) += clps711x-timer.o
+obj-$(CONFIG_ATLAS7_TIMER) += timer-atlas7.o
+obj-$(CONFIG_MOXART_TIMER) += moxart_timer.o
+obj-$(CONFIG_MXS_TIMER) += mxs_timer.o
obj-$(CONFIG_CLKSRC_PXA) += pxa_timer.o
-obj-$(CONFIG_ARCH_PRIMA2) += timer-prima2.o
-obj-$(CONFIG_ARCH_U300) += timer-u300.o
+obj-$(CONFIG_PRIMA2_TIMER) += timer-prima2.o
+obj-$(CONFIG_U300_TIMER) += timer-u300.o
obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o
obj-$(CONFIG_SUN5I_HSTIMER) += timer-sun5i.o
obj-$(CONFIG_MESON6_TIMER) += meson6_timer.o
obj-$(CONFIG_TEGRA_TIMER) += tegra20_timer.o
obj-$(CONFIG_VT8500_TIMER) += vt8500_timer.o
-obj-$(CONFIG_ARCH_NSPIRE) += zevio-timer.o
-obj-$(CONFIG_ARCH_BCM_MOBILE) += bcm_kona_timer.o
+obj-$(CONFIG_NSPIRE_TIMER) += zevio-timer.o
+obj-$(CONFIG_BCM_KONA_TIMER) += bcm_kona_timer.o
obj-$(CONFIG_CADENCE_TTC_TIMER) += cadence_ttc_timer.o
obj-$(CONFIG_CLKSRC_EFM32) += time-efm32.o
obj-$(CONFIG_CLKSRC_STM32) += timer-stm32.o
@@ -48,6 +48,7 @@ obj-$(CONFIG_MTK_TIMER) += mtk_timer.o
obj-$(CONFIG_CLKSRC_PISTACHIO) += time-pistachio.o
obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o
obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o
+obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o
@@ -55,8 +56,8 @@ obj-$(CONFIG_ARMV7M_SYSTICK) += armv7m_systick.o
obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp804.o
obj-$(CONFIG_CLKSRC_METAG_GENERIC) += metag_generic.o
obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST) += dummy_timer.o
-obj-$(CONFIG_ARCH_KEYSTONE) += timer-keystone.o
-obj-$(CONFIG_ARCH_INTEGRATOR_AP) += timer-integrator-ap.o
+obj-$(CONFIG_KEYSTONE_TIMER) += timer-keystone.o
+obj-$(CONFIG_INTEGRATOR_AP_TIMER) += timer-integrator-ap.o
obj-$(CONFIG_CLKSRC_VERSATILE) += versatile.o
obj-$(CONFIG_CLKSRC_MIPS_GIC) += mips-gic-timer.o
obj-$(CONFIG_CLKSRC_TANGO_XTAL) += tango_xtal.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 4814446a0..57700541f 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -8,6 +8,9 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+
+#define pr_fmt(fmt) "arm_arch_timer: " fmt
+
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
@@ -79,6 +82,14 @@ static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI;
static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;
+static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
+
+static int __init early_evtstrm_cfg(char *buf)
+{
+ return strtobool(buf, &evtstrm_enable);
+}
+early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
+
/*
* Architected system timer support.
*/
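
The early_param() hook above lets the CONFIG_ARM_ARCH_TIMER_EVTSTREAM default be flipped at boot, e.g. with clocksource.arm_arch_timer.evtstrm=0 on the kernel command line; strtobool() accepts "y"/"n"/"1"/"0" style values. A minimal sketch of the same pattern for a hypothetical knob:

    static bool feature_enable = true;      /* built-in default */

    static int __init early_feature_cfg(char *buf)
    {
            /* strtobool() writes the parsed value into feature_enable */
            return strtobool(buf, &feature_enable);
    }
    early_param("mydriver.feature", early_feature_cfg);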
@@ -362,17 +373,36 @@ static bool arch_timer_has_nonsecure_ppi(void)
arch_timer_ppi[PHYS_NONSECURE_PPI]);
}
-static int arch_timer_setup(struct clock_event_device *clk)
+static u32 check_ppi_trigger(int irq)
+{
+ u32 flags = irq_get_trigger_type(irq);
+
+ if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
+ pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
+ pr_warn("WARNING: Please fix your firmware\n");
+ flags = IRQF_TRIGGER_LOW;
+ }
+
+ return flags;
+}
+
+static int arch_timer_starting_cpu(unsigned int cpu)
{
+ struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
+ u32 flags;
+
__arch_timer_setup(ARCH_CP15_TIMER, clk);
- enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], 0);
+ flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
+ enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);
- if (arch_timer_has_nonsecure_ppi())
- enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
+ if (arch_timer_has_nonsecure_ppi()) {
+ flags = check_ppi_trigger(arch_timer_ppi[PHYS_NONSECURE_PPI]);
+ enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], flags);
+ }
arch_counter_set_user_access();
- if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM))
+ if (evtstrm_enable)
arch_timer_configure_evtstream();
return 0;
@@ -519,29 +549,14 @@ static void arch_timer_stop(struct clock_event_device *clk)
clk->set_state_shutdown(clk);
}
-static int arch_timer_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
+static int arch_timer_dying_cpu(unsigned int cpu)
{
- /*
- * Grab cpu pointer in each case to avoid spurious
- * preemptible warnings
- */
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_STARTING:
- arch_timer_setup(this_cpu_ptr(arch_timer_evt));
- break;
- case CPU_DYING:
- arch_timer_stop(this_cpu_ptr(arch_timer_evt));
- break;
- }
+ struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
- return NOTIFY_OK;
+ arch_timer_stop(clk);
+ return 0;
}
-static struct notifier_block arch_timer_cpu_nb = {
- .notifier_call = arch_timer_cpu_notify,
-};
-
#ifdef CONFIG_CPU_PM
static unsigned int saved_cntkctl;
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
@@ -562,11 +577,21 @@ static int __init arch_timer_cpu_pm_init(void)
{
return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
}
+
+static void __init arch_timer_cpu_pm_deinit(void)
+{
+ WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
+}
+
#else
static int __init arch_timer_cpu_pm_init(void)
{
return 0;
}
+
+static void __init arch_timer_cpu_pm_deinit(void)
+{
+}
#endif
static int __init arch_timer_register(void)
@@ -613,22 +638,23 @@ static int __init arch_timer_register(void)
goto out_free;
}
- err = register_cpu_notifier(&arch_timer_cpu_nb);
- if (err)
- goto out_free_irq;
-
err = arch_timer_cpu_pm_init();
if (err)
goto out_unreg_notify;
- /* Immediately configure the timer on the boot CPU */
- arch_timer_setup(this_cpu_ptr(arch_timer_evt));
+ /* Register and immediately configure the timer on the boot CPU */
+ err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
+ "AP_ARM_ARCH_TIMER_STARTING",
+ arch_timer_starting_cpu, arch_timer_dying_cpu);
+ if (err)
+ goto out_unreg_cpupm;
return 0;
+out_unreg_cpupm:
+ arch_timer_cpu_pm_deinit();
+
out_unreg_notify:
- unregister_cpu_notifier(&arch_timer_cpu_nb);
-out_free_irq:
free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
if (arch_timer_has_nonsecure_ppi())
free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
@@ -693,25 +719,26 @@ arch_timer_needs_probing(int type, const struct of_device_id *matches)
return needs_probing;
}
-static void __init arch_timer_common_init(void)
+static int __init arch_timer_common_init(void)
{
unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER;
/* Wait until both nodes are probed if we have two timers */
if ((arch_timers_present & mask) != mask) {
if (arch_timer_needs_probing(ARCH_MEM_TIMER, arch_timer_mem_of_match))
- return;
+ return 0;
if (arch_timer_needs_probing(ARCH_CP15_TIMER, arch_timer_of_match))
- return;
+ return 0;
}
arch_timer_banner(arch_timers_present);
arch_counter_register(arch_timers_present);
- arch_timer_arch_init();
+ return arch_timer_arch_init();
}
-static void __init arch_timer_init(void)
+static int __init arch_timer_init(void)
{
+ int ret;
/*
* If HYP mode is available, we know that the physical timer
* has been configured to be accessible from PL1. Use it, so
@@ -739,23 +766,30 @@ static void __init arch_timer_init(void)
if (!has_ppi) {
pr_warn("arch_timer: No interrupt available, giving up\n");
- return;
+ return -EINVAL;
}
}
- arch_timer_register();
- arch_timer_common_init();
+ ret = arch_timer_register();
+ if (ret)
+ return ret;
+
+ ret = arch_timer_common_init();
+ if (ret)
+ return ret;
arch_timer_kvm_info.virtual_irq = arch_timer_ppi[VIRT_PPI];
+
+ return 0;
}
-static void __init arch_timer_of_init(struct device_node *np)
+static int __init arch_timer_of_init(struct device_node *np)
{
int i;
if (arch_timers_present & ARCH_CP15_TIMER) {
pr_warn("arch_timer: multiple nodes in dt, skipping\n");
- return;
+ return 0;
}
arch_timers_present |= ARCH_CP15_TIMER;
@@ -774,23 +808,23 @@ static void __init arch_timer_of_init(struct device_node *np)
of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
arch_timer_uses_ppi = PHYS_SECURE_PPI;
- arch_timer_init();
+ return arch_timer_init();
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
-static void __init arch_timer_mem_init(struct device_node *np)
+static int __init arch_timer_mem_init(struct device_node *np)
{
struct device_node *frame, *best_frame = NULL;
void __iomem *cntctlbase, *base;
- unsigned int irq;
+ int irq, ret = -EINVAL;
u32 cnttidr;
arch_timers_present |= ARCH_MEM_TIMER;
cntctlbase = of_iomap(np, 0);
if (!cntctlbase) {
pr_err("arch_timer: Can't find CNTCTLBase\n");
- return;
+ return -ENXIO;
}
cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
@@ -830,6 +864,7 @@ static void __init arch_timer_mem_init(struct device_node *np)
best_frame = of_node_get(frame);
}
+ ret = -ENXIO;
base = arch_counter_base = of_iomap(best_frame, 0);
if (!base) {
pr_err("arch_timer: Can't map frame's registers\n");
@@ -841,6 +876,7 @@ static void __init arch_timer_mem_init(struct device_node *np)
else
irq = irq_of_parse_and_map(best_frame, 0);
+ ret = -EINVAL;
if (!irq) {
pr_err("arch_timer: Frame missing %s irq",
arch_timer_mem_use_virtual ? "virt" : "phys");
@@ -848,11 +884,15 @@ static void __init arch_timer_mem_init(struct device_node *np)
}
arch_timer_detect_rate(base, np);
- arch_timer_mem_register(base, irq);
- arch_timer_common_init();
+ ret = arch_timer_mem_register(base, irq);
+ if (ret)
+ goto out;
+
+ return arch_timer_common_init();
out:
iounmap(cntctlbase);
of_node_put(best_frame);
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
arch_timer_mem_init);
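
The notifier-to-hotplug-state conversion above is the recurring pattern in this series: cpuhp_setup_state() installs a "starting" callback that the core runs on each CPU as it is brought up, and invokes immediately on all CPUs that are already online, which is what replaces the old explicit boot-CPU setup call; the paired "dying" callback handles teardown on CPU offline. A minimal sketch under those assumptions (hypothetical callbacks, state constant reused from this patch):

    static int mytimer_starting_cpu(unsigned int cpu)
    {
            /* per-CPU clockevent setup; runs on the CPU itself */
            return 0;
    }

    static int mytimer_dying_cpu(unsigned int cpu)
    {
            /* undo the per-CPU setup before the CPU goes offline */
            return 0;
    }

    static int __init mytimer_register(void)
    {
            return cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
                                     "AP_ARM_ARCH_TIMER_STARTING",
                                     mytimer_starting_cpu,
                                     mytimer_dying_cpu);
    }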
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index 9df0d1699..8da03298f 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -165,9 +165,9 @@ static irqreturn_t gt_clockevent_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int gt_clockevents_init(struct clock_event_device *clk)
+static int gt_starting_cpu(unsigned int cpu)
{
- int cpu = smp_processor_id();
+ struct clock_event_device *clk = this_cpu_ptr(gt_evt);
clk->name = "arm_global_timer";
clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
@@ -186,10 +186,13 @@ static int gt_clockevents_init(struct clock_event_device *clk)
return 0;
}
-static void gt_clockevents_stop(struct clock_event_device *clk)
+static int gt_dying_cpu(unsigned int cpu)
{
+ struct clock_event_device *clk = this_cpu_ptr(gt_evt);
+
gt_clockevent_shutdown(clk);
disable_percpu_irq(clk->irq);
+ return 0;
}
static cycle_t gt_clocksource_read(struct clocksource *cs)
@@ -238,7 +241,7 @@ static void __init gt_delay_timer_init(void)
register_current_timer_delay(&gt_delay_timer);
}
-static void __init gt_clocksource_init(void)
+static int __init gt_clocksource_init(void)
{
writel(0, gt_base + GT_CONTROL);
writel(0, gt_base + GT_COUNTER0);
@@ -249,28 +252,10 @@ static void __init gt_clocksource_init(void)
#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
sched_clock_register(gt_sched_clock_read, 64, gt_clk_rate);
#endif
- clocksource_register_hz(&gt_clocksource, gt_clk_rate);
+ return clocksource_register_hz(&gt_clocksource, gt_clk_rate);
}
-static int gt_cpu_notify(struct notifier_block *self, unsigned long action,
- void *hcpu)
-{
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_STARTING:
- gt_clockevents_init(this_cpu_ptr(gt_evt));
- break;
- case CPU_DYING:
- gt_clockevents_stop(this_cpu_ptr(gt_evt));
- break;
- }
-
- return NOTIFY_OK;
-}
-static struct notifier_block gt_cpu_nb = {
- .notifier_call = gt_cpu_notify,
-};
-
-static void __init global_timer_of_register(struct device_node *np)
+static int __init global_timer_of_register(struct device_node *np)
{
struct clk *gt_clk;
int err = 0;
@@ -283,19 +268,19 @@ static void __init global_timer_of_register(struct device_node *np)
if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9
&& (read_cpuid_id() & 0xf0000f) < 0x200000) {
pr_warn("global-timer: non support for this cpu version.\n");
- return;
+ return -ENOSYS;
}
gt_ppi = irq_of_parse_and_map(np, 0);
if (!gt_ppi) {
pr_warn("global-timer: unable to parse irq\n");
- return;
+ return -EINVAL;
}
gt_base = of_iomap(np, 0);
if (!gt_base) {
pr_warn("global-timer: invalid base address\n");
- return;
+ return -ENXIO;
}
gt_clk = of_clk_get(np, 0);
@@ -325,18 +310,20 @@ static void __init global_timer_of_register(struct device_node *np)
goto out_free;
}
- err = register_cpu_notifier(&gt_cpu_nb);
- if (err) {
- pr_warn("global-timer: unable to register cpu notifier.\n");
+ /* Register and immediately configure the timer on the boot CPU */
+ err = gt_clocksource_init();
+ if (err)
+ goto out_irq;
+
+ err = cpuhp_setup_state(CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
+ "AP_ARM_GLOBAL_TIMER_STARTING",
+ gt_starting_cpu, gt_dying_cpu);
+ if (err)
goto out_irq;
- }
- /* Immediately configure the timer on the boot CPU */
- gt_clocksource_init();
- gt_clockevents_init(this_cpu_ptr(gt_evt));
gt_delay_timer_init();
- return;
+ return 0;
out_irq:
free_percpu_irq(gt_ppi, gt_evt);
@@ -347,6 +334,8 @@ out_clk:
out_unmap:
iounmap(gt_base);
WARN(err, "ARM Global timer register failed (%d)\n", err);
+
+ return err;
}
/* Only tested on r2p2 and r3p0 */
diff --git a/drivers/clocksource/armv7m_systick.c b/drivers/clocksource/armv7m_systick.c
index addfd2c64..a315491b7 100644
--- a/drivers/clocksource/armv7m_systick.c
+++ b/drivers/clocksource/armv7m_systick.c
@@ -7,6 +7,7 @@
#include <linux/kernel.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk.h>
@@ -21,7 +22,7 @@
#define SYSTICK_LOAD_RELOAD_MASK 0x00FFFFFF
-static void __init system_timer_of_register(struct device_node *np)
+static int __init system_timer_of_register(struct device_node *np)
{
struct clk *clk = NULL;
void __iomem *base;
@@ -31,22 +32,26 @@ static void __init system_timer_of_register(struct device_node *np)
base = of_iomap(np, 0);
if (!base) {
pr_warn("system-timer: invalid base address\n");
- return;
+ return -ENXIO;
}
ret = of_property_read_u32(np, "clock-frequency", &rate);
if (ret) {
clk = of_clk_get(np, 0);
- if (IS_ERR(clk))
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
goto out_unmap;
+ }
ret = clk_prepare_enable(clk);
if (ret)
goto out_clk_put;
rate = clk_get_rate(clk);
- if (!rate)
+ if (!rate) {
+ ret = -EINVAL;
goto out_clk_disable;
+ }
}
writel_relaxed(SYSTICK_LOAD_RELOAD_MASK, base + SYST_RVR);
@@ -64,7 +69,7 @@ static void __init system_timer_of_register(struct device_node *np)
pr_info("ARM System timer initialized as clocksource\n");
- return;
+ return 0;
out_clk_disable:
clk_disable_unprepare(clk);
@@ -73,6 +78,8 @@ out_clk_put:
out_unmap:
iounmap(base);
pr_warn("ARM System timer register failed (%d)\n", ret);
+
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(arm_systick, "arm,armv7m-systick",
diff --git a/drivers/clocksource/asm9260_timer.c b/drivers/clocksource/asm9260_timer.c
index 217438d39..1ba871b7f 100644
--- a/drivers/clocksource/asm9260_timer.c
+++ b/drivers/clocksource/asm9260_timer.c
@@ -184,7 +184,7 @@ static irqreturn_t asm9260_timer_interrupt(int irq, void *dev_id)
* Timer initialization
* ---------------------------------------------------------------------------
*/
-static void __init asm9260_timer_init(struct device_node *np)
+static int __init asm9260_timer_init(struct device_node *np)
{
int irq;
struct clk *clk;
@@ -192,20 +192,26 @@ static void __init asm9260_timer_init(struct device_node *np)
unsigned long rate;
priv.base = of_io_request_and_map(np, 0, np->name);
- if (IS_ERR(priv.base))
- panic("%s: unable to map resource", np->name);
+ if (IS_ERR(priv.base)) {
+ pr_err("%s: unable to map resource", np->name);
+ return PTR_ERR(priv.base);
+ }
clk = of_clk_get(np, 0);
ret = clk_prepare_enable(clk);
- if (ret)
- panic("Failed to enable clk!\n");
+ if (ret) {
+ pr_err("Failed to enable clk!\n");
+ return ret;
+ }
irq = irq_of_parse_and_map(np, 0);
ret = request_irq(irq, asm9260_timer_interrupt, IRQF_TIMER,
DRIVER_NAME, &event_dev);
- if (ret)
- panic("Failed to setup irq!\n");
+ if (ret) {
+ pr_err("Failed to setup irq!\n");
+ return ret;
+ }
/* set all timers for count-up */
writel_relaxed(BM_DIR_DEFAULT, priv.base + HW_DIR);
@@ -229,6 +235,8 @@ static void __init asm9260_timer_init(struct device_node *np)
priv.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);
event_dev.cpumask = cpumask_of(0);
clockevents_config_and_register(&event_dev, rate, 0x2c00, 0xfffffffe);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(asm9260_timer, "alphascale,asm9260-timer",
asm9260_timer_init);
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
index 6f2822928..e71acf231 100644
--- a/drivers/clocksource/bcm2835_timer.c
+++ b/drivers/clocksource/bcm2835_timer.c
@@ -80,19 +80,24 @@ static irqreturn_t bcm2835_time_interrupt(int irq, void *dev_id)
}
}
-static void __init bcm2835_timer_init(struct device_node *node)
+static int __init bcm2835_timer_init(struct device_node *node)
{
void __iomem *base;
u32 freq;
- int irq;
+ int irq, ret;
struct bcm2835_timer *timer;
base = of_iomap(node, 0);
- if (!base)
- panic("Can't remap registers");
+ if (!base) {
+ pr_err("Can't remap registers");
+ return -ENXIO;
+ }
- if (of_property_read_u32(node, "clock-frequency", &freq))
- panic("Can't read clock-frequency");
+ ret = of_property_read_u32(node, "clock-frequency", &freq);
+ if (ret) {
+ pr_err("Can't read clock-frequency");
+ return ret;
+ }
system_clock = base + REG_COUNTER_LO;
sched_clock_register(bcm2835_sched_read, 32, freq);
@@ -101,12 +106,16 @@ static void __init bcm2835_timer_init(struct device_node *node)
freq, 300, 32, clocksource_mmio_readl_up);
irq = irq_of_parse_and_map(node, DEFAULT_TIMER);
- if (irq <= 0)
- panic("Can't parse IRQ");
+ if (irq <= 0) {
+ pr_err("Can't parse IRQ");
+ return -EINVAL;
+ }
timer = kzalloc(sizeof(*timer), GFP_KERNEL);
- if (!timer)
- panic("Can't allocate timer struct\n");
+ if (!timer) {
+ pr_err("Can't allocate timer struct\n");
+ return -ENOMEM;
+ }
timer->control = base + REG_CONTROL;
timer->compare = base + REG_COMPARE(DEFAULT_TIMER);
@@ -121,12 +130,17 @@ static void __init bcm2835_timer_init(struct device_node *node)
timer->act.dev_id = timer;
timer->act.handler = bcm2835_time_interrupt;
- if (setup_irq(irq, &timer->act))
- panic("Can't set up timer IRQ\n");
+ ret = setup_irq(irq, &timer->act);
+ if (ret) {
+ pr_err("Can't set up timer IRQ\n");
+ return ret;
+ }
clockevents_config_and_register(&timer->evt, freq, 0xf, 0xffffffff);
pr_info("bcm2835: system timer (irq = %d)\n", irq);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(bcm2835, "brcm,bcm2835-system-timer",
bcm2835_timer_init);
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
index e717e87df..92f6e4dee 100644
--- a/drivers/clocksource/bcm_kona_timer.c
+++ b/drivers/clocksource/bcm_kona_timer.c
@@ -20,7 +20,6 @@
#include <linux/clk.h>
#include <linux/io.h>
-#include <asm/mach/time.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -67,10 +66,10 @@ static void kona_timer_disable_and_clear(void __iomem *base)
}
-static void
+static int
kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw)
{
- int loop_limit = 4;
+ int loop_limit = 3;
/*
* Read 64-bit free running counter
@@ -84,18 +83,19 @@ kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw)
* if new hi-word is equal to previously read hi-word then stop.
*/
- while (--loop_limit) {
+ do {
*msw = readl(timer_base + KONA_GPTIMER_STCHI_OFFSET);
*lsw = readl(timer_base + KONA_GPTIMER_STCLO_OFFSET);
if (*msw == readl(timer_base + KONA_GPTIMER_STCHI_OFFSET))
break;
- }
+ } while (--loop_limit);
if (!loop_limit) {
pr_err("bcm_kona_timer: getting counter failed.\n");
pr_err(" Timer will be impacted\n");
+ return -ETIMEDOUT;
}
- return;
+ return 0;
}
static int kona_timer_set_next_event(unsigned long clc,
@@ -113,8 +113,11 @@ static int kona_timer_set_next_event(unsigned long clc,
uint32_t lsw, msw;
uint32_t reg;
+ int ret;
- kona_timer_get_counter(timers.tmr_regs, &msw, &lsw);
+ ret = kona_timer_get_counter(timers.tmr_regs, &msw, &lsw);
+ if (ret)
+ return ret;
/* Load the "next" event tick value */
writel(lsw + clc, timers.tmr_regs + KONA_GPTIMER_STCM0_OFFSET);
@@ -163,16 +166,11 @@ static struct irqaction kona_timer_irq = {
.handler = kona_timer_interrupt,
};
-static void __init kona_timer_init(struct device_node *node)
+static int __init kona_timer_init(struct device_node *node)
{
u32 freq;
struct clk *external_clk;
- if (!of_device_is_available(node)) {
- pr_info("Kona Timer v1 marked as disabled in device tree\n");
- return;
- }
-
external_clk = of_clk_get_by_name(node, NULL);
if (!IS_ERR(external_clk)) {
@@ -182,7 +180,7 @@ static void __init kona_timer_init(struct device_node *node)
arch_timer_rate = freq;
} else {
pr_err("Kona Timer v1 unable to determine clock-frequency");
- return;
+ return -EINVAL;
}
/* Setup IRQ numbers */
@@ -196,6 +194,8 @@ static void __init kona_timer_init(struct device_node *node)
kona_timer_clockevents_init();
setup_irq(timers.tmr_irq, &kona_timer_irq);
kona_timer_set_next_event((arch_timer_rate / HZ), NULL);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(brcm_kona, "brcm,kona-timer", kona_timer_init);
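
The hi/lo/hi sequence in kona_timer_get_counter() is the usual lock-free way to read a 64-bit free-running counter through two 32-bit registers: if the high word is unchanged after the low word has been read, no carry can have occurred in between. The patch bounds the retries and propagates -ETIMEDOUT instead of silently using a possibly torn value. A generic sketch of the technique, with hypothetical register offsets and no retry bound:

    static u64 read_counter64(void __iomem *base)
    {
            u32 hi, lo;

            do {
                    /* COUNTER_HI/COUNTER_LO are assumed offsets */
                    hi = readl(base + COUNTER_HI);
                    lo = readl(base + COUNTER_LO);
            } while (hi != readl(base + COUNTER_HI));

            return ((u64)hi << 32) | lo;
    }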
diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c
index 9be6018bd..fbfbdec13 100644
--- a/drivers/clocksource/cadence_ttc_timer.c
+++ b/drivers/clocksource/cadence_ttc_timer.c
@@ -322,22 +322,22 @@ static int ttc_rate_change_clocksource_cb(struct notifier_block *nb,
return NOTIFY_DONE;
}
-static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base,
+static int __init ttc_setup_clocksource(struct clk *clk, void __iomem *base,
u32 timer_width)
{
struct ttc_timer_clocksource *ttccs;
int err;
ttccs = kzalloc(sizeof(*ttccs), GFP_KERNEL);
- if (WARN_ON(!ttccs))
- return;
+ if (!ttccs)
+ return -ENOMEM;
ttccs->ttc.clk = clk;
err = clk_prepare_enable(ttccs->ttc.clk);
- if (WARN_ON(err)) {
+ if (err) {
kfree(ttccs);
- return;
+ return err;
}
ttccs->ttc.freq = clk_get_rate(ttccs->ttc.clk);
@@ -345,8 +345,10 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base,
ttccs->ttc.clk_rate_change_nb.notifier_call =
ttc_rate_change_clocksource_cb;
ttccs->ttc.clk_rate_change_nb.next = NULL;
- if (clk_notifier_register(ttccs->ttc.clk,
- &ttccs->ttc.clk_rate_change_nb))
+
+ err = clk_notifier_register(ttccs->ttc.clk,
+ &ttccs->ttc.clk_rate_change_nb);
+ if (err)
pr_warn("Unable to register clock notifier.\n");
ttccs->ttc.base_addr = base;
@@ -368,14 +370,16 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base,
ttccs->ttc.base_addr + TTC_CNT_CNTRL_OFFSET);
err = clocksource_register_hz(&ttccs->cs, ttccs->ttc.freq / PRESCALE);
- if (WARN_ON(err)) {
+ if (err) {
kfree(ttccs);
- return;
+ return err;
}
ttc_sched_clock_val_reg = base + TTC_COUNT_VAL_OFFSET;
sched_clock_register(ttc_sched_clock_read, timer_width,
ttccs->ttc.freq / PRESCALE);
+
+ return 0;
}
static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
@@ -401,30 +405,35 @@ static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
}
}
-static void __init ttc_setup_clockevent(struct clk *clk,
- void __iomem *base, u32 irq)
+static int __init ttc_setup_clockevent(struct clk *clk,
+ void __iomem *base, u32 irq)
{
struct ttc_timer_clockevent *ttcce;
int err;
ttcce = kzalloc(sizeof(*ttcce), GFP_KERNEL);
- if (WARN_ON(!ttcce))
- return;
+ if (!ttcce)
+ return -ENOMEM;
ttcce->ttc.clk = clk;
err = clk_prepare_enable(ttcce->ttc.clk);
- if (WARN_ON(err)) {
+ if (err) {
kfree(ttcce);
- return;
+ return err;
}
ttcce->ttc.clk_rate_change_nb.notifier_call =
ttc_rate_change_clockevent_cb;
ttcce->ttc.clk_rate_change_nb.next = NULL;
- if (clk_notifier_register(ttcce->ttc.clk,
- &ttcce->ttc.clk_rate_change_nb))
+
+ err = clk_notifier_register(ttcce->ttc.clk,
+ &ttcce->ttc.clk_rate_change_nb);
+ if (err) {
pr_warn("Unable to register clock notifier.\n");
+ return err;
+ }
+
ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk);
ttcce->ttc.base_addr = base;
@@ -451,13 +460,15 @@ static void __init ttc_setup_clockevent(struct clk *clk,
err = request_irq(irq, ttc_clock_event_interrupt,
IRQF_TIMER, ttcce->ce.name, ttcce);
- if (WARN_ON(err)) {
+ if (err) {
kfree(ttcce);
- return;
+ return err;
}
clockevents_config_and_register(&ttcce->ce,
ttcce->ttc.freq / PRESCALE, 1, 0xfffe);
+
+ return 0;
}
/**
@@ -466,17 +477,17 @@ static void __init ttc_setup_clockevent(struct clk *clk,
* Initializes the timer hardware and registers the clock source and clock event
* timers with the Linux kernel timer framework
*/
-static void __init ttc_timer_init(struct device_node *timer)
+static int __init ttc_timer_init(struct device_node *timer)
{
unsigned int irq;
void __iomem *timer_baseaddr;
struct clk *clk_cs, *clk_ce;
static int initialized;
- int clksel;
+ int clksel, ret;
u32 timer_width = 16;
if (initialized)
- return;
+ return 0;
initialized = 1;
@@ -488,13 +499,13 @@ static void __init ttc_timer_init(struct device_node *timer)
timer_baseaddr = of_iomap(timer, 0);
if (!timer_baseaddr) {
pr_err("ERROR: invalid timer base address\n");
- BUG();
+ return -ENXIO;
}
irq = irq_of_parse_and_map(timer, 1);
if (irq <= 0) {
pr_err("ERROR: invalid interrupt number\n");
- BUG();
+ return -EINVAL;
}
of_property_read_u32(timer, "timer-width", &timer_width);
@@ -504,7 +515,7 @@ static void __init ttc_timer_init(struct device_node *timer)
clk_cs = of_clk_get(timer, clksel);
if (IS_ERR(clk_cs)) {
pr_err("ERROR: timer input clock not found\n");
- BUG();
+ return PTR_ERR(clk_cs);
}
clksel = readl_relaxed(timer_baseaddr + 4 + TTC_CLK_CNTRL_OFFSET);
@@ -512,13 +523,20 @@ static void __init ttc_timer_init(struct device_node *timer)
clk_ce = of_clk_get(timer, clksel);
if (IS_ERR(clk_ce)) {
pr_err("ERROR: timer input clock not found\n");
- BUG();
+ return PTR_ERR(clk_ce);
}
- ttc_setup_clocksource(clk_cs, timer_baseaddr, timer_width);
- ttc_setup_clockevent(clk_ce, timer_baseaddr + 4, irq);
+ ret = ttc_setup_clocksource(clk_cs, timer_baseaddr, timer_width);
+ if (ret)
+ return ret;
+
+ ret = ttc_setup_clockevent(clk_ce, timer_baseaddr + 4, irq);
+ if (ret)
+ return ret;
pr_info("%s #0 at %p, irq=%d\n", timer->name, timer_baseaddr, irq);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(ttc, "cdns,ttc", ttc_timer_init);
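
The TTC driver keeps a notifier on its input clock so the clocksource and clockevent can follow rate changes; the hunks above start checking clk_notifier_register()'s return value instead of ignoring it. A minimal sketch of that clk notifier pattern, with hypothetical names:

    static int foo_rate_cb(struct notifier_block *nb,
                           unsigned long event, void *data)
    {
            struct clk_notifier_data *ndata = data;

            if (event == POST_RATE_CHANGE)
                    pr_info("input clock now %lu Hz\n", ndata->new_rate);
            return NOTIFY_DONE;
    }

    static struct notifier_block foo_nb = { .notifier_call = foo_rate_cb };

    static int foo_watch_clk(struct clk *clk)
    {
            return clk_notifier_register(clk, &foo_nb);
    }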
diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c
index dfad6eb99..77a365f57 100644
--- a/drivers/clocksource/clksrc-dbx500-prcmu.c
+++ b/drivers/clocksource/clksrc-dbx500-prcmu.c
@@ -64,7 +64,7 @@ static u64 notrace dbx500_prcmu_sched_clock_read(void)
#endif
-static void __init clksrc_dbx500_prcmu_init(struct device_node *node)
+static int __init clksrc_dbx500_prcmu_init(struct device_node *node)
{
clksrc_dbx500_timer_base = of_iomap(node, 0);
@@ -84,7 +84,7 @@ static void __init clksrc_dbx500_prcmu_init(struct device_node *node)
#ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK
sched_clock_register(dbx500_prcmu_sched_clock_read, 32, RATE_32K);
#endif
- clocksource_register_hz(&clocksource_dbx500_prcmu, RATE_32K);
+ return clocksource_register_hz(&clocksource_dbx500_prcmu, RATE_32K);
}
CLOCKSOURCE_OF_DECLARE(dbx500_prcmu, "stericsson,db8500-prcmu-timer-4",
clksrc_dbx500_prcmu_init);
diff --git a/drivers/clocksource/clksrc-probe.c b/drivers/clocksource/clksrc-probe.c
index 7cb6c923a..bc62be97f 100644
--- a/drivers/clocksource/clksrc-probe.c
+++ b/drivers/clocksource/clksrc-probe.c
@@ -28,15 +28,23 @@ void __init clocksource_probe(void)
{
struct device_node *np;
const struct of_device_id *match;
- of_init_fn_1 init_func;
+ of_init_fn_1_ret init_func_ret;
unsigned clocksources = 0;
+ int ret;
for_each_matching_node_and_match(np, __clksrc_of_table, &match) {
if (!of_device_is_available(np))
continue;
- init_func = match->data;
- init_func(np);
+ init_func_ret = match->data;
+
+ ret = init_func_ret(np);
+ if (ret) {
+ pr_err("Failed to initialize '%s': %d",
+ of_node_full_name(np), ret);
+ continue;
+ }
+
clocksources++;
}
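
of_init_fn_1_ret is the int-returning counterpart of of_init_fn_1 (both typedefs live in include/linux/of.h), which is why every CLOCKSOURCE_OF_DECLARE'd init function in this series grows an int return type: clocksource_probe() can now log a failed provider and skip it rather than count it as initialized. A minimal sketch of a conforming init, with a hypothetical driver name:

    static int __init foo_timer_init(struct device_node *np)
    {
            void __iomem *base = of_iomap(np, 0);

            if (!base) {
                    pr_err("foo-timer: unable to map registers\n");
                    return -ENXIO;  /* reported by clocksource_probe() */
            }
            /* ... register the clocksource/clockevent, then ... */
            return 0;
    }
    CLOCKSOURCE_OF_DECLARE(foo_timer, "vendor,foo-timer", foo_timer_init);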
diff --git a/drivers/clocksource/clksrc_st_lpc.c b/drivers/clocksource/clksrc_st_lpc.c
index 65ec46744..03cc49217 100644
--- a/drivers/clocksource/clksrc_st_lpc.c
+++ b/drivers/clocksource/clksrc_st_lpc.c
@@ -92,7 +92,7 @@ static int __init st_clksrc_setup_clk(struct device_node *np)
return 0;
}
-static void __init st_clksrc_of_register(struct device_node *np)
+static int __init st_clksrc_of_register(struct device_node *np)
{
int ret;
uint32_t mode;
@@ -100,32 +100,36 @@ static void __init st_clksrc_of_register(struct device_node *np)
ret = of_property_read_u32(np, "st,lpc-mode", &mode);
if (ret) {
pr_err("clksrc-st-lpc: An LPC mode must be provided\n");
- return;
+ return ret;
}
/* LPC can either run as a Clocksource or in RTC or WDT mode */
if (mode != ST_LPC_MODE_CLKSRC)
- return;
+ return 0;
ddata.base = of_iomap(np, 0);
if (!ddata.base) {
pr_err("clksrc-st-lpc: Unable to map iomem\n");
- return;
+ return -ENXIO;
}
- if (st_clksrc_setup_clk(np)) {
+ ret = st_clksrc_setup_clk(np);
+ if (ret) {
iounmap(ddata.base);
- return;
+ return ret;
}
- if (st_clksrc_init()) {
+ ret = st_clksrc_init();
+ if (ret) {
clk_disable_unprepare(ddata.clk);
clk_put(ddata.clk);
iounmap(ddata.base);
- return;
+ return ret;
}
pr_info("clksrc-st-lpc: clocksource initialised - running @ %luHz\n",
clk_get_rate(ddata.clk));
+
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(ddata, "st,stih407-lpc", st_clksrc_of_register);
diff --git a/drivers/clocksource/clps711x-timer.c b/drivers/clocksource/clps711x-timer.c
index cdd86e352..24db6d605 100644
--- a/drivers/clocksource/clps711x-timer.c
+++ b/drivers/clocksource/clps711x-timer.c
@@ -104,7 +104,7 @@ void __init clps711x_clksrc_init(void __iomem *tc1_base, void __iomem *tc2_base,
}
#ifdef CONFIG_CLKSRC_OF
-static void __init clps711x_timer_init(struct device_node *np)
+static int __init clps711x_timer_init(struct device_node *np)
{
unsigned int irq = irq_of_parse_and_map(np, 0);
struct clk *clock = of_clk_get(np, 0);
@@ -112,14 +112,12 @@ static void __init clps711x_timer_init(struct device_node *np)
switch (of_alias_get_id(np, "timer")) {
case CLPS711X_CLKSRC_CLOCKSOURCE:
- BUG_ON(_clps711x_clksrc_init(clock, base));
- break;
+ return _clps711x_clksrc_init(clock, base);
case CLPS711X_CLKSRC_CLOCKEVENT:
- BUG_ON(_clps711x_clkevt_init(clock, base, irq));
- break;
+ return _clps711x_clkevt_init(clock, base, irq);
default:
- break;
+ return -EINVAL;
}
}
-CLOCKSOURCE_OF_DECLARE(clps711x, "cirrus,clps711x-timer", clps711x_timer_init);
+CLOCKSOURCE_OF_DECLARE(clps711x, "cirrus,ep7209-timer", clps711x_timer_init);
#endif
diff --git a/drivers/clocksource/dummy_timer.c b/drivers/clocksource/dummy_timer.c
index 776b6c86d..89f1c2edb 100644
--- a/drivers/clocksource/dummy_timer.c
+++ b/drivers/clocksource/dummy_timer.c
@@ -16,10 +16,9 @@
static DEFINE_PER_CPU(struct clock_event_device, dummy_timer_evt);
-static void dummy_timer_setup(void)
+static int dummy_timer_starting_cpu(unsigned int cpu)
{
- int cpu = smp_processor_id();
- struct clock_event_device *evt = raw_cpu_ptr(&dummy_timer_evt);
+ struct clock_event_device *evt = per_cpu_ptr(&dummy_timer_evt, cpu);
evt->name = "dummy_timer";
evt->features = CLOCK_EVT_FEAT_PERIODIC |
@@ -29,36 +28,13 @@ static void dummy_timer_setup(void)
evt->cpumask = cpumask_of(cpu);
clockevents_register_device(evt);
+ return 0;
}
-static int dummy_timer_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING)
- dummy_timer_setup();
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block dummy_timer_cpu_nb = {
- .notifier_call = dummy_timer_cpu_notify,
-};
-
static int __init dummy_timer_register(void)
{
- int err = 0;
-
- cpu_notifier_register_begin();
- err = __register_cpu_notifier(&dummy_timer_cpu_nb);
- if (err)
- goto out;
-
- /* We won't get a call on the boot CPU, so register immediately */
- if (num_possible_cpus() > 1)
- dummy_timer_setup();
-
-out:
- cpu_notifier_register_done();
- return err;
+ return cpuhp_setup_state(CPUHP_AP_DUMMY_TIMER_STARTING,
+ "AP_DUMMY_TIMER_STARTING",
+ dummy_timer_starting_cpu, NULL);
}
early_initcall(dummy_timer_register);
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index 860843cef..aee6c0d39 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -143,7 +143,7 @@ static struct delay_timer dw_apb_delay_timer = {
#endif
static int num_called;
-static void __init dw_apb_timer_init(struct device_node *timer)
+static int __init dw_apb_timer_init(struct device_node *timer)
{
switch (num_called) {
case 0:
@@ -164,6 +164,8 @@ static void __init dw_apb_timer_init(struct device_node *timer)
}
num_called++;
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init);
CLOCKSOURCE_OF_DECLARE(apb_timer_osc, "snps,dw-apb-timer-osc", dw_apb_timer_init);
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index be09bc0b5..41840d02c 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -232,7 +232,7 @@ static cycles_t exynos4_read_current_timer(void)
return exynos4_read_count_32();
}
-static void __init exynos4_clocksource_init(void)
+static int __init exynos4_clocksource_init(void)
{
exynos4_mct_frc_start();
@@ -244,6 +244,8 @@ static void __init exynos4_clocksource_init(void)
panic("%s: can't register clocksource\n", mct_frc.name);
sched_clock_register(exynos4_read_sched_clock, 32, clk_rate);
+
+ return 0;
}
static void exynos4_mct_comp0_stop(void)
@@ -335,12 +337,14 @@ static struct irqaction mct_comp_event_irq = {
.dev_id = &mct_comp_device,
};
-static void exynos4_clockevent_init(void)
+static int exynos4_clockevent_init(void)
{
mct_comp_device.cpumask = cpumask_of(0);
clockevents_config_and_register(&mct_comp_device, clk_rate,
0xf, 0xffffffff);
setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq);
+
+ return 0;
}
static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
@@ -439,10 +443,11 @@ static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int exynos4_local_timer_setup(struct mct_clock_event_device *mevt)
+static int exynos4_mct_starting_cpu(unsigned int cpu)
{
+ struct mct_clock_event_device *mevt =
+ per_cpu_ptr(&percpu_mct_tick, cpu);
struct clock_event_device *evt = &mevt->evt;
- unsigned int cpu = smp_processor_id();
mevt->base = EXYNOS4_MCT_L_BASE(cpu);
snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu);
@@ -476,8 +481,10 @@ static int exynos4_local_timer_setup(struct mct_clock_event_device *mevt)
return 0;
}
-static void exynos4_local_timer_stop(struct mct_clock_event_device *mevt)
+static int exynos4_mct_dying_cpu(unsigned int cpu)
{
+ struct mct_clock_event_device *mevt =
+ per_cpu_ptr(&percpu_mct_tick, cpu);
struct clock_event_device *evt = &mevt->evt;
evt->set_state_shutdown(evt);
@@ -487,39 +494,12 @@ static void exynos4_local_timer_stop(struct mct_clock_event_device *mevt)
} else {
disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
}
+ return 0;
}
-static int exynos4_mct_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- struct mct_clock_event_device *mevt;
-
- /*
- * Grab cpu pointer in each case to avoid spurious
- * preemptible warnings
- */
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_STARTING:
- mevt = this_cpu_ptr(&percpu_mct_tick);
- exynos4_local_timer_setup(mevt);
- break;
- case CPU_DYING:
- mevt = this_cpu_ptr(&percpu_mct_tick);
- exynos4_local_timer_stop(mevt);
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block exynos4_mct_cpu_nb = {
- .notifier_call = exynos4_mct_cpu_notify,
-};
-
-static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
+static int __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
{
int err, cpu;
- struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
struct clk *mct_clk, *tick_clk;
tick_clk = np ? of_clk_get_by_name(np, "fin_pll") :
@@ -566,21 +546,25 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
}
}
- err = register_cpu_notifier(&exynos4_mct_cpu_nb);
+ /* Install hotplug callbacks which configure the timer on this CPU */
+ err = cpuhp_setup_state(CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
+ "AP_EXYNOS4_MCT_TIMER_STARTING",
+ exynos4_mct_starting_cpu,
+ exynos4_mct_dying_cpu);
if (err)
goto out_irq;
- /* Immediately configure the timer on the boot CPU */
- exynos4_local_timer_setup(mevt);
- return;
+ return 0;
out_irq:
free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick);
+ return err;
}
-static void __init mct_init_dt(struct device_node *np, unsigned int int_type)
+static int __init mct_init_dt(struct device_node *np, unsigned int int_type)
{
u32 nr_irqs, i;
+ int ret;
mct_int_type = int_type;
@@ -600,18 +584,24 @@ static void __init mct_init_dt(struct device_node *np, unsigned int int_type)
for (i = MCT_L0_IRQ; i < nr_irqs; i++)
mct_irqs[i] = irq_of_parse_and_map(np, i);
- exynos4_timer_resources(np, of_iomap(np, 0));
- exynos4_clocksource_init();
- exynos4_clockevent_init();
+ ret = exynos4_timer_resources(np, of_iomap(np, 0));
+ if (ret)
+ return ret;
+
+ ret = exynos4_clocksource_init();
+ if (ret)
+ return ret;
+
+ return exynos4_clockevent_init();
}
-static void __init mct_init_spi(struct device_node *np)
+static int __init mct_init_spi(struct device_node *np)
{
return mct_init_dt(np, MCT_INT_SPI);
}
-static void __init mct_init_ppi(struct device_node *np)
+static int __init mct_init_ppi(struct device_node *np)
{
return mct_init_dt(np, MCT_INT_PPI);
}
diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c
index 517e1c762..738515b89 100644
--- a/drivers/clocksource/fsl_ftm_timer.c
+++ b/drivers/clocksource/fsl_ftm_timer.c
@@ -316,15 +316,16 @@ static int __init ftm_calc_closest_round_cyc(unsigned long freq)
return 0;
}
-static void __init ftm_timer_init(struct device_node *np)
+static int __init ftm_timer_init(struct device_node *np)
{
unsigned long freq;
- int irq;
+ int ret, irq;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
- return;
+ return -ENOMEM;
+ ret = -ENXIO;
priv->clkevt_base = of_iomap(np, 0);
if (!priv->clkevt_base) {
pr_err("ftm: unable to map event timer registers\n");
@@ -337,6 +338,7 @@ static void __init ftm_timer_init(struct device_node *np)
goto err;
}
+ ret = -EINVAL;
irq = irq_of_parse_and_map(np, 0);
if (irq <= 0) {
pr_err("ftm: unable to get IRQ from DT, %d\n", irq);
@@ -349,18 +351,22 @@ static void __init ftm_timer_init(struct device_node *np)
if (!freq)
goto err;
- if (ftm_calc_closest_round_cyc(freq))
+ ret = ftm_calc_closest_round_cyc(freq);
+ if (ret)
goto err;
- if (ftm_clocksource_init(freq))
+ ret = ftm_clocksource_init(freq);
+ if (ret)
goto err;
- if (ftm_clockevent_init(freq, irq))
+ ret = ftm_clockevent_init(freq, irq);
+ if (ret)
goto err;
- return;
+ return 0;
err:
kfree(priv);
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(flextimer, "fsl,ftm-timer", ftm_timer_init);
diff --git a/drivers/clocksource/h8300_timer16.c b/drivers/clocksource/h8300_timer16.c
index 75c44079b..07d9d5be9 100644
--- a/drivers/clocksource/h8300_timer16.c
+++ b/drivers/clocksource/h8300_timer16.c
@@ -126,7 +126,7 @@ static struct timer16_priv timer16_priv = {
#define REG_CH 0
#define REG_COMM 1
-static void __init h8300_16timer_init(struct device_node *node)
+static int __init h8300_16timer_init(struct device_node *node)
{
void __iomem *base[2];
int ret, irq;
@@ -136,9 +136,10 @@ static void __init h8300_16timer_init(struct device_node *node)
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("failed to get clock for clocksource\n");
- return;
+ return PTR_ERR(clk);
}
+ ret = -ENXIO;
base[REG_CH] = of_iomap(node, 0);
if (!base[REG_CH]) {
pr_err("failed to map registers for clocksource\n");
@@ -151,6 +152,7 @@ static void __init h8300_16timer_init(struct device_node *node)
goto unmap_ch;
}
+ ret = -EINVAL;
irq = irq_of_parse_and_map(node, 0);
if (!irq) {
pr_err("failed to get irq for clockevent\n");
@@ -174,7 +176,7 @@ static void __init h8300_16timer_init(struct device_node *node)
clocksource_register_hz(&timer16_priv.cs,
clk_get_rate(clk) / 8);
- return;
+ return 0;
unmap_comm:
iounmap(base[REG_COMM]);
@@ -182,6 +184,8 @@ unmap_ch:
iounmap(base[REG_CH]);
free_clk:
clk_put(clk);
+ return ret;
}
-CLOCKSOURCE_OF_DECLARE(h8300_16bit, "renesas,16bit-timer", h8300_16timer_init);
+CLOCKSOURCE_OF_DECLARE(h8300_16bit, "renesas,16bit-timer",
+ h8300_16timer_init);
diff --git a/drivers/clocksource/h8300_timer8.c b/drivers/clocksource/h8300_timer8.c
index c151941e1..546bb180f 100644
--- a/drivers/clocksource/h8300_timer8.c
+++ b/drivers/clocksource/h8300_timer8.c
@@ -164,24 +164,26 @@ static struct timer8_priv timer8_priv = {
},
};
-static void __init h8300_8timer_init(struct device_node *node)
+static int __init h8300_8timer_init(struct device_node *node)
{
void __iomem *base;
- int irq;
+ int irq, ret;
struct clk *clk;
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("failed to get clock for clockevent\n");
- return;
+ return PTR_ERR(clk);
}
+ ret = -ENXIO;
base = of_iomap(node, 0);
if (!base) {
pr_err("failed to map registers for clockevent\n");
goto free_clk;
}
+ ret = -EINVAL;
irq = irq_of_parse_and_map(node, 0);
if (!irq) {
pr_err("failed to get irq for clockevent\n");
@@ -205,11 +207,12 @@ static void __init h8300_8timer_init(struct device_node *node)
clockevents_config_and_register(&timer8_priv.ced,
timer8_priv.rate, 1, 0x0000ffff);
- return;
+ return 0;
unmap_reg:
iounmap(base);
free_clk:
clk_put(clk);
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(h8300_8bit, "renesas,8bit-timer", h8300_8timer_init);
diff --git a/drivers/clocksource/h8300_tpu.c b/drivers/clocksource/h8300_tpu.c
index d4c1a287c..7bdf1991c 100644
--- a/drivers/clocksource/h8300_tpu.c
+++ b/drivers/clocksource/h8300_tpu.c
@@ -119,15 +119,16 @@ static struct tpu_priv tpu_priv = {
#define CH_L 0
#define CH_H 1
-static void __init h8300_tpu_init(struct device_node *node)
+static int __init h8300_tpu_init(struct device_node *node)
{
void __iomem *base[2];
struct clk *clk;
+ int ret = -ENXIO;
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("failed to get clock for clocksource\n");
- return;
+ return PTR_ERR(clk);
}
base[CH_L] = of_iomap(node, CH_L);
@@ -144,14 +145,13 @@ static void __init h8300_tpu_init(struct device_node *node)
tpu_priv.mapbase1 = base[CH_L];
tpu_priv.mapbase2 = base[CH_H];
- clocksource_register_hz(&tpu_priv.cs, clk_get_rate(clk) / 64);
-
- return;
+ return clocksource_register_hz(&tpu_priv.cs, clk_get_rate(clk) / 64);
unmap_L:
iounmap(base[CH_H]);
free_clk:
clk_put(clk);
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(h8300_tpu, "renesas,tpu", h8300_tpu_init);
diff --git a/drivers/clocksource/meson6_timer.c b/drivers/clocksource/meson6_timer.c
index 1fa22c4d2..52af591a9 100644
--- a/drivers/clocksource/meson6_timer.c
+++ b/drivers/clocksource/meson6_timer.c
@@ -126,18 +126,22 @@ static struct irqaction meson6_timer_irq = {
.dev_id = &meson6_clockevent,
};
-static void __init meson6_timer_init(struct device_node *node)
+static int __init meson6_timer_init(struct device_node *node)
{
u32 val;
int ret, irq;
timer_base = of_io_request_and_map(node, 0, "meson6-timer");
- if (IS_ERR(timer_base))
- panic("Can't map registers");
+ if (IS_ERR(timer_base)) {
+		pr_err("Can't map registers\n");
+ return -ENXIO;
+ }
irq = irq_of_parse_and_map(node, 0);
- if (irq <= 0)
- panic("Can't parse IRQ");
+ if (irq <= 0) {
+		pr_err("Can't parse IRQ\n");
+ return -EINVAL;
+ }
/* Set 1us for timer E */
val = readl(timer_base + TIMER_ISA_MUX);
@@ -158,14 +162,17 @@ static void __init meson6_timer_init(struct device_node *node)
meson6_clkevt_time_stop(CED_ID);
ret = setup_irq(irq, &meson6_timer_irq);
- if (ret)
+ if (ret) {
pr_warn("failed to setup irq %d\n", irq);
+ return ret;
+ }
meson6_clockevent.cpumask = cpu_possible_mask;
meson6_clockevent.irq = irq;
clockevents_config_and_register(&meson6_clockevent, USEC_PER_SEC,
1, 0xfffe);
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(meson6, "amlogic,meson6-timer",
meson6_timer_init);
diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c
index bcd5c0d60..a80ab3e44 100644
--- a/drivers/clocksource/metag_generic.c
+++ b/drivers/clocksource/metag_generic.c
@@ -90,7 +90,7 @@ unsigned long long sched_clock(void)
return ticks << HARDWARE_TO_NS_SHIFT;
}
-static void arch_timer_setup(unsigned int cpu)
+static int arch_timer_starting_cpu(unsigned int cpu)
{
unsigned int txdivtime;
struct clock_event_device *clk = &per_cpu(local_clockevent, cpu);
@@ -132,27 +132,9 @@ static void arch_timer_setup(unsigned int cpu)
val = core_reg_read(TXUCT_ID, TXTIMER_REGNUM, thread0);
__core_reg_set(TXTIMER, val);
}
+ return 0;
}
-static int arch_timer_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- int cpu = (long)hcpu;
-
- switch (action) {
- case CPU_STARTING:
- case CPU_STARTING_FROZEN:
- arch_timer_setup(cpu);
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block arch_timer_cpu_nb = {
- .notifier_call = arch_timer_cpu_notify,
-};
-
int __init metag_generic_timer_init(void)
{
/*
@@ -170,11 +152,8 @@ int __init metag_generic_timer_init(void)
setup_irq(tbisig_map(TBID_SIGNUM_TRT), &metag_timer_irq);
- /* Configure timer on boot CPU */
- arch_timer_setup(smp_processor_id());
-
- /* Hook cpu boot to configure other CPU's timers */
- register_cpu_notifier(&arch_timer_cpu_nb);
-
- return 0;
+ /* Hook cpu boot to configure the CPU's timers */
+ return cpuhp_setup_state(CPUHP_AP_METAG_TIMER_STARTING,
+ "AP_METAG_TIMER_STARTING",
+ arch_timer_starting_cpu, NULL);
}
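
The metag hunk is the canonical notifier-to-hotplug-state conversion in this series. cpuhp_setup_state() both installs the callback and invokes it on every CPU that is already online, which is why the explicit arch_timer_setup(smp_processor_id()) call for the boot CPU can be dropped. A sketch, assuming a per-cpu programming helper exists and using a placeholder CPUHP_AP_FOO_TIMER_STARTING for the real entry added to enum cpuhp_state:

#include <linux/cpuhotplug.h>
#include <linux/init.h>

/* Hypothetical stand-in for the driver's per-cpu timer programming. */
static void foo_program_local_timer(unsigned int cpu)
{
	/* configure the clockevent for @cpu */
}

static int foo_timer_starting_cpu(unsigned int cpu)
{
	/* body of the old CPU_STARTING / CPU_STARTING_FROZEN case */
	foo_program_local_timer(cpu);
	return 0;	/* nonzero would abort the CPU coming up */
}

static int __init foo_timer_register(void)
{
	/*
	 * Replaces register_cpu_notifier() plus the manual boot-CPU
	 * setup: the callback runs here for already-online CPUs and
	 * later for each CPU as it starts.
	 */
	return cpuhp_setup_state(CPUHP_AP_FOO_TIMER_STARTING,
				 "AP_FOO_TIMER_STARTING",
				 foo_timer_starting_cpu, NULL);
}
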
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index 89d3e4d79..b4b3ab5a1 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -49,10 +49,9 @@ struct irqaction gic_compare_irqaction = {
.name = "timer",
};
-static void gic_clockevent_cpu_init(struct clock_event_device *cd)
+static void gic_clockevent_cpu_init(unsigned int cpu,
+ struct clock_event_device *cd)
{
- unsigned int cpu = smp_processor_id();
-
cd->name = "MIPS GIC";
cd->features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_C3STOP;
@@ -79,19 +78,10 @@ static void gic_update_frequency(void *data)
clockevents_update_freq(this_cpu_ptr(&gic_clockevent_device), rate);
}
-static int gic_cpu_notifier(struct notifier_block *nb, unsigned long action,
- void *data)
+static int gic_starting_cpu(unsigned int cpu)
{
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_STARTING:
- gic_clockevent_cpu_init(this_cpu_ptr(&gic_clockevent_device));
- break;
- case CPU_DYING:
- gic_clockevent_cpu_exit(this_cpu_ptr(&gic_clockevent_device));
- break;
- }
-
- return NOTIFY_OK;
+ gic_clockevent_cpu_init(cpu, this_cpu_ptr(&gic_clockevent_device));
+ return 0;
}
static int gic_clk_notifier(struct notifier_block *nb, unsigned long action,
@@ -105,10 +95,11 @@ static int gic_clk_notifier(struct notifier_block *nb, unsigned long action,
return NOTIFY_OK;
}
-
-static struct notifier_block gic_cpu_nb = {
- .notifier_call = gic_cpu_notifier,
-};
+static int gic_dying_cpu(unsigned int cpu)
+{
+ gic_clockevent_cpu_exit(this_cpu_ptr(&gic_clockevent_device));
+ return 0;
+}
static struct notifier_block gic_clk_nb = {
.notifier_call = gic_clk_notifier,
@@ -125,12 +116,9 @@ static int gic_clockevent_init(void)
if (ret < 0)
return ret;
- ret = register_cpu_notifier(&gic_cpu_nb);
- if (ret < 0)
- pr_warn("GIC: Unable to register CPU notifier\n");
-
- gic_clockevent_cpu_init(this_cpu_ptr(&gic_clockevent_device));
-
+ cpuhp_setup_state(CPUHP_AP_MIPS_GIC_TIMER_STARTING,
+ "AP_MIPS_GIC_TIMER_STARTING", gic_starting_cpu,
+ gic_dying_cpu);
return 0;
}
@@ -146,7 +134,7 @@ static struct clocksource gic_clocksource = {
.archdata = { .vdso_clock_mode = VDSO_CLOCK_GIC },
};
-static void __init __gic_clocksource_init(void)
+static int __init __gic_clocksource_init(void)
{
int ret;
@@ -159,6 +147,8 @@ static void __init __gic_clocksource_init(void)
ret = clocksource_register_hz(&gic_clocksource, gic_frequency);
if (ret < 0)
pr_warn("GIC: Unable to register clocksource\n");
+
+ return ret;
}
void __init gic_clocksource_init(unsigned int frequency)
@@ -174,36 +164,40 @@ void __init gic_clocksource_init(unsigned int frequency)
gic_start_count();
}
-static void __init gic_clocksource_of_init(struct device_node *node)
+static int __init gic_clocksource_of_init(struct device_node *node)
{
struct clk *clk;
int ret;
- if (WARN_ON(!gic_present || !node->parent ||
- !of_device_is_compatible(node->parent, "mti,gic")))
- return;
+ if (!gic_present || !node->parent ||
+ !of_device_is_compatible(node->parent, "mti,gic")) {
+		pr_warn("No DT definition for the mips gic driver\n");
+ return -ENXIO;
+ }
 	clk = of_clk_get(node, 0);
 	if (!IS_ERR(clk)) {
-		if (clk_prepare_enable(clk) < 0) {
+		ret = clk_prepare_enable(clk);
+		if (ret < 0) {
 			pr_err("GIC failed to enable clock\n");
 			clk_put(clk);
-			return;
+			return ret;
}
gic_frequency = clk_get_rate(clk);
} else if (of_property_read_u32(node, "clock-frequency",
&gic_frequency)) {
pr_err("GIC frequency not specified.\n");
- return;
+		return -EINVAL;
}
gic_timer_irq = irq_of_parse_and_map(node, 0);
if (!gic_timer_irq) {
pr_err("GIC timer IRQ not specified.\n");
- return;
+		return -EINVAL;
}
- __gic_clocksource_init();
+ ret = __gic_clocksource_init();
+ if (ret)
+ return ret;
ret = gic_clockevent_init();
if (!ret && !IS_ERR(clk)) {
@@ -213,6 +207,8 @@ static void __init gic_clocksource_of_init(struct device_node *node)
/* And finally start the counter */
gic_start_count();
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(mips_gic_timer, "mti,gic-timer",
gic_clocksource_of_init);
diff --git a/drivers/clocksource/moxart_timer.c b/drivers/clocksource/moxart_timer.c
index 19857af65..841454417 100644
--- a/drivers/clocksource/moxart_timer.c
+++ b/drivers/clocksource/moxart_timer.c
@@ -119,34 +119,45 @@ static struct irqaction moxart_timer_irq = {
.dev_id = &moxart_clockevent,
};
-static void __init moxart_timer_init(struct device_node *node)
+static int __init moxart_timer_init(struct device_node *node)
{
int ret, irq;
unsigned long pclk;
struct clk *clk;
base = of_iomap(node, 0);
- if (!base)
- panic("%s: of_iomap failed\n", node->full_name);
+ if (!base) {
+ pr_err("%s: of_iomap failed\n", node->full_name);
+ return -ENXIO;
+ }
irq = irq_of_parse_and_map(node, 0);
- if (irq <= 0)
- panic("%s: irq_of_parse_and_map failed\n", node->full_name);
+ if (irq <= 0) {
+ pr_err("%s: irq_of_parse_and_map failed\n", node->full_name);
+ return -EINVAL;
+ }
ret = setup_irq(irq, &moxart_timer_irq);
- if (ret)
- panic("%s: setup_irq failed\n", node->full_name);
+ if (ret) {
+ pr_err("%s: setup_irq failed\n", node->full_name);
+ return ret;
+ }
clk = of_clk_get(node, 0);
- if (IS_ERR(clk))
- panic("%s: of_clk_get failed\n", node->full_name);
+ if (IS_ERR(clk)) {
+ pr_err("%s: of_clk_get failed\n", node->full_name);
+ return PTR_ERR(clk);
+ }
pclk = clk_get_rate(clk);
- if (clocksource_mmio_init(base + TIMER2_BASE + REG_COUNT,
- "moxart_timer", pclk, 200, 32,
- clocksource_mmio_readl_down))
- panic("%s: clocksource_mmio_init failed\n", node->full_name);
+ ret = clocksource_mmio_init(base + TIMER2_BASE + REG_COUNT,
+ "moxart_timer", pclk, 200, 32,
+ clocksource_mmio_readl_down);
+ if (ret) {
+ pr_err("%s: clocksource_mmio_init failed\n", node->full_name);
+ return ret;
+ }
clock_count_per_tick = DIV_ROUND_CLOSEST(pclk, HZ);
@@ -164,5 +175,7 @@ static void __init moxart_timer_init(struct device_node *node)
*/
clockevents_config_and_register(&moxart_clockevent, pclk,
0x4, 0xfffffffe);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(moxart, "moxa,moxart-timer", moxart_timer_init);
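
Both meson6 and moxart now propagate the result of clocksource_mmio_init() instead of panicking. That helper wraps clocksource_register_hz() for a plain memory-mapped free-running counter; a sketch of its use, with a made-up register offset:

#include <linux/clocksource.h>
#include <linux/io.h>

#define FOO_COUNTER	0x10	/* hypothetical free-running counter */

static int __init foo_clocksource_setup(void __iomem *base,
					unsigned long rate)
{
	/*
	 * 32-bit down-counting register, rating 200. The helper returns
	 * 0 or a negative errno, which the init callback can now hand
	 * back to the core instead of feeding a panic().
	 */
	return clocksource_mmio_init(base + FOO_COUNTER, "foo_timer",
				     rate, 200, 32,
				     clocksource_mmio_readl_down);
}
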
diff --git a/drivers/clocksource/mps2-timer.c b/drivers/clocksource/mps2-timer.c
index 3d33a5e23..3e4431ed9 100644
--- a/drivers/clocksource/mps2-timer.c
+++ b/drivers/clocksource/mps2-timer.c
@@ -250,7 +250,7 @@ out:
return ret;
}
-static void __init mps2_timer_init(struct device_node *np)
+static int __init mps2_timer_init(struct device_node *np)
{
static int has_clocksource, has_clockevent;
-	int ret;
+	int ret = 0;
@@ -259,7 +259,7 @@ static void __init mps2_timer_init(struct device_node *np)
ret = mps2_clocksource_init(np);
if (!ret) {
has_clocksource = 1;
- return;
+ return 0;
}
}
@@ -267,9 +267,11 @@ static void __init mps2_timer_init(struct device_node *np)
ret = mps2_clockevent_init(np);
if (!ret) {
has_clockevent = 1;
- return;
+ return 0;
}
}
+
+	return ret;
}
CLOCKSOURCE_OF_DECLARE(mps2_timer, "arm,mps2-timer", mps2_timer_init);
diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c
index 7e583f8ea..90659493c 100644
--- a/drivers/clocksource/mtk_timer.c
+++ b/drivers/clocksource/mtk_timer.c
@@ -181,7 +181,7 @@ static void mtk_timer_enable_irq(struct mtk_clock_event_device *evt, u8 timer)
evt->gpt_base + GPT_IRQ_EN_REG);
}
-static void __init mtk_timer_init(struct device_node *node)
+static int __init mtk_timer_init(struct device_node *node)
{
struct mtk_clock_event_device *evt;
struct resource res;
@@ -190,7 +190,7 @@ static void __init mtk_timer_init(struct device_node *node)
evt = kzalloc(sizeof(*evt), GFP_KERNEL);
if (!evt)
- return;
+ return -ENOMEM;
evt->dev.name = "mtk_tick";
evt->dev.rating = 300;
@@ -248,7 +248,7 @@ static void __init mtk_timer_init(struct device_node *node)
mtk_timer_enable_irq(evt, GPT_CLK_EVT);
- return;
+ return 0;
err_clk_disable:
clk_disable_unprepare(clk);
@@ -262,5 +262,7 @@ err_mem:
release_mem_region(res.start, resource_size(&res));
err_kzalloc:
kfree(evt);
+
+ return -EINVAL;
}
CLOCKSOURCE_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_timer_init);
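
mtk unwinds its resources in strict reverse order of acquisition, but funnels every failure after the allocation into a blanket -EINVAL; most other drivers in the series keep the step-specific code. The conventions the series applies, summarized as a comment sketch:

/*
 * Error-code conventions used across these conversions (sketch):
 *
 *   kzalloc()/alloc_percpu() failure     -> -ENOMEM
 *   of_iomap()/ioremap() returning NULL  -> -ENXIO  (no code available)
 *   irq_of_parse_and_map() returning 0   -> -EINVAL (no code available)
 *   of_clk_get()/clk_get() error pointer -> PTR_ERR(clk)
 *   clk_prepare_enable(), setup_irq(),
 *   clocksource_register_hz(), ...       -> propagate the returned code
 */
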
diff --git a/drivers/clocksource/mxs_timer.c b/drivers/clocksource/mxs_timer.c
index f5ce2961c..0ba0a913b 100644
--- a/drivers/clocksource/mxs_timer.c
+++ b/drivers/clocksource/mxs_timer.c
@@ -31,8 +31,6 @@
#include <linux/stmp_device.h>
#include <linux/sched_clock.h>
-#include <asm/mach/time.h>
-
/*
* There are 2 versions of the timrot on Freescale MXS-based SoCs.
* The v1 on MX23 only gets 16 bits counter, while v2 on MX28
@@ -226,10 +224,10 @@ static int __init mxs_clocksource_init(struct clk *timer_clk)
return 0;
}
-static void __init mxs_timer_init(struct device_node *np)
+static int __init mxs_timer_init(struct device_node *np)
{
struct clk *timer_clk;
- int irq;
+ int irq, ret;
mxs_timrot_base = of_iomap(np, 0);
WARN_ON(!mxs_timrot_base);
@@ -237,10 +235,12 @@ static void __init mxs_timer_init(struct device_node *np)
timer_clk = of_clk_get(np, 0);
if (IS_ERR(timer_clk)) {
pr_err("%s: failed to get clk\n", __func__);
- return;
+ return PTR_ERR(timer_clk);
}
- clk_prepare_enable(timer_clk);
+ ret = clk_prepare_enable(timer_clk);
+ if (ret)
+ return ret;
/*
* Initialize timers to a known state
@@ -278,11 +278,19 @@ static void __init mxs_timer_init(struct device_node *np)
mxs_timrot_base + HW_TIMROT_FIXED_COUNTn(1));
/* init and register the timer to the framework */
- mxs_clocksource_init(timer_clk);
- mxs_clockevent_init(timer_clk);
+ ret = mxs_clocksource_init(timer_clk);
+ if (ret)
+ return ret;
+
+ ret = mxs_clockevent_init(timer_clk);
+ if (ret)
+ return ret;
/* Make irqs happen */
irq = irq_of_parse_and_map(np, 0);
- setup_irq(irq, &mxs_timer_irq);
+ if (irq <= 0)
+ return -EINVAL;
+
+ return setup_irq(irq, &mxs_timer_irq);
}
CLOCKSOURCE_OF_DECLARE(mxs, "fsl,timrot", mxs_timer_init);
diff --git a/drivers/clocksource/nomadik-mtu.c b/drivers/clocksource/nomadik-mtu.c
index bc8dd443c..3c124d1ca 100644
--- a/drivers/clocksource/nomadik-mtu.c
+++ b/drivers/clocksource/nomadik-mtu.c
@@ -193,10 +193,11 @@ static struct irqaction nmdk_timer_irq = {
.dev_id = &nmdk_clkevt,
};
-static void __init nmdk_timer_init(void __iomem *base, int irq,
+static int __init nmdk_timer_init(void __iomem *base, int irq,
struct clk *pclk, struct clk *clk)
{
unsigned long rate;
+ int ret;
mtu_base = base;
@@ -226,10 +227,12 @@ static void __init nmdk_timer_init(void __iomem *base, int irq,
/* Timer 0 is the free running clocksource */
nmdk_clksrc_reset();
- if (clocksource_mmio_init(mtu_base + MTU_VAL(0), "mtu_0",
- rate, 200, 32, clocksource_mmio_readl_down))
- pr_err("timer: failed to initialize clock source %s\n",
- "mtu_0");
+ ret = clocksource_mmio_init(mtu_base + MTU_VAL(0), "mtu_0",
+ rate, 200, 32, clocksource_mmio_readl_down);
+ if (ret) {
+ pr_err("timer: failed to initialize clock source %s\n", "mtu_0");
+ return ret;
+ }
#ifdef CONFIG_CLKSRC_NOMADIK_MTU_SCHED_CLOCK
sched_clock_register(nomadik_read_sched_clock, 32, rate);
@@ -244,9 +247,11 @@ static void __init nmdk_timer_init(void __iomem *base, int irq,
mtu_delay_timer.read_current_timer = &nmdk_timer_read_current_timer;
mtu_delay_timer.freq = rate;
register_current_timer_delay(&mtu_delay_timer);
+
+ return 0;
}
-static void __init nmdk_timer_of_init(struct device_node *node)
+static int __init nmdk_timer_of_init(struct device_node *node)
{
struct clk *pclk;
struct clk *clk;
@@ -254,22 +259,30 @@ static void __init nmdk_timer_of_init(struct device_node *node)
int irq;
base = of_iomap(node, 0);
- if (!base)
- panic("Can't remap registers");
+ if (!base) {
+		pr_err("Can't remap registers\n");
+ return -ENXIO;
+ }
pclk = of_clk_get_by_name(node, "apb_pclk");
- if (IS_ERR(pclk))
- panic("could not get apb_pclk");
+ if (IS_ERR(pclk)) {
+		pr_err("could not get apb_pclk\n");
+ return PTR_ERR(pclk);
+ }
clk = of_clk_get_by_name(node, "timclk");
- if (IS_ERR(clk))
- panic("could not get timclk");
+ if (IS_ERR(clk)) {
+		pr_err("could not get timclk\n");
+ return PTR_ERR(clk);
+ }
irq = irq_of_parse_and_map(node, 0);
- if (irq <= 0)
- panic("Can't parse IRQ");
+ if (irq <= 0) {
+		pr_err("Can't parse IRQ\n");
+ return -EINVAL;
+ }
- nmdk_timer_init(base, irq, pclk, clk);
+ return nmdk_timer_init(base, irq, pclk, clk);
}
CLOCKSOURCE_OF_DECLARE(nomadik_mtu, "st,nomadik-mtu",
nmdk_timer_of_init);
diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c
index 45b6a4999..3e1cb512f 100644
--- a/drivers/clocksource/pxa_timer.c
+++ b/drivers/clocksource/pxa_timer.c
@@ -21,6 +21,8 @@
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
+#include <clocksource/pxa.h>
+
#include <asm/div64.h>
#define OSMR0 0x00 /* OS Timer 0 Match Register */
@@ -150,8 +152,10 @@ static struct irqaction pxa_ost0_irq = {
.dev_id = &ckevt_pxa_osmr0,
};
-static void __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
+static int __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
{
+ int ret;
+
timer_writel(0, OIER);
timer_writel(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR);
@@ -159,39 +163,57 @@ static void __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
ckevt_pxa_osmr0.cpumask = cpumask_of(0);
- setup_irq(irq, &pxa_ost0_irq);
+ ret = setup_irq(irq, &pxa_ost0_irq);
+ if (ret) {
+		pr_err("Failed to setup irq\n");
+ return ret;
+ }
+
+ ret = clocksource_mmio_init(timer_base + OSCR, "oscr0", clock_tick_rate, 200,
+ 32, clocksource_mmio_readl_up);
+ if (ret) {
+		pr_err("Failed to init clocksource\n");
+ return ret;
+ }
- clocksource_mmio_init(timer_base + OSCR, "oscr0", clock_tick_rate, 200,
- 32, clocksource_mmio_readl_up);
clockevents_config_and_register(&ckevt_pxa_osmr0, clock_tick_rate,
MIN_OSCR_DELTA * 2, 0x7fffffff);
+
+ return 0;
}
-static void __init pxa_timer_dt_init(struct device_node *np)
+static int __init pxa_timer_dt_init(struct device_node *np)
{
struct clk *clk;
- int irq;
+ int irq, ret;
/* timer registers are shared with watchdog timer */
timer_base = of_iomap(np, 0);
- if (!timer_base)
- panic("%s: unable to map resource\n", np->name);
+ if (!timer_base) {
+ pr_err("%s: unable to map resource\n", np->name);
+ return -ENXIO;
+ }
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
pr_crit("%s: unable to get clk\n", np->name);
- return;
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+		pr_crit("Failed to prepare clock\n");
+ return ret;
}
- clk_prepare_enable(clk);
/* we are only interested in OS-timer0 irq */
irq = irq_of_parse_and_map(np, 0);
if (irq <= 0) {
pr_crit("%s: unable to parse OS-timer0 irq\n", np->name);
- return;
+ return -EINVAL;
}
- pxa_timer_common_init(irq, clk_get_rate(clk));
+ return pxa_timer_common_init(irq, clk_get_rate(clk));
}
CLOCKSOURCE_OF_DECLARE(pxa_timer, "marvell,pxa-timer", pxa_timer_dt_init);
diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/qcom-timer.c
index f8e09f923..3283cfa2a 100644
--- a/drivers/clocksource/qcom-timer.c
+++ b/drivers/clocksource/qcom-timer.c
@@ -105,9 +105,9 @@ static struct clocksource msm_clocksource = {
static int msm_timer_irq;
static int msm_timer_has_ppi;
-static int msm_local_timer_setup(struct clock_event_device *evt)
+static int msm_local_timer_starting_cpu(unsigned int cpu)
{
- int cpu = smp_processor_id();
+ struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu);
int err;
evt->irq = msm_timer_irq;
@@ -135,35 +135,15 @@ static int msm_local_timer_setup(struct clock_event_device *evt)
return 0;
}
-static void msm_local_timer_stop(struct clock_event_device *evt)
+static int msm_local_timer_dying_cpu(unsigned int cpu)
{
+ struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu);
+
evt->set_state_shutdown(evt);
disable_percpu_irq(evt->irq);
+ return 0;
}
-static int msm_timer_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- /*
- * Grab cpu pointer in each case to avoid spurious
- * preemptible warnings
- */
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_STARTING:
- msm_local_timer_setup(this_cpu_ptr(msm_evt));
- break;
- case CPU_DYING:
- msm_local_timer_stop(this_cpu_ptr(msm_evt));
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block msm_timer_cpu_nb = {
- .notifier_call = msm_timer_cpu_notify,
-};
-
static u64 notrace msm_sched_clock_read(void)
{
return msm_clocksource.read(&msm_clocksource);
@@ -178,7 +158,7 @@ static struct delay_timer msm_delay_timer = {
.read_current_timer = msm_read_current_timer,
};
-static void __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
+static int __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
bool percpu)
{
struct clocksource *cs = &msm_clocksource;
@@ -200,14 +180,15 @@ static void __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
if (res) {
pr_err("request_percpu_irq failed\n");
} else {
- res = register_cpu_notifier(&msm_timer_cpu_nb);
+ /* Install and invoke hotplug callbacks */
+ res = cpuhp_setup_state(CPUHP_AP_QCOM_TIMER_STARTING,
+ "AP_QCOM_TIMER_STARTING",
+ msm_local_timer_starting_cpu,
+ msm_local_timer_dying_cpu);
if (res) {
free_percpu_irq(irq, msm_evt);
goto err;
}
-
- /* Immediately configure the timer on the boot CPU */
- msm_local_timer_setup(raw_cpu_ptr(msm_evt));
}
err:
@@ -218,12 +199,14 @@ err:
sched_clock_register(msm_sched_clock_read, sched_bits, dgt_hz);
msm_delay_timer.freq = dgt_hz;
register_current_timer_delay(&msm_delay_timer);
+
+ return res;
}
-static void __init msm_dt_timer_init(struct device_node *np)
+static int __init msm_dt_timer_init(struct device_node *np)
{
u32 freq;
- int irq;
+ int irq, ret;
struct resource res;
u32 percpu_offset;
void __iomem *base;
@@ -232,34 +215,35 @@ static void __init msm_dt_timer_init(struct device_node *np)
base = of_iomap(np, 0);
if (!base) {
pr_err("Failed to map event base\n");
- return;
+ return -ENXIO;
}
/* We use GPT0 for the clockevent */
irq = irq_of_parse_and_map(np, 1);
if (irq <= 0) {
pr_err("Can't get irq\n");
- return;
+ return -EINVAL;
}
/* We use CPU0's DGT for the clocksource */
if (of_property_read_u32(np, "cpu-offset", &percpu_offset))
percpu_offset = 0;
- if (of_address_to_resource(np, 0, &res)) {
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret) {
pr_err("Failed to parse DGT resource\n");
- return;
+ return ret;
}
cpu0_base = ioremap(res.start + percpu_offset, resource_size(&res));
if (!cpu0_base) {
pr_err("Failed to map source base\n");
- return;
+ return -EINVAL;
}
if (of_property_read_u32(np, "clock-frequency", &freq)) {
pr_err("Unknown frequency\n");
- return;
+ return -EINVAL;
}
event_base = base + 0x4;
@@ -268,7 +252,7 @@ static void __init msm_dt_timer_init(struct device_node *np)
freq /= 4;
writel_relaxed(DGT_CLK_CTL_DIV_4, source_base + DGT_CLK_CTL);
- msm_timer_init(freq, 32, irq, !!percpu_offset);
+ return msm_timer_init(freq, 32, irq, !!percpu_offset);
}
CLOCKSOURCE_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init);
CLOCKSOURCE_OF_DECLARE(scss_timer, "qcom,scss-timer", msm_dt_timer_init);
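
The qcom conversion shows the full pattern for per-cpu tick devices: the hotplug core passes the CPU number, so per_cpu_ptr() replaces the old smp_processor_id()/this_cpu_ptr() pair and its preemption caveats, and a dying callback pairs with the starting one. A sketch, assuming the clockevent fields and irq were filled in at allocation time (all foo_* names are placeholders):

#include <linux/clockchips.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>

static struct clock_event_device __percpu *foo_evt;
static u32 foo_rate;	/* assumed set by the probe code */

static int foo_local_timer_starting_cpu(unsigned int cpu)
{
	/* @cpu comes from the hotplug core; no smp_processor_id() */
	struct clock_event_device *evt = per_cpu_ptr(foo_evt, cpu);

	evt->cpumask = cpumask_of(cpu);
	clockevents_config_and_register(evt, foo_rate, 4, 0xffffffff);
	enable_percpu_irq(evt->irq, IRQ_TYPE_NONE);
	return 0;
}

static int foo_local_timer_dying_cpu(unsigned int cpu)
{
	struct clock_event_device *evt = per_cpu_ptr(foo_evt, cpu);

	evt->set_state_shutdown(evt);
	disable_percpu_irq(evt->irq);
	return 0;
}

static int __init foo_local_timer_register(void)
{
	/* starting runs as each CPU comes up, dying as it goes down */
	return cpuhp_setup_state(CPUHP_AP_FOO_TIMER_STARTING,
				 "AP_FOO_TIMER_STARTING",
				 foo_local_timer_starting_cpu,
				 foo_local_timer_dying_cpu);
}
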
diff --git a/drivers/clocksource/rockchip_timer.c b/drivers/clocksource/rockchip_timer.c
index b991b288c..23e267acb 100644
--- a/drivers/clocksource/rockchip_timer.c
+++ b/drivers/clocksource/rockchip_timer.c
@@ -19,7 +19,8 @@
#define TIMER_LOAD_COUNT0 0x00
#define TIMER_LOAD_COUNT1 0x04
-#define TIMER_CONTROL_REG 0x10
+#define TIMER_CONTROL_REG3288 0x10
+#define TIMER_CONTROL_REG3399 0x1c
#define TIMER_INT_STATUS 0x18
#define TIMER_DISABLE 0x0
@@ -31,6 +32,7 @@
struct bc_timer {
struct clock_event_device ce;
void __iomem *base;
+ void __iomem *ctrl;
u32 freq;
};
@@ -46,15 +48,20 @@ static inline void __iomem *rk_base(struct clock_event_device *ce)
return rk_timer(ce)->base;
}
+static inline void __iomem *rk_ctrl(struct clock_event_device *ce)
+{
+ return rk_timer(ce)->ctrl;
+}
+
static inline void rk_timer_disable(struct clock_event_device *ce)
{
- writel_relaxed(TIMER_DISABLE, rk_base(ce) + TIMER_CONTROL_REG);
+ writel_relaxed(TIMER_DISABLE, rk_ctrl(ce));
}
static inline void rk_timer_enable(struct clock_event_device *ce, u32 flags)
{
writel_relaxed(TIMER_ENABLE | TIMER_INT_UNMASK | flags,
- rk_base(ce) + TIMER_CONTROL_REG);
+ rk_ctrl(ce));
}
static void rk_timer_update_counter(unsigned long cycles,
@@ -106,37 +113,42 @@ static irqreturn_t rk_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void __init rk_timer_init(struct device_node *np)
+static int __init rk_timer_init(struct device_node *np, u32 ctrl_reg)
{
struct clock_event_device *ce = &bc_timer.ce;
struct clk *timer_clk;
struct clk *pclk;
- int ret, irq;
+ int ret = -EINVAL, irq;
bc_timer.base = of_iomap(np, 0);
if (!bc_timer.base) {
pr_err("Failed to get base address for '%s'\n", TIMER_NAME);
- return;
+ return -ENXIO;
}
+ bc_timer.ctrl = bc_timer.base + ctrl_reg;
pclk = of_clk_get_by_name(np, "pclk");
if (IS_ERR(pclk)) {
+ ret = PTR_ERR(pclk);
pr_err("Failed to get pclk for '%s'\n", TIMER_NAME);
goto out_unmap;
}
- if (clk_prepare_enable(pclk)) {
+ ret = clk_prepare_enable(pclk);
+ if (ret) {
pr_err("Failed to enable pclk for '%s'\n", TIMER_NAME);
goto out_unmap;
}
timer_clk = of_clk_get_by_name(np, "timer");
if (IS_ERR(timer_clk)) {
+ ret = PTR_ERR(timer_clk);
pr_err("Failed to get timer clock for '%s'\n", TIMER_NAME);
goto out_timer_clk;
}
- if (clk_prepare_enable(timer_clk)) {
+ ret = clk_prepare_enable(timer_clk);
+ if (ret) {
pr_err("Failed to enable timer clock\n");
goto out_timer_clk;
}
@@ -145,17 +157,19 @@ static void __init rk_timer_init(struct device_node *np)
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
+ ret = -EINVAL;
pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME);
goto out_irq;
}
ce->name = TIMER_NAME;
- ce->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ ce->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_DYNIRQ;
ce->set_next_event = rk_timer_set_next_event;
ce->set_state_shutdown = rk_timer_shutdown;
ce->set_state_periodic = rk_timer_set_periodic;
ce->irq = irq;
- ce->cpumask = cpumask_of(0);
+ ce->cpumask = cpu_possible_mask;
ce->rating = 250;
rk_timer_interrupt_clear(ce);
@@ -169,7 +183,7 @@ static void __init rk_timer_init(struct device_node *np)
clockevents_config_and_register(ce, bc_timer.freq, 1, UINT_MAX);
- return;
+ return 0;
out_irq:
clk_disable_unprepare(timer_clk);
@@ -177,6 +191,21 @@ out_timer_clk:
clk_disable_unprepare(pclk);
out_unmap:
iounmap(bc_timer.base);
+
+ return ret;
+}
+
+static int __init rk3288_timer_init(struct device_node *np)
+{
+ return rk_timer_init(np, TIMER_CONTROL_REG3288);
+}
+
+static int __init rk3399_timer_init(struct device_node *np)
+{
+ return rk_timer_init(np, TIMER_CONTROL_REG3399);
}
-CLOCKSOURCE_OF_DECLARE(rk_timer, "rockchip,rk3288-timer", rk_timer_init);
+CLOCKSOURCE_OF_DECLARE(rk3288_timer, "rockchip,rk3288-timer",
+ rk3288_timer_init);
+CLOCKSOURCE_OF_DECLARE(rk3399_timer, "rockchip,rk3399-timer",
+ rk3399_timer_init);
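
The rockchip rework doubles as a small recipe for supporting SoC revisions that move a register: keep one common init, parameterize it with the offset, and declare one thin wrapper per compatible. The offsets below are the real ones from the hunk; the foo names and compatibles are placeholders:

#include <linux/clocksource.h>
#include <linux/of.h>

#define FOO_CTRL_REG_V1	0x10	/* rk3288 layout */
#define FOO_CTRL_REG_V2	0x1c	/* rk3399 layout */

static int __init foo_timer_common_init(struct device_node *np, u32 ctrl)
{
	/* map registers, stash base + ctrl, register the clockevent ... */
	return 0;
}

static int __init foo_v1_timer_init(struct device_node *np)
{
	return foo_timer_common_init(np, FOO_CTRL_REG_V1);
}

static int __init foo_v2_timer_init(struct device_node *np)
{
	return foo_timer_common_init(np, FOO_CTRL_REG_V2);
}

CLOCKSOURCE_OF_DECLARE(foo_v1, "vendor,foo-v1-timer", foo_v1_timer_init);
CLOCKSOURCE_OF_DECLARE(foo_v2, "vendor,foo-v2-timer", foo_v2_timer_init);
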
diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c
index 9502bc4c3..54565bd00 100644
--- a/drivers/clocksource/samsung_pwm_timer.c
+++ b/drivers/clocksource/samsung_pwm_timer.c
@@ -130,9 +130,9 @@ static void samsung_time_stop(unsigned int channel)
spin_lock_irqsave(&samsung_pwm_lock, flags);
- tcon = __raw_readl(pwm.base + REG_TCON);
+ tcon = readl_relaxed(pwm.base + REG_TCON);
tcon &= ~TCON_START(channel);
- __raw_writel(tcon, pwm.base + REG_TCON);
+ writel_relaxed(tcon, pwm.base + REG_TCON);
spin_unlock_irqrestore(&samsung_pwm_lock, flags);
}
@@ -148,14 +148,14 @@ static void samsung_time_setup(unsigned int channel, unsigned long tcnt)
spin_lock_irqsave(&samsung_pwm_lock, flags);
- tcon = __raw_readl(pwm.base + REG_TCON);
+ tcon = readl_relaxed(pwm.base + REG_TCON);
tcon &= ~(TCON_START(tcon_chan) | TCON_AUTORELOAD(tcon_chan));
tcon |= TCON_MANUALUPDATE(tcon_chan);
- __raw_writel(tcnt, pwm.base + REG_TCNTB(channel));
- __raw_writel(tcnt, pwm.base + REG_TCMPB(channel));
- __raw_writel(tcon, pwm.base + REG_TCON);
+ writel_relaxed(tcnt, pwm.base + REG_TCNTB(channel));
+ writel_relaxed(tcnt, pwm.base + REG_TCMPB(channel));
+ writel_relaxed(tcon, pwm.base + REG_TCON);
spin_unlock_irqrestore(&samsung_pwm_lock, flags);
}
@@ -170,7 +170,7 @@ static void samsung_time_start(unsigned int channel, bool periodic)
spin_lock_irqsave(&samsung_pwm_lock, flags);
- tcon = __raw_readl(pwm.base + REG_TCON);
+ tcon = readl_relaxed(pwm.base + REG_TCON);
tcon &= ~TCON_MANUALUPDATE(channel);
tcon |= TCON_START(channel);
@@ -180,7 +180,7 @@ static void samsung_time_start(unsigned int channel, bool periodic)
else
tcon &= ~TCON_AUTORELOAD(channel);
- __raw_writel(tcon, pwm.base + REG_TCON);
+ writel_relaxed(tcon, pwm.base + REG_TCON);
spin_unlock_irqrestore(&samsung_pwm_lock, flags);
}
@@ -333,11 +333,10 @@ static u64 notrace samsung_read_sched_clock(void)
return samsung_clocksource_read(NULL);
}
-static void __init samsung_clocksource_init(void)
+static int __init samsung_clocksource_init(void)
{
unsigned long pclk;
unsigned long clock_rate;
- int ret;
pclk = clk_get_rate(pwm.timerclk);
@@ -358,9 +357,7 @@ static void __init samsung_clocksource_init(void)
pwm.variant.bits, clock_rate);
samsung_clocksource.mask = CLOCKSOURCE_MASK(pwm.variant.bits);
- ret = clocksource_register_hz(&samsung_clocksource, clock_rate);
- if (ret)
- panic("samsung_clocksource_timer: can't register clocksource\n");
+ return clocksource_register_hz(&samsung_clocksource, clock_rate);
}
static void __init samsung_timer_resources(void)
@@ -380,26 +377,31 @@ static void __init samsung_timer_resources(void)
/*
* PWM master driver
*/
-static void __init _samsung_pwm_clocksource_init(void)
+static int __init _samsung_pwm_clocksource_init(void)
{
u8 mask;
int channel;
mask = ~pwm.variant.output_mask & ((1 << SAMSUNG_PWM_NUM) - 1);
channel = fls(mask) - 1;
- if (channel < 0)
- panic("failed to find PWM channel for clocksource");
+ if (channel < 0) {
+		pr_crit("failed to find PWM channel for clocksource\n");
+ return -EINVAL;
+ }
pwm.source_id = channel;
mask &= ~(1 << channel);
channel = fls(mask) - 1;
- if (channel < 0)
- panic("failed to find PWM channel for clock event");
+ if (channel < 0) {
+		pr_crit("failed to find PWM channel for clock event\n");
+ return -EINVAL;
+ }
pwm.event_id = channel;
samsung_timer_resources();
samsung_clockevent_init();
- samsung_clocksource_init();
+
+ return samsung_clocksource_init();
}
void __init samsung_pwm_clocksource_init(void __iomem *base,
@@ -417,8 +419,8 @@ void __init samsung_pwm_clocksource_init(void __iomem *base,
}
#ifdef CONFIG_CLKSRC_OF
-static void __init samsung_pwm_alloc(struct device_node *np,
- const struct samsung_pwm_variant *variant)
+static int __init samsung_pwm_alloc(struct device_node *np,
+ const struct samsung_pwm_variant *variant)
{
struct property *prop;
const __be32 *cur;
@@ -441,14 +443,16 @@ static void __init samsung_pwm_alloc(struct device_node *np,
pwm.base = of_iomap(np, 0);
if (!pwm.base) {
pr_err("%s: failed to map PWM registers\n", __func__);
- return;
+ return -ENXIO;
}
pwm.timerclk = of_clk_get_by_name(np, "timers");
- if (IS_ERR(pwm.timerclk))
- panic("failed to get timers clock for timer");
+ if (IS_ERR(pwm.timerclk)) {
+		pr_crit("failed to get timers clock for timer\n");
+ return PTR_ERR(pwm.timerclk);
+ }
- _samsung_pwm_clocksource_init();
+ return _samsung_pwm_clocksource_init();
}
static const struct samsung_pwm_variant s3c24xx_variant = {
@@ -458,9 +462,9 @@ static const struct samsung_pwm_variant s3c24xx_variant = {
.tclk_mask = (1 << 4),
};
-static void __init s3c2410_pwm_clocksource_init(struct device_node *np)
+static int __init s3c2410_pwm_clocksource_init(struct device_node *np)
{
- samsung_pwm_alloc(np, &s3c24xx_variant);
+ return samsung_pwm_alloc(np, &s3c24xx_variant);
}
CLOCKSOURCE_OF_DECLARE(s3c2410_pwm, "samsung,s3c2410-pwm", s3c2410_pwm_clocksource_init);
@@ -471,9 +475,9 @@ static const struct samsung_pwm_variant s3c64xx_variant = {
.tclk_mask = (1 << 7) | (1 << 6) | (1 << 5),
};
-static void __init s3c64xx_pwm_clocksource_init(struct device_node *np)
+static int __init s3c64xx_pwm_clocksource_init(struct device_node *np)
{
- samsung_pwm_alloc(np, &s3c64xx_variant);
+ return samsung_pwm_alloc(np, &s3c64xx_variant);
}
CLOCKSOURCE_OF_DECLARE(s3c6400_pwm, "samsung,s3c6400-pwm", s3c64xx_pwm_clocksource_init);
@@ -484,9 +488,9 @@ static const struct samsung_pwm_variant s5p64x0_variant = {
.tclk_mask = 0,
};
-static void __init s5p64x0_pwm_clocksource_init(struct device_node *np)
+static int __init s5p64x0_pwm_clocksource_init(struct device_node *np)
{
- samsung_pwm_alloc(np, &s5p64x0_variant);
+ return samsung_pwm_alloc(np, &s5p64x0_variant);
}
CLOCKSOURCE_OF_DECLARE(s5p6440_pwm, "samsung,s5p6440-pwm", s5p64x0_pwm_clocksource_init);
@@ -497,9 +501,9 @@ static const struct samsung_pwm_variant s5p_variant = {
.tclk_mask = (1 << 5),
};
-static void __init s5p_pwm_clocksource_init(struct device_node *np)
+static int __init s5p_pwm_clocksource_init(struct device_node *np)
{
- samsung_pwm_alloc(np, &s5p_variant);
+ return samsung_pwm_alloc(np, &s5p_variant);
}
CLOCKSOURCE_OF_DECLARE(s5pc100_pwm, "samsung,s5pc100-pwm", s5p_pwm_clocksource_init);
#endif
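
Independent of the error-handling work, the samsung hunks replace __raw_readl()/__raw_writel() with readl_relaxed()/writel_relaxed(). Both families skip the barriers implied by readl()/writel(), but the _relaxed accessors additionally handle byte order, so they stay correct on big-endian builds. Condensed from samsung_time_stop() above; `pwm_lock`, `base` and `channel` stand in for the driver's statics, and REG_TCON/TCON_START() are the driver's own macros:

#include <linux/io.h>
#include <linux/spinlock.h>

static void foo_time_stop(void __iomem *base, unsigned int channel,
			  spinlock_t *pwm_lock)
{
	unsigned long flags;
	u32 tcon;

	spin_lock_irqsave(pwm_lock, flags);

	/* _relaxed: no barrier (like __raw_*), but byte-order aware */
	tcon = readl_relaxed(base + REG_TCON);
	tcon &= ~TCON_START(channel);
	writel_relaxed(tcon, base + REG_TCON);

	spin_unlock_irqrestore(pwm_lock, flags);
}
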
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c
index e84877a2c..c83452cac 100644
--- a/drivers/clocksource/sun4i_timer.c
+++ b/drivers/clocksource/sun4i_timer.c
@@ -150,7 +150,7 @@ static u64 notrace sun4i_timer_sched_read(void)
return ~readl(timer_base + TIMER_CNTVAL_REG(1));
}
-static void __init sun4i_timer_init(struct device_node *node)
+static int __init sun4i_timer_init(struct device_node *node)
{
unsigned long rate = 0;
struct clk *clk;
@@ -158,17 +158,28 @@ static void __init sun4i_timer_init(struct device_node *node)
u32 val;
timer_base = of_iomap(node, 0);
- if (!timer_base)
- panic("Can't map registers");
+ if (!timer_base) {
+		pr_crit("Can't map registers\n");
+ return -ENXIO;
+ }
irq = irq_of_parse_and_map(node, 0);
- if (irq <= 0)
- panic("Can't parse IRQ");
+ if (irq <= 0) {
+		pr_crit("Can't parse IRQ\n");
+ return -EINVAL;
+ }
clk = of_clk_get(node, 0);
- if (IS_ERR(clk))
- panic("Can't get timer clock");
- clk_prepare_enable(clk);
+ if (IS_ERR(clk)) {
+		pr_crit("Can't get timer clock\n");
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+		pr_err("Failed to prepare clock\n");
+ return ret;
+ }
rate = clk_get_rate(clk);
@@ -186,8 +197,12 @@ static void __init sun4i_timer_init(struct device_node *node)
of_machine_is_compatible("allwinner,sun5i-a10s"))
sched_clock_register(sun4i_timer_sched_read, 32, rate);
- clocksource_mmio_init(timer_base + TIMER_CNTVAL_REG(1), node->name,
- rate, 350, 32, clocksource_mmio_readl_down);
+ ret = clocksource_mmio_init(timer_base + TIMER_CNTVAL_REG(1), node->name,
+ rate, 350, 32, clocksource_mmio_readl_down);
+ if (ret) {
+		pr_err("Failed to register clocksource\n");
+ return ret;
+ }
ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
@@ -207,12 +222,16 @@ static void __init sun4i_timer_init(struct device_node *node)
TIMER_SYNC_TICKS, 0xffffffff);
ret = setup_irq(irq, &sun4i_timer_irq);
- if (ret)
- pr_warn("failed to setup irq %d\n", irq);
+ if (ret) {
+ pr_err("failed to setup irq %d\n", irq);
+ return ret;
+ }
/* Enable timer0 interrupt */
val = readl(timer_base + TIMER_IRQ_EN_REG);
writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG);
+
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer",
sun4i_timer_init);
diff --git a/drivers/clocksource/tango_xtal.c b/drivers/clocksource/tango_xtal.c
index c407c47a3..12fcef8cf 100644
--- a/drivers/clocksource/tango_xtal.c
+++ b/drivers/clocksource/tango_xtal.c
@@ -19,7 +19,7 @@ static u64 notrace read_sched_clock(void)
return read_xtal_counter();
}
-static void __init tango_clocksource_init(struct device_node *np)
+static int __init tango_clocksource_init(struct device_node *np)
{
struct clk *clk;
int xtal_freq, ret;
@@ -27,13 +27,13 @@ static void __init tango_clocksource_init(struct device_node *np)
xtal_in_cnt = of_iomap(np, 0);
if (xtal_in_cnt == NULL) {
pr_err("%s: invalid address\n", np->full_name);
- return;
+ return -ENXIO;
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
pr_err("%s: invalid clock\n", np->full_name);
- return;
+ return PTR_ERR(clk);
}
xtal_freq = clk_get_rate(clk);
@@ -44,11 +44,13 @@ static void __init tango_clocksource_init(struct device_node *np)
32, clocksource_mmio_readl_up);
if (ret) {
pr_err("%s: registration failed\n", np->full_name);
- return;
+ return ret;
}
sched_clock_register(read_sched_clock, 32, xtal_freq);
register_current_timer_delay(&delay_timer);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(tango, "sigma,tick-counter", tango_clocksource_init);
diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c
index 7b94ad2ab..f960891aa 100644
--- a/drivers/clocksource/tegra20_timer.c
+++ b/drivers/clocksource/tegra20_timer.c
@@ -165,7 +165,7 @@ static struct irqaction tegra_timer_irq = {
.dev_id = &tegra_clockevent,
};
-static void __init tegra20_init_timer(struct device_node *np)
+static int __init tegra20_init_timer(struct device_node *np)
{
struct clk *clk;
unsigned long rate;
@@ -174,13 +174,13 @@ static void __init tegra20_init_timer(struct device_node *np)
timer_reg_base = of_iomap(np, 0);
if (!timer_reg_base) {
pr_err("Can't map timer registers\n");
- BUG();
+ return -ENXIO;
}
tegra_timer_irq.irq = irq_of_parse_and_map(np, 2);
if (tegra_timer_irq.irq <= 0) {
pr_err("Failed to map timer IRQ\n");
- BUG();
+ return -EINVAL;
}
clk = of_clk_get(np, 0);
@@ -211,10 +211,12 @@ static void __init tegra20_init_timer(struct device_node *np)
sched_clock_register(tegra_read_sched_clock, 32, 1000000);
- if (clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
- "timer_us", 1000000, 300, 32, clocksource_mmio_readl_up)) {
+ ret = clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
+ "timer_us", 1000000, 300, 32,
+ clocksource_mmio_readl_up);
+ if (ret) {
pr_err("Failed to register clocksource\n");
- BUG();
+ return ret;
}
tegra_delay_timer.read_current_timer =
@@ -225,24 +227,26 @@ static void __init tegra20_init_timer(struct device_node *np)
ret = setup_irq(tegra_timer_irq.irq, &tegra_timer_irq);
if (ret) {
pr_err("Failed to register timer IRQ: %d\n", ret);
- BUG();
+ return ret;
}
tegra_clockevent.cpumask = cpu_all_mask;
tegra_clockevent.irq = tegra_timer_irq.irq;
clockevents_config_and_register(&tegra_clockevent, 1000000,
0x1, 0x1fffffff);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(tegra20_timer, "nvidia,tegra20-timer", tegra20_init_timer);
-static void __init tegra20_init_rtc(struct device_node *np)
+static int __init tegra20_init_rtc(struct device_node *np)
{
struct clk *clk;
rtc_base = of_iomap(np, 0);
if (!rtc_base) {
pr_err("Can't map RTC registers");
- BUG();
+ return -ENXIO;
}
/*
@@ -255,6 +259,6 @@ static void __init tegra20_init_rtc(struct device_node *np)
else
clk_prepare_enable(clk);
- register_persistent_clock(NULL, tegra_read_persistent_clock64);
+ return register_persistent_clock(NULL, tegra_read_persistent_clock64);
}
CLOCKSOURCE_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index d93ec3c4f..3c39e6f45 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -170,10 +170,10 @@ static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id)
/*
* Setup the local clock events for a CPU.
*/
-static int armada_370_xp_timer_setup(struct clock_event_device *evt)
+static int armada_370_xp_timer_starting_cpu(unsigned int cpu)
{
+ struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu);
u32 clr = 0, set = 0;
- int cpu = smp_processor_id();
if (timer25Mhz)
set = TIMER0_25MHZ;
@@ -200,35 +200,15 @@ static int armada_370_xp_timer_setup(struct clock_event_device *evt)
return 0;
}
-static void armada_370_xp_timer_stop(struct clock_event_device *evt)
+static int armada_370_xp_timer_dying_cpu(unsigned int cpu)
{
+ struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu);
+
evt->set_state_shutdown(evt);
disable_percpu_irq(evt->irq);
+ return 0;
}
-static int armada_370_xp_timer_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- /*
- * Grab cpu pointer in each case to avoid spurious
- * preemptible warnings
- */
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_STARTING:
- armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
- break;
- case CPU_DYING:
- armada_370_xp_timer_stop(this_cpu_ptr(armada_370_xp_evt));
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block armada_370_xp_timer_cpu_nb = {
- .notifier_call = armada_370_xp_timer_cpu_notify,
-};
-
static u32 timer0_ctrl_reg, timer0_local_ctrl_reg;
static int armada_370_xp_timer_suspend(void)
@@ -246,7 +226,7 @@ static void armada_370_xp_timer_resume(void)
writel(timer0_local_ctrl_reg, local_base + TIMER_CTRL_OFF);
}
-struct syscore_ops armada_370_xp_timer_syscore_ops = {
+static struct syscore_ops armada_370_xp_timer_syscore_ops = {
.suspend = armada_370_xp_timer_suspend,
.resume = armada_370_xp_timer_resume,
};
@@ -260,14 +240,22 @@ static struct delay_timer armada_370_delay_timer = {
.read_current_timer = armada_370_delay_timer_read,
};
-static void __init armada_370_xp_timer_common_init(struct device_node *np)
+static int __init armada_370_xp_timer_common_init(struct device_node *np)
{
u32 clr = 0, set = 0;
int res;
timer_base = of_iomap(np, 0);
- WARN_ON(!timer_base);
+ if (!timer_base) {
+		pr_err("Failed to iomap\n");
+ return -ENXIO;
+ }
+
local_base = of_iomap(np, 1);
+ if (!local_base) {
+		pr_err("Failed to iomap\n");
+ return -ENXIO;
+ }
if (timer25Mhz) {
set = TIMER0_25MHZ;
@@ -306,14 +294,17 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
*/
sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);
- clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
- "armada_370_xp_clocksource",
- timer_clk, 300, 32, clocksource_mmio_readl_down);
-
- register_cpu_notifier(&armada_370_xp_timer_cpu_nb);
+ res = clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
+ "armada_370_xp_clocksource",
+ timer_clk, 300, 32, clocksource_mmio_readl_down);
+ if (res) {
+		pr_err("Failed to initialize clocksource mmio\n");
+ return res;
+ }
armada_370_xp_evt = alloc_percpu(struct clock_event_device);
-
+ if (!armada_370_xp_evt)
+ return -ENOMEM;
/*
* Setup clockevent timer (interrupt-driven).
@@ -323,33 +314,56 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
"armada_370_xp_per_cpu_tick",
armada_370_xp_evt);
-	/* Immediately configure the timer on the boot CPU */
-	if (!res)
-		armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
+	if (res) {
+		pr_err("Failed to request percpu irq\n");
+		return res;
+	}
+
+ res = cpuhp_setup_state(CPUHP_AP_ARMADA_TIMER_STARTING,
+ "AP_ARMADA_TIMER_STARTING",
+ armada_370_xp_timer_starting_cpu,
+ armada_370_xp_timer_dying_cpu);
+ if (res) {
+		pr_err("Failed to setup hotplug state and timer\n");
+ return res;
+ }
register_syscore_ops(&armada_370_xp_timer_syscore_ops);
+
+ return 0;
}
-static void __init armada_xp_timer_init(struct device_node *np)
+static int __init armada_xp_timer_init(struct device_node *np)
{
struct clk *clk = of_clk_get_by_name(np, "fixed");
+ int ret;
+
+ if (IS_ERR(clk)) {
+		pr_err("Failed to get clock\n");
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
- /* The 25Mhz fixed clock is mandatory, and must always be available */
- BUG_ON(IS_ERR(clk));
- clk_prepare_enable(clk);
timer_clk = clk_get_rate(clk);
- armada_370_xp_timer_common_init(np);
+ return armada_370_xp_timer_common_init(np);
}
CLOCKSOURCE_OF_DECLARE(armada_xp, "marvell,armada-xp-timer",
armada_xp_timer_init);
-static void __init armada_375_timer_init(struct device_node *np)
+static int __init armada_375_timer_init(struct device_node *np)
{
struct clk *clk;
+ int ret;
clk = of_clk_get_by_name(np, "fixed");
if (!IS_ERR(clk)) {
- clk_prepare_enable(clk);
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
timer_clk = clk_get_rate(clk);
} else {
@@ -360,27 +374,43 @@ static void __init armada_375_timer_init(struct device_node *np)
clk = of_clk_get(np, 0);
/* Must have at least a clock */
- BUG_ON(IS_ERR(clk));
- clk_prepare_enable(clk);
+ if (IS_ERR(clk)) {
+		pr_err("Failed to get clock\n");
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
timer_clk = clk_get_rate(clk) / TIMER_DIVIDER;
timer25Mhz = false;
}
- armada_370_xp_timer_common_init(np);
+ return armada_370_xp_timer_common_init(np);
}
CLOCKSOURCE_OF_DECLARE(armada_375, "marvell,armada-375-timer",
armada_375_timer_init);
-static void __init armada_370_timer_init(struct device_node *np)
+static int __init armada_370_timer_init(struct device_node *np)
{
- struct clk *clk = of_clk_get(np, 0);
+ struct clk *clk;
+ int ret;
+
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+		pr_err("Failed to get clock\n");
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
- BUG_ON(IS_ERR(clk));
- clk_prepare_enable(clk);
timer_clk = clk_get_rate(clk) / TIMER_DIVIDER;
timer25Mhz = false;
- armada_370_xp_timer_common_init(np);
+ return armada_370_xp_timer_common_init(np);
}
CLOCKSOURCE_OF_DECLARE(armada_370, "marvell,armada-370-timer",
armada_370_timer_init);
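
All three armada entry points now follow the same clock bring-up, replacing BUG_ON(IS_ERR(clk)) and an ignored clk_prepare_enable() with proper propagation. Factored into a sketch (hypothetical helper name):

#include <linux/clk.h>
#include <linux/of.h>

static int __init foo_clk_init(struct device_node *np, unsigned long *rate)
{
	struct clk *clk = of_clk_get_by_name(np, "fixed");
	int ret;

	if (IS_ERR(clk)) {		/* was: BUG_ON(IS_ERR(clk)) */
		pr_err("Failed to get clock\n");
		return PTR_ERR(clk);
	}

	ret = clk_prepare_enable(clk);	/* was: return value dropped */
	if (ret)
		return ret;

	*rate = clk_get_rate(clk);
	return 0;
}
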
diff --git a/drivers/clocksource/time-efm32.c b/drivers/clocksource/time-efm32.c
index b06e4c2be..5ac344b38 100644
--- a/drivers/clocksource/time-efm32.c
+++ b/drivers/clocksource/time-efm32.c
@@ -233,10 +233,15 @@ static int __init efm32_clockevent_init(struct device_node *np)
DIV_ROUND_CLOSEST(rate, 1024),
0xf, 0xffff);
- setup_irq(irq, &efm32_clock_event_irq);
+ ret = setup_irq(irq, &efm32_clock_event_irq);
+ if (ret) {
+		pr_err("Failed to setup irq\n");
+ goto err_setup_irq;
+ }
return 0;
+err_setup_irq:
err_get_irq:
iounmap(base);
@@ -255,16 +260,16 @@ err_clk_get:
* This function asserts that we have exactly one clocksource and one
* clock_event_device in the end.
*/
-static void __init efm32_timer_init(struct device_node *np)
+static int __init efm32_timer_init(struct device_node *np)
{
static int has_clocksource, has_clockevent;
- int ret;
+ int ret = 0;
if (!has_clocksource) {
ret = efm32_clocksource_init(np);
if (!ret) {
has_clocksource = 1;
- return;
+ return 0;
}
}
@@ -272,9 +277,11 @@ static void __init efm32_timer_init(struct device_node *np)
ret = efm32_clockevent_init(np);
if (!ret) {
has_clockevent = 1;
- return;
+ return 0;
}
}
+
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(efm32compat, "efm32,timer", efm32_timer_init);
CLOCKSOURCE_OF_DECLARE(efm32, "energymicro,efm32-timer", efm32_timer_init);
diff --git a/drivers/clocksource/time-lpc32xx.c b/drivers/clocksource/time-lpc32xx.c
index daae61e8c..9649cfdb9 100644
--- a/drivers/clocksource/time-lpc32xx.c
+++ b/drivers/clocksource/time-lpc32xx.c
@@ -288,16 +288,16 @@ err_clk_enable:
* This function asserts that we have exactly one clocksource and one
* clock_event_device in the end.
*/
-static void __init lpc32xx_timer_init(struct device_node *np)
+static int __init lpc32xx_timer_init(struct device_node *np)
{
static int has_clocksource, has_clockevent;
- int ret;
+ int ret = 0;
if (!has_clocksource) {
ret = lpc32xx_clocksource_init(np);
if (!ret) {
has_clocksource = 1;
- return;
+ return 0;
}
}
@@ -305,8 +305,10 @@ static void __init lpc32xx_timer_init(struct device_node *np)
ret = lpc32xx_clockevent_init(np);
if (!ret) {
has_clockevent = 1;
- return;
+ return 0;
}
}
+
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(lpc32xx_timer, "nxp,lpc3220-timer", lpc32xx_timer_init);
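
efm32, lpc32xx and mps2 share one more idiom: a single compatible matches two timer nodes, and static flags steer the first instance that probes successfully to clocksource duty and the second to clockevent duty. A sketch, with assumed helpers standing in for the drivers' *_clocksource_init()/*_clockevent_init():

#include <linux/init.h>
#include <linux/of.h>

static int foo_clocksource_init(struct device_node *np);	/* assumed */
static int foo_clockevent_init(struct device_node *np);		/* assumed */

static int __init foo_timer_init(struct device_node *np)
{
	static int has_clocksource, has_clockevent;
	int ret = 0;

	if (!has_clocksource) {
		ret = foo_clocksource_init(np);
		if (!ret) {
			has_clocksource = 1;
			return 0;
		}
	}

	if (!has_clockevent) {
		ret = foo_clockevent_init(np);
		if (!ret) {
			has_clockevent = 1;
			return 0;
		}
	}

	/* both roles filled already, or this node failed its init */
	return ret;
}
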
diff --git a/drivers/clocksource/time-orion.c b/drivers/clocksource/time-orion.c
index 0ece7427b..a28f496e9 100644
--- a/drivers/clocksource/time-orion.c
+++ b/drivers/clocksource/time-orion.c
@@ -104,25 +104,36 @@ static struct irqaction orion_clkevt_irq = {
.handler = orion_clkevt_irq_handler,
};
-static void __init orion_timer_init(struct device_node *np)
+static int __init orion_timer_init(struct device_node *np)
{
struct clk *clk;
- int irq;
+ int irq, ret;
/* timer registers are shared with watchdog timer */
timer_base = of_iomap(np, 0);
- if (!timer_base)
- panic("%s: unable to map resource\n", np->name);
+ if (!timer_base) {
+ pr_err("%s: unable to map resource\n", np->name);
+ return -ENXIO;
+ }
clk = of_clk_get(np, 0);
- if (IS_ERR(clk))
- panic("%s: unable to get clk\n", np->name);
- clk_prepare_enable(clk);
+ if (IS_ERR(clk)) {
+ pr_err("%s: unable to get clk\n", np->name);
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+		pr_err("Failed to prepare clock\n");
+ return ret;
+ }
/* we are only interested in timer1 irq */
irq = irq_of_parse_and_map(np, 1);
- if (irq <= 0)
- panic("%s: unable to parse timer1 irq\n", np->name);
+ if (irq <= 0) {
+ pr_err("%s: unable to parse timer1 irq\n", np->name);
+ return -EINVAL;
+ }
/* setup timer0 as free-running clocksource */
writel(~0, timer_base + TIMER0_VAL);
@@ -130,19 +141,30 @@ static void __init orion_timer_init(struct device_node *np)
atomic_io_modify(timer_base + TIMER_CTRL,
TIMER0_RELOAD_EN | TIMER0_EN,
TIMER0_RELOAD_EN | TIMER0_EN);
- clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource",
- clk_get_rate(clk), 300, 32,
- clocksource_mmio_readl_down);
+
+ ret = clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource",
+ clk_get_rate(clk), 300, 32,
+ clocksource_mmio_readl_down);
+ if (ret) {
+		pr_err("Failed to initialize mmio timer\n");
+ return ret;
+ }
+
sched_clock_register(orion_read_sched_clock, 32, clk_get_rate(clk));
/* setup timer1 as clockevent timer */
- if (setup_irq(irq, &orion_clkevt_irq))
- panic("%s: unable to setup irq\n", np->name);
+ ret = setup_irq(irq, &orion_clkevt_irq);
+ if (ret) {
+ pr_err("%s: unable to setup irq\n", np->name);
+ return ret;
+ }
ticks_per_jiffy = (clk_get_rate(clk) + HZ/2) / HZ;
orion_clkevt.cpumask = cpumask_of(0);
orion_clkevt.irq = irq;
clockevents_config_and_register(&orion_clkevt, clk_get_rate(clk),
ORION_ONESHOT_MIN, ORION_ONESHOT_MAX);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(orion_timer, "marvell,orion-timer", orion_timer_init);
diff --git a/drivers/clocksource/time-pistachio.c b/drivers/clocksource/time-pistachio.c
index 376e59bc5..a8e6c7df8 100644
--- a/drivers/clocksource/time-pistachio.c
+++ b/drivers/clocksource/time-pistachio.c
@@ -148,7 +148,7 @@ static struct pistachio_clocksource pcs_gpt = {
},
};
-static void __init pistachio_clksrc_of_init(struct device_node *node)
+static int __init pistachio_clksrc_of_init(struct device_node *node)
{
struct clk *sys_clk, *fast_clk;
struct regmap *periph_regs;
@@ -158,61 +158,61 @@ static void __init pistachio_clksrc_of_init(struct device_node *node)
pcs_gpt.base = of_iomap(node, 0);
if (!pcs_gpt.base) {
pr_err("cannot iomap\n");
- return;
+ return -ENXIO;
}
periph_regs = syscon_regmap_lookup_by_phandle(node, "img,cr-periph");
if (IS_ERR(periph_regs)) {
pr_err("cannot get peripheral regmap (%ld)\n",
PTR_ERR(periph_regs));
- return;
+ return PTR_ERR(periph_regs);
}
/* Switch to using the fast counter clock */
ret = regmap_update_bits(periph_regs, PERIP_TIMER_CONTROL,
0xf, 0x0);
if (ret)
- return;
+ return ret;
sys_clk = of_clk_get_by_name(node, "sys");
if (IS_ERR(sys_clk)) {
pr_err("clock get failed (%ld)\n", PTR_ERR(sys_clk));
- return;
+ return PTR_ERR(sys_clk);
}
fast_clk = of_clk_get_by_name(node, "fast");
if (IS_ERR(fast_clk)) {
pr_err("clock get failed (%lu)\n", PTR_ERR(fast_clk));
- return;
+ return PTR_ERR(fast_clk);
}
ret = clk_prepare_enable(sys_clk);
if (ret < 0) {
pr_err("failed to enable clock (%d)\n", ret);
- return;
+ return ret;
}
ret = clk_prepare_enable(fast_clk);
if (ret < 0) {
pr_err("failed to enable clock (%d)\n", ret);
clk_disable_unprepare(sys_clk);
- return;
+ return ret;
}
rate = clk_get_rate(fast_clk);
/* Disable irq's for clocksource usage */
- gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 0);
- gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 1);
- gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 2);
- gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 3);
+ gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 0);
+ gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 1);
+ gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 2);
+ gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 3);
/* Enable timer block */
writel(TIMER_ME_GLOBAL, pcs_gpt.base);
raw_spin_lock_init(&pcs_gpt.lock);
sched_clock_register(pistachio_read_sched_clock, 32, rate);
- clocksource_register_hz(&pcs_gpt.cs, rate);
+ return clocksource_register_hz(&pcs_gpt.cs, rate);
}
CLOCKSOURCE_OF_DECLARE(pistachio_gptimer, "img,pistachio-gptimer",
pistachio_clksrc_of_init);
diff --git a/drivers/clocksource/timer-atlas7.c b/drivers/clocksource/timer-atlas7.c
index 27fa13680..4334e0330 100644
--- a/drivers/clocksource/timer-atlas7.c
+++ b/drivers/clocksource/timer-atlas7.c
@@ -172,9 +172,9 @@ static struct irqaction sirfsoc_timer1_irq = {
.handler = sirfsoc_timer_interrupt,
};
-static int sirfsoc_local_timer_setup(struct clock_event_device *ce)
+static int sirfsoc_local_timer_starting_cpu(unsigned int cpu)
{
- int cpu = smp_processor_id();
+ struct clock_event_device *ce = per_cpu_ptr(sirfsoc_clockevent, cpu);
struct irqaction *action;
if (cpu == 0)
@@ -203,54 +203,31 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce)
return 0;
}
-static void sirfsoc_local_timer_stop(struct clock_event_device *ce)
+static int sirfsoc_local_timer_dying_cpu(unsigned int cpu)
{
- int cpu = smp_processor_id();
-
sirfsoc_timer_count_disable(1);
if (cpu == 0)
remove_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq);
else
remove_irq(sirfsoc_timer1_irq.irq, &sirfsoc_timer1_irq);
+ return 0;
}
-static int sirfsoc_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- /*
- * Grab cpu pointer in each case to avoid spurious
- * preemptible warnings
- */
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_STARTING:
- sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent));
- break;
- case CPU_DYING:
- sirfsoc_local_timer_stop(this_cpu_ptr(sirfsoc_clockevent));
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block sirfsoc_cpu_nb = {
- .notifier_call = sirfsoc_cpu_notify,
-};
-
-static void __init sirfsoc_clockevent_init(void)
+static int __init sirfsoc_clockevent_init(void)
{
sirfsoc_clockevent = alloc_percpu(struct clock_event_device);
BUG_ON(!sirfsoc_clockevent);
- BUG_ON(register_cpu_notifier(&sirfsoc_cpu_nb));
-
- /* Immediately configure the timer on the boot CPU */
- sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent));
+ /* Install and invoke hotplug callbacks */
+ return cpuhp_setup_state(CPUHP_AP_MARCO_TIMER_STARTING,
+ "AP_MARCO_TIMER_STARTING",
+ sirfsoc_local_timer_starting_cpu,
+ sirfsoc_local_timer_dying_cpu);
}
/* initialize the kernel jiffy timer source */
-static void __init sirfsoc_atlas7_timer_init(struct device_node *np)
+static int __init sirfsoc_atlas7_timer_init(struct device_node *np)
{
struct clk *clk;
@@ -279,23 +256,29 @@ static void __init sirfsoc_atlas7_timer_init(struct device_node *np)
BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, atlas7_timer_rate));
- sirfsoc_clockevent_init();
+ return sirfsoc_clockevent_init();
}
-static void __init sirfsoc_of_timer_init(struct device_node *np)
+static int __init sirfsoc_of_timer_init(struct device_node *np)
{
sirfsoc_timer_base = of_iomap(np, 0);
- if (!sirfsoc_timer_base)
- panic("unable to map timer cpu registers\n");
+ if (!sirfsoc_timer_base) {
+ pr_err("unable to map timer cpu registers\n");
+ return -ENXIO;
+ }
sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0);
- if (!sirfsoc_timer_irq.irq)
- panic("No irq passed for timer0 via DT\n");
+ if (!sirfsoc_timer_irq.irq) {
+ pr_err("No irq passed for timer0 via DT\n");
+ return -EINVAL;
+ }
sirfsoc_timer1_irq.irq = irq_of_parse_and_map(np, 1);
- if (!sirfsoc_timer1_irq.irq)
- panic("No irq passed for timer1 via DT\n");
+ if (!sirfsoc_timer1_irq.irq) {
+ pr_err("No irq passed for timer1 via DT\n");
+ return -EINVAL;
+ }
- sirfsoc_atlas7_timer_init(np);
+ return sirfsoc_atlas7_timer_init(np);
}
CLOCKSOURCE_OF_DECLARE(sirfsoc_atlas7_timer, "sirf,atlas7-tick", sirfsoc_of_timer_init);
diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c
index d911c5dca..7f0f5b26d 100644
--- a/drivers/clocksource/timer-atmel-pit.c
+++ b/drivers/clocksource/timer-atmel-pit.c
@@ -177,7 +177,7 @@ static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id)
/*
* Set up both clocksource and clockevent support.
*/
-static void __init at91sam926x_pit_common_init(struct pit_data *data)
+static int __init at91sam926x_pit_common_init(struct pit_data *data)
{
unsigned long pit_rate;
unsigned bits;
@@ -204,14 +204,21 @@ static void __init at91sam926x_pit_common_init(struct pit_data *data)
data->clksrc.rating = 175;
data->clksrc.read = read_pit_clk;
data->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
- clocksource_register_hz(&data->clksrc, pit_rate);
+
+ ret = clocksource_register_hz(&data->clksrc, pit_rate);
+ if (ret) {
+		pr_err("Failed to register clocksource\n");
+ return ret;
+ }
/* Set up irq handler */
ret = request_irq(data->irq, at91sam926x_pit_interrupt,
IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
"at91_tick", data);
- if (ret)
- panic(pr_fmt("Unable to setup IRQ\n"));
+ if (ret) {
+ pr_err("Unable to setup IRQ\n");
+ return ret;
+ }
/* Set up and register clockevents */
data->clkevt.name = "pit";
@@ -226,34 +233,49 @@ static void __init at91sam926x_pit_common_init(struct pit_data *data)
data->clkevt.resume = at91sam926x_pit_resume;
data->clkevt.suspend = at91sam926x_pit_suspend;
clockevents_register_device(&data->clkevt);
+
+ return 0;
}
-static void __init at91sam926x_pit_dt_init(struct device_node *node)
+static int __init at91sam926x_pit_dt_init(struct device_node *node)
{
struct pit_data *data;
+ int ret;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
- panic(pr_fmt("Unable to allocate memory\n"));
+ return -ENOMEM;
data->base = of_iomap(node, 0);
- if (!data->base)
- panic(pr_fmt("Could not map PIT address\n"));
+ if (!data->base) {
+ pr_err("Could not map PIT address\n");
+ return -ENXIO;
+ }
data->mck = of_clk_get(node, 0);
if (IS_ERR(data->mck))
/* Fallback on clkdev for !CCF-based boards */
data->mck = clk_get(NULL, "mck");
- if (IS_ERR(data->mck))
- panic(pr_fmt("Unable to get mck clk\n"));
+ if (IS_ERR(data->mck)) {
+ pr_err("Unable to get mck clk\n");
+ return PTR_ERR(data->mck);
+ }
+
+ ret = clk_prepare_enable(data->mck);
+ if (ret) {
+ pr_err("Unable to enable mck\n");
+ return ret;
+ }
/* Get the interrupts property */
data->irq = irq_of_parse_and_map(node, 0);
- if (!data->irq)
- panic(pr_fmt("Unable to get IRQ from DT\n"));
+ if (!data->irq) {
+ pr_err("Unable to get IRQ from DT\n");
+ return -EINVAL;
+ }
- at91sam926x_pit_common_init(data);
+ return at91sam926x_pit_common_init(data);
}
CLOCKSOURCE_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit",
at91sam926x_pit_dt_init);
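The conversions above all follow the same convention: a CLOCKSOURCE_OF_DECLARE() init routine now returns int, logs failures with pr_err(), and unwinds instead of panicking, so the core can report which DT timer failed to probe. A minimal sketch of that shape, assuming a hypothetical "vendor,example-timer" binding:

static int __init example_timer_init(struct device_node *np)
{
	void __iomem *base;
	int irq;

	base = of_iomap(np, 0);
	if (!base) {
		pr_err("%s: unable to map registers\n", np->full_name);
		return -ENXIO;
	}

	irq = irq_of_parse_and_map(np, 0);
	if (irq <= 0) {
		pr_err("%s: unable to parse irq\n", np->full_name);
		iounmap(base);
		return -EINVAL;
	}

	/* ... register clocksource/clockevent, propagating errors ... */
	return 0;
}
CLOCKSOURCE_OF_DECLARE(example_timer, "vendor,example-timer",
		       example_timer_init);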
diff --git a/drivers/clocksource/timer-atmel-st.c b/drivers/clocksource/timer-atmel-st.c
index 29d21d68d..e90ab5b63 100644
--- a/drivers/clocksource/timer-atmel-st.c
+++ b/drivers/clocksource/timer-atmel-st.c
@@ -194,15 +194,17 @@ static struct clock_event_device clkevt = {
/*
* ST (system timer) module supports both clockevents and clocksource.
*/
-static void __init atmel_st_timer_init(struct device_node *node)
+static int __init atmel_st_timer_init(struct device_node *node)
{
struct clk *sclk;
unsigned int sclk_rate, val;
int irq, ret;
regmap_st = syscon_node_to_regmap(node);
- if (IS_ERR(regmap_st))
- panic(pr_fmt("Unable to get regmap\n"));
+ if (IS_ERR(regmap_st)) {
+ pr_err("Unable to get regmap\n");
+ return PTR_ERR(regmap_st);
+ }
/* Disable all timer interrupts, and clear any pending ones */
regmap_write(regmap_st, AT91_ST_IDR,
@@ -211,27 +213,37 @@ static void __init atmel_st_timer_init(struct device_node *node)
/* Get the interrupts property */
irq = irq_of_parse_and_map(node, 0);
- if (!irq)
- panic(pr_fmt("Unable to get IRQ from DT\n"));
+ if (!irq) {
+ pr_err("Unable to get IRQ from DT\n");
+ return -EINVAL;
+ }
/* Make IRQs happen for the system timer */
ret = request_irq(irq, at91rm9200_timer_interrupt,
IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
"at91_tick", regmap_st);
- if (ret)
- panic(pr_fmt("Unable to setup IRQ\n"));
+ if (ret) {
+ pr_err("Unable to setup IRQ\n");
+ return ret;
+ }
sclk = of_clk_get(node, 0);
- if (IS_ERR(sclk))
- panic(pr_fmt("Unable to get slow clock\n"));
+ if (IS_ERR(sclk)) {
+ pr_err("Unable to get slow clock\n");
+ return PTR_ERR(sclk);
+ }
- clk_prepare_enable(sclk);
- if (ret)
- panic(pr_fmt("Could not enable slow clock\n"));
+ ret = clk_prepare_enable(sclk);
+ if (ret) {
+ pr_err("Could not enable slow clock\n");
+ return ret;
+ }
sclk_rate = clk_get_rate(sclk);
- if (!sclk_rate)
- panic(pr_fmt("Invalid slow clock rate\n"));
+ if (!sclk_rate) {
+ pr_err("Invalid slow clock rate\n");
+ return -EINVAL;
+ }
timer_latch = (sclk_rate + HZ / 2) / HZ;
/* The 32KiHz "Slow Clock" (tick every 30517.58 nanoseconds) is used
@@ -246,7 +258,7 @@ static void __init atmel_st_timer_init(struct device_node *node)
2, AT91_ST_ALMV);
/* register clocksource */
- clocksource_register_hz(&clk32k, sclk_rate);
+ return clocksource_register_hz(&clk32k, sclk_rate);
}
CLOCKSOURCE_OF_DECLARE(atmel_st_timer, "atmel,at91rm9200-st",
atmel_st_timer_init);
diff --git a/drivers/clocksource/timer-digicolor.c b/drivers/clocksource/timer-digicolor.c
index a536eeb63..10318cc99 100644
--- a/drivers/clocksource/timer-digicolor.c
+++ b/drivers/clocksource/timer-digicolor.c
@@ -63,7 +63,7 @@ struct digicolor_timer {
int timer_id; /* one of TIMER_* */
};
-struct digicolor_timer *dc_timer(struct clock_event_device *ce)
+static struct digicolor_timer *dc_timer(struct clock_event_device *ce)
{
return container_of(ce, struct digicolor_timer, ce);
}
@@ -148,7 +148,7 @@ static u64 notrace digicolor_timer_sched_read(void)
return ~readl(dc_timer_dev.base + COUNT(TIMER_B));
}
-static void __init digicolor_timer_init(struct device_node *node)
+static int __init digicolor_timer_init(struct device_node *node)
{
unsigned long rate;
struct clk *clk;
@@ -161,19 +161,19 @@ static void __init digicolor_timer_init(struct device_node *node)
dc_timer_dev.base = of_iomap(node, 0);
if (!dc_timer_dev.base) {
pr_err("Can't map registers");
- return;
+ return -ENXIO;
}
irq = irq_of_parse_and_map(node, dc_timer_dev.timer_id);
if (irq <= 0) {
pr_err("Can't parse IRQ");
- return;
+ return -EINVAL;
}
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("Can't get timer clock");
- return;
+ return PTR_ERR(clk);
}
clk_prepare_enable(clk);
rate = clk_get_rate(clk);
@@ -190,13 +190,17 @@ static void __init digicolor_timer_init(struct device_node *node)
ret = request_irq(irq, digicolor_timer_interrupt,
IRQF_TIMER | IRQF_IRQPOLL, "digicolor_timerC",
&dc_timer_dev.ce);
- if (ret)
+ if (ret) {
pr_warn("request of timer irq %d failed (%d)\n", irq, ret);
+ return ret;
+ }
dc_timer_dev.ce.cpumask = cpu_possible_mask;
dc_timer_dev.ce.irq = irq;
clockevents_config_and_register(&dc_timer_dev.ce, rate, 0, 0xffffffff);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(conexant_digicolor, "cnxt,cx92755-timer",
digicolor_timer_init);
diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
index 99ec96769..f595460bf 100644
--- a/drivers/clocksource/timer-imx-gpt.c
+++ b/drivers/clocksource/timer-imx-gpt.c
@@ -407,8 +407,10 @@ static const struct imx_gpt_data imx6dl_gpt_data = {
.set_next_event = v2_set_next_event,
};
-static void __init _mxc_timer_init(struct imx_timer *imxtm)
+static int __init _mxc_timer_init(struct imx_timer *imxtm)
{
+ int ret;
+
switch (imxtm->type) {
case GPT_TYPE_IMX1:
imxtm->gpt = &imx1_gpt_data;
@@ -423,12 +425,12 @@ static void __init _mxc_timer_init(struct imx_timer *imxtm)
imxtm->gpt = &imx6dl_gpt_data;
break;
default:
- BUG();
+ return -EINVAL;
}
if (IS_ERR(imxtm->clk_per)) {
pr_err("i.MX timer: unable to get clk\n");
- return;
+ return PTR_ERR(imxtm->clk_per);
}
if (!IS_ERR(imxtm->clk_ipg))
@@ -446,8 +448,11 @@ static void __init _mxc_timer_init(struct imx_timer *imxtm)
imxtm->gpt->gpt_setup_tctl(imxtm);
/* init and register the timer to the framework */
- mxc_clocksource_init(imxtm);
- mxc_clockevent_init(imxtm);
+ ret = mxc_clocksource_init(imxtm);
+ if (ret)
+ return ret;
+
+ return mxc_clockevent_init(imxtm);
}
void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
@@ -469,21 +474,27 @@ void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
_mxc_timer_init(imxtm);
}
-static void __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type type)
+static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type type)
{
struct imx_timer *imxtm;
static int initialized;
+ int ret;
/* Support one instance only */
if (initialized)
- return;
+ return 0;
imxtm = kzalloc(sizeof(*imxtm), GFP_KERNEL);
- BUG_ON(!imxtm);
+ if (!imxtm)
+ return -ENOMEM;
imxtm->base = of_iomap(np, 0);
- WARN_ON(!imxtm->base);
+ if (!imxtm->base)
+ return -ENXIO;
+
imxtm->irq = irq_of_parse_and_map(np, 0);
+ if (imxtm->irq <= 0)
+ return -EINVAL;
imxtm->clk_ipg = of_clk_get_by_name(np, "ipg");
@@ -494,22 +505,26 @@ static void __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type
imxtm->type = type;
- _mxc_timer_init(imxtm);
+ ret = _mxc_timer_init(imxtm);
+ if (ret)
+ return ret;
initialized = 1;
+
+ return 0;
}
-static void __init imx1_timer_init_dt(struct device_node *np)
+static int __init imx1_timer_init_dt(struct device_node *np)
{
- mxc_timer_init_dt(np, GPT_TYPE_IMX1);
+ return mxc_timer_init_dt(np, GPT_TYPE_IMX1);
}
-static void __init imx21_timer_init_dt(struct device_node *np)
+static int __init imx21_timer_init_dt(struct device_node *np)
{
- mxc_timer_init_dt(np, GPT_TYPE_IMX21);
+ return mxc_timer_init_dt(np, GPT_TYPE_IMX21);
}
-static void __init imx31_timer_init_dt(struct device_node *np)
+static int __init imx31_timer_init_dt(struct device_node *np)
{
enum imx_gpt_type type = GPT_TYPE_IMX31;
@@ -522,12 +537,12 @@ static void __init imx31_timer_init_dt(struct device_node *np)
if (of_machine_is_compatible("fsl,imx6dl"))
type = GPT_TYPE_IMX6DL;
- mxc_timer_init_dt(np, type);
+ return mxc_timer_init_dt(np, type);
}
-static void __init imx6dl_timer_init_dt(struct device_node *np)
+static int __init imx6dl_timer_init_dt(struct device_node *np)
{
- mxc_timer_init_dt(np, GPT_TYPE_IMX6DL);
+ return mxc_timer_init_dt(np, GPT_TYPE_IMX6DL);
}
CLOCKSOURCE_OF_DECLARE(imx1_timer, "fsl,imx1-gpt", imx1_timer_init_dt);
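Note that mxc_timer_init_dt() keeps its single-instance guard, and a second DT match now returns 0 rather than an error, so boards describing several GPT instances are tolerated instead of flagged as probe failures. The idiom, sketched with hypothetical names (example_hw_init() stands in for the real setup path):

static int __init example_once_init(struct device_node *np)
{
	static bool initialized;
	int ret;

	if (initialized)
		return 0;	/* silently accept further instances */

	ret = example_hw_init(np);	/* hypothetical setup helper */
	if (ret)
		return ret;

	initialized = true;
	return 0;
}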
diff --git a/drivers/clocksource/timer-integrator-ap.c b/drivers/clocksource/timer-integrator-ap.c
index 3f59ac218..df6e672af 100644
--- a/drivers/clocksource/timer-integrator-ap.c
+++ b/drivers/clocksource/timer-integrator-ap.c
@@ -36,11 +36,12 @@ static u64 notrace integrator_read_sched_clock(void)
return -readl(sched_clk_base + TIMER_VALUE);
}
-static void integrator_clocksource_init(unsigned long inrate,
- void __iomem *base)
+static int integrator_clocksource_init(unsigned long inrate,
+ void __iomem *base)
{
u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC;
unsigned long rate = inrate;
+ int ret;
if (rate >= 1500000) {
rate /= 16;
@@ -50,11 +51,15 @@ static void integrator_clocksource_init(unsigned long inrate,
writel(0xffff, base + TIMER_LOAD);
writel(ctrl, base + TIMER_CTRL);
- clocksource_mmio_init(base + TIMER_VALUE, "timer2",
- rate, 200, 16, clocksource_mmio_readl_down);
+ ret = clocksource_mmio_init(base + TIMER_VALUE, "timer2",
+ rate, 200, 16, clocksource_mmio_readl_down);
+ if (ret)
+ return ret;
sched_clk_base = base;
sched_clock_register(integrator_read_sched_clock, 16, rate);
+
+ return 0;
}
static unsigned long timer_reload;
@@ -138,11 +143,12 @@ static struct irqaction integrator_timer_irq = {
.dev_id = &integrator_clockevent,
};
-static void integrator_clockevent_init(unsigned long inrate,
- void __iomem *base, int irq)
+static int integrator_clockevent_init(unsigned long inrate,
+ void __iomem *base, int irq)
{
unsigned long rate = inrate;
unsigned int ctrl = 0;
+ int ret;
clkevt_base = base;
/* Calculate and program a divisor */
@@ -156,14 +162,18 @@ static void integrator_clockevent_init(unsigned long inrate,
timer_reload = rate / HZ;
writel(ctrl, clkevt_base + TIMER_CTRL);
- setup_irq(irq, &integrator_timer_irq);
+ ret = setup_irq(irq, &integrator_timer_irq);
+ if (ret)
+ return ret;
+
clockevents_config_and_register(&integrator_clockevent,
rate,
1,
0xffffU);
+ return 0;
}
-static void __init integrator_ap_timer_init_of(struct device_node *node)
+static int __init integrator_ap_timer_init_of(struct device_node *node)
{
const char *path;
void __iomem *base;
@@ -176,12 +186,12 @@ static void __init integrator_ap_timer_init_of(struct device_node *node)
base = of_io_request_and_map(node, 0, "integrator-timer");
if (IS_ERR(base))
- return;
+ return PTR_ERR(base);
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("No clock for %s\n", node->name);
- return;
+ return PTR_ERR(clk);
}
clk_prepare_enable(clk);
rate = clk_get_rate(clk);
@@ -189,30 +199,37 @@ static void __init integrator_ap_timer_init_of(struct device_node *node)
err = of_property_read_string(of_aliases,
"arm,timer-primary", &path);
- if (WARN_ON(err))
- return;
+ if (err) {
+ pr_warn("Failed to read property");
+ return err;
+ }
+
pri_node = of_find_node_by_path(path);
+
err = of_property_read_string(of_aliases,
"arm,timer-secondary", &path);
- if (WARN_ON(err))
- return;
+ if (err) {
+ pr_warn("Failed to read property");
+ return err;
+ }
+
sec_node = of_find_node_by_path(path);
- if (node == pri_node) {
+ if (node == pri_node)
/* The primary timer lacks IRQ, use as clocksource */
- integrator_clocksource_init(rate, base);
- return;
- }
+ return integrator_clocksource_init(rate, base);
if (node == sec_node) {
/* The secondary timer will drive the clock event */
irq = irq_of_parse_and_map(node, 0);
- integrator_clockevent_init(rate, base, irq);
- return;
+ return integrator_clockevent_init(rate, base, irq);
}
pr_info("Timer @%p unused\n", base);
clk_disable_unprepare(clk);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(integrator_ap_timer, "arm,integrator-timer",
diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c
index 1cea08cf6..ab68a47ab 100644
--- a/drivers/clocksource/timer-keystone.c
+++ b/drivers/clocksource/timer-keystone.c
@@ -144,7 +144,7 @@ static int keystone_set_periodic(struct clock_event_device *evt)
return 0;
}
-static void __init keystone_timer_init(struct device_node *np)
+static int __init keystone_timer_init(struct device_node *np)
{
struct clock_event_device *event_dev = &timer.event_dev;
unsigned long rate;
@@ -154,20 +154,20 @@ static void __init keystone_timer_init(struct device_node *np)
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
pr_err("%s: failed to map interrupts\n", __func__);
- return;
+ return -EINVAL;
}
timer.base = of_iomap(np, 0);
if (!timer.base) {
pr_err("%s: failed to map registers\n", __func__);
- return;
+ return -ENXIO;
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
pr_err("%s: failed to get clock\n", __func__);
iounmap(timer.base);
- return;
+ return PTR_ERR(clk);
}
error = clk_prepare_enable(clk);
@@ -219,11 +219,12 @@ static void __init keystone_timer_init(struct device_node *np)
clockevents_config_and_register(event_dev, rate, 1, ULONG_MAX);
pr_info("keystone timer clock @%lu Hz\n", rate);
- return;
+ return 0;
err:
clk_put(clk);
iounmap(timer.base);
+ return error;
}
CLOCKSOURCE_OF_DECLARE(keystone_timer, "ti,keystone-timer",
- keystone_timer_init);
+ keystone_timer_init);
diff --git a/drivers/clocksource/timer-nps.c b/drivers/clocksource/timer-nps.c
index d46108920..70c149af8 100644
--- a/drivers/clocksource/timer-nps.c
+++ b/drivers/clocksource/timer-nps.c
@@ -55,8 +55,8 @@ static cycle_t nps_clksrc_read(struct clocksource *clksrc)
return (cycle_t)ioread32be(nps_msu_reg_low_addr[cluster]);
}
-static void __init nps_setup_clocksource(struct device_node *node,
- struct clk *clk)
+static int __init nps_setup_clocksource(struct device_node *node,
+ struct clk *clk)
{
int ret, cluster;
@@ -68,7 +68,7 @@ static void __init nps_setup_clocksource(struct device_node *node,
ret = clk_prepare_enable(clk);
if (ret) {
pr_err("Couldn't enable parent clock\n");
- return;
+ return ret;
}
nps_timer_rate = clk_get_rate(clk);
@@ -79,19 +79,21 @@ static void __init nps_setup_clocksource(struct device_node *node,
pr_err("Couldn't register clock source.\n");
clk_disable_unprepare(clk);
}
+
+ return ret;
}
-static void __init nps_timer_init(struct device_node *node)
+static int __init nps_timer_init(struct device_node *node)
{
struct clk *clk;
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
pr_err("Can't get timer clock.\n");
- return;
+ return PTR_ERR(clk);
}
- nps_setup_clocksource(node, clk);
+ return nps_setup_clocksource(node, clk);
}
CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clksrc, "ezchip,nps400-timer",
diff --git a/drivers/clocksource/timer-oxnas-rps.c b/drivers/clocksource/timer-oxnas-rps.c
new file mode 100644
index 000000000..bd887e2a8
--- /dev/null
+++ b/drivers/clocksource/timer-oxnas-rps.c
@@ -0,0 +1,297 @@
+/*
+ * drivers/clocksource/timer-oxnas-rps.c
+ *
+ * Copyright (C) 2009 Oxford Semiconductor Ltd
+ * Copyright (C) 2013 Ma Haijun <mahaijuns@gmail.com>
+ * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/clockchips.h>
+#include <linux/sched_clock.h>
+
+/* TIMER1 used as tick
+ * TIMER2 used as clocksource
+ */
+
+/* Registers definitions */
+
+#define TIMER_LOAD_REG 0x0
+#define TIMER_CURR_REG 0x4
+#define TIMER_CTRL_REG 0x8
+#define TIMER_CLRINT_REG 0xC
+
+#define TIMER_BITS 24
+
+#define TIMER_MAX_VAL (BIT(TIMER_BITS) - 1)
+
+#define TIMER_PERIODIC BIT(6)
+#define TIMER_ENABLE BIT(7)
+
+#define TIMER_DIV1 (0)
+#define TIMER_DIV16 (1 << 2)
+#define TIMER_DIV256 (2 << 2)
+
+#define TIMER1_REG_OFFSET 0
+#define TIMER2_REG_OFFSET 0x20
+
+/* Clockevent & Clocksource data */
+
+struct oxnas_rps_timer {
+ struct clock_event_device clkevent;
+ void __iomem *clksrc_base;
+ void __iomem *clkevt_base;
+ unsigned long timer_period;
+ unsigned int timer_prescaler;
+ struct clk *clk;
+ int irq;
+};
+
+static irqreturn_t oxnas_rps_timer_irq(int irq, void *dev_id)
+{
+ struct oxnas_rps_timer *rps = dev_id;
+
+ writel_relaxed(0, rps->clkevt_base + TIMER_CLRINT_REG);
+
+ rps->clkevent.event_handler(&rps->clkevent);
+
+ return IRQ_HANDLED;
+}
+
+static void oxnas_rps_timer_config(struct oxnas_rps_timer *rps,
+ unsigned long period,
+ unsigned int periodic)
+{
+ uint32_t cfg = rps->timer_prescaler;
+
+ if (period)
+ cfg |= TIMER_ENABLE;
+
+ if (periodic)
+ cfg |= TIMER_PERIODIC;
+
+ writel_relaxed(period, rps->clkevt_base + TIMER_LOAD_REG);
+ writel_relaxed(cfg, rps->clkevt_base + TIMER_CTRL_REG);
+}
+
+static int oxnas_rps_timer_shutdown(struct clock_event_device *evt)
+{
+ struct oxnas_rps_timer *rps =
+ container_of(evt, struct oxnas_rps_timer, clkevent);
+
+ oxnas_rps_timer_config(rps, 0, 0);
+
+ return 0;
+}
+
+static int oxnas_rps_timer_set_periodic(struct clock_event_device *evt)
+{
+ struct oxnas_rps_timer *rps =
+ container_of(evt, struct oxnas_rps_timer, clkevent);
+
+ oxnas_rps_timer_config(rps, rps->timer_period, 1);
+
+ return 0;
+}
+
+static int oxnas_rps_timer_set_oneshot(struct clock_event_device *evt)
+{
+ struct oxnas_rps_timer *rps =
+ container_of(evt, struct oxnas_rps_timer, clkevent);
+
+ oxnas_rps_timer_config(rps, rps->timer_period, 0);
+
+ return 0;
+}
+
+static int oxnas_rps_timer_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ struct oxnas_rps_timer *rps =
+ container_of(evt, struct oxnas_rps_timer, clkevent);
+
+ oxnas_rps_timer_config(rps, delta, 0);
+
+ return 0;
+}
+
+static int __init oxnas_rps_clockevent_init(struct oxnas_rps_timer *rps)
+{
+ ulong clk_rate = clk_get_rate(rps->clk);
+ ulong timer_rate;
+
+ /* Start with prescaler 1 */
+ rps->timer_prescaler = TIMER_DIV1;
+ rps->timer_period = DIV_ROUND_UP(clk_rate, HZ);
+ timer_rate = clk_rate;
+
+ if (rps->timer_period > TIMER_MAX_VAL) {
+ rps->timer_prescaler = TIMER_DIV16;
+ timer_rate = clk_rate / 16;
+ rps->timer_period = DIV_ROUND_UP(timer_rate, HZ);
+ }
+ if (rps->timer_period > TIMER_MAX_VAL) {
+ rps->timer_prescaler = TIMER_DIV256;
+ timer_rate = clk_rate / 256;
+ rps->timer_period = DIV_ROUND_UP(timer_rate, HZ);
+ }
+
+ rps->clkevent.name = "oxnas-rps";
+ rps->clkevent.features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_DYNIRQ;
+ rps->clkevent.tick_resume = oxnas_rps_timer_shutdown;
+ rps->clkevent.set_state_shutdown = oxnas_rps_timer_shutdown;
+ rps->clkevent.set_state_periodic = oxnas_rps_timer_set_periodic;
+ rps->clkevent.set_state_oneshot = oxnas_rps_timer_set_oneshot;
+ rps->clkevent.set_next_event = oxnas_rps_timer_next_event;
+ rps->clkevent.rating = 200;
+ rps->clkevent.cpumask = cpu_possible_mask;
+ rps->clkevent.irq = rps->irq;
+ clockevents_config_and_register(&rps->clkevent,
+ timer_rate,
+ 1,
+ TIMER_MAX_VAL);
+
+ pr_info("Registered clock event rate %luHz prescaler %x period %lu\n",
+ clk_rate,
+ rps->timer_prescaler,
+ rps->timer_period);
+
+ return 0;
+}
+
+/* Clocksource */
+
+static void __iomem *timer_sched_base;
+
+static u64 notrace oxnas_rps_read_sched_clock(void)
+{
+ return ~readl_relaxed(timer_sched_base);
+}
+
+static int __init oxnas_rps_clocksource_init(struct oxnas_rps_timer *rps)
+{
+ ulong clk_rate = clk_get_rate(rps->clk);
+ int ret;
+
+ /* use prescale 16 */
+ clk_rate = clk_rate / 16;
+
+ writel_relaxed(TIMER_MAX_VAL, rps->clksrc_base + TIMER_LOAD_REG);
+ writel_relaxed(TIMER_PERIODIC | TIMER_ENABLE | TIMER_DIV16,
+ rps->clksrc_base + TIMER_CTRL_REG);
+
+ timer_sched_base = rps->clksrc_base + TIMER_CURR_REG;
+ sched_clock_register(oxnas_rps_read_sched_clock,
+ TIMER_BITS, clk_rate);
+ ret = clocksource_mmio_init(timer_sched_base,
+ "oxnas_rps_clocksource_timer",
+ clk_rate, 250, TIMER_BITS,
+ clocksource_mmio_readl_down);
+ if (WARN_ON(ret)) {
+ pr_err("can't register clocksource\n");
+ return ret;
+ }
+
+ pr_info("Registered clocksource rate %luHz\n", clk_rate);
+
+ return 0;
+}
+
+static int __init oxnas_rps_timer_init(struct device_node *np)
+{
+ struct oxnas_rps_timer *rps;
+ void __iomem *base;
+ int ret;
+
+ rps = kzalloc(sizeof(*rps), GFP_KERNEL);
+ if (!rps)
+ return -ENOMEM;
+
+ rps->clk = of_clk_get(np, 0);
+ if (IS_ERR(rps->clk)) {
+ ret = PTR_ERR(rps->clk);
+ goto err_alloc;
+ }
+
+ ret = clk_prepare_enable(rps->clk);
+ if (ret)
+ goto err_clk;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ ret = -ENXIO;
+ goto err_clk_prepare;
+ }
+
+ rps->irq = irq_of_parse_and_map(np, 0);
+ if (rps->irq < 0) {
+ ret = -EINVAL;
+ goto err_iomap;
+ }
+
+ rps->clkevt_base = base + TIMER1_REG_OFFSET;
+ rps->clksrc_base = base + TIMER2_REG_OFFSET;
+
+ /* Disable timers */
+ writel_relaxed(0, rps->clkevt_base + TIMER_CTRL_REG);
+ writel_relaxed(0, rps->clksrc_base + TIMER_CTRL_REG);
+ writel_relaxed(0, rps->clkevt_base + TIMER_LOAD_REG);
+ writel_relaxed(0, rps->clksrc_base + TIMER_LOAD_REG);
+ writel_relaxed(0, rps->clkevt_base + TIMER_CLRINT_REG);
+ writel_relaxed(0, rps->clksrc_base + TIMER_CLRINT_REG);
+
+ ret = request_irq(rps->irq, oxnas_rps_timer_irq,
+ IRQF_TIMER | IRQF_IRQPOLL,
+ "rps-timer", rps);
+ if (ret)
+ goto err_iomap;
+
+ ret = oxnas_rps_clocksource_init(rps);
+ if (ret)
+ goto err_irqreq;
+
+ ret = oxnas_rps_clockevent_init(rps);
+ if (ret)
+ goto err_irqreq;
+
+ return 0;
+
+err_irqreq:
+ free_irq(rps->irq, rps);
+err_iomap:
+ iounmap(base);
+err_clk_prepare:
+ clk_disable_unprepare(rps->clk);
+err_clk:
+ clk_put(rps->clk);
+err_alloc:
+ kfree(rps);
+
+ return ret;
+}
+
+CLOCKSOURCE_OF_DECLARE(ox810se_rps,
+ "oxsemi,ox810se-rps-timer", oxnas_rps_timer_init);
diff --git a/drivers/clocksource/timer-prima2.c b/drivers/clocksource/timer-prima2.c
index 2854c663e..c32148ec7 100644
--- a/drivers/clocksource/timer-prima2.c
+++ b/drivers/clocksource/timer-prima2.c
@@ -19,7 +19,6 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/sched_clock.h>
-#include <asm/mach/time.h>
#define PRIMA2_CLOCK_FREQ 1000000
@@ -189,24 +188,36 @@ static void __init sirfsoc_clockevent_init(void)
}
/* initialize the kernel jiffy timer source */
-static void __init sirfsoc_prima2_timer_init(struct device_node *np)
+static int __init sirfsoc_prima2_timer_init(struct device_node *np)
{
unsigned long rate;
struct clk *clk;
+ int ret;
clk = of_clk_get(np, 0);
- BUG_ON(IS_ERR(clk));
+ if (IS_ERR(clk)) {
+ pr_err("Failed to get clock");
+ return PTR_ERR(clk);
+ }
- BUG_ON(clk_prepare_enable(clk));
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ pr_err("Failed to enable clock");
+ return ret;
+ }
rate = clk_get_rate(clk);
- BUG_ON(rate < PRIMA2_CLOCK_FREQ);
- BUG_ON(rate % PRIMA2_CLOCK_FREQ);
+ if (rate < PRIMA2_CLOCK_FREQ || rate % PRIMA2_CLOCK_FREQ) {
+ pr_err("Invalid clock rate");
+ return -EINVAL;
+ }
sirfsoc_timer_base = of_iomap(np, 0);
- if (!sirfsoc_timer_base)
- panic("unable to map timer cpu registers\n");
+ if (!sirfsoc_timer_base) {
+ pr_err("unable to map timer cpu registers\n");
+ return -ENXIO;
+ }
sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0);
@@ -216,14 +227,23 @@ static void __init sirfsoc_prima2_timer_init(struct device_node *np)
writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI);
writel_relaxed(BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_STATUS);
- BUG_ON(clocksource_register_hz(&sirfsoc_clocksource,
- PRIMA2_CLOCK_FREQ));
+ ret = clocksource_register_hz(&sirfsoc_clocksource, PRIMA2_CLOCK_FREQ);
+ if (ret) {
+ pr_err("Failed to register clocksource");
+ return ret;
+ }
sched_clock_register(sirfsoc_read_sched_clock, 64, PRIMA2_CLOCK_FREQ);
- BUG_ON(setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq));
+ ret = setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq);
+ if (ret) {
+ pr_err("Failed to setup irq");
+ return ret;
+ }
sirfsoc_clockevent_init();
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(sirfsoc_prima2_timer,
"sirf,prima2-tick", sirfsoc_prima2_timer_init);
diff --git a/drivers/clocksource/timer-sp804.c b/drivers/clocksource/timer-sp804.c
index 5f45b9ade..d07863388 100644
--- a/drivers/clocksource/timer-sp804.c
+++ b/drivers/clocksource/timer-sp804.c
@@ -77,7 +77,7 @@ void __init sp804_timer_disable(void __iomem *base)
writel(0, base + TIMER_CTRL);
}
-void __init __sp804_clocksource_and_sched_clock_init(void __iomem *base,
+int __init __sp804_clocksource_and_sched_clock_init(void __iomem *base,
const char *name,
struct clk *clk,
int use_sched_clock)
@@ -89,14 +89,13 @@ void __init __sp804_clocksource_and_sched_clock_init(void __iomem *base,
if (IS_ERR(clk)) {
pr_err("sp804: clock not found: %d\n",
(int)PTR_ERR(clk));
- return;
+ return PTR_ERR(clk);
}
}
rate = sp804_get_clock_rate(clk);
-
if (rate < 0)
- return;
+ return -EINVAL;
/* setup timer 0 as free-running clocksource */
writel(0, base + TIMER_CTRL);
@@ -112,6 +111,8 @@ void __init __sp804_clocksource_and_sched_clock_init(void __iomem *base,
sched_clock_base = base;
sched_clock_register(sp804_read, 32, rate);
}
+
+ return 0;
}
@@ -186,7 +187,7 @@ static struct irqaction sp804_timer_irq = {
.dev_id = &sp804_clockevent,
};
-void __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struct clk *clk, const char *name)
+int __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struct clk *clk, const char *name)
{
struct clock_event_device *evt = &sp804_clockevent;
long rate;
@@ -196,12 +197,12 @@ void __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struc
if (IS_ERR(clk)) {
pr_err("sp804: %s clock not found: %d\n", name,
(int)PTR_ERR(clk));
- return;
+ return PTR_ERR(clk);
}
rate = sp804_get_clock_rate(clk);
if (rate < 0)
- return;
+ return -EINVAL;
clkevt_base = base;
clkevt_reload = DIV_ROUND_CLOSEST(rate, HZ);
@@ -213,27 +214,31 @@ void __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struc
setup_irq(irq, &sp804_timer_irq);
clockevents_config_and_register(evt, rate, 0xf, 0xffffffff);
+
+ return 0;
}
-static void __init sp804_of_init(struct device_node *np)
+static int __init sp804_of_init(struct device_node *np)
{
static bool initialized = false;
void __iomem *base;
- int irq;
+ int irq, ret = -EINVAL;
u32 irq_num = 0;
struct clk *clk1, *clk2;
const char *name = of_get_property(np, "compatible", NULL);
base = of_iomap(np, 0);
- if (WARN_ON(!base))
- return;
+ if (!base)
+ return -ENXIO;
/* Ensure timers are disabled */
writel(0, base + TIMER_CTRL);
writel(0, base + TIMER_2_BASE + TIMER_CTRL);
- if (initialized || !of_device_is_available(np))
+ if (initialized || !of_device_is_available(np)) {
+ ret = -EINVAL;
goto err;
+ }
clk1 = of_clk_get(np, 0);
if (IS_ERR(clk1))
@@ -256,35 +261,53 @@ static void __init sp804_of_init(struct device_node *np)
of_property_read_u32(np, "arm,sp804-has-irq", &irq_num);
if (irq_num == 2) {
- __sp804_clockevents_init(base + TIMER_2_BASE, irq, clk2, name);
- __sp804_clocksource_and_sched_clock_init(base, name, clk1, 1);
+
+ ret = __sp804_clockevents_init(base + TIMER_2_BASE, irq, clk2, name);
+ if (ret)
+ goto err;
+
+ ret = __sp804_clocksource_and_sched_clock_init(base, name, clk1, 1);
+ if (ret)
+ goto err;
} else {
- __sp804_clockevents_init(base, irq, clk1 , name);
- __sp804_clocksource_and_sched_clock_init(base + TIMER_2_BASE,
- name, clk2, 1);
+
+ ret = __sp804_clockevents_init(base, irq, clk1, name);
+ if (ret)
+ goto err;
+
+ ret = __sp804_clocksource_and_sched_clock_init(base + TIMER_2_BASE,
+ name, clk2, 1);
+ if (ret)
+ goto err;
}
initialized = true;
- return;
+ return 0;
err:
iounmap(base);
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(sp804, "arm,sp804", sp804_of_init);
-static void __init integrator_cp_of_init(struct device_node *np)
+static int __init integrator_cp_of_init(struct device_node *np)
{
static int init_count = 0;
void __iomem *base;
- int irq;
+ int irq, ret = -EINVAL;
const char *name = of_get_property(np, "compatible", NULL);
struct clk *clk;
base = of_iomap(np, 0);
- if (WARN_ON(!base))
- return;
+ if (!base) {
+ pr_err("Failed to iomap");
+ return -ENXIO;
+ }
+
clk = of_clk_get(np, 0);
- if (WARN_ON(IS_ERR(clk)))
- return;
+ if (IS_ERR(clk)) {
+ pr_err("Failed to get clock");
+ return PTR_ERR(clk);
+ }
/* Ensure timer is disabled */
writel(0, base + TIMER_CTRL);
@@ -292,19 +315,24 @@ static void __init integrator_cp_of_init(struct device_node *np)
if (init_count == 2 || !of_device_is_available(np))
goto err;
- if (!init_count)
- __sp804_clocksource_and_sched_clock_init(base, name, clk, 0);
- else {
+ if (!init_count) {
+ ret = __sp804_clocksource_and_sched_clock_init(base, name, clk, 0);
+ if (ret)
+ goto err;
+ } else {
irq = irq_of_parse_and_map(np, 0);
if (irq <= 0)
goto err;
- __sp804_clockevents_init(base, irq, clk, name);
+ ret = __sp804_clockevents_init(base, irq, clk, name);
+ if (ret)
+ goto err;
}
init_count++;
- return;
+ return 0;
err:
iounmap(base);
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(intcp, "arm,integrator-cp-timer", integrator_cp_of_init);
diff --git a/drivers/clocksource/timer-stm32.c b/drivers/clocksource/timer-stm32.c
index f3dcb7679..1b2574c4f 100644
--- a/drivers/clocksource/timer-stm32.c
+++ b/drivers/clocksource/timer-stm32.c
@@ -98,7 +98,7 @@ static struct stm32_clock_event_ddata clock_event_ddata = {
},
};
-static void __init stm32_clockevent_init(struct device_node *np)
+static int __init stm32_clockevent_init(struct device_node *np)
{
struct stm32_clock_event_ddata *data = &clock_event_ddata;
struct clk *clk;
@@ -130,12 +130,14 @@ static void __init stm32_clockevent_init(struct device_node *np)
data->base = of_iomap(np, 0);
if (!data->base) {
+ ret = -ENXIO;
pr_err("failed to map registers for clockevent\n");
goto err_iomap;
}
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
+ ret = -EINVAL;
pr_err("%s: failed to get irq.\n", np->full_name);
goto err_get_irq;
}
@@ -173,7 +175,7 @@ static void __init stm32_clockevent_init(struct device_node *np)
pr_info("%s: STM32 clockevent driver initialized (%d bits)\n",
np->full_name, bits);
- return;
+ return ret;
err_get_irq:
iounmap(data->base);
@@ -182,7 +184,7 @@ err_iomap:
err_clk_enable:
clk_put(clk);
err_clk_get:
- return;
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(stm32, "st,stm32-timer", stm32_clockevent_init);
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 24c83f9ef..c184eb841 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -311,33 +311,42 @@ err_free:
return ret;
}
-static void __init sun5i_timer_init(struct device_node *node)
+static int __init sun5i_timer_init(struct device_node *node)
{
struct reset_control *rstc;
void __iomem *timer_base;
struct clk *clk;
- int irq;
+ int irq, ret;
timer_base = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (IS_ERR(timer_base))
- panic("Can't map registers");
+ if (IS_ERR(timer_base)) {
+ pr_err("Can't map registers");
+ return PTR_ERR(timer_base);;
+ }
irq = irq_of_parse_and_map(node, 0);
- if (irq <= 0)
- panic("Can't parse IRQ");
+ if (irq <= 0) {
+ pr_err("Can't parse IRQ");
+ return -EINVAL;
+ }
clk = of_clk_get(node, 0);
- if (IS_ERR(clk))
- panic("Can't get timer clock");
+ if (IS_ERR(clk)) {
+ pr_err("Can't get timer clock");
+ return PTR_ERR(clk);
+ }
rstc = of_reset_control_get(node, NULL);
if (!IS_ERR(rstc))
reset_control_deassert(rstc);
- sun5i_setup_clocksource(node, timer_base, clk, irq);
- sun5i_setup_clockevent(node, timer_base, clk, irq);
+ ret = sun5i_setup_clocksource(node, timer_base, clk, irq);
+ if (ret)
+ return ret;
+
+ return sun5i_setup_clockevent(node, timer_base, clk, irq);
}
CLOCKSOURCE_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer",
- sun5i_timer_init);
+ sun5i_timer_init);
CLOCKSOURCE_OF_DECLARE(sun7i_a20, "allwinner,sun7i-a20-hstimer",
- sun5i_timer_init);
+ sun5i_timer_init);
diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c
index 8518d9dfb..92b7e390f 100644
--- a/drivers/clocksource/timer-ti-32k.c
+++ b/drivers/clocksource/timer-ti-32k.c
@@ -88,14 +88,14 @@ static u64 notrace omap_32k_read_sched_clock(void)
return ti_32k_read_cycles(&ti_32k_timer.cs);
}
-static void __init ti_32k_timer_init(struct device_node *np)
+static int __init ti_32k_timer_init(struct device_node *np)
{
int ret;
ti_32k_timer.base = of_iomap(np, 0);
if (!ti_32k_timer.base) {
pr_err("Can't ioremap 32k timer base\n");
- return;
+ return -ENXIO;
}
ti_32k_timer.counter = ti_32k_timer.base;
@@ -116,11 +116,13 @@ static void __init ti_32k_timer_init(struct device_node *np)
ret = clocksource_register_hz(&ti_32k_timer.cs, 32768);
if (ret) {
pr_err("32k_counter: can't register clocksource\n");
- return;
+ return ret;
}
sched_clock_register(omap_32k_read_sched_clock, 32, 32768);
pr_info("OMAP clocksource: 32k_counter at 32768 Hz\n");
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(ti_32k_timer, "ti,omap-counter32k",
ti_32k_timer_init);
diff --git a/drivers/clocksource/timer-u300.c b/drivers/clocksource/timer-u300.c
index 1744b2438..704e40c6f 100644
--- a/drivers/clocksource/timer-u300.c
+++ b/drivers/clocksource/timer-u300.c
@@ -359,27 +359,37 @@ static struct delay_timer u300_delay_timer;
/*
* This sets up the system timers, clock source and clock event.
*/
-static void __init u300_timer_init_of(struct device_node *np)
+static int __init u300_timer_init_of(struct device_node *np)
{
unsigned int irq;
struct clk *clk;
unsigned long rate;
+ int ret;
u300_timer_base = of_iomap(np, 0);
- if (!u300_timer_base)
- panic("could not ioremap system timer\n");
+ if (!u300_timer_base) {
+ pr_err("could not ioremap system timer\n");
+ return -ENXIO;
+ }
/* Get the IRQ for the GP1 timer */
irq = irq_of_parse_and_map(np, 2);
- if (!irq)
- panic("no IRQ for system timer\n");
+ if (!irq) {
+ pr_err("no IRQ for system timer\n");
+ return -EINVAL;
+ }
pr_info("U300 GP1 timer @ base: %p, IRQ: %u\n", u300_timer_base, irq);
/* Clock the interrupt controller */
clk = of_clk_get(np, 0);
- BUG_ON(IS_ERR(clk));
- clk_prepare_enable(clk);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
rate = clk_get_rate(clk);
u300_clockevent_data.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);
@@ -410,7 +420,9 @@ static void __init u300_timer_init_of(struct device_node *np)
u300_timer_base + U300_TIMER_APP_RGPT1);
/* Set up the IRQ handler */
- setup_irq(irq, &u300_timer_irq);
+ ret = setup_irq(irq, &u300_timer_irq);
+ if (ret)
+ return ret;
/* Reset the General Purpose timer 2 */
writel(U300_TIMER_APP_RGPT2_TIMER_RESET,
@@ -428,9 +440,12 @@ static void __init u300_timer_init_of(struct device_node *np)
u300_timer_base + U300_TIMER_APP_EGPT2);
/* Use general purpose timer 2 as clock source */
- if (clocksource_mmio_init(u300_timer_base + U300_TIMER_APP_GPT2CC,
- "GPT2", rate, 300, 32, clocksource_mmio_readl_up))
+ ret = clocksource_mmio_init(u300_timer_base + U300_TIMER_APP_GPT2CC,
+ "GPT2", rate, 300, 32, clocksource_mmio_readl_up);
+ if (ret) {
pr_err("timer: failed to initialize U300 clock source\n");
+ return ret;
+ }
/* Configure and register the clockevent */
clockevents_config_and_register(&u300_clockevent_data.cevd, rate,
@@ -440,6 +455,7 @@ static void __init u300_timer_init_of(struct device_node *np)
* TODO: init and register the rest of the timers too, they can be
* used by hrtimers!
*/
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(u300_timer, "stericsson,u300-apptimer",
diff --git a/drivers/clocksource/versatile.c b/drivers/clocksource/versatile.c
index 0a26d3dde..220b490a8 100644
--- a/drivers/clocksource/versatile.c
+++ b/drivers/clocksource/versatile.c
@@ -25,16 +25,18 @@ static u64 notrace versatile_sys_24mhz_read(void)
return readl(versatile_sys_24mhz);
}
-static void __init versatile_sched_clock_init(struct device_node *node)
+static int __init versatile_sched_clock_init(struct device_node *node)
{
void __iomem *base = of_iomap(node, 0);
if (!base)
- return;
+ return -ENXIO;
versatile_sys_24mhz = base + SYS_24MHZ;
sched_clock_register(versatile_sys_24mhz_read, 32, 24000000);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(vexpress, "arm,vexpress-sysreg",
versatile_sched_clock_init);
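The versatile change is about as small as a sched_clock provider gets. For reference, a self-contained sketch of the same pattern, assuming a hypothetical free-running 32-bit up-counter at 24 MHz:

static void __iomem *example_counter;

static u64 notrace example_sched_read(void)
{
	return readl(example_counter);
}

static int __init example_sched_clock_init(void __iomem *base)
{
	example_counter = base;
	/* arguments: read callback, counter width in bits, rate in Hz */
	sched_clock_register(example_sched_read, 32, 24000000);
	return 0;
}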
diff --git a/drivers/clocksource/vf_pit_timer.c b/drivers/clocksource/vf_pit_timer.c
index a0e6c6853..55d8d8402 100644
--- a/drivers/clocksource/vf_pit_timer.c
+++ b/drivers/clocksource/vf_pit_timer.c
@@ -156,15 +156,18 @@ static int __init pit_clockevent_init(unsigned long rate, int irq)
return 0;
}
-static void __init pit_timer_init(struct device_node *np)
+static int __init pit_timer_init(struct device_node *np)
{
struct clk *pit_clk;
void __iomem *timer_base;
unsigned long clk_rate;
- int irq;
+ int irq, ret;
timer_base = of_iomap(np, 0);
- BUG_ON(!timer_base);
+ if (!timer_base) {
+ pr_err("Failed to iomap");
+ return -ENXIO;
+ }
/*
* PIT0 and PIT1 can be chained to build a 64-bit timer,
@@ -175,12 +178,16 @@ static void __init pit_timer_init(struct device_node *np)
clkevt_base = timer_base + PITn_OFFSET(3);
irq = irq_of_parse_and_map(np, 0);
- BUG_ON(irq <= 0);
+ if (irq <= 0)
+ return -EINVAL;
pit_clk = of_clk_get(np, 0);
- BUG_ON(IS_ERR(pit_clk));
+ if (IS_ERR(pit_clk))
+ return PTR_ERR(pit_clk);
- BUG_ON(clk_prepare_enable(pit_clk));
+ ret = clk_prepare_enable(pit_clk);
+ if (ret)
+ return ret;
clk_rate = clk_get_rate(pit_clk);
cycle_per_jiffy = clk_rate / (HZ);
@@ -188,8 +195,10 @@ static void __init pit_timer_init(struct device_node *np)
/* enable the pit module */
__raw_writel(~PITMCR_MDIS, timer_base + PITMCR);
- BUG_ON(pit_clocksource_init(clk_rate));
+ ret = pit_clocksource_init(clk_rate);
+ if (ret)
+ return ret;
- pit_clockevent_init(clk_rate, irq);
+ return pit_clockevent_init(clk_rate, irq);
}
CLOCKSOURCE_OF_DECLARE(vf610, "fsl,vf610-pit", pit_timer_init);
diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
index ddb409274..b15069483 100644
--- a/drivers/clocksource/vt8500_timer.c
+++ b/drivers/clocksource/vt8500_timer.c
@@ -121,38 +121,48 @@ static struct irqaction irq = {
.dev_id = &clockevent,
};
-static void __init vt8500_timer_init(struct device_node *np)
+static int __init vt8500_timer_init(struct device_node *np)
{
- int timer_irq;
+ int timer_irq, ret;
regbase = of_iomap(np, 0);
if (!regbase) {
pr_err("%s: Missing iobase description in Device Tree\n",
__func__);
- return;
+ return -ENXIO;
}
+
timer_irq = irq_of_parse_and_map(np, 0);
if (!timer_irq) {
pr_err("%s: Missing irq description in Device Tree\n",
__func__);
- return;
+ return -EINVAL;
}
writel(1, regbase + TIMER_CTRL_VAL);
writel(0xf, regbase + TIMER_STATUS_VAL);
writel(~0, regbase + TIMER_MATCH_VAL);
- if (clocksource_register_hz(&clocksource, VT8500_TIMER_HZ))
+ ret = clocksource_register_hz(&clocksource, VT8500_TIMER_HZ);
+ if (ret) {
pr_err("%s: vt8500_timer_init: clocksource_register failed for %s\n",
- __func__, clocksource.name);
+ __func__, clocksource.name);
+ return ret;
+ }
clockevent.cpumask = cpumask_of(0);
- if (setup_irq(timer_irq, &irq))
+ ret = setup_irq(timer_irq, &irq);
+ if (ret) {
pr_err("%s: setup_irq failed for %s\n", __func__,
clockevent.name);
+ return ret;
+ }
+
clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ,
MIN_OSCR_DELTA * 2, 0xf0000000);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init);
diff --git a/drivers/clocksource/zevio-timer.c b/drivers/clocksource/zevio-timer.c
index ceaa6133f..9a53f5ef6 100644
--- a/drivers/clocksource/zevio-timer.c
+++ b/drivers/clocksource/zevio-timer.c
@@ -210,9 +210,9 @@ error_free:
return ret;
}
-static void __init zevio_timer_init(struct device_node *node)
+static int __init zevio_timer_init(struct device_node *node)
{
- BUG_ON(zevio_timer_add(node));
+ return zevio_timer_add(node);
}
CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_init);
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index b02f9c606..a782ce877 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -22,7 +22,6 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/init.h>
@@ -390,5 +389,4 @@ static int __init cn_proc_init(void)
}
return 0;
}
-
-module_init(cn_proc_init);
+device_initcall(cn_proc_init);
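Since the proc-events code cannot be built as a module (its Kconfig option is bool), module_init() here was only ever an alias for device_initcall(); using the latter directly also drops the <linux/module.h> dependency. A sketch of the idiom for purely built-in code:

#include <linux/init.h>

static int __init example_init(void)
{
	/* set up the built-in facility */
	return 0;
}
device_initcall(example_init);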
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index b7445b6ae..74919aa81 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -31,23 +31,17 @@ config CPU_FREQ_BOOST_SW
depends on THERMAL
config CPU_FREQ_STAT
- tristate "CPU frequency translation statistics"
- default y
+ bool "CPU frequency transition statistics"
help
- This driver exports CPU frequency statistics information through sysfs
- file system.
-
- To compile this driver as a module, choose M here: the
- module will be called cpufreq_stats.
+ Export CPU frequency statistics information through sysfs.
If in doubt, say N.
config CPU_FREQ_STAT_DETAILS
- bool "CPU frequency translation statistics details"
+ bool "CPU frequency transition statistics details"
depends on CPU_FREQ_STAT
help
- This will show detail CPU frequency translation table in sysfs file
- system.
+ Show detailed CPU frequency transition table in sysfs.
If in doubt, say N.
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 32a15052f..297e9128f 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -468,20 +468,17 @@ unsigned int acpi_cpufreq_fast_switch(struct cpufreq_policy *policy,
struct acpi_cpufreq_data *data = policy->driver_data;
struct acpi_processor_performance *perf;
struct cpufreq_frequency_table *entry;
- unsigned int next_perf_state, next_freq, freq;
+ unsigned int next_perf_state, next_freq, index;
/*
* Find the closest frequency above target_freq.
- *
- * The table is sorted in the reverse order with respect to the
- * frequency and all of the entries are valid (see the initialization).
*/
- entry = policy->freq_table;
- do {
- entry++;
- freq = entry->frequency;
- } while (freq >= target_freq && freq != CPUFREQ_TABLE_END);
- entry--;
+ if (policy->cached_target_freq == target_freq)
+ index = policy->cached_resolved_idx;
+ else
+ index = cpufreq_table_find_index_dl(policy, target_freq);
+
+ entry = &policy->freq_table[index];
next_freq = entry->frequency;
next_perf_state = entry->driver_data;
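The fast-switch rework pairs with the new cpufreq_driver_resolve_freq() helper added in cpufreq.c below: resolving a raw target caches both the target and its table index in the policy, so a driver's ->fast_switch() can reuse policy->cached_resolved_idx instead of re-walking the table. A hedged sketch of the governor-side flow, with hypothetical names:

static void example_fast_switch_path(struct cpufreq_policy *policy,
				     unsigned int raw_freq)
{
	unsigned int next_freq;

	/*
	 * Clamps raw_freq to policy->min/max and records
	 * policy->cached_target_freq / cached_resolved_idx.
	 */
	next_freq = cpufreq_driver_resolve_freq(policy, raw_freq);

	/*
	 * A matching request can now be served from the cached index,
	 * as acpi_cpufreq_fast_switch() does above.
	 */
	cpufreq_driver_fast_switch(policy, next_freq);
}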
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
index 404360cad..042023bbb 100644
--- a/drivers/cpufreq/amd_freq_sensitivity.c
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -48,9 +48,8 @@ static unsigned int amd_powersave_bias_target(struct cpufreq_policy *policy,
struct policy_dbs_info *policy_dbs = policy->governor_data;
struct dbs_data *od_data = policy_dbs->dbs_data;
struct od_dbs_tuners *od_tuners = od_data->tuners;
- struct od_policy_dbs_info *od_info = to_dbs_info(policy_dbs);
- if (!od_info->freq_table)
+ if (!policy->freq_table)
return freq_next;
rdmsr_on_cpu(policy->cpu, MSR_AMD64_FREQ_SENSITIVITY_ACTUAL,
@@ -92,10 +91,9 @@ static unsigned int amd_powersave_bias_target(struct cpufreq_policy *policy,
else {
unsigned int index;
- cpufreq_frequency_table_target(policy,
- od_info->freq_table, policy->cur - 1,
- CPUFREQ_RELATION_H, &index);
- freq_next = od_info->freq_table[index].frequency;
+ index = cpufreq_table_find_index_h(policy,
+ policy->cur - 1);
+ freq_next = policy->freq_table[index].frequency;
}
data->freq_prev = freq_next;
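The replacement above uses the new cpufreq_table_find_index_*() helper family, which indexes policy->freq_table directly instead of threading a table pointer through every governor. A sketch, assuming only that the policy carries a valid frequency table:

static unsigned int example_pick_freq(struct cpufreq_policy *policy,
				      unsigned int target_freq)
{
	/* _h: highest frequency <= target; _l picks lowest >= target */
	int index = cpufreq_table_find_index_h(policy, target_freq);

	return policy->freq_table[index].frequency;
}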
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 5617c7087..3dd4884c6 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -74,19 +74,12 @@ static inline bool has_target(void)
}
/* internal prototypes */
-static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
+static int cpufreq_init_governor(struct cpufreq_policy *policy);
+static void cpufreq_exit_governor(struct cpufreq_policy *policy);
static int cpufreq_start_governor(struct cpufreq_policy *policy);
-
-static inline void cpufreq_exit_governor(struct cpufreq_policy *policy)
-{
- (void)cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
-}
-
-static inline void cpufreq_stop_governor(struct cpufreq_policy *policy)
-{
- (void)cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-}
+static void cpufreq_stop_governor(struct cpufreq_policy *policy);
+static void cpufreq_governor_limits(struct cpufreq_policy *policy);
/**
* Two notifier lists: the "policy" list is involved in the
@@ -133,15 +126,6 @@ struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
-struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
-{
- struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
-
- return policy && !policy_is_inactive(policy) ?
- policy->freq_table : NULL;
-}
-EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
-
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
u64 idle_time;
@@ -354,6 +338,7 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
pr_debug("FREQ: %lu - CPU: %lu\n",
(unsigned long)freqs->new, (unsigned long)freqs->cpu);
trace_cpu_frequency(freqs->new, freqs->cpu);
+ cpufreq_stats_record_transition(policy, freqs->new);
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
CPUFREQ_POSTCHANGE, freqs);
if (likely(policy) && likely(policy->cpu == freqs->cpu))
@@ -507,6 +492,38 @@ void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
}
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
+/**
+ * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
+ * one.
+ * @policy: policy for which @target_freq must be resolved.
+ * @target_freq: target frequency to resolve.
+ *
+ * The target to driver frequency mapping is cached in the policy.
+ *
+ * Return: Lowest driver-supported frequency greater than or equal to the
+ * given target_freq, subject to policy (min/max) and driver limitations.
+ */
+unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ target_freq = clamp_val(target_freq, policy->min, policy->max);
+ policy->cached_target_freq = target_freq;
+
+ if (cpufreq_driver->target_index) {
+ int idx;
+
+ idx = cpufreq_frequency_table_target(policy, target_freq,
+ CPUFREQ_RELATION_L);
+ policy->cached_resolved_idx = idx;
+ return policy->freq_table[idx].frequency;
+ }
+
+ if (cpufreq_driver->resolve_freq)
+ return cpufreq_driver->resolve_freq(policy, target_freq);
+
+ return target_freq;
+}
+EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
+
/*********************************************************************
* SYSFS INTERFACE *
*********************************************************************/
@@ -1115,6 +1132,7 @@ static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
CPUFREQ_REMOVE_POLICY, policy);
down_write(&policy->rwsem);
+ cpufreq_stats_free_table(policy);
cpufreq_remove_dev_symlink(policy);
kobj = &policy->kobj;
cmp = &policy->kobj_unregister;
@@ -1265,13 +1283,12 @@ static int cpufreq_online(unsigned int cpu)
}
}
- blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
- CPUFREQ_START, policy);
-
if (new_policy) {
ret = cpufreq_add_dev_interface(policy);
if (ret)
goto out_exit_policy;
+
+ cpufreq_stats_create_table(policy);
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_CREATE_POLICY, policy);
@@ -1280,6 +1297,9 @@ static int cpufreq_online(unsigned int cpu)
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
}
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_START, policy);
+
ret = cpufreq_init_policy(policy);
if (ret) {
pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
@@ -1556,9 +1576,6 @@ static unsigned int cpufreq_update_current_freq(struct cpufreq_policy *policy)
{
unsigned int new_freq;
- if (cpufreq_suspended)
- return 0;
-
new_freq = cpufreq_driver->get(policy->cpu);
if (!new_freq)
return 0;
@@ -1864,14 +1881,17 @@ static int __target_intermediate(struct cpufreq_policy *policy,
return ret;
}
-static int __target_index(struct cpufreq_policy *policy,
- struct cpufreq_frequency_table *freq_table, int index)
+static int __target_index(struct cpufreq_policy *policy, int index)
{
struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
unsigned int intermediate_freq = 0;
+ unsigned int newfreq = policy->freq_table[index].frequency;
int retval = -EINVAL;
bool notify;
+ if (newfreq == policy->cur)
+ return 0;
+
notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
if (notify) {
/* Handle switching to intermediate frequency */
@@ -1886,7 +1906,7 @@ static int __target_index(struct cpufreq_policy *policy,
freqs.old = freqs.new;
}
- freqs.new = freq_table[index].frequency;
+ freqs.new = newfreq;
pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
__func__, policy->cpu, freqs.old, freqs.new);
@@ -1923,17 +1943,13 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int relation)
{
unsigned int old_target_freq = target_freq;
- struct cpufreq_frequency_table *freq_table;
- int index, retval;
+ int index;
if (cpufreq_disabled())
return -ENODEV;
/* Make sure that target_freq is within supported range */
- if (target_freq > policy->max)
- target_freq = policy->max;
- if (target_freq < policy->min)
- target_freq = policy->min;
+ target_freq = clamp_val(target_freq, policy->min, policy->max);
pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
policy->cpu, target_freq, relation, old_target_freq);
@@ -1956,23 +1972,9 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
if (!cpufreq_driver->target_index)
return -EINVAL;
- freq_table = cpufreq_frequency_get_table(policy->cpu);
- if (unlikely(!freq_table)) {
- pr_err("%s: Unable to find freq_table\n", __func__);
- return -EINVAL;
- }
-
- retval = cpufreq_frequency_table_target(policy, freq_table, target_freq,
- relation, &index);
- if (unlikely(retval)) {
- pr_err("%s: Unable to find matching freq\n", __func__);
- return retval;
- }
-
- if (freq_table[index].frequency == policy->cur)
- return 0;
+ index = cpufreq_frequency_table_target(policy, target_freq, relation);
- return __target_index(policy, freq_table, index);
+ return __target_index(policy, index);
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
@@ -1997,7 +1999,7 @@ __weak struct cpufreq_governor *cpufreq_fallback_governor(void)
return NULL;
}
-static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
+static int cpufreq_init_governor(struct cpufreq_policy *policy)
{
int ret;
@@ -2025,36 +2027,82 @@ static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
}
}
- if (event == CPUFREQ_GOV_POLICY_INIT)
- if (!try_module_get(policy->governor->owner))
- return -EINVAL;
-
- pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event);
+ if (!try_module_get(policy->governor->owner))
+ return -EINVAL;
- ret = policy->governor->governor(policy, event);
+ pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
- if (event == CPUFREQ_GOV_POLICY_INIT) {
- if (ret)
+ if (policy->governor->init) {
+ ret = policy->governor->init(policy);
+ if (ret) {
module_put(policy->governor->owner);
- else
- policy->governor->initialized++;
- } else if (event == CPUFREQ_GOV_POLICY_EXIT) {
- policy->governor->initialized--;
- module_put(policy->governor->owner);
+ return ret;
+ }
}
- return ret;
+ return 0;
+}
+
+static void cpufreq_exit_governor(struct cpufreq_policy *policy)
+{
+ if (cpufreq_suspended || !policy->governor)
+ return;
+
+ pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
+
+ if (policy->governor->exit)
+ policy->governor->exit(policy);
+
+ module_put(policy->governor->owner);
}
static int cpufreq_start_governor(struct cpufreq_policy *policy)
{
int ret;
+ if (cpufreq_suspended)
+ return 0;
+
+ if (!policy->governor)
+ return -EINVAL;
+
+ pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
+
if (cpufreq_driver->get && !cpufreq_driver->setpolicy)
cpufreq_update_current_freq(policy);
- ret = cpufreq_governor(policy, CPUFREQ_GOV_START);
- return ret ? ret : cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+ if (policy->governor->start) {
+ ret = policy->governor->start(policy);
+ if (ret)
+ return ret;
+ }
+
+ if (policy->governor->limits)
+ policy->governor->limits(policy);
+
+ return 0;
+}
+
+static void cpufreq_stop_governor(struct cpufreq_policy *policy)
+{
+ if (cpufreq_suspended || !policy->governor)
+ return;
+
+ pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
+
+ if (policy->governor->stop)
+ policy->governor->stop(policy);
+}
+
+static void cpufreq_governor_limits(struct cpufreq_policy *policy)
+{
+ if (cpufreq_suspended || !policy->governor)
+ return;
+
+ pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
+
+ if (policy->governor->limits)
+ policy->governor->limits(policy);
}
int cpufreq_register_governor(struct cpufreq_governor *governor)
@@ -2069,7 +2117,6 @@ int cpufreq_register_governor(struct cpufreq_governor *governor)
mutex_lock(&cpufreq_governor_mutex);
- governor->initialized = 0;
err = -EBUSY;
if (!find_governor(governor->name)) {
err = 0;
@@ -2184,6 +2231,8 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
policy->min = new_policy->min;
policy->max = new_policy->max;
+ policy->cached_target_freq = UINT_MAX;
+
pr_debug("new min and max freqs are %u - %u kHz\n",
policy->min, policy->max);
@@ -2195,7 +2244,8 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
if (new_policy->governor == policy->governor) {
pr_debug("cpufreq: governor limits update\n");
- return cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+ cpufreq_governor_limits(policy);
+ return 0;
}
pr_debug("governor switch\n");
@@ -2210,7 +2260,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
/* start new governor */
policy->governor = new_policy->governor;
- ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
+ ret = cpufreq_init_governor(policy);
if (!ret) {
ret = cpufreq_start_governor(policy);
if (!ret) {
@@ -2224,7 +2274,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
pr_debug("starting governor %s failed\n", policy->governor->name);
if (old_gov) {
policy->governor = old_gov;
- if (cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
+ if (cpufreq_init_governor(policy))
policy->governor = NULL;
else
cpufreq_start_governor(policy);
@@ -2309,26 +2359,25 @@ static struct notifier_block __refdata cpufreq_cpu_notifier = {
*********************************************************************/
static int cpufreq_boost_set_sw(int state)
{
- struct cpufreq_frequency_table *freq_table;
struct cpufreq_policy *policy;
int ret = -EINVAL;
for_each_active_policy(policy) {
- freq_table = cpufreq_frequency_get_table(policy->cpu);
- if (freq_table) {
- ret = cpufreq_frequency_table_cpuinfo(policy,
- freq_table);
- if (ret) {
- pr_err("%s: Policy frequency update failed\n",
- __func__);
- break;
- }
-
- down_write(&policy->rwsem);
- policy->user_policy.max = policy->max;
- cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
- up_write(&policy->rwsem);
+ if (!policy->freq_table)
+ continue;
+
+ ret = cpufreq_frequency_table_cpuinfo(policy,
+ policy->freq_table);
+ if (ret) {
+ pr_err("%s: Policy frequency update failed\n",
+ __func__);
+ break;
}
+
+ down_write(&policy->rwsem);
+ policy->user_policy.max = policy->max;
+ cpufreq_governor_limits(policy);
+ up_write(&policy->rwsem);
}
return ret;
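The core-side helpers above (cpufreq_init_governor(), cpufreq_start_governor(), and friends) replace the old single ->governor(policy, event) multiplexer with direct calls into discrete governor callbacks. A hypothetical minimal governor under the new interface would look roughly like this:

static int example_gov_init(struct cpufreq_policy *policy)
{
	/* allocate per-policy state */
	return 0;
}

static void example_gov_exit(struct cpufreq_policy *policy)
{
	/* free per-policy state */
}

static int example_gov_start(struct cpufreq_policy *policy)
{
	/* start sampling / timers */
	return 0;
}

static void example_gov_stop(struct cpufreq_policy *policy)
{
	/* stop sampling / timers */
}

static void example_gov_limits(struct cpufreq_policy *policy)
{
	/* re-evaluate the target against policy->min/max */
}

static struct cpufreq_governor example_governor = {
	.name	= "example",
	.owner	= THIS_MODULE,
	.init	= example_gov_init,
	.exit	= example_gov_exit,
	.start	= example_gov_start,
	.stop	= example_gov_stop,
	.limits	= example_gov_limits,
};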
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 7306830c5..a70367f53 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -17,7 +17,6 @@
struct cs_policy_dbs_info {
struct policy_dbs_info policy_dbs;
unsigned int down_skip;
- unsigned int requested_freq;
};
static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs)
@@ -81,19 +80,17 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
/* Check for frequency increase */
if (load > dbs_data->up_threshold) {
+ unsigned int requested_freq = policy->cur;
+
dbs_info->down_skip = 0;
/* if we are already at full speed then break out early */
- if (dbs_info->requested_freq == policy->max)
+ if (requested_freq == policy->max)
goto out;
- dbs_info->requested_freq += get_freq_target(cs_tuners, policy);
-
- if (dbs_info->requested_freq > policy->max)
- dbs_info->requested_freq = policy->max;
+ requested_freq += get_freq_target(cs_tuners, policy);
- __cpufreq_driver_target(policy, dbs_info->requested_freq,
- CPUFREQ_RELATION_H);
+ __cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_H);
goto out;
}
@@ -104,36 +101,27 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
/* Check for frequency decrease */
if (load < cs_tuners->down_threshold) {
- unsigned int freq_target;
+ unsigned int freq_target, requested_freq = policy->cur;
/*
* if we cannot reduce the frequency anymore, break out early
*/
- if (policy->cur == policy->min)
+ if (requested_freq == policy->min)
goto out;
freq_target = get_freq_target(cs_tuners, policy);
- if (dbs_info->requested_freq > freq_target)
- dbs_info->requested_freq -= freq_target;
+ if (requested_freq > freq_target)
+ requested_freq -= freq_target;
else
- dbs_info->requested_freq = policy->min;
+ requested_freq = policy->min;
- __cpufreq_driver_target(policy, dbs_info->requested_freq,
- CPUFREQ_RELATION_L);
+ __cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_L);
}
out:
return dbs_data->sampling_rate;
}
-static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
- void *data);
-
-static struct notifier_block cs_cpufreq_notifier_block = {
- .notifier_call = dbs_cpufreq_notifier,
-};
-
/************************** sysfs interface ************************/
-static struct dbs_governor cs_dbs_gov;
static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
const char *buf, size_t count)
@@ -274,15 +262,13 @@ static void cs_free(struct policy_dbs_info *policy_dbs)
kfree(to_dbs_info(policy_dbs));
}
-static int cs_init(struct dbs_data *dbs_data, bool notify)
+static int cs_init(struct dbs_data *dbs_data)
{
struct cs_dbs_tuners *tuners;
tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
- if (!tuners) {
- pr_err("%s: kzalloc failed\n", __func__);
+ if (!tuners)
return -ENOMEM;
- }
tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
tuners->freq_step = DEF_FREQUENCY_STEP;
@@ -294,19 +280,11 @@ static int cs_init(struct dbs_data *dbs_data, bool notify)
dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
jiffies_to_usecs(10);
- if (notify)
- cpufreq_register_notifier(&cs_cpufreq_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
-
return 0;
}
-static void cs_exit(struct dbs_data *dbs_data, bool notify)
+static void cs_exit(struct dbs_data *dbs_data)
{
- if (notify)
- cpufreq_unregister_notifier(&cs_cpufreq_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
-
kfree(dbs_data->tuners);
}
@@ -315,16 +293,10 @@ static void cs_start(struct cpufreq_policy *policy)
struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
dbs_info->down_skip = 0;
- dbs_info->requested_freq = policy->cur;
}
-static struct dbs_governor cs_dbs_gov = {
- .gov = {
- .name = "conservative",
- .governor = cpufreq_governor_dbs,
- .max_transition_latency = TRANSITION_LATENCY_LIMIT,
- .owner = THIS_MODULE,
- },
+static struct dbs_governor cs_governor = {
+ .gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("conservative"),
.kobj_type = { .default_attrs = cs_attributes },
.gov_dbs_timer = cs_dbs_timer,
.alloc = cs_alloc,
@@ -334,33 +306,7 @@ static struct dbs_governor cs_dbs_gov = {
.start = cs_start,
};
-#define CPU_FREQ_GOV_CONSERVATIVE (&cs_dbs_gov.gov)
-
-static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
- void *data)
-{
- struct cpufreq_freqs *freq = data;
- struct cpufreq_policy *policy = cpufreq_cpu_get_raw(freq->cpu);
- struct cs_policy_dbs_info *dbs_info;
-
- if (!policy)
- return 0;
-
- /* policy isn't governed by conservative governor */
- if (policy->governor != CPU_FREQ_GOV_CONSERVATIVE)
- return 0;
-
- dbs_info = to_dbs_info(policy->governor_data);
- /*
- * we only care if our internally tracked freq moves outside the 'valid'
- * ranges of frequency available to us otherwise we do not change it
- */
- if (dbs_info->requested_freq > policy->max
- || dbs_info->requested_freq < policy->min)
- dbs_info->requested_freq = freq->new;
-
- return 0;
-}
+#define CPU_FREQ_GOV_CONSERVATIVE (&cs_governor.gov)
static int __init cpufreq_gov_dbs_init(void)
{
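
The conservative hunks above drop the cached dbs_info->requested_freq, along with the transition notifier that kept it in sync; each sample now starts from the live policy->cur. A standalone sketch of that decision follows, with invented threshold and step values; in the kernel, __cpufreq_driver_target() performs the final clamping to policy->min/max:

#include <stdio.h>

/*
 * Model of the reworked cs_dbs_timer() decision: the requested frequency is
 * recomputed from the live policy->cur each sample instead of being cached
 * in dbs_info.  Thresholds and step are invented stand-ins for the tuners.
 */
static unsigned int cs_next_freq(unsigned int cur, unsigned int min,
				 unsigned int max, unsigned int load)
{
	const unsigned int up_threshold = 80, down_threshold = 20;
	const unsigned int freq_step = 100000;	/* stand-in for get_freq_target() */
	unsigned int requested_freq = cur;

	if (load > up_threshold && requested_freq != max)
		requested_freq += freq_step;	/* RELATION_H clamps to max */
	else if (load < down_threshold && requested_freq != min)
		requested_freq = requested_freq > freq_step ?
				 requested_freq - freq_step : min;
	return requested_freq;
}

int main(void)
{
	printf("%u kHz\n", cs_next_freq(1000000, 800000, 2000000, 90)); /* 1100000 */
	return 0;
}
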
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index be498d56d..e415349ab 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -336,17 +336,6 @@ static inline void gov_clear_update_util(struct cpufreq_policy *policy)
synchronize_sched();
}
-static void gov_cancel_work(struct cpufreq_policy *policy)
-{
- struct policy_dbs_info *policy_dbs = policy->governor_data;
-
- gov_clear_update_util(policy_dbs->policy);
- irq_work_sync(&policy_dbs->irq_work);
- cancel_work_sync(&policy_dbs->work);
- atomic_set(&policy_dbs->work_count, 0);
- policy_dbs->work_in_progress = false;
-}
-
static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
struct dbs_governor *gov)
{
@@ -389,7 +378,7 @@ static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
gov->free(policy_dbs);
}
-static int cpufreq_governor_init(struct cpufreq_policy *policy)
+int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
{
struct dbs_governor *gov = dbs_governor_of(policy);
struct dbs_data *dbs_data;
@@ -429,7 +418,7 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy)
gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);
- ret = gov->init(dbs_data, !policy->governor->initialized);
+ ret = gov->init(dbs_data);
if (ret)
goto free_policy_dbs_info;
@@ -458,13 +447,13 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy)
goto out;
/* Failure, so roll back. */
- pr_err("cpufreq: Governor initialization failed (dbs_data kobject init error %d)\n", ret);
+ pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);
policy->governor_data = NULL;
if (!have_governor_per_policy())
gov->gdbs_data = NULL;
- gov->exit(dbs_data, !policy->governor->initialized);
+ gov->exit(dbs_data);
kfree(dbs_data);
free_policy_dbs_info:
@@ -474,8 +463,9 @@ out:
mutex_unlock(&gov_dbs_data_mutex);
return ret;
}
+EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_init);
-static int cpufreq_governor_exit(struct cpufreq_policy *policy)
+void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy)
{
struct dbs_governor *gov = dbs_governor_of(policy);
struct policy_dbs_info *policy_dbs = policy->governor_data;
@@ -493,17 +483,17 @@ static int cpufreq_governor_exit(struct cpufreq_policy *policy)
if (!have_governor_per_policy())
gov->gdbs_data = NULL;
- gov->exit(dbs_data, policy->governor->initialized == 1);
+ gov->exit(dbs_data);
kfree(dbs_data);
}
free_policy_dbs_info(policy_dbs, gov);
mutex_unlock(&gov_dbs_data_mutex);
- return 0;
}
+EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_exit);
-static int cpufreq_governor_start(struct cpufreq_policy *policy)
+int cpufreq_dbs_governor_start(struct cpufreq_policy *policy)
{
struct dbs_governor *gov = dbs_governor_of(policy);
struct policy_dbs_info *policy_dbs = policy->governor_data;
@@ -539,47 +529,28 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy)
gov_set_update_util(policy_dbs, sampling_rate);
return 0;
}
+EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_start);
-static int cpufreq_governor_stop(struct cpufreq_policy *policy)
+void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy)
{
- gov_cancel_work(policy);
- return 0;
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+
+ gov_clear_update_util(policy_dbs->policy);
+ irq_work_sync(&policy_dbs->irq_work);
+ cancel_work_sync(&policy_dbs->work);
+ atomic_set(&policy_dbs->work_count, 0);
+ policy_dbs->work_in_progress = false;
}
+EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop);
-static int cpufreq_governor_limits(struct cpufreq_policy *policy)
+void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
{
struct policy_dbs_info *policy_dbs = policy->governor_data;
mutex_lock(&policy_dbs->timer_mutex);
-
- if (policy->max < policy->cur)
- __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
- else if (policy->min > policy->cur)
- __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
-
+ cpufreq_policy_apply_limits(policy);
gov_update_sample_delay(policy_dbs, 0);
mutex_unlock(&policy_dbs->timer_mutex);
-
- return 0;
-}
-
-int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event)
-{
- if (event == CPUFREQ_GOV_POLICY_INIT) {
- return cpufreq_governor_init(policy);
- } else if (policy->governor_data) {
- switch (event) {
- case CPUFREQ_GOV_POLICY_EXIT:
- return cpufreq_governor_exit(policy);
- case CPUFREQ_GOV_START:
- return cpufreq_governor_start(policy);
- case CPUFREQ_GOV_STOP:
- return cpufreq_governor_stop(policy);
- case CPUFREQ_GOV_LIMITS:
- return cpufreq_governor_limits(policy);
- }
- }
- return -EINVAL;
}
-EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);
+EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 34eb214b6..ef1037e9c 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -138,8 +138,8 @@ struct dbs_governor {
unsigned int (*gov_dbs_timer)(struct cpufreq_policy *policy);
struct policy_dbs_info *(*alloc)(void);
void (*free)(struct policy_dbs_info *policy_dbs);
- int (*init)(struct dbs_data *dbs_data, bool notify);
- void (*exit)(struct dbs_data *dbs_data, bool notify);
+ int (*init)(struct dbs_data *dbs_data);
+ void (*exit)(struct dbs_data *dbs_data);
void (*start)(struct cpufreq_policy *policy);
};
@@ -148,6 +148,25 @@ static inline struct dbs_governor *dbs_governor_of(struct cpufreq_policy *policy
return container_of(policy->governor, struct dbs_governor, gov);
}
+/* Governor callback routines */
+int cpufreq_dbs_governor_init(struct cpufreq_policy *policy);
+void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy);
+int cpufreq_dbs_governor_start(struct cpufreq_policy *policy);
+void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy);
+void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy);
+
+#define CPUFREQ_DBS_GOVERNOR_INITIALIZER(_name_) \
+ { \
+ .name = _name_, \
+ .max_transition_latency = TRANSITION_LATENCY_LIMIT, \
+ .owner = THIS_MODULE, \
+ .init = cpufreq_dbs_governor_init, \
+ .exit = cpufreq_dbs_governor_exit, \
+ .start = cpufreq_dbs_governor_start, \
+ .stop = cpufreq_dbs_governor_stop, \
+ .limits = cpufreq_dbs_governor_limits, \
+ }
+
/* Governor specific operations */
struct od_ops {
unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
@@ -155,7 +174,6 @@ struct od_ops {
};
unsigned int dbs_update(struct cpufreq_policy *policy);
-int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event);
void od_register_powersave_bias_handler(unsigned int (*f)
(struct cpufreq_policy *, unsigned int, unsigned int),
unsigned int powersave_bias);
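
The new CPUFREQ_DBS_GOVERNOR_INITIALIZER macro centralizes the five common callbacks, so a dbs governor only supplies its sampling hook and housekeeping ops. A hypothetical user (every ex_* symbol is a stand-in), mirroring the cs_governor and od_dbs_gov hunks in this patch:

/* Hypothetical governor wiring: the common init/exit/start/stop/limits
 * callbacks come from the macro; only governor-specific ops are filled in. */
static struct dbs_governor ex_dbs_gov = {
	.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("example"),
	.kobj_type = { .default_attrs = ex_attributes },
	.gov_dbs_timer = ex_dbs_timer,
	.alloc = ex_alloc,
	.free = ex_free,
	.init = ex_init,
	.exit = ex_exit,
	.start = ex_start,
};
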
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index b596c8410..58d6fe616 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -74,34 +74,30 @@ static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
{
unsigned int freq_req, freq_reduc, freq_avg;
unsigned int freq_hi, freq_lo;
- unsigned int index = 0;
+ unsigned int index;
unsigned int delay_hi_us;
struct policy_dbs_info *policy_dbs = policy->governor_data;
struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
struct dbs_data *dbs_data = policy_dbs->dbs_data;
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+ struct cpufreq_frequency_table *freq_table = policy->freq_table;
- if (!dbs_info->freq_table) {
+ if (!freq_table) {
dbs_info->freq_lo = 0;
dbs_info->freq_lo_delay_us = 0;
return freq_next;
}
- cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
- relation, &index);
- freq_req = dbs_info->freq_table[index].frequency;
+ index = cpufreq_frequency_table_target(policy, freq_next, relation);
+ freq_req = freq_table[index].frequency;
freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
freq_avg = freq_req - freq_reduc;
/* Find freq bounds for freq_avg in freq_table */
- index = 0;
- cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
- CPUFREQ_RELATION_H, &index);
- freq_lo = dbs_info->freq_table[index].frequency;
- index = 0;
- cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
- CPUFREQ_RELATION_L, &index);
- freq_hi = dbs_info->freq_table[index].frequency;
+ index = cpufreq_table_find_index_h(policy, freq_avg);
+ freq_lo = freq_table[index].frequency;
+ index = cpufreq_table_find_index_l(policy, freq_avg);
+ freq_hi = freq_table[index].frequency;
/* Find out how long we have to be in hi and lo freqs */
if (freq_hi == freq_lo) {
@@ -122,7 +118,6 @@ static void ondemand_powersave_bias_init(struct cpufreq_policy *policy)
{
struct od_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
- dbs_info->freq_table = cpufreq_frequency_get_table(policy->cpu);
dbs_info->freq_lo = 0;
}
@@ -370,17 +365,15 @@ static void od_free(struct policy_dbs_info *policy_dbs)
kfree(to_dbs_info(policy_dbs));
}
-static int od_init(struct dbs_data *dbs_data, bool notify)
+static int od_init(struct dbs_data *dbs_data)
{
struct od_dbs_tuners *tuners;
u64 idle_time;
int cpu;
tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
- if (!tuners) {
- pr_err("%s: kzalloc failed\n", __func__);
+ if (!tuners)
return -ENOMEM;
- }
cpu = get_cpu();
idle_time = get_cpu_idle_time_us(cpu, NULL);
@@ -411,7 +404,7 @@ static int od_init(struct dbs_data *dbs_data, bool notify)
return 0;
}
-static void od_exit(struct dbs_data *dbs_data, bool notify)
+static void od_exit(struct dbs_data *dbs_data)
{
kfree(dbs_data->tuners);
}
@@ -429,12 +422,7 @@ static struct od_ops od_ops = {
};
static struct dbs_governor od_dbs_gov = {
- .gov = {
- .name = "ondemand",
- .governor = cpufreq_governor_dbs,
- .max_transition_latency = TRANSITION_LATENCY_LIMIT,
- .owner = THIS_MODULE,
- },
+ .gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("ondemand"),
.kobj_type = { .default_attrs = od_attributes },
.gov_dbs_timer = od_dbs_timer,
.alloc = od_alloc,
diff --git a/drivers/cpufreq/cpufreq_ondemand.h b/drivers/cpufreq/cpufreq_ondemand.h
index f0121db3c..640ea4e97 100644
--- a/drivers/cpufreq/cpufreq_ondemand.h
+++ b/drivers/cpufreq/cpufreq_ondemand.h
@@ -13,7 +13,6 @@
struct od_policy_dbs_info {
struct policy_dbs_info policy_dbs;
- struct cpufreq_frequency_table *freq_table;
unsigned int freq_lo;
unsigned int freq_lo_delay_us;
unsigned int freq_hi_delay_us;
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c
index af9f4b96f..dafb679ad 100644
--- a/drivers/cpufreq/cpufreq_performance.c
+++ b/drivers/cpufreq/cpufreq_performance.c
@@ -16,27 +16,16 @@
#include <linux/init.h>
#include <linux/module.h>
-static int cpufreq_governor_performance(struct cpufreq_policy *policy,
- unsigned int event)
+static void cpufreq_gov_performance_limits(struct cpufreq_policy *policy)
{
- switch (event) {
- case CPUFREQ_GOV_START:
- case CPUFREQ_GOV_LIMITS:
- pr_debug("setting to %u kHz because of event %u\n",
- policy->max, event);
- __cpufreq_driver_target(policy, policy->max,
- CPUFREQ_RELATION_H);
- break;
- default:
- break;
- }
- return 0;
+ pr_debug("setting to %u kHz\n", policy->max);
+ __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
}
static struct cpufreq_governor cpufreq_gov_performance = {
.name = "performance",
- .governor = cpufreq_governor_performance,
.owner = THIS_MODULE,
+ .limits = cpufreq_gov_performance_limits,
};
static int __init cpufreq_gov_performance_init(void)
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c
index b8b400232..78a651038 100644
--- a/drivers/cpufreq/cpufreq_powersave.c
+++ b/drivers/cpufreq/cpufreq_powersave.c
@@ -16,26 +16,15 @@
#include <linux/init.h>
#include <linux/module.h>
-static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
- unsigned int event)
+static void cpufreq_gov_powersave_limits(struct cpufreq_policy *policy)
{
- switch (event) {
- case CPUFREQ_GOV_START:
- case CPUFREQ_GOV_LIMITS:
- pr_debug("setting to %u kHz because of event %u\n",
- policy->min, event);
- __cpufreq_driver_target(policy, policy->min,
- CPUFREQ_RELATION_L);
- break;
- default:
- break;
- }
- return 0;
+ pr_debug("setting to %u kHz\n", policy->min);
+ __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
}
static struct cpufreq_governor cpufreq_gov_powersave = {
.name = "powersave",
- .governor = cpufreq_governor_powersave,
+ .limits = cpufreq_gov_powersave_limits,
.owner = THIS_MODULE,
};
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 5e370a30a..06d3abdff 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -15,7 +15,7 @@
#include <linux/slab.h>
#include <linux/cputime.h>
-static spinlock_t cpufreq_stats_lock;
+static DEFINE_SPINLOCK(cpufreq_stats_lock);
struct cpufreq_stats {
unsigned int total_trans;
@@ -52,6 +52,9 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
ssize_t len = 0;
int i;
+ if (policy->fast_switch_enabled)
+ return 0;
+
cpufreq_stats_update(stats);
for (i = 0; i < stats->state_num; i++) {
len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
@@ -68,6 +71,9 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
ssize_t len = 0;
int i, j;
+ if (policy->fast_switch_enabled)
+ return 0;
+
len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n");
len += snprintf(buf + len, PAGE_SIZE - len, " : ");
for (i = 0; i < stats->state_num; i++) {
@@ -130,7 +136,7 @@ static int freq_table_get_index(struct cpufreq_stats *stats, unsigned int freq)
return -1;
}
-static void __cpufreq_stats_free_table(struct cpufreq_policy *policy)
+void cpufreq_stats_free_table(struct cpufreq_policy *policy)
{
struct cpufreq_stats *stats = policy->stats;
@@ -146,39 +152,25 @@ static void __cpufreq_stats_free_table(struct cpufreq_policy *policy)
policy->stats = NULL;
}
-static void cpufreq_stats_free_table(unsigned int cpu)
-{
- struct cpufreq_policy *policy;
-
- policy = cpufreq_cpu_get(cpu);
- if (!policy)
- return;
-
- __cpufreq_stats_free_table(policy);
-
- cpufreq_cpu_put(policy);
-}
-
-static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
+void cpufreq_stats_create_table(struct cpufreq_policy *policy)
{
unsigned int i = 0, count = 0, ret = -ENOMEM;
struct cpufreq_stats *stats;
unsigned int alloc_size;
- unsigned int cpu = policy->cpu;
struct cpufreq_frequency_table *pos, *table;
/* We need cpufreq table for creating stats table */
- table = cpufreq_frequency_get_table(cpu);
+ table = policy->freq_table;
if (unlikely(!table))
- return 0;
+ return;
/* stats already initialized */
if (policy->stats)
- return -EEXIST;
+ return;
stats = kzalloc(sizeof(*stats), GFP_KERNEL);
if (!stats)
- return -ENOMEM;
+ return;
/* Find total allocation size */
cpufreq_for_each_valid_entry(pos, table)
@@ -215,80 +207,32 @@ static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
policy->stats = stats;
ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
if (!ret)
- return 0;
+ return;
/* We failed, release resources */
policy->stats = NULL;
kfree(stats->time_in_state);
free_stat:
kfree(stats);
-
- return ret;
-}
-
-static void cpufreq_stats_create_table(unsigned int cpu)
-{
- struct cpufreq_policy *policy;
-
- /*
- * "likely(!policy)" because normally cpufreq_stats will be registered
- * before cpufreq driver
- */
- policy = cpufreq_cpu_get(cpu);
- if (likely(!policy))
- return;
-
- __cpufreq_stats_create_table(policy);
-
- cpufreq_cpu_put(policy);
}
-static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
- unsigned long val, void *data)
+void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
+ unsigned int new_freq)
{
- int ret = 0;
- struct cpufreq_policy *policy = data;
-
- if (val == CPUFREQ_CREATE_POLICY)
- ret = __cpufreq_stats_create_table(policy);
- else if (val == CPUFREQ_REMOVE_POLICY)
- __cpufreq_stats_free_table(policy);
-
- return ret;
-}
-
-static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
- unsigned long val, void *data)
-{
- struct cpufreq_freqs *freq = data;
- struct cpufreq_policy *policy = cpufreq_cpu_get(freq->cpu);
- struct cpufreq_stats *stats;
+ struct cpufreq_stats *stats = policy->stats;
int old_index, new_index;
- if (!policy) {
- pr_err("%s: No policy found\n", __func__);
- return 0;
- }
-
- if (val != CPUFREQ_POSTCHANGE)
- goto put_policy;
-
- if (!policy->stats) {
+ if (!stats) {
pr_debug("%s: No stats found\n", __func__);
- goto put_policy;
+ return;
}
- stats = policy->stats;
-
old_index = stats->last_index;
- new_index = freq_table_get_index(stats, freq->new);
+ new_index = freq_table_get_index(stats, new_freq);
/* We can't do stats->time_in_state[-1]= .. */
- if (old_index == -1 || new_index == -1)
- goto put_policy;
-
- if (old_index == new_index)
- goto put_policy;
+ if (old_index == -1 || new_index == -1 || old_index == new_index)
+ return;
cpufreq_stats_update(stats);
@@ -297,61 +241,4 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
stats->trans_table[old_index * stats->max_state + new_index]++;
#endif
stats->total_trans++;
-
-put_policy:
- cpufreq_cpu_put(policy);
- return 0;
}
-
-static struct notifier_block notifier_policy_block = {
- .notifier_call = cpufreq_stat_notifier_policy
-};
-
-static struct notifier_block notifier_trans_block = {
- .notifier_call = cpufreq_stat_notifier_trans
-};
-
-static int __init cpufreq_stats_init(void)
-{
- int ret;
- unsigned int cpu;
-
- spin_lock_init(&cpufreq_stats_lock);
- ret = cpufreq_register_notifier(&notifier_policy_block,
- CPUFREQ_POLICY_NOTIFIER);
- if (ret)
- return ret;
-
- for_each_online_cpu(cpu)
- cpufreq_stats_create_table(cpu);
-
- ret = cpufreq_register_notifier(&notifier_trans_block,
- CPUFREQ_TRANSITION_NOTIFIER);
- if (ret) {
- cpufreq_unregister_notifier(&notifier_policy_block,
- CPUFREQ_POLICY_NOTIFIER);
- for_each_online_cpu(cpu)
- cpufreq_stats_free_table(cpu);
- return ret;
- }
-
- return 0;
-}
-static void __exit cpufreq_stats_exit(void)
-{
- unsigned int cpu;
-
- cpufreq_unregister_notifier(&notifier_policy_block,
- CPUFREQ_POLICY_NOTIFIER);
- cpufreq_unregister_notifier(&notifier_trans_block,
- CPUFREQ_TRANSITION_NOTIFIER);
- for_each_online_cpu(cpu)
- cpufreq_stats_free_table(cpu);
-}
-
-MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
-MODULE_DESCRIPTION("Export cpufreq stats via sysfs");
-MODULE_LICENSE("GPL");
-
-module_init(cpufreq_stats_init);
-module_exit(cpufreq_stats_exit);
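
With the policy and transition notifiers gone, cpufreq_stats exposes cpufreq_stats_create_table(), cpufreq_stats_free_table() and cpufreq_stats_record_transition() for the cpufreq core to call directly. A userspace model of the transition bookkeeping above, using an invented three-entry table:

#include <stdio.h>

#define NSTATES 3
static const unsigned int freq_table[NSTATES] = { 800000, 1600000, 2400000 };
static unsigned long long trans_table[NSTATES][NSTATES];
static unsigned long long total_trans;
static int last_index;

static int freq_table_get_index(unsigned int freq)
{
	for (int i = 0; i < NSTATES; i++)
		if (freq_table[i] == freq)
			return i;
	return -1;			/* same sentinel as the kernel helper */
}

static void record_transition(unsigned int new_freq)
{
	int old_index = last_index;
	int new_index = freq_table_get_index(new_freq);

	/* mirrors the combined early-out in cpufreq_stats_record_transition() */
	if (old_index == -1 || new_index == -1 || old_index == new_index)
		return;

	trans_table[old_index][new_index]++;
	total_trans++;
	last_index = new_index;
}

int main(void)
{
	record_transition(1600000);
	record_transition(2400000);
	printf("transitions: %llu\n", total_trans);	/* prints 2 */
	return 0;
}
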
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 9f3dec9a3..bd897e3e1 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -65,66 +65,66 @@ static int cpufreq_userspace_policy_init(struct cpufreq_policy *policy)
return 0;
}
-static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
- unsigned int event)
+static void cpufreq_userspace_policy_exit(struct cpufreq_policy *policy)
+{
+ mutex_lock(&userspace_mutex);
+ kfree(policy->governor_data);
+ policy->governor_data = NULL;
+ mutex_unlock(&userspace_mutex);
+}
+
+static int cpufreq_userspace_policy_start(struct cpufreq_policy *policy)
{
unsigned int *setspeed = policy->governor_data;
- unsigned int cpu = policy->cpu;
- int rc = 0;
- if (event == CPUFREQ_GOV_POLICY_INIT)
- return cpufreq_userspace_policy_init(policy);
+ BUG_ON(!policy->cur);
+ pr_debug("started managing cpu %u\n", policy->cpu);
- if (!setspeed)
- return -EINVAL;
-
- switch (event) {
- case CPUFREQ_GOV_POLICY_EXIT:
- mutex_lock(&userspace_mutex);
- policy->governor_data = NULL;
- kfree(setspeed);
- mutex_unlock(&userspace_mutex);
- break;
- case CPUFREQ_GOV_START:
- BUG_ON(!policy->cur);
- pr_debug("started managing cpu %u\n", cpu);
-
- mutex_lock(&userspace_mutex);
- per_cpu(cpu_is_managed, cpu) = 1;
- *setspeed = policy->cur;
- mutex_unlock(&userspace_mutex);
- break;
- case CPUFREQ_GOV_STOP:
- pr_debug("managing cpu %u stopped\n", cpu);
-
- mutex_lock(&userspace_mutex);
- per_cpu(cpu_is_managed, cpu) = 0;
- *setspeed = 0;
- mutex_unlock(&userspace_mutex);
- break;
- case CPUFREQ_GOV_LIMITS:
- mutex_lock(&userspace_mutex);
- pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n",
- cpu, policy->min, policy->max, policy->cur, *setspeed);
-
- if (policy->max < *setspeed)
- __cpufreq_driver_target(policy, policy->max,
- CPUFREQ_RELATION_H);
- else if (policy->min > *setspeed)
- __cpufreq_driver_target(policy, policy->min,
- CPUFREQ_RELATION_L);
- else
- __cpufreq_driver_target(policy, *setspeed,
- CPUFREQ_RELATION_L);
- mutex_unlock(&userspace_mutex);
- break;
- }
- return rc;
+ mutex_lock(&userspace_mutex);
+ per_cpu(cpu_is_managed, policy->cpu) = 1;
+ *setspeed = policy->cur;
+ mutex_unlock(&userspace_mutex);
+ return 0;
+}
+
+static void cpufreq_userspace_policy_stop(struct cpufreq_policy *policy)
+{
+ unsigned int *setspeed = policy->governor_data;
+
+ pr_debug("managing cpu %u stopped\n", policy->cpu);
+
+ mutex_lock(&userspace_mutex);
+ per_cpu(cpu_is_managed, policy->cpu) = 0;
+ *setspeed = 0;
+ mutex_unlock(&userspace_mutex);
+}
+
+static void cpufreq_userspace_policy_limits(struct cpufreq_policy *policy)
+{
+ unsigned int *setspeed = policy->governor_data;
+
+ mutex_lock(&userspace_mutex);
+
+ pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n",
+ policy->cpu, policy->min, policy->max, policy->cur, *setspeed);
+
+ if (policy->max < *setspeed)
+ __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
+ else if (policy->min > *setspeed)
+ __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
+ else
+ __cpufreq_driver_target(policy, *setspeed, CPUFREQ_RELATION_L);
+
+ mutex_unlock(&userspace_mutex);
}
static struct cpufreq_governor cpufreq_gov_userspace = {
.name = "userspace",
- .governor = cpufreq_governor_userspace,
+ .init = cpufreq_userspace_policy_init,
+ .exit = cpufreq_userspace_policy_exit,
+ .start = cpufreq_userspace_policy_start,
+ .stop = cpufreq_userspace_policy_stop,
+ .limits = cpufreq_userspace_policy_limits,
.store_setspeed = cpufreq_set,
.show_setspeed = show_speed,
.owner = THIS_MODULE,
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index 7e336d20c..b95a87280 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -38,26 +38,6 @@ struct davinci_cpufreq {
};
static struct davinci_cpufreq cpufreq;
-static int davinci_verify_speed(struct cpufreq_policy *policy)
-{
- struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
- struct cpufreq_frequency_table *freq_table = pdata->freq_table;
- struct clk *armclk = cpufreq.armclk;
-
- if (freq_table)
- return cpufreq_frequency_table_verify(policy, freq_table);
-
- if (policy->cpu)
- return -EINVAL;
-
- cpufreq_verify_within_cpu_limits(policy);
- policy->min = clk_round_rate(armclk, policy->min * 1000) / 1000;
- policy->max = clk_round_rate(armclk, policy->max * 1000) / 1000;
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
- return 0;
-}
-
static int davinci_target(struct cpufreq_policy *policy, unsigned int idx)
{
struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
@@ -121,7 +101,7 @@ static int davinci_cpu_init(struct cpufreq_policy *policy)
static struct cpufreq_driver davinci_driver = {
.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
- .verify = davinci_verify_speed,
+ .verify = cpufreq_generic_frequency_table_verify,
.target_index = davinci_target,
.get = cpufreq_generic_get,
.init = davinci_cpu_init,
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index a8f1daffc..3bbbf9e69 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -63,8 +63,6 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
else
return 0;
}
-EXPORT_SYMBOL_GPL(cpufreq_frequency_table_cpuinfo);
-
int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table)
@@ -108,20 +106,16 @@ EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify);
*/
int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy)
{
- struct cpufreq_frequency_table *table =
- cpufreq_frequency_get_table(policy->cpu);
- if (!table)
+ if (!policy->freq_table)
return -ENODEV;
- return cpufreq_frequency_table_verify(policy, table);
+ return cpufreq_frequency_table_verify(policy, policy->freq_table);
}
EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);
-int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
- struct cpufreq_frequency_table *table,
- unsigned int target_freq,
- unsigned int relation,
- unsigned int *index)
+int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
{
struct cpufreq_frequency_table optimal = {
.driver_data = ~0,
@@ -132,7 +126,9 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
.frequency = 0,
};
struct cpufreq_frequency_table *pos;
+ struct cpufreq_frequency_table *table = policy->freq_table;
unsigned int freq, diff, i = 0;
+ int index;
pr_debug("request for target %u kHz (relation: %u) for cpu %u\n",
target_freq, relation, policy->cpu);
@@ -196,25 +192,26 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
}
}
if (optimal.driver_data > i) {
- if (suboptimal.driver_data > i)
- return -EINVAL;
- *index = suboptimal.driver_data;
- } else
- *index = optimal.driver_data;
+ if (suboptimal.driver_data > i) {
+ WARN(1, "Invalid frequency table: %d\n", policy->cpu);
+ return 0;
+ }
- pr_debug("target index is %u, freq is:%u kHz\n", *index,
- table[*index].frequency);
+ index = suboptimal.driver_data;
+ } else
+ index = optimal.driver_data;
- return 0;
+ pr_debug("target index is %u, freq is:%u kHz\n", index,
+ table[index].frequency);
+ return index;
}
-EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target);
+EXPORT_SYMBOL_GPL(cpufreq_table_index_unsorted);
int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
unsigned int freq)
{
- struct cpufreq_frequency_table *pos, *table;
+ struct cpufreq_frequency_table *pos, *table = policy->freq_table;
- table = cpufreq_frequency_get_table(policy->cpu);
if (unlikely(!table)) {
pr_debug("%s: Unable to find frequency table\n", __func__);
return -ENOENT;
@@ -300,15 +297,72 @@ struct freq_attr *cpufreq_generic_attr[] = {
};
EXPORT_SYMBOL_GPL(cpufreq_generic_attr);
+static int set_freq_table_sorted(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *pos, *table = policy->freq_table;
+ struct cpufreq_frequency_table *prev = NULL;
+ int ascending = 0;
+
+ policy->freq_table_sorted = CPUFREQ_TABLE_UNSORTED;
+
+ cpufreq_for_each_valid_entry(pos, table) {
+ if (!prev) {
+ prev = pos;
+ continue;
+ }
+
+ if (pos->frequency == prev->frequency) {
+ pr_warn("Duplicate freq-table entries: %u\n",
+ pos->frequency);
+ return -EINVAL;
+ }
+
+ /* Frequency increased from prev to pos */
+ if (pos->frequency > prev->frequency) {
+ /* But frequency was decreasing earlier */
+ if (ascending < 0) {
+ pr_debug("Freq table is unsorted\n");
+ return 0;
+ }
+
+ ascending++;
+ } else {
+ /* Frequency decreased from prev to pos */
+
+ /* But frequency was increasing earlier */
+ if (ascending > 0) {
+ pr_debug("Freq table is unsorted\n");
+ return 0;
+ }
+
+ ascending--;
+ }
+
+ prev = pos;
+ }
+
+ if (ascending > 0)
+ policy->freq_table_sorted = CPUFREQ_TABLE_SORTED_ASCENDING;
+ else
+ policy->freq_table_sorted = CPUFREQ_TABLE_SORTED_DESCENDING;
+
+ pr_debug("Freq table is sorted in %s order\n",
+ ascending > 0 ? "ascending" : "descending");
+
+ return 0;
+}
+
int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table)
{
- int ret = cpufreq_frequency_table_cpuinfo(policy, table);
+ int ret;
- if (!ret)
- policy->freq_table = table;
+ ret = cpufreq_frequency_table_cpuinfo(policy, table);
+ if (ret)
+ return ret;
- return ret;
+ policy->freq_table = table;
+ return set_freq_table_sorted(policy);
}
EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show);
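
set_freq_table_sorted() classifies the table once at validation time so later lookups can take a sorted fast path instead of the unsorted linear scan. A runnable model of the direction-tracking walk (frequencies invented; note the kernel rejects duplicate entries with -EINVAL, whereas this sketch just reports UNSORTED):

#include <stdio.h>

enum table_order { UNSORTED, ASCENDING, DESCENDING };

/* A single signed counter tracks the direction seen so far; any reversal
 * means the table is unsorted. */
static enum table_order classify(const unsigned int *freq, int n)
{
	int ascending = 0;

	for (int i = 1; i < n; i++) {
		if (freq[i] == freq[i - 1])
			return UNSORTED;	/* duplicate entry */
		if (freq[i] > freq[i - 1]) {
			if (ascending < 0)
				return UNSORTED;	/* was descending */
			ascending++;
		} else {
			if (ascending > 0)
				return UNSORTED;	/* was ascending */
			ascending--;
		}
	}
	return ascending > 0 ? ASCENDING : DESCENDING;
}

int main(void)
{
	unsigned int table[] = { 800000, 1200000, 1600000 };

	printf("%d\n", classify(table, 3));	/* 1 == ASCENDING */
	return 0;
}
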
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 1b159171f..be9eade14 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -35,6 +35,7 @@
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
+#include <asm/intel-family.h>
#define ATOM_RATIOS 0x66a
#define ATOM_VIDS 0x66b
@@ -96,7 +97,6 @@ static inline u64 div_ext_fp(u64 x, u64 y)
* read from MPERF MSR between last and current sample
* @tsc: Difference of time stamp counter between last and
* current sample
- * @freq: Effective frequency calculated from APERF/MPERF
* @time: Current time from scheduler
*
* This structure is used in the cpudata structure to store performance sample
@@ -108,7 +108,6 @@ struct sample {
u64 aperf;
u64 mperf;
u64 tsc;
- int freq;
u64 time;
};
@@ -281,9 +280,9 @@ struct cpu_defaults {
static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu);
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu);
-static struct pstate_adjust_policy pid_params;
-static struct pstate_funcs pstate_funcs;
-static int hwp_active;
+static struct pstate_adjust_policy pid_params __read_mostly;
+static struct pstate_funcs pstate_funcs __read_mostly;
+static int hwp_active __read_mostly;
#ifdef CONFIG_ACPI
static bool acpi_ppc;
@@ -807,7 +806,8 @@ static void __init intel_pstate_sysfs_expose_params(void)
static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
/* First disable HWP notification interrupt as we don't process them */
- wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
+ if (static_cpu_has(X86_FEATURE_HWP_NOTIFY))
+ wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
}
@@ -972,7 +972,7 @@ static int core_get_turbo_pstate(void)
u64 value;
int nont, ret;
- rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
+ rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
nont = core_get_max_pstate();
ret = (value) & 255;
if (ret <= nont)
@@ -1001,7 +1001,7 @@ static int knl_get_turbo_pstate(void)
u64 value;
int nont, ret;
- rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
+ rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
nont = core_get_max_pstate();
ret = (((value) >> 8) & 0xFF);
if (ret <= nont)
@@ -1091,6 +1091,26 @@ static struct cpu_defaults knl_params = {
},
};
+static struct cpu_defaults bxt_params = {
+ .pid_policy = {
+ .sample_rate_ms = 10,
+ .deadband = 0,
+ .setpoint = 60,
+ .p_gain_pct = 14,
+ .d_gain_pct = 0,
+ .i_gain_pct = 4,
+ },
+ .funcs = {
+ .get_max = core_get_max_pstate,
+ .get_max_physical = core_get_max_pstate_physical,
+ .get_min = core_get_min_pstate,
+ .get_turbo = core_get_turbo_pstate,
+ .get_scaling = core_get_scaling,
+ .get_val = core_get_val,
+ .get_target_pstate = get_target_pstate_use_cpu_load,
+ },
+};
+
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
int max_perf = cpu->pstate.turbo_pstate;
@@ -1113,17 +1133,12 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}
-static inline void intel_pstate_record_pstate(struct cpudata *cpu, int pstate)
-{
- trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
- cpu->pstate.current_pstate = pstate;
-}
-
static void intel_pstate_set_min_pstate(struct cpudata *cpu)
{
int pstate = cpu->pstate.min_pstate;
- intel_pstate_record_pstate(cpu, pstate);
+ trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
+ cpu->pstate.current_pstate = pstate;
/*
* Generally, there is no guarantee that this code will always run on
* the CPU being updated, so force the register update to run on the
@@ -1283,10 +1298,11 @@ static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
pstate = clamp_t(int, pstate, min_perf, max_perf);
+ trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
if (pstate == cpu->pstate.current_pstate)
return;
- intel_pstate_record_pstate(cpu, pstate);
+ cpu->pstate.current_pstate = pstate;
wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
}
@@ -1334,29 +1350,32 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
(unsigned long)&policy }
static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
- ICPU(0x2a, core_params),
- ICPU(0x2d, core_params),
- ICPU(0x37, silvermont_params),
- ICPU(0x3a, core_params),
- ICPU(0x3c, core_params),
- ICPU(0x3d, core_params),
- ICPU(0x3e, core_params),
- ICPU(0x3f, core_params),
- ICPU(0x45, core_params),
- ICPU(0x46, core_params),
- ICPU(0x47, core_params),
- ICPU(0x4c, airmont_params),
- ICPU(0x4e, core_params),
- ICPU(0x4f, core_params),
- ICPU(0x5e, core_params),
- ICPU(0x56, core_params),
- ICPU(0x57, knl_params),
+ ICPU(INTEL_FAM6_SANDYBRIDGE, core_params),
+ ICPU(INTEL_FAM6_SANDYBRIDGE_X, core_params),
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT1, silvermont_params),
+ ICPU(INTEL_FAM6_IVYBRIDGE, core_params),
+ ICPU(INTEL_FAM6_HASWELL_CORE, core_params),
+ ICPU(INTEL_FAM6_BROADWELL_CORE, core_params),
+ ICPU(INTEL_FAM6_IVYBRIDGE_X, core_params),
+ ICPU(INTEL_FAM6_HASWELL_X, core_params),
+ ICPU(INTEL_FAM6_HASWELL_ULT, core_params),
+ ICPU(INTEL_FAM6_HASWELL_GT3E, core_params),
+ ICPU(INTEL_FAM6_BROADWELL_GT3E, core_params),
+ ICPU(INTEL_FAM6_ATOM_AIRMONT, airmont_params),
+ ICPU(INTEL_FAM6_SKYLAKE_MOBILE, core_params),
+ ICPU(INTEL_FAM6_BROADWELL_X, core_params),
+ ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, core_params),
+ ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params),
+ ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_params),
+ ICPU(INTEL_FAM6_ATOM_GOLDMONT, bxt_params),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
-static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
- ICPU(0x56, core_params),
+static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
+ ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params),
+ ICPU(INTEL_FAM6_BROADWELL_X, core_params),
+ ICPU(INTEL_FAM6_SKYLAKE_X, core_params),
{}
};
@@ -1575,12 +1594,12 @@ static struct cpufreq_driver intel_pstate_driver = {
.name = "intel_pstate",
};
-static int __initdata no_load;
-static int __initdata no_hwp;
-static int __initdata hwp_only;
-static unsigned int force_load;
+static int no_load __initdata;
+static int no_hwp __initdata;
+static int hwp_only __initdata;
+static unsigned int force_load __initdata;
-static int intel_pstate_msrs_not_valid(void)
+static int __init intel_pstate_msrs_not_valid(void)
{
if (!pstate_funcs.get_max() ||
!pstate_funcs.get_min() ||
@@ -1590,7 +1609,7 @@ static int intel_pstate_msrs_not_valid(void)
return 0;
}
-static void copy_pid_params(struct pstate_adjust_policy *policy)
+static void __init copy_pid_params(struct pstate_adjust_policy *policy)
{
pid_params.sample_rate_ms = policy->sample_rate_ms;
pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
@@ -1601,7 +1620,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
pid_params.setpoint = policy->setpoint;
}
-static void copy_cpu_funcs(struct pstate_funcs *funcs)
+static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
{
pstate_funcs.get_max = funcs->get_max;
pstate_funcs.get_max_physical = funcs->get_max_physical;
@@ -1616,7 +1635,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
#ifdef CONFIG_ACPI
-static bool intel_pstate_no_acpi_pss(void)
+static bool __init intel_pstate_no_acpi_pss(void)
{
int i;
@@ -1645,7 +1664,7 @@ static bool intel_pstate_no_acpi_pss(void)
return true;
}
-static bool intel_pstate_has_acpi_ppc(void)
+static bool __init intel_pstate_has_acpi_ppc(void)
{
int i;
@@ -1673,7 +1692,7 @@ struct hw_vendor_info {
};
/* Hardware vendor-specific info that has its own power management modes */
-static struct hw_vendor_info vendor_info[] = {
+static struct hw_vendor_info vendor_info[] __initdata = {
{1, "HP ", "ProLiant", PSS},
{1, "ORACLE", "X4-2 ", PPC},
{1, "ORACLE", "X4-2L ", PPC},
@@ -1692,7 +1711,7 @@ static struct hw_vendor_info vendor_info[] = {
{0, "", ""},
};
-static bool intel_pstate_platform_pwr_mgmt_exists(void)
+static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
{
struct acpi_table_header hdr;
struct hw_vendor_info *v_info;
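
The intel_pstate ID tables now name CPUs via INTEL_FAM6_* symbols instead of raw model numbers. A hedged sketch of the same matching pattern (ex_* names are invented; the positional initializer mirrors the driver's ICPU() macro, including X86_FEATURE_APERFMPERF):

#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <linux/errno.h>
#include <linux/init.h>

static const struct x86_cpu_id ex_ids[] __initconst = {
	/* vendor, family, model, feature -- same layout as ICPU() above */
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_DESKTOP, X86_FEATURE_APERFMPERF },
	{}
};

static int __init ex_probe(void)
{
	/* walks the table the same way intel_pstate_init() walks its IDs */
	return x86_match_cpu(ex_ids) ? 0 : -ENODEV;
}
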
diff --git a/drivers/cpufreq/mvebu-cpufreq.c b/drivers/cpufreq/mvebu-cpufreq.c
index e920889b9..ed915ee85 100644
--- a/drivers/cpufreq/mvebu-cpufreq.c
+++ b/drivers/cpufreq/mvebu-cpufreq.c
@@ -70,7 +70,7 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
continue;
}
- clk = clk_get(cpu_dev, 0);
+ clk = clk_get(cpu_dev, NULL);
if (IS_ERR(clk)) {
pr_err("Cannot get clock for CPU %d\n", cpu);
return PTR_ERR(clk);
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 54c45368e..d3ffde806 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -64,12 +64,14 @@
/**
* struct global_pstate_info - Per policy data structure to maintain history of
* global pstates
- * @highest_lpstate: The local pstate from which we are ramping down
+ * @highest_lpstate_idx: The local pstate index from which we are
+ * ramping down
* @elapsed_time: Time in ms spent in ramping down from
- * highest_lpstate
+ * highest_lpstate_idx
* @last_sampled_time: Time from boot in ms when global pstates were
* last set
- * @last_lpstate,last_gpstate: Last set values for local and global pstates
+ * @last_lpstate_idx: Last set local pstate, as a cpufreq table index
+ * @last_gpstate_idx: Last set global pstate, as a cpufreq table index
* @timer: Is used for ramping down if cpu goes idle for
* a long time with global pstate held high
* @gpstate_lock: A spinlock to maintain synchronization between
@@ -77,11 +79,11 @@
 * governor's target_index calls
*/
struct global_pstate_info {
- int highest_lpstate;
+ int highest_lpstate_idx;
unsigned int elapsed_time;
unsigned int last_sampled_time;
- int last_lpstate;
- int last_gpstate;
+ int last_lpstate_idx;
+ int last_gpstate_idx;
spinlock_t gpstate_lock;
struct timer_list timer;
};
@@ -124,29 +126,66 @@ static int nr_chips;
static DEFINE_PER_CPU(struct chip *, chip_info);
/*
- * Note: The set of pstates consists of contiguous integers, the
- * smallest of which is indicated by powernv_pstate_info.min, the
- * largest of which is indicated by powernv_pstate_info.max.
+ * Note:
+ * The set of pstates consists of contiguous integers.
+ * powernv_pstate_info stores the frequency table indices of the
+ * max, min and nominal frequencies. It also stores the number of
+ * available frequencies.
*
- * The nominal pstate is the highest non-turbo pstate in this
- * platform. This is indicated by powernv_pstate_info.nominal.
+ * powernv_pstate_info.nominal indicates the index of the highest
+ * non-turbo frequency.
*/
static struct powernv_pstate_info {
- int min;
- int max;
- int nominal;
- int nr_pstates;
+ unsigned int min;
+ unsigned int max;
+ unsigned int nominal;
+ unsigned int nr_pstates;
} powernv_pstate_info;
+/* Use the following functions for conversions between pstate_id and index */
+static inline int idx_to_pstate(unsigned int i)
+{
+ if (unlikely(i >= powernv_pstate_info.nr_pstates)) {
+ pr_warn_once("index %u is out of bound\n", i);
+ return powernv_freqs[powernv_pstate_info.nominal].driver_data;
+ }
+
+ return powernv_freqs[i].driver_data;
+}
+
+static inline unsigned int pstate_to_idx(int pstate)
+{
+ int min = powernv_freqs[powernv_pstate_info.min].driver_data;
+ int max = powernv_freqs[powernv_pstate_info.max].driver_data;
+
+ if (min > 0) {
+ if (unlikely((pstate < max) || (pstate > min))) {
+ pr_warn_once("pstate %d is out of bound\n", pstate);
+ return powernv_pstate_info.nominal;
+ }
+ } else {
+ if (unlikely((pstate > max) || (pstate < min))) {
+ pr_warn_once("pstate %d is out of bound\n", pstate);
+ return powernv_pstate_info.nominal;
+ }
+ }
+ /*
+ * abs() is deliberately used so that it works with
+ * both monotonically increasing and decreasing
+ * pstate values
+ */
+ return abs(pstate - idx_to_pstate(powernv_pstate_info.max));
+}
+
static inline void reset_gpstates(struct cpufreq_policy *policy)
{
struct global_pstate_info *gpstates = policy->driver_data;
- gpstates->highest_lpstate = 0;
+ gpstates->highest_lpstate_idx = 0;
gpstates->elapsed_time = 0;
gpstates->last_sampled_time = 0;
- gpstates->last_lpstate = 0;
- gpstates->last_gpstate = 0;
+ gpstates->last_lpstate_idx = 0;
+ gpstates->last_gpstate_idx = 0;
}
/*
@@ -156,9 +195,10 @@ static inline void reset_gpstates(struct cpufreq_policy *policy)
static int init_powernv_pstates(void)
{
struct device_node *power_mgt;
- int i, pstate_min, pstate_max, pstate_nominal, nr_pstates = 0;
+ int i, nr_pstates = 0;
const __be32 *pstate_ids, *pstate_freqs;
u32 len_ids, len_freqs;
+ u32 pstate_min, pstate_max, pstate_nominal;
power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
if (!power_mgt) {
@@ -208,6 +248,7 @@ static int init_powernv_pstates(void)
return -ENODEV;
}
+ powernv_pstate_info.nr_pstates = nr_pstates;
pr_debug("NR PStates %d\n", nr_pstates);
for (i = 0; i < nr_pstates; i++) {
u32 id = be32_to_cpu(pstate_ids[i]);
@@ -216,15 +257,17 @@ static int init_powernv_pstates(void)
pr_debug("PState id %d freq %d MHz\n", id, freq);
powernv_freqs[i].frequency = freq * 1000; /* kHz */
powernv_freqs[i].driver_data = id;
+
+ if (id == pstate_max)
+ powernv_pstate_info.max = i;
+ else if (id == pstate_nominal)
+ powernv_pstate_info.nominal = i;
+ else if (id == pstate_min)
+ powernv_pstate_info.min = i;
}
+
/* End of list marker entry */
powernv_freqs[i].frequency = CPUFREQ_TABLE_END;
-
- powernv_pstate_info.min = pstate_min;
- powernv_pstate_info.max = pstate_max;
- powernv_pstate_info.nominal = pstate_nominal;
- powernv_pstate_info.nr_pstates = nr_pstates;
-
return 0;
}
@@ -233,12 +276,12 @@ static unsigned int pstate_id_to_freq(int pstate_id)
{
int i;
- i = powernv_pstate_info.max - pstate_id;
+ i = pstate_to_idx(pstate_id);
if (i >= powernv_pstate_info.nr_pstates || i < 0) {
pr_warn("PState id %d outside of PState table, "
"reporting nominal id %d instead\n",
- pstate_id, powernv_pstate_info.nominal);
- i = powernv_pstate_info.max - powernv_pstate_info.nominal;
+ pstate_id, idx_to_pstate(powernv_pstate_info.nominal));
+ i = powernv_pstate_info.nominal;
}
return powernv_freqs[i].frequency;
@@ -252,7 +295,7 @@ static ssize_t cpuinfo_nominal_freq_show(struct cpufreq_policy *policy,
char *buf)
{
return sprintf(buf, "%u\n",
- pstate_id_to_freq(powernv_pstate_info.nominal));
+ powernv_freqs[powernv_pstate_info.nominal].frequency);
}
struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq =
@@ -426,7 +469,7 @@ static void set_pstate(void *data)
*/
static inline unsigned int get_nominal_index(void)
{
- return powernv_pstate_info.max - powernv_pstate_info.nominal;
+ return powernv_pstate_info.nominal;
}
static void powernv_cpufreq_throttle_check(void *data)
@@ -435,20 +478,22 @@ static void powernv_cpufreq_throttle_check(void *data)
unsigned int cpu = smp_processor_id();
unsigned long pmsr;
int pmsr_pmax;
+ unsigned int pmsr_pmax_idx;
pmsr = get_pmspr(SPRN_PMSR);
chip = this_cpu_read(chip_info);
/* Check for Pmax Capping */
pmsr_pmax = (s8)PMSR_MAX(pmsr);
- if (pmsr_pmax != powernv_pstate_info.max) {
+ pmsr_pmax_idx = pstate_to_idx(pmsr_pmax);
+ if (pmsr_pmax_idx != powernv_pstate_info.max) {
if (chip->throttled)
goto next;
chip->throttled = true;
- if (pmsr_pmax < powernv_pstate_info.nominal) {
- pr_warn_once("CPU %d on Chip %u has Pmax reduced below nominal frequency (%d < %d)\n",
+ if (pmsr_pmax_idx > powernv_pstate_info.nominal) {
+ pr_warn_once("CPU %d on Chip %u has Pmax(%d) reduced below nominal frequency(%d)\n",
cpu, chip->id, pmsr_pmax,
- powernv_pstate_info.nominal);
+ idx_to_pstate(powernv_pstate_info.nominal));
chip->throttle_sub_turbo++;
} else {
chip->throttle_turbo++;
@@ -484,34 +529,35 @@ next:
/**
* calc_global_pstate - Calculate global pstate
- * @elapsed_time: Elapsed time in milliseconds
- * @local_pstate: New local pstate
- * @highest_lpstate: pstate from which its ramping down
+ * @elapsed_time: Elapsed time in milliseconds
+ * @local_pstate_idx: New local pstate
+ * @highest_lpstate_idx: pstate from which it is ramping down
*
 * Finds the appropriate global pstate based on the pstate from which it is
 * ramping down and the time elapsed in ramping down. It follows a quadratic
 * equation which ensures that the ramp down to pmin completes in 5 seconds.
*/
static inline int calc_global_pstate(unsigned int elapsed_time,
- int highest_lpstate, int local_pstate)
+ int highest_lpstate_idx,
+ int local_pstate_idx)
{
- int pstate_diff;
+ int index_diff;
/*
* Using ramp_down_percent we get the percentage of rampdown
* that we are expecting to be dropping. Difference between
- * highest_lpstate and powernv_pstate_info.min will give a absolute
+ * highest_lpstate_idx and powernv_pstate_info.min will give an absolute
 * number of how many pstates we will drop eventually by the end of
 * 5 seconds, then just scale it to get the number of pstates to be dropped.
*/
- pstate_diff = ((int)ramp_down_percent(elapsed_time) *
- (highest_lpstate - powernv_pstate_info.min)) / 100;
+ index_diff = ((int)ramp_down_percent(elapsed_time) *
+ (powernv_pstate_info.min - highest_lpstate_idx)) / 100;
/* Ensure that global pstate is >= to local pstate */
- if (highest_lpstate - pstate_diff < local_pstate)
- return local_pstate;
+ if (highest_lpstate_idx + index_diff >= local_pstate_idx)
+ return local_pstate_idx;
else
- return highest_lpstate - pstate_diff;
+ return highest_lpstate_idx + index_diff;
}
static inline void queue_gpstate_timer(struct global_pstate_info *gpstates)
@@ -530,8 +576,7 @@ static inline void queue_gpstate_timer(struct global_pstate_info *gpstates)
else
timer_interval = GPSTATE_TIMER_INTERVAL;
- mod_timer_pinned(&gpstates->timer, jiffies +
- msecs_to_jiffies(timer_interval));
+ mod_timer(&gpstates->timer, jiffies + msecs_to_jiffies(timer_interval));
}
/**
@@ -547,7 +592,7 @@ void gpstate_timer_handler(unsigned long data)
{
struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
struct global_pstate_info *gpstates = policy->driver_data;
- int gpstate_id;
+ int gpstate_idx;
unsigned int time_diff = jiffies_to_msecs(jiffies)
- gpstates->last_sampled_time;
struct powernv_smp_call_data freq_data;
@@ -557,29 +602,29 @@ void gpstate_timer_handler(unsigned long data)
gpstates->last_sampled_time += time_diff;
gpstates->elapsed_time += time_diff;
- freq_data.pstate_id = gpstates->last_lpstate;
+ freq_data.pstate_id = idx_to_pstate(gpstates->last_lpstate_idx);
- if ((gpstates->last_gpstate == freq_data.pstate_id) ||
+ if ((gpstates->last_gpstate_idx == gpstates->last_lpstate_idx) ||
(gpstates->elapsed_time > MAX_RAMP_DOWN_TIME)) {
- gpstate_id = freq_data.pstate_id;
+ gpstate_idx = pstate_to_idx(freq_data.pstate_id);
reset_gpstates(policy);
- gpstates->highest_lpstate = freq_data.pstate_id;
+ gpstates->highest_lpstate_idx = gpstate_idx;
} else {
- gpstate_id = calc_global_pstate(gpstates->elapsed_time,
- gpstates->highest_lpstate,
- freq_data.pstate_id);
+ gpstate_idx = calc_global_pstate(gpstates->elapsed_time,
+ gpstates->highest_lpstate_idx,
+ gpstates->last_lpstate_idx);
}
/*
* If local pstate is equal to global pstate, rampdown is over
* So timer is not required to be queued.
*/
- if (gpstate_id != freq_data.pstate_id)
+ if (gpstate_idx != gpstates->last_lpstate_idx)
queue_gpstate_timer(gpstates);
- freq_data.gpstate_id = gpstate_id;
- gpstates->last_gpstate = freq_data.gpstate_id;
- gpstates->last_lpstate = freq_data.pstate_id;
+ freq_data.gpstate_id = idx_to_pstate(gpstate_idx);
+ gpstates->last_gpstate_idx = pstate_to_idx(freq_data.gpstate_id);
+ gpstates->last_lpstate_idx = pstate_to_idx(freq_data.pstate_id);
spin_unlock(&gpstates->gpstate_lock);
@@ -596,7 +641,7 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
unsigned int new_index)
{
struct powernv_smp_call_data freq_data;
- unsigned int cur_msec, gpstate_id;
+ unsigned int cur_msec, gpstate_idx;
struct global_pstate_info *gpstates = policy->driver_data;
if (unlikely(rebooting) && new_index != get_nominal_index())
@@ -608,15 +653,15 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
cur_msec = jiffies_to_msecs(get_jiffies_64());
spin_lock(&gpstates->gpstate_lock);
- freq_data.pstate_id = powernv_freqs[new_index].driver_data;
+ freq_data.pstate_id = idx_to_pstate(new_index);
if (!gpstates->last_sampled_time) {
- gpstate_id = freq_data.pstate_id;
- gpstates->highest_lpstate = freq_data.pstate_id;
+ gpstate_idx = new_index;
+ gpstates->highest_lpstate_idx = new_index;
goto gpstates_done;
}
- if (gpstates->last_gpstate > freq_data.pstate_id) {
+ if (gpstates->last_gpstate_idx < new_index) {
gpstates->elapsed_time += cur_msec -
gpstates->last_sampled_time;
@@ -627,34 +672,34 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
*/
if (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME) {
reset_gpstates(policy);
- gpstates->highest_lpstate = freq_data.pstate_id;
- gpstate_id = freq_data.pstate_id;
+ gpstates->highest_lpstate_idx = new_index;
+ gpstate_idx = new_index;
} else {
/* Elapsed_time is less than 5 seconds, continue to rampdown */
- gpstate_id = calc_global_pstate(gpstates->elapsed_time,
- gpstates->highest_lpstate,
- freq_data.pstate_id);
+ gpstate_idx = calc_global_pstate(gpstates->elapsed_time,
+ gpstates->highest_lpstate_idx,
+ new_index);
}
} else {
reset_gpstates(policy);
- gpstates->highest_lpstate = freq_data.pstate_id;
- gpstate_id = freq_data.pstate_id;
+ gpstates->highest_lpstate_idx = new_index;
+ gpstate_idx = new_index;
}
/*
* If local pstate is equal to global pstate, rampdown is over
* So timer is not required to be queued.
*/
- if (gpstate_id != freq_data.pstate_id)
+ if (gpstate_idx != new_index)
queue_gpstate_timer(gpstates);
else
del_timer_sync(&gpstates->timer);
gpstates_done:
- freq_data.gpstate_id = gpstate_id;
+ freq_data.gpstate_id = idx_to_pstate(gpstate_idx);
gpstates->last_sampled_time = cur_msec;
- gpstates->last_gpstate = freq_data.gpstate_id;
- gpstates->last_lpstate = freq_data.pstate_id;
+ gpstates->last_gpstate_idx = gpstate_idx;
+ gpstates->last_lpstate_idx = new_index;
spin_unlock(&gpstates->gpstate_lock);
@@ -699,7 +744,7 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->driver_data = gpstates;
/* initialize timer */
- init_timer_deferrable(&gpstates->timer);
+ init_timer_pinned_deferrable(&gpstates->timer);
gpstates->timer.data = (unsigned long)policy;
gpstates->timer.function = gpstate_timer_handler;
gpstates->timer.expires = jiffies +
@@ -760,9 +805,7 @@ void powernv_cpufreq_work_fn(struct work_struct *work)
struct cpufreq_policy policy;
cpufreq_get_policy(&policy, cpu);
- cpufreq_frequency_table_target(&policy, policy.freq_table,
- policy.cur,
- CPUFREQ_RELATION_C, &index);
+ index = cpufreq_table_find_index_c(&policy, policy.cur);
powernv_cpufreq_target_index(&policy, index);
cpumask_andnot(&mask, &mask, policy.cpus);
}
@@ -848,8 +891,8 @@ static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy)
struct powernv_smp_call_data freq_data;
struct global_pstate_info *gpstates = policy->driver_data;
- freq_data.pstate_id = powernv_pstate_info.min;
- freq_data.gpstate_id = powernv_pstate_info.min;
+ freq_data.pstate_id = idx_to_pstate(powernv_pstate_info.min);
+ freq_data.gpstate_id = idx_to_pstate(powernv_pstate_info.min);
smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1);
del_timer_sync(&gpstates->timer);
}
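
The powernv rework above switches all bookkeeping from raw pstate IDs to cpufreq table indices, with index 0 being the fastest (max) pstate. A standalone model of the conversion pair (the -7..0 range is illustrative; the driver reads the real values from the device tree, and its idx_to_pstate() is a table lookup rather than this closed form):

#include <stdio.h>
#include <stdlib.h>

static const int pstate_max = 0, pstate_min = -7;

static int idx_to_pstate(unsigned int i)
{
	return pstate_max - (int)i;	/* contiguous, descending with index */
}

static unsigned int pstate_to_idx(int pstate)
{
	/* abs() makes the same formula work whether the platform numbers
	 * pstates up or down, as the comment in the hunk above notes */
	return abs(pstate - pstate_max);
}

int main(void)
{
	for (int ps = pstate_max; ps >= pstate_min; ps--)
		printf("pstate %3d -> idx %u -> pstate %3d\n",
		       ps, pstate_to_idx(ps), idx_to_pstate(pstate_to_idx(ps)));
	return 0;
}
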
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
index 7c4cd5c63..dc112481a 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
@@ -94,7 +94,7 @@ static int pmi_notifier(struct notifier_block *nb,
unsigned long event, void *data)
{
struct cpufreq_policy *policy = data;
- struct cpufreq_frequency_table *cbe_freqs;
+ struct cpufreq_frequency_table *cbe_freqs = policy->freq_table;
u8 node;
/* Should this really be called for CPUFREQ_ADJUST and CPUFREQ_NOTIFY
@@ -103,7 +103,6 @@ static int pmi_notifier(struct notifier_block *nb,
if (event == CPUFREQ_START)
return 0;
- cbe_freqs = cpufreq_frequency_get_table(policy->cpu);
node = cbe_cpu_to_node(policy->cpu);
pr_debug("got notified, event=%lu, node=%u\n", event, node);
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index ae8eaed77..7b596fa38 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -293,12 +293,8 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy,
__func__, policy, target_freq, relation);
if (ftab) {
- if (cpufreq_frequency_table_target(policy, ftab,
- target_freq, relation,
- &index)) {
- s3c_freq_dbg("%s: table failed\n", __func__);
- return -EINVAL;
- }
+ index = cpufreq_frequency_table_target(policy, target_freq,
+ relation);
s3c_freq_dbg("%s: adjust %d to entry %d (%u)\n", __func__,
target_freq, index, ftab[index].frequency);
@@ -315,7 +311,6 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy,
pll = NULL;
} else {
struct cpufreq_policy tmp_policy;
- int ret;
/* we keep the cpu pll table in Hz, to ensure we get an
* accurate value for the PLL output. */
@@ -323,20 +318,14 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy,
tmp_policy.min = policy->min * 1000;
tmp_policy.max = policy->max * 1000;
tmp_policy.cpu = policy->cpu;
+ tmp_policy.freq_table = pll_reg;
- /* cpufreq_frequency_table_target uses a pointer to 'index'
- * which is the number of the table entry, not the value of
+ /* cpufreq_frequency_table_target returns the index
+ * of the table entry, not the value of
* the table entry's index field. */
- ret = cpufreq_frequency_table_target(&tmp_policy, pll_reg,
- target_freq, relation,
- &index);
-
- if (ret < 0) {
- pr_err("%s: no PLL available\n", __func__);
- goto err_notpossible;
- }
-
+ index = cpufreq_frequency_table_target(&tmp_policy, target_freq,
+ relation);
pll = pll_reg + index;
s3c_freq_dbg("%s: target %u => %u\n",
@@ -346,10 +335,6 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy,
}
return s3c_cpufreq_settarget(policy, target_freq, pll);
-
- err_notpossible:
- pr_err("no compatible settings for %d\n", target_freq);
- return -EINVAL;
}
struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
@@ -571,11 +556,7 @@ static int s3c_cpufreq_build_freq(void)
{
int size, ret;
- if (!cpu_cur.info->calc_freqtable)
- return -EINVAL;
-
kfree(ftab);
- ftab = NULL;
size = cpu_cur.info->calc_freqtable(&cpu_cur, NULL, 0);
size++;
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 06d85917b..f82074eea 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -220,7 +220,7 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
tmp1 /= tmp;
- __raw_writel(tmp1, reg);
+ writel_relaxed(tmp1, reg);
}
static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
@@ -246,12 +246,7 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
new_freq = s5pv210_freq_table[index].frequency;
/* Finding current running level index */
- if (cpufreq_frequency_table_target(policy, s5pv210_freq_table,
- old_freq, CPUFREQ_RELATION_H,
- &priv_index)) {
- ret = -EINVAL;
- goto exit;
- }
+ priv_index = cpufreq_table_find_index_h(policy, old_freq);
arm_volt = dvs_conf[index].arm_volt;
int_volt = dvs_conf[index].int_volt;
@@ -301,29 +296,29 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
* 1. Temporarily change the divider for MFC and G3D
* SCLKA2M(200/1=200)->(200/4=50)MHz
*/
- reg = __raw_readl(S5P_CLK_DIV2);
+ reg = readl_relaxed(S5P_CLK_DIV2);
reg &= ~(S5P_CLKDIV2_G3D_MASK | S5P_CLKDIV2_MFC_MASK);
reg |= (3 << S5P_CLKDIV2_G3D_SHIFT) |
(3 << S5P_CLKDIV2_MFC_SHIFT);
- __raw_writel(reg, S5P_CLK_DIV2);
+ writel_relaxed(reg, S5P_CLK_DIV2);
/* For MFC, G3D dividing */
do {
- reg = __raw_readl(S5P_CLKDIV_STAT0);
+ reg = readl_relaxed(S5P_CLKDIV_STAT0);
} while (reg & ((1 << 16) | (1 << 17)));
/*
* 2. Change SCLKA2M(200MHz) to SCLKMPLL in MFC_MUX, G3D MUX
* (200/4=50)->(667/4=166)MHz
*/
- reg = __raw_readl(S5P_CLK_SRC2);
+ reg = readl_relaxed(S5P_CLK_SRC2);
reg &= ~(S5P_CLKSRC2_G3D_MASK | S5P_CLKSRC2_MFC_MASK);
reg |= (1 << S5P_CLKSRC2_G3D_SHIFT) |
(1 << S5P_CLKSRC2_MFC_SHIFT);
- __raw_writel(reg, S5P_CLK_SRC2);
+ writel_relaxed(reg, S5P_CLK_SRC2);
do {
- reg = __raw_readl(S5P_CLKMUX_STAT1);
+ reg = readl_relaxed(S5P_CLKMUX_STAT1);
} while (reg & ((1 << 7) | (1 << 3)));
/*
@@ -335,19 +330,19 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
s5pv210_set_refresh(DMC1, 133000);
/* 4. SCLKAPLL -> SCLKMPLL */
- reg = __raw_readl(S5P_CLK_SRC0);
+ reg = readl_relaxed(S5P_CLK_SRC0);
reg &= ~(S5P_CLKSRC0_MUX200_MASK);
reg |= (0x1 << S5P_CLKSRC0_MUX200_SHIFT);
- __raw_writel(reg, S5P_CLK_SRC0);
+ writel_relaxed(reg, S5P_CLK_SRC0);
do {
- reg = __raw_readl(S5P_CLKMUX_STAT0);
+ reg = readl_relaxed(S5P_CLKMUX_STAT0);
} while (reg & (0x1 << 18));
}
/* Change divider */
- reg = __raw_readl(S5P_CLK_DIV0);
+ reg = readl_relaxed(S5P_CLK_DIV0);
reg &= ~(S5P_CLKDIV0_APLL_MASK | S5P_CLKDIV0_A2M_MASK |
S5P_CLKDIV0_HCLK200_MASK | S5P_CLKDIV0_PCLK100_MASK |
@@ -363,25 +358,25 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
(clkdiv_val[index][6] << S5P_CLKDIV0_HCLK133_SHIFT) |
(clkdiv_val[index][7] << S5P_CLKDIV0_PCLK66_SHIFT));
- __raw_writel(reg, S5P_CLK_DIV0);
+ writel_relaxed(reg, S5P_CLK_DIV0);
do {
- reg = __raw_readl(S5P_CLKDIV_STAT0);
+ reg = readl_relaxed(S5P_CLKDIV_STAT0);
} while (reg & 0xff);
/* ARM MCS value changed */
- reg = __raw_readl(S5P_ARM_MCS_CON);
+ reg = readl_relaxed(S5P_ARM_MCS_CON);
reg &= ~0x3;
if (index >= L3)
reg |= 0x3;
else
reg |= 0x1;
- __raw_writel(reg, S5P_ARM_MCS_CON);
+ writel_relaxed(reg, S5P_ARM_MCS_CON);
if (pll_changing) {
/* 5. Set Lock time = 30us*24MHz = 0x2cf */
- __raw_writel(0x2cf, S5P_APLL_LOCK);
+ writel_relaxed(0x2cf, S5P_APLL_LOCK);
/*
* 6. Turn on APLL
@@ -389,12 +384,12 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
* 6-2. Wait until the PLL is locked
*/
if (index == L0)
- __raw_writel(APLL_VAL_1000, S5P_APLL_CON);
+ writel_relaxed(APLL_VAL_1000, S5P_APLL_CON);
else
- __raw_writel(APLL_VAL_800, S5P_APLL_CON);
+ writel_relaxed(APLL_VAL_800, S5P_APLL_CON);
do {
- reg = __raw_readl(S5P_APLL_CON);
+ reg = readl_relaxed(S5P_APLL_CON);
} while (!(reg & (0x1 << 29)));
/*
@@ -402,39 +397,39 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
* to SCLKA2M(200MHz) in MFC_MUX and G3D MUX
* (667/4=166)->(200/4=50)MHz
*/
- reg = __raw_readl(S5P_CLK_SRC2);
+ reg = readl_relaxed(S5P_CLK_SRC2);
reg &= ~(S5P_CLKSRC2_G3D_MASK | S5P_CLKSRC2_MFC_MASK);
reg |= (0 << S5P_CLKSRC2_G3D_SHIFT) |
(0 << S5P_CLKSRC2_MFC_SHIFT);
- __raw_writel(reg, S5P_CLK_SRC2);
+ writel_relaxed(reg, S5P_CLK_SRC2);
do {
- reg = __raw_readl(S5P_CLKMUX_STAT1);
+ reg = readl_relaxed(S5P_CLKMUX_STAT1);
} while (reg & ((1 << 7) | (1 << 3)));
/*
* 8. Change divider for MFC and G3D
* (200/4=50)->(200/1=200)MHz
*/
- reg = __raw_readl(S5P_CLK_DIV2);
+ reg = readl_relaxed(S5P_CLK_DIV2);
reg &= ~(S5P_CLKDIV2_G3D_MASK | S5P_CLKDIV2_MFC_MASK);
reg |= (clkdiv_val[index][10] << S5P_CLKDIV2_G3D_SHIFT) |
(clkdiv_val[index][9] << S5P_CLKDIV2_MFC_SHIFT);
- __raw_writel(reg, S5P_CLK_DIV2);
+ writel_relaxed(reg, S5P_CLK_DIV2);
/* For MFC, G3D dividing */
do {
- reg = __raw_readl(S5P_CLKDIV_STAT0);
+ reg = readl_relaxed(S5P_CLKDIV_STAT0);
} while (reg & ((1 << 16) | (1 << 17)));
/* 9. Change MPLL to APLL in MSYS_MUX */
- reg = __raw_readl(S5P_CLK_SRC0);
+ reg = readl_relaxed(S5P_CLK_SRC0);
reg &= ~(S5P_CLKSRC0_MUX200_MASK);
reg |= (0x0 << S5P_CLKSRC0_MUX200_SHIFT);
- __raw_writel(reg, S5P_CLK_SRC0);
+ writel_relaxed(reg, S5P_CLK_SRC0);
do {
- reg = __raw_readl(S5P_CLKMUX_STAT0);
+ reg = readl_relaxed(S5P_CLKMUX_STAT0);
} while (reg & (0x1 << 18));
/*
@@ -451,13 +446,13 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
* and memory refresh parameter should be changed
*/
if (bus_speed_changing) {
- reg = __raw_readl(S5P_CLK_DIV6);
+ reg = readl_relaxed(S5P_CLK_DIV6);
reg &= ~S5P_CLKDIV6_ONEDRAM_MASK;
reg |= (clkdiv_val[index][8] << S5P_CLKDIV6_ONEDRAM_SHIFT);
- __raw_writel(reg, S5P_CLK_DIV6);
+ writel_relaxed(reg, S5P_CLK_DIV6);
do {
- reg = __raw_readl(S5P_CLKDIV_STAT1);
+ reg = readl_relaxed(S5P_CLKDIV_STAT1);
} while (reg & (1 << 15));
/* Reconfigure DRAM refresh counter value */
@@ -497,7 +492,7 @@ static int check_mem_type(void __iomem *dmc_reg)
{
unsigned long val;
- val = __raw_readl(dmc_reg + 0x4);
+ val = readl_relaxed(dmc_reg + 0x4);
val = (val & (0xf << 8));
return val >> 8;
@@ -542,10 +537,10 @@ static int s5pv210_cpu_init(struct cpufreq_policy *policy)
}
/* Find the current refresh counter and frequency for each DMC */
- s5pv210_dram_conf[0].refresh = (__raw_readl(dmc_base[0] + 0x30) * 1000);
+ s5pv210_dram_conf[0].refresh = (readl_relaxed(dmc_base[0] + 0x30) * 1000);
s5pv210_dram_conf[0].freq = clk_get_rate(dmc0_clk);
- s5pv210_dram_conf[1].refresh = (__raw_readl(dmc_base[1] + 0x30) * 1000);
+ s5pv210_dram_conf[1].refresh = (readl_relaxed(dmc_base[1] + 0x30) * 1000);
s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk);
policy->suspend_freq = SLEEP_FREQ;
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index e342565e8..f440d385e 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -36,26 +36,12 @@
static int arm_enter_idle_state(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int idx)
{
- int ret;
-
- if (!idx) {
- cpu_do_idle();
- return idx;
- }
-
- ret = cpu_pm_enter();
- if (!ret) {
- /*
- * Pass idle state index to cpu_suspend which in turn will
- * call the CPU ops suspend protocol with idle index as a
- * parameter.
- */
- ret = arm_cpuidle_suspend(idx);
-
- cpu_pm_exit();
- }
-
- return ret ? -1 : idx;
+ /*
+ * Pass idle state index to arm_cpuidle_suspend which in turn
+ * will call the CPU ops suspend protocol with idle index as a
+ * parameter.
+ */
+ return CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, idx);
}
static struct cpuidle_driver arm_idle_driver = {
@@ -135,6 +121,7 @@ static int __init arm_idle_init(void)
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
pr_err("Failed to allocate cpuidle device\n");
+ ret = -ENOMEM;
goto out_fail;
}
dev->cpu = cpu;
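The CPU_PM_CPU_IDLE_ENTER() conversion above drops the driver's open-coded enter path in favor of a shared helper. Reconstructed from the removed code, the macro's shape is roughly the following (a sketch; the canonical definition lives in include/linux/cpuidle.h):

#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx)	\
({								\
	int __ret = 0;						\
								\
	if (!idx) {						\
		/* State 0 is plain WFI, no PM notifiers. */	\
		cpu_do_idle();					\
	} else {						\
		__ret = cpu_pm_enter();				\
		if (!__ret) {					\
			__ret = low_level_idle_enter(idx);	\
			cpu_pm_exit();				\
		}						\
	}							\
	__ret ? -1 : idx;					\
})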
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index e12dc30d8..f7ca891b5 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -20,7 +20,7 @@
#include <asm/opal.h>
#include <asm/runlatch.h>
-#define MAX_POWERNV_IDLE_STATES 8
+#define POWERNV_THRESHOLD_LATENCY_NS 200000
struct cpuidle_driver powernv_idle_driver = {
.name = "powernv_idle",
@@ -29,6 +29,9 @@ struct cpuidle_driver powernv_idle_driver = {
static int max_idle_state;
static struct cpuidle_state *cpuidle_state_table;
+
+static u64 stop_psscr_table[CPUIDLE_STATE_MAX];
+
static u64 snooze_timeout;
static bool snooze_timeout_en;
@@ -93,16 +96,27 @@ static int fastsleep_loop(struct cpuidle_device *dev,
return index;
}
#endif
+
+static int stop_loop(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ ppc64_runlatch_off();
+ power9_idle_stop(stop_psscr_table[index]);
+ ppc64_runlatch_on();
+ return index;
+}
+
/*
* States for dedicated partition case.
*/
-static struct cpuidle_state powernv_states[MAX_POWERNV_IDLE_STATES] = {
+static struct cpuidle_state powernv_states[CPUIDLE_STATE_MAX] = {
{ /* Snooze */
.name = "snooze",
.desc = "snooze",
.exit_latency = 0,
.target_residency = 0,
- .enter = &snooze_loop },
+ .enter = snooze_loop },
};
static int powernv_cpuidle_add_cpu_notifier(struct notifier_block *n,
@@ -168,7 +182,11 @@ static int powernv_add_idle_states(void)
struct device_node *power_mgt;
int nr_idle_states = 1; /* Snooze */
int dt_idle_states;
- u32 *latency_ns, *residency_ns, *flags;
+ u32 latency_ns[CPUIDLE_STATE_MAX];
+ u32 residency_ns[CPUIDLE_STATE_MAX];
+ u32 flags[CPUIDLE_STATE_MAX];
+ u64 psscr_val[CPUIDLE_STATE_MAX];
+ const char *names[CPUIDLE_STATE_MAX];
int i, rc;
/* Currently we have snooze statically defined */
@@ -186,26 +204,55 @@ static int powernv_add_idle_states(void)
goto out;
}
- flags = kzalloc(sizeof(*flags) * dt_idle_states, GFP_KERNEL);
+ /*
+ * Since snooze is used as the first idle state, the maximum number
+ * of idle states allowed is CPUIDLE_STATE_MAX - 1
+ */
+ if (dt_idle_states > CPUIDLE_STATE_MAX - 1) {
+ pr_warn("cpuidle-powernv: discovered more idle states than allowed\n");
+ dt_idle_states = CPUIDLE_STATE_MAX - 1;
+ }
+
if (of_property_read_u32_array(power_mgt,
"ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
pr_warn("cpuidle-powernv : missing ibm,cpu-idle-state-flags in DT\n");
- goto out_free_flags;
+ goto out;
}
- latency_ns = kzalloc(sizeof(*latency_ns) * dt_idle_states, GFP_KERNEL);
- rc = of_property_read_u32_array(power_mgt,
- "ibm,cpu-idle-state-latencies-ns", latency_ns, dt_idle_states);
- if (rc) {
+ if (of_property_read_u32_array(power_mgt,
+ "ibm,cpu-idle-state-latencies-ns", latency_ns,
+ dt_idle_states)) {
pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
- goto out_free_latency;
+ goto out;
+ }
+ if (of_property_read_string_array(power_mgt,
+ "ibm,cpu-idle-state-names", names, dt_idle_states) < 0) {
+ pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-names in DT\n");
+ goto out;
}
- residency_ns = kzalloc(sizeof(*residency_ns) * dt_idle_states, GFP_KERNEL);
+ /*
+ * If the idle states use stop instruction, probe for psscr values
+ * which are necessary to specify required stop level.
+ */
+ if (flags[0] & (OPAL_PM_STOP_INST_FAST | OPAL_PM_STOP_INST_DEEP))
+ if (of_property_read_u64_array(power_mgt,
+ "ibm,cpu-idle-state-psscr", psscr_val, dt_idle_states)) {
+ pr_warn("cpuidle-powernv: missing ibm,cpu-idle-states-psscr in DT\n");
+ goto out;
+ }
+
rc = of_property_read_u32_array(power_mgt,
"ibm,cpu-idle-state-residency-ns", residency_ns, dt_idle_states);
for (i = 0; i < dt_idle_states; i++) {
+ /*
+ * If an idle state has an exit latency beyond
+ * POWERNV_THRESHOLD_LATENCY_NS, don't expose it
+ * through cpuidle.
+ */
+ if (latency_ns[i] > POWERNV_THRESHOLD_LATENCY_NS)
+ continue;
/*
* Cpuidle accepts exit_latency and target_residency in us.
@@ -217,7 +264,17 @@ static int powernv_add_idle_states(void)
strcpy(powernv_states[nr_idle_states].desc, "Nap");
powernv_states[nr_idle_states].flags = 0;
powernv_states[nr_idle_states].target_residency = 100;
- powernv_states[nr_idle_states].enter = &nap_loop;
+ powernv_states[nr_idle_states].enter = nap_loop;
+ } else if ((flags[i] & OPAL_PM_STOP_INST_FAST) &&
+ !(flags[i] & OPAL_PM_TIMEBASE_STOP)) {
+ strncpy(powernv_states[nr_idle_states].name,
+ names[i], CPUIDLE_NAME_LEN);
+ strncpy(powernv_states[nr_idle_states].desc,
+ names[i], CPUIDLE_NAME_LEN);
+ powernv_states[nr_idle_states].flags = 0;
+
+ powernv_states[nr_idle_states].enter = stop_loop;
+ stop_psscr_table[nr_idle_states] = psscr_val[i];
}
/*
@@ -232,7 +289,17 @@ static int powernv_add_idle_states(void)
strcpy(powernv_states[nr_idle_states].desc, "FastSleep");
powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIMER_STOP;
powernv_states[nr_idle_states].target_residency = 300000;
- powernv_states[nr_idle_states].enter = &fastsleep_loop;
+ powernv_states[nr_idle_states].enter = fastsleep_loop;
+ } else if ((flags[i] & OPAL_PM_STOP_INST_DEEP) &&
+ (flags[i] & OPAL_PM_TIMEBASE_STOP)) {
+ strncpy(powernv_states[nr_idle_states].name,
+ names[i], CPUIDLE_NAME_LEN);
+ strncpy(powernv_states[nr_idle_states].desc,
+ names[i], CPUIDLE_NAME_LEN);
+
+ powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIMER_STOP;
+ powernv_states[nr_idle_states].enter = stop_loop;
+ stop_psscr_table[nr_idle_states] = psscr_val[i];
}
#endif
powernv_states[nr_idle_states].exit_latency =
@@ -245,12 +312,6 @@ static int powernv_add_idle_states(void)
nr_idle_states++;
}
-
- kfree(residency_ns);
-out_free_latency:
- kfree(latency_ns);
-out_free_flags:
- kfree(flags);
out:
return nr_idle_states;
}
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index d77ba2f12..1af94e2d1 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -159,6 +159,19 @@ config CRYPTO_GHASH_S390
It is available as of z196.
+config CRYPTO_CRC32_S390
+ tristate "CRC-32 algorithms"
+ depends on S390
+ select CRYPTO_HASH
+ select CRC32
+ help
+ Select this option if you want to use hardware accelerated
+ implementations of CRC algorithms. With this option, you
+ can optimize the computation of CRC-32 (IEEE 802.3 Ethernet)
+ and CRC-32C (Castagnoli).
+
+ It is available with IBM z13 or later.
+
config CRYPTO_DEV_MV_CESA
tristate "Marvell's Cryptographic Engine"
depends on PLAT_ORION
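The CRYPTO_CRC32_S390 transforms above register under the generic algorithm names, so callers reach them through the shash API rather than any driver-specific entry point. A minimal, hedged usage sketch ("crc32" is the standard algorithm name; error handling trimmed):

#include <crypto/hash.h>

static int crc32_digest(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	int ret;

	tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		/* One-shot digest over a single linear buffer. */
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;
		ret = crypto_shash_digest(desc, data, len, out);
	}

	crypto_free_shash(tfm);
	return ret;
}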
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index 95b73968c..10db7df36 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -588,11 +588,6 @@ static int bfin_crypto_crc_probe(struct platform_device *pdev)
crypto_init_queue(&crc->queue, CRC_CCRYPTO_QUEUE_LENGTH);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
- return -ENOENT;
- }
-
crc->regs = devm_ioremap_resource(dev, res);
if (IS_ERR((void *)crc->regs)) {
dev_err(&pdev->dev, "Cannot map CRC IO\n");
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 5652a5341..64bf3024b 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -1,6 +1,6 @@
config CRYPTO_DEV_FSL_CAAM
tristate "Freescale CAAM-Multicore driver backend"
- depends on FSL_SOC || ARCH_MXC
+ depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
help
Enables the driver module for Freescale's Cryptographic Accelerator
and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
@@ -99,6 +99,18 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
To compile this as a module, choose M here: the module
will be called caamhash.
+config CRYPTO_DEV_FSL_CAAM_PKC_API
+ tristate "Register public key cryptography implementations with Crypto API"
+ depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+ default y
+ select CRYPTO_RSA
+ help
+ Selecting this will enable SEC public-key support for RSA.
+ Supported cryptographic primitives: encryption, decryption,
+ signature and verification.
+ To compile this as a module, choose M here: the module
+ will be called caam_pkc.
+
config CRYPTO_DEV_FSL_CAAM_RNG_API
tristate "Register caam device for hwrng API"
depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
@@ -116,10 +128,6 @@ config CRYPTO_DEV_FSL_CAAM_IMX
def_bool SOC_IMX6 || SOC_IMX7D
depends on CRYPTO_DEV_FSL_CAAM
-config CRYPTO_DEV_FSL_CAAM_LE
- def_bool CRYPTO_DEV_FSL_CAAM_IMX || SOC_LS1021A
- depends on CRYPTO_DEV_FSL_CAAM
-
config CRYPTO_DEV_FSL_CAAM_DEBUG
bool "Enable debug output in CAAM driver"
depends on CRYPTO_DEV_FSL_CAAM
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index 550758a33..08bf5515a 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -2,7 +2,7 @@
# Makefile for the CAAM backend and dependent components
#
ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y)
- EXTRA_CFLAGS := -DDEBUG
+ ccflags-y := -DDEBUG
endif
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
@@ -10,6 +10,8 @@ obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
caam-objs := ctrl.o
caam_jr-objs := jr.o key_gen.o error.o
+caam_pkc-y := caampkc.o pkc_desc.o
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index e9703f9d1..36365b3ef 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -847,7 +847,7 @@ static int ahash_update_ctx(struct ahash_request *req)
*next_buflen, 0);
} else {
(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
- SEC4_SG_LEN_FIN;
+ cpu_to_caam32(SEC4_SG_LEN_FIN);
}
state->current_buf = !state->current_buf;
@@ -949,7 +949,8 @@ static int ahash_final_ctx(struct ahash_request *req)
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
buf, state->buf_dma, buflen,
last_buflen);
- (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
+ (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
+ cpu_to_caam32(SEC4_SG_LEN_FIN);
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
new file mode 100644
index 000000000..851015e65
--- /dev/null
+++ b/drivers/crypto/caam/caampkc.c
@@ -0,0 +1,607 @@
+/*
+ * caam - Freescale FSL CAAM support for Public Key Cryptography
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ *
+ * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
+ * all the desired key parameters, input and output pointers.
+ */
+#include "compat.h"
+#include "regs.h"
+#include "intern.h"
+#include "jr.h"
+#include "error.h"
+#include "desc_constr.h"
+#include "sg_sw_sec4.h"
+#include "caampkc.h"
+
+#define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
+#define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \
+ sizeof(struct rsa_priv_f1_pdb))
+
+static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
+ struct akcipher_request *req)
+{
+ dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
+ dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
+
+ if (edesc->sec4_sg_bytes)
+ dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
+ DMA_TO_DEVICE);
+}
+
+static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
+ struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_key *key = &ctx->key;
+ struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
+
+ dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
+ dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
+}
+
+static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
+ struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_key *key = &ctx->key;
+ struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
+
+ dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
+ dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
+}
+
+/* RSA Job Completion handler */
+static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
+{
+ struct akcipher_request *req = context;
+ struct rsa_edesc *edesc;
+
+ if (err)
+ caam_jr_strstatus(dev, err);
+
+ edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
+
+ rsa_pub_unmap(dev, edesc, req);
+ rsa_io_unmap(dev, edesc, req);
+ kfree(edesc);
+
+ akcipher_request_complete(req, err);
+}
+
+static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
+ void *context)
+{
+ struct akcipher_request *req = context;
+ struct rsa_edesc *edesc;
+
+ if (err)
+ caam_jr_strstatus(dev, err);
+
+ edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
+
+ rsa_priv_f1_unmap(dev, edesc, req);
+ rsa_io_unmap(dev, edesc, req);
+ kfree(edesc);
+
+ akcipher_request_complete(req, err);
+}
+
+static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
+ size_t desclen)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct device *dev = ctx->dev;
+ struct rsa_edesc *edesc;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ int sgc;
+ int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
+ int src_nents, dst_nents;
+
+ src_nents = sg_nents_for_len(req->src, req->src_len);
+ dst_nents = sg_nents_for_len(req->dst, req->dst_len);
+
+ if (src_nents > 1)
+ sec4_sg_len = src_nents;
+ if (dst_nents > 1)
+ sec4_sg_len += dst_nents;
+
+ sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
+
+ /* allocate space for base edesc, hw desc commands and link tables */
+ edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
+ GFP_DMA | flags);
+ if (!edesc)
+ return ERR_PTR(-ENOMEM);
+
+ sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+ if (unlikely(!sgc)) {
+ dev_err(dev, "unable to map source\n");
+ goto src_fail;
+ }
+
+ sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
+ if (unlikely(!sgc)) {
+ dev_err(dev, "unable to map destination\n");
+ goto dst_fail;
+ }
+
+ edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
+
+ sec4_sg_index = 0;
+ if (src_nents > 1) {
+ sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
+ sec4_sg_index += src_nents;
+ }
+ if (dst_nents > 1)
+ sg_to_sec4_sg_last(req->dst, dst_nents,
+ edesc->sec4_sg + sec4_sg_index, 0);
+
+ /* Save nents for later use in Job Descriptor */
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+
+ if (!sec4_sg_bytes)
+ return edesc;
+
+ edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
+ dev_err(dev, "unable to map S/G table\n");
+ goto sec4_sg_fail;
+ }
+
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+
+ return edesc;
+
+sec4_sg_fail:
+ dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
+dst_fail:
+ dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+src_fail:
+ kfree(edesc);
+ return ERR_PTR(-ENOMEM);
+}
+
+static int set_rsa_pub_pdb(struct akcipher_request *req,
+ struct rsa_edesc *edesc)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_key *key = &ctx->key;
+ struct device *dev = ctx->dev;
+ struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
+ int sec4_sg_index = 0;
+
+ pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, pdb->n_dma)) {
+ dev_err(dev, "Unable to map RSA modulus memory\n");
+ return -ENOMEM;
+ }
+
+ pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, pdb->e_dma)) {
+ dev_err(dev, "Unable to map RSA public exponent memory\n");
+ dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ if (edesc->src_nents > 1) {
+ pdb->sgf |= RSA_PDB_SGF_F;
+ pdb->f_dma = edesc->sec4_sg_dma;
+ sec4_sg_index += edesc->src_nents;
+ } else {
+ pdb->f_dma = sg_dma_address(req->src);
+ }
+
+ if (edesc->dst_nents > 1) {
+ pdb->sgf |= RSA_PDB_SGF_G;
+ pdb->g_dma = edesc->sec4_sg_dma +
+ sec4_sg_index * sizeof(struct sec4_sg_entry);
+ } else {
+ pdb->g_dma = sg_dma_address(req->dst);
+ }
+
+ pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
+ pdb->f_len = req->src_len;
+
+ return 0;
+}
+
+static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
+ struct rsa_edesc *edesc)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_key *key = &ctx->key;
+ struct device *dev = ctx->dev;
+ struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
+ int sec4_sg_index = 0;
+
+ pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, pdb->n_dma)) {
+ dev_err(dev, "Unable to map modulus memory\n");
+ return -ENOMEM;
+ }
+
+ pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, pdb->d_dma)) {
+ dev_err(dev, "Unable to map RSA private exponent memory\n");
+ dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ if (edesc->src_nents > 1) {
+ pdb->sgf |= RSA_PRIV_PDB_SGF_G;
+ pdb->g_dma = edesc->sec4_sg_dma;
+ sec4_sg_index += edesc->src_nents;
+ } else {
+ pdb->g_dma = sg_dma_address(req->src);
+ }
+
+ if (edesc->dst_nents > 1) {
+ pdb->sgf |= RSA_PRIV_PDB_SGF_F;
+ pdb->f_dma = edesc->sec4_sg_dma +
+ sec4_sg_index * sizeof(struct sec4_sg_entry);
+ } else {
+ pdb->f_dma = sg_dma_address(req->dst);
+ }
+
+ pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
+
+ return 0;
+}
+
+static int caam_rsa_enc(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_key *key = &ctx->key;
+ struct device *jrdev = ctx->dev;
+ struct rsa_edesc *edesc;
+ int ret;
+
+ if (unlikely(!key->n || !key->e))
+ return -EINVAL;
+
+ if (req->dst_len < key->n_sz) {
+ req->dst_len = key->n_sz;
+ dev_err(jrdev, "Output buffer length less than parameter n\n");
+ return -EOVERFLOW;
+ }
+
+ /* Allocate extended descriptor */
+ edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ /* Set RSA Encrypt Protocol Data Block */
+ ret = set_rsa_pub_pdb(req, edesc);
+ if (ret)
+ goto init_fail;
+
+ /* Initialize Job Descriptor */
+ init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);
+
+ ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
+ if (!ret)
+ return -EINPROGRESS;
+
+ rsa_pub_unmap(jrdev, edesc, req);
+
+init_fail:
+ rsa_io_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ return ret;
+}
+
+static int caam_rsa_dec(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_key *key = &ctx->key;
+ struct device *jrdev = ctx->dev;
+ struct rsa_edesc *edesc;
+ int ret;
+
+ if (unlikely(!key->n || !key->d))
+ return -EINVAL;
+
+ if (req->dst_len < key->n_sz) {
+ req->dst_len = key->n_sz;
+ dev_err(jrdev, "Output buffer length less than parameter n\n");
+ return -EOVERFLOW;
+ }
+
+ /* Allocate extended descriptor */
+ edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ /* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
+ ret = set_rsa_priv_f1_pdb(req, edesc);
+ if (ret)
+ goto init_fail;
+
+ /* Initialize Job Descriptor */
+ init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);
+
+ ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req);
+ if (!ret)
+ return -EINPROGRESS;
+
+ rsa_priv_f1_unmap(jrdev, edesc, req);
+
+init_fail:
+ rsa_io_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ return ret;
+}
+
+static void caam_rsa_free_key(struct caam_rsa_key *key)
+{
+ kzfree(key->d);
+ kfree(key->e);
+ kfree(key->n);
+ key->d = NULL;
+ key->e = NULL;
+ key->n = NULL;
+ key->d_sz = 0;
+ key->e_sz = 0;
+ key->n_sz = 0;
+}
+
+/**
+ * caam_read_raw_data - Read a raw byte stream as a positive integer.
+ * The function skips the buffer's leading zeros, copies the remaining data
+ * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
+ * the address of the new buffer.
+ *
+ * @buf : The data to read
+ * @nbytes: The amount of data to read
+ */
+static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
+{
+ u8 *val;
+
+ while (!*buf && *nbytes) {
+ buf++;
+ (*nbytes)--;
+ }
+
+ val = kzalloc(*nbytes, GFP_DMA | GFP_KERNEL);
+ if (!val)
+ return NULL;
+
+ memcpy(val, buf, *nbytes);
+
+ return val;
+}
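+
+/*
+ * Worked example (illustrative only, not driver code): for the input
+ * { 0x00, 0x00, 0x12, 0x34 } with *nbytes == 4, the two leading zero
+ * bytes are skipped, a 2-byte buffer { 0x12, 0x34 } is returned, and
+ * *nbytes is updated to 2. The caller owns (and must kfree) the result.
+ */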
+
+static int caam_rsa_check_key_length(unsigned int len)
+{
+ if (len > 4096)
+ return -EINVAL;
+ return 0;
+}
+
+static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct rsa_key raw_key = {0};
+ struct caam_rsa_key *rsa_key = &ctx->key;
+ int ret;
+
+ /* Free the old RSA key if any */
+ caam_rsa_free_key(rsa_key);
+
+ ret = rsa_parse_pub_key(&raw_key, key, keylen);
+ if (ret)
+ return ret;
+
+ /* Copy key in DMA zone */
+ rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
+ if (!rsa_key->e)
+ goto err;
+
+ /*
+ * Skip leading zeros and copy the positive integer to a buffer
+ * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
+ * expects a positive integer for the RSA modulus and uses its length as
+ * decryption output length.
+ */
+ rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
+ if (!rsa_key->n)
+ goto err;
+
+ if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
+ caam_rsa_free_key(rsa_key);
+ return -EINVAL;
+ }
+
+ rsa_key->e_sz = raw_key.e_sz;
+ rsa_key->n_sz = raw_key.n_sz;
+
+ memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
+
+ return 0;
+err:
+ caam_rsa_free_key(rsa_key);
+ return -ENOMEM;
+}
+
+static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct rsa_key raw_key = {0};
+ struct caam_rsa_key *rsa_key = &ctx->key;
+ int ret;
+
+ /* Free the old RSA key if any */
+ caam_rsa_free_key(rsa_key);
+
+ ret = rsa_parse_priv_key(&raw_key, key, keylen);
+ if (ret)
+ return ret;
+
+ /* Copy key in DMA zone */
+ rsa_key->d = kzalloc(raw_key.d_sz, GFP_DMA | GFP_KERNEL);
+ if (!rsa_key->d)
+ goto err;
+
+ rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
+ if (!rsa_key->e)
+ goto err;
+
+ /*
+ * Skip leading zeros and copy the positive integer to a buffer
+ * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
+ * expects a positive integer for the RSA modulus and uses its length as
+ * decryption output length.
+ */
+ rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
+ if (!rsa_key->n)
+ goto err;
+
+ if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
+ caam_rsa_free_key(rsa_key);
+ return -EINVAL;
+ }
+
+ rsa_key->d_sz = raw_key.d_sz;
+ rsa_key->e_sz = raw_key.e_sz;
+ rsa_key->n_sz = raw_key.n_sz;
+
+ memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
+ memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
+
+ return 0;
+
+err:
+ caam_rsa_free_key(rsa_key);
+ return -ENOMEM;
+}
+
+static int caam_rsa_max_size(struct crypto_akcipher *tfm)
+{
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_key *key = &ctx->key;
+
+ return (key->n) ? key->n_sz : -EINVAL;
+}
+
+/* Per-session PKC driver context creation function */
+static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ ctx->dev = caam_jr_alloc();
+
+ if (IS_ERR(ctx->dev)) {
+ dev_err(ctx->dev, "Job Ring Device allocation for transform failed\n");
+ return PTR_ERR(ctx->dev);
+ }
+
+ return 0;
+}
+
+/* Per-session PKC driver context cleanup function */
+static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+ struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct caam_rsa_key *key = &ctx->key;
+
+ caam_rsa_free_key(key);
+ caam_jr_free(ctx->dev);
+}
+
+static struct akcipher_alg caam_rsa = {
+ .encrypt = caam_rsa_enc,
+ .decrypt = caam_rsa_dec,
+ .sign = caam_rsa_dec,
+ .verify = caam_rsa_enc,
+ .set_pub_key = caam_rsa_set_pub_key,
+ .set_priv_key = caam_rsa_set_priv_key,
+ .max_size = caam_rsa_max_size,
+ .init = caam_rsa_init_tfm,
+ .exit = caam_rsa_exit_tfm,
+ .base = {
+ .cra_name = "rsa",
+ .cra_driver_name = "rsa-caam",
+ .cra_priority = 3000,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct caam_rsa_ctx),
+ },
+};
+
+/* Public Key Cryptography module initialization handler */
+static int __init caam_pkc_init(void)
+{
+ struct device_node *dev_node;
+ struct platform_device *pdev;
+ struct device *ctrldev;
+ struct caam_drv_private *priv;
+ u32 cha_inst, pk_inst;
+ int err;
+
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+ if (!dev_node) {
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+ if (!dev_node)
+ return -ENODEV;
+ }
+
+ pdev = of_find_device_by_node(dev_node);
+ if (!pdev) {
+ of_node_put(dev_node);
+ return -ENODEV;
+ }
+
+ ctrldev = &pdev->dev;
+ priv = dev_get_drvdata(ctrldev);
+ of_node_put(dev_node);
+
+ /*
+ * If priv is NULL, it's probably because the caam driver wasn't
+ * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+ */
+ if (!priv)
+ return -ENODEV;
+
+ /* Determine public key hardware accelerator presence. */
+ cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+ pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
+
+ /* Do not register algorithms if PKHA is not present. */
+ if (!pk_inst)
+ return -ENODEV;
+
+ err = crypto_register_akcipher(&caam_rsa);
+ if (err)
+ dev_warn(ctrldev, "%s alg registration failed\n",
+ caam_rsa.base.cra_driver_name);
+ else
+ dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
+
+ return err;
+}
+
+static void __exit caam_pkc_exit(void)
+{
+ crypto_unregister_akcipher(&caam_rsa);
+}
+
+module_init(caam_pkc_init);
+module_exit(caam_pkc_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
+MODULE_AUTHOR("Freescale Semiconductor");
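Once caam_pkc_init() registers caam_rsa, the transform is reachable through the generic akcipher API under the "rsa" name. A hedged sketch of a kernel-side caller (assumes a BER-encoded public key and pre-built scatterlists; a real caller would also handle -EINPROGRESS via a completion callback):

#include <crypto/akcipher.h>
#include <linux/scatterlist.h>

static int rsa_encrypt_once(const void *ber_key, unsigned int keylen,
			    struct scatterlist *src, unsigned int src_len,
			    struct scatterlist *dst, unsigned int dst_len)
{
	struct crypto_akcipher *tfm;
	struct akcipher_request *req;
	int ret;

	tfm = crypto_alloc_akcipher("rsa", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_akcipher_set_pub_key(tfm, ber_key, keylen);
	if (ret)
		goto out_tfm;

	req = akcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	akcipher_request_set_crypt(req, src, dst, src_len, dst_len);
	ret = crypto_akcipher_encrypt(req);

	akcipher_request_free(req);
out_tfm:
	crypto_free_akcipher(tfm);
	return ret;
}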
diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h
new file mode 100644
index 000000000..f595d159b
--- /dev/null
+++ b/drivers/crypto/caam/caampkc.h
@@ -0,0 +1,70 @@
+/*
+ * caam - Freescale FSL CAAM support for Public Key Cryptography descriptors
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ *
+ * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
+ * all the desired key parameters, input and output pointers.
+ */
+
+#ifndef _PKC_DESC_H_
+#define _PKC_DESC_H_
+#include "compat.h"
+#include "pdb.h"
+
+/**
+ * caam_rsa_key - CAAM RSA key structure. Keys are allocated in DMA zone.
+ * @n : RSA modulus raw byte stream
+ * @e : RSA public exponent raw byte stream
+ * @d : RSA private exponent raw byte stream
+ * @n_sz : length in bytes of RSA modulus n
+ * @e_sz : length in bytes of RSA public exponent
+ * @d_sz : length in bytes of RSA private exponent
+ */
+struct caam_rsa_key {
+ u8 *n;
+ u8 *e;
+ u8 *d;
+ size_t n_sz;
+ size_t e_sz;
+ size_t d_sz;
+};
+
+/**
+ * caam_rsa_ctx - per session context.
+ * @key : RSA key in DMA zone
+ * @dev : device structure
+ */
+struct caam_rsa_ctx {
+ struct caam_rsa_key key;
+ struct device *dev;
+};
+
+/**
+ * rsa_edesc - s/w-extended rsa descriptor
+ * @src_nents : number of segments in input scatterlist
+ * @dst_nents : number of segments in output scatterlist
+ * @sec4_sg_bytes : length of h/w link table
+ * @sec4_sg_dma : dma address of h/w link table
+ * @sec4_sg : pointer to h/w link table
+ * @pdb : specific RSA Protocol Data Block (PDB)
+ * @hw_desc : descriptor followed by link tables if any
+ */
+struct rsa_edesc {
+ int src_nents;
+ int dst_nents;
+ int sec4_sg_bytes;
+ dma_addr_t sec4_sg_dma;
+ struct sec4_sg_entry *sec4_sg;
+ union {
+ struct rsa_pub_pdb pub;
+ struct rsa_priv_f1_pdb priv_f1;
+ } pdb;
+ u32 hw_desc[];
+};
+
+/* Descriptor construction primitives. */
+void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb);
+void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb);
+
+#endif
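The flexible hw_desc[] member lets rsa_edesc_alloc() carve everything out of a single allocation; schematically, the kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes) in caampkc.c produces:

/*
 *  +------------------+----------------------+---------------------+
 *  | struct rsa_edesc | hw_desc[] (desclen)  | sec4_sg link table  |
 *  +------------------+----------------------+---------------------+
 *                     ^ job descriptor        ^ edesc->sec4_sg
 */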
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index b6955ecdf..7149cd249 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -35,8 +35,11 @@
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
+#include <crypto/akcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/rsa.h>
+#include <crypto/internal/akcipher.h>
#endif /* !defined(CAAM_COMPAT_H) */
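caam_little_end (exported from ctrl.c below) lets a single binary drive both big- and little-endian CAAM blocks; the cpu_to_caam32()/caam32_to_cpu() conversions used throughout this patch then reduce to runtime-selected byte swaps, roughly as follows (a sketch; the real helpers live in regs.h):

extern bool caam_little_end;

static inline u32 cpu_to_caam32(u32 val)
{
	/* Pick the bus byte order detected at probe time. */
	return caam_little_end ? (__force u32)cpu_to_le32(val)
			       : (__force u32)cpu_to_be32(val);
}

static inline u32 caam32_to_cpu(u32 val)
{
	return caam_little_end ? le32_to_cpu((__force __le32)val)
			       : be32_to_cpu((__force __be32)val);
}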
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 5ad5f3009..0ec112ee5 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -15,6 +15,9 @@
#include "desc_constr.h"
#include "error.h"
+bool caam_little_end;
+EXPORT_SYMBOL(caam_little_end);
+
/*
* i.MX targets tend to have clock control subsystems that can
* enable/disable clocking to our device.
@@ -106,7 +109,7 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
if (ctrlpriv->virt_en == 1) {
- setbits32(&ctrl->deco_rsr, DECORSR_JR0);
+ clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0);
while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
--timeout)
@@ -115,7 +118,7 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
timeout = 100000;
}
- setbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE);
+ clrsetbits_32(&ctrl->deco_rq, 0, DECORR_RQD0ENABLE);
while (!(rd_reg32(&ctrl->deco_rq) & DECORR_DEN0) &&
--timeout)
@@ -123,12 +126,12 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
if (!timeout) {
dev_err(ctrldev, "failed to acquire DECO 0\n");
- clrbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE);
+ clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);
return -ENODEV;
}
for (i = 0; i < desc_len(desc); i++)
- wr_reg32(&deco->descbuf[i], *(desc + i));
+ wr_reg32(&deco->descbuf[i], caam32_to_cpu(*(desc + i)));
flags = DECO_JQCR_WHL;
/*
@@ -139,7 +142,7 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
flags |= DECO_JQCR_FOUR;
/* Instruct the DECO to execute it */
- setbits32(&deco->jr_ctl_hi, flags);
+ clrsetbits_32(&deco->jr_ctl_hi, 0, flags);
timeout = 10000000;
do {
@@ -158,10 +161,10 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
DECO_OP_STATUS_HI_ERR_MASK;
if (ctrlpriv->virt_en == 1)
- clrbits32(&ctrl->deco_rsr, DECORSR_JR0);
+ clrsetbits_32(&ctrl->deco_rsr, DECORSR_JR0, 0);
/* Mark the DECO as free */
- clrbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE);
+ clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);
if (!timeout)
return -EAGAIN;
@@ -349,7 +352,7 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
r4tst = &ctrl->r4tst[0];
/* put RNG4 into program mode */
- setbits32(&r4tst->rtmctl, RTMCTL_PRGM);
+ clrsetbits_32(&r4tst->rtmctl, 0, RTMCTL_PRGM);
/*
* Performance-wise, it does not make sense to
@@ -363,7 +366,7 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
>> RTSDCTL_ENT_DLY_SHIFT;
if (ent_delay <= val) {
/* put RNG4 into run mode */
- clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
+ clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, 0);
return;
}
@@ -381,9 +384,9 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
* select raw sampling in both entropy shifter
* and statistical checker
*/
- setbits32(&val, RTMCTL_SAMP_MODE_RAW_ES_SC);
+ clrsetbits_32(&val, 0, RTMCTL_SAMP_MODE_RAW_ES_SC);
/* put RNG4 into run mode */
- clrbits32(&val, RTMCTL_PRGM);
+ clrsetbits_32(&val, RTMCTL_PRGM, 0);
/* write back the control register */
wr_reg32(&r4tst->rtmctl, val);
}
@@ -406,6 +409,23 @@ int caam_get_era(void)
}
EXPORT_SYMBOL(caam_get_era);
+#ifdef CONFIG_DEBUG_FS
+static int caam_debugfs_u64_get(void *data, u64 *val)
+{
+ *val = caam64_to_cpu(*(u64 *)data);
+ return 0;
+}
+
+static int caam_debugfs_u32_get(void *data, u64 *val)
+{
+ *val = caam32_to_cpu(*(u32 *)data);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
+#endif
+
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
@@ -504,6 +524,10 @@ static int caam_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto disable_caam_emi_slow;
}
+
+ caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
+ (CSTA_PLEND | CSTA_ALT_PLEND));
+
/* Finding the page size for using the CTPR_MS register */
comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
@@ -559,9 +583,9 @@ static int caam_probe(struct platform_device *pdev)
}
if (ctrlpriv->virt_en == 1)
- setbits32(&ctrl->jrstart, JRSTART_JR0_START |
- JRSTART_JR1_START | JRSTART_JR2_START |
- JRSTART_JR3_START);
+ clrsetbits_32(&ctrl->jrstart, 0, JRSTART_JR0_START |
+ JRSTART_JR1_START | JRSTART_JR2_START |
+ JRSTART_JR3_START);
if (sizeof(dma_addr_t) == sizeof(u64))
if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
@@ -693,7 +717,7 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;
/* Enable RDB bit so that RNG works faster */
- setbits32(&ctrl->scfgr, SCFGR_RDBENABLE);
+ clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE);
}
/* NOTE: RTIC detection ought to go here, around Si time */
@@ -719,48 +743,59 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
/* Controller-level - performance monitor counters */
+
ctrlpriv->ctl_rq_dequeued =
- debugfs_create_u64("rq_dequeued",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->req_dequeued);
+ debugfs_create_file("rq_dequeued",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->req_dequeued,
+ &caam_fops_u64_ro);
ctrlpriv->ctl_ob_enc_req =
- debugfs_create_u64("ob_rq_encrypted",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ob_enc_req);
+ debugfs_create_file("ob_rq_encrypted",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ob_enc_req,
+ &caam_fops_u64_ro);
ctrlpriv->ctl_ib_dec_req =
- debugfs_create_u64("ib_rq_decrypted",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ib_dec_req);
+ debugfs_create_file("ib_rq_decrypted",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ib_dec_req,
+ &caam_fops_u64_ro);
ctrlpriv->ctl_ob_enc_bytes =
- debugfs_create_u64("ob_bytes_encrypted",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ob_enc_bytes);
+ debugfs_create_file("ob_bytes_encrypted",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ob_enc_bytes,
+ &caam_fops_u64_ro);
ctrlpriv->ctl_ob_prot_bytes =
- debugfs_create_u64("ob_bytes_protected",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ob_prot_bytes);
+ debugfs_create_file("ob_bytes_protected",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ob_prot_bytes,
+ &caam_fops_u64_ro);
ctrlpriv->ctl_ib_dec_bytes =
- debugfs_create_u64("ib_bytes_decrypted",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ib_dec_bytes);
+ debugfs_create_file("ib_bytes_decrypted",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ib_dec_bytes,
+ &caam_fops_u64_ro);
ctrlpriv->ctl_ib_valid_bytes =
- debugfs_create_u64("ib_bytes_validated",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ib_valid_bytes);
+ debugfs_create_file("ib_bytes_validated",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ib_valid_bytes,
+ &caam_fops_u64_ro);
/* Controller level - global status values */
ctrlpriv->ctl_faultaddr =
- debugfs_create_u64("fault_addr",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->faultaddr);
+ debugfs_create_file("fault_addr",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->faultaddr,
+ &caam_fops_u32_ro);
ctrlpriv->ctl_faultdetail =
- debugfs_create_u32("fault_detail",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->faultdetail);
+ debugfs_create_file("fault_detail",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->faultdetail,
+ &caam_fops_u32_ro);
ctrlpriv->ctl_faultstatus =
- debugfs_create_u32("fault_status",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->status);
+ debugfs_create_file("fault_status",
+ S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->status,
+ &caam_fops_u32_ro);
/* Internal covering keys (useful in non-secure mode only) */
ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 1e93c6af2..26427c11a 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -20,19 +20,18 @@
#define SEC4_SG_BPID_MASK 0x000000ff
#define SEC4_SG_BPID_SHIFT 16
#define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */
-#define SEC4_SG_OFFS_MASK 0x00001fff
+#define SEC4_SG_OFFSET_MASK 0x00001fff
struct sec4_sg_entry {
-#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
+#if !defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) && \
+ defined(CONFIG_CRYPTO_DEV_FSL_CAAM_IMX)
u32 rsvd1;
dma_addr_t ptr;
#else
u64 ptr;
#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_IMX */
u32 len;
- u8 rsvd2;
- u8 buf_pool_id;
- u16 offset;
+ u32 bpid_offset;
};
/* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
@@ -454,6 +453,8 @@ struct sec4_sg_entry {
#define OP_PCLID_PUBLICKEYPAIR (0x14 << OP_PCLID_SHIFT)
#define OP_PCLID_DSASIGN (0x15 << OP_PCLID_SHIFT)
#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
+#define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT)
+#define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT)
/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
#define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
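Collapsing the rsvd2/buf_pool_id/offset trio of sec4_sg_entry into one bpid_offset word (earlier in this hunk) means the whole field can round-trip through cpu_to_caam32()/caam32_to_cpu() in a single swap. A hedged sketch of the packing this implies, using the masks defined above (SEC4_SG_BPID_SHIFT is 16):

static inline u32 sec4_sg_bpid_offset(u8 bpid, u16 offset)
{
	/* bpid in bits 23:16, offset in bits 12:0, per the masks above. */
	return ((u32)bpid << SEC4_SG_BPID_SHIFT) |
	       (offset & SEC4_SG_OFFSET_MASK);
}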
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index 98d07de24..d3869b95e 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -5,6 +5,7 @@
*/
#include "desc.h"
+#include "regs.h"
#define IMMEDIATE (1 << 23)
#define CAAM_CMD_SZ sizeof(u32)
@@ -30,9 +31,11 @@
LDST_SRCDST_WORD_DECOCTRL | \
(LDOFF_ENABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
+extern bool caam_little_end;
+
static inline int desc_len(u32 *desc)
{
- return *desc & HDR_DESCLEN_MASK;
+ return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK;
}
static inline int desc_bytes(void *desc)
@@ -52,7 +55,7 @@ static inline void *sh_desc_pdb(u32 *desc)
static inline void init_desc(u32 *desc, u32 options)
{
- *desc = (options | HDR_ONE) + 1;
+ *desc = cpu_to_caam32((options | HDR_ONE) + 1);
}
static inline void init_sh_desc(u32 *desc, u32 options)
@@ -74,13 +77,21 @@ static inline void init_job_desc(u32 *desc, u32 options)
init_desc(desc, CMD_DESC_HDR | options);
}
+static inline void init_job_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
+{
+ u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
+
+ init_job_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT)) | options);
+}
+
static inline void append_ptr(u32 *desc, dma_addr_t ptr)
{
dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
- *offset = ptr;
+ *offset = cpu_to_caam_dma(ptr);
- (*desc) += CAAM_PTR_SZ / CAAM_CMD_SZ;
+ (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) +
+ CAAM_PTR_SZ / CAAM_CMD_SZ);
}
static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
@@ -99,16 +110,17 @@ static inline void append_data(u32 *desc, void *data, int len)
if (len) /* avoid sparse warning: memcpy with byte count of 0 */
memcpy(offset, data, len);
- (*desc) += (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
+ (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) +
+ (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ);
}
static inline void append_cmd(u32 *desc, u32 command)
{
u32 *cmd = desc_end(desc);
- *cmd = command;
+ *cmd = cpu_to_caam32(command);
- (*desc)++;
+ (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + 1);
}
#define append_u32 append_cmd
@@ -117,16 +129,22 @@ static inline void append_u64(u32 *desc, u64 data)
{
u32 *offset = desc_end(desc);
- *offset = upper_32_bits(data);
- *(++offset) = lower_32_bits(data);
+ /* Only 32-bit alignment is guaranteed in descriptor buffer */
+ if (caam_little_end) {
+ *offset = cpu_to_caam32(lower_32_bits(data));
+ *(++offset) = cpu_to_caam32(upper_32_bits(data));
+ } else {
+ *offset = cpu_to_caam32(upper_32_bits(data));
+ *(++offset) = cpu_to_caam32(lower_32_bits(data));
+ }
- (*desc) += 2;
+ (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + 2);
}
/* Write command without affecting header, and return pointer to next word */
static inline u32 *write_cmd(u32 *desc, u32 command)
{
- *desc = command;
+ *desc = cpu_to_caam32(command);
return desc + 1;
}
@@ -168,14 +186,17 @@ APPEND_CMD_RET(move, MOVE)
static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
{
- *jump_cmd = *jump_cmd | (desc_len(desc) - (jump_cmd - desc));
+ *jump_cmd = cpu_to_caam32(caam32_to_cpu(*jump_cmd) |
+ (desc_len(desc) - (jump_cmd - desc)));
}
static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
{
- *move_cmd &= ~MOVE_OFFSET_MASK;
- *move_cmd = *move_cmd | ((desc_len(desc) << (MOVE_OFFSET_SHIFT + 2)) &
- MOVE_OFFSET_MASK);
+ u32 val = caam32_to_cpu(*move_cmd);
+
+ val &= ~MOVE_OFFSET_MASK;
+ val |= (desc_len(desc) << (MOVE_OFFSET_SHIFT + 2)) & MOVE_OFFSET_MASK;
+ *move_cmd = cpu_to_caam32(val);
}
#define APPEND_CMD(cmd, op) \
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 5ef4be22e..a81f551ac 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -31,7 +31,7 @@ static int caam_reset_hw_jr(struct device *dev)
* mask interrupts since we are going to poll
* for reset completion status
*/
- setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
+ clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);
/* initiate flush (required prior to reset) */
wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
@@ -57,7 +57,7 @@ static int caam_reset_hw_jr(struct device *dev)
}
/* unmask interrupts */
- clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
+ clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
return 0;
}
@@ -147,7 +147,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
}
/* mask valid interrupts */
- setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
+ clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);
/* Have valid interrupt at this point, just ACK and trigger */
wr_reg32(&jrp->rregs->jrintstatus, irqstate);
@@ -182,7 +182,7 @@ static void caam_jr_dequeue(unsigned long devarg)
sw_idx = (tail + i) & (JOBR_DEPTH - 1);
if (jrp->outring[hw_idx].desc ==
- jrp->entinfo[sw_idx].desc_addr_dma)
+ caam_dma_to_cpu(jrp->entinfo[sw_idx].desc_addr_dma))
break; /* found */
}
/* we should never fail to find a matching descriptor */
@@ -200,7 +200,7 @@ static void caam_jr_dequeue(unsigned long devarg)
usercall = jrp->entinfo[sw_idx].callbk;
userarg = jrp->entinfo[sw_idx].cbkarg;
userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
- userstatus = jrp->outring[hw_idx].jrstatus;
+ userstatus = caam32_to_cpu(jrp->outring[hw_idx].jrstatus);
/*
* Make sure all information from the job has been obtained
@@ -236,7 +236,7 @@ static void caam_jr_dequeue(unsigned long devarg)
}
/* reenable / unmask IRQs */
- clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
+ clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
}
/**
@@ -330,7 +330,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
int head, tail, desc_size;
dma_addr_t desc_dma;
- desc_size = (*desc & HDR_JD_LENGTH_MASK) * sizeof(u32);
+ desc_size = (caam32_to_cpu(*desc) & HDR_JD_LENGTH_MASK) * sizeof(u32);
desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
if (dma_mapping_error(dev, desc_dma)) {
dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n");
@@ -356,7 +356,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
head_entry->cbkarg = areq;
head_entry->desc_addr_dma = desc_dma;
- jrp->inpring[jrp->inp_ring_write_index] = desc_dma;
+ jrp->inpring[jrp->inp_ring_write_index] = cpu_to_caam_dma(desc_dma);
/*
* Guarantee that the descriptor's DMA address has been written to
@@ -444,9 +444,9 @@ static int caam_jr_init(struct device *dev)
spin_lock_init(&jrp->outlock);
/* Select interrupt coalescing parameters */
- setbits32(&jrp->rregs->rconfig_lo, JOBR_INTC |
- (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
- (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
+ clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC |
+ (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
+ (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
return 0;
diff --git a/drivers/crypto/caam/pdb.h b/drivers/crypto/caam/pdb.h
index 3a87c0cf8..aaa00dd1c 100644
--- a/drivers/crypto/caam/pdb.h
+++ b/drivers/crypto/caam/pdb.h
@@ -1,18 +1,19 @@
/*
* CAAM Protocol Data Block (PDB) definition header file
*
- * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ * Copyright 2008-2016 Freescale Semiconductor, Inc.
*
*/
#ifndef CAAM_PDB_H
#define CAAM_PDB_H
+#include "compat.h"
/*
* PDB- IPSec ESP Header Modification Options
*/
-#define PDBHMO_ESP_DECAP_SHIFT 12
-#define PDBHMO_ESP_ENCAP_SHIFT 4
+#define PDBHMO_ESP_DECAP_SHIFT 28
+#define PDBHMO_ESP_ENCAP_SHIFT 28
/*
* Encap and Decap - Decrement TTL (Hop Limit) - Based on the value of the
* Options Byte IP version (IPvsn) field:
@@ -32,12 +33,23 @@
*/
#define PDBHMO_ESP_DFBIT (0x04 << PDBHMO_ESP_ENCAP_SHIFT)
+#define PDBNH_ESP_ENCAP_SHIFT 16
+#define PDBNH_ESP_ENCAP_MASK (0xff << PDBNH_ESP_ENCAP_SHIFT)
+
+#define PDBHDRLEN_ESP_DECAP_SHIFT 16
+#define PDBHDRLEN_MASK (0x0fff << PDBHDRLEN_ESP_DECAP_SHIFT)
+
+#define PDB_NH_OFFSET_SHIFT 8
+#define PDB_NH_OFFSET_MASK (0xff << PDB_NH_OFFSET_SHIFT)
+
/*
* PDB - IPSec ESP Encap/Decap Options
*/
#define PDBOPTS_ESP_ARSNONE 0x00 /* no antireplay window */
#define PDBOPTS_ESP_ARS32 0x40 /* 32-entry antireplay window */
+#define PDBOPTS_ESP_ARS128 0x80 /* 128-entry antireplay window */
#define PDBOPTS_ESP_ARS64 0xc0 /* 64-entry antireplay window */
+#define PDBOPTS_ESP_ARS_MASK 0xc0 /* antireplay window mask */
#define PDBOPTS_ESP_IVSRC 0x20 /* IV comes from internal random gen */
#define PDBOPTS_ESP_ESN 0x10 /* extended sequence included */
#define PDBOPTS_ESP_OUTFMT 0x08 /* output only decapsulation (decap) */
@@ -54,35 +66,73 @@
/*
* General IPSec encap/decap PDB definitions
*/
+
+/**
+ * ipsec_encap_cbc - PDB part for IPsec CBC encapsulation
+ * @iv: 16-byte array initialization vector
+ */
struct ipsec_encap_cbc {
- u32 iv[4];
+ u8 iv[16];
};
+/**
+ * ipsec_encap_ctr - PDB part for IPsec CTR encapsulation
+ * @ctr_nonce: 4-byte array nonce
+ * @ctr_initial: initial count constant
+ * @iv: initialization vector
+ */
struct ipsec_encap_ctr {
- u32 ctr_nonce;
+ u8 ctr_nonce[4];
u32 ctr_initial;
- u32 iv[2];
+ u64 iv;
};
+/**
+ * ipsec_encap_ccm - PDB part for IPsec CCM encapsulation
+ * @salt: 3-byte array salt (lower 24 bits)
+ * @ccm_opt: CCM algorithm options - MSB-LSB description:
+ * b0_flags (8b) - CCM B0; use 0x5B for 8-byte ICV, 0x6B for 12-byte ICV,
+ * 0x7B for 16-byte ICV (cf. RFC4309, RFC3610)
+ * ctr_flags (8b) - counter flags; constant equal to 0x3
+ * ctr_initial (16b) - initial count constant
+ * @iv: initialization vector
+ */
struct ipsec_encap_ccm {
- u32 salt; /* lower 24 bits */
- u8 b0_flags;
- u8 ctr_flags;
- u16 ctr_initial;
- u32 iv[2];
+ u8 salt[4];
+ u32 ccm_opt;
+ u64 iv;
};
+/**
+ * ipsec_encap_gcm - PDB part for IPsec GCM encapsulation
+ * @salt: 3-byte array salt (lower 24 bits)
+ * @rsvd: reserved, do not use
+ * @iv: initialization vector
+ */
struct ipsec_encap_gcm {
- u32 salt; /* lower 24 bits */
+ u8 salt[4];
u32 rsvd1;
- u32 iv[2];
+ u64 iv;
};
+/**
+ * ipsec_encap_pdb - PDB for IPsec encapsulation
+ * @options: MSB-LSB description
+ * hmo (header manipulation options) - 4b
+ * reserved - 4b
+ * next header - 8b
+ * next header offset - 8b
+ * option flags (depend on selected algorithm) - 8b
+ * @seq_num_ext_hi: (optional) IPsec Extended Sequence Number (ESN)
+ * @seq_num: IPsec sequence number
+ * @spi: IPsec SPI (Security Parameters Index)
+ * @ip_hdr_len: optional IP Header length (in bytes)
+ * reserved - 16b
+ * Opt. IP Hdr Len - 16b
+ * @ip_hdr: optional IP Header content
+ */
struct ipsec_encap_pdb {
- u8 hmo_rsvd;
- u8 ip_nh;
- u8 ip_nh_offset;
- u8 options;
+ u32 options;
u32 seq_num_ext_hi;
u32 seq_num;
union {
@@ -92,36 +142,65 @@ struct ipsec_encap_pdb {
struct ipsec_encap_gcm gcm;
};
u32 spi;
- u16 rsvd1;
- u16 ip_hdr_len;
- u32 ip_hdr[0]; /* optional IP Header content */
+ u32 ip_hdr_len;
+ u32 ip_hdr[0];
};
+/**
+ * ipsec_decap_cbc - PDB part for IPsec CBC decapsulation
+ * @rsvd: reserved, do not use
+ */
struct ipsec_decap_cbc {
u32 rsvd[2];
};
+/**
+ * ipsec_decap_ctr - PDB part for IPsec CTR decapsulation
+ * @ctr_nonce: 4-byte array nonce
+ * @ctr_initial: initial count constant
+ */
struct ipsec_decap_ctr {
- u32 salt;
+ u8 ctr_nonce[4];
u32 ctr_initial;
};
+/**
+ * ipsec_decap_ccm - PDB part for IPsec CCM decapsulation
+ * @salt: 3-byte salt (lower 24 bits)
+ * @ccm_opt: CCM algorithm options - MSB-LSB description:
+ * b0_flags (8b) - CCM B0; use 0x5B for 8-byte ICV, 0x6B for 12-byte ICV,
+ * 0x7B for 16-byte ICV (cf. RFC4309, RFC3610)
+ * ctr_flags (8b) - counter flags; constant equal to 0x3
+ * ctr_initial (16b) - initial count constant
+ */
struct ipsec_decap_ccm {
- u32 salt;
- u8 iv_flags;
- u8 ctr_flags;
- u16 ctr_initial;
+ u8 salt[4];
+ u32 ccm_opt;
};
+/**
+ * ipsec_decap_gcm - PDB part for IPsec GCM decapsulation
+ * @salt: 4-byte salt
+ * @rsvd: reserved, do not use
+ */
struct ipsec_decap_gcm {
- u32 salt;
+ u8 salt[4];
u32 resvd;
};
+/**
+ * ipsec_decap_pdb - PDB for IPsec decapsulation
+ * @options: MSB-LSB description
+ * hmo (header manipulation options) - 4b
+ * IP header length - 12b
+ * next header offset - 8b
+ * option flags (depend on selected algorithm) - 8b
+ * @seq_num_ext_hi: (optional) IPsec Extended Sequence Number (ESN)
+ * @seq_num: IPsec sequence number
+ * @anti_replay: Anti-replay window; size depends on ARS (option flags)
+ */
struct ipsec_decap_pdb {
- u16 hmo_ip_hdr_len;
- u8 ip_nh_offset;
- u8 options;
+ u32 options;
union {
struct ipsec_decap_cbc cbc;
struct ipsec_decap_ctr ctr;
@@ -130,8 +209,7 @@ struct ipsec_decap_pdb {
};
u32 seq_num_ext_hi;
u32 seq_num;
- u32 anti_replay[2];
- u32 end_index[0];
+ __be32 anti_replay[4];
};
/*
@@ -399,4 +477,52 @@ struct dsa_verify_pdb {
u8 *ab; /* only used if ECC processing */
};
+/* RSA Protocol Data Block */
+#define RSA_PDB_SGF_SHIFT 28
+#define RSA_PDB_E_SHIFT 12
+#define RSA_PDB_E_MASK (0xFFF << RSA_PDB_E_SHIFT)
+#define RSA_PDB_D_SHIFT 12
+#define RSA_PDB_D_MASK (0xFFF << RSA_PDB_D_SHIFT)
+
+#define RSA_PDB_SGF_F (0x8 << RSA_PDB_SGF_SHIFT)
+#define RSA_PDB_SGF_G (0x4 << RSA_PDB_SGF_SHIFT)
+#define RSA_PRIV_PDB_SGF_F (0x4 << RSA_PDB_SGF_SHIFT)
+#define RSA_PRIV_PDB_SGF_G (0x8 << RSA_PDB_SGF_SHIFT)
+
+#define RSA_PRIV_KEY_FRM_1 0
+
+/**
+ * RSA Encrypt Protocol Data Block
+ * @sgf: scatter-gather field
+ * @f_dma: dma address of input data
+ * @g_dma: dma address of encrypted output data
+ * @n_dma: dma address of RSA modulus
+ * @e_dma: dma address of RSA public exponent
+ * @f_len: length in octets of the input data
+ */
+struct rsa_pub_pdb {
+ u32 sgf;
+ dma_addr_t f_dma;
+ dma_addr_t g_dma;
+ dma_addr_t n_dma;
+ dma_addr_t e_dma;
+ u32 f_len;
+} __packed;
+
+/**
+ * RSA Decrypt PDB - Private Key Form #1
+ * @sgf: scatter-gather field
+ * @g_dma: dma address of encrypted input data
+ * @f_dma: dma address of output data
+ * @n_dma: dma address of RSA modulus
+ * @d_dma: dma address of RSA private exponent
+ */
+struct rsa_priv_f1_pdb {
+ u32 sgf;
+ dma_addr_t g_dma;
+ dma_addr_t f_dma;
+ dma_addr_t n_dma;
+ dma_addr_t d_dma;
+} __packed;
+
#endif
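
An illustrative sketch (not from the patch) of composing the new 32-bit
encap "options" word from the MSB-LSB layout documented above:

	static inline u32 ipsec_encap_options(u8 hmo, u8 next_hdr,
					      u8 nh_offset, u8 opt_flags)
	{
		return (hmo << PDBHMO_ESP_ENCAP_SHIFT) |
		       (next_hdr << PDBNH_ESP_ENCAP_SHIFT) |
		       (nh_offset << PDB_NH_OFFSET_SHIFT) |
		       opt_flags;
	}
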
diff --git a/drivers/crypto/caam/pkc_desc.c b/drivers/crypto/caam/pkc_desc.c
new file mode 100644
index 000000000..4e4183e61
--- /dev/null
+++ b/drivers/crypto/caam/pkc_desc.c
@@ -0,0 +1,36 @@
+/*
+ * caam - Freescale FSL CAAM support for Public Key Cryptography descriptors
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ *
+ * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
+ * all the desired key parameters, input and output pointers.
+ */
+#include "caampkc.h"
+#include "desc_constr.h"
+
+/* Descriptor for RSA Public operation */
+void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb)
+{
+ init_job_desc_pdb(desc, 0, sizeof(*pdb));
+ append_cmd(desc, pdb->sgf);
+ append_ptr(desc, pdb->f_dma);
+ append_ptr(desc, pdb->g_dma);
+ append_ptr(desc, pdb->n_dma);
+ append_ptr(desc, pdb->e_dma);
+ append_cmd(desc, pdb->f_len);
+ append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSAENC_PUBKEY);
+}
+
+/* Descriptor for RSA Private operation - Private Key Form #1 */
+void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb)
+{
+ init_job_desc_pdb(desc, 0, sizeof(*pdb));
+ append_cmd(desc, pdb->sgf);
+ append_ptr(desc, pdb->g_dma);
+ append_ptr(desc, pdb->f_dma);
+ append_ptr(desc, pdb->n_dma);
+ append_ptr(desc, pdb->d_dma);
+ append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
+ RSA_PRIV_KEY_FRM_1);
+}
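
A hedged usage sketch (not part of the patch): the caller fills the PDB
with pre-mapped DMA addresses before building the descriptor. In the
driver the sgf word also encodes key sizes via RSA_PDB_E_SHIFT, so the
values below are placeholders:

	struct rsa_pub_pdb pdb = {
		.sgf   = (e_len << RSA_PDB_E_SHIFT) | n_len,
		.f_dma = f_dma,		/* input message */
		.g_dma = g_dma,		/* encrypted output */
		.n_dma = n_dma,		/* RSA modulus */
		.e_dma = e_dma,		/* public exponent */
		.f_len = f_len,
	};

	init_rsa_pub_desc(desc, &pdb);
	/* desc is now ready to be submitted through the Job Ring */
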
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 0ba9c4059..b3c5016f6 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -8,6 +8,7 @@
#define REGS_H
#include <linux/types.h>
+#include <linux/bitops.h>
#include <linux/io.h>
/*
@@ -65,46 +66,56 @@
*
*/
-#ifdef CONFIG_ARM
-/* These are common macros for Power, put here for ARM */
-#define setbits32(_addr, _v) writel((readl(_addr) | (_v)), (_addr))
-#define clrbits32(_addr, _v) writel((readl(_addr) & ~(_v)), (_addr))
+extern bool caam_little_end;
-#define out_arch(type, endian, a, v) __raw_write##type(cpu_to_##endian(v), a)
-#define in_arch(type, endian, a) endian##_to_cpu(__raw_read##type(a))
+#define caam_to_cpu(len) \
+static inline u##len caam##len ## _to_cpu(u##len val) \
+{ \
+ if (caam_little_end) \
+ return le##len ## _to_cpu(val); \
+ else \
+ return be##len ## _to_cpu(val); \
+}
-#define out_le32(a, v) out_arch(l, le32, a, v)
-#define in_le32(a) in_arch(l, le32, a)
+#define cpu_to_caam(len) \
+static inline u##len cpu_to_caam##len(u##len val) \
+{ \
+ if (caam_little_end) \
+ return cpu_to_le##len(val); \
+ else \
+ return cpu_to_be##len(val); \
+}
-#define out_be32(a, v) out_arch(l, be32, a, v)
-#define in_be32(a) in_arch(l, be32, a)
+caam_to_cpu(16)
+caam_to_cpu(32)
+caam_to_cpu(64)
+cpu_to_caam(16)
+cpu_to_caam(32)
+cpu_to_caam(64)
-#define clrsetbits(type, addr, clear, set) \
- out_##type((addr), (in_##type(addr) & ~(clear)) | (set))
+static inline void wr_reg32(void __iomem *reg, u32 data)
+{
+ if (caam_little_end)
+ iowrite32(data, reg);
+ else
+ iowrite32be(data, reg);
+}
-#define clrsetbits_be32(addr, clear, set) clrsetbits(be32, addr, clear, set)
-#define clrsetbits_le32(addr, clear, set) clrsetbits(le32, addr, clear, set)
-#endif
+static inline u32 rd_reg32(void __iomem *reg)
+{
+ if (caam_little_end)
+ return ioread32(reg);
-#ifdef __BIG_ENDIAN
-#define wr_reg32(reg, data) out_be32(reg, data)
-#define rd_reg32(reg) in_be32(reg)
-#define clrsetbits_32(addr, clear, set) clrsetbits_be32(addr, clear, set)
-#ifdef CONFIG_64BIT
-#define wr_reg64(reg, data) out_be64(reg, data)
-#define rd_reg64(reg) in_be64(reg)
-#endif
-#else
-#ifdef __LITTLE_ENDIAN
-#define wr_reg32(reg, data) __raw_writel(data, reg)
-#define rd_reg32(reg) __raw_readl(reg)
-#define clrsetbits_32(addr, clear, set) clrsetbits_le32(addr, clear, set)
-#ifdef CONFIG_64BIT
-#define wr_reg64(reg, data) __raw_writeq(data, reg)
-#define rd_reg64(reg) __raw_readq(reg)
-#endif
-#endif
-#endif
+ return ioread32be(reg);
+}
+
+static inline void clrsetbits_32(void __iomem *reg, u32 clear, u32 set)
+{
+ if (caam_little_end)
+ iowrite32((ioread32(reg) & ~clear) | set, reg);
+ else
+ iowrite32be((ioread32be(reg) & ~clear) | set, reg);
+}
/*
* The only users of these wr/rd_reg64 functions is the Job Ring (JR).
@@ -123,29 +134,67 @@
* base + 0x0000 : least-significant 32 bits
* base + 0x0004 : most-significant 32 bits
*/
+#ifdef CONFIG_64BIT
+static inline void wr_reg64(void __iomem *reg, u64 data)
+{
+ if (caam_little_end)
+ iowrite64(data, reg);
+ else
+ iowrite64be(data, reg);
+}
-#ifndef CONFIG_64BIT
-#if !defined(CONFIG_CRYPTO_DEV_FSL_CAAM_LE) || \
- defined(CONFIG_CRYPTO_DEV_FSL_CAAM_IMX)
-#define REG64_MS32(reg) ((u32 __iomem *)(reg))
-#define REG64_LS32(reg) ((u32 __iomem *)(reg) + 1)
-#else
-#define REG64_MS32(reg) ((u32 __iomem *)(reg) + 1)
-#define REG64_LS32(reg) ((u32 __iomem *)(reg))
-#endif
-
-static inline void wr_reg64(u64 __iomem *reg, u64 data)
+static inline u64 rd_reg64(void __iomem *reg)
{
- wr_reg32(REG64_MS32(reg), data >> 32);
- wr_reg32(REG64_LS32(reg), data);
+ if (caam_little_end)
+ return ioread64(reg);
+ else
+ return ioread64be(reg);
}
-static inline u64 rd_reg64(u64 __iomem *reg)
+#else /* CONFIG_64BIT */
+static inline void wr_reg64(void __iomem *reg, u64 data)
{
- return ((u64)rd_reg32(REG64_MS32(reg)) << 32 |
- (u64)rd_reg32(REG64_LS32(reg)));
+#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
+ if (caam_little_end) {
+ wr_reg32((u32 __iomem *)(reg) + 1, data >> 32);
+ wr_reg32((u32 __iomem *)(reg), data);
+ } else
+#endif
+ {
+ wr_reg32((u32 __iomem *)(reg), data >> 32);
+ wr_reg32((u32 __iomem *)(reg) + 1, data);
+ }
}
+
+static inline u64 rd_reg64(void __iomem *reg)
+{
+#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
+ if (caam_little_end)
+ return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 |
+ (u64)rd_reg32((u32 __iomem *)(reg)));
+ else
#endif
+ return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
+ (u64)rd_reg32((u32 __iomem *)(reg) + 1));
+}
+#endif /* CONFIG_64BIT */
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+#ifdef CONFIG_SOC_IMX7D
+#define cpu_to_caam_dma(value) \
+ (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
+ (u64)cpu_to_caam32(upper_32_bits(value)))
+#define caam_dma_to_cpu(value) \
+ (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) | \
+ (u64)caam32_to_cpu(upper_32_bits(value)))
+#else
+#define cpu_to_caam_dma(value) cpu_to_caam64(value)
+#define caam_dma_to_cpu(value) caam64_to_cpu(value)
+#endif /* CONFIG_SOC_IMX7D */
+#else
+#define cpu_to_caam_dma(value) cpu_to_caam32(value)
+#define caam_dma_to_cpu(value) caam32_to_cpu(value)
+#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
/*
* jr_outentry
@@ -249,6 +298,8 @@ struct caam_perfmon {
u32 faultliodn; /* FALR - Fault Address LIODN */
u32 faultdetail; /* FADR - Fault Addr Detail */
u32 rsvd2;
+#define CSTA_PLEND BIT(10)
+#define CSTA_ALT_PLEND BIT(18)
u32 status; /* CSTA - CAAM Status */
u64 rsvd3;
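
With the compile-time endianness #ifdefs gone, every CAAM access goes
through helpers selected at runtime by the caam_little_end flag. A short
sketch of the intended usage (illustrative; the register names are from
the driver, not this hunk):

	u32 status = rd_reg32(&jrp->rregs->jrintstatus);

	clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0); /* unmask */

	sec4_sg->ptr = cpu_to_caam_dma(dma);	/* h/w-visible memory too */
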
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index 12ec6616e..19dc64fed 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -5,18 +5,19 @@
*
*/
+#include "regs.h"
+
struct sec4_sg_entry;
/*
* convert single dma address to h/w link table format
*/
static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
- dma_addr_t dma, u32 len, u32 offset)
+ dma_addr_t dma, u32 len, u16 offset)
{
- sec4_sg_ptr->ptr = dma;
- sec4_sg_ptr->len = len;
- sec4_sg_ptr->buf_pool_id = 0;
- sec4_sg_ptr->offset = offset;
+ sec4_sg_ptr->ptr = cpu_to_caam_dma(dma);
+ sec4_sg_ptr->len = cpu_to_caam32(len);
+ sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & SEC4_SG_OFFSET_MASK);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ",
DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr,
@@ -30,7 +31,7 @@ static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
*/
static inline struct sec4_sg_entry *
sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
- struct sec4_sg_entry *sec4_sg_ptr, u32 offset)
+ struct sec4_sg_entry *sec4_sg_ptr, u16 offset)
{
while (sg_count) {
dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg),
@@ -48,10 +49,10 @@ sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
*/
static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count,
struct sec4_sg_entry *sec4_sg_ptr,
- u32 offset)
+ u16 offset)
{
sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
- sec4_sg_ptr->len |= SEC4_SG_LEN_FIN;
+ sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
}
static inline struct sec4_sg_entry *sg_to_sec4_sg_len(
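
A minimal sketch of mapping a scatterlist into the hardware link table
with the updated helpers (illustrative only; dev, req, nents and sec4_sg
are assumed to be set up by the caller):

	int count = dma_map_sg(dev, req->src, nents, DMA_TO_DEVICE);

	if (!count)
		return -ENOMEM;

	/* last entry gets SEC4_SG_LEN_FIN, already in CAAM byte order */
	sg_to_sec4_sg_last(req->src, count, sec4_sg, 0);
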
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 0d0d4529e..58a4244b4 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -14,9 +14,8 @@
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
-#include <linux/crypto.h>
-#include <crypto/algapi.h>
#include <crypto/aes.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include "ccp-crypto.h"
@@ -110,15 +109,12 @@ static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
ctx->u.aes.key_len = key_len / 2;
sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
- return crypto_ablkcipher_setkey(ctx->u.aes.tfm_ablkcipher, key,
- key_len);
+ return crypto_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len);
}
static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
unsigned int encrypt)
{
- struct crypto_tfm *tfm =
- crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
unsigned int unit;
@@ -146,14 +142,19 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) ||
(ctx->u.aes.key_len != AES_KEYSIZE_128)) {
+ SKCIPHER_REQUEST_ON_STACK(subreq, ctx->u.aes.tfm_skcipher);
+
/* Use the fallback to process the request for any
* unsupported unit sizes or key sizes
*/
- ablkcipher_request_set_tfm(req, ctx->u.aes.tfm_ablkcipher);
- ret = (encrypt) ? crypto_ablkcipher_encrypt(req) :
- crypto_ablkcipher_decrypt(req);
- ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
-
+ skcipher_request_set_tfm(subreq, ctx->u.aes.tfm_skcipher);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->nbytes, req->info);
+ ret = encrypt ? crypto_skcipher_encrypt(subreq) :
+ crypto_skcipher_decrypt(subreq);
+ skcipher_request_zero(subreq);
return ret;
}
@@ -192,23 +193,21 @@ static int ccp_aes_xts_decrypt(struct ablkcipher_request *req)
static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_ablkcipher *fallback_tfm;
+ struct crypto_skcipher *fallback_tfm;
ctx->complete = ccp_aes_xts_complete;
ctx->u.aes.key_len = 0;
- fallback_tfm = crypto_alloc_ablkcipher(crypto_tfm_alg_name(tfm), 0,
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK);
+ fallback_tfm = crypto_alloc_skcipher("xts(aes)", 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback_tfm)) {
- pr_warn("could not load fallback driver %s\n",
- crypto_tfm_alg_name(tfm));
+ pr_warn("could not load fallback driver xts(aes)\n");
return PTR_ERR(fallback_tfm);
}
- ctx->u.aes.tfm_ablkcipher = fallback_tfm;
+ ctx->u.aes.tfm_skcipher = fallback_tfm;
- tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx) +
- fallback_tfm->base.crt_ablkcipher.reqsize;
+ tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx);
return 0;
}
@@ -217,9 +216,7 @@ static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
- if (ctx->u.aes.tfm_ablkcipher)
- crypto_free_ablkcipher(ctx->u.aes.tfm_ablkcipher);
- ctx->u.aes.tfm_ablkcipher = NULL;
+ crypto_free_skcipher(ctx->u.aes.tfm_skcipher);
}
static int ccp_register_aes_xts_alg(struct list_head *head,
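
The generic shape of the on-stack sub-request pattern used above, for
reference (a sketch; fallback_tfm stands for the allocated skcipher
fallback):

	SKCIPHER_REQUEST_ON_STACK(subreq, fallback_tfm);

	skcipher_request_set_tfm(subreq, fallback_tfm);
	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->nbytes, req->info);
	ret = encrypt ? crypto_skcipher_encrypt(subreq) :
			crypto_skcipher_decrypt(subreq);
	skcipher_request_zero(subreq);	/* wipe key material off the stack */
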
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index a326ec20b..8335b32e8 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -17,7 +17,6 @@
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/ccp.h>
-#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
@@ -69,7 +68,7 @@ static inline struct ccp_crypto_ahash_alg *
/***** AES related defines *****/
struct ccp_aes_ctx {
/* Fallback cipher for XTS with unsupported unit sizes */
- struct crypto_ablkcipher *tfm_ablkcipher;
+ struct crypto_skcipher *tfm_skcipher;
/* Cipher used to generate CMAC K1/K2 keys */
struct crypto_cipher *tfm_cipher;
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index e8ef9fd24..d64af8625 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -31,22 +31,42 @@
#include "cesa.h"
+/* Limit of the crypto queue before reaching the backlog */
+#define CESA_CRYPTO_DEFAULT_MAX_QLEN 128
+
static int allhwsupport = !IS_ENABLED(CONFIG_CRYPTO_DEV_MV_CESA);
module_param_named(allhwsupport, allhwsupport, int, 0444);
MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even if it overlaps with the mv_cesa driver)");
struct mv_cesa_dev *cesa_dev;
-static void mv_cesa_dequeue_req_unlocked(struct mv_cesa_engine *engine)
+struct crypto_async_request *
+mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
+ struct crypto_async_request **backlog)
{
- struct crypto_async_request *req, *backlog;
+ struct crypto_async_request *req;
+
+ *backlog = crypto_get_backlog(&engine->queue);
+ req = crypto_dequeue_request(&engine->queue);
+
+ if (!req)
+ return NULL;
+
+ return req;
+}
+
+static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
+{
+ struct crypto_async_request *req = NULL, *backlog = NULL;
struct mv_cesa_ctx *ctx;
- spin_lock_bh(&cesa_dev->lock);
- backlog = crypto_get_backlog(&cesa_dev->queue);
- req = crypto_dequeue_request(&cesa_dev->queue);
- engine->req = req;
- spin_unlock_bh(&cesa_dev->lock);
+
+ spin_lock_bh(&engine->lock);
+ if (!engine->req) {
+ req = mv_cesa_dequeue_req_locked(engine, &backlog);
+ engine->req = req;
+ }
+ spin_unlock_bh(&engine->lock);
if (!req)
return;
@@ -55,8 +75,47 @@ static void mv_cesa_dequeue_req_unlocked(struct mv_cesa_engine *engine)
backlog->complete(backlog, -EINPROGRESS);
ctx = crypto_tfm_ctx(req->tfm);
- ctx->ops->prepare(req, engine);
ctx->ops->step(req);
+
+ return;
+}
+
+static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
+{
+ struct crypto_async_request *req;
+ struct mv_cesa_ctx *ctx;
+ int res;
+
+ req = engine->req;
+ ctx = crypto_tfm_ctx(req->tfm);
+ res = ctx->ops->process(req, status);
+
+ if (res == 0) {
+ ctx->ops->complete(req);
+ mv_cesa_engine_enqueue_complete_request(engine, req);
+ } else if (res == -EINPROGRESS) {
+ ctx->ops->step(req);
+ }
+
+ return res;
+}
+
+static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
+{
+ if (engine->chain.first && engine->chain.last)
+ return mv_cesa_tdma_process(engine, status);
+
+ return mv_cesa_std_process(engine, status);
+}
+
+static inline void
+mv_cesa_complete_req(struct mv_cesa_ctx *ctx, struct crypto_async_request *req,
+ int res)
+{
+ ctx->ops->cleanup(req);
+ local_bh_disable();
+ req->complete(req, res);
+ local_bh_enable();
}
static irqreturn_t mv_cesa_int(int irq, void *priv)
@@ -83,49 +142,55 @@ static irqreturn_t mv_cesa_int(int irq, void *priv)
writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS);
writel(~status, engine->regs + CESA_SA_INT_STATUS);
+ /* Process fetched requests */
+ res = mv_cesa_int_process(engine, status & mask);
ret = IRQ_HANDLED;
+
spin_lock_bh(&engine->lock);
req = engine->req;
+ if (res != -EINPROGRESS)
+ engine->req = NULL;
spin_unlock_bh(&engine->lock);
- if (req) {
- ctx = crypto_tfm_ctx(req->tfm);
- res = ctx->ops->process(req, status & mask);
- if (res != -EINPROGRESS) {
- spin_lock_bh(&engine->lock);
- engine->req = NULL;
- mv_cesa_dequeue_req_unlocked(engine);
- spin_unlock_bh(&engine->lock);
- ctx->ops->cleanup(req);
- local_bh_disable();
- req->complete(req, res);
- local_bh_enable();
- } else {
- ctx->ops->step(req);
- }
+
+ ctx = crypto_tfm_ctx(req->tfm);
+
+ if (res && res != -EINPROGRESS)
+ mv_cesa_complete_req(ctx, req, res);
+
+ /* Launch the next pending request */
+ mv_cesa_rearm_engine(engine);
+
+ /* Iterate over the complete queue */
+ while (true) {
+ req = mv_cesa_engine_dequeue_complete_request(engine);
+ if (!req)
+ break;
+
+ mv_cesa_complete_req(ctx, req, 0);
}
}
return ret;
}
-int mv_cesa_queue_req(struct crypto_async_request *req)
+int mv_cesa_queue_req(struct crypto_async_request *req,
+ struct mv_cesa_req *creq)
{
int ret;
- int i;
+ struct mv_cesa_engine *engine = creq->engine;
- spin_lock_bh(&cesa_dev->lock);
- ret = crypto_enqueue_request(&cesa_dev->queue, req);
- spin_unlock_bh(&cesa_dev->lock);
+ spin_lock_bh(&engine->lock);
+ ret = crypto_enqueue_request(&engine->queue, req);
+ if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) &&
+ (ret == -EINPROGRESS ||
+ (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ mv_cesa_tdma_chain(engine, creq);
+ spin_unlock_bh(&engine->lock);
if (ret != -EINPROGRESS)
return ret;
- for (i = 0; i < cesa_dev->caps->nengines; i++) {
- spin_lock_bh(&cesa_dev->engines[i].lock);
- if (!cesa_dev->engines[i].req)
- mv_cesa_dequeue_req_unlocked(&cesa_dev->engines[i]);
- spin_unlock_bh(&cesa_dev->engines[i].lock);
- }
+ mv_cesa_rearm_engine(engine);
return -EINPROGRESS;
}
@@ -309,6 +374,10 @@ static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
if (!dma->padding_pool)
return -ENOMEM;
+ dma->iv_pool = dmam_pool_create("cesa_iv", dev, 16, 1, 0);
+ if (!dma->iv_pool)
+ return -ENOMEM;
+
cesa->dma = dma;
return 0;
@@ -416,7 +485,7 @@ static int mv_cesa_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&cesa->lock);
- crypto_init_queue(&cesa->queue, 50);
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
cesa->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(cesa->regs))
@@ -489,6 +558,10 @@ static int mv_cesa_probe(struct platform_device *pdev)
engine);
if (ret)
goto err_cleanup;
+
+ crypto_init_queue(&engine->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN);
+ atomic_set(&engine->load, 0);
+ INIT_LIST_HEAD(&engine->complete_queue);
}
cesa_dev = cesa;
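
A condensed sketch of the reworked per-engine interrupt flow (names from
the patch; locking and error paths simplified for illustration):

	res = mv_cesa_int_process(engine, status & mask);
	if (res && res != -EINPROGRESS)
		mv_cesa_complete_req(ctx, req, res);	/* report failure */

	mv_cesa_rearm_engine(engine);			/* launch next req */

	while ((req = mv_cesa_engine_dequeue_complete_request(engine)))
		mv_cesa_complete_req(ctx, req, 0);	/* flush finished */
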
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index 74071e45a..e423d33de 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -271,10 +271,13 @@ struct mv_cesa_op_ctx {
/* TDMA descriptor flags */
#define CESA_TDMA_DST_IN_SRAM BIT(31)
#define CESA_TDMA_SRC_IN_SRAM BIT(30)
-#define CESA_TDMA_TYPE_MSK GENMASK(29, 0)
+#define CESA_TDMA_END_OF_REQ BIT(29)
+#define CESA_TDMA_BREAK_CHAIN BIT(28)
+#define CESA_TDMA_TYPE_MSK GENMASK(27, 0)
#define CESA_TDMA_DUMMY 0
#define CESA_TDMA_DATA 1
#define CESA_TDMA_OP 2
+#define CESA_TDMA_IV 3
/**
* struct mv_cesa_tdma_desc - TDMA descriptor
@@ -390,6 +393,7 @@ struct mv_cesa_dev_dma {
struct dma_pool *op_pool;
struct dma_pool *cache_pool;
struct dma_pool *padding_pool;
+ struct dma_pool *iv_pool;
};
/**
@@ -398,7 +402,6 @@ struct mv_cesa_dev_dma {
* @regs: device registers
* @sram_size: usable SRAM size
* @lock: device lock
- * @queue: crypto request queue
* @engines: array of engines
* @dma: dma pools
*
@@ -410,7 +413,6 @@ struct mv_cesa_dev {
struct device *dev;
unsigned int sram_size;
spinlock_t lock;
- struct crypto_queue queue;
struct mv_cesa_engine *engines;
struct mv_cesa_dev_dma *dma;
};
@@ -429,6 +431,11 @@ struct mv_cesa_dev {
* @int_mask: interrupt mask cache
* @pool: memory pool pointing to the memory region reserved in
* SRAM
+ * @queue: fifo of the pending crypto requests
+ * @load: engine load counter, useful for load balancing
+ * @chain: list of the current tdma descriptors being processed
+ * by this engine.
+ * @complete_queue: fifo of requests processed by the engine
*
* Structure storing CESA engine information.
*/
@@ -444,23 +451,27 @@ struct mv_cesa_engine {
size_t max_req_len;
u32 int_mask;
struct gen_pool *pool;
+ struct crypto_queue queue;
+ atomic_t load;
+ struct mv_cesa_tdma_chain chain;
+ struct list_head complete_queue;
};
/**
* struct mv_cesa_req_ops - CESA request operations
- * @prepare: prepare a request to be executed on the specified engine
* @process: process a request chunk result (should return 0 if the
* operation, -EINPROGRESS if it needs more steps or an error
* code)
* @step: launch the crypto operation on the next chunk
* @cleanup: cleanup the crypto request (release associated data)
+ * @complete: complete the request, i.e. copy result or context from SRAM
+ *            when needed.
*/
struct mv_cesa_req_ops {
- void (*prepare)(struct crypto_async_request *req,
- struct mv_cesa_engine *engine);
int (*process)(struct crypto_async_request *req, u32 status);
void (*step)(struct crypto_async_request *req);
void (*cleanup)(struct crypto_async_request *req);
+ void (*complete)(struct crypto_async_request *req);
};
/**
@@ -507,21 +518,11 @@ enum mv_cesa_req_type {
/**
* struct mv_cesa_req - CESA request
- * @type: request type
* @engine: engine associated with this request
+ * @chain: list of tdma descriptors associated with this request
*/
struct mv_cesa_req {
- enum mv_cesa_req_type type;
struct mv_cesa_engine *engine;
-};
-
-/**
- * struct mv_cesa_tdma_req - CESA TDMA request
- * @base: base information
- * @chain: TDMA chain
- */
-struct mv_cesa_tdma_req {
- struct mv_cesa_req base;
struct mv_cesa_tdma_chain chain;
};
@@ -538,13 +539,11 @@ struct mv_cesa_sg_std_iter {
/**
* struct mv_cesa_ablkcipher_std_req - cipher standard request
- * @base: base information
* @op: operation context
* @offset: current operation offset
* @size: size of the crypto operation
*/
struct mv_cesa_ablkcipher_std_req {
- struct mv_cesa_req base;
struct mv_cesa_op_ctx op;
unsigned int offset;
unsigned int size;
@@ -558,34 +557,27 @@ struct mv_cesa_ablkcipher_std_req {
* @dst_nents: number of entries in the dest sg list
*/
struct mv_cesa_ablkcipher_req {
- union {
- struct mv_cesa_req base;
- struct mv_cesa_tdma_req dma;
- struct mv_cesa_ablkcipher_std_req std;
- } req;
+ struct mv_cesa_req base;
+ struct mv_cesa_ablkcipher_std_req std;
int src_nents;
int dst_nents;
};
/**
* struct mv_cesa_ahash_std_req - standard hash request
- * @base: base information
* @offset: current operation offset
*/
struct mv_cesa_ahash_std_req {
- struct mv_cesa_req base;
unsigned int offset;
};
/**
* struct mv_cesa_ahash_dma_req - DMA hash request
- * @base: base information
* @padding: padding buffer
* @padding_dma: DMA address of the padding buffer
* @cache_dma: DMA address of the cache buffer
*/
struct mv_cesa_ahash_dma_req {
- struct mv_cesa_tdma_req base;
u8 *padding;
dma_addr_t padding_dma;
u8 *cache;
@@ -604,8 +596,8 @@ struct mv_cesa_ahash_dma_req {
* @state: hash state
*/
struct mv_cesa_ahash_req {
+ struct mv_cesa_req base;
union {
- struct mv_cesa_req base;
struct mv_cesa_ahash_dma_req dma;
struct mv_cesa_ahash_std_req std;
} req;
@@ -623,6 +615,35 @@ struct mv_cesa_ahash_req {
extern struct mv_cesa_dev *cesa_dev;
+
+static inline void
+mv_cesa_engine_enqueue_complete_request(struct mv_cesa_engine *engine,
+ struct crypto_async_request *req)
+{
+ list_add_tail(&req->list, &engine->complete_queue);
+}
+
+static inline struct crypto_async_request *
+mv_cesa_engine_dequeue_complete_request(struct mv_cesa_engine *engine)
+{
+ struct crypto_async_request *req;
+
+ req = list_first_entry_or_null(&engine->complete_queue,
+ struct crypto_async_request,
+ list);
+ if (req)
+ list_del(&req->list);
+
+ return req;
+}
+
+
+static inline enum mv_cesa_req_type
+mv_cesa_req_get_type(struct mv_cesa_req *req)
+{
+ return req->chain.first ? CESA_DMA_REQ : CESA_STD_REQ;
+}
+
static inline void mv_cesa_update_op_cfg(struct mv_cesa_op_ctx *op,
u32 cfg, u32 mask)
{
@@ -695,7 +716,32 @@ static inline bool mv_cesa_mac_op_is_first_frag(const struct mv_cesa_op_ctx *op)
CESA_SA_DESC_CFG_FIRST_FRAG;
}
-int mv_cesa_queue_req(struct crypto_async_request *req);
+int mv_cesa_queue_req(struct crypto_async_request *req,
+ struct mv_cesa_req *creq);
+
+struct crypto_async_request *
+mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
+ struct crypto_async_request **backlog);
+
+static inline struct mv_cesa_engine *mv_cesa_select_engine(int weight)
+{
+ int i;
+ u32 min_load = U32_MAX;
+ struct mv_cesa_engine *selected = NULL;
+
+ for (i = 0; i < cesa_dev->caps->nengines; i++) {
+ struct mv_cesa_engine *engine = cesa_dev->engines + i;
+ u32 load = atomic_read(&engine->load);
+ if (load < min_load) {
+ min_load = load;
+ selected = engine;
+ }
+ }
+
+ atomic_add(weight, &selected->load);
+
+ return selected;
+}
/*
* Helper function that indicates whether a crypto request needs to be
@@ -765,9 +811,9 @@ static inline bool mv_cesa_req_dma_iter_next_op(struct mv_cesa_dma_iter *iter)
return iter->op_len;
}
-void mv_cesa_dma_step(struct mv_cesa_tdma_req *dreq);
+void mv_cesa_dma_step(struct mv_cesa_req *dreq);
-static inline int mv_cesa_dma_process(struct mv_cesa_tdma_req *dreq,
+static inline int mv_cesa_dma_process(struct mv_cesa_req *dreq,
u32 status)
{
if (!(status & CESA_SA_INT_ACC0_IDMA_DONE))
@@ -779,10 +825,13 @@ static inline int mv_cesa_dma_process(struct mv_cesa_tdma_req *dreq,
return 0;
}
-void mv_cesa_dma_prepare(struct mv_cesa_tdma_req *dreq,
+void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
struct mv_cesa_engine *engine);
+void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq);
+void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
+ struct mv_cesa_req *dreq);
+int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status);
-void mv_cesa_dma_cleanup(struct mv_cesa_tdma_req *dreq);
static inline void
mv_cesa_tdma_desc_iter_init(struct mv_cesa_tdma_chain *chain)
@@ -790,6 +839,9 @@ mv_cesa_tdma_desc_iter_init(struct mv_cesa_tdma_chain *chain)
memset(chain, 0, sizeof(*chain));
}
+int mv_cesa_dma_add_iv_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
+ u32 size, u32 flags, gfp_t gfp_flags);
+
struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
const struct mv_cesa_op_ctx *op_templ,
bool skip_ctx,
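
A sketch of the load-balanced submission path these helpers enable
(illustrative; the weight added here is subtracted again in the
->complete() handlers via atomic_sub()):

	struct mv_cesa_engine *engine;

	engine = mv_cesa_select_engine(req->nbytes);	/* least loaded */
	mv_cesa_ablkcipher_prepare(&req->base, engine);	/* bind request */
	ret = mv_cesa_queue_req(&req->base, &creq->base);
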
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index dcf1fceb9..d19dc9614 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -70,25 +70,28 @@ mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req)
dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
DMA_BIDIRECTIONAL);
}
- mv_cesa_dma_cleanup(&creq->req.dma);
+ mv_cesa_dma_cleanup(&creq->base);
}
static inline void mv_cesa_ablkcipher_cleanup(struct ablkcipher_request *req)
{
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
- if (creq->req.base.type == CESA_DMA_REQ)
+ if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_ablkcipher_dma_cleanup(req);
}
static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req)
{
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
- struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
- struct mv_cesa_engine *engine = sreq->base.engine;
+ struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
+ struct mv_cesa_engine *engine = creq->base.engine;
size_t len = min_t(size_t, req->nbytes - sreq->offset,
CESA_SA_SRAM_PAYLOAD_SIZE);
+ mv_cesa_adjust_op(engine, &sreq->op);
+ memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
+
len = sg_pcopy_to_buffer(req->src, creq->src_nents,
engine->sram + CESA_SA_DATA_SRAM_OFFSET,
len, sreq->offset);
@@ -106,6 +109,8 @@ static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req)
mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
+ BUG_ON(readl(engine->regs + CESA_SA_CMD) &
+ CESA_SA_CMD_EN_CESA_SA_ACCL0);
writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
@@ -113,8 +118,8 @@ static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req,
u32 status)
{
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
- struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
- struct mv_cesa_engine *engine = sreq->base.engine;
+ struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
+ struct mv_cesa_engine *engine = creq->base.engine;
size_t len;
len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
@@ -133,23 +138,12 @@ static int mv_cesa_ablkcipher_process(struct crypto_async_request *req,
{
struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
- struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
- struct mv_cesa_engine *engine = sreq->base.engine;
- int ret;
-
- if (creq->req.base.type == CESA_DMA_REQ)
- ret = mv_cesa_dma_process(&creq->req.dma, status);
- else
- ret = mv_cesa_ablkcipher_std_process(ablkreq, status);
-
- if (ret)
- return ret;
+ struct mv_cesa_req *basereq = &creq->base;
- memcpy_fromio(ablkreq->info,
- engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
- crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq)));
+ if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
+ return mv_cesa_ablkcipher_std_process(ablkreq, status);
- return 0;
+ return mv_cesa_dma_process(basereq, status);
}
static void mv_cesa_ablkcipher_step(struct crypto_async_request *req)
@@ -157,8 +151,8 @@ static void mv_cesa_ablkcipher_step(struct crypto_async_request *req)
struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
- if (creq->req.base.type == CESA_DMA_REQ)
- mv_cesa_dma_step(&creq->req.dma);
+ if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
+ mv_cesa_dma_step(&creq->base);
else
mv_cesa_ablkcipher_std_step(ablkreq);
}
@@ -167,22 +161,19 @@ static inline void
mv_cesa_ablkcipher_dma_prepare(struct ablkcipher_request *req)
{
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
- struct mv_cesa_tdma_req *dreq = &creq->req.dma;
+ struct mv_cesa_req *basereq = &creq->base;
- mv_cesa_dma_prepare(dreq, dreq->base.engine);
+ mv_cesa_dma_prepare(basereq, basereq->engine);
}
static inline void
mv_cesa_ablkcipher_std_prepare(struct ablkcipher_request *req)
{
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
- struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
- struct mv_cesa_engine *engine = sreq->base.engine;
+ struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
sreq->size = 0;
sreq->offset = 0;
- mv_cesa_adjust_op(engine, &sreq->op);
- memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
}
static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req,
@@ -190,9 +181,9 @@ static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req,
{
struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
- creq->req.base.engine = engine;
+ creq->base.engine = engine;
- if (creq->req.base.type == CESA_DMA_REQ)
+ if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_ablkcipher_dma_prepare(ablkreq);
else
mv_cesa_ablkcipher_std_prepare(ablkreq);
@@ -206,11 +197,34 @@ mv_cesa_ablkcipher_req_cleanup(struct crypto_async_request *req)
mv_cesa_ablkcipher_cleanup(ablkreq);
}
+static void
+mv_cesa_ablkcipher_complete(struct crypto_async_request *req)
+{
+ struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
+ struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
+ struct mv_cesa_engine *engine = creq->base.engine;
+ unsigned int ivsize;
+
+ atomic_sub(ablkreq->nbytes, &engine->load);
+ ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq));
+
+ if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
+ struct mv_cesa_req *basereq;
+
+ basereq = &creq->base;
+ memcpy(ablkreq->info, basereq->chain.last->data, ivsize);
+ } else {
+ memcpy_fromio(ablkreq->info,
+ engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
+ ivsize);
+ }
+}
+
static const struct mv_cesa_req_ops mv_cesa_ablkcipher_req_ops = {
.step = mv_cesa_ablkcipher_step,
.process = mv_cesa_ablkcipher_process,
- .prepare = mv_cesa_ablkcipher_prepare,
.cleanup = mv_cesa_ablkcipher_req_cleanup,
+ .complete = mv_cesa_ablkcipher_complete,
};
static int mv_cesa_ablkcipher_cra_init(struct crypto_tfm *tfm)
@@ -295,15 +309,14 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- struct mv_cesa_tdma_req *dreq = &creq->req.dma;
+ struct mv_cesa_req *basereq = &creq->base;
struct mv_cesa_ablkcipher_dma_iter iter;
- struct mv_cesa_tdma_chain chain;
bool skip_ctx = false;
int ret;
+ unsigned int ivsize;
- dreq->base.type = CESA_DMA_REQ;
- dreq->chain.first = NULL;
- dreq->chain.last = NULL;
+ basereq->chain.first = NULL;
+ basereq->chain.last = NULL;
if (req->src != req->dst) {
ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
@@ -324,13 +337,13 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
return -ENOMEM;
}
- mv_cesa_tdma_desc_iter_init(&chain);
+ mv_cesa_tdma_desc_iter_init(&basereq->chain);
mv_cesa_ablkcipher_req_iter_init(&iter, req);
do {
struct mv_cesa_op_ctx *op;
- op = mv_cesa_dma_add_op(&chain, op_templ, skip_ctx, flags);
+ op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx, flags);
if (IS_ERR(op)) {
ret = PTR_ERR(op);
goto err_free_tdma;
@@ -340,30 +353,38 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
mv_cesa_set_crypt_op_len(op, iter.base.op_len);
/* Add input transfers */
- ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base,
+ ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
&iter.src, flags);
if (ret)
goto err_free_tdma;
/* Add dummy desc to launch the crypto operation */
- ret = mv_cesa_dma_add_dummy_launch(&chain, flags);
+ ret = mv_cesa_dma_add_dummy_launch(&basereq->chain, flags);
if (ret)
goto err_free_tdma;
/* Add output transfers */
- ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base,
+ ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
&iter.dst, flags);
if (ret)
goto err_free_tdma;
} while (mv_cesa_ablkcipher_req_iter_next_op(&iter));
- dreq->chain = chain;
+ /* Add output data for IV */
+ ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
+ ret = mv_cesa_dma_add_iv_op(&basereq->chain, CESA_SA_CRYPT_IV_SRAM_OFFSET,
+ ivsize, CESA_TDMA_SRC_IN_SRAM, flags);
+
+ if (ret)
+ goto err_free_tdma;
+
+ basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;
return 0;
err_free_tdma:
- mv_cesa_dma_cleanup(dreq);
+ mv_cesa_dma_cleanup(basereq);
if (req->dst != req->src)
dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
DMA_FROM_DEVICE);
@@ -380,11 +401,13 @@ mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req,
const struct mv_cesa_op_ctx *op_templ)
{
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
- struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
+ struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
+ struct mv_cesa_req *basereq = &creq->base;
- sreq->base.type = CESA_STD_REQ;
sreq->op = *op_templ;
sreq->skip_ctx = false;
+ basereq->chain.first = NULL;
+ basereq->chain.last = NULL;
return 0;
}
@@ -414,7 +437,6 @@ static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req,
mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
CESA_SA_DESC_CFG_OP_MSK);
- /* TODO: add a threshold for DMA usage */
if (cesa_dev->caps->has_tdma)
ret = mv_cesa_ablkcipher_dma_req_init(req, tmpl);
else
@@ -423,28 +445,41 @@ static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req,
return ret;
}
-static int mv_cesa_des_op(struct ablkcipher_request *req,
- struct mv_cesa_op_ctx *tmpl)
+static int mv_cesa_ablkcipher_queue_req(struct ablkcipher_request *req,
+ struct mv_cesa_op_ctx *tmpl)
{
- struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
int ret;
-
- mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
- CESA_SA_DESC_CFG_CRYPTM_MSK);
-
- memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE);
+ struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
+ struct mv_cesa_engine *engine;
ret = mv_cesa_ablkcipher_req_init(req, tmpl);
if (ret)
return ret;
- ret = mv_cesa_queue_req(&req->base);
+ engine = mv_cesa_select_engine(req->nbytes);
+ mv_cesa_ablkcipher_prepare(&req->base, engine);
+
+ ret = mv_cesa_queue_req(&req->base, &creq->base);
+
if (mv_cesa_req_needs_cleanup(&req->base, ret))
mv_cesa_ablkcipher_cleanup(req);
return ret;
}
+static int mv_cesa_des_op(struct ablkcipher_request *req,
+ struct mv_cesa_op_ctx *tmpl)
+{
+ struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
+ CESA_SA_DESC_CFG_CRYPTM_MSK);
+
+ memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE);
+
+ return mv_cesa_ablkcipher_queue_req(req, tmpl);
+}
+
static int mv_cesa_ecb_des_encrypt(struct ablkcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -547,22 +582,13 @@ static int mv_cesa_des3_op(struct ablkcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- int ret;
mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
CESA_SA_DESC_CFG_CRYPTM_MSK);
memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE);
- ret = mv_cesa_ablkcipher_req_init(req, tmpl);
- if (ret)
- return ret;
-
- ret = mv_cesa_queue_req(&req->base);
- if (mv_cesa_req_needs_cleanup(&req->base, ret))
- mv_cesa_ablkcipher_cleanup(req);
-
- return ret;
+ return mv_cesa_ablkcipher_queue_req(req, tmpl);
}
static int mv_cesa_ecb_des3_ede_encrypt(struct ablkcipher_request *req)
@@ -673,7 +699,7 @@ static int mv_cesa_aes_op(struct ablkcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- int ret, i;
+ int i;
u32 *key;
u32 cfg;
@@ -696,15 +722,7 @@ static int mv_cesa_aes_op(struct ablkcipher_request *req,
CESA_SA_DESC_CFG_CRYPTM_MSK |
CESA_SA_DESC_CFG_AES_LEN_MSK);
- ret = mv_cesa_ablkcipher_req_init(req, tmpl);
- if (ret)
- return ret;
-
- ret = mv_cesa_queue_req(&req->base);
- if (mv_cesa_req_needs_cleanup(&req->base, ret))
- mv_cesa_ablkcipher_cleanup(req);
-
- return ret;
+ return mv_cesa_ablkcipher_queue_req(req, tmpl);
}
static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req)
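
With the queueing boilerplate factored into
mv_cesa_ablkcipher_queue_req(), every cipher op reduces to template
setup plus one call; a sketch of the shared shape (hypothetical op
name):

	static int mv_cesa_example_op(struct ablkcipher_request *req,
				      struct mv_cesa_op_ctx *tmpl)
	{
		/* algorithm-specific bits: mode config, key copy, ... */
		return mv_cesa_ablkcipher_queue_req(req, tmpl);
	}
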
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 7a5058da9..82e0f4e6e 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -103,14 +103,14 @@ static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
mv_cesa_ahash_dma_free_cache(&creq->req.dma);
- mv_cesa_dma_cleanup(&creq->req.dma.base);
+ mv_cesa_dma_cleanup(&creq->base);
}
static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
- if (creq->req.base.type == CESA_DMA_REQ)
+ if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_ahash_dma_cleanup(req);
}
@@ -118,7 +118,7 @@ static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
- if (creq->req.base.type == CESA_DMA_REQ)
+ if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_ahash_dma_last_cleanup(req);
}
@@ -157,11 +157,23 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
- struct mv_cesa_engine *engine = sreq->base.engine;
+ struct mv_cesa_engine *engine = creq->base.engine;
struct mv_cesa_op_ctx *op;
unsigned int new_cache_ptr = 0;
u32 frag_mode;
size_t len;
+ unsigned int digsize;
+ int i;
+
+ mv_cesa_adjust_op(engine, &creq->op_tmpl);
+ memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
+
+ digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
+ for (i = 0; i < digsize / 4; i++)
+ writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
if (creq->cache_ptr)
memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
@@ -237,6 +249,8 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
+ BUG_ON(readl(engine->regs + CESA_SA_CMD) &
+ CESA_SA_CMD_EN_CESA_SA_ACCL0);
writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
@@ -254,20 +268,17 @@ static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
- struct mv_cesa_tdma_req *dreq = &creq->req.dma.base;
+ struct mv_cesa_req *basereq = &creq->base;
- mv_cesa_dma_prepare(dreq, dreq->base.engine);
+ mv_cesa_dma_prepare(basereq, basereq->engine);
}
static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
- struct mv_cesa_engine *engine = sreq->base.engine;
sreq->offset = 0;
- mv_cesa_adjust_op(engine, &creq->op_tmpl);
- memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
}
static void mv_cesa_ahash_step(struct crypto_async_request *req)
@@ -275,8 +286,8 @@ static void mv_cesa_ahash_step(struct crypto_async_request *req)
struct ahash_request *ahashreq = ahash_request_cast(req);
struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
- if (creq->req.base.type == CESA_DMA_REQ)
- mv_cesa_dma_step(&creq->req.dma.base);
+ if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
+ mv_cesa_dma_step(&creq->base);
else
mv_cesa_ahash_std_step(ahashreq);
}
@@ -285,28 +296,25 @@ static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
struct ahash_request *ahashreq = ahash_request_cast(req);
struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
- struct mv_cesa_engine *engine = creq->req.base.engine;
- unsigned int digsize;
- int ret, i;
- if (creq->req.base.type == CESA_DMA_REQ)
- ret = mv_cesa_dma_process(&creq->req.dma.base, status);
- else
- ret = mv_cesa_ahash_std_process(ahashreq, status);
+ if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
+ return mv_cesa_dma_process(&creq->base, status);
- if (ret == -EINPROGRESS)
- return ret;
+ return mv_cesa_ahash_std_process(ahashreq, status);
+}
+
+static void mv_cesa_ahash_complete(struct crypto_async_request *req)
+{
+ struct ahash_request *ahashreq = ahash_request_cast(req);
+ struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
+ struct mv_cesa_engine *engine = creq->base.engine;
+ unsigned int digsize;
+ int i;
digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
for (i = 0; i < digsize / 4; i++)
creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i));
- if (creq->cache_ptr)
- sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
- creq->cache,
- creq->cache_ptr,
- ahashreq->nbytes - creq->cache_ptr);
-
if (creq->last_req) {
/*
* Hardware's MD5 digest is in little endian format, but
@@ -325,7 +333,7 @@ static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
}
}
- return ret;
+ atomic_sub(ahashreq->nbytes, &engine->load);
}
static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
@@ -333,19 +341,13 @@ static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
{
struct ahash_request *ahashreq = ahash_request_cast(req);
struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
- unsigned int digsize;
- int i;
- creq->req.base.engine = engine;
+ creq->base.engine = engine;
- if (creq->req.base.type == CESA_DMA_REQ)
+ if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_ahash_dma_prepare(ahashreq);
else
mv_cesa_ahash_std_prepare(ahashreq);
-
- digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
- for (i = 0; i < digsize / 4; i++)
- writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
}
static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
@@ -357,13 +359,19 @@ static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
mv_cesa_ahash_last_cleanup(ahashreq);
mv_cesa_ahash_cleanup(ahashreq);
+
+ if (creq->cache_ptr)
+ sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
+ creq->cache,
+ creq->cache_ptr,
+ ahashreq->nbytes - creq->cache_ptr);
}
static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
.step = mv_cesa_ahash_step,
.process = mv_cesa_ahash_process,
- .prepare = mv_cesa_ahash_prepare,
.cleanup = mv_cesa_ahash_req_cleanup,
+ .complete = mv_cesa_ahash_complete,
};
static int mv_cesa_ahash_init(struct ahash_request *req,
@@ -553,15 +561,14 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
- struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
- struct mv_cesa_tdma_req *dreq = &ahashdreq->base;
+ struct mv_cesa_req *basereq = &creq->base;
struct mv_cesa_ahash_dma_iter iter;
struct mv_cesa_op_ctx *op = NULL;
unsigned int frag_len;
int ret;
- dreq->chain.first = NULL;
- dreq->chain.last = NULL;
+ basereq->chain.first = NULL;
+ basereq->chain.last = NULL;
if (creq->src_nents) {
ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
@@ -572,14 +579,14 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
}
}
- mv_cesa_tdma_desc_iter_init(&dreq->chain);
+ mv_cesa_tdma_desc_iter_init(&basereq->chain);
mv_cesa_ahash_req_iter_init(&iter, req);
/*
* Add the cache (left-over data from a previous block) first.
* This will never overflow the SRAM size.
*/
- ret = mv_cesa_ahash_dma_add_cache(&dreq->chain, &iter, creq, flags);
+ ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, &iter, creq, flags);
if (ret)
goto err_free_tdma;
@@ -590,7 +597,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
* data. We intentionally do not add the final op block.
*/
while (true) {
- ret = mv_cesa_dma_add_op_transfers(&dreq->chain,
+ ret = mv_cesa_dma_add_op_transfers(&basereq->chain,
&iter.base,
&iter.src, flags);
if (ret)
@@ -601,7 +608,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
if (!mv_cesa_ahash_req_iter_next_op(&iter))
break;
- op = mv_cesa_dma_add_frag(&dreq->chain, &creq->op_tmpl,
+ op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
frag_len, flags);
if (IS_ERR(op)) {
ret = PTR_ERR(op);
@@ -619,10 +626,10 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
* operation, which depends whether this is the final request.
*/
if (creq->last_req)
- op = mv_cesa_ahash_dma_last_req(&dreq->chain, &iter, creq,
+ op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
frag_len, flags);
else if (frag_len)
- op = mv_cesa_dma_add_frag(&dreq->chain, &creq->op_tmpl,
+ op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
frag_len, flags);
if (IS_ERR(op)) {
@@ -632,7 +639,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
if (op) {
/* Add dummy desc to wait for crypto operation end */
- ret = mv_cesa_dma_add_dummy_end(&dreq->chain, flags);
+ ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
if (ret)
goto err_free_tdma;
}
@@ -643,10 +650,13 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
else
creq->cache_ptr = 0;
+ basereq->chain.last->flags |= (CESA_TDMA_END_OF_REQ |
+ CESA_TDMA_BREAK_CHAIN);
+
return 0;
err_free_tdma:
- mv_cesa_dma_cleanup(dreq);
+ mv_cesa_dma_cleanup(basereq);
dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
err:
@@ -660,11 +670,6 @@ static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
int ret;
- if (cesa_dev->caps->has_tdma)
- creq->req.base.type = CESA_DMA_REQ;
- else
- creq->req.base.type = CESA_STD_REQ;
-
creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
if (creq->src_nents < 0) {
dev_err(cesa_dev->dev, "Invalid number of src SG");
@@ -678,19 +683,19 @@ static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
if (*cached)
return 0;
- if (creq->req.base.type == CESA_DMA_REQ)
+ if (cesa_dev->caps->has_tdma)
ret = mv_cesa_ahash_dma_req_init(req);
return ret;
}
-static int mv_cesa_ahash_update(struct ahash_request *req)
+static int mv_cesa_ahash_queue_req(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+ struct mv_cesa_engine *engine;
bool cached = false;
int ret;
- creq->len += req->nbytes;
ret = mv_cesa_ahash_req_init(req, &cached);
if (ret)
return ret;
@@ -698,61 +703,48 @@ static int mv_cesa_ahash_update(struct ahash_request *req)
if (cached)
return 0;
- ret = mv_cesa_queue_req(&req->base);
+ engine = mv_cesa_select_engine(req->nbytes);
+ mv_cesa_ahash_prepare(&req->base, engine);
+
+ ret = mv_cesa_queue_req(&req->base, &creq->base);
+
if (mv_cesa_req_needs_cleanup(&req->base, ret))
mv_cesa_ahash_cleanup(req);
return ret;
}
+static int mv_cesa_ahash_update(struct ahash_request *req)
+{
+ struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+
+ creq->len += req->nbytes;
+
+ return mv_cesa_ahash_queue_req(req);
+}
+
static int mv_cesa_ahash_final(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
- bool cached = false;
- int ret;
mv_cesa_set_mac_op_total_len(tmpl, creq->len);
creq->last_req = true;
req->nbytes = 0;
- ret = mv_cesa_ahash_req_init(req, &cached);
- if (ret)
- return ret;
-
- if (cached)
- return 0;
-
- ret = mv_cesa_queue_req(&req->base);
- if (mv_cesa_req_needs_cleanup(&req->base, ret))
- mv_cesa_ahash_cleanup(req);
-
- return ret;
+ return mv_cesa_ahash_queue_req(req);
}
static int mv_cesa_ahash_finup(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
- bool cached = false;
- int ret;
creq->len += req->nbytes;
mv_cesa_set_mac_op_total_len(tmpl, creq->len);
creq->last_req = true;
- ret = mv_cesa_ahash_req_init(req, &cached);
- if (ret)
- return ret;
-
- if (cached)
- return 0;
-
- ret = mv_cesa_queue_req(&req->base);
- if (mv_cesa_req_needs_cleanup(&req->base, ret))
- mv_cesa_ahash_cleanup(req);
-
- return ret;
+ return mv_cesa_ahash_queue_req(req);
}
static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
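
How a DMA hash request terminates its TDMA chain (lines from the hunk
above, annotated): END_OF_REQ marks the request boundary for the
interrupt-side chain walker, while BREAK_CHAIN keeps the engine from
running straight into the next request's descriptors, since the
intermediate digest state must be saved and restored around each hash
request:

	basereq->chain.last->flags |= (CESA_TDMA_END_OF_REQ |
				       CESA_TDMA_BREAK_CHAIN);
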
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c
index 0ad8f1ecf..86a065bcc 100644
--- a/drivers/crypto/marvell/tdma.c
+++ b/drivers/crypto/marvell/tdma.c
@@ -37,9 +37,9 @@ bool mv_cesa_req_dma_iter_next_transfer(struct mv_cesa_dma_iter *iter,
return true;
}
-void mv_cesa_dma_step(struct mv_cesa_tdma_req *dreq)
+void mv_cesa_dma_step(struct mv_cesa_req *dreq)
{
- struct mv_cesa_engine *engine = dreq->base.engine;
+ struct mv_cesa_engine *engine = dreq->engine;
writel_relaxed(0, engine->regs + CESA_SA_CFG);
@@ -53,19 +53,25 @@ void mv_cesa_dma_step(struct mv_cesa_tdma_req *dreq)
engine->regs + CESA_SA_CFG);
writel_relaxed(dreq->chain.first->cur_dma,
engine->regs + CESA_TDMA_NEXT_ADDR);
+ BUG_ON(readl(engine->regs + CESA_SA_CMD) &
+ CESA_SA_CMD_EN_CESA_SA_ACCL0);
writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
-void mv_cesa_dma_cleanup(struct mv_cesa_tdma_req *dreq)
+void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq)
{
struct mv_cesa_tdma_desc *tdma;
for (tdma = dreq->chain.first; tdma;) {
struct mv_cesa_tdma_desc *old_tdma = tdma;
+ u32 type = tdma->flags & CESA_TDMA_TYPE_MSK;
- if (tdma->flags & CESA_TDMA_OP)
+ if (type == CESA_TDMA_OP)
dma_pool_free(cesa_dev->dma->op_pool, tdma->op,
le32_to_cpu(tdma->src));
+ else if (type == CESA_TDMA_IV)
+ dma_pool_free(cesa_dev->dma->iv_pool, tdma->data,
+ le32_to_cpu(tdma->dst));
tdma = tdma->next;
dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma,
@@ -76,7 +82,7 @@ void mv_cesa_dma_cleanup(struct mv_cesa_tdma_req *dreq)
dreq->chain.last = NULL;
}
-void mv_cesa_dma_prepare(struct mv_cesa_tdma_req *dreq,
+void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
struct mv_cesa_engine *engine)
{
struct mv_cesa_tdma_desc *tdma;
@@ -88,11 +94,97 @@ void mv_cesa_dma_prepare(struct mv_cesa_tdma_req *dreq,
if (tdma->flags & CESA_TDMA_SRC_IN_SRAM)
tdma->src = cpu_to_le32(tdma->src + engine->sram_dma);
- if (tdma->flags & CESA_TDMA_OP)
+ if ((tdma->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_OP)
mv_cesa_adjust_op(engine, tdma->op);
}
}
+void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
+ struct mv_cesa_req *dreq)
+{
+ if (engine->chain.first == NULL && engine->chain.last == NULL) {
+ engine->chain.first = dreq->chain.first;
+ engine->chain.last = dreq->chain.last;
+ } else {
+ struct mv_cesa_tdma_desc *last;
+
+ last = engine->chain.last;
+ last->next = dreq->chain.first;
+ engine->chain.last = dreq->chain.last;
+
+ if (!(last->flags & CESA_TDMA_BREAK_CHAIN))
+ last->next_dma = dreq->chain.first->cur_dma;
+ }
+}
+
+int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
+{
+ struct crypto_async_request *req = NULL;
+ struct mv_cesa_tdma_desc *tdma = NULL, *next = NULL;
+ dma_addr_t tdma_cur;
+ int res = 0;
+
+ tdma_cur = readl(engine->regs + CESA_TDMA_CUR);
+
+ for (tdma = engine->chain.first; tdma; tdma = next) {
+ spin_lock_bh(&engine->lock);
+ next = tdma->next;
+ spin_unlock_bh(&engine->lock);
+
+ if (tdma->flags & CESA_TDMA_END_OF_REQ) {
+ struct crypto_async_request *backlog = NULL;
+ struct mv_cesa_ctx *ctx;
+ u32 current_status;
+
+ spin_lock_bh(&engine->lock);
+ /*
+ * if req is NULL, this means we're processing the
+ * request in engine->req.
+ */
+ if (!req)
+ req = engine->req;
+ else
+ req = mv_cesa_dequeue_req_locked(engine,
+ &backlog);
+
+ /* Re-chaining to the next request */
+ engine->chain.first = tdma->next;
+ tdma->next = NULL;
+
+ /* If this is the last request, clear the chain */
+ if (engine->chain.first == NULL)
+ engine->chain.last = NULL;
+ spin_unlock_bh(&engine->lock);
+
+ ctx = crypto_tfm_ctx(req->tfm);
+ current_status = (tdma->cur_dma == tdma_cur) ?
+ status : CESA_SA_INT_ACC0_IDMA_DONE;
+ res = ctx->ops->process(req, current_status);
+ ctx->ops->complete(req);
+
+ if (res == 0)
+ mv_cesa_engine_enqueue_complete_request(engine,
+ req);
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+ }
+
+ if (res || tdma->cur_dma == tdma_cur)
+ break;
+ }
+
+	/* Save the last request in error to engine->req, so that the core
+	 * knows which request was faulty */
+ if (res) {
+ spin_lock_bh(&engine->lock);
+ engine->req = req;
+ spin_unlock_bh(&engine->lock);
+ }
+
+ return res;
+}
+
static struct mv_cesa_tdma_desc *
mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags)
{
@@ -117,6 +209,32 @@ mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags)
return new_tdma;
}
+int mv_cesa_dma_add_iv_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
+ u32 size, u32 flags, gfp_t gfp_flags)
+{
+ struct mv_cesa_tdma_desc *tdma;
+ u8 *iv;
+ dma_addr_t dma_handle;
+
+ tdma = mv_cesa_dma_add_desc(chain, gfp_flags);
+ if (IS_ERR(tdma))
+ return PTR_ERR(tdma);
+
+ iv = dma_pool_alloc(cesa_dev->dma->iv_pool, gfp_flags, &dma_handle);
+ if (!iv)
+ return -ENOMEM;
+
+ tdma->byte_cnt = cpu_to_le32(size | BIT(31));
+ tdma->src = src;
+ tdma->dst = cpu_to_le32(dma_handle);
+ tdma->data = iv;
+
+ flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
+ tdma->flags = flags | CESA_TDMA_IV;
+ return 0;
+}
+
struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
const struct mv_cesa_op_ctx *op_templ,
bool skip_ctx,
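
For reference, a minimal sketch of the per-engine descriptor chaining that
mv_cesa_tdma_chain() introduces above: each request's descriptor list is
appended to an engine-wide chain so the hardware can flow from one request
into the next unless a break flag stops it. The types and the BREAK_CHAIN
flag below are illustrative stand-ins, not the driver's actual definitions.

#include <stdint.h>

struct desc {
	struct desc *next;	/* CPU view of the list */
	uint32_t next_dma;	/* DMA address of the next descriptor */
	uint32_t cur_dma;	/* DMA address of this descriptor */
	uint32_t flags;
};

struct chain {
	struct desc *first;
	struct desc *last;
};

#define BREAK_CHAIN	0x1	/* HW must not auto-fetch the next request */

/* Append a request's descriptor list to the engine-wide chain. */
static void chain_append(struct chain *engine, struct chain *req)
{
	if (!engine->first) {
		*engine = *req;
		return;
	}

	engine->last->next = req->first;
	/* Only link the DMA side if HW may flow into the next request. */
	if (!(engine->last->flags & BREAK_CHAIN))
		engine->last->next_dma = req->first->cur_dma;
	engine->last = req->last;
}
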
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 59ed54e46..625ee50fd 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -11,7 +11,6 @@
* http://www.gnu.org/copyleft/gpl.html
*/
-#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -25,6 +24,7 @@
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
#define DCP_MAX_CHANS 4
#define DCP_BUF_SZ PAGE_SIZE
@@ -84,7 +84,7 @@ struct dcp_async_ctx {
unsigned int hot:1;
/* Crypto-specific context */
- struct crypto_ablkcipher *fallback;
+ struct crypto_skcipher *fallback;
unsigned int key_len;
uint8_t key[AES_KEYSIZE_128];
};
@@ -374,20 +374,22 @@ static int dcp_chan_thread_aes(void *data)
static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
{
- struct crypto_tfm *tfm =
- crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
- struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(
- crypto_ablkcipher_reqtfm(req));
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
int ret;
- ablkcipher_request_set_tfm(req, ctx->fallback);
+ skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->nbytes, req->info);
if (enc)
- ret = crypto_ablkcipher_encrypt(req);
+ ret = crypto_skcipher_encrypt(subreq);
else
- ret = crypto_ablkcipher_decrypt(req);
+ ret = crypto_skcipher_decrypt(subreq);
- ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+ skcipher_request_zero(subreq);
return ret;
}
@@ -453,28 +455,22 @@ static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return 0;
}
- /* Check if the key size is supported by kernel at all. */
- if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
- tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
- return -EINVAL;
- }
-
/*
* If the requested AES key size is not supported by the hardware,
* but is supported by in-kernel software implementation, we use
* software fallback.
*/
- actx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
- actx->fallback->base.crt_flags |=
- tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK;
+ crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(actx->fallback,
+ tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
- ret = crypto_ablkcipher_setkey(actx->fallback, key, len);
+ ret = crypto_skcipher_setkey(actx->fallback, key, len);
if (!ret)
return 0;
tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->base.crt_flags |=
- actx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK;
+ tfm->base.crt_flags |= crypto_skcipher_get_flags(actx->fallback) &
+ CRYPTO_TFM_RES_MASK;
return ret;
}
@@ -484,9 +480,9 @@ static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
const char *name = crypto_tfm_alg_name(tfm);
const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
- struct crypto_ablkcipher *blk;
+ struct crypto_skcipher *blk;
- blk = crypto_alloc_ablkcipher(name, 0, flags);
+ blk = crypto_alloc_skcipher(name, 0, flags);
if (IS_ERR(blk))
return PTR_ERR(blk);
@@ -499,8 +495,7 @@ static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
{
struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
- crypto_free_ablkcipher(actx->fallback);
- actx->fallback = NULL;
+ crypto_free_skcipher(actx->fallback);
}
/*
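
The mxs-dcp changes above are the standard pattern for falling back from a
hardware ablkcipher request to a software skcipher: build a request for the
fallback tfm on the stack, mirror the original request's parameters, run the
encryption or decryption, then wipe the on-stack state. A condensed sketch
of that pattern (same crypto API as the hunks; do_fallback is an
illustrative name):

#include <crypto/skcipher.h>

static int do_fallback(struct ablkcipher_request *req,
		       struct crypto_skcipher *fallback, bool enc)
{
	SKCIPHER_REQUEST_ON_STACK(subreq, fallback);
	int ret;

	skcipher_request_set_tfm(subreq, fallback);
	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->nbytes, req->info);

	ret = enc ? crypto_skcipher_encrypt(subreq) :
		    crypto_skcipher_decrypt(subreq);

	/* Wipe key-related state left in the on-stack request. */
	skcipher_request_zero(subreq);
	return ret;
}
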
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index ce174d3b8..4ab53a604 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -528,8 +528,6 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
omap_aes_dma_stop(dd);
- dmaengine_terminate_all(dd->dma_lch_in);
- dmaengine_terminate_all(dd->dma_lch_out);
return 0;
}
@@ -580,10 +578,12 @@ static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
sg_init_table(&dd->in_sgl, 1);
sg_set_buf(&dd->in_sgl, buf_in, total);
dd->in_sg = &dd->in_sgl;
+ dd->in_sg_len = 1;
sg_init_table(&dd->out_sgl, 1);
sg_set_buf(&dd->out_sgl, buf_out, total);
dd->out_sg = &dd->out_sgl;
+ dd->out_sg_len = 1;
return 0;
}
@@ -604,7 +604,6 @@ static int omap_aes_prepare_req(struct crypto_engine *engine,
crypto_ablkcipher_reqtfm(req));
struct omap_aes_dev *dd = omap_aes_find_dev(ctx);
struct omap_aes_reqctx *rctx;
- int len;
if (!dd)
return -ENODEV;
@@ -616,6 +615,14 @@ static int omap_aes_prepare_req(struct crypto_engine *engine,
dd->in_sg = req->src;
dd->out_sg = req->dst;
+ dd->in_sg_len = sg_nents_for_len(dd->in_sg, dd->total);
+ if (dd->in_sg_len < 0)
+ return dd->in_sg_len;
+
+ dd->out_sg_len = sg_nents_for_len(dd->out_sg, dd->total);
+ if (dd->out_sg_len < 0)
+ return dd->out_sg_len;
+
if (omap_aes_check_aligned(dd->in_sg, dd->total) ||
omap_aes_check_aligned(dd->out_sg, dd->total)) {
if (omap_aes_copy_sgs(dd))
@@ -625,11 +632,6 @@ static int omap_aes_prepare_req(struct crypto_engine *engine,
dd->sgs_copied = 0;
}
- len = ALIGN(dd->total, AES_BLOCK_SIZE);
- dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, len);
- dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, len);
- BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0);
-
rctx = ablkcipher_request_ctx(req);
ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
rctx->mode &= FLAGS_MODE_MASK;
@@ -1185,17 +1187,19 @@ static int omap_aes_probe(struct platform_device *pdev)
spin_unlock(&list_lock);
for (i = 0; i < dd->pdata->algs_info_size; i++) {
- for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
- algp = &dd->pdata->algs_info[i].algs_list[j];
+ if (!dd->pdata->algs_info[i].registered) {
+ for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
+ algp = &dd->pdata->algs_info[i].algs_list[j];
- pr_debug("reg alg: %s\n", algp->cra_name);
- INIT_LIST_HEAD(&algp->cra_list);
+ pr_debug("reg alg: %s\n", algp->cra_name);
+ INIT_LIST_HEAD(&algp->cra_list);
- err = crypto_register_alg(algp);
- if (err)
- goto err_algs;
+ err = crypto_register_alg(algp);
+ if (err)
+ goto err_algs;
- dd->pdata->algs_info[i].registered++;
+ dd->pdata->algs_info[i].registered++;
+ }
}
}
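
The omap-aes hunks above (and the matching omap-des hunks below) replace
scatterwalk_bytes_sglen() plus a BUG_ON() with sg_nents_for_len(), which
returns a negative errno when the scatterlist is shorter than the requested
length, so the driver can fail the request instead of crashing the kernel.
A minimal sketch of the pattern (count_sg_ents is an illustrative helper):

#include <linux/scatterlist.h>

static int count_sg_ents(struct scatterlist *sg, u64 total, int *nents)
{
	int n = sg_nents_for_len(sg, total);

	if (n < 0)	/* -EINVAL: list holds fewer than 'total' bytes */
		return n;

	*nents = n;
	return 0;
}
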
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 3eedb0311..5691434ff 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -560,10 +560,12 @@ static int omap_des_copy_sgs(struct omap_des_dev *dd)
sg_init_table(&dd->in_sgl, 1);
sg_set_buf(&dd->in_sgl, buf_in, dd->total);
dd->in_sg = &dd->in_sgl;
+ dd->in_sg_len = 1;
sg_init_table(&dd->out_sgl, 1);
sg_set_buf(&dd->out_sgl, buf_out, dd->total);
dd->out_sg = &dd->out_sgl;
+ dd->out_sg_len = 1;
return 0;
}
@@ -595,6 +597,14 @@ static int omap_des_prepare_req(struct crypto_engine *engine,
dd->in_sg = req->src;
dd->out_sg = req->dst;
+ dd->in_sg_len = sg_nents_for_len(dd->in_sg, dd->total);
+ if (dd->in_sg_len < 0)
+ return dd->in_sg_len;
+
+ dd->out_sg_len = sg_nents_for_len(dd->out_sg, dd->total);
+ if (dd->out_sg_len < 0)
+ return dd->out_sg_len;
+
if (omap_des_copy_needed(dd->in_sg) ||
omap_des_copy_needed(dd->out_sg)) {
if (omap_des_copy_sgs(dd))
@@ -604,10 +614,6 @@ static int omap_des_prepare_req(struct crypto_engine *engine,
dd->sgs_copied = 0;
}
- dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, dd->total);
- dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, dd->total);
- BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0);
-
rctx = ablkcipher_request_ctx(req);
ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
rctx->mode &= FLAGS_MODE_MASK;
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 63464e86f..7fe4eef12 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -100,6 +100,8 @@
#define DEFAULT_TIMEOUT_INTERVAL HZ
+#define DEFAULT_AUTOSUSPEND_DELAY 1000
+
/* mostly device flags */
#define FLAGS_BUSY 0
#define FLAGS_FINAL 1
@@ -173,7 +175,7 @@ struct omap_sham_ctx {
struct omap_sham_hmac_ctx base[0];
};
-#define OMAP_SHAM_QUEUE_LENGTH 1
+#define OMAP_SHAM_QUEUE_LENGTH 10
struct omap_sham_algs_info {
struct ahash_alg *algs_list;
@@ -813,7 +815,6 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
{
struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
- dmaengine_terminate_all(dd->dma_lch);
if (ctx->flags & BIT(FLAGS_SG)) {
dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
@@ -999,7 +1000,8 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));
- pm_runtime_put(dd->dev);
+ pm_runtime_mark_last_busy(dd->dev);
+ pm_runtime_put_autosuspend(dd->dev);
if (req->base.complete)
req->base.complete(&req->base, err);
@@ -1093,7 +1095,7 @@ static int omap_sham_update(struct ahash_request *req)
ctx->offset = 0;
if (ctx->flags & BIT(FLAGS_FINUP)) {
- if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) {
+ if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 240) {
/*
* OMAP HW accel works only with buffers >= 9
* will switch to bypass in final()
@@ -1149,9 +1151,13 @@ static int omap_sham_final(struct ahash_request *req)
if (ctx->flags & BIT(FLAGS_ERROR))
return 0; /* uncompleted hash is not needed */
- /* OMAP HW accel works only with buffers >= 9 */
- /* HMAC is always >= 9 because ipad == block size */
- if ((ctx->digcnt + ctx->bufcnt) < 9)
+ /*
+ * OMAP HW accel works only with buffers >= 9.
+ * HMAC is always >= 9 because ipad == block size.
+	 * If the buffer size is less than 240, we use the SW fallback,
+	 * as using DMA + HW in this case doesn't provide any benefit.
+ */
+ if ((ctx->digcnt + ctx->bufcnt) < 240)
return omap_sham_final_shash(req);
else if (ctx->bufcnt)
return omap_sham_enqueue(req, OP_FINAL);
@@ -1328,7 +1334,7 @@ static struct ahash_alg algs_sha1_md5[] = {
.halg.base = {
.cra_name = "sha1",
.cra_driver_name = "omap-sha1",
- .cra_priority = 100,
+ .cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
@@ -1351,7 +1357,7 @@ static struct ahash_alg algs_sha1_md5[] = {
.halg.base = {
.cra_name = "md5",
.cra_driver_name = "omap-md5",
- .cra_priority = 100,
+ .cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
@@ -1375,7 +1381,7 @@ static struct ahash_alg algs_sha1_md5[] = {
.halg.base = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "omap-hmac-sha1",
- .cra_priority = 100,
+ .cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
@@ -1400,7 +1406,7 @@ static struct ahash_alg algs_sha1_md5[] = {
.halg.base = {
.cra_name = "hmac(md5)",
.cra_driver_name = "omap-hmac-md5",
- .cra_priority = 100,
+ .cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC |
@@ -1428,7 +1434,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.halg.base = {
.cra_name = "sha224",
.cra_driver_name = "omap-sha224",
- .cra_priority = 100,
+ .cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
@@ -1450,7 +1456,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.halg.base = {
.cra_name = "sha256",
.cra_driver_name = "omap-sha256",
- .cra_priority = 100,
+ .cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
@@ -1473,7 +1479,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.halg.base = {
.cra_name = "hmac(sha224)",
.cra_driver_name = "omap-hmac-sha224",
- .cra_priority = 100,
+ .cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
@@ -1497,7 +1503,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.halg.base = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "omap-hmac-sha256",
- .cra_priority = 100,
+ .cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
@@ -1523,7 +1529,7 @@ static struct ahash_alg algs_sha384_sha512[] = {
.halg.base = {
.cra_name = "sha384",
.cra_driver_name = "omap-sha384",
- .cra_priority = 100,
+ .cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
@@ -1545,7 +1551,7 @@ static struct ahash_alg algs_sha384_sha512[] = {
.halg.base = {
.cra_name = "sha512",
.cra_driver_name = "omap-sha512",
- .cra_priority = 100,
+ .cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
@@ -1568,7 +1574,7 @@ static struct ahash_alg algs_sha384_sha512[] = {
.halg.base = {
.cra_name = "hmac(sha384)",
.cra_driver_name = "omap-hmac-sha384",
- .cra_priority = 100,
+ .cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
@@ -1592,7 +1598,7 @@ static struct ahash_alg algs_sha384_sha512[] = {
.halg.base = {
.cra_name = "hmac(sha512)",
.cra_driver_name = "omap-hmac-sha512",
- .cra_priority = 100,
+ .cra_priority = 400,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
@@ -1946,6 +1952,9 @@ static int omap_sham_probe(struct platform_device *pdev)
dd->flags |= dd->pdata->flags;
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
+
pm_runtime_enable(dev);
pm_runtime_irq_safe(dev);
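
omap-sham above also trades a plain pm_runtime_put() for the runtime-PM
autosuspend idiom, keeping the device powered between back-to-back requests
for DEFAULT_AUTOSUSPEND_DELAY milliseconds instead of bouncing it on every
completion. A sketch of how the calls pair up (my_probe and my_request_done
are illustrative names):

#include <linux/pm_runtime.h>

#define MY_AUTOSUSPEND_DELAY_MS	1000

static void my_probe(struct device *dev)
{
	/* Opt in to autosuspend before enabling runtime PM. */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, MY_AUTOSUSPEND_DELAY_MS);
	pm_runtime_enable(dev);
}

static void my_request_done(struct device *dev)
{
	/* Restart the inactivity timer, then drop the usage count. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}
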
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 3b1c7ecf0..475760988 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -171,7 +171,7 @@ struct spacc_ablk_ctx {
* The fallback cipher. If the operation can't be done in hardware,
* fallback to a software version.
*/
- struct crypto_ablkcipher *sw_cipher;
+ struct crypto_skcipher *sw_cipher;
};
/* AEAD cipher context. */
@@ -789,33 +789,35 @@ static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
* request for any other size (192 bits) then we need to do a software
* fallback.
*/
- if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 &&
- ctx->sw_cipher) {
+ if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) {
+ if (!ctx->sw_cipher)
+ return -EINVAL;
+
/*
* Set the fallback transform to use the same request flags as
* the hardware transform.
*/
- ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
- ctx->sw_cipher->base.crt_flags |=
- cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK;
+ crypto_skcipher_clear_flags(ctx->sw_cipher,
+ CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(ctx->sw_cipher,
+ cipher->base.crt_flags &
+ CRYPTO_TFM_REQ_MASK);
+
+ err = crypto_skcipher_setkey(ctx->sw_cipher, key, len);
+
+ tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+ tfm->crt_flags |=
+ crypto_skcipher_get_flags(ctx->sw_cipher) &
+ CRYPTO_TFM_RES_MASK;
- err = crypto_ablkcipher_setkey(ctx->sw_cipher, key, len);
if (err)
goto sw_setkey_failed;
- } else if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 &&
- !ctx->sw_cipher)
- err = -EINVAL;
+ }
memcpy(ctx->key, key, len);
ctx->key_len = len;
sw_setkey_failed:
- if (err && ctx->sw_cipher) {
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |=
- ctx->sw_cipher->base.crt_flags & CRYPTO_TFM_RES_MASK;
- }
-
return err;
}
@@ -910,20 +912,21 @@ static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
struct crypto_tfm *old_tfm =
crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
+ SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher);
int err;
- if (!ctx->sw_cipher)
- return -EINVAL;
-
/*
* Change the request to use the software fallback transform, and once
* the ciphering has completed, put the old transform back into the
* request.
*/
- ablkcipher_request_set_tfm(req, ctx->sw_cipher);
- err = is_encrypt ? crypto_ablkcipher_encrypt(req) :
- crypto_ablkcipher_decrypt(req);
- ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(old_tfm));
+ skcipher_request_set_tfm(subreq, ctx->sw_cipher);
+ skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->nbytes, req->info);
+ err = is_encrypt ? crypto_skcipher_encrypt(subreq) :
+ crypto_skcipher_decrypt(subreq);
+ skcipher_request_zero(subreq);
return err;
}
@@ -1015,12 +1018,13 @@ static int spacc_ablk_cra_init(struct crypto_tfm *tfm)
ctx->generic.flags = spacc_alg->type;
ctx->generic.engine = engine;
if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
- ctx->sw_cipher = crypto_alloc_ablkcipher(alg->cra_name, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ ctx->sw_cipher = crypto_alloc_skcipher(
+ alg->cra_name, 0, CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->sw_cipher)) {
dev_warn(engine->dev, "failed to allocate fallback for %s\n",
alg->cra_name);
- ctx->sw_cipher = NULL;
+ return PTR_ERR(ctx->sw_cipher);
}
}
ctx->generic.key_offs = spacc_alg->key_offs;
@@ -1035,9 +1039,7 @@ static void spacc_ablk_cra_exit(struct crypto_tfm *tfm)
{
struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
- if (ctx->sw_cipher)
- crypto_free_ablkcipher(ctx->sw_cipher);
- ctx->sw_cipher = NULL;
+ crypto_free_skcipher(ctx->sw_cipher);
}
static int spacc_ablk_encrypt(struct ablkcipher_request *req)
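
Like mxs-dcp, picoxcell now allocates its software fallback as an skcipher,
and it additionally turns a failed fallback allocation into a hard error for
tfm initialization rather than continuing with ctx->sw_cipher set to NULL.
A sketch of the stricter init (init_fallback is an illustrative helper):

#include <crypto/skcipher.h>
#include <linux/err.h>

static int init_fallback(const char *name, struct crypto_skcipher **out)
{
	struct crypto_skcipher *sw;

	sw = crypto_alloc_skcipher(name, 0,
				   CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(sw))
		return PTR_ERR(sw);	/* propagate, don't limp on without one */

	*out = sw;
	return 0;
}
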
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig
index 85b44e577..ce3cae40f 100644
--- a/drivers/crypto/qat/Kconfig
+++ b/drivers/crypto/qat/Kconfig
@@ -4,12 +4,13 @@ config CRYPTO_DEV_QAT
select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
select CRYPTO_AKCIPHER
+ select CRYPTO_DH
select CRYPTO_HMAC
+ select CRYPTO_RSA
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
select FW_LOADER
- select ASN1
config CRYPTO_DEV_QAT_DH895xCC
tristate "Support for Intel(R) DH895xCC"
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index c5bd5a9ab..6bc68bc00 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -229,6 +229,7 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
hw_data->get_arb_mapping = adf_get_arbiter_mapping;
hw_data->enable_ints = adf_enable_ints;
hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
+ hw_data->reset_device = adf_reset_flr;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
}
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
index 879e04cae..618cec360 100644
--- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
@@ -239,6 +239,7 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
hw_data->get_arb_mapping = adf_get_arbiter_mapping;
hw_data->enable_ints = adf_enable_ints;
hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
+ hw_data->reset_device = adf_reset_flr;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
}
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
index 5fc3dbb9a..92fb6ffdc 100644
--- a/drivers/crypto/qat/qat_common/Makefile
+++ b/drivers/crypto/qat/qat_common/Makefile
@@ -1,12 +1,3 @@
-$(obj)/qat_rsapubkey-asn1.o: $(obj)/qat_rsapubkey-asn1.c \
- $(obj)/qat_rsapubkey-asn1.h
-$(obj)/qat_rsaprivkey-asn1.o: $(obj)/qat_rsaprivkey-asn1.c \
- $(obj)/qat_rsaprivkey-asn1.h
-$(obj)/qat_asym_algs.o: $(obj)/qat_rsapubkey-asn1.h $(obj)/qat_rsaprivkey-asn1.h
-
-clean-files += qat_rsapubkey-asn1.c qat_rsapubkey-asn1.h
-clean-files += qat_rsaprivkey-asn1.c qat_rsaprivkey-asn1.h
-
obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
intel_qat-objs := adf_cfg.o \
adf_isr.o \
@@ -20,8 +11,6 @@ intel_qat-objs := adf_cfg.o \
adf_hw_arbiter.o \
qat_crypto.o \
qat_algs.o \
- qat_rsapubkey-asn1.o \
- qat_rsaprivkey-asn1.o \
qat_asym_algs.o \
qat_uclo.o \
qat_hal.o
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 5a07208ce..e88225365 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -176,6 +176,7 @@ struct adf_hw_device_data {
void (*disable_iov)(struct adf_accel_dev *accel_dev);
void (*enable_ints)(struct adf_accel_dev *accel_dev);
int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev);
+ void (*reset_device)(struct adf_accel_dev *accel_dev);
const char *fw_name;
const char *fw_mmp_name;
uint32_t fuses;
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index b40d9c8da..2839fccdd 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -82,18 +82,12 @@ struct adf_reset_dev_data {
struct work_struct reset_work;
};
-void adf_dev_restore(struct adf_accel_dev *accel_dev)
+void adf_reset_sbr(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
struct pci_dev *parent = pdev->bus->self;
uint16_t bridge_ctl = 0;
- if (accel_dev->is_vf)
- return;
-
- dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
- accel_dev->accel_id);
-
if (!parent)
parent = pdev;
@@ -101,6 +95,8 @@ void adf_dev_restore(struct adf_accel_dev *accel_dev)
dev_info(&GET_DEV(accel_dev),
"Transaction still in progress. Proceeding\n");
+ dev_info(&GET_DEV(accel_dev), "Secondary bus reset\n");
+
pci_read_config_word(parent, PCI_BRIDGE_CONTROL, &bridge_ctl);
bridge_ctl |= PCI_BRIDGE_CTL_BUS_RESET;
pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
@@ -108,8 +104,40 @@ void adf_dev_restore(struct adf_accel_dev *accel_dev)
bridge_ctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
msleep(100);
- pci_restore_state(pdev);
- pci_save_state(pdev);
+}
+EXPORT_SYMBOL_GPL(adf_reset_sbr);
+
+void adf_reset_flr(struct adf_accel_dev *accel_dev)
+{
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+ u16 control = 0;
+ int pos = 0;
+
+ dev_info(&GET_DEV(accel_dev), "Function level reset\n");
+ pos = pci_pcie_cap(pdev);
+ if (!pos) {
+ dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
+ return;
+ }
+ pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &control);
+ control |= PCI_EXP_DEVCTL_BCR_FLR;
+ pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, control);
+ msleep(100);
+}
+EXPORT_SYMBOL_GPL(adf_reset_flr);
+
+void adf_dev_restore(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+ if (hw_device->reset_device) {
+ dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
+ accel_dev->accel_id);
+ hw_device->reset_device(accel_dev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+ }
}
static void adf_device_reset_worker(struct work_struct *work)
@@ -243,7 +271,8 @@ EXPORT_SYMBOL_GPL(adf_disable_aer);
int adf_init_aer(void)
{
- device_reset_wq = create_workqueue("qat_device_reset_wq");
+ device_reset_wq = alloc_workqueue("qat_device_reset_wq",
+ WQ_MEM_RECLAIM, 0);
return !device_reset_wq ? -EFAULT : 0;
}
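
adf_reset_flr() above performs a PCIe function-level reset by setting the
FLR bit in the Device Control register of the device's PCIe capability and
waiting out the reset. A condensed sketch of that sequence (later kernels
grew generic helpers for this; the raw config-space version looks like):

#include <linux/pci.h>
#include <linux/delay.h>

static int do_flr(struct pci_dev *pdev)
{
	int pos = pci_pcie_cap(pdev);
	u16 ctl;

	if (!pos)
		return -ENODEV;	/* no PCIe capability, cannot FLR */

	pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &ctl);
	ctl |= PCI_EXP_DEVCTL_BCR_FLR;
	pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, ctl);
	msleep(100);	/* give the function time to complete the reset */
	return 0;
}
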
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 75faa39bc..980e07475 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -141,6 +141,8 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev);
int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf);
void adf_disable_aer(struct adf_accel_dev *accel_dev);
+void adf_reset_sbr(struct adf_accel_dev *accel_dev);
+void adf_reset_flr(struct adf_accel_dev *accel_dev);
void adf_dev_restore(struct adf_accel_dev *accel_dev);
int adf_init_aer(void);
void adf_exit_aer(void);
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
index 4a526e2f1..9320ae1d0 100644
--- a/drivers/crypto/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/qat/qat_common/adf_sriov.c
@@ -292,7 +292,7 @@ EXPORT_SYMBOL_GPL(adf_sriov_configure);
int __init adf_init_pf_wq(void)
{
/* Workqueue for PF2VF responses */
- pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
+ pf2vf_resp_wq = alloc_workqueue("qat_pf2vf_resp_wq", WQ_MEM_RECLAIM, 0);
return !pf2vf_resp_wq ? -ENOMEM : 0;
}
diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
index aa689cabe..bf99e11a3 100644
--- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
@@ -321,7 +321,7 @@ EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
int __init adf_init_vf_wq(void)
{
- adf_vf_stop_wq = create_workqueue("adf_vf_stop_wq");
+ adf_vf_stop_wq = alloc_workqueue("adf_vf_stop_wq", WQ_MEM_RECLAIM, 0);
return !adf_vf_stop_wq ? -EFAULT : 0;
}
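
The three create_workqueue() -> alloc_workqueue(..., WQ_MEM_RECLAIM, 0)
conversions above (adf_aer.c, adf_sriov.c, adf_vf_isr.c) share one
rationale: WQ_MEM_RECLAIM guarantees a rescuer thread, so queued work can
still make forward progress under memory pressure, and a max_active of 0
selects the default concurrency. A minimal sketch (my_wq is an illustrative
name):

#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

static int __init my_wq_init(void)
{
	my_wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM, 0);
	return my_wq ? 0 : -ENOMEM;
}
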
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 4c9deef6a..20f35df8a 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -947,13 +947,13 @@ static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
return 0;
out_free_all:
- memset(ctx->dec_cd, 0, sizeof(*ctx->enc_cd));
- dma_free_coherent(dev, sizeof(*ctx->enc_cd),
+ memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
+ dma_free_coherent(dev, sizeof(*ctx->dec_cd),
ctx->dec_cd, ctx->dec_cd_paddr);
ctx->dec_cd = NULL;
out_free_enc:
- memset(ctx->enc_cd, 0, sizeof(*ctx->dec_cd));
- dma_free_coherent(dev, sizeof(*ctx->dec_cd),
+ memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
+ dma_free_coherent(dev, sizeof(*ctx->enc_cd),
ctx->enc_cd, ctx->enc_cd_paddr);
ctx->enc_cd = NULL;
return -ENOMEM;
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 05f49d4f9..0d35dca2e 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -49,11 +49,12 @@
#include <crypto/internal/rsa.h>
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
+#include <crypto/kpp.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/dh.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <crypto/scatterwalk.h>
-#include "qat_rsapubkey-asn1.h"
-#include "qat_rsaprivkey-asn1.h"
#include "icp_qat_fw_pke.h"
#include "adf_accel_devices.h"
#include "adf_transport.h"
@@ -75,6 +76,14 @@ struct qat_rsa_input_params {
dma_addr_t d;
dma_addr_t n;
} dec;
+ struct {
+ dma_addr_t c;
+ dma_addr_t p;
+ dma_addr_t q;
+ dma_addr_t dp;
+ dma_addr_t dq;
+ dma_addr_t qinv;
+ } dec_crt;
u64 in_tab[8];
};
} __packed __aligned(64);
@@ -95,71 +104,480 @@ struct qat_rsa_ctx {
char *n;
char *e;
char *d;
+ char *p;
+ char *q;
+ char *dp;
+ char *dq;
+ char *qinv;
dma_addr_t dma_n;
dma_addr_t dma_e;
dma_addr_t dma_d;
+ dma_addr_t dma_p;
+ dma_addr_t dma_q;
+ dma_addr_t dma_dp;
+ dma_addr_t dma_dq;
+ dma_addr_t dma_qinv;
unsigned int key_sz;
+ bool crt_mode;
+ struct qat_crypto_instance *inst;
+} __packed __aligned(64);
+
+struct qat_dh_input_params {
+ union {
+ struct {
+ dma_addr_t b;
+ dma_addr_t xa;
+ dma_addr_t p;
+ } in;
+ struct {
+ dma_addr_t xa;
+ dma_addr_t p;
+ } in_g2;
+ u64 in_tab[8];
+ };
+} __packed __aligned(64);
+
+struct qat_dh_output_params {
+ union {
+ dma_addr_t r;
+ u64 out_tab[8];
+ };
+} __packed __aligned(64);
+
+struct qat_dh_ctx {
+ char *g;
+ char *xa;
+ char *p;
+ dma_addr_t dma_g;
+ dma_addr_t dma_xa;
+ dma_addr_t dma_p;
+ unsigned int p_size;
+ bool g2;
struct qat_crypto_instance *inst;
} __packed __aligned(64);
-struct qat_rsa_request {
- struct qat_rsa_input_params in;
- struct qat_rsa_output_params out;
+struct qat_asym_request {
+ union {
+ struct qat_rsa_input_params rsa;
+ struct qat_dh_input_params dh;
+ } in;
+ union {
+ struct qat_rsa_output_params rsa;
+ struct qat_dh_output_params dh;
+ } out;
dma_addr_t phy_in;
dma_addr_t phy_out;
char *src_align;
char *dst_align;
struct icp_qat_fw_pke_request req;
- struct qat_rsa_ctx *ctx;
+ union {
+ struct qat_rsa_ctx *rsa;
+ struct qat_dh_ctx *dh;
+ } ctx;
+ union {
+ struct akcipher_request *rsa;
+ struct kpp_request *dh;
+ } areq;
int err;
+ void (*cb)(struct icp_qat_fw_pke_resp *resp);
} __aligned(64);
-static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
+static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
{
- struct akcipher_request *areq = (void *)(__force long)resp->opaque;
- struct qat_rsa_request *req = PTR_ALIGN(akcipher_request_ctx(areq), 64);
- struct device *dev = &GET_DEV(req->ctx->inst->accel_dev);
+ struct qat_asym_request *req = (void *)(__force long)resp->opaque;
+ struct kpp_request *areq = req->areq.dh;
+ struct device *dev = &GET_DEV(req->ctx.dh->inst->accel_dev);
int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
resp->pke_resp_hdr.comn_resp_flags);
err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
- if (req->src_align)
- dma_free_coherent(dev, req->ctx->key_sz, req->src_align,
- req->in.enc.m);
- else
- dma_unmap_single(dev, req->in.enc.m, req->ctx->key_sz,
- DMA_TO_DEVICE);
+ if (areq->src) {
+ if (req->src_align)
+ dma_free_coherent(dev, req->ctx.dh->p_size,
+ req->src_align, req->in.dh.in.b);
+ else
+ dma_unmap_single(dev, req->in.dh.in.b,
+ req->ctx.dh->p_size, DMA_TO_DEVICE);
+ }
- areq->dst_len = req->ctx->key_sz;
+ areq->dst_len = req->ctx.dh->p_size;
if (req->dst_align) {
- char *ptr = req->dst_align;
+ scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
+ areq->dst_len, 1);
- while (!(*ptr) && areq->dst_len) {
- areq->dst_len--;
- ptr++;
- }
+ dma_free_coherent(dev, req->ctx.dh->p_size, req->dst_align,
+ req->out.dh.r);
+ } else {
+ dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
+ DMA_FROM_DEVICE);
+ }
- if (areq->dst_len != req->ctx->key_sz)
- memmove(req->dst_align, ptr, areq->dst_len);
+ dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
+ DMA_TO_DEVICE);
+ dma_unmap_single(dev, req->phy_out,
+ sizeof(struct qat_dh_output_params),
+ DMA_TO_DEVICE);
- scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
- areq->dst_len, 1);
+ kpp_request_complete(areq, err);
+}
+
+#define PKE_DH_1536 0x390c1a49
+#define PKE_DH_G2_1536 0x2e0b1a3e
+#define PKE_DH_2048 0x4d0c1a60
+#define PKE_DH_G2_2048 0x3e0b1a55
+#define PKE_DH_3072 0x510c1a77
+#define PKE_DH_G2_3072 0x3a0b1a6c
+#define PKE_DH_4096 0x690c1a8e
+#define PKE_DH_G2_4096 0x4a0b1a83
+
+static unsigned long qat_dh_fn_id(unsigned int len, bool g2)
+{
+ unsigned int bitslen = len << 3;
+
+ switch (bitslen) {
+ case 1536:
+ return g2 ? PKE_DH_G2_1536 : PKE_DH_1536;
+ case 2048:
+ return g2 ? PKE_DH_G2_2048 : PKE_DH_2048;
+ case 3072:
+ return g2 ? PKE_DH_G2_3072 : PKE_DH_3072;
+ case 4096:
+ return g2 ? PKE_DH_G2_4096 : PKE_DH_4096;
+ default:
+ return 0;
+	}
+}
+
+static inline struct qat_dh_ctx *qat_dh_get_params(struct crypto_kpp *tfm)
+{
+ return kpp_tfm_ctx(tfm);
+}
+
+static int qat_dh_compute_value(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev = &GET_DEV(inst->accel_dev);
+ struct qat_asym_request *qat_req =
+ PTR_ALIGN(kpp_request_ctx(req), 64);
+ struct icp_qat_fw_pke_request *msg = &qat_req->req;
+ int ret, ctr = 0;
+ int n_input_params = 0;
+
+ if (unlikely(!ctx->xa))
+ return -EINVAL;
+
+ if (req->dst_len < ctx->p_size) {
+ req->dst_len = ctx->p_size;
+ return -EOVERFLOW;
+ }
+ memset(msg, '\0', sizeof(*msg));
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+ ICP_QAT_FW_COMN_REQ_FLAG_SET);
+
+ msg->pke_hdr.cd_pars.func_id = qat_dh_fn_id(ctx->p_size,
+ !req->src && ctx->g2);
+ if (unlikely(!msg->pke_hdr.cd_pars.func_id))
+ return -EINVAL;
+
+ qat_req->cb = qat_dh_cb;
+ qat_req->ctx.dh = ctx;
+ qat_req->areq.dh = req;
+ msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+ msg->pke_hdr.comn_req_flags =
+ ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
+ QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
- dma_free_coherent(dev, req->ctx->key_sz, req->dst_align,
- req->out.enc.c);
+ /*
+ * If no source is provided use g as base
+ */
+ if (req->src) {
+ qat_req->in.dh.in.xa = ctx->dma_xa;
+ qat_req->in.dh.in.p = ctx->dma_p;
+ n_input_params = 3;
} else {
- char *ptr = sg_virt(areq->dst);
+ if (ctx->g2) {
+ qat_req->in.dh.in_g2.xa = ctx->dma_xa;
+ qat_req->in.dh.in_g2.p = ctx->dma_p;
+ n_input_params = 2;
+ } else {
+ qat_req->in.dh.in.b = ctx->dma_g;
+ qat_req->in.dh.in.xa = ctx->dma_xa;
+ qat_req->in.dh.in.p = ctx->dma_p;
+ n_input_params = 3;
+ }
+ }
- while (!(*ptr) && areq->dst_len) {
- areq->dst_len--;
- ptr++;
+ ret = -ENOMEM;
+ if (req->src) {
+ /*
+		 * src can be any size in the valid range, but HW expects it
+		 * to be the same size as the modulus p, so if the sizes
+		 * differ we need to allocate a new buffer and copy the src
+		 * data into it. Otherwise we just map the user-provided
+		 * buffer, which must also be contiguous.
+ */
+ if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
+ qat_req->src_align = NULL;
+ qat_req->in.dh.in.b = dma_map_single(dev,
+ sg_virt(req->src),
+ req->src_len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev,
+ qat_req->in.dh.in.b)))
+ return ret;
+
+ } else {
+ int shift = ctx->p_size - req->src_len;
+
+ qat_req->src_align = dma_zalloc_coherent(dev,
+ ctx->p_size,
+ &qat_req->in.dh.in.b,
+ GFP_KERNEL);
+ if (unlikely(!qat_req->src_align))
+ return ret;
+
+ scatterwalk_map_and_copy(qat_req->src_align + shift,
+ req->src, 0, req->src_len, 0);
}
+ }
+ /*
+	 * dst can be any size in the valid range, but HW expects it to be
+	 * the same size as the modulus p, so if the sizes differ we need
+	 * to allocate a new buffer for the result. Otherwise we just map
+	 * the user-provided buffer, which must also be contiguous.
+ */
+ if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
+ qat_req->dst_align = NULL;
+ qat_req->out.dh.r = dma_map_single(dev, sg_virt(req->dst),
+ req->dst_len,
+ DMA_FROM_DEVICE);
- if (sg_virt(areq->dst) != ptr && areq->dst_len)
- memmove(sg_virt(areq->dst), ptr, areq->dst_len);
+ if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
+ goto unmap_src;
+
+ } else {
+ qat_req->dst_align = dma_zalloc_coherent(dev, ctx->p_size,
+ &qat_req->out.dh.r,
+ GFP_KERNEL);
+ if (unlikely(!qat_req->dst_align))
+ goto unmap_src;
+ }
- dma_unmap_single(dev, req->out.enc.c, req->ctx->key_sz,
+ qat_req->in.dh.in_tab[n_input_params] = 0;
+ qat_req->out.dh.out_tab[1] = 0;
+ /* Mapping in.in.b or in.in_g2.xa is the same */
+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh.in.b,
+ sizeof(struct qat_dh_input_params),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
+ goto unmap_dst;
+
+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh.r,
+ sizeof(struct qat_dh_output_params),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
+ goto unmap_in_params;
+
+ msg->pke_mid.src_data_addr = qat_req->phy_in;
+ msg->pke_mid.dest_data_addr = qat_req->phy_out;
+ msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
+ msg->input_param_count = n_input_params;
+ msg->output_param_count = 1;
+
+ do {
+ ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
+ } while (ret == -EBUSY && ctr++ < 100);
+
+ if (!ret)
+ return -EINPROGRESS;
+
+ if (!dma_mapping_error(dev, qat_req->phy_out))
+ dma_unmap_single(dev, qat_req->phy_out,
+ sizeof(struct qat_dh_output_params),
+ DMA_TO_DEVICE);
+unmap_in_params:
+ if (!dma_mapping_error(dev, qat_req->phy_in))
+ dma_unmap_single(dev, qat_req->phy_in,
+ sizeof(struct qat_dh_input_params),
+ DMA_TO_DEVICE);
+unmap_dst:
+ if (qat_req->dst_align)
+ dma_free_coherent(dev, ctx->p_size, qat_req->dst_align,
+ qat_req->out.dh.r);
+ else
+ if (!dma_mapping_error(dev, qat_req->out.dh.r))
+ dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
+ DMA_FROM_DEVICE);
+unmap_src:
+ if (req->src) {
+ if (qat_req->src_align)
+ dma_free_coherent(dev, ctx->p_size, qat_req->src_align,
+ qat_req->in.dh.in.b);
+ else
+ if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
+ dma_unmap_single(dev, qat_req->in.dh.in.b,
+ ctx->p_size,
+ DMA_TO_DEVICE);
+ }
+ return ret;
+}
+
+static int qat_dh_check_params_length(unsigned int p_len)
+{
+ switch (p_len) {
+ case 1536:
+ case 2048:
+ case 3072:
+ case 4096:
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
+{
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev = &GET_DEV(inst->accel_dev);
+
+ if (unlikely(!params->p || !params->g))
+ return -EINVAL;
+
+ if (qat_dh_check_params_length(params->p_size << 3))
+ return -EINVAL;
+
+ ctx->p_size = params->p_size;
+ ctx->p = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
+ if (!ctx->p)
+ return -ENOMEM;
+ memcpy(ctx->p, params->p, ctx->p_size);
+
+ /* If g equals 2 don't copy it */
+ if (params->g_size == 1 && *(char *)params->g == 0x02) {
+ ctx->g2 = true;
+ return 0;
+ }
+
+ ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
+ if (!ctx->g) {
+ dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
+ ctx->p = NULL;
+ return -ENOMEM;
+ }
+ memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
+ params->g_size);
+
+ return 0;
+}
+
+static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx)
+{
+ if (ctx->g) {
+ dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g);
+ ctx->g = NULL;
+ }
+ if (ctx->xa) {
+ dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa);
+ ctx->xa = NULL;
+ }
+ if (ctx->p) {
+ dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
+ ctx->p = NULL;
+ }
+ ctx->p_size = 0;
+ ctx->g2 = false;
+}
+
+static int qat_dh_set_secret(struct crypto_kpp *tfm, void *buf,
+ unsigned int len)
+{
+ struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+ struct dh params;
+ int ret;
+
+ if (crypto_dh_decode_key(buf, len, &params) < 0)
+ return -EINVAL;
+
+ /* Free old secret if any */
+ qat_dh_clear_ctx(dev, ctx);
+
+ ret = qat_dh_set_params(ctx, &params);
+ if (ret < 0)
+ return ret;
+
+ ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
+ GFP_KERNEL);
+ if (!ctx->xa) {
+ qat_dh_clear_ctx(dev, ctx);
+ return -ENOMEM;
+ }
+ memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key,
+ params.key_size);
+
+ return 0;
+}
+
+static int qat_dh_max_size(struct crypto_kpp *tfm)
+{
+ struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ return ctx->p ? ctx->p_size : -EINVAL;
+}
+
+static int qat_dh_init_tfm(struct crypto_kpp *tfm)
+{
+ struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct qat_crypto_instance *inst =
+ qat_crypto_get_instance_node(get_current_node());
+
+ if (!inst)
+ return -EINVAL;
+
+ ctx->p_size = 0;
+ ctx->g2 = false;
+ ctx->inst = inst;
+ return 0;
+}
+
+static void qat_dh_exit_tfm(struct crypto_kpp *tfm)
+{
+ struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+
+ qat_dh_clear_ctx(dev, ctx);
+ qat_crypto_put_instance(ctx->inst);
+}
+
+static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
+{
+ struct qat_asym_request *req = (void *)(__force long)resp->opaque;
+ struct akcipher_request *areq = req->areq.rsa;
+ struct device *dev = &GET_DEV(req->ctx.rsa->inst->accel_dev);
+ int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+ resp->pke_resp_hdr.comn_resp_flags);
+
+ err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
+
+ if (req->src_align)
+ dma_free_coherent(dev, req->ctx.rsa->key_sz, req->src_align,
+ req->in.rsa.enc.m);
+ else
+ dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
+ DMA_TO_DEVICE);
+
+ areq->dst_len = req->ctx.rsa->key_sz;
+ if (req->dst_align) {
+ scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
+ areq->dst_len, 1);
+
+ dma_free_coherent(dev, req->ctx.rsa->key_sz, req->dst_align,
+ req->out.rsa.enc.c);
+ } else {
+ dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
DMA_FROM_DEVICE);
}
@@ -175,8 +593,9 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
void qat_alg_asym_callback(void *_resp)
{
struct icp_qat_fw_pke_resp *resp = _resp;
+ struct qat_asym_request *areq = (void *)(__force long)resp->opaque;
- qat_rsa_cb(resp);
+ areq->cb(resp);
}
#define PKE_RSA_EP_512 0x1c161b21
@@ -237,13 +656,42 @@ static unsigned long qat_rsa_dec_fn_id(unsigned int len)
};
}
+#define PKE_RSA_DP2_512 0x1c131b57
+#define PKE_RSA_DP2_1024 0x26131c2d
+#define PKE_RSA_DP2_1536 0x45111d12
+#define PKE_RSA_DP2_2048 0x59121dfa
+#define PKE_RSA_DP2_3072 0x81121ed9
+#define PKE_RSA_DP2_4096 0xb1111fb2
+
+static unsigned long qat_rsa_dec_fn_id_crt(unsigned int len)
+{
+ unsigned int bitslen = len << 3;
+
+ switch (bitslen) {
+ case 512:
+ return PKE_RSA_DP2_512;
+ case 1024:
+ return PKE_RSA_DP2_1024;
+ case 1536:
+ return PKE_RSA_DP2_1536;
+ case 2048:
+ return PKE_RSA_DP2_2048;
+ case 3072:
+ return PKE_RSA_DP2_3072;
+ case 4096:
+ return PKE_RSA_DP2_4096;
+ default:
+ return 0;
+	}
+}
+
static int qat_rsa_enc(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
- struct qat_rsa_request *qat_req =
+ struct qat_asym_request *qat_req =
PTR_ALIGN(akcipher_request_ctx(req), 64);
struct icp_qat_fw_pke_request *msg = &qat_req->req;
int ret, ctr = 0;
@@ -262,14 +710,16 @@ static int qat_rsa_enc(struct akcipher_request *req)
if (unlikely(!msg->pke_hdr.cd_pars.func_id))
return -EINVAL;
- qat_req->ctx = ctx;
+ qat_req->cb = qat_rsa_cb;
+ qat_req->ctx.rsa = ctx;
+ qat_req->areq.rsa = req;
msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
msg->pke_hdr.comn_req_flags =
ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
- qat_req->in.enc.e = ctx->dma_e;
- qat_req->in.enc.n = ctx->dma_n;
+ qat_req->in.rsa.enc.e = ctx->dma_e;
+ qat_req->in.rsa.enc.n = ctx->dma_n;
ret = -ENOMEM;
/*
@@ -281,16 +731,16 @@ static int qat_rsa_enc(struct akcipher_request *req)
*/
if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
qat_req->src_align = NULL;
- qat_req->in.enc.m = dma_map_single(dev, sg_virt(req->src),
+ qat_req->in.rsa.enc.m = dma_map_single(dev, sg_virt(req->src),
req->src_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, qat_req->in.enc.m)))
+ if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
return ret;
} else {
int shift = ctx->key_sz - req->src_len;
qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
- &qat_req->in.enc.m,
+ &qat_req->in.rsa.enc.m,
GFP_KERNEL);
if (unlikely(!qat_req->src_align))
return ret;
@@ -300,30 +750,30 @@ static int qat_rsa_enc(struct akcipher_request *req)
}
if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
qat_req->dst_align = NULL;
- qat_req->out.enc.c = dma_map_single(dev, sg_virt(req->dst),
- req->dst_len,
- DMA_FROM_DEVICE);
+ qat_req->out.rsa.enc.c = dma_map_single(dev, sg_virt(req->dst),
+ req->dst_len,
+ DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, qat_req->out.enc.c)))
+ if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
goto unmap_src;
} else {
qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
- &qat_req->out.enc.c,
+ &qat_req->out.rsa.enc.c,
GFP_KERNEL);
if (unlikely(!qat_req->dst_align))
goto unmap_src;
}
- qat_req->in.in_tab[3] = 0;
- qat_req->out.out_tab[1] = 0;
- qat_req->phy_in = dma_map_single(dev, &qat_req->in.enc.m,
+ qat_req->in.rsa.in_tab[3] = 0;
+ qat_req->out.rsa.out_tab[1] = 0;
+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
sizeof(struct qat_rsa_input_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
goto unmap_dst;
- qat_req->phy_out = dma_map_single(dev, &qat_req->out.enc.c,
+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.enc.c,
sizeof(struct qat_rsa_output_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
@@ -331,7 +781,7 @@ static int qat_rsa_enc(struct akcipher_request *req)
msg->pke_mid.src_data_addr = qat_req->phy_in;
msg->pke_mid.dest_data_addr = qat_req->phy_out;
- msg->pke_mid.opaque = (uint64_t)(__force long)req;
+ msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
msg->input_param_count = 3;
msg->output_param_count = 1;
do {
@@ -353,19 +803,19 @@ unmap_in_params:
unmap_dst:
if (qat_req->dst_align)
dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
- qat_req->out.enc.c);
+ qat_req->out.rsa.enc.c);
else
- if (!dma_mapping_error(dev, qat_req->out.enc.c))
- dma_unmap_single(dev, qat_req->out.enc.c, ctx->key_sz,
- DMA_FROM_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
+ dma_unmap_single(dev, qat_req->out.rsa.enc.c,
+ ctx->key_sz, DMA_FROM_DEVICE);
unmap_src:
if (qat_req->src_align)
dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
- qat_req->in.enc.m);
+ qat_req->in.rsa.enc.m);
else
- if (!dma_mapping_error(dev, qat_req->in.enc.m))
- dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz,
- DMA_TO_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
+ dma_unmap_single(dev, qat_req->in.rsa.enc.m,
+ ctx->key_sz, DMA_TO_DEVICE);
return ret;
}
@@ -375,7 +825,7 @@ static int qat_rsa_dec(struct akcipher_request *req)
struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
- struct qat_rsa_request *qat_req =
+ struct qat_asym_request *qat_req =
PTR_ALIGN(akcipher_request_ctx(req), 64);
struct icp_qat_fw_pke_request *msg = &qat_req->req;
int ret, ctr = 0;
@@ -390,18 +840,30 @@ static int qat_rsa_dec(struct akcipher_request *req)
memset(msg, '\0', sizeof(*msg));
ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
ICP_QAT_FW_COMN_REQ_FLAG_SET);
- msg->pke_hdr.cd_pars.func_id = qat_rsa_dec_fn_id(ctx->key_sz);
+ msg->pke_hdr.cd_pars.func_id = ctx->crt_mode ?
+ qat_rsa_dec_fn_id_crt(ctx->key_sz) :
+ qat_rsa_dec_fn_id(ctx->key_sz);
if (unlikely(!msg->pke_hdr.cd_pars.func_id))
return -EINVAL;
- qat_req->ctx = ctx;
+ qat_req->cb = qat_rsa_cb;
+ qat_req->ctx.rsa = ctx;
+ qat_req->areq.rsa = req;
msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
msg->pke_hdr.comn_req_flags =
ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
- qat_req->in.dec.d = ctx->dma_d;
- qat_req->in.dec.n = ctx->dma_n;
+ if (ctx->crt_mode) {
+ qat_req->in.rsa.dec_crt.p = ctx->dma_p;
+ qat_req->in.rsa.dec_crt.q = ctx->dma_q;
+ qat_req->in.rsa.dec_crt.dp = ctx->dma_dp;
+ qat_req->in.rsa.dec_crt.dq = ctx->dma_dq;
+ qat_req->in.rsa.dec_crt.qinv = ctx->dma_qinv;
+ } else {
+ qat_req->in.rsa.dec.d = ctx->dma_d;
+ qat_req->in.rsa.dec.n = ctx->dma_n;
+ }
ret = -ENOMEM;
/*
@@ -413,16 +875,16 @@ static int qat_rsa_dec(struct akcipher_request *req)
*/
if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
qat_req->src_align = NULL;
- qat_req->in.dec.c = dma_map_single(dev, sg_virt(req->src),
+ qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src),
req->dst_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, qat_req->in.dec.c)))
+ if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
return ret;
} else {
int shift = ctx->key_sz - req->src_len;
qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
- &qat_req->in.dec.c,
+ &qat_req->in.rsa.dec.c,
GFP_KERNEL);
if (unlikely(!qat_req->src_align))
return ret;
@@ -432,31 +894,34 @@ static int qat_rsa_dec(struct akcipher_request *req)
}
if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
qat_req->dst_align = NULL;
- qat_req->out.dec.m = dma_map_single(dev, sg_virt(req->dst),
+ qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst),
req->dst_len,
DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, qat_req->out.dec.m)))
+ if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
goto unmap_src;
} else {
qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
- &qat_req->out.dec.m,
+ &qat_req->out.rsa.dec.m,
GFP_KERNEL);
if (unlikely(!qat_req->dst_align))
goto unmap_src;
}
- qat_req->in.in_tab[3] = 0;
- qat_req->out.out_tab[1] = 0;
- qat_req->phy_in = dma_map_single(dev, &qat_req->in.dec.c,
+ if (ctx->crt_mode)
+ qat_req->in.rsa.in_tab[6] = 0;
+ else
+ qat_req->in.rsa.in_tab[3] = 0;
+ qat_req->out.rsa.out_tab[1] = 0;
+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c,
sizeof(struct qat_rsa_input_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
goto unmap_dst;
- qat_req->phy_out = dma_map_single(dev, &qat_req->out.dec.m,
+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.dec.m,
sizeof(struct qat_rsa_output_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
@@ -464,8 +929,12 @@ static int qat_rsa_dec(struct akcipher_request *req)
msg->pke_mid.src_data_addr = qat_req->phy_in;
msg->pke_mid.dest_data_addr = qat_req->phy_out;
- msg->pke_mid.opaque = (uint64_t)(__force long)req;
- msg->input_param_count = 3;
+ msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
+ if (ctx->crt_mode)
+ msg->input_param_count = 6;
+ else
+ msg->input_param_count = 3;
+
msg->output_param_count = 1;
do {
ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
@@ -486,26 +955,24 @@ unmap_in_params:
unmap_dst:
if (qat_req->dst_align)
dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
- qat_req->out.dec.m);
+ qat_req->out.rsa.dec.m);
else
- if (!dma_mapping_error(dev, qat_req->out.dec.m))
- dma_unmap_single(dev, qat_req->out.dec.m, ctx->key_sz,
- DMA_FROM_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
+ dma_unmap_single(dev, qat_req->out.rsa.dec.m,
+ ctx->key_sz, DMA_FROM_DEVICE);
unmap_src:
if (qat_req->src_align)
dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
- qat_req->in.dec.c);
+ qat_req->in.rsa.dec.c);
else
- if (!dma_mapping_error(dev, qat_req->in.dec.c))
- dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz,
- DMA_TO_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
+ dma_unmap_single(dev, qat_req->in.rsa.dec.c,
+ ctx->key_sz, DMA_TO_DEVICE);
return ret;
}
-int qat_rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
- const void *value, size_t vlen)
+int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
{
- struct qat_rsa_ctx *ctx = context;
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
const char *ptr = value;
@@ -518,11 +985,6 @@ int qat_rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
ctx->key_sz = vlen;
ret = -EINVAL;
- /* In FIPS mode only allow key size 2K & 3K */
- if (fips_enabled && (ctx->key_sz != 256 && ctx->key_sz != 384)) {
- pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
- goto err;
- }
/* invalid key size provided */
if (!qat_rsa_enc_fn_id(ctx->key_sz))
goto err;
@@ -540,10 +1002,8 @@ err:
return ret;
}
-int qat_rsa_get_e(void *context, size_t hdrlen, unsigned char tag,
- const void *value, size_t vlen)
+int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
{
- struct qat_rsa_ctx *ctx = context;
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
const char *ptr = value;
@@ -559,18 +1019,15 @@ int qat_rsa_get_e(void *context, size_t hdrlen, unsigned char tag,
}
ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
- if (!ctx->e) {
- ctx->e = NULL;
+ if (!ctx->e)
return -ENOMEM;
- }
+
memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
return 0;
}
-int qat_rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
- const void *value, size_t vlen)
+int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
{
- struct qat_rsa_ctx *ctx = context;
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
const char *ptr = value;
@@ -585,12 +1042,6 @@ int qat_rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
goto err;
- /* In FIPS mode only allow key size 2K & 3K */
- if (fips_enabled && (vlen != 256 && vlen != 384)) {
- pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
- goto err;
- }
-
ret = -ENOMEM;
ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
if (!ctx->d)
@@ -603,12 +1054,106 @@ err:
return ret;
}
-static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
- unsigned int keylen, bool private)
+static void qat_rsa_drop_leading_zeros(const char **ptr, unsigned int *len)
{
- struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
- struct device *dev = &GET_DEV(ctx->inst->accel_dev);
- int ret;
+ while (!**ptr && *len) {
+ (*ptr)++;
+ (*len)--;
+ }
+}
+
+static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
+{
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev = &GET_DEV(inst->accel_dev);
+ const char *ptr;
+ unsigned int len;
+ unsigned int half_key_sz = ctx->key_sz / 2;
+
+ /* p */
+ ptr = rsa_key->p;
+ len = rsa_key->p_sz;
+ qat_rsa_drop_leading_zeros(&ptr, &len);
+ if (!len)
+ goto err;
+ ctx->p = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
+ if (!ctx->p)
+ goto err;
+ memcpy(ctx->p + (half_key_sz - len), ptr, len);
+
+ /* q */
+ ptr = rsa_key->q;
+ len = rsa_key->q_sz;
+ qat_rsa_drop_leading_zeros(&ptr, &len);
+ if (!len)
+ goto free_p;
+ ctx->q = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
+ if (!ctx->q)
+ goto free_p;
+ memcpy(ctx->q + (half_key_sz - len), ptr, len);
+
+ /* dp */
+ ptr = rsa_key->dp;
+ len = rsa_key->dp_sz;
+ qat_rsa_drop_leading_zeros(&ptr, &len);
+ if (!len)
+ goto free_q;
+ ctx->dp = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dp,
+ GFP_KERNEL);
+ if (!ctx->dp)
+ goto free_q;
+ memcpy(ctx->dp + (half_key_sz - len), ptr, len);
+
+ /* dq */
+ ptr = rsa_key->dq;
+ len = rsa_key->dq_sz;
+ qat_rsa_drop_leading_zeros(&ptr, &len);
+ if (!len)
+ goto free_dp;
+ ctx->dq = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dq,
+ GFP_KERNEL);
+ if (!ctx->dq)
+ goto free_dp;
+ memcpy(ctx->dq + (half_key_sz - len), ptr, len);
+
+ /* qinv */
+ ptr = rsa_key->qinv;
+ len = rsa_key->qinv_sz;
+ qat_rsa_drop_leading_zeros(&ptr, &len);
+ if (!len)
+ goto free_dq;
+ ctx->qinv = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
+ GFP_KERNEL);
+ if (!ctx->qinv)
+ goto free_dq;
+ memcpy(ctx->qinv + (half_key_sz - len), ptr, len);
+
+ ctx->crt_mode = true;
+ return;
+
+free_dq:
+ memset(ctx->dq, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
+ ctx->dq = NULL;
+free_dp:
+ memset(ctx->dp, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
+ ctx->dp = NULL;
+free_q:
+ memset(ctx->q, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
+ ctx->q = NULL;
+free_p:
+ memset(ctx->p, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
+ ctx->p = NULL;
+err:
+ ctx->crt_mode = false;
+}
+
+static void qat_rsa_clear_ctx(struct device *dev, struct qat_rsa_ctx *ctx)
+{
+ unsigned int half_key_sz = ctx->key_sz / 2;
/* Free the old key if any */
if (ctx->n)
@@ -619,19 +1164,68 @@ static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
memset(ctx->d, '\0', ctx->key_sz);
dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
}
+ if (ctx->p) {
+ memset(ctx->p, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
+ }
+ if (ctx->q) {
+ memset(ctx->q, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
+ }
+ if (ctx->dp) {
+ memset(ctx->dp, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
+ }
+ if (ctx->dq) {
+ memset(ctx->dq, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
+ }
+ if (ctx->qinv) {
+ memset(ctx->qinv, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->qinv, ctx->dma_qinv);
+ }
ctx->n = NULL;
ctx->e = NULL;
ctx->d = NULL;
+ ctx->p = NULL;
+ ctx->q = NULL;
+ ctx->dp = NULL;
+ ctx->dq = NULL;
+ ctx->qinv = NULL;
+ ctx->crt_mode = false;
+ ctx->key_sz = 0;
+}
+
+static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen, bool private)
+{
+ struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+ struct rsa_key rsa_key;
+ int ret;
+
+ qat_rsa_clear_ctx(dev, ctx);
if (private)
- ret = asn1_ber_decoder(&qat_rsaprivkey_decoder, ctx, key,
- keylen);
+ ret = rsa_parse_priv_key(&rsa_key, key, keylen);
else
- ret = asn1_ber_decoder(&qat_rsapubkey_decoder, ctx, key,
- keylen);
+ ret = rsa_parse_pub_key(&rsa_key, key, keylen);
+ if (ret < 0)
+ goto free;
+
+ ret = qat_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz);
if (ret < 0)
goto free;
+ ret = qat_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
+ if (ret < 0)
+ goto free;
+ if (private) {
+ ret = qat_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
+ if (ret < 0)
+ goto free;
+ qat_rsa_setkey_crt(ctx, &rsa_key);
+ }
if (!ctx->n || !ctx->e) {
/* invalid key provided */
@@ -646,20 +1240,7 @@ static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
return 0;
free:
- if (ctx->d) {
- memset(ctx->d, '\0', ctx->key_sz);
- dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
- ctx->d = NULL;
- }
- if (ctx->e) {
- dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
- ctx->e = NULL;
- }
- if (ctx->n) {
- dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
- ctx->n = NULL;
- ctx->key_sz = 0;
- }
+ qat_rsa_clear_ctx(dev, ctx);
return ret;
}
@@ -725,7 +1306,7 @@ static struct akcipher_alg rsa = {
.max_size = qat_rsa_max_size,
.init = qat_rsa_init_tfm,
.exit = qat_rsa_exit_tfm,
- .reqsize = sizeof(struct qat_rsa_request) + 64,
+ .reqsize = sizeof(struct qat_asym_request) + 64,
.base = {
.cra_name = "rsa",
.cra_driver_name = "qat-rsa",
@@ -735,6 +1316,23 @@ static struct akcipher_alg rsa = {
},
};
+static struct kpp_alg dh = {
+ .set_secret = qat_dh_set_secret,
+ .generate_public_key = qat_dh_compute_value,
+ .compute_shared_secret = qat_dh_compute_value,
+ .max_size = qat_dh_max_size,
+ .init = qat_dh_init_tfm,
+ .exit = qat_dh_exit_tfm,
+ .reqsize = sizeof(struct qat_asym_request) + 64,
+ .base = {
+ .cra_name = "dh",
+ .cra_driver_name = "qat-dh",
+ .cra_priority = 1000,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct qat_dh_ctx),
+ },
+};
+
int qat_asym_algs_register(void)
{
int ret = 0;
@@ -743,7 +1341,11 @@ int qat_asym_algs_register(void)
if (++active_devs == 1) {
rsa.base.cra_flags = 0;
ret = crypto_register_akcipher(&rsa);
+ if (ret)
+ goto unlock;
+ ret = crypto_register_kpp(&dh);
}
+unlock:
mutex_unlock(&algs_lock);
return ret;
}
@@ -751,7 +1353,9 @@ int qat_asym_algs_register(void)
void qat_asym_algs_unregister(void)
{
mutex_lock(&algs_lock);
- if (--active_devs == 0)
+ if (--active_devs == 0) {
crypto_unregister_akcipher(&rsa);
+ crypto_unregister_kpp(&dh);
+ }
mutex_unlock(&algs_lock);
}
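
The CRT setkey path above repeats one idiom five times: strip leading zero bytes from a big-endian integer, then right-align what remains in a zeroed DMA buffer of half the key size. A minimal standalone sketch of that idiom follows (plain C for illustration only; load_param() and the buffer sizes are assumptions, not driver symbols):

#include <stdio.h>
#include <string.h>

static void drop_leading_zeros(const unsigned char **p, unsigned int *len)
{
	/* Same idea as qat_rsa_drop_leading_zeros(): skip 0x00 bytes,
	 * checking the remaining length before each dereference. */
	while (*len && !**p) {
		(*p)++;
		(*len)--;
	}
}

static int load_param(unsigned char *dst, unsigned int dst_len,
		      const unsigned char *src, unsigned int src_len)
{
	drop_leading_zeros(&src, &src_len);
	if (!src_len || src_len > dst_len)
		return -1;	/* empty or oversized parameter */
	memset(dst, 0, dst_len);
	/* Right-align so the device sees a fixed-width big-endian value. */
	memcpy(dst + (dst_len - src_len), src, src_len);
	return 0;
}

int main(void)
{
	const unsigned char p[] = { 0x00, 0x00, 0x12, 0x34 };
	unsigned char buf[8];

	if (!load_param(buf, sizeof(buf), p, sizeof(p)))
		printf("last byte: 0x%02x\n", buf[7]);	/* prints 0x34 */
	return 0;
}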
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index 6e1d5e185..1dfcab317 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -252,6 +252,7 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
hw_data->get_arb_mapping = adf_get_arbiter_mapping;
hw_data->enable_ints = adf_enable_ints;
hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
+ hw_data->reset_device = adf_reset_sbr;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
}
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
index dbcbbe242..b04b42f48 100644
--- a/drivers/crypto/qce/ablkcipher.c
+++ b/drivers/crypto/qce/ablkcipher.c
@@ -15,8 +15,8 @@
#include <linux/interrupt.h>
#include <linux/types.h>
#include <crypto/aes.h>
-#include <crypto/algapi.h>
#include <crypto/des.h>
+#include <crypto/internal/skcipher.h>
#include "cipher.h"
@@ -189,7 +189,7 @@ static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
memcpy(ctx->enc_key, key, keylen);
return 0;
fallback:
- ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
+ ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
if (!ret)
ctx->enc_keylen = keylen;
return ret;
@@ -212,10 +212,16 @@ static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
ctx->enc_keylen != AES_KEYSIZE_256) {
- ablkcipher_request_set_tfm(req, ctx->fallback);
- ret = encrypt ? crypto_ablkcipher_encrypt(req) :
- crypto_ablkcipher_decrypt(req);
- ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+ SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+
+ skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->nbytes, req->info);
+ ret = encrypt ? crypto_skcipher_encrypt(subreq) :
+ crypto_skcipher_decrypt(subreq);
+ skcipher_request_zero(subreq);
return ret;
}
@@ -239,10 +245,9 @@ static int qce_ablkcipher_init(struct crypto_tfm *tfm)
memset(ctx, 0, sizeof(*ctx));
tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);
- ctx->fallback = crypto_alloc_ablkcipher(crypto_tfm_alg_name(tfm),
- CRYPTO_ALG_TYPE_ABLKCIPHER,
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK);
+ ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(tfm), 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->fallback))
return PTR_ERR(ctx->fallback);
@@ -253,7 +258,7 @@ static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
{
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_ablkcipher(ctx->fallback);
+ crypto_free_skcipher(ctx->fallback);
}
struct qce_ablkcipher_def {
diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
index 5c6a5f863..2b0278bb6 100644
--- a/drivers/crypto/qce/cipher.h
+++ b/drivers/crypto/qce/cipher.h
@@ -22,7 +22,7 @@
struct qce_cipher_ctx {
u8 enc_key[QCE_MAX_KEY_SIZE];
unsigned int enc_keylen;
- struct crypto_ablkcipher *fallback;
+ struct crypto_skcipher *fallback;
};
/**
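
Both the qce conversion above and the sahara conversion below replace a crypto_ablkcipher fallback with a crypto_skcipher one, delegating via a stack-allocated subrequest instead of retargeting the caller's request. A condensed sketch of that pattern, assuming kernel context (the fallback_ctx layout and function names are illustrative, not driver symbols):

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>

struct fallback_ctx {
	struct crypto_skcipher *fallback;
};

static int fallback_init(struct fallback_ctx *ctx, const char *name)
{
	/* Ask for a software implementation we may fall back to. */
	ctx->fallback = crypto_alloc_skcipher(name, 0,
					      CRYPTO_ALG_ASYNC |
					      CRYPTO_ALG_NEED_FALLBACK);
	return PTR_ERR_OR_ZERO(ctx->fallback);
}

static int fallback_encrypt(struct fallback_ctx *ctx,
			    struct ablkcipher_request *req)
{
	/* Stack-allocated subrequest: no allocation on the fast path. */
	SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
	int ret;

	skcipher_request_set_tfm(subreq, ctx->fallback);
	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->nbytes, req->info);
	ret = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);	/* wipe request state off the stack */
	return ret;
}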
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 2b3a0cfe3..dce1af0ce 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -155,43 +155,43 @@
* expansion of its usage.
*/
struct samsung_aes_variant {
- unsigned int aes_offset;
+ unsigned int aes_offset;
};
struct s5p_aes_reqctx {
- unsigned long mode;
+ unsigned long mode;
};
struct s5p_aes_ctx {
- struct s5p_aes_dev *dev;
+ struct s5p_aes_dev *dev;
- uint8_t aes_key[AES_MAX_KEY_SIZE];
- uint8_t nonce[CTR_RFC3686_NONCE_SIZE];
- int keylen;
+ uint8_t aes_key[AES_MAX_KEY_SIZE];
+ uint8_t nonce[CTR_RFC3686_NONCE_SIZE];
+ int keylen;
};
struct s5p_aes_dev {
- struct device *dev;
- struct clk *clk;
- void __iomem *ioaddr;
- void __iomem *aes_ioaddr;
- int irq_fc;
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *ioaddr;
+ void __iomem *aes_ioaddr;
+ int irq_fc;
- struct ablkcipher_request *req;
- struct s5p_aes_ctx *ctx;
- struct scatterlist *sg_src;
- struct scatterlist *sg_dst;
+ struct ablkcipher_request *req;
+ struct s5p_aes_ctx *ctx;
+ struct scatterlist *sg_src;
+ struct scatterlist *sg_dst;
/* In case of unaligned access: */
- struct scatterlist *sg_src_cpy;
- struct scatterlist *sg_dst_cpy;
+ struct scatterlist *sg_src_cpy;
+ struct scatterlist *sg_dst_cpy;
- struct tasklet_struct tasklet;
- struct crypto_queue queue;
- bool busy;
- spinlock_t lock;
+ struct tasklet_struct tasklet;
+ struct crypto_queue queue;
+ bool busy;
+ spinlock_t lock;
- struct samsung_aes_variant *variant;
+ struct samsung_aes_variant *variant;
};
static struct s5p_aes_dev *s5p_dev;
@@ -421,11 +421,11 @@ static bool s5p_aes_rx(struct s5p_aes_dev *dev)
static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
{
struct platform_device *pdev = dev_id;
- struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
- uint32_t status;
- unsigned long flags;
- bool set_dma_tx = false;
- bool set_dma_rx = false;
+ struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
+ bool set_dma_tx = false;
+ bool set_dma_rx = false;
+ unsigned long flags;
+ uint32_t status;
spin_lock_irqsave(&dev->lock, flags);
@@ -538,10 +538,10 @@ static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
{
- struct ablkcipher_request *req = dev->req;
- uint32_t aes_control;
- int err;
- unsigned long flags;
+ struct ablkcipher_request *req = dev->req;
+ uint32_t aes_control;
+ unsigned long flags;
+ int err;
aes_control = SSS_AES_KEY_CHANGE_MODE;
if (mode & FLAGS_AES_DECRYPT)
@@ -653,10 +653,10 @@ exit:
static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
- struct s5p_aes_dev *dev = ctx->dev;
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req);
+ struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct s5p_aes_dev *dev = ctx->dev;
if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
dev_err(dev->dev, "request size is not exact amount of AES blocks\n");
@@ -671,7 +671,7 @@ static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
const uint8_t *key, unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
if (keylen != AES_KEYSIZE_128 &&
@@ -763,11 +763,11 @@ static struct crypto_alg algs[] = {
static int s5p_aes_probe(struct platform_device *pdev)
{
- int i, j, err = -ENODEV;
- struct s5p_aes_dev *pdata;
- struct device *dev = &pdev->dev;
- struct resource *res;
+ struct device *dev = &pdev->dev;
+ int i, j, err = -ENODEV;
struct samsung_aes_variant *variant;
+ struct s5p_aes_dev *pdata;
+ struct resource *res;
if (s5p_dev)
return -EEXIST;
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index c3f3d89e4..0c49956ee 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -14,10 +14,9 @@
* Based on omap-aes.c and tegra-aes.c
*/
-#include <crypto/algapi.h>
#include <crypto/aes.h>
-#include <crypto/hash.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
@@ -150,10 +149,7 @@ struct sahara_ctx {
/* AES-specific context */
int keylen;
u8 key[AES_KEYSIZE_128];
- struct crypto_ablkcipher *fallback;
-
- /* SHA-specific context */
- struct crypto_shash *shash_fallback;
+ struct crypto_skcipher *fallback;
};
struct sahara_aes_reqctx {
@@ -620,25 +616,21 @@ static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return 0;
}
- if (keylen != AES_KEYSIZE_128 &&
- keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
+ if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
return -EINVAL;
/*
* The requested key size is not supported by HW, do a fallback.
*/
- ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
- ctx->fallback->base.crt_flags |=
- (tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
+ CRYPTO_TFM_REQ_MASK);
- ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
- if (ret) {
- struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm);
+ ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
- tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm_aux->crt_flags |=
- (ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK);
- }
+ tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
+ tfm->base.crt_flags |= crypto_skcipher_get_flags(ctx->fallback) &
+ CRYPTO_TFM_RES_MASK;
return ret;
}
@@ -670,16 +662,20 @@ static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
{
- struct crypto_tfm *tfm =
- crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
crypto_ablkcipher_reqtfm(req));
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- ablkcipher_request_set_tfm(req, ctx->fallback);
- err = crypto_ablkcipher_encrypt(req);
- ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+ SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+
+ skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->nbytes, req->info);
+ err = crypto_skcipher_encrypt(subreq);
+ skcipher_request_zero(subreq);
return err;
}
@@ -688,16 +684,20 @@ static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
{
- struct crypto_tfm *tfm =
- crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
crypto_ablkcipher_reqtfm(req));
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- ablkcipher_request_set_tfm(req, ctx->fallback);
- err = crypto_ablkcipher_decrypt(req);
- ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+ SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+
+ skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->nbytes, req->info);
+ err = crypto_skcipher_decrypt(subreq);
+ skcipher_request_zero(subreq);
return err;
}
@@ -706,16 +706,20 @@ static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
{
- struct crypto_tfm *tfm =
- crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
crypto_ablkcipher_reqtfm(req));
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- ablkcipher_request_set_tfm(req, ctx->fallback);
- err = crypto_ablkcipher_encrypt(req);
- ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+ SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+
+ skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->nbytes, req->info);
+ err = crypto_skcipher_encrypt(subreq);
+ skcipher_request_zero(subreq);
return err;
}
@@ -724,16 +728,20 @@ static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
{
- struct crypto_tfm *tfm =
- crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
crypto_ablkcipher_reqtfm(req));
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- ablkcipher_request_set_tfm(req, ctx->fallback);
- err = crypto_ablkcipher_decrypt(req);
- ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+ SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+
+ skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->nbytes, req->info);
+ err = crypto_skcipher_decrypt(subreq);
+ skcipher_request_zero(subreq);
return err;
}
@@ -745,8 +753,9 @@ static int sahara_aes_cra_init(struct crypto_tfm *tfm)
const char *name = crypto_tfm_alg_name(tfm);
struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
- ctx->fallback = crypto_alloc_ablkcipher(name, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ ctx->fallback = crypto_alloc_skcipher(name, 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->fallback)) {
pr_err("Error allocating fallback algo %s\n", name);
return PTR_ERR(ctx->fallback);
@@ -761,9 +770,7 @@ static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
{
struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
- if (ctx->fallback)
- crypto_free_ablkcipher(ctx->fallback);
- ctx->fallback = NULL;
+ crypto_free_skcipher(ctx->fallback);
}
static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
@@ -1180,15 +1187,6 @@ static int sahara_sha_import(struct ahash_request *req, const void *in)
static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
- const char *name = crypto_tfm_alg_name(tfm);
- struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
-
- ctx->shash_fallback = crypto_alloc_shash(name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(ctx->shash_fallback)) {
- pr_err("Error allocating fallback algo %s\n", name);
- return PTR_ERR(ctx->shash_fallback);
- }
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct sahara_sha_reqctx) +
SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
@@ -1196,14 +1194,6 @@ static int sahara_sha_cra_init(struct crypto_tfm *tfm)
return 0;
}
-static void sahara_sha_cra_exit(struct crypto_tfm *tfm)
-{
- struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
-
- crypto_free_shash(ctx->shash_fallback);
- ctx->shash_fallback = NULL;
-}
-
static struct crypto_alg aes_algs[] = {
{
.cra_name = "ecb(aes)",
@@ -1272,7 +1262,6 @@ static struct ahash_alg sha_v3_algs[] = {
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = sahara_sha_cra_init,
- .cra_exit = sahara_sha_cra_exit,
}
},
};
@@ -1300,7 +1289,6 @@ static struct ahash_alg sha_v4_algs[] = {
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = sahara_sha_cra_init,
- .cra_exit = sahara_sha_cra_exit,
}
},
};
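
One detail of sahara_aes_setkey() above worth isolating is the flag plumbing around the fallback: request flags are forwarded before setkey, and result flags are copied back afterwards so the caller still sees indications such as CRYPTO_TFM_RES_BAD_KEY_LEN. A sketch under the same kernel-context assumption (the wrapper name is illustrative):

#include <crypto/skcipher.h>
#include <linux/crypto.h>

static int fallback_setkey(struct crypto_ablkcipher *tfm,
			   struct crypto_skcipher *fallback,
			   const u8 *key, unsigned int keylen)
{
	int ret;

	/* Forward the caller's request flags to the fallback... */
	crypto_skcipher_clear_flags(fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(fallback, tfm->base.crt_flags &
					    CRYPTO_TFM_REQ_MASK);

	ret = crypto_skcipher_setkey(fallback, key, keylen);

	/* ...and reflect its result flags back to the caller. */
	tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->base.crt_flags |= crypto_skcipher_get_flags(fallback) &
			       CRYPTO_TFM_RES_MASK;
	return ret;
}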
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index b7ee8d301..0418a2f41 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -91,10 +91,17 @@ static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
return be16_to_cpu(ptr->len);
}
-static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1)
+static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
+ bool is_sec1)
{
if (!is_sec1)
- ptr->j_extent = 0;
+ ptr->j_extent = val;
+}
+
+static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
+{
+ if (!is_sec1)
+ ptr->j_extent |= val;
}
/*
@@ -111,7 +118,7 @@ static void map_single_talitos_ptr(struct device *dev,
to_talitos_ptr_len(ptr, len, is_sec1);
to_talitos_ptr(ptr, dma_addr, is_sec1);
- to_talitos_ptr_extent_clear(ptr, is_sec1);
+ to_talitos_ptr_ext_set(ptr, 0, is_sec1);
}
/*
@@ -804,6 +811,11 @@ static void talitos_unregister_rng(struct device *dev)
* crypto alg
*/
#define TALITOS_CRA_PRIORITY 3000
+/*
+ * Defines a priority for doing AEAD with descriptors of type
+ * HMAC_SNOOP_NO_AFEU (HSNA) instead of type IPSEC_ESP
+ */
+#define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1)
#define TALITOS_MAX_KEY_SIZE 96
#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
@@ -904,35 +916,59 @@ struct talitos_edesc {
static void talitos_sg_unmap(struct device *dev,
struct talitos_edesc *edesc,
struct scatterlist *src,
- struct scatterlist *dst)
+ struct scatterlist *dst,
+ unsigned int len, unsigned int offset)
{
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
unsigned int src_nents = edesc->src_nents ? : 1;
unsigned int dst_nents = edesc->dst_nents ? : 1;
+ if (is_sec1 && dst && dst_nents > 1) {
+ dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
+ len, DMA_FROM_DEVICE);
+ sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
+ offset);
+ }
if (src != dst) {
- dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
+ if (src_nents == 1 || !is_sec1)
+ dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
- if (dst) {
+ if (dst && (dst_nents == 1 || !is_sec1))
dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
- }
- } else
+ } else if (src_nents == 1 || !is_sec1) {
dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
+ }
}
static void ipsec_esp_unmap(struct device *dev,
struct talitos_edesc *edesc,
struct aead_request *areq)
{
- unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
+ struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+
+ if (edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)
+ unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
+ DMA_FROM_DEVICE);
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
- talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
+ talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
+ areq->assoclen);
if (edesc->dma_len)
dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
DMA_BIDIRECTIONAL);
+
+ if (!(edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
+ unsigned int dst_nents = edesc->dst_nents ? : 1;
+
+ sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
+ areq->assoclen + areq->cryptlen - ivsize);
+ }
}
/*
@@ -942,6 +978,8 @@ static void ipsec_esp_encrypt_done(struct device *dev,
struct talitos_desc *desc, void *context,
int err)
{
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
struct aead_request *areq = context;
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
unsigned int authsize = crypto_aead_authsize(authenc);
@@ -955,8 +993,11 @@ static void ipsec_esp_encrypt_done(struct device *dev,
/* copy the generated ICV to dst */
if (edesc->icv_ool) {
- icvdata = &edesc->link_tbl[edesc->src_nents +
- edesc->dst_nents + 2];
+ if (is_sec1)
+ icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
+ else
+ icvdata = &edesc->link_tbl[edesc->src_nents +
+ edesc->dst_nents + 2];
sg = sg_last(areq->dst, edesc->dst_nents);
memcpy((char *)sg_virt(sg) + sg->length - authsize,
icvdata, authsize);
@@ -977,6 +1018,8 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
struct talitos_edesc *edesc;
struct scatterlist *sg;
char *oicv, *icv;
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
edesc = container_of(desc, struct talitos_edesc, desc);
@@ -988,7 +1031,12 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
icv = (char *)sg_virt(sg) + sg->length - authsize;
if (edesc->dma_len) {
- oicv = (char *)&edesc->link_tbl[edesc->src_nents +
+ if (is_sec1)
+ oicv = (char *)&edesc->dma_link_tbl +
+ req->assoclen + req->cryptlen;
+ else
+ oicv = (char *)
+ &edesc->link_tbl[edesc->src_nents +
edesc->dst_nents + 2];
if (edesc->icv_ool)
icv = oicv + authsize;
@@ -1050,8 +1098,8 @@ static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
to_talitos_ptr(link_tbl_ptr + count,
sg_dma_address(sg) + offset, 0);
- link_tbl_ptr[count].len = cpu_to_be16(len);
- link_tbl_ptr[count].j_extent = 0;
+ to_talitos_ptr_len(link_tbl_ptr + count, len, 0);
+ to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
count++;
cryptlen -= len;
offset = 0;
@@ -1062,17 +1110,43 @@ next:
/* tag end of link table */
if (count > 0)
- link_tbl_ptr[count - 1].j_extent = DESC_PTR_LNKTBL_RETURN;
+ to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
+ DESC_PTR_LNKTBL_RETURN, 0);
return count;
}
-static inline int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
- int cryptlen,
- struct talitos_ptr *link_tbl_ptr)
+int talitos_sg_map(struct device *dev, struct scatterlist *src,
+ unsigned int len, struct talitos_edesc *edesc,
+ struct talitos_ptr *ptr,
+ int sg_count, unsigned int offset, int tbl_off)
{
- return sg_to_link_tbl_offset(sg, sg_count, 0, cryptlen,
- link_tbl_ptr);
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
+
+ to_talitos_ptr_len(ptr, len, is_sec1);
+ to_talitos_ptr_ext_set(ptr, 0, is_sec1);
+
+ if (sg_count == 1) {
+ to_talitos_ptr(ptr, sg_dma_address(src) + offset, is_sec1);
+ return sg_count;
+ }
+ if (is_sec1) {
+ to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, is_sec1);
+ return sg_count;
+ }
+ sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len,
+ &edesc->link_tbl[tbl_off]);
+ if (sg_count == 1) {
+ /* Only one segment now, so no link tbl needed */
+ copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
+ return sg_count;
+ }
+ to_talitos_ptr(ptr, edesc->dma_link_tbl +
+ tbl_off * sizeof(struct talitos_ptr), is_sec1);
+ to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
+
+ return sg_count;
}
/*
@@ -1093,42 +1167,52 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
int tbl_off = 0;
int sg_count, ret;
int sg_link_tbl_len;
+ bool sync_needed = false;
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
/* hmac key */
map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
DMA_TO_DEVICE);
- sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
- (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
- : DMA_TO_DEVICE);
- /* hmac data */
- desc->ptr[1].len = cpu_to_be16(areq->assoclen);
- if (sg_count > 1 &&
- (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
- areq->assoclen,
- &edesc->link_tbl[tbl_off])) > 1) {
- to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
- sizeof(struct talitos_ptr), 0);
- desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
+ sg_count = edesc->src_nents ?: 1;
+ if (is_sec1 && sg_count > 1)
+ sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
+ areq->assoclen + cryptlen);
+ else
+ sg_count = dma_map_sg(dev, areq->src, sg_count,
+ (areq->src == areq->dst) ?
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
- dma_sync_single_for_device(dev, edesc->dma_link_tbl,
- edesc->dma_len, DMA_BIDIRECTIONAL);
+ /* hmac data */
+ ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
+ &desc->ptr[1], sg_count, 0, tbl_off);
+ if (ret > 1) {
tbl_off += ret;
- } else {
- to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
- desc->ptr[1].j_extent = 0;
+ sync_needed = true;
}
/* cipher iv */
- to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
- desc->ptr[2].len = cpu_to_be16(ivsize);
- desc->ptr[2].j_extent = 0;
+ if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
+ to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, is_sec1);
+ to_talitos_ptr_len(&desc->ptr[2], ivsize, is_sec1);
+ to_talitos_ptr_ext_set(&desc->ptr[2], 0, is_sec1);
+ } else {
+ to_talitos_ptr(&desc->ptr[3], edesc->iv_dma, is_sec1);
+ to_talitos_ptr_len(&desc->ptr[3], ivsize, is_sec1);
+ to_talitos_ptr_ext_set(&desc->ptr[3], 0, is_sec1);
+ }
/* cipher key */
- map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
- (char *)&ctx->key + ctx->authkeylen,
- DMA_TO_DEVICE);
+ if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
+ map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
+ (char *)&ctx->key + ctx->authkeylen,
+ DMA_TO_DEVICE);
+ else
+ map_single_talitos_ptr(dev, &desc->ptr[2], ctx->enckeylen,
+ (char *)&ctx->key + ctx->authkeylen,
+ DMA_TO_DEVICE);
/*
* cipher in
@@ -1136,78 +1220,82 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
* extent is bytes of HMAC postpended to ciphertext,
* typically 12 for ipsec
*/
- desc->ptr[4].len = cpu_to_be16(cryptlen);
- desc->ptr[4].j_extent = authsize;
+ to_talitos_ptr_len(&desc->ptr[4], cryptlen, is_sec1);
+ to_talitos_ptr_ext_set(&desc->ptr[4], 0, is_sec1);
sg_link_tbl_len = cryptlen;
- if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
- sg_link_tbl_len += authsize;
- if (sg_count == 1) {
- to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
- areq->assoclen, 0);
- } else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
- areq->assoclen, sg_link_tbl_len,
- &edesc->link_tbl[tbl_off])) >
- 1) {
- desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
- to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
- tbl_off *
- sizeof(struct talitos_ptr), 0);
- dma_sync_single_for_device(dev, edesc->dma_link_tbl,
- edesc->dma_len,
- DMA_BIDIRECTIONAL);
- tbl_off += ret;
- } else {
- copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
+ if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
+ to_talitos_ptr_ext_set(&desc->ptr[4], authsize, is_sec1);
+
+ if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
+ sg_link_tbl_len += authsize;
}
- /* cipher out */
- desc->ptr[5].len = cpu_to_be16(cryptlen);
- desc->ptr[5].j_extent = authsize;
+ sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
+ &desc->ptr[4], sg_count, areq->assoclen,
+ tbl_off);
- if (areq->src != areq->dst)
- sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1,
- DMA_FROM_DEVICE);
+ if (sg_count > 1) {
+ tbl_off += sg_count;
+ sync_needed = true;
+ }
- edesc->icv_ool = false;
+ /* cipher out */
+ if (areq->src != areq->dst) {
+ sg_count = edesc->dst_nents ? : 1;
+ if (!is_sec1 || sg_count == 1)
+ dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
+ }
- if (sg_count == 1) {
- to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
- areq->assoclen, 0);
- } else if ((sg_count =
- sg_to_link_tbl_offset(areq->dst, sg_count,
- areq->assoclen, cryptlen,
- &edesc->link_tbl[tbl_off])) > 1) {
- struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
-
- to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
- tbl_off * sizeof(struct talitos_ptr), 0);
-
- /* Add an entry to the link table for ICV data */
- tbl_ptr += sg_count - 1;
- tbl_ptr->j_extent = 0;
- tbl_ptr++;
- tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
- tbl_ptr->len = cpu_to_be16(authsize);
-
- /* icv data follows link tables */
- to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
- (edesc->src_nents + edesc->dst_nents +
- 2) * sizeof(struct talitos_ptr) +
- authsize, 0);
- desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
- dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
- edesc->dma_len, DMA_BIDIRECTIONAL);
+ sg_count = talitos_sg_map(dev, areq->dst, cryptlen, edesc,
+ &desc->ptr[5], sg_count, areq->assoclen,
+ tbl_off);
+ if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
+ to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
+
+ if (sg_count > 1) {
edesc->icv_ool = true;
+ sync_needed = true;
+
+ if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
+ struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
+ int offset = (edesc->src_nents + edesc->dst_nents + 2) *
+ sizeof(struct talitos_ptr) + authsize;
+
+ /* Add an entry to the link table for ICV data */
+ tbl_ptr += sg_count - 1;
+ to_talitos_ptr_ext_set(tbl_ptr, 0, is_sec1);
+ tbl_ptr++;
+ to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
+ is_sec1);
+ to_talitos_ptr_len(tbl_ptr, authsize, is_sec1);
+
+ /* icv data follows link tables */
+ to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
+ is_sec1);
+ }
} else {
- copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
+ edesc->icv_ool = false;
+ }
+
+ /* ICV data */
+ if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
+ to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1);
+ to_talitos_ptr(&desc->ptr[6], edesc->dma_link_tbl +
+ areq->assoclen + cryptlen, is_sec1);
}
/* iv out */
- map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
- DMA_FROM_DEVICE);
+ if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
+ map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
+ DMA_FROM_DEVICE);
+
+ if (sync_needed)
+ dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+ edesc->dma_len,
+ DMA_BIDIRECTIONAL);
ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
if (ret != -EINPROGRESS) {
@@ -1233,7 +1321,7 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
bool encrypt)
{
struct talitos_edesc *edesc;
- int src_nents, dst_nents, alloc_len, dma_len;
+ int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
dma_addr_t iv_dma = 0;
gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
@@ -1251,8 +1339,8 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
if (!dst || dst == src) {
- src_nents = sg_nents_for_len(src,
- assoclen + cryptlen + authsize);
+ src_len = assoclen + cryptlen + authsize;
+ src_nents = sg_nents_for_len(src, src_len);
if (src_nents < 0) {
dev_err(dev, "Invalid number of src SG.\n");
err = ERR_PTR(-EINVAL);
@@ -1260,17 +1348,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
}
src_nents = (src_nents == 1) ? 0 : src_nents;
dst_nents = dst ? src_nents : 0;
+ dst_len = 0;
} else { /* dst && dst != src*/
- src_nents = sg_nents_for_len(src, assoclen + cryptlen +
- (encrypt ? 0 : authsize));
+ src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
+ src_nents = sg_nents_for_len(src, src_len);
if (src_nents < 0) {
dev_err(dev, "Invalid number of src SG.\n");
err = ERR_PTR(-EINVAL);
goto error_sg;
}
src_nents = (src_nents == 1) ? 0 : src_nents;
- dst_nents = sg_nents_for_len(dst, assoclen + cryptlen +
- (encrypt ? authsize : 0));
+ dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
+ dst_nents = sg_nents_for_len(dst, dst_len);
if (dst_nents < 0) {
dev_err(dev, "Invalid number of dst SG.\n");
err = ERR_PTR(-EINVAL);
@@ -1287,8 +1376,8 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
alloc_len = sizeof(struct talitos_edesc);
if (src_nents || dst_nents) {
if (is_sec1)
- dma_len = (src_nents ? cryptlen : 0) +
- (dst_nents ? cryptlen : 0);
+ dma_len = (src_nents ? src_len : 0) +
+ (dst_nents ? dst_len : 0);
else
dma_len = (src_nents + dst_nents + 2) *
sizeof(struct talitos_ptr) + authsize * 2;
@@ -1412,40 +1501,13 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
return 0;
}
-static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
- struct scatterlist *dst, unsigned int len,
- struct talitos_edesc *edesc)
-{
- struct talitos_private *priv = dev_get_drvdata(dev);
- bool is_sec1 = has_ftr_sec1(priv);
-
- if (is_sec1) {
- if (!edesc->src_nents) {
- dma_unmap_sg(dev, src, 1,
- dst != src ? DMA_TO_DEVICE
- : DMA_BIDIRECTIONAL);
- }
- if (dst && edesc->dst_nents) {
- dma_sync_single_for_device(dev,
- edesc->dma_link_tbl + len,
- len, DMA_FROM_DEVICE);
- sg_copy_from_buffer(dst, edesc->dst_nents ? : 1,
- edesc->buf + len, len);
- } else if (dst && dst != src) {
- dma_unmap_sg(dev, dst, 1, DMA_FROM_DEVICE);
- }
- } else {
- talitos_sg_unmap(dev, edesc, src, dst);
- }
-}
-
static void common_nonsnoop_unmap(struct device *dev,
struct talitos_edesc *edesc,
struct ablkcipher_request *areq)
{
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
- unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
+ talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
@@ -1470,100 +1532,6 @@ static void ablkcipher_done(struct device *dev,
areq->base.complete(&areq->base, err);
}
-int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
- unsigned int len, struct talitos_edesc *edesc,
- enum dma_data_direction dir, struct talitos_ptr *ptr)
-{
- int sg_count;
- struct talitos_private *priv = dev_get_drvdata(dev);
- bool is_sec1 = has_ftr_sec1(priv);
-
- to_talitos_ptr_len(ptr, len, is_sec1);
-
- if (is_sec1) {
- sg_count = edesc->src_nents ? : 1;
-
- if (sg_count == 1) {
- dma_map_sg(dev, src, 1, dir);
- to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
- } else {
- sg_copy_to_buffer(src, sg_count, edesc->buf, len);
- to_talitos_ptr(ptr, edesc->dma_link_tbl, is_sec1);
- dma_sync_single_for_device(dev, edesc->dma_link_tbl,
- len, DMA_TO_DEVICE);
- }
- } else {
- to_talitos_ptr_extent_clear(ptr, is_sec1);
-
- sg_count = dma_map_sg(dev, src, edesc->src_nents ? : 1, dir);
-
- if (sg_count == 1) {
- to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
- } else {
- sg_count = sg_to_link_tbl(src, sg_count, len,
- &edesc->link_tbl[0]);
- if (sg_count > 1) {
- to_talitos_ptr(ptr, edesc->dma_link_tbl, 0);
- ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
- dma_sync_single_for_device(dev,
- edesc->dma_link_tbl,
- edesc->dma_len,
- DMA_BIDIRECTIONAL);
- } else {
- /* Only one segment now, so no link tbl needed*/
- to_talitos_ptr(ptr, sg_dma_address(src),
- is_sec1);
- }
- }
- }
- return sg_count;
-}
-
-void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
- unsigned int len, struct talitos_edesc *edesc,
- enum dma_data_direction dir,
- struct talitos_ptr *ptr, int sg_count)
-{
- struct talitos_private *priv = dev_get_drvdata(dev);
- bool is_sec1 = has_ftr_sec1(priv);
-
- if (dir != DMA_NONE)
- sg_count = dma_map_sg(dev, dst, edesc->dst_nents ? : 1, dir);
-
- to_talitos_ptr_len(ptr, len, is_sec1);
-
- if (is_sec1) {
- if (sg_count == 1) {
- if (dir != DMA_NONE)
- dma_map_sg(dev, dst, 1, dir);
- to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
- } else {
- to_talitos_ptr(ptr, edesc->dma_link_tbl + len, is_sec1);
- dma_sync_single_for_device(dev,
- edesc->dma_link_tbl + len,
- len, DMA_FROM_DEVICE);
- }
- } else {
- to_talitos_ptr_extent_clear(ptr, is_sec1);
-
- if (sg_count == 1) {
- to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
- } else {
- struct talitos_ptr *link_tbl_ptr =
- &edesc->link_tbl[edesc->src_nents + 1];
-
- to_talitos_ptr(ptr, edesc->dma_link_tbl +
- (edesc->src_nents + 1) *
- sizeof(struct talitos_ptr), 0);
- ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
- sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr);
- dma_sync_single_for_device(dev, edesc->dma_link_tbl,
- edesc->dma_len,
- DMA_BIDIRECTIONAL);
- }
- }
-}
-
static int common_nonsnoop(struct talitos_edesc *edesc,
struct ablkcipher_request *areq,
void (*callback) (struct device *dev,
@@ -1577,6 +1545,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
unsigned int cryptlen = areq->nbytes;
unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
int sg_count, ret;
+ bool sync_needed = false;
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
@@ -1586,25 +1555,39 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
/* cipher iv */
to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
- to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1);
+ to_talitos_ptr_ext_set(&desc->ptr[1], 0, is_sec1);
/* cipher key */
map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
(char *)&ctx->key, DMA_TO_DEVICE);
+ sg_count = edesc->src_nents ?: 1;
+ if (is_sec1 && sg_count > 1)
+ sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
+ cryptlen);
+ else
+ sg_count = dma_map_sg(dev, areq->src, sg_count,
+ (areq->src == areq->dst) ?
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
/*
* cipher in
*/
- sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
- (areq->src == areq->dst) ?
- DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
- &desc->ptr[3]);
+ sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
+ &desc->ptr[3], sg_count, 0, 0);
+ if (sg_count > 1)
+ sync_needed = true;
/* cipher out */
- map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
- (areq->src == areq->dst) ? DMA_NONE
- : DMA_FROM_DEVICE,
- &desc->ptr[4], sg_count);
+ if (areq->src != areq->dst) {
+ sg_count = edesc->dst_nents ? : 1;
+ if (!is_sec1 || sg_count == 1)
+ dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
+ }
+
+ ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
+ sg_count, 0, (edesc->src_nents + 1));
+ if (ret > 1)
+ sync_needed = true;
/* iv out */
map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
@@ -1613,6 +1596,10 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
/* last DWORD empty */
desc->ptr[6] = zero_entry;
+ if (sync_needed)
+ dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+ edesc->dma_len, DMA_BIDIRECTIONAL);
+
ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
if (ret != -EINPROGRESS) {
common_nonsnoop_unmap(dev, edesc, areq);
@@ -1676,7 +1663,7 @@ static void common_nonsnoop_hash_unmap(struct device *dev,
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
- unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);
+ talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
/* When using hashctx-in, must unmap it. */
if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
@@ -1747,8 +1734,10 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
struct device *dev = ctx->dev;
struct talitos_desc *desc = &edesc->desc;
int ret;
+ bool sync_needed = false;
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
+ int sg_count;
/* first DWORD empty */
desc->ptr[0] = zero_entry;
@@ -1773,11 +1762,19 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
else
desc->ptr[2] = zero_entry;
+ sg_count = edesc->src_nents ?: 1;
+ if (is_sec1 && sg_count > 1)
+ sg_copy_to_buffer(areq->src, sg_count, edesc->buf, length);
+ else
+ sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
+ DMA_TO_DEVICE);
/*
* data in
*/
- map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
- DMA_TO_DEVICE, &desc->ptr[3]);
+ sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
+ &desc->ptr[3], sg_count, 0, 0);
+ if (sg_count > 1)
+ sync_needed = true;
/* fifth DWORD empty */
desc->ptr[4] = zero_entry;
@@ -1798,6 +1795,10 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
+ if (sync_needed)
+ dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+ edesc->dma_len, DMA_BIDIRECTIONAL);
+
ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
if (ret != -EINPROGRESS) {
common_nonsnoop_hash_unmap(dev, edesc, areq);
@@ -2124,6 +2125,7 @@ static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
struct talitos_alg_template {
u32 type;
+ u32 priority;
union {
struct crypto_alg crypto;
struct ahash_alg hash;
@@ -2155,6 +2157,27 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_SHA1_HMAC,
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
+ .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-aes-talitos",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
+ DESC_HDR_SEL0_AESU |
+ DESC_HDR_MODE0_AESU_CBC |
+ DESC_HDR_SEL1_MDEUA |
+ DESC_HDR_MODE1_MDEU_INIT |
+ DESC_HDR_MODE1_MDEU_PAD |
+ DESC_HDR_MODE1_MDEU_SHA1_HMAC,
+ },
+ { .type = CRYPTO_ALG_TYPE_AEAD,
.alg.aead = {
.base = {
.cra_name = "authenc(hmac(sha1),"
@@ -2176,6 +2199,29 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_SHA1_HMAC,
},
+ { .type = CRYPTO_ALG_TYPE_AEAD,
+ .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-3des-talitos",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
+ DESC_HDR_SEL0_DEU |
+ DESC_HDR_MODE0_DEU_CBC |
+ DESC_HDR_MODE0_DEU_3DES |
+ DESC_HDR_SEL1_MDEUA |
+ DESC_HDR_MODE1_MDEU_INIT |
+ DESC_HDR_MODE1_MDEU_PAD |
+ DESC_HDR_MODE1_MDEU_SHA1_HMAC,
+ },
{ .type = CRYPTO_ALG_TYPE_AEAD,
.alg.aead = {
.base = {
@@ -2196,6 +2242,27 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_SHA224_HMAC,
},
+ { .type = CRYPTO_ALG_TYPE_AEAD,
+ .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-aes-talitos",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
+ DESC_HDR_SEL0_AESU |
+ DESC_HDR_MODE0_AESU_CBC |
+ DESC_HDR_SEL1_MDEUA |
+ DESC_HDR_MODE1_MDEU_INIT |
+ DESC_HDR_MODE1_MDEU_PAD |
+ DESC_HDR_MODE1_MDEU_SHA224_HMAC,
+ },
{ .type = CRYPTO_ALG_TYPE_AEAD,
.alg.aead = {
.base = {
@@ -2219,6 +2286,29 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_SHA224_HMAC,
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
+ .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-3des-talitos",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
+ DESC_HDR_SEL0_DEU |
+ DESC_HDR_MODE0_DEU_CBC |
+ DESC_HDR_MODE0_DEU_3DES |
+ DESC_HDR_SEL1_MDEUA |
+ DESC_HDR_MODE1_MDEU_INIT |
+ DESC_HDR_MODE1_MDEU_PAD |
+ DESC_HDR_MODE1_MDEU_SHA224_HMAC,
+ },
+ { .type = CRYPTO_ALG_TYPE_AEAD,
.alg.aead = {
.base = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
@@ -2239,6 +2329,27 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_SHA256_HMAC,
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
+ .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-aes-talitos",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
+ DESC_HDR_SEL0_AESU |
+ DESC_HDR_MODE0_AESU_CBC |
+ DESC_HDR_SEL1_MDEUA |
+ DESC_HDR_MODE1_MDEU_INIT |
+ DESC_HDR_MODE1_MDEU_PAD |
+ DESC_HDR_MODE1_MDEU_SHA256_HMAC,
+ },
+ { .type = CRYPTO_ALG_TYPE_AEAD,
.alg.aead = {
.base = {
.cra_name = "authenc(hmac(sha256),"
@@ -2261,6 +2372,29 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_SHA256_HMAC,
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
+ .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-3des-talitos",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
+ DESC_HDR_SEL0_DEU |
+ DESC_HDR_MODE0_DEU_CBC |
+ DESC_HDR_MODE0_DEU_3DES |
+ DESC_HDR_SEL1_MDEUA |
+ DESC_HDR_MODE1_MDEU_INIT |
+ DESC_HDR_MODE1_MDEU_PAD |
+ DESC_HDR_MODE1_MDEU_SHA256_HMAC,
+ },
+ { .type = CRYPTO_ALG_TYPE_AEAD,
.alg.aead = {
.base = {
.cra_name = "authenc(hmac(sha384),cbc(aes))",
@@ -2365,6 +2499,27 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_MD5_HMAC,
},
{ .type = CRYPTO_ALG_TYPE_AEAD,
+ .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-aes-talitos",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
+ DESC_HDR_SEL0_AESU |
+ DESC_HDR_MODE0_AESU_CBC |
+ DESC_HDR_SEL1_MDEUA |
+ DESC_HDR_MODE1_MDEU_INIT |
+ DESC_HDR_MODE1_MDEU_PAD |
+ DESC_HDR_MODE1_MDEU_MD5_HMAC,
+ },
+ { .type = CRYPTO_ALG_TYPE_AEAD,
.alg.aead = {
.base = {
.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
@@ -2385,6 +2540,28 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_MODE1_MDEU_PAD |
DESC_HDR_MODE1_MDEU_MD5_HMAC,
},
+ { .type = CRYPTO_ALG_TYPE_AEAD,
+ .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-3des-talitos",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
+ DESC_HDR_SEL0_DEU |
+ DESC_HDR_MODE0_DEU_CBC |
+ DESC_HDR_MODE0_DEU_3DES |
+ DESC_HDR_SEL1_MDEUA |
+ DESC_HDR_MODE1_MDEU_INIT |
+ DESC_HDR_MODE1_MDEU_PAD |
+ DESC_HDR_MODE1_MDEU_MD5_HMAC,
+ },
/* ABLKCIPHER algorithms. */
{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.alg.crypto = {
@@ -2901,7 +3078,10 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
}
alg->cra_module = THIS_MODULE;
- alg->cra_priority = TALITOS_CRA_PRIORITY;
+ if (t_alg->algt.priority)
+ alg->cra_priority = t_alg->algt.priority;
+ else
+ alg->cra_priority = TALITOS_CRA_PRIORITY;
alg->cra_alignmask = 0;
alg->cra_ctxsize = sizeof(struct talitos_ctx);
alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
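
The last talitos hunk above lets a template override the driver-wide cra_priority; the new HSNA AEAD entries use TALITOS_CRA_PRIORITY_AEAD_HSNA so the IPSEC_ESP variants still win when both descriptor types are registered. The selection rule reduces to this small runnable sketch (effective_priority() is illustrative, not a driver symbol):

#include <stdio.h>

#define TALITOS_CRA_PRIORITY		3000
#define TALITOS_CRA_PRIORITY_AEAD_HSNA	(TALITOS_CRA_PRIORITY - 1)

/* Mirrors talitos_alg_alloc(): a template priority of 0 means "use the
 * driver default", so only the HSNA entries end up one notch lower. */
static unsigned int effective_priority(unsigned int template_priority)
{
	return template_priority ? template_priority : TALITOS_CRA_PRIORITY;
}

int main(void)
{
	printf("IPSEC_ESP variant: %u\n", effective_priority(0));
	printf("HSNA variant:      %u\n",
	       effective_priority(TALITOS_CRA_PRIORITY_AEAD_HSNA));
	return 0;
}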
diff --git a/drivers/crypto/ux500/cryp/Makefile b/drivers/crypto/ux500/cryp/Makefile
index e5d362a6f..b497ae3dd 100644
--- a/drivers/crypto/ux500/cryp/Makefile
+++ b/drivers/crypto/ux500/cryp/Makefile
@@ -4,9 +4,9 @@
# * License terms: GNU General Public License (GPL) version 2 */
ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG
-CFLAGS_cryp_core.o := -DDEBUG -O0
-CFLAGS_cryp.o := -DDEBUG -O0
-CFLAGS_cryp_irq.o := -DDEBUG -O0
+CFLAGS_cryp_core.o := -DDEBUG
+CFLAGS_cryp.o := -DDEBUG
+CFLAGS_cryp_irq.o := -DDEBUG
endif
obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += ux500_cryp.o
diff --git a/drivers/crypto/ux500/hash/Makefile b/drivers/crypto/ux500/hash/Makefile
index b2f90d9ba..784d9c0a8 100644
--- a/drivers/crypto/ux500/hash/Makefile
+++ b/drivers/crypto/ux500/hash/Makefile
@@ -4,7 +4,7 @@
# License terms: GNU General Public License (GPL) version 2
#
ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG
-CFLAGS_hash_core.o := -DDEBUG -O0
+CFLAGS_hash_core.o := -DDEBUG
endif
obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += ux500_hash.o
diff --git a/drivers/crypto/vmx/.gitignore b/drivers/crypto/vmx/.gitignore
new file mode 100644
index 000000000..af4a7ce47
--- /dev/null
+++ b/drivers/crypto/vmx/.gitignore
@@ -0,0 +1,2 @@
+aesp8-ppc.S
+ghashp8-ppc.S
diff --git a/drivers/crypto/vmx/Kconfig b/drivers/crypto/vmx/Kconfig
index 89d8208d9..a83ead109 100644
--- a/drivers/crypto/vmx/Kconfig
+++ b/drivers/crypto/vmx/Kconfig
@@ -1,7 +1,7 @@
config CRYPTO_DEV_VMX_ENCRYPT
tristate "Encryption acceleration support on P8 CPU"
depends on CRYPTO_DEV_VMX
- default y
+ default m
help
Support for VMX cryptographic acceleration instructions on Power8 CPU.
This module supports acceleration for AES and GHASH in hardware. If you
diff --git a/drivers/crypto/vmx/Makefile b/drivers/crypto/vmx/Makefile
index d28ab96a2..de6e241b0 100644
--- a/drivers/crypto/vmx/Makefile
+++ b/drivers/crypto/vmx/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o
-vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o ghash.o
+vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o aes_xts.o ghash.o
ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
TARGET := linux-ppc64le
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
new file mode 100644
index 000000000..24353ec33
--- /dev/null
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -0,0 +1,190 @@
+/**
+ * AES XTS routines supporting VMX In-core instructions on Power 8
+ *
+ * Copyright (C) 2015 International Business Machines Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 only.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/hardirq.h>
+#include <asm/switch_to.h>
+#include <crypto/aes.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/xts.h>
+
+#include "aesp8-ppc.h"
+
+struct p8_aes_xts_ctx {
+ struct crypto_blkcipher *fallback;
+ struct aes_key enc_key;
+ struct aes_key dec_key;
+ struct aes_key tweak_key;
+};
+
+static int p8_aes_xts_init(struct crypto_tfm *tfm)
+{
+ const char *alg;
+ struct crypto_blkcipher *fallback;
+ struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (!(alg = crypto_tfm_alg_name(tfm))) {
+ printk(KERN_ERR "Failed to get algorithm name.\n");
+ return -ENOENT;
+ }
+
+ fallback =
+ crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback)) {
+ printk(KERN_ERR
+ "Failed to allocate transformation for '%s': %ld\n",
+ alg, PTR_ERR(fallback));
+ return PTR_ERR(fallback);
+ }
+ printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+ crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
+
+ crypto_blkcipher_set_flags(
+ fallback,
+ crypto_blkcipher_get_flags((struct crypto_blkcipher *)tfm));
+ ctx->fallback = fallback;
+
+ return 0;
+}
+
+static void p8_aes_xts_exit(struct crypto_tfm *tfm)
+{
+ struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->fallback) {
+ crypto_free_blkcipher(ctx->fallback);
+ ctx->fallback = NULL;
+ }
+}
+
+static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ int ret;
+ struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ ret = xts_check_key(tfm, key, keylen);
+ if (ret)
+ return ret;
+
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
+ ret = aes_p8_set_encrypt_key(key + keylen/2, (keylen/2) * 8, &ctx->tweak_key);
+ ret += aes_p8_set_encrypt_key(key, (keylen/2) * 8, &ctx->enc_key);
+ ret += aes_p8_set_decrypt_key(key, (keylen/2) * 8, &ctx->dec_key);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+
+ ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
+ return ret;
+}
+
+static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes, int enc)
+{
+ int ret;
+ u8 tweak[AES_BLOCK_SIZE];
+ u8 *iv;
+ struct blkcipher_walk walk;
+ struct p8_aes_xts_ctx *ctx =
+ crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
+ struct blkcipher_desc fallback_desc = {
+ .tfm = ctx->fallback,
+ .info = desc->info,
+ .flags = desc->flags
+ };
+
+ if (in_interrupt()) {
+ ret = enc ? crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes) :
+ crypto_blkcipher_decrypt(&fallback_desc, dst, src, nbytes);
+ } else {
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+
+ ret = blkcipher_walk_virt(desc, &walk);
+ iv = walk.iv;
+ memset(tweak, 0, AES_BLOCK_SIZE);
+ aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
+
+ while ((nbytes = walk.nbytes)) {
+ if (enc)
+ aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
+ nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak);
+ else
+ aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
+ nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak);
+
+ nbytes &= AES_BLOCK_SIZE - 1;
+ ret = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+ }
+ return ret;
+}
+
+static int p8_aes_xts_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ return p8_aes_xts_crypt(desc, dst, src, nbytes, 1);
+}
+
+static int p8_aes_xts_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+{
+ return p8_aes_xts_crypt(desc, dst, src, nbytes, 0);
+}
+
+struct crypto_alg p8_aes_xts_alg = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "p8_aes_xts",
+ .cra_module = THIS_MODULE,
+ .cra_priority = 2000,
+ .cra_type = &crypto_blkcipher_type,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_alignmask = 0,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct p8_aes_xts_ctx),
+ .cra_init = p8_aes_xts_init,
+ .cra_exit = p8_aes_xts_exit,
+ .cra_blkcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .setkey = p8_aes_xts_setkey,
+ .encrypt = p8_aes_xts_encrypt,
+ .decrypt = p8_aes_xts_decrypt,
+ }
+};
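For orientation, here is roughly how a 4.8-era kernel caller would exercise the "xts(aes)" algorithm registered above through the legacy blkcipher API. This is a minimal sketch, not part of the patch; the helper name, the 512-bit key size, and the error handling are illustrative.

	#include <linux/crypto.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	/* Hypothetical one-shot XTS encryption of an in-place buffer whose
	 * length is a multiple of AES_BLOCK_SIZE, using a 512-bit key (two
	 * 256-bit AES keys back to back) and a 16-byte IV. */
	static int demo_xts_encrypt(u8 *buf, unsigned int len,
				    const u8 key[64], const u8 iv[16])
	{
		struct crypto_blkcipher *tfm;
		struct blkcipher_desc desc;
		struct scatterlist sg;
		int ret;

		tfm = crypto_alloc_blkcipher("xts(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		desc.tfm = tfm;
		desc.flags = 0;

		ret = crypto_blkcipher_setkey(tfm, key, 64);
		if (!ret) {
			crypto_blkcipher_set_iv(tfm, iv, 16);
			sg_init_one(&sg, buf, len);
			ret = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);
		}

		crypto_free_blkcipher(tfm);
		return ret;
	}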
diff --git a/drivers/crypto/vmx/aesp8-ppc.h b/drivers/crypto/vmx/aesp8-ppc.h
index 4cd34ee54..01972e16a 100644
--- a/drivers/crypto/vmx/aesp8-ppc.h
+++ b/drivers/crypto/vmx/aesp8-ppc.h
@@ -19,3 +19,7 @@ void aes_p8_cbc_encrypt(const u8 *in, u8 *out, size_t len,
void aes_p8_ctr32_encrypt_blocks(const u8 *in, u8 *out,
size_t len, const struct aes_key *key,
const u8 *iv);
+void aes_p8_xts_encrypt(const u8 *in, u8 *out, size_t len,
+ const struct aes_key *key1, const struct aes_key *key2, u8 *iv);
+void aes_p8_xts_decrypt(const u8 *in, u8 *out, size_t len,
+ const struct aes_key *key1, const struct aes_key *key2, u8 *iv);
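As p8_aes_xts_crypt() above shows, the kernel driver always passes key2 == NULL and builds the initial tweak itself. Condensed from that caller (trimmed for illustration, no new logic):

	/* Encrypt the IV under the tweak key to form the initial tweak,
	 * then run in "tweak chaining" mode (key2 == NULL) so the assembly
	 * carries the tweak forward across the whole request. */
	u8 tweak[AES_BLOCK_SIZE];

	memset(tweak, 0, AES_BLOCK_SIZE);
	aes_p8_encrypt(walk.iv, tweak, &ctx->tweak_key);
	aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
			   nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak);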
diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl
index 228053921..0b4a293b8 100644
--- a/drivers/crypto/vmx/aesp8-ppc.pl
+++ b/drivers/crypto/vmx/aesp8-ppc.pl
@@ -1,4 +1,11 @@
-#!/usr/bin/env perl
+#! /usr/bin/env perl
+# Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@@ -20,6 +27,19 @@
# instructions are interleaved. It's reckoned that eventual
# misalignment penalties at page boundaries are in average lower
# than additional overhead in pure AltiVec approach.
+#
+# May 2016
+#
+# Add XTS subroutine; a 9x improvement was measured on little-endian
+# and a 12x improvement on big-endian systems.
+#
+######################################################################
+# Current large-block performance in cycles per byte processed with
+# 128-bit key (less is better).
+#
+# CBC en-/decrypt CTR XTS
+# POWER8[le] 3.96/0.72 0.74 1.1
+# POWER8[be] 3.75/0.65 0.66 1.0
$flavour = shift;
@@ -1875,6 +1895,1845 @@ Lctr32_enc8x_done:
___
}} }}}
+#########################################################################
+{{{ # XTS procedures #
+# int aes_p8_xts_[en|de]crypt(const char *inp, char *out, size_t len, #
+# const AES_KEY *key1, const AES_KEY *key2, #
+# [const] unsigned char iv[16]); #
+# If $key2 is NULL, then a "tweak chaining" mode is engaged, in which #
+# the input tweak value is assumed to be encrypted already, and the   #
+# last tweak value, suitable for a consecutive call on the same chunk #
+# of data, is written back to the original buffer. In addition, in    #
+# "tweak chaining" mode only complete input blocks are processed.     #
+
+my ($inp,$out,$len,$key1,$key2,$ivp,$rounds,$idx) = map("r$_",(3..10));
+my ($rndkey0,$rndkey1,$inout) = map("v$_",(0..2));
+my ($output,$inptail,$inpperm,$leperm,$keyperm) = map("v$_",(3..7));
+my ($tweak,$seven,$eighty7,$tmp,$tweak1) = map("v$_",(8..12));
+my $taillen = $key2;
+
+ ($inp,$idx) = ($idx,$inp); # reassign
+
+$code.=<<___;
+.globl .${prefix}_xts_encrypt
+ mr $inp,r3 # reassign
+ li r3,-1
+ ${UCMP}i $len,16
+ bltlr-
+
+ lis r0,0xfff0
+ mfspr r12,256 # save vrsave
+ li r11,0
+ mtspr 256,r0
+
+ vspltisb $seven,0x07 # 0x070707..07
+ le?lvsl $leperm,r11,r11
+ le?vspltisb $tmp,0x0f
+ le?vxor $leperm,$leperm,$seven
+
+ li $idx,15
+ lvx $tweak,0,$ivp # load [unaligned] iv
+ lvsl $inpperm,0,$ivp
+ lvx $inptail,$idx,$ivp
+ le?vxor $inpperm,$inpperm,$tmp
+ vperm $tweak,$tweak,$inptail,$inpperm
+
+ neg r11,$inp
+ lvsr $inpperm,0,r11 # prepare for unaligned load
+ lvx $inout,0,$inp
+	addi	$inp,$inp,15		# 15 is not a typo
+ le?vxor $inpperm,$inpperm,$tmp
+
+ ${UCMP}i $key2,0 # key2==NULL?
+ beq Lxts_enc_no_key2
+
+ ?lvsl $keyperm,0,$key2 # prepare for unaligned key
+ lwz $rounds,240($key2)
+ srwi $rounds,$rounds,1
+ subi $rounds,$rounds,1
+ li $idx,16
+
+ lvx $rndkey0,0,$key2
+ lvx $rndkey1,$idx,$key2
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $tweak,$tweak,$rndkey0
+ lvx $rndkey0,$idx,$key2
+ addi $idx,$idx,16
+ mtctr $rounds
+
+Ltweak_xts_enc:
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vcipher $tweak,$tweak,$rndkey1
+ lvx $rndkey1,$idx,$key2
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vcipher $tweak,$tweak,$rndkey0
+ lvx $rndkey0,$idx,$key2
+ addi $idx,$idx,16
+ bdnz Ltweak_xts_enc
+
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vcipher $tweak,$tweak,$rndkey1
+ lvx $rndkey1,$idx,$key2
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vcipherlast $tweak,$tweak,$rndkey0
+
+ li $ivp,0 # don't chain the tweak
+ b Lxts_enc
+
+Lxts_enc_no_key2:
+ li $idx,-16
+ and $len,$len,$idx # in "tweak chaining"
+ # mode only complete
+ # blocks are processed
+Lxts_enc:
+ lvx $inptail,0,$inp
+ addi $inp,$inp,16
+
+ ?lvsl $keyperm,0,$key1 # prepare for unaligned key
+ lwz $rounds,240($key1)
+ srwi $rounds,$rounds,1
+ subi $rounds,$rounds,1
+ li $idx,16
+
+ vslb $eighty7,$seven,$seven # 0x808080..80
+ vor $eighty7,$eighty7,$seven # 0x878787..87
+ vspltisb $tmp,1 # 0x010101..01
+ vsldoi $eighty7,$eighty7,$tmp,15 # 0x870101..01
+
+ ${UCMP}i $len,96
+ bge _aesp8_xts_encrypt6x
+
+ andi. $taillen,$len,15
+ subic r0,$len,32
+ subi $taillen,$taillen,16
+ subfe r0,r0,r0
+ and r0,r0,$taillen
+ add $inp,$inp,r0
+
+ lvx $rndkey0,0,$key1
+ lvx $rndkey1,$idx,$key1
+ addi $idx,$idx,16
+ vperm $inout,$inout,$inptail,$inpperm
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $inout,$inout,$tweak
+ vxor $inout,$inout,$rndkey0
+ lvx $rndkey0,$idx,$key1
+ addi $idx,$idx,16
+ mtctr $rounds
+ b Loop_xts_enc
+
+.align 5
+Loop_xts_enc:
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vcipher $inout,$inout,$rndkey1
+ lvx $rndkey1,$idx,$key1
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vcipher $inout,$inout,$rndkey0
+ lvx $rndkey0,$idx,$key1
+ addi $idx,$idx,16
+ bdnz Loop_xts_enc
+
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vcipher $inout,$inout,$rndkey1
+ lvx $rndkey1,$idx,$key1
+ li $idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $rndkey0,$rndkey0,$tweak
+ vcipherlast $output,$inout,$rndkey0
+
+ le?vperm $tmp,$output,$output,$leperm
+ be?nop
+ le?stvx_u $tmp,0,$out
+ be?stvx_u $output,0,$out
+ addi $out,$out,16
+
+ subic. $len,$len,16
+ beq Lxts_enc_done
+
+ vmr $inout,$inptail
+ lvx $inptail,0,$inp
+ addi $inp,$inp,16
+ lvx $rndkey0,0,$key1
+ lvx $rndkey1,$idx,$key1
+ addi $idx,$idx,16
+
+ subic r0,$len,32
+ subfe r0,r0,r0
+ and r0,r0,$taillen
+ add $inp,$inp,r0
+
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ vand $tmp,$tmp,$eighty7
+ vxor $tweak,$tweak,$tmp
+
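The vsrab/vaddubm/vsldoi/vand/vxor sequence just above recurs throughout both procedures; it advances the tweak by one block. A plain-C restatement of its effect (a sketch; the function name and explicit byte order are mine, not the patch's):

	/* Multiply the 128-bit tweak by x (alpha) in GF(2^128) and reduce
	 * by the XTS polynomial x^128 + x^7 + x^2 + x + 1, i.e. 0x87;
	 * t[0] holds the least significant byte. */
	static void xts_next_tweak(unsigned char t[16])
	{
		unsigned char carry = 0;
		int i;

		for (i = 0; i < 16; i++) {
			unsigned char c = t[i] >> 7;

			t[i] = (unsigned char)((t[i] << 1) | carry);
			carry = c;
		}
		if (carry)
			t[0] ^= 0x87;
	}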
+ vperm $inout,$inout,$inptail,$inpperm
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $inout,$inout,$tweak
+ vxor $output,$output,$rndkey0 # just in case $len<16
+ vxor $inout,$inout,$rndkey0
+ lvx $rndkey0,$idx,$key1
+ addi $idx,$idx,16
+
+ mtctr $rounds
+ ${UCMP}i $len,16
+ bge Loop_xts_enc
+
+ vxor $output,$output,$tweak
+ lvsr $inpperm,0,$len # $inpperm is no longer needed
+ vxor $inptail,$inptail,$inptail # $inptail is no longer needed
+ vspltisb $tmp,-1
+ vperm $inptail,$inptail,$tmp,$inpperm
+ vsel $inout,$inout,$output,$inptail
+
+ subi r11,$out,17
+ subi $out,$out,16
+ mtctr $len
+ li $len,16
+Loop_xts_enc_steal:
+ lbzu r0,1(r11)
+ stb r0,16(r11)
+ bdnz Loop_xts_enc_steal
+
+ mtctr $rounds
+ b Loop_xts_enc # one more time...
+
+Lxts_enc_done:
+ ${UCMP}i $ivp,0
+ beq Lxts_enc_ret
+
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ vand $tmp,$tmp,$eighty7
+ vxor $tweak,$tweak,$tmp
+
+ le?vperm $tweak,$tweak,$tweak,$leperm
+ stvx_u $tweak,0,$ivp
+
+Lxts_enc_ret:
+ mtspr 256,r12 # restore vrsave
+ li r3,0
+ blr
+ .long 0
+ .byte 0,12,0x04,0,0x80,6,6,0
+ .long 0
+.size .${prefix}_xts_encrypt,.-.${prefix}_xts_encrypt
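The Loop_xts_enc_steal loop above implements ciphertext stealing: bytes of the last full ciphertext block are shifted forward to become the tail ciphertext, while the tail plaintext, padded with the stolen bytes, goes around Loop_xts_enc once more. A plain-C sketch of the same idea (buffer names are illustrative):

	/* XTS ciphertext stealing for a 'tail' of 1..15 bytes: the first
	 * 'tail' bytes of the last full ciphertext block become the final
	 * partial ciphertext, and 'block' is then re-encrypted with the
	 * last tweak and stored where last_ct was. */
	static void demo_xts_steal(unsigned char *last_ct,
				   const unsigned char *tail_pt,
				   unsigned char *tail_ct,
				   unsigned int tail, unsigned char block[16])
	{
		unsigned int i;

		for (i = 0; i < tail; i++) {
			tail_ct[i] = last_ct[i];	/* steal ciphertext */
			block[i] = tail_pt[i];		/* tail plaintext */
		}
		for (; i < 16; i++)
			block[i] = last_ct[i];		/* stolen padding */
	}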
+
+.globl .${prefix}_xts_decrypt
+ mr $inp,r3 # reassign
+ li r3,-1
+ ${UCMP}i $len,16
+ bltlr-
+
+ lis r0,0xfff8
+ mfspr r12,256 # save vrsave
+ li r11,0
+ mtspr 256,r0
+
+ andi. r0,$len,15
+ neg r0,r0
+ andi. r0,r0,16
+ sub $len,$len,r0
+
+ vspltisb $seven,0x07 # 0x070707..07
+ le?lvsl $leperm,r11,r11
+ le?vspltisb $tmp,0x0f
+ le?vxor $leperm,$leperm,$seven
+
+ li $idx,15
+ lvx $tweak,0,$ivp # load [unaligned] iv
+ lvsl $inpperm,0,$ivp
+ lvx $inptail,$idx,$ivp
+ le?vxor $inpperm,$inpperm,$tmp
+ vperm $tweak,$tweak,$inptail,$inpperm
+
+ neg r11,$inp
+ lvsr $inpperm,0,r11 # prepare for unaligned load
+ lvx $inout,0,$inp
+	addi	$inp,$inp,15		# 15 is not a typo
+ le?vxor $inpperm,$inpperm,$tmp
+
+ ${UCMP}i $key2,0 # key2==NULL?
+ beq Lxts_dec_no_key2
+
+ ?lvsl $keyperm,0,$key2 # prepare for unaligned key
+ lwz $rounds,240($key2)
+ srwi $rounds,$rounds,1
+ subi $rounds,$rounds,1
+ li $idx,16
+
+ lvx $rndkey0,0,$key2
+ lvx $rndkey1,$idx,$key2
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $tweak,$tweak,$rndkey0
+ lvx $rndkey0,$idx,$key2
+ addi $idx,$idx,16
+ mtctr $rounds
+
+Ltweak_xts_dec:
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vcipher $tweak,$tweak,$rndkey1
+ lvx $rndkey1,$idx,$key2
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vcipher $tweak,$tweak,$rndkey0
+ lvx $rndkey0,$idx,$key2
+ addi $idx,$idx,16
+ bdnz Ltweak_xts_dec
+
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vcipher $tweak,$tweak,$rndkey1
+ lvx $rndkey1,$idx,$key2
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vcipherlast $tweak,$tweak,$rndkey0
+
+ li $ivp,0 # don't chain the tweak
+ b Lxts_dec
+
+Lxts_dec_no_key2:
+ neg $idx,$len
+ andi. $idx,$idx,15
+ add $len,$len,$idx # in "tweak chaining"
+ # mode only complete
+ # blocks are processed
+Lxts_dec:
+ lvx $inptail,0,$inp
+ addi $inp,$inp,16
+
+ ?lvsl $keyperm,0,$key1 # prepare for unaligned key
+ lwz $rounds,240($key1)
+ srwi $rounds,$rounds,1
+ subi $rounds,$rounds,1
+ li $idx,16
+
+ vslb $eighty7,$seven,$seven # 0x808080..80
+ vor $eighty7,$eighty7,$seven # 0x878787..87
+ vspltisb $tmp,1 # 0x010101..01
+ vsldoi $eighty7,$eighty7,$tmp,15 # 0x870101..01
+
+ ${UCMP}i $len,96
+ bge _aesp8_xts_decrypt6x
+
+ lvx $rndkey0,0,$key1
+ lvx $rndkey1,$idx,$key1
+ addi $idx,$idx,16
+ vperm $inout,$inout,$inptail,$inpperm
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $inout,$inout,$tweak
+ vxor $inout,$inout,$rndkey0
+ lvx $rndkey0,$idx,$key1
+ addi $idx,$idx,16
+ mtctr $rounds
+
+ ${UCMP}i $len,16
+ blt Ltail_xts_dec
+ be?b Loop_xts_dec
+
+.align 5
+Loop_xts_dec:
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vncipher $inout,$inout,$rndkey1
+ lvx $rndkey1,$idx,$key1
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vncipher $inout,$inout,$rndkey0
+ lvx $rndkey0,$idx,$key1
+ addi $idx,$idx,16
+ bdnz Loop_xts_dec
+
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vncipher $inout,$inout,$rndkey1
+ lvx $rndkey1,$idx,$key1
+ li $idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $rndkey0,$rndkey0,$tweak
+ vncipherlast $output,$inout,$rndkey0
+
+ le?vperm $tmp,$output,$output,$leperm
+ be?nop
+ le?stvx_u $tmp,0,$out
+ be?stvx_u $output,0,$out
+ addi $out,$out,16
+
+ subic. $len,$len,16
+ beq Lxts_dec_done
+
+ vmr $inout,$inptail
+ lvx $inptail,0,$inp
+ addi $inp,$inp,16
+ lvx $rndkey0,0,$key1
+ lvx $rndkey1,$idx,$key1
+ addi $idx,$idx,16
+
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ vand $tmp,$tmp,$eighty7
+ vxor $tweak,$tweak,$tmp
+
+ vperm $inout,$inout,$inptail,$inpperm
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $inout,$inout,$tweak
+ vxor $inout,$inout,$rndkey0
+ lvx $rndkey0,$idx,$key1
+ addi $idx,$idx,16
+
+ mtctr $rounds
+ ${UCMP}i $len,16
+ bge Loop_xts_dec
+
+Ltail_xts_dec:
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak1,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ vand $tmp,$tmp,$eighty7
+ vxor $tweak1,$tweak1,$tmp
+
+ subi $inp,$inp,16
+ add $inp,$inp,$len
+
+ vxor $inout,$inout,$tweak # :-(
+ vxor $inout,$inout,$tweak1 # :-)
+
+Loop_xts_dec_short:
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vncipher $inout,$inout,$rndkey1
+ lvx $rndkey1,$idx,$key1
+ addi $idx,$idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vncipher $inout,$inout,$rndkey0
+ lvx $rndkey0,$idx,$key1
+ addi $idx,$idx,16
+ bdnz Loop_xts_dec_short
+
+ ?vperm $rndkey1,$rndkey1,$rndkey0,$keyperm
+ vncipher $inout,$inout,$rndkey1
+ lvx $rndkey1,$idx,$key1
+ li $idx,16
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+ vxor $rndkey0,$rndkey0,$tweak1
+ vncipherlast $output,$inout,$rndkey0
+
+ le?vperm $tmp,$output,$output,$leperm
+ be?nop
+ le?stvx_u $tmp,0,$out
+ be?stvx_u $output,0,$out
+
+ vmr $inout,$inptail
+ lvx $inptail,0,$inp
+ #addi $inp,$inp,16
+ lvx $rndkey0,0,$key1
+ lvx $rndkey1,$idx,$key1
+ addi $idx,$idx,16
+ vperm $inout,$inout,$inptail,$inpperm
+ ?vperm $rndkey0,$rndkey0,$rndkey1,$keyperm
+
+ lvsr $inpperm,0,$len # $inpperm is no longer needed
+ vxor $inptail,$inptail,$inptail # $inptail is no longer needed
+ vspltisb $tmp,-1
+ vperm $inptail,$inptail,$tmp,$inpperm
+ vsel $inout,$inout,$output,$inptail
+
+ vxor $rndkey0,$rndkey0,$tweak
+ vxor $inout,$inout,$rndkey0
+ lvx $rndkey0,$idx,$key1
+ addi $idx,$idx,16
+
+ subi r11,$out,1
+ mtctr $len
+ li $len,16
+Loop_xts_dec_steal:
+ lbzu r0,1(r11)
+ stb r0,16(r11)
+ bdnz Loop_xts_dec_steal
+
+ mtctr $rounds
+ b Loop_xts_dec # one more time...
+
+Lxts_dec_done:
+ ${UCMP}i $ivp,0
+ beq Lxts_dec_ret
+
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ vand $tmp,$tmp,$eighty7
+ vxor $tweak,$tweak,$tmp
+
+ le?vperm $tweak,$tweak,$tweak,$leperm
+ stvx_u $tweak,0,$ivp
+
+Lxts_dec_ret:
+ mtspr 256,r12 # restore vrsave
+ li r3,0
+ blr
+ .long 0
+ .byte 0,12,0x04,0,0x80,6,6,0
+ .long 0
+.size .${prefix}_xts_decrypt,.-.${prefix}_xts_decrypt
+___
+#########################################################################
+{{ # Optimized XTS procedures #
+my $key_=$key2;
+my ($x00,$x10,$x20,$x30,$x40,$x50,$x60,$x70)=map("r$_",(0,3,26..31));
+ $x00=0 if ($flavour =~ /osx/);
+my ($in0, $in1, $in2, $in3, $in4, $in5 )=map("v$_",(0..5));
+my ($out0, $out1, $out2, $out3, $out4, $out5)=map("v$_",(7,12..16));
+my ($twk0, $twk1, $twk2, $twk3, $twk4, $twk5)=map("v$_",(17..22));
+my $rndkey0="v23"; # v24-v25 rotating buffer for first found keys
+ # v26-v31 last 6 round keys
+my ($keyperm)=($out0); # aliases with "caller", redundant assignment
+my $taillen=$x70;
+
+$code.=<<___;
+.align 5
+_aesp8_xts_encrypt6x:
+ $STU $sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
+ mflr r11
+ li r7,`$FRAME+8*16+15`
+ li r3,`$FRAME+8*16+31`
+ $PUSH r11,`$FRAME+21*16+6*$SIZE_T+$LRSAVE`($sp)
+ stvx v20,r7,$sp # ABI says so
+ addi r7,r7,32
+ stvx v21,r3,$sp
+ addi r3,r3,32
+ stvx v22,r7,$sp
+ addi r7,r7,32
+ stvx v23,r3,$sp
+ addi r3,r3,32
+ stvx v24,r7,$sp
+ addi r7,r7,32
+ stvx v25,r3,$sp
+ addi r3,r3,32
+ stvx v26,r7,$sp
+ addi r7,r7,32
+ stvx v27,r3,$sp
+ addi r3,r3,32
+ stvx v28,r7,$sp
+ addi r7,r7,32
+ stvx v29,r3,$sp
+ addi r3,r3,32
+ stvx v30,r7,$sp
+ stvx v31,r3,$sp
+ li r0,-1
+ stw $vrsave,`$FRAME+21*16-4`($sp) # save vrsave
+ li $x10,0x10
+ $PUSH r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+ li $x20,0x20
+ $PUSH r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+ li $x30,0x30
+ $PUSH r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+ li $x40,0x40
+ $PUSH r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+ li $x50,0x50
+ $PUSH r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+ li $x60,0x60
+ $PUSH r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+ li $x70,0x70
+ mtspr 256,r0
+
+ subi $rounds,$rounds,3 # -4 in total
+
+ lvx $rndkey0,$x00,$key1 # load key schedule
+ lvx v30,$x10,$key1
+ addi $key1,$key1,0x20
+ lvx v31,$x00,$key1
+ ?vperm $rndkey0,$rndkey0,v30,$keyperm
+ addi $key_,$sp,$FRAME+15
+ mtctr $rounds
+
+Load_xts_enc_key:
+ ?vperm v24,v30,v31,$keyperm
+ lvx v30,$x10,$key1
+ addi $key1,$key1,0x20
+ stvx v24,$x00,$key_ # off-load round[1]
+ ?vperm v25,v31,v30,$keyperm
+ lvx v31,$x00,$key1
+ stvx v25,$x10,$key_ # off-load round[2]
+ addi $key_,$key_,0x20
+ bdnz Load_xts_enc_key
+
+ lvx v26,$x10,$key1
+ ?vperm v24,v30,v31,$keyperm
+ lvx v27,$x20,$key1
+ stvx v24,$x00,$key_ # off-load round[3]
+ ?vperm v25,v31,v26,$keyperm
+ lvx v28,$x30,$key1
+ stvx v25,$x10,$key_ # off-load round[4]
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ ?vperm v26,v26,v27,$keyperm
+ lvx v29,$x40,$key1
+ ?vperm v27,v27,v28,$keyperm
+ lvx v30,$x50,$key1
+ ?vperm v28,v28,v29,$keyperm
+ lvx v31,$x60,$key1
+ ?vperm v29,v29,v30,$keyperm
+ lvx $twk5,$x70,$key1 # borrow $twk5
+ ?vperm v30,v30,v31,$keyperm
+ lvx v24,$x00,$key_ # pre-load round[1]
+ ?vperm v31,v31,$twk5,$keyperm
+ lvx v25,$x10,$key_ # pre-load round[2]
+
+ vperm $in0,$inout,$inptail,$inpperm
+ subi $inp,$inp,31 # undo "caller"
+ vxor $twk0,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ vand $tmp,$tmp,$eighty7
+ vxor $out0,$in0,$twk0
+ vxor $tweak,$tweak,$tmp
+
+ lvx_u $in1,$x10,$inp
+ vxor $twk1,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ le?vperm $in1,$in1,$in1,$leperm
+ vand $tmp,$tmp,$eighty7
+ vxor $out1,$in1,$twk1
+ vxor $tweak,$tweak,$tmp
+
+ lvx_u $in2,$x20,$inp
+ andi. $taillen,$len,15
+ vxor $twk2,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ le?vperm $in2,$in2,$in2,$leperm
+ vand $tmp,$tmp,$eighty7
+ vxor $out2,$in2,$twk2
+ vxor $tweak,$tweak,$tmp
+
+ lvx_u $in3,$x30,$inp
+ sub $len,$len,$taillen
+ vxor $twk3,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ le?vperm $in3,$in3,$in3,$leperm
+ vand $tmp,$tmp,$eighty7
+ vxor $out3,$in3,$twk3
+ vxor $tweak,$tweak,$tmp
+
+ lvx_u $in4,$x40,$inp
+ subi $len,$len,0x60
+ vxor $twk4,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ le?vperm $in4,$in4,$in4,$leperm
+ vand $tmp,$tmp,$eighty7
+ vxor $out4,$in4,$twk4
+ vxor $tweak,$tweak,$tmp
+
+ lvx_u $in5,$x50,$inp
+ addi $inp,$inp,0x60
+ vxor $twk5,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ le?vperm $in5,$in5,$in5,$leperm
+ vand $tmp,$tmp,$eighty7
+ vxor $out5,$in5,$twk5
+ vxor $tweak,$tweak,$tmp
+
+ vxor v31,v31,$rndkey0
+ mtctr $rounds
+ b Loop_xts_enc6x
+
+.align 5
+Loop_xts_enc6x:
+ vcipher $out0,$out0,v24
+ vcipher $out1,$out1,v24
+ vcipher $out2,$out2,v24
+ vcipher $out3,$out3,v24
+ vcipher $out4,$out4,v24
+ vcipher $out5,$out5,v24
+ lvx v24,$x20,$key_ # round[3]
+ addi $key_,$key_,0x20
+
+ vcipher $out0,$out0,v25
+ vcipher $out1,$out1,v25
+ vcipher $out2,$out2,v25
+ vcipher $out3,$out3,v25
+ vcipher $out4,$out4,v25
+ vcipher $out5,$out5,v25
+ lvx v25,$x10,$key_ # round[4]
+ bdnz Loop_xts_enc6x
+
+ subic $len,$len,96 # $len-=96
+ vxor $in0,$twk0,v31 # xor with last round key
+ vcipher $out0,$out0,v24
+ vcipher $out1,$out1,v24
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk0,$tweak,$rndkey0
+ vaddubm $tweak,$tweak,$tweak
+ vcipher $out2,$out2,v24
+ vcipher $out3,$out3,v24
+ vsldoi $tmp,$tmp,$tmp,15
+ vcipher $out4,$out4,v24
+ vcipher $out5,$out5,v24
+
+ subfe. r0,r0,r0 # borrow?-1:0
+ vand $tmp,$tmp,$eighty7
+ vcipher $out0,$out0,v25
+ vcipher $out1,$out1,v25
+ vxor $tweak,$tweak,$tmp
+ vcipher $out2,$out2,v25
+ vcipher $out3,$out3,v25
+ vxor $in1,$twk1,v31
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk1,$tweak,$rndkey0
+ vcipher $out4,$out4,v25
+ vcipher $out5,$out5,v25
+
+ and r0,r0,$len
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ vcipher $out0,$out0,v26
+ vcipher $out1,$out1,v26
+ vand $tmp,$tmp,$eighty7
+ vcipher $out2,$out2,v26
+ vcipher $out3,$out3,v26
+ vxor $tweak,$tweak,$tmp
+ vcipher $out4,$out4,v26
+ vcipher $out5,$out5,v26
+
+ add $inp,$inp,r0 # $inp is adjusted in such
+ # way that at exit from the
+ # loop inX-in5 are loaded
+ # with last "words"
+ vxor $in2,$twk2,v31
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk2,$tweak,$rndkey0
+ vaddubm $tweak,$tweak,$tweak
+ vcipher $out0,$out0,v27
+ vcipher $out1,$out1,v27
+ vsldoi $tmp,$tmp,$tmp,15
+ vcipher $out2,$out2,v27
+ vcipher $out3,$out3,v27
+ vand $tmp,$tmp,$eighty7
+ vcipher $out4,$out4,v27
+ vcipher $out5,$out5,v27
+
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ vxor $tweak,$tweak,$tmp
+ vcipher $out0,$out0,v28
+ vcipher $out1,$out1,v28
+ vxor $in3,$twk3,v31
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk3,$tweak,$rndkey0
+ vcipher $out2,$out2,v28
+ vcipher $out3,$out3,v28
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ vcipher $out4,$out4,v28
+ vcipher $out5,$out5,v28
+ lvx v24,$x00,$key_ # re-pre-load round[1]
+ vand $tmp,$tmp,$eighty7
+
+ vcipher $out0,$out0,v29
+ vcipher $out1,$out1,v29
+ vxor $tweak,$tweak,$tmp
+ vcipher $out2,$out2,v29
+ vcipher $out3,$out3,v29
+ vxor $in4,$twk4,v31
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk4,$tweak,$rndkey0
+ vcipher $out4,$out4,v29
+ vcipher $out5,$out5,v29
+ lvx v25,$x10,$key_ # re-pre-load round[2]
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+
+ vcipher $out0,$out0,v30
+ vcipher $out1,$out1,v30
+ vand $tmp,$tmp,$eighty7
+ vcipher $out2,$out2,v30
+ vcipher $out3,$out3,v30
+ vxor $tweak,$tweak,$tmp
+ vcipher $out4,$out4,v30
+ vcipher $out5,$out5,v30
+ vxor $in5,$twk5,v31
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk5,$tweak,$rndkey0
+
+ vcipherlast $out0,$out0,$in0
+ lvx_u $in0,$x00,$inp # load next input block
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ vcipherlast $out1,$out1,$in1
+ lvx_u $in1,$x10,$inp
+ vcipherlast $out2,$out2,$in2
+ le?vperm $in0,$in0,$in0,$leperm
+ lvx_u $in2,$x20,$inp
+ vand $tmp,$tmp,$eighty7
+ vcipherlast $out3,$out3,$in3
+ le?vperm $in1,$in1,$in1,$leperm
+ lvx_u $in3,$x30,$inp
+ vcipherlast $out4,$out4,$in4
+ le?vperm $in2,$in2,$in2,$leperm
+ lvx_u $in4,$x40,$inp
+ vxor $tweak,$tweak,$tmp
+ vcipherlast $tmp,$out5,$in5 # last block might be needed
+ # in stealing mode
+ le?vperm $in3,$in3,$in3,$leperm
+ lvx_u $in5,$x50,$inp
+ addi $inp,$inp,0x60
+ le?vperm $in4,$in4,$in4,$leperm
+ le?vperm $in5,$in5,$in5,$leperm
+
+ le?vperm $out0,$out0,$out0,$leperm
+ le?vperm $out1,$out1,$out1,$leperm
+ stvx_u $out0,$x00,$out # store output
+ vxor $out0,$in0,$twk0
+ le?vperm $out2,$out2,$out2,$leperm
+ stvx_u $out1,$x10,$out
+ vxor $out1,$in1,$twk1
+ le?vperm $out3,$out3,$out3,$leperm
+ stvx_u $out2,$x20,$out
+ vxor $out2,$in2,$twk2
+ le?vperm $out4,$out4,$out4,$leperm
+ stvx_u $out3,$x30,$out
+ vxor $out3,$in3,$twk3
+ le?vperm $out5,$tmp,$tmp,$leperm
+ stvx_u $out4,$x40,$out
+ vxor $out4,$in4,$twk4
+ le?stvx_u $out5,$x50,$out
+ be?stvx_u $tmp, $x50,$out
+ vxor $out5,$in5,$twk5
+ addi $out,$out,0x60
+
+ mtctr $rounds
+ beq Loop_xts_enc6x # did $len-=96 borrow?
+
+ addic. $len,$len,0x60
+ beq Lxts_enc6x_zero
+ cmpwi $len,0x20
+ blt Lxts_enc6x_one
+ nop
+ beq Lxts_enc6x_two
+ cmpwi $len,0x40
+ blt Lxts_enc6x_three
+ nop
+ beq Lxts_enc6x_four
+
+Lxts_enc6x_five:
+ vxor $out0,$in1,$twk0
+ vxor $out1,$in2,$twk1
+ vxor $out2,$in3,$twk2
+ vxor $out3,$in4,$twk3
+ vxor $out4,$in5,$twk4
+
+ bl _aesp8_xts_enc5x
+
+ le?vperm $out0,$out0,$out0,$leperm
+ vmr $twk0,$twk5 # unused tweak
+ le?vperm $out1,$out1,$out1,$leperm
+ stvx_u $out0,$x00,$out # store output
+ le?vperm $out2,$out2,$out2,$leperm
+ stvx_u $out1,$x10,$out
+ le?vperm $out3,$out3,$out3,$leperm
+ stvx_u $out2,$x20,$out
+ vxor $tmp,$out4,$twk5 # last block prep for stealing
+ le?vperm $out4,$out4,$out4,$leperm
+ stvx_u $out3,$x30,$out
+ stvx_u $out4,$x40,$out
+ addi $out,$out,0x50
+ bne Lxts_enc6x_steal
+ b Lxts_enc6x_done
+
+.align 4
+Lxts_enc6x_four:
+ vxor $out0,$in2,$twk0
+ vxor $out1,$in3,$twk1
+ vxor $out2,$in4,$twk2
+ vxor $out3,$in5,$twk3
+ vxor $out4,$out4,$out4
+
+ bl _aesp8_xts_enc5x
+
+ le?vperm $out0,$out0,$out0,$leperm
+ vmr $twk0,$twk4 # unused tweak
+ le?vperm $out1,$out1,$out1,$leperm
+ stvx_u $out0,$x00,$out # store output
+ le?vperm $out2,$out2,$out2,$leperm
+ stvx_u $out1,$x10,$out
+ vxor $tmp,$out3,$twk4 # last block prep for stealing
+ le?vperm $out3,$out3,$out3,$leperm
+ stvx_u $out2,$x20,$out
+ stvx_u $out3,$x30,$out
+ addi $out,$out,0x40
+ bne Lxts_enc6x_steal
+ b Lxts_enc6x_done
+
+.align 4
+Lxts_enc6x_three:
+ vxor $out0,$in3,$twk0
+ vxor $out1,$in4,$twk1
+ vxor $out2,$in5,$twk2
+ vxor $out3,$out3,$out3
+ vxor $out4,$out4,$out4
+
+ bl _aesp8_xts_enc5x
+
+ le?vperm $out0,$out0,$out0,$leperm
+ vmr $twk0,$twk3 # unused tweak
+ le?vperm $out1,$out1,$out1,$leperm
+ stvx_u $out0,$x00,$out # store output
+ vxor $tmp,$out2,$twk3 # last block prep for stealing
+ le?vperm $out2,$out2,$out2,$leperm
+ stvx_u $out1,$x10,$out
+ stvx_u $out2,$x20,$out
+ addi $out,$out,0x30
+ bne Lxts_enc6x_steal
+ b Lxts_enc6x_done
+
+.align 4
+Lxts_enc6x_two:
+ vxor $out0,$in4,$twk0
+ vxor $out1,$in5,$twk1
+ vxor $out2,$out2,$out2
+ vxor $out3,$out3,$out3
+ vxor $out4,$out4,$out4
+
+ bl _aesp8_xts_enc5x
+
+ le?vperm $out0,$out0,$out0,$leperm
+ vmr $twk0,$twk2 # unused tweak
+ vxor $tmp,$out1,$twk2 # last block prep for stealing
+ le?vperm $out1,$out1,$out1,$leperm
+ stvx_u $out0,$x00,$out # store output
+ stvx_u $out1,$x10,$out
+ addi $out,$out,0x20
+ bne Lxts_enc6x_steal
+ b Lxts_enc6x_done
+
+.align 4
+Lxts_enc6x_one:
+ vxor $out0,$in5,$twk0
+ nop
+Loop_xts_enc1x:
+ vcipher $out0,$out0,v24
+ lvx v24,$x20,$key_ # round[3]
+ addi $key_,$key_,0x20
+
+ vcipher $out0,$out0,v25
+ lvx v25,$x10,$key_ # round[4]
+ bdnz Loop_xts_enc1x
+
+ add $inp,$inp,$taillen
+ cmpwi $taillen,0
+ vcipher $out0,$out0,v24
+
+ subi $inp,$inp,16
+ vcipher $out0,$out0,v25
+
+ lvsr $inpperm,0,$taillen
+ vcipher $out0,$out0,v26
+
+ lvx_u $in0,0,$inp
+ vcipher $out0,$out0,v27
+
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ vcipher $out0,$out0,v28
+ lvx v24,$x00,$key_ # re-pre-load round[1]
+
+ vcipher $out0,$out0,v29
+ lvx v25,$x10,$key_ # re-pre-load round[2]
+ vxor $twk0,$twk0,v31
+
+ le?vperm $in0,$in0,$in0,$leperm
+ vcipher $out0,$out0,v30
+
+ vperm $in0,$in0,$in0,$inpperm
+ vcipherlast $out0,$out0,$twk0
+
+ vmr $twk0,$twk1 # unused tweak
+ vxor $tmp,$out0,$twk1 # last block prep for stealing
+ le?vperm $out0,$out0,$out0,$leperm
+ stvx_u $out0,$x00,$out # store output
+ addi $out,$out,0x10
+ bne Lxts_enc6x_steal
+ b Lxts_enc6x_done
+
+.align 4
+Lxts_enc6x_zero:
+ cmpwi $taillen,0
+ beq Lxts_enc6x_done
+
+ add $inp,$inp,$taillen
+ subi $inp,$inp,16
+ lvx_u $in0,0,$inp
+ lvsr $inpperm,0,$taillen # $in5 is no more
+ le?vperm $in0,$in0,$in0,$leperm
+ vperm $in0,$in0,$in0,$inpperm
+ vxor $tmp,$tmp,$twk0
+Lxts_enc6x_steal:
+ vxor $in0,$in0,$twk0
+ vxor $out0,$out0,$out0
+ vspltisb $out1,-1
+ vperm $out0,$out0,$out1,$inpperm
+ vsel $out0,$in0,$tmp,$out0 # $tmp is last block, remember?
+
+ subi r30,$out,17
+ subi $out,$out,16
+ mtctr $taillen
+Loop_xts_enc6x_steal:
+ lbzu r0,1(r30)
+ stb r0,16(r30)
+ bdnz Loop_xts_enc6x_steal
+
+ li $taillen,0
+ mtctr $rounds
+ b Loop_xts_enc1x # one more time...
+
+.align 4
+Lxts_enc6x_done:
+ ${UCMP}i $ivp,0
+ beq Lxts_enc6x_ret
+
+ vxor $tweak,$twk0,$rndkey0
+ le?vperm $tweak,$tweak,$tweak,$leperm
+ stvx_u $tweak,0,$ivp
+
+Lxts_enc6x_ret:
+ mtlr r11
+ li r10,`$FRAME+15`
+ li r11,`$FRAME+31`
+ stvx $seven,r10,$sp # wipe copies of round keys
+ addi r10,r10,32
+ stvx $seven,r11,$sp
+ addi r11,r11,32
+ stvx $seven,r10,$sp
+ addi r10,r10,32
+ stvx $seven,r11,$sp
+ addi r11,r11,32
+ stvx $seven,r10,$sp
+ addi r10,r10,32
+ stvx $seven,r11,$sp
+ addi r11,r11,32
+ stvx $seven,r10,$sp
+ addi r10,r10,32
+ stvx $seven,r11,$sp
+ addi r11,r11,32
+
+ mtspr 256,$vrsave
+ lvx v20,r10,$sp # ABI says so
+ addi r10,r10,32
+ lvx v21,r11,$sp
+ addi r11,r11,32
+ lvx v22,r10,$sp
+ addi r10,r10,32
+ lvx v23,r11,$sp
+ addi r11,r11,32
+ lvx v24,r10,$sp
+ addi r10,r10,32
+ lvx v25,r11,$sp
+ addi r11,r11,32
+ lvx v26,r10,$sp
+ addi r10,r10,32
+ lvx v27,r11,$sp
+ addi r11,r11,32
+ lvx v28,r10,$sp
+ addi r10,r10,32
+ lvx v29,r11,$sp
+ addi r11,r11,32
+ lvx v30,r10,$sp
+ lvx v31,r11,$sp
+ $POP r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+ $POP r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+ $POP r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+ $POP r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+ $POP r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+ $POP r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+ addi $sp,$sp,`$FRAME+21*16+6*$SIZE_T`
+ blr
+ .long 0
+ .byte 0,12,0x04,1,0x80,6,6,0
+ .long 0
+
+.align 5
+_aesp8_xts_enc5x:
+ vcipher $out0,$out0,v24
+ vcipher $out1,$out1,v24
+ vcipher $out2,$out2,v24
+ vcipher $out3,$out3,v24
+ vcipher $out4,$out4,v24
+ lvx v24,$x20,$key_ # round[3]
+ addi $key_,$key_,0x20
+
+ vcipher $out0,$out0,v25
+ vcipher $out1,$out1,v25
+ vcipher $out2,$out2,v25
+ vcipher $out3,$out3,v25
+ vcipher $out4,$out4,v25
+ lvx v25,$x10,$key_ # round[4]
+ bdnz _aesp8_xts_enc5x
+
+ add $inp,$inp,$taillen
+ cmpwi $taillen,0
+ vcipher $out0,$out0,v24
+ vcipher $out1,$out1,v24
+ vcipher $out2,$out2,v24
+ vcipher $out3,$out3,v24
+ vcipher $out4,$out4,v24
+
+ subi $inp,$inp,16
+ vcipher $out0,$out0,v25
+ vcipher $out1,$out1,v25
+ vcipher $out2,$out2,v25
+ vcipher $out3,$out3,v25
+ vcipher $out4,$out4,v25
+ vxor $twk0,$twk0,v31
+
+ vcipher $out0,$out0,v26
+ lvsr $inpperm,r0,$taillen # $in5 is no more
+ vcipher $out1,$out1,v26
+ vcipher $out2,$out2,v26
+ vcipher $out3,$out3,v26
+ vcipher $out4,$out4,v26
+ vxor $in1,$twk1,v31
+
+ vcipher $out0,$out0,v27
+ lvx_u $in0,0,$inp
+ vcipher $out1,$out1,v27
+ vcipher $out2,$out2,v27
+ vcipher $out3,$out3,v27
+ vcipher $out4,$out4,v27
+ vxor $in2,$twk2,v31
+
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ vcipher $out0,$out0,v28
+ vcipher $out1,$out1,v28
+ vcipher $out2,$out2,v28
+ vcipher $out3,$out3,v28
+ vcipher $out4,$out4,v28
+ lvx v24,$x00,$key_ # re-pre-load round[1]
+ vxor $in3,$twk3,v31
+
+ vcipher $out0,$out0,v29
+ le?vperm $in0,$in0,$in0,$leperm
+ vcipher $out1,$out1,v29
+ vcipher $out2,$out2,v29
+ vcipher $out3,$out3,v29
+ vcipher $out4,$out4,v29
+ lvx v25,$x10,$key_ # re-pre-load round[2]
+ vxor $in4,$twk4,v31
+
+ vcipher $out0,$out0,v30
+ vperm $in0,$in0,$in0,$inpperm
+ vcipher $out1,$out1,v30
+ vcipher $out2,$out2,v30
+ vcipher $out3,$out3,v30
+ vcipher $out4,$out4,v30
+
+ vcipherlast $out0,$out0,$twk0
+ vcipherlast $out1,$out1,$in1
+ vcipherlast $out2,$out2,$in2
+ vcipherlast $out3,$out3,$in3
+ vcipherlast $out4,$out4,$in4
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,0,0
+
+.align 5
+_aesp8_xts_decrypt6x:
+ $STU $sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
+ mflr r11
+ li r7,`$FRAME+8*16+15`
+ li r3,`$FRAME+8*16+31`
+ $PUSH r11,`$FRAME+21*16+6*$SIZE_T+$LRSAVE`($sp)
+ stvx v20,r7,$sp # ABI says so
+ addi r7,r7,32
+ stvx v21,r3,$sp
+ addi r3,r3,32
+ stvx v22,r7,$sp
+ addi r7,r7,32
+ stvx v23,r3,$sp
+ addi r3,r3,32
+ stvx v24,r7,$sp
+ addi r7,r7,32
+ stvx v25,r3,$sp
+ addi r3,r3,32
+ stvx v26,r7,$sp
+ addi r7,r7,32
+ stvx v27,r3,$sp
+ addi r3,r3,32
+ stvx v28,r7,$sp
+ addi r7,r7,32
+ stvx v29,r3,$sp
+ addi r3,r3,32
+ stvx v30,r7,$sp
+ stvx v31,r3,$sp
+ li r0,-1
+ stw $vrsave,`$FRAME+21*16-4`($sp) # save vrsave
+ li $x10,0x10
+ $PUSH r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+ li $x20,0x20
+ $PUSH r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+ li $x30,0x30
+ $PUSH r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+ li $x40,0x40
+ $PUSH r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+ li $x50,0x50
+ $PUSH r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+ li $x60,0x60
+ $PUSH r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+ li $x70,0x70
+ mtspr 256,r0
+
+ subi $rounds,$rounds,3 # -4 in total
+
+ lvx $rndkey0,$x00,$key1 # load key schedule
+ lvx v30,$x10,$key1
+ addi $key1,$key1,0x20
+ lvx v31,$x00,$key1
+ ?vperm $rndkey0,$rndkey0,v30,$keyperm
+ addi $key_,$sp,$FRAME+15
+ mtctr $rounds
+
+Load_xts_dec_key:
+ ?vperm v24,v30,v31,$keyperm
+ lvx v30,$x10,$key1
+ addi $key1,$key1,0x20
+ stvx v24,$x00,$key_ # off-load round[1]
+ ?vperm v25,v31,v30,$keyperm
+ lvx v31,$x00,$key1
+ stvx v25,$x10,$key_ # off-load round[2]
+ addi $key_,$key_,0x20
+ bdnz Load_xts_dec_key
+
+ lvx v26,$x10,$key1
+ ?vperm v24,v30,v31,$keyperm
+ lvx v27,$x20,$key1
+ stvx v24,$x00,$key_ # off-load round[3]
+ ?vperm v25,v31,v26,$keyperm
+ lvx v28,$x30,$key1
+ stvx v25,$x10,$key_ # off-load round[4]
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ ?vperm v26,v26,v27,$keyperm
+ lvx v29,$x40,$key1
+ ?vperm v27,v27,v28,$keyperm
+ lvx v30,$x50,$key1
+ ?vperm v28,v28,v29,$keyperm
+ lvx v31,$x60,$key1
+ ?vperm v29,v29,v30,$keyperm
+ lvx $twk5,$x70,$key1 # borrow $twk5
+ ?vperm v30,v30,v31,$keyperm
+ lvx v24,$x00,$key_ # pre-load round[1]
+ ?vperm v31,v31,$twk5,$keyperm
+ lvx v25,$x10,$key_ # pre-load round[2]
+
+ vperm $in0,$inout,$inptail,$inpperm
+ subi $inp,$inp,31 # undo "caller"
+ vxor $twk0,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ vand $tmp,$tmp,$eighty7
+ vxor $out0,$in0,$twk0
+ vxor $tweak,$tweak,$tmp
+
+ lvx_u $in1,$x10,$inp
+ vxor $twk1,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ le?vperm $in1,$in1,$in1,$leperm
+ vand $tmp,$tmp,$eighty7
+ vxor $out1,$in1,$twk1
+ vxor $tweak,$tweak,$tmp
+
+ lvx_u $in2,$x20,$inp
+ andi. $taillen,$len,15
+ vxor $twk2,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ le?vperm $in2,$in2,$in2,$leperm
+ vand $tmp,$tmp,$eighty7
+ vxor $out2,$in2,$twk2
+ vxor $tweak,$tweak,$tmp
+
+ lvx_u $in3,$x30,$inp
+ sub $len,$len,$taillen
+ vxor $twk3,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ le?vperm $in3,$in3,$in3,$leperm
+ vand $tmp,$tmp,$eighty7
+ vxor $out3,$in3,$twk3
+ vxor $tweak,$tweak,$tmp
+
+ lvx_u $in4,$x40,$inp
+ subi $len,$len,0x60
+ vxor $twk4,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ le?vperm $in4,$in4,$in4,$leperm
+ vand $tmp,$tmp,$eighty7
+ vxor $out4,$in4,$twk4
+ vxor $tweak,$tweak,$tmp
+
+ lvx_u $in5,$x50,$inp
+ addi $inp,$inp,0x60
+ vxor $twk5,$tweak,$rndkey0
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ le?vperm $in5,$in5,$in5,$leperm
+ vand $tmp,$tmp,$eighty7
+ vxor $out5,$in5,$twk5
+ vxor $tweak,$tweak,$tmp
+
+ vxor v31,v31,$rndkey0
+ mtctr $rounds
+ b Loop_xts_dec6x
+
+.align 5
+Loop_xts_dec6x:
+ vncipher $out0,$out0,v24
+ vncipher $out1,$out1,v24
+ vncipher $out2,$out2,v24
+ vncipher $out3,$out3,v24
+ vncipher $out4,$out4,v24
+ vncipher $out5,$out5,v24
+ lvx v24,$x20,$key_ # round[3]
+ addi $key_,$key_,0x20
+
+ vncipher $out0,$out0,v25
+ vncipher $out1,$out1,v25
+ vncipher $out2,$out2,v25
+ vncipher $out3,$out3,v25
+ vncipher $out4,$out4,v25
+ vncipher $out5,$out5,v25
+ lvx v25,$x10,$key_ # round[4]
+ bdnz Loop_xts_dec6x
+
+ subic $len,$len,96 # $len-=96
+ vxor $in0,$twk0,v31 # xor with last round key
+ vncipher $out0,$out0,v24
+ vncipher $out1,$out1,v24
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk0,$tweak,$rndkey0
+ vaddubm $tweak,$tweak,$tweak
+ vncipher $out2,$out2,v24
+ vncipher $out3,$out3,v24
+ vsldoi $tmp,$tmp,$tmp,15
+ vncipher $out4,$out4,v24
+ vncipher $out5,$out5,v24
+
+ subfe. r0,r0,r0 # borrow?-1:0
+ vand $tmp,$tmp,$eighty7
+ vncipher $out0,$out0,v25
+ vncipher $out1,$out1,v25
+ vxor $tweak,$tweak,$tmp
+ vncipher $out2,$out2,v25
+ vncipher $out3,$out3,v25
+ vxor $in1,$twk1,v31
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk1,$tweak,$rndkey0
+ vncipher $out4,$out4,v25
+ vncipher $out5,$out5,v25
+
+ and r0,r0,$len
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ vncipher $out0,$out0,v26
+ vncipher $out1,$out1,v26
+ vand $tmp,$tmp,$eighty7
+ vncipher $out2,$out2,v26
+ vncipher $out3,$out3,v26
+ vxor $tweak,$tweak,$tmp
+ vncipher $out4,$out4,v26
+ vncipher $out5,$out5,v26
+
+ add $inp,$inp,r0 # $inp is adjusted in such
+ # way that at exit from the
+ # loop inX-in5 are loaded
+ # with last "words"
+ vxor $in2,$twk2,v31
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk2,$tweak,$rndkey0
+ vaddubm $tweak,$tweak,$tweak
+ vncipher $out0,$out0,v27
+ vncipher $out1,$out1,v27
+ vsldoi $tmp,$tmp,$tmp,15
+ vncipher $out2,$out2,v27
+ vncipher $out3,$out3,v27
+ vand $tmp,$tmp,$eighty7
+ vncipher $out4,$out4,v27
+ vncipher $out5,$out5,v27
+
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ vxor $tweak,$tweak,$tmp
+ vncipher $out0,$out0,v28
+ vncipher $out1,$out1,v28
+ vxor $in3,$twk3,v31
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk3,$tweak,$rndkey0
+ vncipher $out2,$out2,v28
+ vncipher $out3,$out3,v28
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ vncipher $out4,$out4,v28
+ vncipher $out5,$out5,v28
+ lvx v24,$x00,$key_ # re-pre-load round[1]
+ vand $tmp,$tmp,$eighty7
+
+ vncipher $out0,$out0,v29
+ vncipher $out1,$out1,v29
+ vxor $tweak,$tweak,$tmp
+ vncipher $out2,$out2,v29
+ vncipher $out3,$out3,v29
+ vxor $in4,$twk4,v31
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk4,$tweak,$rndkey0
+ vncipher $out4,$out4,v29
+ vncipher $out5,$out5,v29
+ lvx v25,$x10,$key_ # re-pre-load round[2]
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+
+ vncipher $out0,$out0,v30
+ vncipher $out1,$out1,v30
+ vand $tmp,$tmp,$eighty7
+ vncipher $out2,$out2,v30
+ vncipher $out3,$out3,v30
+ vxor $tweak,$tweak,$tmp
+ vncipher $out4,$out4,v30
+ vncipher $out5,$out5,v30
+ vxor $in5,$twk5,v31
+ vsrab $tmp,$tweak,$seven # next tweak value
+ vxor $twk5,$tweak,$rndkey0
+
+ vncipherlast $out0,$out0,$in0
+ lvx_u $in0,$x00,$inp # load next input block
+ vaddubm $tweak,$tweak,$tweak
+ vsldoi $tmp,$tmp,$tmp,15
+ vncipherlast $out1,$out1,$in1
+ lvx_u $in1,$x10,$inp
+ vncipherlast $out2,$out2,$in2
+ le?vperm $in0,$in0,$in0,$leperm
+ lvx_u $in2,$x20,$inp
+ vand $tmp,$tmp,$eighty7
+ vncipherlast $out3,$out3,$in3
+ le?vperm $in1,$in1,$in1,$leperm
+ lvx_u $in3,$x30,$inp
+ vncipherlast $out4,$out4,$in4
+ le?vperm $in2,$in2,$in2,$leperm
+ lvx_u $in4,$x40,$inp
+ vxor $tweak,$tweak,$tmp
+ vncipherlast $out5,$out5,$in5
+ le?vperm $in3,$in3,$in3,$leperm
+ lvx_u $in5,$x50,$inp
+ addi $inp,$inp,0x60
+ le?vperm $in4,$in4,$in4,$leperm
+ le?vperm $in5,$in5,$in5,$leperm
+
+ le?vperm $out0,$out0,$out0,$leperm
+ le?vperm $out1,$out1,$out1,$leperm
+ stvx_u $out0,$x00,$out # store output
+ vxor $out0,$in0,$twk0
+ le?vperm $out2,$out2,$out2,$leperm
+ stvx_u $out1,$x10,$out
+ vxor $out1,$in1,$twk1
+ le?vperm $out3,$out3,$out3,$leperm
+ stvx_u $out2,$x20,$out
+ vxor $out2,$in2,$twk2
+ le?vperm $out4,$out4,$out4,$leperm
+ stvx_u $out3,$x30,$out
+ vxor $out3,$in3,$twk3
+ le?vperm $out5,$out5,$out5,$leperm
+ stvx_u $out4,$x40,$out
+ vxor $out4,$in4,$twk4
+ stvx_u $out5,$x50,$out
+ vxor $out5,$in5,$twk5
+ addi $out,$out,0x60
+
+ mtctr $rounds
+ beq Loop_xts_dec6x # did $len-=96 borrow?
+
+ addic. $len,$len,0x60
+ beq Lxts_dec6x_zero
+ cmpwi $len,0x20
+ blt Lxts_dec6x_one
+ nop
+ beq Lxts_dec6x_two
+ cmpwi $len,0x40
+ blt Lxts_dec6x_three
+ nop
+ beq Lxts_dec6x_four
+
+Lxts_dec6x_five:
+ vxor $out0,$in1,$twk0
+ vxor $out1,$in2,$twk1
+ vxor $out2,$in3,$twk2
+ vxor $out3,$in4,$twk3
+ vxor $out4,$in5,$twk4
+
+ bl _aesp8_xts_dec5x
+
+ le?vperm $out0,$out0,$out0,$leperm
+ vmr $twk0,$twk5 # unused tweak
+ vxor $twk1,$tweak,$rndkey0
+ le?vperm $out1,$out1,$out1,$leperm
+ stvx_u $out0,$x00,$out # store output
+ vxor $out0,$in0,$twk1
+ le?vperm $out2,$out2,$out2,$leperm
+ stvx_u $out1,$x10,$out
+ le?vperm $out3,$out3,$out3,$leperm
+ stvx_u $out2,$x20,$out
+ le?vperm $out4,$out4,$out4,$leperm
+ stvx_u $out3,$x30,$out
+ stvx_u $out4,$x40,$out
+ addi $out,$out,0x50
+ bne Lxts_dec6x_steal
+ b Lxts_dec6x_done
+
+.align 4
+Lxts_dec6x_four:
+ vxor $out0,$in2,$twk0
+ vxor $out1,$in3,$twk1
+ vxor $out2,$in4,$twk2
+ vxor $out3,$in5,$twk3
+ vxor $out4,$out4,$out4
+
+ bl _aesp8_xts_dec5x
+
+ le?vperm $out0,$out0,$out0,$leperm
+ vmr $twk0,$twk4 # unused tweak
+ vmr $twk1,$twk5
+ le?vperm $out1,$out1,$out1,$leperm
+ stvx_u $out0,$x00,$out # store output
+ vxor $out0,$in0,$twk5
+ le?vperm $out2,$out2,$out2,$leperm
+ stvx_u $out1,$x10,$out
+ le?vperm $out3,$out3,$out3,$leperm
+ stvx_u $out2,$x20,$out
+ stvx_u $out3,$x30,$out
+ addi $out,$out,0x40
+ bne Lxts_dec6x_steal
+ b Lxts_dec6x_done
+
+.align 4
+Lxts_dec6x_three:
+ vxor $out0,$in3,$twk0
+ vxor $out1,$in4,$twk1
+ vxor $out2,$in5,$twk2
+ vxor $out3,$out3,$out3
+ vxor $out4,$out4,$out4
+
+ bl _aesp8_xts_dec5x
+
+ le?vperm $out0,$out0,$out0,$leperm
+ vmr $twk0,$twk3 # unused tweak
+ vmr $twk1,$twk4
+ le?vperm $out1,$out1,$out1,$leperm
+ stvx_u $out0,$x00,$out # store output
+ vxor $out0,$in0,$twk4
+ le?vperm $out2,$out2,$out2,$leperm
+ stvx_u $out1,$x10,$out
+ stvx_u $out2,$x20,$out
+ addi $out,$out,0x30
+ bne Lxts_dec6x_steal
+ b Lxts_dec6x_done
+
+.align 4
+Lxts_dec6x_two:
+ vxor $out0,$in4,$twk0
+ vxor $out1,$in5,$twk1
+ vxor $out2,$out2,$out2
+ vxor $out3,$out3,$out3
+ vxor $out4,$out4,$out4
+
+ bl _aesp8_xts_dec5x
+
+ le?vperm $out0,$out0,$out0,$leperm
+ vmr $twk0,$twk2 # unused tweak
+ vmr $twk1,$twk3
+ le?vperm $out1,$out1,$out1,$leperm
+ stvx_u $out0,$x00,$out # store output
+ vxor $out0,$in0,$twk3
+ stvx_u $out1,$x10,$out
+ addi $out,$out,0x20
+ bne Lxts_dec6x_steal
+ b Lxts_dec6x_done
+
+.align 4
+Lxts_dec6x_one:
+ vxor $out0,$in5,$twk0
+ nop
+Loop_xts_dec1x:
+ vncipher $out0,$out0,v24
+ lvx v24,$x20,$key_ # round[3]
+ addi $key_,$key_,0x20
+
+ vncipher $out0,$out0,v25
+ lvx v25,$x10,$key_ # round[4]
+ bdnz Loop_xts_dec1x
+
+ subi r0,$taillen,1
+ vncipher $out0,$out0,v24
+
+ andi. r0,r0,16
+ cmpwi $taillen,0
+ vncipher $out0,$out0,v25
+
+ sub $inp,$inp,r0
+ vncipher $out0,$out0,v26
+
+ lvx_u $in0,0,$inp
+ vncipher $out0,$out0,v27
+
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ vncipher $out0,$out0,v28
+ lvx v24,$x00,$key_ # re-pre-load round[1]
+
+ vncipher $out0,$out0,v29
+ lvx v25,$x10,$key_ # re-pre-load round[2]
+ vxor $twk0,$twk0,v31
+
+ le?vperm $in0,$in0,$in0,$leperm
+ vncipher $out0,$out0,v30
+
+ mtctr $rounds
+ vncipherlast $out0,$out0,$twk0
+
+ vmr $twk0,$twk1 # unused tweak
+ vmr $twk1,$twk2
+ le?vperm $out0,$out0,$out0,$leperm
+ stvx_u $out0,$x00,$out # store output
+ addi $out,$out,0x10
+ vxor $out0,$in0,$twk2
+ bne Lxts_dec6x_steal
+ b Lxts_dec6x_done
+
+.align 4
+Lxts_dec6x_zero:
+ cmpwi $taillen,0
+ beq Lxts_dec6x_done
+
+ lvx_u $in0,0,$inp
+ le?vperm $in0,$in0,$in0,$leperm
+ vxor $out0,$in0,$twk1
+Lxts_dec6x_steal:
+ vncipher $out0,$out0,v24
+ lvx v24,$x20,$key_ # round[3]
+ addi $key_,$key_,0x20
+
+ vncipher $out0,$out0,v25
+ lvx v25,$x10,$key_ # round[4]
+ bdnz Lxts_dec6x_steal
+
+ add $inp,$inp,$taillen
+ vncipher $out0,$out0,v24
+
+ cmpwi $taillen,0
+ vncipher $out0,$out0,v25
+
+ lvx_u $in0,0,$inp
+ vncipher $out0,$out0,v26
+
+ lvsr $inpperm,0,$taillen # $in5 is no more
+ vncipher $out0,$out0,v27
+
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ vncipher $out0,$out0,v28
+ lvx v24,$x00,$key_ # re-pre-load round[1]
+
+ vncipher $out0,$out0,v29
+ lvx v25,$x10,$key_ # re-pre-load round[2]
+ vxor $twk1,$twk1,v31
+
+ le?vperm $in0,$in0,$in0,$leperm
+ vncipher $out0,$out0,v30
+
+ vperm $in0,$in0,$in0,$inpperm
+ vncipherlast $tmp,$out0,$twk1
+
+ le?vperm $out0,$tmp,$tmp,$leperm
+ le?stvx_u $out0,0,$out
+ be?stvx_u $tmp,0,$out
+
+ vxor $out0,$out0,$out0
+ vspltisb $out1,-1
+ vperm $out0,$out0,$out1,$inpperm
+ vsel $out0,$in0,$tmp,$out0
+ vxor $out0,$out0,$twk0
+
+ subi r30,$out,1
+ mtctr $taillen
+Loop_xts_dec6x_steal:
+ lbzu r0,1(r30)
+ stb r0,16(r30)
+ bdnz Loop_xts_dec6x_steal
+
+ li $taillen,0
+ mtctr $rounds
+ b Loop_xts_dec1x # one more time...
+
+.align 4
+Lxts_dec6x_done:
+ ${UCMP}i $ivp,0
+ beq Lxts_dec6x_ret
+
+ vxor $tweak,$twk0,$rndkey0
+ le?vperm $tweak,$tweak,$tweak,$leperm
+ stvx_u $tweak,0,$ivp
+
+Lxts_dec6x_ret:
+ mtlr r11
+ li r10,`$FRAME+15`
+ li r11,`$FRAME+31`
+ stvx $seven,r10,$sp # wipe copies of round keys
+ addi r10,r10,32
+ stvx $seven,r11,$sp
+ addi r11,r11,32
+ stvx $seven,r10,$sp
+ addi r10,r10,32
+ stvx $seven,r11,$sp
+ addi r11,r11,32
+ stvx $seven,r10,$sp
+ addi r10,r10,32
+ stvx $seven,r11,$sp
+ addi r11,r11,32
+ stvx $seven,r10,$sp
+ addi r10,r10,32
+ stvx $seven,r11,$sp
+ addi r11,r11,32
+
+ mtspr 256,$vrsave
+ lvx v20,r10,$sp # ABI says so
+ addi r10,r10,32
+ lvx v21,r11,$sp
+ addi r11,r11,32
+ lvx v22,r10,$sp
+ addi r10,r10,32
+ lvx v23,r11,$sp
+ addi r11,r11,32
+ lvx v24,r10,$sp
+ addi r10,r10,32
+ lvx v25,r11,$sp
+ addi r11,r11,32
+ lvx v26,r10,$sp
+ addi r10,r10,32
+ lvx v27,r11,$sp
+ addi r11,r11,32
+ lvx v28,r10,$sp
+ addi r10,r10,32
+ lvx v29,r11,$sp
+ addi r11,r11,32
+ lvx v30,r10,$sp
+ lvx v31,r11,$sp
+ $POP r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+ $POP r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+ $POP r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+ $POP r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+ $POP r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+ $POP r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+ addi $sp,$sp,`$FRAME+21*16+6*$SIZE_T`
+ blr
+ .long 0
+ .byte 0,12,0x04,1,0x80,6,6,0
+ .long 0
+
+.align 5
+_aesp8_xts_dec5x:
+ vncipher $out0,$out0,v24
+ vncipher $out1,$out1,v24
+ vncipher $out2,$out2,v24
+ vncipher $out3,$out3,v24
+ vncipher $out4,$out4,v24
+ lvx v24,$x20,$key_ # round[3]
+ addi $key_,$key_,0x20
+
+ vncipher $out0,$out0,v25
+ vncipher $out1,$out1,v25
+ vncipher $out2,$out2,v25
+ vncipher $out3,$out3,v25
+ vncipher $out4,$out4,v25
+ lvx v25,$x10,$key_ # round[4]
+ bdnz _aesp8_xts_dec5x
+
+ subi r0,$taillen,1
+ vncipher $out0,$out0,v24
+ vncipher $out1,$out1,v24
+ vncipher $out2,$out2,v24
+ vncipher $out3,$out3,v24
+ vncipher $out4,$out4,v24
+
+ andi. r0,r0,16
+ cmpwi $taillen,0
+ vncipher $out0,$out0,v25
+ vncipher $out1,$out1,v25
+ vncipher $out2,$out2,v25
+ vncipher $out3,$out3,v25
+ vncipher $out4,$out4,v25
+ vxor $twk0,$twk0,v31
+
+ sub $inp,$inp,r0
+ vncipher $out0,$out0,v26
+ vncipher $out1,$out1,v26
+ vncipher $out2,$out2,v26
+ vncipher $out3,$out3,v26
+ vncipher $out4,$out4,v26
+ vxor $in1,$twk1,v31
+
+ vncipher $out0,$out0,v27
+ lvx_u $in0,0,$inp
+ vncipher $out1,$out1,v27
+ vncipher $out2,$out2,v27
+ vncipher $out3,$out3,v27
+ vncipher $out4,$out4,v27
+ vxor $in2,$twk2,v31
+
+ addi $key_,$sp,$FRAME+15 # rewind $key_
+ vncipher $out0,$out0,v28
+ vncipher $out1,$out1,v28
+ vncipher $out2,$out2,v28
+ vncipher $out3,$out3,v28
+ vncipher $out4,$out4,v28
+ lvx v24,$x00,$key_ # re-pre-load round[1]
+ vxor $in3,$twk3,v31
+
+ vncipher $out0,$out0,v29
+ le?vperm $in0,$in0,$in0,$leperm
+ vncipher $out1,$out1,v29
+ vncipher $out2,$out2,v29
+ vncipher $out3,$out3,v29
+ vncipher $out4,$out4,v29
+ lvx v25,$x10,$key_ # re-pre-load round[2]
+ vxor $in4,$twk4,v31
+
+ vncipher $out0,$out0,v30
+ vncipher $out1,$out1,v30
+ vncipher $out2,$out2,v30
+ vncipher $out3,$out3,v30
+ vncipher $out4,$out4,v30
+
+ vncipherlast $out0,$out0,$twk0
+ vncipherlast $out1,$out1,$in1
+ vncipherlast $out2,$out2,$in2
+ vncipherlast $out3,$out3,$in3
+ vncipherlast $out4,$out4,$in4
+ mtctr $rounds
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,0,0
+___
+}} }}}
+
my $consts=1;
foreach(split("\n",$code)) {
s/\`([^\`]*)\`/eval($1)/geo;
@@ -1898,7 +3757,7 @@ foreach(split("\n",$code)) {
if ($flavour =~ /le$/o) {
SWITCH: for($conv) {
/\?inv/ && do { @bytes=map($_^0xf,@bytes); last; };
- /\?rev/ && do { @bytes=reverse(@bytes); last; };
+ /\?rev/ && do { @bytes=reverse(@bytes); last; };
}
}
diff --git a/drivers/crypto/vmx/vmx.c b/drivers/crypto/vmx/vmx.c
index e163d5770..31a98dc6f 100644
--- a/drivers/crypto/vmx/vmx.c
+++ b/drivers/crypto/vmx/vmx.c
@@ -23,6 +23,7 @@
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/err.h>
+#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <asm/cputable.h>
#include <crypto/internal/hash.h>
@@ -31,10 +32,12 @@ extern struct shash_alg p8_ghash_alg;
extern struct crypto_alg p8_aes_alg;
extern struct crypto_alg p8_aes_cbc_alg;
extern struct crypto_alg p8_aes_ctr_alg;
+extern struct crypto_alg p8_aes_xts_alg;
static struct crypto_alg *algs[] = {
&p8_aes_alg,
&p8_aes_cbc_alg,
&p8_aes_ctr_alg,
+ &p8_aes_xts_alg,
NULL,
};
@@ -43,9 +46,6 @@ int __init p8_init(void)
int ret = 0;
struct crypto_alg **alg_it;
- if (!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_VEC_CRYPTO))
- return -ENODEV;
-
for (alg_it = algs; *alg_it; alg_it++) {
ret = crypto_register_alg(*alg_it);
printk(KERN_INFO "crypto_register_alg '%s' = %d\n",
@@ -78,7 +78,7 @@ void __exit p8_exit(void)
crypto_unregister_shash(&p8_ghash_alg);
}
-module_init(p8_init);
+module_cpu_feature_match(PPC_MODULE_FEATURE_VEC_CRYPTO, p8_init);
module_exit(p8_exit);
MODULE_AUTHOR("Marcelo Cerri<mhcerri@br.ibm.com>");
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index b891a129b..29f600f2c 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -211,11 +211,9 @@ int devm_create_dax_dev(struct dax_region *dax_region, struct resource *res,
}
dax_dev->dev = dev;
- rc = devm_add_action(dax_region->dev, unregister_dax_dev, dev);
- if (rc) {
- unregister_dax_dev(dev);
+ rc = devm_add_action_or_reset(dax_region->dev, unregister_dax_dev, dev);
+ if (rc)
return rc;
- }
return 0;
@@ -461,7 +459,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev,
}
pgoff = linear_page_index(vma, pmd_addr);
- phys = pgoff_to_phys(dax_dev, pgoff, PAGE_SIZE);
+ phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
if (phys == -1) {
dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
pgoff);
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index 82e6743c4..1f01e98c8 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -102,21 +102,19 @@ static int dax_pmem_probe(struct device *dev)
if (rc)
return rc;
- rc = devm_add_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref);
- if (rc) {
- dax_pmem_percpu_exit(&dax_pmem->ref);
+ rc = devm_add_action_or_reset(dev, dax_pmem_percpu_exit,
+ &dax_pmem->ref);
+ if (rc)
return rc;
- }
addr = devm_memremap_pages(dev, &res, &dax_pmem->ref, altmap);
if (IS_ERR(addr))
return PTR_ERR(addr);
- rc = devm_add_action(dev, dax_pmem_percpu_kill, &dax_pmem->ref);
- if (rc) {
- dax_pmem_percpu_kill(&dax_pmem->ref);
+ rc = devm_add_action_or_reset(dev, dax_pmem_percpu_kill,
+ &dax_pmem->ref);
+ if (rc)
return rc;
- }
/* adjust the dax_region resource to the start of data */
res.start += le64_to_cpu(pfn_sb->dataoff);
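Both dax conversions above depend on devm_add_action_or_reset() running the cleanup action immediately when registration fails, which is what lets the manual error branches go away. Its behavior is roughly (a sketch of the helper's semantics, not a verbatim copy of the in-tree definition):

	static inline int add_action_or_reset(struct device *dev,
					      void (*action)(void *),
					      void *data)
	{
		int ret = devm_add_action(dev, action, data);

		if (ret)
			action(data);	/* undo immediately on failure */
		return ret;
	}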
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 78dac0e9d..a5be56ec5 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -75,7 +75,7 @@ config DEVFREQ_GOV_PASSIVE
comment "DEVFREQ Drivers"
config ARM_EXYNOS_BUS_DEVFREQ
- bool "ARM EXYNOS Generic Memory Bus DEVFREQ Driver"
+ tristate "ARM EXYNOS Generic Memory Bus DEVFREQ Driver"
depends on ARCH_EXYNOS
select DEVFREQ_GOV_SIMPLE_ONDEMAND
select DEVFREQ_GOV_PASSIVE
diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c
index 39b048eda..9aea2c7ec 100644
--- a/drivers/devfreq/devfreq-event.c
+++ b/drivers/devfreq/devfreq-event.c
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/of.h>
@@ -481,13 +481,3 @@ static int __init devfreq_event_init(void)
return 0;
}
subsys_initcall(devfreq_event_init);
-
-static void __exit devfreq_event_exit(void)
-{
- class_destroy(devfreq_event_class);
-}
-module_exit(devfreq_event_exit);
-
-MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
-MODULE_DESCRIPTION("DEVFREQ-Event class support");
-MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index e92418fac..478006b77 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -15,7 +15,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/pm_opp.h>
@@ -707,10 +707,12 @@ struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
if (devfreq->dev.parent
&& devfreq->dev.parent->of_node == node) {
mutex_unlock(&devfreq_list_lock);
+ of_node_put(node);
return devfreq;
}
}
mutex_unlock(&devfreq_list_lock);
+ of_node_put(node);
return ERR_PTR(-EPROBE_DEFER);
}
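The of_node_put() calls added above fix a reference leak: of_parse_phandle() returns a device_node with an elevated refcount, so every exit path must drop it. A self-contained illustration of the pairing (the helper name is hypothetical):

	#include <linux/of.h>

	/* Check whether a "devfreq" phandle exists without leaking the
	 * node reference; of_node_put(NULL) is a safe no-op. */
	static bool demo_has_devfreq_parent(struct device *dev)
	{
		struct device_node *node;

		node = of_parse_phandle(dev->of_node, "devfreq", 0);
		of_node_put(node);
		return node != NULL;
	}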
@@ -1199,13 +1201,6 @@ static int __init devfreq_init(void)
}
subsys_initcall(devfreq_init);
-static void __exit devfreq_exit(void)
-{
- class_destroy(devfreq_class);
- destroy_workqueue(devfreq_wq);
-}
-module_exit(devfreq_exit);
-
/*
* The followings are helper functions for devfreq user device drivers with
* OPP framework.
@@ -1471,7 +1466,3 @@ void devm_devfreq_unregister_notifier(struct device *dev,
devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_unregister_notifier);
-
-MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
-MODULE_DESCRIPTION("devfreq class support");
-MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/event/Kconfig b/drivers/devfreq/event/Kconfig
index 1e8b4f469..eb6f74a2b 100644
--- a/drivers/devfreq/event/Kconfig
+++ b/drivers/devfreq/event/Kconfig
@@ -14,7 +14,7 @@ menuconfig PM_DEVFREQ_EVENT
if PM_DEVFREQ_EVENT
config DEVFREQ_EVENT_EXYNOS_NOCP
- bool "EXYNOS NoC (Network On Chip) Probe DEVFREQ event Driver"
+ tristate "EXYNOS NoC (Network On Chip) Probe DEVFREQ event Driver"
depends on ARCH_EXYNOS
select PM_OPP
help
@@ -22,7 +22,7 @@ config DEVFREQ_EVENT_EXYNOS_NOCP
(Network on Chip) Probe counters to measure the bandwidth of AXI bus.
config DEVFREQ_EVENT_EXYNOS_PPMU
- bool "EXYNOS PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver"
+ tristate "EXYNOS PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver"
depends on ARCH_EXYNOS
select PM_OPP
help
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index f312485f1..845bf25fb 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -482,7 +482,8 @@ static int exynos_ppmu_probe(struct platform_device *pdev)
if (!info->edev) {
dev_err(&pdev->dev,
"failed to allocate memory devfreq-event devices\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err;
}
edev = info->edev;
platform_set_drvdata(pdev, info);
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index 2363d0a18..29866f7e6 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -383,7 +383,7 @@ err_clk:
static int exynos_bus_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
+ struct device_node *np = dev->of_node, *node;
struct devfreq_dev_profile *profile;
struct devfreq_simple_ondemand_data *ondemand_data;
struct devfreq_passive_data *passive_data;
@@ -407,7 +407,7 @@ static int exynos_bus_probe(struct platform_device *pdev)
/* Parse the device-tree to get the resource information */
ret = exynos_bus_parse_of(np, bus);
if (ret < 0)
- goto err;
+ return ret;
profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
if (!profile) {
@@ -415,10 +415,13 @@ static int exynos_bus_probe(struct platform_device *pdev)
goto err;
}
- if (of_parse_phandle(dev->of_node, "devfreq", 0))
+ node = of_parse_phandle(dev->of_node, "devfreq", 0);
+ if (node) {
+ of_node_put(node);
goto passive;
- else
+ } else {
ret = exynos_bus_parent_parse_of(np, bus);
+ }
if (ret < 0)
goto err;
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 9824bc4ad..25bcfa0b4 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -1,11 +1,20 @@
menu "DMABUF options"
config SYNC_FILE
- bool "sync_file support for fences"
+ bool "Explicit Synchronization Framework"
default n
select ANON_INODES
select DMA_SHARED_BUFFER
---help---
- This option enables the fence framework synchronization to export
- sync_files to userspace that can represent one or more fences.
+	  The Sync File Framework adds explicit synchronization via
+	  userspace. It enables sending and receiving 'struct fence'
+	  objects to/from userspace via Sync File fds, for synchronization
+	  between drivers via userspace components. It has been ported
+	  from Android.
+
+	  The first and main user of this is graphics, in which a fence is
+	  associated with a buffer. When a job is submitted to the GPU, a
+	  fence is attached to the buffer and is transferred via userspace,
+	  using Sync File fds, to the DRM driver for example. More details at
+ Documentation/sync_file.txt.
+
endmenu
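The help text above describes the userspace-facing model. As a rough sketch of the in-kernel side, a driver that owns a fence could expose it as a Sync File fd along these lines (hypothetical function, not part of this patch):

#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fence.h>
#include <linux/sync_file.h>

static int example_fence_to_fd(struct fence *fence)
{
	struct sync_file *sync_file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	/* wrap the fence in a sync_file that userspace can poll() */
	sync_file = sync_file_create(fence);
	if (!sync_file) {
		put_unused_fd(fd);
		return -ENOMEM;
	}

	/* publish the fd; it can now be passed between processes */
	fd_install(fd, sync_file->file);
	return fd;
}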
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 4a424eca7..f353db213 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,2 +1,2 @@
-obj-y := dma-buf.o fence.o reservation.o seqno-fence.o
+obj-y := dma-buf.o fence.o reservation.o seqno-fence.o fence-array.o
obj-$(CONFIG_SYNC_FILE) += sync_file.o
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 6355ab38d..ddaee60ae 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -334,6 +334,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
struct reservation_object *resv = exp_info->resv;
struct file *file;
size_t alloc_size = sizeof(struct dma_buf);
+ int ret;
if (!exp_info->resv)
alloc_size += sizeof(struct reservation_object);
@@ -357,8 +358,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
dmabuf = kzalloc(alloc_size, GFP_KERNEL);
if (!dmabuf) {
- module_put(exp_info->owner);
- return ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
+ goto err_module;
}
dmabuf->priv = exp_info->priv;
@@ -379,8 +380,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf,
exp_info->flags);
if (IS_ERR(file)) {
- kfree(dmabuf);
- return ERR_CAST(file);
+ ret = PTR_ERR(file);
+ goto err_dmabuf;
}
file->f_mode |= FMODE_LSEEK;
@@ -394,6 +395,12 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
mutex_unlock(&db_list.lock);
return dmabuf;
+
+err_dmabuf:
+ kfree(dmabuf);
+err_module:
+ module_put(exp_info->owner);
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_export);
@@ -824,7 +831,7 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
EXPORT_SYMBOL_GPL(dma_buf_vunmap);
#ifdef CONFIG_DEBUG_FS
-static int dma_buf_describe(struct seq_file *s)
+static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
int ret;
struct dma_buf *buf_obj;
@@ -879,17 +886,9 @@ static int dma_buf_describe(struct seq_file *s)
return 0;
}
-static int dma_buf_show(struct seq_file *s, void *unused)
-{
- void (*func)(struct seq_file *) = s->private;
-
- func(s);
- return 0;
-}
-
static int dma_buf_debug_open(struct inode *inode, struct file *file)
{
- return single_open(file, dma_buf_show, inode->i_private);
+ return single_open(file, dma_buf_debug_show, NULL);
}
static const struct file_operations dma_buf_debug_fops = {
@@ -903,20 +902,23 @@ static struct dentry *dma_buf_debugfs_dir;
static int dma_buf_init_debugfs(void)
{
+ struct dentry *d;
int err = 0;
- dma_buf_debugfs_dir = debugfs_create_dir("dma_buf", NULL);
+ d = debugfs_create_dir("dma_buf", NULL);
+ if (IS_ERR(d))
+ return PTR_ERR(d);
- if (IS_ERR(dma_buf_debugfs_dir)) {
- err = PTR_ERR(dma_buf_debugfs_dir);
- dma_buf_debugfs_dir = NULL;
- return err;
- }
-
- err = dma_buf_debugfs_create_file("bufinfo", dma_buf_describe);
+ dma_buf_debugfs_dir = d;
- if (err)
+ d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
+ NULL, &dma_buf_debug_fops);
+ if (IS_ERR(d)) {
pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
+ debugfs_remove_recursive(dma_buf_debugfs_dir);
+ dma_buf_debugfs_dir = NULL;
+ err = PTR_ERR(d);
+ }
return err;
}
@@ -926,17 +928,6 @@ static void dma_buf_uninit_debugfs(void)
if (dma_buf_debugfs_dir)
debugfs_remove_recursive(dma_buf_debugfs_dir);
}
-
-int dma_buf_debugfs_create_file(const char *name,
- int (*write)(struct seq_file *))
-{
- struct dentry *d;
-
- d = debugfs_create_file(name, S_IRUGO, dma_buf_debugfs_dir,
- write, &dma_buf_debug_fops);
-
- return PTR_ERR_OR_ZERO(d);
-}
#else
static inline int dma_buf_init_debugfs(void)
{
diff --git a/drivers/dma-buf/fence-array.c b/drivers/dma-buf/fence-array.c
new file mode 100644
index 000000000..a8731c853
--- /dev/null
+++ b/drivers/dma-buf/fence-array.c
@@ -0,0 +1,144 @@
+/*
+ * fence-array: aggregate fences to be waited together
+ *
+ * Copyright (C) 2016 Collabora Ltd
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ * Authors:
+ * Gustavo Padovan <gustavo@padovan.org>
+ * Christian König <christian.koenig@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/fence-array.h>
+
+static void fence_array_cb_func(struct fence *f, struct fence_cb *cb);
+
+static const char *fence_array_get_driver_name(struct fence *fence)
+{
+ return "fence_array";
+}
+
+static const char *fence_array_get_timeline_name(struct fence *fence)
+{
+ return "unbound";
+}
+
+static void fence_array_cb_func(struct fence *f, struct fence_cb *cb)
+{
+ struct fence_array_cb *array_cb =
+ container_of(cb, struct fence_array_cb, cb);
+ struct fence_array *array = array_cb->array;
+
+ if (atomic_dec_and_test(&array->num_pending))
+ fence_signal(&array->base);
+ fence_put(&array->base);
+}
+
+static bool fence_array_enable_signaling(struct fence *fence)
+{
+ struct fence_array *array = to_fence_array(fence);
+ struct fence_array_cb *cb = (void *)(&array[1]);
+ unsigned i;
+
+ for (i = 0; i < array->num_fences; ++i) {
+ cb[i].array = array;
+ /*
+ * As we may report that the fence is signaled before all
+ * callbacks are complete, we need to take an additional
+ * reference count on the array so that we do not free it too
+ * early. The core fence handling will only hold the reference
+ * until we signal the array as complete (but that is now
+ * insufficient).
+ */
+ fence_get(&array->base);
+ if (fence_add_callback(array->fences[i], &cb[i].cb,
+ fence_array_cb_func)) {
+ fence_put(&array->base);
+ if (atomic_dec_and_test(&array->num_pending))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool fence_array_signaled(struct fence *fence)
+{
+ struct fence_array *array = to_fence_array(fence);
+
+ return atomic_read(&array->num_pending) <= 0;
+}
+
+static void fence_array_release(struct fence *fence)
+{
+ struct fence_array *array = to_fence_array(fence);
+ unsigned i;
+
+ for (i = 0; i < array->num_fences; ++i)
+ fence_put(array->fences[i]);
+
+ kfree(array->fences);
+ fence_free(fence);
+}
+
+const struct fence_ops fence_array_ops = {
+ .get_driver_name = fence_array_get_driver_name,
+ .get_timeline_name = fence_array_get_timeline_name,
+ .enable_signaling = fence_array_enable_signaling,
+ .signaled = fence_array_signaled,
+ .wait = fence_default_wait,
+ .release = fence_array_release,
+};
+
+/**
+ * fence_array_create - Create a custom fence array
+ * @num_fences: [in] number of fences to add in the array
+ * @fences: [in] array containing the fences
+ * @context: [in] fence context to use
+ * @seqno: [in] sequence number to use
+ * @signal_on_any:	[in]	signal on any fence in the array
+ *
+ * Allocate a fence_array object and initialize the base fence with fence_init().
+ * In case of error it returns NULL.
+ *
+ * The caller should allocate the fences array with num_fences entries
+ * and fill it with the fences it wants to add to the object. Ownership of
+ * this array is taken and fence_put() is used on each fence on release.
+ *
+ * If @signal_on_any is true the fence array signals if any fence in the array
+ * signals, otherwise it signals when all fences in the array signal.
+ */
+struct fence_array *fence_array_create(int num_fences, struct fence **fences,
+ u64 context, unsigned seqno,
+ bool signal_on_any)
+{
+ struct fence_array *array;
+ size_t size = sizeof(*array);
+
+ /* Allocate the callback structures behind the array. */
+ size += num_fences * sizeof(struct fence_array_cb);
+ array = kzalloc(size, GFP_KERNEL);
+ if (!array)
+ return NULL;
+
+ spin_lock_init(&array->lock);
+ fence_init(&array->base, &fence_array_ops, &array->lock,
+ context, seqno);
+
+ array->num_fences = num_fences;
+ atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
+ array->fences = fences;
+
+ return array;
+}
+EXPORT_SYMBOL(fence_array_create);
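As a usage sketch for the kernel-doc above (hypothetical caller, not part of this patch), here is how two fences could be merged into one fence that signals once both have signaled; the fences array must be heap-allocated because fence_array_release() kfree()s it:

#include <linux/fence-array.h>
#include <linux/slab.h>

static struct fence *example_merge(struct fence *a, struct fence *b)
{
	struct fence **fences;
	struct fence_array *array;

	fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return NULL;

	fences[0] = fence_get(a);
	fences[1] = fence_get(b);

	/* fresh context; signal_on_any=false means wait for all fences */
	array = fence_array_create(2, fences, fence_context_alloc(1),
				   1, false);
	if (!array) {
		fence_put(a);
		fence_put(b);
		kfree(fences);
		return NULL;
	}

	return &array->base;
}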
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c
index 7b05dbe9b..4d51f9e83 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/fence.c
@@ -35,7 +35,7 @@ EXPORT_TRACEPOINT_SYMBOL(fence_emit);
* context or not. One device can have multiple separate contexts,
* and they're used if some engine can run independently of another.
*/
-static atomic_t fence_context_counter = ATOMIC_INIT(0);
+static atomic64_t fence_context_counter = ATOMIC64_INIT(0);
/**
* fence_context_alloc - allocate an array of fence contexts
@@ -44,10 +44,10 @@ static atomic_t fence_context_counter = ATOMIC_INIT(0);
* This function will return the first index of the number of fences allocated.
* The fence context is used for setting fence->context to a unique number.
*/
-unsigned fence_context_alloc(unsigned num)
+u64 fence_context_alloc(unsigned num)
{
BUG_ON(!num);
- return atomic_add_return(num, &fence_context_counter) - num;
+ return atomic64_add_return(num, &fence_context_counter) - num;
}
EXPORT_SYMBOL(fence_context_alloc);
@@ -513,7 +513,7 @@ EXPORT_SYMBOL(fence_wait_any_timeout);
*/
void
fence_init(struct fence *fence, const struct fence_ops *ops,
- spinlock_t *lock, unsigned context, unsigned seqno)
+ spinlock_t *lock, u64 context, unsigned seqno)
{
BUG_ON(!lock);
BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
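The widening of fence contexts to u64 above matters for drivers that allocate one context per engine or ring for the lifetime of the system. A minimal sketch with hypothetical names, not taken from this patch:

#include <linux/fence.h>

struct example_ring {
	spinlock_t lock;
	u64 fence_context;	/* 64 bits: no realistic risk of wrapping */
	unsigned seqno;
};

static void example_ring_init(struct example_ring *ring)
{
	spin_lock_init(&ring->lock);
	/* reserve one context for all fences emitted by this ring */
	ring->fence_context = fence_context_alloc(1);
}

static void example_fence_init(struct example_ring *ring, struct fence *f,
			       const struct fence_ops *ops)
{
	fence_init(f, ops, &ring->lock, ring->fence_context, ++ring->seqno);
}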
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index f08cf2d83..9aaa608df 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -82,7 +82,7 @@ struct sync_file *sync_file_create(struct fence *fence)
sync_file->num_fences = 1;
atomic_set(&sync_file->status, 1);
- snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%d-%d",
+ snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d",
fence->ops->get_driver_name(fence),
fence->ops->get_timeline_name(fence), fence->context,
fence->seqno);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 8c98779a1..739f797b4 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -339,6 +339,20 @@ config MV_XOR
---help---
Enable support for the Marvell XOR engine.
+config MV_XOR_V2
+ bool "Marvell XOR engine version 2 support "
+ depends on ARM64
+ select DMA_ENGINE
+ select DMA_ENGINE_RAID
+ select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+ select GENERIC_MSI_IRQ_DOMAIN
+ ---help---
+ Enable support for the Marvell version 2 XOR engine.
+
+ This engine provides acceleration for copy, XOR and RAID6
+ operations, and is available on Marvell Armada 7K and 8K
+ platforms.
+
config MXS_DMA
bool "MXS DMA support"
depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q || SOC_IMX6UL
@@ -519,19 +533,31 @@ config XGENE_DMA
help
Enable support for the APM X-Gene SoC DMA engine.
-config XILINX_VDMA
- tristate "Xilinx AXI VDMA Engine"
+config XILINX_DMA
+ tristate "Xilinx AXI DMAS Engine"
depends on (ARCH_ZYNQ || MICROBLAZE || ARM64)
select DMA_ENGINE
help
Enable support for Xilinx AXI VDMA Soft IP.
- This engine provides high-bandwidth direct memory access
+ AXI VDMA engine provides high-bandwidth direct memory access
between memory and AXI4-Stream video type target
peripherals including peripherals which support AXI4-
Stream Video Protocol. It has two stream interfaces/
channels, Memory Mapped to Stream (MM2S) and Stream to
Memory Mapped (S2MM) for the data transfers.
+ AXI CDMA engine provides high-bandwidth direct memory access
+ between a memory-mapped source address and a memory-mapped
+ destination address.
+ AXI DMA engine provides high-bandwidth one dimensional direct
+ memory access between memory and AXI4-Stream target peripherals.
+
+config XILINX_ZYNQMP_DMA
+ tristate "Xilinx ZynqMP DMA Engine"
+ depends on (ARCH_ZYNQ || MICROBLAZE || ARM64)
+ select DMA_ENGINE
+ help
+ Enable support for Xilinx ZynqMP DMA controller.
config ZX_DMA
tristate "ZTE ZX296702 DMA support"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 614f28b0b..e4dc9cac7 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
obj-$(CONFIG_MV_XOR) += mv_xor.o
+obj-$(CONFIG_MV_XOR_V2) += mv_xor_v2.o
obj-$(CONFIG_MXS_DMA) += mxs-dma.o
obj-$(CONFIG_MX3_IPU) += ipu/
obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 81db1c481..939a7c31f 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1443,8 +1443,6 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
if (!dsg) {
pl08x_free_txd(pl08x, txd);
- dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n",
- __func__);
return NULL;
}
list_add_tail(&dsg->node, &txd->dsg_list);
@@ -1901,11 +1899,8 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
*/
for (i = 0; i < channels; i++) {
chan = kzalloc(sizeof(*chan), GFP_KERNEL);
- if (!chan) {
- dev_err(&pl08x->adev->dev,
- "%s no memory for channel\n", __func__);
+ if (!chan)
return -ENOMEM;
- }
chan->host = pl08x;
chan->state = PL08X_CHAN_IDLE;
@@ -2360,9 +2355,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)),
GFP_KERNEL);
if (!pl08x->phy_chans) {
- dev_err(&adev->dev, "%s failed to allocate "
- "physical channel holders\n",
- __func__);
ret = -ENOMEM;
goto out_no_phychans;
}
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 75bd6621d..832cbd647 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -456,7 +456,7 @@ static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
return desc;
}
-void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
+static void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
{
memset(&desc->lld, 0, sizeof(desc->lld));
INIT_LIST_HEAD(&desc->descs_list);
@@ -1195,14 +1195,14 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
desc->lld.mbr_cfg = chan_cc;
dev_dbg(chan2dev(chan),
- "%s: lld: mbr_da=%pad, mbr_ds=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
- __func__, &desc->lld.mbr_da, &desc->lld.mbr_ds, desc->lld.mbr_ubc,
+ "%s: lld: mbr_da=%pad, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
+ __func__, &desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc,
desc->lld.mbr_cfg);
return desc;
}
-struct dma_async_tx_descriptor *
+static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
size_t len, unsigned long flags)
{
@@ -2067,7 +2067,7 @@ err_dma_unregister:
err_clk_disable:
clk_disable_unprepare(atxdmac->clk);
err_free_irq:
- free_irq(atxdmac->irq, atxdmac->dma.dev);
+ free_irq(atxdmac->irq, atxdmac);
return ret;
}
@@ -2081,7 +2081,7 @@ static int at_xdmac_remove(struct platform_device *pdev)
dma_async_device_unregister(&atxdmac->dma);
clk_disable_unprepare(atxdmac->clk);
- free_irq(atxdmac->irq, atxdmac->dma.dev);
+ free_irq(atxdmac->irq, atxdmac);
for (i = 0; i < atxdmac->dma.chancnt; i++) {
struct at_xdmac_chan *atchan = &atxdmac->chan[i];
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 6149b27c3..e18dc596c 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -393,11 +393,12 @@ static void bcm2835_dma_fill_cb_chain_with_sg(
unsigned int sg_len)
{
struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
- size_t max_len = bcm2835_dma_max_frame_length(c);
- unsigned int i, len;
+ size_t len, max_len;
+ unsigned int i;
dma_addr_t addr;
struct scatterlist *sgent;
+ max_len = bcm2835_dma_max_frame_length(c);
for_each_sg(sgl, sgent, sg_len, i) {
for (addr = sg_dma_address(sgent), len = sg_dma_len(sgent);
len > 0;
@@ -613,7 +614,7 @@ static void bcm2835_dma_issue_pending(struct dma_chan *chan)
spin_unlock_irqrestore(&c->vc.lock, flags);
}
-struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_memcpy(
+static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_memcpy(
struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
size_t len, unsigned long flags)
{
diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c
index 180fedb41..7ce843723 100644
--- a/drivers/dma/bestcomm/bestcomm.c
+++ b/drivers/dma/bestcomm/bestcomm.c
@@ -397,8 +397,6 @@ static int mpc52xx_bcom_probe(struct platform_device *op)
/* Get a clean struct */
bcom_eng = kzalloc(sizeof(struct bcom_engine), GFP_KERNEL);
if (!bcom_eng) {
- printk(KERN_ERR DRIVER_NAME ": "
- "Can't allocate state structure\n");
rv = -ENOMEM;
goto error_sramclean;
}
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index c340ca9bd..e4acd63e4 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -266,7 +266,7 @@ static int dma_memcpy_channels[] = {
COH901318_CX_CTRL_DDMA_LEGACY | \
COH901318_CX_CTRL_PRDD_SOURCE)
-const struct coh_dma_channel chan_config[U300_DMA_CHANNELS] = {
+static const struct coh_dma_channel chan_config[U300_DMA_CHANNELS] = {
{
.number = U300_DMA_MSL_TX_0,
.name = "MSL TX 0",
@@ -1280,6 +1280,7 @@ struct coh901318_desc {
struct coh901318_base {
struct device *dev;
void __iomem *virtbase;
+ unsigned int irq;
struct coh901318_pool pool;
struct powersave pm;
struct dma_device dma_slave;
@@ -1364,7 +1365,6 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf,
}
static const struct file_operations coh901318_debugfs_status_operations = {
- .owner = THIS_MODULE,
.open = simple_open,
.read = coh901318_debugfs_read,
.llseek = default_llseek,
@@ -2422,7 +2422,7 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_COMPLETE)
+ if (ret == DMA_COMPLETE || !txstate)
return ret;
dma_set_residue(txstate, coh901318_get_bytes_left(chan));
@@ -2680,6 +2680,8 @@ static int __init coh901318_probe(struct platform_device *pdev)
if (err)
return err;
+ base->irq = irq;
+
err = coh901318_pool_create(&base->pool, &pdev->dev,
sizeof(struct coh901318_lli),
32);
@@ -2755,11 +2757,31 @@ static int __init coh901318_probe(struct platform_device *pdev)
coh901318_pool_destroy(&base->pool);
return err;
}
+static void coh901318_base_remove(struct coh901318_base *base, const int *pick_chans)
+{
+ int chans_i;
+ int i = 0;
+ struct coh901318_chan *cohc;
+
+ for (chans_i = 0; pick_chans[chans_i] != -1; chans_i += 2) {
+ for (i = pick_chans[chans_i]; i <= pick_chans[chans_i+1]; i++) {
+ cohc = &base->chans[i];
+
+ tasklet_kill(&cohc->tasklet);
+ }
+ }
+}
static int coh901318_remove(struct platform_device *pdev)
{
struct coh901318_base *base = platform_get_drvdata(pdev);
+ devm_free_irq(&pdev->dev, base->irq, base);
+
+ coh901318_base_remove(base, dma_slave_channels);
+ coh901318_base_remove(base, dma_memcpy_channels);
+
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&base->dma_memcpy);
dma_async_device_unregister(&base->dma_slave);
@@ -2780,13 +2802,13 @@ static struct platform_driver coh901318_driver = {
},
};
-int __init coh901318_init(void)
+static int __init coh901318_init(void)
{
return platform_driver_probe(&coh901318_driver, coh901318_probe);
}
subsys_initcall(coh901318_init);
-void __exit coh901318_exit(void)
+static void __exit coh901318_exit(void)
{
platform_driver_unregister(&coh901318_driver);
}
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index ceedafbd2..4b2317426 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -497,16 +497,13 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
struct cppi41_desc *d;
struct scatterlist *sg;
unsigned int i;
- unsigned int num;
- num = 0;
d = c->desc;
for_each_sg(sgl, sg, sg_len, i) {
u32 addr;
u32 len;
/* We need to use more than one desc once musb supports sg */
- BUG_ON(num > 0);
addr = lower_32_bits(sg_dma_address(sg));
len = sg_dma_len(sg);
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index c34680943..7f0b9aa15 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -270,6 +270,9 @@ static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
unsigned int pending;
pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
+ if (!pending)
+ return IRQ_NONE;
+
axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);
spin_lock(&dmac->chan.vchan.lock);
@@ -579,7 +582,9 @@ static int axi_dmac_probe(struct platform_device *pdev)
return -ENOMEM;
dmac->irq = platform_get_irq(pdev, 0);
- if (dmac->irq <= 0)
+ if (dmac->irq < 0)
+ return dmac->irq;
+ if (dmac->irq == 0)
return -EINVAL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -683,6 +688,7 @@ static const struct of_device_id axi_dmac_of_match_table[] = {
{ .compatible = "adi,axi-dmac-1.00.a" },
{ },
};
+MODULE_DEVICE_TABLE(of, axi_dmac_of_match_table);
static struct platform_driver axi_dmac_driver = {
.driver = {
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index 7638b24ce..9689b36c0 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -573,12 +573,26 @@ err_unregister:
return ret;
}
+static void jz4740_cleanup_vchan(struct dma_device *dmadev)
+{
+ struct jz4740_dmaengine_chan *chan, *_chan;
+
+ list_for_each_entry_safe(chan, _chan,
+ &dmadev->channels, vchan.chan.device_node) {
+ list_del(&chan->vchan.chan.device_node);
+ tasklet_kill(&chan->vchan.task);
+ }
+}
+
static int jz4740_dma_remove(struct platform_device *pdev)
{
struct jz4740_dma_dev *dmadev = platform_get_drvdata(pdev);
int irq = platform_get_irq(pdev, 0);
free_irq(irq, dmadev);
+
+ jz4740_cleanup_vchan(&dmadev->ddev);
dma_async_device_unregister(&dmadev->ddev);
clk_disable_unprepare(dmadev->clk);
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index b8576fd6b..1245db543 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -51,6 +51,16 @@ module_param(iterations, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(iterations,
"Iterations before stopping test (default: infinite)");
+static unsigned int sg_buffers = 1;
+module_param(sg_buffers, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(sg_buffers,
+ "Number of scatter gather buffers (default: 1)");
+
+static unsigned int dmatest = 1;
+module_param(dmatest, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dmatest,
+ "dmatest 0-memcpy 1-slave_sg (default: 1)");
+
static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(xor_sources,
@@ -431,6 +441,8 @@ static int dmatest_func(void *data)
dev = chan->device;
if (thread->type == DMA_MEMCPY)
src_cnt = dst_cnt = 1;
+ else if (thread->type == DMA_SG)
+ src_cnt = dst_cnt = sg_buffers;
else if (thread->type == DMA_XOR) {
/* force odd to ensure dst = src */
src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
@@ -485,6 +497,8 @@ static int dmatest_func(void *data)
dma_addr_t *dsts;
unsigned int src_off, dst_off, len;
u8 align = 0;
+ struct scatterlist tx_sg[src_cnt];
+ struct scatterlist rx_sg[src_cnt];
total_tests++;
@@ -577,10 +591,22 @@ static int dmatest_func(void *data)
um->bidi_cnt++;
}
+ sg_init_table(tx_sg, src_cnt);
+ sg_init_table(rx_sg, src_cnt);
+ for (i = 0; i < src_cnt; i++) {
+ sg_dma_address(&rx_sg[i]) = srcs[i];
+ sg_dma_address(&tx_sg[i]) = dsts[i] + dst_off;
+ sg_dma_len(&tx_sg[i]) = len;
+ sg_dma_len(&rx_sg[i]) = len;
+ }
+
if (thread->type == DMA_MEMCPY)
tx = dev->device_prep_dma_memcpy(chan,
dsts[0] + dst_off,
srcs[0], len, flags);
+ else if (thread->type == DMA_SG)
+ tx = dev->device_prep_dma_sg(chan, tx_sg, src_cnt,
+ rx_sg, src_cnt, flags);
else if (thread->type == DMA_XOR)
tx = dev->device_prep_dma_xor(chan,
dsts[0] + dst_off,
@@ -748,6 +774,8 @@ static int dmatest_add_threads(struct dmatest_info *info,
if (type == DMA_MEMCPY)
op = "copy";
+ else if (type == DMA_SG)
+ op = "sg";
else if (type == DMA_XOR)
op = "xor";
else if (type == DMA_PQ)
@@ -802,9 +830,19 @@ static int dmatest_add_channel(struct dmatest_info *info,
INIT_LIST_HEAD(&dtc->threads);
if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
- cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
- thread_count += cnt > 0 ? cnt : 0;
+ if (dmatest == 0) {
+ cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
+ thread_count += cnt > 0 ? cnt : 0;
+ }
}
+
+ if (dma_has_cap(DMA_SG, dma_dev->cap_mask)) {
+ if (dmatest == 1) {
+ cnt = dmatest_add_threads(info, dtc, DMA_SG);
+ thread_count += cnt > 0 ? cnt : 0;
+ }
+ }
+
if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
cnt = dmatest_add_threads(info, dtc, DMA_XOR);
thread_count += cnt > 0 ? cnt : 0;
@@ -877,6 +915,7 @@ static void run_threaded_test(struct dmatest_info *info)
request_channels(info, DMA_MEMCPY);
request_channels(info, DMA_XOR);
+ request_channels(info, DMA_SG);
request_channels(info, DMA_PQ);
}
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 8181ed131..3d277fa76 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -239,6 +239,9 @@ struct edma_cc {
bool chmap_exist;
enum dma_event_q default_queue;
+ unsigned int ccint;
+ unsigned int ccerrint;
+
/*
* The slot_inuse bit for each PaRAM slot is clear unless the slot is
* in use by Linux or if it is allocated to be used by DSP.
@@ -1069,10 +1072,8 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
edesc = kzalloc(sizeof(*edesc) + sg_len * sizeof(edesc->pset[0]),
GFP_ATOMIC);
- if (!edesc) {
- dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
+ if (!edesc)
return NULL;
- }
edesc->pset_nr = sg_len;
edesc->residue = 0;
@@ -1114,14 +1115,17 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
edesc->absync = ret;
edesc->residue += sg_dma_len(sg);
- /* If this is the last in a current SG set of transactions,
- enable interrupts so that next set is processed */
- if (!((i+1) % MAX_NR_SG))
- edesc->pset[i].param.opt |= TCINTEN;
-
- /* If this is the last set, enable completion interrupt flag */
if (i == sg_len - 1)
+ /* Enable completion interrupt */
edesc->pset[i].param.opt |= TCINTEN;
+ else if (!((i+1) % MAX_NR_SG))
+ /*
+ * Enable early completion interrupt for the
+ * intermediate set. In this case the driver will be
+ * notified when the PaRAM set is submitted to TC. This
+ * will allow more time to set up the next set of slots.
+ */
+ edesc->pset[i].param.opt |= (TCINTEN | TCCMODE);
}
edesc->residue_stat = edesc->residue;
@@ -1173,10 +1177,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
GFP_ATOMIC);
- if (!edesc) {
- dev_dbg(dev, "Failed to allocate a descriptor\n");
+ if (!edesc)
return NULL;
- }
edesc->pset_nr = nslots;
edesc->residue = edesc->residue_stat = len;
@@ -1298,10 +1300,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
GFP_ATOMIC);
- if (!edesc) {
- dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
+ if (!edesc)
return NULL;
- }
edesc->cyclic = 1;
edesc->pset_nr = nslots;
@@ -2207,10 +2207,8 @@ static int edma_probe(struct platform_device *pdev)
return ret;
ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
- if (!ecc) {
- dev_err(dev, "Can't allocate controller\n");
+ if (!ecc)
return -ENOMEM;
- }
ecc->dev = dev;
ecc->id = pdev->id;
@@ -2288,6 +2286,7 @@ static int edma_probe(struct platform_device *pdev)
dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
return ret;
}
+ ecc->ccint = irq;
}
irq = platform_get_irq_byname(pdev, "edma3_ccerrint");
@@ -2303,6 +2302,7 @@ static int edma_probe(struct platform_device *pdev)
dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
return ret;
}
+ ecc->ccerrint = irq;
}
ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
@@ -2393,11 +2393,27 @@ err_reg1:
return ret;
}
+static void edma_cleanup_vchan(struct dma_device *dmadev)
+{
+ struct edma_chan *echan, *_echan;
+
+ list_for_each_entry_safe(echan, _echan,
+ &dmadev->channels, vchan.chan.device_node) {
+ list_del(&echan->vchan.chan.device_node);
+ tasklet_kill(&echan->vchan.task);
+ }
+}
+
static int edma_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct edma_cc *ecc = dev_get_drvdata(dev);
+ devm_free_irq(dev, ecc->ccint, ecc);
+ devm_free_irq(dev, ecc->ccerrint, ecc);
+
+ edma_cleanup_vchan(&ecc->dma_slave);
+
if (dev->of_node)
of_dma_controller_free(dev->of_node);
dma_async_device_unregister(&ecc->dma_slave);
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index be2e62b87..6775f2c74 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -852,6 +852,25 @@ fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma
return 0;
}
+static void fsl_edma_irq_exit(
+ struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
+{
+ if (fsl_edma->txirq == fsl_edma->errirq) {
+ devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
+ } else {
+ devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
+ devm_free_irq(&pdev->dev, fsl_edma->errirq, fsl_edma);
+ }
+}
+
+static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma)
+{
+ int i;
+
+ for (i = 0; i < DMAMUX_NR; i++)
+ clk_disable_unprepare(fsl_edma->muxclk[i]);
+}
+
static int fsl_edma_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -897,6 +916,10 @@ static int fsl_edma_probe(struct platform_device *pdev)
ret = clk_prepare_enable(fsl_edma->muxclk[i]);
if (ret) {
+ /* on error, unwind only the clocks already enabled */
+ while (--i >= 0)
+ clk_disable_unprepare(fsl_edma->muxclk[i]);
+
dev_err(&pdev->dev, "DMAMUX clk block failed.\n");
return ret;
}
@@ -951,14 +974,18 @@ static int fsl_edma_probe(struct platform_device *pdev)
ret = dma_async_device_register(&fsl_edma->dma_dev);
if (ret) {
- dev_err(&pdev->dev, "Can't register Freescale eDMA engine.\n");
+ dev_err(&pdev->dev,
+ "Can't register Freescale eDMA engine. (%d)\n", ret);
+ fsl_disable_clocks(fsl_edma);
return ret;
}
ret = of_dma_controller_register(np, fsl_edma_xlate, fsl_edma);
if (ret) {
- dev_err(&pdev->dev, "Can't register Freescale eDMA of_dma.\n");
+ dev_err(&pdev->dev,
+ "Can't register Freescale eDMA of_dma. (%d)\n", ret);
dma_async_device_unregister(&fsl_edma->dma_dev);
+ fsl_disable_clocks(fsl_edma);
return ret;
}
@@ -968,17 +995,27 @@ static int fsl_edma_probe(struct platform_device *pdev)
return 0;
}
+static void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
+{
+ struct fsl_edma_chan *chan, *_chan;
+
+ list_for_each_entry_safe(chan, _chan,
+ &dmadev->channels, vchan.chan.device_node) {
+ list_del(&chan->vchan.chan.device_node);
+ tasklet_kill(&chan->vchan.task);
+ }
+}
+
static int fsl_edma_remove(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev);
- int i;
+ fsl_edma_irq_exit(pdev, fsl_edma);
+ fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
of_dma_controller_free(np);
dma_async_device_unregister(&fsl_edma->dma_dev);
-
- for (i = 0; i < DMAMUX_NR; i++)
- clk_disable_unprepare(fsl_edma->muxclk[i]);
+ fsl_disable_clocks(fsl_edma);
return 0;
}
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
index 4d9470f16..de2a2a2b1 100644
--- a/drivers/dma/fsl_raid.c
+++ b/drivers/dma/fsl_raid.c
@@ -337,7 +337,7 @@ static struct dma_async_tx_descriptor *fsl_re_prep_dma_genq(
re_chan = container_of(chan, struct fsl_re_chan, chan);
if (len > FSL_RE_MAX_DATA_LEN) {
- dev_err(re_chan->dev, "genq tx length %lu, max length %d\n",
+ dev_err(re_chan->dev, "genq tx length %zu, max length %d\n",
len, FSL_RE_MAX_DATA_LEN);
return NULL;
}
@@ -424,7 +424,7 @@ static struct dma_async_tx_descriptor *fsl_re_prep_dma_pq(
re_chan = container_of(chan, struct fsl_re_chan, chan);
if (len > FSL_RE_MAX_DATA_LEN) {
- dev_err(re_chan->dev, "pq tx length is %lu, max length is %d\n",
+ dev_err(re_chan->dev, "pq tx length is %zu, max length is %d\n",
len, FSL_RE_MAX_DATA_LEN);
return NULL;
}
@@ -545,7 +545,7 @@ static struct dma_async_tx_descriptor *fsl_re_prep_dma_memcpy(
re_chan = container_of(chan, struct fsl_re_chan, chan);
if (len > FSL_RE_MAX_DATA_LEN) {
- dev_err(re_chan->dev, "cp tx length is %lu, max length is %d\n",
+ dev_err(re_chan->dev, "cp tx length is %zu, max length is %d\n",
len, FSL_RE_MAX_DATA_LEN);
return NULL;
}
@@ -836,6 +836,7 @@ static int fsl_re_probe(struct platform_device *ofdev)
rc = of_property_read_u32(np, "reg", &off);
if (rc) {
dev_err(dev, "Reg property not found in JQ node\n");
+ of_node_put(np);
return -ENODEV;
}
/* Find out the Job Rings present under each JQ */
@@ -856,6 +857,8 @@ static int fsl_re_probe(struct platform_device *ofdev)
static void fsl_re_remove_chan(struct fsl_re_chan *chan)
{
+ tasklet_kill(&chan->irqtask);
+
dma_pool_free(chan->re_dev->hw_desc_pool, chan->inb_ring_virt_addr,
chan->inb_phys_addr);
@@ -890,7 +893,6 @@ static struct of_device_id fsl_re_ids[] = {
static struct platform_driver fsl_re_driver = {
.driver = {
.name = "fsl-raideng",
- .owner = THIS_MODULE,
.of_match_table = fsl_re_ids,
},
.probe = fsl_re_probe,
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index a8828ed63..911b7177e 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1234,7 +1234,6 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev,
/* alloc channel */
chan = kzalloc(sizeof(*chan), GFP_KERNEL);
if (!chan) {
- dev_err(fdev->dev, "no free memory for DMA channels!\n");
err = -ENOMEM;
goto out_return;
}
@@ -1340,7 +1339,6 @@ static int fsldma_of_probe(struct platform_device *op)
fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
if (!fdev) {
- dev_err(&op->dev, "No enough memory for 'priv'\n");
err = -ENOMEM;
goto out_return;
}
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index f8c5cd533..c5f21efd6 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -126,28 +126,33 @@ static void hsu_dma_start_transfer(struct hsu_dma_chan *hsuc)
hsu_dma_start_channel(hsuc);
}
-static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
-{
- unsigned long flags;
- u32 sr;
-
- spin_lock_irqsave(&hsuc->vchan.lock, flags);
- sr = hsu_chan_readl(hsuc, HSU_CH_SR);
- spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
-
- return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
-}
-
-irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
+/*
+ * hsu_dma_get_status() - get DMA channel status
+ * @chip: HSUART DMA chip
+ * @nr: DMA channel number
+ * @status: pointer for DMA Channel Status Register value
+ *
+ * Description:
+ * The function reads and clears the DMA Channel Status Register, checks
+ * if it was a timeout interrupt and returns a corresponding value.
+ *
+ * Caller should provide a valid pointer for the DMA Channel Status
+ * Register value that will be returned in @status.
+ *
+ * Return:
+ * 1 for DMA timeout status, 0 for other DMA status, or a negative error
+ * code for invalid parameters or no interrupt pending.
+ */
+int hsu_dma_get_status(struct hsu_dma_chip *chip, unsigned short nr,
+ u32 *status)
{
struct hsu_dma_chan *hsuc;
- struct hsu_dma_desc *desc;
unsigned long flags;
u32 sr;
/* Sanity check */
if (nr >= chip->hsu->nr_channels)
- return IRQ_NONE;
+ return -EINVAL;
hsuc = &chip->hsu->chan[nr];
@@ -155,22 +160,65 @@ irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
* No matter what situation, need read clear the IRQ status
* There is a bug, see Errata 5, HSD 2900918
*/
- sr = hsu_dma_chan_get_sr(hsuc);
+ spin_lock_irqsave(&hsuc->vchan.lock, flags);
+ sr = hsu_chan_readl(hsuc, HSU_CH_SR);
+ spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
+
+ /* Check if any interrupt is pending */
+ sr &= ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
if (!sr)
- return IRQ_NONE;
+ return -EIO;
/* Timeout IRQ, need wait some time, see Errata 2 */
if (sr & HSU_CH_SR_DESCTO_ANY)
udelay(2);
+ /*
+ * At this point, at least one of Descriptor Time Out, Channel Error
+ * or Descriptor Done bits must be set. Clear the Descriptor Time Out
+ * bits and, if sr is still non-zero, it must be a channel error or
+ * descriptor done, which are higher priority than timeout and handled
+ * in hsu_dma_do_irq(). Otherwise, it must be a timeout.
+ */
sr &= ~HSU_CH_SR_DESCTO_ANY;
- if (!sr)
- return IRQ_HANDLED;
+
+ *status = sr;
+
+ return sr ? 0 : 1;
+}
+EXPORT_SYMBOL_GPL(hsu_dma_get_status);
+
+/*
+ * hsu_dma_do_irq() - DMA interrupt handler
+ * @chip: HSUART DMA chip
+ * @nr: DMA channel number
+ * @status: Channel Status Register value
+ *
+ * Description:
+ * This function handles Channel Error and Descriptor Done interrupts.
+ * This function should be called after determining that the DMA interrupt
+ * is not a normal timeout interrupt, i.e. hsu_dma_get_status() returned 0.
+ *
+ * Return:
+ * IRQ_NONE for invalid channel number, IRQ_HANDLED otherwise.
+ */
+irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr,
+ u32 status)
+{
+ struct hsu_dma_chan *hsuc;
+ struct hsu_dma_desc *desc;
+ unsigned long flags;
+
+ /* Sanity check */
+ if (nr >= chip->hsu->nr_channels)
+ return IRQ_NONE;
+
+ hsuc = &chip->hsu->chan[nr];
spin_lock_irqsave(&hsuc->vchan.lock, flags);
desc = hsuc->desc;
if (desc) {
- if (sr & HSU_CH_SR_CHE) {
+ if (status & HSU_CH_SR_CHE) {
desc->status = DMA_ERROR;
} else if (desc->active < desc->nents) {
hsu_dma_start_channel(hsuc);
@@ -184,7 +232,7 @@ irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
return IRQ_HANDLED;
}
-EXPORT_SYMBOL_GPL(hsu_dma_irq);
+EXPORT_SYMBOL_GPL(hsu_dma_do_irq);
static struct hsu_dma_desc *hsu_dma_alloc_desc(unsigned int nents)
{
diff --git a/drivers/dma/hsu/pci.c b/drivers/dma/hsu/pci.c
index e2db76bd5..991605853 100644
--- a/drivers/dma/hsu/pci.c
+++ b/drivers/dma/hsu/pci.c
@@ -27,13 +27,20 @@ static irqreturn_t hsu_pci_irq(int irq, void *dev)
{
struct hsu_dma_chip *chip = dev;
u32 dmaisr;
+ u32 status;
unsigned short i;
irqreturn_t ret = IRQ_NONE;
+ int err;
dmaisr = readl(chip->regs + HSU_PCI_DMAISR);
for (i = 0; i < chip->hsu->nr_channels; i++) {
- if (dmaisr & 0x1)
- ret |= hsu_dma_irq(chip, i);
+ if (dmaisr & 0x1) {
+ err = hsu_dma_get_status(chip, i, &status);
+ if (err > 0)
+ ret |= IRQ_HANDLED;
+ else if (err == 0)
+ ret |= hsu_dma_do_irq(chip, i, status);
+ }
dmaisr >>= 1;
}
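hsu_pci_irq() above is one consumer of the split API; a UART driver sharing the DMA engine would follow the same two-step shape (hypothetical caller, not from this patch):

static irqreturn_t example_uart_dma_irq(struct hsu_dma_chip *chip,
					unsigned short nr)
{
	u32 status;
	int ret;

	ret = hsu_dma_get_status(chip, nr, &status);
	if (ret < 0)
		return IRQ_NONE;	/* no DMA interrupt pending */
	if (ret > 0)
		return IRQ_HANDLED;	/* timeout handled by the caller */

	/* channel error or descriptor done */
	return hsu_dma_do_irq(chip, nr, status);
}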
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index a4c53be48..624f1e1e9 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -861,7 +861,6 @@ static int mdc_dma_probe(struct platform_device *pdev)
{
struct mdc_dma *mdma;
struct resource *res;
- const struct of_device_id *match;
unsigned int i;
u32 val;
int ret;
@@ -871,8 +870,7 @@ static int mdc_dma_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, mdma);
- match = of_match_device(mdc_dma_of_match, &pdev->dev);
- mdma->soc = match->data;
+ mdma->soc = of_device_get_match_data(&pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mdma->regs = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 48d85f8b9..a960608c0 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -167,6 +167,7 @@ struct imxdma_channel {
u32 ccr_to_device;
bool enabled_2d;
int slot_2d;
+ unsigned int irq;
};
enum imx_dma_type {
@@ -186,6 +187,9 @@ struct imxdma_engine {
struct imx_dma_2d_config slots_2d[IMX_DMA_2D_SLOTS];
struct imxdma_channel channel[IMX_DMA_CHANNELS];
enum imx_dma_type devtype;
+ unsigned int irq;
+ unsigned int irq_err;
+
};
struct imxdma_filter_data {
@@ -1048,7 +1052,7 @@ static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec,
}
static int __init imxdma_probe(struct platform_device *pdev)
- {
+{
struct imxdma_engine *imxdma;
struct resource *res;
const struct of_device_id *of_id;
@@ -1100,6 +1104,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
goto disable_dma_ahb_clk;
}
+ imxdma->irq = irq;
irq_err = platform_get_irq(pdev, 1);
if (irq_err < 0) {
@@ -1113,6 +1118,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
goto disable_dma_ahb_clk;
}
+ imxdma->irq_err = irq_err;
}
/* enable DMA module */
@@ -1150,6 +1156,8 @@ static int __init imxdma_probe(struct platform_device *pdev)
irq + i, i);
goto disable_dma_ahb_clk;
}
+
+ imxdmac->irq = irq + i;
init_timer(&imxdmac->watchdog);
imxdmac->watchdog.function = &imxdma_watchdog;
imxdmac->watchdog.data = (unsigned long)imxdmac;
@@ -1217,10 +1225,31 @@ disable_dma_ipg_clk:
return ret;
}
+static void imxdma_free_irq(struct platform_device *pdev, struct imxdma_engine *imxdma)
+{
+ int i;
+
+ if (is_imx1_dma(imxdma)) {
+ disable_irq(imxdma->irq);
+ disable_irq(imxdma->irq_err);
+ }
+
+ for (i = 0; i < IMX_DMA_CHANNELS; i++) {
+ struct imxdma_channel *imxdmac = &imxdma->channel[i];
+
+ if (!is_imx1_dma(imxdma))
+ disable_irq(imxdmac->irq);
+
+ tasklet_kill(&imxdmac->dma_tasklet);
+ }
+}
+
static int imxdma_remove(struct platform_device *pdev)
{
struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
+ imxdma_free_irq(pdev, imxdma);
+
dma_async_device_unregister(&imxdma->dma_device);
if (pdev->dev.of_node)
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index ff8f98e25..4db2316c8 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -18,6 +18,7 @@
*/
#include <linux/init.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/bitops.h>
@@ -385,6 +386,7 @@ struct sdma_engine {
const struct sdma_driver_data *drvdata;
u32 spba_start_addr;
u32 spba_end_addr;
+ unsigned int irq;
};
static struct sdma_driver_data sdma_imx31 = {
@@ -571,28 +573,20 @@ static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
static int sdma_run_channel0(struct sdma_engine *sdma)
{
int ret;
- unsigned long timeout = 500;
+ u32 reg;
sdma_enable_channel(sdma, 0);
- while (!(ret = readl_relaxed(sdma->regs + SDMA_H_INTR) & 1)) {
- if (timeout-- <= 0)
- break;
- udelay(1);
- }
-
- if (ret) {
- /* Clear the interrupt status */
- writel_relaxed(ret, sdma->regs + SDMA_H_INTR);
- } else {
+ ret = readl_relaxed_poll_timeout_atomic(sdma->regs + SDMA_H_STATSTOP,
+ reg, !(reg & 1), 1, 500);
+ if (ret)
dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
- }
/* Set bits of CONFIG register with dynamic context switching */
if (readl(sdma->regs + SDMA_H_CONFIG) == 0)
writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
- return ret ? 0 : -ETIMEDOUT;
+ return ret;
}
static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
@@ -727,9 +721,9 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
unsigned long stat;
stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
- /* not interested in channel 0 interrupts */
- stat &= ~1;
writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
+ /* channel 0 is special and not handled here, see sdma_run_channel0() */
+ stat &= ~1;
while (stat) {
int channel = fls(stat) - 1;
@@ -758,7 +752,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
* These are needed once we start to support transfers between
* two peripherals or memory-to-memory transfers
*/
- int per_2_per = 0, emi_2_emi = 0;
+ int per_2_per = 0;
sdmac->pc_from_device = 0;
sdmac->pc_to_device = 0;
@@ -766,7 +760,6 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
switch (peripheral_type) {
case IMX_DMATYPE_MEMORY:
- emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
break;
case IMX_DMATYPE_DSP:
emi_2_per = sdma->script_addrs->bp_2_ap_addr;
@@ -999,8 +992,6 @@ static int sdma_config_channel(struct dma_chan *chan)
} else
__set_bit(sdmac->event_id0, sdmac->event_mask);
- /* Watermark Level */
- sdmac->watermark_level |= sdmac->watermark_level;
/* Address */
sdmac->shp_addr = sdmac->per_address;
sdmac->per_addr = sdmac->per_address2;
@@ -1715,6 +1706,8 @@ static int sdma_probe(struct platform_device *pdev)
if (ret)
return ret;
+ sdma->irq = irq;
+
sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
if (!sdma->script_addrs)
return -ENOMEM;
@@ -1840,6 +1833,7 @@ static int sdma_remove(struct platform_device *pdev)
struct sdma_engine *sdma = platform_get_drvdata(pdev);
int i;
+ devm_free_irq(&pdev->dev, sdma->irq, sdma);
dma_async_device_unregister(&sdma->dma_device);
kfree(sdma->script_addrs);
/* Kill the tasklet */
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index d406056e8..7145f7716 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -1212,7 +1212,7 @@ static void ioat_shutdown(struct pci_dev *pdev)
ioat_disable_interrupts(ioat_dma);
}
-void ioat_resume(struct ioatdma_device *ioat_dma)
+static void ioat_resume(struct ioatdma_device *ioat_dma)
{
struct ioatdma_chan *ioat_chan;
u32 chanerr;
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 1ba2fd738..39de89801 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -102,6 +102,7 @@ struct k3_dma_dev {
struct clk *clk;
u32 dma_channels;
u32 dma_requests;
+ unsigned int irq;
};
#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)
@@ -425,10 +426,9 @@ static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
- if (!ds) {
- dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc);
+ if (!ds)
return NULL;
- }
+
ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
ds->size = len;
ds->desc_num = num;
@@ -481,10 +481,9 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
}
ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC);
- if (!ds) {
- dev_dbg(chan->device->dev, "vchan %p: kzalloc fail\n", &c->vc);
+ if (!ds)
return NULL;
- }
+
ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]);
ds->desc_num = num;
num = 0;
@@ -705,6 +704,8 @@ static int k3_dma_probe(struct platform_device *op)
if (ret)
return ret;
+ d->irq = irq;
+
/* init phy channel */
d->phy = devm_kzalloc(&op->dev,
d->dma_channels * sizeof(struct k3_dma_phy), GFP_KERNEL);
@@ -759,7 +760,7 @@ static int k3_dma_probe(struct platform_device *op)
ret = dma_async_device_register(&d->slave);
if (ret)
- return ret;
+ goto dma_async_register_fail;
ret = of_dma_controller_register((&op->dev)->of_node,
k3_of_dma_simple_xlate, d);
@@ -776,6 +777,8 @@ static int k3_dma_probe(struct platform_device *op)
of_dma_register_fail:
dma_async_device_unregister(&d->slave);
+dma_async_register_fail:
+ clk_disable_unprepare(d->clk);
return ret;
}
@@ -787,6 +790,8 @@ static int k3_dma_remove(struct platform_device *op)
dma_async_device_unregister(&d->slave);
of_dma_controller_free((&op->dev)->of_node);
+ devm_free_irq(&op->dev, d->irq, d);
+
list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
list_del(&c->vc.chan.device_node);
tasklet_kill(&c->vc.task);
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 56f1fd68b..f4b25fb0d 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -931,6 +931,25 @@ static void dma_do_tasklet(unsigned long data)
static int mmp_pdma_remove(struct platform_device *op)
{
struct mmp_pdma_device *pdev = platform_get_drvdata(op);
+ struct mmp_pdma_phy *phy;
+ int i, irq = 0, irq_num = 0;
+
+ for (i = 0; i < pdev->dma_channels; i++) {
+ if (platform_get_irq(op, i) > 0)
+ irq_num++;
+ }
+
+ if (irq_num != pdev->dma_channels) {
+ irq = platform_get_irq(op, 0);
+ devm_free_irq(&op->dev, irq, pdev);
+ } else {
+ for (i = 0; i < pdev->dma_channels; i++) {
+ phy = &pdev->phy[i];
+ irq = platform_get_irq(op, i);
+ devm_free_irq(&op->dev, irq, phy);
+ }
+ }
dma_async_device_unregister(&pdev->device);
return 0;
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 3df042260..b3441f57a 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -404,7 +404,7 @@ static void mmp_tdma_free_chan_resources(struct dma_chan *chan)
return;
}
-struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac)
+static struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac)
{
struct gen_pool *gpool;
int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
@@ -551,10 +551,9 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
/* alloc channel */
tdmac = devm_kzalloc(tdev->dev, sizeof(*tdmac), GFP_KERNEL);
- if (!tdmac) {
- dev_err(tdev->dev, "no free memory for DMA channels!\n");
+ if (!tdmac)
return -ENOMEM;
- }
+
if (irq)
tdmac->irq = irq;
tdmac->dev = tdev->dev;
@@ -593,7 +592,7 @@ static bool mmp_tdma_filter_fn(struct dma_chan *chan, void *fn_param)
return true;
}
-struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec,
+static struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
struct mmp_tdma_device *tdev = ofdma->of_dma_data;
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
index 631c4435e..a6e642792 100644
--- a/drivers/dma/moxart-dma.c
+++ b/drivers/dma/moxart-dma.c
@@ -148,6 +148,7 @@ struct moxart_chan {
struct moxart_dmadev {
struct dma_device dma_slave;
struct moxart_chan slave_chans[APB_DMA_MAX_CHANNEL];
+ unsigned int irq;
};
struct moxart_filter_data {
@@ -574,10 +575,8 @@ static int moxart_probe(struct platform_device *pdev)
struct moxart_dmadev *mdc;
mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
- if (!mdc) {
- dev_err(dev, "can't allocate DMA container\n");
+ if (!mdc)
return -ENOMEM;
- }
irq = irq_of_parse_and_map(node, 0);
if (irq == NO_IRQ) {
@@ -617,6 +616,7 @@ static int moxart_probe(struct platform_device *pdev)
dev_err(dev, "devm_request_irq failed\n");
return ret;
}
+ mdc->irq = irq;
ret = dma_async_device_register(&mdc->dma_slave);
if (ret) {
@@ -640,6 +640,8 @@ static int moxart_remove(struct platform_device *pdev)
{
struct moxart_dmadev *m = platform_get_drvdata(pdev);
+ devm_free_irq(&pdev->dev, m->irq, m);
+
dma_async_device_unregister(&m->dma_slave);
if (pdev->dev.of_node)
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index ccadafa51..fa86592c7 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -1110,6 +1110,7 @@ static int mpc_dma_remove(struct platform_device *op)
}
free_irq(mdma->irq, mdma);
irq_dispose_mapping(mdma->irq);
+ tasklet_kill(&mdma->tasklet);
return 0;
}
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index d0446a759..f4c9f98ec 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1057,7 +1057,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
err_free_irq:
free_irq(mv_chan->irq, mv_chan);
- err_free_dma:
+err_free_dma:
dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
return ERR_PTR(ret);
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
new file mode 100644
index 000000000..a28a01fcb
--- /dev/null
+++ b/drivers/dma/mv_xor_v2.c
@@ -0,0 +1,878 @@
+/*
+ * Copyright (C) 2015-2016 Marvell International Ltd.
+ *
+ * This program is free software: you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#include "dmaengine.h"
+
+/* DMA Engine Registers */
+#define MV_XOR_V2_DMA_DESQ_BALR_OFF 0x000
+#define MV_XOR_V2_DMA_DESQ_BAHR_OFF 0x004
+#define MV_XOR_V2_DMA_DESQ_SIZE_OFF 0x008
+#define MV_XOR_V2_DMA_DESQ_DONE_OFF 0x00C
+#define MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK 0x7FFF
+#define MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT 0
+#define MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK 0x1FFF
+#define MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT 16
+#define MV_XOR_V2_DMA_DESQ_ARATTR_OFF 0x010
+#define MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK 0x3F3F
+#define MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE 0x202
+#define MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE 0x3C3C
+#define MV_XOR_V2_DMA_IMSG_CDAT_OFF 0x014
+#define MV_XOR_V2_DMA_IMSG_THRD_OFF 0x018
+#define MV_XOR_V2_DMA_IMSG_THRD_MASK 0x7FFF
+#define MV_XOR_V2_DMA_IMSG_THRD_SHIFT 0x0
+#define MV_XOR_V2_DMA_DESQ_AWATTR_OFF 0x01C
+ /* Same flags as MV_XOR_V2_DMA_DESQ_ARATTR_OFF */
+#define MV_XOR_V2_DMA_DESQ_ALLOC_OFF 0x04C
+#define MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK 0xFFFF
+#define MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT 16
+#define MV_XOR_V2_DMA_IMSG_BALR_OFF 0x050
+#define MV_XOR_V2_DMA_IMSG_BAHR_OFF 0x054
+#define MV_XOR_V2_DMA_DESQ_CTRL_OFF 0x100
+#define MV_XOR_V2_DMA_DESQ_CTRL_32B 1
+#define MV_XOR_V2_DMA_DESQ_CTRL_128B 7
+#define MV_XOR_V2_DMA_DESQ_STOP_OFF 0x800
+#define MV_XOR_V2_DMA_DESQ_DEALLOC_OFF 0x804
+#define MV_XOR_V2_DMA_DESQ_ADD_OFF 0x808
+
+/* XOR Global registers */
+#define MV_XOR_V2_GLOB_BW_CTRL 0x4
+#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT 0
+#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL 64
+#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT 8
+#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL 8
+#define MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT 12
+#define MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL 4
+#define MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT 16
+#define MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL 4
+#define MV_XOR_V2_GLOB_PAUSE 0x014
+#define MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL 0x8
+#define MV_XOR_V2_GLOB_SYS_INT_CAUSE 0x200
+#define MV_XOR_V2_GLOB_SYS_INT_MASK 0x204
+#define MV_XOR_V2_GLOB_MEM_INT_CAUSE 0x220
+#define MV_XOR_V2_GLOB_MEM_INT_MASK 0x224
+
+#define MV_XOR_V2_MIN_DESC_SIZE 32
+#define MV_XOR_V2_EXT_DESC_SIZE 128
+
+#define MV_XOR_V2_DESC_RESERVED_SIZE 12
+#define MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE 12
+
+#define MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF 8
+
+/*
+ * Descriptor queue size. With 32-byte descriptors up to 2^14
+ * descriptors are allowed; with 128-byte descriptors up to 2^12
+ * descriptors are allowed. This driver uses 128-byte descriptors,
+ * but experimentation has shown that a set of 1024 descriptors is
+ * sufficient to reach a good level of performance.
+ */
+#define MV_XOR_V2_DESC_NUM 1024
+
+/**
+ * struct mv_xor_v2_descriptor - DMA HW descriptor
+ * @desc_id: used by S/W and is not affected by H/W.
+ * @flags: error and status flags
+ * @crc32_result: CRC32 calculation result
+ * @desc_ctrl: operation mode and control flags
+ * @buff_size: amount of bytes to be processed
+ * @fill_pattern_src_addr: Fill-Pattern or Source-Address and
+ * AW-Attributes
+ * @data_buff_addr: Source (and might be RAID6 destination)
+ * addresses of data buffers in RAID5 and RAID6
+ * @reserved: reserved
+ */
+struct mv_xor_v2_descriptor {
+ u16 desc_id;
+ u16 flags;
+ u32 crc32_result;
+ u32 desc_ctrl;
+
+ /* Definitions for desc_ctrl */
+#define DESC_NUM_ACTIVE_D_BUF_SHIFT 22
+#define DESC_OP_MODE_SHIFT 28
+#define DESC_OP_MODE_NOP 0 /* Idle operation */
+#define DESC_OP_MODE_MEMCPY 1 /* Pure-DMA operation */
+#define DESC_OP_MODE_MEMSET 2 /* Mem-Fill operation */
+#define DESC_OP_MODE_MEMINIT 3 /* Mem-Init operation */
+#define DESC_OP_MODE_MEM_COMPARE 4 /* Mem-Compare operation */
+#define DESC_OP_MODE_CRC32 5 /* CRC32 calculation */
+#define DESC_OP_MODE_XOR 6 /* RAID5 (XOR) operation */
+#define DESC_OP_MODE_RAID6 7 /* RAID6 P&Q-generation */
+#define DESC_OP_MODE_RAID6_REC 8 /* RAID6 Recovery */
+#define DESC_Q_BUFFER_ENABLE BIT(16)
+#define DESC_P_BUFFER_ENABLE BIT(17)
+#define DESC_IOD BIT(27)
+
+ u32 buff_size;
+ u32 fill_pattern_src_addr[4];
+ u32 data_buff_addr[MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE];
+ u32 reserved[MV_XOR_V2_DESC_RESERVED_SIZE];
+};
+
+/**
+ * struct mv_xor_v2_device - implements an XOR device
+ * @lock: lock for the engine
+ * @dma_base: memory mapped DMA register base
+ * @glob_base: memory mapped global register base
+ * @clk: reference to the engine's clock
+ * @irq_tasklet: tasklet that handles descriptor completion callbacks
+ * @free_sw_desc: linked list of free SW descriptors
+ * @dmadev: dma device
+ * @dmachan: dma channel
+ * @hw_desq: HW descriptors queue
+ * @hw_desq_virt: virtual address of DESCQ
+ * @sw_desq: SW descriptors queue
+ * @desc_size: HW descriptor size
+ * @npendings: number of pending descriptors (for which tx_submit has
+ * been called, but not yet issue_pending)
+ */
+struct mv_xor_v2_device {
+ spinlock_t lock;
+ void __iomem *dma_base;
+ void __iomem *glob_base;
+ struct clk *clk;
+ struct tasklet_struct irq_tasklet;
+ struct list_head free_sw_desc;
+ struct dma_device dmadev;
+ struct dma_chan dmachan;
+ dma_addr_t hw_desq;
+ struct mv_xor_v2_descriptor *hw_desq_virt;
+ struct mv_xor_v2_sw_desc *sw_desq;
+ int desc_size;
+ unsigned int npendings;
+};
+
+/**
+ * struct mv_xor_v2_sw_desc - implements an XOR SW descriptor
+ * @idx: descriptor index
+ * @async_tx: support for the async_tx api
+ * @hw_desc: associated HW descriptor
+ * @free_list: node in the free SW descriptors list
+ */
+struct mv_xor_v2_sw_desc {
+ int idx;
+ struct dma_async_tx_descriptor async_tx;
+ struct mv_xor_v2_descriptor hw_desc;
+ struct list_head free_list;
+};
+
+/*
+ * Fill one source buffer address into a HW descriptor
+ */
+static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev,
+ struct mv_xor_v2_descriptor *desc,
+ dma_addr_t src, int index)
+{
+ int arr_index = ((index >> 1) * 3);
+
+ /*
+ * Fill the buffer's addresses to the descriptor.
+ *
+ * The format of the buffers address for 2 sequential buffers
+ * X and X + 1:
+ *
+ * First word: Buffer-DX-Address-Low[31:0]
+ * Second word: Buffer-DX+1-Address-Low[31:0]
+ * Third word: DX+1-Buffer-Address-High[47:32] [31:16]
+ * DX-Buffer-Address-High[47:32] [15:0]
+ */
+ if ((index & 0x1) == 0) {
+ desc->data_buff_addr[arr_index] = lower_32_bits(src);
+
+ desc->data_buff_addr[arr_index + 2] &= ~0xFFFF;
+ desc->data_buff_addr[arr_index + 2] |=
+ upper_32_bits(src) & 0xFFFF;
+ } else {
+ desc->data_buff_addr[arr_index + 1] =
+ lower_32_bits(src);
+
+ desc->data_buff_addr[arr_index + 2] &= ~0xFFFF0000;
+ desc->data_buff_addr[arr_index + 2] |=
+ (upper_32_bits(src) & 0xFFFF) << 16;
+ }
+}
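+
+/*
+ * Worked example (addresses are assumptions, for illustration only):
+ * with src[0] = 0x0000111120000000 and src[1] = 0x0000222230000000,
+ * the first address triplet in data_buff_addr[] becomes:
+ *	word0 = 0x20000000			(D0 address low)
+ *	word1 = 0x30000000			(D1 address low)
+ *	word2 = (0x2222 << 16) | 0x1111 = 0x22221111
+ */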
+
+/*
+ * Return the next available index in the DESQ.
+ */
+static int mv_xor_v2_get_desq_write_ptr(struct mv_xor_v2_device *xor_dev)
+{
+ /* read the index for the next available descriptor in the DESQ */
+ u32 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ALLOC_OFF);
+
+ return ((reg >> MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT)
+ & MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK);
+}
+
+/*
+ * notify the engine of new descriptors, and update the available index.
+ */
+static void mv_xor_v2_add_desc_to_desq(struct mv_xor_v2_device *xor_dev,
+ int num_of_desc)
+{
+ /* write the number of new descriptors in the DESQ. */
+ writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ADD_OFF);
+}
+
+/*
+ * free HW descriptors
+ */
+static void mv_xor_v2_free_desc_from_desq(struct mv_xor_v2_device *xor_dev,
+ int num_of_desc)
+{
+ /* write the number of new descriptors in the DESQ. */
+ writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DEALLOC_OFF);
+}
+
+/*
+ * Set descriptor size
+ * Return the HW descriptor size in bytes
+ */
+static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev)
+{
+ writel(MV_XOR_V2_DMA_DESQ_CTRL_128B,
+ xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_CTRL_OFF);
+
+ return MV_XOR_V2_EXT_DESC_SIZE;
+}
+
+/*
+ * Set the IMSG threshold
+ */
+static inline
+void mv_xor_v2_set_imsg_thrd(struct mv_xor_v2_device *xor_dev, int thrd_val)
+{
+ u32 reg;
+
+ reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
+
+	reg &= ~(MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
+ reg |= (thrd_val << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
+
+ writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
+}
+
+static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
+{
+ struct mv_xor_v2_device *xor_dev = data;
+ unsigned int ndescs;
+ u32 reg;
+
+ reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF);
+
+ ndescs = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) &
+ MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK);
+
+ /* No descriptors to process */
+ if (!ndescs)
+ return IRQ_NONE;
+
+ /*
+ * Update IMSG threshold, to disable new IMSG interrupts until
+ * end of the tasklet
+ */
+ mv_xor_v2_set_imsg_thrd(xor_dev, MV_XOR_V2_DESC_NUM);
+
+ /* schedule a tasklet to handle descriptors callbacks */
+ tasklet_schedule(&xor_dev->irq_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * submit a descriptor to the DMA engine
+ */
+static dma_cookie_t
+mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ int desq_ptr;
+ void *dest_hw_desc;
+ dma_cookie_t cookie;
+ struct mv_xor_v2_sw_desc *sw_desc =
+ container_of(tx, struct mv_xor_v2_sw_desc, async_tx);
+ struct mv_xor_v2_device *xor_dev =
+ container_of(tx->chan, struct mv_xor_v2_device, dmachan);
+
+ dev_dbg(xor_dev->dmadev.dev,
+ "%s sw_desc %p: async_tx %p\n",
+ __func__, sw_desc, &sw_desc->async_tx);
+
+	/* assign cookie */
+ spin_lock_bh(&xor_dev->lock);
+ cookie = dma_cookie_assign(tx);
+
+ /* get the next available slot in the DESQ */
+ desq_ptr = mv_xor_v2_get_desq_write_ptr(xor_dev);
+
+ /* copy the HW descriptor from the SW descriptor to the DESQ */
+ dest_hw_desc = xor_dev->hw_desq_virt + desq_ptr;
+
+ memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);
+
+ xor_dev->npendings++;
+
+ spin_unlock_bh(&xor_dev->lock);
+
+ return cookie;
+}
+
+/*
+ * Prepare a SW descriptor
+ */
+static struct mv_xor_v2_sw_desc *
+mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
+{
+ struct mv_xor_v2_sw_desc *sw_desc;
+
+ /* Lock the channel */
+ spin_lock_bh(&xor_dev->lock);
+
+ if (list_empty(&xor_dev->free_sw_desc)) {
+ spin_unlock_bh(&xor_dev->lock);
+ /* schedule tasklet to free some descriptors */
+ tasklet_schedule(&xor_dev->irq_tasklet);
+ return NULL;
+ }
+
+ /* get a free SW descriptor from the SW DESQ */
+ sw_desc = list_first_entry(&xor_dev->free_sw_desc,
+ struct mv_xor_v2_sw_desc, free_list);
+ list_del(&sw_desc->free_list);
+
+ /* Release the channel */
+ spin_unlock_bh(&xor_dev->lock);
+
+ /* set the async tx descriptor */
+ dma_async_tx_descriptor_init(&sw_desc->async_tx, &xor_dev->dmachan);
+ sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
+ async_tx_ack(&sw_desc->async_tx);
+
+ return sw_desc;
+}
+
+/*
+ * Prepare a HW descriptor for a memcpy operation
+ */
+static struct dma_async_tx_descriptor *
+mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct mv_xor_v2_sw_desc *sw_desc;
+ struct mv_xor_v2_descriptor *hw_descriptor;
+ struct mv_xor_v2_device *xor_dev;
+
+ xor_dev = container_of(chan, struct mv_xor_v2_device, dmachan);
+
+ dev_dbg(xor_dev->dmadev.dev,
+ "%s len: %zu src %pad dest %pad flags: %ld\n",
+ __func__, len, &src, &dest, flags);
+
+	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+	if (!sw_desc)
+		return NULL;
+
+	sw_desc->async_tx.flags = flags;
+
+ /* set the HW descriptor */
+ hw_descriptor = &sw_desc->hw_desc;
+
+ /* save the SW descriptor ID to restore when operation is done */
+ hw_descriptor->desc_id = sw_desc->idx;
+
+ /* Set the MEMCPY control word */
+ hw_descriptor->desc_ctrl =
+ DESC_OP_MODE_MEMCPY << DESC_OP_MODE_SHIFT;
+
+ if (flags & DMA_PREP_INTERRUPT)
+ hw_descriptor->desc_ctrl |= DESC_IOD;
+
+ /* Set source address */
+ hw_descriptor->fill_pattern_src_addr[0] = lower_32_bits(src);
+ hw_descriptor->fill_pattern_src_addr[1] =
+ upper_32_bits(src) & 0xFFFF;
+
+ /* Set Destination address */
+ hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest);
+ hw_descriptor->fill_pattern_src_addr[3] =
+ upper_32_bits(dest) & 0xFFFF;
+
+ /* Set buffers size */
+ hw_descriptor->buff_size = len;
+
+ /* return the async tx descriptor */
+ return &sw_desc->async_tx;
+}
+
+/*
+ * Prepare a HW descriptor for a XOR operation
+ */
+static struct dma_async_tx_descriptor *
+mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
+ unsigned int src_cnt, size_t len, unsigned long flags)
+{
+ struct mv_xor_v2_sw_desc *sw_desc;
+ struct mv_xor_v2_descriptor *hw_descriptor;
+ struct mv_xor_v2_device *xor_dev =
+ container_of(chan, struct mv_xor_v2_device, dmachan);
+ int i;
+
+ if (src_cnt > MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF || src_cnt < 1)
+ return NULL;
+
+ dev_dbg(xor_dev->dmadev.dev,
+ "%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
+ __func__, src_cnt, len, &dest, flags);
+
+	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+	if (!sw_desc)
+		return NULL;
+
+	sw_desc->async_tx.flags = flags;
+
+ /* set the HW descriptor */
+ hw_descriptor = &sw_desc->hw_desc;
+
+ /* save the SW descriptor ID to restore when operation is done */
+ hw_descriptor->desc_id = sw_desc->idx;
+
+ /* Set the XOR control word */
+ hw_descriptor->desc_ctrl =
+ DESC_OP_MODE_XOR << DESC_OP_MODE_SHIFT;
+ hw_descriptor->desc_ctrl |= DESC_P_BUFFER_ENABLE;
+
+ if (flags & DMA_PREP_INTERRUPT)
+ hw_descriptor->desc_ctrl |= DESC_IOD;
+
+ /* Set the data buffers */
+ for (i = 0; i < src_cnt; i++)
+ mv_xor_v2_set_data_buffers(xor_dev, hw_descriptor, src[i], i);
+
+ hw_descriptor->desc_ctrl |=
+ src_cnt << DESC_NUM_ACTIVE_D_BUF_SHIFT;
+
+ /* Set Destination address */
+ hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest);
+ hw_descriptor->fill_pattern_src_addr[3] =
+ upper_32_bits(dest) & 0xFFFF;
+
+ /* Set buffers size */
+ hw_descriptor->buff_size = len;
+
+ /* return the async tx descriptor */
+ return &sw_desc->async_tx;
+}
+
+/*
+ * Prepare a HW descriptor for interrupt operation.
+ */
+static struct dma_async_tx_descriptor *
+mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
+{
+ struct mv_xor_v2_sw_desc *sw_desc;
+ struct mv_xor_v2_descriptor *hw_descriptor;
+ struct mv_xor_v2_device *xor_dev =
+ container_of(chan, struct mv_xor_v2_device, dmachan);
+
+	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+	if (!sw_desc)
+		return NULL;
+
+ /* set the HW descriptor */
+ hw_descriptor = &sw_desc->hw_desc;
+
+ /* save the SW descriptor ID to restore when operation is done */
+ hw_descriptor->desc_id = sw_desc->idx;
+
+ /* Set the INTERRUPT control word */
+ hw_descriptor->desc_ctrl =
+ DESC_OP_MODE_NOP << DESC_OP_MODE_SHIFT;
+ hw_descriptor->desc_ctrl |= DESC_IOD;
+
+ /* return the async tx descriptor */
+ return &sw_desc->async_tx;
+}
+
+/*
+ * push pending transactions to hardware
+ */
+static void mv_xor_v2_issue_pending(struct dma_chan *chan)
+{
+ struct mv_xor_v2_device *xor_dev =
+ container_of(chan, struct mv_xor_v2_device, dmachan);
+
+ spin_lock_bh(&xor_dev->lock);
+
+ /*
+ * update the engine with the number of descriptors to
+ * process
+ */
+ mv_xor_v2_add_desc_to_desq(xor_dev, xor_dev->npendings);
+ xor_dev->npendings = 0;
+
+ /* Activate the channel */
+ writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
+
+ spin_unlock_bh(&xor_dev->lock);
+}
+
+static inline
+int mv_xor_v2_get_pending_params(struct mv_xor_v2_device *xor_dev,
+ int *pending_ptr)
+{
+ u32 reg;
+
+ reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF);
+
+ /* get the next pending descriptor index */
+ *pending_ptr = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT) &
+ MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK);
+
+	/* get the number of pending descriptors to handle */
+ return ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) &
+ MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK);
+}
+
+/*
+ * handle the descriptors after HW processing
+ */
+static void mv_xor_v2_tasklet(unsigned long data)
+{
+ struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data;
+ int pending_ptr, num_of_pending, i;
+ struct mv_xor_v2_descriptor *next_pending_hw_desc = NULL;
+ struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;
+
+ dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__);
+
+ /* get the pending descriptors parameters */
+ num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr);
+
+	/* loop over the completed descriptors */
+	for (i = 0; i < num_of_pending; i++) {
+		/* wrap around the end of the DESQ ring */
+		if (pending_ptr >= MV_XOR_V2_DESC_NUM)
+			pending_ptr = 0;
+
+		/* point at the HW descriptor to handle next */
+		next_pending_hw_desc = xor_dev->hw_desq_virt + pending_ptr;
+
+ /* get the SW descriptor related to the HW descriptor */
+ next_pending_sw_desc =
+ &xor_dev->sw_desq[next_pending_hw_desc->desc_id];
+
+ /* call the callback */
+ if (next_pending_sw_desc->async_tx.cookie > 0) {
+			/*
+			 * update the channel's completed cookie - no
+			 * lock is required; the IMSG threshold provides
+			 * the serialization
+			 */
+ dma_cookie_complete(&next_pending_sw_desc->async_tx);
+
+ if (next_pending_sw_desc->async_tx.callback)
+ next_pending_sw_desc->async_tx.callback(
+ next_pending_sw_desc->async_tx.callback_param);
+
+ dma_descriptor_unmap(&next_pending_sw_desc->async_tx);
+ }
+
+ dma_run_dependencies(&next_pending_sw_desc->async_tx);
+
+ /* Lock the channel */
+ spin_lock_bh(&xor_dev->lock);
+
+ /* add the SW descriptor to the free descriptors list */
+ list_add(&next_pending_sw_desc->free_list,
+ &xor_dev->free_sw_desc);
+
+ /* Release the channel */
+ spin_unlock_bh(&xor_dev->lock);
+
+ /* increment the next descriptor */
+ pending_ptr++;
+ }
+
+ if (num_of_pending != 0) {
+		/* free the processed descriptors */
+ mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending);
+ }
+
+ /* Update IMSG threshold, to enable new IMSG interrupts */
+ mv_xor_v2_set_imsg_thrd(xor_dev, 0);
+}
+
+/*
+ * Set DMA Interrupt-message (IMSG) parameters
+ */
+static void mv_xor_v2_set_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+ struct mv_xor_v2_device *xor_dev = dev_get_drvdata(desc->dev);
+
+ writel(msg->address_lo,
+ xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BALR_OFF);
+ writel(msg->address_hi & 0xFFFF,
+ xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BAHR_OFF);
+ writel(msg->data,
+ xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_CDAT_OFF);
+}
+
+static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
+{
+ u32 reg;
+
+ /* write the DESQ size to the DMA engine */
+ writel(MV_XOR_V2_DESC_NUM,
+ xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_SIZE_OFF);
+
+	/* write the DESQ address to the DMA engine */
+ writel(xor_dev->hw_desq & 0xFFFFFFFF,
+ xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BALR_OFF);
+ writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32,
+ xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);
+
+ /* enable the DMA engine */
+ writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
+
+ /*
+ * This is a temporary solution, until we activate the
+ * SMMU. Set the attributes for reading & writing data buffers
+ * & descriptors to:
+ *
+ * - OuterShareable - Snoops will be performed on CPU caches
+ * - Enable cacheable - Bufferable, Modifiable, Other Allocate
+ * and Allocate
+ */
+ reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF);
+ reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK;
+ reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE |
+ MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE;
+ writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF);
+
+ reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF);
+ reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK;
+ reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE |
+ MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE;
+ writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF);
+
+ /* BW CTRL - set values to optimize the XOR performance:
+ *
+ * - Set WrBurstLen & RdBurstLen - the unit will issue
+ * maximum of 256B write/read transactions.
+ * - Limit the number of outstanding write & read data
+ * (OBB/IBB) requests to the maximal value.
+ */
+ reg = ((MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL <<
+ MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT) |
+ (MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL <<
+ MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT) |
+ (MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL <<
+ MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT) |
+ (MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL <<
+ MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT));
+ writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_BW_CTRL);
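+	/*
+	 * Illustrative value (an assumption, not from the original
+	 * source): with the constants above, the value just written is
+	 * (64 << 0) | (8 << 8) | (4 << 12) | (4 << 16) = 0x44840.
+	 */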
+
+ /* Disable the AXI timer feature */
+ reg = readl(xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
+ reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL;
+ writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
+
+ return 0;
+}
+
+static int mv_xor_v2_probe(struct platform_device *pdev)
+{
+ struct mv_xor_v2_device *xor_dev;
+ struct resource *res;
+ int i, ret = 0;
+ struct dma_device *dma_dev;
+ struct mv_xor_v2_sw_desc *sw_desc;
+ struct msi_desc *msi_desc;
+
+ BUILD_BUG_ON(sizeof(struct mv_xor_v2_descriptor) !=
+ MV_XOR_V2_EXT_DESC_SIZE);
+
+ xor_dev = devm_kzalloc(&pdev->dev, sizeof(*xor_dev), GFP_KERNEL);
+ if (!xor_dev)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xor_dev->dma_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xor_dev->dma_base))
+ return PTR_ERR(xor_dev->dma_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ xor_dev->glob_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xor_dev->glob_base))
+ return PTR_ERR(xor_dev->glob_base);
+
+ platform_set_drvdata(pdev, xor_dev);
+
+ xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (!IS_ERR(xor_dev->clk)) {
+ ret = clk_prepare_enable(xor_dev->clk);
+ if (ret)
+ return ret;
+ }
+
+ ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,
+ mv_xor_v2_set_msi_msg);
+ if (ret)
+ goto disable_clk;
+
+	msi_desc = first_msi_entry(&pdev->dev);
+	if (!msi_desc) {
+		ret = -ENODEV;
+		goto free_msi_irqs;
+	}
+
+ ret = devm_request_irq(&pdev->dev, msi_desc->irq,
+ mv_xor_v2_interrupt_handler, 0,
+ dev_name(&pdev->dev), xor_dev);
+ if (ret)
+ goto free_msi_irqs;
+
+ tasklet_init(&xor_dev->irq_tasklet, mv_xor_v2_tasklet,
+ (unsigned long) xor_dev);
+
+ xor_dev->desc_size = mv_xor_v2_set_desc_size(xor_dev);
+
+ dma_cookie_init(&xor_dev->dmachan);
+
+ /*
+ * allocate coherent memory for hardware descriptors
+ * note: writecombine gives slightly better performance, but
+ * requires that we explicitly flush the writes
+ */
+ xor_dev->hw_desq_virt =
+ dma_alloc_coherent(&pdev->dev,
+ xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
+ &xor_dev->hw_desq, GFP_KERNEL);
+ if (!xor_dev->hw_desq_virt) {
+ ret = -ENOMEM;
+ goto free_msi_irqs;
+ }
+
+ /* alloc memory for the SW descriptors */
+ xor_dev->sw_desq = devm_kzalloc(&pdev->dev, sizeof(*sw_desc) *
+ MV_XOR_V2_DESC_NUM, GFP_KERNEL);
+ if (!xor_dev->sw_desq) {
+ ret = -ENOMEM;
+ goto free_hw_desq;
+ }
+
+ spin_lock_init(&xor_dev->lock);
+
+ /* init the free SW descriptors list */
+ INIT_LIST_HEAD(&xor_dev->free_sw_desc);
+
+ /* add all SW descriptors to the free list */
+ for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
+ xor_dev->sw_desq[i].idx = i;
+ list_add(&xor_dev->sw_desq[i].free_list,
+ &xor_dev->free_sw_desc);
+ }
+
+ dma_dev = &xor_dev->dmadev;
+
+ /* set DMA capabilities */
+ dma_cap_zero(dma_dev->cap_mask);
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_cap_set(DMA_XOR, dma_dev->cap_mask);
+ dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
+
+ /* init dma link list */
+ INIT_LIST_HEAD(&dma_dev->channels);
+
+ /* set base routines */
+ dma_dev->device_tx_status = dma_cookie_status;
+ dma_dev->device_issue_pending = mv_xor_v2_issue_pending;
+ dma_dev->dev = &pdev->dev;
+
+ dma_dev->device_prep_dma_memcpy = mv_xor_v2_prep_dma_memcpy;
+ dma_dev->device_prep_dma_interrupt = mv_xor_v2_prep_dma_interrupt;
+ dma_dev->max_xor = 8;
+ dma_dev->device_prep_dma_xor = mv_xor_v2_prep_dma_xor;
+
+ xor_dev->dmachan.device = dma_dev;
+
+ list_add_tail(&xor_dev->dmachan.device_node,
+ &dma_dev->channels);
+
+ mv_xor_v2_descq_init(xor_dev);
+
+ ret = dma_async_device_register(dma_dev);
+ if (ret)
+ goto free_hw_desq;
+
+ dev_notice(&pdev->dev, "Marvell Version 2 XOR driver\n");
+
+ return 0;
+
+free_hw_desq:
+ dma_free_coherent(&pdev->dev,
+ xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
+ xor_dev->hw_desq_virt, xor_dev->hw_desq);
+free_msi_irqs:
+ platform_msi_domain_free_irqs(&pdev->dev);
+disable_clk:
+ if (!IS_ERR(xor_dev->clk))
+ clk_disable_unprepare(xor_dev->clk);
+ return ret;
+}
+
+static int mv_xor_v2_remove(struct platform_device *pdev)
+{
+ struct mv_xor_v2_device *xor_dev = platform_get_drvdata(pdev);
+
+ dma_async_device_unregister(&xor_dev->dmadev);
+
+ dma_free_coherent(&pdev->dev,
+ xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
+ xor_dev->hw_desq_virt, xor_dev->hw_desq);
+
+ platform_msi_domain_free_irqs(&pdev->dev);
+
+ clk_disable_unprepare(xor_dev->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id mv_xor_v2_dt_ids[] = {
+ { .compatible = "marvell,xor-v2", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mv_xor_v2_dt_ids);
+#endif
+
+static struct platform_driver mv_xor_v2_driver = {
+ .probe = mv_xor_v2_probe,
+ .remove = mv_xor_v2_remove,
+ .driver = {
+ .name = "mv_xor_v2",
+ .of_match_table = of_match_ptr(mv_xor_v2_dt_ids),
+ },
+};
+
+module_platform_driver(mv_xor_v2_driver);
+
+MODULE_DESCRIPTION("DMA engine driver for Marvell's Version 2 of XOR engine");
+MODULE_LICENSE("GPL");
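+
+/*
+ * Illustrative client usage (not part of this driver; "chan", "dest",
+ * "srcs", "src_cnt" and "len" are assumed to be set up by the caller):
+ *
+ *	struct dma_async_tx_descriptor *tx;
+ *	dma_cookie_t cookie;
+ *
+ *	tx = dmaengine_prep_dma_xor(chan, dest, srcs, src_cnt, len,
+ *				    DMA_PREP_INTERRUPT);
+ *	cookie = dmaengine_submit(tx);
+ *	dma_async_issue_pending(chan);
+ *	dma_sync_wait(chan, cookie);
+ */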
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 2b5a198ac..08c45c185 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -227,6 +227,7 @@ struct nbpf_device {
void __iomem *base;
struct clk *clk;
const struct nbpf_config *config;
+ unsigned int eirq;
struct nbpf_channel chan[];
};
@@ -1300,10 +1301,9 @@ static int nbpf_probe(struct platform_device *pdev)
nbpf = devm_kzalloc(dev, sizeof(*nbpf) + num_channels *
sizeof(nbpf->chan[0]), GFP_KERNEL);
- if (!nbpf) {
- dev_err(dev, "Memory allocation failed\n");
+ if (!nbpf)
return -ENOMEM;
- }
+
dma_dev = &nbpf->dma_dev;
dma_dev->dev = dev;
@@ -1376,6 +1376,7 @@ static int nbpf_probe(struct platform_device *pdev)
IRQF_SHARED, "dma error", nbpf);
if (ret < 0)
return ret;
+ nbpf->eirq = eirq;
INIT_LIST_HEAD(&dma_dev->channels);
@@ -1447,6 +1448,17 @@ e_clk_off:
static int nbpf_remove(struct platform_device *pdev)
{
struct nbpf_device *nbpf = platform_get_drvdata(pdev);
+ int i;
+
+ devm_free_irq(&pdev->dev, nbpf->eirq, nbpf);
+
+ for (i = 0; i < nbpf->config->num_channels; i++) {
+ struct nbpf_channel *chan = nbpf->chan + i;
+
+ devm_free_irq(&pdev->dev, chan->irq, chan);
+
+ tasklet_kill(&chan->tasklet);
+ }
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&nbpf->dma_dev);
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 1e984e18c..d99ca2b51 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -59,6 +59,8 @@ struct omap_sg {
dma_addr_t addr;
uint32_t en; /* number of elements (24-bit) */
uint32_t fn; /* number of frames (16-bit) */
+ int32_t fi; /* for double indexing */
+ int16_t ei; /* for double indexing */
};
struct omap_desc {
@@ -66,7 +68,8 @@ struct omap_desc {
enum dma_transfer_direction dir;
dma_addr_t dev_addr;
- int16_t fi; /* for OMAP_DMA_SYNC_PACKET */
+ int32_t fi; /* for OMAP_DMA_SYNC_PACKET / double indexing */
+ int16_t ei; /* for double indexing */
uint8_t es; /* CSDP_DATA_TYPE_xxx */
uint32_t ccr; /* CCR value */
uint16_t clnk_ctrl; /* CLNK_CTRL value */
@@ -379,8 +382,8 @@ static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
}
omap_dma_chan_write(c, cxsa, sg->addr);
- omap_dma_chan_write(c, cxei, 0);
- omap_dma_chan_write(c, cxfi, 0);
+ omap_dma_chan_write(c, cxei, sg->ei);
+ omap_dma_chan_write(c, cxfi, sg->fi);
omap_dma_chan_write(c, CEN, sg->en);
omap_dma_chan_write(c, CFN, sg->fn);
@@ -425,7 +428,7 @@ static void omap_dma_start_desc(struct omap_chan *c)
}
omap_dma_chan_write(c, cxsa, d->dev_addr);
- omap_dma_chan_write(c, cxei, 0);
+ omap_dma_chan_write(c, cxei, d->ei);
omap_dma_chan_write(c, cxfi, d->fi);
omap_dma_chan_write(c, CSDP, d->csdp);
omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl);
@@ -971,6 +974,89 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}
+static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
+ struct dma_chan *chan, struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ struct omap_chan *c = to_omap_dma_chan(chan);
+ struct omap_desc *d;
+ struct omap_sg *sg;
+ uint8_t data_type;
+ size_t src_icg, dst_icg;
+
+ /* Slave mode is not supported */
+ if (is_slave_direction(xt->dir))
+ return NULL;
+
+ if (xt->frame_size != 1 || xt->numf == 0)
+ return NULL;
+
+ d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
+ if (!d)
+ return NULL;
+
+ data_type = __ffs((xt->src_start | xt->dst_start | xt->sgl[0].size));
+ if (data_type > CSDP_DATA_TYPE_32)
+ data_type = CSDP_DATA_TYPE_32;
+
+ sg = &d->sg[0];
+ d->dir = DMA_MEM_TO_MEM;
+ d->dev_addr = xt->src_start;
+ d->es = data_type;
+ sg->en = xt->sgl[0].size / BIT(data_type);
+ sg->fn = xt->numf;
+ sg->addr = xt->dst_start;
+ d->sglen = 1;
+ d->ccr = c->ccr;
+
+ src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
+ dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
+ if (src_icg) {
+ d->ccr |= CCR_SRC_AMODE_DBLIDX;
+ d->ei = 1;
+ d->fi = src_icg;
+ } else if (xt->src_inc) {
+ d->ccr |= CCR_SRC_AMODE_POSTINC;
+ d->fi = 0;
+ } else {
+ dev_err(chan->device->dev,
+ "%s: SRC constant addressing is not supported\n",
+ __func__);
+ kfree(d);
+ return NULL;
+ }
+
+ if (dst_icg) {
+ d->ccr |= CCR_DST_AMODE_DBLIDX;
+ sg->ei = 1;
+ sg->fi = dst_icg;
+ } else if (xt->dst_inc) {
+ d->ccr |= CCR_DST_AMODE_POSTINC;
+ sg->fi = 0;
+ } else {
+ dev_err(chan->device->dev,
+ "%s: DST constant addressing is not supported\n",
+ __func__);
+ kfree(d);
+ return NULL;
+ }
+
+ d->cicr = CICR_DROP_IE | CICR_FRAME_IE;
+
+ d->csdp = data_type;
+
+ if (dma_omap1()) {
+ d->cicr |= CICR_TOUT_IE;
+ d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF;
+ } else {
+ d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED;
+ d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
+ d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
+ }
+
+ return vchan_tx_prep(&c->vc, &d->vd, flags);
+}
+
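+/*
+ * Illustrative template (values are assumptions, for illustration
+ * only): copying a 64x64 tile out of a 1024-pixel-wide, 16-bit frame
+ * could be described to this callback as:
+ *
+ *	xt->numf = 64;		   64 frames (rows)
+ *	xt->frame_size = 1;	   one chunk per frame
+ *	xt->sgl[0].size = 128;	   64 pixels * 2 bytes
+ *	xt->sgl[0].icg = 1920;	   (1024 - 64) pixels * 2 bytes of gap
+ *	xt->src_inc = xt->dst_inc = true;
+ */
+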
static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
{
struct omap_chan *c = to_omap_dma_chan(chan);
@@ -1116,6 +1202,7 @@ static int omap_dma_probe(struct platform_device *pdev)
dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
+ dma_cap_set(DMA_INTERLEAVE, od->ddev.cap_mask);
od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
od->ddev.device_tx_status = omap_dma_tx_status;
@@ -1123,6 +1210,7 @@ static int omap_dma_probe(struct platform_device *pdev)
od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
od->ddev.device_prep_dma_memcpy = omap_dma_prep_dma_memcpy;
+ od->ddev.device_prep_interleaved_dma = omap_dma_prep_dma_interleaved;
od->ddev.device_config = omap_dma_slave_config;
od->ddev.device_pause = omap_dma_pause;
od->ddev.device_resume = omap_dma_resume;
@@ -1204,10 +1292,14 @@ static int omap_dma_probe(struct platform_device *pdev)
static int omap_dma_remove(struct platform_device *pdev)
{
struct omap_dmadev *od = platform_get_drvdata(pdev);
+ int irq;
if (pdev->dev.of_node)
of_dma_controller_free(pdev->dev.of_node);
+ irq = platform_get_irq(pdev, 1);
+ devm_free_irq(&pdev->dev, irq, od);
+
dma_async_device_unregister(&od->ddev);
if (!od->legacy) {
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 372b4359d..4fc3ffbd5 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2828,10 +2828,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
/* Allocate a new DMAC and its Channels */
pl330 = devm_kzalloc(&adev->dev, sizeof(*pl330), GFP_KERNEL);
- if (!pl330) {
- dev_err(&adev->dev, "unable to allocate mem\n");
+ if (!pl330)
return -ENOMEM;
- }
pd = &pl330->ddma;
pd->dev = &adev->dev;
@@ -2890,7 +2888,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pl330->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
if (!pl330->peripherals) {
ret = -ENOMEM;
- dev_err(&adev->dev, "unable to allocate pl330->peripherals\n");
goto probe_err2;
}
@@ -3005,12 +3002,18 @@ static int pl330_remove(struct amba_device *adev)
{
struct pl330_dmac *pl330 = amba_get_drvdata(adev);
struct dma_pl330_chan *pch, *_p;
+ int i, irq;
pm_runtime_get_noresume(pl330->ddma.dev);
if (adev->dev.of_node)
of_dma_controller_free(adev->dev.of_node);
+ for (i = 0; i < AMBA_NR_IRQS; i++) {
+ irq = adev->irq[i];
+ devm_free_irq(&adev->dev, irq, pl330);
+ }
+
dma_async_device_unregister(&pl330->ddma);
/* Idle the DMAC */
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 9217f893b..da3688b94 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4084,7 +4084,6 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev)
/* create a device */
adev = kzalloc(sizeof(*adev), GFP_KERNEL);
if (!adev) {
- dev_err(&ofdev->dev, "failed to allocate device\n");
initcode = PPC_ADMA_INIT_ALLOC;
ret = -ENOMEM;
goto err_adev_alloc;
@@ -4145,7 +4144,6 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev)
/* create a channel */
chan = kzalloc(sizeof(*chan), GFP_KERNEL);
if (!chan) {
- dev_err(&ofdev->dev, "can't allocate channel structure\n");
initcode = PPC_ADMA_INIT_CHANNEL;
ret = -ENOMEM;
goto err_chan_alloc;
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index e756a30cc..3f56f9ca4 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -21,6 +21,7 @@
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of.h>
+#include <linux/wait.h>
#include <linux/dma/pxa-dma.h>
#include "dmaengine.h"
@@ -118,6 +119,8 @@ struct pxad_chan {
struct pxad_phy *phy;
struct dma_pool *desc_pool; /* Descriptors pool */
dma_cookie_t bus_error;
+
+ wait_queue_head_t wq_state;
};
struct pxad_device {
@@ -318,7 +321,6 @@ static int dbg_open_##name(struct inode *inode, struct file *file) \
return single_open(file, dbg_show_##name, inode->i_private); \
} \
static const struct file_operations dbg_fops_##name = { \
- .owner = THIS_MODULE, \
.open = dbg_open_##name, \
.llseek = seq_lseek, \
.read = seq_read, \
@@ -572,6 +574,7 @@ static void pxad_launch_chan(struct pxad_chan *chan,
*/
phy_writel(chan->phy, desc->first, DDADR);
phy_enable(chan->phy, chan->misaligned);
+ wake_up(&chan->wq_state);
}
static void set_updater_desc(struct pxad_desc_sw *sw_desc,
@@ -635,7 +638,7 @@ static bool pxad_try_hotchain(struct virt_dma_chan *vc,
vd_last_issued = list_entry(vc->desc_issued.prev,
struct virt_dma_desc, node);
pxad_desc_chain(vd_last_issued, vd);
- if (is_chan_running(chan) || is_desc_completed(vd_last_issued))
+ if (is_chan_running(chan) || is_desc_completed(vd))
return true;
}
@@ -668,6 +671,7 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
struct virt_dma_desc *vd, *tmp;
unsigned int dcsr;
unsigned long flags;
+ bool vd_completed;
dma_cookie_t last_started = 0;
BUG_ON(!chan);
@@ -678,15 +682,17 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
spin_lock_irqsave(&chan->vc.lock, flags);
list_for_each_entry_safe(vd, tmp, &chan->vc.desc_issued, node) {
+ vd_completed = is_desc_completed(vd);
dev_dbg(&chan->vc.chan.dev->device,
- "%s(): checking txd %p[%x]: completed=%d\n",
- __func__, vd, vd->tx.cookie, is_desc_completed(vd));
+ "%s(): checking txd %p[%x]: completed=%d dcsr=0x%x\n",
+ __func__, vd, vd->tx.cookie, vd_completed,
+ dcsr);
last_started = vd->tx.cookie;
if (to_pxad_sw_desc(vd)->cyclic) {
vchan_cyclic_callback(vd);
break;
}
- if (is_desc_completed(vd)) {
+ if (vd_completed) {
list_del(&vd->node);
vchan_cookie_complete(vd);
} else {
@@ -717,6 +723,7 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
}
}
spin_unlock_irqrestore(&chan->vc.lock, flags);
+ wake_up(&chan->wq_state);
return IRQ_HANDLED;
}
@@ -1268,6 +1275,14 @@ static enum dma_status pxad_tx_status(struct dma_chan *dchan,
return ret;
}
+static void pxad_synchronize(struct dma_chan *dchan)
+{
+ struct pxad_chan *chan = to_pxad_chan(dchan);
+
+ wait_event(chan->wq_state, !is_chan_running(chan));
+ vchan_synchronize(&chan->vc);
+}
+
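+/*
+ * Illustrative caller pattern (not part of this driver): a client
+ * tearing down a channel would typically issue
+ *
+ *	dmaengine_terminate_async(chan);
+ *	dmaengine_synchronize(chan);	(blocks in pxad_synchronize())
+ *
+ * before freeing data referenced by its completion callbacks.
+ */
+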
static void pxad_free_channels(struct dma_device *dmadev)
{
struct pxad_chan *c, *cn;
@@ -1372,6 +1387,7 @@ static int pxad_init_dmadev(struct platform_device *op,
pdev->slave.device_tx_status = pxad_tx_status;
pdev->slave.device_issue_pending = pxad_issue_pending;
pdev->slave.device_config = pxad_config;
+ pdev->slave.device_synchronize = pxad_synchronize;
pdev->slave.device_terminate_all = pxad_terminate_all;
if (op->dev.coherent_dma_mask)
@@ -1389,6 +1405,7 @@ static int pxad_init_dmadev(struct platform_device *op,
return -ENOMEM;
c->vc.desc_free = pxad_free_desc;
vchan_init(&c->vc, &pdev->slave);
+ init_waitqueue_head(&c->wq_state);
}
return dma_async_device_register(&pdev->slave);
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 969b48176..03c4eb3fd 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -48,6 +48,7 @@
#include <linux/of_dma.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
+#include <linux/pm_runtime.h>
#include "../dmaengine.h"
#include "../virt-dma.h"
@@ -58,6 +59,8 @@ struct bam_desc_hw {
__le16 flags;
};
+#define BAM_DMA_AUTOSUSPEND_DELAY 100
+
#define DESC_FLAG_INT BIT(15)
#define DESC_FLAG_EOT BIT(14)
#define DESC_FLAG_EOB BIT(13)
@@ -527,12 +530,17 @@ static void bam_free_chan(struct dma_chan *chan)
struct bam_device *bdev = bchan->bdev;
u32 val;
unsigned long flags;
+ int ret;
+
+ ret = pm_runtime_get_sync(bdev->dev);
+ if (ret < 0)
+ return;
vchan_free_chan_resources(to_virt_chan(chan));
if (bchan->curr_txd) {
dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
- return;
+ goto err;
}
spin_lock_irqsave(&bchan->vc.lock, flags);
@@ -550,6 +558,10 @@ static void bam_free_chan(struct dma_chan *chan)
/* disable irq */
writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
+
+err:
+ pm_runtime_mark_last_busy(bdev->dev);
+ pm_runtime_put_autosuspend(bdev->dev);
}
/**
@@ -696,11 +708,18 @@ static int bam_pause(struct dma_chan *chan)
struct bam_chan *bchan = to_bam_chan(chan);
struct bam_device *bdev = bchan->bdev;
unsigned long flag;
+ int ret;
+
+ ret = pm_runtime_get_sync(bdev->dev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&bchan->vc.lock, flag);
writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
bchan->paused = 1;
spin_unlock_irqrestore(&bchan->vc.lock, flag);
+ pm_runtime_mark_last_busy(bdev->dev);
+ pm_runtime_put_autosuspend(bdev->dev);
return 0;
}
@@ -715,11 +734,18 @@ static int bam_resume(struct dma_chan *chan)
struct bam_chan *bchan = to_bam_chan(chan);
struct bam_device *bdev = bchan->bdev;
unsigned long flag;
+ int ret;
+
+ ret = pm_runtime_get_sync(bdev->dev);
+ if (ret < 0)
+ return ret;
spin_lock_irqsave(&bchan->vc.lock, flag);
writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
bchan->paused = 0;
spin_unlock_irqrestore(&bchan->vc.lock, flag);
+ pm_runtime_mark_last_busy(bdev->dev);
+ pm_runtime_put_autosuspend(bdev->dev);
return 0;
}
@@ -795,6 +821,7 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
{
struct bam_device *bdev = data;
u32 clr_mask = 0, srcs = 0;
+ int ret;
srcs |= process_channel_irqs(bdev);
@@ -802,6 +829,10 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
if (srcs & P_IRQ)
tasklet_schedule(&bdev->task);
+ ret = pm_runtime_get_sync(bdev->dev);
+ if (ret < 0)
+ return ret;
+
if (srcs & BAM_IRQ) {
clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS));
@@ -814,6 +845,9 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR));
}
+ pm_runtime_mark_last_busy(bdev->dev);
+ pm_runtime_put_autosuspend(bdev->dev);
+
return IRQ_HANDLED;
}
@@ -893,6 +927,7 @@ static void bam_start_dma(struct bam_chan *bchan)
struct bam_desc_hw *desc;
struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
sizeof(struct bam_desc_hw));
+ int ret;
lockdep_assert_held(&bchan->vc.lock);
@@ -904,6 +939,10 @@ static void bam_start_dma(struct bam_chan *bchan)
async_desc = container_of(vd, struct bam_async_desc, vd);
bchan->curr_txd = async_desc;
+ ret = pm_runtime_get_sync(bdev->dev);
+ if (ret < 0)
+ return;
+
/* on first use, initialize the channel hardware */
if (!bchan->initialized)
bam_chan_init_hw(bchan, async_desc->dir);
@@ -946,6 +985,9 @@ static void bam_start_dma(struct bam_chan *bchan)
wmb();
writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
bam_addr(bdev, bchan->id, BAM_P_EVNT_REG));
+
+ pm_runtime_mark_last_busy(bdev->dev);
+ pm_runtime_put_autosuspend(bdev->dev);
}
/**
@@ -970,6 +1012,7 @@ static void dma_tasklet(unsigned long data)
bam_start_dma(bchan);
spin_unlock_irqrestore(&bchan->vc.lock, flags);
}
+
}
/**
@@ -1213,6 +1256,13 @@ static int bam_dma_probe(struct platform_device *pdev)
if (ret)
goto err_unregister_dma;
+ pm_runtime_irq_safe(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, BAM_DMA_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
return 0;
err_unregister_dma:
@@ -1233,6 +1283,8 @@ static int bam_dma_remove(struct platform_device *pdev)
struct bam_device *bdev = platform_get_drvdata(pdev);
u32 i;
+ pm_runtime_force_suspend(&pdev->dev);
+
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&bdev->common);
@@ -1260,11 +1312,66 @@ static int bam_dma_remove(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused bam_dma_runtime_suspend(struct device *dev)
+{
+ struct bam_device *bdev = dev_get_drvdata(dev);
+
+ clk_disable(bdev->bamclk);
+
+ return 0;
+}
+
+static int __maybe_unused bam_dma_runtime_resume(struct device *dev)
+{
+ struct bam_device *bdev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_enable(bdev->bamclk);
+ if (ret < 0) {
+ dev_err(dev, "clk_enable failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused bam_dma_suspend(struct device *dev)
+{
+ struct bam_device *bdev = dev_get_drvdata(dev);
+
+ pm_runtime_force_suspend(dev);
+
+ clk_unprepare(bdev->bamclk);
+
+ return 0;
+}
+
+static int __maybe_unused bam_dma_resume(struct device *dev)
+{
+ struct bam_device *bdev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare(bdev->bamclk);
+ if (ret)
+ return ret;
+
+ pm_runtime_force_resume(dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops bam_dma_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(bam_dma_suspend, bam_dma_resume)
+ SET_RUNTIME_PM_OPS(bam_dma_runtime_suspend, bam_dma_runtime_resume,
+ NULL)
+};
+
static struct platform_driver bam_dma_driver = {
.probe = bam_dma_probe,
.remove = bam_dma_remove,
.driver = {
.name = "bam-dma-engine",
+ .pm = &bam_dma_pm_ops,
.of_match_table = bam_of_match,
},
};
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 41b5c6dee..b2374cd91 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -708,6 +708,7 @@ static int hidma_remove(struct platform_device *pdev)
pm_runtime_get_sync(dmadev->ddev.dev);
dma_async_device_unregister(&dmadev->ddev);
devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
+ tasklet_kill(&dmadev->task);
hidma_debug_uninit(dmadev);
hidma_ll_uninit(dmadev->lldev);
hidma_free(dmadev);
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
index f39290015..ad20dfb64 100644
--- a/drivers/dma/qcom/hidma_ll.c
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -831,6 +831,7 @@ int hidma_ll_uninit(struct hidma_lldev *lldev)
required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres;
tasklet_kill(&lldev->task);
+ tasklet_kill(&lldev->rst_task);
memset(lldev->trepool, 0, required_bytes);
lldev->trepool = NULL;
lldev->pending_tre_count = 0;
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index c0e365321..82f36e466 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -371,8 +371,8 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
pdevinfo.size_data = 0;
pdevinfo.dma_mask = DMA_BIT_MASK(64);
new_pdev = platform_device_register_full(&pdevinfo);
- if (!new_pdev) {
- ret = -ENODEV;
+ if (IS_ERR(new_pdev)) {
+ ret = PTR_ERR(new_pdev);
goto out;
}
of_dma_configure(&new_pdev->dev, child);
@@ -392,8 +392,7 @@ static int __init hidma_mgmt_init(void)
#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
struct device_node *child;
- for (child = of_find_matching_node(NULL, hidma_mgmt_match); child;
- child = of_find_matching_node(child, hidma_mgmt_match)) {
+ for_each_matching_node(child, hidma_mgmt_match) {
/* device tree based firmware here */
hidma_mgmt_of_populate_channels(child);
of_node_put(child);
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 17ccdfd28..ce6707558 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -768,16 +768,12 @@ static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan,
spin_lock_irqsave(&s3cchan->vc.lock, flags);
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_COMPLETE) {
- spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
- return ret;
- }
/*
* There's no point calculating the residue if there's
* no txstate to store the value.
*/
- if (!txstate) {
+ if (ret == DMA_COMPLETE || !txstate) {
spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
return ret;
}
@@ -1105,11 +1101,8 @@ static int s3c24xx_dma_init_virtual_channels(struct s3c24xx_dma_engine *s3cdma,
*/
for (i = 0; i < channels; i++) {
chan = devm_kzalloc(dmadev->dev, sizeof(*chan), GFP_KERNEL);
- if (!chan) {
- dev_err(dmadev->dev,
- "%s no memory for channel\n", __func__);
+ if (!chan)
return -ENOMEM;
- }
chan->id = i;
chan->host = s3cdma;
@@ -1143,8 +1136,10 @@ static void s3c24xx_dma_free_virtual_channels(struct dma_device *dmadev)
struct s3c24xx_dma_chan *next;
list_for_each_entry_safe(chan,
- next, &dmadev->channels, vc.chan.device_node)
+ next, &dmadev->channels, vc.chan.device_node) {
list_del(&chan->vc.chan.device_node);
+ tasklet_kill(&chan->vc.task);
+ }
}
/* s3c2410, s3c2440 and s3c2442 have a 0x40 stride without separate clocks */
@@ -1366,6 +1361,18 @@ err_memcpy:
return ret;
}
+static void s3c24xx_dma_free_irq(struct platform_device *pdev,
+ struct s3c24xx_dma_engine *s3cdma)
+{
+ int i;
+
+ for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) {
+ struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
+
+ devm_free_irq(&pdev->dev, phy->irq, phy);
+ }
+}
+
static int s3c24xx_dma_remove(struct platform_device *pdev)
{
const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
@@ -1376,6 +1383,8 @@ static int s3c24xx_dma_remove(struct platform_device *pdev)
dma_async_device_unregister(&s3cdma->slave);
dma_async_device_unregister(&s3cdma->memcpy);
+ s3c24xx_dma_free_irq(pdev, s3cdma);
+
s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index dfb179262..0dd953884 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -311,7 +311,7 @@ static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan)
{
u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
- return (chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE)) == RCAR_DMACHCR_DE;
+ return !!(chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE));
}
static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
@@ -510,7 +510,7 @@ static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan,
spin_lock_irqsave(&chan->lock, flags);
list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free);
- list_add_tail(&desc->node, &chan->desc.free);
+ list_add(&desc->node, &chan->desc.free);
spin_unlock_irqrestore(&chan->lock, flags);
}
@@ -990,6 +990,8 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
list_splice_init(&rchan->desc.done, &list);
list_splice_init(&rchan->desc.wait, &list);
+ rchan->desc.running = NULL;
+
list_for_each_entry(desc, &list, node)
rcar_dmac_realloc_hwdesc(rchan, desc, 0);
@@ -1143,6 +1145,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
struct rcar_dmac_desc *desc = chan->desc.running;
struct rcar_dmac_xfer_chunk *running = NULL;
struct rcar_dmac_xfer_chunk *chunk;
+ enum dma_status status;
unsigned int residue = 0;
unsigned int dptr = 0;
@@ -1150,12 +1153,38 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
return 0;
/*
+ * If the cookie corresponds to a descriptor that has been completed
+ * there is no residue. The same check has already been performed by the
+ * caller but without holding the channel lock, so the descriptor could
+ * now be complete.
+ */
+ status = dma_cookie_status(&chan->chan, cookie, NULL);
+ if (status == DMA_COMPLETE)
+ return 0;
+
+ /*
* If the cookie doesn't correspond to the currently running transfer
* then the descriptor hasn't been processed yet, and the residue is
* equal to the full descriptor size.
*/
- if (cookie != desc->async_tx.cookie)
- return desc->size;
+ if (cookie != desc->async_tx.cookie) {
+ list_for_each_entry(desc, &chan->desc.pending, node) {
+ if (cookie == desc->async_tx.cookie)
+ return desc->size;
+ }
+ list_for_each_entry(desc, &chan->desc.active, node) {
+ if (cookie == desc->async_tx.cookie)
+ return desc->size;
+ }
+
+ /*
+ * No descriptor found for the cookie, there's thus no residue.
+ * This shouldn't happen if the calling driver passes a correct
+ * cookie value.
+ */
+ WARN(1, "No descriptor for cookie!");
+ return 0;
+ }
/*
* In descriptor mode the descriptor running pointer is not maintained
@@ -1202,6 +1231,10 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
residue = rcar_dmac_chan_get_residue(rchan, cookie);
spin_unlock_irqrestore(&rchan->lock, flags);
+ /* if there's no residue, the cookie is complete */
+ if (!residue)
+ return DMA_COMPLETE;
+
dma_set_residue(txstate, residue);
return status;
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 80d864024..c94ffab0d 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -532,11 +532,8 @@ static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan),
GFP_KERNEL);
- if (!sh_chan) {
- dev_err(sdev->dma_dev.dev,
- "No free memory for allocating dma channels!\n");
+ if (!sh_chan)
return -ENOMEM;
- }
schan = &sh_chan->shdma_chan;
schan->max_xfer_len = SH_DMA_TCR_MAX + 1;
@@ -732,10 +729,8 @@ static int sh_dmae_probe(struct platform_device *pdev)
shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
GFP_KERNEL);
- if (!shdev) {
- dev_err(&pdev->dev, "Not enough memory\n");
+ if (!shdev)
return -ENOMEM;
- }
dma_dev = &shdev->shdma_dev.dma_dev;
diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c
index 6da2eaa6c..69b9564dc 100644
--- a/drivers/dma/sh/sudmac.c
+++ b/drivers/dma/sh/sudmac.c
@@ -245,11 +245,8 @@ static int sudmac_chan_probe(struct sudmac_device *su_dev, int id, int irq,
int err;
sc = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_chan), GFP_KERNEL);
- if (!sc) {
- dev_err(sdev->dma_dev.dev,
- "No free memory for allocating dma channels!\n");
+ if (!sc)
return -ENOMEM;
- }
schan = &sc->shdma_chan;
schan->max_xfer_len = 64 * 1024 * 1024 - 1;
@@ -349,10 +346,8 @@ static int sudmac_probe(struct platform_device *pdev)
err = -ENOMEM;
su_dev = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_device),
GFP_KERNEL);
- if (!su_dev) {
- dev_err(&pdev->dev, "Not enough memory\n");
+ if (!su_dev)
return err;
- }
dma_dev = &su_dev->shdma_dev.dma_dev;
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index e48350e65..d8bc3f2a7 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -854,10 +854,9 @@ static int sirfsoc_dma_probe(struct platform_device *op)
int ret, i;
sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
- if (!sdma) {
- dev_err(dev, "Memory exhausted!\n");
+ if (!sdma)
return -ENOMEM;
- }
+
data = (struct sirfsoc_dmadata *)
(of_match_device(op->dev.driver->of_match_table,
&op->dev)->data);
@@ -981,6 +980,7 @@ static int sirfsoc_dma_remove(struct platform_device *op)
of_dma_controller_free(op->dev.of_node);
dma_async_device_unregister(&sdma->dma);
free_irq(sdma->irq, sdma);
+ tasklet_kill(&sdma->tasklet);
irq_dispose_mapping(sdma->irq);
pm_runtime_disable(&op->dev);
if (!pm_runtime_status_suspended(&op->dev))
@@ -1126,17 +1126,17 @@ static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume)
};
-struct sirfsoc_dmadata sirfsoc_dmadata_a6 = {
+static struct sirfsoc_dmadata sirfsoc_dmadata_a6 = {
.exec = sirfsoc_dma_execute_hw_a6,
.type = SIRFSOC_DMA_VER_A6,
};
-struct sirfsoc_dmadata sirfsoc_dmadata_a7v1 = {
+static struct sirfsoc_dmadata sirfsoc_dmadata_a7v1 = {
.exec = sirfsoc_dma_execute_hw_a7v1,
.type = SIRFSOC_DMA_VER_A7V1,
};
-struct sirfsoc_dmadata sirfsoc_dmadata_a7v2 = {
+static struct sirfsoc_dmadata sirfsoc_dmadata_a7v2 = {
.exec = sirfsoc_dma_execute_hw_a7v2,
.type = SIRFSOC_DMA_VER_A7V2,
};
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 6fb830746..8b18e44a0 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2588,7 +2588,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan,
}
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret != DMA_COMPLETE)
+ if (ret != DMA_COMPLETE && txstate)
dma_set_residue(txstate, stedma40_residue(chan));
if (d40_is_paused(d40c))
@@ -3237,10 +3237,8 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
(num_phy_chans + num_log_chans + num_memcpy_chans) *
sizeof(struct d40_chan), GFP_KERNEL);
- if (base == NULL) {
- d40_err(&pdev->dev, "Out of memory\n");
+ if (base == NULL)
goto failure;
- }
base->rev = rev;
base->clk = clk;
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index 27b818dee..13b42dd99 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -10,7 +10,7 @@
#include "ste_dma40_ll.h"
-u8 d40_width_to_bits(enum dma_slave_buswidth width)
+static u8 d40_width_to_bits(enum dma_slave_buswidth width)
{
if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
return STEDMA40_ESIZE_8_BIT;
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 5065ca43f..3835fcde3 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -865,7 +865,7 @@ static enum dma_status sun6i_dma_tx_status(struct dma_chan *chan,
size_t bytes = 0;
ret = dma_cookie_status(chan, cookie, state);
- if (ret == DMA_COMPLETE)
+ if (ret == DMA_COMPLETE || !state)
return ret;
spin_lock_irqsave(&vchan->vc.lock, flags);
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 01e316f73..6ab9eb985 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -300,10 +300,8 @@ static struct tegra_dma_desc *tegra_dma_desc_get(
/* Allocate DMA desc */
dma_desc = kzalloc(sizeof(*dma_desc), GFP_NOWAIT);
- if (!dma_desc) {
- dev_err(tdc2dev(tdc), "dma_desc alloc failed\n");
+ if (!dma_desc)
return NULL;
- }
dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
dma_desc->txd.tx_submit = tegra_dma_tx_submit;
@@ -340,8 +338,7 @@ static struct tegra_dma_sg_req *tegra_dma_sg_req_get(
spin_unlock_irqrestore(&tdc->lock, flags);
sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_NOWAIT);
- if (!sg_req)
- dev_err(tdc2dev(tdc), "sg_req alloc failed\n");
+
return sg_req;
}
@@ -484,7 +481,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
* load new configuration.
*/
tegra_dma_pause(tdc, false);
- status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
+ status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
/*
* If interrupt is pending then do nothing as the ISR will handle
@@ -822,13 +819,8 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
/* Check on wait_ack desc status */
list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
if (dma_desc->txd.cookie == cookie) {
- residual = dma_desc->bytes_requested -
- (dma_desc->bytes_transferred %
- dma_desc->bytes_requested);
- dma_set_residue(txstate, residual);
ret = dma_desc->dma_status;
- spin_unlock_irqrestore(&tdc->lock, flags);
- return ret;
+ goto found;
}
}
@@ -836,17 +828,22 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
dma_desc = sg_req->dma_desc;
if (dma_desc->txd.cookie == cookie) {
- residual = dma_desc->bytes_requested -
- (dma_desc->bytes_transferred %
- dma_desc->bytes_requested);
- dma_set_residue(txstate, residual);
ret = dma_desc->dma_status;
- spin_unlock_irqrestore(&tdc->lock, flags);
- return ret;
+ goto found;
}
}
- dev_dbg(tdc2dev(tdc), "cookie %d does not found\n", cookie);
+ dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie);
+ dma_desc = NULL;
+
+found:
+ if (dma_desc && txstate) {
+ residual = dma_desc->bytes_requested -
+ (dma_desc->bytes_transferred %
+ dma_desc->bytes_requested);
+ dma_set_residue(txstate, residual);
+ }
+
spin_unlock_irqrestore(&tdc->lock, flags);
return ret;
}
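+
+/*
+ * Illustrative residue math (values are assumptions): with
+ * bytes_requested = 4096 and bytes_transferred = 5120 after a cyclic
+ * wrap, residual = 4096 - (5120 % 4096) = 3072 bytes outstanding.
+ */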
@@ -905,7 +902,6 @@ static int get_transfer_param(struct tegra_dma_channel *tdc,
unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size,
enum dma_slave_buswidth *slave_bw)
{
-
switch (direction) {
case DMA_MEM_TO_DEV:
*apb_addr = tdc->dma_sconfig.dst_addr;
@@ -948,8 +944,8 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
struct tegra_dma_desc *dma_desc;
- unsigned int i;
- struct scatterlist *sg;
+ unsigned int i;
+ struct scatterlist *sg;
unsigned long csr, ahb_seq, apb_ptr, apb_seq;
struct list_head req_list;
struct tegra_dma_sg_req *sg_req = NULL;
@@ -1062,7 +1058,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
struct tegra_dma_desc *dma_desc = NULL;
- struct tegra_dma_sg_req *sg_req = NULL;
+ struct tegra_dma_sg_req *sg_req = NULL;
unsigned long csr, ahb_seq, apb_ptr, apb_seq;
int len;
size_t remain_len;
@@ -1204,7 +1200,6 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
struct tegra_dma *tdma = tdc->tdma;
-
struct tegra_dma_desc *dma_desc;
struct tegra_dma_sg_req *sg_req;
struct list_head dma_desc_list;
@@ -1305,7 +1300,7 @@ static const struct tegra_dma_chip_data tegra148_dma_chip_data = {
static int tegra_dma_probe(struct platform_device *pdev)
{
- struct resource *res;
+ struct resource *res;
struct tegra_dma *tdma;
int ret;
int i;
@@ -1319,10 +1314,8 @@ static int tegra_dma_probe(struct platform_device *pdev)
tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
sizeof(struct tegra_dma_channel), GFP_KERNEL);
- if (!tdma) {
- dev_err(&pdev->dev, "Error: memory allocation failed\n");
+ if (!tdma)
return -ENOMEM;
- }
tdma->dev = &pdev->dev;
tdma->chip_data = cdata;
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index e107779b1..5ae294b25 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -452,7 +452,7 @@ static struct platform_driver ti_dma_xbar_driver = {
.probe = ti_dma_xbar_probe,
};
-int omap_dmaxbar_init(void)
+static int omap_dmaxbar_init(void)
{
return platform_driver_register(&ti_dma_xbar_driver);
}
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 559cd4073..e82745aa4 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -337,18 +337,14 @@ static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
int err;
td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
- if (!td_desc) {
- dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
+ if (!td_desc)
goto out;
- }
td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;
td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL);
- if (!td_desc->desc_list) {
- dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
+ if (!td_desc->desc_list)
goto err;
- }
dma_async_tx_descriptor_init(&td_desc->txd, chan);
td_desc->txd.tx_submit = td_tx_submit;
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 8849318b3..7632290e7 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -1165,9 +1165,12 @@ static int txx9dmac_chan_remove(struct platform_device *pdev)
{
struct txx9dmac_chan *dc = platform_get_drvdata(pdev);
+
dma_async_device_unregister(&dc->dma);
- if (dc->irq >= 0)
+ if (dc->irq >= 0) {
+ devm_free_irq(&pdev->dev, dc->irq, dc);
tasklet_kill(&dc->tasklet);
+ }
dc->ddev->chan[pdev->id % TXX9_DMA_MAX_NR_CHANNELS] = NULL;
return 0;
}
@@ -1228,8 +1231,10 @@ static int txx9dmac_remove(struct platform_device *pdev)
struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
txx9dmac_off(ddev);
- if (ddev->irq >= 0)
+ if (ddev->irq >= 0) {
+ devm_free_irq(&pdev->dev, ddev->irq, ddev);
tasklet_kill(&ddev->tasklet);
+ }
return 0;
}
diff --git a/drivers/dma/xilinx/Makefile b/drivers/dma/xilinx/Makefile
index 3c4e9f2fe..9e91f8f5b 100644
--- a/drivers/dma/xilinx/Makefile
+++ b/drivers/dma/xilinx/Makefile
@@ -1 +1,2 @@
-obj-$(CONFIG_XILINX_VDMA) += xilinx_vdma.o
+obj-$(CONFIG_XILINX_DMA) += xilinx_dma.o
+obj-$(CONFIG_XILINX_ZYNQMP_DMA) += zynqmp_dma.o
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
new file mode 100644
index 000000000..4e223d094
--- /dev/null
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -0,0 +1,2689 @@
+/*
+ * DMA driver for Xilinx Video DMA Engine
+ *
+ * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
+ *
+ * Based on the Freescale DMA driver.
+ *
+ * Description:
+ * The AXI Video Direct Memory Access (AXI VDMA) is a soft Xilinx IP
+ * core that provides high-bandwidth direct memory access between memory
+ * and AXI4-Stream type video target peripherals. The core provides efficient
+ * two dimensional DMA operations with independent asynchronous read (S2MM)
+ * and write (MM2S) channel operation. It can be configured to have either
+ * one channel or two channels. If configured as two channels, one is to
+ * transmit to the video device (MM2S) and another is to receive from the
+ * video device (S2MM). Initialization, status, interrupt and management
+ * registers are accessed through an AXI4-Lite slave interface.
+ *
+ * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
+ * provides high-bandwidth one-dimensional direct memory access between memory
+ * and AXI4-Stream target peripherals. It supports one receive and one
+ * transmit channel, both of them optional at synthesis time.
+ *
+ * The AXI CDMA is a soft IP that provides high-bandwidth Direct Memory
+ * Access (DMA) between a memory-mapped source address and a memory-mapped
+ * destination address.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include <linux/dmapool.h>
+#include <linux/dma/xilinx_dma.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+#include "../dmaengine.h"
+
+/* Register/Descriptor Offsets */
+#define XILINX_DMA_MM2S_CTRL_OFFSET 0x0000
+#define XILINX_DMA_S2MM_CTRL_OFFSET 0x0030
+#define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050
+#define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0
+
+/* Control Registers */
+#define XILINX_DMA_REG_DMACR 0x0000
+#define XILINX_DMA_DMACR_DELAY_MAX 0xff
+#define XILINX_DMA_DMACR_DELAY_SHIFT 24
+#define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff
+#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16
+#define XILINX_DMA_DMACR_ERR_IRQ BIT(14)
+#define XILINX_DMA_DMACR_DLY_CNT_IRQ BIT(13)
+#define XILINX_DMA_DMACR_FRM_CNT_IRQ BIT(12)
+#define XILINX_DMA_DMACR_MASTER_SHIFT 8
+#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5
+#define XILINX_DMA_DMACR_FRAMECNT_EN BIT(4)
+#define XILINX_DMA_DMACR_GENLOCK_EN BIT(3)
+#define XILINX_DMA_DMACR_RESET BIT(2)
+#define XILINX_DMA_DMACR_CIRC_EN BIT(1)
+#define XILINX_DMA_DMACR_RUNSTOP BIT(0)
+#define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5)
+
+#define XILINX_DMA_REG_DMASR 0x0004
+#define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15)
+#define XILINX_DMA_DMASR_ERR_IRQ BIT(14)
+#define XILINX_DMA_DMASR_DLY_CNT_IRQ BIT(13)
+#define XILINX_DMA_DMASR_FRM_CNT_IRQ BIT(12)
+#define XILINX_DMA_DMASR_SOF_LATE_ERR BIT(11)
+#define XILINX_DMA_DMASR_SG_DEC_ERR BIT(10)
+#define XILINX_DMA_DMASR_SG_SLV_ERR BIT(9)
+#define XILINX_DMA_DMASR_EOF_EARLY_ERR BIT(8)
+#define XILINX_DMA_DMASR_SOF_EARLY_ERR BIT(7)
+#define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6)
+#define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5)
+#define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4)
+#define XILINX_DMA_DMASR_IDLE BIT(1)
+#define XILINX_DMA_DMASR_HALTED BIT(0)
+#define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24)
+#define XILINX_DMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16)
+
+#define XILINX_DMA_REG_CURDESC 0x0008
+#define XILINX_DMA_REG_TAILDESC 0x0010
+#define XILINX_DMA_REG_REG_INDEX 0x0014
+#define XILINX_DMA_REG_FRMSTORE 0x0018
+#define XILINX_DMA_REG_THRESHOLD 0x001c
+#define XILINX_DMA_REG_FRMPTR_STS 0x0024
+#define XILINX_DMA_REG_PARK_PTR 0x0028
+#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8
+#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0
+#define XILINX_DMA_REG_VDMA_VERSION 0x002c
+
+/* Register Direct Mode Registers */
+#define XILINX_DMA_REG_VSIZE 0x0000
+#define XILINX_DMA_REG_HSIZE 0x0004
+
+#define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008
+#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24
+#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT 0
+
+#define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n))
+#define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n))
+
+/* HW specific definitions */
+#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20
+
+#define XILINX_DMA_DMAXR_ALL_IRQ_MASK \
+ (XILINX_DMA_DMASR_FRM_CNT_IRQ | \
+ XILINX_DMA_DMASR_DLY_CNT_IRQ | \
+ XILINX_DMA_DMASR_ERR_IRQ)
+
+#define XILINX_DMA_DMASR_ALL_ERR_MASK \
+ (XILINX_DMA_DMASR_EOL_LATE_ERR | \
+ XILINX_DMA_DMASR_SOF_LATE_ERR | \
+ XILINX_DMA_DMASR_SG_DEC_ERR | \
+ XILINX_DMA_DMASR_SG_SLV_ERR | \
+ XILINX_DMA_DMASR_EOF_EARLY_ERR | \
+ XILINX_DMA_DMASR_SOF_EARLY_ERR | \
+ XILINX_DMA_DMASR_DMA_DEC_ERR | \
+ XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
+ XILINX_DMA_DMASR_DMA_INT_ERR)
+
+/*
+ * Recoverable errors are DMA Internal error, SOF Early, EOF Early
+ * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
+ * is enabled in the h/w system.
+ */
+#define XILINX_DMA_DMASR_ERR_RECOVER_MASK \
+ (XILINX_DMA_DMASR_SOF_LATE_ERR | \
+ XILINX_DMA_DMASR_EOF_EARLY_ERR | \
+ XILINX_DMA_DMASR_SOF_EARLY_ERR | \
+ XILINX_DMA_DMASR_DMA_INT_ERR)
+
+/* Axi VDMA Flush on Fsync bits */
+#define XILINX_DMA_FLUSH_S2MM 3
+#define XILINX_DMA_FLUSH_MM2S 2
+#define XILINX_DMA_FLUSH_BOTH 1
+
+/* Delay loop counter to prevent hardware failure */
+#define XILINX_DMA_LOOP_COUNT 1000000
+
+/* AXI DMA Specific Registers/Offsets */
+#define XILINX_DMA_REG_SRCDSTADDR 0x18
+#define XILINX_DMA_REG_BTT 0x28
+
+/* AXI DMA Specific Masks/Bit fields */
+#define XILINX_DMA_MAX_TRANS_LEN GENMASK(22, 0)
+#define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16)
+#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4)
+#define XILINX_DMA_CR_COALESCE_SHIFT 16
+#define XILINX_DMA_BD_SOP BIT(27)
+#define XILINX_DMA_BD_EOP BIT(26)
+#define XILINX_DMA_COALESCE_MAX 255
+#define XILINX_DMA_NUM_APP_WORDS 5
+
+/* Multi-Channel DMA Descriptor offsets */
+#define XILINX_DMA_MCRX_CDESC(x) (0x40 + (x - 1) * 0x20)
+#define XILINX_DMA_MCRX_TDESC(x) (0x48 + (x - 1) * 0x20)
+
+/* Multi-Channel DMA Masks/Shifts */
+#define XILINX_DMA_BD_HSIZE_MASK GENMASK(15, 0)
+#define XILINX_DMA_BD_STRIDE_MASK GENMASK(15, 0)
+#define XILINX_DMA_BD_VSIZE_MASK GENMASK(31, 19)
+#define XILINX_DMA_BD_TDEST_MASK GENMASK(4, 0)
+#define XILINX_DMA_BD_STRIDE_SHIFT 0
+#define XILINX_DMA_BD_VSIZE_SHIFT 19
+
+/* AXI CDMA Specific Registers/Offsets */
+#define XILINX_CDMA_REG_SRCADDR 0x18
+#define XILINX_CDMA_REG_DSTADDR 0x20
+
+/* AXI CDMA Specific Masks */
+#define XILINX_CDMA_CR_SGMODE BIT(3)
+
+/**
+ * struct xilinx_vdma_desc_hw - Hardware Descriptor
+ * @next_desc: Next Descriptor Pointer @0x00
+ * @pad1: Reserved @0x04
+ * @buf_addr: Buffer address @0x08
+ * @buf_addr_msb: MSB of Buffer address @0x0C
+ * @vsize: Vertical Size @0x10
+ * @hsize: Horizontal Size @0x14
+ * @stride: Number of bytes between the first
+ * pixels of each horizontal line @0x18
+ */
+struct xilinx_vdma_desc_hw {
+ u32 next_desc;
+ u32 pad1;
+ u32 buf_addr;
+ u32 buf_addr_msb;
+ u32 vsize;
+ u32 hsize;
+ u32 stride;
+} __aligned(64);
+
+/**
+ * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
+ * @next_desc: Next Descriptor Pointer @0x00
+ * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
+ * @buf_addr: Buffer address @0x08
+ * @buf_addr_msb: MSB of Buffer address @0x0C
+ * @pad1: Reserved @0x10
+ * @pad2: Reserved @0x14
+ * @control: Control field @0x18
+ * @status: Status field @0x1C
+ * @app: APP Fields @0x20 - 0x30
+ */
+struct xilinx_axidma_desc_hw {
+ u32 next_desc;
+ u32 next_desc_msb;
+ u32 buf_addr;
+ u32 buf_addr_msb;
+ u32 mcdma_control;
+ u32 vsize_stride;
+ u32 control;
+ u32 status;
+ u32 app[XILINX_DMA_NUM_APP_WORDS];
+} __aligned(64);
+
+/**
+ * struct xilinx_cdma_desc_hw - Hardware Descriptor
+ * @next_desc: Next Descriptor Pointer @0x00
+ * @next_descmsb: Next Descriptor Pointer MSB @0x04
+ * @src_addr: Source address @0x08
+ * @src_addrmsb: Source address MSB @0x0C
+ * @dest_addr: Destination address @0x10
+ * @dest_addrmsb: Destination address MSB @0x14
+ * @control: Control field @0x18
+ * @status: Status field @0x1C
+ */
+struct xilinx_cdma_desc_hw {
+ u32 next_desc;
+ u32 next_desc_msb;
+ u32 src_addr;
+ u32 src_addr_msb;
+ u32 dest_addr;
+ u32 dest_addr_msb;
+ u32 control;
+ u32 status;
+} __aligned(64);
+
+/**
+ * struct xilinx_vdma_tx_segment - Descriptor segment
+ * @hw: Hardware descriptor
+ * @node: Node in the descriptor segments list
+ * @phys: Physical address of segment
+ */
+struct xilinx_vdma_tx_segment {
+ struct xilinx_vdma_desc_hw hw;
+ struct list_head node;
+ dma_addr_t phys;
+} __aligned(64);
+
+/**
+ * struct xilinx_axidma_tx_segment - Descriptor segment
+ * @hw: Hardware descriptor
+ * @node: Node in the descriptor segments list
+ * @phys: Physical address of segment
+ */
+struct xilinx_axidma_tx_segment {
+ struct xilinx_axidma_desc_hw hw;
+ struct list_head node;
+ dma_addr_t phys;
+} __aligned(64);
+
+/**
+ * struct xilinx_cdma_tx_segment - Descriptor segment
+ * @hw: Hardware descriptor
+ * @node: Node in the descriptor segments list
+ * @phys: Physical address of segment
+ */
+struct xilinx_cdma_tx_segment {
+ struct xilinx_cdma_desc_hw hw;
+ struct list_head node;
+ dma_addr_t phys;
+} __aligned(64);
+
+/**
+ * struct xilinx_dma_tx_descriptor - Per Transaction structure
+ * @async_tx: Async transaction descriptor
+ * @segments: TX segments list
+ * @node: Node in the channel descriptors list
+ * @cyclic: Whether this descriptor describes a cyclic transfer
+ */
+struct xilinx_dma_tx_descriptor {
+ struct dma_async_tx_descriptor async_tx;
+ struct list_head segments;
+ struct list_head node;
+ bool cyclic;
+};
+
+/**
+ * struct xilinx_dma_chan - Driver specific DMA channel structure
+ * @xdev: Driver specific device structure
+ * @ctrl_offset: Control registers offset
+ * @desc_offset: TX descriptor registers offset
+ * @lock: Descriptor operation lock
+ * @pending_list: Descriptors waiting
+ * @active_list: Descriptors ready to submit
+ * @done_list: Complete descriptors
+ * @common: DMA common channel
+ * @desc_pool: Descriptors pool
+ * @dev: The dma device
+ * @irq: Channel IRQ
+ * @id: Channel ID
+ * @direction: Transfer direction
+ * @num_frms: Number of frames
+ * @has_sg: Support scatter transfers
+ * @cyclic: Whether a cyclic transfer is active on the channel
+ * @genlock: Support genlock mode
+ * @err: Channel has errors
+ * @tasklet: Cleanup work after irq
+ * @config: Device configuration info
+ * @flush_on_fsync: Flush on Frame sync
+ * @desc_pendingcount: Descriptor pending count
+ * @ext_addr: Indicates 64 bit addressing is supported by dma channel
+ * @desc_submitcount: Descriptor h/w submitted count
+ * @residue: Residue for AXI DMA
+ * @seg_v: Statically allocated segments base
+ * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
+ * @start_transfer: Per DMA IP callback that starts a transfer on the channel
+ * @tdest: TDEST value used to route multi-channel AXI DMA transfers
+ */
+struct xilinx_dma_chan {
+ struct xilinx_dma_device *xdev;
+ u32 ctrl_offset;
+ u32 desc_offset;
+ spinlock_t lock;
+ struct list_head pending_list;
+ struct list_head active_list;
+ struct list_head done_list;
+ struct dma_chan common;
+ struct dma_pool *desc_pool;
+ struct device *dev;
+ int irq;
+ int id;
+ enum dma_transfer_direction direction;
+ int num_frms;
+ bool has_sg;
+ bool cyclic;
+ bool genlock;
+ bool err;
+ struct tasklet_struct tasklet;
+ struct xilinx_vdma_config config;
+ bool flush_on_fsync;
+ u32 desc_pendingcount;
+ bool ext_addr;
+ u32 desc_submitcount;
+ u32 residue;
+ struct xilinx_axidma_tx_segment *seg_v;
+ struct xilinx_axidma_tx_segment *cyclic_seg_v;
+ void (*start_transfer)(struct xilinx_dma_chan *chan);
+ u16 tdest;
+};
+
+/**
+ * struct xilinx_dma_config - DMA IP configuration
+ * @dmatype: DMA IP type (AXI DMA, AXI CDMA or AXI VDMA)
+ * @clk_init: DMA IP specific clock initialization callback
+ */
+struct xilinx_dma_config {
+ enum xdma_ip_type dmatype;
+ int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
+ struct clk **tx_clk, struct clk **txs_clk,
+ struct clk **rx_clk, struct clk **rxs_clk);
+};
+
+/**
+ * struct xilinx_dma_device - DMA device structure
+ * @regs: I/O mapped base address
+ * @dev: Device Structure
+ * @common: DMA device structure
+ * @chan: Driver specific DMA channel
+ * @has_sg: Specifies whether Scatter-Gather is present or not
+ * @mcdma: Specifies whether Multi-Channel is present or not
+ * @flush_on_fsync: Flush on frame sync
+ * @ext_addr: Indicates 64 bit addressing is supported by dma device
+ * @pdev: Platform device structure pointer
+ * @dma_config: DMA config structure
+ * @axi_clk: DMA Axi4-lite interface clock
+ * @tx_clk: DMA mm2s clock
+ * @txs_clk: DMA mm2s stream clock
+ * @rx_clk: DMA s2mm clock
+ * @rxs_clk: DMA s2mm stream clock
+ * @nr_channels: Number of channels DMA device supports
+ * @chan_id: DMA channel identifier
+ */
+struct xilinx_dma_device {
+ void __iomem *regs;
+ struct device *dev;
+ struct dma_device common;
+ struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
+ bool has_sg;
+ bool mcdma;
+ u32 flush_on_fsync;
+ bool ext_addr;
+ struct platform_device *pdev;
+ const struct xilinx_dma_config *dma_config;
+ struct clk *axi_clk;
+ struct clk *tx_clk;
+ struct clk *txs_clk;
+ struct clk *rx_clk;
+ struct clk *rxs_clk;
+ u32 nr_channels;
+ u32 chan_id;
+};
+
+/* Macros */
+#define to_xilinx_chan(chan) \
+ container_of(chan, struct xilinx_dma_chan, common)
+#define to_dma_tx_descriptor(tx) \
+ container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
+#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
+ readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
+ cond, delay_us, timeout_us)
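+
+/*
+ * Illustration (not part of the driver): a call such as
+ *
+ *	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
+ *				      val & XILINX_DMA_DMASR_HALTED, 0,
+ *				      XILINX_DMA_LOOP_COUNT);
+ *
+ * expands to readl_poll_timeout() on the channel's control register window
+ * and returns 0 once the condition holds, or -ETIMEDOUT after timeout_us
+ * microseconds.
+ */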
+
+/* IO accessors */
+static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
+{
+ return ioread32(chan->xdev->regs + reg);
+}
+
+static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
+{
+ iowrite32(value, chan->xdev->regs + reg);
+}
+
+static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
+ u32 value)
+{
+ dma_write(chan, chan->desc_offset + reg, value);
+}
+
+static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
+{
+ return dma_read(chan, chan->ctrl_offset + reg);
+}
+
+static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
+ u32 value)
+{
+ dma_write(chan, chan->ctrl_offset + reg, value);
+}
+
+static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
+ u32 clr)
+{
+ dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
+}
+
+static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
+ u32 set)
+{
+ dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
+}
+
+/**
+ * vdma_desc_write_64 - 64-bit descriptor write
+ * @chan: Driver specific VDMA channel
+ * @reg: Register to write
+ * @value_lsb: lower address of the descriptor.
+ * @value_msb: upper address of the descriptor.
+ *
+ * Since the vdma driver may write to a register offset that is not a
+ * multiple of 64 bits (e.g. 0x5c), the address is written as two separate
+ * 32-bit writes instead of a single 64-bit register write.
+ */
+static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
+ u32 value_lsb, u32 value_msb)
+{
+ /* Write the lsb 32 bits */
+ writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);
+
+ /* Write the msb 32 bits */
+ writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
+}
+
+static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
+{
+ lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
+}
+
+static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
+ dma_addr_t addr)
+{
+ if (chan->ext_addr)
+ dma_writeq(chan, reg, addr);
+ else
+ dma_ctrl_write(chan, reg, addr);
+}
+
+static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
+ struct xilinx_axidma_desc_hw *hw,
+ dma_addr_t buf_addr, size_t sg_used,
+ size_t period_len)
+{
+ if (chan->ext_addr) {
+ hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
+ hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
+ period_len);
+ } else {
+ hw->buf_addr = buf_addr + sg_used + period_len;
+ }
+}
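+
+/*
+ * Illustration (not part of the driver): with ext_addr set, a 40-bit
+ * buffer address such as 0x08_1234_5678 is split by the helper above:
+ *
+ *	lower_32_bits(0x0812345678ULL) == 0x12345678 -> hw->buf_addr
+ *	upper_32_bits(0x0812345678ULL) == 0x00000008 -> hw->buf_addr_msb
+ *
+ * Without ext_addr, only the low 32 bits are programmed.
+ */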
+
+/* -----------------------------------------------------------------------------
+ * Descriptors and segments alloc and free
+ */
+
+/**
+ * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
+ * @chan: Driver specific DMA channel
+ *
+ * Return: The allocated segment on success and NULL on failure.
+ */
+static struct xilinx_vdma_tx_segment *
+xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
+{
+ struct xilinx_vdma_tx_segment *segment;
+ dma_addr_t phys;
+
+ segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
+ if (!segment)
+ return NULL;
+
+ segment->phys = phys;
+
+ return segment;
+}
+
+/**
+ * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
+ * @chan: Driver specific DMA channel
+ *
+ * Return: The allocated segment on success and NULL on failure.
+ */
+static struct xilinx_cdma_tx_segment *
+xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
+{
+ struct xilinx_cdma_tx_segment *segment;
+ dma_addr_t phys;
+
+ segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
+ if (!segment)
+ return NULL;
+
+ segment->phys = phys;
+
+ return segment;
+}
+
+/**
+ * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
+ * @chan: Driver specific DMA channel
+ *
+ * Return: The allocated segment on success and NULL on failure.
+ */
+static struct xilinx_axidma_tx_segment *
+xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
+{
+ struct xilinx_axidma_tx_segment *segment;
+ dma_addr_t phys;
+
+ segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
+ if (!segment)
+ return NULL;
+
+ segment->phys = phys;
+
+ return segment;
+}
+
+/**
+ * xilinx_dma_free_tx_segment - Free transaction segment
+ * @chan: Driver specific DMA channel
+ * @segment: DMA transaction segment
+ */
+static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
+ struct xilinx_axidma_tx_segment *segment)
+{
+ dma_pool_free(chan->desc_pool, segment, segment->phys);
+}
+
+/**
+ * xilinx_cdma_free_tx_segment - Free transaction segment
+ * @chan: Driver specific DMA channel
+ * @segment: DMA transaction segment
+ */
+static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
+ struct xilinx_cdma_tx_segment *segment)
+{
+ dma_pool_free(chan->desc_pool, segment, segment->phys);
+}
+
+/**
+ * xilinx_vdma_free_tx_segment - Free transaction segment
+ * @chan: Driver specific DMA channel
+ * @segment: DMA transaction segment
+ */
+static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
+ struct xilinx_vdma_tx_segment *segment)
+{
+ dma_pool_free(chan->desc_pool, segment, segment->phys);
+}
+
+/**
+ * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
+ * @chan: Driver specific DMA channel
+ *
+ * Return: The allocated descriptor on success and NULL on failure.
+ */
+static struct xilinx_dma_tx_descriptor *
+xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
+{
+ struct xilinx_dma_tx_descriptor *desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return NULL;
+
+ INIT_LIST_HEAD(&desc->segments);
+
+ return desc;
+}
+
+/**
+ * xilinx_dma_free_tx_descriptor - Free transaction descriptor
+ * @chan: Driver specific DMA channel
+ * @desc: DMA transaction descriptor
+ */
+static void
+xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
+ struct xilinx_dma_tx_descriptor *desc)
+{
+ struct xilinx_vdma_tx_segment *segment, *next;
+ struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
+ struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
+
+ if (!desc)
+ return;
+
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+ list_for_each_entry_safe(segment, next, &desc->segments, node) {
+ list_del(&segment->node);
+ xilinx_vdma_free_tx_segment(chan, segment);
+ }
+ } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
+ list_for_each_entry_safe(cdma_segment, cdma_next,
+ &desc->segments, node) {
+ list_del(&cdma_segment->node);
+ xilinx_cdma_free_tx_segment(chan, cdma_segment);
+ }
+ } else {
+ list_for_each_entry_safe(axidma_segment, axidma_next,
+ &desc->segments, node) {
+ list_del(&axidma_segment->node);
+ xilinx_dma_free_tx_segment(chan, axidma_segment);
+ }
+ }
+
+ kfree(desc);
+}
+
+/* Required functions */
+
+/**
+ * xilinx_dma_free_desc_list - Free descriptors list
+ * @chan: Driver specific DMA channel
+ * @list: List to parse and delete the descriptor
+ */
+static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
+ struct list_head *list)
+{
+ struct xilinx_dma_tx_descriptor *desc, *next;
+
+ list_for_each_entry_safe(desc, next, list, node) {
+ list_del(&desc->node);
+ xilinx_dma_free_tx_descriptor(chan, desc);
+ }
+}
+
+/**
+ * xilinx_dma_free_descriptors - Free channel descriptors
+ * @chan: Driver specific DMA channel
+ */
+static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ xilinx_dma_free_desc_list(chan, &chan->pending_list);
+ xilinx_dma_free_desc_list(chan, &chan->done_list);
+ xilinx_dma_free_desc_list(chan, &chan->active_list);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_dma_free_chan_resources - Free channel resources
+ * @dchan: DMA channel
+ */
+static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+
+ dev_dbg(chan->dev, "Free all channel resources.\n");
+
+ xilinx_dma_free_descriptors(chan);
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+ xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v);
+ xilinx_dma_free_tx_segment(chan, chan->seg_v);
+ }
+ dma_pool_destroy(chan->desc_pool);
+ chan->desc_pool = NULL;
+}
+
+/**
+ * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
+ * @chan: Driver specific dma channel
+ * @desc: dma transaction descriptor
+ * @flags: flags for spin lock
+ */
+static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
+ struct xilinx_dma_tx_descriptor *desc,
+ unsigned long *flags)
+{
+ dma_async_tx_callback callback;
+ void *callback_param;
+
+ callback = desc->async_tx.callback;
+ callback_param = desc->async_tx.callback_param;
+ if (callback) {
+ spin_unlock_irqrestore(&chan->lock, *flags);
+ callback(callback_param);
+ spin_lock_irqsave(&chan->lock, *flags);
+ }
+}
+
+/**
+ * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
+ * @chan: Driver specific DMA channel
+ */
+static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
+{
+ struct xilinx_dma_tx_descriptor *desc, *next;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ list_for_each_entry_safe(desc, next, &chan->done_list, node) {
+ dma_async_tx_callback callback;
+ void *callback_param;
+
+ if (desc->cyclic) {
+ xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
+ break;
+ }
+
+ /* Remove from the list of running transactions */
+ list_del(&desc->node);
+
+ /* Run the link descriptor callback function */
+ callback = desc->async_tx.callback;
+ callback_param = desc->async_tx.callback_param;
+ if (callback) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ callback(callback_param);
+ spin_lock_irqsave(&chan->lock, flags);
+ }
+
+ /* Run any dependencies, then free the descriptor */
+ dma_run_dependencies(&desc->async_tx);
+ xilinx_dma_free_tx_descriptor(chan, desc);
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_dma_do_tasklet - Schedule completion tasklet
+ * @data: Pointer to the Xilinx DMA channel structure
+ */
+static void xilinx_dma_do_tasklet(unsigned long data)
+{
+ struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;
+
+ xilinx_dma_chan_desc_cleanup(chan);
+}
+
+/**
+ * xilinx_dma_alloc_chan_resources - Allocate channel resources
+ * @dchan: DMA channel
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+
+ /* Has this channel already been allocated? */
+ if (chan->desc_pool)
+ return 0;
+
+ /*
+ * We need the descriptor to be aligned to 64 bytes
+ * to meet the Xilinx VDMA specification requirement.
+ */
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+ chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool",
+ chan->dev,
+ sizeof(struct xilinx_axidma_tx_segment),
+ __alignof__(struct xilinx_axidma_tx_segment),
+ 0);
+ } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
+ chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
+ chan->dev,
+ sizeof(struct xilinx_cdma_tx_segment),
+ __alignof__(struct xilinx_cdma_tx_segment),
+ 0);
+ } else {
+ chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
+ chan->dev,
+ sizeof(struct xilinx_vdma_tx_segment),
+ __alignof__(struct xilinx_vdma_tx_segment),
+ 0);
+ }
+
+ if (!chan->desc_pool) {
+ dev_err(chan->dev,
+ "unable to allocate channel %d descriptor pool\n",
+ chan->id);
+ return -ENOMEM;
+ }
+
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+ /*
+ * For AXI DMA case after submitting a pending_list, keep
+ * an extra segment allocated so that the "next descriptor"
+ * pointer on the tail descriptor always points to a
+ * valid descriptor, even when paused after reaching taildesc.
+ * This way, it is possible to issue additional
+ * transfers without halting and restarting the channel.
+ */
+ chan->seg_v = xilinx_axidma_alloc_tx_segment(chan);
+
+ /*
+ * For cyclic DMA mode we need to program the tail Descriptor
+ * register with a value which is not a part of the BD chain
+ * so allocating a desc segment during channel allocation for
+ * programming tail descriptor.
+ */
+ chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan);
+ }
+
+ dma_cookie_init(dchan);
+
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+ /* For AXI DMA, resetting one channel also resets the
+ * other channel, so enable the interrupts here.
+ */
+ dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+ XILINX_DMA_DMAXR_ALL_IRQ_MASK);
+ }
+
+ if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
+ dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+ XILINX_CDMA_CR_SGMODE);
+
+ return 0;
+}
+
+/**
+ * xilinx_dma_tx_status - Get DMA transaction status
+ * @dchan: DMA channel
+ * @cookie: Transaction identifier
+ * @txstate: Transaction state
+ *
+ * Return: DMA transaction status
+ */
+static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dma_tx_descriptor *desc;
+ struct xilinx_axidma_tx_segment *segment;
+ struct xilinx_axidma_desc_hw *hw;
+ enum dma_status ret;
+ unsigned long flags;
+ u32 residue = 0;
+
+ ret = dma_cookie_status(dchan, cookie, txstate);
+ if (ret == DMA_COMPLETE || !txstate)
+ return ret;
+
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+ spin_lock_irqsave(&chan->lock, flags);
+
+ desc = list_last_entry(&chan->active_list,
+ struct xilinx_dma_tx_descriptor, node);
+ if (chan->has_sg) {
+ list_for_each_entry(segment, &desc->segments, node) {
+ hw = &segment->hw;
+ residue += (hw->control - hw->status) &
+ XILINX_DMA_MAX_TRANS_LEN;
+ }
+ }
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ chan->residue = residue;
+ dma_set_residue(txstate, chan->residue);
+ }
+
+ return ret;
+}
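+
+/*
+ * Usage sketch (illustrative, not part of the driver): a dmaengine client
+ * would read the state reported above through the generic API, e.g.
+ *
+ *	struct dma_tx_state state;
+ *	enum dma_status status;
+ *
+ *	status = dmaengine_tx_status(chan, cookie, &state);
+ *	if (status == DMA_IN_PROGRESS)
+ *		pr_debug("residue: %u bytes\n", state.residue);
+ *
+ * where chan and cookie come from the client's earlier prep/submit calls.
+ */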
+
+/**
+ * xilinx_dma_is_running - Check if DMA channel is running
+ * @chan: Driver specific DMA channel
+ *
+ * Return: '1' if running, '0' if not.
+ */
+static bool xilinx_dma_is_running(struct xilinx_dma_chan *chan)
+{
+ return !(dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
+ XILINX_DMA_DMASR_HALTED) &&
+ (dma_ctrl_read(chan, XILINX_DMA_REG_DMACR) &
+ XILINX_DMA_DMACR_RUNSTOP);
+}
+
+/**
+ * xilinx_dma_is_idle - Check if DMA channel is idle
+ * @chan: Driver specific DMA channel
+ *
+ * Return: '1' if idle, '0' if not.
+ */
+static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan)
+{
+ return dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
+ XILINX_DMA_DMASR_IDLE;
+}
+
+/**
+ * xilinx_dma_halt - Halt DMA channel
+ * @chan: Driver specific DMA channel
+ */
+static void xilinx_dma_halt(struct xilinx_dma_chan *chan)
+{
+ int err;
+ u32 val;
+
+ dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
+
+ /* Wait for the hardware to halt */
+ err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
+ (val & XILINX_DMA_DMASR_HALTED), 0,
+ XILINX_DMA_LOOP_COUNT);
+
+ if (err) {
+ dev_err(chan->dev, "Cannot stop channel %p: %x\n",
+ chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
+ chan->err = true;
+ }
+}
+
+/**
+ * xilinx_dma_start - Start DMA channel
+ * @chan: Driver specific DMA channel
+ */
+static void xilinx_dma_start(struct xilinx_dma_chan *chan)
+{
+ int err;
+ u32 val;
+
+ dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
+
+ /* Wait for the hardware to start */
+ err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
+ !(val & XILINX_DMA_DMASR_HALTED), 0,
+ XILINX_DMA_LOOP_COUNT);
+
+ if (err) {
+ dev_err(chan->dev, "Cannot start channel %p: %x\n",
+ chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
+
+ chan->err = true;
+ }
+}
+
+/**
+ * xilinx_vdma_start_transfer - Starts VDMA transfer
+ * @chan: Driver specific channel struct pointer
+ */
+static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
+{
+ struct xilinx_vdma_config *config = &chan->config;
+ struct xilinx_dma_tx_descriptor *desc, *tail_desc;
+ u32 reg;
+ struct xilinx_vdma_tx_segment *tail_segment;
+
+ /* This function was invoked with lock held */
+ if (chan->err)
+ return;
+
+ if (list_empty(&chan->pending_list))
+ return;
+
+ desc = list_first_entry(&chan->pending_list,
+ struct xilinx_dma_tx_descriptor, node);
+ tail_desc = list_last_entry(&chan->pending_list,
+ struct xilinx_dma_tx_descriptor, node);
+
+ tail_segment = list_last_entry(&tail_desc->segments,
+ struct xilinx_vdma_tx_segment, node);
+
+ /* If it is SG mode and hardware is busy, cannot submit */
+ if (chan->has_sg && xilinx_dma_is_running(chan) &&
+ !xilinx_dma_is_idle(chan)) {
+ dev_dbg(chan->dev, "DMA controller still busy\n");
+ return;
+ }
+
+ /*
+ * If hardware is idle, then all descriptors on the running lists are
+ * done; start new transfers
+ */
+ if (chan->has_sg)
+ dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
+ desc->async_tx.phys);
+
+ /* Configure the hardware using info in the config structure */
+ reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
+
+ if (config->frm_cnt_en)
+ reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
+ else
+ reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
+
+ /* Configure the number of frame buffers the channel may use */
+ dma_ctrl_write(chan, XILINX_DMA_REG_FRMSTORE,
+ chan->desc_pendingcount);
+
+ /*
+ * With SG, start with circular mode, so that BDs can be fetched.
+ * In direct register mode, if not parking, enable circular mode
+ */
+ if (chan->has_sg || !config->park)
+ reg |= XILINX_DMA_DMACR_CIRC_EN;
+
+ if (config->park)
+ reg &= ~XILINX_DMA_DMACR_CIRC_EN;
+
+ dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+
+ if (config->park && (config->park_frm >= 0) &&
+ (config->park_frm < chan->num_frms)) {
+ if (chan->direction == DMA_MEM_TO_DEV)
+ dma_write(chan, XILINX_DMA_REG_PARK_PTR,
+ config->park_frm <<
+ XILINX_DMA_PARK_PTR_RD_REF_SHIFT);
+ else
+ dma_write(chan, XILINX_DMA_REG_PARK_PTR,
+ config->park_frm <<
+ XILINX_DMA_PARK_PTR_WR_REF_SHIFT);
+ }
+
+ /* Start the hardware */
+ xilinx_dma_start(chan);
+
+ if (chan->err)
+ return;
+
+ /* Start the transfer */
+ if (chan->has_sg) {
+ dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
+ tail_segment->phys);
+ } else {
+ struct xilinx_vdma_tx_segment *segment, *last = NULL;
+ int i = 0;
+
+ if (chan->desc_submitcount < chan->num_frms)
+ i = chan->desc_submitcount;
+
+ list_for_each_entry(segment, &desc->segments, node) {
+ if (chan->ext_addr)
+ vdma_desc_write_64(chan,
+ XILINX_VDMA_REG_START_ADDRESS_64(i++),
+ segment->hw.buf_addr,
+ segment->hw.buf_addr_msb);
+ else
+ vdma_desc_write(chan,
+ XILINX_VDMA_REG_START_ADDRESS(i++),
+ segment->hw.buf_addr);
+
+ last = segment;
+ }
+
+ if (!last)
+ return;
+
+ /* HW expects these parameters to be the same for one transaction */
+ vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
+ vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
+ last->hw.stride);
+ vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
+ }
+
+ if (!chan->has_sg) {
+ list_del(&desc->node);
+ list_add_tail(&desc->node, &chan->active_list);
+ chan->desc_submitcount++;
+ chan->desc_pendingcount--;
+ if (chan->desc_submitcount == chan->num_frms)
+ chan->desc_submitcount = 0;
+ } else {
+ list_splice_tail_init(&chan->pending_list, &chan->active_list);
+ chan->desc_pendingcount = 0;
+ }
+}
+
+/**
+ * xilinx_cdma_start_transfer - Starts cdma transfer
+ * @chan: Driver specific channel struct pointer
+ */
+static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
+{
+ struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
+ struct xilinx_cdma_tx_segment *tail_segment;
+ u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
+
+ if (chan->err)
+ return;
+
+ if (list_empty(&chan->pending_list))
+ return;
+
+ head_desc = list_first_entry(&chan->pending_list,
+ struct xilinx_dma_tx_descriptor, node);
+ tail_desc = list_last_entry(&chan->pending_list,
+ struct xilinx_dma_tx_descriptor, node);
+ tail_segment = list_last_entry(&tail_desc->segments,
+ struct xilinx_cdma_tx_segment, node);
+
+ if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
+ ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
+ ctrl_reg |= chan->desc_pendingcount <<
+ XILINX_DMA_CR_COALESCE_SHIFT;
+ dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
+ }
+
+ if (chan->has_sg) {
+ xilinx_write(chan, XILINX_DMA_REG_CURDESC,
+ head_desc->async_tx.phys);
+
+ /* Update tail ptr register which will start the transfer */
+ xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
+ tail_segment->phys);
+ } else {
+ /* In simple mode */
+ struct xilinx_cdma_tx_segment *segment;
+ struct xilinx_cdma_desc_hw *hw;
+
+ segment = list_first_entry(&head_desc->segments,
+ struct xilinx_cdma_tx_segment,
+ node);
+
+ hw = &segment->hw;
+
+ xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr);
+ xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr);
+
+ /* Start the transfer */
+ dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
+ hw->control & XILINX_DMA_MAX_TRANS_LEN);
+ }
+
+ list_splice_tail_init(&chan->pending_list, &chan->active_list);
+ chan->desc_pendingcount = 0;
+}
+
+/**
+ * xilinx_dma_start_transfer - Starts DMA transfer
+ * @chan: Driver specific channel struct pointer
+ */
+static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
+{
+ struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
+ struct xilinx_axidma_tx_segment *tail_segment, *old_head, *new_head;
+ u32 reg;
+
+ if (chan->err)
+ return;
+
+ if (list_empty(&chan->pending_list))
+ return;
+
+ /* If it is SG mode and hardware is busy, cannot submit */
+ if (chan->has_sg && xilinx_dma_is_running(chan) &&
+ !xilinx_dma_is_idle(chan)) {
+ dev_dbg(chan->dev, "DMA controller still busy\n");
+ return;
+ }
+
+ head_desc = list_first_entry(&chan->pending_list,
+ struct xilinx_dma_tx_descriptor, node);
+ tail_desc = list_last_entry(&chan->pending_list,
+ struct xilinx_dma_tx_descriptor, node);
+ tail_segment = list_last_entry(&tail_desc->segments,
+ struct xilinx_axidma_tx_segment, node);
+
+ if (chan->has_sg && !chan->xdev->mcdma) {
+ old_head = list_first_entry(&head_desc->segments,
+ struct xilinx_axidma_tx_segment, node);
+ new_head = chan->seg_v;
+ /* Copy Buffer Descriptor fields. */
+ new_head->hw = old_head->hw;
+
+ /* Swap and save new reserve */
+ list_replace_init(&old_head->node, &new_head->node);
+ chan->seg_v = old_head;
+
+ tail_segment->hw.next_desc = chan->seg_v->phys;
+ head_desc->async_tx.phys = new_head->phys;
+ }
+
+ reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
+
+ if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
+ reg &= ~XILINX_DMA_CR_COALESCE_MAX;
+ reg |= chan->desc_pendingcount <<
+ XILINX_DMA_CR_COALESCE_SHIFT;
+ dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+ }
+
+ if (chan->has_sg && !chan->xdev->mcdma)
+ xilinx_write(chan, XILINX_DMA_REG_CURDESC,
+ head_desc->async_tx.phys);
+
+ if (chan->has_sg && chan->xdev->mcdma) {
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
+ head_desc->async_tx.phys);
+ } else {
+ if (!chan->tdest) {
+ dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
+ head_desc->async_tx.phys);
+ } else {
+ dma_ctrl_write(chan,
+ XILINX_DMA_MCRX_CDESC(chan->tdest),
+ head_desc->async_tx.phys);
+ }
+ }
+ }
+
+ xilinx_dma_start(chan);
+
+ if (chan->err)
+ return;
+
+ /* Start the transfer */
+ if (chan->has_sg && !chan->xdev->mcdma) {
+ if (chan->cyclic)
+ xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
+ chan->cyclic_seg_v->phys);
+ else
+ xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
+ tail_segment->phys);
+ } else if (chan->has_sg && chan->xdev->mcdma) {
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
+ tail_segment->phys);
+ } else {
+ if (!chan->tdest) {
+ dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
+ tail_segment->phys);
+ } else {
+ dma_ctrl_write(chan,
+ XILINX_DMA_MCRX_TDESC(chan->tdest),
+ tail_segment->phys);
+ }
+ }
+ } else {
+ struct xilinx_axidma_tx_segment *segment;
+ struct xilinx_axidma_desc_hw *hw;
+
+ segment = list_first_entry(&head_desc->segments,
+ struct xilinx_axidma_tx_segment,
+ node);
+ hw = &segment->hw;
+
+ xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);
+
+ /* Start the transfer */
+ dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
+ hw->control & XILINX_DMA_MAX_TRANS_LEN);
+ }
+
+ list_splice_tail_init(&chan->pending_list, &chan->active_list);
+ chan->desc_pendingcount = 0;
+}
+
+/**
+ * xilinx_dma_issue_pending - Issue pending transactions
+ * @dchan: DMA channel
+ */
+static void xilinx_dma_issue_pending(struct dma_chan *dchan)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ chan->start_transfer(chan);
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/**
+ * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
+ * @chan: Xilinx DMA channel
+ *
+ * CONTEXT: hardirq
+ */
+static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
+{
+ struct xilinx_dma_tx_descriptor *desc, *next;
+
+ /* This function was invoked with lock held */
+ if (list_empty(&chan->active_list))
+ return;
+
+ list_for_each_entry_safe(desc, next, &chan->active_list, node) {
+ list_del(&desc->node);
+ if (!desc->cyclic)
+ dma_cookie_complete(&desc->async_tx);
+ list_add_tail(&desc->node, &chan->done_list);
+ }
+}
+
+/**
+ * xilinx_dma_reset - Reset DMA channel
+ * @chan: Driver specific DMA channel
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
+{
+ int err;
+ u32 tmp;
+
+ dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
+
+ /* Wait for the hardware to finish reset */
+ err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
+ !(tmp & XILINX_DMA_DMACR_RESET), 0,
+ XILINX_DMA_LOOP_COUNT);
+
+ if (err) {
+ dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
+ dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
+ dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
+ return -ETIMEDOUT;
+ }
+
+ chan->err = false;
+
+ return err;
+}
+
+/**
+ * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
+ * @chan: Driver specific DMA channel
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
+{
+ int err;
+
+ /* Reset VDMA */
+ err = xilinx_dma_reset(chan);
+ if (err)
+ return err;
+
+ /* Enable interrupts */
+ dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+ XILINX_DMA_DMAXR_ALL_IRQ_MASK);
+
+ return 0;
+}
+
+/**
+ * xilinx_dma_irq_handler - DMA Interrupt handler
+ * @irq: IRQ number
+ * @data: Pointer to the Xilinx DMA channel structure
+ *
+ * Return: IRQ_HANDLED/IRQ_NONE
+ */
+static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
+{
+ struct xilinx_dma_chan *chan = data;
+ u32 status;
+
+ /* Read the status and ack the interrupts. */
+ status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
+ if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
+ return IRQ_NONE;
+
+ dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
+ status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
+
+ if (status & XILINX_DMA_DMASR_ERR_IRQ) {
+ /*
+ * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
+ * error is recoverable, ignore it. Otherwise flag the error.
+ *
+ * Only recoverable errors can be cleared in the DMASR register;
+ * make sure not to set any other error bits to 1.
+ */
+ u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
+
+ dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
+ errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
+
+ if (!chan->flush_on_fsync ||
+ (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
+ dev_err(chan->dev,
+ "Channel %p has errors %x, cdr %x tdr %x\n",
+ chan, errors,
+ dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
+ dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
+ chan->err = true;
+ }
+ }
+
+ if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
+ /*
+ * The device took longer than the programmed inter-packet delay
+ * threshold to complete the transfer.
+ */
+ dev_dbg(chan->dev, "Inter-packet latency too long\n");
+ }
+
+ if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
+ spin_lock(&chan->lock);
+ xilinx_dma_complete_descriptor(chan);
+ chan->start_transfer(chan);
+ spin_unlock(&chan->lock);
+ }
+
+ tasklet_schedule(&chan->tasklet);
+ return IRQ_HANDLED;
+}
+
+/**
+ * append_desc_queue - Queue a transaction descriptor
+ * @chan: Driver specific dma channel
+ * @desc: dma transaction descriptor
+ */
+static void append_desc_queue(struct xilinx_dma_chan *chan,
+ struct xilinx_dma_tx_descriptor *desc)
+{
+ struct xilinx_vdma_tx_segment *tail_segment;
+ struct xilinx_dma_tx_descriptor *tail_desc;
+ struct xilinx_axidma_tx_segment *axidma_tail_segment;
+ struct xilinx_cdma_tx_segment *cdma_tail_segment;
+
+ if (list_empty(&chan->pending_list))
+ goto append;
+
+ /*
+ * Add the hardware descriptor to the chain of hardware descriptors
+ * that already exists in memory.
+ */
+ tail_desc = list_last_entry(&chan->pending_list,
+ struct xilinx_dma_tx_descriptor, node);
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+ tail_segment = list_last_entry(&tail_desc->segments,
+ struct xilinx_vdma_tx_segment,
+ node);
+ tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
+ } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
+ cdma_tail_segment = list_last_entry(&tail_desc->segments,
+ struct xilinx_cdma_tx_segment,
+ node);
+ cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
+ } else {
+ axidma_tail_segment = list_last_entry(&tail_desc->segments,
+ struct xilinx_axidma_tx_segment,
+ node);
+ axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
+ }
+
+ /*
+ * Add the software descriptor and all children to the list
+ * of pending transactions
+ */
+append:
+ list_add_tail(&desc->node, &chan->pending_list);
+ chan->desc_pendingcount++;
+
+ if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
+ && unlikely(chan->desc_pendingcount > chan->num_frms)) {
+ dev_dbg(chan->dev, "desc pendingcount is too high\n");
+ chan->desc_pendingcount = chan->num_frms;
+ }
+}
+
+/**
+ * xilinx_dma_tx_submit - Submit DMA transaction
+ * @tx: Async transaction descriptor
+ *
+ * Return: cookie value on success and failure value on error
+ */
+static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
+ struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
+ dma_cookie_t cookie;
+ unsigned long flags;
+ int err;
+
+ if (chan->cyclic) {
+ xilinx_dma_free_tx_descriptor(chan, desc);
+ return -EBUSY;
+ }
+
+ if (chan->err) {
+ /*
+ * If reset fails, need to hard reset the system.
+ * Channel is no longer functional
+ */
+ err = xilinx_dma_chan_reset(chan);
+ if (err < 0)
+ return err;
+ }
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ cookie = dma_cookie_assign(tx);
+
+ /* Put this transaction onto the tail of the pending queue */
+ append_desc_queue(chan, desc);
+
+ if (desc->cyclic)
+ chan->cyclic = true;
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return cookie;
+}
+
+/**
+ * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
+ * DMA_SLAVE transaction
+ * @dchan: DMA channel
+ * @xt: Interleaved template pointer
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
+ struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dma_tx_descriptor *desc;
+ struct xilinx_vdma_tx_segment *segment, *prev = NULL;
+ struct xilinx_vdma_desc_hw *hw;
+
+ if (!is_slave_direction(xt->dir))
+ return NULL;
+
+ if (!xt->numf || !xt->sgl[0].size)
+ return NULL;
+
+ if (xt->frame_size != 1)
+ return NULL;
+
+ /* Allocate a transaction descriptor. */
+ desc = xilinx_dma_alloc_tx_descriptor(chan);
+ if (!desc)
+ return NULL;
+
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+ async_tx_ack(&desc->async_tx);
+
+ /* Allocate the link descriptor from DMA pool */
+ segment = xilinx_vdma_alloc_tx_segment(chan);
+ if (!segment)
+ goto error;
+
+ /* Fill in the hardware descriptor */
+ hw = &segment->hw;
+ hw->vsize = xt->numf;
+ hw->hsize = xt->sgl[0].size;
+ hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
+ XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
+ hw->stride |= chan->config.frm_dly <<
+ XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
+
+ if (xt->dir != DMA_MEM_TO_DEV) {
+ if (chan->ext_addr) {
+ hw->buf_addr = lower_32_bits(xt->dst_start);
+ hw->buf_addr_msb = upper_32_bits(xt->dst_start);
+ } else {
+ hw->buf_addr = xt->dst_start;
+ }
+ } else {
+ if (chan->ext_addr) {
+ hw->buf_addr = lower_32_bits(xt->src_start);
+ hw->buf_addr_msb = upper_32_bits(xt->src_start);
+ } else {
+ hw->buf_addr = xt->src_start;
+ }
+ }
+
+ /* Insert the segment into the descriptor segments list. */
+ list_add_tail(&segment->node, &desc->segments);
+
+ prev = segment;
+
+ /* Link the last hardware descriptor with the first. */
+ segment = list_first_entry(&desc->segments,
+ struct xilinx_vdma_tx_segment, node);
+ desc->async_tx.phys = segment->phys;
+
+ return &desc->async_tx;
+
+error:
+ xilinx_dma_free_tx_descriptor(chan, desc);
+ return NULL;
+}
+
+/**
+ * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
+ * @dchan: DMA channel
+ * @dma_dst: destination address
+ * @dma_src: source address
+ * @len: transfer length
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
+ dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dma_tx_descriptor *desc;
+ struct xilinx_cdma_tx_segment *segment, *prev;
+ struct xilinx_cdma_desc_hw *hw;
+
+ if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
+ return NULL;
+
+ desc = xilinx_dma_alloc_tx_descriptor(chan);
+ if (!desc)
+ return NULL;
+
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+ /* Allocate the link descriptor from DMA pool */
+ segment = xilinx_cdma_alloc_tx_segment(chan);
+ if (!segment)
+ goto error;
+
+ hw = &segment->hw;
+ hw->control = len;
+ hw->src_addr = dma_src;
+ hw->dest_addr = dma_dst;
+ if (chan->ext_addr) {
+ hw->src_addr_msb = upper_32_bits(dma_src);
+ hw->dest_addr_msb = upper_32_bits(dma_dst);
+ }
+
+ /* Fill the previous next descriptor with current */
+ prev = list_last_entry(&desc->segments,
+ struct xilinx_cdma_tx_segment, node);
+ prev->hw.next_desc = segment->phys;
+
+ /* Insert the segment into the descriptor segments list. */
+ list_add_tail(&segment->node, &desc->segments);
+
+ prev = segment;
+
+ /* Link the last hardware descriptor with the first. */
+ segment = list_first_entry(&desc->segments,
+ struct xilinx_cdma_tx_segment, node);
+ desc->async_tx.phys = segment->phys;
+ prev->hw.next_desc = segment->phys;
+
+ return &desc->async_tx;
+
+error:
+ xilinx_dma_free_tx_descriptor(chan, desc);
+ return NULL;
+}
+
+/**
+ * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
+ * @dchan: DMA channel
+ * @sgl: scatterlist to transfer to/from
+ * @sg_len: number of entries in @sgl
+ * @direction: DMA direction
+ * @flags: transfer ack flags
+ * @context: APP words of the descriptor
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
+ struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction direction, unsigned long flags,
+ void *context)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dma_tx_descriptor *desc;
+ struct xilinx_axidma_tx_segment *segment = NULL, *prev = NULL;
+ u32 *app_w = (u32 *)context;
+ struct scatterlist *sg;
+ size_t copy;
+ size_t sg_used;
+ unsigned int i;
+
+ if (!is_slave_direction(direction))
+ return NULL;
+
+ /* Allocate a transaction descriptor. */
+ desc = xilinx_dma_alloc_tx_descriptor(chan);
+ if (!desc)
+ return NULL;
+
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+ /* Build transactions using information in the scatter gather list */
+ for_each_sg(sgl, sg, sg_len, i) {
+ sg_used = 0;
+
+ /* Loop until the entire scatterlist entry is used */
+ while (sg_used < sg_dma_len(sg)) {
+ struct xilinx_axidma_desc_hw *hw;
+
+ /* Get a free segment */
+ segment = xilinx_axidma_alloc_tx_segment(chan);
+ if (!segment)
+ goto error;
+
+ /*
+ * Calculate the maximum number of bytes to transfer,
+ * making sure it is less than the hw limit
+ */
+ copy = min_t(size_t, sg_dma_len(sg) - sg_used,
+ XILINX_DMA_MAX_TRANS_LEN);
+ hw = &segment->hw;
+
+ /* Fill in the descriptor */
+ xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
+ sg_used, 0);
+
+ hw->control = copy;
+
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ if (app_w)
+ memcpy(hw->app, app_w, sizeof(u32) *
+ XILINX_DMA_NUM_APP_WORDS);
+ }
+
+ if (prev)
+ prev->hw.next_desc = segment->phys;
+
+ prev = segment;
+ sg_used += copy;
+
+ /*
+ * Insert the segment into the descriptor segments
+ * list.
+ */
+ list_add_tail(&segment->node, &desc->segments);
+ }
+ }
+
+ segment = list_first_entry(&desc->segments,
+ struct xilinx_axidma_tx_segment, node);
+ desc->async_tx.phys = segment->phys;
+ prev->hw.next_desc = segment->phys;
+
+ /* For the last DMA_MEM_TO_DEV transfer, set EOP */
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ segment->hw.control |= XILINX_DMA_BD_SOP;
+ segment = list_last_entry(&desc->segments,
+ struct xilinx_axidma_tx_segment,
+ node);
+ segment->hw.control |= XILINX_DMA_BD_EOP;
+ }
+
+ return &desc->async_tx;
+
+error:
+ xilinx_dma_free_tx_descriptor(chan, desc);
+ return NULL;
+}
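+
+/*
+ * Usage sketch (illustrative, not part of the driver): a slave client of
+ * an AXI DMA channel would follow the usual dmaengine flow; chan, sgl,
+ * my_done_cb and my_ctx are assumed for the example:
+ *
+ *	struct dma_async_tx_descriptor *txd;
+ *	dma_cookie_t cookie;
+ *
+ *	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len,
+ *				      DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
+ *	if (!txd)
+ *		return -ENOMEM;
+ *	txd->callback = my_done_cb;
+ *	txd->callback_param = my_ctx;
+ *	cookie = dmaengine_submit(txd);	 (lands in xilinx_dma_tx_submit())
+ *	dma_async_issue_pending(chan);	 (kicks chan->start_transfer())
+ */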
+
+/**
+ * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
+ * @dchan: DMA channel
+ * @buf_addr: Physical address of the buffer
+ * @buf_len: Total length of the cyclic buffers
+ * @period_len: length of individual cyclic buffer
+ * @direction: DMA direction
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
+ struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dma_tx_descriptor *desc;
+ struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
+ size_t copy, sg_used;
+ unsigned int num_periods;
+ int i;
+ u32 reg;
+
+ if (!period_len)
+ return NULL;
+
+ num_periods = buf_len / period_len;
+
+ if (!num_periods)
+ return NULL;
+
+ if (!is_slave_direction(direction))
+ return NULL;
+
+ /* Allocate a transaction descriptor. */
+ desc = xilinx_dma_alloc_tx_descriptor(chan);
+ if (!desc)
+ return NULL;
+
+ chan->direction = direction;
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+ for (i = 0; i < num_periods; ++i) {
+ sg_used = 0;
+
+ while (sg_used < period_len) {
+ struct xilinx_axidma_desc_hw *hw;
+
+ /* Get a free segment */
+ segment = xilinx_axidma_alloc_tx_segment(chan);
+ if (!segment)
+ goto error;
+
+ /*
+ * Calculate the maximum number of bytes to transfer,
+ * making sure it is less than the hw limit
+ */
+ copy = min_t(size_t, period_len - sg_used,
+ XILINX_DMA_MAX_TRANS_LEN);
+ hw = &segment->hw;
+ xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
+ period_len * i);
+ hw->control = copy;
+
+ if (prev)
+ prev->hw.next_desc = segment->phys;
+
+ prev = segment;
+ sg_used += copy;
+
+ /*
+ * Insert the segment into the descriptor segments
+ * list.
+ */
+ list_add_tail(&segment->node, &desc->segments);
+ }
+ }
+
+ head_segment = list_first_entry(&desc->segments,
+ struct xilinx_axidma_tx_segment, node);
+ desc->async_tx.phys = head_segment->phys;
+
+ desc->cyclic = true;
+ reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
+ reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
+ dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+
+ segment = list_last_entry(&desc->segments,
+ struct xilinx_axidma_tx_segment,
+ node);
+ segment->hw.next_desc = (u32) head_segment->phys;
+
+ /* For the last DMA_MEM_TO_DEV transfer, set EOP */
+ if (direction == DMA_MEM_TO_DEV) {
+ head_segment->hw.control |= XILINX_DMA_BD_SOP;
+ segment->hw.control |= XILINX_DMA_BD_EOP;
+ }
+
+ return &desc->async_tx;
+
+error:
+ xilinx_dma_free_tx_descriptor(chan, desc);
+ return NULL;
+}
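+
+/*
+ * Usage sketch (illustrative, not part of the driver): cyclic mode suits
+ * audio-style ring buffers; a client might set one up as
+ *
+ *	txd = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period_len,
+ *					DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
+ *	if (txd) {
+ *		txd->callback = my_period_cb;	 (fires once per period)
+ *		txd->callback_param = my_ctx;
+ *		dmaengine_submit(txd);
+ *		dma_async_issue_pending(chan);
+ *	}
+ *
+ * and later stop it with dmaengine_terminate_all(chan), which ends up in
+ * xilinx_dma_terminate_all() below.
+ */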
+
+/**
+ * xilinx_dma_prep_interleaved - prepare a descriptor for a
+ * DMA_SLAVE transaction
+ * @dchan: DMA channel
+ * @xt: Interleaved template pointer
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+xilinx_dma_prep_interleaved(struct dma_chan *dchan,
+ struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dma_tx_descriptor *desc;
+ struct xilinx_axidma_tx_segment *segment;
+ struct xilinx_axidma_desc_hw *hw;
+
+ if (!is_slave_direction(xt->dir))
+ return NULL;
+
+ if (!xt->numf || !xt->sgl[0].size)
+ return NULL;
+
+ if (xt->frame_size != 1)
+ return NULL;
+
+ /* Allocate a transaction descriptor. */
+ desc = xilinx_dma_alloc_tx_descriptor(chan);
+ if (!desc)
+ return NULL;
+
+ chan->direction = xt->dir;
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+ /* Get a free segment */
+ segment = xilinx_axidma_alloc_tx_segment(chan);
+ if (!segment)
+ goto error;
+
+ hw = &segment->hw;
+
+ /* Fill in the descriptor */
+ if (xt->dir != DMA_MEM_TO_DEV)
+ hw->buf_addr = xt->dst_start;
+ else
+ hw->buf_addr = xt->src_start;
+
+ hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
+ hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
+ XILINX_DMA_BD_VSIZE_MASK;
+ hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
+ XILINX_DMA_BD_STRIDE_MASK;
+ hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;
+
+ /*
+ * Insert the segment into the descriptor segments
+ * list.
+ */
+ list_add_tail(&segment->node, &desc->segments);
+
+ segment = list_first_entry(&desc->segments,
+ struct xilinx_axidma_tx_segment, node);
+ desc->async_tx.phys = segment->phys;
+
+ /* For the last DMA_MEM_TO_DEV transfer, set EOP */
+ if (xt->dir == DMA_MEM_TO_DEV) {
+ segment->hw.control |= XILINX_DMA_BD_SOP;
+ segment = list_last_entry(&desc->segments,
+ struct xilinx_axidma_tx_segment,
+ node);
+ segment->hw.control |= XILINX_DMA_BD_EOP;
+ }
+
+ return &desc->async_tx;
+
+error:
+ xilinx_dma_free_tx_descriptor(chan, desc);
+ return NULL;
+}
+
+/**
+ * xilinx_dma_terminate_all - Halt the channel and free descriptors
+ * @dchan: Driver specific DMA Channel pointer
+ *
+ * Return: '0' always.
+ */
+static int xilinx_dma_terminate_all(struct dma_chan *dchan)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ u32 reg;
+
+ if (chan->cyclic)
+ xilinx_dma_chan_reset(chan);
+
+ /* Halt the DMA engine */
+ xilinx_dma_halt(chan);
+
+ /* Remove and free all of the descriptors in the lists */
+ xilinx_dma_free_descriptors(chan);
+
+ if (chan->cyclic) {
+ reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
+ reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
+ dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+ chan->cyclic = false;
+ }
+
+ return 0;
+}
+
+/**
+ * xilinx_vdma_channel_set_config - Configure VDMA channel
+ * Run-time configuration for Axi VDMA, supports:
+ * . halt the channel
+ * . configure interrupt coalescing and inter-packet delay threshold
+ * . start/stop parking
+ * . enable genlock
+ *
+ * @dchan: DMA channel
+ * @cfg: VDMA device configuration pointer
+ *
+ * Return: '0' on success and failure value on error
+ */
+int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
+ struct xilinx_vdma_config *cfg)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ u32 dmacr;
+
+ if (cfg->reset)
+ return xilinx_dma_chan_reset(chan);
+
+ dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
+
+ chan->config.frm_dly = cfg->frm_dly;
+ chan->config.park = cfg->park;
+
+ /* genlock settings */
+ chan->config.gen_lock = cfg->gen_lock;
+ chan->config.master = cfg->master;
+
+ if (cfg->gen_lock && chan->genlock) {
+ dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
+ dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
+ }
+
+ chan->config.frm_cnt_en = cfg->frm_cnt_en;
+ if (cfg->park)
+ chan->config.park_frm = cfg->park_frm;
+ else
+ chan->config.park_frm = -1;
+
+ chan->config.coalesc = cfg->coalesc;
+ chan->config.delay = cfg->delay;
+
+ if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
+ dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
+ chan->config.coalesc = cfg->coalesc;
+ }
+
+ if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
+ dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
+ chan->config.delay = cfg->delay;
+ }
+
+ /* FSync Source selection */
+ dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
+ dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
+
+ dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
+
+ return 0;
+}
+EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
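+
+/*
+ * Illustrative sketch (not part of this patch): a video client could tune
+ * the channel through the exported helper; all values are hypothetical.
+ *
+ *	struct xilinx_vdma_config cfg = {
+ *		.gen_lock = 1,
+ *		.master = 0,
+ *		.coalesc = 1,
+ *		.frm_dly = 0,
+ *	};
+ *
+ *	err = xilinx_vdma_channel_set_config(chan, &cfg);
+ */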
+
+/* -----------------------------------------------------------------------------
+ * Probe and remove
+ */
+
+/**
+ * xilinx_dma_chan_remove - Per Channel remove function
+ * @chan: Driver specific DMA channel
+ */
+static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
+{
+ /* Disable all interrupts */
+ dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
+ XILINX_DMA_DMAXR_ALL_IRQ_MASK);
+
+ if (chan->irq > 0)
+ free_irq(chan->irq, chan);
+
+ tasklet_kill(&chan->tasklet);
+
+ list_del(&chan->common.device_node);
+}
+
+static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
+ struct clk **tx_clk, struct clk **rx_clk,
+ struct clk **sg_clk, struct clk **tmp_clk)
+{
+ int err;
+
+ *tmp_clk = NULL;
+
+ *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
+ if (IS_ERR(*axi_clk)) {
+ err = PTR_ERR(*axi_clk);
+ dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err);
+ return err;
+ }
+
+ *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
+ if (IS_ERR(*tx_clk))
+ *tx_clk = NULL;
+
+ *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
+ if (IS_ERR(*rx_clk))
+ *rx_clk = NULL;
+
+ *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
+ if (IS_ERR(*sg_clk))
+ *sg_clk = NULL;
+
+ err = clk_prepare_enable(*axi_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(*tx_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
+ goto err_disable_axiclk;
+ }
+
+ err = clk_prepare_enable(*rx_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
+ goto err_disable_txclk;
+ }
+
+ err = clk_prepare_enable(*sg_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable sg_clk (%u)\n", err);
+ goto err_disable_rxclk;
+ }
+
+ return 0;
+
+err_disable_rxclk:
+ clk_disable_unprepare(*rx_clk);
+err_disable_txclk:
+ clk_disable_unprepare(*tx_clk);
+err_disable_axiclk:
+ clk_disable_unprepare(*axi_clk);
+
+ return err;
+}
+
+static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
+ struct clk **dev_clk, struct clk **tmp_clk,
+ struct clk **tmp1_clk, struct clk **tmp2_clk)
+{
+ int err;
+
+ *tmp_clk = NULL;
+ *tmp1_clk = NULL;
+ *tmp2_clk = NULL;
+
+ *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
+ if (IS_ERR(*axi_clk)) {
+ err = PTR_ERR(*axi_clk);
+ dev_err(&pdev->dev, "failed to get axi_clk (%u)\n", err);
+ return err;
+ }
+
+ *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
+ if (IS_ERR(*dev_clk)) {
+ err = PTR_ERR(*dev_clk);
+ dev_err(&pdev->dev, "failed to get dev_clk (%u)\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(*axi_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(*dev_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable dev_clk (%u)\n", err);
+ goto err_disable_axiclk;
+ }
+
+ return 0;
+
+err_disable_axiclk:
+ clk_disable_unprepare(*axi_clk);
+
+ return err;
+}
+
+static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
+ struct clk **tx_clk, struct clk **txs_clk,
+ struct clk **rx_clk, struct clk **rxs_clk)
+{
+ int err;
+
+ *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
+ if (IS_ERR(*axi_clk)) {
+ err = PTR_ERR(*axi_clk);
+ dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err);
+ return err;
+ }
+
+ *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
+ if (IS_ERR(*tx_clk))
+ *tx_clk = NULL;
+
+ *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
+ if (IS_ERR(*txs_clk))
+ *txs_clk = NULL;
+
+ *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
+ if (IS_ERR(*rx_clk))
+ *rx_clk = NULL;
+
+ *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
+ if (IS_ERR(*rxs_clk))
+ *rxs_clk = NULL;
+
+ err = clk_prepare_enable(*axi_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(*tx_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
+ goto err_disable_axiclk;
+ }
+
+ err = clk_prepare_enable(*txs_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable txs_clk (%u)\n", err);
+ goto err_disable_txclk;
+ }
+
+ err = clk_prepare_enable(*rx_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
+ goto err_disable_txsclk;
+ }
+
+ err = clk_prepare_enable(*rxs_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable rxs_clk (%u)\n", err);
+ goto err_disable_rxclk;
+ }
+
+ return 0;
+
+err_disable_rxclk:
+ clk_disable_unprepare(*rx_clk);
+err_disable_txsclk:
+ clk_disable_unprepare(*txs_clk);
+err_disable_txclk:
+ clk_disable_unprepare(*tx_clk);
+err_disable_axiclk:
+ clk_disable_unprepare(*axi_clk);
+
+ return err;
+}
+
+static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
+{
+ clk_disable_unprepare(xdev->rxs_clk);
+ clk_disable_unprepare(xdev->rx_clk);
+ clk_disable_unprepare(xdev->txs_clk);
+ clk_disable_unprepare(xdev->tx_clk);
+ clk_disable_unprepare(xdev->axi_clk);
+}
+
+/**
+ * xilinx_dma_chan_probe - Per Channel Probing
+ * It gets channel features from the device tree entry and
+ * initializes special channel handling routines
+ *
+ * @xdev: Driver specific device structure
+ * @node: Device node
+ * @chan_id: DMA Channel id
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
+ struct device_node *node, int chan_id)
+{
+ struct xilinx_dma_chan *chan;
+ bool has_dre = false;
+ u32 value, width;
+ int err;
+
+ /* Allocate and initialize the channel structure */
+ chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+ chan->dev = xdev->dev;
+ chan->xdev = xdev;
+ chan->has_sg = xdev->has_sg;
+ chan->desc_pendingcount = 0x0;
+ chan->ext_addr = xdev->ext_addr;
+
+ spin_lock_init(&chan->lock);
+ INIT_LIST_HEAD(&chan->pending_list);
+ INIT_LIST_HEAD(&chan->done_list);
+ INIT_LIST_HEAD(&chan->active_list);
+
+ /* Retrieve the channel properties from the device tree */
+ has_dre = of_property_read_bool(node, "xlnx,include-dre");
+
+ chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
+
+ err = of_property_read_u32(node, "xlnx,datawidth", &value);
+ if (err) {
+ dev_err(xdev->dev, "missing xlnx,datawidth property\n");
+ return err;
+ }
+ width = value >> 3; /* Convert bits to bytes */
+
+ /* If data width is greater than 8 bytes, DRE is not in hw */
+ if (width > 8)
+ has_dre = false;
+
+ if (!has_dre)
+ xdev->common.copy_align = fls(width - 1);
+
+ if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
+ of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
+ of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
+ chan->direction = DMA_MEM_TO_DEV;
+ chan->id = chan_id;
+ chan->tdest = chan_id;
+
+ chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
+ if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+ chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
+
+ if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
+ xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
+ chan->flush_on_fsync = true;
+ }
+ } else if (of_device_is_compatible(node,
+ "xlnx,axi-vdma-s2mm-channel") ||
+ of_device_is_compatible(node,
+ "xlnx,axi-dma-s2mm-channel")) {
+ chan->direction = DMA_DEV_TO_MEM;
+ chan->id = chan_id;
+ chan->tdest = chan_id - xdev->nr_channels;
+
+ chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
+ if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+ chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
+
+ if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
+ xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
+ chan->flush_on_fsync = true;
+ }
+ } else {
+ dev_err(xdev->dev, "Invalid channel compatible node\n");
+ return -EINVAL;
+ }
+
+ /* Request the interrupt */
+ chan->irq = irq_of_parse_and_map(node, 0);
+ err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
+ "xilinx-dma-controller", chan);
+ if (err) {
+ dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
+ return err;
+ }
+
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+ chan->start_transfer = xilinx_dma_start_transfer;
+ else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
+ chan->start_transfer = xilinx_cdma_start_transfer;
+ else
+ chan->start_transfer = xilinx_vdma_start_transfer;
+
+ /* Initialize the tasklet */
+ tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
+ (unsigned long)chan);
+
+ /*
+ * Initialize the DMA channel and add it to the DMA engine channels
+ * list.
+ */
+ chan->common.device = &xdev->common;
+
+ list_add_tail(&chan->common.device_node, &xdev->common.channels);
+ xdev->chan[chan->id] = chan;
+
+ /* Reset the channel */
+ err = xilinx_dma_chan_reset(chan);
+ if (err < 0) {
+ dev_err(xdev->dev, "Reset channel failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * xilinx_dma_child_probe - Per child node probe
+ * It gets the number of dma-channels per child node from
+ * the device tree and initializes all the channels.
+ *
+ * @xdev: Driver specific device structure
+ * @node: Device node
+ *
+ * Return: 0 always.
+ */
+static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
+				  struct device_node *node)
+{
+ int ret, i, nr_channels = 1;
+
+ ret = of_property_read_u32(node, "dma-channels", &nr_channels);
+ if ((ret < 0) && xdev->mcdma)
+ dev_warn(xdev->dev, "missing dma-channels property\n");
+
+ for (i = 0; i < nr_channels; i++)
+ xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);
+
+ xdev->nr_channels += nr_channels;
+
+ return 0;
+}
+
+/**
+ * of_dma_xilinx_xlate - Translation function
+ * @dma_spec: Pointer to DMA specifier as found in the device tree
+ * @ofdma: Pointer to DMA controller data
+ *
+ * Return: DMA channel pointer on success and NULL on error
+ */
+static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct xilinx_dma_device *xdev = ofdma->of_dma_data;
+ int chan_id = dma_spec->args[0];
+
+ if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
+ return NULL;
+
+ return dma_get_slave_channel(&xdev->chan[chan_id]->common);
+}
+
+static const struct xilinx_dma_config axidma_config = {
+ .dmatype = XDMA_TYPE_AXIDMA,
+ .clk_init = axidma_clk_init,
+};
+
+static const struct xilinx_dma_config axicdma_config = {
+ .dmatype = XDMA_TYPE_CDMA,
+ .clk_init = axicdma_clk_init,
+};
+
+static const struct xilinx_dma_config axivdma_config = {
+ .dmatype = XDMA_TYPE_VDMA,
+ .clk_init = axivdma_clk_init,
+};
+
+static const struct of_device_id xilinx_dma_of_ids[] = {
+ { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
+ { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
+ { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
+ {}
+};
+MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
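+
+/*
+ * Illustrative device tree fragment (addresses, interrupt cells and clock
+ * phandles are hypothetical) using the compatible strings, clock names and
+ * channel properties this driver parses:
+ *
+ *	axi_vdma: dma@40030000 {
+ *		compatible = "xlnx,axi-vdma-1.00.a";
+ *		reg = <0x40030000 0x10000>;
+ *		clock-names = "s_axi_lite_aclk", "m_axi_mm2s_aclk",
+ *			      "m_axis_mm2s_aclk", "m_axi_s2mm_aclk",
+ *			      "s_axis_s2mm_aclk";
+ *		clocks = <&clkc 15>, <&clkc 15>, <&clkc 15>,
+ *			 <&clkc 15>, <&clkc 15>;
+ *		xlnx,num-fstores = <8>;
+ *		xlnx,addrwidth = <32>;
+ *		dma-channel@40030000 {
+ *			compatible = "xlnx,axi-vdma-mm2s-channel";
+ *			interrupts = <0 59 4>;
+ *			xlnx,datawidth = <64>;
+ *		};
+ *	};
+ */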
+
+/**
+ * xilinx_dma_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int xilinx_dma_probe(struct platform_device *pdev)
+{
+ int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
+ struct clk **, struct clk **, struct clk **)
+ = axivdma_clk_init;
+ struct device_node *node = pdev->dev.of_node;
+ struct xilinx_dma_device *xdev;
+ struct device_node *child, *np = pdev->dev.of_node;
+ struct resource *io;
+ u32 num_frames, addr_width;
+ int i, err;
+
+ /* Allocate and initialize the DMA engine structure */
+ xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
+ if (!xdev)
+ return -ENOMEM;
+
+ xdev->dev = &pdev->dev;
+ if (np) {
+ const struct of_device_id *match;
+
+ match = of_match_node(xilinx_dma_of_ids, np);
+ if (match && match->data) {
+ xdev->dma_config = match->data;
+ clk_init = xdev->dma_config->clk_init;
+ }
+ }
+
+ err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
+ &xdev->rx_clk, &xdev->rxs_clk);
+ if (err)
+ return err;
+
+ /* Request and map I/O memory */
+ io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xdev->regs = devm_ioremap_resource(&pdev->dev, io);
+ if (IS_ERR(xdev->regs))
+ return PTR_ERR(xdev->regs);
+
+ /* Retrieve the DMA engine properties from the device tree */
+ xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+ xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
+
+ if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+ err = of_property_read_u32(node, "xlnx,num-fstores",
+ &num_frames);
+ if (err < 0) {
+ dev_err(xdev->dev,
+ "missing xlnx,num-fstores property\n");
+ return err;
+ }
+
+ err = of_property_read_u32(node, "xlnx,flush-fsync",
+ &xdev->flush_on_fsync);
+ if (err < 0)
+ dev_warn(xdev->dev,
+ "missing xlnx,flush-fsync property\n");
+ }
+
+ err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
+ if (err < 0)
+ dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
+
+ if (addr_width > 32)
+ xdev->ext_addr = true;
+ else
+ xdev->ext_addr = false;
+
+ /* Set the dma mask bits */
+ dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));
+
+ /* Initialize the DMA engine */
+ xdev->common.dev = &pdev->dev;
+
+ INIT_LIST_HEAD(&xdev->common.channels);
+ if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
+ dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
+ dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
+ }
+
+ xdev->common.device_alloc_chan_resources =
+ xilinx_dma_alloc_chan_resources;
+ xdev->common.device_free_chan_resources =
+ xilinx_dma_free_chan_resources;
+ xdev->common.device_terminate_all = xilinx_dma_terminate_all;
+ xdev->common.device_tx_status = xilinx_dma_tx_status;
+ xdev->common.device_issue_pending = xilinx_dma_issue_pending;
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+ dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
+ xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
+ xdev->common.device_prep_dma_cyclic =
+ xilinx_dma_prep_dma_cyclic;
+ xdev->common.device_prep_interleaved_dma =
+ xilinx_dma_prep_interleaved;
+		/* Residue calculation is supported only by AXI DMA */
+ xdev->common.residue_granularity =
+ DMA_RESIDUE_GRANULARITY_SEGMENT;
+ } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
+ dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
+ xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
+ } else {
+ xdev->common.device_prep_interleaved_dma =
+ xilinx_vdma_dma_prep_interleaved;
+ }
+
+ platform_set_drvdata(pdev, xdev);
+
+ /* Initialize the channels */
+ for_each_child_of_node(node, child) {
+ err = xilinx_dma_child_probe(xdev, child);
+ if (err < 0)
+ goto disable_clks;
+ }
+
+ if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+ for (i = 0; i < xdev->nr_channels; i++)
+ if (xdev->chan[i])
+ xdev->chan[i]->num_frms = num_frames;
+ }
+
+ /* Register the DMA engine with the core */
+ dma_async_device_register(&xdev->common);
+
+ err = of_dma_controller_register(node, of_dma_xilinx_xlate,
+ xdev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Unable to register DMA to DT\n");
+ dma_async_device_unregister(&xdev->common);
+ goto error;
+ }
+
+ dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
+
+ return 0;
+
+disable_clks:
+ xdma_disable_allclks(xdev);
+error:
+ for (i = 0; i < xdev->nr_channels; i++)
+ if (xdev->chan[i])
+ xilinx_dma_chan_remove(xdev->chan[i]);
+
+ return err;
+}
+
+/**
+ * xilinx_dma_remove - Driver remove function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: Always '0'
+ */
+static int xilinx_dma_remove(struct platform_device *pdev)
+{
+ struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
+ int i;
+
+ of_dma_controller_free(pdev->dev.of_node);
+
+ dma_async_device_unregister(&xdev->common);
+
+ for (i = 0; i < xdev->nr_channels; i++)
+ if (xdev->chan[i])
+ xilinx_dma_chan_remove(xdev->chan[i]);
+
+ xdma_disable_allclks(xdev);
+
+ return 0;
+}
+
+static struct platform_driver xilinx_vdma_driver = {
+ .driver = {
+ .name = "xilinx-vdma",
+ .of_match_table = xilinx_dma_of_ids,
+ },
+ .probe = xilinx_dma_probe,
+ .remove = xilinx_dma_remove,
+};
+
+module_platform_driver(xilinx_vdma_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx VDMA driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
new file mode 100644
index 000000000..6d221e5c7
--- /dev/null
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -0,0 +1,1151 @@
+/*
+ * DMA driver for Xilinx ZynqMP DMA Engine
+ *
+ * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include <linux/dmapool.h>
+#include <linux/dma/xilinx_dma.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_dma.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+#include "../dmaengine.h"
+
+/* Register Offsets */
+#define ZYNQMP_DMA_ISR 0x100
+#define ZYNQMP_DMA_IMR 0x104
+#define ZYNQMP_DMA_IER 0x108
+#define ZYNQMP_DMA_IDS 0x10C
+#define ZYNQMP_DMA_CTRL0 0x110
+#define ZYNQMP_DMA_CTRL1 0x114
+#define ZYNQMP_DMA_DATA_ATTR 0x120
+#define ZYNQMP_DMA_DSCR_ATTR 0x124
+#define ZYNQMP_DMA_SRC_DSCR_WRD0 0x128
+#define ZYNQMP_DMA_SRC_DSCR_WRD1 0x12C
+#define ZYNQMP_DMA_SRC_DSCR_WRD2 0x130
+#define ZYNQMP_DMA_SRC_DSCR_WRD3 0x134
+#define ZYNQMP_DMA_DST_DSCR_WRD0 0x138
+#define ZYNQMP_DMA_DST_DSCR_WRD1 0x13C
+#define ZYNQMP_DMA_DST_DSCR_WRD2 0x140
+#define ZYNQMP_DMA_DST_DSCR_WRD3 0x144
+#define ZYNQMP_DMA_SRC_START_LSB 0x158
+#define ZYNQMP_DMA_SRC_START_MSB 0x15C
+#define ZYNQMP_DMA_DST_START_LSB 0x160
+#define ZYNQMP_DMA_DST_START_MSB 0x164
+#define ZYNQMP_DMA_RATE_CTRL 0x18C
+#define ZYNQMP_DMA_IRQ_SRC_ACCT 0x190
+#define ZYNQMP_DMA_IRQ_DST_ACCT 0x194
+#define ZYNQMP_DMA_CTRL2 0x200
+
+/* Interrupt registers bit field definitions */
+#define ZYNQMP_DMA_DONE BIT(10)
+#define ZYNQMP_DMA_AXI_WR_DATA BIT(9)
+#define ZYNQMP_DMA_AXI_RD_DATA BIT(8)
+#define ZYNQMP_DMA_AXI_RD_DST_DSCR BIT(7)
+#define ZYNQMP_DMA_AXI_RD_SRC_DSCR BIT(6)
+#define ZYNQMP_DMA_IRQ_DST_ACCT_ERR BIT(5)
+#define ZYNQMP_DMA_IRQ_SRC_ACCT_ERR BIT(4)
+#define ZYNQMP_DMA_BYTE_CNT_OVRFL BIT(3)
+#define ZYNQMP_DMA_DST_DSCR_DONE BIT(2)
+#define ZYNQMP_DMA_INV_APB BIT(0)
+
+/* Control 0 register bit field definitions */
+#define ZYNQMP_DMA_OVR_FETCH BIT(7)
+#define ZYNQMP_DMA_POINT_TYPE_SG BIT(6)
+#define ZYNQMP_DMA_RATE_CTRL_EN BIT(3)
+
+/* Control 1 register bit field definitions */
+#define ZYNQMP_DMA_SRC_ISSUE GENMASK(4, 0)
+
+/* Data Attribute register bit field definitions */
+#define ZYNQMP_DMA_ARBURST GENMASK(27, 26)
+#define ZYNQMP_DMA_ARCACHE GENMASK(25, 22)
+#define ZYNQMP_DMA_ARCACHE_OFST 22
+#define ZYNQMP_DMA_ARQOS GENMASK(21, 18)
+#define ZYNQMP_DMA_ARQOS_OFST 18
+#define ZYNQMP_DMA_ARLEN GENMASK(17, 14)
+#define ZYNQMP_DMA_ARLEN_OFST 14
+#define ZYNQMP_DMA_AWBURST GENMASK(13, 12)
+#define ZYNQMP_DMA_AWCACHE GENMASK(11, 8)
+#define ZYNQMP_DMA_AWCACHE_OFST 8
+#define ZYNQMP_DMA_AWQOS GENMASK(7, 4)
+#define ZYNQMP_DMA_AWQOS_OFST 4
+#define ZYNQMP_DMA_AWLEN GENMASK(3, 0)
+#define ZYNQMP_DMA_AWLEN_OFST 0
+
+/* Descriptor Attribute register bit field definitions */
+#define ZYNQMP_DMA_AXCOHRNT BIT(8)
+#define ZYNQMP_DMA_AXCACHE GENMASK(7, 4)
+#define ZYNQMP_DMA_AXCACHE_OFST 4
+#define ZYNQMP_DMA_AXQOS GENMASK(3, 0)
+#define ZYNQMP_DMA_AXQOS_OFST 0
+
+/* Control register 2 bit field definitions */
+#define ZYNQMP_DMA_ENABLE BIT(0)
+
+/* Buffer Descriptor definitions */
+#define ZYNQMP_DMA_DESC_CTRL_STOP 0x10
+#define ZYNQMP_DMA_DESC_CTRL_COMP_INT 0x4
+#define ZYNQMP_DMA_DESC_CTRL_SIZE_256 0x2
+#define ZYNQMP_DMA_DESC_CTRL_COHRNT 0x1
+
+/* Interrupt Mask specific definitions */
+#define ZYNQMP_DMA_INT_ERR (ZYNQMP_DMA_AXI_RD_DATA | \
+ ZYNQMP_DMA_AXI_WR_DATA | \
+ ZYNQMP_DMA_AXI_RD_DST_DSCR | \
+ ZYNQMP_DMA_AXI_RD_SRC_DSCR | \
+ ZYNQMP_DMA_INV_APB)
+#define ZYNQMP_DMA_INT_OVRFL (ZYNQMP_DMA_BYTE_CNT_OVRFL | \
+ ZYNQMP_DMA_IRQ_SRC_ACCT_ERR | \
+ ZYNQMP_DMA_IRQ_DST_ACCT_ERR)
+#define ZYNQMP_DMA_INT_DONE (ZYNQMP_DMA_DONE | ZYNQMP_DMA_DST_DSCR_DONE)
+#define ZYNQMP_DMA_INT_EN_DEFAULT_MASK (ZYNQMP_DMA_INT_DONE | \
+ ZYNQMP_DMA_INT_ERR | \
+ ZYNQMP_DMA_INT_OVRFL | \
+ ZYNQMP_DMA_DST_DSCR_DONE)
+
+/* Max number of descriptors per channel */
+#define ZYNQMP_DMA_NUM_DESCS 32
+
+/* Max transfer size per descriptor */
+#define ZYNQMP_DMA_MAX_TRANS_LEN 0x40000000
+
+/* Reset values for data attributes */
+#define ZYNQMP_DMA_AXCACHE_VAL 0xF
+#define ZYNQMP_DMA_ARLEN_RST_VAL 0xF
+#define ZYNQMP_DMA_AWLEN_RST_VAL 0xF
+
+#define ZYNQMP_DMA_SRC_ISSUE_RST_VAL 0x1F
+
+#define ZYNQMP_DMA_IDS_DEFAULT_MASK 0xFFF
+
+/* Bus width in bits */
+#define ZYNQMP_DMA_BUS_WIDTH_64 64
+#define ZYNQMP_DMA_BUS_WIDTH_128 128
+
+#define ZYNQMP_DMA_DESC_SIZE(chan) (chan->desc_size)
+
+#define to_chan(chan) container_of(chan, struct zynqmp_dma_chan, \
+ common)
+#define tx_to_desc(tx) container_of(tx, struct zynqmp_dma_desc_sw, \
+ async_tx)
+
+/**
+ * struct zynqmp_dma_desc_ll - Hw linked list descriptor
+ * @addr: Buffer address
+ * @size: Size of the buffer
+ * @ctrl: Control word
+ * @nxtdscraddr: Next descriptor base address
+ * @rsvd: Reserved field, for Hw internal use.
+ */
+struct zynqmp_dma_desc_ll {
+ u64 addr;
+ u32 size;
+ u32 ctrl;
+ u64 nxtdscraddr;
+ u64 rsvd;
+} __aligned(64);
+
+/**
+ * struct zynqmp_dma_desc_sw - Per Transaction structure
+ * @src: Source address for simple mode dma
+ * @dst: Destination address for simple mode dma
+ * @len: Transfer length for simple mode dma
+ * @node: Node in the channel descriptor list
+ * @tx_list: List head for the current transfer
+ * @async_tx: Async transaction descriptor
+ * @src_v: Virtual address of the src descriptor
+ * @src_p: Physical address of the src descriptor
+ * @dst_v: Virtual address of the dst descriptor
+ * @dst_p: Physical address of the dst descriptor
+ */
+struct zynqmp_dma_desc_sw {
+ u64 src;
+ u64 dst;
+ u32 len;
+ struct list_head node;
+ struct list_head tx_list;
+ struct dma_async_tx_descriptor async_tx;
+ struct zynqmp_dma_desc_ll *src_v;
+ dma_addr_t src_p;
+ struct zynqmp_dma_desc_ll *dst_v;
+ dma_addr_t dst_p;
+};
+
+/**
+ * struct zynqmp_dma_chan - Driver specific DMA channel structure
+ * @zdev: Driver specific device structure
+ * @regs: Control registers base address
+ * @lock: Descriptor operation lock
+ * @pending_list: Descriptors waiting
+ * @free_list: Descriptors free
+ * @active_list: Descriptors active
+ * @sw_desc_pool: SW descriptor pool
+ * @done_list: Complete descriptors
+ * @common: DMA common channel
+ * @desc_pool_v: Virtual address of the hw descriptor pool
+ * @desc_pool_p: DMA address of the hw descriptor pool
+ * @desc_free_cnt: Descriptor available count
+ * @dev: The dma device
+ * @irq: Channel IRQ
+ * @is_dmacoherent: Tells whether dma operations are coherent or not
+ * @tasklet: Cleanup work after irq
+ * @idle: Channel status
+ * @desc_size: Size of the low level descriptor
+ * @err: Channel has errors
+ * @bus_width: Bus width
+ * @src_burst_len: Source burst length
+ * @dst_burst_len: Dest burst length
+ * @clk_main: Pointer to main clock
+ * @clk_apb: Pointer to apb clock
+ */
+struct zynqmp_dma_chan {
+ struct zynqmp_dma_device *zdev;
+ void __iomem *regs;
+ spinlock_t lock;
+ struct list_head pending_list;
+ struct list_head free_list;
+ struct list_head active_list;
+ struct zynqmp_dma_desc_sw *sw_desc_pool;
+ struct list_head done_list;
+ struct dma_chan common;
+ void *desc_pool_v;
+ dma_addr_t desc_pool_p;
+ u32 desc_free_cnt;
+ struct device *dev;
+ int irq;
+ bool is_dmacoherent;
+ struct tasklet_struct tasklet;
+ bool idle;
+ u32 desc_size;
+ bool err;
+ u32 bus_width;
+ u32 src_burst_len;
+ u32 dst_burst_len;
+ struct clk *clk_main;
+ struct clk *clk_apb;
+};
+
+/**
+ * struct zynqmp_dma_device - DMA device structure
+ * @dev: Device Structure
+ * @common: DMA device structure
+ * @chan: Driver specific DMA channel
+ */
+struct zynqmp_dma_device {
+ struct device *dev;
+ struct dma_device common;
+ struct zynqmp_dma_chan *chan;
+};
+
+static inline void zynqmp_dma_writeq(struct zynqmp_dma_chan *chan, u32 reg,
+ u64 value)
+{
+ lo_hi_writeq(value, chan->regs + reg);
+}
+
+/**
+ * zynqmp_dma_update_desc_to_ctrlr - Program the descriptor addresses into the controller
+ * @chan: ZynqMP DMA channel pointer
+ * @desc: Transaction descriptor pointer
+ */
+static void zynqmp_dma_update_desc_to_ctrlr(struct zynqmp_dma_chan *chan,
+ struct zynqmp_dma_desc_sw *desc)
+{
+ dma_addr_t addr;
+
+ addr = desc->src_p;
+ zynqmp_dma_writeq(chan, ZYNQMP_DMA_SRC_START_LSB, addr);
+ addr = desc->dst_p;
+ zynqmp_dma_writeq(chan, ZYNQMP_DMA_DST_START_LSB, addr);
+}
+
+/**
+ * zynqmp_dma_desc_config_eod - Mark the descriptor as end descriptor
+ * @chan: ZynqMP DMA channel pointer
+ * @desc: Hw descriptor pointer
+ */
+static void zynqmp_dma_desc_config_eod(struct zynqmp_dma_chan *chan,
+ void *desc)
+{
+ struct zynqmp_dma_desc_ll *hw = (struct zynqmp_dma_desc_ll *)desc;
+
+ hw->ctrl |= ZYNQMP_DMA_DESC_CTRL_STOP;
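+	/* The paired dst descriptor immediately follows the src descriptor */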
+ hw++;
+ hw->ctrl |= ZYNQMP_DMA_DESC_CTRL_COMP_INT | ZYNQMP_DMA_DESC_CTRL_STOP;
+}
+
+/**
+ * zynqmp_dma_config_sg_ll_desc - Configure the linked list descriptor
+ * @chan: ZynqMP DMA channel pointer
+ * @sdesc: Hw descriptor pointer
+ * @src: Source buffer address
+ * @dst: Destination buffer address
+ * @len: Transfer length
+ * @prev: Previous hw descriptor pointer
+ */
+static void zynqmp_dma_config_sg_ll_desc(struct zynqmp_dma_chan *chan,
+ struct zynqmp_dma_desc_ll *sdesc,
+ dma_addr_t src, dma_addr_t dst, size_t len,
+ struct zynqmp_dma_desc_ll *prev)
+{
+ struct zynqmp_dma_desc_ll *ddesc = sdesc + 1;
+
+ sdesc->size = ddesc->size = len;
+ sdesc->addr = src;
+ ddesc->addr = dst;
+
+ sdesc->ctrl = ddesc->ctrl = ZYNQMP_DMA_DESC_CTRL_SIZE_256;
+ if (chan->is_dmacoherent) {
+ sdesc->ctrl |= ZYNQMP_DMA_DESC_CTRL_COHRNT;
+ ddesc->ctrl |= ZYNQMP_DMA_DESC_CTRL_COHRNT;
+ }
+
+ if (prev) {
+ dma_addr_t addr = chan->desc_pool_p +
+ ((uintptr_t)sdesc - (uintptr_t)chan->desc_pool_v);
+ ddesc = prev + 1;
+ prev->nxtdscraddr = addr;
+ ddesc->nxtdscraddr = addr + ZYNQMP_DMA_DESC_SIZE(chan);
+ }
+}
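+
+/*
+ * Layout sketch for the pool arithmetic above: hardware descriptors live in
+ * src/dst pairs, so for software descriptor i
+ *
+ *	src_v = desc_pool_v + i * 2 * ZYNQMP_DMA_DESC_SIZE(chan)
+ *	dst_v = src_v + 1
+ *
+ * and a src descriptor's bus address is desc_pool_p + (src_v - desc_pool_v),
+ * which is exactly how the prev-linking code above computes "addr".
+ */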
+
+/**
+ * zynqmp_dma_init - Initialize the channel
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_init(struct zynqmp_dma_chan *chan)
+{
+ u32 val;
+
+ writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
+ val = readl(chan->regs + ZYNQMP_DMA_ISR);
+ writel(val, chan->regs + ZYNQMP_DMA_ISR);
+
+ if (chan->is_dmacoherent) {
+ val = ZYNQMP_DMA_AXCOHRNT;
+ val = (val & ~ZYNQMP_DMA_AXCACHE) |
+ (ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_AXCACHE_OFST);
+ writel(val, chan->regs + ZYNQMP_DMA_DSCR_ATTR);
+ }
+
+ val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
+ if (chan->is_dmacoherent) {
+ val = (val & ~ZYNQMP_DMA_ARCACHE) |
+ (ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_ARCACHE_OFST);
+ val = (val & ~ZYNQMP_DMA_AWCACHE) |
+ (ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_AWCACHE_OFST);
+ }
+ writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);
+
+	/* Clear the interrupt accounting registers */
+ val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
+ val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
+
+ chan->idle = true;
+}
+
+/**
+ * zynqmp_dma_tx_submit - Submit DMA transaction
+ * @tx: Async transaction descriptor pointer
+ *
+ * Return: cookie value
+ */
+static dma_cookie_t zynqmp_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct zynqmp_dma_chan *chan = to_chan(tx->chan);
+ struct zynqmp_dma_desc_sw *desc, *new;
+ dma_cookie_t cookie;
+
+ new = tx_to_desc(tx);
+ spin_lock_bh(&chan->lock);
+ cookie = dma_cookie_assign(tx);
+
+ if (!list_empty(&chan->pending_list)) {
+ desc = list_last_entry(&chan->pending_list,
+ struct zynqmp_dma_desc_sw, node);
+ if (!list_empty(&desc->tx_list))
+ desc = list_last_entry(&desc->tx_list,
+ struct zynqmp_dma_desc_sw, node);
+ desc->src_v->nxtdscraddr = new->src_p;
+ desc->src_v->ctrl &= ~ZYNQMP_DMA_DESC_CTRL_STOP;
+ desc->dst_v->nxtdscraddr = new->dst_p;
+ desc->dst_v->ctrl &= ~ZYNQMP_DMA_DESC_CTRL_STOP;
+ }
+
+ list_add_tail(&new->node, &chan->pending_list);
+ spin_unlock_bh(&chan->lock);
+
+ return cookie;
+}
+
+/**
+ * zynqmp_dma_get_descriptor - Get the sw descriptor from the pool
+ * @chan: ZynqMP DMA channel pointer
+ *
+ * Return: The sw descriptor
+ */
+static struct zynqmp_dma_desc_sw *
+zynqmp_dma_get_descriptor(struct zynqmp_dma_chan *chan)
+{
+ struct zynqmp_dma_desc_sw *desc;
+
+ spin_lock_bh(&chan->lock);
+ desc = list_first_entry(&chan->free_list,
+ struct zynqmp_dma_desc_sw, node);
+ list_del(&desc->node);
+ spin_unlock_bh(&chan->lock);
+
+ INIT_LIST_HEAD(&desc->tx_list);
+ /* Clear the src and dst descriptor memory */
+ memset((void *)desc->src_v, 0, ZYNQMP_DMA_DESC_SIZE(chan));
+ memset((void *)desc->dst_v, 0, ZYNQMP_DMA_DESC_SIZE(chan));
+
+ return desc;
+}
+
+/**
+ * zynqmp_dma_free_descriptor - Release a descriptor back to the free pool
+ * @chan: ZynqMP DMA channel pointer
+ * @sdesc: Transaction descriptor pointer
+ */
+static void zynqmp_dma_free_descriptor(struct zynqmp_dma_chan *chan,
+ struct zynqmp_dma_desc_sw *sdesc)
+{
+ struct zynqmp_dma_desc_sw *child, *next;
+
+ chan->desc_free_cnt++;
+ list_add_tail(&sdesc->node, &chan->free_list);
+ list_for_each_entry_safe(child, next, &sdesc->tx_list, node) {
+ chan->desc_free_cnt++;
+ list_move_tail(&child->node, &chan->free_list);
+ }
+}
+
+/**
+ * zynqmp_dma_free_desc_list - Free descriptors list
+ * @chan: ZynqMP DMA channel pointer
+ * @list: List to parse and delete the descriptor
+ */
+static void zynqmp_dma_free_desc_list(struct zynqmp_dma_chan *chan,
+ struct list_head *list)
+{
+ struct zynqmp_dma_desc_sw *desc, *next;
+
+ list_for_each_entry_safe(desc, next, list, node)
+ zynqmp_dma_free_descriptor(chan, desc);
+}
+
+/**
+ * zynqmp_dma_alloc_chan_resources - Allocate channel resources
+ * @dchan: DMA channel
+ *
+ * Return: Number of descriptors on success and failure value on error
+ */
+static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct zynqmp_dma_chan *chan = to_chan(dchan);
+ struct zynqmp_dma_desc_sw *desc;
+ int i;
+
+ chan->sw_desc_pool = kzalloc(sizeof(*desc) * ZYNQMP_DMA_NUM_DESCS,
+ GFP_KERNEL);
+ if (!chan->sw_desc_pool)
+ return -ENOMEM;
+
+ chan->idle = true;
+ chan->desc_free_cnt = ZYNQMP_DMA_NUM_DESCS;
+
+ INIT_LIST_HEAD(&chan->free_list);
+
+ for (i = 0; i < ZYNQMP_DMA_NUM_DESCS; i++) {
+ desc = chan->sw_desc_pool + i;
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = zynqmp_dma_tx_submit;
+ list_add_tail(&desc->node, &chan->free_list);
+ }
+
+ chan->desc_pool_v = dma_zalloc_coherent(chan->dev,
+ (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS),
+ &chan->desc_pool_p, GFP_KERNEL);
+ if (!chan->desc_pool_v)
+ return -ENOMEM;
+
+ for (i = 0; i < ZYNQMP_DMA_NUM_DESCS; i++) {
+ desc = chan->sw_desc_pool + i;
+ desc->src_v = (struct zynqmp_dma_desc_ll *) (chan->desc_pool_v +
+ (i * ZYNQMP_DMA_DESC_SIZE(chan) * 2));
+ desc->dst_v = (struct zynqmp_dma_desc_ll *) (desc->src_v + 1);
+ desc->src_p = chan->desc_pool_p +
+ (i * ZYNQMP_DMA_DESC_SIZE(chan) * 2);
+ desc->dst_p = desc->src_p + ZYNQMP_DMA_DESC_SIZE(chan);
+ }
+
+ return ZYNQMP_DMA_NUM_DESCS;
+}
+
+/**
+ * zynqmp_dma_start - Start DMA channel
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_start(struct zynqmp_dma_chan *chan)
+{
+ writel(ZYNQMP_DMA_INT_EN_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IER);
+ chan->idle = false;
+ writel(ZYNQMP_DMA_ENABLE, chan->regs + ZYNQMP_DMA_CTRL2);
+}
+
+/**
+ * zynqmp_dma_handle_ovfl_int - Process the overflow interrupt
+ * @chan: ZynqMP DMA channel pointer
+ * @status: Interrupt status value
+ */
+static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status)
+{
+ u32 val;
+
+ if (status & ZYNQMP_DMA_IRQ_DST_ACCT_ERR)
+ val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
+ if (status & ZYNQMP_DMA_IRQ_SRC_ACCT_ERR)
+ val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
+}
+
+static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
+{
+ u32 val;
+
+ val = readl(chan->regs + ZYNQMP_DMA_CTRL0);
+ val |= ZYNQMP_DMA_POINT_TYPE_SG;
+ writel(val, chan->regs + ZYNQMP_DMA_CTRL0);
+
+ val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
+ val = (val & ~ZYNQMP_DMA_ARLEN) |
+ (chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST);
+ val = (val & ~ZYNQMP_DMA_AWLEN) |
+ (chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST);
+ writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);
+}
+
+/**
+ * zynqmp_dma_device_config - Zynqmp dma device configuration
+ * @dchan: DMA channel
+ * @config: DMA device config
+ *
+ * Return: Always '0'
+ */
+static int zynqmp_dma_device_config(struct dma_chan *dchan,
+ struct dma_slave_config *config)
+{
+ struct zynqmp_dma_chan *chan = to_chan(dchan);
+
+ chan->src_burst_len = config->src_maxburst;
+ chan->dst_burst_len = config->dst_maxburst;
+
+ return 0;
+}
+
+/**
+ * zynqmp_dma_start_transfer - Initiate the new transfer
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_start_transfer(struct zynqmp_dma_chan *chan)
+{
+ struct zynqmp_dma_desc_sw *desc;
+
+ if (!chan->idle)
+ return;
+
+ zynqmp_dma_config(chan);
+
+ desc = list_first_entry_or_null(&chan->pending_list,
+ struct zynqmp_dma_desc_sw, node);
+ if (!desc)
+ return;
+
+ list_splice_tail_init(&chan->pending_list, &chan->active_list);
+ zynqmp_dma_update_desc_to_ctrlr(chan, desc);
+ zynqmp_dma_start(chan);
+}
+
+/**
+ * zynqmp_dma_chan_desc_cleanup - Cleanup the completed descriptors
+ * @chan: ZynqMP DMA channel
+ */
+static void zynqmp_dma_chan_desc_cleanup(struct zynqmp_dma_chan *chan)
+{
+ struct zynqmp_dma_desc_sw *desc, *next;
+
+ list_for_each_entry_safe(desc, next, &chan->done_list, node) {
+ dma_async_tx_callback callback;
+ void *callback_param;
+
+ list_del(&desc->node);
+
+ callback = desc->async_tx.callback;
+ callback_param = desc->async_tx.callback_param;
+ if (callback) {
+ spin_unlock(&chan->lock);
+ callback(callback_param);
+ spin_lock(&chan->lock);
+ }
+
+ /* Run any dependencies, then free the descriptor */
+ zynqmp_dma_free_descriptor(chan, desc);
+ }
+}
+
+/**
+ * zynqmp_dma_complete_descriptor - Mark the active descriptor as complete
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_complete_descriptor(struct zynqmp_dma_chan *chan)
+{
+ struct zynqmp_dma_desc_sw *desc;
+
+ desc = list_first_entry_or_null(&chan->active_list,
+ struct zynqmp_dma_desc_sw, node);
+ if (!desc)
+ return;
+ list_del(&desc->node);
+ dma_cookie_complete(&desc->async_tx);
+ list_add_tail(&desc->node, &chan->done_list);
+}
+
+/**
+ * zynqmp_dma_issue_pending - Issue pending transactions
+ * @dchan: DMA channel pointer
+ */
+static void zynqmp_dma_issue_pending(struct dma_chan *dchan)
+{
+ struct zynqmp_dma_chan *chan = to_chan(dchan);
+
+ spin_lock_bh(&chan->lock);
+ zynqmp_dma_start_transfer(chan);
+ spin_unlock_bh(&chan->lock);
+}
+
+/**
+ * zynqmp_dma_free_descriptors - Free channel descriptors
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan)
+{
+ zynqmp_dma_free_desc_list(chan, &chan->active_list);
+ zynqmp_dma_free_desc_list(chan, &chan->pending_list);
+ zynqmp_dma_free_desc_list(chan, &chan->done_list);
+}
+
+/**
+ * zynqmp_dma_free_chan_resources - Free channel resources
+ * @dchan: DMA channel pointer
+ */
+static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct zynqmp_dma_chan *chan = to_chan(dchan);
+
+ spin_lock_bh(&chan->lock);
+ zynqmp_dma_free_descriptors(chan);
+ spin_unlock_bh(&chan->lock);
+ dma_free_coherent(chan->dev,
+ (2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS),
+ chan->desc_pool_v, chan->desc_pool_p);
+ kfree(chan->sw_desc_pool);
+}
+
+/**
+ * zynqmp_dma_reset - Reset the channel
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_reset(struct zynqmp_dma_chan *chan)
+{
+ writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
+
+ zynqmp_dma_complete_descriptor(chan);
+ zynqmp_dma_chan_desc_cleanup(chan);
+ zynqmp_dma_free_descriptors(chan);
+ zynqmp_dma_init(chan);
+}
+
+/**
+ * zynqmp_dma_irq_handler - ZynqMP DMA Interrupt handler
+ * @irq: IRQ number
+ * @data: Pointer to the ZynqMP DMA channel structure
+ *
+ * Return: IRQ_HANDLED/IRQ_NONE
+ */
+static irqreturn_t zynqmp_dma_irq_handler(int irq, void *data)
+{
+ struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data;
+ u32 isr, imr, status;
+ irqreturn_t ret = IRQ_NONE;
+
+ isr = readl(chan->regs + ZYNQMP_DMA_ISR);
+ imr = readl(chan->regs + ZYNQMP_DMA_IMR);
+ status = isr & ~imr;
+
+ writel(isr, chan->regs + ZYNQMP_DMA_ISR);
+ if (status & ZYNQMP_DMA_INT_DONE) {
+ tasklet_schedule(&chan->tasklet);
+ ret = IRQ_HANDLED;
+ }
+
+ if (status & ZYNQMP_DMA_DONE)
+ chan->idle = true;
+
+ if (status & ZYNQMP_DMA_INT_ERR) {
+ chan->err = true;
+ tasklet_schedule(&chan->tasklet);
+ dev_err(chan->dev, "Channel %p has errors\n", chan);
+ ret = IRQ_HANDLED;
+ }
+
+ if (status & ZYNQMP_DMA_INT_OVRFL) {
+ zynqmp_dma_handle_ovfl_int(chan, status);
+ dev_info(chan->dev, "Channel %p overflow interrupt\n", chan);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
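+
+/*
+ * Note on the masking above: ISR latches every event while IMR holds the
+ * masked ones, so "isr & ~imr" keeps only interrupts that are both pending
+ * and enabled; the raw ISR value is written back to acknowledge all of them.
+ */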
+
+/**
+ * zynqmp_dma_do_tasklet - Schedule completion tasklet
+ * @data: Pointer to the ZynqMP DMA channel structure
+ */
+static void zynqmp_dma_do_tasklet(unsigned long data)
+{
+ struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data;
+ u32 count;
+
+ spin_lock(&chan->lock);
+
+ if (chan->err) {
+ zynqmp_dma_reset(chan);
+ chan->err = false;
+ goto unlock;
+ }
+
+ count = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
+
+ while (count) {
+ zynqmp_dma_complete_descriptor(chan);
+ zynqmp_dma_chan_desc_cleanup(chan);
+ count--;
+ }
+
+ if (chan->idle)
+ zynqmp_dma_start_transfer(chan);
+
+unlock:
+ spin_unlock(&chan->lock);
+}
+
+/**
+ * zynqmp_dma_device_terminate_all - Aborts all transfers on a channel
+ * @dchan: DMA channel pointer
+ *
+ * Return: Always '0'
+ */
+static int zynqmp_dma_device_terminate_all(struct dma_chan *dchan)
+{
+ struct zynqmp_dma_chan *chan = to_chan(dchan);
+
+ spin_lock_bh(&chan->lock);
+ writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
+ zynqmp_dma_free_descriptors(chan);
+ spin_unlock_bh(&chan->lock);
+
+ return 0;
+}
+
+/**
+ * zynqmp_dma_prep_memcpy - prepare descriptors for memcpy transaction
+ * @dchan: DMA channel
+ * @dma_dst: Destination buffer address
+ * @dma_src: Source buffer address
+ * @len: Transfer length
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy(
+ struct dma_chan *dchan, dma_addr_t dma_dst,
+ dma_addr_t dma_src, size_t len, ulong flags)
+{
+ struct zynqmp_dma_chan *chan;
+ struct zynqmp_dma_desc_sw *new, *first = NULL;
+ void *desc = NULL, *prev = NULL;
+ size_t copy;
+ u32 desc_cnt;
+
+ chan = to_chan(dchan);
+
+ if (len > ZYNQMP_DMA_MAX_TRANS_LEN)
+ return NULL;
+
+ desc_cnt = DIV_ROUND_UP(len, ZYNQMP_DMA_MAX_TRANS_LEN);
+
+ spin_lock_bh(&chan->lock);
+ if (desc_cnt > chan->desc_free_cnt) {
+ spin_unlock_bh(&chan->lock);
+ dev_dbg(chan->dev, "chan %p descs are not available\n", chan);
+ return NULL;
+ }
+ chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt;
+ spin_unlock_bh(&chan->lock);
+
+ do {
+ /* Allocate and populate the descriptor */
+ new = zynqmp_dma_get_descriptor(chan);
+
+ copy = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN);
+ desc = (struct zynqmp_dma_desc_ll *)new->src_v;
+ zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src,
+ dma_dst, copy, prev);
+ prev = desc;
+ len -= copy;
+ dma_src += copy;
+ dma_dst += copy;
+ if (!first)
+ first = new;
+ else
+ list_add_tail(&new->node, &first->tx_list);
+ } while (len);
+
+ zynqmp_dma_desc_config_eod(chan, desc);
+ async_tx_ack(&first->async_tx);
+ first->async_tx.flags = flags;
+ return &first->async_tx;
+}
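+
+/*
+ * Illustrative sketch (not part of this driver): a memcpy client goes
+ * through the registered device op; handles and addresses are hypothetical.
+ *
+ *	struct dma_async_tx_descriptor *txd;
+ *
+ *	txd = chan->device->device_prep_dma_memcpy(chan, dst_dma, src_dma,
+ *						   len, DMA_CTRL_ACK);
+ *	if (txd) {
+ *		dmaengine_submit(txd);
+ *		dma_async_issue_pending(chan);
+ *	}
+ */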
+
+/**
+ * zynqmp_dma_prep_sg - prepare descriptors for a memory-to-memory sg transaction
+ * @dchan: DMA channel
+ * @dst_sg: Destination scatter list
+ * @dst_sg_len: Number of entries in destination scatter list
+ * @src_sg: Source scatter list
+ * @src_sg_len: Number of entries in source scatter list
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *zynqmp_dma_prep_sg(
+ struct dma_chan *dchan, struct scatterlist *dst_sg,
+ unsigned int dst_sg_len, struct scatterlist *src_sg,
+ unsigned int src_sg_len, unsigned long flags)
+{
+ struct zynqmp_dma_desc_sw *new, *first = NULL;
+ struct zynqmp_dma_chan *chan = to_chan(dchan);
+ void *desc = NULL, *prev = NULL;
+ size_t len, dst_avail, src_avail;
+ dma_addr_t dma_dst, dma_src;
+ u32 desc_cnt = 0, i;
+ struct scatterlist *sg;
+
+ for_each_sg(src_sg, sg, src_sg_len, i)
+ desc_cnt += DIV_ROUND_UP(sg_dma_len(sg),
+ ZYNQMP_DMA_MAX_TRANS_LEN);
+
+ spin_lock_bh(&chan->lock);
+ if (desc_cnt > chan->desc_free_cnt) {
+ spin_unlock_bh(&chan->lock);
+ dev_dbg(chan->dev, "chan %p descs are not available\n", chan);
+ return NULL;
+ }
+ chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt;
+ spin_unlock_bh(&chan->lock);
+
+ dst_avail = sg_dma_len(dst_sg);
+ src_avail = sg_dma_len(src_sg);
+
+ /* Run until we are out of scatterlist entries */
+ while (true) {
+ /* Allocate and populate the descriptor */
+ new = zynqmp_dma_get_descriptor(chan);
+ desc = (struct zynqmp_dma_desc_ll *)new->src_v;
+ len = min_t(size_t, src_avail, dst_avail);
+ len = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN);
+ if (len == 0)
+ goto fetch;
+ dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
+ dst_avail;
+ dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
+ src_avail;
+
+ zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src, dma_dst,
+ len, prev);
+ prev = desc;
+ dst_avail -= len;
+ src_avail -= len;
+
+ if (!first)
+ first = new;
+ else
+ list_add_tail(&new->node, &first->tx_list);
+fetch:
+ /* Fetch the next dst scatterlist entry */
+ if (dst_avail == 0) {
+ if (dst_sg_len == 0)
+ break;
+ dst_sg = sg_next(dst_sg);
+ if (dst_sg == NULL)
+ break;
+ dst_sg_len--;
+ dst_avail = sg_dma_len(dst_sg);
+ }
+ /* Fetch the next src scatterlist entry */
+ if (src_avail == 0) {
+ if (src_sg_len == 0)
+ break;
+ src_sg = sg_next(src_sg);
+ if (src_sg == NULL)
+ break;
+ src_sg_len--;
+ src_avail = sg_dma_len(src_sg);
+ }
+ }
+
+ zynqmp_dma_desc_config_eod(chan, desc);
+ first->async_tx.flags = flags;
+ return &first->async_tx;
+}
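+
+/*
+ * Worked example of the splitting loop above (hypothetical sizes): a single
+ * 3k source entry against 2k + 1k destination entries produces two hardware
+ * descriptor pairs, 2k (limited by dst_avail) and then 1k, each linked to
+ * its predecessor before the EOD bits are set on the final pair.
+ */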
+
+/**
+ * zynqmp_dma_chan_remove - Channel remove function
+ * @chan: ZynqMP DMA channel pointer
+ */
+static void zynqmp_dma_chan_remove(struct zynqmp_dma_chan *chan)
+{
+ if (!chan)
+ return;
+
+ devm_free_irq(chan->zdev->dev, chan->irq, chan);
+ tasklet_kill(&chan->tasklet);
+ list_del(&chan->common.device_node);
+ clk_disable_unprepare(chan->clk_apb);
+ clk_disable_unprepare(chan->clk_main);
+}
+
+/**
+ * zynqmp_dma_chan_probe - Per Channel Probing
+ * @zdev: Driver specific device structure
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
+ struct platform_device *pdev)
+{
+ struct zynqmp_dma_chan *chan;
+ struct resource *res;
+ struct device_node *node = pdev->dev.of_node;
+ int err;
+
+ chan = devm_kzalloc(zdev->dev, sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+ chan->dev = zdev->dev;
+ chan->zdev = zdev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ chan->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(chan->regs))
+ return PTR_ERR(chan->regs);
+
+ chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64;
+ chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL;
+ chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL;
+ err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width);
+ if (err < 0) {
+ dev_err(&pdev->dev, "missing xlnx,bus-width property\n");
+ return err;
+ }
+
+ if (chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_64 &&
+ chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_128) {
+ dev_err(zdev->dev, "invalid bus-width value");
+ return -EINVAL;
+ }
+
+ chan->is_dmacoherent = of_property_read_bool(node, "dma-coherent");
+ zdev->chan = chan;
+ tasklet_init(&chan->tasklet, zynqmp_dma_do_tasklet, (ulong)chan);
+ spin_lock_init(&chan->lock);
+ INIT_LIST_HEAD(&chan->active_list);
+ INIT_LIST_HEAD(&chan->pending_list);
+ INIT_LIST_HEAD(&chan->done_list);
+ INIT_LIST_HEAD(&chan->free_list);
+
+ dma_cookie_init(&chan->common);
+ chan->common.device = &zdev->common;
+ list_add_tail(&chan->common.device_node, &zdev->common.channels);
+
+ zynqmp_dma_init(chan);
+ chan->irq = platform_get_irq(pdev, 0);
+ if (chan->irq < 0)
+ return -ENXIO;
+ err = devm_request_irq(&pdev->dev, chan->irq, zynqmp_dma_irq_handler, 0,
+ "zynqmp-dma", chan);
+ if (err)
+ return err;
+ chan->clk_main = devm_clk_get(&pdev->dev, "clk_main");
+ if (IS_ERR(chan->clk_main)) {
+ dev_err(&pdev->dev, "main clock not found.\n");
+ return PTR_ERR(chan->clk_main);
+ }
+
+ chan->clk_apb = devm_clk_get(&pdev->dev, "clk_apb");
+ if (IS_ERR(chan->clk_apb)) {
+ dev_err(&pdev->dev, "apb clock not found.\n");
+ return PTR_ERR(chan->clk_apb);
+ }
+
+ err = clk_prepare_enable(chan->clk_main);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to enable main clock.\n");
+ return err;
+ }
+
+ err = clk_prepare_enable(chan->clk_apb);
+ if (err) {
+ clk_disable_unprepare(chan->clk_main);
+ dev_err(&pdev->dev, "Unable to enable apb clock.\n");
+ return err;
+ }
+
+ chan->desc_size = sizeof(struct zynqmp_dma_desc_ll);
+ chan->idle = true;
+ return 0;
+}
+
+/**
+ * of_zynqmp_dma_xlate - Translation function
+ * @dma_spec: Pointer to DMA specifier as found in the device tree
+ * @ofdma: Pointer to DMA controller data
+ *
+ * Return: DMA channel pointer on success and NULL on error
+ */
+static struct dma_chan *of_zynqmp_dma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct zynqmp_dma_device *zdev = ofdma->of_dma_data;
+
+ return dma_get_slave_channel(&zdev->chan->common);
+}
+
+/**
+ * zynqmp_dma_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: '0' on success and failure value on error
+ */
+static int zynqmp_dma_probe(struct platform_device *pdev)
+{
+ struct zynqmp_dma_device *zdev;
+ struct dma_device *p;
+ int ret;
+
+ zdev = devm_kzalloc(&pdev->dev, sizeof(*zdev), GFP_KERNEL);
+ if (!zdev)
+ return -ENOMEM;
+
+ zdev->dev = &pdev->dev;
+ INIT_LIST_HEAD(&zdev->common.channels);
+
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+ dma_cap_set(DMA_SG, zdev->common.cap_mask);
+ dma_cap_set(DMA_MEMCPY, zdev->common.cap_mask);
+
+ p = &zdev->common;
+ p->device_prep_dma_sg = zynqmp_dma_prep_sg;
+ p->device_prep_dma_memcpy = zynqmp_dma_prep_memcpy;
+ p->device_terminate_all = zynqmp_dma_device_terminate_all;
+ p->device_issue_pending = zynqmp_dma_issue_pending;
+ p->device_alloc_chan_resources = zynqmp_dma_alloc_chan_resources;
+ p->device_free_chan_resources = zynqmp_dma_free_chan_resources;
+ p->device_tx_status = dma_cookie_status;
+ p->device_config = zynqmp_dma_device_config;
+ p->dev = &pdev->dev;
+
+ platform_set_drvdata(pdev, zdev);
+
+ ret = zynqmp_dma_chan_probe(zdev, pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Probing channel failed\n");
+ goto free_chan_resources;
+ }
+
+ p->dst_addr_widths = BIT(zdev->chan->bus_width / 8);
+ p->src_addr_widths = BIT(zdev->chan->bus_width / 8);
+
+ dma_async_device_register(&zdev->common);
+
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ of_zynqmp_dma_xlate, zdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to register DMA to DT\n");
+ dma_async_device_unregister(&zdev->common);
+ goto free_chan_resources;
+ }
+
+ dev_info(&pdev->dev, "ZynqMP DMA driver Probe success\n");
+
+ return 0;
+
+free_chan_resources:
+ zynqmp_dma_chan_remove(zdev->chan);
+ return ret;
+}
+
+/**
+ * zynqmp_dma_remove - Driver remove function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * Return: Always '0'
+ */
+static int zynqmp_dma_remove(struct platform_device *pdev)
+{
+ struct zynqmp_dma_device *zdev = platform_get_drvdata(pdev);
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&zdev->common);
+
+ zynqmp_dma_chan_remove(zdev->chan);
+
+ return 0;
+}
+
+static const struct of_device_id zynqmp_dma_of_match[] = {
+ { .compatible = "xlnx,zynqmp-dma-1.0", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, zynqmp_dma_of_match);
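+
+/*
+ * Illustrative device tree node (address, IRQ and clock phandles are
+ * hypothetical) using the properties and clock names this driver requests:
+ *
+ *	fpd_dma_chan1: dma@fd500000 {
+ *		compatible = "xlnx,zynqmp-dma-1.0";
+ *		reg = <0x0 0xfd500000 0x0 0x1000>;
+ *		interrupts = <0 117 4>;
+ *		clocks = <&clk600 0>, <&clk100 0>;
+ *		clock-names = "clk_main", "clk_apb";
+ *		xlnx,bus-width = <64>;
+ *		dma-coherent;
+ *	};
+ */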
+
+static struct platform_driver zynqmp_dma_driver = {
+ .driver = {
+ .name = "xilinx-zynqmp-dma",
+ .of_match_table = zynqmp_dma_of_match,
+ },
+ .probe = zynqmp_dma_probe,
+ .remove = zynqmp_dma_remove,
+};
+
+module_platform_driver(zynqmp_dma_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Xilinx ZynqMP DMA driver");
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 6ca7474ba..dff1a4a6d 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -251,6 +251,14 @@ config EDAC_SBRIDGE
Support for error detection and correction the Intel
Sandy Bridge, Ivy Bridge and Haswell Integrated Memory Controllers.
+config EDAC_SKX
+ tristate "Intel Skylake server Integrated MC"
+ depends on EDAC_MM_EDAC && PCI && X86_64 && X86_MCE_INTEL
+ depends on PCI_MMCONFIG
+ help
+	  Support for error detection and correction on the Intel
+	  Skylake server Integrated Memory Controllers.
+
config EDAC_MPC85XX
tristate "Freescale MPC83xx / MPC85xx"
depends on EDAC_MM_EDAC && FSL_SOC
@@ -391,6 +399,13 @@ config EDAC_ALTERA_OCRAM
Support for error detection and correction on the
Altera On-Chip RAM Memory for Altera SoCs.
+config EDAC_ALTERA_ETHERNET
+ bool "Altera Ethernet FIFO ECC"
+ depends on EDAC_ALTERA=y
+ help
+ Support for error detection and correction on the
+ Altera Ethernet FIFO Memory for Altera SoCs.
+
config EDAC_SYNOPSYS
tristate "Synopsys DDR Memory Controller"
depends on EDAC_MM_EDAC && ARCH_ZYNQ
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index f9e4a3e0e..986049925 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_EDAC_I5400) += i5400_edac.o
obj-$(CONFIG_EDAC_I7300) += i7300_edac.o
obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o
obj-$(CONFIG_EDAC_SBRIDGE) += sb_edac.o
+obj-$(CONFIG_EDAC_SKX) += skx_edac.o
obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o
obj-$(CONFIG_EDAC_E752X) += e752x_edac.o
obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 5b4d223d6..2398d0701 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -19,12 +19,15 @@
#include <asm/cacheflush.h>
#include <linux/ctype.h>
+#include <linux/delay.h>
#include <linux/edac.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -548,10 +551,10 @@ module_platform_driver(altr_edac_driver);
* trigger testing are different for each memory.
*/
-const struct edac_device_prv_data ocramecc_data;
-const struct edac_device_prv_data l2ecc_data;
-const struct edac_device_prv_data a10_ocramecc_data;
-const struct edac_device_prv_data a10_l2ecc_data;
+static const struct edac_device_prv_data ocramecc_data;
+static const struct edac_device_prv_data l2ecc_data;
+static const struct edac_device_prv_data a10_ocramecc_data;
+static const struct edac_device_prv_data a10_l2ecc_data;
static irqreturn_t altr_edac_device_handler(int irq, void *dev_id)
{
@@ -686,11 +689,9 @@ static void altr_create_edacdev_dbgfs(struct edac_device_ctl_info *edac_dci,
static const struct of_device_id altr_edac_device_of_match[] = {
#ifdef CONFIG_EDAC_ALTERA_L2C
{ .compatible = "altr,socfpga-l2-ecc", .data = &l2ecc_data },
- { .compatible = "altr,socfpga-a10-l2-ecc", .data = &a10_l2ecc_data },
#endif
#ifdef CONFIG_EDAC_ALTERA_OCRAM
{ .compatible = "altr,socfpga-ocram-ecc", .data = &ocramecc_data },
- { .compatible = "altr,socfpga-a10-ocram-ecc", .data = &a10_ocramecc_data },
#endif
{},
};
@@ -825,16 +826,16 @@ static struct platform_driver altr_edac_device_driver = {
};
module_platform_driver(altr_edac_device_driver);
-/*********************** OCRAM EDAC Device Functions *********************/
+/******************* Arria10 Device ECC Shared Functions *****************/
-#ifdef CONFIG_EDAC_ALTERA_OCRAM
/*
* Test for memory's ECC dependencies upon entry because platform specific
* startup should have initialized the memory and enabled the ECC.
* Can't turn on ECC here because accessing un-initialized memory will
* cause CE/UE errors possibly causing an ABORT.
*/
-static int altr_check_ecc_deps(struct altr_edac_device_dev *device)
+static int __maybe_unused
+altr_check_ecc_deps(struct altr_edac_device_dev *device)
{
void __iomem *base = device->base;
const struct edac_device_prv_data *prv = device->data;
@@ -848,6 +849,227 @@ static int altr_check_ecc_deps(struct altr_edac_device_dev *device)
return -ENODEV;
}
+static irqreturn_t __maybe_unused altr_edac_a10_ecc_irq(int irq, void *dev_id)
+{
+ struct altr_edac_device_dev *dci = dev_id;
+ void __iomem *base = dci->base;
+
+ if (irq == dci->sb_irq) {
+ writel(ALTR_A10_ECC_SERRPENA,
+ base + ALTR_A10_ECC_INTSTAT_OFST);
+ edac_device_handle_ce(dci->edac_dev, 0, 0, dci->edac_dev_name);
+
+ return IRQ_HANDLED;
+ } else if (irq == dci->db_irq) {
+ writel(ALTR_A10_ECC_DERRPENA,
+ base + ALTR_A10_ECC_INTSTAT_OFST);
+ edac_device_handle_ue(dci->edac_dev, 0, 0, dci->edac_dev_name);
+ if (dci->data->panic)
+ panic("\nEDAC:ECC_DEVICE[Uncorrectable errors]\n");
+
+ return IRQ_HANDLED;
+ }
+
+ WARN_ON(1);
+
+ return IRQ_NONE;
+}
+
+/******************* Arria10 Memory Buffer Functions *********************/
+
+static inline int a10_get_irq_mask(struct device_node *np)
+{
+ int irq;
+ const u32 *handle = of_get_property(np, "interrupts", NULL);
+
+ if (!handle)
+ return -ENODEV;
+ irq = be32_to_cpup(handle);
+ return irq;
+}
+
+static inline void ecc_set_bits(u32 bit_mask, void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr);
+
+ value |= bit_mask;
+ writel(value, ioaddr);
+}
+
+static inline void ecc_clear_bits(u32 bit_mask, void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr);
+
+ value &= ~bit_mask;
+ writel(value, ioaddr);
+}
+
+static inline int ecc_test_bits(u32 bit_mask, void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr);
+
+ return (value & bit_mask) ? 1 : 0;
+}
+
+/*
+ * This function uses the memory initialization block in the Arria10 ECC
+ * controller to initialize/clear the entire memory data and ECC data.
+ */
+static int __maybe_unused altr_init_memory_port(void __iomem *ioaddr, int port)
+{
+ int limit = ALTR_A10_ECC_INIT_WATCHDOG_10US;
+ u32 init_mask, stat_mask, clear_mask;
+ int ret = 0;
+
+ if (port) {
+ init_mask = ALTR_A10_ECC_INITB;
+ stat_mask = ALTR_A10_ECC_INITCOMPLETEB;
+ clear_mask = ALTR_A10_ECC_ERRPENB_MASK;
+ } else {
+ init_mask = ALTR_A10_ECC_INITA;
+ stat_mask = ALTR_A10_ECC_INITCOMPLETEA;
+ clear_mask = ALTR_A10_ECC_ERRPENA_MASK;
+ }
+
+ ecc_set_bits(init_mask, (ioaddr + ALTR_A10_ECC_CTRL_OFST));
+ while (limit--) {
+ if (ecc_test_bits(stat_mask,
+ (ioaddr + ALTR_A10_ECC_INITSTAT_OFST)))
+ break;
+ udelay(1);
+ }
+ if (limit < 0)
+ ret = -EBUSY;
+
+ /* Clear any pending ECC interrupts */
+ writel(clear_mask, (ioaddr + ALTR_A10_ECC_INTSTAT_OFST));
+
+ return ret;
+}
+
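+/*
+ * Bring up one Arria10 peripheral ECC block: mask its interrupt in the
+ * ECC manager, disable ECC, scrub the memory through the hardware
+ * initialization block (both ports when dual-ported), then re-enable
+ * ECC and unmask the interrupt.
+ */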
+static __init int __maybe_unused
+altr_init_a10_ecc_block(struct device_node *np, u32 irq_mask,
+ u32 ecc_ctrl_en_mask, bool dual_port)
+{
+ int ret = 0;
+ void __iomem *ecc_block_base;
+ struct regmap *ecc_mgr_map;
+ char *ecc_name;
+ struct device_node *np_eccmgr;
+
+ ecc_name = (char *)np->name;
+
+ /* Get the ECC Manager - parent of the device EDACs */
+ np_eccmgr = of_get_parent(np);
+ ecc_mgr_map = syscon_regmap_lookup_by_phandle(np_eccmgr,
+ "altr,sysmgr-syscon");
+ of_node_put(np_eccmgr);
+ if (IS_ERR(ecc_mgr_map)) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Unable to get syscon altr,sysmgr-syscon\n");
+ return -ENODEV;
+ }
+
+ /* Map the ECC Block */
+ ecc_block_base = of_iomap(np, 0);
+ if (!ecc_block_base) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Unable to map %s ECC block\n", ecc_name);
+ return -ENODEV;
+ }
+
+ /* Disable ECC */
+ regmap_write(ecc_mgr_map, A10_SYSMGR_ECC_INTMASK_SET_OFST, irq_mask);
+ writel(ALTR_A10_ECC_SERRINTEN,
+ (ecc_block_base + ALTR_A10_ECC_ERRINTENR_OFST));
+ ecc_clear_bits(ecc_ctrl_en_mask,
+ (ecc_block_base + ALTR_A10_ECC_CTRL_OFST));
+ /* Ensure all writes complete */
+ wmb();
+ /* Use HW initialization block to initialize memory for ECC */
+ ret = altr_init_memory_port(ecc_block_base, 0);
+ if (ret) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "ECC: cannot init %s PORTA memory\n", ecc_name);
+ goto out;
+ }
+
+ if (dual_port) {
+ ret = altr_init_memory_port(ecc_block_base, 1);
+ if (ret) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "ECC: cannot init %s PORTB memory\n",
+ ecc_name);
+ goto out;
+ }
+ }
+
+ /* Interrupt mode set to every SBERR */
+ regmap_write(ecc_mgr_map, ALTR_A10_ECC_INTMODE_OFST,
+ ALTR_A10_ECC_INTMODE);
+ /* Enable ECC */
+ ecc_set_bits(ecc_ctrl_en_mask, (ecc_block_base +
+ ALTR_A10_ECC_CTRL_OFST));
+ writel(ALTR_A10_ECC_SERRINTEN,
+ (ecc_block_base + ALTR_A10_ECC_ERRINTENS_OFST));
+ regmap_write(ecc_mgr_map, A10_SYSMGR_ECC_INTMASK_CLR_OFST, irq_mask);
+ /* Ensure all writes complete */
+ wmb();
+out:
+ iounmap(ecc_block_base);
+ return ret;
+}
+
+static int validate_parent_available(struct device_node *np);
+static const struct of_device_id altr_edac_a10_device_of_match[];
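+/*
+ * Walk the children of the Arria10 ECC manager node and, for every
+ * available node matching @compat, initialize its ECC block so the
+ * memory is scrubbed and ECC is enabled before the device is used.
+ */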
+static int __init __maybe_unused altr_init_a10_ecc_device_type(char *compat)
+{
+ int irq;
+ struct device_node *child, *np = of_find_compatible_node(NULL, NULL,
+ "altr,socfpga-a10-ecc-manager");
+ if (!np) {
+ edac_printk(KERN_ERR, EDAC_DEVICE, "ECC Manager not found\n");
+ return -ENODEV;
+ }
+
+ for_each_child_of_node(np, child) {
+ const struct of_device_id *pdev_id;
+ const struct edac_device_prv_data *prv;
+
+ if (!of_device_is_available(child))
+ continue;
+ if (!of_device_is_compatible(child, compat))
+ continue;
+
+ if (validate_parent_available(child))
+ continue;
+
+ irq = a10_get_irq_mask(child);
+ if (irq < 0)
+ continue;
+
+ /* Get matching node and check for valid result */
+ pdev_id = of_match_node(altr_edac_a10_device_of_match, child);
+ if (IS_ERR_OR_NULL(pdev_id))
+ continue;
+
+ /* Validate private data pointer before dereferencing */
+ prv = pdev_id->data;
+ if (!prv)
+ continue;
+
+ altr_init_a10_ecc_block(child, BIT(irq),
+ prv->ecc_enable_mask, 0);
+ }
+
+ of_node_put(np);
+ return 0;
+}
+
+/*********************** OCRAM EDAC Device Functions *********************/
+
+#ifdef CONFIG_EDAC_ALTERA_OCRAM
+
static void *ocram_alloc_mem(size_t size, void **other)
{
struct device_node *np;
@@ -882,25 +1104,7 @@ static void ocram_free_mem(void *p, size_t size, void *other)
gen_pool_free((struct gen_pool *)other, (u32)p, size);
}
-static irqreturn_t altr_edac_a10_ecc_irq(struct altr_edac_device_dev *dci,
- bool sberr)
-{
- void __iomem *base = dci->base;
-
- if (sberr) {
- writel(ALTR_A10_ECC_SERRPENA,
- base + ALTR_A10_ECC_INTSTAT_OFST);
- edac_device_handle_ce(dci->edac_dev, 0, 0, dci->edac_dev_name);
- } else {
- writel(ALTR_A10_ECC_DERRPENA,
- base + ALTR_A10_ECC_INTSTAT_OFST);
- edac_device_handle_ue(dci->edac_dev, 0, 0, dci->edac_dev_name);
- panic("\nEDAC:ECC_DEVICE[Uncorrectable errors]\n");
- }
- return IRQ_HANDLED;
-}
-
-const struct edac_device_prv_data ocramecc_data = {
+static const struct edac_device_prv_data ocramecc_data = {
.setup = altr_check_ecc_deps,
.ce_clear_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_SERR),
.ue_clear_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_DERR),
@@ -916,7 +1120,7 @@ const struct edac_device_prv_data ocramecc_data = {
.inject_fops = &altr_edac_device_inject_fops,
};
-const struct edac_device_prv_data a10_ocramecc_data = {
+static const struct edac_device_prv_data a10_ocramecc_data = {
.setup = altr_check_ecc_deps,
.ce_clear_mask = ALTR_A10_ECC_SERRPENA,
.ue_clear_mask = ALTR_A10_ECC_DERRPENA,
@@ -929,6 +1133,12 @@ const struct edac_device_prv_data a10_ocramecc_data = {
.set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
.ecc_irq_handler = altr_edac_a10_ecc_irq,
.inject_fops = &altr_edac_a10_device_inject_fops,
+ /*
+ * OCRAM panic on uncorrectable error because sleep/resume
+ * functions and FPGA contents are stored in OCRAM. Prefer
+ * a kernel panic over executing/loading corrupted data.
+ */
+ .panic = true,
};
#endif /* CONFIG_EDAC_ALTERA_OCRAM */
@@ -988,25 +1198,33 @@ static int altr_l2_check_deps(struct altr_edac_device_dev *device)
return -ENODEV;
}
-static irqreturn_t altr_edac_a10_l2_irq(struct altr_edac_device_dev *dci,
- bool sberr)
+static irqreturn_t altr_edac_a10_l2_irq(int irq, void *dev_id)
{
- if (sberr) {
+ struct altr_edac_device_dev *dci = dev_id;
+
+ if (irq == dci->sb_irq) {
regmap_write(dci->edac->ecc_mgr_map,
A10_SYSGMR_MPU_CLEAR_L2_ECC_OFST,
A10_SYSGMR_MPU_CLEAR_L2_ECC_SB);
edac_device_handle_ce(dci->edac_dev, 0, 0, dci->edac_dev_name);
- } else {
+
+ return IRQ_HANDLED;
+ } else if (irq == dci->db_irq) {
regmap_write(dci->edac->ecc_mgr_map,
A10_SYSGMR_MPU_CLEAR_L2_ECC_OFST,
A10_SYSGMR_MPU_CLEAR_L2_ECC_MB);
edac_device_handle_ue(dci->edac_dev, 0, 0, dci->edac_dev_name);
panic("\nEDAC:ECC_DEVICE[Uncorrectable errors]\n");
+
+ return IRQ_HANDLED;
}
- return IRQ_HANDLED;
+
+ WARN_ON(1);
+
+ return IRQ_NONE;
}
-const struct edac_device_prv_data l2ecc_data = {
+static const struct edac_device_prv_data l2ecc_data = {
.setup = altr_l2_check_deps,
.ce_clear_mask = 0,
.ue_clear_mask = 0,
@@ -1021,7 +1239,7 @@ const struct edac_device_prv_data l2ecc_data = {
.inject_fops = &altr_edac_device_inject_fops,
};
-const struct edac_device_prv_data a10_l2ecc_data = {
+static const struct edac_device_prv_data a10_l2ecc_data = {
.setup = altr_l2_check_deps,
.ce_clear_mask = ALTR_A10_L2_ECC_SERR_CLR,
.ue_clear_mask = ALTR_A10_L2_ECC_MERR_CLR,
@@ -1040,7 +1258,49 @@ const struct edac_device_prv_data a10_l2ecc_data = {
#endif /* CONFIG_EDAC_ALTERA_L2C */
+/********************* Ethernet Device Functions ********************/
+
+#ifdef CONFIG_EDAC_ALTERA_ETHERNET
+
+static const struct edac_device_prv_data a10_enetecc_data = {
+ .setup = altr_check_ecc_deps,
+ .ce_clear_mask = ALTR_A10_ECC_SERRPENA,
+ .ue_clear_mask = ALTR_A10_ECC_DERRPENA,
+ .dbgfs_name = "altr_trigger",
+ .ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
+ .ecc_en_ofst = ALTR_A10_ECC_CTRL_OFST,
+ .ce_set_mask = ALTR_A10_ECC_TSERRA,
+ .ue_set_mask = ALTR_A10_ECC_TDERRA,
+ .set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
+ .ecc_irq_handler = altr_edac_a10_ecc_irq,
+ .inject_fops = &altr_edac_a10_device_inject_fops,
+};
+
+static int __init socfpga_init_ethernet_ecc(void)
+{
+ return altr_init_a10_ecc_device_type("altr,socfpga-eth-mac-ecc");
+}
+
+early_initcall(socfpga_init_ethernet_ecc);
+
+#endif /* CONFIG_EDAC_ALTERA_ETHERNET */
+
/********************* Arria10 EDAC Device Functions *************************/
+static const struct of_device_id altr_edac_a10_device_of_match[] = {
+#ifdef CONFIG_EDAC_ALTERA_L2C
+ { .compatible = "altr,socfpga-a10-l2-ecc", .data = &a10_l2ecc_data },
+#endif
+#ifdef CONFIG_EDAC_ALTERA_OCRAM
+ { .compatible = "altr,socfpga-a10-ocram-ecc",
+ .data = &a10_ocramecc_data },
+#endif
+#ifdef CONFIG_EDAC_ALTERA_ETHERNET
+ { .compatible = "altr,socfpga-eth-mac-ecc",
+ .data = &a10_enetecc_data },
+#endif
+ {},
+};
+MODULE_DEVICE_TABLE(of, altr_edac_a10_device_of_match);
/*
* The Arria10 EDAC Device Functions differ from the Cyclone5/Arria5
@@ -1075,28 +1335,42 @@ static ssize_t altr_edac_a10_device_trig(struct file *file,
return count;
}
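+/*
+ * Chained handler for the ECC manager's SERR/DERR parent interrupts.
+ * Each bit set in the manager's status register is forwarded to the
+ * per-device IRQ mapped in the linear domain; DERR sources are offset
+ * by 32 so both banks share one 64-entry domain.
+ */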
-static irqreturn_t altr_edac_a10_irq_handler(int irq, void *dev_id)
+static void altr_edac_a10_irq_handler(struct irq_desc *desc)
{
- irqreturn_t rc = IRQ_NONE;
- struct altr_arria10_edac *edac = dev_id;
- struct altr_edac_device_dev *dci;
- int irq_status;
- bool sberr = (irq == edac->sb_irq) ? 1 : 0;
- int sm_offset = sberr ? A10_SYSMGR_ECC_INTSTAT_SERR_OFST :
- A10_SYSMGR_ECC_INTSTAT_DERR_OFST;
+ int dberr, bit, sm_offset, irq_status;
+ struct altr_arria10_edac *edac = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ int irq = irq_desc_get_irq(desc);
+
+ dberr = (irq == edac->db_irq) ? 1 : 0;
+ sm_offset = dberr ? A10_SYSMGR_ECC_INTSTAT_DERR_OFST :
+ A10_SYSMGR_ECC_INTSTAT_SERR_OFST;
+
+ chained_irq_enter(chip, desc);
regmap_read(edac->ecc_mgr_map, sm_offset, &irq_status);
- if ((irq != edac->sb_irq) && (irq != edac->db_irq)) {
- WARN_ON(1);
- } else {
- list_for_each_entry(dci, &edac->a10_ecc_devices, next) {
- if (irq_status & dci->data->irq_status_mask)
- rc = dci->data->ecc_irq_handler(dci, sberr);
- }
+ for_each_set_bit(bit, (unsigned long *)&irq_status, 32) {
+ irq = irq_linear_revmap(edac->domain, dberr * 32 + bit);
+ if (irq)
+ generic_handle_irq(irq);
}
- return rc;
+ chained_irq_exit(chip, desc);
+}
+
+static int validate_parent_available(struct device_node *np)
+{
+ struct device_node *parent;
+ int ret = 0;
+
+ /* Ensure parent device is enabled if parent node exists */
+ parent = of_parse_phandle(np, "altr,ecc-parent", 0);
+ if (parent && !of_device_is_available(parent))
+ ret = -ENODEV;
+
+ of_node_put(parent);
+ return ret;
}
static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
@@ -1111,7 +1385,7 @@ static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
const struct edac_device_prv_data *prv;
/* Get matching node and check for valid result */
const struct of_device_id *pdev_id =
- of_match_node(altr_edac_device_of_match, np);
+ of_match_node(altr_edac_a10_device_of_match, np);
if (IS_ERR_OR_NULL(pdev_id))
return -ENODEV;
@@ -1120,6 +1394,9 @@ static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
if (IS_ERR_OR_NULL(prv))
return -ENODEV;
+ if (validate_parent_available(np))
+ return -ENODEV;
+
if (!devres_open_group(edac->dev, altr_edac_a10_device_add, GFP_KERNEL))
return -ENOMEM;
@@ -1168,6 +1445,34 @@ static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
goto err_release_group1;
}
+ altdev->sb_irq = irq_of_parse_and_map(np, 0);
+ if (!altdev->sb_irq) {
+ edac_printk(KERN_ERR, EDAC_DEVICE, "Error allocating SBIRQ\n");
+ rc = -ENODEV;
+ goto err_release_group1;
+ }
+ rc = devm_request_irq(edac->dev, altdev->sb_irq,
+ prv->ecc_irq_handler,
+ IRQF_SHARED, ecc_name, altdev);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_DEVICE, "No SBERR IRQ resource\n");
+ goto err_release_group1;
+ }
+
+ altdev->db_irq = irq_of_parse_and_map(np, 1);
+ if (!altdev->db_irq) {
+ edac_printk(KERN_ERR, EDAC_DEVICE, "Error allocating DBIRQ\n");
+ rc = -ENODEV;
+ goto err_release_group1;
+ }
+ rc = devm_request_irq(edac->dev, altdev->db_irq,
+ prv->ecc_irq_handler,
+ IRQF_SHARED, ecc_name, altdev);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_DEVICE, "No DBERR IRQ resource\n");
+ goto err_release_group1;
+ }
+
rc = edac_device_add_device(dci);
if (rc) {
dev_err(edac->dev, "edac_device_add_device failed\n");
@@ -1186,7 +1491,6 @@ static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
err_release_group1:
edac_device_free_ctl_info(dci);
err_release_group:
- edac_printk(KERN_ALERT, EDAC_DEVICE, "%s: %d\n", __func__, __LINE__);
devres_release_group(edac->dev, NULL);
edac_printk(KERN_ERR, EDAC_DEVICE,
"%s:Error setting up EDAC device: %d\n", ecc_name, rc);
@@ -1194,11 +1498,43 @@ err_release_group:
return rc;
}
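+/*
+ * irq_chip callbacks for the ECC manager: a source is masked or
+ * unmasked by writing its bit to the manager's interrupt mask
+ * set/clear registers through the syscon regmap.
+ */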
+static void a10_eccmgr_irq_mask(struct irq_data *d)
+{
+ struct altr_arria10_edac *edac = irq_data_get_irq_chip_data(d);
+
+ regmap_write(edac->ecc_mgr_map, A10_SYSMGR_ECC_INTMASK_SET_OFST,
+ BIT(d->hwirq));
+}
+
+static void a10_eccmgr_irq_unmask(struct irq_data *d)
+{
+ struct altr_arria10_edac *edac = irq_data_get_irq_chip_data(d);
+
+ regmap_write(edac->ecc_mgr_map, A10_SYSMGR_ECC_INTMASK_CLR_OFST,
+ BIT(d->hwirq));
+}
+
+static int a10_eccmgr_irqdomain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct altr_arria10_edac *edac = d->host_data;
+
+ irq_set_chip_and_handler(irq, &edac->irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, edac);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops a10_eccmgr_ic_ops = {
+ .map = a10_eccmgr_irqdomain_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
static int altr_edac_a10_probe(struct platform_device *pdev)
{
struct altr_arria10_edac *edac;
struct device_node *child;
- int rc;
edac = devm_kzalloc(&pdev->dev, sizeof(*edac), GFP_KERNEL);
if (!edac)
@@ -1216,32 +1552,50 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
return PTR_ERR(edac->ecc_mgr_map);
}
+ edac->irq_chip.name = pdev->dev.of_node->name;
+ edac->irq_chip.irq_mask = a10_eccmgr_irq_mask;
+ edac->irq_chip.irq_unmask = a10_eccmgr_irq_unmask;
+ edac->domain = irq_domain_add_linear(pdev->dev.of_node, 64,
+ &a10_eccmgr_ic_ops, edac);
+ if (!edac->domain) {
+ dev_err(&pdev->dev, "Error adding IRQ domain\n");
+ return -ENOMEM;
+ }
+
edac->sb_irq = platform_get_irq(pdev, 0);
- rc = devm_request_irq(&pdev->dev, edac->sb_irq,
- altr_edac_a10_irq_handler,
- IRQF_SHARED, dev_name(&pdev->dev), edac);
- if (rc) {
- edac_printk(KERN_ERR, EDAC_DEVICE, "No SBERR IRQ resource\n");
- return rc;
+ if (edac->sb_irq < 0) {
+ dev_err(&pdev->dev, "No SBERR IRQ resource\n");
+ return edac->sb_irq;
}
+ irq_set_chained_handler_and_data(edac->sb_irq,
+ altr_edac_a10_irq_handler,
+ edac);
+
edac->db_irq = platform_get_irq(pdev, 1);
- rc = devm_request_irq(&pdev->dev, edac->db_irq,
- altr_edac_a10_irq_handler,
- IRQF_SHARED, dev_name(&pdev->dev), edac);
- if (rc) {
- edac_printk(KERN_ERR, EDAC_DEVICE, "No DBERR IRQ resource\n");
- return rc;
+ if (edac->db_irq < 0) {
+ dev_err(&pdev->dev, "No DBERR IRQ resource\n");
+ return edac->db_irq;
}
+ irq_set_chained_handler_and_data(edac->db_irq,
+ altr_edac_a10_irq_handler,
+ edac);
for_each_child_of_node(pdev->dev.of_node, child) {
if (!of_device_is_available(child))
continue;
if (of_device_is_compatible(child, "altr,socfpga-a10-l2-ecc"))
altr_edac_a10_device_add(edac, child);
- else if (of_device_is_compatible(child,
- "altr,socfpga-a10-ocram-ecc"))
+ else if ((of_device_is_compatible(child,
+ "altr,socfpga-a10-ocram-ecc")) ||
+ (of_device_is_compatible(child,
+ "altr,socfpga-eth-mac-ecc")))
altr_edac_a10_device_add(edac, child);
+ else if (of_device_is_compatible(child,
+ "altr,sdram-edac-a10"))
+ of_platform_populate(pdev->dev.of_node,
+ altr_sdram_ctrl_of_match,
+ NULL, &pdev->dev);
}
return 0;
diff --git a/drivers/edac/altera_edac.h b/drivers/edac/altera_edac.h
index 42090f36b..687d8e754 100644
--- a/drivers/edac/altera_edac.h
+++ b/drivers/edac/altera_edac.h
@@ -230,8 +230,13 @@ struct altr_sdram_mc_data {
#define ALTR_A10_ECC_INITCOMPLETEB BIT(8)
#define ALTR_A10_ECC_ERRINTEN_OFST 0x10
+#define ALTR_A10_ECC_ERRINTENS_OFST 0x14
+#define ALTR_A10_ECC_ERRINTENR_OFST 0x18
#define ALTR_A10_ECC_SERRINTEN BIT(0)
+#define ALTR_A10_ECC_INTMODE_OFST 0x1C
+#define ALTR_A10_ECC_INTMODE BIT(0)
+
#define ALTR_A10_ECC_INTSTAT_OFST 0x20
#define ALTR_A10_ECC_SERRPENA BIT(0)
#define ALTR_A10_ECC_DERRPENA BIT(8)
@@ -280,6 +285,12 @@ struct altr_sdram_mc_data {
/* Arria 10 OCRAM ECC Management Group Defines */
#define ALTR_A10_OCRAM_ECC_EN_CTL (BIT(1) | BIT(0))
+/* Arria 10 Ethernet ECC Management Group Defines */
+#define ALTR_A10_COMMON_ECC_EN_CTL BIT(0)
+
+/* A10 ECC Controller memory initialization timeout */
+#define ALTR_A10_ECC_INIT_WATCHDOG_10US 10000
+
struct altr_edac_device_dev;
struct edac_device_prv_data {
@@ -295,10 +306,10 @@ struct edac_device_prv_data {
int ce_set_mask;
int ue_set_mask;
int set_err_ofst;
- irqreturn_t (*ecc_irq_handler)(struct altr_edac_device_dev *dci,
- bool sb);
+ irqreturn_t (*ecc_irq_handler)(int irq, void *dev_id);
int trig_alloc_sz;
const struct file_operations *inject_fops;
+ bool panic;
};
struct altr_edac_device_dev {
@@ -320,6 +331,8 @@ struct altr_arria10_edac {
struct regmap *ecc_mgr_map;
int sb_irq;
int db_irq;
+ struct irq_domain *domain;
+ struct irq_chip irq_chip;
struct list_head a10_ecc_devices;
};
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 46784eb2e..8c0ec2128 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -2966,11 +2966,11 @@ static int __init amd64_edac_init(void)
int err = -ENODEV;
int i;
- opstate_init();
-
if (amd_cache_northbridges() < 0)
goto err_ret;
+ opstate_init();
+
err = -ENOMEM;
ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
if (!ecc_stngs)
diff --git a/drivers/edac/skx_edac.c b/drivers/edac/skx_edac.c
new file mode 100644
index 000000000..0ff4878c2
--- /dev/null
+++ b/drivers/edac/skx_edac.c
@@ -0,0 +1,1121 @@
+/*
+ * EDAC driver for Intel(R) Xeon(R) Skylake processors
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/edac.h>
+#include <linux/mmzone.h>
+#include <linux/smp.h>
+#include <linux/bitmap.h>
+#include <linux/math64.h>
+#include <linux/mod_devicetable.h>
+#include <asm/cpu_device_id.h>
+#include <asm/processor.h>
+#include <asm/mce.h>
+
+#include "edac_core.h"
+
+#define SKX_REVISION " Ver: 1.0 "
+
+/*
+ * Debug macros
+ */
+#define skx_printk(level, fmt, arg...) \
+ edac_printk(level, "skx", fmt, ##arg)
+
+#define skx_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "skx", fmt, ##arg)
+
+/*
+ * Get a bit field at register value <v>, from bit <lo> to bit <hi>
+ */
+#define GET_BITFIELD(v, lo, hi) \
+ (((v) & GENMASK_ULL((hi), (lo))) >> (lo))
+
+static LIST_HEAD(skx_edac_list);
+
+static u64 skx_tolm, skx_tohm;
+
+#define NUM_IMC 2 /* memory controllers per socket */
+#define NUM_CHANNELS 3 /* channels per memory controller */
+#define NUM_DIMMS 2 /* Max DIMMS per channel */
+
+#define MASK26 0x3FFFFFF /* Mask for 2^26 */
+#define MASK29 0x1FFFFFFF /* Mask for 2^29 */
+
+/*
+ * Each cpu socket contains some pci devices that provide global
+ * information, and also some that are local to each of the two
+ * memory controllers on the die.
+ */
+struct skx_dev {
+ struct list_head list;
+ u8 bus[4];
+ struct pci_dev *sad_all;
+ struct pci_dev *util_all;
+ u32 mcroute;
+ struct skx_imc {
+ struct mem_ctl_info *mci;
+ u8 mc; /* system wide mc# */
+ u8 lmc; /* socket relative mc# */
+ u8 src_id, node_id;
+ struct skx_channel {
+ struct pci_dev *cdev;
+ struct skx_dimm {
+ u8 close_pg;
+ u8 bank_xor_enable;
+ u8 fine_grain_bank;
+ u8 rowbits;
+ u8 colbits;
+ } dimms[NUM_DIMMS];
+ } chan[NUM_CHANNELS];
+ } imc[NUM_IMC];
+};
+static int skx_num_sockets;
+
+struct skx_pvt {
+ struct skx_imc *imc;
+};
+
+struct decoded_addr {
+ struct skx_dev *dev;
+ u64 addr;
+ int socket;
+ int imc;
+ int channel;
+ u64 chan_addr;
+ int sktways;
+ int chanways;
+ int dimm;
+ int rank;
+ int channel_rank;
+ u64 rank_address;
+ int row;
+ int column;
+ int bank_address;
+ int bank_group;
+};
+
+static struct skx_dev *get_skx_dev(u8 bus, u8 idx)
+{
+ struct skx_dev *d;
+
+ list_for_each_entry(d, &skx_edac_list, list) {
+ if (d->bus[idx] == bus)
+ return d;
+ }
+
+ return NULL;
+}
+
+enum munittype {
+ CHAN0, CHAN1, CHAN2, SAD_ALL, UTIL_ALL, SAD
+};
+
+struct munit {
+ u16 did;
+ u16 devfn[NUM_IMC];
+ u8 busidx;
+ u8 per_socket;
+ enum munittype mtype;
+};
+
+/*
+ * List of PCI device ids that we need together with some device
+ * number and function numbers to tell which memory controller the
+ * device belongs to.
+ */
+static const struct munit skx_all_munits[] = {
+ { 0x2054, { }, 1, 1, SAD_ALL },
+ { 0x2055, { }, 1, 1, UTIL_ALL },
+ { 0x2040, { PCI_DEVFN(10, 0), PCI_DEVFN(12, 0) }, 2, 2, CHAN0 },
+ { 0x2044, { PCI_DEVFN(10, 4), PCI_DEVFN(12, 4) }, 2, 2, CHAN1 },
+ { 0x2048, { PCI_DEVFN(11, 0), PCI_DEVFN(13, 0) }, 2, 2, CHAN2 },
+ { 0x208e, { }, 1, 0, SAD },
+ { }
+};
+
+/*
+ * We use the per-socket device 0x2016 to count how many sockets are present,
+ * and to determine which PCI buses are associated with each socket. Allocate
+ * and build the full list of all the skx_dev structures that we need here.
+ */
+static int get_all_bus_mappings(void)
+{
+ struct pci_dev *pdev, *prev;
+ struct skx_dev *d;
+ u32 reg;
+ int ndev = 0;
+
+ prev = NULL;
+ for (;;) {
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2016, prev);
+ if (!pdev)
+ break;
+ ndev++;
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
+ if (!d) {
+ pci_dev_put(pdev);
+ return -ENOMEM;
+ }
+ pci_read_config_dword(pdev, 0xCC, &reg);
+ d->bus[0] = GET_BITFIELD(reg, 0, 7);
+ d->bus[1] = GET_BITFIELD(reg, 8, 15);
+ d->bus[2] = GET_BITFIELD(reg, 16, 23);
+ d->bus[3] = GET_BITFIELD(reg, 24, 31);
+ edac_dbg(2, "busses: %x, %x, %x, %x\n",
+ d->bus[0], d->bus[1], d->bus[2], d->bus[3]);
+ list_add_tail(&d->list, &skx_edac_list);
+ skx_num_sockets++;
+ prev = pdev;
+ }
+
+ return ndev;
+}
+
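+/*
+ * Find every instance of the PCI device described by @m, enable it and
+ * attach it to the skx_dev for the bus it sits on. Returns the number
+ * of devices bound (SAD route-table devices are not counted) or a
+ * negative error code.
+ */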
+static int get_all_munits(const struct munit *m)
+{
+ struct pci_dev *pdev, *prev;
+ struct skx_dev *d;
+ u32 reg;
+ int i = 0, ndev = 0;
+
+ prev = NULL;
+ for (;;) {
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, m->did, prev);
+ if (!pdev)
+ break;
+ ndev++;
+ if (m->per_socket == NUM_IMC) {
+ for (i = 0; i < NUM_IMC; i++)
+ if (m->devfn[i] == pdev->devfn)
+ break;
+ if (i == NUM_IMC)
+ goto fail;
+ }
+ d = get_skx_dev(pdev->bus->number, m->busidx);
+ if (!d)
+ goto fail;
+
+ /* Be sure that the device is enabled */
+ if (unlikely(pci_enable_device(pdev) < 0)) {
+ skx_printk(KERN_ERR,
+ "Couldn't enable %04x:%04x\n", PCI_VENDOR_ID_INTEL, m->did);
+ goto fail;
+ }
+
+ switch (m->mtype) {
+ case CHAN0: case CHAN1: case CHAN2:
+ pci_dev_get(pdev);
+ d->imc[i].chan[m->mtype].cdev = pdev;
+ break;
+ case SAD_ALL:
+ pci_dev_get(pdev);
+ d->sad_all = pdev;
+ break;
+ case UTIL_ALL:
+ pci_dev_get(pdev);
+ d->util_all = pdev;
+ break;
+ case SAD:
+ /*
+ * One of these devices exists per core, including cores
+ * that don't exist on this SKU. Ignore any that
+ * read a route table of zero, and make sure all the
+ * non-zero values match.
+ */
+ pci_read_config_dword(pdev, 0xB4, &reg);
+ if (reg != 0) {
+ if (d->mcroute == 0)
+ d->mcroute = reg;
+ else if (d->mcroute != reg) {
+ skx_printk(KERN_ERR,
+ "mcroute mismatch\n");
+ goto fail;
+ }
+ }
+ ndev--;
+ break;
+ }
+
+ prev = pdev;
+ }
+
+ return ndev;
+fail:
+ pci_dev_put(pdev);
+ return -ENODEV;
+}
+
+const struct x86_cpu_id skx_cpuids[] = {
+ { X86_VENDOR_INTEL, 6, 0x55, 0, 0 }, /* Skylake */
+ { }
+};
+MODULE_DEVICE_TABLE(x86cpu, skx_cpuids);
+
+static u8 get_src_id(struct skx_dev *d)
+{
+ u32 reg;
+
+ pci_read_config_dword(d->util_all, 0xF0, &reg);
+
+ return GET_BITFIELD(reg, 12, 14);
+}
+
+static u8 skx_get_node_id(struct skx_dev *d)
+{
+ u32 reg;
+
+ pci_read_config_dword(d->util_all, 0xF4, &reg);
+
+ return GET_BITFIELD(reg, 0, 2);
+}
+
+static int get_dimm_attr(u32 reg, int lobit, int hibit, int add, int minval,
+ int maxval, char *name)
+{
+ u32 val = GET_BITFIELD(reg, lobit, hibit);
+
+ if (val < minval || val > maxval) {
+ edac_dbg(2, "bad %s = %d (raw=%x)\n", name, val, reg);
+ return -EINVAL;
+ }
+ return val + add;
+}
+
+#define IS_DIMM_PRESENT(mtr) GET_BITFIELD((mtr), 15, 15)
+
+#define numrank(reg) get_dimm_attr((reg), 12, 13, 0, 1, 2, "ranks")
+#define numrow(reg) get_dimm_attr((reg), 2, 4, 12, 1, 6, "rows")
+#define numcol(reg) get_dimm_attr((reg), 0, 1, 10, 0, 2, "cols")
+
+static int get_width(u32 mtr)
+{
+ switch (GET_BITFIELD(mtr, 8, 9)) {
+ case 0:
+ return DEV_X4;
+ case 1:
+ return DEV_X8;
+ case 2:
+ return DEV_X16;
+ }
+ return DEV_UNKNOWN;
+}
+
+static int skx_get_hi_lo(void)
+{
+ struct pci_dev *pdev;
+ u32 reg;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2034, NULL);
+ if (!pdev) {
+ edac_dbg(0, "Can't get tolm/tohm\n");
+ return -ENODEV;
+ }
+
+ pci_read_config_dword(pdev, 0xD0, &reg);
+ skx_tolm = reg;
+ pci_read_config_dword(pdev, 0xD4, &reg);
+ skx_tohm = reg;
+ pci_read_config_dword(pdev, 0xD8, &reg);
+ skx_tohm |= (u64)reg << 32;
+
+ pci_dev_put(pdev);
+ edac_dbg(2, "tolm=%llx tohm=%llx\n", skx_tolm, skx_tohm);
+
+ return 0;
+}
+
+static int get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
+ struct skx_imc *imc, int chan, int dimmno)
+{
+ int banks = 16, ranks, rows, cols, npages;
+ u64 size;
+
+ if (!IS_DIMM_PRESENT(mtr))
+ return 0;
+ ranks = numrank(mtr);
+ rows = numrow(mtr);
+ cols = numcol(mtr);
+
+ /*
+ * Compute size in 8-byte (2^3) words, then shift to MiB (2^20)
+ */
+ size = ((1ull << (rows + cols + ranks)) * banks) >> (20 - 3);
+ npages = MiB_TO_PAGES(size);
+
+ edac_dbg(0, "mc#%d: channel %d, dimm %d, %lld MiB (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
+ imc->mc, chan, dimmno, size, npages,
+ banks, ranks, rows, cols);
+
+ imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mtr, 0, 0);
+ imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mtr, 9, 9);
+ imc->chan[chan].dimms[dimmno].fine_grain_bank = GET_BITFIELD(amap, 0, 0);
+ imc->chan[chan].dimms[dimmno].rowbits = rows;
+ imc->chan[chan].dimms[dimmno].colbits = cols;
+
+ dimm->nr_pages = npages;
+ dimm->grain = 32;
+ dimm->dtype = get_width(mtr);
+ dimm->mtype = MEM_DDR4;
+ dimm->edac_mode = EDAC_SECDED; /* likely better than this */
+ snprintf(dimm->label, sizeof(dimm->label), "CPU_SrcID#%u_MC#%u_Chan#%u_DIMM#%u",
+ imc->src_id, imc->lmc, chan, dimmno);
+
+ return 1;
+}
+
+#define SKX_GET_MTMTR(dev, reg) \
+ pci_read_config_dword((dev), 0x87c, &reg)
+
+static bool skx_check_ecc(struct pci_dev *pdev)
+{
+ u32 mtmtr;
+
+ SKX_GET_MTMTR(pdev, mtmtr);
+
+ return !!GET_BITFIELD(mtmtr, 2, 2);
+}
+
+static int skx_get_dimm_config(struct mem_ctl_info *mci)
+{
+ struct skx_pvt *pvt = mci->pvt_info;
+ struct skx_imc *imc = pvt->imc;
+ struct dimm_info *dimm;
+ int i, j;
+ u32 mtr, amap;
+ int ndimms;
+
+ for (i = 0; i < NUM_CHANNELS; i++) {
+ ndimms = 0;
+ pci_read_config_dword(imc->chan[i].cdev, 0x8C, &amap);
+ for (j = 0; j < NUM_DIMMS; j++) {
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
+ mci->n_layers, i, j, 0);
+ pci_read_config_dword(imc->chan[i].cdev,
+ 0x80 + 4*j, &mtr);
+ ndimms += get_dimm_info(mtr, amap, dimm, imc, i, j);
+ }
+ if (ndimms && !skx_check_ecc(imc->chan[0].cdev)) {
+ skx_printk(KERN_ERR, "ECC is disabled on imc %d\n", imc->mc);
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+static void skx_unregister_mci(struct skx_imc *imc)
+{
+ struct mem_ctl_info *mci = imc->mci;
+
+ if (!mci)
+ return;
+
+ edac_dbg(0, "MC%d: mci = %p\n", imc->mc, mci);
+
+ /* Remove MC sysfs nodes */
+ edac_mc_del_mc(mci->pdev);
+
+ edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
+ kfree(mci->ctl_name);
+ edac_mc_free(mci);
+}
+
+static int skx_register_mci(struct skx_imc *imc)
+{
+ struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
+ struct pci_dev *pdev = imc->chan[0].cdev;
+ struct skx_pvt *pvt;
+ int rc;
+
+ /* allocate a new MC control structure */
+ layers[0].type = EDAC_MC_LAYER_CHANNEL;
+ layers[0].size = NUM_CHANNELS;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_SLOT;
+ layers[1].size = NUM_DIMMS;
+ layers[1].is_virt_csrow = true;
+ mci = edac_mc_alloc(imc->mc, ARRAY_SIZE(layers), layers,
+ sizeof(struct skx_pvt));
+
+ if (unlikely(!mci))
+ return -ENOMEM;
+
+ edac_dbg(0, "MC#%d: mci = %p\n", imc->mc, mci);
+
+ /* Associate skx_dev and mci for future usage */
+ imc->mci = mci;
+ pvt = mci->pvt_info;
+ pvt->imc = imc;
+
+ mci->ctl_name = kasprintf(GFP_KERNEL, "Skylake Socket#%d IMC#%d",
+ imc->node_id, imc->lmc);
+ mci->mtype_cap = MEM_FLAG_DDR4;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE;
+ mci->edac_cap = EDAC_FLAG_NONE;
+ mci->mod_name = "skx_edac.c";
+ mci->dev_name = pci_name(imc->chan[0].cdev);
+ mci->mod_ver = SKX_REVISION;
+ mci->ctl_page_to_phys = NULL;
+
+ rc = skx_get_dimm_config(mci);
+ if (rc < 0)
+ goto fail;
+
+ /* record ptr to the generic device */
+ mci->pdev = &pdev->dev;
+
+ /* add this new MC control structure to EDAC's list of MCs */
+ if (unlikely(edac_mc_add_mc(mci))) {
+ edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ kfree(mci->ctl_name);
+ edac_mc_free(mci);
+ imc->mci = NULL;
+ return rc;
+}
+
+#define SKX_MAX_SAD 24
+
+#define SKX_GET_SAD(d, i, reg) \
+ pci_read_config_dword((d)->sad_all, 0x60 + 8 * (i), &reg)
+#define SKX_GET_ILV(d, i, reg) \
+ pci_read_config_dword((d)->sad_all, 0x64 + 8 * (i), &reg)
+
+#define SKX_SAD_MOD3MODE(sad) GET_BITFIELD((sad), 30, 31)
+#define SKX_SAD_MOD3(sad) GET_BITFIELD((sad), 27, 27)
+#define SKX_SAD_LIMIT(sad) (((u64)GET_BITFIELD((sad), 7, 26) << 26) | MASK26)
+#define SKX_SAD_MOD3ASMOD2(sad) GET_BITFIELD((sad), 5, 6)
+#define SKX_SAD_ATTR(sad) GET_BITFIELD((sad), 3, 4)
+#define SKX_SAD_INTERLEAVE(sad) GET_BITFIELD((sad), 1, 2)
+#define SKX_SAD_ENABLE(sad) GET_BITFIELD((sad), 0, 0)
+
+#define SKX_ILV_REMOTE(tgt) (((tgt) & 8) == 0)
+#define SKX_ILV_TARGET(tgt) ((tgt) & 7)
+
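+/*
+ * Stage 1 of address decode: walk the SAD (system address decode)
+ * entries to find which socket, memory controller and channel own a
+ * system address, following at most one remote-node hop.
+ */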
+static bool skx_sad_decode(struct decoded_addr *res)
+{
+ struct skx_dev *d = list_first_entry(&skx_edac_list, typeof(*d), list);
+ u64 addr = res->addr;
+ int i, idx, tgt, lchan, shift;
+ u32 sad, ilv;
+ u64 limit, prev_limit;
+ int remote = 0;
+
+ /* Simple sanity check for I/O space or out of range */
+ if (addr >= skx_tohm || (addr >= skx_tolm && addr < BIT_ULL(32))) {
+ edac_dbg(0, "Address %llx out of range\n", addr);
+ return false;
+ }
+
+restart:
+ prev_limit = 0;
+ for (i = 0; i < SKX_MAX_SAD; i++) {
+ SKX_GET_SAD(d, i, sad);
+ limit = SKX_SAD_LIMIT(sad);
+ if (SKX_SAD_ENABLE(sad)) {
+ if (addr >= prev_limit && addr <= limit)
+ goto sad_found;
+ }
+ prev_limit = limit + 1;
+ }
+ edac_dbg(0, "No SAD entry for %llx\n", addr);
+ return false;
+
+sad_found:
+ SKX_GET_ILV(d, i, ilv);
+
+ switch (SKX_SAD_INTERLEAVE(sad)) {
+ case 0:
+ idx = GET_BITFIELD(addr, 6, 8);
+ break;
+ case 1:
+ idx = GET_BITFIELD(addr, 8, 10);
+ break;
+ case 2:
+ idx = GET_BITFIELD(addr, 12, 14);
+ break;
+ case 3:
+ idx = GET_BITFIELD(addr, 30, 32);
+ break;
+ }
+
+ tgt = GET_BITFIELD(ilv, 4 * idx, 4 * idx + 3);
+
+ /* If it points to another node, find that node and start over */
+ if (SKX_ILV_REMOTE(tgt)) {
+ if (remote) {
+ edac_dbg(0, "Double remote!\n");
+ return false;
+ }
+ remote = 1;
+ list_for_each_entry(d, &skx_edac_list, list) {
+ if (d->imc[0].src_id == SKX_ILV_TARGET(tgt))
+ goto restart;
+ }
+ edac_dbg(0, "Can't find node %d\n", SKX_ILV_TARGET(tgt));
+ return false;
+ }
+
+ if (SKX_SAD_MOD3(sad) == 0)
+ lchan = SKX_ILV_TARGET(tgt);
+ else {
+ switch (SKX_SAD_MOD3MODE(sad)) {
+ case 0:
+ shift = 6;
+ break;
+ case 1:
+ shift = 8;
+ break;
+ case 2:
+ shift = 12;
+ break;
+ default:
+ edac_dbg(0, "illegal mod3mode\n");
+ return false;
+ }
+ switch (SKX_SAD_MOD3ASMOD2(sad)) {
+ case 0:
+ lchan = (addr >> shift) % 3;
+ break;
+ case 1:
+ lchan = (addr >> shift) % 2;
+ break;
+ case 2:
+ lchan = (addr >> shift) % 2;
+ lchan = (lchan << 1) | ~lchan;
+ break;
+ case 3:
+ lchan = ((addr >> shift) % 2) << 1;
+ break;
+ }
+ lchan = (lchan << 1) | (SKX_ILV_TARGET(tgt) & 1);
+ }
+
+ res->dev = d;
+ res->socket = d->imc[0].src_id;
+ res->imc = GET_BITFIELD(d->mcroute, lchan * 3, lchan * 3 + 2);
+ res->channel = GET_BITFIELD(d->mcroute, lchan * 2 + 18, lchan * 2 + 19);
+
+ edac_dbg(2, "%llx: socket=%d imc=%d channel=%d\n",
+ res->addr, res->socket, res->imc, res->channel);
+ return true;
+}
+
+#define SKX_MAX_TAD 8
+
+#define SKX_GET_TADBASE(d, mc, i, reg) \
+ pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x850 + 4 * (i), &reg)
+#define SKX_GET_TADWAYNESS(d, mc, i, reg) \
+ pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x880 + 4 * (i), &reg)
+#define SKX_GET_TADCHNILVOFFSET(d, mc, ch, i, reg) \
+ pci_read_config_dword((d)->imc[mc].chan[ch].cdev, 0x90 + 4 * (i), &reg)
+
+#define SKX_TAD_BASE(b) ((u64)GET_BITFIELD((b), 12, 31) << 26)
+#define SKX_TAD_SKT_GRAN(b) GET_BITFIELD((b), 4, 5)
+#define SKX_TAD_CHN_GRAN(b) GET_BITFIELD((b), 6, 7)
+#define SKX_TAD_LIMIT(b) (((u64)GET_BITFIELD((b), 12, 31) << 26) | MASK26)
+#define SKX_TAD_OFFSET(b) ((u64)GET_BITFIELD((b), 4, 23) << 26)
+#define SKX_TAD_SKTWAYS(b) (1 << GET_BITFIELD((b), 10, 11))
+#define SKX_TAD_CHNWAYS(b) (GET_BITFIELD((b), 8, 9) + 1)
+
+/* which bit is used for both socket and channel interleave */
+static int skx_granularity[] = { 6, 8, 12, 30 };
+
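+/*
+ * Undo one interleave stage: divide the address bits above the
+ * interleave bit by the number of ways, then restore the low bits
+ * from the caller-supplied address.
+ */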
+static u64 skx_do_interleave(u64 addr, int shift, int ways, u64 lowbits)
+{
+ addr >>= shift;
+ addr /= ways;
+ addr <<= shift;
+
+ return addr | (lowbits & ((1ull << shift) - 1));
+}
+
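+/*
+ * Stage 2: use the TAD (target address decode) entries to strip the
+ * socket and channel interleaving and produce a channel address.
+ */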
+static bool skx_tad_decode(struct decoded_addr *res)
+{
+ int i;
+ u32 base, wayness, chnilvoffset;
+ int skt_interleave_bit, chn_interleave_bit;
+ u64 channel_addr;
+
+ for (i = 0; i < SKX_MAX_TAD; i++) {
+ SKX_GET_TADBASE(res->dev, res->imc, i, base);
+ SKX_GET_TADWAYNESS(res->dev, res->imc, i, wayness);
+ if (SKX_TAD_BASE(base) <= res->addr && res->addr <= SKX_TAD_LIMIT(wayness))
+ goto tad_found;
+ }
+ edac_dbg(0, "No TAD entry for %llx\n", res->addr);
+ return false;
+
+tad_found:
+ res->sktways = SKX_TAD_SKTWAYS(wayness);
+ res->chanways = SKX_TAD_CHNWAYS(wayness);
+ skt_interleave_bit = skx_granularity[SKX_TAD_SKT_GRAN(base)];
+ chn_interleave_bit = skx_granularity[SKX_TAD_CHN_GRAN(base)];
+
+ SKX_GET_TADCHNILVOFFSET(res->dev, res->imc, res->channel, i, chnilvoffset);
+ channel_addr = res->addr - SKX_TAD_OFFSET(chnilvoffset);
+
+ if (res->chanways == 3 && skt_interleave_bit > chn_interleave_bit) {
+ /* Must handle channel first, then socket */
+ channel_addr = skx_do_interleave(channel_addr, chn_interleave_bit,
+ res->chanways, channel_addr);
+ channel_addr = skx_do_interleave(channel_addr, skt_interleave_bit,
+ res->sktways, channel_addr);
+ } else {
+ /* Handle socket then channel. Preserve low bits from original address */
+ channel_addr = skx_do_interleave(channel_addr, skt_interleave_bit,
+ res->sktways, res->addr);
+ channel_addr = skx_do_interleave(channel_addr, chn_interleave_bit,
+ res->chanways, res->addr);
+ }
+
+ res->chan_addr = channel_addr;
+
+ edac_dbg(2, "%llx: chan_addr=%llx sktways=%d chanways=%d\n",
+ res->addr, res->chan_addr, res->sktways, res->chanways);
+ return true;
+}
+
+#define SKX_MAX_RIR 4
+
+#define SKX_GET_RIRWAYNESS(d, mc, ch, i, reg) \
+ pci_read_config_dword((d)->imc[mc].chan[ch].cdev, \
+ 0x108 + 4 * (i), &reg)
+#define SKX_GET_RIRILV(d, mc, ch, idx, i, reg) \
+ pci_read_config_dword((d)->imc[mc].chan[ch].cdev, \
+ 0x120 + 16 * idx + 4 * (i), &reg)
+
+#define SKX_RIR_VALID(b) GET_BITFIELD((b), 31, 31)
+#define SKX_RIR_LIMIT(b) (((u64)GET_BITFIELD((b), 1, 11) << 29) | MASK29)
+#define SKX_RIR_WAYS(b) (1 << GET_BITFIELD((b), 28, 29))
+#define SKX_RIR_CHAN_RANK(b) GET_BITFIELD((b), 16, 19)
+#define SKX_RIR_OFFSET(b) ((u64)(GET_BITFIELD((b), 2, 15) << 26))
+
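+/*
+ * Stage 3: the RIR (rank interleave register) maps the channel address
+ * to a DIMM, a rank within that DIMM and a rank address.
+ */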
+static bool skx_rir_decode(struct decoded_addr *res)
+{
+ int i, idx, chan_rank;
+ int shift;
+ u32 rirway, rirlv;
+ u64 rank_addr, prev_limit = 0, limit;
+
+ if (res->dev->imc[res->imc].chan[res->channel].dimms[0].close_pg)
+ shift = 6;
+ else
+ shift = 13;
+
+ for (i = 0; i < SKX_MAX_RIR; i++) {
+ SKX_GET_RIRWAYNESS(res->dev, res->imc, res->channel, i, rirway);
+ limit = SKX_RIR_LIMIT(rirway);
+ if (SKX_RIR_VALID(rirway)) {
+ if (prev_limit <= res->chan_addr &&
+ res->chan_addr <= limit)
+ goto rir_found;
+ }
+ prev_limit = limit;
+ }
+ edac_dbg(0, "No RIR entry for %llx\n", res->addr);
+ return false;
+
+rir_found:
+ rank_addr = res->chan_addr >> shift;
+ rank_addr /= SKX_RIR_WAYS(rirway);
+ rank_addr <<= shift;
+ rank_addr |= res->chan_addr & GENMASK_ULL(shift - 1, 0);
+
+ res->rank_address = rank_addr;
+ idx = (res->chan_addr >> shift) % SKX_RIR_WAYS(rirway);
+
+ SKX_GET_RIRILV(res->dev, res->imc, res->channel, idx, i, rirlv);
+ res->rank_address = rank_addr - SKX_RIR_OFFSET(rirlv);
+ chan_rank = SKX_RIR_CHAN_RANK(rirlv);
+ res->channel_rank = chan_rank;
+ res->dimm = chan_rank / 4;
+ res->rank = chan_rank % 4;
+
+ edac_dbg(2, "%llx: dimm=%d rank=%d chan_rank=%d rank_addr=%llx\n",
+ res->addr, res->dimm, res->rank,
+ res->channel_rank, res->rank_address);
+ return true;
+}
+
+static u8 skx_close_row[] = {
+ 15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33
+};
+static u8 skx_close_column[] = {
+ 3, 4, 5, 14, 19, 23, 24, 25, 26, 27
+};
+static u8 skx_open_row[] = {
+ 14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33
+};
+static u8 skx_open_column[] = {
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 12
+};
+static u8 skx_open_fine_column[] = {
+ 3, 4, 5, 7, 8, 9, 10, 11, 12, 13
+};
+
+static int skx_bits(u64 addr, int nbits, u8 *bits)
+{
+ int i, res = 0;
+
+ for (i = 0; i < nbits; i++)
+ res |= ((addr >> bits[i]) & 1) << i;
+ return res;
+}
+
+static int skx_bank_bits(u64 addr, int b0, int b1, int do_xor, int x0, int x1)
+{
+ int ret = GET_BITFIELD(addr, b0, b0) | (GET_BITFIELD(addr, b1, b1) << 1);
+
+ if (do_xor)
+ ret ^= GET_BITFIELD(addr, x0, x0) | (GET_BITFIELD(addr, x1, x1) << 1);
+
+ return ret;
+}
+
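+/*
+ * Stage 4: scatter the rank address bits into row, column, bank
+ * address and bank group using the open/close-page bit maps above.
+ */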
+static bool skx_mad_decode(struct decoded_addr *r)
+{
+ struct skx_dimm *dimm = &r->dev->imc[r->imc].chan[r->channel].dimms[r->dimm];
+ int bg0 = dimm->fine_grain_bank ? 6 : 13;
+
+ if (dimm->close_pg) {
+ r->row = skx_bits(r->rank_address, dimm->rowbits, skx_close_row);
+ r->column = skx_bits(r->rank_address, dimm->colbits, skx_close_column);
+ r->column |= 0x400; /* C10 is autoprecharge, always set */
+ r->bank_address = skx_bank_bits(r->rank_address, 8, 9, dimm->bank_xor_enable, 22, 28);
+ r->bank_group = skx_bank_bits(r->rank_address, 6, 7, dimm->bank_xor_enable, 20, 21);
+ } else {
+ r->row = skx_bits(r->rank_address, dimm->rowbits, skx_open_row);
+ if (dimm->fine_grain_bank)
+ r->column = skx_bits(r->rank_address, dimm->colbits, skx_open_fine_column);
+ else
+ r->column = skx_bits(r->rank_address, dimm->colbits, skx_open_column);
+ r->bank_address = skx_bank_bits(r->rank_address, 18, 19, dimm->bank_xor_enable, 22, 23);
+ r->bank_group = skx_bank_bits(r->rank_address, bg0, 17, dimm->bank_xor_enable, 20, 21);
+ }
+ r->row &= (1u << dimm->rowbits) - 1;
+
+ edac_dbg(2, "%llx: row=%x col=%x bank_addr=%d bank_group=%d\n",
+ r->addr, r->row, r->column, r->bank_address,
+ r->bank_group);
+ return true;
+}
+
+static bool skx_decode(struct decoded_addr *res)
+{
+ return skx_sad_decode(res) && skx_tad_decode(res) &&
+ skx_rir_decode(res) && skx_mad_decode(res);
+}
+
+#ifdef CONFIG_EDAC_DEBUG
+/*
+ * Debug feature. Make /sys/kernel/debug/skx_edac_test/addr.
+ * Write an address to this file to exercise the address decode
+ * logic in this driver.
+ */
+static struct dentry *skx_test;
+static u64 skx_fake_addr;
+
+static int debugfs_u64_set(void *data, u64 val)
+{
+ struct decoded_addr res;
+
+ res.addr = val;
+ skx_decode(&res);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
+
+static struct dentry *mydebugfs_create(const char *name, umode_t mode,
+ struct dentry *parent, u64 *value)
+{
+ return debugfs_create_file(name, mode, parent, value, &fops_u64_wo);
+}
+
+static void setup_skx_debug(void)
+{
+ skx_test = debugfs_create_dir("skx_edac_test", NULL);
+ mydebugfs_create("addr", S_IWUSR, skx_test, &skx_fake_addr);
+}
+
+static void teardown_skx_debug(void)
+{
+ debugfs_remove_recursive(skx_test);
+}
+#else
+static void setup_skx_debug(void)
+{
+}
+
+static void teardown_skx_debug(void)
+{
+}
+#endif /*CONFIG_EDAC_DEBUG*/
+
+static void skx_mce_output_error(struct mem_ctl_info *mci,
+ const struct mce *m,
+ struct decoded_addr *res)
+{
+ enum hw_event_mc_err_type tp_event;
+ char *type, *optype, msg[256];
+ bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
+ bool overflow = GET_BITFIELD(m->status, 62, 62);
+ bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
+ bool recoverable;
+ u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
+ u32 mscod = GET_BITFIELD(m->status, 16, 31);
+ u32 errcode = GET_BITFIELD(m->status, 0, 15);
+ u32 optypenum = GET_BITFIELD(m->status, 4, 6);
+
+ recoverable = GET_BITFIELD(m->status, 56, 56);
+
+ if (uncorrected_error) {
+ if (ripv) {
+ type = "FATAL";
+ tp_event = HW_EVENT_ERR_FATAL;
+ } else {
+ type = "NON_FATAL";
+ tp_event = HW_EVENT_ERR_UNCORRECTED;
+ }
+ } else {
+ type = "CORRECTED";
+ tp_event = HW_EVENT_ERR_CORRECTED;
+ }
+
+ /*
+ * According to Table 15-9 of the Intel Architecture spec vol 3A,
+ * memory errors should fit in this mask:
+ * 000f 0000 1mmm cccc (binary)
+ * where:
+ * f = Correction Report Filtering Bit. If 1, subsequent errors
+ * won't be shown
+ * mmm = error type
+ * cccc = channel
+ * If the mask doesn't match, report an error to the parsing logic
+ */
+ if (!((errcode & 0xef80) == 0x80)) {
+ optype = "Can't parse: it is not a mem";
+ } else {
+ switch (optypenum) {
+ case 0:
+ optype = "generic undef request error";
+ break;
+ case 1:
+ optype = "memory read error";
+ break;
+ case 2:
+ optype = "memory write error";
+ break;
+ case 3:
+ optype = "addr/cmd error";
+ break;
+ case 4:
+ optype = "memory scrubbing error";
+ break;
+ default:
+ optype = "reserved";
+ break;
+ }
+ }
+
+ snprintf(msg, sizeof(msg),
+ "%s%s err_code:%04x:%04x socket:%d imc:%d rank:%d bg:%d ba:%d row:%x col:%x",
+ overflow ? " OVERFLOW" : "",
+ (uncorrected_error && recoverable) ? " recoverable" : "",
+ mscod, errcode,
+ res->socket, res->imc, res->rank,
+ res->bank_group, res->bank_address, res->row, res->column);
+
+ edac_dbg(0, "%s\n", msg);
+
+ /* Call the helper to output message */
+ edac_mc_handle_error(tp_event, mci, core_err_cnt,
+ m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
+ res->channel, res->dimm, -1,
+ optype, msg);
+}
+
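+/*
+ * MCE decode chain callback: for memory errors that carry a valid
+ * address, run the full SAD/TAD/RIR/MAD decode and report the result
+ * through the owning memory controller.
+ */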
+static int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct mce *mce = (struct mce *)data;
+ struct decoded_addr res;
+ struct mem_ctl_info *mci;
+ char *type;
+
+ if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
+ return NOTIFY_DONE;
+
+ /* ignore unless this is memory related with an address */
+ if ((mce->status & 0xefff) >> 7 != 1 || !(mce->status & MCI_STATUS_ADDRV))
+ return NOTIFY_DONE;
+
+ res.addr = mce->addr;
+ if (!skx_decode(&res))
+ return NOTIFY_DONE;
+ mci = res.dev->imc[res.imc].mci;
+
+ if (mce->mcgstatus & MCG_STATUS_MCIP)
+ type = "Exception";
+ else
+ type = "Event";
+
+ skx_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n");
+
+ skx_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx "
+ "Bank %d: %016Lx\n", mce->extcpu, type,
+ mce->mcgstatus, mce->bank, mce->status);
+ skx_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc);
+ skx_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr);
+ skx_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc);
+
+ skx_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET "
+ "%u APIC %x\n", mce->cpuvendor, mce->cpuid,
+ mce->time, mce->socketid, mce->apicid);
+
+ skx_mce_output_error(mci, mce, &res);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block skx_mce_dec = {
+ .notifier_call = skx_mce_check_error,
+};
+
+static void skx_remove(void)
+{
+ int i, j;
+ struct skx_dev *d, *tmp;
+
+ edac_dbg(0, "\n");
+
+ list_for_each_entry_safe(d, tmp, &skx_edac_list, list) {
+ list_del(&d->list);
+ for (i = 0; i < NUM_IMC; i++) {
+ skx_unregister_mci(&d->imc[i]);
+ for (j = 0; j < NUM_CHANNELS; j++)
+ pci_dev_put(d->imc[i].chan[j].cdev);
+ }
+ pci_dev_put(d->util_all);
+ pci_dev_put(d->sad_all);
+
+ kfree(d);
+ }
+}
+
+/*
+ * skx_init:
+ * make sure we are running on the correct cpu model
+ * search for all the devices we need
+ * check which DIMMs are present.
+ */
+static int __init skx_init(void)
+{
+ const struct x86_cpu_id *id;
+ const struct munit *m;
+ int rc = 0, i;
+ u8 mc = 0, src_id, node_id;
+ struct skx_dev *d;
+
+ edac_dbg(2, "\n");
+
+ id = x86_match_cpu(skx_cpuids);
+ if (!id)
+ return -ENODEV;
+
+ rc = skx_get_hi_lo();
+ if (rc)
+ return rc;
+
+ rc = get_all_bus_mappings();
+ if (rc < 0)
+ goto fail;
+ if (rc == 0) {
+ edac_dbg(2, "No memory controllers found\n");
+ return -ENODEV;
+ }
+
+ for (m = skx_all_munits; m->did; m++) {
+ rc = get_all_munits(m);
+ if (rc < 0)
+ goto fail;
+ if (rc != m->per_socket * skx_num_sockets) {
+ edac_dbg(2, "Expected %d, got %d of %x\n",
+ m->per_socket * skx_num_sockets, rc, m->did);
+ rc = -ENODEV;
+ goto fail;
+ }
+ }
+
+ list_for_each_entry(d, &skx_edac_list, list) {
+ src_id = get_src_id(d);
+ node_id = skx_get_node_id(d);
+ edac_dbg(2, "src_id=%d node_id=%d\n", src_id, node_id);
+ for (i = 0; i < NUM_IMC; i++) {
+ d->imc[i].mc = mc++;
+ d->imc[i].lmc = i;
+ d->imc[i].src_id = src_id;
+ d->imc[i].node_id = node_id;
+ rc = skx_register_mci(&d->imc[i]);
+ if (rc < 0)
+ goto fail;
+ }
+ }
+
+ /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+ opstate_init();
+
+ setup_skx_debug();
+
+ mce_register_decode_chain(&skx_mce_dec);
+
+ return 0;
+fail:
+ skx_remove();
+ return rc;
+}
+
+static void __exit skx_exit(void)
+{
+ edac_dbg(2, "\n");
+ mce_unregister_decode_chain(&skx_mce_dec);
+ skx_remove();
+ teardown_skx_debug();
+}
+
+module_init(skx_init);
+module_exit(skx_exit);
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Tony Luck");
+MODULE_DESCRIPTION("MC Driver for Intel Skylake server processors");
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 2a0e4f45d..972c813c3 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -2,7 +2,8 @@
# Makefile for external connector class (extcon) devices
#
-obj-$(CONFIG_EXTCON) += extcon.o
+obj-$(CONFIG_EXTCON) += extcon-core.o
+extcon-core-objs += extcon.o devres.o
obj-$(CONFIG_EXTCON_ADC_JACK) += extcon-adc-jack.o
obj-$(CONFIG_EXTCON_ARIZONA) += extcon-arizona.o
obj-$(CONFIG_EXTCON_AXP288) += extcon-axp288.o
diff --git a/drivers/extcon/devres.c b/drivers/extcon/devres.c
new file mode 100644
index 000000000..e686acd1c
--- /dev/null
+++ b/drivers/extcon/devres.c
@@ -0,0 +1,216 @@
+/*
+ * drivers/extcon/devres.c - EXTCON device's resource management
+ *
+ * Copyright (C) 2016 Samsung Electronics
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/extcon.h>
+
+static int devm_extcon_dev_match(struct device *dev, void *res, void *data)
+{
+ struct extcon_dev **r = res;
+
+ if (WARN_ON(!r || !*r))
+ return 0;
+
+ return *r == data;
+}
+
+static void devm_extcon_dev_release(struct device *dev, void *res)
+{
+ extcon_dev_free(*(struct extcon_dev **)res);
+}
+
+
+static void devm_extcon_dev_unreg(struct device *dev, void *res)
+{
+ extcon_dev_unregister(*(struct extcon_dev **)res);
+}
+
+struct extcon_dev_notifier_devres {
+ struct extcon_dev *edev;
+ unsigned int id;
+ struct notifier_block *nb;
+};
+
+static void devm_extcon_dev_notifier_unreg(struct device *dev, void *res)
+{
+ struct extcon_dev_notifier_devres *this = res;
+
+ extcon_unregister_notifier(this->edev, this->id, this->nb);
+}
+
+/**
+ * devm_extcon_dev_allocate - Allocate managed extcon device
+ * @dev: device owning the extcon device being created
+ * @supported_cable: array of the supported cable ids, terminated by
+ * EXTCON_NONE. If supported_cable is NULL, the cable-name
+ * related APIs are disabled.
+ *
+ * This function automatically manages the memory of the extcon device through
+ * device resource management, so the driver does not need to free the
+ * extcon device explicitly.
+ *
+ * Returns a pointer to the allocated extcon_dev on success,
+ * or ERR_PTR(err) on failure.
+ */
+struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
+ const unsigned int *supported_cable)
+{
+ struct extcon_dev **ptr, *edev;
+
+ ptr = devres_alloc(devm_extcon_dev_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ edev = extcon_dev_allocate(supported_cable);
+ if (IS_ERR(edev)) {
+ devres_free(ptr);
+ return edev;
+ }
+
+ edev->dev.parent = dev;
+
+ *ptr = edev;
+ devres_add(dev, ptr);
+
+ return edev;
+}
+EXPORT_SYMBOL_GPL(devm_extcon_dev_allocate);
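+
+/*
+ * Typical use from a driver's probe() (an illustrative sketch only;
+ * the cable array below is hypothetical):
+ *
+ *	static const unsigned int my_cables[] = { EXTCON_USB, EXTCON_NONE };
+ *
+ *	edev = devm_extcon_dev_allocate(&pdev->dev, my_cables);
+ *	if (IS_ERR(edev))
+ *		return PTR_ERR(edev);
+ *	ret = devm_extcon_dev_register(&pdev->dev, edev);
+ */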
+
+/**
+ * devm_extcon_dev_free() - Free an extcon device allocated with devm_extcon_dev_allocate()
+ * @dev: device the extcon belongs to
+ * @edev: the extcon device to free
+ *
+ * Frees the memory that was allocated with the devm_extcon_dev_allocate()
+ * function.
+ */
+void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev)
+{
+ WARN_ON(devres_release(dev, devm_extcon_dev_release,
+ devm_extcon_dev_match, edev));
+}
+EXPORT_SYMBOL_GPL(devm_extcon_dev_free);
+
+/**
+ * devm_extcon_dev_register() - Resource-managed extcon_dev_register()
+ * @dev: device to allocate extcon device
+ * @edev: the new extcon device to register
+ *
+ * Managed extcon_dev_register() function. An extcon device registered with
+ * this function is automatically unregistered on driver detach. Internally,
+ * this function calls extcon_dev_register(); refer to that function for
+ * more information.
+ *
+ * If an extcon device registered with this function needs to be unregistered
+ * separately, devm_extcon_dev_unregister() should be used.
+ *
+ * Returns 0 on success or a negative error number on failure.
+ */
+int devm_extcon_dev_register(struct device *dev, struct extcon_dev *edev)
+{
+ struct extcon_dev **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_extcon_dev_unreg, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = extcon_dev_register(edev);
+ if (ret) {
+ devres_free(ptr);
+ return ret;
+ }
+
+ *ptr = edev;
+ devres_add(dev, ptr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_extcon_dev_register);
+
+/**
+ * devm_extcon_dev_unregister() - Resource-managed extcon_dev_unregister()
+ * @dev: device the extcon belongs to
+ * @edev: the extcon device to unregister
+ *
+ * Unregister extcon device that is registered with devm_extcon_dev_register()
+ * function.
+ */
+void devm_extcon_dev_unregister(struct device *dev, struct extcon_dev *edev)
+{
+ WARN_ON(devres_release(dev, devm_extcon_dev_unreg,
+ devm_extcon_dev_match, edev));
+}
+EXPORT_SYMBOL_GPL(devm_extcon_dev_unregister);
+
+/**
+ * devm_extcon_register_notifier() - Resource-managed extcon_register_notifier()
+ * @dev: device that manages the registered notifier
+ * @edev: the extcon device that has the external connector.
+ * @id: the unique id of each external connector in extcon enumeration.
+ * @nb: a notifier block to be registered.
+ *
+ * This function automatically manages the notifier of the extcon device
+ * through device resource management, so the driver does not need to
+ * unregister it explicitly.
+ *
+ * Note that the second parameter given to the callback of nb (val) is
+ * "old_state", not the current state. The current state can be retrieved
+ * by looking at the third parameter (edev pointer)'s state value.
+ *
+ * Returns 0 on success or a negative error number on failure.
+ */
+int devm_extcon_register_notifier(struct device *dev, struct extcon_dev *edev,
+ unsigned int id, struct notifier_block *nb)
+{
+ struct extcon_dev_notifier_devres *ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_extcon_dev_notifier_unreg, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = extcon_register_notifier(edev, id, nb);
+ if (ret) {
+ devres_free(ptr);
+ return ret;
+ }
+
+ ptr->edev = edev;
+ ptr->id = id;
+ ptr->nb = nb;
+ devres_add(dev, ptr);
+
+ return 0;
+}
+EXPORT_SYMBOL(devm_extcon_register_notifier);
+
+/**
+ * devm_extcon_unregister_notifier() - Resource-managed extcon_unregister_notifier()
+ * @dev: device that manages the registered notifier
+ * @edev: the extcon device that has the external connector.
+ * @id: the unique id of each external connector in extcon enumeration.
+ * @nb: the notifier block to be unregistered.
+ */
+void devm_extcon_unregister_notifier(struct device *dev,
+ struct extcon_dev *edev, unsigned int id,
+ struct notifier_block *nb)
+{
+ WARN_ON(devres_release(dev, devm_extcon_dev_notifier_unreg,
+ devm_extcon_dev_match, edev));
+}
+EXPORT_SYMBOL(devm_extcon_unregister_notifier);
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index 7fc0ae191..44e48aa78 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -38,6 +38,7 @@
* @chan: iio channel being queried.
*/
struct adc_jack_data {
+ struct device *dev;
struct extcon_dev *edev;
const unsigned int **cable_names;
@@ -49,6 +50,7 @@ struct adc_jack_data {
struct delayed_work handler;
struct iio_channel *chan;
+ bool wakeup_source;
};
static void adc_jack_handler(struct work_struct *work)
@@ -105,6 +107,7 @@ static int adc_jack_probe(struct platform_device *pdev)
return -EINVAL;
}
+ data->dev = &pdev->dev;
data->edev = devm_extcon_dev_allocate(&pdev->dev, pdata->cable_names);
if (IS_ERR(data->edev)) {
dev_err(&pdev->dev, "failed to allocate extcon device\n");
@@ -128,6 +131,7 @@ static int adc_jack_probe(struct platform_device *pdev)
return PTR_ERR(data->chan);
data->handling_delay = msecs_to_jiffies(pdata->handling_delay_ms);
+ data->wakeup_source = pdata->wakeup_source;
INIT_DEFERRABLE_WORK(&data->handler, adc_jack_handler);
@@ -151,6 +155,9 @@ static int adc_jack_probe(struct platform_device *pdev)
return err;
}
+ if (data->wakeup_source)
+ device_init_wakeup(&pdev->dev, 1);
+
return 0;
}
@@ -165,11 +172,38 @@ static int adc_jack_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int adc_jack_suspend(struct device *dev)
+{
+ struct adc_jack_data *data = dev_get_drvdata(dev);
+
+ cancel_delayed_work_sync(&data->handler);
+ if (device_may_wakeup(data->dev))
+ enable_irq_wake(data->irq);
+
+ return 0;
+}
+
+static int adc_jack_resume(struct device *dev)
+{
+ struct adc_jack_data *data = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(data->dev))
+ disable_irq_wake(data->irq);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(adc_jack_pm_ops,
+ adc_jack_suspend, adc_jack_resume);
+
static struct platform_driver adc_jack_driver = {
.probe = adc_jack_probe,
.remove = adc_jack_remove,
.driver = {
.name = "adc-jack",
+ .pm = &adc_jack_pm_ops,
},
};
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
index 2b2fecffb..2512660dc 100644
--- a/drivers/extcon/extcon-usb-gpio.c
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -24,8 +24,10 @@
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
+#include <linux/acpi.h>
#define USB_GPIO_DEBOUNCE_MS 20 /* ms */
@@ -91,7 +93,7 @@ static int usb_extcon_probe(struct platform_device *pdev)
struct usb_extcon_info *info;
int ret;
- if (!np)
+ if (!np && !ACPI_HANDLE(dev))
return -EINVAL;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
@@ -141,7 +143,8 @@ static int usb_extcon_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, info);
- device_init_wakeup(dev, 1);
+ device_init_wakeup(dev, true);
+ dev_pm_set_wake_irq(dev, info->id_irq);
/* Perform initial detection */
usb_extcon_detect_cable(&info->wq_detcable.work);
@@ -155,6 +158,9 @@ static int usb_extcon_remove(struct platform_device *pdev)
cancel_delayed_work_sync(&info->wq_detcable);
+ dev_pm_clear_wake_irq(&pdev->dev);
+ device_init_wakeup(&pdev->dev, false);
+
return 0;
}
@@ -164,12 +170,6 @@ static int usb_extcon_suspend(struct device *dev)
struct usb_extcon_info *info = dev_get_drvdata(dev);
int ret = 0;
- if (device_may_wakeup(dev)) {
- ret = enable_irq_wake(info->id_irq);
- if (ret)
- return ret;
- }
-
/*
* We don't want to process any IRQs after this point
* as GPIOs used behind I2C subsystem might not be
@@ -185,13 +185,10 @@ static int usb_extcon_resume(struct device *dev)
struct usb_extcon_info *info = dev_get_drvdata(dev);
int ret = 0;
- if (device_may_wakeup(dev)) {
- ret = disable_irq_wake(info->id_irq);
- if (ret)
- return ret;
- }
-
enable_irq(info->id_irq);
+ if (!device_may_wakeup(dev))
+ queue_delayed_work(system_power_efficient_wq,
+ &info->wq_detcable, 0);
return ret;
}
@@ -206,6 +203,12 @@ static const struct of_device_id usb_extcon_dt_match[] = {
};
MODULE_DEVICE_TABLE(of, usb_extcon_dt_match);
+static const struct platform_device_id usb_extcon_platform_ids[] = {
+ { .name = "extcon-usb-gpio", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, usb_extcon_platform_ids);
+
static struct platform_driver usb_extcon_driver = {
.probe = usb_extcon_probe,
.remove = usb_extcon_remove,
@@ -214,6 +217,7 @@ static struct platform_driver usb_extcon_driver = {
.pm = &usb_extcon_pm_ops,
.of_match_table = usb_extcon_dt_match,
},
+ .id_table = usb_extcon_platform_ids,
};
module_platform_driver(usb_extcon_driver);
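The conversion above follows the generic pm_wakeirq pattern: once the IRQ is
handed to the PM core, the manual enable_irq_wake()/disable_irq_wake() calls
in suspend/resume become unnecessary. A minimal sketch with hypothetical
names, assuming a single wake-capable IRQ:

static int demo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);
	int ret;

	if (irq < 0)
		return irq;

	device_init_wakeup(&pdev->dev, true);
	ret = dev_pm_set_wake_irq(&pdev->dev, irq);
	if (ret)
		device_init_wakeup(&pdev->dev, false);

	return ret;
}

static int demo_remove(struct platform_device *pdev)
{
	/* mirror image of probe: detach the wake IRQ, then wakeup teardown */
	dev_pm_clear_wake_irq(&pdev->dev);
	device_init_wakeup(&pdev->dev, false);
	return 0;
}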
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 21a123cad..8682efc0f 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -77,6 +77,26 @@ static const char *extcon_name[] = {
NULL,
};
+/**
+ * struct extcon_cable - An internal data for each cable of extcon device.
+ * @edev: The extcon device
+ * @cable_index: Index of this cable in the edev
+ * @attr_g: Attribute group for the cable
+ * @attr_name: "name" sysfs entry
+ * @attr_state: "state" sysfs entry
+ * @attrs: Array pointing to attr_name and attr_state for attr_g
+ */
+struct extcon_cable {
+ struct extcon_dev *edev;
+ int cable_index;
+
+ struct attribute_group attr_g;
+ struct device_attribute attr_name;
+ struct device_attribute attr_state;
+
+ struct attribute *attrs[3]; /* to be fed to attr_g.attrs */
+};
+
static struct class *extcon_class;
#if defined(CONFIG_ANDROID)
static struct class_compat *switch_class;
@@ -127,38 +147,6 @@ static int find_cable_index_by_id(struct extcon_dev *edev, const unsigned int id
return -EINVAL;
}
-static int find_cable_id_by_name(struct extcon_dev *edev, const char *name)
-{
- int id = -EINVAL;
- int i = 0;
-
- /* Find the id of extcon cable */
- while (extcon_name[i]) {
- if (!strncmp(extcon_name[i], name, CABLE_NAME_MAX)) {
- id = i;
- break;
- }
- i++;
- }
-
- return id;
-}
-
-static int find_cable_index_by_name(struct extcon_dev *edev, const char *name)
-{
- int id;
-
- if (edev->max_supported == 0)
- return -EINVAL;
-
- /* Find the the number of extcon cable */
- id = find_cable_id_by_name(edev, name);
- if (id < 0)
- return id;
-
- return find_cable_index_by_id(edev, id);
-}
-
static bool is_extcon_changed(u32 prev, u32 new, int idx, bool *attached)
{
if (((prev >> idx) & 0x1) != ((new >> idx) & 0x1)) {
@@ -374,25 +362,6 @@ int extcon_get_cable_state_(struct extcon_dev *edev, const unsigned int id)
EXPORT_SYMBOL_GPL(extcon_get_cable_state_);
/**
- * extcon_get_cable_state() - Get the status of a specific cable.
- * @edev: the extcon device that has the cable.
- * @cable_name: cable name.
- *
- * Note that this is slower than extcon_get_cable_state_.
- */
-int extcon_get_cable_state(struct extcon_dev *edev, const char *cable_name)
-{
- int id;
-
- id = find_cable_id_by_name(edev, cable_name);
- if (id < 0)
- return id;
-
- return extcon_get_cable_state_(edev, id);
-}
-EXPORT_SYMBOL_GPL(extcon_get_cable_state);
-
-/**
* extcon_set_cable_state_() - Set the status of a specific cable.
* @edev: the extcon device that has the cable.
* @id: the unique id of each external connector
@@ -422,28 +391,6 @@ int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id,
EXPORT_SYMBOL_GPL(extcon_set_cable_state_);
/**
- * extcon_set_cable_state() - Set the status of a specific cable.
- * @edev: the extcon device that has the cable.
- * @cable_name: cable name.
- * @cable_state: the new cable status. The default semantics is
- * true: attached / false: detached.
- *
- * Note that this is slower than extcon_set_cable_state_.
- */
-int extcon_set_cable_state(struct extcon_dev *edev,
- const char *cable_name, bool cable_state)
-{
- int id;
-
- id = find_cable_id_by_name(edev, cable_name);
- if (id < 0)
- return id;
-
- return extcon_set_cable_state_(edev, id, cable_state);
-}
-EXPORT_SYMBOL_GPL(extcon_set_cable_state);
-
-/**
* extcon_get_extcon_dev() - Get the extcon device instance from the name
* @extcon_name: The extcon name provided with extcon_dev_register()
*/
@@ -467,105 +414,6 @@ out:
EXPORT_SYMBOL_GPL(extcon_get_extcon_dev);
/**
- * extcon_register_interest() - Register a notifier for a state change of a
- * specific cable, not an entier set of cables of a
- * extcon device.
- * @obj: an empty extcon_specific_cable_nb object to be returned.
- * @extcon_name: the name of extcon device.
- * if NULL, extcon_register_interest will register
- * every cable with the target cable_name given.
- * @cable_name: the target cable name.
- * @nb: the notifier block to get notified.
- *
- * Provide an empty extcon_specific_cable_nb. extcon_register_interest() sets
- * the struct for you.
- *
- * extcon_register_interest is a helper function for those who want to get
- * notification for a single specific cable's status change. If a user wants
- * to get notification for any changes of all cables of a extcon device,
- * he/she should use the general extcon_register_notifier().
- *
- * Note that the second parameter given to the callback of nb (val) is
- * "old_state", not the current state. The current state can be retrieved
- * by looking at the third pameter (edev pointer)'s state value.
- */
-int extcon_register_interest(struct extcon_specific_cable_nb *obj,
- const char *extcon_name, const char *cable_name,
- struct notifier_block *nb)
-{
- unsigned long flags;
- int ret;
-
- if (!obj || !cable_name || !nb)
- return -EINVAL;
-
- if (extcon_name) {
- obj->edev = extcon_get_extcon_dev(extcon_name);
- if (!obj->edev)
- return -ENODEV;
-
- obj->cable_index = find_cable_index_by_name(obj->edev,
- cable_name);
- if (obj->cable_index < 0)
- return obj->cable_index;
-
- obj->user_nb = nb;
-
- spin_lock_irqsave(&obj->edev->lock, flags);
- ret = raw_notifier_chain_register(
- &obj->edev->nh[obj->cable_index],
- obj->user_nb);
- spin_unlock_irqrestore(&obj->edev->lock, flags);
- } else {
- struct class_dev_iter iter;
- struct extcon_dev *extd;
- struct device *dev;
-
- if (!extcon_class)
- return -ENODEV;
- class_dev_iter_init(&iter, extcon_class, NULL, NULL);
- while ((dev = class_dev_iter_next(&iter))) {
- extd = dev_get_drvdata(dev);
-
- if (find_cable_index_by_name(extd, cable_name) < 0)
- continue;
-
- class_dev_iter_exit(&iter);
- return extcon_register_interest(obj, extd->name,
- cable_name, nb);
- }
-
- ret = -ENODEV;
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(extcon_register_interest);
-
-/**
- * extcon_unregister_interest() - Unregister the notifier registered by
- * extcon_register_interest().
- * @obj: the extcon_specific_cable_nb object returned by
- * extcon_register_interest().
- */
-int extcon_unregister_interest(struct extcon_specific_cable_nb *obj)
-{
- unsigned long flags;
- int ret;
-
- if (!obj)
- return -EINVAL;
-
- spin_lock_irqsave(&obj->edev->lock, flags);
- ret = raw_notifier_chain_unregister(
- &obj->edev->nh[obj->cable_index], obj->user_nb);
- spin_unlock_irqrestore(&obj->edev->lock, flags);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(extcon_unregister_interest);
-
-/**
* extcon_register_notifier() - Register a notifiee to get notified by
* any attach status changes from the extcon.
 * @edev: the extcon device that has the external connector.
@@ -582,14 +430,35 @@ int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
unsigned long flags;
int ret, idx;
- if (!edev || !nb)
+ if (!nb)
return -EINVAL;
- idx = find_cable_index_by_id(edev, id);
+ if (edev) {
+ idx = find_cable_index_by_id(edev, id);
+ if (idx < 0)
+ return idx;
- spin_lock_irqsave(&edev->lock, flags);
- ret = raw_notifier_chain_register(&edev->nh[idx], nb);
- spin_unlock_irqrestore(&edev->lock, flags);
+ spin_lock_irqsave(&edev->lock, flags);
+ ret = raw_notifier_chain_register(&edev->nh[idx], nb);
+ spin_unlock_irqrestore(&edev->lock, flags);
+ } else {
+ struct extcon_dev *extd;
+
+ mutex_lock(&extcon_dev_list_lock);
+ list_for_each_entry(extd, &extcon_dev_list, entry) {
+ idx = find_cable_index_by_id(extd, id);
+ if (idx >= 0)
+ break;
+ }
+ mutex_unlock(&extcon_dev_list_lock);
+
+ if (idx >= 0) {
+ edev = extd;
+ return extcon_register_notifier(extd, id, nb);
+ } else {
+ ret = -ENODEV;
+ }
+ }
return ret;
}
@@ -611,6 +480,8 @@ int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
return -EINVAL;
idx = find_cable_index_by_id(edev, id);
+ if (idx < 0)
+ return idx;
spin_lock_irqsave(&edev->lock, flags);
ret = raw_notifier_chain_unregister(&edev->nh[idx], nb);
@@ -693,66 +564,6 @@ void extcon_dev_free(struct extcon_dev *edev)
}
EXPORT_SYMBOL_GPL(extcon_dev_free);
-static int devm_extcon_dev_match(struct device *dev, void *res, void *data)
-{
- struct extcon_dev **r = res;
-
- if (WARN_ON(!r || !*r))
- return 0;
-
- return *r == data;
-}
-
-static void devm_extcon_dev_release(struct device *dev, void *res)
-{
- extcon_dev_free(*(struct extcon_dev **)res);
-}
-
-/**
- * devm_extcon_dev_allocate - Allocate managed extcon device
- * @dev: device owning the extcon device being created
- * @supported_cable: Array of supported extcon ending with EXTCON_NONE.
- * If supported_cable is NULL, cable name related APIs
- * are disabled.
- *
- * This function manages automatically the memory of extcon device using device
- * resource management and simplify the control of freeing the memory of extcon
- * device.
- *
- * Returns the pointer memory of allocated extcon_dev if success
- * or ERR_PTR(err) if fail
- */
-struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
- const unsigned int *supported_cable)
-{
- struct extcon_dev **ptr, *edev;
-
- ptr = devres_alloc(devm_extcon_dev_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return ERR_PTR(-ENOMEM);
-
- edev = extcon_dev_allocate(supported_cable);
- if (IS_ERR(edev)) {
- devres_free(ptr);
- return edev;
- }
-
- edev->dev.parent = dev;
-
- *ptr = edev;
- devres_add(dev, ptr);
-
- return edev;
-}
-EXPORT_SYMBOL_GPL(devm_extcon_dev_allocate);
-
-void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev)
-{
- WARN_ON(devres_release(dev, devm_extcon_dev_release,
- devm_extcon_dev_match, edev));
-}
-EXPORT_SYMBOL_GPL(devm_extcon_dev_free);
-
/**
* extcon_dev_register() - Register a new extcon device
* @edev : the new extcon device (should be allocated before calling)
@@ -1018,63 +829,6 @@ void extcon_dev_unregister(struct extcon_dev *edev)
}
EXPORT_SYMBOL_GPL(extcon_dev_unregister);
-static void devm_extcon_dev_unreg(struct device *dev, void *res)
-{
- extcon_dev_unregister(*(struct extcon_dev **)res);
-}
-
-/**
- * devm_extcon_dev_register() - Resource-managed extcon_dev_register()
- * @dev: device to allocate extcon device
- * @edev: the new extcon device to register
- *
- * Managed extcon_dev_register() function. If extcon device is attached with
- * this function, that extcon device is automatically unregistered on driver
- * detach. Internally this function calls extcon_dev_register() function.
- * To get more information, refer that function.
- *
- * If extcon device is registered with this function and the device needs to be
- * unregistered separately, devm_extcon_dev_unregister() should be used.
- *
- * Returns 0 if success or negaive error number if failure.
- */
-int devm_extcon_dev_register(struct device *dev, struct extcon_dev *edev)
-{
- struct extcon_dev **ptr;
- int ret;
-
- ptr = devres_alloc(devm_extcon_dev_unreg, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
-
- ret = extcon_dev_register(edev);
- if (ret) {
- devres_free(ptr);
- return ret;
- }
-
- *ptr = edev;
- devres_add(dev, ptr);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(devm_extcon_dev_register);
-
-/**
- * devm_extcon_dev_unregister() - Resource-managed extcon_dev_unregister()
- * @dev: device the extcon belongs to
- * @edev: the extcon device to unregister
- *
- * Unregister extcon device that is registered with devm_extcon_dev_register()
- * function.
- */
-void devm_extcon_dev_unregister(struct device *dev, struct extcon_dev *edev)
-{
- WARN_ON(devres_release(dev, devm_extcon_dev_unreg,
- devm_extcon_dev_match, edev));
-}
-EXPORT_SYMBOL_GPL(devm_extcon_dev_unregister);
-
#ifdef CONFIG_OF
/*
* extcon_get_edev_by_phandle - Get the extcon device from devicetree
@@ -1107,10 +861,12 @@ struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
list_for_each_entry(edev, &extcon_dev_list, entry) {
if (edev->dev.parent && edev->dev.parent->of_node == node) {
mutex_unlock(&extcon_dev_list_lock);
+ of_node_put(node);
return edev;
}
}
mutex_unlock(&extcon_dev_list_lock);
+ of_node_put(node);
return ERR_PTR(-EPROBE_DEFER);
}
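With the NULL-edev path added to extcon_register_notifier() above, a
consumer that does not know which extcon device carries its connector can
let the core pick the first registered device supporting the id; a hedged
sketch:

static int demo_attach_usb_notifier(struct notifier_block *nb)
{
	/*
	 * NULL edev: the core walks extcon_dev_list and registers the
	 * notifier on the first device that supports EXTCON_USB.
	 */
	return extcon_register_notifier(NULL, EXTCON_USB, nb);
}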
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 6664f1108..0e22f2414 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -10,7 +10,7 @@ config ARM_PSCI_FW
config ARM_SCPI_PROTOCOL
tristate "ARM System Control and Power Interface (SCPI) Message Protocol"
- depends on ARM_MHU
+ depends on MAILBOX
help
System Control and Power Interface (SCPI) Message Protocol is
defined for the purpose of communication between the Application
@@ -27,6 +27,15 @@ config ARM_SCPI_PROTOCOL
This protocol library provides interface for all the client drivers
making use of the features offered by the SCP.
+config ARM_SCPI_POWER_DOMAIN
+ tristate "SCPI power domain driver"
+ depends on ARM_SCPI_PROTOCOL || (COMPILE_TEST && OF)
+ default y
+ select PM_GENERIC_DOMAINS if PM
+ help
+ This enables support for the SCPI power domains which can be
+ enabled or disabled via the SCP firmware.
+
config EDD
tristate "BIOS Enhanced Disk Drive calls determine boot disk"
depends on X86
@@ -184,6 +193,7 @@ config FW_CFG_SYSFS_CMDLINE
config QCOM_SCM
bool
depends on ARM || ARM64
+ select RESET_CONTROLLER
config QCOM_SCM_32
def_bool y
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 474bada56..44a59dcfc 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -3,6 +3,7 @@
#
obj-$(CONFIG_ARM_PSCI_FW) += psci.o
obj-$(CONFIG_ARM_SCPI_PROTOCOL) += arm_scpi.o
+obj-$(CONFIG_ARM_SCPI_POWER_DOMAIN) += scpi_pm_domain.o
obj-$(CONFIG_DMI) += dmi_scan.o
obj-$(CONFIG_DMI_SYSFS) += dmi-sysfs.o
obj-$(CONFIG_EDD) += edd.o
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index 7e3e595c9..ce2bc2a38 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -210,10 +210,6 @@ struct dvfs_info {
} opps[MAX_DVFS_OPPS];
} __packed;
-struct dvfs_get {
- u8 index;
-} __packed;
-
struct dvfs_set {
u8 domain;
u8 index;
@@ -235,6 +231,11 @@ struct sensor_value {
__le32 hi_val;
} __packed;
+struct dev_pstate_set {
+ u16 dev_id;
+ u8 pstate;
+} __packed;
+
static struct scpi_drvinfo *scpi_info;
static int scpi_linux_errmap[SCPI_ERR_MAX] = {
@@ -431,11 +432,11 @@ static int scpi_clk_set_val(u16 clk_id, unsigned long rate)
static int scpi_dvfs_get_idx(u8 domain)
{
int ret;
- struct dvfs_get dvfs;
+ u8 dvfs_idx;
ret = scpi_send_message(SCPI_CMD_GET_DVFS, &domain, sizeof(domain),
- &dvfs, sizeof(dvfs));
- return ret ? ret : dvfs.index;
+ &dvfs_idx, sizeof(dvfs_idx));
+ return ret ? ret : dvfs_idx;
}
static int scpi_dvfs_set_idx(u8 domain, u8 index)
@@ -526,7 +527,7 @@ static int scpi_sensor_get_info(u16 sensor_id, struct scpi_sensor_info *info)
return ret;
}
-int scpi_sensor_get_value(u16 sensor, u64 *val)
+static int scpi_sensor_get_value(u16 sensor, u64 *val)
{
__le16 id = cpu_to_le16(sensor);
struct sensor_value buf;
@@ -541,6 +542,29 @@ int scpi_sensor_get_value(u16 sensor, u64 *val)
return ret;
}
+static int scpi_device_get_power_state(u16 dev_id)
+{
+ int ret;
+ u8 pstate;
+ __le16 id = cpu_to_le16(dev_id);
+
+ ret = scpi_send_message(SCPI_CMD_GET_DEVICE_PWR_STATE, &id,
+ sizeof(id), &pstate, sizeof(pstate));
+ return ret ? ret : pstate;
+}
+
+static int scpi_device_set_power_state(u16 dev_id, u8 pstate)
+{
+ int stat;
+ struct dev_pstate_set dev_set = {
+ .dev_id = cpu_to_le16(dev_id),
+ .pstate = pstate,
+ };
+
+ return scpi_send_message(SCPI_CMD_SET_DEVICE_PWR_STATE, &dev_set,
+ sizeof(dev_set), &stat, sizeof(stat));
+}
+
static struct scpi_ops scpi_ops = {
.get_version = scpi_get_version,
.clk_get_range = scpi_clk_get_range,
@@ -552,6 +576,8 @@ static struct scpi_ops scpi_ops = {
.sensor_get_capability = scpi_sensor_get_capability,
.sensor_get_info = scpi_sensor_get_info,
.sensor_get_value = scpi_sensor_get_value,
+ .device_get_power_state = scpi_device_get_power_state,
+ .device_set_power_state = scpi_device_set_power_state,
};
struct scpi_ops *get_scpi_ops(void)
@@ -683,9 +709,10 @@ static int scpi_probe(struct platform_device *pdev)
struct mbox_client *cl = &pchan->cl;
struct device_node *shmem = of_parse_phandle(np, "shmem", idx);
- if (of_address_to_resource(shmem, 0, &res)) {
+ ret = of_address_to_resource(shmem, 0, &res);
+ of_node_put(shmem);
+ if (ret) {
dev_err(dev, "failed to get SCPI payload mem resource\n");
- ret = -EINVAL;
goto err;
}
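A hypothetical consumer of the new device power-state hooks, roughly as the
scpi_pm_domain driver added by this series drives them through
get_scpi_ops() (pstate 0 is the "on" state in that driver's convention):

static int demo_scpi_power_on(u16 dev_id)
{
	struct scpi_ops *ops = get_scpi_ops();

	if (!ops)
		return -EPROBE_DEFER;	/* SCPI protocol not probed yet */

	return ops->device_set_power_state(dev_id, 0);
}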
diff --git a/drivers/firmware/broadcom/bcm47xx_sprom.c b/drivers/firmware/broadcom/bcm47xx_sprom.c
index b6eb875d4..62aa3cf09 100644
--- a/drivers/firmware/broadcom/bcm47xx_sprom.c
+++ b/drivers/firmware/broadcom/bcm47xx_sprom.c
@@ -669,7 +669,7 @@ static int bcm47xx_get_sprom_bcma(struct bcma_bus *bus, struct ssb_sprom *out)
case BCMA_HOSTTYPE_PCI:
memset(out, 0, sizeof(struct ssb_sprom));
/* On BCM47XX all PCI buses share the same domain */
- if (config_enabled(CONFIG_BCM47XX))
+ if (IS_ENABLED(CONFIG_BCM47XX))
snprintf(buf, sizeof(buf), "pci/%u/%u/",
bus->host_pci->bus->number + 1,
PCI_SLOT(bus->host_pci->devfn));
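IS_ENABLED() is the kconfig.h replacement for the removed config_enabled()
helper; unlike its predecessor it also evaluates true for =m options, which
is harmless here because CONFIG_BCM47XX is a bool. Illustrative fragment:

static void demo_note_platform(void)
{
	/* evaluates to 1 when CONFIG_BCM47XX is =y (or =m for tristates) */
	if (IS_ENABLED(CONFIG_BCM47XX))
		pr_debug("using the shared BCM47xx PCI SPROM prefix\n");
}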
diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
index 94a58a082..44c01390d 100644
--- a/drivers/firmware/dmi-id.c
+++ b/drivers/firmware/dmi-id.c
@@ -229,14 +229,14 @@ static int __init dmi_id_init(void)
ret = device_register(dmi_dev);
if (ret)
- goto fail_free_dmi_dev;
+ goto fail_put_dmi_dev;
return 0;
-fail_free_dmi_dev:
- kfree(dmi_dev);
-fail_class_unregister:
+fail_put_dmi_dev:
+ put_device(dmi_dev);
+fail_class_unregister:
class_unregister(&dmi_class);
return ret;
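The fix above is the canonical reference-count rule for struct device: once
device_register() has been called, even when it fails, the object must be
released through put_device(), which frees it via the release() callback;
kfree() would bypass the live refcount. In isolation:

	ret = device_register(dmi_dev);
	if (ret) {
		/* refcount is live: never kfree() at this point */
		put_device(dmi_dev);
		return ret;
	}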
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 17ccf0a87..c394b81fe 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -107,6 +107,11 @@ static int __init arm_enable_runtime_services(void)
return 0;
}
+ if (efi_enabled(EFI_RUNTIME_SERVICES)) {
+ pr_info("EFI runtime services access via paravirt.\n");
+ return 0;
+ }
+
pr_info("Remapping and enabling EFI services.\n");
mapsize = efi.memmap.map_end - efi.memmap.map;
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index eac76a79a..30a24d09e 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -34,6 +34,7 @@ struct pstore_read_data {
int *count;
struct timespec *timespec;
bool *compressed;
+ ssize_t *ecc_notice_size;
char **buf;
};
@@ -69,6 +70,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
*cb_data->compressed = true;
else
*cb_data->compressed = false;
+ *cb_data->ecc_notice_size = 0;
} else if (sscanf(name, "dump-type%u-%u-%d-%lu",
cb_data->type, &part, &cnt, &time) == 4) {
*cb_data->id = generic_id(time, part, cnt);
@@ -76,6 +78,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
cb_data->timespec->tv_sec = time;
cb_data->timespec->tv_nsec = 0;
*cb_data->compressed = false;
+ *cb_data->ecc_notice_size = 0;
} else if (sscanf(name, "dump-type%u-%u-%lu",
cb_data->type, &part, &time) == 3) {
/*
@@ -88,6 +91,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
cb_data->timespec->tv_sec = time;
cb_data->timespec->tv_nsec = 0;
*cb_data->compressed = false;
+ *cb_data->ecc_notice_size = 0;
} else
return 0;
@@ -210,6 +214,7 @@ static int efi_pstore_sysfs_entry_iter(void *data, struct efivar_entry **pos)
static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
int *count, struct timespec *timespec,
char **buf, bool *compressed,
+ ssize_t *ecc_notice_size,
struct pstore_info *psi)
{
struct pstore_read_data data;
@@ -220,6 +225,7 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
data.count = count;
data.timespec = timespec;
data.compressed = compressed;
+ data.ecc_notice_size = ecc_notice_size;
data.buf = buf;
*data.buf = kzalloc(EFIVARS_DATA_SIZE_MAX, GFP_KERNEL);
@@ -393,6 +399,13 @@ static __init int efivars_pstore_init(void)
static __exit void efivars_pstore_exit(void)
{
+ if (!efi_pstore_info.bufsize)
+ return;
+
+ pstore_unregister(&efi_pstore_info);
+ kfree(efi_pstore_info.buf);
+ efi_pstore_info.buf = NULL;
+ efi_pstore_info.bufsize = 0;
}
module_init(efivars_pstore_init);
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 05509f3aa..7dd2e2d37 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -24,6 +24,9 @@
#include <linux/of_fdt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/ucs2_string.h>
#include <asm/early_ioremap.h>
@@ -195,6 +198,96 @@ static void generic_ops_unregister(void)
efivars_unregister(&generic_efivars);
}
+#if IS_ENABLED(CONFIG_ACPI)
+#define EFIVAR_SSDT_NAME_MAX 16
+static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
+static int __init efivar_ssdt_setup(char *str)
+{
+ if (strlen(str) < sizeof(efivar_ssdt))
+ memcpy(efivar_ssdt, str, strlen(str));
+ else
+ pr_warn("efivar_ssdt: name too long: %s\n", str);
+ return 0;
+}
+__setup("efivar_ssdt=", efivar_ssdt_setup);
+
+static __init int efivar_ssdt_iter(efi_char16_t *name, efi_guid_t vendor,
+ unsigned long name_size, void *data)
+{
+ struct efivar_entry *entry;
+ struct list_head *list = data;
+ char utf8_name[EFIVAR_SSDT_NAME_MAX];
+ int limit = min_t(unsigned long, EFIVAR_SSDT_NAME_MAX, name_size);
+
+ ucs2_as_utf8(utf8_name, name, limit - 1);
+ if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
+ return 0;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return 0;
+
+ memcpy(entry->var.VariableName, name, name_size);
+ memcpy(&entry->var.VendorGuid, &vendor, sizeof(efi_guid_t));
+
+ efivar_entry_add(entry, list);
+
+ return 0;
+}
+
+static __init int efivar_ssdt_load(void)
+{
+ LIST_HEAD(entries);
+ struct efivar_entry *entry, *aux;
+ unsigned long size;
+ void *data;
+ int ret;
+
+ ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries);
+
+ list_for_each_entry_safe(entry, aux, &entries, list) {
+ pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt,
+ &entry->var.VendorGuid);
+
+ list_del(&entry->list);
+
+ ret = efivar_entry_size(entry, &size);
+ if (ret) {
+ pr_err("failed to get var size\n");
+ goto free_entry;
+ }
+
+ data = kmalloc(size, GFP_KERNEL);
+ if (!data)
+ goto free_entry;
+
+ ret = efivar_entry_get(entry, NULL, &size, data);
+ if (ret) {
+ pr_err("failed to get var data\n");
+ goto free_data;
+ }
+
+ ret = acpi_load_table(data);
+ if (ret) {
+ pr_err("failed to load table: %d\n", ret);
+ goto free_data;
+ }
+
+ goto free_entry;
+
+free_data:
+ kfree(data);
+
+free_entry:
+ kfree(entry);
+ }
+
+ return ret;
+}
+#else
+static inline int efivar_ssdt_load(void) { return 0; }
+#endif
+
/*
* We register the efi subsystem with the firmware subsystem and the
* efivars subsystem with the efi subsystem, if the system was booted with
@@ -218,6 +311,9 @@ static int __init efisubsys_init(void)
if (error)
goto err_put;
+ if (efi_enabled(EFI_RUNTIME_SERVICES))
+ efivar_ssdt_load();
+
error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
if (error) {
pr_err("efi: Sysfs attribute export failed with error %d.\n",
@@ -472,12 +568,14 @@ device_initcall(efi_load_efivars);
FIELD_SIZEOF(struct efi_fdt_params, field) \
}
-static __initdata struct {
+struct params {
const char name[32];
const char propname[32];
int offset;
int size;
-} dt_params[] = {
+};
+
+static __initdata struct params fdt_params[] = {
UEFI_PARAM("System Table", "linux,uefi-system-table", system_table),
UEFI_PARAM("MemMap Address", "linux,uefi-mmap-start", mmap),
UEFI_PARAM("MemMap Size", "linux,uefi-mmap-size", mmap_size),
@@ -485,44 +583,94 @@ static __initdata struct {
UEFI_PARAM("MemMap Desc. Version", "linux,uefi-mmap-desc-ver", desc_ver)
};
+static __initdata struct params xen_fdt_params[] = {
+ UEFI_PARAM("System Table", "xen,uefi-system-table", system_table),
+ UEFI_PARAM("MemMap Address", "xen,uefi-mmap-start", mmap),
+ UEFI_PARAM("MemMap Size", "xen,uefi-mmap-size", mmap_size),
+ UEFI_PARAM("MemMap Desc. Size", "xen,uefi-mmap-desc-size", desc_size),
+ UEFI_PARAM("MemMap Desc. Version", "xen,uefi-mmap-desc-ver", desc_ver)
+};
+
+#define EFI_FDT_PARAMS_SIZE ARRAY_SIZE(fdt_params)
+
+static __initdata struct {
+ const char *uname;
+ const char *subnode;
+ struct params *params;
+} dt_params[] = {
+ { "hypervisor", "uefi", xen_fdt_params },
+ { "chosen", NULL, fdt_params },
+};
+
struct param_info {
int found;
void *params;
+ const char *missing;
};
-static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
- int depth, void *data)
+static int __init __find_uefi_params(unsigned long node,
+ struct param_info *info,
+ struct params *params)
{
- struct param_info *info = data;
const void *prop;
void *dest;
u64 val;
int i, len;
- if (depth != 1 || strcmp(uname, "chosen") != 0)
- return 0;
-
- for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
- prop = of_get_flat_dt_prop(node, dt_params[i].propname, &len);
- if (!prop)
+ for (i = 0; i < EFI_FDT_PARAMS_SIZE; i++) {
+ prop = of_get_flat_dt_prop(node, params[i].propname, &len);
+ if (!prop) {
+ info->missing = params[i].name;
return 0;
- dest = info->params + dt_params[i].offset;
+ }
+
+ dest = info->params + params[i].offset;
info->found++;
val = of_read_number(prop, len / sizeof(u32));
- if (dt_params[i].size == sizeof(u32))
+ if (params[i].size == sizeof(u32))
*(u32 *)dest = val;
else
*(u64 *)dest = val;
if (efi_enabled(EFI_DBG))
- pr_info(" %s: 0x%0*llx\n", dt_params[i].name,
- dt_params[i].size * 2, val);
+ pr_info(" %s: 0x%0*llx\n", params[i].name,
+ params[i].size * 2, val);
}
+
return 1;
}
+static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
+ int depth, void *data)
+{
+ struct param_info *info = data;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
+ const char *subnode = dt_params[i].subnode;
+
+ if (depth != 1 || strcmp(uname, dt_params[i].uname) != 0) {
+ info->missing = dt_params[i].params[0].name;
+ continue;
+ }
+
+ if (subnode) {
+ int err = of_get_flat_dt_subnode_by_name(node, subnode);
+
+ if (err < 0)
+ return 0;
+
+ node = err;
+ }
+
+ return __find_uefi_params(node, info, dt_params[i].params);
+ }
+
+ return 0;
+}
+
int __init efi_get_fdt_params(struct efi_fdt_params *params)
{
struct param_info info;
@@ -538,7 +686,7 @@ int __init efi_get_fdt_params(struct efi_fdt_params *params)
pr_info("UEFI not found.\n");
else if (!ret)
pr_err("Can't find '%s' in device tree!\n",
- dt_params[info.found].name);
+ info.missing);
return ret;
}
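efivar_ssdt_setup() above hangs off the early-param machinery; a minimal
sketch of the __setup() pattern it uses, with a hypothetical option name.
The handler runs during early command-line parsing, well before initcalls,
so it can only stash the string for later use:

static char demo_opt[16] __initdata;

static int __init demo_opt_setup(char *str)
{
	strlcpy(demo_opt, str, sizeof(demo_opt));
	return 1;	/* non-zero: "demo_opt=" is consumed here */
}
__setup("demo_opt=", demo_opt_setup);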
diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c
index 8dd0c7085..503bbe2a9 100644
--- a/drivers/firmware/efi/efibc.c
+++ b/drivers/firmware/efi/efibc.c
@@ -37,13 +37,13 @@ static int efibc_set_variable(const char *name, const char *value)
size_t size = (strlen(value) + 1) * sizeof(efi_char16_t);
if (size > sizeof(entry->var.Data)) {
- pr_err("value is too large");
+ pr_err("value is too large (%zu bytes) for '%s' EFI variable\n", size, name);
return -EINVAL;
}
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
- pr_err("failed to allocate efivar entry");
+ pr_err("failed to allocate efivar entry for '%s' EFI variable\n", name);
return -ENOMEM;
}
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index 23bef6bb7..41958774c 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -22,7 +22,16 @@
#include <linux/stringify.h>
#include <asm/efi.h>
-static void efi_call_virt_check_flags(unsigned long flags, const char *call)
+/*
+ * Wrap around the new efi_call_virt_generic() macros so that the
+ * code doesn't get too cluttered:
+ */
+#define efi_call_virt(f, args...) \
+ efi_call_virt_pointer(efi.systab->runtime, f, args)
+#define __efi_call_virt(f, args...) \
+ __efi_call_virt_pointer(efi.systab->runtime, f, args)
+
+void efi_call_virt_check_flags(unsigned long flags, const char *call)
{
unsigned long cur_flags, mismatch;
@@ -39,48 +48,6 @@ static void efi_call_virt_check_flags(unsigned long flags, const char *call)
}
/*
- * Arch code can implement the following three template macros, avoiding
- * reptition for the void/non-void return cases of {__,}efi_call_virt:
- *
- * * arch_efi_call_virt_setup
- *
- * Sets up the environment for the call (e.g. switching page tables,
- * allowing kernel-mode use of floating point, if required).
- *
- * * arch_efi_call_virt
- *
- * Performs the call. The last expression in the macro must be the call
- * itself, allowing the logic to be shared by the void and non-void
- * cases.
- *
- * * arch_efi_call_virt_teardown
- *
- * Restores the usual kernel environment once the call has returned.
- */
-
-#define efi_call_virt(f, args...) \
-({ \
- efi_status_t __s; \
- unsigned long flags; \
- arch_efi_call_virt_setup(); \
- local_save_flags(flags); \
- __s = arch_efi_call_virt(f, args); \
- efi_call_virt_check_flags(flags, __stringify(f)); \
- arch_efi_call_virt_teardown(); \
- __s; \
-})
-
-#define __efi_call_virt(f, args...) \
-({ \
- unsigned long flags; \
- arch_efi_call_virt_setup(); \
- local_save_flags(flags); \
- arch_efi_call_virt(f, args); \
- efi_call_virt_check_flags(flags, __stringify(f)); \
- arch_efi_call_virt_teardown(); \
-})
-
-/*
* According to section 7.1 of the UEFI spec, Runtime Services are not fully
* reentrant, and there are particular combinations of calls that need to be
* serialized. (source: UEFI Specification v2.4A)
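The two local wrappers now defined at the top of this file delegate to the
generic efi_call_virt_pointer() machinery in <linux/efi.h>, which expands
roughly as below (paraphrased for illustration, not verbatim):

#define demo_call_virt_pointer(p, f, args...)				\
({									\
	efi_status_t __s;						\
	unsigned long __flags;						\
									\
	arch_efi_call_virt_setup();					\
	local_save_flags(__flags);					\
	__s = arch_efi_call_virt(p, f, args);				\
	efi_call_virt_check_flags(__flags, __stringify(f));		\
	arch_efi_call_virt_teardown();					\
	__s;								\
})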
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
index 03e045827..8263429e2 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -13,6 +13,7 @@
#define pr_fmt(fmt) "psci: " fmt
+#include <linux/acpi.h>
#include <linux/arm-smccc.h>
#include <linux/cpuidle.h>
#include <linux/errno.h>
@@ -256,13 +257,6 @@ static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
u32 *psci_states;
struct device_node *state_node;
- /*
- * If the PSCI cpu_suspend function hook has not been initialized
- * idle states must not be enabled, so bail out
- */
- if (!psci_ops.cpu_suspend)
- return -EOPNOTSUPP;
-
/* Count idle states */
while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
count))) {
@@ -310,11 +304,69 @@ free_mem:
return ret;
}
+#ifdef CONFIG_ACPI
+#include <acpi/processor.h>
+
+static int __maybe_unused psci_acpi_cpu_init_idle(unsigned int cpu)
+{
+ int i, count;
+ u32 *psci_states;
+ struct acpi_lpi_state *lpi;
+ struct acpi_processor *pr = per_cpu(processors, cpu);
+
+ if (unlikely(!pr || !pr->flags.has_lpi))
+ return -EINVAL;
+
+ count = pr->power.count - 1;
+ if (count <= 0)
+ return -ENODEV;
+
+ psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
+ if (!psci_states)
+ return -ENOMEM;
+
+ for (i = 0; i < count; i++) {
+ u32 state;
+
+ lpi = &pr->power.lpi_states[i + 1];
+ /*
+ * Only bits[31:0] represent a PSCI power_state while
+ * bits[63:32] must be 0x0 as per ARM ACPI FFH Specification
+ */
+ state = lpi->address;
+ if (!psci_power_state_is_valid(state)) {
+ pr_warn("Invalid PSCI power state %#x\n", state);
+ kfree(psci_states);
+ return -EINVAL;
+ }
+ psci_states[i] = state;
+ }
+ /* Idle states parsed correctly, initialize per-cpu pointer */
+ per_cpu(psci_power_state, cpu) = psci_states;
+ return 0;
+}
+#else
+static int __maybe_unused psci_acpi_cpu_init_idle(unsigned int cpu)
+{
+ return -EINVAL;
+}
+#endif
+
int psci_cpu_init_idle(unsigned int cpu)
{
struct device_node *cpu_node;
int ret;
+ /*
+ * If the PSCI cpu_suspend function hook has not been initialized
+ * idle states must not be enabled, so bail out
+ */
+ if (!psci_ops.cpu_suspend)
+ return -EOPNOTSUPP;
+
+ if (!acpi_disabled)
+ return psci_acpi_cpu_init_idle(cpu);
+
cpu_node = of_get_cpu_node(cpu, NULL);
if (!cpu_node)
return -ENODEV;
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c
index 0883292f6..c6aeedbdc 100644
--- a/drivers/firmware/qcom_scm-32.c
+++ b/drivers/firmware/qcom_scm-32.c
@@ -23,8 +23,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/qcom_scm.h>
-
-#include <asm/cacheflush.h>
+#include <linux/dma-mapping.h>
#include "qcom_scm.h"
@@ -97,44 +96,6 @@ struct qcom_scm_response {
};
/**
- * alloc_qcom_scm_command() - Allocate an SCM command
- * @cmd_size: size of the command buffer
- * @resp_size: size of the response buffer
- *
- * Allocate an SCM command, including enough room for the command
- * and response headers as well as the command and response buffers.
- *
- * Returns a valid &qcom_scm_command on success or %NULL if the allocation fails.
- */
-static struct qcom_scm_command *alloc_qcom_scm_command(size_t cmd_size, size_t resp_size)
-{
- struct qcom_scm_command *cmd;
- size_t len = sizeof(*cmd) + sizeof(struct qcom_scm_response) + cmd_size +
- resp_size;
- u32 offset;
-
- cmd = kzalloc(PAGE_ALIGN(len), GFP_KERNEL);
- if (cmd) {
- cmd->len = cpu_to_le32(len);
- offset = offsetof(struct qcom_scm_command, buf);
- cmd->buf_offset = cpu_to_le32(offset);
- cmd->resp_hdr_offset = cpu_to_le32(offset + cmd_size);
- }
- return cmd;
-}
-
-/**
- * free_qcom_scm_command() - Free an SCM command
- * @cmd: command to free
- *
- * Free an SCM command.
- */
-static inline void free_qcom_scm_command(struct qcom_scm_command *cmd)
-{
- kfree(cmd);
-}
-
-/**
* qcom_scm_command_to_response() - Get a pointer to a qcom_scm_response
* @cmd: command
*
@@ -168,23 +129,6 @@ static inline void *qcom_scm_get_response_buffer(const struct qcom_scm_response
return (void *)rsp + le32_to_cpu(rsp->buf_offset);
}
-static int qcom_scm_remap_error(int err)
-{
- pr_err("qcom_scm_call failed with error code %d\n", err);
- switch (err) {
- case QCOM_SCM_ERROR:
- return -EIO;
- case QCOM_SCM_EINVAL_ADDR:
- case QCOM_SCM_EINVAL_ARG:
- return -EINVAL;
- case QCOM_SCM_EOPNOTSUPP:
- return -EOPNOTSUPP;
- case QCOM_SCM_ENOMEM:
- return -ENOMEM;
- }
- return -EINVAL;
-}
-
static u32 smc(u32 cmd_addr)
{
int context_id;
@@ -209,45 +153,9 @@ static u32 smc(u32 cmd_addr)
return r0;
}
-static int __qcom_scm_call(const struct qcom_scm_command *cmd)
-{
- int ret;
- u32 cmd_addr = virt_to_phys(cmd);
-
- /*
- * Flush the command buffer so that the secure world sees
- * the correct data.
- */
- secure_flush_area(cmd, cmd->len);
-
- ret = smc(cmd_addr);
- if (ret < 0)
- ret = qcom_scm_remap_error(ret);
-
- return ret;
-}
-
-static void qcom_scm_inv_range(unsigned long start, unsigned long end)
-{
- u32 cacheline_size, ctr;
-
- asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
- cacheline_size = 4 << ((ctr >> 16) & 0xf);
-
- start = round_down(start, cacheline_size);
- end = round_up(end, cacheline_size);
- outer_inv_range(start, end);
- while (start < end) {
- asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start)
- : "memory");
- start += cacheline_size;
- }
- dsb();
- isb();
-}
-
/**
* qcom_scm_call() - Send an SCM command
+ * @dev: device used to DMA-map the command buffer
* @svc_id: service identifier
* @cmd_id: command identifier
* @cmd_buf: command buffer
@@ -264,42 +172,59 @@ static void qcom_scm_inv_range(unsigned long start, unsigned long end)
* and response buffers is taken care of by qcom_scm_call; however, callers are
* responsible for any other cached buffers passed over to the secure world.
*/
-static int qcom_scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf,
- size_t cmd_len, void *resp_buf, size_t resp_len)
+static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
+ const void *cmd_buf, size_t cmd_len, void *resp_buf,
+ size_t resp_len)
{
int ret;
struct qcom_scm_command *cmd;
struct qcom_scm_response *rsp;
- unsigned long start, end;
+ size_t alloc_len = sizeof(*cmd) + cmd_len + sizeof(*rsp) + resp_len;
+ dma_addr_t cmd_phys;
- cmd = alloc_qcom_scm_command(cmd_len, resp_len);
+ cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
+ cmd->len = cpu_to_le32(alloc_len);
+ cmd->buf_offset = cpu_to_le32(sizeof(*cmd));
+ cmd->resp_hdr_offset = cpu_to_le32(sizeof(*cmd) + cmd_len);
+
cmd->id = cpu_to_le32((svc_id << 10) | cmd_id);
if (cmd_buf)
memcpy(qcom_scm_get_command_buffer(cmd), cmd_buf, cmd_len);
+ rsp = qcom_scm_command_to_response(cmd);
+
+ cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, cmd_phys)) {
+ kfree(cmd);
+ return -ENOMEM;
+ }
+
mutex_lock(&qcom_scm_lock);
- ret = __qcom_scm_call(cmd);
+ ret = smc(cmd_phys);
+ if (ret < 0)
+ ret = qcom_scm_remap_error(ret);
mutex_unlock(&qcom_scm_lock);
if (ret)
goto out;
- rsp = qcom_scm_command_to_response(cmd);
- start = (unsigned long)rsp;
-
do {
- qcom_scm_inv_range(start, start + sizeof(*rsp));
+ dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len,
+ sizeof(*rsp), DMA_FROM_DEVICE);
} while (!rsp->is_complete);
- end = (unsigned long)qcom_scm_get_response_buffer(rsp) + resp_len;
- qcom_scm_inv_range(start, end);
-
- if (resp_buf)
- memcpy(resp_buf, qcom_scm_get_response_buffer(rsp), resp_len);
+ if (resp_buf) {
+ dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len +
+ le32_to_cpu(rsp->buf_offset),
+ resp_len, DMA_FROM_DEVICE);
+ memcpy(resp_buf, qcom_scm_get_response_buffer(rsp),
+ resp_len);
+ }
out:
- free_qcom_scm_command(cmd);
+ dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE);
+ kfree(cmd);
return ret;
}
@@ -342,6 +267,41 @@ static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
return r0;
}
+/**
+ * qcom_scm_call_atomic2() - Send an atomic SCM command with two arguments
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @arg1: first argument
+ * @arg2: second argument
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptible, atomic and SMP safe.
+ */
+static s32 qcom_scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2)
+{
+ int context_id;
+
+ register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 2);
+ register u32 r1 asm("r1") = (u32)&context_id;
+ register u32 r2 asm("r2") = arg1;
+ register u32 r3 asm("r3") = arg2;
+
+ asm volatile(
+ __asmeq("%0", "r0")
+ __asmeq("%1", "r0")
+ __asmeq("%2", "r1")
+ __asmeq("%3", "r2")
+ __asmeq("%4", "r3")
+#ifdef REQUIRES_SEC
+ ".arch_extension sec\n"
+#endif
+ "smc #0 @ switch to secure world\n"
+ : "=r" (r0)
+ : "r" (r0), "r" (r1), "r" (r2), "r" (r3)
+ );
+ return r0;
+}
+
u32 qcom_scm_get_version(void)
{
int context_id;
@@ -378,22 +338,6 @@ u32 qcom_scm_get_version(void)
}
EXPORT_SYMBOL(qcom_scm_get_version);
-/*
- * Set the cold/warm boot address for one of the CPU cores.
- */
-static int qcom_scm_set_boot_addr(u32 addr, int flags)
-{
- struct {
- __le32 flags;
- __le32 addr;
- } cmd;
-
- cmd.addr = cpu_to_le32(addr);
- cmd.flags = cpu_to_le32(flags);
- return qcom_scm_call(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
- &cmd, sizeof(cmd), NULL, 0);
-}
-
/**
* qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
* @entry: Entry point function for the cpus
@@ -423,7 +367,8 @@ int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
set_cpu_present(cpu, false);
}
- return qcom_scm_set_boot_addr(virt_to_phys(entry), flags);
+ return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
+ flags, virt_to_phys(entry));
}
/**
@@ -434,11 +379,16 @@ int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
* Set the Linux entry point for the SCM to transfer control to when coming
* out of a power down. CPU power down may be executed on cpuidle or hotplug.
*/
-int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
+int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry,
+ const cpumask_t *cpus)
{
int ret;
int flags = 0;
int cpu;
+ struct {
+ __le32 flags;
+ __le32 addr;
+ } cmd;
/*
* Reassign only if we are switching from hotplug entry point
@@ -454,7 +404,10 @@ int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
if (!flags)
return 0;
- ret = qcom_scm_set_boot_addr(virt_to_phys(entry), flags);
+ cmd.addr = cpu_to_le32(virt_to_phys(entry));
+ cmd.flags = cpu_to_le32(flags);
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
+ &cmd, sizeof(cmd), NULL, 0);
if (!ret) {
for_each_cpu(cpu, cpus)
qcom_scm_wb[cpu].entry = entry;
@@ -477,25 +430,133 @@ void __qcom_scm_cpu_power_down(u32 flags)
flags & QCOM_SCM_FLUSH_FLAG_MASK);
}
-int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id)
+int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id)
{
int ret;
__le32 svc_cmd = cpu_to_le32((svc_id << 10) | cmd_id);
__le32 ret_val = 0;
- ret = qcom_scm_call(QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD, &svc_cmd,
- sizeof(svc_cmd), &ret_val, sizeof(ret_val));
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD,
+ &svc_cmd, sizeof(svc_cmd), &ret_val,
+ sizeof(ret_val));
if (ret)
return ret;
return le32_to_cpu(ret_val);
}
-int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
+int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
+ u32 req_cnt, u32 *resp)
{
if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
return -ERANGE;
- return qcom_scm_call(QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP,
+ return qcom_scm_call(dev, QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP,
req, req_cnt * sizeof(*req), resp, sizeof(*resp));
}
+
+void __qcom_scm_init(void)
+{
+}
+
+bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral)
+{
+ __le32 out;
+ __le32 in;
+ int ret;
+
+ in = cpu_to_le32(peripheral);
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
+ QCOM_SCM_PAS_IS_SUPPORTED_CMD,
+ &in, sizeof(in),
+ &out, sizeof(out));
+
+ return ret ? false : !!out;
+}
+
+int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral,
+ dma_addr_t metadata_phys)
+{
+ __le32 scm_ret;
+ int ret;
+ struct {
+ __le32 proc;
+ __le32 image_addr;
+ } request;
+
+ request.proc = cpu_to_le32(peripheral);
+ request.image_addr = cpu_to_le32(metadata_phys);
+
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
+ QCOM_SCM_PAS_INIT_IMAGE_CMD,
+ &request, sizeof(request),
+ &scm_ret, sizeof(scm_ret));
+
+ return ret ? : le32_to_cpu(scm_ret);
+}
+
+int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral,
+ phys_addr_t addr, phys_addr_t size)
+{
+ __le32 scm_ret;
+ int ret;
+ struct {
+ __le32 proc;
+ __le32 addr;
+ __le32 len;
+ } request;
+
+ request.proc = cpu_to_le32(peripheral);
+ request.addr = cpu_to_le32(addr);
+ request.len = cpu_to_le32(size);
+
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
+ QCOM_SCM_PAS_MEM_SETUP_CMD,
+ &request, sizeof(request),
+ &scm_ret, sizeof(scm_ret));
+
+ return ret ? : le32_to_cpu(scm_ret);
+}
+
+int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral)
+{
+ __le32 out;
+ __le32 in;
+ int ret;
+
+ in = cpu_to_le32(peripheral);
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
+ QCOM_SCM_PAS_AUTH_AND_RESET_CMD,
+ &in, sizeof(in),
+ &out, sizeof(out));
+
+ return ret ? : le32_to_cpu(out);
+}
+
+int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral)
+{
+ __le32 out;
+ __le32 in;
+ int ret;
+
+ in = cpu_to_le32(peripheral);
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
+ QCOM_SCM_PAS_SHUTDOWN_CMD,
+ &in, sizeof(in),
+ &out, sizeof(out));
+
+ return ret ? : le32_to_cpu(out);
+}
+
+int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
+{
+ __le32 out;
+ __le32 in = cpu_to_le32(reset);
+ int ret;
+
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MSS_RESET,
+ &in, sizeof(in),
+ &out, sizeof(out));
+
+ return ret ? : le32_to_cpu(out);
+}
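For reference, the single buffer that the reworked qcom_scm_call() above
maps with dma_map_single() is laid out as follows (derived from the
alloc_len, buf_offset and resp_hdr_offset assignments in the hunk):

/*
 * cmd_phys                            -> struct qcom_scm_command header
 * cmd_phys + sizeof(*cmd)             -> command payload (cmd_len bytes)
 * cmd_phys + sizeof(*cmd) + cmd_len   -> struct qcom_scm_response header;
 *                                        is_complete is polled through
 *                                        dma_sync_single_for_cpu()
 * ... + le32_to_cpu(rsp->buf_offset)  -> response payload (resp_len bytes)
 */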
diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c
index bb6555f6d..4a0f5ead4 100644
--- a/drivers/firmware/qcom_scm-64.c
+++ b/drivers/firmware/qcom_scm-64.c
@@ -12,7 +12,150 @@
#include <linux/io.h>
#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/types.h>
#include <linux/qcom_scm.h>
+#include <linux/arm-smccc.h>
+#include <linux/dma-mapping.h>
+
+#include "qcom_scm.h"
+
+#define QCOM_SCM_FNID(s, c) ((((s) & 0xFF) << 8) | ((c) & 0xFF))
+
+#define MAX_QCOM_SCM_ARGS 10
+#define MAX_QCOM_SCM_RETS 3
+
+enum qcom_scm_arg_types {
+ QCOM_SCM_VAL,
+ QCOM_SCM_RO,
+ QCOM_SCM_RW,
+ QCOM_SCM_BUFVAL,
+};
+
+#define QCOM_SCM_ARGS_IMPL(num, a, b, c, d, e, f, g, h, i, j, ...) (\
+ (((a) & 0x3) << 4) | \
+ (((b) & 0x3) << 6) | \
+ (((c) & 0x3) << 8) | \
+ (((d) & 0x3) << 10) | \
+ (((e) & 0x3) << 12) | \
+ (((f) & 0x3) << 14) | \
+ (((g) & 0x3) << 16) | \
+ (((h) & 0x3) << 18) | \
+ (((i) & 0x3) << 20) | \
+ (((j) & 0x3) << 22) | \
+ ((num) & 0xf))
+
+#define QCOM_SCM_ARGS(...) QCOM_SCM_ARGS_IMPL(__VA_ARGS__, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+
+/**
+ * struct qcom_scm_desc
+ * @arginfo: Metadata describing the arguments in args[]
+ * @args: The array of arguments for the secure syscall
+ * @res: The values returned by the secure syscall
+ */
+struct qcom_scm_desc {
+ u32 arginfo;
+ u64 args[MAX_QCOM_SCM_ARGS];
+};
+
+static u64 qcom_smccc_convention = -1;
+static DEFINE_MUTEX(qcom_scm_lock);
+
+#define QCOM_SCM_EBUSY_WAIT_MS 30
+#define QCOM_SCM_EBUSY_MAX_RETRY 20
+
+#define N_EXT_QCOM_SCM_ARGS 7
+#define FIRST_EXT_ARG_IDX 3
+#define N_REGISTER_ARGS (MAX_QCOM_SCM_ARGS - N_EXT_QCOM_SCM_ARGS + 1)
+
+/**
+ * qcom_scm_call() - Invoke a syscall in the secure world
+ * @dev: device
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @desc: Descriptor structure containing arguments and return values
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This should *only* be called in preemptible context.
+ */
+static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
+ const struct qcom_scm_desc *desc,
+ struct arm_smccc_res *res)
+{
+ int arglen = desc->arginfo & 0xf;
+ int retry_count = 0, i;
+ u32 fn_id = QCOM_SCM_FNID(svc_id, cmd_id);
+ u64 cmd, x5 = desc->args[FIRST_EXT_ARG_IDX];
+ dma_addr_t args_phys = 0;
+ void *args_virt = NULL;
+ size_t alloc_len;
+
+ if (unlikely(arglen > N_REGISTER_ARGS)) {
+ alloc_len = N_EXT_QCOM_SCM_ARGS * sizeof(u64);
+ args_virt = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
+
+ if (!args_virt)
+ return -ENOMEM;
+
+ if (qcom_smccc_convention == ARM_SMCCC_SMC_32) {
+ __le32 *args = args_virt;
+
+ for (i = 0; i < N_EXT_QCOM_SCM_ARGS; i++)
+ args[i] = cpu_to_le32(desc->args[i +
+ FIRST_EXT_ARG_IDX]);
+ } else {
+ __le64 *args = args_virt;
+
+ for (i = 0; i < N_EXT_QCOM_SCM_ARGS; i++)
+ args[i] = cpu_to_le64(desc->args[i +
+ FIRST_EXT_ARG_IDX]);
+ }
+
+ args_phys = dma_map_single(dev, args_virt, alloc_len,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(dev, args_phys)) {
+ kfree(args_virt);
+ return -ENOMEM;
+ }
+
+ x5 = args_phys;
+ }
+
+ do {
+ mutex_lock(&qcom_scm_lock);
+
+ cmd = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
+ qcom_smccc_convention,
+ ARM_SMCCC_OWNER_SIP, fn_id);
+
+ do {
+ arm_smccc_smc(cmd, desc->arginfo, desc->args[0],
+ desc->args[1], desc->args[2], x5, 0, 0,
+ res);
+ } while (res->a0 == QCOM_SCM_INTERRUPTED);
+
+ mutex_unlock(&qcom_scm_lock);
+
+ if (res->a0 == QCOM_SCM_V2_EBUSY) {
+ if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
+ break;
+ msleep(QCOM_SCM_EBUSY_WAIT_MS);
+ }
+ } while (res->a0 == QCOM_SCM_V2_EBUSY);
+
+ if (args_virt) {
+ dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
+ kfree(args_virt);
+ }
+
+ if (res->a0 < 0)
+ return qcom_scm_remap_error(res->a0);
+
+ return 0;
+}
/**
* qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
@@ -29,13 +172,15 @@ int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
/**
* qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
+ * @dev: Device pointer
* @entry: Entry point function for the cpus
* @cpus: The cpumask of cpus that will use the entry point
*
* Set the Linux entry point for the SCM to transfer control to when coming
* out of a power down. CPU power down may be executed on cpuidle or hotplug.
*/
-int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
+int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry,
+ const cpumask_t *cpus)
{
return -ENOTSUPP;
}
@@ -52,12 +197,164 @@ void __qcom_scm_cpu_power_down(u32 flags)
{
}
-int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id)
+int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id)
{
- return -ENOTSUPP;
+ int ret;
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+
+ desc.arginfo = QCOM_SCM_ARGS(1);
+ desc.args[0] = QCOM_SCM_FNID(svc_id, cmd_id) |
+ (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
+
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD,
+ &desc, &res);
+
+ return ret ? : res.a1;
}
-int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
+int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
+ u32 req_cnt, u32 *resp)
{
- return -ENOTSUPP;
+ int ret;
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+
+ if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
+ return -ERANGE;
+
+ desc.args[0] = req[0].addr;
+ desc.args[1] = req[0].val;
+ desc.args[2] = req[1].addr;
+ desc.args[3] = req[1].val;
+ desc.args[4] = req[2].addr;
+ desc.args[5] = req[2].val;
+ desc.args[6] = req[3].addr;
+ desc.args[7] = req[3].val;
+ desc.args[8] = req[4].addr;
+ desc.args[9] = req[4].val;
+ desc.arginfo = QCOM_SCM_ARGS(10);
+
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP, &desc,
+ &res);
+ *resp = res.a1;
+
+ return ret;
+}
+
+void __qcom_scm_init(void)
+{
+ u64 cmd;
+ struct arm_smccc_res res;
+ u32 function = QCOM_SCM_FNID(QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD);
+
+ /* First try a SMC64 call */
+ cmd = ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64,
+ ARM_SMCCC_OWNER_SIP, function);
+
+ arm_smccc_smc(cmd, QCOM_SCM_ARGS(1), cmd & (~BIT(ARM_SMCCC_TYPE_SHIFT)),
+ 0, 0, 0, 0, 0, &res);
+
+ if (!res.a0 && res.a1)
+ qcom_smccc_convention = ARM_SMCCC_SMC_64;
+ else
+ qcom_smccc_convention = ARM_SMCCC_SMC_32;
+}
+
+bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral)
+{
+ int ret;
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+
+ desc.args[0] = peripheral;
+ desc.arginfo = QCOM_SCM_ARGS(1);
+
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
+ QCOM_SCM_PAS_IS_SUPPORTED_CMD,
+ &desc, &res);
+
+ return ret ? false : !!res.a1;
+}
+
+int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral,
+ dma_addr_t metadata_phys)
+{
+ int ret;
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+
+ desc.args[0] = peripheral;
+ desc.args[1] = metadata_phys;
+ desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW);
+
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_INIT_IMAGE_CMD,
+ &desc, &res);
+
+ return ret ? : res.a1;
+}
+
+int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral,
+ phys_addr_t addr, phys_addr_t size)
+{
+ int ret;
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+
+ desc.args[0] = peripheral;
+ desc.args[1] = addr;
+ desc.args[2] = size;
+ desc.arginfo = QCOM_SCM_ARGS(3);
+
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MEM_SETUP_CMD,
+ &desc, &res);
+
+ return ret ? : res.a1;
+}
+
+int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral)
+{
+ int ret;
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+
+ desc.args[0] = peripheral;
+ desc.arginfo = QCOM_SCM_ARGS(1);
+
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
+ QCOM_SCM_PAS_AUTH_AND_RESET_CMD,
+ &desc, &res);
+
+ return ret ? : res.a1;
+}
+
+int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral)
+{
+ int ret;
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+
+ desc.args[0] = peripheral;
+ desc.arginfo = QCOM_SCM_ARGS(1);
+
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_SHUTDOWN_CMD,
+ &desc, &res);
+
+ return ret ? : res.a1;
+}
+
+int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
+{
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+ int ret;
+
+ desc.args[0] = reset;
+ desc.args[1] = 0;
+ desc.arginfo = QCOM_SCM_ARGS(2);
+
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MSS_RESET, &desc,
+ &res);
+
+ return ret ? : res.a1;
}
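A hedged sketch of issuing a call through the new SMCCC-based
qcom_scm_call(): up to ten u64 arguments travel in the descriptor, and
args[3..9] are spilled to a DMA-mapped buffer when more than
N_REGISTER_ARGS are used. The command id below is made up for illustration:

static int demo_scm_call(struct device *dev, u32 a, u32 b)
{
	struct qcom_scm_desc desc = {0};
	struct arm_smccc_res res;
	int ret;

	desc.args[0] = a;
	desc.args[1] = b;
	desc.arginfo = QCOM_SCM_ARGS(2);

	/* 0x42 is a hypothetical command id, not a real SCM call */
	ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, 0x42, &desc, &res);

	return ret ? : res.a1;	/* first return value on success */
}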
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 45c008d68..e64a501ad 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -10,19 +10,64 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*/
-
+#include <linux/platform_device.h>
+#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/export.h>
+#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/qcom_scm.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/clk.h>
+#include <linux/reset-controller.h>
#include "qcom_scm.h"
+struct qcom_scm {
+ struct device *dev;
+ struct clk *core_clk;
+ struct clk *iface_clk;
+ struct clk *bus_clk;
+ struct reset_controller_dev reset;
+};
+
+static struct qcom_scm *__scm;
+
+static int qcom_scm_clk_enable(void)
+{
+ int ret;
+
+ ret = clk_prepare_enable(__scm->core_clk);
+ if (ret)
+ goto bail;
+
+ ret = clk_prepare_enable(__scm->iface_clk);
+ if (ret)
+ goto disable_core;
+
+ ret = clk_prepare_enable(__scm->bus_clk);
+ if (ret)
+ goto disable_iface;
+
+ return 0;
+
+disable_iface:
+ clk_disable_unprepare(__scm->iface_clk);
+disable_core:
+ clk_disable_unprepare(__scm->core_clk);
+bail:
+ return ret;
+}
+
+static void qcom_scm_clk_disable(void)
+{
+ clk_disable_unprepare(__scm->core_clk);
+ clk_disable_unprepare(__scm->iface_clk);
+ clk_disable_unprepare(__scm->bus_clk);
+}
+
/**
* qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
* @entry: Entry point function for the cpus
@@ -47,7 +92,7 @@ EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
*/
int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
{
- return __qcom_scm_set_warm_boot_addr(entry, cpus);
+ return __qcom_scm_set_warm_boot_addr(__scm->dev, entry, cpus);
}
EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
@@ -72,12 +117,17 @@ EXPORT_SYMBOL(qcom_scm_cpu_power_down);
*/
bool qcom_scm_hdcp_available(void)
{
- int ret;
+ int ret = qcom_scm_clk_enable();
- ret = __qcom_scm_is_call_available(QCOM_SCM_SVC_HDCP,
- QCOM_SCM_CMD_HDCP);
+ if (ret)
+ return ret;
- return (ret > 0) ? true : false;
+ ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
+ QCOM_SCM_CMD_HDCP);
+
+ qcom_scm_clk_disable();
+
+ return ret > 0 ? true : false;
}
EXPORT_SYMBOL(qcom_scm_hdcp_available);
@@ -91,6 +141,287 @@ EXPORT_SYMBOL(qcom_scm_hdcp_available);
*/
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
- return __qcom_scm_hdcp_req(req, req_cnt, resp);
+ int ret = qcom_scm_clk_enable();
+
+ if (ret)
+ return ret;
+
+ ret = __qcom_scm_hdcp_req(__scm->dev, req, req_cnt, resp);
+ qcom_scm_clk_disable();
+ return ret;
}
EXPORT_SYMBOL(qcom_scm_hdcp_req);
+
+/**
+ * qcom_scm_pas_supported() - Check if the peripheral authentication service is
+ * available for the given peripheral
+ * @peripheral: peripheral id
+ *
+ * Returns true if PAS is supported for this peripheral, otherwise false.
+ */
+bool qcom_scm_pas_supported(u32 peripheral)
+{
+ int ret;
+
+ ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
+ QCOM_SCM_PAS_IS_SUPPORTED_CMD);
+ if (ret <= 0)
+ return false;
+
+ return __qcom_scm_pas_supported(__scm->dev, peripheral);
+}
+EXPORT_SYMBOL(qcom_scm_pas_supported);
+
+/**
+ * qcom_scm_pas_init_image() - Initialize peripheral authentication service
+ * state machine for a given peripheral, using the
+ * metadata
+ * @peripheral: peripheral id
+ * @metadata: pointer to memory containing ELF header, program header table
+ * and an optional blob of data used for authenticating the metadata
+ * and the rest of the firmware
+ * @size: size of the metadata
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
+{
+ dma_addr_t mdata_phys;
+ void *mdata_buf;
+ int ret;
+
+ /*
+ * During the SCM call, memory protection will be enabled for the
+ * metadata blob, so make sure it's physically contiguous, 4K aligned
+ * and non-cacheable to avoid XPU violations.
+ */
+ mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
+ GFP_KERNEL);
+ if (!mdata_buf) {
+ dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
+ return -ENOMEM;
+ }
+ memcpy(mdata_buf, metadata, size);
+
+ ret = qcom_scm_clk_enable();
+ if (ret)
+ goto free_metadata;
+
+ ret = __qcom_scm_pas_init_image(__scm->dev, peripheral, mdata_phys);
+
+ qcom_scm_clk_disable();
+
+free_metadata:
+ dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_scm_pas_init_image);
+
+/**
+ * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
+ * for firmware loading
+ * @peripheral: peripheral id
+ * @addr: start address of memory area to prepare
+ * @size: size of the memory area to prepare
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
+{
+ int ret;
+
+ ret = qcom_scm_clk_enable();
+ if (ret)
+ return ret;
+
+ ret = __qcom_scm_pas_mem_setup(__scm->dev, peripheral, addr, size);
+ qcom_scm_clk_disable();
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_scm_pas_mem_setup);
+
+/**
+ * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
+ * and reset the remote processor
+ * @peripheral: peripheral id
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_auth_and_reset(u32 peripheral)
+{
+ int ret;
+
+ ret = qcom_scm_clk_enable();
+ if (ret)
+ return ret;
+
+ ret = __qcom_scm_pas_auth_and_reset(__scm->dev, peripheral);
+ qcom_scm_clk_disable();
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);
+
+/**
+ * qcom_scm_pas_shutdown() - Shut down the remote processor
+ * @peripheral: peripheral id
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_shutdown(u32 peripheral)
+{
+ int ret;
+
+ ret = qcom_scm_clk_enable();
+ if (ret)
+ return ret;
+
+ ret = __qcom_scm_pas_shutdown(__scm->dev, peripheral);
+ qcom_scm_clk_disable();
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_scm_pas_shutdown);
+
+static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long idx)
+{
+ if (idx != 0)
+ return -EINVAL;
+
+ return __qcom_scm_pas_mss_reset(__scm->dev, 1);
+}
+
+static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long idx)
+{
+ if (idx != 0)
+ return -EINVAL;
+
+ return __qcom_scm_pas_mss_reset(__scm->dev, 0);
+}
+
+static const struct reset_control_ops qcom_scm_pas_reset_ops = {
+ .assert = qcom_scm_pas_reset_assert,
+ .deassert = qcom_scm_pas_reset_deassert,
+};
+
+/**
+ * qcom_scm_is_available() - Checks if SCM is available
+ */
+bool qcom_scm_is_available(void)
+{
+ return !!__scm;
+}
+EXPORT_SYMBOL(qcom_scm_is_available);
+
+static int qcom_scm_probe(struct platform_device *pdev)
+{
+ struct qcom_scm *scm;
+ int ret;
+
+ scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
+ if (!scm)
+ return -ENOMEM;
+
+ scm->core_clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(scm->core_clk)) {
+ if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
+ return PTR_ERR(scm->core_clk);
+
+ scm->core_clk = NULL;
+ }
+
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,scm")) {
+ scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
+ if (IS_ERR(scm->iface_clk)) {
+ if (PTR_ERR(scm->iface_clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to acquire iface clk\n");
+ return PTR_ERR(scm->iface_clk);
+ }
+
+ scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
+ if (IS_ERR(scm->bus_clk)) {
+ if (PTR_ERR(scm->bus_clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to acquire bus clk\n");
+ return PTR_ERR(scm->bus_clk);
+ }
+ }
+
+ scm->reset.ops = &qcom_scm_pas_reset_ops;
+ scm->reset.nr_resets = 1;
+ scm->reset.of_node = pdev->dev.of_node;
+ reset_controller_register(&scm->reset);
+
+ /* vote for max clk rate for highest performance */
+ ret = clk_set_rate(scm->core_clk, INT_MAX);
+ if (ret)
+ return ret;
+
+ __scm = scm;
+ __scm->dev = &pdev->dev;
+
+ __qcom_scm_init();
+
+ return 0;
+}
+
+static const struct of_device_id qcom_scm_dt_match[] = {
+ { .compatible = "qcom,scm-apq8064",},
+ { .compatible = "qcom,scm-msm8660",},
+ { .compatible = "qcom,scm-msm8960",},
+ { .compatible = "qcom,scm",},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);
+
+static struct platform_driver qcom_scm_driver = {
+ .driver = {
+ .name = "qcom_scm",
+ .of_match_table = qcom_scm_dt_match,
+ },
+ .probe = qcom_scm_probe,
+};
+
+static int __init qcom_scm_init(void)
+{
+ struct device_node *np, *fw_np;
+ int ret;
+
+ fw_np = of_find_node_by_name(NULL, "firmware");
+
+ if (!fw_np)
+ return -ENODEV;
+
+ np = of_find_matching_node(fw_np, qcom_scm_dt_match);
+
+ if (!np) {
+ of_node_put(fw_np);
+ return -ENODEV;
+ }
+
+ of_node_put(np);
+
+ ret = of_platform_populate(fw_np, qcom_scm_dt_match, NULL, NULL);
+
+ of_node_put(fw_np);
+
+ if (ret)
+ return ret;
+
+ return platform_driver_register(&qcom_scm_driver);
+}
+
+subsys_initcall(qcom_scm_init);
+
+static void __exit qcom_scm_exit(void)
+{
+ platform_driver_unregister(&qcom_scm_driver);
+}
+module_exit(qcom_scm_exit);
+
+MODULE_DESCRIPTION("Qualcomm SCM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
index 2cce75c08..3584b00fe 100644
--- a/drivers/firmware/qcom_scm.h
+++ b/drivers/firmware/qcom_scm.h
@@ -19,7 +19,8 @@
#define QCOM_SCM_FLAG_HLOS 0x01
#define QCOM_SCM_FLAG_COLDBOOT_MC 0x02
#define QCOM_SCM_FLAG_WARMBOOT_MC 0x04
-extern int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus);
+extern int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry,
+ const cpumask_t *cpus);
extern int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
#define QCOM_SCM_CMD_TERMINATE_PC 0x2
@@ -29,14 +30,34 @@ extern void __qcom_scm_cpu_power_down(u32 flags);
#define QCOM_SCM_SVC_INFO 0x6
#define QCOM_IS_CALL_AVAIL_CMD 0x1
-extern int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id);
+extern int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
+ u32 cmd_id);
#define QCOM_SCM_SVC_HDCP 0x11
#define QCOM_SCM_CMD_HDCP 0x01
-extern int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
- u32 *resp);
+extern int __qcom_scm_hdcp_req(struct device *dev,
+ struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp);
+
+extern void __qcom_scm_init(void);
+
+#define QCOM_SCM_SVC_PIL 0x2
+#define QCOM_SCM_PAS_INIT_IMAGE_CMD 0x1
+#define QCOM_SCM_PAS_MEM_SETUP_CMD 0x2
+#define QCOM_SCM_PAS_AUTH_AND_RESET_CMD 0x5
+#define QCOM_SCM_PAS_SHUTDOWN_CMD 0x6
+#define QCOM_SCM_PAS_IS_SUPPORTED_CMD 0x7
+#define QCOM_SCM_PAS_MSS_RESET 0xa
+extern bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral);
+extern int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral,
+ dma_addr_t metadata_phys);
+extern int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral,
+ phys_addr_t addr, phys_addr_t size);
+extern int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral);
+extern int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral);
+extern int __qcom_scm_pas_mss_reset(struct device *dev, bool reset);
/* common error codes */
+#define QCOM_SCM_V2_EBUSY -12
#define QCOM_SCM_ENOMEM -5
#define QCOM_SCM_EOPNOTSUPP -4
#define QCOM_SCM_EINVAL_ADDR -3
@@ -44,4 +65,22 @@ extern int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
#define QCOM_SCM_ERROR -1
#define QCOM_SCM_INTERRUPTED 1
+static inline int qcom_scm_remap_error(int err)
+{
+ switch (err) {
+ case QCOM_SCM_ERROR:
+ return -EIO;
+ case QCOM_SCM_EINVAL_ADDR:
+ case QCOM_SCM_EINVAL_ARG:
+ return -EINVAL;
+ case QCOM_SCM_EOPNOTSUPP:
+ return -EOPNOTSUPP;
+ case QCOM_SCM_ENOMEM:
+ return -ENOMEM;
+ case QCOM_SCM_V2_EBUSY:
+ return -EBUSY;
+ }
+ return -EINVAL;
+}
+
#endif
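
For orientation, the PAS entry points declared above compose into a firmware load-and-boot sequence along the following lines. This is a hedged sketch rather than code from this commit: the peripheral id, metadata blob and memory window are hypothetical placeholders that a real caller (e.g. a remoteproc driver) would derive from the parsed firmware.

/* Hedged sketch: boot a hypothetical peripheral via the PAS calls above. */
static int example_pas_boot(u32 pas_id, const void *mdt, size_t mdt_len,
			    phys_addr_t fw_addr, phys_addr_t fw_size)
{
	int ret;

	if (!qcom_scm_is_available() || !qcom_scm_pas_supported(pas_id))
		return -ENODEV;

	ret = qcom_scm_pas_init_image(pas_id, mdt, mdt_len);
	if (ret)
		return ret;

	ret = qcom_scm_pas_mem_setup(pas_id, fw_addr, fw_size);
	if (ret)
		goto err_shutdown;

	/* the caller copies firmware segments into [fw_addr, fw_addr + fw_size) */

	ret = qcom_scm_pas_auth_and_reset(pas_id);
	if (ret)
		goto err_shutdown;

	return 0;

err_shutdown:
	qcom_scm_pas_shutdown(pas_id);
	return ret;
}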
diff --git a/drivers/firmware/scpi_pm_domain.c b/drivers/firmware/scpi_pm_domain.c
new file mode 100644
index 000000000..f395dec27
--- /dev/null
+++ b/drivers/firmware/scpi_pm_domain.c
@@ -0,0 +1,163 @@
+/*
+ * SCPI Generic power domain support.
+ *
+ * Copyright (C) 2016 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/pm_domain.h>
+#include <linux/scpi_protocol.h>
+
+struct scpi_pm_domain {
+ struct generic_pm_domain genpd;
+ struct scpi_ops *ops;
+ u32 domain;
+ char name[30];
+};
+
+/*
+ * These device power state values are not well-defined in the specification.
+ * In case different implementations use different values, we can make these
+ * specific to compatibles rather than getting these values from the device tree.
+ */
+enum scpi_power_domain_state {
+ SCPI_PD_STATE_ON = 0,
+ SCPI_PD_STATE_OFF = 3,
+};
+
+#define to_scpi_pd(gpd) container_of(gpd, struct scpi_pm_domain, genpd)
+
+static int scpi_pd_power(struct scpi_pm_domain *pd, bool power_on)
+{
+ int ret;
+ enum scpi_power_domain_state state;
+
+ if (power_on)
+ state = SCPI_PD_STATE_ON;
+ else
+ state = SCPI_PD_STATE_OFF;
+
+ ret = pd->ops->device_set_power_state(pd->domain, state);
+ if (ret)
+ return ret;
+
+ return !(state == pd->ops->device_get_power_state(pd->domain));
+}
+
+static int scpi_pd_power_on(struct generic_pm_domain *domain)
+{
+ struct scpi_pm_domain *pd = to_scpi_pd(domain);
+
+ return scpi_pd_power(pd, true);
+}
+
+static int scpi_pd_power_off(struct generic_pm_domain *domain)
+{
+ struct scpi_pm_domain *pd = to_scpi_pd(domain);
+
+ return scpi_pd_power(pd, false);
+}
+
+static int scpi_pm_domain_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct scpi_pm_domain *scpi_pd;
+ struct genpd_onecell_data *scpi_pd_data;
+ struct generic_pm_domain **domains;
+ struct scpi_ops *scpi_ops;
+ int ret, num_domains, i;
+
+ scpi_ops = get_scpi_ops();
+ if (!scpi_ops)
+ return -EPROBE_DEFER;
+
+ if (!np) {
+ dev_err(dev, "device tree node not found\n");
+ return -ENODEV;
+ }
+
+ if (!scpi_ops->device_set_power_state ||
+ !scpi_ops->device_get_power_state) {
+ dev_err(dev, "power domains not supported in the firmware\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(np, "num-domains", &num_domains);
+ if (ret) {
+ dev_err(dev, "number of domains not found\n");
+ return -EINVAL;
+ }
+
+ scpi_pd = devm_kcalloc(dev, num_domains, sizeof(*scpi_pd), GFP_KERNEL);
+ if (!scpi_pd)
+ return -ENOMEM;
+
+ scpi_pd_data = devm_kzalloc(dev, sizeof(*scpi_pd_data), GFP_KERNEL);
+ if (!scpi_pd_data)
+ return -ENOMEM;
+
+ domains = devm_kcalloc(dev, num_domains, sizeof(*domains), GFP_KERNEL);
+ if (!domains)
+ return -ENOMEM;
+
+ for (i = 0; i < num_domains; i++, scpi_pd++) {
+ domains[i] = &scpi_pd->genpd;
+
+ scpi_pd->domain = i;
+ scpi_pd->ops = scpi_ops;
+ sprintf(scpi_pd->name, "%s.%d", np->name, i);
+ scpi_pd->genpd.name = scpi_pd->name;
+ scpi_pd->genpd.power_off = scpi_pd_power_off;
+ scpi_pd->genpd.power_on = scpi_pd_power_on;
+
+ /*
+ * Treat all power domains as off at boot.
+ *
+ * The SCP firmware itself may have switched on some domains,
+ * but for reference counting purposes, keep it this way.
+ */
+ pm_genpd_init(&scpi_pd->genpd, NULL, true);
+ }
+
+ scpi_pd_data->domains = domains;
+ scpi_pd_data->num_domains = num_domains;
+
+ of_genpd_add_provider_onecell(np, scpi_pd_data);
+
+ return 0;
+}
+
+static const struct of_device_id scpi_power_domain_ids[] = {
+ { .compatible = "arm,scpi-power-domains", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, scpi_power_domain_ids);
+
+static struct platform_driver scpi_power_domain_driver = {
+ .driver = {
+ .name = "scpi_power_domain",
+ .of_match_table = scpi_power_domain_ids,
+ },
+ .probe = scpi_pm_domain_probe,
+};
+module_platform_driver(scpi_power_domain_driver);
+
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("ARM SCPI power domain driver");
+MODULE_LICENSE("GPL v2");
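
The return convention in scpi_pd_power() above is easy to misread: genpd callbacks must return 0 on success, so the driver sets the requested state, reads it back, and turns any mismatch into a nonzero result. A minimal standalone sketch of that set-then-verify idiom, with set_state/get_state as hypothetical stand-ins for the scpi_ops hooks:

/* Hedged sketch of the set-then-verify idiom used by scpi_pd_power(). */
static int set_and_verify(u32 domain, int wanted,
			  int (*set_state)(u32, int), int (*get_state)(u32))
{
	int ret = set_state(domain, wanted);

	if (ret)
		return ret;

	/* 0 if the readback matches, nonzero (treated as failure) otherwise */
	return !(wanted == get_state(domain));
}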
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index c9b9fdf6c..d61410299 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -21,6 +21,7 @@ config FPGA_MGR_SOCFPGA
config FPGA_MGR_ZYNQ_FPGA
tristate "Xilinx Zynq FPGA"
+ depends on HAS_DMA
help
FPGA manager driver support for Xilinx Zynq FPGAs.
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 5d457ff61..24caedb00 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -189,7 +189,7 @@ config GPIO_EP93XX
config GPIO_ETRAXFS
bool "Axis ETRAX FS General I/O"
depends on CRIS || COMPILE_TEST
- depends on OF
+ depends on OF_GPIO
select GPIO_GENERIC
select GPIOLIB_IRQCHIP
help
@@ -215,7 +215,7 @@ config GPIO_GENERIC_PLATFORM
config GPIO_GRGPIO
tristate "Aeroflex Gaisler GRGPIO support"
- depends on OF
+ depends on OF_GPIO
select GPIO_GENERIC
select IRQ_DOMAIN
help
@@ -251,7 +251,7 @@ config GPIO_LOONGSON
driver for GPIO functionality on Loongson-2F/3A/3B processors.
config GPIO_LPC18XX
- bool "NXP LPC18XX/43XX GPIO support"
+ tristate "NXP LPC18XX/43XX GPIO support"
default y if ARCH_LPC18XX
depends on OF_GPIO && (ARCH_LPC18XX || COMPILE_TEST)
help
@@ -313,7 +313,7 @@ config GPIO_MPC8XXX
config GPIO_MVEBU
def_bool y
depends on PLAT_ORION
- depends on OF
+ depends on OF_GPIO
select GENERIC_IRQ_CHIP
config GPIO_MXC
@@ -406,7 +406,7 @@ config GPIO_TEGRA
bool "NVIDIA Tegra GPIO support"
default ARCH_TEGRA
depends on ARCH_TEGRA || COMPILE_TEST
- depends on OF
+ depends on OF_GPIO
help
Say yes here to support GPIO pins on NVIDIA Tegra SoCs.
@@ -875,6 +875,15 @@ config GPIO_LP3943
LP3943 can be used as a GPIO expander which provides up to 16 GPIOs.
Open drain outputs are required for this usage.
+config GPIO_MAX77620
+ tristate "GPIO support for PMIC MAX77620 and MAX20024"
+ depends on MFD_MAX77620
+ help
+ GPIO driver for the MAX77620 and MAX20024 PMICs from Maxim Semiconductor.
+ The MAX77620 PMIC has 8 pins that can be configured as GPIOs. The
+ driver also provides interrupt support for each of the GPIOs.
+ Say yes here to enable the MAX77620 to be used as a GPIO controller.
+
config GPIO_MSIC
bool "Intel MSIC mixed signal gpio support"
depends on MFD_INTEL_MSIC
@@ -1030,11 +1039,18 @@ config GPIO_BT8XX
If unsure, say N.
config GPIO_INTEL_MID
- bool "Intel Mid GPIO support"
- depends on X86
+ bool "Intel MID GPIO support"
+ depends on X86_INTEL_MID
+ select GPIOLIB_IRQCHIP
+ help
+ Say Y here to support Intel MID GPIO.
+
+config GPIO_MERRIFIELD
+ tristate "Intel Merrifield GPIO support"
+ depends on X86_INTEL_MID
select GPIOLIB_IRQCHIP
help
- Say Y here to support Intel Mid GPIO.
+ Say Y here to support Intel Merrifield GPIO.
config GPIO_ML_IOH
tristate "OKI SEMICONDUCTOR ML7213 IOH GPIO support"
@@ -1084,7 +1100,7 @@ menu "SPI GPIO expanders"
config GPIO_74X164
tristate "74x164 serial-in/parallel-out 8-bits shift register"
- depends on OF
+ depends on OF_GPIO
help
Driver for 74x164 compatible serial-in/parallel-out 8-outputs
shift registers. This driver can be used to provide access
@@ -1115,6 +1131,7 @@ menu "SPI or I2C GPIO expanders"
config GPIO_MCP23S08
tristate "Microchip MCP23xxx I/O expander"
+ depends on OF_GPIO
select GPIOLIB_IRQCHIP
help
SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 991598ea3..2a035ed8f 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -61,8 +61,10 @@ obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o
obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o
obj-$(CONFIG_GPIO_MAX7301) += gpio-max7301.o
obj-$(CONFIG_GPIO_MAX732X) += gpio-max732x.o
+obj-$(CONFIG_GPIO_MAX77620) += gpio-max77620.o
obj-$(CONFIG_GPIO_MB86S7X) += gpio-mb86s7x.o
obj-$(CONFIG_GPIO_MENZ127) += gpio-menz127.o
+obj-$(CONFIG_GPIO_MERRIFIELD) += gpio-merrifield.o
obj-$(CONFIG_GPIO_MC33880) += gpio-mc33880.o
obj-$(CONFIG_GPIO_MC9S08DZ60) += gpio-mc9s08dz60.o
obj-$(CONFIG_GPIO_MCP23S08) += gpio-mcp23s08.o
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index 80f9ddf13..a6607faf2 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -35,13 +35,8 @@ struct gen_74x164_chip {
static int __gen_74x164_write_config(struct gen_74x164_chip *chip)
{
- struct spi_transfer xfer = {
- .tx_buf = chip->buffer,
- .len = chip->registers,
- };
-
- return spi_sync_transfer(to_spi_device(chip->gpio_chip.parent),
- &xfer, 1);
+ return spi_write(to_spi_device(chip->gpio_chip.parent), chip->buffer,
+ chip->registers);
}
static int gen_74x164_get_value(struct gpio_chip *gc, unsigned offset)
diff --git a/drivers/gpio/gpio-clps711x.c b/drivers/gpio/gpio-clps711x.c
index 5a690256a..52fd63f02 100644
--- a/drivers/gpio/gpio-clps711x.c
+++ b/drivers/gpio/gpio-clps711x.c
@@ -20,8 +20,12 @@ static int clps711x_gpio_probe(struct platform_device *pdev)
void __iomem *dat, *dir;
struct gpio_chip *gc;
struct resource *res;
- int err, id = np ? of_alias_get_id(np, "gpio") : pdev->id;
+ int err, id;
+ if (!np)
+ return -ENODEV;
+
+ id = of_alias_get_id(np, "gpio");
if ((id < 0) || (id > 4))
return -ENODEV;
@@ -63,7 +67,7 @@ static int clps711x_gpio_probe(struct platform_device *pdev)
break;
}
- gc->base = id * 8;
+ gc->base = -1;
gc->owner = THIS_MODULE;
platform_set_drvdata(pdev, gc);
@@ -71,7 +75,7 @@ static int clps711x_gpio_probe(struct platform_device *pdev)
}
static const struct of_device_id __maybe_unused clps711x_gpio_ids[] = {
- { .compatible = "cirrus,clps711x-gpio" },
+ { .compatible = "cirrus,ep7209-gpio" },
{ }
};
MODULE_DEVICE_TABLE(of, clps711x_gpio_ids);
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 34779bb37..6193f62c0 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -486,6 +486,7 @@ dwapb_gpio_get_pdata(struct device *dev)
pp->idx >= DWAPB_MAX_PORTS) {
dev_err(dev,
"missing/invalid port index for port%d\n", i);
+ fwnode_handle_put(fwnode);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c
index 05aa538c3..600be8418 100644
--- a/drivers/gpio/gpio-f7188x.c
+++ b/drivers/gpio/gpio-f7188x.c
@@ -125,6 +125,7 @@ static inline void superio_exit(int base)
* GPIO chip.
*/
+static int f7188x_gpio_get_direction(struct gpio_chip *chip, unsigned offset);
static int f7188x_gpio_direction_in(struct gpio_chip *chip, unsigned offset);
static int f7188x_gpio_get(struct gpio_chip *chip, unsigned offset);
static int f7188x_gpio_direction_out(struct gpio_chip *chip,
@@ -139,6 +140,7 @@ static int f7188x_gpio_set_single_ended(struct gpio_chip *gc,
.chip = { \
.label = DRVNAME, \
.owner = THIS_MODULE, \
+ .get_direction = f7188x_gpio_get_direction, \
.direction_input = f7188x_gpio_direction_in, \
.get = f7188x_gpio_get, \
.direction_output = f7188x_gpio_direction_out, \
@@ -209,6 +211,26 @@ static struct f7188x_gpio_bank f81866_gpio_bank[] = {
F7188X_GPIO_BANK(80, 8, 0x88),
};
+static int f7188x_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+{
+ int err;
+ struct f7188x_gpio_bank *bank =
+ container_of(chip, struct f7188x_gpio_bank, chip);
+ struct f7188x_sio *sio = bank->data->sio;
+ u8 dir;
+
+ err = superio_enter(sio->addr);
+ if (err)
+ return err;
+ superio_select(sio->addr, SIO_LD_GPIO);
+
+ dir = superio_inb(sio->addr, gpio_dir(bank->regbase));
+
+ superio_exit(sio->addr);
+
+ return !(dir & 1 << offset);
+}
+
static int f7188x_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
{
int err;
diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
index c0f7cce23..164de64b1 100644
--- a/drivers/gpio/gpio-intel-mid.c
+++ b/drivers/gpio/gpio-intel-mid.c
@@ -1,7 +1,7 @@
/*
* Intel MID GPIO driver
*
- * Copyright (c) 2008-2014 Intel Corporation.
+ * Copyright (c) 2008-2014,2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -19,18 +19,18 @@
* Clovertrail platform Cloverview chip.
*/
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/platform_device.h>
-#include <linux/kernel.h>
#include <linux/delay.h>
-#include <linux/stddef.h>
-#include <linux/interrupt.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/gpio/driver.h>
-#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
#define INTEL_MID_IRQ_TYPE_EDGE (1 << 0)
#define INTEL_MID_IRQ_TYPE_LEVEL (1 << 1)
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
index 9df015e85..fbd393b46 100644
--- a/drivers/gpio/gpio-lynxpoint.c
+++ b/drivers/gpio/gpio-lynxpoint.c
@@ -383,7 +383,6 @@ static int lp_gpio_probe(struct platform_device *pdev)
handle_simple_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(dev, "failed to add irqchip\n");
- gpiochip_remove(gc);
return ret;
}
diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c
new file mode 100644
index 000000000..b46b436cb
--- /dev/null
+++ b/drivers/gpio/gpio-max77620.c
@@ -0,0 +1,315 @@
+/*
+ * MAXIM MAX77620 GPIO driver
+ *
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/max77620.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define GPIO_REG_ADDR(offset) (MAX77620_REG_GPIO0 + offset)
+
+struct max77620_gpio {
+ struct gpio_chip gpio_chip;
+ struct regmap *rmap;
+ struct device *dev;
+ int gpio_irq;
+ int irq_base;
+ int gpio_base;
+};
+
+static const struct regmap_irq max77620_gpio_irqs[] = {
+ [0] = {
+ .mask = MAX77620_IRQ_LVL2_GPIO_EDGE0,
+ .type_rising_mask = MAX77620_CNFG_GPIO_INT_RISING,
+ .type_falling_mask = MAX77620_CNFG_GPIO_INT_FALLING,
+ .reg_offset = 0,
+ .type_reg_offset = 0,
+ },
+ [1] = {
+ .mask = MAX77620_IRQ_LVL2_GPIO_EDGE1,
+ .type_rising_mask = MAX77620_CNFG_GPIO_INT_RISING,
+ .type_falling_mask = MAX77620_CNFG_GPIO_INT_FALLING,
+ .reg_offset = 0,
+ .type_reg_offset = 1,
+ },
+ [2] = {
+ .mask = MAX77620_IRQ_LVL2_GPIO_EDGE2,
+ .type_rising_mask = MAX77620_CNFG_GPIO_INT_RISING,
+ .type_falling_mask = MAX77620_CNFG_GPIO_INT_FALLING,
+ .reg_offset = 0,
+ .type_reg_offset = 2,
+ },
+ [3] = {
+ .mask = MAX77620_IRQ_LVL2_GPIO_EDGE3,
+ .type_rising_mask = MAX77620_CNFG_GPIO_INT_RISING,
+ .type_falling_mask = MAX77620_CNFG_GPIO_INT_FALLING,
+ .reg_offset = 0,
+ .type_reg_offset = 3,
+ },
+ [4] = {
+ .mask = MAX77620_IRQ_LVL2_GPIO_EDGE4,
+ .type_rising_mask = MAX77620_CNFG_GPIO_INT_RISING,
+ .type_falling_mask = MAX77620_CNFG_GPIO_INT_FALLING,
+ .reg_offset = 0,
+ .type_reg_offset = 4,
+ },
+ [5] = {
+ .mask = MAX77620_IRQ_LVL2_GPIO_EDGE5,
+ .type_rising_mask = MAX77620_CNFG_GPIO_INT_RISING,
+ .type_falling_mask = MAX77620_CNFG_GPIO_INT_FALLING,
+ .reg_offset = 0,
+ .type_reg_offset = 5,
+ },
+ [6] = {
+ .mask = MAX77620_IRQ_LVL2_GPIO_EDGE6,
+ .type_rising_mask = MAX77620_CNFG_GPIO_INT_RISING,
+ .type_falling_mask = MAX77620_CNFG_GPIO_INT_FALLING,
+ .reg_offset = 0,
+ .type_reg_offset = 6,
+ },
+ [7] = {
+ .mask = MAX77620_IRQ_LVL2_GPIO_EDGE7,
+ .type_rising_mask = MAX77620_CNFG_GPIO_INT_RISING,
+ .type_falling_mask = MAX77620_CNFG_GPIO_INT_FALLING,
+ .reg_offset = 0,
+ .type_reg_offset = 7,
+ },
+};
+
+static struct regmap_irq_chip max77620_gpio_irq_chip = {
+ .name = "max77620-gpio",
+ .irqs = max77620_gpio_irqs,
+ .num_irqs = ARRAY_SIZE(max77620_gpio_irqs),
+ .num_regs = 1,
+ .num_type_reg = 8,
+ .irq_reg_stride = 1,
+ .type_reg_stride = 1,
+ .status_base = MAX77620_REG_IRQ_LVL2_GPIO,
+ .type_base = MAX77620_REG_GPIO0,
+};
+
+static int max77620_gpio_dir_input(struct gpio_chip *gc, unsigned int offset)
+{
+ struct max77620_gpio *mgpio = gpiochip_get_data(gc);
+ int ret;
+
+ ret = regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset),
+ MAX77620_CNFG_GPIO_DIR_MASK,
+ MAX77620_CNFG_GPIO_DIR_INPUT);
+ if (ret < 0)
+ dev_err(mgpio->dev, "CNFG_GPIOx dir update failed: %d\n", ret);
+
+ return ret;
+}
+
+static int max77620_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct max77620_gpio *mgpio = gpiochip_get_data(gc);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(mgpio->rmap, GPIO_REG_ADDR(offset), &val);
+ if (ret < 0) {
+ dev_err(mgpio->dev, "CNFG_GPIOx read failed: %d\n", ret);
+ return ret;
+ }
+
+ if (val & MAX77620_CNFG_GPIO_DIR_MASK)
+ return !!(val & MAX77620_CNFG_GPIO_INPUT_VAL_MASK);
+ else
+ return !!(val & MAX77620_CNFG_GPIO_OUTPUT_VAL_MASK);
+}
+
+static int max77620_gpio_dir_output(struct gpio_chip *gc, unsigned int offset,
+ int value)
+{
+ struct max77620_gpio *mgpio = gpiochip_get_data(gc);
+ u8 val;
+ int ret;
+
+ val = (value) ? MAX77620_CNFG_GPIO_OUTPUT_VAL_HIGH :
+ MAX77620_CNFG_GPIO_OUTPUT_VAL_LOW;
+
+ ret = regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset),
+ MAX77620_CNFG_GPIO_OUTPUT_VAL_MASK, val);
+ if (ret < 0) {
+ dev_err(mgpio->dev, "CNFG_GPIOx val update failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset),
+ MAX77620_CNFG_GPIO_DIR_MASK,
+ MAX77620_CNFG_GPIO_DIR_OUTPUT);
+ if (ret < 0)
+ dev_err(mgpio->dev, "CNFG_GPIOx dir update failed: %d\n", ret);
+
+ return ret;
+}
+
+static int max77620_gpio_set_debounce(struct gpio_chip *gc,
+ unsigned int offset,
+ unsigned int debounce)
+{
+ struct max77620_gpio *mgpio = gpiochip_get_data(gc);
+ u8 val;
+ int ret;
+
+ switch (debounce) {
+ case 0:
+ val = MAX77620_CNFG_GPIO_DBNC_None;
+ break;
+ case 1 ... 8:
+ val = MAX77620_CNFG_GPIO_DBNC_8ms;
+ break;
+ case 9 ... 16:
+ val = MAX77620_CNFG_GPIO_DBNC_16ms;
+ break;
+ case 17 ... 32:
+ val = MAX77620_CNFG_GPIO_DBNC_32ms;
+ break;
+ default:
+ dev_err(mgpio->dev, "Illegal value %u\n", debounce);
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset),
+ MAX77620_CNFG_GPIO_DBNC_MASK, val);
+ if (ret < 0)
+ dev_err(mgpio->dev, "CNFG_GPIOx_DBNC update failed: %d\n", ret);
+
+ return ret;
+}
+
+static void max77620_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
+{
+ struct max77620_gpio *mgpio = gpiochip_get_data(gc);
+ u8 val;
+ int ret;
+
+ val = (value) ? MAX77620_CNFG_GPIO_OUTPUT_VAL_HIGH :
+ MAX77620_CNFG_GPIO_OUTPUT_VAL_LOW;
+
+ ret = regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset),
+ MAX77620_CNFG_GPIO_OUTPUT_VAL_MASK, val);
+ if (ret < 0)
+ dev_err(mgpio->dev, "CNFG_GPIO_OUT update failed: %d\n", ret);
+}
+
+static int max77620_gpio_set_single_ended(struct gpio_chip *gc,
+ unsigned int offset,
+ enum single_ended_mode mode)
+{
+ struct max77620_gpio *mgpio = gpiochip_get_data(gc);
+
+ switch (mode) {
+ case LINE_MODE_OPEN_DRAIN:
+ return regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset),
+ MAX77620_CNFG_GPIO_DRV_MASK,
+ MAX77620_CNFG_GPIO_DRV_OPENDRAIN);
+ case LINE_MODE_PUSH_PULL:
+ return regmap_update_bits(mgpio->rmap, GPIO_REG_ADDR(offset),
+ MAX77620_CNFG_GPIO_DRV_MASK,
+ MAX77620_CNFG_GPIO_DRV_PUSHPULL);
+ default:
+ break;
+ }
+
+ return -ENOTSUPP;
+}
+
+static int max77620_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
+{
+ struct max77620_gpio *mgpio = gpiochip_get_data(gc);
+ struct max77620_chip *chip = dev_get_drvdata(mgpio->dev->parent);
+
+ return regmap_irq_get_virq(chip->gpio_irq_data, offset);
+}
+
+static int max77620_gpio_probe(struct platform_device *pdev)
+{
+ struct max77620_chip *chip = dev_get_drvdata(pdev->dev.parent);
+ struct max77620_gpio *mgpio;
+ int gpio_irq;
+ int ret;
+
+ gpio_irq = platform_get_irq(pdev, 0);
+ if (gpio_irq <= 0) {
+ dev_err(&pdev->dev, "GPIO irq not available %d\n", gpio_irq);
+ return -ENODEV;
+ }
+
+ mgpio = devm_kzalloc(&pdev->dev, sizeof(*mgpio), GFP_KERNEL);
+ if (!mgpio)
+ return -ENOMEM;
+
+ mgpio->rmap = chip->rmap;
+ mgpio->dev = &pdev->dev;
+ mgpio->gpio_irq = gpio_irq;
+
+ mgpio->gpio_chip.label = pdev->name;
+ mgpio->gpio_chip.parent = &pdev->dev;
+ mgpio->gpio_chip.direction_input = max77620_gpio_dir_input;
+ mgpio->gpio_chip.get = max77620_gpio_get;
+ mgpio->gpio_chip.direction_output = max77620_gpio_dir_output;
+ mgpio->gpio_chip.set_debounce = max77620_gpio_set_debounce;
+ mgpio->gpio_chip.set = max77620_gpio_set;
+ mgpio->gpio_chip.set_single_ended = max77620_gpio_set_single_ended;
+ mgpio->gpio_chip.to_irq = max77620_gpio_to_irq;
+ mgpio->gpio_chip.ngpio = MAX77620_GPIO_NR;
+ mgpio->gpio_chip.can_sleep = 1;
+ mgpio->gpio_chip.base = -1;
+ mgpio->irq_base = -1;
+#ifdef CONFIG_OF_GPIO
+ mgpio->gpio_chip.of_node = pdev->dev.parent->of_node;
+#endif
+
+ platform_set_drvdata(pdev, mgpio);
+
+ ret = devm_gpiochip_add_data(&pdev->dev, &mgpio->gpio_chip, mgpio);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "gpio_init: Failed to add max77620_gpio\n");
+ return ret;
+ }
+
+ mgpio->gpio_base = mgpio->gpio_chip.base;
+ ret = devm_regmap_add_irq_chip(&pdev->dev, chip->rmap, mgpio->gpio_irq,
+ IRQF_ONESHOT, mgpio->irq_base,
+ &max77620_gpio_irq_chip,
+ &chip->gpio_irq_data);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to add gpio irq_chip %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id max77620_gpio_devtype[] = {
+ { .name = "max77620-gpio", },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, max77620_gpio_devtype);
+
+static struct platform_driver max77620_gpio_driver = {
+ .driver.name = "max77620-gpio",
+ .probe = max77620_gpio_probe,
+ .id_table = max77620_gpio_devtype,
+};
+
+module_platform_driver(max77620_gpio_driver);
+
+MODULE_DESCRIPTION("GPIO interface for MAX77620 and MAX20024 PMIC");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_AUTHOR("Chaitanya Bandi <bandik@nvidia.com>");
+MODULE_ALIAS("platform:max77620-gpio");
+MODULE_LICENSE("GPL v2");
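
Since the chip is registered with can_sleep set (it lives behind I2C/regmap), consumers must use the _cansleep accessors. A hedged consumer-side sketch, where the "reset" con_id and its device-tree mapping are assumptions for illustration:

/* Hedged sketch: driving one MAX77620 GPIO from a consumer driver. */
#include <linux/gpio/consumer.h>

static int example_use_pmic_gpio(struct device *dev)
{
	/* "reset" is a hypothetical con_id resolved via a *-gpios DT property */
	struct gpio_desc *reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);

	if (IS_ERR(reset))
		return PTR_ERR(reset);

	/* the expander sleeps on I2C, hence the _cansleep variant */
	gpiod_set_value_cansleep(reset, 1);
	return 0;
}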
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index ac22efc18..99d37b56c 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -564,7 +564,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
mcp->chip.direction_output = mcp23s08_direction_output;
mcp->chip.set = mcp23s08_set;
mcp->chip.dbg_show = mcp23s08_dbg_show;
-#ifdef CONFIG_OF
+#ifdef CONFIG_OF_GPIO
mcp->chip.of_gpio_n_cells = 2;
mcp->chip.of_node = dev->of_node;
#endif
diff --git a/drivers/gpio/gpio-menz127.c b/drivers/gpio/gpio-menz127.c
index cc103aff4..a1210e330 100644
--- a/drivers/gpio/gpio-menz127.c
+++ b/drivers/gpio/gpio-menz127.c
@@ -187,7 +187,6 @@ MODULE_DEVICE_TABLE(mcb, men_z127_ids);
static struct mcb_driver men_z127_driver = {
.driver = {
.name = "z127-gpio",
- .owner = THIS_MODULE,
},
.probe = men_z127_probe,
.remove = men_z127_remove,
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
new file mode 100644
index 000000000..45b51278b
--- /dev/null
+++ b/drivers/gpio/gpio-merrifield.c
@@ -0,0 +1,444 @@
+/*
+ * Intel Merrifield SoC GPIO driver
+ *
+ * Copyright (c) 2016 Intel Corporation.
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pinctrl/consumer.h>
+
+#define GCCR 0x000 /* controller configuration */
+#define GPLR 0x004 /* pin level r/o */
+#define GPDR 0x01c /* pin direction */
+#define GPSR 0x034 /* pin set w/o */
+#define GPCR 0x04c /* pin clear w/o */
+#define GRER 0x064 /* rising edge detect */
+#define GFER 0x07c /* falling edge detect */
+#define GFBR 0x094 /* glitch filter bypass */
+#define GIMR 0x0ac /* interrupt mask */
+#define GISR 0x0c4 /* interrupt source */
+#define GITR 0x300 /* input type */
+#define GLPR 0x318 /* level input polarity */
+#define GWMR 0x400 /* wake mask */
+#define GWSR 0x418 /* wake source */
+#define GSIR 0xc00 /* secure input */
+
+/* Intel Merrifield has 192 GPIO pins */
+#define MRFLD_NGPIO 192
+
+struct mrfld_gpio_pinrange {
+ unsigned int gpio_base;
+ unsigned int pin_base;
+ unsigned int npins;
+};
+
+#define GPIO_PINRANGE(gstart, gend, pstart) \
+ { \
+ .gpio_base = (gstart), \
+ .pin_base = (pstart), \
+ .npins = (gend) - (gstart) + 1, \
+ }
+
+struct mrfld_gpio {
+ struct gpio_chip chip;
+ void __iomem *reg_base;
+ raw_spinlock_t lock;
+ struct device *dev;
+};
+
+static const struct mrfld_gpio_pinrange mrfld_gpio_ranges[] = {
+ GPIO_PINRANGE(0, 11, 146),
+ GPIO_PINRANGE(12, 13, 144),
+ GPIO_PINRANGE(14, 15, 35),
+ GPIO_PINRANGE(16, 16, 164),
+ GPIO_PINRANGE(17, 18, 105),
+ GPIO_PINRANGE(19, 22, 101),
+ GPIO_PINRANGE(23, 30, 107),
+ GPIO_PINRANGE(32, 43, 67),
+ GPIO_PINRANGE(44, 63, 195),
+ GPIO_PINRANGE(64, 67, 140),
+ GPIO_PINRANGE(68, 69, 165),
+ GPIO_PINRANGE(70, 71, 65),
+ GPIO_PINRANGE(72, 76, 228),
+ GPIO_PINRANGE(77, 86, 37),
+ GPIO_PINRANGE(87, 87, 48),
+ GPIO_PINRANGE(88, 88, 47),
+ GPIO_PINRANGE(89, 96, 49),
+ GPIO_PINRANGE(97, 97, 34),
+ GPIO_PINRANGE(102, 119, 83),
+ GPIO_PINRANGE(120, 123, 79),
+ GPIO_PINRANGE(124, 135, 115),
+ GPIO_PINRANGE(137, 142, 158),
+ GPIO_PINRANGE(154, 163, 24),
+ GPIO_PINRANGE(164, 176, 215),
+ GPIO_PINRANGE(177, 189, 127),
+ GPIO_PINRANGE(190, 191, 178),
+};
+
+static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned int offset,
+ unsigned int reg_type_offset)
+{
+ struct mrfld_gpio *priv = gpiochip_get_data(chip);
+ u8 reg = offset / 32;
+
+ return priv->reg_base + reg_type_offset + reg * 4;
+}
+
+static int mrfld_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ void __iomem *gplr = gpio_reg(chip, offset, GPLR);
+
+ return !!(readl(gplr) & BIT(offset % 32));
+}
+
+static void mrfld_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct mrfld_gpio *priv = gpiochip_get_data(chip);
+ void __iomem *gpsr, *gpcr;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&priv->lock, flags);
+
+ if (value) {
+ gpsr = gpio_reg(chip, offset, GPSR);
+ writel(BIT(offset % 32), gpsr);
+ } else {
+ gpcr = gpio_reg(chip, offset, GPCR);
+ writel(BIT(offset % 32), gpcr);
+ }
+
+ raw_spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static int mrfld_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct mrfld_gpio *priv = gpiochip_get_data(chip);
+ void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
+ unsigned long flags;
+ u32 value;
+
+ raw_spin_lock_irqsave(&priv->lock, flags);
+
+ value = readl(gpdr);
+ value &= ~BIT(offset % 32);
+ writel(value, gpdr);
+
+ raw_spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int mrfld_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct mrfld_gpio *priv = gpiochip_get_data(chip);
+ void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
+ unsigned long flags;
+
+ mrfld_gpio_set(chip, offset, value);
+
+ raw_spin_lock_irqsave(&priv->lock, flags);
+
+ value = readl(gpdr);
+ value |= BIT(offset % 32);
+ writel(value, gpdr);
+
+ raw_spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static void mrfld_irq_ack(struct irq_data *d)
+{
+ struct mrfld_gpio *priv = irq_data_get_irq_chip_data(d);
+ u32 gpio = irqd_to_hwirq(d);
+ void __iomem *gisr = gpio_reg(&priv->chip, gpio, GISR);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&priv->lock, flags);
+
+ writel(BIT(gpio % 32), gisr);
+
+ raw_spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void mrfld_irq_unmask_mask(struct irq_data *d, bool unmask)
+{
+ struct mrfld_gpio *priv = irq_data_get_irq_chip_data(d);
+ u32 gpio = irqd_to_hwirq(d);
+ void __iomem *gimr = gpio_reg(&priv->chip, gpio, GIMR);
+ unsigned long flags;
+ u32 value;
+
+ raw_spin_lock_irqsave(&priv->lock, flags);
+
+ if (unmask)
+ value = readl(gimr) | BIT(gpio % 32);
+ else
+ value = readl(gimr) & ~BIT(gpio % 32);
+ writel(value, gimr);
+
+ raw_spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void mrfld_irq_mask(struct irq_data *d)
+{
+ mrfld_irq_unmask_mask(d, false);
+}
+
+static void mrfld_irq_unmask(struct irq_data *d)
+{
+ mrfld_irq_unmask_mask(d, true);
+}
+
+static int mrfld_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct mrfld_gpio *priv = gpiochip_get_data(gc);
+ u32 gpio = irqd_to_hwirq(d);
+ void __iomem *grer = gpio_reg(&priv->chip, gpio, GRER);
+ void __iomem *gfer = gpio_reg(&priv->chip, gpio, GFER);
+ void __iomem *gitr = gpio_reg(&priv->chip, gpio, GITR);
+ void __iomem *glpr = gpio_reg(&priv->chip, gpio, GLPR);
+ unsigned long flags;
+ u32 value;
+
+ raw_spin_lock_irqsave(&priv->lock, flags);
+
+ if (type & IRQ_TYPE_EDGE_RISING)
+ value = readl(grer) | BIT(gpio % 32);
+ else
+ value = readl(grer) & ~BIT(gpio % 32);
+ writel(value, grer);
+
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ value = readl(gfer) | BIT(gpio % 32);
+ else
+ value = readl(gfer) & ~BIT(gpio % 32);
+ writel(value, gfer);
+
+ /*
+ * To prevent glitches from triggering an unintended level interrupt,
+ * configure GLPR register first and then configure GITR.
+ */
+ if (type & IRQ_TYPE_LEVEL_LOW)
+ value = readl(glpr) | BIT(gpio % 32);
+ else
+ value = readl(glpr) & ~BIT(gpio % 32);
+ writel(value, glpr);
+
+ if (type & IRQ_TYPE_LEVEL_MASK) {
+ value = readl(gitr) | BIT(gpio % 32);
+ writel(value, gitr);
+
+ irq_set_handler_locked(d, handle_level_irq);
+ } else if (type & IRQ_TYPE_EDGE_BOTH) {
+ value = readl(gitr) & ~BIT(gpio % 32);
+ writel(value, gitr);
+
+ irq_set_handler_locked(d, handle_edge_irq);
+ }
+
+ raw_spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int mrfld_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct mrfld_gpio *priv = gpiochip_get_data(gc);
+ u32 gpio = irqd_to_hwirq(d);
+ void __iomem *gwmr = gpio_reg(&priv->chip, gpio, GWMR);
+ void __iomem *gwsr = gpio_reg(&priv->chip, gpio, GWSR);
+ unsigned long flags;
+ u32 value;
+
+ raw_spin_lock_irqsave(&priv->lock, flags);
+
+ /* Clear the existing wake status */
+ writel(BIT(gpio % 32), gwsr);
+
+ if (on)
+ value = readl(gwmr) | BIT(gpio % 32);
+ else
+ value = readl(gwmr) & ~BIT(gpio % 32);
+ writel(value, gwmr);
+
+ raw_spin_unlock_irqrestore(&priv->lock, flags);
+
+ dev_dbg(priv->dev, "%sable wake for gpio %u\n", on ? "en" : "dis", gpio);
+ return 0;
+}
+
+static struct irq_chip mrfld_irqchip = {
+ .name = "gpio-merrifield",
+ .irq_ack = mrfld_irq_ack,
+ .irq_mask = mrfld_irq_mask,
+ .irq_unmask = mrfld_irq_unmask,
+ .irq_set_type = mrfld_irq_set_type,
+ .irq_set_wake = mrfld_irq_set_wake,
+};
+
+static void mrfld_irq_handler(struct irq_desc *desc)
+{
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct mrfld_gpio *priv = gpiochip_get_data(gc);
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ unsigned long base, gpio;
+
+ chained_irq_enter(irqchip, desc);
+
+ /* Query the GPIO controller to see which pin triggered the interrupt */
+ for (base = 0; base < priv->chip.ngpio; base += 32) {
+ void __iomem *gisr = gpio_reg(&priv->chip, base, GISR);
+ void __iomem *gimr = gpio_reg(&priv->chip, base, GIMR);
+ unsigned long pending, enabled;
+
+ pending = readl(gisr);
+ enabled = readl(gimr);
+
+ /* Only interrupts that are enabled */
+ pending &= enabled;
+
+ for_each_set_bit(gpio, &pending, 32) {
+ unsigned int irq;
+
+ irq = irq_find_mapping(gc->irqdomain, base + gpio);
+ generic_handle_irq(irq);
+ }
+ }
+
+ chained_irq_exit(irqchip, desc);
+}
+
+static void mrfld_irq_init_hw(struct mrfld_gpio *priv)
+{
+ void __iomem *reg;
+ unsigned int base;
+
+ for (base = 0; base < priv->chip.ngpio; base += 32) {
+ /* Clear the rising-edge detect register */
+ reg = gpio_reg(&priv->chip, base, GRER);
+ writel(0, reg);
+ /* Clear the falling-edge detect register */
+ reg = gpio_reg(&priv->chip, base, GFER);
+ writel(0, reg);
+ }
+}
+
+static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ const struct mrfld_gpio_pinrange *range;
+ struct mrfld_gpio *priv;
+ u32 gpio_base, irq_base;
+ void __iomem *base;
+ unsigned int i;
+ int retval;
+
+ retval = pcim_enable_device(pdev);
+ if (retval)
+ return retval;
+
+ retval = pcim_iomap_regions(pdev, BIT(1) | BIT(0), pci_name(pdev));
+ if (retval) {
+ dev_err(&pdev->dev, "I/O memory mapping error\n");
+ return retval;
+ }
+
+ base = pcim_iomap_table(pdev)[1];
+
+ irq_base = readl(base);
+ gpio_base = readl(sizeof(u32) + base);
+
+ /* Release the I/O mapping, since we already got the info from BAR1 */
+ pcim_iounmap_regions(pdev, BIT(1));
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(&pdev->dev, "can't allocate chip data\n");
+ return -ENOMEM;
+ }
+
+ priv->dev = &pdev->dev;
+ priv->reg_base = pcim_iomap_table(pdev)[0];
+
+ priv->chip.label = dev_name(&pdev->dev);
+ priv->chip.parent = &pdev->dev;
+ priv->chip.request = gpiochip_generic_request;
+ priv->chip.free = gpiochip_generic_free;
+ priv->chip.direction_input = mrfld_gpio_direction_input;
+ priv->chip.direction_output = mrfld_gpio_direction_output;
+ priv->chip.get = mrfld_gpio_get;
+ priv->chip.set = mrfld_gpio_set;
+ priv->chip.base = gpio_base;
+ priv->chip.ngpio = MRFLD_NGPIO;
+ priv->chip.can_sleep = false;
+
+ raw_spin_lock_init(&priv->lock);
+
+ pci_set_drvdata(pdev, priv);
+ retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
+ if (retval) {
+ dev_err(&pdev->dev, "gpiochip_add error %d\n", retval);
+ return retval;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mrfld_gpio_ranges); i++) {
+ range = &mrfld_gpio_ranges[i];
+ retval = gpiochip_add_pin_range(&priv->chip,
+ "pinctrl-merrifield",
+ range->gpio_base,
+ range->pin_base,
+ range->npins);
+ if (retval) {
+ dev_err(&pdev->dev, "failed to add GPIO pin range\n");
+ return retval;
+ }
+ }
+
+ retval = gpiochip_irqchip_add(&priv->chip, &mrfld_irqchip, irq_base,
+ handle_simple_irq, IRQ_TYPE_NONE);
+ if (retval) {
+ dev_err(&pdev->dev, "could not connect irqchip to gpiochip\n");
+ return retval;
+ }
+
+ mrfld_irq_init_hw(priv);
+
+ gpiochip_set_chained_irqchip(&priv->chip, &mrfld_irqchip, pdev->irq,
+ mrfld_irq_handler);
+
+ return 0;
+}
+
+static const struct pci_device_id mrfld_gpio_ids[] = {
+ { PCI_VDEVICE(INTEL, 0x1199) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, mrfld_gpio_ids);
+
+static struct pci_driver mrfld_gpio_driver = {
+ .name = "gpio-merrifield",
+ .id_table = mrfld_gpio_ids,
+ .probe = mrfld_gpio_probe,
+};
+
+module_pci_driver(mrfld_gpio_driver);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Merrifield SoC GPIO driver");
+MODULE_LICENSE("GPL v2");
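
gpio_reg() above banks the 192 lines into 32-bit registers per function: the register index is offset / 32 and the bit position is offset % 32. A self-contained sketch of that arithmetic, using line 70 as an arbitrary worked example:

/* Hedged sketch: the banking math behind gpio_reg(), worked for one line. */
static void __iomem *example_gplr_for_line(void __iomem *reg_base,
					   unsigned int offset)
{
	unsigned int bank = offset / 32;	/* line 70 -> bank 2 */
	unsigned int bit  = offset % 32;	/* line 70 -> bit 6  */

	(void)bit;	/* callers test BIT(bit) in the returned register */
	return reg_base + 0x004 /* GPLR */ + bank * 4;	/* line 70 -> 0x00c */
}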
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index 6c1cb3b8c..6ec144bae 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -61,6 +61,8 @@ o ` ~~~~\___/~~~~ ` controller in FPGA is ,.`
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
static void bgpio_write8(void __iomem *reg, unsigned long data)
{
@@ -569,6 +571,41 @@ static void __iomem *bgpio_map(struct platform_device *pdev,
return devm_ioremap_resource(&pdev->dev, r);
}
+#ifdef CONFIG_OF
+static const struct of_device_id bgpio_of_match[] = {
+ { .compatible = "wd,mbl-gpio" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bgpio_of_match);
+
+static struct bgpio_pdata *bgpio_parse_dt(struct platform_device *pdev,
+ unsigned long *flags)
+{
+ struct bgpio_pdata *pdata;
+
+ if (!of_match_device(bgpio_of_match, &pdev->dev))
+ return NULL;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(struct bgpio_pdata),
+ GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ pdata->base = -1;
+
+ if (of_property_read_bool(pdev->dev.of_node, "no-output"))
+ *flags |= BGPIOF_NO_OUTPUT;
+
+ return pdata;
+}
+#else
+static struct bgpio_pdata *bgpio_parse_dt(struct platform_device *pdev,
+ unsigned long *flags)
+{
+ return NULL;
+}
+#endif /* CONFIG_OF */
+
static int bgpio_pdev_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -579,10 +616,19 @@ static int bgpio_pdev_probe(struct platform_device *pdev)
void __iomem *dirout;
void __iomem *dirin;
unsigned long sz;
- unsigned long flags = pdev->id_entry->driver_data;
+ unsigned long flags = 0;
int err;
struct gpio_chip *gc;
- struct bgpio_pdata *pdata = dev_get_platdata(dev);
+ struct bgpio_pdata *pdata;
+
+ pdata = bgpio_parse_dt(pdev, &flags);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+
+ if (!pdata) {
+ pdata = dev_get_platdata(dev);
+ flags = pdev->id_entry->driver_data;
+ }
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dat");
if (!r)
@@ -646,6 +692,7 @@ MODULE_DEVICE_TABLE(platform, bgpio_id_table);
static struct platform_driver bgpio_driver = {
.driver = {
.name = "basic-mmio-gpio",
+ .of_match_table = of_match_ptr(bgpio_of_match),
},
.id_table = bgpio_id_table,
.probe = bgpio_pdev_probe,
diff --git a/drivers/gpio/gpio-palmas.c b/drivers/gpio/gpio-palmas.c
index e248707ca..839474430 100644
--- a/drivers/gpio/gpio-palmas.c
+++ b/drivers/gpio/gpio-palmas.c
@@ -208,7 +208,6 @@ static int palmas_gpio_probe(struct platform_device *pdev)
static struct platform_driver palmas_gpio_driver = {
.driver.name = "palmas-gpio",
- .driver.owner = THIS_MODULE,
.driver.of_match_table = of_palmas_gpio_match,
.probe = palmas_gpio_probe,
};
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 3745de659..02f2a5621 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -44,7 +44,7 @@
#define PCA_GPIO_MASK 0x00FF
#define PCA_INT 0x0100
-#define PCA_PCAL 0x0200
+#define PCA_PCAL 0x0200
#define PCA953X_TYPE 0x1000
#define PCA957X_TYPE 0x2000
#define PCA_TYPE_MASK 0xF000
@@ -67,6 +67,8 @@ static const struct i2c_device_id pca953x_id[] = {
{ "pca9575", 16 | PCA957X_TYPE | PCA_INT, },
{ "pca9698", 40 | PCA953X_TYPE, },
+ { "pcal9555a", 16 | PCA953X_TYPE | PCA_INT | PCA_PCAL, },
+
{ "max7310", 8 | PCA953X_TYPE, },
{ "max7312", 16 | PCA953X_TYPE | PCA_INT, },
{ "max7313", 16 | PCA953X_TYPE | PCA_INT, },
@@ -135,7 +137,7 @@ static int pca953x_read_single(struct pca953x_chip *chip, int reg, u32 *val,
static int pca953x_write_single(struct pca953x_chip *chip, int reg, u32 val,
int off)
{
- int ret = 0;
+ int ret;
int bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ);
int offset = off / BANK_SZ;
@@ -163,10 +165,13 @@ static int pca953x_write_regs(struct pca953x_chip *chip, int reg, u8 *val)
NBANK(chip), val);
} else {
switch (chip->chip_type) {
- case PCA953X_TYPE:
- ret = i2c_smbus_write_word_data(chip->client,
- reg << 1, cpu_to_le16(get_unaligned((u16 *)val)));
+ case PCA953X_TYPE: {
+ __le16 word = cpu_to_le16(get_unaligned((u16 *)val));
+
+ ret = i2c_smbus_write_word_data(chip->client, reg << 1,
+ (__force u16)word);
break;
+ }
case PCA957X_TYPE:
ret = i2c_smbus_write_byte_data(chip->client, reg << 1,
val[0]);
@@ -235,7 +240,6 @@ static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
goto exit;
chip->reg_direction[off / BANK_SZ] = reg_val;
- ret = 0;
exit:
mutex_unlock(&chip->i2c_lock);
return ret;
@@ -286,7 +290,6 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
goto exit;
chip->reg_direction[off / BANK_SZ] = reg_val;
- ret = 0;
exit:
mutex_unlock(&chip->i2c_lock);
return ret;
@@ -351,7 +354,6 @@ exit:
mutex_unlock(&chip->i2c_lock);
}
-
static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
unsigned long *mask, unsigned long *bits)
{
@@ -820,7 +822,7 @@ static int pca953x_remove(struct i2c_client *client)
{
struct pca953x_platform_data *pdata = dev_get_platdata(&client->dev);
struct pca953x_chip *chip = i2c_get_clientdata(client);
- int ret = 0;
+ int ret;
if (pdata && pdata->teardown) {
ret = pdata->teardown(client, chip->gpio_chip.base,
@@ -861,6 +863,7 @@ static const struct of_device_id pca953x_dt_ids[] = {
{ .compatible = "maxim,max7315", .data = OF_953X( 8, PCA_INT), },
{ .compatible = "ti,pca6107", .data = OF_953X( 8, PCA_INT), },
+ { .compatible = "ti,pca9536", .data = OF_953X( 4, 0), },
{ .compatible = "ti,tca6408", .data = OF_953X( 8, PCA_INT), },
{ .compatible = "ti,tca6416", .data = OF_953X(16, PCA_INT), },
{ .compatible = "ti,tca6424", .data = OF_953X(24, PCA_INT), },
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 169c09aa3..d168410e2 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -440,6 +440,14 @@ static int pcf857x_remove(struct i2c_client *client)
return status;
}
+static void pcf857x_shutdown(struct i2c_client *client)
+{
+ struct pcf857x *gpio = i2c_get_clientdata(client);
+
+ /* Drive all the I/O lines high */
+ gpio->write(gpio->client, BIT(gpio->chip.ngpio) - 1);
+}
+
static struct i2c_driver pcf857x_driver = {
.driver = {
.name = "pcf857x",
@@ -447,6 +455,7 @@ static struct i2c_driver pcf857x_driver = {
},
.probe = pcf857x_probe,
.remove = pcf857x_remove,
+ .shutdown = pcf857x_shutdown,
.id_table = pcf857x_id,
};
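
The new shutdown hook leans on the PCF857x quasi-bidirectional I/O model, where writing 1 releases a line to its weak pull-up, so BIT(ngpio) - 1 yields an all-lines-high mask for whatever width the part has. A quick worked sketch of that expression (part widths as listed in the driver's ID table):

/* Hedged sketch: BIT(ngpio) - 1 as an all-lines-high mask. */
#include <linux/bitops.h>

/* 8-line parts:  BIT(8)  - 1 = 0x00ff */
/* 16-line parts: BIT(16) - 1 = 0xffff */
static unsigned int all_lines_high(unsigned int ngpio)
{
	return BIT(ngpio) - 1;
}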
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 681c93fb9..b96e0b466 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -335,6 +335,9 @@ static const struct of_device_id gpio_rcar_of_table[] = {
.compatible = "renesas,gpio-r8a7791",
.data = &gpio_rcar_info_gen2,
}, {
+ .compatible = "renesas,gpio-r8a7792",
+ .data = &gpio_rcar_info_gen2,
+ }, {
.compatible = "renesas,gpio-r8a7793",
.data = &gpio_rcar_info_gen2,
}, {
diff --git a/drivers/gpio/gpio-rdc321x.c b/drivers/gpio/gpio-rdc321x.c
index ec945b90f..cbf0f9e64 100644
--- a/drivers/gpio/gpio-rdc321x.c
+++ b/drivers/gpio/gpio-rdc321x.c
@@ -200,7 +200,6 @@ static int rdc321x_gpio_probe(struct platform_device *pdev)
static struct platform_driver rdc321x_gpio_driver = {
.driver.name = "rdc321x-gpio",
- .driver.owner = THIS_MODULE,
.probe = rdc321x_gpio_probe,
};
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index 0c99e8fb9..8d8ee0ebf 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -155,7 +155,7 @@ static int sa1100_gpio_irqdomain_map(struct irq_domain *d,
{
irq_set_chip_and_handler(irq, &sa1100_gpio_irq_chip,
handle_edge_irq);
- irq_set_noprobe(irq);
+ irq_set_probe(irq);
return 0;
}
diff --git a/drivers/gpio/gpio-sch311x.c b/drivers/gpio/gpio-sch311x.c
index a03b38ee2..b96990c26 100644
--- a/drivers/gpio/gpio-sch311x.c
+++ b/drivers/gpio/gpio-sch311x.c
@@ -296,7 +296,6 @@ static int sch311x_gpio_remove(struct platform_device *pdev)
static struct platform_driver sch311x_gpio_driver = {
.driver.name = DRV_NAME,
- .driver.owner = THIS_MODULE,
.probe = sch311x_gpio_probe,
.remove = sch311x_gpio_remove,
};
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index 6f7af28b8..f675132de 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -68,6 +68,22 @@ static void stmpe_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
stmpe_reg_write(stmpe, reg, mask);
}
+static int stmpe_gpio_get_direction(struct gpio_chip *chip,
+ unsigned offset)
+{
+ struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(chip);
+ struct stmpe *stmpe = stmpe_gpio->stmpe;
+ u8 reg = stmpe->regs[STMPE_IDX_GPDR_LSB] - (offset / 8);
+ u8 mask = 1 << (offset % 8);
+ int ret;
+
+ ret = stmpe_reg_read(stmpe, reg);
+ if (ret < 0)
+ return ret;
+
+ return !(ret & mask);
+}
+
static int stmpe_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int val)
{
@@ -106,6 +122,7 @@ static int stmpe_gpio_request(struct gpio_chip *chip, unsigned offset)
static struct gpio_chip template_chip = {
.label = "stmpe",
.owner = THIS_MODULE,
+ .get_direction = stmpe_gpio_get_direction,
.direction_input = stmpe_gpio_direction_input,
.get = stmpe_gpio_get,
.direction_output = stmpe_gpio_direction_output,
@@ -416,7 +433,6 @@ static struct platform_driver stmpe_gpio_driver = {
.driver = {
.suppress_bind_attrs = true,
.name = "stmpe-gpio",
- .owner = THIS_MODULE,
},
.probe = stmpe_gpio_probe,
};
diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c
index 24b6d643e..537cec758 100644
--- a/drivers/gpio/gpio-syscon.c
+++ b/drivers/gpio/gpio-syscon.c
@@ -129,7 +129,7 @@ static int syscon_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int val)
static const struct syscon_gpio_data clps711x_mctrl_gpio = {
/* ARM CLPS711X SYSFLG1 Bits 8-10 */
- .compatible = "cirrus,clps711x-syscon1",
+ .compatible = "cirrus,ep7209-syscon1",
.flags = GPIO_SYSCON_FEAT_IN,
.bit_count = 3,
.dat_bit_offset = 0x40 * 8 + 8,
@@ -168,7 +168,7 @@ static const struct syscon_gpio_data keystone_dsp_gpio = {
static const struct of_device_id syscon_gpio_ids[] = {
{
- .compatible = "cirrus,clps711x-mctrl-gpio",
+ .compatible = "cirrus,ep7209-mctrl-gpio",
.data = &clps711x_mctrl_gpio,
},
{
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index 2e35ed3ab..8b3659352 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -343,7 +343,6 @@ static int tc3589x_gpio_probe(struct platform_device *pdev)
static struct platform_driver tc3589x_gpio_driver = {
.driver.name = "tc3589x-gpio",
- .driver.owner = THIS_MODULE,
.probe = tc3589x_gpio_probe,
};
diff --git a/drivers/gpio/gpio-tps65218.c b/drivers/gpio/gpio-tps65218.c
index 0eaeac8de..1c09a19ae 100644
--- a/drivers/gpio/gpio-tps65218.c
+++ b/drivers/gpio/gpio-tps65218.c
@@ -230,6 +230,12 @@ static const struct of_device_id tps65218_dt_match[] = {
};
MODULE_DEVICE_TABLE(of, tps65218_dt_match);
+static const struct platform_device_id tps65218_gpio_id_table[] = {
+ { "tps65218-gpio", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, tps65218_gpio_id_table);
+
static struct platform_driver tps65218_gpio_driver = {
.driver = {
.name = "tps65218-gpio",
@@ -237,6 +243,7 @@ static struct platform_driver tps65218_gpio_driver = {
},
.probe = tps65218_gpio_probe,
.remove = tps65218_gpio_remove,
+ .id_table = tps65218_gpio_id_table,
};
module_platform_driver(tps65218_gpio_driver);
diff --git a/drivers/gpio/gpio-tps6586x.c b/drivers/gpio/gpio-tps6586x.c
index 6b15e68a3..042b9a207 100644
--- a/drivers/gpio/gpio-tps6586x.c
+++ b/drivers/gpio/gpio-tps6586x.c
@@ -131,7 +131,6 @@ static int tps6586x_gpio_probe(struct platform_device *pdev)
static struct platform_driver tps6586x_gpio_driver = {
.driver.name = "tps6586x-gpio",
- .driver.owner = THIS_MODULE,
.probe = tps6586x_gpio_probe,
};
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index 0ae6a5a54..e63d7dabf 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -184,7 +184,6 @@ skip_init:
static struct platform_driver tps65910_gpio_driver = {
.driver.name = "tps65910-gpio",
- .driver.owner = THIS_MODULE,
.probe = tps65910_gpio_probe,
};
diff --git a/drivers/gpio/gpio-viperboard.c b/drivers/gpio/gpio-viperboard.c
index dec47aafd..e6d1328dd 100644
--- a/drivers/gpio/gpio-viperboard.c
+++ b/drivers/gpio/gpio-viperboard.c
@@ -440,7 +440,6 @@ static int vprbrd_gpio_probe(struct platform_device *pdev)
static struct platform_driver vprbrd_gpio_driver = {
.driver.name = "viperboard-gpio",
- .driver.owner = THIS_MODULE,
.probe = vprbrd_gpio_probe,
};
diff --git a/drivers/gpio/gpio-wm831x.c b/drivers/gpio/gpio-wm831x.c
index 41ec78340..21f97bcd0 100644
--- a/drivers/gpio/gpio-wm831x.c
+++ b/drivers/gpio/gpio-wm831x.c
@@ -296,7 +296,6 @@ static int wm831x_gpio_probe(struct platform_device *pdev)
static struct platform_driver wm831x_gpio_driver = {
.driver.name = "wm831x-gpio",
- .driver.owner = THIS_MODULE,
.probe = wm831x_gpio_probe,
};
diff --git a/drivers/gpio/gpio-wm8350.c b/drivers/gpio/gpio-wm8350.c
index 07d45a3b2..e9765707d 100644
--- a/drivers/gpio/gpio-wm8350.c
+++ b/drivers/gpio/gpio-wm8350.c
@@ -139,7 +139,6 @@ static int wm8350_gpio_probe(struct platform_device *pdev)
static struct platform_driver wm8350_gpio_driver = {
.driver.name = "wm8350-gpio",
- .driver.owner = THIS_MODULE,
.probe = wm8350_gpio_probe,
};
diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
index 744af388c..2457aac85 100644
--- a/drivers/gpio/gpio-wm8994.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -299,7 +299,6 @@ static int wm8994_gpio_probe(struct platform_device *pdev)
static struct platform_driver wm8994_gpio_driver = {
.driver.name = "wm8994-gpio",
- .driver.owner = THIS_MODULE,
.probe = wm8994_gpio_probe,
};
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index d0fbb7f99..14b2a6233 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -133,6 +133,53 @@ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
}
/**
+ * xgpio_set_multiple - Write the specified signals of the GPIO device.
+ * @gc: Pointer to gpio_chip device structure.
+ * @mask: Mask of the GPIOS to modify.
+ * @bits: Value to be written to each GPIO
+ *
+ * This function writes the given values into the selected signals of the
+ * GPIO device.
+ */
+static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
+{
+ unsigned long flags;
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct xgpio_instance *chip = gpiochip_get_data(gc);
+ int index = xgpio_index(chip, 0);
+ int offset, i;
+
+ spin_lock_irqsave(&chip->gpio_lock[index], flags);
+
+ /* Write to GPIO signals */
+ for (i = 0; i < gc->ngpio; i++) {
+ if (*mask == 0)
+ break;
+ if (index != xgpio_index(chip, i)) {
+ xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET +
+ xgpio_regoffset(chip, i),
+ chip->gpio_state[index]);
+ spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+ index = xgpio_index(chip, i);
+ spin_lock_irqsave(&chip->gpio_lock[index], flags);
+ }
+ if (__test_and_clear_bit(i, mask)) {
+ offset = xgpio_offset(chip, i);
+ if (test_bit(i, bits))
+ chip->gpio_state[index] |= BIT(offset);
+ else
+ chip->gpio_state[index] &= ~BIT(offset);
+ }
+ }
+
+ xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET +
+ xgpio_regoffset(chip, i), chip->gpio_state[index]);
+
+ spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+}
+
+/**
* xgpio_dir_in - Set the direction of the specified GPIO signal as input.
* @gc: Pointer to gpio_chip device structure.
* @gpio: GPIO signal number.
@@ -306,6 +353,7 @@ static int xgpio_probe(struct platform_device *pdev)
chip->mmchip.gc.direction_output = xgpio_dir_out;
chip->mmchip.gc.get = xgpio_get;
chip->mmchip.gc.set = xgpio_set;
+ chip->mmchip.gc.set_multiple = xgpio_set_multiple;
chip->mmchip.save_regs = xgpio_save_regs;
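
With set_multiple wired up, a consumer that owns several lines on this chip can update them in one call and let the driver batch the register writes per bank. A minimal consumer-side sketch against the gpiod API of this series (the "led" con_id and the line count are illustrative assumptions):

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int demo_set_leds(struct device *dev)
{
	struct gpio_descs *leds;
	int values[3] = { 1, 0, 1 };

	/* Assumes the device carries a three-entry "led-gpios" property */
	leds = devm_gpiod_get_array(dev, "led", GPIOD_OUT_LOW);
	if (IS_ERR(leds))
		return PTR_ERR(leds);

	/* One call -> few register writes via the chip's set_multiple() */
	gpiod_set_array_value(leds->ndescs, leds->desc, values);
	return 0;
}
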
diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c
index 1a33a19d9..4620d050e 100644
--- a/drivers/gpio/gpio-xlp.c
+++ b/drivers/gpio/gpio-xlp.c
@@ -19,6 +19,7 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/acpi.h>
/*
* XLP GPIO has multiple 32 bit registers for each feature where each register
@@ -299,7 +300,6 @@ static int xlp_gpio_probe(struct platform_device *pdev)
struct gpio_chip *gc;
struct resource *iores;
struct xlp_gpio_priv *priv;
- const struct of_device_id *of_id;
void __iomem *gpio_base;
int irq_base, irq, err;
int ngpio;
@@ -321,13 +321,26 @@ static int xlp_gpio_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- of_id = of_match_device(xlp_gpio_of_ids, &pdev->dev);
- if (!of_id) {
- dev_err(&pdev->dev, "Failed to get soc type!\n");
- return -ENODEV;
- }
+ if (pdev->dev.of_node) {
+ const struct of_device_id *of_id;
- soc_type = (uintptr_t) of_id->data;
+ of_id = of_match_device(xlp_gpio_of_ids, &pdev->dev);
+ if (!of_id) {
+ dev_err(&pdev->dev, "Unable to match OF ID\n");
+ return -ENODEV;
+ }
+ soc_type = (uintptr_t) of_id->data;
+ } else {
+ const struct acpi_device_id *acpi_id;
+
+ acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
+ &pdev->dev);
+ if (!acpi_id || !acpi_id->driver_data) {
+ dev_err(&pdev->dev, "Unable to match ACPI ID\n");
+ return -ENODEV;
+ }
+ soc_type = (uintptr_t) acpi_id->driver_data;
+ }
switch (soc_type) {
case XLP_GPIO_VARIANT_XLP832:
@@ -388,14 +401,16 @@ static int xlp_gpio_probe(struct platform_device *pdev)
gc->get = xlp_gpio_get;
spin_lock_init(&priv->lock);
- /* XLP has fixed IRQ range for GPIO interrupts */
- if (soc_type == GPIO_VARIANT_VULCAN)
- irq_base = irq_alloc_descs(-1, 0, gc->ngpio, 0);
- else
+
+ /* XLP(MIPS) has fixed range for GPIO IRQs, Vulcan(ARM64) does not */
+ if (soc_type != GPIO_VARIANT_VULCAN) {
irq_base = irq_alloc_descs(-1, XLP_GPIO_IRQ_BASE, gc->ngpio, 0);
- if (irq_base < 0) {
- dev_err(&pdev->dev, "Failed to allocate IRQ numbers\n");
- return irq_base;
+ if (irq_base < 0) {
+ dev_err(&pdev->dev, "Failed to allocate IRQ numbers\n");
+ return irq_base;
+ }
+ } else {
+ irq_base = 0;
}
err = gpiochip_add_data(gc, priv);
@@ -423,10 +438,19 @@ out_free_desc:
return err;
}
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xlp_gpio_acpi_match[] = {
+ { "BRCM9006", GPIO_VARIANT_VULCAN },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, xlp_gpio_acpi_match);
+#endif
+
static struct platform_driver xlp_gpio_driver = {
.driver = {
.name = "xlp-gpio",
.of_match_table = xlp_gpio_of_ids,
+ .acpi_match_table = ACPI_PTR(xlp_gpio_acpi_match),
},
.probe = xlp_gpio_probe,
};
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 2dc52585e..af514618d 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -836,6 +836,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
}
acpi_gpiochip_request_regions(acpi_gpio);
+ acpi_walk_dep_device_list(handle);
}
void acpi_gpiochip_remove(struct gpio_chip *chip)
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 4aabddb38..a28feb3ed 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -16,7 +16,6 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/io.h>
-#include <linux/io-mapping.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -27,38 +26,30 @@
#include "gpiolib.h"
-/* Private data structure for of_gpiochip_find_and_xlate */
-struct gg_data {
- enum of_gpio_flags *flags;
- struct of_phandle_args gpiospec;
+static int of_gpiochip_match_node(struct gpio_chip *chip, void *data)
+{
+ return chip->gpiodev->dev.of_node == data;
+}
- struct gpio_desc *out_gpio;
-};
+static struct gpio_chip *of_find_gpiochip_by_node(struct device_node *np)
+{
+ return gpiochip_find(np, of_gpiochip_match_node);
+}
-/* Private function for resolving node pointer to gpio_chip */
-static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
+static struct gpio_desc *of_xlate_and_get_gpiod_flags(struct gpio_chip *chip,
+ struct of_phandle_args *gpiospec,
+ enum of_gpio_flags *flags)
{
- struct gg_data *gg_data = data;
int ret;
- if ((gc->of_node != gg_data->gpiospec.np) ||
- (gc->of_gpio_n_cells != gg_data->gpiospec.args_count) ||
- (!gc->of_xlate))
- return false;
-
- ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags);
- if (ret < 0) {
- /* We've found a gpio chip, but the translation failed.
- * Store translation error in out_gpio.
- * Return false to keep looking, as more than one gpio chip
- * could be registered per of-node.
- */
- gg_data->out_gpio = ERR_PTR(ret);
- return false;
- }
-
- gg_data->out_gpio = gpiochip_get_desc(gc, ret);
- return true;
+ if (chip->of_gpio_n_cells != gpiospec->args_count)
+ return ERR_PTR(-EINVAL);
+
+ ret = chip->of_xlate(chip, gpiospec, flags);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ return gpiochip_get_desc(chip, ret);
}
/**
@@ -75,34 +66,37 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
const char *propname, int index, enum of_gpio_flags *flags)
{
- /* Return -EPROBE_DEFER to support probe() functions to be called
- * later when the GPIO actually becomes available
- */
- struct gg_data gg_data = {
- .flags = flags,
- .out_gpio = ERR_PTR(-EPROBE_DEFER)
- };
+ struct of_phandle_args gpiospec;
+ struct gpio_chip *chip;
+ struct gpio_desc *desc;
int ret;
- /* .of_xlate might decide to not fill in the flags, so clear it. */
- if (flags)
- *flags = 0;
-
ret = of_parse_phandle_with_args(np, propname, "#gpio-cells", index,
- &gg_data.gpiospec);
+ &gpiospec);
if (ret) {
pr_debug("%s: can't parse '%s' property of node '%s[%d]'\n",
__func__, propname, np->full_name, index);
return ERR_PTR(ret);
}
- gpiochip_find(&gg_data, of_gpiochip_find_and_xlate);
+ chip = of_find_gpiochip_by_node(gpiospec.np);
+ if (!chip) {
+ desc = ERR_PTR(-EPROBE_DEFER);
+ goto out;
+ }
+
+ desc = of_xlate_and_get_gpiod_flags(chip, &gpiospec, flags);
+ if (IS_ERR(desc))
+ goto out;
- of_node_put(gg_data.gpiospec.np);
pr_debug("%s: parsed '%s' property of node '%s[%d]' - status (%d)\n",
__func__, propname, np->full_name, index,
- PTR_ERR_OR_ZERO(gg_data.out_gpio));
- return gg_data.out_gpio;
+ PTR_ERR_OR_ZERO(desc));
+
+out:
+ of_node_put(gpiospec.np);
+
+ return desc;
}
int of_get_named_gpio_flags(struct device_node *np, const char *list_name,
@@ -122,6 +116,7 @@ EXPORT_SYMBOL(of_get_named_gpio_flags);
/**
* of_parse_own_gpio() - Get a GPIO hog descriptor, names and flags for GPIO API
* @np: device node to get GPIO from
+ * @chip: GPIO chip whose hog is parsed
* @name: GPIO line name
* @lflags: gpio_lookup_flags - returned from of_find_gpio() or
* of_parse_own_gpio()
@@ -131,19 +126,19 @@ EXPORT_SYMBOL(of_get_named_gpio_flags);
* value on the error condition.
*/
static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
+ struct gpio_chip *chip,
const char **name,
enum gpio_lookup_flags *lflags,
enum gpiod_flags *dflags)
{
struct device_node *chip_np;
enum of_gpio_flags xlate_flags;
- struct gg_data gg_data = {
- .flags = &xlate_flags,
- };
+ struct of_phandle_args gpiospec;
+ struct gpio_desc *desc;
u32 tmp;
- int i, ret;
+ int ret;
- chip_np = np->parent;
+ chip_np = chip->of_node;
if (!chip_np)
return ERR_PTR(-EINVAL);
@@ -155,25 +150,16 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
if (ret)
return ERR_PTR(ret);
- if (tmp > MAX_PHANDLE_ARGS)
- return ERR_PTR(-EINVAL);
+ gpiospec.np = chip_np;
+ gpiospec.args_count = tmp;
- gg_data.gpiospec.args_count = tmp;
- gg_data.gpiospec.np = chip_np;
- for (i = 0; i < tmp; i++) {
- ret = of_property_read_u32_index(np, "gpios", i,
- &gg_data.gpiospec.args[i]);
- if (ret)
- return ERR_PTR(ret);
- }
+ ret = of_property_read_u32_array(np, "gpios", gpiospec.args, tmp);
+ if (ret)
+ return ERR_PTR(ret);
- gpiochip_find(&gg_data, of_gpiochip_find_and_xlate);
- if (!gg_data.out_gpio) {
- if (np->parent == np)
- return ERR_PTR(-ENXIO);
- else
- return ERR_PTR(-EINVAL);
- }
+ desc = of_xlate_and_get_gpiod_flags(chip, &gpiospec, &xlate_flags);
+ if (IS_ERR(desc))
+ return desc;
if (xlate_flags & OF_GPIO_ACTIVE_LOW)
*lflags |= GPIO_ACTIVE_LOW;
@@ -186,14 +172,14 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
*dflags |= GPIOD_OUT_HIGH;
else {
pr_warn("GPIO line %d (%s): no hogging state specified, bailing out\n",
- desc_to_gpio(gg_data.out_gpio), np->name);
+ desc_to_gpio(desc), np->name);
return ERR_PTR(-EINVAL);
}
if (name && of_property_read_string(np, "line-name", name))
*name = np->name;
- return gg_data.out_gpio;
+ return desc;
}
/**
@@ -262,7 +248,7 @@ static int of_gpiochip_scan_gpios(struct gpio_chip *chip)
if (!of_property_read_bool(np, "gpio-hog"))
continue;
- desc = of_parse_own_gpio(np, &name, &lflags, &dflags);
+ desc = of_parse_own_gpio(np, chip, &name, &lflags, &dflags);
if (IS_ERR(desc))
continue;
@@ -410,6 +396,7 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
break;
pctldev = of_pinctrl_get(pinspec.np);
+ of_node_put(pinspec.np);
if (!pctldev)
return -EPROBE_DEFER;
@@ -487,6 +474,9 @@ int of_gpiochip_add(struct gpio_chip *chip)
chip->of_xlate = of_gpio_simple_xlate;
}
+ if (chip->of_gpio_n_cells > MAX_PHANDLE_ARGS)
+ return -EINVAL;
+
status = of_gpiochip_add_pin_range(chip);
if (status)
return status;
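
The helpers above lean on gpiochip_find(), which walks the registered chips and returns the first one for which the match callback returns true. The same primitive works with other keys; a hypothetical lookup by chip label:

#include <linux/gpio/driver.h>
#include <linux/string.h>

static int match_label(struct gpio_chip *chip, void *data)
{
	return chip->label && !strcmp(chip->label, data);
}

static struct gpio_chip *find_chip_by_label(const char *label)
{
	/* Returns NULL when no registered chip carries this label */
	return gpiochip_find((void *)label, match_label);
}
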
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index be74bd370..53ff25ac6 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -16,11 +16,14 @@
#include <linux/gpio/driver.h>
#include <linux/gpio/machine.h>
#include <linux/pinctrl/consumer.h>
-#include <linux/idr.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
+#include <linux/anon_inodes.h>
+#include <linux/kfifo.h>
+#include <linux/poll.h>
+#include <linux/timekeeping.h>
#include <uapi/linux/gpio.h>
#include "gpiolib.h"
@@ -310,6 +313,497 @@ static int gpiochip_set_desc_names(struct gpio_chip *gc)
return 0;
}
+/*
+ * GPIO line handle management
+ */
+
+/**
+ * struct linehandle_state - contains the state of a userspace handle
+ * @gdev: the GPIO device the handle pertains to
+ * @label: consumer label used to tag descriptors
+ * @descs: the GPIO descriptors held by this handle
+ * @numdescs: the number of descriptors held in the descs array
+ */
+struct linehandle_state {
+ struct gpio_device *gdev;
+ const char *label;
+ struct gpio_desc *descs[GPIOHANDLES_MAX];
+ u32 numdescs;
+};
+
+static long linehandle_ioctl(struct file *filep, unsigned int cmd,
+ unsigned long arg)
+{
+ struct linehandle_state *lh = filep->private_data;
+ void __user *ip = (void __user *)arg;
+ struct gpiohandle_data ghd;
+ int i;
+
+ if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
+ int val;
+
+ /* TODO: check if descriptors are really input */
+ for (i = 0; i < lh->numdescs; i++) {
+ val = gpiod_get_value_cansleep(lh->descs[i]);
+ if (val < 0)
+ return val;
+ ghd.values[i] = val;
+ }
+
+ if (copy_to_user(ip, &ghd, sizeof(ghd)))
+ return -EFAULT;
+
+ return 0;
+ } else if (cmd == GPIOHANDLE_SET_LINE_VALUES_IOCTL) {
+ int vals[GPIOHANDLES_MAX];
+
+ /* TODO: check if descriptors are really output */
+ if (copy_from_user(&ghd, ip, sizeof(ghd)))
+ return -EFAULT;
+
+ /* Clamp all values to [0,1] */
+ for (i = 0; i < lh->numdescs; i++)
+ vals[i] = !!ghd.values[i];
+
+ /* Reuse the array setting function */
+ gpiod_set_array_value_complex(false,
+ true,
+ lh->numdescs,
+ lh->descs,
+ vals);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+#ifdef CONFIG_COMPAT
+static long linehandle_ioctl_compat(struct file *filep, unsigned int cmd,
+ unsigned long arg)
+{
+ return linehandle_ioctl(filep, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static int linehandle_release(struct inode *inode, struct file *filep)
+{
+ struct linehandle_state *lh = filep->private_data;
+ struct gpio_device *gdev = lh->gdev;
+ int i;
+
+ for (i = 0; i < lh->numdescs; i++)
+ gpiod_free(lh->descs[i]);
+ kfree(lh->label);
+ kfree(lh);
+ put_device(&gdev->dev);
+ return 0;
+}
+
+static const struct file_operations linehandle_fileops = {
+ .release = linehandle_release,
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+ .unlocked_ioctl = linehandle_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = linehandle_ioctl_compat,
+#endif
+};
+
+static int linehandle_create(struct gpio_device *gdev, void __user *ip)
+{
+ struct gpiohandle_request handlereq;
+ struct linehandle_state *lh;
+ int fd, i, ret;
+
+ if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
+ return -EFAULT;
+ if ((handlereq.lines == 0) || (handlereq.lines > GPIOHANDLES_MAX))
+ return -EINVAL;
+
+ lh = kzalloc(sizeof(*lh), GFP_KERNEL);
+ if (!lh)
+ return -ENOMEM;
+ lh->gdev = gdev;
+ get_device(&gdev->dev);
+
+ /* Make sure this is terminated */
+ handlereq.consumer_label[sizeof(handlereq.consumer_label)-1] = '\0';
+ if (strlen(handlereq.consumer_label)) {
+ lh->label = kstrdup(handlereq.consumer_label,
+ GFP_KERNEL);
+ if (!lh->label) {
+ ret = -ENOMEM;
+ goto out_free_lh;
+ }
+ }
+
+ /* Request each GPIO */
+ for (i = 0; i < handlereq.lines; i++) {
+ u32 offset = handlereq.lineoffsets[i];
+ u32 lflags = handlereq.flags;
+ struct gpio_desc *desc;
+
+ desc = &gdev->descs[offset];
+ ret = gpiod_request(desc, lh->label);
+ if (ret)
+ goto out_free_descs;
+ lh->descs[i] = desc;
+
+ if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
+ set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ if (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN)
+ set_bit(FLAG_OPEN_DRAIN, &desc->flags);
+ if (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)
+ set_bit(FLAG_OPEN_SOURCE, &desc->flags);
+
+ /*
+ * Lines have to be requested explicitly for input
+ * or output, else the line will be treated "as is".
+ */
+ if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
+ int val = !!handlereq.default_values[i];
+
+ ret = gpiod_direction_output(desc, val);
+ if (ret)
+ goto out_free_descs;
+ } else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
+ ret = gpiod_direction_input(desc);
+ if (ret)
+ goto out_free_descs;
+ }
+ dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
+ offset);
+ }
+ /* Let i point at the last handle */
+ i--;
+ lh->numdescs = handlereq.lines;
+
+ fd = anon_inode_getfd("gpio-linehandle",
+ &linehandle_fileops,
+ lh,
+ O_RDONLY | O_CLOEXEC);
+ if (fd < 0) {
+ ret = fd;
+ goto out_free_descs;
+ }
+
+ handlereq.fd = fd;
+ if (copy_to_user(ip, &handlereq, sizeof(handlereq))) {
+ ret = -EFAULT;
+ goto out_free_descs;
+ }
+
+ dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
+ lh->numdescs);
+
+ return 0;
+
+out_free_descs:
+ for (; i >= 0; i--)
+ gpiod_free(lh->descs[i]);
+ kfree(lh->label);
+out_free_lh:
+ kfree(lh);
+ put_device(&gdev->dev);
+ return ret;
+}
+
+/*
+ * GPIO line event management
+ */
+
+/**
+ * struct lineevent_state - contains the state of a userspace event
+ * @gdev: the GPIO device the event pertains to
+ * @label: consumer label used to tag descriptors
+ * @desc: the GPIO descriptor held by this event
+ * @eflags: the event flags this line was requested with
+ * @irq: the interrupt that triggers in response to events on this GPIO
+ * @wait: wait queue that handles blocking reads of events
+ * @events: KFIFO for the GPIO events
+ * @read_lock: mutex lock to protect reads from colliding with adding
+ * new events to the FIFO
+ */
+struct lineevent_state {
+ struct gpio_device *gdev;
+ const char *label;
+ struct gpio_desc *desc;
+ u32 eflags;
+ int irq;
+ wait_queue_head_t wait;
+ DECLARE_KFIFO(events, struct gpioevent_data, 16);
+ struct mutex read_lock;
+};
+
+static unsigned int lineevent_poll(struct file *filep,
+ struct poll_table_struct *wait)
+{
+ struct lineevent_state *le = filep->private_data;
+ unsigned int events = 0;
+
+ poll_wait(filep, &le->wait, wait);
+
+ if (!kfifo_is_empty(&le->events))
+ events = POLLIN | POLLRDNORM;
+
+ return events;
+}
+
+
+static ssize_t lineevent_read(struct file *filep,
+ char __user *buf,
+ size_t count,
+ loff_t *f_ps)
+{
+ struct lineevent_state *le = filep->private_data;
+ unsigned int copied;
+ int ret;
+
+ if (count < sizeof(struct gpioevent_data))
+ return -EINVAL;
+
+ do {
+ if (kfifo_is_empty(&le->events)) {
+ if (filep->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(le->wait,
+ !kfifo_is_empty(&le->events));
+ if (ret)
+ return ret;
+ }
+
+ if (mutex_lock_interruptible(&le->read_lock))
+ return -ERESTARTSYS;
+ ret = kfifo_to_user(&le->events, buf, count, &copied);
+ mutex_unlock(&le->read_lock);
+
+ if (ret)
+ return ret;
+
+ /*
+	 * If we couldn't read anything from the fifo (a different
+	 * thread might have been faster) we return -EAGAIN if the
+	 * file descriptor is non-blocking; otherwise we go back to
+	 * sleep and wait for more data to arrive.
+ */
+ if (copied == 0 && (filep->f_flags & O_NONBLOCK))
+ return -EAGAIN;
+
+ } while (copied == 0);
+
+ return copied;
+}
+
+static int lineevent_release(struct inode *inode, struct file *filep)
+{
+ struct lineevent_state *le = filep->private_data;
+ struct gpio_device *gdev = le->gdev;
+
+ free_irq(le->irq, le);
+ gpiod_free(le->desc);
+ kfree(le->label);
+ kfree(le);
+ put_device(&gdev->dev);
+ return 0;
+}
+
+static long lineevent_ioctl(struct file *filep, unsigned int cmd,
+ unsigned long arg)
+{
+ struct lineevent_state *le = filep->private_data;
+ void __user *ip = (void __user *)arg;
+ struct gpiohandle_data ghd;
+
+ /*
+ * We can get the value for an event line but not set it,
+ * because it is input by definition.
+ */
+ if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
+ int val;
+
+ val = gpiod_get_value_cansleep(le->desc);
+ if (val < 0)
+ return val;
+ ghd.values[0] = val;
+
+ if (copy_to_user(ip, &ghd, sizeof(ghd)))
+ return -EFAULT;
+
+ return 0;
+ }
+ return -EINVAL;
+}
+
+#ifdef CONFIG_COMPAT
+static long lineevent_ioctl_compat(struct file *filep, unsigned int cmd,
+ unsigned long arg)
+{
+ return lineevent_ioctl(filep, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static const struct file_operations lineevent_fileops = {
+ .release = lineevent_release,
+ .read = lineevent_read,
+ .poll = lineevent_poll,
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+ .unlocked_ioctl = lineevent_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = lineevent_ioctl_compat,
+#endif
+};
+
+static irqreturn_t lineevent_irq_thread(int irq, void *p)
+{
+ struct lineevent_state *le = p;
+ struct gpioevent_data ge;
+ int ret;
+
+ ge.timestamp = ktime_get_real_ns();
+
+ if (le->eflags & GPIOEVENT_REQUEST_BOTH_EDGES) {
+ int level = gpiod_get_value_cansleep(le->desc);
+
+ if (level)
+ /* Emit low-to-high event */
+ ge.id = GPIOEVENT_EVENT_RISING_EDGE;
+ else
+ /* Emit high-to-low event */
+ ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
+ } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
+ /* Emit low-to-high event */
+ ge.id = GPIOEVENT_EVENT_RISING_EDGE;
+ } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
+ /* Emit high-to-low event */
+ ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
+ } else {
+ return IRQ_NONE;
+ }
+
+ ret = kfifo_put(&le->events, ge);
+ if (ret != 0)
+ wake_up_poll(&le->wait, POLLIN);
+
+ return IRQ_HANDLED;
+}
+
+static int lineevent_create(struct gpio_device *gdev, void __user *ip)
+{
+ struct gpioevent_request eventreq;
+ struct lineevent_state *le;
+ struct gpio_desc *desc;
+ u32 offset;
+ u32 lflags;
+ u32 eflags;
+ int fd;
+ int ret;
+ int irqflags = 0;
+
+ if (copy_from_user(&eventreq, ip, sizeof(eventreq)))
+ return -EFAULT;
+
+ le = kzalloc(sizeof(*le), GFP_KERNEL);
+ if (!le)
+ return -ENOMEM;
+ le->gdev = gdev;
+ get_device(&gdev->dev);
+
+ /* Make sure this is terminated */
+ eventreq.consumer_label[sizeof(eventreq.consumer_label)-1] = '\0';
+ if (strlen(eventreq.consumer_label)) {
+ le->label = kstrdup(eventreq.consumer_label,
+ GFP_KERNEL);
+ if (!le->label) {
+ ret = -ENOMEM;
+ goto out_free_le;
+ }
+ }
+
+ offset = eventreq.lineoffset;
+ lflags = eventreq.handleflags;
+ eflags = eventreq.eventflags;
+
+ /* This is just wrong: we don't look for events on output lines */
+ if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
+ ret = -EINVAL;
+ goto out_free_label;
+ }
+
+ desc = &gdev->descs[offset];
+ ret = gpiod_request(desc, le->label);
+ if (ret)
+ goto out_free_desc;
+ le->desc = desc;
+ le->eflags = eflags;
+
+ if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
+ set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ if (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN)
+ set_bit(FLAG_OPEN_DRAIN, &desc->flags);
+ if (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)
+ set_bit(FLAG_OPEN_SOURCE, &desc->flags);
+
+ ret = gpiod_direction_input(desc);
+ if (ret)
+ goto out_free_desc;
+
+ le->irq = gpiod_to_irq(desc);
+ if (le->irq <= 0) {
+ ret = -ENODEV;
+ goto out_free_desc;
+ }
+
+ if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
+ irqflags |= IRQF_TRIGGER_RISING;
+ if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
+ irqflags |= IRQF_TRIGGER_FALLING;
+ irqflags |= IRQF_ONESHOT;
+ irqflags |= IRQF_SHARED;
+
+ INIT_KFIFO(le->events);
+ init_waitqueue_head(&le->wait);
+ mutex_init(&le->read_lock);
+
+ /* Request a thread to read the events */
+ ret = request_threaded_irq(le->irq,
+ NULL,
+ lineevent_irq_thread,
+ irqflags,
+ le->label,
+ le);
+ if (ret)
+ goto out_free_desc;
+
+ fd = anon_inode_getfd("gpio-event",
+ &lineevent_fileops,
+ le,
+ O_RDONLY | O_CLOEXEC);
+ if (fd < 0) {
+ ret = fd;
+ goto out_free_irq;
+ }
+
+ eventreq.fd = fd;
+ if (copy_to_user(ip, &eventreq, sizeof(eventreq))) {
+ ret = -EFAULT;
+ goto out_free_irq;
+ }
+
+ return 0;
+
+out_free_irq:
+ free_irq(le->irq, le);
+out_free_desc:
+ gpiod_free(le->desc);
+out_free_label:
+ kfree(le->label);
+out_free_le:
+ kfree(le);
+ put_device(&gdev->dev);
+ return ret;
+}
+
/**
* gpio_ioctl() - ioctl handler for the GPIO chardev
*/
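
From userspace, the line-handle half of this ABI is driven with two ioctls on the gpiochip character device: GPIO_GET_LINEHANDLE_IOCTL yields an anonymous file descriptor for a set of lines, and GPIOHANDLE_SET_LINE_VALUES_IOCTL writes them. A minimal sketch (device path and line offset are illustrative):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/gpio.h>

int demo_drive_line(void)
{
	struct gpiohandle_request req;
	struct gpiohandle_data data;
	int fd = open("/dev/gpiochip0", O_RDONLY);

	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.lineoffsets[0] = 4;	/* illustrative offset */
	req.lines = 1;
	req.flags = GPIOHANDLE_REQUEST_OUTPUT;
	strcpy(req.consumer_label, "demo");
	if (ioctl(fd, GPIO_GET_LINEHANDLE_IOCTL, &req) < 0)
		return -1;

	memset(&data, 0, sizeof(data));
	data.values[0] = 1;	/* drive the line high */
	return ioctl(req.fd, GPIOHANDLE_SET_LINE_VALUES_IOCTL, &data);
}
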
@@ -385,6 +879,10 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
return -EFAULT;
return 0;
+ } else if (cmd == GPIO_GET_LINEHANDLE_IOCTL) {
+ return linehandle_create(gdev, ip);
+ } else if (cmd == GPIO_GET_LINEEVENT_IOCTL) {
+ return lineevent_create(gdev, ip);
}
return -EINVAL;
}
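
The event half works the same way: GPIO_GET_LINEEVENT_IOCTL returns a descriptor whose read() blocks until an edge arrives, delivering one struct gpioevent_data per event. A userspace sketch, again with an illustrative path and offset:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/gpio.h>

int demo_wait_for_edge(void)
{
	struct gpioevent_request req;
	struct gpioevent_data event;
	int fd = open("/dev/gpiochip0", O_RDONLY);

	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.lineoffset = 4;	/* illustrative offset */
	req.handleflags = GPIOHANDLE_REQUEST_INPUT;
	req.eventflags = GPIOEVENT_REQUEST_BOTH_EDGES;
	strcpy(req.consumer_label, "demo-event");
	if (ioctl(fd, GPIO_GET_LINEEVENT_IOCTL, &req) < 0)
		return -1;

	/* Blocks until lineevent_irq_thread() queues an event */
	if (read(req.fd, &event, sizeof(event)) != sizeof(event))
		return -1;
	return event.id == GPIOEVENT_EVENT_RISING_EDGE;
}
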
@@ -548,13 +1046,14 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
if (chip->parent) {
gdev->dev.parent = chip->parent;
gdev->dev.of_node = chip->parent->of_node;
- } else {
+ }
+
#ifdef CONFIG_OF_GPIO
/* If the gpiochip has an assigned OF node this takes precedence */
- if (chip->of_node)
- gdev->dev.of_node = chip->of_node;
+ if (chip->of_node)
+ gdev->dev.of_node = chip->of_node;
#endif
- }
+
gdev->id = ida_simple_get(&gpio_ida, 0, 0, GFP_KERNEL);
if (gdev->id < 0) {
status = gdev->id;
@@ -2333,7 +2832,7 @@ static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx,
&of_flags);
- if (!IS_ERR(desc) || (PTR_ERR(desc) == -EPROBE_DEFER))
+ if (!IS_ERR(desc) || (PTR_ERR(desc) != -ENOENT))
break;
}
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index be43afb08..0238bf8bc 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -8,7 +8,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_lock.o drm_memory.o drm_drv.o drm_vm.o \
drm_scatter.o drm_pci.o \
drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
- drm_crtc.o drm_modes.o drm_edid.o \
+ drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
drm_trace_points.o drm_global.o drm_prime.o \
drm_rect.o drm_vma_manager.o drm_flip_work.o \
@@ -23,7 +23,8 @@ drm-$(CONFIG_AGP) += drm_agpsupport.o
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
- drm_kms_helper_common.o drm_dp_dual_mode_helper.o
+ drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
+ drm_simple_kms_helper.o drm_blend.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 56475b1f1..700c56baf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -85,8 +85,12 @@ extern int amdgpu_vm_debug;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_powerplay;
+extern int amdgpu_powercontainment;
extern unsigned amdgpu_pcie_gen_cap;
extern unsigned amdgpu_pcie_lane_cap;
+extern unsigned amdgpu_cg_mask;
+extern unsigned amdgpu_pg_mask;
+extern char *amdgpu_disable_cu;
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
@@ -183,6 +187,10 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
int amdgpu_set_powergating_state(struct amdgpu_device *adev,
enum amd_ip_block_type block_type,
enum amd_powergating_state state);
+int amdgpu_wait_for_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type);
+bool amdgpu_is_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type);
struct amdgpu_ip_block_version {
enum amd_ip_block_type type;
@@ -298,13 +306,16 @@ struct amdgpu_ring_funcs {
uint32_t oa_base, uint32_t oa_size);
/* testing functions */
int (*test_ring)(struct amdgpu_ring *ring);
- int (*test_ib)(struct amdgpu_ring *ring);
+ int (*test_ib)(struct amdgpu_ring *ring, long timeout);
/* insert NOP packets */
void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
/* pad the indirect buffer to the necessary number of dw */
void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
+ /* note usage for clock and power gating */
+ void (*begin_use)(struct amdgpu_ring *ring);
+ void (*end_use)(struct amdgpu_ring *ring);
};
/*
@@ -596,11 +607,9 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
struct reservation_object *resv,
void *owner);
-bool amdgpu_sync_is_idle(struct amdgpu_sync *sync);
-int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src,
- struct fence *fence);
+struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+ struct amdgpu_ring *ring);
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
-int amdgpu_sync_wait(struct amdgpu_sync *sync);
void amdgpu_sync_free(struct amdgpu_sync *sync);
int amdgpu_sync_init(void);
void amdgpu_sync_fini(void);
@@ -756,12 +765,11 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
struct amdgpu_job **job);
+void amdgpu_job_free_resources(struct amdgpu_job *job);
void amdgpu_job_free(struct amdgpu_job *job);
-void amdgpu_job_free_func(struct kref *refcount);
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct amd_sched_entity *entity, void *owner,
struct fence **f);
-void amdgpu_job_timeout_func(struct work_struct *work);
struct amdgpu_ring {
struct amdgpu_device *adev;
@@ -769,12 +777,9 @@ struct amdgpu_ring {
struct amdgpu_fence_driver fence_drv;
struct amd_gpu_scheduler sched;
- spinlock_t fence_lock;
struct amdgpu_bo *ring_obj;
volatile uint32_t *ring;
unsigned rptr_offs;
- u64 next_rptr_gpu_addr;
- volatile u32 *next_rptr_cpu_addr;
unsigned wptr;
unsigned wptr_old;
unsigned ring_size;
@@ -793,14 +798,16 @@ struct amdgpu_ring {
u32 doorbell_index;
bool use_doorbell;
unsigned wptr_offs;
- unsigned next_rptr_offs;
unsigned fence_offs;
uint64_t current_ctx;
enum amdgpu_ring_type type;
char name[16];
unsigned cond_exe_offs;
- u64 cond_exe_gpu_addr;
- volatile u32 *cond_exe_cpu_addr;
+ u64 cond_exe_gpu_addr;
+ volatile u32 *cond_exe_cpu_addr;
+#if defined(CONFIG_DEBUG_FS)
+ struct dentry *ent;
+#endif
};
/*
@@ -863,6 +870,7 @@ struct amdgpu_vm {
struct amdgpu_bo *page_directory;
unsigned max_pde_used;
struct fence *page_directory_fence;
+ uint64_t last_eviction_counter;
/* array of page tables, one for each page directory entry */
struct amdgpu_vm_pt *page_tables;
@@ -885,13 +893,14 @@ struct amdgpu_vm_id {
struct fence *first;
struct amdgpu_sync active;
struct fence *last_flush;
- struct amdgpu_ring *last_user;
atomic64_t owner;
uint64_t pd_gpu_addr;
/* last flushed PD/PT update */
struct fence *flushed_updates;
+ uint32_t current_gpu_reset_count;
+
uint32_t gds_base;
uint32_t gds_size;
uint32_t gws_base;
@@ -907,6 +916,10 @@ struct amdgpu_vm_manager {
struct list_head ids_lru;
struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
+ /* Handling of VM fences */
+ u64 fence_context;
+ unsigned seqno[AMDGPU_MAX_RINGS];
+
uint32_t max_pfn;
/* vram base address for page table entry */
u64 vram_base_offset;
@@ -928,17 +941,14 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
struct list_head *validated,
struct amdgpu_bo_list_entry *entry);
-void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates);
+void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct list_head *duplicates);
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync, struct fence *fence,
- unsigned *vm_id, uint64_t *vm_pd_addr);
-int amdgpu_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr,
- uint32_t gds_base, uint32_t gds_size,
- uint32_t gws_base, uint32_t gws_size,
- uint32_t oa_base, uint32_t oa_size);
+ struct amdgpu_job *job);
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
@@ -1144,6 +1154,12 @@ struct amdgpu_cu_info {
uint32_t bitmap[4][4];
};
+struct amdgpu_gfx_funcs {
+ /* get the gpu clock counter */
+ uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
+ void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
+};
+
struct amdgpu_gfx {
struct mutex gpu_clock_mutex;
struct amdgpu_gca_config config;
@@ -1180,6 +1196,7 @@ struct amdgpu_gfx {
/* ce ram size*/
unsigned ce_ram_size;
struct amdgpu_cu_info cu_info;
+ const struct amdgpu_gfx_funcs *funcs;
};
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@@ -1197,10 +1214,6 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
-unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
- uint32_t **data);
-int amdgpu_ring_restore(struct amdgpu_ring *ring,
- unsigned size, uint32_t *data);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned ring_size, u32 nop, u32 align_mask,
struct amdgpu_irq_src *irq_src, unsigned irq_type,
@@ -1252,6 +1265,7 @@ struct amdgpu_job {
uint32_t num_ibs;
void *owner;
uint64_t ctx;
+ bool vm_needs_flush;
unsigned vm_id;
uint64_t vm_pd_addr;
uint32_t gds_base, gds_size;
@@ -1259,8 +1273,7 @@ struct amdgpu_job {
uint32_t oa_base, oa_size;
/* user fence handling */
- struct amdgpu_bo *uf_bo;
- uint32_t uf_offset;
+ uint64_t uf_addr;
uint64_t uf_sequence;
};
@@ -1562,6 +1575,12 @@ struct amdgpu_dpm_funcs {
u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
+ int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
+ int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
+ int (*get_sclk_od)(struct amdgpu_device *adev);
+ int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
+ int (*get_mclk_od)(struct amdgpu_device *adev);
+ int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
};
struct amdgpu_dpm {
@@ -1664,6 +1683,7 @@ struct amdgpu_uvd {
struct amdgpu_ring ring;
struct amdgpu_irq_src irq;
bool address_64_bit;
+ bool use_ctx_buf;
struct amd_sched_entity entity;
};
@@ -1685,6 +1705,7 @@ struct amdgpu_vce {
struct drm_file *filp[AMDGPU_MAX_VCE_HANDLES];
uint32_t img_size[AMDGPU_MAX_VCE_HANDLES];
struct delayed_work idle_work;
+ struct mutex idle_mutex;
const struct firmware *fw; /* VCE firmware */
struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS];
struct amdgpu_irq_src irq;
@@ -1769,6 +1790,8 @@ int amdgpu_debugfs_init(struct drm_minor *minor);
void amdgpu_debugfs_cleanup(struct drm_minor *minor);
#endif
+int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
+
/*
* amdgpu smumgr functions
*/
@@ -1813,12 +1836,8 @@ struct amdgpu_asic_funcs {
u32 sh_num, u32 reg_offset, u32 *value);
void (*set_vga_state)(struct amdgpu_device *adev, bool state);
int (*reset)(struct amdgpu_device *adev);
- /* wait for mc_idle */
- int (*wait_for_mc_idle)(struct amdgpu_device *adev);
/* get the reference clock */
u32 (*get_xclk)(struct amdgpu_device *adev);
- /* get the gpu clock counter */
- uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
/* MM block clocks */
int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
@@ -2005,6 +2024,10 @@ struct amdgpu_device {
spinlock_t didt_idx_lock;
amdgpu_rreg_t didt_rreg;
amdgpu_wreg_t didt_wreg;
+ /* protects concurrent gc_cac register access */
+ spinlock_t gc_cac_idx_lock;
+ amdgpu_rreg_t gc_cac_rreg;
+ amdgpu_wreg_t gc_cac_wreg;
/* protects concurrent ENDPOINT (audio) register access */
spinlock_t audio_endpt_idx_lock;
amdgpu_block_rreg_t audio_endpt_rreg;
@@ -2030,6 +2053,7 @@ struct amdgpu_device {
atomic64_t vram_vis_usage;
atomic64_t gtt_usage;
atomic64_t num_bytes_moved;
+ atomic64_t num_evictions;
atomic_t gpu_reset_counter;
/* display */
@@ -2040,7 +2064,7 @@ struct amdgpu_device {
struct amdgpu_irq_src hpd_irq;
/* rings */
- unsigned fence_context;
+ u64 fence_context;
unsigned num_rings;
struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
bool ib_pool_ready;
@@ -2133,6 +2157,8 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
+#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
+#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
#define WREG32_P(reg, val, mask) \
@@ -2208,12 +2234,10 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
*/
#define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
-#define amdgpu_asic_wait_for_mc_idle(adev) (adev)->asic_funcs->wait_for_mc_idle((adev))
#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
#define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev)))
-#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
@@ -2224,7 +2248,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
-#define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r))
+#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
@@ -2266,6 +2290,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
+#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
+#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
#define amdgpu_dpm_get_temperature(adev) \
((adev)->pp_enabled ? \
@@ -2344,6 +2370,18 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_dpm_force_clock_level(adev, type, level) \
(adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
+#define amdgpu_dpm_get_sclk_od(adev) \
+ (adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
+
+#define amdgpu_dpm_set_sclk_od(adev, value) \
+ (adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
+
+#define amdgpu_dpm_get_mclk_od(adev) \
+ ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_set_mclk_od(adev, value) \
+ ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
+
#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
(adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
@@ -2385,9 +2423,13 @@ bool amdgpu_device_is_px(struct drm_device *dev);
#if defined(CONFIG_VGA_SWITCHEROO)
void amdgpu_register_atpx_handler(void);
void amdgpu_unregister_atpx_handler(void);
+bool amdgpu_has_atpx_dgpu_power_cntl(void);
+bool amdgpu_is_atpx_hybrid(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
+static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
+static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
#endif
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 252edba16..892d60fb2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -421,29 +421,6 @@ static int acp_suspend(void *handle)
static int acp_resume(void *handle)
{
- int i, ret;
- struct acp_pm_domain *apd;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- /* return early if no ACP */
- if (!adev->acp.acp_genpd)
- return 0;
-
- /* SMU block will power on ACP irrespective of ACP runtime status.
- * Power off explicitly based on genpd ACP runtime status so that ACP
- * hw and ACP-genpd status are in sync.
- * 'suspend_power_off' represents "Power status before system suspend"
- */
- if (adev->acp.acp_genpd->gpd.suspend_power_off == true) {
- apd = container_of(&adev->acp.acp_genpd->gpd,
- struct acp_pm_domain, gpd);
-
- for (i = 4; i >= 0 ; i--) {
- ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i);
- if (ret)
- pr_err("ACP tile %d tile suspend failed\n", i);
- }
- }
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 32809f749..d080d0807 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -240,8 +240,8 @@ uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
{
struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
- if (rdev->asic_funcs->get_gpu_clock_counter)
- return rdev->asic_funcs->get_gpu_clock_counter(rdev);
+ if (rdev->gfx.funcs->get_gpu_clock_counter)
+ return rdev->gfx.funcs->get_gpu_clock_counter(rdev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 1b4c069f7..10b5ddf2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -28,6 +28,7 @@ struct amdgpu_atpx_functions {
struct amdgpu_atpx {
acpi_handle handle;
struct amdgpu_atpx_functions functions;
+ bool is_hybrid;
};
static struct amdgpu_atpx_priv {
@@ -64,6 +65,14 @@ bool amdgpu_has_atpx(void) {
return amdgpu_atpx_priv.atpx_detected;
}
+bool amdgpu_has_atpx_dgpu_power_cntl(void) {
+ return amdgpu_atpx_priv.atpx.functions.power_cntl;
+}
+
+bool amdgpu_is_atpx_hybrid(void) {
+ return amdgpu_atpx_priv.atpx.is_hybrid;
+}
+
/**
* amdgpu_atpx_call - call an ATPX method
*
@@ -143,18 +152,12 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
*/
static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
{
- /* make sure required functions are enabled */
- /* dGPU power control is required */
- if (atpx->functions.power_cntl == false) {
- printk("ATPX dGPU power cntl not present, forcing\n");
- atpx->functions.power_cntl = true;
- }
+ u32 valid_bits = 0;
if (atpx->functions.px_params) {
union acpi_object *info;
struct atpx_px_params output;
size_t size;
- u32 valid_bits;
info = amdgpu_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL);
if (!info)
@@ -173,19 +176,34 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
memcpy(&output, info->buffer.pointer, size);
valid_bits = output.flags & output.valid_flags;
- /* if separate mux flag is set, mux controls are required */
- if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
- atpx->functions.i2c_mux_cntl = true;
- atpx->functions.disp_mux_cntl = true;
- }
- /* if any outputs are muxed, mux controls are required */
- if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
- ATPX_TV_SIGNAL_MUXED |
- ATPX_DFP_SIGNAL_MUXED))
- atpx->functions.disp_mux_cntl = true;
kfree(info);
}
+
+ /* if separate mux flag is set, mux controls are required */
+ if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
+ atpx->functions.i2c_mux_cntl = true;
+ atpx->functions.disp_mux_cntl = true;
+ }
+ /* if any outputs are muxed, mux controls are required */
+ if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
+ ATPX_TV_SIGNAL_MUXED |
+ ATPX_DFP_SIGNAL_MUXED))
+ atpx->functions.disp_mux_cntl = true;
+
+
+	/* some BIOSes set these bits rather than flagging power_cntl as supported */
+ if (valid_bits & (ATPX_DYNAMIC_PX_SUPPORTED |
+ ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED))
+ atpx->functions.power_cntl = true;
+
+ atpx->is_hybrid = false;
+ if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
+ printk("ATPX Hybrid Graphics\n");
+ atpx->functions.power_cntl = false;
+ atpx->is_hybrid = true;
+ }
+
return 0;
}
@@ -512,7 +530,6 @@ static int amdgpu_atpx_get_client_id(struct pci_dev *pdev)
static const struct vga_switcheroo_handler amdgpu_atpx_handler = {
.switchto = amdgpu_atpx_switchto,
.power_state = amdgpu_atpx_power_state,
- .init = amdgpu_atpx_init,
.get_client_id = amdgpu_atpx_get_client_id,
};
@@ -547,6 +564,7 @@ static bool amdgpu_atpx_detect(void)
printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
acpi_method_name);
amdgpu_atpx_priv.atpx_detected = true;
+ amdgpu_atpx_init();
return true;
}
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 99ca75baa..2b6afe123 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -352,22 +352,22 @@ bool amdgpu_get_bios(struct amdgpu_device *adev)
uint16_t tmp, bios_header_start;
r = amdgpu_atrm_get_bios(adev);
- if (r == false)
+ if (!r)
r = amdgpu_acpi_vfct_bios(adev);
- if (r == false)
+ if (!r)
r = igp_read_bios_from_vram(adev);
- if (r == false)
+ if (!r)
r = amdgpu_read_bios(adev);
- if (r == false) {
+ if (!r) {
r = amdgpu_read_bios_from_rom(adev);
}
- if (r == false) {
+ if (!r) {
r = amdgpu_read_disabled_bios(adev);
}
- if (r == false) {
+ if (!r) {
r = amdgpu_read_platform_bios(adev);
}
- if (r == false || adev->bios == NULL) {
+ if (!r || adev->bios == NULL) {
DRM_ERROR("Unable to locate a BIOS ROM\n");
adev->bios = NULL;
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 823bf5e0b..651115dcc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -94,6 +94,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
unsigned last_entry = 0, first_userptr = num_entries;
unsigned i;
int r;
+ unsigned long total_size = 0;
array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
if (!array)
@@ -140,6 +141,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
oa_obj = entry->robj;
+ total_size += amdgpu_bo_size(entry->robj);
trace_amdgpu_bo_list_set(list, entry->robj);
}
@@ -155,6 +157,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
list->array = array;
list->num_entries = num_entries;
+ trace_amdgpu_cs_bo_status(list->num_entries, total_size);
return 0;
error_free:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 6f9dcfddc..16e248e3f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -312,6 +312,8 @@ static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
return RREG32_UVD_CTX(index);
case CGS_IND_REG__DIDT:
return RREG32_DIDT(index);
+ case CGS_IND_REG_GC_CAC:
+ return RREG32_GC_CAC(index);
case CGS_IND_REG__AUDIO_ENDPT:
DRM_ERROR("audio endpt register access not implemented.\n");
return 0;
@@ -336,6 +338,8 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
return WREG32_UVD_CTX(index, value);
case CGS_IND_REG__DIDT:
return WREG32_DIDT(index, value);
+ case CGS_IND_REG_GC_CAC:
+ return WREG32_GC_CAC(index, value);
case CGS_IND_REG__AUDIO_ENDPT:
DRM_ERROR("audio endpt register access not implemented.\n");
return;
@@ -748,6 +752,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
if (!adev->pm.fw) {
switch (adev->asic_type) {
+ case CHIP_TOPAZ:
+ strcpy(fw_name, "/*(DEBLOBBED)*/");
+ break;
case CHIP_TONGA:
strcpy(fw_name, "/*(DEBLOBBED)*/");
break;
@@ -787,6 +794,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
}
hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
+ amdgpu_ucode_print_smc_hdr(&hdr->header);
adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
@@ -795,13 +803,14 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
info->version = adev->pm.fw_version;
info->image_size = ucode_size;
+ info->ucode_start_address = ucode_start_address;
info->kptr = (void *)src;
}
return 0;
}
static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
- struct cgs_system_info *sys_info)
+ struct cgs_system_info *sys_info)
{
CGS_FUNC_ADEV;
@@ -821,6 +830,12 @@ static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
case CGS_SYSTEM_INFO_PCIE_MLW:
sys_info->value = adev->pm.pcie_mlw_mask;
break;
+ case CGS_SYSTEM_INFO_PCIE_DEV:
+ sys_info->value = adev->pdev->device;
+ break;
+ case CGS_SYSTEM_INFO_PCIE_REV:
+ sys_info->value = adev->pdev->revision;
+ break;
case CGS_SYSTEM_INFO_CG_FLAGS:
sys_info->value = adev->cg_flags;
break;
@@ -830,6 +845,9 @@ static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
case CGS_SYSTEM_INFO_GFX_CU_INFO:
sys_info->value = adev->gfx.cu_info.number;
break;
+ case CGS_SYSTEM_INFO_GFX_SE_INFO:
+ sys_info->value = adev->gfx.config.max_shader_engines;
+ break;
default:
return -ENODEV;
}
@@ -903,14 +921,12 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
acpi_handle handle;
struct acpi_object_list input;
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *params = NULL;
- union acpi_object *obj = NULL;
+ union acpi_object *params, *obj;
uint8_t name[5] = {'\0'};
- struct cgs_acpi_method_argument *argument = NULL;
+ struct cgs_acpi_method_argument *argument;
uint32_t i, count;
acpi_status status;
- int result = 0;
- uint32_t func_no = 0xFFFFFFFF;
+ int result;
handle = ACPI_HANDLE(&adev->pdev->dev);
if (!handle)
@@ -927,7 +943,6 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
if (info->pinput_argument == NULL)
return -EINVAL;
argument = info->pinput_argument;
- func_no = argument->value;
for (i = 0; i < info->input_count; i++) {
if (((argument->type == ACPI_TYPE_STRING) ||
(argument->type == ACPI_TYPE_BUFFER)) &&
@@ -972,11 +987,11 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
params->integer.value = argument->value;
break;
case ACPI_TYPE_STRING:
- params->string.length = argument->method_length;
+ params->string.length = argument->data_length;
params->string.pointer = argument->pointer;
break;
case ACPI_TYPE_BUFFER:
- params->buffer.length = argument->method_length;
+ params->buffer.length = argument->data_length;
params->buffer.pointer = argument->pointer;
break;
default:
@@ -996,7 +1011,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
if (ACPI_FAILURE(status)) {
result = -EIO;
- goto error;
+ goto free_input;
}
/* return the output info */
@@ -1006,7 +1021,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
if ((obj->type != ACPI_TYPE_PACKAGE) ||
(obj->package.count != count)) {
result = -EIO;
- goto error;
+ goto free_obj;
}
params = obj->package.elements;
} else
@@ -1014,13 +1029,13 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
if (params == NULL) {
result = -EIO;
- goto error;
+ goto free_obj;
}
for (i = 0; i < count; i++) {
if (argument->type != params->type) {
result = -EIO;
- goto error;
+ goto free_obj;
}
switch (params->type) {
case ACPI_TYPE_INTEGER:
@@ -1030,7 +1045,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
if ((params->string.length != argument->data_length) ||
(params->string.pointer == NULL)) {
result = -EIO;
- goto error;
+ goto free_obj;
}
strncpy(argument->pointer,
params->string.pointer,
@@ -1039,7 +1054,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
case ACPI_TYPE_BUFFER:
if (params->buffer.pointer == NULL) {
result = -EIO;
- goto error;
+ goto free_obj;
}
memcpy(argument->pointer,
params->buffer.pointer,
@@ -1052,9 +1067,10 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
params++;
}
-error:
- if (obj != NULL)
- kfree(obj);
+ result = 0;
+free_obj:
+ kfree(obj);
+free_input:
kfree((void *)input.pointer);
return result;
}
@@ -1066,7 +1082,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
}
#endif
-int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
+static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
uint32_t acpi_method,
uint32_t acpi_function,
void *pinput, void *poutput,
@@ -1079,17 +1095,14 @@ int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
struct cgs_acpi_method_info info = {0};
acpi_input[0].type = CGS_ACPI_TYPE_INTEGER;
- acpi_input[0].method_length = sizeof(uint32_t);
acpi_input[0].data_length = sizeof(uint32_t);
acpi_input[0].value = acpi_function;
acpi_input[1].type = CGS_ACPI_TYPE_BUFFER;
- acpi_input[1].method_length = CGS_ACPI_MAX_BUFFER_SIZE;
acpi_input[1].data_length = input_size;
acpi_input[1].pointer = pinput;
acpi_output.type = CGS_ACPI_TYPE_BUFFER;
- acpi_output.method_length = CGS_ACPI_MAX_BUFFER_SIZE;
acpi_output.data_length = output_size;
acpi_output.pointer = poutput;
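
The relabelled error paths in amdgpu_cgs_acpi_eval_object() follow the usual kernel unwind idiom: each failure jumps to the label that releases exactly what has been allocated so far, and the labels are named for what they free. A generic sketch of the pattern (do_work() is a hypothetical stand-in):

#include <linux/slab.h>

static int example(void)
{
	void *a, *b;
	int ret;

	a = kmalloc(16, GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	b = kmalloc(16, GFP_KERNEL);
	if (!b) {
		ret = -ENOMEM;
		goto free_a;
	}

	ret = do_work(a, b);	/* hypothetical */

	kfree(b);
free_a:
	kfree(a);
	return ret;
}
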
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 9bc8f1d99..0307ff588 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -216,11 +216,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
if (ret)
goto free_all_kdata;
- if (p->uf_entry.robj) {
- p->job->uf_bo = amdgpu_bo_ref(p->uf_entry.robj);
- p->job->uf_offset = uf_offset;
- }
-
+ if (p->uf_entry.robj)
+ p->job->uf_addr = uf_offset;
kfree(chunk_array);
return 0;
@@ -459,7 +456,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
list_splice(&need_pages, &p->validated);
}
- amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates);
+ amdgpu_vm_get_pt_bos(p->adev, &fpriv->vm, &duplicates);
p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
p->bytes_moved = 0;
@@ -472,6 +469,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
if (r)
goto error_validate;
+ fpriv->vm.last_eviction_counter =
+ atomic64_read(&p->adev->num_evictions);
+
if (p->bo_list) {
struct amdgpu_bo *gds = p->bo_list->gds_obj;
struct amdgpu_bo *gws = p->bo_list->gws_obj;
@@ -499,6 +499,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
}
}
+ if (p->uf_entry.robj)
+ p->job->uf_addr += amdgpu_bo_gpu_offset(p->uf_entry.robj);
+
error_validate:
if (r) {
amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm);
@@ -653,18 +656,21 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
/* Only for UVD/VCE VM emulation */
if (ring->funcs->parse_cs) {
+ p->job->vm = NULL;
for (i = 0; i < p->job->num_ibs; i++) {
r = amdgpu_ring_parse_cs(ring, p, i);
if (r)
return r;
}
- }
+ } else {
+ p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
- r = amdgpu_bo_vm_update_pte(p, vm);
- if (!r)
- amdgpu_cs_sync_rings(p);
+ r = amdgpu_bo_vm_update_pte(p, vm);
+ if (r)
+ return r;
+ }
- return r;
+ return amdgpu_cs_sync_rings(p);
}
static int amdgpu_cs_handle_lockup(struct amdgpu_device *adev, int r)
@@ -761,7 +767,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
}
/* UVD & VCE fw doesn't support user fences */
- if (parser->job->uf_bo && (
+ if (parser->job->uf_addr && (
parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
parser->job->ring->type == AMDGPU_RING_TYPE_VCE))
return -EINVAL;
@@ -830,17 +836,13 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
{
struct amdgpu_ring *ring = p->job->ring;
struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
- struct fence *fence;
struct amdgpu_job *job;
int r;
job = p->job;
p->job = NULL;
- r = amd_sched_job_init(&job->base, &ring->sched,
- entity, amdgpu_job_timeout_func,
- amdgpu_job_free_func,
- p->filp, &fence);
+ r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp);
if (r) {
amdgpu_job_free(job);
return r;
@@ -848,9 +850,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job->owner = p->filp;
job->ctx = entity->fence_context;
- p->fence = fence_get(fence);
- cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, fence);
+ p->fence = fence_get(&job->base.s_fence->finished);
+ cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
job->uf_sequence = cs->out.handle;
+ amdgpu_job_free_resources(job);
trace_amdgpu_cs_ioctl(job);
amd_sched_entity_push_job(&job->base);
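
The parser changes above stop carrying a reference to the user-fence BO in the job and instead carry only a GPU address: amdgpu_cs_parser_init() records the chunk's offset, and the BO's GPU base is added after the buffer list has been validated, when the address is final. A toy sketch of that two-step address computation, with illustrative values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t uf_offset = 0x40;	  /* from the userspace chunk */
	uint64_t bo_gpu_base = 0x100000;  /* known only after validation */
	uint64_t uf_addr = uf_offset;	  /* parser-init step */

	uf_addr += bo_gpu_base;		  /* post-validation step */
	printf("user fence written at 0x%llx\n",
	       (unsigned long long)uf_addr);
	return 0;
}
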
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index b7f5650d8..39c01b942 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -25,6 +25,7 @@
* Alex Deucher
* Jerome Glisse
*/
+#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
@@ -35,6 +36,7 @@
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
+#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
@@ -79,24 +81,27 @@ bool amdgpu_device_is_px(struct drm_device *dev)
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
bool always_indirect)
{
+ uint32_t ret;
+
if ((reg * 4) < adev->rmmio_size && !always_indirect)
- return readl(((void __iomem *)adev->rmmio) + (reg * 4));
+ ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
else {
unsigned long flags;
- uint32_t ret;
spin_lock_irqsave(&adev->mmio_idx_lock, flags);
writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
-
- return ret;
}
+ trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
+ return ret;
}
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
bool always_indirect)
{
+ trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
+
if ((reg * 4) < adev->rmmio_size && !always_indirect)
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
else {
@@ -1070,11 +1075,14 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
if (adev->ip_blocks[i].type == block_type) {
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
state);
if (r)
return r;
+ break;
}
}
return r;
@@ -1087,16 +1095,53 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev,
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
if (adev->ip_blocks[i].type == block_type) {
r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev,
state);
if (r)
return r;
+ break;
}
}
return r;
}
+int amdgpu_wait_for_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type)
+{
+ int i, r;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
+ if (adev->ip_blocks[i].type == block_type) {
+ r = adev->ip_blocks[i].funcs->wait_for_idle((void *)adev);
+ if (r)
+ return r;
+ break;
+ }
+ }
+ return 0;
+}
+
+bool amdgpu_is_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type)
+{
+ int i;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
+ if (adev->ip_blocks[i].type == block_type)
+ return adev->ip_blocks[i].funcs->is_idle((void *)adev);
+ }
+ return true;
+}
+
const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
struct amdgpu_device *adev,
enum amd_ip_block_type type)
@@ -1209,6 +1254,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
}
}
+ adev->cg_flags &= amdgpu_cg_mask;
+ adev->pg_flags &= amdgpu_pg_mask;
+
return 0;
}
@@ -1440,9 +1488,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
adev->didt_rreg = &amdgpu_invalid_rreg;
adev->didt_wreg = &amdgpu_invalid_wreg;
+ adev->gc_cac_rreg = &amdgpu_invalid_rreg;
+ adev->gc_cac_wreg = &amdgpu_invalid_wreg;
adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
+
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
@@ -1467,6 +1518,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
spin_lock_init(&adev->pcie_idx_lock);
spin_lock_init(&adev->uvd_ctx_idx_lock);
spin_lock_init(&adev->didt_idx_lock);
+ spin_lock_init(&adev->gc_cac_idx_lock);
spin_lock_init(&adev->audio_endpt_idx_lock);
adev->rmmio_base = pci_resource_start(adev->pdev, 5);
@@ -1511,17 +1563,20 @@ int amdgpu_device_init(struct amdgpu_device *adev,
vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
/* Read BIOS */
- if (!amdgpu_get_bios(adev))
- return -EINVAL;
+ if (!amdgpu_get_bios(adev)) {
+ r = -EINVAL;
+ goto failed;
+ }
/* Must be an ATOMBIOS */
if (!adev->is_atom_bios) {
dev_err(adev->dev, "Expecting atombios for GPU\n");
- return -EINVAL;
+ r = -EINVAL;
+ goto failed;
}
r = amdgpu_atombios_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_atombios_init failed\n");
- return r;
+ goto failed;
}
/* See if the asic supports SR-IOV */
@@ -1538,7 +1593,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
!(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) {
if (!adev->bios) {
dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
- return -EINVAL;
+ r = -EINVAL;
+ goto failed;
}
DRM_INFO("GPU not posted. posting now...\n");
amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -1548,7 +1604,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_atombios_get_clock_info(adev);
if (r) {
dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
- return r;
+ goto failed;
}
/* init i2c buses */
amdgpu_atombios_i2c_init(adev);
@@ -1557,7 +1613,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_fence_driver_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
- return r;
+ goto failed;
}
/* init the mode config */
@@ -1567,7 +1623,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r) {
dev_err(adev->dev, "amdgpu_init failed\n");
amdgpu_fini(adev);
- return r;
+ goto failed;
}
adev->accel_working = true;
@@ -1577,7 +1633,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_ib_pool_init(adev);
if (r) {
dev_err(adev->dev, "IB initialization failed (%d).\n", r);
- return r;
+ goto failed;
}
r = amdgpu_ib_ring_tests(adev);
@@ -1594,6 +1650,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
DRM_ERROR("registering register debugfs failed (%d).\n", r);
}
+ r = amdgpu_debugfs_firmware_init(adev);
+ if (r) {
+ DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
+ return r;
+ }
+
if ((amdgpu_testing & 1)) {
if (adev->accel_working)
amdgpu_test_moves(adev);
@@ -1619,10 +1681,15 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_late_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_late_init failed\n");
- return r;
+ goto failed;
}
return 0;
+
+failed:
+ if (runtime)
+ vga_switcheroo_fini_domain_pm_ops(adev->dev);
+ return r;
}
static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev);
@@ -1641,6 +1708,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
DRM_INFO("amdgpu: finishing device.\n");
adev->shutdown = true;
+ drm_crtc_force_disable_all(adev->ddev);
/* evict vram memory */
amdgpu_bo_evict_vram(adev);
amdgpu_ib_pool_fini(adev);
@@ -1656,6 +1724,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
kfree(adev->bios);
adev->bios = NULL;
vga_switcheroo_unregister_client(adev->pdev);
+ if (adev->flags & AMD_IS_PX)
+ vga_switcheroo_fini_domain_pm_ops(adev->dev);
vga_client_register(adev->pdev, NULL, NULL, NULL);
if (adev->rio_mem)
pci_iounmap(adev->pdev, adev->rio_mem);
@@ -1877,11 +1947,6 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
*/
int amdgpu_gpu_reset(struct amdgpu_device *adev)
{
- unsigned ring_sizes[AMDGPU_MAX_RINGS];
- uint32_t *ring_data[AMDGPU_MAX_RINGS];
-
- bool saved = false;
-
int i, r;
int resched;
@@ -1890,22 +1955,30 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
- r = amdgpu_suspend(adev);
-
+ /* block scheduler */
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
+
if (!ring)
continue;
-
- ring_sizes[i] = amdgpu_ring_backup(ring, &ring_data[i]);
- if (ring_sizes[i]) {
- saved = true;
- dev_info(adev->dev, "Saved %d dwords of commands "
- "on ring %d.\n", ring_sizes[i], i);
- }
+ kthread_park(ring->sched.thread);
+ amd_sched_hw_job_reset(&ring->sched);
}
+ /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
+ amdgpu_fence_driver_force_completion(adev);
+
+ /* save scratch */
+ amdgpu_atombios_scratch_regs_save(adev);
+ r = amdgpu_suspend(adev);
retry:
+ /* Disable fb access */
+ if (adev->mode_info.num_crtc) {
+ struct amdgpu_mode_mc_save save;
+ amdgpu_display_stop_mc_access(adev, &save);
+ amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
+ }
+
r = amdgpu_asic_reset(adev);
/* post card */
amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -1914,32 +1987,29 @@ retry:
dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
r = amdgpu_resume(adev);
}
-
+ /* restore scratch */
+ amdgpu_atombios_scratch_regs_restore(adev);
if (!r) {
+ r = amdgpu_ib_ring_tests(adev);
+ if (r) {
+ dev_err(adev->dev, "ib ring test failed (%d).\n", r);
+ r = amdgpu_suspend(adev);
+ goto retry;
+ }
+
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring)
continue;
-
- amdgpu_ring_restore(ring, ring_sizes[i], ring_data[i]);
- ring_sizes[i] = 0;
- ring_data[i] = NULL;
- }
-
- r = amdgpu_ib_ring_tests(adev);
- if (r) {
- dev_err(adev->dev, "ib ring test failed (%d).\n", r);
- if (saved) {
- saved = false;
- r = amdgpu_suspend(adev);
- goto retry;
- }
+ amd_sched_job_recovery(&ring->sched);
+ kthread_unpark(ring->sched.thread);
}
} else {
- amdgpu_fence_driver_force_completion(adev);
+ dev_err(adev->dev, "asic resume failed (%d).\n", r);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- if (adev->rings[i])
- kfree(ring_data[i]);
+ if (adev->rings[i]) {
+ kthread_unpark(adev->rings[i]->sched.thread);
+ }
}
}
@@ -1950,13 +2020,11 @@ retry:
/* bad news, how to tell it to userspace ? */
dev_info(adev->dev, "GPU reset failed\n");
}
+ amdgpu_irq_gpu_reset_resume_helper(adev);
return r;
}
-#define AMDGPU_DEFAULT_PCIE_GEN_MASK 0x30007 /* gen: chipset 1/2, asic 1/2/3 */
-#define AMDGPU_DEFAULT_PCIE_MLW_MASK 0x2f0000 /* 1/2/4/8/16 lanes */
-
void amdgpu_get_pcie_info(struct amdgpu_device *adev)
{
u32 mask;
@@ -2110,20 +2178,43 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
struct amdgpu_device *adev = f->f_inode->i_private;
ssize_t result = 0;
int r;
+ bool use_bank;
+ unsigned instance_bank, sh_bank, se_bank;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ if (*pos & (1ULL << 62)) {
+ se_bank = (*pos >> 24) & 0x3FF;
+ sh_bank = (*pos >> 34) & 0x3FF;
+ instance_bank = (*pos >> 44) & 0x3FF;
+ use_bank = 1;
+ *pos &= 0xFFFFFF;
+ } else {
+ use_bank = 0;
+ }
+
+ if (use_bank) {
+ if (sh_bank >= adev->gfx.config.max_sh_per_se ||
+ se_bank >= adev->gfx.config.max_shader_engines)
+ return -EINVAL;
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se_bank,
+ sh_bank, instance_bank);
+ }
+
while (size) {
uint32_t value;
if (*pos > adev->rmmio_size)
- return result;
+ goto end;
value = RREG32(*pos >> 2);
r = put_user(value, (uint32_t *)buf);
- if (r)
- return r;
+ if (r) {
+ result = r;
+ goto end;
+ }
result += 4;
buf += 4;
@@ -2131,6 +2222,12 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
size -= 4;
}
+end:
+ if (use_bank) {
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ }
+
return result;
}
@@ -2330,6 +2427,68 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
return result;
}
+static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = f->f_inode->i_private;
+ ssize_t result = 0;
+ int r;
+ uint32_t *config, no_regs = 0;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ config = kmalloc(256 * sizeof(*config), GFP_KERNEL);
+ if (!config)
+ return -ENOMEM;
+
+ /* version, increment each time something is added */
+ config[no_regs++] = 0;
+ config[no_regs++] = adev->gfx.config.max_shader_engines;
+ config[no_regs++] = adev->gfx.config.max_tile_pipes;
+ config[no_regs++] = adev->gfx.config.max_cu_per_sh;
+ config[no_regs++] = adev->gfx.config.max_sh_per_se;
+ config[no_regs++] = adev->gfx.config.max_backends_per_se;
+ config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
+ config[no_regs++] = adev->gfx.config.max_gprs;
+ config[no_regs++] = adev->gfx.config.max_gs_threads;
+ config[no_regs++] = adev->gfx.config.max_hw_contexts;
+ config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
+ config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
+ config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
+ config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
+ config[no_regs++] = adev->gfx.config.num_tile_pipes;
+ config[no_regs++] = adev->gfx.config.backend_enable_mask;
+ config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
+ config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
+ config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
+ config[no_regs++] = adev->gfx.config.num_gpus;
+ config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
+ config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
+ config[no_regs++] = adev->gfx.config.gb_addr_config;
+ config[no_regs++] = adev->gfx.config.num_rbs;
+
+ while (size && (*pos < no_regs * 4)) {
+ uint32_t value;
+
+ value = config[*pos >> 2];
+ r = put_user(value, (uint32_t *)buf);
+ if (r) {
+ kfree(config);
+ return r;
+ }
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ kfree(config);
+ return result;
+}
+
+
static const struct file_operations amdgpu_debugfs_regs_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_regs_read,
@@ -2355,11 +2514,18 @@ static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
.llseek = default_llseek
};
+static const struct file_operations amdgpu_debugfs_gca_config_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_gca_config_read,
+ .llseek = default_llseek
+};
+
static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_regs_fops,
&amdgpu_debugfs_regs_didt_fops,
&amdgpu_debugfs_regs_pcie_fops,
&amdgpu_debugfs_regs_smc_fops,
+ &amdgpu_debugfs_gca_config_fops,
};
static const char *debugfs_regs_names[] = {
@@ -2367,6 +2533,7 @@ static const char *debugfs_regs_names[] = {
"amdgpu_regs_didt",
"amdgpu_regs_pcie",
"amdgpu_regs_smc",
+ "amdgpu_gca_config",
};
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
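
The decoding added to amdgpu_debugfs_regs_read() above gives the register file an optional bank select packed into the read offset: bit 62 flags a banked access, the SE/SH/instance selects sit in 10-bit fields at bits 24, 34 and 44, and the low 24 bits remain the register byte offset. A user-space sketch that builds such an offset for pread() on the amdgpu_regs debugfs file (field values illustrative):

#include <stdint.h>
#include <stdio.h>

/* Encode a banked register offset exactly as the hunk above decodes it. */
static uint64_t banked_pos(uint32_t reg_byte_off, uint32_t se,
			   uint32_t sh, uint32_t instance)
{
	return (1ULL << 62) |
	       ((uint64_t)(se & 0x3FF) << 24) |
	       ((uint64_t)(sh & 0x3FF) << 34) |
	       ((uint64_t)(instance & 0x3FF) << 44) |
	       (reg_byte_off & 0xFFFFFF);
}

int main(void)
{
	/* Select SE 1, SH 0, instance 2, register at byte offset 0x98. */
	printf("pread offset = 0x%llx\n",
	       (unsigned long long)banked_pos(0x98, 1, 0, 2));
	return 0;
}
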
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index b0832da2e..76f960283 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -122,7 +122,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
usleep_range(min_udelay, 2 * min_udelay);
spin_lock_irqsave(&crtc->dev->event_lock, flags);
- };
+ }
if (!repcnt)
DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
@@ -220,19 +220,17 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
r = amdgpu_bo_pin_restricted(new_rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base);
if (unlikely(r != 0)) {
- amdgpu_bo_unreserve(new_rbo);
r = -EINVAL;
DRM_ERROR("failed to pin new rbo buffer before flip\n");
- goto cleanup;
+ goto unreserve;
}
r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl,
&work->shared_count,
&work->shared);
if (unlikely(r != 0)) {
- amdgpu_bo_unreserve(new_rbo);
DRM_ERROR("failed to get fences for buffer\n");
- goto cleanup;
+ goto unpin;
}
amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
@@ -240,7 +238,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
work->base = base;
- r = drm_vblank_get(crtc->dev, amdgpu_crtc->crtc_id);
+ r = drm_crtc_vblank_get(crtc);
if (r) {
DRM_ERROR("failed to get vblank before flip\n");
goto pflip_cleanup;
@@ -268,16 +266,18 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
return 0;
vblank_cleanup:
- drm_vblank_put(crtc->dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_put(crtc);
pflip_cleanup:
if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) {
DRM_ERROR("failed to reserve new rbo in error path\n");
goto cleanup;
}
+unpin:
if (unlikely(amdgpu_bo_unpin(new_rbo) != 0)) {
DRM_ERROR("failed to unpin new rbo in error path\n");
}
+unreserve:
amdgpu_bo_unreserve(new_rbo);
cleanup:
@@ -516,9 +516,7 @@ static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);
- if (amdgpu_fb->obj) {
- drm_gem_object_unreference_unlocked(amdgpu_fb->obj);
- }
+ drm_gem_object_unreference_unlocked(amdgpu_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(amdgpu_fb);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index f888c015f..9aa533cf4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -52,9 +52,10 @@
* - 3.1.0 - allow reading more status registers (GRBM, SRBM, SDMA, CP)
* - 3.2.0 - GFX8: Uses EOP_TC_WB_ACTION_EN, so UMDs don't have to do the same
* at the end of IBs.
+ * - 3.3.0 - Add VM support for UVD on supported hardware.
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 2
+#define KMS_DRIVER_MINOR 3
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -82,8 +83,12 @@ int amdgpu_exp_hw_support = 0;
int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
int amdgpu_powerplay = -1;
+int amdgpu_powercontainment = 1;
unsigned amdgpu_pcie_gen_cap = 0;
unsigned amdgpu_pcie_lane_cap = 0;
+unsigned amdgpu_cg_mask = 0xffffffff;
+unsigned amdgpu_pg_mask = 0xffffffff;
+char *amdgpu_disable_cu = NULL;
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -160,6 +165,9 @@ module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
#ifdef CONFIG_DRM_AMD_POWERPLAY
MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))");
module_param_named(powerplay, amdgpu_powerplay, int, 0444);
+
+MODULE_PARM_DESC(powercontainment, "Power Containment (1 = enable (default), 0 = disable)");
+module_param_named(powercontainment, amdgpu_powercontainment, int, 0444);
#endif
MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
@@ -168,6 +176,15 @@ module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);
+MODULE_PARM_DESC(cg_mask, "Clockgating flags mask (0 = disable clock gating)");
+module_param_named(cg_mask, amdgpu_cg_mask, uint, 0444);
+
+MODULE_PARM_DESC(pg_mask, "Powergating flags mask (0 = disable power gating)");
+module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
+
+MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
+module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
+
static const struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_CIK
/* Kaveri */
@@ -413,7 +430,10 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
pci_save_state(pdev);
pci_disable_device(pdev);
pci_ignore_hotplug(pdev);
- pci_set_power_state(pdev, PCI_D3cold);
+ if (amdgpu_is_atpx_hybrid())
+ pci_set_power_state(pdev, PCI_D3cold);
+ else if (!amdgpu_has_atpx_dgpu_power_cntl())
+ pci_set_power_state(pdev, PCI_D3hot);
drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
return 0;
@@ -430,7 +450,9 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- pci_set_power_state(pdev, PCI_D0);
+ if (amdgpu_is_atpx_hybrid() ||
+ !amdgpu_has_atpx_dgpu_power_cntl())
+ pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
ret = pci_enable_device(pdev);
if (ret)
@@ -515,7 +537,7 @@ static struct drm_driver kms_driver = {
.driver_features =
DRIVER_USE_AGP |
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
- DRIVER_PRIME | DRIVER_RENDER,
+ DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET,
.dev_priv_size = 0,
.load = amdgpu_driver_load_kms,
.open = amdgpu_driver_open_kms,
@@ -590,7 +612,6 @@ static int __init amdgpu_init(void)
DRM_INFO("amdgpu kernel modesetting enabled.\n");
driver = &kms_driver;
pdriver = &amdgpu_kms_pci_driver;
- driver->driver_features |= DRIVER_MODESET;
driver->num_ioctls = amdgpu_max_kms_ioctl;
amdgpu_register_atpx_handler();
/* let modprobe override vga console setting */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index d1558768c..0b109aebf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -204,16 +204,25 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
if (seq != ring->fence_drv.sync_seq)
amdgpu_fence_schedule_fallback(ring);
- while (last_seq != seq) {
+ if (unlikely(seq == last_seq))
+ return;
+
+ last_seq &= drv->num_fences_mask;
+ seq &= drv->num_fences_mask;
+
+ do {
struct fence *fence, **ptr;
- ptr = &drv->fences[++last_seq & drv->num_fences_mask];
+ ++last_seq;
+ last_seq &= drv->num_fences_mask;
+ ptr = &drv->fences[last_seq];
/* There is always exactly one thread signaling this fence slot */
fence = rcu_dereference_protected(*ptr, 1);
RCU_INIT_POINTER(*ptr, NULL);
- BUG_ON(!fence);
+ if (!fence)
+ continue;
r = fence_signal(fence);
if (!r)
@@ -222,7 +231,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
BUG();
fence_put(fence);
- }
+ } while (last_seq != seq);
}
/**
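
The rewritten loop above first reduces both sequence numbers modulo the power-of-two number of fence slots and then advances slot by slot with the same mask, skipping an empty slot instead of hitting BUG_ON(). A stand-alone walk of the same arithmetic, with an illustrative slot count and sequence values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t num_fences_mask = 8 - 1;	/* 8 fence slots */
	uint32_t last_seq = 253, seq = 258;	/* 32-bit seq counters */

	last_seq &= num_fences_mask;		/* 253 & 7 == 5 */
	seq &= num_fences_mask;			/* 258 & 7 == 2 */
	do {
		++last_seq;
		last_seq &= num_fences_mask;
		printf("signal slot %u\n", last_seq);	/* 6, 7, 0, 1, 2 */
	} while (last_seq != seq);
	return 0;
}
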
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 8fab64860..88fbed238 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -503,7 +503,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (r)
goto error_print;
- amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates);
+ amdgpu_vm_get_pt_bos(adev, bo_va->vm, &duplicates);
list_for_each_entry(entry, &list, head) {
domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
/* if anything is swapped out don't swap it in here,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 9f95da4f0..a074edd95 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -70,3 +70,47 @@ void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg)
}
}
}
+
+/**
+ * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
+ *
+ * @mask: array in which the per-shader array disable masks will be stored
+ * @max_se: number of SEs
+ * @max_sh: number of SHs
+ *
+ * The bitmask of CUs to be disabled in the shader array determined by se and
+ * sh is stored in mask[se * max_sh + sh].
+ */
+void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh)
+{
+ unsigned se, sh, cu;
+ const char *p;
+
+ memset(mask, 0, sizeof(*mask) * max_se * max_sh);
+
+ if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
+ return;
+
+ p = amdgpu_disable_cu;
+ for (;;) {
+ char *next;
+ int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
+ if (ret < 3) {
+ DRM_ERROR("amdgpu: could not parse disable_cu\n");
+ return;
+ }
+
+ if (se < max_se && sh < max_sh && cu < 16) {
+ DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
+ mask[se * max_sh + sh] |= 1u << cu;
+ } else {
+ DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
+ se, sh, cu);
+ }
+
+ next = strchr(p, ',');
+ if (!next)
+ break;
+ p = next + 1;
+ }
+}
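
In user terms the new parameter takes comma-separated se.sh.cu triples, e.g. amdgpu.disable_cu=1.0.3,2.0.7 on the kernel command line, and each triple sets one bit as computed above. A worked example of the index/bit math, with a made-up topology:

#include <stdio.h>

int main(void)
{
	unsigned max_sh = 1;			/* pretend: 4 SEs x 1 SH */
	unsigned mask[4 * 1] = { 0 };
	unsigned se = 2, sh = 0, cu = 5;	/* from "2.0.5" */

	/* mask[se * max_sh + sh] |= 1u << cu, as in the parser above */
	mask[se * max_sh + sh] |= 1u << cu;
	printf("mask[%u] = 0x%x\n", se * max_sh + sh,
	       mask[se * max_sh + sh]);
	return 0;
}
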
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index dc06cbda7..51321e154 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -27,4 +27,6 @@
int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg);
void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg);
+void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 194cfc1a8..ec1282af2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -33,6 +33,8 @@
#include "amdgpu.h"
#include "atom.h"
+#define AMDGPU_IB_TEST_TIMEOUT msecs_to_jiffies(1000)
+
/*
* IB
* IBs (Indirect Buffers) and areas of GPU accessible memory where
@@ -122,7 +124,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
bool skip_preamble, need_ctx_switch;
unsigned patch_offset = ~0;
struct amdgpu_vm *vm;
- struct fence *hwf;
uint64_t ctx;
unsigned i;
@@ -160,10 +161,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
patch_offset = amdgpu_ring_init_cond_exec(ring);
if (vm) {
- r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr,
- job->gds_base, job->gds_size,
- job->gws_base, job->gws_size,
- job->oa_base, job->oa_size);
+ r = amdgpu_vm_flush(ring, job);
if (r) {
amdgpu_ring_undo(ring);
return r;
@@ -193,7 +191,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (ring->funcs->emit_hdp_invalidate)
amdgpu_ring_emit_hdp_invalidate(ring);
- r = amdgpu_fence_emit(ring, &hwf);
+ r = amdgpu_fence_emit(ring, f);
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
if (job && job->vm_id)
@@ -203,17 +201,11 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
}
/* wrap the last IB with fence */
- if (job && job->uf_bo) {
- uint64_t addr = amdgpu_bo_gpu_offset(job->uf_bo);
-
- addr += job->uf_offset;
- amdgpu_ring_emit_fence(ring, addr, job->uf_sequence,
+ if (job && job->uf_addr) {
+ amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
AMDGPU_FENCE_FLAG_64BIT);
}
- if (f)
- *f = fence_get(hwf);
-
if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
amdgpu_ring_patch_cond_exec(ring, patch_offset);
@@ -296,7 +288,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
if (!ring || !ring->ready)
continue;
- r = amdgpu_ring_test_ib(ring);
+ r = amdgpu_ring_test_ib(ring, AMDGPU_IB_TEST_TIMEOUT);
if (r) {
ring->ready = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 835a3fa8d..278708f5a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -383,6 +383,18 @@ int amdgpu_irq_update(struct amdgpu_device *adev,
return r;
}
+void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
+{
+ int i, j;
+ for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; i++) {
+ struct amdgpu_irq_src *src = adev->irq.sources[i];
+ if (!src)
+ continue;
+ for (j = 0; j < src->num_types; j++)
+ amdgpu_irq_update(adev, src, j);
+ }
+}
+
/**
* amdgpu_irq_get - enable interrupt
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index e124b59f3..7ef09352e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -94,6 +94,7 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type);
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type);
+void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev);
int amdgpu_irq_add_domain(struct amdgpu_device *adev);
void amdgpu_irq_remove_domain(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index f0dafa514..6674d40eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -28,21 +28,15 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
-static void amdgpu_job_free_handler(struct work_struct *ws)
+static void amdgpu_job_timedout(struct amd_sched_job *s_job)
{
- struct amdgpu_job *job = container_of(ws, struct amdgpu_job, base.work_free_job);
- amd_sched_job_put(&job->base);
-}
+ struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
-void amdgpu_job_timeout_func(struct work_struct *work)
-{
- struct amdgpu_job *job = container_of(work, struct amdgpu_job, base.work_tdr.work);
DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
- job->base.sched->name,
- (uint32_t)atomic_read(&job->ring->fence_drv.last_seq),
- job->ring->fence_drv.sync_seq);
-
- amd_sched_job_put(&job->base);
+ job->base.sched->name,
+ atomic_read(&job->ring->fence_drv.last_seq),
+ job->ring->fence_drv.sync_seq);
+ amdgpu_gpu_reset(job->adev);
}
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -63,7 +57,6 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
(*job)->vm = vm;
(*job)->ibs = (void *)&(*job)[1];
(*job)->num_ibs = num_ibs;
- INIT_WORK(&(*job)->base.work_free_job, amdgpu_job_free_handler);
amdgpu_sync_create(&(*job)->sync);
@@ -86,27 +79,33 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
return r;
}
-void amdgpu_job_free(struct amdgpu_job *job)
+void amdgpu_job_free_resources(struct amdgpu_job *job)
{
- unsigned i;
struct fence *f;
+ unsigned i;
+
/* use sched fence if available */
- f = (job->base.s_fence)? &job->base.s_fence->base : job->fence;
+ f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
for (i = 0; i < job->num_ibs; ++i)
- amdgpu_sa_bo_free(job->adev, &job->ibs[i].sa_bo, f);
- fence_put(job->fence);
+ amdgpu_ib_free(job->adev, &job->ibs[i], f);
+}
- amdgpu_bo_unref(&job->uf_bo);
- amdgpu_sync_free(&job->sync);
+void amdgpu_job_free_cb(struct amd_sched_job *s_job)
+{
+ struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
- if (!job->base.use_sched)
- kfree(job);
+ fence_put(job->fence);
+ amdgpu_sync_free(&job->sync);
+ kfree(job);
}
-void amdgpu_job_free_func(struct kref *refcount)
+void amdgpu_job_free(struct amdgpu_job *job)
{
- struct amdgpu_job *job = container_of(refcount, struct amdgpu_job, base.refcount);
+ amdgpu_job_free_resources(job);
+
+ fence_put(job->fence);
+ amdgpu_sync_free(&job->sync);
kfree(job);
}
@@ -114,22 +113,20 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct amd_sched_entity *entity, void *owner,
struct fence **f)
{
- struct fence *fence;
int r;
job->ring = ring;
if (!f)
return -EINVAL;
- r = amd_sched_job_init(&job->base, &ring->sched,
- entity, amdgpu_job_timeout_func,
- amdgpu_job_free_func, owner, &fence);
+ r = amd_sched_job_init(&job->base, &ring->sched, entity, owner);
if (r)
return r;
job->owner = owner;
job->ctx = entity->fence_context;
- *f = fence_get(fence);
+ *f = fence_get(&job->base.s_fence->finished);
+ amdgpu_job_free_resources(job);
amd_sched_entity_push_job(&job->base);
return 0;
@@ -147,8 +144,8 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
int r;
r = amdgpu_vm_grab_id(vm, ring, &job->sync,
- &job->base.s_fence->base,
- &job->vm_id, &job->vm_pd_addr);
+ &job->base.s_fence->finished,
+ job);
if (r)
DRM_ERROR("Error getting VM ID (%d)\n", r);
@@ -170,29 +167,24 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
}
job = to_amdgpu_job(sched_job);
- r = amdgpu_sync_wait(&job->sync);
- if (r) {
- DRM_ERROR("failed to sync wait (%d)\n", r);
- return NULL;
- }
+ BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
trace_amdgpu_sched_run_job(job);
r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
job->sync.last_vm_update, job, &fence);
- if (r) {
+ if (r)
DRM_ERROR("Error scheduling IBs (%d)\n", r);
- goto err;
- }
-err:
- job->fence = fence;
- amdgpu_job_free(job);
+ /* if gpu reset, hw fence will be replaced here */
+ fence_put(job->fence);
+ job->fence = fence_get(fence);
+ amdgpu_job_free_resources(job);
return fence;
}
const struct amd_sched_backend_ops amdgpu_sched_ops = {
.dependency = amdgpu_job_dependency,
.run_job = amdgpu_job_run,
- .begin_job = amd_sched_job_begin,
- .finish_job = amd_sched_job_finish,
+ .timedout_job = amdgpu_job_timedout,
+ .free_job = amdgpu_job_free_cb
};
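
The ops table above reflects the reworked scheduler contract: timeout handling moves from a driver-managed work item to a timedout_job callback, and job lifetime moves from refcounting to a free_job callback. A compilable sketch of that callback shape, using stand-in types rather than the real amd_sched structures:

#include <stdio.h>

struct sched_job { const char *name; };
struct fence;

struct sched_backend_ops {
	struct fence *(*dependency)(struct sched_job *job);
	struct fence *(*run_job)(struct sched_job *job);
	void (*timedout_job)(struct sched_job *job);	/* was a work item */
	void (*free_job)(struct sched_job *job);	/* was refcount-based */
};

static void my_timedout(struct sched_job *job)
{
	/* like amdgpu_job_timedout(): report, then kick a GPU reset */
	printf("job %s timed out, resetting\n", job->name);
}

static void my_free(struct sched_job *job)
{
	printf("freeing %s\n", job->name);
}

static const struct sched_backend_ops ops = {
	.timedout_job = my_timedout,
	.free_job = my_free,
};

int main(void)
{
	struct sched_job job = { "gfx" };

	ops.timedout_job(&job);
	ops.free_job(&job);
	return 0;
}
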
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index d851ea150..d942654a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -60,7 +60,10 @@ int amdgpu_driver_unload_kms(struct drm_device *dev)
if (adev->rmmio == NULL)
goto done_free;
- pm_runtime_get_sync(dev->dev);
+ if (amdgpu_device_is_px(dev)) {
+ pm_runtime_get_sync(dev->dev);
+ pm_runtime_forbid(dev->dev);
+ }
amdgpu_amdkfd_device_fini(adev);
@@ -135,13 +138,75 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
}
out:
- if (r)
+ if (r) {
+ /* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
+ if (adev->rmmio && amdgpu_device_is_px(dev))
+ pm_runtime_put_noidle(dev->dev);
amdgpu_driver_unload_kms(dev);
-
+ }
return r;
}
+static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
+ struct drm_amdgpu_query_fw *query_fw,
+ struct amdgpu_device *adev)
+{
+ switch (query_fw->fw_type) {
+ case AMDGPU_INFO_FW_VCE:
+ fw_info->ver = adev->vce.fw_version;
+ fw_info->feature = adev->vce.fb_version;
+ break;
+ case AMDGPU_INFO_FW_UVD:
+ fw_info->ver = adev->uvd.fw_version;
+ fw_info->feature = 0;
+ break;
+ case AMDGPU_INFO_FW_GMC:
+ fw_info->ver = adev->mc.fw_version;
+ fw_info->feature = 0;
+ break;
+ case AMDGPU_INFO_FW_GFX_ME:
+ fw_info->ver = adev->gfx.me_fw_version;
+ fw_info->feature = adev->gfx.me_feature_version;
+ break;
+ case AMDGPU_INFO_FW_GFX_PFP:
+ fw_info->ver = adev->gfx.pfp_fw_version;
+ fw_info->feature = adev->gfx.pfp_feature_version;
+ break;
+ case AMDGPU_INFO_FW_GFX_CE:
+ fw_info->ver = adev->gfx.ce_fw_version;
+ fw_info->feature = adev->gfx.ce_feature_version;
+ break;
+ case AMDGPU_INFO_FW_GFX_RLC:
+ fw_info->ver = adev->gfx.rlc_fw_version;
+ fw_info->feature = adev->gfx.rlc_feature_version;
+ break;
+ case AMDGPU_INFO_FW_GFX_MEC:
+ if (query_fw->index == 0) {
+ fw_info->ver = adev->gfx.mec_fw_version;
+ fw_info->feature = adev->gfx.mec_feature_version;
+ } else if (query_fw->index == 1) {
+ fw_info->ver = adev->gfx.mec2_fw_version;
+ fw_info->feature = adev->gfx.mec2_feature_version;
+ } else
+ return -EINVAL;
+ break;
+ case AMDGPU_INFO_FW_SMC:
+ fw_info->ver = adev->pm.fw_version;
+ fw_info->feature = 0;
+ break;
+ case AMDGPU_INFO_FW_SDMA:
+ if (query_fw->index >= adev->sdma.num_instances)
+ return -EINVAL;
+ fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
+ fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
/*
* Userspace get information ioctl
*/
@@ -288,67 +353,20 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
}
case AMDGPU_INFO_TIMESTAMP:
- ui64 = amdgpu_asic_get_gpu_clock_counter(adev);
+ ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_FW_VERSION: {
struct drm_amdgpu_info_firmware fw_info;
+ int ret;
/* We only support one instance of each IP block right now. */
if (info->query_fw.ip_instance != 0)
return -EINVAL;
- switch (info->query_fw.fw_type) {
- case AMDGPU_INFO_FW_VCE:
- fw_info.ver = adev->vce.fw_version;
- fw_info.feature = adev->vce.fb_version;
- break;
- case AMDGPU_INFO_FW_UVD:
- fw_info.ver = adev->uvd.fw_version;
- fw_info.feature = 0;
- break;
- case AMDGPU_INFO_FW_GMC:
- fw_info.ver = adev->mc.fw_version;
- fw_info.feature = 0;
- break;
- case AMDGPU_INFO_FW_GFX_ME:
- fw_info.ver = adev->gfx.me_fw_version;
- fw_info.feature = adev->gfx.me_feature_version;
- break;
- case AMDGPU_INFO_FW_GFX_PFP:
- fw_info.ver = adev->gfx.pfp_fw_version;
- fw_info.feature = adev->gfx.pfp_feature_version;
- break;
- case AMDGPU_INFO_FW_GFX_CE:
- fw_info.ver = adev->gfx.ce_fw_version;
- fw_info.feature = adev->gfx.ce_feature_version;
- break;
- case AMDGPU_INFO_FW_GFX_RLC:
- fw_info.ver = adev->gfx.rlc_fw_version;
- fw_info.feature = adev->gfx.rlc_feature_version;
- break;
- case AMDGPU_INFO_FW_GFX_MEC:
- if (info->query_fw.index == 0) {
- fw_info.ver = adev->gfx.mec_fw_version;
- fw_info.feature = adev->gfx.mec_feature_version;
- } else if (info->query_fw.index == 1) {
- fw_info.ver = adev->gfx.mec2_fw_version;
- fw_info.feature = adev->gfx.mec2_feature_version;
- } else
- return -EINVAL;
- break;
- case AMDGPU_INFO_FW_SMC:
- fw_info.ver = adev->pm.fw_version;
- fw_info.feature = 0;
- break;
- case AMDGPU_INFO_FW_SDMA:
- if (info->query_fw.index >= adev->sdma.num_instances)
- return -EINVAL;
- fw_info.ver = adev->sdma.instance[info->query_fw.index].fw_version;
- fw_info.feature = adev->sdma.instance[info->query_fw.index].feature_version;
- break;
- default:
- return -EINVAL;
- }
+ ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
+ if (ret)
+ return ret;
+
return copy_to_user(out, &fw_info,
min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
}
@@ -566,6 +584,9 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
+ amdgpu_uvd_free_handles(adev, file_priv);
+ amdgpu_vce_free_handles(adev, file_priv);
+
amdgpu_vm_fini(adev, &fpriv->vm);
idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
@@ -590,10 +611,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
void amdgpu_driver_preclose_kms(struct drm_device *dev,
struct drm_file *file_priv)
{
- struct amdgpu_device *adev = dev->dev_private;
-
- amdgpu_uvd_free_handles(adev, file_priv);
- amdgpu_vce_free_handles(adev, file_priv);
}
/*
@@ -756,3 +773,130 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+
+static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct drm_amdgpu_info_firmware fw_info;
+ struct drm_amdgpu_query_fw query_fw;
+ int ret, i;
+
+ /* VCE */
+ query_fw.fw_type = AMDGPU_INFO_FW_VCE;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* UVD */
+ query_fw.fw_type = AMDGPU_INFO_FW_UVD;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* GMC */
+ query_fw.fw_type = AMDGPU_INFO_FW_GMC;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* ME */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* PFP */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* CE */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* RLC */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* MEC */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
+ query_fw.index = 0;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* MEC2 */
+ if (adev->asic_type == CHIP_KAVERI ||
+ (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) {
+ query_fw.index = 1;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+ }
+
+ /* SMC */
+ query_fw.fw_type = AMDGPU_INFO_FW_SMC;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* SDMA */
+ query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ query_fw.index = i;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
+ i, fw_info.feature, fw_info.ver);
+ }
+
+ return 0;
+}
+
+static const struct drm_info_list amdgpu_firmware_info_list[] = {
+ {"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
+};
+#endif
+
+int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
+ ARRAY_SIZE(amdgpu_firmware_info_list));
+#else
+ return 0;
+#endif
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 7ecea83ce..6f0873c75 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -589,6 +589,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem)
{
struct amdgpu_bo *rbo;
+ struct ttm_mem_reg *old_mem = &bo->mem;
if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
return;
@@ -602,6 +603,8 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
/* move_notify is called before move happens */
amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem);
+
+ trace_amdgpu_ttm_bo_move(rbo, new_mem->mem_type, old_mem->mem_type);
}
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 0e13d80d2..5cc7052e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -305,7 +305,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
char *table = NULL;
- int size, i;
+ int size;
if (adev->pp_enabled)
size = amdgpu_dpm_get_pp_table(adev, &table);
@@ -315,10 +315,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
if (size >= PAGE_SIZE)
size = PAGE_SIZE - 1;
- for (i = 0; i < size; i++) {
- sprintf(buf + i, "%02x", table[i]);
- }
- sprintf(buf + i, "\n");
+ memcpy(buf, table, size);
return size;
}
@@ -347,6 +344,8 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
if (adev->pp_enabled)
size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
+ else if (adev->pm.funcs->print_clock_levels)
+ size = adev->pm.funcs->print_clock_levels(adev, PP_SCLK, buf);
return size;
}
@@ -363,7 +362,9 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
uint32_t i, mask = 0;
char sub_str[2];
- for (i = 0; i < strlen(buf) - 1; i++) {
+ for (i = 0; i < strlen(buf); i++) {
+ if (*(buf + i) == '\n')
+ continue;
sub_str[0] = *(buf + i);
sub_str[1] = '\0';
ret = kstrtol(sub_str, 0, &level);
@@ -377,6 +378,8 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
if (adev->pp_enabled)
amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
+ else if (adev->pm.funcs->force_clock_level)
+ adev->pm.funcs->force_clock_level(adev, PP_SCLK, mask);
fail:
return count;
}
@@ -391,6 +394,8 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
if (adev->pp_enabled)
size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
+ else if (adev->pm.funcs->print_clock_levels)
+ size = adev->pm.funcs->print_clock_levels(adev, PP_MCLK, buf);
return size;
}
@@ -407,7 +412,9 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
uint32_t i, mask = 0;
char sub_str[2];
- for (i = 0; i < strlen(buf) - 1; i++) {
+ for (i = 0; i < strlen(buf); i++) {
+ if (*(buf + i) == '\n')
+ continue;
sub_str[0] = *(buf + i);
sub_str[1] = '\0';
ret = kstrtol(sub_str, 0, &level);
@@ -421,6 +428,8 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
if (adev->pp_enabled)
amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
+ else if (adev->pm.funcs->force_clock_level)
+ adev->pm.funcs->force_clock_level(adev, PP_MCLK, mask);
fail:
return count;
}
@@ -435,6 +444,8 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
if (adev->pp_enabled)
size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
+ else if (adev->pm.funcs->print_clock_levels)
+ size = adev->pm.funcs->print_clock_levels(adev, PP_PCIE, buf);
return size;
}
@@ -451,7 +462,9 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
uint32_t i, mask = 0;
char sub_str[2];
- for (i = 0; i < strlen(buf) - 1; i++) {
+ for (i = 0; i < strlen(buf); i++) {
+ if (*(buf + i) == '\n')
+ continue;
sub_str[0] = *(buf + i);
sub_str[1] = '\0';
ret = kstrtol(sub_str, 0, &level);
@@ -465,6 +478,100 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
if (adev->pp_enabled)
amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
+ else if (adev->pm.funcs->force_clock_level)
+ adev->pm.funcs->force_clock_level(adev, PP_PCIE, mask);
+fail:
+ return count;
+}
+
+static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ uint32_t value = 0;
+
+ if (adev->pp_enabled)
+ value = amdgpu_dpm_get_sclk_od(adev);
+ else if (adev->pm.funcs->get_sclk_od)
+ value = adev->pm.funcs->get_sclk_od(adev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
+
+static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ int ret;
+ long int value;
+
+ ret = kstrtol(buf, 0, &value);
+
+ if (ret) {
+ count = -EINVAL;
+ goto fail;
+ }
+
+ if (adev->pp_enabled) {
+ amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
+ } else if (adev->pm.funcs->set_sclk_od) {
+ adev->pm.funcs->set_sclk_od(adev, (uint32_t)value);
+ adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
+ amdgpu_pm_compute_clocks(adev);
+ }
+
+fail:
+ return count;
+}
+
+static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ uint32_t value = 0;
+
+ if (adev->pp_enabled)
+ value = amdgpu_dpm_get_mclk_od(adev);
+ else if (adev->pm.funcs->get_mclk_od)
+ value = adev->pm.funcs->get_mclk_od(adev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
+
+static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ int ret;
+ long int value;
+
+ ret = kstrtol(buf, 0, &value);
+
+ if (ret) {
+ count = -EINVAL;
+ goto fail;
+ }
+
+ if (adev->pp_enabled) {
+ amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
+ } else if (adev->pm.funcs->set_mclk_od) {
+ adev->pm.funcs->set_mclk_od(adev, (uint32_t)value);
+ adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
+ amdgpu_pm_compute_clocks(adev);
+ }
+
fail:
return count;
}
@@ -490,6 +597,12 @@ static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
amdgpu_get_pp_dpm_pcie,
amdgpu_set_pp_dpm_pcie);
+static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
+ amdgpu_get_pp_sclk_od,
+ amdgpu_set_pp_sclk_od);
+static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
+ amdgpu_get_pp_mclk_od,
+ amdgpu_set_pp_mclk_od);
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
struct device_attribute *attr,
@@ -1108,22 +1221,34 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
DRM_ERROR("failed to create device file pp_table\n");
return ret;
}
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_sclk\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_mclk\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_pcie\n");
- return ret;
- }
}
+
+ ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_dpm_sclk\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_dpm_mclk\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_dpm_pcie\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_sclk_od\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od);
+ if (ret) {
+ DRM_ERROR("failed to create device file pp_mclk_od\n");
+ return ret;
+ }
+
ret = amdgpu_debugfs_pm_init(adev);
if (ret) {
DRM_ERROR("Failed to register debugfs file for dpm!\n");
@@ -1146,10 +1271,12 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
device_remove_file(adev->dev, &dev_attr_pp_cur_state);
device_remove_file(adev->dev, &dev_attr_pp_force_state);
device_remove_file(adev->dev, &dev_attr_pp_table);
- device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
- device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
- device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
}
+ device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
+ device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
+ device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
+ device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
+ device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
}
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
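
The new pp_sclk_od/pp_mclk_od attributes read back and accept an engine/memory clock overdrive value; how the number is interpreted is up to the power backend (with powerplay it is handed to amdgpu_dpm_set_sclk_od() and a power-state readjust is dispatched). A user-space sketch, assuming the usual sysfs location under the card's device directory (adjust card0 for your system; needs root):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/class/drm/card0/device/pp_sclk_od";
	char buf[16] = { 0 };
	int fd = open(path, O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (read(fd, buf, sizeof(buf) - 1) > 0)
		printf("current sclk od: %s", buf);
	lseek(fd, 0, SEEK_SET);
	if (write(fd, "5", 1) != 1)	/* example: request an od value of 5 */
		perror("write");
	close(fd);
	return 0;
}
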
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 82256558e..c5738a22b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -52,6 +52,7 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev)
pp_init->chip_family = adev->family;
pp_init->chip_id = adev->asic_type;
pp_init->device = amdgpu_cgs_create_device(adev);
+ pp_init->powercontainment_enabled = amdgpu_powercontainment;
ret = amd_powerplay_init(pp_init, amd_pp);
kfree(pp_init);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 870f94942..85aeb0a80 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -28,6 +28,7 @@
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@@ -48,6 +49,7 @@
*/
static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
+static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring);
/**
* amdgpu_ring_alloc - allocate space on the ring buffer
@@ -73,6 +75,10 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
ring->count_dw = ndw;
ring->wptr_old = ring->wptr;
+
+ if (ring->funcs->begin_use)
+ ring->funcs->begin_use(ring);
+
return 0;
}
@@ -125,6 +131,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
mb();
amdgpu_ring_set_wptr(ring);
+
+ if (ring->funcs->end_use)
+ ring->funcs->end_use(ring);
}
/**
@@ -137,78 +146,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
void amdgpu_ring_undo(struct amdgpu_ring *ring)
{
ring->wptr = ring->wptr_old;
-}
-
-/**
- * amdgpu_ring_backup - Back up the content of a ring
- *
- * @ring: the ring we want to back up
- *
- * Saves all unprocessed commits from a ring, returns the number of dwords saved.
- */
-unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
- uint32_t **data)
-{
- unsigned size, ptr, i;
-
- *data = NULL;
-
- if (ring->ring_obj == NULL)
- return 0;
-
- /* it doesn't make sense to save anything if all fences are signaled */
- if (!amdgpu_fence_count_emitted(ring))
- return 0;
-
- ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
-
- size = ring->wptr + (ring->ring_size / 4);
- size -= ptr;
- size &= ring->ptr_mask;
- if (size == 0)
- return 0;
-
- /* and then save the content of the ring */
- *data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
- if (!*data)
- return 0;
- for (i = 0; i < size; ++i) {
- (*data)[i] = ring->ring[ptr++];
- ptr &= ring->ptr_mask;
- }
-
- return size;
-}
-
-/**
- * amdgpu_ring_restore - append saved commands to the ring again
- *
- * @ring: ring to append commands to
- * @size: number of dwords we want to write
- * @data: saved commands
- *
- * Allocates space on the ring and restore the previously saved commands.
- */
-int amdgpu_ring_restore(struct amdgpu_ring *ring,
- unsigned size, uint32_t *data)
-{
- int i, r;
-
- if (!size || !data)
- return 0;
-
- /* restore the saved ring content */
- r = amdgpu_ring_alloc(ring, size);
- if (r)
- return r;
-
- for (i = 0; i < size; ++i) {
- amdgpu_ring_write(ring, data[i]);
- }
- amdgpu_ring_commit(ring);
- kfree(data);
- return 0;
+ if (ring->funcs->end_use)
+ ring->funcs->end_use(ring);
}
/**
@@ -260,14 +200,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
return r;
}
- r = amdgpu_wb_get(adev, &ring->next_rptr_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring next_rptr wb alloc failed\n", r);
- return r;
- }
- ring->next_rptr_gpu_addr = adev->wb.gpu_addr + ring->next_rptr_offs * 4;
- ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs];
-
r = amdgpu_wb_get(adev, &ring->cond_exe_offs);
if (r) {
dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
@@ -276,7 +208,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4);
ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs];
- spin_lock_init(&ring->fence_lock);
r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
if (r) {
dev_err(adev->dev, "failed initializing fences (%d).\n", r);
@@ -310,6 +241,9 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
}
r = amdgpu_bo_kmap(ring->ring_obj,
(void **)&ring->ring);
+
+ memset((void *)ring->ring, 0, ring->ring_size);
+
amdgpu_bo_unreserve(ring->ring_obj);
if (r) {
dev_err(adev->dev, "(%d) ring map failed\n", r);
@@ -347,7 +281,6 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
amdgpu_wb_free(ring->adev, ring->fence_offs);
amdgpu_wb_free(ring->adev, ring->rptr_offs);
amdgpu_wb_free(ring->adev, ring->wptr_offs);
- amdgpu_wb_free(ring->adev, ring->next_rptr_offs);
if (ring_obj) {
r = amdgpu_bo_reserve(ring_obj, false);
@@ -358,6 +291,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
}
amdgpu_bo_unref(&ring_obj);
}
+ amdgpu_debugfs_ring_fini(ring);
}
/*
@@ -365,57 +299,62 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
*/
#if defined(CONFIG_DEBUG_FS)
-static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
+/* The file layout is a 12-byte header consisting of
+ * - rptr
+ * - wptr
+ * - the driver's copy of wptr
+ *
+ * followed by n words of ring data
+ */
+static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
- int roffset = (unsigned long)node->info_ent->data;
- struct amdgpu_ring *ring = (void *)(((uint8_t*)adev) + roffset);
- uint32_t rptr, wptr, rptr_next;
- unsigned i;
-
- wptr = amdgpu_ring_get_wptr(ring);
- seq_printf(m, "wptr: 0x%08x [%5d]\n", wptr, wptr);
-
- rptr = amdgpu_ring_get_rptr(ring);
- rptr_next = le32_to_cpu(*ring->next_rptr_cpu_addr);
-
- seq_printf(m, "rptr: 0x%08x [%5d]\n", rptr, rptr);
-
- seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
- ring->wptr, ring->wptr);
-
- if (!ring->ready)
- return 0;
-
- /* print 8 dw before current rptr as often it's the last executed
- * packet that is the root issue
- */
- i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
- while (i != rptr) {
- seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
- if (i == rptr)
- seq_puts(m, " *");
- if (i == rptr_next)
- seq_puts(m, " #");
- seq_puts(m, "\n");
- i = (i + 1) & ring->ptr_mask;
+ struct amdgpu_ring *ring = (struct amdgpu_ring*)f->f_inode->i_private;
+ int r, i;
+ uint32_t value, result, early[3];
+
+ if (*pos & 3 || size & 3)
+ return -EINVAL;
+
+ result = 0;
+
+ if (*pos < 12) {
+ early[0] = amdgpu_ring_get_rptr(ring);
+ early[1] = amdgpu_ring_get_wptr(ring);
+ early[2] = ring->wptr;
+ for (i = *pos / 4; i < 3 && size; i++) {
+ r = put_user(early[i], (uint32_t *)buf);
+ if (r)
+ return r;
+ buf += 4;
+ result += 4;
+ size -= 4;
+ *pos += 4;
+ }
}
- while (i != wptr) {
- seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
- if (i == rptr)
- seq_puts(m, " *");
- if (i == rptr_next)
- seq_puts(m, " #");
- seq_puts(m, "\n");
- i = (i + 1) & ring->ptr_mask;
+
+ while (size) {
+ if (*pos >= (ring->ring_size + 12))
+ return result;
+
+ value = ring->ring[(*pos - 12)/4];
+ r = put_user(value, (uint32_t*)buf);
+ if (r)
+ return r;
+ buf += 4;
+ result += 4;
+ size -= 4;
+ *pos += 4;
}
- return 0;
+
+ return result;
}
-static struct drm_info_list amdgpu_debugfs_ring_info_list[AMDGPU_MAX_RINGS];
-static char amdgpu_debugfs_ring_names[AMDGPU_MAX_RINGS][32];
+static const struct file_operations amdgpu_debugfs_ring_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_ring_read,
+ .llseek = default_llseek
+};
#endif
@@ -423,28 +362,27 @@ static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
- unsigned offset = (uint8_t*)ring - (uint8_t*)adev;
- unsigned i;
- struct drm_info_list *info;
- char *name;
-
- for (i = 0; i < ARRAY_SIZE(amdgpu_debugfs_ring_info_list); ++i) {
- info = &amdgpu_debugfs_ring_info_list[i];
- if (!info->data)
- break;
- }
+ struct drm_minor *minor = adev->ddev->primary;
+ struct dentry *ent, *root = minor->debugfs_root;
+ char name[32];
- if (i == ARRAY_SIZE(amdgpu_debugfs_ring_info_list))
- return -ENOSPC;
-
- name = &amdgpu_debugfs_ring_names[i][0];
sprintf(name, "amdgpu_ring_%s", ring->name);
- info->name = name;
- info->show = amdgpu_debugfs_ring_info;
- info->driver_features = 0;
- info->data = (void*)(uintptr_t)offset;
- return amdgpu_debugfs_add_files(adev, info, 1);
+ ent = debugfs_create_file(name,
+ S_IFREG | S_IRUGO, root,
+ ring, &amdgpu_debugfs_ring_fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
+ i_size_write(ent->d_inode, ring->ring_size + 12);
+ ring->ent = ent;
#endif
return 0;
}
+
+static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring)
+{
+#if defined(CONFIG_DEBUG_FS)
+ debugfs_remove(ring->ent);
+#endif
+}
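A hypothetical user-space reader for the new per-ring debugfs file; the path below is an assumption (the DRM minor number and ring name vary by system):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t hdr[3];
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_ring_gfx", O_RDONLY);

	/* first 12 bytes: rptr, wptr, driver's copy of wptr */
	if (fd < 0 || read(fd, hdr, sizeof(hdr)) != (ssize_t)sizeof(hdr))
		return 1;
	printf("rptr=0x%08x wptr=0x%08x driver wptr=0x%08x\n",
	       hdr[0], hdr[1], hdr[2]);
	close(fd);
	return 0;
}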
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 48618ee32..d8af37a84 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -428,7 +428,7 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
soffset, eoffset, eoffset - soffset);
if (i->fence)
- seq_printf(m, " protected by 0x%08x on context %d",
+ seq_printf(m, " protected by 0x%08x on context %llu",
i->fence->seqno, i->fence->context);
seq_printf(m, "\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 34a92808b..5c8d3022f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -223,13 +223,16 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
}
/**
- * amdgpu_sync_is_idle - test if all fences are signaled
+ * amdgpu_sync_peek_fence - get the next fence not signaled yet
*
* @sync: the sync object
+ * @ring: optional ring to use for test
*
- * Returns true if all fences in the sync object are signaled.
+ * Returns the next fence not signaled yet without removing it from the sync
+ * object.
*/
-bool amdgpu_sync_is_idle(struct amdgpu_sync *sync)
+struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+ struct amdgpu_ring *ring)
{
struct amdgpu_sync_entry *e;
struct hlist_node *tmp;
@@ -237,6 +240,19 @@ bool amdgpu_sync_is_idle(struct amdgpu_sync *sync)
hash_for_each_safe(sync->fences, i, tmp, e, node) {
struct fence *f = e->fence;
+ struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+
+ if (ring && s_fence) {
+ /* For fences from the same ring it is sufficient
+ * that they are scheduled.
+ */
+ if (s_fence->sched == &ring->sched) {
+ if (fence_is_signaled(&s_fence->scheduled))
+ continue;
+
+ return &s_fence->scheduled;
+ }
+ }
if (fence_is_signaled(f)) {
hash_del(&e->node);
@@ -245,58 +261,19 @@ bool amdgpu_sync_is_idle(struct amdgpu_sync *sync)
continue;
}
- return false;
+ return f;
}
- return true;
+ return NULL;
}
/**
- * amdgpu_sync_cycle_fences - move fences from one sync object into another
+ * amdgpu_sync_get_fence - get the next fence from the sync object
*
- * @dst: the destination sync object
- * @src: the source sync object
- * @fence: fence to add to source
+ * @sync: sync object to use
*
- * Remove all fences from source and put them into destination and add
- * fence as new one into source.
+ * Gets and removes the next unsignaled fence from the sync object.
*/
-int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src,
- struct fence *fence)
-{
- struct amdgpu_sync_entry *e, *newone;
- struct hlist_node *tmp;
- int i;
-
- /* Allocate the new entry before moving the old ones */
- newone = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
- if (!newone)
- return -ENOMEM;
-
- hash_for_each_safe(src->fences, i, tmp, e, node) {
- struct fence *f = e->fence;
-
- hash_del(&e->node);
- if (fence_is_signaled(f)) {
- fence_put(f);
- kmem_cache_free(amdgpu_sync_slab, e);
- continue;
- }
-
- if (amdgpu_sync_add_later(dst, f)) {
- kmem_cache_free(amdgpu_sync_slab, e);
- continue;
- }
-
- hash_add(dst->fences, &e->node, f->context);
- }
-
- hash_add(src->fences, &newone->node, fence->context);
- newone->fence = fence_get(fence);
-
- return 0;
-}
-
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
struct amdgpu_sync_entry *e;
@@ -319,25 +296,6 @@ struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
return NULL;
}
-int amdgpu_sync_wait(struct amdgpu_sync *sync)
-{
- struct amdgpu_sync_entry *e;
- struct hlist_node *tmp;
- int i, r;
-
- hash_for_each_safe(sync->fences, i, tmp, e, node) {
- r = fence_wait(e->fence, false);
- if (r)
- return r;
-
- hash_del(&e->node);
- fence_put(e->fence);
- kmem_cache_free(amdgpu_sync_slab, e);
- }
-
- return 0;
-}
-
/**
* amdgpu_sync_free - free the sync object
*
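A minimal sketch of how a caller can replace the removed amdgpu_sync_wait() with the new accessors; error handling is elided and example_wait_all() is illustrative:

static void example_wait_all(struct amdgpu_sync *sync)
{
	struct fence *f;

	/* amdgpu_sync_get_fence() removes the entry and transfers
	 * the fence reference to the caller
	 */
	while ((f = amdgpu_sync_get_fence(sync))) {
		fence_wait(f, false);
		fence_put(f);
	}
}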
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 26a5f4acf..0d8d65eb4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -11,19 +11,68 @@
#define TRACE_SYSTEM amdgpu
#define TRACE_INCLUDE_FILE amdgpu_trace
+TRACE_EVENT(amdgpu_mm_rreg,
+ TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
+ TP_ARGS(did, reg, value),
+ TP_STRUCT__entry(
+ __field(unsigned, did)
+ __field(uint32_t, reg)
+ __field(uint32_t, value)
+ ),
+ TP_fast_assign(
+ __entry->did = did;
+ __entry->reg = reg;
+ __entry->value = value;
+ ),
+ TP_printk("0x%04lx, 0x%04lx, 0x%08lx",
+ (unsigned long)__entry->did,
+ (unsigned long)__entry->reg,
+ (unsigned long)__entry->value)
+);
+
+TRACE_EVENT(amdgpu_mm_wreg,
+ TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
+ TP_ARGS(did, reg, value),
+ TP_STRUCT__entry(
+ __field(unsigned, did)
+ __field(uint32_t, reg)
+ __field(uint32_t, value)
+ ),
+ TP_fast_assign(
+ __entry->did = did;
+ __entry->reg = reg;
+ __entry->value = value;
+ ),
+ TP_printk("0x%04lx, 0x%04lx, 0x%08lx",
+ (unsigned long)__entry->did,
+ (unsigned long)__entry->reg,
+ (unsigned long)__entry->value)
+);
+
TRACE_EVENT(amdgpu_bo_create,
TP_PROTO(struct amdgpu_bo *bo),
TP_ARGS(bo),
TP_STRUCT__entry(
__field(struct amdgpu_bo *, bo)
__field(u32, pages)
+ __field(u32, type)
+ __field(u32, prefer)
+ __field(u32, allow)
+ __field(u32, visible)
),
TP_fast_assign(
__entry->bo = bo;
__entry->pages = bo->tbo.num_pages;
+ __entry->type = bo->tbo.mem.mem_type;
+ __entry->prefer = bo->prefered_domains;
+ __entry->allow = bo->allowed_domains;
+ __entry->visible = bo->flags;
),
- TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
+
+ TP_printk("bo=%p,pages=%u,type=%d,prefered=%d,allowed=%d,visible=%d",
+ __entry->bo, __entry->pages, __entry->type,
+ __entry->prefer, __entry->allow, __entry->visible)
);
TRACE_EVENT(amdgpu_cs,
@@ -64,7 +113,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
__entry->adev = job->adev;
__entry->sched_job = &job->base;
__entry->ib = job->ibs;
- __entry->fence = &job->base.s_fence->base;
+ __entry->fence = &job->base.s_fence->finished;
__entry->ring_name = job->ring->name;
__entry->num_ibs = job->num_ibs;
),
@@ -89,7 +138,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
__entry->adev = job->adev;
__entry->sched_job = &job->base;
__entry->ib = job->ibs;
- __entry->fence = &job->base.s_fence->base;
+ __entry->fence = &job->base.s_fence->finished;
__entry->ring_name = job->ring->name;
__entry->num_ibs = job->num_ibs;
),
@@ -100,24 +149,26 @@ TRACE_EVENT(amdgpu_sched_run_job,
TRACE_EVENT(amdgpu_vm_grab_id,
- TP_PROTO(struct amdgpu_vm *vm, int ring, unsigned vmid,
- uint64_t pd_addr),
- TP_ARGS(vm, ring, vmid, pd_addr),
+ TP_PROTO(struct amdgpu_vm *vm, int ring, struct amdgpu_job *job),
+ TP_ARGS(vm, ring, job),
TP_STRUCT__entry(
__field(struct amdgpu_vm *, vm)
__field(u32, ring)
__field(u32, vmid)
__field(u64, pd_addr)
+ __field(u32, needs_flush)
),
TP_fast_assign(
__entry->vm = vm;
__entry->ring = ring;
- __entry->vmid = vmid;
- __entry->pd_addr = pd_addr;
+ __entry->vmid = job->vm_id;
+ __entry->pd_addr = job->vm_pd_addr;
+ __entry->needs_flush = job->vm_needs_flush;
),
- TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx", __entry->vm,
- __entry->ring, __entry->vmid, __entry->pd_addr)
+ TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx needs_flush=%u",
+ __entry->vm, __entry->ring, __entry->vmid,
+ __entry->pd_addr, __entry->needs_flush)
);
TRACE_EVENT(amdgpu_vm_bo_map,
@@ -244,13 +295,55 @@ TRACE_EVENT(amdgpu_bo_list_set,
TP_STRUCT__entry(
__field(struct amdgpu_bo_list *, list)
__field(struct amdgpu_bo *, bo)
+ __field(u64, bo_size)
),
TP_fast_assign(
__entry->list = list;
__entry->bo = bo;
+ __entry->bo_size = amdgpu_bo_size(bo);
),
- TP_printk("list=%p, bo=%p", __entry->list, __entry->bo)
+ TP_printk("list=%p, bo=%p, bo_size = %Ld",
+ __entry->list,
+ __entry->bo,
+ __entry->bo_size)
+);
+
+TRACE_EVENT(amdgpu_cs_bo_status,
+ TP_PROTO(uint64_t total_bo, uint64_t total_size),
+ TP_ARGS(total_bo, total_size),
+ TP_STRUCT__entry(
+ __field(u64, total_bo)
+ __field(u64, total_size)
+ ),
+
+ TP_fast_assign(
+ __entry->total_bo = total_bo;
+ __entry->total_size = total_size;
+ ),
+ TP_printk("total bo size = %Ld, total bo count = %Ld",
+ __entry->total_bo, __entry->total_size)
+);
+
+TRACE_EVENT(amdgpu_ttm_bo_move,
+ TP_PROTO(struct amdgpu_bo* bo, uint32_t new_placement, uint32_t old_placement),
+ TP_ARGS(bo, new_placement, old_placement),
+ TP_STRUCT__entry(
+ __field(struct amdgpu_bo *, bo)
+ __field(u64, bo_size)
+ __field(u32, new_placement)
+ __field(u32, old_placement)
+ ),
+
+ TP_fast_assign(
+ __entry->bo = bo;
+ __entry->bo_size = amdgpu_bo_size(bo);
+ __entry->new_placement = new_placement;
+ __entry->old_placement = old_placement;
+ ),
+ TP_printk("bo=%p from:%d to %d with size = %Ld",
+ __entry->bo, __entry->old_placement,
+ __entry->new_placement, __entry->bo_size)
);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 46c5297f6..716f2afeb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -286,9 +286,10 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
r = amdgpu_copy_buffer(ring, old_start, new_start,
new_mem->num_pages * PAGE_SIZE, /* bytes */
bo->resv, &fence);
- /* FIXME: handle copy error */
- r = ttm_bo_move_accel_cleanup(bo, fence,
- evict, no_wait_gpu, new_mem);
+ if (r)
+ return r;
+
+ r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
fence_put(fence);
return r;
}
@@ -334,7 +335,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
goto out_cleanup;
}
- r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
+ r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, new_mem);
out_cleanup:
ttm_bo_mem_put(bo, &tmp_mem);
return r;
@@ -367,7 +368,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
return r;
}
- r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
+ r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -396,6 +397,11 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
return -EINVAL;
adev = amdgpu_get_adev(bo->bdev);
+
+ /* remember the eviction */
+ if (evict)
+ atomic64_inc(&adev->num_evictions);
+
if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
amdgpu_move_null(bo, new_mem);
return 0;
@@ -429,7 +435,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
if (r) {
memcpy:
- r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, evict, interruptible,
+ no_wait_gpu, new_mem);
if (r) {
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 3959055eb..91aeed825 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -40,9 +40,16 @@
#include "uvd/uvd_4_2_d.h"
/* 1 second timeout */
-#define UVD_IDLE_TIMEOUT_MS 1000
+#define UVD_IDLE_TIMEOUT msecs_to_jiffies(1000)
+
+/* Firmware versions for VI */
+#define FW_1_65_10 ((1 << 24) | (65 << 16) | (10 << 8))
+#define FW_1_87_11 ((1 << 24) | (87 << 16) | (11 << 8))
+#define FW_1_87_12 ((1 << 24) | (87 << 16) | (12 << 8))
+#define FW_1_37_15 ((1 << 24) | (37 << 16) | (15 << 8))
+
/* Polaris10/11 firmware version */
-#define FW_1_66_16 ((1 << 24) | (66 << 16) | (16 << 8))
+#define FW_1_66_16 ((1 << 24) | (66 << 16) | (16 << 8))
/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
@@ -83,7 +90,6 @@ struct amdgpu_uvd_cs_ctx {
#endif
/*(DEBLOBBED)*/
-static void amdgpu_uvd_note_usage(struct amdgpu_device *adev);
static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
@@ -237,6 +243,23 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
if (!amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
adev->uvd.address_64_bit = true;
+ switch (adev->asic_type) {
+ case CHIP_TONGA:
+ adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10;
+ break;
+ case CHIP_CARRIZO:
+ adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11;
+ break;
+ case CHIP_FIJI:
+ adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12;
+ break;
+ case CHIP_STONEY:
+ adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15;
+ break;
+ default:
+ adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10;
+ }
+
return 0;
}
@@ -337,8 +360,6 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
if (handle != 0 && adev->uvd.filp[i] == filp) {
struct fence *fence;
- amdgpu_uvd_note_usage(adev);
-
r = amdgpu_uvd_get_destroy_msg(ring, handle,
false, &fence);
if (r) {
@@ -429,7 +450,7 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
unsigned fs_in_mb = width_in_mb * height_in_mb;
unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
- unsigned min_ctx_size = 0;
+ unsigned min_ctx_size = ~0;
image_size = width * height;
image_size += image_size / 2;
@@ -548,7 +569,7 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
/* reference picture buffer */
min_dpb_size = image_size * num_dpb_buffer;
- if (adev->asic_type < CHIP_POLARIS10){
+ if (!adev->uvd.use_ctx_buf){
/* macroblock context buffer */
min_dpb_size +=
width_in_mb * height_in_mb * num_dpb_buffer * 192;
@@ -653,7 +674,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
}
DRM_ERROR("No more free UVD handles!\n");
- return -EINVAL;
+ return -ENOSPC;
case 1:
/* it's a decode msg, calc buffer sizes */
@@ -904,8 +925,6 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
return -EINVAL;
}
- amdgpu_uvd_note_usage(ctx.parser->adev);
-
return 0;
}
@@ -959,7 +978,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (direct) {
r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
- job->fence = f;
+ job->fence = fence_get(f);
if (r)
goto err_free;
@@ -1097,24 +1116,18 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
if (fences == 0 && handles == 0) {
if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_uvd(adev, false);
- /* just work around for uvd clock remain high even
- * when uvd dpm disabled on Polaris10 */
- if (adev->asic_type == CHIP_POLARIS10)
- amdgpu_asic_set_uvd_clocks(adev, 0, 0);
} else {
amdgpu_asic_set_uvd_clocks(adev, 0, 0);
}
} else {
- schedule_delayed_work(&adev->uvd.idle_work,
- msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
+ schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
}
}
-static void amdgpu_uvd_note_usage(struct amdgpu_device *adev)
+void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
- set_clocks &= schedule_delayed_work(&adev->uvd.idle_work,
- msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
if (set_clocks) {
if (adev->pm.dpm_enabled) {
@@ -1124,3 +1137,49 @@ static void amdgpu_uvd_note_usage(struct amdgpu_device *adev)
}
}
}
+
+void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
+{
+ schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
+}
+
+/**
+ * amdgpu_uvd_ring_test_ib - test ib execution
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Test if we can successfully execute an IB
+ */
+int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+ struct fence *fence;
+ long r;
+
+ r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+ goto error;
+ }
+
+ r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
+ if (r) {
+ DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+ goto error;
+ }
+
+ r = fence_wait_timeout(fence, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out.\n");
+ r = -ETIMEDOUT;
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+ } else {
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
+ }
+
+ fence_put(fence);
+
+error:
+ return r;
+}
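The FW_x_y_z macros added above pack major/minor/revision into the top three bytes of adev->uvd.fw_version, so the per-ASIC checks are plain integer comparisons. A sketch of the encoding, with an illustrative decode helper:

static inline void example_uvd_fw_decode(uint32_t fw_version,
					 unsigned *maj, unsigned *min,
					 unsigned *rev)
{
	*maj = (fw_version >> 24) & 0xff;	/* e.g. 1 for FW_1_65_10 */
	*min = (fw_version >> 16) & 0xff;	/* e.g. 65 */
	*rev = (fw_version >> 8) & 0xff;	/* e.g. 10 */
}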
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index 9a3b44908..c85000960 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -35,5 +35,8 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
void amdgpu_uvd_free_handles(struct amdgpu_device *adev,
struct drm_file *filp);
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);
+void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring);
+void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring);
+int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 0a08cf930..7f8c44308 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -36,7 +36,7 @@
#include "cikd.h"
/* 1 second timeout */
-#define VCE_IDLE_TIMEOUT_MS 1000
+#define VCE_IDLE_TIMEOUT msecs_to_jiffies(1000)
/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
@@ -76,8 +76,6 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
unsigned ucode_version, version_major, version_minor, binary_id;
int i, r;
- INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
-
switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE:
@@ -188,6 +186,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
adev->vce.filp[i] = NULL;
}
+ INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
+ mutex_init(&adev->vce.idle_mutex);
+
return 0;
}
@@ -211,6 +212,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
amdgpu_ring_fini(&adev->vce.ring[1]);
release_firmware(adev->vce.fw);
+ mutex_destroy(&adev->vce.idle_mutex);
return 0;
}
@@ -301,37 +303,44 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
amdgpu_asic_set_vce_clocks(adev, 0, 0);
}
} else {
- schedule_delayed_work(&adev->vce.idle_work,
- msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
+ schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
}
}
/**
- * amdgpu_vce_note_usage - power up VCE
+ * amdgpu_vce_ring_begin_use - power up VCE
*
- * @adev: amdgpu_device pointer
+ * @ring: amdgpu ring
*
* Make sure VCE is powered up when we want to use it
*/
-static void amdgpu_vce_note_usage(struct amdgpu_device *adev)
+void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
{
- bool streams_changed = false;
- bool set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
- set_clocks &= schedule_delayed_work(&adev->vce.idle_work,
- msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
-
- if (adev->pm.dpm_enabled) {
- /* XXX figure out if the streams changed */
- streams_changed = false;
- }
+ struct amdgpu_device *adev = ring->adev;
+ bool set_clocks;
- if (set_clocks || streams_changed) {
+ mutex_lock(&adev->vce.idle_mutex);
+ set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
+ if (set_clocks) {
if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_vce(adev, true);
} else {
amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
}
}
+ mutex_unlock(&adev->vce.idle_mutex);
+}
+
+/**
+ * amdgpu_vce_ring_end_use - power VCE down
+ *
+ * @ring: amdgpu ring
+ *
+ * Schedule work to power VCE down again
+ */
+void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
+{
+ schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
}
/**
@@ -348,11 +357,10 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
int i, r;
for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
uint32_t handle = atomic_read(&adev->vce.handles[i]);
+
if (!handle || adev->vce.filp[i] != filp)
continue;
- amdgpu_vce_note_usage(adev);
-
r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
if (r)
DRM_ERROR("Error destroying VCE handle (%d)!\n", r);
@@ -428,7 +436,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ib->ptr[i] = 0x0;
r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
- job->fence = f;
+ job->fence = fence_get(f);
if (r)
goto err;
@@ -460,7 +468,6 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct fence *f = NULL;
- uint64_t dummy;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -468,7 +475,6 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
return r;
ib = &job->ibs[0];
- dummy = ib->gpu_addr + 1024;
/* stitch together a VCE destroy msg */
ib->length_dw = 0;
@@ -476,11 +482,14 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
ib->ptr[ib->length_dw++] = handle;
- ib->ptr[ib->length_dw++] = 0x00000014; /* len */
- ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
- ib->ptr[ib->length_dw++] = 0x00000001;
+ ib->ptr[ib->length_dw++] = 0x00000020; /* len */
+ ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
+ ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if no */
+ ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */
+ ib->ptr[ib->length_dw++] = 0x00000000;
+ ib->ptr[ib->length_dw++] = 0x00000000;
+ ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback is not needed, set to 0xffffffff and firmware will not output feedback */
+ ib->ptr[ib->length_dw++] = 0x00000000;
ib->ptr[ib->length_dw++] = 0x00000008; /* len */
ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */
@@ -490,7 +499,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
if (direct) {
r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
- job->fence = f;
+ job->fence = fence_get(f);
if (r)
goto err;
@@ -571,12 +580,10 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
* if we don't have another free session index.
*/
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
- uint32_t handle, bool *allocated)
+ uint32_t handle, uint32_t *allocated)
{
unsigned i;
- *allocated = false;
-
/* validate the handle */
for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
if (atomic_read(&p->adev->vce.handles[i]) == handle) {
@@ -593,7 +600,7 @@ static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
p->adev->vce.filp[i] = p->filp;
p->adev->vce.img_size[i] = 0;
- *allocated = true;
+ *allocated |= 1 << i;
return i;
}
}
@@ -613,15 +620,13 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
unsigned fb_idx = 0, bs_idx = 0;
int session_idx = -1;
- bool destroyed = false;
- bool created = false;
- bool allocated = false;
+ uint32_t destroyed = 0;
+ uint32_t created = 0;
+ uint32_t allocated = 0;
uint32_t tmp, handle = 0;
uint32_t *size = &tmp;
int i, r = 0, idx = 0;
- amdgpu_vce_note_usage(p->adev);
-
while (idx < ib->length_dw) {
uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
@@ -632,30 +637,30 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
goto out;
}
- if (destroyed) {
- DRM_ERROR("No other command allowed after destroy!\n");
- r = -EINVAL;
- goto out;
- }
-
switch (cmd) {
- case 0x00000001: // session
+ case 0x00000001: /* session */
handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
session_idx = amdgpu_vce_validate_handle(p, handle,
&allocated);
- if (session_idx < 0)
- return session_idx;
+ if (session_idx < 0) {
+ r = session_idx;
+ goto out;
+ }
size = &p->adev->vce.img_size[session_idx];
break;
- case 0x00000002: // task info
+ case 0x00000002: /* task info */
fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
break;
- case 0x01000001: // create
- created = true;
- if (!allocated) {
+ case 0x01000001: /* create */
+ created |= 1 << session_idx;
+ if (destroyed & (1 << session_idx)) {
+ destroyed &= ~(1 << session_idx);
+ allocated |= 1 << session_idx;
+
+ } else if (!(allocated & (1 << session_idx))) {
DRM_ERROR("Handle already in use!\n");
r = -EINVAL;
goto out;
@@ -666,16 +671,16 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
8 * 3 / 2;
break;
- case 0x04000001: // config extension
- case 0x04000002: // pic control
- case 0x04000005: // rate control
- case 0x04000007: // motion estimation
- case 0x04000008: // rdo
- case 0x04000009: // vui
- case 0x05000002: // auxiliary buffer
+ case 0x04000001: /* config extension */
+ case 0x04000002: /* pic control */
+ case 0x04000005: /* rate control */
+ case 0x04000007: /* motion estimation */
+ case 0x04000008: /* rdo */
+ case 0x04000009: /* vui */
+ case 0x05000002: /* auxiliary buffer */
break;
- case 0x03000001: // encode
+ case 0x03000001: /* encode */
r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
*size, 0);
if (r)
@@ -687,18 +692,18 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
goto out;
break;
- case 0x02000001: // destroy
- destroyed = true;
+ case 0x02000001: /* destroy */
+ destroyed |= 1 << session_idx;
break;
- case 0x05000001: // context buffer
+ case 0x05000001: /* context buffer */
r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
*size * 2, 0);
if (r)
goto out;
break;
- case 0x05000004: // video bitstream buffer
+ case 0x05000004: /* video bitstream buffer */
tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
tmp, bs_idx);
@@ -706,7 +711,7 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
goto out;
break;
- case 0x05000005: // feedback buffer
+ case 0x05000005: /* feedback buffer */
r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
4096, fb_idx);
if (r)
@@ -728,21 +733,24 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
idx += len / 4;
}
- if (allocated && !created) {
+ if (allocated & ~created) {
DRM_ERROR("New session without create command!\n");
r = -ENOENT;
}
out:
- if ((!r && destroyed) || (r && allocated)) {
- /*
- * IB contains a destroy msg or we have allocated an
- * handle and got an error, anyway free the handle
- */
- for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
- atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0);
+ if (!r) {
+ /* No error, free all destroyed handle slots */
+ tmp = destroyed;
+ } else {
+ /* Error during parsing, free all allocated handle slots */
+ tmp = allocated;
}
+ for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
+ if (tmp & (1 << i))
+ atomic_set(&p->adev->vce.handles[i], 0);
+
return r;
}
@@ -828,10 +836,10 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
* @ring: the engine to test on
*
*/
-int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
+int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct fence *fence = NULL;
- int r;
+ long r;
/* skip vce ring1 ib test for now, since it's not reliable */
if (ring == &ring->adev->vce.ring[1])
@@ -839,21 +847,25 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
r = amdgpu_vce_get_create_msg(ring, 1, NULL);
if (r) {
- DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
+ DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
goto error;
}
r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
if (r) {
- DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
+ DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
goto error;
}
- r = fence_wait(fence, false);
- if (r) {
- DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+ r = fence_wait_timeout(fence, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out.\n");
+ r = -ETIMEDOUT;
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
} else {
DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
}
error:
fence_put(fence);
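The parser now tracks sessions with per-slot bit masks instead of single booleans, so a destroy followed by a create of the same handle inside one IB can be resolved per session. A compressed sketch of that bookkeeping; all names are illustrative:

static void example_session_masks(int session_idx, int r)
{
	uint32_t allocated = 0, created = 0, destroyed = 0, tmp;
	int i;

	allocated |= 1 << session_idx;	/* handle validated/allocated */
	created   |= 1 << session_idx;	/* 0x01000001 create cmd seen */
	destroyed |= 1 << session_idx;	/* 0x02000001 destroy cmd seen */

	/* on success free the destroyed slots, on error the allocated ones */
	tmp = r ? allocated : destroyed;
	for (i = 0; i < 32; ++i)
		if (tmp & (1 << i))
			;	/* atomic_set(&handles[i], 0) in the driver */
}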
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index f40cf761c..63f83d0d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -39,6 +39,8 @@ void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned flags);
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
-int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring);
+int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout);
+void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring);
+void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 9f36ed30b..80120fa40 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -25,6 +25,7 @@
* Alex Deucher
* Jerome Glisse
*/
+#include <linux/fence-array.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@@ -114,16 +115,26 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
/**
* amdgpu_vm_get_bos - add the vm BOs to a duplicates list
*
+ * @adev: amdgpu device pointer
* @vm: vm providing the BOs
* @duplicates: head of duplicates list
*
* Add the page directory to the BO duplicates list
* for command submission.
*/
-void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates)
+void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct list_head *duplicates)
{
+ uint64_t num_evictions;
unsigned i;
+ /* We only need to validate the page tables
+ * if they aren't already valid.
+ */
+ num_evictions = atomic64_read(&adev->num_evictions);
+ if (num_evictions == vm->last_eviction_counter)
+ return;
+
/* add the vm page table to the list */
for (i = 0; i <= vm->max_pde_used; ++i) {
struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
@@ -162,6 +173,13 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
spin_unlock(&glob->lru_lock);
}
+static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
+ struct amdgpu_vm_id *id)
+{
+ return id->current_gpu_reset_count !=
+ atomic_read(&adev->gpu_reset_counter) ? true : false;
+}
+
/**
* amdgpu_vm_grab_id - allocate the next free VMID
*
@@ -174,18 +192,67 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
*/
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync, struct fence *fence,
- unsigned *vm_id, uint64_t *vm_pd_addr)
+ struct amdgpu_job *job)
{
- uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
struct amdgpu_device *adev = ring->adev;
+ uint64_t fence_context = adev->fence_context + ring->idx;
struct fence *updates = sync->last_vm_update;
- struct amdgpu_vm_id *id;
- unsigned i = ring->idx;
- int r;
+ struct amdgpu_vm_id *id, *idle;
+ struct fence **fences;
+ unsigned i;
+ int r = 0;
+
+ fences = kmalloc_array(sizeof(void *), adev->vm_manager.num_ids,
+ GFP_KERNEL);
+ if (!fences)
+ return -ENOMEM;
mutex_lock(&adev->vm_manager.lock);
+ /* Check if we have an idle VMID */
+ i = 0;
+ list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) {
+ fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
+ if (!fences[i])
+ break;
+ ++i;
+ }
+
+ /* If we can't find an idle VMID to use, wait till one becomes available */
+ if (&idle->list == &adev->vm_manager.ids_lru) {
+ u64 fence_context = adev->vm_manager.fence_context + ring->idx;
+ unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
+ struct fence_array *array;
+ unsigned j;
+
+ for (j = 0; j < i; ++j)
+ fence_get(fences[j]);
+
+ array = fence_array_create(i, fences, fence_context,
+ seqno, true);
+ if (!array) {
+ for (j = 0; j < i; ++j)
+ fence_put(fences[j]);
+ kfree(fences);
+ r = -ENOMEM;
+ goto error;
+ }
+
+ r = amdgpu_sync_fence(ring->adev, sync, &array->base);
+ fence_put(&array->base);
+ if (r)
+ goto error;
+
+ mutex_unlock(&adev->vm_manager.lock);
+ return 0;
+
+ }
+ kfree(fences);
+
+ job->vm_needs_flush = true;
/* Check if we can use a VMID already assigned to this VM */
+ i = ring->idx;
do {
struct fence *flushed;
@@ -196,67 +263,52 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
/* Check all the prerequisites to using this VMID */
if (!id)
continue;
+ if (amdgpu_vm_is_gpu_reset(adev, id))
+ continue;
if (atomic64_read(&id->owner) != vm->client_id)
continue;
- if (pd_addr != id->pd_gpu_addr)
+ if (job->vm_pd_addr != id->pd_gpu_addr)
continue;
- if (id->last_user != ring &&
- (!id->last_flush || !fence_is_signaled(id->last_flush)))
+ if (!id->last_flush)
continue;
- flushed = id->flushed_updates;
- if (updates && (!flushed || fence_is_later(updates, flushed)))
+ if (id->last_flush->context != fence_context &&
+ !fence_is_signaled(id->last_flush))
continue;
- /* Good we can use this VMID */
- if (id->last_user == ring) {
- r = amdgpu_sync_fence(ring->adev, sync,
- id->first);
- if (r)
- goto error;
- }
+ flushed = id->flushed_updates;
+ if (updates &&
+ (!flushed || fence_is_later(updates, flushed)))
+ continue;
- /* And remember this submission as user of the VMID */
+ /* Good, we can use this VMID. Remember this submission as
+ * user of the VMID.
+ */
r = amdgpu_sync_fence(ring->adev, &id->active, fence);
if (r)
goto error;
+ id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
list_move_tail(&id->list, &adev->vm_manager.ids_lru);
vm->ids[ring->idx] = id;
- *vm_id = id - adev->vm_manager.ids;
- *vm_pd_addr = AMDGPU_VM_NO_FLUSH;
- trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
+ job->vm_id = id - adev->vm_manager.ids;
+ job->vm_needs_flush = false;
+ trace_amdgpu_vm_grab_id(vm, ring->idx, job);
mutex_unlock(&adev->vm_manager.lock);
return 0;
} while (i != ring->idx);
- id = list_first_entry(&adev->vm_manager.ids_lru,
- struct amdgpu_vm_id,
- list);
-
- if (!amdgpu_sync_is_idle(&id->active)) {
- struct list_head *head = &adev->vm_manager.ids_lru;
- struct amdgpu_vm_id *tmp;
-
- list_for_each_entry_safe(id, tmp, &adev->vm_manager.ids_lru,
- list) {
- if (amdgpu_sync_is_idle(&id->active)) {
- list_move(&id->list, head);
- head = &id->list;
- }
- }
- id = list_first_entry(&adev->vm_manager.ids_lru,
- struct amdgpu_vm_id,
- list);
- }
+ /* Still no ID to use? Then use the idle one found earlier */
+ id = idle;
- r = amdgpu_sync_cycle_fences(sync, &id->active, fence);
+ /* Remember this submission as user of the VMID */
+ r = amdgpu_sync_fence(ring->adev, &id->active, fence);
if (r)
goto error;
@@ -269,22 +321,46 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
fence_put(id->flushed_updates);
id->flushed_updates = fence_get(updates);
- id->pd_gpu_addr = pd_addr;
-
+ id->pd_gpu_addr = job->vm_pd_addr;
+ id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
list_move_tail(&id->list, &adev->vm_manager.ids_lru);
- id->last_user = ring;
atomic64_set(&id->owner, vm->client_id);
vm->ids[ring->idx] = id;
- *vm_id = id - adev->vm_manager.ids;
- *vm_pd_addr = pd_addr;
- trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
+ job->vm_id = id - adev->vm_manager.ids;
+ trace_amdgpu_vm_grab_id(vm, ring->idx, job);
error:
mutex_unlock(&adev->vm_manager.lock);
return r;
}
+static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ const struct amdgpu_ip_block_version *ip_block;
+
+ if (ring->type != AMDGPU_RING_TYPE_COMPUTE)
+ /* only compute rings */
+ return false;
+
+ ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
+ if (!ip_block)
+ return false;
+
+ if (ip_block->major <= 7) {
+ /* gfx7 has no workaround */
+ return true;
+ } else if (ip_block->major == 8) {
+ if (adev->gfx.mec_fw_version >= 673)
+ /* gfx8 is fixed in MEC firmware 673 */
+ return false;
+ else
+ return true;
+ }
+ return false;
+}
+
/**
* amdgpu_vm_flush - hardware flush the vm
*
@@ -294,59 +370,52 @@ error:
*
* Emit a VM flush when it is necessary.
*/
-int amdgpu_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr,
- uint32_t gds_base, uint32_t gds_size,
- uint32_t gws_base, uint32_t gws_size,
- uint32_t oa_base, uint32_t oa_size)
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
+ struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id];
bool gds_switch_needed = ring->funcs->emit_gds_switch && (
- id->gds_base != gds_base ||
- id->gds_size != gds_size ||
- id->gws_base != gws_base ||
- id->gws_size != gws_size ||
- id->oa_base != oa_base ||
- id->oa_size != oa_size);
+ id->gds_base != job->gds_base ||
+ id->gds_size != job->gds_size ||
+ id->gws_base != job->gws_base ||
+ id->gws_size != job->gws_size ||
+ id->oa_base != job->oa_base ||
+ id->oa_size != job->oa_size);
int r;
if (ring->funcs->emit_pipeline_sync && (
- pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed ||
- ring->type == AMDGPU_RING_TYPE_COMPUTE))
+ job->vm_needs_flush || gds_switch_needed ||
+ amdgpu_vm_ring_has_compute_vm_bug(ring)))
amdgpu_ring_emit_pipeline_sync(ring);
- if (ring->funcs->emit_vm_flush &&
- pd_addr != AMDGPU_VM_NO_FLUSH) {
+ if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
+ amdgpu_vm_is_gpu_reset(adev, id))) {
struct fence *fence;
- trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id);
- amdgpu_ring_emit_vm_flush(ring, vm_id, pd_addr);
+ trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id);
+ amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
+
+ r = amdgpu_fence_emit(ring, &fence);
+ if (r)
+ return r;
mutex_lock(&adev->vm_manager.lock);
- if ((id->pd_gpu_addr == pd_addr) && (id->last_user == ring)) {
- r = amdgpu_fence_emit(ring, &fence);
- if (r) {
- mutex_unlock(&adev->vm_manager.lock);
- return r;
- }
- fence_put(id->last_flush);
- id->last_flush = fence;
- }
+ fence_put(id->last_flush);
+ id->last_flush = fence;
mutex_unlock(&adev->vm_manager.lock);
}
if (gds_switch_needed) {
- id->gds_base = gds_base;
- id->gds_size = gds_size;
- id->gws_base = gws_base;
- id->gws_size = gws_size;
- id->oa_base = oa_base;
- id->oa_size = oa_size;
- amdgpu_ring_emit_gds_switch(ring, vm_id,
- gds_base, gds_size,
- gws_base, gws_size,
- oa_base, oa_size);
+ id->gds_base = job->gds_base;
+ id->gds_size = job->gds_size;
+ id->gws_base = job->gws_base;
+ id->gws_size = job->gws_size;
+ id->oa_base = job->oa_base;
+ id->oa_size = job->oa_size;
+ amdgpu_ring_emit_gds_switch(ring, job->vm_id,
+ job->gds_base, job->gds_size,
+ job->gws_base, job->gws_size,
+ job->oa_base, job->oa_size);
}
return 0;
@@ -723,7 +792,7 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
* @vm: requested vm
* @start: start of GPU address range
* @end: end of GPU address range
- * @dst: destination address to map to
+ * @dst: destination address to map to; advanced as the function walks the range
* @flags: mapping flags
*
* Update the page tables in the range @start - @end.
@@ -737,49 +806,75 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
{
const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
- uint64_t last_pe_start = ~0, last_pe_end = ~0, last_dst = ~0;
- uint64_t addr;
+ uint64_t cur_pe_start, cur_pe_end, cur_dst;
+ uint64_t addr; /* next GPU address to be updated */
+ uint64_t pt_idx;
+ struct amdgpu_bo *pt;
+ unsigned nptes; /* next number of ptes to be updated */
+ uint64_t next_pe_start;
+
+ /* initialize the variables */
+ addr = start;
+ pt_idx = addr >> amdgpu_vm_block_size;
+ pt = vm->page_tables[pt_idx].entry.robj;
+
+ if ((addr & ~mask) == (end & ~mask))
+ nptes = end - addr;
+ else
+ nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
+
+ cur_pe_start = amdgpu_bo_gpu_offset(pt);
+ cur_pe_start += (addr & mask) * 8;
+ cur_pe_end = cur_pe_start + 8 * nptes;
+ cur_dst = dst;
+
+ /* for next ptb */
+ addr += nptes;
+ dst += nptes * AMDGPU_GPU_PAGE_SIZE;
/* walk over the address space and update the page tables */
- for (addr = start; addr < end; ) {
- uint64_t pt_idx = addr >> amdgpu_vm_block_size;
- struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj;
- unsigned nptes;
- uint64_t pe_start;
+ while (addr < end) {
+ pt_idx = addr >> amdgpu_vm_block_size;
+ pt = vm->page_tables[pt_idx].entry.robj;
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
else
nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
- pe_start = amdgpu_bo_gpu_offset(pt);
- pe_start += (addr & mask) * 8;
-
- if (last_pe_end != pe_start) {
+ next_pe_start = amdgpu_bo_gpu_offset(pt);
+ next_pe_start += (addr & mask) * 8;
+ if (cur_pe_end == next_pe_start) {
+ /* The next ptb is consecutive with the current one;
+ * don't call amdgpu_vm_frag_ptes yet so both
+ * ranges can be updated together later.
+ */
+ cur_pe_end += 8 * nptes;
+ } else {
amdgpu_vm_frag_ptes(adev, vm_update_params,
- last_pe_start, last_pe_end,
- last_dst, flags);
+ cur_pe_start, cur_pe_end,
+ cur_dst, flags);
- last_pe_start = pe_start;
- last_pe_end = pe_start + 8 * nptes;
- last_dst = dst;
- } else {
- last_pe_end += 8 * nptes;
+ cur_pe_start = next_pe_start;
+ cur_pe_end = next_pe_start + 8 * nptes;
+ cur_dst = dst;
}
+ /* for next ptb */
addr += nptes;
dst += nptes * AMDGPU_GPU_PAGE_SIZE;
}
- amdgpu_vm_frag_ptes(adev, vm_update_params, last_pe_start,
- last_pe_end, last_dst, flags);
+ amdgpu_vm_frag_ptes(adev, vm_update_params, cur_pe_start,
+ cur_pe_end, cur_dst, flags);
}
/**
* amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
*
* @adev: amdgpu_device pointer
+ * @exclusive: fence we need to sync to
* @src: address where to copy page table entries from
* @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
@@ -793,6 +888,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
* Returns 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
+ struct fence *exclusive,
uint64_t src,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
@@ -853,6 +949,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
vm_update_params.ib = &job->ibs[0];
+ r = amdgpu_sync_fence(adev, &job->sync, exclusive);
+ if (r)
+ goto error_free;
+
r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
owner);
if (r)
@@ -889,6 +989,7 @@ error_free:
* amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
*
* @adev: amdgpu_device pointer
+ * @exclusive: fence we need to sync to
* @gtt_flags: flags as they are used for GTT
* @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
@@ -902,6 +1003,7 @@ error_free:
* Returns 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
+ struct fence *exclusive,
uint32_t gtt_flags,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
@@ -932,7 +1034,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
addr += mapping->offset;
if (!pages_addr || src)
- return amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm,
+ return amdgpu_vm_bo_update_mapping(adev, exclusive,
+ src, pages_addr, vm,
start, mapping->it.last,
flags, addr, fence);
@@ -940,7 +1043,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
uint64_t last;
last = min((uint64_t)mapping->it.last, start + max_size - 1);
- r = amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm,
+ r = amdgpu_vm_bo_update_mapping(adev, exclusive,
+ src, pages_addr, vm,
start, last, flags, addr,
fence);
if (r)
@@ -973,6 +1077,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping;
dma_addr_t *pages_addr = NULL;
uint32_t gtt_flags, flags;
+ struct fence *exclusive;
uint64_t addr;
int r;
@@ -994,8 +1099,11 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
default:
break;
}
+
+ exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
} else {
addr = 0;
+ exclusive = NULL;
}
flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
@@ -1007,7 +1115,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
spin_unlock(&vm->status_lock);
list_for_each_entry(mapping, &bo_va->invalids, list) {
- r = amdgpu_vm_bo_split_mapping(adev, gtt_flags, pages_addr, vm,
+ r = amdgpu_vm_bo_split_mapping(adev, exclusive,
+ gtt_flags, pages_addr, vm,
mapping, flags, addr,
&bo_va->last_pt_update);
if (r)
@@ -1054,7 +1163,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping, list);
list_del(&mapping->list);
- r = amdgpu_vm_bo_split_mapping(adev, 0, NULL, vm, mapping,
+ r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping,
0, 0, NULL);
kfree(mapping);
if (r)
@@ -1426,7 +1535,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
r = amd_sched_entity_init(&ring->sched, &vm->entity,
rq, amdgpu_sched_jobs);
if (r)
- return r;
+ goto err;
vm->page_directory_fence = NULL;
@@ -1445,6 +1554,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_bo_unreserve(vm->page_directory);
if (r)
goto error_free_page_directory;
+ vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
return 0;
@@ -1455,6 +1565,9 @@ error_free_page_directory:
error_free_sched_entity:
amd_sched_entity_fini(&ring->sched, &vm->entity);
+err:
+ drm_free_large(vm->page_tables);
+
return r;
}
@@ -1516,6 +1629,10 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
&adev->vm_manager.ids_lru);
}
+ adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+ adev->vm_manager.seqno[i] = 0;
+
atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
atomic64_set(&adev->vm_manager.client_counter, 0);
}
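A sketch of the page-table-entry coalescing amdgpu_vm_update_ptes now performs: when the next page table's PE window starts exactly where the current one ends, the ranges are merged into a single amdgpu_vm_frag_ptes call. Simplified, with illustrative types:

struct example_pe_range {
	uint64_t start;
	uint64_t end;
};

static int example_merge(struct example_pe_range *cur,
			 uint64_t next_pe_start, unsigned nptes)
{
	if (cur->end == next_pe_start) {
		/* consecutive PTBs: grow the range, flush later */
		cur->end += 8ull * nptes;	/* 8 bytes per PTE */
		return 1;
	}
	return 0;	/* gap: flush cur first, then start a new range */
}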
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 040dd26ff..43934248b 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -83,12 +83,14 @@ static const struct ci_pt_defaults defaults_bonaire_xt =
{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};
+#if 0
static const struct ci_pt_defaults defaults_bonaire_pro =
{
1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
{ 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
{ 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};
+#endif
static const struct ci_pt_defaults defaults_saturn_xt =
{
@@ -97,12 +99,14 @@ static const struct ci_pt_defaults defaults_saturn_xt =
{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};
+#if 0
static const struct ci_pt_defaults defaults_saturn_pro =
{
1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
{ 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
{ 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};
+#endif
static const struct ci_pt_config_reg didt_config_ci[] =
{
@@ -735,19 +739,19 @@ static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
if (pi->caps_sq_ramping || pi->caps_db_ramping ||
pi->caps_td_ramping || pi->caps_tcp_ramping) {
- gfx_v7_0_enter_rlc_safe_mode(adev);
+ adev->gfx.rlc.funcs->enter_safe_mode(adev);
if (enable) {
ret = ci_program_pt_config_registers(adev, didt_config_ci);
if (ret) {
- gfx_v7_0_exit_rlc_safe_mode(adev);
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
return ret;
}
}
ci_do_enable_didt(adev, enable);
- gfx_v7_0_exit_rlc_safe_mode(adev);
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
}
return 0;
@@ -3029,7 +3033,7 @@ static int ci_populate_single_memory_level(struct amdgpu_device *adev,
if (pi->mclk_stutter_mode_threshold &&
(memory_clock <= pi->mclk_stutter_mode_threshold) &&
- (pi->uvd_enabled == false) &&
+ (!pi->uvd_enabled) &&
(RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
(adev->pm.dpm.new_active_crtc_count <= 2))
memory_level->StutterEnable = true;
@@ -3635,6 +3639,10 @@ static int ci_setup_default_dpm_tables(struct amdgpu_device *adev)
ci_setup_default_pcie_tables(adev);
+ /* save a copy of the default DPM table */
+ memcpy(&(pi->golden_dpm_table), &(pi->dpm_table),
+ sizeof(struct ci_dpm_table));
+
return 0;
}
@@ -5753,13 +5761,22 @@ static int ci_dpm_init_microcode(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_BONAIRE:
- chip_name = "bonaire";
+ if ((adev->pdev->revision == 0x80) ||
+ (adev->pdev->revision == 0x81) ||
+ (adev->pdev->device == 0x665f))
+ chip_name = "bonaire_k";
+ else
+ chip_name = "bonaire";
break;
case CHIP_HAWAII:
- chip_name = "hawaii";
+ if (adev->pdev->revision == 0x80)
+ chip_name = "hawaii_k";
+ else
+ chip_name = "hawaii";
break;
case CHIP_KAVERI:
case CHIP_KABINI:
+ case CHIP_MULLINS:
default: BUG();
}
@@ -6403,6 +6420,186 @@ static int ci_dpm_set_powergating_state(void *handle,
return 0;
}
+static int ci_dpm_print_clock_levels(struct amdgpu_device *adev,
+ enum pp_clock_type type, char *buf)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
+ struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
+ struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
+
+ int i, now, size = 0;
+ uint32_t clock, pcie_speed;
+
+ switch (type) {
+ case PP_SCLK:
+ amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetSclkFrequency);
+ clock = RREG32(mmSMC_MSG_ARG_0);
+
+ for (i = 0; i < sclk_table->count; i++) {
+ if (clock > sclk_table->dpm_levels[i].value)
+ continue;
+ break;
+ }
+ now = i;
+
+ for (i = 0; i < sclk_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, sclk_table->dpm_levels[i].value / 100,
+ (i == now) ? "*" : "");
+ break;
+ case PP_MCLK:
+ amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetMclkFrequency);
+ clock = RREG32(mmSMC_MSG_ARG_0);
+
+ for (i = 0; i < mclk_table->count; i++) {
+ if (clock > mclk_table->dpm_levels[i].value)
+ continue;
+ break;
+ }
+ now = i;
+
+ for (i = 0; i < mclk_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, mclk_table->dpm_levels[i].value / 100,
+ (i == now) ? "*" : "");
+ break;
+ case PP_PCIE:
+ pcie_speed = ci_get_current_pcie_speed(adev);
+ for (i = 0; i < pcie_table->count; i++) {
+ if (pcie_speed != pcie_table->dpm_levels[i].value)
+ continue;
+ break;
+ }
+ now = i;
+
+ for (i = 0; i < pcie_table->count; i++)
+ size += sprintf(buf + size, "%d: %s %s\n", i,
+ (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x1" :
+ (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
+ (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
+ (i == now) ? "*" : "");
+ break;
+ default:
+ break;
+ }
+
+ return size;
+}
+
+static int ci_dpm_force_clock_level(struct amdgpu_device *adev,
+ enum pp_clock_type type, uint32_t mask)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+
+ if (adev->pm.dpm.forced_level
+ != AMDGPU_DPM_FORCED_LEVEL_MANUAL)
+ return -EINVAL;
+
+ switch (type) {
+ case PP_SCLK:
+ if (!pi->sclk_dpm_key_disabled)
+ amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+ PPSMC_MSG_SCLKDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
+ break;
+
+ case PP_MCLK:
+ if (!pi->mclk_dpm_key_disabled)
+ amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
+ break;
+
+ case PP_PCIE:
+ {
+ uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
+ uint32_t level = 0;
+
+ while (tmp >>= 1)
+ level++;
+
+ if (!pi->pcie_dpm_key_disabled)
+ amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+ PPSMC_MSG_PCIeDPM_ForceLevel,
+ level);
+ break;
+ }
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ci_dpm_get_sclk_od(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table);
+ struct ci_single_dpm_table *golden_sclk_table =
+ &(pi->golden_dpm_table.sclk_table);
+ int value;
+
+ value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
+ 100 /
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+ return value;
+}
+
+static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
+ struct ci_single_dpm_table *golden_sclk_table =
+ &(pi->golden_dpm_table.sclk_table);
+
+ if (value > 20)
+ value = 20;
+
+ ps->performance_levels[ps->performance_level_count - 1].sclk =
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
+ value / 100 +
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+ return 0;
+}
+
+static int ci_dpm_get_mclk_od(struct amdgpu_device *adev)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table);
+ struct ci_single_dpm_table *golden_mclk_table =
+ &(pi->golden_dpm_table.mclk_table);
+ int value;
+
+ value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
+ 100 /
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+ return value;
+}
+
+static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
+{
+ struct ci_power_info *pi = ci_get_pi(adev);
+ struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
+ struct ci_single_dpm_table *golden_mclk_table =
+ &(pi->golden_dpm_table.mclk_table);
+
+ if (value > 20)
+ value = 20;
+
+ ps->performance_levels[ps->performance_level_count - 1].mclk =
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
+ value / 100 +
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+ return 0;
+}
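
Both OD setters scale the top "golden" level by a percentage clamped to 20, and the getters invert that with integer division. A worked sketch of the round trip (standalone C, values illustrative):

	#include <stdio.h>

	int main(void)
	{
		unsigned golden = 100000; /* top golden level, table units */
		unsigned od = 15;         /* overdrive percent, clamped to 20 */

		/* ci_dpm_set_*_od: new top clock = golden + golden * od / 100 */
		unsigned clk = golden * od / 100 + golden;    /* 115000 */

		/* ci_dpm_get_*_od: percent = (clk - golden) * 100 / golden */
		unsigned pct = (clk - golden) * 100 / golden; /* 15 again */

		printf("clk=%u pct=%u\n", clk, pct);
		return 0;
	}

Because both directions truncate, the round trip is exact only when golden * od is a multiple of 100; otherwise the getter can report one percent less than what was set.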
+
const struct amd_ip_funcs ci_dpm_ip_funcs = {
.name = "ci_dpm",
.early_init = ci_dpm_early_init,
@@ -6437,6 +6634,12 @@ static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
.get_fan_control_mode = &ci_dpm_get_fan_control_mode,
.set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
.get_fan_speed_percent = &ci_dpm_get_fan_speed_percent,
+ .print_clock_levels = ci_dpm_print_clock_levels,
+ .force_clock_level = ci_dpm_force_clock_level,
+ .get_sclk_od = ci_dpm_get_sclk_od,
+ .set_sclk_od = ci_dpm_set_sclk_od,
+ .get_mclk_od = ci_dpm_get_mclk_od,
+ .set_mclk_od = ci_dpm_set_mclk_od,
};
static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.h b/drivers/gpu/drm/amd/amdgpu/ci_dpm.h
index faccc30c9..91be2996a 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.h
@@ -193,6 +193,7 @@ struct ci_pt_defaults {
struct ci_power_info {
struct ci_dpm_table dpm_table;
+ struct ci_dpm_table golden_dpm_table;
u32 voltage_control;
u32 mvdd_control;
u32 vddci_control;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 910431808..4efc901f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -879,7 +879,7 @@ static void cik_vga_set_state(struct amdgpu_device *adev, bool state)
uint32_t tmp;
tmp = RREG32(mmCONFIG_CNTL);
- if (state == false)
+ if (!state)
tmp |= CONFIG_CNTL__VGA_DIS_MASK;
else
tmp &= ~CONFIG_CNTL__VGA_DIS_MASK;
@@ -1035,12 +1035,12 @@ static uint32_t cik_read_indexed_register(struct amdgpu_device *adev,
mutex_lock(&adev->grbm_idx_mutex);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- gfx_v7_0_select_se_sh(adev, se_num, sh_num);
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
val = RREG32(reg_offset);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
return val;
}
@@ -1158,10 +1158,11 @@ static void kv_restore_regs_for_reset(struct amdgpu_device *adev,
WREG32(mmGMCON_RENG_EXECUTE, save->gmcon_reng_execute);
}
-static void cik_gpu_pci_config_reset(struct amdgpu_device *adev)
+static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
{
struct kv_reset_save_regs kv_save = { 0 };
u32 i;
+ int r = -EINVAL;
dev_info(adev->dev, "GPU pci config reset\n");
@@ -1177,14 +1178,20 @@ static void cik_gpu_pci_config_reset(struct amdgpu_device *adev)
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
- if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
+ if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
+ /* enable BM */
+ pci_set_master(adev->pdev);
+ r = 0;
break;
+ }
udelay(1);
}
/* does asic init need to be run first??? */
if (adev->flags & AMD_IS_APU)
kv_restore_regs_for_reset(adev, &kv_save);
+
+ return r;
}
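
The reset helper now reports whether the ASIC actually came back: it polls mmCONFIG_MEMSIZE until it reads something other than 0xffffffff, re-enables bus mastering on success, and otherwise falls through with -EINVAL. A standalone sketch of that poll-with-timeout shape (read_reg() and usleep_stub() are stand-ins for RREG32()/udelay(), not kernel APIs):

	#include <assert.h>
	#include <errno.h>
	#include <stdint.h>

	static uint32_t fake_reads;
	/* Simulate a device that answers all-ones for a few reads after reset. */
	static uint32_t read_reg(void) { return fake_reads++ < 3 ? 0xffffffff : 0x100; }
	static void usleep_stub(unsigned us) { (void)us; }

	static int poll_until_ready(unsigned timeout_us)
	{
		unsigned i;

		for (i = 0; i < timeout_us; i++) {
			if (read_reg() != 0xffffffff)
				return 0; /* device answered; caller re-enables BM */
			usleep_stub(1);
		}
		return -EINVAL; /* matches the r = -EINVAL default above */
	}

	int main(void)
	{
		assert(poll_until_ready(10) == 0);
		return 0;
	}

cik_asic_reset() below simply propagates this result instead of unconditionally returning 0.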
static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
@@ -1210,13 +1217,14 @@ static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hu
*/
static int cik_asic_reset(struct amdgpu_device *adev)
{
+ int r;
cik_set_bios_scratch_engine_hung(adev, true);
- cik_gpu_pci_config_reset(adev);
+ r = cik_gpu_pci_config_reset(adev);
cik_set_bios_scratch_engine_hung(adev, false);
- return 0;
+ return r;
}
static int cik_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
@@ -2014,9 +2022,6 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
.set_uvd_clocks = &cik_set_uvd_clocks,
.set_vce_clocks = &cik_set_vce_clocks,
.get_virtual_caps = &cik_get_virtual_caps,
- /* these should be moved to their own ip modules */
- .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
- .wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle,
};
static int cik_common_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index cb429bfdc..af8b6bdf0 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -216,17 +216,6 @@ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
unsigned vm_id, bool ctx_switch)
{
u32 extra_bits = vm_id & 0xf;
- u32 next_rptr = ring->wptr + 5;
-
- while ((next_rptr & 7) != 4)
- next_rptr++;
-
- next_rptr += 4;
- amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
- amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
- amdgpu_ring_write(ring, 1); /* number of DWs to follow */
- amdgpu_ring_write(ring, next_rptr);
/* IB packet must end on a 8 DW boundary */
cik_sdma_ring_insert_nop(ring, (12 - (ring->wptr & 7)) % 8);
@@ -357,7 +346,7 @@ static void cik_sdma_enable(struct amdgpu_device *adev, bool enable)
u32 me_cntl;
int i;
- if (enable == false) {
+ if (!enable) {
cik_sdma_gfx_stop(adev);
cik_sdma_rlc_stop(adev);
}
@@ -620,20 +609,19 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
* Test a simple IB in the DMA ring (CIK).
* Returns 0 on success, error on failure.
*/
-static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
+static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct fence *f = NULL;
- unsigned i;
unsigned index;
- int r;
u32 tmp = 0;
u64 gpu_addr;
+ long r;
r = amdgpu_wb_get(adev, &index);
if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
return r;
}
@@ -643,11 +631,12 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
goto err0;
}
- ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+ ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE,
+ SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
ib.ptr[1] = lower_32_bits(gpu_addr);
ib.ptr[2] = upper_32_bits(gpu_addr);
ib.ptr[3] = 1;
@@ -657,28 +646,25 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
if (r)
goto err1;
- r = fence_wait(f, false);
- if (r) {
- DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+ r = fence_wait_timeout(f, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out\n");
+ r = -ETIMEDOUT;
goto err1;
- }
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = le32_to_cpu(adev->wb.wb[index]);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
- if (i < adev->usec_timeout) {
- DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
- ring->idx, i);
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
+ }
+ tmp = le32_to_cpu(adev->wb.wb[index]);
+ if (tmp == 0xDEADBEEF) {
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
} else {
DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
r = -EINVAL;
}
err1:
- fence_put(f);
amdgpu_ib_free(adev, &ib, NULL);
fence_put(f);
err0:
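
All of the converted IB tests in this patch follow the same pattern: instead of waiting on the fence and then busy-polling the write-back slot, they call fence_wait_timeout() and key off its return value, and they drop the extra fence_put() that used to sit before amdgpu_ib_free() (one reference, one put). A standalone sketch of the return-value convention the new code relies on, where negative means error, zero means timeout, and positive means time remaining:

	#include <assert.h>
	#include <errno.h>

	/* Stand-in, not the kernel API: map a fence_wait_timeout()-style
	 * result onto the error code the IB tests return. */
	static long map_wait_result(long r)
	{
		if (r == 0)
			return -ETIMEDOUT; /* fence never signalled in time */
		if (r < 0)
			return r;          /* propagate the wait error */
		return 0;                  /* signalled; leftover time discarded */
	}

	int main(void)
	{
		assert(map_wait_result(0) == -ETIMEDOUT);
		assert(map_wait_result(-EINTR) == -EINTR);
		assert(map_wait_result(42) == 0);
		return 0;
	}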
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 933e425a8..2a11413ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -425,7 +425,7 @@ static int cz_dpm_init(struct amdgpu_device *adev)
pi->mgcg_cgtt_local1 = 0x0;
pi->clock_slow_down_step = 25000;
pi->skip_clock_slow_down = 1;
- pi->enable_nb_ps_policy = 0;
+ pi->enable_nb_ps_policy = false;
pi->caps_power_containment = true;
pi->caps_cac = true;
pi->didt_enabled = false;
@@ -2219,6 +2219,7 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
}
}
} else { /*pi->caps_vce_pg*/
+ pi->vce_power_gated = gate;
cz_update_vce_dpm(adev);
cz_enable_vce_dpm(adev, !gate);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 8227344d2..c1b04e9aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2667,19 +2667,21 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
}
}
-static void dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t start, uint32_t size)
+static int dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- int end = (start + size > 256) ? 256 : start + size, i;
+ int i;
/* userspace palettes are always correct as is */
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
amdgpu_crtc->lut_r[i] = red[i] >> 6;
amdgpu_crtc->lut_g[i] = green[i] >> 6;
amdgpu_crtc->lut_b[i] = blue[i] >> 6;
}
dce_v10_0_crtc_load_lut(crtc);
+
+ return 0;
}
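
The reworked gamma_set callbacks take the LUT size directly (no start offset), return int, and keep storing the 16-bit userspace values as 10-bit hardware entries via a right shift. A tiny sketch of that conversion:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t user = 0xffff;  /* full-scale 16-bit channel from userspace */
		uint16_t hw = user >> 6; /* 10-bit entry, as in lut_r[i] = red[i] >> 6 */

		assert(hw == 0x3ff);     /* top of the 10-bit range */
		return 0;
	}

The same change is applied to the DCE 11.0 and DCE 8.0 variants below.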
static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc)
@@ -2717,13 +2719,13 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
amdgpu_irq_update(adev, &adev->pageflip_irq, type);
- drm_vblank_on(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_on(crtc);
dce_v10_0_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- drm_vblank_off(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_off(crtc);
if (amdgpu_crtc->enabled) {
dce_v10_0_vga_enable(crtc, true);
amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
@@ -3372,7 +3374,7 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
- drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
schedule_work(&works->unpin_work);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index af26ec0bc..d4bf13390 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -307,11 +307,10 @@ static void dce_v11_0_page_flip(struct amdgpu_device *adev,
struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
u32 tmp;
- /* flip at hsync for async, default is vsync */
- /* use UPDATE_IMMEDIATE_EN instead for async? */
+ /* flip immediate for async, default is vsync */
tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
- GRPH_SURFACE_UPDATE_H_RETRACE_EN, async ? 1 : 0);
+ GRPH_SURFACE_UPDATE_IMMEDIATE_EN, async ? 1 : 0);
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
/* update the scanout addresses */
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
@@ -2678,19 +2677,21 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
}
}
-static void dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t start, uint32_t size)
+static int dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- int end = (start + size > 256) ? 256 : start + size, i;
+ int i;
/* userspace palettes are always correct as is */
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
amdgpu_crtc->lut_r[i] = red[i] >> 6;
amdgpu_crtc->lut_g[i] = green[i] >> 6;
amdgpu_crtc->lut_b[i] = blue[i] >> 6;
}
dce_v11_0_crtc_load_lut(crtc);
+
+ return 0;
}
static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc)
@@ -2728,13 +2729,13 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
amdgpu_irq_update(adev, &adev->pageflip_irq, type);
- drm_vblank_on(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_on(crtc);
dce_v11_0_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- drm_vblank_off(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_off(crtc);
if (amdgpu_crtc->enabled) {
dce_v11_0_vga_enable(crtc, true);
amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
@@ -3433,7 +3434,7 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
- drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
schedule_work(&works->unpin_work);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 3fb65e41a..4fdfab1e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -526,36 +526,16 @@ static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev,
crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
CRTC_CONTROL, CRTC_MASTER_EN);
if (crtc_enabled) {
-#if 0
- u32 frame_count;
- int j;
-
+#if 1
save->crtc_enabled[i] = true;
tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
- amdgpu_display_vblank_wait(adev, i);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ /* it is correct only for RGB; black is 0 */
+ WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0);
tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- }
- /* wait for the next frame */
- frame_count = amdgpu_display_vblank_get_counter(adev, i);
- for (j = 0; j < adev->usec_timeout; j++) {
- if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
- break;
- udelay(1);
- }
- tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) {
- tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
- WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
- }
- tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) {
- tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1);
- WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
}
+ mdelay(20);
#else
/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
@@ -575,55 +555,22 @@ static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev,
static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save)
{
- u32 tmp, frame_count;
- int i, j;
+ u32 tmp;
+ int i;
/* update crtc base addresses */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
upper_32_bits(adev->mc.vram_start));
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
- upper_32_bits(adev->mc.vram_start));
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
(u32)adev->mc.vram_start);
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
- (u32)adev->mc.vram_start);
if (save->crtc_enabled[i]) {
- tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) {
- tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3);
- WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
- }
- tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) {
- tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
- WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
- }
- tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) {
- tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0);
- WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
- }
- for (j = 0; j < adev->usec_timeout; j++) {
- tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0)
- break;
- udelay(1);
- }
tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- /* wait for the next frame */
- frame_count = amdgpu_display_vblank_get_counter(adev, i);
- for (j = 0; j < adev->usec_timeout; j++) {
- if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
- break;
- udelay(1);
- }
}
+ mdelay(20);
}
WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
@@ -2574,19 +2521,21 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
}
}
-static void dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t start, uint32_t size)
+static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- int end = (start + size > 256) ? 256 : start + size, i;
+ int i;
/* userspace palettes are always correct as is */
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
amdgpu_crtc->lut_r[i] = red[i] >> 6;
amdgpu_crtc->lut_g[i] = green[i] >> 6;
amdgpu_crtc->lut_b[i] = blue[i] >> 6;
}
dce_v8_0_crtc_load_lut(crtc);
+
+ return 0;
}
static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
@@ -2624,13 +2573,13 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
amdgpu_irq_update(adev, &adev->pageflip_irq, type);
- drm_vblank_on(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_on(crtc);
dce_v8_0_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- drm_vblank_off(dev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_off(crtc);
if (amdgpu_crtc->enabled) {
dce_v8_0_vga_enable(crtc, true);
amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
@@ -3376,7 +3325,7 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
- drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
schedule_work(&works->unpin_work);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
index b336c918d..b3e19ba4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
@@ -173,7 +173,7 @@ static int fiji_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
{
if (!fiji_is_smc_ram_running(adev))
{
- return -EINVAL;;
+ return -EINVAL;
}
if (wait_smu_response(adev)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 6d34dcd60..754566f51 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1554,9 +1554,15 @@ static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev)
* registers are instanced per SE or SH. 0xffffffff means
* broadcast to all SEs or SHs (CIK).
*/
-void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num)
+static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev,
+ u32 se_num, u32 sh_num, u32 instance)
{
- u32 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK;
+ u32 data;
+
+ if (instance == 0xffffffff)
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
+ else
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
@@ -1630,13 +1636,13 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v7_0_select_se_sh(adev, i, j);
+ gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
data = gfx_v7_0_get_rb_active_bitmap(adev);
active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
rb_bitmap_width_per_sh);
}
}
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
adev->gfx.config.backend_enable_mask = active_rbs;
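
setup_rb() walks every SE/SH pair through the extended select function (0xffffffff in any argument means broadcast, a specific value selects one instance) and packs each per-SH render-backend bitmap into a single word. A standalone sketch of the packing, with made-up dimensions:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const unsigned max_se = 2, max_sh = 2, width = 4; /* illustrative */
		uint32_t per_sh[2][2] = { { 0x3, 0x1 }, { 0xf, 0x7 } };
		unsigned active_rbs = 0;
		unsigned i, j;

		for (i = 0; i < max_se; i++)
			for (j = 0; j < max_sh; j++)
				active_rbs |= per_sh[i][j] << ((i * max_sh + j) * width);

		printf("active_rbs = 0x%x\n", active_rbs); /* 0x7f13 */
		return 0;
	}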
@@ -1717,7 +1723,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
* making sure that the following register writes will be broadcasted
* to all the shaders
*/
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
/* XXX SH_MEM regs */
/* where to put LDS, scratch, GPUVM in FSA64 space */
@@ -2021,17 +2027,6 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
unsigned vm_id, bool ctx_switch)
{
u32 header, control = 0;
- u32 next_rptr = ring->wptr + 5;
-
- if (ctx_switch)
- next_rptr += 2;
-
- next_rptr += 4;
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
- amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
- amdgpu_ring_write(ring, next_rptr);
/* insert SWITCH_BUFFER packet before first IB in the ring frame */
if (ctx_switch) {
@@ -2060,22 +2055,9 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{
- u32 header, control = 0;
- u32 next_rptr = ring->wptr + 5;
-
- control |= INDIRECT_BUFFER_VALID;
- next_rptr += 4;
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
- amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
- amdgpu_ring_write(ring, next_rptr);
-
- header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+ u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
- control |= ib->length_dw | (vm_id << 24);
-
- amdgpu_ring_write(ring, header);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
@@ -2094,26 +2076,25 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
* Provides a basic gfx ring test to verify that IBs are working.
* Returns 0 on success, error on failure.
*/
-static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
+static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct fence *f = NULL;
uint32_t scratch;
uint32_t tmp = 0;
- unsigned i;
- int r;
+ long r;
r = amdgpu_gfx_scratch_get(adev, &scratch);
if (r) {
- DRM_ERROR("amdgpu: failed to get scratch reg (%d).\n", r);
+ DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
return r;
}
WREG32(scratch, 0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
goto err1;
}
ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
@@ -2125,21 +2106,19 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
if (r)
goto err2;
- r = fence_wait(f, false);
- if (r) {
- DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+ r = fence_wait_timeout(f, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out\n");
+ r = -ETIMEDOUT;
goto err2;
- }
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(scratch);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
- if (i < adev->usec_timeout) {
- DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
- ring->idx, i);
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err2;
+ }
+ tmp = RREG32(scratch);
+ if (tmp == 0xDEADBEEF) {
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
} else {
DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
scratch, tmp);
@@ -2147,7 +2126,6 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
}
err2:
- fence_put(f);
amdgpu_ib_free(adev, &ib, NULL);
fence_put(f);
err1:
@@ -3198,7 +3176,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
}
}
adev->gfx.rlc.cs_data = ci_cs_data;
- adev->gfx.rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4;
+ adev->gfx.rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */
+ adev->gfx.rlc.cp_table_size += 64 * 1024; /* GDS */
src_ptr = adev->gfx.rlc.reg_list;
dws = adev->gfx.rlc.reg_list_size;
@@ -3356,7 +3335,7 @@ static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v7_0_select_se_sh(adev, i, j);
+ gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
for (k = 0; k < adev->usec_timeout; k++) {
if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
break;
@@ -3364,7 +3343,7 @@ static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
}
}
}
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
@@ -3411,7 +3390,7 @@ static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
return orig;
}
-void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
{
u32 tmp, i, mask;
@@ -3433,7 +3412,7 @@ void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
}
}
-void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
{
u32 tmp;
@@ -3448,7 +3427,7 @@ void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
*
* Halt the RLC ME (MicroEngine) (CIK).
*/
-void gfx_v7_0_rlc_stop(struct amdgpu_device *adev)
+static void gfx_v7_0_rlc_stop(struct amdgpu_device *adev)
{
WREG32(mmRLC_CNTL, 0);
@@ -3524,7 +3503,7 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
WREG32(mmRLC_LB_CNTR_MAX, 0x00008000);
mutex_lock(&adev->grbm_idx_mutex);
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff);
WREG32(mmRLC_LB_PARAMS, 0x00600408);
WREG32(mmRLC_LB_CNTL, 0x80000004);
@@ -3564,7 +3543,7 @@ static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
tmp = gfx_v7_0_halt_rlc(adev);
mutex_lock(&adev->grbm_idx_mutex);
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
@@ -3615,7 +3594,7 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
tmp = gfx_v7_0_halt_rlc(adev);
mutex_lock(&adev->grbm_idx_mutex);
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
@@ -3666,7 +3645,7 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
tmp = gfx_v7_0_halt_rlc(adev);
mutex_lock(&adev->grbm_idx_mutex);
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK;
@@ -3844,6 +3823,20 @@ static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
}
}
+static void gfx_v7_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
+ u32 bitmap)
+{
+ u32 data;
+
+ if (!bitmap)
+ return;
+
+ data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+ data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
+
+ WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
+}
+
static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
u32 data, mask;
@@ -4100,7 +4093,7 @@ static void gfx_v7_0_fini_pg(struct amdgpu_device *adev)
* Fetches a GPU clock counter snapshot (SI).
* Returns the 64 bit clock counter snapshot.
*/
-uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev)
+static uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
uint64_t clock;
@@ -4160,12 +4153,24 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
}
+static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
+ .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
+ .select_se_sh = &gfx_v7_0_select_se_sh,
+};
+
+static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
+ .enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode,
+ .exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode
+};
+
static int gfx_v7_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS;
adev->gfx.num_compute_rings = GFX7_NUM_COMPUTE_RINGS;
+ adev->gfx.funcs = &gfx_v7_0_gfx_funcs;
+ adev->gfx.rlc.funcs = &gfx_v7_0_rlc_funcs;
gfx_v7_0_set_ring_funcs(adev);
gfx_v7_0_set_irq_funcs(adev);
gfx_v7_0_set_gds_init(adev);
@@ -5009,16 +5014,22 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
int i, j, k, counter, active_cu_number = 0;
u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
+ unsigned disable_masks[4 * 2];
memset(cu_info, 0, sizeof(*cu_info));
+ amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
+
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
mask = 1;
ao_bitmap = 0;
counter = 0;
- gfx_v7_0_select_se_sh(adev, i, j);
+ gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
+ if (i < 4 && j < 2)
+ gfx_v7_0_set_user_cu_inactive_bitmap(
+ adev, disable_masks[i * 2 + j]);
bitmap = gfx_v7_0_get_cu_active_bitmap(adev);
cu_info->bitmap[i][j] = bitmap;
@@ -5034,7 +5045,7 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
}
}
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
cu_info->number = active_cu_number;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
index e747aa935..94e3ea147 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
@@ -26,11 +26,4 @@
extern const struct amd_ip_funcs gfx_v7_0_ip_funcs;
-/* XXX these shouldn't be exported */
-void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev);
-void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev);
-void gfx_v7_0_rlc_stop(struct amdgpu_device *adev);
-uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev);
-void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
-
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 1127b2b62..1c8f2754f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -225,7 +225,8 @@ static const u32 tonga_mgcg_cgcg_init[] =
static const u32 golden_settings_polaris11_a11[] =
{
- mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208,
+ mmCB_HW_CONTROL, 0x0000f3cf, 0x00007208,
+ mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
mmDB_DEBUG2, 0xf00fffff, 0x00000400,
mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
@@ -234,7 +235,7 @@ static const u32 golden_settings_polaris11_a11[] =
mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
- mmSQ_CONFIG, 0x07f80000, 0x07180000,
+ mmSQ_CONFIG, 0x07f80000, 0x01180000,
mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
mmTCC_CTRL, 0x00100000, 0xf31fff7f,
mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3,
@@ -256,8 +257,8 @@ static const u32 polaris11_golden_common_all[] =
static const u32 golden_settings_polaris10_a11[] =
{
mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
- mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
- mmCB_HW_CONTROL_2, 0, 0x0f000000,
+ mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
+ mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
mmDB_DEBUG2, 0xf00fffff, 0x00000400,
mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
@@ -364,6 +365,7 @@ static const u32 golden_settings_iceland_a11[] =
mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x00000002,
mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
+ mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
mmTCC_CTRL, 0x00100000, 0xf31fff7f,
@@ -460,8 +462,10 @@ static const u32 cz_golden_settings_a11[] =
mmGB_GPU_ID, 0x0000000f, 0x00000000,
mmPA_SC_ENHANCE, 0xffffffff, 0x00000001,
mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+ mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
mmTA_CNTL_AUX, 0x000f000f, 0x00010000,
+ mmTCC_CTRL, 0x00100000, 0xf31fff7f,
mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f3,
mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001302
@@ -742,26 +746,25 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
return r;
}
-static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
+static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct fence *f = NULL;
uint32_t scratch;
uint32_t tmp = 0;
- unsigned i;
- int r;
+ long r;
r = amdgpu_gfx_scratch_get(adev, &scratch);
if (r) {
- DRM_ERROR("amdgpu: failed to get scratch reg (%d).\n", r);
+ DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
return r;
}
WREG32(scratch, 0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
goto err1;
}
ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
@@ -773,28 +776,25 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
if (r)
goto err2;
- r = fence_wait(f, false);
- if (r) {
- DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+ r = fence_wait_timeout(f, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out.\n");
+ r = -ETIMEDOUT;
goto err2;
- }
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(scratch);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
- if (i < adev->usec_timeout) {
- DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
- ring->idx, i);
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err2;
+ }
+ tmp = RREG32(scratch);
+ if (tmp == 0xDEADBEEF) {
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
} else {
DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
scratch, tmp);
r = -EINVAL;
}
err2:
- fence_put(f);
amdgpu_ib_free(adev, &ib, NULL);
fence_put(f);
err1:
@@ -1115,6 +1115,71 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
buffer[count++] = cpu_to_le32(0);
}
+static void cz_init_cp_jump_table(struct amdgpu_device *adev)
+{
+ const __le32 *fw_data;
+ volatile u32 *dst_ptr;
+ int me, i, max_me = 4;
+ u32 bo_offset = 0;
+ u32 table_offset, table_size;
+
+ if (adev->asic_type == CHIP_CARRIZO)
+ max_me = 5;
+
+ /* write the cp table buffer */
+ dst_ptr = adev->gfx.rlc.cp_table_ptr;
+ for (me = 0; me < max_me; me++) {
+ if (me == 0) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.ce_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 1) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.pfp_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 2) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.me_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 3) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.mec_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 4) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.mec2_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ }
+
+ for (i = 0; i < table_size; i++) {
+ dst_ptr[bo_offset + i] =
+ cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
+ }
+
+ bo_offset += table_size;
+ }
+}
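
The five branches above differ only in which firmware image they read; the copy itself is identical. A hedged, table-driven sketch of the same packing (standalone C with simplified stand-in structures, and without the le32 handling the real code does):

	#include <stdint.h>
	#include <string.h>

	struct fw_blob {              /* stand-in for the gfx firmware headers */
		const uint32_t *data;
		uint32_t jt_offset;   /* jump table start, in dwords */
		uint32_t jt_size;     /* jump table length, in dwords */
	};

	/* Copy each ME's jump table back to back into dst, as the if/else
	 * chain does; returns the number of dwords written. */
	static uint32_t pack_jump_tables(uint32_t *dst, const struct fw_blob *fw,
					 int count)
	{
		uint32_t bo_offset = 0;
		int me;

		for (me = 0; me < count; me++) {
			memcpy(dst + bo_offset, fw[me].data + fw[me].jt_offset,
			       fw[me].jt_size * sizeof(uint32_t));
			bo_offset += fw[me].jt_size;
		}
		return bo_offset;
	}

	int main(void)
	{
		uint32_t blob_a[4] = { 0, 1, 2, 3 };
		uint32_t blob_b[4] = { 9, 8, 7, 6 };
		struct fw_blob fw[2] = {
			{ blob_a, 1, 2 }, /* copies { 1, 2 } */
			{ blob_b, 0, 3 }, /* copies { 9, 8, 7 } */
		};
		uint32_t dst[8] = { 0 };

		return pack_jump_tables(dst, fw, 2) == 5 ? 0 : 1;
	}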
+
static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
{
int r;
@@ -1130,6 +1195,18 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
adev->gfx.rlc.clear_state_obj = NULL;
}
+
+ /* jump table block */
+ if (adev->gfx.rlc.cp_table_obj) {
+ r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
+ if (unlikely(r != 0))
+ dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
+ amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+
+ amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
+ adev->gfx.rlc.cp_table_obj = NULL;
+ }
}
static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
@@ -1186,6 +1263,46 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
}
+ if ((adev->asic_type == CHIP_CARRIZO) ||
+ (adev->asic_type == CHIP_STONEY)) {
+ adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
+ if (adev->gfx.rlc.cp_table_obj == NULL) {
+ r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ NULL, NULL,
+ &adev->gfx.rlc.cp_table_obj);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
+ return r;
+ }
+ }
+
+ r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
+ if (unlikely(r != 0)) {
+ dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
+ return r;
+ }
+ r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.cp_table_gpu_addr);
+ if (r) {
+ amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+ dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r);
+ return r;
+ }
+ r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
+ if (r) {
+ dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r);
+ return r;
+ }
+
+ cz_init_cp_jump_table(adev);
+
+ amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+
+ }
+
return 0;
}
@@ -1567,7 +1684,6 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
RREG32(sec_ded_counter_registers[i]);
fail:
- fence_put(f);
amdgpu_ib_free(adev, &ib, NULL);
fence_put(f);
@@ -3294,9 +3410,15 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
}
}
-void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num)
+static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev,
+ u32 se_num, u32 sh_num, u32 instance)
{
- u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
+ u32 data;
+
+ if (instance == 0xffffffff)
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
+ else
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) {
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
@@ -3346,13 +3468,13 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v8_0_select_se_sh(adev, i, j);
+ gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
data = gfx_v8_0_get_rb_active_bitmap(adev);
active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
rb_bitmap_width_per_sh);
}
}
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
adev->gfx.config.backend_enable_mask = active_rbs;
@@ -3456,7 +3578,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
* making sure that the following register writes will be broadcasted
* to all the shaders
*/
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
WREG32(mmPA_SC_FIFO_SIZE,
(adev->gfx.config.sc_prim_fifo_size_frontend <<
@@ -3479,7 +3601,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v8_0_select_se_sh(adev, i, j);
+ gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
for (k = 0; k < adev->usec_timeout; k++) {
if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
break;
@@ -3487,7 +3609,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
}
}
}
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
@@ -3648,13 +3770,13 @@ static void gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev)
WREG32(mmRLC_SRM_CNTL, data);
}
-static void polaris11_init_power_gating(struct amdgpu_device *adev)
+static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev)
{
uint32_t data;
if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
- AMD_PG_SUPPORT_GFX_SMG |
- AMD_PG_SUPPORT_GFX_DMG)) {
+ AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_GFX_DMG)) {
data = RREG32(mmCP_RB_WPTR_POLL_CNTL);
data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
@@ -3679,6 +3801,53 @@ static void polaris11_init_power_gating(struct amdgpu_device *adev)
}
}
+static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(mmRLC_PG_CNTL);
+
+ if (enable)
+ data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
+ else
+ data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
+
+ if (orig != data)
+ WREG32(mmRLC_PG_CNTL, data);
+}
+
+static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(mmRLC_PG_CNTL);
+
+ if (enable)
+ data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
+ else
+ data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
+
+ if (orig != data)
+ WREG32(mmRLC_PG_CNTL, data);
+}
+
+static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(mmRLC_PG_CNTL);
+
+ if (enable)
+ data &= ~RLC_PG_CNTL__CP_PG_DISABLE_MASK;
+ else
+ data |= RLC_PG_CNTL__CP_PG_DISABLE_MASK;
+
+ if (orig != data)
+ WREG32(mmRLC_PG_CNTL, data);
+}
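
cz_enable_sck_slow_down_on_power_up/down() and cz_enable_cp_power_gating() all repeat the same read-modify-write idiom: read RLC_PG_CNTL, set or clear one mask, and write back only when the value changed. A generic sketch of that idiom (standalone, with a fake register file standing in for RREG32/WREG32):

	#include <assert.h>
	#include <stdint.h>

	static uint32_t regs[1];                        /* fake register file */
	static uint32_t rreg(int r) { return regs[r]; }
	static void wreg(int r, uint32_t v) { regs[r] = v; }

	static void toggle_bits(int reg, uint32_t mask, int enable)
	{
		uint32_t orig, data;

		orig = data = rreg(reg);
		if (enable)
			data |= mask;
		else
			data &= ~mask;
		if (orig != data)       /* skip the write when nothing changed */
			wreg(reg, data);
	}

	int main(void)
	{
		toggle_bits(0, 0x4, 1);
		assert(regs[0] == 0x4);
		toggle_bits(0, 0x4, 1); /* second call is a no-op */
		return 0;
	}

Note the CP case is inverted: the register bit is a disable bit, so enabling CP power gating clears RLC_PG_CNTL__CP_PG_DISABLE_MASK.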
+
static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
{
if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
@@ -3691,8 +3860,25 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
gfx_v8_0_init_save_restore_list(adev);
gfx_v8_0_enable_save_restore_machine(adev);
- if (adev->asic_type == CHIP_POLARIS11)
- polaris11_init_power_gating(adev);
+ if ((adev->asic_type == CHIP_CARRIZO) ||
+ (adev->asic_type == CHIP_STONEY)) {
+ WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
+ gfx_v8_0_init_power_gating(adev);
+ WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
+ if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
+ cz_enable_sck_slow_down_on_power_up(adev, true);
+ cz_enable_sck_slow_down_on_power_down(adev, true);
+ } else {
+ cz_enable_sck_slow_down_on_power_up(adev, false);
+ cz_enable_sck_slow_down_on_power_down(adev, false);
+ }
+ if (adev->pg_flags & AMD_PG_SUPPORT_CP)
+ cz_enable_cp_power_gating(adev, true);
+ else
+ cz_enable_cp_power_gating(adev, false);
+ } else if (adev->asic_type == CHIP_POLARIS11) {
+ gfx_v8_0_init_power_gating(adev);
+ }
}
}
@@ -4931,7 +5117,7 @@ static int gfx_v8_0_soft_reset(void *handle)
* Fetches a GPU clock counter snapshot.
* Returns the 64 bit clock counter snapshot.
*/
-uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev)
+static uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
uint64_t clock;
@@ -4991,12 +5177,18 @@ static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
}
+static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
+ .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
+ .select_se_sh = &gfx_v8_0_select_se_sh,
+};
+
static int gfx_v8_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS;
adev->gfx.num_compute_rings = GFX8_NUM_COMPUTE_RINGS;
+ adev->gfx.funcs = &gfx_v8_0_gfx_funcs;
gfx_v8_0_set_ring_funcs(adev);
gfx_v8_0_set_irq_funcs(adev);
gfx_v8_0_set_gds_init(adev);
@@ -5029,51 +5221,43 @@ static int gfx_v8_0_late_init(void *handle)
return 0;
}
-static void polaris11_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
- bool enable)
+static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
+ bool enable)
{
uint32_t data, temp;
- /* Send msg to SMU via Powerplay */
- amdgpu_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_SMC,
- enable ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
+ if (adev->asic_type == CHIP_POLARIS11)
+ /* Send msg to SMU via Powerplay */
+ amdgpu_set_powergating_state(adev,
+ AMD_IP_BLOCK_TYPE_SMC,
+ enable ?
+ AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
- if (enable) {
- /* Enable static MGPG */
- temp = data = RREG32(mmRLC_PG_CNTL);
+ temp = data = RREG32(mmRLC_PG_CNTL);
+ /* Enable static MGPG */
+ if (enable)
data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
-
- if (temp != data)
- WREG32(mmRLC_PG_CNTL, data);
- } else {
- temp = data = RREG32(mmRLC_PG_CNTL);
+ else
data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
- if (temp != data)
- WREG32(mmRLC_PG_CNTL, data);
- }
+ if (temp != data)
+ WREG32(mmRLC_PG_CNTL, data);
}
-static void polaris11_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
- bool enable)
+static void gfx_v8_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
+ bool enable)
{
uint32_t data, temp;
- if (enable) {
- /* Enable dynamic MGPG */
- temp = data = RREG32(mmRLC_PG_CNTL);
+ temp = data = RREG32(mmRLC_PG_CNTL);
+ /* Enable dynamic MGPG */
+ if (enable)
data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
-
- if (temp != data)
- WREG32(mmRLC_PG_CNTL, data);
- } else {
- temp = data = RREG32(mmRLC_PG_CNTL);
+ else
data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
- if (temp != data)
- WREG32(mmRLC_PG_CNTL, data);
- }
+ if (temp != data)
+ WREG32(mmRLC_PG_CNTL, data);
}
static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev,
@@ -5081,19 +5265,63 @@ static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *ade
{
uint32_t data, temp;
- if (enable) {
- /* Enable quick PG */
- temp = data = RREG32(mmRLC_PG_CNTL);
- data |= 0x100000;
+ temp = data = RREG32(mmRLC_PG_CNTL);
+ /* Enable quick PG */
+ if (enable)
+ data |= RLC_PG_CNTL__QUICK_PG_ENABLE_MASK;
+ else
+ data &= ~RLC_PG_CNTL__QUICK_PG_ENABLE_MASK;
- if (temp != data)
- WREG32(mmRLC_PG_CNTL, data);
- } else {
- temp = data = RREG32(mmRLC_PG_CNTL);
- data &= ~0x100000;
+ if (temp != data)
+ WREG32(mmRLC_PG_CNTL, data);
+}
- if (temp != data)
- WREG32(mmRLC_PG_CNTL, data);
+static void cz_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(mmRLC_PG_CNTL);
+
+ if (enable)
+ data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+ else
+ data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+
+ if (orig != data)
+ WREG32(mmRLC_PG_CNTL, data);
+}
+
+static void cz_enable_gfx_pipeline_power_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(mmRLC_PG_CNTL);
+
+ if (enable)
+ data |= RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
+ else
+ data &= ~RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
+
+ if (orig != data)
+ WREG32(mmRLC_PG_CNTL, data);
+
+ /* Read any GFX register to wake up GFX. */
+ if (!enable)
+ data = RREG32(mmDB_RENDER_CONTROL);
+}
+
+static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
+ cz_enable_gfx_cg_power_gating(adev, true);
+ if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
+ cz_enable_gfx_pipeline_power_gating(adev, true);
+ } else {
+ cz_enable_gfx_cg_power_gating(adev, false);
+ cz_enable_gfx_pipeline_power_gating(adev, false);
}
}
@@ -5101,21 +5329,42 @@ static int gfx_v8_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool enable = (state == AMD_PG_STATE_GATE);
if (!(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
return 0;
switch (adev->asic_type) {
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)
+ cz_update_gfx_cg_power_gating(adev, enable);
+
+ if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
+ gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
+ else
+ gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);
+
+ if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
+ gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
+ else
+ gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
+ break;
case CHIP_POLARIS11:
- if (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG)
- polaris11_enable_gfx_static_mg_power_gating(adev,
- state == AMD_PG_STATE_GATE ? true : false);
- else if (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG)
- polaris11_enable_gfx_dynamic_mg_power_gating(adev,
- state == AMD_PG_STATE_GATE ? true : false);
+ if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
+ gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
+ else
+ gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);
+
+ if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
+ gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
+ else
+ gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
+
+ if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_QUICK_MG) && enable)
+ polaris11_enable_gfx_quick_mg_power_gating(adev, true);
else
- polaris11_enable_gfx_quick_mg_power_gating(adev,
- state == AMD_PG_STATE_GATE ? true : false);
+ polaris11_enable_gfx_quick_mg_power_gating(adev, false);
break;
default:
break;
@@ -5129,7 +5378,7 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
{
uint32_t data;
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
@@ -5517,6 +5766,8 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
WREG32(mmRLC_CGCG_CGLS_CTRL, data);
}
+ gfx_v8_0_wait_for_rlc_serdes(adev);
+
adev->gfx.rlc.funcs->exit_safe_mode(adev);
}
static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
@@ -5642,17 +5893,6 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
unsigned vm_id, bool ctx_switch)
{
u32 header, control = 0;
- u32 next_rptr = ring->wptr + 5;
-
- if (ctx_switch)
- next_rptr += 2;
-
- next_rptr += 4;
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
- amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
- amdgpu_ring_write(ring, next_rptr);
/* insert SWITCH_BUFFER packet before first IB in the ring frame */
if (ctx_switch) {
@@ -5681,23 +5921,9 @@ static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{
- u32 header, control = 0;
- u32 next_rptr = ring->wptr + 5;
-
- control |= INDIRECT_BUFFER_VALID;
-
- next_rptr += 4;
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
- amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
- amdgpu_ring_write(ring, next_rptr);
+ u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
- header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
-
- control |= ib->length_dw | (vm_id << 24);
-
- amdgpu_ring_write(ring, header);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
@@ -6150,9 +6376,9 @@ static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_TOPAZ:
- case CHIP_STONEY:
adev->gfx.rlc.funcs = &iceland_rlc_funcs;
break;
+ case CHIP_STONEY:
case CHIP_CARRIZO:
adev->gfx.rlc.funcs = &cz_rlc_funcs;
break;
@@ -6190,6 +6416,20 @@ static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev)
}
}
+static void gfx_v8_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
+ u32 bitmap)
+{
+ u32 data;
+
+ if (!bitmap)
+ return;
+
+ data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+ data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
+
+ WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
+}
+
static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
u32 data, mask;
@@ -6210,16 +6450,22 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
int i, j, k, counter, active_cu_number = 0;
u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
+ unsigned disable_masks[4 * 2];
memset(cu_info, 0, sizeof(*cu_info));
+ amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
+
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
mask = 1;
ao_bitmap = 0;
counter = 0;
- gfx_v8_0_select_se_sh(adev, i, j);
+ gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
+ if (i < 4 && j < 2)
+ gfx_v8_0_set_user_cu_inactive_bitmap(
+ adev, disable_masks[i * 2 + j]);
bitmap = gfx_v8_0_get_cu_active_bitmap(adev);
cu_info->bitmap[i][j] = bitmap;
@@ -6235,7 +6481,7 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
}
}
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
cu_info->number = active_cu_number;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
index 16a49f53a..bc82c7943 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
@@ -26,7 +26,6 @@
extern const struct amd_ip_funcs gfx_v8_0_ip_funcs;
-uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev);
void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index e05402728..c2b554c5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -39,6 +39,7 @@
static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
+static int gmc_v7_0_wait_for_idle(void *handle);
/*(DEBLOBBED)*/
@@ -71,39 +72,15 @@ static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
}
}
-/**
- * gmc7_mc_wait_for_idle - wait for MC idle callback.
- *
- * @adev: amdgpu_device pointer
- *
- * Wait for the MC (memory controller) to be idle.
- * (evergreen+).
- * Returns 0 if the MC is idle, -1 if not.
- */
-int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev)
-{
- unsigned i;
- u32 tmp;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- /* read MC_STATUS */
- tmp = RREG32(mmSRBM_STATUS) & 0x1F00;
- if (!tmp)
- return 0;
- udelay(1);
- }
- return -1;
-}
-
-void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
+static void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
{
u32 blackout;
if (adev->mode_info.num_crtc)
amdgpu_display_stop_mc_access(adev, save);
- amdgpu_asic_wait_for_mc_idle(adev);
+ gmc_v7_0_wait_for_idle((void *)adev);
blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
@@ -118,8 +95,8 @@ void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
udelay(100);
}
-void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
+static void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
{
u32 tmp;
@@ -310,7 +287,7 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
amdgpu_display_set_vga_render_state(adev, false);
gmc_v7_0_mc_stop(adev, &save);
- if (amdgpu_asic_wait_for_mc_idle(adev)) {
+ if (gmc_v7_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
/* Update configuration */
@@ -330,7 +307,7 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
WREG32(mmMC_VM_AGP_BASE, 0);
WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
- if (amdgpu_asic_wait_for_mc_idle(adev)) {
+ if (gmc_v7_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
gmc_v7_0_mc_resume(adev, &save);
@@ -1136,7 +1113,7 @@ static int gmc_v7_0_soft_reset(void *handle)
if (srbm_soft_reset) {
gmc_v7_0_mc_stop(adev, &save);
- if (gmc_v7_0_wait_for_idle(adev)) {
+ if (gmc_v7_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
index 36fcbbc46..0b386b5d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
@@ -26,11 +26,4 @@
extern const struct amd_ip_funcs gmc_v7_0_ip_funcs;
-/* XXX these shouldn't be exported */
-void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save);
-void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save);
-int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev);
-
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index a7b6de8a1..34695e2e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -41,6 +41,7 @@
static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
+static int gmc_v8_0_wait_for_idle(void *handle);
/*(DEBLOBBED)*/
@@ -100,6 +101,11 @@ static const u32 stoney_mgcg_cgcg_init[] =
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
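+/* {register, AND mask, OR value} triplets consumed by
+ * amdgpu_program_register_sequence(): each register is read-modify-written
+ * as (old & ~mask) | value, or written outright when the mask is
+ * 0xffffffff.
+ */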
+static const u32 golden_settings_stoney_common[] =
+{
+ mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
+ mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
+};
static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
@@ -139,50 +145,24 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
amdgpu_program_register_sequence(adev,
stoney_mgcg_cgcg_init,
(const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
+ amdgpu_program_register_sequence(adev,
+ golden_settings_stoney_common,
+ (const u32)ARRAY_SIZE(golden_settings_stoney_common));
break;
default:
break;
}
}
-/**
- * gmc8_mc_wait_for_idle - wait for MC idle callback.
- *
- * @adev: amdgpu_device pointer
- *
- * Wait for the MC (memory controller) to be idle.
- * (evergreen+).
- * Returns 0 if the MC is idle, -1 if not.
- */
-int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev)
-{
- unsigned i;
- u32 tmp;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- /* read MC_STATUS */
- tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__VMC_BUSY_MASK |
- SRBM_STATUS__MCB_BUSY_MASK |
- SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
- SRBM_STATUS__MCC_BUSY_MASK |
- SRBM_STATUS__MCD_BUSY_MASK |
- SRBM_STATUS__VMC1_BUSY_MASK);
- if (!tmp)
- return 0;
- udelay(1);
- }
- return -1;
-}
-
-void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
+static void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
{
u32 blackout;
if (adev->mode_info.num_crtc)
amdgpu_display_stop_mc_access(adev, save);
- amdgpu_asic_wait_for_mc_idle(adev);
+ gmc_v8_0_wait_for_idle(adev);
blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
@@ -197,8 +177,8 @@ void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
udelay(100);
}
-void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
+static void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
{
u32 tmp;
@@ -391,7 +371,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
amdgpu_display_set_vga_render_state(adev, false);
gmc_v8_0_mc_stop(adev, &save);
- if (amdgpu_asic_wait_for_mc_idle(adev)) {
+ if (gmc_v8_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
/* Update configuration */
@@ -411,7 +391,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
WREG32(mmMC_VM_AGP_BASE, 0);
WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
- if (amdgpu_asic_wait_for_mc_idle(adev)) {
+ if (gmc_v8_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
gmc_v8_0_mc_resume(adev, &save);
@@ -1138,7 +1118,7 @@ static int gmc_v8_0_soft_reset(void *handle)
if (srbm_soft_reset) {
gmc_v8_0_mc_stop(adev, &save);
- if (gmc_v8_0_wait_for_idle(adev)) {
+ if (gmc_v8_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
index 973436086..fc5001a81 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
@@ -26,11 +26,4 @@
extern const struct amd_ip_funcs gmc_v8_0_ip_funcs;
-/* XXX these shouldn't be exported */
-void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save);
-void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save);
-int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev);
-
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
index 571e37566..90b4bcc72 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
@@ -24,7 +24,7 @@
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
-#include "iceland_smumgr.h"
+#include "iceland_smum.h"
/*(DEBLOBBED)*/
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
index 52ee08193..211839913 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
@@ -25,7 +25,7 @@
#include "drmP.h"
#include "amdgpu.h"
#include "ppsmc.h"
-#include "iceland_smumgr.h"
+#include "iceland_smum.h"
#include "smu_ucode_xfer_vi.h"
#include "amdgpu_ucode.h"
@@ -211,7 +211,7 @@ static int iceland_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
PPSMC_Msg msg)
{
if (!iceland_is_smc_ram_running(adev))
- return -EINVAL;;
+ return -EINVAL;
if (wait_smu_response(adev)) {
DRM_ERROR("Failed to send previous message\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h b/drivers/gpu/drm/amd/amdgpu/iceland_smum.h
new file mode 100644
index 000000000..5983e3150
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_smum.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef ICELAND_SMUM_H
+#define ICELAND_SMUM_H
+
+#include "ppsmc.h"
+
+extern int iceland_smu_init(struct amdgpu_device *adev);
+extern int iceland_smu_fini(struct amdgpu_device *adev);
+extern int iceland_smu_start(struct amdgpu_device *adev);
+
+struct iceland_smu_private_data
+{
+ uint8_t *header;
+ uint8_t *mec_image;
+ uint32_t header_addr_high;
+ uint32_t header_addr_low;
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index a789a863d..a845e883f 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -191,6 +191,7 @@ static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
vid_mapping_table->num_entries = i;
}
+#if 0
static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
{
{ 0, 4, 1 },
@@ -289,6 +290,7 @@ static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
{
{ 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};
+#endif
static const struct kv_pt_config_reg didt_config_kv[] =
{
@@ -507,19 +509,19 @@ static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
pi->caps_db_ramping ||
pi->caps_td_ramping ||
pi->caps_tcp_ramping) {
- gfx_v7_0_enter_rlc_safe_mode(adev);
+ adev->gfx.rlc.funcs->enter_safe_mode(adev);
if (enable) {
ret = kv_program_pt_config_registers(adev, didt_config_kv);
if (ret) {
- gfx_v7_0_exit_rlc_safe_mode(adev);
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
return ret;
}
}
kv_do_enable_didt(adev, enable);
- gfx_v7_0_exit_rlc_safe_mode(adev);
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/ppsmc.h b/drivers/gpu/drm/amd/amdgpu/ppsmc.h
index 7837f2ecc..8463245f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/ppsmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/ppsmc.h
@@ -90,7 +90,9 @@ typedef uint8_t PPSMC_Result;
#define PPSMC_StartFanControl ((uint8_t)0x5B)
#define PPSMC_StopFanControl ((uint8_t)0x5C)
#define PPSMC_MSG_NoDisplay ((uint8_t)0x5D)
+#define PPSMC_NoDisplay ((uint8_t)0x5D)
#define PPSMC_MSG_HasDisplay ((uint8_t)0x5E)
+#define PPSMC_HasDisplay ((uint8_t)0x5E)
#define PPSMC_MSG_UVDPowerOFF ((uint8_t)0x60)
#define PPSMC_MSG_UVDPowerON ((uint8_t)0x61)
#define PPSMC_MSG_EnableULV ((uint8_t)0x62)
@@ -108,6 +110,7 @@ typedef uint8_t PPSMC_Result;
#define PPSMC_MSG_DisableDTE ((uint8_t)0x88)
#define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint8_t)0x96)
#define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint8_t)0x97)
+#define PPSMC_MSG_EnableACDCGPIOInterrupt ((uint16_t) 0x149)
/* CI/KV/KB */
#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D)
@@ -161,6 +164,7 @@ typedef uint8_t PPSMC_Result;
#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190)
#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191)
#define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A)
+#define PPSMC_MSG_SetFanRpmMax ((uint16_t) 0x205)
#define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C)
#define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 36d97195f..f6e37bdc1 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -254,19 +254,6 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
unsigned vm_id, bool ctx_switch)
{
u32 vmid = vm_id & 0xf;
- u32 next_rptr = ring->wptr + 5;
-
- while ((next_rptr & 7) != 2)
- next_rptr++;
-
- next_rptr += 6;
-
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
- SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
- amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
- amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
- amdgpu_ring_write(ring, next_rptr);
/* IB packet must end on an 8 DW boundary */
sdma_v2_4_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8);
@@ -405,7 +392,7 @@ static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable)
u32 f32_cntl;
int i;
- if (enable == false) {
+ if (!enable) {
sdma_v2_4_gfx_stop(adev);
sdma_v2_4_rlc_stop(adev);
}
@@ -579,19 +566,21 @@ static int sdma_v2_4_start(struct amdgpu_device *adev)
{
int r;
- if (!adev->firmware.smu_load) {
- r = sdma_v2_4_load_microcode(adev);
- if (r)
- return r;
- } else {
- r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
- AMDGPU_UCODE_ID_SDMA0);
- if (r)
- return -EINVAL;
- r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
- AMDGPU_UCODE_ID_SDMA1);
- if (r)
- return -EINVAL;
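+ /* When powerplay owns firmware loading (adev->pp_enabled), skip both
+ * the direct microcode load and the legacy SMU load-finish check.
+ */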
+ if (!adev->pp_enabled) {
+ if (!adev->firmware.smu_load) {
+ r = sdma_v2_4_load_microcode(adev);
+ if (r)
+ return r;
+ } else {
+ r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+ AMDGPU_UCODE_ID_SDMA0);
+ if (r)
+ return -EINVAL;
+ r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+ AMDGPU_UCODE_ID_SDMA1);
+ if (r)
+ return -EINVAL;
+ }
}
/* halt the engine before programing */
@@ -678,20 +667,19 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
* Test a simple IB in the DMA ring (VI).
* Returns 0 on success, error on failure.
*/
-static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
+static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct fence *f = NULL;
- unsigned i;
unsigned index;
- int r;
u32 tmp = 0;
u64 gpu_addr;
+ long r;
r = amdgpu_wb_get(adev, &index);
if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
return r;
}
@@ -701,7 +689,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
goto err0;
}
@@ -720,28 +708,25 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
if (r)
goto err1;
- r = fence_wait(f, false);
- if (r) {
- DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+ r = fence_wait_timeout(f, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out\n");
+ r = -ETIMEDOUT;
goto err1;
- }
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = le32_to_cpu(adev->wb.wb[index]);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
- if (i < adev->usec_timeout) {
- DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
- ring->idx, i);
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
+ }
+ tmp = le32_to_cpu(adev->wb.wb[index]);
+ if (tmp == 0xDEADBEEF) {
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
} else {
DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
r = -EINVAL;
}
err1:
- fence_put(f);
amdgpu_ib_free(adev, &ib, NULL);
fence_put(f);
err0:
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 95c44942e..82bab2fb4 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -405,18 +405,6 @@ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
unsigned vm_id, bool ctx_switch)
{
u32 vmid = vm_id & 0xf;
- u32 next_rptr = ring->wptr + 5;
-
- while ((next_rptr & 7) != 2)
- next_rptr++;
- next_rptr += 6;
-
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
- SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
- amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
- amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
- amdgpu_ring_write(ring, next_rptr);
/* IB packet must end on an 8 DW boundary */
sdma_v3_0_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8);
@@ -606,7 +594,7 @@ static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
u32 f32_cntl;
int i;
- if (enable == false) {
+ if (!enable) {
sdma_v3_0_gfx_stop(adev);
sdma_v3_0_rlc_stop(adev);
}
@@ -898,20 +886,19 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
* Test a simple IB in the DMA ring (VI).
* Returns 0 on success, error on failure.
*/
-static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
+static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct fence *f = NULL;
- unsigned i;
unsigned index;
- int r;
u32 tmp = 0;
u64 gpu_addr;
+ long r;
r = amdgpu_wb_get(adev, &index);
if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
return r;
}
@@ -921,7 +908,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
+ DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
goto err0;
}
@@ -940,27 +927,24 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
if (r)
goto err1;
- r = fence_wait(f, false);
- if (r) {
- DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
+ r = fence_wait_timeout(f, false, timeout);
+ if (r == 0) {
+ DRM_ERROR("amdgpu: IB test timed out\n");
+ r = -ETIMEDOUT;
goto err1;
- }
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = le32_to_cpu(adev->wb.wb[index]);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
- if (i < adev->usec_timeout) {
- DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
- ring->idx, i);
+ } else if (r < 0) {
+ DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
+ }
+ tmp = le32_to_cpu(adev->wb.wb[index]);
+ if (tmp == 0xDEADBEEF) {
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ r = 0;
} else {
DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
r = -EINVAL;
}
err1:
- fence_put(f);
amdgpu_ib_free(adev, &ib, NULL);
fence_put(f);
err0:
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
index 083893dd6..940de1836 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
@@ -173,7 +173,7 @@ static int tonga_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
{
if (!tonga_is_smc_ram_running(adev))
{
- return -EINVAL;;
+ return -EINVAL;
}
if (wait_smu_response(adev)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index f07551476..132e613ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -34,6 +34,8 @@
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
+#include "bif/bif_4_1_d.h"
+
static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
static void uvd_v4_2_init_cg(struct amdgpu_device *adev);
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
@@ -439,6 +441,32 @@ static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
}
/**
+ * uvd_v4_2_ring_emit_hdp_flush - emit an hdp flush
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp flush.
+ */
+static void uvd_v4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
+ amdgpu_ring_write(ring, 0);
+}
+
+/**
+ * uvd_v4_2_ring_emit_hdp_invalidate - emit an hdp invalidate
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp invalidate.
+ */
+static void uvd_v4_2_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
+ amdgpu_ring_write(ring, 1);
+}
+
+/**
* uvd_v4_2_ring_test_ring - register write test
*
* @ring: amdgpu_ring pointer
@@ -499,49 +527,6 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
}
/**
- * uvd_v4_2_ring_test_ib - test ib execution
- *
- * @ring: amdgpu_ring pointer
- *
- * Test if we can successfully execute an IB
- */
-static int uvd_v4_2_ring_test_ib(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
- struct fence *fence = NULL;
- int r;
-
- r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
- if (r) {
- DRM_ERROR("amdgpu: failed to raise UVD clocks (%d).\n", r);
- return r;
- }
-
- r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
- if (r) {
- DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
- goto error;
- }
-
- r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
- if (r) {
- DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
- goto error;
- }
-
- r = fence_wait(fence, false);
- if (r) {
- DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
- goto error;
- }
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
-error:
- fence_put(fence);
- amdgpu_asic_set_uvd_clocks(adev, 0, 0);
- return r;
-}
-
-/**
* uvd_v4_2_mc_resume - memory controller programming
*
* @adev: amdgpu_device pointer
@@ -763,10 +748,14 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
.parse_cs = amdgpu_uvd_ring_parse_cs,
.emit_ib = uvd_v4_2_ring_emit_ib,
.emit_fence = uvd_v4_2_ring_emit_fence,
+ .emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = uvd_v4_2_ring_emit_hdp_invalidate,
.test_ring = uvd_v4_2_ring_test_ring,
- .test_ib = uvd_v4_2_ring_test_ib,
+ .test_ib = amdgpu_uvd_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_uvd_ring_begin_use,
+ .end_use = amdgpu_uvd_ring_end_use,
};
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index e0a76a883..101de136b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -31,6 +31,7 @@
#include "uvd/uvd_5_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
+#include "bif/bif_5_0_d.h"
#include "vi.h"
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
@@ -489,6 +490,32 @@ static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
}
/**
+ * uvd_v5_0_ring_emit_hdp_flush - emit an hdp flush
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp flush.
+ */
+static void uvd_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
+ amdgpu_ring_write(ring, 0);
+}
+
+/**
+ * uvd_v5_0_ring_emit_hdp_invalidate - emit an hdp invalidate
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp invalidate.
+ */
+static void uvd_v5_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
+ amdgpu_ring_write(ring, 1);
+}
+
+/**
* uvd_v5_0_ring_test_ring - register write test
*
* @ring: amdgpu_ring pointer
@@ -550,49 +577,6 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, ib->length_dw);
}
-/**
- * uvd_v5_0_ring_test_ib - test ib execution
- *
- * @ring: amdgpu_ring pointer
- *
- * Test if we can successfully execute an IB
- */
-static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
- struct fence *fence = NULL;
- int r;
-
- r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
- if (r) {
- DRM_ERROR("amdgpu: failed to raise UVD clocks (%d).\n", r);
- return r;
- }
-
- r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
- if (r) {
- DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
- goto error;
- }
-
- r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
- if (r) {
- DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
- goto error;
- }
-
- r = fence_wait(fence, false);
- if (r) {
- DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
- goto error;
- }
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
-error:
- fence_put(fence);
- amdgpu_asic_set_uvd_clocks(adev, 0, 0);
- return r;
-}
-
static bool uvd_v5_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -815,10 +799,14 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
.parse_cs = amdgpu_uvd_ring_parse_cs,
.emit_ib = uvd_v5_0_ring_emit_ib,
.emit_fence = uvd_v5_0_ring_emit_fence,
+ .emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = uvd_v5_0_ring_emit_hdp_invalidate,
.test_ring = uvd_v5_0_ring_test_ring,
- .test_ib = uvd_v5_0_ring_test_ib,
+ .test_ib = amdgpu_uvd_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_uvd_ring_begin_use,
+ .end_use = amdgpu_uvd_ring_end_use,
};
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index c9929d665..7f21102bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -33,6 +33,8 @@
#include "oss/oss_2_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
+#include "bif/bif_5_1_d.h"
+#include "gmc/gmc_8_1_d.h"
#include "vi.h"
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
@@ -385,8 +387,8 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
uint32_t mp_swap_cntl;
int i, j, r;
- /*disable DPG */
- WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));
+ /* disable DPG */
+ WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
/* disable byte swapping */
lmi_swap_cntl = 0;
@@ -405,17 +407,21 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
}
/* disable interrupt */
- WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
+ WREG32_P(mmUVD_MASTINT_EN, 0, ~UVD_MASTINT_EN__VCPU_EN_MASK);
/* stall UMC and register bus before resetting VCPU */
- WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+ WREG32_P(mmUVD_LMI_CTRL2, UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
mdelay(1);
/* put LMI, VCPU, RBC etc... into reset */
- WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
- UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
- UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
- UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
+ WREG32(mmUVD_SOFT_RESET,
+ UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
+ UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
mdelay(5);
@@ -424,8 +430,13 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
mdelay(5);
/* initialize UVD memory controller */
- WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
- (1 << 21) | (1 << 9) | (1 << 20));
+ WREG32(mmUVD_LMI_CTRL,
+ (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
+ UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+ UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
+ UVD_LMI_CTRL__REQ_MODE_MASK |
+ UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);
#ifdef __BIG_ENDIAN
/* swap (8 in 32) RB and IB */
@@ -447,10 +458,10 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
mdelay(5);
/* enable VCPU clock */
- WREG32(mmUVD_VCPU_CNTL, 1 << 9);
+ WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);
/* enable UMC */
- WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+ WREG32_P(mmUVD_LMI_CTRL2, 0, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
/* boot up the VCPU */
WREG32(mmUVD_SOFT_RESET, 0);
@@ -484,10 +495,12 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
return r;
}
/* enable master interrupt */
- WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));
+ WREG32_P(mmUVD_MASTINT_EN,
+ (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
+ ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
/* clear the bit 4 of UVD_STATUS */
- WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));
+ WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
rb_bufsz = order_base_2(ring->ring_size);
tmp = 0;
@@ -581,6 +594,32 @@ static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
}
/**
+ * uvd_v6_0_ring_emit_hdp_flush - emit an hdp flush
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp flush.
+ */
+static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
+ amdgpu_ring_write(ring, 0);
+}
+
+/**
+ * uvd_v6_0_ring_emit_hdp_invalidate - emit an hdp invalidate
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp invalidate.
+ */
+static void uvd_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
+ amdgpu_ring_write(ring, 1);
+}
+
+/**
* uvd_v6_0_ring_test_ring - register write test
*
* @ring: amdgpu_ring pointer
@@ -634,6 +673,9 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{
+ amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
+ amdgpu_ring_write(ring, vm_id);
+
amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
@@ -642,39 +684,55 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, ib->length_dw);
}
-/**
- * uvd_v6_0_ring_test_ib - test ib execution
- *
- * @ring: amdgpu_ring pointer
- *
- * Test if we can successfully execute an IB
- */
-static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring)
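+/**
+ * uvd_v6_0_ring_emit_vm_flush - emit a VM TLB flush on the UVD ring
+ *
+ * @ring: amdgpu_ring pointer
+ * @vm_id: VMID whose mappings are flushed
+ * @pd_addr: new page directory address
+ *
+ * Writes the page-directory base for @vm_id through the VCPU
+ * DATA0/DATA1/CMD registers (command 0x8), requests a TLB invalidate for
+ * that VMID, then waits (command 0xC) until the invalidate bit reads back
+ * clear.
+ */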
+static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned vm_id, uint64_t pd_addr)
{
- struct fence *fence = NULL;
- int r;
+ uint32_t reg;
- r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
- if (r) {
- DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
- goto error;
- }
+ if (vm_id < 8)
+ reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id;
+ else
+ reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8;
- r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
- if (r) {
- DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
- goto error;
- }
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+ amdgpu_ring_write(ring, reg << 2);
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+ amdgpu_ring_write(ring, pd_addr >> 12);
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+ amdgpu_ring_write(ring, 0x8);
- r = fence_wait(fence, false);
- if (r) {
- DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
- goto error;
- }
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
-error:
- fence_put(fence);
- return r;
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+ amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+ amdgpu_ring_write(ring, 1 << vm_id);
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+ amdgpu_ring_write(ring, 0x8);
+
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+ amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
+ amdgpu_ring_write(ring, 1 << vm_id); /* mask */
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+ amdgpu_ring_write(ring, 0xC);
+}
+
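+/**
+ * uvd_v6_0_ring_emit_pipeline_sync - wait for the last fence on the ring
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits a wait (command 0xE) on the ring's fence address, with the compare
+ * mask in GP_SCRATCH8 and the expected sequence number in GP_SCRATCH9, so
+ * the engine stalls until earlier submissions have signalled.
+ */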
+static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+ uint32_t seq = ring->fence_drv.sync_seq;
+ uint64_t addr = ring->fence_drv.gpu_addr;
+
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
+ amdgpu_ring_write(ring, 0xffffffff); /* mask */
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
+ amdgpu_ring_write(ring, seq);
+ amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
+ amdgpu_ring_write(ring, 0xE);
}
static bool uvd_v6_0_is_idle(void *handle)
@@ -847,7 +905,8 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
static int curstate = -1;
- if (adev->asic_type == CHIP_FIJI)
+ if (adev->asic_type == CHIP_FIJI ||
+ adev->asic_type == CHIP_POLARIS10)
uvd_v6_set_bypass_mode(adev, enable);
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
@@ -912,22 +971,51 @@ const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
.set_powergating_state = uvd_v6_0_set_powergating_state,
};
-static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = {
+static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
.get_rptr = uvd_v6_0_ring_get_rptr,
.get_wptr = uvd_v6_0_ring_get_wptr,
.set_wptr = uvd_v6_0_ring_set_wptr,
.parse_cs = amdgpu_uvd_ring_parse_cs,
.emit_ib = uvd_v6_0_ring_emit_ib,
.emit_fence = uvd_v6_0_ring_emit_fence,
+ .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
+ .test_ring = uvd_v6_0_ring_test_ring,
+ .test_ib = amdgpu_uvd_ring_test_ib,
+ .insert_nop = amdgpu_ring_insert_nop,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_uvd_ring_begin_use,
+ .end_use = amdgpu_uvd_ring_end_use,
+};
+
+static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
+ .get_rptr = uvd_v6_0_ring_get_rptr,
+ .get_wptr = uvd_v6_0_ring_get_wptr,
+ .set_wptr = uvd_v6_0_ring_set_wptr,
+ .parse_cs = NULL,
+ .emit_ib = uvd_v6_0_ring_emit_ib,
+ .emit_fence = uvd_v6_0_ring_emit_fence,
+ .emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
+ .emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
+ .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
.test_ring = uvd_v6_0_ring_test_ring,
- .test_ib = uvd_v6_0_ring_test_ib,
+ .test_ib = amdgpu_uvd_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_uvd_ring_begin_use,
+ .end_use = amdgpu_uvd_ring_end_use,
};
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
- adev->uvd.ring.funcs = &uvd_v6_0_ring_funcs;
+ if (adev->asic_type >= CHIP_POLARIS10) {
+ adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs;
+ DRM_INFO("UVD is enabled in VM mode\n");
+ } else {
+ adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs;
+ DRM_INFO("UVD is enabled in physical mode\n");
+ }
}
static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 45d92aceb..80a37a602 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -594,6 +594,8 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
.test_ib = amdgpu_vce_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_vce_ring_begin_use,
+ .end_use = amdgpu_vce_ring_end_use,
};
static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 30e8099e9..c271abffd 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -43,6 +43,7 @@
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618
+#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
#define VCE_V3_0_FW_SIZE (384 * 1024)
#define VCE_V3_0_STACK_SIZE (64 * 1024)
@@ -51,6 +52,7 @@
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
+static int vce_v3_0_wait_for_idle(void *handle);
/**
* vce_v3_0_ring_get_rptr - get read pointer
@@ -205,6 +207,32 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
vce_v3_0_override_vce_clock_gating(adev, false);
}
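+/* Poll VCE_STATUS for the firmware-loaded report for up to roughly ten
+ * seconds (10 retries x 100 polls x 10 ms); after each ~1 s window without
+ * the flag, pulse the ECPU through soft reset and try again.
+ */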
+static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev)
+{
+ int i, j;
+
+ for (i = 0; i < 10; ++i) {
+ for (j = 0; j < 100; ++j) {
+ uint32_t status = RREG32(mmVCE_STATUS);
+
+ if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
+ return 0;
+ mdelay(10);
+ }
+
+ DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
+ WREG32_P(mmVCE_SOFT_RESET,
+ VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+ ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ mdelay(10);
+ WREG32_P(mmVCE_SOFT_RESET, 0,
+ ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ mdelay(10);
+ }
+
+ return -ETIMEDOUT;
+}
+
/**
* vce_v3_0_start - start VCE block
*
@@ -215,11 +243,24 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
static int vce_v3_0_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- int idx, i, j, r;
+ int idx, r;
+
+ ring = &adev->vce.ring[0];
+ WREG32(mmVCE_RB_RPTR, ring->wptr);
+ WREG32(mmVCE_RB_WPTR, ring->wptr);
+ WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
+ WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
+
+ ring = &adev->vce.ring[1];
+ WREG32(mmVCE_RB_RPTR2, ring->wptr);
+ WREG32(mmVCE_RB_WPTR2, ring->wptr);
+ WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
+ WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
mutex_lock(&adev->grbm_idx_mutex);
for (idx = 0; idx < 2; ++idx) {
-
if (adev->vce.harvest_config & (1 << idx))
continue;
@@ -233,48 +274,24 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
vce_v3_0_mc_resume(adev, idx);
- /* set BUSY flag */
- WREG32_P(mmVCE_STATUS, 1, ~1);
+ WREG32_P(mmVCE_STATUS, VCE_STATUS__JOB_BUSY_MASK,
+ ~VCE_STATUS__JOB_BUSY_MASK);
+
if (adev->asic_type >= CHIP_STONEY)
WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
else
WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
~VCE_VCPU_CNTL__CLK_EN_MASK);
- WREG32_P(mmVCE_SOFT_RESET,
- VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
- ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-
- mdelay(100);
-
WREG32_P(mmVCE_SOFT_RESET, 0,
~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
- for (i = 0; i < 10; ++i) {
- uint32_t status;
- for (j = 0; j < 100; ++j) {
- status = RREG32(mmVCE_STATUS);
- if (status & 2)
- break;
- mdelay(10);
- }
- r = 0;
- if (status & 2)
- break;
-
- DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
- WREG32_P(mmVCE_SOFT_RESET,
- VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
- ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
- mdelay(10);
- WREG32_P(mmVCE_SOFT_RESET, 0,
- ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
- mdelay(10);
- r = -1;
- }
+ mdelay(100);
+
+ r = vce_v3_0_firmware_loaded(adev);
/* clear BUSY flag */
- WREG32_P(mmVCE_STATUS, 0, ~1);
+ WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
/* Set Clock-Gating off */
if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
@@ -290,19 +307,46 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
mutex_unlock(&adev->grbm_idx_mutex);
- ring = &adev->vce.ring[0];
- WREG32(mmVCE_RB_RPTR, ring->wptr);
- WREG32(mmVCE_RB_WPTR, ring->wptr);
- WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
- WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
- WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
+ return 0;
+}
- ring = &adev->vce.ring[1];
- WREG32(mmVCE_RB_RPTR2, ring->wptr);
- WREG32(mmVCE_RB_WPTR2, ring->wptr);
- WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
- WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
- WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
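+/* Counterpart to vce_v3_0_start(): for each non-harvested instance, select
+ * it through GRBM_GFX_INDEX, gate the VCPU clock, hold the ECPU in soft
+ * reset and clear the JOB_BUSY flag.
+ */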
+static int vce_v3_0_stop(struct amdgpu_device *adev)
+{
+ int idx;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (idx = 0; idx < 2; ++idx) {
+ if (adev->vce.harvest_config & (1 << idx))
+ continue;
+
+ if (idx == 0)
+ WREG32_P(mmGRBM_GFX_INDEX, 0,
+ ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+ else
+ WREG32_P(mmGRBM_GFX_INDEX,
+ GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
+ ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+
+ if (adev->asic_type >= CHIP_STONEY)
+ WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
+ else
+ WREG32_P(mmVCE_VCPU_CNTL, 0,
+ ~VCE_VCPU_CNTL__CLK_EN_MASK);
+ /* hold on ECPU */
+ WREG32_P(mmVCE_SOFT_RESET,
+ VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+ ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+
+ /* clear BUSY flag */
+ WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
+
+ /* Set Clock-Gating off */
+ if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
+ vce_v3_0_set_vce_sw_clock_gating(adev, false);
+ }
+
+ WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+ mutex_unlock(&adev->grbm_idx_mutex);
return 0;
}
@@ -441,7 +485,14 @@ static int vce_v3_0_hw_init(void *handle)
static int vce_v3_0_hw_fini(void *handle)
{
- return 0;
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ r = vce_v3_0_wait_for_idle(handle);
+ if (r)
+ return r;
+
+ return vce_v3_0_stop(adev);
}
static int vce_v3_0_suspend(void *handle)
@@ -604,6 +655,18 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static void vce_v3_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+{
+ u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
+
+ if (enable)
+ tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
+ else
+ tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
+
+ WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
+}
+
static int vce_v3_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -611,6 +674,9 @@ static int vce_v3_0_set_clockgating_state(void *handle,
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
int i;
+ if (adev->asic_type == CHIP_POLARIS10)
+ vce_v3_set_bypass_mode(adev, enable);
+
if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
return 0;
@@ -701,6 +767,8 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
.test_ib = amdgpu_vce_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_vce_ring_begin_use,
+ .end_use = amdgpu_vce_ring_end_use,
};
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index d8fca2e11..d2c7e4962 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -200,6 +200,29 @@ static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}
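+/* The GC_CAC block sits behind an index/data pair: the register number is
+ * written to mmGC_CAC_IND_INDEX and the payload moves through
+ * mmGC_CAC_IND_DATA, serialized by gc_cac_idx_lock like the DIDT accessors
+ * above.
+ */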
+static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
+{
+ unsigned long flags;
+ u32 r;
+
+ spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
+ WREG32(mmGC_CAC_IND_INDEX, (reg));
+ r = RREG32(mmGC_CAC_IND_DATA);
+ spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
+ return r;
+}
+
+static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
+ WREG32(mmGC_CAC_IND_INDEX, (reg));
+ WREG32(mmGC_CAC_IND_DATA, (v));
+ spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
+}
+
static const u32 tonga_mgcg_cgcg_init[] =
{
mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
@@ -530,12 +553,12 @@ static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
mutex_lock(&adev->grbm_idx_mutex);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- gfx_v8_0_select_se_sh(adev, se_num, sh_num);
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
val = RREG32(reg_offset);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
return val;
}
@@ -594,7 +617,7 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
return -EINVAL;
}
-static void vi_gpu_pci_config_reset(struct amdgpu_device *adev)
+static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
u32 i;
@@ -609,11 +632,14 @@ static void vi_gpu_pci_config_reset(struct amdgpu_device *adev)
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
- if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
- break;
+ if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
+ /* enable BM */
+ pci_set_master(adev->pdev);
+ return 0;
+ }
udelay(1);
}
-
+ return -EINVAL;
}
static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
@@ -639,13 +665,15 @@ static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hun
*/
static int vi_asic_reset(struct amdgpu_device *adev)
{
+ int r;
+
vi_set_bios_scratch_engine_hung(adev, true);
- vi_gpu_pci_config_reset(adev);
+ r = vi_gpu_pci_config_reset(adev);
vi_set_bios_scratch_engine_hung(adev, false);
- return 0;
+ return r;
}
static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
@@ -1130,9 +1158,6 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
.set_uvd_clocks = &vi_set_uvd_clocks,
.set_vce_clocks = &vi_set_vce_clocks,
.get_virtual_caps = &vi_get_virtual_caps,
- /* these should be moved to their own ip modules */
- .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
- .wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle,
};
static int vi_common_early_init(void *handle)
@@ -1153,6 +1178,8 @@ static int vi_common_early_init(void *handle)
adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
adev->didt_rreg = &vi_didt_rreg;
adev->didt_wreg = &vi_didt_wreg;
+ adev->gc_cac_rreg = &vi_gc_cac_rreg;
+ adev->gc_cac_wreg = &vi_gc_cac_wreg;
adev->asic_funcs = &vi_asic_funcs;
@@ -1226,12 +1253,18 @@ static int vi_common_early_init(void *handle)
adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
+ AMD_CG_SUPPORT_GFX_RLC_LS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_GFX_CGTS |
+ AMD_CG_SUPPORT_GFX_CGTS_LS |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_HDP_MGCG |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS;
- adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x1;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index ec4036a09..a625b9137 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -187,12 +187,12 @@ int init_pipelines(struct device_queue_manager *dqm,
unsigned int get_first_pipe(struct device_queue_manager *dqm);
unsigned int get_pipes_num(struct device_queue_manager *dqm);
-extern inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
+static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
{
return (pdd->lds_base >> 16) & 0xFF;
}
-extern inline unsigned int
+static inline unsigned int
get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
{
return (pdd->lds_base >> 60) & 0x0E;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index e621eba63..a7d3cb3fe 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -184,7 +184,7 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
sizeof(u32)) + inx;
pr_debug("kfd: get kernel queue doorbell\n"
- " doorbell offset == 0x%08d\n"
+ " doorbell offset == 0x%08X\n"
" kernel address == 0x%08lX\n",
*doorbell_off, (uintptr_t)(kfd->doorbell_kernel_ptr + inx));
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index d0d5f4baf..80113c335 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -617,10 +617,7 @@ int kgd2kfd_resume(struct kfd_dev *kfd);
int kfd_init_apertures(struct kfd_process *process);
/* Queue Context Management */
-inline uint32_t lower_32(uint64_t x);
-inline uint32_t upper_32(uint64_t x);
struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd);
-inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m);
int init_queue(struct queue **q, struct queue_properties properties);
void uninit_queue(struct queue *q);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 7708d90b9..4f3849ac8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -63,13 +63,12 @@ static struct kfd_process *create_process(const struct task_struct *thread);
void kfd_process_create_wq(void)
{
if (!kfd_process_wq)
- kfd_process_wq = create_workqueue("kfd_process_wq");
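+ /* alloc_workqueue() with flags == 0 and max_active == 0 selects the
+ * defaults; create_workqueue() is a legacy wrapper around it.
+ */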
+ kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
}
void kfd_process_destroy_wq(void)
{
if (kfd_process_wq) {
- flush_workqueue(kfd_process_wq);
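+ /* destroy_workqueue() drains remaining work itself, so no separate
+ * flush is needed.
+ */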
destroy_workqueue(kfd_process_wq);
kfd_process_wq = NULL;
}
@@ -330,6 +329,7 @@ err_process_pqm_init:
synchronize_rcu();
mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
err_mmu_notifier:
+ mutex_destroy(&process->mutex);
kfd_pasid_free(process->pasid);
err_alloc_pasid:
kfree(process->queues);
diff --git a/drivers/gpu/drm/amd/include/amd_pcie.h b/drivers/gpu/drm/amd/include/amd_pcie.h
index 7c2a916c1..5eb895fd9 100644
--- a/drivers/gpu/drm/amd/include/amd_pcie.h
+++ b/drivers/gpu/drm/amd/include/amd_pcie.h
@@ -37,6 +37,13 @@
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_MASK 0x0000FFFF
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_SHIFT 0
+/* gen: chipset 1/2, asic 1/2/3 */
+#define AMDGPU_DEFAULT_PCIE_GEN_MASK (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 \
+ | CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 \
+ | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 \
+ | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 \
+ | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3)
+
/* Following flags shows PCIe lane width switch supported in driver which are decided by chipset and ASIC */
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X1 0x00010000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 0x00020000
@@ -47,4 +54,11 @@
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 0x00400000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT 16
+/* 1/2/4/8/16 lanes */
+#define AMDGPU_DEFAULT_PCIE_MLW_MASK (CAIL_PCIE_LINK_WIDTH_SUPPORT_X1 \
+ | CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 \
+ | CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 \
+ | CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 \
+ | CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+
#endif
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index afce1edbe..a74a0d2ff 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -26,15 +26,6 @@
#define AMD_MAX_USEC_TIMEOUT 100000 /* 100 ms */
/*
-* Supported GPU families (aligned with amdgpu_drm.h)
-*/
-#define AMD_FAMILY_UNKNOWN 0
-#define AMD_FAMILY_CI 120 /* Bonaire, Hawaii */
-#define AMD_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */
-#define AMD_FAMILY_VI 130 /* Iceland, Tonga */
-#define AMD_FAMILY_CZ 135 /* Carrizo */
-
-/*
* Supported ASIC types
*/
enum amd_asic_type {
@@ -120,6 +111,8 @@ enum amd_powergating_state {
#define AMD_PG_SUPPORT_SDMA (1 << 8)
#define AMD_PG_SUPPORT_ACP (1 << 9)
#define AMD_PG_SUPPORT_SAMU (1 << 10)
+#define AMD_PG_SUPPORT_GFX_QUICK_MG (1 << 11)
+#define AMD_PG_SUPPORT_GFX_PIPELINE (1 << 12)
enum amd_pm_state_type {
/* not used for dpm */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h
index 293329719..809759f7b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h
@@ -27,6 +27,7 @@
#define mmMM_INDEX 0x0
#define mmMM_INDEX_HI 0x6
#define mmMM_DATA 0x1
+#define mmCC_BIF_BX_STRAP2 0x152A
#define mmBIF_MM_INDACCESS_CNTL 0x1500
#define mmBIF_DOORBELL_APER_EN 0x1501
#define mmBUS_CNTL 0x1508
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h
index ebaf67bb1..90ff7c8a6 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h
@@ -2823,4 +2823,7 @@
#define mmDC_EDC_CSINVOC_CNT 0x3192
#define mmDC_EDC_RESTORE_CNT 0x3193
+#define mmGC_CAC_IND_INDEX 0x129a
+#define mmGC_CAC_IND_DATA 0x129b
+
#endif /* GFX_8_0_D_H */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_sh_mask.h
index 7d722458d..4070ca3a6 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_sh_mask.h
@@ -8730,8 +8730,6 @@
#define RLC_GPM_STAT__DYN_CU_POWERING_DOWN__SHIFT 0x10
#define RLC_GPM_STAT__ABORTED_PD_SEQUENCE_MASK 0x20000
#define RLC_GPM_STAT__ABORTED_PD_SEQUENCE__SHIFT 0x11
-#define RLC_GPM_STAT__RESERVED_MASK 0xfc0000
-#define RLC_GPM_STAT__RESERVED__SHIFT 0x12
#define RLC_GPM_STAT__PG_ERROR_STATUS_MASK 0xff000000
#define RLC_GPM_STAT__PG_ERROR_STATUS__SHIFT 0x18
#define RLC_GPU_CLOCK_32_RES_SEL__RES_SEL_MASK 0x3f
@@ -8764,8 +8762,10 @@
#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE__SHIFT 0x12
#define RLC_PG_CNTL__SMU_HANDSHAKE_ENABLE_MASK 0x80000
#define RLC_PG_CNTL__SMU_HANDSHAKE_ENABLE__SHIFT 0x13
-#define RLC_PG_CNTL__RESERVED1_MASK 0xf00000
-#define RLC_PG_CNTL__RESERVED1__SHIFT 0x14
+#define RLC_PG_CNTL__QUICK_PG_ENABLE_MASK 0x100000
+#define RLC_PG_CNTL__QUICK_PG_ENABLE__SHIFT 0x14
+#define RLC_PG_CNTL__RESERVED1_MASK 0xe00000
+#define RLC_PG_CNTL__RESERVED1__SHIFT 0x15
#define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY_MASK 0xff
#define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY__SHIFT 0x0
#define RLC_GPM_THREAD_PRIORITY__THREAD1_PRIORITY_MASK 0xff00
@@ -9102,8 +9102,6 @@
#define RLC_GPM_LOG_CONT__CONT__SHIFT 0x0
#define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK 0xff
#define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT 0x0
-#define RLC_PG_DELAY_3__RESERVED_MASK 0xffffff00
-#define RLC_PG_DELAY_3__RESERVED__SHIFT 0x8
#define RLC_GPM_INT_DISABLE_TH0__DISABLE_MASK 0xffffffff
#define RLC_GPM_INT_DISABLE_TH0__DISABLE__SHIFT 0x0
#define RLC_GPM_INT_DISABLE_TH1__DISABLE_MASK 0xffffffff
@@ -9124,14 +9122,8 @@
#define RLC_SRM_DEBUG_SELECT__RESERVED__SHIFT 0x8
#define RLC_SRM_DEBUG__DATA_MASK 0xffffffff
#define RLC_SRM_DEBUG__DATA__SHIFT 0x0
-#define RLC_SRM_ARAM_ADDR__ADDR_MASK 0x3ff
-#define RLC_SRM_ARAM_ADDR__ADDR__SHIFT 0x0
-#define RLC_SRM_ARAM_ADDR__RESERVED_MASK 0xfffffc00
-#define RLC_SRM_ARAM_ADDR__RESERVED__SHIFT 0xa
#define RLC_SRM_ARAM_DATA__DATA_MASK 0xffffffff
#define RLC_SRM_ARAM_DATA__DATA__SHIFT 0x0
-#define RLC_SRM_DRAM_ADDR__ADDR_MASK 0x3ff
-#define RLC_SRM_DRAM_ADDR__ADDR__SHIFT 0x0
#define RLC_SRM_DRAM_ADDR__RESERVED_MASK 0xfffffc00
#define RLC_SRM_DRAM_ADDR__RESERVED__SHIFT 0xa
#define RLC_SRM_DRAM_DATA__DATA_MASK 0xffffffff
@@ -17946,8 +17938,6 @@
#define VGT_TESS_DISTRIBUTION__ACCUM_TRI__SHIFT 0x8
#define VGT_TESS_DISTRIBUTION__ACCUM_QUAD_MASK 0xff0000
#define VGT_TESS_DISTRIBUTION__ACCUM_QUAD__SHIFT 0x10
-#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT_MASK 0xff000000
-#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT__SHIFT 0x18
#define VGT_TF_RING_SIZE__SIZE_MASK 0xffff
#define VGT_TF_RING_SIZE__SIZE__SHIFT 0x0
#define VGT_SYS_CONFIG__DUAL_CORE_EN_MASK 0x1
@@ -20502,8 +20492,6 @@
#define DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
#define DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
#define DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
-#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x6
#define DIDT_SQ_CTRL1__MIN_POWER_MASK 0xffff
#define DIDT_SQ_CTRL1__MIN_POWER__SHIFT 0x0
#define DIDT_SQ_CTRL1__MAX_POWER_MASK 0xffff0000
@@ -20558,8 +20546,6 @@
#define DIDT_DB_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
#define DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
#define DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
-#define DIDT_DB_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_DB_CTRL0__UNUSED_0__SHIFT 0x6
#define DIDT_DB_CTRL1__MIN_POWER_MASK 0xffff
#define DIDT_DB_CTRL1__MIN_POWER__SHIFT 0x0
#define DIDT_DB_CTRL1__MAX_POWER_MASK 0xffff0000
@@ -20614,8 +20600,6 @@
#define DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
#define DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
#define DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
-#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x6
#define DIDT_TD_CTRL1__MIN_POWER_MASK 0xffff
#define DIDT_TD_CTRL1__MIN_POWER__SHIFT 0x0
#define DIDT_TD_CTRL1__MAX_POWER_MASK 0xffff0000
@@ -20670,8 +20654,6 @@
#define DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
#define DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
#define DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
-#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x6
#define DIDT_TCP_CTRL1__MIN_POWER_MASK 0xffff
#define DIDT_TCP_CTRL1__MIN_POWER__SHIFT 0x0
#define DIDT_TCP_CTRL1__MAX_POWER_MASK 0xffff0000
@@ -20726,8 +20708,6 @@
#define DIDT_DBR_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
#define DIDT_DBR_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
#define DIDT_DBR_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
-#define DIDT_DBR_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_DBR_CTRL0__UNUSED_0__SHIFT 0x6
#define DIDT_DBR_CTRL1__MIN_POWER_MASK 0xffff
#define DIDT_DBR_CTRL1__MIN_POWER__SHIFT 0x0
#define DIDT_DBR_CTRL1__MAX_POWER_MASK 0xffff0000
@@ -20773,4 +20753,84 @@
#define DIDT_DBR_WEIGHT8_11__WEIGHT11_MASK 0xff000000
#define DIDT_DBR_WEIGHT8_11__WEIGHT11__SHIFT 0x18
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK 0x00000001
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT 0x00000000
+
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000007e
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00001f80L
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x00000001
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x00000007
+
+#define DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK 0x1fffe000L
+#define DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT 0x0000000d
+
+#define DIDT_SQ_STALL_CTRL__UNUSED_0_MASK 0xe0000000L
+#define DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT 0x0000001d
+
+#define DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK 0x00000001L
+#define DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT 0x00000000
+
+#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00007ffeL
+#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x00000001
+#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x1fff8000L
+#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0x0000000f
+
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK 0x00000001L
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT 0x00000000
+
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000007eL
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00001f80L
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x00000001
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x00000007
+
+#define DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK 0x1fffe000L
+#define DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT 0x0000000d
+
+#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x00000fc0L
+#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x0003f000L
+#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0x00000006
+#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x0000000c
+
+#define DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK 0x00000001L
+#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00007ffeL
+#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x1fff8000L
+
+#define DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT 0x00000000
+#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x00000001
+#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0x0000000f
+
+#define DIDT_TD_STALL_CTRL__UNUSED_0_MASK 0xe0000000L
+#define DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT 0x0000001d
+
+#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x00000fc0L
+#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x0003f000L
+#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0x00000006
+#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x0000000c
+
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK 0x00000001L
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT 0x00000000
+
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000007eL
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00001f80L
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x00000001
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x00000007
+
+#define DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK 0x1fffe000L
+#define DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT 0x0000000d
+
+#define DIDT_TCP_STALL_CTRL__UNUSED_0_MASK 0xe0000000L
+#define DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT 0x0000001d
+
+#define DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK 0x00000001L
+#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00007ffeL
+#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x1fff8000L
+#define DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT 0x00000000
+#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x00000001
+#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0x0000000f
+
+#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x00000fc0L
+#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x0003f000L
+#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0x00000006
+#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x0000000c
+
#endif /* GFX_8_0_SH_MASK_H */
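
Note: the new DIDT *_STALL_DELAY fields split a 12-bit delay across six-bit _HI/_LO halves (masks 0x7e and 0x1f80 above). A minimal packing sketch, assuming the _HI field carries the upper six bits of the value; the helper name is illustrative, not part of this patch:

	/* Illustrative helper: pack a 12-bit stall delay into DIDT_SQ_STALL_CTRL,
	 * assuming DELAY_HI holds the upper six bits of the value. */
	static uint32_t didt_sq_pack_stall_delay(uint32_t reg, uint32_t delay)
	{
		reg &= ~(DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK |
			 DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK);
		/* upper six bits land in bits [6:1] */
		reg |= ((delay >> 6) << DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT) &
			DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK;
		/* lower six bits land in bits [12:7] */
		reg |= ((delay & 0x3f) << DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT) &
			DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK;
		return reg;
	}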
diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
index 6f6fb3474..ec69869c5 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
@@ -111,6 +111,8 @@
#define mmUVD_MIF_RECON1_ADDR_CONFIG 0x39c5
#define ixUVD_MIF_SCLR_ADDR_CONFIG 0x4
#define mmUVD_JPEG_ADDR_CONFIG 0x3a1f
+#define mmUVD_GP_SCRATCH8 0x3c0a
+#define mmUVD_GP_SCRATCH9 0x3c0b
#define mmUVD_GP_SCRATCH4 0x3d38
#endif /* UVD_6_0_D_H */
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index 7464daf89..b86aba9d0 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -49,6 +49,7 @@ enum cgs_ind_reg {
CGS_IND_REG__SMC,
CGS_IND_REG__UVD_CTX,
CGS_IND_REG__DIDT,
+ CGS_IND_REG_GC_CAC,
CGS_IND_REG__AUDIO_ENDPT
};
@@ -112,20 +113,23 @@ enum cgs_system_info_id {
CGS_SYSTEM_INFO_ADAPTER_BDF_ID = 1,
CGS_SYSTEM_INFO_PCIE_GEN_INFO,
CGS_SYSTEM_INFO_PCIE_MLW,
+ CGS_SYSTEM_INFO_PCIE_DEV,
+ CGS_SYSTEM_INFO_PCIE_REV,
CGS_SYSTEM_INFO_CG_FLAGS,
CGS_SYSTEM_INFO_PG_FLAGS,
CGS_SYSTEM_INFO_GFX_CU_INFO,
+ CGS_SYSTEM_INFO_GFX_SE_INFO,
CGS_SYSTEM_INFO_ID_MAXIMUM,
};
struct cgs_system_info {
- uint64_t size;
- uint64_t info_id;
+ uint64_t size;
+ enum cgs_system_info_id info_id;
union {
- void *ptr;
- uint64_t value;
+ void *ptr;
+ uint64_t value;
};
- uint64_t padding[13];
+ uint64_t padding[13];
};
/*
@@ -158,6 +162,10 @@ struct cgs_firmware_info {
uint16_t feature_version;
uint32_t image_size;
uint64_t mc_addr;
+
+ /* only for smc firmware */
+ uint32_t ucode_start_address;
+
void *kptr;
};
@@ -189,7 +197,6 @@ typedef unsigned long cgs_handle_t;
struct cgs_acpi_method_argument {
uint32_t type;
- uint32_t method_length;
uint32_t data_length;
union{
uint32_t value;
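
Note: with info_id now typed as enum cgs_system_info_id, callers fill the struct exactly as the hwmgr code later in this patch does. Minimal query sketch ("device" and "pcie_gen_cap" are placeholders for whatever the caller already has):

	struct cgs_system_info sys_info = {0};
	uint32_t pcie_gen_cap;
	int result;

	sys_info.size = sizeof(struct cgs_system_info);
	sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO; /* an enum now, not a raw uint64_t */
	result = cgs_query_system_info(device, &sys_info);
	if (result == 0)
		pcie_gen_cap = (uint32_t)sys_info.value;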
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index e629f8a9f..abbb658bd 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -176,7 +176,7 @@ static int pp_hw_fini(void *handle)
static bool pp_is_idle(void *handle)
{
- return 0;
+ return false;
}
static int pp_wait_for_idle(void *handle)
@@ -536,6 +536,10 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input,
case AMD_PP_EVENT_COMPLETE_INIT:
ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
break;
+ case AMD_PP_EVENT_READJUST_POWER_STATE:
+ pp_handle->hwmgr->current_ps = pp_handle->hwmgr->boot_ps;
+ ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
+ break;
default:
break;
}
@@ -740,12 +744,12 @@ static int pp_dpm_get_pp_table(void *handle, char **table)
PP_CHECK_HW(hwmgr);
- if (hwmgr->hwmgr_func->get_pp_table == NULL) {
- printk(KERN_INFO "%s was not implemented.\n", __func__);
- return 0;
- }
+ if (!hwmgr->soft_pp_table)
+ return -EINVAL;
- return hwmgr->hwmgr_func->get_pp_table(hwmgr, table);
+ *table = (char *)hwmgr->soft_pp_table;
+
+ return hwmgr->soft_pp_table_size;
}
static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
@@ -759,12 +763,23 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
PP_CHECK_HW(hwmgr);
- if (hwmgr->hwmgr_func->set_pp_table == NULL) {
- printk(KERN_INFO "%s was not implemented.\n", __func__);
- return 0;
+ if (!hwmgr->hardcode_pp_table) {
+ hwmgr->hardcode_pp_table =
+ kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
+
+ if (!hwmgr->hardcode_pp_table)
+ return -ENOMEM;
+
+		/* to avoid a powerplay crash when the hardcoded pptable is empty */
+ memcpy(hwmgr->hardcode_pp_table, hwmgr->soft_pp_table,
+ hwmgr->soft_pp_table_size);
}
- return hwmgr->hwmgr_func->set_pp_table(hwmgr, buf, size);
+ memcpy(hwmgr->hardcode_pp_table, buf, size);
+
+ hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
+
+ return amd_powerplay_reset(handle);
}
static int pp_dpm_force_clock_level(void *handle,
@@ -806,6 +821,82 @@ static int pp_dpm_print_clock_levels(void *handle,
return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
}
+static int pp_dpm_get_sclk_od(void *handle)
+{
+ struct pp_hwmgr *hwmgr;
+
+ if (!handle)
+ return -EINVAL;
+
+ hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
+ printk(KERN_INFO "%s was not implemented.\n", __func__);
+ return 0;
+ }
+
+ return hwmgr->hwmgr_func->get_sclk_od(hwmgr);
+}
+
+static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
+{
+ struct pp_hwmgr *hwmgr;
+
+ if (!handle)
+ return -EINVAL;
+
+ hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
+ printk(KERN_INFO "%s was not implemented.\n", __func__);
+ return 0;
+ }
+
+ return hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
+}
+
+static int pp_dpm_get_mclk_od(void *handle)
+{
+ struct pp_hwmgr *hwmgr;
+
+ if (!handle)
+ return -EINVAL;
+
+ hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
+ printk(KERN_INFO "%s was not implemented.\n", __func__);
+ return 0;
+ }
+
+ return hwmgr->hwmgr_func->get_mclk_od(hwmgr);
+}
+
+static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
+{
+ struct pp_hwmgr *hwmgr;
+
+ if (!handle)
+ return -EINVAL;
+
+ hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
+ printk(KERN_INFO "%s was not implemented.\n", __func__);
+ return 0;
+ }
+
+ return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
+}
+
const struct amd_powerplay_funcs pp_dpm_funcs = {
.get_temperature = pp_dpm_get_temperature,
.load_firmware = pp_dpm_load_fw,
@@ -828,6 +919,10 @@ const struct amd_powerplay_funcs pp_dpm_funcs = {
.set_pp_table = pp_dpm_set_pp_table,
.force_clock_level = pp_dpm_force_clock_level,
.print_clock_levels = pp_dpm_print_clock_levels,
+ .get_sclk_od = pp_dpm_get_sclk_od,
+ .set_sclk_od = pp_dpm_set_sclk_od,
+ .get_mclk_od = pp_dpm_get_mclk_od,
+ .set_mclk_od = pp_dpm_set_mclk_od,
};
static int amd_pp_instance_init(struct amd_pp_init *pp_init,
@@ -909,6 +1004,44 @@ int amd_powerplay_fini(void *handle)
return 0;
}
+int amd_powerplay_reset(void *handle)
+{
+ struct pp_instance *instance = (struct pp_instance *)handle;
+ struct pp_eventmgr *eventmgr;
+ struct pem_event_data event_data = { {0} };
+ int ret;
+
+ if (instance == NULL)
+ return -EINVAL;
+
+ eventmgr = instance->eventmgr;
+ if (!eventmgr || !eventmgr->pp_eventmgr_fini)
+ return -EINVAL;
+
+ eventmgr->pp_eventmgr_fini(eventmgr);
+
+ ret = pp_sw_fini(handle);
+ if (ret)
+ return ret;
+
+ kfree(instance->hwmgr->ps);
+
+ ret = pp_sw_init(handle);
+ if (ret)
+ return ret;
+
+ hw_init_power_state_table(instance->hwmgr);
+
+ if (eventmgr == NULL || eventmgr->pp_eventmgr_init == NULL)
+ return -EINVAL;
+
+ ret = eventmgr->pp_eventmgr_init(eventmgr);
+ if (ret)
+ return ret;
+
+ return pem_handle_event(eventmgr, AMD_PP_EVENT_COMPLETE_INIT, &event_data);
+}
+
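
Note: together with the pp_dpm_get_pp_table()/pp_dpm_set_pp_table() changes above, the soft pptable round trip now looks like this (sketch; "my_buf" is illustrative, and in practice these entry points are reached through the amd_powerplay_funcs table):

	char *table;
	int size = pp_dpm_get_pp_table(handle, &table); /* returns soft_pp_table_size */

	if (size > 0) {
		memcpy(my_buf, table, size);
		/* ... edit my_buf ... */
		pp_dpm_set_pp_table(handle, my_buf, size); /* copies into hardcode_pp_table,
							    * repoints soft_pp_table, then
							    * runs amd_powerplay_reset() */
	}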
/* export this function to DAL */
int amd_powerplay_display_configuration_change(void *handle,
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index d6635cc4b..635fc4b48 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -30,7 +30,6 @@ static const pem_event_action * const initialize_event[] = {
system_config_tasks,
setup_asic_tasks,
enable_dynamic_state_management_tasks,
- enable_clock_power_gatings_tasks,
get_2d_performance_state_tasks,
set_performance_state_tasks,
initialize_thermal_controller_tasks,
@@ -140,7 +139,6 @@ static const pem_event_action * const resume_event[] = {
setup_asic_tasks,
enable_stutter_mode_tasks, /*must do this in boot state and before SMC is started */
enable_dynamic_state_management_tasks,
- enable_clock_power_gatings_tasks,
enable_disable_bapm_tasks,
initialize_thermal_controller_tasks,
get_2d_performance_state_tasks,
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c
index 5cd123472..b6f45fd01 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c
@@ -132,8 +132,7 @@ int pem_task_enable_dynamic_state_management(struct pp_eventmgr *eventmgr, struc
int pem_task_disable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
{
- /* TODO */
- return 0;
+ return phm_disable_dynamic_state_management(eventmgr->hwmgr);
}
int pem_task_enable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
index 436fc16da..2028980f1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
@@ -177,12 +177,12 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
cz_dpm_powerdown_uvd(hwmgr);
} else {
cz_dpm_powerup_uvd(hwmgr);
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_GATE);
cgs_set_powergating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_UNGATE);
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_GATE);
cz_dpm_update_uvd_dpm(hwmgr, false);
}
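
Note: this hunk (and the VCE hunk below) fixes the un-gate ordering, restoring power before clock gating is reapplied and only then updating DPM:

	/* Corrected un-gate sequence, as in the hunk above:
	 *   cz_dpm_powerup_uvd(hwmgr);                            1. power up the block
	 *   cgs_set_powergating_state(..., AMD_CG_STATE_UNGATE);  2. lift the power gate
	 *   cgs_set_clockgating_state(..., AMD_PG_STATE_GATE);    3. reapply clock gating
	 *   cz_dpm_update_uvd_dpm(hwmgr, false);                  4. re-enable DPM
	 */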
@@ -206,25 +206,26 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
AMD_IP_BLOCK_TYPE_VCE,
AMD_PG_STATE_GATE);
cz_enable_disable_vce_dpm(hwmgr, false);
- /* TODO: to figure out why vce can't be poweroff*/
+ cz_dpm_powerdown_vce(hwmgr);
cz_hwmgr->vce_power_gated = true;
} else {
cz_dpm_powerup_vce(hwmgr);
cz_hwmgr->vce_power_gated = false;
- cgs_set_clockgating_state(
- hwmgr->device,
- AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_GATE);
cgs_set_powergating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_UNGATE);
+ cgs_set_clockgating_state(
+ hwmgr->device,
+ AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
cz_dpm_update_vce_dpm(hwmgr);
cz_enable_disable_vce_dpm(hwmgr, true);
return 0;
}
}
} else {
+ cz_hwmgr->vce_power_gated = bgate;
cz_dpm_update_vce_dpm(hwmgr);
cz_enable_disable_vce_dpm(hwmgr, !bgate);
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 1f14c477d..8cc0df9b5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -1167,9 +1167,9 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
cz_ps->action = cz_current_ps->action;
- if ((force_high == false) && (cz_ps->action == FORCE_HIGH))
+ if (!force_high && (cz_ps->action == FORCE_HIGH))
cz_ps->action = CANCEL_FORCE_HIGH;
- else if ((force_high == true) && (cz_ps->action != FORCE_HIGH))
+ else if (force_high && (cz_ps->action != FORCE_HIGH))
cz_ps->action = FORCE_HIGH;
else
cz_ps->action = DO_NOTHING;
@@ -1180,6 +1180,13 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
int result = 0;
+ struct cz_hwmgr *data;
+
+ data = kzalloc(sizeof(struct cz_hwmgr), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ hwmgr->backend = data;
result = cz_initialize_dpm_defaults(hwmgr);
if (result != 0) {
@@ -1649,7 +1656,7 @@ static void cz_hw_print_display_cfg(
struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
uint32_t data = 0;
- if (hw_data->cc6_settings.cc6_setting_changed == true) {
+ if (hw_data->cc6_settings.cc6_setting_changed) {
hw_data->cc6_settings.cc6_setting_changed = false;
@@ -1909,15 +1916,7 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = {
int cz_hwmgr_init(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *cz_hwmgr;
- int ret = 0;
-
- cz_hwmgr = kzalloc(sizeof(struct cz_hwmgr), GFP_KERNEL);
- if (cz_hwmgr == NULL)
- return -ENOMEM;
-
- hwmgr->backend = cz_hwmgr;
hwmgr->hwmgr_func = &cz_hwmgr_funcs;
hwmgr->pptable_func = &pptable_funcs;
- return ret;
+ return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c
index e1b649bd5..5afe82068 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c
@@ -56,7 +56,7 @@ int fiji_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
fiji_update_uvd_dpm(hwmgr, false);
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_UNGATE);
+ AMD_CG_STATE_UNGATE);
}
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
index 92912ab20..120a9e2c3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
@@ -581,25 +581,24 @@ static int fiji_patch_boot_state(struct pp_hwmgr *hwmgr,
static int fiji_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (data->soft_pp_table) {
- kfree(data->soft_pp_table);
- data->soft_pp_table = NULL;
- }
-
return phm_hwmgr_backend_fini(hwmgr);
}
static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+ struct fiji_hwmgr *data;
uint32_t i;
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
bool stay_in_boot;
int result;
+ data = kzalloc(sizeof(struct fiji_hwmgr), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ hwmgr->backend = data;
+
data->dll_default_on = false;
data->sram_end = SMC_RAM_END;
@@ -699,7 +698,7 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
if (0 == result) {
struct cgs_system_info sys_info = {0};
- data->is_tlu_enabled = 0;
+ data->is_tlu_enabled = false;
hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
FIJI_MAX_HARDWARE_POWERLEVELS;
hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
@@ -734,7 +733,7 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
result = cgs_query_system_info(hwmgr->device, &sys_info);
if (result)
- data->pcie_gen_cap = 0x30007;
+ data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
else
data->pcie_gen_cap = (uint32_t)sys_info.value;
if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
@@ -743,7 +742,7 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
result = cgs_query_system_info(hwmgr->device, &sys_info);
if (result)
- data->pcie_lane_cap = 0x2f0000;
+ data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
else
data->pcie_lane_cap = (uint32_t)sys_info.value;
} else {
@@ -1236,6 +1235,34 @@ static int fiji_program_voting_clients(struct pp_hwmgr *hwmgr)
return 0;
}
+static int fiji_clear_voting_clients(struct pp_hwmgr *hwmgr)
+{
+ /* Reset voting clients before disabling DPM */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_0, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_1, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_2, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_3, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_4, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_5, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_6, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_7, 0);
+
+ return 0;
+}
+
/**
* Get the location of various tables inside the FW image.
*
@@ -1363,6 +1390,17 @@ static int fiji_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
}
/**
+* Call SMC to reset S0/S1 to S1 and reset SMIO to its initial value.
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return 0 on success.
+*/
+static int fiji_reset_to_default(struct pp_hwmgr *hwmgr)
+{
+ return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
+}
+
+/**
* Initial switch from ARB F0->F1
*
* @param hwmgr the address of the powerplay hardware manager.
@@ -1375,6 +1413,21 @@ static int fiji_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}
+static int fiji_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
+{
+ uint32_t tmp;
+
+ tmp = (cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
+ 0x0000ff00) >> 8;
+
+ if (tmp == MC_CG_ARB_FREQ_F0)
+ return 0;
+
+ return fiji_copy_and_switch_arb_sets(hwmgr,
+ tmp, MC_CG_ARB_FREQ_F0);
+}
+
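
Note on the SCRATCH9 decode above: the SMC mirrors the currently active ARB frequency set in bits [15:8] of ixSMC_SCRATCH9, so

	/* tmp = (SCRATCH9 & 0x0000ff00) >> 8;
	 * tmp == MC_CG_ARB_FREQ_F0  -> already on F0, nothing to do;
	 * anything else             -> copy that set back into F0 before DPM teardown. */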
static int fiji_reset_single_dpm_table(struct pp_hwmgr *hwmgr,
struct fiji_single_dpm_table *dpm_table, uint32_t count)
{
@@ -1397,7 +1450,7 @@ static void fiji_setup_pcie_table_entry(
{
dpm_table->dpm_levels[index].value = pcie_gen;
dpm_table->dpm_levels[index].param1 = pcie_lanes;
- dpm_table->dpm_levels[index].enabled = 1;
+ dpm_table->dpm_levels[index].enabled = true;
}
static int fiji_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
@@ -1609,7 +1662,6 @@ static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
{
uint32_t count;
uint8_t index;
- int result = 0;
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -1631,7 +1683,7 @@ static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
VOLTAGE_SCALE)) / 25);
}
- return result;
+ return 0;
}
/**
@@ -3177,6 +3229,17 @@ static int fiji_enable_ulv(struct pp_hwmgr *hwmgr)
return 0;
}
+static int fiji_disable_ulv(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+ struct fiji_ulv_parm *ulv = &(data->ulv);
+
+ if (ulv->ulv_supported)
+ return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
+
+ return 0;
+}
+
static int fiji_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -3197,6 +3260,21 @@ static int fiji_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
return 0;
}
+static int fiji_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
+{
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkDeepSleep)) {
+ if (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_MASTER_DeepSleep_OFF)) {
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to disable Master Deep Sleep switch failed!",
+ return -1);
+ }
+ }
+
+ return 0;
+}
+
static int fiji_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
@@ -3357,6 +3435,70 @@ static int fiji_start_dpm(struct pp_hwmgr *hwmgr)
return 0;
}
+static int fiji_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+
+ /* disable SCLK dpm */
+ if (!data->sclk_dpm_key_disabled)
+ PP_ASSERT_WITH_CODE(
+ (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_DPM_Disable) == 0),
+ "Failed to disable SCLK DPM!",
+ return -1);
+
+ /* disable MCLK dpm */
+ if (!data->mclk_dpm_key_disabled) {
+ PP_ASSERT_WITH_CODE(
+ (smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask, 1) == 0),
+ "Failed to force MCLK DPM0!",
+ return -1);
+
+ PP_ASSERT_WITH_CODE(
+ (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_Disable) == 0),
+ "Failed to disable MCLK DPM!",
+ return -1);
+ }
+
+ return 0;
+}
+
+static int fiji_stop_dpm(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+
+ /* disable general power management */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+ GLOBAL_PWRMGT_EN, 0);
+ /* disable sclk deep sleep */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
+ DYNAMIC_PM_EN, 0);
+
+ /* disable PCIE dpm */
+ if (!data->pcie_dpm_key_disabled) {
+ PP_ASSERT_WITH_CODE(
+ (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_PCIeDPM_Disable) == 0),
+ "Failed to disable pcie DPM during DPM Stop Function!",
+ return -1);
+ }
+
+ if (fiji_disable_sclk_mclk_dpm(hwmgr)) {
+		printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!\n");
+ return -1;
+ }
+
+ PP_ASSERT_WITH_CODE(
+ (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_Voltage_Cntl_Disable) == 0),
+ "Failed to disable voltage DPM during DPM Stop Function!",
+ return -1);
+
+ return 0;
+}
+
static void fiji_set_dpm_event_sources(struct pp_hwmgr *hwmgr,
uint32_t sources)
{
@@ -3415,6 +3557,23 @@ static int fiji_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
return fiji_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}
+static int fiji_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
+ PHM_AutoThrottleSource source)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+
+ if (data->active_auto_throttle_sources & (1 << source)) {
+ data->active_auto_throttle_sources &= ~(1 << source);
+ fiji_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
+ }
+ return 0;
+}
+
+static int fiji_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
+{
+ return fiji_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
+}
+
static int fiji_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
int tmp_result, result = 0;
@@ -3529,6 +3688,64 @@ static int fiji_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
return result;
}
+static int fiji_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+ int tmp_result, result = 0;
+
+ tmp_result = (fiji_is_dpm_running(hwmgr)) ? 0 : -1;
+ PP_ASSERT_WITH_CODE(tmp_result == 0,
+ "DPM is not running right now, no need to disable DPM!",
+ return 0);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalController))
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
+
+ tmp_result = fiji_disable_power_containment(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable power containment!", result = tmp_result);
+
+ tmp_result = fiji_disable_smc_cac(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable SMC CAC!", result = tmp_result);
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
+
+ tmp_result = fiji_disable_thermal_auto_throttle(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable thermal auto throttle!", result = tmp_result);
+
+ tmp_result = fiji_stop_dpm(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to stop DPM!", result = tmp_result);
+
+ tmp_result = fiji_disable_deep_sleep_master_switch(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable deep sleep master switch!", result = tmp_result);
+
+ tmp_result = fiji_disable_ulv(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable ULV!", result = tmp_result);
+
+ tmp_result = fiji_clear_voting_clients(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to clear voting clients!", result = tmp_result);
+
+ tmp_result = fiji_reset_to_default(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to reset to default!", result = tmp_result);
+
+ tmp_result = fiji_force_switch_to_arbf0(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to force to switch arbf0!", result = tmp_result);
+
+ return result;
+}
+
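
Note: the teardown above mirrors fiji_enable_dpm_tasks() in reverse and uses an accumulate-and-continue error pattern; every step runs even if an earlier one failed, and "result" keeps the last failing code. Sketch of the pattern (step_a/step_b are illustrative):

	int result = 0, tmp_result;

	tmp_result = step_a(hwmgr);
	PP_ASSERT_WITH_CODE(tmp_result == 0, "step_a failed!", result = tmp_result);

	tmp_result = step_b(hwmgr);
	PP_ASSERT_WITH_CODE(tmp_result == 0, "step_b failed!", result = tmp_result);

	return result;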
static int fiji_force_dpm_highest(struct pp_hwmgr *hwmgr)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
@@ -4171,8 +4388,9 @@ static int fiji_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if ((0 == data->sclk_dpm_key_disabled) &&
(data->need_update_smu7_dpm_table &
(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
- PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr),
- "Trying to freeze SCLK DPM when DPM is disabled",);
+ PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
+ "Trying to freeze SCLK DPM when DPM is disabled",
+ );
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
PPSMC_MSG_SCLKDPM_FreezeLevel),
"Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
@@ -4182,8 +4400,9 @@ static int fiji_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if ((0 == data->mclk_dpm_key_disabled) &&
(data->need_update_smu7_dpm_table &
DPMTABLE_OD_UPDATE_MCLK)) {
- PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr),
- "Trying to freeze MCLK DPM when DPM is disabled",);
+ PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
+ "Trying to freeze MCLK DPM when DPM is disabled",
+ );
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
PPSMC_MSG_MCLKDPM_FreezeLevel),
"Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
@@ -4353,7 +4572,6 @@ static int fiji_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
static int fiji_trim_dpm_states(struct pp_hwmgr *hwmgr,
const struct fiji_power_state *fiji_ps)
{
- int result = 0;
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
uint32_t high_limit_count;
@@ -4373,7 +4591,7 @@ static int fiji_trim_dpm_states(struct pp_hwmgr *hwmgr,
fiji_ps->performance_levels[0].memory_clock,
fiji_ps->performance_levels[high_limit_count].memory_clock);
- return result;
+ return 0;
}
static int fiji_generate_dpm_level_enable_mask(
@@ -4632,8 +4850,9 @@ static int fiji_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
(data->need_update_smu7_dpm_table &
(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
- PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr),
- "Trying to Unfreeze SCLK DPM when DPM is disabled",);
+ PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
+ "Trying to Unfreeze SCLK DPM when DPM is disabled",
+ );
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
PPSMC_MSG_SCLKDPM_UnfreezeLevel),
"Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
@@ -4643,8 +4862,9 @@ static int fiji_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if ((0 == data->mclk_dpm_key_disabled) &&
(data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
- PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr),
- "Trying to Unfreeze MCLK DPM when DPM is disabled",);
+ PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
+ "Trying to Unfreeze MCLK DPM when DPM is disabled",
+ );
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
PPSMC_MSG_SCLKDPM_UnfreezeLevel),
"Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
@@ -5071,42 +5291,6 @@ static int fiji_get_fan_control_mode(struct pp_hwmgr *hwmgr)
CG_FDO_CTRL2, FDO_PWM_MODE);
}
-static int fiji_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (!data->soft_pp_table) {
- data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
- hwmgr->soft_pp_table_size,
- GFP_KERNEL);
- if (!data->soft_pp_table)
- return -ENOMEM;
- }
-
- *table = (char *)&data->soft_pp_table;
-
- return hwmgr->soft_pp_table_size;
-}
-
-static int fiji_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (!data->soft_pp_table) {
- data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
- if (!data->soft_pp_table)
- return -ENOMEM;
- }
-
- memcpy(data->soft_pp_table, buf, size);
-
- hwmgr->soft_pp_table = data->soft_pp_table;
-
- /* TODO: re-init powerplay to implement modified pptable */
-
- return 0;
-}
-
static int fiji_force_clock_level(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, uint32_t mask)
{
@@ -5276,12 +5460,96 @@ bool fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *h
return is_update_required;
}
+static int fiji_get_sclk_od(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+ struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+ struct fiji_single_dpm_table *golden_sclk_table =
+ &(data->golden_dpm_table.sclk_table);
+ int value;
+
+ value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
+ 100 /
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+ return value;
+}
+
+static int fiji_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+ struct fiji_single_dpm_table *golden_sclk_table =
+ &(data->golden_dpm_table.sclk_table);
+ struct pp_power_state *ps;
+ struct fiji_power_state *fiji_ps;
+
+ if (value > 20)
+ value = 20;
+
+ ps = hwmgr->request_ps;
+
+ if (ps == NULL)
+ return -EINVAL;
+
+ fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
+
+ fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].engine_clock =
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
+ value / 100 +
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+ return 0;
+}
+
+static int fiji_get_mclk_od(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+ struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+ struct fiji_single_dpm_table *golden_mclk_table =
+ &(data->golden_dpm_table.mclk_table);
+ int value;
+
+ value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
+ 100 /
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+ return value;
+}
+
+static int fiji_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+ struct fiji_single_dpm_table *golden_mclk_table =
+ &(data->golden_dpm_table.mclk_table);
+ struct pp_power_state *ps;
+ struct fiji_power_state *fiji_ps;
+
+ if (value > 20)
+ value = 20;
+
+ ps = hwmgr->request_ps;
+
+ if (ps == NULL)
+ return -EINVAL;
+
+ fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
+
+ fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].memory_clock =
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
+ value / 100 +
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+ return 0;
+}
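
Note: worked example of the OD math above (illustrative numbers). With a golden top MCLK of 1500 MHz and value = 10:

	/* set: memory_clock = 1500 * 10 / 100 + 1500 = 1650 MHz
	 * get: (1650 - 1500) * 100 / 1500            = 10 (% overdrive)
	 * Requests above 20 are clamped, i.e. overdrive is capped at +20%. */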
static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
.backend_init = &fiji_hwmgr_backend_init,
.backend_fini = &fiji_hwmgr_backend_fini,
.asic_setup = &fiji_setup_asic_task,
.dynamic_state_management_enable = &fiji_enable_dpm_tasks,
+ .dynamic_state_management_disable = &fiji_disable_dpm_tasks,
.force_dpm_level = &fiji_dpm_force_dpm_level,
.get_num_of_pp_table_entries = &tonga_get_number_of_powerplay_table_entries,
.get_power_state_size = &fiji_get_power_state_size,
@@ -5314,24 +5582,18 @@ static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
.get_fan_control_mode = fiji_get_fan_control_mode,
.check_states_equal = fiji_check_states_equal,
.check_smc_update_required_for_display_configuration = fiji_check_smc_update_required_for_display_configuration,
- .get_pp_table = fiji_get_pp_table,
- .set_pp_table = fiji_set_pp_table,
.force_clock_level = fiji_force_clock_level,
.print_clock_levels = fiji_print_clock_levels,
+ .get_sclk_od = fiji_get_sclk_od,
+ .set_sclk_od = fiji_set_sclk_od,
+ .get_mclk_od = fiji_get_mclk_od,
+ .set_mclk_od = fiji_set_mclk_od,
};
int fiji_hwmgr_init(struct pp_hwmgr *hwmgr)
{
- struct fiji_hwmgr *data;
- int ret = 0;
-
- data = kzalloc(sizeof(struct fiji_hwmgr), GFP_KERNEL);
- if (data == NULL)
- return -ENOMEM;
-
- hwmgr->backend = data;
hwmgr->hwmgr_func = &fiji_hwmgr_funcs;
hwmgr->pptable_func = &tonga_pptable_funcs;
pp_fiji_thermal_initialize(hwmgr);
- return ret;
+ return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
index 170edf5a7..bf67c2a92 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
@@ -302,9 +302,6 @@ struct fiji_hwmgr {
bool pg_acp_init;
bool frtc_enabled;
bool frtc_status_changed;
-
- /* soft pptable for re-uploading into smu */
- void *soft_pp_table;
};
/* To convert to Q8.8 format for firmware */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
index db23a4068..44658451a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
@@ -73,17 +73,18 @@ void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
if (!tmp) {
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_CAC);
fiji_hwmgr->fast_watermark_threshold = 100;
- tmp = 1;
- fiji_hwmgr->enable_dte_feature = tmp ? false : true;
- fiji_hwmgr->enable_tdc_limit_feature = tmp ? true : false;
- fiji_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? true : false;
+ if (hwmgr->powercontainment_enabled) {
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment);
+ tmp = 1;
+ fiji_hwmgr->enable_dte_feature = tmp ? false : true;
+ fiji_hwmgr->enable_tdc_limit_feature = tmp ? true : false;
+ fiji_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? true : false;
+ }
}
}
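
Note: power containment is now gated on a module-level switch rather than enabled unconditionally; the flag is carried through at init time (see the hwmgr.c hunk below) and consulted here:

	/* hwmgr_init():
	 *   hwmgr->powercontainment_enabled = pp_init->powercontainment_enabled;
	 * fiji_initialize_power_tune_defaults():
	 *   if (hwmgr->powercontainment_enabled)
	 *           phm_cap_set(..., PHM_PlatformCaps_PowerContainment);
	 */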
@@ -459,6 +460,23 @@ int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr)
return result;
}
+int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+ int result = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CAC) && data->cac_enabled) {
+ int smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ (uint16_t)(PPSMC_MSG_DisableCac));
+ PP_ASSERT_WITH_CODE((smc_result == 0),
+ "Failed to disable CAC in SMC.", result = -1);
+
+ data->cac_enabled = false;
+ }
+ return result;
+}
+
int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
@@ -528,6 +546,48 @@ int fiji_enable_power_containment(struct pp_hwmgr *hwmgr)
return result;
}
+int fiji_disable_power_containment(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+ int result = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment) &&
+ data->power_containment_features) {
+ int smc_result;
+
+ if (data->power_containment_features &
+ POWERCONTAINMENT_FEATURE_TDCLimit) {
+ smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ (uint16_t)(PPSMC_MSG_TDCLimitDisable));
+ PP_ASSERT_WITH_CODE((smc_result == 0),
+ "Failed to disable TDCLimit in SMC.",
+ result = smc_result);
+ }
+
+ if (data->power_containment_features &
+ POWERCONTAINMENT_FEATURE_DTE) {
+ smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ (uint16_t)(PPSMC_MSG_DisableDTE));
+ PP_ASSERT_WITH_CODE((smc_result == 0),
+ "Failed to disable DTE in SMC.",
+ result = smc_result);
+ }
+
+ if (data->power_containment_features &
+ POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
+ smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
+ PP_ASSERT_WITH_CODE((smc_result == 0),
+ "Failed to disable PkgPwrTracking in SMC.",
+ result = smc_result);
+ }
+ data->power_containment_features = 0;
+ }
+
+ return result;
+}
+
int fiji_power_control_set_level(struct pp_hwmgr *hwmgr)
{
struct phm_ppt_v1_information *table_info =
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h
index 55e58200f..fec772421 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h
@@ -36,6 +36,19 @@ enum fiji_pt_config_reg_type {
#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
+#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xffffffc0
+#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x6
+#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xffffffc0
+#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x6
+#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xffffffc0
+#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x6
+#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK 0xe0000000
+#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d
+#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK 0xe0000000
+#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d
+#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK 0xe0000000
+#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d
+
struct fiji_pt_config_reg {
uint32_t offset;
uint32_t mask;
@@ -58,7 +71,9 @@ void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr);
int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr);
+int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr);
int fiji_enable_power_containment(struct pp_hwmgr *hwmgr);
+int fiji_disable_power_containment(struct pp_hwmgr *hwmgr);
int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
int fiji_power_control_set_level(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
index 7a705cee0..a6abe81bc 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
@@ -59,8 +59,8 @@ int phm_dispatch_table(struct pp_hwmgr *hwmgr,
struct phm_runtime_table_header *rt_table,
void *input, void *output)
{
- int result = 0;
- void *temp_storage = NULL;
+ int result;
+ void *temp_storage;
if (hwmgr == NULL || rt_table == NULL) {
printk(KERN_ERR "[ powerplay ] Invalid Parameter!\n");
@@ -73,12 +73,13 @@ int phm_dispatch_table(struct pp_hwmgr *hwmgr,
printk(KERN_ERR "[ powerplay ] Could not allocate table temporary storage\n");
return -ENOMEM;
}
+ } else {
+ temp_storage = NULL;
}
result = phm_run_table(hwmgr, rt_table, input, output, temp_storage);
- if (NULL != temp_storage)
- kfree(temp_storage);
+ kfree(temp_storage);
return result;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index efb77eda7..789f98ad2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -154,6 +154,30 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
return ret;
}
+int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr)
+{
+ int ret = -1;
+ bool enabled;
+
+ PHM_FUNC_CHECK(hwmgr);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TablelessHardwareInterface)) {
+ if (hwmgr->hwmgr_func->dynamic_state_management_disable)
+ ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr);
+ } else {
+ ret = phm_dispatch_table(hwmgr,
+ &(hwmgr->disable_dynamic_state_management),
+ NULL, NULL);
+ }
+
+	enabled = (ret != 0);
+
+ cgs_notify_dpm_enabled(hwmgr->device, enabled);
+
+ return ret;
+}
+
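
Note: call chain wired up by this patch for the DPM-disable path:

	/* pem_task_disable_dynamic_state_management()        eventtasks.c
	 *   -> phm_disable_dynamic_state_management()        here
	 *        -> hwmgr_func->dynamic_state_management_disable()
	 *           (fiji_disable_dpm_tasks / polaris10_disable_dpm_tasks)
	 *   -> cgs_notify_dpm_enabled(device, false) on success
	 */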
int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level)
{
PHM_FUNC_CHECK(hwmgr);
@@ -314,7 +338,7 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
if (hwmgr->hwmgr_func->store_cc6_data == NULL)
return -EINVAL;
- /* to do pass other display configuration in furture */
+ /* TODO: pass other display configuration in the future */
if (hwmgr->hwmgr_func->store_cc6_data)
hwmgr->hwmgr_func->store_cc6_data(hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 20f20e075..27e07624a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -24,6 +24,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <drm/amdgpu_drm.h>
#include "cgs_common.h"
#include "power_state.h"
#include "hwmgr.h"
@@ -58,12 +59,13 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
hwmgr->hw_revision = pp_init->rev_id;
hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
hwmgr->power_source = PP_PowerSource_AC;
+ hwmgr->powercontainment_enabled = pp_init->powercontainment_enabled;
switch (hwmgr->chip_family) {
- case AMD_FAMILY_CZ:
+ case AMDGPU_FAMILY_CZ:
cz_hwmgr_init(hwmgr);
break;
- case AMD_FAMILY_VI:
+ case AMDGPU_FAMILY_VI:
switch (hwmgr->chip_id) {
case CHIP_TONGA:
tonga_hwmgr_init(hwmgr);
@@ -94,6 +96,8 @@ int hwmgr_fini(struct pp_hwmgr *hwmgr)
return -EINVAL;
/* do hwmgr finish*/
+ kfree(hwmgr->hardcode_pp_table);
+
kfree(hwmgr->backend);
kfree(hwmgr->start_thermal_controller.function_list);
@@ -530,7 +534,7 @@ int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr
/* initialize vddc_dep_on_dal_pwrl table */
table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
- table_clk_vlt = (struct phm_clock_voltage_dependency_table *)kzalloc(table_size, GFP_KERNEL);
+ table_clk_vlt = kzalloc(table_size, GFP_KERNEL);
if (NULL == table_clk_vlt) {
printk(KERN_ERR "[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! \n");
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c
index 8f142a74a..b5edb5105 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c
@@ -106,11 +106,17 @@ int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
data->uvd_power_gated = bgate;
if (bgate) {
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_GATE);
polaris10_update_uvd_dpm(hwmgr, true);
polaris10_phm_powerdown_uvd(hwmgr);
} else {
polaris10_phm_powerup_uvd(hwmgr);
polaris10_update_uvd_dpm(hwmgr, false);
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_UNGATE);
}
return 0;
@@ -125,11 +131,19 @@ int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
data->vce_power_gated = bgate;
- if (bgate)
+ if (bgate) {
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_GATE);
+ polaris10_update_vce_dpm(hwmgr, true);
polaris10_phm_powerdown_vce(hwmgr);
- else
+ } else {
polaris10_phm_powerup_vce(hwmgr);
-
+ polaris10_update_vce_dpm(hwmgr, false);
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_UNGATE);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
index 91e25f942..769636a0c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
@@ -389,6 +389,34 @@ static int polaris10_program_voting_clients(struct pp_hwmgr *hwmgr)
return 0;
}
+static int polaris10_clear_voting_clients(struct pp_hwmgr *hwmgr)
+{
+ /* Reset voting clients before disabling DPM */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_0, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_1, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_2, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_3, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_4, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_5, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_6, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_7, 0);
+
+ return 0;
+}
+
/**
* Get the location of various tables inside the FW image.
*
@@ -515,6 +543,11 @@ static int polaris10_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
return 0;
}
+static int polaris10_reset_to_default(struct pp_hwmgr *hwmgr)
+{
+ return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
+}
+
/**
* Initial switch from ARB F0->F1
*
@@ -528,6 +561,21 @@ static int polaris10_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}
+static int polaris10_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
+{
+ uint32_t tmp;
+
+ tmp = (cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
+ 0x0000ff00) >> 8;
+
+ if (tmp == MC_CG_ARB_FREQ_F0)
+ return 0;
+
+ return polaris10_copy_and_switch_arb_sets(hwmgr,
+ tmp, MC_CG_ARB_FREQ_F0);
+}
+
static int polaris10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
@@ -1356,9 +1404,9 @@ static int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
return result;
}
- /* in order to prevent MC activity from stutter mode to push DPM up.
+	/* In order to prevent MC activity in stutter mode from pushing DPM up,
* the UVD change complements this by putting the MCLK in
- * a higher state by default such that we are not effected by
+ * a higher state by default such that we are not affected by
* up threshold or and MCLK DPM latency.
*/
levels[0].ActivityLevel = 0x1f;
@@ -1425,7 +1473,7 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
/* Get MinVoltage and Frequency from DPM0,
* already converted to SMC_UL */
- sclk_frequency = data->dpm_table.sclk_table.dpm_levels[0].value;
+ sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
result = polaris10_get_dependency_volt_by_clk(hwmgr,
table_info->vdd_dep_on_sclk,
sclk_frequency,
@@ -1461,8 +1509,7 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
/* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
- table->MemoryACPILevel.MclkFrequency =
- data->dpm_table.mclk_table.dpm_levels[0].value;
+ table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
result = polaris10_get_dependency_volt_by_clk(hwmgr,
table_info->vdd_dep_on_mclk,
table->MemoryACPILevel.MclkFrequency,
@@ -1780,7 +1827,7 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
{
uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
+ uint8_t i, stretch_amount, volt_offset = 0;
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
@@ -1831,11 +1878,8 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
/* Populate CKS Lookup Table */
- if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
- stretch_amount2 = 0;
- else if (stretch_amount == 3 || stretch_amount == 4)
- stretch_amount2 = 1;
- else {
+ if (stretch_amount != 1 && stretch_amount != 2 && stretch_amount != 3 &&
+ stretch_amount != 4 && stretch_amount != 5) {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ClockStretcher);
PP_ASSERT_WITH_CODE(false,
@@ -1890,9 +1934,8 @@ static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
config = VR_SVI2_PLANE_2;
table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
- } else if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
- config = VR_SMIO_PATTERN_2;
- table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start +
+ offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1);
} else {
config = VR_STATIC_VOLTAGE;
table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
@@ -2262,6 +2305,17 @@ static int polaris10_enable_ulv(struct pp_hwmgr *hwmgr)
return 0;
}
+static int polaris10_disable_ulv(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct polaris10_ulv_parm *ulv = &(data->ulv);
+
+ if (ulv->ulv_supported)
+ return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
+
+ return 0;
+}
+
static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -2282,6 +2336,21 @@ static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
return 0;
}
+static int polaris10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
+{
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkDeepSleep)) {
+ if (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_MASTER_DeepSleep_OFF)) {
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to disable Master Deep Sleep switch failed!",
+ return -1);
+ }
+ }
+
+ return 0;
+}
+
static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
@@ -2379,6 +2448,58 @@ static int polaris10_start_dpm(struct pp_hwmgr *hwmgr)
return 0;
}
+static int polaris10_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ /* disable SCLK dpm */
+ if (!data->sclk_dpm_key_disabled)
+ PP_ASSERT_WITH_CODE(
+ (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_DPM_Disable) == 0),
+ "Failed to disable SCLK DPM!",
+ return -1);
+
+ /* disable MCLK dpm */
+ if (!data->mclk_dpm_key_disabled) {
+ PP_ASSERT_WITH_CODE(
+ (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_Disable) == 0),
+ "Failed to disable MCLK DPM!",
+ return -1);
+ }
+
+ return 0;
+}
+
+static int polaris10_stop_dpm(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ /* disable general power management */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+ GLOBAL_PWRMGT_EN, 0);
+ /* disable sclk deep sleep */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
+ DYNAMIC_PM_EN, 0);
+
+ /* disable PCIE dpm */
+ if (!data->pcie_dpm_key_disabled) {
+ PP_ASSERT_WITH_CODE(
+ (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_PCIeDPM_Disable) == 0),
+ "Failed to disable pcie DPM during DPM Stop Function!",
+ return -1);
+ }
+
+ if (polaris10_disable_sclk_mclk_dpm(hwmgr)) {
+		printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!\n");
+ return -1;
+ }
+
+ return 0;
+}
+
static void polaris10_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
{
bool protection;
@@ -2436,6 +2557,23 @@ static int polaris10_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
return polaris10_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}
+static int polaris10_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
+ PHM_AutoThrottleSource source)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ if (data->active_auto_throttle_sources & (1 << source)) {
+ data->active_auto_throttle_sources &= ~(1 << source);
+ polaris10_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
+ }
+ return 0;
+}
+
+static int polaris10_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
+{
+ return polaris10_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
+}
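The throttle-source bookkeeping is a plain bitmask: disable clears the source bit and re-programs the event sources. The enable path (whose body falls outside this hunk) is assumed to be the mirror image:

/* Assumed mirror of the disable path above; sketch only. */
if (!(data->active_auto_throttle_sources & (1 << source))) {
	data->active_auto_throttle_sources |= 1 << source;
	polaris10_set_dpm_event_sources(hwmgr,
			data->active_auto_throttle_sources);
}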
+
int polaris10_pcie_performance_request(struct pp_hwmgr *hwmgr)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
@@ -2530,6 +2668,10 @@ int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to enable deep sleep master switch!", result = tmp_result);
+ tmp_result = polaris10_enable_didt_config(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to enable DiDt config!", result = tmp_result);
+
tmp_result = polaris10_start_dpm(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to start DPM!", result = tmp_result);
@@ -2559,8 +2701,60 @@ int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
int polaris10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
+ int tmp_result, result = 0;
- return 0;
+ tmp_result = (polaris10_is_dpm_running(hwmgr)) ? 0 : -1;
+ PP_ASSERT_WITH_CODE(tmp_result == 0,
+ "DPM is not running, so there is nothing to disable!",
+ return 0);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalController))
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
+
+ tmp_result = polaris10_disable_power_containment(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable power containment!", result = tmp_result);
+
+ tmp_result = polaris10_disable_smc_cac(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable SMC CAC!", result = tmp_result);
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
+
+ tmp_result = polaris10_disable_thermal_auto_throttle(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable thermal auto throttle!", result = tmp_result);
+
+ tmp_result = polaris10_stop_dpm(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to stop DPM!", result = tmp_result);
+
+ tmp_result = polaris10_disable_deep_sleep_master_switch(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable deep sleep master switch!", result = tmp_result);
+
+ tmp_result = polaris10_disable_ulv(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable ULV!", result = tmp_result);
+
+ tmp_result = polaris10_clear_voting_clients(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to clear voting clients!", result = tmp_result);
+
+ tmp_result = polaris10_reset_to_default(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to reset to default!", result = tmp_result);
+
+ tmp_result = polaris10_force_switch_to_arbf0(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to force switch to ARB F0!", result = tmp_result);
+
+ return result;
}
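The teardown above walks the enable sequence in reverse and uses an accumulate-and-continue idiom: a failing stage records its status in result, but every later stage still runs, so the caller sees the most recent failure. Distilled, with a hypothetical stage name:

/* Accumulate-and-continue; 'some_disable_step' is hypothetical. */
tmp_result = some_disable_step(hwmgr);
PP_ASSERT_WITH_CODE((tmp_result == 0),
		"Failed to run disable step!", result = tmp_result);
/* ...subsequent stages execute regardless; 'result' keeps the status. */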
int polaris10_reset_asic_tasks(struct pp_hwmgr *hwmgr)
@@ -2571,13 +2765,6 @@ int polaris10_reset_asic_tasks(struct pp_hwmgr *hwmgr)
int polaris10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- if (data->soft_pp_table) {
- kfree(data->soft_pp_table);
- data->soft_pp_table = NULL;
- }
-
return phm_hwmgr_backend_fini(hwmgr);
}
@@ -2624,17 +2811,22 @@ int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_DynamicUVDState);
/* power tune caps: ramping enabled by default; power containment is optional */
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SQRamping);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DBRamping);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TDRamping);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TCPRamping);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment);
+ if (hwmgr->powercontainment_enabled)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment);
+ else
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment);
+
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_CAC);
@@ -2706,12 +2898,12 @@ static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr)
}
}
-
- PP_ASSERT_WITH_CODE(0 == atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
- VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc),
- "Error retrieving EVV voltage value!",
- continue);
-
+ if (atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
+ VOLTAGE_TYPE_VDDC,
+ sclk, vv_id, &vddc) != 0) {
+ printk(KERN_WARNING "Failed to retrieve EVV voltage!\n");
+ continue;
+ }
/* need to make sure vddc is less than 2V, or else it could burn the ASIC.
* real voltage level in units of 0.01mV */
@@ -2968,13 +3160,19 @@ int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct polaris10_hwmgr *data;
struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
uint32_t temp_reg;
int result;
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
+ data = kzalloc(sizeof(struct polaris10_hwmgr), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ hwmgr->backend = data;
+
data->dll_default_on = false;
data->sram_end = SMC_RAM_END;
data->mclk_dpm0_activity_target = 0xa;
@@ -3063,7 +3261,7 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
if (0 == result) {
struct cgs_system_info sys_info = {0};
- data->is_tlu_enabled = 0;
+ data->is_tlu_enabled = false;
hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
POLARIS10_MAX_HARDWARE_POWERLEVELS;
@@ -3148,7 +3346,7 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
result = cgs_query_system_info(hwmgr->device, &sys_info);
if (result)
- data->pcie_gen_cap = 0x30007;
+ data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
else
data->pcie_gen_cap = (uint32_t)sys_info.value;
if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
@@ -3157,7 +3355,7 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
result = cgs_query_system_info(hwmgr->device, &sys_info);
if (result)
- data->pcie_lane_cap = 0x2f0000;
+ data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
else
data->pcie_lane_cap = (uint32_t)sys_info.value;
@@ -3446,6 +3644,7 @@ static int polaris10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
+
disable_mclk_switching = (1 < info.display_count) ||
disable_mclk_switching_for_frame_lock;
@@ -3950,8 +4149,8 @@ static int polaris10_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if ((0 == data->sclk_dpm_key_disabled) &&
(data->need_update_smu7_dpm_table &
(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
- PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr),
- "Trying to freeze SCLK DPM when DPM is disabled",
+ PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
+ "Trying to freeze SCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
PPSMC_MSG_SCLKDPM_FreezeLevel),
@@ -3962,8 +4161,8 @@ static int polaris10_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if ((0 == data->mclk_dpm_key_disabled) &&
(data->need_update_smu7_dpm_table &
DPMTABLE_OD_UPDATE_MCLK)) {
- PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr),
- "Trying to freeze MCLK DPM when DPM is disabled",
+ PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
+ "Trying to freeze MCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
PPSMC_MSG_MCLKDPM_FreezeLevel),
@@ -4123,7 +4322,6 @@ static int polaris10_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
static int polaris10_trim_dpm_states(struct pp_hwmgr *hwmgr,
const struct polaris10_power_state *polaris10_ps)
{
- int result = 0;
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
uint32_t high_limit_count;
@@ -4143,7 +4341,7 @@ static int polaris10_trim_dpm_states(struct pp_hwmgr *hwmgr,
polaris10_ps->performance_levels[0].memory_clock,
polaris10_ps->performance_levels[high_limit_count].memory_clock);
- return result;
+ return 0;
}
static int polaris10_generate_dpm_level_enable_mask(
@@ -4226,25 +4424,20 @@ int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
return polaris10_enable_disable_uvd_dpm(hwmgr, !bgate);
}
-static int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input)
+int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate)
{
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
- const struct polaris10_power_state *polaris10_nps =
- cast_const_phw_polaris10_power_state(states->pnew_state);
- const struct polaris10_power_state *polaris10_cps =
- cast_const_phw_polaris10_power_state(states->pcurrent_state);
-
uint32_t mm_boot_level_offset, mm_boot_level_value;
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
- if (polaris10_nps->vce_clks.evclk > 0 &&
- (polaris10_cps == NULL || polaris10_cps->vce_clks.evclk == 0)) {
-
- data->smc_state_table.VceBootLevel =
+ if (!bgate) {
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ data->smc_state_table.VceBootLevel =
(uint8_t) (table_info->mm_dep_table->count - 1);
+ else
+ data->smc_state_table.VceBootLevel = 0;
mm_boot_level_offset = data->dpm_table_start +
offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
@@ -4257,18 +4450,14 @@ static int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input)
cgs_write_ind_register(hwmgr->device,
CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) {
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_VCEDPM_SetEnabledMask,
(uint32_t)1 << data->smc_state_table.VceBootLevel);
-
- polaris10_enable_disable_vce_dpm(hwmgr, true);
- } else if (polaris10_nps->vce_clks.evclk == 0 &&
- polaris10_cps != NULL &&
- polaris10_cps->vce_clks.evclk > 0)
- polaris10_enable_disable_vce_dpm(hwmgr, false);
}
+ polaris10_enable_disable_vce_dpm(hwmgr, !bgate);
+
return 0;
}
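With this change the VCE boot level no longer depends on comparing evclk across power states; callers simply report the gating state. A usage sketch (where exactly the gating call sites live is an assumption here):

/* bgate == true: VCE is being power-gated, so its DPM is switched off.
 * bgate == false: the boot level is programmed and VCE DPM re-enabled. */
polaris10_update_vce_dpm(hwmgr, true);   /* before gating VCE */
polaris10_update_vce_dpm(hwmgr, false);  /* after ungating VCE */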
@@ -4353,8 +4542,8 @@ static int polaris10_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
(data->need_update_smu7_dpm_table &
(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
- PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr),
- "Trying to Unfreeze SCLK DPM when DPM is disabled",
+ PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
+ "Trying to Unfreeze SCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
PPSMC_MSG_SCLKDPM_UnfreezeLevel),
@@ -4365,8 +4554,8 @@ static int polaris10_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if ((0 == data->mclk_dpm_key_disabled) &&
(data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
- PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr),
- "Trying to Unfreeze MCLK DPM when DPM is disabled",
+ PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
+ "Trying to Unfreeze MCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
PPSMC_MSG_SCLKDPM_UnfreezeLevel),
@@ -4422,6 +4611,8 @@ static int polaris10_notify_smc_display(struct pp_hwmgr *hwmgr)
return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
}
+
static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
int tmp_result, result = 0;
@@ -4455,11 +4646,6 @@ static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *i
"Failed to generate DPM level enabled mask!",
result = tmp_result);
- tmp_result = polaris10_update_vce_dpm(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to update VCE DPM!",
- result = tmp_result);
-
tmp_result = polaris10_update_sclk_threshold(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to update SCLK threshold!",
@@ -4530,6 +4716,7 @@ int polaris10_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwm
if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */
polaris10_notify_smc_display_change(hwmgr, false);
+
return 0;
}
@@ -4579,6 +4766,7 @@ int polaris10_program_display_gap(struct pp_hwmgr *hwmgr)
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
+
return 0;
}
@@ -4820,42 +5008,6 @@ int polaris10_setup_asic_task(struct pp_hwmgr *hwmgr)
return result;
}
-static int polaris10_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- if (!data->soft_pp_table) {
- data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
- hwmgr->soft_pp_table_size,
- GFP_KERNEL);
- if (!data->soft_pp_table)
- return -ENOMEM;
- }
-
- *table = (char *)&data->soft_pp_table;
-
- return hwmgr->soft_pp_table_size;
-}
-
-static int polaris10_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size)
-{
- struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
- if (!data->soft_pp_table) {
- data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
- if (!data->soft_pp_table)
- return -ENOMEM;
- }
-
- memcpy(data->soft_pp_table, buf, size);
-
- hwmgr->soft_pp_table = data->soft_pp_table;
-
- /* TODO: re-init powerplay to implement modified pptable */
-
- return 0;
-}
-
static int polaris10_force_clock_level(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, uint32_t mask)
{
@@ -4998,6 +5150,89 @@ static int polaris10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
CG_FDO_CTRL2, FDO_PWM_MODE);
}
+static int polaris10_get_sclk_od(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+ struct polaris10_single_dpm_table *golden_sclk_table =
+ &(data->golden_dpm_table.sclk_table);
+ int value;
+
+ value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
+ 100 /
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+ return value;
+}
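The getter reports how far the top sclk DPM level has been pushed past the golden (stock) table, as an integer percent. A worked example with illustrative numbers:

/* current top level = 1392, golden top level = 1266 (same clock units):
 * (1392 - 1266) * 100 / 1266 = 9, reported as 9% overdrive.
 * Integer division truncates, so sub-percent deltas read back as 0. */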
+
+static int polaris10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct polaris10_single_dpm_table *golden_sclk_table =
+ &(data->golden_dpm_table.sclk_table);
+ struct pp_power_state *ps;
+ struct polaris10_power_state *polaris10_ps;
+
+ if (value > 20)
+ value = 20;
+
+ ps = hwmgr->request_ps;
+
+ if (ps == NULL)
+ return -EINVAL;
+
+ polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
+
+ polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].engine_clock =
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
+ value / 100 +
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+ return 0;
+}
+
+static int polaris10_get_mclk_od(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+ struct polaris10_single_dpm_table *golden_mclk_table =
+ &(data->golden_dpm_table.mclk_table);
+ int value;
+
+ value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
+ 100 /
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+ return value;
+}
+
+static int polaris10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct polaris10_single_dpm_table *golden_mclk_table =
+ &(data->golden_dpm_table.mclk_table);
+ struct pp_power_state *ps;
+ struct polaris10_power_state *polaris10_ps;
+
+ if (value > 20)
+ value = 20;
+
+ ps = hwmgr->request_ps;
+
+ if (ps == NULL)
+ return -EINVAL;
+
+ polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
+
+ polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].memory_clock =
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
+ value / 100 +
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+ return 0;
+}
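The setter is the inverse mapping, clamped to 20%, and rewrites only the top performance level of the requested power state. Again with illustrative numbers:

/* golden top memory clock = 1750, requested value = 10:
 * 1750 * 10 / 100 + 1750 = 1925, i.e. a 10% memory overclock.
 * Requests above 20 are clamped, so this path never programs more
 * than golden * 120 / 100. */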
static const struct pp_hwmgr_func polaris10_hwmgr_funcs = {
.backend_init = &polaris10_hwmgr_backend_init,
.backend_fini = &polaris10_hwmgr_backend_fini,
@@ -5036,22 +5271,17 @@ static const struct pp_hwmgr_func polaris10_hwmgr_funcs = {
.check_states_equal = polaris10_check_states_equal,
.set_fan_control_mode = polaris10_set_fan_control_mode,
.get_fan_control_mode = polaris10_get_fan_control_mode,
- .get_pp_table = polaris10_get_pp_table,
- .set_pp_table = polaris10_set_pp_table,
.force_clock_level = polaris10_force_clock_level,
.print_clock_levels = polaris10_print_clock_levels,
.enable_per_cu_power_gating = polaris10_phm_enable_per_cu_power_gating,
+ .get_sclk_od = polaris10_get_sclk_od,
+ .set_sclk_od = polaris10_set_sclk_od,
+ .get_mclk_od = polaris10_get_mclk_od,
+ .set_mclk_od = polaris10_set_mclk_od,
};
int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr)
{
- struct polaris10_hwmgr *data;
-
- data = kzalloc (sizeof(struct polaris10_hwmgr), GFP_KERNEL);
- if (data == NULL)
- return -ENOMEM;
-
- hwmgr->backend = data;
hwmgr->hwmgr_func = &polaris10_hwmgr_funcs;
hwmgr->pptable_func = &tonga_pptable_funcs;
pp_polaris10_thermal_initialize(hwmgr);
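Taken together with the backend_init and backend_fini hunks above, the backend allocation now lives entirely behind the backend callbacks. A sketch of the resulting lifetime; where the final kfree() happens is an assumption (presumably inside phm_hwmgr_backend_fini()):

/* polaris10_hwmgr_init()         - wires up hwmgr_func/pptable_func only
 * polaris10_hwmgr_backend_init() - kzalloc()s the backend, fills defaults
 * polaris10_hwmgr_backend_fini() - delegates teardown to
 *                                  phm_hwmgr_backend_fini() */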
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
index afc343482..33c33947e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
@@ -309,10 +309,6 @@ struct polaris10_hwmgr {
uint32_t up_hyst;
uint32_t disable_dpm_mask;
bool apply_optimized_settings;
-
- /* soft pptable for re-uploading into smu */
- void *soft_pp_table;
-
uint32_t avfs_vdroop_override_setting;
bool apply_avfs_cks_off_voltage;
uint32_t frame_time_x2;
@@ -356,6 +352,6 @@ int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr);
int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int polaris10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
-
+int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
index ae96f14b8..b9cb240a1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
@@ -28,10 +28,360 @@
#include "polaris10_smumgr.h"
#include "smu74_discrete.h"
#include "pp_debug.h"
+#include "gca/gfx_8_0_d.h"
+#include "gca/gfx_8_0_sh_mask.h"
+#include "oss/oss_3_0_sh_mask.h"
#define VOLTAGE_SCALE 4
#define POWERTUNE_DEFAULT_SET_MAX 1
+uint32_t DIDTBlock_Info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK;
+
+struct polaris10_pt_config_reg GCCACConfig_Polaris10[] = {
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value Type
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060013, POLARIS10_CONFIGREG_GC_CAC_IND },
+
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
+
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100013, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900013, POLARIS10_CONFIGREG_GC_CAC_IND },
+
+ { 0xFFFFFFFF }
+};
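Every row in these GC CAC tables targets the same ixGC_CAC_CNTL offset with a full 32-bit mask and zero shift, so each entry amounts to one indirect write of the packed value; the 0xFFFFFFFF row is the terminator consumed by polaris10_program_pt_config_registers() further below. The first row, spelled out:

/* Equivalent single write for { ixGC_CAC_CNTL, 0xFFFFFFFF, 0,
 * 0x00060013, POLARIS10_CONFIGREG_GC_CAC_IND }: */
cgs_write_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC,
		ixGC_CAC_CNTL, 0x00060013);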
+
+struct polaris10_pt_config_reg GCCACConfig_Polaris11[] = {
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value Type
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060011, POLARIS10_CONFIGREG_GC_CAC_IND },
+
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
+
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100011, POLARIS10_CONFIGREG_GC_CAC_IND },
+ { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900011, POLARIS10_CONFIGREG_GC_CAC_IND },
+
+ { 0xFFFFFFFF }
+};
+
+struct polaris10_pt_config_reg DIDTConfig_Polaris10[] = {
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value Type
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0009, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0009, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { 0xFFFFFFFF }
+};
+
+struct polaris10_pt_config_reg DIDTConfig_Polaris11[] = {
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value Type
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
+ { 0xFFFFFFFF }
+};
+
static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
/* sviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
* TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
@@ -209,6 +559,187 @@ static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
return 0;
}
+static int polaris10_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
+{
+ uint32_t en = enable ? 1 : 0;
+ int32_t result = 0;
+ uint32_t data;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0);
+ data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
+ data |= ((en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data);
+ DIDTBlock_Info &= ~SQ_Enable_MASK;
+ DIDTBlock_Info |= en << SQ_Enable_SHIFT;
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0);
+ data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
+ data |= ((en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data);
+ DIDTBlock_Info &= ~DB_Enable_MASK;
+ DIDTBlock_Info |= en << DB_Enable_SHIFT;
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0);
+ data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
+ data |= ((en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data);
+ DIDTBlock_Info &= ~TD_Enable_MASK;
+ DIDTBlock_Info |= en << TD_Enable_SHIFT;
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0);
+ data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
+ data |= ((en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data);
+ DIDTBlock_Info &= ~TCP_Enable_MASK;
+ DIDTBlock_Info |= en << TCP_Enable_SHIFT;
+ }
+
+ if (enable)
+ result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_Didt_Block_Function, DIDTBlock_Info);
+
+ return result;
+}
+
+static int polaris10_program_pt_config_registers(struct pp_hwmgr *hwmgr,
+ struct polaris10_pt_config_reg *cac_config_regs)
+{
+ struct polaris10_pt_config_reg *config_regs = cac_config_regs;
+ uint32_t cache = 0;
+ uint32_t data = 0;
+
+ PP_ASSERT_WITH_CODE((config_regs != NULL), "Invalid config register table.", return -EINVAL);
+
+ while (config_regs->offset != 0xFFFFFFFF) {
+ if (config_regs->type == POLARIS10_CONFIGREG_CACHE)
+ cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ else {
+ switch (config_regs->type) {
+ case POLARIS10_CONFIGREG_SMC_IND:
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset);
+ break;
+
+ case POLARIS10_CONFIGREG_DIDT_IND:
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset);
+ break;
+
+ case POLARIS10_CONFIGREG_GC_CAC_IND:
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset);
+ break;
+
+ default:
+ data = cgs_read_register(hwmgr->device, config_regs->offset);
+ break;
+ }
+
+ data &= ~config_regs->mask;
+ data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ data |= cache;
+
+ switch (config_regs->type) {
+ case POLARIS10_CONFIGREG_SMC_IND:
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset, data);
+ break;
+
+ case POLARIS10_CONFIGREG_DIDT_IND:
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset, data);
+ break;
+
+ case POLARIS10_CONFIGREG_GC_CAC_IND:
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset, data);
+ break;
+
+ default:
+ cgs_write_register(hwmgr->device, config_regs->offset, data);
+ break;
+ }
+ cache = 0;
+ }
+
+ config_regs++;
+ }
+
+ return 0;
+}
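/* Illustrative only -- the table shape the walker above consumes, in the
 * same format as DIDTConfig_Polaris10 earlier in this patch. Non-cache
 * entries are applied as masked read-modify-writes; POLARIS10_CONFIGREG_CACHE
 * entries are accumulated and OR-ed into the next write. The 0xFFFFFFFF
 * offset acts as the sentinel that ends the walk.
 */
static const struct polaris10_pt_config_reg example_didt_table[] = {
	{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }	/* sentinel: stops polaris10_program_pt_config_registers() */
};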
+
+int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ uint32_t num_se = 0;
+ uint32_t count, value, value2;
+ struct cgs_system_info sys_info = {0};
+
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO;
+ result = cgs_query_system_info(hwmgr->device, &sys_info);
+
+ if (result == 0)
+ num_se = sys_info.value;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
+
+ /* TODO: disable clock gating before programming DIDT */
+ value = 0;
+ value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX);
+ for (count = 0; count < num_se; count++) {
+ value = SYS_GRBM_GFX_INDEX_DATA__INSTANCE_BROADCAST_WRITES_MASK
+ | SYS_GRBM_GFX_INDEX_DATA__SH_BROADCAST_WRITES_MASK
+ | (count << SYS_GRBM_GFX_INDEX_DATA__SE_INDEX__SHIFT);
+ cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value);
+
+ if (hwmgr->chip_id == CHIP_POLARIS10) {
+ result = polaris10_program_pt_config_registers(hwmgr, GCCACConfig_Polaris10);
+ PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+ result = polaris10_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10);
+ PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+ } else if (hwmgr->chip_id == CHIP_POLARIS11) {
+ result = polaris10_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
+ PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+ result = polaris10_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11);
+ PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
+ }
+ }
+ cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value2);
+
+ result = polaris10_enable_didt(hwmgr, true);
+ PP_ASSERT_WITH_CODE((result == 0), "EnableDiDt failed.", return result);
+
+ /* TODO: re-enable clock gating after DIDT is enabled */
+ }
+
+ return 0;
+}
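/* Why mmGRBM_GFX_INDEX is saved and restored above (illustrative sketch,
 * names taken from the surrounding code): while SE_INDEX is programmed,
 * register traffic targets a single shader engine, so the previous
 * (broadcast) value must be written back before any other code touches
 * GFX registers.
 *
 *	saved = cgs_read_register(dev, mmGRBM_GFX_INDEX);
 *	for (se = 0; se < num_se; se++) {
 *		cgs_write_register(dev, mmGRBM_GFX_INDEX,
 *				   INSTANCE_BROADCAST | SH_BROADCAST |
 *				   (se << SE_INDEX__SHIFT));
 *		// program this SE's GC_CAC / DIDT tables
 *	}
 *	cgs_write_register(dev, mmGRBM_GFX_INDEX, saved);
 */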
+
+int polaris10_disable_didt_config(struct pp_hwmgr *hwmgr)
+{
+ int result;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
+ /* TODO: disable clock gating before disabling DIDT */
+
+ result = polaris10_enable_didt(hwmgr, false);
+ PP_ASSERT_WITH_CODE((result == 0), "DisableDiDt failed.", return result);
+ /* TODO: re-enable clock gating after DIDT is disabled */
+ }
+
+ return 0;
+}
+
+
static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
@@ -312,6 +843,23 @@ int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr)
return result;
}
+int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ int result = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CAC) && data->cac_enabled) {
+ int smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ (uint16_t)(PPSMC_MSG_DisableCac));
+ PP_ASSERT_WITH_CODE((smc_result == 0),
+ "Failed to disable CAC in SMC.", result = -1);
+
+ data->cac_enabled = false;
+ }
+ return result;
+}
+
int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
{
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
@@ -373,6 +921,48 @@ int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr)
return result;
}
+int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ int result = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment) &&
+ data->power_containment_features) {
+ int smc_result;
+
+ if (data->power_containment_features &
+ POWERCONTAINMENT_FEATURE_TDCLimit) {
+ smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ (uint16_t)(PPSMC_MSG_TDCLimitDisable));
+ PP_ASSERT_WITH_CODE((smc_result == 0),
+ "Failed to disable TDCLimit in SMC.",
+ result = smc_result);
+ }
+
+ if (data->power_containment_features &
+ POWERCONTAINMENT_FEATURE_DTE) {
+ smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ (uint16_t)(PPSMC_MSG_DisableDTE));
+ PP_ASSERT_WITH_CODE((smc_result == 0),
+ "Failed to disable DTE in SMC.",
+ result = smc_result);
+ }
+
+ if (data->power_containment_features &
+ POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
+ smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
+ PP_ASSERT_WITH_CODE((smc_result == 0),
+ "Failed to disable PkgPwrTracking in SMC.",
+ result = smc_result);
+ }
+ data->power_containment_features = 0;
+ }
+
+ return result;
+}
+
int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr)
{
struct phm_ppt_v1_information *table_info =
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h
index 68bc1cb6d..bc78e28f0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h
@@ -27,15 +27,37 @@ enum polaris10_pt_config_reg_type {
POLARIS10_CONFIGREG_MMR = 0,
POLARIS10_CONFIGREG_SMC_IND,
POLARIS10_CONFIGREG_DIDT_IND,
+ POLARIS10_CONFIGREG_GC_CAC_IND,
POLARIS10_CONFIGREG_CACHE,
POLARIS10_CONFIGREG_MAX
};
+#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xfffc0000
+#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x12
+#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xfffc0000
+#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x12
+#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xfffc0000
+#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x12
+#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK 0xc0000000
+#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001e
+#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK 0xc0000000
+#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001e
+#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK 0xc0000000
+#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001e
+
/* PowerContainment Features */
#define POWERCONTAINMENT_FEATURE_DTE 0x00000001
#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
+#define ixGC_CAC_CNTL 0x0000
+#define ixDIDT_SQ_STALL_CTRL 0x0004
+#define ixDIDT_SQ_TUNING_CTRL 0x0005
+#define ixDIDT_TD_STALL_CTRL 0x0044
+#define ixDIDT_TD_TUNING_CTRL 0x0045
+#define ixDIDT_TCP_STALL_CTRL 0x0064
+#define ixDIDT_TCP_TUNING_CTRL 0x0065
+
struct polaris10_pt_config_reg {
uint32_t offset;
uint32_t mask;
@@ -62,9 +84,11 @@ void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr);
int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr);
+int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr);
int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr);
+int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr);
int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr);
-
+int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr);
#endif /* POLARIS10_POWERTUNE_H */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
index a3c38bbd1..1944d289f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
@@ -66,7 +66,7 @@ int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
int result;
struct cgs_system_info info = {0};
- if( 0 != acpi_atcs_notify_pcie_device_ready(device))
+ if (acpi_atcs_notify_pcie_device_ready(device))
return -EINVAL;
info.size = sizeof(struct cgs_system_info);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index ffc7c0dd3..26f3e30d0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -179,13 +179,12 @@ int atomctrl_set_engine_dram_timings_rv770(
/* They are both in 10KHz Units. */
engine_clock_parameters.ulTargetEngineClock =
- (uint32_t) engine_clock & SET_CLOCK_FREQ_MASK;
- engine_clock_parameters.ulTargetEngineClock |=
- (COMPUTE_ENGINE_PLL_PARAM << 24);
+ cpu_to_le32((engine_clock & SET_CLOCK_FREQ_MASK) |
+ ((COMPUTE_ENGINE_PLL_PARAM << 24)));
/* in 10 khz units.*/
engine_clock_parameters.sReserved.ulClock =
- (uint32_t) memory_clock & SET_CLOCK_FREQ_MASK;
+ cpu_to_le32(memory_clock & SET_CLOCK_FREQ_MASK);
return cgs_atom_exec_cmd_table(hwmgr->device,
GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
&engine_clock_parameters);
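The conversions introduced throughout this file follow a single pattern: ATOM
BIOS parameter blocks store multi-byte fields little-endian, so every value is
converted with the standard cpu_to_le*/le*_to_cpu helpers at the boundary. A
minimal self-contained sketch of the round trip (the struct and functions here
are illustrative, not part of the patch):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical parameter block mirroring the ATOM convention of
 * little-endian multi-byte fields. */
struct example_clock_params {
	__le32 ulClock;		/* stored little-endian, as the BIOS expects */
};

static void example_fill(struct example_clock_params *p, u32 clock_10khz)
{
	p->ulClock = cpu_to_le32(clock_10khz);	/* CPU order -> table order */
}

static u32 example_read(const struct example_clock_params *p)
{
	return le32_to_cpu(p->ulClock);		/* table order -> CPU order */
}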
@@ -252,7 +251,7 @@ int atomctrl_get_memory_pll_dividers_si(
COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 mpll_parameters;
int result;
- mpll_parameters.ulClock = (uint32_t) clock_value;
+ mpll_parameters.ulClock = cpu_to_le32(clock_value);
mpll_parameters.ucInputFlag = (uint8_t)((strobe_mode) ? 1 : 0);
result = cgs_atom_exec_cmd_table
@@ -262,9 +261,9 @@ int atomctrl_get_memory_pll_dividers_si(
if (0 == result) {
mpll_param->mpll_fb_divider.clk_frac =
- mpll_parameters.ulFbDiv.usFbDivFrac;
+ le16_to_cpu(mpll_parameters.ulFbDiv.usFbDivFrac);
mpll_param->mpll_fb_divider.cl_kf =
- mpll_parameters.ulFbDiv.usFbDiv;
+ le16_to_cpu(mpll_parameters.ulFbDiv.usFbDiv);
mpll_param->mpll_post_divider =
(uint32_t)mpll_parameters.ucPostDiv;
mpll_param->vco_mode =
@@ -300,7 +299,7 @@ int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2 mpll_parameters;
int result;
- mpll_parameters.ulClock.ulClock = (uint32_t)clock_value;
+ mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value);
result = cgs_atom_exec_cmd_table(hwmgr->device,
GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
@@ -320,7 +319,7 @@ int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 pll_parameters;
int result;
- pll_parameters.ulClock = clock_value;
+ pll_parameters.ulClock = cpu_to_le32(clock_value);
result = cgs_atom_exec_cmd_table
(hwmgr->device,
@@ -329,7 +328,7 @@ int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
if (0 == result) {
dividers->pll_post_divider = pll_parameters.ucPostDiv;
- dividers->real_clock = pll_parameters.ulClock;
+ dividers->real_clock = le32_to_cpu(pll_parameters.ulClock);
}
return result;
@@ -343,7 +342,7 @@ int atomctrl_get_engine_pll_dividers_vi(
COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters;
int result;
- pll_patameters.ulClock.ulClock = clock_value;
+ pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value);
pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;
result = cgs_atom_exec_cmd_table
@@ -355,12 +354,12 @@ int atomctrl_get_engine_pll_dividers_vi(
dividers->pll_post_divider =
pll_patameters.ulClock.ucPostDiv;
dividers->real_clock =
- pll_patameters.ulClock.ulClock;
+ le32_to_cpu(pll_patameters.ulClock.ulClock);
dividers->ul_fb_div.ul_fb_div_frac =
- pll_patameters.ulFbDiv.usFbDivFrac;
+ le16_to_cpu(pll_patameters.ulFbDiv.usFbDivFrac);
dividers->ul_fb_div.ul_fb_div =
- pll_patameters.ulFbDiv.usFbDiv;
+ le16_to_cpu(pll_patameters.ulFbDiv.usFbDiv);
dividers->uc_pll_ref_div =
pll_patameters.ucPllRefDiv;
@@ -380,7 +379,7 @@ int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr,
COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7 pll_patameters;
int result;
- pll_patameters.ulClock.ulClock = clock_value;
+ pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value);
pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;
result = cgs_atom_exec_cmd_table
@@ -412,7 +411,7 @@ int atomctrl_get_dfs_pll_dividers_vi(
COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters;
int result;
- pll_patameters.ulClock.ulClock = clock_value;
+ pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value);
pll_patameters.ulClock.ucPostDiv =
COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK;
@@ -425,12 +424,12 @@ int atomctrl_get_dfs_pll_dividers_vi(
dividers->pll_post_divider =
pll_patameters.ulClock.ucPostDiv;
dividers->real_clock =
- pll_patameters.ulClock.ulClock;
+ le32_to_cpu(pll_patameters.ulClock.ulClock);
dividers->ul_fb_div.ul_fb_div_frac =
- pll_patameters.ulFbDiv.usFbDivFrac;
+ le16_to_cpu(pll_patameters.ulFbDiv.usFbDivFrac);
dividers->ul_fb_div.ul_fb_div =
- pll_patameters.ulFbDiv.usFbDiv;
+ le16_to_cpu(pll_patameters.ulFbDiv.usFbDiv);
dividers->uc_pll_ref_div =
pll_patameters.ucPllRefDiv;
@@ -519,13 +518,13 @@ int atomctrl_get_voltage_table_v3(
for (i = 0; i < voltage_object->asGpioVoltageObj.ucGpioEntryNum; i++) {
voltage_table->entries[i].value =
- voltage_object->asGpioVoltageObj.asVolGpioLut[i].usVoltageValue;
+ le16_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].usVoltageValue);
voltage_table->entries[i].smio_low =
- voltage_object->asGpioVoltageObj.asVolGpioLut[i].ulVoltageId;
+ le32_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].ulVoltageId);
}
voltage_table->mask_low =
- voltage_object->asGpioVoltageObj.ulGpioMaskVal;
+ le32_to_cpu(voltage_object->asGpioVoltageObj.ulGpioMaskVal);
voltage_table->count =
voltage_object->asGpioVoltageObj.ucGpioEntryNum;
voltage_table->phase_delay =
@@ -552,13 +551,13 @@ static bool atomctrl_lookup_gpio_pin(
pin_assignment->ucGpioPinBitShift;
gpio_pin_assignment->us_gpio_pin_aindex =
le16_to_cpu(pin_assignment->usGpioPin_AIndex);
- return false;
+ return true;
}
offset += offsetof(ATOM_GPIO_PIN_ASSIGNMENT, ucGPIO_ID) + 1;
}
- return true;
+ return false;
}
/**
@@ -650,8 +649,8 @@ int atomctrl_calculate_voltage_evv_on_sclk(
return -1;
if (getASICProfilingInfo->asHeader.ucTableFormatRevision < 3 ||
- (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 &&
- getASICProfilingInfo->asHeader.ucTableContentRevision < 4))
+ (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 &&
+ getASICProfilingInfo->asHeader.ucTableContentRevision < 4))
return -1;
/*-----------------------------------------------------------
@@ -662,37 +661,37 @@ int atomctrl_calculate_voltage_evv_on_sclk(
switch (dpm_level) {
case 1:
- fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm1);
- fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM1, 1000);
+ fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm1));
+ fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM1), 1000);
break;
case 2:
- fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm2);
- fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM2, 1000);
+ fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm2));
+ fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM2), 1000);
break;
case 3:
- fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm3);
- fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM3, 1000);
+ fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm3));
+ fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM3), 1000);
break;
case 4:
- fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm4);
- fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM4, 1000);
+ fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm4));
+ fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM4), 1000);
break;
case 5:
- fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm5);
- fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM5, 1000);
+ fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm5));
+ fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM5), 1000);
break;
case 6:
- fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm6);
- fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM6, 1000);
+ fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm6));
+ fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM6), 1000);
break;
case 7:
- fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm7);
- fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM7, 1000);
+ fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm7));
+ fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM7), 1000);
break;
default:
printk(KERN_ERR "DPM Level not supported\n");
fPowerDPMx = Convert_ULONG_ToFraction(1);
- fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM0, 1000);
+ fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM0), 1000);
}
/*-------------------------
@@ -716,9 +715,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
return result;
/* Finally, the actual fuse value */
- ul_RO_fused = sOutput_FuseValues.ulEfuseValue;
- fMin = GetScaledFraction(sRO_fuse.ulEfuseMin, 1);
- fRange = GetScaledFraction(sRO_fuse.ulEfuseEncodeRange, 1);
+ ul_RO_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
+ fMin = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseMin), 1);
+ fRange = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseEncodeRange), 1);
fRO_fused = fDecodeLinearFuse(ul_RO_fused, fMin, fRange, sRO_fuse.ucEfuseLength);
sCACm_fuse = getASICProfilingInfo->sCACm;
@@ -736,9 +735,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
if (result)
return result;
- ul_CACm_fused = sOutput_FuseValues.ulEfuseValue;
- fMin = GetScaledFraction(sCACm_fuse.ulEfuseMin, 1000);
- fRange = GetScaledFraction(sCACm_fuse.ulEfuseEncodeRange, 1000);
+ ul_CACm_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
+ fMin = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseMin), 1000);
+ fRange = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseEncodeRange), 1000);
fCACm_fused = fDecodeLinearFuse(ul_CACm_fused, fMin, fRange, sCACm_fuse.ucEfuseLength);
@@ -756,9 +755,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
if (result)
return result;
- ul_CACb_fused = sOutput_FuseValues.ulEfuseValue;
- fMin = GetScaledFraction(sCACb_fuse.ulEfuseMin, 1000);
- fRange = GetScaledFraction(sCACb_fuse.ulEfuseEncodeRange, 1000);
+ ul_CACb_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
+ fMin = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseMin), 1000);
+ fRange = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseEncodeRange), 1000);
fCACb_fused = fDecodeLinearFuse(ul_CACb_fused, fMin, fRange, sCACb_fuse.ucEfuseLength);
@@ -777,9 +776,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
if (result)
return result;
- ul_Kt_Beta_fused = sOutput_FuseValues.ulEfuseValue;
- fAverage = GetScaledFraction(sKt_Beta_fuse.ulEfuseEncodeAverage, 1000);
- fRange = GetScaledFraction(sKt_Beta_fuse.ulEfuseEncodeRange, 1000);
+ ul_Kt_Beta_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
+ fAverage = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeAverage), 1000);
+ fRange = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeRange), 1000);
fKt_Beta_fused = fDecodeLogisticFuse(ul_Kt_Beta_fused,
fAverage, fRange, sKt_Beta_fuse.ucEfuseLength);
@@ -798,9 +797,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
if (result)
return result;
- ul_Kv_m_fused = sOutput_FuseValues.ulEfuseValue;
- fAverage = GetScaledFraction(sKv_m_fuse.ulEfuseEncodeAverage, 1000);
- fRange = GetScaledFraction((sKv_m_fuse.ulEfuseEncodeRange & 0x7fffffff), 1000);
+ ul_Kv_m_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
+ fAverage = GetScaledFraction(le32_to_cpu(sKv_m_fuse.ulEfuseEncodeAverage), 1000);
+ fRange = GetScaledFraction((le32_to_cpu(sKv_m_fuse.ulEfuseEncodeRange) & 0x7fffffff), 1000);
fRange = fMultiply(fRange, ConvertToFraction(-1));
fKv_m_fused = fDecodeLogisticFuse(ul_Kv_m_fused,
@@ -820,9 +819,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
if (result)
return result;
- ul_Kv_b_fused = sOutput_FuseValues.ulEfuseValue;
- fAverage = GetScaledFraction(sKv_b_fuse.ulEfuseEncodeAverage, 1000);
- fRange = GetScaledFraction(sKv_b_fuse.ulEfuseEncodeRange, 1000);
+ ul_Kv_b_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
+ fAverage = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeAverage), 1000);
+ fRange = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeRange), 1000);
fKv_b_fused = fDecodeLogisticFuse(ul_Kv_b_fused,
fAverage, fRange, sKv_b_fuse.ucEfuseLength);
@@ -851,9 +850,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
if (result)
return result;
- ul_FT_Lkg_V0NORM = sOutput_FuseValues.ulEfuseValue;
- fLn_MaxDivMin = GetScaledFraction(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin, 10000);
- fMin = GetScaledFraction(getASICProfilingInfo->ulLkgEncodeMin, 10000);
+ ul_FT_Lkg_V0NORM = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
+ fLn_MaxDivMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin), 10000);
+ fMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeMin), 10000);
fFT_Lkg_V0NORM = fDecodeLeakageID(ul_FT_Lkg_V0NORM,
fLn_MaxDivMin, fMin, getASICProfilingInfo->ucLkgEfuseLength);
@@ -863,40 +862,40 @@ int atomctrl_calculate_voltage_evv_on_sclk(
* PART 2 - Grabbing all required values
*-------------------------------------------
*/
- fSM_A0 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A0, 1000000),
+ fSM_A0 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A0), 1000000),
ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A0_sign)));
- fSM_A1 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A1, 1000000),
+ fSM_A1 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A1), 1000000),
ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A1_sign)));
- fSM_A2 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A2, 100000),
+ fSM_A2 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A2), 100000),
ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A2_sign)));
- fSM_A3 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A3, 1000000),
+ fSM_A3 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A3), 1000000),
ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A3_sign)));
- fSM_A4 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A4, 1000000),
+ fSM_A4 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A4), 1000000),
ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A4_sign)));
- fSM_A5 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A5, 1000),
+ fSM_A5 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A5), 1000),
ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A5_sign)));
- fSM_A6 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A6, 1000),
+ fSM_A6 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A6), 1000),
ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A6_sign)));
- fSM_A7 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A7, 1000),
+ fSM_A7 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A7), 1000),
ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A7_sign)));
- fMargin_RO_a = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_a);
- fMargin_RO_b = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_b);
- fMargin_RO_c = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_c);
+ fMargin_RO_a = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_a));
+ fMargin_RO_b = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_b));
+ fMargin_RO_c = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_c));
- fMargin_fixed = ConvertToFraction(getASICProfilingInfo->ulMargin_fixed);
+ fMargin_fixed = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_fixed));
fMargin_FMAX_mean = GetScaledFraction(
- getASICProfilingInfo->ulMargin_Fmax_mean, 10000);
+ le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_mean), 10000);
fMargin_Plat_mean = GetScaledFraction(
- getASICProfilingInfo->ulMargin_plat_mean, 10000);
+ le32_to_cpu(getASICProfilingInfo->ulMargin_plat_mean), 10000);
fMargin_FMAX_sigma = GetScaledFraction(
- getASICProfilingInfo->ulMargin_Fmax_sigma, 10000);
+ le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_sigma), 10000);
fMargin_Plat_sigma = GetScaledFraction(
- getASICProfilingInfo->ulMargin_plat_sigma, 10000);
+ le32_to_cpu(getASICProfilingInfo->ulMargin_plat_sigma), 10000);
fMargin_DC_sigma = GetScaledFraction(
- getASICProfilingInfo->ulMargin_DC_sigma, 100);
+ le32_to_cpu(getASICProfilingInfo->ulMargin_DC_sigma), 100);
fMargin_DC_sigma = fDivide(fMargin_DC_sigma, ConvertToFraction(1000));
fCACm_fused = fDivide(fCACm_fused, ConvertToFraction(100));
@@ -908,14 +907,14 @@ int atomctrl_calculate_voltage_evv_on_sclk(
fSclk = GetScaledFraction(sclk, 100);
fV_max = fDivide(GetScaledFraction(
- getASICProfilingInfo->ulMaxVddc, 1000), ConvertToFraction(4));
- fT_prod = GetScaledFraction(getASICProfilingInfo->ulBoardCoreTemp, 10);
- fLKG_Factor = GetScaledFraction(getASICProfilingInfo->ulEvvLkgFactor, 100);
- fT_FT = GetScaledFraction(getASICProfilingInfo->ulLeakageTemp, 10);
+ le32_to_cpu(getASICProfilingInfo->ulMaxVddc), 1000), ConvertToFraction(4));
+ fT_prod = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulBoardCoreTemp), 10);
+ fLKG_Factor = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulEvvLkgFactor), 100);
+ fT_FT = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLeakageTemp), 10);
fV_FT = fDivide(GetScaledFraction(
- getASICProfilingInfo->ulLeakageVoltage, 1000), ConvertToFraction(4));
+ le32_to_cpu(getASICProfilingInfo->ulLeakageVoltage), 1000), ConvertToFraction(4));
fV_min = fDivide(GetScaledFraction(
- getASICProfilingInfo->ulMinVddc, 1000), ConvertToFraction(4));
+ le32_to_cpu(getASICProfilingInfo->ulMinVddc), 1000), ConvertToFraction(4));
/*-----------------------
* PART 3
@@ -925,7 +924,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
fA_Term = fAdd(fMargin_RO_a, fAdd(fMultiply(fSM_A4, fSclk), fSM_A5));
fB_Term = fAdd(fAdd(fMultiply(fSM_A2, fSclk), fSM_A6), fMargin_RO_b);
fC_Term = fAdd(fMargin_RO_c,
- fAdd(fMultiply(fSM_A0,fLkg_FT),
+ fAdd(fMultiply(fSM_A0, fLkg_FT),
fAdd(fMultiply(fSM_A1, fMultiply(fLkg_FT, fSclk)),
fAdd(fMultiply(fSM_A3, fSclk),
fSubtract(fSM_A7, fRO_fused)))));
@@ -1063,9 +1062,55 @@ int atomctrl_get_voltage_evv_on_sclk(
get_voltage_info_param_space.ucVoltageMode =
ATOM_GET_VOLTAGE_EVV_VOLTAGE;
get_voltage_info_param_space.usVoltageLevel =
- virtual_voltage_Id;
+ cpu_to_le16(virtual_voltage_Id);
get_voltage_info_param_space.ulSCLKFreq =
- sclk;
+ cpu_to_le32(sclk);
+
+ result = cgs_atom_exec_cmd_table(hwmgr->device,
+ GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
+ &get_voltage_info_param_space);
+
+ if (0 != result)
+ return result;
+
+ *voltage = le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
+ (&get_voltage_info_param_space))->usVoltageLevel);
+
+ return result;
+}
+
+/**
+ * atomctrl_get_voltage_evv - get the voltage via a call to the ATOM COMMAND table.
+ * @param hwmgr input: pointer to hwManager
+ * @param virtual_voltage_id input: voltage id which matches the per-voltage DPM state: 0xff01, 0xff02 ... 0xff08
+ * @param voltage output: real voltage level in units of mV
+ */
+int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr,
+ uint16_t virtual_voltage_id,
+ uint16_t *voltage)
+{
+ int result;
+ int entry_id;
+ GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space;
+
+ /* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */
+ for (entry_id = 0; entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count; entry_id++) {
+ if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].v == virtual_voltage_id) {
+ /* found */
+ break;
+ }
+ }
+
+ PP_ASSERT_WITH_CODE(entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count,
+ "Can't find requested voltage id in vddc_dependency_on_sclk table!",
+ return -EINVAL;
+ );
+
+ get_voltage_info_param_space.ucVoltageType = VOLTAGE_TYPE_VDDC;
+ get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
+ get_voltage_info_param_space.usVoltageLevel = virtual_voltage_id;
+ get_voltage_info_param_space.ulSCLKFreq =
+ cpu_to_le32(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].clk);
result = cgs_atom_exec_cmd_table(hwmgr->device,
GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
@@ -1074,8 +1119,8 @@ int atomctrl_get_voltage_evv_on_sclk(
if (0 != result)
return result;
- *voltage = ((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
- (&get_voltage_info_param_space))->usVoltageLevel;
+ *voltage = le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
+ (&get_voltage_info_param_space))->usVoltageLevel);
return result;
}
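/* Hypothetical caller sketch for the new helper: resolve the real VDDC
 * behind leakage ID 0xff01 (IDs 0xff01..0xff08, per the kernel-doc above);
 * returns 0 on success with *voltage in mV.
 *
 *	uint16_t vddc;
 *
 *	if (!atomctrl_get_voltage_evv(hwmgr, 0xff01, &vddc))
 *		set_up_leakage_mapping(vddc);	// hypothetical consumer
 */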
@@ -1165,8 +1210,8 @@ static int asic_internal_ss_get_ss_asignment(struct pp_hwmgr *hwmgr,
if (entry_found) {
ssEntry->speed_spectrum_percentage =
- ssInfo->usSpreadSpectrumPercentage;
- ssEntry->speed_spectrum_rate = ssInfo->usSpreadRateInKhz;
+ le16_to_cpu(ssInfo->usSpreadSpectrumPercentage);
+ ssEntry->speed_spectrum_rate = le16_to_cpu(ssInfo->usSpreadRateInKhz);
if (((GET_DATA_TABLE_MAJOR_REVISION(table) == 2) &&
(GET_DATA_TABLE_MINOR_REVISION(table) >= 2)) ||
@@ -1222,7 +1267,7 @@ int atomctrl_read_efuse(void *device, uint16_t start_index,
int result;
READ_EFUSE_VALUE_PARAMETER efuse_param;
- efuse_param.sEfuse.usEfuseIndex = (start_index / 32) * 4;
+ efuse_param.sEfuse.usEfuseIndex = cpu_to_le16((start_index / 32) * 4);
efuse_param.sEfuse.ucBitShift = (uint8_t)
(start_index - ((start_index / 32) * 32));
efuse_param.sEfuse.ucBitLength = (uint8_t)
@@ -1232,19 +1277,21 @@ int atomctrl_read_efuse(void *device, uint16_t start_index,
GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
&efuse_param);
if (!result)
- *efuse = efuse_param.ulEfuseValue & mask;
+ *efuse = le32_to_cpu(efuse_param.ulEfuseValue) & mask;
return result;
}
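/* Worked example of the index math above, with illustrative values:
 * start_index = 77 (a raw efuse bit index)
 *   usEfuseIndex = (77 / 32) * 4      = 8    byte offset of the dword holding bit 77
 *   ucBitShift   = 77 - (77 / 32) * 32 = 13   bit position inside that dword
 */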
int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
- uint8_t level)
+ uint8_t level)
{
DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1 memory_clock_parameters;
int result;
- memory_clock_parameters.asDPMMCReg.ulClock.ulClockFreq = memory_clock & SET_CLOCK_FREQ_MASK;
- memory_clock_parameters.asDPMMCReg.ulClock.ulComputeClockFlag = ADJUST_MC_SETTING_PARAM;
+ memory_clock_parameters.asDPMMCReg.ulClock.ulClockFreq =
+ memory_clock & SET_CLOCK_FREQ_MASK;
+ memory_clock_parameters.asDPMMCReg.ulClock.ulComputeClockFlag =
+ ADJUST_MC_SETTING_PARAM;
memory_clock_parameters.asDPMMCReg.ucMclkDPMState = level;
result = cgs_atom_exec_cmd_table
@@ -1264,8 +1311,8 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
get_voltage_info_param_space.ucVoltageType = voltage_type;
get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
- get_voltage_info_param_space.usVoltageLevel = virtual_voltage_Id;
- get_voltage_info_param_space.ulSCLKFreq = sclk;
+ get_voltage_info_param_space.usVoltageLevel = cpu_to_le16(virtual_voltage_Id);
+ get_voltage_info_param_space.ulSCLKFreq = cpu_to_le32(sclk);
result = cgs_atom_exec_cmd_table(hwmgr->device,
GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
@@ -1274,7 +1321,7 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
if (0 != result)
return result;
- *voltage = ((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel;
+ *voltage = le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel);
return result;
}
@@ -1295,15 +1342,19 @@ int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctr
for (i = 0; i < psmu_info->ucSclkEntryNum; i++) {
table->entry[i].ucVco_setting = psmu_info->asSclkFcwRangeEntry[i].ucVco_setting;
table->entry[i].ucPostdiv = psmu_info->asSclkFcwRangeEntry[i].ucPostdiv;
- table->entry[i].usFcw_pcc = psmu_info->asSclkFcwRangeEntry[i].ucFcw_pcc;
- table->entry[i].usFcw_trans_upper = psmu_info->asSclkFcwRangeEntry[i].ucFcw_trans_upper;
- table->entry[i].usRcw_trans_lower = psmu_info->asSclkFcwRangeEntry[i].ucRcw_trans_lower;
+ table->entry[i].usFcw_pcc =
+ le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_pcc);
+ table->entry[i].usFcw_trans_upper =
+ le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_trans_upper);
+ table->entry[i].usRcw_trans_lower =
+ le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucRcw_trans_lower);
}
return 0;
}
-int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param)
+int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
+ struct pp_atom_ctrl__avfs_parameters *param)
{
ATOM_ASIC_PROFILING_INFO_V3_6 *profile = NULL;
@@ -1317,30 +1368,30 @@ int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__a
if (!profile)
return -1;
- param->ulAVFS_meanNsigma_Acontant0 = profile->ulAVFS_meanNsigma_Acontant0;
- param->ulAVFS_meanNsigma_Acontant1 = profile->ulAVFS_meanNsigma_Acontant1;
- param->ulAVFS_meanNsigma_Acontant2 = profile->ulAVFS_meanNsigma_Acontant2;
- param->usAVFS_meanNsigma_DC_tol_sigma = profile->usAVFS_meanNsigma_DC_tol_sigma;
- param->usAVFS_meanNsigma_Platform_mean = profile->usAVFS_meanNsigma_Platform_mean;
- param->usAVFS_meanNsigma_Platform_sigma = profile->usAVFS_meanNsigma_Platform_sigma;
- param->ulGB_VDROOP_TABLE_CKSOFF_a0 = profile->ulGB_VDROOP_TABLE_CKSOFF_a0;
- param->ulGB_VDROOP_TABLE_CKSOFF_a1 = profile->ulGB_VDROOP_TABLE_CKSOFF_a1;
- param->ulGB_VDROOP_TABLE_CKSOFF_a2 = profile->ulGB_VDROOP_TABLE_CKSOFF_a2;
- param->ulGB_VDROOP_TABLE_CKSON_a0 = profile->ulGB_VDROOP_TABLE_CKSON_a0;
- param->ulGB_VDROOP_TABLE_CKSON_a1 = profile->ulGB_VDROOP_TABLE_CKSON_a1;
- param->ulGB_VDROOP_TABLE_CKSON_a2 = profile->ulGB_VDROOP_TABLE_CKSON_a2;
- param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1;
- param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2;
- param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b;
- param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSON_m1;
- param->usAVFSGB_FUSE_TABLE_CKSON_m2 = profile->usAVFSGB_FUSE_TABLE_CKSON_m2;
- param->ulAVFSGB_FUSE_TABLE_CKSON_b = profile->ulAVFSGB_FUSE_TABLE_CKSON_b;
- param->usMaxVoltage_0_25mv = profile->usMaxVoltage_0_25mv;
+ param->ulAVFS_meanNsigma_Acontant0 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant0);
+ param->ulAVFS_meanNsigma_Acontant1 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant1);
+ param->ulAVFS_meanNsigma_Acontant2 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant2);
+ param->usAVFS_meanNsigma_DC_tol_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_DC_tol_sigma);
+ param->usAVFS_meanNsigma_Platform_mean = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_mean);
+ param->usAVFS_meanNsigma_Platform_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_sigma);
+ param->ulGB_VDROOP_TABLE_CKSOFF_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a0);
+ param->ulGB_VDROOP_TABLE_CKSOFF_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a1);
+ param->ulGB_VDROOP_TABLE_CKSOFF_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a2);
+ param->ulGB_VDROOP_TABLE_CKSON_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a0);
+ param->ulGB_VDROOP_TABLE_CKSON_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a1);
+ param->ulGB_VDROOP_TABLE_CKSON_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a2);
+ param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
+ param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2);
+ param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b);
+ param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_m1);
+ param->usAVFSGB_FUSE_TABLE_CKSON_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSON_m2);
+ param->ulAVFSGB_FUSE_TABLE_CKSON_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_b);
+ param->usMaxVoltage_0_25mv = le16_to_cpu(profile->usMaxVoltage_0_25mv);
param->ucEnableGB_VDROOP_TABLE_CKSOFF = profile->ucEnableGB_VDROOP_TABLE_CKSOFF;
param->ucEnableGB_VDROOP_TABLE_CKSON = profile->ucEnableGB_VDROOP_TABLE_CKSON;
param->ucEnableGB_FUSE_TABLE_CKSOFF = profile->ucEnableGB_FUSE_TABLE_CKSOFF;
param->ucEnableGB_FUSE_TABLE_CKSON = profile->ucEnableGB_FUSE_TABLE_CKSON;
- param->usPSM_Age_ComFactor = profile->usPSM_Age_ComFactor;
+ param->usPSM_Age_ComFactor = le16_to_cpu(profile->usPSM_Age_ComFactor);
param->ucEnableApplyAVFS_CKS_OFF_Voltage = profile->ucEnableApplyAVFS_CKS_OFF_Voltage;
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
index 1e35a9625..fc898afce 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
@@ -281,6 +281,7 @@ struct pp_atom_ctrl__avfs_parameters {
extern bool atomctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, const uint32_t pinId, pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment);
extern int atomctrl_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage);
+extern int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr, uint16_t virtual_voltage_id, uint16_t *voltage);
extern uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr);
extern int atomctrl_get_memory_clock_spread_spectrum(struct pp_hwmgr *hwmgr, const uint32_t memory_clock, pp_atomctrl_internal_ss_info *ssInfo);
extern int atomctrl_get_engine_clock_spread_spectrum(struct pp_hwmgr *hwmgr, const uint32_t engine_clock, pp_atomctrl_internal_ss_info *ssInfo);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h
index 009bd5963..8f50a0383 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h
@@ -50,55 +50,45 @@ typedef union _fInt {
* Function Declarations
* -------------------------------------------------------------------------------
*/
-fInt ConvertToFraction(int); /* Use this to convert an INT to a FINT */
-fInt Convert_ULONG_ToFraction(uint32_t); /* Use this to convert an uint32_t to a FINT */
-fInt GetScaledFraction(int, int); /* Use this to convert an INT to a FINT after scaling it by a factor */
-int ConvertBackToInteger(fInt); /* Convert a FINT back to an INT that is scaled by 1000 (i.e. last 3 digits are the decimal digits) */
-
-fInt fNegate(fInt); /* Returns -1 * input fInt value */
-fInt fAdd (fInt, fInt); /* Returns the sum of two fInt numbers */
-fInt fSubtract (fInt A, fInt B); /* Returns A-B - Sometimes easier than Adding negative numbers */
-fInt fMultiply (fInt, fInt); /* Returns the product of two fInt numbers */
-fInt fDivide (fInt A, fInt B); /* Returns A/B */
-fInt fGetSquare(fInt); /* Returns the square of a fInt number */
-fInt fSqrt(fInt); /* Returns the Square Root of a fInt number */
-
-int uAbs(int); /* Returns the Absolute value of the Int */
-fInt fAbs(fInt); /* Returns the Absolute value of the fInt */
-int uPow(int base, int exponent); /* Returns base^exponent an INT */
-
-void SolveQuadracticEqn(fInt, fInt, fInt, fInt[]); /* Returns the 2 roots via the array */
-bool Equal(fInt, fInt); /* Returns true if two fInts are equal to each other */
-bool GreaterThan(fInt A, fInt B); /* Returns true if A > B */
-
-fInt fExponential(fInt exponent); /* Can be used to calculate e^exponent */
-fInt fNaturalLog(fInt value); /* Can be used to calculate ln(value) */
+static fInt ConvertToFraction(int); /* Use this to convert an INT to a FINT */
+static fInt Convert_ULONG_ToFraction(uint32_t); /* Use this to convert a uint32_t to a FINT */
+static fInt GetScaledFraction(int, int); /* Use this to convert an INT to a FINT after scaling it by a factor */
+static int ConvertBackToInteger(fInt); /* Convert a FINT back to an INT that is scaled by 1000 (i.e. last 3 digits are the decimal digits) */
+
+static fInt fNegate(fInt); /* Returns -1 * input fInt value */
+static fInt fAdd (fInt, fInt); /* Returns the sum of two fInt numbers */
+static fInt fSubtract (fInt A, fInt B); /* Returns A - B; sometimes easier than adding negative numbers */
+static fInt fMultiply (fInt, fInt); /* Returns the product of two fInt numbers */
+static fInt fDivide (fInt A, fInt B); /* Returns A/B */
+static fInt fGetSquare(fInt); /* Returns the square of a fInt number */
+static fInt fSqrt(fInt); /* Returns the Square Root of a fInt number */
+
+static int uAbs(int); /* Returns the Absolute value of the Int */
+static int uPow(int base, int exponent); /* Returns base^exponent as an INT */
+
+static void SolveQuadracticEqn(fInt, fInt, fInt, fInt[]); /* Returns the 2 roots via the array */
+static bool Equal(fInt, fInt); /* Returns true if two fInts are equal to each other */
+static bool GreaterThan(fInt A, fInt B); /* Returns true if A > B */
+
+static fInt fExponential(fInt exponent); /* Can be used to calculate e^exponent */
+static fInt fNaturalLog(fInt value); /* Can be used to calculate ln(value) */
/* Fuse decoding functions
* -------------------------------------------------------------------------------------
*/
-fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength);
-fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength);
-fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength);
+static fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength);
+static fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength);
+static fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength);
/* Internal Support Functions - Use these ONLY for testing or adding to internal functions
* -------------------------------------------------------------------------------------
* Some of the following functions take two INTs as their input - This is unsafe for a variety of reasons.
*/
-fInt Add (int, int); /* Add two INTs and return Sum as FINT */
-fInt Multiply (int, int); /* Multiply two INTs and return Product as FINT */
-fInt Divide (int, int); /* You get the idea... */
-fInt fNegate(fInt);
+static fInt Divide (int, int); /* Divide two INTs and return result as FINT */
+static fInt fNegate(fInt);
-int uGetScaledDecimal (fInt); /* Internal function */
-int GetReal (fInt A); /* Internal function */
-
-/* Future Additions and Incomplete Functions
- * -------------------------------------------------------------------------------------
- */
-int GetRoundedValue(fInt); /* Incomplete function - Useful only when Precision is lacking */
- /* Let us say we have 2.126 but can only handle 2 decimal points. We could */
- /* either chop of 6 and keep 2.12 or use this function to get 2.13, which is more accurate */
+static int uGetScaledDecimal (fInt); /* Internal function */
+static int GetReal (fInt A); /* Internal function */
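/* Representation note (illustrative; SHIFT_AMOUNT is defined elsewhere in
 * this header): an fInt stores a value x as
 *
 *	X.full = x * 2^SHIFT_AMOUNT
 *
 * so GetReal() recovers floor(X.full / 2^SHIFT_AMOUNT), and a fixed-point
 * multiply in this scheme is (X.full * Y.full) >> SHIFT_AMOUNT -- which is
 * why fMultiply() below notes that it widens to int64_t first.
 */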
/* -------------------------------------------------------------------------------------
* TROUBLESHOOTING INFORMATION
@@ -115,7 +105,7 @@ int GetRoundedValue(fInt); /* Incomplete function - Usef
* START OF CODE
* -------------------------------------------------------------------------------------
*/
-fInt fExponential(fInt exponent) /*Can be used to calculate e^exponent*/
+static fInt fExponential(fInt exponent) /*Can be used to calculate e^exponent*/
{
uint32_t i;
bool bNegated = false;
@@ -154,7 +144,7 @@ fInt fExponential(fInt exponent) /*Can be used to calculate e^exponent*/
return solution;
}
-fInt fNaturalLog(fInt value)
+static fInt fNaturalLog(fInt value)
{
uint32_t i;
fInt upper_bound = Divide(8, 1000);
@@ -179,7 +169,7 @@ fInt fNaturalLog(fInt value)
return (fAdd(solution, error_term));
}
-fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength)
+static fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength)
{
fInt f_fuse_value = Convert_ULONG_ToFraction(fuse_value);
fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1);
@@ -194,7 +184,7 @@ fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t b
}
-fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength)
+static fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength)
{
fInt f_fuse_value = Convert_ULONG_ToFraction(fuse_value);
fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1);
@@ -212,7 +202,7 @@ fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint
return f_decoded_value;
}
-fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength)
+static fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength)
{
fInt fLeakage;
fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1);
@@ -225,7 +215,7 @@ fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min,
return fLeakage;
}
-fInt ConvertToFraction(int X) /*Add all range checking here. Is it possible to make fInt a private declaration? */
+static fInt ConvertToFraction(int X) /*Add all range checking here. Is it possible to make fInt a private declaration? */
{
fInt temp;
@@ -237,13 +227,13 @@ fInt ConvertToFraction(int X) /*Add all range checking here. Is it possible to m
return temp;
}
-fInt fNegate(fInt X)
+static fInt fNegate(fInt X)
{
fInt CONSTANT_NEGONE = ConvertToFraction(-1);
return (fMultiply(X, CONSTANT_NEGONE));
}
-fInt Convert_ULONG_ToFraction(uint32_t X)
+static fInt Convert_ULONG_ToFraction(uint32_t X)
{
fInt temp;
@@ -255,7 +245,7 @@ fInt Convert_ULONG_ToFraction(uint32_t X)
return temp;
}
-fInt GetScaledFraction(int X, int factor)
+static fInt GetScaledFraction(int X, int factor)
{
int times_shifted, factor_shifted;
bool bNEGATED;
@@ -304,7 +294,7 @@ fInt GetScaledFraction(int X, int factor)
}
/* Addition using two fInts */
-fInt fAdd (fInt X, fInt Y)
+static fInt fAdd (fInt X, fInt Y)
{
fInt Sum;
@@ -314,7 +304,7 @@ fInt fAdd (fInt X, fInt Y)
}
/* Subtraction using two fInts */
-fInt fSubtract (fInt X, fInt Y)
+static fInt fSubtract (fInt X, fInt Y)
{
fInt Difference;
@@ -323,7 +313,7 @@ fInt fSubtract (fInt X, fInt Y)
return Difference;
}
-bool Equal(fInt A, fInt B)
+static bool Equal(fInt A, fInt B)
{
if (A.full == B.full)
return true;
@@ -331,7 +321,7 @@ bool Equal(fInt A, fInt B)
return false;
}
-bool GreaterThan(fInt A, fInt B)
+static bool GreaterThan(fInt A, fInt B)
{
if (A.full > B.full)
return true;
@@ -339,7 +329,7 @@ bool GreaterThan(fInt A, fInt B)
return false;
}
-fInt fMultiply (fInt X, fInt Y) /* Uses 64-bit integers (int64_t) */
+static fInt fMultiply (fInt X, fInt Y) /* Uses 64-bit integers (int64_t) */
{
fInt Product;
int64_t tempProduct;
@@ -363,7 +353,7 @@ fInt fMultiply (fInt X, fInt Y) /* Uses 64-bit integers (int64_t) */
return Product;
}
-fInt fDivide (fInt X, fInt Y)
+static fInt fDivide (fInt X, fInt Y)
{
fInt fZERO, fQuotient;
int64_t longlongX, longlongY;
@@ -384,7 +374,7 @@ fInt fDivide (fInt X, fInt Y)
return fQuotient;
}
-int ConvertBackToInteger (fInt A) /*THIS is the function that will be used to check with the Golden settings table*/
+static int ConvertBackToInteger (fInt A) /*THIS is the function that will be used to check with the Golden settings table*/
{
fInt fullNumber, scaledDecimal, scaledReal;
@@ -397,13 +387,13 @@ int ConvertBackToInteger (fInt A) /*THIS is the function that will be used to ch
return fullNumber.full;
}
-fInt fGetSquare(fInt A)
+static fInt fGetSquare(fInt A)
{
return fMultiply(A,A);
}
/* x_new = x_old - (x_old^2 - C) / (2 * x_old) */
-fInt fSqrt(fInt num)
+static fInt fSqrt(fInt num)
{
fInt F_divide_Fprime, Fprime;
fInt test;
@@ -460,7 +450,7 @@ fInt fSqrt(fInt num)
return (x_new);
}
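/* The Newton step encoded in the comment above fSqrt(), written out for
 * f(x) = x^2 - C:
 *
 *	x_{n+1} = x_n - (x_n^2 - C) / (2 * x_n) = (x_n + C / x_n) / 2
 *
 * each iteration averages the current guess with C divided by it, and
 * converges quadratically to sqrt(C).
 */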
-void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[])
+static void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[])
{
fInt *pRoots = &Roots[0];
fInt temp, root_first, root_second;
@@ -498,52 +488,13 @@ void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[])
* -----------------------------------------------------------------------------
*/
-/* Addition using two normal ints - Temporary - Use only for testing purposes?. */
-fInt Add (int X, int Y)
-{
- fInt A, B, Sum;
-
- A.full = (X << SHIFT_AMOUNT);
- B.full = (Y << SHIFT_AMOUNT);
-
- Sum.full = A.full + B.full;
-
- return Sum;
-}
-
/* Conversion Functions */
-int GetReal (fInt A)
+static int GetReal (fInt A)
{
return (A.full >> SHIFT_AMOUNT);
}
-/* Temporarily Disabled */
-int GetRoundedValue(fInt A) /*For now, round the 3rd decimal place */
-{
- /* ROUNDING TEMPORARLY DISABLED
- int temp = A.full;
- int decimal_cutoff, decimal_mask = 0x000001FF;
- decimal_cutoff = temp & decimal_mask;
- if (decimal_cutoff > 0x147) {
- temp += 673;
- }*/
-
- return ConvertBackToInteger(A)/10000; /*Temporary - in case this was used somewhere else */
-}
-
-fInt Multiply (int X, int Y)
-{
- fInt A, B, Product;
-
- A.full = X << SHIFT_AMOUNT;
- B.full = Y << SHIFT_AMOUNT;
-
- Product = fMultiply(A, B);
-
- return Product;
-}
-
-fInt Divide (int X, int Y)
+static fInt Divide (int X, int Y)
{
fInt A, B, Quotient;
@@ -555,7 +506,7 @@ fInt Divide (int X, int Y)
return Quotient;
}
-int uGetScaledDecimal (fInt A) /*Converts the fractional portion to whole integers - Costly function */
+static int uGetScaledDecimal (fInt A) /*Converts the fractional portion to whole integers - Costly function */
{
int dec[PRECISION];
int i, scaledDecimal = 0, tmp = A.partial.decimal;
@@ -570,7 +521,7 @@ int uGetScaledDecimal (fInt A) /*Converts the fractional portion to whole intege
return scaledDecimal;
}
-int uPow(int base, int power)
+static int uPow(int base, int power)
{
if (power == 0)
return 1;
@@ -578,15 +529,7 @@ int uPow(int base, int power)
return (base)*uPow(base, power - 1);
}
-fInt fAbs(fInt A)
-{
- if (A.partial.real < 0)
- return (fMultiply(A, ConvertToFraction(-1)));
- else
- return A;
-}
-
-int uAbs(int X)
+static int uAbs(int X)
{
if (X < 0)
return (X * -1);
@@ -594,7 +537,7 @@ int uAbs(int X)
return X;
}
-fInt fRoundUpByStepSize(fInt A, fInt fStepSize, bool error_term)
+static fInt fRoundUpByStepSize(fInt A, fInt fStepSize, bool error_term)
{
fInt solution;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
index 2f1a14fe0..6c321b0d8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
@@ -794,19 +794,35 @@ static const ATOM_PPLIB_STATE_V2 *get_state_entry_v2(
static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table(
struct pp_hwmgr *hwmgr)
{
- const void *table_addr = NULL;
+ const void *table_addr = hwmgr->soft_pp_table;
uint8_t frev, crev;
uint16_t size;
- table_addr = cgs_atom_get_data_table(hwmgr->device,
- GetIndexIntoMasterTable(DATA, PowerPlayInfo),
- &size, &frev, &crev);
+ if (!table_addr) {
+ table_addr = cgs_atom_get_data_table(hwmgr->device,
+ GetIndexIntoMasterTable(DATA, PowerPlayInfo),
+ &size, &frev, &crev);
- hwmgr->soft_pp_table = table_addr;
+ hwmgr->soft_pp_table = table_addr;
+ hwmgr->soft_pp_table_size = size;
+ }
return (const ATOM_PPLIB_POWERPLAYTABLE *)table_addr;
}
+int pp_tables_get_response_times(struct pp_hwmgr *hwmgr,
+ uint32_t *vol_rep_time, uint32_t *bb_rep_time)
+{
+ const ATOM_PPLIB_POWERPLAYTABLE *powerplay_tab = get_powerplay_table(hwmgr);
+
+ PP_ASSERT_WITH_CODE(NULL != powerplay_tab,
+ "Missing PowerPlay Table!", return -EINVAL);
+
+ *vol_rep_time = (uint32_t)le16_to_cpu(powerplay_tab->usVoltageTime);
+ *bb_rep_time = (uint32_t)le16_to_cpu(powerplay_tab->usBackbiasTime);
+
+ return 0;
+}
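/* Hypothetical caller sketch: the two outputs are the raw usVoltageTime and
 * usBackbiasTime fields of the PowerPlay table, already converted to CPU
 * endianness.
 *
 *	uint32_t vol_rep_time, bb_rep_time;
 *
 *	if (!pp_tables_get_response_times(hwmgr, &vol_rep_time, &bb_rep_time))
 *		program_smc_response_times(vol_rep_time, bb_rep_time);	// hypothetical
 */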
int pp_tables_get_num_of_entries(struct pp_hwmgr *hwmgr,
unsigned long *num_of_entries)
@@ -1499,7 +1515,7 @@ int get_number_of_vce_state_table_entries(
const ATOM_PPLIB_VCE_State_Table *vce_table =
get_vce_state_table(hwmgr, table);
- if (vce_table > 0)
+ if (vce_table)
return vce_table->numEntries;
return 0;
@@ -1589,11 +1605,6 @@ static int pp_tables_initialize(struct pp_hwmgr *hwmgr)
static int pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->soft_pp_table) {
- kfree(hwmgr->soft_pp_table);
- hwmgr->soft_pp_table = NULL;
- }
-
if (NULL != hwmgr->dyn_state.vddc_dependency_on_sclk) {
kfree(hwmgr->dyn_state.vddc_dependency_on_sclk);
hwmgr->dyn_state.vddc_dependency_on_sclk = NULL;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h
index 304348024..baddaa756 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h
@@ -32,16 +32,19 @@ struct pp_hw_power_state;
extern const struct pp_table_func pptable_funcs;
typedef int (*pp_tables_hw_clock_info_callback)(struct pp_hwmgr *hwmgr,
- struct pp_hw_power_state *hw_ps,
- unsigned int index,
- const void *clock_info);
+ struct pp_hw_power_state *hw_ps,
+ unsigned int index,
+ const void *clock_info);
int pp_tables_get_num_of_entries(struct pp_hwmgr *hwmgr,
- unsigned long *num_of_entries);
+ unsigned long *num_of_entries);
int pp_tables_get_entry(struct pp_hwmgr *hwmgr,
- unsigned long entry_index,
- struct pp_power_state *ps,
- pp_tables_hw_clock_info_callback func);
+ unsigned long entry_index,
+ struct pp_power_state *ps,
+ pp_tables_hw_clock_info_callback func);
+
+int pp_tables_get_response_times(struct pp_hwmgr *hwmgr,
+ uint32_t *vol_rep_time, uint32_t *bb_rep_time);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
index 5d0f655bf..c7dc11122 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
@@ -571,7 +571,7 @@ int tonga_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if (0 == data->sclk_dpm_key_disabled) {
/* Checking if DPM is running. If we discover a hang because of this, we should skip this message. */
PP_ASSERT_WITH_CODE(
- (0 == tonga_is_dpm_running(hwmgr)),
+ !tonga_is_dpm_running(hwmgr),
"Trying to Disable SCLK DPM when DPM is disabled",
return -1
);
@@ -587,7 +587,7 @@ int tonga_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if (0 == data->mclk_dpm_key_disabled) {
/* Checking if DPM is running. If we discover a hang because of this, we should skip this message. */
PP_ASSERT_WITH_CODE(
- (0 == tonga_is_dpm_running(hwmgr)),
+ !tonga_is_dpm_running(hwmgr),
"Trying to Disable MCLK DPM when DPM is disabled",
return -1
);
@@ -614,7 +614,7 @@ int tonga_stop_dpm(struct pp_hwmgr *hwmgr)
if (0 == data->pcie_dpm_key_disabled) {
/* Checking if DPM is running. If we discover a hang because of this, we should skip this message. */
PP_ASSERT_WITH_CODE(
- (0 == tonga_is_dpm_running(hwmgr)),
+ !tonga_is_dpm_running(hwmgr),
"Trying to Disable PCIE DPM when DPM is disabled",
return -1
);
@@ -630,7 +630,7 @@ int tonga_stop_dpm(struct pp_hwmgr *hwmgr)
/* Checking if DPM is running. If we discover a hang because of this, we should skip this message. */
PP_ASSERT_WITH_CODE(
- (0 == tonga_is_dpm_running(hwmgr)),
+ !tonga_is_dpm_running(hwmgr),
"Trying to Disable Voltage CNTL when DPM is disabled",
return -1
);
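
The assertion changes in these hunks merely swap "0 == tonga_is_dpm_running(hwmgr)" for the equivalent "!tonga_is_dpm_running(hwmgr)". PP_ASSERT_WITH_CODE itself is defined in pp_debug.h rather than in this patch; it conventionally takes a condition, a message, and a statement to run on failure, roughly:

/* Sketch of the usual shape of the macro -- see pp_debug.h for
 * the authoritative definition. */
#define PP_ASSERT_WITH_CODE(cond, msg, code)		\
	do {						\
		if (!(cond)) {				\
			printk("%s\n", msg);		\
			code;				\
		}					\
	} while (0)

which is why call sites can pass "return -1" as the third argument.
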
@@ -688,8 +688,9 @@ int tonga_dpm_force_state(struct pp_hwmgr *hwmgr, uint32_t n)
uint32_t level_mask = 1 << n;
/* Checking if DPM is running. If we discover a hang because of this, we should skip this message. */
- PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr),
- "Trying to force SCLK when DPM is disabled", return -1;);
+ PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
+ "Trying to force SCLK when DPM is disabled",
+ return -1;);
if (0 == data->sclk_dpm_key_disabled)
return (0 == smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr,
@@ -712,8 +713,9 @@ int tonga_dpm_force_state_mclk(struct pp_hwmgr *hwmgr, uint32_t n)
uint32_t level_mask = 1 << n;
/* Checking if DPM is running. If we discover a hang because of this, we should skip this message. */
- PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr),
- "Trying to Force MCLK when DPM is disabled", return -1;);
+ PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
+ "Trying to Force MCLK when DPM is disabled",
+ return -1;);
if (0 == data->mclk_dpm_key_disabled)
return (0 == smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr,
@@ -735,8 +737,9 @@ int tonga_dpm_force_state_pcie(struct pp_hwmgr *hwmgr, uint32_t n)
tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
/* Checking if DPM is running. If we discover a hang because of this, we should skip this message. */
- PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr),
- "Trying to Force PCIE level when DPM is disabled", return -1;);
+ PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
+ "Trying to Force PCIE level when DPM is disabled",
+ return -1;);
if (0 == data->pcie_dpm_key_disabled)
return (0 == smum_send_msg_to_smc_with_parameter(
hwmgr->smumgr,
@@ -774,7 +777,7 @@ int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
uint32_t tmp;
int result;
- bool error = 0;
+ bool error = false;
result = tonga_read_smc_sram_dword(hwmgr->smumgr,
SMU72_FIRMWARE_HEADER_LOCATION +
@@ -933,11 +936,11 @@ int tonga_init_power_gate_state(struct pp_hwmgr *hwmgr)
{
tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- data->uvd_power_gated = 0;
- data->vce_power_gated = 0;
- data->samu_power_gated = 0;
- data->acp_power_gated = 0;
- data->pg_acp_init = 1;
+ data->uvd_power_gated = false;
+ data->vce_power_gated = false;
+ data->samu_power_gated = false;
+ data->acp_power_gated = false;
+ data->pg_acp_init = true;
return 0;
}
@@ -955,7 +958,7 @@ int tonga_check_for_dpm_running(struct pp_hwmgr *hwmgr)
* because we may have test scenarios that need us to intentionally disable SCLK/MCLK DPM,
* whereas voltage control is a fundamental change that will not be disabled
*/
- return (0 == tonga_is_dpm_running(hwmgr) ? 0 : 1);
+ return (!tonga_is_dpm_running(hwmgr) ? 0 : 1);
}
/**
@@ -968,7 +971,7 @@ int tonga_check_for_dpm_stopped(struct pp_hwmgr *hwmgr)
{
tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
- if (0 != tonga_is_dpm_running(hwmgr)) {
+ if (tonga_is_dpm_running(hwmgr)) {
/* If HW Virtualization is enabled, dpm_table_start will not have a valid value */
if (!data->dpm_table_start) {
return 1;
@@ -991,7 +994,7 @@ static int tonga_trim_voltage_table(struct pp_hwmgr *hwmgr,
{
uint32_t table_size, i, j;
uint16_t vvalue;
- bool bVoltageFound = 0;
+ bool bVoltageFound = false;
pp_atomctrl_voltage_table *table;
PP_ASSERT_WITH_CODE((NULL != voltage_table), "Voltage Table empty.", return -1;);
@@ -1007,11 +1010,11 @@ static int tonga_trim_voltage_table(struct pp_hwmgr *hwmgr,
for (i = 0; i < voltage_table->count; i++) {
vvalue = voltage_table->entries[i].value;
- bVoltageFound = 0;
+ bVoltageFound = false;
for (j = 0; j < table->count; j++) {
if (vvalue == table->entries[j].value) {
- bVoltageFound = 1;
+ bVoltageFound = true;
break;
}
}
@@ -1331,7 +1334,6 @@ static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
{
uint32_t count;
uint8_t index;
- int result = 0;
tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table = pptable_info->vddgfx_lookup_table;
@@ -1378,7 +1380,7 @@ static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
}
}
- return result;
+ return 0;
}
@@ -2042,7 +2044,7 @@ static int tonga_populate_single_memory_level(
if ((data->mclk_stutter_mode_threshold != 0) &&
(memory_clock <= data->mclk_stutter_mode_threshold) &&
- (data->is_uvd_enabled == 0)
+ (!data->is_uvd_enabled)
&& (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)
&& (data->display_timing.num_existing_displays <= 2)
&& (data->display_timing.num_existing_displays != 0))
@@ -2705,7 +2707,7 @@ static int tonga_reset_single_dpm_table(
dpm_table->count = count;
for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) {
- dpm_table->dpm_levels[i].enabled = 0;
+ dpm_table->dpm_levels[i].enabled = false;
}
return 0;
@@ -2718,7 +2720,7 @@ static void tonga_setup_pcie_table_entry(
{
dpm_table->dpm_levels[index].value = pcie_gen;
dpm_table->dpm_levels[index].param1 = pcie_lanes;
- dpm_table->dpm_levels[index].enabled = 1;
+ dpm_table->dpm_levels[index].enabled = true;
}
static int tonga_setup_default_pcie_tables(struct pp_hwmgr *hwmgr)
@@ -2828,7 +2830,7 @@ static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
allowed_vdd_sclk_table->entries[i].clk) {
data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
allowed_vdd_sclk_table->entries[i].clk;
- data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; to do */
+ data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = true; /*(i==0) ? 1 : 0; to do */
data->dpm_table.sclk_table.count++;
}
}
@@ -2842,7 +2844,7 @@ static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
allowed_vdd_mclk_table->entries[i].clk) {
data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
allowed_vdd_mclk_table->entries[i].clk;
- data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; */
+ data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = true; /*(i==0) ? 1 : 0; */
data->dpm_table.mclk_table.count++;
}
}
@@ -3026,8 +3028,8 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
reg_value = 0;
if ((0 == reg_value) &&
- (0 == atomctrl_get_pp_assign_pin(hwmgr,
- VDDC_VRHOT_GPIO_PINID, &gpio_pin_assignment))) {
+ (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID,
+ &gpio_pin_assignment))) {
table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_RegulatorHot);
@@ -3040,8 +3042,8 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
/* ACDC Switch GPIO */
reg_value = 0;
if ((0 == reg_value) &&
- (0 == atomctrl_get_pp_assign_pin(hwmgr,
- PP_AC_DC_SWITCH_GPIO_PINID, &gpio_pin_assignment))) {
+ (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
+ &gpio_pin_assignment))) {
table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_AutomaticDCTransition);
@@ -3063,8 +3065,7 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
}
reg_value = 0;
- if ((0 == reg_value) &&
- (0 == atomctrl_get_pp_assign_pin(hwmgr,
+ if ((0 == reg_value) && (atomctrl_get_pp_assign_pin(hwmgr,
THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment))) {
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ThermalOutGPIO);
@@ -3135,7 +3136,7 @@ int tonga_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
if (0 == data->sclk_dpm_key_disabled) {
/* Checking if DPM is running. If we discover a hang because of this, we should skip this message. */
- if (0 != tonga_is_dpm_running(hwmgr))
+ if (tonga_is_dpm_running(hwmgr))
printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled \n");
if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
@@ -3150,7 +3151,7 @@ int tonga_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
if (0 == data->mclk_dpm_key_disabled) {
/* Checking if DPM is running. If we discover a hang because of this, we should skip this message. */
- if (0 != tonga_is_dpm_running(hwmgr))
+ if (tonga_is_dpm_running(hwmgr))
printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled \n");
if (0 != data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
@@ -3261,7 +3262,7 @@ int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwm
/* initialize vddc_dep_on_dal_pwrl table */
table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
- table_clk_vlt = (struct phm_clock_voltage_dependency_table *)kzalloc(table_size, GFP_KERNEL);
+ table_clk_vlt = kzalloc(table_size, GFP_KERNEL);
if (NULL == table_clk_vlt) {
printk(KERN_ERR "[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! \n");
@@ -3336,9 +3337,9 @@ int tonga_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
int result = 1;
- PP_ASSERT_WITH_CODE (0 == tonga_is_dpm_running(hwmgr),
- "Trying to Unforce DPM when DPM is disabled. Returning without sending SMC message.",
- return result);
+ PP_ASSERT_WITH_CODE (!tonga_is_dpm_running(hwmgr),
+ "Trying to Unforce DPM when DPM is disabled. Returning without sending SMC message.",
+ return result);
if (0 == data->pcie_dpm_key_disabled) {
PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(
@@ -3742,7 +3743,7 @@ uint8_t tonga_get_memory_modile_index(struct pp_hwmgr *hwmgr)
bool tonga_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg)
{
- bool result = 1;
+ bool result = true;
switch (inReg) {
case mmMC_SEQ_RAS_TIMING:
@@ -3826,7 +3827,7 @@ bool tonga_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg)
break;
default:
- result = 0;
+ result = false;
break;
}
@@ -4422,13 +4423,6 @@ int tonga_reset_asic_tasks(struct pp_hwmgr *hwmgr)
int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- if (data->soft_pp_table) {
- kfree(data->soft_pp_table);
- data->soft_pp_table = NULL;
- }
-
return phm_hwmgr_backend_fini(hwmgr);
}
@@ -4442,7 +4436,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
int result = 0;
SMU72_Discrete_DpmTable *table = NULL;
- tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+ tonga_hwmgr *data;
pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
phw_tonga_ulv_parm *ulv;
@@ -4451,7 +4445,13 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((NULL != hwmgr),
"Invalid Parameter!", return -1;);
- data->dll_defaule_on = 0;
+ data = kzalloc(sizeof(struct tonga_hwmgr), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ hwmgr->backend = data;
+
+ data->dll_defaule_on = false;
data->sram_end = SMC_RAM_END;
data->activity_target[0] = PPTONGA_TARGETACTIVITY_DFLT;
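
With this hunk tonga_hwmgr_backend_init() allocates its own backend state instead of assuming tonga_hwmgr_init() already did (the old code dereferenced hwmgr->backend before anything had been assigned; the deleted allocation in tonga_hwmgr_init() further down was also followed by a memset() that kzalloc() makes redundant). The essential shape, reduced to plain C with userspace calloc/free standing in for kzalloc/kfree:

#include <stdlib.h>

struct backend { int ready; };
struct mgr { void *backend; };

static int backend_init(struct mgr *m)
{
	struct backend *data = calloc(1, sizeof(*data));	/* zeroed, like kzalloc */

	if (!data)
		return -1;		/* -ENOMEM in the kernel */

	m->backend = data;		/* publish before further setup */
	data->ready = 1;
	return 0;
}

static void backend_fini(struct mgr *m)
{
	free(m->backend);
	m->backend = NULL;
}
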
@@ -4557,13 +4557,13 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
/* ULV Support*/
ulv = &(data->ulv);
- ulv->ulv_supported = 0;
+ ulv->ulv_supported = false;
/* Initialize Dynamic State Adjustment Rule Settings */
result = tonga_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
if (result)
printk(KERN_ERR "[ powerplay ] tonga_initializa_dynamic_state_adjustment_rule_settings failed!\n");
- data->uvd_enabled = 0;
+ data->uvd_enabled = false;
table = &(data->smc_state_table);
@@ -4571,7 +4571,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
* if ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable,
* Peak Current Control feature is enabled and we should program PCC HW register
*/
- if (0 == atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
+ if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
uint32_t temp_reg = cgs_read_ind_register(hwmgr->device,
CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
@@ -4610,7 +4610,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SMU7);
- data->vddc_phase_shed_control = 0;
+ data->vddc_phase_shed_control = false;
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_UVDPowerGating);
@@ -4629,7 +4629,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
}
if (0 == result) {
- data->is_tlu_enabled = 0;
+ data->is_tlu_enabled = false;
hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
TONGA_MAX_HARDWARE_POWERLEVELS;
hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
@@ -4639,7 +4639,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
result = cgs_query_system_info(hwmgr->device, &sys_info);
if (result)
- data->pcie_gen_cap = 0x30007;
+ data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
else
data->pcie_gen_cap = (uint32_t)sys_info.value;
if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
@@ -4648,7 +4648,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
result = cgs_query_system_info(hwmgr->device, &sys_info);
if (result)
- data->pcie_lane_cap = 0x2f0000;
+ data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
else
data->pcie_lane_cap = (uint32_t)sys_info.value;
} else {
@@ -5310,9 +5310,8 @@ static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if ((0 == data->sclk_dpm_key_disabled) &&
(data->need_update_smu7_dpm_table &
(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
- PP_ASSERT_WITH_CODE(
- 0 == tonga_is_dpm_running(hwmgr),
- "Trying to freeze SCLK DPM when DPM is disabled",
+ PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
+ "Trying to freeze SCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(
0 == smum_send_msg_to_smc(hwmgr->smumgr,
@@ -5324,8 +5323,8 @@ static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if ((0 == data->mclk_dpm_key_disabled) &&
(data->need_update_smu7_dpm_table &
DPMTABLE_OD_UPDATE_MCLK)) {
- PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr),
- "Trying to freeze MCLK DPM when DPM is disabled",
+ PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
+ "Trying to freeze MCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(
0 == smum_send_msg_to_smc(hwmgr->smumgr,
@@ -5460,7 +5459,6 @@ static int tonga_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
static int tonga_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct tonga_power_state *hw_state)
{
- int result = 0;
struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
uint32_t high_limit_count;
@@ -5480,7 +5478,7 @@ static int tonga_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct tonga_powe
hw_state->performance_levels[0].memory_clock,
hw_state->performance_levels[high_limit_count].memory_clock);
- return result;
+ return 0;
}
static int tonga_generate_dpm_level_enable_mask(struct pp_hwmgr *hwmgr, const void *input)
@@ -5627,8 +5625,8 @@ static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
(data->need_update_smu7_dpm_table &
(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
- PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr),
- "Trying to Unfreeze SCLK DPM when DPM is disabled",
+ PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
+ "Trying to Unfreeze SCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(
0 == smum_send_msg_to_smc(hwmgr->smumgr,
@@ -5640,9 +5638,8 @@ static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if ((0 == data->mclk_dpm_key_disabled) &&
(data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
- PP_ASSERT_WITH_CODE(
- 0 == tonga_is_dpm_running(hwmgr),
- "Trying to Unfreeze MCLK DPM when DPM is disabled",
+ PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
+ "Trying to Unfreeze MCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(
0 == smum_send_msg_to_smc(hwmgr->smumgr,
@@ -6031,42 +6028,6 @@ static int tonga_get_fan_control_mode(struct pp_hwmgr *hwmgr)
CG_FDO_CTRL2, FDO_PWM_MODE);
}
-static int tonga_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- if (!data->soft_pp_table) {
- data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
- hwmgr->soft_pp_table_size,
- GFP_KERNEL);
- if (!data->soft_pp_table)
- return -ENOMEM;
- }
-
- *table = (char *)&data->soft_pp_table;
-
- return hwmgr->soft_pp_table_size;
-}
-
-static int tonga_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size)
-{
- struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
- if (!data->soft_pp_table) {
- data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
- if (!data->soft_pp_table)
- return -ENOMEM;
- }
-
- memcpy(data->soft_pp_table, buf, size);
-
- hwmgr->soft_pp_table = data->soft_pp_table;
-
- /* TODO: re-init powerplay to implement modified pptable */
-
- return 0;
-}
-
static int tonga_force_clock_level(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, uint32_t mask)
{
@@ -6174,11 +6135,96 @@ static int tonga_print_clock_levels(struct pp_hwmgr *hwmgr,
return size;
}
+static int tonga_get_sclk_od(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+ struct tonga_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+ struct tonga_single_dpm_table *golden_sclk_table =
+ &(data->golden_dpm_table.sclk_table);
+ int value;
+
+ value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
+ 100 /
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+ return value;
+}
+
+static int tonga_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+ struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+ struct tonga_single_dpm_table *golden_sclk_table =
+ &(data->golden_dpm_table.sclk_table);
+ struct pp_power_state *ps;
+ struct tonga_power_state *tonga_ps;
+
+ if (value > 20)
+ value = 20;
+
+ ps = hwmgr->request_ps;
+
+ if (ps == NULL)
+ return -EINVAL;
+
+ tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
+
+ tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].engine_clock =
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
+ value / 100 +
+ golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+ return 0;
+}
+
+static int tonga_get_mclk_od(struct pp_hwmgr *hwmgr)
+{
+ struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+ struct tonga_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+ struct tonga_single_dpm_table *golden_mclk_table =
+ &(data->golden_dpm_table.mclk_table);
+ int value;
+
+ value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
+ 100 /
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+ return value;
+}
+
+static int tonga_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+ struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+ struct tonga_single_dpm_table *golden_mclk_table =
+ &(data->golden_dpm_table.mclk_table);
+ struct pp_power_state *ps;
+ struct tonga_power_state *tonga_ps;
+
+ if (value > 20)
+ value = 20;
+
+ ps = hwmgr->request_ps;
+
+ if (ps == NULL)
+ return -EINVAL;
+
+ tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
+
+ tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].memory_clock =
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
+ value / 100 +
+ golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+ return 0;
+}
+
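
The four new callbacks express overdrive as a percentage over the top "golden" DPM level: the getters return (current - golden) * 100 / golden, and the setters program golden * value / 100 + golden, with the request clamped to 20. For example, with a golden top SCLK of 1000 MHz, tonga_set_sclk_od(hwmgr, 10) requests 1000 * 10 / 100 + 1000 = 1100 MHz, and tonga_get_sclk_od() then reads back (1100 - 1000) * 100 / 1000 = 10.
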
static const struct pp_hwmgr_func tonga_hwmgr_funcs = {
.backend_init = &tonga_hwmgr_backend_init,
.backend_fini = &tonga_hwmgr_backend_fini,
.asic_setup = &tonga_setup_asic_task,
.dynamic_state_management_enable = &tonga_enable_dpm_tasks,
+ .dynamic_state_management_disable = &tonga_disable_dpm_tasks,
.apply_state_adjust_rules = tonga_apply_state_adjust_rules,
.force_dpm_level = &tonga_force_dpm_level,
.power_state_set = tonga_set_power_state_tasks,
@@ -6212,22 +6258,16 @@ static const struct pp_hwmgr_func tonga_hwmgr_funcs = {
.check_states_equal = tonga_check_states_equal,
.set_fan_control_mode = tonga_set_fan_control_mode,
.get_fan_control_mode = tonga_get_fan_control_mode,
- .get_pp_table = tonga_get_pp_table,
- .set_pp_table = tonga_set_pp_table,
.force_clock_level = tonga_force_clock_level,
.print_clock_levels = tonga_print_clock_levels,
+ .get_sclk_od = tonga_get_sclk_od,
+ .set_sclk_od = tonga_set_sclk_od,
+ .get_mclk_od = tonga_get_mclk_od,
+ .set_mclk_od = tonga_set_mclk_od,
};
int tonga_hwmgr_init(struct pp_hwmgr *hwmgr)
{
- tonga_hwmgr *data;
-
- data = kzalloc (sizeof(tonga_hwmgr), GFP_KERNEL);
- if (data == NULL)
- return -ENOMEM;
- memset(data, 0x00, sizeof(tonga_hwmgr));
-
- hwmgr->backend = data;
hwmgr->hwmgr_func = &tonga_hwmgr_funcs;
hwmgr->pptable_func = &tonga_pptable_funcs;
pp_tonga_thermal_initialize(hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
index 573cd39fe..3961884bf 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
@@ -352,9 +352,6 @@ struct tonga_hwmgr {
bool samu_power_gated; /* 1: gated, 0:not gated */
bool acp_power_gated; /* 1: gated, 0:not gated */
bool pg_acp_init;
-
- /* soft pptable for re-uploading into smu */
- void *soft_pp_table;
};
typedef struct tonga_hwmgr tonga_hwmgr;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
index dccc859f6..cfb647f76 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
@@ -167,8 +167,7 @@ static int get_vddc_lookup_table(
table_size = sizeof(uint32_t) +
sizeof(phm_ppt_v1_voltage_lookup_record) * max_levels;
- table = (phm_ppt_v1_voltage_lookup_table *)
- kzalloc(table_size, GFP_KERNEL);
+ table = kzalloc(table_size, GFP_KERNEL);
if (NULL == table)
return -ENOMEM;
@@ -327,7 +326,7 @@ static int get_valid_clk(
table_size = sizeof(uint32_t) +
sizeof(uint32_t) * clk_volt_pp_table->count;
- table = (struct phm_clock_array *)kzalloc(table_size, GFP_KERNEL);
+ table = kzalloc(table_size, GFP_KERNEL);
if (NULL == table)
return -ENOMEM;
@@ -377,8 +376,7 @@ static int get_mclk_voltage_dependency_table(
table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
* mclk_dep_table->ucNumEntries;
- mclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
- kzalloc(table_size, GFP_KERNEL);
+ mclk_table = kzalloc(table_size, GFP_KERNEL);
if (NULL == mclk_table)
return -ENOMEM;
@@ -424,8 +422,7 @@ static int get_sclk_voltage_dependency_table(
table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
* tonga_table->ucNumEntries;
- sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
- kzalloc(table_size, GFP_KERNEL);
+ sclk_table = kzalloc(table_size, GFP_KERNEL);
if (NULL == sclk_table)
return -ENOMEM;
@@ -456,8 +453,7 @@ static int get_sclk_voltage_dependency_table(
table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
* polaris_table->ucNumEntries;
- sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
- kzalloc(table_size, GFP_KERNEL);
+ sclk_table = kzalloc(table_size, GFP_KERNEL);
if (NULL == sclk_table)
return -ENOMEM;
@@ -504,7 +500,7 @@ static int get_pcie_table(
table_size = sizeof(uint32_t) +
sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries;
- pcie_table = (phm_ppt_v1_pcie_table *)kzalloc(table_size, GFP_KERNEL);
+ pcie_table = kzalloc(table_size, GFP_KERNEL);
if (pcie_table == NULL)
return -ENOMEM;
@@ -541,7 +537,7 @@ static int get_pcie_table(
table_size = sizeof(uint32_t) +
sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries;
- pcie_table = (phm_ppt_v1_pcie_table *)kzalloc(table_size, GFP_KERNEL);
+ pcie_table = kzalloc(table_size, GFP_KERNEL);
if (pcie_table == NULL)
return -ENOMEM;
@@ -695,8 +691,7 @@ static int get_mm_clock_voltage_table(
table_size = sizeof(uint32_t) +
sizeof(phm_ppt_v1_mm_clock_voltage_dependency_record)
* mm_dependency_table->ucNumEntries;
- mm_table = (phm_ppt_v1_mm_clock_voltage_dependency_table *)
- kzalloc(table_size, GFP_KERNEL);
+ mm_table = kzalloc(table_size, GFP_KERNEL);
if (NULL == mm_table)
return -ENOMEM;
@@ -1073,13 +1068,9 @@ int tonga_pp_tables_initialize(struct pp_hwmgr *hwmgr)
int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
{
- int result = 0;
struct phm_ppt_v1_information *pp_table_information =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
- if (NULL != hwmgr->soft_pp_table)
- hwmgr->soft_pp_table = NULL;
-
kfree(pp_table_information->vdd_dep_on_sclk);
pp_table_information->vdd_dep_on_sclk = NULL;
@@ -1116,7 +1107,7 @@ int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
kfree(hwmgr->pptable);
hwmgr->pptable = NULL;
- return result;
+ return 0;
}
const struct pp_table_func tonga_pptable_funcs = {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
index 50b367d44..b764c8c05 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
@@ -132,6 +132,7 @@ struct amd_pp_init {
uint32_t chip_family;
uint32_t chip_id;
uint32_t rev_id;
+ bool powercontainment_enabled;
};
enum amd_pp_display_config_type{
AMD_PP_DisplayConfigType_None = 0,
@@ -342,6 +343,10 @@ struct amd_powerplay_funcs {
int (*set_pp_table)(void *handle, const char *buf, size_t size);
int (*force_clock_level)(void *handle, enum pp_clock_type type, uint32_t mask);
int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf);
+ int (*get_sclk_od)(void *handle);
+ int (*set_sclk_od)(void *handle, uint32_t value);
+ int (*get_mclk_od)(void *handle);
+ int (*set_mclk_od)(void *handle, uint32_t value);
};
struct amd_powerplay {
@@ -355,6 +360,8 @@ int amd_powerplay_init(struct amd_pp_init *pp_init,
int amd_powerplay_fini(void *handle);
+int amd_powerplay_reset(void *handle);
+
int amd_powerplay_display_configuration_change(void *handle,
const struct amd_pp_display_configuration *input);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index 56f712c7d..962cb5385 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -340,6 +340,7 @@ extern int phm_powergate_vce(struct pp_hwmgr *hwmgr, bool gate);
extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
extern int phm_setup_asic(struct pp_hwmgr *hwmgr);
extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr);
+extern int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr);
extern void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr);
extern bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr);
extern int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 77e8e33d5..bf0d2accf 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -278,6 +278,8 @@ struct pp_hwmgr_func {
int (*dynamic_state_management_enable)(
struct pp_hwmgr *hw_mgr);
+ int (*dynamic_state_management_disable)(
+ struct pp_hwmgr *hw_mgr);
int (*patch_boot_state)(struct pp_hwmgr *hwmgr,
struct pp_hw_power_state *hw_ps);
@@ -333,11 +335,13 @@ struct pp_hwmgr_func {
int (*get_clock_by_type)(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks);
int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks);
int (*power_off_asic)(struct pp_hwmgr *hwmgr);
- int (*get_pp_table)(struct pp_hwmgr *hwmgr, char **table);
- int (*set_pp_table)(struct pp_hwmgr *hwmgr, const char *buf, size_t size);
int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask);
int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf);
int (*enable_per_cu_power_gating)(struct pp_hwmgr *hwmgr, bool enable);
+ int (*get_sclk_od)(struct pp_hwmgr *hwmgr);
+ int (*set_sclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
+ int (*get_mclk_od)(struct pp_hwmgr *hwmgr);
+ int (*set_mclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
};
struct pp_table_func {
@@ -580,6 +584,7 @@ struct pp_hwmgr {
struct pp_smumgr *smumgr;
const void *soft_pp_table;
uint32_t soft_pp_table_size;
+ void *hardcode_pp_table;
bool need_pp_table_upload;
enum amd_dpm_forced_level dpm_level;
bool block_hw_access;
@@ -609,6 +614,7 @@ struct pp_hwmgr {
uint32_t num_ps;
struct pp_thermal_controller_info thermal_controller;
bool fan_ctrl_is_in_default_mode;
+ bool powercontainment_enabled;
uint32_t fan_ctrl_default_mode;
uint32_t tmin;
struct phm_microcode_version_info microcode_version_info;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index fc9e3d1dd..3c235f017 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -131,6 +131,12 @@ extern int smu_free_memory(void *device, void *handle);
smum_wait_on_indirect_register(smumgr, \
mm##port##_INDEX, index, value, mask)
+#define SMUM_WAIT_INDIRECT_REGISTER(smumgr, port, reg, value, mask) \
+ SMUM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, port, ix##reg, value, mask)
+
+#define SMUM_WAIT_INDIRECT_FIELD(smumgr, port, reg, field, fieldval) \
+ SMUM_WAIT_INDIRECT_REGISTER(smumgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
+ SMUM_FIELD_MASK(reg, field) )
#define SMUM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, \
index, value, mask) \
@@ -158,6 +164,10 @@ extern int smu_free_memory(void *device, void *handle);
(SMUM_FIELD_MASK(reg, field) & ((field_val) << \
SMUM_FIELD_SHIFT(reg, field))))
+#define SMUM_READ_INDIRECT_FIELD(device, port, reg, field) \
+ SMUM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
+ reg, field)
+
#define SMUM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, \
port, index, value, mask) \
smum_wait_on_indirect_register(smumgr, \
@@ -191,6 +201,13 @@ extern int smu_free_memory(void *device, void *handle);
SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
reg, field, fieldval))
+
+#define SMUM_WRITE_INDIRECT_FIELD(device, port, reg, field, fieldval) \
+ cgs_write_ind_register(device, port, ix##reg, \
+ SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
+ reg, field, fieldval))
+
+
#define SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, port, reg, field, fieldval) \
SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, port, reg, \
(fieldval) << SMUM_FIELD_SHIFT(reg, field), \
@@ -200,4 +217,16 @@ extern int smu_free_memory(void *device, void *handle);
SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, \
(fieldval) << SMUM_FIELD_SHIFT(reg, field), \
SMUM_FIELD_MASK(reg, field))
+
+#define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, index, value, mask) \
+ smum_wait_for_indirect_register_unequal(smumgr, \
+ mm##port##_INDEX, index, value, mask)
+
+#define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, value, mask) \
+ SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, ix##reg, value, mask)
+
+#define SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, port, reg, field, fieldval) \
+ SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
+ SMUM_FIELD_MASK(reg, field) )
+
#endif
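
The new wait macros layer mechanically: the FIELD form shifts fieldval into position and builds the mask, the REGISTER form prefixes the ix## token onto the register name, and the GIVEN_INDEX form supplies the mm##port##_INDEX selector to the underlying smum helper. Expanding one call by hand (the register and field names here are made up for illustration):

/* hypothetical call */
SMUM_WAIT_INDIRECT_FIELD(smumgr, SMC_IND, FOO_STATUS, BUSY, 0);

/* step 1: FIELD -> REGISTER */
SMUM_WAIT_INDIRECT_REGISTER(smumgr, SMC_IND, FOO_STATUS,
	(0) << SMUM_FIELD_SHIFT(FOO_STATUS, BUSY),
	SMUM_FIELD_MASK(FOO_STATUS, BUSY));

/* step 2: REGISTER -> GIVEN_INDEX, adding the ix prefix */
smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX, ixFOO_STATUS,
	(0) << SMUM_FIELD_SHIFT(FOO_STATUS, BUSY),
	SMUM_FIELD_MASK(FOO_STATUS, BUSY));
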
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index 0728c1e3d..7723473e5 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -23,6 +23,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <drm/amdgpu_drm.h>
#include "pp_instance.h"
#include "smumgr.h"
#include "cgs_common.h"
@@ -52,10 +53,10 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
handle->smu_mgr = smumgr;
switch (smumgr->chip_family) {
- case AMD_FAMILY_CZ:
+ case AMDGPU_FAMILY_CZ:
cz_smum_init(smumgr);
break;
- case AMD_FAMILY_VI:
+ case AMDGPU_FAMILY_VI:
switch (smumgr->chip_id) {
case CHIP_TONGA:
tonga_smum_init(smumgr);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index b22722eab..f42c536b3 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -479,7 +479,6 @@ static int tonga_request_smu_reload_fw(struct pp_smumgr *smumgr)
struct tonga_smumgr *tonga_smu =
(struct tonga_smumgr *)(smumgr->backend);
uint16_t fw_to_load;
- int result = 0;
struct SMU_DRAMData_TOC *toc;
/**
* First time this gets called during SmuMgr init,
@@ -563,7 +562,7 @@ static int tonga_request_smu_reload_fw(struct pp_smumgr *smumgr)
smumgr, PPSMC_MSG_LoadUcodes, fw_to_load),
"Fail to Request SMU Load uCode", return 0);
- return result;
+ return 0;
}
static int tonga_request_smu_load_specific_fw(struct pp_smumgr *smumgr,
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
index c89dc7777..b961a1c6c 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -26,7 +26,7 @@ TRACE_EVENT(amd_sched_job,
TP_fast_assign(
__entry->entity = sched_job->s_entity;
__entry->sched_job = sched_job;
- __entry->fence = &sched_job->s_fence->base;
+ __entry->fence = &sched_job->s_fence->finished;
__entry->name = sched_job->sched->name;
__entry->job_count = kfifo_len(
&sched_job->s_entity->job_queue) / sizeof(sched_job);
@@ -46,7 +46,7 @@ TRACE_EVENT(amd_sched_process_job,
),
TP_fast_assign(
- __entry->fence = &fence->base;
+ __entry->fence = &fence->finished;
),
TP_printk("fence=%p signaled", __entry->fence)
);
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index c16248cee..963a24d46 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -32,6 +32,7 @@
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
+static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);
struct kmem_cache *sched_fence_slab;
atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
@@ -140,7 +141,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
return r;
atomic_set(&entity->fence_seq, 0);
- entity->fence_context = fence_context_alloc(1);
+ entity->fence_context = fence_context_alloc(2);
return 0;
}
@@ -251,17 +252,21 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
s_fence = to_amd_sched_fence(fence);
if (s_fence && s_fence->sched == sched) {
- /* Fence is from the same scheduler */
- if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) {
- /* Ignore it when it is already scheduled */
- fence_put(entity->dependency);
- return false;
- }
- /* Wait for fence to be scheduled */
- entity->cb.func = amd_sched_entity_clear_dep;
- list_add_tail(&entity->cb.node, &s_fence->scheduled_cb);
- return true;
+ /*
+ * Fence is from the same scheduler, only need to wait for
+ * it to be scheduled
+ */
+ fence = fence_get(&s_fence->scheduled);
+ fence_put(entity->dependency);
+ entity->dependency = fence;
+ if (!fence_add_callback(fence, &entity->cb,
+ amd_sched_entity_clear_dep))
+ return true;
+
+ /* Ignore it when it is already scheduled */
+ fence_put(fence);
+ return false;
}
if (!fence_add_callback(entity->dependency, &entity->cb,
@@ -319,46 +324,114 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
return added;
}
-static void amd_sched_free_job(struct fence *f, struct fence_cb *cb) {
- struct amd_sched_job *job = container_of(cb, struct amd_sched_job, cb_free_job);
- schedule_work(&job->work_free_job);
-}
-
/* job_finish is called after hw fence signaled, and
* the job had already been deleted from ring_mirror_list
*/
-void amd_sched_job_finish(struct amd_sched_job *s_job)
+static void amd_sched_job_finish(struct work_struct *work)
{
- struct amd_sched_job *next;
+ struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
+ finish_work);
struct amd_gpu_scheduler *sched = s_job->sched;
+ /* remove job from ring_mirror_list */
+ spin_lock(&sched->job_list_lock);
+ list_del_init(&s_job->node);
if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
- if (cancel_delayed_work(&s_job->work_tdr))
- amd_sched_job_put(s_job);
+ struct amd_sched_job *next;
+
+ spin_unlock(&sched->job_list_lock);
+ cancel_delayed_work_sync(&s_job->work_tdr);
+ spin_lock(&sched->job_list_lock);
/* queue TDR for next job */
next = list_first_entry_or_null(&sched->ring_mirror_list,
struct amd_sched_job, node);
- if (next) {
- INIT_DELAYED_WORK(&next->work_tdr, s_job->timeout_callback);
- amd_sched_job_get(next);
+ if (next)
schedule_delayed_work(&next->work_tdr, sched->timeout);
- }
}
+ spin_unlock(&sched->job_list_lock);
+ sched->ops->free_job(s_job);
}
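
amd_sched_job_finish() now runs from a regular work item instead of directly in fence-callback context. That is what allows it to call cancel_delayed_work_sync(), which can sleep, on the job's timeout handler; note how job_list_lock is dropped around the sync cancel and re-taken before the next job's watchdog is armed. A sketch of that watchdog-handoff pattern, assuming kernel context (not the exact driver code):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct job {
	struct list_head node;
	struct delayed_work watchdog;
};

/* Retire a job and hand its watchdog over to the next one in line.
 * cancel_delayed_work_sync() may sleep, so the lock is dropped
 * around it, as amd_sched_job_finish() does above. */
static void job_retire(spinlock_t *lock, struct list_head *ring,
		       struct job *j, unsigned long timeout)
{
	struct job *next;

	spin_lock(lock);
	list_del_init(&j->node);
	spin_unlock(lock);

	cancel_delayed_work_sync(&j->watchdog);

	spin_lock(lock);
	next = list_first_entry_or_null(ring, struct job, node);
	if (next)
		schedule_delayed_work(&next->watchdog, timeout);
	spin_unlock(lock);
}
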
-void amd_sched_job_begin(struct amd_sched_job *s_job)
+static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
+{
+ struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
+ finish_cb);
+ schedule_work(&job->finish_work);
+}
+
+static void amd_sched_job_begin(struct amd_sched_job *s_job)
{
struct amd_gpu_scheduler *sched = s_job->sched;
+ spin_lock(&sched->job_list_lock);
+ list_add_tail(&s_job->node, &sched->ring_mirror_list);
if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
- list_first_entry_or_null(&sched->ring_mirror_list, struct amd_sched_job, node) == s_job)
- {
- INIT_DELAYED_WORK(&s_job->work_tdr, s_job->timeout_callback);
- amd_sched_job_get(s_job);
+ list_first_entry_or_null(&sched->ring_mirror_list,
+ struct amd_sched_job, node) == s_job)
+ schedule_delayed_work(&s_job->work_tdr, sched->timeout);
+ spin_unlock(&sched->job_list_lock);
+}
+
+static void amd_sched_job_timedout(struct work_struct *work)
+{
+ struct amd_sched_job *job = container_of(work, struct amd_sched_job,
+ work_tdr.work);
+
+ job->sched->ops->timedout_job(job);
+}
+
+void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
+{
+ struct amd_sched_job *s_job;
+
+ spin_lock(&sched->job_list_lock);
+ list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
+ if (fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
+ fence_put(s_job->s_fence->parent);
+ s_job->s_fence->parent = NULL;
+ }
+ }
+ atomic_set(&sched->hw_rq_count, 0);
+ spin_unlock(&sched->job_list_lock);
+}
+
+void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
+{
+ struct amd_sched_job *s_job, *tmp;
+ int r;
+
+ spin_lock(&sched->job_list_lock);
+ s_job = list_first_entry_or_null(&sched->ring_mirror_list,
+ struct amd_sched_job, node);
+ if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
schedule_delayed_work(&s_job->work_tdr, sched->timeout);
+
+ list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+ struct amd_sched_fence *s_fence = s_job->s_fence;
+ struct fence *fence;
+
+ spin_unlock(&sched->job_list_lock);
+ fence = sched->ops->run_job(s_job);
+ atomic_inc(&sched->hw_rq_count);
+ if (fence) {
+ s_fence->parent = fence_get(fence);
+ r = fence_add_callback(fence, &s_fence->cb,
+ amd_sched_process_job);
+ if (r == -ENOENT)
+ amd_sched_process_job(fence, &s_fence->cb);
+ else if (r)
+ DRM_ERROR("fence add callback failed (%d)\n",
+ r);
+ fence_put(fence);
+ } else {
+ DRM_ERROR("Failed to run job!\n");
+ amd_sched_process_job(NULL, &s_fence->cb);
+ }
+ spin_lock(&sched->job_list_lock);
}
+ spin_unlock(&sched->job_list_lock);
}
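
Two details are worth noting in the new recovery path: jobs are re-run in list order with job_list_lock dropped around run_job(), which may sleep, and the _safe iterator keeps the walk valid across that unlock window. This relies on recovery being invoked only while the scheduler's worker thread is parked (cf. amd_sched_blocked() below); under that assumption nothing else mutates ring_mirror_list mid-walk.
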
/**
@@ -372,36 +445,29 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
struct amd_sched_entity *entity = sched_job->s_entity;
- sched_job->use_sched = 1;
- fence_add_callback(&sched_job->s_fence->base,
- &sched_job->cb_free_job, amd_sched_free_job);
trace_amd_sched_job(sched_job);
+ fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
+ amd_sched_job_finish_cb);
wait_event(entity->sched->job_scheduled,
amd_sched_entity_in(sched_job));
}
/* init a sched_job with basic field */
int amd_sched_job_init(struct amd_sched_job *job,
- struct amd_gpu_scheduler *sched,
- struct amd_sched_entity *entity,
- void (*timeout_cb)(struct work_struct *work),
- void (*free_cb)(struct kref *refcount),
- void *owner, struct fence **fence)
+ struct amd_gpu_scheduler *sched,
+ struct amd_sched_entity *entity,
+ void *owner)
{
- INIT_LIST_HEAD(&job->node);
- kref_init(&job->refcount);
job->sched = sched;
job->s_entity = entity;
job->s_fence = amd_sched_fence_create(entity, owner);
if (!job->s_fence)
return -ENOMEM;
- job->s_fence->s_job = job;
- job->timeout_callback = timeout_cb;
- job->free_callback = free_cb;
+ INIT_WORK(&job->finish_work, amd_sched_job_finish);
+ INIT_LIST_HEAD(&job->node);
+ INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);
- if (fence)
- *fence = &job->s_fence->base;
return 0;
}
@@ -450,23 +516,25 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
struct amd_sched_fence *s_fence =
container_of(cb, struct amd_sched_fence, cb);
struct amd_gpu_scheduler *sched = s_fence->sched;
- unsigned long flags;
atomic_dec(&sched->hw_rq_count);
-
- /* remove job from ring_mirror_list */
- spin_lock_irqsave(&sched->job_list_lock, flags);
- list_del_init(&s_fence->s_job->node);
- sched->ops->finish_job(s_fence->s_job);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
-
- amd_sched_fence_signal(s_fence);
+ amd_sched_fence_finished(s_fence);
trace_amd_sched_process_job(s_fence);
- fence_put(&s_fence->base);
+ fence_put(&s_fence->finished);
wake_up_interruptible(&sched->wake_up_worker);
}
+static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
+{
+ if (kthread_should_park()) {
+ kthread_parkme();
+ return true;
+ }
+
+ return false;
+}
+
static int amd_sched_main(void *param)
{
struct sched_param sparam = {.sched_priority = 1};
@@ -476,14 +544,15 @@ static int amd_sched_main(void *param)
sched_setscheduler(current, SCHED_FIFO, &sparam);
while (!kthread_should_stop()) {
- struct amd_sched_entity *entity;
+ struct amd_sched_entity *entity = NULL;
struct amd_sched_fence *s_fence;
struct amd_sched_job *sched_job;
struct fence *fence;
wait_event_interruptible(sched->wake_up_worker,
- (entity = amd_sched_select_entity(sched)) ||
- kthread_should_stop());
+ (!amd_sched_blocked(sched) &&
+ (entity = amd_sched_select_entity(sched))) ||
+ kthread_should_stop());
if (!entity)
continue;
@@ -495,16 +564,19 @@ static int amd_sched_main(void *param)
s_fence = sched_job->s_fence;
atomic_inc(&sched->hw_rq_count);
- amd_sched_job_pre_schedule(sched, sched_job);
+ amd_sched_job_begin(sched_job);
+
fence = sched->ops->run_job(sched_job);
amd_sched_fence_scheduled(s_fence);
if (fence) {
+ s_fence->parent = fence_get(fence);
r = fence_add_callback(fence, &s_fence->cb,
amd_sched_process_job);
if (r == -ENOENT)
amd_sched_process_job(fence, &s_fence->cb);
else if (r)
- DRM_ERROR("fence add callback failed (%d)\n", r);
+ DRM_ERROR("fence add callback failed (%d)\n",
+ r);
fence_put(fence);
} else {
DRM_ERROR("Failed to run job!\n");
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 070095a94..7cbbbfb50 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -27,8 +27,6 @@
#include <linux/kfifo.h>
#include <linux/fence.h>
-#define AMD_SCHED_FENCE_SCHEDULED_BIT FENCE_FLAG_USER_BITS
-
struct amd_gpu_scheduler;
struct amd_sched_rq;
@@ -68,36 +66,34 @@ struct amd_sched_rq {
};
struct amd_sched_fence {
- struct fence base;
+ struct fence scheduled;
+ struct fence finished;
struct fence_cb cb;
- struct list_head scheduled_cb;
+ struct fence *parent;
struct amd_gpu_scheduler *sched;
spinlock_t lock;
void *owner;
- struct amd_sched_job *s_job;
};
struct amd_sched_job {
- struct kref refcount;
struct amd_gpu_scheduler *sched;
struct amd_sched_entity *s_entity;
struct amd_sched_fence *s_fence;
- bool use_sched; /* true if the job goes to scheduler */
- struct fence_cb cb_free_job;
- struct work_struct work_free_job;
- struct list_head node;
- struct delayed_work work_tdr;
- void (*timeout_callback) (struct work_struct *work);
- void (*free_callback)(struct kref *refcount);
+ struct fence_cb finish_cb;
+ struct work_struct finish_work;
+ struct list_head node;
+ struct delayed_work work_tdr;
};
-extern const struct fence_ops amd_sched_fence_ops;
+extern const struct fence_ops amd_sched_fence_ops_scheduled;
+extern const struct fence_ops amd_sched_fence_ops_finished;
static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
{
- struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence, base);
+ if (f->ops == &amd_sched_fence_ops_scheduled)
+ return container_of(f, struct amd_sched_fence, scheduled);
- if (__f->base.ops == &amd_sched_fence_ops)
- return __f;
+ if (f->ops == &amd_sched_fence_ops_finished)
+ return container_of(f, struct amd_sched_fence, finished);
return NULL;
}
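
With two embedded fences, to_amd_sched_fence() keys off f->ops to decide which member offset container_of() must undo; each embedded fence carries its own ops table, so the ops pointer doubles as a type tag. The same trick in miniature (types here are illustrative, not from the patch):

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ops { int id; };
static const struct ops ops_scheduled = { 1 };
static const struct ops ops_finished  = { 2 };

struct obj { const struct ops *ops; };

struct wrapper {
	struct obj scheduled;	/* initialized with &ops_scheduled */
	struct obj finished;	/* initialized with &ops_finished */
};

static struct wrapper *to_wrapper(struct obj *o)
{
	if (o->ops == &ops_scheduled)
		return container_of(o, struct wrapper, scheduled);
	if (o->ops == &ops_finished)
		return container_of(o, struct wrapper, finished);
	return NULL;		/* not one of ours */
}
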
@@ -109,8 +105,8 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
struct amd_sched_backend_ops {
struct fence *(*dependency)(struct amd_sched_job *sched_job);
struct fence *(*run_job)(struct amd_sched_job *sched_job);
- void (*begin_job)(struct amd_sched_job *sched_job);
- void (*finish_job)(struct amd_sched_job *sched_job);
+ void (*timedout_job)(struct amd_sched_job *sched_job);
+ void (*free_job)(struct amd_sched_job *sched_job);
};
enum amd_sched_priority {
@@ -152,25 +148,11 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
struct amd_sched_fence *amd_sched_fence_create(
struct amd_sched_entity *s_entity, void *owner);
void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
-void amd_sched_fence_signal(struct amd_sched_fence *fence);
+void amd_sched_fence_finished(struct amd_sched_fence *fence);
int amd_sched_job_init(struct amd_sched_job *job,
- struct amd_gpu_scheduler *sched,
- struct amd_sched_entity *entity,
- void (*timeout_cb)(struct work_struct *work),
- void (*free_cb)(struct kref* refcount),
- void *owner, struct fence **fence);
-void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched ,
- struct amd_sched_job *s_job);
-void amd_sched_job_finish(struct amd_sched_job *s_job);
-void amd_sched_job_begin(struct amd_sched_job *s_job);
-static inline void amd_sched_job_get(struct amd_sched_job *job) {
- if (job)
- kref_get(&job->refcount);
-}
-
-static inline void amd_sched_job_put(struct amd_sched_job *job) {
- if (job)
- kref_put(&job->refcount, job->free_callback);
-}
-
+ struct amd_gpu_scheduler *sched,
+ struct amd_sched_entity *entity,
+ void *owner);
+void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched);
+void amd_sched_job_recovery(struct amd_gpu_scheduler *sched);
#endif
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index 2a732c490..6b63beaf7 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -27,7 +27,8 @@
#include <drm/drmP.h>
#include "gpu_scheduler.h"
-struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity, void *owner)
+struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
+ void *owner)
{
struct amd_sched_fence *fence = NULL;
unsigned seq;
@@ -36,46 +37,37 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
if (fence == NULL)
return NULL;
- INIT_LIST_HEAD(&fence->scheduled_cb);
fence->owner = owner;
- fence->sched = s_entity->sched;
+ fence->sched = entity->sched;
spin_lock_init(&fence->lock);
- seq = atomic_inc_return(&s_entity->fence_seq);
- fence_init(&fence->base, &amd_sched_fence_ops, &fence->lock,
- s_entity->fence_context, seq);
+ seq = atomic_inc_return(&entity->fence_seq);
+ fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
+ &fence->lock, entity->fence_context, seq);
+ fence_init(&fence->finished, &amd_sched_fence_ops_finished,
+ &fence->lock, entity->fence_context + 1, seq);
return fence;
}
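
This is where the fence_context_alloc(2) change earlier in the patch pays off: the scheduled fence is initialized on entity->fence_context and the finished fence on entity->fence_context + 1, so the two halves of each job share one sequence number but live on distinct fence timelines.
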
-void amd_sched_fence_signal(struct amd_sched_fence *fence)
+void amd_sched_fence_scheduled(struct amd_sched_fence *fence)
{
- int ret = fence_signal(&fence->base);
+ int ret = fence_signal(&fence->scheduled);
+
if (!ret)
- FENCE_TRACE(&fence->base, "signaled from irq context\n");
+ FENCE_TRACE(&fence->scheduled, "signaled from irq context\n");
else
- FENCE_TRACE(&fence->base, "was already signaled\n");
-}
-
-void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched ,
- struct amd_sched_job *s_job)
-{
- unsigned long flags;
- spin_lock_irqsave(&sched->job_list_lock, flags);
- list_add_tail(&s_job->node, &sched->ring_mirror_list);
- sched->ops->begin_job(s_job);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ FENCE_TRACE(&fence->scheduled, "was already signaled\n");
}
-void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence)
+void amd_sched_fence_finished(struct amd_sched_fence *fence)
{
- struct fence_cb *cur, *tmp;
+ int ret = fence_signal(&fence->finished);
- set_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &s_fence->base.flags);
- list_for_each_entry_safe(cur, tmp, &s_fence->scheduled_cb, node) {
- list_del_init(&cur->node);
- cur->func(&s_fence->base, cur);
- }
+ if (!ret)
+ FENCE_TRACE(&fence->finished, "signaled from irq context\n");
+ else
+ FENCE_TRACE(&fence->finished, "was already signaled\n");
}
static const char *amd_sched_fence_get_driver_name(struct fence *fence)
@@ -105,6 +97,8 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
{
struct fence *f = container_of(rcu, struct fence, rcu);
struct amd_sched_fence *fence = to_amd_sched_fence(f);
+
+ fence_put(fence->parent);
kmem_cache_free(sched_fence_slab, fence);
}
@@ -116,16 +110,41 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
* This function is called when the reference count becomes zero.
* It just RCU schedules freeing up the fence.
*/
-static void amd_sched_fence_release(struct fence *f)
+static void amd_sched_fence_release_scheduled(struct fence *f)
{
- call_rcu(&f->rcu, amd_sched_fence_free);
+ struct amd_sched_fence *fence = to_amd_sched_fence(f);
+
+ call_rcu(&fence->finished.rcu, amd_sched_fence_free);
}
-const struct fence_ops amd_sched_fence_ops = {
+/**
+ * amd_sched_fence_release_finished - drop extra reference
+ *
+ * @f: fence
+ *
+ * Drop the extra reference from the scheduled fence to the base fence.
+ */
+static void amd_sched_fence_release_finished(struct fence *f)
+{
+ struct amd_sched_fence *fence = to_amd_sched_fence(f);
+
+ fence_put(&fence->scheduled);
+}
+
+const struct fence_ops amd_sched_fence_ops_scheduled = {
+ .get_driver_name = amd_sched_fence_get_driver_name,
+ .get_timeline_name = amd_sched_fence_get_timeline_name,
+ .enable_signaling = amd_sched_fence_enable_signaling,
+ .signaled = NULL,
+ .wait = fence_default_wait,
+ .release = amd_sched_fence_release_scheduled,
+};
+
+const struct fence_ops amd_sched_fence_ops_finished = {
.get_driver_name = amd_sched_fence_get_driver_name,
.get_timeline_name = amd_sched_fence_get_timeline_name,
.enable_signaling = amd_sched_fence_enable_signaling,
.signaled = NULL,
.wait = fence_default_wait,
- .release = amd_sched_fence_release,
+ .release = amd_sched_fence_release_finished,
};
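
The two release callbacks chain the teardown: dropping the last reference to the finished fence only puts the embedded scheduled fence, and dropping the last scheduled reference RCU-frees the whole amd_sched_fence via the finished fence's rcu head. Either half can therefore safely outlive the other, which matters now that amd_sched_entity_add_dependency_cb() hands out references to the scheduled fence alone.
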
diff --git a/drivers/gpu/drm/arc/Kconfig b/drivers/gpu/drm/arc/Kconfig
index f9a13b658..f47d88ba4 100644
--- a/drivers/gpu/drm/arc/Kconfig
+++ b/drivers/gpu/drm/arc/Kconfig
@@ -2,7 +2,6 @@ config DRM_ARCPGU
tristate "ARC PGU"
depends on DRM && OF
select DRM_KMS_CMA_HELPER
- select DRM_KMS_FB_HELPER
select DRM_KMS_HELPER
help
Choose this option if you have an ARC PGU controller.
diff --git a/drivers/gpu/drm/arc/Makefile b/drivers/gpu/drm/arc/Makefile
index d48fda70f..73de56a01 100644
--- a/drivers/gpu/drm/arc/Makefile
+++ b/drivers/gpu/drm/arc/Makefile
@@ -1,2 +1,2 @@
-arcpgu-y := arcpgu_crtc.o arcpgu_hdmi.o arcpgu_drv.o
+arcpgu-y := arcpgu_crtc.o arcpgu_hdmi.o arcpgu_sim.o arcpgu_drv.o
obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o
diff --git a/drivers/gpu/drm/arc/arcpgu.h b/drivers/gpu/drm/arc/arcpgu.h
index 86574b698..e8fcf3ab1 100644
--- a/drivers/gpu/drm/arc/arcpgu.h
+++ b/drivers/gpu/drm/arc/arcpgu.h
@@ -22,7 +22,6 @@ struct arcpgu_drm_private {
struct clk *clk;
struct drm_fbdev_cma *fbdev;
struct drm_framebuffer *fb;
- struct list_head event_list;
struct drm_crtc crtc;
struct drm_plane *plane;
};
@@ -43,6 +42,7 @@ static inline u32 arc_pgu_read(struct arcpgu_drm_private *arcpgu,
int arc_pgu_setup_crtc(struct drm_device *dev);
int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np);
+int arcpgu_drm_sim_init(struct drm_device *drm, struct device_node *np);
struct drm_fbdev_cma *arcpgu_fbdev_cma_init(struct drm_device *dev,
unsigned int preferred_bpp, unsigned int num_crtc,
unsigned int max_conn_count);
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
index 92f8beff8..ee0a61c28 100644
--- a/drivers/gpu/drm/arc/arcpgu_crtc.c
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -145,20 +145,14 @@ static int arc_pgu_crtc_atomic_check(struct drm_crtc *crtc,
static void arc_pgu_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
- struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
- unsigned long flags;
-
- if (crtc->state->event) {
- struct drm_pending_vblank_event *event = crtc->state->event;
+ struct drm_pending_vblank_event *event = crtc->state->event;
+ if (event) {
crtc->state->event = NULL;
- event->pipe = drm_crtc_index(crtc);
-
- WARN_ON(drm_crtc_vblank_get(crtc) != 0);
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
- list_add_tail(&event->base.link, &arcpgu->event_list);
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, event);
+ spin_unlock_irq(&crtc->dev->event_lock);
}
}
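
Rather than parking pending vblank events on a driver-private list for the interrupt handler to complete later, atomic_begin() now fires the event immediately under dev->event_lock with drm_crtc_send_vblank_event(). That makes both the event_list field removed above and the preclose() sweep deleted below unnecessary.
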
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index 76e187a5b..6d4ff3473 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -28,21 +28,14 @@ static void arcpgu_fb_output_poll_changed(struct drm_device *dev)
{
struct arcpgu_drm_private *arcpgu = dev->dev_private;
- if (arcpgu->fbdev)
- drm_fbdev_cma_hotplug_event(arcpgu->fbdev);
-}
-
-static int arcpgu_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state, bool async)
-{
- return drm_atomic_helper_commit(dev, state, false);
+ drm_fbdev_cma_hotplug_event(arcpgu->fbdev);
}
static struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = {
.fb_create = drm_fb_cma_create,
.output_poll_changed = arcpgu_fb_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
- .atomic_commit = arcpgu_atomic_commit,
+ .atomic_commit = drm_atomic_helper_commit,
};
static void arcpgu_setup_mode_config(struct drm_device *drm)
@@ -55,7 +48,7 @@ static void arcpgu_setup_mode_config(struct drm_device *drm)
drm->mode_config.funcs = &arcpgu_drm_modecfg_funcs;
}
-int arcpgu_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+static int arcpgu_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
int ret;
@@ -81,22 +74,6 @@ static const struct file_operations arcpgu_drm_ops = {
.mmap = arcpgu_gem_mmap,
};
-static void arcpgu_preclose(struct drm_device *drm, struct drm_file *file)
-{
- struct arcpgu_drm_private *arcpgu = drm->dev_private;
- struct drm_pending_vblank_event *e, *t;
- unsigned long flags;
-
- spin_lock_irqsave(&drm->event_lock, flags);
- list_for_each_entry_safe(e, t, &arcpgu->event_list, base.link) {
- if (e->base.file_priv != file)
- continue;
- list_del(&e->base.link);
- e->base.destroy(&e->base);
- }
- spin_unlock_irqrestore(&drm->event_lock, flags);
-}
-
static void arcpgu_lastclose(struct drm_device *drm)
{
struct arcpgu_drm_private *arcpgu = drm->dev_private;
@@ -122,16 +99,12 @@ static int arcpgu_load(struct drm_device *drm)
if (IS_ERR(arcpgu->clk))
return PTR_ERR(arcpgu->clk);
- INIT_LIST_HEAD(&arcpgu->event_list);
-
arcpgu_setup_mode_config(drm);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
arcpgu->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(arcpgu->regs)) {
- dev_err(drm->dev, "Could not remap IO mem\n");
+ if (IS_ERR(arcpgu->regs))
return PTR_ERR(arcpgu->regs);
- }
dev_info(drm->dev, "arc_pgu ID: 0x%x\n",
arc_pgu_read(arcpgu, ARCPGU_REG_ID));
@@ -149,15 +122,17 @@ static int arcpgu_load(struct drm_device *drm)
/* find the encoder node and initialize it */
encoder_node = of_parse_phandle(drm->dev->of_node, "encoder-slave", 0);
- if (!encoder_node) {
- dev_err(drm->dev, "failed to get an encoder slave node\n");
- return -ENODEV;
+ if (encoder_node) {
+ ret = arcpgu_drm_hdmi_init(drm, encoder_node);
+ of_node_put(encoder_node);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = arcpgu_drm_sim_init(drm, NULL);
+ if (ret < 0)
+ return ret;
}
- ret = arcpgu_drm_hdmi_init(drm, encoder_node);
- if (ret < 0)
- return ret;
-
drm_mode_config_reset(drm);
drm_kms_helper_poll_init(drm);
@@ -174,7 +149,7 @@ static int arcpgu_load(struct drm_device *drm)
return 0;
}
-int arcpgu_unload(struct drm_device *drm)
+static int arcpgu_unload(struct drm_device *drm)
{
struct arcpgu_drm_private *arcpgu = drm->dev_private;
@@ -192,7 +167,6 @@ int arcpgu_unload(struct drm_device *drm)
static struct drm_driver arcpgu_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
DRIVER_ATOMIC,
- .preclose = arcpgu_preclose,
.lastclose = arcpgu_lastclose,
.name = "drm-arcpgu",
.desc = "ARC PGU Controller",
@@ -207,7 +181,7 @@ static struct drm_driver arcpgu_drm_driver = {
.get_vblank_counter = drm_vblank_no_hw_counter,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
@@ -235,15 +209,8 @@ static int arcpgu_probe(struct platform_device *pdev)
if (ret)
goto err_unload;
- ret = drm_connector_register_all(drm);
- if (ret)
- goto err_unregister;
-
return 0;
-err_unregister:
- drm_dev_unregister(drm);
-
err_unload:
arcpgu_unload(drm);
@@ -257,7 +224,6 @@ static int arcpgu_remove(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
- drm_connector_unregister_all(drm);
drm_dev_unregister(drm);
arcpgu_unload(drm);
drm_dev_unref(drm);
diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c
index 08b6baeb3..b7a8b2ac4 100644
--- a/drivers/gpu/drm/arc/arcpgu_hdmi.c
+++ b/drivers/gpu/drm/arc/arcpgu_hdmi.c
@@ -46,23 +46,6 @@ static int arcpgu_drm_connector_get_modes(struct drm_connector *connector)
return sfuncs->get_modes(&slave->base, connector);
}
-struct drm_encoder *
-arcpgu_drm_connector_best_encoder(struct drm_connector *connector)
-{
- struct drm_encoder_slave *slave;
- struct arcpgu_drm_connector *con =
- container_of(connector, struct arcpgu_drm_connector, connector);
-
- slave = con->encoder_slave;
- if (slave == NULL) {
- dev_err(connector->dev->dev,
- "connector_best_encoder: cannot find slave encoder for connector\n");
- return NULL;
- }
-
- return &slave->base;
-}
-
static enum drm_connector_status
arcpgu_drm_connector_detect(struct drm_connector *connector, bool force)
{
@@ -97,7 +80,6 @@ static void arcpgu_drm_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_helper_funcs
arcpgu_drm_connector_helper_funcs = {
.get_modes = arcpgu_drm_connector_get_modes,
- .best_encoder = arcpgu_drm_connector_best_encoder,
};
static const struct drm_connector_funcs arcpgu_drm_connector_funcs = {
diff --git a/drivers/gpu/drm/arc/arcpgu_sim.c b/drivers/gpu/drm/arc/arcpgu_sim.c
new file mode 100644
index 000000000..2bf06d715
--- /dev/null
+++ b/drivers/gpu/drm/arc/arcpgu_sim.c
@@ -0,0 +1,128 @@
+/*
+ * ARC PGU DRM driver.
+ *
+ * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder_slave.h>
+#include <drm/drm_atomic_helper.h>
+
+#include "arcpgu.h"
+
+#define XRES_DEF 640
+#define YRES_DEF 480
+
+#define XRES_MAX 8192
+#define YRES_MAX 8192
+
+struct arcpgu_drm_connector {
+ struct drm_connector connector;
+ struct drm_encoder_slave *encoder_slave;
+};
+
+static int arcpgu_drm_connector_get_modes(struct drm_connector *connector)
+{
+ int count;
+
+ count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
+ drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
+ return count;
+}
+
+static enum drm_connector_status
+arcpgu_drm_connector_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void arcpgu_drm_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_helper_funcs
+arcpgu_drm_connector_helper_funcs = {
+ .get_modes = arcpgu_drm_connector_get_modes,
+};
+
+static const struct drm_connector_funcs arcpgu_drm_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .reset = drm_atomic_helper_connector_reset,
+ .detect = arcpgu_drm_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = arcpgu_drm_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static struct drm_encoder_funcs arcpgu_drm_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+int arcpgu_drm_sim_init(struct drm_device *drm, struct device_node *np)
+{
+ struct arcpgu_drm_connector *arcpgu_connector;
+ struct drm_encoder_slave *encoder;
+ struct drm_connector *connector;
+ int ret;
+
+ encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL);
+ if (encoder == NULL)
+ return -ENOMEM;
+
+ encoder->base.possible_crtcs = 1;
+ encoder->base.possible_clones = 0;
+
+ ret = drm_encoder_init(drm, &encoder->base, &arcpgu_drm_encoder_funcs,
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
+ if (ret)
+ return ret;
+
+ arcpgu_connector = devm_kzalloc(drm->dev, sizeof(*arcpgu_connector),
+ GFP_KERNEL);
+ if (!arcpgu_connector) {
+ ret = -ENOMEM;
+ goto error_encoder_cleanup;
+ }
+
+ connector = &arcpgu_connector->connector;
+ drm_connector_helper_add(connector, &arcpgu_drm_connector_helper_funcs);
+
+ ret = drm_connector_init(drm, connector, &arcpgu_drm_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL);
+ if (ret < 0) {
+ dev_err(drm->dev, "failed to initialize drm connector\n");
+ goto error_encoder_cleanup;
+ }
+
+ ret = drm_mode_connector_attach_encoder(connector, &encoder->base);
+ if (ret < 0) {
+ dev_err(drm->dev, "could not attach connector to encoder\n");
+ drm_connector_unregister(connector);
+ goto error_connector_cleanup;
+ }
+
+ arcpgu_connector->encoder_slave = encoder;
+
+ return 0;
+
+error_connector_cleanup:
+ drm_connector_cleanup(connector);
+
+error_encoder_cleanup:
+ drm_encoder_cleanup(&encoder->base);
+ return ret;
+}
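One detail in arcpgu_drm_sim_init() worth noting: possible_crtcs is a bitmask of CRTC indices, not a count, so the literal 1 pins the virtual encoder to the first (and only) CRTC. A sketch of the equivalent computed form, with a hypothetical helper name:

/* hypothetical helper: possible_crtcs mask for a given CRTC; with
 * arcpgu's single CRTC this evaluates to 0x1, the literal used above */
static u32 arcpgu_crtc_mask(struct drm_crtc *crtc)
{
	return 1 << drm_crtc_index(crtc);
}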
diff --git a/drivers/gpu/drm/arm/Kconfig b/drivers/gpu/drm/arm/Kconfig
index eaed454e0..9a18e1bd5 100644
--- a/drivers/gpu/drm/arm/Kconfig
+++ b/drivers/gpu/drm/arm/Kconfig
@@ -9,7 +9,6 @@ config DRM_HDLCD
depends on COMMON_CLK
select DRM_ARM
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_KMS_CMA_HELPER
help
Choose this option if you have an ARM High Definition Colour LCD
@@ -25,3 +24,19 @@ config DRM_HDLCD_SHOW_UNDERRUN
Enable this option to show in red colour the pixels that the
HDLCD device did not fetch from framebuffer due to underrun
conditions.
+
+config DRM_MALI_DISPLAY
+ tristate "ARM Mali Display Processor"
+ depends on DRM && OF && (ARM || ARM64)
+ depends on COMMON_CLK
+ select DRM_ARM
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
+ select VIDEOMODE_HELPERS
+ help
+ Choose this option if you want to compile the ARM Mali Display
+ Processor driver. It supports the DP500, DP550 and DP650 variants
+ of the hardware.
+
+ If compiled as a module it will be called mali-dp.
diff --git a/drivers/gpu/drm/arm/Makefile b/drivers/gpu/drm/arm/Makefile
index 89dcb7bab..bb8b158ff 100644
--- a/drivers/gpu/drm/arm/Makefile
+++ b/drivers/gpu/drm/arm/Makefile
@@ -1,2 +1,4 @@
hdlcd-y := hdlcd_drv.o hdlcd_crtc.o
obj-$(CONFIG_DRM_HDLCD) += hdlcd.o
+mali-dp-y := malidp_drv.o malidp_hw.o malidp_planes.o malidp_crtc.o
+obj-$(CONFIG_DRM_MALI_DISPLAY) += mali-dp.o
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 0813c2f06..48019ae22 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -196,30 +196,11 @@ static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,
}
}
-static void hdlcd_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
-{
-}
-
-static bool hdlcd_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
- .mode_fixup = hdlcd_crtc_mode_fixup,
- .mode_set = drm_helper_crtc_mode_set,
- .mode_set_base = drm_helper_crtc_mode_set_base,
- .mode_set_nofb = hdlcd_crtc_mode_set_nofb,
.enable = hdlcd_crtc_enable,
.disable = hdlcd_crtc_disable,
- .prepare = hdlcd_crtc_disable,
- .commit = hdlcd_crtc_enable,
.atomic_check = hdlcd_crtc_atomic_check,
.atomic_begin = hdlcd_crtc_atomic_begin,
- .atomic_flush = hdlcd_crtc_atomic_flush,
};
static int hdlcd_plane_atomic_check(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index a6ca36f00..d83b46a30 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -102,21 +102,14 @@ static void hdlcd_fb_output_poll_changed(struct drm_device *drm)
{
struct hdlcd_drm_private *hdlcd = drm->dev_private;
- if (hdlcd->fbdev)
- drm_fbdev_cma_hotplug_event(hdlcd->fbdev);
-}
-
-static int hdlcd_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state, bool nonblock)
-{
- return drm_atomic_helper_commit(dev, state, false);
+ drm_fbdev_cma_hotplug_event(hdlcd->fbdev);
}
static const struct drm_mode_config_funcs hdlcd_mode_config_funcs = {
.fb_create = drm_fb_cma_create,
.output_poll_changed = hdlcd_fb_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
- .atomic_commit = hdlcd_atomic_commit,
+ .atomic_commit = drm_atomic_helper_commit,
};
static void hdlcd_setup_mode_config(struct drm_device *drm)
@@ -296,7 +289,7 @@ static struct drm_driver hdlcd_driver = {
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = hdlcd_enable_vblank,
.disable_vblank = hdlcd_disable_vblank,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
new file mode 100644
index 000000000..08e6a71f5
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -0,0 +1,216 @@
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Liviu Dudau <Liviu.Dudau@arm.com>
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * ARM Mali DP500/DP550/DP650 driver (crtc operations)
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <linux/clk.h>
+#include <video/videomode.h>
+
+#include "malidp_drv.h"
+#include "malidp_hw.h"
+
+static bool malidp_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
+ struct malidp_hw_device *hwdev = malidp->dev;
+
+ /*
+ * check that the hardware can drive the required clock rate,
+ * but skip the check if the clock is meant to be disabled (req_rate = 0)
+ */
+ long rate, req_rate = mode->crtc_clock * 1000;
+
+ if (req_rate) {
+ rate = clk_round_rate(hwdev->mclk, req_rate);
+ if (rate < req_rate) {
+ DRM_DEBUG_DRIVER("mclk clock unable to reach %d kHz\n",
+ mode->crtc_clock);
+ return false;
+ }
+
+ rate = clk_round_rate(hwdev->pxlclk, req_rate);
+ if (rate != req_rate) {
+ DRM_DEBUG_DRIVER("pxlclk doesn't support %ld Hz\n",
+ req_rate);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void malidp_crtc_enable(struct drm_crtc *crtc)
+{
+ struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
+ struct malidp_hw_device *hwdev = malidp->dev;
+ struct videomode vm;
+
+ drm_display_mode_to_videomode(&crtc->state->adjusted_mode, &vm);
+
+ clk_prepare_enable(hwdev->pxlclk);
+
+ /* mclk needs to be set to the same rate as, or higher than, pxlclk */
+ clk_set_rate(hwdev->mclk, crtc->state->adjusted_mode.crtc_clock * 1000);
+ clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000);
+
+ hwdev->modeset(hwdev, &vm);
+ hwdev->leave_config_mode(hwdev);
+ drm_crtc_vblank_on(crtc);
+}
+
+static void malidp_crtc_disable(struct drm_crtc *crtc)
+{
+ struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
+ struct malidp_hw_device *hwdev = malidp->dev;
+
+ drm_crtc_vblank_off(crtc);
+ hwdev->enter_config_mode(hwdev);
+ clk_disable_unprepare(hwdev->pxlclk);
+}
+
+static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
+ struct malidp_hw_device *hwdev = malidp->dev;
+ struct drm_plane *plane;
+ const struct drm_plane_state *pstate;
+ u32 rot_mem_free, rot_mem_usable;
+ int rotated_planes = 0;
+
+ /*
+ * check if there is enough rotation memory available for planes
+ * that need 90° and 270° rotation. Each plane has set its required
+ * memory size in the ->plane_check() callback; here we only make
+ * sure that the sum is less than the total usable memory.
+ *
+ * The rotation memory allocation algorithm (for each plane):
+ * a. If no more rotated planes exist, all remaining rotation
+ * memory in the bank is available for use by the plane.
+ * b. If other rotated planes exist, and plane's layer ID is
+ * DE_VIDEO1, it can use all the memory from first bank if
+ * secondary rotation memory bank is available, otherwise it can
+ * use up to half the bank's memory.
+ * c. If other rotated planes exist, and plane's layer ID is not
+ * DE_VIDEO1, it can use half of the available memory
+ *
+ * Note: this algorithm assumes that the order in which the planes are
+ * checked always has DE_VIDEO1 plane first in the list if it is
+ * rotated. Because that is how we create the planes in the first
+ * place, things work under the current DRM version; but if the order
+ * in which drm_atomic_crtc_state_for_each_plane() iterates over planes
+ * ever changes, we will need to pre-sort the planes before validation.
+ */
+
+ /* first count the number of rotated planes */
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
+ if (pstate->rotation & MALIDP_ROTATED_MASK)
+ rotated_planes++;
+ }
+
+ rot_mem_free = hwdev->rotation_memory[0];
+ /*
+ * if we have more than 1 plane using rotation memory, use the second
+ * block of rotation memory as well
+ */
+ if (rotated_planes > 1)
+ rot_mem_free += hwdev->rotation_memory[1];
+
+ /* now validate the rotation memory requirements */
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
+ struct malidp_plane *mp = to_malidp_plane(plane);
+ struct malidp_plane_state *ms = to_malidp_plane_state(pstate);
+
+ if (pstate->rotation & MALIDP_ROTATED_MASK) {
+ /* process current plane */
+ rotated_planes--;
+
+ if (!rotated_planes) {
+ /* no more rotated planes, we can use what's left */
+ rot_mem_usable = rot_mem_free;
+ } else {
+ if ((mp->layer->id != DE_VIDEO1) ||
+ (hwdev->rotation_memory[1] == 0))
+ rot_mem_usable = rot_mem_free / 2;
+ else
+ rot_mem_usable = hwdev->rotation_memory[0];
+ }
+
+ rot_mem_free -= rot_mem_usable;
+
+ if (ms->rotmem_size > rot_mem_usable)
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static const struct drm_crtc_helper_funcs malidp_crtc_helper_funcs = {
+ .mode_fixup = malidp_crtc_mode_fixup,
+ .enable = malidp_crtc_enable,
+ .disable = malidp_crtc_disable,
+ .atomic_check = malidp_crtc_atomic_check,
+};
+
+static const struct drm_crtc_funcs malidp_crtc_funcs = {
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+int malidp_crtc_init(struct drm_device *drm)
+{
+ struct malidp_drm *malidp = drm->dev_private;
+ struct drm_plane *primary = NULL, *plane;
+ int ret;
+
+ ret = malidp_de_planes_init(drm);
+ if (ret < 0) {
+ DRM_ERROR("Failed to initialise planes\n");
+ return ret;
+ }
+
+ drm_for_each_plane(plane, drm) {
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
+ primary = plane;
+ break;
+ }
+ }
+
+ if (!primary) {
+ DRM_ERROR("no primary plane found\n");
+ ret = -EINVAL;
+ goto crtc_cleanup_planes;
+ }
+
+ ret = drm_crtc_init_with_planes(drm, &malidp->crtc, primary, NULL,
+ &malidp_crtc_funcs, NULL);
+
+ if (!ret) {
+ drm_crtc_helper_add(&malidp->crtc, &malidp_crtc_helper_funcs);
+ return 0;
+ }
+
+crtc_cleanup_planes:
+ malidp_de_planes_destroy(drm);
+
+ return ret;
+}
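The bank-splitting rules in malidp_crtc_atomic_check() are easier to follow with numbers. A standalone model of the per-plane allocation step, assuming two rotated planes checked with DE_VIDEO1 first and the two 64KB banks of the smallest DP550 configuration (names and sizes here are for illustration only):

/* standalone model of the allocation loop above (sizes in bytes) */
static unsigned int rot_mem_for_plane(int planes_left, int is_video1,
				      int have_second_bank,
				      unsigned int *rot_mem_free)
{
	unsigned int usable;

	if (planes_left == 0)			/* last rotated plane */
		usable = *rot_mem_free;		/* takes whatever is left */
	else if (is_video1 && have_second_bank)
		usable = 64 * 1024;		/* all of the first bank */
	else
		usable = *rot_mem_free / 2;	/* half of what remains */

	*rot_mem_free -= usable;
	return usable;
}

/* e.g. DE_VIDEO1 then DE_VIDEO2, starting from rot_mem_free = 128KB:
 * VIDEO1 takes the full 64KB first bank, VIDEO2 the remaining 64KB */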
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
new file mode 100644
index 000000000..82171d223
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -0,0 +1,519 @@
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Liviu Dudau <Liviu.Dudau@arm.com>
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * ARM Mali DP500/DP550/DP650 KMS/DRM driver
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/of_reserved_mem.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
+
+#include "malidp_drv.h"
+#include "malidp_regs.h"
+#include "malidp_hw.h"
+
+#define MALIDP_CONF_VALID_TIMEOUT 250
+
+/*
+ * set the "config valid" bit and wait until the hardware acts on it
+ */
+static int malidp_set_and_wait_config_valid(struct drm_device *drm)
+{
+ struct malidp_drm *malidp = drm->dev_private;
+ struct malidp_hw_device *hwdev = malidp->dev;
+ int ret;
+
+ hwdev->set_config_valid(hwdev);
+ /* don't wait for config_valid flag if we are in config mode */
+ if (hwdev->in_config_mode(hwdev))
+ return 0;
+
+ ret = wait_event_interruptible_timeout(malidp->wq,
+ atomic_read(&malidp->config_valid) == 1,
+ msecs_to_jiffies(MALIDP_CONF_VALID_TIMEOUT));
+
+ return (ret > 0) ? 0 : -ETIMEDOUT;
+}
+
+static void malidp_output_poll_changed(struct drm_device *drm)
+{
+ struct malidp_drm *malidp = drm->dev_private;
+
+ drm_fbdev_cma_hotplug_event(malidp->fbdev);
+}
+
+static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
+{
+ struct drm_pending_vblank_event *event;
+ struct drm_device *drm = state->dev;
+ struct malidp_drm *malidp = drm->dev_private;
+ int ret = malidp_set_and_wait_config_valid(drm);
+
+ if (ret)
+ DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n");
+
+ event = malidp->crtc.state->event;
+ if (event) {
+ malidp->crtc.state->event = NULL;
+
+ spin_lock_irq(&drm->event_lock);
+ if (drm_crtc_vblank_get(&malidp->crtc) == 0)
+ drm_crtc_arm_vblank_event(&malidp->crtc, event);
+ else
+ drm_crtc_send_vblank_event(&malidp->crtc, event);
+ spin_unlock_irq(&drm->event_lock);
+ }
+ drm_atomic_helper_commit_hw_done(state);
+}
+
+static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *drm = state->dev;
+
+ drm_atomic_helper_commit_modeset_disables(drm, state);
+ drm_atomic_helper_commit_modeset_enables(drm, state);
+ drm_atomic_helper_commit_planes(drm, state, true);
+
+ malidp_atomic_commit_hw_done(state);
+
+ drm_atomic_helper_wait_for_vblanks(drm, state);
+
+ drm_atomic_helper_cleanup_planes(drm, state);
+}
+
+static struct drm_mode_config_helper_funcs malidp_mode_config_helpers = {
+ .atomic_commit_tail = malidp_atomic_commit_tail,
+};
+
+static const struct drm_mode_config_funcs malidp_mode_config_funcs = {
+ .fb_create = drm_fb_cma_create,
+ .output_poll_changed = malidp_output_poll_changed,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static int malidp_enable_vblank(struct drm_device *drm, unsigned int crtc)
+{
+ struct malidp_drm *malidp = drm->dev_private;
+ struct malidp_hw_device *hwdev = malidp->dev;
+
+ malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
+ hwdev->map.de_irq_map.vsync_irq);
+ return 0;
+}
+
+static void malidp_disable_vblank(struct drm_device *drm, unsigned int pipe)
+{
+ struct malidp_drm *malidp = drm->dev_private;
+ struct malidp_hw_device *hwdev = malidp->dev;
+
+ malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK,
+ hwdev->map.de_irq_map.vsync_irq);
+}
+
+static int malidp_init(struct drm_device *drm)
+{
+ int ret;
+ struct malidp_drm *malidp = drm->dev_private;
+ struct malidp_hw_device *hwdev = malidp->dev;
+
+ drm_mode_config_init(drm);
+
+ drm->mode_config.min_width = hwdev->min_line_size;
+ drm->mode_config.min_height = hwdev->min_line_size;
+ drm->mode_config.max_width = hwdev->max_line_size;
+ drm->mode_config.max_height = hwdev->max_line_size;
+ drm->mode_config.funcs = &malidp_mode_config_funcs;
+ drm->mode_config.helper_private = &malidp_mode_config_helpers;
+
+ ret = malidp_crtc_init(drm);
+ if (ret) {
+ drm_mode_config_cleanup(drm);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int malidp_irq_init(struct platform_device *pdev)
+{
+ int irq_de, irq_se, ret = 0;
+ struct drm_device *drm = dev_get_drvdata(&pdev->dev);
+
+ /* fetch the interrupts from DT */
+ irq_de = platform_get_irq_byname(pdev, "DE");
+ if (irq_de < 0) {
+ DRM_ERROR("no 'DE' IRQ specified!\n");
+ return irq_de;
+ }
+ irq_se = platform_get_irq_byname(pdev, "SE");
+ if (irq_se < 0) {
+ DRM_ERROR("no 'SE' IRQ specified!\n");
+ return irq_se;
+ }
+
+ ret = malidp_de_irq_init(drm, irq_de);
+ if (ret)
+ return ret;
+
+ ret = malidp_se_irq_init(drm, irq_se);
+ if (ret) {
+ malidp_de_irq_fini(drm);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void malidp_lastclose(struct drm_device *drm)
+{
+ struct malidp_drm *malidp = drm->dev_private;
+
+ drm_fbdev_cma_restore_mode(malidp->fbdev);
+}
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = noop_llseek,
+ .mmap = drm_gem_cma_mmap,
+};
+
+static struct drm_driver malidp_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC |
+ DRIVER_PRIME,
+ .lastclose = malidp_lastclose,
+ .get_vblank_counter = drm_vblank_no_hw_counter,
+ .enable_vblank = malidp_enable_vblank,
+ .disable_vblank = malidp_disable_vblank,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ .fops = &fops,
+ .name = "mali-dp",
+ .desc = "ARM Mali Display Processor driver",
+ .date = "20160106",
+ .major = 1,
+ .minor = 0,
+};
+
+static const struct of_device_id malidp_drm_of_match[] = {
+ {
+ .compatible = "arm,mali-dp500",
+ .data = &malidp_device[MALIDP_500]
+ },
+ {
+ .compatible = "arm,mali-dp550",
+ .data = &malidp_device[MALIDP_550]
+ },
+ {
+ .compatible = "arm,mali-dp650",
+ .data = &malidp_device[MALIDP_650]
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, malidp_drm_of_match);
+
+#define MAX_OUTPUT_CHANNELS 3
+
+static int malidp_bind(struct device *dev)
+{
+ struct resource *res;
+ struct drm_device *drm;
+ struct device_node *ep;
+ struct malidp_drm *malidp;
+ struct malidp_hw_device *hwdev;
+ struct platform_device *pdev = to_platform_device(dev);
+ /* number of lines for the R, G and B output */
+ u8 output_width[MAX_OUTPUT_CHANNELS];
+ int ret = 0, i;
+ u32 version, out_depth = 0;
+
+ malidp = devm_kzalloc(dev, sizeof(*malidp), GFP_KERNEL);
+ if (!malidp)
+ return -ENOMEM;
+
+ hwdev = devm_kzalloc(dev, sizeof(*hwdev), GFP_KERNEL);
+ if (!hwdev)
+ return -ENOMEM;
+
+ /*
+ * copy the associated data from malidp_drm_of_match to avoid
+ * having to keep a reference to the OF node after binding
+ */
+ memcpy(hwdev, of_device_get_match_data(dev), sizeof(*hwdev));
+ malidp->dev = hwdev;
+
+ INIT_LIST_HEAD(&malidp->event_list);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hwdev->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hwdev->regs))
+ return PTR_ERR(hwdev->regs);
+
+ hwdev->pclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(hwdev->pclk))
+ return PTR_ERR(hwdev->pclk);
+
+ hwdev->aclk = devm_clk_get(dev, "aclk");
+ if (IS_ERR(hwdev->aclk))
+ return PTR_ERR(hwdev->aclk);
+
+ hwdev->mclk = devm_clk_get(dev, "mclk");
+ if (IS_ERR(hwdev->mclk))
+ return PTR_ERR(hwdev->mclk);
+
+ hwdev->pxlclk = devm_clk_get(dev, "pxlclk");
+ if (IS_ERR(hwdev->pxlclk))
+ return PTR_ERR(hwdev->pxlclk);
+
+ /* Get the optional framebuffer memory resource */
+ ret = of_reserved_mem_device_init(dev);
+ if (ret && ret != -ENODEV)
+ return ret;
+
+ drm = drm_dev_alloc(&malidp_driver, dev);
+ if (!drm) {
+ ret = -ENOMEM;
+ goto alloc_fail;
+ }
+
+ /* Enable APB clock in order to get access to the registers */
+ clk_prepare_enable(hwdev->pclk);
+ /*
+ * Enable AXI clock and main clock so that prefetch can start once
+ * the registers are set
+ */
+ clk_prepare_enable(hwdev->aclk);
+ clk_prepare_enable(hwdev->mclk);
+
+ ret = hwdev->query_hw(hwdev);
+ if (ret) {
+ DRM_ERROR("Invalid HW configuration\n");
+ goto query_hw_fail;
+ }
+
+ version = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_DE_CORE_ID);
+ DRM_INFO("found ARM Mali-DP%3x version r%dp%d\n", version >> 16,
+ (version >> 12) & 0xf, (version >> 8) & 0xf);
+
+ /* set the number of lines used for output of RGB data */
+ ret = of_property_read_u8_array(dev->of_node,
+ "arm,malidp-output-port-lines",
+ output_width, MAX_OUTPUT_CHANNELS);
+ if (ret)
+ goto query_hw_fail;
+
+ for (i = 0; i < MAX_OUTPUT_CHANNELS; i++)
+ out_depth = (out_depth << 8) | (output_width[i] & 0xf);
+ malidp_hw_write(hwdev, out_depth, hwdev->map.out_depth_base);
+
+ drm->dev_private = malidp;
+ dev_set_drvdata(dev, drm);
+ atomic_set(&malidp->config_valid, 0);
+ init_waitqueue_head(&malidp->wq);
+
+ ret = malidp_init(drm);
+ if (ret < 0)
+ goto init_fail;
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto register_fail;
+
+ /* Set the CRTC's port so that the encoder component can find it */
+ ep = of_graph_get_next_endpoint(dev->of_node, NULL);
+ if (!ep) {
+ ret = -EINVAL;
+ goto port_fail;
+ }
+ malidp->crtc.port = of_get_next_parent(ep);
+
+ ret = component_bind_all(dev, drm);
+ if (ret) {
+ DRM_ERROR("Failed to bind all components\n");
+ goto bind_fail;
+ }
+
+ ret = malidp_irq_init(pdev);
+ if (ret < 0)
+ goto irq_init_fail;
+
+ ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ if (ret < 0) {
+ DRM_ERROR("failed to initialise vblank\n");
+ goto vblank_fail;
+ }
+
+ drm_mode_config_reset(drm);
+
+ malidp->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
+ drm->mode_config.num_connector);
+
+ if (IS_ERR(malidp->fbdev)) {
+ ret = PTR_ERR(malidp->fbdev);
+ malidp->fbdev = NULL;
+ goto fbdev_fail;
+ }
+
+ drm_kms_helper_poll_init(drm);
+ return 0;
+
+fbdev_fail:
+ drm_vblank_cleanup(drm);
+vblank_fail:
+ malidp_se_irq_fini(drm);
+ malidp_de_irq_fini(drm);
+irq_init_fail:
+ component_unbind_all(dev, drm);
+bind_fail:
+ of_node_put(malidp->crtc.port);
+ malidp->crtc.port = NULL;
+port_fail:
+ drm_dev_unregister(drm);
+register_fail:
+ malidp_de_planes_destroy(drm);
+ drm_mode_config_cleanup(drm);
+init_fail:
+ drm->dev_private = NULL;
+ dev_set_drvdata(dev, NULL);
+query_hw_fail:
+ clk_disable_unprepare(hwdev->mclk);
+ clk_disable_unprepare(hwdev->aclk);
+ clk_disable_unprepare(hwdev->pclk);
+ drm_dev_unref(drm);
+alloc_fail:
+ of_reserved_mem_device_release(dev);
+
+ return ret;
+}
+
+static void malidp_unbind(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct malidp_drm *malidp = drm->dev_private;
+ struct malidp_hw_device *hwdev = malidp->dev;
+
+ if (malidp->fbdev) {
+ drm_fbdev_cma_fini(malidp->fbdev);
+ malidp->fbdev = NULL;
+ }
+ drm_kms_helper_poll_fini(drm);
+ malidp_se_irq_fini(drm);
+ malidp_de_irq_fini(drm);
+ drm_vblank_cleanup(drm);
+ component_unbind_all(dev, drm);
+ of_node_put(malidp->crtc.port);
+ malidp->crtc.port = NULL;
+ drm_dev_unregister(drm);
+ malidp_de_planes_destroy(drm);
+ drm_mode_config_cleanup(drm);
+ drm->dev_private = NULL;
+ dev_set_drvdata(dev, NULL);
+ clk_disable_unprepare(hwdev->mclk);
+ clk_disable_unprepare(hwdev->aclk);
+ clk_disable_unprepare(hwdev->pclk);
+ drm_dev_unref(drm);
+ of_reserved_mem_device_release(dev);
+}
+
+static const struct component_master_ops malidp_master_ops = {
+ .bind = malidp_bind,
+ .unbind = malidp_unbind,
+};
+
+static int malidp_compare_dev(struct device *dev, void *data)
+{
+ struct device_node *np = data;
+
+ return dev->of_node == np;
+}
+
+static int malidp_platform_probe(struct platform_device *pdev)
+{
+ struct device_node *port, *ep;
+ struct component_match *match = NULL;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ /* there is only one output port inside each device; find it */
+ ep = of_graph_get_next_endpoint(pdev->dev.of_node, NULL);
+ if (!ep)
+ return -ENODEV;
+
+ if (!of_device_is_available(ep)) {
+ of_node_put(ep);
+ return -ENODEV;
+ }
+
+ /* add the remote encoder port as a component */
+ port = of_graph_get_remote_port_parent(ep);
+ of_node_put(ep);
+ if (!port || !of_device_is_available(port)) {
+ of_node_put(port);
+ return -EAGAIN;
+ }
+
+ component_match_add(&pdev->dev, &match, malidp_compare_dev, port);
+ return component_master_add_with_match(&pdev->dev, &malidp_master_ops,
+ match);
+}
+
+static int malidp_platform_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &malidp_master_ops);
+ return 0;
+}
+
+static struct platform_driver malidp_platform_driver = {
+ .probe = malidp_platform_probe,
+ .remove = malidp_platform_remove,
+ .driver = {
+ .name = "mali-dp",
+ .of_match_table = malidp_drm_of_match,
+ },
+};
+
+module_platform_driver(malidp_platform_driver);
+
+MODULE_AUTHOR("Liviu Dudau <Liviu.Dudau@arm.com>");
+MODULE_DESCRIPTION("ARM Mali DP DRM driver");
+MODULE_LICENSE("GPL v2");
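The commit path above and the interrupt code in malidp_hw.c cooperate through a small handshake: the commit sets the config-valid bit and sleeps on malidp->wq, the hard DE interrupt handler latches config_valid when the DC status reports it and returns IRQ_WAKE_THREAD, and the threaded half performs the wake-up. A reduced sketch of that shape, using standalone names rather than the driver's:

static atomic_t cfg_ok = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(cfg_wq);

/* commit side: sleep until the IRQ path reports the new config */
static int cfg_wait(unsigned int timeout_ms)
{
	long ret = wait_event_interruptible_timeout(cfg_wq,
			atomic_read(&cfg_ok) == 1,
			msecs_to_jiffies(timeout_ms));

	return (ret > 0) ? 0 : -ETIMEDOUT;
}

/* hard IRQ handler: latch the flag, defer the wake-up */
static irqreturn_t cfg_irq(int irq, void *arg)
{
	atomic_set(&cfg_ok, 1);
	return IRQ_WAKE_THREAD;
}

/* threaded half: where the driver wakes the sleeper */
static irqreturn_t cfg_irq_thread(int irq, void *arg)
{
	wake_up(&cfg_wq);
	return IRQ_HANDLED;
}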
diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h
new file mode 100644
index 000000000..95558fde2
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_drv.h
@@ -0,0 +1,54 @@
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Liviu Dudau <Liviu.Dudau@arm.com>
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * ARM Mali DP500/DP550/DP650 KMS/DRM driver structures
+ */
+
+#ifndef __MALIDP_DRV_H__
+#define __MALIDP_DRV_H__
+
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include "malidp_hw.h"
+
+struct malidp_drm {
+ struct malidp_hw_device *dev;
+ struct drm_fbdev_cma *fbdev;
+ struct list_head event_list;
+ struct drm_crtc crtc;
+ wait_queue_head_t wq;
+ atomic_t config_valid;
+};
+
+#define crtc_to_malidp_device(x) container_of(x, struct malidp_drm, crtc)
+
+struct malidp_plane {
+ struct drm_plane base;
+ struct malidp_hw_device *hwdev;
+ const struct malidp_layer *layer;
+};
+
+struct malidp_plane_state {
+ struct drm_plane_state base;
+
+ /* size of the required rotation memory if plane is rotated */
+ u32 rotmem_size;
+};
+
+#define to_malidp_plane(x) container_of(x, struct malidp_plane, base)
+#define to_malidp_plane_state(x) container_of(x, struct malidp_plane_state, base)
+
+int malidp_de_planes_init(struct drm_device *drm);
+void malidp_de_planes_destroy(struct drm_device *drm);
+int malidp_crtc_init(struct drm_device *drm);
+
+/* often-used combination of rotation bits */
+#define MALIDP_ROTATED_MASK (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))
+
+#endif /* __MALIDP_DRV_H__ */
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
new file mode 100644
index 000000000..a6132f1d5
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -0,0 +1,691 @@
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Liviu Dudau <Liviu.Dudau@arm.com>
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * ARM Mali DP500/DP550/DP650 hardware manipulation routines. This is where
+ * the differences between the various versions of the hardware are dealt
+ * with, so as to give the rest of the driver code a unified view.
+ */
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <drm/drmP.h>
+#include <video/videomode.h>
+#include <video/display_timing.h>
+
+#include "malidp_drv.h"
+#include "malidp_hw.h"
+
+static const struct malidp_input_format malidp500_de_formats[] = {
+ /* fourcc, layers supporting the format, internal id */
+ { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 0 },
+ { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 1 },
+ { DRM_FORMAT_ARGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 2 },
+ { DRM_FORMAT_ABGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 3 },
+ { DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 4 },
+ { DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 5 },
+ { DRM_FORMAT_RGB888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 6 },
+ { DRM_FORMAT_BGR888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 7 },
+ { DRM_FORMAT_RGBA5551, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 8 },
+ { DRM_FORMAT_ABGR1555, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 9 },
+ { DRM_FORMAT_RGB565, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 10 },
+ { DRM_FORMAT_BGR565, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 11 },
+ { DRM_FORMAT_UYVY, DE_VIDEO1, 12 },
+ { DRM_FORMAT_YUYV, DE_VIDEO1, 13 },
+ { DRM_FORMAT_NV12, DE_VIDEO1, 14 },
+ { DRM_FORMAT_YUV420, DE_VIDEO1, 15 },
+};
+
+#define MALIDP_ID(__group, __format) \
+ ((((__group) & 0x7) << 3) | ((__format) & 0x7))
+
+#define MALIDP_COMMON_FORMATS \
+ /* fourcc, layers supporting the format, internal id */ \
+ { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 0) }, \
+ { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 1) }, \
+ { DRM_FORMAT_RGBA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 2) }, \
+ { DRM_FORMAT_BGRA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 3) }, \
+ { DRM_FORMAT_ARGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 0) }, \
+ { DRM_FORMAT_ABGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 1) }, \
+ { DRM_FORMAT_RGBA8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 2) }, \
+ { DRM_FORMAT_BGRA8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 3) }, \
+ { DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 0) }, \
+ { DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 1) }, \
+ { DRM_FORMAT_RGBX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 2) }, \
+ { DRM_FORMAT_BGRX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 3) }, \
+ { DRM_FORMAT_RGB888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(3, 0) }, \
+ { DRM_FORMAT_BGR888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(3, 1) }, \
+ { DRM_FORMAT_RGBA5551, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 0) }, \
+ { DRM_FORMAT_ABGR1555, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 1) }, \
+ { DRM_FORMAT_RGB565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 2) }, \
+ { DRM_FORMAT_BGR565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 3) }, \
+ { DRM_FORMAT_YUYV, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 2) }, \
+ { DRM_FORMAT_UYVY, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 3) }, \
+ { DRM_FORMAT_NV12, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 6) }, \
+ { DRM_FORMAT_YUV420, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 7) }
+
+static const struct malidp_input_format malidp550_de_formats[] = {
+ MALIDP_COMMON_FORMATS,
+};
+
+static const struct malidp_layer malidp500_layers[] = {
+ { DE_VIDEO1, MALIDP500_DE_LV_BASE, MALIDP500_DE_LV_PTR_BASE },
+ { DE_GRAPHICS1, MALIDP500_DE_LG1_BASE, MALIDP500_DE_LG1_PTR_BASE },
+ { DE_GRAPHICS2, MALIDP500_DE_LG2_BASE, MALIDP500_DE_LG2_PTR_BASE },
+};
+
+static const struct malidp_layer malidp550_layers[] = {
+ { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE },
+ { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE },
+ { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE },
+ { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE },
+};
+
+#define MALIDP_DE_DEFAULT_PREFETCH_START 5
+
+static int malidp500_query_hw(struct malidp_hw_device *hwdev)
+{
+ u32 conf = malidp_hw_read(hwdev, MALIDP500_CONFIG_ID);
+ /* bit 4 of the CONFIG_ID register holds the line size multiplier */
+ u8 ln_size_mult = conf & 0x10 ? 2 : 1;
+
+ hwdev->min_line_size = 2;
+ hwdev->max_line_size = SZ_2K * ln_size_mult;
+ hwdev->rotation_memory[0] = SZ_1K * 64 * ln_size_mult;
+ hwdev->rotation_memory[1] = 0; /* no second rotation memory bank */
+
+ return 0;
+}
+
+static void malidp500_enter_config_mode(struct malidp_hw_device *hwdev)
+{
+ u32 status, count = 100;
+
+ malidp_hw_setbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL);
+ while (count) {
+ status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+ if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ)
+ break;
+ /*
+ * entering config mode can take as long as the rendering
+ * of a full frame, hence the long sleep here
+ */
+ usleep_range(1000, 10000);
+ count--;
+ }
+ WARN(count == 0, "timeout while entering config mode");
+}
+
+static void malidp500_leave_config_mode(struct malidp_hw_device *hwdev)
+{
+ u32 status, count = 100;
+
+ malidp_hw_clearbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL);
+ while (count) {
+ status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+ if ((status & MALIDP500_DC_CONFIG_REQ) == 0)
+ break;
+ usleep_range(100, 1000);
+ count--;
+ }
+ WARN(count == 0, "timeout while leaving config mode");
+}
+
+static bool malidp500_in_config_mode(struct malidp_hw_device *hwdev)
+{
+ u32 status;
+
+ status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+ if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ)
+ return true;
+
+ return false;
+}
+
+static void malidp500_set_config_valid(struct malidp_hw_device *hwdev)
+{
+ malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID);
+}
+
+static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode *mode)
+{
+ u32 val = 0;
+
+ malidp_hw_clearbits(hwdev, MALIDP500_DC_CLEAR_MASK, MALIDP500_DC_CONTROL);
+ if (mode->flags & DISPLAY_FLAGS_HSYNC_HIGH)
+ val |= MALIDP500_HSYNCPOL;
+ if (mode->flags & DISPLAY_FLAGS_VSYNC_HIGH)
+ val |= MALIDP500_VSYNCPOL;
+ val |= MALIDP_DE_DEFAULT_PREFETCH_START;
+ malidp_hw_setbits(hwdev, val, MALIDP500_DC_CONTROL);
+
+ /*
+ * Mali-DP500 encodes the background color like this:
+ * - red @ MALIDP500_BGND_COLOR[12:0]
+ * - green @ MALIDP500_BGND_COLOR[27:16]
+ * - blue @ (MALIDP500_BGND_COLOR + 4)[12:0]
+ */
+ val = ((MALIDP_BGND_COLOR_G & 0xfff) << 16) |
+ (MALIDP_BGND_COLOR_R & 0xfff);
+ malidp_hw_write(hwdev, val, MALIDP500_BGND_COLOR);
+ malidp_hw_write(hwdev, MALIDP_BGND_COLOR_B, MALIDP500_BGND_COLOR + 4);
+
+ val = MALIDP_DE_H_FRONTPORCH(mode->hfront_porch) |
+ MALIDP_DE_H_BACKPORCH(mode->hback_porch);
+ malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_H_TIMINGS);
+
+ val = MALIDP500_DE_V_FRONTPORCH(mode->vfront_porch) |
+ MALIDP_DE_V_BACKPORCH(mode->vback_porch);
+ malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_V_TIMINGS);
+
+ val = MALIDP_DE_H_SYNCWIDTH(mode->hsync_len) |
+ MALIDP_DE_V_SYNCWIDTH(mode->vsync_len);
+ malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_SYNC_WIDTH);
+
+ val = MALIDP_DE_H_ACTIVE(mode->hactive) | MALIDP_DE_V_ACTIVE(mode->vactive);
+ malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_HV_ACTIVE);
+
+ if (mode->flags & DISPLAY_FLAGS_INTERLACED)
+ malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
+ else
+ malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
+}
+
+static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt)
+{
+ unsigned int depth;
+ int bpp;
+
+ /* RGB888 or BGR888 can't be rotated */
+ if ((fmt == DRM_FORMAT_RGB888) || (fmt == DRM_FORMAT_BGR888))
+ return -EINVAL;
+
+ /*
+ * Each layer needs enough rotation memory to fit 8 lines
+ * worth of pixel data. Required size is then:
+ * size = rotated_width * (bpp / 8) * 8;
+ */
+ drm_fb_get_bpp_depth(fmt, &depth, &bpp);
+
+ return w * bpp;
+}
+
+static int malidp550_query_hw(struct malidp_hw_device *hwdev)
+{
+ u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID);
+ u8 ln_size = (conf >> 4) & 0x3, rsize;
+
+ hwdev->min_line_size = 2;
+
+ switch (ln_size) {
+ case 0:
+ hwdev->max_line_size = SZ_2K;
+ /* two banks of 64KB for rotation memory */
+ rsize = 64;
+ break;
+ case 1:
+ hwdev->max_line_size = SZ_4K;
+ /* two banks of 128KB for rotation memory */
+ rsize = 128;
+ break;
+ case 2:
+ hwdev->max_line_size = 1280;
+ /* two banks of 40KB for rotation memory */
+ rsize = 40;
+ break;
+ case 3:
+ /* reserved value */
+ hwdev->max_line_size = 0;
+ return -EINVAL;
+ }
+
+ hwdev->rotation_memory[0] = hwdev->rotation_memory[1] = rsize * SZ_1K;
+ return 0;
+}
+
+static void malidp550_enter_config_mode(struct malidp_hw_device *hwdev)
+{
+ u32 status, count = 100;
+
+ malidp_hw_setbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL);
+ while (count) {
+ status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+ if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ)
+ break;
+ /*
+ * entering config mode can take as long as the rendering
+ * of a full frame, hence the long sleep here
+ */
+ usleep_range(1000, 10000);
+ count--;
+ }
+ WARN(count == 0, "timeout while entering config mode");
+}
+
+static void malidp550_leave_config_mode(struct malidp_hw_device *hwdev)
+{
+ u32 status, count = 100;
+
+ malidp_hw_clearbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL);
+ while (count) {
+ status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+ if ((status & MALIDP550_DC_CONFIG_REQ) == 0)
+ break;
+ usleep_range(100, 1000);
+ count--;
+ }
+ WARN(count == 0, "timeout while leaving config mode");
+}
+
+static bool malidp550_in_config_mode(struct malidp_hw_device *hwdev)
+{
+ u32 status;
+
+ status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+ if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ)
+ return true;
+
+ return false;
+}
+
+static void malidp550_set_config_valid(struct malidp_hw_device *hwdev)
+{
+ malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID);
+}
+
+static void malidp550_modeset(struct malidp_hw_device *hwdev, struct videomode *mode)
+{
+ u32 val = MALIDP_DE_DEFAULT_PREFETCH_START;
+
+ malidp_hw_write(hwdev, val, MALIDP550_DE_CONTROL);
+ /*
+ * Mali-DP550 and Mali-DP650 encode the background color like this:
+ * - red @ MALIDP550_DE_BGND_COLOR[23:16]
+ * - green @ MALIDP550_DE_BGND_COLOR[15:8]
+ * - blue @ MALIDP550_DE_BGND_COLOR[7:0]
+ *
+ * We need to truncate the least significant 4 bits from the default
+ * MALIDP_BGND_COLOR_x values
+ */
+ val = (((MALIDP_BGND_COLOR_R >> 4) & 0xff) << 16) |
+ (((MALIDP_BGND_COLOR_G >> 4) & 0xff) << 8) |
+ ((MALIDP_BGND_COLOR_B >> 4) & 0xff);
+ malidp_hw_write(hwdev, val, MALIDP550_DE_BGND_COLOR);
+
+ val = MALIDP_DE_H_FRONTPORCH(mode->hfront_porch) |
+ MALIDP_DE_H_BACKPORCH(mode->hback_porch);
+ malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_H_TIMINGS);
+
+ val = MALIDP550_DE_V_FRONTPORCH(mode->vfront_porch) |
+ MALIDP_DE_V_BACKPORCH(mode->vback_porch);
+ malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_V_TIMINGS);
+
+ val = MALIDP_DE_H_SYNCWIDTH(mode->hsync_len) |
+ MALIDP_DE_V_SYNCWIDTH(mode->vsync_len);
+ if (mode->flags & DISPLAY_FLAGS_HSYNC_HIGH)
+ val |= MALIDP550_HSYNCPOL;
+ if (mode->flags & DISPLAY_FLAGS_VSYNC_HIGH)
+ val |= MALIDP550_VSYNCPOL;
+ malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_SYNC_WIDTH);
+
+ val = MALIDP_DE_H_ACTIVE(mode->hactive) | MALIDP_DE_V_ACTIVE(mode->vactive);
+ malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_HV_ACTIVE);
+
+ if (mode->flags & DISPLAY_FLAGS_INTERLACED)
+ malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
+ else
+ malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
+}
+
+static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt)
+{
+ u32 bytes_per_col;
+
+ /* raw RGB888 or BGR888 can't be rotated */
+ if ((fmt == DRM_FORMAT_RGB888) || (fmt == DRM_FORMAT_BGR888))
+ return -EINVAL;
+
+ switch (fmt) {
+ /* 8 lines at 4 bytes per pixel */
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_BGRA1010102:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ /* 16 lines at 2 bytes per pixel */
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_YUYV:
+ bytes_per_col = 32;
+ break;
+ /* 16 lines at 1.5 bytes per pixel */
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_YUV420:
+ bytes_per_col = 24;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return w * bytes_per_col;
+}
+
+static int malidp650_query_hw(struct malidp_hw_device *hwdev)
+{
+ u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID);
+ u8 ln_size = (conf >> 4) & 0x3, rsize;
+
+ hwdev->min_line_size = 4;
+
+ switch (ln_size) {
+ case 0:
+ case 2:
+ /* reserved values */
+ hwdev->max_line_size = 0;
+ return -EINVAL;
+ case 1:
+ hwdev->max_line_size = SZ_4K;
+ /* two banks of 128KB for rotation memory */
+ rsize = 128;
+ break;
+ case 3:
+ hwdev->max_line_size = 2560;
+ /* two banks of 80KB for rotation memory */
+ rsize = 80;
+ }
+
+ hwdev->rotation_memory[0] = hwdev->rotation_memory[1] = rsize * SZ_1K;
+ return 0;
+}
+
+const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
+ [MALIDP_500] = {
+ .map = {
+ .se_base = MALIDP500_SE_BASE,
+ .dc_base = MALIDP500_DC_BASE,
+ .out_depth_base = MALIDP500_OUTPUT_DEPTH,
+ .features = 0, /* no CLEARIRQ register */
+ .n_layers = ARRAY_SIZE(malidp500_layers),
+ .layers = malidp500_layers,
+ .de_irq_map = {
+ .irq_mask = MALIDP_DE_IRQ_UNDERRUN |
+ MALIDP500_DE_IRQ_AXI_ERR |
+ MALIDP500_DE_IRQ_VSYNC |
+ MALIDP500_DE_IRQ_GLOBAL,
+ .vsync_irq = MALIDP500_DE_IRQ_VSYNC,
+ },
+ .se_irq_map = {
+ .irq_mask = MALIDP500_SE_IRQ_CONF_MODE,
+ .vsync_irq = 0,
+ },
+ .dc_irq_map = {
+ .irq_mask = MALIDP500_DE_IRQ_CONF_VALID,
+ .vsync_irq = MALIDP500_DE_IRQ_CONF_VALID,
+ },
+ .input_formats = malidp500_de_formats,
+ .n_input_formats = ARRAY_SIZE(malidp500_de_formats),
+ },
+ .query_hw = malidp500_query_hw,
+ .enter_config_mode = malidp500_enter_config_mode,
+ .leave_config_mode = malidp500_leave_config_mode,
+ .in_config_mode = malidp500_in_config_mode,
+ .set_config_valid = malidp500_set_config_valid,
+ .modeset = malidp500_modeset,
+ .rotmem_required = malidp500_rotmem_required,
+ },
+ [MALIDP_550] = {
+ .map = {
+ .se_base = MALIDP550_SE_BASE,
+ .dc_base = MALIDP550_DC_BASE,
+ .out_depth_base = MALIDP550_DE_OUTPUT_DEPTH,
+ .features = MALIDP_REGMAP_HAS_CLEARIRQ,
+ .n_layers = ARRAY_SIZE(malidp550_layers),
+ .layers = malidp550_layers,
+ .de_irq_map = {
+ .irq_mask = MALIDP_DE_IRQ_UNDERRUN |
+ MALIDP550_DE_IRQ_VSYNC,
+ .vsync_irq = MALIDP550_DE_IRQ_VSYNC,
+ },
+ .se_irq_map = {
+ .irq_mask = MALIDP550_SE_IRQ_EOW |
+ MALIDP550_SE_IRQ_AXI_ERR,
+ },
+ .dc_irq_map = {
+ .irq_mask = MALIDP550_DC_IRQ_CONF_VALID,
+ .vsync_irq = MALIDP550_DC_IRQ_CONF_VALID,
+ },
+ .input_formats = malidp550_de_formats,
+ .n_input_formats = ARRAY_SIZE(malidp550_de_formats),
+ },
+ .query_hw = malidp550_query_hw,
+ .enter_config_mode = malidp550_enter_config_mode,
+ .leave_config_mode = malidp550_leave_config_mode,
+ .in_config_mode = malidp550_in_config_mode,
+ .set_config_valid = malidp550_set_config_valid,
+ .modeset = malidp550_modeset,
+ .rotmem_required = malidp550_rotmem_required,
+ },
+ [MALIDP_650] = {
+ .map = {
+ .se_base = MALIDP550_SE_BASE,
+ .dc_base = MALIDP550_DC_BASE,
+ .out_depth_base = MALIDP550_DE_OUTPUT_DEPTH,
+ .features = MALIDP_REGMAP_HAS_CLEARIRQ,
+ .n_layers = ARRAY_SIZE(malidp550_layers),
+ .layers = malidp550_layers,
+ .de_irq_map = {
+ .irq_mask = MALIDP_DE_IRQ_UNDERRUN |
+ MALIDP650_DE_IRQ_DRIFT |
+ MALIDP550_DE_IRQ_VSYNC,
+ .vsync_irq = MALIDP550_DE_IRQ_VSYNC,
+ },
+ .se_irq_map = {
+ .irq_mask = MALIDP550_SE_IRQ_EOW |
+ MALIDP550_SE_IRQ_AXI_ERR,
+ },
+ .dc_irq_map = {
+ .irq_mask = MALIDP550_DC_IRQ_CONF_VALID,
+ .vsync_irq = MALIDP550_DC_IRQ_CONF_VALID,
+ },
+ .input_formats = malidp550_de_formats,
+ .n_input_formats = ARRAY_SIZE(malidp550_de_formats),
+ },
+ .query_hw = malidp650_query_hw,
+ .enter_config_mode = malidp550_enter_config_mode,
+ .leave_config_mode = malidp550_leave_config_mode,
+ .in_config_mode = malidp550_in_config_mode,
+ .set_config_valid = malidp550_set_config_valid,
+ .modeset = malidp550_modeset,
+ .rotmem_required = malidp550_rotmem_required,
+ },
+};
+
+u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
+ u8 layer_id, u32 format)
+{
+ unsigned int i;
+
+ for (i = 0; i < map->n_input_formats; i++) {
+ if (((map->input_formats[i].layer & layer_id) == layer_id) &&
+ (map->input_formats[i].format == format))
+ return map->input_formats[i].id;
+ }
+
+ return MALIDP_INVALID_FORMAT_ID;
+}
+
+static void malidp_hw_clear_irq(struct malidp_hw_device *hwdev, u8 block, u32 irq)
+{
+ u32 base = malidp_get_block_base(hwdev, block);
+
+ if (hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ)
+ malidp_hw_write(hwdev, irq, base + MALIDP_REG_CLEARIRQ);
+ else
+ malidp_hw_write(hwdev, irq, base + MALIDP_REG_STATUS);
+}
+
+static irqreturn_t malidp_de_irq(int irq, void *arg)
+{
+ struct drm_device *drm = arg;
+ struct malidp_drm *malidp = drm->dev_private;
+ struct malidp_hw_device *hwdev;
+ const struct malidp_irq_map *de;
+ u32 status, mask, dc_status;
+ irqreturn_t ret = IRQ_NONE;
+
+ if (!drm->dev_private)
+ return IRQ_HANDLED;
+
+ hwdev = malidp->dev;
+ de = &hwdev->map.de_irq_map;
+
+ /* first handle the config valid IRQ */
+ dc_status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+ if (dc_status & hwdev->map.dc_irq_map.vsync_irq) {
+ /* we have a page flip event */
+ atomic_set(&malidp->config_valid, 1);
+ malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, dc_status);
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ status = malidp_hw_read(hwdev, MALIDP_REG_STATUS);
+ if (!(status & de->irq_mask))
+ return ret;
+
+ mask = malidp_hw_read(hwdev, MALIDP_REG_MASKIRQ);
+ status &= mask;
+ if (status & de->vsync_irq)
+ drm_crtc_handle_vblank(&malidp->crtc);
+
+ malidp_hw_clear_irq(hwdev, MALIDP_DE_BLOCK, status);
+
+ return (ret == IRQ_NONE) ? IRQ_HANDLED : ret;
+}
+
+static irqreturn_t malidp_de_irq_thread_handler(int irq, void *arg)
+{
+ struct drm_device *drm = arg;
+ struct malidp_drm *malidp = drm->dev_private;
+
+ wake_up(&malidp->wq);
+
+ return IRQ_HANDLED;
+}
+
+int malidp_de_irq_init(struct drm_device *drm, int irq)
+{
+ struct malidp_drm *malidp = drm->dev_private;
+ struct malidp_hw_device *hwdev = malidp->dev;
+ int ret;
+
+ /* ensure interrupts are disabled */
+ malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK, 0xffffffff);
+ malidp_hw_clear_irq(hwdev, MALIDP_DE_BLOCK, 0xffffffff);
+ malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK, 0xffffffff);
+ malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, 0xffffffff);
+
+ ret = devm_request_threaded_irq(drm->dev, irq, malidp_de_irq,
+ malidp_de_irq_thread_handler,
+ IRQF_SHARED, "malidp-de", drm);
+ if (ret < 0) {
+ DRM_ERROR("failed to install DE IRQ handler\n");
+ return ret;
+ }
+
+ /* first enable the DC block IRQs */
+ malidp_hw_enable_irq(hwdev, MALIDP_DC_BLOCK,
+ hwdev->map.dc_irq_map.irq_mask);
+
+ /* now enable the DE block IRQs */
+ malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
+ hwdev->map.de_irq_map.irq_mask);
+
+ return 0;
+}
+
+void malidp_de_irq_fini(struct drm_device *drm)
+{
+ struct malidp_drm *malidp = drm->dev_private;
+ struct malidp_hw_device *hwdev = malidp->dev;
+
+ malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK,
+ hwdev->map.de_irq_map.irq_mask);
+ malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK,
+ hwdev->map.dc_irq_map.irq_mask);
+}
+
+static irqreturn_t malidp_se_irq(int irq, void *arg)
+{
+ struct drm_device *drm = arg;
+ struct malidp_drm *malidp = drm->dev_private;
+ struct malidp_hw_device *hwdev = malidp->dev;
+ u32 status, mask;
+
+ status = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_STATUS);
+ if (!(status & hwdev->map.se_irq_map.irq_mask))
+ return IRQ_NONE;
+
+ mask = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_MASKIRQ);
+ status = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_STATUS);
+ status &= mask;
+	/* TODO: decode the status bits and fire VSYNC and page flip events */

+
+ malidp_hw_clear_irq(hwdev, MALIDP_SE_BLOCK, status);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t malidp_se_irq_thread_handler(int irq, void *arg)
+{
+ return IRQ_HANDLED;
+}
+
+int malidp_se_irq_init(struct drm_device *drm, int irq)
+{
+ struct malidp_drm *malidp = drm->dev_private;
+ struct malidp_hw_device *hwdev = malidp->dev;
+ int ret;
+
+ /* ensure interrupts are disabled */
+ malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK, 0xffffffff);
+ malidp_hw_clear_irq(hwdev, MALIDP_SE_BLOCK, 0xffffffff);
+
+ ret = devm_request_threaded_irq(drm->dev, irq, malidp_se_irq,
+ malidp_se_irq_thread_handler,
+ IRQF_SHARED, "malidp-se", drm);
+ if (ret < 0) {
+ DRM_ERROR("failed to install SE IRQ handler\n");
+ return ret;
+ }
+
+ malidp_hw_enable_irq(hwdev, MALIDP_SE_BLOCK,
+ hwdev->map.se_irq_map.irq_mask);
+
+ return 0;
+}
+
+void malidp_se_irq_fini(struct drm_device *drm)
+{
+ struct malidp_drm *malidp = drm->dev_private;
+ struct malidp_hw_device *hwdev = malidp->dev;
+
+ malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK,
+ hwdev->map.se_irq_map.irq_mask);
+}
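+
+/*
+ * Illustrative sketch, not from the original patch: probe-time wiring of the
+ * two interrupt lines handled above; de_irq and se_irq are hypothetical
+ * platform IRQ numbers.
+ */
+#if 0
+	ret = malidp_de_irq_init(drm, de_irq);
+	if (ret)
+		return ret;
+	ret = malidp_se_irq_init(drm, se_irq);
+	if (ret) {
+		malidp_de_irq_fini(drm);
+		return ret;
+	}
+#endif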
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
new file mode 100644
index 000000000..141743e9f
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -0,0 +1,241 @@
+/*
+ *
+ * (C) COPYRIGHT 2013-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * ARM Mali DP hardware manipulation routines.
+ */
+
+#ifndef __MALIDP_HW_H__
+#define __MALIDP_HW_H__
+
+#include <linux/bitops.h>
+#include "malidp_regs.h"
+
+struct videomode;
+struct clk;
+
+/* Mali DP IP blocks */
+enum {
+ MALIDP_DE_BLOCK = 0,
+ MALIDP_SE_BLOCK,
+ MALIDP_DC_BLOCK
+};
+
+/* Mali DP layer IDs */
+enum {
+ DE_VIDEO1 = BIT(0),
+ DE_GRAPHICS1 = BIT(1),
+ DE_GRAPHICS2 = BIT(2), /* used only in DP500 */
+ DE_VIDEO2 = BIT(3),
+ DE_SMART = BIT(4),
+};
+
+struct malidp_input_format {
+ u32 format; /* DRM fourcc */
+ u8 layer; /* bitmask of layers supporting it */
+ u8 id; /* used internally */
+};
+
+#define MALIDP_INVALID_FORMAT_ID 0xff
+
+/*
+ * Hide the differences between the register maps by using a common
+ * structure to hold the base register offsets.
+ */
+
+struct malidp_irq_map {
+ u32 irq_mask; /* mask of IRQs that can be enabled in the block */
+ u32 vsync_irq; /* IRQ bit used for signaling during VSYNC */
+};
+
+struct malidp_layer {
+ u16 id; /* layer ID */
+ u16 base; /* address offset for the register bank */
+ u16 ptr; /* address offset for the pointer register */
+};
+
+/* regmap features */
+#define MALIDP_REGMAP_HAS_CLEARIRQ (1 << 0)
+
+struct malidp_hw_regmap {
+	/* the DE register bank always starts at address offset 0x0000 */
+	/* address offset of the SE register bank */
+	const u16 se_base;
+	/* address offset of the DC register bank */
+ const u16 dc_base;
+
+ /* address offset for the output depth register */
+ const u16 out_depth_base;
+
+ /* bitmap with register map features */
+ const u8 features;
+
+ /* list of supported layers */
+ const u8 n_layers;
+ const struct malidp_layer *layers;
+
+ const struct malidp_irq_map de_irq_map;
+ const struct malidp_irq_map se_irq_map;
+ const struct malidp_irq_map dc_irq_map;
+
+ /* list of supported input formats for each layer */
+ const struct malidp_input_format *input_formats;
+ const u8 n_input_formats;
+};
+
+struct malidp_hw_device {
+ const struct malidp_hw_regmap map;
+ void __iomem *regs;
+
+ /* APB clock */
+ struct clk *pclk;
+ /* AXI clock */
+ struct clk *aclk;
+ /* main clock for display core */
+ struct clk *mclk;
+ /* pixel clock for display core */
+ struct clk *pxlclk;
+
+ /*
+ * Validate the driver instance against the hardware bits
+ */
+ int (*query_hw)(struct malidp_hw_device *hwdev);
+
+ /*
+ * Set the hardware into config mode, ready to accept mode changes
+ */
+ void (*enter_config_mode)(struct malidp_hw_device *hwdev);
+
+ /*
+ * Tell hardware to exit configuration mode
+ */
+ void (*leave_config_mode)(struct malidp_hw_device *hwdev);
+
+ /*
+	 * Query whether the hardware is in configuration mode
+ */
+ bool (*in_config_mode)(struct malidp_hw_device *hwdev);
+
+ /*
+	 * Set the configuration valid flag for hardware parameters that can
+	 * be changed outside configuration mode. Once the flag is set, the
+	 * hardware applies the new settings at the end of the current
+	 * buffer's scanout.
+ */
+ void (*set_config_valid)(struct malidp_hw_device *hwdev);
+
+ /*
+ * Set a new mode in hardware. Requires the hardware to be in
+ * configuration mode before this function is called.
+ */
+ void (*modeset)(struct malidp_hw_device *hwdev, struct videomode *m);
+
+ /*
+ * Calculate the required rotation memory given the active area
+ * and the buffer format.
+ */
+ int (*rotmem_required)(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt);
+
+ u8 features;
+
+ u8 min_line_size;
+ u16 max_line_size;
+
+ /* size of memory used for rotating layers, up to two banks available */
+ u32 rotation_memory[2];
+};
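+
+/*
+ * Illustrative sketch, not from the original patch: one plausible ordering
+ * of the hooks above for a full mode change, following the comment on each
+ * callback (vm is a hypothetical struct videomode):
+ *
+ *	hwdev->enter_config_mode(hwdev);
+ *	hwdev->modeset(hwdev, &vm);
+ *	hwdev->leave_config_mode(hwdev);
+ *	hwdev->set_config_valid(hwdev);
+ */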
+
+/* Supported variants of the hardware */
+enum {
+ MALIDP_500 = 0,
+ MALIDP_550,
+ MALIDP_650,
+ /* keep the next entry last */
+ MALIDP_MAX_DEVICES
+};
+
+extern const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES];
+
+static inline u32 malidp_hw_read(struct malidp_hw_device *hwdev, u32 reg)
+{
+ return readl(hwdev->regs + reg);
+}
+
+static inline void malidp_hw_write(struct malidp_hw_device *hwdev,
+ u32 value, u32 reg)
+{
+ writel(value, hwdev->regs + reg);
+}
+
+static inline void malidp_hw_setbits(struct malidp_hw_device *hwdev,
+ u32 mask, u32 reg)
+{
+ u32 data = malidp_hw_read(hwdev, reg);
+
+ data |= mask;
+ malidp_hw_write(hwdev, data, reg);
+}
+
+static inline void malidp_hw_clearbits(struct malidp_hw_device *hwdev,
+ u32 mask, u32 reg)
+{
+ u32 data = malidp_hw_read(hwdev, reg);
+
+ data &= ~mask;
+ malidp_hw_write(hwdev, data, reg);
+}
+
+static inline u32 malidp_get_block_base(struct malidp_hw_device *hwdev,
+ u8 block)
+{
+ switch (block) {
+ case MALIDP_SE_BLOCK:
+ return hwdev->map.se_base;
+ case MALIDP_DC_BLOCK:
+ return hwdev->map.dc_base;
+ }
+
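+	/* the DE block registers always start at offset 0 */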
+ return 0;
+}
+
+static inline void malidp_hw_disable_irq(struct malidp_hw_device *hwdev,
+ u8 block, u32 irq)
+{
+ u32 base = malidp_get_block_base(hwdev, block);
+
+ malidp_hw_clearbits(hwdev, irq, base + MALIDP_REG_MASKIRQ);
+}
+
+static inline void malidp_hw_enable_irq(struct malidp_hw_device *hwdev,
+ u8 block, u32 irq)
+{
+ u32 base = malidp_get_block_base(hwdev, block);
+
+ malidp_hw_setbits(hwdev, irq, base + MALIDP_REG_MASKIRQ);
+}
+
+int malidp_de_irq_init(struct drm_device *drm, int irq);
+void malidp_de_irq_fini(struct drm_device *drm);
+int malidp_se_irq_init(struct drm_device *drm, int irq);
+void malidp_se_irq_fini(struct drm_device *drm);
+
+u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
+ u8 layer_id, u32 format);
+
+/*
+ * background color components are defined as 12bits values,
+ * they will be shifted right when stored on hardware that
+ * supports only 8bits per channel
+ */
+#define MALIDP_BGND_COLOR_R 0x000
+#define MALIDP_BGND_COLOR_G 0x000
+#define MALIDP_BGND_COLOR_B 0x000
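+
+/*
+ * Editorial note: on an 8-bit-per-channel part the 12-bit components above
+ * are narrowed before being written out, e.g. (value >> 4) keeps the top
+ * eight bits.
+ */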
+
+#endif /* __MALIDP_HW_H__ */
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
new file mode 100644
index 000000000..725098d61
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -0,0 +1,298 @@
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Liviu Dudau <Liviu.Dudau@arm.com>
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * ARM Mali DP plane manipulation routines.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "malidp_hw.h"
+#include "malidp_drv.h"
+
+/* Layer specific register offsets */
+#define MALIDP_LAYER_FORMAT 0x000
+#define MALIDP_LAYER_CONTROL 0x004
+#define LAYER_ENABLE (1 << 0)
+#define LAYER_ROT_OFFSET 8
+#define LAYER_H_FLIP (1 << 10)
+#define LAYER_V_FLIP (1 << 11)
+#define LAYER_ROT_MASK (0xf << 8)
+#define MALIDP_LAYER_SIZE 0x00c
+#define LAYER_H_VAL(x) (((x) & 0x1fff) << 0)
+#define LAYER_V_VAL(x) (((x) & 0x1fff) << 16)
+#define MALIDP_LAYER_COMP_SIZE 0x010
+#define MALIDP_LAYER_OFFSET 0x014
+#define MALIDP_LAYER_STRIDE 0x018
+
+static void malidp_de_plane_destroy(struct drm_plane *plane)
+{
+ struct malidp_plane *mp = to_malidp_plane(plane);
+
+ if (mp->base.fb)
+ drm_framebuffer_unreference(mp->base.fb);
+
+ drm_plane_helper_disable(plane);
+ drm_plane_cleanup(plane);
+ devm_kfree(plane->dev->dev, mp);
+}
+
+struct drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane)
+{
+ struct malidp_plane_state *state, *m_state;
+
+ if (!plane->state)
+ return NULL;
+
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (state) {
+ m_state = to_malidp_plane_state(plane->state);
+ __drm_atomic_helper_plane_duplicate_state(plane, &state->base);
+ state->rotmem_size = m_state->rotmem_size;
+ }
+
+ return &state->base;
+}
+
+void malidp_destroy_plane_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct malidp_plane_state *m_state = to_malidp_plane_state(state);
+
+ __drm_atomic_helper_plane_destroy_state(state);
+ kfree(m_state);
+}
+
+static const struct drm_plane_funcs malidp_de_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = malidp_de_plane_destroy,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = malidp_duplicate_plane_state,
+ .atomic_destroy_state = malidp_destroy_plane_state,
+};
+
+static int malidp_de_plane_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct malidp_plane *mp = to_malidp_plane(plane);
+ struct malidp_plane_state *ms = to_malidp_plane_state(state);
+ u8 format_id;
+ u32 src_w, src_h;
+
+ if (!state->crtc || !state->fb)
+ return 0;
+
+ format_id = malidp_hw_get_format_id(&mp->hwdev->map, mp->layer->id,
+ state->fb->pixel_format);
+ if (format_id == MALIDP_INVALID_FORMAT_ID)
+ return -EINVAL;
+
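+	/*
+	 * Editorial note: DRM plane source coordinates are 16.16 fixed point,
+	 * so a 1920-pixel source width arrives as 1920 << 16; shifting right
+	 * by 16 recovers the integer part.
+	 */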
+ src_w = state->src_w >> 16;
+ src_h = state->src_h >> 16;
+
+ if ((state->crtc_w > mp->hwdev->max_line_size) ||
+ (state->crtc_h > mp->hwdev->max_line_size) ||
+ (state->crtc_w < mp->hwdev->min_line_size) ||
+ (state->crtc_h < mp->hwdev->min_line_size) ||
+ (state->crtc_w != src_w) || (state->crtc_h != src_h))
+ return -EINVAL;
+
+ /* packed RGB888 / BGR888 can't be rotated or flipped */
+ if (state->rotation != BIT(DRM_ROTATE_0) &&
+ (state->fb->pixel_format == DRM_FORMAT_RGB888 ||
+ state->fb->pixel_format == DRM_FORMAT_BGR888))
+ return -EINVAL;
+
+ ms->rotmem_size = 0;
+ if (state->rotation & MALIDP_ROTATED_MASK) {
+ int val;
+
+ val = mp->hwdev->rotmem_required(mp->hwdev, state->crtc_h,
+ state->crtc_w,
+ state->fb->pixel_format);
+ if (val < 0)
+ return val;
+
+ ms->rotmem_size = val;
+ }
+
+ return 0;
+}
+
+static void malidp_de_plane_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_gem_cma_object *obj;
+ struct malidp_plane *mp;
+ const struct malidp_hw_regmap *map;
+ u8 format_id;
+ u16 ptr;
+ u32 format, src_w, src_h, dest_w, dest_h, val = 0;
+ int num_planes, i;
+
+ mp = to_malidp_plane(plane);
+
+ map = &mp->hwdev->map;
+ format = plane->state->fb->pixel_format;
+ format_id = malidp_hw_get_format_id(map, mp->layer->id, format);
+ num_planes = drm_format_num_planes(format);
+
+ /* convert src values from Q16 fixed point to integer */
+ src_w = plane->state->src_w >> 16;
+ src_h = plane->state->src_h >> 16;
+ if (plane->state->rotation & MALIDP_ROTATED_MASK) {
+ dest_w = plane->state->crtc_h;
+ dest_h = plane->state->crtc_w;
+ } else {
+ dest_w = plane->state->crtc_w;
+ dest_h = plane->state->crtc_h;
+ }
+
+ malidp_hw_write(mp->hwdev, format_id, mp->layer->base);
+
+ for (i = 0; i < num_planes; i++) {
+ /* calculate the offset for the layer's plane registers */
+ ptr = mp->layer->ptr + (i << 4);
+
+ obj = drm_fb_cma_get_gem_obj(plane->state->fb, i);
+ malidp_hw_write(mp->hwdev, lower_32_bits(obj->paddr), ptr);
+ malidp_hw_write(mp->hwdev, upper_32_bits(obj->paddr), ptr + 4);
+ malidp_hw_write(mp->hwdev, plane->state->fb->pitches[i],
+ mp->layer->base + MALIDP_LAYER_STRIDE);
+ }
+
+ malidp_hw_write(mp->hwdev, LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h),
+ mp->layer->base + MALIDP_LAYER_SIZE);
+
+ malidp_hw_write(mp->hwdev, LAYER_H_VAL(dest_w) | LAYER_V_VAL(dest_h),
+ mp->layer->base + MALIDP_LAYER_COMP_SIZE);
+
+ malidp_hw_write(mp->hwdev, LAYER_H_VAL(plane->state->crtc_x) |
+ LAYER_V_VAL(plane->state->crtc_y),
+ mp->layer->base + MALIDP_LAYER_OFFSET);
+
+ /* first clear the rotation bits in the register */
+ malidp_hw_clearbits(mp->hwdev, LAYER_ROT_MASK,
+ mp->layer->base + MALIDP_LAYER_CONTROL);
+
+ /* setup the rotation and axis flip bits */
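+	/*
+	 * Editorial note: the rotation value carries one bit per angle
+	 * (90 degrees is BIT(DRM_ROTATE_90)), so ilog2() converts the set
+	 * bit into the 0-3 index that the hardware rotation field expects.
+	 */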
+ if (plane->state->rotation & DRM_ROTATE_MASK)
+ val = ilog2(plane->state->rotation & DRM_ROTATE_MASK) << LAYER_ROT_OFFSET;
+ if (plane->state->rotation & BIT(DRM_REFLECT_X))
+ val |= LAYER_V_FLIP;
+ if (plane->state->rotation & BIT(DRM_REFLECT_Y))
+ val |= LAYER_H_FLIP;
+
+ /* set the 'enable layer' bit */
+ val |= LAYER_ENABLE;
+
+ malidp_hw_setbits(mp->hwdev, val,
+ mp->layer->base + MALIDP_LAYER_CONTROL);
+}
+
+static void malidp_de_plane_disable(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct malidp_plane *mp = to_malidp_plane(plane);
+
+ malidp_hw_clearbits(mp->hwdev, LAYER_ENABLE,
+ mp->layer->base + MALIDP_LAYER_CONTROL);
+}
+
+static const struct drm_plane_helper_funcs malidp_de_plane_helper_funcs = {
+ .atomic_check = malidp_de_plane_check,
+ .atomic_update = malidp_de_plane_update,
+ .atomic_disable = malidp_de_plane_disable,
+};
+
+int malidp_de_planes_init(struct drm_device *drm)
+{
+ struct malidp_drm *malidp = drm->dev_private;
+ const struct malidp_hw_regmap *map = &malidp->dev->map;
+ struct malidp_plane *plane = NULL;
+ enum drm_plane_type plane_type;
+ unsigned long crtcs = 1 << drm->mode_config.num_crtc;
+ u32 *formats;
+ int ret, i, j, n;
+
+ formats = kcalloc(map->n_input_formats, sizeof(*formats), GFP_KERNEL);
+ if (!formats) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ for (i = 0; i < map->n_layers; i++) {
+ u8 id = map->layers[i].id;
+
+ plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+ if (!plane) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+		/* build the list of supported DRM formats based on the map */
+ for (n = 0, j = 0; j < map->n_input_formats; j++) {
+ if ((map->input_formats[j].layer & id) == id)
+ formats[n++] = map->input_formats[j].format;
+ }
+
+ plane_type = (i == 0) ? DRM_PLANE_TYPE_PRIMARY :
+ DRM_PLANE_TYPE_OVERLAY;
+ ret = drm_universal_plane_init(drm, &plane->base, crtcs,
+ &malidp_de_plane_funcs, formats,
+ n, plane_type, NULL);
+ if (ret < 0)
+ goto cleanup;
+
+ if (!drm->mode_config.rotation_property) {
+ unsigned long flags = BIT(DRM_ROTATE_0) |
+ BIT(DRM_ROTATE_90) |
+ BIT(DRM_ROTATE_180) |
+ BIT(DRM_ROTATE_270) |
+ BIT(DRM_REFLECT_X) |
+ BIT(DRM_REFLECT_Y);
+ drm->mode_config.rotation_property =
+ drm_mode_create_rotation_property(drm, flags);
+ }
+ /* SMART layer can't be rotated */
+ if (drm->mode_config.rotation_property && (id != DE_SMART))
+ drm_object_attach_property(&plane->base.base,
+ drm->mode_config.rotation_property,
+ BIT(DRM_ROTATE_0));
+
+ drm_plane_helper_add(&plane->base,
+ &malidp_de_plane_helper_funcs);
+ plane->hwdev = malidp->dev;
+ plane->layer = &map->layers[i];
+ }
+
+ kfree(formats);
+
+ return 0;
+
+cleanup:
+ malidp_de_planes_destroy(drm);
+ kfree(formats);
+
+ return ret;
+}
+
+void malidp_de_planes_destroy(struct drm_device *drm)
+{
+ struct drm_plane *p, *pt;
+
+ list_for_each_entry_safe(p, pt, &drm->mode_config.plane_list, head) {
+ drm_plane_cleanup(p);
+ kfree(p);
+ }
+}
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
new file mode 100644
index 000000000..73fecb38f
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -0,0 +1,172 @@
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Liviu Dudau <Liviu.Dudau@arm.com>
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * ARM Mali DP500/DP550/DP650 registers definition.
+ */
+
+#ifndef __MALIDP_REGS_H__
+#define __MALIDP_REGS_H__
+
+/*
+ * abbreviations used:
+ * - DC - display core (general settings)
+ * - DE - display engine
+ * - SE - scaling engine
+ */
+
+/* interrupt bit masks */
+#define MALIDP_DE_IRQ_UNDERRUN (1 << 0)
+
+#define MALIDP500_DE_IRQ_AXI_ERR (1 << 4)
+#define MALIDP500_DE_IRQ_VSYNC (1 << 5)
+#define MALIDP500_DE_IRQ_PROG_LINE (1 << 6)
+#define MALIDP500_DE_IRQ_SATURATION (1 << 7)
+#define MALIDP500_DE_IRQ_CONF_VALID (1 << 8)
+#define MALIDP500_DE_IRQ_CONF_MODE (1 << 11)
+#define MALIDP500_DE_IRQ_CONF_ACTIVE (1 << 17)
+#define MALIDP500_DE_IRQ_PM_ACTIVE (1 << 18)
+#define MALIDP500_DE_IRQ_TESTMODE_ACTIVE (1 << 19)
+#define MALIDP500_DE_IRQ_FORCE_BLNK_ACTIVE (1 << 24)
+#define MALIDP500_DE_IRQ_AXI_BUSY (1 << 28)
+#define MALIDP500_DE_IRQ_GLOBAL (1 << 31)
+#define MALIDP500_SE_IRQ_CONF_MODE (1 << 0)
+#define MALIDP500_SE_IRQ_CONF_VALID (1 << 4)
+#define MALIDP500_SE_IRQ_INIT_BUSY (1 << 5)
+#define MALIDP500_SE_IRQ_AXI_ERROR (1 << 8)
+#define MALIDP500_SE_IRQ_OVERRUN (1 << 9)
+#define MALIDP500_SE_IRQ_PROG_LINE1 (1 << 12)
+#define MALIDP500_SE_IRQ_PROG_LINE2 (1 << 13)
+#define MALIDP500_SE_IRQ_CONF_ACTIVE (1 << 17)
+#define MALIDP500_SE_IRQ_PM_ACTIVE (1 << 18)
+#define MALIDP500_SE_IRQ_AXI_BUSY (1 << 28)
+#define MALIDP500_SE_IRQ_GLOBAL (1 << 31)
+
+#define MALIDP550_DE_IRQ_SATURATION (1 << 8)
+#define MALIDP550_DE_IRQ_VSYNC (1 << 12)
+#define MALIDP550_DE_IRQ_PROG_LINE (1 << 13)
+#define MALIDP550_DE_IRQ_AXI_ERR (1 << 16)
+#define MALIDP550_SE_IRQ_EOW (1 << 0)
+#define MALIDP550_SE_IRQ_AXI_ERR (1 << 16)
+#define MALIDP550_DC_IRQ_CONF_VALID (1 << 0)
+#define MALIDP550_DC_IRQ_CONF_MODE (1 << 4)
+#define MALIDP550_DC_IRQ_CONF_ACTIVE (1 << 16)
+#define MALIDP550_DC_IRQ_DE (1 << 20)
+#define MALIDP550_DC_IRQ_SE (1 << 24)
+
+#define MALIDP650_DE_IRQ_DRIFT (1 << 4)
+
+/* bit masks that are common between products */
+#define MALIDP_CFG_VALID (1 << 0)
+#define MALIDP_DISP_FUNC_ILACED (1 << 8)
+
+/* register offsets for IRQ management */
+#define MALIDP_REG_STATUS 0x00000
+#define MALIDP_REG_SETIRQ 0x00004
+#define MALIDP_REG_MASKIRQ 0x00008
+#define MALIDP_REG_CLEARIRQ 0x0000c
+
+/* register offsets */
+#define MALIDP_DE_CORE_ID 0x00018
+#define MALIDP_DE_DISPLAY_FUNC 0x00020
+
+/* these offsets are relative to MALIDP5x0_TIMINGS_BASE */
+#define MALIDP_DE_H_TIMINGS 0x0
+#define MALIDP_DE_V_TIMINGS 0x4
+#define MALIDP_DE_SYNC_WIDTH 0x8
+#define MALIDP_DE_HV_ACTIVE 0xc
+
+/* macros to set values into registers */
+#define MALIDP_DE_H_FRONTPORCH(x) (((x) & 0xfff) << 0)
+#define MALIDP_DE_H_BACKPORCH(x) (((x) & 0x3ff) << 16)
+#define MALIDP500_DE_V_FRONTPORCH(x) (((x) & 0xff) << 0)
+#define MALIDP550_DE_V_FRONTPORCH(x) (((x) & 0xfff) << 0)
+#define MALIDP_DE_V_BACKPORCH(x) (((x) & 0xff) << 16)
+#define MALIDP_DE_H_SYNCWIDTH(x) (((x) & 0x3ff) << 0)
+#define MALIDP_DE_V_SYNCWIDTH(x) (((x) & 0xff) << 16)
+#define MALIDP_DE_H_ACTIVE(x) (((x) & 0x1fff) << 0)
+#define MALIDP_DE_V_ACTIVE(x) (((x) & 0x1fff) << 16)
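+
+/*
+ * Illustrative sketch, not from the original patch: packing horizontal
+ * timings with the macros above; the porch values are illustrative and
+ * timings_base stands for the device's MALIDP5x0_TIMINGS_BASE offset.
+ */
+#if 0
+	malidp_hw_write(hwdev, MALIDP_DE_H_FRONTPORCH(88) |
+			MALIDP_DE_H_BACKPORCH(148),
+			timings_base + MALIDP_DE_H_TIMINGS);
+#endif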
+
+/* register offsets and bits specific to DP500 */
+#define MALIDP500_DC_BASE 0x00000
+#define MALIDP500_DC_CONTROL 0x0000c
+#define MALIDP500_DC_CONFIG_REQ (1 << 17)
+#define MALIDP500_HSYNCPOL (1 << 20)
+#define MALIDP500_VSYNCPOL (1 << 21)
+#define MALIDP500_DC_CLEAR_MASK 0x300fff
+#define MALIDP500_DE_LINE_COUNTER 0x00010
+#define MALIDP500_DE_AXI_CONTROL 0x00014
+#define MALIDP500_DE_SECURE_CTRL 0x0001c
+#define MALIDP500_DE_CHROMA_KEY 0x00024
+#define MALIDP500_TIMINGS_BASE 0x00028
+
+#define MALIDP500_CONFIG_3D 0x00038
+#define MALIDP500_BGND_COLOR 0x0003c
+#define MALIDP500_OUTPUT_DEPTH 0x00044
+#define MALIDP500_YUV_RGB_COEF 0x00048
+#define MALIDP500_COLOR_ADJ_COEF 0x00078
+#define MALIDP500_COEF_TABLE_ADDR 0x000a8
+#define MALIDP500_COEF_TABLE_DATA 0x000ac
+#define MALIDP500_DE_LV_BASE 0x00100
+#define MALIDP500_DE_LV_PTR_BASE 0x00124
+#define MALIDP500_DE_LG1_BASE 0x00200
+#define MALIDP500_DE_LG1_PTR_BASE 0x0021c
+#define MALIDP500_DE_LG2_BASE 0x00300
+#define MALIDP500_DE_LG2_PTR_BASE 0x0031c
+#define MALIDP500_SE_BASE 0x00c00
+#define MALIDP500_SE_PTR_BASE 0x00e0c
+#define MALIDP500_DC_IRQ_BASE 0x00f00
+#define MALIDP500_CONFIG_VALID 0x00f00
+#define MALIDP500_CONFIG_ID 0x00fd4
+
+/* register offsets and bits specific to DP550/DP650 */
+#define MALIDP550_DE_CONTROL 0x00010
+#define MALIDP550_DE_LINE_COUNTER 0x00014
+#define MALIDP550_DE_AXI_CONTROL 0x00018
+#define MALIDP550_DE_QOS 0x0001c
+#define MALIDP550_TIMINGS_BASE 0x00030
+#define MALIDP550_HSYNCPOL (1 << 12)
+#define MALIDP550_VSYNCPOL (1 << 28)
+
+#define MALIDP550_DE_DISP_SIDEBAND 0x00040
+#define MALIDP550_DE_BGND_COLOR 0x00044
+#define MALIDP550_DE_OUTPUT_DEPTH 0x0004c
+#define MALIDP550_DE_COLOR_COEF 0x00050
+#define MALIDP550_DE_COEF_TABLE_ADDR 0x00080
+#define MALIDP550_DE_COEF_TABLE_DATA 0x00084
+#define MALIDP550_DE_LV1_BASE 0x00100
+#define MALIDP550_DE_LV1_PTR_BASE 0x00124
+#define MALIDP550_DE_LV2_BASE 0x00200
+#define MALIDP550_DE_LV2_PTR_BASE 0x00224
+#define MALIDP550_DE_LG_BASE 0x00300
+#define MALIDP550_DE_LG_PTR_BASE 0x0031c
+#define MALIDP550_DE_LS_BASE 0x00400
+#define MALIDP550_DE_LS_PTR_BASE 0x0042c
+#define MALIDP550_DE_PERF_BASE 0x00500
+#define MALIDP550_SE_BASE 0x08000
+#define MALIDP550_DC_BASE 0x0c000
+#define MALIDP550_DC_CONTROL 0x0c010
+#define MALIDP550_DC_CONFIG_REQ (1 << 16)
+#define MALIDP550_CONFIG_VALID 0x0c014
+#define MALIDP550_CONFIG_ID 0x0ffd4
+
+/*
+ * Starting with DP550, the register map blocks have been standardised into the
+ * following layout:
+ *
+ * Offset Block registers
+ * 0x00000 Display Engine
+ * 0x08000 Scaling Engine
+ * 0x0c000 Display Core
+ * 0x10000 Secure control
+ *
+ * The old DP500 IP mixes some DC with the DE registers, hence the need
+ * for a mapping structure.
+ */
+
+#endif /* __MALIDP_REGS_H__ */
diff --git a/drivers/gpu/drm/armada/Kconfig b/drivers/gpu/drm/armada/Kconfig
index eb773e9af..15f3ecfb1 100644
--- a/drivers/gpu/drm/armada/Kconfig
+++ b/drivers/gpu/drm/armada/Kconfig
@@ -1,11 +1,7 @@
config DRM_ARMADA
tristate "DRM support for Marvell Armada SoCs"
depends on DRM && HAVE_CLK && ARM
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
help
Support the "LCD" controllers found on the Marvell Armada 510
devices. There are two controllers on the device, each controller
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 3130aa8bc..2f58e9e2a 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -199,7 +199,7 @@ static void armada_drm_plane_work_run(struct armada_crtc *dcrtc,
/* Handle any pending frame work. */
if (work) {
work->fn(dcrtc, plane, work);
- drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+ drm_crtc_vblank_put(&dcrtc->crtc);
}
wake_up(&plane->frame_wait);
@@ -210,7 +210,7 @@ int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
{
int ret;
- ret = drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
+ ret = drm_crtc_vblank_get(&dcrtc->crtc);
if (ret) {
DRM_ERROR("failed to acquire vblank counter\n");
return ret;
@@ -218,7 +218,7 @@ int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
ret = cmpxchg(&plane->work, NULL, work) ? -EBUSY : 0;
if (ret)
- drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+ drm_crtc_vblank_put(&dcrtc->crtc);
return ret;
}
@@ -234,7 +234,7 @@ struct armada_plane_work *armada_drm_plane_work_cancel(
struct armada_plane_work *work = xchg(&plane->work, NULL);
if (work)
- drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+ drm_crtc_vblank_put(&dcrtc->crtc);
return work;
}
@@ -260,7 +260,7 @@ static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc,
if (fwork->event) {
spin_lock_irqsave(&dev->event_lock, flags);
- drm_send_vblank_event(dev, dcrtc->num, fwork->event);
+ drm_crtc_send_vblank_event(&dcrtc->crtc, fwork->event);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
@@ -410,7 +410,7 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
DRM_ERROR("graphics underflow on crtc %u\n", dcrtc->num);
if (stat & VSYNC_IRQ)
- drm_handle_vblank(dcrtc->crtc.dev, dcrtc->num);
+ drm_crtc_handle_vblank(&dcrtc->crtc);
spin_lock(&dcrtc->irq_lock);
ovl_plane = dcrtc->plane;
@@ -592,9 +592,9 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
if (interlaced ^ dcrtc->interlaced) {
if (adj->flags & DRM_MODE_FLAG_INTERLACE)
- drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
+ drm_crtc_vblank_get(&dcrtc->crtc);
else
- drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+ drm_crtc_vblank_put(&dcrtc->crtc);
dcrtc->interlaced = interlaced;
}
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 439824a61..f5ebdd681 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -189,7 +189,6 @@ static struct drm_driver armada_drm_driver = {
.load = armada_drm_load,
.lastclose = armada_drm_lastclose,
.unload = armada_drm_unload,
- .set_busid = drm_platform_set_busid,
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = armada_drm_enable_vblank,
.disable_vblank = armada_drm_disable_vblank,
@@ -197,7 +196,7 @@ static struct drm_driver armada_drm_driver = {
.debugfs_init = armada_drm_debugfs_init,
.debugfs_cleanup = armada_drm_debugfs_cleanup,
#endif
- .gem_free_object = armada_gem_free_object,
+ .gem_free_object_unlocked = armada_gem_free_object,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = armada_gem_prime_export,
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 88e7fc797..cb8f0347b 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -231,7 +231,7 @@ struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
obj->dev_addr = DMA_ERROR_CODE;
- mapping = file_inode(obj->obj.filp)->i_mapping;
+ mapping = obj->obj.filp->f_mapping;
mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);
@@ -441,7 +441,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
if (sg_alloc_table(sgt, count, GFP_KERNEL))
goto free_sgt;
- mapping = file_inode(dobj->obj.filp)->i_mapping;
+ mapping = dobj->obj.filp->f_mapping;
for_each_sg(sgt->sgl, sg, count, i) {
struct page *page;
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index 148e8a42b..1ee707ef6 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -121,6 +121,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
int ret;
ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip,
+ BIT(DRM_ROTATE_0),
0, INT_MAX, true, false, &visible);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
index 8a784c460..15f6ce7ac 100644
--- a/drivers/gpu/drm/ast/Kconfig
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -2,11 +2,7 @@ config DRM_AST
tristate "AST server chips"
depends on DRM && PCI
select DRM_TTM
- select FB_SYS_COPYAREA
- select FB_SYS_FILLRECT
- select FB_SYS_IMAGEBLIT
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_TTM
help
Say yes for experimental AST GPU driver. Do not enable
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index fcd9c0714..f54afd211 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -209,7 +209,7 @@ static struct drm_driver driver = {
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
- .gem_free_object = ast_gem_free_object,
+ .gem_free_object_unlocked = ast_gem_free_object,
.dumb_create = ast_dumb_create,
.dumb_map_offset = ast_dumb_mmap_offset,
.dumb_destroy = drm_gem_dumb_destroy,
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 5320f8c57..c017a9330 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -167,12 +167,9 @@ static int astfb_create_object(struct ast_fbdev *afbdev,
struct drm_gem_object **gobj_p)
{
struct drm_device *dev = afbdev->helper.dev;
- u32 bpp, depth;
u32 size;
struct drm_gem_object *gobj;
-
int ret = 0;
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
size = mode_cmd->pitches[0] * mode_cmd->height;
ret = ast_gem_create(dev, size, true, &gobj);
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 7bc3aa6dd..904beaa93 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -295,9 +295,8 @@ static int ast_get_dram_info(struct drm_device *dev)
static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb);
- if (ast_fb->obj)
- drm_gem_object_unreference_unlocked(ast_fb->obj);
+ drm_gem_object_unreference_unlocked(ast_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index c33792260..5957c3e65 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -624,19 +624,21 @@ static void ast_crtc_reset(struct drm_crtc *crtc)
}
-static void ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t start, uint32_t size)
+static int ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
{
struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
- int end = (start + size > 256) ? 256 : start + size, i;
+ int i;
/* userspace palettes are always correct as is */
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
ast_crtc->lut_r[i] = red[i] >> 8;
ast_crtc->lut_g[i] = green[i] >> 8;
ast_crtc->lut_b[i] = blue[i] >> 8;
}
ast_crtc_load_lut(crtc);
+
+ return 0;
}
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 59f2f93b6..b29a41218 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -186,17 +186,6 @@ static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *
{
}
-static int ast_bo_move(struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
-{
- int r;
- r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
- return r;
-}
-
-
static void ast_ttm_backend_destroy(struct ttm_tt *tt)
{
ttm_tt_fini(tt);
@@ -241,7 +230,7 @@ struct ttm_bo_driver ast_bo_driver = {
.ttm_tt_unpopulate = ast_ttm_tt_unpopulate,
.init_mem_type = ast_bo_init_mem_type,
.evict_flags = ast_bo_evict_flags,
- .move = ast_bo_move,
+ .move = NULL,
.verify_access = ast_bo_verify_access,
.io_mem_reserve = &ast_ttm_io_mem_reserve,
.io_mem_free = &ast_ttm_io_mem_free,
diff --git a/drivers/gpu/drm/atmel-hlcdc/Kconfig b/drivers/gpu/drm/atmel-hlcdc/Kconfig
index 99b4f0698..32bcc4bad 100644
--- a/drivers/gpu/drm/atmel-hlcdc/Kconfig
+++ b/drivers/gpu/drm/atmel-hlcdc/Kconfig
@@ -3,7 +3,6 @@ config DRM_ATMEL_HLCDC
depends on DRM && OF && COMMON_CLK && MFD_ATMEL_HLCDC && ARM
select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_KMS_CMA_HELPER
select DRM_PANEL
help
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index bd12231ab..9b17a66cf 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -374,8 +374,8 @@ static void atmel_hlcdc_crtc_finish_page_flip(struct atmel_hlcdc_crtc *crtc)
spin_lock_irqsave(&dev->event_lock, flags);
if (crtc->event) {
- drm_send_vblank_event(dev, crtc->id, crtc->event);
- drm_vblank_put(dev, crtc->id);
+ drm_crtc_send_vblank_event(&crtc->base, crtc->event);
+ drm_crtc_vblank_put(&crtc->base);
crtc->event = NULL;
}
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -383,11 +383,11 @@ static void atmel_hlcdc_crtc_finish_page_flip(struct atmel_hlcdc_crtc *crtc)
void atmel_hlcdc_crtc_irq(struct drm_crtc *c)
{
- drm_handle_vblank(c->dev, 0);
+ drm_crtc_handle_vblank(c);
atmel_hlcdc_crtc_finish_page_flip(drm_crtc_to_atmel_hlcdc_crtc(c));
}
-void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc)
+static void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc)
{
struct atmel_hlcdc_crtc_state *state;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 8ded76457..d4a3d61b7 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -519,7 +519,7 @@ static int atmel_hlcdc_dc_atomic_commit(struct drm_device *dev,
}
/* Swap the state, this is the point of no return. */
- drm_atomic_helper_swap_state(dev, state);
+ drm_atomic_helper_swap_state(state, true);
if (async)
queue_work(dc->wq, &commit->work);
@@ -691,13 +691,6 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev)
destroy_workqueue(dc->wq);
}
-static void atmel_hlcdc_dc_connector_unplug_all(struct drm_device *dev)
-{
- mutex_lock(&dev->mode_config.mutex);
- drm_connector_unregister_all(dev);
- mutex_unlock(&dev->mode_config.mutex);
-}
-
static void atmel_hlcdc_dc_lastclose(struct drm_device *dev)
{
struct atmel_hlcdc_dc *dc = dev->dev_private;
@@ -776,7 +769,7 @@ static struct drm_driver atmel_hlcdc_dc_driver = {
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = atmel_hlcdc_dc_enable_vblank,
.disable_vblank = atmel_hlcdc_dc_disable_vblank,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
@@ -815,15 +808,8 @@ static int atmel_hlcdc_dc_drm_probe(struct platform_device *pdev)
if (ret)
goto err_unload;
- ret = drm_connector_register_all(ddev);
- if (ret)
- goto err_unregister;
-
return 0;
-err_unregister:
- drm_dev_unregister(ddev);
-
err_unload:
atmel_hlcdc_dc_unload(ddev);
@@ -837,7 +823,6 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
{
struct drm_device *ddev = platform_get_drvdata(pdev);
- atmel_hlcdc_dc_connector_unplug_all(ddev);
drm_dev_unregister(ddev);
atmel_hlcdc_dc_unload(ddev);
drm_dev_unref(ddev);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 3d34fc4ca..6119b5085 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -113,21 +113,9 @@ static int atmel_hlcdc_rgb_mode_valid(struct drm_connector *connector,
return atmel_hlcdc_dc_mode_valid(rgb->dc, mode);
}
-
-
-static struct drm_encoder *
-atmel_hlcdc_rgb_best_encoder(struct drm_connector *connector)
-{
- struct atmel_hlcdc_rgb_output *rgb =
- drm_connector_to_atmel_hlcdc_rgb_output(connector);
-
- return &rgb->encoder;
-}
-
static const struct drm_connector_helper_funcs atmel_hlcdc_panel_connector_helper_funcs = {
.get_modes = atmel_hlcdc_panel_get_modes,
.mode_valid = atmel_hlcdc_rgb_mode_valid,
- .best_encoder = atmel_hlcdc_rgb_best_encoder,
};
static enum drm_connector_status
diff --git a/drivers/gpu/drm/bochs/Kconfig b/drivers/gpu/drm/bochs/Kconfig
index 5f8b0c2b9..f739763f4 100644
--- a/drivers/gpu/drm/bochs/Kconfig
+++ b/drivers/gpu/drm/bochs/Kconfig
@@ -2,10 +2,6 @@ config DRM_BOCHS
tristate "DRM Support for bochs dispi vga interface (qemu stdvga)"
depends on DRM && PCI
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
select DRM_TTM
help
Choose this option for qemu.
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index b332b4d3b..abace82de 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -89,7 +89,7 @@ static struct drm_driver bochs_driver = {
.date = "20130925",
.major = 1,
.minor = 0,
- .gem_free_object = bochs_gem_free_object,
+ .gem_free_object_unlocked = bochs_gem_free_object,
.dumb_create = bochs_dumb_create,
.dumb_map_offset = bochs_dumb_mmap_offset,
.dumb_destroy = drm_gem_dumb_destroy,
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 6cf912c45..5c5638a77 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -165,15 +165,6 @@ static void bochs_ttm_io_mem_free(struct ttm_bo_device *bdev,
{
}
-static int bochs_bo_move(struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
-{
- return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
-}
-
-
static void bochs_ttm_backend_destroy(struct ttm_tt *tt)
{
ttm_tt_fini(tt);
@@ -208,7 +199,7 @@ struct ttm_bo_driver bochs_bo_driver = {
.ttm_tt_unpopulate = ttm_pool_unpopulate,
.init_mem_type = bochs_bo_init_mem_type,
.evict_flags = bochs_bo_evict_flags,
- .move = bochs_bo_move,
+ .move = NULL,
.verify_access = bochs_bo_verify_access,
.io_mem_reserve = &bochs_ttm_io_mem_reserve,
.io_mem_free = &bochs_ttm_io_mem_free,
@@ -474,8 +465,8 @@ int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
static void bochs_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct bochs_framebuffer *bochs_fb = to_bochs_framebuffer(fb);
- if (bochs_fb->obj)
- drm_gem_object_unreference_unlocked(bochs_fb->obj);
+
+ drm_gem_object_unreference_unlocked(bochs_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 8f7423f18..b590e6780 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -50,6 +50,25 @@ config DRM_PARADE_PS8622
---help---
Parade eDP-LVDS bridge chip driver.
+config DRM_SII902X
+ tristate "Silicon Image sii902x RGB/HDMI bridge"
+ depends on OF
+ select DRM_KMS_HELPER
+ select REGMAP_I2C
+ ---help---
+ Silicon Image sii902x bridge chip driver.
+
+config DRM_TOSHIBA_TC358767
+ tristate "Toshiba TC358767 eDP bridge"
+ depends on OF
+ select DRM_KMS_HELPER
+ select REGMAP_I2C
+ select DRM_PANEL
+ ---help---
+ Toshiba TC358767 eDP bridge chip driver.
+
source "drivers/gpu/drm/bridge/analogix/Kconfig"
+source "drivers/gpu/drm/bridge/adv7511/Kconfig"
+
endmenu
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 96b13b30e..efdb07e87 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -5,4 +5,7 @@ obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o
obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o
obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
+obj-$(CONFIG_DRM_SII902X) += sii902x.o
+obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/
+obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
diff --git a/drivers/gpu/drm/bridge/adv7511/Kconfig b/drivers/gpu/drm/bridge/adv7511/Kconfig
new file mode 100644
index 000000000..d2b0499ab
--- /dev/null
+++ b/drivers/gpu/drm/bridge/adv7511/Kconfig
@@ -0,0 +1,15 @@
+config DRM_I2C_ADV7511
+ tristate "AV7511 encoder"
+ depends on OF
+ select DRM_KMS_HELPER
+ select REGMAP_I2C
+ help
+	  Support for the Analog Devices ADV7511(W) and ADV7513 HDMI encoders.
+
+config DRM_I2C_ADV7533
+ bool "ADV7533 encoder"
+ depends on DRM_I2C_ADV7511
+ select DRM_MIPI_DSI
+ default y
+ help
+ Support for the Analog Devices ADV7533 DSI to HDMI encoder.
diff --git a/drivers/gpu/drm/bridge/adv7511/Makefile b/drivers/gpu/drm/bridge/adv7511/Makefile
new file mode 100644
index 000000000..9019327ff
--- /dev/null
+++ b/drivers/gpu/drm/bridge/adv7511/Makefile
@@ -0,0 +1,3 @@
+adv7511-y := adv7511_drv.o
+adv7511-$(CONFIG_DRM_I2C_ADV7533) += adv7533.o
+obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
new file mode 100644
index 000000000..161c923d6
--- /dev/null
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -0,0 +1,392 @@
+/*
+ * Analog Devices ADV7511 HDMI transmitter driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef __DRM_I2C_ADV7511_H__
+#define __DRM_I2C_ADV7511_H__
+
+#include <linux/hdmi.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_mipi_dsi.h>
+
+#define ADV7511_REG_CHIP_REVISION 0x00
+#define ADV7511_REG_N0 0x01
+#define ADV7511_REG_N1 0x02
+#define ADV7511_REG_N2 0x03
+#define ADV7511_REG_SPDIF_FREQ 0x04
+#define ADV7511_REG_CTS_AUTOMATIC1 0x05
+#define ADV7511_REG_CTS_AUTOMATIC2 0x06
+#define ADV7511_REG_CTS_MANUAL0 0x07
+#define ADV7511_REG_CTS_MANUAL1 0x08
+#define ADV7511_REG_CTS_MANUAL2 0x09
+#define ADV7511_REG_AUDIO_SOURCE 0x0a
+#define ADV7511_REG_AUDIO_CONFIG 0x0b
+#define ADV7511_REG_I2S_CONFIG 0x0c
+#define ADV7511_REG_I2S_WIDTH 0x0d
+#define ADV7511_REG_AUDIO_SUB_SRC0 0x0e
+#define ADV7511_REG_AUDIO_SUB_SRC1 0x0f
+#define ADV7511_REG_AUDIO_SUB_SRC2 0x10
+#define ADV7511_REG_AUDIO_SUB_SRC3 0x11
+#define ADV7511_REG_AUDIO_CFG1 0x12
+#define ADV7511_REG_AUDIO_CFG2 0x13
+#define ADV7511_REG_AUDIO_CFG3 0x14
+#define ADV7511_REG_I2C_FREQ_ID_CFG 0x15
+#define ADV7511_REG_VIDEO_INPUT_CFG1 0x16
+#define ADV7511_REG_CSC_UPPER(x) (0x18 + (x) * 2)
+#define ADV7511_REG_CSC_LOWER(x) (0x19 + (x) * 2)
+#define ADV7511_REG_SYNC_DECODER(x) (0x30 + (x))
+#define ADV7511_REG_DE_GENERATOR(x) (0x35 + (x))
+#define ADV7511_REG_PIXEL_REPETITION 0x3b
+#define ADV7511_REG_VIC_MANUAL 0x3c
+#define ADV7511_REG_VIC_SEND 0x3d
+#define ADV7511_REG_VIC_DETECTED 0x3e
+#define ADV7511_REG_AUX_VIC_DETECTED 0x3f
+#define ADV7511_REG_PACKET_ENABLE0 0x40
+#define ADV7511_REG_POWER 0x41
+#define ADV7511_REG_STATUS 0x42
+#define ADV7511_REG_EDID_I2C_ADDR 0x43
+#define ADV7511_REG_PACKET_ENABLE1 0x44
+#define ADV7511_REG_PACKET_I2C_ADDR 0x45
+#define ADV7511_REG_DSD_ENABLE 0x46
+#define ADV7511_REG_VIDEO_INPUT_CFG2 0x48
+#define ADV7511_REG_INFOFRAME_UPDATE 0x4a
+#define ADV7511_REG_GC(x) (0x4b + (x)) /* 0x4b - 0x51 */
+#define ADV7511_REG_AVI_INFOFRAME_VERSION 0x52
+#define ADV7511_REG_AVI_INFOFRAME_LENGTH 0x53
+#define ADV7511_REG_AVI_INFOFRAME_CHECKSUM 0x54
+#define ADV7511_REG_AVI_INFOFRAME(x) (0x55 + (x)) /* 0x55 - 0x6f */
+#define ADV7511_REG_AUDIO_INFOFRAME_VERSION 0x70
+#define ADV7511_REG_AUDIO_INFOFRAME_LENGTH 0x71
+#define ADV7511_REG_AUDIO_INFOFRAME_CHECKSUM 0x72
+#define ADV7511_REG_AUDIO_INFOFRAME(x) (0x73 + (x)) /* 0x73 - 0x7c */
+#define ADV7511_REG_INT_ENABLE(x) (0x94 + (x))
+#define ADV7511_REG_INT(x) (0x96 + (x))
+#define ADV7511_REG_INPUT_CLK_DIV 0x9d
+#define ADV7511_REG_PLL_STATUS 0x9e
+#define ADV7511_REG_HDMI_POWER 0xa1
+#define ADV7511_REG_HDCP_HDMI_CFG 0xaf
+#define ADV7511_REG_AN(x) (0xb0 + (x)) /* 0xb0 - 0xb7 */
+#define ADV7511_REG_HDCP_STATUS 0xb8
+#define ADV7511_REG_BCAPS 0xbe
+#define ADV7511_REG_BKSV(x) (0xc0 + (x)) /* 0xc0 - 0xc3 */
+#define ADV7511_REG_EDID_SEGMENT 0xc4
+#define ADV7511_REG_DDC_STATUS 0xc8
+#define ADV7511_REG_EDID_READ_CTRL 0xc9
+#define ADV7511_REG_BSTATUS(x) (0xca + (x)) /* 0xca - 0xcb */
+#define ADV7511_REG_TIMING_GEN_SEQ 0xd0
+#define ADV7511_REG_POWER2 0xd6
+#define ADV7511_REG_HSYNC_PLACEMENT_MSB 0xfa
+
+#define ADV7511_REG_SYNC_ADJUSTMENT(x) (0xd7 + (x)) /* 0xd7 - 0xdc */
+#define ADV7511_REG_TMDS_CLOCK_INV 0xde
+#define ADV7511_REG_ARC_CTRL 0xdf
+#define ADV7511_REG_CEC_I2C_ADDR 0xe1
+#define ADV7511_REG_CEC_CTRL 0xe2
+#define ADV7511_REG_CHIP_ID_HIGH 0xf5
+#define ADV7511_REG_CHIP_ID_LOW 0xf6
+
+#define ADV7511_CSC_ENABLE BIT(7)
+#define ADV7511_CSC_UPDATE_MODE BIT(5)
+
+#define ADV7511_INT0_HPD BIT(7)
+#define ADV7511_INT0_VSYNC BIT(5)
+#define ADV7511_INT0_AUDIO_FIFO_FULL BIT(4)
+#define ADV7511_INT0_EDID_READY BIT(2)
+#define ADV7511_INT0_HDCP_AUTHENTICATED BIT(1)
+
+#define ADV7511_INT1_DDC_ERROR BIT(7)
+#define ADV7511_INT1_BKSV BIT(6)
+#define ADV7511_INT1_CEC_TX_READY BIT(5)
+#define ADV7511_INT1_CEC_TX_ARBIT_LOST BIT(4)
+#define ADV7511_INT1_CEC_TX_RETRY_TIMEOUT BIT(3)
+#define ADV7511_INT1_CEC_RX_READY3 BIT(2)
+#define ADV7511_INT1_CEC_RX_READY2 BIT(1)
+#define ADV7511_INT1_CEC_RX_READY1 BIT(0)
+
+#define ADV7511_ARC_CTRL_POWER_DOWN BIT(0)
+
+#define ADV7511_CEC_CTRL_POWER_DOWN BIT(0)
+
+#define ADV7511_POWER_POWER_DOWN BIT(6)
+
+#define ADV7511_HDMI_CFG_MODE_MASK 0x2
+#define ADV7511_HDMI_CFG_MODE_DVI 0x0
+#define ADV7511_HDMI_CFG_MODE_HDMI 0x2
+
+#define ADV7511_AUDIO_SELECT_I2C 0x0
+#define ADV7511_AUDIO_SELECT_SPDIF 0x1
+#define ADV7511_AUDIO_SELECT_DSD 0x2
+#define ADV7511_AUDIO_SELECT_HBR 0x3
+#define ADV7511_AUDIO_SELECT_DST 0x4
+
+#define ADV7511_I2S_SAMPLE_LEN_16 0x2
+#define ADV7511_I2S_SAMPLE_LEN_20 0x3
+#define ADV7511_I2S_SAMPLE_LEN_18 0x4
+#define ADV7511_I2S_SAMPLE_LEN_22 0x5
+#define ADV7511_I2S_SAMPLE_LEN_19 0x8
+#define ADV7511_I2S_SAMPLE_LEN_23 0x9
+#define ADV7511_I2S_SAMPLE_LEN_24 0xb
+#define ADV7511_I2S_SAMPLE_LEN_17 0xc
+#define ADV7511_I2S_SAMPLE_LEN_21 0xd
+
+#define ADV7511_SAMPLE_FREQ_44100 0x0
+#define ADV7511_SAMPLE_FREQ_48000 0x2
+#define ADV7511_SAMPLE_FREQ_32000 0x3
+#define ADV7511_SAMPLE_FREQ_88200 0x8
+#define ADV7511_SAMPLE_FREQ_96000 0xa
+#define ADV7511_SAMPLE_FREQ_176400 0xc
+#define ADV7511_SAMPLE_FREQ_192000 0xe
+
+#define ADV7511_STATUS_POWER_DOWN_POLARITY BIT(7)
+#define ADV7511_STATUS_HPD BIT(6)
+#define ADV7511_STATUS_MONITOR_SENSE BIT(5)
+#define ADV7511_STATUS_I2S_32BIT_MODE BIT(3)
+
+#define ADV7511_PACKET_ENABLE_N_CTS BIT(8+6)
+#define ADV7511_PACKET_ENABLE_AUDIO_SAMPLE BIT(8+5)
+#define ADV7511_PACKET_ENABLE_AVI_INFOFRAME BIT(8+4)
+#define ADV7511_PACKET_ENABLE_AUDIO_INFOFRAME BIT(8+3)
+#define ADV7511_PACKET_ENABLE_GC BIT(7)
+#define ADV7511_PACKET_ENABLE_SPD BIT(6)
+#define ADV7511_PACKET_ENABLE_MPEG BIT(5)
+#define ADV7511_PACKET_ENABLE_ACP BIT(4)
+#define ADV7511_PACKET_ENABLE_ISRC BIT(3)
+#define ADV7511_PACKET_ENABLE_GM BIT(2)
+#define ADV7511_PACKET_ENABLE_SPARE2 BIT(1)
+#define ADV7511_PACKET_ENABLE_SPARE1 BIT(0)
+
+#define ADV7511_REG_POWER2_HPD_SRC_MASK 0xc0
+#define ADV7511_REG_POWER2_HPD_SRC_BOTH 0x00
+#define ADV7511_REG_POWER2_HPD_SRC_HPD 0x40
+#define ADV7511_REG_POWER2_HPD_SRC_CEC 0x80
+#define ADV7511_REG_POWER2_HPD_SRC_NONE 0xc0
+#define ADV7511_REG_POWER2_TDMS_ENABLE BIT(4)
+#define ADV7511_REG_POWER2_GATE_INPUT_CLK BIT(0)
+
+#define ADV7511_LOW_REFRESH_RATE_NONE 0x0
+#define ADV7511_LOW_REFRESH_RATE_24HZ 0x1
+#define ADV7511_LOW_REFRESH_RATE_25HZ 0x2
+#define ADV7511_LOW_REFRESH_RATE_30HZ 0x3
+
+#define ADV7511_AUDIO_CFG3_LEN_MASK 0x0f
+#define ADV7511_I2C_FREQ_ID_CFG_RATE_MASK 0xf0
+
+#define ADV7511_AUDIO_SOURCE_I2S 0
+#define ADV7511_AUDIO_SOURCE_SPDIF 1
+
+#define ADV7511_I2S_FORMAT_I2S 0
+#define ADV7511_I2S_FORMAT_RIGHT_J 1
+#define ADV7511_I2S_FORMAT_LEFT_J 2
+
+#define ADV7511_PACKET(p, x) ((p) * 0x20 + (x))
+#define ADV7511_PACKET_SDP(x) ADV7511_PACKET(0, x)
+#define ADV7511_PACKET_MPEG(x) ADV7511_PACKET(1, x)
+#define ADV7511_PACKET_ACP(x) ADV7511_PACKET(2, x)
+#define ADV7511_PACKET_ISRC1(x) ADV7511_PACKET(3, x)
+#define ADV7511_PACKET_ISRC2(x) ADV7511_PACKET(4, x)
+#define ADV7511_PACKET_GM(x) ADV7511_PACKET(5, x)
+#define ADV7511_PACKET_SPARE(x) ADV7511_PACKET(6, x)
+
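+/*
+ * Editorial note: packet memory is laid out in 0x20-byte slots, so e.g.
+ * ADV7511_PACKET_ACP(3) expands to 2 * 0x20 + 3 == 0x43.
+ */
+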
+enum adv7511_input_clock {
+ ADV7511_INPUT_CLOCK_1X,
+ ADV7511_INPUT_CLOCK_2X,
+ ADV7511_INPUT_CLOCK_DDR,
+};
+
+enum adv7511_input_justification {
+ ADV7511_INPUT_JUSTIFICATION_EVENLY = 0,
+ ADV7511_INPUT_JUSTIFICATION_RIGHT = 1,
+ ADV7511_INPUT_JUSTIFICATION_LEFT = 2,
+};
+
+enum adv7511_input_sync_pulse {
+ ADV7511_INPUT_SYNC_PULSE_DE = 0,
+ ADV7511_INPUT_SYNC_PULSE_HSYNC = 1,
+ ADV7511_INPUT_SYNC_PULSE_VSYNC = 2,
+ ADV7511_INPUT_SYNC_PULSE_NONE = 3,
+};
+
+/**
+ * enum adv7511_sync_polarity - Polarity for the input sync signals
+ * @ADV7511_SYNC_POLARITY_PASSTHROUGH: Sync polarity matches that of
+ * the currently configured mode.
+ * @ADV7511_SYNC_POLARITY_LOW: Sync polarity is low
+ * @ADV7511_SYNC_POLARITY_HIGH: Sync polarity is high
+ *
+ * If the polarity is set to either LOW or HIGH the driver will configure the
+ * ADV7511 to internally invert the sync signal if required to match the sync
+ * polarity setting for the currently selected output mode.
+ *
+ * If the polarity is set to PASSTHROUGH, the ADV7511 will route the signal
+ * unchanged. This is used when the upstream graphics core already generates
+ * the sync signals with the correct polarity.
+ */
+enum adv7511_sync_polarity {
+ ADV7511_SYNC_POLARITY_PASSTHROUGH,
+ ADV7511_SYNC_POLARITY_LOW,
+ ADV7511_SYNC_POLARITY_HIGH,
+};
+
+/**
+ * struct adv7511_link_config - Describes adv7511 hardware configuration
+ * @input_color_depth: Number of bits per color component (8, 10 or 12)
+ * @input_colorspace: The input colorspace (RGB, YUV444, YUV422)
+ * @input_clock: The input video clock style (1x, 2x, DDR)
+ * @input_style: The input component arrangement variant
+ * @input_justification: Video input format bit justification
+ * @clock_delay: Clock delay for the input clock (in ps)
+ * @embedded_sync: Video input uses BT.656-style embedded sync
+ * @sync_pulse: Select the sync pulse
+ * @vsync_polarity: vsync input signal configuration
+ * @hsync_polarity: hsync input signal configuration
+ */
+struct adv7511_link_config {
+ unsigned int input_color_depth;
+ enum hdmi_colorspace input_colorspace;
+ enum adv7511_input_clock input_clock;
+ unsigned int input_style;
+ enum adv7511_input_justification input_justification;
+
+ int clock_delay;
+
+ bool embedded_sync;
+ enum adv7511_input_sync_pulse sync_pulse;
+ enum adv7511_sync_polarity vsync_polarity;
+ enum adv7511_sync_polarity hsync_polarity;
+};
+
+/**
+ * enum adv7511_csc_scaling - Scaling factor for the ADV7511 CSC
+ * @ADV7511_CSC_SCALING_1: CSC results are not scaled
+ * @ADV7511_CSC_SCALING_2: CSC results are scaled by a factor of two
+ * @ADV7511_CSC_SCALING_4: CSC results are scaled by a factor of four
+ */
+enum adv7511_csc_scaling {
+ ADV7511_CSC_SCALING_1 = 0,
+ ADV7511_CSC_SCALING_2 = 1,
+ ADV7511_CSC_SCALING_4 = 2,
+};
+
+/**
+ * struct adv7511_video_config - Describes adv7511 video configuration
+ * @csc_enable: Whether to enable color space conversion
+ * @csc_scaling_factor: Color space conversion scaling factor
+ * @csc_coefficents: Color space conversion coefficients
+ * @hdmi_mode: Whether to use HDMI or DVI output mode
+ * @avi_infoframe: HDMI infoframe
+ */
+struct adv7511_video_config {
+ bool csc_enable;
+ enum adv7511_csc_scaling csc_scaling_factor;
+ const uint16_t *csc_coefficents;
+
+ bool hdmi_mode;
+ struct hdmi_avi_infoframe avi_infoframe;
+};
+
+enum adv7511_type {
+ ADV7511,
+ ADV7533,
+};
+
+struct adv7511 {
+ struct i2c_client *i2c_main;
+ struct i2c_client *i2c_edid;
+ struct i2c_client *i2c_cec;
+
+ struct regmap *regmap;
+ struct regmap *regmap_cec;
+ enum drm_connector_status status;
+ bool powered;
+
+ struct drm_display_mode curr_mode;
+
+ unsigned int f_tmds;
+
+ unsigned int current_edid_segment;
+ uint8_t edid_buf[256];
+ bool edid_read;
+
+ wait_queue_head_t wq;
+ struct drm_bridge bridge;
+ struct drm_connector connector;
+
+ bool embedded_sync;
+ enum adv7511_sync_polarity vsync_polarity;
+ enum adv7511_sync_polarity hsync_polarity;
+ bool rgb;
+
+ struct edid *edid;
+
+ struct gpio_desc *gpio_pd;
+
+ /* ADV7533 DSI RX related params */
+ struct device_node *host_node;
+ struct mipi_dsi_device *dsi;
+ u8 num_dsi_lanes;
+ bool use_timing_gen;
+
+ enum adv7511_type type;
+};
+
+#ifdef CONFIG_DRM_I2C_ADV7533
+void adv7533_dsi_power_on(struct adv7511 *adv);
+void adv7533_dsi_power_off(struct adv7511 *adv);
+void adv7533_mode_set(struct adv7511 *adv, struct drm_display_mode *mode);
+int adv7533_patch_registers(struct adv7511 *adv);
+void adv7533_uninit_cec(struct adv7511 *adv);
+int adv7533_init_cec(struct adv7511 *adv);
+int adv7533_attach_dsi(struct adv7511 *adv);
+void adv7533_detach_dsi(struct adv7511 *adv);
+int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv);
+#else
+static inline void adv7533_dsi_power_on(struct adv7511 *adv)
+{
+}
+
+static inline void adv7533_dsi_power_off(struct adv7511 *adv)
+{
+}
+
+static inline void adv7533_mode_set(struct adv7511 *adv,
+ struct drm_display_mode *mode)
+{
+}
+
+static inline int adv7533_patch_registers(struct adv7511 *adv)
+{
+ return -ENODEV;
+}
+
+static inline void adv7533_uninit_cec(struct adv7511 *adv)
+{
+}
+
+static inline int adv7533_init_cec(struct adv7511 *adv)
+{
+ return -ENODEV;
+}
+
+static inline int adv7533_attach_dsi(struct adv7511 *adv)
+{
+ return -ENODEV;
+}
+
+static inline void adv7533_detach_dsi(struct adv7511 *adv)
+{
+}
+
+static inline int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* __DRM_I2C_ADV7511_H__ */
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
new file mode 100644
index 000000000..ec8fb2ed3
--- /dev/null
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -0,0 +1,1124 @@
+/*
+ * Analog Devices ADV7511 HDMI transmitter driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+
+#include "adv7511.h"
+
+/* ADI recommended values for proper operation. */
+static const struct reg_sequence adv7511_fixed_registers[] = {
+ { 0x98, 0x03 },
+ { 0x9a, 0xe0 },
+ { 0x9c, 0x30 },
+ { 0x9d, 0x61 },
+ { 0xa2, 0xa4 },
+ { 0xa3, 0xa4 },
+ { 0xe0, 0xd0 },
+ { 0xf9, 0x00 },
+ { 0x55, 0x02 },
+};
+
+/* -----------------------------------------------------------------------------
+ * Register access
+ */
+
+static const uint8_t adv7511_register_defaults[] = {
+ 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 00 */
+ 0x00, 0x00, 0x01, 0x0e, 0xbc, 0x18, 0x01, 0x13,
+ 0x25, 0x37, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 10 */
+ 0x46, 0x62, 0x04, 0xa8, 0x00, 0x00, 0x1c, 0x84,
+ 0x1c, 0xbf, 0x04, 0xa8, 0x1e, 0x70, 0x02, 0x1e, /* 20 */
+ 0x00, 0x00, 0x04, 0xa8, 0x08, 0x12, 0x1b, 0xac,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 30 */
+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0xb0,
+ 0x00, 0x50, 0x90, 0x7e, 0x79, 0x70, 0x00, 0x00, /* 40 */
+ 0x00, 0xa8, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x02, 0x0d, 0x00, 0x00, 0x00, 0x00, /* 50 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 60 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 70 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 80 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 90 */
+ 0x0b, 0x02, 0x00, 0x18, 0x5a, 0x60, 0x00, 0x00,
+ 0x00, 0x00, 0x80, 0x80, 0x08, 0x04, 0x00, 0x00, /* a0 */
+ 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x40, 0x14,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* b0 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* c0 */
+ 0x00, 0x03, 0x00, 0x00, 0x02, 0x00, 0x01, 0x04,
+ 0x30, 0xff, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00, /* d0 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x01,
+ 0x80, 0x75, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, /* e0 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x75, 0x11, 0x00, /* f0 */
+ 0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static bool adv7511_register_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ADV7511_REG_CHIP_REVISION:
+ case ADV7511_REG_SPDIF_FREQ:
+ case ADV7511_REG_CTS_AUTOMATIC1:
+ case ADV7511_REG_CTS_AUTOMATIC2:
+ case ADV7511_REG_VIC_DETECTED:
+ case ADV7511_REG_VIC_SEND:
+ case ADV7511_REG_AUX_VIC_DETECTED:
+ case ADV7511_REG_STATUS:
+ case ADV7511_REG_GC(1):
+ case ADV7511_REG_INT(0):
+ case ADV7511_REG_INT(1):
+ case ADV7511_REG_PLL_STATUS:
+ case ADV7511_REG_AN(0):
+ case ADV7511_REG_AN(1):
+ case ADV7511_REG_AN(2):
+ case ADV7511_REG_AN(3):
+ case ADV7511_REG_AN(4):
+ case ADV7511_REG_AN(5):
+ case ADV7511_REG_AN(6):
+ case ADV7511_REG_AN(7):
+ case ADV7511_REG_HDCP_STATUS:
+ case ADV7511_REG_BCAPS:
+ case ADV7511_REG_BKSV(0):
+ case ADV7511_REG_BKSV(1):
+ case ADV7511_REG_BKSV(2):
+ case ADV7511_REG_BKSV(3):
+ case ADV7511_REG_BKSV(4):
+ case ADV7511_REG_DDC_STATUS:
+ case ADV7511_REG_EDID_READ_CTRL:
+ case ADV7511_REG_BSTATUS(0):
+ case ADV7511_REG_BSTATUS(1):
+ case ADV7511_REG_CHIP_ID_HIGH:
+ case ADV7511_REG_CHIP_ID_LOW:
+ return true;
+ }
+
+ return false;
+}
+
+static const struct regmap_config adv7511_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults_raw = adv7511_register_defaults,
+ .num_reg_defaults_raw = ARRAY_SIZE(adv7511_register_defaults),
+
+ .volatile_reg = adv7511_register_volatile,
+};
+
+/* -----------------------------------------------------------------------------
+ * Hardware configuration
+ */
+
+static void adv7511_set_colormap(struct adv7511 *adv7511, bool enable,
+ const uint16_t *coeff,
+ unsigned int scaling_factor)
+{
+ unsigned int i;
+
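+	/*
+	 * Hold the CSC update-mode bit while reprogramming the coefficients
+	 * so the new values can be applied together when the bit is cleared
+	 * again at the end of this function.
+	 */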
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(1),
+ ADV7511_CSC_UPDATE_MODE, ADV7511_CSC_UPDATE_MODE);
+
+ if (enable) {
+ for (i = 0; i < 12; ++i) {
+ regmap_update_bits(adv7511->regmap,
+ ADV7511_REG_CSC_UPPER(i),
+ 0x1f, coeff[i] >> 8);
+ regmap_write(adv7511->regmap,
+ ADV7511_REG_CSC_LOWER(i),
+ coeff[i] & 0xff);
+ }
+ }
+
+ if (enable)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(0),
+ 0xe0, 0x80 | (scaling_factor << 5));
+ else
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(0),
+ 0x80, 0x00);
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(1),
+ ADV7511_CSC_UPDATE_MODE, 0);
+}
+
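+/*
+ * The packet mask spans two 8-bit registers: the low byte maps to
+ * ADV7511_REG_PACKET_ENABLE0 and the high byte to ADV7511_REG_PACKET_ENABLE1.
+ */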
+static int adv7511_packet_enable(struct adv7511 *adv7511, unsigned int packet)
+{
+ if (packet & 0xff)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE0,
+ packet, 0xff);
+
+ if (packet & 0xff00) {
+ packet >>= 8;
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
+ packet, 0xff);
+ }
+
+ return 0;
+}
+
+static int adv7511_packet_disable(struct adv7511 *adv7511, unsigned int packet)
+{
+ if (packet & 0xff)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE0,
+ packet, 0x00);
+
+ if (packet & 0xff00) {
+ packet >>= 8;
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
+ packet, 0x00);
+ }
+
+ return 0;
+}
+
+/* Coefficients for adv7511 color space conversion */
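+/*
+ * Each coefficient is a 13-bit fixed-point value: adv7511_set_colormap()
+ * writes bits 12:8 into the upper CSC registers and bits 7:0 into the
+ * lower ones.
+ */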
+static const uint16_t adv7511_csc_ycbcr_to_rgb[] = {
+ 0x0734, 0x04ad, 0x0000, 0x1c1b,
+ 0x1ddc, 0x04ad, 0x1f24, 0x0135,
+ 0x0000, 0x04ad, 0x087c, 0x1b77,
+};
+
+static void adv7511_set_config_csc(struct adv7511 *adv7511,
+ struct drm_connector *connector,
+ bool rgb)
+{
+ struct adv7511_video_config config;
+ bool output_format_422, output_format_ycbcr;
+ unsigned int mode;
+ uint8_t infoframe[17];
+
+ if (adv7511->edid)
+ config.hdmi_mode = drm_detect_hdmi_monitor(adv7511->edid);
+ else
+ config.hdmi_mode = false;
+
+ hdmi_avi_infoframe_init(&config.avi_infoframe);
+
+ config.avi_infoframe.scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
+
+ if (rgb) {
+ config.csc_enable = false;
+ config.avi_infoframe.colorspace = HDMI_COLORSPACE_RGB;
+ } else {
+ config.csc_scaling_factor = ADV7511_CSC_SCALING_4;
+ config.csc_coefficents = adv7511_csc_ycbcr_to_rgb;
+
+ if ((connector->display_info.color_formats &
+ DRM_COLOR_FORMAT_YCRCB422) &&
+ config.hdmi_mode) {
+ config.csc_enable = false;
+ config.avi_infoframe.colorspace =
+ HDMI_COLORSPACE_YUV422;
+ } else {
+ config.csc_enable = true;
+ config.avi_infoframe.colorspace = HDMI_COLORSPACE_RGB;
+ }
+ }
+
+ if (config.hdmi_mode) {
+ mode = ADV7511_HDMI_CFG_MODE_HDMI;
+
+ switch (config.avi_infoframe.colorspace) {
+ case HDMI_COLORSPACE_YUV444:
+ output_format_422 = false;
+ output_format_ycbcr = true;
+ break;
+ case HDMI_COLORSPACE_YUV422:
+ output_format_422 = true;
+ output_format_ycbcr = true;
+ break;
+ default:
+ output_format_422 = false;
+ output_format_ycbcr = false;
+ break;
+ }
+ } else {
+ mode = ADV7511_HDMI_CFG_MODE_DVI;
+ output_format_422 = false;
+ output_format_ycbcr = false;
+ }
+
+ adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
+
+ adv7511_set_colormap(adv7511, config.csc_enable,
+ config.csc_coefficents,
+ config.csc_scaling_factor);
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG1, 0x81,
+ (output_format_422 << 7) | output_format_ycbcr);
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_HDCP_HDMI_CFG,
+ ADV7511_HDMI_CFG_MODE_MASK, mode);
+
+ hdmi_avi_infoframe_pack(&config.avi_infoframe, infoframe,
+ sizeof(infoframe));
+
+	/* The AVI infoframe ID is not configurable */
+ regmap_bulk_write(adv7511->regmap, ADV7511_REG_AVI_INFOFRAME_VERSION,
+ infoframe + 1, sizeof(infoframe) - 1);
+
+ adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
+}
+
+static void adv7511_set_link_config(struct adv7511 *adv7511,
+ const struct adv7511_link_config *config)
+{
+ /*
+ * The input style values documented in the datasheet don't match the
+ * hardware register field values :-(
+ */
+ static const unsigned int input_styles[4] = { 0, 2, 1, 3 };
+
+ unsigned int clock_delay;
+ unsigned int color_depth;
+ unsigned int input_id;
+
+ clock_delay = (config->clock_delay + 1200) / 400;
+ color_depth = config->input_color_depth == 8 ? 3
+ : (config->input_color_depth == 10 ? 1 : 2);
+
+ /* TODO Support input ID 6 */
+ if (config->input_colorspace != HDMI_COLORSPACE_YUV422)
+ input_id = config->input_clock == ADV7511_INPUT_CLOCK_DDR
+ ? 5 : 0;
+ else if (config->input_clock == ADV7511_INPUT_CLOCK_DDR)
+ input_id = config->embedded_sync ? 8 : 7;
+ else if (config->input_clock == ADV7511_INPUT_CLOCK_2X)
+ input_id = config->embedded_sync ? 4 : 3;
+ else
+ input_id = config->embedded_sync ? 2 : 1;
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG, 0xf,
+ input_id);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG1, 0x7e,
+ (color_depth << 4) |
+ (input_styles[config->input_style] << 2));
+ regmap_write(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG2,
+ config->input_justification << 3);
+ regmap_write(adv7511->regmap, ADV7511_REG_TIMING_GEN_SEQ,
+ config->sync_pulse << 2);
+
+ regmap_write(adv7511->regmap, 0xba, clock_delay << 5);
+
+ adv7511->embedded_sync = config->embedded_sync;
+ adv7511->hsync_polarity = config->hsync_polarity;
+ adv7511->vsync_polarity = config->vsync_polarity;
+ adv7511->rgb = config->input_colorspace == HDMI_COLORSPACE_RGB;
+}
+
+static void adv7511_power_on(struct adv7511 *adv7511)
+{
+ adv7511->current_edid_segment = -1;
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ ADV7511_POWER_POWER_DOWN, 0);
+ if (adv7511->i2c_main->irq) {
+ /*
+ * Documentation says the INT_ENABLE registers are reset in
+ * POWER_DOWN mode. My 7511w preserved the bits, however.
+ * Still, let's be safe and stick to the documentation.
+ */
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
+ ADV7511_INT0_EDID_READY);
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
+ ADV7511_INT1_DDC_ERROR);
+ }
+
+	/*
+	 * Per spec it is allowed to pulse the HPD signal to indicate that the
+	 * EDID information has changed. Some monitors do this when they wake
+	 * up from standby or are enabled. When the HPD goes low the adv7511 is
+	 * reset and the outputs are disabled, which might cause the monitor to
+	 * go to standby again. To avoid this, ignore the HPD pin for the first
+	 * few seconds after enabling the output.
+	 */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
+ ADV7511_REG_POWER2_HPD_SRC_MASK,
+ ADV7511_REG_POWER2_HPD_SRC_NONE);
+
+ /*
+ * Most of the registers are reset during power down or when HPD is low.
+ */
+ regcache_sync(adv7511->regmap);
+
+ if (adv7511->type == ADV7533)
+ adv7533_dsi_power_on(adv7511);
+
+ adv7511->powered = true;
+}
+
+static void adv7511_power_off(struct adv7511 *adv7511)
+{
+ /* TODO: setup additional power down modes */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ ADV7511_POWER_POWER_DOWN,
+ ADV7511_POWER_POWER_DOWN);
+ regcache_mark_dirty(adv7511->regmap);
+
+ if (adv7511->type == ADV7533)
+ adv7533_dsi_power_off(adv7511);
+
+ adv7511->powered = false;
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt and hotplug detection
+ */
+
+static bool adv7511_hpd(struct adv7511 *adv7511)
+{
+ unsigned int irq0;
+ int ret;
+
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
+ if (ret < 0)
+ return false;
+
+ if (irq0 & ADV7511_INT0_HPD) {
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
+ ADV7511_INT0_HPD);
+ return true;
+ }
+
+ return false;
+}
+
+static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd)
+{
+ unsigned int irq0, irq1;
+ int ret;
+
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(1), &irq1);
+ if (ret < 0)
+ return ret;
+
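+	/* The interrupt flags are write-1-to-clear; ack what was just read. */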
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
+
+ if (process_hpd && irq0 & ADV7511_INT0_HPD && adv7511->bridge.encoder)
+ drm_helper_hpd_irq_event(adv7511->connector.dev);
+
+ if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
+ adv7511->edid_read = true;
+
+ if (adv7511->i2c_main->irq)
+ wake_up_all(&adv7511->wq);
+ }
+
+ return 0;
+}
+
+static irqreturn_t adv7511_irq_handler(int irq, void *devid)
+{
+ struct adv7511 *adv7511 = devid;
+ int ret;
+
+ ret = adv7511_irq_process(adv7511, true);
+ return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
+}
+
+/* -----------------------------------------------------------------------------
+ * EDID retrieval
+ */
+
+static int adv7511_wait_for_edid(struct adv7511 *adv7511, int timeout)
+{
+ int ret;
+
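+	/*
+	 * With an interrupt line, sleep until the EDID-ready (or DDC error)
+	 * interrupt fires; without one, poll the interrupt status registers
+	 * every 25 ms until the timeout expires.
+	 */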
+ if (adv7511->i2c_main->irq) {
+ ret = wait_event_interruptible_timeout(adv7511->wq,
+ adv7511->edid_read, msecs_to_jiffies(timeout));
+ } else {
+ for (; timeout > 0; timeout -= 25) {
+ ret = adv7511_irq_process(adv7511, false);
+ if (ret < 0)
+ break;
+
+ if (adv7511->edid_read)
+ break;
+
+ msleep(25);
+ }
+ }
+
+ return adv7511->edid_read ? 0 : -EIO;
+}
+
+static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
+ size_t len)
+{
+ struct adv7511 *adv7511 = data;
+ struct i2c_msg xfer[2];
+ uint8_t offset;
+ unsigned int i;
+ int ret;
+
+ if (len > 128)
+ return -EINVAL;
+
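+	/*
+	 * An EDID segment covers 256 bytes, i.e. two 128-byte blocks, so a
+	 * new hardware read is only needed when the requested block falls
+	 * outside the currently cached segment.
+	 */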
+ if (adv7511->current_edid_segment != block / 2) {
+ unsigned int status;
+
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_DDC_STATUS,
+ &status);
+ if (ret < 0)
+ return ret;
+
+ if (status != 2) {
+ adv7511->edid_read = false;
+ regmap_write(adv7511->regmap, ADV7511_REG_EDID_SEGMENT,
+ block);
+ ret = adv7511_wait_for_edid(adv7511, 200);
+ if (ret < 0)
+ return ret;
+ }
+
+		/*
+		 * Break the read into 64-byte chunks: more I2C controllers
+		 * support 64-byte transfers than 256-byte ones.
+		 */
+
+ xfer[0].addr = adv7511->i2c_edid->addr;
+ xfer[0].flags = 0;
+ xfer[0].len = 1;
+ xfer[0].buf = &offset;
+ xfer[1].addr = adv7511->i2c_edid->addr;
+ xfer[1].flags = I2C_M_RD;
+ xfer[1].len = 64;
+ xfer[1].buf = adv7511->edid_buf;
+
+ offset = 0;
+
+ for (i = 0; i < 4; ++i) {
+ ret = i2c_transfer(adv7511->i2c_edid->adapter, xfer,
+ ARRAY_SIZE(xfer));
+ if (ret < 0)
+ return ret;
+ else if (ret != 2)
+ return -EIO;
+
+ xfer[1].buf += 64;
+ offset += 64;
+ }
+
+ adv7511->current_edid_segment = block / 2;
+ }
+
+ if (block % 2 == 0)
+ memcpy(buf, adv7511->edid_buf, len);
+ else
+ memcpy(buf, adv7511->edid_buf + 128, len);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * ADV75xx helpers
+ */
+
+static int adv7511_get_modes(struct adv7511 *adv7511,
+ struct drm_connector *connector)
+{
+ struct edid *edid;
+ unsigned int count;
+
+ /* Reading the EDID only works if the device is powered */
+ if (!adv7511->powered) {
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ ADV7511_POWER_POWER_DOWN, 0);
+ if (adv7511->i2c_main->irq) {
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
+ ADV7511_INT0_EDID_READY);
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
+ ADV7511_INT1_DDC_ERROR);
+ }
+ adv7511->current_edid_segment = -1;
+ }
+
+ edid = drm_do_get_edid(connector, adv7511_get_edid_block, adv7511);
+
+ if (!adv7511->powered)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ ADV7511_POWER_POWER_DOWN,
+ ADV7511_POWER_POWER_DOWN);
+
+ kfree(adv7511->edid);
+ adv7511->edid = edid;
+ if (!edid)
+ return 0;
+
+ drm_mode_connector_update_edid_property(connector, edid);
+ count = drm_add_edid_modes(connector, edid);
+
+ adv7511_set_config_csc(adv7511, connector, adv7511->rgb);
+
+ return count;
+}
+
+static enum drm_connector_status
+adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
+{
+ enum drm_connector_status status;
+ unsigned int val;
+ bool hpd;
+ int ret;
+
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_STATUS, &val);
+ if (ret < 0)
+ return connector_status_disconnected;
+
+ if (val & ADV7511_STATUS_HPD)
+ status = connector_status_connected;
+ else
+ status = connector_status_disconnected;
+
+ hpd = adv7511_hpd(adv7511);
+
+	/*
+	 * The chip resets itself when the cable is disconnected, so if there
+	 * is a pending HPD interrupt and the cable is connected there was at
+	 * least one transition from disconnected to connected and the chip
+	 * has to be reinitialized.
+	 */
+ if (status == connector_status_connected && hpd && adv7511->powered) {
+ regcache_mark_dirty(adv7511->regmap);
+ adv7511_power_on(adv7511);
+ adv7511_get_modes(adv7511, connector);
+ if (adv7511->status == connector_status_connected)
+ status = connector_status_disconnected;
+ } else {
+		/* Re-enable HPD sensing */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
+ ADV7511_REG_POWER2_HPD_SRC_MASK,
+ ADV7511_REG_POWER2_HPD_SRC_BOTH);
+ }
+
+ adv7511->status = status;
+ return status;
+}
+
+static int adv7511_mode_valid(struct adv7511 *adv7511,
+ struct drm_display_mode *mode)
+{
+ if (mode->clock > 165000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static void adv7511_mode_set(struct adv7511 *adv7511,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ unsigned int low_refresh_rate;
+ unsigned int hsync_polarity = 0;
+ unsigned int vsync_polarity = 0;
+
+ if (adv7511->embedded_sync) {
+ unsigned int hsync_offset, hsync_len;
+ unsigned int vsync_offset, vsync_len;
+
+ hsync_offset = adj_mode->crtc_hsync_start -
+ adj_mode->crtc_hdisplay;
+ vsync_offset = adj_mode->crtc_vsync_start -
+ adj_mode->crtc_vdisplay;
+ hsync_len = adj_mode->crtc_hsync_end -
+ adj_mode->crtc_hsync_start;
+ vsync_len = adj_mode->crtc_vsync_end -
+ adj_mode->crtc_vsync_start;
+
+		/* The hardware vsync generator has an off-by-one bug */
+ vsync_offset += 1;
+
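+		/*
+		 * The sync placement and width values are wider than one
+		 * register, hence the shift-and-mask packing across the sync
+		 * decoder registers below.
+		 */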
+ regmap_write(adv7511->regmap, ADV7511_REG_HSYNC_PLACEMENT_MSB,
+ ((hsync_offset >> 10) & 0x7) << 5);
+ regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(0),
+ (hsync_offset >> 2) & 0xff);
+ regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(1),
+ ((hsync_offset & 0x3) << 6) |
+ ((hsync_len >> 4) & 0x3f));
+ regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(2),
+ ((hsync_len & 0xf) << 4) |
+ ((vsync_offset >> 6) & 0xf));
+ regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(3),
+ ((vsync_offset & 0x3f) << 2) |
+ ((vsync_len >> 8) & 0x3));
+ regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(4),
+ vsync_len & 0xff);
+
+ hsync_polarity = !(adj_mode->flags & DRM_MODE_FLAG_PHSYNC);
+ vsync_polarity = !(adj_mode->flags & DRM_MODE_FLAG_PVSYNC);
+ } else {
+ enum adv7511_sync_polarity mode_hsync_polarity;
+ enum adv7511_sync_polarity mode_vsync_polarity;
+
+		/*
+		 * If the input signal is always low or always high we want to
+		 * invert or let it pass through depending on the polarity of
+		 * the current mode.
+		 */
+ if (adj_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ mode_hsync_polarity = ADV7511_SYNC_POLARITY_LOW;
+ else
+ mode_hsync_polarity = ADV7511_SYNC_POLARITY_HIGH;
+
+ if (adj_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ mode_vsync_polarity = ADV7511_SYNC_POLARITY_LOW;
+ else
+ mode_vsync_polarity = ADV7511_SYNC_POLARITY_HIGH;
+
+ if (adv7511->hsync_polarity != mode_hsync_polarity &&
+ adv7511->hsync_polarity !=
+ ADV7511_SYNC_POLARITY_PASSTHROUGH)
+ hsync_polarity = 1;
+
+ if (adv7511->vsync_polarity != mode_vsync_polarity &&
+ adv7511->vsync_polarity !=
+ ADV7511_SYNC_POLARITY_PASSTHROUGH)
+ vsync_polarity = 1;
+ }
+
+ if (mode->vrefresh <= 24000)
+ low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ;
+ else if (mode->vrefresh <= 25000)
+ low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ;
+ else if (mode->vrefresh <= 30000)
+ low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ;
+ else
+ low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
+
+ regmap_update_bits(adv7511->regmap, 0xfb,
+ 0x6, low_refresh_rate << 1);
+ regmap_update_bits(adv7511->regmap, 0x17,
+ 0x60, (vsync_polarity << 6) | (hsync_polarity << 5));
+
+ if (adv7511->type == ADV7533)
+ adv7533_mode_set(adv7511, adj_mode);
+
+ drm_mode_copy(&adv7511->curr_mode, adj_mode);
+
+ /*
+ * TODO Test first order 4:2:2 to 4:4:4 up conversion method, which is
+ * supposed to give better results.
+ */
+
+ adv7511->f_tmds = mode->clock;
+}
+
+/* Connector funcs */
+static struct adv7511 *connector_to_adv7511(struct drm_connector *connector)
+{
+ return container_of(connector, struct adv7511, connector);
+}
+
+static int adv7511_connector_get_modes(struct drm_connector *connector)
+{
+ struct adv7511 *adv = connector_to_adv7511(connector);
+
+ return adv7511_get_modes(adv, connector);
+}
+
+static enum drm_mode_status
+adv7511_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct adv7511 *adv = connector_to_adv7511(connector);
+
+ return adv7511_mode_valid(adv, mode);
+}
+
+static struct drm_connector_helper_funcs adv7511_connector_helper_funcs = {
+ .get_modes = adv7511_connector_get_modes,
+ .mode_valid = adv7511_connector_mode_valid,
+};
+
+static enum drm_connector_status
+adv7511_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct adv7511 *adv = connector_to_adv7511(connector);
+
+ return adv7511_detect(adv, connector);
+}
+
+static struct drm_connector_funcs adv7511_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = adv7511_connector_detect,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+/* Bridge funcs */
+static struct adv7511 *bridge_to_adv7511(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct adv7511, bridge);
+}
+
+static void adv7511_bridge_enable(struct drm_bridge *bridge)
+{
+ struct adv7511 *adv = bridge_to_adv7511(bridge);
+
+ adv7511_power_on(adv);
+}
+
+static void adv7511_bridge_disable(struct drm_bridge *bridge)
+{
+ struct adv7511 *adv = bridge_to_adv7511(bridge);
+
+ adv7511_power_off(adv);
+}
+
+static void adv7511_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct adv7511 *adv = bridge_to_adv7511(bridge);
+
+ adv7511_mode_set(adv, mode, adj_mode);
+}
+
+static int adv7511_bridge_attach(struct drm_bridge *bridge)
+{
+ struct adv7511 *adv = bridge_to_adv7511(bridge);
+ int ret;
+
+ if (!bridge->encoder) {
+		DRM_ERROR("Parent encoder object not found\n");
+ return -ENODEV;
+ }
+
+ adv->connector.polled = DRM_CONNECTOR_POLL_HPD;
+
+ ret = drm_connector_init(bridge->dev, &adv->connector,
+ &adv7511_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector with drm\n");
+ return ret;
+ }
+ drm_connector_helper_add(&adv->connector,
+ &adv7511_connector_helper_funcs);
+ drm_mode_connector_attach_encoder(&adv->connector, bridge->encoder);
+
+ if (adv->type == ADV7533)
+ ret = adv7533_attach_dsi(adv);
+
+ return ret;
+}
+
+static struct drm_bridge_funcs adv7511_bridge_funcs = {
+ .enable = adv7511_bridge_enable,
+ .disable = adv7511_bridge_disable,
+ .mode_set = adv7511_bridge_mode_set,
+ .attach = adv7511_bridge_attach,
+};
+
+/* -----------------------------------------------------------------------------
+ * Probe & remove
+ */
+
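+/*
+ * For reference, a device tree node as consumed by this parser might look
+ * like the following (hypothetical example values):
+ *
+ *	hdmi@39 {
+ *		compatible = "adi,adv7511";
+ *		reg = <0x39>;
+ *		adi,input-depth = <8>;
+ *		adi,input-colorspace = "rgb";
+ *		adi,input-clock = "1x";
+ *	};
+ */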
+static int adv7511_parse_dt(struct device_node *np,
+ struct adv7511_link_config *config)
+{
+ const char *str;
+ int ret;
+
+ of_property_read_u32(np, "adi,input-depth", &config->input_color_depth);
+ if (config->input_color_depth != 8 && config->input_color_depth != 10 &&
+ config->input_color_depth != 12)
+ return -EINVAL;
+
+ ret = of_property_read_string(np, "adi,input-colorspace", &str);
+ if (ret < 0)
+ return ret;
+
+ if (!strcmp(str, "rgb"))
+ config->input_colorspace = HDMI_COLORSPACE_RGB;
+ else if (!strcmp(str, "yuv422"))
+ config->input_colorspace = HDMI_COLORSPACE_YUV422;
+ else if (!strcmp(str, "yuv444"))
+ config->input_colorspace = HDMI_COLORSPACE_YUV444;
+ else
+ return -EINVAL;
+
+ ret = of_property_read_string(np, "adi,input-clock", &str);
+ if (ret < 0)
+ return ret;
+
+ if (!strcmp(str, "1x"))
+ config->input_clock = ADV7511_INPUT_CLOCK_1X;
+ else if (!strcmp(str, "2x"))
+ config->input_clock = ADV7511_INPUT_CLOCK_2X;
+ else if (!strcmp(str, "ddr"))
+ config->input_clock = ADV7511_INPUT_CLOCK_DDR;
+ else
+ return -EINVAL;
+
+ if (config->input_colorspace == HDMI_COLORSPACE_YUV422 ||
+ config->input_clock != ADV7511_INPUT_CLOCK_1X) {
+ ret = of_property_read_u32(np, "adi,input-style",
+ &config->input_style);
+ if (ret)
+ return ret;
+
+ if (config->input_style < 1 || config->input_style > 3)
+ return -EINVAL;
+
+ ret = of_property_read_string(np, "adi,input-justification",
+ &str);
+ if (ret < 0)
+ return ret;
+
+ if (!strcmp(str, "left"))
+ config->input_justification =
+ ADV7511_INPUT_JUSTIFICATION_LEFT;
+ else if (!strcmp(str, "evenly"))
+ config->input_justification =
+ ADV7511_INPUT_JUSTIFICATION_EVENLY;
+ else if (!strcmp(str, "right"))
+ config->input_justification =
+ ADV7511_INPUT_JUSTIFICATION_RIGHT;
+ else
+ return -EINVAL;
+
+ } else {
+ config->input_style = 1;
+ config->input_justification = ADV7511_INPUT_JUSTIFICATION_LEFT;
+ }
+
+ of_property_read_u32(np, "adi,clock-delay", &config->clock_delay);
+ if (config->clock_delay < -1200 || config->clock_delay > 1600)
+ return -EINVAL;
+
+ config->embedded_sync = of_property_read_bool(np, "adi,embedded-sync");
+
+ /* Hardcode the sync pulse configurations for now. */
+ config->sync_pulse = ADV7511_INPUT_SYNC_PULSE_NONE;
+ config->vsync_polarity = ADV7511_SYNC_POLARITY_PASSTHROUGH;
+ config->hsync_polarity = ADV7511_SYNC_POLARITY_PASSTHROUGH;
+
+ return 0;
+}
+
+static const int edid_i2c_addr = 0x7e;
+static const int packet_i2c_addr = 0x70;
+static const int cec_i2c_addr = 0x78;
+
+static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
+{
+ struct adv7511_link_config link_config;
+ struct adv7511 *adv7511;
+ struct device *dev = &i2c->dev;
+ unsigned int val;
+ int ret;
+
+ if (!dev->of_node)
+ return -EINVAL;
+
+ adv7511 = devm_kzalloc(dev, sizeof(*adv7511), GFP_KERNEL);
+ if (!adv7511)
+ return -ENOMEM;
+
+ adv7511->powered = false;
+ adv7511->status = connector_status_disconnected;
+
+ if (dev->of_node)
+ adv7511->type = (enum adv7511_type)of_device_get_match_data(dev);
+ else
+ adv7511->type = id->driver_data;
+
+ memset(&link_config, 0, sizeof(link_config));
+
+ if (adv7511->type == ADV7511)
+ ret = adv7511_parse_dt(dev->of_node, &link_config);
+ else
+ ret = adv7533_parse_dt(dev->of_node, adv7511);
+ if (ret)
+ return ret;
+
+ /*
+ * The power down GPIO is optional. If present, toggle it from active to
+ * inactive to wake up the encoder.
+ */
+ adv7511->gpio_pd = devm_gpiod_get_optional(dev, "pd", GPIOD_OUT_HIGH);
+ if (IS_ERR(adv7511->gpio_pd))
+ return PTR_ERR(adv7511->gpio_pd);
+
+ if (adv7511->gpio_pd) {
+ mdelay(5);
+ gpiod_set_value_cansleep(adv7511->gpio_pd, 0);
+ }
+
+ adv7511->regmap = devm_regmap_init_i2c(i2c, &adv7511_regmap_config);
+ if (IS_ERR(adv7511->regmap))
+ return PTR_ERR(adv7511->regmap);
+
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_CHIP_REVISION, &val);
+ if (ret)
+ return ret;
+ dev_dbg(dev, "Rev. %d\n", val);
+
+ if (adv7511->type == ADV7511)
+ ret = regmap_register_patch(adv7511->regmap,
+ adv7511_fixed_registers,
+ ARRAY_SIZE(adv7511_fixed_registers));
+ else
+ ret = adv7533_patch_registers(adv7511);
+ if (ret)
+ return ret;
+
+ regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR, edid_i2c_addr);
+ regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR,
+ packet_i2c_addr);
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR, cec_i2c_addr);
+ adv7511_packet_disable(adv7511, 0xffff);
+
+ adv7511->i2c_main = i2c;
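+	/* The registers hold 8-bit I2C addresses; the I2C core wants 7-bit. */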
+ adv7511->i2c_edid = i2c_new_dummy(i2c->adapter, edid_i2c_addr >> 1);
+ if (!adv7511->i2c_edid)
+ return -ENOMEM;
+
+ if (adv7511->type == ADV7533) {
+ ret = adv7533_init_cec(adv7511);
+ if (ret)
+ goto err_i2c_unregister_edid;
+ }
+
+ if (i2c->irq) {
+ init_waitqueue_head(&adv7511->wq);
+
+ ret = devm_request_threaded_irq(dev, i2c->irq, NULL,
+ adv7511_irq_handler,
+ IRQF_ONESHOT, dev_name(dev),
+ adv7511);
+ if (ret)
+ goto err_unregister_cec;
+ }
+
+ /* CEC is unused for now */
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
+ ADV7511_CEC_CTRL_POWER_DOWN);
+
+ adv7511_power_off(adv7511);
+
+ i2c_set_clientdata(i2c, adv7511);
+
+ if (adv7511->type == ADV7511)
+ adv7511_set_link_config(adv7511, &link_config);
+
+ adv7511->bridge.funcs = &adv7511_bridge_funcs;
+ adv7511->bridge.of_node = dev->of_node;
+
+ ret = drm_bridge_add(&adv7511->bridge);
+ if (ret) {
+ dev_err(dev, "failed to add adv7511 bridge\n");
+ goto err_unregister_cec;
+ }
+
+ return 0;
+
+err_unregister_cec:
+ adv7533_uninit_cec(adv7511);
+err_i2c_unregister_edid:
+ i2c_unregister_device(adv7511->i2c_edid);
+
+ return ret;
+}
+
+static int adv7511_remove(struct i2c_client *i2c)
+{
+ struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
+
+ if (adv7511->type == ADV7533) {
+ adv7533_detach_dsi(adv7511);
+ adv7533_uninit_cec(adv7511);
+ }
+
+ drm_bridge_remove(&adv7511->bridge);
+
+ i2c_unregister_device(adv7511->i2c_edid);
+
+ kfree(adv7511->edid);
+
+ return 0;
+}
+
+static const struct i2c_device_id adv7511_i2c_ids[] = {
+ { "adv7511", ADV7511 },
+ { "adv7511w", ADV7511 },
+ { "adv7513", ADV7511 },
+#ifdef CONFIG_DRM_I2C_ADV7533
+ { "adv7533", ADV7533 },
+#endif
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, adv7511_i2c_ids);
+
+static const struct of_device_id adv7511_of_ids[] = {
+ { .compatible = "adi,adv7511", .data = (void *)ADV7511 },
+ { .compatible = "adi,adv7511w", .data = (void *)ADV7511 },
+ { .compatible = "adi,adv7513", .data = (void *)ADV7511 },
+#ifdef CONFIG_DRM_I2C_ADV7533
+ { .compatible = "adi,adv7533", .data = (void *)ADV7533 },
+#endif
+ { }
+};
+MODULE_DEVICE_TABLE(of, adv7511_of_ids);
+
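+/*
+ * The ADV7533 also sits on the DSI bus, so a minimal DSI driver is
+ * registered alongside the I2C driver (see adv7511_init() below); all of
+ * the real work is done through I2C.
+ */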
+static struct mipi_dsi_driver adv7533_dsi_driver = {
+ .driver.name = "adv7533",
+};
+
+static struct i2c_driver adv7511_driver = {
+ .driver = {
+ .name = "adv7511",
+ .of_match_table = adv7511_of_ids,
+ },
+ .id_table = adv7511_i2c_ids,
+ .probe = adv7511_probe,
+ .remove = adv7511_remove,
+};
+
+static int __init adv7511_init(void)
+{
+ if (IS_ENABLED(CONFIG_DRM_MIPI_DSI))
+ mipi_dsi_driver_register(&adv7533_dsi_driver);
+
+ return i2c_add_driver(&adv7511_driver);
+}
+module_init(adv7511_init);
+
+static void __exit adv7511_exit(void)
+{
+ i2c_del_driver(&adv7511_driver);
+
+ if (IS_ENABLED(CONFIG_DRM_MIPI_DSI))
+ mipi_dsi_driver_unregister(&adv7533_dsi_driver);
+}
+module_exit(adv7511_exit);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("ADV7511 HDMI transmitter driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
new file mode 100644
index 000000000..5eebd1589
--- /dev/null
+++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of_graph.h>
+
+#include "adv7511.h"
+
+static const struct reg_sequence adv7533_fixed_registers[] = {
+ { 0x16, 0x20 },
+ { 0x9a, 0xe0 },
+ { 0xba, 0x70 },
+ { 0xde, 0x82 },
+ { 0xe4, 0x40 },
+ { 0xe5, 0x80 },
+};
+
+static const struct reg_sequence adv7533_cec_fixed_registers[] = {
+ { 0x15, 0xd0 },
+ { 0x17, 0xd0 },
+ { 0x24, 0x20 },
+ { 0x57, 0x11 },
+};
+
+static const struct regmap_config adv7533_cec_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static void adv7511_dsi_config_timing_gen(struct adv7511 *adv)
+{
+ struct mipi_dsi_device *dsi = adv->dsi;
+ struct drm_display_mode *mode = &adv->curr_mode;
+ unsigned int hsw, hfp, hbp, vsw, vfp, vbp;
+ u8 clock_div_by_lanes[] = { 6, 4, 3 }; /* 2, 3, 4 lanes */
+
+ hsw = mode->hsync_end - mode->hsync_start;
+ hfp = mode->hsync_start - mode->hdisplay;
+ hbp = mode->htotal - mode->hsync_end;
+ vsw = mode->vsync_end - mode->vsync_start;
+ vfp = mode->vsync_start - mode->vdisplay;
+ vbp = mode->vtotal - mode->vsync_end;
+
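+	/*
+	 * The timing values are split across register pairs: the first
+	 * register takes bits 11:4, the second takes bits 3:0 in its upper
+	 * nibble.
+	 */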
+ /* set pixel clock divider mode */
+ regmap_write(adv->regmap_cec, 0x16,
+ clock_div_by_lanes[dsi->lanes - 2] << 3);
+
+ /* horizontal porch params */
+ regmap_write(adv->regmap_cec, 0x28, mode->htotal >> 4);
+ regmap_write(adv->regmap_cec, 0x29, (mode->htotal << 4) & 0xff);
+ regmap_write(adv->regmap_cec, 0x2a, hsw >> 4);
+ regmap_write(adv->regmap_cec, 0x2b, (hsw << 4) & 0xff);
+ regmap_write(adv->regmap_cec, 0x2c, hfp >> 4);
+ regmap_write(adv->regmap_cec, 0x2d, (hfp << 4) & 0xff);
+ regmap_write(adv->regmap_cec, 0x2e, hbp >> 4);
+ regmap_write(adv->regmap_cec, 0x2f, (hbp << 4) & 0xff);
+
+ /* vertical porch params */
+ regmap_write(adv->regmap_cec, 0x30, mode->vtotal >> 4);
+ regmap_write(adv->regmap_cec, 0x31, (mode->vtotal << 4) & 0xff);
+ regmap_write(adv->regmap_cec, 0x32, vsw >> 4);
+ regmap_write(adv->regmap_cec, 0x33, (vsw << 4) & 0xff);
+ regmap_write(adv->regmap_cec, 0x34, vfp >> 4);
+ regmap_write(adv->regmap_cec, 0x35, (vfp << 4) & 0xff);
+ regmap_write(adv->regmap_cec, 0x36, vbp >> 4);
+ regmap_write(adv->regmap_cec, 0x37, (vbp << 4) & 0xff);
+}
+
+void adv7533_dsi_power_on(struct adv7511 *adv)
+{
+ struct mipi_dsi_device *dsi = adv->dsi;
+
+ if (adv->use_timing_gen)
+ adv7511_dsi_config_timing_gen(adv);
+
+ /* set number of dsi lanes */
+ regmap_write(adv->regmap_cec, 0x1c, dsi->lanes << 4);
+
+ if (adv->use_timing_gen) {
+ /* reset internal timing generator */
+ regmap_write(adv->regmap_cec, 0x27, 0xcb);
+ regmap_write(adv->regmap_cec, 0x27, 0x8b);
+ regmap_write(adv->regmap_cec, 0x27, 0xcb);
+ } else {
+ /* disable internal timing generator */
+ regmap_write(adv->regmap_cec, 0x27, 0x0b);
+ }
+
+ /* enable hdmi */
+ regmap_write(adv->regmap_cec, 0x03, 0x89);
+ /* disable test mode */
+ regmap_write(adv->regmap_cec, 0x55, 0x00);
+
+ regmap_register_patch(adv->regmap_cec, adv7533_cec_fixed_registers,
+ ARRAY_SIZE(adv7533_cec_fixed_registers));
+}
+
+void adv7533_dsi_power_off(struct adv7511 *adv)
+{
+ /* disable hdmi */
+ regmap_write(adv->regmap_cec, 0x03, 0x0b);
+ /* disable internal timing generator */
+ regmap_write(adv->regmap_cec, 0x27, 0x0b);
+}
+
+void adv7533_mode_set(struct adv7511 *adv, struct drm_display_mode *mode)
+{
+ struct mipi_dsi_device *dsi = adv->dsi;
+ int lanes, ret;
+
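+	/*
+	 * Only 4-lane designs switch lane count: use 3 DSI lanes for pixel
+	 * clocks up to 80 MHz and all 4 above that. Changing the lane count
+	 * requires a detach/attach cycle on the DSI host.
+	 */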
+ if (adv->num_dsi_lanes != 4)
+ return;
+
+ if (mode->clock > 80000)
+ lanes = 4;
+ else
+ lanes = 3;
+
+ if (lanes != dsi->lanes) {
+ mipi_dsi_detach(dsi);
+ dsi->lanes = lanes;
+ ret = mipi_dsi_attach(dsi);
+ if (ret)
+ dev_err(&dsi->dev, "failed to change host lanes\n");
+ }
+}
+
+int adv7533_patch_registers(struct adv7511 *adv)
+{
+ return regmap_register_patch(adv->regmap,
+ adv7533_fixed_registers,
+ ARRAY_SIZE(adv7533_fixed_registers));
+}
+
+void adv7533_uninit_cec(struct adv7511 *adv)
+{
+ i2c_unregister_device(adv->i2c_cec);
+}
+
+static const int cec_i2c_addr = 0x78;
+
+int adv7533_init_cec(struct adv7511 *adv)
+{
+ int ret;
+
+ adv->i2c_cec = i2c_new_dummy(adv->i2c_main->adapter, cec_i2c_addr >> 1);
+ if (!adv->i2c_cec)
+ return -ENOMEM;
+
+ adv->regmap_cec = devm_regmap_init_i2c(adv->i2c_cec,
+ &adv7533_cec_regmap_config);
+ if (IS_ERR(adv->regmap_cec)) {
+ ret = PTR_ERR(adv->regmap_cec);
+ goto err;
+ }
+
+ ret = regmap_register_patch(adv->regmap_cec,
+ adv7533_cec_fixed_registers,
+ ARRAY_SIZE(adv7533_cec_fixed_registers));
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ adv7533_uninit_cec(adv);
+ return ret;
+}
+
+int adv7533_attach_dsi(struct adv7511 *adv)
+{
+ struct device *dev = &adv->i2c_main->dev;
+ struct mipi_dsi_host *host;
+ struct mipi_dsi_device *dsi;
+ int ret = 0;
+ const struct mipi_dsi_device_info info = { .type = "adv7533",
+ .channel = 0,
+ .node = NULL,
+ };
+
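+	/* The DSI host may simply not have probed yet; defer in that case. */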
+ host = of_find_mipi_dsi_host_by_node(adv->host_node);
+ if (!host) {
+ dev_err(dev, "failed to find dsi host\n");
+ return -EPROBE_DEFER;
+ }
+
+ dsi = mipi_dsi_device_register_full(host, &info);
+ if (IS_ERR(dsi)) {
+ dev_err(dev, "failed to create dsi device\n");
+ ret = PTR_ERR(dsi);
+ goto err_dsi_device;
+ }
+
+ adv->dsi = dsi;
+
+ dsi->lanes = adv->num_dsi_lanes;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_EOT_PACKET | MIPI_DSI_MODE_VIDEO_HSE;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "failed to attach dsi to host\n");
+ goto err_dsi_attach;
+ }
+
+ return 0;
+
+err_dsi_attach:
+ mipi_dsi_device_unregister(dsi);
+err_dsi_device:
+ return ret;
+}
+
+void adv7533_detach_dsi(struct adv7511 *adv)
+{
+ mipi_dsi_detach(adv->dsi);
+ mipi_dsi_device_unregister(adv->dsi);
+}
+
+int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
+{
+	u32 num_lanes = 0;	/* reject a missing "adi,dsi-lanes" property */
+ struct device_node *endpoint;
+
+ of_property_read_u32(np, "adi,dsi-lanes", &num_lanes);
+
+ if (num_lanes < 1 || num_lanes > 4)
+ return -EINVAL;
+
+ adv->num_dsi_lanes = num_lanes;
+
+ endpoint = of_graph_get_next_endpoint(np, NULL);
+ if (!endpoint)
+ return -ENODEV;
+
+ adv->host_node = of_graph_get_remote_port_parent(endpoint);
+ if (!adv->host_node) {
+ of_node_put(endpoint);
+ return -ENODEV;
+ }
+
+ of_node_put(endpoint);
+ of_node_put(adv->host_node);
+
+ adv->use_timing_gen = !of_property_read_bool(np,
+ "adi,disable-timing-generator");
+
+	/* TODO: Check if these need to be parsed from DT or not */
+ adv->rgb = true;
+ adv->embedded_sync = false;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c
index d087b054c..f9f03bcba 100644
--- a/drivers/gpu/drm/bridge/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c
@@ -986,16 +986,8 @@ unlock:
return num_modes;
}
-static struct drm_encoder *anx78xx_best_encoder(struct drm_connector *connector)
-{
- struct anx78xx *anx78xx = connector_to_anx78xx(connector);
-
- return anx78xx->bridge.encoder;
-}
-
static const struct drm_connector_helper_funcs anx78xx_connector_helper_funcs = {
.get_modes = anx78xx_get_modes,
- .best_encoder = anx78xx_best_encoder,
};
static enum drm_connector_status anx78xx_detect(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 769959707..32715daf7 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -938,7 +938,7 @@ int analogix_dp_get_modes(struct drm_connector *connector)
num_modes += drm_panel_get_modes(dp->plat_data->panel);
if (dp->plat_data->get_modes)
- num_modes += dp->plat_data->get_modes(dp->plat_data);
+ num_modes += dp->plat_data->get_modes(dp->plat_data, connector);
return num_modes;
}
@@ -1208,6 +1208,7 @@ static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp)
switch (dp->plat_data->dev_type) {
case RK3288_DP:
+ case RK3399_EDP:
/*
* Like Rk3288 DisplayPort TRM indicate that "Main link
* containing 4 physical lanes of 2.7/1.62 Gbps/lane".
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
index f09275d40..b45638043 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
@@ -127,10 +127,10 @@ enum analog_power_block {
};
enum dp_irq_type {
- DP_IRQ_TYPE_HP_CABLE_IN,
- DP_IRQ_TYPE_HP_CABLE_OUT,
- DP_IRQ_TYPE_HP_CHANGE,
- DP_IRQ_TYPE_UNKNOWN,
+ DP_IRQ_TYPE_HP_CABLE_IN = BIT(0),
+ DP_IRQ_TYPE_HP_CABLE_OUT = BIT(1),
+ DP_IRQ_TYPE_HP_CHANGE = BIT(2),
+ DP_IRQ_TYPE_UNKNOWN = BIT(3),
};
struct video_info {
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
index 49205ef02..48030f0cf 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -74,8 +74,12 @@ void analogix_dp_init_analog_param(struct analogix_dp_device *dp)
reg = SEL_24M | TX_DVDD_BIT_1_0625V;
writel(reg, dp->reg_base + ANALOGIX_DP_ANALOG_CTL_2);
- if (dp->plat_data && (dp->plat_data->dev_type == RK3288_DP)) {
- writel(REF_CLK_24M, dp->reg_base + ANALOGIX_DP_PLL_REG_1);
+ if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) {
+ reg = REF_CLK_24M;
+ if (dp->plat_data->dev_type == RK3288_DP)
+ reg ^= REF_CLK_MASK;
+
+ writel(reg, dp->reg_base + ANALOGIX_DP_PLL_REG_1);
writel(0x95, dp->reg_base + ANALOGIX_DP_PLL_REG_2);
writel(0x40, dp->reg_base + ANALOGIX_DP_PLL_REG_3);
writel(0x58, dp->reg_base + ANALOGIX_DP_PLL_REG_4);
@@ -244,7 +248,7 @@ void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp,
u32 reg;
u32 phy_pd_addr = ANALOGIX_DP_PHY_PD;
- if (dp->plat_data && (dp->plat_data->dev_type == RK3288_DP))
+ if (dp->plat_data && is_rockchip(dp->plat_data->dev_type))
phy_pd_addr = ANALOGIX_DP_PD;
switch (block) {
@@ -448,7 +452,7 @@ void analogix_dp_init_aux(struct analogix_dp_device *dp)
analogix_dp_reset_aux(dp);
/* Disable AUX transaction H/W retry */
- if (dp->plat_data && (dp->plat_data->dev_type == RK3288_DP))
+ if (dp->plat_data && is_rockchip(dp->plat_data->dev_type))
reg = AUX_BIT_PERIOD_EXPECTED_DELAY(0) |
AUX_HW_RETRY_COUNT_SEL(3) |
AUX_HW_RETRY_INTERVAL_600_MICROSECONDS;
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
index 337912b0a..cdcc6c5ad 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
@@ -163,8 +163,9 @@
#define HSYNC_POLARITY_CFG (0x1 << 0)
/* ANALOGIX_DP_PLL_REG_1 */
-#define REF_CLK_24M (0x1 << 1)
-#define REF_CLK_27M (0x0 << 1)
+#define REF_CLK_24M (0x1 << 0)
+#define REF_CLK_27M (0x0 << 0)
+#define REF_CLK_MASK (0x1 << 0)
/* ANALOGIX_DP_LANE_MAP */
#define LANE3_MAP_LOGIC_LANE_0 (0x0 << 6)
diff --git a/drivers/gpu/drm/bridge/dw-hdmi.c b/drivers/gpu/drm/bridge/dw-hdmi.c
index c9d941283..77ab47341 100644
--- a/drivers/gpu/drm/bridge/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/dw-hdmi.c
@@ -1476,15 +1476,6 @@ dw_hdmi_connector_mode_valid(struct drm_connector *connector,
return mode_status;
}
-static struct drm_encoder *dw_hdmi_connector_best_encoder(struct drm_connector
- *connector)
-{
- struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
- connector);
-
- return hdmi->encoder;
-}
-
static void dw_hdmi_connector_destroy(struct drm_connector *connector)
{
drm_connector_unregister(connector);
@@ -1504,14 +1495,6 @@ static void dw_hdmi_connector_force(struct drm_connector *connector)
}
static const struct drm_connector_funcs dw_hdmi_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .detect = dw_hdmi_connector_detect,
- .destroy = dw_hdmi_connector_destroy,
- .force = dw_hdmi_connector_force,
-};
-
-static const struct drm_connector_funcs dw_hdmi_atomic_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = dw_hdmi_connector_detect,
@@ -1525,7 +1508,7 @@ static const struct drm_connector_funcs dw_hdmi_atomic_connector_funcs = {
static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = {
.get_modes = dw_hdmi_connector_get_modes,
.mode_valid = dw_hdmi_connector_mode_valid,
- .best_encoder = dw_hdmi_connector_best_encoder,
+ .best_encoder = drm_atomic_helper_best_encoder,
};
static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
@@ -1643,14 +1626,9 @@ static int dw_hdmi_register(struct drm_device *drm, struct dw_hdmi *hdmi)
drm_connector_helper_add(&hdmi->connector,
&dw_hdmi_connector_helper_funcs);
- if (drm_core_check_feature(drm, DRIVER_ATOMIC))
- drm_connector_init(drm, &hdmi->connector,
- &dw_hdmi_atomic_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
- else
- drm_connector_init(drm, &hdmi->connector,
- &dw_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_init(drm, &hdmi->connector,
+ &dw_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
drm_mode_connector_attach_encoder(&hdmi->connector, encoder);
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 7ecd59f70..93f3dacf9 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -235,16 +235,8 @@ out:
return num_modes;
}
-static struct drm_encoder *ptn3460_best_encoder(struct drm_connector *connector)
-{
- struct ptn3460_bridge *ptn_bridge = connector_to_ptn3460(connector);
-
- return ptn_bridge->bridge.encoder;
-}
-
static const struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = {
.get_modes = ptn3460_get_modes,
- .best_encoder = ptn3460_best_encoder,
};
static enum drm_connector_status ptn3460_detect(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index be881e9fe..583b8ce61 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -474,18 +474,8 @@ static int ps8622_get_modes(struct drm_connector *connector)
return drm_panel_get_modes(ps8622->panel);
}
-static struct drm_encoder *ps8622_best_encoder(struct drm_connector *connector)
-{
- struct ps8622_bridge *ps8622;
-
- ps8622 = connector_to_ps8622(connector);
-
- return ps8622->bridge.encoder;
-}
-
static const struct drm_connector_helper_funcs ps8622_connector_helper_funcs = {
.get_modes = ps8622_get_modes,
- .best_encoder = ps8622_best_encoder,
};
static enum drm_connector_status ps8622_detect(struct drm_connector *connector,
@@ -646,9 +636,7 @@ static int ps8622_remove(struct i2c_client *client)
{
struct ps8622_bridge *ps8622 = i2c_get_clientdata(client);
- if (ps8622->bl)
- backlight_device_unregister(ps8622->bl);
-
+ backlight_device_unregister(ps8622->bl);
drm_bridge_remove(&ps8622->bridge);
return 0;
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
new file mode 100644
index 000000000..9126d0306
--- /dev/null
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -0,0 +1,467 @@
+/*
+ * Copyright (C) 2016 Atmel
+ * Bo Shen <voice.shen@atmel.com>
+ *
+ * Authors: Bo Shen <voice.shen@atmel.com>
+ * Boris Brezillon <boris.brezillon@free-electrons.com>
+ * Wu, Songjun <Songjun.Wu@atmel.com>
+ *
+ *
+ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+
+#define SII902X_TPI_VIDEO_DATA 0x0
+
+#define SII902X_TPI_PIXEL_REPETITION 0x8
+#define SII902X_TPI_AVI_PIXEL_REP_BUS_24BIT BIT(5)
+#define SII902X_TPI_AVI_PIXEL_REP_RISING_EDGE BIT(4)
+#define SII902X_TPI_AVI_PIXEL_REP_4X 3
+#define SII902X_TPI_AVI_PIXEL_REP_2X 1
+#define SII902X_TPI_AVI_PIXEL_REP_NONE 0
+#define SII902X_TPI_CLK_RATIO_HALF (0 << 6)
+#define SII902X_TPI_CLK_RATIO_1X (1 << 6)
+#define SII902X_TPI_CLK_RATIO_2X (2 << 6)
+#define SII902X_TPI_CLK_RATIO_4X (3 << 6)
+
+#define SII902X_TPI_AVI_IN_FORMAT 0x9
+#define SII902X_TPI_AVI_INPUT_BITMODE_12BIT BIT(7)
+#define SII902X_TPI_AVI_INPUT_DITHER BIT(6)
+#define SII902X_TPI_AVI_INPUT_RANGE_LIMITED (2 << 2)
+#define SII902X_TPI_AVI_INPUT_RANGE_FULL (1 << 2)
+#define SII902X_TPI_AVI_INPUT_RANGE_AUTO (0 << 2)
+#define SII902X_TPI_AVI_INPUT_COLORSPACE_BLACK (3 << 0)
+#define SII902X_TPI_AVI_INPUT_COLORSPACE_YUV422 (2 << 0)
+#define SII902X_TPI_AVI_INPUT_COLORSPACE_YUV444 (1 << 0)
+#define SII902X_TPI_AVI_INPUT_COLORSPACE_RGB (0 << 0)
+
+#define SII902X_TPI_AVI_INFOFRAME 0x0c
+
+#define SII902X_SYS_CTRL_DATA 0x1a
+#define SII902X_SYS_CTRL_PWR_DWN BIT(4)
+#define SII902X_SYS_CTRL_AV_MUTE BIT(3)
+#define SII902X_SYS_CTRL_DDC_BUS_REQ BIT(2)
+#define SII902X_SYS_CTRL_DDC_BUS_GRTD BIT(1)
+#define SII902X_SYS_CTRL_OUTPUT_MODE BIT(0)
+#define SII902X_SYS_CTRL_OUTPUT_HDMI 1
+#define SII902X_SYS_CTRL_OUTPUT_DVI 0
+
+#define SII902X_REG_CHIPID(n) (0x1b + (n))
+
+#define SII902X_PWR_STATE_CTRL 0x1e
+#define SII902X_AVI_POWER_STATE_MSK GENMASK(1, 0)
+#define SII902X_AVI_POWER_STATE_D(l) ((l) & SII902X_AVI_POWER_STATE_MSK)
+
+#define SII902X_INT_ENABLE 0x3c
+#define SII902X_INT_STATUS 0x3d
+#define SII902X_HOTPLUG_EVENT BIT(0)
+#define SII902X_PLUGGED_STATUS BIT(2)
+
+#define SII902X_REG_TPI_RQB 0xc7
+
+#define SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS 500
+
+struct sii902x {
+ struct i2c_client *i2c;
+ struct regmap *regmap;
+ struct drm_bridge bridge;
+ struct drm_connector connector;
+ struct gpio_desc *reset_gpio;
+};
+
+static inline struct sii902x *bridge_to_sii902x(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct sii902x, bridge);
+}
+
+static inline struct sii902x *connector_to_sii902x(struct drm_connector *con)
+{
+ return container_of(con, struct sii902x, connector);
+}
+
+static void sii902x_reset(struct sii902x *sii902x)
+{
+ if (!sii902x->reset_gpio)
+ return;
+
+ gpiod_set_value(sii902x->reset_gpio, 1);
+
+ /* The datasheet says treset-min = 100us. Make it 150us to be sure. */
+ usleep_range(150, 200);
+
+ gpiod_set_value(sii902x->reset_gpio, 0);
+}
+
+static enum drm_connector_status
+sii902x_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct sii902x *sii902x = connector_to_sii902x(connector);
+ unsigned int status;
+
+ regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status);
+
+ return (status & SII902X_PLUGGED_STATUS) ?
+ connector_status_connected : connector_status_disconnected;
+}
+
+static const struct drm_connector_funcs sii902x_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .detect = sii902x_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int sii902x_get_modes(struct drm_connector *connector)
+{
+ struct sii902x *sii902x = connector_to_sii902x(connector);
+ struct regmap *regmap = sii902x->regmap;
+ u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ unsigned long timeout;
+ unsigned int status;
+ struct edid *edid;
+ int num = 0;
+ int ret;
+
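+	/*
+	 * The transmitter arbitrates access to the DDC lines: request the
+	 * bus, wait for the grant bit, read the EDID through the parent I2C
+	 * adapter, then release the bus again below.
+	 */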
+ ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA,
+ SII902X_SYS_CTRL_DDC_BUS_REQ,
+ SII902X_SYS_CTRL_DDC_BUS_REQ);
+ if (ret)
+ return ret;
+
+ timeout = jiffies +
+ msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS);
+ do {
+ ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status);
+ if (ret)
+ return ret;
+ } while (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD) &&
+ time_before(jiffies, timeout));
+
+ if (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
+		dev_err(&sii902x->i2c->dev, "failed to acquire the i2c bus\n");
+ return -ETIMEDOUT;
+ }
+
+ ret = regmap_write(regmap, SII902X_SYS_CTRL_DATA, status);
+ if (ret)
+ return ret;
+
+ edid = drm_get_edid(connector, sii902x->i2c->adapter);
+ drm_mode_connector_update_edid_property(connector, edid);
+ if (edid) {
+ num = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+
+ ret = drm_display_info_set_bus_formats(&connector->display_info,
+ &bus_format, 1);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA,
+ SII902X_SYS_CTRL_DDC_BUS_REQ |
+ SII902X_SYS_CTRL_DDC_BUS_GRTD, 0);
+ if (ret)
+ return ret;
+
+ timeout = jiffies +
+ msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS);
+ do {
+ ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status);
+ if (ret)
+ return ret;
+ } while (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
+ SII902X_SYS_CTRL_DDC_BUS_GRTD) &&
+ time_before(jiffies, timeout));
+
+ if (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
+ SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
+		dev_err(&sii902x->i2c->dev, "failed to release the i2c bus\n");
+ return -ETIMEDOUT;
+ }
+
+ return num;
+}
+
+static enum drm_mode_status sii902x_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ /* TODO: check mode */
+
+ return MODE_OK;
+}
+
+static const struct drm_connector_helper_funcs sii902x_connector_helper_funcs = {
+ .get_modes = sii902x_get_modes,
+ .mode_valid = sii902x_mode_valid,
+};
+
+static void sii902x_bridge_disable(struct drm_bridge *bridge)
+{
+ struct sii902x *sii902x = bridge_to_sii902x(bridge);
+
+ regmap_update_bits(sii902x->regmap, SII902X_SYS_CTRL_DATA,
+ SII902X_SYS_CTRL_PWR_DWN,
+ SII902X_SYS_CTRL_PWR_DWN);
+}
+
+static void sii902x_bridge_enable(struct drm_bridge *bridge)
+{
+ struct sii902x *sii902x = bridge_to_sii902x(bridge);
+
+ regmap_update_bits(sii902x->regmap, SII902X_PWR_STATE_CTRL,
+ SII902X_AVI_POWER_STATE_MSK,
+ SII902X_AVI_POWER_STATE_D(0));
+ regmap_update_bits(sii902x->regmap, SII902X_SYS_CTRL_DATA,
+ SII902X_SYS_CTRL_PWR_DWN, 0);
+}
+
+static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj)
+{
+ struct sii902x *sii902x = bridge_to_sii902x(bridge);
+ struct regmap *regmap = sii902x->regmap;
+ u8 buf[HDMI_INFOFRAME_SIZE(AVI)];
+ struct hdmi_avi_infoframe frame;
+ int ret;
+
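+	/*
+	 * TPI video mode data: pixel clock, refresh rate and the active
+	 * h/v resolution are packed as little-endian 16-bit words.
+	 */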
+ buf[0] = adj->clock;
+ buf[1] = adj->clock >> 8;
+ buf[2] = adj->vrefresh;
+ buf[3] = 0x00;
+ buf[4] = adj->hdisplay;
+ buf[5] = adj->hdisplay >> 8;
+ buf[6] = adj->vdisplay;
+ buf[7] = adj->vdisplay >> 8;
+ buf[8] = SII902X_TPI_CLK_RATIO_1X | SII902X_TPI_AVI_PIXEL_REP_NONE |
+ SII902X_TPI_AVI_PIXEL_REP_BUS_24BIT;
+ buf[9] = SII902X_TPI_AVI_INPUT_RANGE_AUTO |
+ SII902X_TPI_AVI_INPUT_COLORSPACE_RGB;
+
+ ret = regmap_bulk_write(regmap, SII902X_TPI_VIDEO_DATA, buf, 10);
+ if (ret)
+ return;
+
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, adj);
+ if (ret < 0) {
+ DRM_ERROR("couldn't fill AVI infoframe\n");
+ return;
+ }
+
+ ret = hdmi_avi_infoframe_pack(&frame, buf, sizeof(buf));
+ if (ret < 0) {
+ DRM_ERROR("failed to pack AVI infoframe: %d\n", ret);
+ return;
+ }
+
+ /* Do not send the infoframe header, but keep the CRC field. */
+ regmap_bulk_write(regmap, SII902X_TPI_AVI_INFOFRAME,
+ buf + HDMI_INFOFRAME_HEADER_SIZE - 1,
+ HDMI_AVI_INFOFRAME_SIZE + 1);
+}
+
+static int sii902x_bridge_attach(struct drm_bridge *bridge)
+{
+ struct sii902x *sii902x = bridge_to_sii902x(bridge);
+ struct drm_device *drm = bridge->dev;
+ int ret;
+
+ drm_connector_helper_add(&sii902x->connector,
+ &sii902x_connector_helper_funcs);
+
+ if (!drm_core_check_feature(drm, DRIVER_ATOMIC)) {
+ dev_err(&sii902x->i2c->dev,
+ "sii902x driver is only compatible with DRM devices supporting atomic updates");
+ return -ENOTSUPP;
+ }
+
+ ret = drm_connector_init(drm, &sii902x->connector,
+ &sii902x_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ if (ret)
+ return ret;
+
+ if (sii902x->i2c->irq > 0)
+ sii902x->connector.polled = DRM_CONNECTOR_POLL_HPD;
+ else
+ sii902x->connector.polled = DRM_CONNECTOR_POLL_CONNECT;
+
+ drm_mode_connector_attach_encoder(&sii902x->connector, bridge->encoder);
+
+ return 0;
+}
+
+static const struct drm_bridge_funcs sii902x_bridge_funcs = {
+ .attach = sii902x_bridge_attach,
+ .mode_set = sii902x_bridge_mode_set,
+ .disable = sii902x_bridge_disable,
+ .enable = sii902x_bridge_enable,
+};
+
+static const struct regmap_range sii902x_volatile_ranges[] = {
+ { .range_min = 0, .range_max = 0xff },
+};
+
+static const struct regmap_access_table sii902x_volatile_table = {
+ .yes_ranges = sii902x_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(sii902x_volatile_ranges),
+};
+
+static const struct regmap_config sii902x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .volatile_table = &sii902x_volatile_table,
+ .cache_type = REGCACHE_NONE,
+};
+
+static irqreturn_t sii902x_interrupt(int irq, void *data)
+{
+ struct sii902x *sii902x = data;
+ unsigned int status = 0;
+
+ regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status);
+ regmap_write(sii902x->regmap, SII902X_INT_STATUS, status);
+
+ if ((status & SII902X_HOTPLUG_EVENT) && sii902x->bridge.dev)
+ drm_helper_hpd_irq_event(sii902x->bridge.dev);
+
+ return IRQ_HANDLED;
+}
+
+static int sii902x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ unsigned int status = 0;
+ struct sii902x *sii902x;
+ u8 chipid[4];
+ int ret;
+
+ sii902x = devm_kzalloc(dev, sizeof(*sii902x), GFP_KERNEL);
+ if (!sii902x)
+ return -ENOMEM;
+
+ sii902x->i2c = client;
+ sii902x->regmap = devm_regmap_init_i2c(client, &sii902x_regmap_config);
+ if (IS_ERR(sii902x->regmap))
+ return PTR_ERR(sii902x->regmap);
+
+ sii902x->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(sii902x->reset_gpio)) {
+ dev_err(dev, "Failed to retrieve/request reset gpio: %ld\n",
+ PTR_ERR(sii902x->reset_gpio));
+ return PTR_ERR(sii902x->reset_gpio);
+ }
+
+ sii902x_reset(sii902x);
+
+ ret = regmap_write(sii902x->regmap, SII902X_REG_TPI_RQB, 0x0);
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_read(sii902x->regmap, SII902X_REG_CHIPID(0),
+ &chipid, 4);
+ if (ret) {
+ dev_err(dev, "regmap_read failed %d\n", ret);
+ return ret;
+ }
+
+ if (chipid[0] != 0xb0) {
+ dev_err(dev, "Invalid chipid: %02x (expecting 0xb0)\n",
+ chipid[0]);
+ return -EINVAL;
+ }
+
+ /* Clear all pending interrupts */
+ regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status);
+ regmap_write(sii902x->regmap, SII902X_INT_STATUS, status);
+
+ if (client->irq > 0) {
+ regmap_write(sii902x->regmap, SII902X_INT_ENABLE,
+ SII902X_HOTPLUG_EVENT);
+
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ sii902x_interrupt,
+ IRQF_ONESHOT, dev_name(dev),
+ sii902x);
+ if (ret)
+ return ret;
+ }
+
+ sii902x->bridge.funcs = &sii902x_bridge_funcs;
+ sii902x->bridge.of_node = dev->of_node;
+ ret = drm_bridge_add(&sii902x->bridge);
+ if (ret) {
+ dev_err(dev, "Failed to add drm_bridge\n");
+ return ret;
+ }
+
+ i2c_set_clientdata(client, sii902x);
+
+ return 0;
+}
+
+static int sii902x_remove(struct i2c_client *client)
+{
+ struct sii902x *sii902x = i2c_get_clientdata(client);
+
+ drm_bridge_remove(&sii902x->bridge);
+
+ return 0;
+}
+
+static const struct of_device_id sii902x_dt_ids[] = {
+ { .compatible = "sil,sii9022", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sii902x_dt_ids);
+
+static const struct i2c_device_id sii902x_i2c_ids[] = {
+ { "sii9022", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, sii902x_i2c_ids);
+
+static struct i2c_driver sii902x_driver = {
+ .probe = sii902x_probe,
+ .remove = sii902x_remove,
+ .driver = {
+ .name = "sii902x",
+ .of_match_table = sii902x_dt_ids,
+ },
+ .id_table = sii902x_i2c_ids,
+};
+module_i2c_driver(sii902x_driver);
+
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
+MODULE_DESCRIPTION("SII902x RGB -> HDMI bridges");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
new file mode 100644
index 000000000..a09825d8c
--- /dev/null
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -0,0 +1,1413 @@
+/*
+ * tc358767 eDP bridge driver
+ *
+ * Copyright (C) 2016 CogentEmbedded Inc
+ * Author: Andrey Gusakov <andrey.gusakov@cogentembedded.com>
+ *
+ * Copyright (C) 2016 Pengutronix, Philipp Zabel <p.zabel@pengutronix.de>
+ *
+ * Initially based on: drivers/gpu/drm/i2c/tda998x_drv.c
+ *
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+
+/* Registers */
+
+/* Display Parallel Interface */
+#define DPIPXLFMT 0x0440
+#define VS_POL_ACTIVE_LOW (1 << 10)
+#define HS_POL_ACTIVE_LOW (1 << 9)
+#define DE_POL_ACTIVE_HIGH (0 << 8)
+#define SUB_CFG_TYPE_CONFIG1 (0 << 2) /* LSB aligned */
+#define SUB_CFG_TYPE_CONFIG2 (1 << 2) /* Loosely Packed */
+#define SUB_CFG_TYPE_CONFIG3 (2 << 2) /* LSB aligned 8-bit */
+#define DPI_BPP_RGB888 (0 << 0)
+#define DPI_BPP_RGB666 (1 << 0)
+#define DPI_BPP_RGB565 (2 << 0)
+
+/* Video Path */
+#define VPCTRL0 0x0450
+#define OPXLFMT_RGB666 (0 << 8)
+#define OPXLFMT_RGB888 (1 << 8)
+#define FRMSYNC_DISABLED (0 << 4) /* Video Timing Gen Disabled */
+#define FRMSYNC_ENABLED (1 << 4) /* Video Timing Gen Enabled */
+#define MSF_DISABLED (0 << 0) /* Magic Square FRC disabled */
+#define MSF_ENABLED (1 << 0) /* Magic Square FRC enabled */
+#define HTIM01 0x0454
+#define HTIM02 0x0458
+#define VTIM01 0x045c
+#define VTIM02 0x0460
+#define VFUEN0 0x0464
+#define VFUEN BIT(0) /* Video Frame Timing Upload */
+
+/* System */
+#define TC_IDREG 0x0500
+#define SYSCTRL 0x0510
+#define DP0_AUDSRC_NO_INPUT (0 << 3)
+#define DP0_AUDSRC_I2S_RX (1 << 3)
+#define DP0_VIDSRC_NO_INPUT (0 << 0)
+#define DP0_VIDSRC_DSI_RX (1 << 0)
+#define DP0_VIDSRC_DPI_RX (2 << 0)
+#define DP0_VIDSRC_COLOR_BAR (3 << 0)
+
+/* Control */
+#define DP0CTL 0x0600
+#define VID_MN_GEN BIT(6) /* Auto-generate M/N values */
+#define EF_EN BIT(5) /* Enable Enhanced Framing */
+#define VID_EN BIT(1) /* Video transmission enable */
+#define DP_EN BIT(0) /* Enable DPTX function */
+
+/* Clocks */
+#define DP0_VIDMNGEN0 0x0610
+#define DP0_VIDMNGEN1 0x0614
+#define DP0_VMNGENSTATUS 0x0618
+
+/* Main Channel */
+#define DP0_SECSAMPLE 0x0640
+#define DP0_VIDSYNCDELAY 0x0644
+#define DP0_TOTALVAL 0x0648
+#define DP0_STARTVAL 0x064c
+#define DP0_ACTIVEVAL 0x0650
+#define DP0_SYNCVAL 0x0654
+#define DP0_MISC 0x0658
+#define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */
+#define BPC_6 (0 << 5)
+#define BPC_8 (1 << 5)
+
+/* AUX channel */
+#define DP0_AUXCFG0 0x0660
+#define DP0_AUXCFG1 0x0664
+#define AUX_RX_FILTER_EN BIT(16)
+
+#define DP0_AUXADDR 0x0668
+#define DP0_AUXWDATA(i) (0x066c + (i) * 4)
+#define DP0_AUXRDATA(i) (0x067c + (i) * 4)
+#define DP0_AUXSTATUS 0x068c
+#define AUX_STATUS_MASK 0xf0
+#define AUX_STATUS_SHIFT 4
+#define AUX_TIMEOUT BIT(1)
+#define AUX_BUSY BIT(0)
+#define DP0_AUXI2CADR 0x0698
+
+/* Link Training */
+#define DP0_SRCCTRL 0x06a0
+#define DP0_SRCCTRL_SCRMBLDIS BIT(13)
+#define DP0_SRCCTRL_EN810B BIT(12)
+#define DP0_SRCCTRL_NOTP (0 << 8)
+#define DP0_SRCCTRL_TP1 (1 << 8)
+#define DP0_SRCCTRL_TP2 (2 << 8)
+#define DP0_SRCCTRL_LANESKEW BIT(7)
+#define DP0_SRCCTRL_SSCG BIT(3)
+#define DP0_SRCCTRL_LANES_1 (0 << 2)
+#define DP0_SRCCTRL_LANES_2 (1 << 2)
+#define DP0_SRCCTRL_BW27 (1 << 1)
+#define DP0_SRCCTRL_BW162 (0 << 1)
+#define DP0_SRCCTRL_AUTOCORRECT BIT(0)
+#define DP0_LTSTAT 0x06d0
+#define LT_LOOPDONE BIT(13)
+#define LT_STATUS_MASK (0x1f << 8)
+#define LT_CHANNEL1_EQ_BITS (DP_CHANNEL_EQ_BITS << 4)
+#define LT_INTERLANE_ALIGN_DONE BIT(3)
+#define LT_CHANNEL0_EQ_BITS (DP_CHANNEL_EQ_BITS)
+#define DP0_SNKLTCHGREQ 0x06d4
+#define DP0_LTLOOPCTRL 0x06d8
+#define DP0_SNKLTCTRL 0x06e4
+
+/* PHY */
+#define DP_PHY_CTRL 0x0800
+#define DP_PHY_RST BIT(28) /* DP PHY Global Soft Reset */
+#define BGREN BIT(25) /* AUX PHY BGR Enable */
+#define PWR_SW_EN BIT(24) /* PHY Power Switch Enable */
+#define PHY_M1_RST BIT(12) /* Reset PHY1 Main Channel */
+#define PHY_RDY BIT(16) /* PHY Main Channels Ready */
+#define PHY_M0_RST BIT(8) /* Reset PHY0 Main Channel */
+#define PHY_A0_EN BIT(1) /* PHY Aux Channel0 Enable */
+#define PHY_M0_EN BIT(0) /* PHY Main Channel0 Enable */
+
+/* PLL */
+#define DP0_PLLCTRL 0x0900
+#define DP1_PLLCTRL 0x0904 /* not defined in DS */
+#define PXL_PLLCTRL 0x0908
+#define PLLUPDATE BIT(2)
+#define PLLBYP BIT(1)
+#define PLLEN BIT(0)
+#define PXL_PLLPARAM 0x0914
+#define IN_SEL_REFCLK (0 << 14)
+#define SYS_PLLPARAM 0x0918
+#define REF_FREQ_38M4 (0 << 8) /* 38.4 MHz */
+#define REF_FREQ_19M2 (1 << 8) /* 19.2 MHz */
+#define REF_FREQ_26M (2 << 8) /* 26 MHz */
+#define REF_FREQ_13M (3 << 8) /* 13 MHz */
+#define SYSCLK_SEL_LSCLK (0 << 4)
+#define LSCLK_DIV_1 (0 << 0)
+#define LSCLK_DIV_2 (1 << 0)
+
+/* Test & Debug */
+#define TSTCTL 0x0a00
+#define PLL_DBG 0x0a04
+
+static bool tc_test_pattern;
+module_param_named(test, tc_test_pattern, bool, 0644);
+
+struct tc_edp_link {
+ struct drm_dp_link base;
+ u8 assr;
+ int scrambler_dis;
+ int spread;
+ int coding8b10b;
+ u8 swing;
+ u8 preemp;
+};
+
+struct tc_data {
+ struct device *dev;
+ struct regmap *regmap;
+ struct drm_dp_aux aux;
+
+ struct drm_bridge bridge;
+ struct drm_connector connector;
+ struct drm_panel *panel;
+
+ /* link settings */
+ struct tc_edp_link link;
+
+ /* display edid */
+ struct edid *edid;
+ /* current mode */
+ struct drm_display_mode *mode;
+
+ u32 rev;
+ u8 assr;
+
+ struct gpio_desc *sd_gpio;
+ struct gpio_desc *reset_gpio;
+ struct clk *refclk;
+};
+
+static inline struct tc_data *aux_to_tc(struct drm_dp_aux *a)
+{
+ return container_of(a, struct tc_data, aux);
+}
+
+static inline struct tc_data *bridge_to_tc(struct drm_bridge *b)
+{
+ return container_of(b, struct tc_data, bridge);
+}
+
+static inline struct tc_data *connector_to_tc(struct drm_connector *c)
+{
+ return container_of(c, struct tc_data, connector);
+}
+
+/*
+ * Simple macros to avoid repeated error checks. Both expect an 'int ret'
+ * and an 'err:' label to be in scope in the calling function.
+ */
+#define tc_write(reg, var) \
+ do { \
+ ret = regmap_write(tc->regmap, reg, var); \
+ if (ret) \
+ goto err; \
+ } while (0)
+#define tc_read(reg, var) \
+ do { \
+ ret = regmap_read(tc->regmap, reg, var); \
+ if (ret) \
+ goto err; \
+ } while (0)
+
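+/*
+ * Poll a register until (val & cond_mask) == cond_value or until
+ * timeout_us expires. The register is sampled once more after the
+ * deadline, so a success racing with the timeout still returns 0;
+ * otherwise -ETIMEDOUT (or the regmap_read() error) is returned.
+ */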
+static inline int tc_poll_timeout(struct regmap *map, unsigned int addr,
+ unsigned int cond_mask,
+ unsigned int cond_value,
+ unsigned long sleep_us, u64 timeout_us)
+{
+ ktime_t timeout = ktime_add_us(ktime_get(), timeout_us);
+ unsigned int val;
+ int ret;
+
+ for (;;) {
+ ret = regmap_read(map, addr, &val);
+ if (ret)
+ break;
+ if ((val & cond_mask) == cond_value)
+ break;
+ if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) {
+ ret = regmap_read(map, addr, &val);
+ break;
+ }
+ if (sleep_us)
+ usleep_range((sleep_us >> 2) + 1, sleep_us);
+ }
+ return ret ?: (((val & cond_mask) == cond_value) ? 0 : -ETIMEDOUT);
+}
+
+static int tc_aux_wait_busy(struct tc_data *tc, unsigned int timeout_ms)
+{
+ return tc_poll_timeout(tc->regmap, DP0_AUXSTATUS, AUX_BUSY, 0,
+ 1000, 1000 * timeout_ms);
+}
+
+static int tc_aux_get_status(struct tc_data *tc, u8 *reply)
+{
+ int ret;
+ u32 value;
+
+ ret = regmap_read(tc->regmap, DP0_AUXSTATUS, &value);
+ if (ret < 0)
+ return ret;
+ if (value & AUX_BUSY) {
+ if (value & AUX_TIMEOUT) {
+ dev_err(tc->dev, "i2c access timeout!\n");
+ return -ETIMEDOUT;
+ }
+ return -EBUSY;
+ }
+
+ *reply = (value & AUX_STATUS_MASK) >> AUX_STATUS_SHIFT;
+ return 0;
+}
+
+static ssize_t tc_aux_transfer(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ struct tc_data *tc = aux_to_tc(aux);
+ size_t size = min_t(size_t, 8, msg->size);
+ u8 request = msg->request & ~DP_AUX_I2C_MOT;
+ u8 *buf = msg->buffer;
+ u32 tmp = 0;
+ int i = 0;
+ int ret;
+
+ if (size == 0)
+ return 0;
+
+ ret = tc_aux_wait_busy(tc, 100);
+ if (ret)
+ goto err;
+
+ if (request == DP_AUX_I2C_WRITE || request == DP_AUX_NATIVE_WRITE) {
+ /* Store data */
+ while (i < size) {
+ if (request == DP_AUX_NATIVE_WRITE)
+ tmp = tmp | (buf[i] << (8 * (i & 0x3)));
+ else
+ tmp = (tmp << 8) | buf[i];
+ i++;
+			/*
+			 * Flush each completed 32-bit word into the matching
+			 * DP0_AUXWDATA register. i has already been
+			 * incremented, so index by the last byte stored.
+			 */
+			if (((i % 4) == 0) || (i == size)) {
+				tc_write(DP0_AUXWDATA((i - 1) >> 2), tmp);
+				tmp = 0;
+			}
+ }
+ } else if (request != DP_AUX_I2C_READ &&
+ request != DP_AUX_NATIVE_READ) {
+ return -EINVAL;
+ }
+
+ /* Store address */
+ tc_write(DP0_AUXADDR, msg->address);
+ /* Start transfer */
+ tc_write(DP0_AUXCFG0, ((size - 1) << 8) | request);
+
+ ret = tc_aux_wait_busy(tc, 100);
+ if (ret)
+ goto err;
+
+ ret = tc_aux_get_status(tc, &msg->reply);
+ if (ret)
+ goto err;
+
+ if (request == DP_AUX_I2C_READ || request == DP_AUX_NATIVE_READ) {
+ /* Read data */
+ while (i < size) {
+ if ((i % 4) == 0)
+ tc_read(DP0_AUXRDATA(i >> 2), &tmp);
+ buf[i] = tmp & 0xff;
+ tmp = tmp >> 8;
+ i++;
+ }
+ }
+
+ return size;
+err:
+ return ret;
+}
+
+static const char * const training_pattern1_errors[] = {
+ "No errors",
+ "Aux write error",
+ "Aux read error",
+ "Max voltage reached error",
+ "Loop counter expired error",
+ "res", "res", "res"
+};
+
+static const char * const training_pattern2_errors[] = {
+ "No errors",
+ "Aux write error",
+ "Aux read error",
+ "Clock recovery failed error",
+ "Loop counter expired error",
+ "res", "res", "res"
+};
+
+static u32 tc_srcctrl(struct tc_data *tc)
+{
+ /*
+ * No training pattern, skew lane 1 data by two LSCLK cycles with
+ * respect to lane 0 data, AutoCorrect Mode = 0
+ */
+ u32 reg = DP0_SRCCTRL_NOTP | DP0_SRCCTRL_LANESKEW;
+
+ if (tc->link.scrambler_dis)
+ reg |= DP0_SRCCTRL_SCRMBLDIS; /* Scrambler Disabled */
+ if (tc->link.coding8b10b)
+ /* Enable 8/10B Encoder (TxData[19:16] not used) */
+ reg |= DP0_SRCCTRL_EN810B;
+ if (tc->link.spread)
+ reg |= DP0_SRCCTRL_SSCG; /* Spread Spectrum Enable */
+ if (tc->link.base.num_lanes == 2)
+ reg |= DP0_SRCCTRL_LANES_2; /* Two Main Channel Lanes */
+ if (tc->link.base.rate != 162000)
+ reg |= DP0_SRCCTRL_BW27; /* 2.7 Gbps link */
+ return reg;
+}
+
+static void tc_wait_pll_lock(struct tc_data *tc)
+{
+ /* Wait for PLL to lock: up to 2.09 ms, depending on refclk */
+ usleep_range(3000, 6000);
+}
+
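+/*
+ * Pick the pixel PLL dividers by exhaustive search. Illustrative case
+ * (figures assumed, not from the datasheet): refclk = 19.2 MHz and a
+ * 144 MHz pixel clock are matched exactly by ext_pre_div = 1, pre_div = 2,
+ * mul = 30, ext_post_div = 2: the VCO runs at 19.2 / 1 / 2 * 30 = 288 MHz
+ * (inside the 150-650 MHz window, below the 300 MHz vco_hi threshold) and
+ * the output is 288 / 2 = 144 MHz, i.e. zero delta.
+ */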
+static int tc_pxl_pll_en(struct tc_data *tc, u32 refclk, u32 pixelclock)
+{
+ int ret;
+ int i_pre, best_pre = 1;
+ int i_post, best_post = 1;
+ int div, best_div = 1;
+ int mul, best_mul = 1;
+ int delta, best_delta;
+ int ext_div[] = {1, 2, 3, 5, 7};
+ int best_pixelclock = 0;
+ int vco_hi = 0;
+
+ dev_dbg(tc->dev, "PLL: requested %d pixelclock, ref %d\n", pixelclock,
+ refclk);
+ best_delta = pixelclock;
+ /* Loop over all possible ext_divs, skipping invalid configurations */
+ for (i_pre = 0; i_pre < ARRAY_SIZE(ext_div); i_pre++) {
+ /*
+ * refclk / ext_pre_div should be in the 1 to 200 MHz range.
+ * We don't allow any refclk > 200 MHz, only check lower bounds.
+ */
+ if (refclk / ext_div[i_pre] < 1000000)
+ continue;
+ for (i_post = 0; i_post < ARRAY_SIZE(ext_div); i_post++) {
+ for (div = 1; div <= 16; div++) {
+ u32 clk;
+ u64 tmp;
+
+ tmp = pixelclock * ext_div[i_pre] *
+ ext_div[i_post] * div;
+ do_div(tmp, refclk);
+ mul = tmp;
+
+ /* Check limits */
+ if ((mul < 1) || (mul > 128))
+ continue;
+
+ clk = (refclk / ext_div[i_pre] / div) * mul;
+ /*
+ * refclk * mul / (ext_pre_div * pre_div)
+ * should be in the 150 to 650 MHz range
+ */
+ if ((clk > 650000000) || (clk < 150000000))
+ continue;
+
+ clk = clk / ext_div[i_post];
+ delta = clk - pixelclock;
+
+ if (abs(delta) < abs(best_delta)) {
+ best_pre = i_pre;
+ best_post = i_post;
+ best_div = div;
+ best_mul = mul;
+ best_delta = delta;
+ best_pixelclock = clk;
+ }
+ }
+ }
+ }
+ if (best_pixelclock == 0) {
+ dev_err(tc->dev, "Failed to calc clock for %d pixelclock\n",
+ pixelclock);
+ return -EINVAL;
+ }
+
+ dev_dbg(tc->dev, "PLL: got %d, delta %d\n", best_pixelclock,
+ best_delta);
+ dev_dbg(tc->dev, "PLL: %d / %d / %d * %d / %d\n", refclk,
+ ext_div[best_pre], best_div, best_mul, ext_div[best_post]);
+
+ /* if VCO >= 300 MHz */
+ if (refclk / ext_div[best_pre] / best_div * best_mul >= 300000000)
+ vco_hi = 1;
+ /* see DS */
+ if (best_div == 16)
+ best_div = 0;
+ if (best_mul == 128)
+ best_mul = 0;
+
+ /* Power up PLL and switch to bypass */
+ tc_write(PXL_PLLCTRL, PLLBYP | PLLEN);
+
+ tc_write(PXL_PLLPARAM,
+ (vco_hi << 24) | /* For PLL VCO >= 300 MHz = 1 */
+ (ext_div[best_pre] << 20) | /* External Pre-divider */
+ (ext_div[best_post] << 16) | /* External Post-divider */
+ IN_SEL_REFCLK | /* Use RefClk as PLL input */
+ (best_div << 8) | /* Divider for PLL RefClk */
+ (best_mul << 0)); /* Multiplier for PLL */
+
+ /* Force PLL parameter update and disable bypass */
+ tc_write(PXL_PLLCTRL, PLLUPDATE | PLLEN);
+
+ tc_wait_pll_lock(tc);
+
+ return 0;
+err:
+ return ret;
+}
+
+static int tc_pxl_pll_dis(struct tc_data *tc)
+{
+ /* Enable PLL bypass, power down PLL */
+ return regmap_write(tc->regmap, PXL_PLLCTRL, PLLBYP);
+}
+
+static int tc_stream_clock_calc(struct tc_data *tc)
+{
+ int ret;
+ /*
+ * If the Stream clock and Link Symbol clock are
+ * asynchronous with each other, the value of M changes over
+ * time. This way of generating link clock and stream
+ * clock is called Asynchronous Clock mode. The value M
+ * must change while the value N stays constant. The
+ * value of N in this Asynchronous Clock mode must be set
+ * to 2^15 or 32,768.
+ *
+ * LSCLK = 1/10 of high speed link clock
+ *
+ * f_STRMCLK = M/N * f_LSCLK
+ * M/N = f_STRMCLK / f_LSCLK
+ *
+ */
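+	/*
+	 * Illustration (assumed figures): on a 2.7 Gbps link f_LSCLK is
+	 * 270 MHz, so for a 148.5 MHz stream clock the auto-generated M
+	 * settles around 32768 * 148.5 / 270 ~= 18022 while N stays fixed
+	 * at 32768.
+	 */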
+ tc_write(DP0_VIDMNGEN1, 32768);
+
+ return 0;
+err:
+ return ret;
+}
+
+static int tc_aux_link_setup(struct tc_data *tc)
+{
+ unsigned long rate;
+ u32 value;
+ int ret;
+
+ rate = clk_get_rate(tc->refclk);
+ switch (rate) {
+ case 38400000:
+ value = REF_FREQ_38M4;
+ break;
+ case 26000000:
+ value = REF_FREQ_26M;
+ break;
+ case 19200000:
+ value = REF_FREQ_19M2;
+ break;
+ case 13000000:
+ value = REF_FREQ_13M;
+ break;
+ default:
+ dev_err(tc->dev, "Invalid refclk rate: %lu Hz\n", rate);
+ return -EINVAL;
+ }
+
+ /* Setup DP-PHY / PLL */
+ value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
+ tc_write(SYS_PLLPARAM, value);
+
+ tc_write(DP_PHY_CTRL, BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN);
+
+ /*
+ * Initially PLLs are in bypass. Force PLL parameter update,
+ * disable PLL bypass, enable PLL
+ */
+ tc_write(DP0_PLLCTRL, PLLUPDATE | PLLEN);
+ tc_wait_pll_lock(tc);
+
+ tc_write(DP1_PLLCTRL, PLLUPDATE | PLLEN);
+ tc_wait_pll_lock(tc);
+
+ ret = tc_poll_timeout(tc->regmap, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1,
+ 1000);
+ if (ret == -ETIMEDOUT) {
+		dev_err(tc->dev, "Timeout waiting for PHY to become ready\n");
+		return ret;
+	} else if (ret) {
+		goto err;
+	}
+
+ /* Setup AUX link */
+ tc_write(DP0_AUXCFG1, AUX_RX_FILTER_EN |
+ (0x06 << 8) | /* Aux Bit Period Calculator Threshold */
+ (0x3f << 0)); /* Aux Response Timeout Timer */
+
+ return 0;
+err:
+ dev_err(tc->dev, "tc_aux_link_setup failed: %d\n", ret);
+ return ret;
+}
+
+static int tc_get_display_props(struct tc_data *tc)
+{
+ int ret;
+ /* temp buffer */
+ u8 tmp[8];
+
+ /* Read DP Rx Link Capability */
+ ret = drm_dp_link_probe(&tc->aux, &tc->link.base);
+ if (ret < 0)
+ goto err_dpcd_read;
+ if ((tc->link.base.rate != 162000) && (tc->link.base.rate != 270000))
+ goto err_dpcd_inval;
+
+ ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, tmp);
+ if (ret < 0)
+ goto err_dpcd_read;
+ tc->link.spread = tmp[0] & BIT(0); /* 0.5% down spread */
+
+ ret = drm_dp_dpcd_readb(&tc->aux, DP_MAIN_LINK_CHANNEL_CODING, tmp);
+ if (ret < 0)
+ goto err_dpcd_read;
+ tc->link.coding8b10b = tmp[0] & BIT(0);
+ tc->link.scrambler_dis = 0;
+ /* read assr */
+ ret = drm_dp_dpcd_readb(&tc->aux, DP_EDP_CONFIGURATION_SET, tmp);
+ if (ret < 0)
+ goto err_dpcd_read;
+ tc->link.assr = tmp[0] & DP_ALTERNATE_SCRAMBLER_RESET_ENABLE;
+
+ dev_dbg(tc->dev, "DPCD rev: %d.%d, rate: %s, lanes: %d, framing: %s\n",
+ tc->link.base.revision >> 4, tc->link.base.revision & 0x0f,
+ (tc->link.base.rate == 162000) ? "1.62Gbps" : "2.7Gbps",
+ tc->link.base.num_lanes,
+ (tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING) ?
+ "enhanced" : "non-enhanced");
+ dev_dbg(tc->dev, "ANSI 8B/10B: %d\n", tc->link.coding8b10b);
+ dev_dbg(tc->dev, "Display ASSR: %d, TC358767 ASSR: %d\n",
+ tc->link.assr, tc->assr);
+
+ return 0;
+
+err_dpcd_read:
+ dev_err(tc->dev, "failed to read DPCD: %d\n", ret);
+ return ret;
+err_dpcd_inval:
+ dev_err(tc->dev, "invalid DPCD\n");
+ return -EINVAL;
+}
+
+static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
+{
+ int ret;
+ int vid_sync_dly;
+ int max_tu_symbol;
+
+ int left_margin = mode->htotal - mode->hsync_end;
+ int right_margin = mode->hsync_start - mode->hdisplay;
+ int hsync_len = mode->hsync_end - mode->hsync_start;
+ int upper_margin = mode->vtotal - mode->vsync_end;
+ int lower_margin = mode->vsync_start - mode->vdisplay;
+ int vsync_len = mode->vsync_end - mode->vsync_start;
+
+ dev_dbg(tc->dev, "set mode %dx%d\n",
+ mode->hdisplay, mode->vdisplay);
+ dev_dbg(tc->dev, "H margin %d,%d sync %d\n",
+ left_margin, right_margin, hsync_len);
+ dev_dbg(tc->dev, "V margin %d,%d sync %d\n",
+ upper_margin, lower_margin, vsync_len);
+ dev_dbg(tc->dev, "total: %dx%d\n", mode->htotal, mode->vtotal);
+
+
+ /* LCD Ctl Frame Size */
+ tc_write(VPCTRL0, (0x40 << 20) /* VSDELAY */ |
+ OPXLFMT_RGB888 | FRMSYNC_DISABLED | MSF_DISABLED);
+ tc_write(HTIM01, (left_margin << 16) | /* H back porch */
+ (hsync_len << 0)); /* Hsync */
+ tc_write(HTIM02, (right_margin << 16) | /* H front porch */
+ (mode->hdisplay << 0)); /* width */
+ tc_write(VTIM01, (upper_margin << 16) | /* V back porch */
+ (vsync_len << 0)); /* Vsync */
+ tc_write(VTIM02, (lower_margin << 16) | /* V front porch */
+ (mode->vdisplay << 0)); /* height */
+ tc_write(VFUEN0, VFUEN); /* update settings */
+
+ /* Test pattern settings */
+ tc_write(TSTCTL,
+ (120 << 24) | /* Red Color component value */
+ (20 << 16) | /* Green Color component value */
+ (99 << 8) | /* Blue Color component value */
+ (1 << 4) | /* Enable I2C Filter */
+ (2 << 0) | /* Color bar Mode */
+ 0);
+
+ /* DP Main Stream Attributes */
+ vid_sync_dly = hsync_len + left_margin + mode->hdisplay;
+ tc_write(DP0_VIDSYNCDELAY,
+ (0x003e << 16) | /* thresh_dly */
+ (vid_sync_dly << 0));
+
+ tc_write(DP0_TOTALVAL, (mode->vtotal << 16) | (mode->htotal));
+
+ tc_write(DP0_STARTVAL,
+ ((upper_margin + vsync_len) << 16) |
+ ((left_margin + hsync_len) << 0));
+
+ tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay));
+
+ tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0));
+
+ tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
+ DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
+
+ /*
+ * Recommended maximum number of symbols transferred in a transfer unit:
+ * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size,
+ * (output active video bandwidth in bytes))
+ * Must be less than tu_size.
+ */
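+	/*
+	 * Illustration (assumed figures): RGB888 at a 148.5 MHz pixel clock
+	 * feeds in 445.5 MB/s, while two 2.7 Gbps lanes carry 2 * 270 = 540
+	 * MB/s of payload, so DIV_ROUND_UP(445.5 * 63, 540) = 52 symbols,
+	 * below the 63-symbol TU as required.
+	 */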
+	max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
+	tc_write(DP0_MISC, (max_tu_symbol << 23) | (TU_SIZE_RECOMMENDED << 16) |
+		 BPC_8);
+
+ return 0;
+err:
+ return ret;
+}
+
+static int tc_link_training(struct tc_data *tc, int pattern)
+{
+ const char * const *errors;
+ u32 srcctrl = tc_srcctrl(tc) | DP0_SRCCTRL_SCRMBLDIS |
+ DP0_SRCCTRL_AUTOCORRECT;
+ int timeout;
+ int retry;
+ u32 value;
+ int ret;
+
+ if (pattern == DP_TRAINING_PATTERN_1) {
+ srcctrl |= DP0_SRCCTRL_TP1;
+ errors = training_pattern1_errors;
+ } else {
+ srcctrl |= DP0_SRCCTRL_TP2;
+ errors = training_pattern2_errors;
+ }
+
+ /* Set DPCD 0x102 for Training Part 1 or 2 */
+ tc_write(DP0_SNKLTCTRL, DP_LINK_SCRAMBLING_DISABLE | pattern);
+
+ tc_write(DP0_LTLOOPCTRL,
+ (0x0f << 28) | /* Defer Iteration Count */
+ (0x0f << 24) | /* Loop Iteration Count */
+ (0x0d << 0)); /* Loop Timer Delay */
+
+ retry = 5;
+ do {
+ /* Set DP0 Training Pattern */
+ tc_write(DP0_SRCCTRL, srcctrl);
+
+ /* Enable DP0 to start Link Training */
+ tc_write(DP0CTL, DP_EN);
+
+ /* wait */
+ timeout = 1000;
+ do {
+ tc_read(DP0_LTSTAT, &value);
+ udelay(1);
+ } while ((!(value & LT_LOOPDONE)) && (--timeout));
+ if (timeout == 0) {
+ dev_err(tc->dev, "Link training timeout!\n");
+		} else {
+			int lt_pattern = (value >> 11) & 0x3;
+			int error = (value >> 8) & 0x7;
+
+			dev_dbg(tc->dev,
+				"Link training phase %d done after %d us: %s\n",
+				lt_pattern, 1000 - timeout, errors[error]);
+			if (lt_pattern == DP_TRAINING_PATTERN_1 && error == 0)
+				break;
+			if (lt_pattern == DP_TRAINING_PATTERN_2) {
+ value &= LT_CHANNEL1_EQ_BITS |
+ LT_INTERLANE_ALIGN_DONE |
+ LT_CHANNEL0_EQ_BITS;
+ /* in case of two lanes */
+ if ((tc->link.base.num_lanes == 2) &&
+ (value == (LT_CHANNEL1_EQ_BITS |
+ LT_INTERLANE_ALIGN_DONE |
+ LT_CHANNEL0_EQ_BITS)))
+ break;
+			/* in case of one lane */
+ if ((tc->link.base.num_lanes == 1) &&
+ (value == (LT_INTERLANE_ALIGN_DONE |
+ LT_CHANNEL0_EQ_BITS)))
+ break;
+ }
+ }
+ /* restart */
+ tc_write(DP0CTL, 0);
+ usleep_range(10, 20);
+ } while (--retry);
+ if (retry == 0) {
+ dev_err(tc->dev, "Failed to finish training phase %d\n",
+ pattern);
+ }
+
+ return 0;
+err:
+ return ret;
+}
+
+static int tc_main_link_setup(struct tc_data *tc)
+{
+ struct drm_dp_aux *aux = &tc->aux;
+ struct device *dev = tc->dev;
+ unsigned int rate;
+ u32 dp_phy_ctrl;
+ int timeout;
+ bool aligned;
+ bool ready;
+ u32 value;
+ int ret;
+ u8 tmp[8];
+
+ /* display mode should be set at this point */
+ if (!tc->mode)
+ return -EINVAL;
+
+ /* from excel file - DP0_SrcCtrl */
+ tc_write(DP0_SRCCTRL, DP0_SRCCTRL_SCRMBLDIS | DP0_SRCCTRL_EN810B |
+ DP0_SRCCTRL_LANESKEW | DP0_SRCCTRL_LANES_2 |
+ DP0_SRCCTRL_BW27 | DP0_SRCCTRL_AUTOCORRECT);
+ /* from excel file - DP1_SrcCtrl */
+ tc_write(0x07a0, 0x00003083);
+
+ rate = clk_get_rate(tc->refclk);
+ switch (rate) {
+ case 38400000:
+ value = REF_FREQ_38M4;
+ break;
+ case 26000000:
+ value = REF_FREQ_26M;
+ break;
+ case 19200000:
+ value = REF_FREQ_19M2;
+ break;
+ case 13000000:
+ value = REF_FREQ_13M;
+ break;
+ default:
+ return -EINVAL;
+ }
+ value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
+ tc_write(SYS_PLLPARAM, value);
+ /* Setup Main Link */
+ dp_phy_ctrl = BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN | PHY_M0_EN;
+ tc_write(DP_PHY_CTRL, dp_phy_ctrl);
+ msleep(100);
+
+ /* PLL setup */
+ tc_write(DP0_PLLCTRL, PLLUPDATE | PLLEN);
+ tc_wait_pll_lock(tc);
+
+ tc_write(DP1_PLLCTRL, PLLUPDATE | PLLEN);
+ tc_wait_pll_lock(tc);
+
+ /* PXL PLL setup */
+ if (tc_test_pattern) {
+ ret = tc_pxl_pll_en(tc, clk_get_rate(tc->refclk),
+ 1000 * tc->mode->clock);
+ if (ret)
+ goto err;
+ }
+
+ /* Reset/Enable Main Links */
+ dp_phy_ctrl |= DP_PHY_RST | PHY_M1_RST | PHY_M0_RST;
+ tc_write(DP_PHY_CTRL, dp_phy_ctrl);
+ usleep_range(100, 200);
+ dp_phy_ctrl &= ~(DP_PHY_RST | PHY_M1_RST | PHY_M0_RST);
+ tc_write(DP_PHY_CTRL, dp_phy_ctrl);
+
+ timeout = 1000;
+ do {
+ tc_read(DP_PHY_CTRL, &value);
+ udelay(1);
+ } while ((!(value & PHY_RDY)) && (--timeout));
+
+ if (timeout == 0) {
+		dev_err(dev, "Timeout waiting for PHY to become ready\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Set misc: 8 bits per color */
+ ret = regmap_update_bits(tc->regmap, DP0_MISC, BPC_8, BPC_8);
+ if (ret)
+ goto err;
+
+	/*
+	 * ASSR mode
+	 * On the TC358767 side, ASSR is configured through a strap pin and
+	 * there seems to be no way to change the setting from software.
+	 *
+	 * Check whether the chip and the display are configured for the
+	 * same mode.
+	 */
+ if (tc->assr != tc->link.assr) {
+ dev_dbg(dev, "Trying to set display to ASSR: %d\n",
+ tc->assr);
+ /* try to set ASSR on display side */
+ tmp[0] = tc->assr;
+ ret = drm_dp_dpcd_writeb(aux, DP_EDP_CONFIGURATION_SET, tmp[0]);
+ if (ret < 0)
+ goto err_dpcd_read;
+ /* read back */
+ ret = drm_dp_dpcd_readb(aux, DP_EDP_CONFIGURATION_SET, tmp);
+ if (ret < 0)
+ goto err_dpcd_read;
+
+ if (tmp[0] != tc->assr) {
+ dev_warn(dev, "Failed to switch display ASSR to %d, falling back to unscrambled mode\n",
+ tc->assr);
+ /* trying with disabled scrambler */
+ tc->link.scrambler_dis = 1;
+ }
+ }
+
+ /* Setup Link & DPRx Config for Training */
+ ret = drm_dp_link_configure(aux, &tc->link.base);
+ if (ret < 0)
+ goto err_dpcd_write;
+
+ /* DOWNSPREAD_CTRL */
+ tmp[0] = tc->link.spread ? DP_SPREAD_AMP_0_5 : 0x00;
+ /* MAIN_LINK_CHANNEL_CODING_SET */
+ tmp[1] = tc->link.coding8b10b ? DP_SET_ANSI_8B10B : 0x00;
+ ret = drm_dp_dpcd_write(aux, DP_DOWNSPREAD_CTRL, tmp, 2);
+ if (ret < 0)
+ goto err_dpcd_write;
+
+ ret = tc_link_training(tc, DP_TRAINING_PATTERN_1);
+ if (ret)
+ goto err;
+
+ ret = tc_link_training(tc, DP_TRAINING_PATTERN_2);
+ if (ret)
+ goto err;
+
+ /* Clear DPCD 0x102 */
+	/* Note: cannot use the DP0_SNKLTCTRL (0x06E4) shortcut */
+ tmp[0] = tc->link.scrambler_dis ? DP_LINK_SCRAMBLING_DISABLE : 0x00;
+ ret = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, tmp[0]);
+ if (ret < 0)
+ goto err_dpcd_write;
+
+ /* Clear Training Pattern, set AutoCorrect Mode = 1 */
+ tc_write(DP0_SRCCTRL, tc_srcctrl(tc) | DP0_SRCCTRL_AUTOCORRECT);
+
+ /* Wait */
+ timeout = 100;
+ do {
+ udelay(1);
+ /* Read DPCD 0x202-0x207 */
+ ret = drm_dp_dpcd_read_link_status(aux, tmp + 2);
+ if (ret < 0)
+ goto err_dpcd_read;
+ ready = (tmp[2] == ((DP_CHANNEL_EQ_BITS << 4) | /* Lane1 */
+ DP_CHANNEL_EQ_BITS)); /* Lane0 */
+ aligned = tmp[4] & DP_INTERLANE_ALIGN_DONE;
+ } while ((--timeout) && !(ready && aligned));
+
+ if (timeout == 0) {
+ /* Read DPCD 0x200-0x201 */
+ ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT, tmp, 2);
+ if (ret < 0)
+ goto err_dpcd_read;
+ dev_info(dev, "0x0200 SINK_COUNT: 0x%02x\n", tmp[0]);
+ dev_info(dev, "0x0201 DEVICE_SERVICE_IRQ_VECTOR: 0x%02x\n",
+ tmp[1]);
+ dev_info(dev, "0x0202 LANE0_1_STATUS: 0x%02x\n", tmp[2]);
+ dev_info(dev, "0x0204 LANE_ALIGN_STATUS_UPDATED: 0x%02x\n",
+ tmp[4]);
+ dev_info(dev, "0x0205 SINK_STATUS: 0x%02x\n", tmp[5]);
+ dev_info(dev, "0x0206 ADJUST_REQUEST_LANE0_1: 0x%02x\n",
+ tmp[6]);
+
+ if (!ready)
+ dev_err(dev, "Lane0/1 not ready\n");
+ if (!aligned)
+ dev_err(dev, "Lane0/1 not aligned\n");
+ return -EAGAIN;
+ }
+
+ ret = tc_set_video_mode(tc, tc->mode);
+ if (ret)
+ goto err;
+
+ /* Set M/N */
+ ret = tc_stream_clock_calc(tc);
+ if (ret)
+ goto err;
+
+ return 0;
+err_dpcd_read:
+ dev_err(tc->dev, "Failed to read DPCD: %d\n", ret);
+ return ret;
+err_dpcd_write:
+ dev_err(tc->dev, "Failed to write DPCD: %d\n", ret);
+err:
+ return ret;
+}
+
+static int tc_main_link_stream(struct tc_data *tc, int state)
+{
+ int ret;
+ u32 value;
+
+ dev_dbg(tc->dev, "stream: %d\n", state);
+
+ if (state) {
+ value = VID_MN_GEN | DP_EN;
+ if (tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+ value |= EF_EN;
+ tc_write(DP0CTL, value);
+ /*
+ * VID_EN assertion should be delayed by at least N * LSCLK
+ * cycles from the time VID_MN_GEN is enabled in order to
+ * generate stable values for VID_M. LSCLK is 270 MHz or
+ * 162 MHz, VID_N is set to 32768 in tc_stream_clock_calc(),
+ * so a delay of at least 203 us should suffice.
+ */
+ usleep_range(500, 1000);
+ value |= VID_EN;
+ tc_write(DP0CTL, value);
+ /* Set input interface */
+ value = DP0_AUDSRC_NO_INPUT;
+ if (tc_test_pattern)
+ value |= DP0_VIDSRC_COLOR_BAR;
+ else
+ value |= DP0_VIDSRC_DPI_RX;
+ tc_write(SYSCTRL, value);
+ } else {
+ tc_write(DP0CTL, 0);
+ }
+
+ return 0;
+err:
+ return ret;
+}
+
+static enum drm_connector_status
+tc_connector_detect(struct drm_connector *connector, bool force)
+{
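+	/*
+	 * HPD is not wired up yet (the connector stays on the poll list,
+	 * see tc_connector_set_polling()), so report the panel as always
+	 * connected.
+	 */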
+ return connector_status_connected;
+}
+
+static void tc_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct tc_data *tc = bridge_to_tc(bridge);
+
+ drm_panel_prepare(tc->panel);
+}
+
+static void tc_bridge_enable(struct drm_bridge *bridge)
+			/*
+			 * Userspace is not allowed to get ahead of the
+			 * previous commit with nonblocking ones.
+			 */
+ int ret;
+
+ ret = tc_main_link_setup(tc);
+ if (ret < 0) {
+ dev_err(tc->dev, "main link setup error: %d\n", ret);
+ return;
+ }
+
+ ret = tc_main_link_stream(tc, 1);
+ if (ret < 0) {
+ dev_err(tc->dev, "main link stream start error: %d\n", ret);
+ return;
+ }
+
+ drm_panel_enable(tc->panel);
+}
+
+static void tc_bridge_disable(struct drm_bridge *bridge)
+{
+ struct tc_data *tc = bridge_to_tc(bridge);
+ int ret;
+
+ drm_panel_disable(tc->panel);
+
+ ret = tc_main_link_stream(tc, 0);
+ if (ret < 0)
+ dev_err(tc->dev, "main link stream stop error: %d\n", ret);
+}
+
+static void tc_bridge_post_disable(struct drm_bridge *bridge)
+{
+ struct tc_data *tc = bridge_to_tc(bridge);
+
+ drm_panel_unprepare(tc->panel);
+}
+
+static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adj)
+{
+ /* Fixup sync polarities, both hsync and vsync are active low */
+ adj->flags = mode->flags;
+ adj->flags |= (DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC);
+ adj->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+
+ return true;
+}
+
+static int tc_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ /* Accept any mode */
+ return MODE_OK;
+}
+
+static void tc_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj)
+{
+ struct tc_data *tc = bridge_to_tc(bridge);
+
+ tc->mode = mode;
+}
+
+static int tc_connector_get_modes(struct drm_connector *connector)
+{
+ struct tc_data *tc = connector_to_tc(connector);
+ struct edid *edid;
+ unsigned int count;
+
+ if (tc->panel && tc->panel->funcs && tc->panel->funcs->get_modes) {
+ count = tc->panel->funcs->get_modes(tc->panel);
+ if (count > 0)
+ return count;
+ }
+
+ edid = drm_get_edid(connector, &tc->aux.ddc);
+
+ kfree(tc->edid);
+ tc->edid = edid;
+ if (!edid)
+ return 0;
+
+ drm_mode_connector_update_edid_property(connector, edid);
+ count = drm_add_edid_modes(connector, edid);
+
+ return count;
+}
+
+static void tc_connector_set_polling(struct tc_data *tc,
+ struct drm_connector *connector)
+{
+ /* TODO: add support for HPD */
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+}
+
+static struct drm_encoder *
+tc_connector_best_encoder(struct drm_connector *connector)
+{
+ struct tc_data *tc = connector_to_tc(connector);
+
+ return tc->bridge.encoder;
+}
+
+static const struct drm_connector_helper_funcs tc_connector_helper_funcs = {
+ .get_modes = tc_connector_get_modes,
+ .mode_valid = tc_connector_mode_valid,
+ .best_encoder = tc_connector_best_encoder,
+};
+
+static void tc_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs tc_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = tc_connector_detect,
+ .destroy = tc_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int tc_bridge_attach(struct drm_bridge *bridge)
+{
+ u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ struct tc_data *tc = bridge_to_tc(bridge);
+ struct drm_device *drm = bridge->dev;
+ int ret;
+
+ /* Create eDP connector */
+ drm_connector_helper_add(&tc->connector, &tc_connector_helper_funcs);
+ ret = drm_connector_init(drm, &tc->connector, &tc_connector_funcs,
+ DRM_MODE_CONNECTOR_eDP);
+ if (ret)
+ return ret;
+
+ if (tc->panel)
+ drm_panel_attach(tc->panel, &tc->connector);
+
+ drm_display_info_set_bus_formats(&tc->connector.display_info,
+ &bus_format, 1);
+ drm_mode_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
+
+ return 0;
+}
+
+static const struct drm_bridge_funcs tc_bridge_funcs = {
+ .attach = tc_bridge_attach,
+ .mode_set = tc_bridge_mode_set,
+ .pre_enable = tc_bridge_pre_enable,
+ .enable = tc_bridge_enable,
+ .disable = tc_bridge_disable,
+ .post_disable = tc_bridge_post_disable,
+ .mode_fixup = tc_bridge_mode_fixup,
+};
+
+static bool tc_readable_reg(struct device *dev, unsigned int reg)
+{
+ return reg != SYSCTRL;
+}
+
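+/*
+ * Registers the hardware updates behind the driver's back (AUX status,
+ * link training status, PHY and PLL state, the self-clearing VFUEN bit)
+ * must bypass the register cache.
+ */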
+static const struct regmap_range tc_volatile_ranges[] = {
+ regmap_reg_range(DP0_AUXWDATA(0), DP0_AUXSTATUS),
+ regmap_reg_range(DP0_LTSTAT, DP0_SNKLTCHGREQ),
+ regmap_reg_range(DP_PHY_CTRL, DP_PHY_CTRL),
+ regmap_reg_range(DP0_PLLCTRL, PXL_PLLCTRL),
+ regmap_reg_range(VFUEN0, VFUEN0),
+};
+
+static const struct regmap_access_table tc_volatile_table = {
+ .yes_ranges = tc_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(tc_volatile_ranges),
+};
+
+static bool tc_writeable_reg(struct device *dev, unsigned int reg)
+{
+ return (reg != TC_IDREG) &&
+ (reg != DP0_LTSTAT) &&
+ (reg != DP0_SNKLTCHGREQ);
+}
+
+static const struct regmap_config tc_regmap_config = {
+ .name = "tc358767",
+ .reg_bits = 16,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = PLL_DBG,
+ .cache_type = REGCACHE_RBTREE,
+ .readable_reg = tc_readable_reg,
+ .volatile_table = &tc_volatile_table,
+ .writeable_reg = tc_writeable_reg,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+};
+
+static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device_node *ep;
+ struct tc_data *tc;
+ int ret;
+
+ tc = devm_kzalloc(dev, sizeof(*tc), GFP_KERNEL);
+ if (!tc)
+ return -ENOMEM;
+
+ tc->dev = dev;
+
+ /* port@2 is the output port */
+ ep = of_graph_get_endpoint_by_regs(dev->of_node, 2, -1);
+ if (ep) {
+ struct device_node *remote;
+
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote) {
+ dev_warn(dev, "endpoint %s not connected\n",
+ ep->full_name);
+ of_node_put(ep);
+ return -ENODEV;
+ }
+ of_node_put(ep);
+ tc->panel = of_drm_find_panel(remote);
+ if (tc->panel) {
+ dev_dbg(dev, "found panel %s\n", remote->full_name);
+ } else {
+ dev_dbg(dev, "waiting for panel %s\n",
+ remote->full_name);
+ of_node_put(remote);
+ return -EPROBE_DEFER;
+ }
+ of_node_put(remote);
+ }
+
+	/* The shutdown GPIO is optional */
+ tc->sd_gpio = devm_gpiod_get_optional(dev, "shutdown", GPIOD_OUT_HIGH);
+ if (IS_ERR(tc->sd_gpio))
+ return PTR_ERR(tc->sd_gpio);
+
+ if (tc->sd_gpio) {
+ gpiod_set_value_cansleep(tc->sd_gpio, 0);
+ usleep_range(5000, 10000);
+ }
+
+ /* Reset GPIO is optional */
+ tc->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(tc->reset_gpio))
+ return PTR_ERR(tc->reset_gpio);
+
+ if (tc->reset_gpio) {
+ gpiod_set_value_cansleep(tc->reset_gpio, 1);
+ usleep_range(5000, 10000);
+ }
+
+ tc->refclk = devm_clk_get(dev, "ref");
+ if (IS_ERR(tc->refclk)) {
+ ret = PTR_ERR(tc->refclk);
+ dev_err(dev, "Failed to get refclk: %d\n", ret);
+ return ret;
+ }
+
+ tc->regmap = devm_regmap_init_i2c(client, &tc_regmap_config);
+ if (IS_ERR(tc->regmap)) {
+ ret = PTR_ERR(tc->regmap);
+ dev_err(dev, "Failed to initialize regmap: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_read(tc->regmap, TC_IDREG, &tc->rev);
+ if (ret) {
+		dev_err(tc->dev, "cannot read device ID: %d\n", ret);
+ return ret;
+ }
+
+ if ((tc->rev != 0x6601) && (tc->rev != 0x6603)) {
+ dev_err(tc->dev, "invalid device ID: 0x%08x\n", tc->rev);
+ return -EINVAL;
+ }
+
+ tc->assr = (tc->rev == 0x6601); /* Enable ASSR for eDP panels */
+
+ ret = tc_aux_link_setup(tc);
+ if (ret)
+ return ret;
+
+ /* Register DP AUX channel */
+ tc->aux.name = "TC358767 AUX i2c adapter";
+ tc->aux.dev = tc->dev;
+ tc->aux.transfer = tc_aux_transfer;
+ ret = drm_dp_aux_register(&tc->aux);
+ if (ret)
+ return ret;
+
+ ret = tc_get_display_props(tc);
+ if (ret)
+ goto err_unregister_aux;
+
+ tc_connector_set_polling(tc, &tc->connector);
+
+ tc->bridge.funcs = &tc_bridge_funcs;
+ tc->bridge.of_node = dev->of_node;
+ ret = drm_bridge_add(&tc->bridge);
+ if (ret) {
+ dev_err(dev, "Failed to add drm_bridge: %d\n", ret);
+ goto err_unregister_aux;
+ }
+
+ i2c_set_clientdata(client, tc);
+
+ return 0;
+err_unregister_aux:
+ drm_dp_aux_unregister(&tc->aux);
+ return ret;
+}
+
+static int tc_remove(struct i2c_client *client)
+{
+ struct tc_data *tc = i2c_get_clientdata(client);
+
+ drm_bridge_remove(&tc->bridge);
+ drm_dp_aux_unregister(&tc->aux);
+
+ tc_pxl_pll_dis(tc);
+
+ return 0;
+}
+
+static const struct i2c_device_id tc358767_i2c_ids[] = {
+ { "tc358767", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tc358767_i2c_ids);
+
+static const struct of_device_id tc358767_of_ids[] = {
+ { .compatible = "toshiba,tc358767", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tc358767_of_ids);
+
+static struct i2c_driver tc358767_driver = {
+ .driver = {
+ .name = "tc358767",
+ .of_match_table = tc358767_of_ids,
+ },
+ .id_table = tc358767_i2c_ids,
+ .probe = tc_probe,
+ .remove = tc_remove,
+};
+module_i2c_driver(tc358767_driver);
+
+MODULE_AUTHOR("Andrey Gusakov <andrey.gusakov@cogentembedded.com>");
+MODULE_DESCRIPTION("tc358767 eDP encoder driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
index 9864559e5..04b3c161d 100644
--- a/drivers/gpu/drm/cirrus/Kconfig
+++ b/drivers/gpu/drm/cirrus/Kconfig
@@ -1,11 +1,7 @@
config DRM_CIRRUS_QEMU
tristate "Cirrus driver for QEMU emulated device"
depends on DRM && PCI
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_TTM
help
This is a KMS driver for emulated cirrus device in qemu.
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index dc83f69da..b05f7eae3 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -142,7 +142,7 @@ static struct drm_driver driver = {
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
- .gem_free_object = cirrus_gem_free_object,
+ .gem_free_object_unlocked = cirrus_gem_free_object,
.dumb_create = cirrus_dumb_create,
.dumb_map_offset = cirrus_dumb_mmap_offset,
.dumb_destroy = drm_gem_dumb_destroy,
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 32d32c5b7..76bcb43e7 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -17,8 +17,8 @@
static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
- if (cirrus_fb->obj)
- drm_gem_object_unreference_unlocked(cirrus_fb->obj);
+
+ drm_gem_object_unreference_unlocked(cirrus_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
@@ -185,14 +185,23 @@ int cirrus_driver_load(struct drm_device *dev, unsigned long flags)
goto out;
}
+ /*
+ * cirrus_modeset_init() is initializing/registering the emulated fbdev
+ * and DRM internals can access/test some of the fields in
+ * mode_config->funcs as part of the fbdev registration process.
+ * Make sure dev->mode_config.funcs is properly set to avoid
+ * dereferencing a NULL pointer.
+ * FIXME: mode_config.funcs assignment should probably be done in
+ * cirrus_modeset_init() (that's a common pattern seen in other DRM
+ * drivers).
+ */
+ dev->mode_config.funcs = &cirrus_mode_funcs;
r = cirrus_modeset_init(cdev);
if (r) {
dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
goto out;
}
- dev->mode_config.funcs = (void *)&cirrus_mode_funcs;
-
return 0;
out:
cirrus_driver_unload(dev);
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index d3d8d7bfc..17c915d9a 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -325,21 +325,20 @@ static void cirrus_crtc_commit(struct drm_crtc *crtc)
* use this for 8-bit mode so can't perform smooth fades on deeper modes,
* but it's a requirement that we provide the function
*/
-static void cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t start, uint32_t size)
+static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
{
struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
int i;
- if (size != CIRRUS_LUT_SIZE)
- return;
-
- for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
+ for (i = 0; i < size; i++) {
cirrus_crtc->lut_r[i] = red[i];
cirrus_crtc->lut_g[i] = green[i];
cirrus_crtc->lut_b[i] = blue[i];
}
cirrus_crtc_load_lut(crtc);
+
+ return 0;
}
/* Simple cleanup function */
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 6768b7b1a..1cc9ee607 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -186,17 +186,6 @@ static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
{
}
-static int cirrus_bo_move(struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
-{
- int r;
- r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
- return r;
-}
-
-
static void cirrus_ttm_backend_destroy(struct ttm_tt *tt)
{
ttm_tt_fini(tt);
@@ -241,7 +230,7 @@ struct ttm_bo_driver cirrus_bo_driver = {
.ttm_tt_unpopulate = cirrus_ttm_tt_unpopulate,
.init_mem_type = cirrus_bo_init_mem_type,
.evict_flags = cirrus_bo_evict_flags,
- .move = cirrus_bo_move,
+ .move = NULL,
.verify_access = cirrus_bo_verify_access,
.io_mem_reserve = &cirrus_ttm_io_mem_reserve,
.io_mem_free = &cirrus_ttm_io_mem_free,
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 79a05a3bc..2a3ded44c 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -33,6 +33,20 @@
#include "drm_crtc_internal.h"
+static void crtc_commit_free(struct kref *kref)
+{
+ struct drm_crtc_commit *commit =
+ container_of(kref, struct drm_crtc_commit, ref);
+
+ kfree(commit);
+}
+
+void drm_crtc_commit_put(struct drm_crtc_commit *commit)
+{
+ kref_put(&commit->ref, crtc_commit_free);
+}
+EXPORT_SYMBOL(drm_crtc_commit_put);
+
/**
* drm_atomic_state_default_release -
* release memory initialized by drm_atomic_state_init
@@ -44,11 +58,8 @@
void drm_atomic_state_default_release(struct drm_atomic_state *state)
{
kfree(state->connectors);
- kfree(state->connector_states);
kfree(state->crtcs);
- kfree(state->crtc_states);
kfree(state->planes);
- kfree(state->plane_states);
}
EXPORT_SYMBOL(drm_atomic_state_default_release);
@@ -72,18 +83,10 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
sizeof(*state->crtcs), GFP_KERNEL);
if (!state->crtcs)
goto fail;
- state->crtc_states = kcalloc(dev->mode_config.num_crtc,
- sizeof(*state->crtc_states), GFP_KERNEL);
- if (!state->crtc_states)
- goto fail;
state->planes = kcalloc(dev->mode_config.num_total_plane,
sizeof(*state->planes), GFP_KERNEL);
if (!state->planes)
goto fail;
- state->plane_states = kcalloc(dev->mode_config.num_total_plane,
- sizeof(*state->plane_states), GFP_KERNEL);
- if (!state->plane_states)
- goto fail;
state->dev = dev;
@@ -139,40 +142,48 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);
for (i = 0; i < state->num_connector; i++) {
- struct drm_connector *connector = state->connectors[i];
+ struct drm_connector *connector = state->connectors[i].ptr;
if (!connector)
continue;
connector->funcs->atomic_destroy_state(connector,
- state->connector_states[i]);
- state->connectors[i] = NULL;
- state->connector_states[i] = NULL;
+ state->connectors[i].state);
+ state->connectors[i].ptr = NULL;
+ state->connectors[i].state = NULL;
drm_connector_unreference(connector);
}
for (i = 0; i < config->num_crtc; i++) {
- struct drm_crtc *crtc = state->crtcs[i];
+ struct drm_crtc *crtc = state->crtcs[i].ptr;
if (!crtc)
continue;
crtc->funcs->atomic_destroy_state(crtc,
- state->crtc_states[i]);
- state->crtcs[i] = NULL;
- state->crtc_states[i] = NULL;
+ state->crtcs[i].state);
+
+ if (state->crtcs[i].commit) {
+ kfree(state->crtcs[i].commit->event);
+ state->crtcs[i].commit->event = NULL;
+ drm_crtc_commit_put(state->crtcs[i].commit);
+ }
+
+ state->crtcs[i].commit = NULL;
+ state->crtcs[i].ptr = NULL;
+ state->crtcs[i].state = NULL;
}
for (i = 0; i < config->num_total_plane; i++) {
- struct drm_plane *plane = state->planes[i];
+ struct drm_plane *plane = state->planes[i].ptr;
if (!plane)
continue;
plane->funcs->atomic_destroy_state(plane,
- state->plane_states[i]);
- state->planes[i] = NULL;
- state->plane_states[i] = NULL;
+ state->planes[i].state);
+ state->planes[i].ptr = NULL;
+ state->planes[i].state = NULL;
}
}
EXPORT_SYMBOL(drm_atomic_state_default_clear);
@@ -270,8 +281,8 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
if (!crtc_state)
return ERR_PTR(-ENOMEM);
- state->crtc_states[index] = crtc_state;
- state->crtcs[index] = crtc;
+ state->crtcs[index].state = crtc_state;
+ state->crtcs[index].ptr = crtc;
crtc_state->state = state;
DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
@@ -393,8 +404,7 @@ drm_atomic_replace_property_blob(struct drm_property_blob **blob,
if (old_blob == new_blob)
return;
- if (old_blob)
- drm_property_unreference_blob(old_blob);
+ drm_property_unreference_blob(old_blob);
if (new_blob)
drm_property_reference_blob(new_blob);
*blob = new_blob;
@@ -632,8 +642,8 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
if (!plane_state)
return ERR_PTR(-ENOMEM);
- state->plane_states[index] = plane_state;
- state->planes[index] = plane;
+ state->planes[index].state = plane_state;
+ state->planes[index].ptr = plane;
plane_state->state = state;
DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
@@ -701,6 +711,8 @@ int drm_atomic_plane_set_property(struct drm_plane *plane,
state->src_h = val;
} else if (property == config->rotation_property) {
state->rotation = val;
+ } else if (property == plane->zpos_property) {
+ state->zpos = val;
} else if (plane->funcs->atomic_set_property) {
return plane->funcs->atomic_set_property(plane, state,
property, val);
@@ -757,6 +769,8 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
*val = state->src_h;
} else if (property == config->rotation_property) {
*val = state->rotation;
+ } else if (property == plane->zpos_property) {
+ *val = state->zpos;
} else if (plane->funcs->atomic_get_property) {
return plane->funcs->atomic_get_property(plane, state, property, val);
} else {
@@ -897,8 +911,7 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
index = drm_connector_index(connector);
if (index >= state->num_connector) {
- struct drm_connector **c;
- struct drm_connector_state **cs;
+ struct __drm_connnectors_state *c;
int alloc = max(index + 1, config->num_connector);
c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
@@ -909,26 +922,19 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
memset(&state->connectors[state->num_connector], 0,
sizeof(*state->connectors) * (alloc - state->num_connector));
- cs = krealloc(state->connector_states, alloc * sizeof(*state->connector_states), GFP_KERNEL);
- if (!cs)
- return ERR_PTR(-ENOMEM);
-
- state->connector_states = cs;
- memset(&state->connector_states[state->num_connector], 0,
- sizeof(*state->connector_states) * (alloc - state->num_connector));
state->num_connector = alloc;
}
- if (state->connector_states[index])
- return state->connector_states[index];
+ if (state->connectors[index].state)
+ return state->connectors[index].state;
connector_state = connector->funcs->atomic_duplicate_state(connector);
if (!connector_state)
return ERR_PTR(-ENOMEM);
drm_connector_reference(connector);
- state->connector_states[index] = connector_state;
- state->connectors[index] = connector;
+ state->connectors[index].state = connector_state;
+ state->connectors[index].ptr = connector;
connector_state->state = state;
DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d] %p state to %p\n",
@@ -1457,7 +1463,8 @@ EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
*/
static struct drm_pending_vblank_event *create_vblank_event(
- struct drm_device *dev, struct drm_file *file_priv, uint64_t user_data)
+ struct drm_device *dev, struct drm_file *file_priv,
+ struct fence *fence, uint64_t user_data)
{
struct drm_pending_vblank_event *e = NULL;
int ret;
@@ -1470,12 +1477,17 @@ static struct drm_pending_vblank_event *create_vblank_event(
e->event.base.length = sizeof(e->event);
e->event.user_data = user_data;
- ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
- if (ret) {
- kfree(e);
- return NULL;
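+	/*
+	 * Events created without a file_priv are for driver-internal use
+	 * and are never delivered to userspace, so no event space needs to
+	 * be reserved.
+	 */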
+ if (file_priv) {
+ ret = drm_event_reserve_init(dev, file_priv, &e->base,
+ &e->event.base);
+ if (ret) {
+ kfree(e);
+ return NULL;
+ }
}
+ e->base.fence = fence;
+
return e;
}
@@ -1715,7 +1727,8 @@ retry:
for_each_crtc_in_state(state, crtc, crtc_state, i) {
struct drm_pending_vblank_event *e;
- e = create_vblank_event(dev, file_priv, arg->user_data);
+ e = create_vblank_event(dev, file_priv, NULL,
+ arg->user_data);
if (!e) {
ret = -ENOMEM;
goto out;
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index ddfa0d120..20be86d89 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -32,6 +32,8 @@
#include <drm/drm_atomic_helper.h>
#include <linux/fence.h>
+#include "drm_crtc_internal.h"
+
/**
* DOC: overview
*
@@ -110,8 +112,10 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
if (funcs->atomic_best_encoder)
new_encoder = funcs->atomic_best_encoder(connector, conn_state);
- else
+ else if (funcs->best_encoder)
new_encoder = funcs->best_encoder(connector);
+ else
+ new_encoder = drm_atomic_helper_best_encoder(connector);
if (new_encoder) {
if (encoder_mask & (1 << drm_encoder_index(new_encoder))) {
@@ -298,8 +302,10 @@ update_connector_routing(struct drm_atomic_state *state,
if (funcs->atomic_best_encoder)
new_encoder = funcs->atomic_best_encoder(connector,
connector_state);
- else
+ else if (funcs->best_encoder)
new_encoder = funcs->best_encoder(connector);
+ else
+ new_encoder = drm_atomic_helper_best_encoder(connector);
if (!new_encoder) {
DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
@@ -414,6 +420,9 @@ mode_fixup(struct drm_atomic_state *state)
for_each_crtc_in_state(state, crtc, crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
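+
+		/* A CRTC that is being disabled has no mode to fix up */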
+ if (!crtc_state->enable)
+ continue;
+
if (!crtc_state->mode_changed &&
!crtc_state->connectors_changed)
continue;
@@ -458,7 +467,7 @@ mode_fixup(struct drm_atomic_state *state)
* times for the same update, e.g. when the ->atomic_check functions depend upon
* the adjusted dotclock for fifo space allocation and watermark computation.
*
- * RETURNS
+ * RETURNS:
* Zero for success or -errno
*/
int
@@ -572,7 +581,7 @@ EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
* It also sets crtc_state->planes_changed to indicate that a crtc has
* updated planes.
*
- * RETURNS
+ * RETURNS:
* Zero for success or -errno
*/
int
@@ -585,6 +594,10 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
struct drm_plane_state *plane_state;
int i, ret = 0;
+ ret = drm_atomic_helper_normalize_zpos(dev, state);
+ if (ret)
+ return ret;
+
for_each_plane_in_state(state, plane, plane_state, i) {
const struct drm_plane_helper_funcs *funcs;
@@ -611,7 +624,7 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
if (!funcs || !funcs->atomic_check)
continue;
- ret = funcs->atomic_check(crtc, state->crtc_states[i]);
+ ret = funcs->atomic_check(crtc, crtc_state);
if (ret) {
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
crtc->base.id, crtc->name);
@@ -640,7 +653,7 @@ EXPORT_SYMBOL(drm_atomic_helper_check_planes);
* ->atomic_check functions depend upon an updated adjusted_mode.clock to
* e.g. properly compute watermarks.
*
- * RETURNS
+ * RETURNS:
* Zero for success or -errno
*/
int drm_atomic_helper_check(struct drm_device *dev,
@@ -1113,22 +1126,17 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
/**
- * drm_atomic_helper_commit - commit validated state object
- * @dev: DRM device
- * @state: the driver state object
- * @nonblocking: whether nonblocking behavior is requested.
+ * drm_atomic_helper_commit_tail - commit atomic update to hardware
+ * @state: new modeset state to be committed
*
- * This function commits a with drm_atomic_helper_check() pre-validated state
- * object. This can still fail when e.g. the framebuffer reservation fails. For
- * now this doesn't implement nonblocking commits.
+ * This is the default implementation for the ->atomic_commit_tail() hook of the
+ * &drm_mode_config_helper_funcs vtable.
*
- * Note that right now this function does not support nonblocking commits, hence
- * driver writers must implement their own version for now. Also note that the
- * default ordering of how the various stages are called is to match the legacy
- * modeset helper library closest. One peculiarity of that is that it doesn't
- * mesh well with runtime PM at all.
+ * Note that the default ordering of how the various stages are called is to
+ * match the legacy modeset helper library closest. One peculiarity of that is
+ * that it doesn't mesh well with runtime PM at all.
*
- * For drivers supporting runtime PM the recommended sequence is
+ * For drivers supporting runtime PM the recommended sequence is instead ::
*
* drm_atomic_helper_commit_modeset_disables(dev, state);
*
@@ -1136,9 +1144,75 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
*
* drm_atomic_helper_commit_planes(dev, state, true);
*
- * See the kerneldoc entries for these three functions for more details.
+ * for committing the atomic update to hardware. See the kerneldoc entries for
+ * these three functions for more details.
+ */
+void drm_atomic_helper_commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+
+ drm_atomic_helper_commit_modeset_disables(dev, state);
+
+ drm_atomic_helper_commit_planes(dev, state, false);
+
+ drm_atomic_helper_commit_modeset_enables(dev, state);
+
+ drm_atomic_helper_commit_hw_done(state);
+
+ drm_atomic_helper_wait_for_vblanks(dev, state);
+
+ drm_atomic_helper_cleanup_planes(dev, state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_commit_tail);
+
+static void commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct drm_mode_config_helper_funcs *funcs;
+
+ funcs = dev->mode_config.helper_private;
+
+ drm_atomic_helper_wait_for_fences(dev, state);
+
+ drm_atomic_helper_wait_for_dependencies(state);
+
+ if (funcs && funcs->atomic_commit_tail)
+ funcs->atomic_commit_tail(state);
+ else
+ drm_atomic_helper_commit_tail(state);
+
+ drm_atomic_helper_commit_cleanup_done(state);
+
+ drm_atomic_state_free(state);
+}
+
+static void commit_work(struct work_struct *work)
+{
+ struct drm_atomic_state *state = container_of(work,
+ struct drm_atomic_state,
+ commit_work);
+ commit_tail(state);
+}
+
+/**
+ * drm_atomic_helper_commit - commit validated state object
+ * @dev: DRM device
+ * @state: the driver state object
+ * @nonblock: whether nonblocking behavior is requested.
+ *
+ * This function commits a state object that has been pre-validated with
+ * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
+ * reservation fails. Nonblocking commits are implemented using
+ * drm_atomic_helper_setup_commit() and related functions.
+ *
+ * Committing the actual hardware state is done through the
+ * ->atomic_commit_tail() callback of the &drm_mode_config_helper_funcs vtable,
+ * or it's default implementation drm_atomic_helper_commit_tail().
*
- * RETURNS
+ * RETURNS:
* Zero for success or -errno.
*/
int drm_atomic_helper_commit(struct drm_device *dev,
@@ -1147,8 +1221,11 @@ int drm_atomic_helper_commit(struct drm_device *dev,
{
int ret;
- if (nonblock)
- return -EBUSY;
+ ret = drm_atomic_helper_setup_commit(state, nonblock);
+ if (ret)
+ return ret;
+
+ INIT_WORK(&state->commit_work, commit_work);
ret = drm_atomic_helper_prepare_planes(dev, state);
if (ret)
@@ -1160,7 +1237,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
* the software side now.
*/
- drm_atomic_helper_swap_state(dev, state);
+ drm_atomic_helper_swap_state(state, true);
/*
* Everything below can be run asynchronously without the need to grab
@@ -1176,21 +1253,16 @@ int drm_atomic_helper_commit(struct drm_device *dev,
* update. Which is important since compositors need to figure out the
* composition of the next frame right after having submitted the
* current layout.
+ *
+ * NOTE: Commit work has multiple phases: first the hardware commit, then
+ * cleanup. We want them to overlap, hence we need system_unbound_wq to
+ * make sure work items don't artificially stall on one another.
*/
- drm_atomic_helper_wait_for_fences(dev, state);
-
- drm_atomic_helper_commit_modeset_disables(dev, state);
-
- drm_atomic_helper_commit_planes(dev, state, false);
-
- drm_atomic_helper_commit_modeset_enables(dev, state);
-
- drm_atomic_helper_wait_for_vblanks(dev, state);
-
- drm_atomic_helper_cleanup_planes(dev, state);
-
- drm_atomic_state_free(state);
+ if (nonblock)
+ queue_work(system_unbound_wq, &state->commit_work);
+ else
+ commit_tail(state);
return 0;
}
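For context (again not part of this patch), a driver that relies entirely on this default machinery just wires the helper into its mode-config vtable; .fb_create is shown with the CMA helper purely as an assumption about the driver:

static const struct drm_mode_config_funcs foo_mode_config_funcs = {
	.fb_create	= drm_fb_cma_create,	/* assumption: CMA-backed fbs */
	.atomic_check	= drm_atomic_helper_check,
	.atomic_commit	= drm_atomic_helper_commit,
};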
@@ -1199,12 +1271,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
/**
* DOC: implementing nonblocking commit
*
- * For now the atomic helpers don't support nonblocking commit directly. If
- * there is real need it could be added though, using the dma-buf fence
- * infrastructure for generic synchronization with outstanding rendering.
- *
- * For now drivers have to implement nonblocking commit themselves, with the
- * following sequence being the recommended one:
+ * Nonblocking atomic commits have to be implemented in the following sequence:
*
* 1. Run drm_atomic_helper_prepare_planes() first. This is the only function
* which commit needs to call which can fail, so we want to run it first and
@@ -1216,10 +1283,14 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
* cancelled updates. Note that it is important to ensure that the framebuffer
* cleanup is still done when cancelling.
*
- * For sufficient parallelism it is recommended to have a work item per crtc
- * (for updates which don't touch global state) and a global one. Then we only
- * need to synchronize with the crtc work items for changed crtcs and the global
- * work item, which allows nice concurrent updates on disjoint sets of crtcs.
+ * Asynchronous workers need to have sufficient parallelism to be able to run
+ * different atomic commits on different CRTCs in parallel. The simplest way to
+ * achieve this is by running them on the &system_unbound_wq work queue. Note
+ * that drivers are not required to split up atomic commits and run individual
+ * commits in parallel - userspace is supposed to do that if it cares. But it
+ * might be beneficial to do that for modesets, since those necessarily must be
+ * done as one global operation, and enabling or disabling a CRTC can take a
+ * long time. But even that is not required.
*
* 3. The software state is updated synchronously with
* drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
@@ -1232,8 +1303,310 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
* commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
* then cleaning up the framebuffers after the old framebuffer is no longer
* being displayed.
+ *
+ * The above scheme is implemented in the atomic helper libraries in
+ * drm_atomic_helper_commit() using a bunch of helper functions. See
+ * drm_atomic_helper_setup_commit() for a starting point.
*/
+static int stall_checks(struct drm_crtc *crtc, bool nonblock)
+{
+ struct drm_crtc_commit *commit, *stall_commit = NULL;
+ bool completed = true;
+ int i;
+ long ret = 0;
+
+ spin_lock(&crtc->commit_lock);
+ i = 0;
+ list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
+ if (i == 0) {
+ completed = try_wait_for_completion(&commit->flip_done);
+ /* Userspace is not allowed to get ahead of the previous
+ * commit with nonblocking ones. */
+ if (!completed && nonblock) {
+ spin_unlock(&crtc->commit_lock);
+ return -EBUSY;
+ }
+ } else if (i == 1) {
+ stall_commit = commit;
+ drm_crtc_commit_get(stall_commit);
+ break;
+ }
+
+ i++;
+ }
+ spin_unlock(&crtc->commit_lock);
+
+ if (!stall_commit)
+ return 0;
+
+ /* We don't want to let commits get ahead of cleanup work too much,
+ * stalling on the 2nd previous commit means triple-buffering never stalls.
+ */
+ ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done,
+ 10*HZ);
+ if (ret == 0)
+ DRM_ERROR("[CRTC:%d:%s] cleanup_done timed out\n",
+ crtc->base.id, crtc->name);
+
+ drm_crtc_commit_put(stall_commit);
+
+ return ret < 0 ? ret : 0;
+}
+
+/**
+ * drm_atomic_helper_setup_commit - setup possibly nonblocking commit
+ * @state: new modeset state to be committed
+ * @nonblock: whether nonblocking behavior is requested.
+ *
+ * This function prepares @state to be used by the atomic helper's support for
+ * nonblocking commits. Drivers using the nonblocking commit infrastructure
+ * should always call this function from their ->atomic_commit hook.
+ *
+ * To be able to use this support drivers need to use a few more helper
+ * functions. drm_atomic_helper_wait_for_dependencies() must be called before
+ * actually committing the hardware state, and for nonblocking commits this call
+ * must be placed in the async worker. See also drm_atomic_helper_swap_state()
+ * and its stall parameter, for when a driver's commit hooks look at the
+ * ->state pointers of struct &drm_crtc, &drm_plane or &drm_connector directly.
+ *
+ * Completion of the hardware commit step must be signalled using
+ * drm_atomic_helper_commit_hw_done(). After this step the driver is not allowed
+ * to read or change any permanent software or hardware modeset state. The only
+ * exception is state protected by other means than &drm_modeset_lock locks.
+ * Only the free-standing @state with pointers to the old state structures can
+ * be inspected, e.g. to clean up old buffers using
+ * drm_atomic_helper_cleanup_planes().
+ *
+ * At the very end, before cleaning up @state drivers must call
+ * drm_atomic_helper_commit_cleanup_done().
+ *
+ * This is all implemented in drm_atomic_helper_commit(), giving drivers a
+ * complete and easy-to-use default implementation of the atomic_commit() hook.
+ *
+ * The tracking of asynchronously executed and still pending commits is done
+ * using the core structure &drm_crtc_commit.
+ *
+ * By default there's no need to clean up resources allocated by this function
+ * explicitly: drm_atomic_state_default_clear() will take care of that
+ * automatically.
+ *
+ * Returns:
+ *
+ * 0 on success. -EBUSY when userspace schedules nonblocking commits too fast,
+ * -ENOMEM on allocation failures and -EINTR when a signal is pending.
+ */
+int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
+ bool nonblock)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc_commit *commit;
+ int i, ret;
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ commit = kzalloc(sizeof(*commit), GFP_KERNEL);
+ if (!commit)
+ return -ENOMEM;
+
+ init_completion(&commit->flip_done);
+ init_completion(&commit->hw_done);
+ init_completion(&commit->cleanup_done);
+ INIT_LIST_HEAD(&commit->commit_entry);
+ kref_init(&commit->ref);
+ commit->crtc = crtc;
+
+ state->crtcs[i].commit = commit;
+
+ ret = stall_checks(crtc, nonblock);
+ if (ret)
+ return ret;
+
+ /* Drivers only send out events when either the current or the
+ * new CRTC state is active. Complete right away if everything
+ * stays off. */
+ if (!crtc->state->active && !crtc_state->active) {
+ complete_all(&commit->flip_done);
+ continue;
+ }
+
+ /* Legacy cursor updates are fully unsynced. */
+ if (state->legacy_cursor_update) {
+ complete_all(&commit->flip_done);
+ continue;
+ }
+
+ if (!crtc_state->event) {
+ commit->event = kzalloc(sizeof(*commit->event),
+ GFP_KERNEL);
+ if (!commit->event)
+ return -ENOMEM;
+
+ crtc_state->event = commit->event;
+ }
+
+ crtc_state->event->base.completion = &commit->flip_done;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
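Pulling the pieces prescribed by this kerneldoc together, a driver-private nonblocking commit built on these helpers might look like the following sketch; foo_hw_commit() stands in for the driver's actual hardware programming, and error handling is elided:

static void foo_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;

	drm_atomic_helper_wait_for_fences(dev, state);
	drm_atomic_helper_wait_for_dependencies(state);

	foo_hw_commit(dev, state);		/* hypothetical hw programming */
	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_vblanks(dev, state);
	drm_atomic_helper_cleanup_planes(dev, state);
	drm_atomic_helper_commit_cleanup_done(state);

	drm_atomic_state_free(state);
}

static void foo_commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state =
		container_of(work, struct drm_atomic_state, commit_work);

	foo_commit_tail(state);
}

static int foo_atomic_commit(struct drm_device *dev,
			     struct drm_atomic_state *state, bool nonblock)
{
	int ret;

	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		return ret;

	INIT_WORK(&state->commit_work, foo_commit_work);

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	/* stall == true since the commit tail reads the ->state pointers */
	drm_atomic_helper_swap_state(state, true);

	if (nonblock)
		queue_work(system_unbound_wq, &state->commit_work);
	else
		foo_commit_tail(state);

	return 0;
}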
+
+
+static struct drm_crtc_commit *preceeding_commit(struct drm_crtc *crtc)
+{
+ struct drm_crtc_commit *commit;
+ int i = 0;
+
+ list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
+ /* skip the first entry, that's the current commit */
+ if (i == 1)
+ return commit;
+ i++;
+ }
+
+ return NULL;
+}
+
+/**
+ * drm_atomic_helper_wait_for_dependencies - wait for required preceding commits
+ * @state: new modeset state to be committed
+ *
+ * This function waits for all preceding commits that touch the same CRTC as
+ * @state to both be committed to the hardware (as signalled by
+ * drm_atomic_helper_commit_hw_done()) and executed by the hardware (as
+ * signalled by calling drm_crtc_send_vblank_event() on the event member of
+ * &drm_crtc_state).
+ *
+ * This is part of the atomic helper support for nonblocking commits, see
+ * drm_atomic_helper_setup_commit() for an overview.
+ */
+void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc_commit *commit;
+ int i;
+ long ret;
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ spin_lock(&crtc->commit_lock);
+ commit = preceeding_commit(crtc);
+ if (commit)
+ drm_crtc_commit_get(commit);
+ spin_unlock(&crtc->commit_lock);
+
+ if (!commit)
+ continue;
+
+ ret = wait_for_completion_timeout(&commit->hw_done,
+ 10*HZ);
+ if (ret == 0)
+ DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n",
+ crtc->base.id, crtc->name);
+
+ /* Currently no support for overwriting flips, hence
+ * stall for the previous one to execute completely. */
+ ret = wait_for_completion_timeout(&commit->flip_done,
+ 10*HZ);
+ if (ret == 0)
+ DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
+ crtc->base.id, crtc->name);
+
+ drm_crtc_commit_put(commit);
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
+
+/**
+ * drm_atomic_helper_commit_hw_done - signal completion of the hardware commit step
+ * @state: new modeset state to be committed
+ *
+ * This function is used to signal completion of the hardware commit step. After
+ * this step the driver is not allowed to read or change any permanent software
+ * or hardware modeset state. The only exception is state protected by other
+ * means than &drm_modeset_lock locks.
+ *
+ * Drivers should try to postpone any expensive or delayed cleanup work until
+ * after this function is called.
+ *
+ * This is part of the atomic helper support for nonblocking commits, see
+ * drm_atomic_helper_setup_commit() for an overview.
+ */
+void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc_commit *commit;
+ int i;
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ commit = state->crtcs[i].commit;
+ if (!commit)
+ continue;
+
+ /* backend must have consumed any event by now */
+ WARN_ON(crtc->state->event);
+ spin_lock(&crtc->commit_lock);
+ complete_all(&commit->hw_done);
+ spin_unlock(&crtc->commit_lock);
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
+
+/**
+ * drm_atomic_helper_commit_cleanup_done - signal completion of commit
+ * @state: new modeset state to be committed
+ *
+ * This signals completion of the atomic update @state, including any cleanup
+ * work. If used, it must be called right before calling
+ * drm_atomic_state_free().
+ *
+ * This is part of the atomic helper support for nonblocking commits, see
+ * drm_atomic_helper_setup_commit() for an overview.
+ */
+void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc_commit *commit;
+ int i;
+ long ret;
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ commit = state->crtcs[i].commit;
+ if (WARN_ON(!commit))
+ continue;
+
+ spin_lock(&crtc->commit_lock);
+ complete_all(&commit->cleanup_done);
+ WARN_ON(!try_wait_for_completion(&commit->hw_done));
+
+ /* commit_list borrows our reference, need to remove before we
+ * clean up our drm_atomic_state. But only after it has actually
+ * completed, otherwise subsequent commits won't stall properly. */
+ if (try_wait_for_completion(&commit->flip_done))
+ goto del_commit;
+
+ spin_unlock(&crtc->commit_lock);
+
+ /* We must wait for the vblank event to signal our completion
+ * before releasing our reference, since the vblank work does
+ * not hold a reference of its own. */
+ ret = wait_for_completion_timeout(&commit->flip_done,
+ 10*HZ);
+ if (ret == 0)
+ DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
+ crtc->base.id, crtc->name);
+
+ spin_lock(&crtc->commit_lock);
+del_commit:
+ list_del(&commit->commit_entry);
+ spin_unlock(&crtc->commit_lock);
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
+
/**
* drm_atomic_helper_prepare_planes - prepare plane resources before commit
* @dev: DRM device
@@ -1249,16 +1622,12 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
int drm_atomic_helper_prepare_planes(struct drm_device *dev,
struct drm_atomic_state *state)
{
- int nplanes = dev->mode_config.num_total_plane;
- int ret, i;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ int ret, i, j;
- for (i = 0; i < nplanes; i++) {
+ for_each_plane_in_state(state, plane, plane_state, i) {
const struct drm_plane_helper_funcs *funcs;
- struct drm_plane *plane = state->planes[i];
- struct drm_plane_state *plane_state = state->plane_states[i];
-
- if (!plane)
- continue;
funcs = plane->helper_private;
@@ -1272,12 +1641,10 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
return 0;
fail:
- for (i--; i >= 0; i--) {
+ for_each_plane_in_state(state, plane, plane_state, j) {
const struct drm_plane_helper_funcs *funcs;
- struct drm_plane *plane = state->planes[i];
- struct drm_plane_state *plane_state = state->plane_states[i];
- if (!plane)
+ if (j >= i)
continue;
funcs = plane->helper_private;
@@ -1537,8 +1904,8 @@ EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
/**
* drm_atomic_helper_swap_state - store atomic state into current sw state
- * @dev: DRM device
* @state: atomic state
+ * @stall: stall for preceding commits
*
* This function stores the atomic state into the current state pointers in all
* driver objects. It should be called after all failing steps have been done
@@ -1559,42 +1926,70 @@ EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
*
* 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3
* contains the old state. Also do any other cleanup required with that state.
+ *
+ * @stall must be set when nonblocking commits for this driver directly access
+ * the ->state pointer of &drm_plane, &drm_crtc or &drm_connector. With the
+ * current atomic helpers this is almost always the case, since the helpers
+ * don't pass the right state structures to the callbacks.
*/
-void drm_atomic_helper_swap_state(struct drm_device *dev,
- struct drm_atomic_state *state)
+void drm_atomic_helper_swap_state(struct drm_atomic_state *state,
+ bool stall)
{
int i;
+ long ret;
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ struct drm_crtc_commit *commit;
+
+ if (stall) {
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ spin_lock(&crtc->commit_lock);
+ commit = list_first_entry_or_null(&crtc->commit_list,
+ struct drm_crtc_commit, commit_entry);
+ if (commit)
+ drm_crtc_commit_get(commit);
+ spin_unlock(&crtc->commit_lock);
+
+ if (!commit)
+ continue;
- for (i = 0; i < state->num_connector; i++) {
- struct drm_connector *connector = state->connectors[i];
-
- if (!connector)
- continue;
+ ret = wait_for_completion_timeout(&commit->hw_done,
+ 10*HZ);
+ if (ret == 0)
+ DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n",
+ crtc->base.id, crtc->name);
+ drm_crtc_commit_put(commit);
+ }
+ }
+ for_each_connector_in_state(state, connector, conn_state, i) {
connector->state->state = state;
- swap(state->connector_states[i], connector->state);
+ swap(state->connectors[i].state, connector->state);
connector->state->state = NULL;
}
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct drm_crtc *crtc = state->crtcs[i];
-
- if (!crtc)
- continue;
-
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
crtc->state->state = state;
- swap(state->crtc_states[i], crtc->state);
+ swap(state->crtcs[i].state, crtc->state);
crtc->state->state = NULL;
- }
- for (i = 0; i < dev->mode_config.num_total_plane; i++) {
- struct drm_plane *plane = state->planes[i];
+ if (state->crtcs[i].commit) {
+ spin_lock(&crtc->commit_lock);
+ list_add(&state->crtcs[i].commit->commit_entry,
+ &crtc->commit_list);
+ spin_unlock(&crtc->commit_lock);
- if (!plane)
- continue;
+ state->crtcs[i].commit->event = NULL;
+ }
+ }
+ for_each_plane_in_state(state, plane, plane_state, i) {
plane->state->state = state;
- swap(state->plane_states[i], plane->state);
+ swap(state->planes[i].state, plane->state);
plane->state->state = NULL;
}
}
@@ -2409,7 +2804,7 @@ EXPORT_SYMBOL(drm_atomic_helper_page_flip);
* This is the main helper function provided by the atomic helper framework for
* implementing the legacy DPMS connector interface. It computes the new desired
* ->active state for the corresponding CRTC (if the connector is enabled) and
- * updates it.
+ * updates it.
*
* Returns:
* Returns 0 on success, negative errno numbers on failure.
@@ -2566,6 +2961,7 @@ void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
state->planes_changed = false;
state->connectors_changed = false;
state->color_mgmt_changed = false;
+ state->zpos_changed = false;
state->event = NULL;
}
EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);
@@ -2930,16 +3326,15 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
* @red: red correction table
* @green: green correction table
 * @blue: blue correction table
- * @start:
* @size: size of the tables
*
* Implements support for legacy gamma correction table for drivers
* that support color management through the DEGAMMA_LUT/GAMMA_LUT
* properties.
*/
-void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
- u16 *red, u16 *green, u16 *blue,
- uint32_t start, uint32_t size)
+int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
+ u16 *red, u16 *green, u16 *blue,
+ uint32_t size)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *config = &dev->mode_config;
@@ -2951,7 +3346,7 @@ void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
state = drm_atomic_state_alloc(crtc->dev);
if (!state)
- return;
+ return -ENOMEM;
blob = drm_property_create_blob(dev,
sizeof(struct drm_color_lut) * size,
@@ -3002,7 +3397,7 @@ retry:
drm_property_unreference_blob(blob);
- return;
+ return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
@@ -3010,7 +3405,7 @@ fail:
drm_atomic_state_free(state);
drm_property_unreference_blob(blob);
- return;
+ return ret;
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
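Since drm_atomic_helper_legacy_gamma_set() now returns an int and has lost the start parameter, it plugs straight into the updated gamma_set hook. A plausible (hypothetical) atomic driver's CRTC vtable:

static const struct drm_crtc_funcs foo_crtc_funcs = {
	.gamma_set		= drm_atomic_helper_legacy_gamma_set,
	.set_config		= drm_atomic_helper_set_config,
	.page_flip		= drm_atomic_helper_page_flip,
	.destroy		= drm_crtc_cleanup,
	.reset			= drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
};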
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 50d0baa06..4153e8a19 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -30,25 +30,36 @@
#include <drm/drmP.h>
#include "drm_internal.h"
+#include "drm_legacy.h"
/**
- * drm_getmagic - Get unique magic of a client
- * @dev: DRM device to operate on
- * @data: ioctl data containing the drm_auth object
- * @file_priv: DRM file that performs the operation
+ * DOC: master and authentication
*
- * This looks up the unique magic of the passed client and returns it. If the
- * client did not have a magic assigned, yet, a new one is registered. The magic
- * is stored in the passed drm_auth object.
+ * struct &drm_master is used to track groups of clients with open
+ * primary/legacy device nodes. For every struct &drm_file which has at least
+ * once successfully become the device master (either through the SET_MASTER
+ * IOCTL, or implicitly through opening the primary device node while no one
+ * else was the current master at that time) there exists one &drm_master.
+ * This is noted in the is_master member of &drm_file. All other clients have
+ * just a pointer to the &drm_master they are associated with.
*
- * Returns: 0 on success, negative error code on failure.
+ * In addition only one &drm_master can be the current master for a &drm_device.
+ * It can be switched through the DROP_MASTER and SET_MASTER IOCTLs, or
+ * implicitly through closing/opening the primary device node. See also
+ * drm_is_current_master().
+ *
+ * Clients can authenticate against the current master (if it matches their own)
+ * using the GETMAGIC and AUTHMAGIC IOCTLs. Together with exchanging masters,
+ * this allows controlled access to the device for an entire group of mutually
+ * trusted clients.
*/
+
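From the userspace side the authentication dance described above is two ioctls; a minimal sketch using libdrm's wrappers (error handling elided):

#include <xf86drm.h>

/* The current DRM master authenticates another client's fd. */
static int authenticate_client(int master_fd, int client_fd)
{
	drm_magic_t magic;

	if (drmGetMagic(client_fd, &magic))	/* GETMAGIC on the client */
		return -1;

	return drmAuthMagic(master_fd, magic);	/* AUTHMAGIC by the master */
}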
int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_auth *auth = data;
int ret = 0;
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&dev->master_mutex);
if (!file_priv->magic) {
ret = idr_alloc(&file_priv->master->magic_map, file_priv,
1, 0, GFP_KERNEL);
@@ -56,23 +67,13 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
file_priv->magic = ret;
}
auth->magic = file_priv->magic;
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->master_mutex);
DRM_DEBUG("%u\n", auth->magic);
return ret < 0 ? ret : 0;
}
-/**
- * drm_authmagic - Authenticate client with a magic
- * @dev: DRM device to operate on
- * @data: ioctl data containing the drm_auth object
- * @file_priv: DRM file that performs the operation
- *
- * This looks up a DRM client by the passed magic and authenticates it.
- *
- * Returns: 0 on success, negative error code on failure.
- */
int drm_authmagic(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -81,13 +82,253 @@ int drm_authmagic(struct drm_device *dev, void *data,
DRM_DEBUG("%u\n", auth->magic);
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&dev->master_mutex);
file = idr_find(&file_priv->master->magic_map, auth->magic);
if (file) {
file->authenticated = 1;
idr_replace(&file_priv->master->magic_map, NULL, auth->magic);
}
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->master_mutex);
return file ? 0 : -EINVAL;
}
+
+static struct drm_master *drm_master_create(struct drm_device *dev)
+{
+ struct drm_master *master;
+
+ master = kzalloc(sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return NULL;
+
+ kref_init(&master->refcount);
+ spin_lock_init(&master->lock.spinlock);
+ init_waitqueue_head(&master->lock.lock_queue);
+ idr_init(&master->magic_map);
+ master->dev = dev;
+
+ return master;
+}
+
+static int drm_set_master(struct drm_device *dev, struct drm_file *fpriv,
+ bool new_master)
+{
+ int ret = 0;
+
+ dev->master = drm_master_get(fpriv->master);
+ if (dev->driver->master_set) {
+ ret = dev->driver->master_set(dev, fpriv, new_master);
+ if (unlikely(ret != 0)) {
+ drm_master_put(&dev->master);
+ }
+ }
+
+ return ret;
+}
+
+static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
+{
+ struct drm_master *old_master;
+ int ret;
+
+ lockdep_assert_held_once(&dev->master_mutex);
+
+ old_master = fpriv->master;
+ fpriv->master = drm_master_create(dev);
+ if (!fpriv->master) {
+ fpriv->master = old_master;
+ return -ENOMEM;
+ }
+
+ if (dev->driver->master_create) {
+ ret = dev->driver->master_create(dev, fpriv->master);
+ if (ret)
+ goto out_err;
+ }
+ fpriv->is_master = 1;
+ fpriv->authenticated = 1;
+
+ ret = drm_set_master(dev, fpriv, true);
+ if (ret)
+ goto out_err;
+
+ if (old_master)
+ drm_master_put(&old_master);
+
+ return 0;
+
+out_err:
+ /* drop references and restore old master on failure */
+ drm_master_put(&fpriv->master);
+ fpriv->master = old_master;
+
+ return ret;
+}
+
+int drm_setmaster_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ int ret = 0;
+
+ mutex_lock(&dev->master_mutex);
+ if (drm_is_current_master(file_priv))
+ goto out_unlock;
+
+ if (dev->master) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (!file_priv->master) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (!file_priv->is_master) {
+ ret = drm_new_set_master(dev, file_priv);
+ goto out_unlock;
+ }
+
+ ret = drm_set_master(dev, file_priv, false);
+out_unlock:
+ mutex_unlock(&dev->master_mutex);
+ return ret;
+}
+
+static void drm_drop_master(struct drm_device *dev,
+ struct drm_file *fpriv)
+{
+ if (dev->driver->master_drop)
+ dev->driver->master_drop(dev, fpriv);
+ drm_master_put(&dev->master);
+}
+
+int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ int ret = -EINVAL;
+
+ mutex_lock(&dev->master_mutex);
+ if (!drm_is_current_master(file_priv))
+ goto out_unlock;
+
+ if (!dev->master)
+ goto out_unlock;
+
+ ret = 0;
+ drm_drop_master(dev, file_priv);
+out_unlock:
+ mutex_unlock(&dev->master_mutex);
+ return ret;
+}
+
+int drm_master_open(struct drm_file *file_priv)
+{
+ struct drm_device *dev = file_priv->minor->dev;
+ int ret = 0;
+
+ /* if there is no current master make this fd the master, but do not
+ * create any master object for render clients */
+ mutex_lock(&dev->master_mutex);
+ if (!dev->master)
+ ret = drm_new_set_master(dev, file_priv);
+ else
+ file_priv->master = drm_master_get(dev->master);
+ mutex_unlock(&dev->master_mutex);
+
+ return ret;
+}
+
+void drm_master_release(struct drm_file *file_priv)
+{
+ struct drm_device *dev = file_priv->minor->dev;
+ struct drm_master *master = file_priv->master;
+
+ mutex_lock(&dev->master_mutex);
+ if (file_priv->magic)
+ idr_remove(&file_priv->master->magic_map, file_priv->magic);
+
+ if (!drm_is_current_master(file_priv))
+ goto out;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /*
+ * The master is going away, and with it the
+ * possibility to lock.
+ */
+ mutex_lock(&dev->struct_mutex);
+ if (master->lock.hw_lock) {
+ if (dev->sigdata.lock == master->lock.hw_lock)
+ dev->sigdata.lock = NULL;
+ master->lock.hw_lock = NULL;
+ master->lock.file_priv = NULL;
+ wake_up_interruptible_all(&master->lock.lock_queue);
+ }
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ if (dev->master == file_priv->master)
+ drm_drop_master(dev, file_priv);
+out:
+ /* drop the master reference held by the file priv */
+ if (file_priv->master)
+ drm_master_put(&file_priv->master);
+ mutex_unlock(&dev->master_mutex);
+}
+
+/**
+ * drm_is_current_master - checks whether @fpriv is the current master
+ * @fpriv: DRM file private
+ *
+ * Checks whether @fpriv is the current master on its device. This decides
+ * whether a client is allowed to run DRM_MASTER IOCTLs.
+ *
+ * Most of the modern IOCTLs which require DRM_MASTER are for kernel modesetting
+ * - the current master is assumed to own the non-shareable display hardware.
+ */
+bool drm_is_current_master(struct drm_file *fpriv)
+{
+ return fpriv->is_master && fpriv->master == fpriv->minor->dev->master;
+}
+EXPORT_SYMBOL(drm_is_current_master);
+
+/**
+ * drm_master_get - reference a master pointer
+ * @master: struct &drm_master
+ *
+ * Increments the reference count of @master and returns a pointer to @master.
+ */
+struct drm_master *drm_master_get(struct drm_master *master)
+{
+ kref_get(&master->refcount);
+ return master;
+}
+EXPORT_SYMBOL(drm_master_get);
+
+static void drm_master_destroy(struct kref *kref)
+{
+ struct drm_master *master = container_of(kref, struct drm_master, refcount);
+ struct drm_device *dev = master->dev;
+
+ if (dev->driver->master_destroy)
+ dev->driver->master_destroy(dev, master);
+
+ drm_legacy_master_rmmaps(dev, master);
+
+ idr_destroy(&master->magic_map);
+ kfree(master->unique);
+ kfree(master);
+}
+
+/**
+ * drm_master_put - unreference and clear a master pointer
+ * @master: pointer to a pointer of struct &drm_master
+ *
+ * This decrements the &drm_master behind @master and sets it to NULL.
+ */
+void drm_master_put(struct drm_master **master)
+{
+ kref_put(&(*master)->refcount, drm_master_destroy);
+ *master = NULL;
+}
+EXPORT_SYMBOL(drm_master_put);
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
new file mode 100644
index 000000000..f3c0942bd
--- /dev/null
+++ b/drivers/gpu/drm/drm_blend.c
@@ -0,0 +1,238 @@
+/*
+ * Copyright (C) 2016 Samsung Electronics Co.Ltd
+ * Authors:
+ * Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * DRM core plane blending related functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_crtc.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+
+#include "drm_internal.h"
+
+/**
+ * drm_plane_create_zpos_property - create mutable zpos property
+ * @plane: drm plane
+ * @zpos: initial value of zpos property
+ * @min: minimal possible value of zpos property
+ * @max: maximal possible value of zpos property
+ *
+ * This function initializes a generic, mutable zpos property and enables
+ * support for it in the drm core. Drivers can then attach this property to
+ * planes to enable support for a configurable plane arrangement during the
+ * blending operation. Once the mutable zpos property has been enabled, the DRM
+ * core will automatically calculate drm_plane_state->normalized_zpos values.
+ * Usually min should be set to 0 and max to the maximal number of planes for
+ * the given crtc minus one.
+ *
+ * If the zpos of some planes cannot be changed (like fixed background or
+ * cursor/topmost planes), the driver should adjust the min/max values and
+ * assign those planes an immutable zpos property with lower or higher values
+ * (for more information, see drm_plane_create_zpos_immutable_property()). In
+ * such a case the driver should also assign proper initial zpos values for all
+ * planes in its plane_reset() callback, so the planes will always be sorted
+ * properly.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_plane_create_zpos_property(struct drm_plane *plane,
+ unsigned int zpos,
+ unsigned int min, unsigned int max)
+{
+ struct drm_property *prop;
+
+ prop = drm_property_create_range(plane->dev, 0, "zpos", min, max);
+ if (!prop)
+ return -ENOMEM;
+
+ drm_object_attach_property(&plane->base, prop, zpos);
+
+ plane->zpos_property = prop;
+
+ if (plane->state) {
+ plane->state->zpos = zpos;
+ plane->state->normalized_zpos = zpos;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_plane_create_zpos_property);
+
+/**
+ * drm_plane_create_zpos_immutable_property - create immutable zpos property
+ * @plane: drm plane
+ * @zpos: value of zpos property
+ *
+ * This function initializes a generic, immutable zpos property and enables
+ * support for it in the drm core. With this property the driver lets userspace
+ * query the arrangement of the planes for the blending operation, and signals
+ * that the hardware (or driver) doesn't support changing the planes' order.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_plane_create_zpos_immutable_property(struct drm_plane *plane,
+ unsigned int zpos)
+{
+ struct drm_property *prop;
+
+ prop = drm_property_create_range(plane->dev, DRM_MODE_PROP_IMMUTABLE,
+ "zpos", zpos, zpos);
+ if (!prop)
+ return -ENOMEM;
+
+ drm_object_attach_property(&plane->base, prop, zpos);
+
+ plane->zpos_property = prop;
+
+ if (plane->state) {
+ plane->state->zpos = zpos;
+ plane->state->normalized_zpos = zpos;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_plane_create_zpos_immutable_property);
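A sketch of how a driver might use the two creation functions during plane setup, assuming a fixed topmost cursor and freely reorderable overlays below it (the foo_ name and the choice of initial zpos values are illustrative only):

static int foo_plane_init_zpos(struct drm_plane *plane, unsigned int nplanes)
{
	/* cursor is pinned to the top, so its zpos is immutable */
	if (plane->type == DRM_PLANE_TYPE_CURSOR)
		return drm_plane_create_zpos_immutable_property(plane,
								nplanes - 1);

	/* everything else may be reordered below the cursor */
	return drm_plane_create_zpos_property(plane, plane->index,
					      0, nplanes - 2);
}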
+
+static int drm_atomic_state_zpos_cmp(const void *a, const void *b)
+{
+ const struct drm_plane_state *sa = *(struct drm_plane_state **)a;
+ const struct drm_plane_state *sb = *(struct drm_plane_state **)b;
+
+ if (sa->zpos != sb->zpos)
+ return sa->zpos - sb->zpos;
+ else
+ return sa->plane->base.id - sb->plane->base.id;
+}
+
+/**
+ * drm_atomic_helper_crtc_normalize_zpos - calculate normalized zpos values
+ * @crtc: crtc with planes, which have to be considered for normalization
+ * @crtc_state: new atomic state to apply
+ *
+ * This function checks the new states of all planes assigned to the given crtc
+ * and calculates normalized zpos values for them. Planes are compared first by
+ * their zpos values, then by plane id (if the zpos values are equal). The plane
+ * with the lowest zpos value is at the bottom. plane_state->normalized_zpos is
+ * then filled with unique values from 0 to the number of active planes on the
+ * crtc minus one. For example, planes with zpos 8, 2 and 2 end up with
+ * normalized_zpos 2, 0 and 1, the tie broken by plane id.
+ *
+ * RETURNS:
+ * Zero for success or -errno
+ */
+static int drm_atomic_helper_crtc_normalize_zpos(struct drm_crtc *crtc,
+ struct drm_crtc_state *crtc_state)
+{
+ struct drm_atomic_state *state = crtc_state->state;
+ struct drm_device *dev = crtc->dev;
+ int total_planes = dev->mode_config.num_total_plane;
+ struct drm_plane_state **states;
+ struct drm_plane *plane;
+ int i, n = 0;
+ int ret = 0;
+
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] calculating normalized zpos values\n",
+ crtc->base.id, crtc->name);
+
+ states = kmalloc_array(total_planes, sizeof(*states), GFP_TEMPORARY);
+ if (!states)
+ return -ENOMEM;
+
+ /*
+ * The normalization process might create new states for planes whose
+ * normalized_zpos has to be recalculated.
+ */
+ drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
+ struct drm_plane_state *plane_state =
+ drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto done;
+ }
+ states[n++] = plane_state;
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] processing zpos value %d\n",
+ plane->base.id, plane->name,
+ plane_state->zpos);
+ }
+
+ sort(states, n, sizeof(*states), drm_atomic_state_zpos_cmp, NULL);
+
+ for (i = 0; i < n; i++) {
+ plane = states[i]->plane;
+
+ states[i]->normalized_zpos = i;
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] normalized zpos value %d\n",
+ plane->base.id, plane->name, i);
+ }
+ crtc_state->zpos_changed = true;
+
+done:
+ kfree(states);
+ return ret;
+}
+
+/**
+ * drm_atomic_helper_normalize_zpos - calculate normalized zpos values for all
+ * crtcs
+ * @dev: DRM device
+ * @state: atomic state of DRM device
+ *
+ * This function calculates normalized zpos values for all modified planes in
+ * the provided atomic state of a DRM device. For more information, see
+ * drm_atomic_helper_crtc_normalize_zpos().
+ *
+ * RETURNS:
+ * Zero for success or -errno
+ */
+int drm_atomic_helper_normalize_zpos(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ int i, ret = 0;
+
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ crtc = plane_state->crtc;
+ if (!crtc)
+ continue;
+ if (plane->state->zpos != plane_state->zpos) {
+ crtc_state =
+ drm_atomic_get_existing_crtc_state(state, crtc);
+ crtc_state->zpos_changed = true;
+ }
+ }
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ if (crtc_state->plane_mask != crtc->state->plane_mask ||
+ crtc_state->zpos_changed) {
+ ret = drm_atomic_helper_crtc_normalize_zpos(crtc,
+ crtc_state);
+ if (ret)
+ return ret;
+ }
+ }
+ return 0;
+}
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index b3654404a..255543086 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -36,7 +36,7 @@
* encoder chain.
*
* A bridge is always attached to a single &drm_encoder at a time, but can be
- * either connected to it directly, or through an intermediate bridge:
+ * either connected to it directly, or through an intermediate bridge::
*
* encoder ---> bridge B ---> bridge A
*
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 9b34158c0..c3a12cd8b 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -51,7 +51,7 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
*/
if (!entry->map ||
map->type != entry->map->type ||
- entry->master != dev->primary->master)
+ entry->master != dev->master)
continue;
switch (map->type) {
case _DRM_SHM:
@@ -245,12 +245,12 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
map->offset = (unsigned long)map->handle;
if (map->flags & _DRM_CONTAINS_LOCK) {
/* Prevent a 2nd X Server from creating a 2nd lock */
- if (dev->primary->master->lock.hw_lock != NULL) {
+ if (dev->master->lock.hw_lock != NULL) {
vfree(map->handle);
kfree(map);
return -EBUSY;
}
- dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */
+ dev->sigdata.lock = dev->master->lock.hw_lock = map->handle; /* Pointer to lock */
}
break;
case _DRM_AGP: {
@@ -356,7 +356,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
mutex_unlock(&dev->struct_mutex);
if (!(map->flags & _DRM_DRIVER))
- list->master = dev->primary->master;
+ list->master = dev->master;
*maplist = list;
return 0;
}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index a5cae1b6d..ddebe54cd 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -39,6 +39,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_lock.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_auth.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@@ -239,37 +240,6 @@ const char *drm_get_subpixel_order_name(enum subpixel_order order)
}
EXPORT_SYMBOL(drm_get_subpixel_order_name);
-static char printable_char(int c)
-{
- return isascii(c) && isprint(c) ? c : '?';
-}
-
-/**
- * drm_get_format_name - return a string for drm fourcc format
- * @format: format to compute name of
- *
- * Note that the buffer used by this function is globally shared and owned by
- * the function itself.
- *
- * FIXME: This isn't really multithreading safe.
- */
-const char *drm_get_format_name(uint32_t format)
-{
- static char buf[32];
-
- snprintf(buf, sizeof(buf),
- "%c%c%c%c %s-endian (0x%08x)",
- printable_char(format & 0xff),
- printable_char((format >> 8) & 0xff),
- printable_char((format >> 16) & 0xff),
- printable_char((format >> 24) & 0x7f),
- format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little",
- format);
-
- return buf;
-}
-EXPORT_SYMBOL(drm_get_format_name);
-
/*
* Internal function to assign a slot in the object idr and optionally
* register the object into the idr.
@@ -426,6 +396,51 @@ void drm_mode_object_reference(struct drm_mode_object *obj)
}
EXPORT_SYMBOL(drm_mode_object_reference);
+/**
+ * drm_crtc_force_disable - Forcibly turn off a CRTC
+ * @crtc: CRTC to turn off
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_crtc_force_disable(struct drm_crtc *crtc)
+{
+ struct drm_mode_set set = {
+ .crtc = crtc,
+ };
+
+ return drm_mode_set_config_internal(&set);
+}
+EXPORT_SYMBOL(drm_crtc_force_disable);
+
+/**
+ * drm_crtc_force_disable_all - Forcibly turn off all enabled CRTCs
+ * @dev: DRM device whose CRTCs to turn off
+ *
+ * Drivers may want to call this on unload to ensure that all displays are
+ * unlit and the GPU is in a consistent, low power state. Takes modeset locks.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_crtc_force_disable_all(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+ int ret = 0;
+
+ drm_modeset_lock_all(dev);
+ drm_for_each_crtc(crtc, dev)
+ if (crtc->enabled) {
+ ret = drm_crtc_force_disable(crtc);
+ if (ret)
+ goto out;
+ }
+out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+EXPORT_SYMBOL(drm_crtc_force_disable_all);
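A typical caller would be a driver's unload path, roughly as follows (hypothetical foo_ driver; ->unload still returns int in this kernel):

static int foo_unload(struct drm_device *dev)
{
	drm_crtc_force_disable_all(dev);

	/* ... tear down fbdev emulation, free driver state ... */

	return 0;
}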
+
static void drm_framebuffer_free(struct kref *kref)
{
struct drm_framebuffer *fb =
@@ -535,7 +550,7 @@ EXPORT_SYMBOL(drm_framebuffer_unregister_private);
*
* Cleanup framebuffer. This function is intended to be used from the drivers
* ->destroy callback. It can also be used to clean up driver private
- * framebuffers embedded into a larger structure.
+ * framebuffers embedded into a larger structure.
*
 * Note that this function does not remove the fb from active usage - if it is
* still used anywhere, hilarity can ensue since userspace could call getfb on
@@ -574,8 +589,6 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
struct drm_device *dev;
struct drm_crtc *crtc;
struct drm_plane *plane;
- struct drm_mode_set set;
- int ret;
if (!fb)
return;
@@ -605,11 +618,7 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
drm_for_each_crtc(crtc, dev) {
if (crtc->primary->fb == fb) {
/* should turn off the crtc */
- memset(&set, 0, sizeof(struct drm_mode_set));
- set.crtc = crtc;
- set.fb = NULL;
- ret = drm_mode_set_config_internal(&set);
- if (ret)
+ if (drm_crtc_force_disable(crtc))
DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
}
}
@@ -639,6 +648,31 @@ static unsigned int drm_num_crtcs(struct drm_device *dev)
return num;
}
+static int drm_crtc_register_all(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+ int ret = 0;
+
+ drm_for_each_crtc(crtc, dev) {
+ if (crtc->funcs->late_register)
+ ret = crtc->funcs->late_register(crtc);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void drm_crtc_unregister_all(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+
+ drm_for_each_crtc(crtc, dev) {
+ if (crtc->funcs->early_unregister)
+ crtc->funcs->early_unregister(crtc);
+ }
+}
+
/**
* drm_crtc_init_with_planes - Initialise a new CRTC object with
* specified primary and cursor planes.
@@ -669,6 +703,9 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
crtc->dev = dev;
crtc->funcs = funcs;
+ INIT_LIST_HEAD(&crtc->commit_list);
+ spin_lock_init(&crtc->commit_lock);
+
drm_modeset_lock_init(&crtc->mutex);
ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
if (ret)
@@ -692,7 +729,7 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
crtc->base.properties = &crtc->properties;
list_add_tail(&crtc->head, &config->crtc_list);
- config->num_crtc++;
+ crtc->index = config->num_crtc++;
crtc->primary = primary;
crtc->cursor = cursor;
@@ -722,6 +759,11 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
+ /* Note that the crtc_list is considered to be static; should we
+ * remove the drm_crtc at runtime we would have to decrement all
+ * the indices on the drm_crtc after us in the crtc_list.
+ */
+
kfree(crtc->gamma_store);
crtc->gamma_store = NULL;
@@ -741,29 +783,6 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
}
EXPORT_SYMBOL(drm_crtc_cleanup);
-/**
- * drm_crtc_index - find the index of a registered CRTC
- * @crtc: CRTC to find index for
- *
- * Given a registered CRTC, return the index of that CRTC within a DRM
- * device's list of CRTCs.
- */
-unsigned int drm_crtc_index(struct drm_crtc *crtc)
-{
- unsigned int index = 0;
- struct drm_crtc *tmp;
-
- drm_for_each_crtc(tmp, crtc->dev) {
- if (tmp == crtc)
- return index;
-
- index++;
- }
-
- BUG();
-}
-EXPORT_SYMBOL(drm_crtc_index);
-
/*
* drm_mode_remove - remove and free a mode
* @connector: connector list to modify
@@ -909,11 +928,11 @@ int drm_connector_init(struct drm_device *dev,
connector->dev = dev;
connector->funcs = funcs;
- connector->connector_id = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL);
- if (connector->connector_id < 0) {
- ret = connector->connector_id;
+ ret = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL);
+ if (ret < 0)
goto out_put;
- }
+ connector->index = ret;
+ ret = 0;
connector->connector_type = connector_type;
connector->connector_type_id =
@@ -961,7 +980,7 @@ out_put_type_id:
ida_remove(connector_ida, connector->connector_type_id);
out_put_id:
if (ret)
- ida_remove(&config->connector_ida, connector->connector_id);
+ ida_remove(&config->connector_ida, connector->index);
out_put:
if (ret)
drm_mode_object_unregister(dev, &connector->base);
@@ -984,6 +1003,12 @@ void drm_connector_cleanup(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode, *t;
+ /* The connector should have been removed from userspace long before
+ * it is finally destroyed.
+ */
+ if (WARN_ON(connector->registered))
+ drm_connector_unregister(connector);
+
if (connector->tile_group) {
drm_mode_put_tile_group(dev, connector->tile_group);
connector->tile_group = NULL;
@@ -999,7 +1024,7 @@ void drm_connector_cleanup(struct drm_connector *connector)
connector->connector_type_id);
ida_remove(&dev->mode_config.connector_ida,
- connector->connector_id);
+ connector->index);
kfree(connector->display_info.bus_formats);
drm_mode_object_unregister(dev, &connector->base);
@@ -1030,19 +1055,34 @@ int drm_connector_register(struct drm_connector *connector)
{
int ret;
+ if (connector->registered)
+ return 0;
+
ret = drm_sysfs_connector_add(connector);
if (ret)
return ret;
ret = drm_debugfs_connector_add(connector);
if (ret) {
- drm_sysfs_connector_remove(connector);
- return ret;
+ goto err_sysfs;
+ }
+
+ if (connector->funcs->late_register) {
+ ret = connector->funcs->late_register(connector);
+ if (ret)
+ goto err_debugfs;
}
drm_mode_object_register(connector->dev, &connector->base);
+ connector->registered = true;
return 0;
+
+err_debugfs:
+ drm_debugfs_connector_remove(connector);
+err_sysfs:
+ drm_sysfs_connector_remove(connector);
+ return ret;
}
EXPORT_SYMBOL(drm_connector_register);
@@ -1054,42 +1094,41 @@ EXPORT_SYMBOL(drm_connector_register);
*/
void drm_connector_unregister(struct drm_connector *connector)
{
+ if (!connector->registered)
+ return;
+
+ if (connector->funcs->early_unregister)
+ connector->funcs->early_unregister(connector);
+
drm_sysfs_connector_remove(connector);
drm_debugfs_connector_remove(connector);
+
+ connector->registered = false;
}
EXPORT_SYMBOL(drm_connector_unregister);
-/**
- * drm_connector_register_all - register all connectors
- * @dev: drm device
- *
- * This function registers all connectors in sysfs and other places so that
- * userspace can start to access them. Drivers can call it after calling
- * drm_dev_register() to complete the device registration, if they don't call
- * drm_connector_register() on each connector individually.
- *
- * When a device is unplugged and should be removed from userspace access,
- * call drm_connector_unregister_all(), which is the inverse of this
- * function.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_connector_register_all(struct drm_device *dev)
+static void drm_connector_unregister_all(struct drm_device *dev)
{
struct drm_connector *connector;
- int ret;
- mutex_lock(&dev->mode_config.mutex);
+ /* FIXME: taking the mode config mutex ends up in a clash with sysfs */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ drm_connector_unregister(connector);
+}
- drm_for_each_connector(connector, dev) {
+static int drm_connector_register_all(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ int ret;
+
+ /* FIXME: taking the mode config mutex ends up in a clash with
+ * fbcon/backlight registration */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
ret = drm_connector_register(connector);
if (ret)
goto err;
}
- mutex_unlock(&dev->mode_config.mutex);
-
return 0;
err:
@@ -1097,27 +1136,31 @@ err:
drm_connector_unregister_all(dev);
return ret;
}
-EXPORT_SYMBOL(drm_connector_register_all);
-/**
- * drm_connector_unregister_all - unregister connector userspace interfaces
- * @dev: drm device
- *
- * This functions unregisters all connectors from sysfs and other places so
- * that userspace can no longer access them. Drivers should call this as the
- * first step tearing down the device instace, or when the underlying
- * physical device disappeared (e.g. USB unplug), right before calling
- * drm_dev_unregister().
- */
-void drm_connector_unregister_all(struct drm_device *dev)
+static int drm_encoder_register_all(struct drm_device *dev)
{
- struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ int ret = 0;
- /* FIXME: taking the mode config mutex ends up in a clash with sysfs */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- drm_connector_unregister(connector);
+ drm_for_each_encoder(encoder, dev) {
+ if (encoder->funcs->late_register)
+ ret = encoder->funcs->late_register(encoder);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void drm_encoder_unregister_all(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+
+ drm_for_each_encoder(encoder, dev) {
+ if (encoder->funcs->early_unregister)
+ encoder->funcs->early_unregister(encoder);
+ }
}
-EXPORT_SYMBOL(drm_connector_unregister_all);
/**
* drm_encoder_init - Init a preallocated encoder
@@ -1166,7 +1209,7 @@ int drm_encoder_init(struct drm_device *dev,
}
list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
- dev->mode_config.num_encoder++;
+ encoder->index = dev->mode_config.num_encoder++;
out_put:
if (ret)
@@ -1180,29 +1223,6 @@ out_unlock:
EXPORT_SYMBOL(drm_encoder_init);
/**
- * drm_encoder_index - find the index of a registered encoder
- * @encoder: encoder to find index for
- *
- * Given a registered encoder, return the index of that encoder within a DRM
- * device's list of encoders.
- */
-unsigned int drm_encoder_index(struct drm_encoder *encoder)
-{
- unsigned int index = 0;
- struct drm_encoder *tmp;
-
- drm_for_each_encoder(tmp, encoder->dev) {
- if (tmp == encoder)
- return index;
-
- index++;
- }
-
- BUG();
-}
-EXPORT_SYMBOL(drm_encoder_index);
-
-/**
* drm_encoder_cleanup - cleans up an initialised encoder
* @encoder: encoder to cleanup
*
@@ -1212,6 +1232,11 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
+ /* Note that the encoder_list is considered to be static; should we
+ * remove the drm_encoder at runtime we would have to decrement all
+ * the indices on the drm_encoder after us in the encoder_list.
+ */
+
drm_modeset_lock_all(dev);
drm_mode_object_unregister(dev, &encoder->base);
kfree(encoder->name);
@@ -1300,7 +1325,7 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
plane->type = type;
list_add_tail(&plane->head, &config->plane_list);
- config->num_total_plane++;
+ plane->index = config->num_total_plane++;
if (plane->type == DRM_PLANE_TYPE_OVERLAY)
config->num_overlay_plane++;
@@ -1325,6 +1350,31 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
}
EXPORT_SYMBOL(drm_universal_plane_init);
+static int drm_plane_register_all(struct drm_device *dev)
+{
+ struct drm_plane *plane;
+ int ret = 0;
+
+ drm_for_each_plane(plane, dev) {
+ if (plane->funcs->late_register)
+ ret = plane->funcs->late_register(plane);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void drm_plane_unregister_all(struct drm_device *dev)
+{
+ struct drm_plane *plane;
+
+ drm_for_each_plane(plane, dev) {
+ if (plane->funcs->early_unregister)
+ plane->funcs->early_unregister(plane);
+ }
+}
+
/**
* drm_plane_init - Initialize a legacy plane
* @dev: DRM device
@@ -1374,6 +1424,11 @@ void drm_plane_cleanup(struct drm_plane *plane)
BUG_ON(list_empty(&plane->head));
+ /* Note that the plane_list is considered to be static; should we
+ * remove the drm_plane at runtime we would have to decrement all
+ * the indices on the drm_plane after us in the plane_list.
+ */
+
list_del(&plane->head);
dev->mode_config.num_total_plane--;
if (plane->type == DRM_PLANE_TYPE_OVERLAY)
@@ -1391,29 +1446,6 @@ void drm_plane_cleanup(struct drm_plane *plane)
EXPORT_SYMBOL(drm_plane_cleanup);
/**
- * drm_plane_index - find the index of a registered plane
- * @plane: plane to find index for
- *
- * Given a registered plane, return the index of that CRTC within a DRM
- * device's list of planes.
- */
-unsigned int drm_plane_index(struct drm_plane *plane)
-{
- unsigned int index = 0;
- struct drm_plane *tmp;
-
- drm_for_each_plane(tmp, plane->dev) {
- if (tmp == plane)
- return index;
-
- index++;
- }
-
- BUG();
-}
-EXPORT_SYMBOL(drm_plane_index);
-
-/**
* drm_plane_from_index - find the registered plane at an index
* @dev: DRM device
 * @idx: index of the registered plane to find
@@ -1425,13 +1457,11 @@ struct drm_plane *
drm_plane_from_index(struct drm_device *dev, int idx)
{
struct drm_plane *plane;
- unsigned int i = 0;
- drm_for_each_plane(plane, dev) {
- if (i == idx)
+ drm_for_each_plane(plane, dev)
+ if (idx == plane->index)
return plane;
- i++;
- }
+
return NULL;
}
EXPORT_SYMBOL(drm_plane_from_index);
@@ -1467,6 +1497,46 @@ void drm_plane_force_disable(struct drm_plane *plane)
}
EXPORT_SYMBOL(drm_plane_force_disable);
+int drm_modeset_register_all(struct drm_device *dev)
+{
+ int ret;
+
+ ret = drm_plane_register_all(dev);
+ if (ret)
+ goto err_plane;
+
+ ret = drm_crtc_register_all(dev);
+ if (ret)
+ goto err_crtc;
+
+ ret = drm_encoder_register_all(dev);
+ if (ret)
+ goto err_encoder;
+
+ ret = drm_connector_register_all(dev);
+ if (ret)
+ goto err_connector;
+
+ return 0;
+
+err_connector:
+ drm_encoder_unregister_all(dev);
+err_encoder:
+ drm_crtc_unregister_all(dev);
+err_crtc:
+ drm_plane_unregister_all(dev);
+err_plane:
+ return ret;
+}
+
+void drm_modeset_unregister_all(struct drm_device *dev)
+{
+ drm_connector_unregister_all(dev);
+ drm_encoder_unregister_all(dev);
+ drm_crtc_unregister_all(dev);
+ drm_plane_unregister_all(dev);
+}
+
static int drm_mode_create_standard_properties(struct drm_device *dev)
{
struct drm_property *prop;
@@ -2975,6 +3045,8 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
return PTR_ERR(fb);
}
+ fb->hot_x = req->hot_x;
+ fb->hot_y = req->hot_y;
} else {
fb = NULL;
}
@@ -3581,7 +3653,7 @@ int drm_mode_getfb(struct drm_device *dev,
r->bpp = fb->bits_per_pixel;
r->pitch = fb->pitches[0];
if (fb->funcs->create_handle) {
- if (file_priv->is_master || capable(CAP_SYS_ADMIN) ||
+ if (drm_is_current_master(file_priv) || capable(CAP_SYS_ADMIN) ||
drm_is_control_client(file_priv)) {
ret = fb->funcs->create_handle(fb, file_priv,
&r->handle);
@@ -3738,6 +3810,13 @@ void drm_fb_release(struct drm_file *priv)
}
}
+static bool drm_property_type_valid(struct drm_property *property)
+{
+ if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
+ return !(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
+ return !!(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
+}
+
/**
* drm_property_create - create a new property type
* @dev: drm device
@@ -5138,6 +5217,9 @@ EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
int gamma_size)
{
+ uint16_t *r_base, *g_base, *b_base;
+ int i;
+
crtc->gamma_size = gamma_size;
crtc->gamma_store = kcalloc(gamma_size, sizeof(uint16_t) * 3,
@@ -5147,6 +5229,16 @@ int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
return -ENOMEM;
}
+ r_base = crtc->gamma_store;
+ g_base = r_base + gamma_size;
+ b_base = g_base + gamma_size;
+ for (i = 0; i < gamma_size; i++) {
+ r_base[i] = i << 8;
+ g_base[i] = i << 8;
+ b_base[i] = i << 8;
+ }
+
return 0;
}
EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
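With the identity initialization added above, a driver only has to size the table; the i << 8 spreads the 8-bit index across the 16-bit LUT range. A minimal sketch (hypothetical foo_ helper):

static int foo_crtc_setup_gamma(struct drm_crtc *crtc)
{
	/* 256 entries; gamma_store now starts out as an identity ramp */
	return drm_mode_crtc_set_gamma_size(crtc, 256);
}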
@@ -5214,7 +5306,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
goto out;
}
- crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
+ ret = crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
out:
drm_modeset_unlock_all(dev);
@@ -5547,264 +5639,6 @@ int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
}
/**
- * drm_fb_get_bpp_depth - get the bpp/depth values for format
- * @format: pixel format (DRM_FORMAT_*)
- * @depth: storage for the depth value
- * @bpp: storage for the bpp value
- *
- * This only supports RGB formats here for compat with code that doesn't use
- * pixel formats directly yet.
- */
-void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
- int *bpp)
-{
- switch (format) {
- case DRM_FORMAT_C8:
- case DRM_FORMAT_RGB332:
- case DRM_FORMAT_BGR233:
- *depth = 8;
- *bpp = 8;
- break;
- case DRM_FORMAT_XRGB1555:
- case DRM_FORMAT_XBGR1555:
- case DRM_FORMAT_RGBX5551:
- case DRM_FORMAT_BGRX5551:
- case DRM_FORMAT_ARGB1555:
- case DRM_FORMAT_ABGR1555:
- case DRM_FORMAT_RGBA5551:
- case DRM_FORMAT_BGRA5551:
- *depth = 15;
- *bpp = 16;
- break;
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_BGR565:
- *depth = 16;
- *bpp = 16;
- break;
- case DRM_FORMAT_RGB888:
- case DRM_FORMAT_BGR888:
- *depth = 24;
- *bpp = 24;
- break;
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_RGBX8888:
- case DRM_FORMAT_BGRX8888:
- *depth = 24;
- *bpp = 32;
- break;
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_RGBX1010102:
- case DRM_FORMAT_BGRX1010102:
- case DRM_FORMAT_ARGB2101010:
- case DRM_FORMAT_ABGR2101010:
- case DRM_FORMAT_RGBA1010102:
- case DRM_FORMAT_BGRA1010102:
- *depth = 30;
- *bpp = 32;
- break;
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_RGBA8888:
- case DRM_FORMAT_BGRA8888:
- *depth = 32;
- *bpp = 32;
- break;
- default:
- DRM_DEBUG_KMS("unsupported pixel format %s\n",
- drm_get_format_name(format));
- *depth = 0;
- *bpp = 0;
- break;
- }
-}
-EXPORT_SYMBOL(drm_fb_get_bpp_depth);
-
-/**
- * drm_format_num_planes - get the number of planes for format
- * @format: pixel format (DRM_FORMAT_*)
- *
- * Returns:
- * The number of planes used by the specified pixel format.
- */
-int drm_format_num_planes(uint32_t format)
-{
- switch (format) {
- case DRM_FORMAT_YUV410:
- case DRM_FORMAT_YVU410:
- case DRM_FORMAT_YUV411:
- case DRM_FORMAT_YVU411:
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_YUV422:
- case DRM_FORMAT_YVU422:
- case DRM_FORMAT_YUV444:
- case DRM_FORMAT_YVU444:
- return 3;
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV16:
- case DRM_FORMAT_NV61:
- case DRM_FORMAT_NV24:
- case DRM_FORMAT_NV42:
- return 2;
- default:
- return 1;
- }
-}
-EXPORT_SYMBOL(drm_format_num_planes);
-
-/**
- * drm_format_plane_cpp - determine the bytes per pixel value
- * @format: pixel format (DRM_FORMAT_*)
- * @plane: plane index
- *
- * Returns:
- * The bytes per pixel value for the specified plane.
- */
-int drm_format_plane_cpp(uint32_t format, int plane)
-{
- unsigned int depth;
- int bpp;
-
- if (plane >= drm_format_num_planes(format))
- return 0;
-
- switch (format) {
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- return 2;
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV16:
- case DRM_FORMAT_NV61:
- case DRM_FORMAT_NV24:
- case DRM_FORMAT_NV42:
- return plane ? 2 : 1;
- case DRM_FORMAT_YUV410:
- case DRM_FORMAT_YVU410:
- case DRM_FORMAT_YUV411:
- case DRM_FORMAT_YVU411:
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_YUV422:
- case DRM_FORMAT_YVU422:
- case DRM_FORMAT_YUV444:
- case DRM_FORMAT_YVU444:
- return 1;
- default:
- drm_fb_get_bpp_depth(format, &depth, &bpp);
- return bpp >> 3;
- }
-}
-EXPORT_SYMBOL(drm_format_plane_cpp);
-
-/**
- * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
- * @format: pixel format (DRM_FORMAT_*)
- *
- * Returns:
- * The horizontal chroma subsampling factor for the
- * specified pixel format.
- */
-int drm_format_horz_chroma_subsampling(uint32_t format)
-{
- switch (format) {
- case DRM_FORMAT_YUV411:
- case DRM_FORMAT_YVU411:
- case DRM_FORMAT_YUV410:
- case DRM_FORMAT_YVU410:
- return 4;
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV16:
- case DRM_FORMAT_NV61:
- case DRM_FORMAT_YUV422:
- case DRM_FORMAT_YVU422:
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- return 2;
- default:
- return 1;
- }
-}
-EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
-
-/**
- * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
- * @format: pixel format (DRM_FORMAT_*)
- *
- * Returns:
- * The vertical chroma subsampling factor for the
- * specified pixel format.
- */
-int drm_format_vert_chroma_subsampling(uint32_t format)
-{
- switch (format) {
- case DRM_FORMAT_YUV410:
- case DRM_FORMAT_YVU410:
- return 4;
- case DRM_FORMAT_YUV420:
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_NV21:
- return 2;
- default:
- return 1;
- }
-}
-EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
-
-/**
- * drm_format_plane_width - width of the plane given the first plane
- * @width: width of the first plane
- * @format: pixel format
- * @plane: plane index
- *
- * Returns:
- * The width of @plane, given that the width of the first plane is @width.
- */
-int drm_format_plane_width(int width, uint32_t format, int plane)
-{
- if (plane >= drm_format_num_planes(format))
- return 0;
-
- if (plane == 0)
- return width;
-
- return width / drm_format_horz_chroma_subsampling(format);
-}
-EXPORT_SYMBOL(drm_format_plane_width);
-
-/**
- * drm_format_plane_height - height of the plane given the first plane
- * @height: height of the first plane
- * @format: pixel format
- * @plane: plane index
- *
- * Returns:
- * The height of @plane, given that the height of the first plane is @height.
- */
-int drm_format_plane_height(int height, uint32_t format, int plane)
-{
- if (plane >= drm_format_num_planes(format))
- return 0;
-
- if (plane == 0)
- return height;
-
- return height / drm_format_vert_chroma_subsampling(format);
-}
-EXPORT_SYMBOL(drm_format_plane_height);
-
-/**
* drm_rotation_simplify() - Try to simplify the rotation
* @rotation: Rotation to be simplified
* @supported_rotations: Supported rotations
@@ -6067,3 +5901,48 @@ struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
return tg;
}
EXPORT_SYMBOL(drm_mode_create_tile_group);
+
+/**
+ * drm_crtc_enable_color_mgmt - enable color management properties
+ * @crtc: DRM CRTC
+ * @degamma_lut_size: the size of the degamma lut (before CSC)
+ * @has_ctm: whether to attach ctm_property for CSC matrix
+ * @gamma_lut_size: the size of the gamma lut (after CSC)
+ *
+ * This function lets the driver enable the color correction
+ * properties on a CRTC. This includes 3 degamma, csc and gamma
+ * properties that userspace can set and 2 size properties to inform
+ * userspace of the lut sizes. Each of the properties is
+ * optional. The gamma and degamma properties are only attached if
+ * their size is not 0, and ctm_property is only attached if has_ctm is
+ * true.
+ */
+void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
+ uint degamma_lut_size,
+ bool has_ctm,
+ uint gamma_lut_size)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+
+ if (degamma_lut_size) {
+ drm_object_attach_property(&crtc->base,
+ config->degamma_lut_property, 0);
+ drm_object_attach_property(&crtc->base,
+ config->degamma_lut_size_property,
+ degamma_lut_size);
+ }
+
+ if (has_ctm)
+ drm_object_attach_property(&crtc->base,
+ config->ctm_property, 0);
+
+ if (gamma_lut_size) {
+ drm_object_attach_property(&crtc->base,
+ config->gamma_lut_property, 0);
+ drm_object_attach_property(&crtc->base,
+ config->gamma_lut_size_property,
+ gamma_lut_size);
+ }
+}
+EXPORT_SYMBOL(drm_crtc_enable_color_mgmt);
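As a usage illustration (the LUT sizes below are made up, not taken from this patch), a driver with a full color pipeline attaches all five properties, while hardware without a CSC block attaches only what it has:

    /* full pipeline: 33-entry degamma LUT, CSC matrix, 257-entry gamma LUT */
    drm_crtc_enable_color_mgmt(crtc, 33, true, 257);

    /* gamma-only hardware: no degamma LUT, no CTM */
    drm_crtc_enable_color_mgmt(crtc, 0, false, 256);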
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 26feb2f84..604d3ef72 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -232,6 +232,9 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev)
*/
void drm_helper_disable_unused_functions(struct drm_device *dev)
{
+ if (drm_core_check_feature(dev, DRIVER_ATOMIC))
+ DRM_ERROR("Called for atomic driver, this is not what you want.\n");
+
drm_modeset_lock_all(dev);
__drm_helper_disable_unused_functions(dev);
drm_modeset_unlock_all(dev);
@@ -1123,36 +1126,3 @@ int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
return drm_plane_helper_commit(plane, plane_state, old_fb);
}
EXPORT_SYMBOL(drm_helper_crtc_mode_set_base);
-
-/**
- * drm_helper_crtc_enable_color_mgmt - enable color management properties
- * @crtc: DRM CRTC
- * @degamma_lut_size: the size of the degamma lut (before CSC)
- * @gamma_lut_size: the size of the gamma lut (after CSC)
- *
- * This function lets the driver enable the color correction properties on a
- * CRTC. This includes 3 degamma, csc and gamma properties that userspace can
- * set and 2 size properties to inform the userspace of the lut sizes.
- */
-void drm_helper_crtc_enable_color_mgmt(struct drm_crtc *crtc,
- int degamma_lut_size,
- int gamma_lut_size)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_mode_config *config = &dev->mode_config;
-
- drm_object_attach_property(&crtc->base,
- config->degamma_lut_property, 0);
- drm_object_attach_property(&crtc->base,
- config->ctm_property, 0);
- drm_object_attach_property(&crtc->base,
- config->gamma_lut_property, 0);
-
- drm_object_attach_property(&crtc->base,
- config->degamma_lut_size_property,
- degamma_lut_size);
- drm_object_attach_property(&crtc->base,
- config->gamma_lut_size_property,
- gamma_lut_size);
-}
-EXPORT_SYMBOL(drm_helper_crtc_enable_color_mgmt);
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index a78c13828..0c34e6d90 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -31,14 +31,104 @@
* and are not exported to drivers.
*/
+
+/* drm_crtc.c */
+void drm_connector_ida_init(void);
+void drm_connector_ida_destroy(void);
int drm_mode_object_get(struct drm_device *dev,
struct drm_mode_object *obj, uint32_t obj_type);
void drm_mode_object_unregister(struct drm_device *dev,
struct drm_mode_object *object);
+bool drm_property_change_valid_get(struct drm_property *property,
+ uint64_t value,
+ struct drm_mode_object **ref);
+void drm_property_change_valid_put(struct drm_property *property,
+ struct drm_mode_object *ref);
+
+int drm_plane_check_pixel_format(const struct drm_plane *plane,
+ u32 format);
+int drm_crtc_check_viewport(const struct drm_crtc *crtc,
+ int x, int y,
+ const struct drm_display_mode *mode,
+ const struct drm_framebuffer *fb);
+
+void drm_fb_release(struct drm_file *file_priv);
+void drm_property_destroy_user_blobs(struct drm_device *dev,
+ struct drm_file *file_priv);
+
+/* dumb buffer support IOCTLs */
+int drm_mode_create_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+
+/* framebuffer IOCTLs */
+extern int drm_mode_addfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_addfb2(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_rmfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_getfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+
+/* IOCTLs */
+int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+int drm_mode_getresources(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_getplane_res(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_mode_getcrtc(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_getconnector(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_setcrtc(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_getplane(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_setplane(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_cursor_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_cursor2_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_getproperty_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_getblob_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_createblob_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_destroyblob_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_getencoder(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_gamma_get_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+int drm_mode_gamma_set_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+
+int drm_mode_page_flip_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
/* drm_atomic.c */
int drm_atomic_get_property(struct drm_mode_object *obj,
- struct drm_property *property, uint64_t *val);
+ struct drm_property *property, uint64_t *val);
int drm_mode_atomic_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
+int drm_modeset_register_all(struct drm_device *dev);
+void drm_modeset_unregister_all(struct drm_device *dev);
+
+/* drm_blend.c */
+int drm_atomic_helper_normalize_zpos(struct drm_device *dev,
+ struct drm_atomic_state *state);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 3bcf8e6a8..fa10cef2b 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -46,11 +46,8 @@
static const struct drm_info_list drm_debugfs_list[] = {
{"name", drm_name_info, 0},
- {"vm", drm_vm_info, 0},
{"clients", drm_clients_info, 0},
- {"bufs", drm_bufs_info, 0},
{"gem_names", drm_gem_name_info, DRIVER_GEM},
- {"vma", drm_vma_info, 0},
};
#define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list)
diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c
index 3334baacf..734f86a34 100644
--- a/drivers/gpu/drm/drm_dp_aux_dev.c
+++ b/drivers/gpu/drm/drm_dp_aux_dev.c
@@ -355,8 +355,7 @@ int drm_dp_aux_dev_init(void)
drm_dp_aux_dev_class = class_create(THIS_MODULE, "drm_dp_aux_dev");
if (IS_ERR(drm_dp_aux_dev_class)) {
- res = PTR_ERR(drm_dp_aux_dev_class);
- goto out;
+ return PTR_ERR(drm_dp_aux_dev_class);
}
drm_dp_aux_dev_class->dev_groups = drm_dp_aux_groups;
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 67b28f801..eae5ef963 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -708,8 +708,6 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
memset(&msg, 0, sizeof(msg));
- mutex_lock(&aux->hw_mutex);
-
for (i = 0; i < num; i++) {
msg.address = msgs[i].addr;
drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
@@ -764,8 +762,6 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
msg.size = 0;
(void)drm_dp_i2c_do_msg(aux, &msg);
- mutex_unlock(&aux->hw_mutex);
-
return err;
}
@@ -774,22 +770,64 @@ static const struct i2c_algorithm drm_dp_i2c_algo = {
.master_xfer = drm_dp_i2c_xfer,
};
+static struct drm_dp_aux *i2c_to_aux(struct i2c_adapter *i2c)
+{
+ return container_of(i2c, struct drm_dp_aux, ddc);
+}
+
+static void lock_bus(struct i2c_adapter *i2c, unsigned int flags)
+{
+ mutex_lock(&i2c_to_aux(i2c)->hw_mutex);
+}
+
+static int trylock_bus(struct i2c_adapter *i2c, unsigned int flags)
+{
+ return mutex_trylock(&i2c_to_aux(i2c)->hw_mutex);
+}
+
+static void unlock_bus(struct i2c_adapter *i2c, unsigned int flags)
+{
+ mutex_unlock(&i2c_to_aux(i2c)->hw_mutex);
+}
+
/**
- * drm_dp_aux_register() - initialise and register aux channel
+ * drm_dp_aux_init() - minimally initialise an aux channel
* @aux: DisplayPort AUX channel
*
- * Returns 0 on success or a negative error code on failure.
+ * If you need to use the drm_dp_aux's i2c adapter prior to registering it
+ * with the outside world, call drm_dp_aux_init() first. You must still
+ * call drm_dp_aux_register() once the connector has been registered to
+ * allow userspace access to the auxiliary DP channel.
*/
-int drm_dp_aux_register(struct drm_dp_aux *aux)
+void drm_dp_aux_init(struct drm_dp_aux *aux)
{
- int ret;
-
mutex_init(&aux->hw_mutex);
aux->ddc.algo = &drm_dp_i2c_algo;
aux->ddc.algo_data = aux;
aux->ddc.retries = 3;
+ aux->ddc.lock_bus = lock_bus;
+ aux->ddc.trylock_bus = trylock_bus;
+ aux->ddc.unlock_bus = unlock_bus;
+}
+EXPORT_SYMBOL(drm_dp_aux_init);
+
+/**
+ * drm_dp_aux_register() - initialise and register aux channel
+ * @aux: DisplayPort AUX channel
+ *
+ * Automatically calls drm_dp_aux_init() if this hasn't been done yet.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_aux_register(struct drm_dp_aux *aux)
+{
+ int ret;
+
+ if (!aux->ddc.algo)
+ drm_dp_aux_init(aux);
+
aux->ddc.class = I2C_CLASS_DDC;
aux->ddc.owner = THIS_MODULE;
aux->ddc.dev.parent = aux->dev;
@@ -822,3 +860,35 @@ void drm_dp_aux_unregister(struct drm_dp_aux *aux)
i2c_del_adapter(&aux->ddc);
}
EXPORT_SYMBOL(drm_dp_aux_unregister);
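The init/register split added below is meant for drivers that need the AUX channel (for example to probe DPCD registers) before any connector exists. A hedged sketch of the intended ordering, with hypothetical foo_dp_probe and foo_aux_transfer:

    static int foo_dp_probe(struct platform_device *pdev,
                            struct drm_dp_aux *aux)
    {
        int ret;

        aux->name = "DP-AUX A";
        aux->dev = &pdev->dev;
        aux->transfer = foo_aux_transfer;
        drm_dp_aux_init(aux);   /* AUX/i2c usable by the driver from here on */

        /* ... DPCD probing, connector creation and registration ... */

        /* only now expose the channel to userspace */
        ret = drm_dp_aux_register(aux);
        if (ret)
            return ret;

        return 0;
    }

On teardown, drm_dp_aux_unregister() is called before the connector is unregistered, mirroring the order above.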
+
+#define PSR_SETUP_TIME(x) [DP_PSR_SETUP_TIME_ ## x >> DP_PSR_SETUP_TIME_SHIFT] = (x)
+
+/**
+ * drm_dp_psr_setup_time() - PSR setup time in microseconds
+ * @psr_cap: PSR capabilities from DPCD
+ *
+ * Returns:
+ * PSR setup time for the panel in microseconds, or a negative
+ * error code on failure.
+ */
+int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE])
+{
+ static const u16 psr_setup_time_us[] = {
+ PSR_SETUP_TIME(330),
+ PSR_SETUP_TIME(275),
+ PSR_SETUP_TIME(165),
+ PSR_SETUP_TIME(110),
+ PSR_SETUP_TIME(55),
+ PSR_SETUP_TIME(0),
+ };
+ int i;
+
+ i = (psr_cap[1] & DP_PSR_SETUP_TIME_MASK) >> DP_PSR_SETUP_TIME_SHIFT;
+ if (i >= ARRAY_SIZE(psr_setup_time_us))
+ return -EINVAL;
+
+ return psr_setup_time_us[i];
+}
+EXPORT_SYMBOL(drm_dp_psr_setup_time);
+
+#undef PSR_SETUP_TIME
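Typical use pairs this helper with a DPCD read of the two-byte PSR capability block; a hedged sketch (hypothetical foo_check_psr, error handling kept minimal):

    static void foo_check_psr(struct drm_dp_aux *aux)
    {
        u8 psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
        int setup_time_us;

        if (drm_dp_dpcd_read(aux, DP_PSR_SUPPORT, psr_dpcd,
                             sizeof(psr_dpcd)) != sizeof(psr_dpcd))
            return;

        setup_time_us = drm_dp_psr_setup_time(psr_dpcd);
        if (setup_time_us < 0)
            return; /* reserved DP_PSR_SETUP_TIME encoding */

        /* a driver would compare this against its vblank duration */
        DRM_DEBUG_KMS("PSR setup time: %d us\n", setup_time_us);
    }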
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 653790805..04e457117 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1493,11 +1493,8 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
WARN_ON(!mutex_is_locked(&mgr->qlock));
/* construct a chunk from the first msg in the tx_msg queue */
- if (list_empty(&mgr->tx_msg_downq)) {
- mgr->tx_down_in_progress = false;
+ if (list_empty(&mgr->tx_msg_downq))
return;
- }
- mgr->tx_down_in_progress = true;
txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
ret = process_single_tx_qlock(mgr, txmsg, false);
@@ -1512,10 +1509,6 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
wake_up(&mgr->tx_waitq);
}
- if (list_empty(&mgr->tx_msg_downq)) {
- mgr->tx_down_in_progress = false;
- return;
- }
}
/* called holding qlock */
@@ -1538,7 +1531,7 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
{
mutex_lock(&mgr->qlock);
list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
- if (!mgr->tx_down_in_progress)
+ if (list_is_singular(&mgr->tx_msg_downq))
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
@@ -2372,6 +2365,7 @@ EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
/**
* drm_dp_mst_detect_port() - get connection status for an MST port
+ * @connector: DRM connector for this port
* @mgr: manager for this port
* @port: unverified pointer to a port
*
@@ -2887,7 +2881,7 @@ static void drm_dp_tx_work(struct work_struct *work)
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
mutex_lock(&mgr->qlock);
- if (mgr->tx_down_in_progress)
+ if (!list_empty(&mgr->tx_msg_downq))
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index bff89226a..be27ed36f 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -34,8 +34,9 @@
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_core.h>
+#include "drm_crtc_internal.h"
#include "drm_legacy.h"
#include "drm_internal.h"
+#include "drm_crtc_internal.h"
/*
* drm_debug: Enable debug output.
@@ -93,114 +95,6 @@ void drm_ut_debug_printk(const char *function_name, const char *format, ...)
}
EXPORT_SYMBOL(drm_ut_debug_printk);
-struct drm_master *drm_master_create(struct drm_minor *minor)
-{
- struct drm_master *master;
-
- master = kzalloc(sizeof(*master), GFP_KERNEL);
- if (!master)
- return NULL;
-
- kref_init(&master->refcount);
- spin_lock_init(&master->lock.spinlock);
- init_waitqueue_head(&master->lock.lock_queue);
- idr_init(&master->magic_map);
- master->minor = minor;
-
- return master;
-}
-
-struct drm_master *drm_master_get(struct drm_master *master)
-{
- kref_get(&master->refcount);
- return master;
-}
-EXPORT_SYMBOL(drm_master_get);
-
-static void drm_master_destroy(struct kref *kref)
-{
- struct drm_master *master = container_of(kref, struct drm_master, refcount);
- struct drm_device *dev = master->minor->dev;
-
- if (dev->driver->master_destroy)
- dev->driver->master_destroy(dev, master);
-
- drm_legacy_master_rmmaps(dev, master);
-
- idr_destroy(&master->magic_map);
- kfree(master->unique);
- kfree(master);
-}
-
-void drm_master_put(struct drm_master **master)
-{
- kref_put(&(*master)->refcount, drm_master_destroy);
- *master = NULL;
-}
-EXPORT_SYMBOL(drm_master_put);
-
-int drm_setmaster_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- int ret = 0;
-
- mutex_lock(&dev->master_mutex);
- if (file_priv->is_master)
- goto out_unlock;
-
- if (file_priv->minor->master) {
- ret = -EINVAL;
- goto out_unlock;
- }
-
- if (!file_priv->master) {
- ret = -EINVAL;
- goto out_unlock;
- }
-
- if (!file_priv->allowed_master) {
- ret = drm_new_set_master(dev, file_priv);
- goto out_unlock;
- }
-
- file_priv->minor->master = drm_master_get(file_priv->master);
- file_priv->is_master = 1;
- if (dev->driver->master_set) {
- ret = dev->driver->master_set(dev, file_priv, false);
- if (unlikely(ret != 0)) {
- file_priv->is_master = 0;
- drm_master_put(&file_priv->minor->master);
- }
- }
-
-out_unlock:
- mutex_unlock(&dev->master_mutex);
- return ret;
-}
-
-int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- int ret = -EINVAL;
-
- mutex_lock(&dev->master_mutex);
- if (!file_priv->is_master)
- goto out_unlock;
-
- if (!file_priv->minor->master)
- goto out_unlock;
-
- ret = 0;
- if (dev->driver->master_drop)
- dev->driver->master_drop(dev, file_priv, false);
- drm_master_put(&file_priv->minor->master);
- file_priv->is_master = 0;
-
-out_unlock:
- mutex_unlock(&dev->master_mutex);
- return ret;
-}
-
/*
* DRM Minors
* A DRM device can provide several char-dev interfaces on the DRM-Major. Each
@@ -405,10 +299,9 @@ void drm_minor_release(struct drm_minor *minor)
* callbacks implemented by the driver. The driver then needs to initialize all
* the various subsystems for the drm device like memory management, vblank
* handling, modesetting support and initial output configuration plus obviously
- * initialize all the corresponding hardware bits. An important part of this is
- * also calling drm_dev_set_unique() to set the userspace-visible unique name of
- * this device instance. Finally when everything is up and running and ready for
- * userspace the device instance can be published using drm_dev_register().
+ * initialize all the corresponding hardware bits. Finally, when everything is
+ * up and running and ready for userspace, the device instance can be published
+ * using drm_dev_register().
*
* There is also deprecated support for initializing device instances using
* bus-specific helpers and the ->load() callback. But due to
@@ -430,6 +323,14 @@ void drm_minor_release(struct drm_minor *minor)
* dev_priv field of &drm_device.
*/
+static int drm_dev_set_unique(struct drm_device *dev, const char *name)
+{
+ kfree(dev->unique);
+ dev->unique = kstrdup(name, GFP_KERNEL);
+
+ return dev->unique ? 0 : -ENOMEM;
+}
+
/**
* drm_put_dev - Unregister and release a DRM device
* @dev: DRM device
@@ -461,9 +362,7 @@ EXPORT_SYMBOL(drm_put_dev);
void drm_unplug_dev(struct drm_device *dev)
{
/* for a USB device */
- drm_minor_unregister(dev, DRM_MINOR_LEGACY);
- drm_minor_unregister(dev, DRM_MINOR_RENDER);
- drm_minor_unregister(dev, DRM_MINOR_CONTROL);
+ drm_dev_unregister(dev);
mutex_lock(&drm_global_mutex);
@@ -549,11 +448,12 @@ static void drm_fs_inode_free(struct inode *inode)
}
/**
- * drm_dev_alloc - Allocate new DRM device
- * @driver: DRM driver to allocate device for
+ * drm_dev_init - Initialise new DRM device
+ * @dev: DRM device
+ * @driver: DRM driver
* @parent: Parent device object
*
- * Allocate and initialize a new DRM device. No device registration is done.
+ * Initialize a new DRM device. No device registration is done.
* Call drm_dev_register() to advertise the device to user space and register it
* with other core subsystems. This should be done last in the device
* initialization sequence to make sure userspace can't access an inconsistent
@@ -564,19 +464,18 @@ static void drm_fs_inode_free(struct inode *inode)
*
* Note that for purely virtual devices @parent can be NULL.
*
+ * Drivers that do not want to allocate their own device struct
+ * embedding struct &drm_device can call drm_dev_alloc() instead.
+ *
* RETURNS:
- * Pointer to new DRM device, or NULL if out of memory.
+ * 0 on success, or error code on failure.
*/
-struct drm_device *drm_dev_alloc(struct drm_driver *driver,
- struct device *parent)
+int drm_dev_init(struct drm_device *dev,
+ struct drm_driver *driver,
+ struct device *parent)
{
- struct drm_device *dev;
int ret;
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return NULL;
-
kref_init(&dev->ref);
dev->dev = parent;
dev->driver = driver;
@@ -605,8 +504,6 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
if (ret)
goto err_minors;
-
- WARN_ON(driver->suspend || driver->resume);
}
if (drm_core_check_feature(dev, DRIVER_RENDER)) {
@@ -619,7 +516,8 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
if (ret)
goto err_minors;
- if (drm_ht_create(&dev->map_hash, 12))
+ ret = drm_ht_create(&dev->map_hash, 12);
+ if (ret)
goto err_minors;
drm_legacy_ctxbitmap_init(dev);
@@ -632,13 +530,13 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
}
}
- if (parent) {
- ret = drm_dev_set_unique(dev, dev_name(parent));
- if (ret)
- goto err_setunique;
- }
+ /* Use the parent device name as DRM device unique identifier, but fall
+ * back to the driver name for virtual devices like vgem. */
+ ret = drm_dev_set_unique(dev, parent ? dev_name(parent) : driver->name);
+ if (ret)
+ goto err_setunique;
- return dev;
+ return 0;
err_setunique:
if (drm_core_check_feature(dev, DRIVER_GEM))
@@ -653,8 +551,49 @@ err_minors:
drm_fs_inode_free(dev->anon_inode);
err_free:
mutex_destroy(&dev->master_mutex);
- kfree(dev);
- return NULL;
+ return ret;
+}
+EXPORT_SYMBOL(drm_dev_init);
+
+/**
+ * drm_dev_alloc - Allocate new DRM device
+ * @driver: DRM driver to allocate device for
+ * @parent: Parent device object
+ *
+ * Allocate and initialize a new DRM device. No device registration is done.
+ * Call drm_dev_register() to advertise the device to user space and register it
+ * with other core subsystems. This should be done last in the device
+ * initialization sequence to make sure userspace can't access an inconsistent
+ * state.
+ *
+ * The initial ref-count of the object is 1. Use drm_dev_ref() and
+ * drm_dev_unref() to take and drop further ref-counts.
+ *
+ * Note that for purely virtual devices @parent can be NULL.
+ *
+ * Drivers that wish to subclass or embed struct &drm_device into their
+ * own struct should look at using drm_dev_init() instead.
+ *
+ * RETURNS:
+ * Pointer to new DRM device, or NULL if out of memory.
+ */
+struct drm_device *drm_dev_alloc(struct drm_driver *driver,
+ struct device *parent)
+{
+ struct drm_device *dev;
+ int ret;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ ret = drm_dev_init(dev, driver, parent);
+ if (ret) {
+ kfree(dev);
+ return NULL;
+ }
+
+ return dev;
}
EXPORT_SYMBOL(drm_dev_alloc);
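For drivers that embed struct drm_device in their own device structure, the new drm_dev_init() entry point replaces the allocation step; a hedged sketch with a hypothetical foo_device and foo_driver:

    struct foo_device {
        struct drm_device drm;  /* embedded DRM device */
        void __iomem *mmio;
    };

    static int foo_probe(struct platform_device *pdev)
    {
        struct foo_device *foo;
        int ret;

        foo = kzalloc(sizeof(*foo), GFP_KERNEL);
        if (!foo)
            return -ENOMEM;

        ret = drm_dev_init(&foo->drm, &foo_driver, &pdev->dev);
        if (ret) {
            kfree(foo);
            return ret;
        }

        /* ... map registers, set up modeset state ... */

        return drm_dev_register(&foo->drm, 0);
    }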
@@ -718,11 +657,7 @@ EXPORT_SYMBOL(drm_dev_unref);
*
* Register the DRM device @dev with the system, advertise device to user-space
* and start normal device operation. @dev must be allocated via drm_dev_alloc()
- * previously. Right after drm_dev_register() the driver should call
- * drm_connector_register_all() to register all connectors in sysfs. This is
- * a separate call for backward compatibility with drivers still using
- * the deprecated ->load() callback, where connectors are registered from within
- * the ->load() callback.
+ * previously.
*
* Never call this twice on any device!
*
@@ -759,6 +694,9 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
goto err_minors;
}
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_modeset_register_all(dev);
+
ret = 0;
goto out_unlock;
@@ -789,6 +727,9 @@ void drm_dev_unregister(struct drm_device *dev)
drm_lastclose(dev);
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_modeset_unregister_all(dev);
+
if (dev->driver->unload)
dev->driver->unload(dev);
@@ -806,26 +747,6 @@ void drm_dev_unregister(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_dev_unregister);
-/**
- * drm_dev_set_unique - Set the unique name of a DRM device
- * @dev: device of which to set the unique name
- * @name: unique name
- *
- * Sets the unique name of a DRM device using the specified string. Drivers
- * can use this at driver probe time if the unique name of the devices they
- * drive is static.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-int drm_dev_set_unique(struct drm_device *dev, const char *name)
-{
- kfree(dev->unique);
- dev->unique = kstrdup(name, GFP_KERNEL);
-
- return dev->unique ? 0 : -ENOMEM;
-}
-EXPORT_SYMBOL(drm_dev_set_unique);
-
/*
* DRM Core
* The DRM core module initializes all global DRM objects and makes them
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 2cb472b99..637a0aa4d 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3867,6 +3867,20 @@ static void drm_add_display_info(struct edid *edid,
/* HDMI deep color modes supported? Assign to info, if so */
drm_assign_hdmi_deep_color_info(edid, info, connector);
+ /*
+ * Digital sink with "DFP 1.x compliant TMDS" according to EDID 1.3?
+ *
+ * For such displays, the DFP spec 1.0, section 3.10 "EDID support"
+ * tells us to assume 8 bpc color depth if the EDID doesn't have
+ * extensions which tell otherwise.
+ */
+ if ((info->bpc == 0) && (edid->revision < 4) &&
+ (edid->input & DRM_EDID_DIGITAL_TYPE_DVI)) {
+ info->bpc = 8;
+ DRM_DEBUG("%s: Assigning DFP sink color depth as %d bpc.\n",
+ connector->name, info->bpc);
+ }
+
/* Only defined for 1.4 with digital displays */
if (edid->revision < 4)
return;
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 9a401aed9..622f788bf 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -271,7 +271,7 @@ int drm_load_edid_firmware(struct drm_connector *connector)
* by commas, search through the list looking for one that
* matches the connector.
*
- * If there's one or more that don't't specify a connector, keep
+ * If one or more of them don't specify a connector, keep
* the last one found as a fallback.
*/
fwstr = kstrdup(edid_firmware, GFP_KERNEL);
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 5075fae3c..1fd6eac14 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -23,6 +23,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#define DEFAULT_FBDEFIO_DELAY_MS 50
@@ -52,7 +53,7 @@ struct drm_fbdev_cma {
* will be set up automatically. dirty() is called by
* drm_fb_helper_deferred_io() in process context (struct delayed_work).
*
- * Example fbdev deferred io code:
+ * Example fbdev deferred io code::
*
* static int driver_fbdev_fb_dirty(struct drm_framebuffer *fb,
* struct drm_file *file_priv,
@@ -162,6 +163,10 @@ static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
* drm_fb_cma_create_with_funcs() - helper function for the
* &drm_mode_config_funcs ->fb_create
* callback function
+ * @dev: DRM device
+ * @file_priv: drm file for the ioctl call
+ * @mode_cmd: metadata from the userspace fb creation request
+ * @funcs: vtable to be used for the new framebuffer object
*
* This can be used to set &drm_framebuffer_funcs for drivers that need the
* dirty() callback. Use drm_fb_cma_create() if you don't need to change
@@ -223,6 +228,9 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_create_with_funcs);
/**
* drm_fb_cma_create() - &drm_mode_config_funcs ->fb_create callback function
+ * @dev: DRM device
+ * @file_priv: drm file for the ioctl call
+ * @mode_cmd: metadata from the userspace fb creation request
*
* If your hardware has special alignment or pitch requirements these should be
* checked before calling this function. Use drm_fb_cma_create_with_funcs() if
@@ -246,7 +254,7 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_create);
* This function will usually be called from the CRTC callback functions.
*/
struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
- unsigned int plane)
+ unsigned int plane)
{
struct drm_fb_cma *fb_cma = to_fb_cma(fb);
@@ -258,10 +266,6 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
#ifdef CONFIG_DEBUG_FS
-/*
- * drm_fb_cma_describe() - Helper to dump information about a single
- * CMA framebuffer object
- */
static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
struct drm_fb_cma *fb_cma = to_fb_cma(fb);
@@ -279,7 +283,9 @@ static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
/**
* drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects
- * in debugfs.
+ * in debugfs.
+ * @m: output file
+ * @arg: private data for the callback
*/
int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
{
@@ -297,6 +303,12 @@ int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show);
#endif
+static int drm_fb_cma_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ return dma_mmap_writecombine(info->device, vma, info->screen_base,
+ info->fix.smem_start, info->fix.smem_len);
+}
+
static struct fb_ops drm_fbdev_cma_ops = {
.owner = THIS_MODULE,
.fb_fillrect = drm_fb_helper_sys_fillrect,
@@ -307,6 +319,7 @@ static struct fb_ops drm_fbdev_cma_ops = {
.fb_blank = drm_fb_helper_blank,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_setcmap = drm_fb_helper_setcmap,
+ .fb_mmap = drm_fb_cma_mmap,
};
static int drm_fbdev_cma_deferred_io_mmap(struct fb_info *info,
@@ -333,6 +346,7 @@ static int drm_fbdev_cma_defio_init(struct fb_info *fbi,
fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
if (!fbdefio || !fbops) {
kfree(fbdefio);
+ kfree(fbops);
return -ENOMEM;
}
@@ -582,3 +596,18 @@ void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma)
drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper);
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event);
+
+/**
+ * drm_fbdev_cma_set_suspend - wrapper around drm_fb_helper_set_suspend
+ * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
+ * @state: desired state, zero to resume, non-zero to suspend
+ *
+ * Calls drm_fb_helper_set_suspend, which is a wrapper around
+ * fb_set_suspend implemented by fbdev core.
+ */
+void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, int state)
+{
+ if (fbdev_cma)
+ drm_fb_helper_set_suspend(&fbdev_cma->fb_helper, state);
+}
+EXPORT_SYMBOL(drm_fbdev_cma_set_suspend);
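A driver would typically call this from its system PM hooks so the fbdev console stops touching scanout memory across suspend; a hedged sketch (hypothetical foo_device, console locking details elided):

    static int foo_pm_suspend(struct device *dev)
    {
        struct foo_device *foo = dev_get_drvdata(dev);

        drm_fbdev_cma_set_suspend(foo->fbdev, 1);
        return 0;
    }

    static int foo_pm_resume(struct device *dev)
    {
        struct foo_device *foo = dev_get_drvdata(dev);

        drm_fbdev_cma_set_suspend(foo->fbdev, 0);
        return 0;
    }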
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 7c2eb75db..0a06f9120 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -227,7 +227,7 @@ static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
g_base = r_base + crtc->gamma_size;
b_base = g_base + crtc->gamma_size;
- crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
+ crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
}
/**
@@ -385,7 +385,7 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
drm_warn_on_modeset_not_all_locked(dev);
- if (fb_helper->atomic)
+ if (dev->mode_config.funcs->atomic_commit)
return restore_fbdev_mode_atomic(fb_helper);
drm_for_each_plane(plane, dev) {
@@ -464,7 +464,7 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
/* Sometimes user space wants everything disabled, so don't steal the
* display if there's a master. */
- if (dev->primary->master)
+ if (READ_ONCE(dev->master))
return false;
drm_for_each_crtc(crtc, dev) {
@@ -716,8 +716,6 @@ int drm_fb_helper_init(struct drm_device *dev,
i++;
}
- fb_helper->atomic = !!drm_core_check_feature(dev, DRIVER_ATOMIC);
-
return 0;
out_free:
drm_fb_helper_crtc_free(fb_helper);
@@ -1042,7 +1040,6 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_framebuffer *fb = fb_helper->fb;
- int pindex;
if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
u32 *palette;
@@ -1074,38 +1071,10 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
!fb_helper->funcs->gamma_get))
return -EINVAL;
- pindex = regno;
-
- if (fb->bits_per_pixel == 16) {
- pindex = regno << 3;
-
- if (fb->depth == 16 && regno > 63)
- return -EINVAL;
- if (fb->depth == 15 && regno > 31)
- return -EINVAL;
-
- if (fb->depth == 16) {
- u16 r, g, b;
- int i;
- if (regno < 32) {
- for (i = 0; i < 8; i++)
- fb_helper->funcs->gamma_set(crtc, red,
- green, blue, pindex + i);
- }
+ WARN_ON(fb->bits_per_pixel != 8);
- fb_helper->funcs->gamma_get(crtc, &r,
- &g, &b,
- pindex >> 1);
+ fb_helper->funcs->gamma_set(crtc, red, green, blue, regno);
- for (i = 0; i < 4; i++)
- fb_helper->funcs->gamma_set(crtc, r,
- green, b,
- (pindex >> 1) + i);
- }
- }
-
- if (fb->depth != 16)
- fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex);
return 0;
}
@@ -1373,7 +1342,7 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
return -EBUSY;
}
- if (fb_helper->atomic) {
+ if (dev->mode_config.funcs->atomic_commit) {
ret = pan_display_atomic(var, info);
goto unlock;
}
@@ -2000,7 +1969,18 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
my_score++;
connector_funcs = connector->helper_private;
- encoder = connector_funcs->best_encoder(connector);
+
+ /*
+ * If the DRM device implements atomic hooks and ->best_encoder() is
+ * NULL, we fall back to the default drm_atomic_helper_best_encoder()
+ * helper.
+ */
+ if (fb_helper->dev->mode_config.funcs->atomic_commit &&
+ !connector_funcs->best_encoder)
+ encoder = drm_atomic_helper_best_encoder(connector);
+ else
+ encoder = connector_funcs->best_encoder(connector);
+
if (!encoder)
goto out;
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 7af7f8bcb..323c238fc 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -40,6 +40,7 @@
#include <linux/module.h>
#include "drm_legacy.h"
#include "drm_internal.h"
+#include "drm_crtc_internal.h"
/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);
@@ -67,7 +68,7 @@ DEFINE_MUTEX(drm_global_mutex);
* specific implementations. For GEM-based drivers this is drm_gem_mmap().
*
* No other file operations are supported by the DRM userspace API. Overall the
- * following is an example #file_operations structure:
+ * following is an example #file_operations structure::
*
* static const example_drm_fops = {
* .owner = THIS_MODULE,
@@ -168,60 +169,6 @@ static int drm_cpu_valid(void)
}
/*
- * drm_new_set_master - Allocate a new master object and become master for the
- * associated master realm.
- *
- * @dev: The associated device.
- * @fpriv: File private identifying the client.
- *
- * This function must be called with dev::struct_mutex held.
- * Returns negative error code on failure. Zero on success.
- */
-int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
-{
- struct drm_master *old_master;
- int ret;
-
- lockdep_assert_held_once(&dev->master_mutex);
-
- /* create a new master */
- fpriv->minor->master = drm_master_create(fpriv->minor);
- if (!fpriv->minor->master)
- return -ENOMEM;
-
- /* take another reference for the copy in the local file priv */
- old_master = fpriv->master;
- fpriv->master = drm_master_get(fpriv->minor->master);
-
- if (dev->driver->master_create) {
- ret = dev->driver->master_create(dev, fpriv->master);
- if (ret)
- goto out_err;
- }
- if (dev->driver->master_set) {
- ret = dev->driver->master_set(dev, fpriv, true);
- if (ret)
- goto out_err;
- }
-
- fpriv->is_master = 1;
- fpriv->allowed_master = 1;
- fpriv->authenticated = 1;
- if (old_master)
- drm_master_put(&old_master);
-
- return 0;
-
-out_err:
- /* drop both references and restore old master on failure */
- drm_master_put(&fpriv->minor->master);
- drm_master_put(&fpriv->master);
- fpriv->master = old_master;
-
- return ret;
-}
-
-/*
* Called whenever a process opens /dev/drm.
*
* \param filp file pointer.
@@ -283,19 +230,11 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
goto out_prime_destroy;
}
- /* if there is no current master make this fd it, but do not create
- * any master object for render clients */
- mutex_lock(&dev->master_mutex);
- if (drm_is_primary_client(priv) && !priv->minor->master) {
- /* create a new master */
- ret = drm_new_set_master(dev, priv);
+ if (drm_is_primary_client(priv)) {
+ ret = drm_master_open(priv);
if (ret)
goto out_close;
- } else if (drm_is_primary_client(priv)) {
- /* get a reference to the master */
- priv->master = drm_master_get(priv->minor->master);
}
- mutex_unlock(&dev->master_mutex);
mutex_lock(&dev->filelist_mutex);
list_add(&priv->lhead, &dev->filelist);
@@ -324,7 +263,6 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
return 0;
out_close:
- mutex_unlock(&dev->master_mutex);
if (dev->driver->postclose)
dev->driver->postclose(dev, priv);
out_prime_destroy:
@@ -338,18 +276,6 @@ out_prime_destroy:
return ret;
}
-static void drm_master_release(struct drm_device *dev, struct file *filp)
-{
- struct drm_file *file_priv = filp->private_data;
-
- if (drm_legacy_i_have_hw_lock(dev, file_priv)) {
- DRM_DEBUG("File %p released, freeing lock for context %d\n",
- filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
- drm_legacy_lock_free(&file_priv->master->lock,
- _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
- }
-}
-
static void drm_events_release(struct drm_file *file_priv)
{
struct drm_device *dev = file_priv->minor->dev;
@@ -368,7 +294,7 @@ static void drm_events_release(struct drm_file *file_priv)
/* Remove unconsumed events */
list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
list_del(&e->link);
- e->destroy(e);
+ kfree(e);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -451,11 +377,6 @@ int drm_release(struct inode *inode, struct file *filp)
list_del(&file_priv->lhead);
mutex_unlock(&dev->filelist_mutex);
- mutex_lock(&dev->struct_mutex);
- if (file_priv->magic)
- idr_remove(&file_priv->master->magic_map, file_priv->magic);
- mutex_unlock(&dev->struct_mutex);
-
if (dev->driver->preclose)
dev->driver->preclose(dev, file_priv);
@@ -468,9 +389,8 @@ int drm_release(struct inode *inode, struct file *filp)
(long)old_encode_dev(file_priv->minor->kdev->devt),
dev->open_count);
- /* if the master has gone away we can't do anything with the lock */
- if (file_priv->minor->master)
- drm_master_release(dev, filp);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_legacy_lock_release(dev, filp);
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
drm_legacy_reclaim_buffers(dev, file_priv);
@@ -487,43 +407,12 @@ int drm_release(struct inode *inode, struct file *filp)
drm_legacy_ctxbitmap_flush(dev, file_priv);
- mutex_lock(&dev->master_mutex);
-
- if (file_priv->is_master) {
- struct drm_master *master = file_priv->master;
-
- /*
- * Since the master is disappearing, so is the
- * possibility to lock.
- */
- mutex_lock(&dev->struct_mutex);
- if (master->lock.hw_lock) {
- if (dev->sigdata.lock == master->lock.hw_lock)
- dev->sigdata.lock = NULL;
- master->lock.hw_lock = NULL;
- master->lock.file_priv = NULL;
- wake_up_interruptible_all(&master->lock.lock_queue);
- }
- mutex_unlock(&dev->struct_mutex);
-
- if (file_priv->minor->master == file_priv->master) {
- /* drop the reference held my the minor */
- if (dev->driver->master_drop)
- dev->driver->master_drop(dev, file_priv, true);
- drm_master_put(&file_priv->minor->master);
- }
- }
-
- /* drop the master reference held by the file priv */
- if (file_priv->master)
- drm_master_put(&file_priv->master);
- file_priv->is_master = 0;
- mutex_unlock(&dev->master_mutex);
+ if (drm_is_primary_client(file_priv))
+ drm_master_release(file_priv);
if (dev->driver->postclose)
dev->driver->postclose(dev, file_priv);
-
if (drm_core_check_feature(dev, DRIVER_PRIME))
drm_prime_destroy_file_private(&file_priv->prime);
@@ -636,7 +525,7 @@ put_back_event:
}
ret += length;
- e->destroy(e);
+ kfree(e);
}
}
mutex_unlock(&file_priv->event_read_lock);
@@ -713,9 +602,6 @@ int drm_event_reserve_init_locked(struct drm_device *dev,
list_add(&p->pending_link, &file_priv->pending_event_list);
p->file_priv = file_priv;
- /* we *could* pass this in as arg, but everyone uses kfree: */
- p->destroy = (void (*) (struct drm_pending_event *)) kfree;
-
return 0;
}
EXPORT_SYMBOL(drm_event_reserve_init_locked);
@@ -778,7 +664,7 @@ void drm_event_cancel_free(struct drm_device *dev,
list_del(&p->pending_link);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
- p->destroy(p);
+ kfree(p);
}
EXPORT_SYMBOL(drm_event_cancel_free);
@@ -800,8 +686,19 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
{
assert_spin_locked(&dev->event_lock);
+ if (e->completion) {
+ /* ->completion might disappear as soon as it is signalled. */
+ complete_all(e->completion);
+ e->completion = NULL;
+ }
+
+ if (e->fence) {
+ fence_signal(e->fence);
+ fence_put(e->fence);
+ }
+
if (!e->file_priv) {
- e->destroy(e);
+ kfree(e);
return;
}
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
new file mode 100644
index 000000000..0645c85d5
--- /dev/null
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -0,0 +1,320 @@
+/*
+ * Copyright (c) 2016 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * DRM core format related functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/bug.h>
+#include <linux/ctype.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_fourcc.h>
+
+static char printable_char(int c)
+{
+ return isascii(c) && isprint(c) ? c : '?';
+}
+
+/**
+ * drm_get_format_name - return a string for drm fourcc format
+ * @format: format to compute name of
+ *
+ * Note that the buffer used by this function is globally shared and owned by
+ * the function itself.
+ *
+ * FIXME: This isn't really multithreading safe.
+ */
+const char *drm_get_format_name(uint32_t format)
+{
+ static char buf[32];
+
+ snprintf(buf, sizeof(buf),
+ "%c%c%c%c %s-endian (0x%08x)",
+ printable_char(format & 0xff),
+ printable_char((format >> 8) & 0xff),
+ printable_char((format >> 16) & 0xff),
+ printable_char((format >> 24) & 0x7f),
+ format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little",
+ format);
+
+ return buf;
+}
+EXPORT_SYMBOL(drm_get_format_name);
+
+/**
+ * drm_fb_get_bpp_depth - get the bpp/depth values for format
+ * @format: pixel format (DRM_FORMAT_*)
+ * @depth: storage for the depth value
+ * @bpp: storage for the bpp value
+ *
+ * This only supports RGB formats here for compat with code that doesn't use
+ * pixel formats directly yet.
+ */
+void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
+ int *bpp)
+{
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_BGR233:
+ *depth = 8;
+ *bpp = 8;
+ break;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XBGR1555:
+ case DRM_FORMAT_RGBX5551:
+ case DRM_FORMAT_BGRX5551:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_BGRA5551:
+ *depth = 15;
+ *bpp = 16;
+ break;
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ *depth = 16;
+ *bpp = 16;
+ break;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ *depth = 24;
+ *bpp = 24;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ *depth = 24;
+ *bpp = 32;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_RGBX1010102:
+ case DRM_FORMAT_BGRX1010102:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_BGRA1010102:
+ *depth = 30;
+ *bpp = 32;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ *depth = 32;
+ *bpp = 32;
+ break;
+ default:
+ DRM_DEBUG_KMS("unsupported pixel format %s\n",
+ drm_get_format_name(format));
+ *depth = 0;
+ *bpp = 0;
+ break;
+ }
+}
+EXPORT_SYMBOL(drm_fb_get_bpp_depth);
+
+/**
+ * drm_format_num_planes - get the number of planes for format
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * Returns:
+ * The number of planes used by the specified pixel format.
+ */
+int drm_format_num_planes(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 3;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV24:
+ case DRM_FORMAT_NV42:
+ return 2;
+ default:
+ return 1;
+ }
+}
+EXPORT_SYMBOL(drm_format_num_planes);
+
+/**
+ * drm_format_plane_cpp - determine the bytes per pixel value
+ * @format: pixel format (DRM_FORMAT_*)
+ * @plane: plane index
+ *
+ * Returns:
+ * The bytes per pixel value for the specified plane.
+ */
+int drm_format_plane_cpp(uint32_t format, int plane)
+{
+ unsigned int depth;
+ int bpp;
+
+ if (plane >= drm_format_num_planes(format))
+ return 0;
+
+ switch (format) {
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ return 2;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV24:
+ case DRM_FORMAT_NV42:
+ return plane ? 2 : 1;
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 1;
+ default:
+ drm_fb_get_bpp_depth(format, &depth, &bpp);
+ return bpp >> 3;
+ }
+}
+EXPORT_SYMBOL(drm_format_plane_cpp);
+
+/**
+ * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * Returns:
+ * The horizontal chroma subsampling factor for the
+ * specified pixel format.
+ */
+int drm_format_horz_chroma_subsampling(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ return 4;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ return 2;
+ default:
+ return 1;
+ }
+}
+EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
+
+/**
+ * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * Returns:
+ * The vertical chroma subsampling factor for the
+ * specified pixel format.
+ */
+int drm_format_vert_chroma_subsampling(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ return 4;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ return 2;
+ default:
+ return 1;
+ }
+}
+EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
+
+/**
+ * drm_format_plane_width - width of the plane given the first plane
+ * @width: width of the first plane
+ * @format: pixel format
+ * @plane: plane index
+ *
+ * Returns:
+ * The width of @plane, given that the width of the first plane is @width.
+ */
+int drm_format_plane_width(int width, uint32_t format, int plane)
+{
+ if (plane >= drm_format_num_planes(format))
+ return 0;
+
+ if (plane == 0)
+ return width;
+
+ return width / drm_format_horz_chroma_subsampling(format);
+}
+EXPORT_SYMBOL(drm_format_plane_width);
+
+/**
+ * drm_format_plane_height - height of the plane given the first plane
+ * @height: height of the first plane
+ * @format: pixel format
+ * @plane: plane index
+ *
+ * Returns:
+ * The height of @plane, given that the height of the first plane is @height.
+ */
+int drm_format_plane_height(int height, uint32_t format, int plane)
+{
+ if (plane >= drm_format_num_planes(format))
+ return 0;
+
+ if (plane == 0)
+ return height;
+
+ return height / drm_format_vert_chroma_subsampling(format);
+}
+EXPORT_SYMBOL(drm_format_plane_height);
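Taken together, these helpers describe multi-planar layouts; for DRM_FORMAT_NV12 they report 2 planes, 2x2 chroma subsampling, and a cpp of 1 for Y and 2 for the interleaved CbCr plane. A hedged sketch of per-plane size math built on them (hypothetical helper, ignores pitch alignment):

    static size_t foo_fb_plane_size(uint32_t format, int width, int height,
                                    int plane)
    {
        int w = drm_format_plane_width(width, format, plane);
        int h = drm_format_plane_height(height, format, plane);
        int cpp = drm_format_plane_cpp(format, plane);

        /* NV12 at 1920x1080: plane 0 = 1920*1080*1, plane 1 = 960*540*2 */
        return (size_t)w * h * cpp;
    }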
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index ddfecb5de..9134ae134 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -137,7 +137,7 @@ int drm_gem_object_init(struct drm_device *dev,
drm_gem_private_object_init(dev, obj, size);
- filp = shmem_file_setup("drm mm object", size, VM_NORESERVE, 1);
+ filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
if (IS_ERR(filp))
return PTR_ERR(filp);
@@ -511,7 +511,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
int i, npages;
/* This is the shared memory object that backs the GEM resource */
- mapping = file_inode(obj->filp)->i_mapping;
+ mapping = obj->filp->f_mapping;
/* We already BUG_ON() for non-page-aligned sizes in
* drm_gem_object_init(), so we should never hit this unless
@@ -787,7 +787,7 @@ EXPORT_SYMBOL(drm_gem_object_release);
* @kref: kref of the object to free
*
* Called after the last reference to the object has been lost.
- * Must be called holding struct_ mutex
+ * Must be called holding &drm_device->struct_mutex.
*
* Frees the object
*/
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 5d469b2f2..9ae353f4d 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -50,106 +50,24 @@ int drm_name_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_minor *minor = node->minor;
struct drm_device *dev = minor->dev;
- struct drm_master *master = minor->master;
- if (!master)
- return 0;
-
- if (master->unique) {
- seq_printf(m, "%s %s %s\n",
- dev->driver->name,
- dev_name(dev->dev), master->unique);
- } else {
- seq_printf(m, "%s %s\n",
- dev->driver->name, dev_name(dev->dev));
- }
- return 0;
-}
-
-/**
- * Called when "/proc/dri/.../vm" is read.
- *
- * Prints information about all mappings in drm_device::maplist.
- */
-int drm_vm_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_local_map *map;
- struct drm_map_list *r_list;
-
- /* Hardcoded from _DRM_FRAME_BUFFER,
- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
- const char *type;
- int i;
-
- mutex_lock(&dev->struct_mutex);
- seq_printf(m, "slot offset size type flags address mtrr\n\n");
- i = 0;
- list_for_each_entry(r_list, &dev->maplist, head) {
- map = r_list->map;
- if (!map)
- continue;
- if (map->type < 0 || map->type > 5)
- type = "??";
- else
- type = types[map->type];
-
- seq_printf(m, "%4d 0x%016llx 0x%08lx %4.4s 0x%02x 0x%08lx ",
- i,
- (unsigned long long)map->offset,
- map->size, type, map->flags,
- (unsigned long) r_list->user_token);
- if (map->mtrr < 0)
- seq_printf(m, "none\n");
- else
- seq_printf(m, "%4d\n", map->mtrr);
- i++;
- }
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
+ struct drm_master *master;
-/**
- * Called when "/proc/dri/.../bufs" is read.
- */
-int drm_bufs_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_device_dma *dma;
- int i, seg_pages;
-
- mutex_lock(&dev->struct_mutex);
- dma = dev->dma;
- if (!dma) {
- mutex_unlock(&dev->struct_mutex);
- return 0;
- }
-
- seq_printf(m, " o size count free segs pages kB\n\n");
- for (i = 0; i <= DRM_MAX_ORDER; i++) {
- if (dma->bufs[i].buf_count) {
- seg_pages = dma->bufs[i].seg_count * (1 << dma->bufs[i].page_order);
- seq_printf(m, "%2d %8d %5d %5d %5d %5d %5ld\n",
- i,
- dma->bufs[i].buf_size,
- dma->bufs[i].buf_count,
- 0,
- dma->bufs[i].seg_count,
- seg_pages,
- seg_pages * PAGE_SIZE / 1024);
- }
- }
- seq_printf(m, "\n");
- for (i = 0; i < dma->buf_count; i++) {
- if (i && !(i % 32))
- seq_printf(m, "\n");
- seq_printf(m, " %d", dma->buflist[i]->list);
- }
+ mutex_lock(&dev->master_mutex);
+ master = dev->master;
+ if (!master)
+ goto out_unlock;
+
+ seq_printf(m, "%s", dev->driver->name);
+ if (dev->dev)
+ seq_printf(m, " dev=%s", dev_name(dev->dev));
+ if (master && master->unique)
+ seq_printf(m, " master=%s", master->unique);
+ if (dev->unique)
+ seq_printf(m, " unique=%s", dev->unique);
seq_printf(m, "\n");
- mutex_unlock(&dev->struct_mutex);
+out_unlock:
+ mutex_unlock(&dev->master_mutex);
+
return 0;
}
@@ -184,7 +102,7 @@ int drm_clients_info(struct seq_file *m, void *data)
task ? task->comm : "<unknown>",
pid_vnr(priv->pid),
priv->minor->index,
- priv->is_master ? 'y' : 'n',
+ drm_is_current_master(priv) ? 'y' : 'n',
priv->authenticated ? 'y' : 'n',
from_kuid_munged(seq_user_ns(m), priv->uid),
priv->magic);
@@ -194,7 +112,6 @@ int drm_clients_info(struct seq_file *m, void *data)
return 0;
}
-
static int drm_gem_one_name_info(int id, void *ptr, void *data)
{
struct drm_gem_object *obj = ptr;
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 902cf6a15..b86dc9b92 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -29,15 +29,9 @@ extern struct mutex drm_global_mutex;
void drm_lastclose(struct drm_device *dev);
/* drm_pci.c */
-int drm_pci_set_unique(struct drm_device *dev,
- struct drm_master *master,
- struct drm_unique *u);
int drm_irq_by_busid(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-/* drm_vm.c */
-int drm_vma_info(struct seq_file *m, void *data);
-
/* drm_prime.c */
int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -51,8 +45,6 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr
/* drm_info.c */
int drm_name_info(struct seq_file *m, void *data);
-int drm_vm_info(struct seq_file *m, void *data);
-int drm_bufs_info(struct seq_file *m, void *data);
int drm_clients_info(struct seq_file *m, void* data);
int drm_gem_name_info(struct seq_file *m, void *data);
@@ -67,6 +59,12 @@ int drm_getmagic(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_authmagic(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int drm_setmaster_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_master_open(struct drm_file *file_priv);
+void drm_master_release(struct drm_file *file_priv);
/* drm_sysfs.c */
extern struct class *drm_class;
@@ -92,13 +90,6 @@ int drm_gem_open_ioctl(struct drm_device *dev, void *data,
void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
-/* drm_drv.c */
-int drm_setmaster_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-struct drm_master *drm_master_create(struct drm_minor *minor);
-
/* drm_debugfs.c */
#if defined(CONFIG_DEBUG_FS)
int drm_debugfs_init(struct drm_minor *minor, int minor_id,
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index b7a39771c..33af4a5dd 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -30,6 +30,7 @@
#include <drm/drmP.h>
#include <drm/drm_core.h>
+#include <drm/drm_auth.h>
#include "drm_legacy.h"
#include "drm_internal.h"
#include "drm_crtc_internal.h"
@@ -37,6 +38,64 @@
#include <linux/pci.h>
#include <linux/export.h>
+/**
+ * DOC: getunique and setversion story
+ *
+ * BEWARE THE DRAGONS! MIND THE TRAPDOORS!
+ *
+ * In an attempt to warn anyone else who's trying to figure out what's going
+ * on here, I'll try to summarize the story. First things first, let's clear up
+ * the names, because the kernel internals, libdrm and the ioctls are all named
+ * differently:
+ *
+ * - GET_UNIQUE ioctl, implemented by drm_getunique is wrapped up in libdrm
+ * through the drmGetBusid function.
+ * - The libdrm drmSetBusid function is backed by the SET_UNIQUE ioctl. All
+ *   that code is disabled in the kernel with drm_invalid_op().
+ * - The internal set_busid kernel functions and driver callbacks are
+ *   exclusively used by the SET_VERSION ioctl, because only drm 1.0 (which is
+ *   now disabled) allowed userspace to set the busid through the above ioctl.
+ * - Other ioctls and functions involved are named consistently.
+ *
+ * For anyone wondering what the difference between drm 1.1 and 1.4 is:
+ * correctly handling pci domains in the busid on ppc. Doing this correctly
+ * was only implemented in libdrm in 2010, hence that path can't be disabled
+ * yet. No one knows what's special about drm 1.2 and 1.3.
+ *
+ * Now for the actual horror story of how device lookup in drm works. By and
+ * large, there are two ways: either by busid, or by device driver name.
+ *
+ * Opening by busid is fairly simple:
+ *
+ * 1. First call SET_VERSION to make sure pci domains are handled properly. As a
+ * side-effect this fills out the unique name in the master structure.
+ * 2. Call GET_UNIQUE to read out the unique name from the master structure,
+ * which matches the busid thanks to step 1. If it doesn't, proceed to try
+ * the next device node.
+ *
+ * Opening by name is slightly different:
+ *
+ * 1. Directly call VERSION to get the version and to match against the driver
+ *    name returned by that ioctl. Note that SET_VERSION is not called, which
+ *    means the unique name for the master node just opened is _not_ filled
+ *    out. This despite the fact that current drm device nodes are always
+ *    bound to one device and can't be runtime-assigned like in drm 1.0.
+ * 2. Match driver name. If it mismatches, proceed to the next device node.
+ * 3. Call GET_UNIQUE, and check whether the unique name has length zero (by
+ * checking that the first byte in the string is 0). If that's not the case
+ * libdrm skips and proceeds to the next device node. Probably this is just
+ * copypasta from drm 1.0 times where a set unique name meant that the driver
+ * was in use already, but that's just conjecture.
+ *
+ * Long story short: To keep the open by name logic working, GET_UNIQUE must
+ * _not_ return a unique string when SET_VERSION hasn't been called yet,
+ * otherwise libdrm breaks. Even though that unique string can never change,
+ * and is totally irrelevant for actually opening the device, because
+ * runtime-assignable device instances were only supported in drm 1.0, which
+ * is long dead. But the libdrm code in drmOpenByName somehow survived, hence
+ * this can't be broken.
+ */
+
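To make the dance above concrete, here is a hedged userspace sketch of the open-by-busid flow (steps 1 and 2); the ioctl structures are the real UAPI ones, but error handling is trimmed, the include path for drm.h depends on how libdrm is installed, and the helper name is illustrative:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm.h>	/* DRM_IOCTL_SET_VERSION / DRM_IOCTL_GET_UNIQUE */

static int example_open_by_busid(const char *node, const char *busid)
{
	struct drm_set_version sv = {
		.drm_di_major = 1, .drm_di_minor = 4,	/* pci domains */
		.drm_dd_major = -1, .drm_dd_minor = -1,
	};
	char unique[128] = "";
	struct drm_unique u = { .unique_len = sizeof(unique) - 1,
				.unique = unique };
	int fd = open(node, O_RDWR);

	if (fd < 0)
		return -1;
	/* Step 1: fills out the master's unique name as a side-effect. */
	ioctl(fd, DRM_IOCTL_SET_VERSION, &sv);
	/* Step 2: read it back and compare against the wanted busid. */
	ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u);
	if (strcmp(unique, busid) == 0)
		return fd;
	close(fd);
	return -1;	/* caller proceeds to the next device node */
}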
static int drm_version(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -75,51 +134,6 @@ drm_unset_busid(struct drm_device *dev,
master->unique_len = 0;
}
-/*
- * Set the bus id.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_unique structure.
- * \return zero on success or a negative number on failure.
- *
- * Copies the bus id from userspace into drm_device::unique, and verifies that
- * it matches the device this DRM is attached to (EINVAL otherwise). Deprecated
- * in interface version 1.1 and will return EBUSY when setversion has requested
- * version 1.1 or greater. Also note that KMS is all version 1.1 and later and
- * UMS was only ever supported on pci devices.
- */
-static int drm_setunique(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_unique *u = data;
- struct drm_master *master = file_priv->master;
- int ret;
-
- if (master->unique_len || master->unique)
- return -EBUSY;
-
- if (!u->unique_len || u->unique_len > 1024)
- return -EINVAL;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return 0;
-
- if (WARN_ON(!dev->pdev))
- return -EINVAL;
-
- ret = drm_pci_set_unique(dev, master, u);
- if (ret)
- goto err;
-
- return 0;
-
-err:
- drm_unset_busid(dev, master);
- return ret;
-}
-
static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_master *master = file_priv->master;
@@ -135,12 +149,7 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
return ret;
}
} else {
- if (WARN(dev->unique == NULL,
- "No drm_driver.set_busid() implementation provided by "
- "%ps. Use drm_dev_set_unique() to set the unique "
- "name explicitly.", dev->driver))
- return -EINVAL;
-
+ WARN_ON(!dev->unique);
master->unique = kstrdup(dev->unique, GFP_KERNEL);
if (master->unique)
master->unique_len = strlen(dev->unique);
@@ -473,7 +482,8 @@ int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
return -EACCES;
/* MASTER is only for master or control clients */
- if (unlikely((flags & DRM_MASTER) && !file_priv->is_master &&
+ if (unlikely((flags & DRM_MASTER) &&
+ !drm_is_current_master(file_priv) &&
!drm_is_control_client(file_priv)))
return -EACCES;
@@ -504,7 +514,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version,
DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
@@ -513,10 +523,10 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
- DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_UNLOCKED|DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH),
@@ -524,8 +534,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_UNLOCKED|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_UNLOCKED|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -638,7 +648,7 @@ long drm_ioctl(struct file *filp,
int retcode = -EINVAL;
char stack_kdata[128];
char *kdata = NULL;
- unsigned int usize, asize, drv_size;
+ unsigned int in_size, out_size, drv_size, ksize;
bool is_driver_ioctl;
dev = file_priv->minor->dev;
@@ -661,9 +671,12 @@ long drm_ioctl(struct file *filp,
}
drv_size = _IOC_SIZE(ioctl->cmd);
- usize = _IOC_SIZE(cmd);
- asize = max(usize, drv_size);
- cmd = ioctl->cmd;
+ out_size = in_size = _IOC_SIZE(cmd);
+ if ((cmd & ioctl->cmd & IOC_IN) == 0)
+ in_size = 0;
+ if ((cmd & ioctl->cmd & IOC_OUT) == 0)
+ out_size = 0;
+ ksize = max(max(in_size, out_size), drv_size);
DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
task_pid_nr(current),
@@ -683,30 +696,24 @@ long drm_ioctl(struct file *filp,
if (unlikely(retcode))
goto err_i1;
- if (cmd & (IOC_IN | IOC_OUT)) {
- if (asize <= sizeof(stack_kdata)) {
- kdata = stack_kdata;
- } else {
- kdata = kmalloc(asize, GFP_KERNEL);
- if (!kdata) {
- retcode = -ENOMEM;
- goto err_i1;
- }
+ if (ksize <= sizeof(stack_kdata)) {
+ kdata = stack_kdata;
+ } else {
+ kdata = kmalloc(ksize, GFP_KERNEL);
+ if (!kdata) {
+ retcode = -ENOMEM;
+ goto err_i1;
}
- if (asize > usize)
- memset(kdata + usize, 0, asize - usize);
}
- if (cmd & IOC_IN) {
- if (copy_from_user(kdata, (void __user *)arg,
- usize) != 0) {
- retcode = -EFAULT;
- goto err_i1;
- }
- } else if (cmd & IOC_OUT) {
- memset(kdata, 0, usize);
+ if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) {
+ retcode = -EFAULT;
+ goto err_i1;
}
+ if (ksize > in_size)
+ memset(kdata + in_size, 0, ksize - in_size);
+
/* Enforce sane locking for kms driver ioctls. Core ioctls are
* too messy still. */
if ((drm_core_check_feature(dev, DRIVER_MODESET) && is_driver_ioctl) ||
@@ -718,11 +725,8 @@ long drm_ioctl(struct file *filp,
mutex_unlock(&drm_global_mutex);
}
- if (cmd & IOC_OUT) {
- if (copy_to_user((void __user *)arg, kdata,
- usize) != 0)
- retcode = -EFAULT;
- }
+ if (copy_to_user((void __user *)arg, kdata, out_size) != 0)
+ retcode = -EFAULT;
err_i1:
if (!ioctl)
@@ -749,7 +753,7 @@ EXPORT_SYMBOL(drm_ioctl);
* shouldn't be used by any drivers.
*
* Returns:
- * True if the @nr corresponds to a DRM core ioctl numer, false otherwise.
+ * True if the @nr corresponds to a DRM core ioctl number, false otherwise.
*/
bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
{
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 0fac801c1..77f357b2c 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -42,10 +42,6 @@
#include <linux/vgaarb.h>
#include <linux/export.h>
-/* Access macro for slots in vblank timestamp ringbuffer. */
-#define vblanktimestamp(dev, pipe, count) \
- ((dev)->vblank[pipe].time[(count) % DRM_VBLANKTIME_RBSIZE])
-
/* Retry timestamp calculation up to 3 times to satisfy
* drm_timestamp_precision before giving up.
*/
@@ -82,36 +78,18 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe,
struct timeval *t_vblank, u32 last)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
- u32 tslot;
assert_spin_locked(&dev->vblank_time_lock);
vblank->last = last;
- /* All writers hold the spinlock, but readers are serialized by
- * the latching of vblank->count below.
- */
- tslot = vblank->count + vblank_count_inc;
- vblanktimestamp(dev, pipe, tslot) = *t_vblank;
-
- /*
- * vblank timestamp updates are protected on the write side with
- * vblank_time_lock, but on the read side done locklessly using a
- * sequence-lock on the vblank counter. Ensure correct ordering using
- * memory barrriers. We need the barrier both before and also after the
- * counter update to synchronize with the next timestamp write.
- * The read-side barriers for this are in drm_vblank_count_and_time.
- */
- smp_wmb();
+ write_seqlock(&vblank->seqlock);
+ vblank->time = *t_vblank;
vblank->count += vblank_count_inc;
- smp_wmb();
+ write_sequnlock(&vblank->seqlock);
}
-/**
- * drm_reset_vblank_timestamp - reset the last timestamp to the last vblank
- * @dev: DRM device
- * @pipe: index of CRTC for which to reset the timestamp
- *
+/*
* Reset the stored timestamp for the current vblank count to correspond
* to the last vblank occurred.
*
@@ -155,11 +133,7 @@ static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe
spin_unlock(&dev->vblank_time_lock);
}
-/**
- * drm_update_vblank_count - update the master vblank counter
- * @dev: DRM device
- * @pipe: counter to update
- *
+/*
* Call back into the driver to update the appropriate vblank counter
* (specified by @pipe). Deal with wraparound, if it occurred, and
* update the last read value so we can deal with wraparound on the next
@@ -205,7 +179,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
const struct timeval *t_old;
u64 diff_ns;
- t_old = &vblanktimestamp(dev, pipe, vblank->count);
+ t_old = &vblank->time;
diff_ns = timeval_to_ns(&t_vblank) - timeval_to_ns(t_old);
/*
@@ -239,49 +213,6 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
diff = 1;
}
- /*
- * FIMXE: Need to replace this hack with proper seqlocks.
- *
- * Restrict the bump of the software vblank counter to a safe maximum
- * value of +1 whenever there is the possibility that concurrent readers
- * of vblank timestamps could be active at the moment, as the current
- * implementation of the timestamp caching and updating is not safe
- * against concurrent readers for calls to store_vblank() with a bump
- * of anything but +1. A bump != 1 would very likely return corrupted
- * timestamps to userspace, because the same slot in the cache could
- * be concurrently written by store_vblank() and read by one of those
- * readers without the read-retry logic detecting the collision.
- *
- * Concurrent readers can exist when we are called from the
- * drm_vblank_off() or drm_vblank_on() functions and other non-vblank-
- * irq callers. However, all those calls to us are happening with the
- * vbl_lock locked to prevent drm_vblank_get(), so the vblank refcount
- * can't increase while we are executing. Therefore a zero refcount at
- * this point is safe for arbitrary counter bumps if we are called
- * outside vblank irq, a non-zero count is not 100% safe. Unfortunately
- * we must also accept a refcount of 1, as whenever we are called from
- * drm_vblank_get() -> drm_vblank_enable() the refcount will be 1 and
- * we must let that one pass through in order to not lose vblank counts
- * during vblank irq off - which would completely defeat the whole
- * point of this routine.
- *
- * Whenever we are called from vblank irq, we have to assume concurrent
- * readers exist or can show up any time during our execution, even if
- * the refcount is currently zero, as vblank irqs are usually only
- * enabled due to the presence of readers, and because when we are called
- * from vblank irq we can't hold the vbl_lock to protect us from sudden
- * bumps in vblank refcount. Therefore also restrict bumps to +1 when
- * called from vblank irq.
- */
- if ((diff > 1) && (atomic_read(&vblank->refcount) > 1 ||
- (flags & DRM_CALLED_FROM_VBLIRQ))) {
- DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u "
- "refcount %u, vblirq %u\n", pipe, diff,
- atomic_read(&vblank->refcount),
- (flags & DRM_CALLED_FROM_VBLIRQ) != 0);
- diff = 1;
- }
-
DRM_DEBUG_VBL("updating vblank count on crtc %u:"
" current=%u, diff=%u, hw=%u hw_last=%u\n",
pipe, vblank->count, diff, cur_vblank, vblank->last);
@@ -303,6 +234,37 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
store_vblank(dev, pipe, diff, &t_vblank, cur_vblank);
}
+/**
+ * drm_accurate_vblank_count - retrieve the master vblank counter
+ * @crtc: which counter to retrieve
+ *
+ * This function is similar to drm_crtc_vblank_count() but
+ * interpolates to handle a race with vblank irqs.
+ *
+ * This is mostly useful for hardware that can obtain the scanout
+ * position, but doesn't have a frame counter.
+ */
+u32 drm_accurate_vblank_count(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = drm_crtc_index(crtc);
+ u32 vblank;
+ unsigned long flags;
+
+ WARN(!dev->driver->get_vblank_timestamp,
+ "This function requires support for accurate vblank timestamps.");
+
+ spin_lock_irqsave(&dev->vblank_time_lock, flags);
+
+ drm_update_vblank_count(dev, pipe, 0);
+ vblank = drm_vblank_count(dev, pipe);
+
+ spin_unlock_irqrestore(&dev->vblank_time_lock, flags);
+
+ return vblank;
+}
+EXPORT_SYMBOL(drm_accurate_vblank_count);
+
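A hedged usage sketch for the new helper; the surrounding driver code is illustrative, but per the WARN above it only makes sense when the driver implements get_vblank_timestamp:

static void example_finish_flip(struct drm_crtc *crtc)
{
	/* Re-samples the scanout position, so a flip completing racily
	 * against the vblank irq still reports the right frame. */
	u32 frame = drm_accurate_vblank_count(crtc);

	DRM_DEBUG_VBL("flip completed on frame %u\n", frame);
}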
/*
* Disable vblank irq's on crtc, make sure that last vblank count
* of hardware and corresponding consistent software vblank counter
@@ -417,6 +379,7 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
init_waitqueue_head(&vblank->queue);
setup_timer(&vblank->disable_timer, vblank_disable_fn,
(unsigned long)vblank);
+ seqlock_init(&vblank->seqlock);
}
DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
@@ -569,7 +532,7 @@ int drm_irq_uninstall(struct drm_device *dev)
/*
* Wake up any waiters so they don't hang. This is just to paper over
- * isssues for UMS drivers which aren't in full control of their
+ * issues for UMS drivers which aren't in full control of their
* vblank/irq handling. KMS drivers must ensure that vblanks are all
* disabled when uninstalling the irq handler.
*/
@@ -631,7 +594,7 @@ int drm_control(struct drm_device *dev, void *data,
return 0;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
- /* UMS was only ever support on pci devices. */
+ /* UMS was only ever supported on pci devices. */
if (WARN_ON(!dev->pdev))
return -EINVAL;
@@ -982,31 +945,24 @@ EXPORT_SYMBOL(drm_crtc_vblank_count);
*
* This is the legacy version of drm_crtc_vblank_count_and_time().
*/
-u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
- struct timeval *vblanktime)
+static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
+ struct timeval *vblanktime)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
- int count = DRM_TIMESTAMP_MAXRETRIES;
- u32 cur_vblank;
+ u32 vblank_count;
+ unsigned int seq;
if (WARN_ON(pipe >= dev->num_crtcs))
return 0;
- /*
- * Vblank timestamps are read lockless. To ensure consistency the vblank
- * counter is rechecked and ordering is ensured using memory barriers.
- * This works like a seqlock. The write-side barriers are in store_vblank.
- */
do {
- cur_vblank = vblank->count;
- smp_rmb();
- *vblanktime = vblanktimestamp(dev, pipe, cur_vblank);
- smp_rmb();
- } while (cur_vblank != vblank->count && --count > 0);
+ seq = read_seqbegin(&vblank->seqlock);
+ vblank_count = vblank->count;
+ *vblanktime = vblank->time;
+ } while (read_seqretry(&vblank->seqlock, seq));
- return cur_vblank;
+ return vblank_count;
}
-EXPORT_SYMBOL(drm_vblank_count_and_time);
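The write side in store_vblank() and this reader together form a plain seqlock pattern; a minimal self-contained sketch of the same idea (types are illustrative):

#include <linux/seqlock.h>
#include <linux/time.h>

struct example_vbl {
	seqlock_t lock;
	u32 count;
	struct timeval time;
};

static void example_store(struct example_vbl *v, const struct timeval *t)
{
	write_seqlock(&v->lock);	/* publish count+time atomically */
	v->time = *t;
	v->count++;
	write_sequnlock(&v->lock);
}

static u32 example_load(struct example_vbl *v, struct timeval *t)
{
	unsigned int seq;
	u32 count;

	do {				/* retry until a consistent pair */
		seq = read_seqbegin(&v->lock);
		count = v->count;
		*t = v->time;
	} while (read_seqretry(&v->lock, seq));

	return count;
}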
/**
* drm_crtc_vblank_count_and_time - retrieve "cooked" vblank counter value
@@ -1018,8 +974,6 @@ EXPORT_SYMBOL(drm_vblank_count_and_time);
* vblank events since the system was booted, including lost events due to
* modesetting activity. Returns corresponding system timestamp of the time
* of the vblank interval that corresponds to the current vblank counter value.
- *
- * This is the native KMS version of drm_vblank_count_and_time().
*/
u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
struct timeval *vblanktime)
@@ -1037,39 +991,11 @@ static void send_vblank_event(struct drm_device *dev,
e->event.tv_sec = now->tv_sec;
e->event.tv_usec = now->tv_usec;
- drm_send_event_locked(dev, &e->base);
-
trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
e->event.sequence);
-}
-
-/**
- * drm_arm_vblank_event - arm vblank event after pageflip
- * @dev: DRM device
- * @pipe: CRTC index
- * @e: the event to prepare to send
- *
- * A lot of drivers need to generate vblank events for the very next vblank
- * interrupt. For example when the page flip interrupt happens when the page
- * flip gets armed, but not when it actually executes within the next vblank
- * period. This helper function implements exactly the required vblank arming
- * behaviour.
- *
- * Caller must hold event lock. Caller must also hold a vblank reference for
- * the event @e, which will be dropped when the next vblank arrives.
- *
- * This is the legacy version of drm_crtc_arm_vblank_event().
- */
-void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe,
- struct drm_pending_vblank_event *e)
-{
- assert_spin_locked(&dev->event_lock);
- e->pipe = pipe;
- e->event.sequence = drm_vblank_count(dev, pipe);
- list_add_tail(&e->base.link, &dev->vblank_event_list);
+ drm_send_event_locked(dev, &e->base);
}
-EXPORT_SYMBOL(drm_arm_vblank_event);
/**
* drm_crtc_arm_vblank_event - arm vblank event after pageflip
@@ -1084,32 +1010,35 @@ EXPORT_SYMBOL(drm_arm_vblank_event);
*
* Caller must hold event lock. Caller must also hold a vblank reference for
* the event @e, which will be dropped when the next vblank arrives.
- *
- * This is the native KMS version of drm_arm_vblank_event().
*/
void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e)
{
- drm_arm_vblank_event(crtc->dev, drm_crtc_index(crtc), e);
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = drm_crtc_index(crtc);
+
+ assert_spin_locked(&dev->event_lock);
+
+ e->pipe = pipe;
+ e->event.sequence = drm_vblank_count(dev, pipe);
+ list_add_tail(&e->base.link, &dev->vblank_event_list);
}
EXPORT_SYMBOL(drm_crtc_arm_vblank_event);
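A hedged sketch of the intended call pattern from a driver's flip path; per the kernel-doc above, the caller must hold dev->event_lock and have taken a vblank reference that the armed event consumes:

static void example_arm_flip_event(struct drm_crtc *crtc,
				   struct drm_pending_vblank_event *event)
{
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	/* Reference is dropped when the armed event fires. */
	if (WARN_ON(drm_crtc_vblank_get(crtc) != 0))
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	drm_crtc_arm_vblank_event(crtc, event);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}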
/**
- * drm_send_vblank_event - helper to send vblank event after pageflip
- * @dev: DRM device
- * @pipe: CRTC index
+ * drm_crtc_send_vblank_event - helper to send vblank event after pageflip
+ * @crtc: the source CRTC of the vblank event
* @e: the event to send
*
* Updates sequence # and timestamp on event, and sends it to userspace.
* Caller must hold event lock.
- *
- * This is the legacy version of drm_crtc_send_vblank_event().
*/
-void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
- struct drm_pending_vblank_event *e)
+void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
+ struct drm_pending_vblank_event *e)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int seq, pipe = drm_crtc_index(crtc);
struct timeval now;
- unsigned int seq;
if (dev->num_crtcs > 0) {
seq = drm_vblank_count_and_time(dev, pipe, &now);
@@ -1121,23 +1050,6 @@ void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
e->pipe = pipe;
send_vblank_event(dev, e, seq, &now);
}
-EXPORT_SYMBOL(drm_send_vblank_event);
-
-/**
- * drm_crtc_send_vblank_event - helper to send vblank event after pageflip
- * @crtc: the source CRTC of the vblank event
- * @e: the event to send
- *
- * Updates sequence # and timestamp on event, and sends it to userspace.
- * Caller must hold event lock.
- *
- * This is the native KMS version of drm_send_vblank_event().
- */
-void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
- struct drm_pending_vblank_event *e)
-{
- drm_send_vblank_event(crtc->dev, drm_crtc_index(crtc), e);
-}
EXPORT_SYMBOL(drm_crtc_send_vblank_event);
/**
@@ -1193,7 +1105,7 @@ static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
* Returns:
* Zero on success or a negative error code on failure.
*/
-int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
+static int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
unsigned long irqflags;
@@ -1219,7 +1131,6 @@ int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
return ret;
}
-EXPORT_SYMBOL(drm_vblank_get);
/**
* drm_crtc_vblank_get - get a reference count on vblank events
@@ -1228,8 +1139,6 @@ EXPORT_SYMBOL(drm_vblank_get);
* Acquire a reference count on vblank events to avoid having them disabled
* while in use.
*
- * This is the native kms version of drm_vblank_get().
- *
* Returns:
* Zero on success or a negative error code on failure.
*/
@@ -1249,7 +1158,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_get);
*
* This is the legacy version of drm_crtc_vblank_put().
*/
-void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
+static void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
@@ -1270,7 +1179,6 @@ void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
jiffies + ((drm_vblank_offdelay * HZ)/1000));
}
}
-EXPORT_SYMBOL(drm_vblank_put);
/**
* drm_crtc_vblank_put - give up ownership of vblank events
@@ -1278,8 +1186,6 @@ EXPORT_SYMBOL(drm_vblank_put);
*
* Release ownership of a given vblank counter, turning off interrupts
* if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
- *
- * This is the native kms version of drm_vblank_put().
*/
void drm_crtc_vblank_put(struct drm_crtc *crtc)
{
@@ -1679,12 +1585,6 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
seq = drm_vblank_count_and_time(dev, pipe, &now);
- if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
- (seq - vblwait->request.sequence) <= (1 << 23)) {
- vblwait->request.sequence = seq + 1;
- vblwait->reply.sequence = vblwait->request.sequence;
- }
-
DRM_DEBUG("event on vblank count %d, current %d, crtc %u\n",
vblwait->request.sequence, seq, pipe);
@@ -1781,6 +1681,11 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
goto done;
}
+ if ((flags & _DRM_VBLANK_NEXTONMISS) &&
+ (seq - vblwait->request.sequence) <= (1 << 23)) {
+ vblwait->request.sequence = seq + 1;
+ }
+
if (flags & _DRM_VBLANK_EVENT) {
/* must hold on to the vblank ref until the event fires
* drm_vblank_put will be called asynchronously
@@ -1788,14 +1693,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
return drm_queue_vblank_event(dev, pipe, vblwait, file_priv);
}
- if ((flags & _DRM_VBLANK_NEXTONMISS) &&
- (seq - vblwait->request.sequence) <= (1<<23)) {
- vblwait->request.sequence = seq + 1;
- }
-
DRM_DEBUG("waiting on vblank count %d, crtc %u\n",
vblwait->request.sequence, pipe);
- vblank->last_wait = vblwait->request.sequence;
DRM_WAIT_ON(ret, vblank->queue, 3 * HZ,
(((drm_vblank_count(dev, pipe) -
vblwait->request.sequence) <= (1 << 23)) ||
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
index d3b6ee357..c6f422e87 100644
--- a/drivers/gpu/drm/drm_legacy.h
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -88,14 +88,10 @@ struct drm_agp_mem {
struct list_head head;
};
-/*
- * Generic Userspace Locking-API
- */
-
-int drm_legacy_i_have_hw_lock(struct drm_device *d, struct drm_file *f);
+/* drm_lock.c */
int drm_legacy_lock(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_unlock(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_lock_free(struct drm_lock_data *lock, unsigned int ctx);
+void drm_legacy_lock_release(struct drm_device *dev, struct file *filp);
/* DMA support */
int drm_legacy_dma_setup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index daa2ff121..48ac0ebbd 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -41,6 +41,110 @@
static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
/**
+ * Take the heavyweight lock.
+ *
+ * \param lock lock pointer.
+ * \param context locking context.
+ * \return one if the lock is held, or zero otherwise.
+ *
+ * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
+ */
+static
+int drm_lock_take(struct drm_lock_data *lock_data,
+ unsigned int context)
+{
+ unsigned int old, new, prev;
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+ spin_lock_bh(&lock_data->spinlock);
+ do {
+ old = *lock;
+ if (old & _DRM_LOCK_HELD)
+ new = old | _DRM_LOCK_CONT;
+ else {
+ new = context | _DRM_LOCK_HELD |
+ ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
+ _DRM_LOCK_CONT : 0);
+ }
+ prev = cmpxchg(lock, old, new);
+ } while (prev != old);
+ spin_unlock_bh(&lock_data->spinlock);
+
+ if (_DRM_LOCKING_CONTEXT(old) == context) {
+ if (old & _DRM_LOCK_HELD) {
+ if (context != DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("%d holds heavyweight lock\n",
+ context);
+ }
+ return 0;
+ }
+ }
+
+ if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
+ /* Have lock */
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * This takes a lock forcibly and hands it to context. Should ONLY be used
+ * inside *_unlock to give lock to kernel before calling *_dma_schedule.
+ *
+ * \param dev DRM device.
+ * \param lock lock pointer.
+ * \param context locking context.
+ * \return always one.
+ *
+ * Resets the lock file pointer.
+ * Marks the lock as held by the given context, via the \p cmpxchg instruction.
+ */
+static int drm_lock_transfer(struct drm_lock_data *lock_data,
+ unsigned int context)
+{
+ unsigned int old, new, prev;
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+ lock_data->file_priv = NULL;
+ do {
+ old = *lock;
+ new = context | _DRM_LOCK_HELD;
+ prev = cmpxchg(lock, old, new);
+ } while (prev != old);
+ return 1;
+}
+
+static int drm_legacy_lock_free(struct drm_lock_data *lock_data,
+ unsigned int context)
+{
+ unsigned int old, new, prev;
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+ spin_lock_bh(&lock_data->spinlock);
+ if (lock_data->kernel_waiters != 0) {
+ drm_lock_transfer(lock_data, 0);
+ lock_data->idle_has_lock = 1;
+ spin_unlock_bh(&lock_data->spinlock);
+ return 1;
+ }
+ spin_unlock_bh(&lock_data->spinlock);
+
+ do {
+ old = *lock;
+ new = _DRM_LOCKING_CONTEXT(old);
+ prev = cmpxchg(lock, old, new);
+ } while (prev != old);
+
+ if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
+ DRM_ERROR("%d freed heavyweight lock held by %d\n",
+ context, _DRM_LOCKING_CONTEXT(old));
+ return 1;
+ }
+ wake_up_interruptible(&lock_data->lock_queue);
+ return 0;
+}
+
+/**
* Lock ioctl.
*
* \param inode device inode.
@@ -115,7 +219,7 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
/* don't set the block all signals on the master process for now
* really probably not the correct answer but lets us debug xkb
* xserver for now */
- if (!file_priv->is_master) {
+ if (!drm_is_current_master(file_priv)) {
dev->sigdata.context = lock->context;
dev->sigdata.lock = master->lock.hw_lock;
}
@@ -165,120 +269,6 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
}
/**
- * Take the heavyweight lock.
- *
- * \param lock lock pointer.
- * \param context locking context.
- * \return one if the lock is held, or zero otherwise.
- *
- * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
- */
-static
-int drm_lock_take(struct drm_lock_data *lock_data,
- unsigned int context)
-{
- unsigned int old, new, prev;
- volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
- spin_lock_bh(&lock_data->spinlock);
- do {
- old = *lock;
- if (old & _DRM_LOCK_HELD)
- new = old | _DRM_LOCK_CONT;
- else {
- new = context | _DRM_LOCK_HELD |
- ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
- _DRM_LOCK_CONT : 0);
- }
- prev = cmpxchg(lock, old, new);
- } while (prev != old);
- spin_unlock_bh(&lock_data->spinlock);
-
- if (_DRM_LOCKING_CONTEXT(old) == context) {
- if (old & _DRM_LOCK_HELD) {
- if (context != DRM_KERNEL_CONTEXT) {
- DRM_ERROR("%d holds heavyweight lock\n",
- context);
- }
- return 0;
- }
- }
-
- if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
- /* Have lock */
- return 1;
- }
- return 0;
-}
-
-/**
- * This takes a lock forcibly and hands it to context. Should ONLY be used
- * inside *_unlock to give lock to kernel before calling *_dma_schedule.
- *
- * \param dev DRM device.
- * \param lock lock pointer.
- * \param context locking context.
- * \return always one.
- *
- * Resets the lock file pointer.
- * Marks the lock as held by the given context, via the \p cmpxchg instruction.
- */
-static int drm_lock_transfer(struct drm_lock_data *lock_data,
- unsigned int context)
-{
- unsigned int old, new, prev;
- volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
- lock_data->file_priv = NULL;
- do {
- old = *lock;
- new = context | _DRM_LOCK_HELD;
- prev = cmpxchg(lock, old, new);
- } while (prev != old);
- return 1;
-}
-
-/**
- * Free lock.
- *
- * \param dev DRM device.
- * \param lock lock.
- * \param context context.
- *
- * Resets the lock file pointer.
- * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
- * waiting on the lock queue.
- */
-int drm_legacy_lock_free(struct drm_lock_data *lock_data, unsigned int context)
-{
- unsigned int old, new, prev;
- volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
- spin_lock_bh(&lock_data->spinlock);
- if (lock_data->kernel_waiters != 0) {
- drm_lock_transfer(lock_data, 0);
- lock_data->idle_has_lock = 1;
- spin_unlock_bh(&lock_data->spinlock);
- return 1;
- }
- spin_unlock_bh(&lock_data->spinlock);
-
- do {
- old = *lock;
- new = _DRM_LOCKING_CONTEXT(old);
- prev = cmpxchg(lock, old, new);
- } while (prev != old);
-
- if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
- DRM_ERROR("%d freed heavyweight lock held by %d\n",
- context, _DRM_LOCKING_CONTEXT(old));
- return 1;
- }
- wake_up_interruptible(&lock_data->lock_queue);
- return 0;
-}
-
-/**
* This function returns immediately and takes the hw lock
* with the kernel context if it is free, otherwise it gets the highest priority when and if
* it is eventually released.
@@ -330,11 +320,27 @@ void drm_legacy_idlelock_release(struct drm_lock_data *lock_data)
}
EXPORT_SYMBOL(drm_legacy_idlelock_release);
-int drm_legacy_i_have_hw_lock(struct drm_device *dev,
- struct drm_file *file_priv)
+static int drm_legacy_i_have_hw_lock(struct drm_device *dev,
+ struct drm_file *file_priv)
{
struct drm_master *master = file_priv->master;
return (file_priv->lock_count && master->lock.hw_lock &&
_DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
master->lock.file_priv == file_priv);
}
+
+void drm_legacy_lock_release(struct drm_device *dev, struct file *filp)
+{
+ struct drm_file *file_priv = filp->private_data;
+
+ /* if the master has gone away we can't do anything with the lock */
+ if (!dev->master)
+ return;
+
+ if (drm_legacy_i_have_hw_lock(dev, file_priv)) {
+ DRM_DEBUG("File %p released, freeing lock for context %d\n",
+ filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
+ drm_legacy_lock_free(&file_priv->master->lock,
+ _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
+ }
+}
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 87a8cb733..fc0ebd273 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -44,7 +44,7 @@
# include <asm/agp.h>
#else
# ifdef __powerpc__
-# define PAGE_AGP __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
+# define PAGE_AGP pgprot_noncached_wc(PAGE_KERNEL)
# else
# define PAGE_AGP PAGE_KERNEL
# endif
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index f5d80839a..af0d471ee 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -60,6 +60,21 @@ static int mipi_dsi_device_match(struct device *dev, struct device_driver *drv)
return 0;
}
+static int mipi_dsi_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+ int err;
+
+ err = of_device_uevent_modalias(dev, env);
+ if (err != -ENODEV)
+ return err;
+
+ add_uevent_var(env, "MODALIAS=%s%s", MIPI_DSI_MODULE_PREFIX,
+ dsi->name);
+
+ return 0;
+}
+
static const struct dev_pm_ops mipi_dsi_device_pm_ops = {
.runtime_suspend = pm_generic_runtime_suspend,
.runtime_resume = pm_generic_runtime_resume,
@@ -74,6 +89,7 @@ static const struct dev_pm_ops mipi_dsi_device_pm_ops = {
static struct bus_type mipi_dsi_bus_type = {
.name = "mipi-dsi",
.match = mipi_dsi_device_match,
+ .uevent = mipi_dsi_uevent,
.pm = &mipi_dsi_device_pm_ops,
};
@@ -983,6 +999,28 @@ int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on);
/**
+ * mipi_dsi_dcs_set_tear_scanline() - set the scanline to use as trigger for
+ * the Tearing Effect output signal of the display module
+ * @dsi: DSI peripheral device
+ * @scanline: scanline to use as trigger
+ *
+ * Return: 0 on success or a negative error code on failure
+ */
+int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline)
+{
+ u8 payload[3] = { MIPI_DCS_SET_TEAR_SCANLINE, scanline >> 8,
+ scanline & 0xff };
+ ssize_t err;
+
+ err = mipi_dsi_generic_write(dsi, payload, sizeof(payload));
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_scanline);
+
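A hedged sketch of how a panel driver might pair the new helper with the existing tear-on call; the scanline value is panel-specific and purely illustrative here:

static int example_panel_enable_te(struct mipi_dsi_device *dsi)
{
	int ret;

	ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
	if (ret < 0)
		return ret;

	/* Trigger TE at scanline 0, i.e. the start of vblank. */
	return mipi_dsi_dcs_set_tear_scanline(dsi, 0);
}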
+/**
* mipi_dsi_dcs_set_pixel_format() - sets the pixel format for the RGB image
* data used by the interface
* @dsi: DSI peripheral device
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 04de6fd88..cb39f45d6 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -179,12 +179,14 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
struct drm_mm_node *hole;
- u64 end = node->start + node->size;
+ u64 end;
u64 hole_start;
u64 hole_end;
BUG_ON(node == NULL);
+ end = node->start + node->size;
+
/* Find the relevant hole to add our node to */
drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
if (hole_start > node->start || hole_end < end)
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index e5e6f504d..fc5040ae5 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -544,6 +544,7 @@ EXPORT_SYMBOL(drm_gtf_mode_complex);
*
* This function is to create the modeline based on the GTF algorithm.
* Generalized Timing Formula is derived from:
+ *
* GTF Spreadsheet by Andy Morrish (1/5/97)
* available at http://www.vesa.org
*
@@ -552,7 +553,8 @@ EXPORT_SYMBOL(drm_gtf_mode_complex);
* I also refer to the function of fb_get_mode in the file of
* drivers/video/fbmon.c
*
- * Standard GTF parameters:
+ * Standard GTF parameters::
+ *
* M = 600
* C = 40
* K = 128
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index e3a4adf03..61146f5b4 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -30,14 +30,14 @@
*
* As KMS moves toward more fine grained locking, and atomic ioctl where
* userspace can indirectly control locking order, it becomes necessary
- * to use ww_mutex and acquire-contexts to avoid deadlocks. But because
+ * to use &ww_mutex and acquire-contexts to avoid deadlocks. But because
* the locking is more distributed around the driver code, we want a bit
* of extra utility/tracking out of our acquire-ctx. This is provided
* by drm_modeset_lock / drm_modeset_acquire_ctx.
*
- * For basic principles of ww_mutex, see: Documentation/locking/ww-mutex-design.txt
+ * For basic principles of &ww_mutex, see: Documentation/locking/ww-mutex-design.txt
*
- * The basic usage pattern is to:
+ * The basic usage pattern is to::
*
* drm_modeset_acquire_init(&ctx)
* retry:
@@ -51,6 +51,13 @@
* ... do stuff ...
* drm_modeset_drop_locks(&ctx);
* drm_modeset_acquire_fini(&ctx);
+ *
+ * On top of these per-object locks using &ww_mutex there's also an overall
+ * dev->mode_config.lock, for protecting everything else. Mostly this means
+ * probe state of connectors, and preventing hotplug add/removal of connectors.
+ *
+ * Finally there's a bunch of dedicated locks to protect drm core internal
+ * lists and lookup data structures.
*/
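Fleshing out the pattern above with a hedged, self-contained sketch: the only non-obvious step is that -EDEADLK from any lock means dropping everything via drm_modeset_backoff() and retrying with the same acquire context:

static int example_lock_everything(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);	/* drop locks, then retry */
		goto retry;
	}

	/* ... do stuff ... */

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;
}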
/**
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 29d5a548d..b2f8f1062 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -144,50 +144,6 @@ int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
}
EXPORT_SYMBOL(drm_pci_set_busid);
-int drm_pci_set_unique(struct drm_device *dev,
- struct drm_master *master,
- struct drm_unique *u)
-{
- int domain, bus, slot, func, ret;
-
- master->unique_len = u->unique_len;
- master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
- if (!master->unique) {
- ret = -ENOMEM;
- goto err;
- }
-
- if (copy_from_user(master->unique, u->unique, master->unique_len)) {
- ret = -EFAULT;
- goto err;
- }
-
- master->unique[master->unique_len] = '\0';
-
- /* Return error if the busid submitted doesn't match the device's actual
- * busid.
- */
- ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
- if (ret != 3) {
- ret = -EINVAL;
- goto err;
- }
-
- domain = bus >> 8;
- bus &= 0xff;
-
- if ((domain != drm_get_pci_domain(dev)) ||
- (bus != dev->pdev->bus->number) ||
- (slot != PCI_SLOT(dev->pdev->devfn)) ||
- (func != PCI_FUNC(dev->pdev->devfn))) {
- ret = -EINVAL;
- goto err;
- }
- return 0;
-err:
- return ret;
-}
-
static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
@@ -444,13 +400,6 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
{
return -EINVAL;
}
-
-int drm_pci_set_unique(struct drm_device *dev,
- struct drm_master *master,
- struct drm_unique *u)
-{
- return -EINVAL;
-}
#endif
EXPORT_SYMBOL(drm_pci_init);
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 369d2898f..16c4a7bd7 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -115,6 +115,7 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
* @src: source coordinates in 16.16 fixed point
* @dest: integer destination coordinates
* @clip: integer clipping coordinates
+ * @rotation: plane rotation
* @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
* @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
* @can_position: is it legal to position the plane such that it
@@ -134,16 +135,17 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
* Zero if update appears valid, error code on failure
*/
int drm_plane_helper_check_update(struct drm_plane *plane,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_rect *src,
- struct drm_rect *dest,
- const struct drm_rect *clip,
- int min_scale,
- int max_scale,
- bool can_position,
- bool can_update_disabled,
- bool *visible)
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_rect *src,
+ struct drm_rect *dest,
+ const struct drm_rect *clip,
+ unsigned int rotation,
+ int min_scale,
+ int max_scale,
+ bool can_position,
+ bool can_update_disabled,
+ bool *visible)
{
int hscale, vscale;
@@ -163,6 +165,8 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
return -EINVAL;
}
+ drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);
+
/* Check scaling */
hscale = drm_rect_calc_hscale(src, dest, min_scale, max_scale);
vscale = drm_rect_calc_vscale(src, dest, min_scale, max_scale);
@@ -174,6 +178,9 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
}
*visible = drm_rect_clip_scaled(src, dest, clip, hscale, vscale);
+
+ drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);
+
if (!*visible)
/*
* Plane isn't visible; some drivers can handle this
@@ -219,10 +226,12 @@ EXPORT_SYMBOL(drm_plane_helper_check_update);
*
* Note that we make some assumptions about hardware limitations that may not be
* true for all hardware --
- * 1) Primary plane cannot be repositioned.
- * 2) Primary plane cannot be scaled.
- * 3) Primary plane must cover the entire CRTC.
- * 4) Subpixel positioning is not supported.
+ *
+ * 1. Primary plane cannot be repositioned.
+ * 2. Primary plane cannot be scaled.
+ * 3. Primary plane must cover the entire CRTC.
+ * 4. Subpixel positioning is not supported.
+ *
* Drivers for hardware that don't have these restrictions can provide their
* own implementation rather than using this helper.
*
@@ -265,6 +274,7 @@ int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
ret = drm_plane_helper_check_update(plane, crtc, fb,
&src, &dest, &clip,
+ BIT(DRM_ROTATE_0),
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
false, false, &visible);
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index 644169e1a..2c819ef90 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -68,24 +68,6 @@ err_free:
return ret;
}
-int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master)
-{
- int id;
-
- id = dev->platformdev->id;
- if (id < 0)
- id = 0;
-
- master->unique = kasprintf(GFP_KERNEL, "platform:%s:%02d",
- dev->platformdev->name, id);
- if (!master->unique)
- return -ENOMEM;
-
- master->unique_len = strlen(master->unique);
- return 0;
-}
-EXPORT_SYMBOL(drm_platform_set_busid);
-
/**
* drm_platform_init - Register a platform device with the DRM subsystem
* @driver: DRM device driver
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index aab0f3f1f..780589b42 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -593,7 +593,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
get_dma_buf(dma_buf);
}
- /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
+ /* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
ret = drm_gem_handle_create_tail(file_priv, obj, handle);
drm_gem_object_unreference_unlocked(obj);
if (ret)
@@ -601,11 +601,10 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
ret = drm_prime_add_buf_handle(&file_priv->prime,
dma_buf, *handle);
+ mutex_unlock(&file_priv->prime.lock);
if (ret)
goto fail;
- mutex_unlock(&file_priv->prime.lock);
-
dma_buf_put(dma_buf);
return 0;
@@ -615,11 +614,14 @@ fail:
* to detach.. which seems ok..
*/
drm_gem_handle_delete(file_priv, *handle);
+ dma_buf_put(dma_buf);
+ return ret;
+
out_unlock:
mutex_unlock(&dev->object_name_lock);
out_put:
- dma_buf_put(dma_buf);
mutex_unlock(&file_priv->prime.lock);
+ dma_buf_put(dma_buf);
return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 0329080d7..a0df377d7 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -82,13 +82,30 @@ drm_mode_validate_flag(const struct drm_display_mode *mode,
static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
{
+ struct drm_cmdline_mode *cmdline_mode;
struct drm_display_mode *mode;
- if (!connector->cmdline_mode.specified)
+ cmdline_mode = &connector->cmdline_mode;
+ if (!cmdline_mode->specified)
return 0;
+ /* Only add a GTF mode if we find no matching probed modes */
+ list_for_each_entry(mode, &connector->probed_modes, head) {
+ if (mode->hdisplay != cmdline_mode->xres ||
+ mode->vdisplay != cmdline_mode->yres)
+ continue;
+
+ if (cmdline_mode->refresh_specified) {
+ /* The probed mode's vrefresh isn't set until later */
+ if (drm_mode_vrefresh(mode) != cmdline_mode->refresh)
+ continue;
+ }
+
+ return 0;
+ }
+
mode = drm_mode_create_from_cmdline_mode(connector->dev,
- &connector->cmdline_mode);
+ cmdline_mode);
if (mode == NULL)
return 0;
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index 4f0f3b36d..bf7043107 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -41,7 +41,7 @@
static inline void *drm_vmalloc_dma(unsigned long size)
{
#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
- return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE);
+ return __vmalloc(size, GFP_KERNEL, pgprot_noncached_wc(PAGE_KERNEL));
#else
return vmalloc_32(size);
#endif
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
new file mode 100644
index 000000000..0db36d27e
--- /dev/null
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright (C) 2016 Noralf Trønnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <linux/slab.h>
+
+/**
+ * DOC: overview
+ *
+ * This helper library provides helpers for drivers for simple display
+ * hardware.
+ *
+ * drm_simple_display_pipe_init() initializes a simple display pipeline
+ * which has only one full-screen scanout buffer feeding one output. The
+ * pipeline is represented by struct &drm_simple_display_pipe and binds
+ * together &drm_plane, &drm_crtc and &drm_encoder structures into one fixed
+ * entity. Some flexibility for code reuse is provided through a separately
+ * allocated &drm_connector object and supporting optional &drm_bridge
+ * encoder drivers.
+ */
+
+static const struct drm_encoder_funcs drm_simple_kms_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc)
+{
+ struct drm_simple_display_pipe *pipe;
+
+ pipe = container_of(crtc, struct drm_simple_display_pipe, crtc);
+ if (!pipe->funcs || !pipe->funcs->enable)
+ return;
+
+ pipe->funcs->enable(pipe, crtc->state);
+}
+
+static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc)
+{
+ struct drm_simple_display_pipe *pipe;
+
+ pipe = container_of(crtc, struct drm_simple_display_pipe, crtc);
+ if (!pipe->funcs || !pipe->funcs->disable)
+ return;
+
+ pipe->funcs->disable(pipe);
+}
+
+static const struct drm_crtc_helper_funcs drm_simple_kms_crtc_helper_funcs = {
+ .disable = drm_simple_kms_crtc_disable,
+ .enable = drm_simple_kms_crtc_enable,
+};
+
+static const struct drm_crtc_funcs drm_simple_kms_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *plane_state)
+{
+ struct drm_rect src = {
+ .x1 = plane_state->src_x,
+ .y1 = plane_state->src_y,
+ .x2 = plane_state->src_x + plane_state->src_w,
+ .y2 = plane_state->src_y + plane_state->src_h,
+ };
+ struct drm_rect dest = {
+ .x1 = plane_state->crtc_x,
+ .y1 = plane_state->crtc_y,
+ .x2 = plane_state->crtc_x + plane_state->crtc_w,
+ .y2 = plane_state->crtc_y + plane_state->crtc_h,
+ };
+ struct drm_rect clip = { 0 };
+ struct drm_simple_display_pipe *pipe;
+ struct drm_crtc_state *crtc_state;
+ bool visible;
+ int ret;
+
+ pipe = container_of(plane, struct drm_simple_display_pipe, plane);
+ crtc_state = drm_atomic_get_existing_crtc_state(plane_state->state,
+ &pipe->crtc);
+ if (crtc_state->enable != !!plane_state->crtc)
+ return -EINVAL; /* plane must match crtc enable state */
+
+ if (!crtc_state->enable)
+ return 0; /* nothing to check when disabling or disabled */
+
+ clip.x2 = crtc_state->adjusted_mode.hdisplay;
+ clip.y2 = crtc_state->adjusted_mode.vdisplay;
+ ret = drm_plane_helper_check_update(plane, &pipe->crtc,
+ plane_state->fb,
+ &src, &dest, &clip,
+ plane_state->rotation,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true, &visible);
+ if (ret)
+ return ret;
+
+ if (!visible)
+ return -EINVAL;
+
+ if (!pipe->funcs || !pipe->funcs->check)
+ return 0;
+
+ return pipe->funcs->check(pipe, plane_state, crtc_state);
+}
+
+static void drm_simple_kms_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *pstate)
+{
+ struct drm_simple_display_pipe *pipe;
+
+ pipe = container_of(plane, struct drm_simple_display_pipe, plane);
+ if (!pipe->funcs || !pipe->funcs->update)
+ return;
+
+ pipe->funcs->update(pipe, pstate);
+}
+
+static const struct drm_plane_helper_funcs drm_simple_kms_plane_helper_funcs = {
+ .atomic_check = drm_simple_kms_plane_atomic_check,
+ .atomic_update = drm_simple_kms_plane_atomic_update,
+};
+
+static const struct drm_plane_funcs drm_simple_kms_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+/**
+ * drm_simple_display_pipe_init - Initialize a simple display pipeline
+ * @dev: DRM device
+ * @pipe: simple display pipe object to initialize
+ * @funcs: callbacks for the display pipe (optional)
+ * @formats: array of supported formats (%DRM_FORMAT_*)
+ * @format_count: number of elements in @formats
+ * @connector: connector to attach and register
+ *
+ * Sets up a display pipeline which consists of a really simple
+ * plane-crtc-encoder pipe coupled with the provided connector.
+ * Teardown of a simple display pipe is all handled automatically by the drm
+ * core through calling drm_mode_config_cleanup(). Drivers afterwards need to
+ * release the memory for the structure themselves.
+ *
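+ * Example (a minimal usage sketch; the foo_* names are hypothetical)::
+ *
+ *     static const uint32_t foo_formats[] = { DRM_FORMAT_XRGB8888 };
+ *
+ *     ret = drm_simple_display_pipe_init(drm, &foo->pipe, &foo_pipe_funcs,
+ *                                        foo_formats,
+ *                                        ARRAY_SIZE(foo_formats),
+ *                                        &foo->connector);
+ *     if (ret)
+ *             return ret;
+ *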
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int drm_simple_display_pipe_init(struct drm_device *dev,
+ struct drm_simple_display_pipe *pipe,
+ const struct drm_simple_display_pipe_funcs *funcs,
+ const uint32_t *formats, unsigned int format_count,
+ struct drm_connector *connector)
+{
+ struct drm_encoder *encoder = &pipe->encoder;
+ struct drm_plane *plane = &pipe->plane;
+ struct drm_crtc *crtc = &pipe->crtc;
+ int ret;
+
+ pipe->connector = connector;
+ pipe->funcs = funcs;
+
+ drm_plane_helper_add(plane, &drm_simple_kms_plane_helper_funcs);
+ ret = drm_universal_plane_init(dev, plane, 0,
+ &drm_simple_kms_plane_funcs,
+ formats, format_count,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ret;
+
+ drm_crtc_helper_add(crtc, &drm_simple_kms_crtc_helper_funcs);
+ ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
+ &drm_simple_kms_crtc_funcs, NULL);
+ if (ret)
+ return ret;
+
+ encoder->possible_crtcs = 1 << drm_crtc_index(crtc);
+ ret = drm_encoder_init(dev, encoder, &drm_simple_kms_encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret)
+ return ret;
+
+ return drm_mode_connector_attach_encoder(connector, encoder);
+}
+EXPORT_SYMBOL(drm_simple_display_pipe_init);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index fa7fadce8..32dd821b7 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -32,75 +32,6 @@ static struct device_type drm_sysfs_device_minor = {
struct class *drm_class;
-/**
- * __drm_class_suspend - internal DRM class suspend routine
- * @dev: Linux device to suspend
- * @state: power state to enter
- *
- * Just figures out what the actual struct drm_device associated with
- * @dev is and calls its suspend hook, if present.
- */
-static int __drm_class_suspend(struct device *dev, pm_message_t state)
-{
- if (dev->type == &drm_sysfs_device_minor) {
- struct drm_minor *drm_minor = to_drm_minor(dev);
- struct drm_device *drm_dev = drm_minor->dev;
-
- if (drm_minor->type == DRM_MINOR_LEGACY &&
- !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
- drm_dev->driver->suspend)
- return drm_dev->driver->suspend(drm_dev, state);
- }
- return 0;
-}
-
-/**
- * drm_class_suspend - internal DRM class suspend hook. Simply calls
- * __drm_class_suspend() with the correct pm state.
- * @dev: Linux device to suspend
- */
-static int drm_class_suspend(struct device *dev)
-{
- return __drm_class_suspend(dev, PMSG_SUSPEND);
-}
-
-/**
- * drm_class_freeze - internal DRM class freeze hook. Simply calls
- * __drm_class_suspend() with the correct pm state.
- * @dev: Linux device to freeze
- */
-static int drm_class_freeze(struct device *dev)
-{
- return __drm_class_suspend(dev, PMSG_FREEZE);
-}
-
-/**
- * drm_class_resume - DRM class resume hook
- * @dev: Linux device to resume
- *
- * Just figures out what the actual struct drm_device associated with
- * @dev is and calls its resume hook, if present.
- */
-static int drm_class_resume(struct device *dev)
-{
- if (dev->type == &drm_sysfs_device_minor) {
- struct drm_minor *drm_minor = to_drm_minor(dev);
- struct drm_device *drm_dev = drm_minor->dev;
-
- if (drm_minor->type == DRM_MINOR_LEGACY &&
- !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
- drm_dev->driver->resume)
- return drm_dev->driver->resume(drm_dev);
- }
- return 0;
-}
-
-static const struct dev_pm_ops drm_class_dev_pm_ops = {
- .suspend = drm_class_suspend,
- .resume = drm_class_resume,
- .freeze = drm_class_freeze,
-};
-
static char *drm_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
@@ -131,8 +62,6 @@ int drm_sysfs_init(void)
if (IS_ERR(drm_class))
return PTR_ERR(drm_class);
- drm_class->pm = &drm_class_dev_pm_ops;
-
err = class_create_file(drm_class, &class_attr_version.attr);
if (err) {
class_destroy(drm_class);
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index ac9f4b3ec..caa4e4ca6 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -80,7 +80,7 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
- tmp |= _PAGE_NO_CACHE;
+ tmp = pgprot_noncached_wc(tmp);
#endif
return tmp;
}
@@ -593,7 +593,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
* pages and mappings in fault()
*/
#if defined(__powerpc__)
- pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
vma->vm_ops = &drm_vm_ops;
break;
@@ -670,57 +670,3 @@ void drm_legacy_vma_flush(struct drm_device *dev)
kfree(vma);
}
}
-
-int drm_vma_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_vma_entry *pt;
- struct vm_area_struct *vma;
- unsigned long vma_count = 0;
-#if defined(__i386__)
- unsigned int pgprot;
-#endif
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry(pt, &dev->vmalist, head)
- vma_count++;
-
- seq_printf(m, "vma use count: %lu, high_memory = %pK, 0x%pK\n",
- vma_count, high_memory,
- (void *)(unsigned long)virt_to_phys(high_memory));
-
- list_for_each_entry(pt, &dev->vmalist, head) {
- vma = pt->vma;
- if (!vma)
- continue;
- seq_printf(m,
- "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000",
- pt->pid,
- (void *)vma->vm_start, (void *)vma->vm_end,
- vma->vm_flags & VM_READ ? 'r' : '-',
- vma->vm_flags & VM_WRITE ? 'w' : '-',
- vma->vm_flags & VM_EXEC ? 'x' : '-',
- vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
- vma->vm_flags & VM_LOCKED ? 'l' : '-',
- vma->vm_flags & VM_IO ? 'i' : '-',
- vma->vm_pgoff);
-
-#if defined(__i386__)
- pgprot = pgprot_val(vma->vm_page_prot);
- seq_printf(m, " %c%c%c%c%c%c%c%c%c",
- pgprot & _PAGE_PRESENT ? 'p' : '-',
- pgprot & _PAGE_RW ? 'w' : 'r',
- pgprot & _PAGE_USER ? 'u' : 's',
- pgprot & _PAGE_PWT ? 't' : 'b',
- pgprot & _PAGE_PCD ? 'u' : 'c',
- pgprot & _PAGE_ACCESSED ? 'a' : '-',
- pgprot & _PAGE_DIRTY ? 'd' : '-',
- pgprot & _PAGE_PSE ? 'm' : 'k',
- pgprot & _PAGE_GLOBAL ? 'g' : 'l');
-#endif
- seq_printf(m, "\n");
- }
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index 2f2ecde82..f306c8855 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -127,6 +127,9 @@ EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
* used to implement weakly referenced lookups using kref_get_unless_zero().
*
* Example:
+ *
+ * ::
+ *
* drm_vma_offset_lock_lookup(mgr);
* node = drm_vma_offset_lookup_locked(mgr);
* if (node)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 3d4f56df8..ffd1b32ca 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -91,10 +91,8 @@ static void load_gpu(struct drm_device *dev)
int ret;
ret = etnaviv_gpu_init(g);
- if (ret) {
- dev_err(g->dev, "hw init failed: %d\n", ret);
+ if (ret)
priv->gpu[i] = NULL;
- }
}
}
}
@@ -496,7 +494,6 @@ static struct drm_driver etnaviv_drm_driver = {
DRIVER_RENDER,
.open = etnaviv_open,
.preclose = etnaviv_preclose,
- .set_busid = drm_platform_set_busid,
.gem_free_object_unlocked = etnaviv_gem_free_object,
.gem_vm_ops = &vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index df9bcbab9..5ce3603e6 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -535,8 +535,7 @@ void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
- if (etnaviv_obj->vaddr)
- vunmap(etnaviv_obj->vaddr);
+ vunmap(etnaviv_obj->vaddr);
put_pages(etnaviv_obj);
}
@@ -660,7 +659,7 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
* why this is required _and_ expected if you're
* going to pin these pages.
*/
- mapping = file_inode(obj->filp)->i_mapping;
+ mapping = obj->filp->f_mapping;
mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
}
@@ -670,9 +669,7 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
return obj;
fail:
- if (obj)
- drm_gem_object_unreference_unlocked(obj);
-
+ drm_gem_object_unreference_unlocked(obj);
return ERR_PTR(ret);
}
@@ -916,15 +913,12 @@ int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
get_task_struct(current);
ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
- if (ret) {
- drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
- return ret;
- }
+ if (ret)
+ goto unreference;
ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);
-
+unreference:
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
-
return ret;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index ff6aa5dfb..b382cf505 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -487,6 +487,47 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
return 0;
}
+static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
+{
+ u32 pmc, ppc;
+
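+ /*
+ * Module-level clock gating lets idle GPU sub-blocks gate their own
+ * clocks. Known-broken revisions handled below must keep specific
+ * blocks ungated, so the matching disable bits are set as well.
+ */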
+ /* enable clock gating */
+ ppc = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
+ ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
+
+ /* Disable stall module clock gating for 4.3.0.1 and 4.3.0.2 revs */
+ if (gpu->identity.revision == 0x4301 ||
+ gpu->identity.revision == 0x4302)
+ ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING;
+
+ gpu_write(gpu, VIVS_PM_POWER_CONTROLS, ppc);
+
+ pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS);
+
+ /* Disable PA clock gating for GC400+ except for GC420 */
+ if (gpu->identity.model >= chipModel_GC400 &&
+ gpu->identity.model != chipModel_GC420)
+ pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA;
+
+ /*
+ * Disable PE clock gating on revs < 5.0.0.0 when HZ is
+ * present without a bug fix.
+ */
+ if (gpu->identity.revision < 0x5000 &&
+ gpu->identity.minor_features0 & chipMinorFeatures0_HZ &&
+ !(gpu->identity.minor_features1 &
+ chipMinorFeatures1_DISABLE_PE_GATING))
+ pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE;
+
+ if (gpu->identity.revision < 0x5422)
+ pmc |= BIT(15); /* Unknown bit */
+
+ pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ;
+ pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ;
+
+ gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
+}
+
static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
{
u16 prefetch;
@@ -506,6 +547,9 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
}
+ /* enable module-level clock gating */
+ etnaviv_gpu_enable_mlcg(gpu);
+
/*
 * Update GPU AXI cache attribute to "cacheable, no allocate".
 * This is necessary to prevent the iMX6 SoC from locking up.
@@ -553,8 +597,10 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
bool mmuv2;
ret = pm_runtime_get_sync(gpu->dev);
- if (ret < 0)
+ if (ret < 0) {
+ dev_err(gpu->dev, "Failed to enable GPU power domain\n");
return ret;
+ }
etnaviv_hw_identify(gpu);
@@ -591,8 +637,10 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
}
ret = etnaviv_hw_reset(gpu);
- if (ret)
+ if (ret) {
+ dev_err(gpu->dev, "GPU reset failed\n");
goto fail;
+ }
/* Setup IOMMU.. eventually we will (I think) do this once per context
* and have separate page tables per context. For now, to keep things
@@ -610,12 +658,14 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
}
if (!iommu) {
+ dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
ret = -ENOMEM;
goto fail;
}
gpu->mmu = etnaviv_iommu_new(gpu, iommu, version);
if (!gpu->mmu) {
+ dev_err(gpu->dev, "Failed to instantiate GPU IOMMU\n");
iommu_domain_free(iommu);
ret = -ENOMEM;
goto fail;
@@ -1283,8 +1333,6 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
if (ret < 0)
return ret;
- mutex_lock(&gpu->lock);
-
/*
* TODO
*
@@ -1298,16 +1346,18 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
if (unlikely(event == ~0U)) {
DRM_ERROR("no free event\n");
ret = -EBUSY;
- goto out_unlock;
+ goto out_pm_put;
}
fence = etnaviv_gpu_fence_alloc(gpu);
if (!fence) {
event_free(gpu, event);
ret = -ENOMEM;
- goto out_unlock;
+ goto out_pm_put;
}
+ mutex_lock(&gpu->lock);
+
gpu->event[event].fence = fence;
submit->fence = fence->seqno;
gpu->active_fence = submit->fence;
@@ -1345,9 +1395,9 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
hangcheck_timer_reset(gpu);
ret = 0;
-out_unlock:
mutex_unlock(&gpu->lock);
+out_pm_put:
etnaviv_gpu_pm_put(gpu);
return ret;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index f5321e2f2..a69cdd526 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -125,7 +125,7 @@ struct etnaviv_gpu {
u32 completed_fence;
u32 retired_fence;
wait_queue_head_t fence_event;
- unsigned int fence_context;
+ u64 fence_context;
spinlock_t fence_spinlock;
/* worker for handling active-list retiring: */
diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h
index 6a7de5f14..807a3d9e0 100644
--- a/drivers/gpu/drm/etnaviv/state_hi.xml.h
+++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h
@@ -218,6 +218,13 @@ Copyright (C) 2015
#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_FE 0x00000001
#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_DE 0x00000002
#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE 0x00000004
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_SH 0x00000008
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA 0x00000010
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_SE 0x00000020
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA 0x00000040
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX 0x00000080
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ 0x00010000
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ 0x00020000
#define VIVS_PM_MODULE_STATUS 0x00000108
#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_FE 0x00000001
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index d814b3048..83f61c513 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -2,10 +2,6 @@ config DRM_EXYNOS
tristate "DRM Support for Samsung SoC EXYNOS Series"
depends on OF && DRM && (ARCH_S3C64XX || ARCH_EXYNOS || ARCH_MULTIPLATFORM)
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
select VIDEOMODE_HELPERS
help
Choose this option if you have a Samsung SoC EXYNOS chipset.
@@ -15,7 +11,7 @@ if DRM_EXYNOS
config DRM_EXYNOS_IOMMU
bool
- depends on EXYNOS_IOMMU && ARM_DMA_USE_IOMMU
+ depends on EXYNOS_IOMMU
default y
comment "CRTCs"
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 4c1fb3f8b..4f0850585 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -67,10 +67,10 @@ static int exynos_dp_poweroff(struct analogix_dp_plat_data *plat_data)
return exynos_dp_crtc_clock_enable(plat_data, false);
}
-static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data)
+static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data,
+ struct drm_connector *connector)
{
struct exynos_dp_device *dp = to_dp(plat_data);
- struct drm_connector *connector = dp->connector;
struct drm_display_mode *mode;
int num_modes = 0;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 5e38e749a..ad6b73c7f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -93,17 +93,8 @@ static int exynos_dpi_get_modes(struct drm_connector *connector)
return 0;
}
-static struct drm_encoder *
-exynos_dpi_best_encoder(struct drm_connector *connector)
-{
- struct exynos_dpi *ctx = connector_to_dpi(connector);
-
- return &ctx->encoder;
-}
-
static const struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = {
.get_modes = exynos_dpi_get_modes,
- .best_encoder = exynos_dpi_best_encoder,
};
static int exynos_dpi_create_connector(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 2dd820e23..877d2efa2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -159,12 +159,7 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",
dev_name(private->dma_dev));
- /*
- * create mapping to manage iommu table and set a pointer to iommu
- * mapping structure to iommu_mapping of private data.
- * also this iommu_mapping can be used to check if iommu is supported
- * or not.
- */
+ /* create common IOMMU mapping for all devices attached to Exynos DRM */
ret = drm_create_iommu_mapping(dev);
if (ret < 0) {
DRM_ERROR("failed to create iommu mapping.\n");
@@ -267,6 +262,8 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
{
struct exynos_drm_private *priv = dev->dev_private;
struct exynos_atomic_commit *commit;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
int i, ret;
commit = kzalloc(sizeof(*commit), GFP_KERNEL);
@@ -288,10 +285,8 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
/* Wait until all affected CRTCs have completed previous commits and
* mark them as pending.
*/
- for (i = 0; i < dev->mode_config.num_crtc; ++i) {
- if (state->crtcs[i])
- commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
- }
+ for_each_crtc_in_state(state, crtc, crtc_state, i)
+ commit->crtcs |= drm_crtc_mask(crtc);
wait_event(priv->wait, !commit_is_pending(priv, commit->crtcs));
@@ -299,7 +294,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
priv->pending |= commit->crtcs;
spin_unlock(&priv->lock);
- drm_atomic_helper_swap_state(dev, state);
+ drm_atomic_helper_swap_state(state, true);
if (nonblock)
schedule_work(&commit->work);
@@ -407,7 +402,6 @@ static struct drm_driver exynos_drm_driver = {
.preclose = exynos_drm_preclose,
.lastclose = exynos_drm_lastclose,
.postclose = exynos_drm_postclose,
- .set_busid = drm_platform_set_busid,
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = exynos_drm_crtc_enable_vblank,
.disable_vblank = exynos_drm_crtc_disable_vblank,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index cc33ec929..7f1a49d5b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -64,7 +64,6 @@ struct exynos_drm_plane_state {
struct exynos_drm_rect src;
unsigned int h_ratio;
unsigned int v_ratio;
- unsigned int zpos;
};
static inline struct exynos_drm_plane_state *
@@ -221,11 +220,8 @@ struct exynos_drm_private {
* this array is used to be aware of which crtc did it request vblank.
*/
struct drm_crtc *crtc[MAX_CRTC];
- struct drm_property *plane_zpos_property;
struct device *dma_dev;
- unsigned long da_start;
- unsigned long da_space_size;
void *mapping;
unsigned int pipe;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 601ecf800..e07cb1fe4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1566,17 +1566,8 @@ static int exynos_dsi_get_modes(struct drm_connector *connector)
return 0;
}
-static struct drm_encoder *
-exynos_dsi_best_encoder(struct drm_connector *connector)
-{
- struct exynos_dsi *dsi = connector_to_dsi(connector);
-
- return &dsi->encoder;
-}
-
static const struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
.get_modes = exynos_dsi_get_modes,
- .best_encoder = exynos_dsi_best_encoder,
};
static int exynos_dsi_create_connector(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index e0166403b..40ce841eb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -55,11 +55,11 @@ static int check_fb_gem_memory_type(struct drm_device *drm_dev,
flags = exynos_gem->flags;
/*
- * without iommu support, not support physically non-continuous memory
- * for framebuffer.
+ * Physically non-contiguous memory type for framebuffer is not
+ * supported without IOMMU.
*/
if (IS_NONCONTIG_BUFFER(flags)) {
- DRM_ERROR("cannot use this gem memory type for fb.\n");
+ DRM_ERROR("Non-contiguous GEM memory is not supported.\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 67dcd6831..4cfb39d54 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -52,7 +52,7 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
ret = dma_mmap_attrs(to_dma_dev(helper->dev), vma, exynos_gem->cookie,
exynos_gem->dma_addr, exynos_gem->size,
- &exynos_gem->dma_attrs);
+ exynos_gem->dma_attrs);
if (ret < 0) {
DRM_ERROR("failed to mmap.\n");
return ret;
@@ -269,8 +269,7 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev,
struct exynos_drm_gem *exynos_gem = exynos_fbd->exynos_gem;
struct drm_framebuffer *fb;
- if (exynos_gem->kvaddr)
- vunmap(exynos_gem->kvaddr);
+ vunmap(exynos_gem->kvaddr);
/* release drm framebuffer and real buffer */
if (fb_helper->fb && fb_helper->fb->funcs) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 0525c5614..147ef0d29 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1753,32 +1753,6 @@ static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int fimc_suspend(struct device *dev)
-{
- struct fimc_context *ctx = get_fimc_context(dev);
-
- DRM_DEBUG_KMS("id[%d]\n", ctx->id);
-
- if (pm_runtime_suspended(dev))
- return 0;
-
- return fimc_clk_ctrl(ctx, false);
-}
-
-static int fimc_resume(struct device *dev)
-{
- struct fimc_context *ctx = get_fimc_context(dev);
-
- DRM_DEBUG_KMS("id[%d]\n", ctx->id);
-
- if (!pm_runtime_suspended(dev))
- return fimc_clk_ctrl(ctx, true);
-
- return 0;
-}
-#endif
-
static int fimc_runtime_suspend(struct device *dev)
{
struct fimc_context *ctx = get_fimc_context(dev);
@@ -1799,7 +1773,8 @@ static int fimc_runtime_resume(struct device *dev)
#endif
static const struct dev_pm_ops fimc_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 8564c3da0..6eca8bb88 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -17,7 +17,6 @@
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
-#include <linux/dma-attrs.h>
#include <linux/of.h>
#include <drm/drmP.h>
@@ -235,7 +234,7 @@ struct g2d_data {
struct mutex cmdlist_mutex;
dma_addr_t cmdlist_pool;
void *cmdlist_pool_virt;
- struct dma_attrs cmdlist_dma_attrs;
+ unsigned long cmdlist_dma_attrs;
/* runqueue*/
struct g2d_runqueue_node *runqueue_node;
@@ -256,13 +255,12 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
int ret;
struct g2d_buf_info *buf_info;
- init_dma_attrs(&g2d->cmdlist_dma_attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);
+ g2d->cmdlist_dma_attrs = DMA_ATTR_WRITE_COMBINE;
g2d->cmdlist_pool_virt = dma_alloc_attrs(to_dma_dev(subdrv->drm_dev),
G2D_CMDLIST_POOL_SIZE,
&g2d->cmdlist_pool, GFP_KERNEL,
- &g2d->cmdlist_dma_attrs);
+ g2d->cmdlist_dma_attrs);
if (!g2d->cmdlist_pool_virt) {
dev_err(dev, "failed to allocate dma memory\n");
return -ENOMEM;
@@ -295,7 +293,7 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
err:
dma_free_attrs(to_dma_dev(subdrv->drm_dev), G2D_CMDLIST_POOL_SIZE,
g2d->cmdlist_pool_virt,
- g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
+ g2d->cmdlist_pool, g2d->cmdlist_dma_attrs);
return ret;
}
@@ -309,7 +307,7 @@ static void g2d_fini_cmdlist(struct g2d_data *g2d)
dma_free_attrs(to_dma_dev(subdrv->drm_dev),
G2D_CMDLIST_POOL_SIZE,
g2d->cmdlist_pool_virt,
- g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
+ g2d->cmdlist_pool, g2d->cmdlist_dma_attrs);
}
}
@@ -1477,8 +1475,8 @@ static int g2d_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int g2d_suspend(struct device *dev)
+#ifdef CONFIG_PM
+static int g2d_runtime_suspend(struct device *dev)
{
struct g2d_data *g2d = dev_get_drvdata(dev);
@@ -1492,25 +1490,6 @@ static int g2d_suspend(struct device *dev)
flush_work(&g2d->runqueue_work);
- return 0;
-}
-
-static int g2d_resume(struct device *dev)
-{
- struct g2d_data *g2d = dev_get_drvdata(dev);
-
- g2d->suspended = false;
- g2d_exec_runqueue(g2d);
-
- return 0;
-}
-#endif
-
-#ifdef CONFIG_PM
-static int g2d_runtime_suspend(struct device *dev)
-{
- struct g2d_data *g2d = dev_get_drvdata(dev);
-
clk_disable_unprepare(g2d->gate_clk);
return 0;
@@ -1525,12 +1504,16 @@ static int g2d_runtime_resume(struct device *dev)
if (ret < 0)
dev_warn(dev, "failed to enable clock.\n");
+ g2d->suspended = false;
+ g2d_exec_runqueue(g2d);
+
return ret;
}
#endif
static const struct dev_pm_ops g2d_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL)
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index cdf9f1af4..f2ae72ba7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -24,7 +24,7 @@
static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
{
struct drm_device *dev = exynos_gem->base.dev;
- enum dma_attr attr;
+ unsigned long attr;
unsigned int nr_pages;
struct sg_table sgt;
int ret = -ENOMEM;
@@ -34,7 +34,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
return 0;
}
- init_dma_attrs(&exynos_gem->dma_attrs);
+ exynos_gem->dma_attrs = 0;
/*
* if EXYNOS_BO_CONTIG, fully physically contiguous memory
@@ -42,7 +42,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
* as possible.
*/
if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
- dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &exynos_gem->dma_attrs);
+ exynos_gem->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;
/*
* if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
@@ -54,8 +54,8 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
else
attr = DMA_ATTR_NON_CONSISTENT;
- dma_set_attr(attr, &exynos_gem->dma_attrs);
- dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &exynos_gem->dma_attrs);
+ exynos_gem->dma_attrs |= attr;
+ exynos_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
nr_pages = exynos_gem->size >> PAGE_SHIFT;
@@ -67,7 +67,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
&exynos_gem->dma_addr, GFP_KERNEL,
- &exynos_gem->dma_attrs);
+ exynos_gem->dma_attrs);
if (!exynos_gem->cookie) {
DRM_ERROR("failed to allocate buffer.\n");
goto err_free;
@@ -75,7 +75,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie,
exynos_gem->dma_addr, exynos_gem->size,
- &exynos_gem->dma_attrs);
+ exynos_gem->dma_attrs);
if (ret < 0) {
DRM_ERROR("failed to get sgtable.\n");
goto err_dma_free;
@@ -99,7 +99,7 @@ err_sgt_free:
sg_free_table(&sgt);
err_dma_free:
dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
- exynos_gem->dma_addr, &exynos_gem->dma_attrs);
+ exynos_gem->dma_addr, exynos_gem->dma_attrs);
err_free:
drm_free_large(exynos_gem->pages);
@@ -120,7 +120,7 @@ static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
(dma_addr_t)exynos_gem->dma_addr,
- &exynos_gem->dma_attrs);
+ exynos_gem->dma_attrs);
drm_free_large(exynos_gem->pages);
}
@@ -346,7 +346,7 @@ static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
exynos_gem->dma_addr, exynos_gem->size,
- &exynos_gem->dma_attrs);
+ exynos_gem->dma_attrs);
if (ret < 0) {
DRM_ERROR("failed to mmap.\n");
return ret;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 781007422..df7c543d6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -50,7 +50,7 @@ struct exynos_drm_gem {
void *cookie;
void __iomem *kvaddr;
dma_addr_t dma_addr;
- struct dma_attrs dma_attrs;
+ unsigned long dma_attrs;
struct page **pages;
struct sg_table *sgt;
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 5d20da8f9..52a9d2694 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1760,34 +1760,7 @@ static int gsc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int gsc_suspend(struct device *dev)
-{
- struct gsc_context *ctx = get_gsc_context(dev);
-
- DRM_DEBUG_KMS("id[%d]\n", ctx->id);
-
- if (pm_runtime_suspended(dev))
- return 0;
-
- return gsc_clk_ctrl(ctx, false);
-}
-
-static int gsc_resume(struct device *dev)
-{
- struct gsc_context *ctx = get_gsc_context(dev);
-
- DRM_DEBUG_KMS("id[%d]\n", ctx->id);
-
- if (!pm_runtime_suspended(dev))
- return gsc_clk_ctrl(ctx, true);
-
- return 0;
-}
-#endif
-
-#ifdef CONFIG_PM
-static int gsc_runtime_suspend(struct device *dev)
+static int __maybe_unused gsc_runtime_suspend(struct device *dev)
{
struct gsc_context *ctx = get_gsc_context(dev);
@@ -1796,7 +1769,7 @@ static int gsc_runtime_suspend(struct device *dev)
return gsc_clk_ctrl(ctx, false);
}
-static int gsc_runtime_resume(struct device *dev)
+static int __maybe_unused gsc_runtime_resume(struct device *dev)
{
struct gsc_context *ctx = get_gsc_context(dev);
@@ -1804,10 +1777,10 @@ static int gsc_runtime_resume(struct device *dev)
return gsc_clk_ctrl(ctx, true);
}
-#endif
static const struct dev_pm_ops gsc_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(gsc_suspend, gsc_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
index 7ca09ee19..0f3737024 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -14,13 +14,27 @@
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
-#include <linux/kref.h>
-
-#include <asm/dma-iommu.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_iommu.h"
+static inline int configure_dma_max_seg_size(struct device *dev)
+{
+ if (!dev->dma_parms)
+ dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
+ if (!dev->dma_parms)
+ return -ENOMEM;
+
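+ /* allow the maximum 32-bit segment size so buffers need not be split */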
+ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+ return 0;
+}
+
+static inline void clear_dma_max_seg_size(struct device *dev)
+{
+ kfree(dev->dma_parms);
+ dev->dma_parms = NULL;
+}
+
/*
* drm_create_iommu_mapping - create a mapping structure
*
@@ -28,38 +42,22 @@
*/
int drm_create_iommu_mapping(struct drm_device *drm_dev)
{
- struct dma_iommu_mapping *mapping = NULL;
struct exynos_drm_private *priv = drm_dev->dev_private;
- if (!priv->da_start)
- priv->da_start = EXYNOS_DEV_ADDR_START;
- if (!priv->da_space_size)
- priv->da_space_size = EXYNOS_DEV_ADDR_SIZE;
-
- mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start,
- priv->da_space_size);
-
- if (IS_ERR(mapping))
- return PTR_ERR(mapping);
-
- priv->mapping = mapping;
-
- return 0;
+ return __exynos_iommu_create_mapping(priv, EXYNOS_DEV_ADDR_START,
+ EXYNOS_DEV_ADDR_SIZE);
}
/*
* drm_release_iommu_mapping - release iommu mapping structure
*
* @drm_dev: DRM device
- *
- * if mapping->kref becomes 0 then all things related to iommu mapping
- * will be released
*/
void drm_release_iommu_mapping(struct drm_device *drm_dev)
{
struct exynos_drm_private *priv = drm_dev->dev_private;
- arm_iommu_release_mapping(priv->mapping);
+ __exynos_iommu_release_mapping(priv);
}
/*
@@ -77,25 +75,19 @@ int drm_iommu_attach_device(struct drm_device *drm_dev,
struct exynos_drm_private *priv = drm_dev->dev_private;
int ret;
- if (!priv->mapping)
- return 0;
-
- subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
- sizeof(*subdrv_dev->dma_parms),
- GFP_KERNEL);
- if (!subdrv_dev->dma_parms)
- return -ENOMEM;
-
- dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
-
- if (subdrv_dev->archdata.mapping)
- arm_iommu_detach_device(subdrv_dev);
+ if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
+ DRM_ERROR("Device %s lacks support for IOMMU\n",
+ dev_name(subdrv_dev));
+ return -EINVAL;
+ }
- ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
- if (ret < 0) {
- DRM_DEBUG_KMS("failed iommu attach.\n");
+ ret = configure_dma_max_seg_size(subdrv_dev);
+ if (ret)
return ret;
- }
+
+ ret = __exynos_iommu_attach(priv, subdrv_dev);
+ if (ret)
+ clear_dma_max_seg_size(subdrv_dev);
return 0;
}
@@ -113,10 +105,7 @@ void drm_iommu_detach_device(struct drm_device *drm_dev,
struct device *subdrv_dev)
{
struct exynos_drm_private *priv = drm_dev->dev_private;
- struct dma_iommu_mapping *mapping = priv->mapping;
-
- if (!mapping || !mapping->domain)
- return;
- arm_iommu_detach_device(subdrv_dev);
+ __exynos_iommu_detach(priv, subdrv_dev);
+ clear_dma_max_seg_size(subdrv_dev);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
index 5ffebe02e..c8de4913f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -17,6 +17,97 @@
#ifdef CONFIG_DRM_EXYNOS_IOMMU
+#if defined(CONFIG_ARM_DMA_USE_IOMMU)
+#include <asm/dma-iommu.h>
+
+static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
+ unsigned long start, unsigned long size)
+{
+ priv->mapping = arm_iommu_create_mapping(&platform_bus_type, start,
+ size);
+ return PTR_ERR_OR_ZERO(priv->mapping);
+}
+
+static inline void
+__exynos_iommu_release_mapping(struct exynos_drm_private *priv)
+{
+ arm_iommu_release_mapping(priv->mapping);
+}
+
+static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
+ struct device *dev)
+{
+ if (dev->archdata.mapping)
+ arm_iommu_detach_device(dev);
+
+ return arm_iommu_attach_device(dev, priv->mapping);
+}
+
+static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
+ struct device *dev)
+{
+ arm_iommu_detach_device(dev);
+}
+
+#elif defined(CONFIG_IOMMU_DMA)
+#include <linux/dma-iommu.h>
+
+static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
+ unsigned long start, unsigned long size)
+{
+ struct iommu_domain *domain;
+ int ret;
+
+ domain = iommu_domain_alloc(priv->dma_dev->bus);
+ if (!domain)
+ return -ENOMEM;
+
+ ret = iommu_get_dma_cookie(domain);
+ if (ret)
+ goto free_domain;
+
+ ret = iommu_dma_init_domain(domain, start, size);
+ if (ret)
+ goto put_cookie;
+
+ priv->mapping = domain;
+ return 0;
+
+put_cookie:
+ iommu_put_dma_cookie(domain);
+free_domain:
+ iommu_domain_free(domain);
+ return ret;
+}
+
+static inline void __exynos_iommu_release_mapping(struct exynos_drm_private *priv)
+{
+ struct iommu_domain *domain = priv->mapping;
+
+ iommu_put_dma_cookie(domain);
+ iommu_domain_free(domain);
+ priv->mapping = NULL;
+}
+
+static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
+ struct device *dev)
+{
+ struct iommu_domain *domain = priv->mapping;
+
+ return iommu_attach_device(domain, dev);
+}
+
+static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
+ struct device *dev)
+{
+ struct iommu_domain *domain = priv->mapping;
+
+ iommu_detach_device(domain, dev);
+}
+#else
+#error Unsupported architecture and IOMMU/DMA-mapping glue code
+#endif
+
int drm_create_iommu_mapping(struct drm_device *drm_dev);
void drm_release_iommu_mapping(struct drm_device *drm_dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 77f12c00a..7f32419b2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -139,9 +139,9 @@ static void exynos_drm_plane_reset(struct drm_plane *plane)
exynos_state = kzalloc(sizeof(*exynos_state), GFP_KERNEL);
if (exynos_state) {
- exynos_state->zpos = exynos_plane->config->zpos;
plane->state = &exynos_state->base;
plane->state->plane = plane;
+ plane->state->zpos = exynos_plane->config->zpos;
}
}
@@ -157,7 +157,6 @@ exynos_drm_plane_duplicate_state(struct drm_plane *plane)
return NULL;
__drm_atomic_helper_plane_duplicate_state(plane, &copy->base);
- copy->zpos = exynos_state->zpos;
return &copy->base;
}
@@ -170,43 +169,6 @@ static void exynos_drm_plane_destroy_state(struct drm_plane *plane,
kfree(old_exynos_state);
}
-static int exynos_drm_plane_atomic_set_property(struct drm_plane *plane,
- struct drm_plane_state *state,
- struct drm_property *property,
- uint64_t val)
-{
- struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
- struct exynos_drm_plane_state *exynos_state =
- to_exynos_plane_state(state);
- struct exynos_drm_private *dev_priv = plane->dev->dev_private;
- const struct exynos_drm_plane_config *config = exynos_plane->config;
-
- if (property == dev_priv->plane_zpos_property &&
- (config->capabilities & EXYNOS_DRM_PLANE_CAP_ZPOS))
- exynos_state->zpos = val;
- else
- return -EINVAL;
-
- return 0;
-}
-
-static int exynos_drm_plane_atomic_get_property(struct drm_plane *plane,
- const struct drm_plane_state *state,
- struct drm_property *property,
- uint64_t *val)
-{
- const struct exynos_drm_plane_state *exynos_state =
- container_of(state, const struct exynos_drm_plane_state, base);
- struct exynos_drm_private *dev_priv = plane->dev->dev_private;
-
- if (property == dev_priv->plane_zpos_property)
- *val = exynos_state->zpos;
- else
- return -EINVAL;
-
- return 0;
-}
-
static struct drm_plane_funcs exynos_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@@ -215,8 +177,6 @@ static struct drm_plane_funcs exynos_plane_funcs = {
.reset = exynos_drm_plane_reset,
.atomic_duplicate_state = exynos_drm_plane_duplicate_state,
.atomic_destroy_state = exynos_drm_plane_destroy_state,
- .atomic_set_property = exynos_drm_plane_atomic_set_property,
- .atomic_get_property = exynos_drm_plane_atomic_get_property,
};
static int
@@ -304,23 +264,13 @@ static const struct drm_plane_helper_funcs plane_helper_funcs = {
};
static void exynos_plane_attach_zpos_property(struct drm_plane *plane,
- unsigned int zpos)
+ bool immutable)
{
- struct drm_device *dev = plane->dev;
- struct exynos_drm_private *dev_priv = dev->dev_private;
- struct drm_property *prop;
-
- prop = dev_priv->plane_zpos_property;
- if (!prop) {
- prop = drm_property_create_range(dev, 0, "zpos",
- 0, MAX_PLANE - 1);
- if (!prop)
- return;
-
- dev_priv->plane_zpos_property = prop;
- }
-
- drm_object_attach_property(&plane->base, prop, zpos);
+ /* FIXME */
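+ /*
+ * Planes without the ZPOS capability get a fixed zpos of 0; all
+ * others expose a mutable zpos property in [0, MAX_PLANE - 1].
+ */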
+ if (immutable)
+ drm_plane_create_zpos_immutable_property(plane, 0);
+ else
+ drm_plane_create_zpos_property(plane, 0, 0, MAX_PLANE - 1);
}
int exynos_plane_init(struct drm_device *dev,
@@ -346,7 +296,8 @@ int exynos_plane_init(struct drm_device *dev,
exynos_plane->index = index;
exynos_plane->config = config;
- exynos_plane_attach_zpos_property(&exynos_plane->base, config->zpos);
+ exynos_plane_attach_zpos_property(&exynos_plane->base,
+ !(config->capabilities & EXYNOS_DRM_PLANE_CAP_ZPOS));
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 404367a43..6591e4060 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -794,29 +794,6 @@ static int rotator_clk_crtl(struct rot_context *rot, bool enable)
return 0;
}
-
-#ifdef CONFIG_PM_SLEEP
-static int rotator_suspend(struct device *dev)
-{
- struct rot_context *rot = dev_get_drvdata(dev);
-
- if (pm_runtime_suspended(dev))
- return 0;
-
- return rotator_clk_crtl(rot, false);
-}
-
-static int rotator_resume(struct device *dev)
-{
- struct rot_context *rot = dev_get_drvdata(dev);
-
- if (!pm_runtime_suspended(dev))
- return rotator_clk_crtl(rot, true);
-
- return 0;
-}
-#endif
-
static int rotator_runtime_suspend(struct device *dev)
{
struct rot_context *rot = dev_get_drvdata(dev);
@@ -833,7 +810,8 @@ static int rotator_runtime_resume(struct device *dev)
#endif
static const struct dev_pm_ops rotator_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(rotator_suspend, rotator_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(rotator_runtime_suspend, rotator_runtime_resume,
NULL)
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 608b0afa3..e8f6c92b2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -378,16 +378,8 @@ static int vidi_get_modes(struct drm_connector *connector)
return drm_add_edid_modes(connector, edid);
}
-static struct drm_encoder *vidi_best_encoder(struct drm_connector *connector)
-{
- struct vidi_context *ctx = ctx_from_connector(connector);
-
- return &ctx->encoder;
-}
-
static const struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
.get_modes = vidi_get_modes,
- .best_encoder = vidi_best_encoder,
};
static int vidi_create_connector(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 58de5a430..2275efe41 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -937,17 +937,9 @@ static int hdmi_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static struct drm_encoder *hdmi_best_encoder(struct drm_connector *connector)
-{
- struct hdmi_context *hdata = connector_to_hdmi(connector);
-
- return &hdata->encoder;
-}
-
static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
.get_modes = hdmi_get_modes,
.mode_valid = hdmi_mode_valid,
- .best_encoder = hdmi_best_encoder,
};
static int hdmi_create_connector(struct drm_encoder *encoder)
@@ -1828,6 +1820,7 @@ static int hdmi_probe(struct platform_device *pdev)
DRM_ERROR("Failed to find ddc node in device tree\n");
return -ENODEV;
}
+ of_node_put(dev->of_node);
out_get_ddc_adpt:
hdata->ddc_adpt = of_find_i2c_adapter_by_node(ddc_node);
@@ -1846,6 +1839,7 @@ out_get_ddc_adpt:
ret = -ENODEV;
goto err_ddc;
}
+ of_node_put(dev->of_node);
out_get_phy_port:
if (hdata->drv_data->is_apb_phy) {
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 74a4269cc..e1d47f943 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -477,6 +477,7 @@ static void vp_video_buffer(struct mixer_context *ctx,
struct drm_display_mode *mode = &state->base.crtc->state->adjusted_mode;
struct mixer_resources *res = &ctx->mixer_res;
struct drm_framebuffer *fb = state->base.fb;
+ unsigned int priority = state->base.normalized_zpos + 1;
unsigned long flags;
dma_addr_t luma_addr[2], chroma_addr[2];
bool tiled_mode = false;
@@ -561,7 +562,7 @@ static void vp_video_buffer(struct mixer_context *ctx,
mixer_cfg_scan(ctx, mode->vdisplay);
mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
- mixer_cfg_layer(ctx, plane->index, state->zpos + 1, true);
+ mixer_cfg_layer(ctx, plane->index, priority, true);
mixer_cfg_vp_blend(ctx);
mixer_run(ctx);
@@ -586,6 +587,7 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
struct drm_display_mode *mode = &state->base.crtc->state->adjusted_mode;
struct mixer_resources *res = &ctx->mixer_res;
struct drm_framebuffer *fb = state->base.fb;
+ unsigned int priority = state->base.normalized_zpos + 1;
unsigned long flags;
unsigned int win = plane->index;
unsigned int x_ratio = 0, y_ratio = 0;
@@ -677,7 +679,7 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
mixer_cfg_scan(ctx, mode->vdisplay);
mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
- mixer_cfg_layer(ctx, win, state->zpos + 1, true);
+ mixer_cfg_layer(ctx, win, priority, true);
mixer_cfg_gfx_blend(ctx, win, is_alpha_format(fb->pixel_format));
/* layer update mandatory for mixer 16.0.33.0 */
diff --git a/drivers/gpu/drm/fsl-dcu/Kconfig b/drivers/gpu/drm/fsl-dcu/Kconfig
index b9c714de6..14a72c4c4 100644
--- a/drivers/gpu/drm/fsl-dcu/Kconfig
+++ b/drivers/gpu/drm/fsl-dcu/Kconfig
@@ -5,12 +5,7 @@ config DRM_FSL_DCU
select BACKLIGHT_LCD_SUPPORT
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
- select DRM_KMS_FB_HELPER
select DRM_PANEL
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
select REGMAP_MMIO
select VIDEOMODE_HELPERS
help
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index 89c0084c2..3371635cd 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -22,20 +22,21 @@
#include "fsl_dcu_drm_drv.h"
#include "fsl_dcu_drm_plane.h"
-static void fsl_dcu_drm_crtc_atomic_begin(struct drm_crtc *crtc,
+static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
-}
+ struct drm_pending_vblank_event *event = crtc->state->event;
-static int fsl_dcu_drm_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
-{
- return 0;
-}
+ if (event) {
+ crtc->state->event = NULL;
-static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
-{
+ spin_lock_irq(&crtc->dev->event_lock);
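+ /*
+ * If a vblank reference can be taken, deliver the event on the
+ * next vblank; otherwise complete it immediately.
+ */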
+ if (drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+ }
}
static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
@@ -43,6 +44,8 @@ static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+ drm_crtc_vblank_off(crtc);
+
regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
DCU_MODE_DCU_MODE_MASK,
DCU_MODE_DCU_MODE(DCU_MODE_OFF));
@@ -60,6 +63,8 @@ static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
DCU_UPDATE_MODE_READREG);
+
+ drm_crtc_vblank_on(crtc);
}
static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
@@ -117,8 +122,6 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
}
static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = {
- .atomic_begin = fsl_dcu_drm_crtc_atomic_begin,
- .atomic_check = fsl_dcu_drm_crtc_atomic_check,
.atomic_flush = fsl_dcu_drm_crtc_atomic_flush,
.disable = fsl_dcu_drm_disable_crtc,
.enable = fsl_dcu_drm_crtc_enable,
@@ -138,9 +141,10 @@ int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev)
{
struct drm_plane *primary;
struct drm_crtc *crtc = &fsl_dev->crtc;
- unsigned int i, j, reg_num;
int ret;
+ fsl_dcu_drm_init_planes(fsl_dev->drm);
+
primary = fsl_dcu_drm_primary_create_plane(fsl_dev->drm);
if (!primary)
return -ENOMEM;
@@ -154,19 +158,5 @@ int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev)
drm_crtc_helper_add(crtc, &fsl_dcu_drm_crtc_helper_funcs);
- if (!strcmp(fsl_dev->soc->name, "ls1021a"))
- reg_num = LS1021A_LAYER_REG_NUM;
- else
- reg_num = VF610_LAYER_REG_NUM;
- for (i = 0; i < fsl_dev->soc->total_layer; i++) {
- for (j = 1; j <= reg_num; j++)
- regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0);
- }
- regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
- DCU_MODE_DCU_MODE_MASK,
- DCU_MODE_DCU_MODE(DCU_MODE_OFF));
- regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
- DCU_UPDATE_MODE_READREG);
-
return 0;
}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index dc723f7ea..7882387f9 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -11,6 +11,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/console.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/mm.h>
@@ -22,6 +23,7 @@
#include <linux/regmap.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
@@ -42,10 +44,8 @@ static const struct regmap_config fsl_dcu_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .cache_type = REGCACHE_FLAT,
.volatile_reg = fsl_dcu_drm_is_volatile_reg,
- .max_register = 0x11fc,
};
static int fsl_dcu_drm_irq_init(struct drm_device *dev)
@@ -199,7 +199,7 @@ static struct drm_driver fsl_dcu_drm_driver = {
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = fsl_dcu_drm_enable_vblank,
.disable_vblank = fsl_dcu_drm_disable_vblank,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
@@ -229,11 +229,26 @@ static int fsl_dcu_drm_pm_suspend(struct device *dev)
if (!fsl_dev)
return 0;
+ disable_irq(fsl_dev->irq);
drm_kms_helper_poll_disable(fsl_dev->drm);
- regcache_cache_only(fsl_dev->regmap, true);
- regcache_mark_dirty(fsl_dev->regmap);
- clk_disable(fsl_dev->clk);
- clk_unprepare(fsl_dev->clk);
+
+ console_lock();
+ drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 1);
+ console_unlock();
+
+ fsl_dev->state = drm_atomic_helper_suspend(fsl_dev->drm);
+ if (IS_ERR(fsl_dev->state)) {
+ console_lock();
+ drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 0);
+ console_unlock();
+
+ drm_kms_helper_poll_enable(fsl_dev->drm);
+ enable_irq(fsl_dev->irq);
+ return PTR_ERR(fsl_dev->state);
+ }
+
+ clk_disable_unprepare(fsl_dev->pix_clk);
+ clk_disable_unprepare(fsl_dev->clk);
return 0;
}
@@ -246,21 +261,27 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
if (!fsl_dev)
return 0;
- ret = clk_enable(fsl_dev->clk);
+ ret = clk_prepare_enable(fsl_dev->clk);
if (ret < 0) {
dev_err(dev, "failed to enable dcu clk\n");
- clk_unprepare(fsl_dev->clk);
return ret;
}
- ret = clk_prepare(fsl_dev->clk);
+
+ ret = clk_prepare_enable(fsl_dev->pix_clk);
if (ret < 0) {
- dev_err(dev, "failed to prepare dcu clk\n");
+ dev_err(dev, "failed to enable pix clk\n");
return ret;
}
+ fsl_dcu_drm_init_planes(fsl_dev->drm);
+ drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state);
+
+ console_lock();
+ drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 0);
+ console_unlock();
+
drm_kms_helper_poll_enable(fsl_dev->drm);
- regcache_cache_only(fsl_dev->regmap, false);
- regcache_sync(fsl_dev->regmap);
+ enable_irq(fsl_dev->irq);
return 0;
}
@@ -274,12 +295,14 @@ static const struct fsl_dcu_soc_data fsl_dcu_ls1021a_data = {
.name = "ls1021a",
.total_layer = 16,
.max_layer = 4,
+ .layer_regs = LS1021A_LAYER_REG_NUM,
};
static const struct fsl_dcu_soc_data fsl_dcu_vf610_data = {
.name = "vf610",
.total_layer = 64,
.max_layer = 6,
+ .layer_regs = VF610_LAYER_REG_NUM,
};
static const struct of_device_id fsl_dcu_of_match[] = {
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
index c275f900f..3b371fe74 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
@@ -175,6 +175,7 @@ struct fsl_dcu_soc_data {
unsigned int total_layer;
/*max layer number DCU supported*/
unsigned int max_layer;
+ unsigned int layer_regs;
};
struct fsl_dcu_drm_device {
@@ -193,6 +194,7 @@ struct fsl_dcu_drm_device {
struct drm_encoder encoder;
struct fsl_dcu_drm_connector connector;
const struct fsl_dcu_soc_data *soc;
+ struct drm_atomic_state *state;
};
void fsl_dcu_fbdev_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
index c564ec612..d9d6cc1c8 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
@@ -37,23 +37,22 @@ int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev)
ret = fsl_dcu_drm_crtc_create(fsl_dev);
if (ret)
- return ret;
+ goto err;
ret = fsl_dcu_drm_encoder_create(fsl_dev, &fsl_dev->crtc);
if (ret)
- goto fail_encoder;
+ goto err;
- ret = fsl_dcu_drm_connector_create(fsl_dev, &fsl_dev->encoder);
+ ret = fsl_dcu_create_outputs(fsl_dev);
if (ret)
- goto fail_connector;
+ goto err;
drm_mode_config_reset(fsl_dev->drm);
drm_kms_helper_poll_init(fsl_dev->drm);
return 0;
-fail_encoder:
- fsl_dev->crtc.funcs->destroy(&fsl_dev->crtc);
-fail_connector:
- fsl_dev->encoder.funcs->destroy(&fsl_dev->encoder);
+
+err:
+ drm_mode_config_cleanup(fsl_dev->drm);
return ret;
}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h
index 7093109fb..5a7b88e19 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h
@@ -25,9 +25,8 @@ to_fsl_dcu_connector(struct drm_connector *con)
: NULL;
}
-int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev,
- struct drm_encoder *encoder);
int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
struct drm_crtc *crtc);
+int fsl_dcu_create_outputs(struct fsl_dcu_drm_device *fsl_dev);
#endif /* __FSL_DCU_DRM_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index 274558b3b..e50467a0d 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -217,6 +217,22 @@ static const u32 fsl_dcu_drm_plane_formats[] = {
DRM_FORMAT_YUV422,
};
+void fsl_dcu_drm_init_planes(struct drm_device *dev)
+{
+ struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+ int i, j;
+
+ for (i = 0; i < fsl_dev->soc->total_layer; i++) {
+ for (j = 1; j <= fsl_dev->soc->layer_regs; j++)
+ regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0);
+ }
+ regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
+ DCU_MODE_DCU_MODE_MASK,
+ DCU_MODE_DCU_MODE(DCU_MODE_OFF));
+ regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+ DCU_UPDATE_MODE_READREG);
+}
+
struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev)
{
struct drm_plane *primary;
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h
index d657f088d..8ee45f813 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h
@@ -12,6 +12,7 @@
#ifndef __FSL_DCU_DRM_PLANE_H__
#define __FSL_DCU_DRM_PLANE_H__
+void fsl_dcu_drm_init_planes(struct drm_device *dev);
struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev);
#endif /* __FSL_DCU_DRM_PLANE_H__ */
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index 98c998da9..26edcc899 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -10,6 +10,7 @@
*/
#include <linux/backlight.h>
+#include <linux/of_graph.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
@@ -102,14 +103,6 @@ static const struct drm_connector_funcs fsl_dcu_drm_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
};
-static struct drm_encoder *
-fsl_dcu_drm_connector_best_encoder(struct drm_connector *connector)
-{
- struct fsl_dcu_drm_connector *fsl_con = to_fsl_dcu_connector(connector);
-
- return fsl_con->encoder;
-}
-
static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector)
{
struct fsl_dcu_drm_connector *fsl_connector;
@@ -136,17 +129,16 @@ static int fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector,
}
static const struct drm_connector_helper_funcs connector_helper_funcs = {
- .best_encoder = fsl_dcu_drm_connector_best_encoder,
.get_modes = fsl_dcu_drm_connector_get_modes,
.mode_valid = fsl_dcu_drm_connector_mode_valid,
};
-int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev,
- struct drm_encoder *encoder)
+static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev,
+ struct drm_panel *panel)
{
+ struct drm_encoder *encoder = &fsl_dev->encoder;
struct drm_connector *connector = &fsl_dev->connector.base;
struct drm_mode_config *mode_config = &fsl_dev->drm->mode_config;
- struct device_node *panel_node;
int ret;
fsl_dev->connector.encoder = encoder;
@@ -170,21 +162,7 @@ int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev,
mode_config->dpms_property,
DRM_MODE_DPMS_OFF);
- panel_node = of_parse_phandle(fsl_dev->np, "fsl,panel", 0);
- if (!panel_node) {
- dev_err(fsl_dev->dev, "fsl,panel property not found\n");
- ret = -ENODEV;
- goto err_sysfs;
- }
-
- fsl_dev->connector.panel = of_drm_find_panel(panel_node);
- if (!fsl_dev->connector.panel) {
- ret = -EPROBE_DEFER;
- goto err_panel;
- }
- of_node_put(panel_node);
-
- ret = drm_panel_attach(fsl_dev->connector.panel, connector);
+ ret = drm_panel_attach(panel, connector);
if (ret) {
dev_err(fsl_dev->dev, "failed to attach panel\n");
goto err_sysfs;
@@ -192,11 +170,62 @@ int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev,
return 0;
-err_panel:
- of_node_put(panel_node);
err_sysfs:
drm_connector_unregister(connector);
err_cleanup:
drm_connector_cleanup(connector);
return ret;
}
+
+static int fsl_dcu_attach_endpoint(struct fsl_dcu_drm_device *fsl_dev,
+ const struct of_endpoint *ep)
+{
+ struct drm_bridge *bridge;
+ struct device_node *np;
+
+ np = of_graph_get_remote_port_parent(ep->local_node);
+
+ fsl_dev->connector.panel = of_drm_find_panel(np);
+ if (fsl_dev->connector.panel) {
+ of_node_put(np);
+ return fsl_dcu_attach_panel(fsl_dev, fsl_dev->connector.panel);
+ }
+
+ bridge = of_drm_find_bridge(np);
+ of_node_put(np);
+ if (!bridge)
+ return -ENODEV;
+
+ fsl_dev->encoder.bridge = bridge;
+ bridge->encoder = &fsl_dev->encoder;
+
+ return drm_bridge_attach(fsl_dev->drm, bridge);
+}
+
+int fsl_dcu_create_outputs(struct fsl_dcu_drm_device *fsl_dev)
+{
+ struct of_endpoint ep;
+ struct device_node *ep_node, *panel_node;
+ int ret;
+
+ /* This is for backward compatibility with the legacy "fsl,panel" binding */
+ panel_node = of_parse_phandle(fsl_dev->np, "fsl,panel", 0);
+ if (panel_node) {
+ fsl_dev->connector.panel = of_drm_find_panel(panel_node);
+ of_node_put(panel_node);
+ if (!fsl_dev->connector.panel)
+ return -EPROBE_DEFER;
+ return fsl_dcu_attach_panel(fsl_dev, fsl_dev->connector.panel);
+ }
+
+ ep_node = of_graph_get_next_endpoint(fsl_dev->np, NULL);
+ if (!ep_node)
+ return -ENODEV;
+
+ ret = of_graph_parse_endpoint(ep_node, &ep);
+ of_node_put(ep_node);
+ if (ret)
+ return -ENODEV;
+
+ return fsl_dcu_attach_endpoint(fsl_dev, &ep);
+}
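
The new fsl_dcu_create_outputs() above keeps the legacy "fsl,panel" phandle as a fallback and otherwise walks the OF graph: it takes the first endpoint, resolves the remote port parent, and treats that node as either a panel or a bridge. A minimal sketch of that resolution order, assuming a device node with a single endpoint; the helper name is illustrative and not part of the patch:

	#include <linux/of_graph.h>
	#include <drm/drm_panel.h>	/* of_drm_find_panel() */
	/* of_drm_find_bridge() is declared by the DRM core headers */

	static int resolve_panel_or_bridge(struct device_node *np,
					   struct drm_panel **panel,
					   struct drm_bridge **bridge)
	{
		struct device_node *ep, *remote;

		ep = of_graph_get_next_endpoint(np, NULL); /* first endpoint */
		if (!ep)
			return -ENODEV;

		remote = of_graph_get_remote_port_parent(ep);
		of_node_put(ep);
		if (!remote)
			return -ENODEV;

		*panel = of_drm_find_panel(remote);	/* panel is preferred */
		*bridge = *panel ? NULL : of_drm_find_bridge(remote);
		of_node_put(remote);

		return (*panel || *bridge) ? 0 : -ENODEV;
	}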
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
index bbe34f1c0..bca09ea24 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
@@ -92,6 +92,7 @@ struct fsl_tcon *fsl_tcon_init(struct device *dev)
goto err_node_put;
}
+ of_node_put(np);
clk_prepare_enable(tcon->ipg_clk);
dev_info(dev, "Using TCON in bypass mode\n");
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index 17f928ec8..8906d6749 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -1,11 +1,7 @@
config DRM_GMA500
tristate "Intel GMA5/600 KMS Framebuffer"
depends on DRM && PCI && X86
- select FB_CFB_COPYAREA
- select FB_CFB_FILLRECT
- select FB_CFB_IMAGEBLIT
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_TTM
# GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
select ACPI_VIDEO if ACPI
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 28f9d9098..563f193fc 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -246,8 +246,7 @@ static void cdv_hdmi_destroy(struct drm_connector *connector)
{
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
- if (gma_encoder->i2c_bus)
- psb_intel_i2c_destroy(gma_encoder->i2c_bus);
+ psb_intel_i2c_destroy(gma_encoder->i2c_bus);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 813ef23a8..38dc89083 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -444,8 +444,7 @@ static void cdv_intel_lvds_destroy(struct drm_connector *connector)
{
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
- if (gma_encoder->i2c_bus)
- psb_intel_i2c_destroy(gma_encoder->i2c_bus);
+ psb_intel_i2c_destroy(gma_encoder->i2c_bus);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
@@ -780,12 +779,10 @@ out:
failed_find:
mutex_unlock(&dev->mode_config.mutex);
printk(KERN_ERR "Failed find\n");
- if (gma_encoder->ddc_bus)
- psb_intel_i2c_destroy(gma_encoder->ddc_bus);
+ psb_intel_i2c_destroy(gma_encoder->ddc_bus);
failed_ddc:
printk(KERN_ERR "Failed DDC\n");
- if (gma_encoder->i2c_bus)
- psb_intel_i2c_destroy(gma_encoder->i2c_bus);
+ psb_intel_i2c_destroy(gma_encoder->i2c_bus);
failed_blc_i2c:
printk(KERN_ERR "Failed BLC\n");
drm_encoder_cleanup(encoder);
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 7440bf90a..0fcdce081 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -184,12 +184,6 @@ static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
return 0;
}
-static int psbfb_ioctl(struct fb_info *info, unsigned int cmd,
- unsigned long arg)
-{
- return -ENOTTY;
-}
-
static struct fb_ops psbfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
@@ -201,7 +195,6 @@ static struct fb_ops psbfb_ops = {
.fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_mmap = psbfb_mmap,
.fb_sync = psbfb_sync,
- .fb_ioctl = psbfb_ioctl,
};
static struct fb_ops psbfb_roll_ops = {
@@ -215,7 +208,6 @@ static struct fb_ops psbfb_roll_ops = {
.fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_pan_display = psbfb_pan,
.fb_mmap = psbfb_mmap,
- .fb_ioctl = psbfb_ioctl,
};
static struct fb_ops psbfb_unaccel_ops = {
@@ -228,7 +220,6 @@ static struct fb_ops psbfb_unaccel_ops = {
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_mmap = psbfb_mmap,
- .fb_ioctl = psbfb_ioctl,
};
/**
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index c95406e6f..1a1cf7a3b 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -175,20 +175,21 @@ void gma_crtc_load_lut(struct drm_crtc *crtc)
}
}
-void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
- u32 start, u32 size)
+int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
+ u32 size)
{
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
int i;
- int end = (start + size > 256) ? 256 : start + size;
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
gma_crtc->lut_r[i] = red[i] >> 8;
gma_crtc->lut_g[i] = green[i] >> 8;
gma_crtc->lut_b[i] = blue[i] >> 8;
}
gma_crtc_load_lut(crtc);
+
+ return 0;
}
/**
@@ -281,7 +282,7 @@ void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
/* Turn off vblank interrupts */
- drm_vblank_off(dev, pipe);
+ drm_crtc_vblank_off(crtc);
/* Wait for vblank for the disable to take effect */
gma_wait_for_vblank(dev);
diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h
index b2491c65f..e72dd08b7 100644
--- a/drivers/gpu/drm/gma500/gma_display.h
+++ b/drivers/gpu/drm/gma500/gma_display.h
@@ -72,8 +72,8 @@ extern int gma_crtc_cursor_set(struct drm_crtc *crtc,
uint32_t width, uint32_t height);
extern int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
extern void gma_crtc_load_lut(struct drm_crtc *crtc);
-extern void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, u32 start, u32 size);
+extern int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, u32 size);
extern void gma_crtc_dpms(struct drm_crtc *crtc, int mode);
extern void gma_crtc_prepare(struct drm_crtc *crtc);
extern void gma_crtc_commit(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 82b8ce418..50eb944fb 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -210,10 +210,8 @@ static int psb_driver_unload(struct drm_device *dev)
iounmap(dev_priv->aux_reg);
dev_priv->aux_reg = NULL;
}
- if (dev_priv->aux_pdev)
- pci_dev_put(dev_priv->aux_pdev);
- if (dev_priv->lpc_pdev)
- pci_dev_put(dev_priv->lpc_pdev);
+ pci_dev_put(dev_priv->aux_pdev);
+ pci_dev_put(dev_priv->lpc_pdev);
/* Destroy VBT data */
psb_intel_destroy_bios(dev);
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 398015be8..7b6c84925 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -491,7 +491,6 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
struct drm_psb_private *dev_priv = dev->dev_private;
struct gma_crtc *gma_crtc;
int i;
- uint16_t *r_base, *g_base, *b_base;
/* We allocate an extra array of drm_connector pointers
* for fbdev after the crtc */
@@ -519,16 +518,10 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
gma_crtc->pipe = pipe;
gma_crtc->plane = pipe;
- r_base = gma_crtc->base.gamma_store;
- g_base = r_base + 256;
- b_base = g_base + 256;
for (i = 0; i < 256; i++) {
gma_crtc->lut_r[i] = i;
gma_crtc->lut_g[i] = i;
gma_crtc->lut_b[i] = i;
- r_base[i] = i << 8;
- g_base[i] = i << 8;
- b_base[i] = i << 8;
gma_crtc->lut_adj[i] = 0;
}
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index b1b93317d..e55733ca4 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -561,8 +561,7 @@ void psb_intel_lvds_destroy(struct drm_connector *connector)
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
struct psb_intel_lvds_priv *lvds_priv = gma_encoder->dev_priv;
- if (lvds_priv->ddc_bus)
- psb_intel_i2c_destroy(lvds_priv->ddc_bus);
+ psb_intel_i2c_destroy(lvds_priv->ddc_bus);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
@@ -835,11 +834,9 @@ out:
failed_find:
mutex_unlock(&dev->mode_config.mutex);
- if (lvds_priv->ddc_bus)
- psb_intel_i2c_destroy(lvds_priv->ddc_bus);
+ psb_intel_i2c_destroy(lvds_priv->ddc_bus);
failed_ddc:
- if (lvds_priv->i2c_bus)
- psb_intel_i2c_destroy(lvds_priv->i2c_bus);
+ psb_intel_i2c_destroy(lvds_priv->i2c_bus);
failed_blc_i2c:
drm_encoder_cleanup(encoder);
drm_connector_cleanup(connector);
diff --git a/drivers/gpu/drm/hisilicon/kirin/Kconfig b/drivers/gpu/drm/hisilicon/kirin/Kconfig
index ea0df6115..499f64405 100644
--- a/drivers/gpu/drm/hisilicon/kirin/Kconfig
+++ b/drivers/gpu/drm/hisilicon/kirin/Kconfig
@@ -4,6 +4,7 @@ config DRM_HISI_KIRIN
select DRM_KMS_HELPER
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
+ select HISI_KIRIN_DW_DSI
help
Choose this option if you have a HiSilicon Kirin chipset (hi6220).
If M is selected the module will be called kirin-drm.
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index fba6372d0..c3707d47c 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -487,6 +487,7 @@ static void ade_crtc_enable(struct drm_crtc *crtc)
ade_set_medianoc_qos(acrtc);
ade_display_enable(acrtc);
ade_dump_regs(ctx->base);
+ drm_crtc_vblank_on(crtc);
acrtc->enable = true;
}
@@ -498,17 +499,11 @@ static void ade_crtc_disable(struct drm_crtc *crtc)
if (!acrtc->enable)
return;
+ drm_crtc_vblank_off(crtc);
ade_power_down(ctx);
acrtc->enable = false;
}
-static int ade_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
-{
- /* do nothing */
- return 0;
-}
-
static void ade_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct ade_crtc *acrtc = to_ade_crtc(crtc);
@@ -537,6 +532,7 @@ static void ade_crtc_atomic_flush(struct drm_crtc *crtc,
{
struct ade_crtc *acrtc = to_ade_crtc(crtc);
struct ade_hw_ctx *ctx = acrtc->ctx;
+ struct drm_pending_vblank_event *event = crtc->state->event;
void __iomem *base = ctx->base;
/* only crtc is enabled regs take effect */
@@ -545,12 +541,22 @@ static void ade_crtc_atomic_flush(struct drm_crtc *crtc,
/* flush ade registers */
writel(ADE_ENABLE, base + ADE_EN);
}
+
+ if (event) {
+ crtc->state->event = NULL;
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+ }
}
static const struct drm_crtc_helper_funcs ade_crtc_helper_funcs = {
.enable = ade_crtc_enable,
.disable = ade_crtc_disable,
- .atomic_check = ade_crtc_atomic_check,
.mode_set_nofb = ade_crtc_mode_set_nofb,
.atomic_begin = ade_crtc_atomic_begin,
.atomic_flush = ade_crtc_atomic_flush,
@@ -961,21 +967,21 @@ static int ade_dts_parse(struct platform_device *pdev, struct ade_hw_ctx *ctx)
}
ctx->ade_core_clk = devm_clk_get(dev, "clk_ade_core");
- if (!ctx->ade_core_clk) {
+ if (IS_ERR(ctx->ade_core_clk)) {
DRM_ERROR("failed to parse clk ADE_CORE\n");
- return -ENODEV;
+ return PTR_ERR(ctx->ade_core_clk);
}
ctx->media_noc_clk = devm_clk_get(dev, "clk_codec_jpeg");
- if (!ctx->media_noc_clk) {
+ if (IS_ERR(ctx->media_noc_clk)) {
DRM_ERROR("failed to parse clk CODEC_JPEG\n");
- return -ENODEV;
+ return PTR_ERR(ctx->media_noc_clk);
}
ctx->ade_pix_clk = devm_clk_get(dev, "clk_ade_pix");
- if (!ctx->ade_pix_clk) {
+ if (IS_ERR(ctx->ade_pix_clk)) {
DRM_ERROR("failed to parse clk ADE_PIX\n");
- return -ENODEV;
+ return PTR_ERR(ctx->ade_pix_clk);
}
return 0;
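
The clock handling fix above hinges on the devm_clk_get() contract: on failure it returns an ERR_PTR()-encoded error and never NULL, so the previous NULL checks could never trigger. The canonical pattern, sketched for a single clock:

	struct clk *clk;

	clk = devm_clk_get(dev, "clk_ade_core");
	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* keeps the real reason, e.g. -EPROBE_DEFER */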
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index 3f94785fb..1edd9bc80 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -171,9 +171,8 @@ static struct drm_driver kirin_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC | DRIVER_HAVE_IRQ,
.fops = &kirin_drm_fops,
- .set_busid = drm_platform_set_busid,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = kirin_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
@@ -221,19 +220,12 @@ static int kirin_drm_bind(struct device *dev)
if (ret)
goto err_kms_cleanup;
- /* connectors should be registered after drm device register */
- ret = drm_connector_register_all(drm_dev);
- if (ret)
- goto err_drm_dev_unregister;
-
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
driver->name, driver->major, driver->minor, driver->patchlevel,
driver->date, drm_dev->primary->index);
return 0;
-err_drm_dev_unregister:
- drm_dev_unregister(drm_dev);
err_kms_cleanup:
kirin_drm_kms_cleanup(drm_dev);
err_drm_dev_unref:
@@ -246,7 +238,6 @@ static void kirin_drm_unbind(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- drm_connector_unregister_all(drm_dev);
drm_dev_unregister(drm_dev);
kirin_drm_kms_cleanup(drm_dev);
drm_dev_unref(drm_dev);
diff --git a/drivers/gpu/drm/i2c/Kconfig b/drivers/gpu/drm/i2c/Kconfig
index 22c7ed63a..4d341db46 100644
--- a/drivers/gpu/drm/i2c/Kconfig
+++ b/drivers/gpu/drm/i2c/Kconfig
@@ -1,12 +1,6 @@
menu "I2C encoder or helper chips"
depends on DRM && DRM_KMS_HELPER && I2C
-config DRM_I2C_ADV7511
- tristate "AV7511 encoder"
- select REGMAP_I2C
- help
- Support for the Analog Device ADV7511(W) and ADV7513 HDMI encoders.
-
config DRM_I2C_CH7006
tristate "Chrontel ch7006 TV encoder"
default m if DRM_NOUVEAU
diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile
index 2c72eb584..43aa33bae 100644
--- a/drivers/gpu/drm/i2c/Makefile
+++ b/drivers/gpu/drm/i2c/Makefile
@@ -1,7 +1,5 @@
ccflags-y := -Iinclude/drm
-obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o
-
ch7006-y := ch7006_drv.o ch7006_mode.o
obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 0594c45f7..e9e8ae2ec 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -361,13 +361,8 @@ static int ch7006_encoder_set_property(struct drm_encoder *encoder,
/* Disable the crtc to ensure a full modeset is
* performed whenever it's turned on again. */
- if (crtc) {
- struct drm_mode_set modeset = {
- .crtc = crtc,
- };
-
- drm_mode_set_config_internal(&modeset);
- }
+ if (crtc)
+ drm_crtc_force_disable(crtc);
}
return 0;
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 29a32b119..7769e4691 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -57,6 +57,28 @@ config DRM_I915_USERPTR
If in doubt, say "Y".
+config DRM_I915_GVT
+ bool "Enable Intel GVT-g graphics virtualization host support"
+ depends on DRM_I915
+ default n
+ help
+ Choose this option if you want to enable Intel GVT-g graphics
+ virtualization technology host support with integrated graphics.
+ With GVT-g, it's possible to have one integrated graphics
+ device shared by multiple VMs under different hypervisors.
+
+ Note that at least one hypervisor, such as Xen or KVM, is required
+ for this driver to work, and that only Broadwell and newer devices
+ are supported. For further information and a setup guide, visit:
+ http://01.org/igvt-g.
+
+ For now this is just a stub carrying the i915 modifications needed
+ by the GVT device model. It requires at least one MPT module (for
+ Xen or KVM) and the other GVT device model components in order to
+ work. Use it at your own risk.
+
+ If in doubt, say "N".
+
menu "drm/i915 Debugging"
depends on DRM_I915
depends on EXPERT
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 8f4041033..cee87bfd1 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -18,6 +18,9 @@ config DRM_I915_WERROR
config DRM_I915_DEBUG
bool "Enable additional driver debugging"
depends on DRM_I915
+ select PREEMPT_COUNT
+ select X86_MSR # used by igt/pm_rpm
+ select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
default n
help
Choose this option to turn on extra driver debugging that may affect
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 0b88ba0f3..684fc1cd0 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -10,9 +10,11 @@ subdir-ccflags-$(CONFIG_DRM_I915_WERROR) := -Werror
i915-y := i915_drv.o \
i915_irq.o \
i915_params.o \
+ i915_pci.o \
i915_suspend.o \
i915_sysfs.o \
intel_csr.o \
+ intel_device_info.o \
intel_pm.o \
intel_runtime_pm.o
@@ -37,6 +39,7 @@ i915-y += i915_cmd_parser.o \
i915_gem_userptr.o \
i915_gpu_error.o \
i915_trace_points.o \
+ intel_breadcrumbs.o \
intel_lrc.o \
intel_mocs.o \
intel_ringbuffer.o \
@@ -59,6 +62,7 @@ i915-y += intel_audio.o \
intel_bios.o \
intel_color.o \
intel_display.o \
+ intel_dpio_phy.o \
intel_dpll_mgr.o \
intel_fbc.o \
intel_fifo_underrun.o \
@@ -81,10 +85,12 @@ i915-y += dvo_ch7017.o \
dvo_tfp410.o \
intel_crt.o \
intel_ddi.o \
+ intel_dp_aux_backlight.o \
intel_dp_link_training.o \
intel_dp_mst.o \
intel_dp.o \
intel_dsi.o \
+ intel_dsi_dcs_backlight.o \
intel_dsi_panel_vbt.o \
intel_dsi_pll.o \
intel_dvo.o \
@@ -98,8 +104,10 @@ i915-y += dvo_ch7017.o \
# virtual gpu code
i915-y += i915_vgpu.o
-# legacy horrors
-i915-y += i915_dma.o
+ifeq ($(CONFIG_DRM_I915_GVT),y)
+i915-y += intel_gvt.o
+include $(src)/gvt/Makefile
+endif
obj-$(CONFIG_DRM_I915) += i915.o
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
new file mode 100644
index 000000000..d0f21a6ad
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -0,0 +1,5 @@
+GVT_DIR := gvt
+GVT_SOURCE := gvt.o
+
+ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall
+i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
diff --git a/drivers/gpu/drm/i915/gvt/debug.h b/drivers/gpu/drm/i915/gvt/debug.h
new file mode 100644
index 000000000..7ef412be6
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/debug.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __GVT_DEBUG_H__
+#define __GVT_DEBUG_H__
+
+#define gvt_dbg_core(fmt, args...) \
+ DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
+
+/*
+ * Other GVT debug macros will be introduced in the GVT device model patches.
+ */
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
new file mode 100644
index 000000000..927f4579f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/types.h>
+#include <xen/xen.h>
+
+#include "i915_drv.h"
+
+struct intel_gvt_host intel_gvt_host;
+
+static const char * const supported_hypervisors[] = {
+ [INTEL_GVT_HYPERVISOR_XEN] = "XEN",
+ [INTEL_GVT_HYPERVISOR_KVM] = "KVM",
+};
+
+/**
+ * intel_gvt_init_host - Load MPT modules and detect if we're running in host
+ * @gvt: intel gvt device
+ *
+ * This function is called at the driver loading stage. If it fails to find
+ * a loadable MPT module, or detects that we are currently running inside a
+ * VM, GVT-g will be disabled.
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ *
+ */
+int intel_gvt_init_host(void)
+{
+ if (intel_gvt_host.initialized)
+ return 0;
+
+ /* Xen DOM U */
+ if (xen_domain() && !xen_initial_domain())
+ return -ENODEV;
+
+ /* Try to load MPT modules for hypervisors */
+ if (xen_initial_domain()) {
+ /* In Xen dom0 */
+ intel_gvt_host.mpt = try_then_request_module(
+ symbol_get(xengt_mpt), "xengt");
+ intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_XEN;
+ } else {
+ /* not in Xen. Try KVMGT */
+ intel_gvt_host.mpt = try_then_request_module(
+ symbol_get(kvmgt_mpt), "kvm");
+ intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_KVM;
+ }
+
+ /* Fail to load MPT modules - bail out */
+ if (!intel_gvt_host.mpt)
+ return -EINVAL;
+
+ /* Try to detect if we're running in host instead of VM. */
+ if (!intel_gvt_hypervisor_detect_host())
+ return -ENODEV;
+
+ gvt_dbg_core("Running with hypervisor %s in host mode\n",
+ supported_hypervisors[intel_gvt_host.hypervisor_type]);
+
+ intel_gvt_host.initialized = true;
+ return 0;
+}
+
+static void init_device_info(struct intel_gvt *gvt)
+{
+ if (IS_BROADWELL(gvt->dev_priv))
+ gvt->device_info.max_support_vgpus = 8;
+ /* This function will grow large in GVT device model patches. */
+}
+
+/**
+ * intel_gvt_clean_device - clean a GVT device
+ * @gvt: intel gvt device
+ *
+ * This function is called at the driver unloading stage, to free the
+ * resources owned by a GVT device.
+ *
+ */
+void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
+{
+ struct intel_gvt *gvt = &dev_priv->gvt;
+
+ if (WARN_ON(!gvt->initialized))
+ return;
+
+ /* Other de-initialization of GVT components will be introduced. */
+
+ gvt->initialized = false;
+}
+
+/**
+ * intel_gvt_init_device - initialize a GVT device
+ * @dev_priv: drm i915 private data
+ *
+ * This function is called at the initialization stage, to initialize
+ * necessary GVT components.
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ *
+ */
+int intel_gvt_init_device(struct drm_i915_private *dev_priv)
+{
+ struct intel_gvt *gvt = &dev_priv->gvt;
+ /*
+ * The GVT device cannot be initialized until intel_gvt_host has
+ * been initialized first.
+ */
+ if (WARN_ON(!intel_gvt_host.initialized))
+ return -EINVAL;
+
+ if (WARN_ON(gvt->initialized))
+ return -EEXIST;
+
+ gvt_dbg_core("init gvt device\n");
+
+ init_device_info(gvt);
+ /*
+ * Other initialization of GVT components will be introduced here.
+ */
+ gvt_dbg_core("gvt device creation is done\n");
+ gvt->initialized = true;
+ return 0;
+}
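
The kerneldoc above implies a two-step bring-up: detect the host environment once, then create the per-device GVT state. A hedged sketch of how a caller in the i915 load path might chain the two entry points; the wrapper name is an assumption, not part of this patch:

	static int i915_gvt_load_sketch(struct drm_i915_private *dev_priv)
	{
		int ret;

		/* Host/MPT detection; failure just leaves GVT-g disabled. */
		ret = intel_gvt_init_host();
		if (ret) {
			DRM_DEBUG_DRIVER("GVT-g host not detected, skipping\n");
			return 0;
		}

		/* Per-device setup, valid only after the host check above. */
		return intel_gvt_init_device(dev_priv);
	}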
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
new file mode 100644
index 000000000..fb619a6e5
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _GVT_H_
+#define _GVT_H_
+
+#include "debug.h"
+#include "hypercall.h"
+
+#define GVT_MAX_VGPU 8
+
+enum {
+ INTEL_GVT_HYPERVISOR_XEN = 0,
+ INTEL_GVT_HYPERVISOR_KVM,
+};
+
+struct intel_gvt_host {
+ bool initialized;
+ int hypervisor_type;
+ struct intel_gvt_mpt *mpt;
+};
+
+extern struct intel_gvt_host intel_gvt_host;
+
+/* Describe per-platform limitations. */
+struct intel_gvt_device_info {
+ u32 max_support_vgpus;
+ /* This data structure will grow bigger in GVT device model patches */
+};
+
+struct intel_vgpu {
+ struct intel_gvt *gvt;
+ int id;
+ unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
+};
+
+struct intel_gvt {
+ struct mutex lock;
+ bool initialized;
+
+ struct drm_i915_private *dev_priv;
+ struct idr vgpu_idr; /* vGPU IDR pool */
+
+ struct intel_gvt_device_info device_info;
+};
+
+#include "mpt.h"
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
new file mode 100644
index 000000000..254df8bf1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _GVT_HYPERCALL_H_
+#define _GVT_HYPERCALL_H_
+
+/*
+ * Per-hypervisor collections of MPT module functions. Currently GVT-g supports
+ * both Xen and KVM by providing dedicated hypervisor-related MPT modules.
+ */
+struct intel_gvt_mpt {
+ int (*detect_host)(void);
+};
+
+extern struct intel_gvt_mpt xengt_mpt;
+extern struct intel_gvt_mpt kvmgt_mpt;
+
+#endif /* _GVT_HYPERCALL_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
new file mode 100644
index 000000000..03601e3ff
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _GVT_MPT_H_
+#define _GVT_MPT_H_
+
+/**
+ * DOC: Hypervisor Service APIs for GVT-g Core Logic
+ *
+ * This is the glue layer between specific hypervisor MPT modules and GVT-g core
+ * logic. Each kind of hypervisor MPT module provides a collection of function
+ * callbacks and is attached to the GVT host when the driver loads. The
+ * GVT-g core logic calls these APIs to request specific services from
+ * the hypervisor.
+ */
+
+/**
+ * intel_gvt_hypervisor_detect_host - check if GVT-g is running within
+ * the hypervisor host/privileged domain
+ *
+ * Returns:
+ * Non-zero when running in the hypervisor host, zero when inside a VM
+ */
+static inline int intel_gvt_hypervisor_detect_host(void)
+{
+ return intel_gvt_host.mpt->detect_host();
+}
+
+#endif /* _GVT_MPT_H_ */
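
The DOC block above describes an MPT module as a callback collection attached at load time. A minimal sketch of what a Xen backend would provide; only the xengt_mpt symbol name and the detect_host slot come from the headers above, the detection logic is an illustration:

	#include <linux/export.h>
	#include <xen/xen.h>

	#include "hypercall.h"

	static int xengt_detect_host(void)
	{
		/* Non-zero in the privileged domain, zero inside a guest,
		 * matching how intel_gvt_init_host() tests the result. */
		return xen_initial_domain();
	}

	struct intel_gvt_mpt xengt_mpt = {
		.detect_host = xengt_detect_host,
	};
	EXPORT_SYMBOL_GPL(xengt_mpt);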
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index a337f33be..b0fd6a7b0 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -215,7 +215,8 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
CMD( MI_RS_CONTEXT, SMI, F, 1, S ),
CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
- CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, R ),
+ CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
+ .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
CMD( MI_RS_STORE_DATA_IMM, SMI, !F, 0xFF, S ),
CMD( MI_LOAD_URB_MEM, SMI, !F, 0xFF, S ),
CMD( MI_STORE_URB_MEM, SMI, !F, 0xFF, S ),
@@ -736,7 +737,7 @@ static void fini_hash_table(struct intel_engine_cs *engine)
/**
* i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
- * @ring: the ringbuffer to initialize
+ * @engine: the engine to initialize
*
* Optionally initializes fields related to batch buffer command parsing in the
* struct intel_engine_cs based on whether the platform requires software
@@ -750,12 +751,12 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
int cmd_table_count;
int ret;
- if (!IS_GEN7(engine->dev))
+ if (!IS_GEN7(engine->i915))
return 0;
switch (engine->id) {
case RCS:
- if (IS_HASWELL(engine->dev)) {
+ if (IS_HASWELL(engine->i915)) {
cmd_tables = hsw_render_ring_cmds;
cmd_table_count =
ARRAY_SIZE(hsw_render_ring_cmds);
@@ -764,7 +765,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
}
- if (IS_HASWELL(engine->dev)) {
+ if (IS_HASWELL(engine->i915)) {
engine->reg_tables = hsw_render_reg_tables;
engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables);
} else {
@@ -780,7 +781,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
case BCS:
- if (IS_HASWELL(engine->dev)) {
+ if (IS_HASWELL(engine->i915)) {
cmd_tables = hsw_blt_ring_cmds;
cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
} else {
@@ -788,7 +789,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
}
- if (IS_HASWELL(engine->dev)) {
+ if (IS_HASWELL(engine->i915)) {
engine->reg_tables = hsw_blt_reg_tables;
engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
} else {
@@ -829,7 +830,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
/**
* i915_cmd_parser_fini_ring() - clean up cmd parser related fields
- * @ring: the ringbuffer to clean up
+ * @engine: the engine to clean up
*
* Releases any resources related to command parsing that may have been
* initialized for the specified ring.
@@ -1023,7 +1024,7 @@ unpin_src:
/**
* i915_needs_cmd_parser() - should a given ring use software command parsing?
- * @ring: the ring in question
+ * @engine: the engine in question
*
* Only certain platforms require software batch buffer command parsing, and
* only when enabled via module parameter.
@@ -1035,7 +1036,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *engine)
if (!engine->needs_cmd_parser)
return false;
- if (!USES_PPGTT(engine->dev))
+ if (!USES_PPGTT(engine->i915))
return false;
return (i915.enable_cmd_parser == 1);
@@ -1098,6 +1099,11 @@ static bool check_cmd(const struct intel_engine_cs *engine,
return false;
}
+ if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
+ DRM_DEBUG_DRIVER("CMD: Rejected LRR to OACONTROL\n");
+ return false;
+ }
+
if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
*oacontrol_set = (cmd[offset + 1] != 0);
}
@@ -1113,6 +1119,12 @@ static bool check_cmd(const struct intel_engine_cs *engine,
return false;
}
+ if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
+ DRM_DEBUG_DRIVER("CMD: Rejected LRR to masked register 0x%08X\n",
+ reg_addr);
+ return false;
+ }
+
if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) &&
(offset + 2 > length ||
(cmd[offset + 1] & reg->mask) != reg->value)) {
@@ -1164,7 +1176,7 @@ static bool check_cmd(const struct intel_engine_cs *engine,
/**
* i915_parse_cmds() - parse a submitted batch buffer for privilege violations
- * @ring: the ring on which the batch is to execute
+ * @engine: the engine on which the batch is to execute
* @batch_obj: the batch buffer in question
* @shadow_batch_obj: copy of the batch buffer in question
* @batch_start_offset: byte offset in the batch at which execution starts
@@ -1269,14 +1281,28 @@ int i915_parse_cmds(struct intel_engine_cs *engine,
/**
* i915_cmd_parser_get_version() - get the cmd parser version number
+ * @dev_priv: i915 device private
*
* The cmd parser maintains a simple increasing integer version number suitable
* for passing to userspace clients to determine what operations are permitted.
*
* Return: the current version number of the cmd parser
*/
-int i915_cmd_parser_get_version(void)
+int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
{
+ struct intel_engine_cs *engine;
+ bool active = false;
+
+ /* If the command parser is not enabled, report 0 - unsupported */
+ for_each_engine(engine, dev_priv) {
+ if (i915_needs_cmd_parser(engine)) {
+ active = true;
+ break;
+ }
+ }
+ if (!active)
+ return 0;
+
/*
* Command parser version history
*
@@ -1288,6 +1314,7 @@ int i915_cmd_parser_get_version(void)
* 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
* 5. GPGPU dispatch compute indirect registers.
* 6. TIMESTAMP register and Haswell CS GPR registers
+ * 7. Allow MI_LOAD_REGISTER_REG between whitelisted registers.
*/
- return 6;
+ return 7;
}
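
The version history above is what userspace actually observes: after this change the kernel reports 7 when the command parser is active on at least one engine, and 0 when it is not. A hedged userspace-side sketch of reading that number through the long-standing GETPARAM ioctl, error handling trimmed:

	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static int cmd_parser_version(int drm_fd)
	{
		int value = 0;
		struct drm_i915_getparam gp = {
			.param = I915_PARAM_CMD_PARSER_VERSION,
			.value = &value,
		};

		if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
			return -1;

		return value;	/* 7 with this patch, 0 if parsing is off */
	}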
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 2a6e12956..844fea795 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -89,17 +89,17 @@ static int i915_capabilities(struct seq_file *m, void *data)
return 0;
}
-static const char get_active_flag(struct drm_i915_gem_object *obj)
+static char get_active_flag(struct drm_i915_gem_object *obj)
{
return obj->active ? '*' : ' ';
}
-static const char get_pin_flag(struct drm_i915_gem_object *obj)
+static char get_pin_flag(struct drm_i915_gem_object *obj)
{
return obj->pin_display ? 'p' : ' ';
}
-static const char get_tiling_flag(struct drm_i915_gem_object *obj)
+static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
switch (obj->tiling_mode) {
default:
@@ -109,12 +109,12 @@ static const char get_tiling_flag(struct drm_i915_gem_object *obj)
}
}
-static inline const char get_global_flag(struct drm_i915_gem_object *obj)
+static char get_global_flag(struct drm_i915_gem_object *obj)
{
return i915_gem_obj_to_ggtt(obj) ? 'g' : ' ';
}
-static inline const char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
+static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
return obj->mapping ? 'M' : ' ';
}
@@ -199,13 +199,6 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
}
-static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
-{
- seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
- seq_putc(m, ctx->remap_slice ? 'R' : 'r');
- seq_putc(m, ' ');
-}
-
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
@@ -272,7 +265,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
u64 total_obj_size, total_gtt_size;
LIST_HEAD(stolen);
@@ -424,6 +417,42 @@ static void print_batch_pool_stats(struct seq_file *m,
print_file_stats(m, "[k]batch pool", stats);
}
+static int per_file_ctx_stats(int id, void *ptr, void *data)
+{
+ struct i915_gem_context *ctx = ptr;
+ int n;
+
+ for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
+ if (ctx->engine[n].state)
+ per_file_stats(0, ctx->engine[n].state, data);
+ if (ctx->engine[n].ringbuf)
+ per_file_stats(0, ctx->engine[n].ringbuf->obj, data);
+ }
+
+ return 0;
+}
+
+static void print_context_stats(struct seq_file *m,
+ struct drm_i915_private *dev_priv)
+{
+ struct file_stats stats;
+ struct drm_file *file;
+
+ memset(&stats, 0, sizeof(stats));
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ if (dev_priv->kernel_context)
+ per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
+
+ list_for_each_entry(file, &dev_priv->drm.filelist, lhead) {
+ struct drm_i915_file_private *fpriv = file->driver_priv;
+ idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
+ }
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ print_file_stats(m, "[k]contexts", stats);
+}
+
#define count_vmas(list, member) do { \
list_for_each_entry(vma, list, member) { \
size += i915_gem_obj_total_ggtt_size(vma->obj); \
@@ -528,10 +557,10 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
seq_putc(m, '\n');
print_batch_pool_stats(m, dev_priv);
-
mutex_unlock(&dev->struct_mutex);
mutex_lock(&dev->filelist_mutex);
+ print_context_stats(m, dev_priv);
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct file_stats stats;
struct task_struct *task;
@@ -562,7 +591,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
uintptr_t list = (uintptr_t) node->info_ent->data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
u64 total_obj_size, total_gtt_size;
int count, ret;
@@ -596,7 +625,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc;
int ret;
@@ -607,18 +636,20 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
for_each_intel_crtc(dev, crtc) {
const char pipe = pipe_name(crtc->pipe);
const char plane = plane_name(crtc->plane);
- struct intel_unpin_work *work;
+ struct intel_flip_work *work;
spin_lock_irq(&dev->event_lock);
- work = crtc->unpin_work;
+ work = crtc->flip_work;
if (work == NULL) {
seq_printf(m, "No flip due on pipe %c (plane %c)\n",
pipe, plane);
} else {
+ u32 pending;
u32 addr;
- if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
- seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
+ pending = atomic_read(&work->pending);
+ if (pending) {
+ seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n",
pipe, plane);
} else {
seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
@@ -631,18 +662,14 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
engine->name,
i915_gem_request_get_seqno(work->flip_queued_req),
dev_priv->next_seqno,
- engine->get_seqno(engine),
- i915_gem_request_completed(work->flip_queued_req, true));
+ intel_engine_get_seqno(engine),
+ i915_gem_request_completed(work->flip_queued_req));
} else
seq_printf(m, "Flip not associated with any ring\n");
seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
work->flip_queued_vblank,
work->flip_ready_vblank,
- drm_crtc_vblank_count(&crtc->base));
- if (work->enable_stall_check)
- seq_puts(m, "Stall check enabled, ");
- else
- seq_puts(m, "Stall check waiting for page flip ioctl, ");
+ intel_crtc_get_vblank_counter(crtc));
seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
if (INTEL_INFO(dev)->gen >= 4)
@@ -668,7 +695,7 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
struct intel_engine_cs *engine;
int total = 0;
@@ -713,7 +740,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
struct drm_i915_gem_request *req;
int ret, any;
@@ -761,17 +788,29 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
static void i915_ring_seqno_info(struct seq_file *m,
struct intel_engine_cs *engine)
{
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct rb_node *rb;
+
seq_printf(m, "Current sequence (%s): %x\n",
- engine->name, engine->get_seqno(engine));
- seq_printf(m, "Current user interrupts (%s): %x\n",
- engine->name, READ_ONCE(engine->user_interrupts));
+ engine->name, intel_engine_get_seqno(engine));
+ seq_printf(m, "Current user interrupts (%s): %lx\n",
+ engine->name, READ_ONCE(engine->breadcrumbs.irq_wakeups));
+
+ spin_lock(&b->lock);
+ for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
+ struct intel_wait *w = container_of(rb, typeof(*w), node);
+
+ seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
+ engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
+ }
+ spin_unlock(&b->lock);
}
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
int ret;
@@ -794,7 +833,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
int ret, i, pipe;
@@ -985,7 +1024,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int i, ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1013,7 +1052,7 @@ static int i915_hws_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
const u32 *hws;
int i;
@@ -1124,7 +1163,7 @@ static int
i915_next_seqno_get(void *data, u64 *val)
{
struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1161,7 +1200,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
intel_runtime_pm_get(dev_priv);
@@ -1281,6 +1320,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
}
seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
+ seq_printf(m, "pm_intr_keep: 0x%08x\n", dev_priv->rps.pm_intr_keep);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
seq_printf(m, "Render p-state ratio: %d\n",
(gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
@@ -1363,7 +1403,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
u64 acthd[I915_NUM_ENGINES];
u32 seqno[I915_NUM_ENGINES];
@@ -1380,10 +1420,10 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
for_each_engine_id(engine, dev_priv, id) {
acthd[id] = intel_ring_get_active_head(engine);
- seqno[id] = engine->get_seqno(engine);
+ seqno[id] = intel_engine_get_seqno(engine);
}
- i915_get_extra_instdone(dev, instdone);
+ i915_get_extra_instdone(dev_priv, instdone);
intel_runtime_pm_put(dev_priv);
@@ -1400,9 +1440,11 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
engine->hangcheck.seqno,
seqno[id],
engine->last_submitted_seqno);
- seq_printf(m, "\tuser interrupts = %x [current %x]\n",
+ seq_printf(m, "\twaiters? %d\n",
+ intel_engine_has_waiter(engine));
+ seq_printf(m, "\tuser interrupts = %lx [current %lx]\n",
engine->hangcheck.user_interrupts,
- READ_ONCE(engine->user_interrupts));
+ READ_ONCE(engine->breadcrumbs.irq_wakeups));
seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
(long long)engine->hangcheck.acthd,
(long long)acthd[id]);
@@ -1432,7 +1474,7 @@ static int ironlake_drpc_info(struct seq_file *m)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 rgvmodectl, rstdbyctl;
u16 crstandvid;
int ret;
@@ -1500,7 +1542,7 @@ static int i915_forcewake_domains(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_uncore_forcewake_domain *fw_domain;
spin_lock_irq(&dev_priv->uncore.lock);
@@ -1518,7 +1560,7 @@ static int vlv_drpc_info(struct seq_file *m)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 rpmodectl1, rcctl1, pw_status;
intel_runtime_pm_get(dev_priv);
@@ -1558,7 +1600,7 @@ static int gen6_drpc_info(struct seq_file *m)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
unsigned forcewake_count;
int count = 0, ret;
@@ -1670,7 +1712,7 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
seq_printf(m, "FB tracking busy bits: 0x%08x\n",
dev_priv->fb_tracking.busy_bits);
@@ -1685,7 +1727,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (!HAS_FBC(dev)) {
seq_puts(m, "FBC unsupported on this chipset\n");
@@ -1715,7 +1757,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
static int i915_fbc_fc_get(void *data, u64 *val)
{
struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
return -ENODEV;
@@ -1728,7 +1770,7 @@ static int i915_fbc_fc_get(void *data, u64 *val)
static int i915_fbc_fc_set(void *data, u64 val)
{
struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 reg;
if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
@@ -1755,7 +1797,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (!HAS_IPS(dev)) {
seq_puts(m, "not supported\n");
@@ -1785,7 +1827,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
bool sr_enabled = false;
intel_runtime_pm_get(dev_priv);
@@ -1814,7 +1856,7 @@ static int i915_emon_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long temp, chipset, gfx;
int ret;
@@ -1842,7 +1884,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
int gpu_freq, ia_freq;
unsigned int max_gpu_freq, min_gpu_freq;
@@ -1897,7 +1939,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_opregion *opregion = &dev_priv->opregion;
int ret;
@@ -1918,7 +1960,7 @@ static int i915_vbt(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_opregion *opregion = &dev_priv->opregion;
if (opregion->vbt)
@@ -1940,19 +1982,19 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
return ret;
#ifdef CONFIG_DRM_FBDEV_EMULATION
- if (to_i915(dev)->fbdev) {
- fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb);
-
- seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
- fbdev_fb->base.width,
- fbdev_fb->base.height,
- fbdev_fb->base.depth,
- fbdev_fb->base.bits_per_pixel,
- fbdev_fb->base.modifier[0],
- drm_framebuffer_read_refcount(&fbdev_fb->base));
- describe_obj(m, fbdev_fb->obj);
- seq_putc(m, '\n');
- }
+ if (to_i915(dev)->fbdev) {
+ fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb);
+
+ seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
+ fbdev_fb->base.width,
+ fbdev_fb->base.height,
+ fbdev_fb->base.depth,
+ fbdev_fb->base.bits_per_pixel,
+ fbdev_fb->base.modifier[0],
+ drm_framebuffer_read_refcount(&fbdev_fb->base));
+ describe_obj(m, fbdev_fb->obj);
+ seq_putc(m, '\n');
+ }
#endif
mutex_lock(&dev->mode_config.fb_lock);
@@ -1989,10 +2031,9 @@ static int i915_context_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
- struct intel_context *ctx;
- enum intel_engine_id id;
+ struct i915_gem_context *ctx;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -2000,32 +2041,36 @@ static int i915_context_status(struct seq_file *m, void *unused)
return ret;
list_for_each_entry(ctx, &dev_priv->context_list, link) {
- if (!i915.enable_execlists &&
- ctx->legacy_hw_ctx.rcs_state == NULL)
- continue;
-
- seq_puts(m, "HW context ");
- describe_ctx(m, ctx);
- if (ctx == dev_priv->kernel_context)
- seq_printf(m, "(kernel context) ");
+ seq_printf(m, "HW context %u ", ctx->hw_id);
+ if (IS_ERR(ctx->file_priv)) {
+ seq_puts(m, "(deleted) ");
+ } else if (ctx->file_priv) {
+ struct pid *pid = ctx->file_priv->file->pid;
+ struct task_struct *task;
- if (i915.enable_execlists) {
- seq_putc(m, '\n');
- for_each_engine_id(engine, dev_priv, id) {
- struct drm_i915_gem_object *ctx_obj =
- ctx->engine[id].state;
- struct intel_ringbuffer *ringbuf =
- ctx->engine[id].ringbuf;
-
- seq_printf(m, "%s: ", engine->name);
- if (ctx_obj)
- describe_obj(m, ctx_obj);
- if (ringbuf)
- describe_ctx_ringbuf(m, ringbuf);
- seq_putc(m, '\n');
+ task = get_pid_task(pid, PIDTYPE_PID);
+ if (task) {
+ seq_printf(m, "(%s [%d]) ",
+ task->comm, task->pid);
+ put_task_struct(task);
}
} else {
- describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
+ seq_puts(m, "(kernel) ");
+ }
+
+ seq_putc(m, ctx->remap_slice ? 'R' : 'r');
+ seq_putc(m, '\n');
+
+ for_each_engine(engine, dev_priv) {
+ struct intel_context *ce = &ctx->engine[engine->id];
+
+ seq_printf(m, "%s: ", engine->name);
+ seq_putc(m, ce->initialised ? 'I' : 'i');
+ if (ce->state)
+ describe_obj(m, ce->state);
+ if (ce->ringbuf)
+ describe_ctx_ringbuf(m, ce->ringbuf);
+ seq_putc(m, '\n');
}
seq_putc(m, '\n');
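The new per-context owner reporting above dereferences task->comm only while holding a task reference. A hedged kernel-style sketch of that get_pid_task()/put_task_struct() pairing (helper name invented, not part of the patch):

#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

static void sketch_report_owner(struct seq_file *m, struct pid *pid)
{
        struct task_struct *task;

        task = get_pid_task(pid, PIDTYPE_PID);  /* takes a task reference */
        if (!task)
                return;
        seq_printf(m, "(%s [%d]) ", task->comm, task->pid);
        put_task_struct(task);                  /* drops the reference */
}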
@@ -2037,24 +2082,22 @@ static int i915_context_status(struct seq_file *m, void *unused)
}
static void i915_dump_lrc_obj(struct seq_file *m,
- struct intel_context *ctx,
+ struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
+ struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
struct page *page;
uint32_t *reg_state;
int j;
- struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
unsigned long ggtt_offset = 0;
+ seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);
+
if (ctx_obj == NULL) {
- seq_printf(m, "Context on %s with no gem object\n",
- engine->name);
+ seq_puts(m, "\tNot allocated\n");
return;
}
- seq_printf(m, "CONTEXT: %s %u\n", engine->name,
- intel_execlists_ctx_id(ctx, engine));
-
if (!i915_gem_obj_ggtt_bound(ctx_obj))
seq_puts(m, "\tNot bound in GGTT\n");
else
@@ -2085,9 +2128,9 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
int ret;
if (!i915.enable_execlists) {
@@ -2100,10 +2143,8 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
return ret;
list_for_each_entry(ctx, &dev_priv->context_list, link)
- if (ctx != dev_priv->kernel_context) {
- for_each_engine(engine, dev_priv)
- i915_dump_lrc_obj(m, ctx, engine);
- }
+ for_each_engine(engine, dev_priv)
+ i915_dump_lrc_obj(m, ctx, engine);
mutex_unlock(&dev->struct_mutex);
@@ -2114,7 +2155,7 @@ static int i915_execlists(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
u32 status_pointer;
u8 read_pointer;
@@ -2174,8 +2215,8 @@ static int i915_execlists(struct seq_file *m, void *data)
seq_printf(m, "\t%d requests in queue\n", count);
if (head_req) {
- seq_printf(m, "\tHead request id: %u\n",
- intel_execlists_ctx_id(head_req->ctx, engine));
+ seq_printf(m, "\tHead request context: %u\n",
+ head_req->ctx->hw_id);
seq_printf(m, "\tHead request tail: %u\n",
head_req->tail);
}
@@ -2217,7 +2258,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -2269,7 +2310,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
static int per_file_ctx(int id, void *ptr, void *data)
{
- struct intel_context *ctx = ptr;
+ struct i915_gem_context *ctx = ptr;
struct seq_file *m = data;
struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
@@ -2290,7 +2331,7 @@ static int per_file_ctx(int id, void *ptr, void *data)
static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
int i;
@@ -2311,15 +2352,15 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
- if (INTEL_INFO(dev)->gen == 6)
+ if (IS_GEN6(dev_priv))
seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
for_each_engine(engine, dev_priv) {
seq_printf(m, "%s\n", engine->name);
- if (INTEL_INFO(dev)->gen == 7)
+ if (IS_GEN7(dev_priv))
seq_printf(m, "GFX_MODE: 0x%08x\n",
I915_READ(RING_MODE_GEN7(engine)));
seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
@@ -2345,7 +2386,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_file *file;
int ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -2388,7 +2429,7 @@ static int count_irq_waiters(struct drm_i915_private *i915)
int count = 0;
for_each_engine(engine, i915)
- count += engine->irq_refcount;
+ count += intel_engine_has_waiter(engine);
return count;
}
@@ -2397,11 +2438,12 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_file *file;
seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
- seq_printf(m, "GPU busy? %d\n", dev_priv->mm.busy);
+ seq_printf(m, "GPU busy? %s [%x]\n",
+ yesno(dev_priv->gt.awake), dev_priv->gt.active_engines);
seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n",
intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
@@ -2442,7 +2484,7 @@ static int i915_llc(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
const bool edram = INTEL_GEN(dev_priv) > 8;
seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
@@ -2455,7 +2497,7 @@ static int i915_llc(struct seq_file *m, void *data)
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
- struct drm_i915_private *dev_priv = node->minor->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(node->minor->dev);
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
u32 tmp, i;
@@ -2510,15 +2552,16 @@ static void i915_guc_client_info(struct seq_file *m,
seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
client->wq_size, client->wq_offset, client->wq_tail);
+ seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space);
seq_printf(m, "\tFailed to queue: %u\n", client->q_fail);
seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
seq_printf(m, "\tLast submission result: %d\n", client->retcode);
for_each_engine(engine, dev_priv) {
seq_printf(m, "\tSubmissions: %llu %s\n",
- client->submissions[engine->guc_id],
+ client->submissions[engine->id],
engine->name);
- tot += client->submissions[engine->guc_id];
+ tot += client->submissions[engine->id];
}
seq_printf(m, "\tTotal: %llu\n", tot);
}
@@ -2527,7 +2570,7 @@ static int i915_guc_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_guc guc;
struct i915_guc_client client = {};
struct intel_engine_cs *engine;
@@ -2546,6 +2589,10 @@ static int i915_guc_info(struct seq_file *m, void *data)
mutex_unlock(&dev->struct_mutex);
+ seq_printf(m, "Doorbell map:\n");
+ seq_printf(m, "\t%*pb\n", GUC_MAX_DOORBELLS, guc.doorbell_bitmap);
+ seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc.db_cacheline);
+
seq_printf(m, "GuC total action count: %llu\n", guc.action_count);
seq_printf(m, "GuC action failure count: %u\n", guc.action_fail);
seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd);
@@ -2555,9 +2602,9 @@ static int i915_guc_info(struct seq_file *m, void *data)
seq_printf(m, "\nGuC submissions:\n");
for_each_engine(engine, dev_priv) {
seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
- engine->name, guc.submissions[engine->guc_id],
- guc.last_seqno[engine->guc_id]);
- total += guc.submissions[engine->guc_id];
+ engine->name, guc.submissions[engine->id],
+ guc.last_seqno[engine->id]);
+ total += guc.submissions[engine->id];
}
seq_printf(m, "\t%s: %llu\n", "Total", total);
@@ -2573,7 +2620,7 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *log_obj = dev_priv->guc.log_obj;
u32 *log;
int i = 0, pg;
@@ -2601,7 +2648,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 psrperf = 0;
u32 stat[3];
enum pipe pipe;
@@ -2669,7 +2716,6 @@ static int i915_sink_crc(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct intel_encoder *encoder;
struct intel_connector *connector;
struct intel_dp *intel_dp = NULL;
int ret;
@@ -2677,18 +2723,19 @@ static int i915_sink_crc(struct seq_file *m, void *data)
drm_modeset_lock_all(dev);
for_each_intel_connector(dev, connector) {
+ struct drm_crtc *crtc;
- if (connector->base.dpms != DRM_MODE_DPMS_ON)
+ if (!connector->base.state->best_encoder)
continue;
- if (!connector->base.encoder)
+ crtc = connector->base.state->crtc;
+ if (!crtc->state->active)
continue;
- encoder = to_intel_encoder(connector->base.encoder);
- if (encoder->type != INTEL_OUTPUT_EDP)
+ if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
continue;
- intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_dp = enc_to_intel_dp(connector->base.state->best_encoder);
ret = intel_dp_sink_crc(intel_dp, crc);
if (ret)
@@ -2709,7 +2756,7 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u64 power;
u32 units;
@@ -2735,12 +2782,12 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (!HAS_RUNTIME_PM(dev_priv))
seq_puts(m, "Runtime power management not supported\n");
- seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
+ seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
seq_printf(m, "IRQs disabled: %s\n",
yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
@@ -2750,8 +2797,8 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
seq_printf(m, "PCI device power state: %s [%d]\n",
- pci_power_name(dev_priv->dev->pdev->current_state),
- dev_priv->dev->pdev->current_state);
+ pci_power_name(dev_priv->drm.pdev->current_state),
+ dev_priv->drm.pdev->current_state);
return 0;
}
@@ -2760,7 +2807,7 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_power_domains *power_domains = &dev_priv->power_domains;
int i;
@@ -2795,7 +2842,7 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_csr *csr;
if (!HAS_CSR(dev)) {
@@ -2918,7 +2965,7 @@ static void intel_dp_info(struct seq_file *m,
seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
- if (intel_encoder->type == INTEL_OUTPUT_EDP)
+ if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
intel_panel_info(m, &intel_connector->panel);
}
@@ -2957,14 +3004,26 @@ static void intel_connector_info(struct seq_file *m,
seq_printf(m, "\tCEA rev: %d\n",
connector->display_info.cea_rev);
}
- if (intel_encoder) {
- if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
- intel_encoder->type == INTEL_OUTPUT_EDP)
- intel_dp_info(m, intel_connector);
- else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
- intel_hdmi_info(m, intel_connector);
- else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
+
+ if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
+ return;
+
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_eDP:
+ intel_dp_info(m, intel_connector);
+ break;
+ case DRM_MODE_CONNECTOR_LVDS:
+ if (intel_encoder->type == INTEL_OUTPUT_LVDS)
intel_lvds_info(m, intel_connector);
+ break;
+ case DRM_MODE_CONNECTOR_HDMIA:
+ if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
+ intel_encoder->type == INTEL_OUTPUT_UNKNOWN)
+ intel_hdmi_info(m, intel_connector);
+ break;
+ default:
+ break;
}
seq_printf(m, "\tmodes:\n");
@@ -2974,7 +3033,7 @@ static void intel_connector_info(struct seq_file *m,
static bool cursor_active(struct drm_device *dev, int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 state;
if (IS_845G(dev) || IS_I865G(dev))
@@ -2987,7 +3046,7 @@ static bool cursor_active(struct drm_device *dev, int pipe)
static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 pos;
pos = I915_READ(CURPOS(pipe));
@@ -3108,7 +3167,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc;
struct drm_connector *connector;
@@ -3163,13 +3222,13 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
enum intel_engine_id id;
int j, ret;
- if (!i915_semaphore_is_enabled(dev)) {
+ if (!i915_semaphore_is_enabled(dev_priv)) {
seq_puts(m, "Semaphores are disabled\n");
return 0;
}
@@ -3236,7 +3295,7 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int i;
drm_modeset_lock_all(dev);
@@ -3266,7 +3325,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
struct intel_engine_cs *engine;
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_workarounds *workarounds = &dev_priv->workarounds;
enum intel_engine_id id;
@@ -3304,7 +3363,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct skl_ddb_allocation *ddb;
struct skl_ddb_entry *entry;
enum pipe pipe;
@@ -3342,31 +3401,16 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
static void drrs_status_per_crtc(struct seq_file *m,
struct drm_device *dev, struct intel_crtc *intel_crtc)
{
- struct intel_encoder *intel_encoder;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_drrs *drrs = &dev_priv->drrs;
int vrefresh = 0;
+ struct drm_connector *connector;
- for_each_encoder_on_crtc(dev, &intel_crtc->base, intel_encoder) {
- /* Encoder connected on this CRTC */
- switch (intel_encoder->type) {
- case INTEL_OUTPUT_EDP:
- seq_puts(m, "eDP:\n");
- break;
- case INTEL_OUTPUT_DSI:
- seq_puts(m, "DSI:\n");
- break;
- case INTEL_OUTPUT_HDMI:
- seq_puts(m, "HDMI:\n");
- break;
- case INTEL_OUTPUT_DISPLAYPORT:
- seq_puts(m, "DP:\n");
- break;
- default:
- seq_printf(m, "Other encoder (id=%d).\n",
- intel_encoder->type);
- return;
- }
+ drm_for_each_connector(connector, dev) {
+ if (connector->state->crtc != &intel_crtc->base)
+ continue;
+
+ seq_printf(m, "%s:\n", connector->name);
}
if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
@@ -3429,18 +3473,16 @@ static int i915_drrs_status(struct seq_file *m, void *unused)
struct intel_crtc *intel_crtc;
int active_crtc_cnt = 0;
+ drm_modeset_lock_all(dev);
for_each_intel_crtc(dev, intel_crtc) {
- drm_modeset_lock(&intel_crtc->base.mutex, NULL);
-
if (intel_crtc->base.state->active) {
active_crtc_cnt++;
seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
drrs_status_per_crtc(m, dev, intel_crtc);
}
-
- drm_modeset_unlock(&intel_crtc->base.mutex);
}
+ drm_modeset_unlock_all(dev);
if (!active_crtc_cnt)
seq_puts(m, "No active crtc found\n");
@@ -3458,17 +3500,23 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_encoder *encoder;
struct intel_encoder *intel_encoder;
struct intel_digital_port *intel_dig_port;
+ struct drm_connector *connector;
+
drm_modeset_lock_all(dev);
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- intel_encoder = to_intel_encoder(encoder);
- if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT)
+ drm_for_each_connector(connector, dev) {
+ if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
continue;
- intel_dig_port = enc_to_dig_port(encoder);
+
+ intel_encoder = intel_attached_encoder(connector);
+ if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ intel_dig_port = enc_to_dig_port(&intel_encoder->base);
if (!intel_dig_port->dp.can_mst)
continue;
+
seq_printf(m, "MST Source Port %c\n",
port_name(intel_dig_port->port));
drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
@@ -3480,7 +3528,7 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
{
struct pipe_crc_info *info = inode->i_private;
- struct drm_i915_private *dev_priv = info->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(info->dev);
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
@@ -3504,7 +3552,7 @@ static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
{
struct pipe_crc_info *info = inode->i_private;
- struct drm_i915_private *dev_priv = info->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(info->dev);
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
spin_lock_irq(&pipe_crc->lock);
@@ -3532,7 +3580,7 @@ i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
{
struct pipe_crc_info *info = filep->private_data;
struct drm_device *dev = info->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
char buf[PIPE_CRC_BUFFER_LEN];
int n_entries;
@@ -3665,7 +3713,7 @@ static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
static int display_crc_ctl_show(struct seq_file *m, void *data)
{
struct drm_device *dev = m->private;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int i;
for (i = 0; i < I915_MAX_PIPES; i++)
@@ -3726,7 +3774,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
case INTEL_OUTPUT_TVOUT:
*source = INTEL_PIPE_CRC_SOURCE_TV;
break;
- case INTEL_OUTPUT_DISPLAYPORT:
+ case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_EDP:
dig_port = enc_to_dig_port(&encoder->base);
switch (dig_port->port) {
@@ -3759,7 +3807,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
enum intel_pipe_crc_source *source,
uint32_t *val)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
bool need_stable_symbols = false;
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
@@ -3830,7 +3878,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
enum intel_pipe_crc_source *source,
uint32_t *val)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
bool need_stable_symbols = false;
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
@@ -3904,7 +3952,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t tmp = I915_READ(PORT_DFT2_G4X);
switch (pipe) {
@@ -3929,7 +3977,7 @@ static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t tmp = I915_READ(PORT_DFT2_G4X);
if (pipe == PIPE_A)
@@ -3972,7 +4020,7 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
struct intel_crtc_state *pipe_config;
@@ -4040,7 +4088,7 @@ static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
enum intel_pipe_crc_source source)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
pipe));
@@ -4547,7 +4595,7 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
static int pri_wm_latency_show(struct seq_file *m, void *data)
{
struct drm_device *dev = m->private;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
const uint16_t *latencies;
if (INTEL_INFO(dev)->gen >= 9)
@@ -4563,7 +4611,7 @@ static int pri_wm_latency_show(struct seq_file *m, void *data)
static int spr_wm_latency_show(struct seq_file *m, void *data)
{
struct drm_device *dev = m->private;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
const uint16_t *latencies;
if (INTEL_INFO(dev)->gen >= 9)
@@ -4579,7 +4627,7 @@ static int spr_wm_latency_show(struct seq_file *m, void *data)
static int cur_wm_latency_show(struct seq_file *m, void *data)
{
struct drm_device *dev = m->private;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
const uint16_t *latencies;
if (INTEL_INFO(dev)->gen >= 9)
@@ -4670,7 +4718,7 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
{
struct seq_file *m = file->private_data;
struct drm_device *dev = m->private;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint16_t *latencies;
if (INTEL_INFO(dev)->gen >= 9)
@@ -4686,7 +4734,7 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
{
struct seq_file *m = file->private_data;
struct drm_device *dev = m->private;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint16_t *latencies;
if (INTEL_INFO(dev)->gen >= 9)
@@ -4702,7 +4750,7 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
{
struct seq_file *m = file->private_data;
struct drm_device *dev = m->private;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint16_t *latencies;
if (INTEL_INFO(dev)->gen >= 9)
@@ -4744,7 +4792,7 @@ static int
i915_wedged_get(void *data, u64 *val)
{
struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
*val = i915_terminally_wedged(&dev_priv->gpu_error);
@@ -4755,7 +4803,7 @@ static int
i915_wedged_set(void *data, u64 val)
{
struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/*
* There is no safeguard against this debugfs entry colliding
@@ -4770,7 +4818,7 @@ i915_wedged_set(void *data, u64 val)
intel_runtime_pm_get(dev_priv);
- i915_handle_error(dev, val,
+ i915_handle_error(dev_priv, val,
"Manually setting wedged to %llu", val);
intel_runtime_pm_put(dev_priv);
@@ -4783,44 +4831,10 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
"%llu\n");
static int
-i915_ring_stop_get(void *data, u64 *val)
-{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- *val = dev_priv->gpu_error.stop_rings;
-
- return 0;
-}
-
-static int
-i915_ring_stop_set(void *data, u64 val)
-{
- struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
- DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- dev_priv->gpu_error.stop_rings = val;
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
- i915_ring_stop_get, i915_ring_stop_set,
- "0x%08llx\n");
-
-static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
*val = dev_priv->gpu_error.missed_irq_rings;
return 0;
@@ -4830,7 +4844,7 @@ static int
i915_ring_missed_irq_set(void *data, u64 val)
{
struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
/* Lock against concurrent debugfs callers */
@@ -4851,7 +4865,7 @@ static int
i915_ring_test_irq_get(void *data, u64 *val)
{
struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
*val = dev_priv->gpu_error.test_irq_rings;
@@ -4862,18 +4876,11 @@ static int
i915_ring_test_irq_set(void *data, u64 val)
{
struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ val &= INTEL_INFO(dev_priv)->ring_mask;
DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
-
- /* Lock against concurrent debugfs callers */
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
dev_priv->gpu_error.test_irq_rings = val;
- mutex_unlock(&dev->struct_mutex);
return 0;
}
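The get/set pairs in this file, including the i915_ring_stop pair removed above, all hang off DEFINE_SIMPLE_ATTRIBUTE, which generates the file_operations for a debugfs file from a u64 getter, a u64 setter, and a printf format. A minimal sketch with invented names:

#include <linux/fs.h>

static u64 sketch_value;

static int sketch_get(void *data, u64 *val)
{
        *val = sketch_value;
        return 0;
}

static int sketch_set(void *data, u64 val)
{
        sketch_value = val;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sketch_fops, sketch_get, sketch_set, "0x%08llx\n");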
@@ -4902,7 +4909,7 @@ static int
i915_drop_caches_set(void *data, u64 val)
{
struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
@@ -4914,13 +4921,13 @@ i915_drop_caches_set(void *data, u64 val)
return ret;
if (val & DROP_ACTIVE) {
- ret = i915_gpu_idle(dev);
+ ret = i915_gem_wait_for_idle(dev_priv);
if (ret)
goto unlock;
}
if (val & (DROP_RETIRE | DROP_ACTIVE))
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev_priv);
if (val & DROP_BOUND)
i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
@@ -4942,7 +4949,7 @@ static int
i915_max_freq_get(void *data, u64 *val)
{
struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
if (INTEL_INFO(dev)->gen < 6)
@@ -4964,7 +4971,7 @@ static int
i915_max_freq_set(void *data, u64 val)
{
struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 hw_max, hw_min;
int ret;
@@ -4994,7 +5001,7 @@ i915_max_freq_set(void *data, u64 val)
dev_priv->rps.max_freq_softlimit = val;
- intel_set_rps(dev, val);
+ intel_set_rps(dev_priv, val);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -5009,7 +5016,7 @@ static int
i915_min_freq_get(void *data, u64 *val)
{
struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
if (INTEL_INFO(dev)->gen < 6)
@@ -5031,7 +5038,7 @@ static int
i915_min_freq_set(void *data, u64 val)
{
struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 hw_max, hw_min;
int ret;
@@ -5061,7 +5068,7 @@ i915_min_freq_set(void *data, u64 val)
dev_priv->rps.min_freq_softlimit = val;
- intel_set_rps(dev, val);
+ intel_set_rps(dev_priv, val);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -5076,7 +5083,7 @@ static int
i915_cache_sharing_get(void *data, u64 *val)
{
struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 snpcr;
int ret;
@@ -5091,7 +5098,7 @@ i915_cache_sharing_get(void *data, u64 *val)
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev_priv->dev->struct_mutex);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
@@ -5102,7 +5109,7 @@ static int
i915_cache_sharing_set(void *data, u64 val)
{
struct drm_device *dev = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 snpcr;
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
@@ -5139,7 +5146,7 @@ struct sseu_dev_status {
static void cherryview_sseu_device_status(struct drm_device *dev,
struct sseu_dev_status *stat)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ss_max = 2;
int ss;
u32 sig1[ss_max], sig2[ss_max];
@@ -5171,7 +5178,7 @@ static void cherryview_sseu_device_status(struct drm_device *dev,
static void gen9_sseu_device_status(struct drm_device *dev,
struct sseu_dev_status *stat)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int s_max = 3, ss_max = 4;
int s, ss;
u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
@@ -5236,7 +5243,7 @@ static void gen9_sseu_device_status(struct drm_device *dev,
static void broadwell_sseu_device_status(struct drm_device *dev,
struct sseu_dev_status *stat)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int s;
u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
@@ -5278,6 +5285,10 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
INTEL_INFO(dev)->eu_total);
seq_printf(m, " Available EU Per Subslice: %u\n",
INTEL_INFO(dev)->eu_per_subslice);
+ seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev)));
+ if (HAS_POOLED_EU(dev))
+ seq_printf(m, " Min EU in pool: %u\n",
+ INTEL_INFO(dev)->min_eu_in_pool);
seq_printf(m, " Has Slice Power Gating: %s\n",
yesno(INTEL_INFO(dev)->has_slice_pg));
seq_printf(m, " Has Subslice Power Gating: %s\n",
@@ -5311,7 +5322,7 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (INTEL_INFO(dev)->gen < 6)
return 0;
@@ -5325,7 +5336,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
static int i915_forcewake_release(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (INTEL_INFO(dev)->gen < 6)
return 0;
@@ -5441,7 +5452,6 @@ static const struct i915_debugfs_files {
{"i915_max_freq", &i915_max_freq_fops},
{"i915_min_freq", &i915_min_freq_fops},
{"i915_cache_sharing", &i915_cache_sharing_fops},
- {"i915_ring_stop", &i915_ring_stop_fops},
{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
{"i915_ring_test_irq", &i915_ring_test_irq_fops},
{"i915_gem_drop_caches", &i915_drop_caches_fops},
@@ -5459,7 +5469,7 @@ static const struct i915_debugfs_files {
void intel_display_crc_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
@@ -5471,8 +5481,9 @@ void intel_display_crc_init(struct drm_device *dev)
}
}
-int i915_debugfs_init(struct drm_minor *minor)
+int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
+ struct drm_minor *minor = dev_priv->drm.primary;
int ret, i;
ret = i915_forcewake_create(minor->debugfs_root, minor);
@@ -5498,8 +5509,9 @@ int i915_debugfs_init(struct drm_minor *minor)
minor->debugfs_root, minor);
}
-void i915_debugfs_cleanup(struct drm_minor *minor)
+void i915_debugfs_unregister(struct drm_i915_private *dev_priv)
{
+ struct drm_minor *minor = dev_priv->drm.primary;
int i;
drm_debugfs_remove_files(i915_debugfs_list,
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index fd3553beb..5de36d8dc 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -27,400 +27,92 @@
*
*/
-#include <linux/device.h>
#include <linux/acpi.h>
-#include <drm/drmP.h>
-#include <drm/i915_drm.h>
-#include "i915_drv.h"
-#include "i915_trace.h"
-#include "intel_drv.h"
-
-#include <linux/apple-gmux.h>
-#include <linux/console.h>
+#include <linux/device.h>
+#include <linux/oom.h>
#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/pnp.h>
+#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
-#include <drm/drm_crtc_helper.h>
-
-static struct drm_driver driver;
-
-#define GEN_DEFAULT_PIPEOFFSETS \
- .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
- PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
- .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
- TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
- .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
-
-#define GEN_CHV_PIPEOFFSETS \
- .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
- CHV_PIPE_C_OFFSET }, \
- .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
- CHV_TRANSCODER_C_OFFSET, }, \
- .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
- CHV_PALETTE_C_OFFSET }
-
-#define CURSOR_OFFSETS \
- .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
-
-#define IVB_CURSOR_OFFSETS \
- .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
-
-#define BDW_COLORS \
- .color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
-#define CHV_COLORS \
- .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
-
-static const struct intel_device_info intel_i830_info = {
- .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
- .has_overlay = 1, .overlay_needs_physical = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-
-static const struct intel_device_info intel_845g_info = {
- .gen = 2, .num_pipes = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-
-static const struct intel_device_info intel_i85x_info = {
- .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
- .cursor_needs_physical = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
- .has_fbc = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-
-static const struct intel_device_info intel_i865g_info = {
- .gen = 2, .num_pipes = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-
-static const struct intel_device_info intel_i915g_info = {
- .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
- .has_overlay = 1, .overlay_needs_physical = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-static const struct intel_device_info intel_i915gm_info = {
- .gen = 3, .is_mobile = 1, .num_pipes = 2,
- .cursor_needs_physical = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
- .supports_tv = 1,
- .has_fbc = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-static const struct intel_device_info intel_i945g_info = {
- .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
- .has_overlay = 1, .overlay_needs_physical = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-static const struct intel_device_info intel_i945gm_info = {
- .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
- .has_hotplug = 1, .cursor_needs_physical = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
- .supports_tv = 1,
- .has_fbc = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-
-static const struct intel_device_info intel_i965g_info = {
- .gen = 4, .is_broadwater = 1, .num_pipes = 2,
- .has_hotplug = 1,
- .has_overlay = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-
-static const struct intel_device_info intel_i965gm_info = {
- .gen = 4, .is_crestline = 1, .num_pipes = 2,
- .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
- .has_overlay = 1,
- .supports_tv = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-
-static const struct intel_device_info intel_g33_info = {
- .gen = 3, .is_g33 = 1, .num_pipes = 2,
- .need_gfx_hws = 1, .has_hotplug = 1,
- .has_overlay = 1,
- .ring_mask = RENDER_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-
-static const struct intel_device_info intel_g45_info = {
- .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
- .has_pipe_cxsr = 1, .has_hotplug = 1,
- .ring_mask = RENDER_RING | BSD_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-
-static const struct intel_device_info intel_gm45_info = {
- .gen = 4, .is_g4x = 1, .num_pipes = 2,
- .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
- .has_pipe_cxsr = 1, .has_hotplug = 1,
- .supports_tv = 1,
- .ring_mask = RENDER_RING | BSD_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-
-static const struct intel_device_info intel_pineview_info = {
- .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
- .need_gfx_hws = 1, .has_hotplug = 1,
- .has_overlay = 1,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-
-static const struct intel_device_info intel_ironlake_d_info = {
- .gen = 5, .num_pipes = 2,
- .need_gfx_hws = 1, .has_hotplug = 1,
- .ring_mask = RENDER_RING | BSD_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
+#include <linux/vt.h>
+#include <acpi/video.h>
-static const struct intel_device_info intel_ironlake_m_info = {
- .gen = 5, .is_mobile = 1, .num_pipes = 2,
- .need_gfx_hws = 1, .has_hotplug = 1,
- .has_fbc = 1,
- .ring_mask = RENDER_RING | BSD_RING,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-
-static const struct intel_device_info intel_sandybridge_d_info = {
- .gen = 6, .num_pipes = 2,
- .need_gfx_hws = 1, .has_hotplug = 1,
- .has_fbc = 1,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
- .has_llc = 1,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-
-static const struct intel_device_info intel_sandybridge_m_info = {
- .gen = 6, .is_mobile = 1, .num_pipes = 2,
- .need_gfx_hws = 1, .has_hotplug = 1,
- .has_fbc = 1,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
- .has_llc = 1,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS,
-};
-
-#define GEN7_FEATURES \
- .gen = 7, .num_pipes = 3, \
- .need_gfx_hws = 1, .has_hotplug = 1, \
- .has_fbc = 1, \
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
- .has_llc = 1, \
- GEN_DEFAULT_PIPEOFFSETS, \
- IVB_CURSOR_OFFSETS
-
-static const struct intel_device_info intel_ivybridge_d_info = {
- GEN7_FEATURES,
- .is_ivybridge = 1,
-};
-
-static const struct intel_device_info intel_ivybridge_m_info = {
- GEN7_FEATURES,
- .is_ivybridge = 1,
- .is_mobile = 1,
-};
-
-static const struct intel_device_info intel_ivybridge_q_info = {
- GEN7_FEATURES,
- .is_ivybridge = 1,
- .num_pipes = 0, /* legal, last one wins */
-};
-
-#define VLV_FEATURES \
- .gen = 7, .num_pipes = 2, \
- .need_gfx_hws = 1, .has_hotplug = 1, \
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
- .display_mmio_offset = VLV_DISPLAY_BASE, \
- GEN_DEFAULT_PIPEOFFSETS, \
- CURSOR_OFFSETS
-
-static const struct intel_device_info intel_valleyview_m_info = {
- VLV_FEATURES,
- .is_valleyview = 1,
- .is_mobile = 1,
-};
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/i915_drm.h>
-static const struct intel_device_info intel_valleyview_d_info = {
- VLV_FEATURES,
- .is_valleyview = 1,
-};
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "i915_vgpu.h"
+#include "intel_drv.h"
-#define HSW_FEATURES \
- GEN7_FEATURES, \
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
- .has_ddi = 1, \
- .has_fpga_dbg = 1
+static struct drm_driver driver;
-static const struct intel_device_info intel_haswell_d_info = {
- HSW_FEATURES,
- .is_haswell = 1,
-};
+static unsigned int i915_load_fail_count;
-static const struct intel_device_info intel_haswell_m_info = {
- HSW_FEATURES,
- .is_haswell = 1,
- .is_mobile = 1,
-};
+bool __i915_inject_load_failure(const char *func, int line)
+{
+ if (i915_load_fail_count >= i915.inject_load_failure)
+ return false;
-#define BDW_FEATURES \
- HSW_FEATURES, \
- BDW_COLORS
+ if (++i915_load_fail_count == i915.inject_load_failure) {
+ DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
+ i915.inject_load_failure, func, line);
+ return true;
+ }
-static const struct intel_device_info intel_broadwell_d_info = {
- BDW_FEATURES,
- .gen = 8,
-};
+ return false;
+}
-static const struct intel_device_info intel_broadwell_m_info = {
- BDW_FEATURES,
- .gen = 8, .is_mobile = 1,
-};
+#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
+#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
+ "providing the dmesg log by booting with drm.debug=0xf"
-static const struct intel_device_info intel_broadwell_gt3d_info = {
- BDW_FEATURES,
- .gen = 8,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
-};
+void
+__i915_printk(struct drm_i915_private *dev_priv, const char *level,
+ const char *fmt, ...)
+{
+ static bool shown_bug_once;
+ struct device *dev = dev_priv->drm.dev;
+ bool is_error = level[1] <= KERN_ERR[1];
+ bool is_debug = level[1] == KERN_DEBUG[1];
+ struct va_format vaf;
+ va_list args;
+
+ if (is_debug && !(drm_debug & DRM_UT_DRIVER))
+ return;
-static const struct intel_device_info intel_broadwell_gt3m_info = {
- BDW_FEATURES,
- .gen = 8, .is_mobile = 1,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
-};
+ va_start(args, fmt);
-static const struct intel_device_info intel_cherryview_info = {
- .gen = 8, .num_pipes = 3,
- .need_gfx_hws = 1, .has_hotplug = 1,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
- .is_cherryview = 1,
- .display_mmio_offset = VLV_DISPLAY_BASE,
- GEN_CHV_PIPEOFFSETS,
- CURSOR_OFFSETS,
- CHV_COLORS,
-};
+ vaf.fmt = fmt;
+ vaf.va = &args;
-static const struct intel_device_info intel_skylake_info = {
- BDW_FEATURES,
- .is_skylake = 1,
- .gen = 9,
-};
+ dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV",
+ __builtin_return_address(0), &vaf);
-static const struct intel_device_info intel_skylake_gt3_info = {
- BDW_FEATURES,
- .is_skylake = 1,
- .gen = 9,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
-};
+ if (is_error && !shown_bug_once) {
+ dev_notice(dev, "%s", FDO_BUG_MSG);
+ shown_bug_once = true;
+ }
-static const struct intel_device_info intel_broxton_info = {
- .is_preliminary = 1,
- .is_broxton = 1,
- .gen = 9,
- .need_gfx_hws = 1, .has_hotplug = 1,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
- .num_pipes = 3,
- .has_ddi = 1,
- .has_fpga_dbg = 1,
- .has_fbc = 1,
- GEN_DEFAULT_PIPEOFFSETS,
- IVB_CURSOR_OFFSETS,
- BDW_COLORS,
-};
+ va_end(args);
+}
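__i915_printk() above forwards its varargs in a single conversion via the kernel's %pV specifier and struct va_format, which bundle a format string with its va_list so a wrapper can hand both to dev_printk() at once. A reduced sketch of that forwarding (wrapper name invented):

#include <linux/device.h>
#include <linux/printk.h>

static void sketch_dev_log(struct device *dev, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        /* one conversion consumes the whole wrapped format + args */
        dev_printk(KERN_INFO, dev, "%pV", &vaf);
        va_end(args);
}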
-static const struct intel_device_info intel_kabylake_info = {
- BDW_FEATURES,
- .is_kabylake = 1,
- .gen = 9,
-};
+static bool i915_error_injected(struct drm_i915_private *dev_priv)
+{
+ return i915.inject_load_failure &&
+ i915_load_fail_count == i915.inject_load_failure;
+}
-static const struct intel_device_info intel_kabylake_gt3_info = {
- BDW_FEATURES,
- .is_kabylake = 1,
- .gen = 9,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
-};
+#define i915_load_error(dev_priv, fmt, ...) \
+ __i915_printk(dev_priv, \
+ i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
+ fmt, ##__VA_ARGS__)
-/*
- * Make sure any device matches here are from most specific to most
- * general. For example, since the Quanta match is based on the subsystem
- * and subvendor IDs, we need it to come before the more general IVB
- * PCI ID matches, otherwise we'll use the wrong info struct above.
- */
-static const struct pci_device_id pciidlist[] = {
- INTEL_I830_IDS(&intel_i830_info),
- INTEL_I845G_IDS(&intel_845g_info),
- INTEL_I85X_IDS(&intel_i85x_info),
- INTEL_I865G_IDS(&intel_i865g_info),
- INTEL_I915G_IDS(&intel_i915g_info),
- INTEL_I915GM_IDS(&intel_i915gm_info),
- INTEL_I945G_IDS(&intel_i945g_info),
- INTEL_I945GM_IDS(&intel_i945gm_info),
- INTEL_I965G_IDS(&intel_i965g_info),
- INTEL_G33_IDS(&intel_g33_info),
- INTEL_I965GM_IDS(&intel_i965gm_info),
- INTEL_GM45_IDS(&intel_gm45_info),
- INTEL_G45_IDS(&intel_g45_info),
- INTEL_PINEVIEW_IDS(&intel_pineview_info),
- INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
- INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
- INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
- INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
- INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
- INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
- INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
- INTEL_HSW_D_IDS(&intel_haswell_d_info),
- INTEL_HSW_M_IDS(&intel_haswell_m_info),
- INTEL_VLV_M_IDS(&intel_valleyview_m_info),
- INTEL_VLV_D_IDS(&intel_valleyview_d_info),
- INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
- INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
- INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
- INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
- INTEL_CHV_IDS(&intel_cherryview_info),
- INTEL_SKL_GT1_IDS(&intel_skylake_info),
- INTEL_SKL_GT2_IDS(&intel_skylake_info),
- INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
- INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
- INTEL_BXT_IDS(&intel_broxton_info),
- INTEL_KBL_GT1_IDS(&intel_kabylake_info),
- INTEL_KBL_GT2_IDS(&intel_kabylake_info),
- INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
- INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
- {0, 0, 0}
-};
-
-MODULE_DEVICE_TABLE(pci, pciidlist);
static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
{
@@ -450,9 +142,9 @@ static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
return ret;
}
-void intel_detect_pch(struct drm_device *dev)
+static void intel_detect_pch(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pch = NULL;
/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
@@ -519,8 +211,10 @@ void intel_detect_pch(struct drm_device *dev)
} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
(id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
- pch->subsystem_vendor == 0x1af4 &&
- pch->subsystem_device == 0x1100)) {
+ pch->subsystem_vendor ==
+ PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
+ pch->subsystem_device ==
+ PCI_SUBDEVICE_ID_QEMU)) {
dev_priv->pch_type = intel_virt_detect_pch(dev);
} else
continue;
@@ -534,9 +228,9 @@ void intel_detect_pch(struct drm_device *dev)
pci_dev_put(pch);
}
-bool i915_semaphore_is_enabled(struct drm_device *dev)
+bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv)
{
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_GEN(dev_priv) < 6)
return false;
if (i915.semaphores >= 0)
@@ -546,22 +240,1177 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
if (i915.enable_execlists)
return false;
- /* Until we get further testing... */
- if (IS_GEN8(dev))
- return false;
-
#ifdef CONFIG_INTEL_IOMMU
/* Enable semaphores on SNB when IO remapping is off */
- if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
+ if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped)
return false;
#endif
return true;
}
+static int i915_getparam(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ drm_i915_getparam_t *param = data;
+ int value;
+
+ switch (param->param) {
+ case I915_PARAM_IRQ_ACTIVE:
+ case I915_PARAM_ALLOW_BATCHBUFFER:
+ case I915_PARAM_LAST_DISPATCH:
+ /* Reject all old ums/dri params. */
+ return -ENODEV;
+ case I915_PARAM_CHIPSET_ID:
+ value = dev->pdev->device;
+ break;
+ case I915_PARAM_REVISION:
+ value = dev->pdev->revision;
+ break;
+ case I915_PARAM_HAS_GEM:
+ value = 1;
+ break;
+ case I915_PARAM_NUM_FENCES_AVAIL:
+ value = dev_priv->num_fence_regs;
+ break;
+ case I915_PARAM_HAS_OVERLAY:
+ value = dev_priv->overlay ? 1 : 0;
+ break;
+ case I915_PARAM_HAS_PAGEFLIPPING:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_EXECBUF2:
+ /* depends on GEM */
+ value = 1;
+ break;
+ case I915_PARAM_HAS_BSD:
+ value = intel_engine_initialized(&dev_priv->engine[VCS]);
+ break;
+ case I915_PARAM_HAS_BLT:
+ value = intel_engine_initialized(&dev_priv->engine[BCS]);
+ break;
+ case I915_PARAM_HAS_VEBOX:
+ value = intel_engine_initialized(&dev_priv->engine[VECS]);
+ break;
+ case I915_PARAM_HAS_BSD2:
+ value = intel_engine_initialized(&dev_priv->engine[VCS2]);
+ break;
+ case I915_PARAM_HAS_RELAXED_FENCING:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_COHERENT_RINGS:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_EXEC_CONSTANTS:
+ value = INTEL_INFO(dev)->gen >= 4;
+ break;
+ case I915_PARAM_HAS_RELAXED_DELTA:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_GEN7_SOL_RESET:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_LLC:
+ value = HAS_LLC(dev);
+ break;
+ case I915_PARAM_HAS_WT:
+ value = HAS_WT(dev);
+ break;
+ case I915_PARAM_HAS_ALIASING_PPGTT:
+ value = USES_PPGTT(dev);
+ break;
+ case I915_PARAM_HAS_WAIT_TIMEOUT:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_SEMAPHORES:
+ value = i915_semaphore_is_enabled(dev_priv);
+ break;
+ case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_SECURE_BATCHES:
+ value = capable(CAP_SYS_ADMIN);
+ break;
+ case I915_PARAM_HAS_PINNED_BATCHES:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_EXEC_NO_RELOC:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_EXEC_HANDLE_LUT:
+ value = 1;
+ break;
+ case I915_PARAM_CMD_PARSER_VERSION:
+ value = i915_cmd_parser_get_version(dev_priv);
+ break;
+ case I915_PARAM_HAS_COHERENT_PHYS_GTT:
+ value = 1;
+ break;
+ case I915_PARAM_MMAP_VERSION:
+ value = 1;
+ break;
+ case I915_PARAM_SUBSLICE_TOTAL:
+ value = INTEL_INFO(dev)->subslice_total;
+ if (!value)
+ return -ENODEV;
+ break;
+ case I915_PARAM_EU_TOTAL:
+ value = INTEL_INFO(dev)->eu_total;
+ if (!value)
+ return -ENODEV;
+ break;
+ case I915_PARAM_HAS_GPU_RESET:
+ value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
+ break;
+ case I915_PARAM_HAS_RESOURCE_STREAMER:
+ value = HAS_RESOURCE_STREAMER(dev);
+ break;
+ case I915_PARAM_HAS_EXEC_SOFTPIN:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_POOLED_EU:
+ value = HAS_POOLED_EU(dev);
+ break;
+ case I915_PARAM_MIN_EU_IN_POOL:
+ value = INTEL_INFO(dev)->min_eu_in_pool;
+ break;
+ default:
+ DRM_DEBUG("Unknown parameter %d\n", param->param);
+ return -EINVAL;
+ }
+
+ if (put_user(value, param->value))
+ return -EFAULT;
+
+ return 0;
+}
+
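The getparam tail above copies the computed scalar out through the user pointer with put_user(), turning a faulting address into -EFAULT. A stripped-down sketch, with an illustrative stand-in for drm_i915_getparam_t:

#include <linux/errno.h>
#include <linux/uaccess.h>

struct sketch_getparam {
        int param;
        int *value;             /* pointer into user space */
};

static int sketch_copy_value_out(struct sketch_getparam *p, int value)
{
        if (put_user(value, p->value))
                return -EFAULT;
        return 0;
}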
+static int i915_get_bridge_dev(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+ if (!dev_priv->bridge_dev) {
+ DRM_ERROR("bridge device not found\n");
+ return -1;
+ }
+ return 0;
+}
+
+/* Allocate space for the MCH regs if needed, return nonzero on error */
+static int
+intel_alloc_mchbar_resource(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ u32 temp_lo, temp_hi = 0;
+ u64 mchbar_addr;
+ int ret;
+
+ if (INTEL_INFO(dev)->gen >= 4)
+ pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
+ pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
+ mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
+
+ /* If ACPI doesn't have it, assume we need to allocate it ourselves */
+#ifdef CONFIG_PNP
+ if (mchbar_addr &&
+ pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
+ return 0;
+#endif
+
+ /* Get some space for it */
+ dev_priv->mch_res.name = "i915 MCHBAR";
+ dev_priv->mch_res.flags = IORESOURCE_MEM;
+ ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
+ &dev_priv->mch_res,
+ MCHBAR_SIZE, MCHBAR_SIZE,
+ PCIBIOS_MIN_MEM,
+ 0, pcibios_align_resource,
+ dev_priv->bridge_dev);
+ if (ret) {
+ DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
+ dev_priv->mch_res.start = 0;
+ return ret;
+ }
+
+ if (INTEL_INFO(dev)->gen >= 4)
+ pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
+ upper_32_bits(dev_priv->mch_res.start));
+
+ pci_write_config_dword(dev_priv->bridge_dev, reg,
+ lower_32_bits(dev_priv->mch_res.start));
+ return 0;
+}
+
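On gen4+ the MCHBAR base read above spans two 32-bit PCI config dwords, recombined as (hi << 32) | lo. A hedged sketch of just that assembly (helper name invented):

#include <linux/pci.h>

static u64 sketch_read_split_bar(struct pci_dev *bridge, int reg, bool has_hi)
{
        u32 lo, hi = 0;

        if (has_hi)             /* gen4+: upper dword lives at reg + 4 */
                pci_read_config_dword(bridge, reg + 4, &hi);
        pci_read_config_dword(bridge, reg, &lo);

        return ((u64)hi << 32) | lo;
}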
+/* Setup MCHBAR if possible, return true if we should disable it again */
+static void
+intel_setup_mchbar(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ u32 temp;
+ bool enabled;
+
+ if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ return;
+
+ dev_priv->mchbar_need_disable = false;
+
+ if (IS_I915G(dev) || IS_I915GM(dev)) {
+ pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
+ enabled = !!(temp & DEVEN_MCHBAR_EN);
+ } else {
+ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+ enabled = temp & 1;
+ }
+
+ /* If it's already enabled, don't have to do anything */
+ if (enabled)
+ return;
+
+ if (intel_alloc_mchbar_resource(dev))
+ return;
+
+ dev_priv->mchbar_need_disable = true;
+
+ /* Space is allocated or reserved, so enable it. */
+ if (IS_I915G(dev) || IS_I915GM(dev)) {
+ pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
+ temp | DEVEN_MCHBAR_EN);
+ } else {
+ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+ pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
+ }
+}
+
+static void
+intel_teardown_mchbar(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+
+ if (dev_priv->mchbar_need_disable) {
+ if (IS_I915G(dev) || IS_I915GM(dev)) {
+ u32 deven_val;
+
+ pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
+ &deven_val);
+ deven_val &= ~DEVEN_MCHBAR_EN;
+ pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
+ deven_val);
+ } else {
+ u32 mchbar_val;
+
+ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
+ &mchbar_val);
+ mchbar_val &= ~1;
+ pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
+ mchbar_val);
+ }
+ }
+
+ if (dev_priv->mch_res.start)
+ release_resource(&dev_priv->mch_res);
+}
+
+/* true = enable decode, false = disable decode */
+static unsigned int i915_vga_set_decode(void *cookie, bool state)
+{
+ struct drm_device *dev = cookie;
+
+ intel_modeset_vga_set_state(dev, state);
+ if (state)
+ return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
+ VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+ else
+ return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+}
+
+static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+
+ if (state == VGA_SWITCHEROO_ON) {
+ pr_info("switched on\n");
+ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+ /* i915 resume handler doesn't set to D0 */
+ pci_set_power_state(dev->pdev, PCI_D0);
+ i915_resume_switcheroo(dev);
+ dev->switch_power_state = DRM_SWITCH_POWER_ON;
+ } else {
+ pr_info("switched off\n");
+ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+ i915_suspend_switcheroo(dev, pmm);
+ dev->switch_power_state = DRM_SWITCH_POWER_OFF;
+ }
+}
+
+static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ /*
+ * FIXME: open_count is protected by drm_global_mutex but that would lead to
+ * locking inversion with the driver load path. And the access here is
+ * completely racy anyway. So don't bother with locking for now.
+ */
+ return dev->open_count == 0;
+}
+
+static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
+ .set_gpu_state = i915_switcheroo_set_state,
+ .reprobe = NULL,
+ .can_switch = i915_switcheroo_can_switch,
+};
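+
+/*
+ * Sketch of how these callbacks are driven: the vga_switcheroo core
+ * calls can_switch() to verify no client holds the device open, then
+ * set_gpu_state(pdev, VGA_SWITCHEROO_OFF) to suspend the GPU being
+ * switched away from (and VGA_SWITCHEROO_ON on the way back).
+ */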
+
+static void i915_gem_fini(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ /*
+	 * Neither the BIOS, ourselves, nor any other kernel
+ * expects the system to be in execlists mode on startup,
+ * so we need to reset the GPU back to legacy mode. And the only
+ * known way to disable logical contexts is through a GPU reset.
+ *
+ * So in order to leave the system in a known default configuration,
+ * always reset the GPU upon unload. Afterwards we then clean up the
+ * GEM state tracking, flushing off the requests and leaving the
+ * system in a known idle state.
+ *
+	 * Note that it is of the utmost importance that the GPU is idle and
+ * all stray writes are flushed *before* we dismantle the backing
+ * storage for the pinned objects.
+ *
+	 * However, since we are uncertain that resetting the GPU on older
+ * machines is a good idea, we don't - just in case it leaves the
+ * machine in an unusable condition.
+ */
+ if (HAS_HW_CONTEXTS(dev)) {
+ int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
+ WARN_ON(reset && reset != -ENODEV);
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_reset(dev);
+ i915_gem_cleanup_engines(dev);
+ i915_gem_context_fini(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ WARN_ON(!list_empty(&to_i915(dev)->context_list));
+}
+
+static int i915_load_modeset_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int ret;
+
+ if (i915_inject_load_failure())
+ return -ENODEV;
+
+ ret = intel_bios_init(dev_priv);
+ if (ret)
+ DRM_INFO("failed to find VBIOS tables\n");
+
+	/* If we have more than one VGA card, then we need to arbitrate access
+	 * to the common VGA resources.
+	 *
+	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
+	 * then we do not take part in VGA arbitration and
+	 * vga_client_register() fails with -ENODEV.
+ */
+ ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
+ if (ret && ret != -ENODEV)
+ goto out;
+
+ intel_register_dsm_handler();
+
+ ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
+ if (ret)
+ goto cleanup_vga_client;
+
+ /* must happen before intel_power_domains_init_hw() on VLV/CHV */
+ intel_update_rawclk(dev_priv);
+
+ intel_power_domains_init_hw(dev_priv, false);
+
+ intel_csr_ucode_init(dev_priv);
+
+ ret = intel_irq_install(dev_priv);
+ if (ret)
+ goto cleanup_csr;
+
+ intel_setup_gmbus(dev);
+
+ /* Important: The output setup functions called by modeset_init need
+ * working irqs for e.g. gmbus and dp aux transfers. */
+ intel_modeset_init(dev);
+
+ intel_guc_init(dev);
+
+ ret = i915_gem_init(dev);
+ if (ret)
+ goto cleanup_irq;
+
+ intel_modeset_gem_init(dev);
+
+ if (INTEL_INFO(dev)->num_pipes == 0)
+ return 0;
+
+ ret = intel_fbdev_init(dev);
+ if (ret)
+ goto cleanup_gem;
+
+ /* Only enable hotplug handling once the fbdev is fully set up. */
+ intel_hpd_init(dev_priv);
+
+ drm_kms_helper_poll_init(dev);
+
+ return 0;
+
+cleanup_gem:
+ i915_gem_fini(dev);
+cleanup_irq:
+ intel_guc_fini(dev);
+ drm_irq_uninstall(dev);
+ intel_teardown_gmbus(dev);
+cleanup_csr:
+ intel_csr_ucode_fini(dev_priv);
+ intel_power_domains_fini(dev_priv);
+ vga_switcheroo_unregister_client(dev->pdev);
+cleanup_vga_client:
+ vga_client_register(dev->pdev, NULL, NULL, NULL);
+out:
+ return ret;
+}
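+
+/*
+ * The unwind labels above follow the usual kernel idiom: each label
+ * undoes everything initialised before the failing step, in reverse
+ * order, so a new step pairs with a new label, e.g. (sketch):
+ *
+ *	ret = step_b();			step_a() already succeeded
+ *	if (ret)
+ *		goto cleanup_a;		so only step_a() is undone
+ */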
+
+#if IS_ENABLED(CONFIG_FB)
+static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+ struct apertures_struct *ap;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ bool primary;
+ int ret;
+
+ ap = alloc_apertures(1);
+ if (!ap)
+ return -ENOMEM;
+
+ ap->ranges[0].base = ggtt->mappable_base;
+ ap->ranges[0].size = ggtt->mappable_end;
+
+ primary =
+ pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+
+ ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+
+ kfree(ap);
+
+ return ret;
+}
+#else
+static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+ return 0;
+}
+#endif
+
+#if !defined(CONFIG_VGA_CONSOLE)
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+ return 0;
+}
+#elif !defined(CONFIG_DUMMY_CONSOLE)
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+ return -ENODEV;
+}
+#else
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+ int ret = 0;
+
+ DRM_INFO("Replacing VGA console driver\n");
+
+ console_lock();
+ if (con_is_bound(&vga_con))
+ ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
+ if (ret == 0) {
+ ret = do_unregister_con_driver(&vga_con);
+
+ /* Ignore "already unregistered". */
+ if (ret == -ENODEV)
+ ret = 0;
+ }
+ console_unlock();
+
+ return ret;
+}
+#endif
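+
+/*
+ * Summary of the three build configurations above: without vgacon there
+ * is nothing to kick out; with vgacon but no dummy console there is
+ * nothing to hand the consoles over to, so init must fail; with both,
+ * the dummy console takes over before vgacon is unregistered.
+ */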
+
+static void intel_init_dpio(struct drm_i915_private *dev_priv)
+{
+ /*
+	 * IOSF_PORT_DPIO is used for the VLV x2 PHY (DP/HDMI B and C)
+	 * and the CHV x1 PHY (DP/HDMI D);
+	 * IOSF_PORT_DPIO_2 is used for the CHV x2 PHY (DP/HDMI B and C).
+ */
+ if (IS_CHERRYVIEW(dev_priv)) {
+ DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
+ DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
+ } else if (IS_VALLEYVIEW(dev_priv)) {
+ DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
+ }
+}
+
+static int i915_workqueues_init(struct drm_i915_private *dev_priv)
+{
+ /*
+ * The i915 workqueue is primarily used for batched retirement of
+ * requests (and thus managing bo) once the task has been completed
+ * by the GPU. i915_gem_retire_requests() is called directly when we
+ * need high-priority retirement, such as waiting for an explicit
+ * bo.
+ *
+ * It is also used for periodic low-priority events, such as
+ * idle-timers and recording error state.
+ *
+ * All tasks on the workqueue are expected to acquire the dev mutex
+ * so there is no point in running more than one instance of the
+ * workqueue at any time. Use an ordered one.
+ */
+ dev_priv->wq = alloc_ordered_workqueue("i915", 0);
+ if (dev_priv->wq == NULL)
+ goto out_err;
+
+ dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
+ if (dev_priv->hotplug.dp_wq == NULL)
+ goto out_free_wq;
+
+ return 0;
+
+out_free_wq:
+ destroy_workqueue(dev_priv->wq);
+out_err:
+ DRM_ERROR("Failed to allocate workqueues.\n");
+
+ return -ENOMEM;
+}
+
+static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
+{
+ destroy_workqueue(dev_priv->hotplug.dp_wq);
+ destroy_workqueue(dev_priv->wq);
+}
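+
+/*
+ * A minimal usage sketch for the ordered queue allocated above (the
+ * work item here is hypothetical, purely for illustration):
+ *
+ *	INIT_WORK(&work, retire_fn);
+ *	queue_work(dev_priv->wq, &work);
+ *	...
+ *	flush_workqueue(dev_priv->wq);	wait for it to complete
+ *
+ * Being ordered, the queue executes at most one item at a time, which
+ * matches the "all tasks take the dev mutex" expectation above.
+ */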
+
+/**
+ * i915_driver_init_early - setup state not requiring device access
+ * @dev_priv: device private
+ *
+ * Initialize everything that is "SW-only" state, that is, state that does
+ * not require accessing the device or exposing the driver via kernel internal
+ * or userspace interfaces. Example steps belonging here: lock initialization,
+ * system memory allocation, setting up device specific attributes and
+ * function hooks not requiring accessing the device.
+ */
+static int i915_driver_init_early(struct drm_i915_private *dev_priv,
+ const struct pci_device_id *ent)
+{
+ const struct intel_device_info *match_info =
+ (struct intel_device_info *)ent->driver_data;
+ struct intel_device_info *device_info;
+ int ret = 0;
+
+ if (i915_inject_load_failure())
+ return -ENODEV;
+
+ /* Setup the write-once "constant" device info */
+ device_info = mkwrite_device_info(dev_priv);
+ memcpy(device_info, match_info, sizeof(*device_info));
+ device_info->device_id = dev_priv->drm.pdev->device;
+
+ BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
+ device_info->gen_mask = BIT(device_info->gen - 1);
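+	/* Worked example (sketch): gen == 9 yields gen_mask == BIT(8) ==
+	 * 0x100, letting the IS_GEN9() check test a single bit. */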
+
+ spin_lock_init(&dev_priv->irq_lock);
+ spin_lock_init(&dev_priv->gpu_error.lock);
+ mutex_init(&dev_priv->backlight_lock);
+ spin_lock_init(&dev_priv->uncore.lock);
+ spin_lock_init(&dev_priv->mm.object_stat_lock);
+ spin_lock_init(&dev_priv->mmio_flip_lock);
+ mutex_init(&dev_priv->sb_lock);
+ mutex_init(&dev_priv->modeset_restore_lock);
+ mutex_init(&dev_priv->av_mutex);
+ mutex_init(&dev_priv->wm.wm_mutex);
+ mutex_init(&dev_priv->pps_mutex);
+
+ ret = i915_workqueues_init(dev_priv);
+ if (ret < 0)
+ return ret;
+
+ ret = intel_gvt_init(dev_priv);
+ if (ret < 0)
+ goto err_workqueues;
+
+ /* This must be called before any calls to HAS_PCH_* */
+ intel_detect_pch(&dev_priv->drm);
+
+ intel_pm_setup(&dev_priv->drm);
+ intel_init_dpio(dev_priv);
+ intel_power_domains_init(dev_priv);
+ intel_irq_init(dev_priv);
+ intel_init_display_hooks(dev_priv);
+ intel_init_clock_gating_hooks(dev_priv);
+ intel_init_audio_hooks(dev_priv);
+ i915_gem_load_init(&dev_priv->drm);
+
+ intel_display_crc_init(&dev_priv->drm);
+
+ intel_device_info_dump(dev_priv);
+
+ /* Not all pre-production machines fall into this category, only the
+ * very first ones. Almost everything should work, except for maybe
+ * suspend/resume. And we don't implement workarounds that affect only
+ * pre-production machines. */
+ if (IS_HSW_EARLY_SDV(dev_priv))
+ DRM_INFO("This is an early pre-production Haswell machine. "
+ "It may not be fully functional.\n");
+
+ return 0;
+
+err_workqueues:
+ i915_workqueues_cleanup(dev_priv);
+ return ret;
+}
+
+/**
+ * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
+ * @dev_priv: device private
+ */
+static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
+{
+ i915_gem_load_cleanup(&dev_priv->drm);
+ i915_workqueues_cleanup(dev_priv);
+}
+
+static int i915_mmio_setup(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int mmio_bar;
+ int mmio_size;
+
+ mmio_bar = IS_GEN2(dev) ? 1 : 0;
+ /*
+ * Before gen4, the registers and the GTT are behind different BARs.
+ * However, from gen4 onwards, the registers and the GTT are shared
+	 * in the same BAR, so we restrict this ioremap to avoid clobbering
+	 * the GTT, which we want to map with ioremap_wc instead. Fortunately,
+ * the register BAR remains the same size for all the earlier
+ * generations up to Ironlake.
+ */
+ if (INTEL_INFO(dev)->gen < 5)
+ mmio_size = 512 * 1024;
+ else
+ mmio_size = 2 * 1024 * 1024;
+ dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
+ if (dev_priv->regs == NULL) {
+ DRM_ERROR("failed to map registers\n");
+
+ return -EIO;
+ }
+
+ /* Try to make sure MCHBAR is enabled before poking at it */
+ intel_setup_mchbar(dev);
+
+ return 0;
+}
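+
+/*
+ * With dev_priv->regs mapped, register access reduces to MMIO loads and
+ * stores; a raw sketch (the driver itself goes through the I915_READ/
+ * I915_WRITE uncore macros rather than bare readl/writel):
+ *
+ *	u32 v = readl(dev_priv->regs + offset);
+ *	writel(v | bit, dev_priv->regs + offset);
+ */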
+
+static void i915_mmio_cleanup(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ intel_teardown_mchbar(dev);
+ pci_iounmap(dev->pdev, dev_priv->regs);
+}
+
+/**
+ * i915_driver_init_mmio - setup device MMIO
+ * @dev_priv: device private
+ *
+ * Setup minimal device state necessary for MMIO accesses later in the
+ * initialization sequence. The setup here should avoid any other device-wide
+ * side effects or exposing the driver via kernel internal or user space
+ * interfaces.
+ */
+static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = &dev_priv->drm;
+ int ret;
+
+ if (i915_inject_load_failure())
+ return -ENODEV;
+
+ if (i915_get_bridge_dev(dev))
+ return -EIO;
+
+ ret = i915_mmio_setup(dev);
+ if (ret < 0)
+ goto put_bridge;
+
+ intel_uncore_init(dev_priv);
+
+ return 0;
+
+put_bridge:
+ pci_dev_put(dev_priv->bridge_dev);
+
+ return ret;
+}
+
+/**
+ * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
+ * @dev_priv: device private
+ */
+static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = &dev_priv->drm;
+
+ intel_uncore_fini(dev_priv);
+ i915_mmio_cleanup(dev);
+ pci_dev_put(dev_priv->bridge_dev);
+}
+
+static void intel_sanitize_options(struct drm_i915_private *dev_priv)
+{
+ i915.enable_execlists =
+ intel_sanitize_enable_execlists(dev_priv,
+ i915.enable_execlists);
+
+ /*
+ * i915.enable_ppgtt is read-only, so do an early pass to validate the
+ * user's requested state against the hardware/driver capabilities. We
+ * do this now so that we can print out any log messages once rather
+ * than every time we check intel_enable_ppgtt().
+ */
+ i915.enable_ppgtt =
+ intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
+ DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
+}
+
+/**
+ * i915_driver_init_hw - setup state requiring device access
+ * @dev_priv: device private
+ *
+ * Setup state that requires accessing the device, but doesn't require
+ * exposing the driver via kernel internal or userspace interfaces.
+ */
+static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = &dev_priv->drm;
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ uint32_t aperture_size;
+ int ret;
+
+ if (i915_inject_load_failure())
+ return -ENODEV;
+
+ intel_device_info_runtime_init(dev_priv);
+
+ intel_sanitize_options(dev_priv);
+
+ ret = i915_ggtt_init_hw(dev);
+ if (ret)
+ return ret;
+
+ ret = i915_ggtt_enable_hw(dev);
+ if (ret) {
+ DRM_ERROR("failed to enable GGTT\n");
+ goto out_ggtt;
+ }
+
+ /* WARNING: Apparently we must kick fbdev drivers before vgacon,
+ * otherwise the vga fbdev driver falls over. */
+ ret = i915_kick_out_firmware_fb(dev_priv);
+ if (ret) {
+ DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
+ goto out_ggtt;
+ }
+
+ ret = i915_kick_out_vgacon(dev_priv);
+ if (ret) {
+ DRM_ERROR("failed to remove conflicting VGA console\n");
+ goto out_ggtt;
+ }
+
+ pci_set_master(dev->pdev);
+
+ /* overlay on gen2 is broken and can't address above 1G */
+ if (IS_GEN2(dev)) {
+ ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+ if (ret) {
+ DRM_ERROR("failed to set DMA mask\n");
+
+ goto out_ggtt;
+ }
+ }
+
+
+ /* 965GM sometimes incorrectly writes to hardware status page (HWS)
+ * using 32bit addressing, overwriting memory if HWS is located
+ * above 4GB.
+ *
+ * The documentation also mentions an issue with undefined
+ * behaviour if any general state is accessed within a page above 4GB,
+ * which also needs to be handled carefully.
+ */
+ if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
+ ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+
+ if (ret) {
+ DRM_ERROR("failed to set DMA mask\n");
+
+ goto out_ggtt;
+ }
+ }
+
+ aperture_size = ggtt->mappable_end;
+
+ ggtt->mappable =
+ io_mapping_create_wc(ggtt->mappable_base,
+ aperture_size);
+ if (!ggtt->mappable) {
+ ret = -EIO;
+ goto out_ggtt;
+ }
+
+ ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base,
+ aperture_size);
+
+ pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+
+ intel_uncore_sanitize(dev_priv);
+
+ intel_opregion_setup(dev_priv);
+
+ i915_gem_load_init_fences(dev_priv);
+
+ /* On the 945G/GM, the chipset reports the MSI capability on the
+ * integrated graphics even though the support isn't actually there
+ * according to the published specs. It doesn't appear to function
+ * correctly in testing on 945G.
+ * This may be a side effect of MSI having been made available for PEG
+ * and the registers being closely associated.
+ *
+ * According to chipset errata, on the 965GM, MSI interrupts may
+	 * be lost or delayed, but we use them anyway to avoid
+ * stuck interrupts on some machines.
+ */
+ if (!IS_I945G(dev) && !IS_I945GM(dev)) {
+ if (pci_enable_msi(dev->pdev) < 0)
+ DRM_DEBUG_DRIVER("can't enable MSI");
+ }
+
+ return 0;
+
+out_ggtt:
+ i915_ggtt_cleanup_hw(dev);
+
+ return ret;
+}
+
+/**
+ * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
+ * @dev_priv: device private
+ */
+static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = &dev_priv->drm;
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
+
+ if (dev->pdev->msi_enabled)
+ pci_disable_msi(dev->pdev);
+
+ pm_qos_remove_request(&dev_priv->pm_qos);
+ arch_phys_wc_del(ggtt->mtrr);
+ io_mapping_free(ggtt->mappable);
+ i915_ggtt_cleanup_hw(dev);
+}
+
+/**
+ * i915_driver_register - register the driver with the rest of the system
+ * @dev_priv: device private
+ *
+ * Perform any steps necessary to make the driver available via kernel
+ * internal or userspace interfaces.
+ */
+static void i915_driver_register(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = &dev_priv->drm;
+
+ i915_gem_shrinker_init(dev_priv);
+
+ /*
+ * Notify a valid surface after modesetting,
+ * when running inside a VM.
+ */
+ if (intel_vgpu_active(dev_priv))
+ I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
+
+ /* Reveal our presence to userspace */
+ if (drm_dev_register(dev, 0) == 0) {
+ i915_debugfs_register(dev_priv);
+ i915_setup_sysfs(dev);
+ } else
+ DRM_ERROR("Failed to register driver for userspace access!\n");
+
+ if (INTEL_INFO(dev_priv)->num_pipes) {
+ /* Must be done after probing outputs */
+ intel_opregion_register(dev_priv);
+ acpi_video_register();
+ }
+
+ if (IS_GEN5(dev_priv))
+ intel_gpu_ips_init(dev_priv);
+
+ i915_audio_component_init(dev_priv);
+
+ /*
+	 * Some ports require correctly set-up hpd registers for detection to
+	 * work properly (otherwise they report a ghost "connected" status),
+	 * e.g. VGA on gm45. Hence we can only set up the initial fbdev config
+	 * after hpd
+ * irqs are fully enabled. We do it last so that the async config
+ * cannot run before the connectors are registered.
+ */
+ intel_fbdev_initial_config_async(dev);
+}
+
+/**
+ * i915_driver_unregister - cleanup the registration done in i915_driver_register()
+ * @dev_priv: device private
+ */
+static void i915_driver_unregister(struct drm_i915_private *dev_priv)
+{
+ i915_audio_component_cleanup(dev_priv);
+
+ intel_gpu_ips_teardown();
+ acpi_video_unregister();
+ intel_opregion_unregister(dev_priv);
+
+ i915_teardown_sysfs(&dev_priv->drm);
+ i915_debugfs_unregister(dev_priv);
+ drm_dev_unregister(&dev_priv->drm);
+
+ i915_gem_shrinker_cleanup(dev_priv);
+}
+
+/**
+ * i915_driver_load - setup chip and create an initial config
+ * @dev: DRM device
+ * @flags: startup flags
+ *
+ * The driver load routine has to do several things:
+ * - drive output discovery via intel_modeset_init()
+ * - initialize the memory manager
+ * - allocate initial config memory
+ * - setup the DRM framebuffer with the allocated memory
+ */
+int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct drm_i915_private *dev_priv;
+ int ret;
+
+ if (i915.nuclear_pageflip)
+ driver.driver_features |= DRIVER_ATOMIC;
+
+ ret = -ENOMEM;
+ dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+ if (dev_priv)
+ ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev);
+ if (ret) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "[" DRM_NAME ":%s] allocation failed\n", __func__);
+ kfree(dev_priv);
+ return ret;
+ }
+
+ dev_priv->drm.pdev = pdev;
+ dev_priv->drm.dev_private = dev_priv;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto out_free_priv;
+
+ pci_set_drvdata(pdev, &dev_priv->drm);
+
+ ret = i915_driver_init_early(dev_priv, ent);
+ if (ret < 0)
+ goto out_pci_disable;
+
+ intel_runtime_pm_get(dev_priv);
+
+ ret = i915_driver_init_mmio(dev_priv);
+ if (ret < 0)
+ goto out_runtime_pm_put;
+
+ ret = i915_driver_init_hw(dev_priv);
+ if (ret < 0)
+ goto out_cleanup_mmio;
+
+ /*
+ * TODO: move the vblank init and parts of modeset init steps into one
+ * of the i915_driver_init_/i915_driver_register functions according
+ * to the role/effect of the given init step.
+ */
+ if (INTEL_INFO(dev_priv)->num_pipes) {
+ ret = drm_vblank_init(&dev_priv->drm,
+ INTEL_INFO(dev_priv)->num_pipes);
+ if (ret)
+ goto out_cleanup_hw;
+ }
+
+ ret = i915_load_modeset_init(&dev_priv->drm);
+ if (ret < 0)
+ goto out_cleanup_vblank;
+
+ i915_driver_register(dev_priv);
+
+ intel_runtime_pm_enable(dev_priv);
+
+ /* Everything is in place, we can now relax! */
+ DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
+ driver.name, driver.major, driver.minor, driver.patchlevel,
+ driver.date, pci_name(pdev), dev_priv->drm.primary->index);
+
+ intel_runtime_pm_put(dev_priv);
+
+ return 0;
+
+out_cleanup_vblank:
+ drm_vblank_cleanup(&dev_priv->drm);
+out_cleanup_hw:
+ i915_driver_cleanup_hw(dev_priv);
+out_cleanup_mmio:
+ i915_driver_cleanup_mmio(dev_priv);
+out_runtime_pm_put:
+ intel_runtime_pm_put(dev_priv);
+ i915_driver_cleanup_early(dev_priv);
+out_pci_disable:
+ pci_disable_device(pdev);
+out_free_priv:
+ i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
+ drm_dev_unref(&dev_priv->drm);
+ return ret;
+}
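+
+/*
+ * Sketch of the phases above (torn down in reverse by the error path
+ * here and by i915_driver_unload() below):
+ *
+ *	init_early  - SW-only state, no device access
+ *	init_mmio   - bridge device, register mapping, uncore
+ *	init_hw     - GGTT, DMA masks, aperture, MSI
+ *	vblank/modeset init, then i915_driver_register()
+ */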
+
+void i915_driver_unload(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ intel_fbdev_fini(dev);
+
+ if (i915_gem_suspend(dev))
+ DRM_ERROR("failed to idle hardware; continuing to unload!\n");
+
+ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+
+ i915_driver_unregister(dev_priv);
+
+ drm_vblank_cleanup(dev);
+
+ intel_modeset_cleanup(dev);
+
+ /*
+	 * Free the memory allocated for the child device config parsed
+	 * from the VBT.
+ */
+ if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
+ kfree(dev_priv->vbt.child_dev);
+ dev_priv->vbt.child_dev = NULL;
+ dev_priv->vbt.child_dev_num = 0;
+ }
+ kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
+ dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
+ kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
+ dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
+
+ vga_switcheroo_unregister_client(dev->pdev);
+ vga_client_register(dev->pdev, NULL, NULL, NULL);
+
+ intel_csr_ucode_fini(dev_priv);
+
+ /* Free error state after interrupts are fully disabled. */
+ cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
+ i915_destroy_error_state(dev);
+
+ /* Flush any outstanding unpin_work. */
+ flush_workqueue(dev_priv->wq);
+
+ intel_guc_fini(dev);
+ i915_gem_fini(dev);
+ intel_fbc_cleanup_cfb(dev_priv);
+
+ intel_power_domains_fini(dev_priv);
+
+ i915_driver_cleanup_hw(dev_priv);
+ i915_driver_cleanup_mmio(dev_priv);
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+
+ i915_driver_cleanup_early(dev_priv);
+}
+
+static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
+{
+ int ret;
+
+ ret = i915_gem_open(dev, file);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * i915_driver_lastclose - clean up after all DRM clients have exited
+ * @dev: DRM device
+ *
+ * Take care of cleaning up after all DRM clients have exited. In the
+ * mode setting case, we want to restore the kernel's initial mode (just
+ * in case the last client left us in a bad state).
+ *
+ * Additionally, in the non-mode setting case, we'll tear down the GTT
+ * and DMA structures, since the kernel won't be using them, and clean
+ * up any GEM state.
+ */
+static void i915_driver_lastclose(struct drm_device *dev)
+{
+ intel_fbdev_restore_mode(dev);
+ vga_switcheroo_process_delayed_switch();
+}
+
+static void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
+{
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_context_close(dev, file);
+ i915_gem_release(dev, file);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+
+ kfree(file_priv);
+}
+
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct intel_encoder *encoder;
drm_modeset_lock_all(dev);
@@ -586,7 +1435,7 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv)
static int i915_drm_suspend(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
pci_power_t opregion_target_state;
int error;
@@ -614,7 +1463,7 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_guc_suspend(dev);
- intel_suspend_gt_powersave(dev);
+ intel_suspend_gt_powersave(dev_priv);
intel_display_suspend(dev);
@@ -632,10 +1481,10 @@ static int i915_drm_suspend(struct drm_device *dev)
i915_save_state(dev);
opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
- intel_opregion_notify_adapter(dev, opregion_target_state);
+ intel_opregion_notify_adapter(dev_priv, opregion_target_state);
- intel_uncore_forcewake_reset(dev, false);
- intel_opregion_fini(dev);
+ intel_uncore_forcewake_reset(dev_priv, false);
+ intel_opregion_unregister(dev_priv);
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
@@ -653,7 +1502,7 @@ out:
static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
{
- struct drm_i915_private *dev_priv = drm_dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(drm_dev);
bool fw_csr;
int ret;
@@ -715,7 +1564,7 @@ int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
{
int error;
- if (!dev || !dev->dev_private) {
+ if (!dev) {
DRM_ERROR("dev: %p\n", dev);
DRM_ERROR("DRM not initialized, aborting suspend.\n");
return -ENODEV;
@@ -737,7 +1586,7 @@ int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
static int i915_drm_resume(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
disable_rpm_wakeref_asserts(dev_priv);
@@ -753,7 +1602,7 @@ static int i915_drm_resume(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
i915_restore_state(dev);
- intel_opregion_setup(dev);
+ intel_opregion_setup(dev_priv);
intel_init_pch_refclk(dev);
drm_mode_config_reset(dev);
@@ -771,7 +1620,7 @@ static int i915_drm_resume(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
if (i915_gem_init_hw(dev)) {
DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
- atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+ atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
}
mutex_unlock(&dev->struct_mutex);
@@ -781,7 +1630,7 @@ static int i915_drm_resume(struct drm_device *dev)
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev);
+ dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
intel_dp_mst_resume(dev);
@@ -798,7 +1647,7 @@ static int i915_drm_resume(struct drm_device *dev)
/* Config may have changed between suspend and resume */
drm_helper_hpd_irq_event(dev);
- intel_opregion_init(dev);
+ intel_opregion_register(dev_priv);
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
@@ -806,7 +1655,7 @@ static int i915_drm_resume(struct drm_device *dev)
dev_priv->modeset_restore = MODESET_DONE;
mutex_unlock(&dev_priv->modeset_restore_lock);
- intel_opregion_notify_adapter(dev, PCI_D0);
+ intel_opregion_notify_adapter(dev_priv, PCI_D0);
drm_kms_helper_poll_enable(dev);
@@ -817,7 +1666,7 @@ static int i915_drm_resume(struct drm_device *dev)
static int i915_drm_resume_early(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
/*
@@ -874,9 +1723,9 @@ static int i915_drm_resume_early(struct drm_device *dev)
DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
ret);
- intel_uncore_early_sanitize(dev, true);
+ intel_uncore_early_sanitize(dev_priv, true);
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
if (!dev_priv->suspended_to_idle)
gen9_sanitize_dc_state(dev_priv);
bxt_disable_dc9(dev_priv);
@@ -884,7 +1733,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
hsw_disable_pc8(dev_priv);
}
- intel_uncore_sanitize(dev);
+ intel_uncore_sanitize(dev_priv);
if (IS_BROXTON(dev_priv) ||
!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
@@ -927,14 +1776,14 @@ int i915_resume_switcheroo(struct drm_device *dev)
* - re-init interrupt state
* - re-init display
*/
-int i915_reset(struct drm_device *dev)
+int i915_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_device *dev = &dev_priv->drm;
struct i915_gpu_error *error = &dev_priv->gpu_error;
unsigned reset_counter;
int ret;
- intel_reset_gt_powersave(dev);
+ intel_reset_gt_powersave(dev_priv);
mutex_lock(&dev->struct_mutex);
@@ -948,24 +1797,11 @@ int i915_reset(struct drm_device *dev)
goto error;
}
- i915_gem_reset(dev);
-
- ret = intel_gpu_reset(dev, ALL_ENGINES);
-
- /* Also reset the gpu hangman. */
- if (error->stop_rings != 0) {
- DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
- error->stop_rings = 0;
- if (ret == -ENODEV) {
- DRM_INFO("Reset not implemented, but ignoring "
- "error for simulated gpu hangs\n");
- ret = 0;
- }
- }
+ pr_notice("drm/i915: Resetting chip after gpu hang\n");
- if (i915_stop_ring_allow_warn(dev_priv))
- pr_notice("drm/i915: Resetting chip after gpu hang\n");
+ i915_gem_reset(dev);
+ ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
if (ret) {
if (ret != -ENODEV)
DRM_ERROR("Failed to reset chip: %i\n", ret);
@@ -1005,7 +1841,7 @@ int i915_reset(struct drm_device *dev)
* of re-init after reset.
*/
if (INTEL_INFO(dev)->gen > 5)
- intel_enable_gt_powersave(dev);
+ intel_enable_gt_powersave(dev_priv);
return 0;
@@ -1015,51 +1851,12 @@ error:
return ret;
}
-static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- struct intel_device_info *intel_info =
- (struct intel_device_info *) ent->driver_data;
-
- if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
- DRM_INFO("This hardware requires preliminary hardware support.\n"
- "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
- return -ENODEV;
- }
-
- /* Only bind to function 0 of the device. Early generations
- * used function 1 as a placeholder for multi-head. This causes
- * us confusion instead, especially on the systems where both
- * functions have the same PCI-ID!
- */
- if (PCI_FUNC(pdev->devfn))
- return -ENODEV;
-
- /*
- * apple-gmux is needed on dual GPU MacBook Pro
- * to probe the panel if we're the inactive GPU.
- */
- if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
- apple_gmux_present() && pdev != vga_default_device() &&
- !vga_switcheroo_handler_flags())
- return -EPROBE_DEFER;
-
- return drm_get_pci_dev(pdev, ent, &driver);
-}
-
-static void
-i915_pci_remove(struct pci_dev *pdev)
-{
- struct drm_device *dev = pci_get_drvdata(pdev);
-
- drm_put_dev(dev);
-}
-
static int i915_pm_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- if (!drm_dev || !drm_dev->dev_private) {
+ if (!drm_dev) {
dev_err(dev, "DRM not initialized, aborting suspend.\n");
return -ENODEV;
}
@@ -1072,7 +1869,7 @@ static int i915_pm_suspend(struct device *dev)
static int i915_pm_suspend_late(struct device *dev)
{
- struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+ struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
/*
* We have a suspend ordering issue with the snd-hda driver also
@@ -1091,7 +1888,7 @@ static int i915_pm_suspend_late(struct device *dev)
static int i915_pm_poweroff_late(struct device *dev)
{
- struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+ struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -1101,7 +1898,7 @@ static int i915_pm_poweroff_late(struct device *dev)
static int i915_pm_resume_early(struct device *dev)
{
- struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+ struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -1111,7 +1908,7 @@ static int i915_pm_resume_early(struct device *dev)
static int i915_pm_resume(struct device *dev)
{
- struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+ struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -1119,6 +1916,49 @@ static int i915_pm_resume(struct device *dev)
return i915_drm_resume(drm_dev);
}
+/* freeze: before creating the hibernation_image */
+static int i915_pm_freeze(struct device *dev)
+{
+ return i915_pm_suspend(dev);
+}
+
+static int i915_pm_freeze_late(struct device *dev)
+{
+ int ret;
+
+ ret = i915_pm_suspend_late(dev);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_freeze_late(dev_to_i915(dev));
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/* thaw: called after creating the hibernation image, but before turning off. */
+static int i915_pm_thaw_early(struct device *dev)
+{
+ return i915_pm_resume_early(dev);
+}
+
+static int i915_pm_thaw(struct device *dev)
+{
+ return i915_pm_resume(dev);
+}
+
+/* restore: called after loading the hibernation image. */
+static int i915_pm_restore_early(struct device *dev)
+{
+ return i915_pm_resume_early(dev);
+}
+
+static int i915_pm_restore(struct device *dev)
+{
+ return i915_pm_resume(dev);
+}
+
/*
* Save all Gunit registers that may be lost after a D3 and a subsequent
* S0i[R123] transition. The list of registers needing a save/restore is
@@ -1318,8 +2158,6 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
u32 val;
int err;
-#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
-
val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
if (force_on)
@@ -1329,13 +2167,16 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
if (!force_on)
return 0;
- err = wait_for(COND, 20);
+ err = intel_wait_for_register(dev_priv,
+ VLV_GTLC_SURVIVABILITY_REG,
+ VLV_GFX_CLK_STATUS_BIT,
+ VLV_GFX_CLK_STATUS_BIT,
+ 20);
if (err)
DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
I915_READ(VLV_GTLC_SURVIVABILITY_REG));
return err;
-#undef COND
}
static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
@@ -1350,13 +2191,15 @@ static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
POSTING_READ(VLV_GTLC_WAKE_CTRL);
-#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
- allow)
- err = wait_for(COND, 1);
+ err = intel_wait_for_register(dev_priv,
+ VLV_GTLC_PW_STATUS,
+ VLV_GTLC_ALLOWWAKEACK,
+ allow,
+ 1);
if (err)
DRM_ERROR("timeout disabling GT waking\n");
+
return err;
-#undef COND
}
static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
@@ -1368,8 +2211,7 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
val = wait_for_on ? mask : 0;
-#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
- if (COND)
+ if ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
return 0;
DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
@@ -1380,13 +2222,14 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
* RC6 transitioning can be delayed up to 2 msec (see
* valleyview_enable_rps), use 3 msec for safety.
*/
- err = wait_for(COND, 3);
+ err = intel_wait_for_register(dev_priv,
+ VLV_GTLC_PW_STATUS, mask, val,
+ 3);
if (err)
DRM_ERROR("timeout waiting for GT wells to go %s\n",
onoff(wait_for_on));
return err;
-#undef COND
}
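
/*
 * The three conversions above share one pattern: an open-coded poll,
 *
 *	#define COND (I915_READ(reg) & MASK)
 *	err = wait_for(COND, timeout_ms);
 *
 * becomes a call to the uncore helper, which waits until
 * (I915_READ(reg) & mask) == value or the timeout (in ms) expires:
 *
 *	err = intel_wait_for_register(dev_priv, reg, mask, value,
 *				      timeout_ms);
 */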
static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
@@ -1443,7 +2286,7 @@ err1:
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
bool rpm_resume)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
int err;
int ret;
@@ -1479,10 +2322,10 @@ static int intel_runtime_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct drm_device *dev = pci_get_drvdata(pdev);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
- if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
+ if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
return -ENODEV;
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
@@ -1517,11 +2360,8 @@ static int intel_runtime_suspend(struct device *device)
i915_gem_release_all_mmaps(dev_priv);
mutex_unlock(&dev->struct_mutex);
- cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
-
intel_guc_suspend(dev);
- intel_suspend_gt_powersave(dev);
intel_runtime_pm_disable_interrupts(dev_priv);
ret = 0;
@@ -1543,7 +2383,7 @@ static int intel_runtime_suspend(struct device *device)
return ret;
}
- intel_uncore_forcewake_reset(dev, false);
+ intel_uncore_forcewake_reset(dev_priv, false);
enable_rpm_wakeref_asserts(dev_priv);
WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
@@ -1557,14 +2397,14 @@ static int intel_runtime_suspend(struct device *device)
* FIXME: We really should find a document that references the arguments
* used below!
*/
- if (IS_BROADWELL(dev)) {
+ if (IS_BROADWELL(dev_priv)) {
/*
* On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
* being detected, and the call we do at intel_runtime_resume()
* won't be able to restore them. Since PCI_D3hot matches the
* actual specification and appears to be working, use it.
*/
- intel_opregion_notify_adapter(dev, PCI_D3hot);
+ intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
} else {
/*
* current versions of firmware which depend on this opregion
@@ -1573,7 +2413,7 @@ static int intel_runtime_suspend(struct device *device)
* to distinguish it from notifications that might be sent via
* the suspend path.
*/
- intel_opregion_notify_adapter(dev, PCI_D1);
+ intel_opregion_notify_adapter(dev_priv, PCI_D1);
}
assert_forcewakes_inactive(dev_priv);
@@ -1589,7 +2429,7 @@ static int intel_runtime_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct drm_device *dev = pci_get_drvdata(pdev);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
@@ -1600,7 +2440,7 @@ static int intel_runtime_resume(struct device *device)
WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
disable_rpm_wakeref_asserts(dev_priv);
- intel_opregion_notify_adapter(dev, PCI_D0);
+ intel_opregion_notify_adapter(dev_priv, PCI_D0);
dev_priv->pm.suspended = false;
if (intel_uncore_unclaimed_mmio(dev_priv))
DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
@@ -1627,7 +2467,7 @@ static int intel_runtime_resume(struct device *device)
* we can do is to hope that things will still work (and disable RPM).
*/
i915_gem_init_swizzling(dev);
- gen6_update_ring_freq(dev);
+ gen6_update_ring_freq(dev_priv);
intel_runtime_pm_enable_interrupts(dev_priv);
@@ -1639,8 +2479,6 @@ static int intel_runtime_resume(struct device *device)
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
intel_hpd_init(dev_priv);
- intel_enable_gt_powersave(dev);
-
enable_rpm_wakeref_asserts(dev_priv);
if (ret)
@@ -1651,7 +2489,7 @@ static int intel_runtime_resume(struct device *device)
return ret;
}
-static const struct dev_pm_ops i915_pm_ops = {
+const struct dev_pm_ops i915_pm_ops = {
/*
* S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
* PMSG_RESUME]
@@ -1676,14 +2514,14 @@ static const struct dev_pm_ops i915_pm_ops = {
* @restore, @restore_early : called after rebooting and restoring the
* hibernation image [PMSG_RESTORE]
*/
- .freeze = i915_pm_suspend,
- .freeze_late = i915_pm_suspend_late,
- .thaw_early = i915_pm_resume_early,
- .thaw = i915_pm_resume,
+ .freeze = i915_pm_freeze,
+ .freeze_late = i915_pm_freeze_late,
+ .thaw_early = i915_pm_thaw_early,
+ .thaw = i915_pm_thaw,
.poweroff = i915_pm_suspend,
.poweroff_late = i915_pm_poweroff_late,
- .restore_early = i915_pm_resume_early,
- .restore = i915_pm_resume,
+ .restore_early = i915_pm_restore_early,
+ .restore = i915_pm_restore,
/* S0ix (via runtime suspend) event handlers */
.runtime_suspend = intel_runtime_suspend,
@@ -1710,6 +2548,68 @@ static const struct file_operations i915_driver_fops = {
.llseek = noop_llseek,
};
+static int
+i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ return -ENODEV;
+}
+
+static const struct drm_ioctl_desc i915_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
+};
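+
+/*
+ * Reading the table above: DRM_IOCTL_DEF_DRV(OP, fn, flags) binds
+ * DRM_IOCTL_##OP to fn, with flags gating access -- DRM_AUTH requires
+ * an authenticated client, DRM_MASTER/DRM_ROOT_ONLY restrict to the
+ * DRM master/root, and DRM_RENDER_ALLOW additionally permits the ioctl
+ * on render nodes. Legacy UMS entry points are stubbed out to drm_noop.
+ */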
+
static struct drm_driver driver = {
/* Don't use MTRRs here; the Xserver or userspace app should
* deal with them for Intel hardware.
@@ -1717,18 +2617,12 @@ static struct drm_driver driver = {
.driver_features =
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
DRIVER_RENDER | DRIVER_MODESET,
- .load = i915_driver_load,
- .unload = i915_driver_unload,
.open = i915_driver_open,
.lastclose = i915_driver_lastclose,
.preclose = i915_driver_preclose,
.postclose = i915_driver_postclose,
.set_busid = drm_pci_set_busid,
-#if defined(CONFIG_DEBUG_FS)
- .debugfs_init = i915_debugfs_init,
- .debugfs_cleanup = i915_debugfs_cleanup,
-#endif
.gem_free_object = i915_gem_free_object,
.gem_vm_ops = &i915_gem_vm_ops,
@@ -1741,6 +2635,7 @@ static struct drm_driver driver = {
.dumb_map_offset = i915_gem_mmap_gtt,
.dumb_destroy = drm_gem_dumb_destroy,
.ioctls = i915_ioctls,
+ .num_ioctls = ARRAY_SIZE(i915_ioctls),
.fops = &i915_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -1749,56 +2644,3 @@ static struct drm_driver driver = {
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
};
-
-static struct pci_driver i915_pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- .probe = i915_pci_probe,
- .remove = i915_pci_remove,
- .driver.pm = &i915_pm_ops,
-};
-
-static int __init i915_init(void)
-{
- driver.num_ioctls = i915_max_ioctl;
-
- /*
- * Enable KMS by default, unless explicitly overriden by
- * either the i915.modeset prarameter or by the
- * vga_text_mode_force boot option.
- */
-
- if (i915.modeset == 0)
- driver.driver_features &= ~DRIVER_MODESET;
-
- if (vgacon_text_force() && i915.modeset == -1)
- driver.driver_features &= ~DRIVER_MODESET;
-
- if (!(driver.driver_features & DRIVER_MODESET)) {
- /* Silently fail loading to not upset userspace. */
- DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
- return 0;
- }
-
- if (i915.nuclear_pageflip)
- driver.driver_features |= DRIVER_ATOMIC;
-
- return drm_pci_init(&driver, &i915_pci_driver);
-}
-
-static void __exit i915_exit(void)
-{
- if (!(driver.driver_features & DRIVER_MODESET))
- return; /* Never loaded a driver. */
-
- drm_pci_exit(&driver, &i915_pci_driver);
-}
-
-module_init(i915_init);
-module_exit(i915_exit);
-
-MODULE_AUTHOR("Tungsten Graphics, Inc.");
-MODULE_AUTHOR("Intel Corporation");
-
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 74cb400c8..f68c78918 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -47,6 +47,7 @@
#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
+#include <drm/drm_auth.h>
#include "i915_params.h"
#include "i915_reg.h"
@@ -61,12 +62,14 @@
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"
+#include "intel_gvt.h"
+
/* General customization:
*/
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20160425"
+#define DRIVER_DATE "20160711"
#undef WARN_ON
/* Many gcc versions seem to not see through this and fall over :( */
@@ -320,21 +323,36 @@ struct i915_hotplug {
for_each_if ((__ports_mask) & (1 << (__port)))
#define for_each_crtc(dev, crtc) \
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
#define for_each_intel_plane(dev, intel_plane) \
list_for_each_entry(intel_plane, \
- &dev->mode_config.plane_list, \
+ &(dev)->mode_config.plane_list, \
base.head)
+#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \
+ list_for_each_entry(intel_plane, \
+ &(dev)->mode_config.plane_list, \
+ base.head) \
+ for_each_if ((plane_mask) & \
+ (1 << drm_plane_index(&intel_plane->base)))
+
#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
list_for_each_entry(intel_plane, \
&(dev)->mode_config.plane_list, \
base.head) \
for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe)
-#define for_each_intel_crtc(dev, intel_crtc) \
- list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
+#define for_each_intel_crtc(dev, intel_crtc) \
+ list_for_each_entry(intel_crtc, \
+ &(dev)->mode_config.crtc_list, \
+ base.head)
+
+#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
+ list_for_each_entry(intel_crtc, \
+ &(dev)->mode_config.crtc_list, \
+ base.head) \
+ for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base)))
#define for_each_intel_encoder(dev, intel_encoder) \
list_for_each_entry(intel_encoder, \
@@ -343,7 +361,7 @@ struct i915_hotplug {
#define for_each_intel_connector(dev, intel_connector) \
list_for_each_entry(intel_connector, \
- &dev->mode_config.connector_list, \
+ &(dev)->mode_config.connector_list, \
base.head)
#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
@@ -465,6 +483,7 @@ struct drm_i915_error_state {
struct timeval time;
char error_msg[128];
+ bool simulated;
int iommu;
u32 reset_count;
u32 suspend_count;
@@ -496,6 +515,7 @@ struct drm_i915_error_state {
bool valid;
/* Software tracked state */
bool waiting;
+ int num_waiters;
int hangcheck_score;
enum intel_ring_hangcheck_action hangcheck_action;
int num_requests;
@@ -541,6 +561,12 @@ struct drm_i915_error_state {
u32 tail;
} *requests;
+ struct drm_i915_error_waiter {
+ char comm[TASK_COMM_LEN];
+ pid_t pid;
+ u32 seqno;
+ } *waiters;
+
struct {
u32 gfx_mode;
union {
@@ -591,6 +617,7 @@ struct drm_i915_display_funcs {
struct intel_crtc_state *newstate);
void (*initial_watermarks)(struct intel_crtc_state *cstate);
void (*optimize_watermarks)(struct intel_crtc_state *cstate);
+ int (*compute_global_watermarks)(struct drm_atomic_state *state);
void (*update_wm)(struct drm_crtc *crtc);
int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
@@ -615,7 +642,7 @@ struct drm_i915_display_funcs {
struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req,
uint32_t flags);
- void (*hpd_irq_setup)(struct drm_device *dev);
+ void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
@@ -738,6 +765,7 @@ struct intel_csr {
func(is_valleyview) sep \
func(is_cherryview) sep \
func(is_haswell) sep \
+ func(is_broadwell) sep \
func(is_skylake) sep \
func(is_broxton) sep \
func(is_kabylake) sep \
@@ -752,7 +780,8 @@ struct intel_csr {
func(has_llc) sep \
func(has_snoop) sep \
func(has_ddi) sep \
- func(has_fpga_dbg)
+ func(has_fpga_dbg) sep \
+ func(has_pooled_eu)
#define DEFINE_FLAG(name) u8 name:1
#define SEP_SEMICOLON ;
@@ -760,9 +789,10 @@ struct intel_csr {
struct intel_device_info {
u32 display_mmio_offset;
u16 device_id;
- u8 num_pipes:3;
+ u8 num_pipes;
u8 num_sprites[I915_MAX_PIPES];
u8 gen;
+ u16 gen_mask;
u8 ring_mask; /* Rings supported by the HW */
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
/* Register offsets for the various display pipes and transcoders */
@@ -777,6 +807,7 @@ struct intel_device_info {
u8 subslice_per_slice;
u8 eu_total;
u8 eu_per_subslice;
+ u8 min_eu_in_pool;
/* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
u8 subslice_7eu[3];
u8 has_slice_pg:1;
@@ -824,9 +855,8 @@ struct i915_ctx_hang_stats {
/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_HANDLE 0
-#define CONTEXT_NO_ZEROMAP (1<<0)
/**
- * struct intel_context - as the name implies, represents a context.
+ * struct i915_gem_context - as the name implies, represents a context.
* @ref: reference count.
* @user_handle: userspace tracking identity for this context.
* @remap_slice: l3 row remapping information.
@@ -844,33 +874,41 @@ struct i915_ctx_hang_stats {
* Contexts are memory images used by the hardware to store copies of their
* internal state.
*/
-struct intel_context {
+struct i915_gem_context {
struct kref ref;
- int user_handle;
- uint8_t remap_slice;
struct drm_i915_private *i915;
- int flags;
struct drm_i915_file_private *file_priv;
- struct i915_ctx_hang_stats hang_stats;
struct i915_hw_ppgtt *ppgtt;
- /* Legacy ring buffer submission */
- struct {
- struct drm_i915_gem_object *rcs_state;
- bool initialized;
- } legacy_hw_ctx;
+ struct i915_ctx_hang_stats hang_stats;
- /* Execlists */
- struct {
+ unsigned long flags;
+#define CONTEXT_NO_ZEROMAP BIT(0)
+#define CONTEXT_NO_ERROR_CAPTURE BIT(1)
+
+ /* Unique identifier for this context, used by the hw for tracking */
+ unsigned int hw_id;
+ u32 user_handle;
+
+ u32 ggtt_alignment;
+
+ struct intel_context {
struct drm_i915_gem_object *state;
struct intel_ringbuffer *ringbuf;
- int pin_count;
struct i915_vma *lrc_vma;
- u64 lrc_desc;
uint32_t *lrc_reg_state;
+ u64 lrc_desc;
+ int pin_count;
+ bool initialised;
} engine[I915_NUM_ENGINES];
+ u32 ring_size;
+ u32 desc_template;
+ struct atomic_notifier_head status_notifier;
+ bool execlists_force_single_submission;
struct list_head link;
+
+ u8 remap_slice;
};
enum fb_op_origin {
@@ -1119,6 +1157,8 @@ struct intel_gen6_power_mgmt {
bool interrupts_enabled;
u32 pm_iir;
+ u32 pm_intr_keep;
+
/* Frequencies are stored in potentially platform dependent multiples.
* In other words, *_freq needs to be multiplied by X to be interesting.
* Soft limits are those which are used for the dynamic reclocking done
@@ -1286,37 +1326,11 @@ struct i915_gem_mm {
struct list_head fence_list;
/**
- * We leave the user IRQ off as much as possible,
- * but this means that requests will finish and never
- * be retired once the system goes idle. Set a timer to
- * fire periodically while the ring is running. When it
- * fires, go retire requests.
- */
- struct delayed_work retire_work;
-
- /**
- * When we detect an idle GPU, we want to turn on
- * powersaving features. So once we see that there
- * are no more requests outstanding and no more
- * arrive within a small period of time, we fire
- * off the idle_work.
- */
- struct delayed_work idle_work;
-
- /**
* Are we in a non-interruptible section of code like
* modesetting?
*/
bool interruptible;
- /**
- * Is the GPU currently considered idle, or busy executing userspace
- * requests? Whilst idle, we attempt to power down the hardware and
- * display clocks. In order to reduce the effect on performance, there
- * is a slight delay before we do so.
- */
- bool busy;
-
/* the indicator for dispatch video commands on two BSD rings */
unsigned int bsd_ring_dispatch_index;
@@ -1353,7 +1367,6 @@ struct i915_gpu_error {
/* Hang gpu twice in this window and your context gets banned */
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
- struct workqueue_struct *hangcheck_wq;
struct delayed_work hangcheck_work;
/* For reset and error_state handling. */
@@ -1390,20 +1403,19 @@ struct i915_gpu_error {
#define I915_WEDGED (1 << 31)
/**
+	 * Waitqueue to signal when a hang is detected. Used by waiters
+	 * to release the struct_mutex for the reset to proceed.
+ */
+ wait_queue_head_t wait_queue;
+
+ /**
* Waitqueue to signal when the reset has completed. Used by clients
* that wait for dev_priv->mm.wedged to settle.
*/
wait_queue_head_t reset_queue;
- /* Userspace knobs for gpu hang simulation;
- * combines both a ring mask, and extra flags
- */
- u32 stop_rings;
-#define I915_STOP_RING_ALLOW_BAN (1 << 31)
-#define I915_STOP_RING_ALLOW_WARN (1 << 30)
-
/* For missed irq/seqno simulation. */
- unsigned int test_irq_rings;
+ unsigned long test_irq_rings;
};
enum modeset_restore {
@@ -1492,6 +1504,7 @@ struct intel_vbt_data {
bool present;
bool active_low_pwm;
u8 min_brightness; /* min_brightness/255 of max */
+ enum intel_backlight_type type;
} backlight;
/* MIPI DSI */
@@ -1584,7 +1597,7 @@ struct skl_ddb_allocation {
};
struct skl_wm_values {
- bool dirty[I915_MAX_PIPES];
+ unsigned dirty_pipes;
struct skl_ddb_allocation ddb;
uint32_t wm_linetime[I915_MAX_PIPES];
uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
@@ -1701,7 +1714,7 @@ struct i915_execbuffer_params {
uint64_t batch_obj_vm_offset;
struct intel_engine_cs *engine;
struct drm_i915_gem_object *batch_obj;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
struct drm_i915_gem_request *request;
};
@@ -1713,7 +1726,8 @@ struct intel_wm_config {
};
struct drm_i915_private {
- struct drm_device *dev;
+ struct drm_device drm;
+
struct kmem_cache *objects;
struct kmem_cache *vmas;
struct kmem_cache *requests;
@@ -1728,6 +1742,8 @@ struct drm_i915_private {
struct i915_virtual_gpu vgpu;
+ struct intel_gvt gvt;
+
struct intel_guc guc;
struct intel_csr csr;
@@ -1751,6 +1767,7 @@ struct drm_i915_private {
wait_queue_head_t gmbus_wait_queue;
struct pci_dev *bridge_dev;
+ struct i915_gem_context *kernel_context;
struct intel_engine_cs engine[I915_NUM_ENGINES];
struct drm_i915_gem_object *semaphore_obj;
uint32_t last_seqno, next_seqno;
@@ -1806,13 +1823,17 @@ struct drm_i915_private {
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
unsigned int fsb_freq, mem_freq, is_ddr3;
- unsigned int skl_boot_cdclk;
+ unsigned int skl_preferred_vco_freq;
unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq;
unsigned int max_dotclk_freq;
unsigned int rawclk_freq;
unsigned int hpll_freq;
unsigned int czclk_freq;
+ struct {
+ unsigned int vco, ref;
+ } cdclk_pll;
+
/**
* wq - Driver workqueue for GEM.
*
@@ -1834,6 +1855,7 @@ struct drm_i915_private {
enum modeset_restore modeset_restore;
struct mutex modeset_restore_lock;
struct drm_atomic_state *modeset_restore_state;
+ struct drm_modeset_acquire_ctx reset_ctx;
struct list_head vm_list; /* Global list of all address spaces */
struct i915_ggtt ggtt; /* VM representing the global address space */
@@ -1842,6 +1864,13 @@ struct drm_i915_private {
DECLARE_HASHTABLE(mm_structs, 7);
struct mutex mm_lock;
+ /* The hw wants to have a stable context identifier for the lifetime
+ * of the context (for OA, PASID, faults, etc). This is limited
+ * in execlists to 21 bits.
+ */
+ struct ida context_hw_ida;
+#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
+
/* Kernel Modesetting */
struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
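
A hedged sketch of how this ida might be consumed; the real allocation site
lives elsewhere in the series, and assign_hw_id_sketch is a hypothetical name:

static int assign_hw_id_sketch(struct drm_i915_private *dev_priv,
			       unsigned int *out)
{
	int id;

	/* ids fall in [0, MAX_CONTEXT_HW_ID), i.e. they fit in 21 bits */
	id = ida_simple_get(&dev_priv->context_hw_ida,
			    0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
	if (id < 0)
		return id;

	*out = id;
	return 0;
}
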
@@ -1935,7 +1964,12 @@ struct drm_i915_private {
struct i915_suspend_saved_registers regfile;
struct vlv_s0ix_state vlv_s0ix_state;
- bool skl_sagv_enabled;
+ enum {
+ I915_SKL_SAGV_UNKNOWN = 0,
+ I915_SKL_SAGV_DISABLED,
+ I915_SKL_SAGV_ENABLED,
+ I915_SKL_SAGV_NOT_CONTROLLED
+ } skl_sagv_status;
struct {
/*
@@ -1956,9 +1990,6 @@ struct drm_i915_private {
*/
uint16_t skl_latency[8];
- /* Committed wm config */
- struct intel_wm_config config;
-
/*
* The skl_wm_values structure is a bit too big for stack
* allocation, so we keep the staging struct where we store
@@ -1981,6 +2012,13 @@ struct drm_i915_private {
* cstate->wm.need_postvbl_update.
*/
struct mutex wm_mutex;
+
+ /*
+ * Set during HW readout of watermarks/DDB. Some platforms
+ * need to know when we're still using BIOS-provided values
+ * (which we don't fully trust).
+ */
+ bool distrust_bios_wm;
} wm;
struct i915_runtime_pm pm;
@@ -1993,9 +2031,35 @@ struct drm_i915_private {
int (*init_engines)(struct drm_device *dev);
void (*cleanup_engine)(struct intel_engine_cs *engine);
void (*stop_engine)(struct intel_engine_cs *engine);
- } gt;
- struct intel_context *kernel_context;
+ /**
+ * Is the GPU currently considered idle, or busy executing
+ * userspace requests? Whilst idle, we allow runtime power
+ * management to power down the hardware and display clocks.
+ * In order to reduce the effect on performance, there
+ * is a slight delay before we do so.
+ */
+ unsigned int active_engines;
+ bool awake;
+
+ /**
+ * We leave the user IRQ off as much as possible,
+ * but this means that requests will finish and never
+ * be retired once the system goes idle. Set a timer to
+ * fire periodically while the ring is running. When it
+ * fires, go retire requests.
+ */
+ struct delayed_work retire_work;
+
+ /**
+ * When we detect an idle GPU, we want to turn on
+ * powersaving features. So once we see that there
+ * are no more requests outstanding and no more
+ * arrive within a small period of time, we fire
+ * off the idle_work.
+ */
+ struct delayed_work idle_work;
+ } gt;
/* perform PHY state sanity checks? */
bool chv_phy_assert[2];
@@ -2010,7 +2074,7 @@ struct drm_i915_private {
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
- return dev->dev_private;
+ return container_of(dev, struct drm_i915_private, drm);
}
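
The container_of() form works because struct drm_device is now embedded in
drm_i915_private (the new drm member above) instead of being reached through
dev_private. A minimal standalone sketch of the pattern, using hypothetical
stand-in types:

#include <stddef.h>

struct drm_device_ex { int id; };	/* stand-in for struct drm_device */

struct i915_private_ex {		/* stand-in for drm_i915_private */
	struct drm_device_ex drm;	/* embedded, no longer a pointer */
	int private_state;
};

#define container_of_ex(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct i915_private_ex *to_priv_ex(struct drm_device_ex *dev)
{
	/* pure pointer arithmetic; no dev->dev_private load */
	return container_of_ex(dev, struct i915_private_ex, drm);
}
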
static inline struct drm_i915_private *dev_to_i915(struct device *dev)
@@ -2181,6 +2245,7 @@ struct drm_i915_gem_object {
unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
+ unsigned int has_wc_mmap;
unsigned int pin_display;
struct sg_table *pages;
@@ -2233,9 +2298,81 @@ struct drm_i915_gem_object {
};
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
-void i915_gem_track_fb(struct drm_i915_gem_object *old,
- struct drm_i915_gem_object *new,
- unsigned frontbuffer_bits);
+static inline bool
+i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
+}
+
+/*
+ * Optimised SGL iterator for GEM objects
+ */
+static __always_inline struct sgt_iter {
+ struct scatterlist *sgp;
+ union {
+ unsigned long pfn;
+ dma_addr_t dma;
+ };
+ unsigned int curr;
+ unsigned int max;
+} __sgt_iter(struct scatterlist *sgl, bool dma) {
+ struct sgt_iter s = { .sgp = sgl };
+
+ if (s.sgp) {
+ s.max = s.curr = s.sgp->offset;
+ s.max += s.sgp->length;
+ if (dma)
+ s.dma = sg_dma_address(s.sgp);
+ else
+ s.pfn = page_to_pfn(sg_page(s.sgp));
+ }
+
+ return s;
+}
+
+/**
+ * __sg_next - return the next scatterlist entry in a list
+ * @sg: The current sg entry
+ *
+ * Description:
+ * If the entry is the last, return NULL; otherwise, step to the next
+ * element in the array (@sg + 1). If that's a chain pointer, follow it;
+ * otherwise just return the pointer to the current element.
+ **/
+static inline struct scatterlist *__sg_next(struct scatterlist *sg)
+{
+#ifdef CONFIG_DEBUG_SG
+ BUG_ON(sg->sg_magic != SG_MAGIC);
+#endif
+ return sg_is_last(sg) ? NULL :
+ likely(!sg_is_chain(++sg)) ? sg :
+ sg_chain_ptr(sg);
+}
+
+/**
+ * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table
+ * @__dmap: DMA address (output)
+ * @__iter: 'struct sgt_iter' (iterator state, internal)
+ * @__sgt: sg_table to iterate over (input)
+ */
+#define for_each_sgt_dma(__dmap, __iter, __sgt) \
+ for ((__iter) = __sgt_iter((__sgt)->sgl, true); \
+ ((__dmap) = (__iter).dma + (__iter).curr); \
+ (((__iter).curr += PAGE_SIZE) < (__iter).max) || \
+ ((__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0))
+
+/**
+ * for_each_sgt_page - iterate over the pages of the given sg_table
+ * @__pp: page pointer (output)
+ * @__iter: 'struct sgt_iter' (iterator state, internal)
+ * @__sgt: sg_table to iterate over (input)
+ */
+#define for_each_sgt_page(__pp, __iter, __sgt) \
+ for ((__iter) = __sgt_iter((__sgt)->sgl, false); \
+ ((__pp) = (__iter).pfn == 0 ? NULL : \
+ pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
+ (((__iter).curr += PAGE_SIZE) < (__iter).max) || \
+ ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0))
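
A hedged usage sketch for the new iterators; clflush_object_pages is a
hypothetical helper and assumes the caller has already pinned obj->pages:

static void clflush_object_pages(struct drm_i915_gem_object *obj)
{
	struct sgt_iter iter;
	struct page *page;

	/* visits each backing page, transparently following sg chains */
	for_each_sgt_page(page, iter, obj->pages)
		drm_clflush_pages(&page, 1);
}
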
/**
* Request queue structure.
@@ -2257,7 +2394,7 @@ struct drm_i915_gem_request {
/** On Which ring this request was generated */
struct drm_i915_private *i915;
struct intel_engine_cs *engine;
- unsigned reset_counter;
+ struct intel_signal_node signaling;
/** GEM sequence number associated with the previous request,
* when the HWS breadcrumb is equal to this the GPU is processing
@@ -2284,6 +2421,9 @@ struct drm_i915_gem_request {
/** Position in the ringbuffer of the end of the whole request */
u32 tail;
+ /** Preallocated space in the ringbuffer for emitting the request */
+ u32 reserved_space;
+
/**
* Context and ring buffer related to this request
* Contexts are refcounted, so when this request is associated with a
@@ -2294,9 +2434,20 @@ struct drm_i915_gem_request {
* i915_gem_request_free() will then decrement the refcount on the
* context.
*/
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
struct intel_ringbuffer *ringbuf;
+ /**
+ * Context related to the previous request.
+ * As the contexts are accessed by the hardware until the switch is
+ * completed to a new context, the hardware may still be writing
+ * to the context object after the breadcrumb is visible. We must
+ * not unpin/unbind/prune that object whilst still active and so
+ * we keep the previous context pinned until the following (this)
+ * request is retired.
+ */
+ struct i915_gem_context *previous_context;
+
/** Batch buffer related to this request if any (used for
error state dump only) */
struct drm_i915_gem_object *batch_obj;
@@ -2333,11 +2484,13 @@ struct drm_i915_gem_request {
/** Execlists no. of times this request has been sent to the ELSP */
int elsp_submitted;
+ /** Execlists context hardware id. */
+ unsigned ctx_hw_id;
};
struct drm_i915_gem_request * __must_check
i915_gem_request_alloc(struct intel_engine_cs *engine,
- struct intel_context *ctx);
+ struct i915_gem_context *ctx);
void i915_gem_request_free(struct kref *req_ref);
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
struct drm_file *file);
@@ -2365,23 +2518,9 @@ i915_gem_request_reference(struct drm_i915_gem_request *req)
static inline void
i915_gem_request_unreference(struct drm_i915_gem_request *req)
{
- WARN_ON(!mutex_is_locked(&req->engine->dev->struct_mutex));
kref_put(&req->ref, i915_gem_request_free);
}
-static inline void
-i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
-{
- struct drm_device *dev;
-
- if (!req)
- return;
-
- dev = req->engine->dev;
- if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
- mutex_unlock(&dev->struct_mutex);
-}
-
static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
struct drm_i915_gem_request *src)
{
@@ -2509,9 +2648,29 @@ struct drm_i915_cmd_table {
#define INTEL_INFO(p) (&__I915__(p)->info)
#define INTEL_GEN(p) (INTEL_INFO(p)->gen)
#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
-#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision)
#define REVID_FOREVER 0xff
+#define INTEL_REVID(p) (__I915__(p)->drm.pdev->revision)
+
+#define GEN_FOREVER (0)
+/*
+ * Returns true if Gen is in inclusive range [Start, End].
+ *
+ * Use GEN_FOREVER for an unbounded start and/or end.
+ */
+#define IS_GEN(p, s, e) ({ \
+ unsigned int __s = (s), __e = (e); \
+ BUILD_BUG_ON(!__builtin_constant_p(s)); \
+ BUILD_BUG_ON(!__builtin_constant_p(e)); \
+ if ((__s) != GEN_FOREVER) \
+ __s = (s) - 1; \
+ if ((__e) == GEN_FOREVER) \
+ __e = BITS_PER_LONG - 1; \
+ else \
+ __e = (e) - 1; \
+ !!(INTEL_INFO(p)->gen_mask & GENMASK((__e), (__s))); \
+})
+
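
A hedged usage sketch for IS_GEN(); both branches are illustrative and the
helpers are hypothetical:

/* gen6 or gen7 only */
if (IS_GEN(dev_priv, 6, 7))
	setup_semaphores(dev_priv);

/* gen8 and everything newer */
if (IS_GEN(dev_priv, 8, GEN_FOREVER))
	setup_execlists(dev_priv);
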
/*
* Return true if revision is in range [since,until] inclusive.
*
@@ -2544,7 +2703,7 @@ struct drm_i915_cmd_table {
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_cherryview)
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
-#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_cherryview && IS_GEN8(dev))
+#define IS_BROADWELL(dev) (INTEL_INFO(dev)->is_broadwell)
#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake)
#define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton)
#define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake)
@@ -2623,29 +2782,34 @@ struct drm_i915_cmd_table {
* have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
* chips, etc.).
*/
-#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
-#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
-#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
-#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
-#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
-#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
-#define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8)
-#define IS_GEN9(dev) (INTEL_INFO(dev)->gen == 9)
-
-#define RENDER_RING (1<<RCS)
-#define BSD_RING (1<<VCS)
-#define BLT_RING (1<<BCS)
-#define VEBOX_RING (1<<VECS)
-#define BSD2_RING (1<<VCS2)
-#define ALL_ENGINES (~0)
-
-#define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING)
-#define HAS_BSD2(dev) (INTEL_INFO(dev)->ring_mask & BSD2_RING)
-#define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING)
-#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
+#define IS_GEN2(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(1)))
+#define IS_GEN3(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(2)))
+#define IS_GEN4(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(3)))
+#define IS_GEN5(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(4)))
+#define IS_GEN6(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(5)))
+#define IS_GEN7(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(6)))
+#define IS_GEN8(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(7)))
+#define IS_GEN9(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(8)))
+
+#define ENGINE_MASK(id) BIT(id)
+#define RENDER_RING ENGINE_MASK(RCS)
+#define BSD_RING ENGINE_MASK(VCS)
+#define BLT_RING ENGINE_MASK(BCS)
+#define VEBOX_RING ENGINE_MASK(VECS)
+#define BSD2_RING ENGINE_MASK(VCS2)
+#define ALL_ENGINES (~0)
+
+#define HAS_ENGINE(dev_priv, id) \
+ (!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id)))
+
+#define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS)
+#define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2)
+#define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS)
+#define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS)
+
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
#define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop)
-#define HAS_EDRAM(dev) (__I915__(dev)->edram_cap & EDRAM_ENABLED)
+#define HAS_EDRAM(dev) (!!(__I915__(dev)->edram_cap & EDRAM_ENABLED))
#define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
HAS_EDRAM(dev))
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
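
The mask-based IS_GENx() tests above assume gen_mask holds BIT(gen - 1); that
is an inference from the IS_GEN() arithmetic, not something shown in this
hunk. A sketch of such an initialisation:

static void init_gen_mask_sketch(struct intel_device_info *info)
{
	/* e.g. gen == 6 maps to BIT(5), matching IS_GEN6() above */
	if (info->gen)
		info->gen_mask = BIT(info->gen - 1);
}
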
@@ -2663,9 +2827,10 @@ struct drm_i915_cmd_table {
#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
/* WaRsDisableCoarsePowerGating:skl,bxt */
-#define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
- IS_SKL_GT3(dev) || \
- IS_SKL_GT4(dev))
+#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
+ (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) || \
+ IS_SKL_GT3(dev_priv) || \
+ IS_SKL_GT4(dev_priv))
/*
* dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
@@ -2703,12 +2868,18 @@ struct drm_i915_cmd_table {
IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \
IS_KABYLAKE(dev) || IS_BROXTON(dev))
#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
-#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
+#define HAS_RC6p(dev) (IS_GEN6(dev) || IS_IVYBRIDGE(dev))
#define HAS_CSR(dev) (IS_GEN9(dev))
-#define HAS_GUC_UCODE(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev))
-#define HAS_GUC_SCHED(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev))
+/*
+ * For now, anything with a GuC requires uCode loading, and then supports
+ * command submission once loaded. But these are logically independent
+ * properties, so we have separate macros to test them.
+ */
+#define HAS_GUC(dev) (IS_GEN9(dev))
+#define HAS_GUC_UCODE(dev) (HAS_GUC(dev))
+#define HAS_GUC_SCHED(dev) (HAS_GUC(dev))
#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
INTEL_INFO(dev)->gen >= 8)
@@ -2717,6 +2888,8 @@ struct drm_i915_cmd_table {
!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && \
!IS_BROXTON(dev))
+#define HAS_POOLED_EU(dev) (INTEL_INFO(dev)->has_pooled_eu)
+
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
@@ -2753,13 +2926,22 @@ struct drm_i915_cmd_table {
#include "i915_trace.h"
-extern const struct drm_ioctl_desc i915_ioctls[];
-extern int i915_max_ioctl;
+static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
+{
+#ifdef CONFIG_INTEL_IOMMU
+ if (INTEL_GEN(dev_priv) >= 6 && intel_iommu_gfx_mapped)
+ return true;
+#endif
+ return false;
+}
extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
extern int i915_resume_switcheroo(struct drm_device *dev);
-/* i915_dma.c */
+int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
+ int enable_ppgtt);
+
+/* i915_drv.c */
void __printf(3, 4)
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
const char *fmt, ...);
@@ -2767,21 +2949,13 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
#define i915_report_error(dev_priv, fmt, ...) \
__i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
-extern int i915_driver_load(struct drm_device *, unsigned long flags);
-extern int i915_driver_unload(struct drm_device *);
-extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
-extern void i915_driver_lastclose(struct drm_device * dev);
-extern void i915_driver_preclose(struct drm_device *dev,
- struct drm_file *file);
-extern void i915_driver_postclose(struct drm_device *dev,
- struct drm_file *file);
#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
#endif
-extern int intel_gpu_reset(struct drm_device *dev, u32 engine_mask);
-extern bool intel_has_gpu_reset(struct drm_device *dev);
-extern int i915_reset(struct drm_device *dev);
+extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
+extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
+extern int i915_reset(struct drm_i915_private *dev_priv);
extern int intel_guc_reset(struct drm_i915_private *dev_priv);
extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@@ -2791,7 +2965,8 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
/* intel_hotplug.c */
-void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask);
+void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
+ u32 pin_mask, u32 long_mask);
void intel_hpd_init(struct drm_i915_private *dev_priv);
void intel_hpd_init_work(struct drm_i915_private *dev_priv);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
@@ -2800,23 +2975,41 @@ bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
/* i915_irq.c */
-void i915_queue_hangcheck(struct drm_device *dev);
+static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
+{
+ unsigned long delay;
+
+ if (unlikely(!i915.enable_hangcheck))
+ return;
+
+ /* Don't continually defer the hangcheck; it must run at
+ * least once after work has been scheduled on any ring. Otherwise,
+ * we will ignore a hung ring if a second ring is kept busy.
+ */
+
+ delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES);
+ queue_delayed_work(system_long_wq,
+ &dev_priv->gpu_error.hangcheck_work, delay);
+}
+
__printf(3, 4)
-void i915_handle_error(struct drm_device *dev, u32 engine_mask,
+void i915_handle_error(struct drm_i915_private *dev_priv,
+ u32 engine_mask,
const char *fmt, ...);
extern void intel_irq_init(struct drm_i915_private *dev_priv);
int intel_irq_install(struct drm_i915_private *dev_priv);
void intel_irq_uninstall(struct drm_i915_private *dev_priv);
-extern void intel_uncore_sanitize(struct drm_device *dev);
-extern void intel_uncore_early_sanitize(struct drm_device *dev,
+extern void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
+extern void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
bool restore_forcewake);
-extern void intel_uncore_init(struct drm_device *dev);
+extern void intel_uncore_init(struct drm_i915_private *dev_priv);
extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
-extern void intel_uncore_fini(struct drm_device *dev);
-extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
+extern void intel_uncore_fini(struct drm_i915_private *dev_priv);
+extern void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
+ bool restore);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
@@ -2832,9 +3025,26 @@ void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
-static inline bool intel_vgpu_active(struct drm_device *dev)
+
+int intel_wait_for_register(struct drm_i915_private *dev_priv,
+ i915_reg_t reg,
+ const u32 mask,
+ const u32 value,
+ const unsigned long timeout_ms);
+int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
+ i915_reg_t reg,
+ const u32 mask,
+ const u32 value,
+ const unsigned long timeout_ms);
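
A hedged usage sketch for the new polling helper; the register and bit names
are made up for illustration:

	int ret;

	ret = intel_wait_for_register(dev_priv,
				      HYPOTHETICAL_STATUS_REG,	/* not a real register */
				      HYPOTHETICAL_READY_BIT,	/* mask */
				      HYPOTHETICAL_READY_BIT,	/* expected value */
				      100);			/* timeout in ms */
	if (ret)
		DRM_ERROR("status register never signalled ready\n");
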
+
+static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->gvt.initialized;
+}
+
+static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
{
- return to_i915(dev)->vgpu.active;
+ return dev_priv->vgpu.active;
}
void
@@ -2891,7 +3101,6 @@ ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
ibx_display_interrupt_update(dev_priv, bits, 0);
}
-
/* i915_gem.c */
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -2930,7 +3139,7 @@ int i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int i915_gem_init_userptr(struct drm_device *dev);
+void i915_gem_init_userptr(struct drm_i915_private *dev_priv);
int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
@@ -2940,11 +3149,13 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
void i915_gem_load_init(struct drm_device *dev);
void i915_gem_load_cleanup(struct drm_device *dev);
void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
+int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
+
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops);
-struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
size_t size);
struct drm_i915_gem_object *i915_gem_object_create_from_data(
struct drm_device *dev, const void *data, size_t size);
@@ -2999,6 +3210,23 @@ static inline int __sg_page_count(struct scatterlist *sg)
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n);
+static inline dma_addr_t
+i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, int n)
+{
+ if (n < obj->get_page.last) {
+ obj->get_page.sg = obj->pages->sgl;
+ obj->get_page.last = 0;
+ }
+
+ while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
+ obj->get_page.last += __sg_page_count(obj->get_page.sg++);
+ if (unlikely(sg_is_chain(obj->get_page.sg)))
+ obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
+ }
+
+ return sg_dma_address(obj->get_page.sg) + ((n - obj->get_page.last) << PAGE_SHIFT);
+}
+
static inline struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{
@@ -3075,6 +3303,11 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
+
+void i915_gem_track_fb(struct drm_i915_gem_object *old,
+ struct drm_i915_gem_object *new,
+ unsigned frontbuffer_bits);
+
/**
* Returns true if seq1 is later than seq2.
*/
@@ -3084,31 +3317,34 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
return (int32_t)(seq1 - seq2) >= 0;
}
-static inline bool i915_gem_request_started(struct drm_i915_gem_request *req,
- bool lazy_coherency)
+static inline bool i915_gem_request_started(const struct drm_i915_gem_request *req)
{
- if (!lazy_coherency && req->engine->irq_seqno_barrier)
- req->engine->irq_seqno_barrier(req->engine);
- return i915_seqno_passed(req->engine->get_seqno(req->engine),
+ return i915_seqno_passed(intel_engine_get_seqno(req->engine),
req->previous_seqno);
}
-static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
- bool lazy_coherency)
+static inline bool i915_gem_request_completed(const struct drm_i915_gem_request *req)
{
- if (!lazy_coherency && req->engine->irq_seqno_barrier)
- req->engine->irq_seqno_barrier(req->engine);
- return i915_seqno_passed(req->engine->get_seqno(req->engine),
+ return i915_seqno_passed(intel_engine_get_seqno(req->engine),
req->seqno);
}
-int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
+bool __i915_spin_request(const struct drm_i915_gem_request *request,
+ int state, unsigned long timeout_us);
+static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
+ int state, unsigned long timeout_us)
+{
+ return (i915_gem_request_started(request) &&
+ __i915_spin_request(request, state, timeout_us));
+}
+
+int __must_check i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *engine);
-bool i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
static inline u32 i915_reset_counter(struct i915_gpu_error *error)
@@ -3151,27 +3387,14 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2;
}
-static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
-{
- return dev_priv->gpu_error.stop_rings == 0 ||
- dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_BAN;
-}
-
-static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
-{
- return dev_priv->gpu_error.stop_rings == 0 ||
- dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_WARN;
-}
-
void i915_gem_reset(struct drm_device *dev);
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_init(struct drm_device *dev);
int i915_gem_init_engines(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
-int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_engines(struct drm_device *dev);
-int __must_check i915_gpu_idle(struct drm_device *dev);
+int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv);
int __must_check i915_gem_suspend(struct drm_device *dev);
void __i915_add_request(struct drm_i915_gem_request *req,
struct drm_i915_gem_object *batch_obj,
@@ -3236,8 +3459,6 @@ bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
struct i915_address_space *vm);
-unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
- struct i915_address_space *vm);
struct i915_vma *
i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm);
@@ -3272,14 +3493,8 @@ static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
}
-static inline unsigned long
-i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
- return i915_gem_obj_size(obj, &ggtt->base);
-}
+unsigned long
+i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj);
static inline int __must_check
i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
@@ -3293,12 +3508,6 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
alignment, flags | PIN_GLOBAL);
}
-static inline int
-i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
-{
- return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
-}
-
void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view);
static inline void
@@ -3322,28 +3531,44 @@ void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
/* i915_gem_context.c */
int __must_check i915_gem_context_init(struct drm_device *dev);
+void i915_gem_context_lost(struct drm_i915_private *dev_priv);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_reset(struct drm_device *dev);
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
-int i915_gem_context_enable(struct drm_i915_gem_request *req);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct drm_i915_gem_request *req);
-struct intel_context *
-i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
void i915_gem_context_free(struct kref *ctx_ref);
struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
-static inline void i915_gem_context_reference(struct intel_context *ctx)
+struct i915_gem_context *
+i915_gem_context_create_gvt(struct drm_device *dev);
+
+static inline struct i915_gem_context *
+i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
+{
+ struct i915_gem_context *ctx;
+
+ lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);
+
+ ctx = idr_find(&file_priv->context_idr, id);
+ if (!ctx)
+ return ERR_PTR(-ENOENT);
+
+ return ctx;
+}
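
A hedged caller sketch: the lookup returns an ERR_PTR rather than NULL, so
callers test with IS_ERR(); args->ctx_id stands in for an ioctl parameter:

	struct i915_gem_context *ctx;

	/* struct_mutex must already be held, per the lockdep assert */
	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	i915_gem_context_reference(ctx);
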
+
+static inline void i915_gem_context_reference(struct i915_gem_context *ctx)
{
kref_get(&ctx->ref);
}
-static inline void i915_gem_context_unreference(struct intel_context *ctx)
+static inline void i915_gem_context_unreference(struct i915_gem_context *ctx)
{
+ lockdep_assert_held(&ctx->i915->drm.struct_mutex);
kref_put(&ctx->ref, i915_gem_context_free);
}
-static inline bool i915_gem_context_is_default(const struct intel_context *c)
+static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
{
return c->user_handle == DEFAULT_CONTEXT_HANDLE;
}
@@ -3356,6 +3581,8 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev,
@@ -3370,9 +3597,10 @@ int __must_check i915_gem_evict_for_vma(struct i915_vma *target);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
/* belongs in i915_gem_gtt.h */
-static inline void i915_gem_chipset_flush(struct drm_device *dev)
+static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
{
- if (INTEL_INFO(dev)->gen < 6)
+ wmb();
+ if (INTEL_GEN(dev_priv) < 6)
intel_gtt_chipset_flush();
}
@@ -3413,7 +3641,7 @@ void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
obj->tiling_mode != I915_TILING_NONE;
@@ -3427,12 +3655,14 @@ int i915_verify_lists(struct drm_device *dev);
#endif
/* i915_debugfs.c */
-int i915_debugfs_init(struct drm_minor *minor);
-void i915_debugfs_cleanup(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
+int i915_debugfs_register(struct drm_i915_private *dev_priv);
+void i915_debugfs_unregister(struct drm_i915_private *dev_priv);
int i915_debugfs_connector_add(struct drm_connector *connector);
void intel_display_crc_init(struct drm_device *dev);
#else
+static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) {return 0;}
+static inline void i915_debugfs_unregister(struct drm_i915_private *dev_priv) {}
static inline int i915_debugfs_connector_add(struct drm_connector *connector)
{ return 0; }
static inline void intel_display_crc_init(struct drm_device *dev) {}
@@ -3451,18 +3681,19 @@ static inline void i915_error_state_buf_release(
{
kfree(eb->buf);
}
-void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
+void i915_capture_error_state(struct drm_i915_private *dev_priv,
+ u32 engine_mask,
const char *error_msg);
void i915_error_state_get(struct drm_device *dev,
struct i915_error_state_file_priv *error_priv);
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
void i915_destroy_error_state(struct drm_device *dev);
-void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
+void i915_get_extra_instdone(struct drm_i915_private *dev_priv, uint32_t *instdone);
const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
/* i915_cmd_parser.c */
-int i915_cmd_parser_get_version(void);
+int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
int i915_cmd_parser_init_ring(struct intel_engine_cs *engine);
void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine);
bool i915_needs_cmd_parser(struct intel_engine_cs *engine);
@@ -3511,31 +3742,33 @@ bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
/* intel_opregion.c */
#ifdef CONFIG_ACPI
-extern int intel_opregion_setup(struct drm_device *dev);
-extern void intel_opregion_init(struct drm_device *dev);
-extern void intel_opregion_fini(struct drm_device *dev);
-extern void intel_opregion_asle_intr(struct drm_device *dev);
+extern int intel_opregion_setup(struct drm_i915_private *dev_priv);
+extern void intel_opregion_register(struct drm_i915_private *dev_priv);
+extern void intel_opregion_unregister(struct drm_i915_private *dev_priv);
+extern void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
bool enable);
-extern int intel_opregion_notify_adapter(struct drm_device *dev,
+extern int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
pci_power_t state);
-extern int intel_opregion_get_panel_type(struct drm_device *dev);
+extern int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
#else
-static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
-static inline void intel_opregion_init(struct drm_device *dev) { return; }
-static inline void intel_opregion_fini(struct drm_device *dev) { return; }
-static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
+static inline int intel_opregion_setup(struct drm_i915_private *dev) { return 0; }
+static inline void intel_opregion_register(struct drm_i915_private *dev_priv) { }
+static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv) { }
+static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
+{
+}
static inline int
intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
{
return 0;
}
static inline int
-intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
+intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state)
{
return 0;
}
-static inline int intel_opregion_get_panel_type(struct drm_device *dev)
+static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev)
{
return -ENODEV;
}
@@ -3550,36 +3783,45 @@ static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */
+/* intel_device_info.c */
+static inline struct intel_device_info *
+mkwrite_device_info(struct drm_i915_private *dev_priv)
+{
+ return (struct intel_device_info *)&dev_priv->info;
+}
+
+void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
+void intel_device_info_dump(struct drm_i915_private *dev_priv);
+
/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
-extern void intel_connector_unregister(struct intel_connector *);
+extern int intel_connector_register(struct drm_connector *);
+extern void intel_connector_unregister(struct drm_connector *);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_display_resume(struct drm_device *dev);
extern void i915_redisable_vga(struct drm_device *dev);
extern void i915_redisable_vga_power_on(struct drm_device *dev);
-extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
+extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
-extern void intel_set_rps(struct drm_device *dev, u8 val);
+extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
bool enable);
-extern void intel_detect_pch(struct drm_device *dev);
-extern int intel_enable_rc6(const struct drm_device *dev);
-extern bool i915_semaphore_is_enabled(struct drm_device *dev);
+extern bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
/* overlay */
-extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
+extern struct intel_overlay_error_state *
+intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
struct intel_overlay_error_state *error);
-extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
+extern struct intel_display_error_state *
+intel_display_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
struct drm_device *dev,
struct intel_display_error_state *error);
@@ -3608,6 +3850,24 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+/* intel_dpio_phy.c */
+void chv_set_phy_signal_level(struct intel_encoder *encoder,
+ u32 deemph_reg_value, u32 margin_reg_value,
+ bool uniq_trans_scale);
+void chv_data_lane_soft_reset(struct intel_encoder *encoder,
+ bool reset);
+void chv_phy_pre_pll_enable(struct intel_encoder *encoder);
+void chv_phy_pre_encoder_enable(struct intel_encoder *encoder);
+void chv_phy_release_cl2_override(struct intel_encoder *encoder);
+void chv_phy_post_pll_disable(struct intel_encoder *encoder);
+
+void vlv_set_phy_signal_level(struct intel_encoder *encoder,
+ u32 demph_reg_value, u32 preemph_reg_value,
+ u32 uniqtranscale_reg_value, u32 tx3_demph);
+void vlv_phy_pre_pll_enable(struct intel_encoder *encoder);
+void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder);
+void vlv_phy_reset_lanes(struct intel_encoder *encoder);
+
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
@@ -3681,6 +3941,7 @@ __raw_write(64, q)
*/
#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
+#define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__))
#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
/* "Broadcast RGB" property */
@@ -3744,12 +4005,80 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
schedule_timeout_uninterruptible(remaining_jiffies);
}
}
-
-static inline void i915_trace_irq_get(struct intel_engine_cs *engine,
- struct drm_i915_gem_request *req)
+static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req)
{
- if (engine->trace_irq_req == NULL && engine->irq_get(engine))
- i915_gem_request_assign(&engine->trace_irq_req, req);
+ struct intel_engine_cs *engine = req->engine;
+
+ /* Before we do the heavier coherent read of the seqno,
+ * check the value (hopefully) in the CPU cacheline.
+ */
+ if (i915_gem_request_completed(req))
+ return true;
+
+ /* Ensure our read of the seqno is coherent so that we
+ * do not "miss an interrupt" (i.e. if this is the last
+ * request and the seqno write from the GPU is not visible
+ * by the time the interrupt fires, we will see that the
+ * request is incomplete and go back to sleep awaiting
+ * another interrupt that will never come.)
+ *
+ * Strictly, we only need to do this once after an interrupt,
+ * but it is easier and safer to do it every time the waiter
+ * is woken.
+ */
+ if (engine->irq_seqno_barrier &&
+ READ_ONCE(engine->breadcrumbs.irq_seqno_bh) == current &&
+ cmpxchg_relaxed(&engine->breadcrumbs.irq_posted, 1, 0)) {
+ struct task_struct *tsk;
+
+ /* The ordering of irq_posted versus applying the barrier
+ * is crucial. The clearing of the current irq_posted must
+ * be visible before we perform the barrier operation,
+ * such that if a subsequent interrupt arrives, irq_posted
+ * is reasserted and our task rewoken (which causes us to
+ * do another __i915_request_irq_complete() immediately
+ * and reapply the barrier). Conversely, if the clear
+ * occurs after the barrier, then an interrupt that arrived
+ * whilst we waited on the barrier would not trigger a
+ * barrier on the next pass, and the read may not see the
+ * seqno update.
+ */
+ engine->irq_seqno_barrier(engine);
+
+ /* If we consume the irq, but we are no longer the bottom-half,
+ * the real bottom-half may not have serialised their own
+ * seqno check with the irq-barrier (i.e. may have inspected
+ * the seqno before we believe it coherent since they see
+ * irq_posted == false but we are still running).
+ */
+ rcu_read_lock();
+ tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
+ if (tsk && tsk != current)
+ /* Note that if the bottom-half is changed as we
+ * are sending the wake-up, the new bottom-half will
+ * be woken by whoever made the change. We only have
+ * to worry about the case where we steal the irq-posted for
+ * ourselves.
+ */
+ wake_up_process(tsk);
+ rcu_read_unlock();
+
+ if (i915_gem_request_completed(req))
+ return true;
+ }
+
+ /* We need to check whether any gpu reset happened in between
+ * the request being submitted and now. If a reset has occurred,
+ * the seqno will have been advanced past ours and our request
+ * is complete. If we are in the process of handling a reset,
+ * the request is effectively complete as the rendering will
+ * be discarded, but we need to return in order to drop the
+ * struct_mutex.
+ */
+ if (i915_reset_in_progress(&req->i915->gpu_error))
+ return true;
+
+ return false;
}
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index aad26851c..a77ce9983 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -54,12 +54,33 @@ static bool cpu_cache_is_coherent(struct drm_device *dev,
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
+ if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+ return false;
+
if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
return true;
return obj->pin_display;
}
+static int
+insert_mappable_node(struct drm_i915_private *i915,
+ struct drm_mm_node *node, u32 size)
+{
+ memset(node, 0, sizeof(*node));
+ return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
+ size, 0, 0, 0,
+ i915->ggtt.mappable_end,
+ DRM_MM_SEARCH_DEFAULT,
+ DRM_MM_CREATE_DEFAULT);
+}
+
+static void
+remove_mappable_node(struct drm_mm_node *node)
+{
+ drm_mm_remove_node(node);
+}
+
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
size_t size)
@@ -107,7 +128,7 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
int i915_mutex_lock_interruptible(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
@@ -151,7 +172,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
static int
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
- struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+ struct address_space *mapping = obj->base.filp->f_mapping;
char *vaddr = obj->phys_handle->vaddr;
struct sg_table *st;
struct scatterlist *sg;
@@ -177,7 +198,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
vaddr += PAGE_SIZE;
}
- i915_gem_chipset_flush(obj->base.dev);
+ i915_gem_chipset_flush(to_i915(obj->base.dev));
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
@@ -218,7 +239,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
obj->dirty = 0;
if (obj->dirty) {
- struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+ struct address_space *mapping = obj->base.filp->f_mapping;
char *vaddr = obj->phys_handle->vaddr;
int i;
@@ -347,7 +368,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
}
drm_clflush_virt_range(vaddr, args->size);
- i915_gem_chipset_flush(dev);
+ i915_gem_chipset_flush(to_i915(dev));
out:
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
@@ -356,13 +377,13 @@ out:
void *i915_gem_object_alloc(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}
void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
kmem_cache_free(dev_priv->objects, obj);
}
@@ -381,9 +402,9 @@ i915_gem_create(struct drm_file *file,
return -EINVAL;
/* Allocate the new object */
- obj = i915_gem_alloc_object(dev, size);
- if (obj == NULL)
- return -ENOMEM;
+ obj = i915_gem_object_create(dev, size);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
ret = drm_gem_handle_create(file, &obj->base, &handle);
/* drop reference from allocate - handle holds it now */
@@ -409,6 +430,9 @@ i915_gem_dumb_create(struct drm_file *file,
/**
* Creates a new mm object and returns a handle to it.
+ * @dev: drm device pointer
+ * @data: ioctl data blob
+ * @file: drm file pointer
*/
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -484,7 +508,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
*needs_clflush = 0;
- if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
+ if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
return -EINVAL;
if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
@@ -585,6 +609,142 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
return ret ? - EFAULT : 0;
}
+static inline unsigned long
+slow_user_access(struct io_mapping *mapping,
+ uint64_t page_base, int page_offset,
+ char __user *user_data,
+ unsigned long length, bool pwrite)
+{
+ void __iomem *ioaddr;
+ void *vaddr;
+ uint64_t unwritten;
+
+ ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
+ /* We can use the cpu mem copy function because this is X86. */
+ vaddr = (void __force *)ioaddr + page_offset;
+ if (pwrite)
+ unwritten = __copy_from_user(vaddr, user_data, length);
+ else
+ unwritten = __copy_to_user(user_data, vaddr, length);
+
+ io_mapping_unmap(ioaddr);
+ return unwritten;
+}
+
+static int
+i915_gem_gtt_pread(struct drm_device *dev,
+ struct drm_i915_gem_object *obj, uint64_t size,
+ uint64_t data_offset, uint64_t data_ptr)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct drm_mm_node node;
+ char __user *user_data;
+ uint64_t remain;
+ uint64_t offset;
+ int ret;
+
+ ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
+ if (ret) {
+ ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
+ if (ret)
+ goto out;
+
+ ret = i915_gem_object_get_pages(obj);
+ if (ret) {
+ remove_mappable_node(&node);
+ goto out;
+ }
+
+ i915_gem_object_pin_pages(obj);
+ } else {
+ node.start = i915_gem_obj_ggtt_offset(obj);
+ node.allocated = false;
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ goto out_unpin;
+ }
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (ret)
+ goto out_unpin;
+
+ user_data = u64_to_user_ptr(data_ptr);
+ remain = size;
+ offset = data_offset;
+
+ mutex_unlock(&dev->struct_mutex);
+ if (likely(!i915.prefault_disable)) {
+ ret = fault_in_multipages_writeable(user_data, remain);
+ if (ret) {
+ mutex_lock(&dev->struct_mutex);
+ goto out_unpin;
+ }
+ }
+
+ while (remain > 0) {
+ /* Operation in this page
+ *
+ * page_base = page offset within aperture
+ * page_offset = offset within page
+ * page_length = bytes to copy for this page
+ */
+ u32 page_base = node.start;
+ unsigned page_offset = offset_in_page(offset);
+ unsigned page_length = PAGE_SIZE - page_offset;
+ page_length = remain < page_length ? remain : page_length;
+ if (node.allocated) {
+ wmb();
+ ggtt->base.insert_page(&ggtt->base,
+ i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
+ node.start,
+ I915_CACHE_NONE, 0);
+ wmb();
+ } else {
+ page_base += offset & PAGE_MASK;
+ }
+ /* This is a slow read/write as it tries to read from
+ * and write to user memory which may result in page
+ * faults, and so we cannot perform this under struct_mutex.
+ */
+ if (slow_user_access(ggtt->mappable, page_base,
+ page_offset, user_data,
+ page_length, false)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ remain -= page_length;
+ user_data += page_length;
+ offset += page_length;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
+ /* The user has modified the object whilst we tried
+ * reading from it, and we now have no idea what domain
+ * the pages should be in. As we have just been touching
+ * them directly, flush everything back to the GTT
+ * domain.
+ */
+ ret = i915_gem_object_set_to_gtt_domain(obj, false);
+ }
+
+out_unpin:
+ if (node.allocated) {
+ wmb();
+ ggtt->base.clear_range(&ggtt->base,
+ node.start, node.size,
+ true);
+ i915_gem_object_unpin_pages(obj);
+ remove_mappable_node(&node);
+ } else {
+ i915_gem_object_ggtt_unpin(obj);
+ }
+out:
+ return ret;
+}
+
static int
i915_gem_shmem_pread(struct drm_device *dev,
struct drm_i915_gem_object *obj,
@@ -600,6 +760,9 @@ i915_gem_shmem_pread(struct drm_device *dev,
int needs_clflush = 0;
struct sg_page_iter sg_iter;
+ if (!i915_gem_object_has_struct_page(obj))
+ return -ENODEV;
+
user_data = u64_to_user_ptr(args->data_ptr);
remain = args->size;
@@ -672,6 +835,9 @@ out:
/**
* Reads data from the object referenced by handle.
+ * @dev: drm device pointer
+ * @data: ioctl data blob
+ * @file: drm file pointer
*
* On error, the contents of *data are undefined.
*/
@@ -708,18 +874,18 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
goto out;
}
- /* prime objects have no backing filp to GEM pread/pwrite
- * pages from.
- */
- if (!obj->base.filp) {
- ret = -EINVAL;
- goto out;
- }
-
trace_i915_gem_object_pread(obj, args->offset, args->size);
ret = i915_gem_shmem_pread(dev, obj, args, file);
+ /* pread for non-shmem backed objects */
+ if (ret == -EFAULT || ret == -ENODEV) {
+ intel_runtime_pm_get(to_i915(dev));
+ ret = i915_gem_gtt_pread(dev, obj, args->size,
+ args->offset, args->data_ptr);
+ intel_runtime_pm_put(to_i915(dev));
+ }
+
out:
drm_gem_object_unreference(&obj->base);
unlock:
@@ -753,60 +919,99 @@ fast_user_write(struct io_mapping *mapping,
/**
* This is the fast pwrite path, where we copy the data directly from the
* user into the GTT, uncached.
+ * @i915: i915 device private
+ * @obj: i915 gem object
+ * @args: pwrite arguments structure
+ * @file: drm file pointer
*/
static int
-i915_gem_gtt_pwrite_fast(struct drm_device *dev,
+i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- ssize_t remain;
- loff_t offset, page_base;
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ struct drm_device *dev = obj->base.dev;
+ struct drm_mm_node node;
+ uint64_t remain, offset;
char __user *user_data;
- int page_offset, page_length, ret;
+ int ret;
+ bool hit_slow_path = false;
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ return -EFAULT;
ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
- if (ret)
- goto out;
+ if (ret) {
+ ret = insert_mappable_node(i915, &node, PAGE_SIZE);
+ if (ret)
+ goto out;
+
+ ret = i915_gem_object_get_pages(obj);
+ if (ret) {
+ remove_mappable_node(&node);
+ goto out;
+ }
+
+ i915_gem_object_pin_pages(obj);
+ } else {
+ node.start = i915_gem_obj_ggtt_offset(obj);
+ node.allocated = false;
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ goto out_unpin;
+ }
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
goto out_unpin;
- ret = i915_gem_object_put_fence(obj);
- if (ret)
- goto out_unpin;
+ intel_fb_obj_invalidate(obj, ORIGIN_GTT);
+ obj->dirty = true;
user_data = u64_to_user_ptr(args->data_ptr);
+ offset = args->offset;
remain = args->size;
-
- offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
-
- intel_fb_obj_invalidate(obj, ORIGIN_GTT);
-
- while (remain > 0) {
+ while (remain) {
/* Operation in this page
*
* page_base = page offset within aperture
* page_offset = offset within page
* page_length = bytes to copy for this page
*/
- page_base = offset & PAGE_MASK;
- page_offset = offset_in_page(offset);
- page_length = remain;
- if ((page_offset + remain) > PAGE_SIZE)
- page_length = PAGE_SIZE - page_offset;
-
+ u32 page_base = node.start;
+ unsigned page_offset = offset_in_page(offset);
+ unsigned page_length = PAGE_SIZE - page_offset;
+ page_length = remain < page_length ? remain : page_length;
+ if (node.allocated) {
+ wmb(); /* flush the write before we modify the GGTT */
+ ggtt->base.insert_page(&ggtt->base,
+ i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
+ node.start, I915_CACHE_NONE, 0);
+ wmb(); /* flush modifications to the GGTT (insert_page) */
+ } else {
+ page_base += offset & PAGE_MASK;
+ }
/* If we get a fault while copying data, then (presumably) our
* source page isn't available. Return the error and we'll
* retry in the slow path.
+ * If the object is non-shmem backed, we retry with the
+ * path that handles page faults.
*/
if (fast_user_write(ggtt->mappable, page_base,
page_offset, user_data, page_length)) {
- ret = -EFAULT;
- goto out_flush;
+ hit_slow_path = true;
+ mutex_unlock(&dev->struct_mutex);
+ if (slow_user_access(ggtt->mappable,
+ page_base,
+ page_offset, user_data,
+ page_length, true)) {
+ ret = -EFAULT;
+ mutex_lock(&dev->struct_mutex);
+ goto out_flush;
+ }
+
+ mutex_lock(&dev->struct_mutex);
}
remain -= page_length;
@@ -815,9 +1020,31 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
}
out_flush:
+ if (hit_slow_path) {
+ if (ret == 0 &&
+ (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
+ /* The user has modified the object whilst we tried
+ * reading from it, and we now have no idea what domain
+ * the pages should be in. As we have just been touching
+ * them directly, flush everything back to the GTT
+ * domain.
+ */
+ ret = i915_gem_object_set_to_gtt_domain(obj, false);
+ }
+ }
+
intel_fb_obj_flush(obj, false, ORIGIN_GTT);
out_unpin:
- i915_gem_object_ggtt_unpin(obj);
+ if (node.allocated) {
+ wmb();
+ ggtt->base.clear_range(&ggtt->base,
+ node.start, node.size,
+ true);
+ i915_gem_object_unpin_pages(obj);
+ remove_mappable_node(&node);
+ } else {
+ i915_gem_object_ggtt_unpin(obj);
+ }
out:
return ret;
}
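
The hunk above establishes a two-tier copy: a fast write that must not fault while the device lock (struct_mutex) is held, falling back to a slow copy performed with the lock dropped and retaken afterwards. A minimal userspace sketch of that pattern, assuming only POSIX threads; the names here are illustrative, not the driver's API:

#include <errno.h>
#include <pthread.h>
#include <string.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

/* Fast path: must not fault; here it simply reports failure. */
static int fast_write(void *dst, const void *src, size_t len)
{
	(void)dst; (void)src; (void)len;
	return -EFAULT;		/* pretend the source page was absent */
}

/* Slow path: allowed to fault/sleep, so it runs without the lock. */
static int slow_write(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

int write_chunk(void *dst, const void *src, size_t len)
{
	int ret = 0;

	pthread_mutex_lock(&dev_lock);
	if (fast_write(dst, src, len)) {
		/* Drop the lock before a copy that may fault. */
		pthread_mutex_unlock(&dev_lock);
		ret = slow_write(dst, src, len);
		pthread_mutex_lock(&dev_lock);
	}
	pthread_mutex_unlock(&dev_lock);
	return ret;
}

Dropping the lock around the faulting copy is what forces the hit_slow_path bookkeeping above: another thread may have changed the object's domain while the lock was released.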
@@ -1006,7 +1233,7 @@ out:
}
if (needs_clflush_after)
- i915_gem_chipset_flush(dev);
+ i915_gem_chipset_flush(to_i915(dev));
else
obj->cache_dirty = true;
@@ -1016,6 +1243,9 @@ out:
/**
* Writes data to the object referenced by handle.
+ * @dev: drm device
+ * @data: ioctl data blob
+ * @file: drm file
*
* On error, the contents of the buffer that were to be modified are undefined.
*/
@@ -1023,7 +1253,7 @@ int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_pwrite *args = data;
struct drm_i915_gem_object *obj;
int ret;
@@ -1062,14 +1292,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
goto out;
}
- /* prime objects have no backing filp to GEM pread/pwrite
- * pages from.
- */
- if (!obj->base.filp) {
- ret = -EINVAL;
- goto out;
- }
-
trace_i915_gem_object_pwrite(obj, args->offset, args->size);
ret = -EFAULT;
@@ -1079,10 +1301,9 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* pread/pwrite currently are reading and writing from the CPU
* perspective, requiring manual detiling by the client.
*/
- if (obj->tiling_mode == I915_TILING_NONE &&
- obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
+ if (!i915_gem_object_has_struct_page(obj) ||
cpu_write_needs_clflush(obj)) {
- ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
+ ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
/* Note that the gtt paths might fail with non-page-backed user
* pointers (e.g. gtt mappings when moving data between
* textures). Fallback to the shmem path in that case. */
@@ -1091,8 +1312,10 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
if (ret == -EFAULT || ret == -ENOSPC) {
if (obj->phys_handle)
ret = i915_gem_phys_pwrite(obj, args, file);
- else
+ else if (i915_gem_object_has_struct_page(obj))
ret = i915_gem_shmem_pwrite(dev, obj, args, file);
+ else
+ ret = -ENODEV;
}
out:
@@ -1123,17 +1346,6 @@ i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
return 0;
}
-static void fake_irq(unsigned long data)
-{
- wake_up_process((struct task_struct *)data);
-}
-
-static bool missed_irq(struct drm_i915_private *dev_priv,
- struct intel_engine_cs *engine)
-{
- return test_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings);
-}
-
static unsigned long local_clock_us(unsigned *cpu)
{
unsigned long t;
@@ -1166,9 +1378,9 @@ static bool busywait_stop(unsigned long timeout, unsigned cpu)
return this_cpu != cpu;
}
-static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
+bool __i915_spin_request(const struct drm_i915_gem_request *req,
+ int state, unsigned long timeout_us)
{
- unsigned long timeout;
unsigned cpu;
/* When waiting for high frequency requests, e.g. during synchronous
@@ -1181,31 +1393,21 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
* takes to sleep on a request, on the order of a microsecond.
*/
- if (req->engine->irq_refcount)
- return -EBUSY;
-
- /* Only spin if we know the GPU is processing this request */
- if (!i915_gem_request_started(req, true))
- return -EAGAIN;
-
- timeout = local_clock_us(&cpu) + 5;
- while (!need_resched()) {
- if (i915_gem_request_completed(req, true))
- return 0;
+ timeout_us += local_clock_us(&cpu);
+ do {
+ if (i915_gem_request_completed(req))
+ return true;
if (signal_pending_state(state, current))
break;
- if (busywait_stop(timeout, cpu))
+ if (busywait_stop(timeout_us, cpu))
break;
cpu_relax_lowlatency();
- }
+ } while (!need_resched());
- if (i915_gem_request_completed(req, false))
- return 0;
-
- return -EAGAIN;
+ return false;
}
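
The rewritten spinner above bounds the busy-wait by a timeout derived from a local clock instead of an IRQ refcount. A self-contained sketch of such a bounded poll, using plain POSIX timekeeping (illustrative names, not the kernel helpers):

#include <stdbool.h>
#include <time.h>

static unsigned long clock_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long)ts.tv_sec * 1000000ul + ts.tv_nsec / 1000;
}

/* Poll *done for at most timeout_us microseconds. */
static bool spin_wait(const volatile int *done, unsigned long timeout_us)
{
	unsigned long expire = clock_us() + timeout_us;

	do {
		if (*done)
			return true;
	} while (clock_us() < expire);

	return false;
}

The kernel version additionally bails out on need_resched(), pending signals, and CPU migration; only the time bound is modelled here.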
/**
@@ -1213,6 +1415,7 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
* @req: the request to wait upon
* @interruptible: do an interruptible wait (normally yes)
* @timeout: in - how long to wait (NULL forever); out - how much time remaining
+ * @rps: RPS client
*
* Note: It is of utmost importance that the passed in seqno and reset_counter
* values have been read by the caller in an smp safe manner. Where read-side
@@ -1229,26 +1432,22 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
s64 *timeout,
struct intel_rps_client *rps)
{
- struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- const bool irq_test_in_progress =
- ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
- DEFINE_WAIT(wait);
- unsigned long timeout_expire;
+ DEFINE_WAIT(reset);
+ struct intel_wait wait;
+ unsigned long timeout_remain;
s64 before = 0; /* Only to silence a compiler warning. */
- int ret;
+ int ret = 0;
- WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
+ might_sleep();
if (list_empty(&req->list))
return 0;
- if (i915_gem_request_completed(req, true))
+ if (i915_gem_request_completed(req))
return 0;
- timeout_expire = 0;
+ timeout_remain = MAX_SCHEDULE_TIMEOUT;
if (timeout) {
if (WARN_ON(*timeout < 0))
return -EINVAL;
@@ -1256,7 +1455,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
if (*timeout == 0)
return -ETIME;
- timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);
+ timeout_remain = nsecs_to_jiffies_timeout(*timeout);
/*
* Record current time in case interrupted by signal, or wedged.
@@ -1264,75 +1463,76 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
before = ktime_get_raw_ns();
}
- if (INTEL_INFO(dev_priv)->gen >= 6)
- gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
-
trace_i915_gem_request_wait_begin(req);
- /* Optimistic spin for the next jiffie before touching IRQs */
- ret = __i915_spin_request(req, state);
- if (ret == 0)
- goto out;
-
- if (!irq_test_in_progress && WARN_ON(!engine->irq_get(engine))) {
- ret = -ENODEV;
- goto out;
- }
+ /* This client is about to stall waiting for the GPU. In many cases
+ * this is undesirable and limits the throughput of the system, as
+ * many clients cannot continue processing user input/output whilst
+ * blocked. RPS autotuning may take tens of milliseconds to respond
+ * to the GPU load and thus incurs additional latency for the client.
+ * We can circumvent that by promoting the GPU frequency to maximum
+ * before we wait. This makes the GPU throttle up much more quickly
+ * (good for benchmarks and user experience, e.g. window animations),
+ * but at a cost of spending more power processing the workload
+ * (bad for battery). Not all clients even want their results
+ * immediately and for them we should just let the GPU select its own
+ * frequency to maximise efficiency. To prevent a single client from
+ * forcing the clocks too high for the whole system, we only allow
+ * each client to waitboost once in a busy period.
+ */
+ if (INTEL_INFO(req->i915)->gen >= 6)
+ gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
- for (;;) {
- struct timer_list timer;
+ /* Optimistic spin for the next ~jiffie before touching IRQs */
+ if (i915_spin_request(req, state, 5))
+ goto complete;
- prepare_to_wait(&engine->irq_queue, &wait, state);
+ set_current_state(state);
+ add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
- /* We need to check whether any gpu reset happened in between
- * the request being submitted and now. If a reset has occurred,
- * the request is effectively complete (we either are in the
- * process of or have discarded the rendering and completely
- * reset the GPU. The results of the request are lost and we
- * are free to continue on with the original operation.
+ intel_wait_init(&wait, req->seqno);
+ if (intel_engine_add_wait(req->engine, &wait))
+ /* In order to check that we haven't missed the interrupt
+ * as we enabled it, we need to kick ourselves to do a
+ * coherent check on the seqno before we sleep.
*/
- if (req->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) {
- ret = 0;
- break;
- }
-
- if (i915_gem_request_completed(req, false)) {
- ret = 0;
- break;
- }
+ goto wakeup;
+ for (;;) {
if (signal_pending_state(state, current)) {
ret = -ERESTARTSYS;
break;
}
- if (timeout && time_after_eq(jiffies, timeout_expire)) {
+ timeout_remain = io_schedule_timeout(timeout_remain);
+ if (timeout_remain == 0) {
ret = -ETIME;
break;
}
- timer.function = NULL;
- if (timeout || missed_irq(dev_priv, engine)) {
- unsigned long expire;
+ if (intel_wait_complete(&wait))
+ break;
- setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
- expire = missed_irq(dev_priv, engine) ? jiffies + 1 : timeout_expire;
- mod_timer(&timer, expire);
- }
+ set_current_state(state);
- io_schedule();
+wakeup:
+ /* Carefully check if the request is complete, giving time
+ * for the seqno to be visible following the interrupt.
+ * We also have to check in case we are kicked by the GPU
+ * reset in order to drop the struct_mutex.
+ */
+ if (__i915_request_irq_complete(req))
+ break;
- if (timer.function) {
- del_singleshot_timer_sync(&timer);
- destroy_timer_on_stack(&timer);
- }
+ /* Only spin if we know the GPU is processing this request */
+ if (i915_spin_request(req, state, 2))
+ break;
}
- if (!irq_test_in_progress)
- engine->irq_put(engine);
+ remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
- finish_wait(&engine->irq_queue, &wait);
-
-out:
+ intel_engine_remove_wait(req->engine, &wait);
+ __set_current_state(TASK_RUNNING);
+complete:
trace_i915_gem_request_wait_end(req);
if (timeout) {
@@ -1351,6 +1551,22 @@ out:
*timeout = 0;
}
+ if (rps && req->seqno == req->engine->last_submitted_seqno) {
+ /* The GPU is now idle and this client has stalled.
+ * Since no other client has submitted a request in the
+ * meantime, assume that this client is the only one
+ * supplying work to the GPU but is unable to keep that
+ * work supplied because it is waiting. Since the GPU is
+ * then never kept fully busy, RPS autoclocking will
+ * keep the clocks relatively low, causing further delays.
+ * Compensate by giving the synchronous client credit for
+ * a waitboost next time.
+ */
+ spin_lock(&req->i915->rps.client_lock);
+ list_del_init(&rps->link);
+ spin_unlock(&req->i915->rps.client_lock);
+ }
+
return ret;
}
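
__i915_wait_request now combines three stages: an optimistic spin, an interruptible sleep with a timeout, and a re-check after each wakeup. A compressed userspace analogue of that spin-then-sleep shape, assuming POSIX condition variables (a sketch of the pattern, not the breadcrumb mechanism itself):

#include <pthread.h>
#include <stdbool.h>

struct waiter {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool complete;
};

/* Spin briefly, then block until completion or the deadline passes. */
bool wait_for_completion(struct waiter *w, const struct timespec *deadline)
{
	bool done;

	/* Optimistic spin: most short requests finish within microseconds. */
	for (int i = 0; i < 1000; i++)
		if (__atomic_load_n(&w->complete, __ATOMIC_ACQUIRE))
			return true;

	pthread_mutex_lock(&w->lock);
	while (!w->complete) {
		/* returns ETIMEDOUT once the absolute deadline is reached */
		if (pthread_cond_timedwait(&w->cond, &w->lock, deadline))
			break;
	}
	done = w->complete;
	pthread_mutex_unlock(&w->lock);
	return done;
}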
@@ -1413,6 +1629,13 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
list_del_init(&request->list);
i915_gem_request_remove_from_client(request);
+ if (request->previous_context) {
+ if (i915.enable_execlists)
+ intel_lr_context_unpin(request->previous_context,
+ request->engine);
+ }
+
+ i915_gem_context_unreference(request->ctx);
i915_gem_request_unreference(request);
}
@@ -1422,7 +1645,7 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
struct intel_engine_cs *engine = req->engine;
struct drm_i915_gem_request *tmp;
- lockdep_assert_held(&engine->dev->struct_mutex);
+ lockdep_assert_held(&engine->i915->drm.struct_mutex);
if (list_empty(&req->list))
return;
@@ -1440,6 +1663,7 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
/**
* Waits for a request to be signaled, and cleans up the
* request and object lists appropriately for that event.
+ * @req: request to wait on
*/
int
i915_wait_request(struct drm_i915_gem_request *req)
@@ -1450,14 +1674,14 @@ i915_wait_request(struct drm_i915_gem_request *req)
interruptible = dev_priv->mm.interruptible;
- BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+ BUG_ON(!mutex_is_locked(&dev_priv->drm.struct_mutex));
ret = __i915_wait_request(req, interruptible, NULL, NULL);
if (ret)
return ret;
/* If the GPU hung, we want to keep the requests to find the guilty. */
- if (req->reset_counter == i915_reset_counter(&dev_priv->gpu_error))
+ if (!i915_reset_in_progress(&dev_priv->gpu_error))
__i915_gem_request_retire__upto(req);
return 0;
@@ -1466,6 +1690,8 @@ i915_wait_request(struct drm_i915_gem_request *req)
/**
* Ensures that all rendering to the object has completed and the object is
* safe to unbind from the GTT or access from the CPU.
+ * @obj: i915 gem object
+ * @readonly: true if the caller only needs the object to be safe to read
*/
int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
@@ -1516,7 +1742,7 @@ i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
else if (obj->last_write_req == req)
i915_gem_object_retire__write(obj);
- if (req->reset_counter == i915_reset_counter(&req->i915->gpu_error))
+ if (!i915_reset_in_progress(&req->i915->gpu_error))
__i915_gem_request_retire__upto(req);
}
@@ -1529,7 +1755,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
bool readonly)
{
struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
int ret, i, n = 0;
@@ -1580,9 +1806,19 @@ static struct intel_rps_client *to_rps_client(struct drm_file *file)
return &fpriv->rps;
}
+static enum fb_op_origin
+write_origin(struct drm_i915_gem_object *obj, unsigned domain)
+{
+ return domain == I915_GEM_DOMAIN_GTT && !obj->has_wc_mmap ?
+ ORIGIN_GTT : ORIGIN_CPU;
+}
+
/**
* Called when user space prepares to use an object with the CPU, either
* through the mmap ioctl's mapping or a GTT mapping.
+ * @dev: drm device
+ * @data: ioctl data blob
+ * @file: drm file
*/
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
@@ -1633,9 +1869,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
if (write_domain != 0)
- intel_fb_obj_invalidate(obj,
- write_domain == I915_GEM_DOMAIN_GTT ?
- ORIGIN_GTT : ORIGIN_CPU);
+ intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
unref:
drm_gem_object_unreference(&obj->base);
@@ -1646,6 +1880,9 @@ unlock:
/**
* Called when user space has done writes to this buffer
+ * @dev: drm device
+ * @data: ioctl data blob
+ * @file: drm file
*/
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
@@ -1676,8 +1913,11 @@ unlock:
}
/**
- * Maps the contents of an object, returning the address it is mapped
- * into.
+ * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
+ * it is mapped to.
+ * @dev: drm device
+ * @data: ioctl data blob
+ * @file: drm file
*
* While the mapping holds a reference on the contents of the object, it doesn't
* imply a ref on the object itself.
@@ -1736,6 +1976,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
else
addr = -ENOMEM;
up_write(&mm->mmap_sem);
+
+ /* This may race, but that's ok, it only gets set */
+ WRITE_ONCE(to_intel_bo(obj)->has_wc_mmap, true);
}
drm_gem_object_unreference_unlocked(obj);
if (IS_ERR((void *)addr))
@@ -1982,7 +2225,7 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
return size;
/* Previous chips need a power-of-two fence region when tiling */
- if (INTEL_INFO(dev)->gen == 3)
+ if (IS_GEN3(dev))
gtt_size = 1024*1024;
else
gtt_size = 512*1024;
@@ -1995,7 +2238,10 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
/**
* i915_gem_get_gtt_alignment - return required GTT alignment for an object
- * @obj: object to check
+ * @dev: drm device
+ * @size: object size
+ * @tiling_mode: tiling mode
+ * @fenced: is fenced alignemned required or not
*
* Return the required GTT alignment for an object, taking into account
* potential fence register mapping.
@@ -2021,7 +2267,7 @@ i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
int ret;
dev_priv->mm.shrinker_no_lock_stealing = true;
@@ -2155,14 +2401,15 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
if (obj->base.filp == NULL)
return;
- mapping = file_inode(obj->base.filp)->i_mapping,
+ mapping = obj->base.filp->f_mapping,
invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}
static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
- struct sg_page_iter sg_iter;
+ struct sgt_iter sgt_iter;
+ struct page *page;
int ret;
BUG_ON(obj->madv == __I915_MADV_PURGED);
@@ -2184,9 +2431,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
if (obj->madv == I915_MADV_DONTNEED)
obj->dirty = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
- struct page *page = sg_page_iter_page(&sg_iter);
-
+ for_each_sgt_page(page, sgt_iter, obj->pages) {
if (obj->dirty)
set_page_dirty(page);
@@ -2238,12 +2483,12 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
int page_count, i;
struct address_space *mapping;
struct sg_table *st;
struct scatterlist *sg;
- struct sg_page_iter sg_iter;
+ struct sgt_iter sgt_iter;
struct page *page;
unsigned long last_pfn = 0; /* suppress gcc warning */
int ret;
@@ -2271,7 +2516,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
*
* Fail silently without starting the shrinker
*/
- mapping = file_inode(obj->base.filp)->i_mapping;
+ mapping = obj->base.filp->f_mapping;
gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
gfp |= __GFP_NORETRY | __GFP_NOWARN;
sg = st->sgl;
@@ -2340,8 +2585,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
err_pages:
sg_mark_end(sg);
- for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
- put_page(sg_page_iter_page(&sg_iter));
+ for_each_sgt_page(page, sgt_iter, st)
+ put_page(page);
sg_free_table(st);
kfree(st);
@@ -2369,7 +2614,7 @@ err_pages:
int
i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
const struct drm_i915_gem_object_ops *ops = obj->ops;
int ret;
@@ -2395,6 +2640,44 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
return 0;
}
+/* The 'mapping' part of i915_gem_object_pin_map() below */
+static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
+{
+ unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
+ struct sg_table *sgt = obj->pages;
+ struct sgt_iter sgt_iter;
+ struct page *page;
+ struct page *stack_pages[32];
+ struct page **pages = stack_pages;
+ unsigned long i = 0;
+ void *addr;
+
+ /* A single page can always be kmapped */
+ if (n_pages == 1)
+ return kmap(sg_page(sgt->sgl));
+
+ if (n_pages > ARRAY_SIZE(stack_pages)) {
+ /* Too big for stack -- allocate temporary array instead */
+ pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
+ if (!pages)
+ return NULL;
+ }
+
+ for_each_sgt_page(page, sgt_iter, sgt)
+ pages[i++] = page;
+
+ /* Check that we have the expected number of pages */
+ GEM_BUG_ON(i != n_pages);
+
+ addr = vmap(pages, n_pages, 0, PAGE_KERNEL);
+
+ if (pages != stack_pages)
+ drm_free_large(pages);
+
+ return addr;
+}
+
+/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
{
int ret;
@@ -2407,29 +2690,9 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
i915_gem_object_pin_pages(obj);
- if (obj->mapping == NULL) {
- struct page **pages;
-
- pages = NULL;
- if (obj->base.size == PAGE_SIZE)
- obj->mapping = kmap(sg_page(obj->pages->sgl));
- else
- pages = drm_malloc_gfp(obj->base.size >> PAGE_SHIFT,
- sizeof(*pages),
- GFP_TEMPORARY);
- if (pages != NULL) {
- struct sg_page_iter sg_iter;
- int n;
-
- n = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter,
- obj->pages->nents, 0)
- pages[n++] = sg_page_iter_page(&sg_iter);
-
- obj->mapping = vmap(pages, n, 0, PAGE_KERNEL);
- drm_free_large(pages);
- }
- if (obj->mapping == NULL) {
+ if (!obj->mapping) {
+ obj->mapping = i915_gem_object_map(obj);
+ if (!obj->mapping) {
i915_gem_object_unpin_pages(obj);
return ERR_PTR(-ENOMEM);
}
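
i915_gem_object_map() above gathers the object's pages into a small on-stack array, falling back to a heap allocation only for large objects, before handing them to vmap(). The same idiom in portable C (a hypothetical helper, not the driver function):

#include <stdlib.h>
#include <string.h>

/* Visit n pointers without heap traffic for small n. */
int process_pages(void *const *pages, size_t n)
{
	void *stack_buf[32];
	void **buf = stack_buf;

	if (n > sizeof(stack_buf) / sizeof(stack_buf[0])) {
		buf = malloc(n * sizeof(*buf));	/* heap fallback */
		if (!buf)
			return -1;
	}

	memcpy(buf, pages, n * sizeof(*buf));
	/* ... build the contiguous mapping from buf[0..n-1] here ... */

	if (buf != stack_buf)
		free(buf);
	return 0;
}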
@@ -2502,9 +2765,8 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
}
static int
-i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
+i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
int ret;
@@ -2514,7 +2776,14 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
if (ret)
return ret;
}
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev_priv);
+
+ /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
+ if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
+ while (intel_kick_waiters(dev_priv) ||
+ intel_kick_signalers(dev_priv))
+ yield();
+ }
/* Finally reset hw state */
for_each_engine(engine, dev_priv)
@@ -2525,7 +2794,7 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
if (seqno == 0)
@@ -2534,7 +2803,7 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
/* HWS page needs to be set less than what we
* will inject to ring
*/
- ret = i915_gem_init_seqno(dev, seqno - 1);
+ ret = i915_gem_init_seqno(dev_priv, seqno - 1);
if (ret)
return ret;
@@ -2550,13 +2819,11 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
}
int
-i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
+i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
/* reserve 0 for non-seqno */
if (dev_priv->next_seqno == 0) {
- int ret = i915_gem_init_seqno(dev, 0);
+ int ret = i915_gem_init_seqno(dev_priv, 0);
if (ret)
return ret;
@@ -2567,6 +2834,26 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
return 0;
}
+static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+
+ dev_priv->gt.active_engines |= intel_engine_flag(engine);
+ if (dev_priv->gt.awake)
+ return;
+
+ intel_runtime_pm_get_noresume(dev_priv);
+ dev_priv->gt.awake = true;
+
+ i915_update_gfx_val(dev_priv);
+ if (INTEL_GEN(dev_priv) >= 6)
+ gen6_rps_busy(dev_priv);
+
+ queue_delayed_work(dev_priv->wq,
+ &dev_priv->gt.retire_work,
+ round_jiffies_up_relative(HZ));
+}
+
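
i915_gem_mark_busy() performs the expensive wake-up (runtime-PM reference, RPS kick, retire worker) only for the first request after idling; later requests just set an engine bit. The shape of that first-caller-pays pattern, sketched with illustrative names:

struct gt_state {
	unsigned int active_mask;
	int awake;
};

/* Mark one engine busy; only the first request pays the wake-up cost. */
static void mark_busy(struct gt_state *gt, unsigned int engine_bit)
{
	gt->active_mask |= engine_bit;
	if (gt->awake)
		return;		/* already powered up and ticking */

	gt->awake = 1;
	/*
	 * ... take a runtime-PM reference without resuming, raise GPU
	 * clocks, and arm the periodic retire tick here ...
	 */
}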
/*
* NB: This function is not allowed to fail. Doing so would mean that the
* request is not being tracked for completion but the work itself is
@@ -2577,16 +2864,15 @@ void __i915_add_request(struct drm_i915_gem_request *request,
bool flush_caches)
{
struct intel_engine_cs *engine;
- struct drm_i915_private *dev_priv;
struct intel_ringbuffer *ringbuf;
u32 request_start;
+ u32 reserved_tail;
int ret;
if (WARN_ON(request == NULL))
return;
engine = request->engine;
- dev_priv = request->i915;
ringbuf = request->ringbuf;
/*
@@ -2594,9 +2880,10 @@ void __i915_add_request(struct drm_i915_gem_request *request,
* should already have been reserved in the ring buffer. Let the ring
* know that it is time to use that space up.
*/
- intel_ring_reserved_space_use(ringbuf);
-
request_start = intel_ring_get_tail(ringbuf);
+ reserved_tail = request->reserved_space;
+ request->reserved_space = 0;
+
/*
* Emit any outstanding flushes - execbuf can fail to emit the flush
* after having emitted the batchbuffer command. Hence we need to fix
@@ -2651,56 +2938,42 @@ void __i915_add_request(struct drm_i915_gem_request *request,
}
/* Not allowed to fail! */
WARN(ret, "emit|add_request failed: %d!\n", ret);
-
- i915_queue_hangcheck(engine->dev);
-
- queue_delayed_work(dev_priv->wq,
- &dev_priv->mm.retire_work,
- round_jiffies_up_relative(HZ));
- intel_mark_busy(dev_priv->dev);
-
/* Sanity check that the reserved size was large enough. */
- intel_ring_reserved_space_end(ringbuf);
+ ret = intel_ring_get_tail(ringbuf) - request_start;
+ if (ret < 0)
+ ret += ringbuf->size;
+ WARN_ONCE(ret > reserved_tail,
+ "Not enough space reserved (%d bytes) "
+ "for adding the request (%d bytes)\n",
+ reserved_tail, ret);
+
+ i915_gem_mark_busy(engine);
}
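
The reserved-space sanity check above measures how many bytes the request actually consumed, correcting for the tail wrapping past the end of the ring. The arithmetic in isolation, with a worked example:

/* Bytes consumed between two ring offsets, accounting for wrap-around. */
static unsigned int ring_used(unsigned int start, unsigned int tail,
			      unsigned int size)
{
	if (tail >= start)
		return tail - start;
	return size - start + tail;	/* tail wrapped past the end */
}

/* e.g. size = 4096, start = 4000, tail = 100:
 *   4096 - 4000 + 100 = 196 bytes were written across the wrap. */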
-static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
- const struct intel_context *ctx)
+static bool i915_context_is_banned(const struct i915_gem_context *ctx)
{
unsigned long elapsed;
- elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
-
if (ctx->hang_stats.banned)
return true;
+ elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
if (ctx->hang_stats.ban_period_seconds &&
elapsed <= ctx->hang_stats.ban_period_seconds) {
- if (!i915_gem_context_is_default(ctx)) {
- DRM_DEBUG("context hanging too fast, banning!\n");
- return true;
- } else if (i915_stop_ring_allow_ban(dev_priv)) {
- if (i915_stop_ring_allow_warn(dev_priv))
- DRM_ERROR("gpu hanging too fast, banning!\n");
- return true;
- }
+ DRM_DEBUG("context hanging too fast, banning!\n");
+ return true;
}
return false;
}
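
The simplified ban test keeps one rule: a context already banned stays banned, and a context whose previous guilty hang was within ban_period_seconds gets banned on the next one. A standalone rendering of the same heuristic (field names mirror the diff, but this is a sketch):

#include <stdbool.h>
#include <time.h>

struct hang_stats {
	bool banned;
	time_t guilty_ts;		/* time of the last guilty hang */
	unsigned int ban_period;	/* seconds; 0 disables banning */
};

/* Ban a context that hangs twice within ban_period seconds. */
static bool context_is_banned(const struct hang_stats *hs)
{
	if (hs->banned)
		return true;

	return hs->ban_period &&
	       time(NULL) - hs->guilty_ts <= (time_t)hs->ban_period;
}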
-static void i915_set_reset_status(struct drm_i915_private *dev_priv,
- struct intel_context *ctx,
+static void i915_set_reset_status(struct i915_gem_context *ctx,
const bool guilty)
{
- struct i915_ctx_hang_stats *hs;
-
- if (WARN_ON(!ctx))
- return;
-
- hs = &ctx->hang_stats;
+ struct i915_ctx_hang_stats *hs = &ctx->hang_stats;
if (guilty) {
- hs->banned = i915_context_is_banned(dev_priv, ctx);
+ hs->banned = i915_context_is_banned(ctx);
hs->batch_active++;
hs->guilty_ts = get_seconds();
} else {
@@ -2712,27 +2985,15 @@ void i915_gem_request_free(struct kref *req_ref)
{
struct drm_i915_gem_request *req = container_of(req_ref,
typeof(*req), ref);
- struct intel_context *ctx = req->ctx;
-
- if (req->file_priv)
- i915_gem_request_remove_from_client(req);
-
- if (ctx) {
- if (i915.enable_execlists && ctx != req->i915->kernel_context)
- intel_lr_context_unpin(ctx, req->engine);
-
- i915_gem_context_unreference(ctx);
- }
-
kmem_cache_free(req->i915->requests, req);
}
static inline int
__i915_gem_request_alloc(struct intel_engine_cs *engine,
- struct intel_context *ctx,
+ struct i915_gem_context *ctx,
struct drm_i915_gem_request **req_out)
{
- struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
struct drm_i915_gem_request *req;
int ret;
@@ -2754,26 +3015,16 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
if (req == NULL)
return -ENOMEM;
- ret = i915_gem_get_seqno(engine->dev, &req->seqno);
+ ret = i915_gem_get_seqno(engine->i915, &req->seqno);
if (ret)
goto err;
kref_init(&req->ref);
req->i915 = dev_priv;
req->engine = engine;
- req->reset_counter = reset_counter;
req->ctx = ctx;
i915_gem_context_reference(req->ctx);
- if (i915.enable_execlists)
- ret = intel_logical_ring_alloc_request_extras(req);
- else
- ret = intel_ring_alloc_request_extras(req);
- if (ret) {
- i915_gem_context_unreference(req->ctx);
- goto err;
- }
-
/*
* Reserve space in the ring buffer for all the commands required to
* eventually emit this request. This is to guarantee that the
@@ -2781,24 +3032,20 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
* to be redone if the request is not actually submitted straight
* away, e.g. because a GPU scheduler has deferred it.
*/
+ req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
+
if (i915.enable_execlists)
- ret = intel_logical_ring_reserve_space(req);
+ ret = intel_logical_ring_alloc_request_extras(req);
else
- ret = intel_ring_reserve_space(req);
- if (ret) {
- /*
- * At this point, the request is fully allocated even if not
- * fully prepared. Thus it can be cleaned up using the proper
- * free code.
- */
- intel_ring_reserved_space_cancel(req->ringbuf);
- i915_gem_request_unreference(req);
- return ret;
- }
+ ret = intel_ring_alloc_request_extras(req);
+ if (ret)
+ goto err_ctx;
*req_out = req;
return 0;
+err_ctx:
+ i915_gem_context_unreference(ctx);
err:
kmem_cache_free(dev_priv->requests, req);
return ret;
@@ -2818,13 +3065,13 @@ err:
*/
struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
- struct intel_context *ctx)
+ struct i915_gem_context *ctx)
{
struct drm_i915_gem_request *req;
int err;
if (ctx == NULL)
- ctx = to_i915(engine->dev)->kernel_context;
+ ctx = engine->i915->kernel_context;
err = __i915_gem_request_alloc(engine, ctx, &req);
return err ? ERR_PTR(err) : req;
}
@@ -2834,8 +3081,16 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *request;
+ /* We are called by the error capture and reset at a random
+ * point in time. In particular, note that neither is crucially
+ * ordered with an interrupt. After a hang, the GPU is dead and we
+ * assume that no more writes can happen (we waited long enough for
+ * all writes that were in transaction to be flushed) - adding an
+ * extra delay for a recent interrupt is pointless. Hence, we do
+ * not need an engine->irq_seqno_barrier() before the seqno reads.
+ */
list_for_each_entry(request, &engine->request_list, list) {
- if (i915_gem_request_completed(request, false))
+ if (i915_gem_request_completed(request))
continue;
return request;
@@ -2844,27 +3099,23 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
return NULL;
}
-static void i915_gem_reset_engine_status(struct drm_i915_private *dev_priv,
- struct intel_engine_cs *engine)
+static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *request;
bool ring_hung;
request = i915_gem_find_active_request(engine);
-
if (request == NULL)
return;
ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
- i915_set_reset_status(dev_priv, request->ctx, ring_hung);
-
+ i915_set_reset_status(request->ctx, ring_hung);
list_for_each_entry_continue(request, &engine->request_list, list)
- i915_set_reset_status(dev_priv, request->ctx, false);
+ i915_set_reset_status(request->ctx, false);
}
-static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
- struct intel_engine_cs *engine)
+static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
{
struct intel_ringbuffer *buffer;
@@ -2888,13 +3139,7 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
/* Ensure irq handler finishes or is cancelled. */
tasklet_kill(&engine->irq_tasklet);
- spin_lock_bh(&engine->execlist_lock);
- /* list_splice_tail_init checks for empty lists */
- list_splice_tail_init(&engine->execlist_queue,
- &engine->execlist_retired_req_list);
- spin_unlock_bh(&engine->execlist_lock);
-
- intel_execlists_retire_requests(engine);
+ intel_execlists_cancel_requests(engine);
}
/*
@@ -2927,11 +3172,13 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
}
intel_ring_init_seqno(engine, engine->last_submitted_seqno);
+
+ engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
}
void i915_gem_reset(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
/*
@@ -2940,10 +3187,11 @@ void i915_gem_reset(struct drm_device *dev)
* their reference to the objects, the inspection must be done first.
*/
for_each_engine(engine, dev_priv)
- i915_gem_reset_engine_status(dev_priv, engine);
+ i915_gem_reset_engine_status(engine);
for_each_engine(engine, dev_priv)
- i915_gem_reset_engine_cleanup(dev_priv, engine);
+ i915_gem_reset_engine_cleanup(engine);
+ mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
i915_gem_context_reset(dev);
@@ -2954,6 +3202,7 @@ void i915_gem_reset(struct drm_device *dev)
/**
* This function clears the request list as sequence numbers are passed.
+ * @engine: engine to retire requests on
*/
void
i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
@@ -2972,7 +3221,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
struct drm_i915_gem_request,
list);
- if (!i915_gem_request_completed(request, true))
+ if (!i915_gem_request_completed(request))
break;
i915_gem_request_retire(request);
@@ -2995,58 +3244,52 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
i915_gem_object_retire__read(obj, engine->id);
}
- if (unlikely(engine->trace_irq_req &&
- i915_gem_request_completed(engine->trace_irq_req, true))) {
- engine->irq_put(engine);
- i915_gem_request_assign(&engine->trace_irq_req, NULL);
- }
-
WARN_ON(i915_verify_lists(engine->dev));
}
-bool
-i915_gem_retire_requests(struct drm_device *dev)
+void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
- bool idle = true;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ if (dev_priv->gt.active_engines == 0)
+ return;
+
+ GEM_BUG_ON(!dev_priv->gt.awake);
for_each_engine(engine, dev_priv) {
i915_gem_retire_requests_ring(engine);
- idle &= list_empty(&engine->request_list);
- if (i915.enable_execlists) {
- spin_lock_bh(&engine->execlist_lock);
- idle &= list_empty(&engine->execlist_queue);
- spin_unlock_bh(&engine->execlist_lock);
-
- intel_execlists_retire_requests(engine);
- }
+ if (list_empty(&engine->request_list))
+ dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
}
- if (idle)
- mod_delayed_work(dev_priv->wq,
- &dev_priv->mm.idle_work,
+ if (dev_priv->gt.active_engines == 0)
+ queue_delayed_work(dev_priv->wq,
+ &dev_priv->gt.idle_work,
msecs_to_jiffies(100));
-
- return idle;
}
static void
i915_gem_retire_work_handler(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), mm.retire_work.work);
- struct drm_device *dev = dev_priv->dev;
- bool idle;
+ container_of(work, typeof(*dev_priv), gt.retire_work.work);
+ struct drm_device *dev = &dev_priv->drm;
/* Come back later if the device is busy... */
- idle = false;
if (mutex_trylock(&dev->struct_mutex)) {
- idle = i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev_priv);
mutex_unlock(&dev->struct_mutex);
}
- if (!idle)
- queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
+
+ /* Keep the retire handler running until we are finally idle.
+ * We do not need to do this test under locking as in the worst-case
+ * we queue the retire worker once too often.
+ */
+ if (READ_ONCE(dev_priv->gt.awake))
+ queue_delayed_work(dev_priv->wq,
+ &dev_priv->gt.retire_work,
round_jiffies_up_relative(HZ));
}
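
The retire handler now re-arms itself whenever gt.awake is still set, accepting that a trylock failure just defers the work by one tick; queuing once too often is the stated worst case. A userspace rendering of that trylock-and-rearm loop (POSIX threads, illustrative names):

#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile int awake = 1;

static void retire_tick(void)
{
	/* Skip the tick if the lock is contended; the next one retries. */
	if (pthread_mutex_trylock(&big_lock) == 0) {
		/* ... retire completed requests here ... */
		pthread_mutex_unlock(&big_lock);
	}
}

static void *retire_worker(void *arg)
{
	(void)arg;
	while (awake) {		/* re-arm until finally idle */
		retire_tick();
		sleep(1);	/* analogous to a one-second delayed work */
	}
	return NULL;
}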
@@ -3054,25 +3297,55 @@ static void
i915_gem_idle_work_handler(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), mm.idle_work.work);
- struct drm_device *dev = dev_priv->dev;
+ container_of(work, typeof(*dev_priv), gt.idle_work.work);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_engine_cs *engine;
+ unsigned int stuck_engines;
+ bool rearm_hangcheck;
+
+ if (!READ_ONCE(dev_priv->gt.awake))
+ return;
+
+ if (READ_ONCE(dev_priv->gt.active_engines))
+ return;
+
+ rearm_hangcheck =
+ cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
+
+ if (!mutex_trylock(&dev->struct_mutex)) {
+ /* Currently busy, come back later */
+ mod_delayed_work(dev_priv->wq,
+ &dev_priv->gt.idle_work,
+ msecs_to_jiffies(50));
+ goto out_rearm;
+ }
+
+ if (dev_priv->gt.active_engines)
+ goto out_unlock;
for_each_engine(engine, dev_priv)
- if (!list_empty(&engine->request_list))
- return;
+ i915_gem_batch_pool_fini(&engine->batch_pool);
- /* we probably should sync with hangcheck here, using cancel_work_sync.
- * Also locking seems to be fubar here, engine->request_list is protected
- * by dev->struct_mutex. */
+ GEM_BUG_ON(!dev_priv->gt.awake);
+ dev_priv->gt.awake = false;
+ rearm_hangcheck = false;
- intel_mark_idle(dev);
+ stuck_engines = intel_kick_waiters(dev_priv);
+ if (unlikely(stuck_engines)) {
+ DRM_DEBUG_DRIVER("kicked stuck waiters...missed irq\n");
+ dev_priv->gpu_error.missed_irq_rings |= stuck_engines;
+ }
- if (mutex_trylock(&dev->struct_mutex)) {
- for_each_engine(engine, dev_priv)
- i915_gem_batch_pool_fini(&engine->batch_pool);
+ if (INTEL_GEN(dev_priv) >= 6)
+ gen6_rps_idle(dev_priv);
+ intel_runtime_pm_put(dev_priv);
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
- mutex_unlock(&dev->struct_mutex);
+out_rearm:
+ if (rearm_hangcheck) {
+ GEM_BUG_ON(!dev_priv->gt.awake);
+ i915_queue_hangcheck(dev_priv);
}
}
@@ -3080,6 +3353,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
* Ensures that an object will eventually get non-busy by flushing any required
* write domains, emitting any outstanding lazy request and retiring and
* completed requests.
+ * @obj: object to flush
*/
static int
i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
@@ -3096,14 +3370,8 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
if (req == NULL)
continue;
- if (list_empty(&req->list))
- goto retire;
-
- if (i915_gem_request_completed(req, true)) {
- __i915_gem_request_retire__upto(req);
-retire:
+ if (i915_gem_request_completed(req))
i915_gem_object_retire__read(obj, i);
- }
}
return 0;
@@ -3111,7 +3379,9 @@ retire:
/**
* i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
- * @DRM_IOCTL_ARGS: standard ioctl arguments
+ * @dev: drm device pointer
+ * @data: ioctl data blob
+ * @file: drm file pointer
*
* Returns 0 if successful, else an error is returned with the remaining time in
* the timeout parameter.
@@ -3185,7 +3455,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
ret = __i915_wait_request(req[i], true,
args->timeout_ns > 0 ? &args->timeout_ns : NULL,
to_rps_client(file));
- i915_gem_request_unreference__unlocked(req[i]);
+ i915_gem_request_unreference(req[i]);
}
return ret;
@@ -3208,10 +3478,10 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
if (to == from)
return 0;
- if (i915_gem_request_completed(from_req, true))
+ if (i915_gem_request_completed(from_req))
return 0;
- if (!i915_semaphore_is_enabled(obj->base.dev)) {
+ if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) {
struct drm_i915_private *i915 = to_i915(obj->base.dev);
ret = __i915_wait_request(from_req,
i915->mm.interruptible,
@@ -3345,10 +3615,21 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
old_write_domain);
}
+static void __i915_vma_iounmap(struct i915_vma *vma)
+{
+ GEM_BUG_ON(vma->pin_count);
+
+ if (vma->iomap == NULL)
+ return;
+
+ io_mapping_unmap(vma->iomap);
+ vma->iomap = NULL;
+}
+
static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
{
struct drm_i915_gem_object *obj = vma->obj;
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
int ret;
if (list_empty(&vma->obj_link))
@@ -3377,6 +3658,8 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
ret = i915_gem_object_put_fence(obj);
if (ret)
return ret;
+
+ __i915_vma_iounmap(vma);
}
trace_i915_vma_unbind(vma);
@@ -3422,26 +3705,16 @@ int __i915_vma_unbind_no_wait(struct i915_vma *vma)
return __i915_vma_unbind(vma, false);
}
-int i915_gpu_idle(struct drm_device *dev)
+int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
int ret;
- /* Flush everything onto the inactive list. */
- for_each_engine(engine, dev_priv) {
- if (!i915.enable_execlists) {
- struct drm_i915_gem_request *req;
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
- req = i915_gem_request_alloc(engine, NULL);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- ret = i915_switch_context(req);
- i915_add_request_no_flush(req);
- if (ret)
- return ret;
- }
+ for_each_engine(engine, dev_priv) {
+ if (engine->last_context == NULL)
+ continue;
ret = intel_engine_idle(engine);
if (ret)
@@ -3488,6 +3761,11 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
/**
* Finds free space in the GTT aperture and binds the object or a view of it
* there.
+ * @obj: object to bind
+ * @vm: address space to bind into
+ * @ggtt_view: global gtt view if applicable
+ * @alignment: requested alignment
+ * @flags: mask of PIN_* flags to use
*/
static struct i915_vma *
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
@@ -3731,7 +4009,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
return;
if (i915_gem_clflush_object(obj, obj->pin_display))
- i915_gem_chipset_flush(obj->base.dev);
+ i915_gem_chipset_flush(to_i915(obj->base.dev));
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
@@ -3745,6 +4023,8 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
/**
* Moves a single object to the GTT read, and possibly write domain.
+ * @obj: object to act on
+ * @write: whether write access is requested (read-only otherwise)
*
* This function returns when the move is complete, including waiting on
* flushes to occur.
@@ -3816,6 +4096,8 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
/**
* Changes the cache-level of an object across all VMA.
+ * @obj: object to act on
+ * @cache_level: new cache level to set for the object
*
* After this function returns, the object will be in the new cache-level
* across all GTT and the contents of the backing storage will be coherent,
@@ -3925,11 +4207,9 @@ out:
* object is now coherent at its new cache level (with respect
* to the access domain).
*/
- if (obj->cache_dirty &&
- obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
- cpu_write_needs_clflush(obj)) {
+ if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
if (i915_gem_clflush_object(obj, true))
- i915_gem_chipset_flush(obj->base.dev);
+ i915_gem_chipset_flush(to_i915(obj->base.dev));
}
return 0;
@@ -3967,7 +4247,7 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_caching *args = data;
struct drm_i915_gem_object *obj;
enum i915_cache_level level;
@@ -4097,6 +4377,8 @@ i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
/**
* Moves a single object to the CPU read, and possibly write domain.
+ * @obj: object to act on
+ * @write: requesting write or read-only access
*
* This function returns when the move is complete, including waiting on
* flushes to occur.
@@ -4159,7 +4441,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_file_private *file_priv = file->driver_priv;
unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
struct drm_i915_gem_request *request, *target = NULL;
@@ -4195,10 +4477,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
return 0;
ret = __i915_wait_request(target, true, NULL, NULL);
- if (ret == 0)
- queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
-
- i915_gem_request_unreference__unlocked(target);
+ i915_gem_request_unreference(target);
return ret;
}
@@ -4256,7 +4535,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
uint64_t flags)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_vma *vma;
unsigned bound;
int ret;
@@ -4420,7 +4699,7 @@ int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_madvise *args = data;
struct drm_i915_gem_object *obj;
int ret;
@@ -4490,7 +4769,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
obj->fence_reg = I915_FENCE_REG_NONE;
obj->madv = I915_MADV_WILLNEED;
- i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
+ i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
}
static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
@@ -4499,21 +4778,21 @@ static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
.put_pages = i915_gem_object_put_pages_gtt,
};
-struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
size_t size)
{
struct drm_i915_gem_object *obj;
struct address_space *mapping;
gfp_t mask;
+ int ret;
obj = i915_gem_object_alloc(dev);
if (obj == NULL)
- return NULL;
+ return ERR_PTR(-ENOMEM);
- if (drm_gem_object_init(dev, &obj->base, size) != 0) {
- i915_gem_object_free(obj);
- return NULL;
- }
+ ret = drm_gem_object_init(dev, &obj->base, size);
+ if (ret)
+ goto fail;
mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
@@ -4522,7 +4801,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
mask |= __GFP_DMA32;
}
- mapping = file_inode(obj->base.filp)->i_mapping;
+ mapping = obj->base.filp->f_mapping;
mapping_set_gfp_mask(mapping, mask);
i915_gem_object_init(obj, &i915_gem_object_ops);
@@ -4550,6 +4829,11 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
trace_i915_gem_object_create(obj);
return obj;
+
+fail:
+ i915_gem_object_free(obj);
+
+ return ERR_PTR(ret);
}
static bool discard_backing_storage(struct drm_i915_gem_object *obj)
@@ -4580,7 +4864,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_vma *vma, *next;
intel_runtime_pm_get(dev_priv);
@@ -4655,16 +4939,12 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma;
- BUG_ON(!view);
+ GEM_BUG_ON(!view);
list_for_each_entry(vma, &obj->vma_list, obj_link)
- if (vma->vm == &ggtt->base &&
- i915_ggtt_view_equal(&vma->ggtt_view, view))
+ if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma;
return NULL;
}
@@ -4688,7 +4968,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
static void
i915_gem_stop_engines(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
for_each_engine(engine, dev_priv)
@@ -4698,27 +4978,28 @@ i915_gem_stop_engines(struct drm_device *dev)
int
i915_gem_suspend(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
mutex_lock(&dev->struct_mutex);
- ret = i915_gpu_idle(dev);
+ ret = i915_gem_wait_for_idle(dev_priv);
if (ret)
goto err;
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev_priv);
i915_gem_stop_engines(dev);
+ i915_gem_context_lost(dev_priv);
mutex_unlock(&dev->struct_mutex);
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
- cancel_delayed_work_sync(&dev_priv->mm.retire_work);
- flush_delayed_work(&dev_priv->mm.idle_work);
+ cancel_delayed_work_sync(&dev_priv->gt.retire_work);
+ flush_delayed_work(&dev_priv->gt.idle_work);
/* Assert that we successfully flushed all the work and
* reset the GPU back to its idle, low power state.
*/
- WARN_ON(dev_priv->mm.busy);
+ WARN_ON(dev_priv->gt.awake);
return 0;
@@ -4727,40 +5008,9 @@ err:
return ret;
}
-int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
-{
- struct intel_engine_cs *engine = req->engine;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
- int i, ret;
-
- if (!HAS_L3_DPF(dev) || !remap_info)
- return 0;
-
- ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
- if (ret)
- return ret;
-
- /*
- * Note: We do not worry about the concurrent register cacheline hang
- * here because no other code should access these registers other than
- * at initialization time.
- */
- for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
- intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
- intel_ring_emit(engine, remap_info[i]);
- }
-
- intel_ring_advance(engine);
-
- return ret;
-}
-
void i915_gem_init_swizzling(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (INTEL_INFO(dev)->gen < 5 ||
dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
@@ -4785,7 +5035,7 @@ void i915_gem_init_swizzling(struct drm_device *dev)
static void init_unused_ring(struct drm_device *dev, u32 base)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(RING_CTL(base), 0);
I915_WRITE(RING_HEAD(base), 0);
@@ -4812,7 +5062,7 @@ static void init_unused_rings(struct drm_device *dev)
int i915_gem_init_engines(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
ret = intel_init_render_ring_buffer(dev);
@@ -4860,9 +5110,9 @@ cleanup_render_ring:
int
i915_gem_init_hw(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
- int ret, j;
+ int ret;
/* Double layer security blanket, see i915_gem_init() */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -4914,59 +5164,10 @@ i915_gem_init_hw(struct drm_device *dev)
intel_mocs_init_l3cc_table(dev);
/* We can't enable contexts until all firmware is loaded */
- if (HAS_GUC_UCODE(dev)) {
- ret = intel_guc_ucode_load(dev);
- if (ret) {
- DRM_ERROR("Failed to initialize GuC, error %d\n", ret);
- ret = -EIO;
- goto out;
- }
- }
-
- /*
- * Increment the next seqno by 0x100 so we have a visible break
- * on re-initialisation
- */
- ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100);
+ ret = intel_guc_setup(dev);
if (ret)
goto out;
- /* Now it is safe to go back round and do everything else: */
- for_each_engine(engine, dev_priv) {
- struct drm_i915_gem_request *req;
-
- req = i915_gem_request_alloc(engine, NULL);
- if (IS_ERR(req)) {
- ret = PTR_ERR(req);
- break;
- }
-
- if (engine->id == RCS) {
- for (j = 0; j < NUM_L3_SLICES(dev); j++) {
- ret = i915_gem_l3_remap(req, j);
- if (ret)
- goto err_request;
- }
- }
-
- ret = i915_ppgtt_init_ring(req);
- if (ret)
- goto err_request;
-
- ret = i915_gem_context_enable(req);
- if (ret)
- goto err_request;
-
-err_request:
- i915_add_request_no_flush(req);
- if (ret) {
- DRM_ERROR("Failed to enable %s, error=%d\n",
- engine->name, ret);
- i915_gem_cleanup_engines(dev);
- break;
- }
- }
-
out:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return ret;
@@ -4974,12 +5175,9 @@ out:
int i915_gem_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
- i915.enable_execlists = intel_sanitize_enable_execlists(dev,
- i915.enable_execlists);
-
mutex_lock(&dev->struct_mutex);
if (!i915.enable_execlists) {
@@ -5002,10 +5200,7 @@ int i915_gem_init(struct drm_device *dev)
*/
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- ret = i915_gem_init_userptr(dev);
- if (ret)
- goto out_unlock;
-
+ i915_gem_init_userptr(dev_priv);
i915_gem_init_ggtt(dev);
ret = i915_gem_context_init(dev);
@@ -5037,19 +5232,11 @@ out_unlock:
void
i915_gem_cleanup_engines(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
for_each_engine(engine, dev_priv)
dev_priv->gt.cleanup_engine(engine);
-
- if (i915.enable_execlists)
- /*
- * Neither the BIOS, ourselves or any other kernel
- * expects the system to be in execlists mode on startup,
- * so we need to reset the GPU back to legacy mode.
- */
- intel_gpu_reset(dev, ALL_ENGINES);
}
static void
@@ -5062,7 +5249,7 @@ init_engine_lists(struct intel_engine_cs *engine)
void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
!IS_CHERRYVIEW(dev_priv))
@@ -5073,7 +5260,7 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
else
dev_priv->num_fence_regs = 8;
- if (intel_vgpu_active(dev))
+ if (intel_vgpu_active(dev_priv))
dev_priv->num_fence_regs =
I915_READ(vgtif_reg(avail_rs.fence_num));
@@ -5086,7 +5273,7 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
void
i915_gem_load_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int i;
dev_priv->objects =
@@ -5114,22 +5301,15 @@ i915_gem_load_init(struct drm_device *dev)
init_engine_lists(&dev_priv->engine[i]);
for (i = 0; i < I915_MAX_NUM_FENCES; i++)
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
- INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
+ INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
i915_gem_retire_work_handler);
- INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
+ INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
i915_gem_idle_work_handler);
+ init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
- /*
- * Set initial sequence number for requests.
- * Using this number allows the wraparound to happen early,
- * catching any obvious problems.
- */
- dev_priv->next_seqno = ((u32)~0 - 0x1100);
- dev_priv->last_seqno = ((u32)~0 - 0x1101);
-
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
init_waitqueue_head(&dev_priv->pending_flip_queue);
@@ -5148,6 +5328,34 @@ void i915_gem_load_cleanup(struct drm_device *dev)
kmem_cache_destroy(dev_priv->objects);
}
+int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
+{
+ struct drm_i915_gem_object *obj;
+
+ /* Called just before we write the hibernation image.
+ *
+ * We need to update the domain tracking to reflect that the CPU
+ * will be accessing all the pages to create and restore from the
+ * hibernation, and so upon restoration those pages will be in the
+ * CPU domain.
+ *
+ * To make sure the hibernation image contains the latest state,
+ * we update that state just before writing out the image.
+ */
+
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ }
+
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ }
+
+ return 0;
+}
+
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -5187,7 +5395,7 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
return -ENOMEM;
file->driver_priv = file_priv;
- file_priv->dev_priv = dev->dev_private;
+ file_priv->dev_priv = to_i915(dev);
file_priv->file = file;
INIT_LIST_HEAD(&file_priv->rps.link);
@@ -5233,7 +5441,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = o->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(o->base.dev);
struct i915_vma *vma;
WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
@@ -5254,13 +5462,10 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
const struct i915_ggtt_view *view)
{
- struct drm_i915_private *dev_priv = to_i915(o->base.dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, obj_link)
- if (vma->vm == &ggtt->base &&
- i915_ggtt_view_equal(&vma->ggtt_view, view))
+ if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma->node.start;
WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
@@ -5286,12 +5491,10 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
const struct i915_ggtt_view *view)
{
- struct drm_i915_private *dev_priv = to_i915(o->base.dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, obj_link)
- if (vma->vm == &ggtt->base &&
+ if (vma->is_ggtt &&
i915_ggtt_view_equal(&vma->ggtt_view, view) &&
drm_mm_node_allocated(&vma->node))
return true;
@@ -5310,23 +5513,18 @@ bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
return false;
}
-unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
- struct i915_address_space *vm)
+unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
{
- struct drm_i915_private *dev_priv = o->base.dev->dev_private;
struct i915_vma *vma;
- WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
-
- BUG_ON(list_empty(&o->vma_list));
+ GEM_BUG_ON(list_empty(&o->vma_list));
list_for_each_entry(vma, &o->vma_list, obj_link) {
if (vma->is_ggtt &&
- vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
- continue;
- if (vma->vm == vm)
+ vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
return vma->node.size;
}
+
return 0;
}
@@ -5347,7 +5545,7 @@ i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
struct page *page;
/* Only default objects have per-page dirty tracking */
- if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
+ if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
return NULL;
page = i915_gem_object_get_page(obj, n);
@@ -5365,8 +5563,8 @@ i915_gem_object_create_from_data(struct drm_device *dev,
size_t bytes;
int ret;
- obj = i915_gem_alloc_object(dev, round_up(size, PAGE_SIZE));
- if (IS_ERR_OR_NULL(obj))
+ obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
+ if (IS_ERR(obj))
return obj;
ret = i915_gem_object_set_to_cpu_domain(obj, true);
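
The IS_ERR_OR_NULL to IS_ERR change above works because i915_gem_object_create() reports failure exclusively through ERR_PTR-encoded pointers and never returns NULL. A self-contained sketch of that general idiom, with a stand-in struct foo:

struct foo { int x; };			/* stand-in object type */

static struct foo *foo_create(gfp_t gfp)
{
	struct foo *f = kzalloc(sizeof(*f), gfp);

	if (!f)
		return ERR_PTR(-ENOMEM);	/* errno in the pointer, never NULL */
	return f;
}

/* Caller side: IS_ERR() alone is enough, PTR_ERR() recovers the errno. */
static int foo_example(void)
{
	struct foo *f = foo_create(GFP_KERNEL);

	if (IS_ERR(f))
		return PTR_ERR(f);
	kfree(f);
	return 0;
}
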
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index 7bf2f3f29..3752d5daa 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -134,9 +134,9 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
if (obj == NULL) {
int ret;
- obj = i915_gem_alloc_object(pool->dev, size);
- if (obj == NULL)
- return ERR_PTR(-ENOMEM);
+ obj = i915_gem_object_create(pool->dev, size);
+ if (IS_ERR(obj))
+ return obj;
ret = i915_gem_object_get_pages(obj);
if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index e5acc3916..3c97f0e7a 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -90,6 +90,8 @@
#include "i915_drv.h"
#include "i915_trace.h"
+#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
+
/* This is a HW constraint. The value below is the largest known requirement
* I've seen in a spec to date, and that was a workaround for a non-shipping
* part. It should be safe to decrease this, but it's more future proof as is.
@@ -97,28 +99,27 @@
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096
-static size_t get_context_alignment(struct drm_device *dev)
+static size_t get_context_alignment(struct drm_i915_private *dev_priv)
{
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev_priv))
return GEN6_CONTEXT_ALIGN;
return GEN7_CONTEXT_ALIGN;
}
-static int get_context_size(struct drm_device *dev)
+static int get_context_size(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
u32 reg;
- switch (INTEL_INFO(dev)->gen) {
+ switch (INTEL_GEN(dev_priv)) {
case 6:
reg = I915_READ(CXT_SIZE);
ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
break;
case 7:
reg = I915_READ(GEN7_CXT_SIZE);
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
ret = HSW_CXT_TOTAL_SIZE;
else
ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
@@ -133,7 +134,7 @@ static int get_context_size(struct drm_device *dev)
return ret;
}
-static void i915_gem_context_clean(struct intel_context *ctx)
+static void i915_gem_context_clean(struct i915_gem_context *ctx)
{
struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
struct i915_vma *vma, *next;
@@ -150,13 +151,12 @@ static void i915_gem_context_clean(struct intel_context *ctx)
void i915_gem_context_free(struct kref *ctx_ref)
{
- struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
+ struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
+ int i;
+ lockdep_assert_held(&ctx->i915->drm.struct_mutex);
trace_i915_context_free(ctx);
- if (i915.enable_execlists)
- intel_lr_context_free(ctx);
-
/*
* This context is going away and we need to remove all VMAs still
* around. This is to handle imported shared objects for which
@@ -166,9 +166,22 @@ void i915_gem_context_free(struct kref *ctx_ref)
i915_ppgtt_put(ctx->ppgtt);
- if (ctx->legacy_hw_ctx.rcs_state)
- drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
+ struct intel_context *ce = &ctx->engine[i];
+
+ if (!ce->state)
+ continue;
+
+ WARN_ON(ce->pin_count);
+ if (ce->ringbuf)
+ intel_ringbuffer_free(ce->ringbuf);
+
+ drm_gem_object_unreference(&ce->state->base);
+ }
+
list_del(&ctx->link);
+
+ ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
kfree(ctx);
}
@@ -178,9 +191,11 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
struct drm_i915_gem_object *obj;
int ret;
- obj = i915_gem_alloc_object(dev, size);
- if (obj == NULL)
- return ERR_PTR(-ENOMEM);
+ lockdep_assert_held(&dev->struct_mutex);
+
+ obj = i915_gem_object_create(dev, size);
+ if (IS_ERR(obj))
+ return obj;
/*
* Try to make the context utilize L3 as well as LLC.
@@ -209,22 +224,52 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
return obj;
}
-static struct intel_context *
+static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
+{
+ int ret;
+
+ ret = ida_simple_get(&dev_priv->context_hw_ida,
+ 0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
+ if (ret < 0) {
+ /* Contexts are only released when no longer active.
+ * Flush any pending retires to hopefully release some
+ * stale contexts and try again.
+ */
+ i915_gem_retire_requests(dev_priv);
+ ret = ida_simple_get(&dev_priv->context_hw_ida,
+ 0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ }
+
+ *out = ret;
+ return 0;
+}
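
assign_hw_id() above is the standard "allocate, reclaim, retry once" shape around the simple ida interface. A standalone sketch of the ida_simple_* pairing it relies on (the example names and the [0, 128) range are arbitrary):

static DEFINE_IDA(example_ida);

static int example_get_id(void)
{
	/* Returns the smallest free id in [0, 128), or a negative errno. */
	return ida_simple_get(&example_ida, 0, 128, GFP_KERNEL);
}

static void example_put_id(int id)
{
	/* Every successful get must be paired with a remove. */
	ida_simple_remove(&example_ida, id);
}
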
+
+static struct i915_gem_context *
__create_hw_context(struct drm_device *dev,
struct drm_i915_file_private *file_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_context *ctx;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_gem_context *ctx;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (ctx == NULL)
return ERR_PTR(-ENOMEM);
+ ret = assign_hw_id(dev_priv, &ctx->hw_id);
+ if (ret) {
+ kfree(ctx);
+ return ERR_PTR(ret);
+ }
+
kref_init(&ctx->ref);
list_add_tail(&ctx->link, &dev_priv->context_list);
ctx->i915 = dev_priv;
+ ctx->ggtt_alignment = get_context_alignment(dev_priv);
+
if (dev_priv->hw_context_size) {
struct drm_i915_gem_object *obj =
i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
@@ -232,7 +277,7 @@ __create_hw_context(struct drm_device *dev,
ret = PTR_ERR(obj);
goto err_out;
}
- ctx->legacy_hw_ctx.rcs_state = obj;
+ ctx->engine[RCS].state = obj;
}
/* Default context will never have a file_priv */
@@ -249,9 +294,13 @@ __create_hw_context(struct drm_device *dev,
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
* is no remap info, it will be a NOP. */
- ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
+ ctx->remap_slice = ALL_L3_SLICES(dev_priv);
ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;
+ ctx->ring_size = 4 * PAGE_SIZE;
+ ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
+ GEN8_CTX_ADDRESSING_MODE_SHIFT;
+ ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier);
return ctx;
@@ -265,44 +314,27 @@ err_out:
* context state of the GPU for applications that don't utilize HW contexts, as
* well as an idle case.
*/
-static struct intel_context *
+static struct i915_gem_context *
i915_gem_create_context(struct drm_device *dev,
struct drm_i915_file_private *file_priv)
{
- const bool is_global_default_ctx = file_priv == NULL;
- struct intel_context *ctx;
- int ret = 0;
+ struct i915_gem_context *ctx;
- BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ lockdep_assert_held(&dev->struct_mutex);
ctx = __create_hw_context(dev, file_priv);
if (IS_ERR(ctx))
return ctx;
- if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
- /* We may need to do things with the shrinker which
- * require us to immediately switch back to the default
- * context. This can cause a problem as pinning the
- * default context also requires GTT space which may not
- * be available. To avoid this we always pin the default
- * context.
- */
- ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
- get_context_alignment(dev), 0);
- if (ret) {
- DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
- goto err_destroy;
- }
- }
-
if (USES_FULL_PPGTT(dev)) {
struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);
- if (IS_ERR_OR_NULL(ppgtt)) {
+ if (IS_ERR(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
- ret = PTR_ERR(ppgtt);
- goto err_unpin;
+ idr_remove(&file_priv->context_idr, ctx->user_handle);
+ i915_gem_context_unreference(ctx);
+ return ERR_CAST(ppgtt);
}
ctx->ppgtt = ppgtt;
@@ -311,76 +343,102 @@ i915_gem_create_context(struct drm_device *dev,
trace_i915_context_create(ctx);
return ctx;
+}
-err_unpin:
- if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
- i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
-err_destroy:
- idr_remove(&file_priv->context_idr, ctx->user_handle);
- i915_gem_context_unreference(ctx);
- return ERR_PTR(ret);
+/**
+ * i915_gem_context_create_gvt - create a GVT GEM context
+ * @dev: drm device
+ *
+ * This function is used to create a GVT-specific GEM context.
+ *
+ * Returns:
+ * pointer to the new i915_gem_context on success, ERR_PTR on failure
+ */
+struct i915_gem_context *
+i915_gem_context_create_gvt(struct drm_device *dev)
+{
+ struct i915_gem_context *ctx;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
+ return ERR_PTR(-ENODEV);
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ctx = i915_gem_create_context(dev, NULL);
+ if (IS_ERR(ctx))
+ goto out;
+
+ ctx->execlists_force_single_submission = true;
+ ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
+out:
+ mutex_unlock(&dev->struct_mutex);
+ return ctx;
}
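
A plausible caller of this, e.g. from GVT initialisation code (sketch only; the surrounding error handling is assumed):

struct i915_gem_context *ctx;

ctx = i915_gem_context_create_gvt(dev);
if (IS_ERR(ctx))
	return PTR_ERR(ctx);	/* -ENODEV when CONFIG_DRM_I915_GVT=n */
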
-static void i915_gem_context_unpin(struct intel_context *ctx,
+static void i915_gem_context_unpin(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
if (i915.enable_execlists) {
intel_lr_context_unpin(ctx, engine);
} else {
- if (engine->id == RCS && ctx->legacy_hw_ctx.rcs_state)
- i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
+ struct intel_context *ce = &ctx->engine[engine->id];
+
+ if (ce->state)
+ i915_gem_object_ggtt_unpin(ce->state);
+
i915_gem_context_unreference(ctx);
}
}
void i915_gem_context_reset(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ lockdep_assert_held(&dev->struct_mutex);
if (i915.enable_execlists) {
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
list_for_each_entry(ctx, &dev_priv->context_list, link)
intel_lr_context_reset(dev_priv, ctx);
}
- for (i = 0; i < I915_NUM_ENGINES; i++) {
- struct intel_engine_cs *engine = &dev_priv->engine[i];
-
- if (engine->last_context) {
- i915_gem_context_unpin(engine->last_context, engine);
- engine->last_context = NULL;
- }
- }
-
- /* Force the GPU state to be reinitialised on enabling */
- dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
+ i915_gem_context_lost(dev_priv);
}
int i915_gem_context_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_context *ctx;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_gem_context *ctx;
/* Init should only be called once per module load. Eventually the
* restriction on the context_disabled check can be loosened. */
if (WARN_ON(dev_priv->kernel_context))
return 0;
- if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
+ if (intel_vgpu_active(dev_priv) &&
+ HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
if (!i915.enable_execlists) {
DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
return -EINVAL;
}
}
+ /* Using the simple ida interface, the max id is limited to INT_MAX */
+ BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
+ ida_init(&dev_priv->context_hw_ida);
+
if (i915.enable_execlists) {
/* NB: intentionally left blank. We will allocate our own
* backing objects as we need them, thank you very much */
dev_priv->hw_context_size = 0;
- } else if (HAS_HW_CONTEXTS(dev)) {
- dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
+ } else if (HAS_HW_CONTEXTS(dev_priv)) {
+ dev_priv->hw_context_size =
+ round_up(get_context_size(dev_priv), 4096);
if (dev_priv->hw_context_size > (1<<20)) {
DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
dev_priv->hw_context_size);
@@ -403,67 +461,60 @@ int i915_gem_context_init(struct drm_device *dev)
return 0;
}
-void i915_gem_context_fini(struct drm_device *dev)
+void i915_gem_context_lost(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_context *dctx = dev_priv->kernel_context;
- int i;
-
- if (dctx->legacy_hw_ctx.rcs_state) {
- /* The only known way to stop the gpu from accessing the hw context is
- * to reset it. Do this as the very last operation to avoid confusing
- * other code, leading to spurious errors. */
- intel_gpu_reset(dev, ALL_ENGINES);
+ struct intel_engine_cs *engine;
- /* When default context is created and switched to, base object refcount
- * will be 2 (+1 from object creation and +1 from do_switch()).
- * i915_gem_context_fini() will be called after gpu_idle() has switched
- * to default context. So we need to unreference the base object once
- * to offset the do_switch part, so that i915_gem_context_unreference()
- * can then free the base object correctly. */
- WARN_ON(!dev_priv->engine[RCS].last_context);
-
- i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
- }
-
- for (i = I915_NUM_ENGINES; --i >= 0;) {
- struct intel_engine_cs *engine = &dev_priv->engine[i];
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ for_each_engine(engine, dev_priv) {
if (engine->last_context) {
i915_gem_context_unpin(engine->last_context, engine);
engine->last_context = NULL;
}
}
- i915_gem_context_unreference(dctx);
- dev_priv->kernel_context = NULL;
-}
+ /* Force the GPU state to be restored on enabling */
+ if (!i915.enable_execlists) {
+ struct i915_gem_context *ctx;
-int i915_gem_context_enable(struct drm_i915_gem_request *req)
-{
- struct intel_engine_cs *engine = req->engine;
- int ret;
+ list_for_each_entry(ctx, &dev_priv->context_list, link) {
+ if (!i915_gem_context_is_default(ctx))
+ continue;
- if (i915.enable_execlists) {
- if (engine->init_context == NULL)
- return 0;
+ for_each_engine(engine, dev_priv)
+ ctx->engine[engine->id].initialised = false;
- ret = engine->init_context(req);
- } else
- ret = i915_switch_context(req);
+ ctx->remap_slice = ALL_L3_SLICES(dev_priv);
+ }
- if (ret) {
- DRM_ERROR("ring init context: %d\n", ret);
- return ret;
+ for_each_engine(engine, dev_priv) {
+ struct intel_context *kce =
+ &dev_priv->kernel_context->engine[engine->id];
+
+ kce->initialised = true;
+ }
}
+}
- return 0;
+void i915_gem_context_fini(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_gem_context *dctx = dev_priv->kernel_context;
+
+ lockdep_assert_held(&dev->struct_mutex);
+
+ i915_gem_context_unreference(dctx);
+ dev_priv->kernel_context = NULL;
+
+ ida_destroy(&dev_priv->context_hw_ida);
}
static int context_idr_cleanup(int id, void *p, void *data)
{
- struct intel_context *ctx = p;
+ struct i915_gem_context *ctx = p;
+ ctx->file_priv = ERR_PTR(-EBADF);
i915_gem_context_unreference(ctx);
return 0;
}
@@ -471,7 +522,7 @@ static int context_idr_cleanup(int id, void *p, void *data)
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
idr_init(&file_priv->context_idr);
@@ -491,31 +542,22 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
+ lockdep_assert_held(&dev->struct_mutex);
+
idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
idr_destroy(&file_priv->context_idr);
}
-struct intel_context *
-i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
-{
- struct intel_context *ctx;
-
- ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
- if (!ctx)
- return ERR_PTR(-ENOENT);
-
- return ctx;
-}
-
static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
+ struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *engine = req->engine;
u32 flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings =
/* Use an extended w/a on ivb+ if signalling from other rings */
- i915_semaphore_is_enabled(engine->dev) ?
- hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 :
+ i915_semaphore_is_enabled(dev_priv) ?
+ hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1 :
0;
int len, ret;
@@ -524,21 +566,21 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
* explicitly, so we rely on the value at ring init, stored in
* itlb_before_ctx_switch.
*/
- if (IS_GEN6(engine->dev)) {
+ if (IS_GEN6(dev_priv)) {
ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
if (ret)
return ret;
}
/* These flags are for resource streamer on HSW+ */
- if (IS_HASWELL(engine->dev) || INTEL_INFO(engine->dev)->gen >= 8)
+ if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
- else if (INTEL_INFO(engine->dev)->gen < 8)
+ else if (INTEL_GEN(dev_priv) < 8)
flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
len = 4;
- if (INTEL_INFO(engine->dev)->gen >= 7)
+ if (INTEL_GEN(dev_priv) >= 7)
len += 2 + (num_rings ? 4*num_rings + 6 : 0);
ret = intel_ring_begin(req, len);
@@ -546,14 +588,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
return ret;
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
- if (INTEL_INFO(engine->dev)->gen >= 7) {
+ if (INTEL_GEN(dev_priv) >= 7) {
intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
if (num_rings) {
struct intel_engine_cs *signaller;
intel_ring_emit(engine,
MI_LOAD_REGISTER_IMM(num_rings));
- for_each_engine(signaller, to_i915(engine->dev)) {
+ for_each_engine(signaller, dev_priv) {
if (signaller == engine)
continue;
@@ -568,7 +610,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
intel_ring_emit(engine, MI_NOOP);
intel_ring_emit(engine, MI_SET_CONTEXT);
intel_ring_emit(engine,
- i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
+ i915_gem_obj_ggtt_offset(req->ctx->engine[RCS].state) |
flags);
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
@@ -576,14 +618,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
*/
intel_ring_emit(engine, MI_NOOP);
- if (INTEL_INFO(engine->dev)->gen >= 7) {
+ if (INTEL_GEN(dev_priv) >= 7) {
if (num_rings) {
struct intel_engine_cs *signaller;
i915_reg_t last_reg = {}; /* keep gcc quiet */
intel_ring_emit(engine,
MI_LOAD_REGISTER_IMM(num_rings));
- for_each_engine(signaller, to_i915(engine->dev)) {
+ for_each_engine(signaller, dev_priv) {
if (signaller == engine)
continue;
@@ -609,45 +651,83 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
return ret;
}
-static inline bool skip_rcs_switch(struct intel_engine_cs *engine,
- struct intel_context *to)
+static int remap_l3(struct drm_i915_gem_request *req, int slice)
+{
+ u32 *remap_info = req->i915->l3_parity.remap_info[slice];
+ struct intel_engine_cs *engine = req->engine;
+ int i, ret;
+
+ if (!remap_info)
+ return 0;
+
+ ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
+ if (ret)
+ return ret;
+
+ /*
+ * Note: We do not worry about the concurrent register cacheline hang
+ * here because no other code should access these registers other than
+ * at initialization time.
+ */
+ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
+ for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
+ intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
+ intel_ring_emit(engine, remap_info[i]);
+ }
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
+
+ return 0;
+}
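
remap_l3() is an instance of the general MI_LOAD_REGISTER_IMM emission pattern. Reduced to a single register write, the same pattern looks like the sketch below (emit_lri_one is a hypothetical helper; reg and value are placeholders):

static int emit_lri_one(struct drm_i915_gem_request *req,
			i915_reg_t reg, u32 value)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	ret = intel_ring_begin(req, 4);	/* LRI(1) is 3 dwords + 1 pad */
	if (ret)
		return ret;

	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit_reg(engine, reg);
	intel_ring_emit(engine, value);
	intel_ring_emit(engine, MI_NOOP);	/* keep qword alignment */
	intel_ring_advance(engine);

	return 0;
}
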
+
+static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
+ struct intel_engine_cs *engine,
+ struct i915_gem_context *to)
{
if (to->remap_slice)
return false;
- if (!to->legacy_hw_ctx.initialized)
+ if (!to->engine[RCS].initialised)
return false;
- if (to->ppgtt &&
- !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
+ if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
return false;
return to == engine->last_context;
}
static bool
-needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to)
+needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
+ struct intel_engine_cs *engine,
+ struct i915_gem_context *to)
{
- if (!to->ppgtt)
+ if (!ppgtt)
return false;
+ /* Always load the ppgtt on first use */
+ if (!engine->last_context)
+ return true;
+
+ /* Same context without new entries, skip */
if (engine->last_context == to &&
- !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
+ !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
return false;
if (engine->id != RCS)
return true;
- if (INTEL_INFO(engine->dev)->gen < 8)
+ if (INTEL_GEN(engine->i915) < 8)
return true;
return false;
}
static bool
-needs_pd_load_post(struct intel_context *to, u32 hw_flags)
+needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
+ struct i915_gem_context *to,
+ u32 hw_flags)
{
- if (!to->ppgtt)
+ if (!ppgtt)
return false;
if (!IS_GEN8(to->i915))
@@ -661,18 +741,19 @@ needs_pd_load_post(struct intel_context *to, u32 hw_flags)
static int do_rcs_switch(struct drm_i915_gem_request *req)
{
- struct intel_context *to = req->ctx;
+ struct i915_gem_context *to = req->ctx;
struct intel_engine_cs *engine = req->engine;
- struct intel_context *from;
+ struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
+ struct i915_gem_context *from;
u32 hw_flags;
int ret, i;
- if (skip_rcs_switch(engine, to))
+ if (skip_rcs_switch(ppgtt, engine, to))
return 0;
/* Trying to pin first makes error handling easier. */
- ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
- get_context_alignment(engine->dev),
+ ret = i915_gem_obj_ggtt_pin(to->engine[RCS].state,
+ to->ggtt_alignment,
0);
if (ret)
return ret;
@@ -694,37 +775,32 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
*
* XXX: We need a real interface to do this instead of trickery.
*/
- ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
+ ret = i915_gem_object_set_to_gtt_domain(to->engine[RCS].state, false);
if (ret)
goto unpin_out;
- if (needs_pd_load_pre(engine, to)) {
+ if (needs_pd_load_pre(ppgtt, engine, to)) {
/* Older GENs and non-render rings still want the load first:
* "PP_DCLV followed by PP_DIR_BASE register through Load
* Register Immediate commands in Ring Buffer before submitting
* a context." */
trace_switch_mm(engine, to);
- ret = to->ppgtt->switch_mm(to->ppgtt, req);
+ ret = ppgtt->switch_mm(ppgtt, req);
if (ret)
goto unpin_out;
}
- if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
+ if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
/* NB: If we inhibit the restore, the context is not allowed to
* die because future work may end up depending on valid address
* space. This means we must enforce that a page table load
* occurs in that case. */
hw_flags = MI_RESTORE_INHIBIT;
- else if (to->ppgtt &&
- intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)
+ else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings)
hw_flags = MI_FORCE_RESTORE;
else
hw_flags = 0;
- /* We should never emit switch_mm more than once */
- WARN_ON(needs_pd_load_pre(engine, to) &&
- needs_pd_load_post(to, hw_flags));
-
if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
ret = mi_set_context(req, hw_flags);
if (ret)
@@ -738,8 +814,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from != NULL) {
- from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
- i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
+ from->engine[RCS].state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+ i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->engine[RCS].state), req);
/* As long as MI_SET_CONTEXT is serializing, i.e. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
@@ -747,10 +823,10 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
* able to defer doing this until we know the object would be
* swapped, but there is no way to do that yet.
*/
- from->legacy_hw_ctx.rcs_state->dirty = 1;
+ from->engine[RCS].state->dirty = 1;
/* obj is kept alive until the next request by its active ref */
- i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
+ i915_gem_object_ggtt_unpin(from->engine[RCS].state);
i915_gem_context_unreference(from);
}
i915_gem_context_reference(to);
@@ -759,9 +835,9 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
/* GEN8 does *not* require an explicit reload if the PDPs have been
* setup, and we do not wish to move them.
*/
- if (needs_pd_load_post(to, hw_flags)) {
+ if (needs_pd_load_post(ppgtt, to, hw_flags)) {
trace_switch_mm(engine, to);
- ret = to->ppgtt->switch_mm(to->ppgtt, req);
+ ret = ppgtt->switch_mm(ppgtt, req);
/* The hardware context switch is emitted, but we haven't
* actually changed the state - so it's probably safe to bail
* here. Still, let the user know something dangerous has
@@ -771,33 +847,33 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
return ret;
}
- if (to->ppgtt)
- to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
+ if (ppgtt)
+ ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
for (i = 0; i < MAX_L3_SLICES; i++) {
if (!(to->remap_slice & (1<<i)))
continue;
- ret = i915_gem_l3_remap(req, i);
+ ret = remap_l3(req, i);
if (ret)
return ret;
to->remap_slice &= ~(1<<i);
}
- if (!to->legacy_hw_ctx.initialized) {
+ if (!to->engine[RCS].initialised) {
if (engine->init_context) {
ret = engine->init_context(req);
if (ret)
return ret;
}
- to->legacy_hw_ctx.initialized = true;
+ to->engine[RCS].initialised = true;
}
return 0;
unpin_out:
- i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
+ i915_gem_object_ggtt_unpin(to->engine[RCS].state);
return ret;
}
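
The "trying to pin first makes error handling easier" comment in do_rcs_switch() is worth spelling out: acquire the resource whose failure is cheapest to unwind before doing anything irreversible. A schematic sketch, where do_fallible_work() is a hypothetical stand-in for the switch itself:

static int pinned_operation(struct drm_i915_gem_object *obj, u32 align)
{
	int ret;

	ret = i915_gem_obj_ggtt_pin(obj, align, 0);
	if (ret)
		return ret;		/* failed before touching anything */

	ret = do_fallible_work(obj);	/* hypothetical mid-step */
	if (ret)
		goto unpin_out;

	/* On success the pin is deliberately kept, mirroring
	 * do_rcs_switch() where the old context is unpinned only on
	 * the next switch. */
	return 0;

unpin_out:
	i915_gem_object_ggtt_unpin(obj);
	return ret;
}
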
@@ -817,25 +893,24 @@ unpin_out:
int i915_switch_context(struct drm_i915_gem_request *req)
{
struct intel_engine_cs *engine = req->engine;
- struct drm_i915_private *dev_priv = req->i915;
WARN_ON(i915.enable_execlists);
- WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+ lockdep_assert_held(&req->i915->drm.struct_mutex);
- if (engine->id != RCS ||
- req->ctx->legacy_hw_ctx.rcs_state == NULL) {
- struct intel_context *to = req->ctx;
+ if (!req->ctx->engine[engine->id].state) {
+ struct i915_gem_context *to = req->ctx;
+ struct i915_hw_ppgtt *ppgtt =
+ to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
- if (needs_pd_load_pre(engine, to)) {
+ if (needs_pd_load_pre(ppgtt, engine, to)) {
int ret;
trace_switch_mm(engine, to);
- ret = to->ppgtt->switch_mm(to->ppgtt, req);
+ ret = ppgtt->switch_mm(ppgtt, req);
if (ret)
return ret;
- /* Doing a PD load always reloads the page dirs */
- to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
+ ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
}
if (to != engine->last_context) {
@@ -861,7 +936,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_context_create *args = data;
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
int ret;
if (!contexts_enabled(dev))
@@ -890,7 +965,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_context_destroy *args = data;
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
int ret;
if (args->pad != 0)
@@ -903,13 +978,13 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- ctx = i915_gem_context_get(file_priv, args->ctx_id);
+ ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
return PTR_ERR(ctx);
}
- idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
+ idr_remove(&file_priv->context_idr, ctx->user_handle);
i915_gem_context_unreference(ctx);
mutex_unlock(&dev->struct_mutex);
@@ -922,14 +997,14 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_file_private *file_priv = file->driver_priv;
struct drm_i915_gem_context_param *args = data;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
- ctx = i915_gem_context_get(file_priv, args->ctx_id);
+ ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
return PTR_ERR(ctx);
@@ -951,6 +1026,9 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
else
args->value = to_i915(dev)->ggtt.base.total;
break;
+ case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
+ args->value = !!(ctx->flags & CONTEXT_NO_ERROR_CAPTURE);
+ break;
default:
ret = -EINVAL;
break;
@@ -965,14 +1043,14 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_file_private *file_priv = file->driver_priv;
struct drm_i915_gem_context_param *args = data;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
- ctx = i915_gem_context_get(file_priv, args->ctx_id);
+ ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
return PTR_ERR(ctx);
@@ -996,6 +1074,16 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
}
break;
+ case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
+ if (args->size) {
+ ret = -EINVAL;
+ } else {
+ if (args->value)
+ ctx->flags |= CONTEXT_NO_ERROR_CAPTURE;
+ else
+ ctx->flags &= ~CONTEXT_NO_ERROR_CAPTURE;
+ }
+ break;
default:
ret = -EINVAL;
break;
@@ -1004,3 +1092,42 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
return ret;
}
+
+int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_reset_stats *args = data;
+ struct i915_ctx_hang_stats *hs;
+ struct i915_gem_context *ctx;
+ int ret;
+
+ if (args->flags || args->pad)
+ return -EINVAL;
+
+ if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
+ if (IS_ERR(ctx)) {
+ mutex_unlock(&dev->struct_mutex);
+ return PTR_ERR(ctx);
+ }
+ hs = &ctx->hang_stats;
+
+ if (capable(CAP_SYS_ADMIN))
+ args->reset_count = i915_reset_count(&dev_priv->gpu_error);
+ else
+ args->reset_count = 0;
+
+ args->batch_active = hs->batch_active;
+ args->batch_pending = hs->batch_pending;
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
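
From userspace this ioctl is reached through the i915 UAPI in <drm/i915_drm.h>. A minimal consumer sketch, assuming fd is an open DRM device node and ctx a context handle owned by the caller:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int query_reset_stats(int fd, __u32 ctx)
{
	struct drm_i915_reset_stats stats;

	memset(&stats, 0, sizeof(stats));
	stats.ctx_id = ctx;	/* flags and pad must stay zero */

	if (ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats))
		return -1;

	/* reset_count is only filled in for CAP_SYS_ADMIN callers */
	return stats.batch_active + stats.batch_pending;
}
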
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.h b/drivers/gpu/drm/i915/i915_gem_dmabuf.h
new file mode 100644
index 000000000..91315557e
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _I915_GEM_DMABUF_H_
+#define _I915_GEM_DMABUF_H_
+
+#include <linux/dma-buf.h>
+
+static inline struct reservation_object *
+i915_gem_object_get_dmabuf_resv(struct drm_i915_gem_object *obj)
+{
+ struct dma_buf *dma_buf;
+
+ if (obj->base.dma_buf)
+ dma_buf = obj->base.dma_buf;
+ else if (obj->base.import_attach)
+ dma_buf = obj->base.import_attach->dmabuf;
+ else
+ return NULL;
+
+ return dma_buf->resv;
+}
+
+#endif
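
One plausible consumer of this helper, sketched under the assumption that the caller only cares whether foreign fences are outstanding on a shared dma-buf (object_maybe_busy is a hypothetical name):

static bool object_maybe_busy(struct drm_i915_gem_object *obj)
{
	struct reservation_object *resv;

	resv = i915_gem_object_get_dmabuf_resv(obj);
	if (!resv)
		return false;	/* not exported/imported: no foreign fences */

	/* true only if every fence, shared and exclusive, has signaled */
	return !reservation_object_test_signaled_rcu(resv, true);
}
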
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index ea1f8d1bd..3c1280ec7 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -33,6 +33,37 @@
#include "intel_drv.h"
#include "i915_trace.h"
+static int switch_to_pinned_context(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+
+ if (i915.enable_execlists)
+ return 0;
+
+ for_each_engine(engine, dev_priv) {
+ struct drm_i915_gem_request *req;
+ int ret;
+
+ if (engine->last_context == NULL)
+ continue;
+
+ if (engine->last_context == dev_priv->kernel_context)
+ continue;
+
+ req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ ret = i915_switch_context(req);
+ i915_add_request_no_flush(req);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+
static bool
mark_free(struct i915_vma *vma, struct list_head *unwind)
{
@@ -150,11 +181,19 @@ none:
/* Only idle the GPU and repeat the search once */
if (pass++ == 0) {
- ret = i915_gpu_idle(dev);
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ if (i915_is_ggtt(vm)) {
+ ret = switch_to_pinned_context(dev_priv);
+ if (ret)
+ return ret;
+ }
+
+ ret = i915_gem_wait_for_idle(dev_priv);
if (ret)
return ret;
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev_priv);
goto search_again;
}
@@ -261,11 +300,19 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
trace_i915_gem_evict_vm(vm);
if (do_idle) {
- ret = i915_gpu_idle(vm->dev);
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
+
+ if (i915_is_ggtt(vm)) {
+ ret = switch_to_pinned_context(dev_priv);
+ if (ret)
+ return ret;
+ }
+
+ ret = i915_gem_wait_for_idle(dev_priv);
if (ret)
return ret;
- i915_gem_retire_requests(vm->dev);
+ i915_gem_retire_requests(dev_priv);
WARN_ON(!list_empty(&vm->active_list));
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 33df74d98..b35e5b647 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -714,7 +714,7 @@ eb_vma_misplaced(struct i915_vma *vma)
static int
i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
struct list_head *vmas,
- struct intel_context *ctx,
+ struct i915_gem_context *ctx,
bool *need_relocs)
{
struct drm_i915_gem_object *obj;
@@ -722,7 +722,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
struct i915_address_space *vm;
struct list_head ordered_vmas;
struct list_head pinned_vmas;
- bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4;
+ bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
int retry;
i915_gem_retire_requests_ring(engine);
@@ -826,7 +826,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct intel_engine_cs *engine,
struct eb_vmas *eb,
struct drm_i915_gem_exec_object2 *exec,
- struct intel_context *ctx)
+ struct i915_gem_context *ctx)
{
struct drm_i915_gem_relocation_entry *reloc;
struct i915_address_space *vm;
@@ -943,8 +943,6 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
{
const unsigned other_rings = ~intel_engine_flag(req->engine);
struct i915_vma *vma;
- uint32_t flush_domains = 0;
- bool flush_chipset = false;
int ret;
list_for_each_entry(vma, vmas, exec_list) {
@@ -957,16 +955,11 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
}
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
- flush_chipset |= i915_gem_clflush_object(obj, false);
-
- flush_domains |= obj->base.write_domain;
+ i915_gem_clflush_object(obj, false);
}
- if (flush_chipset)
- i915_gem_chipset_flush(req->engine->dev);
-
- if (flush_domains & I915_GEM_DOMAIN_GTT)
- wmb();
+ /* Unconditionally flush any chipset caches (for streaming writes). */
+ i915_gem_chipset_flush(req->engine->i915);
/* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
@@ -1063,17 +1056,17 @@ validate_exec_list(struct drm_device *dev,
return 0;
}
-static struct intel_context *
+static struct i915_gem_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
struct intel_engine_cs *engine, const u32 ctx_id)
{
- struct intel_context *ctx = NULL;
+ struct i915_gem_context *ctx = NULL;
struct i915_ctx_hang_stats *hs;
if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
return ERR_PTR(-EINVAL);
- ctx = i915_gem_context_get(file->driver_priv, ctx_id);
+ ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
if (IS_ERR(ctx))
return ctx;
@@ -1083,14 +1076,6 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
return ERR_PTR(-EIO);
}
- if (i915.enable_execlists && !ctx->engine[engine->id].state) {
- int ret = intel_lr_context_deferred_alloc(ctx, engine);
- if (ret) {
- DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
- return ERR_PTR(ret);
- }
- }
-
return ctx;
}
@@ -1125,7 +1110,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
i915_gem_request_assign(&obj->last_fenced_req, req);
if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
- struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ struct drm_i915_private *dev_priv = engine->i915;
list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
&dev_priv->mm.fence_list);
}
@@ -1150,7 +1135,7 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
struct drm_i915_gem_request *req)
{
struct intel_engine_cs *engine = req->engine;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret, i;
if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) {
@@ -1233,7 +1218,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
{
struct drm_device *dev = params->dev;
struct intel_engine_cs *engine = params->engine;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u64 exec_start, exec_len;
int instp_mode;
u32 instp_mask;
@@ -1336,10 +1321,10 @@ gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
/* Check whether the file_priv has already selected one ring. */
if ((int)file_priv->bsd_ring < 0) {
/* If not, use the ping-pong mechanism to select one. */
- mutex_lock(&dev_priv->dev->struct_mutex);
+ mutex_lock(&dev_priv->drm.struct_mutex);
file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
dev_priv->mm.bsd_ring_dispatch_index ^= 1;
- mutex_unlock(&dev_priv->dev->struct_mutex);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
}
return file_priv->bsd_ring;
@@ -1436,7 +1421,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_gem_object *batch_obj;
struct drm_i915_gem_exec_object2 shadow_exec_entry;
struct intel_engine_cs *engine;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
struct i915_address_space *vm;
struct i915_execbuffer_params params_master; /* XXX: will be removed later */
struct i915_execbuffer_params *params = &params_master;
@@ -1454,7 +1439,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
dispatch_flags = 0;
if (args->flags & I915_EXEC_SECURE) {
- if (!file->is_master || !capable(CAP_SYS_ADMIN))
+ if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
return -EPERM;
dispatch_flags |= I915_DISPATCH_SECURE;
@@ -1485,6 +1470,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
dispatch_flags |= I915_DISPATCH_RS;
}
+ /* Take a local wakeref for preparing to dispatch the execbuf as
+ * we expect to access the hardware fairly frequently in the
+ * process. Upon first dispatch, we acquire another prolonged
+ * wakeref that we hold until the GPU has been idle for at least
+ * 100ms.
+ */
intel_runtime_pm_get(dev_priv);
ret = i915_mutex_lock_interruptible(dev);
@@ -1561,7 +1552,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
batch_obj,
args->batch_start_offset,
args->batch_len,
- file->is_master);
+ drm_is_current_master(file));
if (IS_ERR(parsed_batch_obj)) {
ret = PTR_ERR(parsed_batch_obj);
goto err;
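
The wakeref comment added above boils down to a strict pairing discipline: every intel_runtime_pm_get() must be balanced by an intel_runtime_pm_put(). A reduced sketch, with do_hw_access() standing in for the actual register traffic:

static int with_runtime_pm(struct drm_i915_private *dev_priv)
{
	int ret;

	intel_runtime_pm_get(dev_priv);		/* block runtime suspend */
	ret = do_hw_access(dev_priv);		/* hypothetical helper */
	intel_runtime_pm_put(dev_priv);		/* re-enable autosuspend */

	return ret;
}
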
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index a2b938ec0..251d7a95a 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -58,7 +58,7 @@
static void i965_write_fence_reg(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t fence_reg_lo, fence_reg_hi;
int fence_pitch_shift;
@@ -117,7 +117,7 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
static void i915_write_fence_reg(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 val;
if (obj) {
@@ -156,7 +156,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
static void i830_write_fence_reg(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t val;
if (obj) {
@@ -193,7 +193,7 @@ inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
static void i915_gem_write_fence(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/* Ensure that all CPU reads are completed before installing a fence
* and all writes before removing the fence.
@@ -229,7 +229,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_fence_reg *fence,
bool enable)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
int reg = fence_number(dev_priv, fence);
i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
@@ -286,7 +286,7 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
int
i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct drm_i915_fence_reg *fence;
int ret;
@@ -311,7 +311,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
static struct drm_i915_fence_reg *
i915_find_fence_reg(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_fence_reg *reg, *avail;
int i;
@@ -367,7 +367,7 @@ int
i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
bool enable = obj->tiling_mode != I915_TILING_NONE;
struct drm_i915_fence_reg *reg;
int ret;
@@ -433,7 +433,7 @@ bool
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
if (obj->fence_reg != I915_FENCE_REG_NONE) {
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
WARN_ON(!ggtt_vma ||
@@ -457,7 +457,7 @@ void
i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
{
if (obj->fence_reg != I915_FENCE_REG_NONE) {
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
dev_priv->fence_regs[obj->fence_reg].pin_count--;
}
@@ -472,7 +472,7 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
*/
void i915_gem_restore_fences(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int i;
for (i = 0; i < dev_priv->num_fence_regs; i++) {
@@ -549,7 +549,7 @@ void i915_gem_restore_fences(struct drm_device *dev)
void
i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
@@ -745,15 +745,15 @@ i915_gem_swizzle_page(struct page *page)
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
- struct sg_page_iter sg_iter;
+ struct sgt_iter sgt_iter;
+ struct page *page;
int i;
if (obj->bit_17 == NULL)
return;
i = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
- struct page *page = sg_page_iter_page(&sg_iter);
+ for_each_sgt_page(page, sgt_iter, obj->pages) {
char new_bit_17 = page_to_phys(page) >> 17;
if ((new_bit_17 & 0x1) !=
(test_bit(i, obj->bit_17) != 0)) {
@@ -775,7 +775,8 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
- struct sg_page_iter sg_iter;
+ struct sgt_iter sgt_iter;
+ struct page *page;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;
@@ -790,8 +791,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
}
i = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
- if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
+
+ for_each_sgt_page(page, sgt_iter, obj->pages) {
+ if (page_to_phys(page) & (1 << 17))
__set_bit(i, obj->bit_17);
else
__clear_bit(i, obj->bit_17);
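
The for_each_sgt_page conversions above replace manual sg_page_iter bookkeeping with the new sgt iterators. In isolation the pattern looks like this sketch, which reuses the bit-17 test from the swizzle code purely as an example:

static unsigned long count_bit17_pages(struct drm_i915_gem_object *obj)
{
	struct sgt_iter sgt_iter;
	struct page *page;
	unsigned long n = 0;

	/* Walk every backing page of the object's sg_table. */
	for_each_sgt_page(page, sgt_iter, obj->pages)
		if (page_to_phys(page) & (1 << 17))
			n++;

	return n;
}
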
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index e856f7906..f38ceffd8 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -93,6 +93,13 @@
*
*/
+static inline struct i915_ggtt *
+i915_vm_to_ggtt(struct i915_address_space *vm)
+{
+ GEM_BUG_ON(!i915_is_ggtt(vm));
+ return container_of(vm, struct i915_ggtt, base);
+}
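
The helper above is a container_of() downcast guarded by GEM_BUG_ON; misuse on an address space that is not the GGTT trips on debug builds. Typical usage, sketched:

if (i915_is_ggtt(vma->vm)) {
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);

	/* GGTT-only state is now reachable, e.g. ggtt->mappable_end */
}
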
+
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);
@@ -103,25 +110,32 @@ const struct i915_ggtt_view i915_ggtt_view_rotated = {
.type = I915_GGTT_VIEW_ROTATED,
};
-static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
+int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
+ int enable_ppgtt)
{
bool has_aliasing_ppgtt;
bool has_full_ppgtt;
bool has_full_48bit_ppgtt;
- has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
- has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
- has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9;
+ has_aliasing_ppgtt = INTEL_GEN(dev_priv) >= 6;
+ has_full_ppgtt = INTEL_GEN(dev_priv) >= 7;
+ has_full_48bit_ppgtt =
+ IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9;
+
+ if (intel_vgpu_active(dev_priv)) {
+ /* emulation is too hard */
+ has_full_ppgtt = false;
+ has_full_48bit_ppgtt = false;
+ }
- if (intel_vgpu_active(dev))
- has_full_ppgtt = false; /* emulation is too hard */
+ if (!has_aliasing_ppgtt)
+ return 0;
/*
* We don't allow disabling PPGTT for gen9+ as it's a requirement for
* execlists, the sole mechanism available to submit work.
*/
- if (INTEL_INFO(dev)->gen < 9 &&
- (enable_ppgtt == 0 || !has_aliasing_ppgtt))
+ if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
return 0;
if (enable_ppgtt == 1)
@@ -135,19 +149,19 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
#ifdef CONFIG_INTEL_IOMMU
/* Disable ppgtt on SNB if VT-d is on. */
- if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
+ if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) {
DRM_INFO("Disabling PPGTT because VT-d is on\n");
return 0;
}
#endif
/* Early VLV doesn't have this */
- if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) {
+ if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
return 0;
}
- if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
+ if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists && has_full_ppgtt)
return has_full_48bit_ppgtt ? 3 : 2;
else
return has_aliasing_ppgtt ? 1 : 0;
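
The sanitized return value encodes the selected PPGTT mode. An illustrative decode (the switch itself is not in the driver; the meanings match the checks above):

switch (intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt)) {
case 0:	/* no PPGTT: everything lives in the global GTT */
case 1:	/* aliasing PPGTT: a single PPGTT mirroring the GGTT */
case 2:	/* full PPGTT: per-file address spaces */
case 3:	/* full 48-bit PPGTT: 4-level page tables */
	break;
}
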
@@ -866,6 +880,7 @@ static void gen8_free_page_tables(struct drm_device *dev,
static int gen8_init_scratch(struct i915_address_space *vm)
{
struct drm_device *dev = vm->dev;
+ int ret;
vm->scratch_page = alloc_scratch_page(dev);
if (IS_ERR(vm->scratch_page))
@@ -873,24 +888,21 @@ static int gen8_init_scratch(struct i915_address_space *vm)
vm->scratch_pt = alloc_pt(dev);
if (IS_ERR(vm->scratch_pt)) {
- free_scratch_page(dev, vm->scratch_page);
- return PTR_ERR(vm->scratch_pt);
+ ret = PTR_ERR(vm->scratch_pt);
+ goto free_scratch_page;
}
vm->scratch_pd = alloc_pd(dev);
if (IS_ERR(vm->scratch_pd)) {
- free_pt(dev, vm->scratch_pt);
- free_scratch_page(dev, vm->scratch_page);
- return PTR_ERR(vm->scratch_pd);
+ ret = PTR_ERR(vm->scratch_pd);
+ goto free_pt;
}
if (USES_FULL_48BIT_PPGTT(dev)) {
vm->scratch_pdp = alloc_pdp(dev);
if (IS_ERR(vm->scratch_pdp)) {
- free_pd(dev, vm->scratch_pd);
- free_pt(dev, vm->scratch_pt);
- free_scratch_page(dev, vm->scratch_page);
- return PTR_ERR(vm->scratch_pdp);
+ ret = PTR_ERR(vm->scratch_pdp);
+ goto free_pd;
}
}
@@ -900,6 +912,15 @@ static int gen8_init_scratch(struct i915_address_space *vm)
gen8_initialize_pdp(vm, vm->scratch_pdp);
return 0;
+
+free_pd:
+ free_pd(dev, vm->scratch_pd);
+free_pt:
+ free_pt(dev, vm->scratch_pt);
+free_scratch_page:
+ free_scratch_page(dev, vm->scratch_page);
+
+ return ret;
}
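
The rewritten error path above is the canonical goto-unwind ladder: each label frees exactly what was acquired before the failure, in reverse order of acquisition. In schematic form, with hypothetical allocators and placeholder types:

static int setup_scratch(struct device *dev)
{
	struct page *pg;
	void *pt, *pd;			/* placeholders for the table types */
	int ret;

	pg = alloc_scratch(dev);	/* hypothetical allocators */
	if (IS_ERR(pg))
		return PTR_ERR(pg);

	pt = alloc_table(dev);
	if (IS_ERR(pt)) {
		ret = PTR_ERR(pt);
		goto err_page;
	}

	pd = alloc_dir(dev);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err_table;
	}

	return 0;

err_table:
	free_table(dev, pt);		/* reverse order of acquisition */
err_page:
	free_scratch(dev, pg);
	return ret;
}
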
static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
@@ -978,7 +999,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- if (intel_vgpu_active(vm->dev))
+ if (intel_vgpu_active(to_i915(vm->dev)))
gen8_ppgtt_notify_vgt(ppgtt, false);
if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
@@ -1529,14 +1550,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
0, 0,
GEN8_PML4E_SHIFT);
- if (intel_vgpu_active(ppgtt->base.dev)) {
+ if (intel_vgpu_active(to_i915(ppgtt->base.dev))) {
ret = gen8_preallocate_top_level_pdps(ppgtt);
if (ret)
goto free_scratch;
}
}
- if (intel_vgpu_active(ppgtt->base.dev))
+ if (intel_vgpu_active(to_i915(ppgtt->base.dev)))
gen8_ppgtt_notify_vgt(ppgtt, true);
return 0;
@@ -1552,13 +1573,13 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
struct i915_page_table *unused;
gen6_pte_t scratch_pte;
uint32_t pd_entry;
- uint32_t pte, pde, temp;
+ uint32_t pte, pde;
uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
I915_CACHE_LLC, true, 0);
- gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) {
+ gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
u32 expected;
gen6_pte_t *pt_vaddr;
const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
@@ -1622,9 +1643,9 @@ static void gen6_write_page_range(struct drm_i915_private *dev_priv,
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_page_table *pt;
- uint32_t pde, temp;
+ uint32_t pde;
- gen6_for_each_pde(pt, pd, start, length, temp, pde)
+ gen6_for_each_pde(pt, pd, start, length, pde)
gen6_write_pde(pd, pde, pt);
/* Make sure write is complete before other code can use this page
@@ -1665,17 +1686,6 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
return 0;
}
-static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_request *req)
-{
- struct intel_engine_cs *engine = req->engine;
- struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
-
- I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
- I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
- return 0;
-}
-
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
@@ -1713,21 +1723,16 @@ static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
struct intel_engine_cs *engine = req->engine;
- struct drm_device *dev = ppgtt->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
+ struct drm_i915_private *dev_priv = req->i915;
I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
-
- POSTING_READ(RING_PP_DIR_DCLV(engine));
-
return 0;
}
static void gen8_ppgtt_enable(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
for_each_engine(engine, dev_priv) {
@@ -1739,7 +1744,7 @@ static void gen8_ppgtt_enable(struct drm_device *dev)
static void gen7_ppgtt_enable(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
uint32_t ecochk, ecobits;
@@ -1764,7 +1769,7 @@ static void gen7_ppgtt_enable(struct drm_device *dev)
static void gen6_ppgtt_enable(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t ecochk, gab_ctl, ecobits;
ecobits = I915_READ(GAC_ECO_BITS);
@@ -1821,20 +1826,19 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
enum i915_cache_level cache_level, u32 flags)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- gen6_pte_t *pt_vaddr;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned act_pt = first_entry / GEN6_PTES;
unsigned act_pte = first_entry % GEN6_PTES;
- struct sg_page_iter sg_iter;
+ gen6_pte_t *pt_vaddr = NULL;
+ struct sgt_iter sgt_iter;
+ dma_addr_t addr;
- pt_vaddr = NULL;
- for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
+ for_each_sgt_dma(addr, sgt_iter, pages) {
if (pt_vaddr == NULL)
pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
pt_vaddr[act_pte] =
- vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
- cache_level, true, flags);
+ vm->pte_encode(addr, cache_level, true, flags);
if (++act_pte == GEN6_PTES) {
kunmap_px(ppgtt, pt_vaddr);
@@ -1843,6 +1847,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
act_pte = 0;
}
}
+
if (pt_vaddr)
kunmap_px(ppgtt, pt_vaddr);
}
@@ -1857,7 +1862,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_table *pt;
uint32_t start, length, start_save, length_save;
- uint32_t pde, temp;
+ uint32_t pde;
int ret;
if (WARN_ON(start_in + length_in > ppgtt->base.total))
@@ -1873,7 +1878,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
* need allocation. The second stage marks the ptes in use within
* the page tables.
*/
- gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
+ gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
if (pt != vm->scratch_pt) {
WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
continue;
@@ -1898,7 +1903,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
start = start_save;
length = length_save;
- gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
+ gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
DECLARE_BITMAP(tmp_bitmap, GEN6_PTES);
bitmap_zero(tmp_bitmap, GEN6_PTES);
@@ -1967,15 +1972,16 @@ static void gen6_free_scratch(struct i915_address_space *vm)
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct i915_page_directory *pd = &ppgtt->pd;
+ struct drm_device *dev = vm->dev;
struct i915_page_table *pt;
uint32_t pde;
drm_mm_remove_node(&ppgtt->node);
- gen6_for_all_pdes(pt, ppgtt, pde) {
+ gen6_for_all_pdes(pt, pd, pde)
if (pt != vm->scratch_pt)
- free_pt(ppgtt->base.dev, pt);
- }
+ free_pt(dev, pt);
gen6_free_scratch(vm);
}
@@ -2041,9 +2047,9 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
uint64_t start, uint64_t length)
{
struct i915_page_table *unused;
- uint32_t pde, temp;
+ uint32_t pde;
- gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde)
+ gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
}
@@ -2055,18 +2061,15 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
int ret;
ppgtt->base.pte_encode = ggtt->base.pte_encode;
- if (IS_GEN6(dev)) {
+ if (intel_vgpu_active(dev_priv) || IS_GEN6(dev))
ppgtt->switch_mm = gen6_mm_switch;
- } else if (IS_HASWELL(dev)) {
+ else if (IS_HASWELL(dev))
ppgtt->switch_mm = hsw_mm_switch;
- } else if (IS_GEN7(dev)) {
+ else if (IS_GEN7(dev))
ppgtt->switch_mm = gen7_mm_switch;
- } else
+ else
BUG();
- if (intel_vgpu_active(dev))
- ppgtt->switch_mm = vgpu_mm_switch;
-
ret = gen6_ppgtt_alloc(ppgtt);
if (ret)
return ret;
@@ -2115,7 +2118,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
struct drm_i915_private *dev_priv)
{
drm_mm_init(&vm->mm, vm->start, vm->total);
- vm->dev = dev_priv->dev;
+ vm->dev = &dev_priv->drm;
INIT_LIST_HEAD(&vm->active_list);
INIT_LIST_HEAD(&vm->inactive_list);
list_add_tail(&vm->global_link, &dev_priv->vm_list);
@@ -2123,7 +2126,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
static void gtt_write_workarounds(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/* This function is for gtt related workarounds. This function is
* called on driver load and after a GPU reset, so you can place
@@ -2140,9 +2143,9 @@ static void gtt_write_workarounds(struct drm_device *dev)
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
}
-int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
+static int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
ret = __hw_ppgtt_init(dev, ppgtt);
@@ -2179,20 +2182,6 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
return 0;
}
-int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
-{
- struct drm_i915_private *dev_priv = req->i915;
- struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-
- if (i915.enable_execlists)
- return 0;
-
- if (!ppgtt)
- return 0;
-
- return ppgtt->switch_mm(ppgtt, req);
-}
-
struct i915_hw_ppgtt *
i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
{
@@ -2257,8 +2246,8 @@ static bool do_idling(struct drm_i915_private *dev_priv)
if (unlikely(ggtt->do_idle_maps)) {
dev_priv->mm.interruptible = false;
- if (i915_gpu_idle(dev_priv->dev)) {
- DRM_ERROR("Couldn't idle GPU\n");
+ if (i915_gem_wait_for_idle(dev_priv)) {
+ DRM_ERROR("Failed to wait for idle; VT-d may hang.\n");
/* Wait a bit, in hopes it avoids the hang */
udelay(10);
}
@@ -2275,12 +2264,11 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
dev_priv->mm.interruptible = interruptible;
}
-void i915_check_and_clear_faults(struct drm_device *dev)
+void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_INFO(dev_priv)->gen < 6)
return;
for_each_engine(engine, dev_priv) {
@@ -2324,7 +2312,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
if (INTEL_INFO(dev)->gen < 6)
return;
- i915_check_and_clear_faults(dev);
+ i915_check_and_clear_faults(dev_priv);
ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
true);
@@ -2352,29 +2340,49 @@ static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
#endif
}
+static void gen8_ggtt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ uint64_t offset,
+ enum i915_cache_level level,
+ u32 unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ gen8_pte_t __iomem *pte =
+ (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
+ (offset >> PAGE_SHIFT);
+ int rpm_atomic_seq;
+
+ rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
+
+ gen8_set_pte(pte, gen8_pte_encode(addr, level, true));
+
+ I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+ POSTING_READ(GFX_FLSH_CNTL_GEN6);
+
+ assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
+}
+
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
uint64_t start,
enum i915_cache_level level, u32 unused)
{
struct drm_i915_private *dev_priv = to_i915(vm->dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- unsigned first_entry = start >> PAGE_SHIFT;
- gen8_pte_t __iomem *gtt_entries =
- (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
- int i = 0;
- struct sg_page_iter sg_iter;
- dma_addr_t addr = 0; /* shut up gcc */
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ struct sgt_iter sgt_iter;
+ gen8_pte_t __iomem *gtt_entries;
+ gen8_pte_t gtt_entry;
+ dma_addr_t addr;
int rpm_atomic_seq;
+ int i = 0;
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
- for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
- addr = sg_dma_address(sg_iter.sg) +
- (sg_iter.sg_pgoffset << PAGE_SHIFT);
- gen8_set_pte(&gtt_entries[i],
- gen8_pte_encode(addr, level, true));
- i++;
+ gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
+
+ for_each_sgt_dma(addr, sgt_iter, st) {
+ gtt_entry = gen8_pte_encode(addr, level, true);
+ gen8_set_pte(&gtt_entries[i++], gtt_entry);
}
/*
@@ -2385,8 +2393,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
* hardware should work, we must keep this posting read for paranoia.
*/
if (i != 0)
- WARN_ON(readq(&gtt_entries[i-1])
- != gen8_pte_encode(addr, level, true));
+ WARN_ON(readq(&gtt_entries[i-1]) != gtt_entry);
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
@@ -2424,6 +2431,28 @@ static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm,
stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL);
}
+static void gen6_ggtt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ uint64_t offset,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ gen6_pte_t __iomem *pte =
+ (gen6_pte_t __iomem *)dev_priv->ggtt.gsm +
+ (offset >> PAGE_SHIFT);
+ int rpm_atomic_seq;
+
+ rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
+
+ iowrite32(vm->pte_encode(addr, level, true, flags), pte);
+
+ I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+ POSTING_READ(GFX_FLSH_CNTL_GEN6);
+
+ assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
+}
+
/*
* Binds an object into the global gtt with the specified cache level. The object
* will be accessible to the GPU via commands whose operands reference offsets
@@ -2436,21 +2465,21 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
enum i915_cache_level level, u32 flags)
{
struct drm_i915_private *dev_priv = to_i915(vm->dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- unsigned first_entry = start >> PAGE_SHIFT;
- gen6_pte_t __iomem *gtt_entries =
- (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
- int i = 0;
- struct sg_page_iter sg_iter;
- dma_addr_t addr = 0;
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ struct sgt_iter sgt_iter;
+ gen6_pte_t __iomem *gtt_entries;
+ gen6_pte_t gtt_entry;
+ dma_addr_t addr;
int rpm_atomic_seq;
+ int i = 0;
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
- for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
- addr = sg_page_iter_dma_address(&sg_iter);
- iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
- i++;
+ gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
+
+ for_each_sgt_dma(addr, sgt_iter, st) {
+ gtt_entry = vm->pte_encode(addr, level, true, flags);
+ iowrite32(gtt_entry, &gtt_entries[i++]);
}
/* XXX: This serves as a posting read to make sure that the PTE has
@@ -2459,10 +2488,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
* of NUMA access patterns. Therefore, even with the way we assume
* hardware should work, we must keep this posting read for paranoia.
*/
- if (i != 0) {
- unsigned long gtt = readl(&gtt_entries[i-1]);
- WARN_ON(gtt != vm->pte_encode(addr, level, true, flags));
- }
+ if (i != 0)
+ WARN_ON(readl(&gtt_entries[i-1]) != gtt_entry);
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
@@ -2474,13 +2501,20 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
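
Both insert_entries paths now keep the last encoded PTE in a local (gtt_entry) so the trailing posting read can be compared against the value that was actually written, instead of re-encoding the address. A self-contained sketch of that write-then-read-back check, using a plain volatile array in place of the iomapped GTT:

#include <assert.h>
#include <stdint.h>

#define N 8

static volatile uint32_t gtt[N];   /* stands in for the iomapped PTE table */

static uint32_t pte_encode(uint64_t addr) { return (uint32_t)(addr | 1); }

int main(void)
{
	uint32_t last = 0;
	int i = 0;

	for (uint64_t addr = 0x1000; addr < 0x1000 + N * 0x1000; addr += 0x1000) {
		last = pte_encode(addr);
		gtt[i++] = last;                 /* iowrite32() in the driver */
	}
	if (i != 0)
		assert(gtt[i - 1] == last);      /* the WARN_ON() posting read */
	return 0;
}
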
+static void nop_clear_range(struct i915_address_space *vm,
+ uint64_t start,
+ uint64_t length,
+ bool use_scratch)
+{
+}
+
static void gen8_ggtt_clear_range(struct i915_address_space *vm,
uint64_t start,
uint64_t length,
bool use_scratch)
{
struct drm_i915_private *dev_priv = to_i915(vm->dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
gen8_pte_t scratch_pte, __iomem *gtt_base =
@@ -2512,7 +2546,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
bool use_scratch)
{
struct drm_i915_private *dev_priv = to_i915(vm->dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
gen6_pte_t scratch_pte, __iomem *gtt_base =
@@ -2538,12 +2572,30 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
+static void i915_ggtt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ uint64_t offset,
+ enum i915_cache_level cache_level,
+ u32 unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+ AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+ int rpm_atomic_seq;
+
+ rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
+
+ intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
+
+ assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
+}
+
static void i915_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
uint64_t start,
enum i915_cache_level cache_level, u32 unused)
{
- struct drm_i915_private *dev_priv = vm->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
int rpm_atomic_seq;
@@ -2561,7 +2613,7 @@ static void i915_ggtt_clear_range(struct i915_address_space *vm,
uint64_t length,
bool unused)
{
- struct drm_i915_private *dev_priv = vm->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
int rpm_atomic_seq;
@@ -2642,7 +2694,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
static void ggtt_unbind_vma(struct i915_vma *vma)
{
struct drm_device *dev = vma->vm->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj = vma->obj;
const uint64_t size = min_t(uint64_t,
obj->base.size,
@@ -2668,7 +2720,7 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
bool interruptible;
interruptible = do_idling(dev_priv);
@@ -2727,11 +2779,9 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
i915_address_space_init(&ggtt->base, dev_priv);
ggtt->base.total += PAGE_SIZE;
- if (intel_vgpu_active(dev)) {
- ret = intel_vgt_balloon(dev);
- if (ret)
- return ret;
- }
+ ret = intel_vgt_balloon(dev_priv);
+ if (ret)
+ return ret;
if (!HAS_LLC(dev))
ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
@@ -2832,8 +2882,7 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev)
i915_gem_cleanup_stolen(dev);
if (drm_mm_initialized(&ggtt->base.mm)) {
- if (intel_vgpu_active(dev))
- intel_vgt_deballoon();
+ intel_vgt_deballoon(dev_priv);
drm_mm_takedown(&ggtt->base.mm);
list_del(&ggtt->base.global_link);
@@ -3070,13 +3119,16 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ret = ggtt_probe_common(dev, ggtt->size);
- ggtt->base.clear_range = gen8_ggtt_clear_range;
- if (IS_CHERRYVIEW(dev_priv))
- ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
- else
- ggtt->base.insert_entries = gen8_ggtt_insert_entries;
ggtt->base.bind_vma = ggtt_bind_vma;
ggtt->base.unbind_vma = ggtt_unbind_vma;
+ ggtt->base.insert_page = gen8_ggtt_insert_page;
+ ggtt->base.clear_range = nop_clear_range;
+ if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
+ ggtt->base.clear_range = gen8_ggtt_clear_range;
+
+ ggtt->base.insert_entries = gen8_ggtt_insert_entries;
+ if (IS_CHERRYVIEW(dev_priv))
+ ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
return ret;
}
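
gen8_gmch_probe() now defaults clear_range to a nop and only installs the real gen8_ggtt_clear_range() when full PPGTT is unavailable or the VT-d scanout workaround applies. A small C sketch of this conditional vtable setup, with assumed probe results:

#include <stdbool.h>
#include <stdio.h>

struct vm_ops {
	void (*clear_range)(const char *why);
};

static void nop_clear_range(const char *why) { (void)why; }
static void real_clear_range(const char *why) { printf("clear: %s\n", why); }

int main(void)
{
	bool full_ppgtt = true, scanout_vtd_wa = false;  /* assumed probe results */
	struct vm_ops ops;

	/* Default to the cheap nop; fall back to the real clear only when
	 * PPGTT isn't isolating clients or a VT-d workaround requires it. */
	ops.clear_range = nop_clear_range;
	if (!full_ppgtt || scanout_vtd_wa)
		ops.clear_range = real_clear_range;

	ops.clear_range("suspend");
	return 0;
}
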
@@ -3109,6 +3161,7 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
ret = ggtt_probe_common(dev, ggtt->size);
ggtt->base.clear_range = gen6_ggtt_clear_range;
+ ggtt->base.insert_page = gen6_ggtt_insert_page;
ggtt->base.insert_entries = gen6_ggtt_insert_entries;
ggtt->base.bind_vma = ggtt_bind_vma;
ggtt->base.unbind_vma = ggtt_unbind_vma;
@@ -3130,7 +3183,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
- ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
+ ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
if (!ret) {
DRM_ERROR("failed to set up gmch\n");
return -EIO;
@@ -3139,7 +3192,8 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
intel_gtt_get(&ggtt->base.total, &ggtt->stolen_size,
&ggtt->mappable_base, &ggtt->mappable_end);
- ggtt->do_idle_maps = needs_idle_maps(dev_priv->dev);
+ ggtt->do_idle_maps = needs_idle_maps(&dev_priv->drm);
+ ggtt->base.insert_page = i915_ggtt_insert_page;
ggtt->base.insert_entries = i915_ggtt_insert_entries;
ggtt->base.clear_range = i915_ggtt_clear_range;
ggtt->base.bind_vma = ggtt_bind_vma;
@@ -3220,14 +3274,6 @@ int i915_ggtt_init_hw(struct drm_device *dev)
if (intel_iommu_gfx_mapped)
DRM_INFO("VT-d active for gfx access\n");
#endif
- /*
- * i915.enable_ppgtt is read-only, so do an early pass to validate the
- * user's requested state against the hardware/driver capabilities. We
- * do this now so that we can print out any log messages once rather
- * than every time we check intel_enable_ppgtt().
- */
- i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
- DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
return 0;
@@ -3251,9 +3297,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
- bool flush;
- i915_check_and_clear_faults(dev);
+ i915_check_and_clear_faults(dev_priv);
/* First fill our portion of the GTT with scratch pages */
ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
@@ -3261,19 +3306,16 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
/* Cache flush objects bound into GGTT and rebind them. */
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- flush = false;
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (vma->vm != &ggtt->base)
continue;
WARN_ON(i915_vma_bind(vma, obj->cache_level,
PIN_UPDATE));
-
- flush = true;
}
- if (flush)
- i915_gem_clflush_object(obj, obj->pin_display);
+ if (obj->pin_display)
+ WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
}
if (INTEL_INFO(dev)->gen >= 8) {
@@ -3399,9 +3441,11 @@ static struct sg_table *
intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
struct drm_i915_gem_object *obj)
{
+ const size_t n_pages = obj->base.size / PAGE_SIZE;
unsigned int size_pages = rot_info->plane[0].width * rot_info->plane[0].height;
unsigned int size_pages_uv;
- struct sg_page_iter sg_iter;
+ struct sgt_iter sgt_iter;
+ dma_addr_t dma_addr;
unsigned long i;
dma_addr_t *page_addr_list;
struct sg_table *st;
@@ -3410,7 +3454,7 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
int ret = -ENOMEM;
/* Allocate a temporary list of source pages for random access. */
- page_addr_list = drm_malloc_gfp(obj->base.size / PAGE_SIZE,
+ page_addr_list = drm_malloc_gfp(n_pages,
sizeof(dma_addr_t),
GFP_TEMPORARY);
if (!page_addr_list)
@@ -3433,11 +3477,10 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
/* Populate source page list from the object. */
i = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
- page_addr_list[i] = sg_page_iter_dma_address(&sg_iter);
- i++;
- }
+ for_each_sgt_dma(dma_addr, sgt_iter, obj->pages)
+ page_addr_list[i++] = dma_addr;
+ GEM_BUG_ON(i != n_pages);
st->nents = 0;
sg = st->sgl;
@@ -3635,3 +3678,29 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj,
return obj->base.size;
}
}
+
+void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
+{
+ void __iomem *ptr;
+
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ if (WARN_ON(!vma->obj->map_and_fenceable))
+ return ERR_PTR(-ENODEV);
+
+ GEM_BUG_ON(!vma->is_ggtt);
+ GEM_BUG_ON((vma->bound & GLOBAL_BIND) == 0);
+
+ ptr = vma->iomap;
+ if (ptr == NULL) {
+ ptr = io_mapping_map_wc(i915_vm_to_ggtt(vma->vm)->mappable,
+ vma->node.start,
+ vma->node.size);
+ if (ptr == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ vma->iomap = ptr;
+ }
+
+ vma->pin_count++;
+ return ptr;
+}
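
i915_vma_pin_iomap() caches the write-combined mapping on the vma and takes an extra pin per caller; the mapping itself persists until the vma is unbound. A user-space model of that contract, with malloc() standing in for io_mapping_map_wc():

#include <stdlib.h>

struct vma {
	void *iomap;     /* cached mapping, created on first pin */
	int pin_count;
};

static void *vma_pin_iomap(struct vma *v)
{
	if (!v->iomap) {
		v->iomap = malloc(4096);  /* stands in for io_mapping_map_wc() */
		if (!v->iomap)
			return NULL;
	}
	v->pin_count++;
	return v->iomap;
}

static void vma_unpin_iomap(struct vma *v)
{
	v->pin_count--;              /* mapping itself is torn down at unbind */
}

int main(void)
{
	struct vma v = { 0 };
	void *p = vma_pin_iomap(&v);
	if (p) {
		/* ... write through the mapping ... */
		vma_unpin_iomap(&v);
	}
	free(v.iomap);               /* "unbind" */
	return 0;
}
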
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 0008543d5..aa5f31d1c 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -34,6 +34,8 @@
#ifndef __I915_GEM_GTT_H__
#define __I915_GEM_GTT_H__
+#include <linux/io-mapping.h>
+
struct drm_i915_file_private;
typedef uint32_t gen6_pte_t;
@@ -175,6 +177,7 @@ struct i915_vma {
struct drm_mm_node node;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
+ void __iomem *iomap;
/** Flags and address space this VMA is bound to */
#define GLOBAL_BIND (1<<0)
@@ -316,6 +319,11 @@ struct i915_address_space {
uint64_t start,
uint64_t length,
bool use_scratch);
+ void (*insert_page)(struct i915_address_space *vm,
+ dma_addr_t addr,
+ uint64_t offset,
+ enum i915_cache_level cache_level,
+ u32 flags);
void (*insert_entries)(struct i915_address_space *vm,
struct sg_table *st,
uint64_t start,
@@ -382,27 +390,27 @@ struct i915_hw_ppgtt {
void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};
-/* For each pde iterates over every pde between from start until start + length.
- * If start, and start+length are not perfectly divisible, the macro will round
- * down, and up as needed. The macro modifies pde, start, and length. Dev is
- * only used to differentiate shift values. Temp is temp. On gen6/7, start = 0,
- * and length = 2G effectively iterates over every PDE in the system.
- *
- * XXX: temp is not actually needed, but it saves doing the ALIGN operation.
+/*
+ * gen6_for_each_pde() iterates over every pde from start until start+length.
+ * If start and start+length are not perfectly divisible, the macro will round
+ * down and up as needed. Start=0 and length=2G effectively iterates over
+ * every PDE in the system. The macro modifies ALL its parameters except 'pd',
+ * so each of the other parameters should preferably be a simple variable, or
+ * at most an lvalue with no side-effects!
*/
-#define gen6_for_each_pde(pt, pd, start, length, temp, iter) \
- for (iter = gen6_pde_index(start); \
- length > 0 && iter < I915_PDES ? \
- (pt = (pd)->page_table[iter]), 1 : 0; \
- iter++, \
- temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT) - start, \
- temp = min_t(unsigned, temp, length), \
- start += temp, length -= temp)
-
-#define gen6_for_all_pdes(pt, ppgtt, iter) \
- for (iter = 0; \
- pt = ppgtt->pd.page_table[iter], iter < I915_PDES; \
- iter++)
+#define gen6_for_each_pde(pt, pd, start, length, iter) \
+ for (iter = gen6_pde_index(start); \
+ length > 0 && iter < I915_PDES && \
+ (pt = (pd)->page_table[iter], true); \
+ ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT); \
+ temp = min(temp - start, length); \
+ start += temp, length -= temp; }), ++iter)
+
+#define gen6_for_all_pdes(pt, pd, iter) \
+ for (iter = 0; \
+ iter < I915_PDES && \
+ (pt = (pd)->page_table[iter], true); \
+ ++iter)
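
The rewritten gen6_for_each_pde() drops the external 'temp' by computing the per-PDE advance in a statement expression: consume up to the next PDE boundary, capped by the remaining length. The same arithmetic in plain C, assuming gen6's 4 MiB PDE coverage:

#include <stdint.h>
#include <stdio.h>

#define GEN6_PDE_SHIFT 22                 /* 4 MiB of VA per PDE */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	uint64_t start = 3 << 20, length = 10 << 20;  /* unaligned 10 MiB range */
	uint32_t pde = (uint32_t)(start >> GEN6_PDE_SHIFT);

	while (length > 0) {
		/* Same advance as the rewritten macro: step to the next PDE
		 * boundary, but never further than what is left. */
		uint64_t step = ALIGN(start + 1, 1ull << GEN6_PDE_SHIFT) - start;
		step = MIN(step, length);
		printf("pde %u: start %#llx len %#llx\n", pde,
		       (unsigned long long)start, (unsigned long long)step);
		start += step;
		length -= step;
		pde++;
	}
	return 0;
}
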
static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
{
@@ -518,9 +526,7 @@ int i915_ggtt_enable_hw(struct drm_device *dev);
void i915_gem_init_ggtt(struct drm_device *dev);
void i915_ggtt_cleanup_hw(struct drm_device *dev);
-int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
int i915_ppgtt_init_hw(struct drm_device *dev);
-int i915_ppgtt_init_ring(struct drm_i915_gem_request *req);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
struct drm_i915_file_private *fpriv);
@@ -535,7 +541,7 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
kref_put(&ppgtt->ref, i915_ppgtt_release);
}
-void i915_check_and_clear_faults(struct drm_device *dev);
+void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
@@ -560,4 +566,36 @@ size_t
i915_ggtt_view_size(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view);
+/**
+ * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
+ * @vma: VMA to iomap
+ *
+ * The passed in VMA has to be pinned in the global GTT mappable region.
+ * An extra pinning of the VMA is acquired for the returned iomapping;
+ * the caller must call i915_vma_unpin_iomap to relinquish the pinning
+ * after the iomapping is no longer required.
+ *
+ * Callers must hold the struct_mutex.
+ *
+ * Returns a valid iomapped pointer or ERR_PTR.
+ */
+void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
+
+/**
+ * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_iomap
+ * @vma: VMA to unpin
+ *
+ * Unpins the previously iomapped VMA from i915_vma_pin_iomap().
+ *
+ * Callers must hold the struct_mutex. This function is only valid to be
+ * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
+ */
+static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
+{
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ GEM_BUG_ON(vma->pin_count == 0);
+ GEM_BUG_ON(vma->iomap == NULL);
+ vma->pin_count--;
+}
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 71611bf21..f75bbd67a 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -29,7 +29,7 @@
#include "intel_renderstate.h"
static const struct intel_renderstate_rodata *
-render_state_get_rodata(struct drm_device *dev, const int gen)
+render_state_get_rodata(const int gen)
{
switch (gen) {
case 6:
@@ -45,21 +45,22 @@ render_state_get_rodata(struct drm_device *dev, const int gen)
return NULL;
}
-static int render_state_init(struct render_state *so, struct drm_device *dev)
+static int render_state_init(struct render_state *so,
+ struct drm_i915_private *dev_priv)
{
int ret;
- so->gen = INTEL_INFO(dev)->gen;
- so->rodata = render_state_get_rodata(dev, so->gen);
+ so->gen = INTEL_GEN(dev_priv);
+ so->rodata = render_state_get_rodata(so->gen);
if (so->rodata == NULL)
return 0;
if (so->rodata->batch_items * 4 > 4096)
return -EINVAL;
- so->obj = i915_gem_alloc_object(dev, 4096);
- if (so->obj == NULL)
- return -ENOMEM;
+ so->obj = i915_gem_object_create(&dev_priv->drm, 4096);
+ if (IS_ERR(so->obj))
+ return PTR_ERR(so->obj);
ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
if (ret)
@@ -93,6 +94,7 @@ free_gem:
static int render_state_setup(struct render_state *so)
{
+ struct drm_device *dev = so->obj->base.dev;
const struct intel_renderstate_rodata *rodata = so->rodata;
unsigned int i = 0, reloc_index = 0;
struct page *page;
@@ -134,6 +136,33 @@ static int render_state_setup(struct render_state *so)
so->aux_batch_offset = i * sizeof(u32);
+ if (HAS_POOLED_EU(dev)) {
+ /*
+ * We always program the 3x6 pool config, but depending upon which
+ * subslice is disabled the HW drops down to the appropriate config
+ * shown below.
+ *
+ * In the below table 2x6 config always refers to
+ * fused-down version, native 2x6 is not available and can
+ * be ignored
+ *
+ * SNo subslices config eu pool configuration
+ * -----------------------------------------------------------
+ * 1 3 subslices enabled (3x6) - 0x00777000 (9+9)
+ * 2 ss0 disabled (2x6) - 0x00777000 (3+9)
+ * 3 ss1 disabled (2x6) - 0x00770000 (6+6)
+ * 4 ss2 disabled (2x6) - 0x00007000 (9+3)
+ */
+ u32 eu_pool_config = 0x00777000;
+
+ OUT_BATCH(d, i, GEN9_MEDIA_POOL_STATE);
+ OUT_BATCH(d, i, GEN9_MEDIA_POOL_ENABLE);
+ OUT_BATCH(d, i, eu_pool_config);
+ OUT_BATCH(d, i, 0);
+ OUT_BATCH(d, i, 0);
+ OUT_BATCH(d, i, 0);
+ }
+
OUT_BATCH(d, i, MI_BATCH_BUFFER_END);
so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset;
@@ -177,7 +206,7 @@ int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
if (WARN_ON(engine->id != RCS))
return -ENOENT;
- ret = render_state_init(so, engine->dev);
+ ret = render_state_init(so, engine->i915);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 66571466e..6f10b4214 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -131,7 +131,16 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
unsigned long count = 0;
trace_i915_gem_shrink(dev_priv, target, flags);
- i915_gem_retire_requests(dev_priv->dev);
+ i915_gem_retire_requests(dev_priv);
+
+ /*
+ * Unbinding of objects will require HW access; Let us not wake the
+ * device just to recover a little memory. If absolutely necessary,
+ * we will force the wake during oom-notifier.
+ */
+ if ((flags & I915_SHRINK_BOUND) &&
+ !intel_runtime_pm_get_if_in_use(dev_priv))
+ flags &= ~I915_SHRINK_BOUND;
/*
* As we may completely rewrite the (un)bound list whilst unbinding
@@ -197,7 +206,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
list_splice(&still_in_list, phase->list);
}
- i915_gem_retire_requests(dev_priv->dev);
+ if (flags & I915_SHRINK_BOUND)
+ intel_runtime_pm_put(dev_priv);
+
+ i915_gem_retire_requests(dev_priv);
return count;
}
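
The shrinker now takes a runtime-PM reference only if the device is already awake, and drops I915_SHRINK_BOUND otherwise, so reclaiming a little memory never powers the GPU up. A sketch of that "increment unless zero" acquire, modelled with C11 atomics (intel_runtime_pm_get_if_in_use() itself is more involved):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int rpm_usecount;   /* 0 == device asleep */

static bool rpm_get_if_in_use(void)
{
	int v = atomic_load(&rpm_usecount);
	while (v > 0)                 /* only pile on an existing reference */
		if (atomic_compare_exchange_weak(&rpm_usecount, &v, v + 1))
			return true;
	return false;
}

static void rpm_put(void) { atomic_fetch_sub(&rpm_usecount, 1); }

int main(void)
{
	unsigned flags = 1;           /* assume I915_SHRINK_BOUND */
	if ((flags & 1) && !rpm_get_if_in_use())
		flags &= ~1u;         /* device asleep: skip bound objects */
	/* ... shrink ... */
	if (flags & 1)
		rpm_put();
	printf("flags %#x\n", flags);
	return 0;
}
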
@@ -245,7 +257,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
struct drm_i915_private *dev_priv =
container_of(shrinker, struct drm_i915_private, mm.shrinker);
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct drm_i915_gem_object *obj;
unsigned long count;
bool unlock;
@@ -253,6 +265,8 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
if (!i915_gem_shrinker_lock(dev, &unlock))
return 0;
+ i915_gem_retire_requests(dev_priv);
+
count = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
if (can_release_pages(obj))
@@ -274,7 +288,7 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
struct drm_i915_private *dev_priv =
container_of(shrinker, struct drm_i915_private, mm.shrinker);
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
unsigned long freed;
bool unlock;
@@ -309,7 +323,7 @@ i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
{
unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1;
- while (!i915_gem_shrinker_lock(dev_priv->dev, &slu->unlock)) {
+ while (!i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock)) {
schedule_timeout_killable(1);
if (fatal_signal_pending(current))
return false;
@@ -330,7 +344,7 @@ i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
{
dev_priv->mm.interruptible = slu->was_interruptible;
if (slu->unlock)
- mutex_unlock(&dev_priv->dev->struct_mutex);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
}
static int
@@ -345,7 +359,9 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
return NOTIFY_DONE;
+ intel_runtime_pm_get(dev_priv);
freed_pages = i915_gem_shrink_all(dev_priv);
+ intel_runtime_pm_put(dev_priv);
/* Because we may be allocating inside our own driver, we cannot
* assert that there are no objects with pinned pages that are not
@@ -386,17 +402,35 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
struct drm_i915_private *dev_priv =
container_of(nb, struct drm_i915_private, mm.vmap_notifier);
struct shrinker_lock_uninterruptible slu;
- unsigned long freed_pages;
+ struct i915_vma *vma, *next;
+ unsigned long freed_pages = 0;
+ int ret;
if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
return NOTIFY_DONE;
- freed_pages = i915_gem_shrink(dev_priv, -1UL,
- I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND |
- I915_SHRINK_ACTIVE |
- I915_SHRINK_VMAPS);
+ /* Force everything onto the inactive lists */
+ ret = i915_gem_wait_for_idle(dev_priv);
+ if (ret)
+ goto out;
+
+ intel_runtime_pm_get(dev_priv);
+ freed_pages += i915_gem_shrink(dev_priv, -1UL,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_ACTIVE |
+ I915_SHRINK_VMAPS);
+ intel_runtime_pm_put(dev_priv);
+
+ /* We also want to clear any cached iomaps as they wrap vmap */
+ list_for_each_entry_safe(vma, next,
+ &dev_priv->ggtt.base.inactive_list, vm_link) {
+ unsigned long count = vma->node.size >> PAGE_SHIFT;
+ if (vma->iomap && i915_vma_unbind(vma) == 0)
+ freed_pages += count;
+ }
+out:
i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
*(unsigned long *)ptr += freed_pages;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 44004e3f0..66be299a1 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -111,9 +111,9 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
if (INTEL_INFO(dev)->gen >= 3) {
u32 bsm;
- pci_read_config_dword(dev->pdev, BSM, &bsm);
+ pci_read_config_dword(dev->pdev, INTEL_BSM, &bsm);
- base = bsm & BSM_MASK;
+ base = bsm & INTEL_BSM_MASK;
} else if (IS_I865G(dev)) {
u16 toud = 0;
@@ -270,7 +270,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
void i915_gem_cleanup_stolen(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return;
@@ -550,7 +550,7 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
if (obj->stolen) {
i915_gem_stolen_remove_node(dev_priv, obj->stolen);
@@ -601,7 +601,7 @@ cleanup:
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
int ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index b9bdb3403..803019973 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -125,7 +125,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
if (INTEL_INFO(obj->base.dev)->gen >= 4)
return true;
- if (INTEL_INFO(obj->base.dev)->gen == 3) {
+ if (IS_GEN3(obj->base.dev)) {
if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
return false;
} else {
@@ -162,7 +162,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_set_tiling *args = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
int ret = 0;
@@ -229,7 +229,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
*/
if (obj->map_and_fenceable &&
!i915_gem_object_fence_ok(obj, args->tiling_mode))
- ret = i915_gem_object_ggtt_unbind(obj);
+ ret = i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
if (ret == 0) {
if (obj->pages &&
@@ -294,7 +294,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_get_tiling *args = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 32d9726e3..2314c8832 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -706,7 +706,8 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
- struct sg_page_iter sg_iter;
+ struct sgt_iter sgt_iter;
+ struct page *page;
BUG_ON(obj->userptr.work != NULL);
__i915_gem_userptr_set_active(obj, false);
@@ -716,9 +717,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
i915_gem_gtt_finish_object(obj);
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
- struct page *page = sg_page_iter_page(&sg_iter);
-
+ for_each_sgt_page(page, sgt_iter, obj->pages) {
if (obj->dirty)
set_page_dirty(page);
@@ -855,11 +854,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
return 0;
}
-int
-i915_gem_init_userptr(struct drm_device *dev)
+void i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
mutex_init(&dev_priv->mm_lock);
hash_init(dev_priv->mm_structs);
- return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 89725c9ef..9d73d2216 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -332,7 +332,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
const struct i915_error_state_file_priv *error_priv)
{
struct drm_device *dev = error_priv->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_error_state *error = error_priv->error;
struct drm_i915_error_object *obj;
int i, j, offset, elt;
@@ -411,7 +411,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
- if (INTEL_INFO(dev)->gen == 7)
+ if (IS_GEN7(dev))
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
for (i = 0; i < ARRAY_SIZE(error->ring); i++)
@@ -463,6 +463,18 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
}
}
+ if (error->ring[i].num_waiters) {
+ err_printf(m, "%s --- %d waiters\n",
+ dev_priv->engine[i].name,
+ error->ring[i].num_waiters);
+ for (j = 0; j < error->ring[i].num_waiters; j++) {
+ err_printf(m, " seqno 0x%08x for %s [%d]\n",
+ error->ring[i].waiters[j].seqno,
+ error->ring[i].waiters[j].comm,
+ error->ring[i].waiters[j].pid);
+ }
+ }
+
if ((obj = error->ring[i].ringbuffer)) {
err_printf(m, "%s --- ringbuffer = 0x%08x\n",
dev_priv->engine[i].name,
@@ -488,7 +500,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
hws_page[elt+1],
hws_page[elt+2],
hws_page[elt+3]);
- offset += 16;
+ offset += 16;
}
}
@@ -605,8 +617,9 @@ static void i915_error_state_free(struct kref *error_ref)
i915_error_object_free(error->ring[i].ringbuffer);
i915_error_object_free(error->ring[i].hws_page);
i915_error_object_free(error->ring[i].ctx);
- kfree(error->ring[i].requests);
i915_error_object_free(error->ring[i].wa_ctx);
+ kfree(error->ring[i].requests);
+ kfree(error->ring[i].waiters);
}
i915_error_object_free(error->semaphore_obj);
@@ -824,19 +837,18 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
return error_code;
}
-static void i915_gem_record_fences(struct drm_device *dev,
+static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- if (IS_GEN3(dev) || IS_GEN2(dev)) {
+ if (IS_GEN3(dev_priv) || IS_GEN2(dev_priv)) {
for (i = 0; i < dev_priv->num_fence_regs; i++)
error->fence[i] = I915_READ(FENCE_REG(i));
- } else if (IS_GEN5(dev) || IS_GEN4(dev)) {
+ } else if (IS_GEN5(dev_priv) || IS_GEN4(dev_priv)) {
for (i = 0; i < dev_priv->num_fence_regs; i++)
error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
- } else if (INTEL_INFO(dev)->gen >= 6) {
+ } else if (INTEL_GEN(dev_priv) >= 6) {
for (i = 0; i < dev_priv->num_fence_regs; i++)
error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
}
@@ -851,7 +863,7 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
struct intel_engine_cs *to;
enum intel_engine_id id;
- if (!i915_semaphore_is_enabled(dev_priv->dev))
+ if (!i915_semaphore_is_enabled(dev_priv))
return;
if (!error->semaphore_obj)
@@ -893,31 +905,71 @@ static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
}
}
-static void i915_record_ring_state(struct drm_device *dev,
+static void engine_record_waiters(struct intel_engine_cs *engine,
+ struct drm_i915_error_ring *ering)
+{
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct drm_i915_error_waiter *waiter;
+ struct rb_node *rb;
+ int count;
+
+ ering->num_waiters = 0;
+ ering->waiters = NULL;
+
+ spin_lock(&b->lock);
+ count = 0;
+ for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
+ count++;
+ spin_unlock(&b->lock);
+
+ waiter = NULL;
+ if (count)
+ waiter = kmalloc_array(count,
+ sizeof(struct drm_i915_error_waiter),
+ GFP_ATOMIC);
+ if (!waiter)
+ return;
+
+ ering->waiters = waiter;
+
+ spin_lock(&b->lock);
+ for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
+ struct intel_wait *w = container_of(rb, typeof(*w), node);
+
+ strcpy(waiter->comm, w->tsk->comm);
+ waiter->pid = w->tsk->pid;
+ waiter->seqno = w->seqno;
+ waiter++;
+
+ if (++ering->num_waiters == count)
+ break;
+ }
+ spin_unlock(&b->lock);
+}
+
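
engine_record_waiters() snapshots the breadcrumbs rb-tree in two passes: count under the lock, allocate with GFP_ATOMIC outside it, then copy at most 'count' entries under the lock again, tolerating the tree having changed in between. A stand-alone model of the same pattern over a linked list:

#include <pthread.h>
#include <stdlib.h>

struct waiter { int pid; struct waiter *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct waiter *waiters;

static int snapshot(int **out)
{
	int count = 0, n = 0;
	struct waiter *w;

	pthread_mutex_lock(&lock);            /* pass 1: just count */
	for (w = waiters; w; w = w->next)
		count++;
	pthread_mutex_unlock(&lock);

	if (!count || !(*out = malloc(count * sizeof(int))))
		return 0;                     /* allocate outside the lock */

	pthread_mutex_lock(&lock);            /* pass 2: copy, capped at count */
	for (w = waiters; w && n < count; w = w->next)
		(*out)[n++] = w->pid;
	pthread_mutex_unlock(&lock);
	return n;
}

int main(void)
{
	struct waiter a = { 1, NULL }, b = { 2, &a };
	int *pids = NULL, n;

	waiters = &b;
	n = snapshot(&pids);
	free(pids);
	return n == 2 ? 0 : 1;
}
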
+static void i915_record_ring_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error,
struct intel_engine_cs *engine,
struct drm_i915_error_ring *ering)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
ering->fault_reg = I915_READ(RING_FAULT_REG(engine));
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
gen8_record_semaphore_state(dev_priv, error, engine,
ering);
else
gen6_record_semaphore_state(dev_priv, engine, ering);
}
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
ering->instps = I915_READ(RING_INSTPS(engine->mmio_base));
ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
}
@@ -929,20 +981,20 @@ static void i915_record_ring_state(struct drm_device *dev,
ering->instdone = I915_READ(GEN2_INSTDONE);
}
- ering->waiting = waitqueue_active(&engine->irq_queue);
+ ering->waiting = intel_engine_has_waiter(engine);
ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
ering->acthd = intel_ring_get_active_head(engine);
- ering->seqno = engine->get_seqno(engine);
+ ering->seqno = intel_engine_get_seqno(engine);
ering->last_seqno = engine->last_submitted_seqno;
ering->start = I915_READ_START(engine);
ering->head = I915_READ_HEAD(engine);
ering->tail = I915_READ_TAIL(engine);
ering->ctl = I915_READ_CTL(engine);
- if (I915_NEED_GFX_HWS(dev)) {
+ if (I915_NEED_GFX_HWS(dev_priv)) {
i915_reg_t mmio;
- if (IS_GEN7(dev)) {
+ if (IS_GEN7(dev_priv)) {
switch (engine->id) {
default:
case RCS:
@@ -958,7 +1010,7 @@ static void i915_record_ring_state(struct drm_device *dev,
mmio = VEBOX_HWS_PGA_GEN7;
break;
}
- } else if (IS_GEN6(engine->dev)) {
+ } else if (IS_GEN6(engine->i915)) {
mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
} else {
/* XXX: gen8 returns to sanity */
@@ -971,18 +1023,18 @@ static void i915_record_ring_state(struct drm_device *dev,
ering->hangcheck_score = engine->hangcheck.score;
ering->hangcheck_action = engine->hangcheck.action;
- if (USES_PPGTT(dev)) {
+ if (USES_PPGTT(dev_priv)) {
int i;
ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev_priv))
ering->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE_READ(engine));
- else if (IS_GEN7(dev))
+ else if (IS_GEN7(dev_priv))
ering->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE(engine));
- else if (INTEL_INFO(dev)->gen >= 8)
+ else if (INTEL_GEN(dev_priv) >= 8)
for (i = 0; i < 4; i++) {
ering->vm_info.pdp[i] =
I915_READ(GEN8_RING_PDP_UDW(engine, i));
@@ -998,7 +1050,7 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine,
struct drm_i915_error_state *error,
struct drm_i915_error_ring *ering)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
struct drm_i915_gem_object *obj;
/* Currently render ring is the only HW context user */
@@ -1016,34 +1068,33 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine,
}
}
-static void i915_gem_record_rings(struct drm_device *dev,
+static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_request *request;
int i, count;
for (i = 0; i < I915_NUM_ENGINES; i++) {
struct intel_engine_cs *engine = &dev_priv->engine[i];
- struct intel_ringbuffer *rbuf;
error->ring[i].pid = -1;
- if (engine->dev == NULL)
+ if (!intel_engine_initialized(engine))
continue;
error->ring[i].valid = true;
- i915_record_ring_state(dev, error, engine, &error->ring[i]);
+ i915_record_ring_state(dev_priv, error, engine, &error->ring[i]);
+ engine_record_waiters(engine, &error->ring[i]);
request = i915_gem_find_active_request(engine);
if (request) {
struct i915_address_space *vm;
+ struct intel_ringbuffer *rb;
- vm = request->ctx && request->ctx->ppgtt ?
- &request->ctx->ppgtt->base :
- &ggtt->base;
+ vm = request->ctx->ppgtt ?
+ &request->ctx->ppgtt->base : &ggtt->base;
/* We need to copy these to an anonymous buffer
* as the simplest method to avoid being overwritten
@@ -1070,26 +1121,17 @@ static void i915_gem_record_rings(struct drm_device *dev,
}
rcu_read_unlock();
}
- }
- if (i915.enable_execlists) {
- /* TODO: This is only a small fix to keep basic error
- * capture working, but we need to add more information
- * for it to be useful (e.g. dump the context being
- * executed).
- */
- if (request)
- rbuf = request->ctx->engine[engine->id].ringbuf;
- else
- rbuf = dev_priv->kernel_context->engine[engine->id].ringbuf;
- } else
- rbuf = engine->buffer;
-
- error->ring[i].cpu_ring_head = rbuf->head;
- error->ring[i].cpu_ring_tail = rbuf->tail;
+ error->simulated |=
+ request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;
- error->ring[i].ringbuffer =
- i915_error_ggtt_object_create(dev_priv, rbuf->obj);
+ rb = request->ringbuf;
+ error->ring[i].cpu_ring_head = rb->head;
+ error->ring[i].cpu_ring_tail = rb->tail;
+ error->ring[i].ringbuffer =
+ i915_error_ggtt_object_create(dev_priv,
+ rb->obj);
+ }
error->ring[i].hws_page =
i915_error_ggtt_object_create(dev_priv,
@@ -1234,7 +1276,7 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
int i;
/* General organization
@@ -1301,15 +1343,14 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
- i915_get_extra_instdone(dev, error->extra_instdone);
+ i915_get_extra_instdone(dev_priv, error->extra_instdone);
}
-static void i915_error_capture_msg(struct drm_device *dev,
+static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error,
u32 engine_mask,
const char *error_msg)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 ecode;
int ring_id = -1, len;
@@ -1317,7 +1358,7 @@ static void i915_error_capture_msg(struct drm_device *dev,
len = scnprintf(error->error_msg, sizeof(error->error_msg),
"GPU HANG: ecode %d:%d:0x%08x",
- INTEL_INFO(dev)->gen, ring_id, ecode);
+ INTEL_GEN(dev_priv), ring_id, ecode);
if (ring_id != -1 && error->ring[ring_id].pid != -1)
len += scnprintf(error->error_msg + len,
@@ -1352,14 +1393,17 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
* out a structure which becomes available in debugfs for user level tools
* to pick up.
*/
-void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
+void i915_capture_error_state(struct drm_i915_private *dev_priv,
+ u32 engine_mask,
const char *error_msg)
{
static bool warned;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_error_state *error;
unsigned long flags;
+ if (READ_ONCE(dev_priv->gpu_error.first_error))
+ return;
+
/* Account for pipe specific data like PIPE*STAT */
error = kzalloc(sizeof(*error), GFP_ATOMIC);
if (!error) {
@@ -1372,23 +1416,25 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
i915_capture_gen_state(dev_priv, error);
i915_capture_reg_state(dev_priv, error);
i915_gem_capture_buffers(dev_priv, error);
- i915_gem_record_fences(dev, error);
- i915_gem_record_rings(dev, error);
+ i915_gem_record_fences(dev_priv, error);
+ i915_gem_record_rings(dev_priv, error);
do_gettimeofday(&error->time);
- error->overlay = intel_overlay_capture_error_state(dev);
- error->display = intel_display_capture_error_state(dev);
+ error->overlay = intel_overlay_capture_error_state(dev_priv);
+ error->display = intel_display_capture_error_state(dev_priv);
- i915_error_capture_msg(dev, error, engine_mask, error_msg);
+ i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
DRM_INFO("%s\n", error->error_msg);
- spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
- if (dev_priv->gpu_error.first_error == NULL) {
- dev_priv->gpu_error.first_error = error;
- error = NULL;
+ if (!error->simulated) {
+ spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ if (!dev_priv->gpu_error.first_error) {
+ dev_priv->gpu_error.first_error = error;
+ error = NULL;
+ }
+ spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
}
- spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
if (error) {
i915_error_state_free(&error->ref);
@@ -1400,7 +1446,8 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
- DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
+ DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
+ dev_priv->drm.primary->index);
warned = true;
}
}
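
i915_capture_error_state() now bails out early with an unlocked READ_ONCE() peek at first_error and only re-checks under the lock when publishing, so a second hang never pays the capture cost. A sketch of that first-error-wins publish (the struct and locking here are simplified):

#include <pthread.h>
#include <stdlib.h>

struct error_state { int code; };

static pthread_mutex_t err_lock = PTHREAD_MUTEX_INITIALIZER;
static struct error_state *first_error;

static void capture_error(int code)
{
	struct error_state *e;

	/* READ_ONCE() modelled as a relaxed atomic load: cheap early out */
	if (__atomic_load_n(&first_error, __ATOMIC_RELAXED))
		return;

	e = malloc(sizeof(*e));       /* the expensive capture */
	if (!e)
		return;
	e->code = code;

	pthread_mutex_lock(&err_lock);
	if (!first_error) {           /* re-check under the lock */
		first_error = e;
		e = NULL;
	}
	pthread_mutex_unlock(&err_lock);

	free(e);                      /* lost the race: drop our copy */
}

int main(void)
{
	capture_error(1);
	capture_error(2);             /* skipped: slot already taken */
	return 0;
}
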
@@ -1408,7 +1455,7 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
void i915_error_state_get(struct drm_device *dev,
struct i915_error_state_file_priv *error_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
spin_lock_irq(&dev_priv->gpu_error.lock);
error_priv->error = dev_priv->gpu_error.first_error;
@@ -1426,7 +1473,7 @@ void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
void i915_destroy_error_state(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_error_state *error;
spin_lock_irq(&dev_priv->gpu_error.lock);
@@ -1450,17 +1497,17 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
}
/* NB: please notice the memset */
-void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
+void i915_get_extra_instdone(struct drm_i915_private *dev_priv,
+ uint32_t *instdone)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
- if (IS_GEN2(dev) || IS_GEN3(dev))
+ if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
instdone[0] = I915_READ(GEN2_INSTDONE);
- else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
+ else if (IS_GEN4(dev_priv) || IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) {
instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
instdone[1] = I915_READ(GEN4_INSTDONE1);
- } else if (INTEL_INFO(dev)->gen >= 7) {
+ } else if (INTEL_GEN(dev_priv) >= 7) {
instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
instdone[1] = I915_READ(GEN7_SC_INSTDONE);
instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h
index 80786d9f9..cf5a65be4 100644
--- a/drivers/gpu/drm/i915/i915_guc_reg.h
+++ b/drivers/gpu/drm/i915/i915_guc_reg.h
@@ -67,11 +67,11 @@
#define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */
#define GUC_MAX_IDLE_COUNT _MMIO(0xC3E4)
+/* Defines WOPCM space available to GuC firmware */
#define GUC_WOPCM_SIZE _MMIO(0xc050)
-#define GUC_WOPCM_SIZE_VALUE (0x80 << 12) /* 512KB */
-
/* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */
-#define GUC_WOPCM_TOP (GUC_WOPCM_SIZE_VALUE)
+#define GUC_WOPCM_TOP (0x80 << 12) /* 512KB */
+#define BXT_GUC_WOPCM_RC6_RESERVED (0x10 << 12) /* 64KB */
#define GEN8_GT_PM_CONFIG _MMIO(0x138140)
#define GEN9LP_GT_PM_CONFIG _MMIO(0x138140)
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index d40c13fb6..2112e029d 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -97,8 +97,14 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
I915_WRITE(HOST2GUC_INTERRUPT, HOST2GUC_TRIGGER);
- /* No HOST2GUC command should take longer than 10ms */
- ret = wait_for_atomic(host2guc_action_response(dev_priv, &status), 10);
+ /*
+ * Fast commands should complete in less than 10us, so sample quickly
+ * up to that length of time, then switch to a slower sleep-wait loop.
+ * No HOST2GUC command should ever take longer than 10ms.
+ */
+ ret = wait_for_us(host2guc_action_response(dev_priv, &status), 10);
+ if (ret)
+ ret = wait_for(host2guc_action_response(dev_priv, &status), 10);
if (status != GUC2HOST_STATUS_SUCCESS) {
/*
* Either the GuC explicitly returned an error (which
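
The new wait splits into a short busy-poll for fast commands and a sleeping wait bounded by the 10 ms budget. A stand-alone approximation of that two-phase wait, with iteration counts standing in for the wait_for_us()/wait_for() timeouts:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool done(void)
{
	static int calls;
	return ++calls > 3;            /* pretend completion on the 4th poll */
}

static int two_phase_wait(void)
{
	for (int i = 0; i < 10; i++)   /* fast path: tight polling, ~10us */
		if (done())
			return 0;
	for (int i = 0; i < 1000; i++) {  /* slow path: sleep between polls */
		if (done())
			return 0;
		usleep(10);
	}
	return -1;                     /* -ETIMEDOUT in the driver */
}

int main(void)
{
	printf("wait -> %d\n", two_phase_wait());
	return 0;
}
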
@@ -153,13 +159,11 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
struct i915_guc_client *client)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct drm_device *dev = dev_priv->dev;
u32 data[2];
data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
/* WaRsDisableCoarsePowerGating:skl,bxt */
- if (!intel_enable_rc6(dev) ||
- NEEDS_WaRsDisableCoarsePowerGating(dev))
+ if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
data[1] = 0;
else
/* bit 0 and 1 are for Render and Media domain separately */
@@ -175,94 +179,88 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
* client object which contains the page being used for the doorbell
*/
-static void guc_init_doorbell(struct intel_guc *guc,
- struct i915_guc_client *client)
+static int guc_update_doorbell_id(struct intel_guc *guc,
+ struct i915_guc_client *client,
+ u16 new_id)
{
+ struct sg_table *sg = guc->ctx_pool_obj->pages;
+ void *doorbell_bitmap = guc->doorbell_bitmap;
struct guc_doorbell_info *doorbell;
+ struct guc_context_desc desc;
+ size_t len;
doorbell = client->client_base + client->doorbell_offset;
- doorbell->db_status = GUC_DOORBELL_ENABLED;
- doorbell->cookie = 0;
-}
-
-static int guc_ring_doorbell(struct i915_guc_client *gc)
-{
- struct guc_process_desc *desc;
- union guc_doorbell_qw db_cmp, db_exc, db_ret;
- union guc_doorbell_qw *db;
- int attempt = 2, ret = -EAGAIN;
-
- desc = gc->client_base + gc->proc_desc_offset;
-
- /* Update the tail so it is visible to GuC */
- desc->tail = gc->wq_tail;
-
- /* current cookie */
- db_cmp.db_status = GUC_DOORBELL_ENABLED;
- db_cmp.cookie = gc->cookie;
-
- /* cookie to be updated */
- db_exc.db_status = GUC_DOORBELL_ENABLED;
- db_exc.cookie = gc->cookie + 1;
- if (db_exc.cookie == 0)
- db_exc.cookie = 1;
-
- /* pointer of current doorbell cacheline */
- db = gc->client_base + gc->doorbell_offset;
-
- while (attempt--) {
- /* lets ring the doorbell */
- db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db,
- db_cmp.value_qw, db_exc.value_qw);
-
- /* if the exchange was successfully executed */
- if (db_ret.value_qw == db_cmp.value_qw) {
- /* db was successfully rung */
- gc->cookie = db_exc.cookie;
- ret = 0;
- break;
- }
+ if (client->doorbell_id != GUC_INVALID_DOORBELL_ID &&
+ test_bit(client->doorbell_id, doorbell_bitmap)) {
+ /* Deactivate the old doorbell */
+ doorbell->db_status = GUC_DOORBELL_DISABLED;
+ (void)host2guc_release_doorbell(guc, client);
+ __clear_bit(client->doorbell_id, doorbell_bitmap);
+ }
- /* XXX: doorbell was lost and need to acquire it again */
- if (db_ret.db_status == GUC_DOORBELL_DISABLED)
- break;
+ /* Update the GuC's idea of the doorbell ID */
+ len = sg_pcopy_to_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
+ sizeof(desc) * client->ctx_index);
+ if (len != sizeof(desc))
+ return -EFAULT;
+ desc.db_id = new_id;
+ len = sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
+ sizeof(desc) * client->ctx_index);
+ if (len != sizeof(desc))
+ return -EFAULT;
- DRM_ERROR("Cookie mismatch. Expected %d, returned %d\n",
- db_cmp.cookie, db_ret.cookie);
+ client->doorbell_id = new_id;
+ if (new_id == GUC_INVALID_DOORBELL_ID)
+ return 0;
- /* update the cookie to newly read cookie from GuC */
- db_cmp.cookie = db_ret.cookie;
- db_exc.cookie = db_ret.cookie + 1;
- if (db_exc.cookie == 0)
- db_exc.cookie = 1;
- }
+ /* Activate the new doorbell */
+ __set_bit(new_id, doorbell_bitmap);
+ doorbell->cookie = 0;
+ doorbell->db_status = GUC_DOORBELL_ENABLED;
+ return host2guc_allocate_doorbell(guc, client);
+}
- return ret;
+static int guc_init_doorbell(struct intel_guc *guc,
+ struct i915_guc_client *client,
+ uint16_t db_id)
+{
+ return guc_update_doorbell_id(guc, client, db_id);
}
static void guc_disable_doorbell(struct intel_guc *guc,
struct i915_guc_client *client)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct guc_doorbell_info *doorbell;
- i915_reg_t drbreg = GEN8_DRBREGL(client->doorbell_id);
- int value;
-
- doorbell = client->client_base + client->doorbell_offset;
+ (void)guc_update_doorbell_id(guc, client, GUC_INVALID_DOORBELL_ID);
- doorbell->db_status = GUC_DOORBELL_DISABLED;
+ /* XXX: wait for any interrupts */
+ /* XXX: wait for workqueue to drain */
+}
- I915_WRITE(drbreg, I915_READ(drbreg) & ~GEN8_DRB_VALID);
+static uint16_t
+select_doorbell_register(struct intel_guc *guc, uint32_t priority)
+{
+ /*
+ * The bitmap tracks which doorbell registers are currently in use.
+ * It is split into two halves; the first half is used for normal
+ * priority contexts, the second half for high-priority ones.
+ * Note that logically higher priorities are numerically less than
+ * normal ones, so the test below means "is it high-priority?"
+ */
+ const bool hi_pri = (priority <= GUC_CTX_PRIORITY_HIGH);
+ const uint16_t half = GUC_MAX_DOORBELLS / 2;
+ const uint16_t start = hi_pri ? half : 0;
+ const uint16_t end = start + half;
+ uint16_t id;
- value = I915_READ(drbreg);
- WARN_ON((value & GEN8_DRB_VALID) != 0);
+ id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
+ if (id == end)
+ id = GUC_INVALID_DOORBELL_ID;
- I915_WRITE(GEN8_DRBREGU(client->doorbell_id), 0);
- I915_WRITE(drbreg, 0);
+ DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n",
+ hi_pri ? "high" : "normal", id);
- /* XXX: wait for any interrupts */
- /* XXX: wait for workqueue to drain */
+ return id;
}
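/*
 * A worked example of the split above, assuming GUC_MAX_DOORBELLS is
 * 256 (the constant's value is an assumption; only the halving comes
 * from the code): normal-priority clients search ids [0, 128) and
 * high-priority clients search [128, 256). find_next_zero_bit()
 * returns `end` when the half is full, which is then mapped to
 * GUC_INVALID_DOORBELL_ID.
 */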
/*
@@ -289,37 +287,6 @@ static uint32_t select_doorbell_cacheline(struct intel_guc *guc)
return offset;
}
-static uint16_t assign_doorbell(struct intel_guc *guc, uint32_t priority)
-{
- /*
- * The bitmap is split into two halves; the first half is used for
- * normal priority contexts, the second half for high-priority ones.
- * Note that logically higher priorities are numerically less than
- * normal ones, so the test below means "is it high-priority?"
- */
- const bool hi_pri = (priority <= GUC_CTX_PRIORITY_HIGH);
- const uint16_t half = GUC_MAX_DOORBELLS / 2;
- const uint16_t start = hi_pri ? half : 0;
- const uint16_t end = start + half;
- uint16_t id;
-
- id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
- if (id == end)
- id = GUC_INVALID_DOORBELL_ID;
- else
- bitmap_set(guc->doorbell_bitmap, id, 1);
-
- DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n",
- hi_pri ? "high" : "normal", id);
-
- return id;
-}
-
-static void release_doorbell(struct intel_guc *guc, uint16_t id)
-{
- bitmap_clear(guc->doorbell_bitmap, id, 1);
-}
-
/*
* Initialise the process descriptor shared with the GuC firmware.
*/
@@ -361,10 +328,9 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
struct drm_i915_gem_object *client_obj = client->client_obj;
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_engine_cs *engine;
- struct intel_context *ctx = client->owner;
+ struct i915_gem_context *ctx = client->owner;
struct guc_context_desc desc;
struct sg_table *sg;
- enum intel_engine_id id;
u32 gfx_addr;
memset(&desc, 0, sizeof(desc));
@@ -374,10 +340,10 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
desc.priority = client->priority;
desc.db_id = client->doorbell_id;
- for_each_engine_id(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv) {
+ struct intel_context *ce = &ctx->engine[engine->id];
struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id];
struct drm_i915_gem_object *obj;
- uint64_t ctx_desc;
/* TODO: We have a design issue to be solved here. Only when we
* receive the first batch, we know which engine is used by the
@@ -386,20 +352,18 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
	 * for now who owns a GuC client. But any future owner of a GuC
	 * client must make sure the lrc is pinned prior to getting here.
*/
- obj = ctx->engine[id].state;
- if (!obj)
+ if (!ce->state)
break; /* XXX: continue? */
- ctx_desc = intel_lr_context_descriptor(ctx, engine);
- lrc->context_desc = (u32)ctx_desc;
+ lrc->context_desc = lower_32_bits(ce->lrc_desc);
/* The state page is after PPHWSP */
- gfx_addr = i915_gem_obj_ggtt_offset(obj);
+ gfx_addr = i915_gem_obj_ggtt_offset(ce->state);
lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE;
lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
(engine->guc_id << GUC_ELC_ENGINE_OFFSET);
- obj = ctx->engine[id].ringbuf->obj;
+ obj = ce->ringbuf->obj;
gfx_addr = i915_gem_obj_ggtt_offset(obj);
lrc->ring_begin = gfx_addr;
@@ -427,7 +391,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
desc.wq_size = client->wq_size;
/*
- * XXX: Take LRCs from an existing intel_context if this is not an
+ * XXX: Take LRCs from an existing context if this is not an
* IsKMDCreatedContext client
*/
desc.desc_private = (uintptr_t)client;
@@ -451,47 +415,64 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
sizeof(desc) * client->ctx_index);
}
-int i915_guc_wq_check_space(struct i915_guc_client *gc)
+/**
+ * i915_guc_wq_check_space() - check that the GuC can accept a request
+ * @request: request associated with the commands
+ *
+ * Return: 0 if space is available
+ * -EAGAIN if space is not currently available
+ *
+ * This function must be called (and must return 0) before a request
+ * is submitted to the GuC via i915_guc_submit() below. Once a result
+ * of 0 has been returned, it remains valid until (but only until)
+ * the next call to submit().
+ *
+ * This precheck allows the caller to determine in advance that space
+ * will be available for the next submission before committing resources
+ * to it, and helps avoid late failures with complicated recovery paths.
+ */
+int i915_guc_wq_check_space(struct drm_i915_gem_request *request)
{
+ const size_t wqi_size = sizeof(struct guc_wq_item);
+ struct i915_guc_client *gc = request->i915->guc.execbuf_client;
struct guc_process_desc *desc;
- u32 size = sizeof(struct guc_wq_item);
- int ret = -ETIMEDOUT, timeout_counter = 200;
+ u32 freespace;
- if (!gc)
- return 0;
+ GEM_BUG_ON(gc == NULL);
desc = gc->client_base + gc->proc_desc_offset;
- while (timeout_counter-- > 0) {
- if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
- ret = 0;
- break;
- }
+ freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
+ if (likely(freespace >= wqi_size))
+ return 0;
- if (timeout_counter)
- usleep_range(1000, 2000);
- };
+ gc->no_wq_space += 1;
- return ret;
+ return -EAGAIN;
}
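/*
 * A minimal sketch of the CIRC_SPACE() arithmetic used above, assuming
 * a power-of-two ring size (the workqueue buffer is two pages, per the
 * comments below, so it is). One slot is always kept free, so that
 * head == tail unambiguously means "empty":
 */
static inline u32 example_circ_space(u32 prod, u32 cons, u32 size)
{
	/* bytes the producer may write before running into the consumer;
	 * matches CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) above */
	return (cons - prod - 1) & (size - 1);
}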
-static int guc_add_workqueue_item(struct i915_guc_client *gc,
- struct drm_i915_gem_request *rq)
+static void guc_add_workqueue_item(struct i915_guc_client *gc,
+ struct drm_i915_gem_request *rq)
{
+ /* wqi_len is in DWords, and does not include the one-word header */
+ const size_t wqi_size = sizeof(struct guc_wq_item);
+ const u32 wqi_len = wqi_size/sizeof(u32) - 1;
struct guc_process_desc *desc;
struct guc_wq_item *wqi;
void *base;
- u32 tail, wq_len, wq_off, space;
+ u32 freespace, tail, wq_off, wq_page;
desc = gc->client_base + gc->proc_desc_offset;
- space = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
- if (WARN_ON(space < sizeof(struct guc_wq_item)))
- return -ENOSPC; /* shouldn't happen */
- /* postincrement WQ tail for next time */
- wq_off = gc->wq_tail;
- gc->wq_tail += sizeof(struct guc_wq_item);
- gc->wq_tail &= gc->wq_size - 1;
+ /* Free space is guaranteed, see i915_guc_wq_check_space() above */
+ freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
+ GEM_BUG_ON(freespace < wqi_size);
+
+ /* The GuC firmware wants the tail index in QWords, not bytes */
+ tail = rq->tail;
+ GEM_BUG_ON(tail & 7);
+ tail >>= 3;
+ GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);
	/* For now a workqueue item is 4 DWs; the workqueue buffer is 2 pages. So
	 * we should not have the case where the wqi structure crosses a page, neither
@@ -500,19 +481,23 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
	 * XXX: if not the case, we need to save data to a temp wqi and copy it to
* workqueue buffer dw by dw.
*/
- WARN_ON(sizeof(struct guc_wq_item) != 16);
- WARN_ON(wq_off & 3);
+ BUILD_BUG_ON(wqi_size != 16);
+
+ /* postincrement WQ tail for next time */
+ wq_off = gc->wq_tail;
+ gc->wq_tail += wqi_size;
+ gc->wq_tail &= gc->wq_size - 1;
+ GEM_BUG_ON(wq_off & (wqi_size - 1));
- /* wq starts from the page after doorbell / process_desc */
- base = kmap_atomic(i915_gem_object_get_page(gc->client_obj,
- (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT));
+ /* WQ starts from the page after doorbell / process_desc */
+ wq_page = (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT;
wq_off &= PAGE_SIZE - 1;
+ base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, wq_page));
wqi = (struct guc_wq_item *)((char *)base + wq_off);
- /* len does not include the header */
- wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
+ /* Now fill in the 4-word work queue item */
wqi->header = WQ_TYPE_INORDER |
- (wq_len << WQ_LEN_SHIFT) |
+ (wqi_len << WQ_LEN_SHIFT) |
(rq->engine->guc_id << WQ_TARGET_SHIFT) |
WQ_NO_WCFLUSH_WAIT;
@@ -520,48 +505,105 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx,
rq->engine);
- /* The GuC firmware wants the tail index in QWords, not bytes */
- tail = rq->ringbuf->tail >> 3;
wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
- wqi->fence_id = 0; /*XXX: what fence to be here */
+ wqi->fence_id = rq->seqno;
kunmap_atomic(base);
+}
- return 0;
+static int guc_ring_doorbell(struct i915_guc_client *gc)
+{
+ struct guc_process_desc *desc;
+ union guc_doorbell_qw db_cmp, db_exc, db_ret;
+ union guc_doorbell_qw *db;
+ int attempt = 2, ret = -EAGAIN;
+
+ desc = gc->client_base + gc->proc_desc_offset;
+
+ /* Update the tail so it is visible to GuC */
+ desc->tail = gc->wq_tail;
+
+ /* current cookie */
+ db_cmp.db_status = GUC_DOORBELL_ENABLED;
+ db_cmp.cookie = gc->cookie;
+
+ /* cookie to be updated */
+ db_exc.db_status = GUC_DOORBELL_ENABLED;
+ db_exc.cookie = gc->cookie + 1;
+ if (db_exc.cookie == 0)
+ db_exc.cookie = 1;
+
+	/* pointer to the current doorbell cacheline */
+ db = gc->client_base + gc->doorbell_offset;
+
+ while (attempt--) {
+		/* let's ring the doorbell */
+ db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db,
+ db_cmp.value_qw, db_exc.value_qw);
+
+ /* if the exchange was successfully executed */
+ if (db_ret.value_qw == db_cmp.value_qw) {
+ /* db was successfully rung */
+ gc->cookie = db_exc.cookie;
+ ret = 0;
+ break;
+ }
+
+		/* XXX: doorbell was lost and needs to be acquired again */
+ if (db_ret.db_status == GUC_DOORBELL_DISABLED)
+ break;
+
+ DRM_ERROR("Cookie mismatch. Expected %d, returned %d\n",
+ db_cmp.cookie, db_ret.cookie);
+
+ /* update the cookie to newly read cookie from GuC */
+ db_cmp.cookie = db_ret.cookie;
+ db_exc.cookie = db_ret.cookie + 1;
+ if (db_exc.cookie == 0)
+ db_exc.cookie = 1;
+ }
+
+ return ret;
}
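/*
 * A condensed sketch of the handshake above: the doorbell cacheline is
 * treated as a single 64-bit {db_status, cookie} word, and ringing is
 * an atomic64_cmpxchg() of {ENABLED, cookie} -> {ENABLED, cookie + 1}.
 * A returned DISABLED status aborts the retry loop, and the cookie
 * skips 0 on wrap, as this helper mirrors (illustrative only):
 */
static inline u32 example_next_cookie(u32 cookie)
{
	return ++cookie ? cookie : 1;
}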
/**
* i915_guc_submit() - Submit commands through GuC
- * @client: the guc client where commands will go through
* @rq: request associated with the commands
*
- * Return: 0 if succeed
+ * Return: 0 on success, otherwise an errno.
+ * (Note: nonzero really shouldn't happen!)
+ *
+ * The caller must have already called i915_guc_wq_check_space() above
+ * with a result of 0 (success) since the last request submission. This
+ * guarantees that there is space in the work queue for the new request,
+ * so enqueuing the item cannot fail.
+ *
+ * Bad Things Will Happen if the caller violates this protocol, e.g. calls
+ * submit() when check() says there's no space, or calls submit() multiple
+ * times with no intervening check().
+ *
+ * The only error here arises if the doorbell hardware isn't functioning
+ * as expected, which really shouldn't happen.
*/
-int i915_guc_submit(struct i915_guc_client *client,
- struct drm_i915_gem_request *rq)
+int i915_guc_submit(struct drm_i915_gem_request *rq)
{
- struct intel_guc *guc = client->guc;
- unsigned int engine_id = rq->engine->guc_id;
- int q_ret, b_ret;
+ unsigned int engine_id = rq->engine->id;
+ struct intel_guc *guc = &rq->i915->guc;
+ struct i915_guc_client *client = guc->execbuf_client;
+ int b_ret;
- q_ret = guc_add_workqueue_item(client, rq);
- if (q_ret == 0)
- b_ret = guc_ring_doorbell(client);
+ guc_add_workqueue_item(client, rq);
+ b_ret = guc_ring_doorbell(client);
client->submissions[engine_id] += 1;
- if (q_ret) {
- client->q_fail += 1;
- client->retcode = q_ret;
- } else if (b_ret) {
+ client->retcode = b_ret;
+ if (b_ret)
client->b_fail += 1;
- client->retcode = q_ret = b_ret;
- } else {
- client->retcode = 0;
- }
+
guc->submissions[engine_id] += 1;
guc->last_seqno[engine_id] = rq->seqno;
- return q_ret;
+ return b_ret;
}
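/*
 * A minimal sketch of the check/submit protocol documented above,
 * assuming `rq` is a fully-constructed request (the real submission
 * path in the driver is more involved):
 */
static int example_submit(struct drm_i915_gem_request *rq)
{
	/* must return 0 before each submit; the result is then consumed */
	int ret = i915_guc_wq_check_space(rq);
	if (ret)
		return ret;	/* -EAGAIN: back off and try again later */

	return i915_guc_submit(rq);
}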
/*
@@ -572,7 +614,7 @@ int i915_guc_submit(struct i915_guc_client *client,
/**
* gem_allocate_guc_obj() - Allocate gem object for GuC usage
- * @dev: drm device
+ * @dev_priv: driver private data structure
* @size: size of object
*
* This is a wrapper to create a gem obj. In order to use it inside GuC, the
@@ -581,14 +623,13 @@ int i915_guc_submit(struct i915_guc_client *client,
*
* Return: A drm_i915_gem_object if successful, otherwise NULL.
*/
-static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev,
- u32 size)
+static struct drm_i915_gem_object *
+gem_allocate_guc_obj(struct drm_i915_private *dev_priv, u32 size)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
- obj = i915_gem_alloc_object(dev, size);
- if (!obj)
+ obj = i915_gem_object_create(&dev_priv->drm, size);
+ if (IS_ERR(obj))
return NULL;
if (i915_gem_object_get_pages(obj)) {
@@ -623,10 +664,10 @@ static void gem_release_guc_obj(struct drm_i915_gem_object *obj)
drm_gem_object_unreference(&obj->base);
}
-static void guc_client_free(struct drm_device *dev,
- struct i915_guc_client *client)
+static void
+guc_client_free(struct drm_i915_private *dev_priv,
+ struct i915_guc_client *client)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc *guc = &dev_priv->guc;
if (!client)
@@ -639,17 +680,10 @@ static void guc_client_free(struct drm_device *dev,
if (client->client_base) {
/*
- * If we got as far as setting up a doorbell, make sure
- * we shut it down before unmapping & deallocating the
- * memory. So first disable the doorbell, then tell the
- * GuC that we've finished with it, finally deallocate
- * it in our bitmap
+ * If we got as far as setting up a doorbell, make sure we
+ * shut it down before unmapping & deallocating the memory.
*/
- if (client->doorbell_id != GUC_INVALID_DOORBELL_ID) {
- guc_disable_doorbell(guc, client);
- host2guc_release_doorbell(guc, client);
- release_doorbell(guc, client->doorbell_id);
- }
+ guc_disable_doorbell(guc, client);
kunmap(kmap_to_page(client->client_base));
}
@@ -664,9 +698,51 @@ static void guc_client_free(struct drm_device *dev,
kfree(client);
}
+/*
+ * Borrow the first client to set up & tear down every doorbell
+ * in turn, to ensure that all doorbell h/w is (re)initialised.
+ */
+static void guc_init_doorbell_hw(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct i915_guc_client *client = guc->execbuf_client;
+ uint16_t db_id, i;
+ int err;
+
+ db_id = client->doorbell_id;
+
+ for (i = 0; i < GUC_MAX_DOORBELLS; ++i) {
+ i915_reg_t drbreg = GEN8_DRBREGL(i);
+ u32 value = I915_READ(drbreg);
+
+ err = guc_update_doorbell_id(guc, client, i);
+
+ /* Report update failure or unexpectedly active doorbell */
+ if (err || (i != db_id && (value & GUC_DOORBELL_ENABLED)))
+ DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) was 0x%x, err %d\n",
+ i, drbreg.reg, value, err);
+ }
+
+ /* Restore to original value */
+ err = guc_update_doorbell_id(guc, client, db_id);
+ if (err)
+ DRM_ERROR("Failed to restore doorbell to %d, err %d\n",
+ db_id, err);
+
+ for (i = 0; i < GUC_MAX_DOORBELLS; ++i) {
+ i915_reg_t drbreg = GEN8_DRBREGL(i);
+ u32 value = I915_READ(drbreg);
+
+ if (i != db_id && (value & GUC_DOORBELL_ENABLED))
+ DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) finally 0x%x\n",
+ i, drbreg.reg, value);
+ }
+}
+
/**
* guc_client_alloc() - Allocate an i915_guc_client
- * @dev: drm device
+ * @dev_priv: driver private data structure
 * @priority: one of four priority levels: _CRITICAL, _HIGH, _NORMAL and _LOW
* The kernel client to replace ExecList submission is created with
* NORMAL priority. Priority of a client for scheduler can be HIGH,
@@ -676,14 +752,15 @@ static void guc_client_free(struct drm_device *dev,
*
* Return: An i915_guc_client object if success, else NULL.
*/
-static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
- uint32_t priority,
- struct intel_context *ctx)
+static struct i915_guc_client *
+guc_client_alloc(struct drm_i915_private *dev_priv,
+ uint32_t priority,
+ struct i915_gem_context *ctx)
{
struct i915_guc_client *client;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc *guc = &dev_priv->guc;
struct drm_i915_gem_object *obj;
+ uint16_t db_id;
client = kzalloc(sizeof(*client), GFP_KERNEL);
if (!client)
@@ -702,7 +779,7 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
}
/* The first page is doorbell/proc_desc. Two followed pages are wq. */
- obj = gem_allocate_guc_obj(dev, GUC_DB_SIZE + GUC_WQ_SIZE);
+ obj = gem_allocate_guc_obj(dev_priv, GUC_DB_SIZE + GUC_WQ_SIZE);
if (!obj)
goto err;
@@ -712,6 +789,11 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
client->wq_offset = GUC_DB_SIZE;
client->wq_size = GUC_WQ_SIZE;
+ db_id = select_doorbell_register(guc, client->priority);
+ if (db_id == GUC_INVALID_DOORBELL_ID)
+ /* XXX: evict a doorbell instead? */
+ goto err;
+
client->doorbell_offset = select_doorbell_cacheline(guc);
/*
@@ -724,29 +806,22 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
else
client->proc_desc_offset = (GUC_DB_SIZE / 2);
- client->doorbell_id = assign_doorbell(guc, client->priority);
- if (client->doorbell_id == GUC_INVALID_DOORBELL_ID)
- /* XXX: evict a doorbell instead */
- goto err;
-
guc_init_proc_desc(guc, client);
guc_init_ctx_desc(guc, client);
- guc_init_doorbell(guc, client);
-
- /* XXX: Any cache flushes needed? General domain mgmt calls? */
-
- if (host2guc_allocate_doorbell(guc, client))
+ if (guc_init_doorbell(guc, client, db_id))
goto err;
- DRM_DEBUG_DRIVER("new priority %u client %p: ctx_index %u db_id %u\n",
- priority, client, client->ctx_index, client->doorbell_id);
+ DRM_DEBUG_DRIVER("new priority %u client %p: ctx_index %u\n",
+ priority, client, client->ctx_index);
+ DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%x\n",
+ client->doorbell_id, client->doorbell_offset);
return client;
err:
DRM_ERROR("FAILED to create priority %u GuC client!\n", priority);
- guc_client_free(dev, client);
+ guc_client_free(dev_priv, client);
return NULL;
}
@@ -771,7 +846,7 @@ static void guc_create_log(struct intel_guc *guc)
obj = guc->log_obj;
if (!obj) {
- obj = gem_allocate_guc_obj(dev_priv->dev, size);
+ obj = gem_allocate_guc_obj(dev_priv, size);
if (!obj) {
/* logging will be off */
i915.guc_log_level = -1;
@@ -831,7 +906,7 @@ static void guc_create_ads(struct intel_guc *guc)
obj = guc->ads_obj;
if (!obj) {
- obj = gem_allocate_guc_obj(dev_priv->dev, PAGE_ALIGN(size));
+ obj = gem_allocate_guc_obj(dev_priv, PAGE_ALIGN(size));
if (!obj)
return;
@@ -885,66 +960,65 @@ static void guc_create_ads(struct intel_guc *guc)
* Set up the memory resources to be shared with the GuC. At this point,
* we require just one object that can be mapped through the GGTT.
*/
-int i915_guc_submission_init(struct drm_device *dev)
+int i915_guc_submission_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
const size_t ctxsize = sizeof(struct guc_context_desc);
const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize;
const size_t gemsize = round_up(poolsize, PAGE_SIZE);
struct intel_guc *guc = &dev_priv->guc;
+ /* Wipe bitmap & delete client in case of reinitialisation */
+ bitmap_clear(guc->doorbell_bitmap, 0, GUC_MAX_DOORBELLS);
+ i915_guc_submission_disable(dev_priv);
+
if (!i915.enable_guc_submission)
return 0; /* not enabled */
if (guc->ctx_pool_obj)
return 0; /* already allocated */
- guc->ctx_pool_obj = gem_allocate_guc_obj(dev_priv->dev, gemsize);
+ guc->ctx_pool_obj = gem_allocate_guc_obj(dev_priv, gemsize);
if (!guc->ctx_pool_obj)
return -ENOMEM;
ida_init(&guc->ctx_ids);
-
guc_create_log(guc);
-
guc_create_ads(guc);
return 0;
}
-int i915_guc_submission_enable(struct drm_device *dev)
+int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc *guc = &dev_priv->guc;
- struct intel_context *ctx = dev_priv->kernel_context;
struct i915_guc_client *client;
/* client for execbuf submission */
- client = guc_client_alloc(dev, GUC_CTX_PRIORITY_KMD_NORMAL, ctx);
+ client = guc_client_alloc(dev_priv,
+ GUC_CTX_PRIORITY_KMD_NORMAL,
+ dev_priv->kernel_context);
if (!client) {
DRM_ERROR("Failed to create execbuf guc_client\n");
return -ENOMEM;
}
guc->execbuf_client = client;
-
host2guc_sample_forcewake(guc, client);
+ guc_init_doorbell_hw(guc);
return 0;
}
-void i915_guc_submission_disable(struct drm_device *dev)
+void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc *guc = &dev_priv->guc;
- guc_client_free(dev, guc->execbuf_client);
+ guc_client_free(dev_priv, guc->execbuf_client);
guc->execbuf_client = NULL;
}
-void i915_guc_submission_fini(struct drm_device *dev)
+void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc *guc = &dev_priv->guc;
gem_release_guc_obj(dev_priv->guc.ads_obj);
@@ -965,12 +1039,12 @@ void i915_guc_submission_fini(struct drm_device *dev)
*/
int intel_guc_suspend(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_guc *guc = &dev_priv->guc;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
u32 data[3];
- if (!i915.enable_guc_submission)
+ if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
return 0;
ctx = dev_priv->kernel_context;
@@ -991,12 +1065,12 @@ int intel_guc_suspend(struct drm_device *dev)
*/
int intel_guc_resume(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_guc *guc = &dev_priv->guc;
- struct intel_context *ctx;
+ struct i915_gem_context *ctx;
u32 data[3];
- if (!i915.enable_guc_submission)
+ if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
return 0;
ctx = dev_priv->kernel_context;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index aab47f7bb..1c2aec392 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -259,12 +259,12 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
dev_priv->gt_irq_mask &= ~interrupt_mask;
dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- POSTING_READ(GTIMR);
}
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
ilk_update_gt_irq(dev_priv, mask, mask);
+ POSTING_READ_FW(GTIMR);
}
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
@@ -336,9 +336,8 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
__gen6_disable_pm_irq(dev_priv, mask);
}
-void gen6_reset_rps_interrupts(struct drm_device *dev)
+void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
i915_reg_t reg = gen6_pm_iir(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
@@ -349,14 +348,11 @@ void gen6_reset_rps_interrupts(struct drm_device *dev)
spin_unlock_irq(&dev_priv->irq_lock);
}
-void gen6_enable_rps_interrupts(struct drm_device *dev)
+void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
spin_lock_irq(&dev_priv->irq_lock);
-
- WARN_ON(dev_priv->rps.pm_iir);
- WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
+ WARN_ON_ONCE(dev_priv->rps.pm_iir);
+ WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
dev_priv->rps.interrupts_enabled = true;
I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
dev_priv->pm_rps_events);
@@ -367,32 +363,13 @@ void gen6_enable_rps_interrupts(struct drm_device *dev)
u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
- /*
- * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer
- * if GEN6_PM_UP_EI_EXPIRED is masked.
- *
- * TODO: verify if this can be reproduced on VLV,CHV.
- */
- if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
- mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
-
- if (INTEL_INFO(dev_priv)->gen >= 8)
- mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
-
- return mask;
+ return (mask & ~dev_priv->rps.pm_intr_keep);
}
-void gen6_disable_rps_interrupts(struct drm_device *dev)
+void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
spin_lock_irq(&dev_priv->irq_lock);
dev_priv->rps.interrupts_enabled = false;
- spin_unlock_irq(&dev_priv->irq_lock);
-
- cancel_work_sync(&dev_priv->rps.work);
-
- spin_lock_irq(&dev_priv->irq_lock);
I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
@@ -401,8 +378,15 @@ void gen6_disable_rps_interrupts(struct drm_device *dev)
~dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
+ synchronize_irq(dev_priv->drm.irq);
- synchronize_irq(dev->irq);
+ /* Now that we will not be generating any more work, flush any
+	 * outstanding tasks. As we are called on the RPS idle path,
+ * we will reset the GPU to minimum frequencies, so the current
+ * state of the worker can be discarded.
+ */
+ cancel_work_sync(&dev_priv->rps.work);
+ gen6_reset_rps_interrupts(dev_priv);
}
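/*
 * The teardown above follows the usual "stop producing, then flush"
 * ordering (a restatement of the code, not an addition to it):
 *
 *	1. clear interrupts_enabled under irq_lock, so the IRQ handler
 *	   stops queueing new RPS work;
 *	2. mask the sources via PMINTRMSK and the IMR/IER writes;
 *	3. synchronize_irq() so any handler already running finishes;
 *	4. cancel_work_sync() to flush the worker, whose state can be
 *	   discarded since this is the idle path;
 *	5. gen6_reset_rps_interrupts() to ack anything left in IIR.
 */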
/**
@@ -582,7 +566,7 @@ i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
u32 enable_mask;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
+ enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
status_mask);
else
enable_mask = status_mask << 16;
@@ -596,7 +580,7 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
u32 enable_mask;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
+ enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
status_mask);
else
enable_mask = status_mask << 16;
@@ -605,19 +589,17 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
/**
* i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
- * @dev: drm device
+ * @dev_priv: i915 device private
*/
-static void i915_enable_asle_pipestat(struct drm_device *dev)
+static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
+ if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
return;
spin_lock_irq(&dev_priv->irq_lock);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
i915_enable_pipestat(dev_priv, PIPE_A,
PIPE_LEGACY_BLC_EVENT_STATUS);
@@ -685,7 +667,7 @@ static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
*/
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t high_frame, low_frame;
u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
struct intel_crtc *intel_crtc =
@@ -732,7 +714,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}
@@ -741,7 +723,7 @@ static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
const struct drm_display_mode *mode = &crtc->base.hwmode;
enum pipe pipe = crtc->pipe;
int position, vtotal;
@@ -750,7 +732,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vtotal /= 2;
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
else
position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
@@ -767,7 +749,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
* problem. We may need to extend this to include other platforms,
* but so far testing only shows the problem on HSW.
*/
- if (HAS_DDI(dev) && !position) {
+ if (HAS_DDI(dev_priv) && !position) {
int i, temp;
for (i = 0; i < 100; i++) {
@@ -793,7 +775,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int position;
@@ -835,7 +817,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
if (stime)
*stime = ktime_get();
- if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+ if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
/* No obvious pixelcount register. Only query vertical
* scanout position from Display scan line register.
*/
@@ -897,7 +879,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
else
position += vtotal - vbl_end;
- if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+ if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
*vpos = position;
*hpos = 0;
} else {
@@ -914,7 +896,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
unsigned long irqflags;
int position;
@@ -955,9 +937,8 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
&crtc->hwmode);
}
-static void ironlake_rps_change_irq_handler(struct drm_device *dev)
+static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 busy_up, busy_down, max_avg, min_avg;
u8 new_delay;
@@ -986,7 +967,7 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev)
new_delay = dev_priv->ips.min_delay;
}
- if (ironlake_set_drps(dev, new_delay))
+ if (ironlake_set_drps(dev_priv, new_delay))
dev_priv->ips.cur_delay = new_delay;
spin_unlock(&mchdev_lock);
@@ -996,13 +977,11 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev)
static void notify_ring(struct intel_engine_cs *engine)
{
- if (!intel_engine_initialized(engine))
- return;
-
- trace_i915_gem_request_notify(engine);
- engine->user_interrupts++;
-
- wake_up_all(&engine->irq_queue);
+ smp_store_mb(engine->breadcrumbs.irq_posted, true);
+ if (intel_engine_wakeup(engine)) {
+ trace_i915_gem_request_notify(engine);
+ engine->breadcrumbs.irq_wakeups++;
+ }
}
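/*
 * Sketch of the ordering notify_ring() relies on; the waiter side is
 * assumed here (it lives in the breadcrumbs code, outside this hunk):
 *
 *	waiter				irq handler
 *	------				-----------
 *	register as waiter		smp_store_mb(irq_posted, true)
 *	check irq_posted, else sleep	intel_engine_wakeup(engine)
 *
 * smp_store_mb() is a store followed by a full barrier, so the flag is
 * published before the wakeup is attempted; a waiter racing with the
 * interrupt therefore observes the flag or receives the wakeup, never
 * neither.
 */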
static void vlv_c0_read(struct drm_i915_private *dev_priv,
@@ -1083,7 +1062,7 @@ static bool any_waiters(struct drm_i915_private *dev_priv)
struct intel_engine_cs *engine;
for_each_engine(engine, dev_priv)
- if (engine->irq_refcount)
+ if (intel_engine_has_waiter(engine))
return true;
return false;
@@ -1104,13 +1083,6 @@ static void gen6_pm_rps_work(struct work_struct *work)
return;
}
- /*
- * The RPS work is synced during runtime suspend, we don't require a
- * wakeref. TODO: instead of disabling the asserts make sure that we
- * always hold an RPM reference while the work is running.
- */
- DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
-
pm_iir = dev_priv->rps.pm_iir;
dev_priv->rps.pm_iir = 0;
/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
@@ -1123,7 +1095,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
- goto out;
+ return;
mutex_lock(&dev_priv->rps.hw_lock);
@@ -1175,11 +1147,9 @@ static void gen6_pm_rps_work(struct work_struct *work)
new_delay += adj;
new_delay = clamp_t(int, new_delay, min, max);
- intel_set_rps(dev_priv->dev, new_delay);
+ intel_set_rps(dev_priv, new_delay);
mutex_unlock(&dev_priv->rps.hw_lock);
-out:
- ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
}
@@ -1205,7 +1175,7 @@ static void ivybridge_parity_work(struct work_struct *work)
* In order to prevent a get/put style interface, acquire struct mutex
* any time we access those registers.
*/
- mutex_lock(&dev_priv->dev->struct_mutex);
+ mutex_lock(&dev_priv->drm.struct_mutex);
/* If we've screwed up tracking, just let the interrupt fire again */
if (WARN_ON(!dev_priv->l3_parity.which_slice))
@@ -1241,7 +1211,7 @@ static void ivybridge_parity_work(struct work_struct *work)
parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
parity_event[5] = NULL;
- kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
+ kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
KOBJ_CHANGE, parity_event);
DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
@@ -1261,7 +1231,7 @@ out:
gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
spin_unlock_irq(&dev_priv->irq_lock);
- mutex_unlock(&dev_priv->dev->struct_mutex);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
}
static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
@@ -1287,8 +1257,7 @@ static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv
static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
- if (gt_iir &
- (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
+ if (gt_iir & GT_RENDER_USER_INTERRUPT)
notify_ring(&dev_priv->engine[RCS]);
if (gt_iir & ILK_BSD_USER_INTERRUPT)
notify_ring(&dev_priv->engine[VCS]);
@@ -1297,9 +1266,7 @@ static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
-
- if (gt_iir &
- (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
+ if (gt_iir & GT_RENDER_USER_INTERRUPT)
notify_ring(&dev_priv->engine[RCS]);
if (gt_iir & GT_BSD_USER_INTERRUPT)
notify_ring(&dev_priv->engine[VCS]);
@@ -1506,27 +1473,23 @@ static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
}
-static void gmbus_irq_handler(struct drm_device *dev)
+static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
wake_up_all(&dev_priv->gmbus_wait_queue);
}
-static void dp_aux_irq_handler(struct drm_device *dev)
+static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
wake_up_all(&dev_priv->gmbus_wait_queue);
}
#if defined(CONFIG_DEBUG_FS)
-static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
+static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
uint32_t crc0, uint32_t crc1,
uint32_t crc2, uint32_t crc3,
uint32_t crc4)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
struct intel_pipe_crc_entry *entry;
int head, tail;
@@ -1550,7 +1513,8 @@ static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
entry = &pipe_crc->entries[head];
- entry->frame = dev->driver->get_vblank_counter(dev, pipe);
+ entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm,
+ pipe);
entry->crc[0] = crc0;
entry->crc[1] = crc1;
entry->crc[2] = crc2;
@@ -1566,27 +1530,26 @@ static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
}
#else
static inline void
-display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
+display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
uint32_t crc0, uint32_t crc1,
uint32_t crc2, uint32_t crc3,
uint32_t crc4) {}
#endif
-static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- display_pipe_crc_irq_handler(dev, pipe,
+ display_pipe_crc_irq_handler(dev_priv, pipe,
I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
0, 0, 0, 0);
}
-static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- display_pipe_crc_irq_handler(dev, pipe,
+ display_pipe_crc_irq_handler(dev_priv, pipe,
I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
@@ -1594,22 +1557,22 @@ static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}
-static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t res1, res2;
- if (INTEL_INFO(dev)->gen >= 3)
+ if (INTEL_GEN(dev_priv) >= 3)
res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
else
res1 = 0;
- if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
else
res2 = 0;
- display_pipe_crc_irq_handler(dev, pipe,
+ display_pipe_crc_irq_handler(dev_priv, pipe,
I915_READ(PIPE_CRC_RES_RED(pipe)),
I915_READ(PIPE_CRC_RES_GREEN(pipe)),
I915_READ(PIPE_CRC_RES_BLUE(pipe)),
@@ -1626,7 +1589,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
if (dev_priv->rps.interrupts_enabled) {
dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
- queue_work(dev_priv->wq, &dev_priv->rps.work);
+ schedule_work(&dev_priv->rps.work);
}
spin_unlock(&dev_priv->irq_lock);
}
@@ -1643,18 +1606,21 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
}
}
-static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
+static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
- if (!drm_handle_vblank(dev, pipe))
- return false;
+ bool ret;
- return true;
+ ret = drm_handle_vblank(&dev_priv->drm, pipe);
+ if (ret)
+ intel_finish_page_flip_mmio(dev_priv, pipe);
+
+ return ret;
}
-static void valleyview_pipestat_irq_ack(struct drm_device *dev, u32 iir,
- u32 pipe_stats[I915_MAX_PIPES])
+static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
+ u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
spin_lock(&dev_priv->irq_lock);
@@ -1710,31 +1676,28 @@ static void valleyview_pipestat_irq_ack(struct drm_device *dev, u32 iir,
spin_unlock(&dev_priv->irq_lock);
}
-static void valleyview_pipestat_irq_handler(struct drm_device *dev,
+static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
u32 pipe_stats[I915_MAX_PIPES])
{
- struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
- intel_pipe_handle_vblank(dev, pipe))
- intel_check_page_flip(dev, pipe);
+ intel_pipe_handle_vblank(dev_priv, pipe))
+ intel_check_page_flip(dev_priv, pipe);
- if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
- intel_prepare_page_flip(dev, pipe);
- intel_finish_page_flip(dev, pipe);
- }
+ if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
+ intel_finish_page_flip_cs(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev, pipe);
+ i9xx_pipe_crc_irq_handler(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
}
if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
- gmbus_irq_handler(dev);
+ gmbus_irq_handler(dev_priv);
}
static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
@@ -1747,12 +1710,13 @@ static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
return hotplug_status;
}
-static void i9xx_hpd_irq_handler(struct drm_device *dev,
+static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 hotplug_status)
{
u32 pin_mask = 0, long_mask = 0;
- if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv)) {
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
if (hotplug_trigger) {
@@ -1760,11 +1724,11 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev,
hotplug_trigger, hpd_status_g4x,
i9xx_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev, pin_mask, long_mask);
+ intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
- dp_aux_irq_handler(dev);
+ dp_aux_irq_handler(dev_priv);
} else {
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
@@ -1772,7 +1736,7 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev,
intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
hotplug_trigger, hpd_status_i915,
i9xx_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev, pin_mask, long_mask);
+ intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
}
}
@@ -1780,7 +1744,7 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev,
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -1831,7 +1795,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
/* Call regardless, as some status bits might not be
* signalled in iir */
- valleyview_pipestat_irq_ack(dev, iir, pipe_stats);
+ valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
/*
* VLV_IIR is single buffered, and reflects the level
@@ -1850,9 +1814,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
gen6_rps_irq_handler(dev_priv, pm_iir);
if (hotplug_status)
- i9xx_hpd_irq_handler(dev, hotplug_status);
+ i9xx_hpd_irq_handler(dev_priv, hotplug_status);
- valleyview_pipestat_irq_handler(dev, pipe_stats);
+ valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
} while (0);
enable_rpm_wakeref_asserts(dev_priv);
@@ -1863,7 +1827,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -1911,7 +1875,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
/* Call regardless, as some status bits might not be
* signalled in iir */
- valleyview_pipestat_irq_ack(dev, iir, pipe_stats);
+ valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
/*
* VLV_IIR is single buffered, and reflects the level
@@ -1927,9 +1891,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
gen8_gt_irq_handler(dev_priv, gt_iir);
if (hotplug_status)
- i9xx_hpd_irq_handler(dev, hotplug_status);
+ i9xx_hpd_irq_handler(dev_priv, hotplug_status);
- valleyview_pipestat_irq_handler(dev, pipe_stats);
+ valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
} while (0);
enable_rpm_wakeref_asserts(dev_priv);
@@ -1937,10 +1901,10 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
return ret;
}
-static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
+static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
+ u32 hotplug_trigger,
const u32 hpd[HPD_NUM_PINS])
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
/*
@@ -1966,16 +1930,15 @@ static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
dig_hotplug_reg, hpd,
pch_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev, pin_mask, long_mask);
+ intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
-static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
+static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
- ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
+ ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
if (pch_iir & SDE_AUDIO_POWER_MASK) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -1985,10 +1948,10 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
}
if (pch_iir & SDE_AUX_MASK)
- dp_aux_irq_handler(dev);
+ dp_aux_irq_handler(dev_priv);
if (pch_iir & SDE_GMBUS)
- gmbus_irq_handler(dev);
+ gmbus_irq_handler(dev_priv);
if (pch_iir & SDE_AUDIO_HDCP_MASK)
DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
@@ -2018,9 +1981,8 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}
-static void ivb_err_int_handler(struct drm_device *dev)
+static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 err_int = I915_READ(GEN7_ERR_INT);
enum pipe pipe;
@@ -2032,19 +1994,18 @@ static void ivb_err_int_handler(struct drm_device *dev)
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
- if (IS_IVYBRIDGE(dev))
- ivb_pipe_crc_irq_handler(dev, pipe);
+ if (IS_IVYBRIDGE(dev_priv))
+ ivb_pipe_crc_irq_handler(dev_priv, pipe);
else
- hsw_pipe_crc_irq_handler(dev, pipe);
+ hsw_pipe_crc_irq_handler(dev_priv, pipe);
}
}
I915_WRITE(GEN7_ERR_INT, err_int);
}
-static void cpt_serr_int_handler(struct drm_device *dev)
+static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 serr_int = I915_READ(SERR_INT);
if (serr_int & SERR_INT_POISON)
@@ -2062,13 +2023,12 @@ static void cpt_serr_int_handler(struct drm_device *dev)
I915_WRITE(SERR_INT, serr_int);
}
-static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
+static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
- ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
+ ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -2078,10 +2038,10 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
}
if (pch_iir & SDE_AUX_MASK_CPT)
- dp_aux_irq_handler(dev);
+ dp_aux_irq_handler(dev_priv);
if (pch_iir & SDE_GMBUS_CPT)
- gmbus_irq_handler(dev);
+ gmbus_irq_handler(dev_priv);
if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
@@ -2096,12 +2056,11 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
I915_READ(FDI_RX_IIR(pipe)));
if (pch_iir & SDE_ERROR_CPT)
- cpt_serr_int_handler(dev);
+ cpt_serr_int_handler(dev_priv);
}
-static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
+static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
~SDE_PORTE_HOTPLUG_SPT;
u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
@@ -2130,16 +2089,16 @@ static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
}
if (pin_mask)
- intel_hpd_irq_handler(dev, pin_mask, long_mask);
+ intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
if (pch_iir & SDE_GMBUS_CPT)
- gmbus_irq_handler(dev);
+ gmbus_irq_handler(dev_priv);
}
-static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
+static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
+ u32 hotplug_trigger,
const u32 hpd[HPD_NUM_PINS])
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
@@ -2149,97 +2108,93 @@ static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
dig_hotplug_reg, hpd,
ilk_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev, pin_mask, long_mask);
+ intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
-static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
+static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
+ u32 de_iir)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
if (hotplug_trigger)
- ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);
+ ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
if (de_iir & DE_AUX_CHANNEL_A)
- dp_aux_irq_handler(dev);
+ dp_aux_irq_handler(dev_priv);
if (de_iir & DE_GSE)
- intel_opregion_asle_intr(dev);
+ intel_opregion_asle_intr(dev_priv);
if (de_iir & DE_POISON)
DRM_ERROR("Poison interrupt\n");
for_each_pipe(dev_priv, pipe) {
if (de_iir & DE_PIPE_VBLANK(pipe) &&
- intel_pipe_handle_vblank(dev, pipe))
- intel_check_page_flip(dev, pipe);
+ intel_pipe_handle_vblank(dev_priv, pipe))
+ intel_check_page_flip(dev_priv, pipe);
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
if (de_iir & DE_PIPE_CRC_DONE(pipe))
- i9xx_pipe_crc_irq_handler(dev, pipe);
+ i9xx_pipe_crc_irq_handler(dev_priv, pipe);
/* plane/pipes map 1:1 on ilk+ */
- if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
- intel_prepare_page_flip(dev, pipe);
- intel_finish_page_flip_plane(dev, pipe);
- }
+ if (de_iir & DE_PLANE_FLIP_DONE(pipe))
+ intel_finish_page_flip_cs(dev_priv, pipe);
}
/* check event from PCH */
if (de_iir & DE_PCH_EVENT) {
u32 pch_iir = I915_READ(SDEIIR);
- if (HAS_PCH_CPT(dev))
- cpt_irq_handler(dev, pch_iir);
+ if (HAS_PCH_CPT(dev_priv))
+ cpt_irq_handler(dev_priv, pch_iir);
else
- ibx_irq_handler(dev, pch_iir);
+ ibx_irq_handler(dev_priv, pch_iir);
/* should clear PCH hotplug event before clear CPU irq */
I915_WRITE(SDEIIR, pch_iir);
}
- if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
- ironlake_rps_change_irq_handler(dev);
+ if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
+ ironlake_rps_change_irq_handler(dev_priv);
}
-static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
+static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
+ u32 de_iir)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
if (hotplug_trigger)
- ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);
+ ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
if (de_iir & DE_ERR_INT_IVB)
- ivb_err_int_handler(dev);
+ ivb_err_int_handler(dev_priv);
if (de_iir & DE_AUX_CHANNEL_A_IVB)
- dp_aux_irq_handler(dev);
+ dp_aux_irq_handler(dev_priv);
if (de_iir & DE_GSE_IVB)
- intel_opregion_asle_intr(dev);
+ intel_opregion_asle_intr(dev_priv);
for_each_pipe(dev_priv, pipe) {
if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
- intel_pipe_handle_vblank(dev, pipe))
- intel_check_page_flip(dev, pipe);
+ intel_pipe_handle_vblank(dev_priv, pipe))
+ intel_check_page_flip(dev_priv, pipe);
/* plane/pipes map 1:1 on ilk+ */
- if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
- intel_prepare_page_flip(dev, pipe);
- intel_finish_page_flip_plane(dev, pipe);
- }
+ if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
+ intel_finish_page_flip_cs(dev_priv, pipe);
}
/* check event from PCH */
- if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
+ if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
u32 pch_iir = I915_READ(SDEIIR);
- cpt_irq_handler(dev, pch_iir);
+ cpt_irq_handler(dev_priv, pch_iir);
/* clear PCH hotplug event before clear CPU irq */
I915_WRITE(SDEIIR, pch_iir);
@@ -2257,7 +2212,7 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 de_iir, gt_iir, de_ier, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
@@ -2277,7 +2232,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
* able to process them after we restore SDEIER (as soon as we restore
* it, we'll get an interrupt if SDEIIR still has something to process
* due to its back queue). */
- if (!HAS_PCH_NOP(dev)) {
+ if (!HAS_PCH_NOP(dev_priv)) {
sde_ier = I915_READ(SDEIER);
I915_WRITE(SDEIER, 0);
POSTING_READ(SDEIER);
@@ -2289,7 +2244,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
if (gt_iir) {
I915_WRITE(GTIIR, gt_iir);
ret = IRQ_HANDLED;
- if (INTEL_INFO(dev)->gen >= 6)
+ if (INTEL_GEN(dev_priv) >= 6)
snb_gt_irq_handler(dev_priv, gt_iir);
else
ilk_gt_irq_handler(dev_priv, gt_iir);
@@ -2299,13 +2254,13 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
if (de_iir) {
I915_WRITE(DEIIR, de_iir);
ret = IRQ_HANDLED;
- if (INTEL_INFO(dev)->gen >= 7)
- ivb_display_irq_handler(dev, de_iir);
+ if (INTEL_GEN(dev_priv) >= 7)
+ ivb_display_irq_handler(dev_priv, de_iir);
else
- ilk_display_irq_handler(dev, de_iir);
+ ilk_display_irq_handler(dev_priv, de_iir);
}
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
u32 pm_iir = I915_READ(GEN6_PMIIR);
if (pm_iir) {
I915_WRITE(GEN6_PMIIR, pm_iir);
@@ -2316,7 +2271,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
I915_WRITE(DEIER, de_ier);
POSTING_READ(DEIER);
- if (!HAS_PCH_NOP(dev)) {
+ if (!HAS_PCH_NOP(dev_priv)) {
I915_WRITE(SDEIER, sde_ier);
POSTING_READ(SDEIER);
}
@@ -2327,10 +2282,10 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
return ret;
}
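/*
 * The SDEIER bracket above, condensed: the PCH interrupt enable is
 * cleared up front so that acking SDEIIR cannot immediately re-raise
 * the CPU interrupt mid-handler; once handling is complete SDEIER is
 * restored, and anything still queued in SDEIIR then re-triggers the
 * interrupt instead of being lost.
 */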
-static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
+static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
+ u32 hotplug_trigger,
const u32 hpd[HPD_NUM_PINS])
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
@@ -2340,13 +2295,12 @@ static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
dig_hotplug_reg, hpd,
bxt_port_hotplug_long_detect);
- intel_hpd_irq_handler(dev, pin_mask, long_mask);
+ intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
- struct drm_device *dev = dev_priv->dev;
irqreturn_t ret = IRQ_NONE;
u32 iir;
enum pipe pipe;
@@ -2357,7 +2311,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(GEN8_DE_MISC_IIR, iir);
ret = IRQ_HANDLED;
if (iir & GEN8_DE_MISC_GSE)
- intel_opregion_asle_intr(dev);
+ intel_opregion_asle_intr(dev_priv);
else
DRM_ERROR("Unexpected DE Misc interrupt\n");
}
@@ -2381,26 +2335,28 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
GEN9_AUX_CHANNEL_D;
if (iir & tmp_mask) {
- dp_aux_irq_handler(dev);
+ dp_aux_irq_handler(dev_priv);
found = true;
}
if (IS_BROXTON(dev_priv)) {
tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
if (tmp_mask) {
- bxt_hpd_irq_handler(dev, tmp_mask, hpd_bxt);
+ bxt_hpd_irq_handler(dev_priv, tmp_mask,
+ hpd_bxt);
found = true;
}
} else if (IS_BROADWELL(dev_priv)) {
tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
if (tmp_mask) {
- ilk_hpd_irq_handler(dev, tmp_mask, hpd_bdw);
+ ilk_hpd_irq_handler(dev_priv,
+ tmp_mask, hpd_bdw);
found = true;
}
}
- if (IS_BROXTON(dev) && (iir & BXT_DE_PORT_GMBUS)) {
- gmbus_irq_handler(dev);
+ if (IS_BROXTON(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
+ gmbus_irq_handler(dev_priv);
found = true;
}
@@ -2427,8 +2383,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
if (iir & GEN8_PIPE_VBLANK &&
- intel_pipe_handle_vblank(dev, pipe))
- intel_check_page_flip(dev, pipe);
+ intel_pipe_handle_vblank(dev_priv, pipe))
+ intel_check_page_flip(dev_priv, pipe);
flip_done = iir;
if (INTEL_INFO(dev_priv)->gen >= 9)
@@ -2436,13 +2392,11 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
else
flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
- if (flip_done) {
- intel_prepare_page_flip(dev, pipe);
- intel_finish_page_flip_plane(dev, pipe);
- }
+ if (flip_done)
+ intel_finish_page_flip_cs(dev_priv, pipe);
if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
- hsw_pipe_crc_irq_handler(dev, pipe);
+ hsw_pipe_crc_irq_handler(dev_priv, pipe);
if (iir & GEN8_PIPE_FIFO_UNDERRUN)
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
@@ -2459,7 +2413,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
fault_errors);
}
- if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
+ if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
master_ctl & GEN8_DE_PCH_IRQ) {
/*
* FIXME(BDW): Assume for now that the new interrupt handling
@@ -2472,9 +2426,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
ret = IRQ_HANDLED;
if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
- spt_irq_handler(dev, iir);
+ spt_irq_handler(dev_priv, iir);
else
- cpt_irq_handler(dev, iir);
+ cpt_irq_handler(dev_priv, iir);
} else {
/*
* Like on previous PCH there seems to be something
@@ -2490,7 +2444,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 master_ctl;
u32 gt_iir[4] = {};
irqreturn_t ret;
@@ -2521,11 +2475,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
return ret;
}
-static void i915_error_wake_up(struct drm_i915_private *dev_priv,
- bool reset_completed)
+static void i915_error_wake_up(struct drm_i915_private *dev_priv)
{
- struct intel_engine_cs *engine;
-
/*
* Notify all waiters for GPU completion events that reset state has
* been changed, and that they need to restart their wait after
@@ -2534,36 +2485,28 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
*/
/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
- for_each_engine(engine, dev_priv)
- wake_up_all(&engine->irq_queue);
+ wake_up_all(&dev_priv->gpu_error.wait_queue);
/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
wake_up_all(&dev_priv->pending_flip_queue);
-
- /*
- * Signal tasks blocked in i915_gem_wait_for_error that the pending
- * reset state is cleared.
- */
- if (reset_completed)
- wake_up_all(&dev_priv->gpu_error.reset_queue);
}
/**
* i915_reset_and_wakeup - do process context error handling work
- * @dev: drm device
+ * @dev_priv: i915 device private
*
* Fire an error uevent so userspace can see that a hang or error
* was detected.
*/
-static void i915_reset_and_wakeup(struct drm_device *dev)
+static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
int ret;
- kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
+ kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
/*
* Note that there's only one work item which does gpu resets, so we
@@ -2577,8 +2520,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
*/
if (i915_reset_in_progress(&dev_priv->gpu_error)) {
DRM_DEBUG_DRIVER("resetting chip\n");
- kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
- reset_event);
+ kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
/*
* In most cases it's guaranteed that we get here with an RPM
@@ -2589,7 +2531,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
*/
intel_runtime_pm_get(dev_priv);
- intel_prepare_reset(dev);
+ intel_prepare_reset(dev_priv);
/*
* All state reset _must_ be completed before we update the
@@ -2597,27 +2539,26 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
* pending state and not properly drop locks, resulting in
* deadlocks with the reset work.
*/
- ret = i915_reset(dev);
+ ret = i915_reset(dev_priv);
- intel_finish_reset(dev);
+ intel_finish_reset(dev_priv);
intel_runtime_pm_put(dev_priv);
if (ret == 0)
- kobject_uevent_env(&dev->primary->kdev->kobj,
+ kobject_uevent_env(kobj,
KOBJ_CHANGE, reset_done_event);
/*
* Note: The wake_up also serves as a memory barrier so that
* waiters see the update value of the reset counter atomic_t.
*/
- i915_error_wake_up(dev_priv, true);
+ wake_up_all(&dev_priv->gpu_error.reset_queue);
}
}
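Note that i915_reset_and_wakeup() above talks to userspace purely through uevents: ERROR=1 when a hang is detected, RESET=1 while the reset runs, ERROR=0 once it succeeds. A minimal listener (a hedged sketch assuming the stock libudev API, and that the I915_ERROR_UEVENT/I915_RESET_UEVENT strings expand to "ERROR" and "RESET"; not part of the patch) could watch for these:

/* Hedged userspace sketch: watch raw kernel uevents on the drm
 * subsystem and report the ERROR/RESET properties fired above.
 * Build with -ludev; property names are assumptions, not verified here.
 */
#include <poll.h>
#include <stdio.h>
#include <libudev.h>

int main(void)
{
	struct udev *udev = udev_new();
	struct udev_monitor *mon =
		udev_monitor_new_from_netlink(udev, "kernel");
	struct pollfd pfd;

	udev_monitor_filter_add_match_subsystem_devtype(mon, "drm", NULL);
	udev_monitor_enable_receiving(mon);
	pfd.fd = udev_monitor_get_fd(mon);
	pfd.events = POLLIN;

	for (;;) {
		struct udev_device *dev;
		const char *err, *reset;

		if (poll(&pfd, 1, -1) <= 0)
			continue;
		dev = udev_monitor_receive_device(mon);
		if (!dev)
			continue;
		err = udev_device_get_property_value(dev, "ERROR");
		reset = udev_device_get_property_value(dev, "RESET");
		if (err || reset)
			printf("gpu event: ERROR=%s RESET=%s\n",
			       err ? err : "-", reset ? reset : "-");
		udev_device_unref(dev);
	}
}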
-static void i915_report_and_clear_eir(struct drm_device *dev)
+static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t instdone[I915_NUM_INSTDONE_REG];
u32 eir = I915_READ(EIR);
int pipe, i;
@@ -2627,9 +2568,9 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
pr_err("render error detected, EIR: 0x%08x\n", eir);
- i915_get_extra_instdone(dev, instdone);
+ i915_get_extra_instdone(dev_priv, instdone);
- if (IS_G4X(dev)) {
+ if (IS_G4X(dev_priv)) {
if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
u32 ipeir = I915_READ(IPEIR_I965);
@@ -2651,7 +2592,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
}
}
- if (!IS_GEN2(dev)) {
+ if (!IS_GEN2(dev_priv)) {
if (eir & I915_ERROR_PAGE_TABLE) {
u32 pgtbl_err = I915_READ(PGTBL_ER);
pr_err("page table error\n");
@@ -2673,7 +2614,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
for (i = 0; i < ARRAY_SIZE(instdone); i++)
pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
- if (INTEL_INFO(dev)->gen < 4) {
+ if (INTEL_GEN(dev_priv) < 4) {
u32 ipeir = I915_READ(IPEIR);
pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
@@ -2709,18 +2650,19 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
/**
* i915_handle_error - handle a gpu error
- * @dev: drm device
+ * @dev_priv: i915 device private
* @engine_mask: mask representing engines that are hung
+ * @fmt: Error message format string
+ *
* Do some basic checking of register state at error time and
* dump it to the syslog. Also call i915_capture_error_state() to make
* sure we get a record and make it available in debugfs. Fire a uevent
* so userspace knows something bad happened (should trigger collection
* of a ring dump etc.).
*/
-void i915_handle_error(struct drm_device *dev, u32 engine_mask,
+void i915_handle_error(struct drm_i915_private *dev_priv,
+ u32 engine_mask,
const char *fmt, ...)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
va_list args;
char error_msg[80];
@@ -2728,8 +2670,8 @@ void i915_handle_error(struct drm_device *dev, u32 engine_mask,
vscnprintf(error_msg, sizeof(error_msg), fmt, args);
va_end(args);
- i915_capture_error_state(dev, engine_mask, error_msg);
- i915_report_and_clear_eir(dev);
+ i915_capture_error_state(dev_priv, engine_mask, error_msg);
+ i915_report_and_clear_eir(dev_priv);
if (engine_mask) {
atomic_or(I915_RESET_IN_PROGRESS_FLAG,
@@ -2748,10 +2690,10 @@ void i915_handle_error(struct drm_device *dev, u32 engine_mask,
* ensure that the waiters see the updated value of the reset
* counter atomic_t.
*/
- i915_error_wake_up(dev_priv, false);
+ i915_error_wake_up(dev_priv);
}
- i915_reset_and_wakeup(dev);
+ i915_reset_and_wakeup(dev_priv);
}
/* Called from drm generic code, passed 'crtc' which
@@ -2759,7 +2701,7 @@ void i915_handle_error(struct drm_device *dev, u32 engine_mask,
*/
static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2776,7 +2718,7 @@ static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
DE_PIPE_VBLANK(pipe);
@@ -2790,7 +2732,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2803,7 +2745,7 @@ static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2818,7 +2760,7 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
*/
static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2830,7 +2772,7 @@ static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
DE_PIPE_VBLANK(pipe);
@@ -2842,7 +2784,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2853,7 +2795,7 @@ static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2869,9 +2811,9 @@ ring_idle(struct intel_engine_cs *engine, u32 seqno)
}
static bool
-ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
+ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
{
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(engine->i915) >= 8) {
return (ipehr >> 23) == 0x1c;
} else {
ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
@@ -2884,10 +2826,10 @@ static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
u64 offset)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
struct intel_engine_cs *signaller;
- if (INTEL_INFO(dev_priv)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
for_each_engine(signaller, dev_priv) {
if (engine == signaller)
continue;
@@ -2916,7 +2858,7 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
static struct intel_engine_cs *
semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
u32 cmd, ipehr, head;
u64 offset = 0;
int i, backwards;
@@ -2942,7 +2884,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
return NULL;
ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
- if (!ipehr_is_semaphore_wait(engine->dev, ipehr))
+ if (!ipehr_is_semaphore_wait(engine, ipehr))
return NULL;
/*
@@ -2954,7 +2896,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
* ringbuffer itself.
*/
head = I915_READ_HEAD(engine) & HEAD_ADDR;
- backwards = (INTEL_INFO(engine->dev)->gen >= 8) ? 5 : 4;
+ backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
for (i = backwards; i; --i) {
/*
@@ -2976,7 +2918,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
return NULL;
*seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
- if (INTEL_INFO(engine->dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
offset = ioread32(engine->buffer->virtual_start + head + 12);
offset <<= 32;
offset = ioread32(engine->buffer->virtual_start + head + 8);
@@ -2986,7 +2928,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
static int semaphore_passed(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
struct intel_engine_cs *signaller;
u32 seqno;
@@ -3000,7 +2942,7 @@ static int semaphore_passed(struct intel_engine_cs *engine)
if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
return -1;
- if (i915_seqno_passed(signaller->get_seqno(signaller), seqno))
+ if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
return 1;
/* cursory check for an unkickable deadlock */
@@ -3028,7 +2970,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine)
if (engine->id != RCS)
return true;
- i915_get_extra_instdone(engine->dev, instdone);
+ i915_get_extra_instdone(engine->i915, instdone);
/* There might be unstable subunit states even when
* actual head is not moving. Filter out the unstable ones by
@@ -3069,8 +3011,7 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd)
static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *engine, u64 acthd)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
enum intel_ring_hangcheck_action ha;
u32 tmp;
@@ -3078,7 +3019,7 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
if (ha != HANGCHECK_HUNG)
return ha;
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
return HANGCHECK_HUNG;
/* Is the chip hanging on a WAIT_FOR_EVENT?
@@ -3088,19 +3029,19 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
*/
tmp = I915_READ_CTL(engine);
if (tmp & RING_WAIT) {
- i915_handle_error(dev, 0,
+ i915_handle_error(dev_priv, 0,
"Kicking stuck wait on %s",
engine->name);
I915_WRITE_CTL(engine, tmp);
return HANGCHECK_KICK;
}
- if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
+ if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
switch (semaphore_passed(engine)) {
default:
return HANGCHECK_HUNG;
case 1:
- i915_handle_error(dev, 0,
+ i915_handle_error(dev_priv, 0,
"Kicking stuck semaphore on %s",
engine->name);
I915_WRITE_CTL(engine, tmp);
@@ -3113,23 +3054,21 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
return HANGCHECK_HUNG;
}
-static unsigned kick_waiters(struct intel_engine_cs *engine)
+static unsigned long kick_waiters(struct intel_engine_cs *engine)
{
- struct drm_i915_private *i915 = to_i915(engine->dev);
- unsigned user_interrupts = READ_ONCE(engine->user_interrupts);
+ struct drm_i915_private *i915 = engine->i915;
+ unsigned long irq_count = READ_ONCE(engine->breadcrumbs.irq_wakeups);
- if (engine->hangcheck.user_interrupts == user_interrupts &&
+ if (engine->hangcheck.user_interrupts == irq_count &&
!test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
- if (!(i915->gpu_error.test_irq_rings & intel_engine_flag(engine)))
+ if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
engine->name);
- else
- DRM_INFO("Fake missed irq on %s\n",
- engine->name);
- wake_up_all(&engine->irq_queue);
+
+ intel_engine_enable_fake_irq(engine);
}
- return user_interrupts;
+ return irq_count;
}
/*
* This is called when the chip hasn't reported back with completed
@@ -3144,11 +3083,9 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv),
gpu_error.hangcheck_work.work);
- struct drm_device *dev = dev_priv->dev;
struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int busy_count = 0, rings_hung = 0;
- bool stuck[I915_NUM_ENGINES] = { 0 };
+ unsigned int hung = 0, stuck = 0;
+ int busy_count = 0;
#define BUSY 1
#define KICK 5
#define HUNG 20
@@ -3157,12 +3094,8 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
if (!i915.enable_hangcheck)
return;
- /*
- * The hangcheck work is synced during runtime suspend, we don't
- * require a wakeref. TODO: instead of disabling the asserts make
- * sure that we hold a reference when this work is running.
- */
- DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
+ if (!READ_ONCE(dev_priv->gt.awake))
+ return;
/* As enabling the GPU requires fairly extensive mmio access,
* periodically arm the mmio checker to see if we are triggering
@@ -3170,11 +3103,11 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
*/
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
- for_each_engine_id(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv) {
+ bool busy = intel_engine_has_waiter(engine);
u64 acthd;
u32 seqno;
unsigned user_interrupts;
- bool busy = true;
semaphore_clear_deadlocks(dev_priv);
@@ -3189,7 +3122,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
engine->irq_seqno_barrier(engine);
acthd = intel_ring_get_active_head(engine);
- seqno = engine->get_seqno(engine);
+ seqno = intel_engine_get_seqno(engine);
/* Reset stuck interrupts between batch advances */
user_interrupts = 0;
@@ -3197,12 +3130,11 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
if (engine->hangcheck.seqno == seqno) {
if (ring_idle(engine, seqno)) {
engine->hangcheck.action = HANGCHECK_IDLE;
- if (waitqueue_active(&engine->irq_queue)) {
+ if (busy) {
/* Safeguard against driver failure */
user_interrupts = kick_waiters(engine);
engine->hangcheck.score += BUSY;
- } else
- busy = false;
+ }
} else {
/* We always increment the hangcheck score
* if the ring is busy and still processing
@@ -3234,10 +3166,15 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
break;
case HANGCHECK_HUNG:
engine->hangcheck.score += HUNG;
- stuck[id] = true;
break;
}
}
+
+ if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
+ hung |= intel_engine_flag(engine);
+ if (engine->hangcheck.action != HANGCHECK_HUNG)
+ stuck |= intel_engine_flag(engine);
+ }
} else {
engine->hangcheck.action = HANGCHECK_ACTIVE;
@@ -3262,48 +3199,33 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
busy_count += busy;
}
- for_each_engine_id(engine, dev_priv, id) {
- if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
- DRM_INFO("%s on %s\n",
- stuck[id] ? "stuck" : "no progress",
- engine->name);
- rings_hung |= intel_engine_flag(engine);
- }
- }
+ if (hung) {
+ char msg[80];
+ int len;
- if (rings_hung) {
- i915_handle_error(dev, rings_hung, "Engine(s) hung");
- goto out;
+ /* If some rings hung but others were still busy, only
+ * blame the hanging rings in the synopsis.
+ */
+ if (stuck != hung)
+ hung &= ~stuck;
+ len = scnprintf(msg, sizeof(msg),
+ "%s on ", stuck == hung ? "No progress" : "Hang");
+ for_each_engine_masked(engine, dev_priv, hung)
+ len += scnprintf(msg + len, sizeof(msg) - len,
+ "%s, ", engine->name);
+ msg[len-2] = '\0';
+
+ return i915_handle_error(dev_priv, hung, msg);
}
+ /* Reset timer in case GPU hangs without another request being added */
if (busy_count)
- /* Reset timer case chip hangs without another request
- * being added */
- i915_queue_hangcheck(dev);
-
-out:
- ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
-}
-
-void i915_queue_hangcheck(struct drm_device *dev)
-{
- struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
-
- if (!i915.enable_hangcheck)
- return;
-
- /* Don't continually defer the hangcheck so that it is always run at
- * least once after work has been scheduled on any ring. Otherwise,
- * we will ignore a hung ring if a second ring is kept busy.
- */
-
- queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
- round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
+ i915_queue_hangcheck(dev_priv);
}
static void ibx_irq_reset(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (HAS_PCH_NOP(dev))
return;
@@ -3324,7 +3246,7 @@ static void ibx_irq_reset(struct drm_device *dev)
*/
static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (HAS_PCH_NOP(dev))
return;
@@ -3336,7 +3258,7 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev)
static void gen5_gt_irq_reset(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
GEN5_IRQ_RESET(GT);
if (INTEL_INFO(dev)->gen >= 6)
@@ -3396,7 +3318,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
*/
static void ironlake_irq_reset(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(HWSTAM, 0xffffffff);
@@ -3411,7 +3333,7 @@ static void ironlake_irq_reset(struct drm_device *dev)
static void valleyview_irq_preinstall(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(VLV_MASTER_IER, 0);
POSTING_READ(VLV_MASTER_IER);
@@ -3434,7 +3356,7 @@ static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
static void gen8_irq_reset(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
I915_WRITE(GEN8_MASTER_IRQ, 0);
@@ -3480,12 +3402,12 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
spin_unlock_irq(&dev_priv->irq_lock);
/* make sure we're done processing display irqs */
- synchronize_irq(dev_priv->dev->irq);
+ synchronize_irq(dev_priv->drm.irq);
}
static void cherryview_irq_preinstall(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
@@ -3500,31 +3422,29 @@ static void cherryview_irq_preinstall(struct drm_device *dev)
spin_unlock_irq(&dev_priv->irq_lock);
}
-static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
+static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
const u32 hpd[HPD_NUM_PINS])
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder;
u32 enabled_irqs = 0;
- for_each_intel_encoder(dev, encoder)
+ for_each_intel_encoder(&dev_priv->drm, encoder)
if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
enabled_irqs |= hpd[encoder->hpd_pin];
return enabled_irqs;
}
-static void ibx_hpd_irq_setup(struct drm_device *dev)
+static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_irqs, hotplug, enabled_irqs;
- if (HAS_PCH_IBX(dev)) {
+ if (HAS_PCH_IBX(dev_priv)) {
hotplug_irqs = SDE_HOTPLUG_MASK;
- enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
} else {
hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
- enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
}
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3543,18 +3463,17 @@ static void ibx_hpd_irq_setup(struct drm_device *dev)
* When CPU and PCH are on the same package, port A
* HPD must be enabled in both north and south.
*/
- if (HAS_PCH_LPT_LP(dev))
+ if (HAS_PCH_LPT_LP(dev_priv))
hotplug |= PORTA_HOTPLUG_ENABLE;
I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
-static void spt_hpd_irq_setup(struct drm_device *dev)
+static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_irqs, hotplug, enabled_irqs;
hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
- enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3569,24 +3488,23 @@ static void spt_hpd_irq_setup(struct drm_device *dev)
I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}
-static void ilk_hpd_irq_setup(struct drm_device *dev)
+static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_irqs, hotplug, enabled_irqs;
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
- enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
- } else if (INTEL_INFO(dev)->gen >= 7) {
+ } else if (INTEL_GEN(dev_priv) >= 7) {
hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
- enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
} else {
hotplug_irqs = DE_DP_A_HOTPLUG;
- enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
}
@@ -3601,15 +3519,14 @@ static void ilk_hpd_irq_setup(struct drm_device *dev)
hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
- ibx_hpd_irq_setup(dev);
+ ibx_hpd_irq_setup(dev_priv);
}
-static void bxt_hpd_irq_setup(struct drm_device *dev)
+static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_irqs, hotplug, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3642,7 +3559,7 @@ static void bxt_hpd_irq_setup(struct drm_device *dev)
static void ibx_irq_postinstall(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 mask;
if (HAS_PCH_NOP(dev))
@@ -3659,7 +3576,7 @@ static void ibx_irq_postinstall(struct drm_device *dev)
static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 pm_irqs, gt_irqs;
pm_irqs = gt_irqs = 0;
@@ -3673,8 +3590,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
gt_irqs |= GT_RENDER_USER_INTERRUPT;
if (IS_GEN5(dev)) {
- gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
- ILK_BSD_USER_INTERRUPT;
+ gt_irqs |= ILK_BSD_USER_INTERRUPT;
} else {
gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
}
@@ -3696,7 +3612,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
static int ironlake_irq_postinstall(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 display_mask, extra_mask;
if (INTEL_INFO(dev)->gen >= 7) {
@@ -3775,7 +3691,7 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
static int valleyview_irq_postinstall(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
gen5_gt_irq_postinstall(dev);
@@ -3827,6 +3743,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
uint32_t de_pipe_enables;
u32 de_port_masked = GEN8_AUX_CHANNEL_A;
u32 de_port_enables;
+ u32 de_misc_masked = GEN8_DE_MISC_GSE;
enum pipe pipe;
if (INTEL_INFO(dev_priv)->gen >= 9) {
@@ -3862,11 +3779,12 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
de_pipe_enables);
GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
+ GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
}
static int gen8_irq_postinstall(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (HAS_PCH_SPLIT(dev))
ibx_irq_pre_postinstall(dev);
@@ -3885,7 +3803,7 @@ static int gen8_irq_postinstall(struct drm_device *dev)
static int cherryview_irq_postinstall(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
gen8_gt_irq_postinstall(dev_priv);
@@ -3902,7 +3820,7 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
static void gen8_irq_uninstall(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (!dev_priv)
return;
@@ -3912,7 +3830,7 @@ static void gen8_irq_uninstall(struct drm_device *dev)
static void valleyview_irq_uninstall(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (!dev_priv)
return;
@@ -3932,7 +3850,7 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
static void cherryview_irq_uninstall(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (!dev_priv)
return;
@@ -3952,7 +3870,7 @@ static void cherryview_irq_uninstall(struct drm_device *dev)
static void ironlake_irq_uninstall(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (!dev_priv)
return;
@@ -3962,7 +3880,7 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
static void i8xx_irq_preinstall(struct drm_device * dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
for_each_pipe(dev_priv, pipe)
@@ -3974,7 +3892,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
static int i8xx_irq_postinstall(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE16(EMR,
~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -4006,13 +3924,12 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
/*
* Returns true when a page flip has completed.
*/
-static bool i8xx_handle_vblank(struct drm_device *dev,
+static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv,
int plane, int pipe, u32 iir)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
- if (!intel_pipe_handle_vblank(dev, pipe))
+ if (!intel_pipe_handle_vblank(dev_priv, pipe))
return false;
if ((iir & flip_pending) == 0)
@@ -4027,19 +3944,18 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
if (I915_READ16(ISR) & flip_pending)
goto check_page_flip;
- intel_prepare_page_flip(dev, plane);
- intel_finish_page_flip(dev, pipe);
+ intel_finish_page_flip_cs(dev_priv, pipe);
return true;
check_page_flip:
- intel_check_page_flip(dev, pipe);
+ intel_check_page_flip(dev_priv, pipe);
return false;
}
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u16 iir, new_iir;
u32 pipe_stats[2];
int pipe;
@@ -4089,15 +4005,15 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
for_each_pipe(dev_priv, pipe) {
int plane = pipe;
- if (HAS_FBC(dev))
+ if (HAS_FBC(dev_priv))
plane = !plane;
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
- i8xx_handle_vblank(dev, plane, pipe, iir))
+ i8xx_handle_vblank(dev_priv, plane, pipe, iir))
flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev, pipe);
+ i9xx_pipe_crc_irq_handler(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
intel_cpu_fifo_underrun_irq_handler(dev_priv,
@@ -4116,7 +4032,7 @@ out:
static void i8xx_irq_uninstall(struct drm_device * dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
for_each_pipe(dev_priv, pipe) {
@@ -4131,7 +4047,7 @@ static void i8xx_irq_uninstall(struct drm_device * dev)
static void i915_irq_preinstall(struct drm_device * dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
if (I915_HAS_HOTPLUG(dev)) {
@@ -4149,7 +4065,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
static int i915_irq_postinstall(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 enable_mask;
I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -4182,7 +4098,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
I915_WRITE(IER, enable_mask);
POSTING_READ(IER);
- i915_enable_asle_pipestat(dev);
+ i915_enable_asle_pipestat(dev_priv);
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
@@ -4197,13 +4113,12 @@ static int i915_irq_postinstall(struct drm_device *dev)
/*
* Returns true when a page flip has completed.
*/
-static bool i915_handle_vblank(struct drm_device *dev,
+static bool i915_handle_vblank(struct drm_i915_private *dev_priv,
int plane, int pipe, u32 iir)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
- if (!intel_pipe_handle_vblank(dev, pipe))
+ if (!intel_pipe_handle_vblank(dev_priv, pipe))
return false;
if ((iir & flip_pending) == 0)
@@ -4218,19 +4133,18 @@ static bool i915_handle_vblank(struct drm_device *dev,
if (I915_READ(ISR) & flip_pending)
goto check_page_flip;
- intel_prepare_page_flip(dev, plane);
- intel_finish_page_flip(dev, pipe);
+ intel_finish_page_flip_cs(dev_priv, pipe);
return true;
check_page_flip:
- intel_check_page_flip(dev, pipe);
+ intel_check_page_flip(dev_priv, pipe);
return false;
}
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
u32 flip_mask =
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -4273,11 +4187,11 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
break;
/* Consume port. Then clear IIR or we'll miss events */
- if (I915_HAS_HOTPLUG(dev) &&
+ if (I915_HAS_HOTPLUG(dev_priv) &&
iir & I915_DISPLAY_PORT_INTERRUPT) {
u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
if (hotplug_status)
- i9xx_hpd_irq_handler(dev, hotplug_status);
+ i9xx_hpd_irq_handler(dev_priv, hotplug_status);
}
I915_WRITE(IIR, iir & ~flip_mask);
@@ -4288,18 +4202,18 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
for_each_pipe(dev_priv, pipe) {
int plane = pipe;
- if (HAS_FBC(dev))
+ if (HAS_FBC(dev_priv))
plane = !plane;
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
- i915_handle_vblank(dev, plane, pipe, iir))
+ i915_handle_vblank(dev_priv, plane, pipe, iir))
flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev, pipe);
+ i9xx_pipe_crc_irq_handler(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
intel_cpu_fifo_underrun_irq_handler(dev_priv,
@@ -4307,7 +4221,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
}
if (blc_event || (iir & I915_ASLE_INTERRUPT))
- intel_opregion_asle_intr(dev);
+ intel_opregion_asle_intr(dev_priv);
/* With MSI, interrupts are only generated when iir
* transitions from zero to nonzero. If another bit got
@@ -4335,7 +4249,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
static void i915_irq_uninstall(struct drm_device * dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
if (I915_HAS_HOTPLUG(dev)) {
@@ -4357,7 +4271,7 @@ static void i915_irq_uninstall(struct drm_device * dev)
static void i965_irq_preinstall(struct drm_device * dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
@@ -4373,7 +4287,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
static int i965_irq_postinstall(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 enable_mask;
u32 error_mask;
@@ -4391,7 +4305,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
enable_mask |= I915_USER_INTERRUPT;
- if (IS_G4X(dev))
+ if (IS_G4X(dev_priv))
enable_mask |= I915_BSD_USER_INTERRUPT;
/* Interrupt setup is already guaranteed to be single-threaded, this is
@@ -4406,7 +4320,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
* Enable some error detection, note the instruction error mask
* bit is reserved, so we leave it masked.
*/
- if (IS_G4X(dev)) {
+ if (IS_G4X(dev_priv)) {
error_mask = ~(GM45_ERROR_PAGE_TABLE |
GM45_ERROR_MEM_PRIV |
GM45_ERROR_CP_PRIV |
@@ -4424,26 +4338,25 @@ static int i965_irq_postinstall(struct drm_device *dev)
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
POSTING_READ(PORT_HOTPLUG_EN);
- i915_enable_asle_pipestat(dev);
+ i915_enable_asle_pipestat(dev_priv);
return 0;
}
-static void i915_hpd_irq_setup(struct drm_device *dev)
+static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_en;
assert_spin_locked(&dev_priv->irq_lock);
/* Note HDMI and DP share hotplug bits */
/* enable bits are the same for all generations */
- hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
+ hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
/* Programming the CRT detection parameters tends
to generate a spurious hotplug event about three
seconds later. So just do it once.
*/
- if (IS_G4X(dev))
+ if (IS_G4X(dev_priv))
hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
@@ -4458,7 +4371,7 @@ static void i915_hpd_irq_setup(struct drm_device *dev)
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 iir, new_iir;
u32 pipe_stats[I915_MAX_PIPES];
int ret = IRQ_NONE, pipe;
@@ -4510,7 +4423,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
if (iir & I915_DISPLAY_PORT_INTERRUPT) {
u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
if (hotplug_status)
- i9xx_hpd_irq_handler(dev, hotplug_status);
+ i9xx_hpd_irq_handler(dev_priv, hotplug_status);
}
I915_WRITE(IIR, iir & ~flip_mask);
@@ -4523,24 +4436,24 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
- i915_handle_vblank(dev, pipe, pipe, iir))
+ i915_handle_vblank(dev_priv, pipe, pipe, iir))
flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
- i9xx_pipe_crc_irq_handler(dev, pipe);
+ i9xx_pipe_crc_irq_handler(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
}
if (blc_event || (iir & I915_ASLE_INTERRUPT))
- intel_opregion_asle_intr(dev);
+ intel_opregion_asle_intr(dev_priv);
if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
- gmbus_irq_handler(dev);
+ gmbus_irq_handler(dev_priv);
/* With MSI, interrupts are only generated when iir
* transitions from zero to nonzero. If another bit got
@@ -4567,7 +4480,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
static void i965_irq_uninstall(struct drm_device * dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
if (!dev_priv)
@@ -4597,7 +4510,7 @@ static void i965_irq_uninstall(struct drm_device * dev)
*/
void intel_irq_init(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
intel_hpd_init_work(dev_priv);
@@ -4611,6 +4524,20 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
else
dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
+ dev_priv->rps.pm_intr_keep = 0;
+
+ /*
+ * SNB,IVB can, while VLV,CHV may, hard hang on a looping batchbuffer
+ * if GEN6_PM_UP_EI_EXPIRED is masked.
+ *
+ * TODO: verify if this can be reproduced on VLV,CHV.
+ */
+ if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
+ dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;
+
+ if (INTEL_INFO(dev_priv)->gen >= 8)
+ dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+
INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
i915_hangcheck_elapsed);
@@ -4674,12 +4601,12 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->disable_vblank = ironlake_disable_vblank;
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
} else {
- if (INTEL_INFO(dev_priv)->gen == 2) {
+ if (IS_GEN2(dev_priv)) {
dev->driver->irq_preinstall = i8xx_irq_preinstall;
dev->driver->irq_postinstall = i8xx_irq_postinstall;
dev->driver->irq_handler = i8xx_irq_handler;
dev->driver->irq_uninstall = i8xx_irq_uninstall;
- } else if (INTEL_INFO(dev_priv)->gen == 3) {
+ } else if (IS_GEN3(dev_priv)) {
dev->driver->irq_preinstall = i915_irq_preinstall;
dev->driver->irq_postinstall = i915_irq_postinstall;
dev->driver->irq_uninstall = i915_irq_uninstall;
@@ -4717,7 +4644,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
*/
dev_priv->pm.irqs_enabled = true;
- return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
+ return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
}
/**
@@ -4729,7 +4656,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
*/
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
- drm_irq_uninstall(dev_priv->dev);
+ drm_irq_uninstall(&dev_priv->drm);
intel_hpd_cancel_work(dev_priv);
dev_priv->pm.irqs_enabled = false;
}
@@ -4743,9 +4670,9 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv)
*/
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
- dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
+ dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
dev_priv->pm.irqs_enabled = false;
- synchronize_irq(dev_priv->dev->irq);
+ synchronize_irq(dev_priv->drm.irq);
}
/**
@@ -4758,6 +4685,6 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
dev_priv->pm.irqs_enabled = true;
- dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
- dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
+ dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
+ dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
}
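The dominant change in the i915_irq.c hunks above is mechanical: dev->dev_private lookups become to_i915(dev), and dev_priv->dev becomes &dev_priv->drm, which only works because the drm_device is now embedded inside drm_i915_private. A self-contained sketch of that accessor pattern, with placeholder struct members rather than the driver's real layout:

/* Sketch of the embedded-device accessor pattern: conversion is a
 * constant-offset computation, not a load through dev->dev_private.
 * Struct contents here are placeholders for illustration only.
 */
#include <stddef.h>

struct drm_device {
	int irq;			/* placeholder member */
};

struct drm_i915_private {
	struct drm_device drm;		/* embedded, not pointed-to */
	int irq_lock;			/* placeholder member */
};

static inline struct drm_i915_private *to_i915(struct drm_device *dev)
{
	return (struct drm_i915_private *)((char *)dev -
			offsetof(struct drm_i915_private, drm));
}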
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 1779f02e6..b6e404c91 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -54,10 +54,13 @@ struct i915_params i915 __read_mostly = {
.verbose_state_checks = 1,
.nuclear_pageflip = 0,
.edp_vswing = 0,
- .enable_guc_submission = false,
+ .enable_guc_loading = 0,
+ .enable_guc_submission = 0,
.guc_log_level = -1,
.enable_dp_mst = true,
.inject_load_failure = 0,
+ .enable_dpcd_backlight = false,
+ .enable_gvt = false,
};
module_param_named(modeset, i915.modeset, int, 0400);
@@ -197,8 +200,15 @@ MODULE_PARM_DESC(edp_vswing,
"(0=use value from vbt [default], 1=low power swing(200mV),"
"2=default swing(400mV))");
-module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, bool, 0400);
-MODULE_PARM_DESC(enable_guc_submission, "Enable GuC submission (default:false)");
+module_param_named_unsafe(enable_guc_loading, i915.enable_guc_loading, int, 0400);
+MODULE_PARM_DESC(enable_guc_loading,
+ "Enable GuC firmware loading "
+ "(-1=auto, 0=never [default], 1=if available, 2=required)");
+
+module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, int, 0400);
+MODULE_PARM_DESC(enable_guc_submission,
+ "Enable GuC submission "
+ "(-1=auto, 0=never [default], 1=if available, 2=required)");
module_param_named(guc_log_level, i915.guc_log_level, int, 0400);
MODULE_PARM_DESC(guc_log_level,
@@ -210,3 +220,10 @@ MODULE_PARM_DESC(enable_dp_mst,
module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400);
MODULE_PARM_DESC(inject_load_failure,
"Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
+module_param_named(enable_dpcd_backlight, i915.enable_dpcd_backlight, bool, 0600);
+MODULE_PARM_DESC(enable_dpcd_backlight,
+ "Enable support for DPCD backlight control (default:false)");
+
+module_param_named(enable_gvt, i915.enable_gvt, bool, 0400);
+MODULE_PARM_DESC(enable_gvt,
+ "Enable support for Intel GVT-g graphics virtualization host support(default:false)");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 02bc27804..0ad020b4a 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -45,6 +45,8 @@ struct i915_params {
int enable_ips;
int invert_brightness;
int enable_cmd_parser;
+ int enable_guc_loading;
+ int enable_guc_submission;
int guc_log_level;
int use_mmio_flip;
int mmio_debug;
@@ -57,10 +59,11 @@ struct i915_params {
bool load_detect_test;
bool reset;
bool disable_display;
- bool enable_guc_submission;
bool verbose_state_checks;
bool nuclear_pageflip;
bool enable_dp_mst;
+ bool enable_dpcd_backlight;
+ bool enable_gvt;
};
extern struct i915_params i915 __read_mostly;
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
new file mode 100644
index 000000000..949c01686
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -0,0 +1,503 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/console.h>
+#include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
+
+#include "i915_drv.h"
+
+#define GEN_DEFAULT_PIPEOFFSETS \
+ .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
+ PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
+ .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
+ TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
+ .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
+
+#define GEN_CHV_PIPEOFFSETS \
+ .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
+ CHV_PIPE_C_OFFSET }, \
+ .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
+ CHV_TRANSCODER_C_OFFSET, }, \
+ .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
+ CHV_PALETTE_C_OFFSET }
+
+#define CURSOR_OFFSETS \
+ .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
+
+#define IVB_CURSOR_OFFSETS \
+ .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
+
+#define BDW_COLORS \
+ .color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
+#define CHV_COLORS \
+ .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
+
+static const struct intel_device_info intel_i830_info = {
+ .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_845g_info = {
+ .gen = 2, .num_pipes = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_i85x_info = {
+ .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
+ .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .has_fbc = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_i865g_info = {
+ .gen = 2, .num_pipes = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_i915g_info = {
+ .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+static const struct intel_device_info intel_i915gm_info = {
+ .gen = 3, .is_mobile = 1, .num_pipes = 2,
+ .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .supports_tv = 1,
+ .has_fbc = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+static const struct intel_device_info intel_i945g_info = {
+ .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+static const struct intel_device_info intel_i945gm_info = {
+ .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
+ .has_hotplug = 1, .cursor_needs_physical = 1,
+ .has_overlay = 1, .overlay_needs_physical = 1,
+ .supports_tv = 1,
+ .has_fbc = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_i965g_info = {
+ .gen = 4, .is_broadwater = 1, .num_pipes = 2,
+ .has_hotplug = 1,
+ .has_overlay = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_i965gm_info = {
+ .gen = 4, .is_crestline = 1, .num_pipes = 2,
+ .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
+ .has_overlay = 1,
+ .supports_tv = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_g33_info = {
+ .gen = 3, .is_g33 = 1, .num_pipes = 2,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_overlay = 1,
+ .ring_mask = RENDER_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_g45_info = {
+ .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
+ .has_pipe_cxsr = 1, .has_hotplug = 1,
+ .ring_mask = RENDER_RING | BSD_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_gm45_info = {
+ .gen = 4, .is_g4x = 1, .num_pipes = 2,
+ .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
+ .has_pipe_cxsr = 1, .has_hotplug = 1,
+ .supports_tv = 1,
+ .ring_mask = RENDER_RING | BSD_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_pineview_info = {
+ .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_overlay = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_ironlake_d_info = {
+ .gen = 5, .num_pipes = 2,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .ring_mask = RENDER_RING | BSD_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_ironlake_m_info = {
+ .gen = 5, .is_mobile = 1, .num_pipes = 2,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 1,
+ .ring_mask = RENDER_RING | BSD_RING,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_sandybridge_d_info = {
+ .gen = 6, .num_pipes = 2,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
+ .has_llc = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_sandybridge_m_info = {
+ .gen = 6, .is_mobile = 1, .num_pipes = 2,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
+ .has_llc = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+};
+
+#define GEN7_FEATURES \
+ .gen = 7, .num_pipes = 3, \
+ .need_gfx_hws = 1, .has_hotplug = 1, \
+ .has_fbc = 1, \
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+ .has_llc = 1, \
+ GEN_DEFAULT_PIPEOFFSETS, \
+ IVB_CURSOR_OFFSETS
+
+static const struct intel_device_info intel_ivybridge_d_info = {
+ GEN7_FEATURES,
+ .is_ivybridge = 1,
+};
+
+static const struct intel_device_info intel_ivybridge_m_info = {
+ GEN7_FEATURES,
+ .is_ivybridge = 1,
+ .is_mobile = 1,
+};
+
+static const struct intel_device_info intel_ivybridge_q_info = {
+ GEN7_FEATURES,
+ .is_ivybridge = 1,
+ .num_pipes = 0, /* legal, last one wins */
+};
+
+#define VLV_FEATURES \
+ .gen = 7, .num_pipes = 2, \
+ .need_gfx_hws = 1, .has_hotplug = 1, \
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+ .display_mmio_offset = VLV_DISPLAY_BASE, \
+ GEN_DEFAULT_PIPEOFFSETS, \
+ CURSOR_OFFSETS
+
+static const struct intel_device_info intel_valleyview_m_info = {
+ VLV_FEATURES,
+ .is_valleyview = 1,
+ .is_mobile = 1,
+};
+
+static const struct intel_device_info intel_valleyview_d_info = {
+ VLV_FEATURES,
+ .is_valleyview = 1,
+};
+
+#define HSW_FEATURES \
+ GEN7_FEATURES, \
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
+ .has_ddi = 1, \
+ .has_fpga_dbg = 1
+
+static const struct intel_device_info intel_haswell_d_info = {
+ HSW_FEATURES,
+ .is_haswell = 1,
+};
+
+static const struct intel_device_info intel_haswell_m_info = {
+ HSW_FEATURES,
+ .is_haswell = 1,
+ .is_mobile = 1,
+};
+
+#define BDW_FEATURES \
+ HSW_FEATURES, \
+ BDW_COLORS
+
+static const struct intel_device_info intel_broadwell_d_info = {
+ BDW_FEATURES,
+ .gen = 8,
+ .is_broadwell = 1,
+};
+
+static const struct intel_device_info intel_broadwell_m_info = {
+ BDW_FEATURES,
+ .gen = 8, .is_mobile = 1,
+ .is_broadwell = 1,
+};
+
+static const struct intel_device_info intel_broadwell_gt3d_info = {
+ BDW_FEATURES,
+ .gen = 8,
+ .is_broadwell = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+};
+
+static const struct intel_device_info intel_broadwell_gt3m_info = {
+ BDW_FEATURES,
+ .gen = 8, .is_mobile = 1,
+ .is_broadwell = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+};
+
+static const struct intel_device_info intel_cherryview_info = {
+ .gen = 8, .num_pipes = 3,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+ .is_cherryview = 1,
+ .display_mmio_offset = VLV_DISPLAY_BASE,
+ GEN_CHV_PIPEOFFSETS,
+ CURSOR_OFFSETS,
+ CHV_COLORS,
+};
+
+static const struct intel_device_info intel_skylake_info = {
+ BDW_FEATURES,
+ .is_skylake = 1,
+ .gen = 9,
+};
+
+static const struct intel_device_info intel_skylake_gt3_info = {
+ BDW_FEATURES,
+ .is_skylake = 1,
+ .gen = 9,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+};
+
+static const struct intel_device_info intel_broxton_info = {
+ .is_broxton = 1,
+ .gen = 9,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+ .num_pipes = 3,
+ .has_ddi = 1,
+ .has_fpga_dbg = 1,
+ .has_fbc = 1,
+ .has_pooled_eu = 0,
+ GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
+ BDW_COLORS,
+};
+
+static const struct intel_device_info intel_kabylake_info = {
+ BDW_FEATURES,
+ .is_kabylake = 1,
+ .gen = 9,
+};
+
+static const struct intel_device_info intel_kabylake_gt3_info = {
+ BDW_FEATURES,
+ .is_kabylake = 1,
+ .gen = 9,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+};
+
+/*
+ * Make sure any device matches here are from most specific to most
+ * general. For example, since the Quanta match is based on the subsystem
+ * and subvendor IDs, we need it to come before the more general IVB
+ * PCI ID matches, otherwise we'll use the wrong info struct above.
+ */
+static const struct pci_device_id pciidlist[] = {
+ INTEL_I830_IDS(&intel_i830_info),
+ INTEL_I845G_IDS(&intel_845g_info),
+ INTEL_I85X_IDS(&intel_i85x_info),
+ INTEL_I865G_IDS(&intel_i865g_info),
+ INTEL_I915G_IDS(&intel_i915g_info),
+ INTEL_I915GM_IDS(&intel_i915gm_info),
+ INTEL_I945G_IDS(&intel_i945g_info),
+ INTEL_I945GM_IDS(&intel_i945gm_info),
+ INTEL_I965G_IDS(&intel_i965g_info),
+ INTEL_G33_IDS(&intel_g33_info),
+ INTEL_I965GM_IDS(&intel_i965gm_info),
+ INTEL_GM45_IDS(&intel_gm45_info),
+ INTEL_G45_IDS(&intel_g45_info),
+ INTEL_PINEVIEW_IDS(&intel_pineview_info),
+ INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
+ INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
+ INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
+ INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
+ INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
+ INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
+ INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
+ INTEL_HSW_D_IDS(&intel_haswell_d_info),
+ INTEL_HSW_M_IDS(&intel_haswell_m_info),
+ INTEL_VLV_M_IDS(&intel_valleyview_m_info),
+ INTEL_VLV_D_IDS(&intel_valleyview_d_info),
+ INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
+ INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
+ INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
+ INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
+ INTEL_CHV_IDS(&intel_cherryview_info),
+ INTEL_SKL_GT1_IDS(&intel_skylake_info),
+ INTEL_SKL_GT2_IDS(&intel_skylake_info),
+ INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
+ INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
+ INTEL_BXT_IDS(&intel_broxton_info),
+ INTEL_KBL_GT1_IDS(&intel_kabylake_info),
+ INTEL_KBL_GT2_IDS(&intel_kabylake_info),
+ INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
+ INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
+ {0, 0, 0}
+};
+MODULE_DEVICE_TABLE(pci, pciidlist);
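Each INTEL_*_IDS() entry above expands (via include/drm/i915_pciids.h) into one or more pci_device_id initializers that stash the matching intel_device_info pointer in driver_data. A sketch of the underlying helper, shown here for illustration only:

	/* Illustrative: matches Intel VGA-class devices by device id and
	 * carries the info struct as driver_data. */
	#define INTEL_VGA_DEVICE(id, info) {		\
		0x8086,	id,				\
		~0, ~0,					\
		0x030000, 0xff0000,			\
		(unsigned long) info }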
+
+extern int i915_driver_load(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+
+static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct intel_device_info *intel_info =
+ (struct intel_device_info *) ent->driver_data;
+
+ if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
+ DRM_INFO("This hardware requires preliminary hardware support.\n"
+ "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
+ return -ENODEV;
+ }
+
+ /* Only bind to function 0 of the device. Early generations
+ * used function 1 as a placeholder for multi-head. Binding to it
+ * only causes confusion, especially on systems where both
+ * functions have the same PCI ID!
+ */
+ if (PCI_FUNC(pdev->devfn))
+ return -ENODEV;
+
+ /*
+ * apple-gmux is needed on dual GPU MacBook Pro
+ * to probe the panel if we're the inactive GPU.
+ */
+ if (vga_switcheroo_client_probe_defer(pdev))
+ return -EPROBE_DEFER;
+
+ return i915_driver_load(pdev, ent);
+}
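For reference, PCI_FUNC() simply extracts the 3-bit function number from the devfn encoding (slot in the upper five bits, function in the lower three), as defined in the PCI UAPI headers:

	#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))
	#define PCI_SLOT(devfn)		(((devfn) >> 3) & 0x1f)
	#define PCI_FUNC(devfn)		((devfn) & 0x07)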
+
+extern void i915_driver_unload(struct drm_device *dev);
+
+static void i915_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ i915_driver_unload(dev);
+ drm_dev_unref(dev);
+}
+
+extern const struct dev_pm_ops i915_pm_ops;
+
+static struct pci_driver i915_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = i915_pci_probe,
+ .remove = i915_pci_remove,
+ .driver.pm = &i915_pm_ops,
+};
+
+static int __init i915_init(void)
+{
+ bool use_kms = true;
+
+ /*
+ * Enable KMS by default, unless explicitly overridden by
+ * either the i915.modeset parameter or by the
+ * vga_text_mode_force boot option.
+ */
+
+ if (i915.modeset == 0)
+ use_kms = false;
+
+ if (vgacon_text_force() && i915.modeset == -1)
+ use_kms = false;
+
+ if (!use_kms) {
+ /* Silently fail loading to not upset userspace. */
+ DRM_DEBUG_DRIVER("KMS disabled.\n");
+ return 0;
+ }
+
+ return pci_register_driver(&i915_pci_driver);
+}
+
+static void __exit i915_exit(void)
+{
+ if (!i915_pci_driver.driver.owner)
+ return;
+
+ pci_unregister_driver(&i915_pci_driver);
+}
+
+module_init(i915_init);
+module_exit(i915_exit);
+
+MODULE_AUTHOR("Tungsten Graphics, Inc.");
+MODULE_AUTHOR("Intel Corporation");
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
new file mode 100644
index 000000000..c0cb2974c
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_pvinfo.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _I915_PVINFO_H_
+#define _I915_PVINFO_H_
+
+/* The MMIO offset of the shared info between guest and host emulator */
+#define VGT_PVINFO_PAGE 0x78000
+#define VGT_PVINFO_SIZE 0x1000
+
+/*
+ * The following structure pages are defined in GEN MMIO space
+ * for virtualization. (One page for now)
+ */
+#define VGT_MAGIC 0x4776544776544776ULL /* 'vGTvGTvG' */
+#define VGT_VERSION_MAJOR 1
+#define VGT_VERSION_MINOR 0
+
+#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor))
+#define INTEL_VGT_IF_VERSION \
+ INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)
+
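The encoding packs the major version into bits 31:16 and the minor into bits 15:0, so INTEL_VGT_IF_VERSION for 1.0 evaluates to 0x00010000. Hypothetical decode helpers (not part of this header) would be the inverse:

	/* Illustrative only -- not defined in i915_pvinfo.h. */
	#define INTEL_VGT_IF_VERSION_DECODE_MAJOR(v)	((v) >> 16)
	#define INTEL_VGT_IF_VERSION_DECODE_MINOR(v)	((v) & 0xffff)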
+/*
+ * notifications from guest to vgpu device model
+ */
+enum vgt_g2v_type {
+ VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE = 2,
+ VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY,
+ VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE,
+ VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY,
+ VGT_G2V_EXECLIST_CONTEXT_CREATE,
+ VGT_G2V_EXECLIST_CONTEXT_DESTROY,
+ VGT_G2V_MAX,
+};
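A guest raises one of these notifications by writing the enum value into the g2v_notify field of the PVINFO page; roughly (a sketch modelled on the PPGTT notification path elsewhere in the driver):

	/* e.g. tell the host device model a PPGTT was created */
	I915_WRITE(vgtif_reg(g2v_notify),
		   VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE);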
+
+struct vgt_if {
+ u64 magic; /* VGT_MAGIC */
+ uint16_t version_major;
+ uint16_t version_minor;
+ u32 vgt_id; /* ID of vGT instance */
+ u32 rsv1[12]; /* pad to offset 0x40 */
+ /*
+ * Data structure describing the ballooning info of resources.
+ * Each VM can only have one contiguous portion of the area for now.
+ * (May support scattered resources in the future.)
+ * (starting from offset 0x40)
+ */
+ struct {
+ /* Aperture register ballooning */
+ struct {
+ u32 base;
+ u32 size;
+ } mappable_gmadr; /* aperture */
+ /* GMADR register ballooning */
+ struct {
+ u32 base;
+ u32 size;
+ } nonmappable_gmadr; /* non aperture */
+ /* allowed fence registers */
+ u32 fence_num;
+ u32 rsv2[3];
+ } avail_rs; /* available/assigned resource */
+ u32 rsv3[0x200 - 24]; /* pad to half page */
+ /*
+ * The bottom half page is for responses from the Gfx driver to the hypervisor.
+ */
+ u32 rsv4;
+ u32 display_ready; /* ready for display owner switch */
+
+ u32 rsv5[4];
+
+ u32 g2v_notify;
+ u32 rsv6[7];
+
+ struct {
+ u32 lo;
+ u32 hi;
+ } pdp[4];
+
+ u32 execlist_context_descriptor_lo;
+ u32 execlist_context_descriptor_hi;
+
+ u32 rsv7[0x200 - 24]; /* pad to one page */
+} __packed;
+
+#define vgtif_reg(x) \
+ _MMIO((VGT_PVINFO_PAGE + offsetof(struct vgt_if, x)))
+
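vgtif_reg() maps a struct vgt_if field to its MMIO register inside the PVINFO page, so fields can be read with the usual accessors; the vGPU detection code later in this patch uses it like so:

	u64 magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
	if (magic != VGT_MAGIC)
		return; /* not running under a vGT-capable hypervisor */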
+/* vGPU display status to be used by the host side */
+#define VGT_DRV_DISPLAY_NOT_READY 0
+#define VGT_DRV_DISPLAY_READY 1 /* ready for display switch */
+
+#endif /* _I915_PVINFO_H_ */
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3c123d430..bf2cad3f9 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -445,6 +445,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
*/
#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags))
+#define GEN9_MEDIA_POOL_STATE ((0x3 << 29) | (0x2 << 27) | (0x5 << 16) | 4)
+#define GEN9_MEDIA_POOL_ENABLE (1 << 31)
#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define SC_UPDATE_SCISSOR (0x1<<1)
@@ -716,6 +718,9 @@ enum skl_disp_power_wells {
/* Not actual bit groups. Used as IDs for lookup_power_well() */
SKL_DISP_PW_ALWAYS_ON,
SKL_DISP_PW_DC_OFF,
+
+ BXT_DPIO_CMN_A,
+ BXT_DPIO_CMN_BC,
};
#define SKL_POWER_WELL_STATE(pw) (1 << ((pw) * 2))
@@ -889,7 +894,7 @@ enum skl_disp_power_wells {
* PLLs can be routed to any transcoder A/B/C.
*
* Note: DDI0 is digital port B, DDI1 is digital port C, and DDI2 is
- * digital port D (CHV) or port A (BXT).
+ * digital port D (CHV) or port A (BXT). ::
*
*
* Dual channel PHY (VLV/CHV/BXT)
@@ -1276,6 +1281,15 @@ enum skl_disp_power_wells {
#define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090)
#define GT_DISPLAY_POWER_ON(phy) (1 << (phy))
+#define _BXT_PHY_CTL_DDI_A 0x64C00
+#define _BXT_PHY_CTL_DDI_B 0x64C10
+#define _BXT_PHY_CTL_DDI_C 0x64C20
+#define BXT_PHY_CMNLANE_POWERDOWN_ACK (1 << 10)
+#define BXT_PHY_LANE_POWERDOWN_ACK (1 << 9)
+#define BXT_PHY_LANE_ENABLED (1 << 8)
+#define BXT_PHY_CTL(port) _MMIO_PORT(port, _BXT_PHY_CTL_DDI_A, \
+ _BXT_PHY_CTL_DDI_B)
+
#define _PHY_CTL_FAMILY_EDP 0x64C80
#define _PHY_CTL_FAMILY_DDI 0x64C90
#define COMMON_RESET_DIS (1 << 31)
@@ -2175,6 +2189,9 @@ enum skl_disp_power_wells {
#define FBC_LL_SIZE (1536)
+#define FBC_LLC_READ_CTRL _MMIO(0x9044)
+#define FBC_LLC_FULLY_OPEN (1<<30)
+
/* Framebuffer compression for GM45+ */
#define DPFC_CB_BASE _MMIO(0x3200)
#define DPFC_CONTROL _MMIO(0x3208)
@@ -2465,6 +2482,8 @@ enum skl_disp_power_wells {
#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
+#define RAWCLK_FREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6024)
+
#define _FPA0 0x6040
#define _FPA1 0x6044
#define _FPB0 0x6048
@@ -3036,6 +3055,18 @@ enum skl_disp_power_wells {
/* Same as Haswell, but 72064 bytes now. */
#define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE)
+enum {
+ INTEL_ADVANCED_CONTEXT = 0,
+ INTEL_LEGACY_32B_CONTEXT,
+ INTEL_ADVANCED_AD_CONTEXT,
+ INTEL_LEGACY_64B_CONTEXT
+};
+
+#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
+#define GEN8_CTX_ADDRESSING_MODE(dev_priv) (USES_FULL_48BIT_PPGTT(dev_priv) ?\
+ INTEL_LEGACY_64B_CONTEXT : \
+ INTEL_LEGACY_32B_CONTEXT)
+
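The chosen addressing mode is shifted into a GEN8+ context descriptor; a minimal sketch of the intended use (the real code sits in the execlists descriptor setup):

	/* Sketch: select 64b vs 32b PPGTT addressing in the descriptor */
	u32 desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
			    GEN8_CTX_ADDRESSING_MODE_SHIFT;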
#define CHV_CLK_CTL1 _MMIO(0x101100)
#define VLV_CLK_CTL2 _MMIO(0x101104)
#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
@@ -6049,6 +6080,9 @@ enum skl_disp_power_wells {
#define FORCE_ARB_IDLE_PLANES (1 << 14)
#define SKL_EDP_PSR_FIX_RDWRAP (1 << 3)
+#define CHICKEN_PAR2_1 _MMIO(0x42090)
+#define KVM_CONFIG_CHANGE_NOTIFICATION_SELECT (1 << 14)
+
#define _CHICKEN_PIPESL_1_A 0x420b0
#define _CHICKEN_PIPESL_1_B 0x420b4
#define HSW_FBCQ_DIS (1 << 22)
@@ -6088,6 +6122,7 @@ enum skl_disp_power_wells {
#define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4)
#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)
+#define GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE (1<<10)
#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec)
#define GEN9_CTX_PREEMPT_REG _MMIO(0x2248)
@@ -6112,7 +6147,14 @@ enum skl_disp_power_wells {
#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
#define GEN8_L3SQCREG1 _MMIO(0xB100)
-#define BDW_WA_L3SQCREG1_DEFAULT 0x784000
+/*
+ * Note that on CHV the following has an off-by-one error wrt. BSpec.
+ * Using the formula in BSpec leads to a hang, while the formula here works
+ * fine and matches the formulas for all other platforms. A BSpec change
+ * request has been filed to clarify this.
+ */
+#define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19)
+#define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14)
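Both macros halve the credit count before placing it in the register field, which is the formula that avoids the CHV hang described above. A typical programming sequence, as done for BDW-class clock gating, looks roughly like:

	/* Sketch: grant 30 general and 2 high priority L3 SQC credits */
	I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(30) |
				   L3_HIGH_PRIO_CREDITS(2));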
#define GEN7_L3CNTLREG1 _MMIO(0xB01C)
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
@@ -7032,7 +7074,8 @@ enum skl_disp_power_wells {
#define GEN6_RPDEUC _MMIO(0xA084)
#define GEN6_RPDEUCSW _MMIO(0xA088)
#define GEN6_RC_STATE _MMIO(0xA094)
-#define RC6_STATE (1 << 18)
+#define RC_SW_TARGET_STATE_SHIFT 16
+#define RC_SW_TARGET_STATE_MASK (7 << RC_SW_TARGET_STATE_SHIFT)
#define GEN6_RC1_WAKE_RATE_LIMIT _MMIO(0xA098)
#define GEN6_RC6_WAKE_RATE_LIMIT _MMIO(0xA09C)
#define GEN6_RC6pp_WAKE_RATE_LIMIT _MMIO(0xA0A0)
@@ -7046,13 +7089,17 @@ enum skl_disp_power_wells {
#define VLV_RCEDATA _MMIO(0xA0BC)
#define GEN6_RC6pp_THRESHOLD _MMIO(0xA0C0)
#define GEN6_PMINTRMSK _MMIO(0xA168)
-#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31)
+#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31)
+#define GEN8_MISC_CTRL0 _MMIO(0xA180)
#define VLV_PWRDWNUPCTL _MMIO(0xA294)
#define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4)
#define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xA0C8)
#define GEN9_PG_ENABLE _MMIO(0xA210)
#define GEN9_RENDER_PG_ENABLE (1<<0)
#define GEN9_MEDIA_PG_ENABLE (1<<1)
+#define GEN8_PUSHBUS_CONTROL _MMIO(0xA248)
+#define GEN8_PUSHBUS_ENABLE _MMIO(0xA250)
+#define GEN8_PUSHBUS_SHIFT _MMIO(0xA25C)
#define VLV_CHICKEN_3 _MMIO(VLV_DISPLAY_BASE + 0x7040C)
#define PIXEL_OVERLAP_CNT_MASK (3 << 30)
@@ -7098,6 +7145,15 @@ enum skl_disp_power_wells {
#define GEN6_PCODE_MAILBOX _MMIO(0x138124)
#define GEN6_PCODE_READY (1<<31)
+#define GEN6_PCODE_ERROR_MASK 0xFF
+#define GEN6_PCODE_SUCCESS 0x0
+#define GEN6_PCODE_ILLEGAL_CMD 0x1
+#define GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x2
+#define GEN6_PCODE_TIMEOUT 0x3
+#define GEN6_PCODE_UNIMPLEMENTED_CMD 0xFF
+#define GEN7_PCODE_TIMEOUT 0x2
+#define GEN7_PCODE_ILLEGAL_DATA 0x3
+#define GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x10
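Once GEN6_PCODE_READY clears, the low byte of the mailbox carries one of the status codes above (Gen7+ parts use the GEN7_* values). A hedged sketch of decoding it:

	u32 status = I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
	if (status != GEN6_PCODE_SUCCESS)
		DRM_DEBUG_DRIVER("pcode mailbox failed, status 0x%x\n", status);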
#define GEN6_PCODE_WRITE_RC6VIDS 0x4
#define GEN6_PCODE_READ_RC6VIDS 0x5
#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
@@ -7121,9 +7177,8 @@ enum skl_disp_power_wells {
#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
#define GEN9_PCODE_SAGV_CONTROL 0x21
#define GEN9_SAGV_DISABLE 0x0
-#define GEN9_SAGV_LOW_FREQ 0x1
-#define GEN9_SAGV_HIGH_FREQ 0x2
-#define GEN9_SAGV_DYNAMIC_FREQ 0x3
+#define GEN9_SAGV_IS_DISABLED 0x1
+#define GEN9_SAGV_ENABLE 0x3
#define GEN6_PCODE_DATA _MMIO(0x138128)
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
@@ -7547,8 +7602,6 @@ enum skl_disp_power_wells {
#define CDCLK_FREQ _MMIO(0x46200)
-#define CDCLK_FREQ _MMIO(0x46200)
-
#define _TRANSA_MSA_MISC 0x60410
#define _TRANSB_MSA_MISC 0x61410
#define _TRANSC_MSA_MISC 0x62410
@@ -7589,14 +7642,15 @@ enum skl_disp_power_wells {
#define CDCLK_FREQ_540 (1<<26)
#define CDCLK_FREQ_337_308 (2<<26)
#define CDCLK_FREQ_675_617 (3<<26)
-#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
-
#define BXT_CDCLK_CD2X_DIV_SEL_MASK (3<<22)
#define BXT_CDCLK_CD2X_DIV_SEL_1 (0<<22)
#define BXT_CDCLK_CD2X_DIV_SEL_1_5 (1<<22)
#define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22)
#define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22)
+#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe)<<20)
+#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16)
+#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
/* LCPLL_CTL */
#define LCPLL1_CTL _MMIO(0x46010)
@@ -8172,6 +8226,8 @@ enum skl_disp_power_wells {
#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c)
#define _MIPIC_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c)
#define MIPI_EOT_DISABLE(port) _MMIO_MIPI(port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE)
+#define BXT_DEFEATURE_DPI_FIFO_CTR (1 << 9)
+#define BXT_DPHY_DEFEATURE_EN (1 << 8)
#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7)
#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6)
#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 34e061a9e..5cfe4c771 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -31,7 +31,7 @@
static void i915_save_display(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/* Display arbitration control */
if (INTEL_INFO(dev)->gen <= 4)
@@ -63,7 +63,7 @@ static void i915_save_display(struct drm_device *dev)
static void i915_restore_display(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 mask = 0xffffffff;
/* Display arbitration */
@@ -103,7 +103,7 @@ static void i915_restore_display(struct drm_device *dev)
int i915_save_state(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int i;
mutex_lock(&dev->struct_mutex);
@@ -148,7 +148,7 @@ int i915_save_state(struct drm_device *dev)
int i915_restore_state(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int i;
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 2d576b7ff..d61829e54 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -38,12 +38,12 @@
static u32 calc_residency(struct drm_device *dev,
i915_reg_t reg)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u64 raw_time; /* 32b value may overflow during fixed point math */
u64 units = 128ULL, div = 100000ULL;
u32 ret;
- if (!intel_enable_rc6(dev))
+ if (!intel_enable_rc6())
return 0;
intel_runtime_pm_get(dev_priv);
@@ -70,8 +70,7 @@ static u32 calc_residency(struct drm_device *dev,
static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
- struct drm_minor *dminor = dev_to_drm_minor(kdev);
- return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
+ return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6());
}
static ssize_t
@@ -167,7 +166,7 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct drm_minor *dminor = dev_to_drm_minor(dev);
struct drm_device *drm_dev = dminor->dev;
- struct drm_i915_private *dev_priv = drm_dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(drm_dev);
int slice = (int)(uintptr_t)attr->private;
int ret;
@@ -203,8 +202,8 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct drm_minor *dminor = dev_to_drm_minor(dev);
struct drm_device *drm_dev = dminor->dev;
- struct drm_i915_private *dev_priv = drm_dev->dev_private;
- struct intel_context *ctx;
+ struct drm_i915_private *dev_priv = to_i915(drm_dev);
+ struct i915_gem_context *ctx;
u32 *temp = NULL; /* Just here to make handling failures easy */
int slice = (int)(uintptr_t)attr->private;
int ret;
@@ -228,13 +227,6 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
}
}
- ret = i915_gpu_idle(drm_dev);
- if (ret) {
- kfree(temp);
- mutex_unlock(&drm_dev->struct_mutex);
- return ret;
- }
-
/* TODO: Ideally we really want a GPU reset here to make sure errors
* aren't propagated. Since I cannot find a stable way to reset the GPU
* at this point it is left as a TODO.
@@ -276,7 +268,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
{
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -310,7 +302,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
{
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -331,7 +323,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
{
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
return snprintf(buf, PAGE_SIZE,
"%d\n",
@@ -342,7 +334,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
{
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -360,7 +352,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
{
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 val;
ssize_t ret;
@@ -397,7 +389,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
/* We still need *_set_rps to process the new max_delay and
* update the interrupt limits and PMINTRMSK even though
* frequency request may be unchanged. */
- intel_set_rps(dev, val);
+ intel_set_rps(dev_priv, val);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -410,7 +402,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
{
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -428,7 +420,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
{
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 val;
ssize_t ret;
@@ -461,7 +453,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
/* We still need *_set_rps to process the new min_delay and
* update the interrupt limits and PMINTRMSK even though
* frequency request may be unchanged. */
- intel_set_rps(dev, val);
+ intel_set_rps(dev_priv, val);
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -488,7 +480,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
{
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 val;
if (attr == &dev_attr_gt_RP0_freq_mhz)
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index dc0def210..534154e05 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -118,7 +118,7 @@ TRACE_EVENT(i915_gem_shrink,
),
TP_fast_assign(
- __entry->dev = i915->dev->primary->index;
+ __entry->dev = i915->drm.primary->index;
__entry->target = target;
__entry->flags = flags;
),
@@ -462,7 +462,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
),
TP_fast_assign(
- __entry->dev = from->dev->primary->index;
+ __entry->dev = from->i915->drm.primary->index;
__entry->sync_from = from->id;
__entry->sync_to = to_req->engine->id;
__entry->seqno = i915_gem_request_get_seqno(req);
@@ -486,13 +486,11 @@ TRACE_EVENT(i915_gem_ring_dispatch,
),
TP_fast_assign(
- struct intel_engine_cs *engine =
- i915_gem_request_get_engine(req);
- __entry->dev = engine->dev->primary->index;
- __entry->ring = engine->id;
- __entry->seqno = i915_gem_request_get_seqno(req);
+ __entry->dev = req->i915->drm.primary->index;
+ __entry->ring = req->engine->id;
+ __entry->seqno = req->seqno;
__entry->flags = flags;
- i915_trace_irq_get(engine, req);
+ intel_engine_enable_signaling(req);
),
TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -511,7 +509,7 @@ TRACE_EVENT(i915_gem_ring_flush,
),
TP_fast_assign(
- __entry->dev = req->engine->dev->primary->index;
+ __entry->dev = req->i915->drm.primary->index;
__entry->ring = req->engine->id;
__entry->invalidate = invalidate;
__entry->flush = flush;
@@ -533,11 +531,9 @@ DECLARE_EVENT_CLASS(i915_gem_request,
),
TP_fast_assign(
- struct intel_engine_cs *engine =
- i915_gem_request_get_engine(req);
- __entry->dev = engine->dev->primary->index;
- __entry->ring = engine->id;
- __entry->seqno = i915_gem_request_get_seqno(req);
+ __entry->dev = req->i915->drm.primary->index;
+ __entry->ring = req->engine->id;
+ __entry->seqno = req->seqno;
),
TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -560,9 +556,9 @@ TRACE_EVENT(i915_gem_request_notify,
),
TP_fast_assign(
- __entry->dev = engine->dev->primary->index;
+ __entry->dev = engine->i915->drm.primary->index;
__entry->ring = engine->id;
- __entry->seqno = engine->get_seqno(engine);
+ __entry->seqno = intel_engine_get_seqno(engine);
),
TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -597,13 +593,11 @@ TRACE_EVENT(i915_gem_request_wait_begin,
* less desirable.
*/
TP_fast_assign(
- struct intel_engine_cs *engine =
- i915_gem_request_get_engine(req);
- __entry->dev = engine->dev->primary->index;
- __entry->ring = engine->id;
- __entry->seqno = i915_gem_request_get_seqno(req);
+ __entry->dev = req->i915->drm.primary->index;
+ __entry->ring = req->engine->id;
+ __entry->seqno = req->seqno;
__entry->blocking =
- mutex_is_locked(&engine->dev->struct_mutex);
+ mutex_is_locked(&req->i915->drm.struct_mutex);
),
TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
@@ -740,19 +734,19 @@ DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release,
* the context.
*/
DECLARE_EVENT_CLASS(i915_context,
- TP_PROTO(struct intel_context *ctx),
+ TP_PROTO(struct i915_gem_context *ctx),
TP_ARGS(ctx),
TP_STRUCT__entry(
__field(u32, dev)
- __field(struct intel_context *, ctx)
+ __field(struct i915_gem_context *, ctx)
__field(struct i915_address_space *, vm)
),
TP_fast_assign(
__entry->ctx = ctx;
__entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
- __entry->dev = ctx->i915->dev->primary->index;
+ __entry->dev = ctx->i915->drm.primary->index;
),
TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
@@ -760,12 +754,12 @@ DECLARE_EVENT_CLASS(i915_context,
)
DEFINE_EVENT(i915_context, i915_context_create,
- TP_PROTO(struct intel_context *ctx),
+ TP_PROTO(struct i915_gem_context *ctx),
TP_ARGS(ctx)
);
DEFINE_EVENT(i915_context, i915_context_free,
- TP_PROTO(struct intel_context *ctx),
+ TP_PROTO(struct i915_gem_context *ctx),
TP_ARGS(ctx)
);
@@ -777,13 +771,13 @@ DEFINE_EVENT(i915_context, i915_context_free,
* called only if full ppgtt is enabled.
*/
TRACE_EVENT(switch_mm,
- TP_PROTO(struct intel_engine_cs *engine, struct intel_context *to),
+ TP_PROTO(struct intel_engine_cs *engine, struct i915_gem_context *to),
TP_ARGS(engine, to),
TP_STRUCT__entry(
__field(u32, ring)
- __field(struct intel_context *, to)
+ __field(struct i915_gem_context *, to)
__field(struct i915_address_space *, vm)
__field(u32, dev)
),
@@ -792,7 +786,7 @@ TRACE_EVENT(switch_mm,
__entry->ring = engine->id;
__entry->to = to;
__entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
- __entry->dev = engine->dev->primary->index;
+ __entry->dev = engine->i915->drm.primary->index;
),
TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index d02efb8ca..b81cfb3b2 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -53,22 +53,18 @@
/**
* i915_check_vgpu - detect virtual GPU
- * @dev: drm device *
+ * @dev_priv: i915 device private
*
* This function is called at the initialization stage, to detect whether
* running on a vGPU.
*/
-void i915_check_vgpu(struct drm_device *dev)
+void i915_check_vgpu(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint64_t magic;
uint32_t version;
BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
- if (!IS_HASWELL(dev))
- return;
-
magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
if (magic != VGT_MAGIC)
return;
@@ -102,10 +98,13 @@ static struct _balloon_info_ bl_info;
* This function is called to deallocate the ballooned-out graphic memory, when
* driver is unloaded or when ballooning fails.
*/
-void intel_vgt_deballoon(void)
+void intel_vgt_deballoon(struct drm_i915_private *dev_priv)
{
int i;
+ if (!intel_vgpu_active(dev_priv))
+ return;
+
DRM_DEBUG("VGT deballoon.\n");
for (i = 0; i < 4; i++) {
@@ -151,36 +150,35 @@ static int vgt_balloon_space(struct drm_mm *mm,
* of its graphic space being zero. Yet there are some portions ballooned out
* (the shadow part, which is marked as reserved by the drm allocator). From the
* host point of view, the graphic address space is partitioned by multiple
- * vGPUs in different VMs.
+ * vGPUs in different VMs. ::
*
* vGPU1 view Host view
* 0 ------> +-----------+ +-----------+
- * ^ |///////////| | vGPU3 |
- * | |///////////| +-----------+
- * | |///////////| | vGPU2 |
+ * ^ |###########| | vGPU3 |
+ * | |###########| +-----------+
+ * | |###########| | vGPU2 |
* | +-----------+ +-----------+
* mappable GM | available | ==> | vGPU1 |
* | +-----------+ +-----------+
- * | |///////////| | |
- * v |///////////| | Host |
+ * | |###########| | |
+ * v |###########| | Host |
* +=======+===========+ +===========+
- * ^ |///////////| | vGPU3 |
- * | |///////////| +-----------+
- * | |///////////| | vGPU2 |
+ * ^ |###########| | vGPU3 |
+ * | |###########| +-----------+
+ * | |###########| | vGPU2 |
* | +-----------+ +-----------+
* unmappable GM | available | ==> | vGPU1 |
* | +-----------+ +-----------+
- * | |///////////| | |
- * | |///////////| | Host |
- * v |///////////| | |
+ * | |###########| | |
+ * | |###########| | Host |
+ * v |###########| | |
* total GM size ------> +-----------+ +-----------+
*
* Returns:
* zero on success, non-zero if configuration invalid or ballooning failed
*/
-int intel_vgt_balloon(struct drm_device *dev)
+int intel_vgt_balloon(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long ggtt_end = ggtt->base.start + ggtt->base.total;
@@ -188,6 +186,9 @@ int intel_vgt_balloon(struct drm_device *dev)
unsigned long unmappable_base, unmappable_size, unmappable_end;
int ret;
+ if (!intel_vgpu_active(dev_priv))
+ return 0;
+
mappable_base = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base));
mappable_size = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.size));
unmappable_base = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.base));
@@ -259,6 +260,6 @@ int intel_vgt_balloon(struct drm_device *dev)
err:
DRM_ERROR("VGT balloon fail\n");
- intel_vgt_deballoon();
+ intel_vgt_deballoon(dev_priv);
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index 3c83b47b5..3c3b2d24e 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -24,94 +24,10 @@
#ifndef _I915_VGPU_H_
#define _I915_VGPU_H_
-/* The MMIO offset of the shared info between guest and host emulator */
-#define VGT_PVINFO_PAGE 0x78000
-#define VGT_PVINFO_SIZE 0x1000
+#include "i915_pvinfo.h"
-/*
- * The following structure pages are defined in GEN MMIO space
- * for virtualization. (One page for now)
- */
-#define VGT_MAGIC 0x4776544776544776ULL /* 'vGTvGTvG' */
-#define VGT_VERSION_MAJOR 1
-#define VGT_VERSION_MINOR 0
-
-#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor))
-#define INTEL_VGT_IF_VERSION \
- INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)
-
-/*
- * notifications from guest to vgpu device model
- */
-enum vgt_g2v_type {
- VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE = 2,
- VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY,
- VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE,
- VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY,
- VGT_G2V_EXECLIST_CONTEXT_CREATE,
- VGT_G2V_EXECLIST_CONTEXT_DESTROY,
- VGT_G2V_MAX,
-};
-
-struct vgt_if {
- uint64_t magic; /* VGT_MAGIC */
- uint16_t version_major;
- uint16_t version_minor;
- uint32_t vgt_id; /* ID of vGT instance */
- uint32_t rsv1[12]; /* pad to offset 0x40 */
- /*
- * Data structure to describe the balooning info of resources.
- * Each VM can only have one portion of continuous area for now.
- * (May support scattered resource in future)
- * (starting from offset 0x40)
- */
- struct {
- /* Aperture register balooning */
- struct {
- uint32_t base;
- uint32_t size;
- } mappable_gmadr; /* aperture */
- /* GMADR register balooning */
- struct {
- uint32_t base;
- uint32_t size;
- } nonmappable_gmadr; /* non aperture */
- /* allowed fence registers */
- uint32_t fence_num;
- uint32_t rsv2[3];
- } avail_rs; /* available/assigned resource */
- uint32_t rsv3[0x200 - 24]; /* pad to half page */
- /*
- * The bottom half page is for response from Gfx driver to hypervisor.
- */
- uint32_t rsv4;
- uint32_t display_ready; /* ready for display owner switch */
-
- uint32_t rsv5[4];
-
- uint32_t g2v_notify;
- uint32_t rsv6[7];
-
- struct {
- uint32_t lo;
- uint32_t hi;
- } pdp[4];
-
- uint32_t execlist_context_descriptor_lo;
- uint32_t execlist_context_descriptor_hi;
-
- uint32_t rsv7[0x200 - 24]; /* pad to one page */
-} __packed;
-
-#define vgtif_reg(x) \
- _MMIO((VGT_PVINFO_PAGE + (long)&((struct vgt_if *)NULL)->x))
-
-/* vGPU display status to be used by the host side */
-#define VGT_DRV_DISPLAY_NOT_READY 0
-#define VGT_DRV_DISPLAY_READY 1 /* ready for display switch */
-
-extern void i915_check_vgpu(struct drm_device *dev);
-extern int intel_vgt_balloon(struct drm_device *dev);
-extern void intel_vgt_deballoon(void);
+void i915_check_vgpu(struct drm_i915_private *dev_priv);
+int intel_vgt_balloon(struct drm_i915_private *dev_priv);
+void intel_vgt_deballoon(struct drm_i915_private *dev_priv);
#endif /* _I915_VGPU_H_ */
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 50ff90aea..c5a166752 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -191,7 +191,7 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
/* plane scaler case: assign as a plane scaler */
/* find the plane that set the bit as scaler_user */
- plane = drm_state->planes[i];
+ plane = drm_state->planes[i].ptr;
/*
* to enable/disable hq mode, add planes that are using scaler
@@ -223,7 +223,8 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
continue;
}
- plane_state = to_intel_plane_state(drm_state->plane_states[i]);
+ plane_state = intel_atomic_get_existing_plane_state(drm_state,
+ intel_plane);
scaler_id = &plane_state->scaler_id;
}
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 74eca43b2..d32f586f9 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -154,7 +154,7 @@ static bool audio_rate_need_prog(struct intel_crtc *crtc,
{
if (((mode->clock == TMDS_297M) ||
(mode->clock == TMDS_296M)) &&
- intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
+ intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
return true;
else
return false;
@@ -165,7 +165,7 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
i915_reg_t reg_elda, uint32_t bits_elda,
i915_reg_t reg_edid)
{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
uint8_t *eld = connector->eld;
uint32_t tmp;
int i;
@@ -189,7 +189,7 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
static void g4x_audio_codec_disable(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
uint32_t eldv, tmp;
DRM_DEBUG_KMS("Disable audio codec\n");
@@ -210,7 +210,7 @@ static void g4x_audio_codec_enable(struct drm_connector *connector,
struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode)
{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
uint8_t *eld = connector->eld;
uint32_t eldv;
uint32_t tmp;
@@ -247,7 +247,7 @@ static void g4x_audio_codec_enable(struct drm_connector *connector,
static void hsw_audio_codec_disable(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
uint32_t tmp;
@@ -262,7 +262,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
tmp |= AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_UPPER_N_MASK;
tmp &= ~AUD_CONFIG_LOWER_N_MASK;
- if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+ if (intel_crtc_has_dp_encoder(intel_crtc->config))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
I915_WRITE(HSW_AUD_CFG(pipe), tmp);
@@ -279,7 +279,7 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode)
{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
struct i915_audio_component *acomp = dev_priv->audio_component;
@@ -328,7 +328,7 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
tmp = I915_READ(HSW_AUD_CFG(pipe));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
- if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+ if (intel_crtc_has_dp_encoder(intel_crtc->config))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
else
tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
@@ -357,7 +357,7 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
static void ilk_audio_codec_disable(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_digital_port *intel_dig_port =
enc_to_dig_port(&encoder->base);
@@ -389,7 +389,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
tmp |= AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_UPPER_N_MASK;
tmp &= ~AUD_CONFIG_LOWER_N_MASK;
- if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+ if (intel_crtc_has_dp_encoder(intel_crtc->config))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
I915_WRITE(aud_config, tmp);
@@ -405,7 +405,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode)
{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_digital_port *intel_dig_port =
enc_to_dig_port(&encoder->base);
@@ -475,7 +475,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
- if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+ if (intel_crtc_has_dp_encoder(intel_crtc->config))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
else
tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
@@ -496,7 +496,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
struct drm_connector *connector;
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
enum port port = intel_dig_port->port;
@@ -513,7 +513,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
/* ELD Conn_Type */
connector->eld[5] &= ~(3 << 2);
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+ if (intel_crtc_has_dp_encoder(crtc->config))
connector->eld[5] |= (1 << 2);
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
@@ -543,7 +543,7 @@ void intel_audio_codec_disable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_audio_component *acomp = dev_priv->audio_component;
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
enum port port = intel_dig_port->port;
@@ -625,17 +625,11 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
static int i915_audio_component_get_cdclk_freq(struct device *dev)
{
struct drm_i915_private *dev_priv = dev_to_i915(dev);
- int ret;
if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
return -ENODEV;
- intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
- ret = dev_priv->display.get_display_clock_speed(dev_priv->dev);
-
- intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
-
- return ret;
+ return dev_priv->cdclk_freq;
}
static int i915_audio_component_sync_audio_rate(struct device *dev,
@@ -761,14 +755,14 @@ static int i915_audio_component_bind(struct device *i915_dev,
if (WARN_ON(acomp->ops || acomp->dev))
return -EEXIST;
- drm_modeset_lock_all(dev_priv->dev);
+ drm_modeset_lock_all(&dev_priv->drm);
acomp->ops = &i915_audio_component_ops;
acomp->dev = i915_dev;
BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
acomp->aud_sample_rate[i] = 0;
dev_priv->audio_component = acomp;
- drm_modeset_unlock_all(dev_priv->dev);
+ drm_modeset_unlock_all(&dev_priv->drm);
return 0;
}
@@ -779,11 +773,11 @@ static void i915_audio_component_unbind(struct device *i915_dev,
struct i915_audio_component *acomp = data;
struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
- drm_modeset_lock_all(dev_priv->dev);
+ drm_modeset_lock_all(&dev_priv->drm);
acomp->ops = NULL;
acomp->dev = NULL;
dev_priv->audio_component = NULL;
- drm_modeset_unlock_all(dev_priv->dev);
+ drm_modeset_unlock_all(&dev_priv->drm);
}
static const struct component_ops i915_audio_component_bind_ops = {
@@ -811,7 +805,7 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv)
{
int ret;
- ret = component_add(dev_priv->dev->dev, &i915_audio_component_bind_ops);
+ ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops);
if (ret < 0) {
DRM_ERROR("failed to add audio component (%d)\n", ret);
/* continue with reduced functionality */
@@ -833,6 +827,6 @@ void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
if (!dev_priv->audio_component_registered)
return;
- component_del(dev_priv->dev->dev, &i915_audio_component_bind_ops);
+ component_del(dev_priv->drm.dev, &i915_audio_component_bind_ops);
dev_priv->audio_component_registered = false;
}
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b9022fa05..c6e69e4cf 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -218,7 +218,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
dev_priv->vbt.lvds_dither = lvds_options->pixel_dither;
- ret = intel_opregion_get_panel_type(dev_priv->dev);
+ ret = intel_opregion_get_panel_type(dev_priv);
if (ret >= 0) {
WARN_ON(ret > 0xf);
panel_type = ret;
@@ -323,6 +323,15 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
return;
}
+ dev_priv->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
+ if (bdb->version >= 191 &&
+ get_blocksize(backlight_data) >= sizeof(*backlight_data)) {
+ const struct bdb_lfp_backlight_control_method *method;
+
+ method = &backlight_data->backlight_control[panel_type];
+ dev_priv->vbt.backlight.type = method->type;
+ }
+
dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
@@ -768,6 +777,16 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
return;
}
+ /*
+ * These fields were introduced in VBT version 197, so make
+ * sure the bits are zeroed for earlier versions.
+ */
+ if (dev_priv->vbt.dsi.config->dual_link && bdb->version < 197) {
+ dev_priv->vbt.dsi.config->dl_dcs_cabc_ports = 0;
+ dev_priv->vbt.dsi.config->dl_dcs_backlight_ports = 0;
+ }
+
/* We have mandatory mipi config blocks. Initialize as generic panel */
dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
}
@@ -1407,7 +1426,7 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
int
intel_bios_init(struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev_priv->dev->pdev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
const struct vbt_header *vbt = dev_priv->opregion.vbt;
const struct bdb_header *bdb;
u8 __iomem *bios = NULL;
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index ab0ea315e..8405b5a36 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -30,6 +30,14 @@
#ifndef _INTEL_BIOS_H_
#define _INTEL_BIOS_H_
+enum intel_backlight_type {
+ INTEL_BACKLIGHT_PMIC,
+ INTEL_BACKLIGHT_LPSS,
+ INTEL_BACKLIGHT_DISPLAY_DDI,
+ INTEL_BACKLIGHT_DSI_DCS,
+ INTEL_BACKLIGHT_PANEL_DRIVER_INTERFACE,
+};
+
struct edp_power_seq {
u16 t1_t3;
u16 t8;
@@ -113,7 +121,13 @@ struct mipi_config {
u16 dual_link:2;
u16 lane_cnt:2;
u16 pixel_overlap:3;
- u16 rsvd3:9;
+ u16 rgb_flip:1;
+#define DL_DCS_PORT_A 0x00
+#define DL_DCS_PORT_C 0x01
+#define DL_DCS_PORT_A_AND_C 0x02
+ u16 dl_dcs_cabc_ports:2;
+ u16 dl_dcs_backlight_ports:2;
+ u16 rsvd3:4;
u16 rsvd4;
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
new file mode 100644
index 000000000..b074f3d6d
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -0,0 +1,595 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kthread.h>
+
+#include "i915_drv.h"
+
+static void intel_breadcrumbs_fake_irq(unsigned long data)
+{
+ struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
+
+ /*
+ * The timer persists in case we cannot enable interrupts,
+ * or if we have previously seen seqno/interrupt incoherency
+ * ("missed interrupt" syndrome). Here the worker will wake up
+ * every jiffie in order to kick the oldest waiter to do the
+ * coherent seqno check.
+ */
+ rcu_read_lock();
+ if (intel_engine_wakeup(engine))
+ mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
+ rcu_read_unlock();
+}
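The timer is armed from __intel_breadcrumbs_enable_irq() below; the per-engine setup (done in the breadcrumbs init code, not shown in this hunk) is essentially:

	setup_timer(&engine->breadcrumbs.fake_irq,
		    intel_breadcrumbs_fake_irq,
		    (unsigned long)engine);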
+
+static void irq_enable(struct intel_engine_cs *engine)
+{
+ /* Enabling the IRQ may miss the generation of the interrupt, but
+ * we still need to force the barrier before reading the seqno,
+ * just in case.
+ */
+ engine->breadcrumbs.irq_posted = true;
+
+ spin_lock_irq(&engine->i915->irq_lock);
+ engine->irq_enable(engine);
+ spin_unlock_irq(&engine->i915->irq_lock);
+}
+
+static void irq_disable(struct intel_engine_cs *engine)
+{
+ spin_lock_irq(&engine->i915->irq_lock);
+ engine->irq_disable(engine);
+ spin_unlock_irq(&engine->i915->irq_lock);
+
+ engine->breadcrumbs.irq_posted = false;
+}
+
+static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
+{
+ struct intel_engine_cs *engine =
+ container_of(b, struct intel_engine_cs, breadcrumbs);
+ struct drm_i915_private *i915 = engine->i915;
+
+ assert_spin_locked(&b->lock);
+ if (b->rpm_wakelock)
+ return;
+
+ /* Since we are waiting on a request, the GPU should be busy
+ * and should have its own rpm reference. For completeness,
+ * record an rpm reference for ourselves to cover the
+ * interrupt we unmask.
+ */
+ intel_runtime_pm_get_noresume(i915);
+ b->rpm_wakelock = true;
+
+ /* No interrupts? Kick the waiter every jiffie! */
+ if (intel_irqs_enabled(i915)) {
+ if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
+ irq_enable(engine);
+ b->irq_enabled = true;
+ }
+
+ if (!b->irq_enabled ||
+ test_bit(engine->id, &i915->gpu_error.missed_irq_rings))
+ mod_timer(&b->fake_irq, jiffies + 1);
+
+ /* Ensure that even if the GPU hangs, we get woken up.
+ *
+ * However, note that if no one is waiting, we never notice
+ * a gpu hang. Eventually, we will have to wait for a resource
+ * held by the GPU and so trigger a hangcheck. In the most
+ * pathological case, this will be upon memory starvation!
+ */
+ i915_queue_hangcheck(i915);
+}
+
+static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b)
+{
+ struct intel_engine_cs *engine =
+ container_of(b, struct intel_engine_cs, breadcrumbs);
+
+ assert_spin_locked(&b->lock);
+ if (!b->rpm_wakelock)
+ return;
+
+ if (b->irq_enabled) {
+ irq_disable(engine);
+ b->irq_enabled = false;
+ }
+
+ intel_runtime_pm_put(engine->i915);
+ b->rpm_wakelock = false;
+}
+
+static inline struct intel_wait *to_wait(struct rb_node *node)
+{
+ return container_of(node, struct intel_wait, node);
+}
+
+static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
+ struct intel_wait *wait)
+{
+ assert_spin_locked(&b->lock);
+
+ /* This request is completed, so remove it from the tree, mark it as
+ * complete, and *then* wake up the associated task.
+ */
+ rb_erase(&wait->node, &b->waiters);
+ RB_CLEAR_NODE(&wait->node);
+
+ wake_up_process(wait->tsk); /* implicit smp_wmb() */
+}
+
+static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
+ struct intel_wait *wait)
+{
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct rb_node **p, *parent, *completed;
+ bool first;
+ u32 seqno;
+
+ /* Insert the request into the retirement ordered list
+ * of waiters by walking the rbtree. If we are the oldest
+ * seqno in the tree (the first to be retired), then
+ * set ourselves as the bottom-half.
+ *
+ * As we descend the tree, prune completed branches: since we hold
+ * the spinlock, we know that the first_waiter must be delayed, and we
+ * can reduce some of the sequential wake-up latency if we take action
+ * ourselves and wake up the completed tasks in parallel. Also, by
+ * removing stale elements in the tree, we may be able to reduce the
+ * ping-pong between the old bottom-half and ourselves as first-waiter.
+ */
+ first = true;
+ parent = NULL;
+ completed = NULL;
+ seqno = intel_engine_get_seqno(engine);
+
+ /* If the request completed before we managed to grab the spinlock,
+ * return now before adding ourselves to the rbtree. We let the
+ * current bottom-half handle any pending wakeups and instead
+ * try and get out of the way quickly.
+ */
+ if (i915_seqno_passed(seqno, wait->seqno)) {
+ RB_CLEAR_NODE(&wait->node);
+ return first;
+ }
+
+ p = &b->waiters.rb_node;
+ while (*p) {
+ parent = *p;
+ if (wait->seqno == to_wait(parent)->seqno) {
+ /* We have multiple waiters on the same seqno, select
+ * the highest priority task (that with the smallest
+ * task->prio) to serve as the bottom-half for this
+ * group.
+ */
+ if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
+ p = &parent->rb_right;
+ first = false;
+ } else {
+ p = &parent->rb_left;
+ }
+ } else if (i915_seqno_passed(wait->seqno,
+ to_wait(parent)->seqno)) {
+ p = &parent->rb_right;
+ if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
+ completed = parent;
+ else
+ first = false;
+ } else {
+ p = &parent->rb_left;
+ }
+ }
+ rb_link_node(&wait->node, parent, p);
+ rb_insert_color(&wait->node, &b->waiters);
+ GEM_BUG_ON(!first && !b->irq_seqno_bh);
+
+ if (completed) {
+ struct rb_node *next = rb_next(completed);
+
+ GEM_BUG_ON(!next && !first);
+ if (next && next != &wait->node) {
+ GEM_BUG_ON(first);
+ b->first_wait = to_wait(next);
+ smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk);
+ /* As there is a delay between reading the current
+ * seqno, processing the completed tasks and selecting
+ * the next waiter, we may have missed the interrupt
+ * and so need the next bottom-half to wake up.
+ *
+ * Also as we enable the IRQ, we may miss the
+ * interrupt for that seqno, so we have to wake up
+ * the next bottom-half in order to do a coherent check
+ * in case the seqno passed.
+ */
+ __intel_breadcrumbs_enable_irq(b);
+ if (READ_ONCE(b->irq_posted))
+ wake_up_process(to_wait(next)->tsk);
+ }
+
+ do {
+ struct intel_wait *crumb = to_wait(completed);
+ completed = rb_prev(completed);
+ __intel_breadcrumbs_finish(b, crumb);
+ } while (completed);
+ }
+
+ if (first) {
+ GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
+ b->first_wait = wait;
+ smp_store_mb(b->irq_seqno_bh, wait->tsk);
+ /* After assigning ourselves as the new bottom-half, we must
+ * perform a cursory check to prevent a missed interrupt.
+ * Either we miss the interrupt whilst programming the hardware,
+ * or if there was a previous waiter (for a later seqno) they
+ * may be woken instead of us (due to the inherent race
+ * in the unlocked read of b->irq_seqno_bh in the irq handler)
+ * and so we miss the wake up.
+ */
+ __intel_breadcrumbs_enable_irq(b);
+ }
+ GEM_BUG_ON(!b->irq_seqno_bh);
+ GEM_BUG_ON(!b->first_wait);
+ GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node);
+
+ return first;
+}
+
+bool intel_engine_add_wait(struct intel_engine_cs *engine,
+ struct intel_wait *wait)
+{
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ bool first;
+
+ spin_lock(&b->lock);
+ first = __intel_engine_add_wait(engine, wait);
+ spin_unlock(&b->lock);
+
+ return first;
+}
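Callers pair intel_engine_add_wait() with intel_engine_remove_wait() around their sleep loop. A condensed sketch of the pattern (assuming the intel_wait_init() helper that initializes tsk/seqno):

	struct intel_wait wait;

	intel_wait_init(&wait, req->seqno);	/* wait.tsk = current */
	intel_engine_add_wait(req->engine, &wait);
	while (!i915_seqno_passed(intel_engine_get_seqno(req->engine),
				  wait.seqno)) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();	/* woken by the irq bottom-half */
	}
	__set_current_state(TASK_RUNNING);
	intel_engine_remove_wait(req->engine, &wait);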
+
+void intel_engine_enable_fake_irq(struct intel_engine_cs *engine)
+{
+ mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
+}
+
+static inline bool chain_wakeup(struct rb_node *rb, int priority)
+{
+ return rb && to_wait(rb)->tsk->prio <= priority;
+}
+
+static inline int wakeup_priority(struct intel_breadcrumbs *b,
+ struct task_struct *tsk)
+{
+ if (tsk == b->signaler)
+ return INT_MIN;
+ else
+ return tsk->prio;
+}
+
+void intel_engine_remove_wait(struct intel_engine_cs *engine,
+ struct intel_wait *wait)
+{
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+ /* Quick check to see if this waiter was already decoupled from
+ * the tree by the bottom-half to avoid contention on the spinlock
+ * by the herd.
+ */
+ if (RB_EMPTY_NODE(&wait->node))
+ return;
+
+ spin_lock(&b->lock);
+
+ if (RB_EMPTY_NODE(&wait->node))
+ goto out_unlock;
+
+ if (b->first_wait == wait) {
+ const int priority = wakeup_priority(b, wait->tsk);
+ struct rb_node *next;
+
+ GEM_BUG_ON(b->irq_seqno_bh != wait->tsk);
+
+ /* We are the current bottom-half. Find the next candidate,
+ * the first waiter in the queue on the remaining oldest
+ * request. As multiple seqnos may complete in the time it
+ * takes us to wake up and find the next waiter, we have to
+ * wake up that waiter for it to perform its own coherent
+ * completion check.
+ */
+ next = rb_next(&wait->node);
+ if (chain_wakeup(next, priority)) {
+ /* If the next waiter is already complete,
+ * wake it up and continue on to the next waiter. So
+ * if we have a small herd, they will wake up in parallel
+ * rather than sequentially, which should reduce
+ * the overall latency in waking all the completed
+ * clients.
+ *
+ * However, waking up a chain adds extra latency to
+ * the first_waiter. This is undesirable if that
+ * waiter is a high priority task.
+ */
+ u32 seqno = intel_engine_get_seqno(engine);
+
+ while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
+ struct rb_node *n = rb_next(next);
+
+ __intel_breadcrumbs_finish(b, to_wait(next));
+ next = n;
+ if (!chain_wakeup(next, priority))
+ break;
+ }
+ }
+
+ if (next) {
+ /* In our haste, we may have completed the first waiter
+ * before we enabled the interrupt. Do so now as we
+ * have a second waiter for a future seqno. Afterwards,
+ * we have to wake up that waiter in case we missed
+ * the interrupt, or if we have to handle an
+ * exception rather than a seqno completion.
+ */
+ b->first_wait = to_wait(next);
+ smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk);
+ if (b->first_wait->seqno != wait->seqno)
+ __intel_breadcrumbs_enable_irq(b);
+ wake_up_process(b->irq_seqno_bh);
+ } else {
+ b->first_wait = NULL;
+ WRITE_ONCE(b->irq_seqno_bh, NULL);
+ __intel_breadcrumbs_disable_irq(b);
+ }
+ } else {
+ GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
+ }
+
+ GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
+ rb_erase(&wait->node, &b->waiters);
+
+out_unlock:
+ GEM_BUG_ON(b->first_wait == wait);
+ GEM_BUG_ON(rb_first(&b->waiters) !=
+ (b->first_wait ? &b->first_wait->node : NULL));
+ GEM_BUG_ON(!b->irq_seqno_bh ^ RB_EMPTY_ROOT(&b->waiters));
+ spin_unlock(&b->lock);
+}
+
+static bool signal_complete(struct drm_i915_gem_request *request)
+{
+ if (!request)
+ return false;
+
+ /* If another process served as the bottom-half it may have already
+ * signalled that this wait is complete.
+ */
+ if (intel_wait_complete(&request->signaling.wait))
+ return true;
+
+ /* Carefully check if the request is complete, giving time for the
+ * seqno to be visible or if the GPU hung.
+ */
+ if (__i915_request_irq_complete(request))
+ return true;
+
+ return false;
+}
+
+static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
+{
+ return container_of(rb, struct drm_i915_gem_request, signaling.node);
+}
+
+static void signaler_set_rtpriority(void)
+{
+ struct sched_param param = { .sched_priority = 1 };
+
+ sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
+}
+
+static int intel_breadcrumbs_signaler(void *arg)
+{
+ struct intel_engine_cs *engine = arg;
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct drm_i915_gem_request *request;
+
+ /* Install ourselves with high priority to reduce signalling latency */
+ signaler_set_rtpriority();
+
+ do {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ /* We are either woken up by the interrupt bottom-half,
+ * or by a client adding a new signaller. In both cases,
+ * the GPU seqno may have advanced beyond our oldest signal.
+ * If it has, propagate the signal, remove the waiter and
+ * check again with the next oldest signal. Otherwise we
+ * need to wait for a new interrupt from the GPU or for
+ * a new client.
+ */
+ request = READ_ONCE(b->first_signal);
+ if (signal_complete(request)) {
+ /* Wake up all other completed waiters and select the
+ * next bottom-half for the next user interrupt.
+ */
+ intel_engine_remove_wait(engine,
+ &request->signaling.wait);
+
+ /* Find the next oldest signal. Note that as we have
+ * not been holding the lock, another client may
+ * have installed an even older signal than the one
+ * we just completed - so double check we are still
+ * the oldest before picking the next one.
+ */
+ spin_lock(&b->lock);
+ if (request == b->first_signal) {
+ struct rb_node *rb =
+ rb_next(&request->signaling.node);
+ b->first_signal = rb ? to_signaler(rb) : NULL;
+ }
+ rb_erase(&request->signaling.node, &b->signals);
+ spin_unlock(&b->lock);
+
+ i915_gem_request_unreference(request);
+ } else {
+ if (kthread_should_stop())
+ break;
+
+ schedule();
+ }
+ } while (1);
+ __set_current_state(TASK_RUNNING);
+
+ return 0;
+}
+
+void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
+{
+ struct intel_engine_cs *engine = request->engine;
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct rb_node *parent, **p;
+ bool first, wakeup;
+
+ if (unlikely(READ_ONCE(request->signaling.wait.tsk)))
+ return;
+
+ spin_lock(&b->lock);
+ if (unlikely(request->signaling.wait.tsk)) {
+ wakeup = false;
+ goto unlock;
+ }
+
+ request->signaling.wait.tsk = b->signaler;
+ request->signaling.wait.seqno = request->seqno;
+ i915_gem_request_reference(request);
+
+ /* First add ourselves into the list of waiters, but register our
+ * bottom-half as the signaller thread. As per usual, only the oldest
+ * waiter (not just the signaller) is tasked as the bottom-half waking
+ * up all completed waiters after the user interrupt.
+ *
+ * If we are the oldest waiter, enable the irq (after which we
+ * must double check that the seqno did not complete).
+ */
+ wakeup = __intel_engine_add_wait(engine, &request->signaling.wait);
+
+ /* Now insert ourselves into the retirement ordered list of signals
+ * on this engine. We track the oldest seqno as that will be the
+ * first signal to complete.
+ */
+ parent = NULL;
+ first = true;
+ p = &b->signals.rb_node;
+ while (*p) {
+ parent = *p;
+ if (i915_seqno_passed(request->seqno,
+ to_signaler(parent)->seqno)) {
+ p = &parent->rb_right;
+ first = false;
+ } else {
+ p = &parent->rb_left;
+ }
+ }
+ rb_link_node(&request->signaling.node, parent, p);
+ rb_insert_color(&request->signaling.node, &b->signals);
+ if (first)
+ smp_store_mb(b->first_signal, request);
+
+unlock:
+ spin_unlock(&b->lock);
+
+ if (wakeup)
+ wake_up_process(b->signaler);
+}
+
+int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
+{
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct task_struct *tsk;
+
+ spin_lock_init(&b->lock);
+ setup_timer(&b->fake_irq,
+ intel_breadcrumbs_fake_irq,
+ (unsigned long)engine);
+
+ /* Spawn a thread to provide a common bottom-half for all signals.
+ * As this is an asynchronous interface we cannot steal the current
+ * task for handling the bottom-half to the user interrupt, therefore
+ * we create a thread to do the coherent seqno dance after the
+ * interrupt and then signal the waitqueue (via the dma-buf/fence).
+ */
+ tsk = kthread_run(intel_breadcrumbs_signaler, engine,
+ "i915/signal:%d", engine->id);
+ if (IS_ERR(tsk))
+ return PTR_ERR(tsk);
+
+ b->signaler = tsk;
+
+ return 0;
+}
+
+void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
+{
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+ if (!IS_ERR_OR_NULL(b->signaler))
+ kthread_stop(b->signaler);
+
+ del_timer_sync(&b->fake_irq);
+}
+
+unsigned int intel_kick_waiters(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ unsigned int mask = 0;
+
+ /* To avoid the task_struct disappearing beneath us as we wake up
+ * the process, we must first inspect the task_struct->state under the
+ * RCU lock, i.e. as we call wake_up_process() we must be holding the
+ * rcu_read_lock().
+ */
+ rcu_read_lock();
+ for_each_engine(engine, i915)
+ if (unlikely(intel_engine_wakeup(engine)))
+ mask |= intel_engine_flag(engine);
+ rcu_read_unlock();
+
+ return mask;
+}
+
+unsigned int intel_kick_signalers(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ unsigned int mask = 0;
+
+ for_each_engine(engine, i915) {
+ if (unlikely(READ_ONCE(engine->breadcrumbs.first_signal))) {
+ wake_up_process(engine->breadcrumbs.signaler);
+ mask |= intel_engine_flag(engine);
+ }
+ }
+
+ return mask;
+}
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 1b3f97449..bc0fef3d3 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -96,7 +96,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
{
struct drm_crtc *crtc = crtc_state->crtc;
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int i, pipe = intel_crtc->pipe;
uint16_t coeffs[9] = { 0, };
@@ -207,7 +207,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
{
struct drm_crtc *crtc = state->crtc;
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe = to_intel_crtc(crtc)->pipe;
uint32_t mode;
@@ -255,7 +255,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
void intel_color_set_csc(struct drm_crtc_state *crtc_state)
{
struct drm_device *dev = crtc_state->crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (dev_priv->display.load_csc_matrix)
dev_priv->display.load_csc_matrix(crtc_state);
@@ -266,13 +266,13 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
struct drm_property_blob *blob)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
int i;
if (HAS_GMCH_DISPLAY(dev)) {
- if (intel_crtc->config->has_dsi_encoder)
+ if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(dev_priv);
else
assert_pll_enabled(dev_priv, pipe);
@@ -313,7 +313,7 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state)
{
struct drm_crtc *crtc = crtc_state->crtc;
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *intel_crtc_state =
to_intel_crtc_state(crtc_state);
@@ -343,7 +343,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
{
struct drm_crtc *crtc = state->crtc;
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
uint32_t i, lut_size = INTEL_INFO(dev)->color.degamma_lut_size;
@@ -426,7 +426,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
{
struct drm_crtc *crtc = state->crtc;
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
struct drm_color_lut *lut;
uint32_t i, lut_size;
@@ -485,7 +485,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
void intel_color_load_luts(struct drm_crtc_state *crtc_state)
{
struct drm_device *dev = crtc_state->crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
dev_priv->display.load_luts(crtc_state);
}
@@ -526,7 +526,7 @@ int intel_color_check(struct drm_crtc *crtc,
void intel_color_init(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
drm_mode_crtc_set_gamma_size(crtc, 256);
@@ -547,7 +547,8 @@ void intel_color_init(struct drm_crtc *crtc)
/* Enable color management support when we have degamma & gamma LUTs. */
if (INTEL_INFO(dev)->color.degamma_lut_size != 0 &&
INTEL_INFO(dev)->color.gamma_lut_size != 0)
- drm_helper_crtc_enable_color_mgmt(crtc,
+ drm_crtc_enable_color_mgmt(crtc,
INTEL_INFO(dev)->color.degamma_lut_size,
+ true,
INTEL_INFO(dev)->color.gamma_lut_size);
}
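
Every hunk in this file swaps the untyped dev->dev_private cast for the to_i915() accessor. A hedged sketch of the container_of pattern behind such an accessor, assuming struct drm_device is embedded in the i915 private structure (consistent with the dev_priv->drm dereferences seen elsewhere in this diff); the types here are stand-ins, not the driver's definitions:

#include <stddef.h>

struct drm_device_sketch { int dummy; };

struct i915_private_sketch {
	struct drm_device_sketch drm;	/* embedded base object */
	/* ... driver-private state ... */
};

/* container_of: recover the enclosing structure from a pointer to one
 * of its embedded members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static inline struct i915_private_sketch *
to_i915_sketch(struct drm_device_sketch *dev)
{
	return container_of(dev, struct i915_private_sketch, drm);
}

Compared with reading a void * field, the compiler can now type-check every conversion, which is what makes a tree-wide sweep like this one safe to do mechanically.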
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index a3f87d668..827b6ef4e 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -67,7 +67,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
enum intel_display_power_domain power_domain;
u32 tmp;
@@ -98,7 +98,7 @@ out:
static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
u32 tmp, flags = 0;
@@ -146,7 +146,7 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
@@ -281,7 +281,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct intel_crt *crt = intel_attached_crt(connector);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 adpa;
bool ret;
@@ -301,8 +301,10 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
I915_WRITE(crt->adpa_reg, adpa);
- if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
- 1000))
+ if (intel_wait_for_register(dev_priv,
+ crt->adpa_reg,
+ ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
+ 1000))
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
if (turn_off_dac) {
@@ -326,7 +328,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct intel_crt *crt = intel_attached_crt(connector);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
bool reenable_hpd;
u32 adpa;
bool ret;
@@ -353,8 +355,10 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
I915_WRITE(crt->adpa_reg, adpa);
- if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
- 1000)) {
+ if (intel_wait_for_register(dev_priv,
+ crt->adpa_reg,
+ ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
+ 1000)) {
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
I915_WRITE(crt->adpa_reg, save_adpa);
}
@@ -385,7 +389,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
static bool intel_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 stat;
bool ret = false;
int i, tries = 0;
@@ -412,9 +416,9 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
CRT_HOTPLUG_FORCE_DETECT,
CRT_HOTPLUG_FORCE_DETECT);
/* wait for FORCE_DETECT to go off */
- if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
- CRT_HOTPLUG_FORCE_DETECT) == 0,
- 1000))
+ if (intel_wait_for_register(dev_priv, PORT_HOTPLUG_EN,
+ CRT_HOTPLUG_FORCE_DETECT, 0,
+ 1000))
DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
}
@@ -467,7 +471,7 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector,
static bool intel_crt_detect_ddc(struct drm_connector *connector)
{
struct intel_crt *crt = intel_attached_crt(connector);
- struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
struct edid *edid;
struct i2c_adapter *i2c;
@@ -503,7 +507,7 @@ static enum drm_connector_status
intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
{
struct drm_device *dev = crt->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t save_bclrpat;
uint32_t save_vtotal;
uint32_t vtotal, vactive;
@@ -618,7 +622,7 @@ static enum drm_connector_status
intel_crt_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crt *crt = intel_attached_crt(connector);
struct intel_encoder *intel_encoder = &crt->base;
enum intel_display_power_domain power_domain;
@@ -699,7 +703,7 @@ static void intel_crt_destroy(struct drm_connector *connector)
static int intel_crt_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crt *crt = intel_attached_crt(connector);
struct intel_encoder *intel_encoder = &crt->base;
enum intel_display_power_domain power_domain;
@@ -734,7 +738,7 @@ static int intel_crt_set_property(struct drm_connector *connector,
void intel_crt_reset(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crt *crt = intel_encoder_to_crt(to_intel_encoder(encoder));
if (INTEL_INFO(dev)->gen >= 5) {
@@ -760,6 +764,8 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.detect = intel_crt_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .late_register = intel_connector_register,
+ .early_unregister = intel_connector_unregister,
.destroy = intel_crt_destroy,
.set_property = intel_crt_set_property,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -770,7 +776,6 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
.mode_valid = intel_crt_mode_valid,
.get_modes = intel_crt_get_modes,
- .best_encoder = intel_best_encoder,
};
static const struct drm_encoder_funcs intel_crt_enc_funcs = {
@@ -809,7 +814,7 @@ void intel_crt_init(struct drm_device *dev)
struct drm_connector *connector;
struct intel_crt *crt;
struct intel_connector *intel_connector;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t adpa_reg;
u32 adpa;
@@ -857,7 +862,7 @@ void intel_crt_init(struct drm_device *dev)
&intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
+ DRM_MODE_ENCODER_DAC, "CRT");
intel_connector_attach_encoder(intel_connector, &crt->base);
@@ -894,12 +899,9 @@ void intel_crt_init(struct drm_device *dev)
crt->base.get_hw_state = intel_crt_get_hw_state;
}
intel_connector->get_hw_state = intel_connector_get_hw_state;
- intel_connector->unregister = intel_connector_unregister;
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
- drm_connector_register(connector);
-
if (!I915_HAS_HOTPLUG(dev))
intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
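
The hotplug hunks above convert open-coded wait_for() polling into intel_wait_for_register(dev_priv, reg, mask, value, timeout_ms). A driver-independent sketch of those semantics, with one loop iteration standing in for roughly a millisecond of sleep; the real helper may also do an initial fast busy-wait, which this deliberately omits:

#include <errno.h>
#include <stdint.h>

static int wait_for_register_sketch(const volatile uint32_t *reg,
				    uint32_t mask, uint32_t value,
				    unsigned int timeout_ms)
{
	unsigned int ms;

	for (ms = 0; ms < timeout_ms; ms++) {
		if ((*reg & mask) == value)
			return 0;	/* condition met */
		/* msleep(1) in real driver code */
	}
	/* one last check after the timeout window */
	return ((*reg & mask) == value) ? 0 : -ETIMEDOUT;
}

With these semantics, a non-zero return at the call sites above means the ADPA_CRT_HOTPLUG_FORCE_TRIGGER bit never cleared within 1000 ms, hence the "timed out waiting for FORCE_TRIGGER" debug message.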
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index ecadf8709..40e9c3c3c 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -47,7 +47,7 @@
#define I915_CSR_SKL "/*(DEBLOBBED)*/"
/*(DEBLOBBED)*/
-#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23)
+#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 26)
#define I915_CSR_BXT "/*(DEBLOBBED)*/"
/*(DEBLOBBED)*/
@@ -286,7 +286,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
uint32_t i;
uint32_t *dmc_payload;
- uint32_t required_min_version;
+ uint32_t required_version;
if (!fw)
return NULL;
@@ -303,24 +303,23 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
csr->version = css_header->version;
if (IS_KABYLAKE(dev_priv)) {
- required_min_version = KBL_CSR_VERSION_REQUIRED;
+ required_version = KBL_CSR_VERSION_REQUIRED;
} else if (IS_SKYLAKE(dev_priv)) {
- required_min_version = SKL_CSR_VERSION_REQUIRED;
+ required_version = SKL_CSR_VERSION_REQUIRED;
} else if (IS_BROXTON(dev_priv)) {
- required_min_version = BXT_CSR_VERSION_REQUIRED;
+ required_version = BXT_CSR_VERSION_REQUIRED;
} else {
MISSING_CASE(INTEL_REVID(dev_priv));
- required_min_version = 0;
+ required_version = 0;
}
- if (csr->version < required_min_version) {
- DRM_INFO("Refusing to load old DMC firmware v%u.%u,"
- " please upgrade to v%u.%u or later"
- " [" FIRMWARE_URL "].\n",
+ if (csr->version != required_version) {
+ DRM_INFO("Refusing to load DMC firmware v%u.%u,"
+ " please use v%u.%u [" FIRMWARE_URL "].\n",
CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version),
- CSR_VERSION_MAJOR(required_min_version),
- CSR_VERSION_MINOR(required_min_version));
+ CSR_VERSION_MAJOR(required_version),
+ CSR_VERSION_MINOR(required_version));
return NULL;
}
@@ -413,7 +412,7 @@ static void csr_load_work_fn(struct work_struct *work)
csr = &dev_priv->csr;
ret = reject_firmware(&fw, dev_priv->csr.fw_path,
- &dev_priv->dev->pdev->dev);
+ &dev_priv->drm.pdev->dev);
if (fw)
dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw);
@@ -427,7 +426,7 @@ static void csr_load_work_fn(struct work_struct *work)
CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version));
} else {
- dev_notice(dev_priv->dev->dev,
+ dev_notice(dev_priv->drm.dev,
"Failed to load DMC firmware"
" [" FIRMWARE_URL "],"
" disabling runtime power management.\n");
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 12c4f4356..1a7efac65 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -318,7 +318,7 @@ static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
default:
WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type);
/* fallthrough and treat as unknown */
- case INTEL_OUTPUT_DISPLAYPORT:
+ case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_EDP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_UNKNOWN:
@@ -512,7 +512,7 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
ddi_translations = ddi_translations_edp;
size = n_edp_entries;
break;
- case INTEL_OUTPUT_DISPLAYPORT:
+ case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
ddi_translations = ddi_translations_dp;
size = n_dp_entries;
@@ -568,7 +568,7 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
void hsw_fdi_link_train(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
u32 temp, i, rx_ctl_val;
@@ -859,7 +859,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
if (pipe_config->has_pch_encoder)
dotclock = intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->fdi_m_n);
- else if (pipe_config->has_dp_encoder)
+ else if (intel_crtc_has_dp_encoder(pipe_config))
dotclock = intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);
else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36)
@@ -876,7 +876,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
static void skl_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int link_clock = 0;
uint32_t dpll_ctl1, dpll;
@@ -924,7 +924,7 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
static void hsw_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int link_clock = 0;
u32 val, pll;
@@ -973,7 +973,7 @@ static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
{
struct intel_shared_dpll *pll;
struct intel_dpll_hw_state *state;
- intel_clock_t clock;
+ struct dpll clock;
/* For DDI ports we always use a shared PLL. */
if (WARN_ON(dpll == DPLL_ID_PRIVATE))
@@ -996,7 +996,7 @@ static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
static void bxt_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_ddi_get_encoder_port(encoder);
uint32_t dpll = port;
@@ -1086,14 +1086,14 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
int type = intel_encoder->type;
uint32_t temp;
- if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) {
+ if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) {
WARN_ON(transcoder_is_dsi(cpu_transcoder));
temp = TRANS_MSA_SYNC_CLK;
@@ -1121,7 +1121,7 @@ void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
uint32_t temp;
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
@@ -1138,7 +1138,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
@@ -1207,7 +1207,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
temp |= TRANS_DDI_MODE_SELECT_FDI;
temp |= (intel_crtc->config->fdi_lanes - 1) << 1;
- } else if (type == INTEL_OUTPUT_DISPLAYPORT ||
+ } else if (type == INTEL_OUTPUT_DP ||
type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -1248,7 +1248,7 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
{
struct drm_device *dev = intel_connector->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *intel_encoder = intel_connector->encoder;
int type = intel_connector->base.connector_type;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
@@ -1310,7 +1310,7 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_ddi_get_encoder_port(encoder);
enum intel_display_power_domain power_domain;
u32 tmp;
@@ -1367,6 +1367,14 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
out:
+ if (ret && IS_BROXTON(dev_priv)) {
+ tmp = I915_READ(BXT_PHY_CTL(port));
+ if ((tmp & (BXT_PHY_LANE_POWERDOWN_ACK |
+ BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
+ DRM_ERROR("Port %c enabled but PHY powered down? "
+ "(PHY_CTL %08x)\n", port_name(port), tmp);
+ }
+
intel_display_power_put(dev_priv, power_domain);
return ret;
@@ -1376,7 +1384,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
@@ -1388,7 +1396,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
{
- struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
if (cpu_transcoder != TRANSCODER_EDP)
@@ -1425,7 +1433,7 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder, u32 level)
dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
hdmi_iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level;
- if (type == INTEL_OUTPUT_DISPLAYPORT) {
+ if (type == INTEL_OUTPUT_DP) {
if (dp_iboost) {
iboost = dp_iboost;
} else {
@@ -1477,7 +1485,7 @@ static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
ddi_translations = bxt_ddi_translations_edp;
- } else if (type == INTEL_OUTPUT_DISPLAYPORT
+ } else if (type == INTEL_OUTPUT_DP
|| type == INTEL_OUTPUT_EDP) {
n_entries = ARRAY_SIZE(bxt_ddi_translations_dp);
ddi_translations = bxt_ddi_translations_dp;
@@ -1651,7 +1659,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
intel_ddi_clk_select(intel_encoder, crtc->config);
- if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+ if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_dp_set_link_params(intel_dp, crtc->config);
@@ -1679,7 +1687,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
uint32_t val;
@@ -1700,7 +1708,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
if (wait)
intel_wait_ddi_buf_idle(dev_priv, port);
- if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+ if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
intel_edp_panel_vdd_on(intel_dp);
@@ -1726,7 +1734,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
@@ -1765,7 +1773,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int type = intel_encoder->type;
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (intel_crtc->config->has_audio) {
intel_audio_codec_disable(intel_encoder);
@@ -1781,9 +1789,11 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
}
}
-static bool broxton_phy_is_enabled(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
+bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
{
+ enum port port;
+
if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy)))
return false;
@@ -1809,38 +1819,51 @@ static bool broxton_phy_is_enabled(struct drm_i915_private *dev_priv,
return false;
}
+ for_each_port_masked(port,
+ phy == DPIO_PHY0 ? BIT(PORT_B) | BIT(PORT_C) :
+ BIT(PORT_A)) {
+ u32 tmp = I915_READ(BXT_PHY_CTL(port));
+
+ if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) {
+ DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane "
+ "for port %c powered down "
+ "(PHY_CTL %08x)\n",
+ phy, port_name(port), tmp);
+
+ return false;
+ }
+ }
+
return true;
}
-static u32 broxton_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
}
-static void broxton_phy_wait_grc_done(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
+static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
{
- if (wait_for(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE, 10))
+ if (intel_wait_for_register(dev_priv,
+ BXT_PORT_REF_DW3(phy),
+ GRC_DONE, GRC_DONE,
+ 10))
DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
}
-static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv,
- enum dpio_phy phy);
-
-static void broxton_phy_init(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
+void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
- enum port port;
- u32 ports, val;
+ u32 val;
- if (broxton_phy_is_enabled(dev_priv, phy)) {
+ if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
/* Still read out the GRC value for state verification */
if (phy == DPIO_PHY0)
- dev_priv->bxt_phy_grc = broxton_get_grc(dev_priv, phy);
+ dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);
- if (broxton_phy_verify_state(dev_priv, phy)) {
+ if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
"won't reprogram it\n", phy);
@@ -1849,8 +1872,6 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
"force reprogramming it\n", phy);
- } else {
- DRM_DEBUG_DRIVER("DDI PHY %d not enabled, enabling it\n", phy);
}
val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
@@ -1870,28 +1891,6 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
DRM_ERROR("timeout during PHY%d power on\n", phy);
}
- if (phy == DPIO_PHY0)
- ports = BIT(PORT_B) | BIT(PORT_C);
- else
- ports = BIT(PORT_A);
-
- for_each_port_masked(port, ports) {
- int lane;
-
- for (lane = 0; lane < 4; lane++) {
- val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane));
- /*
- * Note that on CHV this flag is called UPAR, but has
- * the same function.
- */
- val &= ~LATENCY_OPTIM;
- if (lane != 1)
- val |= LATENCY_OPTIM;
-
- I915_WRITE(BXT_PORT_TX_DW14_LN(port, lane), val);
- }
- }
-
/* Program PLL Rcomp code offset */
val = I915_READ(BXT_PORT_CL1CM_DW9(phy));
val &= ~IREF0RC_OFFSET_MASK;
@@ -1938,10 +1937,7 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
* the corresponding calibrated value from PHY1, and disable
* the automatic calibration on PHY0.
*/
- broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1);
-
- val = dev_priv->bxt_phy_grc = broxton_get_grc(dev_priv,
- DPIO_PHY1);
+ val = dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, DPIO_PHY1);
grc_code = val << GRC_CODE_FAST_SHIFT |
val << GRC_CODE_SLOW_SHIFT |
val;
@@ -1951,31 +1947,16 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
val |= GRC_DIS | GRC_RDY_OVRD;
I915_WRITE(BXT_PORT_REF_DW8(DPIO_PHY0), val);
}
- /*
- * During PHY1 init delay waiting for GRC calibration to finish, since
- * it can happen in parallel with the subsequent PHY0 init.
- */
val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
val |= COMMON_RESET_DIS;
I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
-}
-
-void broxton_ddi_phy_init(struct drm_i915_private *dev_priv)
-{
- /* Enable PHY1 first since it provides Rcomp for PHY0 */
- broxton_phy_init(dev_priv, DPIO_PHY1);
- broxton_phy_init(dev_priv, DPIO_PHY0);
- /*
- * If BIOS enabled only PHY0 and not PHY1, we skipped waiting for the
- * PHY1 GRC calibration to finish, so wait for it here.
- */
- broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1);
+ if (phy == DPIO_PHY1)
+ bxt_phy_wait_grc_done(dev_priv, DPIO_PHY1);
}
-static void broxton_phy_uninit(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
+void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
uint32_t val;
@@ -1988,12 +1969,6 @@ static void broxton_phy_uninit(struct drm_i915_private *dev_priv,
I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
}
-void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv)
-{
- broxton_phy_uninit(dev_priv, DPIO_PHY1);
- broxton_phy_uninit(dev_priv, DPIO_PHY0);
-}
-
static bool __printf(6, 7)
__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
i915_reg_t reg, u32 mask, u32 expected,
@@ -2021,11 +1996,9 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
return false;
}
-static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
+bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
{
- enum port port;
- u32 ports;
uint32_t mask;
bool ok;
@@ -2033,27 +2006,11 @@ static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv,
__phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \
## __VA_ARGS__)
- /* We expect the PHY to be always enabled */
- if (!broxton_phy_is_enabled(dev_priv, phy))
+ if (!bxt_ddi_phy_is_enabled(dev_priv, phy))
return false;
ok = true;
- if (phy == DPIO_PHY0)
- ports = BIT(PORT_B) | BIT(PORT_C);
- else
- ports = BIT(PORT_A);
-
- for_each_port_masked(port, ports) {
- int lane;
-
- for (lane = 0; lane < 4; lane++)
- ok &= _CHK(BXT_PORT_TX_DW14_LN(port, lane),
- LATENCY_OPTIM,
- lane != 1 ? LATENCY_OPTIM : 0,
- "BXT_PORT_TX_DW14_LN(%d, %d)", port, lane);
- }
-
/* PLL Rcomp code offset */
ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
@@ -2097,11 +2054,65 @@ static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv,
#undef _CHK
}
-void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv)
+static uint8_t
+bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ switch (pipe_config->lane_count) {
+ case 1:
+ return 0;
+ case 2:
+ return BIT(2) | BIT(0);
+ case 4:
+ return BIT(3) | BIT(2) | BIT(0);
+ default:
+ MISSING_CASE(pipe_config->lane_count);
+
+ return 0;
+ }
+}
+
+static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
+ enum port port = dport->port;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ int lane;
+
+ for (lane = 0; lane < 4; lane++) {
+ u32 val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane));
+
+ /*
+ * Note that on CHV this flag is called UPAR, but has
+ * the same function.
+ */
+ val &= ~LATENCY_OPTIM;
+ if (intel_crtc->config->lane_lat_optim_mask & BIT(lane))
+ val |= LATENCY_OPTIM;
+
+ I915_WRITE(BXT_PORT_TX_DW14_LN(port, lane), val);
+ }
+}
+
+static uint8_t
+bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
{
- if (!broxton_phy_verify_state(dev_priv, DPIO_PHY0) ||
- !broxton_phy_verify_state(dev_priv, DPIO_PHY1))
- i915_report_error(dev_priv, "DDI PHY state mismatch\n");
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
+ enum port port = dport->port;
+ int lane;
+ uint8_t mask;
+
+ mask = 0;
+ for (lane = 0; lane < 4; lane++) {
+ u32 val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane));
+
+ if (val & LATENCY_OPTIM)
+ mask |= BIT(lane);
+ }
+
+ return mask;
}
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
@@ -2152,7 +2163,7 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
void intel_ddi_fdi_disable(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
uint32_t val;
@@ -2185,7 +2196,7 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc)
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
struct intel_hdmi *intel_hdmi;
@@ -2239,7 +2250,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
break;
case TRANS_DDI_MODE_SELECT_DP_SST:
case TRANS_DDI_MODE_SELECT_DP_MST:
- pipe_config->has_dp_encoder = true;
pipe_config->lane_count =
((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
intel_dp_get_m_n(intel_crtc, pipe_config);
@@ -2275,13 +2285,19 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
}
intel_ddi_clock_get(encoder, pipe_config);
+
+ if (IS_BROXTON(dev_priv))
+ pipe_config->lane_lat_optim_mask =
+ bxt_ddi_phy_get_lane_lat_optim_mask(encoder);
}
static bool intel_ddi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int type = encoder->type;
int port = intel_ddi_get_encoder_port(encoder);
+ int ret;
WARN(type == INTEL_OUTPUT_UNKNOWN, "compute_config() on unknown output!\n");
@@ -2289,9 +2305,17 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
pipe_config->cpu_transcoder = TRANSCODER_EDP;
if (type == INTEL_OUTPUT_HDMI)
- return intel_hdmi_compute_config(encoder, pipe_config);
+ ret = intel_hdmi_compute_config(encoder, pipe_config);
else
- return intel_dp_compute_config(encoder, pipe_config);
+ ret = intel_dp_compute_config(encoder, pipe_config);
+
+ if (IS_BROXTON(dev_priv) && ret)
+ pipe_config->lane_lat_optim_mask =
+ bxt_ddi_phy_calc_lane_lat_optim_mask(encoder,
+ pipe_config);
+
+ return ret;
+
}
static const struct drm_encoder_funcs intel_ddi_funcs = {
@@ -2336,7 +2360,7 @@ intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
void intel_ddi_init(struct drm_device *dev, enum port port)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
@@ -2386,10 +2410,12 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
encoder = &intel_encoder->base;
drm_encoder_init(dev, encoder, &intel_ddi_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port));
intel_encoder->compute_config = intel_ddi_compute_config;
intel_encoder->enable = intel_enable_ddi;
+ if (IS_BROXTON(dev_priv))
+ intel_encoder->pre_pll_enable = bxt_ddi_pre_pll_enable;
intel_encoder->pre_enable = intel_ddi_pre_enable;
intel_encoder->disable = intel_disable_ddi;
intel_encoder->post_disable = intel_ddi_post_disable;
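
bxt_ddi_phy_calc_lane_lat_optim_mask() maps the configured lane count onto a per-lane bitmask that bxt_ddi_pre_pll_enable() later applies, setting or clearing LATENCY_OPTIM in one PORT_TX_DW14 register per lane. A standalone restatement of just that mapping, with a small self-check:

#include <assert.h>
#include <stdint.h>

/* bit n set => enable LATENCY_OPTIM on lane n */
static uint8_t lane_lat_optim_mask(int lane_count)
{
	switch (lane_count) {
	case 1:
		return 0;
	case 2:
		return (1u << 2) | (1u << 0);
	case 4:
		return (1u << 3) | (1u << 2) | (1u << 0);
	default:
		return 0;	/* the driver reports this as a missing case */
	}
}

int main(void)
{
	assert(lane_lat_optim_mask(2) == 0x05);
	assert(lane_lat_optim_mask(4) == 0x0d);
	return 0;
}

Storing the mask in the crtc state at compute_config time and reading it back with bxt_ddi_phy_get_lane_lat_optim_mask() during state readout is what lets the driver cross-check hardware against software state.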
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
new file mode 100644
index 000000000..cba137f9a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -0,0 +1,388 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+
+void intel_device_info_dump(struct drm_i915_private *dev_priv)
+{
+ const struct intel_device_info *info = &dev_priv->info;
+
+#define PRINT_S(name) "%s"
+#define SEP_EMPTY
+#define PRINT_FLAG(name) info->name ? #name "," : ""
+#define SEP_COMMA ,
+ DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
+ DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
+ info->gen,
+ dev_priv->drm.pdev->device,
+ dev_priv->drm.pdev->revision,
+ DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
+#undef PRINT_S
+#undef SEP_EMPTY
+#undef PRINT_FLAG
+#undef SEP_COMMA
+}
+
+static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_device_info *info = mkwrite_device_info(dev_priv);
+ u32 fuse, eu_dis;
+
+ fuse = I915_READ(CHV_FUSE_GT);
+
+ info->slice_total = 1;
+
+ if (!(fuse & CHV_FGT_DISABLE_SS0)) {
+ info->subslice_per_slice++;
+ eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
+ CHV_FGT_EU_DIS_SS0_R1_MASK);
+ info->eu_total += 8 - hweight32(eu_dis);
+ }
+
+ if (!(fuse & CHV_FGT_DISABLE_SS1)) {
+ info->subslice_per_slice++;
+ eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
+ CHV_FGT_EU_DIS_SS1_R1_MASK);
+ info->eu_total += 8 - hweight32(eu_dis);
+ }
+
+ info->subslice_total = info->subslice_per_slice;
+ /*
+ * CHV is expected to always have a uniform distribution of EUs
+ * across subslices.
+ */
+ info->eu_per_subslice = info->subslice_total ?
+ info->eu_total / info->subslice_total :
+ 0;
+ /*
+ * CHV supports subslice power gating on devices with more than
+ * one subslice, and supports EU power gating on devices with
+ * more than one EU pair per subslice.
+ */
+ info->has_slice_pg = 0;
+ info->has_subslice_pg = (info->subslice_total > 1);
+ info->has_eu_pg = (info->eu_per_subslice > 2);
+}
+
+static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_device_info *info = mkwrite_device_info(dev_priv);
+ int s_max = 3, ss_max = 4, eu_max = 8;
+ int s, ss;
+ u32 fuse2, s_enable, ss_disable, eu_disable;
+ u8 eu_mask = 0xff;
+
+ fuse2 = I915_READ(GEN8_FUSE2);
+ s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+ ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >> GEN9_F2_SS_DIS_SHIFT;
+
+ info->slice_total = hweight32(s_enable);
+ /*
+ * The subslice disable field is global, i.e. it applies
+ * to each of the enabled slices.
+ */
+ info->subslice_per_slice = ss_max - hweight32(ss_disable);
+ info->subslice_total = info->slice_total * info->subslice_per_slice;
+
+ /*
+ * Iterate through enabled slices and subslices to
+ * count the total enabled EU.
+ */
+ for (s = 0; s < s_max; s++) {
+ if (!(s_enable & BIT(s)))
+ /* skip disabled slice */
+ continue;
+
+ eu_disable = I915_READ(GEN9_EU_DISABLE(s));
+ for (ss = 0; ss < ss_max; ss++) {
+ int eu_per_ss;
+
+ if (ss_disable & BIT(ss))
+ /* skip disabled subslice */
+ continue;
+
+ eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
+ eu_mask);
+
+ /*
+ * Record which subslice(s) have 7 EUs. We
+ * can tune the hash used to spread work among
+ * subslices if they are unbalanced.
+ */
+ if (eu_per_ss == 7)
+ info->subslice_7eu[s] |= BIT(ss);
+
+ info->eu_total += eu_per_ss;
+ }
+ }
+
+ /*
+ * SKL is expected to always have a uniform distribution
+ * of EUs across subslices with the exception that any one
+ * EU in any one subslice may be fused off for die
+ * recovery. BXT is expected to be perfectly uniform in EU
+ * distribution.
+ */
+ info->eu_per_subslice = info->subslice_total ?
+ DIV_ROUND_UP(info->eu_total,
+ info->subslice_total) : 0;
+ /*
+ * SKL supports slice power gating on devices with more than
+ * one slice, and supports EU power gating on devices with
+ * more than one EU pair per subslice. BXT supports subslice
+ * power gating on devices with more than one subslice, and
+ * supports EU power gating on devices with more than one EU
+ * pair per subslice.
+ */
+ info->has_slice_pg =
+ (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
+ info->slice_total > 1;
+ info->has_subslice_pg =
+ IS_BROXTON(dev_priv) && info->subslice_total > 1;
+ info->has_eu_pg = info->eu_per_subslice > 2;
+
+ if (IS_BROXTON(dev_priv)) {
+#define IS_SS_DISABLED(_ss_disable, ss) (_ss_disable & BIT(ss))
+ /*
+ * There is a HW issue in 2x6 fused down parts that requires
+ * Pooled EU to be enabled as a WA. The pool configuration
+ * changes depending upon which subslice is fused down. This
+ * doesn't affect if the device has all 3 subslices enabled.
+ */
+ /* WaEnablePooledEuFor2x6:bxt */
+ info->has_pooled_eu = ((info->subslice_per_slice == 3) ||
+ (info->subslice_per_slice == 2 &&
+ INTEL_REVID(dev_priv) < BXT_REVID_C0));
+
+ info->min_eu_in_pool = 0;
+ if (info->has_pooled_eu) {
+ if (IS_SS_DISABLED(ss_disable, 0) ||
+ IS_SS_DISABLED(ss_disable, 2))
+ info->min_eu_in_pool = 3;
+ else if (IS_SS_DISABLED(ss_disable, 1))
+ info->min_eu_in_pool = 6;
+ else
+ info->min_eu_in_pool = 9;
+ }
+#undef IS_SS_DISABLED
+ }
+}
+
+static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_device_info *info = mkwrite_device_info(dev_priv);
+ const int s_max = 3, ss_max = 3, eu_max = 8;
+ int s, ss;
+ u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
+
+ fuse2 = I915_READ(GEN8_FUSE2);
+ s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+ ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;
+
+ eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
+ eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
+ ((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
+ (32 - GEN8_EU_DIS0_S1_SHIFT));
+ eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
+ ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
+ (32 - GEN8_EU_DIS1_S2_SHIFT));
+
+ info->slice_total = hweight32(s_enable);
+
+ /*
+ * The subslice disable field is global, i.e. it applies
+ * to each of the enabled slices.
+ */
+ info->subslice_per_slice = ss_max - hweight32(ss_disable);
+ info->subslice_total = info->slice_total * info->subslice_per_slice;
+
+ /*
+ * Iterate through enabled slices and subslices to
+ * count the total enabled EU.
+ */
+ for (s = 0; s < s_max; s++) {
+ if (!(s_enable & (0x1 << s)))
+ /* skip disabled slice */
+ continue;
+
+ for (ss = 0; ss < ss_max; ss++) {
+ u32 n_disabled;
+
+ if (ss_disable & (0x1 << ss))
+ /* skip disabled subslice */
+ continue;
+
+ n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));
+
+ /*
+ * Record which subslices have 7 EUs.
+ */
+ if (eu_max - n_disabled == 7)
+ info->subslice_7eu[s] |= 1 << ss;
+
+ info->eu_total += eu_max - n_disabled;
+ }
+ }
+
+ /*
+ * BDW is expected to always have a uniform distribution of EUs across
+ * subslices with the exception that any one EU in any one subslice may
+ * be fused off for die recovery.
+ */
+ info->eu_per_subslice = info->subslice_total ?
+ DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;
+
+ /*
+ * BDW supports slice power gating on devices with more than
+ * one slice.
+ */
+ info->has_slice_pg = (info->slice_total > 1);
+ info->has_subslice_pg = 0;
+ info->has_eu_pg = 0;
+}
+
+/*
+ * Determine various intel_device_info fields at runtime.
+ *
+ * Use it when either:
+ * - it's judged too laborious to fill n static structures with the limit
+ * when a simple if statement does the job,
+ * - run-time checks (e.g. reading fuse/strap registers) are needed.
+ *
+ * This function needs to be called:
+ * - after the MMIO has been setup as we are reading registers,
+ * - after the PCH has been detected,
+ * - before the first usage of the fields it can tweak.
+ */
+void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_device_info *info = mkwrite_device_info(dev_priv);
+ enum pipe pipe;
+
+ /*
+ * Skylake and Broxton currently don't expose the topmost plane as its
+ * use is exclusive with the legacy cursor and we only want to expose
+ * one of those, not both. Until we can safely expose the topmost plane
+ * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
+ * we don't expose the topmost plane at all to prevent ABI breakage
+ * down the line.
+ */
+ if (IS_BROXTON(dev_priv)) {
+ info->num_sprites[PIPE_A] = 2;
+ info->num_sprites[PIPE_B] = 2;
+ info->num_sprites[PIPE_C] = 1;
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ for_each_pipe(dev_priv, pipe)
+ info->num_sprites[pipe] = 2;
+ else
+ for_each_pipe(dev_priv, pipe)
+ info->num_sprites[pipe] = 1;
+
+ if (i915.disable_display) {
+ DRM_INFO("Display disabled (module parameter)\n");
+ info->num_pipes = 0;
+ } else if (info->num_pipes > 0 &&
+ (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
+ HAS_PCH_SPLIT(dev_priv)) {
+ u32 fuse_strap = I915_READ(FUSE_STRAP);
+ u32 sfuse_strap = I915_READ(SFUSE_STRAP);
+
+ /*
+ * SFUSE_STRAP is supposed to have a bit signalling the display
+ * is fused off. Unfortunately it seems that, at least in
+ * certain cases, a fused-off display means that PCH display
+ * reads don't land anywhere. In that case, we read 0s.
+ *
+ * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
+ * should be set when taking over after the firmware.
+ */
+ if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
+ sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
+ (dev_priv->pch_type == PCH_CPT &&
+ !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
+ DRM_INFO("Display fused off, disabling\n");
+ info->num_pipes = 0;
+ } else if (fuse_strap & IVB_PIPE_C_DISABLE) {
+ DRM_INFO("PipeC fused off\n");
+ info->num_pipes -= 1;
+ }
+ } else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
+ u32 dfsm = I915_READ(SKL_DFSM);
+ u8 disabled_mask = 0;
+ bool invalid;
+ int num_bits;
+
+ if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
+ disabled_mask |= BIT(PIPE_A);
+ if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
+ disabled_mask |= BIT(PIPE_B);
+ if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
+ disabled_mask |= BIT(PIPE_C);
+
+ num_bits = hweight8(disabled_mask);
+
+ switch (disabled_mask) {
+ case BIT(PIPE_A):
+ case BIT(PIPE_B):
+ case BIT(PIPE_A) | BIT(PIPE_B):
+ case BIT(PIPE_A) | BIT(PIPE_C):
+ invalid = true;
+ break;
+ default:
+ invalid = false;
+ }
+
+ if (num_bits > info->num_pipes || invalid)
+ DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
+ disabled_mask);
+ else
+ info->num_pipes -= num_bits;
+ }
+
+ /* Initialize slice/subslice/EU info */
+ if (IS_CHERRYVIEW(dev_priv))
+ cherryview_sseu_info_init(dev_priv);
+ else if (IS_BROADWELL(dev_priv))
+ broadwell_sseu_info_init(dev_priv);
+ else if (INTEL_INFO(dev_priv)->gen >= 9)
+ gen9_sseu_info_init(dev_priv);
+
+ info->has_snoop = !info->has_llc;
+
+ /* Snooping is broken on BXT A stepping. */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
+ info->has_snoop = false;
+
+ DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
+ DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
+ DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
+ DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
+ DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
+ DRM_DEBUG_DRIVER("has slice power gating: %s\n",
+ info->has_slice_pg ? "y" : "n");
+ DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
+ info->has_subslice_pg ? "y" : "n");
+ DRM_DEBUG_DRIVER("has EU power gating: %s\n",
+ info->has_eu_pg ? "y" : "n");
+}
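
All three sseu init paths follow the same recipe: read a fuse register, popcount the disable bits, and subtract from the architectural maximum. A hedged sketch of the per-subslice EU count, using the compiler builtin in place of the kernel's hweight helpers:

#include <stdint.h>

/* eu_disable_bits: one bit per EU in this subslice (set = fused off);
 * eu_max: architectural EU count per subslice (8 on these parts). */
static unsigned int enabled_eus_in_subslice(uint32_t eu_disable_bits,
					    unsigned int eu_max)
{
	/* __builtin_popcount stands in for hweight8()/hweight32() */
	return eu_max - (unsigned int)__builtin_popcount(eu_disable_bits);
}

Summing this over every enabled slice/subslice pair yields eu_total, from which eu_per_subslice and the power-gating capabilities above are derived.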
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 32893195d..175595fc3 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -36,6 +36,7 @@
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_gem_dmabuf.h"
#include "intel_dsi.h"
#include "i915_trace.h"
#include <drm/drm_atomic.h>
@@ -46,7 +47,11 @@
#include <drm/drm_rect.h>
#include <linux/dma_remapping.h>
#include <linux/reservation.h>
-#include <linux/dma-buf.h>
+
+static bool is_mmio_work(struct intel_flip_work *work)
+{
+ return work->mmio_work.func;
+}
/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
@@ -117,20 +122,18 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
+static int ilk_max_pixel_rate(struct drm_atomic_state *state);
+static int bxt_calc_cdclk(int max_pixclk);
-typedef struct {
- int min, max;
-} intel_range_t;
-
-typedef struct {
- int dot_limit;
- int p2_slow, p2_fast;
-} intel_p2_t;
-
-typedef struct intel_limit intel_limit_t;
struct intel_limit {
- intel_range_t dot, vco, n, m, m1, m2, p, p1;
- intel_p2_t p2;
+ struct {
+ int min, max;
+ } dot, vco, n, m, m1, m2, p, p1;
+
+ struct {
+ int dot_limit;
+ int p2_slow, p2_fast;
+ } p2;
};
/* returns HPLL frequency in kHz */
@@ -185,6 +188,7 @@ intel_pch_rawclk(struct drm_i915_private *dev_priv)
static int
intel_vlv_hrawclk(struct drm_i915_private *dev_priv)
{
+ /* RAWCLK_FREQ_VLV register updated from power well code */
return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
CCK_DISPLAY_REF_CLOCK_CONTROL);
}
@@ -218,7 +222,7 @@ intel_g4x_hrawclk(struct drm_i915_private *dev_priv)
}
}
-static void intel_update_rawclk(struct drm_i915_private *dev_priv)
+void intel_update_rawclk(struct drm_i915_private *dev_priv)
{
if (HAS_PCH_SPLIT(dev_priv))
dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv);
@@ -255,7 +259,7 @@ intel_fdi_link_freq(struct drm_i915_private *dev_priv,
return 270000;
}
-static const intel_limit_t intel_limits_i8xx_dac = {
+static const struct intel_limit intel_limits_i8xx_dac = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 908000, .max = 1512000 },
.n = { .min = 2, .max = 16 },
@@ -268,7 +272,7 @@ static const intel_limit_t intel_limits_i8xx_dac = {
.p2_slow = 4, .p2_fast = 2 },
};
-static const intel_limit_t intel_limits_i8xx_dvo = {
+static const struct intel_limit intel_limits_i8xx_dvo = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 908000, .max = 1512000 },
.n = { .min = 2, .max = 16 },
@@ -281,7 +285,7 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
.p2_slow = 4, .p2_fast = 4 },
};
-static const intel_limit_t intel_limits_i8xx_lvds = {
+static const struct intel_limit intel_limits_i8xx_lvds = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 908000, .max = 1512000 },
.n = { .min = 2, .max = 16 },
@@ -294,7 +298,7 @@ static const intel_limit_t intel_limits_i8xx_lvds = {
.p2_slow = 14, .p2_fast = 7 },
};
-static const intel_limit_t intel_limits_i9xx_sdvo = {
+static const struct intel_limit intel_limits_i9xx_sdvo = {
.dot = { .min = 20000, .max = 400000 },
.vco = { .min = 1400000, .max = 2800000 },
.n = { .min = 1, .max = 6 },
@@ -307,7 +311,7 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
.p2_slow = 10, .p2_fast = 5 },
};
-static const intel_limit_t intel_limits_i9xx_lvds = {
+static const struct intel_limit intel_limits_i9xx_lvds = {
.dot = { .min = 20000, .max = 400000 },
.vco = { .min = 1400000, .max = 2800000 },
.n = { .min = 1, .max = 6 },
@@ -321,7 +325,7 @@ static const intel_limit_t intel_limits_i9xx_lvds = {
};
-static const intel_limit_t intel_limits_g4x_sdvo = {
+static const struct intel_limit intel_limits_g4x_sdvo = {
.dot = { .min = 25000, .max = 270000 },
.vco = { .min = 1750000, .max = 3500000},
.n = { .min = 1, .max = 4 },
@@ -336,7 +340,7 @@ static const intel_limit_t intel_limits_g4x_sdvo = {
},
};
-static const intel_limit_t intel_limits_g4x_hdmi = {
+static const struct intel_limit intel_limits_g4x_hdmi = {
.dot = { .min = 22000, .max = 400000 },
.vco = { .min = 1750000, .max = 3500000},
.n = { .min = 1, .max = 4 },
@@ -349,7 +353,7 @@ static const intel_limit_t intel_limits_g4x_hdmi = {
.p2_slow = 10, .p2_fast = 5 },
};
-static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
+static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
.dot = { .min = 20000, .max = 115000 },
.vco = { .min = 1750000, .max = 3500000 },
.n = { .min = 1, .max = 3 },
@@ -363,7 +367,7 @@ static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
},
};
-static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
+static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
.dot = { .min = 80000, .max = 224000 },
.vco = { .min = 1750000, .max = 3500000 },
.n = { .min = 1, .max = 3 },
@@ -377,7 +381,7 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
},
};
-static const intel_limit_t intel_limits_pineview_sdvo = {
+static const struct intel_limit intel_limits_pineview_sdvo = {
.dot = { .min = 20000, .max = 400000},
.vco = { .min = 1700000, .max = 3500000 },
/* Pineview's Ncounter is a ring counter */
@@ -392,7 +396,7 @@ static const intel_limit_t intel_limits_pineview_sdvo = {
.p2_slow = 10, .p2_fast = 5 },
};
-static const intel_limit_t intel_limits_pineview_lvds = {
+static const struct intel_limit intel_limits_pineview_lvds = {
.dot = { .min = 20000, .max = 400000 },
.vco = { .min = 1700000, .max = 3500000 },
.n = { .min = 3, .max = 6 },
@@ -410,7 +414,7 @@ static const intel_limit_t intel_limits_pineview_lvds = {
* We calculate clock using (register_value + 2) for N/M1/M2, so here
* the range value for them is (actual_value - 2).
*/
-static const intel_limit_t intel_limits_ironlake_dac = {
+static const struct intel_limit intel_limits_ironlake_dac = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 5 },
@@ -423,7 +427,7 @@ static const intel_limit_t intel_limits_ironlake_dac = {
.p2_slow = 10, .p2_fast = 5 },
};
-static const intel_limit_t intel_limits_ironlake_single_lvds = {
+static const struct intel_limit intel_limits_ironlake_single_lvds = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 3 },
@@ -436,7 +440,7 @@ static const intel_limit_t intel_limits_ironlake_single_lvds = {
.p2_slow = 14, .p2_fast = 14 },
};
-static const intel_limit_t intel_limits_ironlake_dual_lvds = {
+static const struct intel_limit intel_limits_ironlake_dual_lvds = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 3 },
@@ -450,7 +454,7 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds = {
};
/* LVDS 100MHz refclk limits. */
-static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
+static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 2 },
@@ -463,7 +467,7 @@ static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
.p2_slow = 14, .p2_fast = 14 },
};
-static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
+static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 3 },
@@ -476,7 +480,7 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
.p2_slow = 7, .p2_fast = 7 },
};
-static const intel_limit_t intel_limits_vlv = {
+static const struct intel_limit intel_limits_vlv = {
/*
* These are the data rate limits (measured in fast clocks)
* since those are the strictest limits we have. The fast
@@ -492,7 +496,7 @@ static const intel_limit_t intel_limits_vlv = {
.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};
-static const intel_limit_t intel_limits_chv = {
+static const struct intel_limit intel_limits_chv = {
/*
* These are the data rate limits (measured in fast clocks)
* since those are the strictest limits we have. The fast
@@ -508,7 +512,7 @@ static const intel_limit_t intel_limits_chv = {
.p2 = { .p2_slow = 1, .p2_fast = 14 },
};
-static const intel_limit_t intel_limits_bxt = {
+static const struct intel_limit intel_limits_bxt = {
/* FIXME: find real dot limits */
.dot = { .min = 0, .max = INT_MAX },
.vco = { .min = 4800000, .max = 6700000 },
@@ -526,52 +530,6 @@ needs_modeset(struct drm_crtc_state *state)
return drm_atomic_crtc_needs_modeset(state);
}
-/**
- * Returns whether any output on the specified pipe is of the specified type
- */
-bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
-{
- struct drm_device *dev = crtc->base.dev;
- struct intel_encoder *encoder;
-
- for_each_encoder_on_crtc(dev, &crtc->base, encoder)
- if (encoder->type == type)
- return true;
-
- return false;
-}
-
-/**
- * Returns whether any output on the specified pipe will have the specified
- * type after a staged modeset is complete, i.e., the same as
- * intel_pipe_has_type() but looking at encoder->new_crtc instead of
- * encoder->crtc.
- */
-static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
- int type)
-{
- struct drm_atomic_state *state = crtc_state->base.state;
- struct drm_connector *connector;
- struct drm_connector_state *connector_state;
- struct intel_encoder *encoder;
- int i, num_connectors = 0;
-
- for_each_connector_in_state(state, connector, connector_state, i) {
- if (connector_state->crtc != crtc_state->base.crtc)
- continue;
-
- num_connectors++;
-
- encoder = to_intel_encoder(connector_state->best_encoder);
- if (encoder->type == type)
- return true;
- }
-
- WARN_ON(num_connectors == 0);
-
- return false;
-}
-
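
The two encoder-walking helpers removed here are superseded by the state-based intel_crtc_has_type() used throughout the rest of this patch; a sketch of the assumed shape of the replacement (presumed to live with the other display helpers in intel_drv.h):

/*
 * Assumed shape of the replacement: instead of walking encoders, the
 * crtc state carries an output-type bitmask filled in during the
 * atomic check phase, so the lookup becomes a simple bit test.
 */
static inline bool
intel_crtc_has_type(const struct intel_crtc_state *crtc_state,
                    enum intel_output_type type)
{
        return crtc_state->output_types & (1 << type);
}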
/*
* Platform specific helpers to calculate the port PLL loopback- (clock.m),
* and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
@@ -581,7 +539,7 @@ static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
* divided-down version of it.
*/
/* m1 is reserved as 0 in Pineview, n is a ring counter */
-static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock)
+static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
clock->m = clock->m2 + 2;
clock->p = clock->p1 * clock->p2;
@@ -598,7 +556,7 @@ static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}
-static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
+static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
clock->m = i9xx_dpll_compute_m(clock);
clock->p = clock->p1 * clock->p2;
@@ -610,7 +568,7 @@ static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
return clock->dot;
}
-static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
+static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
clock->m = clock->m1 * clock->m2;
clock->p = clock->p1 * clock->p2;
@@ -622,7 +580,7 @@ static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
return clock->dot / 5;
}
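
As a worked pass through the gen4- DPLL formulas above (sample field values are chosen for illustration only, not taken from any platform table; the +2 divisors follow the register-offset convention noted before the Ironlake tables):

/*
 * Worked example of the i9xx math, with illustrative values:
 *
 *   refclk = 96000 kHz, m1 = 12, m2 = 6, n = 3, p1 = 2, p2 = 5
 *
 *   m   = 5 * (m1 + 2) + (m2 + 2) = 5 * 14 + 8 = 78
 *   vco = refclk * m / (n + 2)    = 96000 * 78 / 5 = 1497600 kHz
 *   p   = p1 * p2                 = 10
 *   dot = vco / p                 = 149760 kHz, i.e. ~149.8 MHz
 */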
-int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
+int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
clock->m = clock->m1 * clock->m2;
clock->p = clock->p1 * clock->p2;
@@ -642,8 +600,8 @@ int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
*/
static bool intel_PLL_is_valid(struct drm_device *dev,
- const intel_limit_t *limit,
- const intel_clock_t *clock)
+ const struct intel_limit *limit,
+ const struct dpll *clock)
{
if (clock->n < limit->n.min || limit->n.max < clock->n)
INTELPllInvalid("n out of range\n");
@@ -678,13 +636,13 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
}
static int
-i9xx_select_p2_div(const intel_limit_t *limit,
+i9xx_select_p2_div(const struct intel_limit *limit,
const struct intel_crtc_state *crtc_state,
int target)
{
struct drm_device *dev = crtc_state->base.crtc->dev;
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
/*
* For LVDS just rely on its current settings for dual-channel.
* We haven't figured out how to reliably set up different
@@ -713,13 +671,13 @@ i9xx_select_p2_div(const intel_limit_t *limit,
* divider from @match_clock used for LVDS downclocking.
*/
static bool
-i9xx_find_best_dpll(const intel_limit_t *limit,
+i9xx_find_best_dpll(const struct intel_limit *limit,
struct intel_crtc_state *crtc_state,
- int target, int refclk, intel_clock_t *match_clock,
- intel_clock_t *best_clock)
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
{
struct drm_device *dev = crtc_state->base.crtc->dev;
- intel_clock_t clock;
+ struct dpll clock;
int err = target;
memset(best_clock, 0, sizeof(*best_clock));
@@ -770,13 +728,13 @@ i9xx_find_best_dpll(const intel_limit_t *limit,
* divider from @match_clock used for LVDS downclocking.
*/
static bool
-pnv_find_best_dpll(const intel_limit_t *limit,
+pnv_find_best_dpll(const struct intel_limit *limit,
struct intel_crtc_state *crtc_state,
- int target, int refclk, intel_clock_t *match_clock,
- intel_clock_t *best_clock)
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
{
struct drm_device *dev = crtc_state->base.crtc->dev;
- intel_clock_t clock;
+ struct dpll clock;
int err = target;
memset(best_clock, 0, sizeof(*best_clock));
@@ -825,13 +783,13 @@ pnv_find_best_dpll(const intel_limit_t *limit,
* divider from @match_clock used for LVDS downclocking.
*/
static bool
-g4x_find_best_dpll(const intel_limit_t *limit,
+g4x_find_best_dpll(const struct intel_limit *limit,
struct intel_crtc_state *crtc_state,
- int target, int refclk, intel_clock_t *match_clock,
- intel_clock_t *best_clock)
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
{
struct drm_device *dev = crtc_state->base.crtc->dev;
- intel_clock_t clock;
+ struct dpll clock;
int max_n;
bool found = false;
/* approximately equals target * 0.00585 */
@@ -877,8 +835,8 @@ g4x_find_best_dpll(const intel_limit_t *limit,
* best configuration and error found so far. Return the calculated error.
*/
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
- const intel_clock_t *calculated_clock,
- const intel_clock_t *best_clock,
+ const struct dpll *calculated_clock,
+ const struct dpll *best_clock,
unsigned int best_error_ppm,
unsigned int *error_ppm)
{
@@ -918,14 +876,14 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
* reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
static bool
-vlv_find_best_dpll(const intel_limit_t *limit,
+vlv_find_best_dpll(const struct intel_limit *limit,
struct intel_crtc_state *crtc_state,
- int target, int refclk, intel_clock_t *match_clock,
- intel_clock_t *best_clock)
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
- intel_clock_t clock;
+ struct dpll clock;
unsigned int bestppm = 1000000;
/* min update 19.2 MHz */
int max_n = min(limit->n.max, refclk / 19200);
@@ -977,15 +935,15 @@ vlv_find_best_dpll(const intel_limit_t *limit,
* reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
static bool
-chv_find_best_dpll(const intel_limit_t *limit,
+chv_find_best_dpll(const struct intel_limit *limit,
struct intel_crtc_state *crtc_state,
- int target, int refclk, intel_clock_t *match_clock,
- intel_clock_t *best_clock)
+ int target, int refclk, struct dpll *match_clock,
+ struct dpll *best_clock)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
unsigned int best_error_ppm;
- intel_clock_t clock;
+ struct dpll clock;
uint64_t m2;
int found = false;
@@ -1035,10 +993,10 @@ chv_find_best_dpll(const intel_limit_t *limit,
}
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
- intel_clock_t *best_clock)
+ struct dpll *best_clock)
{
int refclk = 100000;
- const intel_limit_t *limit = &intel_limits_bxt;
+ const struct intel_limit *limit = &intel_limits_bxt;
return chv_find_best_dpll(limit, crtc_state,
target_clock, refclk, NULL, best_clock);
@@ -1076,7 +1034,7 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t reg = PIPEDSL(pipe);
u32 line1, line2;
u32 line_mask;
@@ -1112,7 +1070,7 @@ static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
enum pipe pipe = crtc->pipe;
@@ -1120,8 +1078,9 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
i915_reg_t reg = PIPECONF(cpu_transcoder);
/* Wait for the Pipe State to go off */
- if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
- 100))
+ if (intel_wait_for_register(dev_priv,
+ reg, I965_PIPECONF_ACTIVE, 0,
+ 100))
WARN(1, "pipe_off wait timed out\n");
} else {
/* Wait for the display line to settle */
@@ -1203,7 +1162,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
u32 val;
/* ILK FDI PLL is always enabled */
- if (INTEL_INFO(dev_priv)->gen == 5)
+ if (IS_GEN5(dev_priv))
return;
/* On Haswell, DDI ports are responsible for the FDI PLL setup */
@@ -1230,7 +1189,7 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
i915_reg_t pp_reg;
u32 val;
enum pipe panel_pipe = PIPE_A;
@@ -1272,7 +1231,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv,
static void assert_cursor(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
bool cur_state;
if (IS_845G(dev) || IS_I865G(dev))
@@ -1334,7 +1293,7 @@ static void assert_plane(struct drm_i915_private *dev_priv,
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
int i;
/* Primary planes are fixed to pipes on gen4+ */
@@ -1360,7 +1319,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
int sprite;
if (INTEL_INFO(dev)->gen >= 9) {
@@ -1540,7 +1499,11 @@ static void _vlv_enable_pll(struct intel_crtc *crtc,
POSTING_READ(DPLL(pipe));
udelay(150);
- if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
+ if (intel_wait_for_register(dev_priv,
+ DPLL(pipe),
+ DPLL_LOCK_VLV,
+ DPLL_LOCK_VLV,
+ 1))
DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
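
The register-wait conversions in this patch all follow one pattern; the two forms below are taken from the hunk above and are equivalent: poll until (I915_READ(reg) & mask) == value, giving up after the timeout in milliseconds.

/* before: open-coded read/mask/compare under wait_for() */
if (wait_for((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV, 1))
        DRM_ERROR("DPLL %d failed to lock\n", pipe);

/* after: same reg/mask/value/timeout, via the helper */
if (intel_wait_for_register(dev_priv,
                            DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
                            1))
        DRM_ERROR("DPLL %d failed to lock\n", pipe);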
@@ -1589,7 +1552,9 @@ static void _chv_enable_pll(struct intel_crtc *crtc,
I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
/* Check PLL is locked */
- if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
+ if (intel_wait_for_register(dev_priv,
+ DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
+ 1))
DRM_ERROR("PLL %d failed to lock\n", pipe);
}
@@ -1635,9 +1600,10 @@ static int intel_num_dvo_pipes(struct drm_device *dev)
struct intel_crtc *crtc;
int count = 0;
- for_each_intel_crtc(dev, crtc)
+ for_each_intel_crtc(dev, crtc) {
count += crtc->base.state->active &&
- intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
+ intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
+ }
return count;
}
@@ -1645,7 +1611,7 @@ static int intel_num_dvo_pipes(struct drm_device *dev)
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t reg = DPLL(crtc->pipe);
u32 dpll = crtc->config->dpll_hw_state.dpll;
@@ -1717,12 +1683,12 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = crtc->pipe;
/* Disable DVO 2x clock on both PLLs if necessary */
if (IS_I830(dev) &&
- intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
+ intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
!intel_num_dvo_pipes(dev)) {
I915_WRITE(DPLL(PIPE_B),
I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
@@ -1809,7 +1775,9 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
BUG();
}
- if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000))
+ if (intel_wait_for_register(dev_priv,
+ dpll_reg, port_mask, expected_mask,
+ 1000))
WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
}
@@ -1817,7 +1785,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
i915_reg_t reg;
@@ -1850,7 +1818,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
* here for both 8bpc and 12bpc.
*/
val &= ~PIPECONF_BPC_MASK;
- if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
+ if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI))
val |= PIPECONF_8BPC;
else
val |= pipeconf_val & PIPECONF_BPC_MASK;
@@ -1859,7 +1827,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
val &= ~TRANS_INTERLACE_MASK;
if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
if (HAS_PCH_IBX(dev_priv) &&
- intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
+ intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
val |= TRANS_LEGACY_INTERLACED_ILK;
else
val |= TRANS_INTERLACED;
@@ -1867,7 +1835,9 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
val |= TRANS_PROGRESSIVE;
I915_WRITE(reg, val | TRANS_ENABLE);
- if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
+ if (intel_wait_for_register(dev_priv,
+ reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
+ 100))
DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}
@@ -1895,14 +1865,18 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
val |= TRANS_PROGRESSIVE;
I915_WRITE(LPT_TRANSCONF, val);
- if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
+ if (intel_wait_for_register(dev_priv,
+ LPT_TRANSCONF,
+ TRANS_STATE_ENABLE,
+ TRANS_STATE_ENABLE,
+ 100))
DRM_ERROR("Failed to enable PCH transcoder\n");
}
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
i915_reg_t reg;
uint32_t val;
@@ -1918,7 +1892,9 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
val &= ~TRANS_ENABLE;
I915_WRITE(reg, val);
/* wait for PCH transcoder off, transcoder state */
- if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
+ if (intel_wait_for_register(dev_priv,
+ reg, TRANS_STATE_ENABLE, 0,
+ 50))
DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
if (HAS_PCH_CPT(dev)) {
@@ -1938,7 +1914,9 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
val &= ~TRANS_ENABLE;
I915_WRITE(LPT_TRANSCONF, val);
/* wait for PCH transcoder off, transcoder state */
- if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
+ if (intel_wait_for_register(dev_priv,
+ LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
+ 50))
DRM_ERROR("Failed to disable PCH transcoder\n");
/* Workaround: clear timing override bit. */
@@ -1957,7 +1935,7 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
static void intel_enable_pipe(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
enum pipe pch_transcoder;
@@ -1981,7 +1959,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
* need the check.
*/
if (HAS_GMCH_DISPLAY(dev_priv))
- if (crtc->config->has_dsi_encoder)
+ if (intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(dev_priv);
else
assert_pll_enabled(dev_priv, pipe);
@@ -2030,7 +2008,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
*/
static void intel_disable_pipe(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
@@ -2068,15 +2046,6 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
intel_wait_for_pipe_off(crtc);
}
-static bool need_vtd_wa(struct drm_device *dev)
-{
-#ifdef CONFIG_INTEL_IOMMU
- if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
- return true;
-#endif
- return false;
-}
-
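
The removed helper is assumed to move into a shared header as the intel_scanout_needs_vtd_wa() used below, keyed off dev_priv so callers no longer need a struct drm_device; a sketch of the assumed replacement:

/*
 * Assumed shape of the replacement helper: the same VT-d scanout
 * check as the removed need_vtd_wa(), taking dev_priv directly.
 */
static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
{
#ifdef CONFIG_INTEL_IOMMU
        if (INTEL_GEN(dev_priv) >= 6 && intel_iommu_gfx_mapped)
                return true;
#endif
        return false;
}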
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
return IS_GEN2(dev_priv) ? 2048 : 4096;
@@ -2241,7 +2210,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
unsigned int rotation)
{
struct drm_device *dev = fb->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct i915_ggtt_view view;
u32 alignment;
@@ -2258,7 +2227,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
* we should always have valid PTE following the scanout preventing
* the VT-d warning.
*/
- if (need_vtd_wa(dev) && alignment < 256 * 1024)
+ if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
alignment = 256 * 1024;
/*
@@ -2309,7 +2278,7 @@ err_pm:
return ret;
}
-static void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
+void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
{
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct i915_ggtt_view view;
@@ -2543,7 +2512,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *c;
struct intel_crtc *i;
struct drm_i915_gem_object *obj;
@@ -2639,7 +2608,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = primary->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -2752,7 +2721,7 @@ static void i9xx_disable_primary_plane(struct drm_plane *primary,
struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane;
@@ -2769,7 +2738,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = primary->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -2897,7 +2866,7 @@ u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
@@ -3007,7 +2976,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -3091,7 +3060,7 @@ static void skylake_disable_primary_plane(struct drm_plane *primary,
struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe = to_intel_crtc(crtc)->pipe;
I915_WRITE(PLANE_CTL(pipe, 0), 0);
@@ -3110,17 +3079,12 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
return -ENODEV;
}
-static void intel_complete_page_flips(struct drm_device *dev)
+static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
{
- struct drm_crtc *crtc;
-
- for_each_crtc(dev, crtc) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum plane plane = intel_crtc->plane;
+ struct intel_crtc *crtc;
- intel_prepare_page_flip(dev, plane);
- intel_finish_page_flip_plane(dev, plane);
- }
+ for_each_intel_crtc(&dev_priv->drm, crtc)
+ intel_finish_page_flip_cs(dev_priv, crtc->pipe);
}
static void intel_update_primary_planes(struct drm_device *dev)
@@ -3129,55 +3093,125 @@ static void intel_update_primary_planes(struct drm_device *dev)
for_each_crtc(dev, crtc) {
struct intel_plane *plane = to_intel_plane(crtc->primary);
- struct intel_plane_state *plane_state;
-
- drm_modeset_lock_crtc(crtc, &plane->base);
- plane_state = to_intel_plane_state(plane->base.state);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
if (plane_state->visible)
plane->update_plane(&plane->base,
to_intel_crtc_state(crtc->state),
plane_state);
+ }
+}
+
+static int
+__intel_display_resume(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ int i, ret;
+
+ intel_modeset_setup_hw_state(dev);
+ i915_redisable_vga(dev);
+
+ if (!state)
+ return 0;
- drm_modeset_unlock_crtc(crtc);
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ /*
+ * Force recalculation even if we restore
+ * current state. With fast modeset this may not result
+ * in a modeset when the state is compatible.
+ */
+ crtc_state->mode_changed = true;
}
+
+ /* ignore any reset values/BIOS leftovers in the WM registers */
+ to_intel_atomic_state(state)->skip_intermediate_wm = true;
+
+ ret = drm_atomic_commit(state);
+
+ WARN_ON(ret == -EDEADLK);
+ return ret;
}
-void intel_prepare_reset(struct drm_device *dev)
+void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
+ struct drm_atomic_state *state;
+ int ret;
+
/* no reset support for gen2 */
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
return;
- /* reset doesn't touch the display */
- if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+ /*
+ * Need mode_config.mutex so that we don't
+ * trample ongoing ->detect() and whatnot.
+ */
+ mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_acquire_init(ctx, 0);
+ while (1) {
+ ret = drm_modeset_lock_all_ctx(dev, ctx);
+ if (ret != -EDEADLK)
+ break;
+
+ drm_modeset_backoff(ctx);
+ }
+
+ /* reset doesn't touch the display, but flips might get nuked anyway */
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
return;
- drm_modeset_lock_all(dev);
/*
* Disabling the crtcs gracefully seems nicer. Also the
* g33 docs say we should at least disable all the planes.
*/
- intel_display_suspend(dev);
+ state = drm_atomic_helper_duplicate_state(dev, ctx);
+ if (IS_ERR(state)) {
+ ret = PTR_ERR(state);
+ state = NULL;
+ DRM_ERROR("Duplicating state failed with %i\n", ret);
+ goto err;
+ }
+
+ ret = drm_atomic_helper_disable_all(dev, ctx);
+ if (ret) {
+ DRM_ERROR("Suspending crtc's failed with %i\n", ret);
+ goto err;
+ }
+
+ dev_priv->modeset_restore_state = state;
+ state->acquire_ctx = ctx;
+ return;
+
+err:
+ drm_atomic_state_free(state);
}
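
The acquire loop above is the standard drm_modeset_lock deadlock-backoff idiom; sketched in isolation below (the function name is hypothetical, and the caller is assumed to pair it with the drm_modeset_drop_locks()/drm_modeset_acquire_fini() seen in intel_finish_reset()):

/*
 * Standard acquire/backoff idiom: on -EDEADLK another thread holds
 * one of our locks, so drop everything and retry; any other return
 * value (including success) ends the loop.
 */
static int example_lock_all(struct drm_device *dev,
                            struct drm_modeset_acquire_ctx *ctx)
{
        int ret;

        drm_modeset_acquire_init(ctx, 0);
        for (;;) {
                ret = drm_modeset_lock_all_ctx(dev, ctx);
                if (ret != -EDEADLK)
                        break;
                drm_modeset_backoff(ctx);
        }
        return ret;
}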
-void intel_finish_reset(struct drm_device *dev)
+void intel_finish_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
+ struct drm_atomic_state *state = dev_priv->modeset_restore_state;
+ int ret;
/*
* Flips in the rings will be nuked by the reset,
* so complete all pending flips so that user space
* will get its events and not get stuck.
*/
- intel_complete_page_flips(dev);
+ intel_complete_page_flips(dev_priv);
/* no reset support for gen2 */
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
return;
+ dev_priv->modeset_restore_state = NULL;
+
/* reset doesn't touch the display */
- if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
/*
* Flips in the rings have been nuked by the reset,
* so update the base address of all primary
@@ -3188,28 +3222,31 @@ void intel_finish_reset(struct drm_device *dev)
* CS-based flips (which might get lost in gpu resets) any more.
*/
intel_update_primary_planes(dev);
- return;
- }
-
- /*
- * The display has been reset as well,
- * so need a full re-initialization.
- */
- intel_runtime_pm_disable_interrupts(dev_priv);
- intel_runtime_pm_enable_interrupts(dev_priv);
+ } else {
+ /*
+ * The display has been reset as well,
+ * so need a full re-initialization.
+ */
+ intel_runtime_pm_disable_interrupts(dev_priv);
+ intel_runtime_pm_enable_interrupts(dev_priv);
- intel_modeset_init_hw(dev);
+ intel_modeset_init_hw(dev);
- spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev);
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display.hpd_irq_setup)
+ dev_priv->display.hpd_irq_setup(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
- intel_display_resume(dev);
+ ret = __intel_display_resume(dev, state);
+ if (ret)
+ DRM_ERROR("Restoring old state failed with %i\n", ret);
- intel_hpd_init(dev_priv);
+ intel_hpd_init(dev_priv);
+ }
- drm_modeset_unlock_all(dev);
+ drm_modeset_drop_locks(ctx);
+ drm_modeset_acquire_fini(ctx);
+ mutex_unlock(&dev->mode_config.mutex);
}
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
@@ -3224,7 +3261,7 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
return false;
spin_lock_irq(&dev->event_lock);
- pending = to_intel_crtc(crtc)->unpin_work != NULL;
+ pending = to_intel_crtc(crtc)->flip_work != NULL;
spin_unlock_irq(&dev->event_lock);
return pending;
@@ -3234,7 +3271,7 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *old_crtc_state)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->base.state);
@@ -3275,7 +3312,7 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
i915_reg_t reg;
@@ -3318,7 +3355,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
i915_reg_t reg;
@@ -3419,7 +3456,7 @@ static const int snb_b_fdi_train_param[] = {
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
i915_reg_t reg;
@@ -3552,7 +3589,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
i915_reg_t reg;
@@ -3671,7 +3708,7 @@ train_done:
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe = intel_crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -3708,7 +3745,7 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe = intel_crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -3738,7 +3775,7 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
i915_reg_t reg;
@@ -3803,7 +3840,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
if (atomic_read(&crtc->unpin_work_count) == 0)
continue;
- if (crtc->unpin_work)
+ if (crtc->flip_work)
intel_wait_for_vblank(dev, crtc->pipe);
return true;
@@ -3815,11 +3852,9 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
static void page_flip_completed(struct intel_crtc *intel_crtc)
{
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- struct intel_unpin_work *work = intel_crtc->unpin_work;
+ struct intel_flip_work *work = intel_crtc->flip_work;
- /* ensure that the unpin work is consistent wrt ->pending. */
- smp_rmb();
- intel_crtc->unpin_work = NULL;
+ intel_crtc->flip_work = NULL;
if (work->event)
drm_crtc_send_vblank_event(&intel_crtc->base, work->event);
@@ -3827,7 +3862,7 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
drm_crtc_vblank_put(&intel_crtc->base);
wake_up_all(&dev_priv->pending_flip_queue);
- queue_work(dev_priv->wq, &work->work);
+ queue_work(dev_priv->wq, &work->unpin_work);
trace_i915_flip_complete(intel_crtc->plane,
work->pending_flip_obj);
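
For orientation during the unpin_work to flip_work rename, a partial sketch of struct intel_flip_work limited to the members this patch actually touches (the full definition is assumed to live with the other display structs in intel_drv.h):

/*
 * Partial sketch; the real struct carries more state.
 */
struct intel_flip_work {
        struct work_struct unpin_work;  /* queued by page_flip_completed() */
        struct work_struct mmio_work;   /* .func set only for mmio flips,
                                         * which is what is_mmio_work() tests */
        struct drm_pending_vblank_event *event;
        struct drm_i915_gem_object *pending_flip_obj;
        /* ... */
};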
@@ -3836,7 +3871,7 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
long ret;
WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
@@ -3851,9 +3886,11 @@ static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
if (ret == 0) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_flip_work *work;
spin_lock_irq(&dev->event_lock);
- if (intel_crtc->unpin_work) {
+ work = intel_crtc->flip_work;
+ if (work && !is_mmio_work(work)) {
WARN_ONCE(1, "Removing stuck page flip\n");
page_flip_completed(intel_crtc);
}
@@ -3997,7 +4034,7 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
enum pipe pch_transcoder)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
@@ -4019,7 +4056,7 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t temp;
temp = I915_READ(SOUTH_CHICKEN1);
@@ -4069,7 +4106,7 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
struct intel_encoder *encoder;
for_each_encoder_on_crtc(dev, crtc, encoder) {
- if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ if (encoder->type == INTEL_OUTPUT_DP ||
encoder->type == INTEL_OUTPUT_EDP)
return enc_to_dig_port(&encoder->base)->port;
}
@@ -4088,7 +4125,7 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 temp;
@@ -4138,7 +4175,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
intel_fdi_normal_train(crtc);
/* For PCH DP, enable TRANS_DP_CTL */
- if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
+ if (HAS_PCH_CPT(dev) && intel_crtc_has_dp_encoder(intel_crtc->config)) {
const struct drm_display_mode *adjusted_mode =
&intel_crtc->config->base.adjusted_mode;
u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
@@ -4178,7 +4215,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
static void lpt_pch_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
@@ -4194,7 +4231,7 @@ static void lpt_pch_enable(struct drm_crtc *crtc)
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t dslreg = PIPEDSL(pipe);
u32 temp;
@@ -4281,8 +4318,9 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
- DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
- intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
+ DRM_DEBUG_KMS("Updating scaler for [CRTC:%d:%s] scaler_user index %u.%u\n",
+ intel_crtc->base.base.id, intel_crtc->base.name,
+ intel_crtc->pipe, SKL_CRTC_INDEX);
return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
&state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
@@ -4312,9 +4350,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
bool force_detach = !fb || !plane_state->visible;
- DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
- intel_plane->base.base.id, intel_crtc->pipe,
- drm_plane_index(&intel_plane->base));
+ DRM_DEBUG_KMS("Updating scaler for [PLANE:%d:%s] scaler_user index %u.%u\n",
+ intel_plane->base.base.id, intel_plane->base.name,
+ intel_crtc->pipe, drm_plane_index(&intel_plane->base));
ret = skl_update_scaler(crtc_state, force_detach,
drm_plane_index(&intel_plane->base),
@@ -4330,8 +4368,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
/* check colorkey */
if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
- DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed",
- intel_plane->base.base.id);
+ DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
+ intel_plane->base.base.id,
+ intel_plane->base.name);
return -EINVAL;
}
@@ -4350,8 +4389,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
case DRM_FORMAT_VYUY:
break;
default:
- DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
- intel_plane->base.base.id, fb->base.id, fb->pixel_format);
+ DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
+ intel_plane->base.base.id, intel_plane->base.name,
+ fb->base.id, fb->pixel_format);
return -EINVAL;
}
@@ -4369,7 +4409,7 @@ static void skylake_scaler_disable(struct intel_crtc *crtc)
static void skylake_pfit_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe = crtc->pipe;
struct intel_crtc_scaler_state *scaler_state =
&crtc->config->scaler_state;
@@ -4397,7 +4437,7 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe = crtc->pipe;
if (crtc->config->pch_pfit.enabled) {
@@ -4418,7 +4458,7 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
void hsw_enable_ips(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (!crtc->config->ips_enabled)
return;
@@ -4446,7 +4486,9 @@ void hsw_enable_ips(struct intel_crtc *crtc)
* and don't wait for vblanks until the end of crtc_enable, then
* the HW state readout code will complain that the expected
* IPS_CTL value is not the one we read. */
- if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
+ if (intel_wait_for_register(dev_priv,
+ IPS_CTL, IPS_ENABLE, IPS_ENABLE,
+ 50))
DRM_ERROR("Timed out waiting for IPS enable\n");
}
}
@@ -4454,7 +4496,7 @@ void hsw_enable_ips(struct intel_crtc *crtc)
void hsw_disable_ips(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (!crtc->config->ips_enabled)
return;
@@ -4465,7 +4507,9 @@ void hsw_disable_ips(struct intel_crtc *crtc)
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
mutex_unlock(&dev_priv->rps.hw_lock);
/* wait for pcode to finish disabling IPS, which may take up to 42ms */
- if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
+ if (intel_wait_for_register(dev_priv,
+ IPS_CTL, IPS_ENABLE, 0,
+ 42))
DRM_ERROR("Timed out waiting for IPS disable\n");
} else {
I915_WRITE(IPS_CTL, 0);
@@ -4480,7 +4524,7 @@ static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
{
if (intel_crtc->overlay) {
struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
mutex_lock(&dev->struct_mutex);
dev_priv->mm.interruptible = false;
@@ -4508,7 +4552,7 @@ static void
intel_post_enable_primary(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
@@ -4540,7 +4584,7 @@ static void
intel_pre_disable_primary(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
@@ -4567,7 +4611,7 @@ static void
intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
@@ -4626,7 +4670,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->base.state);
struct drm_atomic_state *old_state = old_crtc_state->base.state;
@@ -4641,14 +4685,14 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
struct intel_plane_state *old_primary_state =
to_intel_plane_state(old_pri_state);
- intel_fbc_pre_update(crtc);
+ intel_fbc_pre_update(crtc, pipe_config, primary_state);
if (old_primary_state->visible &&
(modeset || !primary_state->visible))
intel_pre_disable_primary(&crtc->base);
}
- if (pipe_config->disable_cxsr) {
+ if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev)) {
crtc->wm.cxsr_allowed = false;
/*
@@ -4729,7 +4773,7 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
@@ -4757,7 +4801,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc->config->has_pch_encoder)
intel_prepare_shared_dpll(intel_crtc);
- if (intel_crtc->config->has_dp_encoder)
+ if (intel_crtc_has_dp_encoder(intel_crtc->config))
intel_dp_set_m_n(intel_crtc, M1_N1);
intel_set_pipe_timings(intel_crtc);
@@ -4826,7 +4870,7 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
static void haswell_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe, hsw_workaround_pipe;
@@ -4841,13 +4885,17 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
false);
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_pll_enable)
+ encoder->pre_pll_enable(encoder);
+
if (intel_crtc->config->shared_dpll)
intel_enable_shared_dpll(intel_crtc);
- if (intel_crtc->config->has_dp_encoder)
+ if (intel_crtc_has_dp_encoder(intel_crtc->config))
intel_dp_set_m_n(intel_crtc, M1_N1);
- if (!intel_crtc->config->has_dsi_encoder)
+ if (!transcoder_is_dsi(cpu_transcoder))
intel_set_pipe_timings(intel_crtc);
intel_set_pipe_src_size(intel_crtc);
@@ -4863,7 +4911,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
&intel_crtc->config->fdi_m_n, NULL);
}
- if (!intel_crtc->config->has_dsi_encoder)
+ if (!transcoder_is_dsi(cpu_transcoder))
haswell_set_pipeconf(crtc);
haswell_set_pipemisc(crtc);
@@ -4885,7 +4933,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc->config->has_pch_encoder)
dev_priv->display.fdi_link_train(crtc);
- if (!intel_crtc->config->has_dsi_encoder)
+ if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_enable_pipe_clock(intel_crtc);
if (INTEL_INFO(dev)->gen >= 9)
@@ -4900,7 +4948,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_color_load_luts(&pipe_config->base);
intel_ddi_set_pipe_settings(crtc);
- if (!intel_crtc->config->has_dsi_encoder)
+ if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_enable_transcoder_func(crtc);
if (dev_priv->display.initial_watermarks != NULL)
@@ -4909,7 +4957,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_update_watermarks(crtc);
/* XXX: Do the pipe assertions at the right place for BXT DSI. */
- if (!intel_crtc->config->has_dsi_encoder)
+ if (!transcoder_is_dsi(cpu_transcoder))
intel_enable_pipe(intel_crtc);
if (intel_crtc->config->has_pch_encoder)
@@ -4946,7 +4994,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe = crtc->pipe;
/* To avoid upsetting the power well on haswell only disable the pfit if
@@ -4961,7 +5009,7 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
@@ -5024,7 +5072,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
static void haswell_crtc_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
@@ -5042,13 +5090,13 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
assert_vblank_disabled(crtc);
/* XXX: Do the pipe assertions at the right place for BXT DSI. */
- if (!intel_crtc->config->has_dsi_encoder)
+ if (!transcoder_is_dsi(cpu_transcoder))
intel_disable_pipe(intel_crtc);
if (intel_crtc->config->dp_encoder_is_mst)
intel_ddi_set_vc_payload_alloc(crtc, false);
- if (!intel_crtc->config->has_dsi_encoder)
+ if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
if (INTEL_INFO(dev)->gen >= 9)
@@ -5056,7 +5104,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
else
ironlake_pfit_disable(intel_crtc, false);
- if (!intel_crtc->config->has_dsi_encoder)
+ if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_disable_pipe_clock(intel_crtc);
for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -5076,7 +5124,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc_state *pipe_config = crtc->config;
if (!pipe_config->gmch_pfit.control)
@@ -5146,7 +5194,7 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder)
case INTEL_OUTPUT_UNKNOWN:
/* Only DDI platforms should ever use this output type */
WARN_ON_ONCE(!HAS_DDI(dev));
- case INTEL_OUTPUT_DISPLAYPORT:
+ case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_EDP:
intel_dig_port = enc_to_dig_port(&intel_encoder->base);
@@ -5180,7 +5228,7 @@ intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
* run the DP detection too.
*/
WARN_ON_ONCE(!HAS_DDI(dev));
- case INTEL_OUTPUT_DISPLAYPORT:
+ case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_EDP:
intel_dig_port = enc_to_dig_port(&intel_encoder->base);
return port_to_aux_power_domain(intel_dig_port->port);
@@ -5228,7 +5276,7 @@ static unsigned long
modeset_get_crtc_power_domains(struct drm_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum intel_display_power_domain domain;
unsigned long domains, new_domains, old_domains;
@@ -5269,21 +5317,34 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
return max_cdclk_freq*90/100;
}
+static int skl_calc_cdclk(int max_pixclk, int vco);
+
static void intel_update_max_cdclk(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
+ int max_cdclk, vco;
+ vco = dev_priv->skl_preferred_vco_freq;
+ WARN_ON(vco != 8100000 && vco != 8640000);
+
+ /*
+ * Use the lower (vco 8640) cdclk values as a
+ * first guess. skl_calc_cdclk() will correct it
+ * if the preferred vco is 8100 instead.
+ */
if (limit == SKL_DFSM_CDCLK_LIMIT_675)
- dev_priv->max_cdclk_freq = 675000;
+ max_cdclk = 617143;
else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
- dev_priv->max_cdclk_freq = 540000;
+ max_cdclk = 540000;
else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
- dev_priv->max_cdclk_freq = 450000;
+ max_cdclk = 432000;
else
- dev_priv->max_cdclk_freq = 337500;
+ max_cdclk = 308571;
+
+ dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
} else if (IS_BROXTON(dev)) {
dev_priv->max_cdclk_freq = 624000;
} else if (IS_BROADWELL(dev)) {
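
A worked check of the "vco 8640" first-guess values above (the derivation, max_cdclk = vco / quotient for the legal SKL cdclk steps, is an assumption for illustration):

/*
 *   8640000 / 14 = 617142.86 -> 617143 kHz
 *   8640000 / 16 = 540000 kHz
 *   8640000 / 20 = 432000 kHz
 *   8640000 / 28 = 308571.43 -> 308571 kHz
 *
 * skl_calc_cdclk() then snaps these to the nearest legal cdclk for
 * whichever vco (8100 or 8640 MHz) the platform actually prefers.
 */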
@@ -5321,267 +5382,322 @@ static void intel_update_max_cdclk(struct drm_device *dev)
static void intel_update_cdclk(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
- DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
- dev_priv->cdclk_freq);
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n",
+ dev_priv->cdclk_freq, dev_priv->cdclk_pll.vco,
+ dev_priv->cdclk_pll.ref);
+ else
+ DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
+ dev_priv->cdclk_freq);
/*
- * Program the gmbus_freq based on the cdclk frequency.
- * BSpec erroneously claims we should aim for 4MHz, but
- * in fact 1MHz is the correct frequency.
+ * 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
+ * Programmng [sic] note: bit[9:2] should be programmed to the number
+ * of cdclk that generates 4MHz reference clock freq which is used to
+ * generate GMBus clock. This will vary with the cdclk freq.
*/
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
- /*
- * Program the gmbus_freq based on the cdclk frequency.
- * BSpec erroneously claims we should aim for 4MHz, but
- * in fact 1MHz is the correct frequency.
- */
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
- }
+}
- if (dev_priv->max_cdclk_freq == 0)
- intel_update_max_cdclk(dev);
+/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
+static int skl_cdclk_decimal(int cdclk)
+{
+ return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
}
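
A quick worked example of the fixed-point conversion above:

/*
 * For cdclk = 337500 kHz:
 *
 *   DIV_ROUND_CLOSEST(337500 - 1000, 500) = 673 = 0x2a1
 *
 * i.e. 336.5 MHz expressed in 0.5 MHz units, the -1 MHz-offset
 * encoding the CDCLK_CTL decimal field expects. The deleted broxton
 * code further below shows the inverse: freq = raw * 500 + 1000.
 */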
-static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int frequency)
+static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
{
- uint32_t divider;
- uint32_t ratio;
- uint32_t current_freq;
- int ret;
+ int ratio;
+
+ if (cdclk == dev_priv->cdclk_pll.ref)
+ return 0;
- /* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */
- switch (frequency) {
+ switch (cdclk) {
+ default:
+ MISSING_CASE(cdclk);
case 144000:
+ case 288000:
+ case 384000:
+ case 576000:
+ ratio = 60;
+ break;
+ case 624000:
+ ratio = 65;
+ break;
+ }
+
+ return dev_priv->cdclk_pll.ref * ratio;
+}
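
Only two PLL ratios survive on BXT, which a standalone check makes explicit (ref clock and ratios as in the function above; vco = ref * ratio):

#include <assert.h>

int main(void)
{
	int ref = 19200; /* kHz */
	assert(ref * 60 == 1152000); /* 144/288/384/576 MHz family */
	assert(ref * 65 == 1248000); /* 624 MHz */
	return 0;
}
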
+
+static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(BXT_DE_PLL_ENABLE, 0);
+
+ /* Timeout 200us */
+ if (intel_wait_for_register(dev_priv,
+ BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0,
+ 1))
+ DRM_ERROR("timeout waiting for DE PLL unlock\n");
+
+ dev_priv->cdclk_pll.vco = 0;
+}
+
+static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
+{
+ int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk_pll.ref);
+ u32 val;
+
+ val = I915_READ(BXT_DE_PLL_CTL);
+ val &= ~BXT_DE_PLL_RATIO_MASK;
+ val |= BXT_DE_PLL_RATIO(ratio);
+ I915_WRITE(BXT_DE_PLL_CTL, val);
+
+ I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
+
+ /* Timeout 200us */
+ if (intel_wait_for_register(dev_priv,
+ BXT_DE_PLL_ENABLE,
+ BXT_DE_PLL_LOCK,
+ BXT_DE_PLL_LOCK,
+ 1))
+ DRM_ERROR("timeout waiting for DE PLL lock\n");
+
+ dev_priv->cdclk_pll.vco = vco;
+}
+
+static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
+{
+ u32 val, divider;
+ int vco, ret;
+
+ vco = bxt_de_pll_vco(dev_priv, cdclk);
+
+ DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
+
+ /* cdclk = vco / 2 / div{1,1.5,2,4} */
+ switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
+ case 8:
divider = BXT_CDCLK_CD2X_DIV_SEL_4;
- ratio = BXT_DE_PLL_RATIO(60);
break;
- case 288000:
+ case 4:
divider = BXT_CDCLK_CD2X_DIV_SEL_2;
- ratio = BXT_DE_PLL_RATIO(60);
break;
- case 384000:
+ case 3:
divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
- ratio = BXT_DE_PLL_RATIO(60);
break;
- case 576000:
- divider = BXT_CDCLK_CD2X_DIV_SEL_1;
- ratio = BXT_DE_PLL_RATIO(60);
- break;
- case 624000:
+ case 2:
divider = BXT_CDCLK_CD2X_DIV_SEL_1;
- ratio = BXT_DE_PLL_RATIO(65);
- break;
- case 19200:
- /*
- * Bypass frequency with DE PLL disabled. Init ratio, divider
- * to suppress GCC warning.
- */
- ratio = 0;
- divider = 0;
break;
default:
- DRM_ERROR("unsupported CDCLK freq %d", frequency);
+ WARN_ON(cdclk != dev_priv->cdclk_pll.ref);
+ WARN_ON(vco != 0);
- return;
+ divider = BXT_CDCLK_CD2X_DIV_SEL_1;
+ break;
}
- mutex_lock(&dev_priv->rps.hw_lock);
/* Inform power controller of upcoming frequency change */
+ mutex_lock(&dev_priv->rps.hw_lock);
ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
0x80000000);
mutex_unlock(&dev_priv->rps.hw_lock);
if (ret) {
DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
- ret, frequency);
+ ret, cdclk);
return;
}
- current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK;
- /* convert from .1 fixpoint MHz with -1MHz offset to kHz */
- current_freq = current_freq * 500 + 1000;
+ if (dev_priv->cdclk_pll.vco != 0 &&
+ dev_priv->cdclk_pll.vco != vco)
+ bxt_de_pll_disable(dev_priv);
- /*
- * DE PLL has to be disabled when
- * - setting to 19.2MHz (bypass, PLL isn't used)
- * - before setting to 624MHz (PLL needs toggling)
- * - before setting to any frequency from 624MHz (PLL needs toggling)
- */
- if (frequency == 19200 || frequency == 624000 ||
- current_freq == 624000) {
- I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE);
- /* Timeout 200us */
- if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK),
- 1))
- DRM_ERROR("timout waiting for DE PLL unlock\n");
- }
-
- if (frequency != 19200) {
- uint32_t val;
-
- val = I915_READ(BXT_DE_PLL_CTL);
- val &= ~BXT_DE_PLL_RATIO_MASK;
- val |= ratio;
- I915_WRITE(BXT_DE_PLL_CTL, val);
-
- I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
- /* Timeout 200us */
- if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
- DRM_ERROR("timeout waiting for DE PLL lock\n");
-
- val = I915_READ(CDCLK_CTL);
- val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK;
- val |= divider;
- /*
- * Disable SSA Precharge when CD clock frequency < 500 MHz,
- * enable otherwise.
- */
- val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE;
- if (frequency >= 500000)
- val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+ if (dev_priv->cdclk_pll.vco != vco)
+ bxt_de_pll_enable(dev_priv, vco);
- val &= ~CDCLK_FREQ_DECIMAL_MASK;
- /* convert from kHz to .1 fixpoint MHz with -1MHz offset */
- val |= (frequency - 1000) / 500;
- I915_WRITE(CDCLK_CTL, val);
- }
+ val = divider | skl_cdclk_decimal(cdclk);
+ /*
+ * FIXME if only the cd2x divider needs changing, it could be done
+ * without shutting off the pipe (if only one pipe is active).
+ */
+ val |= BXT_CDCLK_CD2X_PIPE_NONE;
+ /*
+ * Disable SSA Precharge when CD clock frequency < 500 MHz,
+ * enable otherwise.
+ */
+ if (cdclk >= 500000)
+ val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+ I915_WRITE(CDCLK_CTL, val);
mutex_lock(&dev_priv->rps.hw_lock);
ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
- DIV_ROUND_UP(frequency, 25000));
+ DIV_ROUND_UP(cdclk, 25000));
mutex_unlock(&dev_priv->rps.hw_lock);
if (ret) {
DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
- ret, frequency);
+ ret, cdclk);
return;
}
- intel_update_cdclk(dev_priv->dev);
+ intel_update_cdclk(&dev_priv->drm);
}
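
The divider switch at the top of bxt_set_cdclk() can be verified in isolation. A userspace sketch of the vco/cdclk -> CD2X divider mapping (fixed /2 plus div {1,1.5,2,4}, per the comment in the function):

/* Sketch: cdclk = vco / 2 / div, so vco/cdclk selects the divider. */
#include <assert.h>

static int vco_over_cdclk(int vco, int cdclk)
{
	return (vco + cdclk / 2) / cdclk; /* DIV_ROUND_CLOSEST */
}

int main(void)
{
	assert(vco_over_cdclk(1152000, 576000) == 2); /* div 1   */
	assert(vco_over_cdclk(1152000, 384000) == 3); /* div 1.5 */
	assert(vco_over_cdclk(1152000, 288000) == 4); /* div 2   */
	assert(vco_over_cdclk(1152000, 144000) == 8); /* div 4   */
	return 0;
}
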
-static bool broxton_cdclk_is_enabled(struct drm_i915_private *dev_priv)
+static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
- if (!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE))
- return false;
+ u32 cdctl, expected;
- /* TODO: Check for a valid CDCLK rate */
+ intel_update_cdclk(&dev_priv->drm);
- if (!(I915_READ(DBUF_CTL) & DBUF_POWER_REQUEST)) {
- DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power not requested\n");
+ if (dev_priv->cdclk_pll.vco == 0 ||
+ dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
+ goto sanitize;
- return false;
- }
+ /* DPLL okay; verify the cdclk
+ *
+ * Some BIOS versions leave an incorrect decimal frequency value and
+ * set reserved MBZ bits in CDCLK_CTL, at least when exiting from S4,
+ * so sanitize this register.
+ */
+ cdctl = I915_READ(CDCLK_CTL);
+ /*
+ * Let's ignore the pipe field, since BIOS could have configured the
+ * dividers either synced to an active pipe or asynchronously
+ * (PIPE_NONE).
+ */
+ cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;
- if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) {
- DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power hasn't settled\n");
+ expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
+ skl_cdclk_decimal(dev_priv->cdclk_freq);
+ /*
+ * Disable SSA Precharge when CD clock frequency < 500 MHz,
+ * enable otherwise.
+ */
+ if (dev_priv->cdclk_freq >= 500000)
+ expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
- return false;
- }
+ if (cdctl == expected)
+ /* All well; nothing to sanitize */
+ return;
- return true;
-}
+sanitize:
+ DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
-bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv)
-{
- return broxton_cdclk_is_enabled(dev_priv);
+ /* force cdclk programming */
+ dev_priv->cdclk_freq = 0;
+
+ /* force full PLL disable + enable */
+ dev_priv->cdclk_pll.vco = -1;
}
-void broxton_init_cdclk(struct drm_i915_private *dev_priv)
+void bxt_init_cdclk(struct drm_i915_private *dev_priv)
{
- /* check if cd clock is enabled */
- if (broxton_cdclk_is_enabled(dev_priv)) {
- DRM_DEBUG_KMS("CDCLK already enabled, won't reprogram it\n");
- return;
- }
+ bxt_sanitize_cdclk(dev_priv);
- DRM_DEBUG_KMS("CDCLK not enabled, enabling it\n");
+ if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0)
+ return;
/*
* FIXME:
* - The initial CDCLK needs to be read from VBT.
* Need to make this change after VBT has changes for BXT.
- * - check if setting the max (or any) cdclk freq is really necessary
- * here, it belongs to modeset time
*/
- broxton_set_cdclk(dev_priv, 624000);
-
- I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
- POSTING_READ(DBUF_CTL);
+ bxt_set_cdclk(dev_priv, bxt_calc_cdclk(0));
+}
- udelay(10);
+void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
+{
+ bxt_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref);
+}
- if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
- DRM_ERROR("DBuf power enable timeout!\n");
+static int skl_calc_cdclk(int max_pixclk, int vco)
+{
+ if (vco == 8640000) {
+ if (max_pixclk > 540000)
+ return 617143;
+ else if (max_pixclk > 432000)
+ return 540000;
+ else if (max_pixclk > 308571)
+ return 432000;
+ else
+ return 308571;
+ } else {
+ if (max_pixclk > 540000)
+ return 675000;
+ else if (max_pixclk > 450000)
+ return 540000;
+ else if (max_pixclk > 337500)
+ return 450000;
+ else
+ return 337500;
+ }
}
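
This table is what lets intel_update_max_cdclk() earlier in the patch start from the vco-8640 values as a first guess: feeding the guess back through skl_calc_cdclk() with the actual preferred vco yields the correct maximum. A standalone check (logic copied from the function above):

#include <assert.h>

static int skl_calc_cdclk(int max_pixclk, int vco)
{
	if (vco == 8640000) {
		if (max_pixclk > 540000) return 617143;
		if (max_pixclk > 432000) return 540000;
		if (max_pixclk > 308571) return 432000;
		return 308571;
	}
	if (max_pixclk > 540000) return 675000;
	if (max_pixclk > 450000) return 540000;
	if (max_pixclk > 337500) return 450000;
	return 337500;
}

int main(void)
{
	/* DFSM limit 675 -> first guess 617143; corrected per vco. */
	assert(skl_calc_cdclk(617143, 8640000) == 617143);
	assert(skl_calc_cdclk(617143, 8100000) == 675000);
	return 0;
}
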
-void broxton_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void
+skl_dpll0_update(struct drm_i915_private *dev_priv)
{
- I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
- POSTING_READ(DBUF_CTL);
+ u32 val;
- udelay(10);
+ dev_priv->cdclk_pll.ref = 24000;
+ dev_priv->cdclk_pll.vco = 0;
- if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
- DRM_ERROR("DBuf power disable timeout!\n");
+ val = I915_READ(LCPLL1_CTL);
+ if ((val & LCPLL_PLL_ENABLE) == 0)
+ return;
- /* Set minimum (bypass) frequency, in effect turning off the DE PLL */
- broxton_set_cdclk(dev_priv, 19200);
-}
+ if (WARN_ON((val & LCPLL_PLL_LOCK) == 0))
+ return;
-static const struct skl_cdclk_entry {
- unsigned int freq;
- unsigned int vco;
-} skl_cdclk_frequencies[] = {
- { .freq = 308570, .vco = 8640 },
- { .freq = 337500, .vco = 8100 },
- { .freq = 432000, .vco = 8640 },
- { .freq = 450000, .vco = 8100 },
- { .freq = 540000, .vco = 8100 },
- { .freq = 617140, .vco = 8640 },
- { .freq = 675000, .vco = 8100 },
-};
+ val = I915_READ(DPLL_CTRL1);
-static unsigned int skl_cdclk_decimal(unsigned int freq)
-{
- return (freq - 1000) / 500;
+ if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
+ DPLL_CTRL1_SSC(SKL_DPLL0) |
+ DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
+ DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
+ return;
+
+ switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) {
+ case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0):
+ case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0):
+ case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0):
+ case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0):
+ dev_priv->cdclk_pll.vco = 8100000;
+ break;
+ case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0):
+ case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0):
+ dev_priv->cdclk_pll.vco = 8640000;
+ break;
+ default:
+ MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
+ break;
+ }
}
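
The readout works because each DPLL0 link rate implies exactly one vco. A sketch of that mapping (the enum is an illustrative placeholder, not the DPLL_CTRL1 register encoding):

#include <assert.h>

enum skl_link_rate { LR_810, LR_1080, LR_1350, LR_1620, LR_2160, LR_2700 };

static int skl_dpll0_vco(enum skl_link_rate lr)
{
	switch (lr) {
	case LR_810: case LR_1350: case LR_1620: case LR_2700:
		return 8100000; /* kHz */
	case LR_1080: case LR_2160:
		return 8640000; /* kHz */
	}
	return 0; /* unknown selection */
}

int main(void)
{
	assert(skl_dpll0_vco(LR_2160) == 8640000);
	assert(skl_dpll0_vco(LR_2700) == 8100000);
	return 0;
}
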
-static unsigned int skl_cdclk_get_vco(unsigned int freq)
+void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco)
{
- unsigned int i;
+ bool changed = dev_priv->skl_preferred_vco_freq != vco;
- for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) {
- const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i];
-
- if (e->freq == freq)
- return e->vco;
- }
+ dev_priv->skl_preferred_vco_freq = vco;
- return 8100;
+ if (changed)
+ intel_update_max_cdclk(&dev_priv->drm);
}
static void
-skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
+skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
{
- unsigned int min_freq;
+ int min_cdclk = skl_calc_cdclk(0, vco);
u32 val;
- /* select the minimum CDCLK before enabling DPLL 0 */
- val = I915_READ(CDCLK_CTL);
- val &= ~CDCLK_FREQ_SEL_MASK | ~CDCLK_FREQ_DECIMAL_MASK;
- val |= CDCLK_FREQ_337_308;
-
- if (required_vco == 8640)
- min_freq = 308570;
- else
- min_freq = 337500;
-
- val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq);
+ WARN_ON(vco != 8100000 && vco != 8640000);
+ /* select the minimum CDCLK before enabling DPLL 0 */
+ val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
I915_WRITE(CDCLK_CTL, val);
POSTING_READ(CDCLK_CTL);
@@ -5592,14 +5708,14 @@ skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
* 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
* The modeset code is responsible for the selection of the exact link
* rate later on, with the constraint of choosing a frequency that
- * works with required_vco.
+ * works with vco.
*/
val = I915_READ(DPLL_CTRL1);
val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
- if (required_vco == 8640)
+ if (vco == 8640000)
val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
SKL_DPLL0);
else
@@ -5611,8 +5727,27 @@ skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
- if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5))
+ if (intel_wait_for_register(dev_priv,
+ LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
+ 5))
DRM_ERROR("DPLL0 not locked\n");
+
+ dev_priv->cdclk_pll.vco = vco;
+
+ /* We'll want to keep using the current vco from now on. */
+ skl_set_preferred_cdclk_vco(dev_priv, vco);
+}
+
+static void
+skl_dpll0_disable(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
+ if (intel_wait_for_register(dev_priv,
+ LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
+ 1))
+ DRM_ERROR("Couldn't disable DPLL0\n");
+
+ dev_priv->cdclk_pll.vco = 0;
}
static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
@@ -5631,23 +5766,17 @@ static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
{
- unsigned int i;
-
- for (i = 0; i < 15; i++) {
- if (skl_cdclk_pcu_ready(dev_priv))
- return true;
- udelay(10);
- }
-
- return false;
+ return _wait_for(skl_cdclk_pcu_ready(dev_priv), 3000, 10) == 0;
}
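
The replaced open-coded loop polled 15 times at 10 us (150 us total); _wait_for(cond, 3000, 10) keeps the 10 us spacing but extends the budget to 3 ms. A userspace approximation of the pattern (pcu_ready() is a hypothetical stand-in for skl_cdclk_pcu_ready()):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static int tries;

/* Hypothetical stand-in: becomes true after a few polls. */
static bool pcu_ready(void)
{
	return ++tries > 3;
}

/* Roughly what the driver's _wait_for(cond, 3000, 10) does:
 * poll up to timeout_us, sleeping wait_us between attempts. */
static int wait_for_cond(bool (*cond)(void), int timeout_us, int wait_us)
{
	struct timespec ts = { 0, wait_us * 1000L };
	for (int waited = 0; waited <= timeout_us; waited += wait_us) {
		if (cond())
			return 0;
		nanosleep(&ts, NULL);
	}
	return -1; /* timed out */
}

int main(void)
{
	printf("pcu ready: %s\n",
	       wait_for_cond(pcu_ready, 3000, 10) == 0 ? "yes" : "timed out");
	return 0;
}
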
-static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
+static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
u32 freq_select, pcu_ack;
- DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq);
+ WARN_ON((cdclk == 24000) != (vco == 0));
+
+ DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
DRM_ERROR("failed to inform PCU about cdclk change\n");
@@ -5655,7 +5784,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
}
/* set CDCLK_CTL */
- switch(freq) {
+ switch (cdclk) {
case 450000:
case 432000:
freq_select = CDCLK_FREQ_450_432;
@@ -5665,20 +5794,27 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
freq_select = CDCLK_FREQ_540;
pcu_ack = 2;
break;
- case 308570:
+ case 308571:
case 337500:
default:
freq_select = CDCLK_FREQ_337_308;
pcu_ack = 0;
break;
- case 617140:
+ case 617143:
case 675000:
freq_select = CDCLK_FREQ_675_617;
pcu_ack = 3;
break;
}
- I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq));
+ if (dev_priv->cdclk_pll.vco != 0 &&
+ dev_priv->cdclk_pll.vco != vco)
+ skl_dpll0_disable(dev_priv);
+
+ if (dev_priv->cdclk_pll.vco != vco)
+ skl_dpll0_enable(dev_priv, vco);
+
+ I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
POSTING_READ(CDCLK_CTL);
/* inform PCU of the change */
@@ -5689,52 +5825,41 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
intel_update_cdclk(dev);
}
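
For reference, the freq_select/pcu_ack pairs used above, collected in one place (a sketch; the strings stand in for the register defines):

#include <stdio.h>

struct cdclk_sel { int cdclk; const char *sel; int pcu_ack; };

int main(void)
{
	static const struct cdclk_sel table[] = {
		{ 308571, "CDCLK_FREQ_337_308", 0 },
		{ 337500, "CDCLK_FREQ_337_308", 0 },
		{ 432000, "CDCLK_FREQ_450_432", 1 },
		{ 450000, "CDCLK_FREQ_450_432", 1 },
		{ 540000, "CDCLK_FREQ_540",     2 },
		{ 617143, "CDCLK_FREQ_675_617", 3 },
		{ 675000, "CDCLK_FREQ_675_617", 3 },
	};
	for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		printf("%6d kHz -> %s, pcu_ack %d\n",
		       table[i].cdclk, table[i].sel, table[i].pcu_ack);
	return 0;
}
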
+static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
+
void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
- /* disable DBUF power */
- I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
- POSTING_READ(DBUF_CTL);
-
- udelay(10);
-
- if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
- DRM_ERROR("DBuf power disable timeout\n");
-
- /* disable DPLL0 */
- I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
- if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
- DRM_ERROR("Couldn't disable DPLL0\n");
+ skl_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref, 0);
}
void skl_init_cdclk(struct drm_i915_private *dev_priv)
{
- unsigned int required_vco;
-
- /* DPLL0 not enabled (happens on early BIOS versions) */
- if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
- /* enable DPLL0 */
- required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
- skl_dpll0_enable(dev_priv, required_vco);
- }
+ int cdclk, vco;
- /* set CDCLK to the frequency the BIOS chose */
- skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);
+ skl_sanitize_cdclk(dev_priv);
- /* enable DBUF power */
- I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
- POSTING_READ(DBUF_CTL);
+ if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0) {
+ /*
+ * Use the current vco as our initial
+ * guess as to what the preferred vco is.
+ */
+ if (dev_priv->skl_preferred_vco_freq == 0)
+ skl_set_preferred_cdclk_vco(dev_priv,
+ dev_priv->cdclk_pll.vco);
+ return;
+ }
- udelay(10);
+ vco = dev_priv->skl_preferred_vco_freq;
+ if (vco == 0)
+ vco = 8100000;
+ cdclk = skl_calc_cdclk(0, vco);
- if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
- DRM_ERROR("DBuf power enable timeout\n");
+ skl_set_cdclk(dev_priv, cdclk, vco);
}
-int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
+static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
- uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
- uint32_t cdctl = I915_READ(CDCLK_CTL);
- int freq = dev_priv->skl_boot_cdclk;
+ uint32_t cdctl, expected;
/*
* check if the pre-os initialized the display
@@ -5744,8 +5869,10 @@ int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
goto sanitize;
+ intel_update_cdclk(&dev_priv->drm);
/* Is PLL enabled and locked? */
- if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK)))
+ if (dev_priv->cdclk_pll.vco == 0 ||
+ dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
goto sanitize;
/* DPLL okay; verify the cdclk
@@ -5754,25 +5881,26 @@ int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
* decimal part may be programmed wrong by the BIOS when the pre-os does
* not enable the display. Verify the same as well.
*/
- if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq)))
+ cdctl = I915_READ(CDCLK_CTL);
+ expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
+ skl_cdclk_decimal(dev_priv->cdclk_freq);
+ if (cdctl == expected)
/* All well; nothing to sanitize */
- return false;
+ return;
+
sanitize:
- /*
- * As of now initialize with max cdclk till
- * we get dynamic cdclk support
- * */
- dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq;
- skl_init_cdclk(dev_priv);
+ DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
- /* we did have to sanitize */
- return true;
+ /* force cdclk programming */
+ dev_priv->cdclk_freq = 0;
+ /* force full PLL disable + enable */
+ dev_priv->cdclk_pll.vco = -1;
}
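
Both the SKL and BXT sanitize paths rely on the same sentinel trick: cdclk_freq = 0 matches no requested frequency and vco = -1 matches no computed vco, so the next set_cdclk() call is guaranteed to do a full PLL disable/enable. A minimal sketch of the comparison (field names follow the patch):

#include <assert.h>
#include <stdbool.h>

struct cdclk_pll_state { int vco; int freq; };

/* Would the next set_cdclk() call fully reprogram the PLL? */
static bool needs_full_reprogram(const struct cdclk_pll_state *s, int vco)
{
	return s->vco != 0 && s->vco != vco;
}

int main(void)
{
	struct cdclk_pll_state sanitized = { .vco = -1, .freq = 0 };
	/* -1 matches no real vco, so disable + enable always runs. */
	assert(needs_full_reprogram(&sanitized, 8100000));
	assert(needs_full_reprogram(&sanitized, 8640000));
	return 0;
}
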
/* Adjust CDclk dividers to allow high res or save power if possible */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 val, cmd;
WARN_ON(dev_priv->display.get_display_clock_speed(dev)
@@ -5837,7 +5965,7 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 val, cmd;
WARN_ON(dev_priv->display.get_display_clock_speed(dev)
@@ -5906,21 +6034,15 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
return 200000;
}
-static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
- int max_pixclk)
+static int bxt_calc_cdclk(int max_pixclk)
{
- /*
- * FIXME:
- * - remove the guardband, it's not needed on BXT
- * - set 19.2MHz bypass frequency if there are no active pipes
- */
- if (max_pixclk > 576000*9/10)
+ if (max_pixclk > 576000)
return 624000;
- else if (max_pixclk > 384000*9/10)
+ else if (max_pixclk > 384000)
return 576000;
- else if (max_pixclk > 288000*9/10)
+ else if (max_pixclk > 288000)
return 384000;
- else if (max_pixclk > 144000*9/10)
+ else if (max_pixclk > 144000)
return 288000;
else
return 144000;
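
Dropping the 9/10 guardband moves the breakpoints up; for example a 520 MHz pixel clock previously exceeded 576000*9/10 = 518400 and forced 624 MHz, but now fits in 576 MHz. A side-by-side check (old logic reconstructed from the removed lines):

#include <assert.h>

static int bxt_calc_cdclk_old(int px) /* with 90% guardband */
{
	if (px > 576000 * 9 / 10) return 624000;
	if (px > 384000 * 9 / 10) return 576000;
	if (px > 288000 * 9 / 10) return 384000;
	if (px > 144000 * 9 / 10) return 288000;
	return 144000;
}

static int bxt_calc_cdclk_new(int px)
{
	if (px > 576000) return 624000;
	if (px > 384000) return 576000;
	if (px > 288000) return 384000;
	if (px > 144000) return 288000;
	return 144000;
}

int main(void)
{
	assert(bxt_calc_cdclk_old(520000) == 624000);
	assert(bxt_calc_cdclk_new(520000) == 576000);
	return 0;
}
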
@@ -5931,7 +6053,7 @@ static int intel_mode_max_pixclk(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
unsigned max_pixclk = 0, i;
@@ -5958,14 +6080,11 @@ static int intel_mode_max_pixclk(struct drm_device *dev,
static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int max_pixclk = intel_mode_max_pixclk(dev, state);
struct intel_atomic_state *intel_state =
to_intel_atomic_state(state);
- if (max_pixclk < 0)
- return max_pixclk;
-
intel_state->cdclk = intel_state->dev_cdclk =
valleyview_calc_cdclk(dev_priv, max_pixclk);
@@ -5975,22 +6094,17 @@ static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
return 0;
}
-static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
+static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
{
- struct drm_device *dev = state->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int max_pixclk = intel_mode_max_pixclk(dev, state);
+ int max_pixclk = ilk_max_pixel_rate(state);
struct intel_atomic_state *intel_state =
to_intel_atomic_state(state);
- if (max_pixclk < 0)
- return max_pixclk;
-
intel_state->cdclk = intel_state->dev_cdclk =
- broxton_calc_cdclk(dev_priv, max_pixclk);
+ bxt_calc_cdclk(max_pixclk);
if (!intel_state->active_crtcs)
- intel_state->dev_cdclk = broxton_calc_cdclk(dev_priv, 0);
+ intel_state->dev_cdclk = bxt_calc_cdclk(0);
return 0;
}
@@ -6034,7 +6148,7 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_atomic_state *old_intel_state =
to_intel_atomic_state(old_state);
unsigned req_cdclk = old_intel_state->dev_cdclk;
@@ -6073,14 +6187,14 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
if (WARN_ON(intel_crtc->active))
return;
- if (intel_crtc->config->has_dp_encoder)
+ if (intel_crtc_has_dp_encoder(intel_crtc->config))
intel_dp_set_m_n(intel_crtc, M1_N1);
intel_set_pipe_timings(intel_crtc);
intel_set_pipe_src_size(intel_crtc);
if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
I915_WRITE(CHV_CANVAS(pipe), 0);
@@ -6125,7 +6239,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
@@ -6146,7 +6260,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
i9xx_set_pll_dividers(intel_crtc);
- if (intel_crtc->config->has_dp_encoder)
+ if (intel_crtc_has_dp_encoder(intel_crtc->config))
intel_dp_set_m_n(intel_crtc, M1_N1);
intel_set_pipe_timings(intel_crtc);
@@ -6182,7 +6296,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
static void i9xx_pfit_disable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (!crtc->config->gmch_pfit.control)
return;
@@ -6197,7 +6311,7 @@ static void i9xx_pfit_disable(struct intel_crtc *crtc)
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
@@ -6223,7 +6337,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
if (encoder->post_disable)
encoder->post_disable(encoder);
- if (!intel_crtc->config->has_dsi_encoder) {
+ if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
if (IS_CHERRYVIEW(dev))
chv_disable_pll(dev_priv, pipe);
else if (IS_VALLEYVIEW(dev))
@@ -6252,7 +6366,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
return;
if (to_intel_plane_state(crtc->primary->state)->visible) {
- WARN_ON(intel_crtc->unpin_work);
+ WARN_ON(intel_crtc->flip_work);
intel_pre_disable_primary_noatomic(crtc);
@@ -6262,8 +6376,8 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
dev_priv->display.crtc_disable(crtc);
- DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was enabled, now disabled\n",
- crtc->base.id);
+ DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
+ crtc->base.id, crtc->name);
WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
crtc->state->active = false;
@@ -6541,7 +6655,7 @@ static void hsw_compute_ips_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
pipe_config->ips_enabled = i915.enable_ips &&
hsw_crtc_supports_ips(crtc) &&
@@ -6561,12 +6675,12 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ int clock_limit = dev_priv->max_dotclk_freq;
- /* FIXME should check pixel clock limits on all platforms */
if (INTEL_INFO(dev)->gen < 4) {
- int clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
+ clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
/*
* Enable double wide mode when the dot clock
@@ -6574,16 +6688,16 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
*/
if (intel_crtc_supports_double_wide(crtc) &&
adjusted_mode->crtc_clock > clock_limit) {
- clock_limit *= 2;
+ clock_limit = dev_priv->max_dotclk_freq;
pipe_config->double_wide = true;
}
+ }
- if (adjusted_mode->crtc_clock > clock_limit) {
- DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
- adjusted_mode->crtc_clock, clock_limit,
- yesno(pipe_config->double_wide));
- return -EINVAL;
- }
+ if (adjusted_mode->crtc_clock > clock_limit) {
+ DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
+ adjusted_mode->crtc_clock, clock_limit,
+ yesno(pipe_config->double_wide));
+ return -EINVAL;
}
/*
@@ -6592,7 +6706,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
* - LVDS dual channel mode
* - Double wide pipe
*/
- if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
+ if ((intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
pipe_config->pipe_src_w &= ~1;
@@ -6615,81 +6729,103 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
static int skylake_get_display_clock_speed(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
- uint32_t cdctl = I915_READ(CDCLK_CTL);
- uint32_t linkrate;
+ uint32_t cdctl;
- if (!(lcpll1 & LCPLL_PLL_ENABLE))
- return 24000; /* 24MHz is the cd freq with NSSC ref */
+ skl_dpll0_update(dev_priv);
- if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
- return 540000;
+ if (dev_priv->cdclk_pll.vco == 0)
+ return dev_priv->cdclk_pll.ref;
- linkrate = (I915_READ(DPLL_CTRL1) &
- DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
+ cdctl = I915_READ(CDCLK_CTL);
- if (linkrate == DPLL_CTRL1_LINK_RATE_2160 ||
- linkrate == DPLL_CTRL1_LINK_RATE_1080) {
- /* vco 8640 */
+ if (dev_priv->cdclk_pll.vco == 8640000) {
switch (cdctl & CDCLK_FREQ_SEL_MASK) {
case CDCLK_FREQ_450_432:
return 432000;
case CDCLK_FREQ_337_308:
- return 308570;
+ return 308571;
+ case CDCLK_FREQ_540:
+ return 540000;
case CDCLK_FREQ_675_617:
- return 617140;
+ return 617143;
default:
- WARN(1, "Unknown cd freq selection\n");
+ MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
}
} else {
- /* vco 8100 */
switch (cdctl & CDCLK_FREQ_SEL_MASK) {
case CDCLK_FREQ_450_432:
return 450000;
case CDCLK_FREQ_337_308:
return 337500;
+ case CDCLK_FREQ_540:
+ return 540000;
case CDCLK_FREQ_675_617:
return 675000;
default:
- WARN(1, "Unknown cd freq selection\n");
+ MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
}
}
- /* error case, do as if DPLL0 isn't enabled */
- return 24000;
+ return dev_priv->cdclk_pll.ref;
+}
+
+static void bxt_de_pll_update(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ dev_priv->cdclk_pll.ref = 19200;
+ dev_priv->cdclk_pll.vco = 0;
+
+ val = I915_READ(BXT_DE_PLL_ENABLE);
+ if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
+ return;
+
+ if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
+ return;
+
+ val = I915_READ(BXT_DE_PLL_CTL);
+ dev_priv->cdclk_pll.vco = (val & BXT_DE_PLL_RATIO_MASK) *
+ dev_priv->cdclk_pll.ref;
}
static int broxton_get_display_clock_speed(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- uint32_t cdctl = I915_READ(CDCLK_CTL);
- uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
- uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE);
- int cdclk;
+ u32 divider;
+ int div, vco;
- if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE))
- return 19200;
+ bxt_de_pll_update(dev_priv);
+
+ vco = dev_priv->cdclk_pll.vco;
+ if (vco == 0)
+ return dev_priv->cdclk_pll.ref;
- cdclk = 19200 * pll_ratio / 2;
+ divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
- switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) {
+ switch (divider) {
case BXT_CDCLK_CD2X_DIV_SEL_1:
- return cdclk; /* 576MHz or 624MHz */
+ div = 2;
+ break;
case BXT_CDCLK_CD2X_DIV_SEL_1_5:
- return cdclk * 2 / 3; /* 384MHz */
+ div = 3;
+ break;
case BXT_CDCLK_CD2X_DIV_SEL_2:
- return cdclk / 2; /* 288MHz */
+ div = 4;
+ break;
case BXT_CDCLK_CD2X_DIV_SEL_4:
- return cdclk / 4; /* 144MHz */
+ div = 8;
+ break;
+ default:
+ MISSING_CASE(divider);
+ return dev_priv->cdclk_pll.ref;
}
- /* error case, do as if DE PLL isn't enabled */
- return 19200;
+ return DIV_ROUND_CLOSEST(vco, div);
}
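
Readout inverts the divider encoding: with the ratio-60 vco of 1152000 kHz, each CD2X selection maps back to one of the legal cdclks. A standalone check of DIV_ROUND_CLOSEST(vco, div):

#include <assert.h>

static int div_round_closest(int a, int b)
{
	return (a + b / 2) / b;
}

int main(void)
{
	int vco = 1152000; /* kHz */
	assert(div_round_closest(vco, 2) == 576000); /* DIV_SEL_1   */
	assert(div_round_closest(vco, 3) == 384000); /* DIV_SEL_1_5 */
	assert(div_round_closest(vco, 4) == 288000); /* DIV_SEL_2   */
	assert(div_round_closest(vco, 8) == 144000); /* DIV_SEL_4   */
	return 0;
}
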
static int broadwell_get_display_clock_speed(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t lcpll = I915_READ(LCPLL_CTL);
uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
@@ -6709,7 +6845,7 @@ static int broadwell_get_display_clock_speed(struct drm_device *dev)
static int haswell_get_display_clock_speed(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t lcpll = I915_READ(LCPLL_CTL);
uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
@@ -6843,7 +6979,7 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
static unsigned int intel_hpll_vco(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
static const unsigned int blb_vco[8] = {
[0] = 3200000,
[1] = 4000000,
@@ -7063,7 +7199,7 @@ static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
- intel_clock_t *reduced_clock)
+ struct dpll *reduced_clock)
{
struct drm_device *dev = crtc->base.dev;
u32 fp, fp2 = 0;
@@ -7081,7 +7217,7 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
crtc_state->dpll_hw_state.fp0 = fp;
crtc->lowfreq_avail = false;
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
reduced_clock) {
crtc_state->dpll_hw_state.fp1 = fp2;
crtc->lowfreq_avail = true;
@@ -7123,7 +7259,7 @@ static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe = crtc->pipe;
I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
@@ -7137,7 +7273,7 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m2_n2)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe = crtc->pipe;
enum transcoder transcoder = crtc->config->cpu_transcoder;
@@ -7200,7 +7336,7 @@ static void vlv_compute_dpll(struct intel_crtc *crtc,
pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
/* DPLL not used with DSI, but still need the rest set up */
- if (!pipe_config->has_dsi_encoder)
+ if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
DPLL_EXT_BUFFER_ENABLE_VLV;
@@ -7217,7 +7353,7 @@ static void chv_compute_dpll(struct intel_crtc *crtc,
pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
/* DPLL not used with DSI, but still need the rest set up */
- if (!pipe_config->has_dsi_encoder)
+ if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
pipe_config->dpll_hw_state.dpll_md =
@@ -7228,7 +7364,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = crtc->pipe;
u32 mdiv;
u32 bestn, bestm1, bestm2, bestp1, bestp2;
@@ -7287,15 +7423,15 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
/* Set HBR and RBR LPF coefficients */
if (pipe_config->port_clock == 162000 ||
- intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
- intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
+ intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) ||
+ intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
0x009f0003);
else
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
0x00d0000f);
- if (pipe_config->has_dp_encoder) {
+ if (intel_crtc_has_dp_encoder(pipe_config)) {
/* Use SSC source */
if (pipe == PIPE_A)
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
@@ -7315,8 +7451,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
- intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
+ if (intel_crtc_has_dp_encoder(crtc->config))
coreclk |= 0x01000000;
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
@@ -7328,7 +7463,7 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = crtc->pipe;
enum dpio_channel port = vlv_pipe_to_channel(pipe);
u32 loopfilter, tribuf_calcntr;
@@ -7487,22 +7622,18 @@ void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
static void i9xx_compute_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
- intel_clock_t *reduced_clock)
+ struct dpll *reduced_clock)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 dpll;
- bool is_sdvo;
struct dpll *clock = &crtc_state->dpll;
i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
- is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
- intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);
-
dpll = DPLL_VGA_MODE_DIS;
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
dpll |= DPLLB_MODE_LVDS;
else
dpll |= DPLLB_MODE_DAC_SERIAL;
@@ -7512,10 +7643,11 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
<< SDVO_MULTIPLIER_SHIFT_HIRES;
}
- if (is_sdvo)
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
dpll |= DPLL_SDVO_HIGH_SPEED;
- if (crtc_state->has_dp_encoder)
+ if (intel_crtc_has_dp_encoder(crtc_state))
dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
@@ -7545,7 +7677,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
if (crtc_state->sdvo_tv_clock)
dpll |= PLL_REF_INPUT_TVCLKINBC;
- else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv))
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
@@ -7563,10 +7695,10 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
static void i8xx_compute_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
- intel_clock_t *reduced_clock)
+ struct dpll *reduced_clock)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 dpll;
struct dpll *clock = &crtc_state->dpll;
@@ -7574,7 +7706,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
dpll = DPLL_VGA_MODE_DIS;
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
} else {
if (clock->p1 == 2)
@@ -7585,10 +7717,10 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
dpll |= PLL_P2_DIVIDE_BY_4;
}
- if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
+ if (!IS_I830(dev) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
dpll |= DPLL_DVO_2X_MODE;
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv))
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
@@ -7601,7 +7733,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
@@ -7618,7 +7750,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
crtc_vtotal -= 1;
crtc_vblank_end -= 1;
- if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
+ if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
else
vsyncshift = adjusted_mode->crtc_hsync_start -
@@ -7663,7 +7795,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = intel_crtc->pipe;
/* pipesrc controls the size that is scaled from, which should
@@ -7678,7 +7810,7 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
uint32_t tmp;
@@ -7713,7 +7845,7 @@ static void intel_get_pipe_src_size(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp;
tmp = I915_READ(PIPESRC(crtc->pipe));
@@ -7751,7 +7883,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t pipeconf;
pipeconf = 0;
@@ -7797,7 +7929,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
if (INTEL_INFO(dev)->gen < 4 ||
- intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
+ intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
else
pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
@@ -7816,21 +7948,21 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- const intel_limit_t *limit;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ const struct intel_limit *limit;
int refclk = 48000;
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
refclk = dev_priv->vbt.lvds_ssc_freq;
DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
}
limit = &intel_limits_i8xx_lvds;
- } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) {
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
limit = &intel_limits_i8xx_dvo;
} else {
limit = &intel_limits_i8xx_dac;
@@ -7852,14 +7984,14 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- const intel_limit_t *limit;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ const struct intel_limit *limit;
int refclk = 96000;
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
refclk = dev_priv->vbt.lvds_ssc_freq;
DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
@@ -7869,10 +8001,10 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
limit = &intel_limits_g4x_dual_channel_lvds;
else
limit = &intel_limits_g4x_single_channel_lvds;
- } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
- intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
limit = &intel_limits_g4x_hdmi;
- } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
limit = &intel_limits_g4x_sdvo;
} else {
/* The option is for other outputs */
@@ -7895,14 +8027,14 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- const intel_limit_t *limit;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ const struct intel_limit *limit;
int refclk = 96000;
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
refclk = dev_priv->vbt.lvds_ssc_freq;
DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
@@ -7929,14 +8061,14 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- const intel_limit_t *limit;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ const struct intel_limit *limit;
int refclk = 96000;
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
refclk = dev_priv->vbt.lvds_ssc_freq;
DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
@@ -7963,7 +8095,7 @@ static int chv_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
int refclk = 100000;
- const intel_limit_t *limit = &intel_limits_chv;
+ const struct intel_limit *limit = &intel_limits_chv;
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
@@ -7984,7 +8116,7 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
int refclk = 100000;
- const intel_limit_t *limit = &intel_limits_vlv;
+ const struct intel_limit *limit = &intel_limits_vlv;
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
@@ -8005,7 +8137,7 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t tmp;
if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
@@ -8032,9 +8164,9 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe = pipe_config->cpu_transcoder;
- intel_clock_t clock;
+ struct dpll clock;
u32 mdiv;
int refclk = 100000;
@@ -8060,7 +8192,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 val, base, offset;
int pipe = crtc->pipe, plane = crtc->plane;
int fourcc, pixel_format;
@@ -8128,10 +8260,10 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe = pipe_config->cpu_transcoder;
enum dpio_channel port = vlv_pipe_to_channel(pipe);
- intel_clock_t clock;
+ struct dpll clock;
u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
int refclk = 100000;
@@ -8162,7 +8294,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
uint32_t tmp;
bool ret;
@@ -8273,7 +8405,7 @@ out:
static void ironlake_init_pch_refclk(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder;
int i;
u32 val, final;
@@ -8544,7 +8676,7 @@ static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
bool with_fdi)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t reg, tmp;
if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
@@ -8583,7 +8715,7 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t reg, tmp;
mutex_lock(&dev_priv->sb_lock);
@@ -8704,7 +8836,7 @@ void intel_init_pch_refclk(struct drm_device *dev)
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
uint32_t val;
@@ -8746,7 +8878,7 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc)
static void haswell_set_pipeconf(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
u32 val = 0;
@@ -8765,7 +8897,7 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
static void haswell_set_pipemisc(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) {
@@ -8814,41 +8946,17 @@ static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state,
- intel_clock_t *reduced_clock)
+ struct dpll *reduced_clock)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_atomic_state *state = crtc_state->base.state;
- struct drm_connector *connector;
- struct drm_connector_state *connector_state;
- struct intel_encoder *encoder;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 dpll, fp, fp2;
- int factor, i;
- bool is_lvds = false, is_sdvo = false;
-
- for_each_connector_in_state(state, connector, connector_state, i) {
- if (connector_state->crtc != crtc_state->base.crtc)
- continue;
-
- encoder = to_intel_encoder(connector_state->best_encoder);
-
- switch (encoder->type) {
- case INTEL_OUTPUT_LVDS:
- is_lvds = true;
- break;
- case INTEL_OUTPUT_SDVO:
- case INTEL_OUTPUT_HDMI:
- is_sdvo = true;
- break;
- default:
- break;
- }
- }
+ int factor;
/* Enable autotuning of the PLL clock (if permissible) */
factor = 21;
- if (is_lvds) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if ((intel_panel_use_ssc(dev_priv) &&
dev_priv->vbt.lvds_ssc_freq == 100000) ||
(HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
@@ -8872,7 +8980,7 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
dpll = 0;
- if (is_lvds)
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
dpll |= DPLLB_MODE_LVDS;
else
dpll |= DPLLB_MODE_DAC_SERIAL;
@@ -8880,9 +8988,11 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
dpll |= (crtc_state->pixel_multiplier - 1)
<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
- if (is_sdvo)
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
dpll |= DPLL_SDVO_HIGH_SPEED;
- if (crtc_state->has_dp_encoder)
+
+ if (intel_crtc_has_dp_encoder(crtc_state))
dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
@@ -8905,7 +9015,8 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
break;
}
- if (is_lvds && intel_panel_use_ssc(dev_priv))
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ intel_panel_use_ssc(dev_priv))
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
dpll |= PLL_REF_INPUT_DREFCLK;
@@ -8921,11 +9032,11 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- intel_clock_t reduced_clock;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct dpll reduced_clock;
bool has_reduced_clock = false;
struct intel_shared_dpll *pll;
- const intel_limit_t *limit;
+ const struct intel_limit *limit;
int refclk = 120000;
memset(&crtc_state->dpll_hw_state, 0,
@@ -8937,7 +9048,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
if (!crtc_state->has_pch_encoder)
return 0;
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
dev_priv->vbt.lvds_ssc_freq);
@@ -8976,7 +9087,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
return -EINVAL;
}
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
has_reduced_clock)
crtc->lowfreq_avail = true;
@@ -8987,7 +9098,7 @@ static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = crtc->pipe;
m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
@@ -9005,7 +9116,7 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m2_n2)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = crtc->pipe;
if (INTEL_INFO(dev)->gen >= 5) {
@@ -9063,7 +9174,7 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
uint32_t ps_ctrl = 0;
int id = -1;
@@ -9094,7 +9205,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 val, base, offset, stride_mult, tiling;
int pipe = crtc->pipe;
int fourcc, pixel_format;
@@ -9177,7 +9288,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t tmp;
tmp = I915_READ(PF_CTL(crtc->pipe));
@@ -9202,7 +9313,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 val, base, offset;
int pipe = crtc->pipe;
int fourcc, pixel_format;
@@ -9270,7 +9381,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
uint32_t tmp;
bool ret;
@@ -9320,6 +9431,10 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
ironlake_get_fdi_m_n_config(crtc, pipe_config);
if (HAS_PCH_IBX(dev_priv)) {
+ /*
+ * The pipe->pch transcoder and pch transcoder->pll
+ * mapping is fixed.
+ */
pll_id = (enum intel_dpll_id) crtc->pipe;
} else {
tmp = I915_READ(PCH_DPLL_SEL);
@@ -9361,7 +9476,7 @@ out:
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct intel_crtc *crtc;
for_each_intel_crtc(dev, crtc)
@@ -9395,7 +9510,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
if (IS_HASWELL(dev))
return I915_READ(D_COMP_HSW);
@@ -9405,7 +9520,7 @@ static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
if (IS_HASWELL(dev)) {
mutex_lock(&dev_priv->rps.hw_lock);
@@ -9451,7 +9566,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
I915_WRITE(LCPLL_CTL, val);
POSTING_READ(LCPLL_CTL);
- if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
+ if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
DRM_ERROR("LCPLL still locked\n");
val = hsw_read_dcomp(dev_priv);
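/*
 * The wait_for() -> intel_wait_for_register() conversions above assume a
 * helper that polls until (read(reg) & mask) == value or a millisecond
 * timeout expires. A minimal self-contained sketch of those semantics
 * (fake register and clock; the kernel helper returns -ETIMEDOUT):
 */
#include <stdint.h>

static uint32_t fake_reg;      /* stands in for the MMIO register */
static uint64_t fake_time_ms;  /* stands in for a monotonic clock */

static uint32_t read_reg(uint32_t reg)
{
	(void)reg;
	fake_time_ms++;        /* pretend each poll costs 1 ms */
	return fake_reg;
}

static int wait_for_register_sketch(uint32_t reg, uint32_t mask,
				    uint32_t value, unsigned int timeout_ms)
{
	uint64_t deadline = fake_time_ms + timeout_ms;

	do {
		if ((read_reg(reg) & mask) == value)
			return 0;      /* condition met */
	} while (fake_time_ms < deadline);

	return -1;                     /* timed out */
}
/*
 * E.g. wait_for_register_sketch(LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1) mirrors
 * the "LCPLL still locked" check above.
 */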
@@ -9506,7 +9621,9 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
val &= ~LCPLL_PLL_DISABLE;
I915_WRITE(LCPLL_CTL, val);
- if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
+ if (intel_wait_for_register(dev_priv,
+ LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
+ 5))
DRM_ERROR("LCPLL not locked yet\n");
if (val & LCPLL_CD_SOURCE_FCLK) {
@@ -9520,7 +9637,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
}
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- intel_update_cdclk(dev_priv->dev);
+ intel_update_cdclk(&dev_priv->drm);
}
/*
@@ -9548,7 +9665,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
*/
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
uint32_t val;
DRM_DEBUG_KMS("Enabling package C8+\n");
@@ -9565,7 +9682,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
uint32_t val;
DRM_DEBUG_KMS("Disabling package C8+\n");
@@ -9580,21 +9697,21 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
}
}
-static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
+static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
struct intel_atomic_state *old_intel_state =
to_intel_atomic_state(old_state);
unsigned int req_cdclk = old_intel_state->dev_cdclk;
- broxton_set_cdclk(to_i915(dev), req_cdclk);
+ bxt_set_cdclk(to_i915(dev), req_cdclk);
}
/* compute the max rate for new configuration */
static int ilk_max_pixel_rate(struct drm_atomic_state *state)
{
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_i915_private *dev_priv = state->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(state->dev);
struct drm_crtc *crtc;
struct drm_crtc_state *cstate;
struct intel_crtc_state *crtc_state;
@@ -9630,7 +9747,7 @@ static int ilk_max_pixel_rate(struct drm_atomic_state *state)
static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t val, data;
int ret;
@@ -9700,8 +9817,6 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
- I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
-
intel_update_cdclk(dev);
WARN(cdclk != dev_priv->cdclk_freq,
@@ -9709,6 +9824,18 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
cdclk, dev_priv->cdclk_freq);
}
+static int broadwell_calc_cdclk(int max_pixclk)
+{
+ if (max_pixclk > 540000)
+ return 675000;
+ else if (max_pixclk > 450000)
+ return 540000;
+ else if (max_pixclk > 337500)
+ return 450000;
+ else
+ return 337500;
+}
+
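/*
 * broadwell_calc_cdclk() above factors the threshold table out of
 * broadwell_modeset_calc_cdclk() so the "no active crtcs" path below can
 * reuse it. A self-contained check of the mapping (values copied from the
 * function above):
 */
#include <assert.h>

static int bdw_calc_cdclk_sketch(int max_pixclk)
{
	if (max_pixclk > 540000)
		return 675000;
	else if (max_pixclk > 450000)
		return 540000;
	else if (max_pixclk > 337500)
		return 450000;
	else
		return 337500;
}

static void bdw_calc_cdclk_selftest(void)
{
	assert(bdw_calc_cdclk_sketch(0) == 337500);      /* idle: minimum cdclk */
	assert(bdw_calc_cdclk_sketch(337501) == 450000); /* just past a threshold */
	assert(bdw_calc_cdclk_sketch(600000) == 675000); /* top bin */
}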
static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->dev);
@@ -9720,14 +9847,7 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
* FIXME should also account for plane ratio
* once 64bpp pixel formats are supported.
*/
- if (max_pixclk > 540000)
- cdclk = 675000;
- else if (max_pixclk > 450000)
- cdclk = 540000;
- else if (max_pixclk > 337500)
- cdclk = 450000;
- else
- cdclk = 337500;
+ cdclk = broadwell_calc_cdclk(max_pixclk);
if (cdclk > dev_priv->max_cdclk_freq) {
DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
@@ -9737,7 +9857,7 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
intel_state->cdclk = intel_state->dev_cdclk = cdclk;
if (!intel_state->active_crtcs)
- intel_state->dev_cdclk = 337500;
+ intel_state->dev_cdclk = broadwell_calc_cdclk(0);
return 0;
}
@@ -9752,13 +9872,51 @@ static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
broadwell_set_cdclk(dev, req_cdclk);
}
+static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
+{
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct drm_i915_private *dev_priv = to_i915(state->dev);
+ const int max_pixclk = ilk_max_pixel_rate(state);
+ int vco = intel_state->cdclk_pll_vco;
+ int cdclk;
+
+ /*
+ * FIXME should also account for plane ratio
+ * once 64bpp pixel formats are supported.
+ */
+ cdclk = skl_calc_cdclk(max_pixclk, vco);
+
+ /*
+	 * FIXME move the cdclk calculation to
+	 * compute_config() so we can fail gracefully.
+ */
+ if (cdclk > dev_priv->max_cdclk_freq) {
+ DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
+ cdclk, dev_priv->max_cdclk_freq);
+ cdclk = dev_priv->max_cdclk_freq;
+ }
+
+ intel_state->cdclk = intel_state->dev_cdclk = cdclk;
+ if (!intel_state->active_crtcs)
+ intel_state->dev_cdclk = skl_calc_cdclk(0, vco);
+
+ return 0;
+}
+
+static void skl_modeset_commit_cdclk(struct drm_atomic_state *old_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(old_state->dev);
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(old_state);
+ unsigned int req_cdclk = intel_state->dev_cdclk;
+ unsigned int req_vco = intel_state->cdclk_pll_vco;
+
+ skl_set_cdclk(dev_priv, req_cdclk, req_vco);
+}
+
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
- struct intel_encoder *intel_encoder =
- intel_ddi_get_crtc_new_encoder(crtc_state);
-
- if (intel_encoder->type != INTEL_OUTPUT_DSI) {
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
if (!intel_ddi_pll_select(crtc, crtc_state))
return -EINVAL;
}
@@ -9868,10 +10026,14 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
unsigned long *power_domain_mask)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
u32 tmp;
+ /*
+ * The pipe->transcoder mapping is fixed with the exception of the eDP
+ * transcoder handled below.
+ */
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
/*
@@ -9915,14 +10077,12 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
unsigned long *power_domain_mask)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
enum port port;
enum transcoder cpu_transcoder;
u32 tmp;
- pipe_config->has_dsi_encoder = false;
-
for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
if (port == PORT_A)
cpu_transcoder = TRANSCODER_DSI_A;
@@ -9954,18 +10114,17 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
continue;
pipe_config->cpu_transcoder = cpu_transcoder;
- pipe_config->has_dsi_encoder = true;
break;
}
- return pipe_config->has_dsi_encoder;
+ return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_shared_dpll *pll;
enum port port;
uint32_t tmp;
@@ -10008,7 +10167,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
unsigned long power_domain_mask;
bool active;
@@ -10022,18 +10181,16 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);
- if (IS_BROXTON(dev_priv)) {
- bxt_get_dsi_transcoder_state(crtc, pipe_config,
- &power_domain_mask);
- WARN_ON(active && pipe_config->has_dsi_encoder);
- if (pipe_config->has_dsi_encoder)
- active = true;
+ if (IS_BROXTON(dev_priv) &&
+ bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
+ WARN_ON(active);
+ active = true;
}
if (!active)
goto out;
- if (!pipe_config->has_dsi_encoder) {
+ if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
haswell_get_ddi_port_state(crtc, pipe_config);
intel_get_pipe_timings(crtc, pipe_config);
}
@@ -10084,7 +10241,7 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t cntl = 0, size = 0;
@@ -10147,7 +10304,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
uint32_t cntl = 0;
@@ -10195,7 +10352,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 base = intel_crtc->cursor_addr;
@@ -10339,10 +10496,10 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
struct drm_i915_gem_object *obj;
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
- obj = i915_gem_alloc_object(dev,
+ obj = i915_gem_object_create(dev,
intel_framebuffer_size_for_mode(mode, bpp));
- if (obj == NULL)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
mode_cmd.width = mode->hdisplay;
mode_cmd.height = mode->vdisplay;
@@ -10362,7 +10519,7 @@ mode_fits_in_fbdev(struct drm_device *dev,
struct drm_display_mode *mode)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
struct drm_framebuffer *fb;
@@ -10632,7 +10789,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
static int i9xx_pll_refclk(struct drm_device *dev,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 dpll = pipe_config->dpll_hw_state.dpll;
if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
@@ -10650,11 +10807,11 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe = pipe_config->cpu_transcoder;
u32 dpll = pipe_config->dpll_hw_state.dpll;
u32 fp;
- intel_clock_t clock;
+ struct dpll clock;
int port_clock;
int refclk = i9xx_pll_refclk(dev, pipe_config);
@@ -10776,7 +10933,7 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
struct drm_display_mode *mode;
@@ -10828,48 +10985,20 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
return mode;
}
-void intel_mark_busy(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->mm.busy)
- return;
-
- intel_runtime_pm_get(dev_priv);
- i915_update_gfx_val(dev_priv);
- if (INTEL_INFO(dev)->gen >= 6)
- gen6_rps_busy(dev_priv);
- dev_priv->mm.busy = true;
-}
-
-void intel_mark_idle(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!dev_priv->mm.busy)
- return;
-
- dev_priv->mm.busy = false;
-
- if (INTEL_INFO(dev)->gen >= 6)
- gen6_rps_idle(dev->dev_private);
-
- intel_runtime_pm_put(dev_priv);
-}
-
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct intel_unpin_work *work;
+ struct intel_flip_work *work;
spin_lock_irq(&dev->event_lock);
- work = intel_crtc->unpin_work;
- intel_crtc->unpin_work = NULL;
+ work = intel_crtc->flip_work;
+ intel_crtc->flip_work = NULL;
spin_unlock_irq(&dev->event_lock);
if (work) {
- cancel_work_sync(&work->work);
+ cancel_work_sync(&work->mmio_work);
+ cancel_work_sync(&work->unpin_work);
kfree(work);
}
@@ -10880,12 +11009,15 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
static void intel_unpin_work_fn(struct work_struct *__work)
{
- struct intel_unpin_work *work =
- container_of(__work, struct intel_unpin_work, work);
+ struct intel_flip_work *work =
+ container_of(__work, struct intel_flip_work, unpin_work);
struct intel_crtc *crtc = to_intel_crtc(work->crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_plane *primary = crtc->base.primary;
+ if (is_mmio_work(work))
+ flush_work(&work->mmio_work);
+
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
drm_gem_object_unreference(&work->pending_flip_obj->base);
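/*
 * is_mmio_work(), used above, is not part of this hunk; a minimal sketch of
 * the assumed test -- a flip is MMIO-driven iff its mmio_work was set up --
 * would be:
 *
 *	static bool is_mmio_work(struct intel_flip_work *work)
 *	{
 *		return work->mmio_work.func != NULL;
 *	}
 */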
@@ -10904,63 +11036,17 @@ static void intel_unpin_work_fn(struct work_struct *__work)
kfree(work);
}
-static void do_intel_finish_page_flip(struct drm_device *dev,
- struct drm_crtc *crtc)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_unpin_work *work;
- unsigned long flags;
-
- /* Ignore early vblank irqs */
- if (intel_crtc == NULL)
- return;
-
- /*
- * This is called both by irq handlers and the reset code (to complete
- * lost pageflips) so needs the full irqsave spinlocks.
- */
- spin_lock_irqsave(&dev->event_lock, flags);
- work = intel_crtc->unpin_work;
-
- /* Ensure we don't miss a work->pending update ... */
- smp_rmb();
-
- if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
- spin_unlock_irqrestore(&dev->event_lock, flags);
- return;
- }
-
- page_flip_completed(intel_crtc);
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
-void intel_finish_page_flip(struct drm_device *dev, int pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-
- do_intel_finish_page_flip(dev, crtc);
-}
-
-void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
-
- do_intel_finish_page_flip(dev, crtc);
-}
-
/* Is 'a' after or equal to 'b'? */
static bool g4x_flip_count_after_eq(u32 a, u32 b)
{
return !((a - b) & 0x80000000);
}
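/*
 * g4x_flip_count_after_eq() relies on unsigned wraparound: a is "after or
 * equal to" b iff the 32-bit difference a - b has the sign bit clear. A
 * self-contained demonstration, including the wraparound case:
 */
#include <assert.h>
#include <stdint.h>

static int flip_count_after_eq_sketch(uint32_t a, uint32_t b)
{
	return !((a - b) & 0x80000000);
}

static void flip_count_selftest(void)
{
	assert(flip_count_after_eq_sketch(5, 5));    /* equal */
	assert(flip_count_after_eq_sketch(6, 5));    /* strictly after */
	assert(!flip_count_after_eq_sketch(5, 6));   /* before */
	/* 0x00000002 counts as "after" 0xfffffffe across the wrap */
	assert(flip_count_after_eq_sketch(0x00000002u, 0xfffffffeu));
}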
-static bool page_flip_finished(struct intel_crtc *crtc)
+static bool __pageflip_finished_cs(struct intel_crtc *crtc,
+ struct intel_flip_work *work)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
unsigned reset_counter;
reset_counter = i915_reset_counter(&dev_priv->gpu_error);
@@ -10999,40 +11085,103 @@ static bool page_flip_finished(struct intel_crtc *crtc)
* anyway, we don't really care.
*/
return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
- crtc->unpin_work->gtt_offset &&
+ crtc->flip_work->gtt_offset &&
g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
- crtc->unpin_work->flip_count);
+ crtc->flip_work->flip_count);
}
-void intel_prepare_page_flip(struct drm_device *dev, int plane)
+static bool
+__pageflip_finished_mmio(struct intel_crtc *crtc,
+ struct intel_flip_work *work)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
+ /*
+ * MMIO work completes when vblank is different from
+ * flip_queued_vblank.
+ *
+	 * Reset counter value doesn't matter; this is handled by
+ * i915_wait_request finishing early, so no need to handle
+ * reset here.
+ */
+ return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank;
+}
+
+
+static bool pageflip_finished(struct intel_crtc *crtc,
+ struct intel_flip_work *work)
+{
+ if (!atomic_read(&work->pending))
+ return false;
+
+ smp_rmb();
+
+ if (is_mmio_work(work))
+ return __pageflip_finished_mmio(crtc, work);
+ else
+ return __pageflip_finished_cs(crtc, work);
+}
+
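/*
 * The atomic_read(&work->pending) + smp_rmb() in pageflip_finished() pairs
 * with the smp_mb__before_atomic() + atomic_set() in
 * intel_mark_page_flip_active() further down: flip_queued_vblank must be
 * visible before pending reads as 1. A self-contained C11 analogue of that
 * publish/consume pattern (release/acquire in place of explicit barriers):
 */
#include <stdatomic.h>
#include <stdbool.h>

struct flip_work_sketch {
	unsigned int flip_queued_vblank;
	atomic_int pending;
};

static void mark_active_sketch(struct flip_work_sketch *w, unsigned int vblank)
{
	w->flip_queued_vblank = vblank;               /* write payload first */
	atomic_store_explicit(&w->pending, 1,
			      memory_order_release);  /* then publish */
}

static bool finished_sketch(struct flip_work_sketch *w, unsigned int vblank)
{
	if (!atomic_load_explicit(&w->pending, memory_order_acquire))
		return false;                         /* not published yet */
	/* payload writes are now guaranteed visible */
	return vblank != w->flip_queued_vblank;
}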
+void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
+{
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_flip_work *work;
unsigned long flags;
+ /* Ignore early vblank irqs */
+ if (!crtc)
+ return;
/*
* This is called both by irq handlers and the reset code (to complete
* lost pageflips) so needs the full irqsave spinlocks.
- *
- * NB: An MMIO update of the plane base pointer will also
- * generate a page-flip completion irq, i.e. every modeset
- * is also accompanied by a spurious intel_prepare_page_flip().
*/
spin_lock_irqsave(&dev->event_lock, flags);
- if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
- atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
+ work = intel_crtc->flip_work;
+
+ if (work != NULL &&
+ !is_mmio_work(work) &&
+ pageflip_finished(intel_crtc, work))
+ page_flip_completed(intel_crtc);
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
+{
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_flip_work *work;
+ unsigned long flags;
+
+ /* Ignore early vblank irqs */
+ if (!crtc)
+ return;
+
+ /*
+ * This is called both by irq handlers and the reset code (to complete
+ * lost pageflips) so needs the full irqsave spinlocks.
+ */
+ spin_lock_irqsave(&dev->event_lock, flags);
+ work = intel_crtc->flip_work;
+
+ if (work != NULL &&
+ is_mmio_work(work) &&
+ pageflip_finished(intel_crtc, work))
+ page_flip_completed(intel_crtc);
+
spin_unlock_irqrestore(&dev->event_lock, flags);
}
-static inline void intel_mark_page_flip_active(struct intel_unpin_work *work)
+static inline void intel_mark_page_flip_active(struct intel_crtc *crtc,
+ struct intel_flip_work *work)
{
+ work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc);
+
/* Ensure that the work item is consistent when activating it ... */
- smp_wmb();
- atomic_set(&work->pending, INTEL_FLIP_PENDING);
- /* and that it is marked active as soon as the irq could fire. */
- smp_wmb();
+ smp_mb__before_atomic();
+ atomic_set(&work->pending, 1);
}
static int intel_gen2_queue_flip(struct drm_device *dev,
@@ -11063,10 +11212,9 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
intel_ring_emit(engine, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(engine, fb->pitches[0]);
- intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+ intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
intel_ring_emit(engine, 0); /* aux display base address, unused */
- intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
}
@@ -11095,10 +11243,9 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(engine, fb->pitches[0]);
- intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+ intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
intel_ring_emit(engine, MI_NOOP);
- intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
}
@@ -11110,7 +11257,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
uint32_t flags)
{
struct intel_engine_cs *engine = req->engine;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
int ret;
@@ -11126,7 +11273,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
intel_ring_emit(engine, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(engine, fb->pitches[0]);
- intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset |
+ intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset |
obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
@@ -11137,7 +11284,6 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
intel_ring_emit(engine, pf | pipesrc);
- intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
}
@@ -11149,7 +11295,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
uint32_t flags)
{
struct intel_engine_cs *engine = req->engine;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
int ret;
@@ -11161,7 +11307,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
intel_ring_emit(engine, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
- intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+ intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
/* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page
@@ -11173,7 +11319,6 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
intel_ring_emit(engine, pf | pipesrc);
- intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
}
@@ -11265,16 +11410,17 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
- intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+ intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
intel_ring_emit(engine, (MI_NOOP));
- intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
}
static bool use_mmio_flip(struct intel_engine_cs *engine,
struct drm_i915_gem_object *obj)
{
+ struct reservation_object *resv;
+
/*
* This is not being used for older platforms, because
* non-availability of flip done interrupt forces us to use
@@ -11286,7 +11432,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
if (engine == NULL)
return true;
- if (INTEL_INFO(engine->dev)->gen < 5)
+ if (INTEL_GEN(engine->i915) < 5)
return false;
if (i915.use_mmio_flip < 0)
@@ -11295,20 +11441,20 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
return true;
else if (i915.enable_execlists)
return true;
- else if (obj->base.dma_buf &&
- !reservation_object_test_signaled_rcu(obj->base.dma_buf->resv,
- false))
+
+ resv = i915_gem_object_get_dmabuf_resv(obj);
+ if (resv && !reservation_object_test_signaled_rcu(resv, false))
return true;
- else
- return engine != i915_gem_request_get_engine(obj->last_write_req);
+
+ return engine != i915_gem_request_get_engine(obj->last_write_req);
}
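/*
 * i915_gem_object_get_dmabuf_resv() replaces the open-coded
 * obj->base.dma_buf->resv access removed above. A minimal sketch of the
 * assumed helper (NULL when the object is not dmabuf-backed):
 *
 *	static struct reservation_object *
 *	i915_gem_object_get_dmabuf_resv(struct drm_i915_gem_object *obj)
 *	{
 *		struct dma_buf *dma_buf = obj->base.dma_buf;
 *
 *		return dma_buf ? dma_buf->resv : NULL;
 *	}
 */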
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
unsigned int rotation,
- struct intel_unpin_work *work)
+ struct intel_flip_work *work)
{
struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
const enum pipe pipe = intel_crtc->pipe;
u32 ctl, stride, tile_height;
@@ -11357,10 +11503,10 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
}
static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
- struct intel_unpin_work *work)
+ struct intel_flip_work *work)
{
struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_framebuffer *intel_fb =
to_intel_framebuffer(intel_crtc->base.primary->fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
@@ -11380,78 +11526,37 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
POSTING_READ(DSPSURF(intel_crtc->plane));
}
-/*
- * XXX: This is the temporary way to update the plane registers until we get
- * around to using the usual plane update functions for MMIO flips
- */
-static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
-{
- struct intel_crtc *crtc = mmio_flip->crtc;
- struct intel_unpin_work *work;
-
- spin_lock_irq(&crtc->base.dev->event_lock);
- work = crtc->unpin_work;
- spin_unlock_irq(&crtc->base.dev->event_lock);
- if (work == NULL)
- return;
-
- intel_mark_page_flip_active(work);
-
- intel_pipe_update_start(crtc);
-
- if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
- skl_do_mmio_flip(crtc, mmio_flip->rotation, work);
- else
- /* use_mmio_flip() retricts MMIO flips to ilk+ */
- ilk_do_mmio_flip(crtc, work);
-
- intel_pipe_update_end(crtc);
-}
-
-static void intel_mmio_flip_work_func(struct work_struct *work)
+static void intel_mmio_flip_work_func(struct work_struct *w)
{
- struct intel_mmio_flip *mmio_flip =
- container_of(work, struct intel_mmio_flip, work);
+ struct intel_flip_work *work =
+ container_of(w, struct intel_flip_work, mmio_work);
+ struct intel_crtc *crtc = to_intel_crtc(work->crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_framebuffer *intel_fb =
- to_intel_framebuffer(mmio_flip->crtc->base.primary->fb);
+ to_intel_framebuffer(crtc->base.primary->fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct reservation_object *resv;
- if (mmio_flip->req) {
- WARN_ON(__i915_wait_request(mmio_flip->req,
+ if (work->flip_queued_req)
+ WARN_ON(__i915_wait_request(work->flip_queued_req,
false, NULL,
- &mmio_flip->i915->rps.mmioflips));
- i915_gem_request_unreference__unlocked(mmio_flip->req);
- }
+ &dev_priv->rps.mmioflips));
/* For framebuffer backed by dmabuf, wait for fence */
- if (obj->base.dma_buf)
- WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
- false, false,
+ resv = i915_gem_object_get_dmabuf_resv(obj);
+ if (resv)
+ WARN_ON(reservation_object_wait_timeout_rcu(resv, false, false,
MAX_SCHEDULE_TIMEOUT) < 0);
- intel_do_mmio_flip(mmio_flip);
- kfree(mmio_flip);
-}
-
-static int intel_queue_mmio_flip(struct drm_device *dev,
- struct drm_crtc *crtc,
- struct drm_i915_gem_object *obj)
-{
- struct intel_mmio_flip *mmio_flip;
-
- mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL);
- if (mmio_flip == NULL)
- return -ENOMEM;
-
- mmio_flip->i915 = to_i915(dev);
- mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
- mmio_flip->crtc = to_intel_crtc(crtc);
- mmio_flip->rotation = crtc->primary->state->rotation;
+ intel_pipe_update_start(crtc);
- INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
- schedule_work(&mmio_flip->work);
+ if (INTEL_GEN(dev_priv) >= 9)
+ skl_do_mmio_flip(crtc, work->rotation, work);
+ else
+		/* use_mmio_flip() restricts MMIO flips to ilk+ */
+ ilk_do_mmio_flip(crtc, work);
- return 0;
+ intel_pipe_update_end(crtc, work);
}
static int intel_default_queue_flip(struct drm_device *dev,
@@ -11464,37 +11569,32 @@ static int intel_default_queue_flip(struct drm_device *dev,
return -ENODEV;
}
-static bool __intel_pageflip_stall_check(struct drm_device *dev,
- struct drm_crtc *crtc)
+static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
+ struct intel_crtc *intel_crtc,
+ struct intel_flip_work *work)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_unpin_work *work = intel_crtc->unpin_work;
- u32 addr;
-
- if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
- return true;
+ u32 addr, vblank;
- if (atomic_read(&work->pending) < INTEL_FLIP_PENDING)
+ if (!atomic_read(&work->pending))
return false;
- if (!work->enable_stall_check)
- return false;
+ smp_rmb();
+ vblank = intel_crtc_get_vblank_counter(intel_crtc);
if (work->flip_ready_vblank == 0) {
if (work->flip_queued_req &&
- !i915_gem_request_completed(work->flip_queued_req, true))
+ !i915_gem_request_completed(work->flip_queued_req))
return false;
- work->flip_ready_vblank = drm_crtc_vblank_count(crtc);
+ work->flip_ready_vblank = vblank;
}
- if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3)
+ if (vblank - work->flip_ready_vblank < 3)
return false;
/* Potential stall - if we see that the flip has happened,
* assume a missed interrupt. */
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
else
addr = I915_READ(DSPADDR(intel_crtc->plane));
@@ -11506,12 +11606,12 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev,
return addr == work->gtt_offset;
}
-void intel_check_page_flip(struct drm_device *dev, int pipe)
+void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_device *dev = &dev_priv->drm;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_unpin_work *work;
+ struct intel_flip_work *work;
WARN_ON(!in_interrupt());
@@ -11519,16 +11619,20 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
return;
spin_lock(&dev->event_lock);
- work = intel_crtc->unpin_work;
- if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) {
- WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
- work->flip_queued_vblank, drm_vblank_count(dev, pipe));
+ work = intel_crtc->flip_work;
+
+ if (work != NULL && !is_mmio_work(work) &&
+ __pageflip_stall_check_cs(dev_priv, intel_crtc, work)) {
+ WARN_ONCE(1,
+ "Kicking stuck page flip: queued at %d, now %d\n",
+ work->flip_queued_vblank, intel_crtc_get_vblank_counter(intel_crtc));
page_flip_completed(intel_crtc);
work = NULL;
}
- if (work != NULL &&
- drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1)
- intel_queue_rps_boost_for_request(dev, work->flip_queued_req);
+
+ if (work != NULL && !is_mmio_work(work) &&
+ intel_crtc_get_vblank_counter(intel_crtc) - work->flip_queued_vblank > 1)
+ intel_queue_rps_boost_for_request(work->flip_queued_req);
spin_unlock(&dev->event_lock);
}
@@ -11538,13 +11642,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
uint32_t page_flip_flags)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_framebuffer *old_fb = crtc->primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_plane *primary = crtc->primary;
enum pipe pipe = intel_crtc->pipe;
- struct intel_unpin_work *work;
+ struct intel_flip_work *work;
struct intel_engine_cs *engine;
bool mmio_flip;
struct drm_i915_gem_request *request = NULL;
@@ -11581,19 +11685,19 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->event = event;
work->crtc = crtc;
work->old_fb = old_fb;
- INIT_WORK(&work->work, intel_unpin_work_fn);
+ INIT_WORK(&work->unpin_work, intel_unpin_work_fn);
ret = drm_crtc_vblank_get(crtc);
if (ret)
goto free_work;
- /* We borrow the event spin lock for protecting unpin_work */
+ /* We borrow the event spin lock for protecting flip_work */
spin_lock_irq(&dev->event_lock);
- if (intel_crtc->unpin_work) {
+ if (intel_crtc->flip_work) {
/* Before declaring the flip queue wedged, check if
* the hardware completed the operation behind our backs.
*/
- if (__intel_pageflip_stall_check(dev, crtc)) {
+ if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) {
DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
page_flip_completed(intel_crtc);
} else {
@@ -11605,7 +11709,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return -EBUSY;
}
}
- intel_crtc->unpin_work = work;
+ intel_crtc->flip_work = work;
spin_unlock_irq(&dev->event_lock);
if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
@@ -11617,7 +11721,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
crtc->primary->fb = fb;
update_state_fb(crtc->primary);
- intel_fbc_pre_update(intel_crtc);
+
+ intel_fbc_pre_update(intel_crtc, intel_crtc->config,
+ to_intel_plane_state(primary->state));
work->pending_flip_obj = obj;
@@ -11660,6 +11766,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
*/
if (!mmio_flip) {
ret = i915_gem_object_sync(obj, engine, &request);
+ if (!ret && !request) {
+ request = i915_gem_request_alloc(engine, NULL);
+ ret = PTR_ERR_OR_ZERO(request);
+ }
+
if (ret)
goto cleanup_pending;
}
@@ -11671,38 +11782,28 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
obj, 0);
work->gtt_offset += intel_crtc->dspaddr_offset;
+ work->rotation = crtc->primary->state->rotation;
if (mmio_flip) {
- ret = intel_queue_mmio_flip(dev, crtc, obj);
- if (ret)
- goto cleanup_unpin;
+ INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
i915_gem_request_assign(&work->flip_queued_req,
obj->last_write_req);
- } else {
- if (!request) {
- request = i915_gem_request_alloc(engine, NULL);
- if (IS_ERR(request)) {
- ret = PTR_ERR(request);
- goto cleanup_unpin;
- }
- }
+ schedule_work(&work->mmio_work);
+ } else {
+ i915_gem_request_assign(&work->flip_queued_req, request);
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
page_flip_flags);
if (ret)
goto cleanup_unpin;
- i915_gem_request_assign(&work->flip_queued_req, request);
- }
+ intel_mark_page_flip_active(intel_crtc, work);
- if (request)
i915_add_request_no_flush(request);
+ }
- work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
- work->enable_stall_check = true;
-
- i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
+ i915_gem_track_fb(intel_fb_obj(old_fb), obj,
to_intel_plane(primary)->frontbuffer_bit);
mutex_unlock(&dev->struct_mutex);
@@ -11728,7 +11829,7 @@ cleanup:
drm_framebuffer_unreference(work->old_fb);
spin_lock_irq(&dev->event_lock);
- intel_crtc->unpin_work = NULL;
+ intel_crtc->flip_work = NULL;
spin_unlock_irq(&dev->event_lock);
drm_crtc_vblank_put(crtc);
@@ -11830,15 +11931,14 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane_state *old_plane_state =
to_intel_plane_state(plane->state);
- int idx = intel_crtc->base.base.id, ret;
bool mode_changed = needs_modeset(crtc_state);
bool was_crtc_enabled = crtc->state->active;
bool is_crtc_enabled = crtc_state->active;
bool turn_off, turn_on, visible, was_visible;
struct drm_framebuffer *fb = plane_state->fb;
+ int ret;
- if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
- plane->type != DRM_PLANE_TYPE_CURSOR) {
+ if (INTEL_GEN(dev) >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) {
ret = skl_update_scaler_plane(
to_intel_crtc_state(crtc_state),
to_intel_plane_state(plane_state));
@@ -11856,6 +11956,11 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
* Visibility is calculated as if the crtc was on, but
* after scaler setup everything depends on it being off
* when the crtc isn't active.
+ *
+ * FIXME this is wrong for watermarks. Watermarks should also
+ * be computed as if the pipe would be active. Perhaps move
+ * per-plane wm computation to the .check_plane() hook, and
+ * only combine the results from all planes in the current place?
*/
if (!is_crtc_enabled)
to_intel_plane_state(plane_state)->visible = visible = false;
@@ -11869,11 +11974,15 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
turn_off = was_visible && (!visible || mode_changed);
turn_on = visible && (!was_visible || mode_changed);
- DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx,
- plane->base.id, fb ? fb->base.id : -1);
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
+ intel_crtc->base.base.id,
+ intel_crtc->base.name,
+ plane->base.id, plane->name,
+ fb ? fb->base.id : -1);
- DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n",
- plane->base.id, was_visible, visible,
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
+ plane->base.id, plane->name,
+ was_visible, visible,
turn_off, turn_on, mode_changed);
if (turn_on) {
@@ -11946,31 +12055,11 @@ static bool check_single_encoder_cloning(struct drm_atomic_state *state,
return true;
}
-static bool check_encoder_cloning(struct drm_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct intel_encoder *encoder;
- struct drm_connector *connector;
- struct drm_connector_state *connector_state;
- int i;
-
- for_each_connector_in_state(state, connector, connector_state, i) {
- if (connector_state->crtc != &crtc->base)
- continue;
-
- encoder = to_intel_encoder(connector_state->best_encoder);
- if (!check_single_encoder_cloning(state, crtc, encoder))
- return false;
- }
-
- return true;
-}
-
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc_state);
@@ -11978,11 +12067,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
int ret;
bool mode_changed = needs_modeset(crtc_state);
- if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
- DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
- return -EINVAL;
- }
-
if (mode_changed && !crtc_state->active)
pipe_config->update_wm_post = true;
@@ -12035,7 +12119,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
}
} else if (dev_priv->display.compute_intermediate_wm) {
if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
- pipe_config->wm.intermediate = pipe_config->wm.optimal.ilk;
+ pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
}
if (INTEL_INFO(dev)->gen >= 9) {
@@ -12160,7 +12244,8 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
struct intel_plane_state *state;
struct drm_framebuffer *fb;
- DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id,
+ DRM_DEBUG_KMS("[CRTC:%d:%s]%s config %p for pipe %c\n",
+ crtc->base.base.id, crtc->base.name,
context, pipe_config, pipe_name(crtc->pipe));
DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder));
@@ -12173,14 +12258,14 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
pipe_config->fdi_m_n.tu);
DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
- pipe_config->has_dp_encoder,
+ intel_crtc_has_dp_encoder(pipe_config),
pipe_config->lane_count,
pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
pipe_config->dp_m_n.tu);
DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
- pipe_config->has_dp_encoder,
+ intel_crtc_has_dp_encoder(pipe_config),
pipe_config->lane_count,
pipe_config->dp_m2_n2.gmch_m,
pipe_config->dp_m2_n2.gmch_n,
@@ -12261,29 +12346,24 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
state = to_intel_plane_state(plane->state);
fb = state->base.fb;
if (!fb) {
- DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d "
- "disabled, scaler_id = %d\n",
- plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
- plane->base.id, intel_plane->pipe,
- (crtc->base.primary == plane) ? 0 : intel_plane->plane + 1,
- drm_plane_index(plane), state->scaler_id);
+ DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
+ plane->base.id, plane->name, state->scaler_id);
continue;
}
- DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled",
- plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
- plane->base.id, intel_plane->pipe,
- crtc->base.primary == plane ? 0 : intel_plane->plane + 1,
- drm_plane_index(plane));
- DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x",
- fb->base.id, fb->width, fb->height, fb->pixel_format);
- DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n",
- state->scaler_id,
- state->src.x1 >> 16, state->src.y1 >> 16,
- drm_rect_width(&state->src) >> 16,
- drm_rect_height(&state->src) >> 16,
- state->dst.x1, state->dst.y1,
- drm_rect_width(&state->dst), drm_rect_height(&state->dst));
+ DRM_DEBUG_KMS("[PLANE:%d:%s] enabled",
+ plane->base.id, plane->name);
+ DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = %s",
+ fb->base.id, fb->width, fb->height,
+ drm_get_format_name(fb->pixel_format));
+ DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
+ state->scaler_id,
+ state->src.x1 >> 16, state->src.y1 >> 16,
+ drm_rect_width(&state->src) >> 16,
+ drm_rect_height(&state->src) >> 16,
+ state->dst.x1, state->dst.y1,
+ drm_rect_width(&state->dst),
+ drm_rect_height(&state->dst));
}
}
@@ -12318,7 +12398,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
case INTEL_OUTPUT_UNKNOWN:
if (WARN_ON(!HAS_DDI(dev)))
break;
- case INTEL_OUTPUT_DISPLAYPORT:
+ case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_EDP:
port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
@@ -12415,6 +12495,24 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
&pipe_config->pipe_src_w,
&pipe_config->pipe_src_h);
+ for_each_connector_in_state(state, connector, connector_state, i) {
+ if (connector_state->crtc != crtc)
+ continue;
+
+ encoder = to_intel_encoder(connector_state->best_encoder);
+
+ if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
+ DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
+ goto fail;
+ }
+
+ /*
+ * Determine output_types before calling the .compute_config()
+ * hooks so that the hooks can use this information safely.
+ */
+ pipe_config->output_types |= 1 << encoder->type;
+ }
+
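/*
 * output_types keeps one bit per encoder type, so checks used elsewhere in
 * this patch (intel_crtc_has_type(), intel_crtc_has_dp_encoder(),
 * transcoder_is_dsi() on the chosen transcoder) reduce to mask tests. A
 * minimal sketch of the assumed accessor:
 *
 *	static bool intel_crtc_has_type(const struct intel_crtc_state *state,
 *					enum intel_output_type type)
 *	{
 *		return state->output_types & (1 << type);
 *	}
 */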
encoder_retry:
/* Ensure the port clock defaults are reset when retrying. */
pipe_config->port_clock = 0;
@@ -12700,8 +12798,8 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(fdi_lanes);
PIPE_CONF_CHECK_M_N(fdi_m_n);
- PIPE_CONF_CHECK_I(has_dp_encoder);
PIPE_CONF_CHECK_I(lane_count);
+ PIPE_CONF_CHECK_X(lane_lat_optim_mask);
if (INTEL_INFO(dev)->gen < 8) {
PIPE_CONF_CHECK_M_N(dp_m_n);
@@ -12711,7 +12809,7 @@ intel_pipe_config_compare(struct drm_device *dev,
} else
PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
- PIPE_CONF_CHECK_I(has_dsi_encoder);
+ PIPE_CONF_CHECK_X(output_types);
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
@@ -12830,7 +12928,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
struct drm_crtc_state *new_state)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct skl_ddb_allocation hw_ddb, *sw_ddb;
struct skl_ddb_entry *hw_entry, *sw_entry;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -12936,7 +13034,7 @@ verify_crtc_state(struct drm_crtc *crtc,
struct drm_crtc_state *new_crtc_state)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *pipe_config, *sw_config;
@@ -12950,7 +13048,7 @@ verify_crtc_state(struct drm_crtc *crtc,
pipe_config->base.crtc = crtc;
pipe_config->base.state = old_state;
- DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+ DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
@@ -12979,8 +13077,10 @@ verify_crtc_state(struct drm_crtc *crtc,
"Encoder connected to wrong pipe %c\n",
pipe_name(pipe));
- if (active)
+ if (active) {
+ pipe_config->output_types |= 1 << encoder->type;
encoder->get_config(encoder, pipe_config);
+ }
}
if (!new_crtc_state->active)
@@ -13059,7 +13159,7 @@ verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state,
struct drm_crtc_state *new_crtc_state)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
@@ -13098,7 +13198,7 @@ intel_modeset_verify_crtc(struct drm_crtc *crtc,
static void
verify_disabled_dpll_state(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int i;
for (i = 0; i < dev_priv->num_shared_dpll; i++)
@@ -13145,7 +13245,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
crtc->scanline_offset = vtotal - 1;
} else if (HAS_DDI(dev) &&
- intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
+ intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
crtc->scanline_offset = 2;
} else
crtc->scanline_offset = 1;
@@ -13280,7 +13380,7 @@ static int intel_modeset_all_pipes(struct drm_atomic_state *state)
static int intel_modeset_checks(struct drm_atomic_state *state)
{
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_i915_private *dev_priv = state->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(state->dev);
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
int ret = 0, i;
@@ -13298,6 +13398,9 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
intel_state->active_crtcs |= 1 << i;
else
intel_state->active_crtcs &= ~(1 << i);
+
+ if (crtc_state->active != crtc->state->active)
+ intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
}
/*
@@ -13308,9 +13411,17 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
* adjusted_mode bits in the crtc directly.
*/
if (dev_priv->display.modeset_calc_cdclk) {
+ if (!intel_state->cdclk_pll_vco)
+ intel_state->cdclk_pll_vco = dev_priv->cdclk_pll.vco;
+ if (!intel_state->cdclk_pll_vco)
+ intel_state->cdclk_pll_vco = dev_priv->skl_preferred_vco_freq;
+
ret = dev_priv->display.modeset_calc_cdclk(state);
+ if (ret < 0)
+ return ret;
- if (!ret && intel_state->dev_cdclk != dev_priv->cdclk_freq)
+ if (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
+ intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco)
ret = intel_modeset_all_pipes(state);
if (ret < 0)
@@ -13334,38 +13445,16 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
* phase. The code here should be run after the per-crtc and per-plane 'check'
* handlers to ensure that all derived state has been updated.
*/
-static void calc_watermark_data(struct drm_atomic_state *state)
+static int calc_watermark_data(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_crtc *crtc;
- struct drm_crtc_state *cstate;
- struct drm_plane *plane;
- struct drm_plane_state *pstate;
-
- /*
- * Calculate watermark configuration details now that derived
- * plane/crtc state is all properly updated.
- */
- drm_for_each_crtc(crtc, dev) {
- cstate = drm_atomic_get_existing_crtc_state(state, crtc) ?:
- crtc->state;
-
- if (cstate->active)
- intel_state->wm_config.num_pipes_active++;
- }
- drm_for_each_legacy_plane(plane, dev) {
- pstate = drm_atomic_get_existing_plane_state(state, plane) ?:
- plane->state;
+ struct drm_i915_private *dev_priv = to_i915(dev);
- if (!to_intel_plane_state(pstate)->visible)
- continue;
+ /* Is there platform-specific watermark information to calculate? */
+ if (dev_priv->display.compute_global_watermarks)
+ return dev_priv->display.compute_global_watermarks(state);
- intel_state->wm_config.sprites_enabled = true;
- if (pstate->crtc_w != pstate->src_w >> 16 ||
- pstate->crtc_h != pstate->src_h >> 16)
- intel_state->wm_config.sprites_scaled = true;
- }
+ return 0;
}
/**
@@ -13395,14 +13484,13 @@ static int intel_atomic_check(struct drm_device *dev,
if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
crtc_state->mode_changed = true;
- if (!crtc_state->enable) {
- if (needs_modeset(crtc_state))
- any_ms = true;
+ if (!needs_modeset(crtc_state))
continue;
- }
- if (!needs_modeset(crtc_state))
+ if (!crtc_state->enable) {
+ any_ms = true;
continue;
+ }
/* FIXME: For only active_changed we shouldn't need to do any
* state recomputation at all. */
@@ -13412,8 +13500,11 @@ static int intel_atomic_check(struct drm_device *dev,
return ret;
ret = intel_modeset_pipe_config(crtc, pipe_config);
- if (ret)
+ if (ret) {
+ intel_dump_pipe_config(to_intel_crtc(crtc),
+ pipe_config, "[failed]");
return ret;
+ }
if (i915.fastboot &&
intel_pipe_config_compare(dev,
@@ -13423,13 +13514,12 @@ static int intel_atomic_check(struct drm_device *dev,
to_intel_crtc_state(crtc_state)->update_pipe = true;
}
- if (needs_modeset(crtc_state)) {
+ if (needs_modeset(crtc_state))
any_ms = true;
- ret = drm_atomic_add_affected_planes(state, crtc);
- if (ret)
- return ret;
- }
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ return ret;
intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
needs_modeset(crtc_state) ?
@@ -13449,27 +13539,20 @@ static int intel_atomic_check(struct drm_device *dev,
return ret;
intel_fbc_choose_crtc(dev_priv, state);
- calc_watermark_data(state);
-
- return 0;
+ return calc_watermark_data(state);
}
static int intel_atomic_prepare_commit(struct drm_device *dev,
struct drm_atomic_state *state,
bool nonblock)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_plane_state *plane_state;
struct drm_crtc_state *crtc_state;
struct drm_plane *plane;
struct drm_crtc *crtc;
int i, ret;
- if (nonblock) {
- DRM_DEBUG_KMS("i915 does not yet support nonblocking commit\n");
- return -EINVAL;
- }
-
for_each_crtc_in_state(state, crtc, crtc_state, i) {
if (state->legacy_cursor_update)
continue;
@@ -13513,6 +13596,16 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
return ret;
}
+u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+
+ if (!dev->max_vblank_count)
+ return drm_accurate_vblank_count(&crtc->base);
+
+ return dev->driver->get_vblank_counter(dev, crtc->pipe);
+}
+
static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
struct drm_i915_private *dev_priv,
unsigned crtc_mask)
@@ -13578,45 +13671,36 @@ static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
return false;
}
-/**
- * intel_atomic_commit - commit validated state object
- * @dev: DRM device
- * @state: the top-level driver state object
- * @nonblock: nonblocking commit
- *
- * This function commits a top-level state object that has been validated
- * with drm_atomic_helper_check().
- *
- * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
- * we can only handle plane-related operations and do not yet support
- * nonblocking commit.
- *
- * RETURNS
- * Zero for success or -errno.
- */
-static int intel_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool nonblock)
+static void intel_atomic_commit_tail(struct drm_atomic_state *state)
{
+ struct drm_device *dev = state->dev;
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc_state *old_crtc_state;
struct drm_crtc *crtc;
struct intel_crtc_state *intel_cstate;
- int ret = 0, i;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
bool hw_check = intel_state->modeset;
unsigned long put_domains[I915_MAX_PIPES] = {};
unsigned crtc_vblank_mask = 0;
+ int i, ret;
- ret = intel_atomic_prepare_commit(dev, state, nonblock);
- if (ret) {
- DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
- return ret;
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ struct intel_plane_state *intel_plane_state =
+ to_intel_plane_state(plane_state);
+
+ if (!intel_plane_state->wait_req)
+ continue;
+
+ ret = __i915_wait_request(intel_plane_state->wait_req,
+ true, NULL, NULL);
+ /* EIO should be eaten, and we can't get interrupted in the
+ * worker, and blocking commits have waited already. */
+ WARN_ON(ret);
}
- drm_atomic_helper_swap_state(dev, state);
- dev_priv->wm.config = intel_state->wm_config;
- intel_shared_dpll_commit(state);
+ drm_atomic_helper_wait_for_dependencies(state);
if (intel_state->modeset) {
memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
@@ -13671,9 +13755,17 @@ static int intel_atomic_commit(struct drm_device *dev,
drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
if (dev_priv->display.modeset_commit_cdclk &&
- intel_state->dev_cdclk != dev_priv->cdclk_freq)
+ (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
+ intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco))
dev_priv->display.modeset_commit_cdclk(state);
+ /*
+ * SKL workaround: bspec recommends we disable the SAGV when we
+	 * have more than one pipe enabled.
+ */
+ if (IS_SKYLAKE(dev_priv) && !skl_can_enable_sagv(state))
+ skl_disable_sagv(dev_priv);
+
intel_modeset_verify_disabled(dev);
}
@@ -13683,30 +13775,44 @@ static int intel_atomic_commit(struct drm_device *dev,
bool modeset = needs_modeset(crtc->state);
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->state);
- bool update_pipe = !modeset && pipe_config->update_pipe;
if (modeset && crtc->state->active) {
update_scanline_offset(to_intel_crtc(crtc));
dev_priv->display.crtc_enable(crtc);
}
+		/* Complete events for now-disabled pipes here. */
+ if (modeset && !crtc->state->active && crtc->state->event) {
+ spin_lock_irq(&dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ spin_unlock_irq(&dev->event_lock);
+
+ crtc->state->event = NULL;
+ }
+
if (!modeset)
intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
if (crtc->state->active &&
drm_atomic_get_existing_plane_state(state, crtc->primary))
- intel_fbc_enable(intel_crtc);
+ intel_fbc_enable(intel_crtc, pipe_config, to_intel_plane_state(crtc->primary->state));
- if (crtc->state->active &&
- (crtc->state->planes_changed || update_pipe))
+ if (crtc->state->active)
drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
if (pipe_config->base.active && needs_vblank_wait(pipe_config))
crtc_vblank_mask |= 1 << i;
}
- /* FIXME: add subpixel order */
-
+ /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
+ * already, but still need the state for the delayed optimization. To
+ * fix this:
+ * - wrap the optimization/post_plane_update stuff into a per-crtc work.
+ * - schedule that vblank worker _before_ calling hw_done
+	 * - at the start of commit_tail, cancel it _synchronously
+	 * - switch over to the vblank wait helper in the core after that since
+	 *   we don't need our special handling any more.
+ */
if (!state->legacy_cursor_update)
intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);
@@ -13733,6 +13839,12 @@ static int intel_atomic_commit(struct drm_device *dev,
intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
}
+ if (IS_SKYLAKE(dev_priv) && intel_state->modeset &&
+ skl_can_enable_sagv(state))
+ skl_enable_sagv(dev_priv);
+
+ drm_atomic_helper_commit_hw_done(state);
+
if (intel_state->modeset)
intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
@@ -13740,6 +13852,8 @@ static int intel_atomic_commit(struct drm_device *dev,
drm_atomic_helper_cleanup_planes(dev, state);
mutex_unlock(&dev->struct_mutex);
+ drm_atomic_helper_commit_cleanup_done(state);
+
drm_atomic_state_free(state);
/* As one of the primary mmio accessors, KMS has a high likelihood
@@ -13754,6 +13868,86 @@ static int intel_atomic_commit(struct drm_device *dev,
* can happen also when the device is completely off.
*/
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+}
+
+static void intel_atomic_commit_work(struct work_struct *work)
+{
+ struct drm_atomic_state *state = container_of(work,
+ struct drm_atomic_state,
+ commit_work);
+ intel_atomic_commit_tail(state);
+}
+
+static void intel_atomic_track_fbs(struct drm_atomic_state *state)
+{
+ struct drm_plane_state *old_plane_state;
+ struct drm_plane *plane;
+ struct drm_i915_gem_object *obj, *old_obj;
+ struct intel_plane *intel_plane;
+ int i;
+
+ mutex_lock(&state->dev->struct_mutex);
+ for_each_plane_in_state(state, plane, old_plane_state, i) {
+ obj = intel_fb_obj(plane->state->fb);
+ old_obj = intel_fb_obj(old_plane_state->fb);
+ intel_plane = to_intel_plane(plane);
+
+ i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
+ }
+ mutex_unlock(&state->dev->struct_mutex);
+}
+
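i915_gem_track_fb() moves each plane's frontbuffer bit from the old backing object to the new one. A simplified model of that bookkeeping (the real helper also takes locks and sanity-checks the bits):

    #include <stdio.h>

    struct gem_obj { unsigned int frontbuffer_bits; };

    /* Illustrative model of i915_gem_track_fb(): the plane's bit follows
     * whichever object currently backs the plane's framebuffer. */
    static void track_fb(struct gem_obj *old_obj, struct gem_obj *new_obj,
                         unsigned int plane_bit)
    {
        if (old_obj)
            old_obj->frontbuffer_bits &= ~plane_bit;
        if (new_obj)
            new_obj->frontbuffer_bits |= plane_bit;
    }

    int main(void)
    {
        struct gem_obj a = { 0x1 }, b = { 0 };
        track_fb(&a, &b, 0x1);
        printf("a=%x b=%x\n", a.frontbuffer_bits, b.frontbuffer_bits);
        return 0;
    }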
+/**
+ * intel_atomic_commit - commit validated state object
+ * @dev: DRM device
+ * @state: the top-level driver state object
+ * @nonblock: nonblocking commit
+ *
+ * This function commits a top-level state object that has been validated
+ * with drm_atomic_helper_check().
+ *
+ * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
+ * nonblocking commits are only safe for pure plane updates. Everything else
+ * should work though.
+ *
+ * RETURNS
+ * Zero for success or -errno.
+ */
+static int intel_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool nonblock)
+{
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int ret = 0;
+
+ if (intel_state->modeset && nonblock) {
+ DRM_DEBUG_KMS("nonblocking commit for modeset not yet implemented.\n");
+ return -EINVAL;
+ }
+
+ ret = drm_atomic_helper_setup_commit(state, nonblock);
+ if (ret)
+ return ret;
+
+ INIT_WORK(&state->commit_work, intel_atomic_commit_work);
+
+ ret = intel_atomic_prepare_commit(dev, state, nonblock);
+ if (ret) {
+ DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
+ return ret;
+ }
+
+ drm_atomic_helper_swap_state(state, true);
+ dev_priv->wm.distrust_bios_wm = false;
+ dev_priv->wm.skl_results = intel_state->wm_results;
+ intel_shared_dpll_commit(state);
+ intel_atomic_track_fbs(state);
+
+ if (nonblock)
+ queue_work(system_unbound_wq, &state->commit_work);
+ else
+ intel_atomic_commit_tail(state);
return 0;
}
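After swap_state the commit tail either runs inline (blocking) or is pushed to a worker (nonblocking). A loose user-space analogue of that dispatch, with a detached pthread standing in for the kernel's queue_work() on system_unbound_wq:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static void commit_tail(int *state)
    {
        printf("commit tail for state %d\n", *state);
    }

    static void *commit_worker(void *arg)
    {
        commit_tail(arg);
        return NULL;
    }

    static int atomic_commit(int *state, bool nonblock)
    {
        if (nonblock) {
            pthread_t t;
            /* The kernel queues a work item; a detached thread models it. */
            if (pthread_create(&t, NULL, commit_worker, state))
                return -1;
            pthread_detach(t);
        } else {
            commit_tail(state);
        }
        return 0;
    }

    int main(void)
    {
        int s = 7;
        atomic_commit(&s, false);
        atomic_commit(&s, true);
        pthread_exit(NULL); /* let the detached worker finish */
    }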
@@ -13767,8 +13961,8 @@ void intel_crtc_restore_mode(struct drm_crtc *crtc)
state = drm_atomic_state_alloc(dev);
if (!state) {
- DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory",
- crtc->base.id);
+ DRM_DEBUG_KMS("[CRTC:%d:%s] crtc restore failed, out of memory",
+ crtc->base.id, crtc->name);
return;
}
@@ -13798,8 +13992,50 @@ out:
#undef for_each_intel_crtc_masked
+/*
+ * FIXME: Remove this once i915 is fully DRIVER_ATOMIC by calling
+ * drm_atomic_helper_legacy_gamma_set() directly.
+ */
+static int intel_atomic_legacy_gamma_set(struct drm_crtc *crtc,
+ u16 *red, u16 *green, u16 *blue,
+ uint32_t size)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_crtc_state *state;
+ int ret;
+
+ ret = drm_atomic_helper_legacy_gamma_set(crtc, red, green, blue, size);
+ if (ret)
+ return ret;
+
+ /*
+ * Make sure we update the legacy properties so this works when
+ * atomic is not enabled.
+ */
+
+ state = crtc->state;
+
+ drm_object_property_set_value(&crtc->base,
+ config->degamma_lut_property,
+ (state->degamma_lut) ?
+ state->degamma_lut->base.id : 0);
+
+ drm_object_property_set_value(&crtc->base,
+ config->ctm_property,
+ (state->ctm) ?
+ state->ctm->base.id : 0);
+
+ drm_object_property_set_value(&crtc->base,
+ config->gamma_lut_property,
+ (state->gamma_lut) ?
+ state->gamma_lut->base.id : 0);
+
+ return 0;
+}
+
static const struct drm_crtc_funcs intel_crtc_funcs = {
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
+ .gamma_set = intel_atomic_legacy_gamma_set,
.set_config = drm_atomic_helper_set_config,
.set_property = drm_atomic_helper_crtc_set_property,
.destroy = intel_crtc_destroy,
@@ -13828,9 +14064,9 @@ intel_prepare_plane_fb(struct drm_plane *plane,
{
struct drm_device *dev = plane->dev;
struct drm_framebuffer *fb = new_state->fb;
- struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
+ struct reservation_object *resv;
int ret = 0;
if (!obj && !old_obj)
@@ -13860,12 +14096,15 @@ intel_prepare_plane_fb(struct drm_plane *plane,
}
}
+ if (!obj)
+ return 0;
+
/* For framebuffer backed by dmabuf, wait for fence */
- if (obj && obj->base.dma_buf) {
+ resv = i915_gem_object_get_dmabuf_resv(obj);
+ if (resv) {
long lret;
- lret = reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
- false, true,
+ lret = reservation_object_wait_timeout_rcu(resv, false, true,
MAX_SCHEDULE_TIMEOUT);
if (lret == -ERESTARTSYS)
return lret;
@@ -13873,9 +14112,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
WARN(lret < 0, "waiting returns %li\n", lret);
}
- if (!obj) {
- ret = 0;
- } else if (plane->type == DRM_PLANE_TYPE_CURSOR &&
+ if (plane->type == DRM_PLANE_TYPE_CURSOR &&
INTEL_INFO(dev)->cursor_needs_physical) {
int align = IS_I830(dev) ? 16 * 1024 : 256;
ret = i915_gem_object_attach_phys(obj, align);
@@ -13886,15 +14123,11 @@ intel_prepare_plane_fb(struct drm_plane *plane,
}
if (ret == 0) {
- if (obj) {
- struct intel_plane_state *plane_state =
- to_intel_plane_state(new_state);
-
- i915_gem_request_assign(&plane_state->wait_req,
- obj->last_write_req);
- }
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(new_state);
- i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
+ i915_gem_request_assign(&plane_state->wait_req,
+ obj->last_write_req);
}
return ret;
@@ -13914,7 +14147,6 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
const struct drm_plane_state *old_state)
{
struct drm_device *dev = plane->dev;
- struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_plane_state *old_intel_state;
struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
@@ -13928,11 +14160,6 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
!INTEL_INFO(dev)->cursor_needs_physical))
intel_unpin_fb_obj(old_state->fb, old_state->rotation);
- /* prepare_fb aborted? */
- if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) ||
- (obj && !(obj->frontbuffer_bits & intel_plane->frontbuffer_bit)))
- i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
-
i915_gem_request_assign(&old_intel_state->wait_req, NULL);
}
@@ -13940,15 +14167,11 @@ int
skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
{
int max_scale;
- struct drm_device *dev;
- struct drm_i915_private *dev_priv;
int crtc_clock, cdclk;
if (!intel_crtc || !crtc_state->base.enable)
return DRM_PLANE_HELPER_NO_SCALING;
- dev = intel_crtc->base.dev;
- dev_priv = dev->dev_private;
crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
@@ -13988,6 +14211,7 @@ intel_check_primary_plane(struct drm_plane *plane,
return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
&state->dst, &state->clip,
+ state->base.rotation,
min_scale, max_scale,
can_position, true,
&state->visible);
@@ -14024,7 +14248,7 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- intel_pipe_update_end(intel_crtc);
+ intel_pipe_update_end(intel_crtc, NULL);
}
/**
@@ -14036,9 +14260,11 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
*/
void intel_plane_destroy(struct drm_plane *plane)
{
- struct intel_plane *intel_plane = to_intel_plane(plane);
+ if (!plane)
+ return;
+
drm_plane_cleanup(plane);
- kfree(intel_plane);
+ kfree(to_intel_plane(plane));
}
const struct drm_plane_funcs intel_plane_funcs = {
@@ -14110,10 +14336,24 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
primary->disable_plane = i9xx_disable_primary_plane;
}
- ret = drm_universal_plane_init(dev, &primary->base, 0,
- &intel_plane_funcs,
- intel_primary_formats, num_formats,
- DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (INTEL_INFO(dev)->gen >= 9)
+ ret = drm_universal_plane_init(dev, &primary->base, 0,
+ &intel_plane_funcs,
+ intel_primary_formats, num_formats,
+ DRM_PLANE_TYPE_PRIMARY,
+ "plane 1%c", pipe_name(pipe));
+ else if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+ ret = drm_universal_plane_init(dev, &primary->base, 0,
+ &intel_plane_funcs,
+ intel_primary_formats, num_formats,
+ DRM_PLANE_TYPE_PRIMARY,
+ "primary %c", pipe_name(pipe));
+ else
+ ret = drm_universal_plane_init(dev, &primary->base, 0,
+ &intel_plane_funcs,
+ intel_primary_formats, num_formats,
+ DRM_PLANE_TYPE_PRIMARY,
+ "plane %c", plane_name(primary->plane));
if (ret)
goto fail;
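The new drm_universal_plane_init() signature takes a printf-style name, so "plane 1%c" with pipe_name() yields "plane 1A", "plane 1B", and so on. A tiny sketch of the formatting, with pipe_name() reimplemented here for illustration:

    #include <stdio.h>

    enum pipe { PIPE_A, PIPE_B, PIPE_C };

    static char pipe_name(enum pipe pipe)
    {
        return 'A' + pipe; /* matches the kernel's pipe_name() convention */
    }

    int main(void)
    {
        char name[16];

        /* gen9+ primary planes: "plane 1A", "plane 1B", ... */
        snprintf(name, sizeof(name), "plane 1%c", pipe_name(PIPE_B));
        puts(name);
        return 0;
    }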
@@ -14163,6 +14403,7 @@ intel_check_cursor_plane(struct drm_plane *plane,
ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
&state->dst, &state->clip,
+ state->base.rotation,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
true, true, &state->visible);
@@ -14271,7 +14512,8 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
&intel_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
- DRM_PLANE_TYPE_CURSOR, NULL);
+ DRM_PLANE_TYPE_CURSOR,
+ "cursor %c", pipe_name(pipe));
if (ret)
goto fail;
@@ -14319,7 +14561,7 @@ static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_cr
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc;
struct intel_crtc_state *crtc_state = NULL;
struct drm_plane *primary = NULL;
@@ -14356,7 +14598,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
goto fail;
ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
- cursor, &intel_crtc_funcs, NULL);
+ cursor, &intel_crtc_funcs,
+ "pipe %c", pipe_name(pipe));
if (ret)
goto fail;
@@ -14390,10 +14633,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
return;
fail:
- if (primary)
- drm_plane_cleanup(primary);
- if (cursor)
- drm_plane_cleanup(cursor);
+ intel_plane_destroy(primary);
+ intel_plane_destroy(cursor);
kfree(crtc_state);
kfree(intel_crtc);
}
@@ -14419,11 +14660,8 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct intel_crtc *crtc;
drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
-
- if (!drmmode_crtc) {
- DRM_ERROR("no such CRTC id\n");
+ if (!drmmode_crtc)
return -ENOENT;
- }
crtc = to_intel_crtc(drmmode_crtc);
pipe_from_crtc_id->pipe = crtc->pipe;
@@ -14450,7 +14688,7 @@ static int intel_encoder_clones(struct intel_encoder *encoder)
static bool has_edp_a(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (!IS_MOBILE(dev))
return false;
@@ -14466,7 +14704,7 @@ static bool has_edp_a(struct drm_device *dev)
static bool intel_crt_present(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (INTEL_INFO(dev)->gen >= 9)
return false;
@@ -14492,10 +14730,15 @@ static bool intel_crt_present(struct drm_device *dev)
static void intel_setup_outputs(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder;
bool dpd_is_edp = false;
+ /*
+ * intel_edp_init_connector() depends on this completing first, to
+ * prevent the registration of both eDP and LVDS and the incorrect
+ * sharing of the PPS.
+ */
intel_lvds_init(dev);
if (intel_crt_present(dev))
@@ -15080,12 +15323,13 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
- if (IS_BROADWELL(dev_priv)) {
- dev_priv->display.modeset_commit_cdclk =
- broadwell_modeset_commit_cdclk;
- dev_priv->display.modeset_calc_cdclk =
- broadwell_modeset_calc_cdclk;
- }
+ }
+
+ if (IS_BROADWELL(dev_priv)) {
+ dev_priv->display.modeset_commit_cdclk =
+ broadwell_modeset_commit_cdclk;
+ dev_priv->display.modeset_calc_cdclk =
+ broadwell_modeset_calc_cdclk;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
dev_priv->display.modeset_commit_cdclk =
valleyview_modeset_commit_cdclk;
@@ -15093,9 +15337,14 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
valleyview_modeset_calc_cdclk;
} else if (IS_BROXTON(dev_priv)) {
dev_priv->display.modeset_commit_cdclk =
- broxton_modeset_commit_cdclk;
+ bxt_modeset_commit_cdclk;
+ dev_priv->display.modeset_calc_cdclk =
+ bxt_modeset_calc_cdclk;
+ } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ dev_priv->display.modeset_commit_cdclk =
+ skl_modeset_commit_cdclk;
dev_priv->display.modeset_calc_cdclk =
- broxton_modeset_calc_cdclk;
+ skl_modeset_calc_cdclk;
}
switch (INTEL_INFO(dev_priv)->gen) {
@@ -15134,7 +15383,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
*/
static void quirk_pipea_force(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
dev_priv->quirks |= QUIRK_PIPEA_FORCE;
DRM_INFO("applying pipe a force quirk\n");
@@ -15142,7 +15391,7 @@ static void quirk_pipea_force(struct drm_device *dev)
static void quirk_pipeb_force(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
dev_priv->quirks |= QUIRK_PIPEB_FORCE;
DRM_INFO("applying pipe b force quirk\n");
@@ -15153,7 +15402,7 @@ static void quirk_pipeb_force(struct drm_device *dev)
*/
static void quirk_ssc_force_disable(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
DRM_INFO("applying lvds SSC disable quirk\n");
}
@@ -15164,7 +15413,7 @@ static void quirk_ssc_force_disable(struct drm_device *dev)
*/
static void quirk_invert_brightness(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
DRM_INFO("applying inverted panel brightness quirk\n");
}
@@ -15172,7 +15421,7 @@ static void quirk_invert_brightness(struct drm_device *dev)
/* Some VBT's incorrectly indicate no backlight is present */
static void quirk_backlight_present(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
DRM_INFO("applying backlight present quirk\n");
}
@@ -15298,7 +15547,7 @@ static void intel_init_quirks(struct drm_device *dev)
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u8 sr1;
i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
@@ -15316,14 +15565,14 @@ static void i915_disable_vga(struct drm_device *dev)
void intel_modeset_init_hw(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
intel_update_cdclk(dev);
dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
intel_init_clock_gating(dev);
- intel_enable_gt_powersave(dev);
+ intel_enable_gt_powersave(dev_priv);
}
/*
@@ -15393,7 +15642,6 @@ retry:
}
/* Write calculated watermark values back */
- to_i915(dev)->wm.config = to_intel_atomic_state(state)->wm_config;
for_each_crtc_in_state(state, crtc, cstate, i) {
struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
@@ -15491,11 +15739,13 @@ void intel_modeset_init(struct drm_device *dev)
}
intel_update_czclk(dev_priv);
- intel_update_rawclk(dev_priv);
intel_update_cdclk(dev);
intel_shared_dpll_init(dev);
+ if (dev_priv->max_cdclk_freq == 0)
+ intel_update_max_cdclk(dev);
+
/* Just disable it once at startup */
i915_disable_vga(dev);
intel_setup_outputs(dev);
@@ -15563,7 +15813,7 @@ static bool
intel_check_plane_mapping(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 val;
if (INTEL_INFO(dev)->num_pipes == 1)
@@ -15603,7 +15853,7 @@ static bool intel_encoder_has_connectors(struct intel_encoder *encoder)
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
/* Clear any frame start delays used for debugging left by the BIOS */
@@ -15636,8 +15886,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
bool plane;
- DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
- crtc->base.base.id);
+ DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
+ crtc->base.base.id, crtc->base.name);
/* Pipe has the wrong plane attached and the plane is active.
* Temporarily change the plane mapping and disable everything
@@ -15728,7 +15978,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
void i915_redisable_vga_power_on(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
@@ -15739,7 +15989,7 @@ void i915_redisable_vga_power_on(struct drm_device *dev)
void i915_redisable_vga(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/* This function can be called both from intel_modeset_setup_hw_state or
* at a very early point in our resume sequence, where the power well
@@ -15779,7 +16029,7 @@ static void readout_plane_state(struct intel_crtc *crtc)
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
@@ -15805,26 +16055,24 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
if (crtc_state->base.active) {
dev_priv->active_crtcs |= 1 << crtc->pipe;
- if (IS_BROADWELL(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
pixclk = ilk_pipe_pixel_rate(crtc_state);
-
- /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (crtc_state->ips_enabled)
- pixclk = DIV_ROUND_UP(pixclk * 100, 95);
- } else if (IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv) ||
- IS_BROXTON(dev_priv))
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
pixclk = crtc_state->base.adjusted_mode.crtc_clock;
else
WARN_ON(dev_priv->display.modeset_calc_cdclk);
+
+ /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+ if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
+ pixclk = DIV_ROUND_UP(pixclk * 100, 95);
}
dev_priv->min_pixclk[crtc->pipe] = pixclk;
readout_plane_state(crtc);
- DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
- crtc->base.base.id,
+ DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
+ crtc->base.base.id, crtc->base.name,
crtc->active ? "enabled" : "disabled");
}
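With IPS enabled on Broadwell the pixel rate may not exceed 95% of cdclk, so the readout scales the pixel clock up by 100/95 before recording it as the per-pipe cdclk floor. Worked arithmetic, with DIV_ROUND_UP reimplemented locally:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        int pixclk = 450000; /* kHz, example pipe pixel rate */

        /* IPS on BDW: pixel rate must stay under 95% of cdclk, so the
         * effective cdclk requirement is pixclk * 100/95, rounded up. */
        int min_cdclk = DIV_ROUND_UP(pixclk * 100, 95);

        printf("%d kHz -> needs cdclk >= %d kHz\n", pixclk, min_cdclk);
        return 0;
    }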
@@ -15850,6 +16098,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
if (encoder->get_hw_state(encoder, &pipe)) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
encoder->base.crtc = &crtc->base;
+ crtc->config->output_types |= 1 << encoder->type;
encoder->get_config(encoder, crtc->config);
} else {
encoder->base.crtc = NULL;
@@ -15934,7 +16183,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
static void
intel_modeset_setup_hw_state(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
@@ -15993,9 +16242,10 @@ void intel_display_resume(struct drm_device *dev)
struct drm_atomic_state *state = dev_priv->modeset_restore_state;
struct drm_modeset_acquire_ctx ctx;
int ret;
- bool setup = false;
dev_priv->modeset_restore_state = NULL;
+ if (state)
+ state->acquire_ctx = &ctx;
/*
* This is a cludge because with real atomic modeset mode_config.mutex
@@ -16006,43 +16256,17 @@ void intel_display_resume(struct drm_device *dev)
mutex_lock(&dev->mode_config.mutex);
drm_modeset_acquire_init(&ctx, 0);
-retry:
- ret = drm_modeset_lock_all_ctx(dev, &ctx);
-
- if (ret == 0 && !setup) {
- setup = true;
-
- intel_modeset_setup_hw_state(dev);
- i915_redisable_vga(dev);
- }
-
- if (ret == 0 && state) {
- struct drm_crtc_state *crtc_state;
- struct drm_crtc *crtc;
- int i;
-
- state->acquire_ctx = &ctx;
-
- /* ignore any reset values/BIOS leftovers in the WM registers */
- to_intel_atomic_state(state)->skip_intermediate_wm = true;
-
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- /*
- * Force recalculation even if we restore
- * current state. With fast modeset this may not result
- * in a modeset when the state is compatible.
- */
- crtc_state->mode_changed = true;
- }
-
- ret = drm_atomic_commit(state);
- }
+ while (1) {
+ ret = drm_modeset_lock_all_ctx(dev, &ctx);
+ if (ret != -EDEADLK)
+ break;
- if (ret == -EDEADLK) {
drm_modeset_backoff(&ctx);
- goto retry;
}
+ if (!ret)
+ ret = __intel_display_resume(dev, state);
+
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
mutex_unlock(&dev->mode_config.mutex);
@@ -16055,15 +16279,16 @@ retry:
void intel_modeset_gem_init(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *c;
struct drm_i915_gem_object *obj;
int ret;
- intel_init_gt_powersave(dev);
+ intel_init_gt_powersave(dev_priv);
intel_modeset_init_hw(dev);
- intel_setup_overlay(dev);
+ intel_setup_overlay(dev_priv);
/*
* Make sure any fbs we allocated at startup are properly
@@ -16089,26 +16314,36 @@ void intel_modeset_gem_init(struct drm_device *dev)
c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
}
}
+}
- intel_backlight_register(dev);
+int intel_connector_register(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ int ret;
+
+ ret = intel_backlight_device_register(intel_connector);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ return ret;
}
-void intel_connector_unregister(struct intel_connector *intel_connector)
+void intel_connector_unregister(struct drm_connector *connector)
{
- struct drm_connector *connector = &intel_connector->base;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ intel_backlight_device_unregister(intel_connector);
intel_panel_destroy_backlight(connector);
- drm_connector_unregister(connector);
}
void intel_modeset_cleanup(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_connector *connector;
-
- intel_disable_gt_powersave(dev);
+ struct drm_i915_private *dev_priv = to_i915(dev);
- intel_backlight_unregister(dev);
+ intel_disable_gt_powersave(dev_priv);
/*
* Interrupts and polling as the first thing to avoid creating havoc.
@@ -16130,27 +16365,15 @@ void intel_modeset_cleanup(struct drm_device *dev)
/* flush any delayed tasks or pending work */
flush_scheduled_work();
- /* destroy the backlight and sysfs files before encoders/connectors */
- for_each_intel_connector(dev, connector)
- connector->unregister(connector);
-
drm_mode_config_cleanup(dev);
- intel_cleanup_overlay(dev);
+ intel_cleanup_overlay(dev_priv);
- intel_cleanup_gt_powersave(dev);
+ intel_cleanup_gt_powersave(dev_priv);
intel_teardown_gmbus(dev);
}
-/*
- * Return which encoder is currently attached for connector.
- */
-struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
-{
- return &intel_attached_encoder(connector)->base;
-}
-
void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder)
{
@@ -16164,7 +16387,7 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
*/
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
u16 gmch_ctrl;
@@ -16234,9 +16457,8 @@ struct intel_display_error_state {
};
struct intel_display_error_state *
-intel_display_capture_error_state(struct drm_device *dev)
+intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_display_error_state *error;
int transcoders[] = {
TRANSCODER_A,
@@ -16246,14 +16468,14 @@ intel_display_capture_error_state(struct drm_device *dev)
};
int i;
- if (INTEL_INFO(dev)->num_pipes == 0)
+ if (INTEL_INFO(dev_priv)->num_pipes == 0)
return NULL;
error = kzalloc(sizeof(*error), GFP_ATOMIC);
if (error == NULL)
return NULL;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
for_each_pipe(dev_priv, i) {
@@ -16269,25 +16491,25 @@ intel_display_capture_error_state(struct drm_device *dev)
error->plane[i].control = I915_READ(DSPCNTR(i));
error->plane[i].stride = I915_READ(DSPSTRIDE(i));
- if (INTEL_INFO(dev)->gen <= 3) {
+ if (INTEL_GEN(dev_priv) <= 3) {
error->plane[i].size = I915_READ(DSPSIZE(i));
error->plane[i].pos = I915_READ(DSPPOS(i));
}
- if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+ if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
error->plane[i].addr = I915_READ(DSPADDR(i));
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
error->plane[i].surface = I915_READ(DSPSURF(i));
error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
}
error->pipe[i].source = I915_READ(PIPESRC(i));
- if (HAS_GMCH_DISPLAY(dev))
+ if (HAS_GMCH_DISPLAY(dev_priv))
error->pipe[i].stat = I915_READ(PIPESTAT(i));
}
/* Note: this does not include DSI transcoders. */
- error->num_transcoders = INTEL_INFO(dev)->num_pipes;
+ error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
if (HAS_DDI(dev_priv))
error->num_transcoders++; /* Account for eDP. */
@@ -16321,7 +16543,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
struct drm_device *dev,
struct intel_display_error_state *error)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int i;
if (!error)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 891107f92..21b04c3ed 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -131,11 +131,6 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
-static unsigned int intel_dp_unused_lane_mask(int lane_count)
-{
- return ~((1 << lane_count) - 1) & 0xf;
-}
-
static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
@@ -267,7 +262,7 @@ static void pps_lock(struct intel_dp *intel_dp)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &intel_dig_port->base;
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
/*
@@ -285,7 +280,7 @@ static void pps_unlock(struct intel_dp *intel_dp)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &intel_dig_port->base;
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
mutex_unlock(&dev_priv->pps_mutex);
@@ -299,7 +294,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = intel_dp->pps_pipe;
bool pll_enabled, release_cl_override = false;
enum dpio_phy phy = DPIO_PHY(pipe);
@@ -373,7 +368,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder;
unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
enum pipe pipe;
@@ -431,6 +426,37 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
return intel_dp->pps_pipe;
}
+static int
+bxt_power_sequencer_idx(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ /* We should never land here with regular DP ports */
+ WARN_ON(!is_edp(intel_dp));
+
+ /*
+ * TODO: BXT has 2 PPS instances. The correct port->PPS instance
+ * mapping needs to be retrieved from VBT; for now just hard-code to
+ * use instance #0 always.
+ */
+ if (!intel_dp->pps_reset)
+ return 0;
+
+ intel_dp->pps_reset = false;
+
+ /*
+ * Only the HW needs to be reprogrammed; the SW state is fixed and
+ * has been set up during connector init.
+ */
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+
+ return 0;
+}
+
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
enum pipe pipe);
@@ -480,7 +506,7 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_dig_port->port;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -512,12 +538,13 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
-void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
+void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct intel_encoder *encoder;
- if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)))
+ if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
+ !IS_BROXTON(dev)))
return;
/*
@@ -537,34 +564,71 @@ void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
continue;
intel_dp = enc_to_intel_dp(&encoder->base);
- intel_dp->pps_pipe = INVALID_PIPE;
+ if (IS_BROXTON(dev))
+ intel_dp->pps_reset = true;
+ else
+ intel_dp->pps_pipe = INVALID_PIPE;
+ }
+}
+
+struct pps_registers {
+ i915_reg_t pp_ctrl;
+ i915_reg_t pp_stat;
+ i915_reg_t pp_on;
+ i915_reg_t pp_off;
+ i915_reg_t pp_div;
+};
+
+static void intel_pps_get_registers(struct drm_i915_private *dev_priv,
+ struct intel_dp *intel_dp,
+ struct pps_registers *regs)
+{
+ memset(regs, 0, sizeof(*regs));
+
+ if (IS_BROXTON(dev_priv)) {
+ int idx = bxt_power_sequencer_idx(intel_dp);
+
+ regs->pp_ctrl = BXT_PP_CONTROL(idx);
+ regs->pp_stat = BXT_PP_STATUS(idx);
+ regs->pp_on = BXT_PP_ON_DELAYS(idx);
+ regs->pp_off = BXT_PP_OFF_DELAYS(idx);
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
+ regs->pp_ctrl = PCH_PP_CONTROL;
+ regs->pp_stat = PCH_PP_STATUS;
+ regs->pp_on = PCH_PP_ON_DELAYS;
+ regs->pp_off = PCH_PP_OFF_DELAYS;
+ regs->pp_div = PCH_PP_DIVISOR;
+ } else {
+ enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+
+ regs->pp_ctrl = VLV_PIPE_PP_CONTROL(pipe);
+ regs->pp_stat = VLV_PIPE_PP_STATUS(pipe);
+ regs->pp_on = VLV_PIPE_PP_ON_DELAYS(pipe);
+ regs->pp_off = VLV_PIPE_PP_OFF_DELAYS(pipe);
+ regs->pp_div = VLV_PIPE_PP_DIVISOR(pipe);
}
}
static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct pps_registers regs;
- if (IS_BROXTON(dev))
- return BXT_PP_CONTROL(0);
- else if (HAS_PCH_SPLIT(dev))
- return PCH_PP_CONTROL;
- else
- return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
+ intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
+ &regs);
+
+ return regs.pp_ctrl;
}
static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct pps_registers regs;
- if (IS_BROXTON(dev))
- return BXT_PP_STATUS(0);
- else if (HAS_PCH_SPLIT(dev))
- return PCH_PP_STATUS;
- else
- return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
+ intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
+ &regs);
+
+ return regs.pp_stat;
}
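intel_pps_get_registers() centralizes the per-platform choice of panel-power-sequencer register bank, leaving _pp_ctrl_reg()/_pp_stat_reg() as thin wrappers around it. A simplified model of the dispatch; the platform enum and register offsets below are placeholders, not the hardware values:

    #include <stdint.h>
    #include <stdio.h>

    enum platform { PLAT_BXT, PLAT_PCH, PLAT_VLV };

    struct pps_registers { uint32_t pp_ctrl, pp_stat; };

    /* Placeholder offsets, purely illustrative. */
    static void pps_get_registers(enum platform plat, int idx_or_pipe,
                                  struct pps_registers *regs)
    {
        switch (plat) {
        case PLAT_BXT: /* indexed PPS instances */
            regs->pp_ctrl = 0xc7200 + 0x100 * idx_or_pipe;
            regs->pp_stat = 0xc7204 + 0x100 * idx_or_pipe;
            break;
        case PLAT_PCH: /* single fixed bank */
            regs->pp_ctrl = 0xc7204;
            regs->pp_stat = 0xc7200;
            break;
        case PLAT_VLV: /* per-pipe banks */
            regs->pp_ctrl = 0x61204 + 0x100 * idx_or_pipe;
            regs->pp_stat = 0x61200 + 0x100 * idx_or_pipe;
            break;
        }
    }

    int main(void)
    {
        struct pps_registers regs;

        pps_get_registers(PLAT_BXT, 0, &regs);
        printf("BXT pp_ctrl=%#x\n", regs.pp_ctrl);
        return 0;
    }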
/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
@@ -575,7 +639,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
edp_notifier);
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (!is_edp(intel_dp) || code != SYS_RESTART)
return 0;
@@ -606,7 +670,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -620,7 +684,7 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp)
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -635,7 +699,7 @@ static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (!is_edp(intel_dp))
return;
@@ -653,7 +717,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
uint32_t status;
bool done;
@@ -775,6 +839,7 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_TIME_OUT_1600us |
DP_AUX_CH_CTL_RECEIVE_ERROR |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}
@@ -785,7 +850,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
uint32_t aux_clock_divider;
int i, ret, recv_bytes;
@@ -1181,48 +1246,21 @@ static void intel_aux_reg_init(struct intel_dp *intel_dp)
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
- drm_dp_aux_unregister(&intel_dp->aux);
kfree(intel_dp->aux.name);
}
-static int
+static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->port;
- int ret;
intel_aux_reg_init(intel_dp);
+ drm_dp_aux_init(&intel_dp->aux);
+ /* Failure to allocate our preferred name is not critical */
intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
- if (!intel_dp->aux.name)
- return -ENOMEM;
-
- intel_dp->aux.dev = connector->base.kdev;
intel_dp->aux.transfer = intel_dp_aux_transfer;
-
- DRM_DEBUG_KMS("registering %s bus for %s\n",
- intel_dp->aux.name,
- connector->base.kdev->kobj.name);
-
- ret = drm_dp_aux_register(&intel_dp->aux);
- if (ret < 0) {
- DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
- intel_dp->aux.name, ret);
- kfree(intel_dp->aux.name);
- return ret;
- }
-
- return 0;
-}
-
-static void
-intel_dp_connector_unregister(struct intel_connector *intel_connector)
-{
- struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
-
- intel_dp_aux_fini(intel_dp);
- intel_connector_unregister(intel_connector);
}
static int
@@ -1435,7 +1473,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
@@ -1463,7 +1501,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
pipe_config->has_pch_encoder = true;
- pipe_config->has_dp_encoder = true;
pipe_config->has_drrs = false;
pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
@@ -1582,6 +1619,27 @@ found:
&pipe_config->dp_m2_n2);
}
+ /*
+ * DPLL0 VCO may need to be adjusted to get the correct
+ * clock for eDP. This will affect cdclk as well.
+ */
+ if (is_edp(intel_dp) &&
+ (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) {
+ int vco;
+
+ switch (pipe_config->port_clock / 2) {
+ case 108000:
+ case 216000:
+ vco = 8640000;
+ break;
+ default:
+ vco = 8100000;
+ break;
+ }
+
+ to_intel_atomic_state(pipe_config->base.state)->cdclk_pll_vco = vco;
+ }
+
if (!HAS_DDI(dev))
intel_dp_set_clock(encoder, pipe_config);
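The switch above maps the eDP link clock to a DPLL0 VCO: 8.64 GHz when port_clock/2 is 108000 or 216000 kHz, 8.1 GHz otherwise. The same mapping as a standalone function:

    #include <stdio.h>

    /* Mirrors the eDP VCO selection above; values in kHz. */
    static int skl_edp_dpll0_vco(int port_clock)
    {
        switch (port_clock / 2) {
        case 108000:
        case 216000:
            return 8640000;
        default:
            return 8100000;
        }
    }

    int main(void)
    {
        printf("%d\n", skl_edp_dpll0_vco(216000)); /* -> 8640000 */
        printf("%d\n", skl_edp_dpll0_vco(270000)); /* -> 8100000 */
        return 0;
    }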
@@ -1598,7 +1656,7 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
static void intel_dp_prepare(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
@@ -1686,16 +1744,21 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
+static void intel_pps_verify_state(struct drm_i915_private *dev_priv,
+ struct intel_dp *intel_dp);
+
static void wait_panel_status(struct intel_dp *intel_dp,
u32 mask,
u32 value)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t pp_stat_reg, pp_ctrl_reg;
lockdep_assert_held(&dev_priv->pps_mutex);
+ intel_pps_verify_state(dev_priv, intel_dp);
+
pp_stat_reg = _pp_stat_reg(intel_dp);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
@@ -1704,8 +1767,9 @@ static void wait_panel_status(struct intel_dp *intel_dp,
I915_READ(pp_stat_reg),
I915_READ(pp_ctrl_reg));
- if (_wait_for((I915_READ(pp_stat_reg) & mask) == value,
- 5 * USEC_PER_SEC, 10 * USEC_PER_MSEC))
+ if (intel_wait_for_register(dev_priv,
+ pp_stat_reg, mask, value,
+ 5000))
DRM_ERROR("Panel status timeout: status %08x control %08x\n",
I915_READ(pp_stat_reg),
I915_READ(pp_ctrl_reg));
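intel_wait_for_register() replaces the open-coded _wait_for() with a masked-register poll whose timeout is given in milliseconds (5000 ms here, matching the previous 5 * USEC_PER_SEC). A generic user-space sketch of such a poll, simplified to a busy-wait:

    #include <stdbool.h>
    #include <stdint.h>
    #include <time.h>

    /* Modelled on intel_wait_for_register(): succeed once
     * (read() & mask) == value, or fail after timeout_ms. */
    static bool wait_for_register(uint32_t (*read_reg)(void),
                                  uint32_t mask, uint32_t value,
                                  unsigned int timeout_ms)
    {
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
            if ((read_reg() & mask) == value)
                return true;
            clock_gettime(CLOCK_MONOTONIC, &now);
            if ((now.tv_sec - start.tv_sec) * 1000 +
                (now.tv_nsec - start.tv_nsec) / 1000000 >=
                (long)timeout_ms)
                return false;
        }
    }

    static uint32_t fake_status(void) { return 0x8; }

    int main(void)
    {
        return wait_for_register(fake_status, 0x8, 0x8, 5000) ? 0 : 1;
    }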
@@ -1765,7 +1829,7 @@ static void edp_wait_backlight_off(struct intel_dp *intel_dp)
static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 control;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -1788,7 +1852,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
u32 pp;
i915_reg_t pp_stat_reg, pp_ctrl_reg;
@@ -1861,7 +1925,7 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_digital_port *intel_dig_port =
dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
@@ -1930,8 +1994,7 @@ static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
*/
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
- struct drm_i915_private *dev_priv =
- intel_dp_to_dev(intel_dp)->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -1952,7 +2015,7 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
static void edp_panel_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 pp;
i915_reg_t pp_ctrl_reg;
@@ -2013,7 +2076,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
u32 pp;
i915_reg_t pp_ctrl_reg;
@@ -2065,7 +2128,7 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 pp;
i915_reg_t pp_ctrl_reg;
@@ -2106,7 +2169,7 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp)
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 pp;
i915_reg_t pp_ctrl_reg;
@@ -2222,7 +2285,7 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
* 2. Program DP PLL enable
*/
if (IS_GEN5(dev_priv))
- intel_wait_for_vblank_if_active(dev_priv->dev, !crtc->pipe);
+ intel_wait_for_vblank_if_active(&dev_priv->drm, !crtc->pipe);
intel_dp->DP |= DP_PLL_ENABLE;
@@ -2287,7 +2350,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
u32 tmp;
bool ret;
@@ -2340,7 +2403,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
u32 tmp, flags = 0;
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = dp_to_dig_port(intel_dp)->port;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
@@ -2378,8 +2441,6 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
!IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
- pipe_config->has_dp_encoder = true;
-
pipe_config->lane_count =
((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
@@ -2460,55 +2521,11 @@ static void vlv_post_disable_dp(struct intel_encoder *encoder)
intel_dp_link_down(intel_dp);
}
-static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
- bool reset)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- enum pipe pipe = crtc->pipe;
- uint32_t val;
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
- if (reset)
- val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
- else
- val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
-
- if (crtc->config->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
- if (reset)
- val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
- else
- val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
- }
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
- val |= CHV_PCS_REQ_SOFTRESET_EN;
- if (reset)
- val &= ~DPIO_PCS_CLK_SOFT_RESET;
- else
- val |= DPIO_PCS_CLK_SOFT_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
-
- if (crtc->config->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
- val |= CHV_PCS_REQ_SOFTRESET_EN;
- if (reset)
- val &= ~DPIO_PCS_CLK_SOFT_RESET;
- else
- val |= DPIO_PCS_CLK_SOFT_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
- }
-}
-
static void chv_post_disable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
intel_dp_link_down(intel_dp);
@@ -2527,7 +2544,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_dig_port->port;
if (HAS_DDI(dev)) {
@@ -2607,7 +2624,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc =
to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
@@ -2636,7 +2653,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
enum pipe pipe = crtc->pipe;
@@ -2709,7 +2726,7 @@ static void g4x_pre_enable_dp(struct intel_encoder *encoder)
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
enum pipe pipe = intel_dp->pps_pipe;
i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
@@ -2735,7 +2752,7 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
static void vlv_steal_power_sequencer(struct drm_device *dev,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -2773,7 +2790,7 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &intel_dig_port->base;
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -2811,266 +2828,38 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- enum dpio_channel port = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
- u32 val;
-
- mutex_lock(&dev_priv->sb_lock);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
- val = 0;
- if (pipe)
- val |= (1<<21);
- else
- val &= ~(1<<21);
- val |= 0x001000c4;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
-
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_phy_pre_encoder_enable(encoder);
intel_enable_dp(encoder);
}
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
- enum dpio_channel port = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
-
intel_dp_prepare(encoder);
- /* Program Tx lane resets to default */
- mutex_lock(&dev_priv->sb_lock);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
- DPIO_PCS_TX_LANE2_RESET |
- DPIO_PCS_TX_LANE1_RESET);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
- DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
- DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
- (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
- DPIO_PCS_CLK_SOFT_RESET);
-
- /* Fix up inter-pair skew failure */
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_phy_pre_pll_enable(encoder);
}
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
- enum dpio_channel ch = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
- int data, i, stagger;
- u32 val;
-
- mutex_lock(&dev_priv->sb_lock);
-
- /* allow hardware to manage TX FIFO reset source */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
- val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
-
- if (intel_crtc->config->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
- val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
- }
-
- /* Program Tx lane latency optimal setting*/
- for (i = 0; i < intel_crtc->config->lane_count; i++) {
- /* Set the upar bit */
- if (intel_crtc->config->lane_count == 1)
- data = 0x0;
- else
- data = (i == 1) ? 0x0 : 0x1;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
- data << DPIO_UPAR_SHIFT);
- }
-
- /* Data lane stagger programming */
- if (intel_crtc->config->port_clock > 270000)
- stagger = 0x18;
- else if (intel_crtc->config->port_clock > 135000)
- stagger = 0xd;
- else if (intel_crtc->config->port_clock > 67500)
- stagger = 0x7;
- else if (intel_crtc->config->port_clock > 33750)
- stagger = 0x4;
- else
- stagger = 0x2;
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
- val |= DPIO_TX2_STAGGER_MASK(0x1f);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
-
- if (intel_crtc->config->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
- val |= DPIO_TX2_STAGGER_MASK(0x1f);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
- }
-
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
- DPIO_LANESTAGGER_STRAP(stagger) |
- DPIO_LANESTAGGER_STRAP_OVRD |
- DPIO_TX1_STAGGER_MASK(0x1f) |
- DPIO_TX1_STAGGER_MULT(6) |
- DPIO_TX2_STAGGER_MULT(0));
-
- if (intel_crtc->config->lane_count > 2) {
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
- DPIO_LANESTAGGER_STRAP(stagger) |
- DPIO_LANESTAGGER_STRAP_OVRD |
- DPIO_TX1_STAGGER_MASK(0x1f) |
- DPIO_TX1_STAGGER_MULT(7) |
- DPIO_TX2_STAGGER_MULT(5));
- }
-
- /* Deassert data lane reset */
- chv_data_lane_soft_reset(encoder, false);
-
- mutex_unlock(&dev_priv->sb_lock);
+ chv_phy_pre_encoder_enable(encoder);
intel_enable_dp(encoder);
/* Second common lane will stay alive on its own now */
- if (dport->release_cl2_override) {
- chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
- dport->release_cl2_override = false;
- }
+ chv_phy_release_cl2_override(encoder);
}
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
- enum dpio_channel ch = vlv_dport_to_channel(dport);
- enum pipe pipe = intel_crtc->pipe;
- unsigned int lane_mask =
- intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
- u32 val;
-
intel_dp_prepare(encoder);
- /*
- * Must trick the second common lane into life.
- * Otherwise we can't even access the PLL.
- */
- if (ch == DPIO_CH0 && pipe == PIPE_B)
- dport->release_cl2_override =
- !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
-
- chv_phy_powergate_lanes(encoder, true, lane_mask);
-
- mutex_lock(&dev_priv->sb_lock);
-
- /* Assert data lane reset */
- chv_data_lane_soft_reset(encoder, true);
-
- /* program left/right clock distribution */
- if (pipe != PIPE_B) {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
- val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
- if (ch == DPIO_CH0)
- val |= CHV_BUFLEFTENA1_FORCE;
- if (ch == DPIO_CH1)
- val |= CHV_BUFRIGHTENA1_FORCE;
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
- } else {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
- val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
- if (ch == DPIO_CH0)
- val |= CHV_BUFLEFTENA2_FORCE;
- if (ch == DPIO_CH1)
- val |= CHV_BUFRIGHTENA2_FORCE;
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
- }
-
- /* program clock channel usage */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
- val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
- if (pipe != PIPE_B)
- val &= ~CHV_PCS_USEDCLKCHANNEL;
- else
- val |= CHV_PCS_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
-
- if (intel_crtc->config->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
- val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
- if (pipe != PIPE_B)
- val &= ~CHV_PCS_USEDCLKCHANNEL;
- else
- val |= CHV_PCS_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
- }
-
- /*
- * This a a bit weird since generally CL
- * matches the pipe, but here we need to
- * pick the CL based on the port.
- */
- val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
- if (pipe != PIPE_B)
- val &= ~CHV_CMN_USEDCLKCHANNEL;
- else
- val |= CHV_CMN_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
-
- mutex_unlock(&dev_priv->sb_lock);
+ chv_phy_pre_pll_enable(encoder);
}
static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
- u32 val;
-
- mutex_lock(&dev_priv->sb_lock);
-
- /* disable left/right clock distribution */
- if (pipe != PIPE_B) {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
- val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
- } else {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
- val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
- }
-
- mutex_unlock(&dev_priv->sb_lock);
-
- /*
- * Leave the power down bit cleared for at least one
- * lane so that chv_powergate_phy_ch() will power
- * on something when the channel is otherwise unused.
- * When the port is off and the override is removed
- * the lanes power down anyway, so otherwise it doesn't
- * really matter what the state of power down bits is
- * after this.
- */
- chv_phy_powergate_lanes(encoder, false, 0x0);
+ chv_phy_post_pll_disable(encoder);
}
/*
@@ -3089,7 +2878,7 @@ uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = dp_to_dig_port(intel_dp)->port;
if (IS_BROXTON(dev))
@@ -3178,16 +2967,10 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
- struct intel_crtc *intel_crtc =
- to_intel_crtc(dport->base.base.crtc);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
unsigned long demph_reg_value, preemph_reg_value,
uniqtranscale_reg_value;
uint8_t train_set = intel_dp->train_set[0];
- enum dpio_channel port = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
case DP_TRAIN_PRE_EMPH_LEVEL_0:
@@ -3262,37 +3045,18 @@ static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
return 0;
}
- mutex_lock(&dev_priv->sb_lock);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
- uniqtranscale_reg_value);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
+ uniqtranscale_reg_value, 0);
return 0;
}
-static bool chv_need_uniq_trans_scale(uint8_t train_set)
-{
- return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
- (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
-}
-
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
- struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
- u32 deemph_reg_value, margin_reg_value, val;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ u32 deemph_reg_value, margin_reg_value;
+ bool uniq_trans_scale = false;
uint8_t train_set = intel_dp->train_set[0];
- enum dpio_channel ch = vlv_dport_to_channel(dport);
- enum pipe pipe = intel_crtc->pipe;
- int i;
switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
case DP_TRAIN_PRE_EMPH_LEVEL_0:
@@ -3312,7 +3076,7 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
deemph_reg_value = 128;
margin_reg_value = 154;
- /* FIXME extra to set for 1200 */
+ uniq_trans_scale = true;
break;
default:
return 0;
@@ -3364,88 +3128,8 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
return 0;
}
- mutex_lock(&dev_priv->sb_lock);
-
- /* Clear calc init */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
- val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
- val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
- val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
-
- if (intel_crtc->config->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
- val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
- val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
- val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
- }
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
- val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
- val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
-
- if (intel_crtc->config->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
- val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
- val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
- }
-
- /* Program swing deemph */
- for (i = 0; i < intel_crtc->config->lane_count; i++) {
- val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
- val &= ~DPIO_SWING_DEEMPH9P5_MASK;
- val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
- }
-
- /* Program swing margin */
- for (i = 0; i < intel_crtc->config->lane_count; i++) {
- val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
-
- val &= ~DPIO_SWING_MARGIN000_MASK;
- val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
-
- /*
- * Supposedly this value shouldn't matter when unique transition
- * scale is disabled, but in fact it does matter. Let's just
- * always program the same value and hope it's OK.
- */
- val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
- val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
-
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
- }
-
- /*
- * The document said it needs to set bit 27 for ch0 and bit 26
- * for ch1. Might be a typo in the doc.
- * For now, for this unique transition scale selection, set bit
- * 27 for ch0 and ch1.
- */
- for (i = 0; i < intel_crtc->config->lane_count; i++) {
- val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
- if (chv_need_uniq_trans_scale(train_set))
- val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
- else
- val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
- }
-
- /* Start swing calculation */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
- val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
-
- if (intel_crtc->config->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
- val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
- }
-
- mutex_unlock(&dev_priv->sb_lock);
+ chv_set_phy_signal_level(encoder, deemph_reg_value,
+ margin_reg_value, uniq_trans_scale);
return 0;
}
@@ -3612,7 +3296,7 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_dig_port->port;
uint32_t val;
@@ -3634,8 +3318,10 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
if (port == PORT_A)
return;
- if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
- 1))
+	if (intel_wait_for_register(dev_priv, DP_TP_STATUS(port),
+ DP_TP_STATUS_IDLE_DONE,
+ DP_TP_STATUS_IDLE_DONE,
+ 1))
DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
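/*
 * Editorial note (not part of the patch): intel_wait_for_register(dev_priv,
 * reg, mask, value, timeout_ms) polls until (I915_READ(reg) & mask) == value
 * or the timeout expires, so the conversion above is behaviour-preserving;
 * the shared helper simply replaces the open-coded wait_for().
 */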
@@ -3646,7 +3332,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
enum port port = intel_dig_port->port;
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t DP = intel_dp->DP;
if (WARN_ON(HAS_DDI(dev)))
@@ -3698,7 +3384,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
I915_WRITE(intel_dp->output_reg, DP);
POSTING_READ(intel_dp->output_reg);
- intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
+ intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
@@ -3713,8 +3399,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint8_t rev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
sizeof(intel_dp->dpcd)) < 0)
@@ -3771,6 +3456,15 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("PSR2 %s on sink",
dev_priv->psr.psr2_support ? "supported" : "not supported");
}
+
+ /* Read the eDP Display control capabilities registers */
+ memset(intel_dp->edp_dpcd, 0, sizeof(intel_dp->edp_dpcd));
+ if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
+ (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
+ intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
+ sizeof(intel_dp->edp_dpcd)))
+		DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
+ intel_dp->edp_dpcd);
}
DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
@@ -3778,10 +3472,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
/* Intermediate frequency support */
- if (is_edp(intel_dp) &&
- (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
- (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
- (rev >= 0x03)) { /* eDp v1.4 or higher */
+	if (is_edp(intel_dp) && (intel_dp->edp_dpcd[0] >= 0x03)) { /* eDP v1.4 or higher */
__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
int i;
@@ -4559,7 +4250,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
}
if (intel_encoder->type != INTEL_OUTPUT_EDP)
- intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ intel_encoder->type = INTEL_OUTPUT_DP;
intel_dp_probe_oui(intel_dp);
@@ -4635,7 +4326,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
/* MST devices are disconnected from a monitor POV */
intel_dp_unset_edid(intel_dp);
if (intel_encoder->type != INTEL_OUTPUT_EDP)
- intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ intel_encoder->type = INTEL_OUTPUT_DP;
return connector_status_disconnected;
}
@@ -4674,7 +4365,7 @@ intel_dp_force(struct drm_connector *connector)
intel_display_power_put(dev_priv, power_domain);
if (intel_encoder->type != INTEL_OUTPUT_EDP)
- intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ intel_encoder->type = INTEL_OUTPUT_DP;
}
static int intel_dp_get_modes(struct drm_connector *connector)
@@ -4723,7 +4414,7 @@ intel_dp_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
@@ -4811,6 +4502,32 @@ done:
return 0;
}
+static int
+intel_dp_connector_register(struct drm_connector *connector)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ int ret;
+
+ ret = intel_connector_register(connector);
+ if (ret)
+ return ret;
+
+ i915_debugfs_connector_add(connector);
+
+ DRM_DEBUG_KMS("registering %s bus for %s\n",
+ intel_dp->aux.name, connector->kdev->kobj.name);
+
+ intel_dp->aux.dev = connector->kdev;
+ return drm_dp_aux_register(&intel_dp->aux);
+}
+
+static void
+intel_dp_connector_unregister(struct drm_connector *connector)
+{
+ drm_dp_aux_unregister(&intel_attached_dp(connector)->aux);
+ intel_connector_unregister(connector);
+}
+
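/*
 * Editorial note: .late_register/.early_unregister (wired up further below)
 * are invoked from drm_connector_register()/drm_connector_unregister(), so
 * the debugfs entry and the DP AUX device now come and go together with the
 * connector's visibility to userspace.
 */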
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
@@ -4851,6 +4568,9 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
intel_dp->edp_notifier.notifier_call = NULL;
}
}
+
+ intel_dp_aux_fini(intel_dp);
+
drm_encoder_cleanup(encoder);
kfree(intel_dig_port);
}
@@ -4876,7 +4596,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -4929,6 +4649,8 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dp_set_property,
.atomic_get_property = intel_connector_atomic_get_property,
+ .late_register = intel_dp_connector_register,
+ .early_unregister = intel_dp_connector_unregister,
.destroy = intel_dp_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -4937,7 +4659,6 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
.get_modes = intel_dp_get_modes,
.mode_valid = intel_dp_mode_valid,
- .best_encoder = intel_best_encoder,
};
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
@@ -4951,13 +4672,13 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
enum irqreturn ret = IRQ_NONE;
if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
- intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
+ intel_dig_port->base.type = INTEL_OUTPUT_DP;
if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
/*
@@ -5019,7 +4740,7 @@ put_power:
/* check the VBT to see whether the eDP is on another port */
bool intel_dp_is_edp(struct drm_device *dev, enum port port)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/*
* eDP not supported on g4x. so bail out early just
@@ -5061,82 +4782,93 @@ static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
}
static void
-intel_dp_init_panel_power_sequencer(struct drm_device *dev,
- struct intel_dp *intel_dp)
+intel_pps_readout_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct edp_power_seq cur, vbt, spec,
- *final = &intel_dp->pps_delays;
u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
- i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
-
- lockdep_assert_held(&dev_priv->pps_mutex);
-
- /* already initialized? */
- if (final->t11_t12 != 0)
- return;
-
- if (IS_BROXTON(dev)) {
- /*
- * TODO: BXT has 2 sets of PPS registers.
- * Correct Register for Broxton need to be identified
- * using VBT. hardcoding for now
- */
- pp_ctrl_reg = BXT_PP_CONTROL(0);
- pp_on_reg = BXT_PP_ON_DELAYS(0);
- pp_off_reg = BXT_PP_OFF_DELAYS(0);
- } else if (HAS_PCH_SPLIT(dev)) {
- pp_ctrl_reg = PCH_PP_CONTROL;
- pp_on_reg = PCH_PP_ON_DELAYS;
- pp_off_reg = PCH_PP_OFF_DELAYS;
- pp_div_reg = PCH_PP_DIVISOR;
- } else {
- enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+ struct pps_registers regs;
- pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
- pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
- pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
- pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
- }
+ intel_pps_get_registers(dev_priv, intel_dp, &regs);
/* Workaround: Need to write PP_CONTROL with the unlock key as
* the very first thing. */
pp_ctl = ironlake_get_pp_control(intel_dp);
- pp_on = I915_READ(pp_on_reg);
- pp_off = I915_READ(pp_off_reg);
- if (!IS_BROXTON(dev)) {
- I915_WRITE(pp_ctrl_reg, pp_ctl);
- pp_div = I915_READ(pp_div_reg);
+ pp_on = I915_READ(regs.pp_on);
+ pp_off = I915_READ(regs.pp_off);
+ if (!IS_BROXTON(dev_priv)) {
+ I915_WRITE(regs.pp_ctrl, pp_ctl);
+ pp_div = I915_READ(regs.pp_div);
}
/* Pull timing values out of registers */
- cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
- PANEL_POWER_UP_DELAY_SHIFT;
+ seq->t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
+ PANEL_POWER_UP_DELAY_SHIFT;
- cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
- PANEL_LIGHT_ON_DELAY_SHIFT;
+ seq->t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
+ PANEL_LIGHT_ON_DELAY_SHIFT;
- cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
- PANEL_LIGHT_OFF_DELAY_SHIFT;
+ seq->t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
+ PANEL_LIGHT_OFF_DELAY_SHIFT;
- cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
- PANEL_POWER_DOWN_DELAY_SHIFT;
+ seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
+ PANEL_POWER_DOWN_DELAY_SHIFT;
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
BXT_POWER_CYCLE_DELAY_SHIFT;
if (tmp > 0)
- cur.t11_t12 = (tmp - 1) * 1000;
+ seq->t11_t12 = (tmp - 1) * 1000;
else
- cur.t11_t12 = 0;
+ seq->t11_t12 = 0;
} else {
- cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+ seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
}
+}
+
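/*
 * Editorial sketch, not from this patch: judging by the accesses above, the
 * pps_registers bundle filled in by intel_pps_get_registers() carries at
 * least these four PPS register offsets (the real definition lives in the
 * driver and may contain more fields).
 */
struct pps_registers_sketch {
	i915_reg_t pp_ctrl;
	i915_reg_t pp_on;
	i915_reg_t pp_off;
	i915_reg_t pp_div;
};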
+static void
+intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
+{
+ DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ state_name,
+ seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
+}
+
+static void
+intel_pps_verify_state(struct drm_i915_private *dev_priv,
+ struct intel_dp *intel_dp)
+{
+ struct edp_power_seq hw;
+ struct edp_power_seq *sw = &intel_dp->pps_delays;
+
+ intel_pps_readout_hw_state(dev_priv, intel_dp, &hw);
+
+ if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
+ hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
+ DRM_ERROR("PPS state mismatch\n");
+ intel_pps_dump_state("sw", sw);
+ intel_pps_dump_state("hw", &hw);
+ }
+}
+
+static void
+intel_dp_init_panel_power_sequencer(struct drm_device *dev,
+ struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct edp_power_seq cur, vbt, spec,
+ *final = &intel_dp->pps_delays;
+
+ lockdep_assert_held(&dev_priv->pps_mutex);
+
+ /* already initialized? */
+ if (final->t11_t12 != 0)
+ return;
- DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
- cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
+ intel_pps_readout_hw_state(dev_priv, intel_dp, &cur);
+
+ intel_pps_dump_state("cur", &cur);
vbt = dev_priv->vbt.edp.pps;
@@ -5152,8 +4884,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
* too. */
spec.t11_t12 = (510 + 100) * 10;
- DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
- vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
+ intel_pps_dump_state("vbt", &vbt);
/* Use the max of the register settings and vbt. If both are
* unset, fall back to the spec limits. */
@@ -5181,59 +4912,41 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+
+ /*
+ * We override the HW backlight delays to 1 because we do manual waits
+ * on them. For T8, even BSpec recommends doing it. For T9, if we
+ * don't do this, we'll end up waiting for the backlight off delay
+ * twice: once when we do the manual sleep, and once when we disable
+ * the panel and wait for the PP_STATUS bit to become zero.
+ */
+ final->t8 = 1;
+ final->t9 = 1;
}
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 pp_on, pp_off, pp_div, port_sel = 0;
int div = dev_priv->rawclk_freq / 1000;
- i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
+ struct pps_registers regs;
enum port port = dp_to_dig_port(intel_dp)->port;
const struct edp_power_seq *seq = &intel_dp->pps_delays;
lockdep_assert_held(&dev_priv->pps_mutex);
- if (IS_BROXTON(dev)) {
- /*
- * TODO: BXT has 2 sets of PPS registers.
- * Correct Register for Broxton need to be identified
- * using VBT. hardcoding for now
- */
- pp_ctrl_reg = BXT_PP_CONTROL(0);
- pp_on_reg = BXT_PP_ON_DELAYS(0);
- pp_off_reg = BXT_PP_OFF_DELAYS(0);
-
- } else if (HAS_PCH_SPLIT(dev)) {
- pp_on_reg = PCH_PP_ON_DELAYS;
- pp_off_reg = PCH_PP_OFF_DELAYS;
- pp_div_reg = PCH_PP_DIVISOR;
- } else {
- enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+ intel_pps_get_registers(dev_priv, intel_dp, &regs);
- pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
- pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
- pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
- }
-
- /*
- * And finally store the new values in the power sequencer. The
- * backlight delays are set to 1 because we do manual waits on them. For
- * T8, even BSpec recommends doing it. For T9, if we don't do this,
- * we'll end up waiting for the backlight off delay twice: once when we
- * do the manual sleep, and once when we disable the panel and wait for
- * the PP_STATUS bit to become zero.
- */
pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
- (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
- pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
+ (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
+ pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
(seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
/* Compute the divisor for the pp clock, simply match the Bspec
* formula. */
if (IS_BROXTON(dev)) {
- pp_div = I915_READ(pp_ctrl_reg);
+ pp_div = I915_READ(regs.pp_ctrl);
pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
<< BXT_POWER_CYCLE_DELAY_SHIFT);
@@ -5256,19 +4969,19 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
pp_on |= port_sel;
- I915_WRITE(pp_on_reg, pp_on);
- I915_WRITE(pp_off_reg, pp_off);
+ I915_WRITE(regs.pp_on, pp_on);
+ I915_WRITE(regs.pp_off, pp_off);
if (IS_BROXTON(dev))
- I915_WRITE(pp_ctrl_reg, pp_div);
+ I915_WRITE(regs.pp_ctrl, pp_div);
else
- I915_WRITE(pp_div_reg, pp_div);
+ I915_WRITE(regs.pp_div, pp_div);
DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
- I915_READ(pp_on_reg),
- I915_READ(pp_off_reg),
+ I915_READ(regs.pp_on),
+ I915_READ(regs.pp_off),
IS_BROXTON(dev) ?
- (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
- I915_READ(pp_div_reg));
+ (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
+ I915_READ(regs.pp_div));
}
/**
@@ -5285,7 +4998,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
*/
static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder;
struct intel_digital_port *dig_port = NULL;
struct intel_dp *intel_dp = dev_priv->drrs.dp;
@@ -5384,7 +5097,7 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
void intel_edp_drrs_enable(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_crtc *crtc = dig_port->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -5416,7 +5129,7 @@ unlock:
void intel_edp_drrs_disable(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_crtc *crtc = dig_port->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -5431,9 +5144,9 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp)
}
if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
- intel_dp_set_drrs_state(dev_priv->dev,
- intel_dp->attached_connector->panel.
- fixed_mode->vrefresh);
+ intel_dp_set_drrs_state(&dev_priv->drm,
+ intel_dp->attached_connector->panel.
+ fixed_mode->vrefresh);
dev_priv->drrs.dp = NULL;
mutex_unlock(&dev_priv->drrs.mutex);
@@ -5463,9 +5176,9 @@ static void intel_edp_drrs_downclock_work(struct work_struct *work)
goto unlock;
if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
- intel_dp_set_drrs_state(dev_priv->dev,
- intel_dp->attached_connector->panel.
- downclock_mode->vrefresh);
+ intel_dp_set_drrs_state(&dev_priv->drm,
+ intel_dp->attached_connector->panel.
+ downclock_mode->vrefresh);
unlock:
mutex_unlock(&dev_priv->drrs.mutex);
@@ -5484,7 +5197,7 @@ unlock:
void intel_edp_drrs_invalidate(struct drm_device *dev,
unsigned frontbuffer_bits)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc;
enum pipe pipe;
@@ -5507,9 +5220,9 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
/* invalidate means busy screen hence upclock */
if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
- intel_dp_set_drrs_state(dev_priv->dev,
- dev_priv->drrs.dp->attached_connector->panel.
- fixed_mode->vrefresh);
+ intel_dp_set_drrs_state(&dev_priv->drm,
+ dev_priv->drrs.dp->attached_connector->panel.
+ fixed_mode->vrefresh);
mutex_unlock(&dev_priv->drrs.mutex);
}
@@ -5529,7 +5242,7 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
void intel_edp_drrs_flush(struct drm_device *dev,
unsigned frontbuffer_bits)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc;
enum pipe pipe;
@@ -5552,9 +5265,9 @@ void intel_edp_drrs_flush(struct drm_device *dev,
/* flush means busy screen hence upclock */
if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
- intel_dp_set_drrs_state(dev_priv->dev,
- dev_priv->drrs.dp->attached_connector->panel.
- fixed_mode->vrefresh);
+ intel_dp_set_drrs_state(&dev_priv->drm,
+ dev_priv->drrs.dp->attached_connector->panel.
+ fixed_mode->vrefresh);
/*
* flush also means no more activity hence schedule downclock, if all
@@ -5589,14 +5302,14 @@ void intel_edp_drrs_flush(struct drm_device *dev,
*
* DRRS saves power by switching to low RR based on usage scenarios.
*
- * eDP DRRS:-
- * The implementation is based on frontbuffer tracking implementation.
- * When there is a disturbance on the screen triggered by user activity or a
- * periodic system activity, DRRS is disabled (RR is changed to high RR).
- * When there is no movement on screen, after a timeout of 1 second, a switch
- * to low RR is made.
- * For integration with frontbuffer tracking code,
- * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
+ * The implementation is based on frontbuffer tracking. When
+ * there is a disturbance on the screen triggered by user activity or a periodic
+ * system activity, DRRS is disabled (RR is changed to high RR). When there is
+ * no movement on screen, after a timeout of 1 second, a switch to low RR is
+ * made.
+ *
+ * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
+ * and intel_edp_drrs_flush() are called.
*
* DRRS can be further extended to support other internal panels and also
* the scenario of video playback wherein RR is set based on the rate
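/*
 * Editorial sketch (not from this patch): how a frontbuffer-tracking client
 * is expected to drive DRRS. my_frontbuffer_dirty() is hypothetical; the two
 * entry points and their signatures are taken from the hunks above.
 */
static void my_frontbuffer_dirty(struct drm_device *dev,
				 unsigned frontbuffer_bits)
{
	/* screen activity: switch back to the high refresh rate at once */
	intel_edp_drrs_invalidate(dev, frontbuffer_bits);

	/* ... rendering to the frontbuffer happens here ... */

	/* activity done: re-arm the 1 second timer that downclocks to low RR */
	intel_edp_drrs_flush(dev, frontbuffer_bits);
}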
@@ -5622,7 +5335,7 @@ intel_dp_drrs_init(struct intel_connector *intel_connector,
{
struct drm_connector *connector = &intel_connector->base;
struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_display_mode *downclock_mode = NULL;
INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
@@ -5660,7 +5373,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_display_mode *fixed_mode = NULL;
struct drm_display_mode *downclock_mode = NULL;
bool has_dpcd;
@@ -5671,8 +5384,32 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (!is_edp(intel_dp))
return true;
+ /*
+ * On IBX/CPT we may get here with LVDS already registered. Since the
+ * driver uses the only internal power sequencer available for both
+	 * eDP and LVDS, bail out early in this case to prevent interfering
+ * with an already powered-on LVDS power sequencer.
+ */
+ if (intel_get_lvds_encoder(dev)) {
+ WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
+ DRM_INFO("LVDS was detected, not registering eDP\n");
+
+ return false;
+ }
+
pps_lock(intel_dp);
+
+ intel_dp_init_panel_power_timestamps(intel_dp);
+
+ if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ vlv_initial_power_sequencer_setup(intel_dp);
+ } else {
+ intel_dp_init_panel_power_sequencer(dev, intel_dp);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+ }
+
intel_edp_panel_vdd_sanitize(intel_dp);
+
pps_unlock(intel_dp);
/* Cache DPCD and EDID for edp. */
@@ -5686,14 +5423,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
} else {
/* if this fails, presume the device is a ghost */
DRM_INFO("failed to retrieve link info, disabling eDP\n");
- return false;
+ goto out_vdd_off;
}
- /* We now know it's not a ghost, init power sequence regs. */
- pps_lock(intel_dp);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
- pps_unlock(intel_dp);
-
mutex_lock(&dev->mode_config.mutex);
edid = drm_get_edid(connector, &intel_dp->aux.ddc);
if (edid) {
@@ -5761,6 +5493,18 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
intel_panel_setup_backlight(connector, pipe);
return true;
+
+out_vdd_off:
+ cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
+ /*
+	 * vdd might still be enabled due to the delayed vdd off.
+ * Make sure vdd is actually turned off here.
+ */
+ pps_lock(intel_dp);
+ edp_panel_vdd_off_sync(intel_dp);
+ pps_unlock(intel_dp);
+
+ return false;
}
bool
@@ -5771,9 +5515,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_dig_port->port;
- int type, ret;
+ int type;
if (WARN(intel_dig_port->max_lanes < 1,
"Not enough lanes (%d) for DP on port %c\n",
@@ -5832,17 +5576,17 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
+ intel_dp_aux_init(intel_dp, intel_connector);
+
INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
edp_panel_vdd_work);
intel_connector_attach_encoder(intel_connector, intel_encoder);
- drm_connector_register(connector);
if (HAS_DDI(dev))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
- intel_connector->unregister = intel_dp_connector_unregister;
/* Set up the hotplug pin. */
switch (port) {
@@ -5867,22 +5611,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
BUG();
}
- if (is_edp(intel_dp)) {
- pps_lock(intel_dp);
- intel_dp_init_panel_power_timestamps(intel_dp);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
- vlv_initial_power_sequencer_setup(intel_dp);
- else
- intel_dp_init_panel_power_sequencer(dev, intel_dp);
- pps_unlock(intel_dp);
- }
-
- ret = intel_dp_aux_init(intel_dp, intel_connector);
- if (ret)
- goto fail;
-
/* init MST on ports that can support it */
- if (HAS_DP_MST(dev) &&
+ if (HAS_DP_MST(dev) && !is_edp(intel_dp) &&
(port == PORT_B || port == PORT_C || port == PORT_D))
intel_dp_mst_encoder_init(intel_dig_port,
intel_connector->base.base.id);
@@ -5904,22 +5634,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
- i915_debugfs_connector_add(connector);
-
return true;
fail:
- if (is_edp(intel_dp)) {
- cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
- /*
- * vdd might still be enabled do to the delayed vdd off.
- * Make sure vdd is actually turned off here.
- */
- pps_lock(intel_dp);
- edp_panel_vdd_off_sync(intel_dp);
- pps_unlock(intel_dp);
- }
- drm_connector_unregister(connector);
drm_connector_cleanup(connector);
return false;
@@ -5929,7 +5646,7 @@ bool intel_dp_init(struct drm_device *dev,
i915_reg_t output_reg,
enum port port)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
@@ -5947,7 +5664,7 @@ bool intel_dp_init(struct drm_device *dev,
encoder = &intel_encoder->base;
if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
- DRM_MODE_ENCODER_TMDS, NULL))
+ DRM_MODE_ENCODER_TMDS, "DP %c", port_name(port)))
goto err_encoder_init;
intel_encoder->compute_config = intel_dp_compute_config;
@@ -5977,7 +5694,7 @@ bool intel_dp_init(struct drm_device *dev,
intel_dig_port->dp.output_reg = output_reg;
intel_dig_port->max_lanes = 4;
- intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ intel_encoder->type = INTEL_OUTPUT_DP;
if (IS_CHERRYVIEW(dev)) {
if (port == PORT_D)
intel_encoder->crtc_mask = 1 << 2;
@@ -6007,43 +5724,35 @@ err_connector_alloc:
void intel_dp_mst_suspend(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int i;
/* disable MST */
for (i = 0; i < I915_MAX_PORTS; i++) {
struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
- if (!intel_dig_port)
+
+ if (!intel_dig_port || !intel_dig_port->dp.can_mst)
continue;
- if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
- if (!intel_dig_port->dp.can_mst)
- continue;
- if (intel_dig_port->dp.is_mst)
- drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
- }
+ if (intel_dig_port->dp.is_mst)
+ drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
}
}
void intel_dp_mst_resume(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int i;
for (i = 0; i < I915_MAX_PORTS; i++) {
struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
- if (!intel_dig_port)
- continue;
- if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
- int ret;
+ int ret;
- if (!intel_dig_port->dp.can_mst)
- continue;
+ if (!intel_dig_port || !intel_dig_port->dp.can_mst)
+ continue;
- ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
- if (ret != 0) {
- intel_dp_check_mst_status(&intel_dig_port->dp);
- }
- }
+ ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
+ if (ret)
+ intel_dp_check_mst_status(&intel_dig_port->dp);
}
}
diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
new file mode 100644
index 000000000..6532e226d
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "intel_drv.h"
+
+static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
+{
+ uint8_t reg_val = 0;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
+ &reg_val) < 0) {
+ DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
+ DP_EDP_DISPLAY_CONTROL_REGISTER);
+ return;
+ }
+ if (enable)
+ reg_val |= DP_EDP_BACKLIGHT_ENABLE;
+ else
+ reg_val &= ~(DP_EDP_BACKLIGHT_ENABLE);
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
+ reg_val) != 1) {
+ DRM_DEBUG_KMS("Failed to %s aux backlight\n",
+ enable ? "enable" : "disable");
+ }
+}
+
+/*
+ * Read the current backlight value from the DPCD register(s), based
+ * on whether 8-bit (MSB) or 16-bit (MSB and LSB) values are supported
+ */
+static uint32_t intel_dp_aux_get_backlight(struct intel_connector *connector)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ uint8_t read_val[2] = { 0x0 };
+ uint16_t level = 0;
+
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
+ &read_val, sizeof(read_val)) < 0) {
+ DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
+ DP_EDP_BACKLIGHT_BRIGHTNESS_MSB);
+ return 0;
+ }
+ level = read_val[0];
+ if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
+ level = (read_val[0] << 8 | read_val[1]);
+
+ return level;
+}
+
+/*
+ * Sends the current backlight level over the aux channel, checking whether
+ * it is using an 8-bit or a 16-bit (MSB and LSB) value
+ */
+static void
+intel_dp_aux_set_backlight(struct intel_connector *connector, u32 level)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ uint8_t vals[2] = { 0x0 };
+
+ vals[0] = level;
+
+ /* Write the MSB and/or LSB */
+ if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) {
+ vals[0] = (level & 0xFF00) >> 8;
+ vals[1] = (level & 0xFF);
+ }
+ if (drm_dp_dpcd_write(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
+ vals, sizeof(vals)) < 0) {
+ DRM_DEBUG_KMS("Failed to write aux backlight level\n");
+ return;
+ }
+}
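/*
 * Editorial illustration: with DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT set,
 * a 16-bit level such as 0x1234 is split MSB-first before the AUX write:
 *
 *	vals[0] = (0x1234 & 0xFF00) >> 8;	// 0x12 -> ..._BRIGHTNESS_MSB
 *	vals[1] = (0x1234 & 0xFF);		// 0x34 -> the LSB register
 *
 * Without that cap, only the single-byte path runs and vals[0] = level.
 */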
+
+static void intel_dp_aux_enable_backlight(struct intel_connector *connector)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ uint8_t dpcd_buf = 0;
+
+ set_aux_backlight_enable(intel_dp, true);
+
+ if ((drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) == 1) &&
+ ((dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK) ==
+ DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET))
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
+ (dpcd_buf | DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD));
+}
+
+static void intel_dp_aux_disable_backlight(struct intel_connector *connector)
+{
+ set_aux_backlight_enable(enc_to_intel_dp(&connector->encoder->base), false);
+}
+
+static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
+ enum pipe pipe)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ struct intel_panel *panel = &connector->panel;
+
+ intel_dp_aux_enable_backlight(connector);
+
+ if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
+ panel->backlight.max = 0xFFFF;
+ else
+ panel->backlight.max = 0xFF;
+
+ panel->backlight.min = 0;
+ panel->backlight.level = intel_dp_aux_get_backlight(connector);
+
+ panel->backlight.enabled = panel->backlight.level != 0;
+
+ return 0;
+}
+
+static bool
+intel_dp_aux_display_control_capable(struct intel_connector *connector)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+
+ /* Check the eDP Display control capabilities registers to determine if
+ * the panel can support backlight control over the aux channel
+ */
+ if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP &&
+ (intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) &&
+ !((intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_PIN_ENABLE_CAP) ||
+ (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP))) {
+ DRM_DEBUG_KMS("AUX Backlight Control Supported!\n");
+ return true;
+ }
+ return false;
+}
+
+int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
+{
+ struct intel_panel *panel = &intel_connector->panel;
+
+ if (!i915.enable_dpcd_backlight)
+ return -ENODEV;
+
+ if (!intel_dp_aux_display_control_capable(intel_connector))
+ return -ENODEV;
+
+ panel->backlight.setup = intel_dp_aux_setup_backlight;
+ panel->backlight.enable = intel_dp_aux_enable_backlight;
+ panel->backlight.disable = intel_dp_aux_disable_backlight;
+ panel->backlight.set = intel_dp_aux_set_backlight;
+ panel->backlight.get = intel_dp_aux_get_backlight;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 7a34090ce..68a005d72 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -47,7 +47,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
pipe_config->dp_encoder_is_mst = true;
pipe_config->has_pch_encoder = false;
- pipe_config->has_dp_encoder = true;
bpp = 24;
/*
* for MST we always configure max link bw - the spec doesn't
@@ -140,7 +139,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_dig_port->port;
int ret;
uint32_t temp;
@@ -207,14 +206,17 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder)
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_dig_port->port;
int ret;
DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
- if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_ACT_SENT),
- 1))
+ if (intel_wait_for_register(dev_priv,
+ DP_TP_STATUS(port),
+ DP_TP_STATUS_ACT_SENT,
+ DP_TP_STATUS_ACT_SENT,
+ 1))
DRM_ERROR("Timed out waiting for ACT sent\n");
ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
@@ -239,12 +241,10 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
u32 temp, flags = 0;
- pipe_config->has_dp_encoder = true;
-
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (temp & TRANS_DDI_PHSYNC)
flags |= DRM_MODE_FLAG_PHSYNC;
@@ -336,6 +336,8 @@ static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dp_mst_set_property,
.atomic_get_property = intel_connector_atomic_get_property,
+ .late_register = intel_connector_register,
+ .early_unregister = intel_connector_unregister,
.destroy = intel_dp_mst_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -455,7 +457,6 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort);
drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
- intel_connector->unregister = intel_connector_unregister;
intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
intel_connector->mst_port = intel_dp;
intel_connector->port = port;
@@ -477,9 +478,11 @@ static void intel_dp_register_mst_connector(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
+
drm_modeset_lock_all(dev);
intel_connector_add_to_fbdev(intel_connector);
drm_modeset_unlock_all(dev);
+
drm_connector_register(&intel_connector->base);
}
@@ -489,7 +492,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
- intel_connector->unregister(intel_connector);
+ drm_connector_unregister(connector);
/* need to nuke the connector */
drm_modeset_lock_all(dev);
@@ -534,7 +537,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
intel_mst->primary = intel_dig_port;
drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
- DRM_MODE_ENCODER_DPMST, NULL);
+ DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe));
intel_encoder->type = INTEL_OUTPUT_DP_MST;
intel_encoder->crtc_mask = 0x7;
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
new file mode 100644
index 000000000..047f48748
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -0,0 +1,470 @@
+/*
+ * Copyright © 2014-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "intel_drv.h"
+
+void chv_set_phy_signal_level(struct intel_encoder *encoder,
+ u32 deemph_reg_value, u32 margin_reg_value,
+ bool uniq_trans_scale)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
+ enum dpio_channel ch = vlv_dport_to_channel(dport);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 val;
+ int i;
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ /* Clear calc init */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+ val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+ val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+
+ if (intel_crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+ val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+ val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+ val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+ }
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
+ val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+ val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
+
+ if (intel_crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
+ val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+ val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
+ }
+
+ /* Program swing deemph */
+ for (i = 0; i < intel_crtc->config->lane_count; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
+ val &= ~DPIO_SWING_DEEMPH9P5_MASK;
+ val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
+ }
+
+ /* Program swing margin */
+ for (i = 0; i < intel_crtc->config->lane_count; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
+
+ val &= ~DPIO_SWING_MARGIN000_MASK;
+ val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
+
+ /*
+ * Supposedly this value shouldn't matter when unique transition
+ * scale is disabled, but in fact it does matter. Let's just
+ * always program the same value and hope it's OK.
+ */
+ val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
+ val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
+
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
+ }
+
+ /*
+	 * The document says bit 27 needs to be set for ch0 and bit 26
+	 * for ch1; that might be a typo in the doc.
+ * For now, for this unique transition scale selection, set bit
+ * 27 for ch0 and ch1.
+ */
+ for (i = 0; i < intel_crtc->config->lane_count; i++) {
+ val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
+ if (uniq_trans_scale)
+ val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
+ else
+ val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
+ }
+
+ /* Start swing calculation */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+ val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+
+ if (intel_crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+ val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+ }
+
+ mutex_unlock(&dev_priv->sb_lock);
+}
+
+void chv_data_lane_soft_reset(struct intel_encoder *encoder,
+ bool reset)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ enum pipe pipe = crtc->pipe;
+ uint32_t val;
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+ if (reset)
+ val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ else
+ val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
+
+ if (crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
+ if (reset)
+ val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+ else
+ val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+ }
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ if (reset)
+ val &= ~DPIO_PCS_CLK_SOFT_RESET;
+ else
+ val |= DPIO_PCS_CLK_SOFT_RESET;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
+
+ if (crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
+ val |= CHV_PCS_REQ_SOFTRESET_EN;
+ if (reset)
+ val &= ~DPIO_PCS_CLK_SOFT_RESET;
+ else
+ val |= DPIO_PCS_CLK_SOFT_RESET;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+ }
+}
+
+void chv_phy_pre_pll_enable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel ch = vlv_dport_to_channel(dport);
+ enum pipe pipe = intel_crtc->pipe;
+ unsigned int lane_mask =
+ intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
+ u32 val;
+
+ /*
+ * Must trick the second common lane into life.
+ * Otherwise we can't even access the PLL.
+ */
+ if (ch == DPIO_CH0 && pipe == PIPE_B)
+ dport->release_cl2_override =
+ !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
+
+ chv_phy_powergate_lanes(encoder, true, lane_mask);
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ /* Assert data lane reset */
+ chv_data_lane_soft_reset(encoder, true);
+
+ /* program left/right clock distribution */
+ if (pipe != PIPE_B) {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+ val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+ if (ch == DPIO_CH0)
+ val |= CHV_BUFLEFTENA1_FORCE;
+ if (ch == DPIO_CH1)
+ val |= CHV_BUFRIGHTENA1_FORCE;
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+ } else {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+ val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+ if (ch == DPIO_CH0)
+ val |= CHV_BUFLEFTENA2_FORCE;
+ if (ch == DPIO_CH1)
+ val |= CHV_BUFRIGHTENA2_FORCE;
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+ }
+
+ /* program clock channel usage */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
+ val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe != PIPE_B)
+ val &= ~CHV_PCS_USEDCLKCHANNEL;
+ else
+ val |= CHV_PCS_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
+
+ if (intel_crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
+ val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe != PIPE_B)
+ val &= ~CHV_PCS_USEDCLKCHANNEL;
+ else
+ val |= CHV_PCS_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
+ }
+
+ /*
+	 * This is a bit weird since generally CL
+ * matches the pipe, but here we need to
+ * pick the CL based on the port.
+ */
+ val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
+ if (pipe != PIPE_B)
+ val &= ~CHV_CMN_USEDCLKCHANNEL;
+ else
+ val |= CHV_CMN_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
+
+ mutex_unlock(&dev_priv->sb_lock);
+}
+
+void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel ch = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
+ int data, i, stagger;
+ u32 val;
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ /* allow hardware to manage TX FIFO reset source */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+ val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+
+ if (intel_crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+ val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+ }
+
+	/* Program Tx lane latency optimal setting */
+ for (i = 0; i < intel_crtc->config->lane_count; i++) {
+ /* Set the upar bit */
+ if (intel_crtc->config->lane_count == 1)
+ data = 0x0;
+ else
+ data = (i == 1) ? 0x0 : 0x1;
+ vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
+ data << DPIO_UPAR_SHIFT);
+ }
+
+ /* Data lane stagger programming */
+ if (intel_crtc->config->port_clock > 270000)
+ stagger = 0x18;
+ else if (intel_crtc->config->port_clock > 135000)
+ stagger = 0xd;
+ else if (intel_crtc->config->port_clock > 67500)
+ stagger = 0x7;
+ else if (intel_crtc->config->port_clock > 33750)
+ stagger = 0x4;
+ else
+ stagger = 0x2;
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+ val |= DPIO_TX2_STAGGER_MASK(0x1f);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+
+ if (intel_crtc->config->lane_count > 2) {
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+ val |= DPIO_TX2_STAGGER_MASK(0x1f);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+ }
+
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
+ DPIO_LANESTAGGER_STRAP(stagger) |
+ DPIO_LANESTAGGER_STRAP_OVRD |
+ DPIO_TX1_STAGGER_MASK(0x1f) |
+ DPIO_TX1_STAGGER_MULT(6) |
+ DPIO_TX2_STAGGER_MULT(0));
+
+ if (intel_crtc->config->lane_count > 2) {
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
+ DPIO_LANESTAGGER_STRAP(stagger) |
+ DPIO_LANESTAGGER_STRAP_OVRD |
+ DPIO_TX1_STAGGER_MASK(0x1f) |
+ DPIO_TX1_STAGGER_MULT(7) |
+ DPIO_TX2_STAGGER_MULT(5));
+ }
+
+ /* Deassert data lane reset */
+ chv_data_lane_soft_reset(encoder, false);
+
+ mutex_unlock(&dev_priv->sb_lock);
+}
+
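/*
 * Editor's sketch (not part of the patch): the data lane stagger chosen
 * above is a pure step function of port_clock (in kHz), with larger
 * stagger values at higher link rates. A hypothetical standalone helper
 * using the same thresholds:
 */
static inline u32 chv_data_lane_stagger(int port_clock)
{
	if (port_clock > 270000)
		return 0x18;
	else if (port_clock > 135000)
		return 0xd;
	else if (port_clock > 67500)
		return 0x7;
	else if (port_clock > 33750)
		return 0x4;
	else
		return 0x2;
}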
+void chv_phy_release_cl2_override(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (dport->release_cl2_override) {
+ chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
+ dport->release_cl2_override = false;
+ }
+}
+
+void chv_phy_post_pll_disable(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
+ u32 val;
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ /* disable left/right clock distribution */
+ if (pipe != PIPE_B) {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+ val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+ } else {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+ val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+ }
+
+ mutex_unlock(&dev_priv->sb_lock);
+
+ /*
+ * Leave the power down bit cleared for at least one
+	 * lane so that chv_phy_powergate_ch() will power
+ * on something when the channel is otherwise unused.
+ * When the port is off and the override is removed
+ * the lanes power down anyway, so otherwise it doesn't
+ * really matter what the state of power down bits is
+ * after this.
+ */
+ chv_phy_powergate_lanes(encoder, false, 0x0);
+}
+
+void vlv_set_phy_signal_level(struct intel_encoder *encoder,
+ u32 demph_reg_value, u32 preemph_reg_value,
+ u32 uniqtranscale_reg_value, u32 tx3_demph)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ enum dpio_channel port = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
+
+ mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
+ uniqtranscale_reg_value);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
+
+ if (tx3_demph)
+ vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), tx3_demph);
+
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
+ mutex_unlock(&dev_priv->sb_lock);
+}
+
+void vlv_phy_pre_pll_enable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel port = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
+
+ /* Program Tx lane resets to default */
+ mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
+ DPIO_PCS_TX_LANE2_RESET |
+ DPIO_PCS_TX_LANE1_RESET);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
+ DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
+ DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
+ (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
+ DPIO_PCS_CLK_SOFT_RESET);
+
+ /* Fix up inter-pair skew failure */
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
+ vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
+ mutex_unlock(&dev_priv->sb_lock);
+}
+
+void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel port = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
+ u32 val;
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ /* Enable clock channels for this port */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
+ val = 0;
+ if (pipe)
+ val |= (1<<21);
+ else
+ val &= ~(1<<21);
+ val |= 0x001000c4;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
+
+ /* Program lane clock */
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
+
+ mutex_unlock(&dev_priv->sb_lock);
+}
+
+void vlv_phy_reset_lanes(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel port = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
+
+ mutex_lock(&dev_priv->sb_lock);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
+ mutex_unlock(&dev_priv->sb_lock);
+}
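/*
 * Editor's sketch (not part of the patch): the VLV/CHV PHY helpers above
 * all share one sideband access pattern. A hypothetical generic helper,
 * assuming the vlv_dpio_read()/vlv_dpio_write() signatures used above:
 */
static void example_dpio_rmw(struct drm_i915_private *dev_priv,
			     enum pipe pipe, int reg, u32 clear, u32 set)
{
	u32 val;

	/* sb_lock serializes all sideband (DPIO) traffic */
	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	val &= ~clear;
	val |= set;
	vlv_dpio_write(dev_priv, pipe, reg, val);
	mutex_unlock(&dev_priv->sb_lock);
}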
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 58f60b278..5c1f2d235 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -83,7 +83,7 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
void intel_prepare_shared_dpll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_shared_dpll *pll = crtc->config->shared_dpll;
if (WARN_ON(pll == NULL))
@@ -112,7 +112,7 @@ void intel_prepare_shared_dpll(struct intel_crtc *crtc)
void intel_enable_shared_dpll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_shared_dpll *pll = crtc->config->shared_dpll;
unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
unsigned old_mask;
@@ -151,7 +151,7 @@ out:
void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_shared_dpll *pll = crtc->config->shared_dpll;
unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
@@ -191,7 +191,7 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
enum intel_dpll_id range_min,
enum intel_dpll_id range_max)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
struct intel_shared_dpll_config *shared_dpll;
enum intel_dpll_id i;
@@ -208,8 +208,8 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
if (memcmp(&crtc_state->dpll_hw_state,
&shared_dpll[i].hw_state,
sizeof(crtc_state->dpll_hw_state)) == 0) {
- DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, active %x)\n",
- crtc->base.base.id, pll->name,
+ DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
+ crtc->base.base.id, crtc->base.name, pll->name,
shared_dpll[i].crtc_mask,
pll->active_mask);
return pll;
@@ -220,8 +220,8 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
for (i = range_min; i <= range_max; i++) {
pll = &dev_priv->shared_dplls[i];
if (shared_dpll[i].crtc_mask == 0) {
- DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
- crtc->base.base.id, pll->name);
+ DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
+ crtc->base.base.id, crtc->base.name, pll->name);
return pll;
}
}
@@ -331,7 +331,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct intel_crtc *crtc;
/* Make sure no transcoder isn't still depending on us. */
@@ -358,8 +358,8 @@ ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
i = (enum intel_dpll_id) crtc->pipe;
pll = &dev_priv->shared_dplls[i];
- DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
- crtc->base.base.id, pll->name);
+ DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
+ crtc->base.base.id, crtc->base.name, pll->name);
} else {
pll = intel_find_shared_dpll(crtc, crtc_state,
DPLL_ID_PCH_PLL_A,
@@ -713,7 +713,7 @@ hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
pll = intel_find_shared_dpll(crtc, crtc_state,
DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
- } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ } else if (encoder->type == INTEL_OUTPUT_DP ||
encoder->type == INTEL_OUTPUT_DP_MST ||
encoder->type == INTEL_OUTPUT_EDP) {
enum intel_dpll_id pll_id;
@@ -856,7 +856,11 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(regs[pll->id].ctl,
I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE);
- if (wait_for(I915_READ(DPLL_STATUS) & DPLL_LOCK(pll->id), 5))
+ if (intel_wait_for_register(dev_priv,
+ DPLL_STATUS,
+ DPLL_LOCK(pll->id),
+ DPLL_LOCK(pll->id),
+ 5))
DRM_ERROR("DPLL %d not locked\n", pll->id);
}
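/*
 * Editor's note: intel_wait_for_register(), used above in place of the
 * open-coded wait_for(), polls until (I915_READ(reg) & mask) == value or
 * the timeout in milliseconds expires. A minimal sketch of that contract
 * (hypothetical helper name; the real implementation also has fast-spin
 * and hardirq-safe paths that this ignores):
 */
static int example_poll_register(struct drm_i915_private *dev_priv,
				 i915_reg_t reg, u32 mask, u32 value,
				 unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		if ((I915_READ(reg) & mask) == value)
			return 0;		/* condition met */
		usleep_range(10, 50);		/* brief backoff */
	} while (time_before(jiffies, deadline));

	return -ETIMEDOUT;
}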
@@ -1222,7 +1226,7 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
wrpll_params.central_freq;
- } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ } else if (encoder->type == INTEL_OUTPUT_DP ||
encoder->type == INTEL_OUTPUT_DP_MST ||
encoder->type == INTEL_OUTPUT_EDP) {
switch (crtc_state->port_clock / 2) {
@@ -1239,9 +1243,6 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
case 162000:
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
break;
- /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
- results in CDCLK change. Need to handle the change of CDCLK by
- disabling pipes and re-enabling them */
case 108000:
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
break;
@@ -1511,7 +1512,7 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
int clock = crtc_state->port_clock;
if (encoder->type == INTEL_OUTPUT_HDMI) {
- intel_clock_t best_clock;
+ struct dpll best_clock;
/* Calculate HDMI div */
/*
@@ -1533,7 +1534,7 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
clk_div.m2_frac_en = clk_div.m2_frac != 0;
vco = best_clock.vco;
- } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ } else if (encoder->type == INTEL_OUTPUT_DP ||
encoder->type == INTEL_OUTPUT_EDP) {
int i;
@@ -1616,8 +1617,8 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
i = (enum intel_dpll_id) intel_dig_port->port;
pll = intel_get_shared_dpll_by_id(dev_priv, i);
- DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
- crtc->base.base.id, pll->name);
+ DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
+ crtc->base.base.id, crtc->base.name, pll->name);
intel_reference_shared_dpll(pll, crtc_state);
@@ -1635,19 +1636,11 @@ static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
static void intel_ddi_pll_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t val = I915_READ(LCPLL_CTL);
-
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
- int cdclk_freq;
-
- cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
- dev_priv->skl_boot_cdclk = cdclk_freq;
- if (skl_sanitize_cdclk(dev_priv))
- DRM_DEBUG_KMS("Sanitized cdclk programmed by pre-os\n");
- if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
- DRM_ERROR("LCPLL1 is disabled\n");
- } else if (!IS_BROXTON(dev_priv)) {
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ if (INTEL_GEN(dev_priv) < 9) {
+ uint32_t val = I915_READ(LCPLL_CTL);
+
/*
* The LCPLL register should be turned on by the BIOS. For now
* let's just check its state and print errors in case
@@ -1730,7 +1723,7 @@ static const struct intel_dpll_mgr bxt_pll_mgr = {
void intel_shared_dpll_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
const struct intel_dpll_mgr *dpll_mgr = NULL;
const struct dpll_info *dpll_info;
int i;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 94144a70b..ff399b9a5 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -69,39 +69,63 @@
})
#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 1000)
-#define wait_for_us(COND, US) _wait_for((COND), (US), 1)
/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
-# define _WAIT_FOR_ATOMIC_CHECK WARN_ON_ONCE(!in_atomic())
+# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
#else
-# define _WAIT_FOR_ATOMIC_CHECK do { } while (0)
+# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
#endif
-#define _wait_for_atomic(COND, US) ({ \
- unsigned long end__; \
- int ret__ = 0; \
- _WAIT_FOR_ATOMIC_CHECK; \
+#define _wait_for_atomic(COND, US, ATOMIC) \
+({ \
+ int cpu, ret, timeout = (US) * 1000; \
+ u64 base; \
+ _WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
BUILD_BUG_ON((US) > 50000); \
- end__ = (local_clock() >> 10) + (US) + 1; \
- while (!(COND)) { \
- if (time_after((unsigned long)(local_clock() >> 10), end__)) { \
- /* Unlike the regular wait_for(), this atomic variant \
- * cannot be preempted (and we'll just ignore the issue\
- * of irq interruptions) and so we know that no time \
- * has passed since the last check of COND and can \
- * immediately report the timeout. \
- */ \
- ret__ = -ETIMEDOUT; \
+ if (!(ATOMIC)) { \
+ preempt_disable(); \
+ cpu = smp_processor_id(); \
+ } \
+ base = local_clock(); \
+ for (;;) { \
+ u64 now = local_clock(); \
+ if (!(ATOMIC)) \
+ preempt_enable(); \
+ if (COND) { \
+ ret = 0; \
+ break; \
+ } \
+ if (now - base >= timeout) { \
+ ret = -ETIMEDOUT; \
break; \
} \
cpu_relax(); \
+ if (!(ATOMIC)) { \
+ preempt_disable(); \
+ if (unlikely(cpu != smp_processor_id())) { \
+ timeout -= now - base; \
+ cpu = smp_processor_id(); \
+ base = local_clock(); \
+ } \
+ } \
} \
+ ret; \
+})
+
+#define wait_for_us(COND, US) \
+({ \
+ int ret__; \
+ BUILD_BUG_ON(!__builtin_constant_p(US)); \
+ if ((US) > 10) \
+ ret__ = _wait_for((COND), (US), 10); \
+ else \
+ ret__ = _wait_for_atomic((COND), (US), 0); \
ret__; \
})
-#define wait_for_atomic(COND, MS) _wait_for_atomic((COND), (MS) * 1000)
-#define wait_for_atomic_us(COND, US) _wait_for_atomic((COND), (US))
+#define wait_for_atomic(COND, MS) _wait_for_atomic((COND), (MS) * 1000, 1)
+#define wait_for_atomic_us(COND, US) _wait_for_atomic((COND), (US), 1)
#define KHz(x) (1000 * (x))
#define MHz(x) KHz(1000 * (x))
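/*
 * Editor's note: the reworked _wait_for_atomic() above compensates for
 * CPU migration on the non-atomic path by re-basing its local_clock()
 * timebase whenever smp_processor_id() changes between polls, and the
 * BUILD_BUG_ON bounds the busy-wait to 50ms. A hedged usage sketch
 * (register and bit chosen for illustration only):
 */
static inline int example_wait_pcode_ready(struct drm_i915_private *dev_priv)
{
	/* Spin for at most 10us, in atomic context, for the bit to clear. */
	return wait_for_atomic_us((I915_READ(GEN6_PCODE_MAILBOX) &
				   GEN6_PCODE_READY) == 0, 10);
}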
@@ -135,7 +159,7 @@ enum intel_output_type {
INTEL_OUTPUT_LVDS = 4,
INTEL_OUTPUT_TVOUT = 5,
INTEL_OUTPUT_HDMI = 6,
- INTEL_OUTPUT_DISPLAYPORT = 7,
+ INTEL_OUTPUT_DP = 7,
INTEL_OUTPUT_EDP = 8,
INTEL_OUTPUT_DSI = 9,
INTEL_OUTPUT_UNKNOWN = 10,
@@ -159,6 +183,7 @@ struct intel_framebuffer {
struct intel_fbdev {
struct drm_fb_helper helper;
struct intel_framebuffer *fb;
+ async_cookie_t cookie;
int preferred_bpp;
};
@@ -242,14 +267,6 @@ struct intel_connector {
* and active (i.e. dpms ON state). */
bool (*get_hw_state)(struct intel_connector *);
- /*
- * Removes all interfaces through which the connector is accessible
- * - like sysfs, debugfs entries -, so that no new operations can be
- * started on the connector. Also makes sure all currently pending
- * operations finish before returing.
- */
- void (*unregister)(struct intel_connector *);
-
/* Panel info for eDP and LVDS */
struct intel_panel panel;
@@ -266,7 +283,7 @@ struct intel_connector {
struct intel_dp *mst_port;
};
-typedef struct dpll {
+struct dpll {
/* given values */
int n;
int m1, m2;
@@ -276,7 +293,7 @@ typedef struct dpll {
int vco;
int m;
int p;
-} intel_clock_t;
+};
struct intel_atomic_state {
struct drm_atomic_state base;
@@ -291,17 +308,32 @@ struct intel_atomic_state {
bool dpll_set, modeset;
+	/*
+	 * Does this transaction change the pipes that are active? This mask
+	 * tracks which CRTCs have changed their active state at the end of
+	 * the transaction (not counting the temporary disable during modesets).
+	 * This mask should only be non-zero when intel_state->modeset is true,
+	 * but the converse is not necessarily true; simply changing a mode may
+	 * not flip the final active status of any CRTCs.
+	 */
+ unsigned int active_pipe_changes;
+
unsigned int active_crtcs;
unsigned int min_pixclk[I915_MAX_PIPES];
+ /* SKL/KBL Only */
+ unsigned int cdclk_pll_vco;
+
struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
- struct intel_wm_config wm_config;
/*
* Current watermarks can't be trusted during hardware readout, so
* don't bother calculating intermediate watermarks.
*/
bool skip_intermediate_wm;
+
+ /* Gen9+ only */
+ struct skl_wm_values wm_results;
};
struct intel_plane_state {
@@ -405,6 +437,48 @@ struct skl_pipe_wm {
uint32_t linetime;
};
+struct intel_crtc_wm_state {
+ union {
+ struct {
+ /*
+ * Intermediate watermarks; these can be
+ * programmed immediately since they satisfy
+ * both the current configuration we're
+ * switching away from and the new
+ * configuration we're switching to.
+ */
+ struct intel_pipe_wm intermediate;
+
+ /*
+ * Optimal watermarks, programmed post-vblank
+ * when this state is committed.
+ */
+ struct intel_pipe_wm optimal;
+ } ilk;
+
+ struct {
+ /* gen9+ only needs 1-step wm programming */
+ struct skl_pipe_wm optimal;
+
+ /* cached plane data rate */
+ unsigned plane_data_rate[I915_MAX_PLANES];
+ unsigned plane_y_data_rate[I915_MAX_PLANES];
+
+ /* minimum block allocation */
+ uint16_t minimum_blocks[I915_MAX_PLANES];
+ uint16_t minimum_y_blocks[I915_MAX_PLANES];
+ } skl;
+ };
+
+ /*
+ * Platforms with two-step watermark programming will need to
+ * update watermark programming post-vblank to switch from the
+ * safe intermediate watermarks to the optimal final
+ * watermarks.
+ */
+ bool need_postvbl_update;
+};
+
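/*
 * Editor's note: with the union above, the ILK-style two-step flow is
 * roughly "program wm.ilk.intermediate before the update (safe for both
 * the old and the new configuration), then program wm.ilk.optimal
 * post-vblank when need_postvbl_update is set", while gen9+ programs
 * wm.skl.optimal in a single step.
 */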
struct intel_crtc_state {
struct drm_crtc_state base;
@@ -448,12 +522,10 @@ struct intel_crtc_state {
*/
bool limited_color_range;
- /* DP has a bunch of special case unfortunately, so mark the pipe
- * accordingly. */
- bool has_dp_encoder;
-
- /* DSI has special cases */
- bool has_dsi_encoder;
+ /* Bitmask of encoder types (enum intel_output_type)
+ * driven by the pipe.
+ */
+ unsigned int output_types;
/* Whether we should send NULL infoframes. Required for audio. */
bool has_hdmi_sink;
@@ -522,6 +594,12 @@ struct intel_crtc_state {
uint8_t lane_count;
+ /*
+ * Used by platforms having DP/HDMI PHY with programmable lane
+ * latency optimization.
+ */
+ uint8_t lane_lat_optim_mask;
+
/* Panel fitter controls for gen2-gen4 + VLV */
struct {
u32 control;
@@ -558,32 +636,7 @@ struct intel_crtc_state {
/* IVB sprite scaling w/a (WaCxSRDisabledForSpriteScaling:ivb) */
bool disable_lp_wm;
- struct {
- /*
- * Optimal watermarks, programmed post-vblank when this state
- * is committed.
- */
- union {
- struct intel_pipe_wm ilk;
- struct skl_pipe_wm skl;
- } optimal;
-
- /*
- * Intermediate watermarks; these can be programmed immediately
- * since they satisfy both the current configuration we're
- * switching away from and the new configuration we're switching
- * to.
- */
- struct intel_pipe_wm intermediate;
-
- /*
- * Platforms with two-step watermark programming will need to
- * update watermark programming post-vblank to switch from the
- * safe intermediate watermarks to the optimal final
- * watermarks.
- */
- bool need_postvbl_update;
- } wm;
+ struct intel_crtc_wm_state wm;
/* Gamma mode programmed on the pipe */
uint32_t gamma_mode;
@@ -598,14 +651,6 @@ struct vlv_wm_state {
bool cxsr;
};
-struct intel_mmio_flip {
- struct work_struct work;
- struct drm_i915_private *i915;
- struct drm_i915_gem_request *req;
- struct intel_crtc *crtc;
- unsigned int rotation;
-};
-
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
@@ -620,7 +665,7 @@ struct intel_crtc {
unsigned long enabled_power_domains;
bool lowfreq_avail;
struct intel_overlay *overlay;
- struct intel_unpin_work *unpin_work;
+ struct intel_flip_work *flip_work;
atomic_t unpin_work_count;
@@ -815,6 +860,7 @@ struct intel_dp {
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
+ uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
/* sink rates as reported by DP_SUPPORTED_LINK_RATES */
uint8_t num_sink_rates;
int sink_rates[DP_MAX_SUPPORTED_RATES];
@@ -838,6 +884,11 @@ struct intel_dp {
* this port. Only relevant on VLV/CHV.
*/
enum pipe pps_pipe;
+ /*
+ * Set if the sequencer may be reset due to a power transition,
+ * requiring a reinitialization. Only relevant on BXT.
+ */
+ bool pps_reset;
struct edp_power_seq pps_delays;
bool can_mst; /* this port supports mst */
@@ -934,33 +985,32 @@ vlv_pipe_to_channel(enum pipe pipe)
static inline struct drm_crtc *
intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
return dev_priv->pipe_to_crtc_mapping[pipe];
}
static inline struct drm_crtc *
intel_get_crtc_for_plane(struct drm_device *dev, int plane)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
return dev_priv->plane_to_crtc_mapping[plane];
}
-struct intel_unpin_work {
- struct work_struct work;
+struct intel_flip_work {
+ struct work_struct unpin_work;
+ struct work_struct mmio_work;
+
struct drm_crtc *crtc;
struct drm_framebuffer *old_fb;
struct drm_i915_gem_object *pending_flip_obj;
struct drm_pending_vblank_event *event;
atomic_t pending;
-#define INTEL_FLIP_INACTIVE 0
-#define INTEL_FLIP_PENDING 1
-#define INTEL_FLIP_COMPLETE 2
u32 flip_count;
u32 gtt_offset;
struct drm_i915_gem_request *flip_queued_req;
u32 flip_queued_vblank;
u32 flip_ready_vblank;
- bool enable_stall_check;
+ unsigned int rotation;
};
struct intel_load_detect_pipe {
@@ -1029,9 +1079,9 @@ void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void gen6_reset_rps_interrupts(struct drm_device *dev);
-void gen6_enable_rps_interrupts(struct drm_device *dev);
-void gen6_disable_rps_interrupts(struct drm_device *dev);
+void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
+void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
+void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask);
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
@@ -1110,14 +1160,16 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv);
void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
/* intel_display.c */
+void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco);
+void intel_update_rawclk(struct drm_i915_private *dev_priv);
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
const char *name, u32 reg, int ref_freq);
extern const struct drm_plane_funcs intel_plane_funcs;
void intel_init_display_hooks(struct drm_i915_private *dev_priv);
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
bool intel_has_pending_fb_unpin(struct drm_device *dev);
-void intel_mark_busy(struct drm_device *dev);
-void intel_mark_idle(struct drm_device *dev);
+void intel_mark_busy(struct drm_i915_private *dev_priv);
+void intel_mark_idle(struct drm_i915_private *dev_priv);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
int intel_display_suspend(struct drm_device *dev);
void intel_encoder_destroy(struct drm_encoder *encoder);
@@ -1126,7 +1178,6 @@ struct intel_connector *intel_connector_alloc(void);
bool intel_connector_get_hw_state(struct intel_connector *connector);
void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder);
-struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc);
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
@@ -1134,7 +1185,20 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe);
-bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type);
+static inline bool
+intel_crtc_has_type(const struct intel_crtc_state *crtc_state,
+ enum intel_output_type type)
+{
+ return crtc_state->output_types & (1 << type);
+}
+static inline bool
+intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->output_types &
+ ((1 << INTEL_OUTPUT_DP) |
+ (1 << INTEL_OUTPUT_DP_MST) |
+ (1 << INTEL_OUTPUT_EDP));
+}
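/*
 * Editor's sketch (not part of the patch): output_types replaces the
 * removed has_dp_encoder/has_dsi_encoder booleans with a single bitmask
 * over enum intel_output_type. A hypothetical helper showing how an
 * encoder's compute_config hook would participate:
 */
static inline void example_mark_output_type(struct intel_crtc_state *crtc_state,
					    const struct intel_encoder *encoder)
{
	/* Record that this encoder type drives the pipe. */
	crtc_state->output_types |= 1 << encoder->type;
}
/*
 * After that, checks reduce to mask tests such as
 * intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) or
 * intel_crtc_has_dp_encoder(crtc_state).
 */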
static inline void
intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
@@ -1149,6 +1213,9 @@ intel_wait_for_vblank_if_active(struct drm_device *dev, int pipe)
if (crtc->active)
intel_wait_for_vblank(dev, pipe);
}
+
+u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
+
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dport,
@@ -1162,14 +1229,14 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx);
int intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
unsigned int rotation);
+void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj);
-void intel_prepare_page_flip(struct drm_device *dev, int plane);
-void intel_finish_page_flip(struct drm_device *dev, int pipe);
-void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
-void intel_check_page_flip(struct drm_device *dev, int pipe);
+void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe);
+void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe);
+void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe);
int intel_prepare_plane_fb(struct drm_plane *plane,
const struct drm_plane_state *new_state);
void intel_cleanup_plane_fb(struct drm_plane *plane,
@@ -1226,23 +1293,25 @@ u32 intel_compute_tile_offset(int *x, int *y,
const struct drm_framebuffer *fb, int plane,
unsigned int pitch,
unsigned int rotation);
-void intel_prepare_reset(struct drm_device *dev);
-void intel_finish_reset(struct drm_device *dev);
+void intel_prepare_reset(struct drm_i915_private *dev_priv);
+void intel_finish_reset(struct drm_i915_private *dev_priv);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
-void broxton_init_cdclk(struct drm_i915_private *dev_priv);
-void broxton_uninit_cdclk(struct drm_i915_private *dev_priv);
-bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv);
-void broxton_ddi_phy_init(struct drm_i915_private *dev_priv);
-void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv);
-void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv);
+void bxt_init_cdclk(struct drm_i915_private *dev_priv);
+void bxt_uninit_cdclk(struct drm_i915_private *dev_priv);
+void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy);
+bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy);
void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
void bxt_enable_dc9(struct drm_i915_private *dev_priv);
void bxt_disable_dc9(struct drm_i915_private *dev_priv);
void gen9_enable_dc5(struct drm_i915_private *dev_priv);
void skl_init_cdclk(struct drm_i915_private *dev_priv);
-int skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
+unsigned int skl_cdclk_get_vco(unsigned int freq);
void skl_enable_dc6(struct drm_i915_private *dev_priv);
void skl_disable_dc6(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
@@ -1250,8 +1319,8 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
- intel_clock_t *best_clock);
-int chv_calc_dpll_params(int refclk, intel_clock_t *pll_clock);
+ struct dpll *best_clock);
+int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
bool intel_crtc_active(struct drm_crtc *crtc);
void hsw_enable_ips(struct intel_crtc *crtc);
@@ -1310,7 +1379,7 @@ void intel_dp_mst_resume(struct drm_device *dev);
int intel_dp_max_link_rate(struct intel_dp *intel_dp);
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
-void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
+void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
void intel_plane_destroy(struct drm_plane *plane);
void intel_edp_drrs_enable(struct intel_dp *intel_dp);
@@ -1337,12 +1406,22 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]);
+static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
+{
+ return ~((1 << lane_count) - 1) & 0xf;
+}
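/*
 * Editor's worked example: for lane_count == 2,
 * ~((1 << 2) - 1) & 0xf == ~0x3 & 0xf == 0xc, i.e. lanes 2 and 3 of the
 * four-lane set are reported unused.
 */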
+
+/* intel_dp_aux_backlight.c */
+int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
+
/* intel_dp_mst.c */
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
/* intel_dsi.c */
void intel_dsi_init(struct drm_device *dev);
+/* intel_dsi_dcs_backlight.c */
+int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
/* intel_dvo.c */
void intel_dvo_init(struct drm_device *dev);
@@ -1385,11 +1464,15 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev)
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
struct drm_atomic_state *state);
bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
-void intel_fbc_pre_update(struct intel_crtc *crtc);
+void intel_fbc_pre_update(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state);
void intel_fbc_post_update(struct intel_crtc *crtc);
void intel_fbc_init(struct drm_i915_private *dev_priv);
void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv);
-void intel_fbc_enable(struct intel_crtc *crtc);
+void intel_fbc_enable(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state);
void intel_fbc_disable(struct intel_crtc *crtc);
void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
@@ -1411,6 +1494,7 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
/* intel_lvds.c */
void intel_lvds_init(struct drm_device *dev);
+struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev);
bool intel_is_dual_link_lvds(struct drm_device *dev);
@@ -1424,13 +1508,13 @@ void intel_attach_aspect_ratio_property(struct drm_connector *connector);
/* intel_overlay.c */
-void intel_setup_overlay(struct drm_device *dev);
-void intel_cleanup_overlay(struct drm_device *dev);
+void intel_setup_overlay(struct drm_i915_private *dev_priv);
+void intel_cleanup_overlay(struct drm_i915_private *dev_priv);
int intel_overlay_switch_off(struct intel_overlay *overlay);
-int intel_overlay_put_image(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int intel_overlay_attrs(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
void intel_overlay_reset(struct drm_i915_private *dev_priv);
@@ -1449,7 +1533,8 @@ void intel_gmch_panel_fitting(struct intel_crtc *crtc,
int fitting_mode);
void intel_panel_set_backlight_acpi(struct intel_connector *connector,
u32 level, u32 max);
-int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe);
+int intel_panel_setup_backlight(struct drm_connector *connector,
+ enum pipe pipe);
void intel_panel_enable_backlight(struct intel_connector *connector);
void intel_panel_disable_backlight(struct intel_connector *connector);
void intel_panel_destroy_backlight(struct drm_connector *connector);
@@ -1458,8 +1543,19 @@ extern struct drm_display_mode *intel_find_panel_downclock(
struct drm_device *dev,
struct drm_display_mode *fixed_mode,
struct drm_connector *connector);
-void intel_backlight_register(struct drm_device *dev);
-void intel_backlight_unregister(struct drm_device *dev);
+
+#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
+int intel_backlight_device_register(struct intel_connector *connector);
+void intel_backlight_device_unregister(struct intel_connector *connector);
+#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+static inline int intel_backlight_device_register(struct intel_connector *connector)
+{
+ return 0;
+}
+static inline void intel_backlight_device_unregister(struct intel_connector *connector)
+{
+}
+#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
/* intel_psr.c */
@@ -1601,29 +1697,35 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
void intel_pm_setup(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
void intel_gpu_ips_teardown(void);
-void intel_init_gt_powersave(struct drm_device *dev);
-void intel_cleanup_gt_powersave(struct drm_device *dev);
-void intel_enable_gt_powersave(struct drm_device *dev);
-void intel_disable_gt_powersave(struct drm_device *dev);
-void intel_suspend_gt_powersave(struct drm_device *dev);
-void intel_reset_gt_powersave(struct drm_device *dev);
-void gen6_update_ring_freq(struct drm_device *dev);
+void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_reset_gt_powersave(struct drm_i915_private *dev_priv);
+void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
void gen6_rps_busy(struct drm_i915_private *dev_priv);
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct drm_i915_private *dev_priv,
struct intel_rps_client *rps,
unsigned long submitted);
-void intel_queue_rps_boost_for_request(struct drm_device *dev,
- struct drm_i915_gem_request *req);
+void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req);
void vlv_wm_get_hw_state(struct drm_device *dev);
void ilk_wm_get_hw_state(struct drm_device *dev);
void skl_wm_get_hw_state(struct drm_device *dev);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
+bool skl_can_enable_sagv(struct drm_atomic_state *state);
+int skl_enable_sagv(struct drm_i915_private *dev_priv);
+int skl_disable_sagv(struct drm_i915_private *dev_priv);
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
bool ilk_disable_lp_wm(struct drm_device *dev);
-int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6);
+int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
+static inline int intel_enable_rc6(void)
+{
+ return i915.enable_rc6;
+}
/* intel_sdvo.c */
bool intel_sdvo_init(struct drm_device *dev,
@@ -1631,11 +1733,13 @@ bool intel_sdvo_init(struct drm_device *dev,
/* intel_sprite.c */
+int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
+ int usecs);
int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void intel_pipe_update_start(struct intel_crtc *crtc);
-void intel_pipe_update_end(struct intel_crtc *crtc);
+void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work);
/* intel_tv.c */
void intel_tv_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 4756ef639..de8e9fb51 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -84,13 +84,15 @@ static void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 mask;
mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
- if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & mask) == mask, 100))
+ if (intel_wait_for_register(dev_priv,
+ MIPI_GEN_FIFO_STAT(port), mask, mask,
+ 100))
DRM_ERROR("DPI FIFOs are not empty\n");
}
@@ -129,7 +131,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
{
struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host);
struct drm_device *dev = intel_dsi_host->intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_dsi_host->port;
struct mipi_dsi_packet packet;
ssize_t ret;
@@ -158,8 +160,10 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
/* note: this is never true for reads */
if (packet.payload_length) {
-
- if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & data_mask) == 0, 50))
+ if (intel_wait_for_register(dev_priv,
+ MIPI_GEN_FIFO_STAT(port),
+ data_mask, 0,
+ 50))
DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
write_data(dev_priv, data_reg, packet.payload,
@@ -170,7 +174,10 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
}
- if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & ctrl_mask) == 0, 50)) {
+ if (intel_wait_for_register(dev_priv,
+ MIPI_GEN_FIFO_STAT(port),
+ ctrl_mask, 0,
+ 50)) {
DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
}
@@ -179,7 +186,10 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
/* ->rx_len is set only for reads */
if (msg->rx_len) {
data_mask = GEN_READ_DATA_AVAIL;
- if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & data_mask) == data_mask, 50))
+ if (intel_wait_for_register(dev_priv,
+ MIPI_INTR_STAT(port),
+ data_mask, data_mask,
+ 50))
DRM_ERROR("Timeout waiting for read data.\n");
read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len);
@@ -250,7 +260,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 mask;
/* XXX: pipe, hs */
@@ -269,7 +279,9 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
I915_WRITE(MIPI_DPI_CONTROL(port), cmd);
mask = SPL_PKT_SENT_INTERRUPT;
- if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & mask) == mask, 100))
+ if (intel_wait_for_register(dev_priv,
+ MIPI_INTR_STAT(port), mask, mask,
+ 100))
DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
return 0;
@@ -302,7 +314,7 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
static bool intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
@@ -313,8 +325,6 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
DRM_DEBUG_KMS("\n");
- pipe_config->has_dsi_encoder = true;
-
if (fixed_mode) {
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
@@ -348,7 +358,7 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
static void bxt_dsi_device_ready(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 val;
@@ -387,7 +397,7 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder)
static void vlv_dsi_device_ready(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 val;
@@ -437,7 +447,7 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
static void intel_dsi_port_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
@@ -478,7 +488,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
static void intel_dsi_port_disable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
@@ -497,7 +507,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
static void intel_dsi_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
@@ -528,11 +538,10 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder);
static void intel_dsi_pre_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
enum port port;
- u32 tmp;
DRM_DEBUG_KMS("\n");
@@ -551,11 +560,13 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
msleep(intel_dsi->panel_on_delay);
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ u32 val;
+
/* Disable DPOunit clock gating, can stall pipe */
- tmp = I915_READ(DSPCLK_GATE_D);
- tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(DSPCLK_GATE_D, tmp);
+ val = I915_READ(DSPCLK_GATE_D);
+ val |= DPOUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(DSPCLK_GATE_D, val);
}
/* put device in ready state */
@@ -601,7 +612,7 @@ static void intel_dsi_pre_disable(struct intel_encoder *encoder)
static void intel_dsi_disable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 temp;
@@ -640,7 +651,7 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
@@ -666,8 +677,9 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
/* Wait till Clock lanes are in LP-00 state for MIPI Port A
* only. MIPI Port C has no similar bit for checking
*/
- if (wait_for(((I915_READ(port_ctrl) & AFE_LATCHOUT)
- == 0x00000), 30))
+ if (intel_wait_for_register(dev_priv,
+ port_ctrl, AFE_LATCHOUT, 0,
+ 30))
DRM_ERROR("DSI LP not going Low\n");
/* Disable MIPI PHY transparent latch */
@@ -684,7 +696,7 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
static void intel_dsi_post_disable(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
DRM_DEBUG_KMS("\n");
@@ -693,7 +705,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
intel_dsi_clear_device_ready(encoder);
- if (!IS_BROXTON(dev_priv)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
u32 val;
val = I915_READ(DSPCLK_GATE_D);
@@ -719,7 +731,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct drm_device *dev = encoder->base.dev;
enum intel_display_power_domain power_domain;
@@ -793,7 +805,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_display_mode *adjusted_mode =
&pipe_config->base.adjusted_mode;
struct drm_display_mode *adjusted_mode_sw;
@@ -953,8 +965,6 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
u32 pclk;
DRM_DEBUG_KMS("\n");
- pipe_config->has_dsi_encoder = true;
-
if (IS_BROXTON(dev))
bxt_dsi_get_pipe_config(encoder, pipe_config);
@@ -1012,7 +1022,7 @@ static void set_dsi_timings(struct drm_encoder *encoder,
const struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
@@ -1098,7 +1108,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
@@ -1171,6 +1181,12 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
if (intel_dsi->clock_stop)
tmp |= CLOCKSTOP;
+ if (IS_BROXTON(dev_priv)) {
+ tmp |= BXT_DPHY_DEFEATURE_EN;
+ if (!is_cmd_mode(intel_dsi))
+ tmp |= BXT_DEFEATURE_DPI_FIFO_CTR;
+ }
+
for_each_dsi_port(port, intel_dsi->ports) {
I915_WRITE(MIPI_DSI_FUNC_PRG(port), val);
@@ -1378,12 +1394,13 @@ static const struct drm_encoder_funcs intel_dsi_funcs = {
static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
.get_modes = intel_dsi_get_modes,
.mode_valid = intel_dsi_mode_valid,
- .best_encoder = intel_best_encoder,
};
static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.detect = intel_dsi_detect,
+ .late_register = intel_connector_register,
+ .early_unregister = intel_connector_unregister,
.destroy = intel_dsi_connector_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dsi_set_property,
@@ -1413,7 +1430,7 @@ void intel_dsi_init(struct drm_device *dev)
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_display_mode *scan, *fixed_mode = NULL;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum port port;
unsigned int i;
@@ -1449,7 +1466,7 @@ void intel_dsi_init(struct drm_device *dev)
connector = &intel_connector->base;
drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI,
- NULL);
+ "DSI %c", port_name(port));
intel_encoder->compute_config = intel_dsi_compute_config;
intel_encoder->pre_enable = intel_dsi_pre_enable;
@@ -1460,7 +1477,6 @@ void intel_dsi_init(struct drm_device *dev)
intel_encoder->get_config = intel_dsi_get_config;
intel_connector->get_hw_state = intel_connector_get_hw_state;
- intel_connector->unregister = intel_connector_unregister;
/*
* On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
@@ -1473,10 +1489,42 @@ void intel_dsi_init(struct drm_device *dev)
else
intel_encoder->crtc_mask = BIT(PIPE_B);
- if (dev_priv->vbt.dsi.config->dual_link)
+ if (dev_priv->vbt.dsi.config->dual_link) {
intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
- else
+
+ switch (dev_priv->vbt.dsi.config->dl_dcs_backlight_ports) {
+ case DL_DCS_PORT_A:
+ intel_dsi->dcs_backlight_ports = BIT(PORT_A);
+ break;
+ case DL_DCS_PORT_C:
+ intel_dsi->dcs_backlight_ports = BIT(PORT_C);
+ break;
+ default:
+ case DL_DCS_PORT_A_AND_C:
+ intel_dsi->dcs_backlight_ports = BIT(PORT_A) | BIT(PORT_C);
+ break;
+ }
+
+ switch (dev_priv->vbt.dsi.config->dl_dcs_cabc_ports) {
+ case DL_DCS_PORT_A:
+ intel_dsi->dcs_cabc_ports = BIT(PORT_A);
+ break;
+ case DL_DCS_PORT_C:
+ intel_dsi->dcs_cabc_ports = BIT(PORT_C);
+ break;
+ default:
+ case DL_DCS_PORT_A_AND_C:
+ intel_dsi->dcs_cabc_ports = BIT(PORT_A) | BIT(PORT_C);
+ break;
+ }
+ } else {
intel_dsi->ports = BIT(port);
+ intel_dsi->dcs_backlight_ports = BIT(port);
+ intel_dsi->dcs_cabc_ports = BIT(port);
+ }
+
+ if (!dev_priv->vbt.dsi.config->cabc_supported)
+ intel_dsi->dcs_cabc_ports = 0;
/* Create a DSI host (and a device) for each port. */
for_each_dsi_port(port, intel_dsi->ports) {
@@ -1549,13 +1597,10 @@ void intel_dsi_init(struct drm_device *dev)
connector->display_info.height_mm = fixed_mode->height_mm;
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
+ intel_panel_setup_backlight(connector, INVALID_PIPE);
intel_dsi_add_properties(intel_connector);
- drm_connector_register(connector);
-
- intel_panel_setup_backlight(connector, INVALID_PIPE);
-
return;
err:
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 61a6957fc..5967ea6d6 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -78,6 +78,10 @@ struct intel_dsi {
u8 escape_clk_div;
u8 dual_link;
+
+ u16 dcs_backlight_ports;
+ u16 dcs_cabc_ports;
+
u8 pixel_overlap;
u32 port_bits;
u32 bw_timer;
diff --git a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
new file mode 100644
index 000000000..ac7c6020c
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Deepak M <m.deepak at intel.com>
+ */
+
+#include "intel_drv.h"
+#include "intel_dsi.h"
+#include "i915_drv.h"
+#include <video/mipi_display.h>
+#include <drm/drm_mipi_dsi.h>
+
+#define CONTROL_DISPLAY_BCTRL (1 << 5)
+#define CONTROL_DISPLAY_DD (1 << 3)
+#define CONTROL_DISPLAY_BL (1 << 2)
+
+#define POWER_SAVE_OFF (0 << 0)
+#define POWER_SAVE_LOW (1 << 0)
+#define POWER_SAVE_MEDIUM (2 << 0)
+#define POWER_SAVE_HIGH (3 << 0)
+#define POWER_SAVE_OUTDOOR_MODE (4 << 0)
+
+#define PANEL_PWM_MAX_VALUE 0xFF
+
+static u32 dcs_get_backlight(struct intel_connector *connector)
+{
+ struct intel_encoder *encoder = connector->encoder;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct mipi_dsi_device *dsi_device;
+ u8 data;
+ enum port port;
+
+ /* FIXME: Need to take care of 16 bit brightness level */
+ for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
+ dsi_device = intel_dsi->dsi_hosts[port]->device;
+ mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_DISPLAY_BRIGHTNESS,
+ &data, sizeof(data));
+ break;
+ }
+
+ return data;
+}
+
+static void dcs_set_backlight(struct intel_connector *connector, u32 level)
+{
+ struct intel_encoder *encoder = connector->encoder;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct mipi_dsi_device *dsi_device;
+ u8 data = level;
+ enum port port;
+
+ /* FIXME: Need to take care of 16 bit brightness level */
+ for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
+ dsi_device = intel_dsi->dsi_hosts[port]->device;
+ mipi_dsi_dcs_write(dsi_device, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
+ &data, sizeof(data));
+ }
+}
+
+static void dcs_disable_backlight(struct intel_connector *connector)
+{
+ struct intel_encoder *encoder = connector->encoder;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct mipi_dsi_device *dsi_device;
+ enum port port;
+
+ dcs_set_backlight(connector, 0);
+
+ for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) {
+ u8 cabc = POWER_SAVE_OFF;
+
+ dsi_device = intel_dsi->dsi_hosts[port]->device;
+ mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_POWER_SAVE,
+ &cabc, sizeof(cabc));
+ }
+
+ for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
+ u8 ctrl = 0;
+
+ dsi_device = intel_dsi->dsi_hosts[port]->device;
+
+ mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_CONTROL_DISPLAY,
+ &ctrl, sizeof(ctrl));
+
+ ctrl &= ~CONTROL_DISPLAY_BL;
+ ctrl &= ~CONTROL_DISPLAY_DD;
+ ctrl &= ~CONTROL_DISPLAY_BCTRL;
+
+ mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ &ctrl, sizeof(ctrl));
+ }
+}
+
+static void dcs_enable_backlight(struct intel_connector *connector)
+{
+ struct intel_encoder *encoder = connector->encoder;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_panel *panel = &connector->panel;
+ struct mipi_dsi_device *dsi_device;
+ enum port port;
+
+ for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
+ u8 ctrl = 0;
+
+ dsi_device = intel_dsi->dsi_hosts[port]->device;
+
+ mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_CONTROL_DISPLAY,
+ &ctrl, sizeof(ctrl));
+
+ ctrl |= CONTROL_DISPLAY_BL;
+ ctrl |= CONTROL_DISPLAY_DD;
+ ctrl |= CONTROL_DISPLAY_BCTRL;
+
+ mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ &ctrl, sizeof(ctrl));
+ }
+
+ for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) {
+ u8 cabc = POWER_SAVE_MEDIUM;
+
+ dsi_device = intel_dsi->dsi_hosts[port]->device;
+ mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_POWER_SAVE,
+ &cabc, sizeof(cabc));
+ }
+
+ dcs_set_backlight(connector, panel->backlight.level);
+}
+
+static int dcs_setup_backlight(struct intel_connector *connector,
+ enum pipe unused)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ panel->backlight.max = PANEL_PWM_MAX_VALUE;
+ panel->backlight.level = PANEL_PWM_MAX_VALUE;
+
+ return 0;
+}
+
+int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector)
+{
+ struct drm_device *dev = intel_connector->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_encoder *encoder = intel_connector->encoder;
+ struct intel_panel *panel = &intel_connector->panel;
+
+ if (dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_DSI_DCS)
+ return -ENODEV;
+
+ if (WARN_ON(encoder->type != INTEL_OUTPUT_DSI))
+ return -EINVAL;
+
+ panel->backlight.setup = dcs_setup_backlight;
+ panel->backlight.enable = dcs_enable_backlight;
+ panel->backlight.disable = dcs_disable_backlight;
+ panel->backlight.set = dcs_set_backlight;
+ panel->backlight.get = dcs_get_backlight;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index e498f1c32..cd154ce6b 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -95,6 +95,24 @@ static struct gpio_map vlv_gpio_table[] = {
{ VLV_GPIO_NC_11_PANEL1_BKLTCTL },
};
+#define CHV_GPIO_IDX_START_N 0
+#define CHV_GPIO_IDX_START_E 73
+#define CHV_GPIO_IDX_START_SW 100
+#define CHV_GPIO_IDX_START_SE 198
+
+#define CHV_VBT_MAX_PINS_PER_FMLY 15
+
+#define CHV_GPIO_PAD_CFG0(f, i) (0x4400 + (f) * 0x400 + (i) * 8)
+#define CHV_GPIO_GPIOEN (1 << 15)
+#define CHV_GPIO_GPIOCFG_GPIO (0 << 8)
+#define CHV_GPIO_GPIOCFG_GPO (1 << 8)
+#define CHV_GPIO_GPIOCFG_GPI (2 << 8)
+#define CHV_GPIO_GPIOCFG_HIZ (3 << 8)
+#define CHV_GPIO_GPIOTXSTATE(state) ((!!(state)) << 1)
+
+#define CHV_GPIO_PAD_CFG1(f, i) (0x4400 + (f) * 0x400 + (i) * 8 + 4)
+#define CHV_GPIO_CFGLOCK (1 << 31)
+
static inline enum port intel_dsi_seq_port_to_port(u8 port)
{
return port ? PORT_C : PORT_A;
@@ -203,13 +221,14 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
map = &vlv_gpio_table[gpio_index];
if (dev_priv->vbt.dsi.seq_version >= 3) {
- DRM_DEBUG_KMS("GPIO element v3 not supported\n");
- return;
+ /* XXX: this assumes vlv_gpio_table only has NC GPIOs. */
+ port = IOSF_PORT_GPIO_NC;
} else {
if (gpio_source == 0) {
port = IOSF_PORT_GPIO_NC;
} else if (gpio_source == 1) {
- port = IOSF_PORT_GPIO_SC;
+ DRM_DEBUG_KMS("SC gpio not supported\n");
+ return;
} else {
DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
return;
@@ -231,10 +250,60 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
mutex_unlock(&dev_priv->sb_lock);
}
+static void chv_exec_gpio(struct drm_i915_private *dev_priv,
+ u8 gpio_source, u8 gpio_index, bool value)
+{
+ u16 cfg0, cfg1;
+ u16 family_num;
+ u8 port;
+
+ if (dev_priv->vbt.dsi.seq_version >= 3) {
+ if (gpio_index >= CHV_GPIO_IDX_START_SE) {
+ /* XXX: it's unclear whether 255->57 is part of SE. */
+ gpio_index -= CHV_GPIO_IDX_START_SE;
+ port = CHV_IOSF_PORT_GPIO_SE;
+ } else if (gpio_index >= CHV_GPIO_IDX_START_SW) {
+ gpio_index -= CHV_GPIO_IDX_START_SW;
+ port = CHV_IOSF_PORT_GPIO_SW;
+ } else if (gpio_index >= CHV_GPIO_IDX_START_E) {
+ gpio_index -= CHV_GPIO_IDX_START_E;
+ port = CHV_IOSF_PORT_GPIO_E;
+ } else {
+ port = CHV_IOSF_PORT_GPIO_N;
+ }
+ } else {
+ /* XXX: The spec is unclear about CHV GPIO on seq v2 */
+ if (gpio_source != 0) {
+ DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
+ return;
+ }
+
+ if (gpio_index >= CHV_GPIO_IDX_START_E) {
+ DRM_DEBUG_KMS("invalid gpio index %u for GPIO N\n",
+ gpio_index);
+ return;
+ }
+
+ port = CHV_IOSF_PORT_GPIO_N;
+ }
+
+ family_num = gpio_index / CHV_VBT_MAX_PINS_PER_FMLY;
+ gpio_index = gpio_index % CHV_VBT_MAX_PINS_PER_FMLY;
+
+ cfg0 = CHV_GPIO_PAD_CFG0(family_num, gpio_index);
+ cfg1 = CHV_GPIO_PAD_CFG1(family_num, gpio_index);
+
+ mutex_lock(&dev_priv->sb_lock);
+ vlv_iosf_sb_write(dev_priv, port, cfg1, 0);
+ vlv_iosf_sb_write(dev_priv, port, cfg0,
+ CHV_GPIO_GPIOCFG_GPO | CHV_GPIO_GPIOTXSTATE(value));
+ mutex_unlock(&dev_priv->sb_lock);
+}
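
The pad-address arithmetic above is easier to see with concrete numbers: each family holds 15 pins and each pad has an 8-byte config pair. A standalone sketch (the VBT index is hypothetical) reproducing the cfg0/cfg1 offsets for a south-west community pin:

#include <stdio.h>

#define CHV_GPIO_IDX_START_SW 100
#define CHV_VBT_MAX_PINS_PER_FMLY 15
#define CHV_GPIO_PAD_CFG0(f, i) (0x4400 + (f) * 0x400 + (i) * 8)
#define CHV_GPIO_PAD_CFG1(f, i) (0x4400 + (f) * 0x400 + (i) * 8 + 4)

int main(void)
{
	unsigned int vbt_index = 130;	/* hypothetical VBT value */
	unsigned int pad = vbt_index - CHV_GPIO_IDX_START_SW;	/* SW pad 30 */
	unsigned int family = pad / CHV_VBT_MAX_PINS_PER_FMLY;	/* family 2 */

	pad %= CHV_VBT_MAX_PINS_PER_FMLY;			/* pad 0 */
	printf("cfg0=0x%x cfg1=0x%x\n",
	       CHV_GPIO_PAD_CFG0(family, pad),	/* 0x4c00 */
	       CHV_GPIO_PAD_CFG1(family, pad));	/* 0x4c04 */
	return 0;
}
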
+
static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
{
struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u8 gpio_source, gpio_index;
bool value;
@@ -254,6 +323,8 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
if (IS_VALLEYVIEW(dev_priv))
vlv_exec_gpio(dev_priv, gpio_source, gpio_index, value);
+ else if (IS_CHERRYVIEW(dev_priv))
+ chv_exec_gpio(dev_priv, gpio_source, gpio_index, value);
else
DRM_DEBUG_KMS("GPIO element not supported on this platform\n");
@@ -398,7 +469,7 @@ static int vbt_panel_get_modes(struct drm_panel *panel)
struct vbt_panel *vbt_panel = to_vbt_panel(panel);
struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_display_mode *mode;
if (!panel->connector)
@@ -426,7 +497,7 @@ static const struct drm_panel_funcs vbt_panel_funcs = {
struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
{
struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
@@ -578,14 +649,13 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
);
/*
- * Exit zero is unified val ths_zero and ths_exit
+ * Exit zero is the unified value of ths_zero and ths_exit
* minimum value for ths_exit = 110ns
* min (exit_zero_cnt * 2) = 110/UI
* exit_zero_cnt = 55/UI
*/
- if (exit_zero_cnt < (55 * ui_den / ui_num))
- if ((55 * ui_den) % ui_num)
- exit_zero_cnt += 1;
+ if (exit_zero_cnt < (55 * ui_den / ui_num) && (55 * ui_den) % ui_num)
+ exit_zero_cnt += 1;
/* clk zero count */
clk_zero_cnt = DIV_ROUND_UP(
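
The collapsed && condition above is equivalent to the old nested ifs; the bump matters when 55 * ui_den is not an exact multiple of ui_num, where truncating division would otherwise leave ths_exit just under the 110 ns minimum. A standalone sketch with hypothetical values:

#include <stdio.h>

int main(void)
{
	unsigned int ui_num = 2, ui_den = 1;	/* hypothetical UI ratio */
	unsigned int exit_zero_cnt = 26;	/* hypothetical computed count */
	unsigned int threshold = 55 * ui_den / ui_num; /* 27, truncated from 27.5 */

	/* Same rule as the patched code: bump only when the integer
	 * division above discarded a remainder. */
	if (exit_zero_cnt < threshold && (55 * ui_den) % ui_num)
		exit_zero_cnt += 1;

	printf("exit_zero_cnt=%u\n", exit_zero_cnt);	/* 27 */
	return 0;
}
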
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index 1765e6e18..6ab58a01b 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -55,12 +55,10 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
struct intel_crtc_state *config,
int target_dsi_clk)
{
- unsigned int calc_m = 0, calc_p = 0;
unsigned int m_min, m_max, p_min = 2, p_max = 6;
unsigned int m, n, p;
- int ref_clk;
- int delta = target_dsi_clk;
- u32 m_seed;
+ unsigned int calc_m, calc_p;
+ int delta, ref_clk;
/* target_dsi_clk is expected in kHz */
if (target_dsi_clk < 300000 || target_dsi_clk > 1150000) {
@@ -80,6 +78,10 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
m_max = 92;
}
+ calc_p = p_min;
+ calc_m = m_min;
+ delta = abs(target_dsi_clk - (m_min * ref_clk) / (p_min * n));
+
for (m = m_min; m <= m_max && delta; m++) {
for (p = p_min; p <= p_max && delta; p++) {
/*
@@ -97,11 +99,10 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
}
/* register has log2(N1), this works fine for powers of two */
- n = ffs(n) - 1;
- m_seed = lfsr_converts[calc_m - 62];
config->dsi_pll.ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
- config->dsi_pll.div = n << DSI_PLL_N1_DIV_SHIFT |
- m_seed << DSI_PLL_M1_DIV_SHIFT;
+ config->dsi_pll.div =
+ (ffs(n) - 1) << DSI_PLL_N1_DIV_SHIFT |
+ (u32)lfsr_converts[calc_m - 62] << DSI_PLL_M1_DIV_SHIFT;
return 0;
}
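
Seeding calc_m/calc_p with the minimum divisors, and delta with their actual error, means the search always starts from a valid candidate, so the lfsr_converts[calc_m - 62] lookup stays in range even if the loop never improves on it. A standalone sketch of the same brute-force search, with hypothetical clock values:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const unsigned int ref_clk = 25000, n = 1;	/* hypothetical, kHz */
	const int target = 513000;			/* hypothetical, kHz */
	unsigned int m, p, calc_m = 62, calc_p = 2;
	int delta = abs(target - (int)((62 * ref_clk) / (2 * n)));

	for (m = 62; m <= 92 && delta; m++) {
		for (p = 2; p <= 6 && delta; p++) {
			int d = abs(target - (int)((m * ref_clk) / (p * n)));

			if (d < delta) {
				delta = d;
				calc_m = m;
				calc_p = p;
			}
		}
	}

	printf("m=%u p=%u delta=%d\n", calc_m, calc_p, delta); /* m=82 p=4 */
	return 0;
}
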
@@ -113,7 +114,7 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
static int vlv_compute_dsi_pll(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
int ret;
u32 dsi_clk;
@@ -234,8 +235,11 @@ static void bxt_disable_dsi_pll(struct intel_encoder *encoder)
* PLL lock should deassert within 200us.
* Wait up to 1ms before timing out.
*/
- if (wait_for((I915_READ(BXT_DSI_PLL_ENABLE)
- & BXT_DSI_PLL_LOCKED) == 0, 1))
+ if (intel_wait_for_register(dev_priv,
+ BXT_DSI_PLL_ENABLE,
+ BXT_DSI_PLL_LOCKED,
+ 0,
+ 1))
DRM_ERROR("Timeout waiting for PLL lock deassertion\n");
}
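
intel_wait_for_register(dev_priv, reg, mask, value, timeout_ms) polls until the masked register bits equal the expected value, so the conversions in this patch preserve behaviour: here waiting for BXT_DSI_PLL_LOCKED to clear, and further down for it to become set. A rough open-coded equivalent (the helper name is hypothetical; wait_for() is the same polling macro the old code used):

static int wait_for_register_sketch(struct drm_i915_private *dev_priv,
				    i915_reg_t reg, u32 mask, u32 value,
				    unsigned long timeout_ms)
{
	return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
}
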
@@ -321,7 +325,7 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
u32 dsi_clk;
u32 dsi_ratio;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
/* Divide by zero */
if (!pipe_bpp) {
@@ -356,7 +360,7 @@ u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
u32 temp;
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
temp = I915_READ(MIPI_CTRL(port));
@@ -370,7 +374,7 @@ static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
const struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp;
u32 dsi_rate = 0;
u32 pll_ratio = 0;
@@ -465,7 +469,7 @@ static int bxt_compute_dsi_pll(struct intel_encoder *encoder,
static void bxt_enable_dsi_pll(struct intel_encoder *encoder,
const struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 val;
@@ -486,7 +490,11 @@ static void bxt_enable_dsi_pll(struct intel_encoder *encoder,
I915_WRITE(BXT_DSI_PLL_ENABLE, val);
/* Timeout and fail if PLL not locked */
- if (wait_for(I915_READ(BXT_DSI_PLL_ENABLE) & BXT_DSI_PLL_LOCKED, 1)) {
+ if (intel_wait_for_register(dev_priv,
+ BXT_DSI_PLL_ENABLE,
+ BXT_DSI_PLL_LOCKED,
+ BXT_DSI_PLL_LOCKED,
+ 1)) {
DRM_ERROR("Timed out waiting for DSI PLL to lock\n");
return;
}
@@ -542,7 +550,7 @@ static void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
u32 tmp;
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/* Clear old configurations */
tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 286baec97..b9e5a63a7 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -122,7 +122,7 @@ static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
u32 tmp;
@@ -138,7 +138,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 tmp;
@@ -155,7 +155,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
static void intel_dvo_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 tmp, flags = 0;
@@ -176,7 +176,7 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
static void intel_disable_dvo(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg);
@@ -188,7 +188,7 @@ static void intel_disable_dvo(struct intel_encoder *encoder)
static void intel_enable_dvo(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
@@ -256,7 +256,7 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
static void intel_dvo_pre_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
@@ -305,7 +305,7 @@ intel_dvo_detect(struct drm_connector *connector, bool force)
static int intel_dvo_get_modes(struct drm_connector *connector)
{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
const struct drm_display_mode *fixed_mode =
to_intel_connector(connector)->panel.fixed_mode;
@@ -341,6 +341,8 @@ static void intel_dvo_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.detect = intel_dvo_detect,
+ .late_register = intel_connector_register,
+ .early_unregister = intel_connector_unregister,
.destroy = intel_dvo_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_connector_atomic_get_property,
@@ -351,7 +353,6 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
.mode_valid = intel_dvo_mode_valid,
.get_modes = intel_dvo_get_modes,
- .best_encoder = intel_best_encoder,
};
static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
@@ -378,7 +379,7 @@ static struct drm_display_mode *
intel_dvo_get_current_mode(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
struct drm_display_mode *mode = NULL;
@@ -406,9 +407,21 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
return mode;
}
+static char intel_dvo_port_name(i915_reg_t dvo_reg)
+{
+ if (i915_mmio_reg_equal(dvo_reg, DVOA))
+ return 'A';
+ else if (i915_mmio_reg_equal(dvo_reg, DVOB))
+ return 'B';
+ else if (i915_mmio_reg_equal(dvo_reg, DVOC))
+ return 'C';
+ else
+ return '?';
+}
+
void intel_dvo_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *intel_encoder;
struct intel_dvo *intel_dvo;
struct intel_connector *intel_connector;
@@ -428,8 +441,6 @@ void intel_dvo_init(struct drm_device *dev)
intel_dvo->attached_connector = intel_connector;
intel_encoder = &intel_dvo->base;
- drm_encoder_init(dev, &intel_encoder->base,
- &intel_dvo_enc_funcs, encoder_type, NULL);
intel_encoder->disable = intel_disable_dvo;
intel_encoder->enable = intel_enable_dvo;
@@ -438,7 +449,6 @@ void intel_dvo_init(struct drm_device *dev)
intel_encoder->compute_config = intel_dvo_compute_config;
intel_encoder->pre_enable = intel_dvo_pre_enable;
intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
- intel_connector->unregister = intel_connector_unregister;
/* Now, try to find a controller */
for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
@@ -496,6 +506,10 @@ void intel_dvo_init(struct drm_device *dev)
if (!dvoinit)
continue;
+ drm_encoder_init(dev, &intel_encoder->base,
+ &intel_dvo_enc_funcs, encoder_type,
+ "DVO %c", intel_dvo_port_name(dvo->dvo_reg));
+
intel_encoder->type = INTEL_OUTPUT_DVO;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
switch (dvo->type) {
@@ -537,11 +551,9 @@ void intel_dvo_init(struct drm_device *dev)
intel_dvo->panel_wants_dither = true;
}
- drm_connector_register(connector);
return;
}
- drm_encoder_cleanup(&intel_encoder->base);
kfree(intel_dvo);
kfree(intel_connector);
}
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 647127f3a..3836a1c79 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -124,7 +124,9 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
I915_WRITE(FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
- if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
+ if (intel_wait_for_register(dev_priv,
+ FBC_STATUS, FBC_STAT_COMPRESSING, 0,
+ 10)) {
DRM_DEBUG_KMS("FBC idle timed out\n");
return;
}
@@ -374,8 +376,9 @@ static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
* @dev_priv: i915 device instance
*
* This function is used to verify the current state of FBC.
+ *
* FIXME: This should be tracked in the plane config eventually
- * instead of queried at runtime for most callers.
+ * instead of queried at runtime for most callers.
*/
bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
{
@@ -389,7 +392,7 @@ static void intel_fbc_work_fn(struct work_struct *__work)
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_work *work = &fbc->work;
struct intel_crtc *crtc = fbc->crtc;
- struct drm_vblank_crtc *vblank = &dev_priv->dev->vblank[crtc->pipe];
+ struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[crtc->pipe];
if (drm_crtc_vblank_get(&crtc->base)) {
DRM_ERROR("vblank not available for FBC on pipe %c\n",
@@ -442,7 +445,7 @@ out:
static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_work *work = &fbc->work;
@@ -480,10 +483,10 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv)
intel_fbc_hw_deactivate(dev_priv);
}
-static bool multiple_pipes_ok(struct intel_crtc *crtc)
+static bool multiple_pipes_ok(struct intel_crtc *crtc,
+ struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
- struct drm_plane *primary = crtc->base.primary;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
enum pipe pipe = crtc->pipe;
@@ -491,9 +494,7 @@ static bool multiple_pipes_ok(struct intel_crtc *crtc)
if (!no_fbc_on_multiple_pipes(dev_priv))
return true;
- WARN_ON(!drm_modeset_is_locked(&primary->mutex));
-
- if (to_intel_plane_state(primary->state)->visible)
+ if (plane_state->visible)
fbc->visible_pipes_mask |= (1 << pipe);
else
fbc->visible_pipes_mask &= ~(1 << pipe);
@@ -554,7 +555,7 @@ again:
static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
struct drm_mm_node *uninitialized_var(compressed_llb);
int size, fb_cpp, ret;
@@ -685,7 +686,7 @@ static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
*/
static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
unsigned int effective_w, effective_h, max_w, max_h;
@@ -708,21 +709,16 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
return effective_w <= max_w && effective_h <= max_h;
}
-static void intel_fbc_update_state_cache(struct intel_crtc *crtc)
+static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct intel_plane_state *plane_state =
- to_intel_plane_state(crtc->base.primary->state);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj;
- WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex));
- WARN_ON(!drm_modeset_is_locked(&crtc->base.primary->mutex));
-
cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
cache->crtc.hsw_bdw_pixel_rate =
@@ -740,7 +736,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc)
/* FIXME: We lack the proper locking here, so only run this on the
* platforms that need. */
- if (INTEL_INFO(dev_priv)->gen >= 5 && INTEL_INFO(dev_priv)->gen < 7)
+ if (IS_GEN(dev_priv, 5, 6))
cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj);
cache->fb.pixel_format = fb->pixel_format;
cache->fb.stride = fb->pitches[0];
@@ -750,7 +746,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc)
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
@@ -822,22 +818,16 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
static bool intel_fbc_can_choose(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
- bool enable_by_default = IS_BROADWELL(dev_priv);
- if (intel_vgpu_active(dev_priv->dev)) {
+ if (intel_vgpu_active(dev_priv)) {
fbc->no_fbc_reason = "VGPU is active";
return false;
}
- if (i915.enable_fbc < 0 && !enable_by_default) {
- fbc->no_fbc_reason = "disabled per chip default";
- return false;
- }
-
if (!i915.enable_fbc) {
- fbc->no_fbc_reason = "disabled per module param";
+ fbc->no_fbc_reason = "disabled per module param or by default";
return false;
}
@@ -857,7 +847,7 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc)
static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
struct intel_fbc_reg_params *params)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
@@ -886,9 +876,11 @@ static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
return memcmp(params1, params2, sizeof(*params1)) == 0;
}
-void intel_fbc_pre_update(struct intel_crtc *crtc)
+void intel_fbc_pre_update(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
if (!fbc_supported(dev_priv))
@@ -896,7 +888,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc)
mutex_lock(&fbc->lock);
- if (!multiple_pipes_ok(crtc)) {
+ if (!multiple_pipes_ok(crtc, plane_state)) {
fbc->no_fbc_reason = "more than one pipe active";
goto deactivate;
}
@@ -904,7 +896,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc)
if (!fbc->enabled || fbc->crtc != crtc)
goto unlock;
- intel_fbc_update_state_cache(crtc);
+ intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
deactivate:
intel_fbc_deactivate(dev_priv);
@@ -914,7 +906,7 @@ unlock:
static void __intel_fbc_post_update(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_reg_params old_params;
@@ -947,7 +939,7 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
void intel_fbc_post_update(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
if (!fbc_supported(dev_priv))
@@ -996,13 +988,13 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
if (!fbc_supported(dev_priv))
return;
- if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
- return;
-
mutex_lock(&fbc->lock);
fbc->busy_bits &= ~frontbuffer_bits;
+ if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
+ goto out;
+
if (!fbc->busy_bits && fbc->enabled &&
(frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
if (fbc->active)
@@ -1011,6 +1003,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
__intel_fbc_post_update(fbc->crtc);
}
+out:
mutex_unlock(&fbc->lock);
}
@@ -1088,9 +1081,11 @@ out:
* intel_fbc_enable multiple times for the same pipe without an
* intel_fbc_disable in the middle, as long as it is deactivated.
*/
-void intel_fbc_enable(struct intel_crtc *crtc)
+void intel_fbc_enable(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
if (!fbc_supported(dev_priv))
@@ -1101,19 +1096,19 @@ void intel_fbc_enable(struct intel_crtc *crtc)
if (fbc->enabled) {
WARN_ON(fbc->crtc == NULL);
if (fbc->crtc == crtc) {
- WARN_ON(!crtc->config->enable_fbc);
+ WARN_ON(!crtc_state->enable_fbc);
WARN_ON(fbc->active);
}
goto out;
}
- if (!crtc->config->enable_fbc)
+ if (!crtc_state->enable_fbc)
goto out;
WARN_ON(fbc->active);
WARN_ON(fbc->crtc != NULL);
- intel_fbc_update_state_cache(crtc);
+ intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
if (intel_fbc_alloc_cfb(crtc)) {
fbc->no_fbc_reason = "not enough stolen memory";
goto out;
@@ -1161,7 +1156,7 @@ static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
*/
void intel_fbc_disable(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
if (!fbc_supported(dev_priv))
@@ -1215,12 +1210,49 @@ void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
if (!no_fbc_on_multiple_pipes(dev_priv))
return;
- for_each_intel_crtc(dev_priv->dev, crtc)
+ for_each_intel_crtc(&dev_priv->drm, crtc)
if (intel_crtc_active(&crtc->base) &&
to_intel_plane_state(crtc->base.primary->state)->visible)
dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
}
+/*
+ * The DDX driver changes its behavior depending on the value it reads from
+ * i915.enable_fbc, so sanitize it by translating the default value into either
+ * 0 or 1 in order to allow it to know what's going on.
+ *
+ * Notice that this is done at driver initialization and we still allow user
+ * space to change the value during runtime without sanitizing it again. IGT
+ * relies on being able to change i915.enable_fbc at runtime.
+ */
+static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
+{
+ if (i915.enable_fbc >= 0)
+ return !!i915.enable_fbc;
+
+ if (!HAS_FBC(dev_priv))
+ return 0;
+
+ if (IS_BROADWELL(dev_priv))
+ return 1;
+
+ return 0;
+}
+
+static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
+{
+#ifdef CONFIG_INTEL_IOMMU
+ /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
+ if (intel_iommu_gfx_mapped &&
+ (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
+ DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
+ return true;
+ }
+#endif
+
+ return false;
+}
+
/**
* intel_fbc_init - Initialize FBC
* @dev_priv: the i915 device
@@ -1238,6 +1270,12 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
fbc->active = false;
fbc->work.scheduled = false;
+ if (need_fbc_vtd_wa(dev_priv))
+ mkwrite_device_info(dev_priv)->has_fbc = false;
+
+ i915.enable_fbc = intel_sanitize_fbc_option(dev_priv);
+ DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc);
+
if (!HAS_FBC(dev_priv)) {
fbc->no_fbc_reason = "unsupported by this chipset";
return;
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index ab8d09a81..3e3632c18 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -150,10 +150,10 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
if (size * 2 < ggtt->stolen_usable_size)
obj = i915_gem_object_create_stolen(dev, size);
if (obj == NULL)
- obj = i915_gem_alloc_object(dev, size);
- if (!obj) {
+ obj = i915_gem_object_create(dev, size);
+ if (IS_ERR(obj)) {
DRM_ERROR("failed to allocate framebuffer\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(obj);
goto out;
}
@@ -186,9 +186,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct fb_info *info;
struct drm_framebuffer *fb;
+ struct i915_vma *vma;
struct drm_i915_gem_object *obj;
- int size, ret;
bool prealloc = false;
+ void *vaddr;
+ int ret;
if (intel_fb &&
(sizes->fb_width > intel_fb->base.width ||
@@ -214,7 +216,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
}
obj = intel_fb->obj;
- size = obj->base.size;
mutex_lock(&dev->struct_mutex);
@@ -244,22 +245,23 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &intelfb_ops;
+ vma = i915_gem_obj_to_ggtt(obj);
+
/* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size = ggtt->mappable_end;
- info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
- info->fix.smem_len = size;
+ info->fix.smem_start = dev->mode_config.fb_base + vma->node.start;
+ info->fix.smem_len = vma->node.size;
- info->screen_base =
- ioremap_wc(ggtt->mappable_base + i915_gem_obj_ggtt_offset(obj),
- size);
- if (!info->screen_base) {
+ vaddr = i915_vma_pin_iomap(vma);
+ if (IS_ERR(vaddr)) {
DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
- ret = -ENOSPC;
+ ret = PTR_ERR(vaddr);
goto out_destroy_fbi;
}
- info->screen_size = size;
+ info->screen_base = vaddr;
+ info->screen_size = vma->node.size;
/* This driver doesn't need a VT switch to restore the mode on resume */
info->skip_vt_switch = true;
@@ -287,7 +289,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
out_destroy_fbi:
drm_fb_helper_release_fbi(helper);
out_unpin:
- i915_gem_object_ggtt_unpin(obj);
+ intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
out_unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -360,23 +362,24 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
bool *enabled, int width, int height)
{
struct drm_device *dev = fb_helper->dev;
+ unsigned long conn_configured, mask;
+ unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
int i, j;
bool *save_enabled;
bool fallback = true;
int num_connectors_enabled = 0;
int num_connectors_detected = 0;
- uint64_t conn_configured = 0, mask;
int pass = 0;
- save_enabled = kcalloc(fb_helper->connector_count, sizeof(bool),
- GFP_KERNEL);
+ save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
if (!save_enabled)
return false;
- memcpy(save_enabled, enabled, fb_helper->connector_count);
- mask = (1 << fb_helper->connector_count) - 1;
+ memcpy(save_enabled, enabled, count);
+ mask = BIT(count) - 1;
+ conn_configured = 0;
retry:
- for (i = 0; i < fb_helper->connector_count; i++) {
+ for (i = 0; i < count; i++) {
struct drm_fb_helper_connector *fb_conn;
struct drm_connector *connector;
struct drm_encoder *encoder;
@@ -386,7 +389,7 @@ retry:
fb_conn = fb_helper->connector_info[i];
connector = fb_conn->connector;
- if (conn_configured & (1 << i))
+ if (conn_configured & BIT(i))
continue;
if (pass == 0 && !connector->has_tile)
@@ -398,7 +401,7 @@ retry:
if (!enabled[i]) {
DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
connector->name);
- conn_configured |= (1 << i);
+ conn_configured |= BIT(i);
continue;
}
@@ -417,7 +420,7 @@ retry:
DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
connector->name);
enabled[i] = false;
- conn_configured |= (1 << i);
+ conn_configured |= BIT(i);
continue;
}
@@ -430,14 +433,15 @@ retry:
intel_crtc->lut_b[j] = j;
}
- new_crtc = intel_fb_helper_crtc(fb_helper, connector->state->crtc);
+ new_crtc = intel_fb_helper_crtc(fb_helper,
+ connector->state->crtc);
/*
* Make sure we're not trying to drive multiple connectors
* with a single CRTC, since our cloning support may not
* match the BIOS.
*/
- for (j = 0; j < fb_helper->connector_count; j++) {
+ for (j = 0; j < count; j++) {
if (crtcs[j] == new_crtc) {
DRM_DEBUG_KMS("fallback: cloned configuration\n");
goto bail;
@@ -488,15 +492,15 @@ retry:
}
crtcs[i] = new_crtc;
- DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n",
+ DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n",
connector->name,
- pipe_name(to_intel_crtc(connector->state->crtc)->pipe),
connector->state->crtc->base.id,
+ connector->state->crtc->name,
modes[i]->hdisplay, modes[i]->vdisplay,
modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");
fallback = false;
- conn_configured |= (1 << i);
+ conn_configured |= BIT(i);
}
if ((conn_configured & mask) != mask) {
@@ -520,7 +524,7 @@ retry:
if (fallback) {
bail:
DRM_DEBUG_KMS("Not using firmware configuration\n");
- memcpy(enabled, save_enabled, fb_helper->connector_count);
+ memcpy(enabled, save_enabled, count);
kfree(save_enabled);
return false;
}
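
The clamp to BITS_PER_LONG exists because conn_configured and mask are now unsigned long bitmasks, so only that many connectors can be tracked and BIT(i) past that width would overflow. A small standalone sketch of the bookkeeping (the connector count is hypothetical):

#include <stdio.h>

#define BIT(n) (1UL << (n))

int main(void)
{
	unsigned int count = 3;			/* hypothetical connector count */
	unsigned long mask = BIT(count) - 1;	/* 0b111 */
	unsigned long conn_configured = BIT(0) | BIT(2);

	if ((conn_configured & mask) != mask)
		printf("connector 1 not configured yet, retry pass\n");
	return 0;
}
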
@@ -536,8 +540,7 @@ static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.fb_probe = intelfb_create,
};
-static void intel_fbdev_destroy(struct drm_device *dev,
- struct intel_fbdev *ifbdev)
+static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
{
/* We rely on the object-free to release the VMA pinning for
* the info->screen_base mmapping. Leaking the VMA is simpler than
@@ -550,9 +553,14 @@ static void intel_fbdev_destroy(struct drm_device *dev,
drm_fb_helper_fini(&ifbdev->helper);
if (ifbdev->fb) {
- drm_framebuffer_unregister_private(&ifbdev->fb->base);
+ mutex_lock(&ifbdev->helper.dev->struct_mutex);
+ intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
+ mutex_unlock(&ifbdev->helper.dev->struct_mutex);
+
drm_framebuffer_remove(&ifbdev->fb->base);
}
+
+ kfree(ifbdev);
}
/*
@@ -685,9 +693,9 @@ out:
static void intel_fbdev_suspend_worker(struct work_struct *work)
{
- intel_fbdev_set_suspend(container_of(work,
- struct drm_i915_private,
- fbdev_suspend_work)->dev,
+ intel_fbdev_set_suspend(&container_of(work,
+ struct drm_i915_private,
+ fbdev_suspend_work)->drm,
FBINFO_STATE_RUNNING,
true);
}
@@ -695,7 +703,7 @@ static void intel_fbdev_suspend_worker(struct work_struct *work)
int intel_fbdev_init(struct drm_device *dev)
{
struct intel_fbdev *ifbdev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
if (WARN_ON(INTEL_INFO(dev)->num_pipes == 0))
@@ -717,8 +725,6 @@ int intel_fbdev_init(struct drm_device *dev)
return ret;
}
- ifbdev->helper.atomic = true;
-
dev_priv->fbdev = ifbdev;
INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker);
@@ -729,42 +735,54 @@ int intel_fbdev_init(struct drm_device *dev)
static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
{
- struct drm_i915_private *dev_priv = data;
- struct intel_fbdev *ifbdev = dev_priv->fbdev;
+ struct intel_fbdev *ifbdev = data;
/* Due to peculiar init order wrt hpd handling this is separate. */
if (drm_fb_helper_initial_config(&ifbdev->helper,
ifbdev->preferred_bpp))
- intel_fbdev_fini(dev_priv->dev);
+ intel_fbdev_fini(ifbdev->helper.dev);
}
void intel_fbdev_initial_config_async(struct drm_device *dev)
{
- async_schedule(intel_fbdev_initial_config, to_i915(dev));
+ struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
+
+ ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
+}
+
+static void intel_fbdev_sync(struct intel_fbdev *ifbdev)
+{
+ if (!ifbdev->cookie)
+ return;
+
+ /* Only serialises with all preceding async calls, hence +1 */
+ async_synchronize_cookie(ifbdev->cookie + 1);
+ ifbdev->cookie = 0;
}
void intel_fbdev_fini(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- if (!dev_priv->fbdev)
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_fbdev *ifbdev = dev_priv->fbdev;
+
+ if (!ifbdev)
return;
flush_work(&dev_priv->fbdev_suspend_work);
-
if (!current_is_async())
- async_synchronize_full();
- intel_fbdev_destroy(dev, dev_priv->fbdev);
- kfree(dev_priv->fbdev);
+ intel_fbdev_sync(ifbdev);
+
+ intel_fbdev_destroy(ifbdev);
dev_priv->fbdev = NULL;
}
void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_fbdev *ifbdev = dev_priv->fbdev;
struct fb_info *info;
- if (!ifbdev)
+ if (!ifbdev || !ifbdev->fb)
return;
info = ifbdev->helper.fbdev;
@@ -809,29 +827,28 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->fbdev)
- drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
+ struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
+
+ if (ifbdev && ifbdev->fb)
+ drm_fb_helper_hotplug_event(&ifbdev->helper);
}
void intel_fbdev_restore_mode(struct drm_device *dev)
{
- int ret;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_fbdev *ifbdev = dev_priv->fbdev;
- struct drm_fb_helper *fb_helper;
+ struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
if (!ifbdev)
return;
- fb_helper = &ifbdev->helper;
+ intel_fbdev_sync(ifbdev);
+ if (!ifbdev->fb)
+ return;
- ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
- if (ret) {
+ if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper)) {
DRM_DEBUG("failed to restore crtc mode\n");
} else {
- mutex_lock(&fb_helper->dev->struct_mutex);
+ mutex_lock(&dev->struct_mutex);
intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
- mutex_unlock(&fb_helper->dev->struct_mutex);
+ mutex_unlock(&dev->struct_mutex);
}
}
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 9be839a24..2aa744081 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -50,7 +50,7 @@
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc;
enum pipe pipe;
@@ -68,7 +68,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev)
static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
struct intel_crtc *crtc;
@@ -105,7 +105,7 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe,
bool enable, bool old)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & 0xffff0000;
@@ -123,7 +123,7 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
DE_PIPEB_FIFO_UNDERRUN;
@@ -154,7 +154,7 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe,
bool enable, bool old)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (enable) {
I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
@@ -176,7 +176,7 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (enable)
bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
@@ -188,7 +188,7 @@ static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
@@ -220,7 +220,7 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable, bool old)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (enable) {
I915_WRITE(SERR_INT,
@@ -244,7 +244,7 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
bool old;
@@ -289,7 +289,7 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
bool ret;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- ret = __intel_set_cpu_fifo_underrun_reporting(dev_priv->dev, pipe,
+ ret = __intel_set_cpu_fifo_underrun_reporting(&dev_priv->drm, pipe,
enable);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -334,10 +334,12 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
intel_crtc->pch_fifo_underrun_disabled = !enable;
if (HAS_PCH_IBX(dev_priv))
- ibx_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
+ ibx_set_fifo_underrun_reporting(&dev_priv->drm,
+ pch_transcoder,
enable);
else
- cpt_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
+ cpt_set_fifo_underrun_reporting(&dev_priv->drm,
+ pch_transcoder,
enable, old);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -405,7 +407,7 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)
spin_lock_irq(&dev_priv->irq_lock);
- for_each_intel_crtc(dev_priv->dev, crtc) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
if (crtc->cpu_fifo_underrun_disabled)
continue;
@@ -432,7 +434,7 @@ void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv)
spin_lock_irq(&dev_priv->irq_lock);
- for_each_intel_crtc(dev_priv->dev, crtc) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
if (crtc->pch_fifo_underrun_disabled)
continue;
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 9d79c4c3e..3e3e74374 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -26,6 +26,7 @@
#include "intel_guc_fwif.h"
#include "i915_guc_reg.h"
+#include "intel_ringbuffer.h"
struct drm_i915_gem_request;
@@ -48,14 +49,23 @@ struct drm_i915_gem_request;
* queue (a circular array of work items), again described in the process
* descriptor. Work queue pages are mapped momentarily as required.
*
- * Finally, we also keep a few statistics here, including the number of
- * submissions to each engine, and a record of the last submission failure
- * (if any).
+ * We also keep a few statistics on failures. Ideally, these should all
+ * be zero!
+ * no_wq_space: times that the submission pre-check found no space was
+ * available in the work queue (note, the queue is shared,
+ * not per-engine). It is OK for this to be nonzero, but
+ * it should not be huge!
+ * q_fail: failed to enqueue a work item. This should never happen,
+ * because we check for space beforehand.
+ * b_fail: failed to ring the doorbell. This should never happen, unless
+ * somehow the hardware misbehaves, or maybe if the GuC firmware
+ * crashes? We probably need to reset the GPU to recover.
+ * retcode: errno from last guc_submit()
*/
struct i915_guc_client {
struct drm_i915_gem_object *client_obj;
void *client_base; /* first page (only) of above */
- struct intel_context *owner;
+ struct i915_gem_context *owner;
struct intel_guc *guc;
uint32_t priority;
uint32_t ctx_index;
@@ -71,12 +81,13 @@ struct i915_guc_client {
uint32_t wq_tail;
uint32_t unused; /* Was 'wq_head' */
- /* GuC submission statistics & status */
- uint64_t submissions[GUC_MAX_ENGINES_NUM];
- uint32_t q_fail;
+ uint32_t no_wq_space;
+ uint32_t q_fail; /* No longer used */
uint32_t b_fail;
int retcode;
- int spare; /* pad to 32 DWords */
+
+ /* Per-engine counts of GuC submissions */
+ uint64_t submissions[I915_NUM_ENGINES];
};
enum intel_guc_fw_status {
@@ -133,25 +144,24 @@ struct intel_guc {
uint32_t action_fail; /* Total number of failures */
int32_t action_err; /* Last error code */
- uint64_t submissions[GUC_MAX_ENGINES_NUM];
- uint32_t last_seqno[GUC_MAX_ENGINES_NUM];
+ uint64_t submissions[I915_NUM_ENGINES];
+ uint32_t last_seqno[I915_NUM_ENGINES];
};
/* intel_guc_loader.c */
-extern void intel_guc_ucode_init(struct drm_device *dev);
-extern int intel_guc_ucode_load(struct drm_device *dev);
-extern void intel_guc_ucode_fini(struct drm_device *dev);
+extern void intel_guc_init(struct drm_device *dev);
+extern int intel_guc_setup(struct drm_device *dev);
+extern void intel_guc_fini(struct drm_device *dev);
extern const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status);
extern int intel_guc_suspend(struct drm_device *dev);
extern int intel_guc_resume(struct drm_device *dev);
/* i915_guc_submission.c */
-int i915_guc_submission_init(struct drm_device *dev);
-int i915_guc_submission_enable(struct drm_device *dev);
-int i915_guc_submit(struct i915_guc_client *client,
- struct drm_i915_gem_request *rq);
-void i915_guc_submission_disable(struct drm_device *dev);
-void i915_guc_submission_fini(struct drm_device *dev);
-int i915_guc_wq_check_space(struct i915_guc_client *client);
+int i915_guc_submission_init(struct drm_i915_private *dev_priv);
+int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
+int i915_guc_wq_check_space(struct drm_i915_gem_request *rq);
+int i915_guc_submit(struct drm_i915_gem_request *rq);
+void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
+void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 2de57ffe5..944786d70 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -71,7 +71,8 @@
#define WQ_WORKLOAD_TOUCH (2 << WQ_WORKLOAD_SHIFT)
#define WQ_RING_TAIL_SHIFT 20
-#define WQ_RING_TAIL_MASK (0x7FF << WQ_RING_TAIL_SHIFT)
+#define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */
+#define WQ_RING_TAIL_MASK (WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT)
#define GUC_DOORBELL_ENABLED 1
#define GUC_DOORBELL_DISABLED 0
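
Naming the raw 0x7FF as WQ_RING_TAIL_MAX makes the field's capacity explicit: an 11-bit tail counted in QWords. A tiny sketch of packing a tail into that header field (the tail value is hypothetical):

#include <stdio.h>

#define WQ_RING_TAIL_SHIFT 20
#define WQ_RING_TAIL_MAX   0x7FF	/* 2^11 QWords */
#define WQ_RING_TAIL_MASK  (WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT)

int main(void)
{
	unsigned int tail = 0x123;	/* hypothetical tail, in QWords */
	unsigned int field = (tail << WQ_RING_TAIL_SHIFT) & WQ_RING_TAIL_MASK;

	printf("field=0x%08x\n", field);	/* 0x12300000 */
	return 0;
}
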
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 6021d5872..a00f2d654 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -62,6 +62,12 @@
#define I915_SKL_GUC_UCODE "/*(DEBLOBBED)*/"
/*(DEBLOBBED)*/
+#define I915_BXT_GUC_UCODE "/*(DEBLOBBED)*/"
+/*(DEBLOBBED)*/
+
+#define I915_KBL_GUC_UCODE "/*(DEBLOBBED)*/"
+/*(DEBLOBBED)*/
+
/* User-friendly representation of an enum */
const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
{
@@ -84,7 +90,7 @@ static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
struct intel_engine_cs *engine;
int irqs;
- /* tell all command streamers NOT to forward interrupts and vblank to GuC */
+ /* tell all command streamers NOT to forward interrupts or vblank to GuC */
irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
for_each_engine(engine, dev_priv)
@@ -100,10 +106,10 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
int irqs;
+ u32 tmp;
- /* tell all command streamers to forward interrupts and vblank to GuC */
- irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS);
- irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
+ /* tell all command streamers to forward interrupts (but not vblank) to GuC */
+ irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
for_each_engine(engine, dev_priv)
I915_WRITE(RING_MODE_GEN7(engine), irqs);
@@ -114,6 +120,16 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
I915_WRITE(GUC_WD_VECS_IER, ~irqs);
+
+ /*
+ * If GuC has routed PM interrupts to itself, keep unmasked the
+ * interrupts GuC has unmasked, but don't keep the redirect bit itself.
+ */
+ tmp = I915_READ(GEN6_PMINTRMSK);
+ if (tmp & GEN8_PMINTR_REDIRECT_TO_NON_DISP) {
+ dev_priv->rps.pm_intr_keep |= ~(tmp & ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
+ dev_priv->rps.pm_intr_keep &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+ }
}
static u32 get_gttype(struct drm_i915_private *dev_priv)
@@ -281,13 +297,24 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
return ret;
}
+static u32 guc_wopcm_size(struct drm_i915_private *dev_priv)
+{
+ u32 wopcm_size = GUC_WOPCM_TOP;
+
+ /* On BXT, the top of WOPCM is reserved for RC6 context */
+ if (IS_BROXTON(dev_priv))
+ wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED;
+
+ return wopcm_size;
+}
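
The WOPCM sizing is a simple subtraction, but the constants are easy to misread; a sketch with illustrative sizes (the real GUC_WOPCM_TOP and BXT_GUC_WOPCM_RC6_RESERVED values live in i915_guc_reg.h):

#include <stdio.h>

int main(void)
{
	unsigned int wopcm_top = 512 << 10;	/* illustrative: 512 KiB */
	unsigned int rc6_reserved = 64 << 10;	/* illustrative: 64 KiB */
	int is_broxton = 1;			/* hypothetical platform */
	unsigned int wopcm_size = wopcm_top;

	/* On BXT the top of WOPCM is carved out for the RC6 context */
	if (is_broxton)
		wopcm_size -= rc6_reserved;

	printf("usable WOPCM: %u KiB\n", wopcm_size >> 10);	/* 448 */
	return 0;
}
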
+
/*
* Load the GuC firmware blob into the MinuteIA.
*/
static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
{
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
int ret;
ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false);
@@ -308,7 +335,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* init WOPCM */
- I915_WRITE(GUC_WOPCM_SIZE, GUC_WOPCM_SIZE_VALUE);
+ I915_WRITE(GUC_WOPCM_SIZE, guc_wopcm_size(dev_priv));
I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE);
/* Enable MIA caching. GuC clock gating is disabled. */
@@ -372,66 +399,63 @@ static int i915_reset_guc(struct drm_i915_private *dev_priv)
}
/**
- * intel_guc_ucode_load() - load GuC uCode into the device
+ * intel_guc_setup() - finish preparing the GuC for activity
* @dev: drm device
*
* Called from gem_init_hw() during driver loading and also after a GPU reset.
*
+ * The main action required here is to load the GuC uCode into the device.
* The firmware image should have already been fetched into memory by the
- * earlier call to intel_guc_ucode_init(), so here we need only check that
- * is succeeded, and then transfer the image to the h/w.
+ * earlier call to intel_guc_init(), so here we need only check that worked,
+ * and then transfer the image to the h/w.
*
* Return: non-zero code on error
*/
-int intel_guc_ucode_load(struct drm_device *dev)
+int intel_guc_setup(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
- int retries, err = 0;
+ const char *fw_path = guc_fw->guc_fw_path;
+ int retries, ret, err;
- if (!i915.enable_guc_submission)
- return 0;
-
- DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
+ DRM_DEBUG_DRIVER("GuC fw status: path %s, fetch %s, load %s\n",
+ fw_path,
intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
- direct_interrupts_to_host(dev_priv);
-
- if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_NONE)
- return 0;
-
- if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_SUCCESS &&
- guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL)
- return -ENOEXEC;
-
- guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
-
- DRM_DEBUG_DRIVER("GuC fw fetch status %s\n",
- intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
-
- switch (guc_fw->guc_fw_fetch_status) {
- case GUC_FIRMWARE_FAIL:
- /* something went wrong :( */
- err = -EIO;
+ /* Loading forbidden, or no firmware to load? */
+ if (!i915.enable_guc_loading) {
+ err = 0;
goto fail;
-
- case GUC_FIRMWARE_NONE:
- case GUC_FIRMWARE_PENDING:
- default:
- /* "can't happen" */
- WARN_ONCE(1, "GuC fw %s invalid guc_fw_fetch_status %s [%d]\n",
- guc_fw->guc_fw_path,
- intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
- guc_fw->guc_fw_fetch_status);
+ } else if (fw_path == NULL) {
+ /* Device is known to have no uCode (e.g. no GuC) */
err = -ENXIO;
goto fail;
+ } else if (*fw_path == '\0') {
+ /* Device has a GuC but we don't know what f/w to load? */
+ DRM_INFO("No GuC firmware known for this platform\n");
+ err = -ENODEV;
+ goto fail;
+ }
- case GUC_FIRMWARE_SUCCESS:
- break;
+ /* Fetch failed, or already fetched but failed to load? */
+ if (guc_fw->guc_fw_fetch_status != GUC_FIRMWARE_SUCCESS) {
+ err = -EIO;
+ goto fail;
+ } else if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL) {
+ err = -ENOEXEC;
+ goto fail;
}
- err = i915_guc_submission_init(dev);
+ direct_interrupts_to_host(dev_priv);
+
+ guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
+
+ DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
+ intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
+ intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
+
+ err = i915_guc_submission_init(dev_priv);
if (err)
goto fail;
@@ -448,7 +472,7 @@ int intel_guc_ucode_load(struct drm_device *dev)
*/
err = i915_reset_guc(dev_priv);
if (err) {
- DRM_ERROR("GuC reset failed, err %d\n", err);
+ DRM_ERROR("GuC reset failed: %d\n", err);
goto fail;
}
@@ -459,8 +483,8 @@ int intel_guc_ucode_load(struct drm_device *dev)
if (--retries == 0)
goto fail;
- DRM_INFO("GuC fw load failed, err %d; will reset and "
- "retry %d more time(s)\n", err, retries);
+ DRM_INFO("GuC fw load failed: %d; will reset and "
+ "retry %d more time(s)\n", err, retries);
}
guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS;
@@ -470,10 +494,7 @@ int intel_guc_ucode_load(struct drm_device *dev)
intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
if (i915.enable_guc_submission) {
- /* The execbuf_client will be recreated. Release it first. */
- i915_guc_submission_disable(dev);
-
- err = i915_guc_submission_enable(dev);
+ err = i915_guc_submission_enable(dev_priv);
if (err)
goto fail;
direct_interrupts_to_guc(dev_priv);
@@ -482,15 +503,50 @@ int intel_guc_ucode_load(struct drm_device *dev)
return 0;
fail:
- DRM_ERROR("GuC firmware load failed, err %d\n", err);
if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;
direct_interrupts_to_host(dev_priv);
- i915_guc_submission_disable(dev);
- i915_guc_submission_fini(dev);
+ i915_guc_submission_disable(dev_priv);
+ i915_guc_submission_fini(dev_priv);
+
+ /*
+ * We've failed to load the firmware :(
+ *
+ * Decide whether to disable GuC submission and fall back to
+ * execlist mode, and whether to hide the error by returning
+ * zero or to return -EIO, which the caller will treat as a
+ * nonfatal error (i.e. it doesn't prevent driver load, but
+ * marks the GPU as wedged until reset).
+ */
+	if (i915.enable_guc_loading > 1 || i915.enable_guc_submission > 1)
+		ret = -EIO;
+	else
+		ret = 0;
+
+ if (err == 0 && !HAS_GUC_UCODE(dev))
+ ; /* Don't mention the GuC! */
+ else if (err == 0)
+ DRM_INFO("GuC firmware load skipped\n");
+ else if (ret != -EIO)
+ DRM_INFO("GuC firmware load failed: %d\n", err);
+ else
+ DRM_ERROR("GuC firmware load failed: %d\n", err);
+
+ if (i915.enable_guc_submission) {
+ if (fw_path == NULL)
+ DRM_INFO("GuC submission without firmware not supported\n");
+ if (ret == 0)
+ DRM_INFO("Falling back from GuC submission to execlist mode\n");
+ else
+ DRM_ERROR("GuC init failed: %d\n", ret);
+ }
+ i915.enable_guc_submission = 0;
- return err;
+ return ret;
}
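Both GuC parameters follow a "degree of insistence" convention that the error handling above relies on: -1 selects the platform default, 0 disables the feature, 1 enables it when available, and anything above 1 turns a load failure into a fatal -EIO instead of a silent fallback. A minimal sketch of that convention (the helper name is hypothetical, not part of the patch):

/* Hypothetical helper: resolve a -1/0/1/2 module parameter. */
static int resolve_guc_param(int param, bool platform_has_feature)
{
	if (param < 0)			/* -1: use the platform default */
		return platform_has_feature ? 1 : 0;
	return param;			/* 0 = off, 1 = if possible, 2 = required */
}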
static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
@@ -552,9 +608,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
/* Header and uCode will be loaded to WOPCM. Size of the two. */
size = guc_fw->header_size + guc_fw->ucode_size;
-
- /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
- if (size > GUC_WOPCM_SIZE_VALUE - 0x8000) {
+ if (size > guc_wopcm_size(to_i915(dev))) {
DRM_ERROR("Firmware is too large to fit in WOPCM\n");
goto fail;
}
@@ -617,22 +671,25 @@ fail:
}
/**
- * intel_guc_ucode_init() - define parameters and fetch firmware
+ * intel_guc_init() - define parameters and fetch firmware
* @dev: drm device
*
* Called early during driver load, but after GEM is initialised.
*
* The firmware will be transferred to the GuC's memory later,
- * when intel_guc_ucode_load() is called.
+ * when intel_guc_setup() is called.
*/
-void intel_guc_ucode_init(struct drm_device *dev)
+void intel_guc_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
const char *fw_path;
- if (!HAS_GUC_SCHED(dev))
- i915.enable_guc_submission = false;
+ /* A negative value means "use platform default" */
+ if (i915.enable_guc_loading < 0)
+ i915.enable_guc_loading = HAS_GUC_UCODE(dev);
+ if (i915.enable_guc_submission < 0)
+ i915.enable_guc_submission = HAS_GUC_SCHED(dev);
if (!HAS_GUC_UCODE(dev)) {
fw_path = NULL;
@@ -640,27 +697,30 @@ void intel_guc_ucode_init(struct drm_device *dev)
fw_path = I915_SKL_GUC_UCODE;
guc_fw->guc_fw_major_wanted = 6;
guc_fw->guc_fw_minor_wanted = 1;
+ } else if (IS_BROXTON(dev)) {
+ fw_path = I915_BXT_GUC_UCODE;
+ guc_fw->guc_fw_major_wanted = 8;
+ guc_fw->guc_fw_minor_wanted = 7;
+ } else if (IS_KABYLAKE(dev)) {
+ fw_path = I915_KBL_GUC_UCODE;
+ guc_fw->guc_fw_major_wanted = 9;
+ guc_fw->guc_fw_minor_wanted = 14;
} else {
- i915.enable_guc_submission = false;
fw_path = ""; /* unknown device */
}
- if (!i915.enable_guc_submission)
- return;
-
guc_fw->guc_dev = dev;
guc_fw->guc_fw_path = fw_path;
guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE;
+ /* Early (and silent) return if GuC loading is disabled */
+ if (!i915.enable_guc_loading)
+ return;
if (fw_path == NULL)
return;
-
- if (*fw_path == '\0') {
- DRM_ERROR("No GuC firmware known for this platform\n");
- guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
+ if (*fw_path == '\0')
return;
- }
guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING;
DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path);
@@ -669,18 +729,18 @@ void intel_guc_ucode_init(struct drm_device *dev)
}
/**
- * intel_guc_ucode_fini() - clean up all allocated resources
+ * intel_guc_fini() - clean up all allocated resources
* @dev: drm device
*/
-void intel_guc_ucode_fini(struct drm_device *dev)
+void intel_guc_fini(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
mutex_lock(&dev->struct_mutex);
direct_interrupts_to_host(dev_priv);
- i915_guc_submission_disable(dev);
- i915_guc_submission_fini(dev);
+ i915_guc_submission_disable(dev_priv);
+ i915_guc_submission_fini(dev_priv);
if (guc_fw->guc_fw_obj)
drm_gem_object_unreference(&guc_fw->guc_fw_obj->base);
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
new file mode 100644
index 000000000..434f4d5c5
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "i915_drv.h"
+#include "intel_gvt.h"
+
+/**
+ * DOC: Intel GVT-g host support
+ *
+ * Intel GVT-g is a graphics virtualization technology that shares the
+ * GPU among multiple virtual machines on a time-sharing basis. Each
+ * virtual machine is presented a virtual GPU (vGPU) with the same
+ * features as the underlying physical GPU (pGPU), so the i915 driver
+ * can run seamlessly in a virtual machine. This file provides the GVT
+ * enlightenments and the components the i915 driver needs for GVT.
+ */
+
+static bool is_supported_device(struct drm_i915_private *dev_priv)
+{
+ if (IS_BROADWELL(dev_priv))
+ return true;
+ return false;
+}
+
+/**
+ * intel_gvt_init - initialize GVT components
+ * @dev_priv: drm i915 private data
+ *
+ * This function is called at the initialization stage to create a GVT device.
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ *
+ */
+int intel_gvt_init(struct drm_i915_private *dev_priv)
+{
+ int ret;
+
+ if (!i915.enable_gvt) {
+ DRM_DEBUG_DRIVER("GVT-g is disabled by kernel params\n");
+ return 0;
+ }
+
+ if (!is_supported_device(dev_priv)) {
+ DRM_DEBUG_DRIVER("Unsupported device. GVT-g is disabled\n");
+ goto bail;
+ }
+
+	/*
+	 * If we're not running in a host, or we fail to find an MPT
+	 * module, disable GVT-g.
+	 */
+ ret = intel_gvt_init_host();
+ if (ret) {
+ DRM_DEBUG_DRIVER("Not in host or MPT modules not found\n");
+ goto bail;
+ }
+
+ ret = intel_gvt_init_device(dev_priv);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Fail to init GVT device\n");
+ goto bail;
+ }
+
+ return 0;
+
+bail:
+ i915.enable_gvt = 0;
+ return 0;
+}
+
+/**
+ * intel_gvt_cleanup - clean up GVT components when the i915 driver is unloading
+ * @dev_priv: drm i915 private data
+ *
+ * This function is called at the i915 driver unloading stage, to shut down
+ * GVT components and release the related resources.
+ */
+void intel_gvt_cleanup(struct drm_i915_private *dev_priv)
+{
+ if (!intel_gvt_active(dev_priv))
+ return;
+
+ intel_gvt_clean_device(dev_priv);
+}
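Note that intel_gvt_init() is deliberately non-fatal: every error path jumps to "bail", which clears i915.enable_gvt and returns 0, so GVT-g silently degrades to off rather than failing driver load. Later code then only has to test whether GVT ended up active; a sketch of that consumer side (the wrapper function here is hypothetical; intel_gvt_active() is the real test used by intel_gvt_cleanup() above):

/* Hypothetical call-site: react to whether GVT-g survived init. */
static void example_report_gvt(struct drm_i915_private *dev_priv)
{
	if (intel_gvt_active(dev_priv))
		DRM_DEBUG_DRIVER("Running as a GVT-g host\n");
}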
diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h
new file mode 100644
index 000000000..960211df7
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_gvt.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _INTEL_GVT_H_
+#define _INTEL_GVT_H_
+
+#include "gvt/gvt.h"
+
+#ifdef CONFIG_DRM_I915_GVT
+int intel_gvt_init(struct drm_i915_private *dev_priv);
+void intel_gvt_cleanup(struct drm_i915_private *dev_priv);
+int intel_gvt_init_device(struct drm_i915_private *dev_priv);
+void intel_gvt_clean_device(struct drm_i915_private *dev_priv);
+int intel_gvt_init_host(void);
+#else
+static inline int intel_gvt_init(struct drm_i915_private *dev_priv)
+{
+ return 0;
+}
+static inline void intel_gvt_cleanup(struct drm_i915_private *dev_priv)
+{
+}
+#endif
+
+#endif /* _INTEL_GVT_H_ */
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index a8844702d..4df9f3849 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -47,7 +47,7 @@ static void
assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
{
struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t enabled_bits;
enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
@@ -138,7 +138,7 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
{
const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 val = I915_READ(VIDEO_DIP_CTL);
int i;
@@ -192,7 +192,7 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
{
const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
@@ -251,7 +251,7 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
{
const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
@@ -308,7 +308,7 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
{
const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
@@ -366,7 +366,7 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
{
const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
@@ -508,7 +508,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
bool enable,
const struct drm_display_mode *adjusted_mode)
{
- struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
i915_reg_t reg = VIDEO_DIP_CTL;
@@ -629,7 +629,7 @@ static bool gcp_default_phase_possible(int pipe_bpp,
static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
{
- struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
i915_reg_t reg;
u32 val = 0;
@@ -661,7 +661,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
bool enable,
const struct drm_display_mode *adjusted_mode)
{
- struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
@@ -713,7 +713,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
bool enable,
const struct drm_display_mode *adjusted_mode)
{
- struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
@@ -755,7 +755,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
bool enable,
const struct drm_display_mode *adjusted_mode)
{
- struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
@@ -807,7 +807,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
bool enable,
const struct drm_display_mode *adjusted_mode)
{
- struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
@@ -855,7 +855,7 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
static void intel_hdmi_prepare(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
@@ -894,7 +894,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
enum intel_display_power_domain power_domain;
u32 tmp;
@@ -931,7 +931,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp, flags = 0;
int dotclock;
@@ -988,7 +988,7 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder)
static void g4x_enable_hdmi(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 temp;
@@ -1009,7 +1009,7 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder)
static void ibx_enable_hdmi(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 temp;
@@ -1058,7 +1058,7 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder)
static void cpt_enable_hdmi(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
enum pipe pipe = crtc->pipe;
@@ -1115,7 +1115,7 @@ static void vlv_enable_hdmi(struct intel_encoder *encoder)
static void intel_disable_hdmi(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
u32 temp;
@@ -1154,7 +1154,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
- intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
+ intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
@@ -1273,33 +1273,15 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc_state->base.crtc->dev;
- struct drm_atomic_state *state;
- struct intel_encoder *encoder;
- struct drm_connector *connector;
- struct drm_connector_state *connector_state;
- int count = 0, count_hdmi = 0;
- int i;
if (HAS_GMCH_DISPLAY(dev))
return false;
- state = crtc_state->base.state;
-
- for_each_connector_in_state(state, connector, connector_state, i) {
- if (connector_state->crtc != crtc_state->base.crtc)
- continue;
-
- encoder = to_intel_encoder(connector_state->best_encoder);
-
- count_hdmi += encoder->type == INTEL_OUTPUT_HDMI;
- count++;
- }
-
/*
* HDMI 12bpc affects the clocks, so it's only possible
* when not cloning with other encoder types.
*/
- return count_hdmi > 0 && count_hdmi == count;
+ return crtc_state->output_types == 1 << INTEL_OUTPUT_HDMI;
}
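The rewritten 12bpc check leans on crtc_state->output_types being a bitmask with one bit per INTEL_OUTPUT_* type, so "HDMI with no cloned outputs" collapses to a single comparison. A sketch of the idiom, under that assumption:

/* Sketch: true iff exactly one output type is set, and it is 'type'. */
static bool only_output_type(const struct intel_crtc_state *crtc_state,
			     int type)
{
	return crtc_state->output_types == 1 << type;
}
/* only_output_type(crtc_state, INTEL_OUTPUT_HDMI) reproduces the test above. */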
bool intel_hdmi_compute_config(struct intel_encoder *encoder,
@@ -1575,7 +1557,7 @@ intel_hdmi_set_property(struct drm_connector *connector,
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct intel_digital_port *intel_dig_port =
hdmi_to_dig_port(intel_hdmi);
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
int ret;
ret = drm_object_property_set_value(&connector->base, property, val);
@@ -1674,39 +1656,16 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct intel_hdmi *intel_hdmi = &dport->hdmi;
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
- enum dpio_channel port = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
- u32 val;
- /* Enable clock channels for this port */
- mutex_lock(&dev_priv->sb_lock);
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
- val = 0;
- if (pipe)
- val |= (1<<21);
- else
- val &= ~(1<<21);
- val |= 0x001000c4;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
+ vlv_phy_pre_encoder_enable(encoder);
/* HDMI 1.0V-2dB */
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), 0x2b245f5f);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), 0x5578b83a);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0c782040);
- vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), 0x2b247878);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
-
- /* Program lane clock */
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a,
+ 0x2b247878);
intel_hdmi->set_infoframes(&encoder->base,
intel_crtc->config->has_hdmi_sink,
@@ -1719,213 +1678,33 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
- enum dpio_channel port = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
-
intel_hdmi_prepare(encoder);
- /* Program Tx lane resets to default */
- mutex_lock(&dev_priv->sb_lock);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
- DPIO_PCS_TX_LANE2_RESET |
- DPIO_PCS_TX_LANE1_RESET);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
- DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
- DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
- (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
- DPIO_PCS_CLK_SOFT_RESET);
-
- /* Fix up inter-pair skew failure */
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
-
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
- mutex_unlock(&dev_priv->sb_lock);
-}
-
-static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
- bool reset)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- enum pipe pipe = crtc->pipe;
- uint32_t val;
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
- if (reset)
- val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
- else
- val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
-
- if (crtc->config->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
- if (reset)
- val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
- else
- val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
- }
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
- val |= CHV_PCS_REQ_SOFTRESET_EN;
- if (reset)
- val &= ~DPIO_PCS_CLK_SOFT_RESET;
- else
- val |= DPIO_PCS_CLK_SOFT_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
-
- if (crtc->config->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
- val |= CHV_PCS_REQ_SOFTRESET_EN;
- if (reset)
- val &= ~DPIO_PCS_CLK_SOFT_RESET;
- else
- val |= DPIO_PCS_CLK_SOFT_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
- }
+ vlv_phy_pre_pll_enable(encoder);
}
static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
- enum dpio_channel ch = vlv_dport_to_channel(dport);
- enum pipe pipe = intel_crtc->pipe;
- u32 val;
-
intel_hdmi_prepare(encoder);
- /*
- * Must trick the second common lane into life.
- * Otherwise we can't even access the PLL.
- */
- if (ch == DPIO_CH0 && pipe == PIPE_B)
- dport->release_cl2_override =
- !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
-
- chv_phy_powergate_lanes(encoder, true, 0x0);
-
- mutex_lock(&dev_priv->sb_lock);
-
- /* Assert data lane reset */
- chv_data_lane_soft_reset(encoder, true);
-
- /* program left/right clock distribution */
- if (pipe != PIPE_B) {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
- val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
- if (ch == DPIO_CH0)
- val |= CHV_BUFLEFTENA1_FORCE;
- if (ch == DPIO_CH1)
- val |= CHV_BUFRIGHTENA1_FORCE;
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
- } else {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
- val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
- if (ch == DPIO_CH0)
- val |= CHV_BUFLEFTENA2_FORCE;
- if (ch == DPIO_CH1)
- val |= CHV_BUFRIGHTENA2_FORCE;
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
- }
-
- /* program clock channel usage */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
- val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
- if (pipe != PIPE_B)
- val &= ~CHV_PCS_USEDCLKCHANNEL;
- else
- val |= CHV_PCS_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
- val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
- if (pipe != PIPE_B)
- val &= ~CHV_PCS_USEDCLKCHANNEL;
- else
- val |= CHV_PCS_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
-
- /*
- * This a a bit weird since generally CL
- * matches the pipe, but here we need to
- * pick the CL based on the port.
- */
- val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
- if (pipe != PIPE_B)
- val &= ~CHV_CMN_USEDCLKCHANNEL;
- else
- val |= CHV_CMN_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
-
- mutex_unlock(&dev_priv->sb_lock);
+ chv_phy_pre_pll_enable(encoder);
}
static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
- u32 val;
-
- mutex_lock(&dev_priv->sb_lock);
-
- /* disable left/right clock distribution */
- if (pipe != PIPE_B) {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
- val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
- } else {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
- val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
- }
-
- mutex_unlock(&dev_priv->sb_lock);
-
- /*
- * Leave the power down bit cleared for at least one
- * lane so that chv_powergate_phy_ch() will power
- * on something when the channel is otherwise unused.
- * When the port is off and the override is removed
- * the lanes power down anyway, so otherwise it doesn't
- * really matter what the state of power down bits is
- * after this.
- */
- chv_phy_powergate_lanes(encoder, false, 0x0);
+ chv_phy_post_pll_disable(encoder);
}
static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
{
- struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
- enum dpio_channel port = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
-
/* Reset lanes to avoid HDMI flicker (VLV w/a) */
- mutex_lock(&dev_priv->sb_lock);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
- mutex_unlock(&dev_priv->sb_lock);
+ vlv_phy_reset_lanes(encoder);
}
static void chv_hdmi_post_disable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
mutex_lock(&dev_priv->sb_lock);
@@ -1940,142 +1719,16 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct intel_hdmi *intel_hdmi = &dport->hdmi;
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
- enum dpio_channel ch = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
- int data, i, stagger;
- u32 val;
- mutex_lock(&dev_priv->sb_lock);
-
- /* allow hardware to manage TX FIFO reset source */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
- val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
- val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
-
- /* Program Tx latency optimal setting */
- for (i = 0; i < 4; i++) {
- /* Set the upar bit */
- data = (i == 1) ? 0x0 : 0x1;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
- data << DPIO_UPAR_SHIFT);
- }
-
- /* Data lane stagger programming */
- if (intel_crtc->config->port_clock > 270000)
- stagger = 0x18;
- else if (intel_crtc->config->port_clock > 135000)
- stagger = 0xd;
- else if (intel_crtc->config->port_clock > 67500)
- stagger = 0x7;
- else if (intel_crtc->config->port_clock > 33750)
- stagger = 0x4;
- else
- stagger = 0x2;
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
- val |= DPIO_TX2_STAGGER_MASK(0x1f);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
- val |= DPIO_TX2_STAGGER_MASK(0x1f);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
-
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
- DPIO_LANESTAGGER_STRAP(stagger) |
- DPIO_LANESTAGGER_STRAP_OVRD |
- DPIO_TX1_STAGGER_MASK(0x1f) |
- DPIO_TX1_STAGGER_MULT(6) |
- DPIO_TX2_STAGGER_MULT(0));
-
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
- DPIO_LANESTAGGER_STRAP(stagger) |
- DPIO_LANESTAGGER_STRAP_OVRD |
- DPIO_TX1_STAGGER_MASK(0x1f) |
- DPIO_TX1_STAGGER_MULT(7) |
- DPIO_TX2_STAGGER_MULT(5));
-
- /* Deassert data lane reset */
- chv_data_lane_soft_reset(encoder, false);
-
- /* Clear calc init */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
- val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
- val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
- val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
- val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
- val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
- val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
- val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
- val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
- val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
- val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
+ chv_phy_pre_encoder_enable(encoder);
/* FIXME: Program the support xxx V-dB */
/* Use 800mV-0dB */
- for (i = 0; i < 4; i++) {
- val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
- val &= ~DPIO_SWING_DEEMPH9P5_MASK;
- val |= 128 << DPIO_SWING_DEEMPH9P5_SHIFT;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
- }
-
- for (i = 0; i < 4; i++) {
- val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
-
- val &= ~DPIO_SWING_MARGIN000_MASK;
- val |= 102 << DPIO_SWING_MARGIN000_SHIFT;
-
- /*
- * Supposedly this value shouldn't matter when unique transition
- * scale is disabled, but in fact it does matter. Let's just
- * always program the same value and hope it's OK.
- */
- val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
- val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
-
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
- }
-
- /*
- * The document said it needs to set bit 27 for ch0 and bit 26
- * for ch1. Might be a typo in the doc.
- * For now, for this unique transition scale selection, set bit
- * 27 for ch0 and ch1.
- */
- for (i = 0; i < 4; i++) {
- val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
- val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
- }
-
- /* Start swing calculation */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
- val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
-
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
- val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
-
- mutex_unlock(&dev_priv->sb_lock);
+ chv_set_phy_signal_level(encoder, 128, 102, false);
intel_hdmi->set_infoframes(&encoder->base,
intel_crtc->config->has_hdmi_sink,
@@ -2086,10 +1739,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
vlv_wait_port_ready(dev_priv, dport, 0x0);
/* Second common lane will stay alive on its own now */
- if (dport->release_cl2_override) {
- chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
- dport->release_cl2_override = false;
- }
+ chv_phy_release_cl2_override(encoder);
}
static void intel_hdmi_destroy(struct drm_connector *connector)
@@ -2106,6 +1756,8 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_hdmi_set_property,
.atomic_get_property = intel_connector_atomic_get_property,
+ .late_register = intel_connector_register,
+ .early_unregister = intel_connector_unregister,
.destroy = intel_hdmi_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -2114,7 +1766,6 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
.get_modes = intel_hdmi_get_modes,
.mode_valid = intel_hdmi_mode_valid,
- .best_encoder = intel_best_encoder,
};
static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
@@ -2138,7 +1789,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_dig_port->port;
uint8_t alternate_ddc_pin;
@@ -2242,12 +1893,10 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
- intel_connector->unregister = intel_connector_unregister;
intel_hdmi_add_properties(intel_hdmi, connector);
intel_connector_attach_encoder(intel_connector, intel_encoder);
- drm_connector_register(connector);
intel_hdmi->attached_connector = intel_connector;
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
@@ -2280,7 +1929,7 @@ void intel_hdmi_init(struct drm_device *dev,
intel_encoder = &intel_dig_port->base;
drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ DRM_MODE_ENCODER_TMDS, "HDMI %c", port_name(port));
intel_encoder->compute_config = intel_hdmi_compute_config;
if (HAS_PCH_SPLIT(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 2c49458a9..f48957ea1 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -144,7 +144,7 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_connector *intel_connector;
struct intel_encoder *intel_encoder;
@@ -191,7 +191,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv),
hotplug.reenable_work.work);
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct drm_mode_config *mode_config = &dev->mode_config;
int i;
@@ -220,7 +220,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
}
}
if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev);
+ dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
intel_runtime_pm_put(dev_priv);
@@ -302,7 +302,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private, hotplug.hotplug_work);
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_connector *intel_connector;
struct intel_encoder *intel_encoder;
@@ -346,7 +346,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
/**
* intel_hpd_irq_handler - main hotplug irq handler
- * @dev: drm device
+ * @dev_priv: i915 device private
* @pin_mask: a mask of hpd pins that have triggered the irq
* @long_mask: a mask of hpd pins that may be long hpd pulses
*
@@ -360,10 +360,9 @@ static void i915_hotplug_work_func(struct work_struct *work)
* Here, we do hotplug irq storm detection and mitigation, and pass further
* processing to appropriate bottom halves.
*/
-void intel_hpd_irq_handler(struct drm_device *dev,
+void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 pin_mask, u32 long_mask)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int i;
enum port port;
bool storm_detected = false;
@@ -407,7 +406,7 @@ void intel_hpd_irq_handler(struct drm_device *dev,
* hotplug bits itself. So only WARN about unexpected
* interrupts on saner platforms.
*/
- WARN_ONCE(!HAS_GMCH_DISPLAY(dev),
+ WARN_ONCE(!HAS_GMCH_DISPLAY(dev_priv),
"Received HPD interrupt on pin %d although disabled\n", i);
continue;
}
@@ -427,7 +426,7 @@ void intel_hpd_irq_handler(struct drm_device *dev,
}
if (storm_detected)
- dev_priv->display.hpd_irq_setup(dev);
+ dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock(&dev_priv->irq_lock);
/*
@@ -474,7 +473,7 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
*/
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev_priv->dev);
+ dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -482,7 +481,7 @@ void i915_hpd_poll_init_work(struct work_struct *work) {
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private,
hotplug.poll_init_work);
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
bool enabled;
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 81de23098..1f266d7df 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -113,7 +113,7 @@ to_intel_gmbus(struct i2c_adapter *i2c)
void
intel_i2c_reset(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(GMBUS0, 0);
I915_WRITE(GMBUS4, 0);
@@ -138,7 +138,7 @@ static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
static u32 get_reserved(struct intel_gmbus *bus)
{
struct drm_i915_private *dev_priv = bus->dev_priv;
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
u32 reserved = 0;
/* On most chips, these bits must be preserved in software. */
@@ -212,7 +212,7 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter)
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
- intel_i2c_reset(dev_priv->dev);
+ intel_i2c_reset(&dev_priv->drm);
intel_i2c_quirk_set(dev_priv, true);
set_data(bus, 1);
set_clock(bus, 1);
@@ -298,15 +298,16 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
{
int ret;
-#define C ((I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0)
-
if (!HAS_GMBUS_IRQ(dev_priv))
- return wait_for(C, 10);
+ return intel_wait_for_register(dev_priv,
+ GMBUS2, GMBUS_ACTIVE, 0,
+ 10);
/* Important: The hw handles only the first bit, so set only one! */
I915_WRITE(GMBUS4, GMBUS_IDLE_EN);
- ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
+ ret = wait_event_timeout(dev_priv->gmbus_wait_queue,
+ (I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0,
msecs_to_jiffies_timeout(10));
I915_WRITE(GMBUS4, 0);
@@ -315,7 +316,6 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
return 0;
else
return -ETIMEDOUT;
-#undef C
}
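intel_wait_for_register() serves here as a drop-in for the old wait_for(C, 10): it polls until (read(reg) & mask) == value or the millisecond timeout expires, returning 0 on success or -ETIMEDOUT. A generic model of that contract (a sketch, not the i915 implementation):

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int example_wait_for(u32 (*read_reg)(void *ctx), void *ctx,
			    u32 mask, u32 value, unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	for (;;) {
		if ((read_reg(ctx) & mask) == value)
			return 0;		/* condition met */
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;	/* gave up */
		cpu_relax();			/* busy-wait politely */
	}
}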
static int
@@ -632,7 +632,7 @@ static const struct i2c_algorithm gmbus_algorithm = {
*/
int intel_setup_gmbus(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_gmbus *bus;
unsigned int pin;
int ret;
@@ -688,7 +688,7 @@ int intel_setup_gmbus(struct drm_device *dev)
goto err;
}
- intel_i2c_reset(dev_priv->dev);
+ intel_i2c_reset(&dev_priv->drm);
return 0;
@@ -736,7 +736,7 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
void intel_teardown_gmbus(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_gmbus *bus;
unsigned int pin;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 7f2d8415e..414ddda43 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -208,31 +208,27 @@
} while (0)
enum {
- ADVANCED_CONTEXT = 0,
- LEGACY_32B_CONTEXT,
- ADVANCED_AD_CONTEXT,
- LEGACY_64B_CONTEXT
-};
-#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
-#define GEN8_CTX_ADDRESSING_MODE(dev) (USES_FULL_48BIT_PPGTT(dev) ?\
- LEGACY_64B_CONTEXT :\
- LEGACY_32B_CONTEXT)
-enum {
FAULT_AND_HANG = 0,
FAULT_AND_HALT, /* Debug only */
FAULT_AND_STREAM,
FAULT_AND_CONTINUE /* Unsupported */
};
#define GEN8_CTX_ID_SHIFT 32
+#define GEN8_CTX_ID_WIDTH 21
#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26
-static int intel_lr_context_pin(struct intel_context *ctx,
+/* Typical size of the average request (2 pipecontrols and a MI_BB) */
+#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
+
+static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine);
+static int intel_lr_context_pin(struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
/**
* intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
- * @dev: DRM device.
+ * @dev_priv: i915 device private
* @enable_execlists: value of i915.enable_execlists module parameter.
*
* Only certain platforms support Execlists (the prerequisites being
@@ -240,23 +236,22 @@ static int intel_lr_context_pin(struct intel_context *ctx,
*
* Return: 1 if Execlists is supported and has to be enabled.
*/
-int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists)
+int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists)
{
- WARN_ON(i915.enable_ppgtt == -1);
-
/* On platforms with execlist available, vGPU will only
* support execlist mode, no ring buffer mode.
*/
- if (HAS_LOGICAL_RING_CONTEXTS(dev) && intel_vgpu_active(dev))
+ if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv))
return 1;
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
return 1;
if (enable_execlists == 0)
return 0;
- if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) &&
+ if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
+ USES_PPGTT(dev_priv) &&
i915.use_mmio_flip >= 0)
return 1;
@@ -266,19 +261,17 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
static void
logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
+ struct drm_i915_private *dev_priv = engine->i915;
- if (IS_GEN8(dev) || IS_GEN9(dev))
+ if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
engine->idle_lite_restore_wa = ~0;
- engine->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
+ engine->disable_lite_restore_wa = (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
(engine->id == VCS || engine->id == VCS2);
engine->ctx_desc_template = GEN8_CTX_VALID;
- engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
- GEN8_CTX_ADDRESSING_MODE_SHIFT;
- if (IS_GEN8(dev))
+ if (IS_GEN8(dev_priv))
engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
@@ -297,7 +290,7 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
* descriptor for a pinned context
*
* @ctx: Context to work on
- * @ring: Engine the descriptor will be used with
+ * @engine: Engine the descriptor will be used with
*
* The context descriptor encodes various attributes of a context,
* including its GTT address and some flags. Because it's fairly
@@ -305,62 +298,42 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
* which remains valid until the context is unpinned.
*
* This is what a descriptor looks like, from LSB to MSB:
- * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template)
+ * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template)
* bits 12-31: LRCA, GTT address of (the HWSP of) this context
- * bits 32-51: ctx ID, a globally unique tag (the LRCA again!)
- * bits 52-63: reserved, may encode the engine ID (for GuC)
+ * bits 32-52: ctx ID, a globally unique tag
+ * bits 53-54: mbz, reserved for use by hardware
+ * bits 55-63: group ID, currently unused and set to 0
*/
static void
-intel_lr_context_descriptor_update(struct intel_context *ctx,
+intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
- uint64_t lrca, desc;
+ struct intel_context *ce = &ctx->engine[engine->id];
+ u64 desc;
- lrca = ctx->engine[engine->id].lrc_vma->node.start +
- LRC_PPHWSP_PN * PAGE_SIZE;
+ BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));
- desc = engine->ctx_desc_template; /* bits 0-11 */
- desc |= lrca; /* bits 12-31 */
- desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */
+ desc = ctx->desc_template; /* bits 3-4 */
+ desc |= engine->ctx_desc_template; /* bits 0-11 */
+ desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
+ /* bits 12-31 */
+ desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */
- ctx->engine[engine->id].lrc_desc = desc;
+ ce->lrc_desc = desc;
}
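Given the layout documented above (flags in bits 0-11, LRCA in bits 12-31, ctx ID in bits 32-52), the fields can be unpacked mechanically from a cached descriptor. A sketch using the GEN8_CTX_ID_* constants defined earlier in this file:

/* Sketch: recover the globally unique context ID from a descriptor. */
static inline u32 ctx_desc_to_hw_id(u64 desc)
{
	return (desc >> GEN8_CTX_ID_SHIFT) & ((1u << GEN8_CTX_ID_WIDTH) - 1);
}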
-uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
+uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
return ctx->engine[engine->id].lrc_desc;
}
-/**
- * intel_execlists_ctx_id() - get the Execlists Context ID
- * @ctx: Context to get the ID for
- * @ring: Engine to get the ID for
- *
- * Do not confuse with ctx->id! Unfortunately we have a name overload
- * here: the old context ID we pass to userspace as a handler so that
- * they can refer to a context, and the new context ID we pass to the
- * ELSP so that the GPU can inform us of the context status via
- * interrupts.
- *
- * The context ID is a portion of the context descriptor, so we can
- * just extract the required part from the cached descriptor.
- *
- * Return: 20-bits globally unique context ID.
- */
-u32 intel_execlists_ctx_id(struct intel_context *ctx,
- struct intel_engine_cs *engine)
-{
- return intel_lr_context_descriptor(ctx, engine) >> GEN8_CTX_ID_SHIFT;
-}
-
static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
struct drm_i915_gem_request *rq1)
{
struct intel_engine_cs *engine = rq0->engine;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = rq0->i915;
uint64_t desc[2];
if (rq1) {
@@ -431,6 +404,20 @@ static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
spin_unlock_irq(&dev_priv->uncore.lock);
}
+static inline void execlists_context_status_change(
+ struct drm_i915_gem_request *rq,
+ unsigned long status)
+{
+ /*
+	 * Currently this is only used when GVT-g is enabled. When GVT-g
+	 * is disabled, the compiler should eliminate this function as
+	 * dead code.
+ */
+ if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
+ return;
+
+ atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
+}
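execlists_context_status_change() publishes SCHEDULE_IN/SCHEDULE_OUT events over the atomic notifier chain in rq->ctx->status_notifier, with GVT-g as the intended consumer. A hedged sketch of what a subscriber might look like (the callback and registration site are assumptions; only the notifier head comes from the patch):

#include <linux/notifier.h>

static int example_ctx_status_cb(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct drm_i915_gem_request *rq = data;

	/* action is INTEL_CONTEXT_SCHEDULE_IN or INTEL_CONTEXT_SCHEDULE_OUT */
	(void)rq;
	return NOTIFY_OK;
}

/* Registration: atomic_notifier_chain_register(&ctx->status_notifier, &nb); */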
+
static void execlists_context_unqueue(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
@@ -442,7 +429,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
* If irqs are not active generate a warning as batches that finish
* without the irqs may get lost and a GPU Hang may occur.
*/
- WARN_ON(!intel_irqs_enabled(engine->dev->dev_private));
+ WARN_ON(!intel_irqs_enabled(engine->i915));
/* Try to read in pairs */
list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
@@ -453,10 +440,24 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
/* Same ctx: ignore first request, as second request
* will update tail past first request's workload */
cursor->elsp_submitted = req0->elsp_submitted;
- list_move_tail(&req0->execlist_link,
- &engine->execlist_retired_req_list);
+ list_del(&req0->execlist_link);
+ i915_gem_request_unreference(req0);
req0 = cursor;
} else {
+ if (IS_ENABLED(CONFIG_DRM_I915_GVT)) {
+ /*
+ * req0 (after merged) ctx requires single
+ * submission, stop picking
+ */
+ if (req0->ctx->execlists_force_single_submission)
+ break;
+ /*
+ * req0 ctx doesn't require single submission,
+ * but next req ctx requires, stop picking
+ */
+ if (cursor->ctx->execlists_force_single_submission)
+ break;
+ }
req1 = cursor;
WARN_ON(req1->elsp_submitted);
break;
@@ -466,6 +467,12 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
if (unlikely(!req0))
return;
+ execlists_context_status_change(req0, INTEL_CONTEXT_SCHEDULE_IN);
+
+ if (req1)
+ execlists_context_status_change(req1,
+ INTEL_CONTEXT_SCHEDULE_IN);
+
if (req0->elsp_submitted & engine->idle_lite_restore_wa) {
/*
* WaIdleLiteRestore: make sure we never cause a lite restore
@@ -486,7 +493,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
}
static unsigned int
-execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id)
+execlists_check_remove_request(struct intel_engine_cs *engine, u32 ctx_id)
{
struct drm_i915_gem_request *head_req;
@@ -496,19 +503,18 @@ execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id)
struct drm_i915_gem_request,
execlist_link);
- if (!head_req)
- return 0;
-
- if (unlikely(intel_execlists_ctx_id(head_req->ctx, engine) != request_id))
- return 0;
+ if (WARN_ON(!head_req || (head_req->ctx_hw_id != ctx_id)))
+ return 0;
WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");
if (--head_req->elsp_submitted > 0)
return 0;
- list_move_tail(&head_req->execlist_link,
- &engine->execlist_retired_req_list);
+ execlists_context_status_change(head_req, INTEL_CONTEXT_SCHEDULE_OUT);
+
+ list_del(&head_req->execlist_link);
+ i915_gem_request_unreference(head_req);
return 1;
}
@@ -517,7 +523,7 @@ static u32
get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
u32 *context_id)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
u32 status;
read_pointer %= GEN8_CSB_ENTRIES;
@@ -535,7 +541,7 @@ get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
/**
* intel_lrc_irq_handler() - handle Context Switch interrupts
- * @engine: Engine Command Streamer to handle.
+ * @data: engine pointer, passed to the tasklet as an unsigned long
*
* Check the unread Context Status Buffers and manage the submission of new
* contexts to the ELSP accordingly.
@@ -543,7 +549,7 @@ get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
static void intel_lrc_irq_handler(unsigned long data)
{
struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
u32 status_pointer;
unsigned int read_pointer, write_pointer;
u32 csb[GEN8_CSB_ENTRIES][2];
@@ -612,11 +618,6 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
struct drm_i915_gem_request *cursor;
int num_elements = 0;
- if (request->ctx != request->i915->kernel_context)
- intel_lr_context_pin(request->ctx, engine);
-
- i915_gem_request_reference(request);
-
spin_lock_bh(&engine->execlist_lock);
list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
@@ -633,12 +634,14 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
if (request->ctx == tail_req->ctx) {
WARN(tail_req->elsp_submitted != 0,
"More than 2 already-submitted reqs queued\n");
- list_move_tail(&tail_req->execlist_link,
- &engine->execlist_retired_req_list);
+ list_del(&tail_req->execlist_link);
+ i915_gem_request_unreference(tail_req);
}
}
+ i915_gem_request_reference(request);
list_add_tail(&request->execlist_link, &engine->execlist_queue);
+ request->ctx_hw_id = request->ctx->hw_id;
if (num_elements == 0)
execlists_context_unqueue(engine);
@@ -698,9 +701,23 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
- int ret = 0;
+ struct intel_engine_cs *engine = request->engine;
+ struct intel_context *ce = &request->ctx->engine[engine->id];
+ int ret;
- request->ringbuf = request->ctx->engine[request->engine->id].ringbuf;
+ /* Flush enough space to reduce the likelihood of waiting after
+ * we start building the request - in which case we will just
+ * have to repeat work.
+ */
+ request->reserved_space += EXECLISTS_REQUEST_SIZE;
+
+ if (!ce->state) {
+ ret = execlists_context_deferred_alloc(request->ctx, engine);
+ if (ret)
+ return ret;
+ }
+
+ request->ringbuf = ce->ringbuf;
if (i915.enable_guc_submission) {
/*
@@ -708,16 +725,39 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
* going any further, as the i915_add_request() call
* later on mustn't fail ...
*/
- struct intel_guc *guc = &request->i915->guc;
-
- ret = i915_guc_wq_check_space(guc->execbuf_client);
+ ret = i915_guc_wq_check_space(request);
if (ret)
return ret;
}
- if (request->ctx != request->i915->kernel_context)
- ret = intel_lr_context_pin(request->ctx, request->engine);
+ ret = intel_lr_context_pin(request->ctx, engine);
+ if (ret)
+ return ret;
+
+ ret = intel_ring_begin(request, 0);
+ if (ret)
+ goto err_unpin;
+
+ if (!ce->initialised) {
+ ret = engine->init_context(request);
+ if (ret)
+ goto err_unpin;
+
+ ce->initialised = true;
+ }
+
+ /* Note that after this point, we have committed to using
+ * this request as it is being used to both track the
+ * state of engine initialisation and liveness of the
+ * golden renderstate above. Think twice before you try
+ * to cancel/unwind this request now.
+ */
+ request->reserved_space -= EXECLISTS_REQUEST_SIZE;
+ return 0;
+
+err_unpin:
+ intel_lr_context_unpin(request->ctx, engine);
return ret;
}
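The reserved_space adjustments above implement a reserve/commit pattern: the request over-reserves ring space while emitting setup commands that may still fail and unwind, then shrinks the reservation once the request is committed. In miniature (a sketch of the pattern, not additional driver code):

/* Sketch of the reserve/commit pattern used in the function above. */
static int example_build(struct drm_i915_gem_request *request)
{
	int ret;

	request->reserved_space += EXECLISTS_REQUEST_SIZE;	/* reserve */

	ret = intel_ring_begin(request, 0);			/* may fail */
	if (ret) {
		request->reserved_space -= EXECLISTS_REQUEST_SIZE;
		return ret;					/* unwind */
	}

	request->reserved_space -= EXECLISTS_REQUEST_SIZE;	/* commit */
	return 0;
}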
@@ -734,7 +774,6 @@ static int
intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
{
struct intel_ringbuffer *ringbuf = request->ringbuf;
- struct drm_i915_private *dev_priv = request->i915;
struct intel_engine_cs *engine = request->engine;
intel_logical_ring_advance(ringbuf);
@@ -750,54 +789,28 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_advance(ringbuf);
- if (intel_engine_stopped(engine))
- return 0;
-
- if (engine->last_context != request->ctx) {
- if (engine->last_context)
- intel_lr_context_unpin(engine->last_context, engine);
- if (request->ctx != request->i915->kernel_context) {
- intel_lr_context_pin(request->ctx, engine);
- engine->last_context = request->ctx;
- } else {
- engine->last_context = NULL;
- }
- }
+ /* We keep the previous context alive until we retire the following
+ * request. This ensures that the context object is still pinned
+ * for any residual writes the HW makes into it on the context switch
+ * into the next object following the breadcrumb. Otherwise, we may
+ * retire the context too early.
+ */
+ request->previous_context = engine->last_context;
+ engine->last_context = request->ctx;
- if (dev_priv->guc.execbuf_client)
- i915_guc_submit(dev_priv->guc.execbuf_client, request);
+ if (i915.enable_guc_submission)
+ i915_guc_submit(request);
else
execlists_context_queue(request);
return 0;
}
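
The previous_context handover above implies a matching release when the request retires: once the breadcrumb has passed, the hardware can no longer write into the old context image. A hedged sketch of that other half, using stand-in types rather than the driver's (retire_req() and ctx_unpin() are hypothetical):

    #include <stddef.h>

    struct ctx;
    void ctx_unpin(struct ctx *c); /* stand-in for intel_lr_context_unpin() */

    struct req {
        struct ctx *ctx;
        struct ctx *previous_context;
    };

    /* Sketch: the breadcrumb guarantees the HW has switched away from
     * the context that was live before this request, so the extra pin
     * taken at submission can finally be dropped. */
    static void retire_req(struct req *rq)
    {
        if (rq->previous_context) {
            ctx_unpin(rq->previous_context);
            rq->previous_context = NULL;
        }
    }
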
-int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
-{
- /*
- * The first call merely notes the reserve request and is common for
- * all back ends. The subsequent localised _begin() call actually
- * ensures that the reservation is available. Without the begin, if
- * the request creator immediately submitted the request without
- * adding any commands to it then there might not actually be
- * sufficient room for the submission commands.
- */
- intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
-
- return intel_ring_begin(request, 0);
-}
-
/**
 * intel_execlists_submission() - submit a batchbuffer for execution, Execlists style
- * @dev: DRM device.
- * @file: DRM file.
- * @ring: Engine Command Streamer to submit to.
- * @ctx: Context to employ for this submission.
+ * @params: execbuffer call parameters.
* @args: execbuffer call arguments.
* @vmas: list of vmas.
- * @batch_obj: the batchbuffer to submit.
- * @exec_start: batchbuffer start virtual address pointer.
- * @dispatch_flags: translated execbuffer call flags.
*
* This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
* away the submission details of the execbuffer ioctl call.
@@ -810,7 +823,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
{
struct drm_device *dev = params->dev;
struct intel_engine_cs *engine = params->engine;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
u64 exec_start;
int instp_mode;
@@ -881,28 +894,18 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
return 0;
}
-void intel_execlists_retire_requests(struct intel_engine_cs *engine)
+void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *req, *tmp;
- struct list_head retired_list;
+ LIST_HEAD(cancel_list);
- WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
- if (list_empty(&engine->execlist_retired_req_list))
- return;
+ WARN_ON(!mutex_is_locked(&engine->i915->drm.struct_mutex));
- INIT_LIST_HEAD(&retired_list);
spin_lock_bh(&engine->execlist_lock);
- list_replace_init(&engine->execlist_retired_req_list, &retired_list);
+ list_replace_init(&engine->execlist_queue, &cancel_list);
spin_unlock_bh(&engine->execlist_lock);
- list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
- struct intel_context *ctx = req->ctx;
- struct drm_i915_gem_object *ctx_obj =
- ctx->engine[engine->id].state;
-
- if (ctx_obj && (ctx != req->i915->kernel_context))
- intel_lr_context_unpin(ctx, engine);
-
+ list_for_each_entry_safe(req, tmp, &cancel_list, execlist_link) {
list_del(&req->execlist_link);
i915_gem_request_unreference(req);
}
@@ -910,7 +913,7 @@ void intel_execlists_retire_requests(struct intel_engine_cs *engine)
void intel_logical_ring_stop(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
if (!intel_engine_initialized(engine))
@@ -923,7 +926,10 @@ void intel_logical_ring_stop(struct intel_engine_cs *engine)
/* TODO: Is this correct with Execlists enabled? */
I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
- if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
+ if (intel_wait_for_register(dev_priv,
+ RING_MI_MODE(engine->mmio_base),
+ MODE_IDLE, MODE_IDLE,
+ 1000)) {
DRM_ERROR("%s :timed out trying to stop ring\n", engine->name);
return;
}
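
The new call expresses the wait as a (register, mask, value, timeout) tuple instead of an open-coded condition. A minimal sketch of that polling contract, assuming hypothetical read_reg() and sleep_ms() helpers in place of the driver's MMIO accessors:

    #include <stdint.h>

    uint32_t read_reg(uint32_t reg);  /* stand-in for I915_READ() */
    void sleep_ms(unsigned int ms);

    /* Poll until (reg & mask) == value or the timeout expires;
     * returns 0 on success, -1 on timeout. */
    static int wait_for_register(uint32_t reg, uint32_t mask,
                                 uint32_t value, unsigned int timeout_ms)
    {
        while (timeout_ms--) {
            if ((read_reg(reg) & mask) == value)
                return 0;
            sleep_ms(1);
        }
        return -1;
    }
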
@@ -946,25 +952,26 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
return 0;
}
-static int intel_lr_context_do_pin(struct intel_context *ctx,
- struct intel_engine_cs *engine)
+static int intel_lr_context_pin(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
- struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf;
+ struct drm_i915_private *dev_priv = ctx->i915;
+ struct intel_context *ce = &ctx->engine[engine->id];
void *vaddr;
u32 *lrc_reg_state;
int ret;
- WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
+ lockdep_assert_held(&ctx->i915->drm.struct_mutex);
- ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
- PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+ if (ce->pin_count++)
+ return 0;
+
+ ret = i915_gem_obj_ggtt_pin(ce->state, GEN8_LR_CONTEXT_ALIGN,
+ PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
if (ret)
- return ret;
+ goto err;
- vaddr = i915_gem_object_pin_map(ctx_obj);
+ vaddr = i915_gem_object_pin_map(ce->state);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
goto unpin_ctx_obj;
@@ -972,65 +979,54 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
- ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf);
+ ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ringbuf);
if (ret)
goto unpin_map;
- ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
+ i915_gem_context_reference(ctx);
+ ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state);
intel_lr_context_descriptor_update(ctx, engine);
- lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
- ctx->engine[engine->id].lrc_reg_state = lrc_reg_state;
- ctx_obj->dirty = true;
+
+ lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ringbuf->vma->node.start;
+ ce->lrc_reg_state = lrc_reg_state;
+ ce->state->dirty = true;
/* Invalidate GuC TLB. */
if (i915.enable_guc_submission)
I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
- return ret;
+ return 0;
unpin_map:
- i915_gem_object_unpin_map(ctx_obj);
+ i915_gem_object_unpin_map(ce->state);
unpin_ctx_obj:
- i915_gem_object_ggtt_unpin(ctx_obj);
-
+ i915_gem_object_ggtt_unpin(ce->state);
+err:
+ ce->pin_count = 0;
return ret;
}
-static int intel_lr_context_pin(struct intel_context *ctx,
- struct intel_engine_cs *engine)
+void intel_lr_context_unpin(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
{
- int ret = 0;
+ struct intel_context *ce = &ctx->engine[engine->id];
- if (ctx->engine[engine->id].pin_count++ == 0) {
- ret = intel_lr_context_do_pin(ctx, engine);
- if (ret)
- goto reset_pin_count;
+ lockdep_assert_held(&ctx->i915->drm.struct_mutex);
+ GEM_BUG_ON(ce->pin_count == 0);
- i915_gem_context_reference(ctx);
- }
- return ret;
+ if (--ce->pin_count)
+ return;
-reset_pin_count:
- ctx->engine[engine->id].pin_count = 0;
- return ret;
-}
+ intel_unpin_ringbuffer_obj(ce->ringbuf);
-void intel_lr_context_unpin(struct intel_context *ctx,
- struct intel_engine_cs *engine)
-{
- struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
+ i915_gem_object_unpin_map(ce->state);
+ i915_gem_object_ggtt_unpin(ce->state);
- WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex));
- if (--ctx->engine[engine->id].pin_count == 0) {
- i915_gem_object_unpin_map(ctx_obj);
- intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
- i915_gem_object_ggtt_unpin(ctx_obj);
- ctx->engine[engine->id].lrc_vma = NULL;
- ctx->engine[engine->id].lrc_desc = 0;
- ctx->engine[engine->id].lrc_reg_state = NULL;
+ ce->lrc_vma = NULL;
+ ce->lrc_desc = 0;
+ ce->lrc_reg_state = NULL;
- i915_gem_context_unreference(ctx);
- }
+ i915_gem_context_unreference(ctx);
}
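
The pin/unpin pair above is a classic first-user/last-user refcount: only the 0->1 transition does the expensive setup, and only the 1->0 transition tears it down. A compact model of the pattern in plain C (ctx_engine and its fields are illustrative, not the driver's struct):

    #include <assert.h>

    struct ctx_engine {
        unsigned int pin_count;
        int mapped; /* stands in for the GGTT pin + register map */
    };

    static int ctx_pin(struct ctx_engine *ce)
    {
        if (ce->pin_count++)  /* already pinned: just count */
            return 0;
        ce->mapped = 1;       /* first pin: do the real work */
        return 0;
    }

    static void ctx_unpin(struct ctx_engine *ce)
    {
        assert(ce->pin_count > 0);
        if (--ce->pin_count)  /* still in use elsewhere */
            return;
        ce->mapped = 0;       /* last unpin: tear down */
    }
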
static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
@@ -1038,9 +1034,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
int ret, i;
struct intel_engine_cs *engine = req->engine;
struct intel_ringbuffer *ringbuf = req->ringbuf;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_workarounds *w = &dev_priv->workarounds;
+ struct i915_workarounds *w = &req->i915->workarounds;
if (w->count == 0)
return 0;
@@ -1103,7 +1097,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
uint32_t *const batch,
uint32_t index)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
/*
@@ -1165,7 +1159,7 @@ static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
/**
* gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA
*
- * @ring: only applicable for RCS
+ * @engine: only applicable for RCS
* @wa_ctx: structure representing wa_ctx
 * @offset: specifies start of the batch, should be cache-aligned. This is updated
* with the offset value received as input.
@@ -1202,7 +1196,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
/* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
- if (IS_BROADWELL(engine->dev)) {
+ if (IS_BROADWELL(engine->i915)) {
int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
if (rc < 0)
return rc;
@@ -1239,7 +1233,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
/**
* gen8_init_perctx_bb() - initialize per ctx batch with WA
*
- * @ring: only applicable for RCS
+ * @engine: only applicable for RCS
* @wa_ctx: structure representing wa_ctx
 * @offset: specifies start of the batch, should be cache-aligned.
 * @size: size of the batch in DWORDS but HW expects in terms of cachelines
@@ -1274,13 +1268,12 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
uint32_t *offset)
{
int ret;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
/* WaDisableCtxRestoreArbitration:skl,bxt */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+ if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
@@ -1305,6 +1298,31 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
wa_ctx_emit(batch, index, 0);
wa_ctx_emit(batch, index, 0);
}
+
+ /* WaMediaPoolStateCmdInWABB:bxt */
+ if (HAS_POOLED_EU(engine->i915)) {
+ /*
+ * EU pool configuration is set up along with the golden context
+ * during context initialization. The value depends on the
+ * device type (2x6 or 3x6) and would normally be updated for
+ * whichever subslice is disabled, especially on 2x6 devices.
+ * However, it is safe to load the default 3x6 configuration
+ * instead of masking off the corresponding bits, because the HW
+ * ignores bits of a disabled subslice and drops down to the
+ * appropriate config. See render_state_setup() in
+ * i915_gem_render_state.c for the possible configurations; to
+ * avoid duplication they are not repeated here.
+ */
+ u32 eu_pool_config = 0x00777000;
+ wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_STATE);
+ wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_ENABLE);
+ wa_ctx_emit(batch, index, eu_pool_config);
+ wa_ctx_emit(batch, index, 0);
+ wa_ctx_emit(batch, index, 0);
+ wa_ctx_emit(batch, index, 0);
+ }
+
/* Pad to end of cacheline */
while (index % CACHELINE_DWORDS)
wa_ctx_emit(batch, index, MI_NOOP);
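
Batches handed to the hardware here must end on a cacheline boundary, hence the MI_NOOP padding loop. The same idea in isolation (constants mirror the driver's 64-byte cacheline; pad_to_cacheline() is a stand-in name):

    #include <stdint.h>

    #define CACHELINE_DWORDS 16 /* 64-byte cacheline / 4-byte dwords */
    #define MI_NOOP 0

    static uint32_t pad_to_cacheline(uint32_t *batch, uint32_t index)
    {
        while (index % CACHELINE_DWORDS)
            batch[index++] = MI_NOOP;
        return index;
    }
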
@@ -1317,12 +1335,11 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
uint32_t *const batch,
uint32_t *offset)
{
- struct drm_device *dev = engine->dev;
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) ||
+ IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
wa_ctx_emit(batch, index,
@@ -1331,7 +1348,7 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
}
/* WaClearTdlStateAckDirtyBits:bxt */
- if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
+ if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_B0)) {
wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4));
wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK);
@@ -1350,8 +1367,8 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
}
/* WaDisableCtxRestoreArbitration:skl,bxt */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+ if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
+ IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
@@ -1363,11 +1380,13 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
{
int ret;
- engine->wa_ctx.obj = i915_gem_alloc_object(engine->dev,
- PAGE_ALIGN(size));
- if (!engine->wa_ctx.obj) {
+ engine->wa_ctx.obj = i915_gem_object_create(&engine->i915->drm,
+ PAGE_ALIGN(size));
+ if (IS_ERR(engine->wa_ctx.obj)) {
DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
- return -ENOMEM;
+ ret = PTR_ERR(engine->wa_ctx.obj);
+ engine->wa_ctx.obj = NULL;
+ return ret;
}
ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
@@ -1401,9 +1420,9 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
WARN_ON(engine->id != RCS);
/* update this when WAs for higher gens are added */
- if (INTEL_INFO(engine->dev)->gen > 9) {
+ if (INTEL_GEN(engine->i915) > 9) {
DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
- INTEL_INFO(engine->dev)->gen);
+ INTEL_GEN(engine->i915));
return 0;
}
@@ -1423,7 +1442,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
batch = kmap_atomic(page);
offset = 0;
- if (INTEL_INFO(engine->dev)->gen == 8) {
+ if (IS_GEN8(engine->i915)) {
ret = gen8_init_indirectctx_bb(engine,
&wa_ctx->indirect_ctx,
batch,
@@ -1437,7 +1456,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
&offset);
if (ret)
goto out;
- } else if (INTEL_INFO(engine->dev)->gen == 9) {
+ } else if (IS_GEN9(engine->i915)) {
ret = gen9_init_indirectctx_bb(engine,
&wa_ctx->indirect_ctx,
batch,
@@ -1463,7 +1482,7 @@ out:
static void lrc_init_hws(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE(RING_HWS_PGA(engine->mmio_base),
(u32)engine->status_page.gfx_addr);
@@ -1472,8 +1491,7 @@ static void lrc_init_hws(struct intel_engine_cs *engine)
static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned int next_context_status_buffer_hw;
lrc_init_hws(engine);
@@ -1520,8 +1538,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
static int gen8_init_render_ring(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
ret = gen8_init_common_ring(engine);
@@ -1598,7 +1615,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
if (req->ctx->ppgtt &&
(intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
if (!USES_FULL_48BIT_PPGTT(req->i915) &&
- !intel_vgpu_active(req->i915->dev)) {
+ !intel_vgpu_active(req->i915)) {
ret = intel_logical_ring_emit_pdps(req);
if (ret)
return ret;
@@ -1624,38 +1641,18 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
return 0;
}
-static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
+static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
-
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
- return false;
-
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (engine->irq_refcount++ == 0) {
- I915_WRITE_IMR(engine,
- ~(engine->irq_enable_mask | engine->irq_keep_mask));
- POSTING_READ(RING_IMR(engine->mmio_base));
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
- return true;
+ struct drm_i915_private *dev_priv = engine->i915;
+ I915_WRITE_IMR(engine,
+ ~(engine->irq_enable_mask | engine->irq_keep_mask));
+ POSTING_READ_FW(RING_IMR(engine->mmio_base));
}
-static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
+static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
-
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--engine->irq_refcount == 0) {
- I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
- POSTING_READ(RING_IMR(engine->mmio_base));
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ struct drm_i915_private *dev_priv = engine->i915;
+ I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
}
static int gen8_emit_flush(struct drm_i915_gem_request *request,
@@ -1664,8 +1661,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
{
struct intel_ringbuffer *ringbuf = request->ringbuf;
struct intel_engine_cs *engine = ringbuf->engine;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = request->i915;
uint32_t cmd;
int ret;
@@ -1734,7 +1730,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
* On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
* pipe control.
*/
- if (IS_GEN9(engine->dev))
+ if (IS_GEN9(request->i915))
vf_flush_wa = true;
/* WaForGAMHang:kbl */
@@ -1793,16 +1789,6 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
return 0;
}
-static u32 gen8_get_seqno(struct intel_engine_cs *engine)
-{
- return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
-}
-
-static void gen8_set_seqno(struct intel_engine_cs *engine, u32 seqno)
-{
- intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
-}
-
static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
{
/*
@@ -1818,14 +1804,6 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
}
-static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
-{
- intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
-
- /* See bxt_a_get_seqno() explaining the reason for the clflush. */
- intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
-}
-
/*
* Reserve space for 2 NOOPs at the end of each request to be
* used as a workaround for not being allowed to do lite
@@ -1833,11 +1811,6 @@ static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
*/
#define WA_TAIL_DWORDS 2
-static inline u32 hws_seqno_address(struct intel_engine_cs *engine)
-{
- return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
-}
-
static int gen8_emit_request(struct drm_i915_gem_request *request)
{
struct intel_ringbuffer *ringbuf = request->ringbuf;
@@ -1853,10 +1826,10 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
intel_logical_ring_emit(ringbuf,
(MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
intel_logical_ring_emit(ringbuf,
- hws_seqno_address(request->engine) |
+ intel_hws_seqno_address(request->engine) |
MI_FLUSH_DW_USE_GTT);
intel_logical_ring_emit(ringbuf, 0);
- intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+ intel_logical_ring_emit(ringbuf, request->seqno);
intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
intel_logical_ring_emit(ringbuf, MI_NOOP);
return intel_logical_ring_advance_and_submit(request);
@@ -1883,7 +1856,8 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
(PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_QW_WRITE));
- intel_logical_ring_emit(ringbuf, hws_seqno_address(request->engine));
+ intel_logical_ring_emit(ringbuf,
+ intel_hws_seqno_address(request->engine));
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
/* We're thrashing one dword of HWS. */
@@ -1945,7 +1919,7 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
/**
* intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
*
- * @ring: Engine Command Streamer.
+ * @engine: Engine Command Streamer.
*
*/
void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
@@ -1962,7 +1936,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
tasklet_kill(&engine->irq_tasklet);
- dev_priv = engine->dev->dev_private;
+ dev_priv = engine->i915;
if (engine->buffer) {
intel_logical_ring_stop(engine);
@@ -1975,36 +1949,34 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
i915_cmd_parser_fini_ring(engine);
i915_gem_batch_pool_fini(&engine->batch_pool);
+ intel_engine_fini_breadcrumbs(engine);
+
if (engine->status_page.obj) {
i915_gem_object_unpin_map(engine->status_page.obj);
engine->status_page.obj = NULL;
}
+ intel_lr_context_unpin(dev_priv->kernel_context, engine);
engine->idle_lite_restore_wa = 0;
engine->disable_lite_restore_wa = false;
engine->ctx_desc_template = 0;
lrc_destroy_wa_ctx_obj(engine);
- engine->dev = NULL;
+ engine->i915 = NULL;
}
static void
-logical_ring_default_vfuncs(struct drm_device *dev,
- struct intel_engine_cs *engine)
+logical_ring_default_vfuncs(struct intel_engine_cs *engine)
{
/* Default vfuncs which can be overridden by each engine. */
engine->init_hw = gen8_init_common_ring;
engine->emit_request = gen8_emit_request;
engine->emit_flush = gen8_emit_flush;
- engine->irq_get = gen8_logical_ring_get_irq;
- engine->irq_put = gen8_logical_ring_put_irq;
+ engine->irq_enable = gen8_logical_ring_enable_irq;
+ engine->irq_disable = gen8_logical_ring_disable_irq;
engine->emit_bb_start = gen8_emit_bb_start;
- engine->get_seqno = gen8_get_seqno;
- engine->set_seqno = gen8_set_seqno;
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
engine->irq_seqno_barrier = bxt_a_seqno_barrier;
- engine->set_seqno = bxt_a_set_seqno;
- }
}
static inline void
@@ -2033,60 +2005,28 @@ lrc_setup_hws(struct intel_engine_cs *engine,
}
static int
-logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
+logical_ring_init(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_context *dctx = dev_priv->kernel_context;
- enum forcewake_domains fw_domains;
+ struct i915_gem_context *dctx = engine->i915->kernel_context;
int ret;
- /* Intentionally left blank. */
- engine->buffer = NULL;
-
- engine->dev = dev;
- INIT_LIST_HEAD(&engine->active_list);
- INIT_LIST_HEAD(&engine->request_list);
- i915_gem_batch_pool_init(dev, &engine->batch_pool);
- init_waitqueue_head(&engine->irq_queue);
-
- INIT_LIST_HEAD(&engine->buffers);
- INIT_LIST_HEAD(&engine->execlist_queue);
- INIT_LIST_HEAD(&engine->execlist_retired_req_list);
- spin_lock_init(&engine->execlist_lock);
-
- tasklet_init(&engine->irq_tasklet,
- intel_lrc_irq_handler, (unsigned long)engine);
-
- logical_ring_init_platform_invariants(engine);
-
- fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
- RING_ELSP(engine),
- FW_REG_WRITE);
-
- fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
- RING_CONTEXT_STATUS_PTR(engine),
- FW_REG_READ | FW_REG_WRITE);
-
- fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
- RING_CONTEXT_STATUS_BUF_BASE(engine),
- FW_REG_READ);
-
- engine->fw_domains = fw_domains;
+ ret = intel_engine_init_breadcrumbs(engine);
+ if (ret)
+ goto error;
ret = i915_cmd_parser_init_ring(engine);
if (ret)
goto error;
- ret = intel_lr_context_deferred_alloc(dctx, engine);
+ ret = execlists_context_deferred_alloc(dctx, engine);
if (ret)
goto error;
/* As this is the default context, always pin it */
- ret = intel_lr_context_do_pin(dctx, engine);
+ ret = intel_lr_context_pin(dctx, engine);
if (ret) {
- DRM_ERROR(
- "Failed to pin and map ringbuffer %s: %d\n",
- engine->name, ret);
+ DRM_ERROR("Failed to pin context for %s: %d\n",
+ engine->name, ret);
goto error;
}
@@ -2104,26 +2044,16 @@ error:
return ret;
}
-static int logical_render_ring_init(struct drm_device *dev)
+static int logical_render_ring_init(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *engine = &dev_priv->engine[RCS];
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
- engine->name = "render ring";
- engine->id = RCS;
- engine->exec_id = I915_EXEC_RENDER;
- engine->guc_id = GUC_RENDER_ENGINE;
- engine->mmio_base = RENDER_RING_BASE;
-
- logical_ring_default_irqs(engine, GEN8_RCS_IRQ_SHIFT);
- if (HAS_L3_DPF(dev))
+ if (HAS_L3_DPF(dev_priv))
engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
- logical_ring_default_vfuncs(dev, engine);
-
/* Override some for render ring. */
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
engine->init_hw = gen9_init_render_ring;
else
engine->init_hw = gen8_init_render_ring;
@@ -2132,9 +2062,7 @@ static int logical_render_ring_init(struct drm_device *dev)
engine->emit_flush = gen8_emit_flush_render;
engine->emit_request = gen8_emit_request_render;
- engine->dev = dev;
-
- ret = intel_init_pipe_control(engine);
+ ret = intel_init_pipe_control(engine, 4096);
if (ret)
return ret;
@@ -2149,7 +2077,7 @@ static int logical_render_ring_init(struct drm_device *dev)
ret);
}
- ret = logical_ring_init(dev, engine);
+ ret = logical_ring_init(engine);
if (ret) {
lrc_destroy_wa_ctx_obj(engine);
}
@@ -2157,133 +2085,164 @@ static int logical_render_ring_init(struct drm_device *dev)
return ret;
}
-static int logical_bsd_ring_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *engine = &dev_priv->engine[VCS];
-
- engine->name = "bsd ring";
- engine->id = VCS;
- engine->exec_id = I915_EXEC_BSD;
- engine->guc_id = GUC_VIDEO_ENGINE;
- engine->mmio_base = GEN6_BSD_RING_BASE;
-
- logical_ring_default_irqs(engine, GEN8_VCS1_IRQ_SHIFT);
- logical_ring_default_vfuncs(dev, engine);
-
- return logical_ring_init(dev, engine);
-}
+static const struct logical_ring_info {
+ const char *name;
+ unsigned exec_id;
+ unsigned guc_id;
+ u32 mmio_base;
+ unsigned irq_shift;
+ int (*init)(struct intel_engine_cs *engine);
+} logical_rings[] = {
+ [RCS] = {
+ .name = "render ring",
+ .exec_id = I915_EXEC_RENDER,
+ .guc_id = GUC_RENDER_ENGINE,
+ .mmio_base = RENDER_RING_BASE,
+ .irq_shift = GEN8_RCS_IRQ_SHIFT,
+ .init = logical_render_ring_init,
+ },
+ [BCS] = {
+ .name = "blitter ring",
+ .exec_id = I915_EXEC_BLT,
+ .guc_id = GUC_BLITTER_ENGINE,
+ .mmio_base = BLT_RING_BASE,
+ .irq_shift = GEN8_BCS_IRQ_SHIFT,
+ .init = logical_ring_init,
+ },
+ [VCS] = {
+ .name = "bsd ring",
+ .exec_id = I915_EXEC_BSD,
+ .guc_id = GUC_VIDEO_ENGINE,
+ .mmio_base = GEN6_BSD_RING_BASE,
+ .irq_shift = GEN8_VCS1_IRQ_SHIFT,
+ .init = logical_ring_init,
+ },
+ [VCS2] = {
+ .name = "bsd2 ring",
+ .exec_id = I915_EXEC_BSD,
+ .guc_id = GUC_VIDEO_ENGINE2,
+ .mmio_base = GEN8_BSD2_RING_BASE,
+ .irq_shift = GEN8_VCS2_IRQ_SHIFT,
+ .init = logical_ring_init,
+ },
+ [VECS] = {
+ .name = "video enhancement ring",
+ .exec_id = I915_EXEC_VEBOX,
+ .guc_id = GUC_VIDEOENHANCE_ENGINE,
+ .mmio_base = VEBOX_RING_BASE,
+ .irq_shift = GEN8_VECS_IRQ_SHIFT,
+ .init = logical_ring_init,
+ },
+};
-static int logical_bsd2_ring_init(struct drm_device *dev)
+static struct intel_engine_cs *
+logical_ring_setup(struct drm_i915_private *dev_priv, enum intel_engine_id id)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *engine = &dev_priv->engine[VCS2];
+ const struct logical_ring_info *info = &logical_rings[id];
+ struct intel_engine_cs *engine = &dev_priv->engine[id];
+ enum forcewake_domains fw_domains;
- engine->name = "bsd2 ring";
- engine->id = VCS2;
- engine->exec_id = I915_EXEC_BSD;
- engine->guc_id = GUC_VIDEO_ENGINE2;
- engine->mmio_base = GEN8_BSD2_RING_BASE;
+ engine->id = id;
+ engine->name = info->name;
+ engine->exec_id = info->exec_id;
+ engine->guc_id = info->guc_id;
+ engine->mmio_base = info->mmio_base;
- logical_ring_default_irqs(engine, GEN8_VCS2_IRQ_SHIFT);
- logical_ring_default_vfuncs(dev, engine);
+ engine->i915 = dev_priv;
- return logical_ring_init(dev, engine);
-}
+ /* Intentionally left blank. */
+ engine->buffer = NULL;
-static int logical_blt_ring_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *engine = &dev_priv->engine[BCS];
+ fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
+ RING_ELSP(engine),
+ FW_REG_WRITE);
- engine->name = "blitter ring";
- engine->id = BCS;
- engine->exec_id = I915_EXEC_BLT;
- engine->guc_id = GUC_BLITTER_ENGINE;
- engine->mmio_base = BLT_RING_BASE;
+ fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+ RING_CONTEXT_STATUS_PTR(engine),
+ FW_REG_READ | FW_REG_WRITE);
- logical_ring_default_irqs(engine, GEN8_BCS_IRQ_SHIFT);
- logical_ring_default_vfuncs(dev, engine);
+ fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+ RING_CONTEXT_STATUS_BUF_BASE(engine),
+ FW_REG_READ);
- return logical_ring_init(dev, engine);
-}
+ engine->fw_domains = fw_domains;
-static int logical_vebox_ring_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *engine = &dev_priv->engine[VECS];
+ INIT_LIST_HEAD(&engine->active_list);
+ INIT_LIST_HEAD(&engine->request_list);
+ INIT_LIST_HEAD(&engine->buffers);
+ INIT_LIST_HEAD(&engine->execlist_queue);
+ spin_lock_init(&engine->execlist_lock);
- engine->name = "video enhancement ring";
- engine->id = VECS;
- engine->exec_id = I915_EXEC_VEBOX;
- engine->guc_id = GUC_VIDEOENHANCE_ENGINE;
- engine->mmio_base = VEBOX_RING_BASE;
+ tasklet_init(&engine->irq_tasklet,
+ intel_lrc_irq_handler, (unsigned long)engine);
- logical_ring_default_irqs(engine, GEN8_VECS_IRQ_SHIFT);
- logical_ring_default_vfuncs(dev, engine);
+ logical_ring_init_platform_invariants(engine);
+ logical_ring_default_vfuncs(engine);
+ logical_ring_default_irqs(engine, info->irq_shift);
+
+ intel_engine_init_hangcheck(engine);
+ i915_gem_batch_pool_init(&dev_priv->drm, &engine->batch_pool);
- return logical_ring_init(dev, engine);
+ return engine;
}
/**
* intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers
* @dev: DRM device.
*
- * This function inits the engines for an Execlists submission style (the equivalent in the
- * legacy ringbuffer submission world would be i915_gem_init_engines). It does it only for
- * those engines that are present in the hardware.
+ * This function initializes the engines for Execlists submission (the
+ * equivalent in the legacy ringbuffer submission world would be
+ * i915_gem_init_engines). It does so only for those engines that are
+ * present in the hardware.
*
* Return: non-zero if the initialization failed.
*/
int intel_logical_rings_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ unsigned int mask = 0;
+ unsigned int i;
int ret;
- ret = logical_render_ring_init(dev);
- if (ret)
- return ret;
+ WARN_ON(INTEL_INFO(dev_priv)->ring_mask &
+ GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));
- if (HAS_BSD(dev)) {
- ret = logical_bsd_ring_init(dev);
- if (ret)
- goto cleanup_render_ring;
- }
+ for (i = 0; i < ARRAY_SIZE(logical_rings); i++) {
+ if (!HAS_ENGINE(dev_priv, i))
+ continue;
- if (HAS_BLT(dev)) {
- ret = logical_blt_ring_init(dev);
- if (ret)
- goto cleanup_bsd_ring;
- }
+ if (!logical_rings[i].init)
+ continue;
- if (HAS_VEBOX(dev)) {
- ret = logical_vebox_ring_init(dev);
+ ret = logical_rings[i].init(logical_ring_setup(dev_priv, i));
if (ret)
- goto cleanup_blt_ring;
+ goto cleanup;
+
+ mask |= ENGINE_MASK(i);
}
- if (HAS_BSD2(dev)) {
- ret = logical_bsd2_ring_init(dev);
- if (ret)
- goto cleanup_vebox_ring;
+ /*
+ * Catch failures to update the logical_rings table when new engines
+ * are added to the driver: warn and disable the forgotten engines.
+ */
+ if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask)) {
+ struct intel_device_info *info =
+ (struct intel_device_info *)&dev_priv->info;
+ info->ring_mask = mask;
}
return 0;
-cleanup_vebox_ring:
- intel_logical_ring_cleanup(&dev_priv->engine[VECS]);
-cleanup_blt_ring:
- intel_logical_ring_cleanup(&dev_priv->engine[BCS]);
-cleanup_bsd_ring:
- intel_logical_ring_cleanup(&dev_priv->engine[VCS]);
-cleanup_render_ring:
- intel_logical_ring_cleanup(&dev_priv->engine[RCS]);
+cleanup:
+ for (i = 0; i < I915_NUM_ENGINES; i++)
+ intel_logical_ring_cleanup(&dev_priv->engine[i]);
return ret;
}
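
The table-driven loop above keys everything off a per-engine bit in ring_mask, which makes the "forgotten engine" check a simple mask comparison. A self-contained model of that bookkeeping (engine ids and masks are illustrative):

    #include <stdio.h>

    #define ENGINE_MASK(id)      (1u << (id))
    #define HAS_ENGINE(mask, id) ((mask) & ENGINE_MASK(id))

    enum { RCS, BCS, VCS, VCS2, VECS, NUM_ENGINES };

    int main(void)
    {
        unsigned int hw_mask = ENGINE_MASK(RCS) | ENGINE_MASK(BCS) |
                               ENGINE_MASK(VCS);
        unsigned int initialised = 0;

        for (int i = 0; i < NUM_ENGINES; i++) {
            if (!HAS_ENGINE(hw_mask, i))
                continue;
            initialised |= ENGINE_MASK(i); /* engine init would go here */
        }

        /* a mismatch means the setup table forgot an engine */
        printf("all engines covered: %d\n", initialised == hw_mask);
        return 0;
    }
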
static u32
-make_rpcs(struct drm_device *dev)
+make_rpcs(struct drm_i915_private *dev_priv)
{
u32 rpcs = 0;
@@ -2291,7 +2250,7 @@ make_rpcs(struct drm_device *dev)
* No explicit RPCS request is needed to ensure full
* slice/subslice/EU enablement prior to Gen9.
*/
- if (INTEL_INFO(dev)->gen < 9)
+ if (INTEL_GEN(dev_priv) < 9)
return 0;
/*
@@ -2300,24 +2259,24 @@ make_rpcs(struct drm_device *dev)
* must make an explicit request through RPCS for full
* enablement.
*/
- if (INTEL_INFO(dev)->has_slice_pg) {
+ if (INTEL_INFO(dev_priv)->has_slice_pg) {
rpcs |= GEN8_RPCS_S_CNT_ENABLE;
- rpcs |= INTEL_INFO(dev)->slice_total <<
+ rpcs |= INTEL_INFO(dev_priv)->slice_total <<
GEN8_RPCS_S_CNT_SHIFT;
rpcs |= GEN8_RPCS_ENABLE;
}
- if (INTEL_INFO(dev)->has_subslice_pg) {
+ if (INTEL_INFO(dev_priv)->has_subslice_pg) {
rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
- rpcs |= INTEL_INFO(dev)->subslice_per_slice <<
+ rpcs |= INTEL_INFO(dev_priv)->subslice_per_slice <<
GEN8_RPCS_SS_CNT_SHIFT;
rpcs |= GEN8_RPCS_ENABLE;
}
- if (INTEL_INFO(dev)->has_eu_pg) {
- rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
+ if (INTEL_INFO(dev_priv)->has_eu_pg) {
+ rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
GEN8_RPCS_EU_MIN_SHIFT;
- rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
+ rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
GEN8_RPCS_EU_MAX_SHIFT;
rpcs |= GEN8_RPCS_ENABLE;
}
@@ -2329,9 +2288,9 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
{
u32 indirect_ctx_offset;
- switch (INTEL_INFO(engine->dev)->gen) {
+ switch (INTEL_GEN(engine->i915)) {
default:
- MISSING_CASE(INTEL_INFO(engine->dev)->gen);
+ MISSING_CASE(INTEL_GEN(engine->i915));
/* fall through */
case 9:
indirect_ctx_offset =
@@ -2347,13 +2306,12 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
}
static int
-populate_lr_context(struct intel_context *ctx,
+populate_lr_context(struct i915_gem_context *ctx,
struct drm_i915_gem_object *ctx_obj,
struct intel_engine_cs *engine,
struct intel_ringbuffer *ringbuf)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = ctx->i915;
struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
void *vaddr;
u32 *reg_state;
@@ -2391,7 +2349,7 @@ populate_lr_context(struct intel_context *ctx,
RING_CONTEXT_CONTROL(engine),
_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
- (HAS_RESOURCE_STREAMER(dev) ?
+ (HAS_RESOURCE_STREAMER(dev_priv) ?
CTX_CTRL_RS_CTX_ENABLE : 0)));
ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
0);
@@ -2480,7 +2438,7 @@ populate_lr_context(struct intel_context *ctx,
if (engine->id == RCS) {
reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
- make_rpcs(dev));
+ make_rpcs(dev_priv));
}
i915_gem_object_unpin_map(ctx_obj);
@@ -2489,39 +2447,8 @@ populate_lr_context(struct intel_context *ctx,
}
/**
- * intel_lr_context_free() - free the LRC specific bits of a context
- * @ctx: the LR context to free.
- *
- * The real context freeing is done in i915_gem_context_free: this only
- * takes care of the bits that are LRC related: the per-engine backing
- * objects and the logical ringbuffer.
- */
-void intel_lr_context_free(struct intel_context *ctx)
-{
- int i;
-
- for (i = I915_NUM_ENGINES; --i >= 0; ) {
- struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
- struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
-
- if (!ctx_obj)
- continue;
-
- if (ctx == ctx->i915->kernel_context) {
- intel_unpin_ringbuffer_obj(ringbuf);
- i915_gem_object_ggtt_unpin(ctx_obj);
- i915_gem_object_unpin_map(ctx_obj);
- }
-
- WARN_ON(ctx->engine[i].pin_count);
- intel_ringbuffer_free(ringbuf);
- drm_gem_object_unreference(&ctx_obj->base);
- }
-}
-
-/**
* intel_lr_context_size() - return the size of the context for an engine
- * @ring: which engine to find the context size for
+ * @engine: which engine to find the context size for
*
* Each engine may require a different amount of space for a context image,
* so when allocating (or copying) an image, this function can be used to
@@ -2537,11 +2464,11 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
{
int ret = 0;
- WARN_ON(INTEL_INFO(engine->dev)->gen < 8);
+ WARN_ON(INTEL_GEN(engine->i915) < 8);
switch (engine->id) {
case RCS:
- if (INTEL_INFO(engine->dev)->gen >= 9)
+ if (INTEL_GEN(engine->i915) >= 9)
ret = GEN9_LR_CONTEXT_RENDER_SIZE;
else
ret = GEN8_LR_CONTEXT_RENDER_SIZE;
@@ -2558,9 +2485,9 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
}
/**
- * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context
+ * execlists_context_deferred_alloc() - create the LRC specific bits of a context
* @ctx: LR context to create.
- * @ring: engine to be used with the context.
+ * @engine: engine to be used with the context.
*
* This function can be called more than once, with different engines, if we plan
* to use the context with them. The context backing objects and the ringbuffers
@@ -2570,31 +2497,29 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
*
* Return: non-zero on error.
*/
-
-int intel_lr_context_deferred_alloc(struct intel_context *ctx,
- struct intel_engine_cs *engine)
+static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
struct drm_i915_gem_object *ctx_obj;
+ struct intel_context *ce = &ctx->engine[engine->id];
uint32_t context_size;
struct intel_ringbuffer *ringbuf;
int ret;
- WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
- WARN_ON(ctx->engine[engine->id].state);
+ WARN_ON(ce->state);
context_size = round_up(intel_lr_context_size(engine), 4096);
/* One extra page for the data shared between the driver and the GuC */
context_size += PAGE_SIZE * LRC_PPHWSP_PN;
- ctx_obj = i915_gem_alloc_object(dev, context_size);
- if (!ctx_obj) {
+ ctx_obj = i915_gem_object_create(&ctx->i915->drm, context_size);
+ if (IS_ERR(ctx_obj)) {
DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
- return -ENOMEM;
+ return PTR_ERR(ctx_obj);
}
- ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE);
+ ringbuf = intel_engine_create_ringbuffer(engine, ctx->ring_size);
if (IS_ERR(ringbuf)) {
ret = PTR_ERR(ringbuf);
goto error_deref_obj;
@@ -2606,48 +2531,29 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
goto error_ringbuf;
}
- ctx->engine[engine->id].ringbuf = ringbuf;
- ctx->engine[engine->id].state = ctx_obj;
+ ce->ringbuf = ringbuf;
+ ce->state = ctx_obj;
+ ce->initialised = engine->init_context == NULL;
- if (ctx != ctx->i915->kernel_context && engine->init_context) {
- struct drm_i915_gem_request *req;
-
- req = i915_gem_request_alloc(engine, ctx);
- if (IS_ERR(req)) {
- ret = PTR_ERR(req);
- DRM_ERROR("ring create req: %d\n", ret);
- goto error_ringbuf;
- }
-
- ret = engine->init_context(req);
- i915_add_request_no_flush(req);
- if (ret) {
- DRM_ERROR("ring init context: %d\n",
- ret);
- goto error_ringbuf;
- }
- }
return 0;
error_ringbuf:
intel_ringbuffer_free(ringbuf);
error_deref_obj:
drm_gem_object_unreference(&ctx_obj->base);
- ctx->engine[engine->id].ringbuf = NULL;
- ctx->engine[engine->id].state = NULL;
+ ce->ringbuf = NULL;
+ ce->state = NULL;
return ret;
}
void intel_lr_context_reset(struct drm_i915_private *dev_priv,
- struct intel_context *ctx)
+ struct i915_gem_context *ctx)
{
struct intel_engine_cs *engine;
for_each_engine(engine, dev_priv) {
- struct drm_i915_gem_object *ctx_obj =
- ctx->engine[engine->id].state;
- struct intel_ringbuffer *ringbuf =
- ctx->engine[engine->id].ringbuf;
+ struct intel_context *ce = &ctx->engine[engine->id];
+ struct drm_i915_gem_object *ctx_obj = ce->state;
void *vaddr;
uint32_t *reg_state;
@@ -2666,7 +2572,7 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv,
i915_gem_object_unpin_map(ctx_obj);
- ringbuf->head = 0;
- ringbuf->tail = 0;
+ ce->ringbuf->head = 0;
+ ce->ringbuf->tail = 0;
}
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 60a7385bc..2b8255c19 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -57,6 +57,11 @@
#define GEN8_CSB_READ_PTR(csb_status) \
(((csb_status) & GEN8_CSB_READ_PTR_MASK) >> 8)
+enum {
+ INTEL_CONTEXT_SCHEDULE_IN = 0,
+ INTEL_CONTEXT_SCHEDULE_OUT,
+};
+
/* Logical Rings */
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
@@ -99,30 +104,27 @@ static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
#define LRC_PPHWSP_PN (LRC_GUCSHR_PN + 1)
#define LRC_STATE_PN (LRC_PPHWSP_PN + 1)
-void intel_lr_context_free(struct intel_context *ctx);
+struct i915_gem_context;
+
uint32_t intel_lr_context_size(struct intel_engine_cs *engine);
-int intel_lr_context_deferred_alloc(struct intel_context *ctx,
- struct intel_engine_cs *engine);
-void intel_lr_context_unpin(struct intel_context *ctx,
+void intel_lr_context_unpin(struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
struct drm_i915_private;
void intel_lr_context_reset(struct drm_i915_private *dev_priv,
- struct intel_context *ctx);
-uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
+ struct i915_gem_context *ctx);
+uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
-u32 intel_execlists_ctx_id(struct intel_context *ctx,
- struct intel_engine_cs *engine);
-
/* Execlists */
-int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
+int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
+ int enable_execlists);
struct i915_execbuffer_params;
int intel_execlists_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas);
-void intel_execlists_retire_requests(struct intel_engine_cs *engine);
+void intel_execlists_cancel_requests(struct intel_engine_cs *engine);
#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 96281e628..495504704 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -72,7 +72,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
enum intel_display_power_domain power_domain;
u32 tmp;
@@ -106,7 +106,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
u32 tmp, flags = 0;
@@ -140,7 +140,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
int pipe = crtc->pipe;
@@ -184,13 +184,13 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
* panels behave in the two modes. For now, let's just maintain the
* value we got from the BIOS.
*/
- temp &= ~LVDS_A3_POWER_MASK;
- temp |= lvds_encoder->a3_power;
+ temp &= ~LVDS_A3_POWER_MASK;
+ temp |= lvds_encoder->a3_power;
/* Set the dithering flag on LVDS as needed. Note that there is no
 * special lvds dither control bit on pch-split platforms; dithering is
 * only controlled through the PIPECONF reg. */
- if (INTEL_INFO(dev)->gen == 4) {
+ if (IS_GEN4(dev_priv)) {
/* Bspec wording suggests that LVDS port dithering only exists
* for 18bpp panels. */
if (crtc->config->dither && crtc->config->pipe_bpp == 18)
@@ -216,7 +216,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct intel_connector *intel_connector =
&lvds_encoder->attached_connector->base;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t ctl_reg, stat_reg;
if (HAS_PCH_SPLIT(dev)) {
@@ -231,7 +231,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
POSTING_READ(lvds_encoder->reg);
- if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
+ if (intel_wait_for_register(dev_priv, stat_reg, PP_ON, PP_ON, 1000))
DRM_ERROR("timed out waiting for panel to power on\n");
intel_panel_enable_backlight(intel_connector);
@@ -241,7 +241,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t ctl_reg, stat_reg;
if (HAS_PCH_SPLIT(dev)) {
@@ -253,7 +253,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
}
I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
- if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
+ if (intel_wait_for_register(dev_priv, stat_reg, PP_ON, 0, 1000))
DRM_ERROR("timed out waiting for panel to power off\n");
I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
@@ -442,7 +442,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
container_of(nb, struct intel_lvds_connector, lid_notifier);
struct drm_connector *connector = &lvds_connector->base.base;
struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
return NOTIFY_OK;
@@ -547,7 +547,6 @@ static int intel_lvds_set_property(struct drm_connector *connector,
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
.get_modes = intel_lvds_get_modes,
.mode_valid = intel_lvds_mode_valid,
- .best_encoder = intel_best_encoder,
};
static const struct drm_connector_funcs intel_lvds_connector_funcs = {
@@ -556,6 +555,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_lvds_set_property,
.atomic_get_property = intel_connector_atomic_get_property,
+ .late_register = intel_connector_register,
+ .early_unregister = intel_connector_unregister,
.destroy = intel_lvds_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -810,27 +811,29 @@ static const struct dmi_system_id intel_dual_link_lvds[] = {
{ } /* terminating entry */
};
-bool intel_is_dual_link_lvds(struct drm_device *dev)
+struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev)
{
- struct intel_encoder *encoder;
- struct intel_lvds_encoder *lvds_encoder;
+ struct intel_encoder *intel_encoder;
- for_each_intel_encoder(dev, encoder) {
- if (encoder->type == INTEL_OUTPUT_LVDS) {
- lvds_encoder = to_lvds_encoder(&encoder->base);
+ for_each_intel_encoder(dev, intel_encoder)
+ if (intel_encoder->type == INTEL_OUTPUT_LVDS)
+ return intel_encoder;
- return lvds_encoder->is_dual_link;
- }
- }
+ return NULL;
+}
- return false;
+bool intel_is_dual_link_lvds(struct drm_device *dev)
+{
+ struct intel_encoder *encoder = intel_get_lvds_encoder(dev);
+
+ return encoder && to_lvds_encoder(&encoder->base)->is_dual_link;
}
static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
{
struct drm_device *dev = lvds_encoder->base.base.dev;
unsigned int val;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/* use the module option value if specified */
if (i915.lvds_channel_mode > 0)
@@ -880,7 +883,7 @@ static bool intel_lvds_supported(struct drm_device *dev)
*/
void intel_lvds_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_lvds_encoder *lvds_encoder;
struct intel_encoder *intel_encoder;
struct intel_lvds_connector *lvds_connector;
@@ -978,7 +981,7 @@ void intel_lvds_init(struct drm_device *dev)
DRM_MODE_CONNECTOR_LVDS);
drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
+ DRM_MODE_ENCODER_LVDS, "LVDS");
intel_encoder->enable = intel_enable_lvds;
intel_encoder->pre_enable = intel_pre_enable_lvds;
@@ -992,7 +995,6 @@ void intel_lvds_init(struct drm_device *dev)
intel_encoder->get_hw_state = intel_lvds_get_hw_state;
intel_encoder->get_config = intel_lvds_get_config;
intel_connector->get_hw_state = intel_connector_get_hw_state;
- intel_connector->unregister = intel_connector_unregister;
intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_LVDS;
@@ -1119,6 +1121,7 @@ out:
mutex_unlock(&dev->mode_config.mutex);
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
+ intel_panel_setup_backlight(connector, INVALID_PIPE);
lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
@@ -1131,9 +1134,6 @@ out:
DRM_DEBUG_KMS("lid notifier registration failed\n");
lvds_connector->lid_notifier.notifier_call = NULL;
}
- drm_connector_register(connector);
-
- intel_panel_setup_backlight(connector, INVALID_PIPE);
return;
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 6ba4bf7f2..927825f5b 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -66,9 +66,10 @@ struct drm_i915_mocs_table {
#define L3_WB 3
/* Target cache */
-#define ELLC 0
-#define LLC 1
-#define LLC_ELLC 2
+#define LE_TC_PAGETABLE 0
+#define LE_TC_LLC 1
+#define LE_TC_LLC_ELLC 2
+#define LE_TC_LLC_ELLC_ALT 3
/*
* MOCS tables
@@ -96,34 +97,67 @@ struct drm_i915_mocs_table {
* end.
*/
static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
- /* { 0x00000009, 0x0010 } */
- { (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) |
- LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
- (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) },
- /* { 0x00000038, 0x0030 } */
- { (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
- LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
- (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) },
- /* { 0x0000003b, 0x0030 } */
- { (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
- LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
- (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }
+ {
+ /* 0x00000009 */
+ .control_value = LE_CACHEABILITY(LE_UC) |
+ LE_TGT_CACHE(LE_TC_LLC_ELLC) |
+ LE_LRUM(0) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
+ LE_PFM(0) | LE_SCF(0),
+
+ /* 0x0010 */
+ .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC),
+ },
+ {
+ /* 0x00000038 */
+ .control_value = LE_CACHEABILITY(LE_PAGETABLE) |
+ LE_TGT_CACHE(LE_TC_LLC_ELLC) |
+ LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
+ LE_PFM(0) | LE_SCF(0),
+ /* 0x0030 */
+ .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
+ },
+ {
+ /* 0x0000003b */
+ .control_value = LE_CACHEABILITY(LE_WB) |
+ LE_TGT_CACHE(LE_TC_LLC_ELLC) |
+ LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
+ LE_PFM(0) | LE_SCF(0),
+ /* 0x0030 */
+ .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
+ },
};
/* NOTE: the LE_TGT_CACHE is not used on Broxton */
static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
- /* { 0x00000009, 0x0010 } */
- { (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) |
- LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
- (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) },
- /* { 0x00000038, 0x0030 } */
- { (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
- LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
- (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) },
- /* { 0x0000003b, 0x0030 } */
- { (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
- LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
- (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }
+ {
+ /* 0x00000009 */
+ .control_value = LE_CACHEABILITY(LE_UC) |
+ LE_TGT_CACHE(LE_TC_LLC_ELLC) |
+ LE_LRUM(0) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
+ LE_PFM(0) | LE_SCF(0),
+
+ /* 0x0010 */
+ .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC),
+ },
+ {
+ /* 0x00000038 */
+ .control_value = LE_CACHEABILITY(LE_PAGETABLE) |
+ LE_TGT_CACHE(LE_TC_LLC_ELLC) |
+ LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
+ LE_PFM(0) | LE_SCF(0),
+
+ /* 0x0030 */
+ .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
+ },
+ {
+ /* 0x00000039 */
+ .control_value = LE_CACHEABILITY(LE_UC) |
+ LE_TGT_CACHE(LE_TC_LLC_ELLC) |
+ LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
+ LE_PFM(0) | LE_SCF(0),
+
+ /* 0x0030 */
+ .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
+ },
};
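
Each entry now carries the two halves explicitly: a 32-bit control_value programmed one per register, and a 16-bit l3cc_value that the driver packs two to a register. A hedged sketch of that packing step, assuming that register layout:

    #include <stdint.h>

    /* Combine two 16-bit L3 cacheability values into one 32-bit
     * register image, low entry in the low half. */
    static uint32_t l3cc_combine(uint16_t low, uint16_t high)
    {
        return (uint32_t)low | ((uint32_t)high << 16);
    }
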
/**
@@ -156,6 +190,16 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
"Platform that should have a MOCS table does not.\n");
}
+ /* WaDisableSkipCaching:skl,bxt,kbl */
+ if (IS_GEN9(dev_priv)) {
+ int i;
+
+ for (i = 0; i < table->size; i++)
+ if (WARN_ON(table->table[i].l3cc_value &
+ (L3_ESC(1) | L3_SCC(0x7))))
+ return false;
+ }
+
return result;
}
@@ -189,7 +233,7 @@ static i915_reg_t mocs_register(enum intel_engine_id ring, int index)
*/
int intel_mocs_init_engine(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ struct drm_i915_private *dev_priv = engine->i915;
struct drm_i915_mocs_table table;
unsigned int index;
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 38a4c8ce7..f2584d0a0 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -82,7 +82,7 @@ void
intel_attach_force_audio_property(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_property *prop;
prop = dev_priv->force_audio_property;
@@ -109,7 +109,7 @@ void
intel_attach_broadcast_rgb_property(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_property *prop;
prop = dev_priv->broadcast_rgb_property;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 72842aafd..7acbbbf97 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -232,18 +232,36 @@ struct opregion_asle_ext {
#define SWSCI_SBCB_POST_VBE_PM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 19)
#define SWSCI_SBCB_ENABLE_DISABLE_AUDIO SWSCI_FUNCTION_CODE(SWSCI_SBCB, 21)
-#define ACPI_OTHER_OUTPUT (0<<8)
-#define ACPI_VGA_OUTPUT (1<<8)
-#define ACPI_TV_OUTPUT (2<<8)
-#define ACPI_DIGITAL_OUTPUT (3<<8)
-#define ACPI_LVDS_OUTPUT (4<<8)
+/*
+ * ACPI Specification, Revision 5.0, Appendix B.3.2 _DOD (Enumerate All Devices
+ * Attached to the Display Adapter).
+ */
+#define ACPI_DISPLAY_INDEX_SHIFT 0
+#define ACPI_DISPLAY_INDEX_MASK (0xf << 0)
+#define ACPI_DISPLAY_PORT_ATTACHMENT_SHIFT 4
+#define ACPI_DISPLAY_PORT_ATTACHMENT_MASK (0xf << 4)
+#define ACPI_DISPLAY_TYPE_SHIFT 8
+#define ACPI_DISPLAY_TYPE_MASK (0xf << 8)
+#define ACPI_DISPLAY_TYPE_OTHER (0 << 8)
+#define ACPI_DISPLAY_TYPE_VGA (1 << 8)
+#define ACPI_DISPLAY_TYPE_TV (2 << 8)
+#define ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL (3 << 8)
+#define ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL (4 << 8)
+#define ACPI_VENDOR_SPECIFIC_SHIFT 12
+#define ACPI_VENDOR_SPECIFIC_MASK (0xf << 12)
+#define ACPI_BIOS_CAN_DETECT (1 << 16)
+#define ACPI_DEPENDS_ON_VGA (1 << 17)
+#define ACPI_PIPE_ID_SHIFT 18
+#define ACPI_PIPE_ID_MASK (7 << 18)
+#define ACPI_DEVICE_ID_SCHEME (1 << 31)
#define MAX_DSLP 1500
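/*
 * Illustrative aside (not part of this patch): decoding a hypothetical
 * _DOD device ID with the shift/mask layout defined above. Only the bit
 * layout comes from the defines; the sample value is made up.
 */
#include <stdio.h>

int main(void)
{
	unsigned int id = 0x00010400;		/* hypothetical _DOD entry */
	unsigned int index = id & (0xf << 0);	/* ACPI_DISPLAY_INDEX_MASK */
	unsigned int type = id & (0xf << 8);	/* ACPI_DISPLAY_TYPE_MASK */

	printf("index=%u internal-digital=%d bios-detect=%d\n",
	       index,
	       type == (4 << 8),	/* ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL */
	       !!(id & (1 << 16)));	/* ACPI_BIOS_CAN_DETECT */
	return 0;
}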
-static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
+static int swsci(struct drm_i915_private *dev_priv,
+ u32 function, u32 parm, u32 *parm_out)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_swsci *swsci = dev_priv->opregion.swsci;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
u32 main_function, sub_function, scic;
u16 swsci_val;
u32 dslp;
@@ -293,16 +311,16 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
swsci->scic = scic;
/* Ensure SCI event is selected and event trigger is cleared. */
- pci_read_config_word(dev->pdev, SWSCI, &swsci_val);
+ pci_read_config_word(pdev, SWSCI, &swsci_val);
if (!(swsci_val & SWSCI_SCISEL) || (swsci_val & SWSCI_GSSCIE)) {
swsci_val |= SWSCI_SCISEL;
swsci_val &= ~SWSCI_GSSCIE;
- pci_write_config_word(dev->pdev, SWSCI, swsci_val);
+ pci_write_config_word(pdev, SWSCI, swsci_val);
}
/* Use event trigger to tell bios to check the mail. */
swsci_val |= SWSCI_GSSCIE;
- pci_write_config_word(dev->pdev, SWSCI, swsci_val);
+ pci_write_config_word(pdev, SWSCI, swsci_val);
/* Poll for the result. */
#define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0)
@@ -336,13 +354,13 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
bool enable)
{
- struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
u32 parm = 0;
u32 type = 0;
u32 port;
/* don't care about old stuff for now */
- if (!HAS_DDI(dev))
+ if (!HAS_DDI(dev_priv))
return 0;
if (intel_encoder->type == INTEL_OUTPUT_DSI)
@@ -365,7 +383,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
type = DISPLAY_TYPE_CRT;
break;
case INTEL_OUTPUT_UNKNOWN:
- case INTEL_OUTPUT_DISPLAYPORT:
+ case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_DP_MST:
type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;
@@ -382,7 +400,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
parm |= type << (16 + port * 3);
- return swsci(dev, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
+ return swsci(dev_priv, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
}
static const struct {
@@ -396,27 +414,28 @@ static const struct {
{ PCI_D3cold, 0x04 },
};
-int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
+int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
+ pci_power_t state)
{
int i;
- if (!HAS_DDI(dev))
+ if (!HAS_DDI(dev_priv))
return 0;
for (i = 0; i < ARRAY_SIZE(power_state_map); i++) {
if (state == power_state_map[i].pci_power_state)
- return swsci(dev, SWSCI_SBCB_ADAPTER_POWER_STATE,
+ return swsci(dev_priv, SWSCI_SBCB_ADAPTER_POWER_STATE,
power_state_map[i].parm, NULL);
}
return -EINVAL;
}
-static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
+static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_connector *connector;
struct opregion_asle *asle = dev_priv->opregion.asle;
+ struct drm_device *dev = &dev_priv->drm;
DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
@@ -449,7 +468,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
return 0;
}
-static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
+static u32 asle_set_als_illum(struct drm_i915_private *dev_priv, u32 alsi)
{
/* alsi is the current ALS reading in lux. 0 indicates below sensor
range, 0xffff indicates above sensor range. 1-0xfffe are valid */
@@ -457,13 +476,13 @@ static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
return ASLC_ALS_ILLUM_FAILED;
}
-static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
+static u32 asle_set_pwm_freq(struct drm_i915_private *dev_priv, u32 pfmb)
{
DRM_DEBUG_DRIVER("PWM freq is not supported\n");
return ASLC_PWM_FREQ_FAILED;
}
-static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
+static u32 asle_set_pfit(struct drm_i915_private *dev_priv, u32 pfit)
{
/* Panel fitting is currently controlled by the X code, so this is a
noop until modesetting support works fully */
@@ -471,13 +490,13 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
return ASLC_PFIT_FAILED;
}
-static u32 asle_set_supported_rotation_angles(struct drm_device *dev, u32 srot)
+static u32 asle_set_supported_rotation_angles(struct drm_i915_private *dev_priv, u32 srot)
{
DRM_DEBUG_DRIVER("SROT is not supported\n");
return ASLC_ROTATION_ANGLES_FAILED;
}
-static u32 asle_set_button_array(struct drm_device *dev, u32 iuer)
+static u32 asle_set_button_array(struct drm_i915_private *dev_priv, u32 iuer)
{
if (!iuer)
DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n");
@@ -495,7 +514,7 @@ static u32 asle_set_button_array(struct drm_device *dev, u32 iuer)
return ASLC_BUTTON_ARRAY_FAILED;
}
-static u32 asle_set_convertible(struct drm_device *dev, u32 iuer)
+static u32 asle_set_convertible(struct drm_i915_private *dev_priv, u32 iuer)
{
if (iuer & ASLE_IUER_CONVERTIBLE)
DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n");
@@ -505,7 +524,7 @@ static u32 asle_set_convertible(struct drm_device *dev, u32 iuer)
return ASLC_CONVERTIBLE_FAILED;
}
-static u32 asle_set_docking(struct drm_device *dev, u32 iuer)
+static u32 asle_set_docking(struct drm_i915_private *dev_priv, u32 iuer)
{
if (iuer & ASLE_IUER_DOCKING)
DRM_DEBUG_DRIVER("Docking is not supported (docked)\n");
@@ -515,7 +534,7 @@ static u32 asle_set_docking(struct drm_device *dev, u32 iuer)
return ASLC_DOCKING_FAILED;
}
-static u32 asle_isct_state(struct drm_device *dev)
+static u32 asle_isct_state(struct drm_i915_private *dev_priv)
{
DRM_DEBUG_DRIVER("ISCT is not supported\n");
return ASLC_ISCT_STATE_FAILED;
@@ -527,7 +546,6 @@ static void asle_work(struct work_struct *work)
container_of(work, struct intel_opregion, asle_work);
struct drm_i915_private *dev_priv =
container_of(opregion, struct drm_i915_private, opregion);
- struct drm_device *dev = dev_priv->dev;
struct opregion_asle *asle = dev_priv->opregion.asle;
u32 aslc_stat = 0;
u32 aslc_req;
@@ -544,40 +562,38 @@ static void asle_work(struct work_struct *work)
}
if (aslc_req & ASLC_SET_ALS_ILLUM)
- aslc_stat |= asle_set_als_illum(dev, asle->alsi);
+ aslc_stat |= asle_set_als_illum(dev_priv, asle->alsi);
if (aslc_req & ASLC_SET_BACKLIGHT)
- aslc_stat |= asle_set_backlight(dev, asle->bclp);
+ aslc_stat |= asle_set_backlight(dev_priv, asle->bclp);
if (aslc_req & ASLC_SET_PFIT)
- aslc_stat |= asle_set_pfit(dev, asle->pfit);
+ aslc_stat |= asle_set_pfit(dev_priv, asle->pfit);
if (aslc_req & ASLC_SET_PWM_FREQ)
- aslc_stat |= asle_set_pwm_freq(dev, asle->pfmb);
+ aslc_stat |= asle_set_pwm_freq(dev_priv, asle->pfmb);
if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES)
- aslc_stat |= asle_set_supported_rotation_angles(dev,
+ aslc_stat |= asle_set_supported_rotation_angles(dev_priv,
asle->srot);
if (aslc_req & ASLC_BUTTON_ARRAY)
- aslc_stat |= asle_set_button_array(dev, asle->iuer);
+ aslc_stat |= asle_set_button_array(dev_priv, asle->iuer);
if (aslc_req & ASLC_CONVERTIBLE_INDICATOR)
- aslc_stat |= asle_set_convertible(dev, asle->iuer);
+ aslc_stat |= asle_set_convertible(dev_priv, asle->iuer);
if (aslc_req & ASLC_DOCKING_INDICATOR)
- aslc_stat |= asle_set_docking(dev, asle->iuer);
+ aslc_stat |= asle_set_docking(dev_priv, asle->iuer);
if (aslc_req & ASLC_ISCT_STATE_CHANGE)
- aslc_stat |= asle_isct_state(dev);
+ aslc_stat |= asle_isct_state(dev_priv);
asle->aslc = aslc_stat;
}
-void intel_opregion_asle_intr(struct drm_device *dev)
+void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
if (dev_priv->opregion.asle)
schedule_work(&dev_priv->opregion.asle_work);
}
@@ -658,10 +674,51 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val)
}
}
-static void intel_didl_outputs(struct drm_device *dev)
+static u32 acpi_display_type(struct drm_connector *connector)
+{
+ u32 display_type;
+
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_VGA:
+ case DRM_MODE_CONNECTOR_DVIA:
+ display_type = ACPI_DISPLAY_TYPE_VGA;
+ break;
+ case DRM_MODE_CONNECTOR_Composite:
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ case DRM_MODE_CONNECTOR_Component:
+ case DRM_MODE_CONNECTOR_9PinDIN:
+ case DRM_MODE_CONNECTOR_TV:
+ display_type = ACPI_DISPLAY_TYPE_TV;
+ break;
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_DVID:
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_HDMIA:
+ case DRM_MODE_CONNECTOR_HDMIB:
+ display_type = ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL;
+ break;
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_eDP:
+ case DRM_MODE_CONNECTOR_DSI:
+ display_type = ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL;
+ break;
+ case DRM_MODE_CONNECTOR_Unknown:
+ case DRM_MODE_CONNECTOR_VIRTUAL:
+ display_type = ACPI_DISPLAY_TYPE_OTHER;
+ break;
+ default:
+ MISSING_CASE(connector->connector_type);
+ display_type = ACPI_DISPLAY_TYPE_OTHER;
+ break;
+ }
+
+ return display_type;
+}
+
+static void intel_didl_outputs(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
struct drm_connector *connector;
acpi_handle handle;
struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
@@ -670,7 +727,7 @@ static void intel_didl_outputs(struct drm_device *dev)
u32 temp, max_outputs;
int i = 0;
- handle = ACPI_HANDLE(&dev->pdev->dev);
+ handle = ACPI_HANDLE(&pdev->dev);
if (!handle || acpi_bus_get_device(handle, &acpi_dev))
return;
@@ -725,45 +782,25 @@ end:
blind_set:
i = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- int output_type = ACPI_OTHER_OUTPUT;
+ list_for_each_entry(connector,
+ &dev_priv->drm.mode_config.connector_list, head) {
+ int display_type = acpi_display_type(connector);
+
if (i >= max_outputs) {
DRM_DEBUG_KMS("More than %u outputs in connector list\n",
max_outputs);
return;
}
- switch (connector->connector_type) {
- case DRM_MODE_CONNECTOR_VGA:
- case DRM_MODE_CONNECTOR_DVIA:
- output_type = ACPI_VGA_OUTPUT;
- break;
- case DRM_MODE_CONNECTOR_Composite:
- case DRM_MODE_CONNECTOR_SVIDEO:
- case DRM_MODE_CONNECTOR_Component:
- case DRM_MODE_CONNECTOR_9PinDIN:
- output_type = ACPI_TV_OUTPUT;
- break;
- case DRM_MODE_CONNECTOR_DVII:
- case DRM_MODE_CONNECTOR_DVID:
- case DRM_MODE_CONNECTOR_DisplayPort:
- case DRM_MODE_CONNECTOR_HDMIA:
- case DRM_MODE_CONNECTOR_HDMIB:
- output_type = ACPI_DIGITAL_OUTPUT;
- break;
- case DRM_MODE_CONNECTOR_LVDS:
- output_type = ACPI_LVDS_OUTPUT;
- break;
- }
+
temp = get_did(opregion, i);
- set_did(opregion, i, temp | (1 << 31) | output_type | i);
+ set_did(opregion, i, temp | (1 << 31) | display_type | i);
i++;
}
goto end;
}
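/*
 * Illustrative aside (not part of this patch): the DID word written in the
 * blind_set path above ORs together the valid bit, the ACPI display type
 * and the connector index. For the third connector (i == 2) reported as
 * internal digital:
 *
 *	(1 << 31) | (4 << 8) | 2  ==  0x80000402
 *
 * Note that (1 << 31) on a 32-bit int shifts into the sign bit; the kernel
 * relies on the usual two's-complement behaviour here.
 */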
-static void intel_setup_cadls(struct drm_device *dev)
+static void intel_setup_cadls(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
int i = 0;
u32 disp_id;
@@ -780,17 +817,16 @@ static void intel_setup_cadls(struct drm_device *dev)
} while (++i < 8 && disp_id != 0);
}
-void intel_opregion_init(struct drm_device *dev)
+void intel_opregion_register(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
if (!opregion->header)
return;
if (opregion->acpi) {
- intel_didl_outputs(dev);
- intel_setup_cadls(dev);
+ intel_didl_outputs(dev_priv);
+ intel_setup_cadls(dev_priv);
/* Notify BIOS we are ready to handle ACPI video ext notifs.
* Right now, all the events are handled by the ACPI video module.
@@ -808,9 +844,8 @@ void intel_opregion_init(struct drm_device *dev)
}
}
-void intel_opregion_fini(struct drm_device *dev)
+void intel_opregion_unregister(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
if (!opregion->header)
@@ -842,9 +877,8 @@ void intel_opregion_fini(struct drm_device *dev)
opregion->lid_state = NULL;
}
-static void swsci_setup(struct drm_device *dev)
+static void swsci_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
bool requested_callbacks = false;
u32 tmp;
@@ -854,7 +888,7 @@ static void swsci_setup(struct drm_device *dev)
opregion->swsci_sbcb_sub_functions = 1;
/* We use GBDA to ask for supported GBDA calls. */
- if (swsci(dev, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
+ if (swsci(dev_priv, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
/* make the bits match the sub-function codes */
tmp <<= 1;
opregion->swsci_gbda_sub_functions |= tmp;
@@ -865,7 +899,7 @@ static void swsci_setup(struct drm_device *dev)
* must not call interfaces that are not specifically requested by the
* bios.
*/
- if (swsci(dev, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
+ if (swsci(dev_priv, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
/* here, the bits already match sub-function codes */
opregion->swsci_sbcb_sub_functions |= tmp;
requested_callbacks = true;
@@ -876,7 +910,7 @@ static void swsci_setup(struct drm_device *dev)
* the callback is _requested_. But we still can't call interfaces that
* are not requested.
*/
- if (swsci(dev, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
+ if (swsci(dev_priv, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
/* make the bits match the sub-function codes */
u32 low = tmp & 0x7ff;
u32 high = tmp & ~0xfff; /* bit 11 is reserved */
@@ -918,10 +952,10 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
{ }
};
-int intel_opregion_setup(struct drm_device *dev)
+int intel_opregion_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
u32 asls, mboxes;
char buf[sizeof(OPREGION_SIGNATURE)];
int err = 0;
@@ -933,7 +967,7 @@ int intel_opregion_setup(struct drm_device *dev)
BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100);
BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400);
- pci_read_config_dword(dev->pdev, ASLS, &asls);
+ pci_read_config_dword(pdev, ASLS, &asls);
DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
if (asls == 0) {
DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
@@ -965,7 +999,7 @@ int intel_opregion_setup(struct drm_device *dev)
if (mboxes & MBOX_SWSCI) {
DRM_DEBUG_DRIVER("SWSCI supported\n");
opregion->swsci = base + OPREGION_SWSCI_OFFSET;
- swsci_setup(dev);
+ swsci_setup(dev_priv);
}
if (mboxes & MBOX_ASLE) {
@@ -1031,12 +1065,12 @@ static const struct dmi_system_id intel_use_opregion_panel_type[] = {
};
int
-intel_opregion_get_panel_type(struct drm_device *dev)
+intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
{
u32 panel_details;
int ret;
- ret = swsci(dev, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
+ ret = swsci(dev_priv, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
if (ret) {
DRM_DEBUG_KMS("Failed to get panel details from OpRegion (%d)\n",
ret);
@@ -1071,7 +1105,7 @@ intel_opregion_get_panel_type(struct drm_device *dev)
* vswing instead. Low vswing results in some display flickers, so
* let's simply ignore the OpRegion panel type on SKL for now.
*/
- if (IS_SKYLAKE(dev)) {
+ if (IS_SKYLAKE(dev_priv)) {
DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1);
return -ENODEV;
}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index bd38e49f7..3212d8806 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -168,7 +168,7 @@ struct overlay_registers {
};
struct intel_overlay {
- struct drm_device *dev;
+ struct drm_i915_private *i915;
struct intel_crtc *crtc;
struct drm_i915_gem_object *vid_bo;
struct drm_i915_gem_object *old_vid_bo;
@@ -190,15 +190,15 @@ struct intel_overlay {
static struct overlay_registers __iomem *
intel_overlay_map_regs(struct intel_overlay *overlay)
{
- struct drm_i915_private *dev_priv = to_i915(overlay->dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct drm_i915_private *dev_priv = overlay->i915;
struct overlay_registers __iomem *regs;
- if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
else
- regs = io_mapping_map_wc(ggtt->mappable,
- i915_gem_obj_ggtt_offset(overlay->reg_bo));
+ regs = io_mapping_map_wc(dev_priv->ggtt.mappable,
+ overlay->flip_addr,
+ PAGE_SIZE);
return regs;
}
@@ -206,7 +206,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
struct overlay_registers __iomem *regs)
{
- if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
io_mapping_unmap(regs);
}
@@ -232,14 +232,13 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
/* overlay needs to be disabled in OCMD reg */
static int intel_overlay_on(struct intel_overlay *overlay)
{
- struct drm_device *dev = overlay->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = overlay->i915;
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
struct drm_i915_gem_request *req;
int ret;
WARN_ON(overlay->active);
- WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
+ WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req))
@@ -266,8 +265,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
static int intel_overlay_continue(struct intel_overlay *overlay,
bool load_polyphase_filter)
{
- struct drm_device *dev = overlay->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = overlay->i915;
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
struct drm_i915_gem_request *req;
u32 flip_addr = overlay->flip_addr;
@@ -335,8 +333,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
/* overlay needs to be disabled in OCMD reg */
static int intel_overlay_off(struct intel_overlay *overlay)
{
- struct drm_device *dev = overlay->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = overlay->i915;
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
struct drm_i915_gem_request *req;
u32 flip_addr = overlay->flip_addr;
@@ -365,7 +362,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
intel_ring_emit(engine, flip_addr);
intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
/* turn overlay off */
- if (IS_I830(dev)) {
+ if (IS_I830(dev_priv)) {
/* Workaround: Don't disable the overlay fully, since otherwise
* it dies on the next OVERLAY_ON cmd. */
intel_ring_emit(engine, MI_NOOP);
@@ -408,12 +405,11 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
*/
static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
- struct drm_device *dev = overlay->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = overlay->i915;
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
int ret;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
/* Only wait if there is actually an old frame to release to
* guarantee forward progress.
@@ -537,10 +533,10 @@ static int uv_vsubsampling(u32 format)
}
}
-static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
+static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 width)
{
u32 mask, shift, ret;
- if (IS_GEN2(dev)) {
+ if (IS_GEN2(dev_priv)) {
mask = 0x1f;
shift = 5;
} else {
@@ -548,7 +544,7 @@ static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
shift = 6;
}
ret = ((offset + width + mask) >> shift) - (offset >> shift);
- if (!IS_GEN2(dev))
+ if (!IS_GEN2(dev_priv))
ret <<= 1;
ret -= 1;
return ret << 2;
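/*
 * Illustrative aside (not part of this patch): calc_swidthsw() above
 * derives the SWIDTHSW register field from an offset/width pair. A
 * standalone restatement of the non-GEN2 branch, with made-up numbers:
 */
#include <assert.h>

static unsigned int swidthsw_gen3(unsigned int offset, unsigned int width)
{
	unsigned int mask = 0x3f, shift = 6;	/* non-GEN2 values */
	unsigned int ret;

	ret = ((offset + width + mask) >> shift) - (offset >> shift);
	ret <<= 1;				/* non-GEN2 doubling */
	ret -= 1;
	return ret << 2;
}

int main(void)
{
	/* offset 0, width 1024: (1087 >> 6) - 0 = 16; 16*2 - 1 = 31; <<2 = 124 */
	assert(swidthsw_gen3(0, 1024) == 124);
	return 0;
}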
@@ -741,12 +737,12 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
int ret, tmp_width;
struct overlay_registers __iomem *regs;
bool scale_changed = false;
- struct drm_device *dev = overlay->dev;
+ struct drm_i915_private *dev_priv = overlay->i915;
u32 swidth, swidthsw, sheight, ostride;
enum pipe pipe = overlay->crtc->pipe;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
ret = intel_overlay_release_old_vid(overlay);
if (ret != 0)
@@ -769,7 +765,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
goto out_unpin;
}
oconfig = OCONF_CC_OUT_8BIT;
- if (IS_GEN4(overlay->dev))
+ if (IS_GEN4(dev_priv))
oconfig |= OCONF_CSC_MODE_BT709;
oconfig |= pipe == 0 ?
OCONF_PIPE_A : OCONF_PIPE_B;
@@ -796,7 +792,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
tmp_width = params->src_w;
swidth = params->src_w;
- swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
+ swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width);
sheight = params->src_h;
iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
ostride = params->stride_Y;
@@ -806,9 +802,9 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
int uv_vscale = uv_vsubsampling(params->format);
u32 tmp_U, tmp_V;
swidth |= (params->src_w/uv_hscale) << 16;
- tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
+ tmp_U = calc_swidthsw(dev_priv, params->offset_U,
params->src_w/uv_hscale);
- tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
+ tmp_V = calc_swidthsw(dev_priv, params->offset_V,
params->src_w/uv_hscale);
swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
sheight |= (params->src_h/uv_vscale) << 16;
@@ -840,7 +836,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
overlay->old_vid_bo = overlay->vid_bo;
overlay->vid_bo = new_bo;
- intel_frontbuffer_flip(dev,
+ intel_frontbuffer_flip(&dev_priv->drm,
INTEL_FRONTBUFFER_OVERLAY(pipe));
return 0;
@@ -852,12 +848,12 @@ out_unpin:
int intel_overlay_switch_off(struct intel_overlay *overlay)
{
+ struct drm_i915_private *dev_priv = overlay->i915;
struct overlay_registers __iomem *regs;
- struct drm_device *dev = overlay->dev;
int ret;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
ret = intel_overlay_recover_from_interrupt(overlay);
if (ret != 0)
@@ -897,15 +893,14 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
{
- struct drm_device *dev = overlay->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = overlay->i915;
u32 pfit_control = I915_READ(PFIT_CONTROL);
u32 ratio;
/* XXX: This is not the same logic as in the xorg driver, but more in
* line with the intel documentation for the i965
*/
- if (INTEL_INFO(dev)->gen >= 4) {
+ if (INTEL_GEN(dev_priv) >= 4) {
/* on i965 use the PGM reg to read out the autoscaler values */
ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
} else {
@@ -948,7 +943,7 @@ static int check_overlay_scaling(struct put_image_params *rec)
return 0;
}
-static int check_overlay_src(struct drm_device *dev,
+static int check_overlay_src(struct drm_i915_private *dev_priv,
struct drm_intel_overlay_put_image *rec,
struct drm_i915_gem_object *new_bo)
{
@@ -959,7 +954,7 @@ static int check_overlay_src(struct drm_device *dev,
u32 tmp;
/* check src dimensions */
- if (IS_845G(dev) || IS_I830(dev)) {
+ if (IS_845G(dev_priv) || IS_I830(dev_priv)) {
if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
return -EINVAL;
@@ -1011,14 +1006,14 @@ static int check_overlay_src(struct drm_device *dev,
return -EINVAL;
/* stride checking */
- if (IS_I830(dev) || IS_845G(dev))
+ if (IS_I830(dev_priv) || IS_845G(dev_priv))
stride_mask = 255;
else
stride_mask = 63;
if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
return -EINVAL;
- if (IS_GEN4(dev) && rec->stride_Y < 512)
+ if (IS_GEN4(dev_priv) && rec->stride_Y < 512)
return -EINVAL;
tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
@@ -1063,13 +1058,13 @@ static int check_overlay_src(struct drm_device *dev,
* Return the pipe currently connected to the panel fitter,
* or -1 if the panel fitter is not present or not in use
*/
-static int intel_panel_fitter_pipe(struct drm_device *dev)
+static int intel_panel_fitter_pipe(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 pfit_control;
/* i830 doesn't have a panel fitter */
- if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
+ if (INTEL_GEN(dev_priv) <= 3 &&
+ (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
return -1;
pfit_control = I915_READ(PFIT_CONTROL);
@@ -1079,18 +1074,18 @@ static int intel_panel_fitter_pipe(struct drm_device *dev)
return -1;
/* 965 can place panel fitter on either pipe */
- if (IS_GEN4(dev))
+ if (IS_GEN4(dev_priv))
return (pfit_control >> 29) & 0x3;
/* older chips can only use pipe 1 */
return 1;
}
-int intel_overlay_put_image(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_intel_overlay_put_image *put_image_rec = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_overlay *overlay;
struct drm_crtc *drmmode_crtc;
struct intel_crtc *crtc;
@@ -1162,7 +1157,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
/* line too wide, i.e. one-line-mode */
if (mode->hdisplay > 1024 &&
- intel_panel_fitter_pipe(dev) == crtc->pipe) {
+ intel_panel_fitter_pipe(dev_priv) == crtc->pipe) {
overlay->pfit_active = true;
update_pfit_vscale_ratio(overlay);
} else
@@ -1196,7 +1191,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
goto out_unlock;
}
- ret = check_overlay_src(dev, put_image_rec, new_bo);
+ ret = check_overlay_src(dev_priv, put_image_rec, new_bo);
if (ret != 0)
goto out_unlock;
params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK;
@@ -1284,11 +1279,11 @@ static int check_gamma(struct drm_intel_overlay_attrs *attrs)
return 0;
}
-int intel_overlay_attrs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_intel_overlay_attrs *attrs = data;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_overlay *overlay;
struct overlay_registers __iomem *regs;
int ret;
@@ -1309,7 +1304,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
attrs->contrast = overlay->contrast;
attrs->saturation = overlay->saturation;
- if (!IS_GEN2(dev)) {
+ if (!IS_GEN2(dev_priv)) {
attrs->gamma0 = I915_READ(OGAMC0);
attrs->gamma1 = I915_READ(OGAMC1);
attrs->gamma2 = I915_READ(OGAMC2);
@@ -1341,7 +1336,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
intel_overlay_unmap_regs(overlay, regs);
if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev_priv))
goto out_unlock;
if (overlay->active) {
@@ -1371,37 +1366,37 @@ out_unlock:
return ret;
}
-void intel_setup_overlay(struct drm_device *dev)
+void intel_setup_overlay(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_overlay *overlay;
struct drm_i915_gem_object *reg_bo;
struct overlay_registers __iomem *regs;
int ret;
- if (!HAS_OVERLAY(dev))
+ if (!HAS_OVERLAY(dev_priv))
return;
overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
if (!overlay)
return;
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&dev_priv->drm.struct_mutex);
if (WARN_ON(dev_priv->overlay))
goto out_free;
- overlay->dev = dev;
+ overlay->i915 = dev_priv;
reg_bo = NULL;
- if (!OVERLAY_NEEDS_PHYSICAL(dev))
- reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE);
- if (reg_bo == NULL)
- reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
+ if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
+ reg_bo = i915_gem_object_create_stolen(&dev_priv->drm,
+ PAGE_SIZE);
if (reg_bo == NULL)
+ reg_bo = i915_gem_object_create(&dev_priv->drm, PAGE_SIZE);
+ if (IS_ERR(reg_bo))
goto out_free;
overlay->reg_bo = reg_bo;
- if (OVERLAY_NEEDS_PHYSICAL(dev)) {
+ if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) {
ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
if (ret) {
DRM_ERROR("failed to attach phys overlay regs\n");
@@ -1441,25 +1436,23 @@ void intel_setup_overlay(struct drm_device *dev)
intel_overlay_unmap_regs(overlay, regs);
dev_priv->overlay = overlay;
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
DRM_INFO("initialized overlay support\n");
return;
out_unpin_bo:
- if (!OVERLAY_NEEDS_PHYSICAL(dev))
+ if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
i915_gem_object_ggtt_unpin(reg_bo);
out_free_bo:
drm_gem_object_unreference(&reg_bo->base);
out_free:
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
kfree(overlay);
return;
}
-void intel_cleanup_overlay(struct drm_device *dev)
+void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
if (!dev_priv->overlay)
return;
@@ -1482,18 +1475,17 @@ struct intel_overlay_error_state {
static struct overlay_registers __iomem *
intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
{
- struct drm_i915_private *dev_priv = to_i915(overlay->dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct drm_i915_private *dev_priv = overlay->i915;
struct overlay_registers __iomem *regs;
- if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
/* Cast to make sparse happy, but it's wc memory anyway, so
* equivalent to the wc io mapping on X86. */
regs = (struct overlay_registers __iomem *)
overlay->reg_bo->phys_handle->vaddr;
else
- regs = io_mapping_map_atomic_wc(ggtt->mappable,
- i915_gem_obj_ggtt_offset(overlay->reg_bo));
+ regs = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
+ overlay->flip_addr);
return regs;
}
@@ -1501,15 +1493,13 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
struct overlay_registers __iomem *regs)
{
- if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+ if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
io_mapping_unmap_atomic(regs);
}
-
struct intel_overlay_error_state *
-intel_overlay_capture_error_state(struct drm_device *dev)
+intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_overlay *overlay = dev_priv->overlay;
struct intel_overlay_error_state *error;
struct overlay_registers __iomem *regs;
@@ -1523,10 +1513,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
error->dovsta = I915_READ(DOVSTA);
error->isr = I915_READ(ISR);
- if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
- error->base = (__force long)overlay->reg_bo->phys_handle->vaddr;
- else
- error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
+ error->base = overlay->flip_addr;
regs = intel_overlay_map_regs_atomic(overlay);
if (!regs)
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index aba940998..96c65d77e 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -377,7 +377,7 @@ out:
enum drm_connector_status
intel_panel_detect(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/* Assume that the BIOS does not lie through the OpRegion... */
if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
@@ -504,7 +504,7 @@ static u32 i9xx_get_backlight(struct intel_connector *connector)
if (panel->backlight.combination_mode) {
u8 lbpc;
- pci_read_config_byte(dev_priv->dev->pdev, LBPC, &lbpc);
+ pci_read_config_byte(dev_priv->drm.pdev, LBPC, &lbpc);
val *= lbpc;
}
@@ -592,7 +592,7 @@ static void i9xx_set_backlight(struct intel_connector *connector, u32 level)
lbpc = level * 0xfe / panel->backlight.max + 1;
level /= lbpc;
- pci_write_config_byte(dev_priv->dev->pdev, LBPC, lbpc);
+ pci_write_config_byte(dev_priv->drm.pdev, LBPC, lbpc);
}
if (IS_GEN4(dev_priv)) {
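/*
 * Illustrative aside (not part of this patch): how combination mode splits
 * one brightness level between the LBPC PCI config byte and the PWM
 * duty-cycle register, mirroring the arithmetic in i9xx_set_backlight()
 * and i9xx_get_backlight() above. The numbers are made up.
 */
#include <stdio.h>

int main(void)
{
	unsigned int max = 255, level = 200;

	unsigned int lbpc = level * 0xfe / max + 1;	/* coarse scale, >= 1 */
	unsigned int pwm = level / lbpc;		/* fine duty cycle */

	/* the read path reconstructs the level as pwm * lbpc */
	printf("lbpc=%u pwm=%u reconstructed=%u\n", lbpc, pwm, pwm * lbpc);
	return 0;
}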
@@ -822,7 +822,7 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
* backlight. This will leave the backlight on unnecessarily when
* another client is not activated.
*/
- if (dev_priv->dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) {
+ if (dev_priv->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n");
return;
}
@@ -1142,7 +1142,7 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd)
{
struct intel_connector *connector = bl_get_data(bd);
struct drm_device *dev = connector->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 hw_level;
int ret;
@@ -1163,7 +1163,7 @@ static const struct backlight_ops intel_backlight_device_ops = {
.get_brightness = intel_backlight_device_get_brightness,
};
-static int intel_backlight_device_register(struct intel_connector *connector)
+int intel_backlight_device_register(struct intel_connector *connector)
{
struct intel_panel *panel = &connector->panel;
struct backlight_properties props;
@@ -1216,7 +1216,7 @@ static int intel_backlight_device_register(struct intel_connector *connector)
return 0;
}
-static void intel_backlight_device_unregister(struct intel_connector *connector)
+void intel_backlight_device_unregister(struct intel_connector *connector)
{
struct intel_panel *panel = &connector->panel;
@@ -1225,14 +1225,6 @@ static void intel_backlight_device_unregister(struct intel_connector *connector)
panel->backlight.device = NULL;
}
}
-#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
-static int intel_backlight_device_register(struct intel_connector *connector)
-{
- return 0;
-}
-static void intel_backlight_device_unregister(struct intel_connector *connector)
-{
-}
#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
/*
@@ -1324,7 +1316,7 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
struct drm_device *dev = connector->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int clock;
if (IS_G4X(dev_priv))
@@ -1724,6 +1716,14 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
container_of(panel, struct intel_connector, panel);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
+ intel_dp_aux_init_backlight_funcs(connector) == 0)
+ return;
+
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI &&
+ intel_dsi_dcs_init_backlight_funcs(connector) == 0)
+ return;
+
if (IS_BROXTON(dev_priv)) {
panel->backlight.setup = bxt_setup_backlight;
panel->backlight.enable = bxt_enable_backlight;
@@ -1805,19 +1805,3 @@ void intel_panel_fini(struct intel_panel *panel)
drm_mode_destroy(intel_connector->base.dev,
panel->downclock_mode);
}
-
-void intel_backlight_register(struct drm_device *dev)
-{
- struct intel_connector *connector;
-
- for_each_intel_connector(dev, connector)
- intel_backlight_device_register(connector);
-}
-
-void intel_backlight_unregister(struct drm_device *dev)
-{
- struct intel_connector *connector;
-
- for_each_intel_connector(dev, connector)
- intel_backlight_device_unregister(connector);
-}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 64dd842ae..0f042d133 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -26,6 +26,7 @@
*/
#include <linux/cpufreq.h>
+#include <drm/drm_plane_helper.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
@@ -82,7 +83,7 @@ static void gen9_init_clock_gating(struct drm_device *dev)
static void bxt_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
gen9_init_clock_gating(dev);
@@ -108,7 +109,7 @@ static void bxt_init_clock_gating(struct drm_device *dev)
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp;
tmp = I915_READ(CLKCFG);
@@ -147,7 +148,7 @@ static void i915_pineview_get_mem_freq(struct drm_device *dev)
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u16 ddrpll, csipll;
ddrpll = I915_READ16(DDRMPLL1);
@@ -318,7 +319,7 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
u32 val;
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
@@ -374,7 +375,7 @@ static const int pessimal_latency_ns = 5000;
static int vlv_get_fifo_size(struct drm_device *dev,
enum pipe pipe, int plane)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int sprite0_start, sprite1_start, size;
switch (pipe) {
@@ -425,7 +426,7 @@ static int vlv_get_fifo_size(struct drm_device *dev,
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dsparb = I915_READ(DSPARB);
int size;
@@ -441,7 +442,7 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dsparb = I915_READ(DSPARB);
int size;
@@ -458,7 +459,7 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dsparb = I915_READ(DSPARB);
int size;
@@ -636,7 +637,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
struct drm_device *dev = unused_crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc;
const struct cxsr_latency *latency;
u32 reg;
@@ -933,7 +934,7 @@ static unsigned int vlv_wm_method2(unsigned int pixel_rate,
static void vlv_setup_wm_latency(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/* all latencies in usec */
dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
@@ -1324,7 +1325,7 @@ static void vlv_merge_wm(struct drm_device *dev,
static void vlv_update_wm(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct vlv_wm_values wm = {};
@@ -1380,7 +1381,7 @@ static void g4x_update_wm(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
static const int sr_latency_ns = 12000;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
int plane_sr, cursor_sr;
unsigned int enabled = 0;
@@ -1437,7 +1438,7 @@ static void g4x_update_wm(struct drm_crtc *crtc)
static void i965_update_wm(struct drm_crtc *unused_crtc)
{
struct drm_device *dev = unused_crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc;
int srwm = 1;
int cursor_sr = 16;
@@ -1511,7 +1512,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
struct drm_device *dev = unused_crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
const struct intel_watermark_params *wm_info;
uint32_t fwater_lo;
uint32_t fwater_hi;
@@ -1641,7 +1642,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
static void i845_update_wm(struct drm_crtc *unused_crtc)
{
struct drm_device *dev = unused_crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc;
const struct drm_display_mode *adjusted_mode;
uint32_t fwater_lo;
@@ -2040,10 +2041,10 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
}
static uint32_t
-hsw_compute_linetime_wm(struct drm_device *dev,
- struct intel_crtc_state *cstate)
+hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct intel_atomic_state *intel_state =
+ to_intel_atomic_state(cstate->base.state);
const struct drm_display_mode *adjusted_mode =
&cstate->base.adjusted_mode;
u32 linetime, ips_linetime;
@@ -2052,7 +2053,7 @@ hsw_compute_linetime_wm(struct drm_device *dev,
return 0;
if (WARN_ON(adjusted_mode->crtc_clock == 0))
return 0;
- if (WARN_ON(dev_priv->cdclk_freq == 0))
+ if (WARN_ON(intel_state->cdclk == 0))
return 0;
	/* The WM are computed based on how long it takes to fill a single
@@ -2061,7 +2062,7 @@ hsw_compute_linetime_wm(struct drm_device *dev,
linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
adjusted_mode->crtc_clock);
ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
- dev_priv->cdclk_freq);
+ intel_state->cdclk);
return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
PIPE_WM_LINETIME_TIME(linetime);
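/*
 * Illustrative aside (not part of this patch): judging by the "* 1000 * 8"
 * factor over a kHz clock, the linetime fields above appear to be in 1/8 us
 * units. For a mode with htotal 2200 and a 148500 kHz pixel clock (made-up
 * but typical 1080p numbers):
 *
 *	DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119
 *
 * i.e. 119/8 ~= 14.9 us per scanout line, matching 2200 / 148.5 MHz.
 */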
@@ -2069,7 +2070,7 @@ hsw_compute_linetime_wm(struct drm_device *dev,
static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (IS_GEN9(dev)) {
uint32_t val;
@@ -2174,14 +2175,14 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
/* ILK sprite LP0 latency is 1300 ns */
- if (INTEL_INFO(dev)->gen == 5)
+ if (IS_GEN5(dev))
wm[0] = 13;
}
static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
/* ILK cursor LP0 latency is 1300 ns */
- if (INTEL_INFO(dev)->gen == 5)
+ if (IS_GEN5(dev))
wm[0] = 13;
/* WaDoubleCursorLP3Latency:ivb */
@@ -2235,7 +2236,7 @@ static void intel_print_wm_latency(struct drm_device *dev,
static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
uint16_t wm[5], uint16_t min)
{
- int level, max_level = ilk_wm_max_level(dev_priv->dev);
+ int level, max_level = ilk_wm_max_level(&dev_priv->drm);
if (wm[0] >= min)
return false;
@@ -2249,7 +2250,7 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
static void snb_wm_latency_quirk(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
bool changed;
/*
@@ -2271,7 +2272,7 @@ static void snb_wm_latency_quirk(struct drm_device *dev)
static void ilk_setup_wm_latency(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
@@ -2293,7 +2294,7 @@ static void ilk_setup_wm_latency(struct drm_device *dev)
static void skl_setup_wm_latency(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
@@ -2329,7 +2330,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
struct intel_pipe_wm *pipe_wm;
struct drm_device *dev = state->dev;
- const struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane;
struct intel_plane_state *pristate = NULL;
struct intel_plane_state *sprstate = NULL;
@@ -2337,7 +2338,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
int level, max_level = ilk_wm_max_level(dev), usable_level;
struct ilk_wm_maximums max;
- pipe_wm = &cstate->wm.optimal.ilk;
+ pipe_wm = &cstate->wm.ilk.optimal;
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
struct intel_plane_state *ps;
@@ -2380,7 +2381,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
pipe_wm->wm[0] = pipe_wm->raw_wm[0];
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
- pipe_wm->linetime = hsw_compute_linetime_wm(dev, cstate);
+ pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
if (!ilk_validate_pipe_wm(dev, pipe_wm))
return -EINVAL;
@@ -2419,7 +2420,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
struct intel_crtc *intel_crtc,
struct intel_crtc_state *newstate)
{
- struct intel_pipe_wm *a = &newstate->wm.intermediate;
+ struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
int level, max_level = ilk_wm_max_level(dev);
@@ -2428,7 +2429,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
* currently active watermarks to get values that are safe both before
* and after the vblank.
*/
- *a = newstate->wm.optimal.ilk;
+ *a = newstate->wm.ilk.optimal;
a->pipe_enabled |= b->pipe_enabled;
a->sprites_enabled |= b->sprites_enabled;
a->sprites_scaled |= b->sprites_scaled;
@@ -2457,7 +2458,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
* If our intermediate WM are identical to the final WM, then we can
* omit the post-vblank programming; only update if it's different.
*/
- if (memcmp(a, &newstate->wm.optimal.ilk, sizeof(*a)) == 0)
+ if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) == 0)
newstate->wm.need_postvbl_update = false;
return 0;
@@ -2504,7 +2505,7 @@ static void ilk_wm_merge(struct drm_device *dev,
const struct ilk_wm_maximums *max,
struct intel_pipe_wm *merged)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int level, max_level = ilk_wm_max_level(dev);
int last_enabled_level = max_level;
@@ -2564,7 +2565,7 @@ static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
/* The value we need to program into the WM_LPx latency field */
static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
return 2 * level;
@@ -2764,7 +2765,7 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
struct ilk_wm_values *results)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct ilk_wm_values *previous = &dev_priv->wm.hw;
unsigned int dirty;
uint32_t val;
@@ -2839,7 +2840,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
bool ilk_disable_lp_wm(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
@@ -2851,6 +2852,7 @@ bool ilk_disable_lp_wm(struct drm_device *dev)
#define SKL_DDB_SIZE 896 /* in blocks */
#define BXT_DDB_SIZE 512
+#define SKL_SAGV_BLOCK_TIME 30 /* µs */
/*
* Return the index of a plane in the SKL DDB and wm result arrays. Primary
@@ -2874,126 +2876,179 @@ skl_wm_plane_id(const struct intel_plane *plane)
}
}
-static void
-skl_sagv_get_hw_state(struct drm_i915_private *dev_priv)
-{
- u32 temp;
- int ret;
-
- if (IS_BROXTON(dev_priv))
- return;
-
- mutex_lock(&dev_priv->rps.hw_lock);
- ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_SAGV_CONTROL, &temp);
- mutex_unlock(&dev_priv->rps.hw_lock);
-
- if (!ret) {
- dev_priv->skl_sagv_enabled = !!(temp & GEN9_SAGV_DYNAMIC_FREQ);
- } else {
- /*
- * If for some reason we can't access the SAGV state, follow
- * the bspec and assume it's enabled
- */
- DRM_ERROR("Failed to get SAGV state, assuming enabled\n");
- dev_priv->skl_sagv_enabled = true;
- }
-}
-
/*
* SAGV dynamically adjusts the system agent voltage and clock frequencies
* depending on power and performance requirements. The display engine access
- * to system memory is blocked during the adjustment time. Having this enabled
- * in multi-pipe configurations can cause issues (such as underruns causing
- * full system hangs), and the bspec also suggests that software disable it
- * when more then one pipe is enabled.
+ * to system memory is blocked during the adjustment time. Because of the
+ * blocking time, having this enabled can cause full system hangs and/or pipe
+ * underruns if we don't meet all of the following requirements:
+ *
+ * - <= 1 pipe enabled
+ * - All planes can enable watermarks for latencies >= SAGV engine block time
+ * - We're not using an interlaced display configuration
*/
-static int
+int
skl_enable_sagv(struct drm_i915_private *dev_priv)
{
int ret;
- if (IS_BROXTON(dev_priv))
- return 0;
- if (dev_priv->skl_sagv_enabled)
+ if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
+ dev_priv->skl_sagv_status == I915_SKL_SAGV_ENABLED)
return 0;
- mutex_lock(&dev_priv->rps.hw_lock);
DRM_DEBUG_KMS("Enabling the SAGV\n");
+ mutex_lock(&dev_priv->rps.hw_lock);
ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
- GEN9_SAGV_DYNAMIC_FREQ);
- if (!ret)
- dev_priv->skl_sagv_enabled = true;
- else
- DRM_ERROR("Failed to enable the SAGV\n");
+ GEN9_SAGV_ENABLE);
- /* We don't need to wait for SAGV when enabling */
+ /* We don't need to wait for the SAGV when enabling */
mutex_unlock(&dev_priv->rps.hw_lock);
- return ret;
+
+ /*
+ * Some skl systems, pre-release machines in particular,
+ * don't actually have an SAGV.
+ */
+ if (ret == -ENXIO) {
+ DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
+ dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
+ return 0;
+ } else if (ret < 0) {
+ DRM_ERROR("Failed to enable the SAGV\n");
+ return ret;
+ }
+
+ dev_priv->skl_sagv_status = I915_SKL_SAGV_ENABLED;
+ return 0;
}
static int
+skl_do_sagv_disable(struct drm_i915_private *dev_priv)
+{
+ int ret;
+ uint32_t temp = GEN9_SAGV_DISABLE;
+
+ ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+ &temp);
+ if (ret)
+ return ret;
+ else
+ return temp & GEN9_SAGV_IS_DISABLED;
+}
+
+int
skl_disable_sagv(struct drm_i915_private *dev_priv)
{
- int ret = 0;
- unsigned long timeout;
- u32 temp;
+ int ret, result;
- if (IS_BROXTON(dev_priv))
- return 0;
- if (!dev_priv->skl_sagv_enabled)
+ if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
+ dev_priv->skl_sagv_status == I915_SKL_SAGV_DISABLED)
return 0;
- mutex_lock(&dev_priv->rps.hw_lock);
DRM_DEBUG_KMS("Disabling the SAGV\n");
+ mutex_lock(&dev_priv->rps.hw_lock);
/* bspec says to keep retrying for at least 1 ms */
- timeout = jiffies + msecs_to_jiffies(1);
- do {
- ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
- GEN9_SAGV_DISABLE);
- if (ret) {
- DRM_ERROR("Failed to disable the SAGV\n");
- goto out;
- }
-
- ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_SAGV_CONTROL,
- &temp);
- if (ret) {
- DRM_ERROR("Failed to check the status of the SAGV\n");
- goto out;
- }
- } while (!(temp & 0x1) && jiffies < timeout);
+ ret = wait_for(result = skl_do_sagv_disable(dev_priv), 1);
+ mutex_unlock(&dev_priv->rps.hw_lock);
- if (temp & 0x1) {
- dev_priv->skl_sagv_enabled = false;
- } else {
- ret = -1;
+ if (ret == -ETIMEDOUT) {
DRM_ERROR("Request to disable SAGV timed out\n");
+ return -ETIMEDOUT;
}
-out:
- mutex_unlock(&dev_priv->rps.hw_lock);
- return ret;
+ /*
+ * Some skl systems, pre-release machines in particular,
+ * don't actually have an SAGV.
+ */
+ if (result == -ENXIO) {
+ DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
+ dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
+ return 0;
+ } else if (result < 0) {
+ DRM_ERROR("Failed to disable the SAGV\n");
+ return result;
+ }
+
+ dev_priv->skl_sagv_status = I915_SKL_SAGV_DISABLED;
+ return 0;
+}
+
+bool skl_can_enable_sagv(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct drm_crtc *crtc;
+ enum pipe pipe;
+ int level, plane;
+
+ /*
+ * SKL workaround: bspec recommends we disable the SAGV when we have
+	 * more than one pipe enabled
+ *
+ * If there are no active CRTCs, no additional checks need be performed
+ */
+ if (hweight32(intel_state->active_crtcs) == 0)
+ return true;
+ else if (hweight32(intel_state->active_crtcs) > 1)
+ return false;
+
+ /* Since we're now guaranteed to only have one active CRTC... */
+ pipe = ffs(intel_state->active_crtcs) - 1;
+ crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+
+ if (crtc->state->mode.flags & DRM_MODE_FLAG_INTERLACE)
+ return false;
+
+ for_each_plane(dev_priv, pipe, plane) {
+ /* Skip this plane if it's not enabled */
+ if (intel_state->wm_results.plane[pipe][plane][0] == 0)
+ continue;
+
+ /* Find the highest enabled wm level for this plane */
+ for (level = ilk_wm_max_level(dev);
+ intel_state->wm_results.plane[pipe][plane][level] == 0; --level)
+ { }
+
+ /*
+ * If any of the planes on this pipe don't enable wm levels
+	 * that incur memory latencies higher than 30µs, we can't enable
+ * the SAGV
+ */
+ if (dev_priv->wm.skl_latency[level] < SKL_SAGV_BLOCK_TIME)
+ return false;
+ }
+
+ return true;
}
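/*
 * Illustrative aside (not part of this patch): the empty-body for loop in
 * skl_can_enable_sagv() above walks down from the maximum wm level to find
 * the highest level with a nonzero value. A standalone equivalent, assuming
 * (as the kernel loop does, by skipping disabled planes) that level 0 is
 * nonzero:
 */
#include <stdio.h>

int main(void)
{
	/* made-up per-level wm values; level 0 is always enabled here */
	unsigned int plane_wm[8] = { 12, 10, 8, 4, 0, 0, 0, 0 };
	int level;

	for (level = 7; plane_wm[level] == 0; --level)
		;	/* empty body, mirroring the kernel loop */

	printf("highest enabled wm level: %d\n", level);	/* prints 3 */
	return 0;
}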
static void
skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
const struct intel_crtc_state *cstate,
- const struct intel_wm_config *config,
- struct skl_ddb_entry *alloc /* out */)
+ struct skl_ddb_entry *alloc, /* out */
+ int *num_active /* out */)
{
+ struct drm_atomic_state *state = cstate->base.state;
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *for_crtc = cstate->base.crtc;
- struct drm_crtc *crtc;
unsigned int pipe_size, ddb_size;
int nth_active_pipe;
+ int pipe = to_intel_crtc(for_crtc)->pipe;
- if (!cstate->base.active) {
+ if (WARN_ON(!state) || !cstate->base.active) {
alloc->start = 0;
alloc->end = 0;
+ *num_active = hweight32(dev_priv->active_crtcs);
return;
}
+ if (intel_state->active_pipe_changes)
+ *num_active = hweight32(intel_state->active_crtcs);
+ else
+ *num_active = hweight32(dev_priv->active_crtcs);
+
if (IS_BROXTON(dev))
ddb_size = BXT_DDB_SIZE;
else
@@ -3001,25 +3056,29 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
ddb_size -= 4; /* 4 blocks for bypass path allocation */
- nth_active_pipe = 0;
- for_each_crtc(dev, crtc) {
- if (!to_intel_crtc(crtc)->active)
- continue;
-
- if (crtc == for_crtc)
- break;
-
- nth_active_pipe++;
+ /*
+ * If the state doesn't change the active CRTC's, then there's
+ * no need to recalculate; the existing pipe allocation limits
+ * should remain unchanged. Note that we're safe from racing
+ * commits since any racing commit that changes the active CRTC
+ * list would need to grab _all_ crtc locks, including the one
+ * we currently hold.
+ */
+ if (!intel_state->active_pipe_changes) {
+ *alloc = dev_priv->wm.skl_hw.ddb.pipe[pipe];
+ return;
}
- pipe_size = ddb_size / config->num_pipes_active;
- alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active;
+ nth_active_pipe = hweight32(intel_state->active_crtcs &
+ (drm_crtc_mask(for_crtc) - 1));
+ pipe_size = ddb_size / hweight32(intel_state->active_crtcs);
+ alloc->start = nth_active_pipe * ddb_size / *num_active;
alloc->end = alloc->start + pipe_size;
}
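The new nth_active_pipe computation is a bit-counting trick: masking active_crtcs with (drm_crtc_mask(for_crtc) - 1) keeps only the bits below this CRTC's, so the popcount is this pipe's rank among the active ones. A self-contained sketch of the slice arithmetic, assuming the masks and ddb_size as plain integers:

#include <stdint.h>

static void pipe_slice(uint32_t active_mask, uint32_t this_crtc_mask,
		       unsigned int ddb_size,
		       unsigned int *start, unsigned int *end)
{
	/* active bits strictly below ours = pipes that precede us */
	unsigned int nth = __builtin_popcount(active_mask &
					      (this_crtc_mask - 1));
	unsigned int num_active = __builtin_popcount(active_mask);

	*start = nth * ddb_size / num_active;
	*end = *start + ddb_size / num_active;
}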
-static unsigned int skl_cursor_allocation(const struct intel_wm_config *config)
+static unsigned int skl_cursor_allocation(int num_active)
{
- if (config->num_pipes_active == 1)
+ if (num_active == 1)
return 32;
return 8;
@@ -3063,6 +3122,46 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
}
}
+/*
+ * Determines the downscale amount of a plane for the purposes of watermark calculations.
+ * The bspec defines downscale amount as:
+ *
+ * """
+ * Horizontal down scale amount = maximum[1, Horizontal source size /
+ * Horizontal destination size]
+ * Vertical down scale amount = maximum[1, Vertical source size /
+ * Vertical destination size]
+ * Total down scale amount = Horizontal down scale amount *
+ * Vertical down scale amount
+ * """
+ *
+ * Return value is provided in 16.16 fixed point form to retain fractional part.
+ * Caller should take care of dividing & rounding off the value.
+ */
+static uint32_t
+skl_plane_downscale_amount(const struct intel_plane_state *pstate)
+{
+ uint32_t downscale_h, downscale_w;
+ uint32_t src_w, src_h, dst_w, dst_h;
+
+ if (WARN_ON(!pstate->visible))
+ return DRM_PLANE_HELPER_NO_SCALING;
+
+ /* n.b., src is 16.16 fixed point, dst is whole integer */
+ src_w = drm_rect_width(&pstate->src);
+ src_h = drm_rect_height(&pstate->src);
+ dst_w = drm_rect_width(&pstate->dst);
+ dst_h = drm_rect_height(&pstate->dst);
+ if (intel_rotation_90_or_270(pstate->base.rotation))
+ swap(dst_w, dst_h);
+
+ downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
+ downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
+
+ /* Provide result in 16.16 fixed point */
+ return (uint64_t)downscale_w * downscale_h >> 16;
+}
+
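The 16.16 arithmetic above deserves a worked example: dividing a 16.16 source dimension by an integer destination dimension yields a 16.16 ratio, clamping at 1.0 (1 << 16) discards upscaling, and the 32.32 product of the two ratios is shifted back down to 16.16. A minimal sketch:

#include <stdint.h>

#define FP16_ONE (1u << 16)	/* 1.0 in 16.16 fixed point */

/* src_* are 16.16 fixed point, dst_* are whole pixels, as in the diff. */
static uint32_t downscale_amount(uint32_t src_w, uint32_t src_h,
				 uint32_t dst_w, uint32_t dst_h)
{
	uint32_t dw = src_w / dst_w;	/* 16.16 / int = 16.16 ratio */
	uint32_t dh = src_h / dst_h;

	if (dw < FP16_ONE)
		dw = FP16_ONE;		/* never report less than 1.0 */
	if (dh < FP16_ONE)
		dh = FP16_ONE;

	/* 16.16 * 16.16 = 32.32; shift back to 16.16 */
	return (uint32_t)(((uint64_t)dw * dh) >> 16);
}

For a 4096-wide source scanned out at 2048 pixels, dw is 2.0 in fixed point (0x20000); an equal vertical scale gives a total downscale of 4.0 (0x40000).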
static unsigned int
skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
const struct drm_plane_state *pstate,
@@ -3070,7 +3169,16 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
{
struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
struct drm_framebuffer *fb = pstate->fb;
+ uint32_t down_scale_amount, data_rate;
uint32_t width = 0, height = 0;
+ unsigned format = fb ? fb->pixel_format : DRM_FORMAT_XRGB8888;
+
+ if (!intel_pstate->visible)
+ return 0;
+ if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR)
+ return 0;
+ if (y && format != DRM_FORMAT_NV12)
+ return 0;
width = drm_rect_width(&intel_pstate->src) >> 16;
height = drm_rect_height(&intel_pstate->src) >> 16;
@@ -3079,17 +3187,21 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
swap(width, height);
/* for planar format */
- if (fb->pixel_format == DRM_FORMAT_NV12) {
+ if (format == DRM_FORMAT_NV12) {
if (y) /* y-plane data rate */
- return width * height *
- drm_format_plane_cpp(fb->pixel_format, 0);
+ data_rate = width * height *
+ drm_format_plane_cpp(format, 0);
else /* uv-plane data rate */
- return (width / 2) * (height / 2) *
- drm_format_plane_cpp(fb->pixel_format, 1);
+ data_rate = (width / 2) * (height / 2) *
+ drm_format_plane_cpp(format, 1);
+ } else {
+ /* for packed formats */
+ data_rate = width * height * drm_format_plane_cpp(format, 0);
}
- /* for packed formats */
- return width * height * drm_format_plane_cpp(fb->pixel_format, 0);
+ down_scale_amount = skl_plane_downscale_amount(intel_pstate);
+
+ return (uint64_t)data_rate * down_scale_amount >> 16;
}
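The reworked branch caches a rate instead of returning early, but the arithmetic is unchanged: pixels times bytes-per-pixel, with NV12's chroma plane subsampled 2x2. A compact model of the per-plane rate, with the cpp value passed in rather than derived from drm_format_plane_cpp():

/* y_plane selects NV12's luma (full-res) vs chroma (quarter-res) plane. */
static unsigned int plane_data_rate(unsigned int w, unsigned int h,
				    unsigned int cpp, int nv12, int y_plane)
{
	if (nv12 && !y_plane)
		return (w / 2) * (h / 2) * cpp;	/* 2x2 subsampled chroma */
	return w * h * cpp;			/* packed, or NV12 luma */
}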
/*
@@ -3098,86 +3210,186 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
* 3 * 4096 * 8192 * 4 < 2^32
*/
static unsigned int
-skl_get_total_relative_data_rate(const struct intel_crtc_state *cstate)
+skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
- struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_crtc_state *cstate = &intel_cstate->base;
+ struct drm_atomic_state *state = cstate->state;
+ struct drm_crtc *crtc = cstate->crtc;
+ struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ const struct drm_plane *plane;
const struct intel_plane *intel_plane;
- unsigned int total_data_rate = 0;
+ struct drm_plane_state *pstate;
+ unsigned int rate, total_data_rate = 0;
+ int id;
+ int i;
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
- const struct drm_plane_state *pstate = intel_plane->base.state;
+ if (WARN_ON(!state))
+ return 0;
- if (pstate->fb == NULL)
- continue;
+ /* Calculate and cache data rate for each plane */
+ for_each_plane_in_state(state, plane, pstate, i) {
+ id = skl_wm_plane_id(to_intel_plane(plane));
+ intel_plane = to_intel_plane(plane);
- if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
+ if (intel_plane->pipe != intel_crtc->pipe)
continue;
/* packed/uv */
- total_data_rate += skl_plane_relative_data_rate(cstate,
- pstate,
- 0);
+ rate = skl_plane_relative_data_rate(intel_cstate,
+ pstate, 0);
+ intel_cstate->wm.skl.plane_data_rate[id] = rate;
- if (pstate->fb->pixel_format == DRM_FORMAT_NV12)
- /* y-plane */
- total_data_rate += skl_plane_relative_data_rate(cstate,
- pstate,
- 1);
+ /* y-plane */
+ rate = skl_plane_relative_data_rate(intel_cstate,
+ pstate, 1);
+ intel_cstate->wm.skl.plane_y_data_rate[id] = rate;
+ }
+
+ /* Calculate CRTC's total data rate from cached values */
+ for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+ int id = skl_wm_plane_id(intel_plane);
+
+ /* packed/uv */
+ total_data_rate += intel_cstate->wm.skl.plane_data_rate[id];
+ total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id];
}
return total_data_rate;
}
-static void
+static uint16_t
+skl_ddb_min_alloc(const struct drm_plane_state *pstate,
+ const int y)
+{
+ struct drm_framebuffer *fb = pstate->fb;
+ struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
+ uint32_t src_w, src_h;
+ uint32_t min_scanlines = 8;
+ uint8_t plane_bpp;
+
+ if (WARN_ON(!fb))
+ return 0;
+
+ /* For packed formats, no y-plane, return 0 */
+ if (y && fb->pixel_format != DRM_FORMAT_NV12)
+ return 0;
+
+ /* For non Y-tile, return 8 blocks */
+ if (fb->modifier[0] != I915_FORMAT_MOD_Y_TILED &&
+ fb->modifier[0] != I915_FORMAT_MOD_Yf_TILED)
+ return 8;
+
+ src_w = drm_rect_width(&intel_pstate->src) >> 16;
+ src_h = drm_rect_height(&intel_pstate->src) >> 16;
+
+ if (intel_rotation_90_or_270(pstate->rotation))
+ swap(src_w, src_h);
+
+ /* Halve UV plane width and height for NV12 */
+ if (fb->pixel_format == DRM_FORMAT_NV12 && !y) {
+ src_w /= 2;
+ src_h /= 2;
+ }
+
+ if (fb->pixel_format == DRM_FORMAT_NV12 && !y)
+ plane_bpp = drm_format_plane_cpp(fb->pixel_format, 1);
+ else
+ plane_bpp = drm_format_plane_cpp(fb->pixel_format, 0);
+
+ if (intel_rotation_90_or_270(pstate->rotation)) {
+ switch (plane_bpp) {
+ case 1:
+ min_scanlines = 32;
+ break;
+ case 2:
+ min_scanlines = 16;
+ break;
+ case 4:
+ min_scanlines = 8;
+ break;
+ case 8:
+ min_scanlines = 4;
+ break;
+ default:
+ WARN(1, "Unsupported pixel depth %u for rotation",
+ plane_bpp);
+ min_scanlines = 32;
+ }
+ }
+
+ return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
+}
+
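The closing expression of skl_ddb_min_alloc() packs the whole formula into one line. Unpacked, it is ceil(4 * width * bpp / 512) blocks for four scanlines, rescaled to the rotation-dependent minimum scanline count, plus 3 blocks of slack. A sketch with the same integer arithmetic:

/* Minimum DDB blocks for a Y-tiled plane, per the formula above. */
static unsigned int min_alloc_ytiled(unsigned int src_w, unsigned int bpp,
				     unsigned int min_scanlines)
{
	unsigned int blocks_per_4_lines = (4 * src_w * bpp + 511) / 512;

	return blocks_per_4_lines * min_scanlines / 4 + 3;
}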
+static int
skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct skl_ddb_allocation *ddb /* out */)
{
+ struct drm_atomic_state *state = cstate->base.state;
struct drm_crtc *crtc = cstate->base.crtc;
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_wm_config *config = &dev_priv->wm.config;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane;
+ struct drm_plane *plane;
+ struct drm_plane_state *pstate;
enum pipe pipe = intel_crtc->pipe;
struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
uint16_t alloc_size, start, cursor_blocks;
- uint16_t minimum[I915_MAX_PLANES];
- uint16_t y_minimum[I915_MAX_PLANES];
+ uint16_t *minimum = cstate->wm.skl.minimum_blocks;
+ uint16_t *y_minimum = cstate->wm.skl.minimum_y_blocks;
unsigned int total_data_rate;
+ int num_active;
+ int id, i;
- skl_ddb_get_pipe_allocation_limits(dev, cstate, config, alloc);
+ if (WARN_ON(!state))
+ return 0;
+
+ if (!cstate->base.active) {
+ ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0;
+ memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
+ memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
+ return 0;
+ }
+
+ skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active);
alloc_size = skl_ddb_entry_size(alloc);
if (alloc_size == 0) {
memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
- memset(&ddb->plane[pipe][PLANE_CURSOR], 0,
- sizeof(ddb->plane[pipe][PLANE_CURSOR]));
- return;
+ return 0;
}
- cursor_blocks = skl_cursor_allocation(config);
+ cursor_blocks = skl_cursor_allocation(num_active);
ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks;
ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
alloc_size -= cursor_blocks;
- alloc->end -= cursor_blocks;
/* 1. Allocate the minimum required blocks for each active plane */
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
- struct drm_plane *plane = &intel_plane->base;
- struct drm_framebuffer *fb = plane->state->fb;
- int id = skl_wm_plane_id(intel_plane);
+ for_each_plane_in_state(state, plane, pstate, i) {
+ intel_plane = to_intel_plane(plane);
+ id = skl_wm_plane_id(intel_plane);
- if (!to_intel_plane_state(plane->state)->visible)
+ if (intel_plane->pipe != pipe)
continue;
- if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ if (!to_intel_plane_state(pstate)->visible) {
+ minimum[id] = 0;
+ y_minimum[id] = 0;
+ continue;
+ }
+ if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ minimum[id] = 0;
+ y_minimum[id] = 0;
continue;
+ }
+
+ minimum[id] = skl_ddb_min_alloc(pstate, 0);
+ y_minimum[id] = skl_ddb_min_alloc(pstate, 1);
+ }
- minimum[id] = 8;
- alloc_size -= minimum[id];
- y_minimum[id] = (fb->pixel_format == DRM_FORMAT_NV12) ? 8 : 0;
- alloc_size -= y_minimum[id];
+ for (i = 0; i < PLANE_CURSOR; i++) {
+ alloc_size -= minimum[i];
+ alloc_size -= y_minimum[i];
}
/*
@@ -3187,21 +3399,16 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
* FIXME: we may not allocate every single block here.
*/
total_data_rate = skl_get_total_relative_data_rate(cstate);
+ if (total_data_rate == 0)
+ return 0;
start = alloc->start;
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
- struct drm_plane *plane = &intel_plane->base;
- struct drm_plane_state *pstate = intel_plane->base.state;
unsigned int data_rate, y_data_rate;
uint16_t plane_blocks, y_plane_blocks = 0;
int id = skl_wm_plane_id(intel_plane);
- if (!to_intel_plane_state(pstate)->visible)
- continue;
- if (plane->type == DRM_PLANE_TYPE_CURSOR)
- continue;
-
- data_rate = skl_plane_relative_data_rate(cstate, pstate, 0);
+ data_rate = cstate->wm.skl.plane_data_rate[id];
/*
* allocation for (packed formats) or (uv-plane part of planar format):
@@ -3212,30 +3419,32 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
total_data_rate);
- ddb->plane[pipe][id].start = start;
- ddb->plane[pipe][id].end = start + plane_blocks;
+ /* Leave disabled planes at (0,0) */
+ if (data_rate) {
+ ddb->plane[pipe][id].start = start;
+ ddb->plane[pipe][id].end = start + plane_blocks;
+ }
start += plane_blocks;
/*
* allocation for y_plane part of planar format:
*/
- if (pstate->fb->pixel_format == DRM_FORMAT_NV12) {
- y_data_rate = skl_plane_relative_data_rate(cstate,
- pstate,
- 1);
- y_plane_blocks = y_minimum[id];
- y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
- total_data_rate);
+ y_data_rate = cstate->wm.skl.plane_y_data_rate[id];
+ y_plane_blocks = y_minimum[id];
+ y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
+ total_data_rate);
+
+ if (y_data_rate) {
ddb->y_plane[pipe][id].start = start;
ddb->y_plane[pipe][id].end = start + y_plane_blocks;
-
- start += y_plane_blocks;
}
+ start += y_plane_blocks;
}
+ return 0;
}
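Steps 1 and 2 of the allocator amount to a guaranteed minimum per plane plus a share of the remaining space proportional to each plane's data rate (the minimums having already been subtracted from alloc_size, as the loop over PLANE_CURSOR does above). A self-contained sketch of that split, assuming the rates and minimums are already gathered into arrays:

#include <stdint.h>

static void split_ddb(const unsigned int rate[], const uint16_t min[],
		      int nplanes, unsigned int alloc_size,
		      uint16_t blocks[])
{
	uint64_t total = 0;
	int i;

	for (i = 0; i < nplanes; i++)
		total += rate[i];
	if (total == 0)
		return;		/* mirrors the early return on zero rate */

	for (i = 0; i < nplanes; i++)
		blocks[i] = min[i] +
			(uint16_t)((uint64_t)alloc_size * rate[i] / total);
}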
static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
@@ -3281,6 +3490,8 @@ static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
plane_bytes_per_line *= 4;
plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
plane_blocks_per_line /= 4;
+ } else if (tiling == DRM_FORMAT_MOD_NONE) {
+ plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1;
} else {
plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
}
@@ -3292,35 +3503,41 @@ static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
return ret;
}
-static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb,
- const struct intel_crtc *intel_crtc)
+static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
+ struct intel_plane_state *pstate)
{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
+ uint64_t adjusted_pixel_rate;
+ uint64_t downscale_amount;
+ uint64_t pixel_rate;
+
+ /* Shouldn't reach here on disabled planes... */
+ if (WARN_ON(!pstate->visible))
+ return 0;
/*
- * If ddb allocation of pipes changed, it may require recalculation of
- * watermarks
+ * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
+ * with additional adjustments for plane-specific scaling.
*/
- if (memcmp(new_ddb->pipe, cur_ddb->pipe, sizeof(new_ddb->pipe)))
- return true;
+ adjusted_pixel_rate = skl_pipe_pixel_rate(cstate);
+ downscale_amount = skl_plane_downscale_amount(pstate);
- return false;
+ pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
+ WARN_ON(pixel_rate != clamp_t(uint32_t, pixel_rate, 0, ~0));
+
+ return pixel_rate;
}
-static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
- struct intel_crtc_state *cstate,
- struct intel_plane *intel_plane,
- uint16_t ddb_allocation,
- int level,
- uint16_t *out_blocks, /* out */
- uint8_t *out_lines /* out */)
+static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
+ struct intel_crtc_state *cstate,
+ struct intel_plane_state *intel_pstate,
+ uint16_t ddb_allocation,
+ int level,
+ uint16_t *out_blocks, /* out */
+ uint8_t *out_lines, /* out */
+ bool *enabled /* out */)
{
- struct drm_plane *plane = &intel_plane->base;
- struct drm_framebuffer *fb = plane->state->fb;
- struct intel_plane_state *intel_pstate =
- to_intel_plane_state(plane->state);
+ struct drm_plane_state *pstate = &intel_pstate->base;
+ struct drm_framebuffer *fb = pstate->fb;
uint32_t latency = dev_priv->wm.skl_latency[level];
uint32_t method1, method2;
uint32_t plane_bytes_per_line, plane_blocks_per_line;
@@ -3328,20 +3545,24 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
uint32_t selected_result;
uint8_t cpp;
uint32_t width = 0, height = 0;
+ uint32_t plane_pixel_rate;
- if (latency == 0 || !cstate->base.active || !intel_pstate->visible)
- return false;
+ if (latency == 0 || !cstate->base.active || !intel_pstate->visible) {
+ *enabled = false;
+ return 0;
+ }
width = drm_rect_width(&intel_pstate->src) >> 16;
height = drm_rect_height(&intel_pstate->src) >> 16;
- if (intel_rotation_90_or_270(plane->state->rotation))
+ if (intel_rotation_90_or_270(pstate->rotation))
swap(width, height);
cpp = drm_format_plane_cpp(fb->pixel_format, 0);
- method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
- cpp, latency);
- method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
+ plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
+
+ method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
+ method2 = skl_wm_method2(plane_pixel_rate,
cstate->base.adjusted_mode.crtc_htotal,
width,
cpp,
@@ -3355,7 +3576,7 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
uint32_t min_scanlines = 4;
uint32_t y_tile_minimum;
- if (intel_rotation_90_or_270(plane->state->rotation)) {
+ if (intel_rotation_90_or_270(pstate->rotation)) {
int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
drm_format_plane_cpp(fb->pixel_format, 1) :
drm_format_plane_cpp(fb->pixel_format, 0);
@@ -3391,40 +3612,100 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
res_blocks++;
}
- if (res_blocks >= ddb_allocation || res_lines > 31)
- return false;
+ if (res_blocks >= ddb_allocation || res_lines > 31) {
+ *enabled = false;
+
+ /*
+ * If there are no valid level 0 watermarks, then we can't
+ * support this display configuration.
+ */
+ if (level) {
+ return 0;
+ } else {
+ DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
+ DRM_DEBUG_KMS("Plane %d.%d: blocks required = %u/%u, lines required = %u/31\n",
+ to_intel_crtc(cstate->base.crtc)->pipe,
+ skl_wm_plane_id(to_intel_plane(pstate->plane)),
+ res_blocks, ddb_allocation, res_lines);
+
+ return -EINVAL;
+ }
+ }
*out_blocks = res_blocks;
*out_lines = res_lines;
+ *enabled = true;
- return true;
+ return 0;
}
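The error-handling rework above encodes a two-tier policy: a watermark level that doesn't fit in its DDB allocation is simply left disabled, except level 0, whose failure rejects the whole configuration with -EINVAL. Reduced to its decision logic:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

static int check_wm_level(int level, uint16_t res_blocks,
			  uint16_t ddb_allocation, uint8_t res_lines,
			  bool *enabled)
{
	if (res_blocks >= ddb_allocation || res_lines > 31) {
		*enabled = false;
		return level ? 0 : -EINVAL;	/* level 0 must always fit */
	}
	*enabled = true;
	return 0;
}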
-static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
- struct skl_ddb_allocation *ddb,
- struct intel_crtc_state *cstate,
- int level,
- struct skl_wm_level *result)
+static int
+skl_compute_wm_level(const struct drm_i915_private *dev_priv,
+ struct skl_ddb_allocation *ddb,
+ struct intel_crtc_state *cstate,
+ int level,
+ struct skl_wm_level *result)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_atomic_state *state = cstate->base.state;
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+ struct drm_plane *plane;
struct intel_plane *intel_plane;
+ struct intel_plane_state *intel_pstate;
uint16_t ddb_blocks;
enum pipe pipe = intel_crtc->pipe;
+ int ret;
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+ /*
+ * We'll only calculate watermarks for planes that are actually
+ * enabled, so make sure all other planes are set as disabled.
+ */
+ memset(result, 0, sizeof(*result));
+
+ for_each_intel_plane_mask(&dev_priv->drm,
+ intel_plane,
+ cstate->base.plane_mask) {
int i = skl_wm_plane_id(intel_plane);
+ plane = &intel_plane->base;
+ intel_pstate = NULL;
+ if (state)
+ intel_pstate =
+ intel_atomic_get_existing_plane_state(state,
+ intel_plane);
+
+ /*
+ * Note: If we start supporting multiple pending atomic commits
+ * against the same planes/CRTC's in the future, plane->state
+ * will no longer be the correct pre-state to use for the
+ * calculations here and we'll need to change where we get the
+ * 'unchanged' plane data from.
+ *
+ * For now this is fine because we only allow one queued commit
+ * against a CRTC. Even if the plane isn't modified by this
+ * transaction and we don't have a plane lock, we still have
+ * the CRTC's lock, so we know that no other transactions are
+ * racing with us to update it.
+ */
+ if (!intel_pstate)
+ intel_pstate = to_intel_plane_state(plane->state);
+
+ WARN_ON(!intel_pstate->base.fb);
+
ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
- result->plane_en[i] = skl_compute_plane_wm(dev_priv,
- cstate,
- intel_plane,
- ddb_blocks,
- level,
- &result->plane_res_b[i],
- &result->plane_res_l[i]);
+ ret = skl_compute_plane_wm(dev_priv,
+ cstate,
+ intel_pstate,
+ ddb_blocks,
+ level,
+ &result->plane_res_b[i],
+ &result->plane_res_l[i],
+ &result->plane_en[i]);
+ if (ret)
+ return ret;
}
+
+ return 0;
}
static uint32_t
@@ -3458,21 +3739,26 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
}
}
-static void skl_compute_pipe_wm(struct intel_crtc_state *cstate,
- struct skl_ddb_allocation *ddb,
- struct skl_pipe_wm *pipe_wm)
+static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
+ struct skl_ddb_allocation *ddb,
+ struct skl_pipe_wm *pipe_wm)
{
struct drm_device *dev = cstate->base.crtc->dev;
- const struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct drm_i915_private *dev_priv = to_i915(dev);
int level, max_level = ilk_wm_max_level(dev);
+ int ret;
for (level = 0; level <= max_level; level++) {
- skl_compute_wm_level(dev_priv, ddb, cstate,
- level, &pipe_wm->wm[level]);
+ ret = skl_compute_wm_level(dev_priv, ddb, cstate,
+ level, &pipe_wm->wm[level]);
+ if (ret)
+ return ret;
}
pipe_wm->linetime = skl_compute_linetime_wm(cstate);
skl_compute_transition_wm(cstate, &pipe_wm->trans_wm);
+
+ return 0;
}
static void skl_compute_wm_results(struct drm_device *dev,
@@ -3545,19 +3831,16 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
static void skl_write_wm_values(struct drm_i915_private *dev_priv,
const struct skl_wm_values *new)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct intel_crtc *crtc;
- if (dev_priv->active_crtcs == 1)
- skl_enable_sagv(dev_priv);
- else
- skl_disable_sagv(dev_priv);
-
for_each_intel_crtc(dev, crtc) {
int i, level, max_level = ilk_wm_max_level(dev);
enum pipe pipe = crtc->pipe;
- if (!new->dirty[pipe])
+ if ((new->dirty_pipes & drm_crtc_mask(&crtc->base)) == 0)
+ continue;
+ if (!crtc->active)
continue;
I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
@@ -3644,7 +3927,7 @@ skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
struct skl_wm_values *new_values)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct skl_ddb_allocation *cur_ddb, *new_ddb;
bool reallocated[I915_MAX_PIPES] = {};
struct intel_crtc *crtc;
@@ -3723,116 +4006,225 @@ static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
}
}
-static bool skl_update_pipe_wm(struct drm_crtc *crtc,
- struct skl_ddb_allocation *ddb, /* out */
- struct skl_pipe_wm *pipe_wm /* out */)
+static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
+ struct skl_ddb_allocation *ddb, /* out */
+ struct skl_pipe_wm *pipe_wm, /* out */
+ bool *changed /* out */)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
+ struct intel_crtc *intel_crtc = to_intel_crtc(cstate->crtc);
+ struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
+ int ret;
- skl_allocate_pipe_ddb(cstate, ddb);
- skl_compute_pipe_wm(cstate, ddb, pipe_wm);
+ ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
+ if (ret)
+ return ret;
if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm)))
- return false;
+ *changed = false;
+ else
+ *changed = true;
- intel_crtc->wm.active.skl = *pipe_wm;
+ return 0;
+}
- return true;
+static uint32_t
+pipes_modified(struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *cstate;
+ uint32_t i, ret = 0;
+
+ for_each_crtc_in_state(state, crtc, cstate, i)
+ ret |= drm_crtc_mask(crtc);
+
+ return ret;
}
-static void skl_update_other_pipe_wm(struct drm_device *dev,
- struct drm_crtc *crtc,
- struct skl_wm_values *r)
+static int
+skl_compute_ddb(struct drm_atomic_state *state)
{
+ struct drm_device *dev = state->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct intel_crtc *intel_crtc;
- struct intel_crtc *this_crtc = to_intel_crtc(crtc);
+ struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
+ uint32_t realloc_pipes = pipes_modified(state);
+ int ret;
/*
- * If the WM update hasn't changed the allocation for this_crtc (the
- * crtc we are currently computing the new WM values for), other
- * enabled crtcs will keep the same allocation and we don't need to
- * recompute anything for them.
+ * If this is our first atomic update following hardware readout,
+ * we can't trust the DDB that the BIOS programmed for us. Let's
+ * pretend that all pipes switched active status so that we'll
+ * ensure a full DDB recompute.
*/
- if (!skl_ddb_allocation_changed(&r->ddb, this_crtc))
- return;
+ if (dev_priv->wm.distrust_bios_wm) {
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
+ state->acquire_ctx);
+ if (ret)
+ return ret;
+
+ intel_state->active_pipe_changes = ~0;
+
+ /*
+ * We usually only initialize intel_state->active_crtcs if
+ * we're doing a modeset; make sure this field is always
+ * initialized during the sanitization process that happens
+ * on the first commit too.
+ */
+ if (!intel_state->modeset)
+ intel_state->active_crtcs = dev_priv->active_crtcs;
+ }
/*
- * Otherwise, because of this_crtc being freshly enabled/disabled, the
- * other active pipes need new DDB allocation and WM values.
+ * If the modeset changes which CRTC's are active, we need to
+ * recompute the DDB allocation for *all* active pipes, even
+ * those that weren't otherwise being modified in any way by this
+ * atomic commit. Due to the shrinking of the per-pipe allocations
+ * when new active CRTC's are added, it's possible for a pipe that
+ * we were already using and aren't changing at all here to suddenly
+ * become invalid if its DDB needs exceed its new allocation.
+ *
+ * Note that if we wind up doing a full DDB recompute, we can't let
+ * any other display updates race with this transaction, so we need
+ * to grab the lock on *all* CRTC's.
*/
- for_each_intel_crtc(dev, intel_crtc) {
- struct skl_pipe_wm pipe_wm = {};
- bool wm_changed;
-
- if (this_crtc->pipe == intel_crtc->pipe)
- continue;
+ if (intel_state->active_pipe_changes) {
+ realloc_pipes = ~0;
+ intel_state->wm_results.dirty_pipes = ~0;
+ }
- if (!intel_crtc->active)
- continue;
+ for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
+ struct intel_crtc_state *cstate;
- wm_changed = skl_update_pipe_wm(&intel_crtc->base,
- &r->ddb, &pipe_wm);
+ cstate = intel_atomic_get_crtc_state(state, intel_crtc);
+ if (IS_ERR(cstate))
+ return PTR_ERR(cstate);
- /*
- * If we end up re-computing the other pipe WM values, it's
- * because it was really needed, so we expect the WM values to
- * be different.
- */
- WARN_ON(!wm_changed);
+ ret = skl_allocate_pipe_ddb(cstate, ddb);
+ if (ret)
+ return ret;
- skl_compute_wm_results(dev, &pipe_wm, r, intel_crtc);
- r->dirty[intel_crtc->pipe] = true;
+ ret = drm_atomic_add_affected_planes(state, &intel_crtc->base);
+ if (ret)
+ return ret;
}
+
+ return 0;
}
-static void skl_clear_wm(struct skl_wm_values *watermarks, enum pipe pipe)
+static void
+skl_copy_wm_for_pipe(struct skl_wm_values *dst,
+ struct skl_wm_values *src,
+ enum pipe pipe)
{
- watermarks->wm_linetime[pipe] = 0;
- memset(watermarks->plane[pipe], 0,
- sizeof(uint32_t) * 8 * I915_MAX_PLANES);
- memset(watermarks->plane_trans[pipe],
- 0, sizeof(uint32_t) * I915_MAX_PLANES);
- watermarks->plane_trans[pipe][PLANE_CURSOR] = 0;
+ dst->wm_linetime[pipe] = src->wm_linetime[pipe];
+ memcpy(dst->plane[pipe], src->plane[pipe],
+ sizeof(dst->plane[pipe]));
+ memcpy(dst->plane_trans[pipe], src->plane_trans[pipe],
+ sizeof(dst->plane_trans[pipe]));
+
+ dst->ddb.pipe[pipe] = src->ddb.pipe[pipe];
+ memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe],
+ sizeof(dst->ddb.y_plane[pipe]));
+ memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
+ sizeof(dst->ddb.plane[pipe]));
+}
+
+static int
+skl_compute_wm(struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *cstate;
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct skl_wm_values *results = &intel_state->wm_results;
+ struct skl_pipe_wm *pipe_wm;
+ bool changed = false;
+ int ret, i;
- /* Clear ddb entries for pipe */
- memset(&watermarks->ddb.pipe[pipe], 0, sizeof(struct skl_ddb_entry));
- memset(&watermarks->ddb.plane[pipe], 0,
- sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
- memset(&watermarks->ddb.y_plane[pipe], 0,
- sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
- memset(&watermarks->ddb.plane[pipe][PLANE_CURSOR], 0,
- sizeof(struct skl_ddb_entry));
+ /*
+ * If this transaction isn't actually touching any CRTC's, don't
+ * bother with watermark calculation. Note that if we pass this
+ * test, we're guaranteed to hold at least one CRTC state mutex,
+ * which means we can safely use values like dev_priv->active_crtcs
+ * since any racing commits that want to update them would need to
+ * hold _all_ CRTC state mutexes.
+ */
+ for_each_crtc_in_state(state, crtc, cstate, i)
+ changed = true;
+ if (!changed)
+ return 0;
+
+ /* Clear all dirty flags */
+ results->dirty_pipes = 0;
+ ret = skl_compute_ddb(state);
+ if (ret)
+ return ret;
+
+ /*
+ * Calculate WM's for all pipes that are part of this transaction.
+ * Note that the DDB allocation above may have added more CRTC's that
+ * weren't otherwise being modified (and set bits in dirty_pipes) if
+ * pipe allocations had to change.
+ *
+ * FIXME: Now that we're doing this in the atomic check phase, we
+ * should allow skl_update_pipe_wm() to return failure in cases where
+ * no suitable watermark values can be found.
+ */
+ for_each_crtc_in_state(state, crtc, cstate, i) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc_state *intel_cstate =
+ to_intel_crtc_state(cstate);
+
+ pipe_wm = &intel_cstate->wm.skl.optimal;
+ ret = skl_update_pipe_wm(cstate, &results->ddb, pipe_wm,
+ &changed);
+ if (ret)
+ return ret;
+
+ if (changed)
+ results->dirty_pipes |= drm_crtc_mask(crtc);
+
+ if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
+ /* This pipe's WM's did not change */
+ continue;
+
+ intel_cstate->update_wm_pre = true;
+ skl_compute_wm_results(crtc->dev, pipe_wm, results, intel_crtc);
+ }
+
+ return 0;
}
static void skl_update_wm(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct skl_wm_values *results = &dev_priv->wm.skl_results;
+ struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
- struct skl_pipe_wm *pipe_wm = &cstate->wm.optimal.skl;
-
-
- /* Clear all dirty flags */
- memset(results->dirty, 0, sizeof(bool) * I915_MAX_PIPES);
-
- skl_clear_wm(results, intel_crtc->pipe);
+ struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
+ int pipe;
- if (!skl_update_pipe_wm(crtc, &results->ddb, pipe_wm))
+ if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
return;
- skl_compute_wm_results(dev, pipe_wm, results, intel_crtc);
- results->dirty[intel_crtc->pipe] = true;
+ intel_crtc->wm.active.skl = *pipe_wm;
+
+ mutex_lock(&dev_priv->wm.wm_mutex);
- skl_update_other_pipe_wm(dev, crtc, results);
skl_write_wm_values(dev_priv, results);
skl_flush_wm_values(dev_priv, results);
- /* store the new configuration */
- dev_priv->wm.skl_hw = *results;
+ /*
+ * Store the new configuration (but only for the pipes that have
+ * changed; the other values weren't recomputed).
+ */
+ for_each_pipe_masked(dev_priv, pipe, results->dirty_pipes)
+ skl_copy_wm_for_pipe(hw_vals, results, pipe);
+
+ mutex_unlock(&dev_priv->wm.wm_mutex);
}
static void ilk_compute_wm_config(struct drm_device *dev,
@@ -3855,7 +4247,7 @@ static void ilk_compute_wm_config(struct drm_device *dev,
static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
struct ilk_wm_maximums max;
struct intel_wm_config config = {};
@@ -3892,7 +4284,7 @@ static void ilk_initial_watermarks(struct intel_crtc_state *cstate)
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
mutex_lock(&dev_priv->wm.wm_mutex);
- intel_crtc->wm.active.ilk = cstate->wm.intermediate;
+ intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate;
ilk_program_watermarks(dev_priv);
mutex_unlock(&dev_priv->wm.wm_mutex);
}
@@ -3904,7 +4296,7 @@ static void ilk_optimize_watermarks(struct intel_crtc_state *cstate)
mutex_lock(&dev_priv->wm.wm_mutex);
if (cstate->wm.need_postvbl_update) {
- intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk;
+ intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal;
ilk_program_watermarks(dev_priv);
}
mutex_unlock(&dev_priv->wm.wm_mutex);
@@ -3957,11 +4349,11 @@ static void skl_pipe_wm_active_state(uint32_t val,
static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
- struct skl_pipe_wm *active = &cstate->wm.optimal.skl;
+ struct skl_pipe_wm *active = &cstate->wm.skl.optimal;
enum pipe pipe = intel_crtc->pipe;
int level, i, max_level;
uint32_t temp;
@@ -3984,7 +4376,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
if (!intel_crtc->active)
return;
- hw->dirty[pipe] = true;
+ hw->dirty_pipes |= drm_crtc_mask(crtc);
active->linetime = hw->wm_linetime[pipe];
@@ -4011,7 +4403,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
void skl_wm_get_hw_state(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
struct drm_crtc *crtc;
@@ -4019,17 +4411,23 @@ void skl_wm_get_hw_state(struct drm_device *dev)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
skl_pipe_wm_get_hw_state(crtc);
- skl_sagv_get_hw_state(dev_priv);
+ if (dev_priv->active_crtcs) {
+ /* Fully recompute DDB on first atomic commit */
+ dev_priv->wm.distrust_bios_wm = true;
+ } else {
+ /* Easy/common case; just sanitize DDB now if everything off */
+ memset(ddb, 0, sizeof(*ddb));
+ }
}
static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct ilk_wm_values *hw = &dev_priv->wm.hw;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
- struct intel_pipe_wm *active = &cstate->wm.optimal.ilk;
+ struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
enum pipe pipe = intel_crtc->pipe;
static const i915_reg_t wm0_pipe_reg[] = {
[PIPE_A] = WM0_PIPEA_ILK,
@@ -4229,7 +4627,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
void ilk_wm_get_hw_state(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct ilk_wm_values *hw = &dev_priv->wm.hw;
struct drm_crtc *crtc;
@@ -4291,7 +4689,7 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
*/
void intel_update_watermarks(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
if (dev_priv->display.update_wm)
dev_priv->display.update_wm(crtc);
@@ -4306,9 +4704,8 @@ DEFINE_SPINLOCK(mchdev_lock);
* mchdev_lock. */
static struct drm_i915_private *i915_mch_dev;
-bool ironlake_set_drps(struct drm_device *dev, u8 val)
+bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u16 rgvswctl;
assert_spin_locked(&mchdev_lock);
@@ -4330,9 +4727,8 @@ bool ironlake_set_drps(struct drm_device *dev, u8 val)
return true;
}
-static void ironlake_enable_drps(struct drm_device *dev)
+static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 rgvmodectl;
u8 fmax, fmin, fstart, vstart;
@@ -4389,7 +4785,7 @@ static void ironlake_enable_drps(struct drm_device *dev)
DRM_ERROR("stuck trying to change perf mode\n");
mdelay(1);
- ironlake_set_drps(dev, fstart);
+ ironlake_set_drps(dev_priv, fstart);
dev_priv->ips.last_count1 = I915_READ(DMIEC) +
I915_READ(DDREC) + I915_READ(CSIEC);
@@ -4400,9 +4796,8 @@ static void ironlake_enable_drps(struct drm_device *dev)
spin_unlock_irq(&mchdev_lock);
}
-static void ironlake_disable_drps(struct drm_device *dev)
+static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u16 rgvswctl;
spin_lock_irq(&mchdev_lock);
@@ -4417,7 +4812,7 @@ static void ironlake_disable_drps(struct drm_device *dev)
I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
/* Go back to the starting frequency */
- ironlake_set_drps(dev, dev_priv->ips.fstart);
+ ironlake_set_drps(dev_priv, dev_priv->ips.fstart);
mdelay(1);
rgvswctl |= MEMCTL_CMD_STS;
I915_WRITE(MEMSWCTL, rgvswctl);
@@ -4463,19 +4858,23 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
new_power = dev_priv->rps.power;
switch (dev_priv->rps.power) {
case LOW_POWER:
- if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
+ if (val > dev_priv->rps.efficient_freq + 1 &&
+ val > dev_priv->rps.cur_freq)
new_power = BETWEEN;
break;
case BETWEEN:
- if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
+ if (val <= dev_priv->rps.efficient_freq &&
+ val < dev_priv->rps.cur_freq)
new_power = LOW_POWER;
- else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
+ else if (val >= dev_priv->rps.rp0_freq &&
+ val > dev_priv->rps.cur_freq)
new_power = HIGH_POWER;
break;
case HIGH_POWER:
- if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
+ if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 &&
+ val < dev_priv->rps.cur_freq)
new_power = BETWEEN;
break;
}
@@ -4521,22 +4920,24 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
}
I915_WRITE(GEN6_RP_UP_EI,
- GT_INTERVAL_FROM_US(dev_priv, ei_up));
+ GT_INTERVAL_FROM_US(dev_priv, ei_up));
I915_WRITE(GEN6_RP_UP_THRESHOLD,
- GT_INTERVAL_FROM_US(dev_priv, (ei_up * threshold_up / 100)));
+ GT_INTERVAL_FROM_US(dev_priv,
+ ei_up * threshold_up / 100));
I915_WRITE(GEN6_RP_DOWN_EI,
- GT_INTERVAL_FROM_US(dev_priv, ei_down));
+ GT_INTERVAL_FROM_US(dev_priv, ei_down));
I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
- GT_INTERVAL_FROM_US(dev_priv, (ei_down * threshold_down / 100)));
+ GT_INTERVAL_FROM_US(dev_priv,
+ ei_down * threshold_down / 100));
- I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX |
- GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_AVG);
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
dev_priv->rps.power = new_power;
dev_priv->rps.up_threshold = threshold_up;
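The switch earlier in this hunk implements a three-state hysteresis: moving up requires the requested frequency to clear both the state boundary and the current frequency, so transient requests don't bounce the power state. Isolated as a pure function, with the rps frequency fields passed in explicitly:

enum rps_power { LOW_POWER, BETWEEN, HIGH_POWER };

static enum rps_power next_power(enum rps_power cur, unsigned int val,
				 unsigned int cur_freq,
				 unsigned int efficient_freq,
				 unsigned int rp0_freq,
				 unsigned int rp1_freq)
{
	switch (cur) {
	case LOW_POWER:
		if (val > efficient_freq + 1 && val > cur_freq)
			return BETWEEN;
		break;
	case BETWEEN:
		if (val <= efficient_freq && val < cur_freq)
			return LOW_POWER;
		if (val >= rp0_freq && val > cur_freq)
			return HIGH_POWER;
		break;
	case HIGH_POWER:
		if (val < (rp1_freq + rp0_freq) / 2 && val < cur_freq)
			return BETWEEN;
		break;
	}
	return cur;
}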
@@ -4561,12 +4962,10 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
/* gen6_set_rps is called to update the frequency request, but should also be
* called when the range (min_delay and max_delay) is modified so that we can
* update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
-static void gen6_set_rps(struct drm_device *dev, u8 val)
+static void gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
return;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -4579,10 +4978,10 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
if (val != dev_priv->rps.cur_freq) {
gen6_set_rps_thresholds(dev_priv, val);
- if (IS_GEN9(dev))
+ if (IS_GEN9(dev_priv))
I915_WRITE(GEN6_RPNSWREQ,
GEN9_FREQUENCY(val));
- else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
I915_WRITE(GEN6_RPNSWREQ,
HSW_FREQUENCY(val));
else
@@ -4604,15 +5003,13 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
}
-static void valleyview_set_rps(struct drm_device *dev, u8 val)
+static void valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
WARN_ON(val > dev_priv->rps.max_freq);
WARN_ON(val < dev_priv->rps.min_freq);
- if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
+ if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
"Odd GPU freq value\n"))
val &= ~1;
@@ -4645,7 +5042,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
/* Wake up the media well, as that takes a lot less
* power than the Render well. */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
- valleyview_set_rps(dev_priv->dev, val);
+ valleyview_set_rps(dev_priv, val);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
}
@@ -4657,20 +5054,33 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
gen6_rps_reset_ei(dev_priv);
I915_WRITE(GEN6_PMINTRMSK,
gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
+
+ gen6_enable_rps_interrupts(dev_priv);
+
+ /* Ensure we start at the user's desired frequency */
+ intel_set_rps(dev_priv,
+ clamp(dev_priv->rps.cur_freq,
+ dev_priv->rps.min_freq_softlimit,
+ dev_priv->rps.max_freq_softlimit));
}
mutex_unlock(&dev_priv->rps.hw_lock);
}
void gen6_rps_idle(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ /* Flush our bottom-half so that it does not race with us
+ * setting the idle frequency and so that it is bounded by
+ * our rpm wakeref. And then disable the interrupts to stop any
+ * further RPS reclocking whilst we are asleep.
+ */
+ gen6_disable_rps_interrupts(dev_priv);
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_set_rps_idle(dev_priv);
else
- gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+ gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
dev_priv->rps.last_adj = 0;
I915_WRITE(GEN6_PMINTRMSK,
gen6_sanitize_rps_pm_mask(dev_priv, ~0));
@@ -4690,7 +5100,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
/* This is intentionally racy! We peek at the state here, then
* validate inside the RPS worker.
*/
- if (!(dev_priv->mm.busy &&
+ if (!(dev_priv->gt.awake &&
dev_priv->rps.enabled &&
dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit))
return;
@@ -4706,7 +5116,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->rps.interrupts_enabled) {
dev_priv->rps.client_boost = true;
- queue_work(dev_priv->wq, &dev_priv->rps.work);
+ schedule_work(&dev_priv->rps.work);
}
spin_unlock_irq(&dev_priv->irq_lock);
@@ -4719,49 +5129,39 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
spin_unlock(&dev_priv->rps.client_lock);
}
-void intel_set_rps(struct drm_device *dev, u8 val)
+void intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
- valleyview_set_rps(dev, val);
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ valleyview_set_rps(dev_priv, val);
else
- gen6_set_rps(dev, val);
+ gen6_set_rps(dev_priv, val);
}
-static void gen9_disable_rc6(struct drm_device *dev)
+static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN9_PG_ENABLE, 0);
}
-static void gen9_disable_rps(struct drm_device *dev)
+static void gen9_disable_rps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
I915_WRITE(GEN6_RP_CONTROL, 0);
}
-static void gen6_disable_rps(struct drm_device *dev)
+static void gen6_disable_rps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
I915_WRITE(GEN6_RP_CONTROL, 0);
}
-static void cherryview_disable_rps(struct drm_device *dev)
+static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
I915_WRITE(GEN6_RC_CONTROL, 0);
}
-static void valleyview_disable_rps(struct drm_device *dev)
+static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
/* we're doing forcewake before disabling RC6,
* This is what the BIOS expects when going into suspend */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -4771,34 +5171,45 @@ static void valleyview_disable_rps(struct drm_device *dev)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
-static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
+static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
{
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
mode = GEN6_RC_CTL_RC6_ENABLE;
else
mode = 0;
}
- if (HAS_RC6p(dev))
- DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
- onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
- onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
- onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
+ if (HAS_RC6p(dev_priv))
+ DRM_DEBUG_DRIVER("Enabling RC6 states: "
+ "RC6 %s RC6p %s RC6pp %s\n",
+ onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
+ onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
+ onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
else
- DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
- onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
+ DRM_DEBUG_DRIVER("Enabling RC6 states: RC6 %s\n",
+ onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
}
-static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
+static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
bool enable_rc6 = true;
unsigned long rc6_ctx_base;
+ u32 rc_ctl;
+ int rc_sw_target;
+
+ rc_ctl = I915_READ(GEN6_RC_CONTROL);
+ rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >>
+ RC_SW_TARGET_STATE_SHIFT;
+ DRM_DEBUG_DRIVER("BIOS enabled RC states: "
+ "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
+ onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
+ onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
+ rc_sw_target);
if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
- DRM_DEBUG_KMS("RC6 Base location not set properly.\n");
+ DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
enable_rc6 = false;
}
@@ -4810,7 +5221,7 @@ static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
(rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
ggtt->stolen_reserved_size))) {
- DRM_DEBUG_KMS("RC6 Base address not as expected.\n");
+ DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
enable_rc6 = false;
}
@@ -4818,31 +5229,40 @@ static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
- DRM_DEBUG_KMS("Engine Idle wait time not set properly.\n");
+ DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
enable_rc6 = false;
}
- if (!(I915_READ(GEN6_RC_CONTROL) & (GEN6_RC_CTL_RC6_ENABLE |
- GEN6_RC_CTL_HW_ENABLE)) &&
- ((I915_READ(GEN6_RC_CONTROL) & GEN6_RC_CTL_HW_ENABLE) ||
- !(I915_READ(GEN6_RC_STATE) & RC6_STATE))) {
- DRM_DEBUG_KMS("HW/SW RC6 is not enabled by BIOS.\n");
+ if (!I915_READ(GEN8_PUSHBUS_CONTROL) ||
+ !I915_READ(GEN8_PUSHBUS_ENABLE) ||
+ !I915_READ(GEN8_PUSHBUS_SHIFT)) {
+ DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
+ enable_rc6 = false;
+ }
+
+ if (!I915_READ(GEN6_GFXPAUSE)) {
+ DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
+ enable_rc6 = false;
+ }
+
+ if (!I915_READ(GEN8_MISC_CTRL0)) {
+ DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
enable_rc6 = false;
}
return enable_rc6;
}
-int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
+int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
{
/* No RC6 before Ironlake and code is gone for ilk. */
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_INFO(dev_priv)->gen < 6)
return 0;
if (!enable_rc6)
return 0;
- if (IS_BROXTON(dev) && !bxt_check_bios_rc6_setup(dev)) {
+ if (IS_BROXTON(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) {
DRM_INFO("RC6 disabled by BIOS\n");
return 0;
}
@@ -4851,33 +5271,28 @@ int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
if (enable_rc6 >= 0) {
int mask;
- if (HAS_RC6p(dev))
+ if (HAS_RC6p(dev_priv))
mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
INTEL_RC6pp_ENABLE;
else
mask = INTEL_RC6_ENABLE;
if ((enable_rc6 & mask) != enable_rc6)
- DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
- enable_rc6 & mask, enable_rc6, mask);
+ DRM_DEBUG_DRIVER("Adjusting RC6 mask to %d "
+ "(requested %d, valid %d)\n",
+ enable_rc6 & mask, enable_rc6, mask);
return enable_rc6 & mask;
}
- if (IS_IVYBRIDGE(dev))
+ if (IS_IVYBRIDGE(dev_priv))
return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
return INTEL_RC6_ENABLE;
}
-int intel_enable_rc6(const struct drm_device *dev)
+static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
{
- return i915.enable_rc6;
-}
-
-static void gen6_init_rps_frequencies(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t rp_state_cap;
u32 ddcc_status = 0;
int ret;
@@ -4885,7 +5300,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
/* All of these values are in units of 50MHz */
dev_priv->rps.cur_freq = 0;
/* static values from HW: RP0 > RP1 > RPn (min_freq) */
- if (IS_BROXTON(dev)) {
+ if (IS_BROXTON(dev_priv)) {
rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
@@ -4901,8 +5316,8 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev) ||
- IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
+ IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
ret = sandybridge_pcode_read(dev_priv,
HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
&ddcc_status);
@@ -4914,7 +5329,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
dev_priv->rps.max_freq);
}
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
/* Store the frequency values in 16.66 MHz units, which is
the natural hardware unit for SKL */
dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
@@ -4931,7 +5346,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
if (dev_priv->rps.min_freq_softlimit == 0) {
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
dev_priv->rps.min_freq_softlimit =
max_t(int, dev_priv->rps.efficient_freq,
intel_freq_opcode(dev_priv, 450));
@@ -4942,16 +5357,14 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
}
/* See the Gen9_GT_PM_Programming_Guide doc for the below */
-static void gen9_enable_rps(struct drm_device *dev)
+static void gen9_enable_rps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- gen6_init_rps_frequencies(dev);
+ gen6_init_rps_frequencies(dev_priv);
/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
/*
* BIOS could leave the Hw Turbo enabled, so need to explicitly
* clear out the Control register just to avoid inconsistency
@@ -4961,7 +5374,7 @@ static void gen9_enable_rps(struct drm_device *dev)
* if the Turbo is left enabled in the Control register, as the
* Up/Down interrupts would remain masked.
*/
- gen9_disable_rps(dev);
+ gen9_disable_rps(dev_priv);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return;
}
@@ -4980,14 +5393,13 @@ static void gen9_enable_rps(struct drm_device *dev)
* Up/Down EI & threshold registers, as well as the RP_CONTROL,
* RP_INTERRUPT_LIMITS & RPNSWREQ registers */
dev_priv->rps.power = HIGH_POWER; /* force a reset */
- gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+ gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
-static void gen9_enable_rc6(struct drm_device *dev)
+static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
uint32_t rc6_mask = 0;
@@ -5004,7 +5416,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
/* 2b: Program RC6 thresholds.*/
/* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
- if (IS_SKYLAKE(dev))
+ if (IS_SKYLAKE(dev_priv))
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
else
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
@@ -5013,7 +5425,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
for_each_engine(engine, dev_priv)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
- if (HAS_GUC_UCODE(dev))
+ if (HAS_GUC(dev_priv))
I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
I915_WRITE(GEN6_RC_SLEEP, 0);
@@ -5023,12 +5435,12 @@ static void gen9_enable_rc6(struct drm_device *dev)
I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
/* 3a: Enable RC6 */
- if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+ if (intel_enable_rc6() & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
/* WaRsUseTimeoutMode */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
GEN7_RC_CTL_TO_MODE |
@@ -5044,19 +5456,17 @@ static void gen9_enable_rc6(struct drm_device *dev)
* 3b: Enable Coarse Power Gating only when RC6 is enabled.
* WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
*/
- if (NEEDS_WaRsDisableCoarsePowerGating(dev))
+ if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
I915_WRITE(GEN9_PG_ENABLE, 0);
else
I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
(GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-
}
-static void gen8_enable_rps(struct drm_device *dev)
+static void gen8_enable_rps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
uint32_t rc6_mask = 0;
@@ -5071,7 +5481,7 @@ static void gen8_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_CONTROL, 0);
/* Initialize rps frequencies */
- gen6_init_rps_frequencies(dev);
+ gen6_init_rps_frequencies(dev_priv);
/* 2b: Program RC6 thresholds.*/
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
@@ -5080,16 +5490,16 @@ static void gen8_enable_rps(struct drm_device *dev)
for_each_engine(engine, dev_priv)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
- if (IS_BROADWELL(dev))
+ if (IS_BROADWELL(dev_priv))
I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
else
I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
/* 3: Enable RC6 */
- if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+ if (intel_enable_rc6() & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
- intel_print_rc6_info(dev, rc6_mask);
- if (IS_BROADWELL(dev))
+ intel_print_rc6_info(dev_priv, rc6_mask);
+ if (IS_BROADWELL(dev_priv))
I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
GEN7_RC_CTL_TO_MODE |
rc6_mask);
@@ -5130,14 +5540,13 @@ static void gen8_enable_rps(struct drm_device *dev)
/* 6: Ring frequency + overclocking (our driver does this later) */
dev_priv->rps.power = HIGH_POWER; /* force a reset */
- gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+ gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
-static void gen6_enable_rps(struct drm_device *dev)
+static void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
u32 gtfifodbg;
@@ -5164,7 +5573,7 @@ static void gen6_enable_rps(struct drm_device *dev)
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* Initialize rps frequencies */
- gen6_init_rps_frequencies(dev);
+ gen6_init_rps_frequencies(dev_priv);
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -5180,7 +5589,7 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
- if (IS_IVYBRIDGE(dev))
+ if (IS_IVYBRIDGE(dev_priv))
I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
else
I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
@@ -5188,12 +5597,12 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
/* Check if we are enabling RC6 */
- rc6_mode = intel_enable_rc6(dev_priv->dev);
+ rc6_mode = intel_enable_rc6();
if (rc6_mode & INTEL_RC6_ENABLE)
rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
/* We don't use those on Haswell */
- if (!IS_HASWELL(dev)) {
+ if (!IS_HASWELL(dev_priv)) {
if (rc6_mode & INTEL_RC6p_ENABLE)
rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
@@ -5201,7 +5610,7 @@ static void gen6_enable_rps(struct drm_device *dev)
rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
}
- intel_print_rc6_info(dev, rc6_mask);
+ intel_print_rc6_info(dev_priv, rc6_mask);
I915_WRITE(GEN6_RC_CONTROL,
rc6_mask |
@@ -5225,13 +5634,13 @@ static void gen6_enable_rps(struct drm_device *dev)
}
dev_priv->rps.power = HIGH_POWER; /* force a reset */
- gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+ gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
rc6vids = 0;
ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
- if (IS_GEN6(dev) && ret) {
+ if (IS_GEN6(dev_priv) && ret) {
DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
- } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
+ } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
rc6vids &= 0xffff00;
@@ -5244,9 +5653,8 @@ static void gen6_enable_rps(struct drm_device *dev)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
-static void __gen6_update_ring_freq(struct drm_device *dev)
+static void __gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int min_freq = 15;
unsigned int gpu_freq;
unsigned int max_ia_freq, min_ring_freq;
@@ -5275,7 +5683,7 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
/* convert DDR frequency from units of 266.6MHz to bandwidth */
min_ring_freq = mult_frac(min_ring_freq, 8, 3);
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
/* Convert GT frequency to 50 MHz units */
min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
@@ -5293,16 +5701,16 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
int diff = max_gpu_freq - gpu_freq;
unsigned int ia_freq = 0, ring_freq = 0;
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
/*
* ring_freq = 2 * GT. ring_freq is in 100MHz units
* No floor required for ring frequency on SKL.
*/
ring_freq = gpu_freq;
- } else if (INTEL_INFO(dev)->gen >= 8) {
+ } else if (INTEL_INFO(dev_priv)->gen >= 8) {
/* max(2 * GT, DDR). NB: GT is 50MHz units */
ring_freq = max(min_ring_freq, gpu_freq);
- } else if (IS_HASWELL(dev)) {
+ } else if (IS_HASWELL(dev_priv)) {
ring_freq = mult_frac(gpu_freq, 5, 4);
ring_freq = max(min_ring_freq, ring_freq);
/* leave ia_freq as the default, chosen by cpufreq */
@@ -5329,26 +5737,23 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
}
}
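For reference, the three ring-frequency branches above reduce to simple integer arithmetic. A minimal standalone sketch (the frequency values are made up for illustration; mult_frac() is expanded the way the kernel macro does it):

#include <stdio.h>

/* x * n / d without the intermediate overflow of (x * n) */
static unsigned int mult_frac(unsigned int x, unsigned int n, unsigned int d)
{
	return (x / d) * n + (x % d) * n / d;
}

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned int gpu_freq = 18;		/* GT frequency, 50 MHz units (900 MHz) */
	unsigned int min_ring_freq = 16;	/* DDR-derived floor */

	/* SKL/KBL: ring simply follows GT, no DDR floor */
	printf("skl/kbl ring_freq: %u\n", gpu_freq);
	/* gen8: max(2 * GT, DDR), with GT already pre-scaled here */
	printf("bdw     ring_freq: %u\n", max_u(min_ring_freq, gpu_freq));
	/* hsw: ring at 1.25x GT, floored by the DDR frequency */
	printf("hsw     ring_freq: %u\n",
	       max_u(min_ring_freq, mult_frac(gpu_freq, 5, 4)));
	return 0;
}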
-void gen6_update_ring_freq(struct drm_device *dev)
+void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!HAS_CORE_RING_FREQ(dev))
+ if (!HAS_CORE_RING_FREQ(dev_priv))
return;
mutex_lock(&dev_priv->rps.hw_lock);
- __gen6_update_ring_freq(dev);
+ __gen6_update_ring_freq(dev_priv);
mutex_unlock(&dev_priv->rps.hw_lock);
}
static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
u32 val, rp0;
val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
- switch (INTEL_INFO(dev)->eu_total) {
+ switch (INTEL_INFO(dev_priv)->eu_total) {
case 8:
/* (2 * 4) config */
rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
@@ -5459,9 +5864,8 @@ static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
}
-static void cherryview_setup_pctx(struct drm_device *dev)
+static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long pctx_paddr, paddr;
u32 pcbr;
@@ -5480,15 +5884,14 @@ static void cherryview_setup_pctx(struct drm_device *dev)
DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
}
-static void valleyview_setup_pctx(struct drm_device *dev)
+static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *pctx;
unsigned long pctx_paddr;
u32 pcbr;
int pctx_size = 24*1024;
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&dev_priv->drm.struct_mutex);
pcbr = I915_READ(VLV_PCBR);
if (pcbr) {
@@ -5496,7 +5899,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
int pcbr_offset;
pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
- pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
+ pctx = i915_gem_object_create_stolen_for_preallocated(&dev_priv->drm,
pcbr_offset,
I915_GTT_OFFSET_NONE,
pctx_size);
@@ -5513,7 +5916,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
* overlap with other ranges, such as the frame buffer, protected
* memory, or any other relevant ranges.
*/
- pctx = i915_gem_object_create_stolen(dev, pctx_size);
+ pctx = i915_gem_object_create_stolen(&dev_priv->drm, pctx_size);
if (!pctx) {
DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
goto out;
@@ -5525,13 +5928,11 @@ static void valleyview_setup_pctx(struct drm_device *dev)
out:
DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
dev_priv->vlv_pctx = pctx;
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
}
-static void valleyview_cleanup_pctx(struct drm_device *dev)
+static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
if (WARN_ON(!dev_priv->vlv_pctx))
return;
@@ -5550,12 +5951,11 @@ static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
dev_priv->rps.gpll_ref_freq);
}
-static void valleyview_init_gt_powersave(struct drm_device *dev)
+static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
- valleyview_setup_pctx(dev);
+ valleyview_setup_pctx(dev_priv);
vlv_init_gpll_ref_freq(dev_priv);
@@ -5609,12 +6009,11 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
mutex_unlock(&dev_priv->rps.hw_lock);
}
-static void cherryview_init_gt_powersave(struct drm_device *dev)
+static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
- cherryview_setup_pctx(dev);
+ cherryview_setup_pctx(dev_priv);
vlv_init_gpll_ref_freq(dev_priv);
@@ -5674,14 +6073,13 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
mutex_unlock(&dev_priv->rps.hw_lock);
}
-static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
+static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
{
- valleyview_cleanup_pctx(dev);
+ valleyview_cleanup_pctx(dev_priv);
}
-static void cherryview_enable_rps(struct drm_device *dev)
+static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
u32 gtfifodbg, val, rc6_mode = 0, pcbr;
@@ -5726,8 +6124,8 @@ static void cherryview_enable_rps(struct drm_device *dev)
pcbr = I915_READ(VLV_PCBR);
/* 3: Enable RC6 */
- if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
- (pcbr >> VLV_PCBR_ADDR_SHIFT))
+ if ((intel_enable_rc6() & INTEL_RC6_ENABLE) &&
+ (pcbr >> VLV_PCBR_ADDR_SHIFT))
rc6_mode = GEN7_RC_CTL_TO_MODE;
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
@@ -5772,14 +6170,13 @@ static void cherryview_enable_rps(struct drm_device *dev)
intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
dev_priv->rps.idle_freq);
- valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+ valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
-static void valleyview_enable_rps(struct drm_device *dev)
+static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
u32 gtfifodbg, val, rc6_mode = 0;
@@ -5832,10 +6229,10 @@ static void valleyview_enable_rps(struct drm_device *dev)
VLV_MEDIA_RC6_COUNT_EN |
VLV_RENDER_RC6_COUNT_EN));
- if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+ if (intel_enable_rc6() & INTEL_RC6_ENABLE)
rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
- intel_print_rc6_info(dev, rc6_mode);
+ intel_print_rc6_info(dev_priv, rc6_mode);
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
@@ -5862,7 +6259,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
dev_priv->rps.idle_freq);
- valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+ valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -5952,10 +6349,9 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
unsigned long val;
- if (INTEL_INFO(dev)->gen != 5)
+ if (INTEL_INFO(dev_priv)->gen != 5)
return 0;
spin_lock_irq(&mchdev_lock);
@@ -5995,11 +6391,10 @@ static int _pxvid_to_vd(u8 pxvid)
static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
- struct drm_device *dev = dev_priv->dev;
const int vd = _pxvid_to_vd(pxvid);
const int vm = vd - 1125;
- if (INTEL_INFO(dev)->is_mobile)
+ if (INTEL_INFO(dev_priv)->is_mobile)
return vm > 0 ? vm : 0;
return vd;
@@ -6040,9 +6435,7 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
-
- if (INTEL_INFO(dev)->gen != 5)
+ if (INTEL_INFO(dev_priv)->gen != 5)
return;
spin_lock_irq(&mchdev_lock);
@@ -6091,10 +6484,9 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
unsigned long val;
- if (INTEL_INFO(dev)->gen != 5)
+ if (INTEL_INFO(dev_priv)->gen != 5)
return 0;
spin_lock_irq(&mchdev_lock);
@@ -6235,7 +6627,7 @@ bool i915_gpu_turbo_disable(void)
dev_priv->ips.max_delay = dev_priv->ips.fstart;
- if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
+ if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
ret = false;
out_unlock:
@@ -6283,9 +6675,8 @@ void intel_gpu_ips_teardown(void)
spin_unlock_irq(&mchdev_lock);
}
-static void intel_init_emon(struct drm_device *dev)
+static void intel_init_emon(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 lcfuse;
u8 pxw[16];
int i;
@@ -6354,10 +6745,8 @@ static void intel_init_emon(struct drm_device *dev)
dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}
-void intel_init_gt_powersave(struct drm_device *dev)
+void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
/*
* RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
* requirement.
@@ -6367,74 +6756,64 @@ void intel_init_gt_powersave(struct drm_device *dev)
intel_runtime_pm_get(dev_priv);
}
- if (IS_CHERRYVIEW(dev))
- cherryview_init_gt_powersave(dev);
- else if (IS_VALLEYVIEW(dev))
- valleyview_init_gt_powersave(dev);
+ if (IS_CHERRYVIEW(dev_priv))
+ cherryview_init_gt_powersave(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv))
+ valleyview_init_gt_powersave(dev_priv);
}
-void intel_cleanup_gt_powersave(struct drm_device *dev)
+void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (IS_CHERRYVIEW(dev))
- return;
- else if (IS_VALLEYVIEW(dev))
- valleyview_cleanup_gt_powersave(dev);
+ if (IS_VALLEYVIEW(dev_priv))
+ valleyview_cleanup_gt_powersave(dev_priv);
if (!i915.enable_rc6)
intel_runtime_pm_put(dev_priv);
}
-static void gen6_suspend_rps(struct drm_device *dev)
+static void gen6_suspend_rps(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
- gen6_disable_rps_interrupts(dev);
+ gen6_disable_rps_interrupts(dev_priv);
}
/**
* intel_suspend_gt_powersave - suspend PM work and helper threads
- * @dev: drm device
+ * @dev_priv: i915 device
*
* We don't want to disable RC6 or other features here; we just want
* to make sure any work we've queued has finished and won't bother
* us while we're suspended.
*/
-void intel_suspend_gt_powersave(struct drm_device *dev)
+void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_GEN(dev_priv) < 6)
return;
- gen6_suspend_rps(dev);
+ gen6_suspend_rps(dev_priv);
/* Force GPU to min freq during suspend */
gen6_rps_idle(dev_priv);
}
-void intel_disable_gt_powersave(struct drm_device *dev)
+void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (IS_IRONLAKE_M(dev)) {
- ironlake_disable_drps(dev);
- } else if (INTEL_INFO(dev)->gen >= 6) {
- intel_suspend_gt_powersave(dev);
+ if (IS_IRONLAKE_M(dev_priv)) {
+ ironlake_disable_drps(dev_priv);
+ } else if (INTEL_INFO(dev_priv)->gen >= 6) {
+ intel_suspend_gt_powersave(dev_priv);
mutex_lock(&dev_priv->rps.hw_lock);
- if (INTEL_INFO(dev)->gen >= 9) {
- gen9_disable_rc6(dev);
- gen9_disable_rps(dev);
- } else if (IS_CHERRYVIEW(dev))
- cherryview_disable_rps(dev);
- else if (IS_VALLEYVIEW(dev))
- valleyview_disable_rps(dev);
+ if (INTEL_INFO(dev_priv)->gen >= 9) {
+ gen9_disable_rc6(dev_priv);
+ gen9_disable_rps(dev_priv);
+ } else if (IS_CHERRYVIEW(dev_priv))
+ cherryview_disable_rps(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv))
+ valleyview_disable_rps(dev_priv);
else
- gen6_disable_rps(dev);
+ gen6_disable_rps(dev_priv);
dev_priv->rps.enabled = false;
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -6446,27 +6825,26 @@ static void intel_gen6_powersave_work(struct work_struct *work)
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private,
rps.delayed_resume_work.work);
- struct drm_device *dev = dev_priv->dev;
mutex_lock(&dev_priv->rps.hw_lock);
- gen6_reset_rps_interrupts(dev);
+ gen6_reset_rps_interrupts(dev_priv);
- if (IS_CHERRYVIEW(dev)) {
- cherryview_enable_rps(dev);
- } else if (IS_VALLEYVIEW(dev)) {
- valleyview_enable_rps(dev);
- } else if (INTEL_INFO(dev)->gen >= 9) {
- gen9_enable_rc6(dev);
- gen9_enable_rps(dev);
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
- __gen6_update_ring_freq(dev);
- } else if (IS_BROADWELL(dev)) {
- gen8_enable_rps(dev);
- __gen6_update_ring_freq(dev);
+ if (IS_CHERRYVIEW(dev_priv)) {
+ cherryview_enable_rps(dev_priv);
+ } else if (IS_VALLEYVIEW(dev_priv)) {
+ valleyview_enable_rps(dev_priv);
+ } else if (INTEL_INFO(dev_priv)->gen >= 9) {
+ gen9_enable_rc6(dev_priv);
+ gen9_enable_rps(dev_priv);
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ __gen6_update_ring_freq(dev_priv);
+ } else if (IS_BROADWELL(dev_priv)) {
+ gen8_enable_rps(dev_priv);
+ __gen6_update_ring_freq(dev_priv);
} else {
- gen6_enable_rps(dev);
- __gen6_update_ring_freq(dev);
+ gen6_enable_rps(dev_priv);
+ __gen6_update_ring_freq(dev_priv);
}
WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
@@ -6477,27 +6855,25 @@ static void intel_gen6_powersave_work(struct work_struct *work)
dev_priv->rps.enabled = true;
- gen6_enable_rps_interrupts(dev);
+ gen6_enable_rps_interrupts(dev_priv);
mutex_unlock(&dev_priv->rps.hw_lock);
intel_runtime_pm_put(dev_priv);
}
-void intel_enable_gt_powersave(struct drm_device *dev)
+void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
/* Powersaving is controlled by the host when inside a VM */
- if (intel_vgpu_active(dev))
+ if (intel_vgpu_active(dev_priv))
return;
- if (IS_IRONLAKE_M(dev)) {
- ironlake_enable_drps(dev);
- mutex_lock(&dev->struct_mutex);
- intel_init_emon(dev);
- mutex_unlock(&dev->struct_mutex);
- } else if (INTEL_INFO(dev)->gen >= 6) {
+ if (IS_IRONLAKE_M(dev_priv)) {
+ ironlake_enable_drps(dev_priv);
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ intel_init_emon(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ } else if (INTEL_INFO(dev_priv)->gen >= 6) {
/*
* PCU communication is slow and this doesn't need to be
* done at any specific time, so do this out of our fast path
@@ -6516,20 +6892,18 @@ void intel_enable_gt_powersave(struct drm_device *dev)
}
}
-void intel_reset_gt_powersave(struct drm_device *dev)
+void intel_reset_gt_powersave(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (INTEL_INFO(dev)->gen < 6)
+ if (INTEL_INFO(dev_priv)->gen < 6)
return;
- gen6_suspend_rps(dev);
+ gen6_suspend_rps(dev_priv);
dev_priv->rps.enabled = false;
}
static void ibx_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/*
* On Ibex Peak and Cougar Point, we need to disable clock
@@ -6541,7 +6915,7 @@ static void ibx_init_clock_gating(struct drm_device *dev)
static void g4x_disable_trickle_feed(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
@@ -6556,7 +6930,7 @@ static void g4x_disable_trickle_feed(struct drm_device *dev)
static void ilk_init_lp_watermarks(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
@@ -6570,7 +6944,7 @@ static void ilk_init_lp_watermarks(struct drm_device *dev)
static void ironlake_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
/*
@@ -6644,7 +7018,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
static void cpt_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
uint32_t val;
@@ -6681,7 +7055,7 @@ static void cpt_init_clock_gating(struct drm_device *dev)
static void gen6_check_mch_setup(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t tmp;
tmp = I915_READ(MCH_SSKPD);
@@ -6692,7 +7066,7 @@ static void gen6_check_mch_setup(struct drm_device *dev)
static void gen6_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
@@ -6807,7 +7181,7 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
static void lpt_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/*
* TODO: this bit should only be enabled when really needed, then
@@ -6826,7 +7200,7 @@ static void lpt_init_clock_gating(struct drm_device *dev)
static void lpt_suspend_hw(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (HAS_PCH_LPT_LP(dev)) {
uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
@@ -6836,6 +7210,29 @@ static void lpt_suspend_hw(struct drm_device *dev)
}
}
+static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
+ int general_prio_credits,
+ int high_prio_credits)
+{
+ u32 misccpctl;
+
+ /* WaTempDisableDOPClkGating:bdw */
+ misccpctl = I915_READ(GEN7_MISCCPCTL);
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+
+ I915_WRITE(GEN8_L3SQCREG1,
+ L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
+ L3_HIGH_PRIO_CREDITS(high_prio_credits));
+
+ /*
+ * Wait at least 100 clocks before re-enabling clock gating.
+ * See the definition of L3SQCREG1 in BSpec.
+ */
+ POSTING_READ(GEN8_L3SQCREG1);
+ udelay(1);
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+}
+
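The register value gen8_set_l3sqc_credits() composes can be checked by hand. A small sketch, assuming the L3_GENERAL_PRIO_CREDITS()/L3_HIGH_PRIO_CREDITS() macros place the halved credit counts at bits 19 and 14; under that assumption, 30/2 reproduces the old hard-coded BDW_WA_L3SQCREG1_DEFAULT:

#include <stdio.h>
#include <stdint.h>

/* Assumed encoding: credits are programmed in pairs, so each count is
 * halved before being shifted into its field. */
static uint32_t l3sqcreg1(unsigned int general, unsigned int high)
{
	return ((uint32_t)(general >> 1) << 19) | ((uint32_t)(high >> 1) << 14);
}

int main(void)
{
	printf("bdw 30/2 -> 0x%06x\n", l3sqcreg1(30, 2));	/* 0x784000 */
	printf("chv 38/2 -> 0x%06x\n", l3sqcreg1(38, 2));	/* 0x984000 */
	return 0;
}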
static void kabylake_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6863,6 +7260,10 @@ static void skylake_init_clock_gating(struct drm_device *dev)
gen9_init_clock_gating(dev);
+ /* WAC6entrylatency:skl */
+ I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
+ FBC_LLC_FULLY_OPEN);
+
/* WaFbcNukeOnHostModify:skl */
I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
@@ -6870,9 +7271,8 @@ static void skylake_init_clock_gating(struct drm_device *dev)
static void broadwell_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
- uint32_t misccpctl;
ilk_init_lp_watermarks(dev);
@@ -6903,20 +7303,8 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
- /*
- * WaProgramL3SqcReg1Default:bdw
- * WaTempDisableDOPClkGating:bdw
- */
- misccpctl = I915_READ(GEN7_MISCCPCTL);
- I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
- I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
- /*
- * Wait at least 100 clocks before re-enabling clock gating. See
- * the definition of L3SQCREG1 in BSpec.
- */
- POSTING_READ(GEN8_L3SQCREG1);
- udelay(1);
- I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+ /* WaProgramL3SqcReg1Default:bdw */
+ gen8_set_l3sqc_credits(dev_priv, 30, 2);
/*
* WaGttCachingOffByDefault:bdw
@@ -6925,12 +7313,16 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
*/
I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
+ /* WaKVMNotificationOnConfigChange:bdw */
+ I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
+ | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
+
lpt_init_clock_gating(dev);
}
static void haswell_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
ilk_init_lp_watermarks(dev);
@@ -6986,7 +7378,7 @@ static void haswell_init_clock_gating(struct drm_device *dev)
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t snpcr;
ilk_init_lp_watermarks(dev);
@@ -7084,7 +7476,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
static void valleyview_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/* WaDisableEarlyCull:vlv */
I915_WRITE(_3D_CHICKEN3,
@@ -7166,7 +7558,7 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
static void cherryview_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/* WaVSRefCountFullforceMissDisable:chv */
/* WaDSRefCountFullforceMissDisable:chv */
@@ -7187,6 +7579,13 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
/*
+ * WaProgramL3SqcReg1Default:chv
+ * See gfxspecs/Related Documents/Performance Guide/
+ * LSQC Setting Recommendations.
+ */
+ gen8_set_l3sqc_credits(dev_priv, 38, 2);
+
+ /*
* GTT cache may not work with big pages, so if those
* are ever enabled GTT cache may need to be disabled.
*/
@@ -7195,7 +7594,7 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
static void g4x_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t dspclk_gate;
I915_WRITE(RENCLK_GATE_D1, 0);
@@ -7222,7 +7621,7 @@ static void g4x_init_clock_gating(struct drm_device *dev)
static void crestline_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
I915_WRITE(RENCLK_GATE_D2, 0);
@@ -7238,7 +7637,7 @@ static void crestline_init_clock_gating(struct drm_device *dev)
static void broadwater_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
I965_RCC_CLOCK_GATE_DISABLE |
@@ -7255,7 +7654,7 @@ static void broadwater_init_clock_gating(struct drm_device *dev)
static void gen3_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 dstate = I915_READ(D_STATE);
dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
@@ -7280,7 +7679,7 @@ static void gen3_init_clock_gating(struct drm_device *dev)
static void i85x_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
@@ -7294,7 +7693,7 @@ static void i85x_init_clock_gating(struct drm_device *dev)
static void i830_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
@@ -7305,7 +7704,7 @@ static void i830_init_clock_gating(struct drm_device *dev)
void intel_init_clock_gating(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
dev_priv->display.init_clock_gating(dev);
}
@@ -7373,7 +7772,7 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
intel_fbc_init(dev_priv);
@@ -7387,6 +7786,7 @@ void intel_init_pm(struct drm_device *dev)
if (INTEL_INFO(dev)->gen >= 9) {
skl_setup_wm_latency(dev);
dev_priv->display.update_wm = skl_update_wm;
+ dev_priv->display.compute_global_watermarks = skl_compute_wm;
} else if (HAS_PCH_SPLIT(dev)) {
ilk_setup_wm_latency(dev);
@@ -7446,50 +7846,133 @@ void intel_init_pm(struct drm_device *dev)
}
}
+static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
+{
+ uint32_t flags =
+ I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
+
+ switch (flags) {
+ case GEN6_PCODE_SUCCESS:
+ return 0;
+ case GEN6_PCODE_UNIMPLEMENTED_CMD:
+ case GEN6_PCODE_ILLEGAL_CMD:
+ return -ENXIO;
+ case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+ case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+ return -EOVERFLOW;
+ case GEN6_PCODE_TIMEOUT:
+ return -ETIMEDOUT;
+ default:
+ MISSING_CASE(flags);
+ return 0;
+ }
+}
+
+static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
+{
+ uint32_t flags =
+ I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
+
+ switch (flags) {
+ case GEN6_PCODE_SUCCESS:
+ return 0;
+ case GEN6_PCODE_ILLEGAL_CMD:
+ return -ENXIO;
+ case GEN7_PCODE_TIMEOUT:
+ return -ETIMEDOUT;
+ case GEN7_PCODE_ILLEGAL_DATA:
+ return -EINVAL;
+ case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+ return -EOVERFLOW;
+ default:
+ MISSING_CASE(flags);
+ return 0;
+ }
+}
+
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
{
+ int status;
+
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
- if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+ /* GEN6_PCODE_* are outside of the forcewake domain, so we can
+ * use the _fw I915_READ variants to reduce the amount of work
+ * required when reading/writing.
+ */
+
+ if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
return -EAGAIN;
}
- I915_WRITE(GEN6_PCODE_DATA, *val);
- I915_WRITE(GEN6_PCODE_DATA1, 0);
- I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+ I915_WRITE_FW(GEN6_PCODE_DATA, *val);
+ I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
+ I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500)) {
+ if (intel_wait_for_register_fw(dev_priv,
+ GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
+ 500)) {
DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
return -ETIMEDOUT;
}
- *val = I915_READ(GEN6_PCODE_DATA);
- I915_WRITE(GEN6_PCODE_DATA, 0);
+ *val = I915_READ_FW(GEN6_PCODE_DATA);
+ I915_WRITE_FW(GEN6_PCODE_DATA, 0);
+
+ if (INTEL_GEN(dev_priv) > 6)
+ status = gen7_check_mailbox_status(dev_priv);
+ else
+ status = gen6_check_mailbox_status(dev_priv);
+
+ if (status) {
+ DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed: %d\n",
+ status);
+ return status;
+ }
return 0;
}
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val)
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
+ u32 mbox, u32 val)
{
+ int status;
+
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
- if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+ /* GEN6_PCODE_* are outside of the forcewake domain, so we can
+ * use the _fw I915_READ variants to reduce the amount of work
+ * required when reading/writing.
+ */
+
+ if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
return -EAGAIN;
}
- I915_WRITE(GEN6_PCODE_DATA, val);
- I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+ I915_WRITE_FW(GEN6_PCODE_DATA, val);
+ I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
- if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
- 500)) {
+ if (intel_wait_for_register_fw(dev_priv,
+ GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
+ 500)) {
DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
return -ETIMEDOUT;
}
- I915_WRITE(GEN6_PCODE_DATA, 0);
+ I915_WRITE_FW(GEN6_PCODE_DATA, 0);
+
+ if (INTEL_GEN(dev_priv) > 6)
+ status = gen7_check_mailbox_status(dev_priv);
+ else
+ status = gen6_check_mailbox_status(dev_priv);
+
+ if (status) {
+ DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed: %d\n",
+ status);
+ return status;
+ }
return 0;
}
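Both pcode helpers follow the same handshake: bail if the mailbox is still busy, write the payload, set the READY bit together with the command, poll for READY to clear, then decode the status field. A toy model of that flow (register layout, bit positions, and the instantly-completing fake firmware are all illustrative, not hardware-accurate):

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define PCODE_READY	(1u << 31)
#define PCODE_ERR_MASK	0xffu
#define PCODE_SUCCESS	0x0u

static uint32_t mailbox, data;

/* Fake firmware: completes any command immediately and bumps the data. */
static void fw_tick(void)
{
	if (mailbox & PCODE_READY) {
		data += 1;
		mailbox &= ~(PCODE_READY | PCODE_ERR_MASK);
		mailbox |= PCODE_SUCCESS;
	}
}

static int pcode_read(uint32_t mbox, uint32_t *val)
{
	if (mailbox & PCODE_READY)
		return -EAGAIN;		/* mailbox busy, as in the driver */

	data = *val;
	mailbox = PCODE_READY | mbox;	/* kick the command */
	fw_tick();			/* stands in for the 500 ms poll */

	if (mailbox & PCODE_READY)
		return -ETIMEDOUT;	/* READY never cleared */

	*val = data;
	return (mailbox & PCODE_ERR_MASK) == PCODE_SUCCESS ? 0 : -ENXIO;
}

int main(void)
{
	uint32_t val = 41;
	int ret = pcode_read(0x5, &val);

	printf("ret=%d val=%u\n", ret, val);	/* ret=0 val=42 */
	return 0;
}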
@@ -7559,23 +8042,21 @@ static void __intel_rps_boost_work(struct work_struct *work)
struct request_boost *boost = container_of(work, struct request_boost, work);
struct drm_i915_gem_request *req = boost->req;
- if (!i915_gem_request_completed(req, true))
- gen6_rps_boost(to_i915(req->engine->dev), NULL,
- req->emitted_jiffies);
+ if (!i915_gem_request_completed(req))
+ gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);
- i915_gem_request_unreference__unlocked(req);
+ i915_gem_request_unreference(req);
kfree(boost);
}
-void intel_queue_rps_boost_for_request(struct drm_device *dev,
- struct drm_i915_gem_request *req)
+void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
{
struct request_boost *boost;
- if (req == NULL || INTEL_INFO(dev)->gen < 6)
+ if (req == NULL || INTEL_GEN(req->i915) < 6)
return;
- if (i915_gem_request_completed(req, true))
+ if (i915_gem_request_completed(req))
return;
boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
@@ -7586,12 +8067,12 @@ void intel_queue_rps_boost_for_request(struct drm_device *dev,
boost->req = req;
INIT_WORK(&boost->work, __intel_rps_boost_work);
- queue_work(to_i915(dev)->wq, &boost->work);
+ queue_work(req->i915->wq, &boost->work);
}
void intel_pm_setup(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
mutex_init(&dev_priv->rps.hw_lock);
spin_lock_init(&dev_priv->rps.client_lock);
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index a788d1e95..cf171b4b8 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -63,7 +63,7 @@ static bool is_edp_psr(struct intel_dp *intel_dp)
static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t val;
val = I915_READ(VLV_PSRSTAT(pipe)) &
@@ -77,7 +77,7 @@ static void intel_psr_write_vsc(struct intel_dp *intel_dp,
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
@@ -107,7 +107,7 @@ static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
uint32_t val;
@@ -173,10 +173,9 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t aux_clock_divider;
i915_reg_t aux_ctl_reg;
- int precharge = 0x3;
static const uint8_t aux_msg[] = {
[0] = DP_AUX_NATIVE_WRITE << 4,
[1] = DP_SET_POWER >> 8,
@@ -185,6 +184,7 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
[4] = DP_SET_POWER_D0,
};
enum port port = dig_port->port;
+ u32 aux_ctl;
int i;
BUILD_BUG_ON(sizeof(aux_msg) > 20);
@@ -197,6 +197,13 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
DP_AUX_FRAME_SYNC_ENABLE);
+ if (dev_priv->psr.link_standby)
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+ DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
+ else
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+ DP_PSR_ENABLE);
+
aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);
/* Setup AUX registers */
@@ -204,40 +211,16 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
- if (INTEL_INFO(dev)->gen >= 9) {
- uint32_t val;
-
- val = I915_READ(aux_ctl_reg);
- val &= ~DP_AUX_CH_CTL_TIME_OUT_MASK;
- val |= DP_AUX_CH_CTL_TIME_OUT_1600us;
- val &= ~DP_AUX_CH_CTL_MESSAGE_SIZE_MASK;
- val |= (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
- /* Use hardcoded data values for PSR, frame sync and GTC */
- val &= ~DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL;
- val &= ~DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL;
- val &= ~DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL;
- I915_WRITE(aux_ctl_reg, val);
- } else {
- I915_WRITE(aux_ctl_reg,
- DP_AUX_CH_CTL_TIME_OUT_400us |
- (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
- (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
- }
-
- if (dev_priv->psr.link_standby)
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
- DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
- else
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
- DP_PSR_ENABLE);
+ aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg),
+ aux_clock_divider);
+ I915_WRITE(aux_ctl_reg, aux_ctl);
}
static void vlv_psr_enable_source(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc = dig_port->base.base.crtc;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
@@ -252,7 +235,7 @@ static void vlv_psr_activate(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc = dig_port->base.base.crtc;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
@@ -269,7 +252,7 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t max_sleep_time = 0x1f;
/*
@@ -341,9 +324,12 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc = dig_port->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ const struct drm_display_mode *adjusted_mode =
+ &intel_crtc->config->base.adjusted_mode;
+ int psr_setup_time;
lockdep_assert_held(&dev_priv->psr.lock);
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
@@ -382,11 +368,25 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
}
if (IS_HASWELL(dev) &&
- intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
return false;
}
+ psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
+ if (psr_setup_time < 0) {
+ DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
+ intel_dp->psr_dpcd[1]);
+ return false;
+ }
+
+ if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
+ adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
+ DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
+ psr_setup_time);
+ return false;
+ }
+
dev_priv->psr.source_ok = true;
return true;
}
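The setup-time check above is plain arithmetic: the sink's PSR setup time, converted to scanlines at the current mode's pixel clock, must fit inside the vertical blank. A worked example with 1080p60 CEA timings (the conversion mirrors what intel_usecs_to_scanlines() is assumed to do: round up us * pixel_clock_khz / (1000 * htotal)):

#include <stdio.h>

static int usecs_to_scanlines(long usecs, long clock_khz, long htotal)
{
	long num = usecs * clock_khz;
	long den = 1000L * htotal;

	return (int)((num + den - 1) / den);	/* DIV_ROUND_UP */
}

int main(void)
{
	long clock_khz = 148500, htotal = 2200;		/* 1080p60 */
	int vtotal = 1125, vdisplay = 1080;
	int psr_setup_time_us = 330;	/* e.g. reported via DP_PSR_SETUP_TIME */

	int needed = usecs_to_scanlines(psr_setup_time_us, clock_khz, htotal);
	int avail = vtotal - vdisplay - 1;

	printf("need %d scanlines, have %d -> PSR %s\n",
	       needed, avail, needed > avail ? "rejected" : "allowed");
	return 0;
}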
@@ -395,7 +395,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
WARN_ON(dev_priv->psr.active);
@@ -424,7 +424,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
if (!HAS_PSR(dev)) {
@@ -511,15 +511,18 @@ static void vlv_psr_disable(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc =
to_intel_crtc(intel_dig_port->base.base.crtc);
uint32_t val;
if (dev_priv->psr.active) {
/* Put VLV PSR back to PSR_state 0 that is PSR Disabled. */
- if (wait_for((I915_READ(VLV_PSRSTAT(intel_crtc->pipe)) &
- VLV_EDP_PSR_IN_TRANS) == 0, 1))
+ if (intel_wait_for_register(dev_priv,
+ VLV_PSRSTAT(intel_crtc->pipe),
+ VLV_EDP_PSR_IN_TRANS,
+ 0,
+ 1))
WARN(1, "PSR transition took longer than expected\n");
val = I915_READ(VLV_PSRCTL(intel_crtc->pipe));
@@ -538,16 +541,18 @@ static void hsw_psr_disable(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (dev_priv->psr.active) {
I915_WRITE(EDP_PSR_CTL,
I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
/* Wait till PSR is idle */
- if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
- EDP_PSR_STATUS_STATE_MASK) == 0,
- 2 * USEC_PER_SEC, 10 * USEC_PER_MSEC))
+ if (intel_wait_for_register(dev_priv,
+ EDP_PSR_STATUS_CTL,
+ EDP_PSR_STATUS_STATE_MASK,
+ 0,
+ 2000))
DRM_ERROR("Timed out waiting for PSR Idle State\n");
dev_priv->psr.active = false;
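Every wait_for() conversion in these hunks uses the same contract: poll until (read(reg) & mask) == value, or give up after the timeout. A self-contained sketch of that contract against a fake status register (poll counts stand in for the millisecond timeouts):

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

static uint32_t fake_status = 0x7;	/* stands in for EDP_PSR_STATUS_CTL */

static uint32_t read_reg(void)
{
	if (fake_status)
		fake_status -= 1;	/* pretend the hardware winds down */
	return fake_status;
}

static int wait_for_register(uint32_t mask, uint32_t value, int polls)
{
	while (polls--) {
		if ((read_reg() & mask) == value)
			return 0;
	}
	return -ETIMEDOUT;
}

int main(void)
{
	/* Same shape as the hsw_psr_disable() call above: wait for the
	 * status field to read back as zero. */
	int ret = wait_for_register(0x7, 0, 10);

	printf("%s\n", ret ? "timed out" : "idle");
	return 0;
}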
@@ -566,7 +571,7 @@ void intel_psr_disable(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
@@ -603,14 +608,20 @@ static void intel_psr_work(struct work_struct *work)
* and be ready for re-enable.
*/
if (HAS_DDI(dev_priv)) {
- if (wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
- EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
+ if (intel_wait_for_register(dev_priv,
+ EDP_PSR_STATUS_CTL,
+ EDP_PSR_STATUS_STATE_MASK,
+ 0,
+ 50)) {
DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
return;
}
} else {
- if (wait_for((I915_READ(VLV_PSRSTAT(pipe)) &
- VLV_EDP_PSR_IN_TRANS) == 0, 1)) {
+ if (intel_wait_for_register(dev_priv,
+ VLV_PSRSTAT(pipe),
+ VLV_EDP_PSR_IN_TRANS,
+ 0,
+ 1)) {
DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
return;
}
@@ -636,7 +647,7 @@ unlock:
static void intel_psr_exit(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_dp *intel_dp = dev_priv->psr.enabled;
struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
@@ -691,7 +702,7 @@ static void intel_psr_exit(struct drm_device *dev)
void intel_psr_single_frame_update(struct drm_device *dev,
unsigned frontbuffer_bits)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc;
enum pipe pipe;
u32 val;
@@ -739,7 +750,7 @@ void intel_psr_single_frame_update(struct drm_device *dev,
void intel_psr_invalidate(struct drm_device *dev,
unsigned frontbuffer_bits)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc;
enum pipe pipe;
@@ -777,7 +788,7 @@ void intel_psr_invalidate(struct drm_device *dev,
void intel_psr_flush(struct drm_device *dev,
unsigned frontbuffer_bits, enum fb_op_origin origin)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc;
enum pipe pipe;
@@ -813,7 +824,7 @@ void intel_psr_flush(struct drm_device *dev,
*/
void intel_psr_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9d778f3ab..1d3161bbe 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -34,6 +34,11 @@
#include "i915_trace.h"
#include "intel_drv.h"
+/* Rough estimate of the typical request size, performing a flush,
+ * set-context and then emitting the batch.
+ */
+#define LEGACY_REQUEST_SIZE 200
+
int __intel_ring_space(int head, int tail, int size)
{
int space = head - tail;
@@ -53,18 +58,10 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
ringbuf->tail, ringbuf->size);
}
-bool intel_engine_stopped(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
- return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine);
-}
-
static void __intel_ring_advance(struct intel_engine_cs *engine)
{
struct intel_ringbuffer *ringbuf = engine->buffer;
ringbuf->tail &= ringbuf->size - 1;
- if (intel_engine_stopped(engine))
- return;
engine->write_tail(engine, ringbuf->tail);
}
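The tail masking above is one half of the circular-buffer bookkeeping; __intel_ring_space() at the top of the file is the other. A worked example of the wrap-around arithmetic (the 64-byte slack is an assumption matching I915_RING_FREE_SPACE):

#include <stdio.h>

#define RING_FREE_SPACE 64	/* headroom the driver always keeps */

static int ring_space(int head, int tail, int size)
{
	int space = head - tail;

	if (space <= 0)
		space += size;	/* head has wrapped past tail */
	return space - RING_FREE_SPACE;
}

int main(void)
{
	printf("%d\n", ring_space(4096, 1024, 16384));	/* no wrap: 3008 */
	printf("%d\n", ring_space(1024, 4096, 16384));	/* wrapped: 13248 */
	return 0;
}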
@@ -101,7 +98,6 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
u32 flush_domains)
{
struct intel_engine_cs *engine = req->engine;
- struct drm_device *dev = engine->dev;
u32 cmd;
int ret;
@@ -140,7 +136,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
cmd |= MI_EXE_FLUSH;
if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
- (IS_G4X(dev) || IS_GEN5(dev)))
+ (IS_G4X(req->i915) || IS_GEN5(req->i915)))
cmd |= MI_INVALIDATE_ISP;
ret = intel_ring_begin(req, 2);
@@ -426,19 +422,19 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
static void ring_write_tail(struct intel_engine_cs *engine,
u32 value)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_TAIL(engine, value);
}
u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
u64 acthd;
- if (INTEL_INFO(engine->dev)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
RING_ACTHD_UDW(engine->mmio_base));
- else if (INTEL_INFO(engine->dev)->gen >= 4)
+ else if (INTEL_GEN(dev_priv) >= 4)
acthd = I915_READ(RING_ACTHD(engine->mmio_base));
else
acthd = I915_READ(ACTHD);
@@ -448,25 +444,24 @@ u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
u32 addr;
addr = dev_priv->status_page_dmah->busaddr;
- if (INTEL_INFO(engine->dev)->gen >= 4)
+ if (INTEL_GEN(dev_priv) >= 4)
addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
I915_WRITE(HWS_PGA, addr);
}
static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
i915_reg_t mmio;
/* The ring status page addresses are no longer next to the rest of
* the ring registers as of gen7.
*/
- if (IS_GEN7(dev)) {
+ if (IS_GEN7(dev_priv)) {
switch (engine->id) {
case RCS:
mmio = RENDER_HWS_PGA_GEN7;
@@ -486,7 +481,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
mmio = VEBOX_HWS_PGA_GEN7;
break;
}
- } else if (IS_GEN6(engine->dev)) {
+ } else if (IS_GEN6(dev_priv)) {
mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
} else {
/* XXX: gen8 returns to sanity */
@@ -503,7 +498,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
* arises: do we still need this and if so how should we go about
* invalidating the TLB?
*/
- if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
+ if (IS_GEN(dev_priv, 6, 7)) {
i915_reg_t reg = RING_INSTPM(engine->mmio_base);
/* ring should be idle before issuing a sync flush */
@@ -512,8 +507,9 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
I915_WRITE(reg,
_MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
INSTPM_SYNC_FLUSH));
- if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
- 1000))
+ if (intel_wait_for_register(dev_priv,
+ reg, INSTPM_SYNC_FLUSH, 0,
+ 1000))
DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
engine->name);
}
@@ -521,11 +517,15 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
static bool stop_ring(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ struct drm_i915_private *dev_priv = engine->i915;
- if (!IS_GEN2(engine->dev)) {
+ if (!IS_GEN2(dev_priv)) {
I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
- if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
+ if (intel_wait_for_register(dev_priv,
+ RING_MI_MODE(engine->mmio_base),
+ MODE_IDLE,
+ MODE_IDLE,
+ 1000)) {
DRM_ERROR("%s : timed out trying to stop ring\n",
engine->name);
/* Sometimes we observe that the idle flag is not
@@ -541,7 +541,7 @@ static bool stop_ring(struct intel_engine_cs *engine)
I915_WRITE_HEAD(engine, 0);
engine->write_tail(engine, 0);
- if (!IS_GEN2(engine->dev)) {
+ if (!IS_GEN2(dev_priv)) {
(void)I915_READ_CTL(engine);
I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
}
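The STOP_RING writes rely on i915's masked registers: the top 16 bits of the written value select which of the low 16 bits take effect, so unrelated bits survive without a read-modify-write. A sketch of the semantics (the STOP_RING bit position is illustrative):

#include <stdio.h>
#include <stdint.h>

#define MASKED_BIT_ENABLE(x)	((uint32_t)(((x) << 16) | (x)))
#define MASKED_BIT_DISABLE(x)	((uint32_t)((x) << 16))

/* What the hardware does with a masked write: only bits whose mask bit
 * (bits 31:16) is set are updated from the value bits (15:0). */
static uint32_t apply_masked_write(uint32_t old, uint32_t val)
{
	uint32_t mask = val >> 16;

	return (old & ~mask) | (val & mask & 0xffff);
}

int main(void)
{
	uint32_t mode = 0, STOP_RING = 1u << 8;	/* bit position illustrative */

	mode = apply_masked_write(mode, MASKED_BIT_ENABLE(STOP_RING));
	printf("after enable:  0x%04x\n", mode);	/* bit 8 set */
	mode = apply_masked_write(mode, MASKED_BIT_DISABLE(STOP_RING));
	printf("after disable: 0x%04x\n", mode);	/* bit 8 clear */
	return 0;
}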
@@ -556,8 +556,7 @@ void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
static int init_ring_common(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
struct intel_ringbuffer *ringbuf = engine->buffer;
struct drm_i915_gem_object *obj = ringbuf->obj;
int ret = 0;
@@ -587,7 +586,7 @@ static int init_ring_common(struct intel_engine_cs *engine)
}
}
- if (I915_NEED_GFX_HWS(dev))
+ if (I915_NEED_GFX_HWS(dev_priv))
intel_ring_setup_status_page(engine);
else
ring_setup_phys_status_page(engine);
@@ -641,59 +640,42 @@ out:
return ret;
}
-void
-intel_fini_pipe_control(struct intel_engine_cs *engine)
+void intel_fini_pipe_control(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
-
if (engine->scratch.obj == NULL)
return;
- if (INTEL_INFO(dev)->gen >= 5) {
- kunmap(sg_page(engine->scratch.obj->pages->sgl));
- i915_gem_object_ggtt_unpin(engine->scratch.obj);
- }
-
+ i915_gem_object_ggtt_unpin(engine->scratch.obj);
drm_gem_object_unreference(&engine->scratch.obj->base);
engine->scratch.obj = NULL;
}
-int
-intel_init_pipe_control(struct intel_engine_cs *engine)
+int intel_init_pipe_control(struct intel_engine_cs *engine, int size)
{
+ struct drm_i915_gem_object *obj;
int ret;
WARN_ON(engine->scratch.obj);
- engine->scratch.obj = i915_gem_alloc_object(engine->dev, 4096);
- if (engine->scratch.obj == NULL) {
- DRM_ERROR("Failed to allocate seqno page\n");
- ret = -ENOMEM;
+ obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
+ if (!obj)
+ obj = i915_gem_object_create(&engine->i915->drm, size);
+ if (IS_ERR(obj)) {
+ DRM_ERROR("Failed to allocate scratch page\n");
+ ret = PTR_ERR(obj);
goto err;
}
- ret = i915_gem_object_set_cache_level(engine->scratch.obj,
- I915_CACHE_LLC);
- if (ret)
- goto err_unref;
-
- ret = i915_gem_obj_ggtt_pin(engine->scratch.obj, 4096, 0);
+ ret = i915_gem_obj_ggtt_pin(obj, 4096, PIN_HIGH);
if (ret)
goto err_unref;
- engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(engine->scratch.obj);
- engine->scratch.cpu_page = kmap(sg_page(engine->scratch.obj->pages->sgl));
- if (engine->scratch.cpu_page == NULL) {
- ret = -ENOMEM;
- goto err_unpin;
- }
-
+ engine->scratch.obj = obj;
+ engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
engine->name, engine->scratch.gtt_offset);
return 0;
-err_unpin:
- i915_gem_object_ggtt_unpin(engine->scratch.obj);
err_unref:
drm_gem_object_unreference(&engine->scratch.obj->base);
err:
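The new allocation order, stolen memory first with a regular GEM object as fallback, is a cheap-pool-then-general-allocator pattern. A generic sketch with illustrative names (not driver API):

#include <stdio.h>
#include <stdlib.h>

/* Preferred pool; returns NULL when exhausted, as stolen memory often is. */
static void *alloc_from_stolen(size_t size)
{
	(void)size;
	return NULL;
}

static void *alloc_scratch(size_t size)
{
	void *obj = alloc_from_stolen(size);

	if (!obj)
		obj = malloc(size);	/* general-purpose fallback */
	return obj;
}

int main(void)
{
	void *scratch = alloc_scratch(4096);

	printf("scratch %s\n", scratch ? "allocated" : "failed");
	free(scratch);
	return 0;
}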
@@ -702,11 +684,9 @@ err:
static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
- int ret, i;
struct intel_engine_cs *engine = req->engine;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_workarounds *w = &dev_priv->workarounds;
+ struct i915_workarounds *w = &req->i915->workarounds;
+ int ret, i;
if (w->count == 0)
return 0;
@@ -795,7 +775,7 @@ static int wa_add(struct drm_i915_private *dev_priv,
static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
i915_reg_t reg)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
struct i915_workarounds *wa = &dev_priv->workarounds;
const uint32_t index = wa->hw_whitelist_count[engine->id];
@@ -811,8 +791,7 @@ static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
static int gen8_init_workarounds(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
@@ -863,9 +842,8 @@ static int gen8_init_workarounds(struct intel_engine_cs *engine)
static int bdw_init_workarounds(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
ret = gen8_init_workarounds(engine);
if (ret)
@@ -885,16 +863,15 @@ static int bdw_init_workarounds(struct intel_engine_cs *engine)
/* WaForceContextSaveRestoreNonCoherent:bdw */
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
/* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
- (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
+ (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
return 0;
}
static int chv_init_workarounds(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
ret = gen8_init_workarounds(engine);
if (ret)
@@ -911,8 +888,7 @@ static int chv_init_workarounds(struct intel_engine_cs *engine)
static int gen9_init_workarounds(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
/* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */
@@ -937,14 +913,14 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
/* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+ if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_DG_MIRROR_FIX_ENABLE);
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
GEN9_RHWO_OPTIMIZATION_DISABLE);
/*
@@ -970,8 +946,8 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
GEN9_CCS_TLB_PREFETCH_ENABLE);
/* WaDisableMaskBasedCammingInRCC:skl,bxt */
- if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) ||
- IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+ if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_C0) ||
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
PIXEL_MASK_CAMMING_DISABLE);
@@ -1035,8 +1011,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
u8 vals[3] = { 0, 0, 0 };
unsigned int i;
@@ -1077,9 +1052,8 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
static int skl_init_workarounds(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
ret = gen9_init_workarounds(engine);
if (ret)
@@ -1090,12 +1064,12 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
* until D0 which is the default case so this is equivalent to
* !WaDisablePerCtxtPreemptionGranularityControl:skl
*/
- if (IS_SKL_REVID(dev, SKL_REVID_E0, REVID_FOREVER)) {
+ if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER)) {
I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
_MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
}
- if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
+ if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0)) {
/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
I915_WRITE(FF_SLICE_CS_CHICKEN2,
_MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
@@ -1104,30 +1078,30 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
* involving this register should also be added to WA batch as required.
*/
- if (IS_SKL_REVID(dev, 0, SKL_REVID_E0))
+ if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0))
/* WaDisableLSQCROPERFforOCL:skl */
I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
GEN8_LQSC_RO_PERF_DIS);
/* WaEnableGapsTsvCreditFix:skl */
- if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER)) {
+ if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER)) {
I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
GEN9_GAPS_TSV_CREDIT_DISABLE));
}
/* WaDisablePowerCompilerClockGating:skl */
- if (IS_SKL_REVID(dev, SKL_REVID_B0, SKL_REVID_B0))
+ if (IS_SKL_REVID(dev_priv, SKL_REVID_B0, SKL_REVID_B0))
WA_SET_BIT_MASKED(HIZ_CHICKEN,
BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
/* WaBarrierPerformanceFixDisable:skl */
- if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0))
+ if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0))
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FENCE_DEST_SLM_DISABLE |
HDC_BARRIER_PERFORMANCE_DISABLE);
/* WaDisableSbeCacheDispatchPortSharing:skl */
- if (IS_SKL_REVID(dev, 0, SKL_REVID_F0))
+ if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
WA_SET_BIT_MASKED(
GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
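Each workaround above is gated on an IS_*_REVID(p, since, until) stepping window, with REVID_FOREVER as the open upper bound. A self-contained sketch of the range test; the macro names mirror the kernel's, the stepping values are illustrative only:

    #include <stdbool.h>
    #include <stdio.h>

    #define REVID_FOREVER 0xff /* upper bound meaning "this and all later steppings" */

    /* true when revid falls in the inclusive window [since, until] */
    static bool revid_in_range(unsigned int revid,
                               unsigned int since, unsigned int until)
    {
        return revid >= since && revid <= until;
    }

    int main(void)
    {
        unsigned int skl_revid_c0 = 0x2; /* illustrative stepping value */
        printf("%d\n", revid_in_range(skl_revid_c0, 0x2, REVID_FOREVER)); /* 1 */
        printf("%d\n", revid_in_range(skl_revid_c0, 0x0, 0x1));           /* 0 */
        return 0;
    }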
@@ -1150,9 +1124,8 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
static int bxt_init_workarounds(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
ret = gen9_init_workarounds(engine);
if (ret)
@@ -1160,11 +1133,11 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
/* WaStoreMultiplePTEenable:bxt */
/* This is a requirement according to Hardware specification */
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
/* WaSetClckGatingDisableMedia:bxt */
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
}
@@ -1173,8 +1146,14 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
STALL_DOP_GATING_DISABLE);
+ /* WaDisablePooledEuLoadBalancingFix:bxt */
+ if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
+ WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
+ GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
+ }
+
/* WaDisableSbeCacheDispatchPortSharing:bxt */
- if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
WA_SET_BIT_MASKED(
GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
@@ -1184,7 +1163,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
/* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
/* WaDisableObjectLevelPreemtionForInstanceId:bxt */
/* WaDisableLSQCROPERFforOCL:bxt */
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
if (ret)
return ret;
@@ -1194,8 +1173,13 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
return ret;
}
- /* WaInsertDummyPushConstPs:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
+ /* WaProgramL3SqcReg1DefaultForPerf:bxt */
+ if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
+ I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
+ L3_HIGH_PRIO_CREDITS(2));
+
+ /* WaToEnableHwFixForPushConstHWBug:bxt */
+ if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
@@ -1209,7 +1193,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
static int kbl_init_workarounds(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
ret = gen9_init_workarounds(engine);
@@ -1238,8 +1222,8 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
GEN8_LQSC_RO_PERF_DIS);
- /* WaInsertDummyPushConstPs:kbl */
- if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+ /* WaToEnableHwFixForPushConstHWBug:kbl */
+ if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
@@ -1265,24 +1249,23 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
int init_workarounds_ring(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
WARN_ON(engine->id != RCS);
dev_priv->workarounds.count = 0;
dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
- if (IS_BROADWELL(dev))
+ if (IS_BROADWELL(dev_priv))
return bdw_init_workarounds(engine);
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev_priv))
return chv_init_workarounds(engine);
- if (IS_SKYLAKE(dev))
+ if (IS_SKYLAKE(dev_priv))
return skl_init_workarounds(engine);
- if (IS_BROXTON(dev))
+ if (IS_BROXTON(dev_priv))
return bxt_init_workarounds(engine);
if (IS_KABYLAKE(dev_priv))
@@ -1293,14 +1276,13 @@ int init_workarounds_ring(struct intel_engine_cs *engine)
static int init_render_ring(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
int ret = init_ring_common(engine);
if (ret)
return ret;
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
- if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
+ if (IS_GEN(dev_priv, 4, 6))
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
/* We need to disable the AsyncFlip performance optimisations in order
@@ -1309,22 +1291,22 @@ static int init_render_ring(struct intel_engine_cs *engine)
*
* WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
*/
- if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
+ if (IS_GEN(dev_priv, 6, 7))
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
/* Required for the hardware to program scanline values for waiting */
/* WaEnableFlushTlbInvalidationMode:snb */
- if (INTEL_INFO(dev)->gen == 6)
+ if (IS_GEN6(dev_priv))
I915_WRITE(GFX_MODE,
_MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
- if (IS_GEN7(dev))
+ if (IS_GEN7(dev_priv))
I915_WRITE(GFX_MODE_GEN7,
_MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
_MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
- if (IS_GEN6(dev)) {
+ if (IS_GEN6(dev_priv)) {
/* From the Sandybridge PRM, volume 1 part 3, page 24:
* "If this bit is set, STCunit will have LRA as replacement
* policy. [...] This bit must be reset. LRA replacement
@@ -1334,19 +1316,18 @@ static int init_render_ring(struct intel_engine_cs *engine)
_MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
}
- if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
+ if (IS_GEN(dev_priv, 6, 7))
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
- if (HAS_L3_DPF(dev))
- I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
+ if (INTEL_INFO(dev_priv)->gen >= 6)
+ I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
return init_workarounds_ring(engine);
}
static void render_ring_cleanup(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
if (dev_priv->semaphore_obj) {
i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
@@ -1362,13 +1343,12 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
{
#define MBOX_UPDATE_DWORDS 8
struct intel_engine_cs *signaller = signaller_req->engine;
- struct drm_device *dev = signaller->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = signaller_req->i915;
struct intel_engine_cs *waiter;
enum intel_engine_id id;
int ret, num_rings;
- num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+ num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS
@@ -1377,19 +1357,17 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
return ret;
for_each_engine_id(waiter, dev_priv, id) {
- u32 seqno;
u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
- seqno = i915_gem_request_get_seqno(signaller_req);
intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_FLUSH_ENABLE);
+ PIPE_CONTROL_CS_STALL);
intel_ring_emit(signaller, lower_32_bits(gtt_offset));
intel_ring_emit(signaller, upper_32_bits(gtt_offset));
- intel_ring_emit(signaller, seqno);
+ intel_ring_emit(signaller, signaller_req->seqno);
intel_ring_emit(signaller, 0);
intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
MI_SEMAPHORE_TARGET(waiter->hw_id));
@@ -1404,13 +1382,12 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
{
#define MBOX_UPDATE_DWORDS 6
struct intel_engine_cs *signaller = signaller_req->engine;
- struct drm_device *dev = signaller->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = signaller_req->i915;
struct intel_engine_cs *waiter;
enum intel_engine_id id;
int ret, num_rings;
- num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+ num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS
@@ -1419,18 +1396,16 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
return ret;
for_each_engine_id(waiter, dev_priv, id) {
- u32 seqno;
u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
- seqno = i915_gem_request_get_seqno(signaller_req);
intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
MI_FLUSH_DW_OP_STOREDW);
intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
MI_FLUSH_DW_USE_GTT);
intel_ring_emit(signaller, upper_32_bits(gtt_offset));
- intel_ring_emit(signaller, seqno);
+ intel_ring_emit(signaller, signaller_req->seqno);
intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
MI_SEMAPHORE_TARGET(waiter->hw_id));
intel_ring_emit(signaller, 0);
@@ -1443,14 +1418,13 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
unsigned int num_dwords)
{
struct intel_engine_cs *signaller = signaller_req->engine;
- struct drm_device *dev = signaller->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = signaller_req->i915;
struct intel_engine_cs *useless;
enum intel_engine_id id;
int ret, num_rings;
#define MBOX_UPDATE_DWORDS 3
- num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+ num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
#undef MBOX_UPDATE_DWORDS
@@ -1462,11 +1436,9 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[id];
if (i915_mmio_reg_valid(mbox_reg)) {
- u32 seqno = i915_gem_request_get_seqno(signaller_req);
-
intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(signaller, mbox_reg);
- intel_ring_emit(signaller, seqno);
+ intel_ring_emit(signaller, signaller_req->seqno);
}
}
@@ -1502,17 +1474,45 @@ gen6_add_request(struct drm_i915_gem_request *req)
intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
intel_ring_emit(engine,
I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(engine, req->seqno);
+ intel_ring_emit(engine, MI_USER_INTERRUPT);
+ __intel_ring_advance(engine);
+
+ return 0;
+}
+
+static int
+gen8_render_add_request(struct drm_i915_gem_request *req)
+{
+ struct intel_engine_cs *engine = req->engine;
+ int ret;
+
+ if (engine->semaphore.signal)
+ ret = engine->semaphore.signal(req, 8);
+ else
+ ret = intel_ring_begin(req, 8);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
+ intel_ring_emit(engine, (PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE));
+ intel_ring_emit(engine, intel_hws_seqno_address(req->engine));
+ intel_ring_emit(engine, 0);
intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+ /* We're thrashing one dword of HWS. */
+ intel_ring_emit(engine, 0);
intel_ring_emit(engine, MI_USER_INTERRUPT);
+ intel_ring_emit(engine, MI_NOOP);
__intel_ring_advance(engine);
return 0;
}
-static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
+static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
u32 seqno)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
return dev_priv->last_seqno < seqno;
}
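gen8_render_add_request above emits a fixed 8-dword sequence: a 6-dword PIPE_CONTROL qword write of the seqno into the status page, then MI_USER_INTERRUPT plus a NOOP pad. A toy sketch of the emit/advance pattern over a plain array; the opcode values are stand-ins, not real encodings:

    #include <stdint.h>
    #include <stdio.h>

    struct ring { uint32_t buf[64]; unsigned int tail; };

    static void ring_emit(struct ring *r, uint32_t dw) { r->buf[r->tail++] = dw; }

    int main(void)
    {
        struct ring r = { .tail = 0 };
        uint32_t seqno = 42, hws_addr = 0x1000; /* illustrative values */

        ring_emit(&r, 0x7a000004); /* stand-in for GFX_OP_PIPE_CONTROL(6) */
        ring_emit(&r, 0x00100000); /* stand-in for QW_WRITE | CS_STALL flags */
        ring_emit(&r, hws_addr);   /* lower 32 bits of the status-page slot */
        ring_emit(&r, 0);          /* upper 32 bits */
        ring_emit(&r, seqno);      /* the value the CPU will poll for */
        ring_emit(&r, 0);          /* the qword write thrashes one extra dword */
        ring_emit(&r, 0x01000000); /* stand-in for MI_USER_INTERRUPT */
        ring_emit(&r, 0);          /* MI_NOOP pad to an even dword count */
        printf("emitted %u dwords\n", r.tail); /* 8 */
        return 0;
    }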
@@ -1530,7 +1530,9 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
u32 seqno)
{
struct intel_engine_cs *waiter = waiter_req->engine;
- struct drm_i915_private *dev_priv = waiter->dev->dev_private;
+ struct drm_i915_private *dev_priv = waiter_req->i915;
+ u64 offset = GEN8_WAIT_OFFSET(waiter, signaller->id);
+ struct i915_hw_ppgtt *ppgtt;
int ret;
ret = intel_ring_begin(waiter_req, 4);
@@ -1539,14 +1541,20 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_GTE_SDD);
intel_ring_emit(waiter, seqno);
- intel_ring_emit(waiter,
- lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
- intel_ring_emit(waiter,
- upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
+ intel_ring_emit(waiter, lower_32_bits(offset));
+ intel_ring_emit(waiter, upper_32_bits(offset));
intel_ring_advance(waiter);
+
+ /* When the !RCS engines idle waiting upon a semaphore, they lose their
+ * pagetables and we must reload them before executing the batch.
+ * We do this on the i915_switch_context() following the wait and
+ * before the dispatch.
+ */
+ ppgtt = waiter_req->ctx->ppgtt;
+ if (ppgtt && waiter_req->engine->id != RCS)
+ ppgtt->pd_dirty_rings |= intel_engine_flag(waiter_req->engine);
return 0;
}
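The pd_dirty_rings update above is a per-engine dirty bitmask: any non-render engine that idled on a semaphore must reload its page directories on the next i915_switch_context(). A stand-alone sketch of the bit bookkeeping; engine ids and the flag macro are modeled on the kernel's, the rest is illustrative:

    #include <stdio.h>

    enum engine_id { RCS, VCS, BCS, VECS, NUM_ENGINES };

    #define ENGINE_FLAG(id) (1u << (id))

    int main(void)
    {
        unsigned int pd_dirty_rings = 0;
        enum engine_id waiter = VCS;

        if (waiter != RCS)                  /* render keeps its tables */
            pd_dirty_rings |= ENGINE_FLAG(waiter);

        /* the next context switch would test and clear this bit */
        printf("dirty mask: 0x%x\n", pd_dirty_rings); /* 0x2 */
        return 0;
    }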
@@ -1575,7 +1583,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
return ret;
/* If seqno wrap happened, omit the wait with no-ops */
- if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
+ if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) {
intel_ring_emit(waiter, dw1 | wait_mbox);
intel_ring_emit(waiter, seqno);
intel_ring_emit(waiter, 0);
@@ -1591,72 +1599,28 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
return 0;
}
-#define PIPE_CONTROL_FLUSH(ring__, addr__) \
-do { \
- intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
- PIPE_CONTROL_DEPTH_STALL); \
- intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
- intel_ring_emit(ring__, 0); \
- intel_ring_emit(ring__, 0); \
-} while (0)
-
-static int
-pc_render_add_request(struct drm_i915_gem_request *req)
+static void
+gen5_seqno_barrier(struct intel_engine_cs *ring)
{
- struct intel_engine_cs *engine = req->engine;
- u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
- int ret;
-
- /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
- * incoherent with writes to memory, i.e. completely fubar,
- * so we need to use PIPE_NOTIFY instead.
+ /* MI_STORE are internally buffered by the GPU and not flushed
+ * either by MI_FLUSH or SyncFlush or any other combination of
+ * MI commands.
+ *
+ * "Only the submission of the store operation is guaranteed.
+ * The write result will be complete (coherent) some time later
+ * (this is practically a finite period but there is no guaranteed
+ * latency)."
*
- * However, we also need to workaround the qword write
- * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
- * memory before requesting an interrupt.
+ * Empirically, we observe that we need a delay of at least 75us to
+ * be sure that the seqno write is visible by the CPU.
*/
- ret = intel_ring_begin(req, 32);
- if (ret)
- return ret;
-
- intel_ring_emit(engine,
- GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_WRITE_FLUSH |
- PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
- intel_ring_emit(engine,
- engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(engine, i915_gem_request_get_seqno(req));
- intel_ring_emit(engine, 0);
- PIPE_CONTROL_FLUSH(engine, scratch_addr);
- scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
- PIPE_CONTROL_FLUSH(engine, scratch_addr);
- scratch_addr += 2 * CACHELINE_BYTES;
- PIPE_CONTROL_FLUSH(engine, scratch_addr);
- scratch_addr += 2 * CACHELINE_BYTES;
- PIPE_CONTROL_FLUSH(engine, scratch_addr);
- scratch_addr += 2 * CACHELINE_BYTES;
- PIPE_CONTROL_FLUSH(engine, scratch_addr);
- scratch_addr += 2 * CACHELINE_BYTES;
- PIPE_CONTROL_FLUSH(engine, scratch_addr);
-
- intel_ring_emit(engine,
- GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_WRITE_FLUSH |
- PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
- PIPE_CONTROL_NOTIFY);
- intel_ring_emit(engine,
- engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(engine, i915_gem_request_get_seqno(req));
- intel_ring_emit(engine, 0);
- __intel_ring_advance(engine);
-
- return 0;
+ usleep_range(125, 250);
}
static void
gen6_seqno_barrier(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
/* Workaround to force correct ordering between irq and seqno writes on
* ivb (and maybe also on snb) by reading from a CS register (like
@@ -1678,133 +1642,54 @@ gen6_seqno_barrier(struct intel_engine_cs *engine)
spin_unlock_irq(&dev_priv->uncore.lock);
}
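The two barriers differ in mechanism: gen5 has no way to flush the posted seqno write, so it waits out the worst-case latency, while gen6 forces ordering with a register read. A user-space analogy of the two styles, assuming only POSIX usleep; the "CS register" here is simulated with a volatile load:

    #include <unistd.h>
    #include <stdio.h>

    static volatile unsigned int fake_cs_register; /* stands in for an uncore register */

    /* gen5 style: wait out the write's worst-case latency; the patch uses
     * usleep_range(125, 250) for a documented >=75us visibility window. */
    static void gen5_style_barrier(void)
    {
        usleep(125);
    }

    /* gen6 style: a read from the engine orders the earlier seqno write */
    static void gen6_style_barrier(void)
    {
        unsigned int v = fake_cs_register;
        (void)v;
    }

    int main(void)
    {
        gen5_style_barrier();
        gen6_style_barrier();
        puts("seqno assumed visible");
        return 0;
    }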
-static u32
-ring_get_seqno(struct intel_engine_cs *engine)
-{
- return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
-}
-
static void
-ring_set_seqno(struct intel_engine_cs *engine, u32 seqno)
-{
- intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
-}
-
-static u32
-pc_render_get_seqno(struct intel_engine_cs *engine)
+gen5_irq_enable(struct intel_engine_cs *engine)
{
- return engine->scratch.cpu_page[0];
+ gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
}
static void
-pc_render_set_seqno(struct intel_engine_cs *engine, u32 seqno)
-{
- engine->scratch.cpu_page[0] = seqno;
-}
-
-static bool
-gen5_ring_get_irq(struct intel_engine_cs *engine)
+gen5_irq_disable(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
-
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
- return false;
-
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (engine->irq_refcount++ == 0)
- gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
- return true;
+ gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
}
static void
-gen5_ring_put_irq(struct intel_engine_cs *engine)
+i9xx_irq_enable(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
+ struct drm_i915_private *dev_priv = engine->i915;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--engine->irq_refcount == 0)
- gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-}
-
-static bool
-i9xx_ring_get_irq(struct intel_engine_cs *engine)
-{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
-
- if (!intel_irqs_enabled(dev_priv))
- return false;
-
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (engine->irq_refcount++ == 0) {
- dev_priv->irq_mask &= ~engine->irq_enable_mask;
- I915_WRITE(IMR, dev_priv->irq_mask);
- POSTING_READ(IMR);
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
- return true;
+ dev_priv->irq_mask &= ~engine->irq_enable_mask;
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ POSTING_READ_FW(RING_IMR(engine->mmio_base));
}
static void
-i9xx_ring_put_irq(struct intel_engine_cs *engine)
+i9xx_irq_disable(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
+ struct drm_i915_private *dev_priv = engine->i915;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--engine->irq_refcount == 0) {
- dev_priv->irq_mask |= engine->irq_enable_mask;
- I915_WRITE(IMR, dev_priv->irq_mask);
- POSTING_READ(IMR);
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ dev_priv->irq_mask |= engine->irq_enable_mask;
+ I915_WRITE(IMR, dev_priv->irq_mask);
}
-static bool
-i8xx_ring_get_irq(struct intel_engine_cs *engine)
+static void
+i8xx_irq_enable(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
-
- if (!intel_irqs_enabled(dev_priv))
- return false;
+ struct drm_i915_private *dev_priv = engine->i915;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (engine->irq_refcount++ == 0) {
- dev_priv->irq_mask &= ~engine->irq_enable_mask;
- I915_WRITE16(IMR, dev_priv->irq_mask);
- POSTING_READ16(IMR);
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
- return true;
+ dev_priv->irq_mask &= ~engine->irq_enable_mask;
+ I915_WRITE16(IMR, dev_priv->irq_mask);
+ POSTING_READ16(RING_IMR(engine->mmio_base));
}
static void
-i8xx_ring_put_irq(struct intel_engine_cs *engine)
+i8xx_irq_disable(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
+ struct drm_i915_private *dev_priv = engine->i915;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--engine->irq_refcount == 0) {
- dev_priv->irq_mask |= engine->irq_enable_mask;
- I915_WRITE16(IMR, dev_priv->irq_mask);
- POSTING_READ16(IMR);
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ dev_priv->irq_mask |= engine->irq_enable_mask;
+ I915_WRITE16(IMR, dev_priv->irq_mask);
}
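All of the get/put pairs above drop their engine->irq_refcount and irq_lock bookkeeping: the new enable/disable hooks poke the hardware unconditionally, and a single caller (the breadcrumbs code this series introduces) owns the refcount. A stand-alone sketch of that split, with hypothetical names:

    #include <stdio.h>

    struct engine {
        unsigned int irq_refcount;           /* owned by the one caller now */
        void (*irq_enable)(struct engine *); /* unconditional hw poke */
        void (*irq_disable)(struct engine *);
    };

    static void hw_irq_enable(struct engine *e)  { (void)e; puts("unmask user irq"); }
    static void hw_irq_disable(struct engine *e) { (void)e; puts("mask user irq"); }

    /* refcounting lives in one place instead of in every per-gen hook */
    static void irq_get(struct engine *e)
    {
        if (e->irq_refcount++ == 0)
            e->irq_enable(e);
    }

    static void irq_put(struct engine *e)
    {
        if (--e->irq_refcount == 0)
            e->irq_disable(e);
    }

    int main(void)
    {
        struct engine e = { 0, hw_irq_enable, hw_irq_disable };
        irq_get(&e); irq_get(&e); /* enables exactly once */
        irq_put(&e); irq_put(&e); /* disables exactly once */
        return 0;
    }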
static int
@@ -1838,135 +1723,68 @@ i9xx_add_request(struct drm_i915_gem_request *req)
intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
intel_ring_emit(engine,
I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+ intel_ring_emit(engine, req->seqno);
intel_ring_emit(engine, MI_USER_INTERRUPT);
__intel_ring_advance(engine);
return 0;
}
-static bool
-gen6_ring_get_irq(struct intel_engine_cs *engine)
+static void
+gen6_irq_enable(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
-
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
- return false;
-
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (engine->irq_refcount++ == 0) {
- if (HAS_L3_DPF(dev) && engine->id == RCS)
- I915_WRITE_IMR(engine,
- ~(engine->irq_enable_mask |
- GT_PARITY_ERROR(dev)));
- else
- I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
- gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ struct drm_i915_private *dev_priv = engine->i915;
- return true;
+ I915_WRITE_IMR(engine,
+ ~(engine->irq_enable_mask |
+ engine->irq_keep_mask));
+ gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
}
static void
-gen6_ring_put_irq(struct intel_engine_cs *engine)
+gen6_irq_disable(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
+ struct drm_i915_private *dev_priv = engine->i915;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--engine->irq_refcount == 0) {
- if (HAS_L3_DPF(dev) && engine->id == RCS)
- I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
- else
- I915_WRITE_IMR(engine, ~0);
- gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
+ gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
}
-static bool
-hsw_vebox_get_irq(struct intel_engine_cs *engine)
+static void
+hsw_vebox_irq_enable(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
-
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
- return false;
+ struct drm_i915_private *dev_priv = engine->i915;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (engine->irq_refcount++ == 0) {
- I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
- gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
- return true;
+ I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+ gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
}
static void
-hsw_vebox_put_irq(struct intel_engine_cs *engine)
+hsw_vebox_irq_disable(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
+ struct drm_i915_private *dev_priv = engine->i915;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--engine->irq_refcount == 0) {
- I915_WRITE_IMR(engine, ~0);
- gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ I915_WRITE_IMR(engine, ~0);
+ gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
}
-static bool
-gen8_ring_get_irq(struct intel_engine_cs *engine)
+static void
+gen8_irq_enable(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
+ struct drm_i915_private *dev_priv = engine->i915;
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
- return false;
-
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (engine->irq_refcount++ == 0) {
- if (HAS_L3_DPF(dev) && engine->id == RCS) {
- I915_WRITE_IMR(engine,
- ~(engine->irq_enable_mask |
- GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
- } else {
- I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
- }
- POSTING_READ(RING_IMR(engine->mmio_base));
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
- return true;
+ I915_WRITE_IMR(engine,
+ ~(engine->irq_enable_mask |
+ engine->irq_keep_mask));
+ POSTING_READ_FW(RING_IMR(engine->mmio_base));
}
static void
-gen8_ring_put_irq(struct intel_engine_cs *engine)
+gen8_irq_disable(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long flags;
+ struct drm_i915_private *dev_priv = engine->i915;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--engine->irq_refcount == 0) {
- if (HAS_L3_DPF(dev) && engine->id == RCS) {
- I915_WRITE_IMR(engine,
- ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
- } else {
- I915_WRITE_IMR(engine, ~0);
- }
- POSTING_READ(RING_IMR(engine->mmio_base));
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
}
static int
@@ -2080,12 +1898,12 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
static void cleanup_phys_status_page(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ struct drm_i915_private *dev_priv = engine->i915;
if (!dev_priv->status_page_dmah)
return;
- drm_pci_free(engine->dev, dev_priv->status_page_dmah);
+ drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
engine->status_page.page_addr = NULL;
}
@@ -2111,10 +1929,10 @@ static int init_status_page(struct intel_engine_cs *engine)
unsigned flags;
int ret;
- obj = i915_gem_alloc_object(engine->dev, 4096);
- if (obj == NULL) {
+ obj = i915_gem_object_create(&engine->i915->drm, 4096);
+ if (IS_ERR(obj)) {
DRM_ERROR("Failed to allocate status page\n");
- return -ENOMEM;
+ return PTR_ERR(obj);
}
ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
@@ -2122,7 +1940,7 @@ static int init_status_page(struct intel_engine_cs *engine)
goto err_unref;
flags = 0;
- if (!HAS_LLC(engine->dev))
+ if (!HAS_LLC(engine->i915))
/* On g33, we cannot place HWS above 256MiB, so
* restrict its pinning to the low mappable arena.
* Though this restriction is not documented for
@@ -2156,11 +1974,11 @@ err_unref:
static int init_phys_status_page(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
if (!dev_priv->status_page_dmah) {
dev_priv->status_page_dmah =
- drm_pci_alloc(engine->dev, PAGE_SIZE, PAGE_SIZE);
+ drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
if (!dev_priv->status_page_dmah)
return -ENOMEM;
}
@@ -2173,20 +1991,22 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
{
+ GEM_BUG_ON(ringbuf->vma == NULL);
+ GEM_BUG_ON(ringbuf->virtual_start == NULL);
+
if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
i915_gem_object_unpin_map(ringbuf->obj);
else
- iounmap(ringbuf->virtual_start);
+ i915_vma_unpin_iomap(ringbuf->vma);
ringbuf->virtual_start = NULL;
- ringbuf->vma = NULL;
+
i915_gem_object_ggtt_unpin(ringbuf->obj);
+ ringbuf->vma = NULL;
}
-int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
struct intel_ringbuffer *ringbuf)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_object *obj = ringbuf->obj;
/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
unsigned flags = PIN_OFFSET_BIAS | 4096;
@@ -2220,10 +2040,9 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
/* Access through the GTT requires the device to be awake. */
assert_rpm_wakelock_held(dev_priv);
- addr = ioremap_wc(ggtt->mappable_base +
- i915_gem_obj_ggtt_offset(obj), ringbuf->size);
- if (addr == NULL) {
- ret = -ENOMEM;
+ addr = i915_vma_pin_iomap(i915_gem_obj_to_ggtt(obj));
+ if (IS_ERR(addr)) {
+ ret = PTR_ERR(addr);
goto err_unpin;
}
}
@@ -2252,9 +2071,9 @@ static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
if (!HAS_LLC(dev))
obj = i915_gem_object_create_stolen(dev, ringbuf->size);
if (obj == NULL)
- obj = i915_gem_alloc_object(dev, ringbuf->size);
- if (obj == NULL)
- return -ENOMEM;
+ obj = i915_gem_object_create(dev, ringbuf->size);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
/* mark ring buffers as read-only from GPU side by default */
obj->gt_ro = 1;
@@ -2286,13 +2105,13 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
* of the buffer.
*/
ring->effective_size = size;
- if (IS_I830(engine->dev) || IS_845G(engine->dev))
+ if (IS_I830(engine->i915) || IS_845G(engine->i915))
ring->effective_size -= 2 * CACHELINE_BYTES;
ring->last_retired_head = -1;
intel_ring_update_space(ring);
- ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
+ ret = intel_alloc_ringbuffer_obj(&engine->i915->drm, ring);
if (ret) {
DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
engine->name, ret);
@@ -2312,15 +2131,67 @@ intel_ringbuffer_free(struct intel_ringbuffer *ring)
kfree(ring);
}
+static int intel_ring_context_pin(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context *ce = &ctx->engine[engine->id];
+ int ret;
+
+ lockdep_assert_held(&ctx->i915->drm.struct_mutex);
+
+ if (ce->pin_count++)
+ return 0;
+
+ if (ce->state) {
+ ret = i915_gem_obj_ggtt_pin(ce->state, ctx->ggtt_alignment, 0);
+ if (ret)
+ goto error;
+ }
+
+ /* The kernel context is only used as a placeholder for flushing the
+ * active context. It is never used for submitting user rendering and
+ * as such never requires the golden render context, and so we can skip
+ * emitting it when we switch to the kernel context. This is required
+ * as during eviction we cannot allocate and pin the renderstate in
+ * order to initialise the context.
+ */
+ if (ctx == ctx->i915->kernel_context)
+ ce->initialised = true;
+
+ i915_gem_context_reference(ctx);
+ return 0;
+
+error:
+ ce->pin_count = 0;
+ return ret;
+}
+
+static void intel_ring_context_unpin(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context *ce = &ctx->engine[engine->id];
+
+ lockdep_assert_held(&ctx->i915->drm.struct_mutex);
+
+ if (--ce->pin_count)
+ return;
+
+ if (ce->state)
+ i915_gem_object_ggtt_unpin(ce->state);
+
+ i915_gem_context_unreference(ctx);
+}
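intel_ring_context_pin/unpin above follow the classic pin-count idiom: the first pin does the real work, the last unpin undoes it, and a failed first pin rolls the count back. A minimal stand-alone rendering of the same shape, names hypothetical:

    #include <stdio.h>

    struct ctx { int pin_count; int pinned; };

    static int backing_pin(struct ctx *c)    { c->pinned = 1; return 0; }
    static void backing_unpin(struct ctx *c) { c->pinned = 0; }

    static int ctx_pin(struct ctx *c)
    {
        int ret;

        if (c->pin_count++)   /* already pinned: just bump the count */
            return 0;

        ret = backing_pin(c); /* first user pins the backing store */
        if (ret)
            c->pin_count = 0; /* roll back on failure */
        return ret;
    }

    static void ctx_unpin(struct ctx *c)
    {
        if (--c->pin_count)   /* still in use */
            return;
        backing_unpin(c);     /* last user releases it */
    }

    int main(void)
    {
        struct ctx c = { 0, 0 };
        ctx_pin(&c); ctx_pin(&c);
        ctx_unpin(&c); ctx_unpin(&c);
        printf("pinned=%d count=%d\n", c.pinned, c.pin_count); /* 0 0 */
        return 0;
    }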
+
static int intel_init_ring_buffer(struct drm_device *dev,
struct intel_engine_cs *engine)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_ringbuffer *ringbuf;
int ret;
WARN_ON(engine->buffer);
- engine->dev = dev;
+ engine->i915 = dev_priv;
INIT_LIST_HEAD(&engine->active_list);
INIT_LIST_HEAD(&engine->request_list);
INIT_LIST_HEAD(&engine->execlist_queue);
@@ -2329,7 +2200,20 @@ static int intel_init_ring_buffer(struct drm_device *dev,
memset(engine->semaphore.sync_seqno, 0,
sizeof(engine->semaphore.sync_seqno));
- init_waitqueue_head(&engine->irq_queue);
+ ret = intel_engine_init_breadcrumbs(engine);
+ if (ret)
+ goto error;
+
+ /* We may need to do things with the shrinker which
+ * require us to immediately switch back to the default
+ * context. This can cause a problem as pinning the
+ * default context also requires GTT space which may not
+ * be available. To avoid this we always pin the default
+ * context.
+ */
+ ret = intel_ring_context_pin(dev_priv->kernel_context, engine);
+ if (ret)
+ goto error;
ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE);
if (IS_ERR(ringbuf)) {
@@ -2338,7 +2222,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
}
engine->buffer = ringbuf;
- if (I915_NEED_GFX_HWS(dev)) {
+ if (I915_NEED_GFX_HWS(dev_priv)) {
ret = init_status_page(engine);
if (ret)
goto error;
@@ -2349,7 +2233,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
goto error;
}
- ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+ ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf);
if (ret) {
DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
engine->name, ret);
@@ -2375,11 +2259,11 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
if (!intel_engine_initialized(engine))
return;
- dev_priv = to_i915(engine->dev);
+ dev_priv = engine->i915;
if (engine->buffer) {
intel_stop_engine(engine);
- WARN_ON(!IS_GEN2(engine->dev) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
+ WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
intel_unpin_ringbuffer_obj(engine->buffer);
intel_ringbuffer_free(engine->buffer);
@@ -2389,7 +2273,7 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
if (engine->cleanup)
engine->cleanup(engine);
- if (I915_NEED_GFX_HWS(engine->dev)) {
+ if (I915_NEED_GFX_HWS(dev_priv)) {
cleanup_status_page(engine);
} else {
WARN_ON(engine->id != RCS);
@@ -2398,7 +2282,11 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
i915_cmd_parser_fini_ring(engine);
i915_gem_batch_pool_fini(&engine->batch_pool);
- engine->dev = NULL;
+ intel_engine_fini_breadcrumbs(engine);
+
+ intel_ring_context_unpin(dev_priv->kernel_context, engine);
+
+ engine->i915 = NULL;
}
int intel_engine_idle(struct intel_engine_cs *engine)
@@ -2421,46 +2309,22 @@ int intel_engine_idle(struct intel_engine_cs *engine)
int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
- request->ringbuf = request->engine->buffer;
- return 0;
-}
+ int ret;
-int intel_ring_reserve_space(struct drm_i915_gem_request *request)
-{
- /*
- * The first call merely notes the reserve request and is common for
- * all back ends. The subsequent localised _begin() call actually
- * ensures that the reservation is available. Without the begin, if
- * the request creator immediately submitted the request without
- * adding any commands to it then there might not actually be
- * sufficient room for the submission commands.
+ /* Flush enough space to reduce the likelihood of waiting after
+ * we start building the request - in which case we will just
+ * have to repeat work.
*/
- intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
-
- return intel_ring_begin(request, 0);
-}
-
-void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size)
-{
- GEM_BUG_ON(ringbuf->reserved_size);
- ringbuf->reserved_size = size;
-}
+ request->reserved_space += LEGACY_REQUEST_SIZE;
-void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
-{
- GEM_BUG_ON(!ringbuf->reserved_size);
- ringbuf->reserved_size = 0;
-}
+ request->ringbuf = request->engine->buffer;
-void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
-{
- GEM_BUG_ON(!ringbuf->reserved_size);
- ringbuf->reserved_size = 0;
-}
+ ret = intel_ring_begin(request, 0);
+ if (ret)
+ return ret;
-void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
-{
- GEM_BUG_ON(ringbuf->reserved_size);
+ request->reserved_space -= LEGACY_REQUEST_SIZE;
+ return 0;
}
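The reservation machinery above collapses into arithmetic on request->reserved_space: hold back LEGACY_REQUEST_SIZE so intel_ring_begin() always leaves room for the closing add_request commands, then release the hold once space is secured. A toy model of that flow; the size and helper are placeholders:

    #include <stdio.h>

    #define LEGACY_REQUEST_SIZE 200 /* bytes held back for request emission */

    struct request { int reserved_space; int ring_space; };

    /* succeeds only if the payload plus the standing reservation fits */
    static int ring_begin(struct request *rq, int bytes)
    {
        return (bytes + rq->reserved_space <= rq->ring_space) ? 0 : -1;
    }

    int main(void)
    {
        struct request rq = { .reserved_space = 0, .ring_space = 4096 };

        rq.reserved_space += LEGACY_REQUEST_SIZE; /* alloc_request_extras */
        if (ring_begin(&rq, 0))                   /* flush space up front */
            return 1;
        rq.reserved_space -= LEGACY_REQUEST_SIZE; /* room now guaranteed */

        printf("ok, %d bytes were held back\n", LEGACY_REQUEST_SIZE);
        return 0;
    }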
static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
@@ -2482,7 +2346,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
*
* See also i915_gem_request_alloc() and i915_add_request().
*/
- GEM_BUG_ON(!ringbuf->reserved_size);
+ GEM_BUG_ON(!req->reserved_space);
list_for_each_entry(target, &engine->request_list, list) {
unsigned space;
@@ -2517,7 +2381,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
int total_bytes, wait_bytes;
bool need_wrap = false;
- total_bytes = bytes + ringbuf->reserved_size;
+ total_bytes = bytes + req->reserved_space;
if (unlikely(bytes > remain_usable)) {
/*
@@ -2533,7 +2397,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
* and only need to effectively wait for the reserved
* size space from the start of ringbuffer.
*/
- wait_bytes = remain_actual + ringbuf->reserved_size;
+ wait_bytes = remain_actual + req->reserved_space;
} else {
/* No wrapping required, just waiting. */
wait_bytes = total_bytes;
@@ -2590,7 +2454,7 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
{
- struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ struct drm_i915_private *dev_priv = engine->i915;
/* Our semaphore implementation is strictly monotonic (i.e. we proceed
* so long as the semaphore value in the register/page is greater
@@ -2600,7 +2464,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
* the semaphore value, then when the seqno moves backwards all
* future waits will complete instantly (causing rendering corruption).
*/
- if (INTEL_INFO(dev_priv)->gen == 6 || INTEL_INFO(dev_priv)->gen == 7) {
+ if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
if (HAS_VEBOX(dev_priv))
@@ -2617,43 +2481,58 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
memset(engine->semaphore.sync_seqno, 0,
sizeof(engine->semaphore.sync_seqno));
- engine->set_seqno(engine, seqno);
+ intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
+ if (engine->irq_seqno_barrier)
+ engine->irq_seqno_barrier(engine);
engine->last_submitted_seqno = seqno;
engine->hangcheck.seqno = seqno;
+
+ /* After manually advancing the seqno, fake the interrupt in case
+ * there are any waiters for that seqno.
+ */
+ rcu_read_lock();
+ intel_engine_wakeup(engine);
+ rcu_read_unlock();
}
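With set_seqno gone, intel_ring_init_seqno writes the value straight into the hardware status page and then fakes a wakeup for any sleepers. A sketch of the status-page write half; the slot index and page layout are modeled loosely on the HWS, values illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define HWS_INDEX 0x30 /* dword slot the hardware also writes seqnos to */

    static void write_status_page(uint32_t *page, int index, uint32_t value)
    {
        page[index] = value; /* CPU-side mirror of the GPU's seqno write */
    }

    int main(void)
    {
        uint32_t hws[256] = { 0 }; /* one page's worth of dwords */
        write_status_page(hws, HWS_INDEX, 7);
        printf("seqno now %u\n", hws[HWS_INDEX]);
        return 0;
    }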
static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
u32 value)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
+
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* Every tail move must follow the sequence below */
/* Disable notification that the ring is IDLE. The GT
* will then assume that it is busy and bring it out of rc6.
*/
- I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
- _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+ I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
+ _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
/* Clear the context id. Here be magic! */
- I915_WRITE64(GEN6_BSD_RNCID, 0x0);
+ I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);
/* Wait for the ring not to be idle, i.e. for it to wake up. */
- if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
- GEN6_BSD_SLEEP_INDICATOR) == 0,
- 50))
+ if (intel_wait_for_register_fw(dev_priv,
+ GEN6_BSD_SLEEP_PSMI_CONTROL,
+ GEN6_BSD_SLEEP_INDICATOR,
+ 0,
+ 50))
DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
/* Now that the ring is fully powered up, update the tail */
- I915_WRITE_TAIL(engine, value);
- POSTING_READ(RING_TAIL(engine->mmio_base));
+ I915_WRITE_FW(RING_TAIL(engine->mmio_base), value);
+ POSTING_READ_FW(RING_TAIL(engine->mmio_base));
/* Let the ring send IDLE messages to the GT again,
* and so let it sleep to conserve power when idle.
*/
- I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
- _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+ I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
+ _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
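The tail-write path above now takes FORCEWAKE_ALL once and uses raw _FW accessors inside the bracket, rather than letting each I915_WRITE acquire and drop forcewake on its own. The shape, as a stand-alone sketch with simulated registers:

    #include <stdio.h>

    static int forcewake_count; /* stands in for the uncore forcewake domain */

    static void forcewake_get(void) { forcewake_count++; }
    static void forcewake_put(void) { forcewake_count--; }

    /* raw write: assumes the caller already holds forcewake */
    static void write_fw(const char *reg, unsigned int val)
    {
        printf("%s <- %#x (fw held: %d)\n", reg, val, forcewake_count);
    }

    int main(void)
    {
        forcewake_get();                     /* one get for the whole sequence */
        write_fw("SLEEP_PSMI_CONTROL", 0x1); /* keep the ring awake */
        write_fw("RING_TAIL", 0x40);         /* the actual tail move */
        write_fw("SLEEP_PSMI_CONTROL", 0x0); /* allow idling again */
        forcewake_put();
        return 0;
    }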
static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
@@ -2668,7 +2547,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
return ret;
cmd = MI_FLUSH_DW;
- if (INTEL_INFO(engine->dev)->gen >= 8)
+ if (INTEL_GEN(req->i915) >= 8)
cmd += 1;
/* We always require a command barrier so that subsequent
@@ -2690,7 +2569,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
intel_ring_emit(engine, cmd);
intel_ring_emit(engine,
I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
- if (INTEL_INFO(engine->dev)->gen >= 8) {
+ if (INTEL_GEN(req->i915) >= 8) {
intel_ring_emit(engine, 0); /* upper addr */
intel_ring_emit(engine, 0); /* value */
} else {
@@ -2781,7 +2660,6 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate, u32 flush)
{
struct intel_engine_cs *engine = req->engine;
- struct drm_device *dev = engine->dev;
uint32_t cmd;
int ret;
@@ -2790,7 +2668,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
return ret;
cmd = MI_FLUSH_DW;
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_GEN(req->i915) >= 8)
cmd += 1;
/* We always require a command barrier so that subsequent
@@ -2811,7 +2689,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
intel_ring_emit(engine, cmd);
intel_ring_emit(engine,
I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(req->i915) >= 8) {
intel_ring_emit(engine, 0); /* upper addr */
intel_ring_emit(engine, 0); /* value */
} else {
@@ -2823,11 +2701,159 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
return 0;
}
+static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
+ struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_object *obj;
+ int ret, i;
+
+ if (!i915_semaphore_is_enabled(dev_priv))
+ return;
+
+ if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore_obj) {
+ obj = i915_gem_object_create(&dev_priv->drm, 4096);
+ if (IS_ERR(obj)) {
+ DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
+ i915.semaphores = 0;
+ } else {
+ i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
+ if (ret != 0) {
+ drm_gem_object_unreference(&obj->base);
+ DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
+ i915.semaphores = 0;
+ } else {
+ dev_priv->semaphore_obj = obj;
+ }
+ }
+ }
+
+ if (!i915_semaphore_is_enabled(dev_priv))
+ return;
+
+ if (INTEL_GEN(dev_priv) >= 8) {
+ u64 offset = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj);
+
+ engine->semaphore.sync_to = gen8_ring_sync;
+ engine->semaphore.signal = gen8_xcs_signal;
+
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
+ u64 ring_offset;
+
+ if (i != engine->id)
+ ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i);
+ else
+ ring_offset = MI_SEMAPHORE_SYNC_INVALID;
+
+ engine->semaphore.signal_ggtt[i] = ring_offset;
+ }
+ } else if (INTEL_GEN(dev_priv) >= 6) {
+ engine->semaphore.sync_to = gen6_ring_sync;
+ engine->semaphore.signal = gen6_signal;
+
+ /*
+ * The current semaphore is only applied on pre-gen8
+ * platform. And there is no VCS2 ring on the pre-gen8
+ * platform. So the semaphore between RCS and VCS2 is
+ * initialized as INVALID. Gen8 will initialize the
+ * sema between VCS2 and RCS later.
+ */
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
+ static const struct {
+ u32 wait_mbox;
+ i915_reg_t mbox_reg;
+ } sem_data[I915_NUM_ENGINES][I915_NUM_ENGINES] = {
+ [RCS] = {
+ [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC },
+ [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC },
+ [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
+ },
+ [VCS] = {
+ [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC },
+ [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC },
+ [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
+ },
+ [BCS] = {
+ [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC },
+ [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC },
+ [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
+ },
+ [VECS] = {
+ [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
+ [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
+ [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
+ },
+ };
+ u32 wait_mbox;
+ i915_reg_t mbox_reg;
+
+ if (i == engine->id || i == VCS2) {
+ wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
+ mbox_reg = GEN6_NOSYNC;
+ } else {
+ wait_mbox = sem_data[engine->id][i].wait_mbox;
+ mbox_reg = sem_data[engine->id][i].mbox_reg;
+ }
+
+ engine->semaphore.mbox.wait[i] = wait_mbox;
+ engine->semaphore.mbox.signal[i] = mbox_reg;
+ }
+ }
+}
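The sem_data table above replaces five hand-written mbox assignments per engine with one [signaller][waiter] lookup, leaving the diagonal (and VCS2) invalid. A reduced stand-alone version showing the same indexing, cut down to two engines with illustrative values:

    #include <stdio.h>

    enum { RCS, VCS, NUM };

    struct sem { unsigned int wait_mbox; unsigned int mbox_reg; };

    int main(void)
    {
        /* [this engine][other engine]; the diagonal stays invalid */
        static const struct sem sem_data[NUM][NUM] = {
            [RCS] = { [VCS] = { .wait_mbox = 0x11, .mbox_reg = 0x100 } },
            [VCS] = { [RCS] = { .wait_mbox = 0x22, .mbox_reg = 0x200 } },
        };
        int self = RCS;

        for (int i = 0; i < NUM; i++) {
            unsigned int wait = (i == self) ? ~0u /* SYNC_INVALID */
                                            : sem_data[self][i].wait_mbox;
            printf("wait[%d] = %#x\n", i, wait);
        }
        return 0;
    }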
+
+static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
+ struct intel_engine_cs *engine)
+{
+ if (INTEL_GEN(dev_priv) >= 8) {
+ engine->irq_enable = gen8_irq_enable;
+ engine->irq_disable = gen8_irq_disable;
+ engine->irq_seqno_barrier = gen6_seqno_barrier;
+ } else if (INTEL_GEN(dev_priv) >= 6) {
+ engine->irq_enable = gen6_irq_enable;
+ engine->irq_disable = gen6_irq_disable;
+ engine->irq_seqno_barrier = gen6_seqno_barrier;
+ } else if (INTEL_GEN(dev_priv) >= 5) {
+ engine->irq_enable = gen5_irq_enable;
+ engine->irq_disable = gen5_irq_disable;
+ engine->irq_seqno_barrier = gen5_seqno_barrier;
+ } else if (INTEL_GEN(dev_priv) >= 3) {
+ engine->irq_enable = i9xx_irq_enable;
+ engine->irq_disable = i9xx_irq_disable;
+ } else {
+ engine->irq_enable = i8xx_irq_enable;
+ engine->irq_disable = i8xx_irq_disable;
+ }
+}
+
+static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
+ struct intel_engine_cs *engine)
+{
+ engine->init_hw = init_ring_common;
+ engine->write_tail = ring_write_tail;
+
+ engine->add_request = i9xx_add_request;
+ if (INTEL_GEN(dev_priv) >= 6)
+ engine->add_request = gen6_add_request;
+
+ if (INTEL_GEN(dev_priv) >= 8)
+ engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+ else if (INTEL_GEN(dev_priv) >= 6)
+ engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ else if (INTEL_GEN(dev_priv) >= 4)
+ engine->dispatch_execbuffer = i965_dispatch_execbuffer;
+ else if (IS_I830(dev_priv) || IS_845G(dev_priv))
+ engine->dispatch_execbuffer = i830_dispatch_execbuffer;
+ else
+ engine->dispatch_execbuffer = i915_dispatch_execbuffer;
+
+ intel_ring_init_irq(dev_priv, engine);
+ intel_ring_init_semaphores(dev_priv, engine);
+}
+
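intel_ring_default_vfuncs sets every hook to a sensible default and lets each ring constructor override only what differs, which is what shrinks the per-ring init functions below. The pattern in miniature, with hypothetical hook names:

    #include <stdio.h>

    struct engine { void (*add_request)(void); void (*flush)(void); };

    static void generic_add(void) { puts("i9xx add_request"); }
    static void gen6_add(void)    { puts("gen6 add_request"); }
    static void gen6_flush(void)  { puts("gen6 flush"); }

    static void default_vfuncs(struct engine *e, int gen)
    {
        e->add_request = generic_add;  /* default first */
        if (gen >= 6)
            e->add_request = gen6_add; /* then the gen-specific override */
    }

    int main(void)
    {
        struct engine e;
        default_vfuncs(&e, 6);
        e.flush = gen6_flush;          /* per-ring constructor overrides */
        e.add_request();
        e.flush();
        return 0;
    }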
int intel_init_render_ring_buffer(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
- struct drm_i915_gem_object *obj;
int ret;
engine->name = "render ring";
@@ -2836,140 +2862,49 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
engine->hw_id = 0;
engine->mmio_base = RENDER_RING_BASE;
- if (INTEL_INFO(dev)->gen >= 8) {
- if (i915_semaphore_is_enabled(dev)) {
- obj = i915_gem_alloc_object(dev, 4096);
- if (obj == NULL) {
- DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
- i915.semaphores = 0;
- } else {
- i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
- ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
- if (ret != 0) {
- drm_gem_object_unreference(&obj->base);
- DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
- i915.semaphores = 0;
- } else
- dev_priv->semaphore_obj = obj;
- }
- }
+ intel_ring_default_vfuncs(dev_priv, engine);
+
+ engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
+ if (HAS_L3_DPF(dev_priv))
+ engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+ if (INTEL_GEN(dev_priv) >= 8) {
engine->init_context = intel_rcs_ctx_init;
- engine->add_request = gen6_add_request;
+ engine->add_request = gen8_render_add_request;
engine->flush = gen8_render_ring_flush;
- engine->irq_get = gen8_ring_get_irq;
- engine->irq_put = gen8_ring_put_irq;
- engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
- engine->irq_seqno_barrier = gen6_seqno_barrier;
- engine->get_seqno = ring_get_seqno;
- engine->set_seqno = ring_set_seqno;
- if (i915_semaphore_is_enabled(dev)) {
- WARN_ON(!dev_priv->semaphore_obj);
- engine->semaphore.sync_to = gen8_ring_sync;
+ if (i915_semaphore_is_enabled(dev_priv))
engine->semaphore.signal = gen8_rcs_signal;
- GEN8_RING_SEMAPHORE_INIT(engine);
- }
- } else if (INTEL_INFO(dev)->gen >= 6) {
+ } else if (INTEL_GEN(dev_priv) >= 6) {
engine->init_context = intel_rcs_ctx_init;
- engine->add_request = gen6_add_request;
engine->flush = gen7_render_ring_flush;
- if (INTEL_INFO(dev)->gen == 6)
+ if (IS_GEN6(dev_priv))
engine->flush = gen6_render_ring_flush;
- engine->irq_get = gen6_ring_get_irq;
- engine->irq_put = gen6_ring_put_irq;
- engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
- engine->irq_seqno_barrier = gen6_seqno_barrier;
- engine->get_seqno = ring_get_seqno;
- engine->set_seqno = ring_set_seqno;
- if (i915_semaphore_is_enabled(dev)) {
- engine->semaphore.sync_to = gen6_ring_sync;
- engine->semaphore.signal = gen6_signal;
- /*
- * The current semaphore is only applied on pre-gen8
- * platform. And there is no VCS2 ring on the pre-gen8
- * platform. So the semaphore between RCS and VCS2 is
- * initialized as INVALID. Gen8 will initialize the
- * sema between VCS2 and RCS later.
- */
- engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
- engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
- engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
- engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
- engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- engine->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
- engine->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
- engine->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
- engine->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
- engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
- }
- } else if (IS_GEN5(dev)) {
- engine->add_request = pc_render_add_request;
+ } else if (IS_GEN5(dev_priv)) {
engine->flush = gen4_render_ring_flush;
- engine->get_seqno = pc_render_get_seqno;
- engine->set_seqno = pc_render_set_seqno;
- engine->irq_get = gen5_ring_get_irq;
- engine->irq_put = gen5_ring_put_irq;
- engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
- GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
} else {
- engine->add_request = i9xx_add_request;
- if (INTEL_INFO(dev)->gen < 4)
+ if (INTEL_GEN(dev_priv) < 4)
engine->flush = gen2_render_ring_flush;
else
engine->flush = gen4_render_ring_flush;
- engine->get_seqno = ring_get_seqno;
- engine->set_seqno = ring_set_seqno;
- if (IS_GEN2(dev)) {
- engine->irq_get = i8xx_ring_get_irq;
- engine->irq_put = i8xx_ring_put_irq;
- } else {
- engine->irq_get = i9xx_ring_get_irq;
- engine->irq_put = i9xx_ring_put_irq;
- }
engine->irq_enable_mask = I915_USER_INTERRUPT;
}
- engine->write_tail = ring_write_tail;
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
- else if (IS_GEN8(dev))
- engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
- else if (INTEL_INFO(dev)->gen >= 6)
- engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
- else if (INTEL_INFO(dev)->gen >= 4)
- engine->dispatch_execbuffer = i965_dispatch_execbuffer;
- else if (IS_I830(dev) || IS_845G(dev))
- engine->dispatch_execbuffer = i830_dispatch_execbuffer;
- else
- engine->dispatch_execbuffer = i915_dispatch_execbuffer;
+
engine->init_hw = init_render_ring;
engine->cleanup = render_ring_cleanup;
- /* Workaround batchbuffer to combat CS tlb bug. */
- if (HAS_BROKEN_CS_TLB(dev)) {
- obj = i915_gem_alloc_object(dev, I830_WA_SIZE);
- if (obj == NULL) {
- DRM_ERROR("Failed to allocate batch bo\n");
- return -ENOMEM;
- }
-
- ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
- if (ret != 0) {
- drm_gem_object_unreference(&obj->base);
- DRM_ERROR("Failed to ping batch bo\n");
- return ret;
- }
-
- engine->scratch.obj = obj;
- engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
- }
-
ret = intel_init_ring_buffer(dev, engine);
if (ret)
return ret;
- if (INTEL_INFO(dev)->gen >= 5) {
- ret = intel_init_pipe_control(engine);
+ if (INTEL_GEN(dev_priv) >= 6) {
+ ret = intel_init_pipe_control(engine, 4096);
+ if (ret)
+ return ret;
+ } else if (HAS_BROKEN_CS_TLB(dev_priv)) {
+ ret = intel_init_pipe_control(engine, I830_WA_SIZE);
if (ret)
return ret;
}
@@ -2979,7 +2914,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine = &dev_priv->engine[VCS];
engine->name = "bsd ring";
@@ -2987,68 +2922,27 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
engine->exec_id = I915_EXEC_BSD;
engine->hw_id = 1;
- engine->write_tail = ring_write_tail;
- if (INTEL_INFO(dev)->gen >= 6) {
+ intel_ring_default_vfuncs(dev_priv, engine);
+
+ if (INTEL_GEN(dev_priv) >= 6) {
engine->mmio_base = GEN6_BSD_RING_BASE;
/* gen6 bsd needs a special wa for tail updates */
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev_priv))
engine->write_tail = gen6_bsd_ring_write_tail;
engine->flush = gen6_bsd_ring_flush;
- engine->add_request = gen6_add_request;
- engine->irq_seqno_barrier = gen6_seqno_barrier;
- engine->get_seqno = ring_get_seqno;
- engine->set_seqno = ring_set_seqno;
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8)
engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
- engine->irq_get = gen8_ring_get_irq;
- engine->irq_put = gen8_ring_put_irq;
- engine->dispatch_execbuffer =
- gen8_ring_dispatch_execbuffer;
- if (i915_semaphore_is_enabled(dev)) {
- engine->semaphore.sync_to = gen8_ring_sync;
- engine->semaphore.signal = gen8_xcs_signal;
- GEN8_RING_SEMAPHORE_INIT(engine);
- }
- } else {
+ else
engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
- engine->irq_get = gen6_ring_get_irq;
- engine->irq_put = gen6_ring_put_irq;
- engine->dispatch_execbuffer =
- gen6_ring_dispatch_execbuffer;
- if (i915_semaphore_is_enabled(dev)) {
- engine->semaphore.sync_to = gen6_ring_sync;
- engine->semaphore.signal = gen6_signal;
- engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
- engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
- engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
- engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
- engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- engine->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
- engine->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
- engine->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
- engine->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
- engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
- }
- }
} else {
engine->mmio_base = BSD_RING_BASE;
engine->flush = bsd_ring_flush;
- engine->add_request = i9xx_add_request;
- engine->get_seqno = ring_get_seqno;
- engine->set_seqno = ring_set_seqno;
- if (IS_GEN5(dev)) {
+ if (IS_GEN5(dev_priv))
engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
- engine->irq_get = gen5_ring_get_irq;
- engine->irq_put = gen5_ring_put_irq;
- } else {
+ else
engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
- engine->irq_get = i9xx_ring_get_irq;
- engine->irq_put = i9xx_ring_put_irq;
- }
- engine->dispatch_execbuffer = i965_dispatch_execbuffer;
}
- engine->init_hw = init_ring_common;
return intel_init_ring_buffer(dev, engine);
}
@@ -3058,147 +2952,70 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
*/
int intel_init_bsd2_ring_buffer(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine = &dev_priv->engine[VCS2];
engine->name = "bsd2 ring";
engine->id = VCS2;
engine->exec_id = I915_EXEC_BSD;
engine->hw_id = 4;
-
- engine->write_tail = ring_write_tail;
engine->mmio_base = GEN8_BSD2_RING_BASE;
+
+ intel_ring_default_vfuncs(dev_priv, engine);
+
engine->flush = gen6_bsd_ring_flush;
- engine->add_request = gen6_add_request;
- engine->irq_seqno_barrier = gen6_seqno_barrier;
- engine->get_seqno = ring_get_seqno;
- engine->set_seqno = ring_set_seqno;
engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
- engine->irq_get = gen8_ring_get_irq;
- engine->irq_put = gen8_ring_put_irq;
- engine->dispatch_execbuffer =
- gen8_ring_dispatch_execbuffer;
- if (i915_semaphore_is_enabled(dev)) {
- engine->semaphore.sync_to = gen8_ring_sync;
- engine->semaphore.signal = gen8_xcs_signal;
- GEN8_RING_SEMAPHORE_INIT(engine);
- }
- engine->init_hw = init_ring_common;
return intel_init_ring_buffer(dev, engine);
}
int intel_init_blt_ring_buffer(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine = &dev_priv->engine[BCS];
engine->name = "blitter ring";
engine->id = BCS;
engine->exec_id = I915_EXEC_BLT;
engine->hw_id = 2;
-
engine->mmio_base = BLT_RING_BASE;
- engine->write_tail = ring_write_tail;
+
+ intel_ring_default_vfuncs(dev_priv, engine);
+
engine->flush = gen6_ring_flush;
- engine->add_request = gen6_add_request;
- engine->irq_seqno_barrier = gen6_seqno_barrier;
- engine->get_seqno = ring_get_seqno;
- engine->set_seqno = ring_set_seqno;
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8)
engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
- engine->irq_get = gen8_ring_get_irq;
- engine->irq_put = gen8_ring_put_irq;
- engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
- if (i915_semaphore_is_enabled(dev)) {
- engine->semaphore.sync_to = gen8_ring_sync;
- engine->semaphore.signal = gen8_xcs_signal;
- GEN8_RING_SEMAPHORE_INIT(engine);
- }
- } else {
+ else
engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
- engine->irq_get = gen6_ring_get_irq;
- engine->irq_put = gen6_ring_put_irq;
- engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
- if (i915_semaphore_is_enabled(dev)) {
- engine->semaphore.signal = gen6_signal;
- engine->semaphore.sync_to = gen6_ring_sync;
- /*
- * The current semaphore is only applied on pre-gen8
- * platform. And there is no VCS2 ring on the pre-gen8
- * platform. So the semaphore between BCS and VCS2 is
- * initialized as INVALID. Gen8 will initialize the
- * sema between BCS and VCS2 later.
- */
- engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
- engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
- engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
- engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
- engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- engine->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
- engine->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
- engine->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
- engine->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
- engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
- }
- }
- engine->init_hw = init_ring_common;
return intel_init_ring_buffer(dev, engine);
}
int intel_init_vebox_ring_buffer(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine = &dev_priv->engine[VECS];
engine->name = "video enhancement ring";
engine->id = VECS;
engine->exec_id = I915_EXEC_VEBOX;
engine->hw_id = 3;
-
engine->mmio_base = VEBOX_RING_BASE;
- engine->write_tail = ring_write_tail;
+
+ intel_ring_default_vfuncs(dev_priv, engine);
+
engine->flush = gen6_ring_flush;
- engine->add_request = gen6_add_request;
- engine->irq_seqno_barrier = gen6_seqno_barrier;
- engine->get_seqno = ring_get_seqno;
- engine->set_seqno = ring_set_seqno;
- if (INTEL_INFO(dev)->gen >= 8) {
+ if (INTEL_GEN(dev_priv) >= 8) {
engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
- engine->irq_get = gen8_ring_get_irq;
- engine->irq_put = gen8_ring_put_irq;
- engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
- if (i915_semaphore_is_enabled(dev)) {
- engine->semaphore.sync_to = gen8_ring_sync;
- engine->semaphore.signal = gen8_xcs_signal;
- GEN8_RING_SEMAPHORE_INIT(engine);
- }
} else {
engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
- engine->irq_get = hsw_vebox_get_irq;
- engine->irq_put = hsw_vebox_put_irq;
- engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
- if (i915_semaphore_is_enabled(dev)) {
- engine->semaphore.sync_to = gen6_ring_sync;
- engine->semaphore.signal = gen6_signal;
- engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
- engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
- engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
- engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
- engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- engine->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
- engine->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
- engine->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
- engine->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
- engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
- }
+ engine->irq_enable = hsw_vebox_irq_enable;
+ engine->irq_disable = hsw_vebox_irq_disable;
}
- engine->init_hw = init_ring_common;
return intel_init_ring_buffer(dev, engine);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index ff126485d..12cb7ed90 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -62,18 +62,6 @@ struct intel_hw_status_page {
(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
-#define GEN8_RING_SEMAPHORE_INIT(e) do { \
- if (!dev_priv->semaphore_obj) { \
- break; \
- } \
- (e)->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET((e), RCS); \
- (e)->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET((e), VCS); \
- (e)->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET((e), BCS); \
- (e)->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET((e), VECS); \
- (e)->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET((e), VCS2); \
- (e)->semaphore.signal_ggtt[(e)->id] = MI_SEMAPHORE_SYNC_INVALID; \
- } while(0)
-
enum intel_ring_hangcheck_action {
HANGCHECK_IDLE = 0,
HANGCHECK_WAIT,
@@ -86,8 +74,8 @@ enum intel_ring_hangcheck_action {
struct intel_ring_hangcheck {
u64 acthd;
+ unsigned long user_interrupts;
u32 seqno;
- unsigned user_interrupts;
int score;
enum intel_ring_hangcheck_action action;
int deadlock;
@@ -107,7 +95,6 @@ struct intel_ringbuffer {
int space;
int size;
int effective_size;
- int reserved_size;
/** We track the position of the requests in the ring buffer, and
* when each is retired we increment last_retired_head as the GPU
@@ -120,7 +107,7 @@ struct intel_ringbuffer {
u32 last_retired_head;
};
-struct intel_context;
+struct i915_gem_context;
struct drm_i915_reg_table;
/*
@@ -142,7 +129,10 @@ struct i915_ctx_workarounds {
struct drm_i915_gem_object *obj;
};
-struct intel_engine_cs {
+struct drm_i915_gem_request;
+
+struct intel_engine_cs {
+ struct drm_i915_private *i915;
const char *name;
enum intel_engine_id {
RCS = 0,
@@ -157,10 +147,42 @@ struct intel_engine_cs {
unsigned int hw_id;
unsigned int guc_id; /* XXX same as hw_id? */
u32 mmio_base;
- struct drm_device *dev;
struct intel_ringbuffer *buffer;
struct list_head buffers;
+ /* Rather than have every client wait upon all user interrupts,
+ * with the herd waking after every interrupt and each doing the
+ * heavyweight seqno dance, we delegate the task (of being the
+ * bottom-half of the user interrupt) to the first client. After
+ * every interrupt, we wake up one client, who does the heavyweight
+ * coherent seqno read and either goes back to sleep (if incomplete),
+ * or wakes up all the completed clients in parallel, before then
+ * transferring the bottom-half status to the next client in the queue.
+ *
+ * Compared to walking the entire list of waiters in a single dedicated
+ * bottom-half, we reduce the latency of the first waiter by avoiding
+ * a context switch, but incur additional coherent seqno reads when
+ * following the chain of request breadcrumbs. Since it is most likely
+ * that we have a single client waiting on each seqno, reducing
+ * the overhead of waking that client is much preferred.
+ */
+ struct intel_breadcrumbs {
+ struct task_struct *irq_seqno_bh; /* bh for user interrupts */
+ unsigned long irq_wakeups;
+ bool irq_posted;
+
+ spinlock_t lock; /* protects the lists of requests */
+ struct rb_root waiters; /* sorted by retirement, priority */
+ struct rb_root signals; /* sorted by retirement */
+ struct intel_wait *first_wait; /* oldest waiter by retirement */
+ struct task_struct *signaler; /* used for fence signalling */
+ struct drm_i915_gem_request *first_signal;
+ struct timer_list fake_irq; /* used after a missed interrupt */
+
+ bool irq_enabled : 1;
+ bool rpm_wakelock : 1;
+ } breadcrumbs;
+
/*
* A pool of objects to use as shadow copies of client batch buffers
* when the command parser is enabled. Prevents the client from
@@ -171,11 +193,10 @@ struct intel_engine_cs {
struct intel_hw_status_page status_page;
struct i915_ctx_workarounds wa_ctx;
- unsigned irq_refcount; /* protected by dev_priv->irq_lock */
- u32 irq_enable_mask; /* bitmask to enable ring interrupt */
- struct drm_i915_gem_request *trace_irq_req;
- bool __must_check (*irq_get)(struct intel_engine_cs *ring);
- void (*irq_put)(struct intel_engine_cs *ring);
+ u32 irq_keep_mask; /* always keep these interrupts */
+ u32 irq_enable_mask; /* bitmask to enable ring interrupt */
+ void (*irq_enable)(struct intel_engine_cs *ring);
+ void (*irq_disable)(struct intel_engine_cs *ring);
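/*
 * Note: the refcounted irq_get()/irq_put() pair becomes a plain
 * enable/disable pair; judging by the removal of irq_refcount above
 * and the irq_enabled flag in intel_breadcrumbs, reference counting of
 * user interrupts now lives in the breadcrumbs machinery rather than
 * in each engine vfunc.
 */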
int (*init_hw)(struct intel_engine_cs *ring);
@@ -194,9 +215,6 @@ struct intel_engine_cs {
* monotonic, even if not coherent.
*/
void (*irq_seqno_barrier)(struct intel_engine_cs *ring);
- u32 (*get_seqno)(struct intel_engine_cs *ring);
- void (*set_seqno)(struct intel_engine_cs *ring,
- u32 seqno);
int (*dispatch_execbuffer)(struct drm_i915_gem_request *req,
u64 offset, u32 length,
unsigned dispatch_flags);
@@ -268,13 +286,11 @@ struct intel_engine_cs {
struct tasklet_struct irq_tasklet;
spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
struct list_head execlist_queue;
- struct list_head execlist_retired_req_list;
unsigned int fw_domains;
unsigned int next_context_status_buffer;
unsigned int idle_lite_restore_wa;
bool disable_lite_restore_wa;
u32 ctx_desc_template;
- u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
int (*emit_request)(struct drm_i915_gem_request *request);
int (*emit_flush)(struct drm_i915_gem_request *request,
u32 invalidate_domains,
@@ -306,20 +322,16 @@ struct intel_engine_cs {
* inspecting request list.
*/
u32 last_submitted_seqno;
- unsigned user_interrupts;
bool gpu_caches_dirty;
- wait_queue_head_t irq_queue;
-
- struct intel_context *last_context;
+ struct i915_gem_context *last_context;
struct intel_ring_hangcheck hangcheck;
struct {
struct drm_i915_gem_object *obj;
u32 gtt_offset;
- volatile u32 *cpu_page;
} scratch;
bool needs_cmd_parser;
@@ -350,13 +362,13 @@ struct intel_engine_cs {
};
static inline bool
-intel_engine_initialized(struct intel_engine_cs *engine)
+intel_engine_initialized(const struct intel_engine_cs *engine)
{
- return engine->dev != NULL;
+ return engine->i915 != NULL;
}
static inline unsigned
-intel_engine_flag(struct intel_engine_cs *engine)
+intel_engine_flag(const struct intel_engine_cs *engine)
{
return 1 << engine->id;
}
@@ -427,7 +439,7 @@ intel_write_status_page(struct intel_engine_cs *engine,
struct intel_ringbuffer *
intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
-int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
struct intel_ringbuffer *ringbuf);
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
void intel_ringbuffer_free(struct intel_ringbuffer *ring);
@@ -458,15 +470,14 @@ static inline void intel_ring_advance(struct intel_engine_cs *engine)
}
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
-bool intel_engine_stopped(struct intel_engine_cs *engine);
int __must_check intel_engine_idle(struct intel_engine_cs *engine);
void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
+int intel_init_pipe_control(struct intel_engine_cs *engine, int size);
void intel_fini_pipe_control(struct intel_engine_cs *engine);
-int intel_init_pipe_control(struct intel_engine_cs *engine);
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
@@ -475,6 +486,10 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);
u64 intel_ring_get_active_head(struct intel_engine_cs *engine);
+static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
+{
+ return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
+}
int init_workarounds_ring(struct intel_engine_cs *engine);
@@ -486,26 +501,73 @@ static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
/*
* Arbitrary size for largest possible 'add request' sequence. The code paths
* are complex and variable. Empirical measurement shows that the worst case
- * is ILK at 136 words. Reserving too much is better than reserving too little
- * as that allows for corner cases that might have been missed. So the figure
- * has been rounded up to 160 words.
+ * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
+ * we need to allocate double the largest single packet within that emission
+ * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
*/
-#define MIN_SPACE_FOR_ADD_REQUEST 160
+#define MIN_SPACE_FOR_ADD_REQUEST 336
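/*
 * Working through the arithmetic above: a dword is 4 bytes, so the BDW
 * worst case of 6 + 6 + 36 = 48 dwords is 192 bytes, and doubling the
 * largest single packet to cover a tail wrap gives 6 + 6 + 2*36 = 84
 * dwords, i.e. 84 * 4 = 336 bytes, the value chosen here.
 */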
-/*
- * Reserve space in the ring to guarantee that the i915_add_request() call
- * will always have sufficient room to do its stuff. The request creation
- * code calls this automatically.
- */
-void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
-/* Cancel the reservation, e.g. because the request is being discarded. */
-void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
-/* Use the reserved space - for use by i915_add_request() only. */
-void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
-/* Finish with the reserved space - for use by i915_add_request() only. */
-void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
-
-/* Legacy ringbuffer specific portion of reservation code: */
-int intel_ring_reserve_space(struct drm_i915_gem_request *request);
+static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
+{
+ return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
+}
+
+/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
+struct intel_wait {
+ struct rb_node node;
+ struct task_struct *tsk;
+ u32 seqno;
+};
+
+struct intel_signal_node {
+ struct rb_node node;
+ struct intel_wait wait;
+};
+
+int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
+
+static inline void intel_wait_init(struct intel_wait *wait, u32 seqno)
+{
+ wait->tsk = current;
+ wait->seqno = seqno;
+}
+
+static inline bool intel_wait_complete(const struct intel_wait *wait)
+{
+ return RB_EMPTY_NODE(&wait->node);
+}
+
+bool intel_engine_add_wait(struct intel_engine_cs *engine,
+ struct intel_wait *wait);
+void intel_engine_remove_wait(struct intel_engine_cs *engine,
+ struct intel_wait *wait);
+void intel_engine_enable_signaling(struct drm_i915_gem_request *request);
+
+static inline bool intel_engine_has_waiter(struct intel_engine_cs *engine)
+{
+ return READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
+}
+
+static inline bool intel_engine_wakeup(struct intel_engine_cs *engine)
+{
+ bool wakeup = false;
+ struct task_struct *tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
+ /* Note that for this not to dangerously chase a dangling pointer,
+ * the caller is responsible for ensuring that the task remains valid for
+ * wake_up_process() i.e. that the RCU grace period cannot expire.
+ *
+ * Also note that tsk is likely to be in !TASK_RUNNING state so an
+ * early test for tsk->state != TASK_RUNNING before wake_up_process()
+ * is unlikely to be beneficial.
+ */
+ if (tsk)
+ wakeup = wake_up_process(tsk);
+ return wakeup;
+}
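/*
 * Sketch (not part of the patch): one plausible way a client could use
 * the breadcrumb API above to sleep until a seqno passes. Every helper
 * named here is declared in this header (i915_seqno_passed() comes
 * from i915_drv.h); the loop itself is illustrative only and omits
 * timeouts, signal handling and error paths.
 */
static void example_sleep_on_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	struct intel_wait wait;

	intel_wait_init(&wait, seqno);
	intel_engine_add_wait(engine, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	intel_engine_remove_wait(engine, &wait);
}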
+
+void intel_engine_enable_fake_irq(struct intel_engine_cs *engine);
+void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
+unsigned int intel_kick_waiters(struct drm_i915_private *i915);
+unsigned int intel_kick_signalers(struct drm_i915_private *i915);
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 2592b39ff..1c603bbe5 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -65,6 +65,9 @@
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
int power_well_id);
+static struct i915_power_well *
+lookup_power_well(struct drm_i915_private *dev_priv, int power_well_id);
+
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
@@ -151,6 +154,23 @@ static void intel_power_well_disable(struct drm_i915_private *dev_priv,
power_well->ops->disable(dev_priv, power_well);
}
+static void intel_power_well_get(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ if (!power_well->count++)
+ intel_power_well_enable(dev_priv, power_well);
+}
+
+static void intel_power_well_put(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ WARN(!power_well->count, "Use count on power well %s is already zero",
+ power_well->name);
+
+ if (!--power_well->count)
+ intel_power_well_disable(dev_priv, power_well);
+}
+
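/*
 * Sketch (not from this patch): callers pair these like any other
 * get/put refcounting API; the function below is hypothetical. The
 * patch itself uses the pair in bxt_dpio_cmn_power_well_enable() to
 * keep the CMN_A well up while the GRC calibration value is copied.
 */
static void example_power_well_user(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	intel_power_well_get(dev_priv, power_well);	/* enables on 0 -> 1 */
	/* ... access hardware behind the well ... */
	intel_power_well_put(dev_priv, power_well);	/* disables on 1 -> 0 */
}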
/*
* We should only use the power well if we explicitly asked the hardware to
* enable it, so check if it's enabled and also check if we've requested it to
@@ -267,7 +287,7 @@ void intel_display_set_init_power(struct drm_i915_private *dev_priv,
*/
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
/*
* After we re-enable the power well, if we touch VGA register 0x3d5
@@ -298,7 +318,7 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
/*
* After we re-enable the power well, if we touch VGA register 0x3d5
@@ -345,8 +365,11 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
if (!is_enabled) {
DRM_DEBUG_KMS("Enabling power well\n");
- if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
- HSW_PWR_WELL_STATE_ENABLED), 20))
+ if (intel_wait_for_register(dev_priv,
+ HSW_PWR_WELL_DRIVER,
+ HSW_PWR_WELL_STATE_ENABLED,
+ HSW_PWR_WELL_STATE_ENABLED,
+ 20))
DRM_ERROR("Timeout enabling power well\n");
hsw_power_well_post_enable(dev_priv);
}
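/*
 * Note on the wait_for() -> intel_wait_for_register() conversions made
 * throughout this patch: assuming the helper polls until
 * (I915_READ(reg) & mask) == value or gives up after timeout_ms, the
 * transformation is mechanical:
 *
 *	if (wait_for((I915_READ(REG) & MASK) == VALUE, MS))
 *		DRM_ERROR("timeout\n");
 *
 * becomes
 *
 *	if (intel_wait_for_register(dev_priv, REG, MASK, VALUE, MS))
 *		DRM_ERROR("timeout\n");
 *
 * Both return nonzero on timeout. "Bits set" waits pass the mask again
 * as the value (as here); "bits clear" waits pass 0.
 */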
@@ -419,6 +442,16 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
BIT(POWER_DOMAIN_MODESET) | \
BIT(POWER_DOMAIN_AUX_A) | \
BIT(POWER_DOMAIN_INIT))
+#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
+ BIT(POWER_DOMAIN_AUX_A) | \
+ BIT(POWER_DOMAIN_INIT))
+#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT(POWER_DOMAIN_AUX_B) | \
+ BIT(POWER_DOMAIN_AUX_C) | \
+ BIT(POWER_DOMAIN_INIT))
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
@@ -548,6 +581,7 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv)
DRM_DEBUG_KMS("Enabling DC9\n");
+ intel_power_sequencer_reset(dev_priv);
gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}
@@ -669,8 +703,11 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
switch (power_well->data) {
case SKL_DISP_PW_1:
- if (wait_for((I915_READ(SKL_FUSE_STATUS) &
- SKL_FUSE_PG0_DIST_STATUS), 1)) {
+ if (intel_wait_for_register(dev_priv,
+ SKL_FUSE_STATUS,
+ SKL_FUSE_PG0_DIST_STATUS,
+ SKL_FUSE_PG0_DIST_STATUS,
+ 1)) {
DRM_ERROR("PG0 not enabled\n");
return;
}
@@ -731,12 +768,18 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
if (check_fuse_status) {
if (power_well->data == SKL_DISP_PW_1) {
- if (wait_for((I915_READ(SKL_FUSE_STATUS) &
- SKL_FUSE_PG1_DIST_STATUS), 1))
+ if (intel_wait_for_register(dev_priv,
+ SKL_FUSE_STATUS,
+ SKL_FUSE_PG1_DIST_STATUS,
+ SKL_FUSE_PG1_DIST_STATUS,
+ 1))
DRM_ERROR("PG1 distributing status timeout\n");
} else if (power_well->data == SKL_DISP_PW_2) {
- if (wait_for((I915_READ(SKL_FUSE_STATUS) &
- SKL_FUSE_PG2_DIST_STATUS), 1))
+ if (intel_wait_for_register(dev_priv,
+ SKL_FUSE_STATUS,
+ SKL_FUSE_PG2_DIST_STATUS,
+ SKL_FUSE_PG2_DIST_STATUS,
+ 1))
DRM_ERROR("PG2 distributing status timeout\n");
}
}
@@ -800,21 +843,99 @@ static void skl_power_well_disable(struct drm_i915_private *dev_priv,
skl_set_power_well(dev_priv, power_well, false);
}
+static enum dpio_phy bxt_power_well_to_phy(struct i915_power_well *power_well)
+{
+ enum skl_disp_power_wells power_well_id = power_well->data;
+
+ return power_well_id == BXT_DPIO_CMN_A ? DPIO_PHY1 : DPIO_PHY0;
+}
+
+static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum skl_disp_power_wells power_well_id = power_well->data;
+ struct i915_power_well *cmn_a_well;
+
+ if (power_well_id == BXT_DPIO_CMN_BC) {
+ /*
+ * We need to copy the GRC calibration value from the eDP PHY,
+ * so make sure it's powered up.
+ */
+ cmn_a_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
+ intel_power_well_get(dev_priv, cmn_a_well);
+ }
+
+ bxt_ddi_phy_init(dev_priv, bxt_power_well_to_phy(power_well));
+
+ if (power_well_id == BXT_DPIO_CMN_BC)
+ intel_power_well_put(dev_priv, cmn_a_well);
+}
+
+static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ bxt_ddi_phy_uninit(dev_priv, bxt_power_well_to_phy(power_well));
+}
+
+static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ return bxt_ddi_phy_is_enabled(dev_priv,
+ bxt_power_well_to_phy(power_well));
+}
+
+static void bxt_dpio_cmn_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ if (power_well->count > 0)
+ bxt_dpio_cmn_power_well_enable(dev_priv, power_well);
+ else
+ bxt_dpio_cmn_power_well_disable(dev_priv, power_well);
+}
+
+static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_well *power_well;
+
+ power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
+ if (power_well->count > 0)
+ bxt_ddi_phy_verify_state(dev_priv,
+ bxt_power_well_to_phy(power_well));
+
+ power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
+ if (power_well->count > 0)
+ bxt_ddi_phy_verify_state(dev_priv,
+ bxt_power_well_to_phy(power_well));
+}
+
static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
}
+static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
+{
+ u32 tmp = I915_READ(DBUF_CTL);
+
+ WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
+ (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
+ "Unexpected DBuf power power state (0x%08x)\n", tmp);
+}
+
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
- if (IS_BROXTON(dev_priv)) {
- broxton_cdclk_verify_state(dev_priv);
- broxton_ddi_phy_verify_state(dev_priv);
- }
+ WARN_ON(dev_priv->cdclk_freq !=
+ dev_priv->display.get_display_clock_speed(&dev_priv->drm));
+
+ gen9_assert_dbuf_enabled(dev_priv);
+
+ if (IS_BROXTON(dev_priv))
+ bxt_verify_ddi_phy_power_wells(dev_priv);
}
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
@@ -948,6 +1069,11 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
*/
I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
I915_WRITE(CBR1_VLV, 0);
+
+ WARN_ON(dev_priv->rawclk_freq == 0);
+
+ I915_WRITE(RAWCLK_FREQ_VLV,
+ DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
}
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
@@ -963,7 +1089,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
*
* CHV DPLL B/C have some issues if VGA mode is enabled.
*/
- for_each_pipe(dev_priv->dev, pipe) {
+ for_each_pipe(&dev_priv->drm, pipe) {
u32 val = I915_READ(DPLL(pipe));
val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
@@ -989,12 +1115,12 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
intel_hpd_init(dev_priv);
/* Re-enable the ADPA, if we have one */
- for_each_intel_encoder(dev_priv->dev, encoder) {
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
if (encoder->type == INTEL_OUTPUT_ANALOG)
intel_crt_reset(&encoder->base);
}
- i915_redisable_vga_power_on(dev_priv->dev);
+ i915_redisable_vga_power_on(&dev_priv->drm);
}
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
@@ -1004,9 +1130,9 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
/* make sure we're done processing display irqs */
- synchronize_irq(dev_priv->dev->irq);
+ synchronize_irq(dev_priv->drm.irq);
- vlv_power_sequencer_reset(dev_priv);
+ intel_power_sequencer_reset(dev_priv);
intel_hpd_poll_init(dev_priv);
}
@@ -1101,7 +1227,6 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
u32 phy_control = dev_priv->chv_phy_control;
u32 phy_status = 0;
u32 phy_status_mask = 0xffffffff;
- u32 tmp;
/*
* The BIOS can leave the PHY in some weird state
@@ -1189,10 +1314,14 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
* The PHY may be busy with some initial calibration and whatnot,
* so the power state can take a while to actually change.
*/
- if (wait_for((tmp = I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask) == phy_status, 10))
- WARN(phy_status != tmp,
- "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
- tmp, phy_status, dev_priv->chv_phy_control);
+ if (intel_wait_for_register(dev_priv,
+ DISPLAY_PHY_STATUS,
+ phy_status_mask,
+ phy_status,
+ 10))
+ DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
+ I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
+ phy_status, dev_priv->chv_phy_control);
}
#undef BITS_SET
@@ -1220,7 +1349,11 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
vlv_set_power_well(dev_priv, power_well, true);
/* Poll for phypwrgood signal */
- if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
+ if (intel_wait_for_register(dev_priv,
+ DISPLAY_PHY_STATUS,
+ PHY_POWERGOOD(phy),
+ PHY_POWERGOOD(phy),
+ 1))
DRM_ERROR("Display PHY %d is not power up\n", phy);
mutex_lock(&dev_priv->sb_lock);
@@ -1510,10 +1643,8 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well;
int i;
- for_each_power_well(i, power_well, BIT(domain), power_domains) {
- if (!power_well->count++)
- intel_power_well_enable(dev_priv, power_well);
- }
+ for_each_power_well(i, power_well, BIT(domain), power_domains)
+ intel_power_well_get(dev_priv, power_well);
power_domains->domain_use_count[domain]++;
}
@@ -1607,14 +1738,8 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
intel_display_power_domain_str(domain));
power_domains->domain_use_count[domain]--;
- for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
- WARN(!power_well->count,
- "Use count on power well %s is already zero",
- power_well->name);
-
- if (!--power_well->count)
- intel_power_well_disable(dev_priv, power_well);
- }
+ for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
+ intel_power_well_put(dev_priv, power_well);
mutex_unlock(&power_domains->lock);
@@ -1785,6 +1910,13 @@ static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
.is_enabled = gen9_dc_off_power_well_enabled,
};
+static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
+ .sync_hw = bxt_dpio_cmn_power_well_sync_hw,
+ .enable = bxt_dpio_cmn_power_well_enable,
+ .disable = bxt_dpio_cmn_power_well_disable,
+ .is_enabled = bxt_dpio_cmn_power_well_enabled,
+};
+
static struct i915_power_well hsw_power_wells[] = {
{
.name = "always-on",
@@ -2021,6 +2153,18 @@ static struct i915_power_well bxt_power_wells[] = {
.ops = &skl_power_well_ops,
.data = SKL_DISP_PW_2,
},
+ {
+ .name = "dpio-common-a",
+ .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
+ .ops = &bxt_dpio_cmn_power_well_ops,
+ .data = BXT_DPIO_CMN_A,
+ },
+ {
+ .name = "dpio-common-bc",
+ .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
+ .ops = &bxt_dpio_cmn_power_well_ops,
+ .data = BXT_DPIO_CMN_BC,
+ },
};
static int
@@ -2140,7 +2284,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
*/
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
{
- struct device *device = &dev_priv->dev->pdev->dev;
+ struct device *device = &dev_priv->drm.pdev->dev;
/*
* The i915.ko module is still not prepared to be loaded when
@@ -2180,6 +2324,28 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
mutex_unlock(&power_domains->lock);
}
+static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
+ POSTING_READ(DBUF_CTL);
+
+ udelay(10);
+
+ if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
+ DRM_ERROR("DBuf power enable timeout\n");
+}
+
+static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
+ POSTING_READ(DBUF_CTL);
+
+ udelay(10);
+
+ if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
+ DRM_ERROR("DBuf power disable timeout!\n");
+}
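/*
 * Note: both helpers use the request/acknowledge handshake that
 * gen9_assert_dbuf_enabled() checks from the DC-off path: set or clear
 * DBUF_POWER_REQUEST, post the write, allow the hardware 10us, then
 * verify that DBUF_POWER_STATE tracked the request.
 */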
+
static void skl_display_core_init(struct drm_i915_private *dev_priv,
bool resume)
{
@@ -2204,12 +2370,11 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
mutex_unlock(&power_domains->lock);
- if (!resume)
- return;
-
skl_init_cdclk(dev_priv);
- if (dev_priv->csr.dmc_payload)
+ gen9_dbuf_enable(dev_priv);
+
+ if (resume && dev_priv->csr.dmc_payload)
intel_csr_load_program(dev_priv);
}
@@ -2220,6 +2385,8 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ gen9_dbuf_disable(dev_priv);
+
skl_uninit_cdclk(dev_priv);
/* The spec doesn't call for removing the reset handshake flag */
@@ -2263,11 +2430,9 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv,
mutex_unlock(&power_domains->lock);
- broxton_init_cdclk(dev_priv);
- broxton_ddi_phy_init(dev_priv);
+ bxt_init_cdclk(dev_priv);
- broxton_cdclk_verify_state(dev_priv);
- broxton_ddi_phy_verify_state(dev_priv);
+ gen9_dbuf_enable(dev_priv);
if (resume && dev_priv->csr.dmc_payload)
intel_csr_load_program(dev_priv);
@@ -2280,8 +2445,9 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
- broxton_ddi_phy_uninit(dev_priv);
- broxton_uninit_cdclk(dev_priv);
+ gen9_dbuf_disable(dev_priv);
+
+ bxt_uninit_cdclk(dev_priv);
/* The spec doesn't call for removing the reset handshake flag */
@@ -2412,13 +2578,14 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
/**
* intel_power_domains_init_hw - initialize hardware power domain state
* @dev_priv: i915 device instance
+ * @resume: true if called from a resume code path
*
* This function initializes the hardware power domain state and enables all
* power domains using intel_display_set_init_power().
*/
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
power_domains->initializing = true;
@@ -2480,7 +2647,7 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
*/
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct device *device = &dev->pdev->dev;
pm_runtime_get_sync(device);
@@ -2501,7 +2668,7 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
*/
bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct device *device = &dev->pdev->dev;
if (IS_ENABLED(CONFIG_PM)) {
@@ -2543,7 +2710,7 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
*/
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct device *device = &dev->pdev->dev;
assert_rpm_wakelock_held(dev_priv);
@@ -2562,7 +2729,7 @@ void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
*/
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct device *device = &dev->pdev->dev;
assert_rpm_wakelock_held(dev_priv);
@@ -2585,7 +2752,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
*/
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->drm;
struct device *device = &dev->pdev->dev;
pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 2128fae56..e378f3536 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -240,7 +240,7 @@ intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
{
struct drm_device *dev = intel_sdvo->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 bval = val, cval = val;
int i;
@@ -1195,7 +1195,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
{
struct drm_device *dev = intel_encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc);
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
struct drm_display_mode *mode = &crtc->config->base.mode;
@@ -1330,7 +1330,7 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
u16 active_outputs = 0;
u32 tmp;
@@ -1353,7 +1353,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_sdvo_dtd dtd;
int encoder_pixel_multiplier = 0;
@@ -1436,7 +1436,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
static void intel_disable_sdvo(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
u32 temp;
@@ -1471,7 +1471,7 @@ static void intel_disable_sdvo(struct intel_encoder *encoder)
temp &= ~SDVO_ENABLE;
intel_sdvo_write_sdvox(intel_sdvo, temp);
- intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
+ intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
@@ -1489,7 +1489,7 @@ static void pch_post_disable_sdvo(struct intel_encoder *encoder)
static void intel_enable_sdvo(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
u32 temp;
@@ -1633,7 +1633,7 @@ intel_sdvo_get_edid(struct drm_connector *connector)
static struct edid *
intel_sdvo_get_analog_edid(struct drm_connector *connector)
{
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
return drm_get_edid(connector,
intel_gmbus_get_adapter(dev_priv,
@@ -1916,7 +1916,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct drm_display_mode *newmode;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
@@ -2001,7 +2001,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
uint16_t temp_value;
uint8_t cmd;
int ret;
@@ -2177,12 +2177,39 @@ done:
#undef CHECK_PROPERTY
}
+static int
+intel_sdvo_connector_register(struct drm_connector *connector)
+{
+ struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+ int ret;
+
+ ret = intel_connector_register(connector);
+ if (ret)
+ return ret;
+
+ return sysfs_create_link(&connector->kdev->kobj,
+ &sdvo->ddc.dev.kobj,
+ sdvo->ddc.dev.kobj.name);
+}
+
+static void
+intel_sdvo_connector_unregister(struct drm_connector *connector)
+{
+ struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+
+ sysfs_remove_link(&connector->kdev->kobj,
+ sdvo->ddc.dev.kobj.name);
+ intel_connector_unregister(connector);
+}
+
static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.detect = intel_sdvo_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_sdvo_set_property,
.atomic_get_property = intel_connector_atomic_get_property,
+ .late_register = intel_sdvo_connector_register,
+ .early_unregister = intel_sdvo_connector_unregister,
.destroy = intel_sdvo_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
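/*
 * Note: connector registration moves out of intel_sdvo_connector_init()
 * and into the DRM core's ->late_register()/->early_unregister() hooks
 * above, so the sysfs link to the DDC adapter is now created and
 * removed exactly when the connector itself is registered and
 * unregistered.
 */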
@@ -2191,7 +2218,6 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
.get_modes = intel_sdvo_get_modes,
.mode_valid = intel_sdvo_mode_valid,
- .best_encoder = intel_best_encoder,
};
static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
@@ -2312,7 +2338,7 @@ intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
static u8
intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct sdvo_device_mapping *my_mapping, *other_mapping;
if (sdvo->port == PORT_B) {
@@ -2346,20 +2372,6 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
return 0x72;
}
-static void
-intel_sdvo_connector_unregister(struct intel_connector *intel_connector)
-{
- struct drm_connector *drm_connector;
- struct intel_sdvo *sdvo_encoder;
-
- drm_connector = &intel_connector->base;
- sdvo_encoder = intel_attached_sdvo(&intel_connector->base);
-
- sysfs_remove_link(&drm_connector->kdev->kobj,
- sdvo_encoder->ddc.dev.kobj.name);
- intel_connector_unregister(intel_connector);
-}
-
static int
intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
struct intel_sdvo *encoder)
@@ -2382,27 +2394,10 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
connector->base.base.doublescan_allowed = 0;
connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
connector->base.get_hw_state = intel_sdvo_connector_get_hw_state;
- connector->base.unregister = intel_sdvo_connector_unregister;
intel_connector_attach_encoder(&connector->base, &encoder->base);
- ret = drm_connector_register(drm_connector);
- if (ret < 0)
- goto err1;
-
- ret = sysfs_create_link(&drm_connector->kdev->kobj,
- &encoder->ddc.dev.kobj,
- encoder->ddc.dev.kobj.name);
- if (ret < 0)
- goto err2;
return 0;
-
-err2:
- drm_connector_unregister(drm_connector);
-err1:
- drm_connector_cleanup(drm_connector);
-
- return ret;
}
static void
@@ -2529,7 +2524,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
return true;
err:
- drm_connector_unregister(connector);
intel_sdvo_destroy(connector);
return false;
}
@@ -2608,7 +2602,6 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
return true;
err:
- drm_connector_unregister(connector);
intel_sdvo_destroy(connector);
return false;
}
@@ -2959,7 +2952,7 @@ static void assert_sdvo_port_valid(const struct drm_i915_private *dev_priv,
bool intel_sdvo_init(struct drm_device *dev,
i915_reg_t sdvo_reg, enum port port)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *intel_encoder;
struct intel_sdvo *intel_sdvo;
int i;
@@ -2981,7 +2974,7 @@ bool intel_sdvo_init(struct drm_device *dev,
intel_encoder = &intel_sdvo->base;
intel_encoder->type = INTEL_OUTPUT_SDVO;
drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0,
- NULL);
+ "SDVO %c", port_name(port));
/* Read the regs to test if we can talk to the device */
for (i = 0; i < 0x40; i++) {
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index c3998188c..1a840bf92 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -51,7 +51,9 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
- if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
+ if (intel_wait_for_register(dev_priv,
+ VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
+ 5)) {
DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
is_read ? "read" : "write");
return -EAGAIN;
@@ -62,7 +64,9 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
I915_WRITE(VLV_IOSF_DATA, *val);
I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
- if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
+ if (intel_wait_for_register(dev_priv,
+ VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
+ 5)) {
DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n",
is_read ? "read" : "write");
return -ETIMEDOUT;
@@ -202,8 +206,9 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
u32 value = 0;
WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
- if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
- 100)) {
+ if (intel_wait_for_register(dev_priv,
+ SBI_CTL_STAT, SBI_BUSY, 0,
+ 100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
return 0;
}
@@ -216,8 +221,11 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
- if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
- 100)) {
+ if (intel_wait_for_register(dev_priv,
+ SBI_CTL_STAT,
+ SBI_BUSY | SBI_RESPONSE_FAIL,
+ 0,
+ 100)) {
DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
return 0;
}
@@ -232,8 +240,9 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
- if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
- 100)) {
+ if (intel_wait_for_register(dev_priv,
+ SBI_CTL_STAT, SBI_BUSY, 0,
+ 100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
return;
}
@@ -247,8 +256,11 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
- if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
- 100)) {
+ if (intel_wait_for_register(dev_priv,
+ SBI_CTL_STAT,
+ SBI_BUSY | SBI_RESPONSE_FAIL,
+ 0,
+ 100)) {
DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
return;
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 0f3e2303e..7c08e4f29 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -53,8 +53,8 @@ format_is_yuv(uint32_t format)
}
}
-static int usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
- int usecs)
+int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
+ int usecs)
{
/* paranoia */
if (!adjusted_mode->crtc_htotal)
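/*
 * Note (the body is truncated by the hunk above): assuming the usual
 * i915 formula, the conversion is
 * scanlines = DIV_ROUND_UP(usecs * crtc_clock, 1000 * crtc_htotal).
 * For example, at a 148500 kHz pixel clock with an htotal of 2200, a
 * 100 us window covers DIV_ROUND_UP(100 * 148500, 1000 * 2200) = 7
 * scanlines.
 */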
@@ -80,9 +80,7 @@ static int usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
*/
void intel_pipe_update_start(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
- enum pipe pipe = crtc->pipe;
long timeout = msecs_to_jiffies_timeout(1);
int scanline, min, max, vblank_start;
wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
@@ -93,7 +91,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
vblank_start = DIV_ROUND_UP(vblank_start, 2);
/* FIXME needs to be calibrated sensibly */
- min = vblank_start - usecs_to_scanlines(adjusted_mode, 100);
+ min = vblank_start - intel_usecs_to_scanlines(adjusted_mode, 100);
max = vblank_start - 1;
local_irq_disable();
@@ -139,8 +137,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
crtc->debug.scanline_start = scanline;
crtc->debug.start_vbl_time = ktime_get();
- crtc->debug.start_vbl_count =
- dev->driver->get_vblank_counter(dev, pipe);
+ crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
trace_i915_pipe_update_vblank_evaded(crtc);
}
@@ -154,16 +151,35 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
* re-enables interrupts and verifies the update was actually completed
* before a vblank using the value of @start_vbl_count.
*/
-void intel_pipe_update_end(struct intel_crtc *crtc)
+void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work)
{
- struct drm_device *dev = crtc->base.dev;
enum pipe pipe = crtc->pipe;
int scanline_end = intel_get_crtc_scanline(crtc);
- u32 end_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
+ u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
ktime_t end_vbl_time = ktime_get();
+ if (work) {
+ work->flip_queued_vblank = end_vbl_count;
+ smp_mb__before_atomic();
+ atomic_set(&work->pending, 1);
+ }
+
trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end);
+ /* We're still in the vblank-evade critical section, so this can't race.
+ * It would be slightly nicer to just grab the vblank count and arm the
+ * event outside of the critical section - the spinlock might spin for a
+ * while ... */
+ if (crtc->base.state->event) {
+ WARN_ON(drm_crtc_vblank_get(&crtc->base) != 0);
+
+ spin_lock(&crtc->base.dev->event_lock);
+ drm_crtc_arm_vblank_event(&crtc->base, crtc->base.state->event);
+ spin_unlock(&crtc->base.dev->event_lock);
+
+ crtc->base.state->event = NULL;
+ }
+
local_irq_enable();
if (crtc->debug.start_vbl_count &&
@@ -183,7 +199,7 @@ skl_update_plane(struct drm_plane *drm_plane,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = drm_plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -203,8 +219,6 @@ skl_update_plane(struct drm_plane *drm_plane,
uint32_t y = plane_state->src.y1 >> 16;
uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
- const struct intel_scaler *scaler =
- &crtc_state->scaler_state.scalers[plane_state->scaler_id];
plane_ctl = PLANE_CTL_ENABLE |
PLANE_CTL_PIPE_GAMMA_ENABLE |
@@ -260,13 +274,16 @@ skl_update_plane(struct drm_plane *drm_plane,
/* program plane scaler */
if (plane_state->scaler_id >= 0) {
- uint32_t ps_ctrl = 0;
int scaler_id = plane_state->scaler_id;
+ const struct intel_scaler *scaler;
DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane,
PS_PLANE_SEL(plane));
- ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode;
- I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
+
+ scaler = &crtc_state->scaler_state.scalers[scaler_id];
+
+ I915_WRITE(SKL_PS_CTRL(pipe, scaler_id),
+ PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode);
I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id),
@@ -286,7 +303,7 @@ static void
skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
{
struct drm_device *dev = dplane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(dplane);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
@@ -300,7 +317,7 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
static void
chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
{
- struct drm_i915_private *dev_priv = intel_plane->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
int plane = intel_plane->plane;
/* Seems RGB data bypasses the CSC always */
@@ -342,7 +359,7 @@ vlv_update_plane(struct drm_plane *dplane,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = dplane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(dplane);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -468,7 +485,7 @@ static void
vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
{
struct drm_device *dev = dplane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(dplane);
int pipe = intel_plane->pipe;
int plane = intel_plane->plane;
@@ -485,7 +502,7 @@ ivb_update_plane(struct drm_plane *plane,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -607,7 +624,7 @@ static void
ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
{
struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe;
@@ -626,7 +643,7 @@ ilk_update_plane(struct drm_plane *plane,
const struct intel_plane_state *plane_state)
{
struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -736,7 +753,7 @@ static void
ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
{
struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe;
@@ -1111,10 +1128,18 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
possible_crtcs = (1 << pipe);
- ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
- &intel_plane_funcs,
- plane_formats, num_plane_formats,
- DRM_PLANE_TYPE_OVERLAY, NULL);
+ if (INTEL_INFO(dev)->gen >= 9)
+ ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
+ &intel_plane_funcs,
+ plane_formats, num_plane_formats,
+ DRM_PLANE_TYPE_OVERLAY,
+ "plane %d%c", plane + 2, pipe_name(pipe));
+ else
+ ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
+ &intel_plane_funcs,
+ plane_formats, num_plane_formats,
+ DRM_PLANE_TYPE_OVERLAY,
+ "sprite %c", sprite_name(pipe, plane));
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 223129d3c..49136ad54 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -826,7 +826,7 @@ static bool
intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp = I915_READ(TV_CTL);
if (!(tmp & TV_ENC_ENABLE))
@@ -841,7 +841,7 @@ static void
intel_enable_tv(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/* Prevents vblank waits from timing out in intel_tv_detect_type() */
intel_wait_for_vblank(encoder->base.dev,
@@ -854,7 +854,7 @@ static void
intel_disable_tv(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
}
@@ -1013,7 +1013,7 @@ static void set_color_conversion(struct drm_i915_private *dev_priv,
static void intel_tv_pre_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_tv *intel_tv = enc_to_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
@@ -1173,7 +1173,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
struct drm_crtc *crtc = connector->state->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = connector->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u32 tv_ctl, save_tv_ctl;
u32 tv_dac, save_tv_dac;
int type;
@@ -1501,6 +1501,8 @@ out:
static const struct drm_connector_funcs intel_tv_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.detect = intel_tv_detect,
+ .late_register = intel_connector_register,
+ .early_unregister = intel_connector_unregister,
.destroy = intel_tv_destroy,
.set_property = intel_tv_set_property,
.atomic_get_property = intel_connector_atomic_get_property,
@@ -1512,7 +1514,6 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
.mode_valid = intel_tv_mode_valid,
.get_modes = intel_tv_get_modes,
- .best_encoder = intel_best_encoder,
};
static const struct drm_encoder_funcs intel_tv_enc_funcs = {
@@ -1522,7 +1523,7 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
void
intel_tv_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_connector *connector;
struct intel_tv *intel_tv;
struct intel_encoder *intel_encoder;
@@ -1591,7 +1592,7 @@ intel_tv_init(struct drm_device *dev)
DRM_MODE_CONNECTOR_SVIDEO);
drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
- DRM_MODE_ENCODER_TVDAC, NULL);
+ DRM_MODE_ENCODER_TVDAC, "TV");
intel_encoder->compute_config = intel_tv_compute_config;
intel_encoder->get_config = intel_tv_get_config;
@@ -1600,7 +1601,6 @@ intel_tv_init(struct drm_device *dev)
intel_encoder->disable = intel_disable_tv;
intel_encoder->get_hw_state = intel_tv_get_hw_state;
intel_connector->get_hw_state = intel_connector_get_hw_state;
- intel_connector->unregister = intel_connector_unregister;
intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_TVOUT;
@@ -1642,5 +1642,4 @@ intel_tv_init(struct drm_device *dev)
drm_object_attach_property(&connector->base,
dev->mode_config.tv_bottom_margin_property,
intel_tv->margin[TV_MARGIN_BOTTOM]);
- drm_connector_register(connector);
}
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 4f1dfe616..ff80a81b1 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -248,9 +248,9 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
-void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
+void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
+ bool restore)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
struct intel_uncore_forcewake_domain *domain;
int retry_count = 100;
@@ -304,7 +304,7 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
if (fw)
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
- if (IS_GEN6(dev) || IS_GEN7(dev))
+ if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
dev_priv->uncore.fifo_count =
fifo_free_entries(dev_priv);
}
@@ -400,43 +400,42 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
return false;
}
-static void __intel_uncore_early_sanitize(struct drm_device *dev,
+static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
bool restore_forcewake)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
/* clear out unclaimed reg detection bit */
if (check_for_unclaimed_mmio(dev_priv))
DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
/* clear out old GT FIFO errors */
- if (IS_GEN6(dev) || IS_GEN7(dev))
+ if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
__raw_i915_write32(dev_priv, GTFIFODBG,
__raw_i915_read32(dev_priv, GTFIFODBG));
/* WaDisableShadowRegForCpd:chv */
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
__raw_i915_write32(dev_priv, GTFIFOCTL,
__raw_i915_read32(dev_priv, GTFIFOCTL) |
GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
GT_FIFO_CTL_RC6_POLICY_STALL);
}
- intel_uncore_forcewake_reset(dev, restore_forcewake);
+ intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
}
-void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
+void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
+ bool restore_forcewake)
{
- __intel_uncore_early_sanitize(dev, restore_forcewake);
- i915_check_and_clear_faults(dev);
+ __intel_uncore_early_sanitize(dev_priv, restore_forcewake);
+ i915_check_and_clear_faults(dev_priv);
}
-void intel_uncore_sanitize(struct drm_device *dev)
+void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
- i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
+ i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);
/* BIOS often leaves RC6 enabled, but disable it for hw init */
- intel_disable_gt_powersave(dev);
+ intel_disable_gt_powersave(dev_priv);
}
static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
@@ -1233,14 +1232,12 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
fw_domain_reset(d);
}
-static void intel_uncore_fw_domains_init(struct drm_device *dev)
+static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
if (INTEL_INFO(dev_priv)->gen <= 5)
return;
- if (IS_GEN9(dev)) {
+ if (IS_GEN9(dev_priv)) {
dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
@@ -1251,9 +1248,9 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
FORCEWAKE_ACK_BLITTER_GEN9);
fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
- } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
- if (!IS_CHERRYVIEW(dev))
+ if (!IS_CHERRYVIEW(dev_priv))
dev_priv->uncore.funcs.force_wake_put =
fw_domains_put_with_fifo;
else
@@ -1262,17 +1259,17 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
- } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
dev_priv->uncore.funcs.force_wake_get =
fw_domains_get_with_thread_status;
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev_priv))
dev_priv->uncore.funcs.force_wake_put =
fw_domains_put_with_fifo;
else
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
- } else if (IS_IVYBRIDGE(dev)) {
+ } else if (IS_IVYBRIDGE(dev_priv)) {
u32 ecobus;
/* IVB configs may use multi-threaded forcewake */
@@ -1302,11 +1299,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE_MT, FORCEWAKE_MT_ACK);
- mutex_lock(&dev->struct_mutex);
+ spin_lock_irq(&dev_priv->uncore.lock);
fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
ecobus = __raw_i915_read32(dev_priv, ECOBUS);
fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
- mutex_unlock(&dev->struct_mutex);
+ spin_unlock_irq(&dev_priv->uncore.lock);
if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
@@ -1314,7 +1311,7 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE, FORCEWAKE_ACK);
}
- } else if (IS_GEN6(dev)) {
+ } else if (IS_GEN6(dev_priv)) {
dev_priv->uncore.funcs.force_wake_get =
fw_domains_get_with_thread_status;
dev_priv->uncore.funcs.force_wake_put =
@@ -1327,26 +1324,24 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
WARN_ON(dev_priv->uncore.fw_domains == 0);
}
-void intel_uncore_init(struct drm_device *dev)
+void intel_uncore_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- i915_check_vgpu(dev);
+ i915_check_vgpu(dev_priv);
intel_uncore_edram_detect(dev_priv);
- intel_uncore_fw_domains_init(dev);
- __intel_uncore_early_sanitize(dev, false);
+ intel_uncore_fw_domains_init(dev_priv);
+ __intel_uncore_early_sanitize(dev_priv, false);
dev_priv->uncore.unclaimed_mmio_check = 1;
- switch (INTEL_INFO(dev)->gen) {
+ switch (INTEL_INFO(dev_priv)->gen) {
default:
case 9:
ASSIGN_WRITE_MMIO_VFUNCS(gen9);
ASSIGN_READ_MMIO_VFUNCS(gen9);
break;
case 8:
- if (IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
ASSIGN_WRITE_MMIO_VFUNCS(chv);
ASSIGN_READ_MMIO_VFUNCS(chv);
@@ -1357,13 +1352,13 @@ void intel_uncore_init(struct drm_device *dev)
break;
case 7:
case 6:
- if (IS_HASWELL(dev)) {
+ if (IS_HASWELL(dev_priv)) {
ASSIGN_WRITE_MMIO_VFUNCS(hsw);
} else {
ASSIGN_WRITE_MMIO_VFUNCS(gen6);
}
- if (IS_VALLEYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev_priv)) {
ASSIGN_READ_MMIO_VFUNCS(vlv);
} else {
ASSIGN_READ_MMIO_VFUNCS(gen6);
@@ -1381,24 +1376,24 @@ void intel_uncore_init(struct drm_device *dev)
break;
}
- if (intel_vgpu_active(dev)) {
+ if (intel_vgpu_active(dev_priv)) {
ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
ASSIGN_READ_MMIO_VFUNCS(vgpu);
}
- i915_check_and_clear_faults(dev);
+ i915_check_and_clear_faults(dev_priv);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS
-void intel_uncore_fini(struct drm_device *dev)
+void intel_uncore_fini(struct drm_i915_private *dev_priv)
{
/* Paranoia: make sure we have disabled everything before we exit. */
- intel_uncore_sanitize(dev);
- intel_uncore_forcewake_reset(dev, false);
+ intel_uncore_sanitize(dev_priv);
+ intel_uncore_forcewake_reset(dev_priv, false);
}
-#define GEN_RANGE(l, h) GENMASK(h, l)
+#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)
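Editor's note: the shifted GENMASK() bounds track the gen_mask lookup used in the whitelist match below: INTEL_INFO()->gen_mask stores the device generation as bit (gen - 1), so a range of generations must now be built one bit lower than before. A standalone userspace sketch of the matching logic, with GENMASK() re-implemented here purely for illustration:

    #include <stdio.h>
    #include <stdint.h>

    /* userspace stand-in for the kernel's GENMASK(h, l): bits l..h set */
    #define GENMASK(h, l)   ((~0u >> (31 - (h))) & ~((1u << (l)) - 1u))
    #define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)

    int main(void)
    {
        uint32_t range = GEN_RANGE(4, 9);   /* gens 4..9 -> bits 3..8 */
        uint32_t gen6_mask = 1u << (6 - 1); /* gen_mask of a gen6 part */

        printf("gen6 in range: %d\n", !!(range & gen6_mask)); /* prints 1 */
        return 0;
    }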
static const struct register_whitelist {
i915_reg_t offset_ldw, offset_udw;
@@ -1414,7 +1409,7 @@ static const struct register_whitelist {
int i915_reg_read_ioctl(struct drm_device *dev,
void *data, struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_reg_read *reg = data;
struct register_whitelist const *entry = whitelist;
unsigned size;
@@ -1423,7 +1418,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
- (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
+ (INTEL_INFO(dev)->gen_mask & entry->gen_bitmask))
break;
}
@@ -1467,83 +1462,47 @@ out:
return ret;
}
-int i915_get_reset_stats_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_reset_stats *args = data;
- struct i915_ctx_hang_stats *hs;
- struct intel_context *ctx;
- int ret;
-
- if (args->flags || args->pad)
- return -EINVAL;
-
- if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
- if (IS_ERR(ctx)) {
- mutex_unlock(&dev->struct_mutex);
- return PTR_ERR(ctx);
- }
- hs = &ctx->hang_stats;
-
- if (capable(CAP_SYS_ADMIN))
- args->reset_count = i915_reset_count(&dev_priv->gpu_error);
- else
- args->reset_count = 0;
-
- args->batch_active = hs->batch_active;
- args->batch_pending = hs->batch_pending;
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-static int i915_reset_complete(struct drm_device *dev)
+static int i915_reset_complete(struct pci_dev *pdev)
{
u8 gdrst;
- pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
+ pci_read_config_byte(pdev, I915_GDRST, &gdrst);
return (gdrst & GRDOM_RESET_STATUS) == 0;
}
-static int i915_do_reset(struct drm_device *dev, unsigned engine_mask)
+static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+
/* assert reset for at least 20 usec */
- pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
+ pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
udelay(20);
- pci_write_config_byte(dev->pdev, I915_GDRST, 0);
+ pci_write_config_byte(pdev, I915_GDRST, 0);
- return wait_for(i915_reset_complete(dev), 500);
+ return wait_for(i915_reset_complete(pdev), 500);
}
-static int g4x_reset_complete(struct drm_device *dev)
+static int g4x_reset_complete(struct pci_dev *pdev)
{
u8 gdrst;
- pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
+ pci_read_config_byte(pdev, I915_GDRST, &gdrst);
return (gdrst & GRDOM_RESET_ENABLE) == 0;
}
-static int g33_do_reset(struct drm_device *dev, unsigned engine_mask)
+static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
- pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
- return wait_for(g4x_reset_complete(dev), 500);
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+
+ pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
+ return wait_for(g4x_reset_complete(pdev), 500);
}
-static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask)
+static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
int ret;
- pci_write_config_byte(dev->pdev, I915_GDRST,
+ pci_write_config_byte(pdev, I915_GDRST,
GRDOM_RENDER | GRDOM_RESET_ENABLE);
- ret = wait_for(g4x_reset_complete(dev), 500);
+ ret = wait_for(g4x_reset_complete(pdev), 500);
if (ret)
return ret;
@@ -1551,9 +1510,9 @@ static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask)
I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
POSTING_READ(VDECCLK_GATE_D);
- pci_write_config_byte(dev->pdev, I915_GDRST,
+ pci_write_config_byte(pdev, I915_GDRST,
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
- ret = wait_for(g4x_reset_complete(dev), 500);
+ ret = wait_for(g4x_reset_complete(pdev), 500);
if (ret)
return ret;
@@ -1561,27 +1520,29 @@ static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask)
I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
POSTING_READ(VDECCLK_GATE_D);
- pci_write_config_byte(dev->pdev, I915_GDRST, 0);
+ pci_write_config_byte(pdev, I915_GDRST, 0);
return 0;
}
-static int ironlake_do_reset(struct drm_device *dev, unsigned engine_mask)
+static int ironlake_do_reset(struct drm_i915_private *dev_priv,
+ unsigned engine_mask)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
I915_WRITE(ILK_GDSR,
ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
- ret = wait_for((I915_READ(ILK_GDSR) &
- ILK_GRDOM_RESET_ENABLE) == 0, 500);
+ ret = intel_wait_for_register(dev_priv,
+ ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
+ 500);
if (ret)
return ret;
I915_WRITE(ILK_GDSR,
ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
- ret = wait_for((I915_READ(ILK_GDSR) &
- ILK_GRDOM_RESET_ENABLE) == 0, 500);
+ ret = intel_wait_for_register(dev_priv,
+ ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
+ 500);
if (ret)
return ret;
@@ -1594,25 +1555,21 @@ static int ironlake_do_reset(struct drm_device *dev, unsigned engine_mask)
static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
u32 hw_domain_mask)
{
- int ret;
-
/* GEN6_GDRST is not in the gt power well, no need to check
* for fifo space for the write or forcewake the chip for
* the read
*/
__raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);
-#define ACKED ((__raw_i915_read32(dev_priv, GEN6_GDRST) & hw_domain_mask) == 0)
/* Spin waiting for the device to ack the reset requests */
- ret = wait_for(ACKED, 500);
-#undef ACKED
-
- return ret;
+ return intel_wait_for_register_fw(dev_priv,
+ GEN6_GDRST, hw_domain_mask, 0,
+ 500);
}
/**
* gen6_reset_engines - reset individual engines
- * @dev: DRM device
+ * @dev_priv: i915 device
* @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
*
* This function will reset the individual engines that are set in engine_mask.
@@ -1623,9 +1580,9 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
*
* Returns 0 on success, nonzero on error.
*/
-static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask)
+static int gen6_reset_engines(struct drm_i915_private *dev_priv,
+ unsigned engine_mask)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
const u32 hw_engine_mask[I915_NUM_ENGINES] = {
[RCS] = GEN6_GRDOM_RENDER,
@@ -1647,33 +1604,94 @@ static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask)
ret = gen6_hw_domain_reset(dev_priv, hw_mask);
- intel_uncore_forcewake_reset(dev, true);
+ intel_uncore_forcewake_reset(dev_priv, true);
return ret;
}
-static int wait_for_register_fw(struct drm_i915_private *dev_priv,
- i915_reg_t reg,
- const u32 mask,
- const u32 value,
- const unsigned long timeout_ms)
+/**
+ * intel_wait_for_register_fw - wait until register matches expected state
+ * @dev_priv: the i915 device
+ * @reg: the register to read
+ * @mask: mask to apply to register value
+ * @value: expected value
+ * @timeout_ms: timeout in milliseconds
+ *
+ * This routine waits until the target register @reg contains the expected
+ * @value after applying the @mask, i.e. it waits until
+ * (I915_READ_FW(@reg) & @mask) == @value
+ * Otherwise, the wait times out after @timeout_ms milliseconds.
+ *
+ * Note that this routine assumes the caller holds forcewake asserted; it is
+ * not suitable for very long waits. See intel_wait_for_register() if you
+ * wish to wait without holding forcewake for the duration (i.e. you expect
+ * the wait to be slow).
+ *
+ * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
+ */
+int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
+ i915_reg_t reg,
+ const u32 mask,
+ const u32 value,
+ const unsigned long timeout_ms)
+{
+#define done ((I915_READ_FW(reg) & mask) == value)
+ int ret = wait_for_us(done, 2);
+ if (ret)
+ ret = wait_for(done, timeout_ms);
+ return ret;
+#undef done
+}
+
+/**
+ * intel_wait_for_register - wait until register matches expected state
+ * @dev_priv: the i915 device
+ * @reg: the register to read
+ * @mask: mask to apply to register value
+ * @value: expected value
+ * @timeout_ms: timeout in milliseconds
+ *
+ * This routine waits until the target register @reg contains the expected
+ * @value after applying the @mask, i.e. it waits until
+ * (I915_READ(@reg) & @mask) == @value
+ * Otherwise, the wait times out after @timeout_ms milliseconds.
+ *
+ * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
+ */
+int intel_wait_for_register(struct drm_i915_private *dev_priv,
+ i915_reg_t reg,
+ const u32 mask,
+ const u32 value,
+ const unsigned long timeout_ms)
{
- return wait_for((I915_READ_FW(reg) & mask) == value, timeout_ms);
+ unsigned fw =
+ intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
+ int ret;
+
+ intel_uncore_forcewake_get(dev_priv, fw);
+ ret = wait_for_us((I915_READ_FW(reg) & mask) == value, 2);
+ intel_uncore_forcewake_put(dev_priv, fw);
+ if (ret)
+ ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
+ timeout_ms);
+
+ return ret;
}
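Editor's note: both helpers share the same two-phase strategy: a ~2µs busy spin catches registers that flip almost immediately, and only then does the code fall back to a sleeping poll bounded by @timeout_ms. A rough, self-contained userspace model of that shape, with read_reg() standing in for the MMIO read (purely illustrative):

    #include <stdio.h>
    #include <time.h>

    /* stand-in for I915_READ_FW()/I915_READ(): pretend the bit flips
     * after a few reads so the demo terminates */
    static unsigned int read_reg(void)
    {
        static unsigned int calls;
        return (++calls > 5) ? 0x1 : 0x0;
    }

    static int wait_masked(unsigned int mask, unsigned int value, long timeout_ms)
    {
        struct timespec t = { .tv_nsec = 1000000 };  /* 1 ms */
        int i;

        /* phase 1: short hot spin for registers that respond quickly */
        for (i = 0; i < 100; i++)
            if ((read_reg() & mask) == value)
                return 0;

        /* phase 2: sleeping poll, roughly bounded by timeout_ms */
        for (; timeout_ms > 0; timeout_ms--) {
            if ((read_reg() & mask) == value)
                return 0;
            nanosleep(&t, NULL);
        }
        return -1;  /* the kernel helpers return -ETIMEDOUT here */
    }

    int main(void)
    {
        printf("wait result: %d\n", wait_masked(0x1, 0x1, 500));
        return 0;
    }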
static int gen8_request_engine_reset(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *dev_priv = engine->i915;
int ret;
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
- ret = wait_for_register_fw(dev_priv,
- RING_RESET_CTL(engine->mmio_base),
- RESET_CTL_READY_TO_RESET,
- RESET_CTL_READY_TO_RESET,
- 700);
+ ret = intel_wait_for_register_fw(dev_priv,
+ RING_RESET_CTL(engine->mmio_base),
+ RESET_CTL_READY_TO_RESET,
+ RESET_CTL_READY_TO_RESET,
+ 700);
if (ret)
DRM_ERROR("%s: reset request timeout\n", engine->name);
@@ -1682,22 +1700,22 @@ static int gen8_request_engine_reset(struct intel_engine_cs *engine)
static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
_MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}
-static int gen8_reset_engines(struct drm_device *dev, unsigned engine_mask)
+static int gen8_reset_engines(struct drm_i915_private *dev_priv,
+ unsigned engine_mask)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
for_each_engine_masked(engine, dev_priv, engine_mask)
if (gen8_request_engine_reset(engine))
goto not_ready;
- return gen6_reset_engines(dev, engine_mask);
+ return gen6_reset_engines(dev_priv, engine_mask);
not_ready:
for_each_engine_masked(engine, dev_priv, engine_mask)
@@ -1706,35 +1724,35 @@ not_ready:
return -EIO;
}
-static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *,
- unsigned engine_mask)
+typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);
+
+static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
{
if (!i915.reset)
return NULL;
- if (INTEL_INFO(dev)->gen >= 8)
+ if (INTEL_INFO(dev_priv)->gen >= 8)
return gen8_reset_engines;
- else if (INTEL_INFO(dev)->gen >= 6)
+ else if (INTEL_INFO(dev_priv)->gen >= 6)
return gen6_reset_engines;
- else if (IS_GEN5(dev))
+ else if (IS_GEN5(dev_priv))
return ironlake_do_reset;
- else if (IS_G4X(dev))
+ else if (IS_G4X(dev_priv))
return g4x_do_reset;
- else if (IS_G33(dev))
+ else if (IS_G33(dev_priv))
return g33_do_reset;
- else if (INTEL_INFO(dev)->gen >= 3)
+ else if (INTEL_INFO(dev_priv)->gen >= 3)
return i915_do_reset;
else
return NULL;
}
-int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask)
+int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- int (*reset)(struct drm_device *, unsigned);
+ reset_func reset;
int ret;
- reset = intel_get_gpu_reset(dev);
+ reset = intel_get_gpu_reset(dev_priv);
if (reset == NULL)
return -ENODEV;
@@ -1742,15 +1760,15 @@ int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask)
* request may be dropped and never completes (causing -EIO).
*/
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- ret = reset(dev, engine_mask);
+ ret = reset(dev_priv, engine_mask);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return ret;
}
-bool intel_has_gpu_reset(struct drm_device *dev)
+bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
{
- return intel_get_gpu_reset(dev) != NULL;
+ return intel_get_gpu_reset(dev_priv) != NULL;
}
int intel_guc_reset(struct drm_i915_private *dev_priv)
@@ -1758,7 +1776,7 @@ int intel_guc_reset(struct drm_i915_private *dev_priv)
int ret;
unsigned long irqflags;
- if (!i915.enable_guc_submission)
+ if (!HAS_GUC(dev_priv))
return -EINVAL;
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -1802,10 +1820,10 @@ intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
{
enum forcewake_domains fw_domains;
- if (intel_vgpu_active(dev_priv->dev))
+ if (intel_vgpu_active(dev_priv))
return 0;
- switch (INTEL_INFO(dev_priv)->gen) {
+ switch (INTEL_GEN(dev_priv)) {
case 9:
fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg));
break;
@@ -1842,10 +1860,10 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
{
enum forcewake_domains fw_domains;
- if (intel_vgpu_active(dev_priv->dev))
+ if (intel_vgpu_active(dev_priv))
return 0;
- switch (INTEL_INFO(dev_priv)->gen) {
+ switch (INTEL_GEN(dev_priv)) {
case 9:
fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg));
break;
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index 44fb0b35e..68db9621f 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -447,10 +447,16 @@ struct bdb_lfp_backlight_data_entry {
u8 obsolete3;
} __packed;
+struct bdb_lfp_backlight_control_method {
+ u8 type:4;
+ u8 controller:4;
+} __packed;
+
struct bdb_lfp_backlight_data {
u8 entry_size;
struct bdb_lfp_backlight_data_entry data[16];
u8 level[16];
+ struct bdb_lfp_backlight_control_method backlight_control[16];
} __packed;
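Editor's note: the new backlight_control[] array gives the VBT a per-panel record of how the backlight is driven, packed into one byte (4-bit type, 4-bit controller). A hedged sketch of how a parser might read one entry, assuming the panel_type index has already been resolved; the helper name is illustrative, not a real i915 function:

    /* Hypothetical decode of one entry; 'data' points at the parsed block. */
    static void dump_backlight_method(const struct bdb_lfp_backlight_data *data,
                                      int panel_type)
    {
        const struct bdb_lfp_backlight_control_method *method =
            &data->backlight_control[panel_type];

        DRM_DEBUG_KMS("VBT backlight: type %u, controller %u\n",
                      method->type, method->controller);
    }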
struct aimdb_header {
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig
index a1844b505..f2c9ae822 100644
--- a/drivers/gpu/drm/imx/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -1,7 +1,6 @@
config DRM_IMX
tristate "DRM Support for Freescale i.MX"
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select VIDEOMODE_HELPERS
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index a24631fdf..359cd2765 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -28,6 +28,11 @@ struct imx_hdmi {
struct regmap *regmap;
};
+static inline struct imx_hdmi *enc_to_imx_hdmi(struct drm_encoder *e)
+{
+ return container_of(e, struct imx_hdmi, encoder);
+}
+
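Editor's note: this commit repeatedly swaps the old cast macros for typed inline helpers like the one above; all of them lean on container_of() to walk from an embedded member back to its wrapper. A self-contained userspace illustration of the idiom, with container_of() re-derived from offsetof() for clarity:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct encoder { int id; };
    struct imx_hdmi_like { int irq; struct encoder encoder; };

    int main(void)
    {
        struct imx_hdmi_like hdmi = { .irq = 7 };
        struct encoder *enc = &hdmi.encoder;

        /* recover the containing struct from the embedded encoder */
        printf("irq = %d\n",
               container_of(enc, struct imx_hdmi_like, encoder)->irq);
        return 0;
    }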
static const struct dw_hdmi_mpll_config imx_mpll_cfg[] = {
{
45250000, {
@@ -109,15 +114,9 @@ static void dw_hdmi_imx_encoder_disable(struct drm_encoder *encoder)
{
}
-static void dw_hdmi_imx_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adj_mode)
+static void dw_hdmi_imx_encoder_enable(struct drm_encoder *encoder)
{
-}
-
-static void dw_hdmi_imx_encoder_commit(struct drm_encoder *encoder)
-{
- struct imx_hdmi *hdmi = container_of(encoder, struct imx_hdmi, encoder);
+ struct imx_hdmi *hdmi = enc_to_imx_hdmi(encoder);
int mux = drm_of_encoder_active_port_id(hdmi->dev->of_node, encoder);
regmap_update_bits(hdmi->regmap, IOMUXC_GPR3,
@@ -125,16 +124,23 @@ static void dw_hdmi_imx_encoder_commit(struct drm_encoder *encoder)
mux << IMX6Q_GPR3_HDMI_MUX_CTL_SHIFT);
}
-static void dw_hdmi_imx_encoder_prepare(struct drm_encoder *encoder)
+static int dw_hdmi_imx_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
- imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_RGB888_1X24);
+ struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
+
+ imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ imx_crtc_state->di_hsync_pin = 2;
+ imx_crtc_state->di_vsync_pin = 3;
+
+ return 0;
}
static const struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = {
- .mode_set = dw_hdmi_imx_encoder_mode_set,
- .prepare = dw_hdmi_imx_encoder_prepare,
- .commit = dw_hdmi_imx_encoder_commit,
+ .enable = dw_hdmi_imx_encoder_enable,
.disable = dw_hdmi_imx_encoder_disable,
+ .atomic_check = dw_hdmi_imx_atomic_check,
};
static const struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = {
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 82656654f..7bf90e9e6 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -15,10 +15,14 @@
*/
#include <linux/component.h>
#include <linux/device.h>
+#include <linux/dma-buf.h>
#include <linux/fb.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/reservation.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
@@ -41,6 +45,7 @@ struct imx_drm_device {
struct imx_drm_crtc *crtc[MAX_CRTC];
unsigned int pipes;
struct drm_fbdev_cma *fbhelper;
+ struct drm_atomic_state *state;
};
struct imx_drm_crtc {
@@ -85,45 +90,6 @@ static int imx_drm_driver_unload(struct drm_device *drm)
return 0;
}
-static struct imx_drm_crtc *imx_drm_find_crtc(struct drm_crtc *crtc)
-{
- struct imx_drm_device *imxdrm = crtc->dev->dev_private;
- unsigned i;
-
- for (i = 0; i < MAX_CRTC; i++)
- if (imxdrm->crtc[i] && imxdrm->crtc[i]->crtc == crtc)
- return imxdrm->crtc[i];
-
- return NULL;
-}
-
-int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format,
- int hsync_pin, int vsync_pin, u32 bus_flags)
-{
- struct imx_drm_crtc_helper_funcs *helper;
- struct imx_drm_crtc *imx_crtc;
-
- imx_crtc = imx_drm_find_crtc(encoder->crtc);
- if (!imx_crtc)
- return -EINVAL;
-
- helper = &imx_crtc->imx_drm_helper_funcs;
- if (helper->set_interface_pix_fmt)
- return helper->set_interface_pix_fmt(encoder->crtc,
- bus_format, hsync_pin, vsync_pin,
- bus_flags);
- return 0;
-}
-EXPORT_SYMBOL_GPL(imx_drm_set_bus_config);
-
-int imx_drm_set_bus_format(struct drm_encoder *encoder, u32 bus_format)
-{
- return imx_drm_set_bus_config(encoder, bus_format, 2, 3,
- DRM_BUS_FLAG_DE_HIGH |
- DRM_BUS_FLAG_PIXDATA_NEGEDGE);
-}
-EXPORT_SYMBOL_GPL(imx_drm_set_bus_format);
-
int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc)
{
return drm_crtc_vblank_get(imx_drm_crtc->crtc);
@@ -205,9 +171,90 @@ static void imx_drm_output_poll_changed(struct drm_device *drm)
drm_fbdev_cma_hotplug_event(imxdrm->fbhelper);
}
+static int imx_drm_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int ret;
+
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_helper_check_planes(dev, state);
+ if (ret)
+ return ret;
+
+ /*
+ * Check modeset again in case crtc_state->mode_changed is
+ * updated in plane's ->atomic_check callback.
+ */
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
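Editor's note: the second modeset pass exists because a plane's ->atomic_check may itself flag crtc_state->mode_changed, invalidating conclusions from the first pass. A hypothetical plane hook that would trigger this (the names and the format-change condition are illustrative only, not part of this driver):

    static int example_plane_atomic_check(struct drm_plane *plane,
                                          struct drm_plane_state *state)
    {
        struct drm_crtc_state *crtc_state;

        if (!state->crtc)
            return 0;

        crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
        if (IS_ERR(crtc_state))
            return PTR_ERR(crtc_state);

        /* a pixel format change here needs a full modeset to take effect */
        if (plane->state->fb && state->fb &&
            plane->state->fb->pixel_format != state->fb->pixel_format)
            crtc_state->mode_changed = true;

        return 0;
    }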
static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
.fb_create = drm_fb_cma_create,
.output_poll_changed = imx_drm_output_poll_changed,
+ .atomic_check = imx_drm_atomic_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane_state *plane_state;
+ struct drm_gem_cma_object *cma_obj;
+ struct fence *excl;
+ unsigned shared_count;
+ struct fence **shared;
+ unsigned int i, j;
+ int ret;
+
+ /* Wait for fences. */
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ plane_state = crtc->primary->state;
+ if (plane_state->fb) {
+ cma_obj = drm_fb_cma_get_gem_obj(plane_state->fb, 0);
+ if (cma_obj->base.dma_buf) {
+ ret = reservation_object_get_fences_rcu(
+ cma_obj->base.dma_buf->resv, &excl,
+ &shared_count, &shared);
+ if (unlikely(ret))
+ DRM_ERROR("failed to get fences for buffer\n");
+
+ if (excl) {
+ fence_wait(excl, false);
+ fence_put(excl);
+ }
+ for (j = 0; j < shared_count; j++) {
+ fence_wait(shared[j], false);
+ fence_put(shared[j]);
+ }
+ }
+ }
+ }
+
+ drm_atomic_helper_commit_modeset_disables(dev, state);
+
+ drm_atomic_helper_commit_planes(dev, state, true);
+
+ drm_atomic_helper_commit_modeset_enables(dev, state);
+
+ drm_atomic_helper_commit_hw_done(state);
+
+ drm_atomic_helper_wait_for_vblanks(dev, state);
+
+ drm_atomic_helper_cleanup_planes(dev, state);
+}
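Editor's note: only dma-buf-imported buffers carry a reservation object, so purely local CMA framebuffers skip the wait entirely. In isolation, the reservation dance looks like the sketch below; 'resv' is assumed to be a valid pointer, 4.8 still spells these APIs fence_* (later dma_fence_*), and note that the shared[] array is allocated by the RCU helper and is the caller's to free:

    /* Sketch: wait out all fences on a dma-buf's reservation object. */
    static int wait_dmabuf_fences(struct reservation_object *resv)
    {
        struct fence *excl;
        struct fence **shared;
        unsigned int shared_count, i;
        int ret;

        ret = reservation_object_get_fences_rcu(resv, &excl,
                                                &shared_count, &shared);
        if (ret)
            return ret;

        if (excl) {
            fence_wait(excl, false);    /* false: uninterruptible */
            fence_put(excl);
        }
        for (i = 0; i < shared_count; i++) {
            fence_wait(shared[i], false);
            fence_put(shared[i]);
        }
        kfree(shared);  /* the RCU helper allocates this array */

        return 0;
    }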
+
+static struct drm_mode_config_helper_funcs imx_drm_mode_config_helpers = {
+ .atomic_commit_tail = imx_drm_atomic_commit_tail,
};
/*
@@ -249,6 +296,7 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
drm->mode_config.max_width = 4096;
drm->mode_config.max_height = 4096;
drm->mode_config.funcs = &imx_drm_mode_config_funcs;
+ drm->mode_config.helper_private = &imx_drm_mode_config_helpers;
drm_mode_config_init(drm);
@@ -279,6 +327,8 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
}
}
+ drm_mode_config_reset(drm);
+
/*
* All components are now initialised, so setup the fb helper.
* The fb helper takes copies of key hardware information, so the
@@ -289,7 +339,6 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
dev_warn(drm->dev, "Invalid legacyfb_depth. Defaulting to 16bpp\n");
legacyfb_depth = 16;
}
- drm_helper_disable_unused_functions(drm);
imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth,
drm->mode_config.num_crtc, MAX_CRTC);
if (IS_ERR(imxdrm->fbhelper)) {
@@ -403,11 +452,11 @@ static const struct drm_ioctl_desc imx_drm_ioctls[] = {
};
static struct drm_driver imx_drm_driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
+ DRIVER_ATOMIC,
.load = imx_drm_driver_load,
.unload = imx_drm_driver_unload,
.lastclose = imx_drm_driver_lastclose,
- .set_busid = drm_platform_set_busid,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
@@ -492,6 +541,7 @@ static int imx_drm_platform_remove(struct platform_device *pdev)
static int imx_drm_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct imx_drm_device *imxdrm;
/* The drm_dev is NULL before .load hook is called */
if (drm_dev == NULL)
@@ -499,17 +549,26 @@ static int imx_drm_suspend(struct device *dev)
drm_kms_helper_poll_disable(drm_dev);
+ imxdrm = drm_dev->dev_private;
+ imxdrm->state = drm_atomic_helper_suspend(drm_dev);
+ if (IS_ERR(imxdrm->state)) {
+ drm_kms_helper_poll_enable(drm_dev);
+ return PTR_ERR(imxdrm->state);
+ }
+
return 0;
}
static int imx_drm_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct imx_drm_device *imx_drm;
if (drm_dev == NULL)
return 0;
- drm_helper_resume_force_mode(drm_dev);
+ imx_drm = drm_dev->dev_private;
+ drm_atomic_helper_resume(drm_dev, imx_drm->state);
drm_kms_helper_poll_enable(drm_dev);
return 0;
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index 74320a172..07d33e45f 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -15,12 +15,22 @@ struct platform_device;
unsigned int imx_drm_crtc_id(struct imx_drm_crtc *crtc);
+struct imx_crtc_state {
+ struct drm_crtc_state base;
+ u32 bus_format;
+ u32 bus_flags;
+ int di_hsync_pin;
+ int di_vsync_pin;
+};
+
+static inline struct imx_crtc_state *to_imx_crtc_state(struct drm_crtc_state *s)
+{
+ return container_of(s, struct imx_crtc_state, base);
+}
+
struct imx_drm_crtc_helper_funcs {
int (*enable_vblank)(struct drm_crtc *crtc);
void (*disable_vblank)(struct drm_crtc *crtc);
- int (*set_interface_pix_fmt)(struct drm_crtc *crtc,
- u32 bus_format, int hsync_pin, int vsync_pin,
- u32 bus_flags);
const struct drm_crtc_helper_funcs *crtc_helper_funcs;
const struct drm_crtc_funcs *crtc_funcs;
};
@@ -42,11 +52,6 @@ void imx_drm_mode_config_init(struct drm_device *drm);
struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb);
-int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format,
- int hsync_pin, int vsync_pin, u32 bus_flags);
-int imx_drm_set_bus_format(struct drm_encoder *encoder,
- u32 bus_format);
-
int imx_drm_encoder_parse_of(struct drm_device *drm,
struct drm_encoder *encoder, struct device_node *np);
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index beff793bb..b03919ed6 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -17,6 +17,8 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_of.h>
@@ -49,9 +51,6 @@
#define LDB_DI1_VS_POL_ACT_LOW (1 << 10)
#define LDB_BGREF_RMODE_INT (1 << 15)
-#define con_to_imx_ldb_ch(x) container_of(x, struct imx_ldb_channel, connector)
-#define enc_to_imx_ldb_ch(x) container_of(x, struct imx_ldb_channel, encoder)
-
struct imx_ldb;
struct imx_ldb_channel {
@@ -66,9 +65,19 @@ struct imx_ldb_channel {
int edid_len;
struct drm_display_mode mode;
int mode_valid;
- int bus_format;
+ u32 bus_format;
};
+static inline struct imx_ldb_channel *con_to_imx_ldb_ch(struct drm_connector *c)
+{
+ return container_of(c, struct imx_ldb_channel, connector);
+}
+
+static inline struct imx_ldb_channel *enc_to_imx_ldb_ch(struct drm_encoder *e)
+{
+ return container_of(e, struct imx_ldb_channel, encoder);
+}
+
struct bus_mux {
int reg;
int shift;
@@ -93,6 +102,32 @@ static enum drm_connector_status imx_ldb_connector_detect(
return connector_status_connected;
}
+static void imx_ldb_ch_set_bus_format(struct imx_ldb_channel *imx_ldb_ch,
+ u32 bus_format)
+{
+ struct imx_ldb *ldb = imx_ldb_ch->ldb;
+ int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
+
+ switch (bus_format) {
+ case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+ if (imx_ldb_ch->chno == 0 || dual)
+ ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24;
+ if (imx_ldb_ch->chno == 1 || dual)
+ ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+ if (imx_ldb_ch->chno == 0 || dual)
+ ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24 |
+ LDB_BIT_MAP_CH0_JEIDA;
+ if (imx_ldb_ch->chno == 1 || dual)
+ ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24 |
+ LDB_BIT_MAP_CH1_JEIDA;
+ break;
+ }
+}
+
static int imx_ldb_connector_get_modes(struct drm_connector *connector)
{
struct imx_ldb_channel *imx_ldb_ch = con_to_imx_ldb_ch(connector);
@@ -100,11 +135,7 @@ static int imx_ldb_connector_get_modes(struct drm_connector *connector)
if (imx_ldb_ch->panel && imx_ldb_ch->panel->funcs &&
imx_ldb_ch->panel->funcs->get_modes) {
- struct drm_display_info *di = &connector->display_info;
-
num_modes = imx_ldb_ch->panel->funcs->get_modes(imx_ldb_ch->panel);
- if (!imx_ldb_ch->bus_format && di->num_bus_formats)
- imx_ldb_ch->bus_format = di->bus_formats[0];
if (num_modes > 0)
return num_modes;
}
@@ -141,10 +172,6 @@ static struct drm_encoder *imx_ldb_connector_best_encoder(
return &imx_ldb_ch->encoder;
}
-static void imx_ldb_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
-}
-
static void imx_ldb_set_clock(struct imx_ldb *ldb, int mux, int chno,
unsigned long serial_clk, unsigned long di_clk)
{
@@ -173,43 +200,7 @@ static void imx_ldb_set_clock(struct imx_ldb *ldb, int mux, int chno,
chno);
}
-static void imx_ldb_encoder_prepare(struct drm_encoder *encoder)
-{
- struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
- struct imx_ldb *ldb = imx_ldb_ch->ldb;
- int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
- u32 bus_format;
-
- switch (imx_ldb_ch->bus_format) {
- default:
- dev_warn(ldb->dev,
- "could not determine data mapping, default to 18-bit \"spwg\"\n");
- /* fallthrough */
- case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
- bus_format = MEDIA_BUS_FMT_RGB666_1X18;
- break;
- case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
- bus_format = MEDIA_BUS_FMT_RGB888_1X24;
- if (imx_ldb_ch->chno == 0 || dual)
- ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24;
- if (imx_ldb_ch->chno == 1 || dual)
- ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24;
- break;
- case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
- bus_format = MEDIA_BUS_FMT_RGB888_1X24;
- if (imx_ldb_ch->chno == 0 || dual)
- ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24 |
- LDB_BIT_MAP_CH0_JEIDA;
- if (imx_ldb_ch->chno == 1 || dual)
- ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24 |
- LDB_BIT_MAP_CH1_JEIDA;
- break;
- }
-
- imx_drm_set_bus_format(encoder, bus_format);
-}
-
-static void imx_ldb_encoder_commit(struct drm_encoder *encoder)
+static void imx_ldb_encoder_enable(struct drm_encoder *encoder)
{
struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
struct imx_ldb *ldb = imx_ldb_ch->ldb;
@@ -219,8 +210,13 @@ static void imx_ldb_encoder_commit(struct drm_encoder *encoder)
drm_panel_prepare(imx_ldb_ch->panel);
if (dual) {
+ clk_set_parent(ldb->clk_sel[mux], ldb->clk[0]);
+ clk_set_parent(ldb->clk_sel[mux], ldb->clk[1]);
+
clk_prepare_enable(ldb->clk[0]);
clk_prepare_enable(ldb->clk[1]);
+ } else {
+ clk_set_parent(ldb->clk_sel[mux], ldb->clk[imx_ldb_ch->chno]);
}
if (imx_ldb_ch == &ldb->channel[0] || dual) {
@@ -265,6 +261,7 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
unsigned long serial_clk;
unsigned long di_clk = mode->clock * 1000;
int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
+ u32 bus_format = imx_ldb_ch->bus_format;
if (mode->clock > 170000) {
dev_warn(ldb->dev,
@@ -286,18 +283,33 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
}
/* FIXME - assumes straight connections DI0 --> CH0, DI1 --> CH1 */
- if (imx_ldb_ch == &ldb->channel[0]) {
+ if (imx_ldb_ch == &ldb->channel[0] || dual) {
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
ldb->ldb_ctrl |= LDB_DI0_VS_POL_ACT_LOW;
else if (mode->flags & DRM_MODE_FLAG_PVSYNC)
ldb->ldb_ctrl &= ~LDB_DI0_VS_POL_ACT_LOW;
}
- if (imx_ldb_ch == &ldb->channel[1]) {
+ if (imx_ldb_ch == &ldb->channel[1] || dual) {
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
ldb->ldb_ctrl |= LDB_DI1_VS_POL_ACT_LOW;
else if (mode->flags & DRM_MODE_FLAG_PVSYNC)
ldb->ldb_ctrl &= ~LDB_DI1_VS_POL_ACT_LOW;
}
+
+ if (!bus_format) {
+ struct drm_connector *connector;
+
+ drm_for_each_connector(connector, encoder->dev) {
+ struct drm_display_info *di = &connector->display_info;
+
+ if (connector->encoder == encoder &&
+ di->num_bus_formats) {
+ bus_format = di->bus_formats[0];
+ break;
+ }
+ }
+ }
+ imx_ldb_ch_set_bus_format(imx_ldb_ch, bus_format);
}
static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
@@ -357,11 +369,45 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
drm_panel_unprepare(imx_ldb_ch->panel);
}
+static int imx_ldb_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
+ struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
+ struct drm_display_info *di = &conn_state->connector->display_info;
+ u32 bus_format = imx_ldb_ch->bus_format;
+
+ /* Bus format description in DT overrides connector display info. */
+ if (!bus_format && di->num_bus_formats)
+ bus_format = di->bus_formats[0];
+ switch (bus_format) {
+ case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
+ imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB666_1X18;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+ case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+ imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ imx_crtc_state->di_hsync_pin = 2;
+ imx_crtc_state->di_vsync_pin = 3;
+
+ return 0;
+}
+
static const struct drm_connector_funcs imx_ldb_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = imx_ldb_connector_detect,
.destroy = imx_drm_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs = {
@@ -374,11 +420,10 @@ static const struct drm_encoder_funcs imx_ldb_encoder_funcs = {
};
static const struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = {
- .dpms = imx_ldb_encoder_dpms,
- .prepare = imx_ldb_encoder_prepare,
- .commit = imx_ldb_encoder_commit,
.mode_set = imx_ldb_encoder_mode_set,
+ .enable = imx_ldb_encoder_enable,
.disable = imx_ldb_encoder_disable,
+ .atomic_check = imx_ldb_encoder_atomic_check,
};
static int imx_ldb_get_clk(struct imx_ldb *ldb, int chno)
@@ -400,10 +445,10 @@ static int imx_ldb_register(struct drm_device *drm,
struct imx_ldb_channel *imx_ldb_ch)
{
struct imx_ldb *ldb = imx_ldb_ch->ldb;
+ struct drm_encoder *encoder = &imx_ldb_ch->encoder;
int ret;
- ret = imx_drm_encoder_parse_of(drm, &imx_ldb_ch->encoder,
- imx_ldb_ch->child);
+ ret = imx_drm_encoder_parse_of(drm, encoder, imx_ldb_ch->child);
if (ret)
return ret;
@@ -417,9 +462,8 @@ static int imx_ldb_register(struct drm_device *drm,
return ret;
}
- drm_encoder_helper_add(&imx_ldb_ch->encoder,
- &imx_ldb_encoder_helper_funcs);
- drm_encoder_init(drm, &imx_ldb_ch->encoder, &imx_ldb_encoder_funcs,
+ drm_encoder_helper_add(encoder, &imx_ldb_encoder_helper_funcs);
+ drm_encoder_init(drm, encoder, &imx_ldb_encoder_funcs,
DRM_MODE_ENCODER_LVDS, NULL);
drm_connector_helper_add(&imx_ldb_ch->connector,
@@ -427,11 +471,14 @@ static int imx_ldb_register(struct drm_device *drm,
drm_connector_init(drm, &imx_ldb_ch->connector,
&imx_ldb_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
- if (imx_ldb_ch->panel)
- drm_panel_attach(imx_ldb_ch->panel, &imx_ldb_ch->connector);
+ if (imx_ldb_ch->panel) {
+ ret = drm_panel_attach(imx_ldb_ch->panel,
+ &imx_ldb_ch->connector);
+ if (ret)
+ return ret;
+ }
- drm_mode_connector_attach_encoder(&imx_ldb_ch->connector,
- &imx_ldb_ch->encoder);
+ drm_mode_connector_attach_encoder(&imx_ldb_ch->connector, encoder);
return 0;
}
@@ -560,6 +607,7 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
struct imx_ldb_channel *channel;
struct device_node *ddc_node;
struct device_node *ep;
+ int bus_format;
ret = of_property_read_u32(child, "reg", &i);
if (ret || i < 0 || i > 1)
@@ -632,21 +680,22 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
}
}
- channel->bus_format = of_get_bus_format(dev, child);
- if (channel->bus_format == -EINVAL) {
+ bus_format = of_get_bus_format(dev, child);
+ if (bus_format == -EINVAL) {
/*
* If no bus format was specified in the device tree,
* we can still get it from the connected panel later.
*/
if (channel->panel && channel->panel->funcs &&
channel->panel->funcs->get_modes)
- channel->bus_format = 0;
+ bus_format = 0;
}
- if (channel->bus_format < 0) {
+ if (bus_format < 0) {
dev_err(dev, "could not determine data mapping: %d\n",
- channel->bus_format);
- return channel->bus_format;
+ bus_format);
+ return bus_format;
}
+ channel->bus_format = bus_format;
ret = imx_ldb_register(drm, channel);
if (ret)
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index baf788121..5e875944f 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -23,6 +23,7 @@
#include <linux/spinlock.h>
#include <linux/videodev2.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <video/imx-ipu-v3.h>
@@ -97,9 +98,6 @@
/* TVE_TST_MODE_REG */
#define TVE_TVDAC_TEST_MODE_MASK (0x7 << 0)
-#define con_to_tve(x) container_of(x, struct imx_tve, connector)
-#define enc_to_tve(x) container_of(x, struct imx_tve, encoder)
-
enum {
TVE_MODE_TVOUT,
TVE_MODE_VGA,
@@ -112,6 +110,8 @@ struct imx_tve {
spinlock_t lock; /* register lock */
bool enabled;
int mode;
+ int di_hsync_pin;
+ int di_vsync_pin;
struct regmap *regmap;
struct regulator *dac_reg;
@@ -120,10 +120,18 @@ struct imx_tve {
struct clk *di_sel_clk;
struct clk_hw clk_hw_di;
struct clk *di_clk;
- int vsync_pin;
- int hsync_pin;
};
+static inline struct imx_tve *con_to_tve(struct drm_connector *c)
+{
+ return container_of(c, struct imx_tve, connector);
+}
+
+static inline struct imx_tve *enc_to_tve(struct drm_encoder *e)
+{
+ return container_of(e, struct imx_tve, encoder);
+}
+
static void tve_lock(void *__tve)
__acquires(&tve->lock)
{
@@ -148,8 +156,7 @@ static void tve_enable(struct imx_tve *tve)
tve->enabled = true;
clk_prepare_enable(tve->clk);
ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
- TVE_IPU_CLK_EN | TVE_EN,
- TVE_IPU_CLK_EN | TVE_EN);
+ TVE_EN, TVE_EN);
}
/* clear interrupt status register */
@@ -172,7 +179,7 @@ static void tve_disable(struct imx_tve *tve)
if (tve->enabled) {
tve->enabled = false;
ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
- TVE_IPU_CLK_EN | TVE_EN, 0);
+ TVE_EN, 0);
clk_disable_unprepare(tve->clk);
}
}
@@ -275,36 +282,6 @@ static struct drm_encoder *imx_tve_connector_best_encoder(
return &tve->encoder;
}
-static void imx_tve_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
- struct imx_tve *tve = enc_to_tve(encoder);
- int ret;
-
- ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
- TVE_TV_OUT_MODE_MASK, TVE_TV_OUT_DISABLE);
- if (ret < 0)
- dev_err(tve->dev, "failed to disable TVOUT: %d\n", ret);
-}
-
-static void imx_tve_encoder_prepare(struct drm_encoder *encoder)
-{
- struct imx_tve *tve = enc_to_tve(encoder);
-
- tve_disable(tve);
-
- switch (tve->mode) {
- case TVE_MODE_VGA:
- imx_drm_set_bus_config(encoder, MEDIA_BUS_FMT_GBR888_1X24,
- tve->hsync_pin, tve->vsync_pin,
- DRM_BUS_FLAG_DE_HIGH |
- DRM_BUS_FLAG_PIXDATA_NEGEDGE);
- break;
- case TVE_MODE_TVOUT:
- imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_YUV8_1X24);
- break;
- }
-}
-
static void imx_tve_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *orig_mode,
struct drm_display_mode *mode)
@@ -333,6 +310,9 @@ static void imx_tve_encoder_mode_set(struct drm_encoder *encoder,
ret);
}
+ regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
+ TVE_IPU_CLK_EN, TVE_IPU_CLK_EN);
+
if (tve->mode == TVE_MODE_VGA)
ret = tve_setup_vga(tve);
else
@@ -341,7 +321,7 @@ static void imx_tve_encoder_mode_set(struct drm_encoder *encoder,
dev_err(tve->dev, "failed to set configuration: %d\n", ret);
}
-static void imx_tve_encoder_commit(struct drm_encoder *encoder)
+static void imx_tve_encoder_enable(struct drm_encoder *encoder)
{
struct imx_tve *tve = enc_to_tve(encoder);
@@ -355,11 +335,28 @@ static void imx_tve_encoder_disable(struct drm_encoder *encoder)
tve_disable(tve);
}
+static int imx_tve_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
+ struct imx_tve *tve = enc_to_tve(encoder);
+
+ imx_crtc_state->bus_format = MEDIA_BUS_FMT_GBR888_1X24;
+ imx_crtc_state->di_hsync_pin = tve->di_hsync_pin;
+ imx_crtc_state->di_vsync_pin = tve->di_vsync_pin;
+
+ return 0;
+}
+
static const struct drm_connector_funcs imx_tve_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = imx_tve_connector_detect,
.destroy = imx_drm_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs imx_tve_connector_helper_funcs = {
@@ -373,11 +370,10 @@ static const struct drm_encoder_funcs imx_tve_encoder_funcs = {
};
static const struct drm_encoder_helper_funcs imx_tve_encoder_helper_funcs = {
- .dpms = imx_tve_encoder_dpms,
- .prepare = imx_tve_encoder_prepare,
.mode_set = imx_tve_encoder_mode_set,
- .commit = imx_tve_encoder_commit,
+ .enable = imx_tve_encoder_enable,
.disable = imx_tve_encoder_disable,
+ .atomic_check = imx_tve_atomic_check,
};
static irqreturn_t imx_tve_irq_handler(int irq, void *data)
@@ -495,8 +491,7 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve)
encoder_type = tve->mode == TVE_MODE_VGA ?
DRM_MODE_ENCODER_DAC : DRM_MODE_ENCODER_TVDAC;
- ret = imx_drm_encoder_parse_of(drm, &tve->encoder,
- tve->dev->of_node);
+ ret = imx_drm_encoder_parse_of(drm, &tve->encoder, tve->dev->of_node);
if (ret)
return ret;
@@ -587,15 +582,15 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
if (tve->mode == TVE_MODE_VGA) {
ret = of_property_read_u32(np, "fsl,hsync-pin",
- &tve->hsync_pin);
+ &tve->di_hsync_pin);
if (ret < 0) {
- dev_err(dev, "failed to get vsync pin\n");
+ dev_err(dev, "failed to get hsync pin\n");
return ret;
}
- ret |= of_property_read_u32(np, "fsl,vsync-pin",
- &tve->vsync_pin);
+ ret = of_property_read_u32(np, "fsl,vsync-pin",
+ &tve->di_vsync_pin);
if (ret < 0) {
dev_err(dev, "failed to get vsync pin\n");
@@ -633,7 +628,9 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
tve->dac_reg = devm_regulator_get(dev, "dac");
if (!IS_ERR(tve->dac_reg)) {
- regulator_set_voltage(tve->dac_reg, 2750000, 2750000);
+ ret = regulator_set_voltage(tve->dac_reg, 2750000, 2750000);
+ if (ret)
+ return ret;
ret = regulator_enable(tve->dac_reg);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index fc040417e..462056e4b 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -18,12 +18,12 @@
#include <linux/device.h>
#include <linux/platform_device.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <linux/fb.h>
#include <linux/clk.h>
#include <linux/errno.h>
-#include <linux/reservation.h>
-#include <linux/dma-buf.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
@@ -33,23 +33,6 @@
#define DRIVER_DESC "i.MX IPUv3 Graphics"
-enum ipu_flip_status {
- IPU_FLIP_NONE,
- IPU_FLIP_PENDING,
- IPU_FLIP_SUBMITTED,
-};
-
-struct ipu_flip_work {
- struct work_struct unref_work;
- struct drm_gem_object *bo;
- struct drm_pending_vblank_event *page_flip_event;
- struct work_struct fence_work;
- struct ipu_crtc *crtc;
- struct fence *excl;
- unsigned shared_count;
- struct fence **shared;
-};
-
struct ipu_crtc {
struct device *dev;
struct drm_crtc base;
@@ -60,201 +43,170 @@ struct ipu_crtc {
struct ipu_dc *dc;
struct ipu_di *di;
- int enabled;
- enum ipu_flip_status flip_state;
- struct workqueue_struct *flip_queue;
- struct ipu_flip_work *flip_work;
int irq;
- u32 bus_format;
- u32 bus_flags;
- int di_hsync_pin;
- int di_vsync_pin;
};
-#define to_ipu_crtc(x) container_of(x, struct ipu_crtc, base)
+static inline struct ipu_crtc *to_ipu_crtc(struct drm_crtc *crtc)
+{
+ return container_of(crtc, struct ipu_crtc, base);
+}
-static void ipu_fb_enable(struct ipu_crtc *ipu_crtc)
+static void ipu_crtc_enable(struct drm_crtc *crtc)
{
+ struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
- if (ipu_crtc->enabled)
- return;
-
ipu_dc_enable(ipu);
- ipu_plane_enable(ipu_crtc->plane[0]);
- /* Start DC channel and DI after IDMAC */
ipu_dc_enable_channel(ipu_crtc->dc);
ipu_di_enable(ipu_crtc->di);
- drm_crtc_vblank_on(&ipu_crtc->base);
-
- ipu_crtc->enabled = 1;
}
-static void ipu_fb_disable(struct ipu_crtc *ipu_crtc)
+static void ipu_crtc_disable(struct drm_crtc *crtc)
{
+ struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
- if (!ipu_crtc->enabled)
- return;
-
- /* Stop DC channel and DI before IDMAC */
ipu_dc_disable_channel(ipu_crtc->dc);
ipu_di_disable(ipu_crtc->di);
- ipu_plane_disable(ipu_crtc->plane[0]);
ipu_dc_disable(ipu);
- drm_crtc_vblank_off(&ipu_crtc->base);
- ipu_crtc->enabled = 0;
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->event) {
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+
+ drm_crtc_vblank_off(crtc);
}
-static void ipu_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void imx_drm_crtc_reset(struct drm_crtc *crtc)
{
- struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
+ struct imx_crtc_state *state;
+
+ if (crtc->state) {
+ if (crtc->state->mode_blob)
+ drm_property_unreference_blob(crtc->state->mode_blob);
- dev_dbg(ipu_crtc->dev, "%s mode: %d\n", __func__, mode);
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- ipu_fb_enable(ipu_crtc);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- ipu_fb_disable(ipu_crtc);
- break;
+ state = to_imx_crtc_state(crtc->state);
+ memset(state, 0, sizeof(*state));
+ } else {
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return;
+ crtc->state = &state->base;
}
+
+ state->base.crtc = crtc;
}
-static void ipu_flip_unref_work_func(struct work_struct *__work)
+static struct drm_crtc_state *imx_drm_crtc_duplicate_state(struct drm_crtc *crtc)
{
- struct ipu_flip_work *work =
- container_of(__work, struct ipu_flip_work, unref_work);
+ struct imx_crtc_state *state;
- drm_gem_object_unreference_unlocked(work->bo);
- kfree(work);
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
+
+ WARN_ON(state->base.crtc != crtc);
+ state->base.crtc = crtc;
+
+ return &state->base;
}
-static void ipu_flip_fence_work_func(struct work_struct *__work)
+static void imx_drm_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
{
- struct ipu_flip_work *work =
- container_of(__work, struct ipu_flip_work, fence_work);
- int i;
-
- /* wait for all fences attached to the FB obj to signal */
- if (work->excl) {
- fence_wait(work->excl, false);
- fence_put(work->excl);
- }
- for (i = 0; i < work->shared_count; i++) {
- fence_wait(work->shared[i], false);
- fence_put(work->shared[i]);
- }
+ __drm_atomic_helper_crtc_destroy_state(state);
+ kfree(to_imx_crtc_state(state));
+}
- work->crtc->flip_state = IPU_FLIP_SUBMITTED;
+static const struct drm_crtc_funcs ipu_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = drm_crtc_cleanup,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = imx_drm_crtc_reset,
+ .atomic_duplicate_state = imx_drm_crtc_duplicate_state,
+ .atomic_destroy_state = imx_drm_crtc_destroy_state,
+};
+
+static irqreturn_t ipu_irq_handler(int irq, void *dev_id)
+{
+ struct ipu_crtc *ipu_crtc = dev_id;
+
+ imx_drm_handle_vblank(ipu_crtc->imx_crtc);
+
+ return IRQ_HANDLED;
}
-static int ipu_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
+static bool ipu_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
- struct ipu_flip_work *flip_work;
+ struct videomode vm;
int ret;
- if (ipu_crtc->flip_state != IPU_FLIP_NONE)
- return -EBUSY;
-
- ret = imx_drm_crtc_vblank_get(ipu_crtc->imx_crtc);
- if (ret) {
- dev_dbg(ipu_crtc->dev, "failed to acquire vblank counter\n");
- list_del(&event->base.link);
-
- return ret;
- }
+ drm_display_mode_to_videomode(adjusted_mode, &vm);
- flip_work = kzalloc(sizeof *flip_work, GFP_KERNEL);
- if (!flip_work) {
- ret = -ENOMEM;
- goto put_vblank;
- }
- INIT_WORK(&flip_work->unref_work, ipu_flip_unref_work_func);
- flip_work->page_flip_event = event;
+ ret = ipu_di_adjust_videomode(ipu_crtc->di, &vm);
+ if (ret)
+ return false;
- /* get BO backing the old framebuffer and take a reference */
- flip_work->bo = &drm_fb_cma_get_gem_obj(crtc->primary->fb, 0)->base;
- drm_gem_object_reference(flip_work->bo);
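+ /* Reject modes for which the DI could not derive valid sync pulse widths. */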
+ if ((vm.vsync_len == 0) || (vm.hsync_len == 0))
+ return false;
- ipu_crtc->flip_work = flip_work;
- /*
- * If the object has a DMABUF attached, we need to wait on its fences
- * if there are any.
- */
- if (cma_obj->base.dma_buf) {
- INIT_WORK(&flip_work->fence_work, ipu_flip_fence_work_func);
- flip_work->crtc = ipu_crtc;
+ drm_display_mode_from_videomode(&vm, adjusted_mode);
- ret = reservation_object_get_fences_rcu(
- cma_obj->base.dma_buf->resv, &flip_work->excl,
- &flip_work->shared_count, &flip_work->shared);
+ return true;
+}
- if (unlikely(ret)) {
- DRM_ERROR("failed to get fences for buffer\n");
- goto free_flip_work;
- }
+static int ipu_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ u32 primary_plane_mask = 1 << drm_plane_index(crtc->primary);
- /* No need to queue the worker if there are no fences */
- if (!flip_work->excl && !flip_work->shared_count) {
- ipu_crtc->flip_state = IPU_FLIP_SUBMITTED;
- } else {
- ipu_crtc->flip_state = IPU_FLIP_PENDING;
- queue_work(ipu_crtc->flip_queue,
- &flip_work->fence_work);
- }
- } else {
- ipu_crtc->flip_state = IPU_FLIP_SUBMITTED;
- }
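+ /* An active CRTC must always have its primary plane enabled. */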
+ if (state->active && (primary_plane_mask & state->plane_mask) == 0)
+ return -EINVAL;
return 0;
+}
-free_flip_work:
- drm_gem_object_unreference_unlocked(flip_work->bo);
- kfree(flip_work);
- ipu_crtc->flip_work = NULL;
-put_vblank:
- imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
+static void ipu_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ drm_crtc_vblank_on(crtc);
- return ret;
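+ /* Arm the pending event; it will be sent from the next vblank interrupt. */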
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->event) {
+ WARN_ON(drm_crtc_vblank_get(crtc));
+ drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
}
-static const struct drm_crtc_funcs ipu_crtc_funcs = {
- .set_config = drm_crtc_helper_set_config,
- .destroy = drm_crtc_cleanup,
- .page_flip = ipu_page_flip,
-};
-
-static int ipu_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *orig_mode,
- struct drm_display_mode *mode,
- int x, int y,
- struct drm_framebuffer *old_fb)
+static void ipu_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_encoder *encoder;
struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
+ struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc->state);
struct ipu_di_signal_cfg sig_cfg = {};
unsigned long encoder_types = 0;
- int ret;
dev_dbg(ipu_crtc->dev, "%s: mode->hdisplay: %d\n", __func__,
mode->hdisplay);
dev_dbg(ipu_crtc->dev, "%s: mode->vdisplay: %d\n", __func__,
mode->vdisplay);
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc)
encoder_types |= BIT(encoder->encoder_type);
+ }
dev_dbg(ipu_crtc->dev, "%s: attached to encoder types 0x%lx\n",
__func__, encoder_types);
@@ -272,114 +224,30 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc,
else
sig_cfg.clkflags = 0;
- sig_cfg.enable_pol = !(ipu_crtc->bus_flags & DRM_BUS_FLAG_DE_LOW);
+ sig_cfg.enable_pol = !(imx_crtc_state->bus_flags & DRM_BUS_FLAG_DE_LOW);
/* Default to driving pixel data on negative clock edges */
- sig_cfg.clk_pol = !!(ipu_crtc->bus_flags &
+ sig_cfg.clk_pol = !!(imx_crtc_state->bus_flags &
DRM_BUS_FLAG_PIXDATA_POSEDGE);
- sig_cfg.bus_format = ipu_crtc->bus_format;
+ sig_cfg.bus_format = imx_crtc_state->bus_format;
sig_cfg.v_to_h_sync = 0;
- sig_cfg.hsync_pin = ipu_crtc->di_hsync_pin;
- sig_cfg.vsync_pin = ipu_crtc->di_vsync_pin;
+ sig_cfg.hsync_pin = imx_crtc_state->di_hsync_pin;
+ sig_cfg.vsync_pin = imx_crtc_state->di_vsync_pin;
drm_display_mode_to_videomode(mode, &sig_cfg.mode);
- ret = ipu_dc_init_sync(ipu_crtc->dc, ipu_crtc->di,
- mode->flags & DRM_MODE_FLAG_INTERLACE,
- ipu_crtc->bus_format, mode->hdisplay);
- if (ret) {
- dev_err(ipu_crtc->dev,
- "initializing display controller failed with %d\n",
- ret);
- return ret;
- }
-
- ret = ipu_di_init_sync_panel(ipu_crtc->di, &sig_cfg);
- if (ret) {
- dev_err(ipu_crtc->dev,
- "initializing panel failed with %d\n", ret);
- return ret;
- }
-
- return ipu_plane_mode_set(ipu_crtc->plane[0], crtc, mode,
- crtc->primary->fb,
- 0, 0, mode->hdisplay, mode->vdisplay,
- x, y, mode->hdisplay, mode->vdisplay,
- mode->flags & DRM_MODE_FLAG_INTERLACE);
-}
-
-static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc)
-{
- unsigned long flags;
- struct drm_device *drm = ipu_crtc->base.dev;
- struct ipu_flip_work *work = ipu_crtc->flip_work;
-
- spin_lock_irqsave(&drm->event_lock, flags);
- if (work->page_flip_event)
- drm_crtc_send_vblank_event(&ipu_crtc->base,
- work->page_flip_event);
- imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
- spin_unlock_irqrestore(&drm->event_lock, flags);
-}
-
-static irqreturn_t ipu_irq_handler(int irq, void *dev_id)
-{
- struct ipu_crtc *ipu_crtc = dev_id;
-
- imx_drm_handle_vblank(ipu_crtc->imx_crtc);
-
- if (ipu_crtc->flip_state == IPU_FLIP_SUBMITTED) {
- struct ipu_plane *plane = ipu_crtc->plane[0];
-
- ipu_plane_set_base(plane, ipu_crtc->base.primary->fb,
- plane->x, plane->y);
- ipu_crtc_handle_pageflip(ipu_crtc);
- queue_work(ipu_crtc->flip_queue,
- &ipu_crtc->flip_work->unref_work);
- ipu_crtc->flip_state = IPU_FLIP_NONE;
- }
-
- return IRQ_HANDLED;
-}
-
-static bool ipu_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
- struct videomode vm;
- int ret;
-
- drm_display_mode_to_videomode(adjusted_mode, &vm);
-
- ret = ipu_di_adjust_videomode(ipu_crtc->di, &vm);
- if (ret)
- return false;
-
- drm_display_mode_from_videomode(&vm, adjusted_mode);
-
- return true;
-}
-
-static void ipu_crtc_prepare(struct drm_crtc *crtc)
-{
- struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
-
- ipu_fb_disable(ipu_crtc);
-}
-
-static void ipu_crtc_commit(struct drm_crtc *crtc)
-{
- struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
-
- ipu_fb_enable(ipu_crtc);
+ ipu_dc_init_sync(ipu_crtc->dc, ipu_crtc->di,
+ mode->flags & DRM_MODE_FLAG_INTERLACE,
+ imx_crtc_state->bus_format, mode->hdisplay);
+ ipu_di_init_sync_panel(ipu_crtc->di, &sig_cfg);
}
static const struct drm_crtc_helper_funcs ipu_helper_funcs = {
- .dpms = ipu_crtc_dpms,
.mode_fixup = ipu_crtc_mode_fixup,
- .mode_set = ipu_crtc_mode_set,
- .prepare = ipu_crtc_prepare,
- .commit = ipu_crtc_commit,
+ .mode_set_nofb = ipu_crtc_mode_set_nofb,
+ .atomic_check = ipu_crtc_atomic_check,
+ .atomic_begin = ipu_crtc_atomic_begin,
+ .disable = ipu_crtc_disable,
+ .enable = ipu_crtc_enable,
};
static int ipu_enable_vblank(struct drm_crtc *crtc)
@@ -398,23 +266,9 @@ static void ipu_disable_vblank(struct drm_crtc *crtc)
disable_irq_nosync(ipu_crtc->irq);
}
-static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc,
- u32 bus_format, int hsync_pin, int vsync_pin, u32 bus_flags)
-{
- struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
-
- ipu_crtc->bus_format = bus_format;
- ipu_crtc->bus_flags = bus_flags;
- ipu_crtc->di_hsync_pin = hsync_pin;
- ipu_crtc->di_vsync_pin = vsync_pin;
-
- return 0;
-}
-
static const struct imx_drm_crtc_helper_funcs ipu_crtc_helper_funcs = {
.enable_vblank = ipu_enable_vblank,
.disable_vblank = ipu_disable_vblank,
- .set_interface_pix_fmt = ipu_set_interface_pix_fmt,
.crtc_funcs = &ipu_crtc_funcs,
.crtc_helper_funcs = &ipu_helper_funcs,
};
@@ -496,8 +350,16 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
IPU_DP_FLOW_SYNC_FG,
drm_crtc_mask(&ipu_crtc->base),
DRM_PLANE_TYPE_OVERLAY);
- if (IS_ERR(ipu_crtc->plane[1]))
+ if (IS_ERR(ipu_crtc->plane[1])) {
ipu_crtc->plane[1] = NULL;
+ } else {
+ ret = ipu_plane_get_resources(ipu_crtc->plane[1]);
+ if (ret) {
+ dev_err(ipu_crtc->dev, "getting plane 1 resources failed with %d.\n", ret);
+ goto err_put_plane0_res;
+ }
+ }
}
ipu_crtc->irq = ipu_plane_irq(ipu_crtc->plane[0]);
@@ -505,16 +367,17 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
"imx_drm", ipu_crtc);
if (ret < 0) {
dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret);
- goto err_put_plane_res;
+ goto err_put_plane1_res;
}
/* Only enable IRQ when we actually need it to trigger work. */
disable_irq(ipu_crtc->irq);
- ipu_crtc->flip_queue = create_singlethread_workqueue("ipu-crtc-flip");
-
return 0;
-err_put_plane_res:
+err_put_plane1_res:
+ if (ipu_crtc->plane[1])
+ ipu_plane_put_resources(ipu_crtc->plane[1]);
+err_put_plane0_res:
ipu_plane_put_resources(ipu_crtc->plane[0]);
err_remove_crtc:
imx_drm_remove_crtc(ipu_crtc->imx_crtc);
@@ -553,9 +416,10 @@ static void ipu_drm_unbind(struct device *dev, struct device *master,
imx_drm_remove_crtc(ipu_crtc->imx_crtc);
- destroy_workqueue(ipu_crtc->flip_queue);
- ipu_plane_put_resources(ipu_crtc->plane[0]);
ipu_put_resources(ipu_crtc);
+ if (ipu_crtc->plane[1])
+ ipu_plane_put_resources(ipu_crtc->plane[1]);
+ ipu_plane_put_resources(ipu_crtc->plane[0]);
}
static const struct component_ops ipu_crtc_ops = {
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index a4bb44118..29423e757 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -14,13 +14,19 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
#include "video/imx-ipu-v3.h"
#include "ipuv3-plane.h"
-#define to_ipu_plane(x) container_of(x, struct ipu_plane, base)
+static inline struct ipu_plane *to_ipu_plane(struct drm_plane *p)
+{
+ return container_of(p, struct ipu_plane, base);
+}
static const uint32_t ipu_plane_formats[] = {
DRM_FORMAT_ARGB1555,
@@ -53,62 +59,67 @@ int ipu_plane_irq(struct ipu_plane *ipu_plane)
IPU_IRQ_EOF);
}
-static int calc_vref(struct drm_display_mode *mode)
+static inline unsigned long
+drm_plane_state_to_eba(struct drm_plane_state *state)
{
- unsigned long htotal, vtotal;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_gem_cma_object *cma_obj;
- htotal = mode->htotal;
- vtotal = mode->vtotal;
+ cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ BUG_ON(!cma_obj);
- if (!htotal || !vtotal)
- return 60;
-
- return DIV_ROUND_UP(mode->clock * 1000, vtotal * htotal);
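+ /* src_x/src_y are 16.16 fixed point; shift down to integer pixels. */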
+ return cma_obj->paddr + fb->offsets[0] +
+ fb->pitches[0] * (state->src_y >> 16) +
+ (fb->bits_per_pixel >> 3) * (state->src_x >> 16);
}
-static inline int calc_bandwidth(int width, int height, unsigned int vref)
+static inline unsigned long
+drm_plane_state_to_ubo(struct drm_plane_state *state)
{
- return width * height * vref;
-}
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_gem_cma_object *cma_obj;
+ unsigned long eba = drm_plane_state_to_eba(state);
-int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
- int x, int y)
-{
- struct drm_gem_cma_object *cma_obj[3];
- unsigned long eba, ubo, vbo;
- int active, i;
+ cma_obj = drm_fb_cma_get_gem_obj(fb, 1);
+ BUG_ON(!cma_obj);
- for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
- cma_obj[i] = drm_fb_cma_get_gem_obj(fb, i);
- if (!cma_obj[i]) {
- DRM_DEBUG_KMS("plane %d entry is null.\n", i);
- return -EFAULT;
- }
- }
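+ /* UBO is relative to EBA; the /2 accounts for 4:2:0 chroma subsampling. */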
+ return cma_obj->paddr + fb->offsets[1] +
+ fb->pitches[1] * (state->src_y >> 16) / 2 +
+ (state->src_x >> 16) / 2 - eba;
+}
- eba = cma_obj[0]->paddr + fb->offsets[0] +
- fb->pitches[0] * y + (fb->bits_per_pixel >> 3) * x;
+static inline unsigned long
+drm_plane_state_to_vbo(struct drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_gem_cma_object *cma_obj;
+ unsigned long eba = drm_plane_state_to_eba(state);
- if (eba & 0x7) {
- DRM_DEBUG_KMS("base address must be a multiple of 8.\n");
- return -EINVAL;
- }
+ cma_obj = drm_fb_cma_get_gem_obj(fb, 2);
+ BUG_ON(!cma_obj);
- if (fb->pitches[0] < 1 || fb->pitches[0] > 16384) {
- DRM_DEBUG_KMS("pitches out of range.\n");
- return -EINVAL;
- }
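+ /* VBO mirrors UBO for the V plane. */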
+ return cma_obj->paddr + fb->offsets[2] +
+ fb->pitches[2] * (state->src_y >> 16) / 2 +
+ (state->src_x >> 16) / 2 - eba;
+}
- if (ipu_plane->enabled && fb->pitches[0] != ipu_plane->stride[0]) {
- DRM_DEBUG_KMS("pitches must not change while plane is enabled.\n");
- return -EINVAL;
- }
+static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_plane *plane = &ipu_plane->base;
+ struct drm_plane_state *state = plane->state;
+ struct drm_framebuffer *fb = state->fb;
+ unsigned long eba, ubo, vbo;
+ int active;
- ipu_plane->stride[0] = fb->pitches[0];
+ eba = drm_plane_state_to_eba(state);
switch (fb->pixel_format) {
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
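+ /* U/V offsets were already programmed when the plane was enabled. */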
+ if (old_state->fb)
+ break;
+
/*
* Multiplanar formats have to meet the following restrictions:
* - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO
@@ -117,59 +128,28 @@ int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
* - Only EBA may be changed while scanout is active
* - The strides of U and V planes must be identical.
*/
- ubo = cma_obj[1]->paddr + fb->offsets[1] +
- fb->pitches[1] * y / 2 + x / 2 - eba;
- vbo = cma_obj[2]->paddr + fb->offsets[2] +
- fb->pitches[2] * y / 2 + x / 2 - eba;
-
- if ((ubo & 0x7) || (vbo & 0x7)) {
- DRM_DEBUG_KMS("U/V buffer offsets must be a multiple of 8.\n");
- return -EINVAL;
- }
-
- if ((ubo > 0xfffff8) || (vbo > 0xfffff8)) {
- DRM_DEBUG_KMS("U/V buffer offsets must be positive and not larger than 0xfffff8.\n");
- return -EINVAL;
- }
-
- if (ipu_plane->enabled && ((ipu_plane->u_offset != ubo) ||
- (ipu_plane->v_offset != vbo))) {
- DRM_DEBUG_KMS("U/V buffer offsets must not change while plane is enabled.\n");
- return -EINVAL;
- }
-
- if (fb->pitches[1] != fb->pitches[2]) {
- DRM_DEBUG_KMS("U/V pitches must be identical.\n");
- return -EINVAL;
- }
+ ubo = drm_plane_state_to_ubo(state);
+ vbo = drm_plane_state_to_vbo(state);
- if (fb->pitches[1] < 1 || fb->pitches[1] > 16384) {
- DRM_DEBUG_KMS("U/V pitches out of range.\n");
- return -EINVAL;
- }
-
- if (ipu_plane->enabled &&
- (ipu_plane->stride[1] != fb->pitches[1])) {
- DRM_DEBUG_KMS("U/V pitches must not change while plane is enabled.\n");
- return -EINVAL;
- }
-
- ipu_plane->u_offset = ubo;
- ipu_plane->v_offset = vbo;
- ipu_plane->stride[1] = fb->pitches[1];
+ if (fb->pixel_format == DRM_FORMAT_YUV420)
+ ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+ fb->pitches[1], ubo, vbo);
+ else
+ ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+ fb->pitches[1], vbo, ubo);
dev_dbg(ipu_plane->base.dev->dev,
- "phys = %pad %pad %pad, x = %d, y = %d",
- &cma_obj[0]->paddr, &cma_obj[1]->paddr,
- &cma_obj[2]->paddr, x, y);
+ "phy = %lu %lu %lu, x = %d, y = %d", eba, ubo, vbo,
+ state->src_x >> 16, state->src_y >> 16);
break;
default:
- dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d",
- &cma_obj[0]->paddr, x, y);
+ dev_dbg(ipu_plane->base.dev->dev, "phys = %lu, x = %d, y = %d",
+ eba, state->src_x >> 16, state->src_y >> 16);
+
break;
}
- if (ipu_plane->enabled) {
+ if (old_state->fb) {
active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active);
@@ -177,155 +157,6 @@ int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 0, eba);
ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 1, eba);
}
-
- /* cache offsets for subsequent pageflips */
- ipu_plane->x = x;
- ipu_plane->y = y;
-
- return 0;
-}
-
-int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h, bool interlaced)
-{
- struct device *dev = ipu_plane->base.dev->dev;
- int ret;
-
- /* no scaling */
- if (src_w != crtc_w || src_h != crtc_h)
- return -EINVAL;
-
- /* clip to crtc bounds */
- if (crtc_x < 0) {
- if (-crtc_x > crtc_w)
- return -EINVAL;
- src_x += -crtc_x;
- src_w -= -crtc_x;
- crtc_w -= -crtc_x;
- crtc_x = 0;
- }
- if (crtc_y < 0) {
- if (-crtc_y > crtc_h)
- return -EINVAL;
- src_y += -crtc_y;
- src_h -= -crtc_y;
- crtc_h -= -crtc_y;
- crtc_y = 0;
- }
- if (crtc_x + crtc_w > mode->hdisplay) {
- if (crtc_x > mode->hdisplay)
- return -EINVAL;
- crtc_w = mode->hdisplay - crtc_x;
- src_w = crtc_w;
- }
- if (crtc_y + crtc_h > mode->vdisplay) {
- if (crtc_y > mode->vdisplay)
- return -EINVAL;
- crtc_h = mode->vdisplay - crtc_y;
- src_h = crtc_h;
- }
- /* full plane minimum width is 13 pixels */
- if (crtc_w < 13 && (ipu_plane->dp_flow != IPU_DP_FLOW_SYNC_FG))
- return -EINVAL;
- if (crtc_h < 2)
- return -EINVAL;
-
- /*
- * since we cannot touch active IDMAC channels, we do not support
- * resizing the enabled plane or changing its format
- */
- if (ipu_plane->enabled) {
- if (src_w != ipu_plane->w || src_h != ipu_plane->h ||
- fb->pixel_format != ipu_plane->base.fb->pixel_format)
- return -EINVAL;
-
- return ipu_plane_set_base(ipu_plane, fb, src_x, src_y);
- }
-
- switch (ipu_plane->dp_flow) {
- case IPU_DP_FLOW_SYNC_BG:
- ret = ipu_dp_setup_channel(ipu_plane->dp,
- IPUV3_COLORSPACE_RGB,
- IPUV3_COLORSPACE_RGB);
- if (ret) {
- dev_err(dev,
- "initializing display processor failed with %d\n",
- ret);
- return ret;
- }
- ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
- break;
- case IPU_DP_FLOW_SYNC_FG:
- ipu_dp_setup_channel(ipu_plane->dp,
- ipu_drm_fourcc_to_colorspace(fb->pixel_format),
- IPUV3_COLORSPACE_UNKNOWN);
- ipu_dp_set_window_pos(ipu_plane->dp, crtc_x, crtc_y);
- /* Enable local alpha on partial plane */
- switch (fb->pixel_format) {
- case DRM_FORMAT_ARGB1555:
- case DRM_FORMAT_ABGR1555:
- case DRM_FORMAT_RGBA5551:
- case DRM_FORMAT_BGRA5551:
- case DRM_FORMAT_ARGB4444:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_RGBA8888:
- case DRM_FORMAT_BGRA8888:
- ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false);
- break;
- default:
- break;
- }
- }
-
- ret = ipu_dmfc_alloc_bandwidth(ipu_plane->dmfc,
- calc_bandwidth(crtc_w, crtc_h,
- calc_vref(mode)), 64);
- if (ret) {
- dev_err(dev, "allocating dmfc bandwidth failed with %d\n", ret);
- return ret;
- }
-
- ipu_dmfc_config_wait4eot(ipu_plane->dmfc, crtc_w);
-
- ipu_cpmem_zero(ipu_plane->ipu_ch);
- ipu_cpmem_set_resolution(ipu_plane->ipu_ch, src_w, src_h);
- ret = ipu_cpmem_set_fmt(ipu_plane->ipu_ch, fb->pixel_format);
- if (ret < 0) {
- dev_err(dev, "unsupported pixel format 0x%08x\n",
- fb->pixel_format);
- return ret;
- }
- ipu_cpmem_set_high_priority(ipu_plane->ipu_ch);
- ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1);
- ipu_cpmem_set_stride(ipu_plane->ipu_ch, fb->pitches[0]);
-
- ret = ipu_plane_set_base(ipu_plane, fb, src_x, src_y);
- if (ret < 0)
- return ret;
- if (interlaced)
- ipu_cpmem_interlaced_scan(ipu_plane->ipu_ch, fb->pitches[0]);
-
- if (fb->pixel_format == DRM_FORMAT_YUV420) {
- ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
- ipu_plane->stride[1],
- ipu_plane->u_offset,
- ipu_plane->v_offset);
- } else if (fb->pixel_format == DRM_FORMAT_YVU420) {
- ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
- ipu_plane->stride[1],
- ipu_plane->v_offset,
- ipu_plane->u_offset);
- }
-
- ipu_plane->w = src_w;
- ipu_plane->h = src_h;
-
- return 0;
}
void ipu_plane_put_resources(struct ipu_plane *ipu_plane)
@@ -372,7 +203,7 @@ err_out:
return ret;
}
-void ipu_plane_enable(struct ipu_plane *ipu_plane)
+static void ipu_plane_enable(struct ipu_plane *ipu_plane)
{
if (ipu_plane->dp)
ipu_dp_enable(ipu_plane->ipu);
@@ -380,14 +211,10 @@ void ipu_plane_enable(struct ipu_plane *ipu_plane)
ipu_idmac_enable_channel(ipu_plane->ipu_ch);
if (ipu_plane->dp)
ipu_dp_enable_channel(ipu_plane->dp);
-
- ipu_plane->enabled = true;
}
-void ipu_plane_disable(struct ipu_plane *ipu_plane)
+static void ipu_plane_disable(struct ipu_plane *ipu_plane)
{
- ipu_plane->enabled = false;
-
ipu_idmac_wait_busy(ipu_plane->ipu_ch, 50);
if (ipu_plane->dp)
@@ -398,74 +225,232 @@ void ipu_plane_disable(struct ipu_plane *ipu_plane)
ipu_dp_disable(ipu_plane->ipu);
}
-/*
- * drm_plane API
- */
-
-static int ipu_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h)
+static int ipu_disable_plane(struct drm_plane *plane)
{
struct ipu_plane *ipu_plane = to_ipu_plane(plane);
- int ret = 0;
-
- DRM_DEBUG_KMS("plane - %p\n", plane);
-
- if (!ipu_plane->enabled)
- ret = ipu_plane_get_resources(ipu_plane);
- if (ret < 0)
- return ret;
-
- ret = ipu_plane_mode_set(ipu_plane, crtc, &crtc->hwmode, fb,
- crtc_x, crtc_y, crtc_w, crtc_h,
- src_x >> 16, src_y >> 16, src_w >> 16, src_h >> 16,
- false);
- if (ret < 0) {
- ipu_plane_put_resources(ipu_plane);
- return ret;
- }
- if (crtc != plane->crtc)
- dev_dbg(plane->dev->dev, "crtc change: %p -> %p\n",
- plane->crtc, crtc);
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- if (!ipu_plane->enabled)
- ipu_plane_enable(ipu_plane);
+ ipu_plane_disable(ipu_plane);
return 0;
}
-static int ipu_disable_plane(struct drm_plane *plane)
+static void ipu_plane_destroy(struct drm_plane *plane)
{
struct ipu_plane *ipu_plane = to_ipu_plane(plane);
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- if (ipu_plane->enabled)
- ipu_plane_disable(ipu_plane);
+ ipu_disable_plane(plane);
+ drm_plane_cleanup(plane);
+ kfree(ipu_plane);
+}
- ipu_plane_put_resources(ipu_plane);
+static const struct drm_plane_funcs ipu_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = ipu_plane_destroy,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static int ipu_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_plane_state *old_state = plane->state;
+ struct drm_crtc_state *crtc_state;
+ struct device *dev = plane->dev->dev;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_framebuffer *old_fb = old_state->fb;
+ unsigned long eba, ubo, vbo, old_ubo, old_vbo;
+
+ /* Ok to disable */
+ if (!fb)
+ return 0;
+
+ if (!state->crtc)
+ return -EINVAL;
+
+ crtc_state =
+ drm_atomic_get_existing_crtc_state(state->state, state->crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
+ /* CRTC should be enabled */
+ if (!crtc_state->enable)
+ return -EINVAL;
+
+ /* no scaling */
+ if (state->src_w >> 16 != state->crtc_w ||
+ state->src_h >> 16 != state->crtc_h)
+ return -EINVAL;
+
+ switch (plane->type) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ /* the full plane may not be positioned partially off screen */
+ if (state->crtc_x || state->crtc_y ||
+ state->crtc_w != crtc_state->adjusted_mode.hdisplay ||
+ state->crtc_h != crtc_state->adjusted_mode.vdisplay)
+ return -EINVAL;
+
+ /* full plane minimum width is 13 pixels */
+ if (state->crtc_w < 13)
+ return -EINVAL;
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ if (state->crtc_x < 0 || state->crtc_y < 0)
+ return -EINVAL;
+
+ if (state->crtc_x + state->crtc_w >
+ crtc_state->adjusted_mode.hdisplay)
+ return -EINVAL;
+ if (state->crtc_y + state->crtc_h >
+ crtc_state->adjusted_mode.vdisplay)
+ return -EINVAL;
+ break;
+ default:
+ dev_warn(dev, "Unsupported plane type\n");
+ return -EINVAL;
+ }
+
+ if (state->crtc_h < 2)
+ return -EINVAL;
+
+ /*
+ * We support resizing the active plane or changing its format by
+ * forcing a CRTC mode change and disabling/enabling the plane in the
+ * plane's ->atomic_update callback.
+ */
+ if (old_fb && (state->src_w != old_state->src_w ||
+ state->src_h != old_state->src_h ||
+ fb->pixel_format != old_fb->pixel_format))
+ crtc_state->mode_changed = true;
+
+ eba = drm_plane_state_to_eba(state);
+
+ if (eba & 0x7)
+ return -EINVAL;
+
+ if (fb->pitches[0] < 1 || fb->pitches[0] > 16384)
+ return -EINVAL;
+
+ if (old_fb && fb->pitches[0] != old_fb->pitches[0])
+ crtc_state->mode_changed = true;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ /*
+ * Multiplanar formats have to meet the following restrictions:
+ * - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO
+ * - EBA, UBO and VBO are a multiple of 8
+ * - UBO and VBO are unsigned and not larger than 0xfffff8
+ * - Only EBA may be changed while scanout is active
+ * - The strides of U and V planes must be identical.
+ */
+ ubo = drm_plane_state_to_ubo(state);
+ vbo = drm_plane_state_to_vbo(state);
+
+ if ((ubo & 0x7) || (vbo & 0x7))
+ return -EINVAL;
+
+ if ((ubo > 0xfffff8) || (vbo > 0xfffff8))
+ return -EINVAL;
+
+ if (old_fb) {
+ old_ubo = drm_plane_state_to_ubo(old_state);
+ old_vbo = drm_plane_state_to_vbo(old_state);
+ if (ubo != old_ubo || vbo != old_vbo)
+ return -EINVAL;
+ }
+
+ if (fb->pitches[1] != fb->pitches[2])
+ return -EINVAL;
+
+ if (fb->pitches[1] < 1 || fb->pitches[1] > 16384)
+ return -EINVAL;
+
+ if (old_fb && old_fb->pitches[1] != fb->pitches[1])
+ crtc_state->mode_changed = true;
+ }
return 0;
}
-static void ipu_plane_destroy(struct drm_plane *plane)
+static void ipu_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ ipu_disable_plane(plane);
+}
+
+static void ipu_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
{
struct ipu_plane *ipu_plane = to_ipu_plane(plane);
+ struct drm_plane_state *state = plane->state;
+ enum ipu_color_space ics;
- DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+ if (old_state->fb) {
+ struct drm_crtc_state *crtc_state = state->crtc->state;
- ipu_disable_plane(plane);
- drm_plane_cleanup(plane);
- kfree(ipu_plane);
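+ /* A plain page flip only needs a new base address. */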
+ if (!crtc_state->mode_changed) {
+ ipu_plane_atomic_set_base(ipu_plane, old_state);
+ return;
+ }
+
+ ipu_disable_plane(plane);
+ }
+
+ switch (ipu_plane->dp_flow) {
+ case IPU_DP_FLOW_SYNC_BG:
+ ipu_dp_setup_channel(ipu_plane->dp,
+ IPUV3_COLORSPACE_RGB,
+ IPUV3_COLORSPACE_RGB);
+ ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
+ break;
+ case IPU_DP_FLOW_SYNC_FG:
+ ics = ipu_drm_fourcc_to_colorspace(state->fb->pixel_format);
+ ipu_dp_setup_channel(ipu_plane->dp, ics,
+ IPUV3_COLORSPACE_UNKNOWN);
+ ipu_dp_set_window_pos(ipu_plane->dp, state->crtc_x,
+ state->crtc_y);
+ /* Enable local alpha on partial plane */
+ switch (state->fb->pixel_format) {
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_BGRA5551:
+ case DRM_FORMAT_ARGB4444:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false);
+ break;
+ default:
+ break;
+ }
+ }
+
+ ipu_dmfc_config_wait4eot(ipu_plane->dmfc, state->crtc_w);
+
+ ipu_cpmem_zero(ipu_plane->ipu_ch);
+ ipu_cpmem_set_resolution(ipu_plane->ipu_ch, state->src_w >> 16,
+ state->src_h >> 16);
+ ipu_cpmem_set_fmt(ipu_plane->ipu_ch, state->fb->pixel_format);
+ ipu_cpmem_set_high_priority(ipu_plane->ipu_ch);
+ ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1);
+ ipu_cpmem_set_stride(ipu_plane->ipu_ch, state->fb->pitches[0]);
+ ipu_plane_atomic_set_base(ipu_plane, old_state);
+ ipu_plane_enable(ipu_plane);
}
-static const struct drm_plane_funcs ipu_plane_funcs = {
- .update_plane = ipu_update_plane,
- .disable_plane = ipu_disable_plane,
- .destroy = ipu_plane_destroy,
+static const struct drm_plane_helper_funcs ipu_plane_helper_funcs = {
+ .atomic_check = ipu_plane_atomic_check,
+ .atomic_disable = ipu_plane_atomic_disable,
+ .atomic_update = ipu_plane_atomic_update,
};
struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
@@ -498,5 +483,7 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
return ERR_PTR(ret);
}
+ drm_plane_helper_add(&ipu_plane->base, &ipu_plane_helper_funcs);
+
return ipu_plane;
}
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h
index 4448fd4ad..338b88a74 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.h
+++ b/drivers/gpu/drm/imx/ipuv3-plane.h
@@ -23,17 +23,6 @@ struct ipu_plane {
int dma;
int dp_flow;
-
- int x;
- int y;
- int w;
- int h;
-
- unsigned int u_offset;
- unsigned int v_offset;
- unsigned int stride[2];
-
- bool enabled;
};
struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
@@ -48,11 +37,6 @@ int ipu_plane_mode_set(struct ipu_plane *plane, struct drm_crtc *crtc,
uint32_t src_x, uint32_t src_y, uint32_t src_w,
uint32_t src_h, bool interlaced);
-void ipu_plane_enable(struct ipu_plane *plane);
-void ipu_plane_disable(struct ipu_plane *plane);
-int ipu_plane_set_base(struct ipu_plane *plane, struct drm_framebuffer *fb,
- int x, int y);
-
int ipu_plane_get_resources(struct ipu_plane *plane);
void ipu_plane_put_resources(struct ipu_plane *plane);
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 2d1fd02cd..1dad297b0 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -16,6 +16,7 @@
#include <linux/component.h>
#include <linux/module.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_panel.h>
@@ -25,9 +26,6 @@
#include "imx-drm.h"
-#define con_to_imxpd(x) container_of(x, struct imx_parallel_display, connector)
-#define enc_to_imxpd(x) container_of(x, struct imx_parallel_display, encoder)
-
struct imx_parallel_display {
struct drm_connector connector;
struct drm_encoder encoder;
@@ -37,8 +35,19 @@ struct imx_parallel_display {
u32 bus_format;
struct drm_display_mode mode;
struct drm_panel *panel;
+ struct drm_bridge *bridge;
};
+static inline struct imx_parallel_display *con_to_imxpd(struct drm_connector *c)
+{
+ return container_of(c, struct imx_parallel_display, connector);
+}
+
+static inline struct imx_parallel_display *enc_to_imxpd(struct drm_encoder *e)
+{
+ return container_of(e, struct imx_parallel_display, encoder);
+}
+
static enum drm_connector_status imx_pd_connector_detect(
struct drm_connector *connector, bool force)
{
@@ -53,11 +62,7 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
if (imxpd->panel && imxpd->panel->funcs &&
imxpd->panel->funcs->get_modes) {
- struct drm_display_info *di = &connector->display_info;
-
num_modes = imxpd->panel->funcs->get_modes(imxpd->panel);
- if (!imxpd->bus_format && di->num_bus_formats)
- imxpd->bus_format = di->bus_formats[0];
if (num_modes > 0)
return num_modes;
}
@@ -69,10 +74,16 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
if (np) {
struct drm_display_mode *mode = drm_mode_create(connector->dev);
+ int ret;
if (!mode)
return -EINVAL;
- of_get_drm_display_mode(np, &imxpd->mode, OF_USE_NATIVE_MODE);
+
+ ret = of_get_drm_display_mode(np, &imxpd->mode,
+ OF_USE_NATIVE_MODE);
+ if (ret)
+ return ret;
+
drm_mode_copy(mode, &imxpd->mode);
mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
@@ -90,24 +101,7 @@ static struct drm_encoder *imx_pd_connector_best_encoder(
return &imxpd->encoder;
}
-static void imx_pd_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
- struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
-
- if (mode != DRM_MODE_DPMS_ON)
- drm_panel_disable(imxpd->panel);
- else
- drm_panel_enable(imxpd->panel);
-}
-
-static void imx_pd_encoder_prepare(struct drm_encoder *encoder)
-{
- struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
- imx_drm_set_bus_config(encoder, imxpd->bus_format, 2, 3,
- imxpd->connector.display_info.bus_flags);
-}
-
-static void imx_pd_encoder_commit(struct drm_encoder *encoder)
+static void imx_pd_encoder_enable(struct drm_encoder *encoder)
{
struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
@@ -115,12 +109,6 @@ static void imx_pd_encoder_commit(struct drm_encoder *encoder)
drm_panel_enable(imxpd->panel);
}
-static void imx_pd_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *orig_mode,
- struct drm_display_mode *mode)
-{
-}
-
static void imx_pd_encoder_disable(struct drm_encoder *encoder)
{
struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
@@ -129,11 +117,33 @@ static void imx_pd_encoder_disable(struct drm_encoder *encoder)
drm_panel_unprepare(imxpd->panel);
}
+static int imx_pd_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
+ struct drm_display_info *di = &conn_state->connector->display_info;
+ struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
+
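+ /* Hand the bus configuration to the CRTC through the subclassed state. */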
+ imx_crtc_state->bus_flags = di->bus_flags;
+ if (!imxpd->bus_format && di->num_bus_formats)
+ imx_crtc_state->bus_format = di->bus_formats[0];
+ else
+ imx_crtc_state->bus_format = imxpd->bus_format;
+ imx_crtc_state->di_hsync_pin = 2;
+ imx_crtc_state->di_vsync_pin = 3;
+
+ return 0;
+}
+
static const struct drm_connector_funcs imx_pd_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
+ .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = imx_pd_connector_detect,
.destroy = imx_drm_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs imx_pd_connector_helper_funcs = {
@@ -146,20 +156,18 @@ static const struct drm_encoder_funcs imx_pd_encoder_funcs = {
};
static const struct drm_encoder_helper_funcs imx_pd_encoder_helper_funcs = {
- .dpms = imx_pd_encoder_dpms,
- .prepare = imx_pd_encoder_prepare,
- .commit = imx_pd_encoder_commit,
- .mode_set = imx_pd_encoder_mode_set,
+ .enable = imx_pd_encoder_enable,
.disable = imx_pd_encoder_disable,
+ .atomic_check = imx_pd_encoder_atomic_check,
};
static int imx_pd_register(struct drm_device *drm,
struct imx_parallel_display *imxpd)
{
+ struct drm_encoder *encoder = &imxpd->encoder;
int ret;
- ret = imx_drm_encoder_parse_of(drm, &imxpd->encoder,
- imxpd->dev->of_node);
+ ret = imx_drm_encoder_parse_of(drm, encoder, imxpd->dev->of_node);
if (ret)
return ret;
@@ -170,19 +178,33 @@ static int imx_pd_register(struct drm_device *drm,
*/
imxpd->connector.dpms = DRM_MODE_DPMS_OFF;
- drm_encoder_helper_add(&imxpd->encoder, &imx_pd_encoder_helper_funcs);
- drm_encoder_init(drm, &imxpd->encoder, &imx_pd_encoder_funcs,
+ drm_encoder_helper_add(encoder, &imx_pd_encoder_helper_funcs);
+ drm_encoder_init(drm, encoder, &imx_pd_encoder_funcs,
DRM_MODE_ENCODER_NONE, NULL);
- drm_connector_helper_add(&imxpd->connector,
- &imx_pd_connector_helper_funcs);
- drm_connector_init(drm, &imxpd->connector, &imx_pd_connector_funcs,
- DRM_MODE_CONNECTOR_VGA);
+ if (!imxpd->bridge) {
+ drm_connector_helper_add(&imxpd->connector,
+ &imx_pd_connector_helper_funcs);
+ drm_connector_init(drm, &imxpd->connector,
+ &imx_pd_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA);
+ }
if (imxpd->panel)
drm_panel_attach(imxpd->panel, &imxpd->connector);
- drm_mode_connector_attach_encoder(&imxpd->connector, &imxpd->encoder);
+ if (imxpd->bridge) {
+ imxpd->bridge->encoder = encoder;
+ encoder->bridge = imxpd->bridge;
+ ret = drm_bridge_attach(drm, imxpd->bridge);
+ if (ret < 0) {
+ dev_err(imxpd->dev, "failed to attach bridge: %d\n",
+ ret);
+ return ret;
+ }
+ } else {
+ drm_mode_connector_attach_encoder(&imxpd->connector, encoder);
+ }
return 0;
}
@@ -195,6 +217,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
const u8 *edidp;
struct imx_parallel_display *imxpd;
int ret;
+ u32 bus_format = 0;
const char *fmt;
imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL);
@@ -208,14 +231,15 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
ret = of_property_read_string(np, "interface-pix-fmt", &fmt);
if (!ret) {
if (!strcmp(fmt, "rgb24"))
- imxpd->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ bus_format = MEDIA_BUS_FMT_RGB888_1X24;
else if (!strcmp(fmt, "rgb565"))
- imxpd->bus_format = MEDIA_BUS_FMT_RGB565_1X16;
+ bus_format = MEDIA_BUS_FMT_RGB565_1X16;
else if (!strcmp(fmt, "bgr666"))
- imxpd->bus_format = MEDIA_BUS_FMT_RGB666_1X18;
+ bus_format = MEDIA_BUS_FMT_RGB666_1X18;
else if (!strcmp(fmt, "lvds666"))
- imxpd->bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
+ bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
}
+ imxpd->bus_format = bus_format;
/* port@1 is the output port */
ep = of_graph_get_endpoint_by_regs(np, 1, -1);
@@ -223,13 +247,30 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
struct device_node *remote;
remote = of_graph_get_remote_port_parent(ep);
+ if (!remote) {
+ dev_warn(dev, "endpoint %s not connected\n",
+ ep->full_name);
+ of_node_put(ep);
+ return -ENODEV;
+ }
of_node_put(ep);
- if (remote) {
- imxpd->panel = of_drm_find_panel(remote);
- of_node_put(remote);
+
+ imxpd->panel = of_drm_find_panel(remote);
+ if (imxpd->panel) {
+ dev_dbg(dev, "found panel %s\n", remote->full_name);
+ } else {
+ imxpd->bridge = of_drm_find_bridge(remote);
+ if (imxpd->bridge)
+ dev_dbg(dev, "found bridge %s\n",
+ remote->full_name);
}
- if (!imxpd->panel)
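+ /* Neither panel nor bridge is bound yet; retry once the provider probes. */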
+ if (!imxpd->panel && !imxpd->bridge) {
+ dev_dbg(dev, "waiting for panel or bridge %s\n",
+ remote->full_name);
+ of_node_put(remote);
return -EPROBE_DEFER;
+ }
+ of_node_put(remote);
}
imxpd->dev = dev;
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
index eeefc9718..294de4549 100644
--- a/drivers/gpu/drm/mediatek/Kconfig
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -2,11 +2,13 @@ config DRM_MEDIATEK
tristate "DRM Support for Mediatek SoCs"
depends on DRM
depends on ARCH_MEDIATEK || (ARM && COMPILE_TEST)
+ depends on COMMON_CLK
+ depends on HAVE_ARM_SMCCC
+ depends on OF
select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL
- select IOMMU_DMA
select MEMORY
select MTK_SMI
help
@@ -14,3 +16,11 @@ config DRM_MEDIATEK
The module will be called mediatek-drm.
This driver provides kernel mode setting and
buffer management to userspace.
+
+config DRM_MEDIATEK_HDMI
+ tristate "DRM HDMI Support for Mediatek SoCs"
+ depends on DRM_MEDIATEK
+ select SND_SOC_HDMI_CODEC if SND_SOC
+ select GENERIC_PHY
+ help
+ Choose this option if you want to enable the DRM/KMS HDMI driver
+ for Mediatek SoCs.
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index 5fcf58e87..bf2e5be1a 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -12,3 +12,10 @@ mediatek-drm-y := mtk_disp_ovl.o \
mtk_dpi.o
obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o
+
+mediatek-drm-hdmi-objs := mtk_cec.o \
+ mtk_hdmi.o \
+ mtk_hdmi_ddc.o \
+ mtk_mt8173_hdmi_phy.o
+
+obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mediatek-drm-hdmi.o
diff --git a/drivers/gpu/drm/mediatek/mtk_cec.c b/drivers/gpu/drm/mediatek/mtk_cec.c
new file mode 100644
index 000000000..7a3eb8c17
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_cec.c
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Jie Qiu <jie.qiu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+#include "mtk_cec.h"
+
+#define TR_CONFIG 0x00
+#define CLEAR_CEC_IRQ BIT(15)
+
+#define CEC_CKGEN 0x04
+#define CEC_32K_PDN BIT(19)
+#define PDN BIT(16)
+
+#define RX_EVENT 0x54
+#define HDMI_PORD BIT(25)
+#define HDMI_HTPLG BIT(24)
+#define HDMI_PORD_INT_EN BIT(9)
+#define HDMI_HTPLG_INT_EN BIT(8)
+
+#define RX_GEN_WD 0x58
+#define HDMI_PORD_INT_32K_STATUS BIT(26)
+#define RX_RISC_INT_32K_STATUS BIT(25)
+#define HDMI_HTPLG_INT_32K_STATUS BIT(24)
+#define HDMI_PORD_INT_32K_CLR BIT(18)
+#define RX_INT_32K_CLR BIT(17)
+#define HDMI_HTPLG_INT_32K_CLR BIT(16)
+#define HDMI_PORD_INT_32K_STA_MASK BIT(10)
+#define RX_RISC_INT_32K_STA_MASK BIT(9)
+#define HDMI_HTPLG_INT_32K_STA_MASK BIT(8)
+#define HDMI_PORD_INT_32K_EN BIT(2)
+#define RX_INT_32K_EN BIT(1)
+#define HDMI_HTPLG_INT_32K_EN BIT(0)
+
+#define NORMAL_INT_CTRL 0x5C
+#define HDMI_HTPLG_INT_STA BIT(0)
+#define HDMI_PORD_INT_STA BIT(1)
+#define HDMI_HTPLG_INT_CLR BIT(16)
+#define HDMI_PORD_INT_CLR BIT(17)
+#define HDMI_FULL_INT_CLR BIT(20)
+
+struct mtk_cec {
+ void __iomem *regs;
+ struct clk *clk;
+ int irq;
+ bool hpd;
+ void (*hpd_event)(bool hpd, struct device *dev);
+ struct device *hdmi_dev;
+ spinlock_t lock;
+};
+
+static void mtk_cec_clear_bits(struct mtk_cec *cec, unsigned int offset,
+ unsigned int bits)
+{
+ void __iomem *reg = cec->regs + offset;
+ u32 tmp;
+
+ tmp = readl(reg);
+ tmp &= ~bits;
+ writel(tmp, reg);
+}
+
+static void mtk_cec_set_bits(struct mtk_cec *cec, unsigned int offset,
+ unsigned int bits)
+{
+ void __iomem *reg = cec->regs + offset;
+ u32 tmp;
+
+ tmp = readl(reg);
+ tmp |= bits;
+ writel(tmp, reg);
+}
+
+static void mtk_cec_mask(struct mtk_cec *cec, unsigned int offset,
+ unsigned int val, unsigned int mask)
+{
+ u32 tmp = readl(cec->regs + offset) & ~mask;
+
+ tmp |= val & mask;
+ writel(tmp, cec->regs + offset);
+}
+
+void mtk_cec_set_hpd_event(struct device *dev,
+ void (*hpd_event)(bool hpd, struct device *dev),
+ struct device *hdmi_dev)
+{
+ struct mtk_cec *cec = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&cec->lock, flags);
+ cec->hdmi_dev = hdmi_dev;
+ cec->hpd_event = hpd_event;
+ spin_unlock_irqrestore(&cec->lock, flags);
+}
+
+bool mtk_cec_hpd_high(struct device *dev)
+{
+ struct mtk_cec *cec = dev_get_drvdata(dev);
+ unsigned int status;
+
+ status = readl(cec->regs + RX_EVENT);
+
+ return (status & (HDMI_PORD | HDMI_HTPLG)) == (HDMI_PORD | HDMI_HTPLG);
+}
+
+static void mtk_cec_htplg_irq_init(struct mtk_cec *cec)
+{
+ mtk_cec_mask(cec, CEC_CKGEN, 0 | CEC_32K_PDN, PDN | CEC_32K_PDN);
+ mtk_cec_set_bits(cec, RX_GEN_WD, HDMI_PORD_INT_32K_CLR |
+ RX_INT_32K_CLR | HDMI_HTPLG_INT_32K_CLR);
+ mtk_cec_mask(cec, RX_GEN_WD, 0, HDMI_PORD_INT_32K_CLR | RX_INT_32K_CLR |
+ HDMI_HTPLG_INT_32K_CLR | HDMI_PORD_INT_32K_EN |
+ RX_INT_32K_EN | HDMI_HTPLG_INT_32K_EN);
+}
+
+static void mtk_cec_htplg_irq_enable(struct mtk_cec *cec)
+{
+ mtk_cec_set_bits(cec, RX_EVENT, HDMI_PORD_INT_EN | HDMI_HTPLG_INT_EN);
+}
+
+static void mtk_cec_htplg_irq_disable(struct mtk_cec *cec)
+{
+ mtk_cec_clear_bits(cec, RX_EVENT, HDMI_PORD_INT_EN | HDMI_HTPLG_INT_EN);
+}
+
+static void mtk_cec_clear_htplg_irq(struct mtk_cec *cec)
+{
+ mtk_cec_set_bits(cec, TR_CONFIG, CLEAR_CEC_IRQ);
+ mtk_cec_set_bits(cec, NORMAL_INT_CTRL, HDMI_HTPLG_INT_CLR |
+ HDMI_PORD_INT_CLR | HDMI_FULL_INT_CLR);
+ mtk_cec_set_bits(cec, RX_GEN_WD, HDMI_PORD_INT_32K_CLR |
+ RX_INT_32K_CLR | HDMI_HTPLG_INT_32K_CLR);
+ usleep_range(5, 10);
+ mtk_cec_clear_bits(cec, NORMAL_INT_CTRL, HDMI_HTPLG_INT_CLR |
+ HDMI_PORD_INT_CLR | HDMI_FULL_INT_CLR);
+ mtk_cec_clear_bits(cec, TR_CONFIG, CLEAR_CEC_IRQ);
+ mtk_cec_clear_bits(cec, RX_GEN_WD, HDMI_PORD_INT_32K_CLR |
+ RX_INT_32K_CLR | HDMI_HTPLG_INT_32K_CLR);
+}
+
+static void mtk_cec_hpd_event(struct mtk_cec *cec, bool hpd)
+{
+ void (*hpd_event)(bool hpd, struct device *dev);
+ struct device *hdmi_dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cec->lock, flags);
+ hpd_event = cec->hpd_event;
+ hdmi_dev = cec->hdmi_dev;
+ spin_unlock_irqrestore(&cec->lock, flags);
+
+ if (hpd_event)
+ hpd_event(hpd, hdmi_dev);
+}
+
+static irqreturn_t mtk_cec_htplg_isr_thread(int irq, void *arg)
+{
+ struct device *dev = arg;
+ struct mtk_cec *cec = dev_get_drvdata(dev);
+ bool hpd;
+
+ mtk_cec_clear_htplg_irq(cec);
+ hpd = mtk_cec_hpd_high(dev);
+
+ if (cec->hpd != hpd) {
+ dev_dbg(dev, "hotplug event! cur hpd = %d, hpd = %d\n",
+ cec->hpd, hpd);
+ cec->hpd = hpd;
+ mtk_cec_hpd_event(cec, hpd);
+ }
+ return IRQ_HANDLED;
+}
+
+static int mtk_cec_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_cec *cec;
+ struct resource *res;
+ int ret;
+
+ cec = devm_kzalloc(dev, sizeof(*cec), GFP_KERNEL);
+ if (!cec)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, cec);
+ spin_lock_init(&cec->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cec->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cec->regs)) {
+ ret = PTR_ERR(cec->regs);
+ dev_err(dev, "Failed to ioremap cec: %d\n", ret);
+ return ret;
+ }
+
+ cec->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(cec->clk)) {
+ ret = PTR_ERR(cec->clk);
+ dev_err(dev, "Failed to get cec clock: %d\n", ret);
+ return ret;
+ }
+
+ cec->irq = platform_get_irq(pdev, 0);
+ if (cec->irq < 0) {
+ dev_err(dev, "Failed to get cec irq: %d\n", cec->irq);
+ return cec->irq;
+ }
+
+ ret = devm_request_threaded_irq(dev, cec->irq, NULL,
+ mtk_cec_htplg_isr_thread,
+ IRQF_SHARED | IRQF_TRIGGER_LOW |
+ IRQF_ONESHOT, "hdmi hpd", dev);
+ if (ret) {
+ dev_err(dev, "Failed to register cec irq: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(cec->clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable cec clock: %d\n", ret);
+ return ret;
+ }
+
+ mtk_cec_htplg_irq_init(cec);
+ mtk_cec_htplg_irq_enable(cec);
+
+ return 0;
+}
+
+static int mtk_cec_remove(struct platform_device *pdev)
+{
+ struct mtk_cec *cec = platform_get_drvdata(pdev);
+
+ mtk_cec_htplg_irq_disable(cec);
+ clk_disable_unprepare(cec->clk);
+ return 0;
+}
+
+static const struct of_device_id mtk_cec_of_ids[] = {
+ { .compatible = "mediatek,mt8173-cec", },
+ {}
+};
+
+struct platform_driver mtk_cec_driver = {
+ .probe = mtk_cec_probe,
+ .remove = mtk_cec_remove,
+ .driver = {
+ .name = "mediatek-cec",
+ .of_match_table = mtk_cec_of_ids,
+ },
+};
diff --git a/drivers/gpu/drm/mediatek/mtk_cec.h b/drivers/gpu/drm/mediatek/mtk_cec.h
new file mode 100644
index 000000000..10057b7ea
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_cec.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Jie Qiu <jie.qiu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MTK_CEC_H
+#define _MTK_CEC_H
+
+#include <linux/types.h>
+
+struct device;
+
+void mtk_cec_set_hpd_event(struct device *dev,
+ void (*hotplug_event)(bool hpd, struct device *dev),
+ struct device *hdmi_dev);
+bool mtk_cec_hpd_high(struct device *dev);
+
+#endif /* _MTK_CEC_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index b1223d54d..eebb7d881 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -91,7 +91,7 @@ static int mtk_atomic_commit(struct drm_device *drm,
mutex_lock(&private->commit.lock);
flush_work(&private->commit.work);
- drm_atomic_helper_swap_state(drm, state);
+ drm_atomic_helper_swap_state(state, true);
if (async)
mtk_atomic_schedule(private, state);
@@ -243,7 +243,7 @@ static struct drm_driver mtk_drm_driver = {
.enable_vblank = mtk_drm_crtc_enable_vblank,
.disable_vblank = mtk_drm_crtc_disable_vblank,
- .gem_free_object = mtk_drm_gem_free_object,
+ .gem_free_object_unlocked = mtk_drm_gem_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = mtk_drm_gem_dumb_create,
.dumb_map_offset = mtk_drm_gem_dumb_map_offset,
@@ -280,8 +280,6 @@ static int mtk_drm_bind(struct device *dev)
if (!drm)
return -ENOMEM;
- drm_dev_set_unique(drm, dev_name(dev));
-
drm->dev_private = private;
private->drm = drm;
@@ -293,14 +291,8 @@ static int mtk_drm_bind(struct device *dev)
if (ret < 0)
goto err_deinit;
- ret = drm_connector_register_all(drm);
- if (ret < 0)
- goto err_unregister;
-
return 0;
-err_unregister:
- drm_dev_unregister(drm);
err_deinit:
mtk_drm_kms_deinit(drm);
err_free:
@@ -455,7 +447,6 @@ static int mtk_drm_remove(struct platform_device *pdev)
struct drm_device *drm = private->drm;
int i;
- drm_connector_unregister_all(drm);
drm_dev_unregister(drm);
mtk_drm_kms_deinit(drm);
drm_dev_unref(drm);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index fa2ec0cd0..7abc550eb 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -54,15 +54,14 @@ struct mtk_drm_gem_obj *mtk_drm_gem_create(struct drm_device *dev,
obj = &mtk_gem->base;
- init_dma_attrs(&mtk_gem->dma_attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &mtk_gem->dma_attrs);
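+ /* DMA attributes are a plain bitmask of DMA_ATTR_* flags as of 4.8. */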
+ mtk_gem->dma_attrs = DMA_ATTR_WRITE_COMBINE;
if (!alloc_kmap)
- dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &mtk_gem->dma_attrs);
+ mtk_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
mtk_gem->cookie = dma_alloc_attrs(priv->dma_dev, obj->size,
&mtk_gem->dma_addr, GFP_KERNEL,
- &mtk_gem->dma_attrs);
+ mtk_gem->dma_attrs);
if (!mtk_gem->cookie) {
DRM_ERROR("failed to allocate %zx byte dma buffer", obj->size);
ret = -ENOMEM;
@@ -93,7 +92,7 @@ void mtk_drm_gem_free_object(struct drm_gem_object *obj)
drm_prime_gem_destroy(obj, mtk_gem->sg);
else
dma_free_attrs(priv->dma_dev, obj->size, mtk_gem->cookie,
- mtk_gem->dma_addr, &mtk_gem->dma_attrs);
+ mtk_gem->dma_addr, mtk_gem->dma_attrs);
/* release file pointer to gem object. */
drm_gem_object_release(obj);
@@ -173,7 +172,7 @@ static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
vma->vm_pgoff = 0;
ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie,
- mtk_gem->dma_addr, obj->size, &mtk_gem->dma_attrs);
+ mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs);
if (ret)
drm_gem_vm_close(vma);
@@ -224,7 +223,7 @@ struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj)
ret = dma_get_sgtable_attrs(priv->dma_dev, sgt, mtk_gem->cookie,
mtk_gem->dma_addr, obj->size,
- &mtk_gem->dma_attrs);
+ mtk_gem->dma_attrs);
if (ret) {
DRM_ERROR("failed to allocate sgt, %d\n", ret);
kfree(sgt);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.h b/drivers/gpu/drm/mediatek/mtk_drm_gem.h
index 3a2a5624a..2752718fa 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.h
@@ -35,7 +35,7 @@ struct mtk_drm_gem_obj {
void *cookie;
void *kvaddr;
dma_addr_t dma_addr;
- struct dma_attrs dma_attrs;
+ unsigned long dma_attrs;
struct sg_table *sg;
};
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index 51bc8988f..3995765a9 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -170,6 +170,7 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
return drm_plane_helper_check_update(plane, state->crtc, fb,
&src, &dest, &clip,
+ state->rotation,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
true, true, &visible);
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 769559124..28b2044ed 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -575,14 +575,6 @@ static int mtk_dsi_connector_get_modes(struct drm_connector *connector)
return drm_panel_get_modes(dsi->panel);
}
-static struct drm_encoder *mtk_dsi_connector_best_encoder(
- struct drm_connector *connector)
-{
- struct mtk_dsi *dsi = connector_to_dsi(connector);
-
- return &dsi->encoder;
-}
-
static const struct drm_encoder_helper_funcs mtk_dsi_encoder_helper_funcs = {
.mode_fixup = mtk_dsi_encoder_mode_fixup,
.mode_set = mtk_dsi_encoder_mode_set,
@@ -603,7 +595,6 @@ static const struct drm_connector_funcs mtk_dsi_connector_funcs = {
static const struct drm_connector_helper_funcs
mtk_dsi_connector_helper_funcs = {
.get_modes = mtk_dsi_connector_get_modes,
- .best_encoder = mtk_dsi_connector_best_encoder,
};
static int mtk_drm_attach_bridge(struct drm_bridge *bridge,
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
new file mode 100644
index 000000000..334562d06
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -0,0 +1,1828 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Jie Qiu <jie.qiu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <linux/arm-smccc.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/hdmi.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_graph.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <sound/hdmi-codec.h>
+#include "mtk_cec.h"
+#include "mtk_hdmi.h"
+#include "mtk_hdmi_regs.h"
+
+#define NCTS_BYTES 7
+
+enum mtk_hdmi_clk_id {
+ MTK_HDMI_CLK_HDMI_PIXEL,
+ MTK_HDMI_CLK_HDMI_PLL,
+ MTK_HDMI_CLK_AUD_BCLK,
+ MTK_HDMI_CLK_AUD_SPDIF,
+ MTK_HDMI_CLK_COUNT
+};
+
+enum hdmi_aud_input_type {
+ HDMI_AUD_INPUT_I2S = 0,
+ HDMI_AUD_INPUT_SPDIF,
+};
+
+enum hdmi_aud_i2s_fmt {
+ HDMI_I2S_MODE_RJT_24BIT = 0,
+ HDMI_I2S_MODE_RJT_16BIT,
+ HDMI_I2S_MODE_LJT_24BIT,
+ HDMI_I2S_MODE_LJT_16BIT,
+ HDMI_I2S_MODE_I2S_24BIT,
+ HDMI_I2S_MODE_I2S_16BIT
+};
+
+enum hdmi_aud_mclk {
+ HDMI_AUD_MCLK_128FS,
+ HDMI_AUD_MCLK_192FS,
+ HDMI_AUD_MCLK_256FS,
+ HDMI_AUD_MCLK_384FS,
+ HDMI_AUD_MCLK_512FS,
+ HDMI_AUD_MCLK_768FS,
+ HDMI_AUD_MCLK_1152FS,
+};
+
+enum hdmi_aud_channel_type {
+ HDMI_AUD_CHAN_TYPE_1_0 = 0,
+ HDMI_AUD_CHAN_TYPE_1_1,
+ HDMI_AUD_CHAN_TYPE_2_0,
+ HDMI_AUD_CHAN_TYPE_2_1,
+ HDMI_AUD_CHAN_TYPE_3_0,
+ HDMI_AUD_CHAN_TYPE_3_1,
+ HDMI_AUD_CHAN_TYPE_4_0,
+ HDMI_AUD_CHAN_TYPE_4_1,
+ HDMI_AUD_CHAN_TYPE_5_0,
+ HDMI_AUD_CHAN_TYPE_5_1,
+ HDMI_AUD_CHAN_TYPE_6_0,
+ HDMI_AUD_CHAN_TYPE_6_1,
+ HDMI_AUD_CHAN_TYPE_7_0,
+ HDMI_AUD_CHAN_TYPE_7_1,
+ HDMI_AUD_CHAN_TYPE_3_0_LRS,
+ HDMI_AUD_CHAN_TYPE_3_1_LRS,
+ HDMI_AUD_CHAN_TYPE_4_0_CLRS,
+ HDMI_AUD_CHAN_TYPE_4_1_CLRS,
+ HDMI_AUD_CHAN_TYPE_6_1_CS,
+ HDMI_AUD_CHAN_TYPE_6_1_CH,
+ HDMI_AUD_CHAN_TYPE_6_1_OH,
+ HDMI_AUD_CHAN_TYPE_6_1_CHR,
+ HDMI_AUD_CHAN_TYPE_7_1_LH_RH,
+ HDMI_AUD_CHAN_TYPE_7_1_LSR_RSR,
+ HDMI_AUD_CHAN_TYPE_7_1_LC_RC,
+ HDMI_AUD_CHAN_TYPE_7_1_LW_RW,
+ HDMI_AUD_CHAN_TYPE_7_1_LSD_RSD,
+ HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS,
+ HDMI_AUD_CHAN_TYPE_7_1_LHS_RHS,
+ HDMI_AUD_CHAN_TYPE_7_1_CS_CH,
+ HDMI_AUD_CHAN_TYPE_7_1_CS_OH,
+ HDMI_AUD_CHAN_TYPE_7_1_CS_CHR,
+ HDMI_AUD_CHAN_TYPE_7_1_CH_OH,
+ HDMI_AUD_CHAN_TYPE_7_1_CH_CHR,
+ HDMI_AUD_CHAN_TYPE_7_1_OH_CHR,
+ HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS_LSR_RSR,
+ HDMI_AUD_CHAN_TYPE_6_0_CS,
+ HDMI_AUD_CHAN_TYPE_6_0_CH,
+ HDMI_AUD_CHAN_TYPE_6_0_OH,
+ HDMI_AUD_CHAN_TYPE_6_0_CHR,
+ HDMI_AUD_CHAN_TYPE_7_0_LH_RH,
+ HDMI_AUD_CHAN_TYPE_7_0_LSR_RSR,
+ HDMI_AUD_CHAN_TYPE_7_0_LC_RC,
+ HDMI_AUD_CHAN_TYPE_7_0_LW_RW,
+ HDMI_AUD_CHAN_TYPE_7_0_LSD_RSD,
+ HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS,
+ HDMI_AUD_CHAN_TYPE_7_0_LHS_RHS,
+ HDMI_AUD_CHAN_TYPE_7_0_CS_CH,
+ HDMI_AUD_CHAN_TYPE_7_0_CS_OH,
+ HDMI_AUD_CHAN_TYPE_7_0_CS_CHR,
+ HDMI_AUD_CHAN_TYPE_7_0_CH_OH,
+ HDMI_AUD_CHAN_TYPE_7_0_CH_CHR,
+ HDMI_AUD_CHAN_TYPE_7_0_OH_CHR,
+ HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS_LSR_RSR,
+ HDMI_AUD_CHAN_TYPE_8_0_LH_RH_CS,
+ HDMI_AUD_CHAN_TYPE_UNKNOWN = 0xFF
+};
+
+enum hdmi_aud_channel_swap_type {
+ HDMI_AUD_SWAP_LR,
+ HDMI_AUD_SWAP_LFE_CC,
+ HDMI_AUD_SWAP_LSRS,
+ HDMI_AUD_SWAP_RLS_RRS,
+ HDMI_AUD_SWAP_LR_STATUS,
+};
+
+struct hdmi_audio_param {
+ enum hdmi_audio_coding_type aud_codec;
+ enum hdmi_audio_sample_size aud_sample_size;
+ enum hdmi_aud_input_type aud_input_type;
+ enum hdmi_aud_i2s_fmt aud_i2s_fmt;
+ enum hdmi_aud_mclk aud_mclk;
+ enum hdmi_aud_channel_type aud_input_chan_type;
+ struct hdmi_codec_params codec_params;
+};
+
+struct mtk_hdmi {
+ struct drm_bridge bridge;
+ struct drm_connector conn;
+ struct device *dev;
+ struct phy *phy;
+ struct device *cec_dev;
+ struct i2c_adapter *ddc_adpt;
+ struct clk *clk[MTK_HDMI_CLK_COUNT];
+ struct drm_display_mode mode;
+ bool dvi_mode;
+ u32 min_clock;
+ u32 max_clock;
+ u32 max_hdisplay;
+ u32 max_vdisplay;
+ u32 ibias;
+ u32 ibias_up;
+ struct regmap *sys_regmap;
+ unsigned int sys_offset;
+ void __iomem *regs;
+ enum hdmi_colorspace csp;
+ struct hdmi_audio_param aud_param;
+ bool audio_enable;
+ bool powered;
+ bool enabled;
+};
+
+static inline struct mtk_hdmi *hdmi_ctx_from_bridge(struct drm_bridge *b)
+{
+ return container_of(b, struct mtk_hdmi, bridge);
+}
+
+static inline struct mtk_hdmi *hdmi_ctx_from_conn(struct drm_connector *c)
+{
+ return container_of(c, struct mtk_hdmi, conn);
+}
+
+static u32 mtk_hdmi_read(struct mtk_hdmi *hdmi, u32 offset)
+{
+ return readl(hdmi->regs + offset);
+}
+
+static void mtk_hdmi_write(struct mtk_hdmi *hdmi, u32 offset, u32 val)
+{
+ writel(val, hdmi->regs + offset);
+}
+
+static void mtk_hdmi_clear_bits(struct mtk_hdmi *hdmi, u32 offset, u32 bits)
+{
+ void __iomem *reg = hdmi->regs + offset;
+ u32 tmp;
+
+ tmp = readl(reg);
+ tmp &= ~bits;
+ writel(tmp, reg);
+}
+
+static void mtk_hdmi_set_bits(struct mtk_hdmi *hdmi, u32 offset, u32 bits)
+{
+ void __iomem *reg = hdmi->regs + offset;
+ u32 tmp;
+
+ tmp = readl(reg);
+ tmp |= bits;
+ writel(tmp, reg);
+}
+
+static void mtk_hdmi_mask(struct mtk_hdmi *hdmi, u32 offset, u32 val, u32 mask)
+{
+ void __iomem *reg = hdmi->regs + offset;
+ u32 tmp;
+
+ tmp = readl(reg);
+ tmp = (tmp & ~mask) | (val & mask);
+ writel(tmp, reg);
+}
+
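+/*
+ * Illustrative use of the helpers above: a read-modify-write that sets
+ * only the DVI bit of GRL_CFG1,
+ *
+ *	mtk_hdmi_mask(hdmi, GRL_CFG1, CFG1_DVI, CFG1_DVI);
+ *
+ * is equivalent to mtk_hdmi_set_bits(hdmi, GRL_CFG1, CFG1_DVI), while
+ * passing val = 0 with the same mask clears the bit instead.
+ */
+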
+static void mtk_hdmi_hw_vid_black(struct mtk_hdmi *hdmi, bool black)
+{
+ mtk_hdmi_mask(hdmi, VIDEO_CFG_4, black ? GEN_RGB : NORMAL_PATH,
+ VIDEO_SOURCE_SEL);
+}
+
+static void mtk_hdmi_hw_make_reg_writable(struct mtk_hdmi *hdmi, bool enable)
+{
+ struct arm_smccc_res res;
+
+ /*
+ * MT8173 HDMI hardware has an output control bit to enable/disable HDMI
+ * output. This bit can only be controlled in ARM supervisor mode.
+ * The ARM trusted firmware provides an API for the HDMI driver to set
+ * this control bit to enable HDMI output in supervisor mode.
+ */
+ arm_smccc_smc(MTK_SIP_SET_AUTHORIZED_SECURE_REG, 0x14000904, 0x80000000,
+ 0, 0, 0, 0, 0, &res);
+
+ regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
+ HDMI_PCLK_FREE_RUN, enable ? HDMI_PCLK_FREE_RUN : 0);
+ regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
+ HDMI_ON | ANLG_ON, enable ? (HDMI_ON | ANLG_ON) : 0);
+}
+
+static void mtk_hdmi_hw_1p4_version_enable(struct mtk_hdmi *hdmi, bool enable)
+{
+ regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
+ HDMI2P0_EN, enable ? 0 : HDMI2P0_EN);
+}
+
+static void mtk_hdmi_hw_aud_mute(struct mtk_hdmi *hdmi)
+{
+ mtk_hdmi_set_bits(hdmi, GRL_AUDIO_CFG, AUDIO_ZERO);
+}
+
+static void mtk_hdmi_hw_aud_unmute(struct mtk_hdmi *hdmi)
+{
+ mtk_hdmi_clear_bits(hdmi, GRL_AUDIO_CFG, AUDIO_ZERO);
+}
+
+static void mtk_hdmi_hw_reset(struct mtk_hdmi *hdmi)
+{
+ regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
+ HDMI_RST, HDMI_RST);
+ regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
+ HDMI_RST, 0);
+ mtk_hdmi_clear_bits(hdmi, GRL_CFG3, CFG3_CONTROL_PACKET_DELAY);
+ regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
+ ANLG_ON, ANLG_ON);
+}
+
+static void mtk_hdmi_hw_enable_notice(struct mtk_hdmi *hdmi, bool enable_notice)
+{
+ mtk_hdmi_mask(hdmi, GRL_CFG2, enable_notice ? CFG2_NOTICE_EN : 0,
+ CFG2_NOTICE_EN);
+}
+
+static void mtk_hdmi_hw_write_int_mask(struct mtk_hdmi *hdmi, u32 int_mask)
+{
+ mtk_hdmi_write(hdmi, GRL_INT_MASK, int_mask);
+}
+
+static void mtk_hdmi_hw_enable_dvi_mode(struct mtk_hdmi *hdmi, bool enable)
+{
+ mtk_hdmi_mask(hdmi, GRL_CFG1, enable ? CFG1_DVI : 0, CFG1_DVI);
+}
+
+static void mtk_hdmi_hw_send_info_frame(struct mtk_hdmi *hdmi, u8 *buffer,
+ u8 len)
+{
+ u32 ctrl_reg = GRL_CTRL;
+ int i;
+ u8 *frame_data;
+ enum hdmi_infoframe_type frame_type;
+ u8 frame_ver;
+ u8 frame_len;
+ u8 checksum;
+ int ctrl_frame_en = 0;
+
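+ /*
+ * A packed infoframe starts with a four-byte header (type, version,
+ * length, checksum) followed by the payload, matching the layout
+ * produced by the hdmi_*_infoframe_pack() helpers.
+ */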
+ frame_type = *buffer;
+ buffer += 1;
+ frame_ver = *buffer;
+ buffer += 1;
+ frame_len = *buffer;
+ buffer += 1;
+ checksum = *buffer;
+ buffer += 1;
+ frame_data = buffer;
+
+ dev_dbg(hdmi->dev,
+ "frame_type:0x%x, frame_ver:0x%x, frame_len:0x%x, checksum:0x%x\n",
+ frame_type, frame_ver, frame_len, checksum);
+
+ switch (frame_type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ ctrl_frame_en = CTRL_AVI_EN;
+ ctrl_reg = GRL_CTRL;
+ break;
+ case HDMI_INFOFRAME_TYPE_SPD:
+ ctrl_frame_en = CTRL_SPD_EN;
+ ctrl_reg = GRL_CTRL;
+ break;
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ ctrl_frame_en = CTRL_AUDIO_EN;
+ ctrl_reg = GRL_CTRL;
+ break;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ ctrl_frame_en = VS_EN;
+ ctrl_reg = GRL_ACP_ISRC_CTRL;
+ break;
+ }
+ mtk_hdmi_clear_bits(hdmi, ctrl_reg, ctrl_frame_en);
+ mtk_hdmi_write(hdmi, GRL_INFOFRM_TYPE, frame_type);
+ mtk_hdmi_write(hdmi, GRL_INFOFRM_VER, frame_ver);
+ mtk_hdmi_write(hdmi, GRL_INFOFRM_LNG, frame_len);
+
+ mtk_hdmi_write(hdmi, GRL_IFM_PORT, checksum);
+ for (i = 0; i < frame_len; i++)
+ mtk_hdmi_write(hdmi, GRL_IFM_PORT, frame_data[i]);
+
+ mtk_hdmi_set_bits(hdmi, ctrl_reg, ctrl_frame_en);
+}
+
+static void mtk_hdmi_hw_send_aud_packet(struct mtk_hdmi *hdmi, bool enable)
+{
+ mtk_hdmi_mask(hdmi, GRL_SHIFT_R2, enable ? 0 : AUDIO_PACKET_OFF,
+ AUDIO_PACKET_OFF);
+}
+
+static void mtk_hdmi_hw_config_sys(struct mtk_hdmi *hdmi)
+{
+ regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
+ HDMI_OUT_FIFO_EN | MHL_MODE_ON, 0);
+ usleep_range(2000, 4000);
+ regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
+ HDMI_OUT_FIFO_EN | MHL_MODE_ON, HDMI_OUT_FIFO_EN);
+}
+
+static void mtk_hdmi_hw_set_deep_color_mode(struct mtk_hdmi *hdmi)
+{
+ regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
+ DEEP_COLOR_MODE_MASK | DEEP_COLOR_EN,
+ COLOR_8BIT_MODE);
+}
+
+static void mtk_hdmi_hw_send_av_mute(struct mtk_hdmi *hdmi)
+{
+ mtk_hdmi_clear_bits(hdmi, GRL_CFG4, CTRL_AVMUTE);
+ usleep_range(2000, 4000);
+ mtk_hdmi_set_bits(hdmi, GRL_CFG4, CTRL_AVMUTE);
+}
+
+static void mtk_hdmi_hw_send_av_unmute(struct mtk_hdmi *hdmi)
+{
+ mtk_hdmi_mask(hdmi, GRL_CFG4, CFG4_AV_UNMUTE_EN,
+ CFG4_AV_UNMUTE_EN | CFG4_AV_UNMUTE_SET);
+ usleep_range(2000, 4000);
+ mtk_hdmi_mask(hdmi, GRL_CFG4, CFG4_AV_UNMUTE_SET,
+ CFG4_AV_UNMUTE_EN | CFG4_AV_UNMUTE_SET);
+}
+
+static void mtk_hdmi_hw_ncts_enable(struct mtk_hdmi *hdmi, bool on)
+{
+ mtk_hdmi_mask(hdmi, GRL_CTS_CTRL, on ? 0 : CTS_CTRL_SOFT,
+ CTS_CTRL_SOFT);
+}
+
+static void mtk_hdmi_hw_ncts_auto_write_enable(struct mtk_hdmi *hdmi,
+ bool enable)
+{
+ mtk_hdmi_mask(hdmi, GRL_CTS_CTRL, enable ? NCTS_WRI_ANYTIME : 0,
+ NCTS_WRI_ANYTIME);
+}
+
+static void mtk_hdmi_hw_msic_setting(struct mtk_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ mtk_hdmi_clear_bits(hdmi, GRL_CFG4, CFG4_MHL_MODE);
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE &&
+ mode->clock == 74250 &&
+ mode->vdisplay == 1080)
+ mtk_hdmi_clear_bits(hdmi, GRL_CFG2, CFG2_MHL_DE_SEL);
+ else
+ mtk_hdmi_set_bits(hdmi, GRL_CFG2, CFG2_MHL_DE_SEL);
+}
+
+static void mtk_hdmi_hw_aud_set_channel_swap(struct mtk_hdmi *hdmi,
+ enum hdmi_aud_channel_swap_type swap)
+{
+ u8 swap_bit;
+
+ switch (swap) {
+ case HDMI_AUD_SWAP_LR:
+ swap_bit = LR_SWAP;
+ break;
+ case HDMI_AUD_SWAP_LFE_CC:
+ swap_bit = LFE_CC_SWAP;
+ break;
+ case HDMI_AUD_SWAP_LSRS:
+ swap_bit = LSRS_SWAP;
+ break;
+ case HDMI_AUD_SWAP_RLS_RRS:
+ swap_bit = RLS_RRS_SWAP;
+ break;
+ case HDMI_AUD_SWAP_LR_STATUS:
+ swap_bit = LR_STATUS_SWAP;
+ break;
+ default:
+ swap_bit = LFE_CC_SWAP;
+ break;
+ }
+ mtk_hdmi_mask(hdmi, GRL_CH_SWAP, swap_bit, 0xff);
+}
+
+static void mtk_hdmi_hw_aud_set_bit_num(struct mtk_hdmi *hdmi,
+ enum hdmi_audio_sample_size bit_num)
+{
+ u32 val;
+
+ switch (bit_num) {
+ case HDMI_AUDIO_SAMPLE_SIZE_16:
+ val = AOUT_16BIT;
+ break;
+ case HDMI_AUDIO_SAMPLE_SIZE_20:
+ val = AOUT_20BIT;
+ break;
+ case HDMI_AUDIO_SAMPLE_SIZE_24:
+ case HDMI_AUDIO_SAMPLE_SIZE_STREAM:
+ val = AOUT_24BIT;
+ break;
+ }
+
+ mtk_hdmi_mask(hdmi, GRL_AOUT_CFG, val, AOUT_BNUM_SEL_MASK);
+}
+
+static void mtk_hdmi_hw_aud_set_i2s_fmt(struct mtk_hdmi *hdmi,
+ enum hdmi_aud_i2s_fmt i2s_fmt)
+{
+ u32 val;
+
+ val = mtk_hdmi_read(hdmi, GRL_CFG0);
+ val &= ~(CFG0_W_LENGTH_MASK | CFG0_I2S_MODE_MASK);
+
+ switch (i2s_fmt) {
+ case HDMI_I2S_MODE_RJT_24BIT:
+ val |= CFG0_I2S_MODE_RTJ | CFG0_W_LENGTH_24BIT;
+ break;
+ case HDMI_I2S_MODE_RJT_16BIT:
+ val |= CFG0_I2S_MODE_RTJ | CFG0_W_LENGTH_16BIT;
+ break;
+ case HDMI_I2S_MODE_LJT_24BIT:
+ default:
+ val |= CFG0_I2S_MODE_LTJ | CFG0_W_LENGTH_24BIT;
+ break;
+ case HDMI_I2S_MODE_LJT_16BIT:
+ val |= CFG0_I2S_MODE_LTJ | CFG0_W_LENGTH_16BIT;
+ break;
+ case HDMI_I2S_MODE_I2S_24BIT:
+ val |= CFG0_I2S_MODE_I2S | CFG0_W_LENGTH_24BIT;
+ break;
+ case HDMI_I2S_MODE_I2S_16BIT:
+ val |= CFG0_I2S_MODE_I2S | CFG0_W_LENGTH_16BIT;
+ break;
+ }
+ mtk_hdmi_write(hdmi, GRL_CFG0, val);
+}
+
+static void mtk_hdmi_hw_audio_config(struct mtk_hdmi *hdmi, bool dst)
+{
+ const u8 mask = HIGH_BIT_RATE | DST_NORMAL_DOUBLE | SACD_DST | DSD_SEL;
+ u8 val;
+
+ /* Disable high bitrate, set DST packet normal/double */
+ mtk_hdmi_clear_bits(hdmi, GRL_AOUT_CFG, HIGH_BIT_RATE_PACKET_ALIGN);
+
+ if (dst)
+ val = DST_NORMAL_DOUBLE | SACD_DST;
+ else
+ val = 0;
+
+ mtk_hdmi_mask(hdmi, GRL_AUDIO_CFG, val, mask);
+}
+
+static void mtk_hdmi_hw_aud_set_i2s_chan_num(struct mtk_hdmi *hdmi,
+ enum hdmi_aud_channel_type channel_type,
+ u8 channel_count)
+{
+ unsigned int ch_switch;
+ u8 i2s_uv;
+
+ ch_switch = CH_SWITCH(7, 7) | CH_SWITCH(6, 6) |
+ CH_SWITCH(5, 5) | CH_SWITCH(4, 4) |
+ CH_SWITCH(3, 3) | CH_SWITCH(1, 2) |
+ CH_SWITCH(2, 1) | CH_SWITCH(0, 0);
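+ /* Identity mapping, except source channels 1 and 2 are swapped. */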
+
+ if (channel_count == 2) {
+ i2s_uv = I2S_UV_CH_EN(0);
+ } else if (channel_count == 3 || channel_count == 4) {
+ if (channel_count == 4 &&
+ (channel_type == HDMI_AUD_CHAN_TYPE_3_0_LRS ||
+ channel_type == HDMI_AUD_CHAN_TYPE_4_0))
+ i2s_uv = I2S_UV_CH_EN(2) | I2S_UV_CH_EN(0);
+ else
+ i2s_uv = I2S_UV_CH_EN(3) | I2S_UV_CH_EN(2);
+ } else if (channel_count == 6 || channel_count == 5) {
+ if (channel_count == 6 &&
+ channel_type != HDMI_AUD_CHAN_TYPE_5_1 &&
+ channel_type != HDMI_AUD_CHAN_TYPE_4_1_CLRS) {
+ i2s_uv = I2S_UV_CH_EN(3) | I2S_UV_CH_EN(2) |
+ I2S_UV_CH_EN(1) | I2S_UV_CH_EN(0);
+ } else {
+ i2s_uv = I2S_UV_CH_EN(2) | I2S_UV_CH_EN(1) |
+ I2S_UV_CH_EN(0);
+ }
+ } else if (channel_count == 8 || channel_count == 7) {
+ i2s_uv = I2S_UV_CH_EN(3) | I2S_UV_CH_EN(2) |
+ I2S_UV_CH_EN(1) | I2S_UV_CH_EN(0);
+ } else {
+ i2s_uv = I2S_UV_CH_EN(0);
+ }
+
+ mtk_hdmi_write(hdmi, GRL_CH_SW0, ch_switch & 0xff);
+ mtk_hdmi_write(hdmi, GRL_CH_SW1, (ch_switch >> 8) & 0xff);
+ mtk_hdmi_write(hdmi, GRL_CH_SW2, (ch_switch >> 16) & 0xff);
+ mtk_hdmi_write(hdmi, GRL_I2S_UV, i2s_uv);
+}
+
+static void mtk_hdmi_hw_aud_set_input_type(struct mtk_hdmi *hdmi,
+ enum hdmi_aud_input_type input_type)
+{
+ u32 val;
+
+ val = mtk_hdmi_read(hdmi, GRL_CFG1);
+ if (input_type == HDMI_AUD_INPUT_I2S &&
+ (val & CFG1_SPDIF) == CFG1_SPDIF) {
+ val &= ~CFG1_SPDIF;
+ } else if (input_type == HDMI_AUD_INPUT_SPDIF &&
+ (val & CFG1_SPDIF) == 0) {
+ val |= CFG1_SPDIF;
+ }
+ mtk_hdmi_write(hdmi, GRL_CFG1, val);
+}
+
+static void mtk_hdmi_hw_aud_set_channel_status(struct mtk_hdmi *hdmi,
+ u8 *channel_status)
+{
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ mtk_hdmi_write(hdmi, GRL_I2S_C_STA0 + i * 4, channel_status[i]);
+ mtk_hdmi_write(hdmi, GRL_L_STATUS_0 + i * 4, channel_status[i]);
+ mtk_hdmi_write(hdmi, GRL_R_STATUS_0 + i * 4, channel_status[i]);
+ }
+ for (; i < 24; i++) {
+ mtk_hdmi_write(hdmi, GRL_L_STATUS_0 + i * 4, 0);
+ mtk_hdmi_write(hdmi, GRL_R_STATUS_0 + i * 4, 0);
+ }
+}
+
+static void mtk_hdmi_hw_aud_src_reenable(struct mtk_hdmi *hdmi)
+{
+ u32 val;
+
+ val = mtk_hdmi_read(hdmi, GRL_MIX_CTRL);
+ if (val & MIX_CTRL_SRC_EN) {
+ val &= ~MIX_CTRL_SRC_EN;
+ mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val);
+ usleep_range(255, 512);
+ val |= MIX_CTRL_SRC_EN;
+ mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val);
+ }
+}
+
+static void mtk_hdmi_hw_aud_src_disable(struct mtk_hdmi *hdmi)
+{
+ u32 val;
+
+ val = mtk_hdmi_read(hdmi, GRL_MIX_CTRL);
+ val &= ~MIX_CTRL_SRC_EN;
+ mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val);
+ mtk_hdmi_write(hdmi, GRL_SHIFT_L1, 0x00);
+}
+
+static void mtk_hdmi_hw_aud_set_mclk(struct mtk_hdmi *hdmi,
+ enum hdmi_aud_mclk mclk)
+{
+ u32 val;
+
+ val = mtk_hdmi_read(hdmi, GRL_CFG5);
+ val &= CFG5_CD_RATIO_MASK;
+
+ switch (mclk) {
+ case HDMI_AUD_MCLK_128FS:
+ val |= CFG5_FS128;
+ break;
+ case HDMI_AUD_MCLK_256FS:
+ val |= CFG5_FS256;
+ break;
+ case HDMI_AUD_MCLK_384FS:
+ val |= CFG5_FS384;
+ break;
+ case HDMI_AUD_MCLK_512FS:
+ val |= CFG5_FS512;
+ break;
+ case HDMI_AUD_MCLK_768FS:
+ val |= CFG5_FS768;
+ break;
+ default:
+ val |= CFG5_FS256;
+ break;
+ }
+ mtk_hdmi_write(hdmi, GRL_CFG5, val);
+}
+
+struct hdmi_acr_n {
+ unsigned int clock;
+ unsigned int n[3];
+};
+
+/* Recommended N values from HDMI specification, tables 7-1 to 7-3 */
+static const struct hdmi_acr_n hdmi_rec_n_table[] = {
+ /* Clock, N: 32kHz 44.1kHz 48kHz */
+ { 25175, { 4576, 7007, 6864 } },
+ { 74176, { 11648, 17836, 11648 } },
+ { 148352, { 11648, 8918, 5824 } },
+ { 296703, { 5824, 4459, 5824 } },
+ { 297000, { 3072, 4704, 5120 } },
+ { 0, { 4096, 6272, 6144 } }, /* all other TMDS clocks */
+};
+
+/**
+ * hdmi_recommended_n() - Return N value recommended by HDMI specification
+ * @freq: audio sample rate in Hz
+ * @clock: rounded TMDS clock in kHz
+ */
+static unsigned int hdmi_recommended_n(unsigned int freq, unsigned int clock)
+{
+ const struct hdmi_acr_n *recommended;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(hdmi_rec_n_table) - 1; i++) {
+ if (clock == hdmi_rec_n_table[i].clock)
+ break;
+ }
+ recommended = hdmi_rec_n_table + i;
+
+ switch (freq) {
+ case 32000:
+ return recommended->n[0];
+ case 44100:
+ return recommended->n[1];
+ case 48000:
+ return recommended->n[2];
+ case 88200:
+ return recommended->n[1] * 2;
+ case 96000:
+ return recommended->n[2] * 2;
+ case 176400:
+ return recommended->n[1] * 4;
+ case 192000:
+ return recommended->n[2] * 4;
+ default:
+ return (128 * freq) / 1000;
+ }
+}
+
+static unsigned int hdmi_mode_clock_to_hz(unsigned int clock)
+{
+ switch (clock) {
+ case 25175:
+ return 25174825; /* 25.2/1.001 MHz */
+ case 74176:
+ return 74175824; /* 74.25/1.001 MHz */
+ case 148352:
+ return 148351648; /* 148.5/1.001 MHz */
+ case 296703:
+ return 296703297; /* 297/1.001 MHz */
+ default:
+ return clock * 1000;
+ }
+}
+
+static unsigned int hdmi_expected_cts(unsigned int audio_sample_rate,
+ unsigned int tmds_clock, unsigned int n)
+{
+ return DIV_ROUND_CLOSEST_ULL((u64)hdmi_mode_clock_to_hz(tmds_clock) * n,
+ 128 * audio_sample_rate);
+}
+
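+/*
+ * Worked example: 48 kHz audio on the 148.352 MHz (148.5 MHz / 1.001)
+ * TMDS clock uses N = 5824 from the table above, giving
+ * CTS = 148351648 * 5824 / (128 * 48000) = 140625, the value the sink
+ * needs to regenerate the audio clock as 128 * fs = f_TMDS * N / CTS.
+ */
+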
+static void do_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi, unsigned int n,
+ unsigned int cts)
+{
+ unsigned char val[NCTS_BYTES];
+ int i;
+
+ mtk_hdmi_write(hdmi, GRL_NCTS, 0);
+ mtk_hdmi_write(hdmi, GRL_NCTS, 0);
+ mtk_hdmi_write(hdmi, GRL_NCTS, 0);
+ memset(val, 0, sizeof(val));
+
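+ /* GRL_NCTS takes CTS as four MSB-first bytes, then N as three. */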
+ val[0] = (cts >> 24) & 0xff;
+ val[1] = (cts >> 16) & 0xff;
+ val[2] = (cts >> 8) & 0xff;
+ val[3] = cts & 0xff;
+
+ val[4] = (n >> 16) & 0xff;
+ val[5] = (n >> 8) & 0xff;
+ val[6] = n & 0xff;
+
+ for (i = 0; i < NCTS_BYTES; i++)
+ mtk_hdmi_write(hdmi, GRL_NCTS, val[i]);
+}
+
+static void mtk_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi,
+ unsigned int sample_rate,
+ unsigned int clock)
+{
+ unsigned int n, cts;
+
+ n = hdmi_recommended_n(sample_rate, clock);
+ cts = hdmi_expected_cts(sample_rate, clock, n);
+
+ dev_dbg(hdmi->dev, "%s: sample_rate=%u, clock=%u, cts=%u, n=%u\n",
+ __func__, sample_rate, clock, n, cts);
+
+ mtk_hdmi_mask(hdmi, DUMMY_304, AUDIO_I2S_NCTS_SEL_64,
+ AUDIO_I2S_NCTS_SEL);
+ do_hdmi_hw_aud_set_ncts(hdmi, n, cts);
+}
+
+static u8 mtk_hdmi_aud_get_chnl_count(enum hdmi_aud_channel_type channel_type)
+{
+ switch (channel_type) {
+ case HDMI_AUD_CHAN_TYPE_1_0:
+ case HDMI_AUD_CHAN_TYPE_1_1:
+ case HDMI_AUD_CHAN_TYPE_2_0:
+ return 2;
+ case HDMI_AUD_CHAN_TYPE_2_1:
+ case HDMI_AUD_CHAN_TYPE_3_0:
+ return 3;
+ case HDMI_AUD_CHAN_TYPE_3_1:
+ case HDMI_AUD_CHAN_TYPE_4_0:
+ case HDMI_AUD_CHAN_TYPE_3_0_LRS:
+ return 4;
+ case HDMI_AUD_CHAN_TYPE_4_1:
+ case HDMI_AUD_CHAN_TYPE_5_0:
+ case HDMI_AUD_CHAN_TYPE_3_1_LRS:
+ case HDMI_AUD_CHAN_TYPE_4_0_CLRS:
+ return 5;
+ case HDMI_AUD_CHAN_TYPE_5_1:
+ case HDMI_AUD_CHAN_TYPE_6_0:
+ case HDMI_AUD_CHAN_TYPE_4_1_CLRS:
+ case HDMI_AUD_CHAN_TYPE_6_0_CS:
+ case HDMI_AUD_CHAN_TYPE_6_0_CH:
+ case HDMI_AUD_CHAN_TYPE_6_0_OH:
+ case HDMI_AUD_CHAN_TYPE_6_0_CHR:
+ return 6;
+ case HDMI_AUD_CHAN_TYPE_6_1:
+ case HDMI_AUD_CHAN_TYPE_6_1_CS:
+ case HDMI_AUD_CHAN_TYPE_6_1_CH:
+ case HDMI_AUD_CHAN_TYPE_6_1_OH:
+ case HDMI_AUD_CHAN_TYPE_6_1_CHR:
+ case HDMI_AUD_CHAN_TYPE_7_0:
+ case HDMI_AUD_CHAN_TYPE_7_0_LH_RH:
+ case HDMI_AUD_CHAN_TYPE_7_0_LSR_RSR:
+ case HDMI_AUD_CHAN_TYPE_7_0_LC_RC:
+ case HDMI_AUD_CHAN_TYPE_7_0_LW_RW:
+ case HDMI_AUD_CHAN_TYPE_7_0_LSD_RSD:
+ case HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS:
+ case HDMI_AUD_CHAN_TYPE_7_0_LHS_RHS:
+ case HDMI_AUD_CHAN_TYPE_7_0_CS_CH:
+ case HDMI_AUD_CHAN_TYPE_7_0_CS_OH:
+ case HDMI_AUD_CHAN_TYPE_7_0_CS_CHR:
+ case HDMI_AUD_CHAN_TYPE_7_0_CH_OH:
+ case HDMI_AUD_CHAN_TYPE_7_0_CH_CHR:
+ case HDMI_AUD_CHAN_TYPE_7_0_OH_CHR:
+ case HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS_LSR_RSR:
+ case HDMI_AUD_CHAN_TYPE_8_0_LH_RH_CS:
+ return 7;
+ case HDMI_AUD_CHAN_TYPE_7_1:
+ case HDMI_AUD_CHAN_TYPE_7_1_LH_RH:
+ case HDMI_AUD_CHAN_TYPE_7_1_LSR_RSR:
+ case HDMI_AUD_CHAN_TYPE_7_1_LC_RC:
+ case HDMI_AUD_CHAN_TYPE_7_1_LW_RW:
+ case HDMI_AUD_CHAN_TYPE_7_1_LSD_RSD:
+ case HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS:
+ case HDMI_AUD_CHAN_TYPE_7_1_LHS_RHS:
+ case HDMI_AUD_CHAN_TYPE_7_1_CS_CH:
+ case HDMI_AUD_CHAN_TYPE_7_1_CS_OH:
+ case HDMI_AUD_CHAN_TYPE_7_1_CS_CHR:
+ case HDMI_AUD_CHAN_TYPE_7_1_CH_OH:
+ case HDMI_AUD_CHAN_TYPE_7_1_CH_CHR:
+ case HDMI_AUD_CHAN_TYPE_7_1_OH_CHR:
+ case HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS_LSR_RSR:
+ return 8;
+ default:
+ return 2;
+ }
+}
+
+static int mtk_hdmi_video_change_vpll(struct mtk_hdmi *hdmi, u32 clock)
+{
+ unsigned long rate;
+ int ret;
+
+ /* The DPI driver should already have set TVDPLL to the correct rate */
+ ret = clk_set_rate(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL], clock);
+ if (ret) {
+ dev_err(hdmi->dev, "Failed to set PLL to %u Hz: %d\n", clock,
+ ret);
+ return ret;
+ }
+
+ rate = clk_get_rate(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
+
+ if (DIV_ROUND_CLOSEST(rate, 1000) != DIV_ROUND_CLOSEST(clock, 1000))
+ dev_warn(hdmi->dev, "Want PLL %u Hz, got %lu Hz\n", clock,
+ rate);
+ else
+ dev_dbg(hdmi->dev, "Want PLL %u Hz, got %lu Hz\n", clock, rate);
+
+ mtk_hdmi_hw_config_sys(hdmi);
+ mtk_hdmi_hw_set_deep_color_mode(hdmi);
+ return 0;
+}
+
+static void mtk_hdmi_video_set_display_mode(struct mtk_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ mtk_hdmi_hw_reset(hdmi);
+ mtk_hdmi_hw_enable_notice(hdmi, true);
+ mtk_hdmi_hw_write_int_mask(hdmi, 0xff);
+ mtk_hdmi_hw_enable_dvi_mode(hdmi, hdmi->dvi_mode);
+ mtk_hdmi_hw_ncts_auto_write_enable(hdmi, true);
+
+ mtk_hdmi_hw_msic_setting(hdmi, mode);
+}
+
+static int mtk_hdmi_aud_enable_packet(struct mtk_hdmi *hdmi, bool enable)
+{
+ mtk_hdmi_hw_send_aud_packet(hdmi, enable);
+ return 0;
+}
+
+static int mtk_hdmi_aud_on_off_hw_ncts(struct mtk_hdmi *hdmi, bool on)
+{
+ mtk_hdmi_hw_ncts_enable(hdmi, on);
+ return 0;
+}
+
+static int mtk_hdmi_aud_set_input(struct mtk_hdmi *hdmi)
+{
+ enum hdmi_aud_channel_type chan_type;
+ u8 chan_count;
+ bool dst;
+
+ mtk_hdmi_hw_aud_set_channel_swap(hdmi, HDMI_AUD_SWAP_LFE_CC);
+ mtk_hdmi_set_bits(hdmi, GRL_MIX_CTRL, MIX_CTRL_FLAT);
+
+ if (hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_SPDIF &&
+ hdmi->aud_param.aud_codec == HDMI_AUDIO_CODING_TYPE_DST) {
+ mtk_hdmi_hw_aud_set_bit_num(hdmi, HDMI_AUDIO_SAMPLE_SIZE_24);
+ } else if (hdmi->aud_param.aud_i2s_fmt == HDMI_I2S_MODE_LJT_24BIT) {
+ hdmi->aud_param.aud_i2s_fmt = HDMI_I2S_MODE_LJT_16BIT;
+ }
+
+ mtk_hdmi_hw_aud_set_i2s_fmt(hdmi, hdmi->aud_param.aud_i2s_fmt);
+ mtk_hdmi_hw_aud_set_bit_num(hdmi, HDMI_AUDIO_SAMPLE_SIZE_24);
+
+ dst = ((hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_SPDIF) &&
+ (hdmi->aud_param.aud_codec == HDMI_AUDIO_CODING_TYPE_DST));
+ mtk_hdmi_hw_audio_config(hdmi, dst);
+
+ if (hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_SPDIF)
+ chan_type = HDMI_AUD_CHAN_TYPE_2_0;
+ else
+ chan_type = hdmi->aud_param.aud_input_chan_type;
+ chan_count = mtk_hdmi_aud_get_chnl_count(chan_type);
+ mtk_hdmi_hw_aud_set_i2s_chan_num(hdmi, chan_type, chan_count);
+ mtk_hdmi_hw_aud_set_input_type(hdmi, hdmi->aud_param.aud_input_type);
+
+ return 0;
+}
+
+static int mtk_hdmi_aud_set_src(struct mtk_hdmi *hdmi,
+ struct drm_display_mode *display_mode)
+{
+ unsigned int sample_rate = hdmi->aud_param.codec_params.sample_rate;
+
+ mtk_hdmi_aud_on_off_hw_ncts(hdmi, false);
+ mtk_hdmi_hw_aud_src_disable(hdmi);
+ mtk_hdmi_clear_bits(hdmi, GRL_CFG2, CFG2_ACLK_INV);
+
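+ /*
+ * Validate the sample rate against what each input path supports
+ * before choosing the audio master clock.
+ */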
+ if (hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_I2S) {
+ switch (sample_rate) {
+ case 32000:
+ case 44100:
+ case 48000:
+ case 88200:
+ case 96000:
+ break;
+ default:
+ return -EINVAL;
+ }
+ mtk_hdmi_hw_aud_set_mclk(hdmi, hdmi->aud_param.aud_mclk);
+ } else {
+ switch (sample_rate) {
+ case 32000:
+ case 44100:
+ case 48000:
+ break;
+ default:
+ return -EINVAL;
+ }
+ mtk_hdmi_hw_aud_set_mclk(hdmi, HDMI_AUD_MCLK_128FS);
+ }
+
+ mtk_hdmi_hw_aud_set_ncts(hdmi, sample_rate, display_mode->clock);
+
+ mtk_hdmi_hw_aud_src_reenable(hdmi);
+ return 0;
+}
+
+static int mtk_hdmi_aud_output_config(struct mtk_hdmi *hdmi,
+ struct drm_display_mode *display_mode)
+{
+ mtk_hdmi_hw_aud_mute(hdmi);
+ mtk_hdmi_aud_enable_packet(hdmi, false);
+
+ mtk_hdmi_aud_set_input(hdmi);
+ mtk_hdmi_aud_set_src(hdmi, display_mode);
+ mtk_hdmi_hw_aud_set_channel_status(hdmi,
+ hdmi->aud_param.codec_params.iec.status);
+
+ usleep_range(50, 100);
+
+ mtk_hdmi_aud_on_off_hw_ncts(hdmi, true);
+ mtk_hdmi_aud_enable_packet(hdmi, true);
+ mtk_hdmi_hw_aud_unmute(hdmi);
+ return 0;
+}
+
+static int mtk_hdmi_setup_avi_infoframe(struct mtk_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ struct hdmi_avi_infoframe frame;
+ u8 buffer[17];
+ ssize_t err;
+
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ if (err < 0) {
+ dev_err(hdmi->dev,
+ "Failed to get AVI infoframe from mode: %zd\n", err);
+ return err;
+ }
+
+ err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ dev_err(hdmi->dev, "Failed to pack AVI infoframe: %zd\n", err);
+ return err;
+ }
+
+ mtk_hdmi_hw_send_info_frame(hdmi, buffer, sizeof(buffer));
+ return 0;
+}
+
+static int mtk_hdmi_setup_spd_infoframe(struct mtk_hdmi *hdmi,
+ const char *vendor,
+ const char *product)
+{
+ struct hdmi_spd_infoframe frame;
+ u8 buffer[29];
+ ssize_t err;
+
+ err = hdmi_spd_infoframe_init(&frame, vendor, product);
+ if (err < 0) {
+ dev_err(hdmi->dev, "Failed to initialize SPD infoframe: %zd\n",
+ err);
+ return err;
+ }
+
+ err = hdmi_spd_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ dev_err(hdmi->dev, "Failed to pack SPD infoframe: %zd\n", err);
+ return err;
+ }
+
+ mtk_hdmi_hw_send_info_frame(hdmi, buffer, sizeof(buffer));
+ return 0;
+}
+
+static int mtk_hdmi_setup_audio_infoframe(struct mtk_hdmi *hdmi)
+{
+ struct hdmi_audio_infoframe frame;
+ u8 buffer[14];
+ ssize_t err;
+
+ err = hdmi_audio_infoframe_init(&frame);
+ if (err < 0) {
+ dev_err(hdmi->dev, "Failed to initialize audio infoframe: %zd\n",
+ err);
+ return err;
+ }
+
+ frame.coding_type = HDMI_AUDIO_CODING_TYPE_STREAM;
+ frame.sample_frequency = HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM;
+ frame.sample_size = HDMI_AUDIO_SAMPLE_SIZE_STREAM;
+ frame.channels = mtk_hdmi_aud_get_chnl_count(
+ hdmi->aud_param.aud_input_chan_type);
+
+ err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ dev_err(hdmi->dev, "Failed to pack audio infoframe: %zd\n",
+ err);
+ return err;
+ }
+
+ mtk_hdmi_hw_send_info_frame(hdmi, buffer, sizeof(buffer));
+ return 0;
+}
+
+static int mtk_hdmi_setup_vendor_specific_infoframe(struct mtk_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ struct hdmi_vendor_infoframe frame;
+ u8 buffer[10];
+ ssize_t err;
+
+ err = drm_hdmi_vendor_infoframe_from_display_mode(&frame, mode);
+ if (err) {
+ dev_err(hdmi->dev,
+ "Failed to get vendor infoframe from mode: %zd\n", err);
+ return err;
+ }
+
+ err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err) {
+ dev_err(hdmi->dev, "Failed to pack vendor infoframe: %zd\n",
+ err);
+ return err;
+ }
+
+ mtk_hdmi_hw_send_info_frame(hdmi, buffer, sizeof(buffer));
+ return 0;
+}
+
+static int mtk_hdmi_output_init(struct mtk_hdmi *hdmi)
+{
+ struct hdmi_audio_param *aud_param = &hdmi->aud_param;
+
+ hdmi->csp = HDMI_COLORSPACE_RGB;
+ aud_param->aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
+ aud_param->aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+ aud_param->aud_input_type = HDMI_AUD_INPUT_I2S;
+ aud_param->aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
+ aud_param->aud_mclk = HDMI_AUD_MCLK_128FS;
+ aud_param->aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0;
+
+ return 0;
+}
+
+void mtk_hdmi_audio_enable(struct mtk_hdmi *hdmi)
+{
+ mtk_hdmi_aud_enable_packet(hdmi, true);
+ hdmi->audio_enable = true;
+}
+
+void mtk_hdmi_audio_disable(struct mtk_hdmi *hdmi)
+{
+ mtk_hdmi_aud_enable_packet(hdmi, false);
+ hdmi->audio_enable = false;
+}
+
+int mtk_hdmi_audio_set_param(struct mtk_hdmi *hdmi,
+ struct hdmi_audio_param *param)
+{
+ if (!hdmi->audio_enable) {
+ dev_err(hdmi->dev, "HDMI audio is disabled!\n");
+ return -EINVAL;
+ }
+ dev_dbg(hdmi->dev, "codec:%d, input:%d, channel:%d, fs:%d\n",
+ param->aud_codec, param->aud_input_type,
+ param->aud_input_chan_type, param->codec_params.sample_rate);
+ memcpy(&hdmi->aud_param, param, sizeof(*param));
+ return mtk_hdmi_aud_output_config(hdmi, &hdmi->mode);
+}
+
+static int mtk_hdmi_output_set_display_mode(struct mtk_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ int ret;
+
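+ /* Blank video and mute audio while the PLL and mode are reprogrammed. */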
+ mtk_hdmi_hw_vid_black(hdmi, true);
+ mtk_hdmi_hw_aud_mute(hdmi);
+ mtk_hdmi_hw_send_av_mute(hdmi);
+ phy_power_off(hdmi->phy);
+
+ ret = mtk_hdmi_video_change_vpll(hdmi, mode->clock * 1000);
+ if (ret) {
+ dev_err(hdmi->dev, "Failed to set vpll: %d\n", ret);
+ return ret;
+ }
+ mtk_hdmi_video_set_display_mode(hdmi, mode);
+
+ phy_power_on(hdmi->phy);
+ mtk_hdmi_aud_output_config(hdmi, mode);
+
+ mtk_hdmi_setup_audio_infoframe(hdmi);
+ mtk_hdmi_setup_avi_infoframe(hdmi, mode);
+ mtk_hdmi_setup_spd_infoframe(hdmi, "mediatek", "On-chip HDMI");
+ if (mode->flags & DRM_MODE_FLAG_3D_MASK)
+ mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode);
+
+ mtk_hdmi_hw_vid_black(hdmi, false);
+ mtk_hdmi_hw_aud_unmute(hdmi);
+ mtk_hdmi_hw_send_av_unmute(hdmi);
+
+ return 0;
+}
+
+static const char * const mtk_hdmi_clk_names[MTK_HDMI_CLK_COUNT] = {
+ [MTK_HDMI_CLK_HDMI_PIXEL] = "pixel",
+ [MTK_HDMI_CLK_HDMI_PLL] = "pll",
+ [MTK_HDMI_CLK_AUD_BCLK] = "bclk",
+ [MTK_HDMI_CLK_AUD_SPDIF] = "spdif",
+};
+
+static int mtk_hdmi_get_all_clk(struct mtk_hdmi *hdmi,
+ struct device_node *np)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mtk_hdmi_clk_names); i++) {
+ hdmi->clk[i] = of_clk_get_by_name(np,
+ mtk_hdmi_clk_names[i]);
+ if (IS_ERR(hdmi->clk[i]))
+ return PTR_ERR(hdmi->clk[i]);
+ }
+ return 0;
+}
+
+static int mtk_hdmi_clk_enable_audio(struct mtk_hdmi *hdmi)
+{
+ int ret;
+
+ ret = clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_AUD_BCLK]);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_AUD_SPDIF]);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_AUD_BCLK]);
+ return ret;
+}
+
+static void mtk_hdmi_clk_disable_audio(struct mtk_hdmi *hdmi)
+{
+ clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_AUD_BCLK]);
+ clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_AUD_SPDIF]);
+}
+
+static enum drm_connector_status hdmi_conn_detect(struct drm_connector *conn,
+ bool force)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
+
+ return mtk_cec_hpd_high(hdmi->cec_dev) ?
+ connector_status_connected : connector_status_disconnected;
+}
+
+static void hdmi_conn_destroy(struct drm_connector *conn)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
+
+ mtk_cec_set_hpd_event(hdmi->cec_dev, NULL, NULL);
+
+ drm_connector_cleanup(conn);
+}
+
+static int mtk_hdmi_conn_get_modes(struct drm_connector *conn)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
+ struct edid *edid;
+ int ret;
+
+ if (!hdmi->ddc_adpt)
+ return -ENODEV;
+
+ edid = drm_get_edid(conn, hdmi->ddc_adpt);
+ if (!edid)
+ return -ENODEV;
+
+ hdmi->dvi_mode = !drm_detect_monitor_audio(edid);
+
+ drm_mode_connector_update_edid_property(conn, edid);
+
+ ret = drm_add_edid_modes(conn, edid);
+ drm_edid_to_eld(conn, edid);
+ kfree(edid);
+ return ret;
+}
+
+static int mtk_hdmi_conn_mode_valid(struct drm_connector *conn,
+ struct drm_display_mode *mode)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
+
+ dev_dbg(hdmi->dev, "xres=%d, yres=%d, refresh=%d, intl=%d, clock=%d\n",
+ mode->hdisplay, mode->vdisplay, mode->vrefresh,
+ !!(mode->flags & DRM_MODE_FLAG_INTERLACE), mode->clock * 1000);
+
+ if (hdmi->bridge.next) {
+ struct drm_display_mode adjusted_mode;
+
+ drm_mode_copy(&adjusted_mode, mode);
+ if (!drm_bridge_mode_fixup(hdmi->bridge.next, mode,
+ &adjusted_mode))
+ return MODE_BAD;
+ }
+
+ if (mode->clock < 27000)
+ return MODE_CLOCK_LOW;
+ if (mode->clock > 297000)
+ return MODE_CLOCK_HIGH;
+
+ return drm_mode_validate_size(mode, 0x1fff, 0x1fff);
+}
+
+static struct drm_encoder *mtk_hdmi_conn_best_enc(struct drm_connector *conn)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
+
+ return hdmi->bridge.encoder;
+}
+
+static const struct drm_connector_funcs mtk_hdmi_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .detect = hdmi_conn_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = hdmi_conn_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_connector_helper_funcs
+ mtk_hdmi_connector_helper_funcs = {
+ .get_modes = mtk_hdmi_conn_get_modes,
+ .mode_valid = mtk_hdmi_conn_mode_valid,
+ .best_encoder = mtk_hdmi_conn_best_enc,
+};
+
+static void mtk_hdmi_hpd_event(bool hpd, struct device *dev)
+{
+ struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+
+ if (hdmi && hdmi->bridge.encoder && hdmi->bridge.encoder->dev)
+ drm_helper_hpd_irq_event(hdmi->bridge.encoder->dev);
+}
+
+/*
+ * Bridge callbacks
+ */
+
+static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+ int ret;
+
+ ret = drm_connector_init(bridge->encoder->dev, &hdmi->conn,
+ &mtk_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ if (ret) {
+ dev_err(hdmi->dev, "Failed to initialize connector: %d\n", ret);
+ return ret;
+ }
+ drm_connector_helper_add(&hdmi->conn, &mtk_hdmi_connector_helper_funcs);
+
+ hdmi->conn.polled = DRM_CONNECTOR_POLL_HPD;
+ hdmi->conn.interlace_allowed = true;
+ hdmi->conn.doublescan_allowed = false;
+
+ ret = drm_mode_connector_attach_encoder(&hdmi->conn,
+ bridge->encoder);
+ if (ret) {
+ dev_err(hdmi->dev,
+ "Failed to attach connector to encoder: %d\n", ret);
+ return ret;
+ }
+
+ if (bridge->next) {
+ bridge->next->encoder = bridge->encoder;
+ ret = drm_bridge_attach(bridge->encoder->dev, bridge->next);
+ if (ret) {
+ dev_err(hdmi->dev,
+ "Failed to attach external bridge: %d\n", ret);
+ return ret;
+ }
+ }
+
+ mtk_cec_set_hpd_event(hdmi->cec_dev, mtk_hdmi_hpd_event, hdmi->dev);
+
+ return 0;
+}
+
+static bool mtk_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void mtk_hdmi_bridge_disable(struct drm_bridge *bridge)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+
+ if (!hdmi->enabled)
+ return;
+
+ phy_power_off(hdmi->phy);
+ clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]);
+ clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
+
+ hdmi->enabled = false;
+}
+
+static void mtk_hdmi_bridge_post_disable(struct drm_bridge *bridge)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+
+ if (!hdmi->powered)
+ return;
+
+ mtk_hdmi_hw_1p4_version_enable(hdmi, true);
+ mtk_hdmi_hw_make_reg_writable(hdmi, false);
+
+ hdmi->powered = false;
+}
+
+static void mtk_hdmi_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+
+ dev_dbg(hdmi->dev, "cur info: name:%s, hdisplay:%d\n",
+ adjusted_mode->name, adjusted_mode->hdisplay);
+ dev_dbg(hdmi->dev, "hsync_start:%d,hsync_end:%d, htotal:%d",
+ adjusted_mode->hsync_start, adjusted_mode->hsync_end,
+ adjusted_mode->htotal);
+ dev_dbg(hdmi->dev, "hskew:%d, vdisplay:%d\n",
+ adjusted_mode->hskew, adjusted_mode->vdisplay);
+ dev_dbg(hdmi->dev, "vsync_start:%d, vsync_end:%d, vtotal:%d",
+ adjusted_mode->vsync_start, adjusted_mode->vsync_end,
+ adjusted_mode->vtotal);
+ dev_dbg(hdmi->dev, "vscan:%d, flag:%d\n",
+ adjusted_mode->vscan, adjusted_mode->flags);
+
+ drm_mode_copy(&hdmi->mode, adjusted_mode);
+}
+
+static void mtk_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+
+ mtk_hdmi_hw_make_reg_writable(hdmi, true);
+ mtk_hdmi_hw_1p4_version_enable(hdmi, true);
+
+ hdmi->powered = true;
+}
+
+static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+
+ mtk_hdmi_output_set_display_mode(hdmi, &hdmi->mode);
+ clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
+ clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]);
+ phy_power_on(hdmi->phy);
+
+ hdmi->enabled = true;
+}
+
+static const struct drm_bridge_funcs mtk_hdmi_bridge_funcs = {
+ .attach = mtk_hdmi_bridge_attach,
+ .mode_fixup = mtk_hdmi_bridge_mode_fixup,
+ .disable = mtk_hdmi_bridge_disable,
+ .post_disable = mtk_hdmi_bridge_post_disable,
+ .mode_set = mtk_hdmi_bridge_mode_set,
+ .pre_enable = mtk_hdmi_bridge_pre_enable,
+ .enable = mtk_hdmi_bridge_enable,
+};
+
+static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *cec_np, *port, *ep, *remote, *i2c_np;
+ struct platform_device *cec_pdev;
+ struct regmap *regmap;
+ struct resource *mem;
+ int ret;
+
+ ret = mtk_hdmi_get_all_clk(hdmi, np);
+ if (ret) {
+ dev_err(dev, "Failed to get clocks: %d\n", ret);
+ return ret;
+ }
+
+ /* The CEC module handles HDMI hotplug detection */
+ cec_np = of_find_compatible_node(np->parent, NULL,
+ "mediatek,mt8173-cec");
+ if (!cec_np) {
+ dev_err(dev, "Failed to find CEC node\n");
+ return -EINVAL;
+ }
+
+ cec_pdev = of_find_device_by_node(cec_np);
+ if (!cec_pdev) {
+ dev_err(hdmi->dev, "Waiting for CEC device %s\n",
+ cec_np->full_name);
+ return -EPROBE_DEFER;
+ }
+ hdmi->cec_dev = &cec_pdev->dev;
+
+ /*
+ * The mediatek,syscon-hdmi property contains a phandle link to the
+ * MMSYS_CONFIG device and the register offset of the HDMI_SYS_CFG
+ * registers it contains.
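+ * An illustrative entry (the 0x900 offset is assumed here):
+ *
+ *	mediatek,syscon-hdmi = <&mmsys 0x900>;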
+ */
+ regmap = syscon_regmap_lookup_by_phandle(np, "mediatek,syscon-hdmi");
+ ret = of_property_read_u32_index(np, "mediatek,syscon-hdmi", 1,
+ &hdmi->sys_offset);
+ if (IS_ERR(regmap))
+ ret = PTR_ERR(regmap);
+ if (ret) {
+ dev_err(dev,
+ "Failed to get system configuration registers: %d\n",
+ ret);
+ return ret;
+ }
+ hdmi->sys_regmap = regmap;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hdmi->regs = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(hdmi->regs))
+ return PTR_ERR(hdmi->regs);
+
+ port = of_graph_get_port_by_id(np, 1);
+ if (!port) {
+ dev_err(dev, "Missing output port node\n");
+ return -EINVAL;
+ }
+
+ ep = of_get_child_by_name(port, "endpoint");
+ if (!ep) {
+ dev_err(dev, "Missing endpoint node in port %s\n",
+ port->full_name);
+ of_node_put(port);
+ return -EINVAL;
+ }
+ of_node_put(port);
+
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote) {
+ dev_err(dev, "Missing connector/bridge node for endpoint %s\n",
+ ep->full_name);
+ of_node_put(ep);
+ return -EINVAL;
+ }
+ of_node_put(ep);
+
+ if (!of_device_is_compatible(remote, "hdmi-connector")) {
+ hdmi->bridge.next = of_drm_find_bridge(remote);
+ if (!hdmi->bridge.next) {
+ dev_err(dev, "Waiting for external bridge\n");
+ of_node_put(remote);
+ return -EPROBE_DEFER;
+ }
+ }
+
+ i2c_np = of_parse_phandle(remote, "ddc-i2c-bus", 0);
+ if (!i2c_np) {
+ dev_err(dev, "Failed to find ddc-i2c-bus node in %s\n",
+ remote->full_name);
+ of_node_put(remote);
+ return -EINVAL;
+ }
+ of_node_put(remote);
+
+ hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np);
+ if (!hdmi->ddc_adpt) {
+ dev_err(dev, "Failed to get ddc i2c adapter by node\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * HDMI audio codec callbacks
+ */
+
+static int mtk_hdmi_audio_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
+{
+ struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+ struct hdmi_audio_param hdmi_params;
+ unsigned int chan = params->cea.channels;
+
+ dev_dbg(hdmi->dev, "%s: %u Hz, %d bit, %u channels\n", __func__,
+ params->sample_rate, params->sample_width, chan);
+
+ if (!hdmi->bridge.encoder)
+ return -ENODEV;
+
+ switch (chan) {
+ case 2:
+ hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0;
+ break;
+ case 4:
+ hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_4_0;
+ break;
+ case 6:
+ hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_5_1;
+ break;
+ case 8:
+ hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_7_1;
+ break;
+ default:
+ dev_err(hdmi->dev, "channel[%u] not supported!\n", chan);
+ return -EINVAL;
+ }
+
+ switch (params->sample_rate) {
+ case 32000:
+ case 44100:
+ case 48000:
+ case 88200:
+ case 96000:
+ case 176400:
+ case 192000:
+ break;
+ default:
+ dev_err(hdmi->dev, "rate[%d] not supported!\n",
+ params->sample_rate);
+ return -EINVAL;
+ }
+
+ switch (daifmt->fmt) {
+ case HDMI_I2S:
+ hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
+ hdmi_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+ hdmi_params.aud_input_type = HDMI_AUD_INPUT_I2S;
+ hdmi_params.aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
+ hdmi_params.aud_mclk = HDMI_AUD_MCLK_128FS;
+ break;
+ default:
+ dev_err(hdmi->dev, "%s: Invalid DAI format %d\n", __func__,
+ daifmt->fmt);
+ return -EINVAL;
+ }
+
+ memcpy(&hdmi_params.codec_params, params,
+ sizeof(hdmi_params.codec_params));
+
+ mtk_hdmi_audio_set_param(hdmi, &hdmi_params);
+
+ return 0;
+}
+
+static int mtk_hdmi_audio_startup(struct device *dev, void *data)
+{
+ struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ mtk_hdmi_audio_enable(hdmi);
+
+ return 0;
+}
+
+static void mtk_hdmi_audio_shutdown(struct device *dev, void *data)
+{
+ struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ mtk_hdmi_audio_disable(hdmi);
+}
+
+static int mtk_hdmi_audio_digital_mute(struct device *dev, void *data,
+ bool enable)
+{
+ struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "%s(%d)\n", __func__, enable);
+
+ if (enable)
+ mtk_hdmi_hw_aud_mute(hdmi);
+ else
+ mtk_hdmi_hw_aud_unmute(hdmi);
+
+ return 0;
+}
+
+static int mtk_hdmi_audio_get_eld(struct device *dev, void *data,
+ uint8_t *buf, size_t len)
+{
+ struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
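+ /* The connector's ELD was filled in by drm_edid_to_eld() in get_modes. */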
+ memcpy(buf, hdmi->conn.eld, min(sizeof(hdmi->conn.eld), len));
+
+ return 0;
+}
+
+static const struct hdmi_codec_ops mtk_hdmi_audio_codec_ops = {
+ .hw_params = mtk_hdmi_audio_hw_params,
+ .audio_startup = mtk_hdmi_audio_startup,
+ .audio_shutdown = mtk_hdmi_audio_shutdown,
+ .digital_mute = mtk_hdmi_audio_digital_mute,
+ .get_eld = mtk_hdmi_audio_get_eld,
+};
+
+static void mtk_hdmi_register_audio_driver(struct device *dev)
+{
+ struct hdmi_codec_pdata codec_data = {
+ .ops = &mtk_hdmi_audio_codec_ops,
+ .max_i2s_channels = 2,
+ .i2s = 1,
+ };
+ struct platform_device *pdev;
+
+ pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME,
+ PLATFORM_DEVID_AUTO, &codec_data,
+ sizeof(codec_data));
+ if (IS_ERR(pdev))
+ return;
+
+ DRM_INFO("%s driver bound to HDMI\n", HDMI_CODEC_DRV_NAME);
+}
+
+static int mtk_drm_hdmi_probe(struct platform_device *pdev)
+{
+ struct mtk_hdmi *hdmi;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
+
+ hdmi->dev = dev;
+
+ ret = mtk_hdmi_dt_parse_pdata(hdmi, pdev);
+ if (ret)
+ return ret;
+
+ hdmi->phy = devm_phy_get(dev, "hdmi");
+ if (IS_ERR(hdmi->phy)) {
+ ret = PTR_ERR(hdmi->phy);
+ dev_err(dev, "Failed to get HDMI PHY: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, hdmi);
+
+ ret = mtk_hdmi_output_init(hdmi);
+ if (ret) {
+ dev_err(dev, "Failed to initialize hdmi output\n");
+ return ret;
+ }
+
+ mtk_hdmi_register_audio_driver(dev);
+
+ hdmi->bridge.funcs = &mtk_hdmi_bridge_funcs;
+ hdmi->bridge.of_node = pdev->dev.of_node;
+ ret = drm_bridge_add(&hdmi->bridge);
+ if (ret) {
+ dev_err(dev, "failed to add bridge, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = mtk_hdmi_clk_enable_audio(hdmi);
+ if (ret) {
+ dev_err(dev, "Failed to enable audio clocks: %d\n", ret);
+ goto err_bridge_remove;
+ }
+
+ dev_dbg(dev, "MediaTek HDMI probe succeeded\n");
+ return 0;
+
+err_bridge_remove:
+ drm_bridge_remove(&hdmi->bridge);
+ return ret;
+}
+
+static int mtk_drm_hdmi_remove(struct platform_device *pdev)
+{
+ struct mtk_hdmi *hdmi = platform_get_drvdata(pdev);
+
+ drm_bridge_remove(&hdmi->bridge);
+ mtk_hdmi_clk_disable_audio(hdmi);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_hdmi_suspend(struct device *dev)
+{
+ struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+
+ mtk_hdmi_clk_disable_audio(hdmi);
+ dev_dbg(dev, "HDMI suspended\n");
+ return 0;
+}
+
+static int mtk_hdmi_resume(struct device *dev)
+{
+ struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = mtk_hdmi_clk_enable_audio(hdmi);
+ if (ret) {
+ dev_err(dev, "Failed to enable audio clocks on resume: %d\n", ret);
+ return ret;
+ }
+
+ dev_dbg(dev, "HDMI resumed\n");
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mtk_hdmi_pm_ops,
+ mtk_hdmi_suspend, mtk_hdmi_resume);
+
+static const struct of_device_id mtk_drm_hdmi_of_ids[] = {
+ { .compatible = "mediatek,mt8173-hdmi", },
+ {}
+};
+
+static struct platform_driver mtk_hdmi_driver = {
+ .probe = mtk_drm_hdmi_probe,
+ .remove = mtk_drm_hdmi_remove,
+ .driver = {
+ .name = "mediatek-drm-hdmi",
+ .of_match_table = mtk_drm_hdmi_of_ids,
+ .pm = &mtk_hdmi_pm_ops,
+ },
+};
+
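+/* Listed in registration order: PHY, DDC and CEC come up before the bridge. */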
+static struct platform_driver * const mtk_hdmi_drivers[] = {
+ &mtk_hdmi_phy_driver,
+ &mtk_hdmi_ddc_driver,
+ &mtk_cec_driver,
+ &mtk_hdmi_driver,
+};
+
+static int __init mtk_hdmitx_init(void)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mtk_hdmi_drivers); i++) {
+ ret = platform_driver_register(mtk_hdmi_drivers[i]);
+ if (ret < 0) {
+ pr_err("Failed to register %s driver: %d\n",
+ mtk_hdmi_drivers[i]->driver.name, ret);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ while (--i >= 0)
+ platform_driver_unregister(mtk_hdmi_drivers[i]);
+
+ return ret;
+}
+
+static void __exit mtk_hdmitx_exit(void)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(mtk_hdmi_drivers) - 1; i >= 0; i--)
+ platform_driver_unregister(mtk_hdmi_drivers[i]);
+}
+
+module_init(mtk_hdmitx_init);
+module_exit(mtk_hdmitx_exit);
+
+MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek HDMI Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.h b/drivers/gpu/drm/mediatek/mtk_hdmi.h
new file mode 100644
index 000000000..6371b3de1
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Jie Qiu <jie.qiu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MTK_HDMI_CTRL_H
+#define _MTK_HDMI_CTRL_H
+
+struct platform_driver;
+
+extern struct platform_driver mtk_cec_driver;
+extern struct platform_driver mtk_hdmi_ddc_driver;
+extern struct platform_driver mtk_hdmi_phy_driver;
+
+#endif /* _MTK_HDMI_CTRL_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
new file mode 100644
index 000000000..33c9e1bdb
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
@@ -0,0 +1,358 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Jie Qiu <jie.qiu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+#define SIF1_CLOCK (288)
+#define DDC_DDCMCTL0 (0x0)
+#define DDCM_ODRAIN BIT(31)
+#define DDCM_CLK_DIV_OFFSET (16)
+#define DDCM_CLK_DIV_MASK (0xfff << 16)
+#define DDCM_CS_STATUS BIT(4)
+#define DDCM_SCL_STATE BIT(3)
+#define DDCM_SDA_STATE BIT(2)
+#define DDCM_SM0EN BIT(1)
+#define DDCM_SCL_STRECH BIT(0)
+#define DDC_DDCMCTL1 (0x4)
+#define DDCM_ACK_OFFSET (16)
+#define DDCM_ACK_MASK (0xff << 16)
+#define DDCM_PGLEN_OFFSET (8)
+#define DDCM_PGLEN_MASK (0x7 << 8)
+#define DDCM_SIF_MODE_OFFSET (4)
+#define DDCM_SIF_MODE_MASK (0x7 << 4)
+#define DDCM_START (0x1)
+#define DDCM_WRITE_DATA (0x2)
+#define DDCM_STOP (0x3)
+#define DDCM_READ_DATA_NO_ACK (0x4)
+#define DDCM_READ_DATA_ACK (0x5)
+#define DDCM_TRI BIT(0)
+#define DDC_DDCMD0 (0x8)
+#define DDCM_DATA3 (0xff << 24)
+#define DDCM_DATA2 (0xff << 16)
+#define DDCM_DATA1 (0xff << 8)
+#define DDCM_DATA0 (0xff << 0)
+#define DDC_DDCMD1 (0xc)
+#define DDCM_DATA7 (0xff << 24)
+#define DDCM_DATA6 (0xff << 16)
+#define DDCM_DATA5 (0xff << 8)
+#define DDCM_DATA4 (0xff << 0)
+
+struct mtk_hdmi_ddc {
+ struct i2c_adapter adap;
+ struct clk *clk;
+ void __iomem *regs;
+};
+
+static inline void sif_set_bit(struct mtk_hdmi_ddc *ddc, unsigned int offset,
+ unsigned int val)
+{
+ writel(readl(ddc->regs + offset) | val, ddc->regs + offset);
+}
+
+static inline void sif_clr_bit(struct mtk_hdmi_ddc *ddc, unsigned int offset,
+ unsigned int val)
+{
+ writel(readl(ddc->regs + offset) & ~val, ddc->regs + offset);
+}
+
+static inline bool sif_bit_is_set(struct mtk_hdmi_ddc *ddc, unsigned int offset,
+ unsigned int val)
+{
+ return (readl(ddc->regs + offset) & val) == val;
+}
+
+static inline void sif_write_mask(struct mtk_hdmi_ddc *ddc, unsigned int offset,
+ unsigned int mask, unsigned int shift,
+ unsigned int val)
+{
+ unsigned int tmp;
+
+ tmp = readl(ddc->regs + offset);
+ tmp &= ~mask;
+ tmp |= (val << shift) & mask;
+ writel(tmp, ddc->regs + offset);
+}
+
+static inline unsigned int sif_read_mask(struct mtk_hdmi_ddc *ddc,
+ unsigned int offset, unsigned int mask,
+ unsigned int shift)
+{
+ return (readl(ddc->regs + offset) & mask) >> shift;
+}
+
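+/*
+ * Run one DDC command: select the SIF mode, set DDCM_TRI to start the
+ * transfer, then poll until the hardware clears the bit again (or the
+ * 20 ms timeout expires).
+ */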
+static void ddcm_trigger_mode(struct mtk_hdmi_ddc *ddc, int mode)
+{
+ u32 val;
+
+ sif_write_mask(ddc, DDC_DDCMCTL1, DDCM_SIF_MODE_MASK,
+ DDCM_SIF_MODE_OFFSET, mode);
+ sif_set_bit(ddc, DDC_DDCMCTL1, DDCM_TRI);
+ readl_poll_timeout(ddc->regs + DDC_DDCMCTL1, val,
+ (val & DDCM_TRI) != DDCM_TRI, 4, 20000);
+}
+
+static int mtk_hdmi_ddc_read_msg(struct mtk_hdmi_ddc *ddc, struct i2c_msg *msg)
+{
+ struct device *dev = ddc->adap.dev.parent;
+ u32 remain_count, ack_count, ack_final, read_count, temp_count;
+ u32 index = 0;
+ u32 ack;
+ int i;
+
+ ddcm_trigger_mode(ddc, DDCM_START);
+ sif_write_mask(ddc, DDC_DDCMD0, 0xff, 0, (msg->addr << 1) | 0x01);
+ sif_write_mask(ddc, DDC_DDCMCTL1, DDCM_PGLEN_MASK, DDCM_PGLEN_OFFSET,
+ 0x00);
+ ddcm_trigger_mode(ddc, DDCM_WRITE_DATA);
+ ack = sif_read_mask(ddc, DDC_DDCMCTL1, DDCM_ACK_MASK, DDCM_ACK_OFFSET);
+ dev_dbg(dev, "ack = 0x%x\n", ack);
+ if (ack != 0x01) {
+ dev_err(dev, "Address NACK! ACK(0x%x)\n", ack);
+ return -ENXIO;
+ }
+
+ remain_count = msg->len;
+ ack_count = (msg->len - 1) / 8;
+ ack_final = 0;
+
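+ /*
+ * DDC_DDCMD0/DDC_DDCMD1 buffer at most eight bytes, so read in
+ * chunks of up to eight and finish the final chunk without an
+ * ACK to terminate the transfer.
+ */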
+ while (remain_count > 0) {
+ if (ack_count > 0) {
+ read_count = 8;
+ ack_final = 0;
+ ack_count--;
+ } else {
+ read_count = remain_count;
+ ack_final = 1;
+ }
+
+ sif_write_mask(ddc, DDC_DDCMCTL1, DDCM_PGLEN_MASK,
+ DDCM_PGLEN_OFFSET, read_count - 1);
+ ddcm_trigger_mode(ddc, (ack_final == 1) ?
+ DDCM_READ_DATA_NO_ACK :
+ DDCM_READ_DATA_ACK);
+
+ ack = sif_read_mask(ddc, DDC_DDCMCTL1, DDCM_ACK_MASK,
+ DDCM_ACK_OFFSET);
+ temp_count = 0;
+ while (((ack & (1 << temp_count)) != 0) && (temp_count < 8))
+ temp_count++;
+ if (((ack_final == 1) && (temp_count != (read_count - 1))) ||
+ ((ack_final == 0) && (temp_count != read_count))) {
+ dev_err(dev, "Data ACK mismatch! ACK(0x%x)\n", ack);
+ break;
+ }
+
+ for (i = read_count; i >= 1; i--) {
+ int shift;
+ int offset;
+
+ if (i > 4) {
+ offset = DDC_DDCMD1;
+ shift = (i - 5) * 8;
+ } else {
+ offset = DDC_DDCMD0;
+ shift = (i - 1) * 8;
+ }
+
+ msg->buf[index + i - 1] = sif_read_mask(ddc, offset,
+ 0xff << shift,
+ shift);
+ }
+
+ remain_count -= read_count;
+ index += read_count;
+ }
+
+ return 0;
+}
+
+static int mtk_hdmi_ddc_write_msg(struct mtk_hdmi_ddc *ddc, struct i2c_msg *msg)
+{
+ struct device *dev = ddc->adap.dev.parent;
+ u32 ack;
+
+ ddcm_trigger_mode(ddc, DDCM_START);
+ sif_write_mask(ddc, DDC_DDCMD0, DDCM_DATA0, 0, msg->addr << 1);
+ sif_write_mask(ddc, DDC_DDCMD0, DDCM_DATA1, 8, msg->buf[0]);
+ sif_write_mask(ddc, DDC_DDCMCTL1, DDCM_PGLEN_MASK, DDCM_PGLEN_OFFSET,
+ 0x1);
+ ddcm_trigger_mode(ddc, DDCM_WRITE_DATA);
+
+ ack = sif_read_mask(ddc, DDC_DDCMCTL1, DDCM_ACK_MASK, DDCM_ACK_OFFSET);
+ dev_dbg(dev, "ack = %d\n", ack);
+
+ if (ack != 0x03) {
+ dev_err(dev, "i2c ack err!\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mtk_hdmi_ddc_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs, int num)
+{
+ struct mtk_hdmi_ddc *ddc = adapter->algo_data;
+ struct device *dev = adapter->dev.parent;
+ int ret;
+ int i;
+
+ if (!ddc) {
+ dev_err(dev, "invalid arguments\n");
+ return -EINVAL;
+ }
+
+ sif_set_bit(ddc, DDC_DDCMCTL0, DDCM_SCL_STRECH);
+ sif_set_bit(ddc, DDC_DDCMCTL0, DDCM_SM0EN);
+ sif_clr_bit(ddc, DDC_DDCMCTL0, DDCM_ODRAIN);
+
+ if (sif_bit_is_set(ddc, DDC_DDCMCTL1, DDCM_TRI)) {
+ dev_err(dev, "ddc line is busy!\n");
+ return -EBUSY;
+ }
+
+ sif_write_mask(ddc, DDC_DDCMCTL0, DDCM_CLK_DIV_MASK,
+ DDCM_CLK_DIV_OFFSET, SIF1_CLOCK);
+
+ for (i = 0; i < num; i++) {
+ struct i2c_msg *msg = &msgs[i];
+
+ dev_dbg(dev, "i2c msg, adr:0x%x, flags:%d, len: 0x%x\n",
+ msg->addr, msg->flags, msg->len);
+
+ if (msg->flags & I2C_M_RD)
+ ret = mtk_hdmi_ddc_read_msg(ddc, msg);
+ else
+ ret = mtk_hdmi_ddc_write_msg(ddc, msg);
+ if (ret < 0)
+ goto xfer_end;
+ }
+
+ ddcm_trigger_mode(ddc, DDCM_STOP);
+
+ return i;
+
+xfer_end:
+ ddcm_trigger_mode(ddc, DDCM_STOP);
+ dev_err(dev, "ddc failed!\n");
+ return ret;
+}
+
+static u32 mtk_hdmi_ddc_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm mtk_hdmi_ddc_algorithm = {
+ .master_xfer = mtk_hdmi_ddc_xfer,
+ .functionality = mtk_hdmi_ddc_func,
+};
+
+static int mtk_hdmi_ddc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_hdmi_ddc *ddc;
+ struct resource *mem;
+ int ret;
+
+ ddc = devm_kzalloc(dev, sizeof(struct mtk_hdmi_ddc), GFP_KERNEL);
+ if (!ddc)
+ return -ENOMEM;
+
+ ddc->clk = devm_clk_get(dev, "ddc-i2c");
+ if (IS_ERR(ddc->clk)) {
+ dev_err(dev, "Failed to get ddc-i2c clock: %ld\n",
+ PTR_ERR(ddc->clk));
+ return PTR_ERR(ddc->clk);
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ddc->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(ddc->regs))
+ return PTR_ERR(ddc->regs);
+
+ ret = clk_prepare_enable(ddc->clk);
+ if (ret) {
+ dev_err(dev, "enable ddc clk failed!\n");
+ return ret;
+ }
+
+ strlcpy(ddc->adap.name, "mediatek-hdmi-ddc", sizeof(ddc->adap.name));
+ ddc->adap.owner = THIS_MODULE;
+ ddc->adap.class = I2C_CLASS_DDC;
+ ddc->adap.algo = &mtk_hdmi_ddc_algorithm;
+ ddc->adap.retries = 3;
+ ddc->adap.dev.of_node = dev->of_node;
+ ddc->adap.algo_data = ddc;
+ ddc->adap.dev.parent = &pdev->dev;
+
+ ret = i2c_add_adapter(&ddc->adap);
+ if (ret < 0) {
+ dev_err(dev, "failed to add bus to i2c core\n");
+ goto err_clk_disable;
+ }
+
+ platform_set_drvdata(pdev, ddc);
+
+ dev_dbg(dev, "ddc->adap: %p\n", &ddc->adap);
+ dev_dbg(dev, "ddc->clk: %p\n", ddc->clk);
+ dev_dbg(dev, "physical adr: %pa, end: %pa\n", &mem->start,
+ &mem->end);
+
+ return 0;
+
+err_clk_disable:
+ clk_disable_unprepare(ddc->clk);
+ return ret;
+}
+
+static int mtk_hdmi_ddc_remove(struct platform_device *pdev)
+{
+ struct mtk_hdmi_ddc *ddc = platform_get_drvdata(pdev);
+
+ i2c_del_adapter(&ddc->adap);
+ clk_disable_unprepare(ddc->clk);
+
+ return 0;
+}
+
+static const struct of_device_id mtk_hdmi_ddc_match[] = {
+ { .compatible = "mediatek,mt8173-hdmi-ddc", },
+ {},
+};
+
+struct platform_driver mtk_hdmi_ddc_driver = {
+ .probe = mtk_hdmi_ddc_probe,
+ .remove = mtk_hdmi_ddc_remove,
+ .driver = {
+ .name = "mediatek-hdmi-ddc",
+ .of_match_table = mtk_hdmi_ddc_match,
+ },
+};
+
+MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek HDMI DDC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_regs.h b/drivers/gpu/drm/mediatek/mtk_hdmi_regs.h
new file mode 100644
index 000000000..a5cb07d12
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_regs.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Jie Qiu <jie.qiu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MTK_HDMI_REGS_H
+#define _MTK_HDMI_REGS_H
+
+#define GRL_IFM_PORT 0x188
+#define GRL_CH_SWAP 0x198
+#define LR_SWAP BIT(0)
+#define LFE_CC_SWAP BIT(1)
+#define LSRS_SWAP BIT(2)
+#define RLS_RRS_SWAP BIT(3)
+#define LR_STATUS_SWAP BIT(4)
+#define GRL_I2S_C_STA0 0x140
+#define GRL_I2S_C_STA1 0x144
+#define GRL_I2S_C_STA2 0x148
+#define GRL_I2S_C_STA3 0x14C
+#define GRL_I2S_C_STA4 0x150
+#define GRL_I2S_UV 0x154
+#define I2S_UV_V BIT(0)
+#define I2S_UV_U BIT(1)
+#define I2S_UV_CH_EN_MASK 0x3c
+#define I2S_UV_CH_EN(x) BIT((x) + 2)
+#define I2S_UV_TMDS_DEBUG BIT(6)
+#define I2S_UV_NORMAL_INFO_INV BIT(7)
+#define GRL_ACP_ISRC_CTRL 0x158
+#define VS_EN BIT(0)
+#define ACP_EN BIT(1)
+#define ISRC1_EN BIT(2)
+#define ISRC2_EN BIT(3)
+#define GAMUT_EN BIT(4)
+#define GRL_CTS_CTRL 0x160
+#define CTS_CTRL_SOFT BIT(0)
+#define GRL_INT 0x14
+#define INT_MDI BIT(0)
+#define INT_HDCP BIT(1)
+#define INT_FIFO_O BIT(2)
+#define INT_FIFO_U BIT(3)
+#define INT_IFM_ERR BIT(4)
+#define INT_INF_DONE BIT(5)
+#define INT_NCTS_DONE BIT(6)
+#define INT_CTRL_PKT_DONE BIT(7)
+#define GRL_INT_MASK 0x18
+#define GRL_CTRL 0x1C
+#define CTRL_GEN_EN BIT(2)
+#define CTRL_SPD_EN BIT(3)
+#define CTRL_MPEG_EN BIT(4)
+#define CTRL_AUDIO_EN BIT(5)
+#define CTRL_AVI_EN BIT(6)
+#define CTRL_AVMUTE BIT(7)
+#define GRL_STATUS 0x20
+#define STATUS_HTPLG BIT(0)
+#define STATUS_PORD BIT(1)
+#define GRL_DIVN 0x170
+#define NCTS_WRI_ANYTIME BIT(6)
+#define GRL_AUDIO_CFG 0x17C
+#define AUDIO_ZERO BIT(0)
+#define HIGH_BIT_RATE BIT(1)
+#define SACD_DST BIT(2)
+#define DST_NORMAL_DOUBLE BIT(3)
+#define DSD_INV BIT(4)
+#define LR_INV BIT(5)
+#define LR_MIX BIT(6)
+#define DSD_SEL BIT(7)
+#define GRL_NCTS 0x184
+#define GRL_CH_SW0 0x18C
+#define GRL_CH_SW1 0x190
+#define GRL_CH_SW2 0x194
+#define CH_SWITCH(from, to) ((from) << ((to) * 3))
+#define GRL_INFOFRM_VER 0x19C
+#define GRL_INFOFRM_TYPE 0x1A0
+#define GRL_INFOFRM_LNG 0x1A4
+#define GRL_MIX_CTRL 0x1B4
+#define MIX_CTRL_SRC_EN BIT(0)
+#define BYPASS_VOLUME BIT(1)
+#define MIX_CTRL_FLAT BIT(7)
+#define GRL_AOUT_CFG 0x1C4
+#define AOUT_BNUM_SEL_MASK 0x03
+#define AOUT_24BIT 0x00
+#define AOUT_20BIT 0x02
+#define AOUT_16BIT 0x03
+#define AOUT_FIFO_ADAP_CTRL BIT(6)
+#define AOUT_BURST_PREAMBLE_EN BIT(7)
+#define HIGH_BIT_RATE_PACKET_ALIGN (AOUT_BURST_PREAMBLE_EN | \
+ AOUT_FIFO_ADAP_CTRL)
+#define GRL_SHIFT_L1 0x1C0
+#define GRL_SHIFT_R2 0x1B0
+#define AUDIO_PACKET_OFF BIT(6)
+#define GRL_CFG0 0x24
+#define CFG0_I2S_MODE_MASK 0x3
+#define CFG0_I2S_MODE_RTJ 0x1
+#define CFG0_I2S_MODE_LTJ 0x0
+#define CFG0_I2S_MODE_I2S 0x2
+#define CFG0_W_LENGTH_MASK 0x30
+#define CFG0_W_LENGTH_24BIT 0x00
+#define CFG0_W_LENGTH_16BIT 0x10
+#define GRL_CFG1 0x28
+#define CFG1_EDG_SEL BIT(0)
+#define CFG1_SPDIF BIT(1)
+#define CFG1_DVI BIT(2)
+#define CFG1_HDCP_DEBUG BIT(3)
+#define GRL_CFG2 0x2c
+#define CFG2_MHL_DE_SEL BIT(3)
+#define CFG2_MHL_FAKE_DE_SEL BIT(4)
+#define CFG2_MHL_DATA_REMAP BIT(5)
+#define CFG2_NOTICE_EN BIT(6)
+#define CFG2_ACLK_INV BIT(7)
+#define GRL_CFG3 0x30
+#define CFG3_AES_KEY_INDEX_MASK 0x3f
+#define CFG3_CONTROL_PACKET_DELAY BIT(6)
+#define CFG3_KSV_LOAD_START BIT(7)
+#define GRL_CFG4 0x34
+#define CFG4_AES_KEY_LOAD BIT(4)
+#define CFG4_AV_UNMUTE_EN BIT(5)
+#define CFG4_AV_UNMUTE_SET BIT(6)
+#define CFG4_MHL_MODE BIT(7)
+#define GRL_CFG5 0x38
+#define CFG5_CD_RATIO_MASK 0x8F
+#define CFG5_FS128 (0x1 << 4)
+#define CFG5_FS256 (0x2 << 4)
+#define CFG5_FS384 (0x3 << 4)
+#define CFG5_FS512 (0x4 << 4)
+#define CFG5_FS768 (0x6 << 4)
+#define DUMMY_304 0x304
+#define CHMO_SEL (0x3 << 2)
+#define CHM1_SEL (0x3 << 4)
+#define CHM2_SEL (0x3 << 6)
+#define AUDIO_I2S_NCTS_SEL BIT(1)
+#define AUDIO_I2S_NCTS_SEL_64 (1 << 1)
+#define AUDIO_I2S_NCTS_SEL_128 (0 << 1)
+#define NEW_GCP_CTRL BIT(0)
+#define NEW_GCP_CTRL_MERGE BIT(0)
+#define GRL_L_STATUS_0 0x200
+#define GRL_L_STATUS_1 0x204
+#define GRL_L_STATUS_2 0x208
+#define GRL_L_STATUS_3 0x20c
+#define GRL_L_STATUS_4 0x210
+#define GRL_L_STATUS_5 0x214
+#define GRL_L_STATUS_6 0x218
+#define GRL_L_STATUS_7 0x21c
+#define GRL_L_STATUS_8 0x220
+#define GRL_L_STATUS_9 0x224
+#define GRL_L_STATUS_10 0x228
+#define GRL_L_STATUS_11 0x22c
+#define GRL_L_STATUS_12 0x230
+#define GRL_L_STATUS_13 0x234
+#define GRL_L_STATUS_14 0x238
+#define GRL_L_STATUS_15 0x23c
+#define GRL_L_STATUS_16 0x240
+#define GRL_L_STATUS_17 0x244
+#define GRL_L_STATUS_18 0x248
+#define GRL_L_STATUS_19 0x24c
+#define GRL_L_STATUS_20 0x250
+#define GRL_L_STATUS_21 0x254
+#define GRL_L_STATUS_22 0x258
+#define GRL_L_STATUS_23 0x25c
+#define GRL_R_STATUS_0 0x260
+#define GRL_R_STATUS_1 0x264
+#define GRL_R_STATUS_2 0x268
+#define GRL_R_STATUS_3 0x26c
+#define GRL_R_STATUS_4 0x270
+#define GRL_R_STATUS_5 0x274
+#define GRL_R_STATUS_6 0x278
+#define GRL_R_STATUS_7 0x27c
+#define GRL_R_STATUS_8 0x280
+#define GRL_R_STATUS_9 0x284
+#define GRL_R_STATUS_10 0x288
+#define GRL_R_STATUS_11 0x28c
+#define GRL_R_STATUS_12 0x290
+#define GRL_R_STATUS_13 0x294
+#define GRL_R_STATUS_14 0x298
+#define GRL_R_STATUS_15 0x29c
+#define GRL_R_STATUS_16 0x2a0
+#define GRL_R_STATUS_17 0x2a4
+#define GRL_R_STATUS_18 0x2a8
+#define GRL_R_STATUS_19 0x2ac
+#define GRL_R_STATUS_20 0x2b0
+#define GRL_R_STATUS_21 0x2b4
+#define GRL_R_STATUS_22 0x2b8
+#define GRL_R_STATUS_23 0x2bc
+#define GRL_ABIST_CTRL0 0x2D4
+#define GRL_ABIST_CTRL1 0x2D8
+#define ABIST_EN BIT(7)
+#define ABIST_DATA_FMT (0x7 << 0)
+#define VIDEO_CFG_0 0x380
+#define VIDEO_CFG_1 0x384
+#define VIDEO_CFG_2 0x388
+#define VIDEO_CFG_3 0x38c
+#define VIDEO_CFG_4 0x390
+#define VIDEO_SOURCE_SEL BIT(7)
+#define NORMAL_PATH (1 << 7)
+#define GEN_RGB (0 << 7)
+
+#define HDMI_SYS_CFG1C 0x000
+#define HDMI_ON BIT(0)
+#define HDMI_RST BIT(1)
+#define ANLG_ON BIT(2)
+#define CFG10_DVI BIT(3)
+#define HDMI_TST BIT(3)
+#define SYS_KEYMASK1 (0xff << 8)
+#define SYS_KEYMASK2 (0xff << 16)
+#define AUD_OUTSYNC_EN BIT(24)
+#define AUD_OUTSYNC_PRE_EN BIT(25)
+#define I2CM_ON BIT(26)
+#define E2PROM_TYPE_8BIT BIT(27)
+#define MCM_E2PROM_ON BIT(28)
+#define EXT_E2PROM_ON BIT(29)
+#define HTPLG_PIN_SEL_OFF BIT(30)
+#define AES_EFUSE_ENABLE BIT(31)
+#define HDMI_SYS_CFG20 0x004
+#define DEEP_COLOR_MODE_MASK (3 << 1)
+#define COLOR_8BIT_MODE (0 << 1)
+#define COLOR_10BIT_MODE (1 << 1)
+#define COLOR_12BIT_MODE (2 << 1)
+#define COLOR_16BIT_MODE (3 << 1)
+#define DEEP_COLOR_EN BIT(0)
+#define HDMI_AUDIO_TEST_SEL BIT(8)
+#define HDMI2P0_EN BIT(11)
+#define HDMI_OUT_FIFO_EN BIT(16)
+#define HDMI_OUT_FIFO_CLK_INV BIT(17)
+#define MHL_MODE_ON BIT(28)
+#define MHL_PP_MODE BIT(29)
+#define MHL_SYNC_AUTO_EN BIT(30)
+#define HDMI_PCLK_FREE_RUN BIT(31)
+
+#define MTK_SIP_SET_AUTHORIZED_SECURE_REG 0x82000001
+#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
index cf8f38d39..1c366f8cb 100644
--- a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
+++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
@@ -431,7 +431,7 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
phy_set_drvdata(phy, mipi_tx);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (IS_ERR(phy)) {
+ if (IS_ERR(phy_provider)) {
ret = PTR_ERR(phy_provider);
return ret;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
new file mode 100644
index 000000000..8a24754b4
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
@@ -0,0 +1,515 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Jie Qiu <jie.qiu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#define HDMI_CON0 0x00
+#define RG_HDMITX_PLL_EN BIT(31)
+#define RG_HDMITX_PLL_FBKDIV (0x7f << 24)
+#define PLL_FBKDIV_SHIFT 24
+#define RG_HDMITX_PLL_FBKSEL (0x3 << 22)
+#define PLL_FBKSEL_SHIFT 22
+#define RG_HDMITX_PLL_PREDIV (0x3 << 20)
+#define PREDIV_SHIFT 20
+#define RG_HDMITX_PLL_POSDIV (0x3 << 18)
+#define POSDIV_SHIFT 18
+#define RG_HDMITX_PLL_RST_DLY (0x3 << 16)
+#define RG_HDMITX_PLL_IR (0xf << 12)
+#define PLL_IR_SHIFT 12
+#define RG_HDMITX_PLL_IC (0xf << 8)
+#define PLL_IC_SHIFT 8
+#define RG_HDMITX_PLL_BP (0xf << 4)
+#define PLL_BP_SHIFT 4
+#define RG_HDMITX_PLL_BR (0x3 << 2)
+#define PLL_BR_SHIFT 2
+#define RG_HDMITX_PLL_BC (0x3 << 0)
+#define PLL_BC_SHIFT 0
+#define HDMI_CON1 0x04
+#define RG_HDMITX_PLL_DIVEN (0x7 << 29)
+#define PLL_DIVEN_SHIFT 29
+#define RG_HDMITX_PLL_AUTOK_EN BIT(28)
+#define RG_HDMITX_PLL_AUTOK_KF (0x3 << 26)
+#define RG_HDMITX_PLL_AUTOK_KS (0x3 << 24)
+#define RG_HDMITX_PLL_AUTOK_LOAD BIT(23)
+#define RG_HDMITX_PLL_BAND (0x3f << 16)
+#define RG_HDMITX_PLL_REF_SEL BIT(15)
+#define RG_HDMITX_PLL_BIAS_EN BIT(14)
+#define RG_HDMITX_PLL_BIAS_LPF_EN BIT(13)
+#define RG_HDMITX_PLL_TXDIV_EN BIT(12)
+#define RG_HDMITX_PLL_TXDIV (0x3 << 10)
+#define PLL_TXDIV_SHIFT 10
+#define RG_HDMITX_PLL_LVROD_EN BIT(9)
+#define RG_HDMITX_PLL_MONVC_EN BIT(8)
+#define RG_HDMITX_PLL_MONCK_EN BIT(7)
+#define RG_HDMITX_PLL_MONREF_EN BIT(6)
+#define RG_HDMITX_PLL_TST_EN BIT(5)
+#define RG_HDMITX_PLL_TST_CK_EN BIT(4)
+#define RG_HDMITX_PLL_TST_SEL (0xf << 0)
+#define HDMI_CON2 0x08
+#define RGS_HDMITX_PLL_AUTOK_BAND (0x7f << 8)
+#define RGS_HDMITX_PLL_AUTOK_FAIL BIT(1)
+#define RG_HDMITX_EN_TX_CKLDO BIT(0)
+#define HDMI_CON3 0x0c
+#define RG_HDMITX_SER_EN (0xf << 28)
+#define RG_HDMITX_PRD_EN (0xf << 24)
+#define RG_HDMITX_PRD_IMP_EN (0xf << 20)
+#define RG_HDMITX_DRV_EN (0xf << 16)
+#define RG_HDMITX_DRV_IMP_EN (0xf << 12)
+#define DRV_IMP_EN_SHIFT 12
+#define RG_HDMITX_MHLCK_FORCE BIT(10)
+#define RG_HDMITX_MHLCK_PPIX_EN BIT(9)
+#define RG_HDMITX_MHLCK_EN BIT(8)
+#define RG_HDMITX_SER_DIN_SEL (0xf << 4)
+#define RG_HDMITX_SER_5T1_BIST_EN BIT(3)
+#define RG_HDMITX_SER_BIST_TOG BIT(2)
+#define RG_HDMITX_SER_DIN_TOG BIT(1)
+#define RG_HDMITX_SER_CLKDIG_INV BIT(0)
+#define HDMI_CON4 0x10
+#define RG_HDMITX_PRD_IBIAS_CLK (0xf << 24)
+#define RG_HDMITX_PRD_IBIAS_D2 (0xf << 16)
+#define RG_HDMITX_PRD_IBIAS_D1 (0xf << 8)
+#define RG_HDMITX_PRD_IBIAS_D0 (0xf << 0)
+#define PRD_IBIAS_CLK_SHIFT 24
+#define PRD_IBIAS_D2_SHIFT 16
+#define PRD_IBIAS_D1_SHIFT 8
+#define PRD_IBIAS_D0_SHIFT 0
+#define HDMI_CON5 0x14
+#define RG_HDMITX_DRV_IBIAS_CLK (0x3f << 24)
+#define RG_HDMITX_DRV_IBIAS_D2 (0x3f << 16)
+#define RG_HDMITX_DRV_IBIAS_D1 (0x3f << 8)
+#define RG_HDMITX_DRV_IBIAS_D0 (0x3f << 0)
+#define DRV_IBIAS_CLK_SHIFT 24
+#define DRV_IBIAS_D2_SHIFT 16
+#define DRV_IBIAS_D1_SHIFT 8
+#define DRV_IBIAS_D0_SHIFT 0
+#define HDMI_CON6 0x18
+#define RG_HDMITX_DRV_IMP_CLK (0x3f << 24)
+#define RG_HDMITX_DRV_IMP_D2 (0x3f << 16)
+#define RG_HDMITX_DRV_IMP_D1 (0x3f << 8)
+#define RG_HDMITX_DRV_IMP_D0 (0x3f << 0)
+#define DRV_IMP_CLK_SHIFT 24
+#define DRV_IMP_D2_SHIFT 16
+#define DRV_IMP_D1_SHIFT 8
+#define DRV_IMP_D0_SHIFT 0
+#define HDMI_CON7 0x1c
+#define RG_HDMITX_MHLCK_DRV_IBIAS (0x1f << 27)
+#define RG_HDMITX_SER_DIN (0x3ff << 16)
+#define RG_HDMITX_CHLDC_TST (0xf << 12)
+#define RG_HDMITX_CHLCK_TST (0xf << 8)
+#define RG_HDMITX_RESERVE (0xff << 0)
+#define HDMI_CON8 0x20
+#define RGS_HDMITX_2T1_LEV (0xf << 16)
+#define RGS_HDMITX_2T1_EDG (0xf << 12)
+#define RGS_HDMITX_5T1_LEV (0xf << 8)
+#define RGS_HDMITX_5T1_EDG (0xf << 4)
+#define RGS_HDMITX_PLUG_TST BIT(0)
+
+struct mtk_hdmi_phy {
+ void __iomem *regs;
+ struct device *dev;
+ struct clk *pll;
+ struct clk_hw pll_hw;
+ unsigned long pll_rate;
+ u8 drv_imp_clk;
+ u8 drv_imp_d2;
+ u8 drv_imp_d1;
+ u8 drv_imp_d0;
+ u32 ibias;
+ u32 ibias_up;
+};
+
+static const u8 PREDIV[3][4] = {
+ {0x0, 0x0, 0x0, 0x0}, /* 27 MHz */
+ {0x1, 0x1, 0x1, 0x1}, /* 74 MHz */
+ {0x1, 0x1, 0x1, 0x1} /* 148 MHz */
+};
+
+static const u8 TXDIV[3][4] = {
+ {0x3, 0x3, 0x3, 0x2}, /* 27 MHz */
+ {0x2, 0x1, 0x1, 0x1}, /* 74 MHz */
+ {0x1, 0x0, 0x0, 0x0} /* 148 MHz */
+};
+
+static const u8 FBKSEL[3][4] = {
+ {0x1, 0x1, 0x1, 0x1}, /* 27 MHz */
+ {0x1, 0x0, 0x1, 0x1}, /* 74 MHz */
+ {0x1, 0x0, 0x1, 0x1} /* 148 MHz */
+};
+
+static const u8 FBKDIV[3][4] = {
+ {19, 24, 29, 19}, /* 27 MHz */
+ {19, 24, 14, 19}, /* 74 MHz */
+ {19, 24, 14, 19} /* 148 MHz */
+};
+
+static const u8 DIVEN[3][4] = {
+ {0x2, 0x1, 0x1, 0x2}, /* 27 MHz */
+ {0x2, 0x2, 0x2, 0x2}, /* 74 MHz */
+ {0x2, 0x2, 0x2, 0x2} /* 148 MHz */
+};
+
+static const u8 HTPLLBP[3][4] = {
+ {0xc, 0xc, 0x8, 0xc}, /* 27 MHz */
+ {0xc, 0xf, 0xf, 0xc}, /* 74 MHz */
+ {0xc, 0xf, 0xf, 0xc} /* 148 MHz */
+};
+
+static const u8 HTPLLBC[3][4] = {
+ {0x2, 0x3, 0x3, 0x2}, /* 27 MHz */
+ {0x2, 0x3, 0x3, 0x2}, /* 74 MHz */
+ {0x2, 0x3, 0x3, 0x2} /* 148 MHz */
+};
+
+static const u8 HTPLLBR[3][4] = {
+ {0x1, 0x1, 0x0, 0x1}, /* 27 MHz */
+ {0x1, 0x2, 0x2, 0x1}, /* 74 MHz */
+ {0x1, 0x2, 0x2, 0x1} /* 148 MHz */
+};
+
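+/* Read-modify-write helpers for the analog PHY register file */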
+static void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
+ u32 bits)
+{
+ void __iomem *reg = hdmi_phy->regs + offset;
+ u32 tmp;
+
+ tmp = readl(reg);
+ tmp &= ~bits;
+ writel(tmp, reg);
+}
+
+static void mtk_hdmi_phy_set_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
+ u32 bits)
+{
+ void __iomem *reg = hdmi_phy->regs + offset;
+ u32 tmp;
+
+ tmp = readl(reg);
+ tmp |= bits;
+ writel(tmp, reg);
+}
+
+static void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
+ u32 val, u32 mask)
+{
+ void __iomem *reg = hdmi_phy->regs + offset;
+ u32 tmp;
+
+ tmp = readl(reg);
+ tmp = (tmp & ~mask) | (val & mask);
+ writel(tmp, reg);
+}
+
+static inline struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw)
+{
+ return container_of(hw, struct mtk_hdmi_phy, pll_hw);
+}
+
+static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
+{
+ struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+ dev_dbg(hdmi_phy->dev, "%s\n", __func__);
+
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_MHLCK_EN);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_EN);
+ usleep_range(100, 150);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_EN);
+ usleep_range(100, 150);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN);
+
+ return 0;
+}
+
+static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
+{
+ struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+ dev_dbg(hdmi_phy->dev, "%s\n", __func__);
+
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN);
+ usleep_range(100, 150);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_EN);
+ usleep_range(100, 150);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_EN);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN);
+ usleep_range(100, 150);
+}
+
+static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+ unsigned int pre_div;
+ unsigned int div;
+
+ dev_dbg(hdmi_phy->dev, "%s: %lu Hz, parent: %lu Hz\n", __func__,
+ rate, parent_rate);
+
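+ /*
+ * Select the pre-divider and TX divider band for the requested TMDS
+ * rate (<=27 MHz, <=74.25 MHz, above). The remaining PLL parameters
+ * written below are fixed values, presumably chosen to keep the VCO
+ * in range across all three bands.
+ */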
+ if (rate <= 27000000) {
+ pre_div = 0;
+ div = 3;
+ } else if (rate <= 74250000) {
+ pre_div = 1;
+ div = 2;
+ } else {
+ pre_div = 1;
+ div = 1;
+ }
+
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
+ (pre_div << PREDIV_SHIFT), RG_HDMITX_PLL_PREDIV);
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
+ (0x1 << PLL_IC_SHIFT) | (0x1 << PLL_IR_SHIFT),
+ RG_HDMITX_PLL_IC | RG_HDMITX_PLL_IR);
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1,
+ (div << PLL_TXDIV_SHIFT), RG_HDMITX_PLL_TXDIV);
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
+ (0x1 << PLL_FBKSEL_SHIFT) | (19 << PLL_FBKDIV_SHIFT),
+ RG_HDMITX_PLL_FBKSEL | RG_HDMITX_PLL_FBKDIV);
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1,
+ (0x2 << PLL_DIVEN_SHIFT), RG_HDMITX_PLL_DIVEN);
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
+ (0xc << PLL_BP_SHIFT) | (0x2 << PLL_BC_SHIFT) |
+ (0x1 << PLL_BR_SHIFT),
+ RG_HDMITX_PLL_BP | RG_HDMITX_PLL_BC |
+ RG_HDMITX_PLL_BR);
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_PRD_IMP_EN);
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4,
+ (0x3 << PRD_IBIAS_CLK_SHIFT) |
+ (0x3 << PRD_IBIAS_D2_SHIFT) |
+ (0x3 << PRD_IBIAS_D1_SHIFT) |
+ (0x3 << PRD_IBIAS_D0_SHIFT),
+ RG_HDMITX_PRD_IBIAS_CLK |
+ RG_HDMITX_PRD_IBIAS_D2 |
+ RG_HDMITX_PRD_IBIAS_D1 |
+ RG_HDMITX_PRD_IBIAS_D0);
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON3,
+ (0x0 << DRV_IMP_EN_SHIFT), RG_HDMITX_DRV_IMP_EN);
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6,
+ (hdmi_phy->drv_imp_clk << DRV_IMP_CLK_SHIFT) |
+ (hdmi_phy->drv_imp_d2 << DRV_IMP_D2_SHIFT) |
+ (hdmi_phy->drv_imp_d1 << DRV_IMP_D1_SHIFT) |
+ (hdmi_phy->drv_imp_d0 << DRV_IMP_D0_SHIFT),
+ RG_HDMITX_DRV_IMP_CLK | RG_HDMITX_DRV_IMP_D2 |
+ RG_HDMITX_DRV_IMP_D1 | RG_HDMITX_DRV_IMP_D0);
+ mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON5,
+ (hdmi_phy->ibias << DRV_IBIAS_CLK_SHIFT) |
+ (hdmi_phy->ibias << DRV_IBIAS_D2_SHIFT) |
+ (hdmi_phy->ibias << DRV_IBIAS_D1_SHIFT) |
+ (hdmi_phy->ibias << DRV_IBIAS_D0_SHIFT),
+ RG_HDMITX_DRV_IBIAS_CLK | RG_HDMITX_DRV_IBIAS_D2 |
+ RG_HDMITX_DRV_IBIAS_D1 | RG_HDMITX_DRV_IBIAS_D0);
+ return 0;
+}
+
+static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
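+ /*
+ * Cache the requested rate for recalc_rate(). Above 74.25 MHz the
+ * parent is asked for only half the rate; the PLL presumably makes
+ * up the remaining factor of two.
+ */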
+ hdmi_phy->pll_rate = rate;
+ if (rate <= 74250000)
+ *parent_rate = rate;
+ else
+ *parent_rate = rate / 2;
+
+ return rate;
+}
+
+static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+ return hdmi_phy->pll_rate;
+}
+
+static const struct clk_ops mtk_hdmi_pll_ops = {
+ .prepare = mtk_hdmi_pll_prepare,
+ .unprepare = mtk_hdmi_pll_unprepare,
+ .set_rate = mtk_hdmi_pll_set_rate,
+ .round_rate = mtk_hdmi_pll_round_rate,
+ .recalc_rate = mtk_hdmi_pll_recalc_rate,
+};
+
+static void mtk_hdmi_phy_enable_tmds(struct mtk_hdmi_phy *hdmi_phy)
+{
+ mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON3,
+ RG_HDMITX_SER_EN | RG_HDMITX_PRD_EN |
+ RG_HDMITX_DRV_EN);
+ usleep_range(100, 150);
+}
+
+static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
+{
+ mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3,
+ RG_HDMITX_DRV_EN | RG_HDMITX_PRD_EN |
+ RG_HDMITX_SER_EN);
+}
+
+static int mtk_hdmi_phy_power_on(struct phy *phy)
+{
+ struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(phy);
+ int ret;
+
+ ret = clk_prepare_enable(hdmi_phy->pll);
+ if (ret < 0)
+ return ret;
+
+ mtk_hdmi_phy_enable_tmds(hdmi_phy);
+
+ return 0;
+}
+
+static int mtk_hdmi_phy_power_off(struct phy *phy)
+{
+ struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(phy);
+
+ mtk_hdmi_phy_disable_tmds(hdmi_phy);
+ clk_disable_unprepare(hdmi_phy->pll);
+
+ return 0;
+}
+
+static const struct phy_ops mtk_hdmi_phy_ops = {
+ .power_on = mtk_hdmi_phy_power_on,
+ .power_off = mtk_hdmi_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int mtk_hdmi_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_hdmi_phy *hdmi_phy;
+ struct resource *mem;
+ struct clk *ref_clk;
+ const char *ref_clk_name;
+ struct clk_init_data clk_init = {
+ .ops = &mtk_hdmi_pll_ops,
+ .num_parents = 1,
+ .parent_names = (const char * const *)&ref_clk_name,
+ .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
+ };
+ struct phy *phy;
+ struct phy_provider *phy_provider;
+ int ret;
+
+ hdmi_phy = devm_kzalloc(dev, sizeof(*hdmi_phy), GFP_KERNEL);
+ if (!hdmi_phy)
+ return -ENOMEM;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hdmi_phy->regs = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(hdmi_phy->regs)) {
+ ret = PTR_ERR(hdmi_phy->regs);
+ dev_err(dev, "Failed to get memory resource: %d\n", ret);
+ return ret;
+ }
+
+ ref_clk = devm_clk_get(dev, "pll_ref");
+ if (IS_ERR(ref_clk)) {
+ ret = PTR_ERR(ref_clk);
+ dev_err(&pdev->dev, "Failed to get PLL reference clock: %d\n",
+ ret);
+ return ret;
+ }
+ ref_clk_name = __clk_get_name(ref_clk);
+
+ ret = of_property_read_string(dev->of_node, "clock-output-names",
+ &clk_init.name);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read clock-output-names: %d\n", ret);
+ return ret;
+ }
+
+ hdmi_phy->pll_hw.init = &clk_init;
+ hdmi_phy->pll = devm_clk_register(dev, &hdmi_phy->pll_hw);
+ if (IS_ERR(hdmi_phy->pll)) {
+ ret = PTR_ERR(hdmi_phy->pll);
+ dev_err(dev, "Failed to register PLL: %d\n", ret);
+ return ret;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "mediatek,ibias",
+ &hdmi_phy->ibias);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to get ibias: %d\n", ret);
+ return ret;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "mediatek,ibias_up",
+ &hdmi_phy->ibias_up);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to get ibias up: %d\n", ret);
+ return ret;
+ }
+
+ dev_info(dev, "Using default TX DRV impedance: 4.2k/36\n");
+ hdmi_phy->drv_imp_clk = 0x30;
+ hdmi_phy->drv_imp_d2 = 0x30;
+ hdmi_phy->drv_imp_d1 = 0x30;
+ hdmi_phy->drv_imp_d0 = 0x30;
+
+ phy = devm_phy_create(dev, NULL, &mtk_hdmi_phy_ops);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "Failed to create HDMI PHY\n");
+ return PTR_ERR(phy);
+ }
+ phy_set_drvdata(phy, hdmi_phy);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ hdmi_phy->dev = dev;
+ return of_clk_add_provider(dev->of_node, of_clk_src_simple_get,
+ hdmi_phy->pll);
+}
+
+static int mtk_hdmi_phy_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static const struct of_device_id mtk_hdmi_phy_match[] = {
+ { .compatible = "mediatek,mt8173-hdmi-phy", },
+ {},
+};
+
+struct platform_driver mtk_hdmi_phy_driver = {
+ .probe = mtk_hdmi_phy_probe,
+ .remove = mtk_hdmi_phy_remove,
+ .driver = {
+ .name = "mediatek-hdmi-phy",
+ .of_match_table = mtk_hdmi_phy_match,
+ },
+};
+
+MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek MT8173 HDMI PHY Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
index 3a1c5fbae..520e5e668 100644
--- a/drivers/gpu/drm/mgag200/Kconfig
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -1,11 +1,7 @@
config DRM_MGAG200
tristate "Kernel modesetting driver for MGA G200 server engines"
depends on DRM && PCI
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_TTM
help
This is a KMS driver for the MGA G200 server chips, it
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index ebb470ff7..2b4b125ee 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -101,7 +101,7 @@ static struct drm_driver driver = {
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
- .gem_free_object = mgag200_gem_free_object,
+ .gem_free_object_unlocked = mgag200_gem_free_object,
.dumb_create = mgag200_dumb_create,
.dumb_map_offset = mgag200_dumb_mmap_offset,
.dumb_destroy = drm_gem_dumb_destroy,
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 615cbb08b..13798b3e6 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -17,8 +17,8 @@
static void mga_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct mga_framebuffer *mga_fb = to_mga_framebuffer(fb);
- if (mga_fb->obj)
- drm_gem_object_unreference_unlocked(mga_fb->obj);
+
+ drm_gem_object_unreference_unlocked(mga_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index d347dca17..6b21cb27e 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1352,19 +1352,20 @@ static void mga_crtc_commit(struct drm_crtc *crtc)
* use this for 8-bit mode so can't perform smooth fades on deeper modes,
* but it's a requirement that we provide the function
*/
-static void mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t start, uint32_t size)
+static int mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
{
struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
- int end = (start + size > MGAG200_LUT_SIZE) ? MGAG200_LUT_SIZE : start + size;
int i;
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
mga_crtc->lut_r[i] = red[i] >> 8;
mga_crtc->lut_g[i] = green[i] >> 8;
mga_crtc->lut_b[i] = blue[i] >> 8;
}
mga_crtc_load_lut(crtc);
+
+ return 0;
}
/* Simple cleanup function */
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 9d5083d0f..68268e55d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -186,17 +186,6 @@ static void mgag200_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_r
{
}
-static int mgag200_bo_move(struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
-{
- int r;
- r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
- return r;
-}
-
-
static void mgag200_ttm_backend_destroy(struct ttm_tt *tt)
{
ttm_tt_fini(tt);
@@ -241,7 +230,7 @@ struct ttm_bo_driver mgag200_bo_driver = {
.ttm_tt_unpopulate = mgag200_ttm_tt_unpopulate,
.init_mem_type = mgag200_bo_init_mem_type,
.evict_flags = mgag200_bo_evict_flags,
- .move = mgag200_bo_move,
+ .move = NULL,
.verify_access = mgag200_bo_verify_access,
.io_mem_reserve = &mgag200_ttm_io_mem_reserve,
.io_mem_free = &mgag200_ttm_io_mem_free,
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 167a4971f..7c7a0314a 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -10,6 +10,7 @@ config DRM_MSM
select SHMEM
select TMPFS
select QCOM_SCM
+ select SND_SOC_HDMI_CODEC if SND_SOC
default y
help
DRM/KMS driver for MSM/snapdragon.
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 60cb02624..4e2806cf7 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -35,6 +35,7 @@ msm-y := \
mdp/mdp5/mdp5_crtc.o \
mdp/mdp5/mdp5_encoder.o \
mdp/mdp5/mdp5_irq.o \
+ mdp/mdp5/mdp5_mdss.o \
mdp/mdp5/mdp5_kms.o \
mdp/mdp5/mdp5_plane.o \
mdp/mdp5/mdp5_smp.o \
@@ -45,6 +46,7 @@ msm-y := \
msm_fence.o \
msm_gem.o \
msm_gem_prime.o \
+ msm_gem_shrinker.o \
msm_gem_submit.o \
msm_gpu.o \
msm_iommu.o \
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 7e25f94af..6613bad06 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -139,7 +139,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_ringbuffer *ring = gpu->rb;
- unsigned i, ibs = 0;
+ unsigned i;
for (i = 0; i < submit->nr_cmds; i++) {
switch (submit->cmd[i].type) {
@@ -155,18 +155,11 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
OUT_RING(ring, submit->cmd[i].iova);
OUT_RING(ring, submit->cmd[i].size);
- ibs++;
+ OUT_PKT2(ring);
break;
}
}
- /* on a320, at least, we seem to need to pad things out to an
- * even number of qwords to avoid issue w/ CP hanging on wrap-
- * around:
- */
- if (ibs % 2)
- OUT_PKT2(ring);
-
OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
OUT_RING(ring, submit->fence->seqno);
@@ -407,7 +400,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return ret;
}
- adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo);
+ adreno_gpu->memptrs = msm_gem_get_vaddr(adreno_gpu->memptrs_bo);
if (IS_ERR(adreno_gpu->memptrs)) {
dev_err(drm->dev, "could not vmap memptrs\n");
return -ENOMEM;
@@ -426,8 +419,12 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
void adreno_gpu_cleanup(struct adreno_gpu *gpu)
{
if (gpu->memptrs_bo) {
+ if (gpu->memptrs)
+ msm_gem_put_vaddr(gpu->memptrs_bo);
+
if (gpu->memptrs_iova)
msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
+
drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
}
release_firmware(gpu->pm4);
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 6edcd6f57..ec572f838 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -29,7 +29,7 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
struct platform_device *phy_pdev;
struct device_node *phy_node;
- phy_node = of_parse_phandle(pdev->dev.of_node, "qcom,dsi-phy", 0);
+ phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0);
if (!phy_node) {
dev_err(&pdev->dev, "cannot find phy device\n");
return -ENXIO;
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 93c1ee094..63436d8ee 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -29,6 +29,8 @@ static const struct msm_dsi_config apq8064_dsi_cfg = {
},
.bus_clk_names = dsi_v2_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_v2_bus_clk_names),
+ .io_start = { 0x4700000, 0x5800000 },
+ .num_dsi = 2,
};
static const char * const dsi_6g_bus_clk_names[] = {
@@ -48,6 +50,8 @@ static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
},
.bus_clk_names = dsi_6g_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
+ .io_start = { 0xfd922800, 0xfd922b00 },
+ .num_dsi = 2,
};
static const char * const dsi_8916_bus_clk_names[] = {
@@ -66,6 +70,8 @@ static const struct msm_dsi_config msm8916_dsi_cfg = {
},
.bus_clk_names = dsi_8916_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_8916_bus_clk_names),
+ .io_start = { 0x1a98000 },
+ .num_dsi = 1,
};
static const struct msm_dsi_config msm8994_dsi_cfg = {
@@ -84,6 +90,8 @@ static const struct msm_dsi_config msm8994_dsi_cfg = {
},
.bus_clk_names = dsi_6g_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
+ .io_start = { 0xfd998000, 0xfd9a0000 },
+ .num_dsi = 2,
};
static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index a68c83674..eeacc3232 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -34,6 +34,8 @@ struct msm_dsi_config {
struct dsi_reg_config reg_cfg;
const char * const *bus_clk_names;
const int num_bus_clks;
+ const resource_size_t io_start[DSI_MAX];
+ const int num_dsi;
};
struct msm_dsi_cfg_handler {
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index a3e47ad83..f05ed0e1f 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1066,7 +1066,7 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
}
if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
- data = msm_gem_vaddr(msm_host->tx_gem_obj);
+ data = msm_gem_get_vaddr(msm_host->tx_gem_obj);
if (IS_ERR(data)) {
ret = PTR_ERR(data);
pr_err("%s: get vaddr failed, %d\n", __func__, ret);
@@ -1094,6 +1094,9 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
if (packet.size < len)
memset(data + packet.size, 0xff, len - packet.size);
+ if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
+ msm_gem_put_vaddr(msm_host->tx_gem_obj);
+
return len;
}
@@ -1543,7 +1546,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
u32 lane_map[4];
int ret, i, len, num_lanes;
- prop = of_find_property(ep, "qcom,data-lane-map", &len);
+ prop = of_find_property(ep, "data-lanes", &len);
if (!prop) {
dev_dbg(dev, "failed to find data lane mapping\n");
return -EINVAL;
@@ -1558,7 +1561,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
msm_host->num_data_lanes = num_lanes;
- ret = of_property_read_u32_array(ep, "qcom,data-lane-map", lane_map,
+ ret = of_property_read_u32_array(ep, "data-lanes", lane_map,
num_lanes);
if (ret) {
dev_err(dev, "failed to read lane data\n");
@@ -1573,8 +1576,19 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
const int *swap = supported_data_lane_swaps[i];
int j;
+ /*
+ * the data-lanes array we get from DT has a logical->physical
+ * mapping. The "data lane swap" register field represents
+ * supported configurations in a physical->logical mapping.
+ * Translate the DT mapping to what we understand and find a
+ * configuration that works.
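+ * E.g. (hypothetical DT) data-lanes = <3 0 1 2> maps logical lane 0
+ * to physical lane 3, and matches a swap table where swap[3] == 0,
+ * swap[0] == 1, and so on.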
+ */
for (j = 0; j < num_lanes; j++) {
- if (swap[j] != lane_map[j])
+ if (lane_map[j] > 3) {
+ dev_err(dev, "bad physical lane entry %u\n",
+ lane_map[j]);
+ return -EINVAL;
+ }
+
+ if (swap[lane_map[j]] != j)
break;
}
@@ -1594,20 +1608,13 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
struct device_node *endpoint, *device_node;
int ret;
- ret = of_property_read_u32(np, "qcom,dsi-host-index", &msm_host->id);
- if (ret) {
- dev_err(dev, "%s: host index not specified, ret=%d\n",
- __func__, ret);
- return ret;
- }
-
/*
- * Get the first endpoint node. In our case, dsi has one output port
- * to which the panel is connected. Don't return an error if a port
- * isn't defined. It's possible that there is nothing connected to
- * the dsi output.
+ * Get the endpoint of the output port of the DSI host. In our case,
+ * this is mapped to port number with reg = 1. Don't return an error if
+ * the remote endpoint isn't defined. It's possible that there is
+ * nothing connected to the dsi output.
*/
- endpoint = of_graph_get_next_endpoint(np, NULL);
+ endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
if (!endpoint) {
dev_dbg(dev, "%s: no endpoint\n", __func__);
return 0;
@@ -1648,6 +1655,25 @@ err:
return ret;
}
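+
+/*
+ * Derive the host index from the controller's register base: each SoC
+ * config lists its known "dsi_ctrl" bases in io_start[], which replaces
+ * the removed "qcom,dsi-host-index" DT property.
+ */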
+static int dsi_host_get_id(struct msm_dsi_host *msm_host)
+{
+ struct platform_device *pdev = msm_host->pdev;
+ const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
+ struct resource *res;
+ int i;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_ctrl");
+ if (!res)
+ return -EINVAL;
+
+ for (i = 0; i < cfg->num_dsi; i++) {
+ if (cfg->io_start[i] == res->start)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
int msm_dsi_host_init(struct msm_dsi *msm_dsi)
{
struct msm_dsi_host *msm_host = NULL;
@@ -1684,6 +1710,13 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
goto fail;
}
+ msm_host->id = dsi_host_get_id(msm_host);
+ if (msm_host->id < 0) {
+ ret = msm_host->id;
+ pr_err("%s: unable to identify DSI host index\n", __func__);
+ goto fail;
+ }
+
/* fixup base address by io offset */
msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset;
@@ -2245,9 +2278,9 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
}
msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
- if (IS_ERR(msm_host->mode)) {
+ if (!msm_host->mode) {
pr_err("%s: cannot duplicate mode\n", __func__);
- return PTR_ERR(msm_host->mode);
+ return -ENOMEM;
}
return 0;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index e2f42d8ea..f39386ed7 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -271,6 +271,30 @@ static const struct of_device_id dsi_phy_dt_match[] = {
{}
};
+/*
+ * Currently, we only support one SoC for each PHY type. When we have multiple
+ * SoCs for the same PHY, we can try to make the index searching a bit more
+ * clever.
+ */
+static int dsi_phy_get_id(struct msm_dsi_phy *phy)
+{
+ struct platform_device *pdev = phy->pdev;
+ const struct msm_dsi_phy_cfg *cfg = phy->cfg;
+ struct resource *res;
+ int i;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_phy");
+ if (!res)
+ return -EINVAL;
+
+ for (i = 0; i < cfg->num_dsi_phy; i++) {
+ if (cfg->io_start[i] == res->start)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
static int dsi_phy_driver_probe(struct platform_device *pdev)
{
struct msm_dsi_phy *phy;
@@ -289,10 +313,10 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->cfg = match->data;
phy->pdev = pdev;
- ret = of_property_read_u32(dev->of_node,
- "qcom,dsi-phy-index", &phy->id);
- if (ret) {
- dev_err(dev, "%s: PHY index not specified, %d\n",
+ phy->id = dsi_phy_get_id(phy);
+ if (phy->id < 0) {
+ ret = phy->id;
+ dev_err(dev, "%s: couldn't identify PHY index, %d\n",
__func__, ret);
goto fail;
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index 0d54ed003..f24a85439 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -38,6 +38,8 @@ struct msm_dsi_phy_cfg {
* Fill default H/W values in illegal cells, eg. cell {0, 1}.
*/
bool src_pll_truthtable[DSI_MAX][DSI_MAX];
+ const resource_size_t io_start[DSI_MAX];
+ const int num_dsi_phy;
};
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
index f4bc11af8..c757e2070 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
@@ -145,6 +145,8 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
.ops = {
.enable = dsi_20nm_phy_enable,
.disable = dsi_20nm_phy_disable,
- }
+ },
+ .io_start = { 0xfd998300, 0xfd9a0300 },
+ .num_dsi_phy = 2,
};
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
index 96d1852af..63d7fba31 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -145,6 +145,8 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
},
+ .io_start = { 0xfd922b00, 0xfd923100 },
+ .num_dsi_phy = 2,
};
const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
@@ -160,5 +162,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
},
+ .io_start = { 0x1a98500 },
+ .num_dsi_phy = 1,
};
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
index 213355a3e..7bdb9de54 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -192,4 +192,6 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
},
+ .io_start = { 0x4700300, 0x5800300 },
+ .num_dsi_phy = 2,
};
diff --git a/drivers/gpu/drm/msm/edp/edp_connector.c b/drivers/gpu/drm/msm/edp/edp_connector.c
index 72360cd03..5960628ce 100644
--- a/drivers/gpu/drm/msm/edp/edp_connector.c
+++ b/drivers/gpu/drm/msm/edp/edp_connector.c
@@ -91,15 +91,6 @@ static int edp_connector_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static struct drm_encoder *
-edp_connector_best_encoder(struct drm_connector *connector)
-{
- struct edp_connector *edp_connector = to_edp_connector(connector);
-
- DBG("");
- return edp_connector->edp->encoder;
-}
-
static const struct drm_connector_funcs edp_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.detect = edp_connector_detect,
@@ -113,7 +104,6 @@ static const struct drm_connector_funcs edp_connector_funcs = {
static const struct drm_connector_helper_funcs edp_connector_helper_funcs = {
.get_modes = edp_connector_get_modes,
.mode_valid = edp_connector_mode_valid,
- .best_encoder = edp_connector_best_encoder,
};
/* initialize connector */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 51b9ea552..973720792 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -19,6 +19,7 @@
#include <linux/of_irq.h>
#include <linux/of_gpio.h>
+#include <sound/hdmi-codec.h>
#include "hdmi.h"
void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on)
@@ -434,6 +435,111 @@ static int msm_hdmi_get_gpio(struct device_node *of_node, const char *name)
return gpio;
}
+/*
+ * HDMI audio codec callbacks
+ */
+static int msm_hdmi_audio_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
+{
+ struct hdmi *hdmi = dev_get_drvdata(dev);
+ unsigned int chan;
+ unsigned int channel_allocation = 0;
+ unsigned int rate;
+ unsigned int level_shift = 0; /* 0dB */
+ bool down_mix = false;
+
+ dev_dbg(dev, "%u Hz, %d bit, %d channels\n", params->sample_rate,
+ params->sample_width, params->cea.channels);
+
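+ /*
+ * Translate the CEA channel count into the controller's channel code
+ * and an Audio InfoFrame speaker allocation value.
+ */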
+ switch (params->cea.channels) {
+ case 2:
+ /* FR and FL speakers */
+ channel_allocation = 0;
+ chan = MSM_HDMI_AUDIO_CHANNEL_2;
+ break;
+ case 4:
+ /* FC, LFE, FR and FL speakers */
+ channel_allocation = 0x3;
+ chan = MSM_HDMI_AUDIO_CHANNEL_4;
+ break;
+ case 6:
+ /* RR, RL, FC, LFE, FR and FL speakers */
+ channel_allocation = 0x0B;
+ chan = MSM_HDMI_AUDIO_CHANNEL_6;
+ break;
+ case 8:
+ /* FRC, FLC, RR, RL, FC, LFE, FR and FL speakers */
+ channel_allocation = 0x1F;
+ chan = MSM_HDMI_AUDIO_CHANNEL_8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (params->sample_rate) {
+ case 32000:
+ rate = HDMI_SAMPLE_RATE_32KHZ;
+ break;
+ case 44100:
+ rate = HDMI_SAMPLE_RATE_44_1KHZ;
+ break;
+ case 48000:
+ rate = HDMI_SAMPLE_RATE_48KHZ;
+ break;
+ case 88200:
+ rate = HDMI_SAMPLE_RATE_88_2KHZ;
+ break;
+ case 96000:
+ rate = HDMI_SAMPLE_RATE_96KHZ;
+ break;
+ case 176400:
+ rate = HDMI_SAMPLE_RATE_176_4KHZ;
+ break;
+ case 192000:
+ rate = HDMI_SAMPLE_RATE_192KHZ;
+ break;
+ default:
+ dev_err(dev, "rate[%d] not supported!\n",
+ params->sample_rate);
+ return -EINVAL;
+ }
+
+ msm_hdmi_audio_set_sample_rate(hdmi, rate);
+ msm_hdmi_audio_info_setup(hdmi, 1, chan, channel_allocation,
+ level_shift, down_mix);
+
+ return 0;
+}
+
+static void msm_hdmi_audio_shutdown(struct device *dev, void *data)
+{
+ struct hdmi *hdmi = dev_get_drvdata(dev);
+
+ msm_hdmi_audio_info_setup(hdmi, 0, 0, 0, 0, 0);
+}
+
+static const struct hdmi_codec_ops msm_hdmi_audio_codec_ops = {
+ .hw_params = msm_hdmi_audio_hw_params,
+ .audio_shutdown = msm_hdmi_audio_shutdown,
+};
+
+static struct hdmi_codec_pdata codec_data = {
+ .ops = &msm_hdmi_audio_codec_ops,
+ .max_i2s_channels = 8,
+ .i2s = 1,
+};
+
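+/*
+ * Register an ASoC hdmi-codec platform device so the HDMI link shows up
+ * as an audio DAI; the ops above drive the controller's audio path.
+ */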
+static int msm_hdmi_register_audio_driver(struct hdmi *hdmi, struct device *dev)
+{
+ hdmi->audio_pdev = platform_device_register_data(dev,
+ HDMI_CODEC_DRV_NAME,
+ PLATFORM_DEVID_AUTO,
+ &codec_data,
+ sizeof(codec_data));
+ return PTR_ERR_OR_ZERO(hdmi->audio_pdev);
+}
+
static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
{
struct drm_device *drm = dev_get_drvdata(master);
@@ -441,7 +547,7 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
static struct hdmi_platform_config *hdmi_cfg;
struct hdmi *hdmi;
struct device_node *of_node = dev->of_node;
- int i;
+ int i, err;
hdmi_cfg = (struct hdmi_platform_config *)
of_device_get_match_data(dev);
@@ -468,6 +574,12 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(hdmi);
priv->hdmi = hdmi;
+ err = msm_hdmi_register_audio_driver(hdmi, dev);
+ if (err) {
+ DRM_ERROR("Failed to attach an audio codec %d\n", err);
+ hdmi->audio_pdev = NULL;
+ }
+
return 0;
}
@@ -477,6 +589,9 @@ static void msm_hdmi_unbind(struct device *dev, struct device *master,
struct drm_device *drm = dev_get_drvdata(master);
struct msm_drm_private *priv = drm->dev_private;
if (priv->hdmi) {
+ if (priv->hdmi->audio_pdev)
+ platform_device_unregister(priv->hdmi->audio_pdev);
+
msm_hdmi_destroy(priv->hdmi);
priv->hdmi = NULL;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index bc7ba0bde..accc9a616 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -50,6 +50,7 @@ struct hdmi_hdcp_ctrl;
struct hdmi {
struct drm_device *dev;
struct platform_device *pdev;
+ struct platform_device *audio_pdev;
const struct hdmi_platform_config *config;
@@ -210,6 +211,19 @@ static inline int msm_hdmi_pll_8996_init(struct platform_device *pdev)
/*
* audio:
*/
+/* Supported HDMI Audio channels and rates */
+#define MSM_HDMI_AUDIO_CHANNEL_2 0
+#define MSM_HDMI_AUDIO_CHANNEL_4 1
+#define MSM_HDMI_AUDIO_CHANNEL_6 2
+#define MSM_HDMI_AUDIO_CHANNEL_8 3
+
+#define HDMI_SAMPLE_RATE_32KHZ 0
+#define HDMI_SAMPLE_RATE_44_1KHZ 1
+#define HDMI_SAMPLE_RATE_48KHZ 2
+#define HDMI_SAMPLE_RATE_88_2KHZ 3
+#define HDMI_SAMPLE_RATE_96KHZ 4
+#define HDMI_SAMPLE_RATE_176_4KHZ 5
+#define HDMI_SAMPLE_RATE_192KHZ 6
int msm_hdmi_audio_update(struct hdmi *hdmi);
int msm_hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index b15d72683..a2515b466 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -406,13 +406,6 @@ static int msm_hdmi_connector_mode_valid(struct drm_connector *connector,
return 0;
}
-static struct drm_encoder *
-msm_hdmi_connector_best_encoder(struct drm_connector *connector)
-{
- struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
- return hdmi_connector->hdmi->encoder;
-}
-
static const struct drm_connector_funcs hdmi_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.detect = hdmi_connector_detect,
@@ -426,7 +419,6 @@ static const struct drm_connector_funcs hdmi_connector_funcs = {
static const struct drm_connector_helper_funcs msm_hdmi_connector_helper_funcs = {
.get_modes = msm_hdmi_connector_get_modes,
.mode_valid = msm_hdmi_connector_mode_valid,
- .best_encoder = msm_hdmi_connector_best_encoder,
};
/* initialize connector */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
index 0baaaaabd..6e767979a 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
@@ -1430,7 +1430,7 @@ struct hdmi_hdcp_ctrl *msm_hdmi_hdcp_init(struct hdmi *hdmi)
void msm_hdmi_hdcp_destroy(struct hdmi *hdmi)
{
- if (hdmi && hdmi->hdcp_ctrl) {
+ if (hdmi) {
kfree(hdmi->hdcp_ctrl);
hdmi->hdcp_ctrl = NULL;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
index 35ad78a1d..24258e302 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
@@ -23,7 +23,6 @@
struct mdp4_dtv_encoder {
struct drm_encoder base;
- struct clk *src_clk;
struct clk *hdmi_clk;
struct clk *mdp_clk;
unsigned long int pixclock;
@@ -179,7 +178,6 @@ static void mdp4_dtv_encoder_disable(struct drm_encoder *encoder)
*/
mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_EXTERNAL_VSYNC);
- clk_disable_unprepare(mdp4_dtv_encoder->src_clk);
clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk);
clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk);
@@ -208,19 +206,21 @@ static void mdp4_dtv_encoder_enable(struct drm_encoder *encoder)
bs_set(mdp4_dtv_encoder, 1);
- DBG("setting src_clk=%lu", pc);
+ DBG("setting mdp_clk=%lu", pc);
- ret = clk_set_rate(mdp4_dtv_encoder->src_clk, pc);
+ ret = clk_set_rate(mdp4_dtv_encoder->mdp_clk, pc);
if (ret)
- dev_err(dev->dev, "failed to set src_clk to %lu: %d\n", pc, ret);
- clk_prepare_enable(mdp4_dtv_encoder->src_clk);
- ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
- if (ret)
- dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
+ dev_err(dev->dev, "failed to set mdp_clk to %lu: %d\n",
+ pc, ret);
+
ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk);
if (ret)
dev_err(dev->dev, "failed to enabled mdp_clk: %d\n", ret);
+ ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
+ if (ret)
+ dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
+
mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1);
mdp4_dtv_encoder->enabled = true;
@@ -235,7 +235,7 @@ static const struct drm_encoder_helper_funcs mdp4_dtv_encoder_helper_funcs = {
long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
{
struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
- return clk_round_rate(mdp4_dtv_encoder->src_clk, rate);
+ return clk_round_rate(mdp4_dtv_encoder->mdp_clk, rate);
}
/* initialize encoder */
@@ -257,13 +257,6 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs);
- mdp4_dtv_encoder->src_clk = devm_clk_get(dev->dev, "src_clk");
- if (IS_ERR(mdp4_dtv_encoder->src_clk)) {
- dev_err(dev->dev, "failed to get src_clk\n");
- ret = PTR_ERR(mdp4_dtv_encoder->src_clk);
- goto fail;
- }
-
mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) {
dev_err(dev->dev, "failed to get hdmi_clk\n");
@@ -271,9 +264,9 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
goto fail;
}
- mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "mdp_clk");
+ mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "tv_clk");
if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) {
- dev_err(dev->dev, "failed to get mdp_clk\n");
+ dev_err(dev->dev, "failed to get tv_clk\n");
ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk);
goto fail;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 67442d50a..7b39e89fb 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -106,31 +106,27 @@ out:
static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
- int i, ncrtcs = state->dev->mode_config.num_crtc;
+ int i;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
mdp4_enable(mdp4_kms);
/* see 119ecb7fd */
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc *crtc = state->crtcs[i];
- if (!crtc)
- continue;
+ for_each_crtc_in_state(state, crtc, crtc_state, i)
drm_crtc_vblank_get(crtc);
- }
}
static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
- int i, ncrtcs = state->dev->mode_config.num_crtc;
+ int i;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
/* see 119ecb7fd */
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc *crtc = state->crtcs[i];
- if (!crtc)
- continue;
+ for_each_crtc_in_state(state, crtc, crtc_state, i)
drm_crtc_vblank_put(crtc);
- }
mdp4_disable(mdp4_kms);
}
@@ -162,6 +158,7 @@ static const char * const iommu_ports[] = {
static void mdp4_destroy(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ struct device *dev = mdp4_kms->dev->dev;
struct msm_mmu *mmu = mdp4_kms->mmu;
if (mmu) {
@@ -171,8 +168,11 @@ static void mdp4_destroy(struct msm_kms *kms)
if (mdp4_kms->blank_cursor_iova)
msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
- if (mdp4_kms->blank_cursor_bo)
- drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
+ drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
+
+ if (mdp4_kms->rpm_enabled)
+ pm_runtime_disable(dev);
+
kfree(mdp4_kms);
}
@@ -440,7 +440,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
struct mdp4_kms *mdp4_kms;
struct msm_kms *kms = NULL;
struct msm_mmu *mmu;
- int ret;
+ int irq, ret;
mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
if (!mdp4_kms) {
@@ -461,6 +461,15 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail;
}
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ dev_err(dev->dev, "failed to get irq: %d\n", ret);
+ goto fail;
+ }
+
+ kms->irq = irq;
+
/* NOTE: driver for this regulator still missing upstream.. use
* _get_exclusive() and ignore the error if it does not exist
* (and hope that the bootloader left it on for us)
@@ -496,7 +505,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail;
}
- mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "mdp_axi_clk");
+ mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
if (IS_ERR(mdp4_kms->axi_clk)) {
dev_err(dev->dev, "failed to get axi_clk\n");
ret = PTR_ERR(mdp4_kms->axi_clk);
@@ -506,6 +515,9 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
clk_set_rate(mdp4_kms->clk, config->max_clk);
clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
+ pm_runtime_enable(dev->dev);
+ mdp4_kms->rpm_enabled = true;
+
/* make sure things are off before attaching iommu (bootloader could
* have left things on, in which case we'll start getting faults if
* we don't disable):
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index c5d045d56..25fb83997 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -47,6 +47,8 @@ struct mdp4_kms {
struct mdp_irq error_handler;
+ bool rpm_enabled;
+
/* empty/blank cursor bo to use when cursor is "disabled" */
struct drm_gem_object *blank_cursor_bo;
uint32_t blank_cursor_iova;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
index 2648cd763..353429b05 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
@@ -90,14 +90,6 @@ static int mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static struct drm_encoder *
-mdp4_lvds_connector_best_encoder(struct drm_connector *connector)
-{
- struct mdp4_lvds_connector *mdp4_lvds_connector =
- to_mdp4_lvds_connector(connector);
- return mdp4_lvds_connector->encoder;
-}
-
static const struct drm_connector_funcs mdp4_lvds_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.detect = mdp4_lvds_connector_detect,
@@ -111,7 +103,6 @@ static const struct drm_connector_funcs mdp4_lvds_connector_funcs = {
static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs = {
.get_modes = mdp4_lvds_connector_get_modes,
.mode_valid = mdp4_lvds_connector_mode_valid,
- .best_encoder = mdp4_lvds_connector_best_encoder,
};
/* initialize connector */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index b275ce11b..ca6ca3065 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -8,19 +8,11 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
-
-Copyright (C) 2013-2015 by the following authors:
+- /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-05-10 05:06:30)
+- /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-05-09 06:32:54)
+- /local/mnt/workspace/source_trees/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2016-01-07 08:45:55)
+
+Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -198,118 +190,109 @@ static inline uint32_t MDSS_HW_VERSION_MAJOR(uint32_t val)
#define MDSS_HW_INTR_STATUS_INTR_HDMI 0x00000100
#define MDSS_HW_INTR_STATUS_INTR_EDP 0x00001000
-static inline uint32_t __offset_MDP(uint32_t idx)
-{
- switch (idx) {
- case 0: return (mdp5_cfg->mdp.base[0]);
- default: return INVALID_IDX(idx);
- }
-}
-static inline uint32_t REG_MDP5_MDP(uint32_t i0) { return 0x00000000 + __offset_MDP(i0); }
-
-static inline uint32_t REG_MDP5_MDP_HW_VERSION(uint32_t i0) { return 0x00000000 + __offset_MDP(i0); }
-#define MDP5_MDP_HW_VERSION_STEP__MASK 0x0000ffff
-#define MDP5_MDP_HW_VERSION_STEP__SHIFT 0
-static inline uint32_t MDP5_MDP_HW_VERSION_STEP(uint32_t val)
+#define REG_MDP5_HW_VERSION 0x00000000
+#define MDP5_HW_VERSION_STEP__MASK 0x0000ffff
+#define MDP5_HW_VERSION_STEP__SHIFT 0
+static inline uint32_t MDP5_HW_VERSION_STEP(uint32_t val)
{
- return ((val) << MDP5_MDP_HW_VERSION_STEP__SHIFT) & MDP5_MDP_HW_VERSION_STEP__MASK;
+ return ((val) << MDP5_HW_VERSION_STEP__SHIFT) & MDP5_HW_VERSION_STEP__MASK;
}
-#define MDP5_MDP_HW_VERSION_MINOR__MASK 0x0fff0000
-#define MDP5_MDP_HW_VERSION_MINOR__SHIFT 16
-static inline uint32_t MDP5_MDP_HW_VERSION_MINOR(uint32_t val)
+#define MDP5_HW_VERSION_MINOR__MASK 0x0fff0000
+#define MDP5_HW_VERSION_MINOR__SHIFT 16
+static inline uint32_t MDP5_HW_VERSION_MINOR(uint32_t val)
{
- return ((val) << MDP5_MDP_HW_VERSION_MINOR__SHIFT) & MDP5_MDP_HW_VERSION_MINOR__MASK;
+ return ((val) << MDP5_HW_VERSION_MINOR__SHIFT) & MDP5_HW_VERSION_MINOR__MASK;
}
-#define MDP5_MDP_HW_VERSION_MAJOR__MASK 0xf0000000
-#define MDP5_MDP_HW_VERSION_MAJOR__SHIFT 28
-static inline uint32_t MDP5_MDP_HW_VERSION_MAJOR(uint32_t val)
+#define MDP5_HW_VERSION_MAJOR__MASK 0xf0000000
+#define MDP5_HW_VERSION_MAJOR__SHIFT 28
+static inline uint32_t MDP5_HW_VERSION_MAJOR(uint32_t val)
{
- return ((val) << MDP5_MDP_HW_VERSION_MAJOR__SHIFT) & MDP5_MDP_HW_VERSION_MAJOR__MASK;
+ return ((val) << MDP5_HW_VERSION_MAJOR__SHIFT) & MDP5_HW_VERSION_MAJOR__MASK;
}
-static inline uint32_t REG_MDP5_MDP_DISP_INTF_SEL(uint32_t i0) { return 0x00000004 + __offset_MDP(i0); }
-#define MDP5_MDP_DISP_INTF_SEL_INTF0__MASK 0x000000ff
-#define MDP5_MDP_DISP_INTF_SEL_INTF0__SHIFT 0
-static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF0(enum mdp5_intf_type val)
+#define REG_MDP5_DISP_INTF_SEL 0x00000004
+#define MDP5_DISP_INTF_SEL_INTF0__MASK 0x000000ff
+#define MDP5_DISP_INTF_SEL_INTF0__SHIFT 0
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF0(enum mdp5_intf_type val)
{
- return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF0__MASK;
+ return ((val) << MDP5_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_DISP_INTF_SEL_INTF0__MASK;
}
-#define MDP5_MDP_DISP_INTF_SEL_INTF1__MASK 0x0000ff00
-#define MDP5_MDP_DISP_INTF_SEL_INTF1__SHIFT 8
-static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF1(enum mdp5_intf_type val)
+#define MDP5_DISP_INTF_SEL_INTF1__MASK 0x0000ff00
+#define MDP5_DISP_INTF_SEL_INTF1__SHIFT 8
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF1(enum mdp5_intf_type val)
{
- return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF1__MASK;
+ return ((val) << MDP5_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_DISP_INTF_SEL_INTF1__MASK;
}
-#define MDP5_MDP_DISP_INTF_SEL_INTF2__MASK 0x00ff0000
-#define MDP5_MDP_DISP_INTF_SEL_INTF2__SHIFT 16
-static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF2(enum mdp5_intf_type val)
+#define MDP5_DISP_INTF_SEL_INTF2__MASK 0x00ff0000
+#define MDP5_DISP_INTF_SEL_INTF2__SHIFT 16
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF2(enum mdp5_intf_type val)
{
- return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF2__MASK;
+ return ((val) << MDP5_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_DISP_INTF_SEL_INTF2__MASK;
}
-#define MDP5_MDP_DISP_INTF_SEL_INTF3__MASK 0xff000000
-#define MDP5_MDP_DISP_INTF_SEL_INTF3__SHIFT 24
-static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF3(enum mdp5_intf_type val)
+#define MDP5_DISP_INTF_SEL_INTF3__MASK 0xff000000
+#define MDP5_DISP_INTF_SEL_INTF3__SHIFT 24
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF3(enum mdp5_intf_type val)
{
- return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF3__MASK;
+ return ((val) << MDP5_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_DISP_INTF_SEL_INTF3__MASK;
}
-static inline uint32_t REG_MDP5_MDP_INTR_EN(uint32_t i0) { return 0x00000010 + __offset_MDP(i0); }
+#define REG_MDP5_INTR_EN 0x00000010
-static inline uint32_t REG_MDP5_MDP_INTR_STATUS(uint32_t i0) { return 0x00000014 + __offset_MDP(i0); }
+#define REG_MDP5_INTR_STATUS 0x00000014
-static inline uint32_t REG_MDP5_MDP_INTR_CLEAR(uint32_t i0) { return 0x00000018 + __offset_MDP(i0); }
+#define REG_MDP5_INTR_CLEAR 0x00000018
-static inline uint32_t REG_MDP5_MDP_HIST_INTR_EN(uint32_t i0) { return 0x0000001c + __offset_MDP(i0); }
+#define REG_MDP5_HIST_INTR_EN 0x0000001c
-static inline uint32_t REG_MDP5_MDP_HIST_INTR_STATUS(uint32_t i0) { return 0x00000020 + __offset_MDP(i0); }
+#define REG_MDP5_HIST_INTR_STATUS 0x00000020
-static inline uint32_t REG_MDP5_MDP_HIST_INTR_CLEAR(uint32_t i0) { return 0x00000024 + __offset_MDP(i0); }
+#define REG_MDP5_HIST_INTR_CLEAR 0x00000024
-static inline uint32_t REG_MDP5_MDP_SPARE_0(uint32_t i0) { return 0x00000028 + __offset_MDP(i0); }
-#define MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN 0x00000001
+#define REG_MDP5_SPARE_0 0x00000028
+#define MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN 0x00000001
-static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_W(uint32_t i0, uint32_t i1) { return 0x00000080 + __offset_MDP(i0) + 0x4*i1; }
+static inline uint32_t REG_MDP5_SMP_ALLOC_W(uint32_t i0) { return 0x00000080 + 0x4*i0; }
-static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_W_REG(uint32_t i0, uint32_t i1) { return 0x00000080 + __offset_MDP(i0) + 0x4*i1; }
-#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff
-#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0
-static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0(uint32_t val)
+static inline uint32_t REG_MDP5_SMP_ALLOC_W_REG(uint32_t i0) { return 0x00000080 + 0x4*i0; }
+#define MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff
+#define MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT0(uint32_t val)
{
- return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK;
+ return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
}
-#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00
-#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8
-static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1(uint32_t val)
+#define MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00
+#define MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT1(uint32_t val)
{
- return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK;
+ return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
}
-#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000
-#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16
-static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2(uint32_t val)
+#define MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000
+#define MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT2(uint32_t val)
{
- return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK;
+ return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
}
-static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_R(uint32_t i0, uint32_t i1) { return 0x00000130 + __offset_MDP(i0) + 0x4*i1; }
+static inline uint32_t REG_MDP5_SMP_ALLOC_R(uint32_t i0) { return 0x00000130 + 0x4*i0; }
-static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_R_REG(uint32_t i0, uint32_t i1) { return 0x00000130 + __offset_MDP(i0) + 0x4*i1; }
-#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff
-#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0
-static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0(uint32_t val)
+static inline uint32_t REG_MDP5_SMP_ALLOC_R_REG(uint32_t i0) { return 0x00000130 + 0x4*i0; }
+#define MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff
+#define MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT0(uint32_t val)
{
- return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__MASK;
+ return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK;
}
-#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00
-#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8
-static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1(uint32_t val)
+#define MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00
+#define MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT1(uint32_t val)
{
- return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__MASK;
+ return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK;
}
-#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000
-#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16
-static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2(uint32_t val)
+#define MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000
+#define MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT2(uint32_t val)
{
- return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__MASK;
+ return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK;
}
static inline uint32_t __offset_IGC(enum mdp5_igc_type idx)
@@ -322,35 +305,35 @@ static inline uint32_t __offset_IGC(enum mdp5_igc_type idx)
default: return INVALID_IDX(idx);
}
}
-static inline uint32_t REG_MDP5_MDP_IGC(uint32_t i0, enum mdp5_igc_type i1) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1); }
+static inline uint32_t REG_MDP5_IGC(enum mdp5_igc_type i0) { return 0x00000000 + __offset_IGC(i0); }
-static inline uint32_t REG_MDP5_MDP_IGC_LUT(uint32_t i0, enum mdp5_igc_type i1, uint32_t i2) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1) + 0x4*i2; }
+static inline uint32_t REG_MDP5_IGC_LUT(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
-static inline uint32_t REG_MDP5_MDP_IGC_LUT_REG(uint32_t i0, enum mdp5_igc_type i1, uint32_t i2) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1) + 0x4*i2; }
-#define MDP5_MDP_IGC_LUT_REG_VAL__MASK 0x00000fff
-#define MDP5_MDP_IGC_LUT_REG_VAL__SHIFT 0
-static inline uint32_t MDP5_MDP_IGC_LUT_REG_VAL(uint32_t val)
+static inline uint32_t REG_MDP5_IGC_LUT_REG(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
+#define MDP5_IGC_LUT_REG_VAL__MASK 0x00000fff
+#define MDP5_IGC_LUT_REG_VAL__SHIFT 0
+static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val)
{
- return ((val) << MDP5_MDP_IGC_LUT_REG_VAL__SHIFT) & MDP5_MDP_IGC_LUT_REG_VAL__MASK;
+ return ((val) << MDP5_IGC_LUT_REG_VAL__SHIFT) & MDP5_IGC_LUT_REG_VAL__MASK;
}
-#define MDP5_MDP_IGC_LUT_REG_INDEX_UPDATE 0x02000000
-#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000
-#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000
-#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000
+#define MDP5_IGC_LUT_REG_INDEX_UPDATE 0x02000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000
-static inline uint32_t REG_MDP5_MDP_SPLIT_DPL_EN(uint32_t i0) { return 0x000002f4 + __offset_MDP(i0); }
+#define REG_MDP5_SPLIT_DPL_EN 0x000002f4
-static inline uint32_t REG_MDP5_MDP_SPLIT_DPL_UPPER(uint32_t i0) { return 0x000002f8 + __offset_MDP(i0); }
-#define MDP5_MDP_SPLIT_DPL_UPPER_SMART_PANEL 0x00000002
-#define MDP5_MDP_SPLIT_DPL_UPPER_SMART_PANEL_FREE_RUN 0x00000004
-#define MDP5_MDP_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX 0x00000010
-#define MDP5_MDP_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX 0x00000100
+#define REG_MDP5_SPLIT_DPL_UPPER 0x000002f8
+#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL 0x00000002
+#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL_FREE_RUN 0x00000004
+#define MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX 0x00000010
+#define MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX 0x00000100
-static inline uint32_t REG_MDP5_MDP_SPLIT_DPL_LOWER(uint32_t i0) { return 0x000003f0 + __offset_MDP(i0); }
-#define MDP5_MDP_SPLIT_DPL_LOWER_SMART_PANEL 0x00000002
-#define MDP5_MDP_SPLIT_DPL_LOWER_SMART_PANEL_FREE_RUN 0x00000004
-#define MDP5_MDP_SPLIT_DPL_LOWER_INTF1_TG_SYNC 0x00000010
-#define MDP5_MDP_SPLIT_DPL_LOWER_INTF2_TG_SYNC 0x00000100
+#define REG_MDP5_SPLIT_DPL_LOWER 0x000003f0
+#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL 0x00000002
+#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL_FREE_RUN 0x00000004
+#define MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC 0x00000010
+#define MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC 0x00000100
static inline uint32_t __offset_CTL(uint32_t idx)
{
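
The renamed helpers above all follow one generated mask/shift convention: a value is shifted into position and clipped by the field mask, and FIELD() in the driver performs the inverse extraction. A minimal standalone C sketch of that round trip, using the HW_VERSION minor field from the header above (illustrative plain C, not driver code):

#include <assert.h>
#include <stdint.h>

/* Field layout mirrored from the generated header above. */
#define MDP5_HW_VERSION_MINOR__MASK   0x0fff0000
#define MDP5_HW_VERSION_MINOR__SHIFT  16

/* Pack a value into its field, as the inline helpers above do. */
static uint32_t pack_minor(uint32_t val)
{
	return (val << MDP5_HW_VERSION_MINOR__SHIFT) & MDP5_HW_VERSION_MINOR__MASK;
}

/* Extract it again -- the operation FIELD() performs in the driver. */
static uint32_t unpack_minor(uint32_t reg)
{
	return (reg & MDP5_HW_VERSION_MINOR__MASK) >> MDP5_HW_VERSION_MINOR__SHIFT;
}

int main(void)
{
	uint32_t reg = pack_minor(7);
	assert(unpack_minor(reg) == 7);	/* round-trips within the 12-bit field */
	return 0;
}
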
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index 57f73f0c1..ac9e4cde1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -26,7 +26,6 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
.name = "msm8x74v1",
.mdp = {
.count = 1,
- .base = { 0x00100 },
.caps = MDP_CAP_SMP |
0,
},
@@ -41,12 +40,12 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
},
.ctl = {
.count = 5,
- .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+ .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 },
.flush_hw_mask = 0x0003ffff,
},
.pipe_vig = {
.count = 3,
- .base = { 0x01200, 0x01600, 0x01a00 },
+ .base = { 0x01100, 0x01500, 0x01900 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE |
@@ -55,7 +54,7 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
},
.pipe_rgb = {
.count = 3,
- .base = { 0x01e00, 0x02200, 0x02600 },
+ .base = { 0x01d00, 0x02100, 0x02500 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE |
@@ -63,26 +62,26 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
},
.pipe_dma = {
.count = 2,
- .base = { 0x02a00, 0x02e00 },
+ .base = { 0x02900, 0x02d00 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
0,
},
.lm = {
.count = 5,
- .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
+ .base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 },
.nb_stages = 5,
},
.dspp = {
.count = 3,
- .base = { 0x04600, 0x04a00, 0x04e00 },
+ .base = { 0x04500, 0x04900, 0x04d00 },
},
.pp = {
.count = 3,
- .base = { 0x21b00, 0x21c00, 0x21d00 },
+ .base = { 0x21a00, 0x21b00, 0x21c00 },
},
.intf = {
- .base = { 0x21100, 0x21300, 0x21500, 0x21700 },
+ .base = { 0x21000, 0x21200, 0x21400, 0x21600 },
.connect = {
[0] = INTF_eDP,
[1] = INTF_DSI,
@@ -97,7 +96,6 @@ const struct mdp5_cfg_hw msm8x74v2_config = {
.name = "msm8x74",
.mdp = {
.count = 1,
- .base = { 0x00100 },
.caps = MDP_CAP_SMP |
0,
},
@@ -112,48 +110,48 @@ const struct mdp5_cfg_hw msm8x74v2_config = {
},
.ctl = {
.count = 5,
- .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+ .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 },
.flush_hw_mask = 0x0003ffff,
},
.pipe_vig = {
.count = 3,
- .base = { 0x01200, 0x01600, 0x01a00 },
+ .base = { 0x01100, 0x01500, 0x01900 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
MDP_PIPE_CAP_DECIMATION,
},
.pipe_rgb = {
.count = 3,
- .base = { 0x01e00, 0x02200, 0x02600 },
+ .base = { 0x01d00, 0x02100, 0x02500 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
},
.pipe_dma = {
.count = 2,
- .base = { 0x02a00, 0x02e00 },
+ .base = { 0x02900, 0x02d00 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
},
.lm = {
.count = 5,
- .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
+ .base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 },
.nb_stages = 5,
.max_width = 2048,
.max_height = 0xFFFF,
},
.dspp = {
.count = 3,
- .base = { 0x04600, 0x04a00, 0x04e00 },
+ .base = { 0x04500, 0x04900, 0x04d00 },
},
.ad = {
.count = 2,
- .base = { 0x13100, 0x13300 },
+ .base = { 0x13000, 0x13200 },
},
.pp = {
.count = 3,
- .base = { 0x12d00, 0x12e00, 0x12f00 },
+ .base = { 0x12c00, 0x12d00, 0x12e00 },
},
.intf = {
- .base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
+ .base = { 0x12400, 0x12600, 0x12800, 0x12a00 },
.connect = {
[0] = INTF_eDP,
[1] = INTF_DSI,
@@ -168,7 +166,6 @@ const struct mdp5_cfg_hw apq8084_config = {
.name = "apq8084",
.mdp = {
.count = 1,
- .base = { 0x00100 },
.caps = MDP_CAP_SMP |
0,
},
@@ -190,49 +187,49 @@ const struct mdp5_cfg_hw apq8084_config = {
},
.ctl = {
.count = 5,
- .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+ .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 },
.flush_hw_mask = 0x003fffff,
},
.pipe_vig = {
.count = 4,
- .base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
+ .base = { 0x01100, 0x01500, 0x01900, 0x01d00 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
MDP_PIPE_CAP_DECIMATION,
},
.pipe_rgb = {
.count = 4,
- .base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
+ .base = { 0x02100, 0x02500, 0x02900, 0x02d00 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
},
.pipe_dma = {
.count = 2,
- .base = { 0x03200, 0x03600 },
+ .base = { 0x03100, 0x03500 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
},
.lm = {
.count = 6,
- .base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
+ .base = { 0x03900, 0x03d00, 0x04100, 0x04500, 0x04900, 0x04d00 },
.nb_stages = 5,
.max_width = 2048,
.max_height = 0xFFFF,
},
.dspp = {
.count = 4,
- .base = { 0x05200, 0x05600, 0x05a00, 0x05e00 },
+ .base = { 0x05100, 0x05500, 0x05900, 0x05d00 },
},
.ad = {
.count = 3,
- .base = { 0x13500, 0x13700, 0x13900 },
+ .base = { 0x13400, 0x13600, 0x13800 },
},
.pp = {
.count = 4,
- .base = { 0x12f00, 0x13000, 0x13100, 0x13200 },
+ .base = { 0x12e00, 0x12f00, 0x13000, 0x13100 },
},
.intf = {
- .base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
+ .base = { 0x12400, 0x12600, 0x12800, 0x12a00, 0x12c00 },
.connect = {
[0] = INTF_eDP,
[1] = INTF_DSI,
@@ -247,7 +244,7 @@ const struct mdp5_cfg_hw msm8x16_config = {
.name = "msm8x16",
.mdp = {
.count = 1,
- .base = { 0x01000 },
+ .base = { 0x0 },
.caps = MDP_CAP_SMP |
0,
},
@@ -261,41 +258,41 @@ const struct mdp5_cfg_hw msm8x16_config = {
},
.ctl = {
.count = 5,
- .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 },
+ .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
.flush_hw_mask = 0x4003ffff,
},
.pipe_vig = {
.count = 1,
- .base = { 0x05000 },
+ .base = { 0x04000 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
MDP_PIPE_CAP_DECIMATION,
},
.pipe_rgb = {
.count = 2,
- .base = { 0x15000, 0x17000 },
+ .base = { 0x14000, 0x16000 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
},
.pipe_dma = {
.count = 1,
- .base = { 0x25000 },
+ .base = { 0x24000 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
},
.lm = {
.count = 2, /* LM0 and LM3 */
- .base = { 0x45000, 0x48000 },
+ .base = { 0x44000, 0x47000 },
.nb_stages = 5,
.max_width = 2048,
.max_height = 0xFFFF,
},
.dspp = {
.count = 1,
- .base = { 0x55000 },
+ .base = { 0x54000 },
},
.intf = {
- .base = { 0x00000, 0x6b800 },
+ .base = { 0x00000, 0x6a800 },
.connect = {
[0] = INTF_DISABLED,
[1] = INTF_DSI,
@@ -308,7 +305,6 @@ const struct mdp5_cfg_hw msm8x94_config = {
.name = "msm8x94",
.mdp = {
.count = 1,
- .base = { 0x01000 },
.caps = MDP_CAP_SMP |
0,
},
@@ -330,49 +326,49 @@ const struct mdp5_cfg_hw msm8x94_config = {
},
.ctl = {
.count = 5,
- .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 },
+ .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
.flush_hw_mask = 0xf0ffffff,
},
.pipe_vig = {
.count = 4,
- .base = { 0x05000, 0x07000, 0x09000, 0x0b000 },
+ .base = { 0x04000, 0x06000, 0x08000, 0x0a000 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
MDP_PIPE_CAP_DECIMATION,
},
.pipe_rgb = {
.count = 4,
- .base = { 0x15000, 0x17000, 0x19000, 0x1b000 },
+ .base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
},
.pipe_dma = {
.count = 2,
- .base = { 0x25000, 0x27000 },
+ .base = { 0x24000, 0x26000 },
.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
},
.lm = {
.count = 6,
- .base = { 0x45000, 0x46000, 0x47000, 0x48000, 0x49000, 0x4a000 },
+ .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
.nb_stages = 8,
.max_width = 2048,
.max_height = 0xFFFF,
},
.dspp = {
.count = 4,
- .base = { 0x55000, 0x57000, 0x59000, 0x5b000 },
+ .base = { 0x54000, 0x56000, 0x58000, 0x5a000 },
},
.ad = {
.count = 3,
- .base = { 0x79000, 0x79800, 0x7a000 },
+ .base = { 0x78000, 0x78800, 0x79000 },
},
.pp = {
.count = 4,
- .base = { 0x71000, 0x71800, 0x72000, 0x72800 },
+ .base = { 0x70000, 0x70800, 0x71000, 0x71800 },
},
.intf = {
- .base = { 0x6b000, 0x6b800, 0x6c000, 0x6c800, 0x6d000 },
+ .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800, 0x6c000 },
.connect = {
[0] = INTF_DISABLED,
[1] = INTF_DSI,
@@ -387,19 +383,18 @@ const struct mdp5_cfg_hw msm8x96_config = {
.name = "msm8x96",
.mdp = {
.count = 1,
- .base = { 0x01000 },
.caps = MDP_CAP_DSC |
MDP_CAP_CDM |
0,
},
.ctl = {
.count = 5,
- .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 },
+ .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
.flush_hw_mask = 0xf4ffffff,
},
.pipe_vig = {
.count = 4,
- .base = { 0x05000, 0x07000, 0x09000, 0x0b000 },
+ .base = { 0x04000, 0x06000, 0x08000, 0x0a000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE |
@@ -410,7 +405,7 @@ const struct mdp5_cfg_hw msm8x96_config = {
},
.pipe_rgb = {
.count = 4,
- .base = { 0x15000, 0x17000, 0x19000, 0x1b000 },
+ .base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE |
@@ -420,7 +415,7 @@ const struct mdp5_cfg_hw msm8x96_config = {
},
.pipe_dma = {
.count = 2,
- .base = { 0x25000, 0x27000 },
+ .base = { 0x24000, 0x26000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SW_PIX_EXT |
@@ -428,33 +423,33 @@ const struct mdp5_cfg_hw msm8x96_config = {
},
.lm = {
.count = 6,
- .base = { 0x45000, 0x46000, 0x47000, 0x48000, 0x49000, 0x4a000 },
+ .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
.nb_stages = 8,
.max_width = 2560,
.max_height = 0xFFFF,
},
.dspp = {
.count = 2,
- .base = { 0x55000, 0x57000 },
+ .base = { 0x54000, 0x56000 },
},
.ad = {
.count = 3,
- .base = { 0x79000, 0x79800, 0x7a000 },
+ .base = { 0x78000, 0x78800, 0x79000 },
},
.pp = {
.count = 4,
- .base = { 0x71000, 0x71800, 0x72000, 0x72800 },
+ .base = { 0x70000, 0x70800, 0x71000, 0x71800 },
},
.cdm = {
.count = 1,
- .base = { 0x7a200 },
+ .base = { 0x79200 },
},
.dsc = {
.count = 2,
- .base = { 0x81000, 0x81400 },
+ .base = { 0x80000, 0x80400 },
},
.intf = {
- .base = { 0x6b000, 0x6b800, 0x6c000, 0x6c800, 0x6d000 },
+ .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800, 0x6c000 },
.connect = {
[0] = INTF_DISABLED,
[1] = INTF_DSI,
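
Dropping __offset_MDP() means registers are now addressed relative to the MDP block itself rather than the MDSS wrapper, so every per-SoC base below shrinks by the old .mdp.base[0] (0x100 on msm8x74/apq8084, 0x1000 on the newer chips). A quick standalone check of that arithmetic, with constants copied from the hunks above:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* msm8x74: the MDP sub-block used to live at MDSS + 0x100. */
	const uint32_t old_mdp_base = 0x00100;
	const uint32_t old_ctl0     = 0x00600;	/* old MDSS-relative CTL0 */
	const uint32_t new_ctl0     = 0x00500;	/* new MDP-relative CTL0  */
	assert(old_ctl0 - old_mdp_base == new_ctl0);

	/* msm8x94: same rebasing with its 0x1000 MDP offset. */
	assert((uint32_t)0x02000 - 0x01000 == 0x01000);	/* CTL0 */
	assert((uint32_t)0x45000 - 0x01000 == 0x44000);	/* LM0  */
	return 0;
}
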
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
index 69094cb28..c627ab6d0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -272,22 +272,22 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
* start signal for the slave encoder
*/
if (intf_num == 1)
- data |= MDP5_MDP_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX;
+ data |= MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX;
else if (intf_num == 2)
- data |= MDP5_MDP_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX;
+ data |= MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX;
else
return -EINVAL;
/* Smart Panel, Sync mode */
- data |= MDP5_MDP_SPLIT_DPL_UPPER_SMART_PANEL;
+ data |= MDP5_SPLIT_DPL_UPPER_SMART_PANEL;
/* Make sure clocks are on when connectors call this function. */
mdp5_enable(mdp5_kms);
- mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_UPPER(0), data);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, data);
- mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_LOWER(0),
- MDP5_MDP_SPLIT_DPL_LOWER_SMART_PANEL);
- mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_EN(0), 1);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER,
+ MDP5_SPLIT_DPL_LOWER_SMART_PANEL);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
mdp5_disable(mdp5_kms);
return 0;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 88fe256c1..fa2be7ce9 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -374,6 +374,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct plane_state pstates[STAGE_MAX + 1];
const struct mdp5_cfg_hw *hw_cfg;
+ const struct drm_plane_state *pstate;
int cnt = 0, i;
DBG("%s: check", mdp5_crtc->name);
@@ -382,20 +383,13 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
* and that we don't have conflicting mixer stages:
*/
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
- drm_atomic_crtc_state_for_each_plane(plane, state) {
- struct drm_plane_state *pstate;
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
if (cnt >= (hw_cfg->lm.nb_stages)) {
dev_err(dev->dev, "too many planes!\n");
return -EINVAL;
}
- pstate = state->state->plane_states[drm_plane_index(plane)];
- /* plane might not have changed, in which case take
- * current state:
- */
- if (!pstate)
- pstate = plane->state;
pstates[cnt].plane = plane;
pstates[cnt].state = to_mdp5_plane_state(pstate);
@@ -496,8 +490,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct drm_gem_object *cursor_bo, *old_bo = NULL;
uint32_t blendcfg, cursor_addr, stride;
- int ret, bpp, lm;
- unsigned int depth;
+ int ret, lm;
enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
uint32_t roi_w, roi_h;
@@ -527,8 +520,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
return -EINVAL;
lm = mdp5_crtc->lm;
- drm_fb_get_bpp_depth(DRM_FORMAT_ARGB8888, &depth, &bpp);
- stride = width * (bpp >> 3);
+ stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
old_bo = mdp5_crtc->cursor.scanout_bo;
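
The cursor stride change swaps a bits-per-pixel lookup for a bytes-per-pixel (cpp) one; for ARGB8888 both come out to 4 bytes per pixel. A standalone sketch of the equivalence (the real driver obtains cpp from drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0)):

#include <assert.h>
#include <stdint.h>

static uint32_t stride_from_bpp(uint32_t width, uint32_t bpp)
{
	return width * (bpp >> 3);	/* old: bpp via drm_fb_get_bpp_depth() */
}

static uint32_t stride_from_cpp(uint32_t width, uint32_t cpp)
{
	return width * cpp;		/* new: cpp via drm_format_plane_cpp() */
}

int main(void)
{
	/* ARGB8888 is 32 bits per pixel, i.e. 4 bytes per pixel. */
	assert(stride_from_bpp(64, 32) == stride_from_cpp(64, 4));	/* 256 bytes */
	return 0;
}
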
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
index 4e81ca4f9..d021edc3b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -118,31 +118,31 @@ static void set_display_intf(struct mdp5_kms *mdp5_kms,
u32 intf_sel;
spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
- intf_sel = mdp5_read(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0));
+ intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
switch (intf->num) {
case 0:
- intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF0__MASK;
- intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF0(intf->type);
+ intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
+ intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf->type);
break;
case 1:
- intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF1__MASK;
- intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF1(intf->type);
+ intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
+ intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf->type);
break;
case 2:
- intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF2__MASK;
- intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF2(intf->type);
+ intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
+ intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf->type);
break;
case 3:
- intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF3__MASK;
- intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF3(intf->type);
+ intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
+ intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf->type);
break;
default:
BUG();
break;
}
- mdp5_write(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0), intf_sel);
+ mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
}
@@ -557,7 +557,7 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
if (!enable) {
ctlx->pair = NULL;
ctly->pair = NULL;
- mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0);
return 0;
} else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
dev_err(ctl_mgr->dev->dev, "CTLs already paired\n");
@@ -570,8 +570,8 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
ctlx->pair = ctly;
ctly->pair = ctlx;
- mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0),
- MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
+ mdp5_write(mdp5_kms, REG_MDP5_SPARE_0,
+ MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
return 0;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index 1d95f9fd9..fe0c22230 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -322,18 +322,18 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
* to use the master's enable signal for the slave encoder.
*/
if (intf_num == 1)
- data |= MDP5_MDP_SPLIT_DPL_LOWER_INTF2_TG_SYNC;
+ data |= MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC;
else if (intf_num == 2)
- data |= MDP5_MDP_SPLIT_DPL_LOWER_INTF1_TG_SYNC;
+ data |= MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC;
else
return -EINVAL;
/* Make sure clocks are on when connectors call this function. */
mdp5_enable(mdp5_kms);
/* Dumb Panel, Sync mode */
- mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_UPPER(0), 0);
- mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_LOWER(0), data);
- mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_EN(0), 1);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, 0);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, data);
+ mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index 73bc3e312..d53e5510f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -15,7 +15,6 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/irqdomain.h>
#include <linux/irq.h>
#include "msm_drv.h"
@@ -24,9 +23,9 @@
void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
uint32_t old_irqmask)
{
- mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_CLEAR(0),
- irqmask ^ (irqmask & old_irqmask));
- mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_EN(0), irqmask);
+ mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_CLEAR,
+ irqmask ^ (irqmask & old_irqmask));
+ mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_EN, irqmask);
}
static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
@@ -38,8 +37,8 @@ void mdp5_irq_preinstall(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
mdp5_enable(mdp5_kms);
- mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), 0xffffffff);
- mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_EN(0), 0x00000000);
+ mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
+ mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
mdp5_disable(mdp5_kms);
}
@@ -55,7 +54,9 @@ int mdp5_irq_postinstall(struct msm_kms *kms)
MDP5_IRQ_INTF2_UNDER_RUN |
MDP5_IRQ_INTF3_UNDER_RUN;
+ mdp5_enable(mdp5_kms);
mdp_irq_register(mdp_kms, error_handler);
+ mdp5_disable(mdp5_kms);
return 0;
}
@@ -64,21 +65,22 @@ void mdp5_irq_uninstall(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
mdp5_enable(mdp5_kms);
- mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_EN(0), 0x00000000);
+ mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
mdp5_disable(mdp5_kms);
}
-static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
+irqreturn_t mdp5_irq(struct msm_kms *kms)
{
+ struct mdp_kms *mdp_kms = to_mdp_kms(kms);
struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
unsigned int id;
uint32_t status, enable;
- enable = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_EN(0));
- status = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_STATUS(0)) & enable;
- mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), status);
+ enable = mdp5_read(mdp5_kms, REG_MDP5_INTR_EN);
+ status = mdp5_read(mdp5_kms, REG_MDP5_INTR_STATUS) & enable;
+ mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, status);
VERB("status=%08x", status);
@@ -87,29 +89,6 @@ static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
for (id = 0; id < priv->num_crtcs; id++)
if (status & mdp5_crtc_vblank(priv->crtcs[id]))
drm_handle_vblank(dev, id);
-}
-
-irqreturn_t mdp5_irq(struct msm_kms *kms)
-{
- struct mdp_kms *mdp_kms = to_mdp_kms(kms);
- struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
- uint32_t intr;
-
- intr = mdp5_read(mdp5_kms, REG_MDSS_HW_INTR_STATUS);
-
- VERB("intr=%08x", intr);
-
- if (intr & MDSS_HW_INTR_STATUS_INTR_MDP) {
- mdp5_irq_mdp(mdp_kms);
- intr &= ~MDSS_HW_INTR_STATUS_INTR_MDP;
- }
-
- while (intr) {
- irq_hw_number_t hwirq = fls(intr) - 1;
- generic_handle_irq(irq_find_mapping(
- mdp5_kms->irqcontroller.domain, hwirq));
- intr &= ~(1 << hwirq);
- }
return IRQ_HANDLED;
}
@@ -135,81 +114,3 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
mdp5_crtc_vblank(crtc), false);
mdp5_disable(mdp5_kms);
}
-
-/*
- * interrupt-controller implementation, so sub-blocks (hdmi/eDP/dsi/etc)
- * can register to get their irq's delivered
- */
-
-#define VALID_IRQS (MDSS_HW_INTR_STATUS_INTR_DSI0 | \
- MDSS_HW_INTR_STATUS_INTR_DSI1 | \
- MDSS_HW_INTR_STATUS_INTR_HDMI | \
- MDSS_HW_INTR_STATUS_INTR_EDP)
-
-static void mdp5_hw_mask_irq(struct irq_data *irqd)
-{
- struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
- smp_mb__before_atomic();
- clear_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
- smp_mb__after_atomic();
-}
-
-static void mdp5_hw_unmask_irq(struct irq_data *irqd)
-{
- struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
- smp_mb__before_atomic();
- set_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
- smp_mb__after_atomic();
-}
-
-static struct irq_chip mdp5_hw_irq_chip = {
- .name = "mdp5",
- .irq_mask = mdp5_hw_mask_irq,
- .irq_unmask = mdp5_hw_unmask_irq,
-};
-
-static int mdp5_hw_irqdomain_map(struct irq_domain *d,
- unsigned int irq, irq_hw_number_t hwirq)
-{
- struct mdp5_kms *mdp5_kms = d->host_data;
-
- if (!(VALID_IRQS & (1 << hwirq)))
- return -EPERM;
-
- irq_set_chip_and_handler(irq, &mdp5_hw_irq_chip, handle_level_irq);
- irq_set_chip_data(irq, mdp5_kms);
-
- return 0;
-}
-
-static struct irq_domain_ops mdp5_hw_irqdomain_ops = {
- .map = mdp5_hw_irqdomain_map,
- .xlate = irq_domain_xlate_onecell,
-};
-
-
-int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms)
-{
- struct device *dev = mdp5_kms->dev->dev;
- struct irq_domain *d;
-
- d = irq_domain_add_linear(dev->of_node, 32,
- &mdp5_hw_irqdomain_ops, mdp5_kms);
- if (!d) {
- dev_err(dev, "mdp5 irq domain add failed\n");
- return -ENXIO;
- }
-
- mdp5_kms->irqcontroller.enabled_mask = 0;
- mdp5_kms->irqcontroller.domain = d;
-
- return 0;
-}
-
-void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms)
-{
- if (mdp5_kms->irqcontroller.domain) {
- irq_domain_remove(mdp5_kms->irqcontroller.domain);
- mdp5_kms->irqcontroller.domain = NULL;
- }
-}
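
The expression written to INTR_CLEAR in mdp5_set_irqmask() above, irqmask ^ (irqmask & old_irqmask), selects exactly the bits being newly enabled, so stale status is cleared before those interrupts start firing. A standalone check that it equals the more obvious irqmask & ~old_irqmask:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t old_mask = 0x0000000f;	/* previously enabled irqs */
	uint32_t new_mask = 0x000000f3;	/* requested mask          */

	/* The driver clears status for bits that are newly enabled... */
	uint32_t to_clear = new_mask ^ (new_mask & old_mask);

	/* ...which is new_mask with the already-enabled bits removed. */
	assert(to_clear == (new_mask & ~old_mask));
	assert(to_clear == 0x000000f0);
	return 0;
}
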
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 484b4d15e..ed7143d35 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -16,6 +16,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/of_irq.h>
#include "msm_drv.h"
#include "msm_mmu.h"
@@ -28,10 +29,11 @@ static const char *iommu_ports[] = {
static int mdp5_hw_init(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- struct drm_device *dev = mdp5_kms->dev;
+ struct platform_device *pdev = mdp5_kms->pdev;
unsigned long flags;
- pm_runtime_get_sync(dev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+ mdp5_enable(mdp5_kms);
/* Magic unknown register writes:
*
@@ -58,12 +60,13 @@ static int mdp5_hw_init(struct msm_kms *kms)
*/
spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
- mdp5_write(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0), 0);
+ mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
- pm_runtime_put_sync(dev->dev);
+ mdp5_disable(mdp5_kms);
+ pm_runtime_put_sync(&pdev->dev);
return 0;
}
@@ -78,17 +81,11 @@ static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s
{
int i;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- int nplanes = mdp5_kms->dev->mode_config.num_total_plane;
-
- for (i = 0; i < nplanes; i++) {
- struct drm_plane *plane = state->planes[i];
- struct drm_plane_state *plane_state = state->plane_states[i];
-
- if (!plane)
- continue;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ for_each_plane_in_state(state, plane, plane_state, i)
mdp5_plane_complete_commit(plane, plane_state);
- }
mdp5_disable(mdp5_kms);
}
@@ -117,26 +114,15 @@ static int mdp5_set_split_display(struct msm_kms *kms,
return mdp5_encoder_set_split_display(encoder, slave_encoder);
}
-static void mdp5_destroy(struct msm_kms *kms)
+static void mdp5_kms_destroy(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct msm_mmu *mmu = mdp5_kms->mmu;
- mdp5_irq_domain_fini(mdp5_kms);
-
if (mmu) {
mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
mmu->funcs->destroy(mmu);
}
-
- if (mdp5_kms->ctlm)
- mdp5_ctlm_destroy(mdp5_kms->ctlm);
- if (mdp5_kms->smp)
- mdp5_smp_destroy(mdp5_kms->smp);
- if (mdp5_kms->cfg)
- mdp5_cfg_destroy(mdp5_kms->cfg);
-
- kfree(mdp5_kms);
}
static const struct mdp_kms_funcs kms_funcs = {
@@ -154,7 +140,7 @@ static const struct mdp_kms_funcs kms_funcs = {
.get_format = mdp_get_format,
.round_pixclk = mdp5_round_pixclk,
.set_split_display = mdp5_set_split_display,
- .destroy = mdp5_destroy,
+ .destroy = mdp5_kms_destroy,
},
.set_irqmask = mdp5_set_irqmask,
};
@@ -351,13 +337,6 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
- /* register our interrupt-controller for hdmi/eDP/dsi/etc
- * to use for irqs routed through mdp:
- */
- ret = mdp5_irq_domain_init(mdp5_kms);
- if (ret)
- goto fail;
-
/* construct CRTCs and their private planes: */
for (i = 0; i < hw_cfg->pipe_rgb.count; i++) {
struct drm_plane *plane;
@@ -425,17 +404,17 @@ fail:
return ret;
}
-static void read_hw_revision(struct mdp5_kms *mdp5_kms,
- uint32_t *major, uint32_t *minor)
+static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
+ u32 *major, u32 *minor)
{
- uint32_t version;
+ u32 version;
mdp5_enable(mdp5_kms);
- version = mdp5_read(mdp5_kms, REG_MDSS_HW_VERSION);
+ version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION);
mdp5_disable(mdp5_kms);
- *major = FIELD(version, MDSS_HW_VERSION_MAJOR);
- *minor = FIELD(version, MDSS_HW_VERSION_MINOR);
+ *major = FIELD(version, MDP5_HW_VERSION_MAJOR);
+ *minor = FIELD(version, MDP5_HW_VERSION_MINOR);
DBG("MDP5 version v%d.%d", *major, *minor);
}
@@ -580,51 +559,146 @@ static u32 mdp5_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
struct msm_kms *mdp5_kms_init(struct drm_device *dev)
{
- struct platform_device *pdev = dev->platformdev;
- struct mdp5_cfg *config;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev;
struct mdp5_kms *mdp5_kms;
- struct msm_kms *kms = NULL;
+ struct mdp5_cfg *config;
+ struct msm_kms *kms;
struct msm_mmu *mmu;
- uint32_t major, minor;
- int i, ret;
+ int irq, i, ret;
- mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL);
- if (!mdp5_kms) {
- dev_err(dev->dev, "failed to allocate kms\n");
- ret = -ENOMEM;
+ /* priv->kms would have been populated by the MDP5 driver */
+ kms = priv->kms;
+ if (!kms)
+ return NULL;
+
+ mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+
+ mdp_kms_init(&mdp5_kms->base, &kms_funcs);
+
+ pdev = mdp5_kms->pdev;
+
+ irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (irq < 0) {
+ ret = irq;
+ dev_err(&pdev->dev, "failed to get irq: %d\n", ret);
goto fail;
}
- spin_lock_init(&mdp5_kms->resource_lock);
+ kms->irq = irq;
- mdp_kms_init(&mdp5_kms->base, &kms_funcs);
+ config = mdp5_cfg_get_config(mdp5_kms->cfg);
- kms = &mdp5_kms->base.base;
+ /* make sure things are off before attaching iommu (bootloader could
+ * have left things on, in which case we'll start getting faults if
+ * we don't disable):
+ */
+ mdp5_enable(mdp5_kms);
+ for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
+ if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
+ !config->hw->intf.base[i])
+ continue;
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
- mdp5_kms->dev = dev;
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
+ }
+ mdp5_disable(mdp5_kms);
+ mdelay(16);
- /* mdp5_kms->mmio actually represents the MDSS base address */
- mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
- if (IS_ERR(mdp5_kms->mmio)) {
- ret = PTR_ERR(mdp5_kms->mmio);
+ if (config->platform.iommu) {
+ mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
+ if (IS_ERR(mmu)) {
+ ret = PTR_ERR(mmu);
+ dev_err(&pdev->dev, "failed to init iommu: %d\n", ret);
+ iommu_domain_free(config->platform.iommu);
+ goto fail;
+ }
+
+ ret = mmu->funcs->attach(mmu, iommu_ports,
+ ARRAY_SIZE(iommu_ports));
+ if (ret) {
+ dev_err(&pdev->dev, "failed to attach iommu: %d\n",
+ ret);
+ mmu->funcs->destroy(mmu);
+ goto fail;
+ }
+ } else {
+ dev_info(&pdev->dev,
+ "no iommu, fallback to phys contig buffers for scanout\n");
+ mmu = NULL;
+ }
+ mdp5_kms->mmu = mmu;
+
+ mdp5_kms->id = msm_register_mmu(dev, mmu);
+ if (mdp5_kms->id < 0) {
+ ret = mdp5_kms->id;
+ dev_err(&pdev->dev, "failed to register mdp5 iommu: %d\n", ret);
goto fail;
}
- mdp5_kms->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
- if (IS_ERR(mdp5_kms->vbif)) {
- ret = PTR_ERR(mdp5_kms->vbif);
+ ret = modeset_init(mdp5_kms);
+ if (ret) {
+ dev_err(&pdev->dev, "modeset_init failed: %d\n", ret);
goto fail;
}
- mdp5_kms->vdd = devm_regulator_get(&pdev->dev, "vdd");
- if (IS_ERR(mdp5_kms->vdd)) {
- ret = PTR_ERR(mdp5_kms->vdd);
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = config->hw->lm.max_width;
+ dev->mode_config.max_height = config->hw->lm.max_height;
+
+ dev->driver->get_vblank_timestamp = mdp5_get_vblank_timestamp;
+ dev->driver->get_scanout_position = mdp5_get_scanoutpos;
+ dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
+ dev->max_vblank_count = 0xffffffff;
+ dev->vblank_disable_immediate = true;
+
+ return kms;
+fail:
+ if (kms)
+ mdp5_kms_destroy(kms);
+ return ERR_PTR(ret);
+}
+
+static void mdp5_destroy(struct platform_device *pdev)
+{
+ struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
+
+ if (mdp5_kms->ctlm)
+ mdp5_ctlm_destroy(mdp5_kms->ctlm);
+ if (mdp5_kms->smp)
+ mdp5_smp_destroy(mdp5_kms->smp);
+ if (mdp5_kms->cfg)
+ mdp5_cfg_destroy(mdp5_kms->cfg);
+
+ if (mdp5_kms->rpm_enabled)
+ pm_runtime_disable(&pdev->dev);
+}
+
+static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct mdp5_kms *mdp5_kms;
+ struct mdp5_cfg *config;
+ u32 major, minor;
+ int ret;
+
+ mdp5_kms = devm_kzalloc(&pdev->dev, sizeof(*mdp5_kms), GFP_KERNEL);
+ if (!mdp5_kms) {
+ ret = -ENOMEM;
goto fail;
}
- ret = regulator_enable(mdp5_kms->vdd);
- if (ret) {
- dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
+ platform_set_drvdata(pdev, mdp5_kms);
+
+ spin_lock_init(&mdp5_kms->resource_lock);
+
+ mdp5_kms->dev = dev;
+ mdp5_kms->pdev = pdev;
+
+ mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
+ if (IS_ERR(mdp5_kms->mmio)) {
+ ret = PTR_ERR(mdp5_kms->mmio);
goto fail;
}
@@ -635,9 +709,6 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true);
if (ret)
goto fail;
- ret = get_clk(pdev, &mdp5_kms->src_clk, "core_clk_src", true);
- if (ret)
- goto fail;
ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true);
if (ret)
goto fail;
@@ -652,9 +723,12 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
* rate first, then figure out hw revision, and then set a
* more optimal rate:
*/
- clk_set_rate(mdp5_kms->src_clk, 200000000);
+ clk_set_rate(mdp5_kms->core_clk, 200000000);
- read_hw_revision(mdp5_kms, &major, &minor);
+ pm_runtime_enable(&pdev->dev);
+ mdp5_kms->rpm_enabled = true;
+
+ read_mdp_hw_revision(mdp5_kms, &major, &minor);
mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
if (IS_ERR(mdp5_kms->cfg)) {
@@ -667,7 +741,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mdp5_kms->caps = config->hw->mdp.caps;
/* TODO: compute core clock rate at runtime */
- clk_set_rate(mdp5_kms->src_clk, config->hw->max_clk);
+ clk_set_rate(mdp5_kms->core_clk, config->hw->max_clk);
/*
* Some chipsets have a Shared Memory Pool (SMP), while others
@@ -690,73 +764,76 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
goto fail;
}
- /* make sure things are off before attaching iommu (bootloader could
- * have left things on, in which case we'll start getting faults if
- * we don't disable):
- */
- mdp5_enable(mdp5_kms);
- for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
- if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
- !config->hw->intf.base[i])
- continue;
- mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
+ /* set uninitialized kms */
+ priv->kms = &mdp5_kms->base.base;
- mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
- }
- mdp5_disable(mdp5_kms);
- mdelay(16);
+ return 0;
+fail:
+ mdp5_destroy(pdev);
+ return ret;
+}
- if (config->platform.iommu) {
- mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
- if (IS_ERR(mmu)) {
- ret = PTR_ERR(mmu);
- dev_err(dev->dev, "failed to init iommu: %d\n", ret);
- iommu_domain_free(config->platform.iommu);
- goto fail;
- }
+static int mdp5_bind(struct device *dev, struct device *master, void *data)
+{
+ struct drm_device *ddev = dev_get_drvdata(master);
+ struct platform_device *pdev = to_platform_device(dev);
- ret = mmu->funcs->attach(mmu, iommu_ports,
- ARRAY_SIZE(iommu_ports));
- if (ret) {
- dev_err(dev->dev, "failed to attach iommu: %d\n", ret);
- mmu->funcs->destroy(mmu);
- goto fail;
- }
- } else {
- dev_info(dev->dev, "no iommu, fallback to phys "
- "contig buffers for scanout\n");
- mmu = NULL;
- }
- mdp5_kms->mmu = mmu;
+ DBG("");
- mdp5_kms->id = msm_register_mmu(dev, mmu);
- if (mdp5_kms->id < 0) {
- ret = mdp5_kms->id;
- dev_err(dev->dev, "failed to register mdp5 iommu: %d\n", ret);
- goto fail;
- }
+ return mdp5_init(pdev, ddev);
+}
- ret = modeset_init(mdp5_kms);
- if (ret) {
- dev_err(dev->dev, "modeset_init failed: %d\n", ret);
- goto fail;
- }
+static void mdp5_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
- dev->mode_config.min_width = 0;
- dev->mode_config.min_height = 0;
- dev->mode_config.max_width = config->hw->lm.max_width;
- dev->mode_config.max_height = config->hw->lm.max_height;
+ mdp5_destroy(pdev);
+}
- dev->driver->get_vblank_timestamp = mdp5_get_vblank_timestamp;
- dev->driver->get_scanout_position = mdp5_get_scanoutpos;
- dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
- dev->max_vblank_count = 0xffffffff;
- dev->vblank_disable_immediate = true;
+static const struct component_ops mdp5_ops = {
+ .bind = mdp5_bind,
+ .unbind = mdp5_unbind,
+};
- return kms;
+static int mdp5_dev_probe(struct platform_device *pdev)
+{
+ DBG("");
+ return component_add(&pdev->dev, &mdp5_ops);
+}
-fail:
- if (kms)
- mdp5_destroy(kms);
- return ERR_PTR(ret);
+static int mdp5_dev_remove(struct platform_device *pdev)
+{
+ DBG("");
+ component_del(&pdev->dev, &mdp5_ops);
+ return 0;
+}
+
+static const struct of_device_id mdp5_dt_match[] = {
+ { .compatible = "qcom,mdp5", },
+ /* to support downstream DT files */
+ { .compatible = "qcom,mdss_mdp", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mdp5_dt_match);
+
+static struct platform_driver mdp5_driver = {
+ .probe = mdp5_dev_probe,
+ .remove = mdp5_dev_remove,
+ .driver = {
+ .name = "msm_mdp",
+ .of_match_table = mdp5_dt_match,
+ },
+};
+
+void __init msm_mdp_register(void)
+{
+ DBG("");
+ platform_driver_register(&mdp5_driver);
+}
+
+void __exit msm_mdp_unregister(void)
+{
+ DBG("");
+ platform_driver_unregister(&mdp5_driver);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 9a2589823..03738927b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -31,6 +31,8 @@ struct mdp5_kms {
struct drm_device *dev;
+ struct platform_device *pdev;
+
struct mdp5_cfg_handler *cfg;
uint32_t caps; /* MDP capabilities (MDP_CAP_XXX bits) */
@@ -43,29 +45,23 @@ struct mdp5_kms {
struct mdp5_ctl_manager *ctlm;
/* io/register spaces: */
- void __iomem *mmio, *vbif;
-
- struct regulator *vdd;
+ void __iomem *mmio;
struct clk *axi_clk;
struct clk *ahb_clk;
- struct clk *src_clk;
struct clk *core_clk;
struct clk *lut_clk;
struct clk *vsync_clk;
/*
* lock to protect access to global resources: ie., following register:
- * - REG_MDP5_MDP_DISP_INTF_SEL
+ * - REG_MDP5_DISP_INTF_SEL
*/
spinlock_t resource_lock;
- struct mdp_irq error_handler;
+ bool rpm_enabled;
- struct {
- volatile unsigned long enabled_mask;
- struct irq_domain *domain;
- } irqcontroller;
+ struct mdp_irq error_handler;
};
#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
new file mode 100644
index 000000000..d444a6901
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+
+#include "msm_drv.h"
+#include "mdp5_kms.h"
+
+/*
+ * If needed, this can become more specific: something like struct mdp5_mdss,
+ * which contains a 'struct msm_mdss base' member.
+ */
+struct msm_mdss {
+ struct drm_device *dev;
+
+ void __iomem *mmio, *vbif;
+
+ struct regulator *vdd;
+
+ struct {
+ volatile unsigned long enabled_mask;
+ struct irq_domain *domain;
+ } irqcontroller;
+};
+
+static inline void mdss_write(struct msm_mdss *mdss, u32 reg, u32 data)
+{
+ msm_writel(data, mdss->mmio + reg);
+}
+
+static inline u32 mdss_read(struct msm_mdss *mdss, u32 reg)
+{
+ return msm_readl(mdss->mmio + reg);
+}
+
+static irqreturn_t mdss_irq(int irq, void *arg)
+{
+ struct msm_mdss *mdss = arg;
+ u32 intr;
+
+ intr = mdss_read(mdss, REG_MDSS_HW_INTR_STATUS);
+
+ VERB("intr=%08x", intr);
+
+ while (intr) {
+ irq_hw_number_t hwirq = fls(intr) - 1;
+
+ generic_handle_irq(irq_find_mapping(
+ mdss->irqcontroller.domain, hwirq));
+ intr &= ~(1 << hwirq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * interrupt-controller implementation, so sub-blocks (MDP/HDMI/eDP/DSI/etc)
+ * can register to get their irqs delivered
+ */
+
+#define VALID_IRQS (MDSS_HW_INTR_STATUS_INTR_MDP | \
+ MDSS_HW_INTR_STATUS_INTR_DSI0 | \
+ MDSS_HW_INTR_STATUS_INTR_DSI1 | \
+ MDSS_HW_INTR_STATUS_INTR_HDMI | \
+ MDSS_HW_INTR_STATUS_INTR_EDP)
+
+static void mdss_hw_mask_irq(struct irq_data *irqd)
+{
+ struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd);
+
+ smp_mb__before_atomic();
+ clear_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask);
+ smp_mb__after_atomic();
+}
+
+static void mdss_hw_unmask_irq(struct irq_data *irqd)
+{
+ struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd);
+
+ smp_mb__before_atomic();
+ set_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask);
+ smp_mb__after_atomic();
+}
+
+static struct irq_chip mdss_hw_irq_chip = {
+ .name = "mdss",
+ .irq_mask = mdss_hw_mask_irq,
+ .irq_unmask = mdss_hw_unmask_irq,
+};
+
+static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct msm_mdss *mdss = d->host_data;
+
+ if (!(VALID_IRQS & (1 << hwirq)))
+ return -EPERM;
+
+ irq_set_chip_and_handler(irq, &mdss_hw_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, mdss);
+
+ return 0;
+}
+
+static struct irq_domain_ops mdss_hw_irqdomain_ops = {
+ .map = mdss_hw_irqdomain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+
+static int mdss_irq_domain_init(struct msm_mdss *mdss)
+{
+ struct device *dev = mdss->dev->dev;
+ struct irq_domain *d;
+
+ d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
+ mdss);
+ if (!d) {
+ dev_err(dev, "mdss irq domain add failed\n");
+ return -ENXIO;
+ }
+
+ mdss->irqcontroller.enabled_mask = 0;
+ mdss->irqcontroller.domain = d;
+
+ return 0;
+}
+
+void msm_mdss_destroy(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_mdss *mdss = priv->mdss;
+
+ if (!mdss)
+ return;
+
+ irq_domain_remove(mdss->irqcontroller.domain);
+ mdss->irqcontroller.domain = NULL;
+
+ regulator_disable(mdss->vdd);
+
+ pm_runtime_put_sync(dev->dev);
+
+ pm_runtime_disable(dev->dev);
+}
+
+int msm_mdss_init(struct drm_device *dev)
+{
+ struct platform_device *pdev = dev->platformdev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_mdss *mdss;
+ int ret;
+
+ DBG("");
+
+ if (!of_device_is_compatible(dev->dev->of_node, "qcom,mdss"))
+ return 0;
+
+ mdss = devm_kzalloc(dev->dev, sizeof(*mdss), GFP_KERNEL);
+ if (!mdss) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ mdss->dev = dev;
+
+ mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS");
+ if (IS_ERR(mdss->mmio)) {
+ ret = PTR_ERR(mdss->mmio);
+ goto fail;
+ }
+
+ mdss->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
+ if (IS_ERR(mdss->vbif)) {
+ ret = PTR_ERR(mdss->vbif);
+ goto fail;
+ }
+
+ /* Regulator to enable GDSCs in downstream kernels */
+ mdss->vdd = devm_regulator_get(dev->dev, "vdd");
+ if (IS_ERR(mdss->vdd)) {
+ ret = PTR_ERR(mdss->vdd);
+ goto fail;
+ }
+
+ ret = regulator_enable(mdss->vdd);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable regulator vdd: %d\n",
+ ret);
+ goto fail;
+ }
+
+ ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
+ mdss_irq, 0, "mdss_isr", mdss);
+ if (ret) {
+ dev_err(dev->dev, "failed to init irq: %d\n", ret);
+ goto fail_irq;
+ }
+
+ ret = mdss_irq_domain_init(mdss);
+ if (ret) {
+ dev_err(dev->dev, "failed to init sub-block irqs: %d\n", ret);
+ goto fail_irq;
+ }
+
+ priv->mdss = mdss;
+
+ pm_runtime_enable(dev->dev);
+
+ /*
+ * TODO: This is needed as the MDSS GDSC is only tied to MDSS's power
+ * domain. Remove this once runtime PM is adapted for all the devices.
+ */
+ pm_runtime_get_sync(dev->dev);
+
+ return 0;
+fail_irq:
+ regulator_disable(mdss->vdd);
+fail:
+ return ret;
+}
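
The new mdss_irq() handler above demultiplexes the top-level status word highest-bit-first with fls(), handing each set bit to the irq domain. A standalone sketch of that loop shape, with __builtin_clz standing in for the kernel's fls() (illustrative only; generic_handle_irq() would sit where the printf is):

#include <stdint.h>
#include <stdio.h>

/* Portable stand-in for the kernel's fls(): 1-based index of the highest set bit. */
static int fls32(uint32_t x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	uint32_t intr = (1u << 8) | (1u << 4) | (1u << 0);	/* three pending sources */

	/* Same loop shape as mdss_irq(): service the highest hwirq first. */
	while (intr) {
		int hwirq = fls32(intr) - 1;
		printf("dispatch hwirq %d\n", hwirq);
		intr &= ~(1u << hwirq);
	}
	return 0;
}
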
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 6f425c25d..27d7b55b5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -42,7 +42,7 @@
*
* configured:
* The block is allocated to some client, and assigned to that
- * client in MDP5_MDP_SMP_ALLOC registers.
+ * client in MDP5_SMP_ALLOC registers.
*
* inuse:
* The block is being actively used by a client.
@@ -59,7 +59,7 @@
* mdp5_smp_commit.
*
* 2) mdp5_smp_configure():
- * As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers
+ * As hw is programmed, before FLUSH, MDP5_SMP_ALLOC registers
* are configured for the union(pending, inuse)
* Current pending is copied to configured.
* It is assumed that mdp5_smp_request and mdp5_smp_configure not run
@@ -311,25 +311,25 @@ static void update_smp_state(struct mdp5_smp *smp,
int idx = blk / 3;
int fld = blk % 3;
- val = mdp5_read(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx));
+ val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx));
switch (fld) {
case 0:
- val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK;
- val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0(cid);
+ val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
+ val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
break;
case 1:
- val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK;
- val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1(cid);
+ val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
+ val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
break;
case 2:
- val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK;
- val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2(cid);
+ val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
+ val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
break;
}
- mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx), val);
- mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_R_REG(0, idx), val);
+ mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val);
+ mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val);
}
}
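
update_smp_state() packs three 8-bit client ids per 32-bit SMP_ALLOC register, so block N lives in word N/3, byte lane N%3. A standalone sketch of that packing (the driver uses the generated CLIENTn masks rather than these open-coded shifts):

#include <assert.h>
#include <stdint.h>

/* Assign block 'blk' to client 'cid': clear its byte lane, install the id. */
static uint32_t assign_block(uint32_t reg, int blk, uint8_t cid)
{
	int fld = blk % 3;			/* byte lane within the word  */
	reg &= ~(0xffu << (8 * fld));		/* clear the CLIENTn field    */
	reg |= (uint32_t)cid << (8 * fld);	/* install the new client id  */
	return reg;
}

int main(void)
{
	uint32_t reg = 0;
	reg = assign_block(reg, 4, 0x12);	/* blk 4 -> word 4/3 = 1, lane 4%3 = 1 */
	assert(reg == 0x00001200);
	return 0;
}
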
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index e3892c263..4a8a6f1f1 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -84,17 +84,12 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
struct msm_drm_private *priv = old_state->dev->dev_private;
struct msm_kms *kms = priv->kms;
- int ncrtcs = old_state->dev->mode_config.num_crtc;
int i;
- for (i = 0; i < ncrtcs; i++) {
- crtc = old_state->crtcs[i];
-
- if (!crtc)
- continue;
-
+ for_each_crtc_in_state(old_state, crtc, crtc_state, i) {
if (!crtc->state->enable)
continue;
@@ -192,9 +187,11 @@ int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool nonblock)
{
struct msm_drm_private *priv = dev->dev_private;
- int nplanes = dev->mode_config.num_total_plane;
- int ncrtcs = dev->mode_config.num_crtc;
struct msm_commit *c;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
int i, ret;
ret = drm_atomic_helper_prepare_planes(dev, state);
@@ -210,28 +207,18 @@ int msm_atomic_commit(struct drm_device *dev,
/*
* Figure out what crtcs we have:
*/
- for (i = 0; i < ncrtcs; i++) {
- struct drm_crtc *crtc = state->crtcs[i];
- if (!crtc)
- continue;
- c->crtc_mask |= (1 << drm_crtc_index(crtc));
- }
+ for_each_crtc_in_state(state, crtc, crtc_state, i)
+ c->crtc_mask |= drm_crtc_mask(crtc);
/*
* Figure out what fence to wait for:
*/
- for (i = 0; i < nplanes; i++) {
- struct drm_plane *plane = state->planes[i];
- struct drm_plane_state *new_state = state->plane_states[i];
-
- if (!plane)
- continue;
-
- if ((plane->state->fb != new_state->fb) && new_state->fb) {
- struct drm_gem_object *obj = msm_framebuffer_bo(new_state->fb, 0);
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ if ((plane->state->fb != plane_state->fb) && plane_state->fb) {
+ struct drm_gem_object *obj = msm_framebuffer_bo(plane_state->fb, 0);
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- new_state->fence = reservation_object_get_excl_rcu(msm_obj->resv);
+ plane_state->fence = reservation_object_get_excl_rcu(msm_obj->resv);
}
}
@@ -251,7 +238,7 @@ int msm_atomic_commit(struct drm_device *dev,
* the software side now.
*/
- drm_atomic_helper_swap_state(dev, state);
+ drm_atomic_helper_swap_state(state, true);
/*
* Everything below can be run asynchronously without the need to grab
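
The two hunks above replace open-coded walks over state->crtcs[]/state->planes[] with the for_each_*_in_state iterators from drm_atomic.h, which visit only the objects actually carried by the state and so remove the NULL checks and the num_crtc/num_total_plane bounds. A minimal sketch of the pattern, assuming the 4.8-era iterator signatures:

/* Sketch: collect the mask of CRTCs touched by an atomic state. */
static u32 crtcs_in_state_mask(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	u32 mask = 0;
	int i;

	for_each_crtc_in_state(state, crtc, crtc_state, i)
		mask |= drm_crtc_mask(crtc);  /* 1 << drm_crtc_index(crtc) */

	return mask;
}
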
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 9c654092e..8a0237008 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -21,6 +21,16 @@
#include "msm_gpu.h"
#include "msm_kms.h"
+
+/*
+ * MSM driver version:
+ * - 1.0.0 - initial interface
+ * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
+ */
+#define MSM_VERSION_MAJOR 1
+#define MSM_VERSION_MINOR 1
+#define MSM_VERSION_PATCHLEVEL 0
+
static void msm_fb_output_poll_changed(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -195,9 +205,9 @@ static int msm_drm_uninit(struct device *dev)
kfree(vbl_ev);
}
- drm_kms_helper_poll_fini(ddev);
+ msm_gem_shrinker_cleanup(ddev);
- drm_connector_unregister_all(ddev);
+ drm_kms_helper_poll_fini(ddev);
drm_dev_unregister(ddev);
@@ -217,10 +227,8 @@ static int msm_drm_uninit(struct device *dev)
flush_workqueue(priv->atomic_wq);
destroy_workqueue(priv->atomic_wq);
- if (kms) {
- pm_runtime_disable(dev);
+ if (kms)
kms->funcs->destroy(kms);
- }
if (gpu) {
mutex_lock(&ddev->struct_mutex);
@@ -230,15 +238,16 @@ static int msm_drm_uninit(struct device *dev)
}
if (priv->vram.paddr) {
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+ unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
drm_mm_takedown(&priv->vram.mm);
dma_free_attrs(dev, priv->vram.size, NULL,
- priv->vram.paddr, &attrs);
+ priv->vram.paddr, attrs);
}
component_unbind_all(dev, ddev);
+ msm_mdss_destroy(ddev);
+
ddev->dev_private = NULL;
drm_dev_unref(ddev);
@@ -284,6 +293,7 @@ static int msm_init_vram(struct drm_device *dev)
if (node) {
struct resource r;
ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
if (ret)
return ret;
size = r.end - r.start;
@@ -299,21 +309,21 @@ static int msm_init_vram(struct drm_device *dev)
}
if (size) {
- DEFINE_DMA_ATTRS(attrs);
+ unsigned long attrs = 0;
void *p;
priv->vram.size = size;
drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
- dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
+ attrs |= DMA_ATTR_WRITE_COMBINE;
/* note that for no-kernel-mapping, the vaddr returned
* is bogus, but non-null if allocation succeeded:
*/
p = dma_alloc_attrs(dev->dev, size,
- &priv->vram.paddr, GFP_KERNEL, &attrs);
+ &priv->vram.paddr, GFP_KERNEL, attrs);
if (!p) {
dev_err(dev->dev, "failed to allocate VRAM\n");
priv->vram.paddr = 0;
@@ -352,6 +362,14 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
}
ddev->dev_private = priv;
+ priv->dev = ddev;
+
+ ret = msm_mdss_init(ddev);
+ if (ret) {
+ kfree(priv);
+ drm_dev_unref(ddev);
+ return ret;
+ }
priv->wq = alloc_ordered_workqueue("msm", 0);
priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0);
@@ -367,6 +385,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
/* Bind all our sub-components: */
ret = component_bind_all(dev, ddev);
if (ret) {
+ msm_mdss_destroy(ddev);
kfree(priv);
drm_dev_unref(ddev);
return ret;
@@ -376,9 +395,12 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
if (ret)
goto fail;
+ msm_gem_shrinker_init(ddev);
+
switch (get_mdp_ver(pdev)) {
case 4:
kms = mdp4_kms_init(ddev);
+ priv->kms = kms;
break;
case 5:
kms = mdp5_kms_init(ddev);
@@ -400,10 +422,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
goto fail;
}
- priv->kms = kms;
-
if (kms) {
- pm_runtime_enable(dev);
ret = kms->funcs->hw_init(kms);
if (ret) {
dev_err(dev, "kms hw init failed: %d\n", ret);
@@ -419,24 +438,20 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
goto fail;
}
- pm_runtime_get_sync(dev);
- ret = drm_irq_install(ddev, platform_get_irq(pdev, 0));
- pm_runtime_put_sync(dev);
- if (ret < 0) {
- dev_err(dev, "failed to install IRQ handler\n");
- goto fail;
+ if (kms) {
+ pm_runtime_get_sync(dev);
+ ret = drm_irq_install(ddev, kms->irq);
+ pm_runtime_put_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to install IRQ handler\n");
+ goto fail;
+ }
}
ret = drm_dev_register(ddev, 0);
if (ret)
goto fail;
- ret = drm_connector_register_all(ddev);
- if (ret) {
- dev_err(dev, "failed to register connectors\n");
- goto fail;
- }
-
drm_mode_config_reset(ddev);
#ifdef CONFIG_DRM_FBDEV_EMULATION
@@ -690,6 +705,44 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
return msm_wait_fence(priv->gpu->fctx, args->fence, &timeout, true);
}
+static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_msm_gem_madvise *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ switch (args->madv) {
+ case MSM_MADV_DONTNEED:
+ case MSM_MADV_WILLNEED:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (!obj) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ ret = msm_gem_madvise(obj, args->madv);
+ if (ret >= 0) {
+ args->retained = ret;
+ ret = 0;
+ }
+
+ drm_gem_object_unreference(obj);
+
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_AUTH|DRM_RENDER_ALLOW),
@@ -698,6 +751,7 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct vm_operations_struct vm_ops = {
@@ -730,7 +784,6 @@ static struct drm_driver msm_driver = {
.open = msm_open,
.preclose = msm_preclose,
.lastclose = msm_lastclose,
- .set_busid = drm_platform_set_busid,
.irq_handler = msm_irq,
.irq_preinstall = msm_irq_preinstall,
.irq_postinstall = msm_irq_postinstall,
@@ -764,8 +817,9 @@ static struct drm_driver msm_driver = {
.name = "msm",
.desc = "MSM Snapdragon DRM",
.date = "20130625",
- .major = 1,
- .minor = 0,
+ .major = MSM_VERSION_MAJOR,
+ .minor = MSM_VERSION_MINOR,
+ .patchlevel = MSM_VERSION_PATCHLEVEL,
};
#ifdef CONFIG_PM_SLEEP
@@ -805,22 +859,146 @@ static int compare_of(struct device *dev, void *data)
return dev->of_node == data;
}
-static int add_components(struct device *dev, struct component_match **matchptr,
- const char *name)
+/*
+ * Identify what components need to be added by parsing which remote-endpoints
+ * our MDP output ports are connected to. In the case of LVDS on MDP4, there
+ * is no external component that we need to add since LVDS is within MDP4
+ * itself.
+ */
+static int add_components_mdp(struct device *mdp_dev,
+ struct component_match **matchptr)
{
- struct device_node *np = dev->of_node;
- unsigned i;
+ struct device_node *np = mdp_dev->of_node;
+ struct device_node *ep_node;
+ struct device *master_dev;
+
+ /*
+ * on MDP4 based platforms, the MDP platform device is the component
+ * master that adds other display interface components to itself.
+ *
+ * on MDP5 based platforms, the MDSS platform device is the component
+ * master that adds MDP5 and other display interface components to
+ * itself.
+ */
+ if (of_device_is_compatible(np, "qcom,mdp4"))
+ master_dev = mdp_dev;
+ else
+ master_dev = mdp_dev->parent;
- for (i = 0; ; i++) {
- struct device_node *node;
+ for_each_endpoint_of_node(np, ep_node) {
+ struct device_node *intf;
+ struct of_endpoint ep;
+ int ret;
- node = of_parse_phandle(np, name, i);
- if (!node)
- break;
+ ret = of_graph_parse_endpoint(ep_node, &ep);
+ if (ret) {
+ dev_err(mdp_dev, "unable to parse port endpoint\n");
+ of_node_put(ep_node);
+ return ret;
+ }
+
+ /*
+ * The LCDC/LVDS port on MDP4 is a special case where the
+ * remote-endpoint isn't a component that we need to add
+ */
+ if (of_device_is_compatible(np, "qcom,mdp4") &&
+ ep.port == 0) {
+ of_node_put(ep_node);
+ continue;
+ }
+
+ /*
+ * It's okay if some of the ports don't have a remote endpoint
+ * specified. It just means that the port isn't connected to
+ * any external interface.
+ */
+ intf = of_graph_get_remote_port_parent(ep_node);
+ if (!intf) {
+ of_node_put(ep_node);
+ continue;
+ }
+
+ component_match_add(master_dev, matchptr, compare_of, intf);
+
+ of_node_put(intf);
+ of_node_put(ep_node);
+ }
+
+ return 0;
+}
+
+static int compare_name_mdp(struct device *dev, void *data)
+{
+ return (strstr(dev_name(dev), "mdp") != NULL);
+}
+
+static int add_display_components(struct device *dev,
+ struct component_match **matchptr)
+{
+ struct device *mdp_dev;
+ int ret;
+
+ /*
+ * MDP5 based devices don't have a flat hierarchy. There is a top level
+ * parent: MDSS, and children: MDP5, DSI, HDMI, eDP etc. Populate the
+ * child devices, find the MDP5 node, and then add the interfaces
+ * to our components list.
+ */
+ if (of_device_is_compatible(dev->of_node, "qcom,mdss")) {
+ ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (ret) {
+ dev_err(dev, "failed to populate child devices\n");
+ return ret;
+ }
+
+ mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
+ if (!mdp_dev) {
+ dev_err(dev, "failed to find MDSS MDP node\n");
+ of_platform_depopulate(dev);
+ return -ENODEV;
+ }
+
+ put_device(mdp_dev);
- component_match_add(dev, matchptr, compare_of, node);
+ /* add the MDP component itself */
+ component_match_add(dev, matchptr, compare_of,
+ mdp_dev->of_node);
+ } else {
+ /* MDP4 */
+ mdp_dev = dev;
}
+ ret = add_components_mdp(mdp_dev, matchptr);
+ if (ret)
+ of_platform_depopulate(dev);
+
+ return ret;
+}
+
+/*
+ * We don't know what the best binding is to link the gpu with the drm device.
+ * For now, we just hunt for all the possible gpus that we support, and add them
+ * as components.
+ */
+static const struct of_device_id msm_gpu_match[] = {
+ { .compatible = "qcom,adreno-3xx" },
+ { .compatible = "qcom,kgsl-3d0" },
+ { },
+};
+
+static int add_gpu_components(struct device *dev,
+ struct component_match **matchptr)
+{
+ struct device_node *np;
+
+ np = of_find_matching_node(NULL, msm_gpu_match);
+ if (!np)
+ return 0;
+
+ component_match_add(dev, matchptr, compare_of, np);
+
+ of_node_put(np);
+
return 0;
}
@@ -846,9 +1024,15 @@ static const struct component_master_ops msm_drm_ops = {
static int msm_pdev_probe(struct platform_device *pdev)
{
struct component_match *match = NULL;
+ int ret;
- add_components(&pdev->dev, &match, "connectors");
- add_components(&pdev->dev, &match, "gpus");
+ ret = add_display_components(&pdev->dev, &match);
+ if (ret)
+ return ret;
+
+ ret = add_gpu_components(&pdev->dev, &match);
+ if (ret)
+ return ret;
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
@@ -857,20 +1041,14 @@ static int msm_pdev_probe(struct platform_device *pdev)
static int msm_pdev_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &msm_drm_ops);
+ of_platform_depopulate(&pdev->dev);
return 0;
}
-static const struct platform_device_id msm_id[] = {
- { "mdp", 0 },
- { }
-};
-
static const struct of_device_id dt_match[] = {
- { .compatible = "qcom,mdp4", .data = (void *) 4 }, /* mdp4 */
- { .compatible = "qcom,mdp5", .data = (void *) 5 }, /* mdp5 */
- /* to support downstream DT files */
- { .compatible = "qcom,mdss_mdp", .data = (void *) 5 }, /* mdp5 */
+ { .compatible = "qcom,mdp4", .data = (void *)4 }, /* MDP4 */
+ { .compatible = "qcom,mdss", .data = (void *)5 }, /* MDP5 MDSS */
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
@@ -883,12 +1061,12 @@ static struct platform_driver msm_platform_driver = {
.of_match_table = dt_match,
.pm = &msm_pm_ops,
},
- .id_table = msm_id,
};
static int __init msm_drm_register(void)
{
DBG("init");
+ msm_mdp_register();
msm_dsi_register();
msm_edp_register();
msm_hdmi_register();
@@ -904,6 +1082,7 @@ static void __exit msm_drm_unregister(void)
adreno_unregister();
msm_edp_unregister();
msm_dsi_unregister();
+ msm_mdp_unregister();
}
module_init(msm_drm_register);
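
For reference, a hedged sketch of driving the new MADVISE ioctl from userspace; it assumes the UAPI header exposes DRM_IOCTL_MSM_GEM_MADVISE and a drm_msm_gem_madvise struct with handle/madv/retained fields matching the handler above:

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/msm_drm.h>  /* assumed to define DRM_IOCTL_MSM_GEM_MADVISE */

/* Returns 1 if the backing pages were retained, 0 if already purged,
 * negative errno on failure.
 */
static int gem_madvise(int fd, uint32_t handle, uint32_t madv)
{
	struct drm_msm_gem_madvise req;

	memset(&req, 0, sizeof(req));
	req.handle = handle;
	req.madv = madv;  /* MSM_MADV_DONTNEED or MSM_MADV_WILLNEED */

	if (ioctl(fd, DRM_IOCTL_MSM_GEM_MADVISE, &req))
		return -errno;

	return req.retained;
}
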
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 16ae246f7..d0da52f2a 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -46,6 +46,7 @@
struct msm_kms;
struct msm_gpu;
struct msm_mmu;
+struct msm_mdss;
struct msm_rd_state;
struct msm_perf_state;
struct msm_gem_submit;
@@ -77,11 +78,16 @@ struct msm_vblank_ctrl {
struct msm_drm_private {
+ struct drm_device *dev;
+
struct msm_kms *kms;
/* subordinate devices, if present: */
struct platform_device *gpu_pdev;
+ /* top level MDSS wrapper device (for MDP5 only) */
+ struct msm_mdss *mdss;
+
/* possibly this should be in the kms component, but it is
* shared by both mdp4 and mdp5..
*/
@@ -147,6 +153,9 @@ struct msm_drm_private {
struct drm_mm mm;
} vram;
+ struct notifier_block vmap_notifier;
+ struct shrinker shrinker;
+
struct msm_vblank_ctrl vblank_ctrl;
/* task holding struct_mutex.. currently only used in submit path
@@ -171,6 +180,9 @@ void msm_gem_submit_free(struct msm_gem_submit *submit);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
+void msm_gem_shrinker_init(struct drm_device *dev);
+void msm_gem_shrinker_cleanup(struct drm_device *dev);
+
int msm_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma);
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
@@ -195,8 +207,13 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
int msm_gem_prime_pin(struct drm_gem_object *obj);
void msm_gem_prime_unpin(struct drm_gem_object *obj);
-void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
-void *msm_gem_vaddr(struct drm_gem_object *obj);
+void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj);
+void *msm_gem_get_vaddr(struct drm_gem_object *obj);
+void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
+void msm_gem_put_vaddr(struct drm_gem_object *obj);
+int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
+void msm_gem_purge(struct drm_gem_object *obj);
+void msm_gem_vunmap(struct drm_gem_object *obj);
int msm_gem_sync_object(struct drm_gem_object *obj,
struct msm_fence_context *fctx, bool exclusive);
void msm_gem_move_to_active(struct drm_gem_object *obj,
@@ -263,6 +280,9 @@ static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
}
#endif
+void __init msm_mdp_register(void);
+void __exit msm_mdp_unregister(void);
+
#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 461dc8b87..95cf8fe72 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -49,24 +49,16 @@ static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
for (i = 0; i < n; i++) {
struct drm_gem_object *bo = msm_fb->planes[i];
- if (bo)
- drm_gem_object_unreference_unlocked(bo);
+
+ drm_gem_object_unreference_unlocked(bo);
}
kfree(msm_fb);
}
-static int msm_framebuffer_dirty(struct drm_framebuffer *fb,
- struct drm_file *file_priv, unsigned flags, unsigned color,
- struct drm_clip_rect *clips, unsigned num_clips)
-{
- return 0;
-}
-
static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
.create_handle = msm_framebuffer_create_handle,
.destroy = msm_framebuffer_destroy,
- .dirty = msm_framebuffer_dirty,
};
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index c6cf837c5..ffd4a338c 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -158,7 +158,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
dev->mode_config.fb_base = paddr;
- fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo);
+ fbi->screen_base = msm_gem_get_vaddr_locked(fbdev->bo);
if (IS_ERR(fbi->screen_base)) {
ret = PTR_ERR(fbi->screen_base);
goto fail_unlock;
@@ -188,21 +188,7 @@ fail:
return ret;
}
-static void msm_crtc_fb_gamma_set(struct drm_crtc *crtc,
- u16 red, u16 green, u16 blue, int regno)
-{
- DBG("fbdev: set gamma");
-}
-
-static void msm_crtc_fb_gamma_get(struct drm_crtc *crtc,
- u16 *red, u16 *green, u16 *blue, int regno)
-{
- DBG("fbdev: get gamma");
-}
-
static const struct drm_fb_helper_funcs msm_fb_helper_funcs = {
- .gamma_set = msm_crtc_fb_gamma_set,
- .gamma_get = msm_crtc_fb_gamma_get,
.fb_probe = msm_fbdev_create,
};
@@ -265,6 +251,7 @@ void msm_fbdev_free(struct drm_device *dev)
/* this will free the backing object */
if (fbdev->fb) {
+ msm_gem_put_vaddr(fbdev->bo);
drm_framebuffer_unregister_private(fbdev->fb);
drm_framebuffer_remove(fbdev->fb);
}
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 46ffcbf2f..85f3047e0 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -285,6 +285,26 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
return offset;
}
+static void
+put_iova(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct msm_drm_private *priv = obj->dev->dev_private;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ int id;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
+ struct msm_mmu *mmu = priv->mmus[id];
+ if (mmu && msm_obj->domain[id].iova) {
+ uint32_t offset = msm_obj->domain[id].iova;
+ mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
+ msm_obj->domain[id].iova = 0;
+ }
+ }
+}
+
/* should be called under struct_mutex.. although it can be called
* from atomic context without struct_mutex to acquire an extra
* iova ref if you know one is already held.
@@ -397,7 +417,7 @@ fail:
return ret;
}
-void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
+void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
@@ -410,18 +430,91 @@ void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
if (msm_obj->vaddr == NULL)
return ERR_PTR(-ENOMEM);
}
+ msm_obj->vmap_count++;
return msm_obj->vaddr;
}
-void *msm_gem_vaddr(struct drm_gem_object *obj)
+void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
void *ret;
mutex_lock(&obj->dev->struct_mutex);
- ret = msm_gem_vaddr_locked(obj);
+ ret = msm_gem_get_vaddr_locked(obj);
mutex_unlock(&obj->dev->struct_mutex);
return ret;
}
+void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+ WARN_ON(msm_obj->vmap_count < 1);
+ msm_obj->vmap_count--;
+}
+
+void msm_gem_put_vaddr(struct drm_gem_object *obj)
+{
+ mutex_lock(&obj->dev->struct_mutex);
+ msm_gem_put_vaddr_locked(obj);
+ mutex_unlock(&obj->dev->struct_mutex);
+}
+
+/* Update madvise status, returns true if not purged, else
+ * false or -errno.
+ */
+int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+
+ if (msm_obj->madv != __MSM_MADV_PURGED)
+ msm_obj->madv = madv;
+
+ return (msm_obj->madv != __MSM_MADV_PURGED);
+}
+
+void msm_gem_purge(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(!is_purgeable(msm_obj));
+ WARN_ON(obj->import_attach);
+
+ put_iova(obj);
+
+ msm_gem_vunmap(obj);
+
+ put_pages(obj);
+
+ msm_obj->madv = __MSM_MADV_PURGED;
+
+ drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
+ drm_gem_free_mmap_offset(obj);
+
+ /* Our goal here is to return as much of the memory as
+ * possible back to the system as we are called from OOM.
+ * To do this we must instruct the shmfs to drop all of its
+ * backing pages, *now*.
+ */
+ shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
+
+ invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
+ 0, (loff_t)-1);
+}
+
+void msm_gem_vunmap(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
+ return;
+
+ vunmap(msm_obj->vaddr);
+ msm_obj->vaddr = NULL;
+}
+
/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
struct msm_fence_context *fctx, bool exclusive)
@@ -473,6 +566,7 @@ void msm_gem_move_to_active(struct drm_gem_object *obj,
struct msm_gpu *gpu, bool exclusive, struct fence *fence)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
msm_obj->gpu = gpu;
if (exclusive)
reservation_object_add_excl_fence(msm_obj->resv, fence);
@@ -541,13 +635,27 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
struct reservation_object_list *fobj;
struct fence *fence;
uint64_t off = drm_vma_node_start(&obj->vma_node);
+ const char *madv;
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
- seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu\n",
+ switch (msm_obj->madv) {
+ case __MSM_MADV_PURGED:
+ madv = " purged";
+ break;
+ case MSM_MADV_DONTNEED:
+ madv = " purgeable";
+ break;
+ case MSM_MADV_WILLNEED:
+ default:
+ madv = "";
+ break;
+ }
+
+ seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n",
msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
obj->name, obj->refcount.refcount.counter,
- off, msm_obj->vaddr, obj->size);
+ off, msm_obj->vaddr, obj->size, madv);
rcu_read_lock();
fobj = rcu_dereference(robj->fence);
@@ -587,9 +695,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
void msm_gem_free_object(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- int id;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -598,13 +704,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
list_del(&msm_obj->mm_list);
- for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
- struct msm_mmu *mmu = priv->mmus[id];
- if (mmu && msm_obj->domain[id].iova) {
- uint32_t offset = msm_obj->domain[id].iova;
- mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
- }
- }
+ put_iova(obj);
if (obj->import_attach) {
if (msm_obj->vaddr)
@@ -618,7 +718,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
drm_prime_gem_destroy(obj, msm_obj->sgt);
} else {
- vunmap(msm_obj->vaddr);
+ msm_gem_vunmap(obj);
put_pages(obj);
}
@@ -697,6 +797,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
msm_obj->vram_node = (void *)&msm_obj[1];
msm_obj->flags = flags;
+ msm_obj->madv = MSM_MADV_WILLNEED;
if (resv) {
msm_obj->resv = resv;
@@ -738,9 +839,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
return obj;
fail:
- if (obj)
- drm_gem_object_unreference(obj);
-
+ drm_gem_object_unreference(obj);
return ERR_PTR(ret);
}
@@ -783,8 +882,6 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
return obj;
fail:
- if (obj)
- drm_gem_object_unreference_unlocked(obj);
-
+ drm_gem_object_unreference_unlocked(obj);
return ERR_PTR(ret);
}
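
The vaddr API is now reference counted: every msm_gem_get_vaddr() must be balanced by msm_gem_put_vaddr() so vmap_count stays accurate and is_vunmapable() can tell the purge notifier when a mapping may be torn down. A minimal usage sketch under that contract (the payload copy is illustrative only):

static int fill_bo(struct drm_gem_object *obj, const void *src, size_t len)
{
	void *vaddr = msm_gem_get_vaddr(obj);  /* takes a vmap reference */

	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memcpy(vaddr, src, len);               /* illustrative payload copy */
	msm_gem_put_vaddr(obj);                /* drops the vmap reference */

	return 0;
}
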
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 9facd4b6f..b2f13cfe9 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -29,6 +29,16 @@ struct msm_gem_object {
uint32_t flags;
+ /**
+ * Advice: are the backing pages purgeable?
+ */
+ uint8_t madv;
+
+ /**
+ * count of active vmap'ing
+ */
+ uint8_t vmap_count;
+
/* An object is either:
* inactive - on priv->inactive_list
* active - on one of the gpu's active_list.. well, at
@@ -72,7 +82,16 @@ static inline bool is_active(struct msm_gem_object *msm_obj)
return msm_obj->gpu != NULL;
}
-#define MAX_CMDS 4
+static inline bool is_purgeable(struct msm_gem_object *msm_obj)
+{
+ return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
+ !msm_obj->base.dma_buf && !msm_obj->base.import_attach;
+}
+
+static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
+{
+ return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
+}
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
* associated with the cmdstream submission for synchronization (and
@@ -95,7 +114,7 @@ struct msm_gem_submit {
uint32_t size; /* in dwords */
uint32_t iova;
uint32_t idx; /* cmdstream buffer idx in bos[] */
- } cmd[MAX_CMDS];
+ } *cmd; /* array of size nr_cmds */
struct {
uint32_t flags;
struct msm_gem_object *obj;
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 6b90890fa..60bb29070 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -33,12 +33,12 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
void *msm_gem_prime_vmap(struct drm_gem_object *obj)
{
- return msm_gem_vaddr(obj);
+ return msm_gem_get_vaddr(obj);
}
void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
- /* TODO msm_gem_vunmap() */
+ msm_gem_put_vaddr(obj);
}
int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
new file mode 100644
index 000000000..283d2841b
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_gem.h"
+
+static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
+{
+ if (!mutex_is_locked(mutex))
+ return false;
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
+ return mutex->owner == task;
+#else
+ /* Since UP may be pre-empted, we cannot assume that we own the lock */
+ return false;
+#endif
+}
+
+static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
+{
+ if (!mutex_trylock(&dev->struct_mutex)) {
+ if (!mutex_is_locked_by(&dev->struct_mutex, current))
+ return false;
+ *unlock = false;
+ } else {
+ *unlock = true;
+ }
+
+ return true;
+}
+
+
+static unsigned long
+msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct msm_drm_private *priv =
+ container_of(shrinker, struct msm_drm_private, shrinker);
+ struct drm_device *dev = priv->dev;
+ struct msm_gem_object *msm_obj;
+ unsigned long count = 0;
+ bool unlock;
+
+ if (!msm_gem_shrinker_lock(dev, &unlock))
+ return 0;
+
+ list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
+ if (is_purgeable(msm_obj))
+ count += msm_obj->base.size >> PAGE_SHIFT;
+ }
+
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
+
+ return count;
+}
+
+static unsigned long
+msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct msm_drm_private *priv =
+ container_of(shrinker, struct msm_drm_private, shrinker);
+ struct drm_device *dev = priv->dev;
+ struct msm_gem_object *msm_obj;
+ unsigned long freed = 0;
+ bool unlock;
+
+ if (!msm_gem_shrinker_lock(dev, &unlock))
+ return SHRINK_STOP;
+
+ list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
+ if (freed >= sc->nr_to_scan)
+ break;
+ if (is_purgeable(msm_obj)) {
+ msm_gem_purge(&msm_obj->base);
+ freed += msm_obj->base.size >> PAGE_SHIFT;
+ }
+ }
+
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
+
+ if (freed > 0)
+ pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT);
+
+ return freed;
+}
+
+static int
+msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+ struct msm_drm_private *priv =
+ container_of(nb, struct msm_drm_private, vmap_notifier);
+ struct drm_device *dev = priv->dev;
+ struct msm_gem_object *msm_obj;
+ unsigned unmapped = 0;
+ bool unlock;
+
+ if (!msm_gem_shrinker_lock(dev, &unlock))
+ return NOTIFY_DONE;
+
+ list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
+ if (is_vunmapable(msm_obj)) {
+ msm_gem_vunmap(&msm_obj->base);
+ /* since we don't know any better, let's bail after a few
+ * and if necessary the shrinker will be invoked again.
+ * Seems better than unmapping *everything*
+ */
+ if (++unmapped >= 15)
+ break;
+ }
+ }
+
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
+
+ *(unsigned long *)ptr += unmapped;
+
+ if (unmapped > 0)
+ pr_info_ratelimited("Purging %u vmaps\n", unmapped);
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * msm_gem_shrinker_init - Initialize msm shrinker
+ * @dev: drm device
+ *
+ * This function registers and sets up the msm shrinker.
+ */
+void msm_gem_shrinker_init(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ priv->shrinker.count_objects = msm_gem_shrinker_count;
+ priv->shrinker.scan_objects = msm_gem_shrinker_scan;
+ priv->shrinker.seeks = DEFAULT_SEEKS;
+ WARN_ON(register_shrinker(&priv->shrinker));
+
+ priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
+ WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
+}
+
+/**
+ * msm_gem_shrinker_cleanup - Clean up msm shrinker
+ * @dev: drm device
+ *
+ * This function unregisters the msm shrinker.
+ */
+void msm_gem_shrinker_cleanup(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
+ unregister_shrinker(&priv->shrinker);
+}
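
Worth noting about the new file above: count_objects reports reclaim potential in pages, scan_objects returns the number of pages actually freed, and returning SHRINK_STOP when struct_mutex cannot be taken lets reclaim that was triggered from a path already holding the lock (the mutex_is_locked_by() check) proceed without deadlocking.
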
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index c1889d700..880d6a9af 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -29,10 +29,11 @@
#define BO_PINNED 0x2000
static struct msm_gem_submit *submit_create(struct drm_device *dev,
- struct msm_gpu *gpu, int nr)
+ struct msm_gpu *gpu, int nr_bos, int nr_cmds)
{
struct msm_gem_submit *submit;
- int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0]));
+ int sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
+ (nr_cmds * sizeof(*submit->cmd));
submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
if (!submit)
@@ -42,6 +43,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
submit->gpu = gpu;
submit->fence = NULL;
submit->pid = get_pid(task_pid(current));
+ submit->cmd = (void *)&submit->bos[nr_bos];
/* initially, until copy_from_user() and bo lookup succeeds: */
submit->nr_bos = 0;
@@ -296,7 +298,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
/* For now, just map the entire thing. Eventually we probably want
* to do it page-by-page, w/ kmap() if not vmap()d..
*/
- ptr = msm_gem_vaddr_locked(&obj->base);
+ ptr = msm_gem_get_vaddr_locked(&obj->base);
if (IS_ERR(ptr)) {
ret = PTR_ERR(ptr);
@@ -349,6 +351,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
last_offset = off;
}
+ msm_gem_put_vaddr_locked(&obj->base);
+
return 0;
}
@@ -386,16 +390,18 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (args->pipe != MSM_PIPE_3D0)
return -EINVAL;
- if (args->nr_cmds > MAX_CMDS)
- return -EINVAL;
-
- submit = submit_create(dev, gpu, args->nr_bos);
- if (!submit)
- return -ENOMEM;
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
- mutex_lock(&dev->struct_mutex);
priv->struct_mutex_task = current;
+ submit = submit_create(dev, gpu, args->nr_bos, args->nr_cmds);
+ if (!submit) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
ret = submit_lookup_objects(submit, args, file);
if (ret)
goto out;
@@ -480,6 +486,7 @@ out:
submit_cleanup(submit);
if (ret)
msm_gem_submit_free(submit);
+out_unlock:
priv->struct_mutex_task = NULL;
mutex_unlock(&dev->struct_mutex);
return ret;
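
submit_create() now sizes a single allocation for the header, the bos[] array, and the cmd array that replaced the fixed MAX_CMDS table, then points submit->cmd just past bos[]. A standalone sketch of the same trailing-arrays layout, with hypothetical element types:

#include <linux/slab.h>

struct demo_submit {
	unsigned int nr_bos, nr_cmds;
	struct { int dummy; } *cmd;   /* points into the same allocation */
	struct { int dummy; } bos[];  /* flexible array member */
};

static struct demo_submit *demo_create(unsigned int nr_bos, unsigned int nr_cmds)
{
	struct demo_submit *p;
	size_t sz = sizeof(*p) + nr_bos * sizeof(p->bos[0]) +
			nr_cmds * sizeof(*p->cmd);

	p = kmalloc(sz, GFP_KERNEL);
	if (!p)
		return NULL;

	p->nr_bos = nr_bos;
	p->nr_cmds = nr_cmds;
	p->cmd = (void *)&p->bos[nr_bos];  /* cmd[] lives right after bos[] */
	return p;
}
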
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index a7a0b6d9b..3a294d0da 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -59,10 +59,10 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
return -EINVAL;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
- u32 pa = sg_phys(sg) - sg->offset;
+ dma_addr_t pa = sg_phys(sg) - sg->offset;
size_t bytes = sg->length + sg->offset;
- VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
+ VERB("map[%d]: %08x %08lx(%zx)", i, da, (unsigned long)pa, bytes);
ret = iommu_map(domain, da, pa, bytes, prot);
if (ret)
@@ -101,7 +101,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
if (unmapped < bytes)
return unmapped;
- VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
+ VERB("unmap[%d]: %08x(%zx)", i, da, bytes);
BUG_ON(!PAGE_ALIGNED(bytes));
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index e32222c3d..40e41e5cd 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -61,10 +61,8 @@ struct msm_kms_funcs {
struct msm_kms {
const struct msm_kms_funcs *funcs;
- /* irq handling: */
- bool in_irq;
- struct list_head irq_list; /* list of mdp4_irq */
- uint32_t vblank_mask; /* irq bits set for userspace vblank */
+ /* irq number to be passed on to drm_irq_install */
+ int irq;
};
static inline void msm_kms_init(struct msm_kms *kms,
@@ -75,5 +73,7 @@ static inline void msm_kms_init(struct msm_kms *kms,
struct msm_kms *mdp4_kms_init(struct drm_device *dev);
struct msm_kms *mdp5_kms_init(struct drm_device *dev);
+int msm_mdss_init(struct drm_device *dev);
+void msm_mdss_destroy(struct drm_device *dev);
#endif /* __MSM_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/msm_perf.c b/drivers/gpu/drm/msm/msm_perf.c
index 830857c47..17fe4e53e 100644
--- a/drivers/gpu/drm/msm/msm_perf.c
+++ b/drivers/gpu/drm/msm/msm_perf.c
@@ -132,7 +132,7 @@ static ssize_t perf_read(struct file *file, char __user *buf,
size_t sz, loff_t *ppos)
{
struct msm_perf_state *perf = file->private_data;
- int n = 0, ret;
+ int n = 0, ret = 0;
mutex_lock(&perf->read_lock);
@@ -143,9 +143,10 @@ static ssize_t perf_read(struct file *file, char __user *buf,
}
n = min((int)sz, perf->buftot - perf->bufpos);
- ret = copy_to_user(buf, &perf->buf[perf->bufpos], n);
- if (ret)
+ if (copy_to_user(buf, &perf->buf[perf->bufpos], n)) {
+ ret = -EFAULT;
goto out;
+ }
perf->bufpos += n;
*ppos += n;
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 0857710c2..3a5fdfcd6 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -27,6 +27,11 @@
* This bypasses drm_debugfs_create_files() mainly because we need to use
* our own fops for a bit more control. In particular, we don't want to
* do anything if userspace doesn't have the debugfs file open.
+ *
+ * The module-param "rd_full", which defaults to false, enables snapshotting
+ * all (non-written) buffers in the submit, rather than just cmdstream bo's.
+ * This is useful to capture the contents of (for example) vbo's or textures,
+ * or shader programs (if not emitted inline in cmdstream).
*/
#ifdef CONFIG_DEBUG_FS
@@ -40,6 +45,10 @@
#include "msm_gpu.h"
#include "msm_gem.h"
+static bool rd_full = false;
+MODULE_PARM_DESC(rd_full, "If true, $debugfs/.../rd will snapshot all buffer contents");
+module_param_named(rd_full, rd_full, bool, 0600);
+
enum rd_sect_type {
RD_NONE,
RD_TEST, /* ascii text */
@@ -140,9 +149,10 @@ static ssize_t rd_read(struct file *file, char __user *buf,
goto out;
n = min_t(int, sz, circ_count_to_end(&rd->fifo));
- ret = copy_to_user(buf, fptr, n);
- if (ret)
+ if (copy_to_user(buf, fptr, n)) {
+ ret = -EFAULT;
goto out;
+ }
fifo->tail = (fifo->tail + n) & (BUF_SZ - 1);
*ppos += n;
@@ -277,6 +287,31 @@ void msm_rd_debugfs_cleanup(struct drm_minor *minor)
kfree(rd);
}
+static void snapshot_buf(struct msm_rd_state *rd,
+ struct msm_gem_submit *submit, int idx,
+ uint32_t iova, uint32_t size)
+{
+ struct msm_gem_object *obj = submit->bos[idx].obj;
+ const char *buf;
+
+ buf = msm_gem_get_vaddr_locked(&obj->base);
+ if (IS_ERR(buf))
+ return;
+
+ if (iova) {
+ buf += iova - submit->bos[idx].iova;
+ } else {
+ iova = submit->bos[idx].iova;
+ size = obj->base.size;
+ }
+
+ rd_write_section(rd, RD_GPUADDR,
+ (uint32_t[2]){ iova, size }, 8);
+ rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);
+
+ msm_gem_put_vaddr_locked(&obj->base);
+}
+
/* called under struct_mutex */
void msm_rd_dump_submit(struct msm_gem_submit *submit)
{
@@ -300,27 +335,27 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
- /* could be nice to have an option (module-param?) to snapshot
- * all the bo's associated with the submit. Handy to see vtx
- * buffers, etc. For now just the cmdstream bo's is enough.
- */
+ if (rd_full) {
+ for (i = 0; i < submit->nr_bos; i++) {
+ /* buffers that are written to probably don't start out
+ * with anything interesting:
+ */
+ if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
+ continue;
+
+ snapshot_buf(rd, submit, i, 0, 0);
+ }
+ }
for (i = 0; i < submit->nr_cmds; i++) {
- uint32_t idx = submit->cmd[i].idx;
uint32_t iova = submit->cmd[i].iova;
uint32_t szd = submit->cmd[i].size; /* in dwords */
- struct msm_gem_object *obj = submit->bos[idx].obj;
- const char *buf = msm_gem_vaddr_locked(&obj->base);
-
- if (IS_ERR(buf))
- continue;
- buf += iova - submit->bos[idx].iova;
-
- rd_write_section(rd, RD_GPUADDR,
- (uint32_t[2]){ iova, szd * 4 }, 8);
- rd_write_section(rd, RD_BUFFER_CONTENTS,
- buf, szd * 4);
+ /* snapshot cmdstream bo's (if we haven't already): */
+ if (!rd_full) {
+ snapshot_buf(rd, submit, submit->cmd[i].idx,
+ submit->cmd[i].iova, szd * 4);
+ }
switch (submit->cmd[i].type) {
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
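
Given the module_param_named(..., 0600) above, rd_full should be togglable at runtime via /sys/module/msm/parameters/rd_full (assuming the driver is built as the msm module), so the fuller snapshot mode can be enabled just before reproducing a hang.
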
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 42f5359cf..f326cf6a3 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -39,7 +39,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
goto fail;
}
- ring->start = msm_gem_vaddr_locked(ring->bo);
+ ring->start = msm_gem_get_vaddr_locked(ring->bo);
if (IS_ERR(ring->start)) {
ret = PTR_ERR(ring->start);
goto fail;
@@ -59,7 +59,9 @@ fail:
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
{
- if (ring->bo)
+ if (ring->bo) {
+ msm_gem_put_vaddr(ring->bo);
drm_gem_object_unreference_unlocked(ring->bo);
+ }
kfree(ring);
}
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 5ab13e793..2922a82cb 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -3,13 +3,7 @@ config DRM_NOUVEAU
depends on DRM && PCI
select FW_LOADER
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_TTM
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- select FB
- select FRAMEBUFFER_CONSOLE if !EXPERT
select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && INPUT
select X86_PLATFORM_DEVICES if ACPI && X86
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 6f318c54d..0cb7a18cd 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -785,14 +785,14 @@ nv_crtc_disable(struct drm_crtc *crtc)
nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
}
-static void
-nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start,
+static int
+nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
uint32_t size)
{
- int end = (start + size > 256) ? 256 : start + size, i;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ int i;
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
nv_crtc->lut.r[i] = r[i];
nv_crtc->lut.g[i] = g[i];
nv_crtc->lut.b[i] = b[i];
@@ -805,10 +805,12 @@ nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start,
*/
if (!nv_crtc->base.primary->fb) {
nv_crtc->lut.depth = 0;
- return;
+ return 0;
}
nv_crtc_gamma_load(crtc);
+
+ return 0;
}
static int
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index aea81a547..34c0f2f67 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -125,18 +125,8 @@ nv04_display_destroy(struct drm_device *dev)
struct nv04_display *disp = nv04_display(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_encoder *encoder;
- struct drm_crtc *crtc;
struct nouveau_crtc *nv_crtc;
- /* Turn every CRTC off. */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct drm_mode_set modeset = {
- .crtc = crtc,
- };
-
- drm_mode_set_config_internal(&modeset);
- }
-
/* Restore state */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.base.head)
encoder->enc_restore(&encoder->base.base);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index a665b78b2..434d1e29f 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -749,13 +749,8 @@ static int nv17_tv_set_property(struct drm_encoder *encoder,
/* Disable the crtc to ensure a full modeset is
* performed whenever it's turned on again. */
- if (crtc) {
- struct drm_mode_set modeset = {
- .crtc = crtc,
- };
-
- drm_mode_set_config_internal(&modeset);
- }
+ if (crtc)
+ drm_crtc_force_disable(crtc);
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
index 331620a52..287a7d6fa 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -29,6 +29,7 @@ struct nv_device_info_v0 {
#define NV_DEVICE_INFO_V0_FERMI 0x07
#define NV_DEVICE_INFO_V0_KEPLER 0x08
#define NV_DEVICE_INFO_V0_MAXWELL 0x09
+#define NV_DEVICE_INFO_V0_PASCAL 0x0a
__u8 family;
__u8 pad06[2];
__u64 ram_size;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 982aad8fa..e6e953753 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -39,6 +39,7 @@
#define KEPLER_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000a06f
#define KEPLER_CHANNEL_GPFIFO_B /* cla06f.h */ 0x0000a16f
#define MAXWELL_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000b06f
+#define PASCAL_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000c06f
#define NV50_DISP /* cl5070.h */ 0x00005070
#define G82_DISP /* cl5070.h */ 0x00008270
@@ -50,6 +51,8 @@
#define GK110_DISP /* cl5070.h */ 0x00009270
#define GM107_DISP /* cl5070.h */ 0x00009470
#define GM200_DISP /* cl5070.h */ 0x00009570
+#define GP100_DISP /* cl5070.h */ 0x00009770
+#define GP104_DISP /* cl5070.h */ 0x00009870
#define NV31_MPEG 0x00003174
#define G82_MPEG 0x00008274
@@ -86,6 +89,8 @@
#define GK110_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000927d
#define GM107_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000947d
#define GM200_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000957d
+#define GP100_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000977d
+#define GP104_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000987d
#define NV50_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000507e
#define G82_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000827e
@@ -105,6 +110,8 @@
#define MAXWELL_A /* cl9097.h */ 0x0000b097
#define MAXWELL_B /* cl9097.h */ 0x0000b197
+#define PASCAL_A /* cl9097.h */ 0x0000c097
+
#define NV74_BSP 0x000074b0
#define GT212_MSVLD 0x000085b1
@@ -128,6 +135,8 @@
#define FERMI_DMA 0x000090b5
#define KEPLER_DMA_COPY_A 0x0000a0b5
#define MAXWELL_DMA_COPY_A 0x0000b0b5
+#define PASCAL_DMA_COPY_A 0x0000c0b5
+#define PASCAL_DMA_COPY_B 0x0000c1b5
#define FERMI_DECOMPRESS 0x000090b8
@@ -137,6 +146,7 @@
#define KEPLER_COMPUTE_B 0x0000a1c0
#define MAXWELL_COMPUTE_A 0x0000b0c0
#define MAXWELL_COMPUTE_B 0x0000b1c0
+#define PASCAL_COMPUTE_A 0x0000c0c0
#define NV74_CIPHER 0x000074c1
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 126a85cc8..6bc712f32 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -33,7 +33,10 @@ enum nvkm_devidx {
NVKM_ENGINE_CE0,
NVKM_ENGINE_CE1,
NVKM_ENGINE_CE2,
- NVKM_ENGINE_CE_LAST = NVKM_ENGINE_CE2,
+ NVKM_ENGINE_CE3,
+ NVKM_ENGINE_CE4,
+ NVKM_ENGINE_CE5,
+ NVKM_ENGINE_CE_LAST = NVKM_ENGINE_CE5,
NVKM_ENGINE_CIPHER,
NVKM_ENGINE_DISP,
@@ -50,7 +53,8 @@ enum nvkm_devidx {
NVKM_ENGINE_NVENC0,
NVKM_ENGINE_NVENC1,
- NVKM_ENGINE_NVENC_LAST = NVKM_ENGINE_NVENC1,
+ NVKM_ENGINE_NVENC2,
+ NVKM_ENGINE_NVENC_LAST = NVKM_ENGINE_NVENC2,
NVKM_ENGINE_NVDEC,
NVKM_ENGINE_PM,
@@ -102,6 +106,7 @@ struct nvkm_device {
NV_C0 = 0xc0,
NV_E0 = 0xe0,
GM100 = 0x110,
+ GP100 = 0x130,
} card_type;
u32 chipset;
u8 chiprev;
@@ -136,7 +141,7 @@ struct nvkm_device {
struct nvkm_volt *volt;
struct nvkm_engine *bsp;
- struct nvkm_engine *ce[3];
+ struct nvkm_engine *ce[6];
struct nvkm_engine *cipher;
struct nvkm_disp *disp;
struct nvkm_dma *dma;
@@ -149,7 +154,7 @@ struct nvkm_device {
struct nvkm_engine *mspdec;
struct nvkm_engine *msppp;
struct nvkm_engine *msvld;
- struct nvkm_engine *nvenc[2];
+ struct nvkm_engine *nvenc[3];
struct nvkm_engine *nvdec;
struct nvkm_pm *pm;
struct nvkm_engine *sec;
@@ -206,7 +211,7 @@ struct nvkm_device_chip {
int (*volt )(struct nvkm_device *, int idx, struct nvkm_volt **);
int (*bsp )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*ce[3] )(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*ce[6] )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*cipher )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*disp )(struct nvkm_device *, int idx, struct nvkm_disp **);
int (*dma )(struct nvkm_device *, int idx, struct nvkm_dma **);
@@ -219,7 +224,7 @@ struct nvkm_device_chip {
int (*mspdec )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*msppp )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*msvld )(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*nvenc[2])(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*nvdec )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*pm )(struct nvkm_device *, int idx, struct nvkm_pm **);
int (*sec )(struct nvkm_device *, int idx, struct nvkm_engine **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
index b5370cb56..e5c9b6268 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
@@ -28,6 +28,7 @@ struct nvkm_device_tegra {
} iommu;
int gpu_speedo;
+ int gpu_speedo_id;
};
struct nvkm_device_tegra_func {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
index 594d719ba..d3d26a1e2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
@@ -7,4 +7,6 @@ int gf100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gk104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gm107_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gm200_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gp100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gp104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index d4fdce27b..e82049667 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -32,4 +32,6 @@ int gk104_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gk110_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gm107_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gm200_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gp100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gp104_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index 15ddfcf5e..ed92fec52 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -66,4 +66,5 @@ int gk20a_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gm107_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gm200_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gp100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index 6515f5810..89cf99307 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -42,4 +42,5 @@ int gk20a_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gm107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gm200_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gm20b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gp100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
index e39a1fea9..a72f32905 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
@@ -7,6 +7,9 @@ struct nvkm_bios {
u32 size;
u8 *data;
+ u32 image0_size;
+ u32 imaged_addr;
+
u32 bmp_offset;
u32 bit_offset;
@@ -22,10 +25,9 @@ struct nvkm_bios {
u8 nvbios_checksum(const u8 *data, int size);
u16 nvbios_findstr(const u8 *data, int size, const char *str, int len);
int nvbios_memcmp(struct nvkm_bios *, u32 addr, const char *, u32 len);
-
-#define nvbios_rd08(b,o) (b)->data[(o)]
-#define nvbios_rd16(b,o) get_unaligned_le16(&(b)->data[(o)])
-#define nvbios_rd32(b,o) get_unaligned_le32(&(b)->data[(o)])
+u8 nvbios_rd08(struct nvkm_bios *, u32 addr);
+u16 nvbios_rd16(struct nvkm_bios *, u32 addr);
+u32 nvbios_rd32(struct nvkm_bios *, u32 addr);
int nvkm_bios_new(struct nvkm_device *, int, struct nvkm_bios **);
#endif
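
The rd08/rd16/rd32 macros become real functions here, presumably so reads can account for the newly tracked image0_size/imaged_addr split instead of indexing data[] directly. A hypothetical sketch of the shape such an accessor could take (not the actual implementation):

/* Hypothetical: remap reads that fall beyond the first BIOS image to
 * wherever the extended image was placed (imaged_addr), then index data[].
 */
u8 nvbios_rd08(struct nvkm_bios *bios, u32 addr)
{
	if (bios->imaged_addr && addr >= bios->image0_size)
		addr = addr - bios->image0_size + bios->imaged_addr;  /* assumed */
	return bios->data[addr];
}
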
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 0a734fd06..3a410275f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -56,6 +56,8 @@ struct nvkm_fb {
int regions;
} tile;
+ u8 page;
+
struct nvkm_memory *mmu_rd;
struct nvkm_memory *mmu_wr;
};
@@ -91,6 +93,8 @@ int gk104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gm200_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gp100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gp104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
#include <subdev/bios.h>
#include <subdev/bios/ramcfg.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
index c6b90b654..cd755baf9 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
@@ -38,4 +38,5 @@ int gk104_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
int gk20a_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
int gm200_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
+int gp100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
index 2e80682b2..27d25b18d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
@@ -7,11 +7,14 @@ struct nvkm_mc {
struct nvkm_subdev subdev;
};
-void nvkm_mc_intr(struct nvkm_mc *, bool *handled);
-void nvkm_mc_intr_unarm(struct nvkm_mc *);
-void nvkm_mc_intr_rearm(struct nvkm_mc *);
-void nvkm_mc_reset(struct nvkm_mc *, enum nvkm_devidx);
-void nvkm_mc_unk260(struct nvkm_mc *, u32 data);
+void nvkm_mc_enable(struct nvkm_device *, enum nvkm_devidx);
+void nvkm_mc_disable(struct nvkm_device *, enum nvkm_devidx);
+void nvkm_mc_reset(struct nvkm_device *, enum nvkm_devidx);
+void nvkm_mc_intr(struct nvkm_device *, bool *handled);
+void nvkm_mc_intr_unarm(struct nvkm_device *);
+void nvkm_mc_intr_rearm(struct nvkm_device *);
+void nvkm_mc_intr_mask(struct nvkm_device *, enum nvkm_devidx, bool enable);
+void nvkm_mc_unk260(struct nvkm_device *, u32 data);
int nv04_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int nv11_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
@@ -24,4 +27,5 @@ int gt215_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gf100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gk104_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int gp100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
index ddb913889..e6523e2ce 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
@@ -47,6 +47,7 @@ int g94_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
int gf100_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
int gf106_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
int gk104_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
+int gp100_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
/* pcie functions */
int nvkm_pcie_set_link(struct nvkm_pci *, enum nvkm_pcie_speed, u8 width);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
index c6edd95a5..b04c38c07 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
@@ -43,9 +43,8 @@ struct nvkm_secboot {
const struct nvkm_secboot_func *func;
struct nvkm_subdev subdev;
+ enum nvkm_devidx devidx;
u32 base;
- u32 irq_mask;
- u32 enable_mask;
};
#define nvkm_secboot(p) container_of((p), struct nvkm_secboot, subdev)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
index 8fb575a92..71ebbfd44 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
@@ -8,10 +8,11 @@ struct nvkm_top {
struct list_head device;
};
-u32 nvkm_top_reset(struct nvkm_top *, enum nvkm_devidx);
-u32 nvkm_top_intr(struct nvkm_top *, u32 intr, u64 *subdevs);
-enum nvkm_devidx nvkm_top_fault(struct nvkm_top *, int fault);
-enum nvkm_devidx nvkm_top_engine(struct nvkm_top *, int, int *runl, int *engn);
+u32 nvkm_top_reset(struct nvkm_device *, enum nvkm_devidx);
+u32 nvkm_top_intr(struct nvkm_device *, u32 intr, u64 *subdevs);
+u32 nvkm_top_intr_mask(struct nvkm_device *, enum nvkm_devidx);
+enum nvkm_devidx nvkm_top_fault(struct nvkm_device *, int fault);
+enum nvkm_devidx nvkm_top_engine(struct nvkm_device *, int, int *runl, int *engn);
int gk104_top_new(struct nvkm_device *, int, struct nvkm_top **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
index feff55cff..b765f4ffc 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
@@ -12,6 +12,9 @@ struct nvkm_volt {
u32 uv;
u8 vid;
} vid[256];
+
+ u32 max_uv;
+ u32 min_uv;
};
int nvkm_volt_get(struct nvkm_volt *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index eb7de487a..7bd468321 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -100,6 +100,7 @@ nouveau_abi16_swclass(struct nouveau_drm *drm)
case NV_DEVICE_INFO_V0_FERMI:
case NV_DEVICE_INFO_V0_KEPLER:
case NV_DEVICE_INFO_V0_MAXWELL:
+ case NV_DEVICE_INFO_V0_PASCAL:
return NVIF_CLASS_SW_GF100;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index db76b94e6..dc57b628e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -45,6 +45,8 @@
static struct nouveau_dsm_priv {
bool dsm_detected;
bool optimus_detected;
+ bool optimus_flags_detected;
+ bool optimus_skip_dsm;
acpi_handle dhandle;
acpi_handle rom_handle;
} nouveau_dsm_priv;
@@ -57,9 +59,6 @@ bool nouveau_is_v1_dsm(void) {
return nouveau_dsm_priv.dsm_detected;
}
-#define NOUVEAU_DSM_HAS_MUX 0x1
-#define NOUVEAU_DSM_HAS_OPT 0x2
-
#ifdef CONFIG_VGA_SWITCHEROO
static const char nouveau_dsm_muid[] = {
0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
@@ -110,7 +109,7 @@ static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *
* requirements on the fourth parameter, so a private implementation
* instead of using acpi_check_dsm().
*/
-static int nouveau_check_optimus_dsm(acpi_handle handle)
+static int nouveau_dsm_get_optimus_functions(acpi_handle handle)
{
int result;
@@ -125,7 +124,9 @@ static int nouveau_check_optimus_dsm(acpi_handle handle)
* ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported.
* If the n-th bit is enabled, function n is supported
*/
- return result & 1 && result & (1 << NOUVEAU_DSM_OPTIMUS_CAPS);
+ if (result & 1 && result & (1 << NOUVEAU_DSM_OPTIMUS_CAPS))
+ return result;
+ return 0;
}
static int nouveau_dsm(acpi_handle handle, int func, int arg)
@@ -212,26 +213,66 @@ static const struct vga_switcheroo_handler nouveau_dsm_handler = {
.get_client_id = nouveau_dsm_get_client_id,
};
-static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
+/*
+ * Firmware supporting Windows 8 or later does not use _DSM to put the device
+ * into D3cold; it instead relies on disabling power resources on the parent.
+ */
+static bool nouveau_pr3_present(struct pci_dev *pdev)
+{
+ struct pci_dev *parent_pdev = pci_upstream_bridge(pdev);
+ struct acpi_device *parent_adev;
+
+ if (!parent_pdev)
+ return false;
+
+ if (!parent_pdev->bridge_d3) {
+ /*
+ * Parent PCI bridge is currently not power managed.
+		 * Since userspace can change this setting afterwards, to be
+		 * on the safe side we stick with _DSM and prevent usage of
+		 * _PR3 from the bridge.
+ */
+ pci_d3cold_disable(pdev);
+ return false;
+ }
+
+ parent_adev = ACPI_COMPANION(&parent_pdev->dev);
+ if (!parent_adev)
+ return false;
+
+ return acpi_has_method(parent_adev->handle, "_PR3");
+}
+
+static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out,
+ bool *has_mux, bool *has_opt,
+ bool *has_opt_flags, bool *has_pr3)
{
acpi_handle dhandle;
- int retval = 0;
+ bool supports_mux;
+ int optimus_funcs;
dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
- return false;
+ return;
if (!acpi_has_method(dhandle, "_DSM"))
- return false;
+ return;
- if (acpi_check_dsm(dhandle, nouveau_dsm_muid, 0x00000102,
- 1 << NOUVEAU_DSM_POWER))
- retval |= NOUVEAU_DSM_HAS_MUX;
+ supports_mux = acpi_check_dsm(dhandle, nouveau_dsm_muid, 0x00000102,
+ 1 << NOUVEAU_DSM_POWER);
+ optimus_funcs = nouveau_dsm_get_optimus_functions(dhandle);
- if (nouveau_check_optimus_dsm(dhandle))
- retval |= NOUVEAU_DSM_HAS_OPT;
+	/* Does not look like an Nvidia device. */
+ if (!supports_mux && !optimus_funcs)
+ return;
- if (retval & NOUVEAU_DSM_HAS_OPT) {
+ *dhandle_out = dhandle;
+ *has_mux = supports_mux;
+ *has_opt = !!optimus_funcs;
+ *has_opt_flags = optimus_funcs & (1 << NOUVEAU_DSM_OPTIMUS_FLAGS);
+ *has_pr3 = false;
+
+ if (optimus_funcs) {
uint32_t result;
nouveau_optimus_dsm(dhandle, NOUVEAU_DSM_OPTIMUS_CAPS, 0,
&result);
@@ -239,11 +280,9 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
(result & OPTIMUS_ENABLED) ? "enabled" : "disabled",
(result & OPTIMUS_DYNAMIC_PWR_CAP) ? "dynamic power, " : "",
(result & OPTIMUS_HDA_CODEC_MASK) ? "hda bios codec supported" : "");
- }
- if (retval)
- nouveau_dsm_priv.dhandle = dhandle;
- return retval;
+ *has_pr3 = nouveau_pr3_present(pdev);
+ }
}
static bool nouveau_dsm_detect(void)
@@ -251,11 +290,13 @@ static bool nouveau_dsm_detect(void)
char acpi_method_name[255] = { 0 };
struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
struct pci_dev *pdev = NULL;
- int has_dsm = 0;
- int has_optimus = 0;
+ acpi_handle dhandle = NULL;
+ bool has_mux = false;
+ bool has_optimus = false;
+ bool has_optimus_flags = false;
+ bool has_power_resources = false;
int vga_count = 0;
bool guid_valid;
- int retval;
bool ret = false;
/* lookup the MXM GUID */
@@ -268,32 +309,32 @@ static bool nouveau_dsm_detect(void)
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
vga_count++;
- retval = nouveau_dsm_pci_probe(pdev);
- if (retval & NOUVEAU_DSM_HAS_MUX)
- has_dsm |= 1;
- if (retval & NOUVEAU_DSM_HAS_OPT)
- has_optimus = 1;
+ nouveau_dsm_pci_probe(pdev, &dhandle, &has_mux, &has_optimus,
+ &has_optimus_flags, &has_power_resources);
}
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_3D << 8, pdev)) != NULL) {
vga_count++;
- retval = nouveau_dsm_pci_probe(pdev);
- if (retval & NOUVEAU_DSM_HAS_MUX)
- has_dsm |= 1;
- if (retval & NOUVEAU_DSM_HAS_OPT)
- has_optimus = 1;
+ nouveau_dsm_pci_probe(pdev, &dhandle, &has_mux, &has_optimus,
+ &has_optimus_flags, &has_power_resources);
}
/* find the optimus DSM or the old v1 DSM */
- if (has_optimus == 1) {
+ if (has_optimus) {
+ nouveau_dsm_priv.dhandle = dhandle;
acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
&buffer);
printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n",
acpi_method_name);
+ if (has_power_resources)
+ pr_info("nouveau: detected PR support, will not use DSM\n");
nouveau_dsm_priv.optimus_detected = true;
+ nouveau_dsm_priv.optimus_flags_detected = has_optimus_flags;
+ nouveau_dsm_priv.optimus_skip_dsm = has_power_resources;
ret = true;
- } else if (vga_count == 2 && has_dsm && guid_valid) {
+ } else if (vga_count == 2 && has_mux && guid_valid) {
+ nouveau_dsm_priv.dhandle = dhandle;
acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
&buffer);
printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
@@ -321,11 +362,12 @@ void nouveau_register_dsm_handler(void)
void nouveau_switcheroo_optimus_dsm(void)
{
u32 result = 0;
- if (!nouveau_dsm_priv.optimus_detected)
+ if (!nouveau_dsm_priv.optimus_detected || nouveau_dsm_priv.optimus_skip_dsm)
return;
- nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FLAGS,
- 0x3, &result);
+ if (nouveau_dsm_priv.optimus_flags_detected)
+ nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FLAGS,
+ 0x3, &result);
nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_CAPS,
NOUVEAU_DSM_OPTIMUS_SET_POWERDOWN, &result);
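
For reference, the function-0 bitmask convention that nouveau_dsm_get_optimus_functions() relies on (bit 0 means the interface is implemented at all, bit n means function n is available) can be illustrated outside the kernel. A minimal userspace sketch; DSM_OPTIMUS_CAPS here is an illustrative stand-in, not the driver's actual function index:

#include <stdint.h>
#include <stdio.h>

#define DSM_OPTIMUS_CAPS 0x1A	/* illustrative stand-in, not the driver's value */

/*
 * Function 0 of a _DSM returns a capability bitmask.  Return the whole mask
 * only when the interface exists (bit 0) and the function we care about is
 * advertised, mirroring the check in the patch above.
 */
static uint32_t dsm_get_functions(uint32_t mask, unsigned int func)
{
	if ((mask & 1) && (mask & (1u << func)))
		return mask;
	return 0;
}

int main(void)
{
	uint32_t mask = (1u << 0) | (1u << DSM_OPTIMUS_CAPS);

	if (dsm_get_functions(mask, DSM_OPTIMUS_CAPS))
		printf("function %#x available (mask %#x)\n",
		       DSM_OPTIMUS_CAPS, mask);
	return 0;
}
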
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 5e3f3e826..864323b19 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -424,13 +424,7 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
if (ret)
return ret;
- /*
- * TTM buffers allocated using the DMA API already have a mapping, let's
- * use it instead.
- */
- if (!nvbo->force_coherent)
- ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
- &nvbo->kmap);
+ ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
ttm_bo_unreserve(&nvbo->bo);
return ret;
@@ -442,12 +436,7 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo)
if (!nvbo)
return;
- /*
- * TTM buffers allocated using the DMA API already had a coherent
- * mapping which we used, no need to unmap.
- */
- if (!nvbo->force_coherent)
- ttm_bo_kunmap(&nvbo->kmap);
+ ttm_bo_kunmap(&nvbo->kmap);
}
void
@@ -506,35 +495,13 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
return 0;
}
-static inline void *
-_nouveau_bo_mem_index(struct nouveau_bo *nvbo, unsigned index, void *mem, u8 sz)
-{
- struct ttm_dma_tt *dma_tt;
- u8 *m = mem;
-
- index *= sz;
-
- if (m) {
- /* kmap'd address, return the corresponding offset */
- m += index;
- } else {
- /* DMA-API mapping, lookup the right address */
- dma_tt = (struct ttm_dma_tt *)nvbo->bo.ttm;
- m = dma_tt->cpu_address[index / PAGE_SIZE];
- m += index % PAGE_SIZE;
- }
-
- return m;
-}
-#define nouveau_bo_mem_index(o, i, m) _nouveau_bo_mem_index(o, i, m, sizeof(*m))
-
void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
bool is_iomem;
u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
- mem = nouveau_bo_mem_index(nvbo, index, mem);
+ mem += index;
if (is_iomem)
iowrite16_native(val, (void __force __iomem *)mem);
@@ -548,7 +515,7 @@ nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
bool is_iomem;
u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
- mem = nouveau_bo_mem_index(nvbo, index, mem);
+ mem += index;
if (is_iomem)
return ioread32_native((void __force __iomem *)mem);
@@ -562,7 +529,7 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
bool is_iomem;
u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
- mem = nouveau_bo_mem_index(nvbo, index, mem);
+ mem += index;
if (is_iomem)
iowrite32_native(val, (void __force __iomem *)mem);
@@ -1082,7 +1049,6 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
ret = ttm_bo_move_accel_cleanup(bo,
&fence->base,
evict,
- no_wait_gpu,
new_mem);
nouveau_fence_unref(&fence);
}
@@ -1104,6 +1070,10 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
struct ttm_mem_reg *, struct ttm_mem_reg *);
int (*init)(struct nouveau_channel *, u32 handle);
} _methods[] = {
+ { "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
+ { "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
+ { "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
+ { "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
@@ -1182,7 +1152,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
- ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
+ ret = ttm_bo_move_ttm(bo, true, intr, no_wait_gpu, new_mem);
out:
ttm_bo_mem_put(bo, &tmp_mem);
return ret;
@@ -1210,7 +1180,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
return ret;
- ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
+ ret = ttm_bo_move_ttm(bo, true, intr, no_wait_gpu, &tmp_mem);
if (ret)
goto out;
@@ -1289,6 +1259,10 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
struct nouveau_drm_tile *new_tile = NULL;
int ret = 0;
+ ret = ttm_bo_wait(bo, intr, no_wait_gpu);
+ if (ret)
+ return ret;
+
if (nvbo->pin_refcnt)
NV_WARN(drm, "Moving pinned object %p!\n", nvbo);
@@ -1324,7 +1298,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
/* Fallback to software copy. */
ret = ttm_bo_wait(bo, intr, no_wait_gpu);
if (ret == 0)
- ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+ ret = ttm_bo_move_memcpy(bo, evict, intr, no_wait_gpu, new_mem);
out:
if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
@@ -1488,14 +1462,6 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
dev = drm->dev;
pdev = device->dev;
- /*
- * Objects matching this condition have been marked as force_coherent,
- * so use the DMA API for them.
- */
- if (!nvxx_device(&drm->device)->func->cpu_coherent &&
- ttm->caching_state == tt_uncached)
- return ttm_dma_populate(ttm_dma, dev->dev);
-
#if IS_ENABLED(CONFIG_AGP)
if (drm->agp.bridge) {
return ttm_agp_tt_populate(ttm);
@@ -1553,16 +1519,6 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
dev = drm->dev;
pdev = device->dev;
- /*
- * Objects matching this condition have been marked as force_coherent,
- * so use the DMA API for them.
- */
- if (!nvxx_device(&drm->device)->func->cpu_coherent &&
- ttm->caching_state == tt_uncached) {
- ttm_dma_unpopulate(ttm_dma, dev->dev);
- return;
- }
-
#if IS_ENABLED(CONFIG_AGP)
if (drm->agp.bridge) {
ttm_agp_tt_unpopulate(ttm);
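
A side note on the accessor simplification above: replacing nouveau_bo_mem_index() with a plain "mem += index" works because C pointer arithmetic scales by the pointee size, so the index counts elements (u16 or u32 slots), not bytes. A trivial standalone sketch of that convention:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t buf[4] = { 0 };
	uint16_t *mem = buf;
	unsigned int index = 2;

	/*
	 * "mem += index" advances by index u16 slots (4 bytes here),
	 * not index bytes -- the same convention the simplified
	 * nouveau_bo_wr16/rd32/wr32 accessors now use.
	 */
	mem += index;
	*mem = 0xbeef;

	printf("buf[2] = %#x\n", buf[2]);
	return 0;
}
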
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index b1d2527c5..f9b3c8111 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -191,7 +191,8 @@ static int
nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
u32 engine, struct nouveau_channel **pchan)
{
- static const u16 oclasses[] = { MAXWELL_CHANNEL_GPFIFO_A,
+ static const u16 oclasses[] = { PASCAL_CHANNEL_GPFIFO_A,
+ MAXWELL_CHANNEL_GPFIFO_A,
KEPLER_CHANNEL_GPFIFO_B,
KEPLER_CHANNEL_GPFIFO_A,
FERMI_CHANNEL_GPFIFO,
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 7c77f960c..afbf557b2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -47,7 +47,7 @@ nouveau_display_vblank_handler(struct nvif_notify *notify)
{
struct nouveau_crtc *nv_crtc =
container_of(notify, typeof(*nv_crtc), vblank);
- drm_handle_vblank(nv_crtc->base.dev, nv_crtc->index);
+ drm_crtc_handle_vblank(&nv_crtc->base);
return NVIF_NOTIFY_KEEP;
}
@@ -495,6 +495,8 @@ nouveau_display_create(struct drm_device *dev)
if (nouveau_modeset != 2 && drm->vbios.dcb.entries) {
static const u16 oclass[] = {
+ GP104_DISP,
+ GP100_DISP,
GM200_DISP,
GM107_DISP,
GK110_DISP,
@@ -554,6 +556,7 @@ nouveau_display_destroy(struct drm_device *dev)
nouveau_display_vblank_fini(dev);
drm_kms_helper_poll_fini(dev);
+ drm_crtc_force_disable_all(dev);
drm_mode_config_cleanup(dev);
if (disp->dtor)
@@ -760,12 +763,11 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* Initialize a page flip struct */
*s = (struct nouveau_page_flip_state)
- { { }, event, nouveau_crtc(crtc)->index,
- fb->bits_per_pixel, fb->pitches[0], crtc->x, crtc->y,
+ { { }, event, crtc, fb->bits_per_pixel, fb->pitches[0],
new_bo->bo.offset };
/* Keep vblanks on during flip, for the target crtc of this flip */
- drm_vblank_get(dev, nouveau_crtc(crtc)->index);
+ drm_crtc_vblank_get(crtc);
/* Emit a page flip */
if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
@@ -810,7 +812,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
return 0;
fail_unreserve:
- drm_vblank_put(dev, nouveau_crtc(crtc)->index);
+ drm_crtc_vblank_put(crtc);
ttm_bo_unreserve(&old_bo->bo);
fail_unpin:
mutex_unlock(&cli->mutex);
@@ -842,17 +844,17 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
if (s->event) {
if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
- drm_arm_vblank_event(dev, s->crtc, s->event);
+ drm_crtc_arm_vblank_event(s->crtc, s->event);
} else {
- drm_send_vblank_event(dev, s->crtc, s->event);
+ drm_crtc_send_vblank_event(s->crtc, s->event);
/* Give up ownership of vblank for page-flipped crtc */
- drm_vblank_put(dev, s->crtc);
+ drm_crtc_vblank_put(s->crtc);
}
}
else {
/* Give up ownership of vblank for page-flipped crtc */
- drm_vblank_put(dev, s->crtc);
+ drm_crtc_vblank_put(s->crtc);
}
list_del(&s->head);
@@ -873,9 +875,10 @@ nouveau_flip_complete(struct nvif_notify *notify)
if (!nouveau_finish_page_flip(chan, &state)) {
if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
- nv_set_crtc_base(drm->dev, state.crtc, state.offset +
- state.y * state.pitch +
- state.x * state.bpp / 8);
+ nv_set_crtc_base(drm->dev, drm_crtc_index(state.crtc),
+ state.offset + state.crtc->y *
+ state.pitch + state.crtc->x *
+ state.bpp / 8);
}
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 24273bacd..0420ee861 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -28,7 +28,8 @@ int nouveau_framebuffer_init(struct drm_device *, struct nouveau_framebuffer *,
struct nouveau_page_flip_state {
struct list_head head;
struct drm_pending_vblank_event *event;
- int crtc, bpp, pitch, x, y;
+ struct drm_crtc *crtc;
+ int bpp, pitch;
u64 offset;
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index d6c134b01..66c1280c0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -22,13 +22,11 @@
* Authors: Ben Skeggs
*/
-#include <linux/apple-gmux.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
-#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include "drmP.h"
@@ -200,6 +198,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
case KEPLER_CHANNEL_GPFIFO_A:
case KEPLER_CHANNEL_GPFIFO_B:
case MAXWELL_CHANNEL_GPFIFO_A:
+ case PASCAL_CHANNEL_GPFIFO_A:
ret = nvc0_fence_create(drm);
break;
default:
@@ -315,13 +314,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
bool boot = false;
int ret;
- /*
- * apple-gmux is needed on dual GPU MacBook Pro
- * to probe the panel if we're the inactive GPU.
- */
- if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
- apple_gmux_present() && pdev != vga_default_device() &&
- !vga_switcheroo_handler_flags())
+ if (vga_switcheroo_client_probe_defer(pdev))
return -EPROBE_DEFER;
/* We need to check that the chipset is supported before booting
@@ -447,6 +440,11 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
nouveau_vga_init(drm);
if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ if (!nvxx_device(&drm->device)->mmu) {
+ ret = -ENOSYS;
+ goto fail_device;
+ }
+
ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40),
0x1000, NULL, &drm->client.vm);
if (ret)
@@ -507,7 +505,11 @@ nouveau_drm_unload(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- pm_runtime_get_sync(dev->dev);
+ if (nouveau_runtime_pm != 0) {
+ pm_runtime_get_sync(dev->dev);
+ pm_runtime_forbid(dev->dev);
+ }
+
nouveau_fbcon_fini(dev);
nouveau_accel_fini(drm);
nouveau_hwmon_fini(dev);
@@ -979,7 +981,7 @@ driver_stub = {
.gem_prime_vmap = nouveau_gem_prime_vmap,
.gem_prime_vunmap = nouveau_gem_prime_vunmap,
- .gem_free_object = nouveau_gem_object_del,
+ .gem_free_object_unlocked = nouveau_gem_object_del,
.gem_open_object = nouveau_gem_object_open,
.gem_close_object = nouveau_gem_object_close,
@@ -1087,7 +1089,6 @@ nouveau_drm_init(void)
driver_pci = driver_stub;
driver_pci.set_busid = drm_pci_set_busid;
driver_platform = driver_stub;
- driver_platform.set_busid = drm_platform_set_busid;
nouveau_display_options();
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 2e3a62d38..64c4ce711 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -57,7 +57,8 @@ struct nouveau_fence_priv {
int (*context_new)(struct nouveau_channel *);
void (*context_del)(struct nouveau_channel *);
- u32 contexts, context_base;
+ u32 contexts;
+ u64 context_base;
bool uevent;
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 1ff4166af..71f764bf4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -535,6 +535,40 @@ static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO,
nouveau_hwmon_get_in0_input, NULL, 0);
static ssize_t
+nouveau_hwmon_get_in0_min(struct device *d,
+ struct device_attribute *a, char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nvkm_volt *volt = nvxx_volt(&drm->device);
+
+ if (!volt || !volt->min_uv)
+ return -ENODEV;
+
+ return sprintf(buf, "%i\n", volt->min_uv / 1000);
+}
+
+static SENSOR_DEVICE_ATTR(in0_min, S_IRUGO,
+ nouveau_hwmon_get_in0_min, NULL, 0);
+
+static ssize_t
+nouveau_hwmon_get_in0_max(struct device *d,
+ struct device_attribute *a, char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nvkm_volt *volt = nvxx_volt(&drm->device);
+
+ if (!volt || !volt->max_uv)
+ return -ENODEV;
+
+ return sprintf(buf, "%i\n", volt->max_uv / 1000);
+}
+
+static SENSOR_DEVICE_ATTR(in0_max, S_IRUGO,
+ nouveau_hwmon_get_in0_max, NULL, 0);
+
+static ssize_t
nouveau_hwmon_get_in0_label(struct device *d,
struct device_attribute *a, char *buf)
{
@@ -594,6 +628,8 @@ static struct attribute *hwmon_pwm_fan_attributes[] = {
static struct attribute *hwmon_in0_attributes[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
+ &sensor_dev_attr_in0_min.dev_attr.attr,
+ &sensor_dev_attr_in0_max.dev_attr.attr,
&sensor_dev_attr_in0_label.dev_attr.attr,
NULL
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index bcee91497..1825dbc33 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -164,6 +164,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
case NV_DEVICE_INFO_V0_FERMI:
case NV_DEVICE_INFO_V0_KEPLER:
case NV_DEVICE_INFO_V0_MAXWELL:
+ case NV_DEVICE_INFO_V0_PASCAL:
node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
break;
default:
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
index 675e9e077..08f9c6fa0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_usif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.c
@@ -212,7 +212,6 @@ usif_notify_get(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
ntfy->p->base.event = &ntfy->p->e.base;
ntfy->p->base.file_priv = f;
ntfy->p->base.pid = current->pid;
- ntfy->p->base.destroy =(void(*)(struct drm_pending_event *))kfree;
ntfy->p->e.base.type = DRM_NOUVEAU_EVENT_NVIF;
ntfy->p->e.base.length = sizeof(ntfy->p->e.base) + ntfy->reply;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 3ffc2b005..7d0edcbcf 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -297,6 +297,8 @@ nv50_core_create(struct nvif_device *device, struct nvif_object *disp,
.pushbuf = 0xb0007d00,
};
static const s32 oclass[] = {
+ GP104_DISP_CORE_CHANNEL_DMA,
+ GP100_DISP_CORE_CHANNEL_DMA,
GM200_DISP_CORE_CHANNEL_DMA,
GM107_DISP_CORE_CHANNEL_DMA,
GK110_DISP_CORE_CHANNEL_DMA,
@@ -1346,21 +1348,22 @@ nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
return 0;
}
-static void
+static int
nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
- uint32_t start, uint32_t size)
+ uint32_t size)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 end = min_t(u32, start + size, 256);
u32 i;
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
nv_crtc->lut.r[i] = r[i];
nv_crtc->lut.g[i] = g[i];
nv_crtc->lut.b[i] = b[i];
}
nv50_crtc_lut_load(crtc);
+
+ return 0;
}
static void
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index b18557858..19044aba2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -57,6 +57,9 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = {
[NVKM_ENGINE_CE0 ] = "ce0",
[NVKM_ENGINE_CE1 ] = "ce1",
[NVKM_ENGINE_CE2 ] = "ce2",
+ [NVKM_ENGINE_CE3 ] = "ce3",
+ [NVKM_ENGINE_CE4 ] = "ce4",
+ [NVKM_ENGINE_CE5 ] = "ce5",
[NVKM_ENGINE_CIPHER ] = "cipher",
[NVKM_ENGINE_DISP ] = "disp",
[NVKM_ENGINE_DMAOBJ ] = "dma",
@@ -71,6 +74,7 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = {
[NVKM_ENGINE_MSVLD ] = "msvld",
[NVKM_ENGINE_NVENC0 ] = "nvenc0",
[NVKM_ENGINE_NVENC1 ] = "nvenc1",
+ [NVKM_ENGINE_NVENC2 ] = "nvenc2",
[NVKM_ENGINE_NVDEC ] = "nvdec",
[NVKM_ENGINE_PM ] = "pm",
[NVKM_ENGINE_SEC ] = "sec",
@@ -105,7 +109,7 @@ nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend)
}
}
- nvkm_mc_reset(device->mc, subdev->index);
+ nvkm_mc_reset(device, subdev->index);
time = ktime_to_us(ktime_get()) - time;
nvkm_trace(subdev, "%s completed in %lldus\n", action, time);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
index 9c19d59b4..a4458a8eb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
@@ -3,3 +3,5 @@ nvkm-y += nvkm/engine/ce/gf100.o
nvkm-y += nvkm/engine/ce/gk104.o
nvkm-y += nvkm/engine/ce/gm107.o
nvkm-y += nvkm/engine/ce/gm200.o
+nvkm-y += nvkm/engine/ce/gp100.o
+nvkm-y += nvkm/engine/ce/gp104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c
new file mode 100644
index 000000000..c7710456b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include <core/enum.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_enum
+gp100_ce_launcherr_report[] = {
+ { 0x0, "NO_ERR" },
+ { 0x1, "2D_LAYER_EXCEEDS_DEPTH" },
+ { 0x2, "INVALID_ALIGNMENT" },
+ { 0x3, "MEM2MEM_RECT_OUT_OF_BOUNDS" },
+ { 0x4, "SRC_LINE_EXCEEDS_PITCH" },
+ { 0x5, "SRC_LINE_EXCEEDS_NEG_PITCH" },
+ { 0x6, "DST_LINE_EXCEEDS_PITCH" },
+ { 0x7, "DST_LINE_EXCEEDS_NEG_PITCH" },
+ { 0x8, "BAD_SRC_PIXEL_COMP_REF" },
+ { 0x9, "INVALID_VALUE" },
+ { 0xa, "UNUSED_FIELD" },
+ { 0xb, "INVALID_OPERATION" },
+ { 0xc, "NO_RESOURCES" },
+ { 0xd, "INVALID_CONFIG" },
+ {}
+};
+
+static void
+gp100_ce_intr_launcherr(struct nvkm_engine *ce, const u32 base)
+{
+ struct nvkm_subdev *subdev = &ce->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 stat = nvkm_rd32(device, 0x104418 + base);
+ const struct nvkm_enum *en =
+ nvkm_enum_find(gp100_ce_launcherr_report, stat & 0x0000000f);
+ nvkm_warn(subdev, "LAUNCHERR %08x [%s]\n", stat, en ? en->name : "");
+}
+
+void
+gp100_ce_intr(struct nvkm_engine *ce)
+{
+ const u32 base = (ce->subdev.index - NVKM_ENGINE_CE0) * 0x80;
+ struct nvkm_subdev *subdev = &ce->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mask = nvkm_rd32(device, 0x10440c + base);
+ u32 intr = nvkm_rd32(device, 0x104410 + base) & mask;
+ if (intr & 0x00000001) { //XXX: guess
+ nvkm_warn(subdev, "BLOCKPIPE\n");
+ nvkm_wr32(device, 0x104410 + base, 0x00000001);
+ intr &= ~0x00000001;
+ }
+ if (intr & 0x00000002) { //XXX: guess
+ nvkm_warn(subdev, "NONBLOCKPIPE\n");
+ nvkm_wr32(device, 0x104410 + base, 0x00000002);
+ intr &= ~0x00000002;
+ }
+ if (intr & 0x00000004) {
+ gp100_ce_intr_launcherr(ce, base);
+ nvkm_wr32(device, 0x104410 + base, 0x00000004);
+ intr &= ~0x00000004;
+ }
+ if (intr) {
+ nvkm_warn(subdev, "intr %08x\n", intr);
+ nvkm_wr32(device, 0x104410 + base, intr);
+ }
+}
+
+static const struct nvkm_engine_func
+gp100_ce = {
+ .intr = gp100_ce_intr,
+ .sclass = {
+ { -1, -1, PASCAL_DMA_COPY_A },
+ {}
+ }
+};
+
+int
+gp100_ce_new(struct nvkm_device *device, int index,
+ struct nvkm_engine **pengine)
+{
+ return nvkm_engine_new_(&gp100_ce, device, index, true, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c
new file mode 100644
index 000000000..20e019788
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include <core/enum.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+gp104_ce = {
+ .intr = gp100_ce_intr,
+ .sclass = {
+ { -1, -1, PASCAL_DMA_COPY_B },
+ { -1, -1, PASCAL_DMA_COPY_A },
+ {}
+ }
+};
+
+int
+gp104_ce_new(struct nvkm_device *device, int index,
+ struct nvkm_engine **pengine)
+{
+ return nvkm_engine_new_(&gp104_ce, device, index, true, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
index e2fa8b161..2dce40597 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
@@ -4,4 +4,5 @@
void gt215_ce_intr(struct nvkm_falcon *, struct nvkm_fifo_chan *);
void gk104_ce_intr(struct nvkm_engine *);
+void gp100_ce_intr(struct nvkm_engine *);
#endif
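
The new gp100_ce_intr() above follows a common mask-and-acknowledge interrupt pattern: read the enable mask, AND it with the raw status, handle each known bit, and ack by writing the handled bit back. A rough sketch of the pattern with a fake register file standing in for the real nvkm MMIO accessors (reg_read/reg_write and the layout are stand-ins, not nvkm API):

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[2] = { 0x7, 0x5 };	/* [0] = enable mask, [1] = status */

static uint32_t reg_read(int r) { return regs[r]; }
static void reg_write(int r, uint32_t v) { regs[r] &= ~v; }	/* write-1-to-clear */

int main(void)
{
	uint32_t mask = reg_read(0);
	uint32_t intr = reg_read(1) & mask;	/* only consider enabled sources */

	if (intr & 0x1) {			/* known bit: handle, ack, clear */
		printf("bit 0 handled\n");
		reg_write(1, 0x1);
		intr &= ~0x1;
	}
	if (intr) {				/* anything left is unexpected */
		printf("unknown intr %#x\n", intr);
		reg_write(1, intr);		/* ack anyway to avoid a storm */
	}
	return 0;
}
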
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 4572debcb..7218a067a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2148,6 +2148,67 @@ nv12b_chipset = {
.sw = gf100_sw_new,
};
+static const struct nvkm_device_chip
+nv130_chipset = {
+ .name = "GP100",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .devinit = gm200_devinit_new,
+ .fb = gp100_fb_new,
+ .fuse = gm107_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gp100_ltc_new,
+ .mc = gp100_mc_new,
+ .mmu = gf100_mmu_new,
+ .secboot = gm200_secboot_new,
+ .pci = gp100_pci_new,
+ .timer = gk20a_timer_new,
+ .top = gk104_top_new,
+ .ce[0] = gp100_ce_new,
+ .ce[1] = gp100_ce_new,
+ .ce[2] = gp100_ce_new,
+ .ce[3] = gp100_ce_new,
+ .ce[4] = gp100_ce_new,
+ .ce[5] = gp100_ce_new,
+ .dma = gf119_dma_new,
+ .disp = gp100_disp_new,
+ .fifo = gp100_fifo_new,
+ .gr = gp100_gr_new,
+ .sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv134_chipset = {
+ .name = "GP104",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .devinit = gm200_devinit_new,
+ .fb = gp104_fb_new,
+ .fuse = gm107_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gp100_ltc_new,
+ .mc = gp100_mc_new,
+ .mmu = gf100_mmu_new,
+ .pci = gp100_pci_new,
+ .timer = gk20a_timer_new,
+ .top = gk104_top_new,
+ .ce[0] = gp104_ce_new,
+ .ce[1] = gp104_ce_new,
+ .ce[2] = gp104_ce_new,
+ .ce[3] = gp104_ce_new,
+ .disp = gp104_disp_new,
+ .dma = gf119_dma_new,
+ .fifo = gp100_fifo_new,
+};
+
static int
nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
struct nvkm_notify *notify)
@@ -2221,6 +2282,9 @@ nvkm_device_engine(struct nvkm_device *device, int index)
_(CE0 , device->ce[0] , device->ce[0]);
_(CE1 , device->ce[1] , device->ce[1]);
_(CE2 , device->ce[2] , device->ce[2]);
+ _(CE3 , device->ce[3] , device->ce[3]);
+ _(CE4 , device->ce[4] , device->ce[4]);
+ _(CE5 , device->ce[5] , device->ce[5]);
_(CIPHER , device->cipher , device->cipher);
_(DISP , device->disp , &device->disp->engine);
_(DMAOBJ , device->dma , &device->dma->engine);
@@ -2235,6 +2299,7 @@ nvkm_device_engine(struct nvkm_device *device, int index)
_(MSVLD , device->msvld , device->msvld);
_(NVENC0 , device->nvenc[0], device->nvenc[0]);
_(NVENC1 , device->nvenc[1], device->nvenc[1]);
+ _(NVENC2 , device->nvenc[2], device->nvenc[2]);
_(NVDEC , device->nvdec , device->nvdec);
_(PM , device->pm , &device->pm->engine);
_(SEC , device->sec , device->sec);
@@ -2492,6 +2557,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x100: device->card_type = NV_E0; break;
case 0x110:
case 0x120: device->card_type = GM100; break;
+ case 0x130: device->card_type = GP100; break;
default:
break;
}
@@ -2576,6 +2642,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x124: device->chip = &nv124_chipset; break;
case 0x126: device->chip = &nv126_chipset; break;
case 0x12b: device->chip = &nv12b_chipset; break;
+ case 0x130: device->chip = &nv130_chipset; break;
+ case 0x134: device->chip = &nv134_chipset; break;
default:
nvdev_error(device, "unknown chipset (%08x)\n", boot0);
goto done;
@@ -2659,6 +2727,9 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
_(NVKM_ENGINE_CE0 , ce[0]);
_(NVKM_ENGINE_CE1 , ce[1]);
_(NVKM_ENGINE_CE2 , ce[2]);
+ _(NVKM_ENGINE_CE3 , ce[3]);
+ _(NVKM_ENGINE_CE4 , ce[4]);
+ _(NVKM_ENGINE_CE5 , ce[5]);
_(NVKM_ENGINE_CIPHER , cipher);
_(NVKM_ENGINE_DISP , disp);
_(NVKM_ENGINE_DMAOBJ , dma);
@@ -2673,6 +2744,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
_(NVKM_ENGINE_MSVLD , msvld);
_(NVKM_ENGINE_NVENC0 , nvenc[0]);
_(NVKM_ENGINE_NVENC1 , nvenc[1]);
+ _(NVKM_ENGINE_NVENC2 , nvenc[2]);
_(NVKM_ENGINE_NVDEC , nvdec);
_(NVKM_ENGINE_PM , pm);
_(NVKM_ENGINE_SEC , sec);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index ec12efb46..9b638bd90 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -191,13 +191,11 @@ static irqreturn_t
nvkm_device_tegra_intr(int irq, void *arg)
{
struct nvkm_device_tegra *tdev = arg;
- struct nvkm_mc *mc = tdev->device.mc;
+ struct nvkm_device *device = &tdev->device;
bool handled = false;
- if (likely(mc)) {
- nvkm_mc_intr_unarm(mc);
- nvkm_mc_intr(mc, &handled);
- nvkm_mc_intr_rearm(mc);
- }
+ nvkm_mc_intr_unarm(device);
+ nvkm_mc_intr(device, &handled);
+ nvkm_mc_intr_rearm(device);
return handled ? IRQ_HANDLED : IRQ_NONE;
}
@@ -313,6 +311,7 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
goto remove;
tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
+ tdev->gpu_speedo_id = tegra_sku_info.gpu_speedo_id;
ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
NVKM_DEVICE_TEGRA, pdev->id, NULL,
cfg, dbg, detect, mmio, subdev_mask,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index 137066426..79a8f71cf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -102,6 +102,7 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
case NV_C0: args->v0.family = NV_DEVICE_INFO_V0_FERMI; break;
case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break;
case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
+ case GP100: args->v0.family = NV_DEVICE_INFO_V0_PASCAL; break;
default:
args->v0.family = 0;
break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index e2a64ed14..77a52b54a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -10,6 +10,8 @@ nvkm-y += nvkm/engine/disp/gk104.o
nvkm-y += nvkm/engine/disp/gk110.o
nvkm-y += nvkm/engine/disp/gm107.o
nvkm-y += nvkm/engine/disp/gm200.o
+nvkm-y += nvkm/engine/disp/gp100.o
+nvkm-y += nvkm/engine/disp/gp104.o
nvkm-y += nvkm/engine/disp/outp.o
nvkm-y += nvkm/engine/disp/outpdp.o
@@ -45,12 +47,15 @@ nvkm-y += nvkm/engine/disp/rootgk104.o
nvkm-y += nvkm/engine/disp/rootgk110.o
nvkm-y += nvkm/engine/disp/rootgm107.o
nvkm-y += nvkm/engine/disp/rootgm200.o
+nvkm-y += nvkm/engine/disp/rootgp100.o
+nvkm-y += nvkm/engine/disp/rootgp104.o
nvkm-y += nvkm/engine/disp/channv50.o
nvkm-y += nvkm/engine/disp/changf119.o
nvkm-y += nvkm/engine/disp/dmacnv50.o
nvkm-y += nvkm/engine/disp/dmacgf119.o
+nvkm-y += nvkm/engine/disp/dmacgp104.o
nvkm-y += nvkm/engine/disp/basenv50.o
nvkm-y += nvkm/engine/disp/baseg84.o
@@ -59,6 +64,7 @@ nvkm-y += nvkm/engine/disp/basegt215.o
nvkm-y += nvkm/engine/disp/basegf119.o
nvkm-y += nvkm/engine/disp/basegk104.o
nvkm-y += nvkm/engine/disp/basegk110.o
+nvkm-y += nvkm/engine/disp/basegp104.o
nvkm-y += nvkm/engine/disp/corenv50.o
nvkm-y += nvkm/engine/disp/coreg84.o
@@ -70,6 +76,8 @@ nvkm-y += nvkm/engine/disp/coregk104.o
nvkm-y += nvkm/engine/disp/coregk110.o
nvkm-y += nvkm/engine/disp/coregm107.o
nvkm-y += nvkm/engine/disp/coregm200.o
+nvkm-y += nvkm/engine/disp/coregp100.o
+nvkm-y += nvkm/engine/disp/coregp104.o
nvkm-y += nvkm/engine/disp/ovlynv50.o
nvkm-y += nvkm/engine/disp/ovlyg84.o
@@ -77,6 +85,7 @@ nvkm-y += nvkm/engine/disp/ovlygt200.o
nvkm-y += nvkm/engine/disp/ovlygt215.o
nvkm-y += nvkm/engine/disp/ovlygf119.o
nvkm-y += nvkm/engine/disp/ovlygk104.o
+nvkm-y += nvkm/engine/disp/ovlygp104.o
nvkm-y += nvkm/engine/disp/piocnv50.o
nvkm-y += nvkm/engine/disp/piocgf119.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c
new file mode 100644
index 000000000..51688e37c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gp104_disp_base_oclass = {
+ .base.oclass = GK110_DISP_BASE_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_base_new,
+ .func = &gp104_disp_dmac_func,
+ .mthd = &gf119_disp_base_chan_mthd,
+ .chid = 1,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
index aee374884..f5f683d9f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -85,6 +85,7 @@ extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_pior;
extern const struct nv50_disp_chan_mthd gf119_disp_base_chan_mthd;
extern const struct nv50_disp_chan_mthd gk104_disp_core_chan_mthd;
+extern const struct nv50_disp_chan_mthd gk104_disp_ovly_chan_mthd;
struct nv50_disp_pioc_oclass {
int (*ctor)(const struct nv50_disp_chan_func *,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
index 6b1dc703d..21fbf89b6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
@@ -171,7 +171,7 @@ gf119_disp_core_chan_mthd = {
}
};
-static void
+void
gf119_disp_core_fini(struct nv50_disp_dmac *chan)
{
struct nv50_disp *disp = chan->base.root->disp;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c
new file mode 100644
index 000000000..d5dff6619
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gp100_disp_core_oclass = {
+ .base.oclass = GP100_DISP_CORE_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_core_new,
+ .func = &gf119_disp_core_func,
+ .mthd = &gk104_disp_core_chan_mthd,
+ .chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c
new file mode 100644
index 000000000..6922f4007
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+
+static int
+gp104_disp_core_init(struct nv50_disp_dmac *chan)
+{
+ struct nv50_disp *disp = chan->base.root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+
+ /* enable error reporting */
+ nvkm_mask(device, 0x6100a0, 0x00000001, 0x00000001);
+
+ /* initialise channel for dma command submission */
+ nvkm_wr32(device, 0x611494, chan->push);
+ nvkm_wr32(device, 0x611498, 0x00010000);
+ nvkm_wr32(device, 0x61149c, 0x00000001);
+ nvkm_mask(device, 0x610490, 0x00000010, 0x00000010);
+ nvkm_wr32(device, 0x640000, 0x00000000);
+ nvkm_wr32(device, 0x610490, 0x01000013);
+
+ /* wait for it to go inactive */
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x610490) & 0x80000000))
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "core init: %08x\n",
+ nvkm_rd32(device, 0x610490));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+const struct nv50_disp_dmac_func
+gp104_disp_core_func = {
+ .init = gp104_disp_core_init,
+ .fini = gf119_disp_core_fini,
+ .bind = gf119_disp_dmac_bind,
+};
+
+const struct nv50_disp_dmac_oclass
+gp104_disp_core_oclass = {
+ .base.oclass = GP104_DISP_CORE_CHANNEL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_core_new,
+ .func = &gp104_disp_core_func,
+ .mthd = &gk104_disp_core_chan_mthd,
+ .chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
index 876b14549..a57f7cef3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
@@ -36,7 +36,7 @@ gf119_disp_dmac_bind(struct nv50_disp_dmac *chan,
chan->base.chid << 27 | 0x00000001);
}
-static void
+void
gf119_disp_dmac_fini(struct nv50_disp_dmac *chan)
{
struct nv50_disp *disp = chan->base.root->disp;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c
new file mode 100644
index 000000000..ad24c2c57
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <subdev/timer.h>
+
+static int
+gp104_disp_dmac_init(struct nv50_disp_dmac *chan)
+{
+ struct nv50_disp *disp = chan->base.root->disp;
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ int chid = chan->base.chid;
+
+ /* enable error reporting */
+ nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+
+ /* initialise channel for dma command submission */
+ nvkm_wr32(device, 0x611494 + (chid * 0x0010), chan->push);
+ nvkm_wr32(device, 0x611498 + (chid * 0x0010), 0x00010000);
+ nvkm_wr32(device, 0x61149c + (chid * 0x0010), 0x00000001);
+ nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
+ nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
+ nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013);
+
+ /* wait for it to go inactive */
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000))
+ break;
+ ) < 0) {
+ nvkm_error(subdev, "ch %d init: %08x\n", chid,
+ nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+const struct nv50_disp_dmac_func
+gp104_disp_dmac_func = {
+ .init = gp104_disp_dmac_init,
+ .fini = gf119_disp_dmac_fini,
+ .bind = gf119_disp_dmac_bind,
+};
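
The nvkm_msec() construct used in both channel-init paths above polls a condition with a timeout and evaluates negative on expiry. A rough userspace analogue, assuming a monotonic clock; poll_msec() and channel_idle() are illustrative names, not nvkm API:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static long long now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Poll cond() until it returns true or timeout_ms expires; <0 on timeout. */
static int poll_msec(bool (*cond)(void), long timeout_ms)
{
	long long deadline = now_ns() + timeout_ms * 1000000LL;

	do {
		if (cond())
			return 0;
	} while (now_ns() < deadline);
	return -1;
}

static bool channel_idle(void)
{
	return true;	/* stand-in for testing the 0x610490 busy bit */
}

int main(void)
{
	if (poll_msec(channel_idle, 2000) < 0)
		printf("channel init timed out\n");
	return 0;
}
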
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
index fc84eb8b5..43ac05857 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
@@ -25,8 +25,12 @@ int nv50_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32);
extern const struct nv50_disp_dmac_func nv50_disp_core_func;
extern const struct nv50_disp_dmac_func gf119_disp_dmac_func;
+void gf119_disp_dmac_fini(struct nv50_disp_dmac *);
int gf119_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32);
extern const struct nv50_disp_dmac_func gf119_disp_core_func;
+void gf119_disp_core_fini(struct nv50_disp_dmac *);
+
+extern const struct nv50_disp_dmac_func gp104_disp_dmac_func;
struct nv50_disp_dmac_oclass {
int (*ctor)(const struct nv50_disp_dmac_func *,
@@ -88,4 +92,10 @@ extern const struct nv50_disp_dmac_oclass gk110_disp_base_oclass;
extern const struct nv50_disp_dmac_oclass gm107_disp_core_oclass;
extern const struct nv50_disp_dmac_oclass gm200_disp_core_oclass;
+
+extern const struct nv50_disp_dmac_oclass gp100_disp_core_oclass;
+
+extern const struct nv50_disp_dmac_oclass gp104_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass gp104_disp_base_oclass;
+extern const struct nv50_disp_dmac_oclass gp104_disp_ovly_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
index 5dd34382f..29e84b241 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
@@ -76,12 +76,10 @@ exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
mask |= 0x0001 << or;
mask |= 0x0100 << head;
-
list_for_each_entry(outp, &disp->base.outp, head) {
if ((outp->info.hasht & 0xff) == type &&
(outp->info.hashm & mask) == mask) {
- *data = nvbios_outp_match(bios, outp->info.hasht,
- outp->info.hashm,
+ *data = nvbios_outp_match(bios, outp->info.hasht, mask,
ver, hdr, cnt, len, info);
if (!*data)
return NULL;
@@ -415,7 +413,7 @@ gf119_disp_intr_supervisor(struct work_struct *work)
nvkm_wr32(device, 0x6101d0, 0x80000000);
}
-static void
+void
gf119_disp_intr_error(struct nv50_disp *disp, int chid)
{
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
@@ -463,7 +461,7 @@ gf119_disp_intr(struct nv50_disp *disp)
u32 stat = nvkm_rd32(device, 0x61009c);
int chid = ffs(stat) - 1;
if (chid >= 0)
- gf119_disp_intr_error(disp, chid);
+ disp->func->intr_error(disp, chid);
intr &= ~0x00000002;
}
@@ -507,6 +505,7 @@ gf119_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
static const struct nv50_disp_func
gf119_disp = {
.intr = gf119_disp_intr,
+ .intr_error = gf119_disp_intr_error,
.uevent = &gf119_disp_chan_uevent,
.super = gf119_disp_intr_supervisor,
.root = &gf119_disp_root_oclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
index a86384b8e..37f145cf3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
@@ -27,6 +27,7 @@
static const struct nv50_disp_func
gk104_disp = {
.intr = gf119_disp_intr,
+ .intr_error = gf119_disp_intr_error,
.uevent = &gf119_disp_chan_uevent,
.super = gf119_disp_intr_supervisor,
.root = &gk104_disp_root_oclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
index 0d574c7e5..e14ac9466 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
@@ -27,6 +27,7 @@
static const struct nv50_disp_func
gk110_disp = {
.intr = gf119_disp_intr,
+ .intr_error = gf119_disp_intr_error,
.uevent = &gf119_disp_chan_uevent,
.super = gf119_disp_intr_supervisor,
.root = &gk110_disp_root_oclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
index f4b9cf857..2f2437cc5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
@@ -27,6 +27,7 @@
static const struct nv50_disp_func
gm107_disp = {
.intr = gf119_disp_intr,
+ .intr_error = gf119_disp_intr_error,
.uevent = &gf119_disp_chan_uevent,
.super = gf119_disp_intr_supervisor,
.root = &gm107_disp_root_oclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
index 67eec8620..9f368d4ee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
@@ -27,6 +27,7 @@
static const struct nv50_disp_func
gm200_disp = {
.intr = gf119_disp_intr,
+ .intr_error = gf119_disp_intr_error,
.uevent = &gf119_disp_chan_uevent,
.super = gf119_disp_intr_supervisor,
.root = &gm200_disp_root_oclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
new file mode 100644
index 000000000..4f81bf314
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "nv50.h"
+#include "rootnv50.h"
+
+static const struct nv50_disp_func
+gp100_disp = {
+ .intr = gf119_disp_intr,
+ .intr_error = gf119_disp_intr_error,
+ .uevent = &gf119_disp_chan_uevent,
+ .super = gf119_disp_intr_supervisor,
+ .root = &gp100_disp_root_oclass,
+ .head.vblank_init = gf119_disp_vblank_init,
+ .head.vblank_fini = gf119_disp_vblank_fini,
+ .head.scanoutpos = gf119_disp_root_scanoutpos,
+ .outp.internal.crt = nv50_dac_output_new,
+ .outp.internal.tmds = nv50_sor_output_new,
+ .outp.internal.lvds = nv50_sor_output_new,
+ .outp.internal.dp = gm200_sor_dp_new,
+ .dac.nr = 3,
+ .dac.power = nv50_dac_power,
+ .dac.sense = nv50_dac_sense,
+ .sor.nr = 4,
+ .sor.power = nv50_sor_power,
+ .sor.hda_eld = gf119_hda_eld,
+ .sor.hdmi = gk104_hdmi_ctrl,
+ .sor.magic = gm200_sor_magic,
+};
+
+int
+gp100_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+{
+ return gf119_disp_new_(&gp100_disp, device, index, pdisp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c
new file mode 100644
index 000000000..3bf338033
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "nv50.h"
+#include "rootnv50.h"
+
+static void
+gp104_disp_intr_error(struct nv50_disp *disp, int chid)
+{
+ struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 mthd = nvkm_rd32(device, 0x6111f0 + (chid * 12));
+ u32 data = nvkm_rd32(device, 0x6111f4 + (chid * 12));
+ u32 unkn = nvkm_rd32(device, 0x6111f8 + (chid * 12));
+
+ nvkm_error(subdev, "chid %d mthd %04x data %08x %08x %08x\n",
+ chid, (mthd & 0x0000ffc), data, mthd, unkn);
+
+ if (chid < ARRAY_SIZE(disp->chan)) {
+ switch (mthd & 0xffc) {
+ case 0x0080:
+ nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
+ break;
+ default:
+ break;
+ }
+ }
+
+ nvkm_wr32(device, 0x61009c, (1 << chid));
+ nvkm_wr32(device, 0x6111f0 + (chid * 12), 0x90000000);
+}
+
+static const struct nv50_disp_func
+gp104_disp = {
+ .intr = gf119_disp_intr,
+ .intr_error = gp104_disp_intr_error,
+ .uevent = &gf119_disp_chan_uevent,
+ .super = gf119_disp_intr_supervisor,
+ .root = &gp104_disp_root_oclass,
+ .head.vblank_init = gf119_disp_vblank_init,
+ .head.vblank_fini = gf119_disp_vblank_fini,
+ .head.scanoutpos = gf119_disp_root_scanoutpos,
+ .outp.internal.crt = nv50_dac_output_new,
+ .outp.internal.tmds = nv50_sor_output_new,
+ .outp.internal.lvds = nv50_sor_output_new,
+ .outp.internal.dp = gm200_sor_dp_new,
+ .dac.nr = 3,
+ .dac.power = nv50_dac_power,
+ .dac.sense = nv50_dac_sense,
+ .sor.nr = 4,
+ .sor.power = nv50_sor_power,
+ .sor.hda_eld = gf119_hda_eld,
+ .sor.hdmi = gk104_hdmi_ctrl,
+ .sor.magic = gm200_sor_magic,
+};
+
+int
+gp104_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+{
+ return gf119_disp_new_(&gp104_disp, device, index, pdisp);
+}
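
The hunks above convert the hard-coded gf119_disp_intr_error() call into a per-chipset intr_error hook, so GP104 can supply its own handler for its relocated error-report block (read at 0x6111f0 with a 12-byte stride in gp104_disp_intr_error()). A minimal sketch of that dispatch pattern; the struct and function names below are simplified stand-ins, not the kernel's definitions.

#include <stdio.h>

/* "disp"/"disp_func" are reduced stand-ins for nv50_disp/nv50_disp_func. */
struct disp;

struct disp_func {
	void (*intr_error)(struct disp *, int chid);
};

struct disp {
	const struct disp_func *func;
};

static void
gf119_style_intr_error(struct disp *disp, int chid)
{
	printf("gf119-family error handler, chid %d\n", chid);
}

static void
gp104_style_intr_error(struct disp *disp, int chid)
{
	printf("gp104 error handler (0x6111f0 block), chid %d\n", chid);
}

static const struct disp_func gk104_funcs = {
	.intr_error = gf119_style_intr_error,
};

static const struct disp_func gp104_funcs = {
	.intr_error = gp104_style_intr_error,
};

int main(void)
{
	struct disp d = { .func = &gp104_funcs };

	/* the shared IRQ path now dispatches instead of hard-coding: */
	d.func->intr_error(&d, 0);
	return 0;
}
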
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index fcb1b0c46..fbb8c7dc1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -32,6 +32,7 @@
#include <subdev/bios/init.h>
#include <subdev/bios/pll.h>
#include <subdev/devinit.h>
+#include <subdev/timer.h>
static const struct nvkm_disp_oclass *
nv50_disp_root_(struct nvkm_disp *base)
@@ -269,8 +270,7 @@ exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
list_for_each_entry(outp, &disp->base.outp, head) {
if ((outp->info.hasht & 0xff) == type &&
(outp->info.hashm & mask) == mask) {
- *data = nvbios_outp_match(bios, outp->info.hasht,
- outp->info.hashm,
+ *data = nvbios_outp_match(bios, outp->info.hasht, mask,
ver, hdr, cnt, len, info);
if (!*data)
return NULL;
@@ -426,6 +426,134 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
return outp;
}
+static bool
+nv50_disp_dptmds_war(struct nvkm_device *device)
+{
+ switch (device->chipset) {
+ case 0x94:
+ case 0x96:
+ case 0x98:
+ case 0xaa:
+ case 0xac:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+static bool
+nv50_disp_dptmds_war_needed(struct nv50_disp *disp, struct dcb_output *outp)
+{
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ const u32 soff = __ffs(outp->or) * 0x800;
+ if (nv50_disp_dptmds_war(device) && outp->type == DCB_OUTPUT_TMDS) {
+ switch (nvkm_rd32(device, 0x614300 + soff) & 0x00030000) {
+ case 0x00000000:
+ case 0x00030000:
+ return true;
+ default:
+ break;
+ }
+ }
+ return false;
+
+}
+
+static void
+nv50_disp_dptmds_war_2(struct nv50_disp *disp, struct dcb_output *outp)
+{
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ const u32 soff = __ffs(outp->or) * 0x800;
+
+ if (!nv50_disp_dptmds_war_needed(disp, outp))
+ return;
+
+ nvkm_mask(device, 0x00e840, 0x80000000, 0x80000000);
+ nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x03000000);
+ nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000001);
+
+ nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x00000000);
+ nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x14000000);
+ nvkm_usec(device, 400, NVKM_DELAY);
+ nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x00000000);
+ nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x01000000);
+
+ if (nvkm_rd32(device, 0x61c004 + soff) & 0x00000001) {
+ u32 seqctl = nvkm_rd32(device, 0x61c030 + soff);
+ u32 pu_pc = seqctl & 0x0000000f;
+ nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f008000);
+ }
+}
+
+static void
+nv50_disp_dptmds_war_3(struct nv50_disp *disp, struct dcb_output *outp)
+{
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ const u32 soff = __ffs(outp->or) * 0x800;
+ u32 sorpwr;
+
+ if (!nv50_disp_dptmds_war_needed(disp, outp))
+ return;
+
+ sorpwr = nvkm_rd32(device, 0x61c004 + soff);
+ if (sorpwr & 0x00000001) {
+ u32 seqctl = nvkm_rd32(device, 0x61c030 + soff);
+ u32 pd_pc = (seqctl & 0x00000f00) >> 8;
+ u32 pu_pc = seqctl & 0x0000000f;
+
+ nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x1f008000);
+
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
+ break;
+ );
+ nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000000);
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
+ break;
+ );
+
+ nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x00002000);
+ nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f000000);
+ }
+
+ nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000000);
+ nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x00000000);
+
+ if (sorpwr & 0x00000001) {
+ nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000001);
+ }
+}
+
+static void
+nv50_disp_update_sppll1(struct nv50_disp *disp)
+{
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ bool used = false;
+ int sor;
+
+ if (!nv50_disp_dptmds_war(device))
+ return;
+
+ for (sor = 0; sor < disp->func->sor.nr; sor++) {
+ u32 clksor = nvkm_rd32(device, 0x614300 + (sor * 0x800));
+ switch (clksor & 0x03000000) {
+ case 0x02000000:
+ case 0x03000000:
+ used = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (used)
+ return;
+
+ nvkm_mask(device, 0x00e840, 0x80000000, 0x00000000);
+}
+
static void
nv50_disp_intr_unk10_0(struct nv50_disp *disp, int head)
{
@@ -679,6 +807,8 @@ nv50_disp_intr_unk20_2(struct nv50_disp *disp, int head)
nvkm_mask(device, hreg, 0x0000000f, hval);
nvkm_mask(device, oreg, mask, oval);
+
+ nv50_disp_dptmds_war_2(disp, &outp->info);
}
/* If programming a TMDS output on a SOR that can also be configured for
@@ -720,6 +850,7 @@ nv50_disp_intr_unk40_0(struct nv50_disp *disp, int head)
if (outp->info.location == 0 && outp->info.type == DCB_OUTPUT_TMDS)
nv50_disp_intr_unk40_0_tmds(disp, &outp->info);
+ nv50_disp_dptmds_war_3(disp, &outp->info);
}
void
@@ -767,6 +898,7 @@ nv50_disp_intr_supervisor(struct work_struct *work)
continue;
nv50_disp_intr_unk40_0(disp, head);
}
+ nv50_disp_update_sppll1(disp);
}
nvkm_wr32(device, 0x610030, 0x80000000);
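
The DP/TMDS workaround above (and the new <subdev/timer.h> include) leans on the nvkm_usec()/nvkm_msec() busy-wait helpers, which evaluate a caller-supplied block until it breaks out or a deadline passes. A rough userspace equivalent of that poll-with-timeout pattern; the names and the timing source are illustrative, not the kernel's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t
now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static bool
poll_until_clear(volatile uint32_t *reg, uint32_t mask, unsigned timeout_ms)
{
	uint64_t deadline = now_ns() + (uint64_t)timeout_ms * 1000000ull;

	while (now_ns() < deadline) {
		if (!(*reg & mask))
			return true;	/* e.g. 0x61c030 sequencer idle */
	}
	return false;			/* timed out, like nvkm_msec() < 0 */
}

int main(void)
{
	volatile uint32_t seqctl = 0x00000000;	/* pretend already idle */

	printf("idle: %d\n", poll_until_clear(&seqctl, 0x10000000, 2000));
	return 0;
}
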
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index aecebd871..1e1de6bfe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -68,6 +68,7 @@ struct nv50_disp_func_outp {
struct nv50_disp_func {
void (*intr)(struct nv50_disp *);
+ void (*intr_error)(struct nv50_disp *, int chid);
const struct nvkm_event_func *uevent;
void (*super)(struct work_struct *);
@@ -114,4 +115,5 @@ void gf119_disp_vblank_init(struct nv50_disp *, int);
void gf119_disp_vblank_fini(struct nv50_disp *, int);
void gf119_disp_intr(struct nv50_disp *);
void gf119_disp_intr_supervisor(struct work_struct *);
+void gf119_disp_intr_error(struct nv50_disp *, int);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c
index 2e2dc0641..2f0220b39 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c
@@ -80,7 +80,7 @@ gk104_disp_ovly_mthd_base = {
}
};
-static const struct nv50_disp_chan_mthd
+const struct nv50_disp_chan_mthd
gk104_disp_ovly_chan_mthd = {
.name = "Overlay",
.addr = 0x001000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c
new file mode 100644
index 000000000..97e2dd2d9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gp104_disp_ovly_oclass = {
+ .base.oclass = GK104_DISP_OVERLAY_CONTROL_DMA,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = nv50_disp_ovly_new,
+ .func = &gp104_disp_dmac_func,
+ .mthd = &gk104_disp_ovly_chan_mthd,
+ .chid = 5,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c
new file mode 100644
index 000000000..ac8fdd728
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gp100_disp_root = {
+ .init = gf119_disp_root_init,
+ .fini = gf119_disp_root_fini,
+ .dmac = {
+ &gp100_disp_core_oclass,
+ &gk110_disp_base_oclass,
+ &gk104_disp_ovly_oclass,
+ },
+ .pioc = {
+ &gk104_disp_oimm_oclass,
+ &gk104_disp_curs_oclass,
+ },
+};
+
+static int
+gp100_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ return nv50_disp_root_new_(&gp100_disp_root, disp, oclass,
+ data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gp100_disp_root_oclass = {
+ .base.oclass = GP100_DISP,
+ .base.minver = -1,
+ .base.maxver = -1,
+ .ctor = gp100_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c
new file mode 100644
index 000000000..8443e04dc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gp104_disp_root = {
+ .init = gf119_disp_root_init,
+ .fini = gf119_disp_root_fini,
+ .dmac = {
+ &gp104_disp_core_oclass,
+ &gp104_disp_base_oclass,
+ &gp104_disp_ovly_oclass,
+ },
+ .pioc = {
+ &gk104_disp_oimm_oclass,
+ &gk104_disp_curs_oclass,
+ },
+};
+
+static int
+gp104_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ return nv50_disp_root_new_(&gp104_disp_root, disp, oclass,
+ data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gp104_disp_root_oclass = {
+ .base.oclass = GP104_DISP,
+ .base.minver = -1,
+ .base.maxver = -1,
+ .ctor = gp104_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index cb449ed8d..ad00f1724 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -40,4 +40,6 @@ extern const struct nvkm_disp_oclass gk104_disp_root_oclass;
extern const struct nvkm_disp_oclass gk110_disp_root_oclass;
extern const struct nvkm_disp_oclass gm107_disp_root_oclass;
extern const struct nvkm_disp_oclass gm200_disp_root_oclass;
+extern const struct nvkm_disp_oclass gp100_disp_root_oclass;
+extern const struct nvkm_disp_oclass gp104_disp_root_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index 65e5d291e..98651a43b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -13,6 +13,7 @@ nvkm-y += nvkm/engine/fifo/gk20a.o
nvkm-y += nvkm/engine/fifo/gm107.o
nvkm-y += nvkm/engine/fifo/gm200.o
nvkm-y += nvkm/engine/fifo/gm20b.o
+nvkm-y += nvkm/engine/fifo/gp100.o
nvkm-y += nvkm/engine/fifo/chan.o
nvkm-y += nvkm/engine/fifo/channv50.o
@@ -31,3 +32,4 @@ nvkm-y += nvkm/engine/fifo/gpfifogf100.o
nvkm-y += nvkm/engine/fifo/gpfifogk104.o
nvkm-y += nvkm/engine/fifo/gpfifogk110.o
nvkm-y += nvkm/engine/fifo/gpfifogm200.o
+nvkm-y += nvkm/engine/fifo/gpfifogp100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
index e06f4d46f..230f64e5f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
@@ -27,4 +27,5 @@ int gk104_fifo_gpfifo_new(struct nvkm_fifo *, const struct nvkm_oclass *,
extern const struct nvkm_fifo_chan_oclass gk104_fifo_gpfifo_oclass;
extern const struct nvkm_fifo_chan_oclass gk110_fifo_gpfifo_oclass;
extern const struct nvkm_fifo_chan_oclass gm200_fifo_gpfifo_oclass;
+extern const struct nvkm_fifo_chan_oclass gp100_fifo_gpfifo_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
index edec30fd3..0a7b6ed5e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
@@ -37,7 +37,10 @@ nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *base, int cookie)
{
struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
+
+ mutex_lock(&chan->fifo->base.engine.subdev.mutex);
nvkm_ramht_remove(imem->ramht, cookie);
+ mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 743f3a189..103c0afaa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -329,7 +329,7 @@ gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
}
if (eu == NULL) {
- enum nvkm_devidx engidx = nvkm_top_fault(device->top, unit);
+ enum nvkm_devidx engidx = nvkm_top_fault(device, unit);
if (engidx < NVKM_SUBDEV_NR) {
const char *src = nvkm_subdev_name[engidx];
char *dst = en;
@@ -589,7 +589,6 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
struct gk104_fifo *fifo = gk104_fifo(base);
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- struct nvkm_top *top = device->top;
int engn, runl, pbid, ret, i, j;
enum nvkm_devidx engidx;
u32 *map;
@@ -608,7 +607,7 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
/* Determine runlist configuration from topology device info. */
i = 0;
- while ((int)(engidx = nvkm_top_engine(top, i++, &runl, &engn)) >= 0) {
+ while ((int)(engidx = nvkm_top_engine(device, i++, &runl, &engn)) >= 0) {
/* Determine which PBDMA handles requests for this engine. */
for (j = 0, pbid = -1; j < fifo->pbdma_nr; j++) {
if (map[j] & (1 << runl)) {
@@ -617,8 +616,8 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
}
}
- nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d\n",
- engn, runl, pbid);
+ nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d (%s)\n",
+ engn, runl, pbid, nvkm_subdev_name[engidx]);
fifo->engine[engn].engine = nvkm_device_engine(device, engidx);
fifo->engine[engn].runl = runl;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
new file mode 100644
index 000000000..eff83f7fb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "gk104.h"
+#include "changk104.h"
+
+static const struct nvkm_enum
+gp100_fifo_fault_engine[] = {
+ { 0x01, "DISPLAY" },
+ { 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
+ { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
+ { 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
+ { 0x06, "HOST0" },
+ { 0x07, "HOST1" },
+ { 0x08, "HOST2" },
+ { 0x09, "HOST3" },
+ { 0x0a, "HOST4" },
+ { 0x0b, "HOST5" },
+ { 0x0c, "HOST6" },
+ { 0x0d, "HOST7" },
+ { 0x0e, "HOST8" },
+ { 0x0f, "HOST9" },
+ { 0x10, "HOST10" },
+ { 0x13, "PERF" },
+ { 0x17, "PMU" },
+ { 0x18, "PTP" },
+ { 0x1f, "PHYSICAL" },
+ {}
+};
+
+static const struct gk104_fifo_func
+gp100_fifo = {
+ .fault.engine = gp100_fifo_fault_engine,
+ .fault.reason = gk104_fifo_fault_reason,
+ .fault.hubclient = gk104_fifo_fault_hubclient,
+ .fault.gpcclient = gk104_fifo_fault_gpcclient,
+ .chan = {
+ &gp100_fifo_gpfifo_oclass,
+ NULL
+ },
+};
+
+int
+gp100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+ return gk104_fifo_new_(&gp100_fifo, device, index, 4096, pfifo);
+}
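
gp100_fifo_fault_engine[] is a sparse id-to-name table consumed by the shared gk104 fault decoder; the zero-filled terminator ends the scan. A minimal lookup in the same shape, with the entry struct trimmed to only what the sketch needs.

#include <stdio.h>

struct enum_ent {
	unsigned value;
	const char *name;
};

static const struct enum_ent fault_engine[] = {
	{ 0x01, "DISPLAY" },
	{ 0x06, "HOST0" },
	{ 0x1f, "PHYSICAL" },
	{}			/* zero-filled sentinel terminates the table */
};

static const char *
enum_find(const struct enum_ent *en, unsigned value)
{
	for (; en->name; en++)
		if (en->value == value)
			return en->name;
	return NULL;		/* unknown id: caller reports it raw */
}

int main(void)
{
	printf("fault unit 0x06 = %s\n", enum_find(fault_engine, 0x06));
	return 0;
}
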
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogp100.c
new file mode 100644
index 000000000..1530a9217
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogp100.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "changk104.h"
+
+#include <nvif/class.h>
+
+const struct nvkm_fifo_chan_oclass
+gp100_fifo_gpfifo_oclass = {
+ .base.oclass = PASCAL_CHANNEL_GPFIFO_A,
+ .base.minver = 0,
+ .base.maxver = 0,
+ .ctor = gk104_fifo_gpfifo_new,
+};
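
gpfifogp100.c is nearly pure registration boilerplate: Pascal's host channel evidently stays register-compatible with Kepler's, so the new entry pairs a new class identifier with the existing gk104_fifo_gpfifo_new() constructor. A toy model of that oclass-table pattern follows; the numeric ids are illustrative, not nvif/class.h values.

#include <stdio.h>

typedef int (*chan_ctor)(void);

struct chan_oclass {
	unsigned oclass;
	chan_ctor ctor;
};

static int gk104_gpfifo_new(void)
{
	return 0;	/* shared constructor, reused by the Pascal entry */
}

static const struct chan_oclass chan_classes[] = {
	{ 0xa06f, gk104_gpfifo_new },	/* Kepler-style id (illustrative) */
	{ 0xc06f, gk104_gpfifo_new },	/* Pascal-style id (illustrative) */
	{ 0, NULL }
};

static chan_ctor
chan_lookup(unsigned oclass)
{
	const struct chan_oclass *c;

	for (c = chan_classes; c->ctor; c++)
		if (c->oclass == oclass)
			return c->ctor;
	return NULL;
}

int main(void)
{
	printf("ctor found: %d\n", chan_lookup(0xc06f) != NULL);
	return 0;
}
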
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
index 290ed0db8..f1c494182 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
@@ -31,6 +31,7 @@ nvkm-y += nvkm/engine/gr/gk20a.o
nvkm-y += nvkm/engine/gr/gm107.o
nvkm-y += nvkm/engine/gr/gm200.o
nvkm-y += nvkm/engine/gr/gm20b.o
+nvkm-y += nvkm/engine/gr/gp100.o
nvkm-y += nvkm/engine/gr/ctxnv40.o
nvkm-y += nvkm/engine/gr/ctxnv50.o
@@ -48,3 +49,4 @@ nvkm-y += nvkm/engine/gr/ctxgk20a.o
nvkm-y += nvkm/engine/gr/ctxgm107.o
nvkm-y += nvkm/engine/gr/ctxgm200.o
nvkm-y += nvkm/engine/gr/ctxgm20b.o
+nvkm-y += nvkm/engine/gr/ctxgp100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index b02d8f50e..bc77eea35 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1240,7 +1240,7 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
const struct gf100_grctx_func *grctx = gr->func->grctx;
u32 idle_timeout;
- nvkm_mc_unk260(device->mc, 0);
+ nvkm_mc_unk260(device, 0);
gf100_gr_mmio(gr, grctx->hub);
gf100_gr_mmio(gr, grctx->gpc);
@@ -1264,7 +1264,7 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
gf100_gr_icmd(gr, grctx->icmd);
nvkm_wr32(device, 0x404154, idle_timeout);
gf100_gr_mthd(gr, grctx->mthd);
- nvkm_mc_unk260(device->mc, 1);
+ nvkm_mc_unk260(device, 1);
}
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index ac895edce..52048b5a5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -101,6 +101,8 @@ void gm200_grctx_generate_405b60(struct gf100_gr *);
extern const struct gf100_grctx_func gm20b_grctx;
+extern const struct gf100_grctx_func gp100_grctx;
+
/* context init value lists */
extern const struct gf100_gr_pack gf100_grctx_pack_icmd[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index f521de11a..c925ade58 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -226,7 +226,7 @@ gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
u32 idle_timeout;
int i;
- nvkm_mc_unk260(device->mc, 0);
+ nvkm_mc_unk260(device, 0);
gf100_gr_mmio(gr, grctx->hub);
gf100_gr_mmio(gr, grctx->gpc);
@@ -253,7 +253,7 @@ gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
gf100_gr_icmd(gr, grctx->icmd);
nvkm_wr32(device, 0x404154, idle_timeout);
gf100_gr_mthd(gr, grctx->mthd);
- nvkm_mc_unk260(device->mc, 1);
+ nvkm_mc_unk260(device, 1);
}
const struct gf100_grctx_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index 9ba337778..c46b3fdf7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -950,7 +950,7 @@ gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
u32 idle_timeout;
int i;
- nvkm_mc_unk260(device->mc, 0);
+ nvkm_mc_unk260(device, 0);
gf100_gr_mmio(gr, grctx->hub);
gf100_gr_mmio(gr, grctx->gpc);
@@ -979,7 +979,7 @@ gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
gf100_gr_icmd(gr, grctx->icmd);
nvkm_wr32(device, 0x404154, idle_timeout);
gf100_gr_mthd(gr, grctx->mthd);
- nvkm_mc_unk260(device->mc, 1);
+ nvkm_mc_unk260(device, 1);
nvkm_mask(device, 0x418800, 0x00200000, 0x00200000);
nvkm_mask(device, 0x41be10, 0x00800000, 0x00800000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
new file mode 100644
index 000000000..3d1ae7ddf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "ctxgf100.h"
+
+#include <subdev/fb.h>
+
+/*******************************************************************************
+ * PGRAPH context implementation
+ ******************************************************************************/
+
+static void
+gp100_grctx_generate_pagepool(struct gf100_grctx *info)
+{
+ const struct gf100_grctx_func *grctx = info->gr->func->grctx;
+ const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
+ const int s = 8;
+ const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
+ mmio_refn(info, 0x40800c, 0x00000000, s, b);
+ mmio_wr32(info, 0x408010, 0x80000000);
+ mmio_refn(info, 0x419004, 0x00000000, s, b);
+ mmio_wr32(info, 0x419008, 0x00000000);
+}
+
+static void
+gp100_grctx_generate_attrib(struct gf100_grctx *info)
+{
+ struct gf100_gr *gr = info->gr;
+ const struct gf100_grctx_func *grctx = gr->func->grctx;
+ const u32 alpha = grctx->alpha_nr;
+ const u32 attrib = grctx->attrib_nr;
+ const u32 pertpc = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
+ const u32 size = roundup(gr->tpc_total * pertpc, 0x80);
+ const u32 access = NV_MEM_ACCESS_RW;
+ const int s = 12;
+ const int b = mmio_vram(info, size, (1 << s), access);
+ const int max_batches = 0xffff;
+ u32 ao = 0;
+ u32 bo = ao + grctx->alpha_nr_max * gr->tpc_total;
+ int gpc, ppc, n = 0;
+
+ mmio_refn(info, 0x418810, 0x80000000, s, b);
+ mmio_refn(info, 0x419848, 0x10000000, s, b);
+ mmio_refn(info, 0x419c2c, 0x10000000, s, b);
+ mmio_refn(info, 0x419b00, 0x00000000, s, b);
+ mmio_wr32(info, 0x419b04, 0x80000000 | size >> 7);
+ mmio_wr32(info, 0x405830, attrib);
+ mmio_wr32(info, 0x40585c, alpha);
+ mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
+
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) {
+ const u32 as = alpha * gr->ppc_tpc_nr[gpc][ppc];
+ const u32 bs = attrib * gr->ppc_tpc_nr[gpc][ppc];
+ const u32 u = 0x418ea0 + (n * 0x04);
+ const u32 o = PPC_UNIT(gpc, ppc, 0);
+ if (!(gr->ppc_mask[gpc] & (1 << ppc)))
+ continue;
+ mmio_wr32(info, o + 0xc0, bs);
+ mmio_wr32(info, o + 0xf4, bo);
+ mmio_wr32(info, o + 0xf0, bs);
+ bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
+ mmio_wr32(info, o + 0xe4, as);
+ mmio_wr32(info, o + 0xf8, ao);
+ ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
+ mmio_wr32(info, u, bs);
+ }
+ }
+
+ mmio_wr32(info, 0x418eec, 0x00000000);
+ mmio_wr32(info, 0x41befc, 0x00000000);
+}
+
+static void
+gp100_grctx_generate_405b60(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const u32 dist_nr = DIV_ROUND_UP(gr->tpc_total, 4);
+ u32 dist[TPC_MAX / 4] = {};
+ u32 gpcs[GPC_MAX * 2] = {};
+ u8 tpcnr[GPC_MAX];
+ int tpc, gpc, i;
+
+ memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+
+ /* won't result in the same distribution as the binary driver where
+ * some of the gpcs have more tpcs than others, but this shall do
+ * for the moment. the code for earlier gpus has this issue too.
+ */
+ for (gpc = -1, i = 0; i < gr->tpc_total; i++) {
+ do {
+ gpc = (gpc + 1) % gr->gpc_nr;
+ } while(!tpcnr[gpc]);
+ tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
+
+ dist[i / 4] |= ((gpc << 4) | tpc) << ((i % 4) * 8);
+ gpcs[gpc + (gr->gpc_nr * (tpc / 4))] |= i << (tpc * 8);
+ }
+
+ for (i = 0; i < dist_nr; i++)
+ nvkm_wr32(device, 0x405b60 + (i * 4), dist[i]);
+ for (i = 0; i < gr->gpc_nr * 2; i++)
+ nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]);
+}
+
+static void
+gp100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const struct gf100_grctx_func *grctx = gr->func->grctx;
+ u32 idle_timeout, tmp;
+ int i;
+
+ gf100_gr_mmio(gr, gr->fuc_sw_ctx);
+
+ idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
+
+ grctx->pagepool(info);
+ grctx->bundle(info);
+ grctx->attrib(info);
+ grctx->unkn(gr);
+
+ gm200_grctx_generate_tpcid(gr);
+ gf100_grctx_generate_r406028(gr);
+ gk104_grctx_generate_r418bb8(gr);
+
+ for (i = 0; i < 8; i++)
+ nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
+ nvkm_wr32(device, 0x406500, 0x00000000);
+
+ nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
+
+ for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
+ tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 5);
+ nvkm_wr32(device, 0x4041c4, tmp);
+
+ gp100_grctx_generate_405b60(gr);
+
+ gf100_gr_icmd(gr, gr->fuc_bundle);
+ nvkm_wr32(device, 0x404154, idle_timeout);
+ gf100_gr_mthd(gr, gr->fuc_method);
+}
+
+const struct gf100_grctx_func
+gp100_grctx = {
+ .main = gp100_grctx_generate_main,
+ .unkn = gk104_grctx_generate_unkn,
+ .bundle = gm107_grctx_generate_bundle,
+ .bundle_size = 0x3000,
+ .bundle_min_gpm_fifo_depth = 0x180,
+ .bundle_token_limit = 0x1080,
+ .pagepool = gp100_grctx_generate_pagepool,
+ .pagepool_size = 0x20000,
+ .attrib = gp100_grctx_generate_attrib,
+ .attrib_nr_max = 0x660,
+ .attrib_nr = 0x440,
+ .alpha_nr_max = 0xc00,
+ .alpha_nr = 0x800,
+};
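
The 405b60 generator above packs one byte per global TPC, high nibble the GPC index and low nibble the TPC index within it, four bytes to a 32-bit word, assigning TPCs round-robin across GPCs. A standalone rendering of just that packing (the companion gpcs[] table is omitted), using a made-up two-GPC, three-TPC layout.

#include <stdio.h>
#include <string.h>

#define GPC_MAX 2
#define TPC_MAX 8

int main(void)
{
	const unsigned char tpc_nr[GPC_MAX] = { 3, 3 };	/* illustrative */
	unsigned char tpcnr[GPC_MAX];
	unsigned dist[TPC_MAX / 4] = { 0 };
	int tpc_total = 6, gpc = -1, tpc, i;

	memcpy(tpcnr, tpc_nr, sizeof(tpc_nr));
	for (i = 0; i < tpc_total; i++) {
		do {		/* round-robin over GPCs with TPCs left */
			gpc = (gpc + 1) % GPC_MAX;
		} while (!tpcnr[gpc]);
		tpc = tpc_nr[gpc] - tpcnr[gpc]--;
		/* one byte per TPC: (gpc << 4) | tpc, four to a word */
		dist[i / 4] |= ((gpc << 4) | tpc) << ((i % 4) * 8);
	}

	for (i = 0; i < (tpc_total + 3) / 4; i++)
		printf("405b60[%d] = 0x%08x\n", i, dist[i]);
	return 0;
}
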
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index ae9ab5b1a..157919c78 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1457,24 +1457,30 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
struct nvkm_device *device = subdev->device;
struct nvkm_secboot *sb = device->secboot;
int i;
+ int ret = 0;
if (gr->firmware) {
/* load fuc microcode */
- nvkm_mc_unk260(device->mc, 0);
+ nvkm_mc_unk260(device, 0);
/* securely-managed falcons must be reset using secure boot */
if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
- nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS);
+ ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS);
else
gf100_gr_init_fw(gr, 0x409000, &gr->fuc409c,
&gr->fuc409d);
+ if (ret)
+ return ret;
+
if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS))
- nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS);
+ ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS);
else
gf100_gr_init_fw(gr, 0x41a000, &gr->fuc41ac,
&gr->fuc41ad);
+ if (ret)
+ return ret;
- nvkm_mc_unk260(device->mc, 1);
+ nvkm_mc_unk260(device, 1);
/* start both of them running */
nvkm_wr32(device, 0x409840, 0xffffffff);
@@ -1576,7 +1582,7 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
}
/* load HUB microcode */
- nvkm_mc_unk260(device->mc, 0);
+ nvkm_mc_unk260(device, 0);
nvkm_wr32(device, 0x4091c0, 0x01000000);
for (i = 0; i < gr->func->fecs.ucode->data.size / 4; i++)
nvkm_wr32(device, 0x4091c4, gr->func->fecs.ucode->data.data[i]);
@@ -1599,7 +1605,7 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
nvkm_wr32(device, 0x41a188, i >> 6);
nvkm_wr32(device, 0x41a184, gr->func->gpccs.ucode->code.data[i]);
}
- nvkm_mc_unk260(device->mc, 1);
+ nvkm_mc_unk260(device, 1);
/* load register lists */
gf100_gr_init_csdata(gr, grctx->hub, 0x409000, 0x000, 0x000000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 2b98abdb9..268b8d60f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -292,4 +292,6 @@ extern const struct gf100_gr_init gm107_gr_init_l1c_0[];
extern const struct gf100_gr_init gm107_gr_init_wwdx_0[];
extern const struct gf100_gr_init gm107_gr_init_cbm_0[];
void gm107_gr_init_bios(struct gf100_gr *);
+
+void gm200_gr_init_gpc_mmu(struct gf100_gr *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index 4ca8ed151..de8b806b8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -361,6 +361,5 @@ gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
if (ret)
return ret;
-
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index 4dfa4513b..6435f1257 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -38,7 +38,7 @@ gm200_gr_rops(struct gf100_gr *gr)
return nvkm_rd32(gr->base.engine.subdev.device, 0x12006c);
}
-static void
+void
gm200_gr_init_gpc_mmu(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
new file mode 100644
index 000000000..26ad79def
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "gf100.h"
+#include "ctxgf100.h"
+
+#include <nvif/class.h>
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static void
+gp100_gr_init_rop_active_fbps(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ /*XXX: otherwise identical to gm200 aside from mask.. do everywhere? */
+ const u32 fbp_count = nvkm_rd32(device, 0x12006c) & 0x0000000f;
+ nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
+ nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
+}
+
+static int
+gp100_gr_init(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
+ u32 data[TPC_MAX / 8] = {};
+ u8 tpcnr[GPC_MAX];
+ int gpc, tpc, rop;
+ int i;
+
+ gr->func->init_gpc_mmu(gr);
+
+ gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
+
+ nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
+
+ memset(data, 0x00, sizeof(data));
+ memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+ for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
+ do {
+ gpc = (gpc + 1) % gr->gpc_nr;
+ } while (!tpcnr[gpc]);
+ tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
+
+ data[i / 8] |= tpc << ((i % 8) * 4);
+ }
+
+ nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
+ nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
+ nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
+ nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
+
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+ gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+ gr->tpc_total);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
+ }
+
+ nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+ nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
+ nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
+
+ gr->func->init_rop_active_fbps(gr);
+
+ nvkm_wr32(device, 0x400500, 0x00010001);
+ nvkm_wr32(device, 0x400100, 0xffffffff);
+ nvkm_wr32(device, 0x40013c, 0xffffffff);
+ nvkm_wr32(device, 0x400124, 0x00000002);
+ nvkm_wr32(device, 0x409c24, 0x000f0002);
+ nvkm_wr32(device, 0x405848, 0xc0000000);
+ nvkm_mask(device, 0x40584c, 0x00000000, 0x00000001);
+ nvkm_wr32(device, 0x404000, 0xc0000000);
+ nvkm_wr32(device, 0x404600, 0xc0000000);
+ nvkm_wr32(device, 0x408030, 0xc0000000);
+ nvkm_wr32(device, 0x404490, 0xc0000000);
+ nvkm_wr32(device, 0x406018, 0xc0000000);
+ nvkm_wr32(device, 0x407020, 0x40000000);
+ nvkm_wr32(device, 0x405840, 0xc0000000);
+ nvkm_wr32(device, 0x405844, 0x00ffffff);
+ nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
+
+ nvkm_mask(device, 0x419c9c, 0x00010000, 0x00010000);
+ nvkm_mask(device, 0x419c9c, 0x00020000, 0x00020000);
+
+ gr->func->init_ppc_exceptions(gr);
+
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+ for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
+ nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000105);
+ }
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+ nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+ }
+
+ for (rop = 0; rop < gr->rop_nr; rop++) {
+ nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
+ nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
+ nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
+ nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
+ }
+
+ nvkm_wr32(device, 0x400108, 0xffffffff);
+ nvkm_wr32(device, 0x400138, 0xffffffff);
+ nvkm_wr32(device, 0x400118, 0xffffffff);
+ nvkm_wr32(device, 0x400130, 0xffffffff);
+ nvkm_wr32(device, 0x40011c, 0xffffffff);
+ nvkm_wr32(device, 0x400134, 0xffffffff);
+
+ gf100_gr_zbc_init(gr);
+
+ return gf100_gr_init_ctxctl(gr);
+}
+
+static const struct gf100_gr_func
+gp100_gr = {
+ .init = gp100_gr_init,
+ .init_gpc_mmu = gm200_gr_init_gpc_mmu,
+ .init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
+ .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
+ .rops = gm200_gr_rops,
+ .ppc_nr = 2,
+ .grctx = &gp100_grctx,
+ .sclass = {
+ { -1, -1, FERMI_TWOD_A },
+ { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+ { -1, -1, PASCAL_A, &gf100_fermi },
+ { -1, -1, PASCAL_COMPUTE_A },
+ {}
+ }
+};
+
+int
+gp100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gm200_gr_new_(&gp100_gr, device, index, pgr);
+}
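
magicgpc918 looks like a 1/tpc_total reciprocal in 23-bit fixed point (0x00800000 is 1 << 23), presumably so the hardware can map work to TPCs by multiplying rather than dividing; the same value lands in both GPC_UNIT 0x0918 and GPC_BCAST(0x3fd4). A worked example under that assumption, with an illustrative TPC count.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* 28 TPCs is an illustrative count, not a claim about any board. */
	unsigned tpc_total = 28;
	unsigned magicgpc918 = DIV_ROUND_UP(0x00800000u, tpc_total);

	/* prints 0x0004924a: ceil(8388608 / 28) */
	printf("magicgpc918 = 0x%08x (%u)\n", magicgpc918, magicgpc918);
	return 0;
}
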
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
index e15b9627b..f3c30b2a7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
@@ -26,6 +26,49 @@
#include <subdev/bios.h>
#include <subdev/bios/bmp.h>
#include <subdev/bios/bit.h>
+#include <subdev/bios/image.h>
+
+static bool
+nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size)
+{
+ u32 p = *addr;
+
+ if (*addr > bios->image0_size && bios->imaged_addr) {
+ *addr -= bios->image0_size;
+ *addr += bios->imaged_addr;
+ }
+
+ if (unlikely(*addr + size >= bios->size)) {
+ nvkm_error(&bios->subdev, "OOB %d %08x %08x\n", size, p, *addr);
+ return false;
+ }
+
+ return true;
+}
+
+u8
+nvbios_rd08(struct nvkm_bios *bios, u32 addr)
+{
+ if (likely(nvbios_addr(bios, &addr, 1)))
+ return bios->data[addr];
+ return 0x00;
+}
+
+u16
+nvbios_rd16(struct nvkm_bios *bios, u32 addr)
+{
+ if (likely(nvbios_addr(bios, &addr, 2)))
+ return get_unaligned_le16(&bios->data[addr]);
+ return 0x0000;
+}
+
+u32
+nvbios_rd32(struct nvkm_bios *bios, u32 addr)
+{
+ if (likely(nvbios_addr(bios, &addr, 4)))
+ return get_unaligned_le32(&bios->data[addr]);
+ return 0x00000000;
+}
u8
nvbios_checksum(const u8 *data, int size)
@@ -100,8 +143,9 @@ int
nvkm_bios_new(struct nvkm_device *device, int index, struct nvkm_bios **pbios)
{
struct nvkm_bios *bios;
+ struct nvbios_image image;
struct bit_entry bit_i;
- int ret;
+ int ret, idx = 0;
if (!(bios = *pbios = kzalloc(sizeof(*bios), GFP_KERNEL)))
return -ENOMEM;
@@ -111,6 +155,19 @@ nvkm_bios_new(struct nvkm_device *device, int index, struct nvkm_bios **pbios)
if (ret)
return ret;
+ /* Some tables have weird pointers that need adjustment before
+ * they're dereferenced. I'm not entirely sure why...
+ */
+ if (nvbios_image(bios, idx++, &image)) {
+ bios->image0_size = image.size;
+ while (nvbios_image(bios, idx++, &image)) {
+ if (image.type == 0xe0) {
+ bios->imaged_addr = image.base;
+ break;
+ }
+ }
+ }
+
/* detect type of vbios we're dealing with */
bios->bmp_offset = nvbios_findstr(bios->data, bios->size,
"\xff\x7f""NV\0", 5);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
index 053324763..d89e78c4e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
@@ -40,6 +40,7 @@ nvbios_dp_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
case 0x30:
case 0x40:
case 0x41:
+ case 0x42:
*hdr = nvbios_rd08(bios, data + 0x01);
*len = nvbios_rd08(bios, data + 0x02);
*cnt = nvbios_rd08(bios, data + 0x03);
@@ -70,6 +71,7 @@ nvbios_dpout_entry(struct nvkm_bios *bios, u8 idx,
break;
case 0x40:
case 0x41:
+ case 0x42:
*hdr = nvbios_rd08(bios, data + 0x04);
*cnt = 0;
*len = 0;
@@ -109,6 +111,7 @@ nvbios_dpout_parse(struct nvkm_bios *bios, u8 idx,
break;
case 0x40:
case 0x41:
+ case 0x42:
info->flags = nvbios_rd08(bios, data + 0x04);
info->script[0] = nvbios_rd16(bios, data + 0x05);
info->script[1] = nvbios_rd16(bios, data + 0x07);
@@ -180,6 +183,11 @@ nvbios_dpcfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
info->pe = nvbios_rd08(bios, data + 0x02);
info->tx_pu = nvbios_rd08(bios, data + 0x03);
break;
+ case 0x42:
+ info->dc = nvbios_rd08(bios, data + 0x00);
+ info->pe = nvbios_rd08(bios, data + 0x01);
+ info->tx_pu = nvbios_rd08(bios, data + 0x02);
+ break;
default:
data = 0x0000;
break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c
index 74b14cf09..1dbff7aea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c
@@ -68,11 +68,16 @@ nvbios_imagen(struct nvkm_bios *bios, struct nvbios_image *image)
bool
nvbios_image(struct nvkm_bios *bios, int idx, struct nvbios_image *image)
{
+ u32 imaged_addr = bios->imaged_addr;
memset(image, 0x00, sizeof(*image));
+ bios->imaged_addr = 0;
do {
image->base += image->size;
- if (image->last || !nvbios_imagen(bios, image))
+ if (image->last || !nvbios_imagen(bios, image)) {
+ bios->imaged_addr = imaged_addr;
return false;
+ }
} while(idx--);
+ bios->imaged_addr = imaged_addr;
return true;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
index 91a7dc56e..2ca23a915 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
@@ -77,15 +77,17 @@ g84_pll_mapping[] = {
{}
};
-static u16
+static u32
pll_limits_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
struct bit_entry bit_C;
- u16 data = 0x0000;
+ u32 data = 0x0000;
if (!bit_entry(bios, 'C', &bit_C)) {
if (bit_C.version == 1 && bit_C.length >= 10)
data = nvbios_rd16(bios, bit_C.offset + 8);
+ if (bit_C.version == 2 && bit_C.length >= 4)
+ data = nvbios_rd32(bios, bit_C.offset + 0);
if (data) {
*ver = nvbios_rd08(bios, data + 0);
*hdr = nvbios_rd08(bios, data + 1);
@@ -137,12 +139,12 @@ pll_map(struct nvkm_bios *bios)
}
}
-static u16
+static u32
pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
{
struct pll_mapping *map;
u8 hdr, cnt;
- u16 data;
+ u32 data;
data = pll_limits_table(bios, ver, &hdr, &cnt, len);
if (data && *ver >= 0x30) {
@@ -160,7 +162,7 @@ pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
map = pll_map(bios);
while (map && map->reg) {
if (map->reg == reg && *ver >= 0x20) {
- u16 addr = (data += hdr);
+ u32 addr = (data += hdr);
*type = map->type;
while (cnt--) {
if (nvbios_rd32(bios, data) == map->reg)
@@ -179,12 +181,12 @@ pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
return 0x0000;
}
-static u16
+static u32
pll_map_type(struct nvkm_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
{
struct pll_mapping *map;
u8 hdr, cnt;
- u16 data;
+ u32 data;
data = pll_limits_table(bios, ver, &hdr, &cnt, len);
if (data && *ver >= 0x30) {
@@ -202,7 +204,7 @@ pll_map_type(struct nvkm_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
map = pll_map(bios);
while (map && map->reg) {
if (map->type == type && *ver >= 0x20) {
- u16 addr = (data += hdr);
+ u32 addr = (data += hdr);
*reg = map->reg;
while (cnt--) {
if (nvbios_rd32(bios, data) == map->reg)
@@ -228,7 +230,7 @@ nvbios_pll_parse(struct nvkm_bios *bios, u32 type, struct nvbios_pll *info)
struct nvkm_device *device = subdev->device;
u8 ver, len;
u32 reg = type;
- u16 data;
+ u32 data;
if (type > PLL_MAX) {
reg = type;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
index c268e5afe..b4a308f3c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
@@ -26,21 +26,6 @@
#include <subdev/bios/image.h>
#include <subdev/bios/pmu.h>
-static u32
-weirdo_pointer(struct nvkm_bios *bios, u32 data)
-{
- struct nvbios_image image;
- int idx = 0;
- if (nvbios_image(bios, idx++, &image)) {
- data -= image.size;
- while (nvbios_image(bios, idx++, &image)) {
- if (image.type == 0xe0)
- return image.base + data;
- }
- }
- return 0;
-}
-
u32
nvbios_pmuTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
@@ -50,7 +35,7 @@ nvbios_pmuTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
if (!bit_entry(bios, 'p', &bit_p)) {
if (bit_p.version == 2 && bit_p.length >= 4)
data = nvbios_rd32(bios, bit_p.offset + 0x00);
- if ((data = weirdo_pointer(bios, data))) {
+ if (data) {
*ver = nvbios_rd08(bios, data + 0x00); /* maybe? */
*hdr = nvbios_rd08(bios, data + 0x01);
*len = nvbios_rd08(bios, data + 0x02);
@@ -97,8 +82,7 @@ nvbios_pmuRm(struct nvkm_bios *bios, u8 type, struct nvbios_pmuR *info)
u32 data;
memset(info, 0x00, sizeof(*info));
while ((data = nvbios_pmuEp(bios, idx++, &ver, &hdr, &pmuE))) {
- if ( pmuE.type == type &&
- (data = weirdo_pointer(bios, pmuE.data))) {
+ if (pmuE.type == type && (data = pmuE.data)) {
info->init_addr_pmu = nvbios_rd32(bios, data + 0x08);
info->args_addr_pmu = nvbios_rd32(bios, data + 0x0c);
info->boot_addr = data + 0x30;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c
index d0ae74547..b57c370c7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c
@@ -30,11 +30,11 @@ nvbios_rammapTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr,
u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
struct bit_entry bit_P;
- u16 rammap = 0x0000;
+ u32 rammap = 0x0000;
if (!bit_entry(bios, 'P', &bit_P)) {
if (bit_P.version == 2)
- rammap = nvbios_rd16(bios, bit_P.offset + 4);
+ rammap = nvbios_rd32(bios, bit_P.offset + 4);
if (rammap) {
*ver = nvbios_rd08(bios, rammap + 0);
@@ -61,7 +61,7 @@ nvbios_rammapEe(struct nvkm_bios *bios, int idx,
u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
u8 snr, ssz;
- u16 rammap = nvbios_rammapTe(bios, ver, hdr, cnt, len, &snr, &ssz);
+ u32 rammap = nvbios_rammapTe(bios, ver, hdr, cnt, len, &snr, &ssz);
if (rammap && idx < *cnt) {
rammap = rammap + *hdr + (idx * (*len + (snr * ssz)));
*hdr = *len;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
index 78c449b41..89d554311 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
@@ -99,7 +99,7 @@ read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl)
{
struct nvkm_device *device = clk->base.subdev.device;
u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
- u32 sctl = nvkm_rd32(device, dctl + (doff * 4));
+ u32 sclk, sctl, sdiv = 2;
switch (ssrc & 0x00000003) {
case 0:
@@ -109,13 +109,21 @@ read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl)
case 2:
return 100000;
case 3:
- if (sctl & 0x80000000) {
- u32 sclk = read_vco(clk, dsrc + (doff * 4));
- u32 sdiv = (sctl & 0x0000003f) + 2;
- return (sclk * 2) / sdiv;
+ sclk = read_vco(clk, dsrc + (doff * 4));
+
+ /* Memclk has doff of 0 despite its alt. location */
+ if (doff <= 2) {
+ sctl = nvkm_rd32(device, dctl + (doff * 4));
+
+ if (sctl & 0x80000000) {
+ if (ssrc & 0x100)
+ sctl >>= 8;
+
+ sdiv = (sctl & 0x3f) + 2;
+ }
}
- return read_vco(clk, dsrc + (doff * 4));
+ return (sclk * 2) / sdiv;
default:
return 0;
}
@@ -366,11 +374,17 @@ gf100_clk_prog_2(struct gf100_clk *clk, int idx)
if (info->coef) {
nvkm_wr32(device, addr + 0x04, info->coef);
nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
+
+ /* Test PLL lock */
+ nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000000);
nvkm_msec(device, 2000,
if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
break;
);
- nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004);
+ nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000010);
+
+ /* Enable sync mode */
+ nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000004);
}
}
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
index 975c401bc..06bc0d2d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
@@ -393,11 +393,17 @@ gk104_clk_prog_2(struct gk104_clk *clk, int idx)
if (info->coef) {
nvkm_wr32(device, addr + 0x04, info->coef);
nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
+
+ /* Test PLL lock */
+ nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000000);
nvkm_msec(device, 2000,
if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
break;
);
- nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004);
+ nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000010);
+
+ /* Enable sync mode */
+ nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000004);
}
}
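
gf100 and gk104 receive the same PLL programming fix above: lock detection is powered up (bit 4 cleared) before the lock flag is polled, and sync mode (bit 2) is only enabled afterwards, instead of folding both into a single 0x00020004 mask write. A toy rendering of that ordering against a simulated register file; the offsets and bit values follow the hunks, the hardware behaviour is faked.

#include <stdint.h>
#include <stdio.h>

/* Toy register file standing in for nvkm_wr32()/nvkm_rd32()/nvkm_mask(). */
static uint32_t regs[0x100];

static void wr32(uint32_t addr, uint32_t data) { regs[addr & 0xff] = data; }
static uint32_t rd32(uint32_t addr) { return regs[addr & 0xff]; }

static void
mask32(uint32_t addr, uint32_t mask, uint32_t val)
{
	wr32(addr, (rd32(addr) & ~mask) | val);
}

static void
pll_prog(uint32_t addr, uint32_t coef)
{
	wr32(addr + 0x04, coef);			/* coefficients */
	mask32(addr + 0x00, 0x00000001, 0x00000001);	/* enable PLL */

	mask32(addr + 0x00, 0x00000010, 0x00000000);	/* lock detect on */
	regs[addr & 0xff] |= 0x00020000;		/* fake the lock */
	while (!(rd32(addr + 0x00) & 0x00020000))
		;					/* poll, cf. nvkm_msec */
	mask32(addr + 0x00, 0x00000010, 0x00000010);	/* lock detect off */

	mask32(addr + 0x00, 0x00000004, 0x00000004);	/* sync mode last */
}

int main(void)
{
	pll_prog(0x20, 0x00010101);
	printf("PLL ctrl = 0x%08x\n", rd32(0x20));
	return 0;
}
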
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
index 5f0ee24e3..218893e3e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
@@ -28,69 +28,6 @@
#include <core/tegra.h>
#include <subdev/timer.h>
-#define KHZ (1000)
-#define MHZ (KHZ * 1000)
-
-#define MASK(w) ((1 << w) - 1)
-
-#define GPCPLL_CFG (SYS_GPCPLL_CFG_BASE + 0)
-#define GPCPLL_CFG_ENABLE BIT(0)
-#define GPCPLL_CFG_IDDQ BIT(1)
-#define GPCPLL_CFG_LOCK_DET_OFF BIT(4)
-#define GPCPLL_CFG_LOCK BIT(17)
-
-#define GPCPLL_COEFF (SYS_GPCPLL_CFG_BASE + 4)
-#define GPCPLL_COEFF_M_SHIFT 0
-#define GPCPLL_COEFF_M_WIDTH 8
-#define GPCPLL_COEFF_N_SHIFT 8
-#define GPCPLL_COEFF_N_WIDTH 8
-#define GPCPLL_COEFF_P_SHIFT 16
-#define GPCPLL_COEFF_P_WIDTH 6
-
-#define GPCPLL_CFG2 (SYS_GPCPLL_CFG_BASE + 0xc)
-#define GPCPLL_CFG2_SETUP2_SHIFT 16
-#define GPCPLL_CFG2_PLL_STEPA_SHIFT 24
-
-#define GPCPLL_CFG3 (SYS_GPCPLL_CFG_BASE + 0x18)
-#define GPCPLL_CFG3_PLL_STEPB_SHIFT 16
-
-#define GPC_BCASE_GPCPLL_CFG_BASE 0x00132800
-#define GPCPLL_NDIV_SLOWDOWN (SYS_GPCPLL_CFG_BASE + 0x1c)
-#define GPCPLL_NDIV_SLOWDOWN_NDIV_LO_SHIFT 0
-#define GPCPLL_NDIV_SLOWDOWN_NDIV_MID_SHIFT 8
-#define GPCPLL_NDIV_SLOWDOWN_STEP_SIZE_LO2MID_SHIFT 16
-#define GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT 22
-#define GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT 31
-
-#define SEL_VCO (SYS_GPCPLL_CFG_BASE + 0x100)
-#define SEL_VCO_GPC2CLK_OUT_SHIFT 0
-
-#define GPC2CLK_OUT (SYS_GPCPLL_CFG_BASE + 0x250)
-#define GPC2CLK_OUT_SDIV14_INDIV4_WIDTH 1
-#define GPC2CLK_OUT_SDIV14_INDIV4_SHIFT 31
-#define GPC2CLK_OUT_SDIV14_INDIV4_MODE 1
-#define GPC2CLK_OUT_VCODIV_WIDTH 6
-#define GPC2CLK_OUT_VCODIV_SHIFT 8
-#define GPC2CLK_OUT_VCODIV1 0
-#define GPC2CLK_OUT_VCODIV_MASK (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << \
- GPC2CLK_OUT_VCODIV_SHIFT)
-#define GPC2CLK_OUT_BYPDIV_WIDTH 6
-#define GPC2CLK_OUT_BYPDIV_SHIFT 0
-#define GPC2CLK_OUT_BYPDIV31 0x3c
-#define GPC2CLK_OUT_INIT_MASK ((MASK(GPC2CLK_OUT_SDIV14_INDIV4_WIDTH) << \
- GPC2CLK_OUT_SDIV14_INDIV4_SHIFT)\
- | (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << GPC2CLK_OUT_VCODIV_SHIFT)\
- | (MASK(GPC2CLK_OUT_BYPDIV_WIDTH) << GPC2CLK_OUT_BYPDIV_SHIFT))
-#define GPC2CLK_OUT_INIT_VAL ((GPC2CLK_OUT_SDIV14_INDIV4_MODE << \
- GPC2CLK_OUT_SDIV14_INDIV4_SHIFT) \
- | (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT) \
- | (GPC2CLK_OUT_BYPDIV31 << GPC2CLK_OUT_BYPDIV_SHIFT))
-
-#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG (GPC_BCASE_GPCPLL_CFG_BASE + 0xa0)
-#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT 24
-#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK \
- (0x1 << GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT)
-
static const u8 _pl_to_div[] = {
/* PL: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 */
/* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32,
@@ -124,7 +61,7 @@ static const struct gk20a_clk_pllg_params gk20a_pllg_params = {
.min_pl = 1, .max_pl = 32,
};
-static void
+void
gk20a_pllg_read_mnp(struct gk20a_clk *clk, struct gk20a_pll *pll)
{
struct nvkm_device *device = clk->base.subdev.device;
@@ -136,20 +73,33 @@ gk20a_pllg_read_mnp(struct gk20a_clk *clk, struct gk20a_pll *pll)
pll->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
}
-static u32
-gk20a_pllg_calc_rate(struct gk20a_clk *clk)
+void
+gk20a_pllg_write_mnp(struct gk20a_clk *clk, const struct gk20a_pll *pll)
+{
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 val;
+
+ val = (pll->m & MASK(GPCPLL_COEFF_M_WIDTH)) << GPCPLL_COEFF_M_SHIFT;
+ val |= (pll->n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
+ val |= (pll->pl & MASK(GPCPLL_COEFF_P_WIDTH)) << GPCPLL_COEFF_P_SHIFT;
+ nvkm_wr32(device, GPCPLL_COEFF, val);
+}
+
+u32
+gk20a_pllg_calc_rate(struct gk20a_clk *clk, struct gk20a_pll *pll)
{
u32 rate;
u32 divider;
- rate = clk->parent_rate * clk->pll.n;
- divider = clk->pll.m * clk->pl_to_div(clk->pll.pl);
+ rate = clk->parent_rate * pll->n;
+ divider = pll->m * clk->pl_to_div(pll->pl);
return rate / divider / 2;
}
-static int
-gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
+int
+gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate,
+ struct gk20a_pll *pll)
{
struct nvkm_subdev *subdev = &clk->base.subdev;
u32 target_clk_f, ref_clk_f, target_freq;
@@ -163,16 +113,13 @@ gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
target_clk_f = rate * 2 / KHZ;
ref_clk_f = clk->parent_rate / KHZ;
- max_vco_f = clk->params->max_vco;
+ target_vco_f = target_clk_f + target_clk_f / 50;
+ max_vco_f = max(clk->params->max_vco, target_vco_f);
min_vco_f = clk->params->min_vco;
best_m = clk->params->max_m;
best_n = clk->params->min_n;
best_pl = clk->params->min_pl;
- target_vco_f = target_clk_f + target_clk_f / 50;
- if (max_vco_f < target_vco_f)
- max_vco_f = target_vco_f;
-
/* min_pl <= high_pl <= max_pl */
high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f;
high_pl = min(high_pl, clk->params->max_pl);
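
The reordering above matters because target_vco_f, which carries a 2% headroom (x + x/50), must be computed before max_vco_f can be derived from it. A standalone sketch with illustrative kHz values:

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
        unsigned int target_clk_f = 2044000;    /* gpc2clk target, kHz */
        unsigned int nominal_max_vco = 2064000; /* params->max_vco, kHz */
        unsigned int target_vco_f, max_vco_f;

        target_vco_f = target_clk_f + target_clk_f / 50;        /* +2% */
        max_vco_f = MAX(nominal_max_vco, target_vco_f);

        printf("target_vco %u kHz, max_vco %u kHz\n", target_vco_f, max_vco_f);
        return 0;
}
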
@@ -195,9 +142,7 @@ gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
target_vco_f = target_clk_f * clk->pl_to_div(pl);
for (m = clk->params->min_m; m <= clk->params->max_m; m++) {
- u32 u_f, vco_f;
-
- u_f = ref_clk_f / m;
+ u32 u_f = ref_clk_f / m;
if (u_f < clk->params->min_u)
break;
@@ -211,6 +156,8 @@ gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
break;
for (; n <= n2; n++) {
+ u32 vco_f;
+
if (n < clk->params->min_n)
continue;
if (n > clk->params->max_n)
@@ -247,16 +194,16 @@ found_match:
"no best match for target @ %dMHz on gpc_pll",
target_clk_f / KHZ);
- clk->pll.m = best_m;
- clk->pll.n = best_n;
- clk->pll.pl = best_pl;
+ pll->m = best_m;
+ pll->n = best_n;
+ pll->pl = best_pl;
- target_freq = gk20a_pllg_calc_rate(clk);
+ target_freq = gk20a_pllg_calc_rate(clk, pll);
nvkm_debug(subdev,
- "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n",
- target_freq / MHZ, clk->pll.m, clk->pll.n, clk->pll.pl,
- clk->pl_to_div(clk->pll.pl));
+ "actual target freq %d KHz, M %d, N %d, PL %d(div%d)\n",
+ target_freq / KHZ, pll->m, pll->n, pll->pl,
+ clk->pl_to_div(pll->pl));
return 0;
}
@@ -265,45 +212,36 @@ gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
{
struct nvkm_subdev *subdev = &clk->base.subdev;
struct nvkm_device *device = subdev->device;
- u32 val;
- int ramp_timeout;
+ struct gk20a_pll pll;
+ int ret = 0;
/* get old coefficients */
- val = nvkm_rd32(device, GPCPLL_COEFF);
+ gk20a_pllg_read_mnp(clk, &pll);
/* do nothing if NDIV is the same */
- if (n == ((val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH)))
+ if (n == pll.n)
return 0;
- /* setup */
- nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
- 0x2b << GPCPLL_CFG2_PLL_STEPA_SHIFT);
- nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
- 0xb << GPCPLL_CFG3_PLL_STEPB_SHIFT);
-
/* pll slowdown mode */
nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));
/* new ndiv ready for ramp */
- val = nvkm_rd32(device, GPCPLL_COEFF);
- val &= ~(MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT);
- val |= (n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
+ pll.n = n;
udelay(1);
- nvkm_wr32(device, GPCPLL_COEFF, val);
+ gk20a_pllg_write_mnp(clk, &pll);
/* dynamic ramp to new ndiv */
- val = nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
- val |= 0x1 << GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT;
udelay(1);
- nvkm_wr32(device, GPCPLL_NDIV_SLOWDOWN, val);
+ nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
+ BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT),
+ BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT));
- for (ramp_timeout = 500; ramp_timeout > 0; ramp_timeout--) {
- udelay(1);
- val = nvkm_rd32(device, GPC_BCAST_NDIV_SLOWDOWN_DEBUG);
- if (val & GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK)
- break;
- }
+ /* wait for ramping to complete */
+ if (nvkm_wait_usec(device, 500, GPC_BCAST_NDIV_SLOWDOWN_DEBUG,
+ GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK,
+ GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) < 0)
+ ret = -ETIMEDOUT;
/* exit slowdown mode */
nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
@@ -311,21 +249,35 @@ gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
- if (ramp_timeout <= 0) {
- nvkm_error(subdev, "gpcpll dynamic ramp timeout\n");
- return -ETIMEDOUT;
- }
-
- return 0;
+ return ret;
}
-static void
+static int
gk20a_pllg_enable(struct gk20a_clk *clk)
{
struct nvkm_device *device = clk->base.subdev.device;
+ u32 val;
nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
nvkm_rd32(device, GPCPLL_CFG);
+
+ /* enable lock detection */
+ val = nvkm_rd32(device, GPCPLL_CFG);
+ if (val & GPCPLL_CFG_LOCK_DET_OFF) {
+ val &= ~GPCPLL_CFG_LOCK_DET_OFF;
+ nvkm_wr32(device, GPCPLL_CFG, val);
+ }
+
+ /* wait for lock */
+ if (nvkm_wait_usec(device, 300, GPCPLL_CFG, GPCPLL_CFG_LOCK,
+ GPCPLL_CFG_LOCK) < 0)
+ return -ETIMEDOUT;
+
+ /* switch to VCO mode */
+ nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
+ BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+
+ return 0;
}
static void
@@ -333,117 +285,81 @@ gk20a_pllg_disable(struct gk20a_clk *clk)
{
struct nvkm_device *device = clk->base.subdev.device;
+ /* put PLL in bypass before disabling it */
+ nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
+
nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
nvkm_rd32(device, GPCPLL_CFG);
}
static int
-_gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide)
+gk20a_pllg_program_mnp(struct gk20a_clk *clk, const struct gk20a_pll *pll)
{
struct nvkm_subdev *subdev = &clk->base.subdev;
struct nvkm_device *device = subdev->device;
- u32 val, cfg;
- struct gk20a_pll old_pll;
- u32 n_lo;
-
- /* get old coefficients */
- gk20a_pllg_read_mnp(clk, &old_pll);
-
- /* do NDIV slide if there is no change in M and PL */
- cfg = nvkm_rd32(device, GPCPLL_CFG);
- if (allow_slide && clk->pll.m == old_pll.m &&
- clk->pll.pl == old_pll.pl && (cfg & GPCPLL_CFG_ENABLE)) {
- return gk20a_pllg_slide(clk, clk->pll.n);
- }
-
- /* slide down to NDIV_LO */
- if (allow_slide && (cfg & GPCPLL_CFG_ENABLE)) {
- int ret;
-
- n_lo = DIV_ROUND_UP(old_pll.m * clk->params->min_vco,
- clk->parent_rate / KHZ);
- ret = gk20a_pllg_slide(clk, n_lo);
+ struct gk20a_pll cur_pll;
+ int ret;
- if (ret)
- return ret;
- }
+ gk20a_pllg_read_mnp(clk, &cur_pll);
- /* split FO-to-bypass jump in halfs by setting out divider 1:2 */
+ /* split VCO-to-bypass jump in half by setting out divider 1:2 */
nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
- 0x2 << GPC2CLK_OUT_VCODIV_SHIFT);
-
- /* put PLL in bypass before programming it */
- val = nvkm_rd32(device, SEL_VCO);
- val &= ~(BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+ GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
+ /* Intentional 2nd write to assure linear divider operation */
+ nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+ GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
+ nvkm_rd32(device, GPC2CLK_OUT);
udelay(2);
- nvkm_wr32(device, SEL_VCO, val);
-
- /* get out from IDDQ */
- val = nvkm_rd32(device, GPCPLL_CFG);
- if (val & GPCPLL_CFG_IDDQ) {
- val &= ~GPCPLL_CFG_IDDQ;
- nvkm_wr32(device, GPCPLL_CFG, val);
- nvkm_rd32(device, GPCPLL_CFG);
- udelay(2);
- }
gk20a_pllg_disable(clk);
- nvkm_debug(subdev, "%s: m=%d n=%d pl=%d\n", __func__,
- clk->pll.m, clk->pll.n, clk->pll.pl);
-
- n_lo = DIV_ROUND_UP(clk->pll.m * clk->params->min_vco,
- clk->parent_rate / KHZ);
- val = clk->pll.m << GPCPLL_COEFF_M_SHIFT;
- val |= (allow_slide ? n_lo : clk->pll.n) << GPCPLL_COEFF_N_SHIFT;
- val |= clk->pll.pl << GPCPLL_COEFF_P_SHIFT;
- nvkm_wr32(device, GPCPLL_COEFF, val);
+ gk20a_pllg_write_mnp(clk, pll);
- gk20a_pllg_enable(clk);
-
- val = nvkm_rd32(device, GPCPLL_CFG);
- if (val & GPCPLL_CFG_LOCK_DET_OFF) {
- val &= ~GPCPLL_CFG_LOCK_DET_OFF;
- nvkm_wr32(device, GPCPLL_CFG, val);
- }
-
- if (nvkm_usec(device, 300,
- if (nvkm_rd32(device, GPCPLL_CFG) & GPCPLL_CFG_LOCK)
- break;
- ) < 0)
- return -ETIMEDOUT;
-
- /* switch to VCO mode */
- nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
- BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+ ret = gk20a_pllg_enable(clk);
+ if (ret)
+ return ret;
/* restore out divider 1:1 */
- val = nvkm_rd32(device, GPC2CLK_OUT);
- if ((val & GPC2CLK_OUT_VCODIV_MASK) !=
- (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT)) {
- val &= ~GPC2CLK_OUT_VCODIV_MASK;
- val |= GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT;
- udelay(2);
- nvkm_wr32(device, GPC2CLK_OUT, val);
- /* Intentional 2nd write to assure linear divider operation */
- nvkm_wr32(device, GPC2CLK_OUT, val);
- nvkm_rd32(device, GPC2CLK_OUT);
- }
+ udelay(2);
+ nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+ GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
+ /* Intentional 2nd write to assure linear divider operation */
+ nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+ GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
+ nvkm_rd32(device, GPC2CLK_OUT);
- /* slide up to new NDIV */
- return allow_slide ? gk20a_pllg_slide(clk, clk->pll.n) : 0;
+ return 0;
}
static int
-gk20a_pllg_program_mnp(struct gk20a_clk *clk)
+gk20a_pllg_program_mnp_slide(struct gk20a_clk *clk, const struct gk20a_pll *pll)
{
- int err;
+ struct gk20a_pll cur_pll;
+ int ret;
- err = _gk20a_pllg_program_mnp(clk, true);
- if (err)
- err = _gk20a_pllg_program_mnp(clk, false);
+ if (gk20a_pllg_is_enabled(clk)) {
+ gk20a_pllg_read_mnp(clk, &cur_pll);
+
+ /* just do NDIV slide if there is no change to M and PL */
+ if (pll->m == cur_pll.m && pll->pl == cur_pll.pl)
+ return gk20a_pllg_slide(clk, pll->n);
+
+ /* slide down to current NDIV_LO */
+ cur_pll.n = gk20a_pllg_n_lo(clk, &cur_pll);
+ ret = gk20a_pllg_slide(clk, cur_pll.n);
+ if (ret)
+ return ret;
+ }
+
+ /* program MNP with the new clock parameters and new NDIV_LO */
+ cur_pll = *pll;
+ cur_pll.n = gk20a_pllg_n_lo(clk, &cur_pll);
+ ret = gk20a_pllg_program_mnp(clk, &cur_pll);
+ if (ret)
+ return ret;
- return err;
+ /* slide up to new NDIV */
+ return gk20a_pllg_slide(clk, pll->n);
}
static struct nvkm_pstate
@@ -546,13 +462,14 @@ gk20a_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
struct gk20a_clk *clk = gk20a_clk(base);
struct nvkm_subdev *subdev = &clk->base.subdev;
struct nvkm_device *device = subdev->device;
+ struct gk20a_pll pll;
switch (src) {
case nv_clk_src_crystal:
return device->crystal;
case nv_clk_src_gpc:
- gk20a_pllg_read_mnp(clk, &clk->pll);
- return gk20a_pllg_calc_rate(clk) / GK20A_CLK_GPC_MDIV;
+ gk20a_pllg_read_mnp(clk, &pll);
+ return gk20a_pllg_calc_rate(clk, &pll) / GK20A_CLK_GPC_MDIV;
default:
nvkm_error(subdev, "invalid clock source %d\n", src);
return -EINVAL;
@@ -565,15 +482,20 @@ gk20a_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
struct gk20a_clk *clk = gk20a_clk(base);
return gk20a_pllg_calc_mnp(clk, cstate->domain[nv_clk_src_gpc] *
- GK20A_CLK_GPC_MDIV);
+ GK20A_CLK_GPC_MDIV, &clk->pll);
}
int
gk20a_clk_prog(struct nvkm_clk *base)
{
struct gk20a_clk *clk = gk20a_clk(base);
+ int ret;
+
+ ret = gk20a_pllg_program_mnp_slide(clk, &clk->pll);
+ if (ret)
+ ret = gk20a_pllg_program_mnp(clk, &clk->pll);
- return gk20a_pllg_program_mnp(clk);
+ return ret;
}
void
@@ -581,29 +503,62 @@ gk20a_clk_tidy(struct nvkm_clk *base)
{
}
+int
+gk20a_clk_setup_slide(struct gk20a_clk *clk)
+{
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 step_a, step_b;
+
+ switch (clk->parent_rate) {
+ case 12000000:
+ case 12800000:
+ case 13000000:
+ step_a = 0x2b;
+ step_b = 0x0b;
+ break;
+ case 19200000:
+ step_a = 0x12;
+ step_b = 0x08;
+ break;
+ case 38400000:
+ step_a = 0x04;
+ step_b = 0x05;
+ break;
+ default:
+ nvkm_error(subdev, "invalid parent clock rate %u KHz",
+ clk->parent_rate / KHZ);
+ return -EINVAL;
+ }
+
+ nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
+ step_a << GPCPLL_CFG2_PLL_STEPA_SHIFT);
+ nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
+ step_b << GPCPLL_CFG3_PLL_STEPB_SHIFT);
+
+ return 0;
+}
+
void
gk20a_clk_fini(struct nvkm_clk *base)
{
struct nvkm_device *device = base->subdev.device;
struct gk20a_clk *clk = gk20a_clk(base);
- u32 val;
/* slide to VCO min */
- val = nvkm_rd32(device, GPCPLL_CFG);
- if (val & GPCPLL_CFG_ENABLE) {
+ if (gk20a_pllg_is_enabled(clk)) {
struct gk20a_pll pll;
u32 n_lo;
gk20a_pllg_read_mnp(clk, &pll);
- n_lo = DIV_ROUND_UP(pll.m * clk->params->min_vco,
- clk->parent_rate / KHZ);
+ n_lo = gk20a_pllg_n_lo(clk, &pll);
gk20a_pllg_slide(clk, n_lo);
}
- /* put PLL in bypass before disabling it */
- nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
-
gk20a_pllg_disable(clk);
+
+ /* set IDDQ */
+ nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 1);
}
static int
@@ -614,9 +569,18 @@ gk20a_clk_init(struct nvkm_clk *base)
struct nvkm_device *device = subdev->device;
int ret;
+ /* get out from IDDQ */
+ nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 0);
+ nvkm_rd32(device, GPCPLL_CFG);
+ udelay(5);
+
nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK,
GPC2CLK_OUT_INIT_VAL);
+ ret = gk20a_clk_setup_slide(clk);
+ if (ret)
+ return ret;
+
/* Start with lowest frequency */
base->func->calc(base, &base->func->pstates[0].base);
ret = base->func->prog(&clk->base);
@@ -646,7 +610,7 @@ gk20a_clk = {
};
int
-_gk20a_clk_ctor(struct nvkm_device *device, int index,
+gk20a_clk_ctor(struct nvkm_device *device, int index,
const struct nvkm_clk_func *func,
const struct gk20a_clk_pllg_params *params,
struct gk20a_clk *clk)
@@ -685,7 +649,7 @@ gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
return -ENOMEM;
*pclk = &clk->base;
- ret = _gk20a_clk_ctor(device, index, &gk20a_clk, &gk20a_pllg_params,
+ ret = gk20a_clk_ctor(device, index, &gk20a_clk, &gk20a_pllg_params,
clk);
clk->pl_to_div = pl_to_div;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
index 13c467401..0d1450972 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
@@ -24,9 +24,79 @@
#ifndef __NVKM_CLK_GK20A_H__
#define __NVKM_CLK_GK20A_H__
+#define KHZ (1000)
+#define MHZ (KHZ * 1000)
+
+#define MASK(w) ((1 << (w)) - 1)
+
#define GK20A_CLK_GPC_MDIV 1000
#define SYS_GPCPLL_CFG_BASE 0x00137000
+#define GPCPLL_CFG (SYS_GPCPLL_CFG_BASE + 0)
+#define GPCPLL_CFG_ENABLE BIT(0)
+#define GPCPLL_CFG_IDDQ BIT(1)
+#define GPCPLL_CFG_LOCK_DET_OFF BIT(4)
+#define GPCPLL_CFG_LOCK BIT(17)
+
+#define GPCPLL_CFG2 (SYS_GPCPLL_CFG_BASE + 0xc)
+#define GPCPLL_CFG2_SETUP2_SHIFT 16
+#define GPCPLL_CFG2_PLL_STEPA_SHIFT 24
+
+#define GPCPLL_CFG3 (SYS_GPCPLL_CFG_BASE + 0x18)
+#define GPCPLL_CFG3_VCO_CTRL_SHIFT 0
+#define GPCPLL_CFG3_VCO_CTRL_WIDTH 9
+#define GPCPLL_CFG3_VCO_CTRL_MASK \
+ (MASK(GPCPLL_CFG3_VCO_CTRL_WIDTH) << GPCPLL_CFG3_VCO_CTRL_SHIFT)
+#define GPCPLL_CFG3_PLL_STEPB_SHIFT 16
+#define GPCPLL_CFG3_PLL_STEPB_WIDTH 8
+
+#define GPCPLL_COEFF (SYS_GPCPLL_CFG_BASE + 4)
+#define GPCPLL_COEFF_M_SHIFT 0
+#define GPCPLL_COEFF_M_WIDTH 8
+#define GPCPLL_COEFF_N_SHIFT 8
+#define GPCPLL_COEFF_N_WIDTH 8
+#define GPCPLL_COEFF_N_MASK \
+ (MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT)
+#define GPCPLL_COEFF_P_SHIFT 16
+#define GPCPLL_COEFF_P_WIDTH 6
+
+#define GPCPLL_NDIV_SLOWDOWN (SYS_GPCPLL_CFG_BASE + 0x1c)
+#define GPCPLL_NDIV_SLOWDOWN_NDIV_LO_SHIFT 0
+#define GPCPLL_NDIV_SLOWDOWN_NDIV_MID_SHIFT 8
+#define GPCPLL_NDIV_SLOWDOWN_STEP_SIZE_LO2MID_SHIFT 16
+#define GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT 22
+#define GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT 31
+
+#define GPC_BCAST_GPCPLL_CFG_BASE 0x00132800
+#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG (GPC_BCAST_GPCPLL_CFG_BASE + 0xa0)
+#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT 24
+#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK \
+ (0x1 << GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT)
+
+#define SEL_VCO (SYS_GPCPLL_CFG_BASE + 0x100)
+#define SEL_VCO_GPC2CLK_OUT_SHIFT 0
+
+#define GPC2CLK_OUT (SYS_GPCPLL_CFG_BASE + 0x250)
+#define GPC2CLK_OUT_SDIV14_INDIV4_WIDTH 1
+#define GPC2CLK_OUT_SDIV14_INDIV4_SHIFT 31
+#define GPC2CLK_OUT_SDIV14_INDIV4_MODE 1
+#define GPC2CLK_OUT_VCODIV_WIDTH 6
+#define GPC2CLK_OUT_VCODIV_SHIFT 8
+#define GPC2CLK_OUT_VCODIV1 0
+#define GPC2CLK_OUT_VCODIV2 2
+#define GPC2CLK_OUT_VCODIV_MASK (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << \
+ GPC2CLK_OUT_VCODIV_SHIFT)
+#define GPC2CLK_OUT_BYPDIV_WIDTH 6
+#define GPC2CLK_OUT_BYPDIV_SHIFT 0
+#define GPC2CLK_OUT_BYPDIV31 0x3c
+#define GPC2CLK_OUT_INIT_MASK ((MASK(GPC2CLK_OUT_SDIV14_INDIV4_WIDTH) << \
+ GPC2CLK_OUT_SDIV14_INDIV4_SHIFT)\
+ | (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << GPC2CLK_OUT_VCODIV_SHIFT)\
+ | (MASK(GPC2CLK_OUT_BYPDIV_WIDTH) << GPC2CLK_OUT_BYPDIV_SHIFT))
+#define GPC2CLK_OUT_INIT_VAL ((GPC2CLK_OUT_SDIV14_INDIV4_MODE << \
+ GPC2CLK_OUT_SDIV14_INDIV4_SHIFT) \
+ | (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT) \
+ | (GPC2CLK_OUT_BYPDIV31 << GPC2CLK_OUT_BYPDIV_SHIFT))
/* All frequencies in kHz */
struct gk20a_clk_pllg_params {
@@ -54,7 +124,29 @@ struct gk20a_clk {
};
#define gk20a_clk(p) container_of((p), struct gk20a_clk, base)
-int _gk20a_clk_ctor(struct nvkm_device *, int, const struct nvkm_clk_func *,
+u32 gk20a_pllg_calc_rate(struct gk20a_clk *, struct gk20a_pll *);
+int gk20a_pllg_calc_mnp(struct gk20a_clk *, unsigned long, struct gk20a_pll *);
+void gk20a_pllg_read_mnp(struct gk20a_clk *, struct gk20a_pll *);
+void gk20a_pllg_write_mnp(struct gk20a_clk *, const struct gk20a_pll *);
+
+static inline bool
+gk20a_pllg_is_enabled(struct gk20a_clk *clk)
+{
+ struct nvkm_device *device = clk->base.subdev.device;
+ u32 val;
+
+ val = nvkm_rd32(device, GPCPLL_CFG);
+ return val & GPCPLL_CFG_ENABLE;
+}
+
+static inline u32
+gk20a_pllg_n_lo(struct gk20a_clk *clk, struct gk20a_pll *pll)
+{
+ return DIV_ROUND_UP(pll->m * clk->params->min_vco,
+ clk->parent_rate / KHZ);
+}
+
+int gk20a_clk_ctor(struct nvkm_device *, int, const struct nvkm_clk_func *,
const struct gk20a_clk_pllg_params *, struct gk20a_clk *);
void gk20a_clk_fini(struct nvkm_clk *);
int gk20a_clk_read(struct nvkm_clk *, enum nv_clk_src);
@@ -62,4 +154,6 @@ int gk20a_clk_calc(struct nvkm_clk *, struct nvkm_cstate *);
int gk20a_clk_prog(struct nvkm_clk *);
void gk20a_clk_tidy(struct nvkm_clk *);
+int gk20a_clk_setup_slide(struct gk20a_clk *);
+
#endif
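
A standalone sketch exercising the MASK() macro and the new gk20a_pllg_n_lo() helper this header exports; the m, min_vco, and parent_rate values are illustrative, not from a real board:

#include <stdio.h>

#define KHZ 1000
#define MASK(w) ((1 << (w)) - 1)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int m = 1;                     /* PLL input divider */
        unsigned int min_vco = 1300000;         /* params->min_vco, kHz */
        unsigned int parent_rate = 38400000;    /* Hz */
        unsigned int n_lo = DIV_ROUND_UP(m * min_vco, parent_rate / KHZ);

        printf("MASK(6) = 0x%x, n_lo = %u\n", MASK(6), n_lo);  /* 0x3f, 34 */
        return 0;
}
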
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
index 71b2bbb61..b284e949f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
@@ -21,20 +21,123 @@
*/
#include <subdev/clk.h>
+#include <subdev/volt.h>
+#include <subdev/timer.h>
#include <core/device.h>
+#include <core/tegra.h>
#include "priv.h"
#include "gk20a.h"
-#define KHZ (1000)
-#define MHZ (KHZ * 1000)
-
-#define MASK(w) ((1 << w) - 1)
+#define GPCPLL_CFG_SYNC_MODE BIT(2)
#define BYPASSCTRL_SYS (SYS_GPCPLL_CFG_BASE + 0x340)
#define BYPASSCTRL_SYS_GPCPLL_SHIFT 0
#define BYPASSCTRL_SYS_GPCPLL_WIDTH 1
+#define GPCPLL_CFG2_SDM_DIN_SHIFT 0
+#define GPCPLL_CFG2_SDM_DIN_WIDTH 8
+#define GPCPLL_CFG2_SDM_DIN_MASK \
+ (MASK(GPCPLL_CFG2_SDM_DIN_WIDTH) << GPCPLL_CFG2_SDM_DIN_SHIFT)
+#define GPCPLL_CFG2_SDM_DIN_NEW_SHIFT 8
+#define GPCPLL_CFG2_SDM_DIN_NEW_WIDTH 15
+#define GPCPLL_CFG2_SDM_DIN_NEW_MASK \
+ (MASK(GPCPLL_CFG2_SDM_DIN_NEW_WIDTH) << GPCPLL_CFG2_SDM_DIN_NEW_SHIFT)
+#define GPCPLL_CFG2_SETUP2_SHIFT 16
+#define GPCPLL_CFG2_PLL_STEPA_SHIFT 24
+
+#define GPCPLL_DVFS0 (SYS_GPCPLL_CFG_BASE + 0x10)
+#define GPCPLL_DVFS0_DFS_COEFF_SHIFT 0
+#define GPCPLL_DVFS0_DFS_COEFF_WIDTH 7
+#define GPCPLL_DVFS0_DFS_COEFF_MASK \
+ (MASK(GPCPLL_DVFS0_DFS_COEFF_WIDTH) << GPCPLL_DVFS0_DFS_COEFF_SHIFT)
+#define GPCPLL_DVFS0_DFS_DET_MAX_SHIFT 8
+#define GPCPLL_DVFS0_DFS_DET_MAX_WIDTH 7
+#define GPCPLL_DVFS0_DFS_DET_MAX_MASK \
+ (MASK(GPCPLL_DVFS0_DFS_DET_MAX_WIDTH) << GPCPLL_DVFS0_DFS_DET_MAX_SHIFT)
+
+#define GPCPLL_DVFS1 (SYS_GPCPLL_CFG_BASE + 0x14)
+#define GPCPLL_DVFS1_DFS_EXT_DET_SHIFT 0
+#define GPCPLL_DVFS1_DFS_EXT_DET_WIDTH 7
+#define GPCPLL_DVFS1_DFS_EXT_STRB_SHIFT 7
+#define GPCPLL_DVFS1_DFS_EXT_STRB_WIDTH 1
+#define GPCPLL_DVFS1_DFS_EXT_CAL_SHIFT 8
+#define GPCPLL_DVFS1_DFS_EXT_CAL_WIDTH 7
+#define GPCPLL_DVFS1_DFS_EXT_SEL_SHIFT 15
+#define GPCPLL_DVFS1_DFS_EXT_SEL_WIDTH 1
+#define GPCPLL_DVFS1_DFS_CTRL_SHIFT 16
+#define GPCPLL_DVFS1_DFS_CTRL_WIDTH 12
+#define GPCPLL_DVFS1_EN_SDM_SHIFT 28
+#define GPCPLL_DVFS1_EN_SDM_WIDTH 1
+#define GPCPLL_DVFS1_EN_SDM_BIT BIT(28)
+#define GPCPLL_DVFS1_EN_DFS_SHIFT 29
+#define GPCPLL_DVFS1_EN_DFS_WIDTH 1
+#define GPCPLL_DVFS1_EN_DFS_BIT BIT(29)
+#define GPCPLL_DVFS1_EN_DFS_CAL_SHIFT 30
+#define GPCPLL_DVFS1_EN_DFS_CAL_WIDTH 1
+#define GPCPLL_DVFS1_EN_DFS_CAL_BIT BIT(30)
+#define GPCPLL_DVFS1_DFS_CAL_DONE_SHIFT 31
+#define GPCPLL_DVFS1_DFS_CAL_DONE_WIDTH 1
+#define GPCPLL_DVFS1_DFS_CAL_DONE_BIT BIT(31)
+
+#define GPC_BCAST_GPCPLL_DVFS2 (GPC_BCAST_GPCPLL_CFG_BASE + 0x20)
+#define GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT BIT(16)
+
+#define GPCPLL_CFG3_PLL_DFS_TESTOUT_SHIFT 24
+#define GPCPLL_CFG3_PLL_DFS_TESTOUT_WIDTH 7
+
+#define DFS_DET_RANGE 6 /* -2^6 ... 2^6-1 */
+#define SDM_DIN_RANGE 12 /* -2^12 ... 2^12-1 */
+
+struct gm20b_clk_dvfs_params {
+ s32 coeff_slope;
+ s32 coeff_offs;
+ u32 vco_ctrl;
+};
+
+static const struct gm20b_clk_dvfs_params gm20b_dvfs_params = {
+ .coeff_slope = -165230,
+ .coeff_offs = 214007,
+ .vco_ctrl = 0x7 << 3,
+};
+
+/*
+ * base.n is now the *integer* part of the N factor.
+ * sdm_din contains n's fractional part.
+ */
+struct gm20b_pll {
+ struct gk20a_pll base;
+ u32 sdm_din;
+};
+
+struct gm20b_clk_dvfs {
+ u32 dfs_coeff;
+ s32 dfs_det_max;
+ s32 dfs_ext_cal;
+};
+
+struct gm20b_clk {
+ /* currently applied parameters */
+ struct gk20a_clk base;
+ struct gm20b_clk_dvfs dvfs;
+ u32 uv;
+
+ /* new parameters to apply */
+ struct gk20a_pll new_pll;
+ struct gm20b_clk_dvfs new_dvfs;
+ u32 new_uv;
+
+ const struct gm20b_clk_dvfs_params *dvfs_params;
+
+ /* fused parameters */
+ s32 uvdet_slope;
+ s32 uvdet_offs;
+
+ /* safe frequency we can use at minimum voltage */
+ u32 safe_fmax_vmin;
+};
+#define gm20b_clk(p) container_of((gk20a_clk(p)), struct gm20b_clk, base)
+
static u32 pl_to_div(u32 pl)
{
return pl;
@@ -53,6 +156,484 @@ static const struct gk20a_clk_pllg_params gm20b_pllg_params = {
.min_pl = 1, .max_pl = 31,
};
+static void
+gm20b_pllg_read_mnp(struct gm20b_clk *clk, struct gm20b_pll *pll)
+{
+ struct nvkm_subdev *subdev = &clk->base.base.subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 val;
+
+ gk20a_pllg_read_mnp(&clk->base, &pll->base);
+ val = nvkm_rd32(device, GPCPLL_CFG2);
+ pll->sdm_din = (val >> GPCPLL_CFG2_SDM_DIN_SHIFT) &
+ MASK(GPCPLL_CFG2_SDM_DIN_WIDTH);
+}
+
+static void
+gm20b_pllg_write_mnp(struct gm20b_clk *clk, const struct gm20b_pll *pll)
+{
+ struct nvkm_device *device = clk->base.base.subdev.device;
+
+ nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_MASK,
+ pll->sdm_din << GPCPLL_CFG2_SDM_DIN_SHIFT);
+ gk20a_pllg_write_mnp(&clk->base, &pll->base);
+}
+
+/*
+ * Determine DFS_COEFF for the requested voltage. Always select external
+ * calibration override equal to the voltage, and set maximum detection
+ * limit "0" (to make sure that PLL output remains under F/V curve when
+ * voltage increases).
+ */
+static void
+gm20b_dvfs_calc_det_coeff(struct gm20b_clk *clk, s32 uv,
+ struct gm20b_clk_dvfs *dvfs)
+{
+ struct nvkm_subdev *subdev = &clk->base.base.subdev;
+ const struct gm20b_clk_dvfs_params *p = clk->dvfs_params;
+ u32 coeff;
+ /* Work with mv as uv would likely trigger an overflow */
+ s32 mv = DIV_ROUND_CLOSEST(uv, 1000);
+
+ /* coeff = slope * voltage + offset */
+ coeff = DIV_ROUND_CLOSEST(mv * p->coeff_slope, 1000) + p->coeff_offs;
+ coeff = DIV_ROUND_CLOSEST(coeff, 1000);
+ dvfs->dfs_coeff = min_t(u32, coeff, MASK(GPCPLL_DVFS0_DFS_COEFF_WIDTH));
+
+ dvfs->dfs_ext_cal = DIV_ROUND_CLOSEST(uv - clk->uvdet_offs,
+ clk->uvdet_slope);
+ /* should never happen */
+ if (abs(dvfs->dfs_ext_cal) >= BIT(DFS_DET_RANGE))
+ nvkm_error(subdev, "dfs_ext_cal overflow!\n");
+
+ dvfs->dfs_det_max = 0;
+
+ nvkm_debug(subdev, "%s uv: %d coeff: %x, ext_cal: %d, det_max: %d\n",
+ __func__, uv, dvfs->dfs_coeff, dvfs->dfs_ext_cal,
+ dvfs->dfs_det_max);
+}
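
A standalone sketch of the coefficient evaluation above, using the coeff_slope/coeff_offs constants this patch introduces and a hypothetical 900 mV operating point (the DIV_ROUND_CLOSEST macro here is a simplified stand-in for the kernel's, adequate for these values):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
        int coeff_slope = -165230;      /* gm20b_dvfs_params */
        int coeff_offs = 214007;
        int mv = 900;                   /* hypothetical voltage in mV */
        int coeff;

        coeff = DIV_ROUND_CLOSEST(mv * coeff_slope, 1000) + coeff_offs;
        coeff = DIV_ROUND_CLOSEST(coeff, 1000);
        printf("dfs_coeff = %d\n", coeff);      /* 65 for these numbers */
        return 0;
}
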
+
+/*
+ * Solve equation for integer and fractional part of the effective NDIV:
+ *
+ * n_eff = n_int + 1/2 + (SDM_DIN / 2^(SDM_DIN_RANGE + 1)) +
+ * (DVFS_COEFF * DVFS_DET_DELTA) / 2^DFS_DET_RANGE
+ *
+ * The SDM_DIN LSB is finally shifted out, since it is not accessible by sw.
+ */
+static void
+gm20b_dvfs_calc_ndiv(struct gm20b_clk *clk, u32 n_eff, u32 *n_int, u32 *sdm_din)
+{
+ struct nvkm_subdev *subdev = &clk->base.base.subdev;
+ const struct gk20a_clk_pllg_params *p = clk->base.params;
+ u32 n;
+ s32 det_delta;
+ u32 rem, rem_range;
+
+ /* calculate current ext_cal and subtract previous one */
+ det_delta = DIV_ROUND_CLOSEST(((s32)clk->uv) - clk->uvdet_offs,
+ clk->uvdet_slope);
+ det_delta -= clk->dvfs.dfs_ext_cal;
+ det_delta = min(det_delta, clk->dvfs.dfs_det_max);
+ det_delta *= clk->dvfs.dfs_coeff;
+
+ /* integer part of n */
+ n = (n_eff << DFS_DET_RANGE) - det_delta;
+ /* should never happen! */
+ if (n <= 0) {
+ nvkm_error(subdev, "ndiv <= 0 - setting to 1...\n");
+ n = 1 << DFS_DET_RANGE;
+ }
+ if (n >> DFS_DET_RANGE > p->max_n) {
+ nvkm_error(subdev, "ndiv > max_n - setting to max_n...\n");
+ n = p->max_n << DFS_DET_RANGE;
+ }
+ *n_int = n >> DFS_DET_RANGE;
+
+ /* fractional part of n */
+ rem = ((u32)n) & MASK(DFS_DET_RANGE);
+ rem_range = SDM_DIN_RANGE + 1 - DFS_DET_RANGE;
+ /* subtract 2^SDM_DIN_RANGE to account for the 1/2 of the equation */
+ rem = (rem << rem_range) - BIT(SDM_DIN_RANGE);
+ /* lose 8 LSB and clip - sdm_din only keeps the most significant byte */
+ *sdm_din = (rem >> BITS_PER_BYTE) & MASK(GPCPLL_CFG2_SDM_DIN_WIDTH);
+
+ nvkm_debug(subdev, "%s n_eff: %d, n_int: %d, sdm_din: %d\n", __func__,
+ n_eff, *n_int, *sdm_din);
+}
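
A standalone sketch of the integer/fractional split above, with det_delta forced to zero; note how subtracting 2^SDM_DIN_RANGE folds the equation's implicit 1/2 into the SDM byte (n_eff is illustrative, and this prints n_int = 34, sdm_din = 0xf0, i.e. -1/2 in the signed SDM encoding):

#include <stdint.h>
#include <stdio.h>

#define DFS_DET_RANGE 6
#define SDM_DIN_RANGE 12
#define MASK(w) ((1 << (w)) - 1)

int main(void)
{
        uint32_t n_eff = 34, det_delta = 0;     /* illustrative */
        uint32_t n, n_int, rem, rem_range, sdm_din;

        n = (n_eff << DFS_DET_RANGE) - det_delta;
        n_int = n >> DFS_DET_RANGE;
        rem = n & MASK(DFS_DET_RANGE);
        rem_range = SDM_DIN_RANGE + 1 - DFS_DET_RANGE;
        /* subtracting 2^SDM_DIN_RANGE accounts for the 1/2 in the equation */
        rem = (rem << rem_range) - (1u << SDM_DIN_RANGE);
        sdm_din = (rem >> 8) & MASK(8);         /* keep most significant byte */

        printf("n_int = %u, sdm_din = 0x%02x\n", n_int, sdm_din);
        return 0;
}
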
+
+static int
+gm20b_pllg_slide(struct gm20b_clk *clk, u32 n)
+{
+ struct nvkm_subdev *subdev = &clk->base.base.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct gm20b_pll pll;
+ u32 n_int, sdm_din;
+ int ret = 0;
+
+ /* calculate the new n_int/sdm_din for this n/uv */
+ gm20b_dvfs_calc_ndiv(clk, n, &n_int, &sdm_din);
+
+ /* get old coefficients */
+ gm20b_pllg_read_mnp(clk, &pll);
+ /* do nothing if NDIV is the same */
+ if (n_int == pll.base.n && sdm_din == pll.sdm_din)
+ return 0;
+
+ /* pll slowdown mode */
+ nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
+ BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
+ BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));
+
+ /* new ndiv ready for ramp */
+ /* in DVFS mode SDM is updated via "new" field */
+ nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_NEW_MASK,
+ sdm_din << GPCPLL_CFG2_SDM_DIN_NEW_SHIFT);
+ pll.base.n = n_int;
+ udelay(1);
+ gk20a_pllg_write_mnp(&clk->base, &pll.base);
+
+ /* dynamic ramp to new ndiv */
+ udelay(1);
+ nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
+ BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT),
+ BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT));
+
+ /* wait for ramping to complete */
+ if (nvkm_wait_usec(device, 500, GPC_BCAST_NDIV_SLOWDOWN_DEBUG,
+ GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK,
+ GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) < 0)
+ ret = -ETIMEDOUT;
+
+ /* in DVFS mode complete SDM update */
+ nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_MASK,
+ sdm_din << GPCPLL_CFG2_SDM_DIN_SHIFT);
+
+ /* exit slowdown mode */
+ nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
+ BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
+ BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
+ nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
+
+ return ret;
+}
+
+static int
+gm20b_pllg_enable(struct gm20b_clk *clk)
+{
+ struct nvkm_device *device = clk->base.base.subdev.device;
+
+ nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
+ nvkm_rd32(device, GPCPLL_CFG);
+
+ /* In DVFS mode lock cannot be used - so just delay */
+ udelay(40);
+
+ /* set SYNC_MODE for glitchless switch out of bypass */
+ nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_SYNC_MODE,
+ GPCPLL_CFG_SYNC_MODE);
+ nvkm_rd32(device, GPCPLL_CFG);
+
+ /* switch to VCO mode */
+ nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
+ BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+
+ return 0;
+}
+
+static void
+gm20b_pllg_disable(struct gm20b_clk *clk)
+{
+ struct nvkm_device *device = clk->base.base.subdev.device;
+
+ /* put PLL in bypass before disabling it */
+ nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
+
+ /* clear SYNC_MODE before disabling PLL */
+ nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_SYNC_MODE, 0);
+
+ nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
+ nvkm_rd32(device, GPCPLL_CFG);
+}
+
+static int
+gm20b_pllg_program_mnp(struct gm20b_clk *clk, const struct gk20a_pll *pll)
+{
+ struct nvkm_subdev *subdev = &clk->base.base.subdev;
+ struct nvkm_device *device = subdev->device;
+ struct gm20b_pll cur_pll;
+ u32 n_int, sdm_din;
+ /* if we only change pdiv, we can do a glitchless transition */
+ bool pdiv_only;
+ int ret;
+
+ gm20b_dvfs_calc_ndiv(clk, pll->n, &n_int, &sdm_din);
+ gm20b_pllg_read_mnp(clk, &cur_pll);
+ pdiv_only = cur_pll.base.n == n_int && cur_pll.sdm_din == sdm_din &&
+ cur_pll.base.m == pll->m;
+
+ /* need full sequence if clock not enabled yet */
+ if (!gk20a_pllg_is_enabled(&clk->base))
+ pdiv_only = false;
+
+ /* split VCO-to-bypass jump in half by setting out divider 1:2 */
+ nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+ GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
+ /* Intentional 2nd write to assure linear divider operation */
+ nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+ GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
+ nvkm_rd32(device, GPC2CLK_OUT);
+ udelay(2);
+
+ if (pdiv_only) {
+ u32 old = cur_pll.base.pl;
+ u32 new = pll->pl;
+
+ /*
+ * we can do a glitchless transition only if the old and new PL
+ * parameters share at least one bit set to 1. If this is not
+ * the case, calculate and program an interim PL that will allow
+ * us to respect that rule.
+ */
+ if ((old & new) == 0) {
+ cur_pll.base.pl = min(old | BIT(ffs(new) - 1),
+ new | BIT(ffs(old) - 1));
+ gk20a_pllg_write_mnp(&clk->base, &cur_pll.base);
+ }
+
+ cur_pll.base.pl = new;
+ gk20a_pllg_write_mnp(&clk->base, &cur_pll.base);
+ } else {
+ /* disable before programming if more than pdiv changes */
+ gm20b_pllg_disable(clk);
+
+ cur_pll.base = *pll;
+ cur_pll.base.n = n_int;
+ cur_pll.sdm_din = sdm_din;
+ gm20b_pllg_write_mnp(clk, &cur_pll);
+
+ ret = gm20b_pllg_enable(clk);
+ if (ret)
+ return ret;
+ }
+
+ /* restore out divider 1:1 */
+ udelay(2);
+ nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+ GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
+ /* Intentional 2nd write to assure linear divider operation */
+ nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+ GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
+ nvkm_rd32(device, GPC2CLK_OUT);
+
+ return 0;
+}
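
A standalone sketch of the interim-PL rule from the pdiv_only branch above: when the old and new PL values share no set bit, an intermediate value overlapping both is programmed first (the PL values are illustrative):

#include <stdio.h>
#include <strings.h>    /* ffs() */

int main(void)
{
        unsigned int old_pl = 4, new_pl = 3;    /* 100b vs 011b: disjoint */
        unsigned int interim = 0;

        if ((old_pl & new_pl) == 0) {
                unsigned int a = old_pl | (1u << (ffs(new_pl) - 1));
                unsigned int b = new_pl | (1u << (ffs(old_pl) - 1));

                interim = a < b ? a : b;        /* min(), as in the patch */
        }
        /* 5 (101b) shares a set bit with both 4 and 3 */
        printf("interim PL = %u\n", interim);
        return 0;
}
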
+
+static int
+gm20b_pllg_program_mnp_slide(struct gm20b_clk *clk, const struct gk20a_pll *pll)
+{
+ struct gk20a_pll cur_pll;
+ int ret;
+
+ if (gk20a_pllg_is_enabled(&clk->base)) {
+ gk20a_pllg_read_mnp(&clk->base, &cur_pll);
+
+ /* just do NDIV slide if there is no change to M and PL */
+ if (pll->m == cur_pll.m && pll->pl == cur_pll.pl)
+ return gm20b_pllg_slide(clk, pll->n);
+
+ /* slide down to current NDIV_LO */
+ cur_pll.n = gk20a_pllg_n_lo(&clk->base, &cur_pll);
+ ret = gm20b_pllg_slide(clk, cur_pll.n);
+ if (ret)
+ return ret;
+ }
+
+ /* program MNP with the new clock parameters and new NDIV_LO */
+ cur_pll = *pll;
+ cur_pll.n = gk20a_pllg_n_lo(&clk->base, &cur_pll);
+ ret = gm20b_pllg_program_mnp(clk, &cur_pll);
+ if (ret)
+ return ret;
+
+ /* slide up to new NDIV */
+ return gm20b_pllg_slide(clk, pll->n);
+}
+
+static int
+gm20b_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
+{
+ struct gm20b_clk *clk = gm20b_clk(base);
+ struct nvkm_subdev *subdev = &base->subdev;
+ struct nvkm_volt *volt = base->subdev.device->volt;
+ int ret;
+
+ ret = gk20a_pllg_calc_mnp(&clk->base, cstate->domain[nv_clk_src_gpc] *
+ GK20A_CLK_GPC_MDIV, &clk->new_pll);
+ if (ret)
+ return ret;
+
+ clk->new_uv = volt->vid[cstate->voltage].uv;
+ gm20b_dvfs_calc_det_coeff(clk, clk->new_uv, &clk->new_dvfs);
+
+ nvkm_debug(subdev, "%s uv: %d uv\n", __func__, clk->new_uv);
+
+ return 0;
+}
+
+/*
+ * Compute PLL parameters that are always safe for the current voltage
+ */
+static void
+gm20b_dvfs_calc_safe_pll(struct gm20b_clk *clk, struct gk20a_pll *pll)
+{
+ u32 rate = gk20a_pllg_calc_rate(&clk->base, pll) / KHZ;
+ u32 parent_rate = clk->base.parent_rate / KHZ;
+ u32 nmin, nsafe;
+
+ /* remove a safe margin of 10% */
+ if (rate > clk->safe_fmax_vmin)
+ rate = rate * (100 - 10) / 100;
+
+ /* gpc2clk */
+ rate *= 2;
+
+ nmin = DIV_ROUND_UP(pll->m * clk->base.params->min_vco, parent_rate);
+ nsafe = pll->m * rate / (clk->base.parent_rate);
+
+ if (nsafe < nmin) {
+ pll->pl = DIV_ROUND_UP(nmin * parent_rate, pll->m * rate);
+ nsafe = nmin;
+ }
+
+ pll->n = nsafe;
+}
+
+static void
+gm20b_dvfs_program_coeff(struct gm20b_clk *clk, u32 coeff)
+{
+ struct nvkm_device *device = clk->base.base.subdev.device;
+
+ /* strobe to read external DFS coefficient */
+ nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
+ GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT,
+ GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT);
+
+ nvkm_mask(device, GPCPLL_DVFS0, GPCPLL_DVFS0_DFS_COEFF_MASK,
+ coeff << GPCPLL_DVFS0_DFS_COEFF_SHIFT);
+
+ udelay(1);
+ nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
+ GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, 0);
+}
+
+static void
+gm20b_dvfs_program_ext_cal(struct gm20b_clk *clk, u32 dfs_det_cal)
+{
+ struct nvkm_device *device = clk->base.base.subdev.device;
+ u32 val;
+
+ nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2, MASK(DFS_DET_RANGE + 1),
+ dfs_det_cal);
+ udelay(1);
+
+ val = nvkm_rd32(device, GPCPLL_DVFS1);
+ if (!(val & BIT(25))) {
+ /* Use external value to overwrite calibration value */
+ val |= BIT(25) | BIT(16);
+ nvkm_wr32(device, GPCPLL_DVFS1, val);
+ }
+}
+
+static void
+gm20b_dvfs_program_dfs_detection(struct gm20b_clk *clk,
+ struct gm20b_clk_dvfs *dvfs)
+{
+ struct nvkm_device *device = clk->base.base.subdev.device;
+
+ /* strobe to read external DFS coefficient */
+ nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
+ GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT,
+ GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT);
+
+ nvkm_mask(device, GPCPLL_DVFS0,
+ GPCPLL_DVFS0_DFS_COEFF_MASK | GPCPLL_DVFS0_DFS_DET_MAX_MASK,
+ dvfs->dfs_coeff << GPCPLL_DVFS0_DFS_COEFF_SHIFT |
+ dvfs->dfs_det_max << GPCPLL_DVFS0_DFS_DET_MAX_SHIFT);
+
+ udelay(1);
+ nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
+ GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, 0);
+
+ gm20b_dvfs_program_ext_cal(clk, dvfs->dfs_ext_cal);
+}
+
+static int
+gm20b_clk_prog(struct nvkm_clk *base)
+{
+ struct gm20b_clk *clk = gm20b_clk(base);
+ u32 cur_freq;
+ int ret;
+
+ /* No change in DVFS settings? */
+ if (clk->uv == clk->new_uv)
+ goto prog;
+
+ /*
+ * Interim step for changing DVFS detection settings: low enough
+ * frequency to be safe at DVFS coeff = 0.
+ *
+ * 1. If voltage is increasing:
+ * - safe frequency target matches the lowest - old - frequency
+ * - DVFS settings are still old
+ * - Voltage already increased to new level by volt, but maximum
+ * detection limit assures PLL output remains under F/V curve
+ *
+ * 2. If voltage is decreasing:
+ * - safe frequency target matches the lowest - new - frequency
+ * - DVFS settings are still old
+ * - Voltage is also old, it will be lowered by volt afterwards
+ *
+ * Interim step can be skipped if old frequency is below safe minimum,
+ * i.e., it is low enough to be safe at any voltage in the operating
+ * range with zero DVFS coefficient.
+ */
+ cur_freq = nvkm_clk_read(&clk->base.base, nv_clk_src_gpc);
+ if (cur_freq > clk->safe_fmax_vmin) {
+ struct gk20a_pll pll_safe;
+
+ if (clk->uv < clk->new_uv)
+ /* voltage will rise: safe frequency is the current one */
+ pll_safe = clk->base.pll;
+ else
+ /* voltage will drop: safe frequency is new one */
+ pll_safe = clk->new_pll;
+
+ gm20b_dvfs_calc_safe_pll(clk, &pll_safe);
+ ret = gm20b_pllg_program_mnp_slide(clk, &pll_safe);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * DVFS detection settings transition:
+ * - Set DVFS coefficient zero
+ * - Set calibration level to new voltage
+ * - Set DVFS coefficient to match new voltage
+ */
+ gm20b_dvfs_program_coeff(clk, 0);
+ gm20b_dvfs_program_ext_cal(clk, clk->new_dvfs.dfs_ext_cal);
+ gm20b_dvfs_program_coeff(clk, clk->new_dvfs.dfs_coeff);
+ gm20b_dvfs_program_dfs_detection(clk, &clk->new_dvfs);
+
+prog:
+ clk->uv = clk->new_uv;
+ clk->dvfs = clk->new_dvfs;
+ clk->base.pll = clk->new_pll;
+
+ return gm20b_pllg_program_mnp_slide(clk, &clk->base.pll);
+}
+
static struct nvkm_pstate
gm20b_pstates[] = {
{
@@ -133,9 +714,99 @@ gm20b_pstates[] = {
.voltage = 12,
},
},
-
};
+static void
+gm20b_clk_fini(struct nvkm_clk *base)
+{
+ struct nvkm_device *device = base->subdev.device;
+ struct gm20b_clk *clk = gm20b_clk(base);
+
+ /* slide to VCO min */
+ if (gk20a_pllg_is_enabled(&clk->base)) {
+ struct gk20a_pll pll;
+ u32 n_lo;
+
+ gk20a_pllg_read_mnp(&clk->base, &pll);
+ n_lo = gk20a_pllg_n_lo(&clk->base, &pll);
+ gm20b_pllg_slide(clk, n_lo);
+ }
+
+ gm20b_pllg_disable(clk);
+
+ /* set IDDQ */
+ nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 1);
+}
+
+static int
+gm20b_clk_init_dvfs(struct gm20b_clk *clk)
+{
+ struct nvkm_subdev *subdev = &clk->base.base.subdev;
+ struct nvkm_device *device = subdev->device;
+ bool fused = clk->uvdet_offs && clk->uvdet_slope;
+ static const s32 ADC_SLOPE_UV = 10000; /* default ADC detection slope */
+ u32 data;
+ int ret;
+
+ /* Enable NA DVFS */
+ nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_BIT,
+ GPCPLL_DVFS1_EN_DFS_BIT);
+
+ /* Set VCO_CTRL */
+ if (clk->dvfs_params->vco_ctrl)
+ nvkm_mask(device, GPCPLL_CFG3, GPCPLL_CFG3_VCO_CTRL_MASK,
+ clk->dvfs_params->vco_ctrl << GPCPLL_CFG3_VCO_CTRL_SHIFT);
+
+ if (fused) {
+ /* Start internal calibration, but ignore results */
+ nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_CAL_BIT,
+ GPCPLL_DVFS1_EN_DFS_CAL_BIT);
+
+ /* got uvdet parameters from fuse, skip calibration */
+ goto calibrated;
+ }
+
+ /*
+ * If calibration parameters are not fused, start internal calibration,
+ * wait for completion, and use results along with default slope to
+ * calculate ADC offset during boot.
+ */
+ nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_CAL_BIT,
+ GPCPLL_DVFS1_EN_DFS_CAL_BIT);
+
+ /* Wait for internal calibration done (spec < 2us). */
+ ret = nvkm_wait_usec(device, 10, GPCPLL_DVFS1,
+ GPCPLL_DVFS1_DFS_CAL_DONE_BIT,
+ GPCPLL_DVFS1_DFS_CAL_DONE_BIT);
+ if (ret < 0) {
+ nvkm_error(subdev, "GPCPLL calibration timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ data = nvkm_rd32(device, GPCPLL_CFG3) >>
+ GPCPLL_CFG3_PLL_DFS_TESTOUT_SHIFT;
+ data &= MASK(GPCPLL_CFG3_PLL_DFS_TESTOUT_WIDTH);
+
+ clk->uvdet_slope = ADC_SLOPE_UV;
+ clk->uvdet_offs = ((s32)clk->uv) - data * ADC_SLOPE_UV;
+
+ nvkm_debug(subdev, "calibrated DVFS parameters: offs %d, slope %d\n",
+ clk->uvdet_offs, clk->uvdet_slope);
+
+calibrated:
+ /* Compute and apply initial DVFS parameters */
+ gm20b_dvfs_calc_det_coeff(clk, clk->uv, &clk->dvfs);
+ gm20b_dvfs_program_coeff(clk, 0);
+ gm20b_dvfs_program_ext_cal(clk, clk->dvfs.dfs_ext_cal);
+ gm20b_dvfs_program_coeff(clk, clk->dvfs.dfs_coeff);
+ gm20b_dvfs_program_dfs_detection(clk, &clk->dvfs);
+
+ return 0;
+}
+
+/* Forward declaration to detect speedo >=1 in gm20b_clk_init() */
+static const struct nvkm_clk_func gm20b_clk;
+
static int
gm20b_clk_init(struct nvkm_clk *base)
{
@@ -143,15 +814,56 @@ gm20b_clk_init(struct nvkm_clk *base)
struct nvkm_subdev *subdev = &clk->base.subdev;
struct nvkm_device *device = subdev->device;
int ret;
+ u32 data;
+
+ /* get out from IDDQ */
+ nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 0);
+ nvkm_rd32(device, GPCPLL_CFG);
+ udelay(5);
+
+ nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK,
+ GPC2CLK_OUT_INIT_VAL);
/* Set the global bypass control to VCO */
nvkm_mask(device, BYPASSCTRL_SYS,
MASK(BYPASSCTRL_SYS_GPCPLL_WIDTH) << BYPASSCTRL_SYS_GPCPLL_SHIFT,
0);
+ ret = gk20a_clk_setup_slide(clk);
+ if (ret)
+ return ret;
+
+ /* If not fused, set RAM SVOP PDP data 0x2, and enable fuse override */
+ data = nvkm_rd32(device, 0x021944);
+ if (!(data & 0x3)) {
+ data |= 0x2;
+ nvkm_wr32(device, 0x021944, data);
+
+ data = nvkm_rd32(device, 0x021948);
+ data |= 0x1;
+ nvkm_wr32(device, 0x021948, data);
+ }
+
+ /* Disable idle slow down */
+ nvkm_mask(device, 0x20160, 0x003f0000, 0x0);
+
+ /* speedo >= 1? */
+ if (clk->base.func == &gm20b_clk) {
+ struct gm20b_clk *_clk = gm20b_clk(base);
+ struct nvkm_volt *volt = device->volt;
+
+ /* Get current voltage */
+ _clk->uv = nvkm_volt_get(volt);
+
+ /* Initialize DVFS */
+ ret = gm20b_clk_init_dvfs(_clk);
+ if (ret)
+ return ret;
+ }
+
/* Start with lowest frequency */
base->func->calc(base, &base->func->pstates[0].base);
- ret = base->func->prog(&clk->base);
+ ret = base->func->prog(base);
if (ret) {
nvkm_error(subdev, "cannot initialize clock\n");
return ret;
@@ -169,6 +881,7 @@ gm20b_clk_speedo0 = {
.prog = gk20a_clk_prog,
.tidy = gk20a_clk_tidy,
.pstates = gm20b_pstates,
+ /* Speedo 0 only supports 12 voltages */
.nr_pstates = ARRAY_SIZE(gm20b_pstates) - 1,
.domains = {
{ nv_clk_src_crystal, 0xff },
@@ -177,8 +890,26 @@ gm20b_clk_speedo0 = {
},
};
-int
-gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+static const struct nvkm_clk_func
+gm20b_clk = {
+ .init = gm20b_clk_init,
+ .fini = gm20b_clk_fini,
+ .read = gk20a_clk_read,
+ .calc = gm20b_clk_calc,
+ .prog = gm20b_clk_prog,
+ .tidy = gk20a_clk_tidy,
+ .pstates = gm20b_pstates,
+ .nr_pstates = ARRAY_SIZE(gm20b_pstates),
+ .domains = {
+ { nv_clk_src_crystal, 0xff },
+ { nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
+ { nv_clk_src_max },
+ },
+};
+
+static int
+gm20b_clk_new_speedo0(struct nvkm_device *device, int index,
+ struct nvkm_clk **pclk)
{
struct gk20a_clk *clk;
int ret;
@@ -188,11 +919,156 @@ gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
return -ENOMEM;
*pclk = &clk->base;
- ret = _gk20a_clk_ctor(device, index, &gm20b_clk_speedo0,
- &gm20b_pllg_params, clk);
+ ret = gk20a_clk_ctor(device, index, &gm20b_clk_speedo0,
+ &gm20b_pllg_params, clk);
clk->pl_to_div = pl_to_div;
clk->div_to_pl = div_to_pl;
return ret;
}
+
+/* FUSE register */
+#define FUSE_RESERVED_CALIB0 0x204
+#define FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_SHIFT 0
+#define FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_WIDTH 4
+#define FUSE_RESERVED_CALIB0_INTERCEPT_INT_SHIFT 4
+#define FUSE_RESERVED_CALIB0_INTERCEPT_INT_WIDTH 10
+#define FUSE_RESERVED_CALIB0_SLOPE_FRAC_SHIFT 14
+#define FUSE_RESERVED_CALIB0_SLOPE_FRAC_WIDTH 10
+#define FUSE_RESERVED_CALIB0_SLOPE_INT_SHIFT 24
+#define FUSE_RESERVED_CALIB0_SLOPE_INT_WIDTH 6
+#define FUSE_RESERVED_CALIB0_FUSE_REV_SHIFT 30
+#define FUSE_RESERVED_CALIB0_FUSE_REV_WIDTH 2
+
+static int
+gm20b_clk_init_fused_params(struct gm20b_clk *clk)
+{
+ struct nvkm_subdev *subdev = &clk->base.base.subdev;
+ u32 val = 0;
+ u32 rev = 0;
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA)
+ tegra_fuse_readl(FUSE_RESERVED_CALIB0, &val);
+ rev = (val >> FUSE_RESERVED_CALIB0_FUSE_REV_SHIFT) &
+ MASK(FUSE_RESERVED_CALIB0_FUSE_REV_WIDTH);
+#endif
+
+ /* No fused parameters, we will calibrate later */
+ if (rev == 0)
+ return -EINVAL;
+
+ /* Integer part in mV + fractional part in uV */
+ clk->uvdet_slope = ((val >> FUSE_RESERVED_CALIB0_SLOPE_INT_SHIFT) &
+ MASK(FUSE_RESERVED_CALIB0_SLOPE_INT_WIDTH)) * 1000 +
+ ((val >> FUSE_RESERVED_CALIB0_SLOPE_FRAC_SHIFT) &
+ MASK(FUSE_RESERVED_CALIB0_SLOPE_FRAC_WIDTH));
+
+ /* Integer part in mV + fractional part in 100uV */
+ clk->uvdet_offs = ((val >> FUSE_RESERVED_CALIB0_INTERCEPT_INT_SHIFT) &
+ MASK(FUSE_RESERVED_CALIB0_INTERCEPT_INT_WIDTH)) * 1000 +
+ ((val >> FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_SHIFT) &
+ MASK(FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_WIDTH)) * 100;
+
+ nvkm_debug(subdev, "fused calibration data: slope %d, offs %d\n",
+ clk->uvdet_slope, clk->uvdet_offs);
+ return 0;
+}
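
A standalone sketch of the calibration-fuse decoding above, fed a made-up fuse word instead of a real tegra_fuse_readl() result; the slope is integer mV plus fractional uV, the intercept integer mV plus fractional 100 uV units:

#include <stdint.h>
#include <stdio.h>

#define MASK(w) ((1 << (w)) - 1)

int main(void)
{
        /* made-up fuse word: rev 1, slope 10 mV + 500 uV,
         * intercept 600 mV + 5 * 100 uV */
        uint32_t val = (1u << 30) | (10u << 24) | (500u << 14) |
                       (600u << 4) | 5u;
        int32_t uvdet_slope, uvdet_offs;

        uvdet_slope = ((val >> 24) & MASK(6)) * 1000 +
                      ((val >> 14) & MASK(10));
        uvdet_offs = ((val >> 4) & MASK(10)) * 1000 +
                     (val & MASK(4)) * 100;

        /* prints: slope 10500, offs 600500 (both in uV) */
        printf("slope %d, offs %d\n", uvdet_slope, uvdet_offs);
        return 0;
}
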
+
+static int
+gm20b_clk_init_safe_fmax(struct gm20b_clk *clk)
+{
+ struct nvkm_subdev *subdev = &clk->base.base.subdev;
+ struct nvkm_volt *volt = subdev->device->volt;
+ struct nvkm_pstate *pstates = clk->base.base.func->pstates;
+ int nr_pstates = clk->base.base.func->nr_pstates;
+ int vmin, id = 0;
+ u32 fmax = 0;
+ int i;
+
+ /* find lowest voltage we can use */
+ vmin = volt->vid[0].uv;
+ for (i = 1; i < volt->vid_nr; i++) {
+ if (volt->vid[i].uv <= vmin) {
+ vmin = volt->vid[i].uv;
+ id = volt->vid[i].vid;
+ }
+ }
+
+ /* find max frequency at this voltage */
+ for (i = 0; i < nr_pstates; i++)
+ if (pstates[i].base.voltage == id)
+ fmax = max(fmax,
+ pstates[i].base.domain[nv_clk_src_gpc]);
+
+ if (!fmax) {
+ nvkm_error(subdev, "failed to evaluate safe fmax\n");
+ return -EINVAL;
+ }
+
+ /* we are safe at 90% of the max frequency */
+ clk->safe_fmax_vmin = fmax * (100 - 10) / 100;
+ nvkm_debug(subdev, "safe fmax @ vmin = %u Khz\n", clk->safe_fmax_vmin);
+
+ return 0;
+}
+
+int
+gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+{
+ struct nvkm_device_tegra *tdev = device->func->tegra(device);
+ struct gm20b_clk *clk;
+ struct nvkm_subdev *subdev;
+ struct gk20a_clk_pllg_params *clk_params;
+ int ret;
+
+ /* Speedo 0 GPUs cannot use noise-aware PLL */
+ if (tdev->gpu_speedo_id == 0)
+ return gm20b_clk_new_speedo0(device, index, pclk);
+
+ /* Speedo >= 1, use NAPLL */
+ clk = kzalloc(sizeof(*clk) + sizeof(*clk_params), GFP_KERNEL);
+ if (!clk)
+ return -ENOMEM;
+ *pclk = &clk->base.base;
+ subdev = &clk->base.base.subdev;
+
+ /* duplicate the clock parameters since we will patch them below */
+ clk_params = (void *) (clk + 1);
+ *clk_params = gm20b_pllg_params;
+ ret = gk20a_clk_ctor(device, index, &gm20b_clk, clk_params,
+ &clk->base);
+ if (ret)
+ return ret;
+
+ /*
+ * NAPLL can only work with max_u; clamp the m range so
+ * gk20a_pllg_calc_mnp always uses it
+ */
+ clk_params->max_m = clk_params->min_m = DIV_ROUND_UP(clk_params->max_u,
+ (clk->base.parent_rate / KHZ));
+ if (clk_params->max_m == 0) {
+ nvkm_warn(subdev, "cannot use NAPLL, using legacy clock...\n");
+ kfree(clk);
+ return gm20b_clk_new_speedo0(device, index, pclk);
+ }
+
+ clk->base.pl_to_div = pl_to_div;
+ clk->base.div_to_pl = div_to_pl;
+
+ clk->dvfs_params = &gm20b_dvfs_params;
+
+ ret = gm20b_clk_init_fused_params(clk);
+ /*
+ * no fused parameters available - we will calibrate during init;
+ * this should never happen on production parts
+ */
+ if (ret)
+ nvkm_warn(subdev, "no fused calibration parameters\n");
+
+ ret = gm20b_clk_init_safe_fmax(clk);
+ if (ret)
+ return ret;
+
+ return 0;
+}
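
A standalone sketch of the M clamp near the end of gm20b_clk_new(): pinning min_m = max_m forces gk20a_pllg_calc_mnp() to the one M value that keeps the comparison rate at max_u (both input values are illustrative):

#include <stdio.h>

#define KHZ 1000
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int max_u = 38400;             /* kHz, hypothetical */
        unsigned int parent_rate = 38400000;    /* Hz */
        unsigned int m = DIV_ROUND_UP(max_u, parent_rate / KHZ);

        printf("min_m = max_m = %u\n", m);      /* 1 for these numbers */
        return 0;
}
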
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index 842d5de96..edcc157e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -24,6 +24,8 @@ nvkm-y += nvkm/subdev/fb/gk104.o
nvkm-y += nvkm/subdev/fb/gk20a.o
nvkm-y += nvkm/subdev/fb/gm107.o
nvkm-y += nvkm/subdev/fb/gm200.o
+nvkm-y += nvkm/subdev/fb/gp100.o
+nvkm-y += nvkm/subdev/fb/gp104.o
nvkm-y += nvkm/subdev/fb/ram.o
nvkm-y += nvkm/subdev/fb/ramnv04.o
@@ -41,6 +43,7 @@ nvkm-y += nvkm/subdev/fb/rammcp77.o
nvkm-y += nvkm/subdev/fb/ramgf100.o
nvkm-y += nvkm/subdev/fb/ramgk104.o
nvkm-y += nvkm/subdev/fb/ramgm107.o
+nvkm-y += nvkm/subdev/fb/ramgp100.o
nvkm-y += nvkm/subdev/fb/sddr2.o
nvkm-y += nvkm/subdev/fb/sddr3.o
nvkm-y += nvkm/subdev/fb/gddr3.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index ce90242b8..a7049c041 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -25,6 +25,7 @@
#include "ram.h"
#include <core/memory.h>
+#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/M0203.h>
#include <engine/gr.h>
@@ -134,6 +135,10 @@ nvkm_fb_init(struct nvkm_subdev *subdev)
if (fb->func->init)
fb->func->init(fb);
+ if (fb->func->init_page)
+ fb->func->init_page(fb);
+ if (fb->func->init_unkn)
+ fb->func->init_unkn(fb);
return 0;
}
@@ -171,6 +176,7 @@ nvkm_fb_ctor(const struct nvkm_fb_func *func, struct nvkm_device *device,
nvkm_subdev_ctor(&nvkm_fb, device, index, &fb->subdev);
fb->func = func;
fb->tile.regions = fb->func->tile.regions;
+ fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage", 0);
}
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index e649ead5c..76433cc66 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -72,6 +72,22 @@ gf100_fb_oneinit(struct nvkm_fb *fb)
}
void
+gf100_fb_init_page(struct nvkm_fb *fb)
+{
+ struct nvkm_device *device = fb->subdev.device;
+ switch (fb->page) {
+ case 16:
+ nvkm_mask(device, 0x100c80, 0x00000001, 0x00000001);
+ break;
+ case 17:
+ default:
+ nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000);
+ fb->page = 17;
+ break;
+ }
+}
+
+void
gf100_fb_init(struct nvkm_fb *base)
{
struct gf100_fb *fb = gf100_fb(base);
@@ -79,8 +95,6 @@ gf100_fb_init(struct nvkm_fb *base)
if (fb->r100c10_page)
nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
-
- nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
}
void *
@@ -125,6 +139,7 @@ gf100_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gf100_fb_init,
+ .init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gf100_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
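
Together with the fb->page field that base.c now reads from the NvFbBigPage option, this hook lets the big-page size be forced at load time. Assuming the usual nouveau `config=` module-parameter plumbing (an assumption, not something this diff shows), something like:

    nouveau.config=NvFbBigPage=16

would select the 64 KiB mode handled by the `case 16` branch above, while any other value falls back to the 128 KiB default (page = 17) that the removed hardcoded write used to set unconditionally.
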
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
index 2160e5a39..449f43164 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
@@ -14,4 +14,6 @@ int gf100_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *,
void *gf100_fb_dtor(struct nvkm_fb *);
void gf100_fb_init(struct nvkm_fb *);
void gf100_fb_intr(struct nvkm_fb *);
+
+void gp100_fb_init(struct nvkm_fb *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
index b41f0f700..4245e2e6e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
@@ -29,6 +29,7 @@ gk104_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gf100_fb_init,
+ .init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gk104_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
index 7306f7dfc..f815fe2bb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
@@ -27,7 +27,6 @@ static void
gk20a_fb_init(struct nvkm_fb *fb)
{
struct nvkm_device *device = fb->subdev.device;
- nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->mmu_wr) >> 8);
nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->mmu_rd) >> 8);
}
@@ -36,6 +35,7 @@ static const struct nvkm_fb_func
gk20a_fb = {
.oneinit = gf100_fb_oneinit,
.init = gk20a_fb_init,
+ .init_page = gf100_fb_init_page,
.memtype_valid = gf100_fb_memtype_valid,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
index 4869fdb75..db699025f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
@@ -29,6 +29,7 @@ gm107_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gf100_fb_init,
+ .init_page = gf100_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gm107_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
index 44f5716f6..62f653240 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
@@ -26,6 +26,24 @@
#include <core/memory.h>
+void
+gm200_fb_init_page(struct nvkm_fb *fb)
+{
+ struct nvkm_device *device = fb->subdev.device;
+ switch (fb->page) {
+ case 16:
+ nvkm_mask(device, 0x100c80, 0x00000801, 0x00000001);
+ break;
+ case 17:
+ nvkm_mask(device, 0x100c80, 0x00000801, 0x00000000);
+ break;
+ default:
+ nvkm_mask(device, 0x100c80, 0x00000800, 0x00000800);
+ fb->page = 0;
+ break;
+ }
+}
+
static void
gm200_fb_init(struct nvkm_fb *base)
{
@@ -48,6 +66,7 @@ gm200_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gm200_fb_init,
+ .init_page = gm200_fb_init_page,
.intr = gf100_fb_intr,
.ram_new = gm107_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
new file mode 100644
index 000000000..98474aec1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "gf100.h"
+#include "ram.h"
+
+#include <core/memory.h>
+
+static void
+gp100_fb_init_unkn(struct nvkm_fb *base)
+{
+ struct nvkm_device *device = gf100_fb(base)->base.subdev.device;
+ nvkm_wr32(device, 0x1fac80, nvkm_rd32(device, 0x100c80));
+ nvkm_wr32(device, 0x1facc4, nvkm_rd32(device, 0x100cc4));
+ nvkm_wr32(device, 0x1facc8, nvkm_rd32(device, 0x100cc8));
+ nvkm_wr32(device, 0x1faccc, nvkm_rd32(device, 0x100ccc));
+}
+
+void
+gp100_fb_init(struct nvkm_fb *base)
+{
+ struct gf100_fb *fb = gf100_fb(base);
+ struct nvkm_device *device = fb->base.subdev.device;
+
+ if (fb->r100c10_page)
+ nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
+
+ nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->base.mmu_wr) >> 8);
+ nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->base.mmu_rd) >> 8);
+ nvkm_mask(device, 0x100cc4, 0x00060000,
+ max(nvkm_memory_size(fb->base.mmu_rd) >> 16, (u64)2) << 17);
+}
+
+static const struct nvkm_fb_func
+gp100_fb = {
+ .dtor = gf100_fb_dtor,
+ .oneinit = gf100_fb_oneinit,
+ .init = gp100_fb_init,
+ .init_page = gm200_fb_init_page,
+ .init_unkn = gp100_fb_init_unkn,
+ .ram_new = gp100_ram_new,
+ .memtype_valid = gf100_fb_memtype_valid,
+};
+
+int
+gp100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return gf100_fb_new_(&gp100_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c
new file mode 100644
index 000000000..92cb71861
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "gf100.h"
+#include "ram.h"
+
+#include <core/memory.h>
+
+static const struct nvkm_fb_func
+gp104_fb = {
+ .dtor = gf100_fb_dtor,
+ .oneinit = gf100_fb_oneinit,
+ .init = gp100_fb_init,
+ .init_page = gm200_fb_init_page,
+ .ram_new = gp100_ram_new,
+ .memtype_valid = gf100_fb_memtype_valid,
+};
+
+int
+gp104_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return gf100_fb_new_(&gp104_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index d97d640e6..e905d44fa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -8,6 +8,8 @@ struct nvkm_fb_func {
void *(*dtor)(struct nvkm_fb *);
int (*oneinit)(struct nvkm_fb *);
void (*init)(struct nvkm_fb *);
+ void (*init_page)(struct nvkm_fb *);
+ void (*init_unkn)(struct nvkm_fb *);
void (*intr)(struct nvkm_fb *);
struct {
@@ -60,5 +62,8 @@ void nv46_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
u32 pitch, u32 flags, struct nvkm_fb_tile *);
int gf100_fb_oneinit(struct nvkm_fb *);
+void gf100_fb_init_page(struct nvkm_fb *);
bool gf100_fb_memtype_valid(struct nvkm_fb *, u32);
+
+void gm200_fb_init_page(struct nvkm_fb *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
index f816cbf2c..b9ec0ae67 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
@@ -47,4 +47,5 @@ int mcp77_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gf100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gk104_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gm107_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int gp100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
new file mode 100644
index 000000000..f3be408b5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "ram.h"
+
+#include <subdev/bios.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/rammap.h>
+
+static int
+gp100_ram_init(struct nvkm_ram *ram)
+{
+ struct nvkm_subdev *subdev = &ram->fb->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_bios *bios = device->bios;
+ u8 ver, hdr, cnt, len, snr, ssz;
+ u32 data;
+ int i;
+
+ /* Run a bunch of scripts from the rammap table. There are actually
+ * individual pointers for each rammap entry too, but NVIDIA seems
+ * to just run the last two entries' scripts early in init and never
+ * again, so for now we run them all once.
+ *
+ * I strongly suspect that each script is for a separate mode
+ * (likely selected by 0x9a065c's lower bits?), and that the
+ * binary driver skips the one that has already been set up by
+ * the init tables.
+ */
+ data = nvbios_rammapTe(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
+ if (!data || hdr < 0x15)
+ return -EINVAL;
+
+ cnt = nvbios_rd08(bios, data + 0x14); /* guess at count */
+ data = nvbios_rd32(bios, data + 0x10); /* guess u32... */
+ if (cnt) {
+ u32 save = nvkm_rd32(device, 0x9a065c) & 0x000000f0;
+ for (i = 0; i < cnt; i++, data += 4) {
+ if (i != save >> 4) {
+ nvkm_mask(device, 0x9a065c, 0x000000f0, i << 4);
+ nvbios_exec(&(struct nvbios_init) {
+ .subdev = subdev,
+ .bios = bios,
+ .offset = nvbios_rd32(bios, data),
+ .execute = 1,
+ });
+ }
+ }
+ nvkm_mask(device, 0x9a065c, 0x000000f0, save);
+ }
+
+ nvkm_mask(device, 0x9a0584, 0x11000000, 0x00000000);
+ nvkm_wr32(device, 0x10ecc0, 0xffffffff);
+ nvkm_mask(device, 0x9a0160, 0x00000010, 0x00000010);
+ return 0;
+}
+
+static const struct nvkm_ram_func
+gp100_ram_func = {
+ .init = gp100_ram_init,
+ .get = gf100_ram_get,
+ .put = gf100_ram_put,
+};
+
+int
+gp100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+ struct nvkm_ram *ram;
+ struct nvkm_subdev *subdev = &fb->subdev;
+ struct nvkm_device *device = subdev->device;
+ enum nvkm_ram_type type = nvkm_fb_bios_memtype(device->bios);
+ const u32 rsvd_head = ( 256 * 1024); /* vga memory */
+ const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
+ u32 fbpa_num = nvkm_rd32(device, 0x022438), fbpa;
+ u32 fbio_opt = nvkm_rd32(device, 0x021c14);
+ u64 part, size = 0, comm = ~0ULL;
+ bool mixed = false;
+ int ret;
+
+ nvkm_debug(subdev, "022438: %08x\n", fbpa_num);
+ nvkm_debug(subdev, "021c14: %08x\n", fbio_opt);
+ for (fbpa = 0; fbpa < fbpa_num; fbpa++) {
+ if (!(fbio_opt & (1 << fbpa))) {
+ part = nvkm_rd32(device, 0x90020c + (fbpa * 0x4000));
+ nvkm_debug(subdev, "fbpa %02x: %lld MiB\n", fbpa, part);
+ part = part << 20;
+ if (part != comm) {
+ if (comm != ~0ULL)
+ mixed = true;
+ comm = min(comm, part);
+ }
+ size = size + part;
+ }
+ }
+
+ ret = nvkm_ram_new_(&gp100_ram_func, fb, type, size, 0, &ram);
+ *pram = ram;
+ if (ret)
+ return ret;
+
+ nvkm_mm_fini(&ram->vram);
+
+ if (mixed) {
+ ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+ ((comm * fbpa_num) - rsvd_head) >>
+ NVKM_RAM_MM_SHIFT, 1);
+ if (ret)
+ return ret;
+
+ ret = nvkm_mm_init(&ram->vram, (0x1000000000ULL + comm) >>
+ NVKM_RAM_MM_SHIFT,
+ (size - (comm * fbpa_num) - rsvd_tail) >>
+ NVKM_RAM_MM_SHIFT, 1);
+ if (ret)
+ return ret;
+ } else {
+ ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+ (size - rsvd_head - rsvd_tail) >>
+ NVKM_RAM_MM_SHIFT, 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
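
gp100_ram_new() has to size VRAM from per-FBPA partition registers that may disagree: it tracks the smallest partition size (comm) and, when the parts are mixed, publishes the uniform slice as one allocation region and the leftover as a second region placed above a 64GiB (0x1000000000) boundary. A standalone sketch of just that accounting, with made-up partition sizes and the NVKM_RAM_MM_SHIFT page scaling omitted:

#include <stdint.h>
#include <stdio.h>

#define RSVD_HEAD ( 256ull * 1024) /* vga memory */
#define RSVD_TAIL (1024ull * 1024) /* vbios etc */

int main(void)
{
	/* made-up per-FBPA sizes in MiB; the real ones come from
	 * register 0x90020c + (fbpa * 0x4000) */
	uint64_t fbpa_mib[] = { 1024, 1024, 768, 1024 };
	int fbpa_num = 4, mixed = 0, i;
	uint64_t size = 0, comm = ~0ULL;

	for (i = 0; i < fbpa_num; i++) {
		uint64_t part = fbpa_mib[i] << 20;
		if (part != comm) {
			if (comm != ~0ULL)
				mixed = 1;
			comm = comm < part ? comm : part;
		}
		size += part;
	}

	if (mixed) {
		/* uniform slice present on every FBPA, minus the VGA hole */
		printf("lo: off %llu size %llu\n",
		       (unsigned long long)RSVD_HEAD,
		       (unsigned long long)(comm * fbpa_num - RSVD_HEAD));
		/* the remainder is remapped above the 64GiB boundary */
		printf("hi: off %llx size %llu\n",
		       (unsigned long long)(0x1000000000ULL + comm),
		       (unsigned long long)(size - comm * fbpa_num - RSVD_TAIL));
	} else {
		printf("one region: %llu bytes usable\n",
		       (unsigned long long)(size - RSVD_HEAD - RSVD_TAIL));
	}
	return 0;
}
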
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index 6b8f2a19b..a6a7fa0d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -109,7 +109,7 @@ struct gk20a_instmem {
u16 iommu_bit;
/* Only used by DMA API */
- struct dma_attrs attrs;
+ unsigned long attrs;
};
#define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base)
@@ -293,7 +293,7 @@ gk20a_instobj_dtor_dma(struct nvkm_memory *memory)
goto out;
dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->base.vaddr,
- node->handle, &imem->attrs);
+ node->handle, imem->attrs);
out:
return node;
@@ -386,7 +386,7 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
&node->handle, GFP_KERNEL,
- &imem->attrs);
+ imem->attrs);
if (!node->base.vaddr) {
nvkm_error(subdev, "cannot allocate DMA memory\n");
return -ENOMEM;
@@ -597,10 +597,9 @@ gk20a_instmem_new(struct nvkm_device *device, int index,
nvkm_info(&imem->base.subdev, "using IOMMU\n");
} else {
- init_dma_attrs(&imem->attrs);
- dma_set_attr(DMA_ATTR_NON_CONSISTENT, &imem->attrs);
- dma_set_attr(DMA_ATTR_WEAK_ORDERING, &imem->attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &imem->attrs);
+ imem->attrs = DMA_ATTR_NON_CONSISTENT |
+ DMA_ATTR_WEAK_ORDERING |
+ DMA_ATTR_WRITE_COMBINE;
nvkm_info(&imem->base.subdev, "using DMA API\n");
}
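
The instmem hunk above is an API migration, not a behaviour change: in this kernel generation DMA attributes became a plain unsigned long bitmask passed straight to dma_alloc_attrs()/dma_free_attrs(), replacing the struct dma_attrs plus init_dma_attrs()/dma_set_attr() dance. A trivial sketch of the new shape, using hypothetical bit values rather than the real DMA_ATTR_* constants from linux/dma-mapping.h:

#include <stdio.h>

/* hypothetical bit values; the kernel defines the real DMA_ATTR_* bits */
#define ATTR_NON_CONSISTENT (1ul << 3)
#define ATTR_WEAK_ORDERING  (1ul << 4)
#define ATTR_WRITE_COMBINE  (1ul << 5)

int main(void)
{
	/* new style: compose the mask directly, no init/set helpers */
	unsigned long attrs = ATTR_NON_CONSISTENT |
			      ATTR_WEAK_ORDERING |
			      ATTR_WRITE_COMBINE;

	printf("attrs %#lx, write-combined: %s\n", attrs,
	       (attrs & ATTR_WRITE_COMBINE) ? "yes" : "no");
	return 0;
}
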
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
index 932b36659..12d6f4f10 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
@@ -3,3 +3,4 @@ nvkm-y += nvkm/subdev/ltc/gf100.o
nvkm-y += nvkm/subdev/ltc/gk104.o
nvkm-y += nvkm/subdev/ltc/gm107.o
nvkm-y += nvkm/subdev/ltc/gm200.o
+nvkm-y += nvkm/subdev/ltc/gp100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
index c9eb67796..4a0fa0a9b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
@@ -23,7 +23,6 @@
*/
#include "priv.h"
-#include <core/enum.h>
#include <subdev/fb.h>
#include <subdev/timer.h>
@@ -71,7 +70,7 @@ gf100_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
nvkm_wr32(device, 0x17ea58, depth);
}
-static const struct nvkm_bitfield
+const struct nvkm_bitfield
gf100_ltc_lts_intr_name[] = {
{ 0x00000001, "IDLE_ERROR_IQ" },
{ 0x00000002, "IDLE_ERROR_CBC" },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
index 389fb13a1..ec0a3844b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
@@ -68,18 +68,22 @@ gm107_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
nvkm_wr32(device, 0x17e34c, depth);
}
-static void
+void
gm107_ltc_intr_lts(struct nvkm_ltc *ltc, int c, int s)
{
struct nvkm_subdev *subdev = &ltc->subdev;
struct nvkm_device *device = subdev->device;
u32 base = 0x140400 + (c * 0x2000) + (s * 0x200);
- u32 stat = nvkm_rd32(device, base + 0x00c);
+ u32 intr = nvkm_rd32(device, base + 0x00c);
+ u16 stat = intr & 0x0000ffff;
+ char msg[128];
if (stat) {
- nvkm_error(subdev, "LTC%d_LTS%d: %08x\n", c, s, stat);
- nvkm_wr32(device, base + 0x00c, stat);
+ nvkm_snprintbf(msg, sizeof(msg), gf100_ltc_lts_intr_name, stat);
+ nvkm_error(subdev, "LTC%d_LTS%d: %08x [%s]\n", c, s, intr, msg);
}
+
+ nvkm_wr32(device, base + 0x00c, intr);
}
void
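
With gf100_ltc_lts_intr_name now exported, the gm107 LTS handler decodes the low 16 status bits into names instead of printing a bare word, and acks the whole interrupt register unconditionally. A self-contained sketch of that table-driven decode; the exact output format of nvkm_snprintbf is an assumption here, not copied from the kernel:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct bitfield { uint32_t bit; const char *name; };

static const struct bitfield lts_intr_name[] = {
	{ 0x00000001, "IDLE_ERROR_IQ" },
	{ 0x00000002, "IDLE_ERROR_CBC" },
	{ 0 }
};

/* append the name of every set bit, " | "-separated */
static void snprintbf(char *buf, size_t len,
		      const struct bitfield *bf, uint32_t stat)
{
	buf[0] = '\0';
	for (; bf->bit; bf++) {
		if (!(stat & bf->bit))
			continue;
		if (buf[0])
			strncat(buf, " | ", len - strlen(buf) - 1);
		strncat(buf, bf->name, len - strlen(buf) - 1);
	}
}

int main(void)
{
	uint32_t intr = 0x00010003;	   /* pretend status read */
	uint16_t stat = intr & 0x0000ffff; /* only the low bits are named */
	char msg[128];

	snprintbf(msg, sizeof(msg), lts_intr_name, stat);
	printf("LTC0_LTS0: %08x [%s]\n", intr, msg);
	return 0;
}
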
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c
new file mode 100644
index 000000000..0bdfb2f40
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+
+static void
+gp100_ltc_intr(struct nvkm_ltc *ltc)
+{
+ struct nvkm_device *device = ltc->subdev.device;
+ u32 mask;
+
+ mask = nvkm_rd32(device, 0x0001c0);
+ while (mask) {
+ u32 s, c = __ffs(mask);
+ for (s = 0; s < ltc->lts_nr; s++)
+ gm107_ltc_intr_lts(ltc, c, s);
+ mask &= ~(1 << c);
+ }
+}
+
+static int
+gp100_ltc_oneinit(struct nvkm_ltc *ltc)
+{
+ struct nvkm_device *device = ltc->subdev.device;
+ ltc->ltc_nr = nvkm_rd32(device, 0x12006c);
+ ltc->lts_nr = nvkm_rd32(device, 0x17e280) >> 28;
+ /*XXX: tagram allocation - TBD */
+ return nvkm_mm_init(&ltc->tags, 0, 0, 1);
+}
+
+static void
+gp100_ltc_init(struct nvkm_ltc *ltc)
+{
+ /*XXX: PMU LS call to setup tagram address */
+}
+
+static const struct nvkm_ltc_func
+gp100_ltc = {
+ .oneinit = gp100_ltc_oneinit,
+ .init = gp100_ltc_init,
+ .intr = gp100_ltc_intr,
+ .cbc_clear = gm107_ltc_cbc_clear,
+ .cbc_wait = gm107_ltc_cbc_wait,
+ .zbc = 16,
+ .zbc_clear_color = gm107_ltc_zbc_clear_color,
+ .zbc_clear_depth = gm107_ltc_zbc_clear_depth,
+ .invalidate = gf100_ltc_invalidate,
+ .flush = gf100_ltc_flush,
+};
+
+int
+gp100_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+{
+ return nvkm_ltc_new_(&gp100_ltc, device, index, pltc);
+}
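
gp100 funnels all LTC interrupts through one top-level status word at 0x0001c0, so the handler peels off set bits one at a time and sweeps every LTS inside each signalled LTC. The same loop in plain C, with GCC's __builtin_ctz standing in for the kernel's __ffs:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mask = 0x00000015;	/* pretend LTCs 0, 2 and 4 fired */
	int lts_nr = 4;

	while (mask) {
		int c = __builtin_ctz(mask); /* lowest set bit, like __ffs */
		for (int s = 0; s < lts_nr; s++)
			printf("handle LTC%d_LTS%d\n", c, s);
		mask &= ~(1u << c);
	}
	return 0;
}
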
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
index 6d81c695e..8b95f96e3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
@@ -2,6 +2,7 @@
#define __NVKM_LTC_PRIV_H__
#define nvkm_ltc(p) container_of((p), struct nvkm_ltc, subdev)
#include <subdev/ltc.h>
+#include <core/enum.h>
int nvkm_ltc_new_(const struct nvkm_ltc_func *, struct nvkm_device *,
int index, struct nvkm_ltc **);
@@ -31,8 +32,10 @@ void gf100_ltc_zbc_clear_color(struct nvkm_ltc *, int, const u32[4]);
void gf100_ltc_zbc_clear_depth(struct nvkm_ltc *, int, const u32);
void gf100_ltc_invalidate(struct nvkm_ltc *);
void gf100_ltc_flush(struct nvkm_ltc *);
+extern const struct nvkm_bitfield gf100_ltc_lts_intr_name[];
void gm107_ltc_intr(struct nvkm_ltc *);
+void gm107_ltc_intr_lts(struct nvkm_ltc *, int ltc, int lts);
void gm107_ltc_cbc_clear(struct nvkm_ltc *, u32, u32);
void gm107_ltc_cbc_wait(struct nvkm_ltc *);
void gm107_ltc_zbc_clear_color(struct nvkm_ltc *, int, const u32[4]);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
index 49695ac7b..12943f92c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
@@ -10,3 +10,4 @@ nvkm-y += nvkm/subdev/mc/gt215.o
nvkm-y += nvkm/subdev/mc/gf100.o
nvkm-y += nvkm/subdev/mc/gk104.o
nvkm-y += nvkm/subdev/mc/gk20a.o
+nvkm-y += nvkm/subdev/mc/gp100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
index 350a8caa8..6b25e25f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
@@ -27,43 +27,67 @@
#include <subdev/top.h>
void
-nvkm_mc_unk260(struct nvkm_mc *mc, u32 data)
+nvkm_mc_unk260(struct nvkm_device *device, u32 data)
{
- if (mc->func->unk260)
+ struct nvkm_mc *mc = device->mc;
+ if (likely(mc) && mc->func->unk260)
mc->func->unk260(mc, data);
}
void
-nvkm_mc_intr_unarm(struct nvkm_mc *mc)
+nvkm_mc_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx, bool en)
{
- return mc->func->intr_unarm(mc);
+ struct nvkm_mc *mc = device->mc;
+ const struct nvkm_mc_map *map;
+ if (likely(mc) && mc->func->intr_mask) {
+ u32 mask = nvkm_top_intr_mask(device, devidx);
+ for (map = mc->func->intr; !mask && map->stat; map++) {
+ if (map->unit == devidx)
+ mask = map->stat;
+ }
+ mc->func->intr_mask(mc, mask, en ? mask : 0);
+ }
+}
+
+void
+nvkm_mc_intr_unarm(struct nvkm_device *device)
+{
+ struct nvkm_mc *mc = device->mc;
+ if (likely(mc))
+ mc->func->intr_unarm(mc);
}
void
-nvkm_mc_intr_rearm(struct nvkm_mc *mc)
+nvkm_mc_intr_rearm(struct nvkm_device *device)
{
- return mc->func->intr_rearm(mc);
+ struct nvkm_mc *mc = device->mc;
+ if (likely(mc))
+ mc->func->intr_rearm(mc);
}
static u32
-nvkm_mc_intr_mask(struct nvkm_mc *mc)
+nvkm_mc_intr_stat(struct nvkm_mc *mc)
{
- u32 intr = mc->func->intr_mask(mc);
+ u32 intr = mc->func->intr_stat(mc);
if (WARN_ON_ONCE(intr == 0xffffffff))
intr = 0; /* likely fallen off the bus */
return intr;
}
void
-nvkm_mc_intr(struct nvkm_mc *mc, bool *handled)
+nvkm_mc_intr(struct nvkm_device *device, bool *handled)
{
- struct nvkm_device *device = mc->subdev.device;
+ struct nvkm_mc *mc = device->mc;
struct nvkm_subdev *subdev;
- const struct nvkm_mc_map *map = mc->func->intr;
- u32 stat, intr = nvkm_mc_intr_mask(mc);
+ const struct nvkm_mc_map *map;
+ u32 stat, intr;
u64 subdevs;
- stat = nvkm_top_intr(device->top, intr, &subdevs);
+ if (unlikely(!mc))
+ return;
+
+ intr = nvkm_mc_intr_stat(mc);
+ stat = nvkm_top_intr(device, intr, &subdevs);
while (subdevs) {
enum nvkm_devidx subidx = __ffs64(subdevs);
subdev = nvkm_device_subdev(device, subidx);
@@ -72,14 +96,13 @@ nvkm_mc_intr(struct nvkm_mc *mc, bool *handled)
subdevs &= ~BIT_ULL(subidx);
}
- while (map->stat) {
+ for (map = mc->func->intr; map->stat; map++) {
if (intr & map->stat) {
subdev = nvkm_device_subdev(device, map->unit);
if (subdev)
nvkm_subdev_intr(subdev);
stat &= ~map->stat;
}
- map++;
}
if (stat)
@@ -87,22 +110,32 @@ nvkm_mc_intr(struct nvkm_mc *mc, bool *handled)
*handled = intr != 0;
}
-static void
-nvkm_mc_reset_(struct nvkm_mc *mc, enum nvkm_devidx devidx)
+static u32
+nvkm_mc_reset_mask(struct nvkm_device *device, bool isauto,
+ enum nvkm_devidx devidx)
{
- struct nvkm_device *device = mc->subdev.device;
+ struct nvkm_mc *mc = device->mc;
const struct nvkm_mc_map *map;
- u64 pmc_enable;
-
- if (!(pmc_enable = nvkm_top_reset(device->top, devidx))) {
- for (map = mc->func->reset; map && map->stat; map++) {
- if (map->unit == devidx) {
- pmc_enable = map->stat;
- break;
+ u64 pmc_enable = 0;
+ if (likely(mc)) {
+ if (!(pmc_enable = nvkm_top_reset(device, devidx))) {
+ for (map = mc->func->reset; map && map->stat; map++) {
+ if (!isauto || !map->noauto) {
+ if (map->unit == devidx) {
+ pmc_enable = map->stat;
+ break;
+ }
+ }
}
}
}
+ return pmc_enable;
+}
+void
+nvkm_mc_reset(struct nvkm_device *device, enum nvkm_devidx devidx)
+{
+ u64 pmc_enable = nvkm_mc_reset_mask(device, true, devidx);
if (pmc_enable) {
nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
@@ -111,17 +144,27 @@ nvkm_mc_reset_(struct nvkm_mc *mc, enum nvkm_devidx devidx)
}
void
-nvkm_mc_reset(struct nvkm_mc *mc, enum nvkm_devidx devidx)
+nvkm_mc_disable(struct nvkm_device *device, enum nvkm_devidx devidx)
{
- if (likely(mc))
- nvkm_mc_reset_(mc, devidx);
+ u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx);
+ if (pmc_enable)
+ nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
+}
+
+void
+nvkm_mc_enable(struct nvkm_device *device, enum nvkm_devidx devidx)
+{
+ u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx);
+ if (pmc_enable) {
+ nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
+ nvkm_rd32(device, 0x000200);
+ }
}
static int
nvkm_mc_fini(struct nvkm_subdev *subdev, bool suspend)
{
- struct nvkm_mc *mc = nvkm_mc(subdev);
- nvkm_mc_intr_unarm(mc);
+ nvkm_mc_intr_unarm(subdev->device);
return 0;
}
@@ -131,7 +174,7 @@ nvkm_mc_init(struct nvkm_subdev *subdev)
struct nvkm_mc *mc = nvkm_mc(subdev);
if (mc->func->init)
mc->func->init(mc);
- nvkm_mc_intr_rearm(mc);
+ nvkm_mc_intr_rearm(subdev->device);
return 0;
}
@@ -148,16 +191,21 @@ nvkm_mc = {
.fini = nvkm_mc_fini,
};
+void
+nvkm_mc_ctor(const struct nvkm_mc_func *func, struct nvkm_device *device,
+ int index, struct nvkm_mc *mc)
+{
+ nvkm_subdev_ctor(&nvkm_mc, device, index, &mc->subdev);
+ mc->func = func;
+}
+
int
nvkm_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
int index, struct nvkm_mc **pmc)
{
struct nvkm_mc *mc;
-
if (!(mc = *pmc = kzalloc(sizeof(*mc), GFP_KERNEL)))
return -ENOMEM;
-
- nvkm_subdev_ctor(&nvkm_mc, device, index, &mc->subdev);
- mc->func = func;
+ nvkm_mc_ctor(func, device, index, *pmc);
return 0;
}
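
Two things happen in mc/base.c: the public entry points now take a struct nvkm_device * and tolerate a missing MC, and the old intr_mask hook, which actually read pending interrupt status, is renamed intr_stat so that intr_mask can be reused for real masking. The new nvkm_mc_intr_mask() resolves a subdevice to its PMC bit by asking the topology table first and falling back to the static map, roughly as in this sketch (types and table contents are simplified stand-ins):

#include <stdint.h>
#include <stdio.h>

struct mc_map { uint32_t stat; int unit; };

/* simplified stand-in for an mc->func->intr table */
static const struct mc_map intr_map[] = {
	{ 0x00000100, 1 /* FIFO */ },
	{ 0x00001000, 2 /* GR   */ },
	{ 0 }
};

/* stand-in for nvkm_top_intr_mask(): returns 0 when the topology
 * table has no entry for the subdevice */
static uint32_t top_intr_mask(int devidx)
{
	return devidx == 2 ? 0x00010000 : 0;
}

static uint32_t unit_intr_mask(int devidx)
{
	uint32_t mask = top_intr_mask(devidx);
	const struct mc_map *map;

	/* fall back to the static map only if topology knew nothing */
	for (map = intr_map; !mask && map->stat; map++)
		if (map->unit == devidx)
			mask = map->stat;
	return mask;
}

int main(void)
{
	printf("FIFO mask %08x, GR mask %08x\n",
	       unit_intr_mask(1), unit_intr_mask(2));
	return 0;
}
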
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
index 5c85b47f0..c3d66ef5d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
@@ -57,7 +57,7 @@ g84_mc = {
.intr = g84_mc_intr,
.intr_unarm = nv04_mc_intr_unarm,
.intr_rearm = nv04_mc_intr_rearm,
- .intr_mask = nv04_mc_intr_mask,
+ .intr_stat = nv04_mc_intr_stat,
.reset = g84_mc_reset,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
index 0280b43cc..93ad4982c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
@@ -57,7 +57,7 @@ g98_mc = {
.intr = g98_mc_intr,
.intr_unarm = nv04_mc_intr_unarm,
.intr_rearm = nv04_mc_intr_rearm,
- .intr_mask = nv04_mc_intr_mask,
+ .intr_stat = nv04_mc_intr_stat,
.reset = g98_mc_reset,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
index 8397e223b..d2c4d6033 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
@@ -76,7 +76,7 @@ gf100_mc_intr_rearm(struct nvkm_mc *mc)
}
u32
-gf100_mc_intr_mask(struct nvkm_mc *mc)
+gf100_mc_intr_stat(struct nvkm_mc *mc)
{
struct nvkm_device *device = mc->subdev.device;
u32 intr0 = nvkm_rd32(device, 0x000100);
@@ -85,6 +85,14 @@ gf100_mc_intr_mask(struct nvkm_mc *mc)
}
void
+gf100_mc_intr_mask(struct nvkm_mc *mc, u32 mask, u32 stat)
+{
+ struct nvkm_device *device = mc->subdev.device;
+ nvkm_mask(device, 0x000640, mask, stat);
+ nvkm_mask(device, 0x000644, mask, stat);
+}
+
+void
gf100_mc_unk260(struct nvkm_mc *mc, u32 data)
{
nvkm_wr32(mc->subdev.device, 0x000260, data);
@@ -97,6 +105,7 @@ gf100_mc = {
.intr_unarm = gf100_mc_intr_unarm,
.intr_rearm = gf100_mc_intr_rearm,
.intr_mask = gf100_mc_intr_mask,
+ .intr_stat = gf100_mc_intr_stat,
.reset = gf100_mc_reset,
.unk260 = gf100_mc_unk260,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c
index 317464212..7b8c6ecad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c
@@ -26,6 +26,7 @@
const struct nvkm_mc_map
gk104_mc_reset[] = {
{ 0x00000100, NVKM_ENGINE_FIFO },
+ { 0x00002000, NVKM_SUBDEV_PMU, true },
{}
};
@@ -53,6 +54,7 @@ gk104_mc = {
.intr_unarm = gf100_mc_intr_unarm,
.intr_rearm = gf100_mc_intr_rearm,
.intr_mask = gf100_mc_intr_mask,
+ .intr_stat = gf100_mc_intr_stat,
.reset = gk104_mc_reset,
.unk260 = gf100_mc_unk260,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
index 60b044f51..ca1bf3279 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
@@ -30,6 +30,7 @@ gk20a_mc = {
.intr_unarm = gf100_mc_intr_unarm,
.intr_rearm = gf100_mc_intr_rearm,
.intr_mask = gf100_mc_intr_mask,
+ .intr_stat = gf100_mc_intr_stat,
.reset = gk104_mc_reset,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
new file mode 100644
index 000000000..4d22f4abd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#define gp100_mc(p) container_of((p), struct gp100_mc, base)
+#include "priv.h"
+
+struct gp100_mc {
+ struct nvkm_mc base;
+ spinlock_t lock;
+ bool intr;
+ u32 mask;
+};
+
+static void
+gp100_mc_intr_update(struct gp100_mc *mc)
+{
+ struct nvkm_device *device = mc->base.subdev.device;
+ u32 mask = mc->intr ? mc->mask : 0, i;
+ for (i = 0; i < 2; i++) {
+ nvkm_wr32(device, 0x000180 + (i * 0x04), ~mask);
+ nvkm_wr32(device, 0x000160 + (i * 0x04), mask);
+ }
+}
+
+static void
+gp100_mc_intr_unarm(struct nvkm_mc *base)
+{
+ struct gp100_mc *mc = gp100_mc(base);
+ unsigned long flags;
+ spin_lock_irqsave(&mc->lock, flags);
+ mc->intr = false;
+ gp100_mc_intr_update(mc);
+ spin_unlock_irqrestore(&mc->lock, flags);
+}
+
+static void
+gp100_mc_intr_rearm(struct nvkm_mc *base)
+{
+ struct gp100_mc *mc = gp100_mc(base);
+ unsigned long flags;
+ spin_lock_irqsave(&mc->lock, flags);
+ mc->intr = true;
+ gp100_mc_intr_update(mc);
+ spin_unlock_irqrestore(&mc->lock, flags);
+}
+
+static void
+gp100_mc_intr_mask(struct nvkm_mc *base, u32 mask, u32 intr)
+{
+ struct gp100_mc *mc = gp100_mc(base);
+ unsigned long flags;
+ spin_lock_irqsave(&mc->lock, flags);
+ mc->mask = (mc->mask & ~mask) | intr;
+ gp100_mc_intr_update(mc);
+ spin_unlock_irqrestore(&mc->lock, flags);
+}
+
+static const struct nvkm_mc_func
+gp100_mc = {
+ .init = nv50_mc_init,
+ .intr = gk104_mc_intr,
+ .intr_unarm = gp100_mc_intr_unarm,
+ .intr_rearm = gp100_mc_intr_rearm,
+ .intr_mask = gp100_mc_intr_mask,
+ .intr_stat = gf100_mc_intr_stat,
+ .reset = gk104_mc_reset,
+};
+
+int
+gp100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+ struct gp100_mc *mc;
+
+ if (!(mc = kzalloc(sizeof(*mc), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_mc_ctor(&gp100_mc, device, index, &mc->base);
+ *pmc = &mc->base;
+
+ spin_lock_init(&mc->lock);
+ mc->intr = false;
+ mc->mask = 0x7fffffff;
+ return 0;
+}
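
GP100 arms interrupts through set/clear register pairs (0x000160/0x000180 in the code above), so the driver keeps a software shadow of the wanted mask plus an armed flag and replays both registers on every change, all under a lock. A user-space sketch of the same idea, with a pthread mutex standing in for the spinlock and printf for the register writes:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct mc {
	pthread_mutex_t lock;
	int armed;	/* rearm/unarm state */
	uint32_t mask;	/* which units may interrupt */
};

static void mc_update(struct mc *mc)
{
	uint32_t mask = mc->armed ? mc->mask : 0;
	/* stand-ins for the 0x000180 (disable) / 0x000160 (enable) writes */
	printf("wr 0x180 = %08x, wr 0x160 = %08x\n", ~mask, mask);
}

static void mc_intr_mask(struct mc *mc, uint32_t mask, uint32_t intr)
{
	pthread_mutex_lock(&mc->lock);
	mc->mask = (mc->mask & ~mask) | intr;
	mc_update(mc);
	pthread_mutex_unlock(&mc->lock);
}

int main(void)
{
	struct mc mc = { PTHREAD_MUTEX_INITIALIZER, 1, 0x7fffffff };

	mc_intr_mask(&mc, 0x00001000, 0); /* mask one unit off */
	return 0;
}
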
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c
index aad0ba95b..99d50a3d9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c
@@ -53,13 +53,20 @@ gt215_mc_intr[] = {
{},
};
+static void
+gt215_mc_intr_mask(struct nvkm_mc *mc, u32 mask, u32 stat)
+{
+ nvkm_mask(mc->subdev.device, 0x000640, mask, stat);
+}
+
static const struct nvkm_mc_func
gt215_mc = {
.init = nv50_mc_init,
.intr = gt215_mc_intr,
.intr_unarm = nv04_mc_intr_unarm,
.intr_rearm = nv04_mc_intr_rearm,
- .intr_mask = nv04_mc_intr_mask,
+ .intr_mask = gt215_mc_intr_mask,
+ .intr_stat = nv04_mc_intr_stat,
.reset = gt215_mc_reset,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
index a062624e9..6509defd1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
@@ -56,7 +56,7 @@ nv04_mc_intr_rearm(struct nvkm_mc *mc)
}
u32
-nv04_mc_intr_mask(struct nvkm_mc *mc)
+nv04_mc_intr_stat(struct nvkm_mc *mc)
{
return nvkm_rd32(mc->subdev.device, 0x000100);
}
@@ -75,7 +75,7 @@ nv04_mc = {
.intr = nv04_mc_intr,
.intr_unarm = nv04_mc_intr_unarm,
.intr_rearm = nv04_mc_intr_rearm,
- .intr_mask = nv04_mc_intr_mask,
+ .intr_stat = nv04_mc_intr_stat,
.reset = nv04_mc_reset,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c
index 55f0b9166..921310790 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c
@@ -39,7 +39,7 @@ nv11_mc = {
.intr = nv11_mc_intr,
.intr_unarm = nv04_mc_intr_unarm,
.intr_rearm = nv04_mc_intr_rearm,
- .intr_mask = nv04_mc_intr_mask,
+ .intr_stat = nv04_mc_intr_stat,
.reset = nv04_mc_reset,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c
index c40fa67f7..64bf5bbf8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c
@@ -48,7 +48,7 @@ nv17_mc = {
.intr = nv17_mc_intr,
.intr_unarm = nv04_mc_intr_unarm,
.intr_rearm = nv04_mc_intr_rearm,
- .intr_mask = nv04_mc_intr_mask,
+ .intr_stat = nv04_mc_intr_stat,
.reset = nv17_mc_reset,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
index cc56271db..65fa44a64 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
@@ -43,7 +43,7 @@ nv44_mc = {
.intr = nv17_mc_intr,
.intr_unarm = nv04_mc_intr_unarm,
.intr_rearm = nv04_mc_intr_rearm,
- .intr_mask = nv04_mc_intr_mask,
+ .intr_stat = nv04_mc_intr_stat,
.reset = nv17_mc_reset,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
index 343b60785..fe93b4fd7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
@@ -50,7 +50,7 @@ nv50_mc = {
.intr = nv50_mc_intr,
.intr_unarm = nv04_mc_intr_unarm,
.intr_rearm = nv04_mc_intr_rearm,
- .intr_mask = nv04_mc_intr_mask,
+ .intr_stat = nv04_mc_intr_stat,
.reset = nv17_mc_reset,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
index a12038118..4f0576a06 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
@@ -3,12 +3,15 @@
#define nvkm_mc(p) container_of((p), struct nvkm_mc, subdev)
#include <subdev/mc.h>
+void nvkm_mc_ctor(const struct nvkm_mc_func *, struct nvkm_device *,
+ int index, struct nvkm_mc *);
int nvkm_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *,
int index, struct nvkm_mc **);
struct nvkm_mc_map {
u32 stat;
u32 unit;
+ bool noauto;
};
struct nvkm_mc_func {
@@ -18,8 +21,10 @@ struct nvkm_mc_func {
void (*intr_unarm)(struct nvkm_mc *);
/* enable reporting of interrupts to host */
void (*intr_rearm)(struct nvkm_mc *);
+ /* (un)mask delivery of specific interrupts */
+ void (*intr_mask)(struct nvkm_mc *, u32 mask, u32 stat);
/* retrieve pending interrupt mask (NV_PMC_INTR) */
- u32 (*intr_mask)(struct nvkm_mc *);
+ u32 (*intr_stat)(struct nvkm_mc *);
const struct nvkm_mc_map *reset;
void (*unk260)(struct nvkm_mc *, u32);
};
@@ -27,7 +32,7 @@ struct nvkm_mc_func {
void nv04_mc_init(struct nvkm_mc *);
void nv04_mc_intr_unarm(struct nvkm_mc *);
void nv04_mc_intr_rearm(struct nvkm_mc *);
-u32 nv04_mc_intr_mask(struct nvkm_mc *);
+u32 nv04_mc_intr_stat(struct nvkm_mc *);
extern const struct nvkm_mc_map nv04_mc_reset[];
extern const struct nvkm_mc_map nv17_mc_intr[];
@@ -39,7 +44,8 @@ void nv50_mc_init(struct nvkm_mc *);
void gf100_mc_intr_unarm(struct nvkm_mc *);
void gf100_mc_intr_rearm(struct nvkm_mc *);
-u32 gf100_mc_intr_mask(struct nvkm_mc *);
+void gf100_mc_intr_mask(struct nvkm_mc *, u32, u32);
+u32 gf100_mc_intr_stat(struct nvkm_mc *);
void gf100_mc_unk260(struct nvkm_mc *, u32);
extern const struct nvkm_mc_map gk104_mc_intr[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
index 3c2519fde..2a31b7d66 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
@@ -10,3 +10,4 @@ nvkm-y += nvkm/subdev/pci/g94.o
nvkm-y += nvkm/subdev/pci/gf100.o
nvkm-y += nvkm/subdev/pci/gf106.o
nvkm-y += nvkm/subdev/pci/gk104.o
+nvkm-y += nvkm/subdev/pci/gp100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index 6b0328bd7..eb9b27819 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -69,15 +69,13 @@ static irqreturn_t
nvkm_pci_intr(int irq, void *arg)
{
struct nvkm_pci *pci = arg;
- struct nvkm_mc *mc = pci->subdev.device->mc;
+ struct nvkm_device *device = pci->subdev.device;
bool handled = false;
- if (likely(mc)) {
- nvkm_mc_intr_unarm(mc);
- if (pci->msi)
- pci->func->msi_rearm(pci);
- nvkm_mc_intr(mc, &handled);
- nvkm_mc_intr_rearm(mc);
- }
+ nvkm_mc_intr_unarm(device);
+ if (pci->msi)
+ pci->func->msi_rearm(pci);
+ nvkm_mc_intr(device, &handled);
+ nvkm_mc_intr_rearm(device);
return handled ? IRQ_HANDLED : IRQ_NONE;
}
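
The IRQ handler keeps its unarm → MSI rearm → dispatch → rearm order but pushes the NULL-MC check down into the nvkm_mc_* wrappers. Rearming the MSI message before dispatch matters because MSIs are edge-triggered: an event that fires while handlers run must generate a fresh edge. A stub-level sketch of the ordering:

#include <stdbool.h>
#include <stdio.h>

/* each step is a stub; only the ordering is the point */
static void intr_unarm(void) { puts("unarm");     }
static void msi_rearm(void)  { puts("msi rearm"); }
static bool dispatch(void)   { puts("dispatch");  return true; }
static void intr_rearm(void) { puts("rearm");     }

int main(void)
{
	bool handled;

	intr_unarm();	/* stop delivery while we inspect status */
	msi_rearm();	/* re-enable the edge first, so an event that
			 * arrives mid-dispatch raises a new interrupt */
	handled = dispatch();
	intr_rearm();	/* resume delivery */
	return handled ? 0 : 1;
}
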
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c
new file mode 100644
index 000000000..82c5234a0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+static void
+gp100_pci_msi_rearm(struct nvkm_pci *pci)
+{
+ nvkm_pci_wr32(pci, 0x0704, 0x00000000);
+}
+
+static const struct nvkm_pci_func
+gp100_pci_func = {
+ .rd32 = nv40_pci_rd32,
+ .wr08 = nv40_pci_wr08,
+ .wr32 = nv40_pci_wr32,
+ .msi_rearm = gp100_pci_msi_rearm,
+};
+
+int
+gp100_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+{
+ return nvkm_pci_new_(&gp100_pci_func, device, index, ppci);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
index 213fdba6c..314be2192 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
@@ -19,8 +19,9 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-
#include "priv.h"
+
+#include <subdev/mc.h>
#include <subdev/timer.h>
static const char *
@@ -70,12 +71,11 @@ nvkm_secboot_falcon_enable(struct nvkm_secboot *sb)
int ret;
/* enable engine */
- nvkm_mask(device, 0x200, sb->enable_mask, sb->enable_mask);
- nvkm_rd32(device, 0x200);
+ nvkm_mc_enable(device, sb->devidx);
ret = nvkm_wait_msec(device, 10, sb->base + 0x10c, 0x6, 0x0);
if (ret < 0) {
- nvkm_mask(device, 0x200, sb->enable_mask, 0x0);
nvkm_error(&sb->subdev, "Falcon mem scrubbing timeout\n");
+ nvkm_mc_disable(device, sb->devidx);
return ret;
}
@@ -85,8 +85,7 @@ nvkm_secboot_falcon_enable(struct nvkm_secboot *sb)
/* enable IRQs */
nvkm_wr32(device, sb->base + 0x010, 0xff);
- nvkm_mask(device, 0x640, sb->irq_mask, sb->irq_mask);
- nvkm_mask(device, 0x644, sb->irq_mask, sb->irq_mask);
+ nvkm_mc_intr_mask(device, sb->devidx, true);
return 0;
}
@@ -97,14 +96,13 @@ nvkm_secboot_falcon_disable(struct nvkm_secboot *sb)
struct nvkm_device *device = sb->subdev.device;
/* disable IRQs and wait for any previous code to complete */
- nvkm_mask(device, 0x644, sb->irq_mask, 0x0);
- nvkm_mask(device, 0x640, sb->irq_mask, 0x0);
+ nvkm_mc_intr_mask(device, sb->devidx, false);
nvkm_wr32(device, sb->base + 0x014, 0xff);
falcon_wait_idle(device, sb->base);
/* disable engine */
- nvkm_mask(device, 0x200, sb->enable_mask, 0x0);
+ nvkm_mc_disable(device, sb->devidx);
return 0;
}
@@ -216,14 +214,7 @@ nvkm_secboot_oneinit(struct nvkm_subdev *subdev)
return ret;
}
- /*
- * Build all blobs - the same blobs can be used to perform secure boot
- * multiple times
- */
- if (sb->func->prepare_blobs)
- ret = sb->func->prepare_blobs(sb);
-
- return ret;
+ return 0;
}
static int
@@ -270,9 +261,8 @@ nvkm_secboot_ctor(const struct nvkm_secboot_func *func,
/* setup the performing falcon's base address and masks */
switch (func->boot_falcon) {
case NVKM_SECBOOT_FALCON_PMU:
+ sb->devidx = NVKM_SUBDEV_PMU;
sb->base = 0x10a000;
- sb->irq_mask = 0x1000000;
- sb->enable_mask = 0x2000;
break;
default:
nvkm_error(&sb->subdev, "invalid secure boot falcon\n");
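
Secboot now drives engine power and interrupt routing through the MC helpers keyed by sb->devidx instead of hard-coding PMC masks, and it still refuses to boot a falcon until the memory scrubber reports idle. A generic sketch of that bounded poll (the kernel's nvkm_wait_msec sleeps between reads; this sketch just counts attempts, and the register model is fake):

#include <stdint.h>
#include <stdio.h>

static int polls;

/* fake register model: scrubber busy bits (0x6) clear after 3 reads */
static uint32_t rd_dmactl(void)
{
	return polls++ < 3 ? 0x6 : 0x0;
}

/* wait until (reg & mask) == data, giving up after 'tries' polls */
static int wait_eq(uint32_t mask, uint32_t data, int tries)
{
	while (tries--) {
		if ((rd_dmactl() & mask) == data)
			return 0;
		/* the kernel sleeps ~1ms per iteration here */
	}
	return -1;
}

int main(void)
{
	if (wait_eq(0x6, 0x0, 10)) {
		fprintf(stderr, "Falcon mem scrubbing timeout\n");
		return 1;
	}
	puts("scrub complete, safe to start the falcon");
	return 0;
}
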
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
index cefd0a923..c29921c1b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
@@ -860,6 +860,8 @@ gm200_secboot_prepare_ls_blob(struct gm200_secboot *gsb)
/* Write LS blob */
ret = ls_ucode_mgr_write_wpr(gsb, &mgr, gsb->ls_blob);
+ if (ret)
+ nvkm_gpuobj_del(&gsb->ls_blob);
cleanup:
ls_ucode_mgr_cleanup(&mgr);
@@ -1023,29 +1025,34 @@ gm20x_secboot_prepare_blobs(struct gm200_secboot *gsb)
int ret;
/* Load and prepare the managed falcon's firmwares */
- ret = gm200_secboot_prepare_ls_blob(gsb);
- if (ret)
- return ret;
+ if (!gsb->ls_blob) {
+ ret = gm200_secboot_prepare_ls_blob(gsb);
+ if (ret)
+ return ret;
+ }
/* Load the HS firmware that will load the LS firmwares */
- ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_load",
- &gsb->acr_load_blob,
- &gsb->acr_load_bl_desc, true);
- if (ret)
- return ret;
+ if (!gsb->acr_load_blob) {
+ ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_load",
+ &gsb->acr_load_blob,
+ &gsb->acr_load_bl_desc, true);
+ if (ret)
+ return ret;
+ }
/* Load the HS firmware bootloader */
- ret = gm200_secboot_prepare_hsbl_blob(gsb);
- if (ret)
- return ret;
+ if (!gsb->hsbl_blob) {
+ ret = gm200_secboot_prepare_hsbl_blob(gsb);
+ if (ret)
+ return ret;
+ }
return 0;
}
static int
-gm200_secboot_prepare_blobs(struct nvkm_secboot *sb)
+gm200_secboot_prepare_blobs(struct gm200_secboot *gsb)
{
- struct gm200_secboot *gsb = gm200_secboot(sb);
int ret;
ret = gm20x_secboot_prepare_blobs(gsb);
@@ -1053,15 +1060,37 @@ gm200_secboot_prepare_blobs(struct nvkm_secboot *sb)
return ret;
/* dGPU only: load the HS firmware that unprotects the WPR region */
- ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_unload",
- &gsb->acr_unload_blob,
- &gsb->acr_unload_bl_desc, false);
- if (ret)
- return ret;
+ if (!gsb->acr_unload_blob) {
+ ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_unload",
+ &gsb->acr_unload_blob,
+ &gsb->acr_unload_bl_desc, false);
+ if (ret)
+ return ret;
+ }
return 0;
}
+static int
+gm200_secboot_blobs_ready(struct gm200_secboot *gsb)
+{
+ struct nvkm_subdev *subdev = &gsb->base.subdev;
+ int ret;
+
+ /* firmware already loaded, nothing to do... */
+ if (gsb->firmware_ok)
+ return 0;
+
+ ret = gsb->func->prepare_blobs(gsb);
+ if (ret) {
+ nvkm_error(subdev, "failed to load secure firmware\n");
+ return ret;
+ }
+
+ gsb->firmware_ok = true;
+
+ return 0;
+}
/*
@@ -1234,6 +1263,11 @@ gm200_secboot_reset(struct nvkm_secboot *sb, enum nvkm_secboot_falcon falcon)
struct gm200_secboot *gsb = gm200_secboot(sb);
int ret;
+ /* Make sure all blobs are ready */
+ ret = gm200_secboot_blobs_ready(gsb);
+ if (ret)
+ return ret;
+
/*
* Dummy GM200 implementation: perform secure boot each time we are
* called on FECS. Since only FECS and GPCCS are managed and started
@@ -1373,7 +1407,6 @@ gm200_secboot = {
.dtor = gm200_secboot_dtor,
.init = gm200_secboot_init,
.fini = gm200_secboot_fini,
- .prepare_blobs = gm200_secboot_prepare_blobs,
.reset = gm200_secboot_reset,
.start = gm200_secboot_start,
.managed_falcons = BIT(NVKM_SECBOOT_FALCON_FECS) |
@@ -1415,6 +1448,7 @@ gm200_secboot_func = {
.bl_desc_size = sizeof(struct gm200_flcn_bl_desc),
.fixup_bl_desc = gm200_secboot_fixup_bl_desc,
.fixup_hs_desc = gm200_secboot_fixup_hs_desc,
+ .prepare_blobs = gm200_secboot_prepare_blobs,
};
int
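
prepare_blobs moves off nvkm_secboot_func and onto gm200_secboot_func, and is no longer called from oneinit: gm200_secboot_blobs_ready() builds the blobs lazily on first reset, caches success in firmware_ok, and each prepare helper skips blobs that already exist, so suspend/resume reuses them. The caching pattern in isolation:

#include <stdbool.h>
#include <stdio.h>

static bool firmware_ok;

static int prepare_blobs(void) /* pretend this loads the firmware */
{
	puts("loading secure firmware");
	return 0;
}

static int blobs_ready(void)
{
	int ret;

	if (firmware_ok) /* already loaded, nothing to do... */
		return 0;

	ret = prepare_blobs();
	if (ret) {
		fprintf(stderr, "failed to load secure firmware\n");
		return ret;
	}

	firmware_ok = true;
	return 0;
}

int main(void)
{
	blobs_ready(); /* first reset: loads */
	blobs_ready(); /* every later reset: no-op */
	return 0;
}
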
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
index 77872da73..875b0e9c4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
@@ -42,6 +42,32 @@ struct gm20b_flcn_bl_desc {
u32 data_size;
};
+static int
+gm20b_secboot_prepare_blobs(struct gm200_secboot *gsb)
+{
+ struct nvkm_subdev *subdev = &gsb->base.subdev;
+ int acr_size;
+ int ret;
+
+ ret = gm20x_secboot_prepare_blobs(gsb);
+ if (ret)
+ return ret;
+
+ acr_size = gsb->acr_load_blob->size;
+ /*
+ * On Tegra the WPR region is set by the bootloader. It is illegal for
+ * the HS blob to be larger than this region.
+ */
+ if (acr_size > gsb->wpr_size) {
+ nvkm_error(subdev, "WPR region too small for FW blob!\n");
+ nvkm_error(subdev, "required: %dB\n", acr_size);
+ nvkm_error(subdev, "WPR size: %dB\n", gsb->wpr_size);
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
/**
* gm20b_secboot_fixup_bl_desc - adapt BL descriptor to format used by GM20B FW
*
@@ -88,6 +114,7 @@ gm20b_secboot_func = {
.bl_desc_size = sizeof(struct gm20b_flcn_bl_desc),
.fixup_bl_desc = gm20b_secboot_fixup_bl_desc,
.fixup_hs_desc = gm20b_secboot_fixup_hs_desc,
+ .prepare_blobs = gm20b_secboot_prepare_blobs,
};
@@ -147,32 +174,6 @@ gm20b_tegra_read_wpr(struct gm200_secboot *gsb)
#endif
static int
-gm20b_secboot_prepare_blobs(struct nvkm_secboot *sb)
-{
- struct gm200_secboot *gsb = gm200_secboot(sb);
- int acr_size;
- int ret;
-
- ret = gm20x_secboot_prepare_blobs(gsb);
- if (ret)
- return ret;
-
- acr_size = gsb->acr_load_blob->size;
- /*
- * On Tegra the WPR region is set by the bootloader. It is illegal for
- * the HS blob to be larger than this region.
- */
- if (acr_size > gsb->wpr_size) {
- nvkm_error(&sb->subdev, "WPR region too small for FW blob!\n");
- nvkm_error(&sb->subdev, "required: %dB\n", acr_size);
- nvkm_error(&sb->subdev, "WPR size: %dB\n", gsb->wpr_size);
- return -ENOSPC;
- }
-
- return 0;
-}
-
-static int
gm20b_secboot_init(struct nvkm_secboot *sb)
{
struct gm200_secboot *gsb = gm200_secboot(sb);
@@ -189,7 +190,6 @@ static const struct nvkm_secboot_func
gm20b_secboot = {
.dtor = gm200_secboot_dtor,
.init = gm20b_secboot_init,
- .prepare_blobs = gm20b_secboot_prepare_blobs,
.reset = gm200_secboot_reset,
.start = gm200_secboot_start,
.managed_falcons = BIT(NVKM_SECBOOT_FALCON_FECS),
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
index f2b09dee7..a9a8a0e10 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
@@ -30,7 +30,6 @@ struct nvkm_secboot_func {
int (*init)(struct nvkm_secboot *);
int (*fini)(struct nvkm_secboot *, bool suspend);
void *(*dtor)(struct nvkm_secboot *);
- int (*prepare_blobs)(struct nvkm_secboot *);
int (*reset)(struct nvkm_secboot *, enum nvkm_secboot_falcon);
int (*start)(struct nvkm_secboot *, enum nvkm_secboot_falcon);
@@ -147,10 +146,8 @@ struct hsflcn_acr_desc {
* @inst: instance block for HS falcon
* @pgd: page directory for the HS falcon
* @vm: address space used by the HS falcon
- * @bl_desc_size: size of the BL descriptor used by this chip.
- * @fixup_bl_desc: hook that generates the proper BL descriptor format from
- * the generic GM200 format into a data array of size
- * bl_desc_size
+ * @falcon_state: current state of the managed falcons
+ * @firmware_ok: whether the firmware blobs have been created
*/
struct gm200_secboot {
struct nvkm_secboot base;
@@ -196,9 +193,19 @@ struct gm200_secboot {
RUNNING,
} falcon_state[NVKM_SECBOOT_FALCON_END];
+ bool firmware_ok;
};
#define gm200_secboot(sb) container_of(sb, struct gm200_secboot, base)
+/**
+ * Functions we wish to abstract between GM200-like implementations
+ * @bl_desc_size: size of the BL descriptor used by this chip.
+ * @fixup_bl_desc: hook that converts the generic GM200 BL descriptor into
+ * the format this chip expects, written into a data array
+ * of size bl_desc_size
+ * @fixup_hs_desc: hook that adjusts the HS descriptor before it is used
+ * @prepare_blobs: prepares the various blobs needed for secure booting
+ */
struct gm200_secboot_func {
/*
* Size of the bootloader descriptor for this chip. A block of this
@@ -214,6 +221,7 @@ struct gm200_secboot_func {
* we want the HS FW to set up.
*/
void (*fixup_hs_desc)(struct gm200_secboot *, struct hsflcn_acr_desc *);
+ int (*prepare_blobs)(struct gm200_secboot *);
};
int gm200_secboot_init(struct nvkm_secboot *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
index a1b264664..fe063d572 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
@@ -41,8 +41,9 @@ nvkm_top_device_new(struct nvkm_top *top)
}
u32
-nvkm_top_reset(struct nvkm_top *top, enum nvkm_devidx index)
+nvkm_top_reset(struct nvkm_device *device, enum nvkm_devidx index)
{
+ struct nvkm_top *top = device->top;
struct nvkm_top_device *info;
if (top) {
@@ -56,8 +57,25 @@ nvkm_top_reset(struct nvkm_top *top, enum nvkm_devidx index)
}
u32
-nvkm_top_intr(struct nvkm_top *top, u32 intr, u64 *psubdevs)
+nvkm_top_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx)
{
+ struct nvkm_top *top = device->top;
+ struct nvkm_top_device *info;
+
+ if (top) {
+ list_for_each_entry(info, &top->device, head) {
+ if (info->index == devidx && info->intr >= 0)
+ return BIT(info->intr);
+ }
+ }
+
+ return 0;
+}
+
+u32
+nvkm_top_intr(struct nvkm_device *device, u32 intr, u64 *psubdevs)
+{
+ struct nvkm_top *top = device->top;
struct nvkm_top_device *info;
u64 subdevs = 0;
u32 handled = 0;
@@ -78,8 +96,9 @@ nvkm_top_intr(struct nvkm_top *top, u32 intr, u64 *psubdevs)
}
enum nvkm_devidx
-nvkm_top_fault(struct nvkm_top *top, int fault)
+nvkm_top_fault(struct nvkm_device *device, int fault)
{
+ struct nvkm_top *top = device->top;
struct nvkm_top_device *info;
list_for_each_entry(info, &top->device, head) {
@@ -91,8 +110,9 @@ nvkm_top_fault(struct nvkm_top *top, int fault)
}
enum nvkm_devidx
-nvkm_top_engine(struct nvkm_top *top, int index, int *runl, int *engn)
+nvkm_top_engine(struct nvkm_device *device, int index, int *runl, int *engn)
{
+ struct nvkm_top *top = device->top;
struct nvkm_top_device *info;
int n = 0;
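
nvkm_top_intr_mask() is the new topology-side answer to "which PMC status bit belongs to this subdevice": it walks the parsed device-info entries and returns BIT(intr) for a match, where intr < 0 marks an entry with no interrupt line. A sketch with a flat array in place of the kernel's linked list:

#include <stdint.h>
#include <stdio.h>

struct top_device { int index; int intr; };

static const struct top_device top[] = {
	{ 2, 12 },	/* subdevice 2 signals on PMC bit 12 */
	{ 5, -1 },	/* subdevice 5 has no interrupt line */
};

static uint32_t top_intr_mask(int devidx)
{
	for (unsigned i = 0; i < sizeof(top) / sizeof(top[0]); i++)
		if (top[i].index == devidx && top[i].intr >= 0)
			return 1u << top[i].intr;
	return 0;
}

int main(void)
{
	printf("dev 2 mask %08x, dev 5 mask %08x\n",
	       top_intr_mask(2), top_intr_mask(5));
	return 0;
}
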
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
index e06acc340..efac3402f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
@@ -29,7 +29,7 @@ gk104_top_oneinit(struct nvkm_top *top)
struct nvkm_subdev *subdev = &top->subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_top_device *info = NULL;
- u32 data, type;
+ u32 data, type, inst;
int i;
for (i = 0; i < 64; i++) {
@@ -37,6 +37,7 @@ gk104_top_oneinit(struct nvkm_top *top)
if (!(info = nvkm_top_device_new(top)))
return -ENOMEM;
type = ~0;
+ inst = 0;
}
data = nvkm_rd32(device, 0x022700 + (i * 0x04));
@@ -45,6 +46,7 @@ gk104_top_oneinit(struct nvkm_top *top)
case 0x00000000: /* NOT_VALID */
continue;
case 0x00000001: /* DATA */
+ inst = (data & 0x3c000000) >> 26;
info->addr = (data & 0x00fff000);
info->fault = (data & 0x000000f8) >> 3;
break;
@@ -67,27 +69,32 @@ gk104_top_oneinit(struct nvkm_top *top)
continue;
/* Translate engine type to NVKM engine identifier. */
+#define A_(A) if (inst == 0) info->index = NVKM_ENGINE_##A
+#define B_(A) if (inst + NVKM_ENGINE_##A##0 < NVKM_ENGINE_##A##_LAST + 1) \
+ info->index = NVKM_ENGINE_##A##0 + inst
switch (type) {
- case 0x00000000: info->index = NVKM_ENGINE_GR; break;
- case 0x00000001: info->index = NVKM_ENGINE_CE0; break;
- case 0x00000002: info->index = NVKM_ENGINE_CE1; break;
- case 0x00000003: info->index = NVKM_ENGINE_CE2; break;
- case 0x00000008: info->index = NVKM_ENGINE_MSPDEC; break;
- case 0x00000009: info->index = NVKM_ENGINE_MSPPP; break;
- case 0x0000000a: info->index = NVKM_ENGINE_MSVLD; break;
- case 0x0000000b: info->index = NVKM_ENGINE_MSENC; break;
- case 0x0000000c: info->index = NVKM_ENGINE_VIC; break;
- case 0x0000000d: info->index = NVKM_ENGINE_SEC; break;
- case 0x0000000e: info->index = NVKM_ENGINE_NVENC0; break;
- case 0x0000000f: info->index = NVKM_ENGINE_NVENC1; break;
- case 0x00000010: info->index = NVKM_ENGINE_NVDEC; break;
+ case 0x00000000: A_(GR ); break;
+ case 0x00000001: A_(CE0 ); break;
+ case 0x00000002: A_(CE1 ); break;
+ case 0x00000003: A_(CE2 ); break;
+ case 0x00000008: A_(MSPDEC); break;
+ case 0x00000009: A_(MSPPP ); break;
+ case 0x0000000a: A_(MSVLD ); break;
+ case 0x0000000b: A_(MSENC ); break;
+ case 0x0000000c: A_(VIC ); break;
+ case 0x0000000d: A_(SEC ); break;
+ case 0x0000000e: B_(NVENC ); break;
+ case 0x0000000f: A_(NVENC1); break;
+ case 0x00000010: A_(NVDEC ); break;
+ case 0x00000013: B_(CE ); break;
break;
default:
break;
}
- nvkm_debug(subdev, "%02x (%8s): addr %06x fault %2d engine %2d "
- "runlist %2d intr %2d reset %2d\n", type,
+ nvkm_debug(subdev, "%02x.%d (%8s): addr %06x fault %2d "
+ "engine %2d runlist %2d intr %2d "
+ "reset %2d\n", type, inst,
info->index == NVKM_SUBDEV_NR ? NULL :
nvkm_subdev_name[info->index],
info->addr, info->fault, info->engine, info->runlist,
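
gk104_top_oneinit() now also pulls an instance number out of the DATA word (bits 29:26) so multi-instance engines land on consecutive NVKM indices: A_() accepts only instance 0, while B_() adds the instance to the engine's base index after checking it stays within the engine's range. The same guard for a hypothetical three-instance engine:

#include <stdio.h>

/* hypothetical index space: a three-instance engine, then a limit */
enum {
	ENGINE_CE0 = 10,
	ENGINE_CE1,
	ENGINE_CE2,
	ENGINE_CE_LAST = ENGINE_CE2,
	ENGINE_NR = 32,
};

/* like B_(CE): valid only while base + inst stays within CE0..CE_LAST */
static int map_ce(int inst)
{
	if (inst + ENGINE_CE0 < ENGINE_CE_LAST + 1)
		return ENGINE_CE0 + inst;
	return ENGINE_NR;	/* out of range: leave unmapped */
}

int main(void)
{
	for (int inst = 0; inst < 5; inst++)
		printf("CE inst %d -> index %d\n", inst, map_ce(inst));
	return 0;
}
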
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
index 6b2d7531a..1c3d23b0e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
@@ -120,6 +120,8 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
data = nvbios_volt_parse(bios, &ver, &hdr, &cnt, &len, &info);
if (data && info.vidmask && info.base && info.step) {
+ volt->min_uv = info.min;
+ volt->max_uv = info.max;
for (i = 0; i < info.vidmask + 1; i++) {
if (info.base >= info.min &&
info.base <= info.max) {
@@ -131,6 +133,8 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
}
volt->vid_mask = info.vidmask;
} else if (data && info.vidmask) {
+ volt->min_uv = 0xffffffff;
+ volt->max_uv = 0;
for (i = 0; i < cnt; i++) {
data = nvbios_volt_entry_parse(bios, i, &ver, &hdr,
&ivid);
@@ -138,9 +142,14 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
volt->vid[volt->vid_nr].uv = ivid.voltage;
volt->vid[volt->vid_nr].vid = ivid.vid;
volt->vid_nr++;
+ volt->min_uv = min(volt->min_uv, ivid.voltage);
+ volt->max_uv = max(volt->max_uv, ivid.voltage);
}
}
volt->vid_mask = info.vidmask;
+ } else if (data && info.type == NVBIOS_VOLT_PWM) {
+ volt->min_uv = info.base;
+ volt->max_uv = info.base + info.pwm_range;
}
}
@@ -181,8 +190,11 @@ nvkm_volt_ctor(const struct nvkm_volt_func *func, struct nvkm_device *device,
volt->func = func;
/* A device without a BIOS is assumed to build the voltage table later */
- if (bios)
+ if (bios) {
nvkm_volt_parse_bios(bios, volt);
+ nvkm_debug(&volt->subdev, "min: %iuv max: %iuv\n",
+ volt->min_uv, volt->max_uv);
+ }
if (volt->vid_nr) {
for (i = 0; i < volt->vid_nr; i++) {
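
nvkm_volt_parse_bios() now records the voltage range alongside the VID table: straight from the header for range-based and PWM tables, or by folding min/max over the per-entry voltages otherwise, and the constructor logs the result. The fold in isolation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* example per-entry voltages in microvolts */
	uint32_t uv[] = { 820000, 900000, 780000, 1000000 };
	uint32_t min_uv = 0xffffffff, max_uv = 0;

	for (unsigned i = 0; i < sizeof(uv) / sizeof(uv[0]); i++) {
		min_uv = uv[i] < min_uv ? uv[i] : min_uv;
		max_uv = uv[i] > max_uv ? uv[i] : max_uv;
	}
	printf("min: %uuv max: %uuv\n", min_uv, max_uv);
	return 0;
}
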
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
index d55445532..ce5d83cdc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
@@ -77,18 +77,19 @@ gk20a_volt_get_cvb_t_voltage(int speedo, int temp, int s_scale, int t_scale,
return mv;
}
-int
+static int
gk20a_volt_calc_voltage(const struct cvb_coef *coef, int speedo)
{
+ static const int v_scale = 1000;
int mv;
mv = gk20a_volt_get_cvb_t_voltage(speedo, -10, 100, 10, coef);
- mv = DIV_ROUND_UP(mv, 1000);
+ mv = DIV_ROUND_UP(mv, v_scale);
return mv * 1000;
}
-int
+static int
gk20a_volt_vid_get(struct nvkm_volt *base)
{
struct gk20a_volt *volt = gk20a_volt(base);
@@ -103,7 +104,7 @@ gk20a_volt_vid_get(struct nvkm_volt *base)
return -EINVAL;
}
-int
+static int
gk20a_volt_vid_set(struct nvkm_volt *base, u8 vid)
{
struct gk20a_volt *volt = gk20a_volt(base);
@@ -113,7 +114,7 @@ gk20a_volt_vid_set(struct nvkm_volt *base, u8 vid)
return regulator_set_voltage(volt->vdd, volt->base.vid[vid].uv, 1200000);
}
-int
+static int
gk20a_volt_set_id(struct nvkm_volt *base, u8 id, int condition)
{
struct gk20a_volt *volt = gk20a_volt(base);
@@ -143,9 +144,9 @@ gk20a_volt = {
};
int
-_gk20a_volt_ctor(struct nvkm_device *device, int index,
- const struct cvb_coef *coefs, int nb_coefs,
- struct gk20a_volt *volt)
+gk20a_volt_ctor(struct nvkm_device *device, int index,
+ const struct cvb_coef *coefs, int nb_coefs,
+ int vmin, struct gk20a_volt *volt)
{
struct nvkm_device_tegra *tdev = device->func->tegra(device);
int i, uv;
@@ -160,9 +161,9 @@ _gk20a_volt_ctor(struct nvkm_device *device, int index,
volt->base.vid_nr = nb_coefs;
for (i = 0; i < volt->base.vid_nr; i++) {
volt->base.vid[i].vid = i;
- volt->base.vid[i].uv =
- gk20a_volt_calc_voltage(&coefs[i],
- tdev->gpu_speedo);
+ volt->base.vid[i].uv = max(
+ gk20a_volt_calc_voltage(&coefs[i], tdev->gpu_speedo),
+ vmin);
nvkm_debug(&volt->base.subdev, "%2d: vid=%d, uv=%d\n", i,
volt->base.vid[i].vid, volt->base.vid[i].uv);
}
@@ -180,6 +181,6 @@ gk20a_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
return -ENOMEM;
*pvolt = &volt->base;
- return _gk20a_volt_ctor(device, index, gk20a_cvb_coef,
- ARRAY_SIZE(gk20a_cvb_coef), volt);
+ return gk20a_volt_ctor(device, index, gk20a_cvb_coef,
+ ARRAY_SIZE(gk20a_cvb_coef), 0, volt);
}
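
For reference, the voltage these tables feed comes from a speedo-dependent polynomial. A minimal sketch, assuming the usual two-term Tegra CVB form (the in-tree helper gk20a_volt_get_cvb_t_voltage also folds in the temperature terms c3..c5, which are omitted here):

	/* mv = c2*s^2/s_scale^2 + c1*s/s_scale + c0, with intermediate rounding */
	static int cvb_mv(const struct cvb_coef *c, int speedo, int s_scale)
	{
		int mv;

		mv = DIV_ROUND_CLOSEST(c->c2 * speedo, s_scale);
		mv = DIV_ROUND_CLOSEST((mv + c->c1) * speedo, s_scale) + c->c0;
		return mv;
	}

With v_scale = 1000 as in the hunk above, gk20a_volt_calc_voltage() then rounds the result up to a whole millivolt and returns microvolts via DIV_ROUND_UP(mv, v_scale) * 1000.
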
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h
index 0fa3b502b..6a6c97f96 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h
@@ -37,13 +37,8 @@ struct gk20a_volt {
struct regulator *vdd;
};
-int _gk20a_volt_ctor(struct nvkm_device *device, int index,
- const struct cvb_coef *coefs, int nb_coefs,
- struct gk20a_volt *volt);
-
-int gk20a_volt_calc_voltage(const struct cvb_coef *coef, int speedo);
-int gk20a_volt_vid_get(struct nvkm_volt *volt);
-int gk20a_volt_vid_set(struct nvkm_volt *volt, u8 vid);
-int gk20a_volt_set_id(struct nvkm_volt *volt, u8 id, int condition);
+int gk20a_volt_ctor(struct nvkm_device *device, int index,
+ const struct cvb_coef *coefs, int nb_coefs,
+ int vmin, struct gk20a_volt *volt);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
index 49b5ecb70..74db4d289 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
@@ -41,16 +41,52 @@ const struct cvb_coef gm20b_cvb_coef[] = {
/* 921600 */ { 2647676, -106455, 1632 },
};
+static const struct cvb_coef gm20b_na_cvb_coef[] = {
+ /* KHz, c0, c1, c2, c3, c4, c5 */
+ /* 76800 */ { 814294, 8144, -940, 808, -21583, 226 },
+ /* 153600 */ { 856185, 8144, -940, 808, -21583, 226 },
+ /* 230400 */ { 898077, 8144, -940, 808, -21583, 226 },
+ /* 307200 */ { 939968, 8144, -940, 808, -21583, 226 },
+ /* 384000 */ { 981860, 8144, -940, 808, -21583, 226 },
+ /* 460800 */ { 1023751, 8144, -940, 808, -21583, 226 },
+ /* 537600 */ { 1065642, 8144, -940, 808, -21583, 226 },
+ /* 614400 */ { 1107534, 8144, -940, 808, -21583, 226 },
+ /* 691200 */ { 1149425, 8144, -940, 808, -21583, 226 },
+ /* 768000 */ { 1191317, 8144, -940, 808, -21583, 226 },
+ /* 844800 */ { 1233208, 8144, -940, 808, -21583, 226 },
+ /* 921600 */ { 1275100, 8144, -940, 808, -21583, 226 },
+ /* 998400 */ { 1316991, 8144, -940, 808, -21583, 226 },
+};
+
+const u32 speedo_to_vmin[] = {
+ /* 0, 1, 2, 3, 4, */
+ 950000, 840000, 818750, 840000, 810000,
+};
+
int
gm20b_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
{
+ struct nvkm_device_tegra *tdev = device->func->tegra(device);
struct gk20a_volt *volt;
+ u32 vmin;
+
+ if (tdev->gpu_speedo_id >= ARRAY_SIZE(speedo_to_vmin)) {
+ nvdev_error(device, "unsupported speedo %d\n",
+ tdev->gpu_speedo_id);
+ return -EINVAL;
+ }
volt = kzalloc(sizeof(*volt), GFP_KERNEL);
if (!volt)
return -ENOMEM;
*pvolt = &volt->base;
- return _gk20a_volt_ctor(device, index, gm20b_cvb_coef,
- ARRAY_SIZE(gm20b_cvb_coef), volt);
+ vmin = speedo_to_vmin[tdev->gpu_speedo_id];
+
+ if (tdev->gpu_speedo_id >= 1)
+ return gk20a_volt_ctor(device, index, gm20b_na_cvb_coef,
+ ARRAY_SIZE(gm20b_na_cvb_coef), vmin, volt);
+ else
+ return gk20a_volt_ctor(device, index, gm20b_cvb_coef,
+ ARRAY_SIZE(gm20b_cvb_coef), vmin, volt);
}
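
The net effect of the new vmin parameter, illustrated with numbers from the table above: every computed VID voltage is floored at the per-chip minimum inside gk20a_volt_ctor().

	int vmin = speedo_to_vmin[2];   /* 818750 uV for speedo id 2 */
	int uv   = gk20a_volt_calc_voltage(&coefs[i], tdev->gpu_speedo);

	volt->base.vid[i].uv = max(uv, vmin);   /* clamp to the chip's floor */
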
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index 336ad4de9..556f81f6b 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -4,11 +4,6 @@ config DRM_OMAP
depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
select OMAP2_DSS
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_SYS_FOPS
default n
help
DRM display driver for OMAP2/3/4 based boards.
diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig
index 2a618afe0..c226da145 100644
--- a/drivers/gpu/drm/omapdrm/displays/Kconfig
+++ b/drivers/gpu/drm/omapdrm/displays/Kconfig
@@ -1,80 +1,80 @@
menu "OMAPDRM External Display Device Drivers"
-config DISPLAY_ENCODER_OPA362
+config DRM_OMAP_ENCODER_OPA362
tristate "OPA362 external analog amplifier"
help
Driver for OPA362 external analog TV amplifier controlled
through a GPIO.
-config DISPLAY_ENCODER_TFP410
+config DRM_OMAP_ENCODER_TFP410
tristate "TFP410 DPI to DVI Encoder"
help
Driver for TFP410 DPI to DVI encoder.
-config DISPLAY_ENCODER_TPD12S015
+config DRM_OMAP_ENCODER_TPD12S015
tristate "TPD12S015 HDMI ESD protection and level shifter"
help
Driver for TPD12S015, which offers HDMI ESD protection and level
shifting.
-config DISPLAY_CONNECTOR_DVI
+config DRM_OMAP_CONNECTOR_DVI
tristate "DVI Connector"
depends on I2C
help
Driver for a generic DVI connector.
-config DISPLAY_CONNECTOR_HDMI
+config DRM_OMAP_CONNECTOR_HDMI
tristate "HDMI Connector"
help
Driver for a generic HDMI connector.
-config DISPLAY_CONNECTOR_ANALOG_TV
+config DRM_OMAP_CONNECTOR_ANALOG_TV
tristate "Analog TV Connector"
help
Driver for a generic analog TV connector.
-config DISPLAY_PANEL_DPI
+config DRM_OMAP_PANEL_DPI
tristate "Generic DPI panel"
help
Driver for generic DPI panels.
-config DISPLAY_PANEL_DSI_CM
+config DRM_OMAP_PANEL_DSI_CM
tristate "Generic DSI Command Mode Panel"
depends on BACKLIGHT_CLASS_DEVICE
help
Driver for generic DSI command mode panels.
-config DISPLAY_PANEL_SONY_ACX565AKM
+config DRM_OMAP_PANEL_SONY_ACX565AKM
tristate "ACX565AKM Panel"
depends on SPI && BACKLIGHT_CLASS_DEVICE
help
This is the LCD panel used on the Nokia N900.
-config DISPLAY_PANEL_LGPHILIPS_LB035Q02
+config DRM_OMAP_PANEL_LGPHILIPS_LB035Q02
tristate "LG.Philips LB035Q02 LCD Panel"
depends on SPI
help
LCD panel used on the Gumstix Overo Palo35.
-config DISPLAY_PANEL_SHARP_LS037V7DW01
+config DRM_OMAP_PANEL_SHARP_LS037V7DW01
tristate "Sharp LS037V7DW01 LCD Panel"
depends on BACKLIGHT_CLASS_DEVICE
help
LCD panel used in TI's SDP3430 and EVM boards.
-config DISPLAY_PANEL_TPO_TD028TTEC1
+config DRM_OMAP_PANEL_TPO_TD028TTEC1
tristate "TPO TD028TTEC1 LCD Panel"
depends on SPI
help
LCD panel used in Openmoko.
-config DISPLAY_PANEL_TPO_TD043MTEA1
+config DRM_OMAP_PANEL_TPO_TD043MTEA1
tristate "TPO TD043MTEA1 LCD Panel"
depends on SPI
help
LCD panel used in the OMAP3 Pandora.
-config DISPLAY_PANEL_NEC_NL8048HL11
+config DRM_OMAP_PANEL_NEC_NL8048HL11
tristate "NEC NL8048HL11 Panel"
depends on SPI
depends on BACKLIGHT_CLASS_DEVICE
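
One practical consequence of this rename, shown as a hypothetical .config migration: Kconfig does not migrate old symbols automatically, so existing configurations silently lose these drivers until re-selected.

	# before
	CONFIG_DISPLAY_PANEL_DPI=m
	# after
	CONFIG_DRM_OMAP_PANEL_DPI=m
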
diff --git a/drivers/gpu/drm/omapdrm/displays/Makefile b/drivers/gpu/drm/omapdrm/displays/Makefile
index 9aa176bfb..46baafb1a 100644
--- a/drivers/gpu/drm/omapdrm/displays/Makefile
+++ b/drivers/gpu/drm/omapdrm/displays/Makefile
@@ -1,14 +1,14 @@
-obj-$(CONFIG_DISPLAY_ENCODER_OPA362) += encoder-opa362.o
-obj-$(CONFIG_DISPLAY_ENCODER_TFP410) += encoder-tfp410.o
-obj-$(CONFIG_DISPLAY_ENCODER_TPD12S015) += encoder-tpd12s015.o
-obj-$(CONFIG_DISPLAY_CONNECTOR_DVI) += connector-dvi.o
-obj-$(CONFIG_DISPLAY_CONNECTOR_HDMI) += connector-hdmi.o
-obj-$(CONFIG_DISPLAY_CONNECTOR_ANALOG_TV) += connector-analog-tv.o
-obj-$(CONFIG_DISPLAY_PANEL_DPI) += panel-dpi.o
-obj-$(CONFIG_DISPLAY_PANEL_DSI_CM) += panel-dsi-cm.o
-obj-$(CONFIG_DISPLAY_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
-obj-$(CONFIG_DISPLAY_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o
-obj-$(CONFIG_DISPLAY_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
-obj-$(CONFIG_DISPLAY_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
-obj-$(CONFIG_DISPLAY_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
-obj-$(CONFIG_DISPLAY_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
+obj-$(CONFIG_DRM_OMAP_ENCODER_OPA362) += encoder-opa362.o
+obj-$(CONFIG_DRM_OMAP_ENCODER_TFP410) += encoder-tfp410.o
+obj-$(CONFIG_DRM_OMAP_ENCODER_TPD12S015) += encoder-tpd12s015.o
+obj-$(CONFIG_DRM_OMAP_CONNECTOR_DVI) += connector-dvi.o
+obj-$(CONFIG_DRM_OMAP_CONNECTOR_HDMI) += connector-hdmi.o
+obj-$(CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV) += connector-analog-tv.o
+obj-$(CONFIG_DRM_OMAP_PANEL_DPI) += panel-dpi.o
+obj-$(CONFIG_DRM_OMAP_PANEL_DSI_CM) += panel-dsi-cm.o
+obj-$(CONFIG_DRM_OMAP_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
+obj-$(CONFIG_DRM_OMAP_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o
+obj-$(CONFIG_DRM_OMAP_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
+obj-$(CONFIG_DRM_OMAP_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
+obj-$(CONFIG_DRM_OMAP_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
+obj-$(CONFIG_DRM_OMAP_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
index 8511c648a..3485d1ecd 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
@@ -14,9 +14,10 @@
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <video/omapdss.h>
#include <video/omap-panel-data.h>
+#include "../dss/omapdss.h"
+
struct panel_drv_data {
struct omap_dss_device dssdev;
struct omap_dss_device *in;
@@ -25,7 +26,6 @@ struct panel_drv_data {
struct omap_video_timings timings;
- enum omap_dss_venc_type connector_type;
bool invert_polarity;
};
@@ -45,10 +45,6 @@ static const struct omap_video_timings tvc_pal_timings = {
static const struct of_device_id tvc_of_match[];
-struct tvc_of_data {
- enum omap_dss_venc_type connector_type;
-};
-
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
static int tvc_connect(struct omap_dss_device *dssdev)
@@ -99,7 +95,7 @@ static int tvc_enable(struct omap_dss_device *dssdev)
in->ops.atv->set_timings(in, &ddata->timings);
if (!ddata->dev->of_node) {
- in->ops.atv->set_type(in, ddata->connector_type);
+ in->ops.atv->set_type(in, OMAP_DSS_VENC_TYPE_COMPOSITE);
in->ops.atv->invert_vid_out_polarity(in,
ddata->invert_polarity);
@@ -207,7 +203,6 @@ static int tvc_probe_pdata(struct platform_device *pdev)
ddata->in = in;
- ddata->connector_type = pdata->connector_type;
ddata->invert_polarity = pdata->invert_polarity;
dssdev = &ddata->dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
index 747f26a55..684b7aeda 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
@@ -15,10 +15,10 @@
#include <linux/slab.h>
#include <drm/drm_edid.h>
-
-#include <video/omapdss.h>
#include <video/omap-panel-data.h>
+#include "../dss/omapdss.h"
+
static const struct omap_video_timings dvic_default_timings = {
.x_res = 640,
.y_res = 480,
@@ -255,6 +255,7 @@ static int dvic_probe_of(struct platform_device *pdev)
adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0);
if (adapter_node) {
adapter = of_get_i2c_adapter_by_node(adapter_node);
+ of_node_put(adapter_node);
if (adapter == NULL) {
dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n");
omap_dss_put_device(ddata->in);
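
The one-line fix above restores the standard OF reference-counting idiom; a minimal sketch of the pattern:

	struct device_node *np = of_parse_phandle(node, "ddc-i2c-bus", 0);
	if (np) {
		/* ... look up the adapter, etc. ... */
		of_node_put(np);   /* drop the reference taken by of_parse_phandle() */
	}
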
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
index 667ca4a24..7bdf83af9 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
@@ -17,10 +17,10 @@
#include <linux/of_gpio.h>
#include <drm/drm_edid.h>
-
-#include <video/omapdss.h>
#include <video/omap-panel-data.h>
+#include "../dss/omapdss.h"
+
static const struct omap_video_timings hdmic_default_timings = {
.x_res = 640,
.y_res = 480,
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
index 9594ff7a2..fe4e7ec3b 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
@@ -18,9 +18,8 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/of_gpio.h>
-#include <video/omapdss.h>
+#include "../dss/omapdss.h"
struct panel_drv_data {
struct omap_dss_device dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
index 671806ca7..d768217ce 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
@@ -15,8 +15,7 @@
#include <linux/slab.h>
#include <linux/of_gpio.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include "../dss/omapdss.h"
struct panel_drv_data {
struct omap_dss_device dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
index 916a89978..46855c8f5 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
@@ -16,8 +16,7 @@
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include "../dss/omapdss.h"
struct panel_drv_data {
struct omap_dss_device dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
index 7c2331be8..7f16f985a 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
@@ -15,11 +15,13 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <linux/regulator/consumer.h>
-#include <video/omapdss.h>
#include <video/omap-panel-data.h>
#include <video/of_display_timing.h>
+#include "../dss/omapdss.h"
+
struct panel_drv_data {
struct omap_dss_device dssdev;
struct omap_dss_device *in;
@@ -32,6 +34,7 @@ struct panel_drv_data {
int backlight_gpio;
struct gpio_desc *enable_gpio;
+ struct regulator *vcc_supply;
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
@@ -83,6 +86,12 @@ static int panel_dpi_enable(struct omap_dss_device *dssdev)
if (r)
return r;
+ r = regulator_enable(ddata->vcc_supply);
+ if (r) {
+ in->ops.dpi->disable(in);
+ return r;
+ }
+
gpiod_set_value_cansleep(ddata->enable_gpio, 1);
if (gpio_is_valid(ddata->backlight_gpio))
@@ -105,6 +114,7 @@ static void panel_dpi_disable(struct omap_dss_device *dssdev)
gpio_set_value_cansleep(ddata->backlight_gpio, 0);
gpiod_set_value_cansleep(ddata->enable_gpio, 0);
+ regulator_disable(ddata->vcc_supply);
in->ops.dpi->disable(in);
@@ -213,6 +223,20 @@ static int panel_dpi_probe_of(struct platform_device *pdev)
ddata->enable_gpio = gpio;
+ /*
+ * Many different panels are supported by this driver and their reset
+ * pins likely have very different requirements with regard to timing
+ * and ordering relative to the enable gpio. So for now we only ensure
+ * that the reset line is inactive.
+ */
+ gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ ddata->vcc_supply = devm_regulator_get(&pdev->dev, "vcc");
+ if (IS_ERR(ddata->vcc_supply))
+ return PTR_ERR(ddata->vcc_supply);
+
ddata->backlight_gpio = -ENOENT;
r = of_get_display_timing(node, "panel-timing", &timing);
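
A note on the new "vcc" supply: on DT platforms the regulator core normally hands back a dummy regulator when no "vcc-supply" property is present (assuming full constraints), so the enable/disable calls above are safe unconditionally; a sketch of the probe-side behaviour:

	vcc = devm_regulator_get(&pdev->dev, "vcc");
	if (IS_ERR(vcc))
		return PTR_ERR(vcc);   /* real errors only, e.g. -EPROBE_DEFER */
	/* missing supply => dummy regulator; regulator_enable() is a no-op */
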
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index 2b118071b..0eae8afae 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -25,10 +25,10 @@
#include <linux/of_device.h>
#include <linux/of_gpio.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
#include <video/mipi_display.h>
+#include "../dss/omapdss.h"
+
/* DSI Virtual channel. Hardcoded for now. */
#define TCH 0
@@ -1284,8 +1284,7 @@ static int dsicm_probe(struct platform_device *pdev)
return 0;
err_sysfs_create:
- if (bldev != NULL)
- backlight_device_unregister(bldev);
+ backlight_device_unregister(bldev);
err_bl:
destroy_workqueue(ddata->workqueue);
err_reg:
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
index ac680e1de..6dfb96cea 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
@@ -17,8 +17,7 @@
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include "../dss/omapdss.h"
static struct omap_video_timings lb035q02_timings = {
.x_res = 320,
@@ -51,9 +50,6 @@ struct panel_drv_data {
struct omap_video_timings videomode;
- /* used for non-DT boot, to be removed */
- int backlight_gpio;
-
struct gpio_desc *enable_gpio;
};
@@ -171,9 +167,6 @@ static int lb035q02_enable(struct omap_dss_device *dssdev)
if (ddata->enable_gpio)
gpiod_set_value_cansleep(ddata->enable_gpio, 1);
- if (gpio_is_valid(ddata->backlight_gpio))
- gpio_set_value_cansleep(ddata->backlight_gpio, 1);
-
dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
return 0;
@@ -190,9 +183,6 @@ static void lb035q02_disable(struct omap_dss_device *dssdev)
if (ddata->enable_gpio)
gpiod_set_value_cansleep(ddata->enable_gpio, 0);
- if (gpio_is_valid(ddata->backlight_gpio))
- gpio_set_value_cansleep(ddata->backlight_gpio, 0);
-
in->ops.dpi->disable(in);
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
@@ -256,8 +246,6 @@ static int lb035q02_probe_of(struct spi_device *spi)
ddata->enable_gpio = gpio;
- ddata->backlight_gpio = -ENOENT;
-
in = omapdss_of_find_source_for_first_ep(node);
if (IS_ERR(in)) {
dev_err(&spi->dev, "failed to find video source\n");
@@ -290,13 +278,6 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
if (r)
return r;
- if (gpio_is_valid(ddata->backlight_gpio)) {
- r = devm_gpio_request_one(&spi->dev, ddata->backlight_gpio,
- GPIOF_OUT_INIT_LOW, "panel backlight");
- if (r)
- goto err_gpio;
- }
-
ddata->videomode = lb035q02_timings;
dssdev = &ddata->dssdev;
@@ -316,7 +297,6 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
return 0;
err_reg:
-err_gpio:
omap_dss_put_device(ddata->in);
return r;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
index 38d2920a9..fc4c238c9 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
@@ -18,7 +18,7 @@
#include <linux/gpio/consumer.h>
#include <linux/of_gpio.h>
-#include <video/omapdss.h>
+#include "../dss/omapdss.h"
struct panel_drv_data {
struct omap_dss_device dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
index 4363fffc8..3d3efc561 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
@@ -13,11 +13,11 @@
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
-#include <video/omapdss.h>
+
+#include "../dss/omapdss.h"
struct panel_drv_data {
struct omap_dss_device dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
index deb416736..157c51220 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
@@ -33,9 +33,10 @@
#include <linux/of.h>
#include <linux/of_gpio.h>
-#include <video/omapdss.h>
#include <video/omap-panel-data.h>
+#include "../dss/omapdss.h"
+
#define MIPID_CMD_READ_DISP_ID 0x04
#define MIPID_CMD_READ_RED 0x06
#define MIPID_CMD_READ_GREEN 0x07
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
index bd8d85041..e859b3f89 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
@@ -28,7 +28,8 @@
#include <linux/delay.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
-#include <video/omapdss.h>
+
+#include "../dss/omapdss.h"
struct panel_drv_data {
struct omap_dss_device dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
index d93175b03..66c6bbe64 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
@@ -19,7 +19,7 @@
#include <linux/slab.h>
#include <linux/of_gpio.h>
-#include <video/omapdss.h>
+#include "../dss/omapdss.h"
#define TPO_R02_MODE(x) ((x) & 7)
#define TPO_R02_MODE_800x480 7
diff --git a/drivers/gpu/drm/omapdrm/dss/core.c b/drivers/gpu/drm/omapdrm/dss/core.c
index 7e4e5beba..6a3ebfcd7 100644
--- a/drivers/gpu/drm/omapdrm/dss/core.c
+++ b/drivers/gpu/drm/omapdrm/dss/core.c
@@ -35,8 +35,7 @@
#include <linux/suspend.h>
#include <linux/slab.h>
-#include <video/omapdss.h>
-
+#include "omapdss.h"
#include "dss.h"
#include "dss_features.h"
@@ -196,8 +195,6 @@ static int __init omap_dss_probe(struct platform_device *pdev)
core.default_display_name = def_disp_name;
else if (pdata->default_display_name)
core.default_display_name = pdata->default_display_name;
- else if (pdata->default_device)
- core.default_display_name = pdata->default_device->name;
return 0;
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index f83608b69..535240fba 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -41,8 +41,7 @@
#include <linux/of.h>
#include <linux/component.h>
-#include <video/omapdss.h>
-
+#include "omapdss.h"
#include "dss.h"
#include "dss_features.h"
#include "dispc.h"
@@ -113,9 +112,14 @@ struct dispc_features {
* never both, we can just use this flag for now.
*/
bool reverse_ilace_field_order:1;
+
+ bool has_gamma_table:1;
+
+ bool has_gamma_i734_bug:1;
};
#define DISPC_MAX_NR_FIFOS 5
+#define DISPC_MAX_CHANNEL_GAMMA 4
static struct {
struct platform_device *pdev;
@@ -135,6 +139,8 @@ static struct {
bool ctx_valid;
u32 ctx[DISPC_SZ_REGS / sizeof(u32)];
+ u32 *gamma_table[DISPC_MAX_CHANNEL_GAMMA];
+
const struct dispc_features *feat;
bool is_enabled;
@@ -178,11 +184,19 @@ struct dispc_reg_field {
u8 low;
};
+struct dispc_gamma_desc {
+ u32 len;
+ u32 bits;
+ u16 reg;
+ bool has_index;
+};
+
static const struct {
const char *name;
u32 vsync_irq;
u32 framedone_irq;
u32 sync_lost_irq;
+ struct dispc_gamma_desc gamma;
struct dispc_reg_field reg_desc[DISPC_MGR_FLD_NUM];
} mgr_desc[] = {
[OMAP_DSS_CHANNEL_LCD] = {
@@ -190,6 +204,12 @@ static const struct {
.vsync_irq = DISPC_IRQ_VSYNC,
.framedone_irq = DISPC_IRQ_FRAMEDONE,
.sync_lost_irq = DISPC_IRQ_SYNC_LOST,
+ .gamma = {
+ .len = 256,
+ .bits = 8,
+ .reg = DISPC_GAMMA_TABLE0,
+ .has_index = true,
+ },
.reg_desc = {
[DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 0, 0 },
[DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL, 3, 3 },
@@ -207,6 +227,12 @@ static const struct {
.vsync_irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN,
.framedone_irq = DISPC_IRQ_FRAMEDONETV,
.sync_lost_irq = DISPC_IRQ_SYNC_LOST_DIGIT,
+ .gamma = {
+ .len = 1024,
+ .bits = 10,
+ .reg = DISPC_GAMMA_TABLE2,
+ .has_index = false,
+ },
.reg_desc = {
[DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 1, 1 },
[DISPC_MGR_FLD_STNTFT] = { },
@@ -224,6 +250,12 @@ static const struct {
.vsync_irq = DISPC_IRQ_VSYNC2,
.framedone_irq = DISPC_IRQ_FRAMEDONE2,
.sync_lost_irq = DISPC_IRQ_SYNC_LOST2,
+ .gamma = {
+ .len = 256,
+ .bits = 8,
+ .reg = DISPC_GAMMA_TABLE1,
+ .has_index = true,
+ },
.reg_desc = {
[DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL2, 0, 0 },
[DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL2, 3, 3 },
@@ -241,6 +273,12 @@ static const struct {
.vsync_irq = DISPC_IRQ_VSYNC3,
.framedone_irq = DISPC_IRQ_FRAMEDONE3,
.sync_lost_irq = DISPC_IRQ_SYNC_LOST3,
+ .gamma = {
+ .len = 256,
+ .bits = 8,
+ .reg = DISPC_GAMMA_TABLE3,
+ .has_index = true,
+ },
.reg_desc = {
[DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL3, 0, 0 },
[DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL3, 3, 3 },
@@ -1084,20 +1122,6 @@ static u32 dispc_ovl_get_burst_size(enum omap_plane plane)
return unit * 8;
}
-void dispc_enable_gamma_table(bool enable)
-{
- /*
- * This is partially implemented to support only disabling of
- * the gamma table.
- */
- if (enable) {
- DSSWARN("Gamma table enabling for TV not yet supported");
- return;
- }
-
- REG_FLD_MOD(DISPC_CONFIG, enable, 9, 9);
-}
-
static void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable)
{
if (channel == OMAP_DSS_CHANNEL_DIGIT)
@@ -3299,30 +3323,21 @@ static void dispc_mgr_get_lcd_divisor(enum omap_channel channel, int *lck_div,
static unsigned long dispc_fclk_rate(void)
{
- struct dss_pll *pll;
- unsigned long r = 0;
+ unsigned long r;
+ enum dss_clk_source src;
+
+ src = dss_get_dispc_clk_source();
- switch (dss_get_dispc_clk_source()) {
- case OMAP_DSS_CLK_SRC_FCK:
+ if (src == DSS_CLK_SRC_FCK) {
r = dss_get_dispc_clk_rate();
- break;
- case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
- pll = dss_pll_find("dsi0");
- if (!pll)
- pll = dss_pll_find("video0");
+ } else {
+ struct dss_pll *pll;
+ unsigned clkout_idx;
- r = pll->cinfo.clkout[0];
- break;
- case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
- pll = dss_pll_find("dsi1");
- if (!pll)
- pll = dss_pll_find("video1");
+ pll = dss_pll_find_by_src(src);
+ clkout_idx = dss_pll_get_clkout_idx_for_src(src);
- r = pll->cinfo.clkout[0];
- break;
- default:
- BUG();
- return 0;
+ r = pll->cinfo.clkout[clkout_idx];
}
return r;
@@ -3330,43 +3345,31 @@ static unsigned long dispc_fclk_rate(void)
static unsigned long dispc_mgr_lclk_rate(enum omap_channel channel)
{
- struct dss_pll *pll;
int lcd;
unsigned long r;
- u32 l;
-
- if (dss_mgr_is_lcd(channel)) {
- l = dispc_read_reg(DISPC_DIVISORo(channel));
+ enum dss_clk_source src;
- lcd = FLD_GET(l, 23, 16);
+ /* for TV, LCLK rate is the FCLK rate */
+ if (!dss_mgr_is_lcd(channel))
+ return dispc_fclk_rate();
- switch (dss_get_lcd_clk_source(channel)) {
- case OMAP_DSS_CLK_SRC_FCK:
- r = dss_get_dispc_clk_rate();
- break;
- case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
- pll = dss_pll_find("dsi0");
- if (!pll)
- pll = dss_pll_find("video0");
+ src = dss_get_lcd_clk_source(channel);
- r = pll->cinfo.clkout[0];
- break;
- case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
- pll = dss_pll_find("dsi1");
- if (!pll)
- pll = dss_pll_find("video1");
+ if (src == DSS_CLK_SRC_FCK) {
+ r = dss_get_dispc_clk_rate();
+ } else {
+ struct dss_pll *pll;
+ unsigned clkout_idx;
- r = pll->cinfo.clkout[0];
- break;
- default:
- BUG();
- return 0;
- }
+ pll = dss_pll_find_by_src(src);
+ clkout_idx = dss_pll_get_clkout_idx_for_src(src);
- return r / lcd;
- } else {
- return dispc_fclk_rate();
+ r = pll->cinfo.clkout[clkout_idx];
}
+
+ lcd = REG_GET(DISPC_DIVISORo(channel), 23, 16);
+
+ return r / lcd;
}
static unsigned long dispc_mgr_pclk_rate(enum omap_channel channel)
@@ -3426,15 +3429,14 @@ static unsigned long dispc_plane_lclk_rate(enum omap_plane plane)
static void dispc_dump_clocks_channel(struct seq_file *s, enum omap_channel channel)
{
int lcd, pcd;
- enum omap_dss_clk_source lcd_clk_src;
+ enum dss_clk_source lcd_clk_src;
seq_printf(s, "- %s -\n", mgr_desc[channel].name);
lcd_clk_src = dss_get_lcd_clk_source(channel);
- seq_printf(s, "%s clk source = %s (%s)\n", mgr_desc[channel].name,
- dss_get_generic_clk_source_name(lcd_clk_src),
- dss_feat_get_clk_source_name(lcd_clk_src));
+ seq_printf(s, "%s clk source = %s\n", mgr_desc[channel].name,
+ dss_get_clk_source_name(lcd_clk_src));
dispc_mgr_get_lcd_divisor(channel, &lcd, &pcd);
@@ -3448,16 +3450,15 @@ void dispc_dump_clocks(struct seq_file *s)
{
int lcd;
u32 l;
- enum omap_dss_clk_source dispc_clk_src = dss_get_dispc_clk_source();
+ enum dss_clk_source dispc_clk_src = dss_get_dispc_clk_source();
if (dispc_runtime_get())
return;
seq_printf(s, "- DISPC -\n");
- seq_printf(s, "dispc fclk source = %s (%s)\n",
- dss_get_generic_clk_source_name(dispc_clk_src),
- dss_feat_get_clk_source_name(dispc_clk_src));
+ seq_printf(s, "dispc fclk source = %s\n",
+ dss_get_clk_source_name(dispc_clk_src));
seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate());
@@ -3814,6 +3815,139 @@ void dispc_disable_sidle(void)
REG_FLD_MOD(DISPC_SYSCONFIG, 1, 4, 3); /* SIDLEMODE: no idle */
}
+u32 dispc_mgr_gamma_size(enum omap_channel channel)
+{
+ const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
+
+ if (!dispc.feat->has_gamma_table)
+ return 0;
+
+ return gdesc->len;
+}
+EXPORT_SYMBOL(dispc_mgr_gamma_size);
+
+static void dispc_mgr_write_gamma_table(enum omap_channel channel)
+{
+ const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
+ u32 *table = dispc.gamma_table[channel];
+ unsigned int i;
+
+ DSSDBG("%s: channel %d\n", __func__, channel);
+
+ for (i = 0; i < gdesc->len; ++i) {
+ u32 v = table[i];
+
+ if (gdesc->has_index)
+ v |= i << 24;
+ else if (i == 0)
+ v |= 1 << 31;
+
+ dispc_write_reg(gdesc->reg, v);
+ }
+}
+
+static void dispc_restore_gamma_tables(void)
+{
+ DSSDBG("%s()\n", __func__);
+
+ if (!dispc.feat->has_gamma_table)
+ return;
+
+ dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD);
+
+ dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_DIGIT);
+
+ if (dss_has_feature(FEAT_MGR_LCD2))
+ dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD2);
+
+ if (dss_has_feature(FEAT_MGR_LCD3))
+ dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD3);
+}
+
+static const struct drm_color_lut dispc_mgr_gamma_default_lut[] = {
+ { .red = 0, .green = 0, .blue = 0, },
+ { .red = U16_MAX, .green = U16_MAX, .blue = U16_MAX, },
+};
+
+void dispc_mgr_set_gamma(enum omap_channel channel,
+ const struct drm_color_lut *lut,
+ unsigned int length)
+{
+ const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
+ u32 *table = dispc.gamma_table[channel];
+ uint i;
+
+ DSSDBG("%s: channel %d, lut len %u, hw len %u\n", __func__,
+ channel, length, gdesc->len);
+
+ if (!dispc.feat->has_gamma_table)
+ return;
+
+ if (lut == NULL || length < 2) {
+ lut = dispc_mgr_gamma_default_lut;
+ length = ARRAY_SIZE(dispc_mgr_gamma_default_lut);
+ }
+
+ for (i = 0; i < length - 1; ++i) {
+ uint first = i * (gdesc->len - 1) / (length - 1);
+ uint last = (i + 1) * (gdesc->len - 1) / (length - 1);
+ uint w = last - first;
+ u16 r, g, b;
+ uint j;
+
+ if (w == 0)
+ continue;
+
+ for (j = 0; j <= w; j++) {
+ r = (lut[i].red * (w - j) + lut[i+1].red * j) / w;
+ g = (lut[i].green * (w - j) + lut[i+1].green * j) / w;
+ b = (lut[i].blue * (w - j) + lut[i+1].blue * j) / w;
+
+ r >>= 16 - gdesc->bits;
+ g >>= 16 - gdesc->bits;
+ b >>= 16 - gdesc->bits;
+
+ table[first + j] = (r << (gdesc->bits * 2)) |
+ (g << gdesc->bits) | b;
+ }
+ }
+
+ if (dispc.is_enabled)
+ dispc_mgr_write_gamma_table(channel);
+}
+EXPORT_SYMBOL(dispc_mgr_set_gamma);
+
+static int dispc_init_gamma_tables(void)
+{
+ int channel;
+
+ if (!dispc.feat->has_gamma_table)
+ return 0;
+
+ for (channel = 0; channel < ARRAY_SIZE(dispc.gamma_table); channel++) {
+ const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
+ u32 *gt;
+
+ if (channel == OMAP_DSS_CHANNEL_LCD2 &&
+ !dss_has_feature(FEAT_MGR_LCD2))
+ continue;
+
+ if (channel == OMAP_DSS_CHANNEL_LCD3 &&
+ !dss_has_feature(FEAT_MGR_LCD3))
+ continue;
+
+ gt = devm_kmalloc_array(&dispc.pdev->dev, gdesc->len,
+ sizeof(u32), GFP_KERNEL);
+ if (!gt)
+ return -ENOMEM;
+
+ dispc.gamma_table[channel] = gt;
+
+ dispc_mgr_set_gamma(channel, NULL, 0);
+ }
+ return 0;
+}
+
static void _omap_dispc_initial_config(void)
{
u32 l;
@@ -3829,8 +3963,15 @@ static void _omap_dispc_initial_config(void)
dispc.core_clk_rate = dispc_fclk_rate();
}
- /* FUNCGATED */
- if (dss_has_feature(FEAT_FUNCGATED))
+ /* Use gamma table mode instead of palette mode */
+ if (dispc.feat->has_gamma_table)
+ REG_FLD_MOD(DISPC_CONFIG, 1, 3, 3);
+
+ /* For older DSS versions (FEAT_FUNCGATED) this enables
+ * func-clock auto-gating. For newer versions
+ * (dispc.feat->has_gamma_table) this enables TV-out gamma tables.
+ */
+ if (dss_has_feature(FEAT_FUNCGATED) || dispc.feat->has_gamma_table)
REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9);
dispc_setup_color_conv_coef();
@@ -3934,6 +4075,8 @@ static const struct dispc_features omap44xx_dispc_feats = {
.has_writeback = true,
.supports_double_pixel = true,
.reverse_ilace_field_order = true,
+ .has_gamma_table = true,
+ .has_gamma_i734_bug = true,
};
static const struct dispc_features omap54xx_dispc_feats = {
@@ -3959,6 +4102,8 @@ static const struct dispc_features omap54xx_dispc_feats = {
.has_writeback = true,
.supports_double_pixel = true,
.reverse_ilace_field_order = true,
+ .has_gamma_table = true,
+ .has_gamma_i734_bug = true,
};
static int dispc_init_features(struct platform_device *pdev)
@@ -4050,6 +4195,168 @@ void dispc_free_irq(void *dev_id)
}
EXPORT_SYMBOL(dispc_free_irq);
+/*
+ * Workaround for errata i734 in DSS dispc
+ * - LCD1 Gamma Correction Is Not Working When GFX Pipe Is Disabled
+ *
+ * For gamma tables to work on LCD1 the GFX plane has to be used at
+ * least once after the DSS HW has come out of reset. The workaround
+ * brings up a minimal LCD configuration with the GFX plane and waits
+ * for one vertical sync irq before tearing it down and continuing
+ * with the context restore. The physical outputs are gated during
+ * the operation. This workaround requires that the gamma table's
+ * LOADMODE is set to 0x2 in the DISPC_CONTROL1 register.
+ *
+ * For details see:
+ * OMAP543x Multimedia Device Silicon Revision 2.0 Silicon Errata
+ * Literature Number: SWPZ037E
+ * Or some other relevant errata document for the DSS IP version.
+ */
+
+static const struct dispc_errata_i734_data {
+ struct omap_video_timings timings;
+ struct omap_overlay_info ovli;
+ struct omap_overlay_manager_info mgri;
+ struct dss_lcd_mgr_config lcd_conf;
+} i734 = {
+ .timings = {
+ .x_res = 8, .y_res = 1,
+ .pixelclock = 16000000,
+ .hsw = 8, .hfp = 4, .hbp = 4,
+ .vsw = 1, .vfp = 1, .vbp = 1,
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .interlace = false,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .double_pixel = false,
+ },
+ .ovli = {
+ .screen_width = 1,
+ .width = 1, .height = 1,
+ .color_mode = OMAP_DSS_COLOR_RGB24U,
+ .rotation = OMAP_DSS_ROT_0,
+ .rotation_type = OMAP_DSS_ROT_DMA,
+ .mirror = 0,
+ .pos_x = 0, .pos_y = 0,
+ .out_width = 0, .out_height = 0,
+ .global_alpha = 0xff,
+ .pre_mult_alpha = 0,
+ .zorder = 0,
+ },
+ .mgri = {
+ .default_color = 0,
+ .trans_enabled = false,
+ .partial_alpha_enabled = false,
+ .cpr_enable = false,
+ },
+ .lcd_conf = {
+ .io_pad_mode = DSS_IO_PAD_MODE_BYPASS,
+ .stallmode = false,
+ .fifohandcheck = false,
+ .clock_info = {
+ .lck_div = 1,
+ .pck_div = 2,
+ },
+ .video_port_width = 24,
+ .lcden_sig_polarity = 0,
+ },
+};
+
+static struct i734_buf {
+ size_t size;
+ dma_addr_t paddr;
+ void *vaddr;
+} i734_buf;
+
+static int dispc_errata_i734_wa_init(void)
+{
+ if (!dispc.feat->has_gamma_i734_bug)
+ return 0;
+
+ i734_buf.size = i734.ovli.width * i734.ovli.height *
+ color_mode_to_bpp(i734.ovli.color_mode) / 8;
+
+ i734_buf.vaddr = dma_alloc_writecombine(&dispc.pdev->dev, i734_buf.size,
+ &i734_buf.paddr, GFP_KERNEL);
+ if (!i734_buf.vaddr) {
+ dev_err(&dispc.pdev->dev, "%s: dma_alloc_writecombine failed",
+ __func__);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void dispc_errata_i734_wa_fini(void)
+{
+ if (!dispc.feat->has_gamma_i734_bug)
+ return;
+
+ dma_free_writecombine(&dispc.pdev->dev, i734_buf.size, i734_buf.vaddr,
+ i734_buf.paddr);
+}
+
+static void dispc_errata_i734_wa(void)
+{
+ u32 framedone_irq = dispc_mgr_get_framedone_irq(OMAP_DSS_CHANNEL_LCD);
+ struct omap_overlay_info ovli;
+ struct dss_lcd_mgr_config lcd_conf;
+ u32 gatestate;
+ unsigned int count;
+
+ if (!dispc.feat->has_gamma_i734_bug)
+ return;
+
+ gatestate = REG_GET(DISPC_CONFIG, 8, 4);
+
+ ovli = i734.ovli;
+ ovli.paddr = i734_buf.paddr;
+ lcd_conf = i734.lcd_conf;
+
+ /* Gate all LCD1 outputs */
+ REG_FLD_MOD(DISPC_CONFIG, 0x1f, 8, 4);
+
+ /* Setup and enable GFX plane */
+ dispc_ovl_set_channel_out(OMAP_DSS_GFX, OMAP_DSS_CHANNEL_LCD);
+ dispc_ovl_setup(OMAP_DSS_GFX, &ovli, false, &i734.timings, false);
+ dispc_ovl_enable(OMAP_DSS_GFX, true);
+
+ /* Set up and enable display manager for LCD1 */
+ dispc_mgr_setup(OMAP_DSS_CHANNEL_LCD, &i734.mgri);
+ dispc_calc_clock_rates(dss_get_dispc_clk_rate(),
+ &lcd_conf.clock_info);
+ dispc_mgr_set_lcd_config(OMAP_DSS_CHANNEL_LCD, &lcd_conf);
+ dispc_mgr_set_timings(OMAP_DSS_CHANNEL_LCD, &i734.timings);
+
+ dispc_clear_irqstatus(framedone_irq);
+
+ /* Enable and then disable the channel so it produces exactly one frame */
+ dispc_mgr_enable(OMAP_DSS_CHANNEL_LCD, true);
+ dispc_mgr_enable(OMAP_DSS_CHANNEL_LCD, false);
+
+ /* Busy wait for framedone. We can't fiddle with irq handlers
+ * in PM resume. Typically the loop runs less than 5 times and
+ * waits less than a microsecond.
+ */
+ count = 0;
+ while (!(dispc_read_irqstatus() & framedone_irq)) {
+ if (count++ > 10000) {
+ dev_err(&dispc.pdev->dev, "%s: framedone timeout\n",
+ __func__);
+ break;
+ }
+ }
+ dispc_ovl_enable(OMAP_DSS_GFX, false);
+
+ /* Clear all irq bits before continuing */
+ dispc_clear_irqstatus(0xffffffff);
+
+ /* Restore the original state to LCD1 output gates */
+ REG_FLD_MOD(DISPC_CONFIG, gatestate, 8, 4);
+}
+
/* DISPC HW IP initialisation */
static int dispc_bind(struct device *dev, struct device *master, void *data)
{
@@ -4067,6 +4374,10 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
if (r)
return r;
+ r = dispc_errata_i734_wa_init();
+ if (r)
+ return r;
+
dispc_mem = platform_get_resource(dispc.pdev, IORESOURCE_MEM, 0);
if (!dispc_mem) {
DSSERR("can't get IORESOURCE_MEM DISPC\n");
@@ -4100,6 +4411,10 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
}
}
+ r = dispc_init_gamma_tables();
+ if (r)
+ return r;
+
pm_runtime_enable(&pdev->dev);
r = dispc_runtime_get();
@@ -4127,6 +4442,8 @@ static void dispc_unbind(struct device *dev, struct device *master,
void *data)
{
pm_runtime_disable(dev);
+
+ dispc_errata_i734_wa_fini();
}
static const struct component_ops dispc_component_ops = {
@@ -4169,7 +4486,11 @@ static int dispc_runtime_resume(struct device *dev)
if (REG_GET(DISPC_CONFIG, 2, 1) != OMAP_DSS_LOAD_FRAME_ONLY) {
_omap_dispc_initial_config();
+ dispc_errata_i734_wa();
+
dispc_restore_context();
+
+ dispc_restore_gamma_tables();
}
dispc.is_enabled = true;
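
How dispc_mgr_set_gamma() resamples and packs entries, as a worked illustration for an 8-bit LCD table (gdesc->bits = 8, gdesc->len = 256): each 16-bit drm_color_lut component keeps only its top 8 bits, and the three components share one register word.

	u16 r16 = 0xffff, g16 = 0x8000, b16 = 0x0000;   /* one resampled point */
	u32 r = r16 >> 8, g = g16 >> 8, b = b16 >> 8;   /* shift by 16 - bits */
	u32 v = (r << 16) | (g << 8) | b;               /* bits*2 and bits shifts */
	/* LCD-style tables with has_index also OR in the slot: v |= i << 24;
	 * the 1024-entry TV table instead flags the first write with 1 << 31 */
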
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.h b/drivers/gpu/drm/omapdrm/dss/dispc.h
index 483744223..bc1d8126e 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.h
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.h
@@ -42,6 +42,11 @@
#define DISPC_MSTANDBY_CTRL 0x0858
#define DISPC_GLOBAL_MFLAG_ATTRIBUTE 0x085C
+#define DISPC_GAMMA_TABLE0 0x0630
+#define DISPC_GAMMA_TABLE1 0x0634
+#define DISPC_GAMMA_TABLE2 0x0638
+#define DISPC_GAMMA_TABLE3 0x0850
+
/* DISPC overlay registers */
#define DISPC_OVL_BA0(n) (DISPC_OVL_BASE(n) + \
DISPC_BA0_OFFSET(n))
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c b/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c
index 038c15b04..34fad2376 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c
@@ -18,8 +18,8 @@
*/
#include <linux/kernel.h>
-#include <video/omapdss.h>
+#include "omapdss.h"
#include "dispc.h"
static const struct dispc_coef coef3_M8[8] = {
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
index 9f3dd09b0..8dcdd7cf9 100644
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ b/drivers/gpu/drm/omapdrm/dss/display.c
@@ -28,7 +28,7 @@
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <video/omapdss.h>
+#include "omapdss.h"
#include "dss.h"
#include "dss_features.h"
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index 97ea60257..b268295b7 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -34,17 +34,15 @@
#include <linux/clk.h>
#include <linux/component.h>
-#include <video/omapdss.h>
-
+#include "omapdss.h"
#include "dss.h"
#include "dss_features.h"
-#define HSDIV_DISPC 0
-
struct dpi_data {
struct platform_device *pdev;
struct regulator *vdds_dsi_reg;
+ enum dss_clk_source clk_src;
struct dss_pll *pll;
struct mutex lock;
@@ -69,7 +67,7 @@ static struct dpi_data *dpi_get_data_from_pdev(struct platform_device *pdev)
return dev_get_drvdata(&pdev->dev);
}
-static struct dss_pll *dpi_get_pll(enum omap_channel channel)
+static enum dss_clk_source dpi_get_clk_src(enum omap_channel channel)
{
/*
* XXX we can't currently use DSI PLL for DPI with OMAP3, as the DSI PLL
@@ -83,64 +81,51 @@ static struct dss_pll *dpi_get_pll(enum omap_channel channel)
case OMAPDSS_VER_OMAP3630:
case OMAPDSS_VER_AM35xx:
case OMAPDSS_VER_AM43xx:
- return NULL;
+ return DSS_CLK_SRC_FCK;
case OMAPDSS_VER_OMAP4430_ES1:
case OMAPDSS_VER_OMAP4430_ES2:
case OMAPDSS_VER_OMAP4:
switch (channel) {
case OMAP_DSS_CHANNEL_LCD:
- return dss_pll_find("dsi0");
+ return DSS_CLK_SRC_PLL1_1;
case OMAP_DSS_CHANNEL_LCD2:
- return dss_pll_find("dsi1");
+ return DSS_CLK_SRC_PLL2_1;
default:
- return NULL;
+ return DSS_CLK_SRC_FCK;
}
case OMAPDSS_VER_OMAP5:
switch (channel) {
case OMAP_DSS_CHANNEL_LCD:
- return dss_pll_find("dsi0");
+ return DSS_CLK_SRC_PLL1_1;
case OMAP_DSS_CHANNEL_LCD3:
- return dss_pll_find("dsi1");
+ return DSS_CLK_SRC_PLL2_1;
+ case OMAP_DSS_CHANNEL_LCD2:
default:
- return NULL;
+ return DSS_CLK_SRC_FCK;
}
case OMAPDSS_VER_DRA7xx:
switch (channel) {
case OMAP_DSS_CHANNEL_LCD:
+ return DSS_CLK_SRC_PLL1_1;
case OMAP_DSS_CHANNEL_LCD2:
- return dss_pll_find("video0");
+ return DSS_CLK_SRC_PLL1_3;
case OMAP_DSS_CHANNEL_LCD3:
- return dss_pll_find("video1");
+ return DSS_CLK_SRC_PLL2_1;
default:
- return NULL;
+ return DSS_CLK_SRC_FCK;
}
default:
- return NULL;
- }
-}
-
-static enum omap_dss_clk_source dpi_get_alt_clk_src(enum omap_channel channel)
-{
- switch (channel) {
- case OMAP_DSS_CHANNEL_LCD:
- return OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC;
- case OMAP_DSS_CHANNEL_LCD2:
- return OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC;
- case OMAP_DSS_CHANNEL_LCD3:
- return OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC;
- default:
- /* this shouldn't happen */
- WARN_ON(1);
- return OMAP_DSS_CLK_SRC_FCK;
+ return DSS_CLK_SRC_FCK;
}
}
struct dpi_clk_calc_ctx {
struct dss_pll *pll;
+ unsigned clkout_idx;
/* inputs */
@@ -148,7 +133,7 @@ struct dpi_clk_calc_ctx {
/* outputs */
- struct dss_pll_clock_info dsi_cinfo;
+ struct dss_pll_clock_info pll_cinfo;
unsigned long fck;
struct dispc_clock_info dispc_cinfo;
};
@@ -193,8 +178,8 @@ static bool dpi_calc_hsdiv_cb(int m_dispc, unsigned long dispc,
if (m_dispc > 1 && m_dispc % 2 != 0 && ctx->pck_min >= 100000000)
return false;
- ctx->dsi_cinfo.mX[HSDIV_DISPC] = m_dispc;
- ctx->dsi_cinfo.clkout[HSDIV_DISPC] = dispc;
+ ctx->pll_cinfo.mX[ctx->clkout_idx] = m_dispc;
+ ctx->pll_cinfo.clkout[ctx->clkout_idx] = dispc;
return dispc_div_calc(dispc, ctx->pck_min, ctx->pck_max,
dpi_calc_dispc_cb, ctx);
@@ -207,12 +192,12 @@ static bool dpi_calc_pll_cb(int n, int m, unsigned long fint,
{
struct dpi_clk_calc_ctx *ctx = data;
- ctx->dsi_cinfo.n = n;
- ctx->dsi_cinfo.m = m;
- ctx->dsi_cinfo.fint = fint;
- ctx->dsi_cinfo.clkdco = clkdco;
+ ctx->pll_cinfo.n = n;
+ ctx->pll_cinfo.m = m;
+ ctx->pll_cinfo.fint = fint;
+ ctx->pll_cinfo.clkdco = clkdco;
- return dss_pll_hsdiv_calc(ctx->pll, clkdco,
+ return dss_pll_hsdiv_calc_a(ctx->pll, clkdco,
ctx->pck_min, dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
dpi_calc_hsdiv_cb, ctx);
}
@@ -227,25 +212,39 @@ static bool dpi_calc_dss_cb(unsigned long fck, void *data)
dpi_calc_dispc_cb, ctx);
}
-static bool dpi_dsi_clk_calc(struct dpi_data *dpi, unsigned long pck,
+static bool dpi_pll_clk_calc(struct dpi_data *dpi, unsigned long pck,
struct dpi_clk_calc_ctx *ctx)
{
unsigned long clkin;
- unsigned long pll_min, pll_max;
memset(ctx, 0, sizeof(*ctx));
ctx->pll = dpi->pll;
- ctx->pck_min = pck - 1000;
- ctx->pck_max = pck + 1000;
+ ctx->clkout_idx = dss_pll_get_clkout_idx_for_src(dpi->clk_src);
- pll_min = 0;
- pll_max = 0;
+ clkin = clk_get_rate(dpi->pll->clkin);
- clkin = clk_get_rate(ctx->pll->clkin);
+ if (dpi->pll->hw->type == DSS_PLL_TYPE_A) {
+ unsigned long pll_min, pll_max;
- return dss_pll_calc(ctx->pll, clkin,
- pll_min, pll_max,
- dpi_calc_pll_cb, ctx);
+ ctx->pck_min = pck - 1000;
+ ctx->pck_max = pck + 1000;
+
+ pll_min = 0;
+ pll_max = 0;
+
+ return dss_pll_calc_a(ctx->pll, clkin,
+ pll_min, pll_max,
+ dpi_calc_pll_cb, ctx);
+ } else { /* DSS_PLL_TYPE_B */
+ dss_pll_calc_b(dpi->pll, clkin, pck, &ctx->pll_cinfo);
+
+ ctx->dispc_cinfo.lck_div = 1;
+ ctx->dispc_cinfo.pck_div = 1;
+ ctx->dispc_cinfo.lck = ctx->pll_cinfo.clkout[0];
+ ctx->dispc_cinfo.pck = ctx->dispc_cinfo.lck;
+
+ return true;
+ }
}
static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx)
@@ -279,7 +278,7 @@ static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx)
-static int dpi_set_dsi_clk(struct dpi_data *dpi, enum omap_channel channel,
+static int dpi_set_pll_clk(struct dpi_data *dpi, enum omap_channel channel,
unsigned long pck_req, unsigned long *fck, int *lck_div,
int *pck_div)
{
@@ -287,20 +286,19 @@ static int dpi_set_dsi_clk(struct dpi_data *dpi, enum omap_channel channel,
int r;
bool ok;
- ok = dpi_dsi_clk_calc(dpi, pck_req, &ctx);
+ ok = dpi_pll_clk_calc(dpi, pck_req, &ctx);
if (!ok)
return -EINVAL;
- r = dss_pll_set_config(dpi->pll, &ctx.dsi_cinfo);
+ r = dss_pll_set_config(dpi->pll, &ctx.pll_cinfo);
if (r)
return r;
- dss_select_lcd_clk_source(channel,
- dpi_get_alt_clk_src(channel));
+ dss_select_lcd_clk_source(channel, dpi->clk_src);
dpi->mgr_config.clock_info = ctx.dispc_cinfo;
- *fck = ctx.dsi_cinfo.clkout[HSDIV_DISPC];
+ *fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
*lck_div = ctx.dispc_cinfo.lck_div;
*pck_div = ctx.dispc_cinfo.pck_div;
@@ -342,7 +340,7 @@ static int dpi_set_mode(struct dpi_data *dpi)
int r = 0;
if (dpi->pll)
- r = dpi_set_dsi_clk(dpi, channel, t->pixelclock, &fck,
+ r = dpi_set_pll_clk(dpi, channel, t->pixelclock, &fck,
&lck_div, &pck_div);
else
r = dpi_set_dispc_clk(dpi, t->pixelclock, &fck,
@@ -419,7 +417,7 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
if (dpi->pll) {
r = dss_pll_enable(dpi->pll);
if (r)
- goto err_dsi_pll_init;
+ goto err_pll_init;
}
r = dpi_set_mode(dpi);
@@ -442,7 +440,7 @@ err_mgr_enable:
err_set_mode:
if (dpi->pll)
dss_pll_disable(dpi->pll);
-err_dsi_pll_init:
+err_pll_init:
err_src_sel:
dispc_runtime_put();
err_get_dispc:
@@ -465,7 +463,7 @@ static void dpi_display_disable(struct omap_dss_device *dssdev)
dss_mgr_disable(channel);
if (dpi->pll) {
- dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK);
+ dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK);
dss_pll_disable(dpi->pll);
}
@@ -524,11 +522,11 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
return -EINVAL;
if (dpi->pll) {
- ok = dpi_dsi_clk_calc(dpi, timings->pixelclock, &ctx);
+ ok = dpi_pll_clk_calc(dpi, timings->pixelclock, &ctx);
if (!ok)
return -EINVAL;
- fck = ctx.dsi_cinfo.clkout[HSDIV_DISPC];
+ fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
} else {
ok = dpi_dss_clk_calc(timings->pixelclock, &ctx);
if (!ok)
@@ -558,7 +556,7 @@ static void dpi_set_data_lines(struct omap_dss_device *dssdev, int data_lines)
mutex_unlock(&dpi->lock);
}
-static int dpi_verify_dsi_pll(struct dss_pll *pll)
+static int dpi_verify_pll(struct dss_pll *pll)
{
int r;
@@ -602,16 +600,14 @@ static void dpi_init_pll(struct dpi_data *dpi)
if (dpi->pll)
return;
- pll = dpi_get_pll(dpi->output.dispc_channel);
+ dpi->clk_src = dpi_get_clk_src(dpi->output.dispc_channel);
+
+ pll = dss_pll_find_by_src(dpi->clk_src);
if (!pll)
return;
- /* On DRA7 we need to set a mux to use the PLL */
- if (omapdss_get_version() == OMAPDSS_VER_DRA7xx)
- dss_ctrl_pll_set_control_mux(pll->id, dpi->output.dispc_channel);
-
- if (dpi_verify_dsi_pll(pll)) {
- DSSWARN("DSI PLL not operational\n");
+ if (dpi_verify_pll(pll)) {
+ DSSWARN("PLL not operational\n");
return;
}
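
With the dpi_get_pll() to dpi_get_clk_src() rework, the channel now maps to a generic clock source first, and the PLL plus output index are derived from it; a condensed sketch of the resulting lookup (all names as used in the hunks above):

	enum dss_clk_source src = dpi_get_clk_src(channel);  /* e.g. DSS_CLK_SRC_PLL1_3 */
	struct dss_pll *pll     = dss_pll_find_by_src(src);
	unsigned int idx        = dss_pll_get_clkout_idx_for_src(src);
	unsigned long fck       = pll->cinfo.clkout[idx];    /* replaces clkout[0] */
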
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 56c43f355..e1be5e795 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -42,9 +42,9 @@
#include <linux/of_platform.h>
#include <linux/component.h>
-#include <video/omapdss.h>
#include <video/mipi_display.h>
+#include "omapdss.h"
#include "dss.h"
#include "dss_features.h"
@@ -1261,7 +1261,7 @@ static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
unsigned long r;
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- if (dss_get_dsi_clk_source(dsi->module_id) == OMAP_DSS_CLK_SRC_FCK) {
+ if (dss_get_dsi_clk_source(dsi->module_id) == DSS_CLK_SRC_FCK) {
/* DSI FCLK source is DSS_CLK_FCK */
r = clk_get_rate(dsi->dss_clk);
} else {
@@ -1474,7 +1474,7 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo;
- enum omap_dss_clk_source dispc_clk_src, dsi_clk_src;
+ enum dss_clk_source dispc_clk_src, dsi_clk_src;
int dsi_module = dsi->module_id;
struct dss_pll *pll = &dsi->pll;
@@ -1494,28 +1494,27 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
cinfo->clkdco, cinfo->m);
seq_printf(s, "DSI_PLL_HSDIV_DISPC (%s)\t%-16lum_dispc %u\t(%s)\n",
- dss_feat_get_clk_source_name(dsi_module == 0 ?
- OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC :
- OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC),
+ dss_get_clk_source_name(dsi_module == 0 ?
+ DSS_CLK_SRC_PLL1_1 :
+ DSS_CLK_SRC_PLL2_1),
cinfo->clkout[HSDIV_DISPC],
cinfo->mX[HSDIV_DISPC],
- dispc_clk_src == OMAP_DSS_CLK_SRC_FCK ?
+ dispc_clk_src == DSS_CLK_SRC_FCK ?
"off" : "on");
seq_printf(s, "DSI_PLL_HSDIV_DSI (%s)\t%-16lum_dsi %u\t(%s)\n",
- dss_feat_get_clk_source_name(dsi_module == 0 ?
- OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI :
- OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI),
+ dss_get_clk_source_name(dsi_module == 0 ?
+ DSS_CLK_SRC_PLL1_2 :
+ DSS_CLK_SRC_PLL2_2),
cinfo->clkout[HSDIV_DSI],
cinfo->mX[HSDIV_DSI],
- dsi_clk_src == OMAP_DSS_CLK_SRC_FCK ?
+ dsi_clk_src == DSS_CLK_SRC_FCK ?
"off" : "on");
seq_printf(s, "- DSI%d -\n", dsi_module + 1);
- seq_printf(s, "dsi fclk source = %s (%s)\n",
- dss_get_generic_clk_source_name(dsi_clk_src),
- dss_feat_get_clk_source_name(dsi_clk_src));
+ seq_printf(s, "dsi fclk source = %s\n",
+ dss_get_clk_source_name(dsi_clk_src));
seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev));
@@ -4101,8 +4100,8 @@ static int dsi_display_init_dispc(struct platform_device *dsidev,
int r;
dss_select_lcd_clk_source(channel, dsi->module_id == 0 ?
- OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC :
- OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC);
+ DSS_CLK_SRC_PLL1_1 :
+ DSS_CLK_SRC_PLL2_1);
if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) {
r = dss_mgr_register_framedone_handler(channel,
@@ -4149,7 +4148,7 @@ err1:
dss_mgr_unregister_framedone_handler(channel,
dsi_framedone_irq_callback, dsidev);
err:
- dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK);
+ dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK);
return r;
}
@@ -4162,7 +4161,7 @@ static void dsi_display_uninit_dispc(struct platform_device *dsidev,
dss_mgr_unregister_framedone_handler(channel,
dsi_framedone_irq_callback, dsidev);
- dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK);
+ dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK);
}
static int dsi_configure_dsi_clocks(struct platform_device *dsidev)
@@ -4196,8 +4195,8 @@ static int dsi_display_init_dsi(struct platform_device *dsidev)
goto err1;
dss_select_dsi_clk_source(dsi->module_id, dsi->module_id == 0 ?
- OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI :
- OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI);
+ DSS_CLK_SRC_PLL1_2 :
+ DSS_CLK_SRC_PLL2_2);
DSSDBG("PLL OK\n");
@@ -4229,7 +4228,7 @@ static int dsi_display_init_dsi(struct platform_device *dsidev)
err3:
dsi_cio_uninit(dsidev);
err2:
- dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
+ dss_select_dsi_clk_source(dsi->module_id, DSS_CLK_SRC_FCK);
err1:
dss_pll_disable(&dsi->pll);
err0:
@@ -4251,7 +4250,7 @@ static void dsi_display_uninit_dsi(struct platform_device *dsidev,
dsi_vc_enable(dsidev, 2, 0);
dsi_vc_enable(dsidev, 3, 0);
- dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
+ dss_select_dsi_clk_source(dsi->module_id, DSS_CLK_SRC_FCK);
dsi_cio_uninit(dsidev);
dsi_pll_uninit(dsidev, disconnect_lanes);
}
@@ -4452,7 +4451,7 @@ static bool dsi_cm_calc_pll_cb(int n, int m, unsigned long fint,
ctx->dsi_cinfo.fint = fint;
ctx->dsi_cinfo.clkdco = clkdco;
- return dss_pll_hsdiv_calc(ctx->pll, clkdco, ctx->req_pck_min,
+ return dss_pll_hsdiv_calc_a(ctx->pll, clkdco, ctx->req_pck_min,
dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
dsi_cm_calc_hsdiv_cb, ctx);
}
@@ -4491,7 +4490,7 @@ static bool dsi_cm_calc(struct dsi_data *dsi,
pll_min = max(cfg->hs_clk_min * 4, txbyteclk * 4 * 4);
pll_max = cfg->hs_clk_max * 4;
- return dss_pll_calc(ctx->pll, clkin,
+ return dss_pll_calc_a(ctx->pll, clkin,
pll_min, pll_max,
dsi_cm_calc_pll_cb, ctx);
}
@@ -4750,7 +4749,7 @@ static bool dsi_vm_calc_pll_cb(int n, int m, unsigned long fint,
ctx->dsi_cinfo.fint = fint;
ctx->dsi_cinfo.clkdco = clkdco;
- return dss_pll_hsdiv_calc(ctx->pll, clkdco, ctx->req_pck_min,
+ return dss_pll_hsdiv_calc_a(ctx->pll, clkdco, ctx->req_pck_min,
dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
dsi_vm_calc_hsdiv_cb, ctx);
}
@@ -4792,7 +4791,7 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
pll_max = byteclk_max * 4 * 4;
}
- return dss_pll_calc(ctx->pll, clkin,
+ return dss_pll_calc_a(ctx->pll, clkin,
pll_min, pll_max,
dsi_vm_calc_pll_cb, ctx);
}
@@ -5138,6 +5137,8 @@ static const struct dss_pll_ops dsi_pll_ops = {
};
static const struct dss_pll_hw dss_omap3_dsi_pll_hw = {
+ .type = DSS_PLL_TYPE_A,
+
.n_max = (1 << 7) - 1,
.m_max = (1 << 11) - 1,
.mX_max = (1 << 4) - 1,
@@ -5163,6 +5164,8 @@ static const struct dss_pll_hw dss_omap3_dsi_pll_hw = {
};
static const struct dss_pll_hw dss_omap4_dsi_pll_hw = {
+ .type = DSS_PLL_TYPE_A,
+
.n_max = (1 << 8) - 1,
.m_max = (1 << 12) - 1,
.mX_max = (1 << 5) - 1,
@@ -5188,6 +5191,8 @@ static const struct dss_pll_hw dss_omap4_dsi_pll_hw = {
};
static const struct dss_pll_hw dss_omap5_dsi_pll_hw = {
+ .type = DSS_PLL_TYPE_A,
+
.n_max = (1 << 8) - 1,
.m_max = (1 << 12) - 1,
.mX_max = (1 << 5) - 1,
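
The new .type field distinguishes the two PLL calculation flavours used throughout this series; a condensed sketch of the split, mirroring the dpi.c hunk earlier in this patch:

	if (pll->hw->type == DSS_PLL_TYPE_A)
		/* iterate n/m and HSDIV candidates via callbacks */
		ok = dss_pll_calc_a(pll, clkin, pll_min, pll_max, calc_pll_cb, &ctx);
	else	/* DSS_PLL_TYPE_B: direct solution for the requested pixel clock */
		dss_pll_calc_b(pll, clkin, pck, &ctx.pll_cinfo);
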
diff --git a/drivers/gpu/drm/omapdrm/dss/dss-of.c b/drivers/gpu/drm/omapdrm/dss/dss-of.c
index bf407b6ba..e256d879b 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss-of.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss-of.c
@@ -18,8 +18,7 @@
#include <linux/of.h>
#include <linux/seq_file.h>
-#include <video/omapdss.h>
-
+#include "omapdss.h"
#include "dss.h"
struct device_node *
@@ -126,15 +125,16 @@ u32 dss_of_port_get_port_number(struct device_node *port)
static struct device_node *omapdss_of_get_remote_port(const struct device_node *node)
{
- struct device_node *np;
+ struct device_node *np, *np_parent;
np = of_parse_phandle(node, "remote-endpoint", 0);
if (!np)
return NULL;
- np = of_get_next_parent(np);
+ np_parent = of_get_next_parent(np);
+ of_node_put(np);
- return np;
+ return np_parent;
}
struct device_node *
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 3303cfad4..14887d5b0 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -42,8 +42,7 @@
#include <linux/suspend.h>
#include <linux/component.h>
-#include <video/omapdss.h>
-
+#include "omapdss.h"
#include "dss.h"
#include "dss_features.h"
@@ -76,6 +75,8 @@ struct dss_features {
const enum omap_display_type *ports;
int num_ports;
int (*dpi_select_source)(int port, enum omap_channel channel);
+ int (*select_lcd_source)(enum omap_channel channel,
+ enum dss_clk_source clk_src);
};
static struct {
@@ -92,9 +93,9 @@ static struct {
unsigned long cache_prate;
struct dispc_clock_info cache_dispc_cinfo;
- enum omap_dss_clk_source dsi_clk_source[MAX_NUM_DSI];
- enum omap_dss_clk_source dispc_clk_source;
- enum omap_dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS];
+ enum dss_clk_source dsi_clk_source[MAX_NUM_DSI];
+ enum dss_clk_source dispc_clk_source;
+ enum dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS];
bool ctx_valid;
u32 ctx[DSS_SZ_REGS / sizeof(u32)];
@@ -106,11 +107,14 @@ static struct {
} dss;
static const char * const dss_generic_clk_source_names[] = {
- [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI_PLL_HSDIV_DISPC",
- [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI_PLL_HSDIV_DSI",
- [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCK",
- [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "DSI_PLL2_HSDIV_DISPC",
- [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = "DSI_PLL2_HSDIV_DSI",
+ [DSS_CLK_SRC_FCK] = "FCK",
+ [DSS_CLK_SRC_PLL1_1] = "PLL1:1",
+ [DSS_CLK_SRC_PLL1_2] = "PLL1:2",
+ [DSS_CLK_SRC_PLL1_3] = "PLL1:3",
+ [DSS_CLK_SRC_PLL2_1] = "PLL2:1",
+ [DSS_CLK_SRC_PLL2_2] = "PLL2:2",
+ [DSS_CLK_SRC_PLL2_3] = "PLL2:3",
+ [DSS_CLK_SRC_HDMI_PLL] = "HDMI PLL",
};
static bool dss_initialized;
@@ -203,68 +207,70 @@ void dss_ctrl_pll_enable(enum dss_pll_id pll_id, bool enable)
1 << shift, val << shift);
}
-void dss_ctrl_pll_set_control_mux(enum dss_pll_id pll_id,
+static int dss_ctrl_pll_set_control_mux(enum dss_clk_source clk_src,
enum omap_channel channel)
{
unsigned shift, val;
if (!dss.syscon_pll_ctrl)
- return;
+ return -EINVAL;
switch (channel) {
case OMAP_DSS_CHANNEL_LCD:
shift = 3;
- switch (pll_id) {
- case DSS_PLL_VIDEO1:
+ switch (clk_src) {
+ case DSS_CLK_SRC_PLL1_1:
val = 0; break;
- case DSS_PLL_HDMI:
+ case DSS_CLK_SRC_HDMI_PLL:
val = 1; break;
default:
DSSERR("error in PLL mux config for LCD\n");
- return;
+ return -EINVAL;
}
break;
case OMAP_DSS_CHANNEL_LCD2:
shift = 5;
- switch (pll_id) {
- case DSS_PLL_VIDEO1:
+ switch (clk_src) {
+ case DSS_CLK_SRC_PLL1_3:
val = 0; break;
- case DSS_PLL_VIDEO2:
+ case DSS_CLK_SRC_PLL2_3:
val = 1; break;
- case DSS_PLL_HDMI:
+ case DSS_CLK_SRC_HDMI_PLL:
val = 2; break;
default:
DSSERR("error in PLL mux config for LCD2\n");
- return;
+ return -EINVAL;
}
break;
case OMAP_DSS_CHANNEL_LCD3:
shift = 7;
- switch (pll_id) {
- case DSS_PLL_VIDEO1:
- val = 1; break;
- case DSS_PLL_VIDEO2:
+ switch (clk_src) {
+ case DSS_CLK_SRC_PLL2_1:
val = 0; break;
- case DSS_PLL_HDMI:
+ case DSS_CLK_SRC_PLL1_3:
+ val = 1; break;
+ case DSS_CLK_SRC_HDMI_PLL:
val = 2; break;
default:
DSSERR("error in PLL mux config for LCD3\n");
- return;
+ return -EINVAL;
}
break;
default:
DSSERR("error in PLL mux config\n");
- return;
+ return -EINVAL;
}
regmap_update_bits(dss.syscon_pll_ctrl, dss.syscon_pll_ctrl_offset,
0x3 << shift, val << shift);
+
+ return 0;
}
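Each LCD channel's PLL mux is a 2-bit field in the syscon register, at bit offsets 3, 5 and 7 for LCD, LCD2 and LCD3. The regmap_update_bits() call above amounts to this read-modify-write (a sketch assuming a hypothetical MMIO mapping 'base'):

	u32 v = readl(base + dss.syscon_pll_ctrl_offset);

	v &= ~(0x3 << shift);		/* clear the 2-bit mux field */
	v |= (val & 0x3) << shift;	/* install the new source */
	writel(v, base + dss.syscon_pll_ctrl_offset);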
void dss_sdi_init(int datapairs)
@@ -354,14 +360,14 @@ void dss_sdi_disable(void)
REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */
}
-const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src)
+const char *dss_get_clk_source_name(enum dss_clk_source clk_src)
{
return dss_generic_clk_source_names[clk_src];
}
void dss_dump_clocks(struct seq_file *s)
{
- const char *fclk_name, *fclk_real_name;
+ const char *fclk_name;
unsigned long fclk_rate;
if (dss_runtime_get())
@@ -369,12 +375,11 @@ void dss_dump_clocks(struct seq_file *s)
seq_printf(s, "- DSS -\n");
- fclk_name = dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_FCK);
- fclk_real_name = dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_FCK);
+ fclk_name = dss_get_clk_source_name(DSS_CLK_SRC_FCK);
fclk_rate = clk_get_rate(dss.dss_clk);
- seq_printf(s, "%s (%s) = %lu\n",
- fclk_name, fclk_real_name,
+ seq_printf(s, "%s = %lu\n",
+ fclk_name,
fclk_rate);
dss_runtime_put();
@@ -403,19 +408,42 @@ static void dss_dump_regs(struct seq_file *s)
#undef DUMPREG
}
-static void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src)
+static int dss_get_channel_index(enum omap_channel channel)
+{
+ switch (channel) {
+ case OMAP_DSS_CHANNEL_LCD:
+ return 0;
+ case OMAP_DSS_CHANNEL_LCD2:
+ return 1;
+ case OMAP_DSS_CHANNEL_LCD3:
+ return 2;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+}
+
+static void dss_select_dispc_clk_source(enum dss_clk_source clk_src)
{
int b;
u8 start, end;
+	/*
+	 * We always use the PRCM clock as the DISPC func clock, except on
+	 * DSS3, where we don't have separate DISPC and LCD clock sources.
+	 */
+ if (WARN_ON(dss_has_feature(FEAT_LCD_CLK_SRC) &&
+ clk_src != DSS_CLK_SRC_FCK))
+ return;
+
switch (clk_src) {
- case OMAP_DSS_CLK_SRC_FCK:
+ case DSS_CLK_SRC_FCK:
b = 0;
break;
- case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
+ case DSS_CLK_SRC_PLL1_1:
b = 1;
break;
- case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
+ case DSS_CLK_SRC_PLL2_1:
b = 2;
break;
default:
@@ -431,19 +459,19 @@ static void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src)
}
void dss_select_dsi_clk_source(int dsi_module,
- enum omap_dss_clk_source clk_src)
+ enum dss_clk_source clk_src)
{
int b, pos;
switch (clk_src) {
- case OMAP_DSS_CLK_SRC_FCK:
+ case DSS_CLK_SRC_FCK:
b = 0;
break;
- case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI:
+ case DSS_CLK_SRC_PLL1_2:
BUG_ON(dsi_module != 0);
b = 1;
break;
- case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI:
+ case DSS_CLK_SRC_PLL2_2:
BUG_ON(dsi_module != 1);
b = 1;
break;
@@ -458,59 +486,125 @@ void dss_select_dsi_clk_source(int dsi_module,
dss.dsi_clk_source[dsi_module] = clk_src;
}
+static int dss_lcd_clk_mux_dra7(enum omap_channel channel,
+ enum dss_clk_source clk_src)
+{
+ const u8 ctrl_bits[] = {
+ [OMAP_DSS_CHANNEL_LCD] = 0,
+ [OMAP_DSS_CHANNEL_LCD2] = 12,
+ [OMAP_DSS_CHANNEL_LCD3] = 19,
+ };
+
+ u8 ctrl_bit = ctrl_bits[channel];
+ int r;
+
+ if (clk_src == DSS_CLK_SRC_FCK) {
+ /* LCDx_CLK_SWITCH */
+ REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
+ return -EINVAL;
+ }
+
+ r = dss_ctrl_pll_set_control_mux(clk_src, channel);
+ if (r)
+ return r;
+
+ REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
+
+ return 0;
+}
+
+static int dss_lcd_clk_mux_omap5(enum omap_channel channel,
+ enum dss_clk_source clk_src)
+{
+ const u8 ctrl_bits[] = {
+ [OMAP_DSS_CHANNEL_LCD] = 0,
+ [OMAP_DSS_CHANNEL_LCD2] = 12,
+ [OMAP_DSS_CHANNEL_LCD3] = 19,
+ };
+ const enum dss_clk_source allowed_plls[] = {
+ [OMAP_DSS_CHANNEL_LCD] = DSS_CLK_SRC_PLL1_1,
+ [OMAP_DSS_CHANNEL_LCD2] = DSS_CLK_SRC_FCK,
+ [OMAP_DSS_CHANNEL_LCD3] = DSS_CLK_SRC_PLL2_1,
+ };
+
+ u8 ctrl_bit = ctrl_bits[channel];
+
+ if (clk_src == DSS_CLK_SRC_FCK) {
+ /* LCDx_CLK_SWITCH */
+ REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
+ return -EINVAL;
+ }
+
+ if (WARN_ON(allowed_plls[channel] != clk_src))
+ return -EINVAL;
+
+ REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
+
+ return 0;
+}
+
+static int dss_lcd_clk_mux_omap4(enum omap_channel channel,
+ enum dss_clk_source clk_src)
+{
+ const u8 ctrl_bits[] = {
+ [OMAP_DSS_CHANNEL_LCD] = 0,
+ [OMAP_DSS_CHANNEL_LCD2] = 12,
+ };
+ const enum dss_clk_source allowed_plls[] = {
+ [OMAP_DSS_CHANNEL_LCD] = DSS_CLK_SRC_PLL1_1,
+ [OMAP_DSS_CHANNEL_LCD2] = DSS_CLK_SRC_PLL2_1,
+ };
+
+ u8 ctrl_bit = ctrl_bits[channel];
+
+ if (clk_src == DSS_CLK_SRC_FCK) {
+ /* LCDx_CLK_SWITCH */
+ REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
+ return 0;
+ }
+
+ if (WARN_ON(allowed_plls[channel] != clk_src))
+ return -EINVAL;
+
+ REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
+
+ return 0;
+}
+
void dss_select_lcd_clk_source(enum omap_channel channel,
- enum omap_dss_clk_source clk_src)
+ enum dss_clk_source clk_src)
{
- int b, ix, pos;
+ int idx = dss_get_channel_index(channel);
+ int r;
if (!dss_has_feature(FEAT_LCD_CLK_SRC)) {
dss_select_dispc_clk_source(clk_src);
+ dss.lcd_clk_source[idx] = clk_src;
return;
}
- switch (clk_src) {
- case OMAP_DSS_CLK_SRC_FCK:
- b = 0;
- break;
- case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
- BUG_ON(channel != OMAP_DSS_CHANNEL_LCD);
- b = 1;
- break;
- case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
- BUG_ON(channel != OMAP_DSS_CHANNEL_LCD2 &&
- channel != OMAP_DSS_CHANNEL_LCD3);
- b = 1;
- break;
- default:
- BUG();
+ r = dss.feat->select_lcd_source(channel, clk_src);
+ if (r)
return;
- }
-
- pos = channel == OMAP_DSS_CHANNEL_LCD ? 0 :
- (channel == OMAP_DSS_CHANNEL_LCD2 ? 12 : 19);
- REG_FLD_MOD(DSS_CONTROL, b, pos, pos); /* LCDx_CLK_SWITCH */
- ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 :
- (channel == OMAP_DSS_CHANNEL_LCD2 ? 1 : 2);
- dss.lcd_clk_source[ix] = clk_src;
+ dss.lcd_clk_source[idx] = clk_src;
}
-enum omap_dss_clk_source dss_get_dispc_clk_source(void)
+enum dss_clk_source dss_get_dispc_clk_source(void)
{
return dss.dispc_clk_source;
}
-enum omap_dss_clk_source dss_get_dsi_clk_source(int dsi_module)
+enum dss_clk_source dss_get_dsi_clk_source(int dsi_module)
{
return dss.dsi_clk_source[dsi_module];
}
-enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel)
+enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel)
{
if (dss_has_feature(FEAT_LCD_CLK_SRC)) {
- int ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 :
- (channel == OMAP_DSS_CHANNEL_LCD2 ? 1 : 2);
- return dss.lcd_clk_source[ix];
+ int idx = dss_get_channel_index(channel);
+ return dss.lcd_clk_source[idx];
} else {
/* LCD_CLK source is the same as DISPC_FCLK source for
* OMAP2 and OMAP3 */
@@ -859,6 +953,7 @@ static const struct dss_features omap44xx_dss_feats = {
.dpi_select_source = &dss_dpi_select_source_omap4,
.ports = omap2plus_ports,
.num_ports = ARRAY_SIZE(omap2plus_ports),
+ .select_lcd_source = &dss_lcd_clk_mux_omap4,
};
static const struct dss_features omap54xx_dss_feats = {
@@ -868,6 +963,7 @@ static const struct dss_features omap54xx_dss_feats = {
.dpi_select_source = &dss_dpi_select_source_omap5,
.ports = omap2plus_ports,
.num_ports = ARRAY_SIZE(omap2plus_ports),
+ .select_lcd_source = &dss_lcd_clk_mux_omap5,
};
static const struct dss_features am43xx_dss_feats = {
@@ -886,6 +982,7 @@ static const struct dss_features dra7xx_dss_feats = {
.dpi_select_source = &dss_dpi_select_source_dra7xx,
.ports = dra7xx_ports,
.num_ports = ARRAY_SIZE(dra7xx_ports),
+ .select_lcd_source = &dss_lcd_clk_mux_dra7,
};
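The new .select_lcd_source hook follows the driver's per-SoC dispatch pattern: each dss_features instance supplies the mux routine matching its DSS_CONTROL layout, and SoCs without LCD clock muxing (e.g. am43xx above) leave it unset. A condensed sketch of the call shape (wrapper name hypothetical):

static int example_select_lcd_source(const struct dss_features *feat,
		enum omap_channel channel, enum dss_clk_source clk_src)
{
	if (!feat->select_lcd_source)
		return -ENODEV;	/* SoC without an LCD clock mux */

	return feat->select_lcd_source(channel, clk_src);
}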
static int dss_init_features(struct platform_device *pdev)
@@ -1143,18 +1240,18 @@ static int dss_bind(struct device *dev)
/* Select DPLL */
REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);
- dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
+ dss_select_dispc_clk_source(DSS_CLK_SRC_FCK);
#ifdef CONFIG_OMAP2_DSS_VENC
REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */
REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */
REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */
#endif
- dss.dsi_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
- dss.dsi_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
- dss.dispc_clk_source = OMAP_DSS_CLK_SRC_FCK;
- dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
- dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
+ dss.dsi_clk_source[0] = DSS_CLK_SRC_FCK;
+ dss.dsi_clk_source[1] = DSS_CLK_SRC_FCK;
+ dss.dispc_clk_source = DSS_CLK_SRC_FCK;
+ dss.lcd_clk_source[0] = DSS_CLK_SRC_FCK;
+ dss.lcd_clk_source[1] = DSS_CLK_SRC_FCK;
rev = dss_read_reg(DSS_REVISION);
printk(KERN_INFO "OMAP DSS rev %d.%d\n",
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h
index 38e6ab501..4fd06dc41 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss.h
@@ -102,6 +102,20 @@ enum dss_writeback_channel {
DSS_WB_LCD3_MGR = 7,
};
+enum dss_clk_source {
+ DSS_CLK_SRC_FCK = 0,
+
+ DSS_CLK_SRC_PLL1_1,
+ DSS_CLK_SRC_PLL1_2,
+ DSS_CLK_SRC_PLL1_3,
+
+ DSS_CLK_SRC_PLL2_1,
+ DSS_CLK_SRC_PLL2_2,
+ DSS_CLK_SRC_PLL2_3,
+
+ DSS_CLK_SRC_HDMI_PLL,
+};
+
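In this naming scheme, DSS_CLK_SRC_PLLn_m is hsdiv output m of PLL n, and the PLLn_m enumerators are consecutive. A hypothetical helper recovering the zero-based output index (the driver's own version of this mapping is added in pll.c as dss_pll_get_clkout_idx_for_src()):

static inline unsigned example_src_to_clkout_idx(enum dss_clk_source src)
{
	/* three consecutive outputs per PLL: PLLn_1 -> 0 ... PLLn_3 -> 2 */
	return (src - DSS_CLK_SRC_PLL1_1) % 3;
}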
enum dss_pll_id {
DSS_PLL_DSI1,
DSS_PLL_DSI2,
@@ -114,6 +128,11 @@ struct dss_pll;
#define DSS_PLL_MAX_HSDIVS 4
+enum dss_pll_type {
+ DSS_PLL_TYPE_A,
+ DSS_PLL_TYPE_B,
+};
+
/*
* Type-A PLLs: clkout[]/mX[] refer to hsdiv outputs m4, m5, m6, m7.
* Type-B PLLs: clkout[0] refers to m2.
@@ -140,6 +159,8 @@ struct dss_pll_ops {
};
struct dss_pll_hw {
+ enum dss_pll_type type;
+
unsigned n_max;
unsigned m_min;
unsigned m_max;
@@ -227,7 +248,7 @@ unsigned long dss_get_dispc_clk_rate(void);
int dss_dpi_select_source(int port, enum omap_channel channel);
void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select);
enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void);
-const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src);
+const char *dss_get_clk_source_name(enum dss_clk_source clk_src);
void dss_dump_clocks(struct seq_file *s);
/* DSS VIDEO PLL */
@@ -244,20 +265,18 @@ void dss_debug_dump_clocks(struct seq_file *s);
#endif
void dss_ctrl_pll_enable(enum dss_pll_id pll_id, bool enable);
-void dss_ctrl_pll_set_control_mux(enum dss_pll_id pll_id,
- enum omap_channel channel);
void dss_sdi_init(int datapairs);
int dss_sdi_enable(void);
void dss_sdi_disable(void);
void dss_select_dsi_clk_source(int dsi_module,
- enum omap_dss_clk_source clk_src);
+ enum dss_clk_source clk_src);
void dss_select_lcd_clk_source(enum omap_channel channel,
- enum omap_dss_clk_source clk_src);
-enum omap_dss_clk_source dss_get_dispc_clk_source(void);
-enum omap_dss_clk_source dss_get_dsi_clk_source(int dsi_module);
-enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel);
+ enum dss_clk_source clk_src);
+enum dss_clk_source dss_get_dispc_clk_source(void);
+enum dss_clk_source dss_get_dsi_clk_source(int dsi_module);
+enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel);
void dss_set_venc_output(enum omap_dss_venc_type type);
void dss_set_dac_pwrdn_bgz(bool enable);
@@ -409,17 +428,23 @@ typedef bool (*dss_hsdiv_calc_func)(int m_dispc, unsigned long dispc,
int dss_pll_register(struct dss_pll *pll);
void dss_pll_unregister(struct dss_pll *pll);
struct dss_pll *dss_pll_find(const char *name);
+struct dss_pll *dss_pll_find_by_src(enum dss_clk_source src);
+unsigned dss_pll_get_clkout_idx_for_src(enum dss_clk_source src);
int dss_pll_enable(struct dss_pll *pll);
void dss_pll_disable(struct dss_pll *pll);
int dss_pll_set_config(struct dss_pll *pll,
const struct dss_pll_clock_info *cinfo);
-bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco,
+bool dss_pll_hsdiv_calc_a(const struct dss_pll *pll, unsigned long clkdco,
unsigned long out_min, unsigned long out_max,
dss_hsdiv_calc_func func, void *data);
-bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin,
+bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin,
unsigned long pll_min, unsigned long pll_max,
dss_pll_calc_func func, void *data);
+
+bool dss_pll_calc_b(const struct dss_pll *pll, unsigned long clkin,
+ unsigned long target_clkout, struct dss_pll_clock_info *cinfo);
+
int dss_pll_write_config_type_a(struct dss_pll *pll,
const struct dss_pll_clock_info *cinfo);
int dss_pll_write_config_type_b(struct dss_pll *pll,
diff --git a/drivers/gpu/drm/omapdrm/dss/dss_features.c b/drivers/gpu/drm/omapdrm/dss/dss_features.c
index c886a2927..ee5b93ce2 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss_features.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss_features.c
@@ -23,8 +23,7 @@
#include <linux/err.h>
#include <linux/slab.h>
-#include <video/omapdss.h>
-
+#include "omapdss.h"
#include "dss.h"
#include "dss_features.h"
@@ -50,7 +49,6 @@ struct omap_dss_features {
const enum omap_dss_output_id *supported_outputs;
const enum omap_color_mode *supported_color_modes;
const enum omap_overlay_caps *overlay_caps;
- const char * const *clksrc_names;
const struct dss_param_range *dss_params;
const enum omap_dss_rotation_type supported_rotation_types;
@@ -389,34 +387,6 @@ static const enum omap_overlay_caps omap4_dss_overlay_caps[] = {
OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
};
-static const char * const omap2_dss_clk_source_names[] = {
- [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "N/A",
- [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "N/A",
- [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCLK1",
-};
-
-static const char * const omap3_dss_clk_source_names[] = {
- [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI1_PLL_FCLK",
- [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI2_PLL_FCLK",
- [OMAP_DSS_CLK_SRC_FCK] = "DSS1_ALWON_FCLK",
-};
-
-static const char * const omap4_dss_clk_source_names[] = {
- [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "PLL1_CLK1",
- [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "PLL1_CLK2",
- [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCLK",
- [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "PLL2_CLK1",
- [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = "PLL2_CLK2",
-};
-
-static const char * const omap5_dss_clk_source_names[] = {
- [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DPLL_DSI1_A_CLK1",
- [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DPLL_DSI1_A_CLK2",
- [OMAP_DSS_CLK_SRC_FCK] = "DSS_CLK",
- [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "DPLL_DSI1_C_CLK1",
- [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = "DPLL_DSI1_C_CLK2",
-};
-
static const struct dss_param_range omap2_dss_param_range[] = {
[FEAT_PARAM_DSS_FCK] = { 0, 133000000 },
[FEAT_PARAM_DSS_PCD] = { 2, 255 },
@@ -631,7 +601,6 @@ static const struct omap_dss_features omap2_dss_features = {
.supported_outputs = omap2_dss_supported_outputs,
.supported_color_modes = omap2_dss_supported_color_modes,
.overlay_caps = omap2_dss_overlay_caps,
- .clksrc_names = omap2_dss_clk_source_names,
.dss_params = omap2_dss_param_range,
.supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
.buffer_size_unit = 1,
@@ -652,7 +621,6 @@ static const struct omap_dss_features omap3430_dss_features = {
.supported_outputs = omap3430_dss_supported_outputs,
.supported_color_modes = omap3_dss_supported_color_modes,
.overlay_caps = omap3430_dss_overlay_caps,
- .clksrc_names = omap3_dss_clk_source_names,
.dss_params = omap3_dss_param_range,
.supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
.buffer_size_unit = 1,
@@ -676,7 +644,6 @@ static const struct omap_dss_features am35xx_dss_features = {
.supported_outputs = omap3430_dss_supported_outputs,
.supported_color_modes = omap3_dss_supported_color_modes,
.overlay_caps = omap3430_dss_overlay_caps,
- .clksrc_names = omap3_dss_clk_source_names,
.dss_params = omap3_dss_param_range,
.supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
.buffer_size_unit = 1,
@@ -696,7 +663,6 @@ static const struct omap_dss_features am43xx_dss_features = {
.supported_outputs = am43xx_dss_supported_outputs,
.supported_color_modes = omap3_dss_supported_color_modes,
.overlay_caps = omap3430_dss_overlay_caps,
- .clksrc_names = omap2_dss_clk_source_names,
.dss_params = am43xx_dss_param_range,
.supported_rotation_types = OMAP_DSS_ROT_DMA,
.buffer_size_unit = 1,
@@ -716,7 +682,6 @@ static const struct omap_dss_features omap3630_dss_features = {
.supported_outputs = omap3630_dss_supported_outputs,
.supported_color_modes = omap3_dss_supported_color_modes,
.overlay_caps = omap3630_dss_overlay_caps,
- .clksrc_names = omap3_dss_clk_source_names,
.dss_params = omap3_dss_param_range,
.supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
.buffer_size_unit = 1,
@@ -738,7 +703,6 @@ static const struct omap_dss_features omap4430_es1_0_dss_features = {
.supported_outputs = omap4_dss_supported_outputs,
.supported_color_modes = omap4_dss_supported_color_modes,
.overlay_caps = omap4_dss_overlay_caps,
- .clksrc_names = omap4_dss_clk_source_names,
.dss_params = omap4_dss_param_range,
.supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
.buffer_size_unit = 16,
@@ -759,7 +723,6 @@ static const struct omap_dss_features omap4430_es2_0_1_2_dss_features = {
.supported_outputs = omap4_dss_supported_outputs,
.supported_color_modes = omap4_dss_supported_color_modes,
.overlay_caps = omap4_dss_overlay_caps,
- .clksrc_names = omap4_dss_clk_source_names,
.dss_params = omap4_dss_param_range,
.supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
.buffer_size_unit = 16,
@@ -780,7 +743,6 @@ static const struct omap_dss_features omap4_dss_features = {
.supported_outputs = omap4_dss_supported_outputs,
.supported_color_modes = omap4_dss_supported_color_modes,
.overlay_caps = omap4_dss_overlay_caps,
- .clksrc_names = omap4_dss_clk_source_names,
.dss_params = omap4_dss_param_range,
.supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
.buffer_size_unit = 16,
@@ -801,7 +763,6 @@ static const struct omap_dss_features omap5_dss_features = {
.supported_outputs = omap5_dss_supported_outputs,
.supported_color_modes = omap4_dss_supported_color_modes,
.overlay_caps = omap4_dss_overlay_caps,
- .clksrc_names = omap5_dss_clk_source_names,
.dss_params = omap5_dss_param_range,
.supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
.buffer_size_unit = 16,
@@ -859,11 +820,6 @@ bool dss_feat_color_mode_supported(enum omap_plane plane,
color_mode;
}
-const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id)
-{
- return omap_current_dss_features->clksrc_names[id];
-}
-
u32 dss_feat_get_buffer_size_unit(void)
{
return omap_current_dss_features->buffer_size_unit;
diff --git a/drivers/gpu/drm/omapdrm/dss/dss_features.h b/drivers/gpu/drm/omapdrm/dss/dss_features.h
index 3d67d39f1..bb4b7f0e6 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss_features.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss_features.h
@@ -91,7 +91,6 @@ unsigned long dss_feat_get_param_max(enum dss_range_param param);
enum omap_overlay_caps dss_feat_get_overlay_caps(enum omap_plane plane);
bool dss_feat_color_mode_supported(enum omap_plane plane,
enum omap_color_mode color_mode);
-const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id);
u32 dss_feat_get_buffer_size_unit(void); /* in bytes */
u32 dss_feat_get_burst_size_unit(void); /* in bytes */
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi.h b/drivers/gpu/drm/omapdrm/dss/hdmi.h
index 53616b02b..63e711545 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi.h
@@ -23,8 +23,9 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/hdmi.h>
-#include <video/omapdss.h>
+#include <sound/omap-hdmi-audio.h>
+#include "omapdss.h"
#include "dss.h"
/* HDMI Wrapper */
@@ -240,6 +241,7 @@ struct hdmi_pll_data {
void __iomem *base;
+ struct platform_device *pdev;
struct hdmi_wp_data *wp;
};
@@ -306,8 +308,6 @@ phys_addr_t hdmi_wp_get_audio_dma_addr(struct hdmi_wp_data *wp);
/* HDMI PLL funcs */
void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s);
-void hdmi_pll_compute(struct hdmi_pll_data *pll,
- unsigned long target_tmds, struct dss_pll_clock_info *pi);
int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll,
struct hdmi_wp_data *wp);
void hdmi_pll_uninit(struct hdmi_pll_data *hpll);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index 4d46cdf7a..cbd28dfdb 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -34,9 +34,9 @@
#include <linux/regulator/consumer.h>
#include <linux/component.h>
#include <linux/of.h>
-#include <video/omapdss.h>
#include <sound/omap-hdmi-audio.h>
+#include "omapdss.h"
#include "hdmi4_core.h"
#include "dss.h"
#include "dss_features.h"
@@ -177,7 +177,11 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
if (p->double_pixel)
pc *= 2;
- hdmi_pll_compute(&hdmi.pll, pc, &hdmi_cinfo);
+ /* DSS_HDMI_TCLK is bitclk / 10 */
+ pc *= 10;
+
+ dss_pll_calc_b(&hdmi.pll.pll, clk_get_rate(hdmi.pll.pll.clkin),
+ pc, &hdmi_cinfo);
r = dss_pll_enable(&hdmi.pll.pll);
if (r) {
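Multiplying pc by 10 makes explicit what hdmi_pll_compute() used to do internally: the TMDS serializer shifts ten bits per character, so the PLL must be programmed for the bit clock rather than the pixel clock. Worked numbers for 1080p60, illustrative only:

	unsigned long pc = 148500000;	/* 1080p60 pixel clock, Hz */

	/* no pixel doubling, so the TMDS character clock equals the
	 * pixel clock; DSS_HDMI_TCLK is bitclk / 10 */
	pc *= 10;			/* PLL target: 1485000000 Hz */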
@@ -204,9 +208,6 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
hdmi4_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg);
- /* bypass TV gamma table */
- dispc_enable_gamma_table(0);
-
/* tv size */
dss_mgr_set_timings(channel, p);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index 9255c0e1e..0c0a5139a 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -39,9 +39,9 @@
#include <linux/regulator/consumer.h>
#include <linux/component.h>
#include <linux/of.h>
-#include <video/omapdss.h>
#include <sound/omap-hdmi-audio.h>
+#include "omapdss.h"
#include "hdmi5_core.h"
#include "dss.h"
#include "dss_features.h"
@@ -189,7 +189,11 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
if (p->double_pixel)
pc *= 2;
- hdmi_pll_compute(&hdmi.pll, pc, &hdmi_cinfo);
+ /* DSS_HDMI_TCLK is bitclk / 10 */
+ pc *= 10;
+
+ dss_pll_calc_b(&hdmi.pll.pll, clk_get_rate(hdmi.pll.pll.clkin),
+ pc, &hdmi_cinfo);
/* disable and clear irqs */
hdmi_wp_clear_irqenable(&hdmi.wp, 0xffffffff);
@@ -221,9 +225,6 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
hdmi5_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg);
- /* bypass TV gamma table */
- dispc_enable_gamma_table(0);
-
/* tv size */
dss_mgr_set_timings(channel, p);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_common.c b/drivers/gpu/drm/omapdrm/dss/hdmi_common.c
index 1b8fcc6c4..4dfb67fe5 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_common.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_common.c
@@ -4,8 +4,8 @@
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/of.h>
-#include <video/omapdss.h>
+#include "omapdss.h"
#include "hdmi.h"
int hdmi_parse_lanes_of(struct platform_device *pdev, struct device_node *ep,
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
index f98b750fc..3ead47ccc 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
@@ -14,8 +14,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
-#include <video/omapdss.h>
+#include "omapdss.h"
#include "dss.h"
#include "hdmi.h"
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
index f1015e8b8..b8bf6a9e5 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
@@ -17,9 +17,9 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/seq_file.h>
+#include <linux/pm_runtime.h>
-#include <video/omapdss.h>
-
+#include "omapdss.h"
#include "dss.h"
#include "hdmi.h"
@@ -39,71 +39,14 @@ void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s)
DUMPPLL(PLLCTRL_CFG4);
}
-void hdmi_pll_compute(struct hdmi_pll_data *pll,
- unsigned long target_tmds, struct dss_pll_clock_info *pi)
-{
- unsigned long fint, clkdco, clkout;
- unsigned long target_bitclk, target_clkdco;
- unsigned long min_dco;
- unsigned n, m, mf, m2, sd;
- unsigned long clkin;
- const struct dss_pll_hw *hw = pll->pll.hw;
-
- clkin = clk_get_rate(pll->pll.clkin);
-
- DSSDBG("clkin %lu, target tmds %lu\n", clkin, target_tmds);
-
- target_bitclk = target_tmds * 10;
-
- /* Fint */
- n = DIV_ROUND_UP(clkin, hw->fint_max);
- fint = clkin / n;
-
- /* adjust m2 so that the clkdco will be high enough */
- min_dco = roundup(hw->clkdco_min, fint);
- m2 = DIV_ROUND_UP(min_dco, target_bitclk);
- if (m2 == 0)
- m2 = 1;
-
- target_clkdco = target_bitclk * m2;
- m = target_clkdco / fint;
-
- clkdco = fint * m;
-
- /* adjust clkdco with fractional mf */
- if (WARN_ON(target_clkdco - clkdco > fint))
- mf = 0;
- else
- mf = (u32)div_u64(262144ull * (target_clkdco - clkdco), fint);
-
- if (mf > 0)
- clkdco += (u32)div_u64((u64)mf * fint, 262144);
-
- clkout = clkdco / m2;
-
- /* sigma-delta */
- sd = DIV_ROUND_UP(fint * m, 250000000);
-
- DSSDBG("N = %u, M = %u, M.f = %u, M2 = %u, SD = %u\n",
- n, m, mf, m2, sd);
- DSSDBG("Fint %lu, clkdco %lu, clkout %lu\n", fint, clkdco, clkout);
-
- pi->n = n;
- pi->m = m;
- pi->mf = mf;
- pi->mX[0] = m2;
- pi->sd = sd;
-
- pi->fint = fint;
- pi->clkdco = clkdco;
- pi->clkout[0] = clkout;
-}
-
static int hdmi_pll_enable(struct dss_pll *dsspll)
{
struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll);
struct hdmi_wp_data *wp = pll->wp;
- u16 r = 0;
+ int r;
+
+ r = pm_runtime_get_sync(&pll->pdev->dev);
+ WARN_ON(r < 0);
dss_ctrl_pll_enable(DSS_PLL_HDMI, true);
@@ -118,10 +61,14 @@ static void hdmi_pll_disable(struct dss_pll *dsspll)
{
struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll);
struct hdmi_wp_data *wp = pll->wp;
+ int r;
hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_ALLOFF);
dss_ctrl_pll_enable(DSS_PLL_HDMI, false);
+
+ r = pm_runtime_put_sync(&pll->pdev->dev);
+ WARN_ON(r < 0 && r != -ENOSYS);
}
static const struct dss_pll_ops dsi_pll_ops = {
@@ -131,6 +78,8 @@ static const struct dss_pll_ops dsi_pll_ops = {
};
static const struct dss_pll_hw dss_omap4_hdmi_pll_hw = {
+ .type = DSS_PLL_TYPE_B,
+
.n_max = 255,
.m_min = 20,
.m_max = 4095,
@@ -154,6 +103,8 @@ static const struct dss_pll_hw dss_omap4_hdmi_pll_hw = {
};
static const struct dss_pll_hw dss_omap5_hdmi_pll_hw = {
+ .type = DSS_PLL_TYPE_B,
+
.n_max = 255,
.m_min = 20,
.m_max = 2045,
@@ -225,6 +176,7 @@ int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll,
int r;
struct resource *res;
+ pll->pdev = pdev;
pll->wp = wp;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll");
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
index 055f62fca..203694a52 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
@@ -15,8 +15,8 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
-#include <video/omapdss.h>
+#include "omapdss.h"
#include "dss.h"
#include "hdmi.h"
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index d7e7c909b..6eaf1adbd 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -18,7 +18,872 @@
#ifndef __OMAP_DRM_DSS_H
#define __OMAP_DRM_DSS_H
-#include <video/omapdss.h>
+#include <linux/list.h>
+#include <linux/kobject.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <video/videomode.h>
+#include <linux/platform_data/omapdss.h>
+#include <uapi/drm/drm_mode.h>
+
+#define DISPC_IRQ_FRAMEDONE (1 << 0)
+#define DISPC_IRQ_VSYNC (1 << 1)
+#define DISPC_IRQ_EVSYNC_EVEN (1 << 2)
+#define DISPC_IRQ_EVSYNC_ODD (1 << 3)
+#define DISPC_IRQ_ACBIAS_COUNT_STAT (1 << 4)
+#define DISPC_IRQ_PROG_LINE_NUM (1 << 5)
+#define DISPC_IRQ_GFX_FIFO_UNDERFLOW (1 << 6)
+#define DISPC_IRQ_GFX_END_WIN (1 << 7)
+#define DISPC_IRQ_PAL_GAMMA_MASK (1 << 8)
+#define DISPC_IRQ_OCP_ERR (1 << 9)
+#define DISPC_IRQ_VID1_FIFO_UNDERFLOW (1 << 10)
+#define DISPC_IRQ_VID1_END_WIN (1 << 11)
+#define DISPC_IRQ_VID2_FIFO_UNDERFLOW (1 << 12)
+#define DISPC_IRQ_VID2_END_WIN (1 << 13)
+#define DISPC_IRQ_SYNC_LOST (1 << 14)
+#define DISPC_IRQ_SYNC_LOST_DIGIT (1 << 15)
+#define DISPC_IRQ_WAKEUP (1 << 16)
+#define DISPC_IRQ_SYNC_LOST2 (1 << 17)
+#define DISPC_IRQ_VSYNC2 (1 << 18)
+#define DISPC_IRQ_VID3_END_WIN (1 << 19)
+#define DISPC_IRQ_VID3_FIFO_UNDERFLOW (1 << 20)
+#define DISPC_IRQ_ACBIAS_COUNT_STAT2 (1 << 21)
+#define DISPC_IRQ_FRAMEDONE2 (1 << 22)
+#define DISPC_IRQ_FRAMEDONEWB (1 << 23)
+#define DISPC_IRQ_FRAMEDONETV (1 << 24)
+#define DISPC_IRQ_WBBUFFEROVERFLOW (1 << 25)
+#define DISPC_IRQ_WBUNCOMPLETEERROR (1 << 26)
+#define DISPC_IRQ_SYNC_LOST3 (1 << 27)
+#define DISPC_IRQ_VSYNC3 (1 << 28)
+#define DISPC_IRQ_ACBIAS_COUNT_STAT3 (1 << 29)
+#define DISPC_IRQ_FRAMEDONE3 (1 << 30)
+
+struct omap_dss_device;
+struct omap_overlay_manager;
+struct dss_lcd_mgr_config;
+struct snd_aes_iec958;
+struct snd_cea_861_aud_if;
+struct hdmi_avi_infoframe;
+
+enum omap_display_type {
+ OMAP_DISPLAY_TYPE_NONE = 0,
+ OMAP_DISPLAY_TYPE_DPI = 1 << 0,
+ OMAP_DISPLAY_TYPE_DBI = 1 << 1,
+ OMAP_DISPLAY_TYPE_SDI = 1 << 2,
+ OMAP_DISPLAY_TYPE_DSI = 1 << 3,
+ OMAP_DISPLAY_TYPE_VENC = 1 << 4,
+ OMAP_DISPLAY_TYPE_HDMI = 1 << 5,
+ OMAP_DISPLAY_TYPE_DVI = 1 << 6,
+};
+
+enum omap_plane {
+ OMAP_DSS_GFX = 0,
+ OMAP_DSS_VIDEO1 = 1,
+ OMAP_DSS_VIDEO2 = 2,
+ OMAP_DSS_VIDEO3 = 3,
+ OMAP_DSS_WB = 4,
+};
+
+enum omap_channel {
+ OMAP_DSS_CHANNEL_LCD = 0,
+ OMAP_DSS_CHANNEL_DIGIT = 1,
+ OMAP_DSS_CHANNEL_LCD2 = 2,
+ OMAP_DSS_CHANNEL_LCD3 = 3,
+ OMAP_DSS_CHANNEL_WB = 4,
+};
+
+enum omap_color_mode {
+ OMAP_DSS_COLOR_CLUT1 = 1 << 0, /* BITMAP 1 */
+ OMAP_DSS_COLOR_CLUT2 = 1 << 1, /* BITMAP 2 */
+ OMAP_DSS_COLOR_CLUT4 = 1 << 2, /* BITMAP 4 */
+ OMAP_DSS_COLOR_CLUT8 = 1 << 3, /* BITMAP 8 */
+ OMAP_DSS_COLOR_RGB12U = 1 << 4, /* RGB12, 16-bit container */
+ OMAP_DSS_COLOR_ARGB16 = 1 << 5, /* ARGB16 */
+ OMAP_DSS_COLOR_RGB16 = 1 << 6, /* RGB16 */
+ OMAP_DSS_COLOR_RGB24U = 1 << 7, /* RGB24, 32-bit container */
+ OMAP_DSS_COLOR_RGB24P = 1 << 8, /* RGB24, 24-bit container */
+ OMAP_DSS_COLOR_YUV2 = 1 << 9, /* YUV2 4:2:2 co-sited */
+ OMAP_DSS_COLOR_UYVY = 1 << 10, /* UYVY 4:2:2 co-sited */
+ OMAP_DSS_COLOR_ARGB32 = 1 << 11, /* ARGB32 */
+ OMAP_DSS_COLOR_RGBA32 = 1 << 12, /* RGBA32 */
+ OMAP_DSS_COLOR_RGBX32 = 1 << 13, /* RGBx32 */
+ OMAP_DSS_COLOR_NV12 = 1 << 14, /* NV12 format: YUV 4:2:0 */
+ OMAP_DSS_COLOR_RGBA16 = 1 << 15, /* RGBA16 - 4444 */
+ OMAP_DSS_COLOR_RGBX16 = 1 << 16, /* RGBx16 - 4444 */
+ OMAP_DSS_COLOR_ARGB16_1555 = 1 << 17, /* ARGB16 - 1555 */
+ OMAP_DSS_COLOR_XRGB16_1555 = 1 << 18, /* xRGB16 - 1555 */
+};
+
+enum omap_dss_load_mode {
+ OMAP_DSS_LOAD_CLUT_AND_FRAME = 0,
+ OMAP_DSS_LOAD_CLUT_ONLY = 1,
+ OMAP_DSS_LOAD_FRAME_ONLY = 2,
+ OMAP_DSS_LOAD_CLUT_ONCE_FRAME = 3,
+};
+
+enum omap_dss_trans_key_type {
+ OMAP_DSS_COLOR_KEY_GFX_DST = 0,
+ OMAP_DSS_COLOR_KEY_VID_SRC = 1,
+};
+
+enum omap_rfbi_te_mode {
+ OMAP_DSS_RFBI_TE_MODE_1 = 1,
+ OMAP_DSS_RFBI_TE_MODE_2 = 2,
+};
+
+enum omap_dss_signal_level {
+ OMAPDSS_SIG_ACTIVE_LOW,
+ OMAPDSS_SIG_ACTIVE_HIGH,
+};
+
+enum omap_dss_signal_edge {
+ OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ OMAPDSS_DRIVE_SIG_RISING_EDGE,
+};
+
+enum omap_dss_venc_type {
+ OMAP_DSS_VENC_TYPE_COMPOSITE,
+ OMAP_DSS_VENC_TYPE_SVIDEO,
+};
+
+enum omap_dss_dsi_pixel_format {
+ OMAP_DSS_DSI_FMT_RGB888,
+ OMAP_DSS_DSI_FMT_RGB666,
+ OMAP_DSS_DSI_FMT_RGB666_PACKED,
+ OMAP_DSS_DSI_FMT_RGB565,
+};
+
+enum omap_dss_dsi_mode {
+ OMAP_DSS_DSI_CMD_MODE = 0,
+ OMAP_DSS_DSI_VIDEO_MODE,
+};
+
+enum omap_display_caps {
+ OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE = 1 << 0,
+ OMAP_DSS_DISPLAY_CAP_TEAR_ELIM = 1 << 1,
+};
+
+enum omap_dss_display_state {
+ OMAP_DSS_DISPLAY_DISABLED = 0,
+ OMAP_DSS_DISPLAY_ACTIVE,
+};
+
+enum omap_dss_rotation_type {
+ OMAP_DSS_ROT_DMA = 1 << 0,
+ OMAP_DSS_ROT_VRFB = 1 << 1,
+ OMAP_DSS_ROT_TILER = 1 << 2,
+};
+
+/* clockwise rotation angle */
+enum omap_dss_rotation_angle {
+ OMAP_DSS_ROT_0 = 0,
+ OMAP_DSS_ROT_90 = 1,
+ OMAP_DSS_ROT_180 = 2,
+ OMAP_DSS_ROT_270 = 3,
+};
+
+enum omap_overlay_caps {
+ OMAP_DSS_OVL_CAP_SCALE = 1 << 0,
+ OMAP_DSS_OVL_CAP_GLOBAL_ALPHA = 1 << 1,
+ OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA = 1 << 2,
+ OMAP_DSS_OVL_CAP_ZORDER = 1 << 3,
+ OMAP_DSS_OVL_CAP_POS = 1 << 4,
+ OMAP_DSS_OVL_CAP_REPLICATION = 1 << 5,
+};
+
+enum omap_overlay_manager_caps {
+ OMAP_DSS_DUMMY_VALUE, /* add a dummy value to prevent compiler error */
+};
+
+enum omap_dss_clk_source {
+ OMAP_DSS_CLK_SRC_FCK = 0, /* OMAP2/3: DSS1_ALWON_FCLK
+ * OMAP4: DSS_FCLK */
+ OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC, /* OMAP3: DSI1_PLL_FCLK
+ * OMAP4: PLL1_CLK1 */
+ OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI, /* OMAP3: DSI2_PLL_FCLK
+ * OMAP4: PLL1_CLK2 */
+ OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC, /* OMAP4: PLL2_CLK1 */
+ OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI, /* OMAP4: PLL2_CLK2 */
+};
+
+enum omap_hdmi_flags {
+ OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP = 1 << 0,
+};
+
+enum omap_dss_output_id {
+ OMAP_DSS_OUTPUT_DPI = 1 << 0,
+ OMAP_DSS_OUTPUT_DBI = 1 << 1,
+ OMAP_DSS_OUTPUT_SDI = 1 << 2,
+ OMAP_DSS_OUTPUT_DSI1 = 1 << 3,
+ OMAP_DSS_OUTPUT_DSI2 = 1 << 4,
+ OMAP_DSS_OUTPUT_VENC = 1 << 5,
+ OMAP_DSS_OUTPUT_HDMI = 1 << 6,
+};
+
+/* RFBI */
+
+struct rfbi_timings {
+ int cs_on_time;
+ int cs_off_time;
+ int we_on_time;
+ int we_off_time;
+ int re_on_time;
+ int re_off_time;
+ int we_cycle_time;
+ int re_cycle_time;
+ int cs_pulse_width;
+ int access_time;
+
+ int clk_div;
+
+ u32 tim[5]; /* set by rfbi_convert_timings() */
+
+ int converted;
+};
+
+/* DSI */
+
+enum omap_dss_dsi_trans_mode {
+ /* Sync Pulses: both sync start and end packets sent */
+ OMAP_DSS_DSI_PULSE_MODE,
+ /* Sync Events: only sync start packets sent */
+ OMAP_DSS_DSI_EVENT_MODE,
+ /* Burst: only sync start packets sent, pixels are time compressed */
+ OMAP_DSS_DSI_BURST_MODE,
+};
+
+struct omap_dss_dsi_videomode_timings {
+ unsigned long hsclk;
+
+ unsigned ndl;
+ unsigned bitspp;
+
+ /* pixels */
+ u16 hact;
+ /* lines */
+ u16 vact;
+
+ /* DSI video mode blanking data */
+ /* Unit: byte clock cycles */
+ u16 hss;
+ u16 hsa;
+ u16 hse;
+ u16 hfp;
+ u16 hbp;
+ /* Unit: line clocks */
+ u16 vsa;
+ u16 vfp;
+ u16 vbp;
+
+ /* DSI blanking modes */
+ int blanking_mode;
+ int hsa_blanking_mode;
+ int hbp_blanking_mode;
+ int hfp_blanking_mode;
+
+ enum omap_dss_dsi_trans_mode trans_mode;
+
+ bool ddr_clk_always_on;
+ int window_sync;
+};
+
+struct omap_dss_dsi_config {
+ enum omap_dss_dsi_mode mode;
+ enum omap_dss_dsi_pixel_format pixel_format;
+ const struct omap_video_timings *timings;
+
+ unsigned long hs_clk_min, hs_clk_max;
+ unsigned long lp_clk_min, lp_clk_max;
+
+ bool ddr_clk_always_on;
+ enum omap_dss_dsi_trans_mode trans_mode;
+};
+
+struct omap_video_timings {
+ /* Unit: pixels */
+ u16 x_res;
+ /* Unit: pixels */
+ u16 y_res;
+ /* Unit: Hz */
+ u32 pixelclock;
+ /* Unit: pixel clocks */
+ u16 hsw; /* Horizontal synchronization pulse width */
+ /* Unit: pixel clocks */
+ u16 hfp; /* Horizontal front porch */
+ /* Unit: pixel clocks */
+ u16 hbp; /* Horizontal back porch */
+ /* Unit: line clocks */
+ u16 vsw; /* Vertical synchronization pulse width */
+ /* Unit: line clocks */
+ u16 vfp; /* Vertical front porch */
+ /* Unit: line clocks */
+ u16 vbp; /* Vertical back porch */
+
+ /* Vsync logic level */
+ enum omap_dss_signal_level vsync_level;
+ /* Hsync logic level */
+ enum omap_dss_signal_level hsync_level;
+ /* Interlaced or Progressive timings */
+ bool interlace;
+ /* Pixel clock edge to drive LCD data */
+ enum omap_dss_signal_edge data_pclk_edge;
+ /* Data enable logic level */
+ enum omap_dss_signal_level de_level;
+ /* Pixel clock edges to drive HSYNC and VSYNC signals */
+ enum omap_dss_signal_edge sync_pclk_edge;
+
+ bool double_pixel;
+};
+
+/* Hardcoded timings for TV modes. Venc only uses these to
+ * identify the mode, and does not actually use the configs
+ * themselves. However, the configs should be something that
+ * a normal monitor can also show */
+extern const struct omap_video_timings omap_dss_pal_timings;
+extern const struct omap_video_timings omap_dss_ntsc_timings;
+
+struct omap_dss_cpr_coefs {
+ s16 rr, rg, rb;
+ s16 gr, gg, gb;
+ s16 br, bg, bb;
+};
+
+struct omap_overlay_info {
+ dma_addr_t paddr;
+ dma_addr_t p_uv_addr; /* for NV12 format */
+ u16 screen_width;
+ u16 width;
+ u16 height;
+ enum omap_color_mode color_mode;
+ u8 rotation;
+ enum omap_dss_rotation_type rotation_type;
+ bool mirror;
+
+ u16 pos_x;
+ u16 pos_y;
+ u16 out_width; /* if 0, out_width == width */
+ u16 out_height; /* if 0, out_height == height */
+ u8 global_alpha;
+ u8 pre_mult_alpha;
+ u8 zorder;
+};
+
+struct omap_overlay {
+ struct kobject kobj;
+ struct list_head list;
+
+ /* static fields */
+ const char *name;
+ enum omap_plane id;
+ enum omap_color_mode supported_modes;
+ enum omap_overlay_caps caps;
+
+ /* dynamic fields */
+ struct omap_overlay_manager *manager;
+
+ /*
+ * The following functions do not block:
+ *
+ * is_enabled
+ * set_overlay_info
+ * get_overlay_info
+ *
+ * The rest of the functions may block and cannot be called from
+ * interrupt context
+ */
+
+ int (*enable)(struct omap_overlay *ovl);
+ int (*disable)(struct omap_overlay *ovl);
+ bool (*is_enabled)(struct omap_overlay *ovl);
+
+ int (*set_manager)(struct omap_overlay *ovl,
+ struct omap_overlay_manager *mgr);
+ int (*unset_manager)(struct omap_overlay *ovl);
+
+ int (*set_overlay_info)(struct omap_overlay *ovl,
+ struct omap_overlay_info *info);
+ void (*get_overlay_info)(struct omap_overlay *ovl,
+ struct omap_overlay_info *info);
+
+ int (*wait_for_go)(struct omap_overlay *ovl);
+
+ struct omap_dss_device *(*get_device)(struct omap_overlay *ovl);
+};
+
+struct omap_overlay_manager_info {
+ u32 default_color;
+
+ enum omap_dss_trans_key_type trans_key_type;
+ u32 trans_key;
+ bool trans_enabled;
+
+ bool partial_alpha_enabled;
+
+ bool cpr_enable;
+ struct omap_dss_cpr_coefs cpr_coefs;
+};
+
+struct omap_overlay_manager {
+ struct kobject kobj;
+
+ /* static fields */
+ const char *name;
+ enum omap_channel id;
+ enum omap_overlay_manager_caps caps;
+ struct list_head overlays;
+ enum omap_display_type supported_displays;
+ enum omap_dss_output_id supported_outputs;
+
+ /* dynamic fields */
+ struct omap_dss_device *output;
+
+ /*
+ * The following functions do not block:
+ *
+ * set_manager_info
+ * get_manager_info
+ * apply
+ *
+ * The rest of the functions may block and cannot be called from
+ * interrupt context
+ */
+
+ int (*set_output)(struct omap_overlay_manager *mgr,
+ struct omap_dss_device *output);
+ int (*unset_output)(struct omap_overlay_manager *mgr);
+
+ int (*set_manager_info)(struct omap_overlay_manager *mgr,
+ struct omap_overlay_manager_info *info);
+ void (*get_manager_info)(struct omap_overlay_manager *mgr,
+ struct omap_overlay_manager_info *info);
+
+ int (*apply)(struct omap_overlay_manager *mgr);
+ int (*wait_for_go)(struct omap_overlay_manager *mgr);
+ int (*wait_for_vsync)(struct omap_overlay_manager *mgr);
+
+ struct omap_dss_device *(*get_device)(struct omap_overlay_manager *mgr);
+};
+
+/* 22 pins means 1 clk lane and 10 data lanes */
+#define OMAP_DSS_MAX_DSI_PINS 22
+
+struct omap_dsi_pin_config {
+ int num_pins;
+ /*
+ * pin numbers in the following order:
+ * clk+, clk-
+ * data1+, data1-
+ * data2+, data2-
+ * ...
+ */
+ int pins[OMAP_DSS_MAX_DSI_PINS];
+};
+
+struct omap_dss_writeback_info {
+ u32 paddr;
+ u32 p_uv_addr;
+ u16 buf_width;
+ u16 width;
+ u16 height;
+ enum omap_color_mode color_mode;
+ u8 rotation;
+ enum omap_dss_rotation_type rotation_type;
+ bool mirror;
+ u8 pre_mult_alpha;
+};
+
+struct omapdss_dpi_ops {
+ int (*connect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+ void (*disconnect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+
+ int (*enable)(struct omap_dss_device *dssdev);
+ void (*disable)(struct omap_dss_device *dssdev);
+
+ int (*check_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*set_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*get_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+
+ void (*set_data_lines)(struct omap_dss_device *dssdev, int data_lines);
+};
+
+struct omapdss_sdi_ops {
+ int (*connect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+ void (*disconnect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+
+ int (*enable)(struct omap_dss_device *dssdev);
+ void (*disable)(struct omap_dss_device *dssdev);
+
+ int (*check_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*set_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*get_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+
+ void (*set_datapairs)(struct omap_dss_device *dssdev, int datapairs);
+};
+
+struct omapdss_dvi_ops {
+ int (*connect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+ void (*disconnect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+
+ int (*enable)(struct omap_dss_device *dssdev);
+ void (*disable)(struct omap_dss_device *dssdev);
+
+ int (*check_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*set_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*get_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+};
+
+struct omapdss_atv_ops {
+ int (*connect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+ void (*disconnect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+
+ int (*enable)(struct omap_dss_device *dssdev);
+ void (*disable)(struct omap_dss_device *dssdev);
+
+ int (*check_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*set_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*get_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+
+ void (*set_type)(struct omap_dss_device *dssdev,
+ enum omap_dss_venc_type type);
+ void (*invert_vid_out_polarity)(struct omap_dss_device *dssdev,
+ bool invert_polarity);
+
+ int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
+ u32 (*get_wss)(struct omap_dss_device *dssdev);
+};
+
+struct omapdss_hdmi_ops {
+ int (*connect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+ void (*disconnect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+
+ int (*enable)(struct omap_dss_device *dssdev);
+ void (*disable)(struct omap_dss_device *dssdev);
+
+ int (*check_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*set_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*get_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+
+ int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
+ bool (*detect)(struct omap_dss_device *dssdev);
+
+ int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
+ int (*set_infoframe)(struct omap_dss_device *dssdev,
+ const struct hdmi_avi_infoframe *avi);
+};
+
+struct omapdss_dsi_ops {
+ int (*connect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+ void (*disconnect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+
+ int (*enable)(struct omap_dss_device *dssdev);
+ void (*disable)(struct omap_dss_device *dssdev, bool disconnect_lanes,
+ bool enter_ulps);
+
+ /* bus configuration */
+ int (*set_config)(struct omap_dss_device *dssdev,
+ const struct omap_dss_dsi_config *cfg);
+ int (*configure_pins)(struct omap_dss_device *dssdev,
+ const struct omap_dsi_pin_config *pin_cfg);
+
+ void (*enable_hs)(struct omap_dss_device *dssdev, int channel,
+ bool enable);
+ int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
+
+ int (*update)(struct omap_dss_device *dssdev, int channel,
+ void (*callback)(int, void *), void *data);
+
+ void (*bus_lock)(struct omap_dss_device *dssdev);
+ void (*bus_unlock)(struct omap_dss_device *dssdev);
+
+ int (*enable_video_output)(struct omap_dss_device *dssdev, int channel);
+ void (*disable_video_output)(struct omap_dss_device *dssdev,
+ int channel);
+
+ int (*request_vc)(struct omap_dss_device *dssdev, int *channel);
+ int (*set_vc_id)(struct omap_dss_device *dssdev, int channel,
+ int vc_id);
+ void (*release_vc)(struct omap_dss_device *dssdev, int channel);
+
+ /* data transfer */
+ int (*dcs_write)(struct omap_dss_device *dssdev, int channel,
+ u8 *data, int len);
+ int (*dcs_write_nosync)(struct omap_dss_device *dssdev, int channel,
+ u8 *data, int len);
+ int (*dcs_read)(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
+ u8 *data, int len);
+
+ int (*gen_write)(struct omap_dss_device *dssdev, int channel,
+ u8 *data, int len);
+ int (*gen_write_nosync)(struct omap_dss_device *dssdev, int channel,
+ u8 *data, int len);
+ int (*gen_read)(struct omap_dss_device *dssdev, int channel,
+ u8 *reqdata, int reqlen,
+ u8 *data, int len);
+
+ int (*bta_sync)(struct omap_dss_device *dssdev, int channel);
+
+ int (*set_max_rx_packet_size)(struct omap_dss_device *dssdev,
+ int channel, u16 plen);
+};
+
+struct omap_dss_device {
+ struct kobject kobj;
+ struct device *dev;
+
+ struct module *owner;
+
+ struct list_head panel_list;
+
+ /* alias in the form of "display%d" */
+ char alias[16];
+
+ enum omap_display_type type;
+ enum omap_display_type output_type;
+
+ union {
+ struct {
+ u8 data_lines;
+ } dpi;
+
+ struct {
+ u8 channel;
+ u8 data_lines;
+ } rfbi;
+
+ struct {
+ u8 datapairs;
+ } sdi;
+
+ struct {
+ int module;
+ } dsi;
+
+ struct {
+ enum omap_dss_venc_type type;
+ bool invert_polarity;
+ } venc;
+ } phy;
+
+ struct {
+ struct omap_video_timings timings;
+
+ enum omap_dss_dsi_pixel_format dsi_pix_fmt;
+ enum omap_dss_dsi_mode dsi_mode;
+ } panel;
+
+ struct {
+ u8 pixel_size;
+ struct rfbi_timings rfbi_timings;
+ } ctrl;
+
+ const char *name;
+
+ /* used to match device to driver */
+ const char *driver_name;
+
+ void *data;
+
+ struct omap_dss_driver *driver;
+
+ union {
+ const struct omapdss_dpi_ops *dpi;
+ const struct omapdss_sdi_ops *sdi;
+ const struct omapdss_dvi_ops *dvi;
+ const struct omapdss_hdmi_ops *hdmi;
+ const struct omapdss_atv_ops *atv;
+ const struct omapdss_dsi_ops *dsi;
+ } ops;
+
+ /* helper variable for driver suspend/resume */
+ bool activate_after_resume;
+
+ enum omap_display_caps caps;
+
+ struct omap_dss_device *src;
+
+ enum omap_dss_display_state state;
+
+ /* OMAP DSS output specific fields */
+
+ struct list_head list;
+
+ /* DISPC channel for this output */
+ enum omap_channel dispc_channel;
+ bool dispc_channel_connected;
+
+ /* output instance */
+ enum omap_dss_output_id id;
+
+ /* the port number in the DT node */
+ int port_num;
+
+ /* dynamic fields */
+ struct omap_overlay_manager *manager;
+
+ struct omap_dss_device *dst;
+};
+
+struct omap_dss_driver {
+ int (*probe)(struct omap_dss_device *);
+ void (*remove)(struct omap_dss_device *);
+
+ int (*connect)(struct omap_dss_device *dssdev);
+ void (*disconnect)(struct omap_dss_device *dssdev);
+
+ int (*enable)(struct omap_dss_device *display);
+ void (*disable)(struct omap_dss_device *display);
+ int (*run_test)(struct omap_dss_device *display, int test);
+
+ int (*update)(struct omap_dss_device *dssdev,
+ u16 x, u16 y, u16 w, u16 h);
+ int (*sync)(struct omap_dss_device *dssdev);
+
+ int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
+ int (*get_te)(struct omap_dss_device *dssdev);
+
+ u8 (*get_rotate)(struct omap_dss_device *dssdev);
+ int (*set_rotate)(struct omap_dss_device *dssdev, u8 rotate);
+
+ bool (*get_mirror)(struct omap_dss_device *dssdev);
+ int (*set_mirror)(struct omap_dss_device *dssdev, bool enable);
+
+ int (*memory_read)(struct omap_dss_device *dssdev,
+ void *buf, size_t size,
+ u16 x, u16 y, u16 w, u16 h);
+
+ void (*get_resolution)(struct omap_dss_device *dssdev,
+ u16 *xres, u16 *yres);
+ void (*get_dimensions)(struct omap_dss_device *dssdev,
+ u32 *width, u32 *height);
+ int (*get_recommended_bpp)(struct omap_dss_device *dssdev);
+
+ int (*check_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*set_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+ void (*get_timings)(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+
+ int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
+ u32 (*get_wss)(struct omap_dss_device *dssdev);
+
+ int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
+ bool (*detect)(struct omap_dss_device *dssdev);
+
+ int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
+ int (*set_hdmi_infoframe)(struct omap_dss_device *dssdev,
+ const struct hdmi_avi_infoframe *avi);
+};
+
+enum omapdss_version omapdss_get_version(void);
+bool omapdss_is_initialized(void);
+
+int omap_dss_register_driver(struct omap_dss_driver *);
+void omap_dss_unregister_driver(struct omap_dss_driver *);
+
+int omapdss_register_display(struct omap_dss_device *dssdev);
+void omapdss_unregister_display(struct omap_dss_device *dssdev);
+
+struct omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev);
+void omap_dss_put_device(struct omap_dss_device *dssdev);
+#define for_each_dss_dev(d) while ((d = omap_dss_get_next_device(d)) != NULL)
+struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from);
+struct omap_dss_device *omap_dss_find_device(void *data,
+ int (*match)(struct omap_dss_device *dssdev, void *data));
+const char *omapdss_get_default_display_name(void);
+
+void videomode_to_omap_video_timings(const struct videomode *vm,
+ struct omap_video_timings *ovt);
+void omap_video_timings_to_videomode(const struct omap_video_timings *ovt,
+ struct videomode *vm);
+
+int dss_feat_get_num_mgrs(void);
+int dss_feat_get_num_ovls(void);
+enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane);
+
+
+int omap_dss_get_num_overlay_managers(void);
+struct omap_overlay_manager *omap_dss_get_overlay_manager(int num);
+
+int omap_dss_get_num_overlays(void);
+struct omap_overlay *omap_dss_get_overlay(int num);
+
+int omapdss_register_output(struct omap_dss_device *output);
+void omapdss_unregister_output(struct omap_dss_device *output);
+struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id);
+struct omap_dss_device *omap_dss_find_output(const char *name);
+struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node *port);
+int omapdss_output_set_device(struct omap_dss_device *out,
+ struct omap_dss_device *dssdev);
+int omapdss_output_unset_device(struct omap_dss_device *out);
+
+struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev);
+struct omap_overlay_manager *omapdss_find_mgr_from_display(struct omap_dss_device *dssdev);
+
+void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
+ u16 *xres, u16 *yres);
+int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev);
+void omapdss_default_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings);
+
+typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
+int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
+int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
+
+int omapdss_compat_init(void);
+void omapdss_compat_uninit(void);
+
+static inline bool omapdss_device_is_connected(struct omap_dss_device *dssdev)
+{
+ return dssdev->src;
+}
+
+static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev)
+{
+ return dssdev->state == OMAP_DSS_DISPLAY_ACTIVE;
+}
+
+struct device_node *
+omapdss_of_get_next_port(const struct device_node *parent,
+ struct device_node *prev);
+
+struct device_node *
+omapdss_of_get_next_endpoint(const struct device_node *parent,
+ struct device_node *prev);
+
+struct device_node *
+omapdss_of_get_first_endpoint(const struct device_node *parent);
+
+struct omap_dss_device *
+omapdss_of_find_source_for_first_ep(struct device_node *node);
u32 dispc_read_irqstatus(void);
void dispc_clear_irqstatus(u32 mask);
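A hypothetical walk over the registered displays using the iterator macro and the two inline helpers declared above:

	struct omap_dss_device *dssdev = NULL;

	for_each_dss_dev(dssdev) {
		if (!omapdss_device_is_connected(dssdev))
			continue;
		pr_info("%s: %s\n", dssdev->name,
			omapdss_device_is_enabled(dssdev) ?
			"enabled" : "disabled");
	}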
@@ -44,6 +909,10 @@ void dispc_mgr_set_timings(enum omap_channel channel,
const struct omap_video_timings *timings);
void dispc_mgr_setup(enum omap_channel channel,
const struct omap_overlay_manager_info *info);
+u32 dispc_mgr_gamma_size(enum omap_channel channel);
+void dispc_mgr_set_gamma(enum omap_channel channel,
+ const struct drm_color_lut *lut,
+ unsigned int length);
int dispc_ovl_enable(enum omap_plane plane, bool enable);
bool dispc_ovl_enabled(enum omap_plane plane);
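The two new gamma hooks take a LUT in DRM's uapi format; they pair with the dispc_enable_gamma_table() removals in hdmi4.c and hdmi5.c earlier in this patch. A sketch that programs a linear ramp (assumes a sleepable context and size > 1):

static int example_load_linear_gamma(void)
{
	u32 size = dispc_mgr_gamma_size(OMAP_DSS_CHANNEL_LCD);
	struct drm_color_lut *lut;
	unsigned int i;

	lut = kcalloc(size, sizeof(*lut), GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	for (i = 0; i < size; i++) {
		u16 v = (i * 0xffffU) / (size - 1);

		lut[i].red = lut[i].green = lut[i].blue = v; /* identity */
	}

	dispc_mgr_set_gamma(OMAP_DSS_CHANNEL_LCD, lut, size);
	kfree(lut);
	return 0;
}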
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index 829232ad8..24f859488 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -21,8 +21,7 @@
#include <linux/slab.h>
#include <linux/of.h>
-#include <video/omapdss.h>
-
+#include "omapdss.h"
#include "dss.h"
static LIST_HEAD(output_list);
diff --git a/drivers/gpu/drm/omapdrm/dss/pll.c b/drivers/gpu/drm/omapdrm/dss/pll.c
index f974ddcd3..0a76c89cd 100644
--- a/drivers/gpu/drm/omapdrm/dss/pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/pll.c
@@ -22,8 +22,7 @@
#include <linux/regulator/consumer.h>
#include <linux/sched.h>
-#include <video/omapdss.h>
-
+#include "omapdss.h"
#include "dss.h"
#define PLL_CONTROL 0x0000
@@ -76,6 +75,59 @@ struct dss_pll *dss_pll_find(const char *name)
return NULL;
}
+struct dss_pll *dss_pll_find_by_src(enum dss_clk_source src)
+{
+ struct dss_pll *pll;
+
+ switch (src) {
+ default:
+ case DSS_CLK_SRC_FCK:
+ return NULL;
+
+ case DSS_CLK_SRC_HDMI_PLL:
+ return dss_pll_find("hdmi");
+
+ case DSS_CLK_SRC_PLL1_1:
+ case DSS_CLK_SRC_PLL1_2:
+ case DSS_CLK_SRC_PLL1_3:
+ pll = dss_pll_find("dsi0");
+ if (!pll)
+ pll = dss_pll_find("video0");
+ return pll;
+
+ case DSS_CLK_SRC_PLL2_1:
+ case DSS_CLK_SRC_PLL2_2:
+ case DSS_CLK_SRC_PLL2_3:
+ pll = dss_pll_find("dsi1");
+ if (!pll)
+ pll = dss_pll_find("video1");
+ return pll;
+ }
+}
+
+unsigned dss_pll_get_clkout_idx_for_src(enum dss_clk_source src)
+{
+ switch (src) {
+ case DSS_CLK_SRC_HDMI_PLL:
+ return 0;
+
+ case DSS_CLK_SRC_PLL1_1:
+ case DSS_CLK_SRC_PLL2_1:
+ return 0;
+
+ case DSS_CLK_SRC_PLL1_2:
+ case DSS_CLK_SRC_PLL2_2:
+ return 1;
+
+ case DSS_CLK_SRC_PLL1_3:
+ case DSS_CLK_SRC_PLL2_3:
+ return 2;
+
+ default:
+ return 0;
+ }
+}
+
int dss_pll_enable(struct dss_pll *pll)
{
int r;
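The two lookups above are intended to be used together: resolve which PLL instance backs a clock source, then which of its clkout[] entries carries the rate. Hypothetical usage, assuming the name and cinfo fields of struct dss_pll:

	enum dss_clk_source src = DSS_CLK_SRC_PLL2_3;
	struct dss_pll *pll = dss_pll_find_by_src(src);	/* "dsi1" or "video1" */
	unsigned idx = dss_pll_get_clkout_idx_for_src(src);	/* 2 */

	if (pll)
		pr_debug("%s clkout[%u] = %lu\n",
			 pll->name, idx, pll->cinfo.clkout[idx]);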
@@ -129,7 +181,7 @@ int dss_pll_set_config(struct dss_pll *pll, const struct dss_pll_clock_info *cin
return 0;
}
-bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco,
+bool dss_pll_hsdiv_calc_a(const struct dss_pll *pll, unsigned long clkdco,
unsigned long out_min, unsigned long out_max,
dss_hsdiv_calc_func func, void *data)
{
@@ -154,7 +206,11 @@ bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco,
return false;
}
-bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin,
+/*
+ * clkdco = clkin / n * m * 2
+ * clkoutX = clkdco / mX
+ */
+bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin,
unsigned long pll_min, unsigned long pll_max,
dss_pll_calc_func func, void *data)
{
@@ -195,6 +251,71 @@ bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin,
return false;
}
+/*
+ * This calculates a PLL config that will provide the target_clkout rate
+ * for clkout. Additionally, the clkdco rate will be the same as the
+ * clkout rate when the clkout rate is >= hw->clkdco_min.
+ *
+ * clkdco = clkin / n * m + clkin / n * mf / 262144
+ * clkout = clkdco / m2
+ */
+bool dss_pll_calc_b(const struct dss_pll *pll, unsigned long clkin,
+ unsigned long target_clkout, struct dss_pll_clock_info *cinfo)
+{
+ unsigned long fint, clkdco, clkout;
+ unsigned long target_clkdco;
+ unsigned long min_dco;
+ unsigned n, m, mf, m2, sd;
+ const struct dss_pll_hw *hw = pll->hw;
+
+ DSSDBG("clkin %lu, target clkout %lu\n", clkin, target_clkout);
+
+ /* Fint */
+ n = DIV_ROUND_UP(clkin, hw->fint_max);
+ fint = clkin / n;
+
+ /* adjust m2 so that the clkdco will be high enough */
+ min_dco = roundup(hw->clkdco_min, fint);
+ m2 = DIV_ROUND_UP(min_dco, target_clkout);
+ if (m2 == 0)
+ m2 = 1;
+
+ target_clkdco = target_clkout * m2;
+ m = target_clkdco / fint;
+
+ clkdco = fint * m;
+
+ /* adjust clkdco with fractional mf */
+ if (WARN_ON(target_clkdco - clkdco > fint))
+ mf = 0;
+ else
+ mf = (u32)div_u64(262144ull * (target_clkdco - clkdco), fint);
+
+ if (mf > 0)
+ clkdco += (u32)div_u64((u64)mf * fint, 262144);
+
+ clkout = clkdco / m2;
+
+ /* sigma-delta */
+ sd = DIV_ROUND_UP(fint * m, 250000000);
+
+ DSSDBG("N = %u, M = %u, M.f = %u, M2 = %u, SD = %u\n",
+ n, m, mf, m2, sd);
+ DSSDBG("Fint %lu, clkdco %lu, clkout %lu\n", fint, clkdco, clkout);
+
+ cinfo->n = n;
+ cinfo->m = m;
+ cinfo->mf = mf;
+ cinfo->mX[0] = m2;
+ cinfo->sd = sd;
+
+ cinfo->fint = fint;
+ cinfo->clkdco = clkdco;
+ cinfo->clkout[0] = clkout;
+
+ return true;
+}
+
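
A worked pass through dss_pll_calc_b() with illustrative numbers (the hardware limits here are assumptions, not values from any real SoC): take clkin = 20 MHz, target_clkout = 148.5 MHz, fint_max = 2.5 MHz, clkdco_min = 500 MHz.

	n  = DIV_ROUND_UP(20 MHz, 2.5 MHz)    = 8,   so fint = 2.5 MHz
	m2 = DIV_ROUND_UP(500 MHz, 148.5 MHz) = 4,   so target_clkdco = 594 MHz
	m  = 594 MHz / 2.5 MHz                = 237, so integer clkdco = 592.5 MHz
	mf = 262144 * 1.5 MHz / 2.5 MHz       = 157286, adding back ~1.5 MHz
	clkdco ≈ 594.0 MHz, clkout = clkdco / 4 ≈ 148.5 MHz
	sd = DIV_ROUND_UP(2.5 MHz * 237, 250 MHz) = 3

The fractional multiplier mf recovers the remainder that the integer m truncates, which is why clkout lands on the target almost exactly.
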
static int wait_for_bit_change(void __iomem *reg, int bitnum, int value)
{
unsigned long timeout;
diff --git a/drivers/gpu/drm/omapdrm/dss/rfbi.c b/drivers/gpu/drm/omapdrm/dss/rfbi.c
index 3796576df..cd53566d7 100644
--- a/drivers/gpu/drm/omapdrm/dss/rfbi.c
+++ b/drivers/gpu/drm/omapdrm/dss/rfbi.c
@@ -38,7 +38,7 @@
#include <linux/pm_runtime.h>
#include <linux/component.h>
-#include <video/omapdss.h>
+#include "omapdss.h"
#include "dss.h"
struct rfbi_reg { u16 idx; };
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index cd6d3bfb0..0a96c321c 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -29,7 +29,7 @@
#include <linux/of.h>
#include <linux/component.h>
-#include <video/omapdss.h>
+#include "omapdss.h"
#include "dss.h"
static struct {
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 08a2cc778..6eedf2118 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -37,8 +37,7 @@
#include <linux/of.h>
#include <linux/component.h>
-#include <video/omapdss.h>
-
+#include "omapdss.h"
#include "dss.h"
#include "dss_features.h"
diff --git a/drivers/gpu/drm/omapdrm/dss/video-pll.c b/drivers/gpu/drm/omapdrm/dss/video-pll.c
index b1ec59e42..7429de928 100644
--- a/drivers/gpu/drm/omapdrm/dss/video-pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/video-pll.c
@@ -17,8 +17,7 @@
#include <linux/platform_device.h>
#include <linux/sched.h>
-#include <video/omapdss.h>
-
+#include "omapdss.h"
#include "dss.h"
#include "dss_features.h"
@@ -108,6 +107,8 @@ static const struct dss_pll_ops dss_pll_ops = {
};
static const struct dss_pll_hw dss_dra7_video_pll_hw = {
+ .type = DSS_PLL_TYPE_A,
+
.n_max = (1 << 8) - 1,
.m_max = (1 << 12) - 1,
.mX_max = (1 << 5) - 1,
@@ -124,6 +125,10 @@ static const struct dss_pll_hw dss_dra7_video_pll_hw = {
.mX_lsb[0] = 21,
.mX_msb[1] = 30,
.mX_lsb[1] = 26,
+ .mX_msb[2] = 4,
+ .mX_lsb[2] = 0,
+ .mX_msb[3] = 9,
+ .mX_lsb[3] = 5,
.has_refsel = true,
};
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index ce2d67b6a..137fe690a 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -32,7 +32,6 @@
struct omap_connector {
struct drm_connector base;
struct omap_dss_device *dssdev;
- struct drm_encoder *encoder;
bool hdmi_mode;
};
@@ -256,13 +255,6 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
return ret;
}
-struct drm_encoder *omap_connector_attached_encoder(
- struct drm_connector *connector)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
- return omap_connector->encoder;
-}
-
static const struct drm_connector_funcs omap_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
@@ -276,7 +268,6 @@ static const struct drm_connector_funcs omap_connector_funcs = {
static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
.get_modes = omap_connector_get_modes,
.mode_valid = omap_connector_mode_valid,
- .best_encoder = omap_connector_attached_encoder,
};
/* initialize connector */
@@ -296,7 +287,6 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
goto fail;
omap_connector->dssdev = dssdev;
- omap_connector->encoder = encoder;
connector = &omap_connector->base;
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 075f2bb44..180f644e8 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -372,6 +372,20 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc)
copy_timings_drm_to_omap(&omap_crtc->timings, mode);
}
+static int omap_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ if (state->color_mgmt_changed && state->gamma_lut) {
+ uint length = state->gamma_lut->length /
+ sizeof(struct drm_color_lut);
+
+ if (length < 2)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static void omap_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
@@ -384,6 +398,19 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
WARN_ON(omap_crtc->vblank_irq.registered);
+ if (crtc->state->color_mgmt_changed) {
+ struct drm_color_lut *lut = NULL;
+ uint length = 0;
+
+ if (crtc->state->gamma_lut) {
+ lut = (struct drm_color_lut *)
+ crtc->state->gamma_lut->data;
+ length = crtc->state->gamma_lut->length /
+ sizeof(*lut);
+ }
+ dispc_mgr_set_gamma(omap_crtc->channel, lut, length);
+ }
+
if (dispc_mgr_is_enabled(omap_crtc->channel)) {
DBG("%s: GO", omap_crtc->name);
@@ -460,6 +500,7 @@ static const struct drm_crtc_funcs omap_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = omap_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
+ .gamma_set = drm_atomic_helper_legacy_gamma_set,
.set_property = drm_atomic_helper_crtc_set_property,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
@@ -471,6 +512,7 @@ static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
.mode_set_nofb = omap_crtc_mode_set_nofb,
.disable = omap_crtc_disable,
.enable = omap_crtc_enable,
+ .atomic_check = omap_crtc_atomic_check,
.atomic_begin = omap_crtc_atomic_begin,
.atomic_flush = omap_crtc_atomic_flush,
};
@@ -534,6 +576,20 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs);
+ /* The dispc API adapts to whatever size, but the HW supports a
+ * 256 element gamma table for LCDs and a 1024 element table for
+ * OMAP_DSS_CHANNEL_DIGIT. X server assumes 256 element gamma
+ * tables so let's use that. Size of HW gamma table can be
+ * extracted with dispc_mgr_gamma_size(). If it returns 0,
+ * gamma table is not supported.
+ */
+ if (dispc_mgr_gamma_size(channel)) {
+ uint gamma_lut_size = 256;
+
+ drm_crtc_enable_color_mgmt(crtc, 0, false, gamma_lut_size);
+ drm_mode_crtc_set_gamma_size(crtc, gamma_lut_size);
+ }
+
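
From userspace, the 256-entry table registered above is reachable through the legacy gamma ioctl, which drm_atomic_helper_legacy_gamma_set() (wired up as .gamma_set in the crtc_funcs hunk above) converts into a GAMMA_LUT blob for the atomic_check()/atomic_flush() path shown earlier. An illustrative libdrm client, not part of the patch:

	#include <stdint.h>
	#include <xf86drmMode.h>

	static int set_identity_gamma(int fd, uint32_t crtc_id)
	{
		uint16_t r[256], g[256], b[256];

		for (int i = 0; i < 256; i++)
			r[i] = g[i] = b[i] = (uint16_t)(i * 65535 / 255);

		/* 256 matches drm_mode_crtc_set_gamma_size() above */
		return drmModeCrtcSetGamma(fd, crtc_id, 256, r, g, b);
	}
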
omap_plane_install_properties(crtc->primary, &crtc->base);
omap_crtcs[channel] = omap_crtc;
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index d86f54793..26c6134eb 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -142,8 +142,9 @@ static int omap_atomic_commit(struct drm_device *dev,
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_atomic_state_commit *commit;
- unsigned int i;
- int ret;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int i, ret;
ret = drm_atomic_helper_prepare_planes(dev, state);
if (ret)
@@ -163,10 +164,8 @@ static int omap_atomic_commit(struct drm_device *dev,
/* Wait until all affected CRTCs have completed previous commits and
* mark them as pending.
*/
- for (i = 0; i < dev->mode_config.num_crtc; ++i) {
- if (state->crtcs[i])
- commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
- }
+ for_each_crtc_in_state(state, crtc, crtc_state, i)
+ commit->crtcs |= drm_crtc_mask(crtc);
wait_event(priv->commit.wait, !omap_atomic_is_pending(priv, commit));
@@ -175,7 +174,7 @@ static int omap_atomic_commit(struct drm_device *dev,
spin_unlock(&priv->commit.lock);
/* Swap the state, this is the point of no return. */
- drm_atomic_helper_swap_state(dev, state);
+ drm_atomic_helper_swap_state(state, true);
if (nonblock)
schedule_work(&commit->work);
@@ -203,6 +202,8 @@ static int get_connector_type(struct omap_dss_device *dssdev)
return DRM_MODE_CONNECTOR_HDMIA;
case OMAP_DISPLAY_TYPE_DVI:
return DRM_MODE_CONNECTOR_DVID;
+ case OMAP_DISPLAY_TYPE_DSI:
+ return DRM_MODE_CONNECTOR_DSI;
default:
return DRM_MODE_CONNECTOR_Unknown;
}
@@ -800,7 +801,6 @@ static struct drm_driver omap_drm_driver = {
.unload = dev_unload,
.open = dev_open,
.lastclose = dev_lastclose,
- .set_busid = drm_platform_set_busid,
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = omap_irq_enable_vblank,
.disable_vblank = omap_irq_disable_vblank,
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 3f823c368..dcc30a98b 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -24,7 +24,6 @@
#include <linux/platform_data/omap_drm.h>
#include <linux/types.h>
#include <linux/wait.h>
-#include <video/omapdss.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
@@ -183,7 +182,6 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
-struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p);
int omap_framebuffer_pin(struct drm_framebuffer *fb);
void omap_framebuffer_unpin(struct drm_framebuffer *fb);
void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
@@ -231,7 +229,6 @@ int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
int x, int y, dma_addr_t *paddr);
uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj);
size_t omap_gem_mmap_size(struct drm_gem_object *obj);
-int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h);
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient);
struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
@@ -239,17 +236,6 @@ struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
struct dma_buf *buffer);
-static inline int align_pitch(int pitch, int width, int bpp)
-{
- int bytespp = (bpp + 7) / 8;
- /* in case someone tries to feed us a completely bogus stride: */
- pitch = max(pitch, width * bytespp);
- /* PVR needs alignment to 8 pixels.. right now that is the most
- * restrictive stride requirement..
- */
- return roundup(pitch, 8 * bytespp);
-}
-
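
The pitch math changes meaning here: the removed helper padded the stride to an 8-pixel boundary for the PVR blob, while the replacements (see the omap_fbdev.c and omap_gem.c hunks below) compute exact bytes per line. A side-by-side sketch, purely illustrative:

	/* old: pad to 8 pixels */
	bytespp = (bpp + 7) / 8;
	pitch   = roundup(max(pitch, width * bytespp), 8 * bytespp);

	/* new: exact byte count */
	pitch   = DIV_ROUND_UP(width * bpp, 8);

For width = 1283 at bpp = 32 the old form gives 5152 bytes, the new one 5132.
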
/* map crtc to vblank mask */
uint32_t pipe2vbl(struct drm_crtc *crtc);
struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index f84570d16..31f5178c2 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -115,24 +115,16 @@ static void omap_framebuffer_destroy(struct drm_framebuffer *fb)
for (i = 0; i < n; i++) {
struct plane *plane = &omap_fb->planes[i];
- if (plane->bo)
- drm_gem_object_unreference_unlocked(plane->bo);
+
+ drm_gem_object_unreference_unlocked(plane->bo);
}
kfree(omap_fb);
}
-static int omap_framebuffer_dirty(struct drm_framebuffer *fb,
- struct drm_file *file_priv, unsigned flags, unsigned color,
- struct drm_clip_rect *clips, unsigned num_clips)
-{
- return 0;
-}
-
static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
.create_handle = omap_framebuffer_create_handle,
.destroy = omap_framebuffer_destroy,
- .dirty = omap_framebuffer_dirty,
};
static uint32_t get_linear_addr(struct plane *plane,
@@ -320,14 +312,6 @@ void omap_framebuffer_unpin(struct drm_framebuffer *fb)
mutex_unlock(&omap_fb->lock);
}
-struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p)
-{
- struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
- if (p >= drm_format_num_planes(fb->pixel_format))
- return NULL;
- return omap_fb->planes[p].bo;
-}
-
/* iterate thru all the connectors, returning ones that are attached
* to the same fb..
*/
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 89da41ac6..adb10fbe9 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -125,9 +125,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
- mode_cmd.pitches[0] = align_pitch(
- mode_cmd.width * ((sizes->surface_bpp + 7) / 8),
- mode_cmd.width, sizes->surface_bpp);
+ mode_cmd.pitches[0] =
+ DIV_ROUND_UP(mode_cmd.width * sizes->surface_bpp, 8);
fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled;
if (fbdev->ywrap_enabled) {
@@ -280,9 +279,6 @@ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
if (ret)
goto fini;
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(dev);
-
ret = drm_fb_helper_initial_config(helper, 32);
if (ret)
goto fini;
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 03698b6c8..505dee0db 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -383,18 +383,6 @@ size_t omap_gem_mmap_size(struct drm_gem_object *obj)
return size;
}
-/* get tiled size, returns -EINVAL if not tiled buffer */
-int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
-{
- struct omap_gem_object *omap_obj = to_omap_bo(obj);
- if (omap_obj->flags & OMAP_BO_TILED) {
- *w = omap_obj->width;
- *h = omap_obj->height;
- return 0;
- }
- return -EINVAL;
-}
-
/* -----------------------------------------------------------------------------
* Fault Handling
*/
@@ -661,7 +649,8 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
{
union omap_gem_size gsize;
- args->pitch = align_pitch(0, args->width, args->bpp);
+ args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+
args->size = PAGE_ALIGN(args->pitch * args->height);
gsize = (union omap_gem_size){
@@ -1407,7 +1396,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
if (ret)
goto err_free;
- mapping = file_inode(obj->filp)->i_mapping;
+ mapping = obj->filp->f_mapping;
mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
}
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 3a7bdf1c8..85143d1b9 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -168,6 +168,7 @@ static int panel_simple_disable(struct drm_panel *panel)
if (p->backlight) {
p->backlight->props.power = FB_BLANK_POWERDOWN;
+ p->backlight->props.state |= BL_CORE_FBBLANK;
backlight_update_status(p->backlight);
}
@@ -235,6 +236,7 @@ static int panel_simple_enable(struct drm_panel *panel)
msleep(p->desc->delay.enable);
if (p->backlight) {
+ p->backlight->props.state &= ~BL_CORE_FBBLANK;
p->backlight->props.power = FB_BLANK_UNBLANK;
backlight_update_status(p->backlight);
}
@@ -964,8 +966,8 @@ static const struct panel_desc innolux_zj070na_01p = {
.num_modes = 1,
.bpc = 6,
.size = {
- .width = 1024,
- .height = 600,
+ .width = 154,
+ .height = 90,
},
};
@@ -1017,6 +1019,51 @@ static const struct panel_desc lg_lb070wv8 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
};
+static const struct drm_display_mode lg_lp079qx1_sp0v_mode = {
+ .clock = 200000,
+ .hdisplay = 1536,
+ .hsync_start = 1536 + 12,
+ .hsync_end = 1536 + 12 + 16,
+ .htotal = 1536 + 12 + 16 + 48,
+ .vdisplay = 2048,
+ .vsync_start = 2048 + 8,
+ .vsync_end = 2048 + 8 + 4,
+ .vtotal = 2048 + 8 + 4 + 8,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
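
As a quick consistency check on the new mode (worked arithmetic, not driver code): htotal = 1536 + 12 + 16 + 48 = 1612 and vtotal = 2048 + 8 + 4 + 8 = 2068, so with .clock in kHz the refresh rate is 200000 * 1000 / (1612 * 2068) ≈ 60.0 Hz, matching .vrefresh = 60.
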
+
+static const struct panel_desc lg_lp079qx1_sp0v = {
+ .modes = &lg_lp079qx1_sp0v_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 129,
+ .height = 171,
+ },
+};
+
+static const struct drm_display_mode lg_lp097qx1_spa1_mode = {
+ .clock = 205210,
+ .hdisplay = 2048,
+ .hsync_start = 2048 + 150,
+ .hsync_end = 2048 + 150 + 5,
+ .htotal = 2048 + 150 + 5 + 5,
+ .vdisplay = 1536,
+ .vsync_start = 1536 + 3,
+ .vsync_end = 1536 + 3 + 1,
+ .vtotal = 1536 + 3 + 1 + 9,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc lg_lp097qx1_spa1 = {
+ .modes = &lg_lp097qx1_spa1_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 208,
+ .height = 147,
+ },
+};
+
static const struct drm_display_mode lg_lp120up1_mode = {
.clock = 162300,
.hdisplay = 1920,
@@ -1224,6 +1271,28 @@ static const struct panel_desc qd43003c0_40 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct drm_display_mode samsung_lsn122dl01_c01_mode = {
+ .clock = 271560,
+ .hdisplay = 2560,
+ .hsync_start = 2560 + 48,
+ .hsync_end = 2560 + 48 + 32,
+ .htotal = 2560 + 48 + 32 + 80,
+ .vdisplay = 1600,
+ .vsync_start = 1600 + 2,
+ .vsync_end = 1600 + 2 + 5,
+ .vtotal = 1600 + 2 + 5 + 57,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc samsung_lsn122dl01_c01 = {
+ .modes = &samsung_lsn122dl01_c01_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 263,
+ .height = 164,
+ },
+};
+
static const struct drm_display_mode samsung_ltn101nt05_mode = {
.clock = 54030,
.hdisplay = 1024,
@@ -1242,8 +1311,8 @@ static const struct panel_desc samsung_ltn101nt05 = {
.num_modes = 1,
.bpc = 6,
.size = {
- .width = 1024,
- .height = 600,
+ .width = 223,
+ .height = 125,
},
};
@@ -1270,6 +1339,53 @@ static const struct panel_desc samsung_ltn140at29_301 = {
},
};
+static const struct display_timing sharp_lq101k1ly04_timing = {
+ .pixelclock = { 60000000, 65000000, 80000000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 20, 20, 20 },
+ .hback_porch = { 20, 20, 20 },
+ .hsync_len = { 10, 10, 10 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 4, 4, 4 },
+ .vback_porch = { 4, 4, 4 },
+ .vsync_len = { 4, 4, 4 },
+ .flags = DISPLAY_FLAGS_PIXDATA_POSEDGE,
+};
+
+static const struct panel_desc sharp_lq101k1ly04 = {
+ .timings = &sharp_lq101k1ly04_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+};
+
+static const struct drm_display_mode sharp_lq123p1jx31_mode = {
+ .clock = 252750,
+ .hdisplay = 2400,
+ .hsync_start = 2400 + 48,
+ .hsync_end = 2400 + 48 + 32,
+ .htotal = 2400 + 48 + 32 + 80,
+ .vdisplay = 1600,
+ .vsync_start = 1600 + 3,
+ .vsync_end = 1600 + 3 + 10,
+ .vtotal = 1600 + 3 + 10 + 33,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc sharp_lq123p1jx31 = {
+ .modes = &sharp_lq123p1jx31_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 259,
+ .height = 173,
+ },
+};
+
static const struct drm_display_mode shelly_sca07010_bfn_lnn_mode = {
.clock = 33300,
.hdisplay = 800,
@@ -1293,6 +1409,29 @@ static const struct panel_desc shelly_sca07010_bfn_lnn = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
+static const struct drm_display_mode starry_kr122ea0sra_mode = {
+ .clock = 147000,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 16,
+ .hsync_end = 1920 + 16 + 16,
+ .htotal = 1920 + 16 + 16 + 32,
+ .vdisplay = 1200,
+ .vsync_start = 1200 + 15,
+ .vsync_end = 1200 + 15 + 2,
+ .vtotal = 1200 + 15 + 2 + 18,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc starry_kr122ea0sra = {
+ .modes = &starry_kr122ea0sra_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 263,
+ .height = 164,
+ },
+};
+
static const struct drm_display_mode tpk_f07a_0102_mode = {
.clock = 33260,
.hdisplay = 800,
@@ -1457,6 +1596,12 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "lg,lb070wv8",
.data = &lg_lb070wv8,
}, {
+ .compatible = "lg,lp079qx1-sp0v",
+ .data = &lg_lp079qx1_sp0v,
+ }, {
+ .compatible = "lg,lp097qx1-spa1",
+ .data = &lg_lp097qx1_spa1,
+ }, {
.compatible = "lg,lp120up1",
.data = &lg_lp120up1,
}, {
@@ -1481,15 +1626,27 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "qiaodian,qd43003c0-40",
.data = &qd43003c0_40,
}, {
+ .compatible = "samsung,lsn122dl01-c01",
+ .data = &samsung_lsn122dl01_c01,
+ }, {
.compatible = "samsung,ltn101nt05",
.data = &samsung_ltn101nt05,
}, {
.compatible = "samsung,ltn140at29-301",
.data = &samsung_ltn140at29_301,
}, {
+ .compatible = "sharp,lq101k1ly04",
+ .data = &sharp_lq101k1ly04,
+ }, {
+ .compatible = "sharp,lq123p1jx31",
+ .data = &sharp_lq123p1jx31,
+ }, {
.compatible = "shelly,sca07010-bfn-lnn",
.data = &shelly_sca07010_bfn_lnn,
}, {
+ .compatible = "starry,kr122ea0sra",
+ .data = &starry_kr122ea0sra,
+ }, {
.compatible = "tpk,f07a-0102",
.data = &tpk_f07a_0102,
}, {
@@ -1701,7 +1858,6 @@ static const struct panel_desc_dsi panasonic_vvx10f004b00 = {
.lanes = 4,
};
-
static const struct of_device_id dsi_of_match[] = {
{
.compatible = "auo,b080uan01",
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index 38c2bb72e..da45b11b6 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -1,12 +1,7 @@
config DRM_QXL
tristate "QXL virtual GPU"
depends on DRM && PCI
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_DEFERRED_IO
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_TTM
select CRC32
help
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index b5d4b4136..04270f5d1 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -203,7 +203,7 @@ qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *releas
bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
{
if (!qxl_check_idle(qdev->release_ring)) {
- queue_work(qdev->gc_queue, &qdev->gc_work);
+ schedule_work(&qdev->gc_work);
if (flush)
flush_work(&qdev->gc_work);
return true;
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 8b5d54385..3aef12742 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -221,7 +221,6 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct qxl_device *qdev = dev->dev_private;
- struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
struct qxl_framebuffer *qfb_src = to_qxl_framebuffer(fb);
struct qxl_framebuffer *qfb_old = to_qxl_framebuffer(crtc->primary->fb);
struct qxl_bo *bo_old = gem_to_qxl_bo(qfb_old->obj);
@@ -252,14 +251,14 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc,
qxl_draw_dirty_fb(qdev, qfb_src, bo, 0, 0,
&norect, one_clip_rect, inc);
- drm_vblank_get(dev, qcrtc->index);
+ drm_crtc_vblank_get(crtc);
if (event) {
spin_lock_irqsave(&dev->event_lock, flags);
- drm_send_vblank_event(dev, qcrtc->index, event);
+ drm_crtc_send_vblank_event(crtc, event);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
- drm_vblank_put(dev, qcrtc->index);
+ drm_crtc_vblank_put(crtc);
ret = qxl_bo_reserve(bo, false);
if (!ret) {
@@ -469,8 +468,7 @@ void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb);
- if (qxl_fb->obj)
- drm_gem_object_unreference_unlocked(qxl_fb->obj);
+ drm_gem_object_unreference_unlocked(qxl_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(qxl_fb);
}
@@ -730,7 +728,6 @@ static int qdev_crtc_init(struct drm_device *dev, int crtc_id)
drm_crtc_init(dev, &qxl_crtc->base, &qxl_crtc_funcs);
qxl_crtc->index = crtc_id;
- drm_mode_crtc_set_gamma_size(&qxl_crtc->base, 256);
drm_crtc_helper_add(&qxl_crtc->base, &qxl_crtc_helper_funcs);
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index 6e6c76080..ffe885395 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -37,7 +37,6 @@ static int alloc_clips(struct qxl_device *qdev,
* the qxl_clip_rects. This is *not* the same as the memory allocated
* on the device, it is offset to qxl_clip_rects.chunk.data */
static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
- struct qxl_drawable *drawable,
unsigned num_clips,
struct qxl_bo *clips_bo)
{
@@ -351,7 +350,7 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
if (ret)
goto out_release_backoff;
- rects = drawable_set_clipping(qdev, drawable, num_clips, clips_bo);
+ rects = drawable_set_clipping(qdev, num_clips, clips_bo);
if (!rects)
goto out_release_backoff;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index dc9df5fe5..460bbceae 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -256,7 +256,7 @@ static struct drm_driver qxl_driver = {
.gem_prime_vmap = qxl_gem_prime_vmap,
.gem_prime_vunmap = qxl_gem_prime_vunmap,
.gem_prime_mmap = qxl_gem_prime_mmap,
- .gem_free_object = qxl_gem_object_free,
+ .gem_free_object_unlocked = qxl_gem_object_free,
.gem_open_object = qxl_gem_object_open,
.gem_close_object = qxl_gem_object_close,
.fops = &qxl_fops,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 3ad6604b3..8e633caa4 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -321,7 +321,6 @@ struct qxl_device {
struct qxl_bo *current_release_bo[3];
int current_release_bo_offset[3];
- struct workqueue_struct *gc_queue;
struct work_struct gc_work;
struct drm_property *hotplug_mode_update_property;
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 5ea57f632..28c142304 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -73,10 +73,12 @@ static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image,
}
}
+#ifdef CONFIG_DRM_FBDEV_EMULATION
static struct fb_deferred_io qxl_defio = {
.delay = QXL_DIRTY_DELAY,
.deferred_io = drm_fb_helper_deferred_io,
};
+#endif
static struct fb_ops qxlfb_ops = {
.owner = THIS_MODULE,
@@ -131,10 +133,6 @@ static int qxlfb_create_pinned_object(struct qxl_fbdev *qfbdev,
int ret;
int aligned_size, size;
int height = mode_cmd->height;
- int bpp;
- int depth;
-
- drm_fb_get_bpp_depth(mode_cmd->pixel_format, &bpp, &depth);
size = mode_cmd->pitches[0] * height;
aligned_size = ALIGN(size, PAGE_SIZE);
@@ -317,8 +315,10 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
goto out_destroy_fbi;
}
+#ifdef CONFIG_DRM_FBDEV_EMULATION
info->fbdefio = &qxl_defio;
fb_deferred_io_init(info);
+#endif
qdev->fbdev_info = info;
qdev->fbdev_qfb = &qfbdev->qfb;
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index 2319800b7..e64224272 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -258,7 +258,6 @@ static int qxl_device_init(struct qxl_device *qdev,
(unsigned long)qdev->surfaceram_size);
- qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
INIT_WORK(&qdev->gc_work, qxl_gc_work);
return 0;
@@ -270,10 +269,7 @@ static void qxl_device_fini(struct qxl_device *qdev)
qxl_bo_unref(&qdev->current_release_bo[0]);
if (qdev->current_release_bo[1])
qxl_bo_unref(&qdev->current_release_bo[1]);
- flush_workqueue(qdev->gc_queue);
- destroy_workqueue(qdev->gc_queue);
- qdev->gc_queue = NULL;
-
+ flush_work(&qdev->gc_work);
qxl_ring_free(qdev->command_ring);
qxl_ring_free(qdev->cursor_ring);
qxl_ring_free(qdev->release_ring);
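
The garbage-collection work now rides the system workqueue: schedule_work()/flush_work() on the existing work item replace the private single-threaded queue, so teardown only waits for the pending item rather than destroying a queue. The pattern, condensed from the hunks above (sketch only):

	INIT_WORK(&qdev->gc_work, qxl_gc_work);	/* init */
	schedule_work(&qdev->gc_work);		/* was queue_work(gc_queue, ...) */
	flush_work(&qdev->gc_work);		/* fini: wait, nothing to destroy */
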
@@ -310,10 +306,6 @@ int qxl_driver_load(struct drm_device *dev, unsigned long flags)
struct qxl_device *qdev;
int r;
- /* require kms */
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
if (qdev == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 4efa8e261..f599cd073 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -96,7 +96,7 @@ retry:
return 0;
if (have_drawable_releases && sc > 300) {
- FENCE_WARN(fence, "failed to wait on release %d "
+ FENCE_WARN(fence, "failed to wait on release %llu "
"after spincount %d\n",
fence->context & ~0xf0000000, sc);
goto signaled;
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 0738d74c8..d50c9679e 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -350,11 +350,19 @@ static int qxl_bo_move(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem)
{
struct ttm_mem_reg *old_mem = &bo->mem;
+ int ret;
+
+ ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+ if (ret)
+ return ret;
+
+
if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
qxl_move_null(bo, new_mem);
return 0;
}
- return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+ return ttm_bo_move_memcpy(bo, evict, interruptible,
+ no_wait_gpu, new_mem);
}
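
qxl_bo_move() now follows the buffer-move contract of the reworked 4.8 TTM API: wait for the BO to go idle first, then move, with ttm_bo_move_memcpy() taking interruptible explicitly. The same two changes appear in radeon_bo_move() further down. A minimal sketch of the pattern, assuming only the signatures visible in this hunk:

	static int example_bo_move(struct ttm_buffer_object *bo, bool evict,
				   bool interruptible, bool no_wait_gpu,
				   struct ttm_mem_reg *new_mem)
	{
		int ret = ttm_bo_wait(bo, interruptible, no_wait_gpu); /* idle first */

		if (ret)
			return ret;
		return ttm_bo_move_memcpy(bo, evict, interruptible, no_wait_gpu,
					  new_mem);
	}
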
static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 17e34546a..1dcf39084 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -276,14 +276,14 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
atombios_blank_crtc(crtc, ATOM_DISABLE);
if (dev->num_crtcs > radeon_crtc->crtc_id)
- drm_vblank_on(dev, radeon_crtc->crtc_id);
+ drm_crtc_vblank_on(crtc);
radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
if (dev->num_crtcs > radeon_crtc->crtc_id)
- drm_vblank_off(dev, radeon_crtc->crtc_id);
+ drm_crtc_vblank_off(crtc);
if (radeon_crtc->enabled)
atombios_blank_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 35e0fc3ae..7ba450832 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -3843,7 +3843,10 @@ static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
if (i >= sclk_table->count) {
pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
} else {
- /* XXX check display min clock requirements */
+ /* XXX The current code always reprogrammed the sclk levels,
+ * but we don't currently handle disp sclk requirements
+ * so just skip it.
+ */
if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
}
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 807756e4a..bb7609aed 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -1911,12 +1911,17 @@ static int cik_init_microcode(struct radeon_device *rdev)
int new_fw = 0;
int err;
int num_fw;
+ bool new_smc = false;
DRM_DEBUG("\n");
switch (rdev->family) {
case CHIP_BONAIRE:
chip_name = "BONAIRE";
+ if ((rdev->pdev->revision == 0x80) ||
+ (rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->device == 0x665f))
+ new_smc = true;
new_chip_name = "bonaire";
pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
me_req_size = CIK_ME_UCODE_SIZE * 4;
@@ -1931,6 +1936,8 @@ static int cik_init_microcode(struct radeon_device *rdev)
break;
case CHIP_HAWAII:
chip_name = "HAWAII";
+ if (rdev->pdev->revision == 0x80)
+ new_smc = true;
new_chip_name = "hawaii";
pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
me_req_size = CIK_ME_UCODE_SIZE * 4;
@@ -2180,7 +2187,10 @@ static int cik_init_microcode(struct radeon_device *rdev)
}
}
- snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", new_chip_name);
+ if (new_smc)
+ snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", new_chip_name);
+ else
+ snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", new_chip_name);
err = reject_firmware(&rdev->smc_fw, fw_name, rdev->dev);
if (err) {
snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
@@ -8275,7 +8285,8 @@ static int cik_startup(struct radeon_device *rdev)
}
}
rdev->rlc.cs_data = ci_cs_data;
- rdev->rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4;
+ rdev->rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */
+ rdev->rlc.cp_table_size += 64 * 1024; /* GDS */
r = sumo_rlc_init(rdev);
if (r) {
DRM_ERROR("Failed to init rlc BOs!\n");
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index db275b7ed..c1828f95f 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -5840,7 +5840,7 @@ int evergreen_init(struct radeon_device *rdev)
r = r600_init_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load firmware!\n");
- return r;
+ /*(DEBLOBBED)*/
}
}
}
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 0d3f744de..d960d3915 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -2209,6 +2209,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
break;
}
+ case PACKET3_PFP_SYNC_ME:
+ if (pkt->count) {
+ DRM_ERROR("bad PFP_SYNC_ME\n");
+ return -EINVAL;
+ }
+ break;
case PACKET3_SURFACE_SYNC:
if (pkt->count != 3) {
DRM_ERROR("bad SURFACE_SYNC\n");
@@ -3381,6 +3387,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
case PACKET3_MPEG_INDEX:
case PACKET3_WAIT_REG_MEM:
case PACKET3_MEM_WRITE:
+ case PACKET3_PFP_SYNC_ME:
case PACKET3_SURFACE_SYNC:
case PACKET3_EVENT_WRITE:
case PACKET3_EVENT_WRITE_EOP:
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 0b174e14e..c8e3d394c 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -1624,6 +1624,7 @@
*/
# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
+#define PACKET3_PFP_SYNC_ME 0x42
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 80b24a495..5633ee3eb 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -2386,7 +2386,7 @@ struct radeon_device {
struct radeon_mman mman;
struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
wait_queue_head_t fence_queue;
- unsigned fence_context;
+ u64 fence_context;
struct mutex ring_lock;
struct radeon_ring ring[RADEON_NUM_RINGS];
bool ib_pool_ready;
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 59acd0e5c..31c9a92d6 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -741,13 +741,6 @@ int radeon_acpi_init(struct radeon_device *rdev)
}
atif->encoder_for_bl = target;
- if (!target) {
- /* Brightness change notification is enabled, but we
- * didn't find a backlight controller, this should
- * never happen.
- */
- DRM_ERROR("Cannot find a backlight controller\n");
- }
}
if (atif->functions.sbios_requests && !atif->functions.system_params) {
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 1b3f4e51f..ddef0d494 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -28,6 +28,7 @@ struct radeon_atpx_functions {
struct radeon_atpx {
acpi_handle handle;
struct radeon_atpx_functions functions;
+ bool is_hybrid;
};
static struct radeon_atpx_priv {
@@ -63,6 +64,14 @@ bool radeon_has_atpx(void) {
return radeon_atpx_priv.atpx_detected;
}
+bool radeon_has_atpx_dgpu_power_cntl(void) {
+ return radeon_atpx_priv.atpx.functions.power_cntl;
+}
+
+bool radeon_is_atpx_hybrid(void) {
+ return radeon_atpx_priv.atpx.is_hybrid;
+}
+
/**
* radeon_atpx_call - call an ATPX method
*
@@ -142,18 +151,12 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
*/
static int radeon_atpx_validate(struct radeon_atpx *atpx)
{
- /* make sure required functions are enabled */
- /* dGPU power control is required */
- if (atpx->functions.power_cntl == false) {
- printk("ATPX dGPU power cntl not present, forcing\n");
- atpx->functions.power_cntl = true;
- }
+ u32 valid_bits = 0;
if (atpx->functions.px_params) {
union acpi_object *info;
struct atpx_px_params output;
size_t size;
- u32 valid_bits;
info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL);
if (!info)
@@ -172,19 +175,33 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx)
memcpy(&output, info->buffer.pointer, size);
valid_bits = output.flags & output.valid_flags;
- /* if separate mux flag is set, mux controls are required */
- if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
- atpx->functions.i2c_mux_cntl = true;
- atpx->functions.disp_mux_cntl = true;
- }
- /* if any outputs are muxed, mux controls are required */
- if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
- ATPX_TV_SIGNAL_MUXED |
- ATPX_DFP_SIGNAL_MUXED))
- atpx->functions.disp_mux_cntl = true;
kfree(info);
}
+
+ /* if separate mux flag is set, mux controls are required */
+ if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
+ atpx->functions.i2c_mux_cntl = true;
+ atpx->functions.disp_mux_cntl = true;
+ }
+ /* if any outputs are muxed, mux controls are required */
+ if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
+ ATPX_TV_SIGNAL_MUXED |
+ ATPX_DFP_SIGNAL_MUXED))
+ atpx->functions.disp_mux_cntl = true;
+
+ /* some bioses set these bits rather than flagging power_cntl as supported */
+ if (valid_bits & (ATPX_DYNAMIC_PX_SUPPORTED |
+ ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED))
+ atpx->functions.power_cntl = true;
+
+ atpx->is_hybrid = false;
+ if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
+ printk("ATPX Hybrid Graphics\n");
+ atpx->functions.power_cntl = false;
+ atpx->is_hybrid = true;
+ }
+
return 0;
}
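
Taken together with radeon_pmops_runtime_suspend() below, the validated flags produce this runtime-PM policy (a summary of the patch, not new behavior):

	is_hybrid  power_cntl  runtime-suspend action
	true       false       pci_set_power_state(PCI_D3cold)
	false      true        no state change; ATPX power_cntl cuts dGPU power
	false      false       pci_set_power_state(PCI_D3hot)
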
@@ -510,7 +527,6 @@ static int radeon_atpx_get_client_id(struct pci_dev *pdev)
static const struct vga_switcheroo_handler radeon_atpx_handler = {
.switchto = radeon_atpx_switchto,
.power_state = radeon_atpx_power_state,
- .init = radeon_atpx_init,
.get_client_id = radeon_atpx_get_client_id,
};
@@ -546,6 +562,7 @@ static bool radeon_atpx_detect(void)
printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
acpi_method_name);
radeon_atpx_priv.atpx_detected = true;
+ radeon_atpx_init();
return true;
}
return false;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 21c44b229..a00dd2f74 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -30,6 +30,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
+#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
@@ -1526,6 +1527,9 @@ int radeon_device_init(struct radeon_device *rdev,
return 0;
failed:
+ /* balance pm_runtime_get_sync() in radeon_driver_unload_kms() */
+ if (radeon_is_px(ddev))
+ pm_runtime_put_noidle(ddev->dev);
if (runtime)
vga_switcheroo_fini_domain_pm_ops(rdev->dev);
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 6a41b4982..c3206fb8f 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -231,19 +231,21 @@ void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
*blue = radeon_crtc->lut_b[regno] << 6;
}
-static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t start, uint32_t size)
+static int radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- int end = (start + size > 256) ? 256 : start + size, i;
+ int i;
/* userspace palettes are always correct as is */
- for (i = start; i < end; i++) {
+ for (i = 0; i < size; i++) {
radeon_crtc->lut_r[i] = red[i] >> 6;
radeon_crtc->lut_g[i] = green[i] >> 6;
radeon_crtc->lut_b[i] = blue[i] >> 6;
}
radeon_crtc_load_lut(crtc);
+
+ return 0;
}
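
This tracks the 4.8 change to the core callback, which dropped the legacy start offset and gained an error return; paraphrased from include/drm/drm_crtc.h of that release:

	int (*gamma_set)(struct drm_crtc *crtc, u16 *red, u16 *green,
			 u16 *blue, uint32_t size);
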
static void radeon_crtc_destroy(struct drm_crtc *crtc)
@@ -381,7 +383,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
- drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
+ drm_crtc_vblank_put(&radeon_crtc->base);
radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
queue_work(radeon_crtc->flip_queue, &work->unpin_work);
}
@@ -598,7 +600,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
}
work->base = base;
- r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id);
+ r = drm_crtc_vblank_get(crtc);
if (r) {
DRM_ERROR("failed to get vblank before flip\n");
goto pflip_cleanup;
@@ -625,7 +627,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
return 0;
vblank_cleanup:
- drm_vblank_put(crtc->dev, radeon_crtc->crtc_id);
+ drm_crtc_vblank_put(crtc);
pflip_cleanup:
if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
@@ -688,6 +690,7 @@ radeon_crtc_set_config(struct drm_mode_set *set)
pm_runtime_put_autosuspend(dev->dev);
return ret;
}
+
static const struct drm_crtc_funcs radeon_crtc_funcs = {
.cursor_set2 = radeon_crtc_cursor_set2,
.cursor_move = radeon_crtc_cursor_move,
@@ -711,7 +714,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
radeon_crtc->crtc_id = index;
- radeon_crtc->flip_queue = create_singlethread_workqueue("radeon-crtc");
+ radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0);
rdev->mode_info.crtcs[index] = radeon_crtc;
if (rdev->family >= CHIP_BONAIRE) {
@@ -1321,9 +1324,7 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
- if (radeon_fb->obj) {
- drm_gem_object_unreference_unlocked(radeon_fb->obj);
- }
+ drm_gem_object_unreference_unlocked(radeon_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(radeon_fb);
}
@@ -1708,6 +1709,7 @@ void radeon_modeset_fini(struct radeon_device *rdev)
radeon_afmt_fini(rdev);
drm_kms_helper_poll_fini(rdev->ddev);
radeon_hpd_fini(rdev);
+ drm_crtc_force_disable_all(rdev->ddev);
drm_mode_config_cleanup(rdev->ddev);
rdev->mode_info.mode_config_initialized = false;
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index b55aa7401..c01a7c6ab 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -34,11 +34,9 @@
#include "radeon_drv.h"
#include <drm/drm_pciids.h>
-#include <linux/apple-gmux.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
-#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_gem.h>
@@ -95,9 +93,10 @@
* 2.43.0 - RADEON_INFO_GPU_RESET_COUNTER
* 2.44.0 - SET_APPEND_CNT packet3 support
* 2.45.0 - Allow setting shader registers using DMA/COPY packet3 on SI
+ * 2.46.0 - Add PFP_SYNC_ME support on evergreen
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 45
+#define KMS_DRIVER_MINOR 46
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -164,9 +163,13 @@ void radeon_debugfs_cleanup(struct drm_minor *minor);
#if defined(CONFIG_VGA_SWITCHEROO)
void radeon_register_atpx_handler(void);
void radeon_unregister_atpx_handler(void);
+bool radeon_has_atpx_dgpu_power_cntl(void);
+bool radeon_is_atpx_hybrid(void);
#else
static inline void radeon_register_atpx_handler(void) {}
static inline void radeon_unregister_atpx_handler(void) {}
+static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
+static inline bool radeon_is_atpx_hybrid(void) { return false; }
#endif
int radeon_no_wb;
@@ -340,13 +343,7 @@ static int radeon_pci_probe(struct pci_dev *pdev,
if (ret == -EPROBE_DEFER)
return ret;
- /*
- * apple-gmux is needed on dual GPU MacBook Pro
- * to probe the panel if we're the inactive GPU.
- */
- if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
- apple_gmux_present() && pdev != vga_default_device() &&
- !vga_switcheroo_handler_flags())
+ if (vga_switcheroo_client_probe_defer(pdev))
return -EPROBE_DEFER;
/* Get rid of things like offb */
@@ -412,7 +409,10 @@ static int radeon_pmops_runtime_suspend(struct device *dev)
pci_save_state(pdev);
pci_disable_device(pdev);
pci_ignore_hotplug(pdev);
- pci_set_power_state(pdev, PCI_D3cold);
+ if (radeon_is_atpx_hybrid())
+ pci_set_power_state(pdev, PCI_D3cold);
+ else if (!radeon_has_atpx_dgpu_power_cntl())
+ pci_set_power_state(pdev, PCI_D3hot);
drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
return 0;
@@ -429,7 +429,9 @@ static int radeon_pmops_runtime_resume(struct device *dev)
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- pci_set_power_state(pdev, PCI_D0);
+ if (radeon_is_atpx_hybrid() ||
+ !radeon_has_atpx_dgpu_power_cntl())
+ pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
ret = pci_enable_device(pdev);
if (ret)
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 414953c46..835563c1f 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -63,7 +63,10 @@ int radeon_driver_unload_kms(struct drm_device *dev)
if (rdev->rmmio == NULL)
goto done_free;
- pm_runtime_get_sync(dev->dev);
+ if (radeon_is_px(dev)) {
+ pm_runtime_get_sync(dev->dev);
+ pm_runtime_forbid(dev->dev);
+ }
radeon_kfd_device_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 478d4099b..d0de4022f 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -332,14 +332,14 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl));
}
if (dev->num_crtcs > radeon_crtc->crtc_id)
- drm_vblank_on(dev, radeon_crtc->crtc_id);
+ drm_crtc_vblank_on(crtc);
radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
if (dev->num_crtcs > radeon_crtc->crtc_id)
- drm_vblank_off(dev, radeon_crtc->crtc_id);
+ drm_crtc_vblank_off(crtc);
if (radeon_crtc->crtc_id)
WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
else {
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 38226d925..4b6542538 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -246,6 +246,7 @@ static void radeon_set_power_state(struct radeon_device *rdev)
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
+ struct drm_crtc *crtc;
int i, r;
/* no need to take locks, etc. if nothing's going to change */
@@ -274,26 +275,30 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
radeon_unmap_vram_bos(rdev);
if (rdev->irq.installed) {
- for (i = 0; i < rdev->num_crtc; i++) {
+ i = 0;
+ drm_for_each_crtc(crtc, rdev->ddev) {
if (rdev->pm.active_crtcs & (1 << i)) {
/* This can fail if a modeset is in progress */
- if (drm_vblank_get(rdev->ddev, i) == 0)
+ if (drm_crtc_vblank_get(crtc) == 0)
rdev->pm.req_vblank |= (1 << i);
else
DRM_DEBUG_DRIVER("crtc %d no vblank, can glitch\n",
i);
}
+ i++;
}
}
radeon_set_power_state(rdev);
if (rdev->irq.installed) {
- for (i = 0; i < rdev->num_crtc; i++) {
+ i = 0;
+ drm_for_each_crtc(crtc, rdev->ddev) {
if (rdev->pm.req_vblank & (1 << i)) {
rdev->pm.req_vblank &= ~(1 << i);
- drm_vblank_put(rdev->ddev, i);
+ drm_crtc_vblank_put(crtc);
}
+ i++;
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 0ab76dd13..c2e0a1ccd 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -300,8 +300,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
if (IS_ERR(fence))
return PTR_ERR(fence);
- r = ttm_bo_move_accel_cleanup(bo, &fence->base,
- evict, no_wait_gpu, new_mem);
+ r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, new_mem);
radeon_fence_unref(&fence);
return r;
}
@@ -347,7 +346,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
goto out_cleanup;
}
- r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
+ r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, new_mem);
out_cleanup:
ttm_bo_mem_put(bo, &tmp_mem);
return r;
@@ -380,7 +379,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
return r;
}
- r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
+ r = ttm_bo_move_ttm(bo, true, interruptible, no_wait_gpu, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -403,6 +402,10 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem = &bo->mem;
int r;
+ r = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+ if (r)
+ return r;
+
/* Can't move a pinned BO */
rbo = container_of(bo, struct radeon_bo, tbo);
if (WARN_ON_ONCE(rbo->pin_count > 0))
@@ -441,7 +444,8 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
if (r) {
memcpy:
- r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, evict, interruptible,
+ no_wait_gpu, new_mem);
if (r) {
return r;
}
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 073454a2f..3e70a6120 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1580,12 +1580,16 @@ static int si_init_microcode(struct radeon_device *rdev)
char fw_name[30];
int err;
int new_fw = 0;
+ bool new_smc = false;
DRM_DEBUG("\n");
switch (rdev->family) {
case CHIP_TAHITI:
chip_name = "TAHITI";
+ /* XXX: figure out which Tahitis need the new ucode */
+ if (0)
+ new_smc = true;
new_chip_name = "tahiti";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1597,6 +1601,13 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_PITCAIRN:
chip_name = "PITCAIRN";
+ if ((rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->device == 0x6810) ||
+ (rdev->pdev->device == 0x6811) ||
+ (rdev->pdev->device == 0x6816) ||
+ (rdev->pdev->device == 0x6817) ||
+ (rdev->pdev->device == 0x6806))
+ new_smc = true;
new_chip_name = "pitcairn";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1608,6 +1619,16 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_VERDE:
chip_name = "VERDE";
+ if ((rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->revision == 0x83) ||
+ (rdev->pdev->revision == 0x87) ||
+ (rdev->pdev->device == 0x6820) ||
+ (rdev->pdev->device == 0x6821) ||
+ (rdev->pdev->device == 0x6822) ||
+ (rdev->pdev->device == 0x6823) ||
+ (rdev->pdev->device == 0x682A) ||
+ (rdev->pdev->device == 0x682B))
+ new_smc = true;
new_chip_name = "verde";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1619,6 +1640,13 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_OLAND:
chip_name = "OLAND";
+ if ((rdev->pdev->revision == 0xC7) ||
+ (rdev->pdev->revision == 0x80) ||
+ (rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->revision == 0x83) ||
+ (rdev->pdev->device == 0x6604) ||
+ (rdev->pdev->device == 0x6605))
+ new_smc = true;
new_chip_name = "oland";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1629,6 +1657,13 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_HAINAN:
chip_name = "HAINAN";
+ if ((rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->revision == 0x83) ||
+ (rdev->pdev->revision == 0xC3) ||
+ (rdev->pdev->device == 0x6664) ||
+ (rdev->pdev->device == 0x6665) ||
+ (rdev->pdev->device == 0x6667))
+ new_smc = true;
new_chip_name = "hainan";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1774,7 +1809,10 @@ static int si_init_microcode(struct radeon_device *rdev)
}
}
- snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", new_chip_name);
+ if (new_smc)
+ snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", new_chip_name);
+ else
+ snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", new_chip_name);
err = reject_firmware(&rdev->smc_fw, fw_name, rdev->dev);
if (err) {
snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index e6abc09b6..1f78ec254 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -3015,6 +3015,12 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
if (rdev->pdev->device == 0x6811 &&
rdev->pdev->revision == 0x81)
max_mclk = 120000;
+ /* limit sclk/mclk on Jet parts for stability */
+ if (rdev->pdev->device == 0x6665 &&
+ rdev->pdev->revision == 0xc3) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
if (rps->vce_active) {
rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index 7fc3ca5ce..4c2fd056d 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -6,7 +6,6 @@ config DRM_RCAR_DU
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
- select DRM_KMS_FB_HELPER
select VIDEOMODE_HELPERS
help
Choose this option if you have an R-Car chipset.
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
index 827711e28..d3b446510 100644
--- a/drivers/gpu/drm/rcar-du/Makefile
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -7,8 +7,8 @@ rcar-du-drm-y := rcar_du_crtc.o \
rcar_du_plane.o \
rcar_du_vgacon.o
-rcar-du-drm-$(CONFIG_DRM_RCAR_HDMI) += rcar_du_hdmicon.o \
- rcar_du_hdmienc.o
+rcar-du-drm-$(CONFIG_DRM_RCAR_HDMI) += rcar_du_hdmienc.o
+
rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_lvdsenc.o
rcar-du-drm-$(CONFIG_DRM_RCAR_VSP) += rcar_du_vsp.o
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 0d8bdda73..7316fc7fa 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -196,7 +196,7 @@ void rcar_du_crtc_route_output(struct drm_crtc *crtc,
static unsigned int plane_zpos(struct rcar_du_plane *plane)
{
- return to_rcar_plane_state(plane->plane.state)->zpos;
+ return plane->plane.state->normalized_zpos;
}
static const struct rcar_du_format_info *
@@ -552,7 +552,7 @@ static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);
if (status & DSSR_FRM) {
- drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index);
+ drm_crtc_handle_vblank(&rcrtc->crtc);
rcar_du_crtc_finish_page_flip(rcrtc);
ret = IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index fb9242d27..899ef7a2a 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -217,7 +217,7 @@ static struct drm_driver rcar_du_driver = {
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = rcar_du_enable_vblank,
.disable_vblank = rcar_du_disable_vblank,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
@@ -278,7 +278,6 @@ static int rcar_du_remove(struct platform_device *pdev)
struct rcar_du_device *rcdu = platform_get_drvdata(pdev);
struct drm_device *ddev = rcdu->ddev;
- drm_connector_unregister_all(ddev);
drm_dev_unregister(ddev);
if (rcdu->fbdev)
@@ -320,8 +319,6 @@ static int rcar_du_probe(struct platform_device *pdev)
if (!ddev)
return -ENOMEM;
- drm_dev_set_unique(ddev, dev_name(&pdev->dev));
-
rcdu->ddev = ddev;
ddev->dev_private = rcdu;
@@ -339,15 +336,15 @@ static int rcar_du_probe(struct platform_device *pdev)
* disabled for all CRTCs.
*/
ret = drm_vblank_init(ddev, (1 << rcdu->info->num_crtcs) - 1);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to initialize vblank\n");
+ if (ret < 0)
goto error;
- }
/* DRM/KMS objects */
ret = rcar_du_modeset_init(rcdu);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to initialize DRM/KMS (%d)\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed to initialize DRM/KMS (%d)\n", ret);
goto error;
}
@@ -360,10 +357,6 @@ static int rcar_du_probe(struct platform_device *pdev)
if (ret)
goto error;
- ret = drm_connector_register_all(ddev);
- if (ret < 0)
- goto error;
-
DRM_INFO("Device %s probed\n", dev_name(&pdev->dev));
return 0;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index ed35467d9..c843c3134 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -92,7 +92,6 @@ struct rcar_du_device {
struct {
struct drm_property *alpha;
struct drm_property *colorkey;
- struct drm_property *zpos;
} props;
unsigned int dpad0_source;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 4e939e41f..ab8645c57 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -19,7 +19,6 @@
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
-#include "rcar_du_hdmicon.h"
#include "rcar_du_hdmienc.h"
#include "rcar_du_kms.h"
#include "rcar_du_lvdscon.h"
@@ -27,18 +26,6 @@
#include "rcar_du_vgacon.h"
/* -----------------------------------------------------------------------------
- * Common connector functions
- */
-
-struct drm_encoder *
-rcar_du_connector_best_encoder(struct drm_connector *connector)
-{
- struct rcar_du_connector *rcon = to_rcar_connector(connector);
-
- return rcar_encoder_to_drm_encoder(rcon->encoder);
-}
-
-/* -----------------------------------------------------------------------------
* Encoder
*/
@@ -186,7 +173,7 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
break;
case DRM_MODE_ENCODER_TMDS:
- ret = rcar_du_hdmi_connector_init(rcdu, renc);
+ /* connector managed by the bridge driver */
break;
default:
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
index 719b6f2a0..7fc10a9c3 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
@@ -15,7 +15,6 @@
#define __RCAR_DU_ENCODER_H__
#include <drm/drm_crtc.h>
-#include <drm/drm_encoder_slave.h>
struct rcar_du_device;
struct rcar_du_hdmienc;
@@ -30,16 +29,16 @@ enum rcar_du_encoder_type {
};
struct rcar_du_encoder {
- struct drm_encoder_slave slave;
+ struct drm_encoder base;
enum rcar_du_output output;
struct rcar_du_hdmienc *hdmi;
struct rcar_du_lvdsenc *lvds;
};
#define to_rcar_encoder(e) \
- container_of(e, struct rcar_du_encoder, slave.base)
+ container_of(e, struct rcar_du_encoder, base)
-#define rcar_encoder_to_drm_encoder(e) (&(e)->slave.base)
+#define rcar_encoder_to_drm_encoder(e) (&(e)->base)
struct rcar_du_connector {
struct drm_connector connector;
@@ -49,9 +48,6 @@ struct rcar_du_connector {
#define to_rcar_connector(c) \
container_of(c, struct rcar_du_connector, connector)
-struct drm_encoder *
-rcar_du_connector_best_encoder(struct drm_connector *connector);
-
int rcar_du_encoder_init(struct rcar_du_device *rcdu,
enum rcar_du_encoder_type type,
enum rcar_du_output output,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
index 461662d23..e03004f45 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
@@ -16,7 +16,6 @@
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_encoder_slave.h>
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
@@ -25,20 +24,14 @@
struct rcar_du_hdmienc {
struct rcar_du_encoder *renc;
- struct device *dev;
bool enabled;
};
#define to_rcar_hdmienc(e) (to_rcar_encoder(e)->hdmi)
-#define to_slave_funcs(e) (to_rcar_encoder(e)->slave.slave_funcs)
static void rcar_du_hdmienc_disable(struct drm_encoder *encoder)
{
struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
- const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
-
- if (sfuncs->dpms)
- sfuncs->dpms(encoder, DRM_MODE_DPMS_OFF);
if (hdmienc->renc->lvds)
rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc,
@@ -50,15 +43,11 @@ static void rcar_du_hdmienc_disable(struct drm_encoder *encoder)
static void rcar_du_hdmienc_enable(struct drm_encoder *encoder)
{
struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
- const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
if (hdmienc->renc->lvds)
rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc,
true);
- if (sfuncs->dpms)
- sfuncs->dpms(encoder, DRM_MODE_DPMS_ON);
-
hdmienc->enabled = true;
}
@@ -67,29 +56,21 @@ static int rcar_du_hdmienc_atomic_check(struct drm_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
- const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
- const struct drm_display_mode *mode = &crtc_state->mode;
if (hdmienc->renc->lvds)
rcar_du_lvdsenc_atomic_check(hdmienc->renc->lvds,
adjusted_mode);
- if (sfuncs->mode_fixup == NULL)
- return 0;
-
- return sfuncs->mode_fixup(encoder, mode, adjusted_mode) ? 0 : -EINVAL;
+ return 0;
}
+
static void rcar_du_hdmienc_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
- const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
-
- if (sfuncs->mode_set)
- sfuncs->mode_set(encoder, mode, adjusted_mode);
rcar_du_crtc_route_output(encoder->crtc, hdmienc->renc->output);
}
@@ -109,7 +90,6 @@ static void rcar_du_hdmienc_cleanup(struct drm_encoder *encoder)
rcar_du_hdmienc_disable(encoder);
drm_encoder_cleanup(encoder);
- put_device(hdmienc->dev);
}
static const struct drm_encoder_funcs encoder_funcs = {
@@ -120,8 +100,7 @@ int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
struct rcar_du_encoder *renc, struct device_node *np)
{
struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
- struct drm_i2c_encoder_driver *driver;
- struct i2c_client *i2c_slave;
+ struct drm_bridge *bridge;
struct rcar_du_hdmienc *hdmienc;
int ret;
@@ -129,44 +108,30 @@ int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
if (hdmienc == NULL)
return -ENOMEM;
- /* Locate the slave I2C device and driver. */
- i2c_slave = of_find_i2c_device_by_node(np);
- if (!i2c_slave || !i2c_get_clientdata(i2c_slave)) {
- dev_dbg(rcdu->dev,
- "can't get I2C slave for %s, deferring probe\n",
- of_node_full_name(np));
+ /* Locate the DRM bridge from the HDMI encoder DT node */
+ bridge = of_drm_find_bridge(np);
+ if (!bridge)
return -EPROBE_DEFER;
- }
-
- hdmienc->dev = &i2c_slave->dev;
-
- if (hdmienc->dev->driver == NULL) {
- dev_dbg(rcdu->dev,
- "I2C slave %s not probed yet, deferring probe\n",
- dev_name(hdmienc->dev));
- ret = -EPROBE_DEFER;
- goto error;
- }
-
- /* Initialize the slave encoder. */
- driver = to_drm_i2c_encoder_driver(to_i2c_driver(hdmienc->dev->driver));
- ret = driver->encoder_init(i2c_slave, rcdu->ddev, &renc->slave);
- if (ret < 0)
- goto error;
ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
if (ret < 0)
- goto error;
+ return ret;
drm_encoder_helper_add(encoder, &encoder_helper_funcs);
renc->hdmi = hdmienc;
hdmienc->renc = renc;
- return 0;
+ /* Link drm_bridge to encoder */
+ bridge->encoder = encoder;
+ encoder->bridge = bridge;
+
+ ret = drm_bridge_attach(rcdu->ddev, bridge);
+ if (ret) {
+ drm_encoder_cleanup(encoder);
+ return ret;
+ }
-error:
- put_device(hdmienc->dev);
- return ret;
+ return 0;
}
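The rework above replaces the drm_encoder_slave/I2C lookup with the drm_bridge abstraction. A minimal sketch of the 4.8-era attach sequence, assuming np is the bridge's DT node (drm_bridge_attach() took two arguments at this point; later kernels changed the signature):

#include <linux/of.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>

/* Bind a DT-described bridge to a freshly initialised encoder. */
static int attach_dt_bridge(struct drm_device *ddev,
			    struct drm_encoder *encoder,
			    struct device_node *np)
{
	struct drm_bridge *bridge;
	int ret;

	bridge = of_drm_find_bridge(np);
	if (!bridge)
		return -EPROBE_DEFER;	/* bridge driver not bound yet */

	/* In 4.8 the encoder/bridge links are wired up by hand. */
	bridge->encoder = encoder;
	encoder->bridge = bridge;

	ret = drm_bridge_attach(ddev, bridge);
	if (ret)
		drm_encoder_cleanup(encoder);

	return ret;
}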
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index e70a4f33d..f03eb5531 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -288,6 +288,8 @@ static int rcar_du_atomic_commit(struct drm_device *dev,
{
struct rcar_du_device *rcdu = dev->dev_private;
struct rcar_du_commit *commit;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
unsigned int i;
int ret;
@@ -309,10 +311,8 @@ static int rcar_du_atomic_commit(struct drm_device *dev,
/* Wait until all affected CRTCs have completed previous commits and
* mark them as pending.
*/
- for (i = 0; i < dev->mode_config.num_crtc; ++i) {
- if (state->crtcs[i])
- commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
- }
+ for_each_crtc_in_state(state, crtc, crtc_state, i)
+ commit->crtcs |= drm_crtc_mask(crtc);
spin_lock(&rcdu->commit.wait.lock);
ret = wait_event_interruptible_locked(rcdu->commit.wait,
@@ -327,7 +327,7 @@ static int rcar_du_atomic_commit(struct drm_device *dev,
}
/* Swap the state, this is the point of no return. */
- drm_atomic_helper_swap_state(dev, state);
+ drm_atomic_helper_swap_state(state, true);
if (nonblock)
schedule_work(&commit->work);
@@ -527,11 +527,6 @@ static int rcar_du_properties_init(struct rcar_du_device *rcdu)
if (rcdu->props.colorkey == NULL)
return -ENOMEM;
- rcdu->props.zpos =
- drm_property_create_range(rcdu->ddev, 0, "zpos", 1, 7);
- if (rcdu->props.zpos == NULL)
- return -ENOMEM;
-
return 0;
}
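The hunks above drop the open-coded scan of state->crtcs[] in favour of the for_each_crtc_in_state() iterator, and move to the 4.8 drm_atomic_helper_swap_state(state, stall) signature. A minimal sketch of the iterator pattern, using only core 4.8 DRM API:

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>

/* Collect a bitmask of the CRTCs touched by an atomic state. */
static u32 affected_crtcs(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	unsigned int i;
	u32 mask = 0;

	/* Visits only the CRTCs actually present in this state. */
	for_each_crtc_in_state(state, crtc, crtc_state, i)
		mask |= drm_crtc_mask(crtc);

	return mask;
}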
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index e905f5da7..6afd0af31 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -59,7 +59,6 @@ static int rcar_du_lvds_connector_get_modes(struct drm_connector *connector)
static const struct drm_connector_helper_funcs connector_helper_funcs = {
.get_modes = rcar_du_lvds_connector_get_modes,
- .best_encoder = rcar_du_connector_best_encoder,
};
static enum drm_connector_status
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index d445e67f7..a74f8ed8c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -140,18 +140,17 @@ int rcar_du_atomic_check_planes(struct drm_device *dev,
bool needs_realloc = false;
unsigned int groups = 0;
unsigned int i;
+ struct drm_plane *drm_plane;
+ struct drm_plane_state *drm_plane_state;
/* Check if hardware planes need to be reallocated. */
- for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
+ for_each_plane_in_state(state, drm_plane, drm_plane_state, i) {
struct rcar_du_plane_state *plane_state;
struct rcar_du_plane *plane;
unsigned int index;
- if (!state->planes[i])
- continue;
-
- plane = to_rcar_plane(state->planes[i]);
- plane_state = to_rcar_plane_state(state->plane_states[i]);
+ plane = to_rcar_plane(drm_plane);
+ plane_state = to_rcar_plane_state(drm_plane_state);
dev_dbg(rcdu->dev, "%s: checking plane (%u,%tu)\n", __func__,
plane->group->index, plane - plane->group->planes);
@@ -247,18 +246,15 @@ int rcar_du_atomic_check_planes(struct drm_device *dev,
}
/* Reallocate hardware planes for each plane that needs it. */
- for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
+ for_each_plane_in_state(state, drm_plane, drm_plane_state, i) {
struct rcar_du_plane_state *plane_state;
struct rcar_du_plane *plane;
unsigned int crtc_planes;
unsigned int free;
int idx;
- if (!state->planes[i])
- continue;
-
- plane = to_rcar_plane(state->planes[i]);
- plane_state = to_rcar_plane_state(state->plane_states[i]);
+ plane = to_rcar_plane(drm_plane);
+ plane_state = to_rcar_plane_state(drm_plane_state);
dev_dbg(rcdu->dev, "%s: allocating plane (%u,%tu)\n", __func__,
plane->group->index, plane - plane->group->planes);
@@ -656,7 +652,7 @@ static void rcar_du_plane_reset(struct drm_plane *plane)
state->source = RCAR_DU_PLANE_MEMORY;
state->alpha = 255;
state->colorkey = RCAR_DU_COLORKEY_NONE;
- state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : 1;
+ state->state.zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : 1;
plane->state = &state->state;
plane->state->plane = plane;
@@ -674,8 +670,6 @@ static int rcar_du_plane_atomic_set_property(struct drm_plane *plane,
rstate->alpha = val;
else if (property == rcdu->props.colorkey)
rstate->colorkey = val;
- else if (property == rcdu->props.zpos)
- rstate->zpos = val;
else
return -EINVAL;
@@ -694,8 +688,6 @@ static int rcar_du_plane_atomic_get_property(struct drm_plane *plane,
*val = rstate->alpha;
else if (property == rcdu->props.colorkey)
*val = rstate->colorkey;
- else if (property == rcdu->props.zpos)
- *val = rstate->zpos;
else
return -EINVAL;
@@ -767,8 +759,7 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
drm_object_attach_property(&plane->plane.base,
rcdu->props.colorkey,
RCAR_DU_COLORKEY_NONE);
- drm_object_attach_property(&plane->plane.base,
- rcdu->props.zpos, 1);
+ drm_plane_create_zpos_property(&plane->plane, 1, 1, 7);
}
return 0;
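These hunks retire the driver-private "zpos" property in favour of the core zpos support added in 4.8: drm_plane_create_zpos_property() attaches the standard property and the atomic helpers deliver the result in drm_plane_state.normalized_zpos. A minimal sketch of both halves, with the range values taken from the hunk above:

#include <drm/drm_crtc.h>

/* Expose a mutable zpos in [1, 7] with an initial value of 1. */
static int example_init_zpos(struct drm_plane *plane)
{
	return drm_plane_create_zpos_property(plane, 1, 1, 7);
}

/* After the atomic check phase, read the normalised value. */
static unsigned int example_plane_zpos(const struct drm_plane_state *state)
{
	return state->normalized_zpos;
}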
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.h b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
index b18b7b25d..8b91dd3a4 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
@@ -51,7 +51,6 @@ static inline struct rcar_du_plane *to_rcar_plane(struct drm_plane *plane)
* @hwindex: 0-based hardware plane index, -1 means unused
* @alpha: value of the plane alpha property
* @colorkey: value of the plane colorkey property
- * @zpos: value of the plane zpos property
*/
struct rcar_du_plane_state {
struct drm_plane_state state;
@@ -62,7 +61,6 @@ struct rcar_du_plane_state {
unsigned int alpha;
unsigned int colorkey;
- unsigned int zpos;
};
static inline struct rcar_du_plane_state *
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
index d2f66068e..fedb0161e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
@@ -195,9 +195,10 @@
#define DEFR6_ODPM12_DISP (2 << 8)
#define DEFR6_ODPM12_CDE (3 << 8)
#define DEFR6_ODPM12_MASK (3 << 8)
-#define DEFR6_TCNE2 (1 << 6)
+#define DEFR6_TCNE1 (1 << 6)
+#define DEFR6_TCNE0 (1 << 4)
#define DEFR6_MLOS1 (1 << 2)
-#define DEFR6_DEFAULT (DEFR6_CODE | DEFR6_TCNE2)
+#define DEFR6_DEFAULT (DEFR6_CODE | DEFR6_TCNE1)
/* -----------------------------------------------------------------------------
* R8A7790-only Control Registers
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
index 9d7e5c99c..8d6125c1c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
@@ -28,7 +28,6 @@ static int rcar_du_vga_connector_get_modes(struct drm_connector *connector)
static const struct drm_connector_helper_funcs connector_helper_funcs = {
.get_modes = rcar_du_vga_connector_get_modes,
- .best_encoder = rcar_du_connector_best_encoder,
};
static enum drm_connector_status
@@ -79,7 +78,5 @@ int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
if (ret < 0)
return ret;
- rcon->encoder = renc;
-
return 0;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index e671a7cd3..83ebd162f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -43,12 +43,12 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
.src_y = 0,
.src_w = mode->hdisplay << 16,
.src_h = mode->vdisplay << 16,
+ .zpos = 0,
},
.format = rcar_du_format_info(DRM_FORMAT_ARGB8888),
.source = RCAR_DU_PLANE_VSPD1,
.alpha = 255,
.colorkey = 0,
- .zpos = 0,
};
if (rcdu->info->gen >= 3)
@@ -148,40 +148,39 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
struct rcar_du_vsp_plane_state *state =
to_rcar_vsp_plane_state(plane->plane.state);
struct drm_framebuffer *fb = plane->plane.state->fb;
- struct v4l2_rect src;
- struct v4l2_rect dst;
- dma_addr_t paddr[2] = { 0, };
- u32 pixelformat = 0;
+ struct vsp1_du_atomic_config cfg = {
+ .pixelformat = 0,
+ .pitch = fb->pitches[0],
+ .alpha = state->alpha,
+ .zpos = state->state.zpos,
+ };
unsigned int i;
- src.left = state->state.src_x >> 16;
- src.top = state->state.src_y >> 16;
- src.width = state->state.src_w >> 16;
- src.height = state->state.src_h >> 16;
+ cfg.src.left = state->state.src_x >> 16;
+ cfg.src.top = state->state.src_y >> 16;
+ cfg.src.width = state->state.src_w >> 16;
+ cfg.src.height = state->state.src_h >> 16;
- dst.left = state->state.crtc_x;
- dst.top = state->state.crtc_y;
- dst.width = state->state.crtc_w;
- dst.height = state->state.crtc_h;
+ cfg.dst.left = state->state.crtc_x;
+ cfg.dst.top = state->state.crtc_y;
+ cfg.dst.width = state->state.crtc_w;
+ cfg.dst.height = state->state.crtc_h;
for (i = 0; i < state->format->planes; ++i) {
struct drm_gem_cma_object *gem;
gem = drm_fb_cma_get_gem_obj(fb, i);
- paddr[i] = gem->paddr + fb->offsets[i];
+ cfg.mem[i] = gem->paddr + fb->offsets[i];
}
for (i = 0; i < ARRAY_SIZE(formats_kms); ++i) {
if (formats_kms[i] == state->format->fourcc) {
- pixelformat = formats_v4l2[i];
+ cfg.pixelformat = formats_v4l2[i];
break;
}
}
- WARN_ON(!pixelformat);
-
- vsp1_du_atomic_update(plane->vsp->vsp, plane->index, pixelformat,
- fb->pitches[0], paddr, &src, &dst);
+ vsp1_du_atomic_update(plane->vsp->vsp, plane->index, &cfg);
}
static int rcar_du_vsp_plane_atomic_check(struct drm_plane *plane,
@@ -220,8 +219,7 @@ static void rcar_du_vsp_plane_atomic_update(struct drm_plane *plane,
if (plane->state->crtc)
rcar_du_vsp_plane_setup(rplane);
else
- vsp1_du_atomic_update(rplane->vsp->vsp, rplane->index, 0, 0, 0,
- NULL, NULL);
+ vsp1_du_atomic_update(rplane->vsp->vsp, rplane->index, NULL);
}
static const struct drm_plane_helper_funcs rcar_du_vsp_plane_helper_funcs = {
@@ -269,6 +267,7 @@ static void rcar_du_vsp_plane_reset(struct drm_plane *plane)
return;
state->alpha = 255;
+ state->state.zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : 1;
plane->state = &state->state;
plane->state->plane = plane;
@@ -378,6 +377,8 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp)
drm_object_attach_property(&plane->plane.base,
rcdu->props.alpha, 255);
+ drm_plane_create_zpos_property(&plane->plane, 1, 1,
+ vsp->num_planes - 1);
}
return 0;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
index df3bf3805..510dcc9c6 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
@@ -44,6 +44,7 @@ static inline struct rcar_du_vsp_plane *to_rcar_vsp_plane(struct drm_plane *p)
* @state: base DRM plane state
* @format: information about the pixel format used by the plane
* @alpha: value of the plane alpha property
+ * @zpos: value of the plane zpos property
*/
struct rcar_du_vsp_plane_state {
struct drm_plane_state state;
@@ -51,6 +52,7 @@ struct rcar_du_vsp_plane_state {
const struct rcar_du_format_info *format;
unsigned int alpha;
+ unsigned int zpos;
};
static inline struct rcar_du_vsp_plane_state *
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index d30bdc38a..3c58669a0 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -2,12 +2,9 @@ config DRM_ROCKCHIP
tristate "DRM Support for Rockchip"
depends on DRM && ROCKCHIP_IOMMU
depends on RESET_CONTROLLER
+ select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_PANEL
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
select VIDEOMODE_HELPERS
help
Choose this option if you have a Rockchip soc chipset.
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 7f6a55cae..89aadbf46 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -14,6 +14,7 @@
#include <linux/component.h>
#include <linux/mfd/syscon.h>
+#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/regmap.h>
#include <linux/reset.h>
@@ -33,13 +34,28 @@
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
+#define RK3288_GRF_SOC_CON6 0x25c
+#define RK3288_EDP_LCDC_SEL BIT(5)
+#define RK3399_GRF_SOC_CON20 0x6250
+#define RK3399_EDP_LCDC_SEL BIT(5)
+
+#define HIWORD_UPDATE(val, mask) (val | (mask) << 16)
+
#define to_dp(nm) container_of(nm, struct rockchip_dp_device, nm)
-/* dp grf register offset */
-#define GRF_SOC_CON6 0x025c
-#define GRF_EDP_LCD_SEL_MASK BIT(5)
-#define GRF_EDP_SEL_VOP_LIT BIT(5)
-#define GRF_EDP_SEL_VOP_BIG 0
+/**
+ * struct rockchip_dp_chip_data - per-chip GRF settings
+ * @lcdsel_grf_reg: GRF register offset of the LCDC select field
+ * @lcdsel_big: register value selecting the big VOP for eDP
+ * @lcdsel_lit: register value selecting the little VOP for eDP
+ * @chip_type: specific chip type
+ */
+struct rockchip_dp_chip_data {
+ u32 lcdsel_grf_reg;
+ u32 lcdsel_big;
+ u32 lcdsel_lit;
+ u32 chip_type;
+};
struct rockchip_dp_device {
struct drm_device *drm_dev;
@@ -48,9 +64,12 @@ struct rockchip_dp_device {
struct drm_display_mode mode;
struct clk *pclk;
+ struct clk *grfclk;
struct regmap *grf;
struct reset_control *rst;
+ const struct rockchip_dp_chip_data *data;
+
struct analogix_dp_plat_data plat_data;
};
@@ -77,6 +96,7 @@ static int rockchip_dp_poweron(struct analogix_dp_plat_data *plat_data)
ret = rockchip_dp_pre_init(dp);
if (ret < 0) {
dev_err(dp->dev, "failed to dp pre init %d\n", ret);
+ clk_disable_unprepare(dp->pclk);
return ret;
}
@@ -92,6 +112,23 @@ static int rockchip_dp_powerdown(struct analogix_dp_plat_data *plat_data)
return 0;
}
+static int rockchip_dp_get_modes(struct analogix_dp_plat_data *plat_data,
+ struct drm_connector *connector)
+{
+ struct drm_display_info *di = &connector->display_info;
+ /* The VOP cannot correctly output YUV video formats to eDP */
+ u32 mask = DRM_COLOR_FORMAT_YCRCB444 | DRM_COLOR_FORMAT_YCRCB422;
+
+ if ((di->color_formats & mask)) {
+ DRM_DEBUG_KMS("Swapping display color format from YUV to RGB\n");
+ di->color_formats &= ~mask;
+ di->color_formats |= DRM_COLOR_FORMAT_RGB444;
+ di->bpc = 8;
+ }
+
+ return 0;
+}
+
static bool
rockchip_dp_drm_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
@@ -119,17 +156,23 @@ static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder)
return;
if (ret)
- val = GRF_EDP_SEL_VOP_LIT | (GRF_EDP_LCD_SEL_MASK << 16);
+ val = dp->data->lcdsel_lit;
else
- val = GRF_EDP_SEL_VOP_BIG | (GRF_EDP_LCD_SEL_MASK << 16);
+ val = dp->data->lcdsel_big;
dev_dbg(dp->dev, "vop %s output to dp\n", (ret) ? "LIT" : "BIG");
- ret = regmap_write(dp->grf, GRF_SOC_CON6, val);
- if (ret != 0) {
- dev_err(dp->dev, "Could not write to GRF: %d\n", ret);
+ ret = clk_prepare_enable(dp->grfclk);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to enable grfclk %d\n", ret);
return;
}
+
+ ret = regmap_write(dp->grf, dp->data->lcdsel_grf_reg, val);
+ if (ret != 0)
+ dev_err(dp->dev, "Could not write to GRF: %d\n", ret);
+
+ clk_disable_unprepare(dp->grfclk);
}
static void rockchip_dp_drm_encoder_nop(struct drm_encoder *encoder)
@@ -143,22 +186,29 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+ struct rockchip_dp_device *dp = to_dp(encoder);
+ int ret;
/*
- * FIXME(Yakir): driver should configure the CRTC output video
- * mode with the display information which indicated the monitor
- * support colorimetry.
- *
- * But don't know why the CRTC driver seems could only output the
- * RGBaaa rightly. For example, if connect the "innolux,n116bge"
- * eDP screen, EDID would indicated that screen only accepted the
- * 6bpc mode. But if I configure CRTC to RGB666 output, then eDP
- * screen would show a blue picture (RGB888 show a green picture).
- * But if I configure CTRC to RGBaaa, and eDP driver still keep
- * RGB666 input video mode, then screen would works prefect.
+ * The hardware is designed so that the VOP always outputs RGB10 video
+ * to the eDP controller; if the eDP panel only supports RGB8, the eDP
+ * controller, not the VOP, cuts the video data down. That is why the
+ * VOP output mode is hardcoded to RGB10 here.
*/
+
s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
s->output_type = DRM_MODE_CONNECTOR_eDP;
+ if (dp->data->chip_type == RK3399_EDP) {
+ /*
+ * On RK3399 the little VOP must output RGB888 while the big
+ * VOP must output RGB10.
+ */
+ ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node,
+ encoder);
+ if (ret > 0)
+ s->output_mode = ROCKCHIP_OUT_MODE_P888;
+ }
return 0;
}
@@ -192,6 +242,16 @@ static int rockchip_dp_init(struct rockchip_dp_device *dp)
return PTR_ERR(dp->grf);
}
+ dp->grfclk = devm_clk_get(dev, "grf");
+ if (PTR_ERR(dp->grfclk) == -ENOENT) {
+ dp->grfclk = NULL;
+ } else if (PTR_ERR(dp->grfclk) == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else if (IS_ERR(dp->grfclk)) {
+ dev_err(dev, "failed to get grf clock\n");
+ return PTR_ERR(dp->grfclk);
+ }
+
dp->pclk = devm_clk_get(dev, "pclk");
if (IS_ERR(dp->pclk)) {
dev_err(dev, "failed to get pclk property\n");
@@ -213,6 +273,7 @@ static int rockchip_dp_init(struct rockchip_dp_device *dp)
ret = rockchip_dp_pre_init(dp);
if (ret < 0) {
dev_err(dp->dev, "failed to pre init %d\n", ret);
+ clk_disable_unprepare(dp->pclk);
return ret;
}
@@ -246,6 +307,7 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
void *data)
{
struct rockchip_dp_device *dp = dev_get_drvdata(dev);
+ const struct rockchip_dp_chip_data *dp_data;
struct drm_device *drm_dev = data;
int ret;
@@ -256,10 +318,15 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
*/
dev_set_drvdata(dev, NULL);
+ dp_data = of_device_get_match_data(dev);
+ if (!dp_data)
+ return -ENODEV;
+
ret = rockchip_dp_init(dp);
if (ret < 0)
return ret;
+ dp->data = dp_data;
dp->drm_dev = drm_dev;
ret = rockchip_dp_drm_create_encoder(dp);
@@ -270,9 +337,10 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
dp->plat_data.encoder = &dp->encoder;
- dp->plat_data.dev_type = RK3288_DP;
+ dp->plat_data.dev_type = dp->data->chip_type;
dp->plat_data.power_on = rockchip_dp_poweron;
dp->plat_data.power_off = rockchip_dp_powerdown;
+ dp->plat_data.get_modes = rockchip_dp_get_modes;
return analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
}
@@ -292,38 +360,33 @@ static int rockchip_dp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *panel_node, *port, *endpoint;
+ struct drm_panel *panel = NULL;
struct rockchip_dp_device *dp;
- struct drm_panel *panel;
port = of_graph_get_port_by_id(dev->of_node, 1);
- if (!port) {
- dev_err(dev, "can't find output port\n");
- return -EINVAL;
- }
-
- endpoint = of_get_child_by_name(port, "endpoint");
- of_node_put(port);
- if (!endpoint) {
- dev_err(dev, "no output endpoint found\n");
- return -EINVAL;
- }
-
- panel_node = of_graph_get_remote_port_parent(endpoint);
- of_node_put(endpoint);
- if (!panel_node) {
- dev_err(dev, "no output node found\n");
- return -EINVAL;
- }
-
- panel = of_drm_find_panel(panel_node);
- if (!panel) {
- DRM_ERROR("failed to find panel\n");
+ if (port) {
+ endpoint = of_get_child_by_name(port, "endpoint");
+ of_node_put(port);
+ if (!endpoint) {
+ dev_err(dev, "no output endpoint found\n");
+ return -EINVAL;
+ }
+
+ panel_node = of_graph_get_remote_port_parent(endpoint);
+ of_node_put(endpoint);
+ if (!panel_node) {
+ dev_err(dev, "no output node found\n");
+ return -EINVAL;
+ }
+
+ panel = of_drm_find_panel(panel_node);
of_node_put(panel_node);
- return -EPROBE_DEFER;
+ if (!panel) {
+ DRM_ERROR("failed to find panel\n");
+ return -EPROBE_DEFER;
+ }
}
- of_node_put(panel_node);
-
dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
if (!dp)
return -ENOMEM;
@@ -349,24 +412,30 @@ static int rockchip_dp_remove(struct platform_device *pdev)
return 0;
}
+static const struct dev_pm_ops rockchip_dp_pm_ops = {
#ifdef CONFIG_PM_SLEEP
-static int rockchip_dp_suspend(struct device *dev)
-{
- return analogix_dp_suspend(dev);
-}
-
-static int rockchip_dp_resume(struct device *dev)
-{
- return analogix_dp_resume(dev);
-}
+ .suspend = analogix_dp_suspend,
+ .resume_early = analogix_dp_resume,
#endif
+};
-static const struct dev_pm_ops rockchip_dp_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(rockchip_dp_suspend, rockchip_dp_resume)
+static const struct rockchip_dp_chip_data rk3399_edp = {
+ .lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
+ .lcdsel_big = HIWORD_UPDATE(0, RK3399_EDP_LCDC_SEL),
+ .lcdsel_lit = HIWORD_UPDATE(RK3399_EDP_LCDC_SEL, RK3399_EDP_LCDC_SEL),
+ .chip_type = RK3399_EDP,
+};
+
+static const struct rockchip_dp_chip_data rk3288_dp = {
+ .lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
+ .lcdsel_big = HIWORD_UPDATE(0, RK3288_EDP_LCDC_SEL),
+ .lcdsel_lit = HIWORD_UPDATE(RK3288_EDP_LCDC_SEL, RK3288_EDP_LCDC_SEL),
+ .chip_type = RK3288_DP,
};
static const struct of_device_id rockchip_dp_dt_ids[] = {
- {.compatible = "rockchip,rk3288-dp",},
+ {.compatible = "rockchip,rk3288-dp", .data = &rk3288_dp },
+ {.compatible = "rockchip,rk3399-edp", .data = &rk3399_edp },
{}
};
MODULE_DEVICE_TABLE(of, rockchip_dp_dt_ids);
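Rockchip GRF registers take a write-enable mask in their upper 16 bits: bit N is only updated when bit N+16 is set in the same write, which is exactly what the HIWORD_UPDATE() macro above encodes. A small, self-contained illustration of the two values the rk3399_edp chip data programs:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)				(1u << (n))
#define HIWORD_UPDATE(val, mask)	((val) | ((mask) << 16))
#define RK3399_EDP_LCDC_SEL		BIT(5)

int main(void)
{
	/* Select the big VOP: clear bit 5, with bit 21 enabling the write. */
	uint32_t big = HIWORD_UPDATE(0, RK3399_EDP_LCDC_SEL);
	/* Select the little VOP: set bit 5, again with bit 21 set. */
	uint32_t lit = HIWORD_UPDATE(RK3399_EDP_LCDC_SEL, RK3399_EDP_LCDC_SEL);

	printf("lcdsel_big = 0x%08x\n", big);	/* 0x00200000 */
	printf("lcdsel_lit = 0x%08x\n", lit);	/* 0x00200020 */
	return 0;
}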
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
index dedc65b40..ca22e5ee8 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
@@ -964,18 +964,9 @@ static enum drm_mode_status dw_mipi_dsi_mode_valid(
return mode_status;
}
-static struct drm_encoder *dw_mipi_dsi_connector_best_encoder(
- struct drm_connector *connector)
-{
- struct dw_mipi_dsi *dsi = con_to_dsi(connector);
-
- return &dsi->encoder;
-}
-
static struct drm_connector_helper_funcs dw_mipi_dsi_connector_helper_funcs = {
.get_modes = dw_mipi_dsi_connector_get_modes,
.mode_valid = dw_mipi_dsi_mode_valid,
- .best_encoder = dw_mipi_dsi_connector_best_encoder,
};
static enum drm_connector_status
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 801110f65..0665fb915 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -15,7 +15,6 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
-#include <drm/drm_encoder_slave.h>
#include <drm/bridge/dw_hdmi.h>
#include "rockchip_drm_drv.h"
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index f8b4feb60..006260de9 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -579,14 +579,6 @@ inno_hdmi_connector_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static struct drm_encoder *
-inno_hdmi_connector_best_encoder(struct drm_connector *connector)
-{
- struct inno_hdmi *hdmi = to_inno_hdmi(connector);
-
- return &hdmi->encoder;
-}
-
static int
inno_hdmi_probe_single_connector_modes(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY)
@@ -613,7 +605,6 @@ static struct drm_connector_funcs inno_hdmi_connector_funcs = {
static struct drm_connector_helper_funcs inno_hdmi_connector_helper_funcs = {
.get_modes = inno_hdmi_connector_get_modes,
.mode_valid = inno_hdmi_connector_mode_valid,
- .best_encoder = inno_hdmi_connector_best_encoder,
};
static int inno_hdmi_register(struct drm_device *drm, struct inno_hdmi *hdmi)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index a409d1f70..a822d49a2 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -19,11 +19,13 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/component.h>
+#include <linux/console.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_fb.h"
@@ -37,6 +39,7 @@
#define DRIVER_MINOR 0
static bool is_support_iommu = true;
+static struct drm_driver rockchip_drm_driver;
/*
* Attach a (component) device to the shared drm dma mapping from master drm
@@ -76,7 +79,7 @@ int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
int pipe = drm_crtc_index(crtc);
struct rockchip_drm_private *priv = crtc->dev->dev_private;
- if (pipe > ROCKCHIP_MAX_CRTC)
+ if (pipe >= ROCKCHIP_MAX_CRTC)
return -EINVAL;
priv->crtc_funcs[pipe] = crtc_funcs;
@@ -89,7 +92,7 @@ void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc)
int pipe = drm_crtc_index(crtc);
struct rockchip_drm_private *priv = crtc->dev->dev_private;
- if (pipe > ROCKCHIP_MAX_CRTC)
+ if (pipe >= ROCKCHIP_MAX_CRTC)
return;
priv->crtc_funcs[pipe] = NULL;
@@ -132,20 +135,24 @@ static void rockchip_drm_crtc_disable_vblank(struct drm_device *dev,
priv->crtc_funcs[pipe]->disable_vblank(crtc);
}
-static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
+static int rockchip_drm_bind(struct device *dev)
{
+ struct drm_device *drm_dev;
struct rockchip_drm_private *private;
struct dma_iommu_mapping *mapping = NULL;
- struct device *dev = drm_dev->dev;
- struct drm_connector *connector;
int ret;
- private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL);
- if (!private)
+ drm_dev = drm_dev_alloc(&rockchip_drm_driver, dev);
+ if (!drm_dev)
return -ENOMEM;
- mutex_init(&private->commit.lock);
- INIT_WORK(&private->commit.work, rockchip_drm_atomic_work);
+ dev_set_drvdata(dev, drm_dev);
+
+ private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL);
+ if (!private) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
drm_dev->dev_private = private;
@@ -186,23 +193,6 @@ static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
if (ret)
goto err_detach_device;
- /*
- * All components are now added, we can publish the connector sysfs
- * entries to userspace. This will generate hotplug events and so
- * userspace will expect to be able to access DRM at this point.
- */
- list_for_each_entry(connector, &drm_dev->mode_config.connector_list,
- head) {
- ret = drm_connector_register(connector);
- if (ret) {
- dev_err(drm_dev->dev,
- "[CONNECTOR:%d:%s] drm_connector_register failed: %d\n",
- connector->base.id,
- connector->name, ret);
- goto err_unbind;
- }
- }
-
/* init kms poll for handling hpd */
drm_kms_helper_poll_init(drm_dev);
@@ -222,14 +212,19 @@ static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
if (ret)
goto err_vblank_cleanup;
+ ret = drm_dev_register(drm_dev, 0);
+ if (ret)
+ goto err_fbdev_fini;
+
if (is_support_iommu)
arm_iommu_release_mapping(mapping);
return 0;
+err_fbdev_fini:
+ rockchip_drm_fbdev_fini(drm_dev);
err_vblank_cleanup:
drm_vblank_cleanup(drm_dev);
err_kms_helper_poll_fini:
drm_kms_helper_poll_fini(drm_dev);
-err_unbind:
component_unbind_all(dev, drm_dev);
err_detach_device:
if (is_support_iommu)
@@ -240,12 +235,14 @@ err_release_mapping:
err_config_cleanup:
drm_mode_config_cleanup(drm_dev);
drm_dev->dev_private = NULL;
+err_free:
+ drm_dev_unref(drm_dev);
return ret;
}
-static int rockchip_drm_unload(struct drm_device *drm_dev)
+static void rockchip_drm_unbind(struct device *dev)
{
- struct device *dev = drm_dev->dev;
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
rockchip_drm_fbdev_fini(drm_dev);
drm_vblank_cleanup(drm_dev);
@@ -255,32 +252,12 @@ static int rockchip_drm_unload(struct drm_device *drm_dev)
arm_iommu_detach_device(dev);
drm_mode_config_cleanup(drm_dev);
drm_dev->dev_private = NULL;
-
- return 0;
-}
-
-static void rockchip_drm_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
- struct drm_file *file_priv)
-{
- struct rockchip_drm_private *priv = crtc->dev->dev_private;
- int pipe = drm_crtc_index(crtc);
-
- if (pipe < ROCKCHIP_MAX_CRTC &&
- priv->crtc_funcs[pipe] &&
- priv->crtc_funcs[pipe]->cancel_pending_vblank)
- priv->crtc_funcs[pipe]->cancel_pending_vblank(crtc, file_priv);
-}
-
-static void rockchip_drm_preclose(struct drm_device *dev,
- struct drm_file *file_priv)
-{
- struct drm_crtc *crtc;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- rockchip_drm_crtc_cancel_pending_vblank(crtc, file_priv);
+ drm_dev_unregister(drm_dev);
+ drm_dev_unref(drm_dev);
+ dev_set_drvdata(dev, NULL);
}
-void rockchip_drm_lastclose(struct drm_device *dev)
+static void rockchip_drm_lastclose(struct drm_device *dev)
{
struct rockchip_drm_private *priv = dev->dev_private;
@@ -300,23 +277,15 @@ static const struct file_operations rockchip_drm_driver_fops = {
.release = drm_release,
};
-const struct vm_operations_struct rockchip_drm_vm_ops = {
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
static struct drm_driver rockchip_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM |
DRIVER_PRIME | DRIVER_ATOMIC,
- .load = rockchip_drm_load,
- .unload = rockchip_drm_unload,
- .preclose = rockchip_drm_preclose,
.lastclose = rockchip_drm_lastclose,
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = rockchip_drm_crtc_enable_vblank,
.disable_vblank = rockchip_drm_crtc_disable_vblank,
- .gem_vm_ops = &rockchip_drm_vm_ops,
- .gem_free_object = rockchip_gem_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .gem_free_object_unlocked = rockchip_gem_free_object,
.dumb_create = rockchip_gem_dumb_create,
.dumb_map_offset = rockchip_gem_dumb_map_offset,
.dumb_destroy = drm_gem_dumb_destroy,
@@ -337,25 +306,38 @@ static struct drm_driver rockchip_drm_driver = {
};
#ifdef CONFIG_PM_SLEEP
-static int rockchip_drm_sys_suspend(struct device *dev)
+void rockchip_drm_fb_suspend(struct drm_device *drm)
{
- struct drm_device *drm = dev_get_drvdata(dev);
- struct drm_connector *connector;
+ struct rockchip_drm_private *priv = drm->dev_private;
- if (!drm)
- return 0;
+ console_lock();
+ drm_fb_helper_set_suspend(&priv->fbdev_helper, 1);
+ console_unlock();
+}
- drm_modeset_lock_all(drm);
- list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
- int old_dpms = connector->dpms;
+void rockchip_drm_fb_resume(struct drm_device *drm)
+{
+ struct rockchip_drm_private *priv = drm->dev_private;
- if (connector->funcs->dpms)
- connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);
+ console_lock();
+ drm_fb_helper_set_suspend(&priv->fbdev_helper, 0);
+ console_unlock();
+}
- /* Set the old mode back to the connector for resume */
- connector->dpms = old_dpms;
+static int rockchip_drm_sys_suspend(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct rockchip_drm_private *priv = drm->dev_private;
+
+ drm_kms_helper_poll_disable(drm);
+ rockchip_drm_fb_suspend(drm);
+
+ priv->state = drm_atomic_helper_suspend(drm);
+ if (IS_ERR(priv->state)) {
+ rockchip_drm_fb_resume(drm);
+ drm_kms_helper_poll_enable(drm);
+ return PTR_ERR(priv->state);
}
- drm_modeset_unlock_all(drm);
return 0;
}
@@ -363,47 +345,11 @@ static int rockchip_drm_sys_suspend(struct device *dev)
static int rockchip_drm_sys_resume(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct drm_connector *connector;
- enum drm_connector_status status;
- bool changed = false;
-
- if (!drm)
- return 0;
+ struct rockchip_drm_private *priv = drm->dev_private;
- drm_modeset_lock_all(drm);
- list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
- int desired_mode = connector->dpms;
-
- /*
- * at suspend time, we save dpms to connector->dpms,
- * restore the old_dpms, and at current time, the connector
- * dpms status must be DRM_MODE_DPMS_OFF.
- */
- connector->dpms = DRM_MODE_DPMS_OFF;
-
- /*
- * If the connector has been disconnected during suspend,
- * disconnect it from the encoder and leave it off. We'll notify
- * userspace at the end.
- */
- if (desired_mode == DRM_MODE_DPMS_ON) {
- status = connector->funcs->detect(connector, true);
- if (status == connector_status_disconnected) {
- connector->encoder = NULL;
- connector->status = status;
- changed = true;
- continue;
- }
- }
- if (connector->funcs->dpms)
- connector->funcs->dpms(connector, desired_mode);
- }
- drm_modeset_unlock_all(drm);
-
- drm_helper_resume_force_mode(drm);
-
- if (changed)
- drm_kms_helper_hotplug_event(drm);
+ drm_atomic_helper_resume(drm, priv->state);
+ rockchip_drm_fb_resume(drm);
+ drm_kms_helper_poll_enable(drm);
return 0;
}
@@ -444,37 +390,6 @@ static void rockchip_add_endpoints(struct device *dev,
}
}
-static int rockchip_drm_bind(struct device *dev)
-{
- struct drm_device *drm;
- int ret;
-
- drm = drm_dev_alloc(&rockchip_drm_driver, dev);
- if (!drm)
- return -ENOMEM;
-
- ret = drm_dev_register(drm, 0);
- if (ret)
- goto err_free;
-
- dev_set_drvdata(dev, drm);
-
- return 0;
-
-err_free:
- drm_dev_unref(drm);
- return ret;
-}
-
-static void rockchip_drm_unbind(struct device *dev)
-{
- struct drm_device *drm = dev_get_drvdata(dev);
-
- drm_dev_unregister(drm);
- drm_dev_unref(drm);
- dev_set_drvdata(dev, NULL);
-}
-
static const struct component_master_ops rockchip_drm_ops = {
.bind = rockchip_drm_bind,
.unbind = rockchip_drm_unbind,
@@ -518,6 +433,7 @@ static int rockchip_drm_platform_probe(struct platform_device *pdev)
is_support_iommu = false;
}
+ of_node_put(iommu);
component_match_add(dev, &match, compare_of, port->parent);
of_node_put(port);
}
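The suspend/resume rework above replaces the hand-rolled per-connector DPMS walk with the atomic helpers: drm_atomic_helper_suspend() saves and disables the current display state, and drm_atomic_helper_resume() restores it. A minimal sketch of that pairing, with the saved state held in driver-private storage as in the hunk:

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>

static int example_suspend(struct drm_device *drm,
			   struct drm_atomic_state **saved)
{
	drm_kms_helper_poll_disable(drm);

	*saved = drm_atomic_helper_suspend(drm);
	if (IS_ERR(*saved)) {
		/* Suspend failed: undo and report the error. */
		drm_kms_helper_poll_enable(drm);
		return PTR_ERR(*saved);
	}
	return 0;
}

static int example_resume(struct drm_device *drm,
			  struct drm_atomic_state *saved)
{
	int ret = drm_atomic_helper_resume(drm, saved);

	drm_kms_helper_poll_enable(drm);
	return ret;
}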
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index 56f43a364..ea3932940 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -40,14 +40,6 @@ struct rockchip_crtc_funcs {
int (*enable_vblank)(struct drm_crtc *crtc);
void (*disable_vblank)(struct drm_crtc *crtc);
void (*wait_for_update)(struct drm_crtc *crtc);
- void (*cancel_pending_vblank)(struct drm_crtc *crtc, struct drm_file *file_priv);
-};
-
-struct rockchip_atomic_commit {
- struct work_struct work;
- struct drm_atomic_state *state;
- struct drm_device *dev;
- struct mutex lock;
};
struct rockchip_crtc_state {
@@ -68,11 +60,9 @@ struct rockchip_drm_private {
struct drm_fb_helper fbdev_helper;
struct drm_gem_object *fbdev_bo;
const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC];
-
- struct rockchip_atomic_commit commit;
+ struct drm_atomic_state *state;
};
-void rockchip_drm_atomic_work(struct work_struct *work);
int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
const struct rockchip_crtc_funcs *crtc_funcs);
void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index 755cfdba6..55c52734c 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -20,6 +20,7 @@
#include <drm/drm_crtc_helper.h>
#include "rockchip_drm_drv.h"
+#include "rockchip_drm_fb.h"
#include "rockchip_drm_gem.h"
#define to_rockchip_fb(x) container_of(x, struct rockchip_drm_fb, fb)
@@ -43,14 +44,10 @@ struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb)
{
struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb);
- struct drm_gem_object *obj;
int i;
- for (i = 0; i < ROCKCHIP_MAX_FB_BUFFER; i++) {
- obj = rockchip_fb->obj[i];
- if (obj)
- drm_gem_object_unreference_unlocked(obj);
- }
+ for (i = 0; i < ROCKCHIP_MAX_FB_BUFFER; i++)
+ drm_gem_object_unreference_unlocked(rockchip_fb->obj[i]);
drm_framebuffer_cleanup(fb);
kfree(rockchip_fb);
@@ -228,87 +225,32 @@ rockchip_atomic_wait_for_complete(struct drm_device *dev, struct drm_atomic_stat
}
static void
-rockchip_atomic_commit_complete(struct rockchip_atomic_commit *commit)
+rockchip_atomic_commit_tail(struct drm_atomic_state *state)
{
- struct drm_atomic_state *state = commit->state;
- struct drm_device *dev = commit->dev;
+ struct drm_device *dev = state->dev;
- /*
- * TODO: do fence wait here.
- */
-
- /*
- * Rockchip crtc support runtime PM, can't update display planes
- * when crtc is disabled.
- *
- * drm_atomic_helper_commit comments detail that:
- * For drivers supporting runtime PM the recommended sequence is
- *
- * drm_atomic_helper_commit_modeset_disables(dev, state);
- *
- * drm_atomic_helper_commit_modeset_enables(dev, state);
- *
- * drm_atomic_helper_commit_planes(dev, state, true);
- *
- * See the kerneldoc entries for these three functions for more details.
- */
drm_atomic_helper_commit_modeset_disables(dev, state);
drm_atomic_helper_commit_modeset_enables(dev, state);
drm_atomic_helper_commit_planes(dev, state, true);
+ drm_atomic_helper_commit_hw_done(state);
+
rockchip_atomic_wait_for_complete(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
-
- drm_atomic_state_free(state);
-}
-
-void rockchip_drm_atomic_work(struct work_struct *work)
-{
- struct rockchip_atomic_commit *commit = container_of(work,
- struct rockchip_atomic_commit, work);
-
- rockchip_atomic_commit_complete(commit);
}
-int rockchip_drm_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool nonblock)
-{
- struct rockchip_drm_private *private = dev->dev_private;
- struct rockchip_atomic_commit *commit = &private->commit;
- int ret;
-
- ret = drm_atomic_helper_prepare_planes(dev, state);
- if (ret)
- return ret;
-
- /* serialize outstanding nonblocking commits */
- mutex_lock(&commit->lock);
- flush_work(&commit->work);
-
- drm_atomic_helper_swap_state(dev, state);
-
- commit->dev = dev;
- commit->state = state;
-
- if (nonblock)
- schedule_work(&commit->work);
- else
- rockchip_atomic_commit_complete(commit);
-
- mutex_unlock(&commit->lock);
-
- return 0;
-}
+static struct drm_mode_config_helper_funcs rockchip_mode_config_helpers = {
+ .atomic_commit_tail = rockchip_atomic_commit_tail,
+};
static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
.fb_create = rockchip_user_fb_create,
.output_poll_changed = rockchip_drm_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
- .atomic_commit = rockchip_drm_atomic_commit,
+ .atomic_commit = drm_atomic_helper_commit,
};
struct drm_framebuffer *
@@ -339,4 +281,5 @@ void rockchip_drm_mode_config_init(struct drm_device *dev)
dev->mode_config.max_height = 4096;
dev->mode_config.funcs = &rockchip_drm_mode_config_funcs;
+ dev->mode_config.helper_private = &rockchip_mode_config_helpers;
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index f261512bb..207e01de6 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -108,7 +108,7 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
fbi->screen_size = rk_obj->base.size;
fbi->fix.smem_len = rk_obj->base.size;
- DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%d\n",
+ DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%zu\n",
fb->width, fb->height, fb->depth, rk_obj->kvaddr,
offset, size);
@@ -156,9 +156,6 @@ int rockchip_drm_fbdev_init(struct drm_device *dev)
goto err_drm_fb_helper_fini;
}
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(dev);
-
ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
if (ret < 0) {
dev_err(dev->dev, "Failed to set initial hw config - %d.\n",
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 9c2d8a894..b70f94233 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -17,8 +17,6 @@
#include <drm/drm_gem.h>
#include <drm/drm_vma_manager.h>
-#include <linux/dma-attrs.h>
-
#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
@@ -28,17 +26,16 @@ static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
struct drm_gem_object *obj = &rk_obj->base;
struct drm_device *drm = obj->dev;
- init_dma_attrs(&rk_obj->dma_attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &rk_obj->dma_attrs);
+ rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE;
if (!alloc_kmap)
- dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &rk_obj->dma_attrs);
+ rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
&rk_obj->dma_addr, GFP_KERNEL,
- &rk_obj->dma_attrs);
+ rk_obj->dma_attrs);
if (!rk_obj->kvaddr) {
- DRM_ERROR("failed to allocate %#x byte dma buffer", obj->size);
+ DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size);
return -ENOMEM;
}
@@ -51,7 +48,7 @@ static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
struct drm_device *drm = obj->dev;
dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr,
- &rk_obj->dma_attrs);
+ rk_obj->dma_attrs);
}
static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
@@ -70,7 +67,7 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
vma->vm_pgoff = 0;
ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
- obj->size, &rk_obj->dma_attrs);
+ obj->size, rk_obj->dma_attrs);
if (ret)
drm_gem_vm_close(vma);
@@ -262,7 +259,7 @@ struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
rk_obj->dma_addr, obj->size,
- &rk_obj->dma_attrs);
+ rk_obj->dma_attrs);
if (ret) {
DRM_ERROR("failed to allocate sgt, %d\n", ret);
kfree(sgt);
@@ -276,7 +273,7 @@ void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
{
struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
- if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, &rk_obj->dma_attrs))
+ if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
return NULL;
return rk_obj->kvaddr;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
index ad2261847..18b3488db 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
@@ -23,7 +23,7 @@ struct rockchip_gem_object {
void *kvaddr;
dma_addr_t dma_addr;
- struct dma_attrs dma_attrs;
+ unsigned long dma_attrs;
};
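These hunks track the 4.8 DMA-mapping conversion that replaced struct dma_attrs with a plain unsigned long bitmask, so attributes are now combined with | and tested with &. A minimal sketch of an allocation under the new convention:

#include <linux/dma-mapping.h>

/* Allocate a write-combined buffer without a kernel mapping. */
static void *example_alloc_wc(struct device *dev, size_t size,
			      dma_addr_t *dma_addr)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE |
			      DMA_ATTR_NO_KERNEL_MAPPING;

	/*
	 * With DMA_ATTR_NO_KERNEL_MAPPING the return value is an opaque
	 * cookie to hand back to dma_free_attrs(), not a usable vaddr.
	 */
	return dma_alloc_attrs(dev, size, dma_addr, GFP_KERNEL, attrs);
}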
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index b1673236c..91305eb7d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -98,7 +98,9 @@ struct vop_win {
const struct vop_win_data *data;
struct vop *vop;
- struct vop_plane_state state;
+ /* protected by dev->event_lock */
+ bool enable;
+ dma_addr_t yrgb_mst;
};
struct vop {
@@ -112,6 +114,8 @@ struct vop {
bool vsync_work_pending;
struct completion dsp_hold_completion;
struct completion wait_update_complete;
+
+ /* protected by dev->event_lock */
struct drm_pending_vblank_event *event;
const struct vop_data *data;
@@ -324,9 +328,9 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
scl_cal_scale2(src_h, dst_h));
if (is_yuv) {
VOP_SCL_SET(vop, win, scale_cbcr_x,
- scl_cal_scale2(src_w, dst_w));
+ scl_cal_scale2(cbcr_src_w, dst_w));
VOP_SCL_SET(vop, win, scale_cbcr_y,
- scl_cal_scale2(src_h, dst_h));
+ scl_cal_scale2(cbcr_src_h, dst_h));
}
return;
}
@@ -431,9 +435,6 @@ static void vop_enable(struct drm_crtc *crtc)
struct vop *vop = to_vop(crtc);
int ret;
- if (vop->is_enabled)
- return;
-
ret = pm_runtime_get_sync(vop->dev);
if (ret < 0) {
dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
@@ -501,8 +502,7 @@ static void vop_crtc_disable(struct drm_crtc *crtc)
struct vop *vop = to_vop(crtc);
int i;
- if (!vop->is_enabled)
- return;
+ WARN_ON(vop->event);
/*
* We need to make sure that all windows are disabled before we
@@ -553,6 +553,14 @@ static void vop_crtc_disable(struct drm_crtc *crtc)
clk_disable(vop->aclk);
clk_disable(vop->hclk);
pm_runtime_put(vop->dev);
+
+ if (crtc->state->event && !crtc->state->active) {
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+
+ crtc->state->event = NULL;
+ }
}
static void vop_plane_destroy(struct drm_plane *plane)
@@ -618,6 +626,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
ret = drm_plane_helper_check_update(plane, crtc, state->fb,
src, dest, &clip,
+ state->rotation,
min_scale,
max_scale,
true, true, &visible);
@@ -658,6 +667,11 @@ static void vop_plane_atomic_disable(struct drm_plane *plane,
if (!old_state->crtc)
return;
+ spin_lock_irq(&plane->dev->event_lock);
+ vop_win->enable = false;
+ vop_win->yrgb_mst = 0;
+ spin_unlock_irq(&plane->dev->event_lock);
+
spin_lock(&vop->reg_lock);
VOP_WIN_SET(vop, win, enable, 0);
@@ -692,7 +706,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
/*
* can't update plane when vop is disabled.
*/
- if (!crtc)
+ if (WARN_ON(!crtc))
return;
if (WARN_ON(!vop->is_enabled))
@@ -721,6 +735,11 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
offset += (src->y1 >> 16) * fb->pitches[0];
vop_plane_state->yrgb_mst = rk_obj->dma_addr + offset + fb->offsets[0];
+ spin_lock_irq(&plane->dev->event_lock);
+ vop_win->enable = true;
+ vop_win->yrgb_mst = vop_plane_state->yrgb_mst;
+ spin_unlock_irq(&plane->dev->event_lock);
+
spin_lock(&vop->reg_lock);
VOP_WIN_SET(vop, win, format, vop_plane_state->format);
@@ -779,7 +798,7 @@ static const struct drm_plane_helper_funcs plane_helper_funcs = {
.atomic_disable = vop_plane_atomic_disable,
};
-void vop_atomic_plane_reset(struct drm_plane *plane)
+static void vop_atomic_plane_reset(struct drm_plane *plane)
{
struct vop_plane_state *vop_plane_state =
to_vop_plane_state(plane->state);
@@ -796,7 +815,7 @@ void vop_atomic_plane_reset(struct drm_plane *plane)
plane->state->plane = plane;
}
-struct drm_plane_state *
+static struct drm_plane_state *
vop_atomic_plane_duplicate_state(struct drm_plane *plane)
{
struct vop_plane_state *old_vop_plane_state;
@@ -876,30 +895,10 @@ static void vop_crtc_wait_for_update(struct drm_crtc *crtc)
WARN_ON(!wait_for_completion_timeout(&vop->wait_update_complete, 100));
}
-static void vop_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
- struct drm_file *file_priv)
-{
- struct drm_device *drm = crtc->dev;
- struct vop *vop = to_vop(crtc);
- struct drm_pending_vblank_event *e;
- unsigned long flags;
-
- spin_lock_irqsave(&drm->event_lock, flags);
- e = vop->event;
- if (e && e->base.file_priv == file_priv) {
- vop->event = NULL;
-
- e->base.destroy(&e->base);
- file_priv->event_space += sizeof(e->event);
- }
- spin_unlock_irqrestore(&drm->event_lock, flags);
-}
-
static const struct rockchip_crtc_funcs private_crtc_funcs = {
.enable_vblank = vop_crtc_enable_vblank,
.disable_vblank = vop_crtc_disable_vblank,
.wait_for_update = vop_crtc_wait_for_update,
- .cancel_pending_vblank = vop_crtc_cancel_pending_vblank,
};
static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -931,6 +930,8 @@ static void vop_crtc_enable(struct drm_crtc *crtc)
u16 vact_end = vact_st + vdisplay;
uint32_t val;
+ WARN_ON(vop->event);
+
vop_enable(crtc);
/*
* If dclk rate is zero, mean that scanout is stop,
@@ -1027,12 +1028,15 @@ static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
{
struct vop *vop = to_vop(crtc);
+ spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->event) {
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+ WARN_ON(vop->event);
vop->event = crtc->state->event;
crtc->state->event = NULL;
}
+ spin_unlock_irq(&crtc->dev->event_lock);
}
static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
@@ -1091,16 +1095,14 @@ static const struct drm_crtc_funcs vop_crtc_funcs = {
static bool vop_win_pending_is_complete(struct vop_win *vop_win)
{
- struct drm_plane *plane = &vop_win->base;
- struct vop_plane_state *state = to_vop_plane_state(plane->state);
dma_addr_t yrgb_mst;
- if (!state->enable)
+ if (!vop_win->enable)
return VOP_WIN_GET(vop_win->vop, vop_win->data, enable) == 0;
yrgb_mst = VOP_WIN_GET_YRGBADDR(vop_win->vop, vop_win->data);
- return yrgb_mst == state->yrgb_mst;
+ return yrgb_mst == vop_win->yrgb_mst;
}
static void vop_handle_vblank(struct vop *vop)
@@ -1115,15 +1117,16 @@ static void vop_handle_vblank(struct vop *vop)
return;
}
+ spin_lock_irqsave(&drm->event_lock, flags);
if (vop->event) {
- spin_lock_irqsave(&drm->event_lock, flags);
drm_crtc_send_vblank_event(crtc, vop->event);
drm_crtc_vblank_put(crtc);
vop->event = NULL;
- spin_unlock_irqrestore(&drm->event_lock, flags);
}
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+
if (!completion_done(&vop->wait_update_complete))
complete(&vop->wait_update_complete);
}
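The VOP changes above move all pending-event bookkeeping under dev->event_lock so the IRQ handler and the atomic paths cannot race. A minimal sketch of completing a page flip from the vblank handler under that lock:

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

/* Deliver a pending flip event from the vblank interrupt path. */
static void example_finish_flip(struct drm_crtc *crtc,
				struct drm_pending_vblank_event **event)
{
	unsigned long flags;

	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	if (*event) {
		drm_crtc_send_vblank_event(crtc, *event);
		drm_crtc_vblank_put(crtc);
		*event = NULL;
	}
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}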
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 3166b46a5..919992cdc 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -190,7 +190,7 @@ static const struct vop_data rk3288_vop = {
.win_size = ARRAY_SIZE(rk3288_vop_win_data),
};
-static const struct vop_scl_regs rk3066_win_scl = {
+static const struct vop_scl_regs rk3036_win_scl = {
.scale_yrgb_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
.scale_yrgb_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
.scale_cbcr_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
@@ -198,7 +198,7 @@ static const struct vop_scl_regs rk3066_win_scl = {
};
static const struct vop_win_phy rk3036_win0_data = {
- .scl = &rk3066_win_scl,
+ .scl = &rk3036_win_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
.enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 0),
@@ -210,6 +210,7 @@ static const struct vop_win_phy rk3036_win0_data = {
.yrgb_mst = VOP_REG(RK3036_WIN0_YRGB_MST, 0xffffffff, 0),
.uv_mst = VOP_REG(RK3036_WIN0_CBR_MST, 0xffffffff, 0),
.yrgb_vir = VOP_REG(RK3036_WIN0_VIR, 0xffff, 0),
+ .uv_vir = VOP_REG(RK3036_WIN0_VIR, 0x1fff, 16),
};
static const struct vop_win_phy rk3036_win1_data = {
@@ -299,7 +300,7 @@ static int vop_remove(struct platform_device *pdev)
return 0;
}
-struct platform_driver vop_platform_driver = {
+static struct platform_driver vop_platform_driver = {
.probe = vop_probe,
.remove = vop_remove,
.driver = {
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
index 8d17d00dd..c987c826d 100644
--- a/drivers/gpu/drm/shmobile/Kconfig
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -6,7 +6,6 @@ config DRM_SHMOBILE
select BACKLIGHT_CLASS_DEVICE
select BACKLIGHT_LCD_SUPPORT
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
help
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 1e154fc77..6547b1db4 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -441,7 +441,7 @@ void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc)
scrtc->event = NULL;
if (event) {
drm_crtc_send_vblank_event(&scrtc->crtc, event);
- drm_vblank_put(dev, 0);
+ drm_crtc_vblank_put(&scrtc->crtc);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
@@ -467,7 +467,7 @@ static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
if (event) {
event->pipe = 0;
- drm_vblank_get(dev, 0);
+ drm_crtc_vblank_get(&scrtc->crtc);
spin_lock_irqsave(&dev->event_lock, flags);
scrtc->event = event;
spin_unlock_irqrestore(&dev->event_lock, flags);
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 7700ff172..f0492603e 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -259,12 +259,11 @@ static struct drm_driver shmob_drm_driver = {
| DRIVER_PRIME,
.load = shmob_drm_load,
.unload = shmob_drm_unload,
- .set_busid = drm_platform_set_busid,
.irq_handler = shmob_drm_irq,
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = shmob_drm_enable_vblank,
.disable_vblank = shmob_drm_disable_vblank,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 93ad8a570..03defda77 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -316,7 +316,7 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
struct sis_file_private *file_priv = file->driver_priv;
struct sis_memblock *entry, *next;
- if (!(file->minor->master && file->master->lock.hw_lock))
+ if (!(dev->master && file->master->lock.hw_lock))
return;
drm_legacy_idlelock_take(&file->master->lock);
diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig
index 5ad43a1bb..494ab257f 100644
--- a/drivers/gpu/drm/sti/Kconfig
+++ b/drivers/gpu/drm/sti/Kconfig
@@ -7,5 +7,6 @@ config DRM_STI
select DRM_KMS_CMA_HELPER
select DRM_PANEL
select FW_LOADER
+ select SND_SOC_HDMI_CODEC if SND_SOC
help
	  Choose this option to enable DRM on the STM STiH41x chipset
diff --git a/drivers/gpu/drm/sti/sti_awg_utils.c b/drivers/gpu/drm/sti/sti_awg_utils.c
index a516eb869..2da7d6866 100644
--- a/drivers/gpu/drm/sti/sti_awg_utils.c
+++ b/drivers/gpu/drm/sti/sti_awg_utils.c
@@ -6,6 +6,8 @@
#include "sti_awg_utils.h"
+#define AWG_DELAY (-5)
+
#define AWG_OPCODE_OFFSET 10
#define AWG_MAX_ARG 0x3ff
@@ -125,7 +127,7 @@ static int awg_generate_line_signal(
val = timing->blanking_level;
ret |= awg_generate_instr(RPLSET, val, 0, 0, fwparams);
- val = timing->trailing_pixels - 1;
+ val = timing->trailing_pixels - 1 + AWG_DELAY;
ret |= awg_generate_instr(SKIP, val, 0, 0, fwparams);
}
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index 3d2fa3ab3..134201ecc 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -55,6 +55,26 @@ struct sti_compositor_data stih416_compositor_data = {
},
};
+int sti_compositor_debugfs_init(struct sti_compositor *compo,
+				struct drm_minor *minor)
+{
+ int ret = 0, i;
+
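+	/* Both arrays are NULL-terminated. */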
+ for (i = 0; compo->vid[i]; i++) {
+ ret = vid_debugfs_init(compo->vid[i], minor);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; compo->mixer[i]; i++) {
+ ret = sti_mixer_debugfs_init(compo->mixer[i], minor);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int sti_compositor_bind(struct device *dev,
struct device *master,
void *data)
@@ -234,12 +254,12 @@ static int sti_compositor_probe(struct platform_device *pdev)
}
/* Get reset resources */
- compo->rst_main = devm_reset_control_get(dev, "compo-main");
+ compo->rst_main = devm_reset_control_get_shared(dev, "compo-main");
/* Take compo main out of reset */
if (!IS_ERR(compo->rst_main))
reset_control_deassert(compo->rst_main);
- compo->rst_aux = devm_reset_control_get(dev, "compo-aux");
+ compo->rst_aux = devm_reset_control_get_shared(dev, "compo-aux");
/* Take compo aux out of reset */
if (!IS_ERR(compo->rst_aux))
reset_control_deassert(compo->rst_aux);
@@ -247,10 +267,12 @@ static int sti_compositor_probe(struct platform_device *pdev)
vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0);
if (vtg_np)
compo->vtg_main = of_vtg_find(vtg_np);
+ of_node_put(vtg_np);
vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 1);
if (vtg_np)
compo->vtg_aux = of_vtg_find(vtg_np);
+ of_node_put(vtg_np);
platform_set_drvdata(pdev, compo);
diff --git a/drivers/gpu/drm/sti/sti_compositor.h b/drivers/gpu/drm/sti/sti_compositor.h
index 1a4a73dab..24444ef42 100644
--- a/drivers/gpu/drm/sti/sti_compositor.h
+++ b/drivers/gpu/drm/sti/sti_compositor.h
@@ -81,4 +81,7 @@ struct sti_compositor {
struct notifier_block vtg_vblank_nb;
};
+int sti_compositor_debugfs_init(struct sti_compositor *compo,
+				struct drm_minor *minor);
+
#endif
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index e04deedab..c7d734dc3 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -23,22 +23,11 @@
static void sti_crtc_enable(struct drm_crtc *crtc)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
- struct device *dev = mixer->dev;
- struct sti_compositor *compo = dev_get_drvdata(dev);
DRM_DEBUG_DRIVER("\n");
mixer->status = STI_MIXER_READY;
- /* Prepare and enable the compo IP clock */
- if (mixer->id == STI_MIXER_MAIN) {
- if (clk_prepare_enable(compo->clk_compo_main))
- DRM_INFO("Failed to prepare/enable compo_main clk\n");
- } else {
- if (clk_prepare_enable(compo->clk_compo_aux))
- DRM_INFO("Failed to prepare/enable compo_aux clk\n");
- }
-
drm_crtc_vblank_on(crtc);
}
@@ -57,9 +46,8 @@ sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
struct sti_mixer *mixer = to_sti_mixer(crtc);
struct device *dev = mixer->dev;
struct sti_compositor *compo = dev_get_drvdata(dev);
- struct clk *clk;
+ struct clk *compo_clk, *pix_clk;
int rate = mode->clock * 1000;
- int res;
DRM_DEBUG_KMS("CRTC:%d (%s) mode:%d (%s)\n",
crtc->base.id, sti_mixer_to_str(mixer),
@@ -74,32 +62,46 @@ sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
mode->vsync_start, mode->vsync_end,
mode->vtotal, mode->type, mode->flags);
- /* Set rate and prepare/enable pixel clock */
- if (mixer->id == STI_MIXER_MAIN)
- clk = compo->clk_pix_main;
- else
- clk = compo->clk_pix_aux;
+ if (mixer->id == STI_MIXER_MAIN) {
+ compo_clk = compo->clk_compo_main;
+ pix_clk = compo->clk_pix_main;
+ } else {
+ compo_clk = compo->clk_compo_aux;
+ pix_clk = compo->clk_pix_aux;
+ }
+
+ /* Prepare and enable the compo IP clock */
+ if (clk_prepare_enable(compo_clk)) {
+ DRM_INFO("Failed to prepare/enable compositor clk\n");
+ goto compo_error;
+ }
- res = clk_set_rate(clk, rate);
- if (res < 0) {
+ /* Set rate and prepare/enable pixel clock */
+ if (clk_set_rate(pix_clk, rate) < 0) {
DRM_ERROR("Cannot set rate (%dHz) for pix clk\n", rate);
- return -EINVAL;
+ goto pix_error;
}
- if (clk_prepare_enable(clk)) {
+ if (clk_prepare_enable(pix_clk)) {
DRM_ERROR("Failed to prepare/enable pix clk\n");
- return -EINVAL;
+ goto pix_error;
}
sti_vtg_set_config(mixer->id == STI_MIXER_MAIN ?
compo->vtg_main : compo->vtg_aux, &crtc->mode);
- res = sti_mixer_active_video_area(mixer, &crtc->mode);
- if (res) {
+ if (sti_mixer_active_video_area(mixer, &crtc->mode)) {
DRM_ERROR("Can't set active video area\n");
- return -EINVAL;
+ goto mixer_error;
}
- return res;
+ return 0;
+
+mixer_error:
+ clk_disable_unprepare(pix_clk);
+pix_error:
+ clk_disable_unprepare(compo_clk);
+compo_error:
+ return -EINVAL;
}
static void sti_crtc_disable(struct drm_crtc *crtc)
@@ -130,7 +132,6 @@ static void sti_crtc_disable(struct drm_crtc *crtc)
static void
sti_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
- sti_crtc_enable(crtc);
sti_crtc_mode_set(crtc, &crtc->state->adjusted_mode);
}
@@ -221,9 +222,7 @@ static void sti_crtc_atomic_flush(struct drm_crtc *crtc,
static const struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
.enable = sti_crtc_enable,
.disable = sti_crtc_disabling,
- .mode_set = drm_helper_crtc_mode_set,
.mode_set_nofb = sti_crtc_mode_set_nofb,
- .mode_set_base = drm_helper_crtc_mode_set_base,
.atomic_begin = sti_crtc_atomic_begin,
.atomic_flush = sti_crtc_atomic_flush,
};
@@ -331,6 +330,17 @@ void sti_crtc_disable_vblank(struct drm_device *drm_dev, unsigned int pipe)
}
}
+static int sti_crtc_late_register(struct drm_crtc *crtc)
+{
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+ struct sti_compositor *compo = dev_get_drvdata(mixer->dev);
+
+ if (drm_crtc_index(crtc) == 0)
+		return sti_compositor_debugfs_init(compo, crtc->dev->primary);
+
+ return 0;
+}
+
static const struct drm_crtc_funcs sti_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
@@ -339,6 +349,7 @@ static const struct drm_crtc_funcs sti_crtc_funcs = {
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .late_register = sti_crtc_late_register,
};
bool sti_crtc_is_main(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index 4e9902997..3b53f7f2e 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -105,12 +105,6 @@ static int cursor_dbg_show(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct sti_cursor *cursor = (struct sti_cursor *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
seq_printf(s, "%s: (vaddr = 0x%p)",
sti_plane_to_str(&cursor->plane), cursor->regs);
@@ -129,7 +123,6 @@ static int cursor_dbg_show(struct seq_file *s, void *data)
DBGFS_DUMP(CUR_AWE);
seq_puts(s, "\n");
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -336,6 +329,33 @@ static const struct drm_plane_helper_funcs sti_cursor_helpers_funcs = {
.atomic_disable = sti_cursor_atomic_disable,
};
+static void sti_cursor_destroy(struct drm_plane *drm_plane)
+{
+ DRM_DEBUG_DRIVER("\n");
+
+ drm_plane_helper_disable(drm_plane);
+ drm_plane_cleanup(drm_plane);
+}
+
+static int sti_cursor_late_register(struct drm_plane *drm_plane)
+{
+ struct sti_plane *plane = to_sti_plane(drm_plane);
+ struct sti_cursor *cursor = to_sti_cursor(plane);
+
+ return cursor_debugfs_init(cursor, drm_plane->dev->primary);
+}
+
+struct drm_plane_funcs sti_cursor_plane_helpers_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = sti_cursor_destroy,
+ .set_property = drm_atomic_helper_plane_set_property,
+ .reset = sti_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .late_register = sti_cursor_late_register,
+};
+
struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
struct device *dev, int desc,
void __iomem *baseaddr,
@@ -370,7 +390,7 @@ struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
res = drm_universal_plane_init(drm_dev, &cursor->plane.drm_plane,
possible_crtcs,
- &sti_plane_helpers_funcs,
+ &sti_cursor_plane_helpers_funcs,
cursor_supported_formats,
ARRAY_SIZE(cursor_supported_formats),
DRM_PLANE_TYPE_CURSOR, NULL);
@@ -384,9 +404,6 @@ struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
sti_plane_init_property(&cursor->plane, DRM_PLANE_TYPE_CURSOR);
- if (cursor_debugfs_init(cursor, drm_dev->primary))
- DRM_ERROR("CURSOR debugfs setup failed\n");
-
return &cursor->plane.drm_plane;
err_plane:
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 872495e72..96bd3d08b 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -72,11 +72,6 @@ static int sti_drm_fps_dbg_show(struct seq_file *s, void *data)
struct drm_info_node *node = s->private;
struct drm_device *dev = node->minor->dev;
struct drm_plane *p;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
list_for_each_entry(p, &dev->mode_config.plane_list, head) {
struct sti_plane *plane = to_sti_plane(p);
@@ -86,7 +81,6 @@ static int sti_drm_fps_dbg_show(struct seq_file *s, void *data)
plane->fps_info.fips_str);
}
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -221,7 +215,7 @@ static int sti_atomic_commit(struct drm_device *drm,
* the software side now.
*/
- drm_atomic_helper_swap_state(drm, state);
+ drm_atomic_helper_swap_state(state, true);
if (nonblock)
sti_atomic_schedule(private, state);
@@ -232,8 +226,28 @@ static int sti_atomic_commit(struct drm_device *drm,
return 0;
}
+static void sti_output_poll_changed(struct drm_device *ddev)
+{
+ struct sti_private *private = ddev->dev_private;
+
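+	/* Create the fbdev emulation lazily, once a connector shows up. */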
+ if (!ddev->mode_config.num_connector)
+ return;
+
+ if (private->fbdev) {
+ drm_fbdev_cma_hotplug_event(private->fbdev);
+ return;
+ }
+
+ private->fbdev = drm_fbdev_cma_init(ddev, 32,
+ ddev->mode_config.num_crtc,
+ ddev->mode_config.num_connector);
+ if (IS_ERR(private->fbdev))
+ private->fbdev = NULL;
+}
+
static const struct drm_mode_config_funcs sti_mode_config_funcs = {
.fb_create = drm_fb_cma_create,
+ .output_poll_changed = sti_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = sti_atomic_commit,
};
@@ -254,45 +268,6 @@ static void sti_mode_config_init(struct drm_device *dev)
dev->mode_config.funcs = &sti_mode_config_funcs;
}
-static int sti_load(struct drm_device *dev, unsigned long flags)
-{
- struct sti_private *private;
- int ret;
-
- private = kzalloc(sizeof(*private), GFP_KERNEL);
- if (!private) {
- DRM_ERROR("Failed to allocate private\n");
- return -ENOMEM;
- }
- dev->dev_private = (void *)private;
- private->drm_dev = dev;
-
- mutex_init(&private->commit.lock);
- INIT_WORK(&private->commit.work, sti_atomic_work);
-
- drm_mode_config_init(dev);
- drm_kms_helper_poll_init(dev);
-
- sti_mode_config_init(dev);
-
- ret = component_bind_all(dev->dev, dev);
- if (ret) {
- drm_kms_helper_poll_fini(dev);
- drm_mode_config_cleanup(dev);
- kfree(private);
- return ret;
- }
-
- drm_mode_config_reset(dev);
-
- drm_helper_disable_unused_functions(dev);
- drm_fbdev_cma_init(dev, 32,
- dev->mode_config.num_crtc,
- dev->mode_config.num_connector);
-
- return 0;
-}
-
static const struct file_operations sti_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -309,8 +284,7 @@ static const struct file_operations sti_driver_fops = {
static struct drm_driver sti_driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
- .load = sti_load,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
@@ -346,14 +320,88 @@ static int compare_of(struct device *dev, void *data)
return dev->of_node == data;
}
+static int sti_init(struct drm_device *ddev)
+{
+ struct sti_private *private;
+
+ private = kzalloc(sizeof(*private), GFP_KERNEL);
+ if (!private)
+ return -ENOMEM;
+
+ ddev->dev_private = (void *)private;
+ dev_set_drvdata(ddev->dev, ddev);
+ private->drm_dev = ddev;
+
+ mutex_init(&private->commit.lock);
+ INIT_WORK(&private->commit.work, sti_atomic_work);
+
+ drm_mode_config_init(ddev);
+
+ sti_mode_config_init(ddev);
+
+ drm_kms_helper_poll_init(ddev);
+
+ return 0;
+}
+
+static void sti_cleanup(struct drm_device *ddev)
+{
+ struct sti_private *private = ddev->dev_private;
+
+ if (private->fbdev) {
+ drm_fbdev_cma_fini(private->fbdev);
+ private->fbdev = NULL;
+ }
+
+ drm_kms_helper_poll_fini(ddev);
+ drm_vblank_cleanup(ddev);
+ kfree(private);
+ ddev->dev_private = NULL;
+}
+
static int sti_bind(struct device *dev)
{
- return drm_platform_init(&sti_driver, to_platform_device(dev));
+ struct drm_device *ddev;
+ int ret;
+
+ ddev = drm_dev_alloc(&sti_driver, dev);
+ if (!ddev)
+ return -ENOMEM;
+
+ ddev->platformdev = to_platform_device(dev);
+
+ ret = sti_init(ddev);
+ if (ret)
+ goto err_drm_dev_unref;
+
+ ret = component_bind_all(ddev->dev, ddev);
+ if (ret)
+ goto err_cleanup;
+
+ ret = drm_dev_register(ddev, 0);
+ if (ret)
+ goto err_register;
+
+ drm_mode_config_reset(ddev);
+
+ return 0;
+
+err_register:
+ drm_mode_config_cleanup(ddev);
+err_cleanup:
+ sti_cleanup(ddev);
+err_drm_dev_unref:
+ drm_dev_unref(ddev);
+ return ret;
}
static void sti_unbind(struct device *dev)
{
- drm_put_dev(dev_get_drvdata(dev));
+ struct drm_device *ddev = dev_get_drvdata(dev);
+
+ drm_dev_unregister(ddev);
+ sti_cleanup(ddev);
+ drm_dev_unref(ddev);
}
static const struct component_master_ops sti_ops = {
diff --git a/drivers/gpu/drm/sti/sti_drv.h b/drivers/gpu/drm/sti/sti_drv.h
index 30ddc2084..78ebe5e30 100644
--- a/drivers/gpu/drm/sti/sti_drv.h
+++ b/drivers/gpu/drm/sti/sti_drv.h
@@ -24,6 +24,7 @@ struct sti_private {
struct sti_compositor *compo;
struct drm_property *plane_zorder_property;
struct drm_device *drm_dev;
+ struct drm_fbdev_cma *fbdev;
struct {
struct drm_atomic_state *state;
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 25f766320..00881eb45 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -177,12 +177,6 @@ static int dvo_dbg_show(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct sti_dvo *dvo = (struct sti_dvo *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
seq_printf(s, "DVO: (vaddr = 0x%p)", dvo->regs);
DBGFS_DUMP(DVO_AWG_DIGSYNC_CTRL);
@@ -193,7 +187,6 @@ static int dvo_dbg_show(struct seq_file *s, void *data)
dvo_dbg_awg_microcode(s, dvo->regs + DVO_DIGSYNC_INSTR_I);
seq_puts(s, "\n");
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -384,20 +377,10 @@ static int sti_dvo_connector_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-struct drm_encoder *sti_dvo_best_encoder(struct drm_connector *connector)
-{
- struct sti_dvo_connector *dvo_connector
- = to_sti_dvo_connector(connector);
-
- /* Best encoder is the one associated during connector creation */
- return dvo_connector->encoder;
-}
-
static const
struct drm_connector_helper_funcs sti_dvo_connector_helper_funcs = {
.get_modes = sti_dvo_connector_get_modes,
.mode_valid = sti_dvo_connector_mode_valid,
- .best_encoder = sti_dvo_best_encoder,
};
static enum drm_connector_status
@@ -421,24 +404,29 @@ sti_dvo_connector_detect(struct drm_connector *connector, bool force)
return connector_status_disconnected;
}
-static void sti_dvo_connector_destroy(struct drm_connector *connector)
+static int sti_dvo_late_register(struct drm_connector *connector)
{
struct sti_dvo_connector *dvo_connector
= to_sti_dvo_connector(connector);
+ struct sti_dvo *dvo = dvo_connector->dvo;
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
- kfree(dvo_connector);
+ if (dvo_debugfs_init(dvo, dvo->drm_dev->primary)) {
+ DRM_ERROR("DVO debugfs setup failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
static const struct drm_connector_funcs sti_dvo_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = sti_dvo_connector_detect,
- .destroy = sti_dvo_connector_destroy,
+ .destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .late_register = sti_dvo_late_register,
};
static struct drm_encoder *sti_dvo_find_encoder(struct drm_device *dev)
@@ -509,26 +497,16 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
drm_connector_helper_add(drm_connector,
&sti_dvo_connector_helper_funcs);
- err = drm_connector_register(drm_connector);
- if (err)
- goto err_connector;
-
err = drm_mode_connector_attach_encoder(drm_connector, encoder);
if (err) {
DRM_ERROR("Failed to attach a connector to a encoder\n");
goto err_sysfs;
}
- if (dvo_debugfs_init(dvo, drm_dev->primary))
- DRM_ERROR("DVO debugfs setup failed\n");
-
return 0;
err_sysfs:
- drm_connector_unregister(drm_connector);
-err_connector:
drm_bridge_remove(bridge);
- drm_connector_cleanup(drm_connector);
return -EINVAL;
}
@@ -602,6 +580,7 @@ static int sti_dvo_probe(struct platform_device *pdev)
dvo->panel_node = of_parse_phandle(np, "sti,panel", 0);
if (!dvo->panel_node)
DRM_ERROR("No panel associated to the dvo output\n");
+ of_node_put(dvo->panel_node);
platform_set_drvdata(pdev, dvo);
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index ff33c38da..b8d942ca4 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -208,14 +208,8 @@ static int gdp_dbg_show(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
struct drm_plane *drm_plane = &gdp->plane.drm_plane;
struct drm_crtc *crtc = drm_plane->crtc;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
seq_printf(s, "%s: (vaddr = 0x%p)",
sti_plane_to_str(&gdp->plane), gdp->regs);
@@ -248,7 +242,6 @@ static int gdp_dbg_show(struct seq_file *s, void *data)
seq_printf(s, " Connected to DRM CRTC #%d (%s)\n",
crtc->base.id, sti_mixer_to_str(to_sti_mixer(crtc)));
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -279,13 +272,7 @@ static int gdp_node_dbg_show(struct seq_file *s, void *arg)
{
struct drm_info_node *node = s->private;
struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
unsigned int b;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
for (b = 0; b < GDP_NODE_NB_BANK; b++) {
seq_printf(s, "\n%s[%d].top", sti_plane_to_str(&gdp->plane), b);
@@ -294,7 +281,6 @@ static int gdp_node_dbg_show(struct seq_file *s, void *arg)
gdp_node_dump_node(s, gdp->node_list[b].btm_field);
}
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -880,6 +866,33 @@ static const struct drm_plane_helper_funcs sti_gdp_helpers_funcs = {
.atomic_disable = sti_gdp_atomic_disable,
};
+static void sti_gdp_destroy(struct drm_plane *drm_plane)
+{
+ DRM_DEBUG_DRIVER("\n");
+
+ drm_plane_helper_disable(drm_plane);
+ drm_plane_cleanup(drm_plane);
+}
+
+static int sti_gdp_late_register(struct drm_plane *drm_plane)
+{
+ struct sti_plane *plane = to_sti_plane(drm_plane);
+ struct sti_gdp *gdp = to_sti_gdp(plane);
+
+ return gdp_debugfs_init(gdp, drm_plane->dev->primary);
+}
+
+struct drm_plane_funcs sti_gdp_plane_helpers_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = sti_gdp_destroy,
+ .set_property = drm_atomic_helper_plane_set_property,
+ .reset = sti_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .late_register = sti_gdp_late_register,
+};
+
struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
struct device *dev, int desc,
void __iomem *baseaddr,
@@ -906,7 +919,7 @@ struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
res = drm_universal_plane_init(drm_dev, &gdp->plane.drm_plane,
possible_crtcs,
- &sti_plane_helpers_funcs,
+ &sti_gdp_plane_helpers_funcs,
gdp_supported_formats,
ARRAY_SIZE(gdp_supported_formats),
type, NULL);
@@ -919,9 +932,6 @@ struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
sti_plane_init_property(&gdp->plane, type);
- if (gdp_debugfs_init(gdp, drm_dev->primary))
- DRM_ERROR("GDP debugfs setup failed\n");
-
return &gdp->plane.drm_plane;
err:
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index f7d3464cd..8505569f7 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -376,12 +376,6 @@ static int hda_dbg_show(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct sti_hda *hda = (struct sti_hda *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
seq_printf(s, "HD Analog: (vaddr = 0x%p)", hda->regs);
DBGFS_DUMP(HDA_ANA_CFG);
@@ -397,7 +391,6 @@ static int hda_dbg_show(struct seq_file *s, void *data)
hda_dbg_video_dacs_ctrl(s, hda->video_dacs_ctrl);
seq_puts(s, "\n");
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -676,20 +669,10 @@ static int sti_hda_connector_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-struct drm_encoder *sti_hda_best_encoder(struct drm_connector *connector)
-{
- struct sti_hda_connector *hda_connector
- = to_sti_hda_connector(connector);
-
- /* Best encoder is the one associated during connector creation */
- return hda_connector->encoder;
-}
-
static const
struct drm_connector_helper_funcs sti_hda_connector_helper_funcs = {
.get_modes = sti_hda_connector_get_modes,
.mode_valid = sti_hda_connector_mode_valid,
- .best_encoder = sti_hda_best_encoder,
};
static enum drm_connector_status
@@ -698,24 +681,29 @@ sti_hda_connector_detect(struct drm_connector *connector, bool force)
return connector_status_connected;
}
-static void sti_hda_connector_destroy(struct drm_connector *connector)
+static int sti_hda_late_register(struct drm_connector *connector)
{
struct sti_hda_connector *hda_connector
= to_sti_hda_connector(connector);
+ struct sti_hda *hda = hda_connector->hda;
+
+ if (hda_debugfs_init(hda, hda->drm_dev->primary)) {
+ DRM_ERROR("HDA debugfs setup failed\n");
+ return -EINVAL;
+ }
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
- kfree(hda_connector);
+ return 0;
}
static const struct drm_connector_funcs sti_hda_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = sti_hda_connector_detect,
- .destroy = sti_hda_connector_destroy,
+ .destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .late_register = sti_hda_late_register,
};
static struct drm_encoder *sti_hda_find_encoder(struct drm_device *dev)
@@ -773,10 +761,6 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
drm_connector_helper_add(drm_connector,
&sti_hda_connector_helper_funcs);
- err = drm_connector_register(drm_connector);
- if (err)
- goto err_connector;
-
err = drm_mode_connector_attach_encoder(drm_connector, encoder);
if (err) {
DRM_ERROR("Failed to attach a connector to a encoder\n");
@@ -786,15 +770,10 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
/* force to disable hd dacs at startup */
hda_enable_hd_dacs(hda, false);
- if (hda_debugfs_init(hda, drm_dev->primary))
- DRM_ERROR("HDA debugfs setup failed\n");
-
return 0;
err_sysfs:
- drm_connector_unregister(drm_connector);
-err_connector:
- drm_connector_cleanup(drm_connector);
+ drm_bridge_remove(bridge);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 6ef0715bd..fedc17f98 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -18,6 +18,8 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
+#include <sound/hdmi-codec.h>
+
#include "sti_hdmi.h"
#include "sti_hdmi_tx3g4c28phy.h"
#include "sti_hdmi_tx3g0c55phy.h"
@@ -35,6 +37,8 @@
#define HDMI_DFLT_CHL0_DAT 0x0110
#define HDMI_DFLT_CHL1_DAT 0x0114
#define HDMI_DFLT_CHL2_DAT 0x0118
+#define HDMI_AUDIO_CFG 0x0200
+#define HDMI_SPDIF_FIFO_STATUS 0x0204
#define HDMI_SW_DI_1_HEAD_WORD 0x0210
#define HDMI_SW_DI_1_PKT_WORD0 0x0214
#define HDMI_SW_DI_1_PKT_WORD1 0x0218
@@ -44,6 +48,9 @@
#define HDMI_SW_DI_1_PKT_WORD5 0x0228
#define HDMI_SW_DI_1_PKT_WORD6 0x022C
#define HDMI_SW_DI_CFG 0x0230
+#define HDMI_SAMPLE_FLAT_MASK 0x0244
+#define HDMI_AUDN 0x0400
+#define HDMI_AUD_CTS 0x0404
#define HDMI_SW_DI_2_HEAD_WORD 0x0600
#define HDMI_SW_DI_2_PKT_WORD0 0x0604
#define HDMI_SW_DI_2_PKT_WORD1 0x0608
@@ -103,6 +110,7 @@
#define HDMI_INT_DLL_LCK BIT(5)
#define HDMI_INT_NEW_FRAME BIT(6)
#define HDMI_INT_GENCTRL_PKT BIT(7)
+#define HDMI_INT_AUDIO_FIFO_XRUN BIT(8)
#define HDMI_INT_SINK_TERM_PRESENT BIT(11)
#define HDMI_DEFAULT_INT (HDMI_INT_SINK_TERM_PRESENT \
@@ -111,6 +119,7 @@
| HDMI_INT_GLOBAL)
#define HDMI_WORKING_INT (HDMI_INT_SINK_TERM_PRESENT \
+ | HDMI_INT_AUDIO_FIFO_XRUN \
| HDMI_INT_GENCTRL_PKT \
| HDMI_INT_NEW_FRAME \
| HDMI_INT_DLL_LCK \
@@ -121,6 +130,27 @@
#define HDMI_STA_SW_RST BIT(1)
+#define HDMI_AUD_CFG_8CH BIT(0)
+#define HDMI_AUD_CFG_SPDIF_DIV_2 BIT(1)
+#define HDMI_AUD_CFG_SPDIF_DIV_3 BIT(2)
+#define HDMI_AUD_CFG_SPDIF_CLK_DIV_4 (BIT(1) | BIT(2))
+#define HDMI_AUD_CFG_CTS_CLK_256FS BIT(12)
+#define HDMI_AUD_CFG_DTS_INVALID BIT(16)
+#define HDMI_AUD_CFG_ONE_BIT_INVALID (BIT(18) | BIT(19) | BIT(20) | BIT(21))
+#define HDMI_AUD_CFG_CH12_VALID BIT(28)
+#define HDMI_AUD_CFG_CH34_VALID BIT(29)
+#define HDMI_AUD_CFG_CH56_VALID BIT(30)
+#define HDMI_AUD_CFG_CH78_VALID BIT(31)
+
+/* sample flat mask */
+#define HDMI_SAMPLE_FLAT_NO 0
+#define HDMI_SAMPLE_FLAT_SP0 BIT(0)
+#define HDMI_SAMPLE_FLAT_SP1 BIT(1)
+#define HDMI_SAMPLE_FLAT_SP2 BIT(2)
+#define HDMI_SAMPLE_FLAT_SP3 BIT(3)
+#define HDMI_SAMPLE_FLAT_ALL (HDMI_SAMPLE_FLAT_SP0 | HDMI_SAMPLE_FLAT_SP1 |\
+ HDMI_SAMPLE_FLAT_SP2 | HDMI_SAMPLE_FLAT_SP3)
+
#define HDMI_INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0)
#define HDMI_INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8)
#define HDMI_INFOFRAME_HEADER_LEN(x) (((x) & 0x0f) << 16)
@@ -171,6 +201,10 @@ static irqreturn_t hdmi_irq_thread(int irq, void *arg)
wake_up_interruptible(&hdmi->wait_event);
}
+ /* Audio FIFO underrun IRQ */
+ if (hdmi->irq_status & HDMI_INT_AUDIO_FIFO_XRUN)
+		DRM_INFO("Warning: audio FIFO underrun occurred!\n");
+
return IRQ_HANDLED;
}
@@ -441,26 +475,29 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi)
*/
static int hdmi_audio_infoframe_config(struct sti_hdmi *hdmi)
{
- struct hdmi_audio_infoframe infofame;
+ struct hdmi_audio_params *audio = &hdmi->audio;
u8 buffer[HDMI_INFOFRAME_SIZE(AUDIO)];
- int ret;
-
- ret = hdmi_audio_infoframe_init(&infofame);
- if (ret < 0) {
- DRM_ERROR("failed to setup audio infoframe: %d\n", ret);
- return ret;
- }
-
- infofame.channels = 2;
-
- ret = hdmi_audio_infoframe_pack(&infofame, buffer, sizeof(buffer));
- if (ret < 0) {
- DRM_ERROR("failed to pack audio infoframe: %d\n", ret);
- return ret;
+ int ret, val;
+
+ DRM_DEBUG_DRIVER("enter %s, AIF %s\n", __func__,
+ audio->enabled ? "enable" : "disable");
+ if (audio->enabled) {
+		/* set the stored audio parameters */
+ ret = hdmi_audio_infoframe_pack(&audio->cea, buffer,
+ sizeof(buffer));
+ if (ret < 0) {
+ DRM_ERROR("failed to pack audio infoframe: %d\n", ret);
+ return ret;
+ }
+ hdmi_infoframe_write_infopack(hdmi, buffer, ret);
+ } else {
+		/* disable audio infoframe transmission */
+ val = hdmi_read(hdmi, HDMI_SW_DI_CFG);
+ val &= ~HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK,
+ HDMI_IFRAME_SLOT_AUDIO);
+ hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
}
- hdmi_infoframe_write_infopack(hdmi, buffer, ret);
-
return 0;
}
@@ -628,12 +665,6 @@ static int hdmi_dbg_show(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct sti_hdmi *hdmi = (struct sti_hdmi *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
seq_printf(s, "HDMI: (vaddr = 0x%p)", hdmi->regs);
DBGFS_DUMP("\n", HDMI_CFG);
@@ -656,6 +687,10 @@ static int hdmi_dbg_show(struct seq_file *s, void *data)
DBGFS_DUMP("", HDMI_SW_DI_CFG);
hdmi_dbg_sw_di_cfg(s, hdmi_read(hdmi, HDMI_SW_DI_CFG));
+ DBGFS_DUMP("\n", HDMI_AUDIO_CFG);
+ DBGFS_DUMP("\n", HDMI_SPDIF_FIFO_STATUS);
+ DBGFS_DUMP("\n", HDMI_AUDN);
+
seq_printf(s, "\n AVI Infoframe (Data Island slot N=%d):",
HDMI_IFRAME_SLOT_AVI);
DBGFS_DUMP_DI(HDMI_SW_DI_N_HEAD_WORD, HDMI_IFRAME_SLOT_AVI);
@@ -690,7 +725,6 @@ static int hdmi_dbg_show(struct seq_file *s, void *data)
DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD6, HDMI_IFRAME_SLOT_VENDOR);
seq_puts(s, "\n");
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -861,6 +895,7 @@ static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
count = drm_add_edid_modes(connector, edid);
drm_mode_connector_update_edid_property(connector, edid);
+ drm_edid_to_eld(connector, edid);
kfree(edid);
return count;
@@ -897,20 +932,10 @@ static int sti_hdmi_connector_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-struct drm_encoder *sti_hdmi_best_encoder(struct drm_connector *connector)
-{
- struct sti_hdmi_connector *hdmi_connector
- = to_sti_hdmi_connector(connector);
-
- /* Best encoder is the one associated during connector creation */
- return hdmi_connector->encoder;
-}
-
static const
struct drm_connector_helper_funcs sti_hdmi_connector_helper_funcs = {
.get_modes = sti_hdmi_connector_get_modes,
.mode_valid = sti_hdmi_connector_mode_valid,
- .best_encoder = sti_hdmi_best_encoder,
};
/* get detection status of display device */
@@ -932,16 +957,6 @@ sti_hdmi_connector_detect(struct drm_connector *connector, bool force)
return connector_status_disconnected;
}
-static void sti_hdmi_connector_destroy(struct drm_connector *connector)
-{
- struct sti_hdmi_connector *hdmi_connector
- = to_sti_hdmi_connector(connector);
-
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
- kfree(hdmi_connector);
-}
-
static void sti_hdmi_connector_init_property(struct drm_device *drm_dev,
struct drm_connector *connector)
{
@@ -1024,17 +1039,31 @@ sti_hdmi_connector_get_property(struct drm_connector *connector,
return -EINVAL;
}
+static int sti_hdmi_late_register(struct drm_connector *connector)
+{
+ struct sti_hdmi_connector *hdmi_connector
+ = to_sti_hdmi_connector(connector);
+ struct sti_hdmi *hdmi = hdmi_connector->hdmi;
+
+ if (hdmi_debugfs_init(hdmi, hdmi->drm_dev->primary)) {
+ DRM_ERROR("HDMI debugfs setup failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct drm_connector_funcs sti_hdmi_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = sti_hdmi_connector_detect,
- .destroy = sti_hdmi_connector_destroy,
+ .destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.set_property = drm_atomic_helper_connector_set_property,
.atomic_set_property = sti_hdmi_connector_set_property,
.atomic_get_property = sti_hdmi_connector_get_property,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .late_register = sti_hdmi_late_register,
};
static struct drm_encoder *sti_hdmi_find_encoder(struct drm_device *dev)
@@ -1049,6 +1078,207 @@ static struct drm_encoder *sti_hdmi_find_encoder(struct drm_device *dev)
return NULL;
}
+/**
+ * sti_hdmi_audio_get_non_coherent_n() - get N parameter for non-coherent
+ * clocks. Non-coherent means that the audio and TMDS clocks do not share
+ * the same source, so they may drift with respect to each other. In that
+ * case the assumption is that CTS is calculated automatically by hardware.
+ *
+ * @audio_fs: audio frame clock frequency in Hz
+ *
+ * The values computed are based on the table described in the HDMI 1.4b
+ * specification.
+ *
+ * Return: the N value.
+ */
+static int sti_hdmi_audio_get_non_coherent_n(unsigned int audio_fs)
+{
+ unsigned int n;
+
+ switch (audio_fs) {
+ case 32000:
+ n = 4096;
+ break;
+ case 44100:
+ n = 6272;
+ break;
+ case 48000:
+ n = 6144;
+ break;
+ case 88200:
+ n = 6272 * 2;
+ break;
+ case 96000:
+ n = 6144 * 2;
+ break;
+ case 176400:
+ n = 6272 * 4;
+ break;
+ case 192000:
+ n = 6144 * 4;
+ break;
+ default:
+ /* Not pre-defined, recommended value: 128 * fs / 1000 */
+ n = (audio_fs * 128) / 1000;
+ }
+
+ return n;
+}
+
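+/*
+ * Worked example, assuming the standard HDMI clock-regeneration relation
+ * CTS = f_tmds * N / (128 * f_audio): for a 44.1 kHz stream, N = 6272, so
+ * with a 148.5 MHz TMDS clock the sink recovers
+ * CTS = 148500000 * 6272 / (128 * 44100) = 165000.
+ */
+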
+static int hdmi_audio_configure(struct sti_hdmi *hdmi,
+ struct hdmi_audio_params *params)
+{
+ int audio_cfg, n;
+ struct hdmi_audio_infoframe *info = &params->cea;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (!hdmi->enabled)
+ return 0;
+
+ /* update N parameter */
+ n = sti_hdmi_audio_get_non_coherent_n(params->sample_rate);
+
+ DRM_DEBUG_DRIVER("Audio rate = %d Hz, TMDS clock = %d Hz, n = %d\n",
+ params->sample_rate, hdmi->mode.clock * 1000, n);
+ hdmi_write(hdmi, n, HDMI_AUDN);
+
+ /* update HDMI registers according to configuration */
+ audio_cfg = HDMI_AUD_CFG_SPDIF_DIV_2 | HDMI_AUD_CFG_DTS_INVALID |
+ HDMI_AUD_CFG_ONE_BIT_INVALID;
+
+ switch (info->channels) {
+ case 8:
+ audio_cfg |= HDMI_AUD_CFG_CH78_VALID;
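+		/* fall through */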
+ case 6:
+ audio_cfg |= HDMI_AUD_CFG_CH56_VALID;
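+		/* fall through */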
+ case 4:
+ audio_cfg |= HDMI_AUD_CFG_CH34_VALID | HDMI_AUD_CFG_8CH;
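+		/* fall through */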
+ case 2:
+ audio_cfg |= HDMI_AUD_CFG_CH12_VALID;
+ break;
+ default:
+		DRM_ERROR("Unsupported number of channels (%d)!\n",
+ info->channels);
+ return -EINVAL;
+ }
+
+ hdmi_write(hdmi, audio_cfg, HDMI_AUDIO_CFG);
+
+ hdmi->audio = *params;
+
+ return hdmi_audio_infoframe_config(hdmi);
+}
+
+static void hdmi_audio_shutdown(struct device *dev, void *data)
+{
+ struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+ int audio_cfg;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ /* disable audio */
+ audio_cfg = HDMI_AUD_CFG_SPDIF_DIV_2 | HDMI_AUD_CFG_DTS_INVALID |
+ HDMI_AUD_CFG_ONE_BIT_INVALID;
+ hdmi_write(hdmi, audio_cfg, HDMI_AUDIO_CFG);
+
+ hdmi->audio.enabled = 0;
+ hdmi_audio_infoframe_config(hdmi);
+}
+
+static int hdmi_audio_hw_params(struct device *dev,
+ void *data,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
+{
+ struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+ int ret;
+ struct hdmi_audio_params audio = {
+ .sample_width = params->sample_width,
+ .sample_rate = params->sample_rate,
+ .cea = params->cea,
+ };
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (!hdmi->enabled)
+ return 0;
+
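+	/*
+	 * Only slave-mode I2S with non-inverted bit and frame clocks
+	 * is supported.
+	 */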
+ if ((daifmt->fmt != HDMI_I2S) || daifmt->bit_clk_inv ||
+ daifmt->frame_clk_inv || daifmt->bit_clk_master ||
+ daifmt->frame_clk_master) {
+ dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
+ daifmt->bit_clk_inv, daifmt->frame_clk_inv,
+ daifmt->bit_clk_master,
+ daifmt->frame_clk_master);
+ return -EINVAL;
+ }
+
+ audio.enabled = 1;
+
+ ret = hdmi_audio_configure(hdmi, &audio);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int hdmi_audio_digital_mute(struct device *dev, void *data, bool enable)
+{
+ struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+
+ DRM_DEBUG_DRIVER("%s\n", enable ? "enable" : "disable");
+
+ if (enable)
+ hdmi_write(hdmi, HDMI_SAMPLE_FLAT_ALL, HDMI_SAMPLE_FLAT_MASK);
+ else
+ hdmi_write(hdmi, HDMI_SAMPLE_FLAT_NO, HDMI_SAMPLE_FLAT_MASK);
+
+ return 0;
+}
+
+static int hdmi_audio_get_eld(struct device *dev, void *data,
+			      uint8_t *buf, size_t len)
+{
+ struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+ struct drm_connector *connector = hdmi->drm_connector;
+
+ DRM_DEBUG_DRIVER("\n");
+ memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
+
+ return 0;
+}
+
+static const struct hdmi_codec_ops audio_codec_ops = {
+ .hw_params = hdmi_audio_hw_params,
+ .audio_shutdown = hdmi_audio_shutdown,
+ .digital_mute = hdmi_audio_digital_mute,
+ .get_eld = hdmi_audio_get_eld,
+};
+
+static int sti_hdmi_register_audio_driver(struct device *dev,
+ struct sti_hdmi *hdmi)
+{
+ struct hdmi_codec_pdata codec_data = {
+ .ops = &audio_codec_ops,
+ .max_i2s_channels = 8,
+ .i2s = 1,
+ };
+
+ DRM_DEBUG_DRIVER("\n");
+
+ hdmi->audio.enabled = 0;
+
+ hdmi->audio_pdev = platform_device_register_data(
+ dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
+ &codec_data, sizeof(codec_data));
+
+ if (IS_ERR(hdmi->audio_pdev))
+ return PTR_ERR(hdmi->audio_pdev);
+
+	DRM_INFO("%s driver bound to %s\n", HDMI_CODEC_DRV_NAME, dev_name(dev));
+
+ return 0;
+}
+
static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
{
struct sti_hdmi *hdmi = dev_get_drvdata(dev);
@@ -1095,9 +1325,7 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
/* initialise property */
sti_hdmi_connector_init_property(drm_dev, drm_connector);
- err = drm_connector_register(drm_connector);
- if (err)
- goto err_connector;
+ hdmi->drm_connector = drm_connector;
err = drm_mode_connector_attach_encoder(drm_connector, encoder);
if (err) {
@@ -1105,19 +1333,27 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
goto err_sysfs;
}
+ err = sti_hdmi_register_audio_driver(dev, hdmi);
+ if (err) {
+ DRM_ERROR("Failed to attach an audio codec\n");
+ goto err_sysfs;
+ }
+
+ /* Initialize audio infoframe */
+ err = hdmi_audio_infoframe_init(&hdmi->audio.cea);
+ if (err) {
+ DRM_ERROR("Failed to init audio infoframe\n");
+ goto err_sysfs;
+ }
+
/* Enable default interrupts */
hdmi_write(hdmi, HDMI_DEFAULT_INT, HDMI_INT_EN);
- if (hdmi_debugfs_init(hdmi, drm_dev->primary))
- DRM_ERROR("HDMI debugfs setup failed\n");
-
return 0;
err_sysfs:
- drm_connector_unregister(drm_connector);
-err_connector:
- drm_connector_cleanup(drm_connector);
-
+ drm_bridge_remove(bridge);
+ hdmi->drm_connector = NULL;
return -EINVAL;
}
@@ -1267,6 +1503,8 @@ static int sti_hdmi_remove(struct platform_device *pdev)
struct sti_hdmi *hdmi = dev_get_drvdata(&pdev->dev);
i2c_put_adapter(hdmi->ddc_adapt);
+ if (hdmi->audio_pdev)
+ platform_device_unregister(hdmi->audio_pdev);
component_del(&pdev->dev, &sti_hdmi_ops);
return 0;
diff --git a/drivers/gpu/drm/sti/sti_hdmi.h b/drivers/gpu/drm/sti/sti_hdmi.h
index ef3a94583..119bc3582 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.h
+++ b/drivers/gpu/drm/sti/sti_hdmi.h
@@ -23,6 +23,13 @@ struct hdmi_phy_ops {
void (*stop)(struct sti_hdmi *hdmi);
};
+struct hdmi_audio_params {
+ bool enabled;
+ unsigned int sample_width;
+ unsigned int sample_rate;
+ struct hdmi_audio_infoframe cea;
+};
+
/* values for the framing mode property */
enum sti_hdmi_modes {
HDMI_MODE_HDMI,
@@ -67,6 +74,9 @@ static const struct drm_prop_enum_list colorspace_mode_names[] = {
* @ddc_adapt: i2c ddc adapter
* @colorspace: current colorspace selected
* @hdmi_mode: select framing for HDMI or DVI
+ * @audio_pdev: ASoC hdmi-codec platform device
+ * @audio: hdmi audio parameters.
+ * @drm_connector: hdmi connector
*/
struct sti_hdmi {
struct device dev;
@@ -89,6 +99,9 @@ struct sti_hdmi {
struct i2c_adapter *ddc_adapt;
enum hdmi_colorspace colorspace;
enum sti_hdmi_modes hdmi_mode;
+ struct platform_device *audio_pdev;
+ struct hdmi_audio_params audio;
+ struct drm_connector *drm_connector;
};
u32 hdmi_read(struct sti_hdmi *hdmi, int offset);
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 52102045c..e4b769d67 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -555,14 +555,8 @@ static int hqvdp_dbg_show(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct sti_hqvdp *hqvdp = (struct sti_hqvdp *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
int cmd, cmd_offset, infoxp70;
void *virt;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
seq_printf(s, "%s: (vaddr = 0x%p)",
sti_plane_to_str(&hqvdp->plane), hqvdp->regs);
@@ -630,7 +624,6 @@ static int hqvdp_dbg_show(struct seq_file *s, void *data)
seq_puts(s, "\n");
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -1241,6 +1234,33 @@ static const struct drm_plane_helper_funcs sti_hqvdp_helpers_funcs = {
.atomic_disable = sti_hqvdp_atomic_disable,
};
+static void sti_hqvdp_destroy(struct drm_plane *drm_plane)
+{
+ DRM_DEBUG_DRIVER("\n");
+
+ drm_plane_helper_disable(drm_plane);
+ drm_plane_cleanup(drm_plane);
+}
+
+static int sti_hqvdp_late_register(struct drm_plane *drm_plane)
+{
+ struct sti_plane *plane = to_sti_plane(drm_plane);
+ struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
+
+ return hqvdp_debugfs_init(hqvdp, drm_plane->dev->primary);
+}
+
+struct drm_plane_funcs sti_hqvdp_plane_helpers_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = sti_hqvdp_destroy,
+ .set_property = drm_atomic_helper_plane_set_property,
+ .reset = sti_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .late_register = sti_hqvdp_late_register,
+};
+
static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
struct device *dev, int desc)
{
@@ -1253,7 +1273,7 @@ static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
sti_hqvdp_init(hqvdp);
res = drm_universal_plane_init(drm_dev, &hqvdp->plane.drm_plane, 1,
- &sti_plane_helpers_funcs,
+ &sti_hqvdp_plane_helpers_funcs,
hqvdp_supported_formats,
ARRAY_SIZE(hqvdp_supported_formats),
DRM_PLANE_TYPE_OVERLAY, NULL);
@@ -1266,9 +1286,6 @@ static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
sti_plane_init_property(&hqvdp->plane, DRM_PLANE_TYPE_OVERLAY);
- if (hqvdp_debugfs_init(hqvdp, drm_dev->primary))
- DRM_ERROR("HQVDP debugfs setup failed\n");
-
return &hqvdp->plane.drm_plane;
}
@@ -1346,6 +1363,7 @@ static int sti_hqvdp_probe(struct platform_device *pdev)
vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0);
if (vtg_np)
hqvdp->vtg = of_vtg_find(vtg_np);
+ of_node_put(vtg_np);
platform_set_drvdata(pdev, hqvdp);
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
index aed7801b5..7d9aea805 100644
--- a/drivers/gpu/drm/sti/sti_mixer.c
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -151,12 +151,6 @@ static int mixer_dbg_show(struct seq_file *s, void *arg)
{
struct drm_info_node *node = s->private;
struct sti_mixer *mixer = (struct sti_mixer *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
seq_printf(s, "%s: (vaddr = 0x%p)",
sti_mixer_to_str(mixer), mixer->regs);
@@ -176,7 +170,6 @@ static int mixer_dbg_show(struct seq_file *s, void *arg)
mixer_dbg_mxn(s, mixer->regs + GAM_MIXER_MX0);
seq_puts(s, "\n");
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -188,7 +181,7 @@ static struct drm_info_list mixer1_debugfs_files[] = {
{ "mixer_aux", mixer_dbg_show, 0, NULL },
};
-static int mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor)
+int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor)
{
unsigned int i;
struct drm_info_list *mixer_debugfs_files;
@@ -246,13 +239,10 @@ static void sti_mixer_set_background_area(struct sti_mixer *mixer,
int sti_mixer_set_plane_depth(struct sti_mixer *mixer, struct sti_plane *plane)
{
- int plane_id, depth = plane->zorder;
+ int plane_id, depth = plane->drm_plane.state->normalized_zpos;
unsigned int i;
u32 mask, val;
- if ((depth < 1) || (depth > GAM_MIXER_NB_DEPTH_LEVEL))
- return 1;
-
switch (plane->desc) {
case STI_GDP_0:
plane_id = GAM_DEPTH_GDP0_ID;
@@ -285,8 +275,8 @@ int sti_mixer_set_plane_depth(struct sti_mixer *mixer, struct sti_plane *plane)
break;
}
- mask |= GAM_DEPTH_MASK_ID << (3 * (depth - 1));
- plane_id = plane_id << (3 * (depth - 1));
+ mask |= GAM_DEPTH_MASK_ID << (3 * depth);
+ plane_id = plane_id << (3 * depth);
DRM_DEBUG_DRIVER("%s %s depth=%d\n", sti_mixer_to_str(mixer),
sti_plane_to_str(plane), depth);
@@ -400,8 +390,5 @@ struct sti_mixer *sti_mixer_create(struct device *dev,
DRM_DEBUG_DRIVER("%s created. Regs=%p\n",
sti_mixer_to_str(mixer), mixer->regs);
- if (mixer_debugfs_init(mixer, drm_dev->primary))
- DRM_ERROR("MIXER debugfs setup failed\n");
-
return mixer;
}
diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h
index 6f35fc086..830a3c42d 100644
--- a/drivers/gpu/drm/sti/sti_mixer.h
+++ b/drivers/gpu/drm/sti/sti_mixer.h
@@ -55,6 +55,8 @@ int sti_mixer_active_video_area(struct sti_mixer *mixer,
void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable);
+int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor);
+
/* depth in Cross-bar control = z order */
#define GAM_MIXER_NB_DEPTH_LEVEL 6
diff --git a/drivers/gpu/drm/sti/sti_plane.c b/drivers/gpu/drm/sti/sti_plane.c
index f10c98d3f..ca4b3719a 100644
--- a/drivers/gpu/drm/sti/sti_plane.c
+++ b/drivers/gpu/drm/sti/sti_plane.c
@@ -14,15 +14,6 @@
#include "sti_drv.h"
#include "sti_plane.h"
-/* (Background) < GDP0 < GDP1 < HQVDP0 < GDP2 < GDP3 < (ForeGround) */
-enum sti_plane_desc sti_plane_default_zorder[] = {
- STI_GDP_0,
- STI_GDP_1,
- STI_HQVDP_0,
- STI_GDP_2,
- STI_GDP_3,
-};
-
const char *sti_plane_to_str(struct sti_plane *plane)
{
switch (plane->desc) {
@@ -45,25 +36,15 @@ const char *sti_plane_to_str(struct sti_plane *plane)
#define STI_FPS_INTERVAL_MS 3000
-static int sti_plane_timespec_ms_diff(struct timespec lhs, struct timespec rhs)
-{
- struct timespec tmp_ts = timespec_sub(lhs, rhs);
- u64 tmp_ns = (u64)timespec_to_ns(&tmp_ts);
-
- do_div(tmp_ns, NSEC_PER_MSEC);
-
- return (u32)tmp_ns;
-}
-
void sti_plane_update_fps(struct sti_plane *plane,
bool new_frame,
bool new_field)
{
- struct timespec now;
+ ktime_t now;
struct sti_fps_info *fps;
int fpks, fipks, ms_since_last, num_frames, num_fields;
- getrawmonotonic(&now);
+ now = ktime_get();
/* Compute number of frame updates */
fps = &plane->fps_info;
@@ -76,7 +57,7 @@ void sti_plane_update_fps(struct sti_plane *plane,
return;
fps->curr_frame_counter++;
- ms_since_last = sti_plane_timespec_ms_diff(now, fps->last_timestamp);
+ ms_since_last = ktime_to_ms(ktime_sub(now, fps->last_timestamp));
num_frames = fps->curr_frame_counter - fps->last_frame_counter;
if (num_frames <= 0 || ms_since_last < STI_FPS_INTERVAL_MS)
@@ -106,77 +87,46 @@ void sti_plane_update_fps(struct sti_plane *plane,
plane->fps_info.fips_str);
}
-static void sti_plane_destroy(struct drm_plane *drm_plane)
-{
- DRM_DEBUG_DRIVER("\n");
-
- drm_plane_helper_disable(drm_plane);
- drm_plane_cleanup(drm_plane);
-}
-
-static int sti_plane_set_property(struct drm_plane *drm_plane,
- struct drm_property *property,
- uint64_t val)
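+/*
+ * Primary planes sit at the back (0), overlays just above (1),
+ * the cursor on top (7).
+ */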
+static int sti_plane_get_default_zpos(enum drm_plane_type type)
{
- struct drm_device *dev = drm_plane->dev;
- struct sti_private *private = dev->dev_private;
- struct sti_plane *plane = to_sti_plane(drm_plane);
-
- DRM_DEBUG_DRIVER("\n");
-
- if (property == private->plane_zorder_property) {
- plane->zorder = val;
+ switch (type) {
+ case DRM_PLANE_TYPE_PRIMARY:
return 0;
+ case DRM_PLANE_TYPE_OVERLAY:
+ return 1;
+ case DRM_PLANE_TYPE_CURSOR:
+ return 7;
}
+ return 0;
+}
- return -EINVAL;
+void sti_plane_reset(struct drm_plane *plane)
+{
+ drm_atomic_helper_plane_reset(plane);
+ plane->state->zpos = sti_plane_get_default_zpos(plane->type);
}
-static void sti_plane_attach_zorder_property(struct drm_plane *drm_plane)
+static void sti_plane_attach_zorder_property(struct drm_plane *drm_plane,
+ enum drm_plane_type type)
{
- struct drm_device *dev = drm_plane->dev;
- struct sti_private *private = dev->dev_private;
- struct sti_plane *plane = to_sti_plane(drm_plane);
- struct drm_property *prop;
-
- prop = private->plane_zorder_property;
- if (!prop) {
- prop = drm_property_create_range(dev, 0, "zpos", 1,
- GAM_MIXER_NB_DEPTH_LEVEL);
- if (!prop)
- return;
-
- private->plane_zorder_property = prop;
+ int zpos = sti_plane_get_default_zpos(type);
+
+ switch (type) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ case DRM_PLANE_TYPE_OVERLAY:
+ drm_plane_create_zpos_property(drm_plane, zpos, 0, 6);
+ break;
+ case DRM_PLANE_TYPE_CURSOR:
+ drm_plane_create_zpos_immutable_property(drm_plane, zpos);
+ break;
}
-
- drm_object_attach_property(&drm_plane->base, prop, plane->zorder);
}
void sti_plane_init_property(struct sti_plane *plane,
enum drm_plane_type type)
{
- unsigned int i;
+ sti_plane_attach_zorder_property(&plane->drm_plane, type);
- for (i = 0; i < ARRAY_SIZE(sti_plane_default_zorder); i++)
- if (sti_plane_default_zorder[i] == plane->desc)
- break;
-
- plane->zorder = i + 1;
-
- if (type == DRM_PLANE_TYPE_OVERLAY)
- sti_plane_attach_zorder_property(&plane->drm_plane);
-
- DRM_DEBUG_DRIVER("drm plane:%d mapped to %s with zorder:%d\n",
- plane->drm_plane.base.id,
- sti_plane_to_str(plane), plane->zorder);
+ DRM_DEBUG_DRIVER("drm plane:%d mapped to %s\n",
+ plane->drm_plane.base.id, sti_plane_to_str(plane));
}
-
-struct drm_plane_funcs sti_plane_helpers_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = sti_plane_destroy,
- .set_property = sti_plane_set_property,
- .reset = drm_atomic_helper_plane_reset,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
-};
diff --git a/drivers/gpu/drm/sti/sti_plane.h b/drivers/gpu/drm/sti/sti_plane.h
index c50a3b9f5..ce3e8d6c8 100644
--- a/drivers/gpu/drm/sti/sti_plane.h
+++ b/drivers/gpu/drm/sti/sti_plane.h
@@ -11,8 +11,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
-extern struct drm_plane_funcs sti_plane_helpers_funcs;
-
#define to_sti_plane(x) container_of(x, struct sti_plane, drm_plane)
#define STI_PLANE_TYPE_SHIFT 8
@@ -57,7 +55,7 @@ struct sti_fps_info {
unsigned int last_frame_counter;
unsigned int curr_field_counter;
unsigned int last_field_counter;
- struct timespec last_timestamp;
+ ktime_t last_timestamp;
char fps_str[FPS_LENGTH];
char fips_str[FPS_LENGTH];
};
@@ -68,14 +66,12 @@ struct sti_fps_info {
* @plane: drm plane it is bound to (if any)
* @desc: plane type & id
* @status: to know the status of the plane
- * @zorder: plane z-order
* @fps_info: frame per second info
*/
struct sti_plane {
struct drm_plane drm_plane;
enum sti_plane_desc desc;
enum sti_plane_status status;
- int zorder;
struct sti_fps_info fps_info;
};
@@ -83,6 +79,8 @@ const char *sti_plane_to_str(struct sti_plane *plane);
void sti_plane_update_fps(struct sti_plane *plane,
bool new_frame,
bool new_field);
+
void sti_plane_init_property(struct sti_plane *plane,
enum drm_plane_type type);
+void sti_plane_reset(struct drm_plane *plane);
#endif
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index f983db5a5..e25995b35 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -112,6 +112,7 @@ struct sti_tvout {
struct drm_encoder *hdmi;
struct drm_encoder *hda;
struct drm_encoder *dvo;
+ bool debugfs_registered;
};
struct sti_tvout_encoder {
@@ -515,13 +516,7 @@ static int tvout_dbg_show(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct sti_tvout *tvout = (struct sti_tvout *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
struct drm_crtc *crtc;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
seq_printf(s, "TVOUT: (vaddr = 0x%p)", tvout->regs);
@@ -587,7 +582,6 @@ static int tvout_dbg_show(struct seq_file *s, void *data)
DBGFS_DUMP(TVO_AUX_IN_VID_FORMAT);
seq_puts(s, "\n");
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -632,8 +626,37 @@ static void sti_tvout_encoder_destroy(struct drm_encoder *encoder)
kfree(sti_encoder);
}
+static int sti_tvout_late_register(struct drm_encoder *encoder)
+{
+ struct sti_tvout *tvout = to_sti_tvout(encoder);
+ int ret;
+
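+	/* The tvout encoders share these funcs; register debugfs only once. */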
+ if (tvout->debugfs_registered)
+ return 0;
+
+ ret = tvout_debugfs_init(tvout, encoder->dev->primary);
+ if (ret)
+ return ret;
+
+ tvout->debugfs_registered = true;
+ return 0;
+}
+
+static void sti_tvout_early_unregister(struct drm_encoder *encoder)
+{
+ struct sti_tvout *tvout = to_sti_tvout(encoder);
+
+ if (!tvout->debugfs_registered)
+ return;
+
+ tvout_debugfs_exit(tvout, encoder->dev->primary);
+ tvout->debugfs_registered = false;
+}
+
static const struct drm_encoder_funcs sti_tvout_encoder_funcs = {
.destroy = sti_tvout_encoder_destroy,
+ .late_register = sti_tvout_late_register,
+ .early_unregister = sti_tvout_early_unregister,
};
static void sti_dvo_encoder_enable(struct drm_encoder *encoder)
@@ -820,9 +843,6 @@ static int sti_tvout_bind(struct device *dev, struct device *master, void *data)
sti_tvout_create_encoders(drm_dev, tvout);
- if (tvout_debugfs_init(tvout, drm_dev->primary))
- DRM_ERROR("TVOUT debugfs setup failed\n");
-
return 0;
}
@@ -830,11 +850,8 @@ static void sti_tvout_unbind(struct device *dev, struct device *master,
void *data)
{
struct sti_tvout *tvout = dev_get_drvdata(dev);
- struct drm_device *drm_dev = data;
sti_tvout_destroy_encoders(tvout);
-
- tvout_debugfs_exit(tvout, drm_dev->primary);
}
static const struct component_ops sti_tvout_ops = {
diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c
index 523ed19f5..47634a025 100644
--- a/drivers/gpu/drm/sti/sti_vid.c
+++ b/drivers/gpu/drm/sti/sti_vid.c
@@ -92,12 +92,6 @@ static int vid_dbg_show(struct seq_file *s, void *arg)
{
struct drm_info_node *node = s->private;
struct sti_vid *vid = (struct sti_vid *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
seq_printf(s, "VID: (vaddr= 0x%p)", vid->regs);
@@ -122,7 +116,6 @@ static int vid_dbg_show(struct seq_file *s, void *arg)
DBGFS_DUMP(VID_CSAT);
seq_puts(s, "\n");
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -130,7 +123,7 @@ static struct drm_info_list vid_debugfs_files[] = {
{ "vid", vid_dbg_show, 0, NULL },
};
-static int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor)
+int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor)
{
unsigned int i;
@@ -227,8 +220,5 @@ struct sti_vid *sti_vid_create(struct device *dev, struct drm_device *drm_dev,
sti_vid_init(vid);
- if (vid_debugfs_init(vid, drm_dev->primary))
- DRM_ERROR("VID debugfs setup failed\n");
-
return vid;
}
diff --git a/drivers/gpu/drm/sti/sti_vid.h b/drivers/gpu/drm/sti/sti_vid.h
index 6c842344f..fdc90f922 100644
--- a/drivers/gpu/drm/sti/sti_vid.h
+++ b/drivers/gpu/drm/sti/sti_vid.h
@@ -26,4 +26,6 @@ void sti_vid_disable(struct sti_vid *vid);
struct sti_vid *sti_vid_create(struct device *dev, struct drm_device *drm_dev,
int id, void __iomem *baseaddr);
+int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor);
+
#endif
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index 6bf4ce466..0bdc385ee 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -65,7 +65,7 @@
#define HDMI_DELAY (5)
/* Delay introduced by the DVO in nb of pixel */
-#define DVO_DELAY (2)
+#define DVO_DELAY (7)
/* delay introduced by the Arbitrary Waveform Generator in nb of pixels */
#define AWG_DELAY_HD (-9)
@@ -432,6 +432,7 @@ static int vtg_probe(struct platform_device *pdev)
np = of_parse_phandle(pdev->dev.of_node, "st,slave", 0);
if (np) {
vtg->slave = of_vtg_find(np);
+ of_node_put(np);
if (!vtg->slave)
return -EPROBE_DEFER;
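
Note on the hunk above: the added of_node_put() closes a device-tree node reference leak, since of_parse_phandle() returns the node with its refcount raised and the reference must be dropped once the lookup is done, on the success and failure paths alike. A minimal sketch of the pattern, assuming a hypothetical lookup helper (my_find_slave_device and the "vendor,slave" property are illustrative, not from this driver):

#include <linux/errno.h>
#include <linux/of.h>

/* Hypothetical lookup standing in for of_vtg_find(); returns NULL when
 * the slave has not been probed yet. */
static void *my_find_slave_device(struct device_node *np);

static int my_find_slave(struct device_node *parent, void **slave)
{
	struct device_node *np;

	/* of_parse_phandle() takes a reference on the returned node */
	np = of_parse_phandle(parent, "vendor,slave", 0);
	if (!np)
		return -ENODEV;

	*slave = my_find_slave_device(np);
	of_node_put(np);	/* balance the reference on every path */

	return *slave ? 0 : -EPROBE_DEFER;
}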
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c
index 41cacecbe..4a1922105 100644
--- a/drivers/gpu/drm/sun4i/sun4i_crtc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c
@@ -51,10 +51,22 @@ static void sun4i_crtc_atomic_flush(struct drm_crtc *crtc,
{
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
struct sun4i_drv *drv = scrtc->drv;
+ struct drm_pending_vblank_event *event = crtc->state->event;
DRM_DEBUG_DRIVER("Committing plane changes\n");
sun4i_backend_commit(drv->backend);
+
+ if (event) {
+ crtc->state->event = NULL;
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+ }
}
static void sun4i_crtc_disable(struct drm_crtc *crtc)
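
The sun4i_crtc_atomic_flush() hunk above adds the standard atomic page-flip completion idiom: a pending vblank event attached to the CRTC state is armed against the next vblank when a vblank reference can be taken, and sent immediately when vblanks are off. A sketch of the same idiom as it might appear in a generic atomic_flush hook (names are generic; this shows the pattern, not this driver's exact code):

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

/* Sketch: complete a pending flip event from an atomic_flush hook. The
 * event must be consumed exactly once, under dev->event_lock. */
static void example_crtc_atomic_flush(struct drm_crtc *crtc)
{
	struct drm_pending_vblank_event *event = crtc->state->event;

	if (!event)
		return;

	crtc->state->event = NULL;

	spin_lock_irq(&crtc->dev->event_lock);
	if (drm_crtc_vblank_get(crtc) == 0)
		drm_crtc_arm_vblank_event(crtc, event);	/* fires at next vblank */
	else
		drm_crtc_send_vblank_event(crtc, event);	/* vblanks off: send now */
	spin_unlock_irq(&crtc->dev->event_lock);
}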
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 937394cbc..7092daaf6 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -75,7 +75,7 @@ static struct drm_driver sun4i_drv_driver = {
.dumb_create = drm_gem_cma_dumb_create,
.dumb_destroy = drm_gem_dumb_destroy,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
/* PRIME Operations */
@@ -123,10 +123,6 @@ static int sun4i_drv_bind(struct device *dev)
if (!drm)
return -ENOMEM;
- ret = drm_dev_set_unique(drm, dev_name(drm->dev));
- if (ret)
- goto free_drm;
-
drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
if (!drv) {
ret = -ENOMEM;
@@ -178,14 +174,8 @@ static int sun4i_drv_bind(struct device *dev)
if (ret)
goto free_drm;
- ret = drm_connector_register_all(drm);
- if (ret)
- goto unregister_drm;
-
return 0;
-unregister_drm:
- drm_dev_unregister(drm);
free_drm:
drm_dev_unref(drm);
return ret;
@@ -195,7 +185,6 @@ static void sun4i_drv_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- drm_connector_unregister_all(drm);
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
sun4i_framebuffer_free(drm);
diff --git a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
index a0b30c216..70688febd 100644
--- a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
@@ -20,8 +20,7 @@ static void sun4i_de_output_poll_changed(struct drm_device *drm)
{
struct sun4i_drv *drv = drm->dev_private;
- if (drv->fbdev)
- drm_fbdev_cma_hotplug_event(drv->fbdev);
+ drm_fbdev_cma_hotplug_event(drv->fbdev);
}
static const struct drm_mode_config_funcs sun4i_de_mode_config_funcs = {
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index aaffe9e64..f5bbac6ef 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -104,19 +104,9 @@ static int sun4i_rgb_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static struct drm_encoder *
-sun4i_rgb_best_encoder(struct drm_connector *connector)
-{
- struct sun4i_rgb *rgb =
- drm_connector_to_sun4i_rgb(connector);
-
- return &rgb->encoder;
-}
-
static struct drm_connector_helper_funcs sun4i_rgb_con_helper_funcs = {
.get_modes = sun4i_rgb_get_modes,
.mode_valid = sun4i_rgb_mode_valid,
- .best_encoder = sun4i_rgb_best_encoder,
};
static enum drm_connector_status
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index bc047f923..b84147896 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -526,18 +526,9 @@ static int sun4i_tv_comp_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-static struct drm_encoder *
-sun4i_tv_comp_best_encoder(struct drm_connector *connector)
-{
- struct sun4i_tv *tv = drm_connector_to_sun4i_tv(connector);
-
- return &tv->encoder;
-}
-
static struct drm_connector_helper_funcs sun4i_tv_comp_connector_helper_funcs = {
.get_modes = sun4i_tv_comp_get_modes,
.mode_valid = sun4i_tv_comp_mode_valid,
- .best_encoder = sun4i_tv_comp_best_encoder,
};
static enum drm_connector_status
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 39940f5b7..8495bd01b 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/iommu.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <soc/tegra/pmc.h>
@@ -1216,6 +1217,8 @@ static void tegra_crtc_disable(struct drm_crtc *crtc)
tegra_dc_stats_reset(&dc->stats);
drm_crtc_vblank_off(crtc);
+
+ pm_runtime_put_sync(dc->dev);
}
static void tegra_crtc_enable(struct drm_crtc *crtc)
@@ -1225,6 +1228,48 @@ static void tegra_crtc_enable(struct drm_crtc *crtc)
struct tegra_dc *dc = to_tegra_dc(crtc);
u32 value;
+ pm_runtime_get_sync(dc->dev);
+
+ /* initialize display controller */
+ if (dc->syncpt) {
+ u32 syncpt = host1x_syncpt_id(dc->syncpt);
+
+ value = SYNCPT_CNTRL_NO_STALL;
+ tegra_dc_writel(dc, value, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
+
+ value = SYNCPT_VSYNC_ENABLE | syncpt;
+ tegra_dc_writel(dc, value, DC_CMD_CONT_SYNCPT_VSYNC);
+ }
+
+ value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+ WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
+
+ value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+ WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
+
+ /* initialize timer */
+ value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
+ WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
+
+ value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
+ WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
+ tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
+
+ value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+ WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
+
+ value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+ WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+ tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+
+ if (dc->soc->supports_border_color)
+ tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
+
+ /* apply PLL and pixel clock changes */
tegra_dc_commit_state(dc, state);
/* program display mode */
@@ -1685,7 +1730,6 @@ static int tegra_dc_init(struct host1x_client *client)
struct tegra_drm *tegra = drm->dev_private;
struct drm_plane *primary = NULL;
struct drm_plane *cursor = NULL;
- u32 value;
int err;
dc->syncpt = host1x_syncpt_request(dc->dev, flags);
@@ -1755,47 +1799,6 @@ static int tegra_dc_init(struct host1x_client *client)
goto cleanup;
}
- /* initialize display controller */
- if (dc->syncpt) {
- u32 syncpt = host1x_syncpt_id(dc->syncpt);
-
- value = SYNCPT_CNTRL_NO_STALL;
- tegra_dc_writel(dc, value, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
-
- value = SYNCPT_VSYNC_ENABLE | syncpt;
- tegra_dc_writel(dc, value, DC_CMD_CONT_SYNCPT_VSYNC);
- }
-
- value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
- WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
- tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
-
- value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
- WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
- tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
-
- /* initialize timer */
- value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
- WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
- tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
-
- value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
- WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
- tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
-
- value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
- WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
- tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
-
- value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
- WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
- tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
-
- if (dc->soc->supports_border_color)
- tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
-
- tegra_dc_stats_reset(&dc->stats);
-
return 0;
cleanup:
@@ -1987,33 +1990,15 @@ static int tegra_dc_probe(struct platform_device *pdev)
return PTR_ERR(dc->rst);
}
+ reset_control_assert(dc->rst);
+
if (dc->soc->has_powergate) {
if (dc->pipe == 0)
dc->powergate = TEGRA_POWERGATE_DIS;
else
dc->powergate = TEGRA_POWERGATE_DISB;
- err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk,
- dc->rst);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to power partition: %d\n",
- err);
- return err;
- }
- } else {
- err = clk_prepare_enable(dc->clk);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to enable clock: %d\n",
- err);
- return err;
- }
-
- err = reset_control_deassert(dc->rst);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to deassert reset: %d\n",
- err);
- return err;
- }
+ tegra_powergate_power_off(dc->powergate);
}
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2027,16 +2012,19 @@ static int tegra_dc_probe(struct platform_device *pdev)
return -ENXIO;
}
- INIT_LIST_HEAD(&dc->client.list);
- dc->client.ops = &dc_client_ops;
- dc->client.dev = &pdev->dev;
-
err = tegra_dc_rgb_probe(dc);
if (err < 0 && err != -ENODEV) {
dev_err(&pdev->dev, "failed to probe RGB output: %d\n", err);
return err;
}
+ platform_set_drvdata(pdev, dc);
+ pm_runtime_enable(&pdev->dev);
+
+ INIT_LIST_HEAD(&dc->client.list);
+ dc->client.ops = &dc_client_ops;
+ dc->client.dev = &pdev->dev;
+
err = host1x_client_register(&dc->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
@@ -2044,8 +2032,6 @@ static int tegra_dc_probe(struct platform_device *pdev)
return err;
}
- platform_set_drvdata(pdev, dc);
-
return 0;
}
@@ -2067,7 +2053,22 @@ static int tegra_dc_remove(struct platform_device *pdev)
return err;
}
- reset_control_assert(dc->rst);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_dc_suspend(struct device *dev)
+{
+ struct tegra_dc *dc = dev_get_drvdata(dev);
+ int err;
+
+ err = reset_control_assert(dc->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
if (dc->soc->has_powergate)
tegra_powergate_power_off(dc->powergate);
@@ -2077,10 +2078,45 @@ static int tegra_dc_remove(struct platform_device *pdev)
return 0;
}
+static int tegra_dc_resume(struct device *dev)
+{
+ struct tegra_dc *dc = dev_get_drvdata(dev);
+ int err;
+
+ if (dc->soc->has_powergate) {
+ err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk,
+ dc->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to power partition: %d\n", err);
+ return err;
+ }
+ } else {
+ err = clk_prepare_enable(dc->clk);
+ if (err < 0) {
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ err = reset_control_deassert(dc->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to deassert reset: %d\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops tegra_dc_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_dc_suspend, tegra_dc_resume, NULL)
+};
+
struct platform_driver tegra_dc_driver = {
.driver = {
.name = "tegra-dc",
.of_match_table = tegra_dc_of_match,
+ .pm = &tegra_dc_pm_ops,
},
.probe = tegra_dc_probe,
.remove = tegra_dc_remove,
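
The display-controller changes above convert open-coded power sequencing into runtime PM: probe now only asserts the reset and enables runtime PM, the CRTC enable/disable hooks bracket the active period with pm_runtime_get_sync()/pm_runtime_put_sync(), and the clock, reset and powergate handling moves into the runtime suspend/resume callbacks. A condensed sketch of that wiring, assuming a plain clock and reset (my_dc and its members are illustrative):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

struct my_dc {
	struct clk *clk;
	struct reset_control *rst;
};

static int my_dc_runtime_suspend(struct device *dev)
{
	struct my_dc *dc = dev_get_drvdata(dev);

	reset_control_assert(dc->rst);	/* quiesce before cutting the clock */
	clk_disable_unprepare(dc->clk);

	return 0;
}

static int my_dc_runtime_resume(struct device *dev)
{
	struct my_dc *dc = dev_get_drvdata(dev);
	int err;

	err = clk_prepare_enable(dc->clk);
	if (err < 0)
		return err;

	return reset_control_deassert(dc->rst);
}

static const struct dev_pm_ops my_dc_pm_ops = {
	SET_RUNTIME_PM_OPS(my_dc_runtime_suspend, my_dc_runtime_resume, NULL)
};

/* enable hook:  pm_runtime_get_sync(dev); ...program the hardware...
 * disable hook: ...stop the hardware...; pm_runtime_put_sync(dev); */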
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index b24a0f148..059f40955 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -12,6 +12,9 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_gpio.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/regulator/consumer.h>
@@ -44,6 +47,11 @@ struct tegra_dpaux {
struct completion complete;
struct work_struct work;
struct list_head list;
+
+#ifdef CONFIG_GENERIC_PINCONF
+ struct pinctrl_dev *pinctrl;
+ struct pinctrl_desc desc;
+#endif
};
static inline struct tegra_dpaux *to_dpaux(struct drm_dp_aux *aux)
@@ -267,6 +275,148 @@ static irqreturn_t tegra_dpaux_irq(int irq, void *data)
return ret;
}
+enum tegra_dpaux_functions {
+ DPAUX_PADCTL_FUNC_AUX,
+ DPAUX_PADCTL_FUNC_I2C,
+ DPAUX_PADCTL_FUNC_OFF,
+};
+
+static void tegra_dpaux_pad_power_down(struct tegra_dpaux *dpaux)
+{
+ u32 value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
+
+ value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
+
+ tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
+}
+
+static void tegra_dpaux_pad_power_up(struct tegra_dpaux *dpaux)
+{
+ u32 value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
+
+ value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
+
+ tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
+}
+
+static int tegra_dpaux_pad_config(struct tegra_dpaux *dpaux, unsigned function)
+{
+ u32 value;
+
+ switch (function) {
+ case DPAUX_PADCTL_FUNC_AUX:
+ value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) |
+ DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) |
+ DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV |
+ DPAUX_HYBRID_PADCTL_MODE_AUX;
+ break;
+
+ case DPAUX_PADCTL_FUNC_I2C:
+ value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV |
+ DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
+ DPAUX_HYBRID_PADCTL_MODE_I2C;
+ break;
+
+ case DPAUX_PADCTL_FUNC_OFF:
+ tegra_dpaux_pad_power_down(dpaux);
+ return 0;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL);
+ tegra_dpaux_pad_power_up(dpaux);
+
+ return 0;
+}
+
+#ifdef CONFIG_GENERIC_PINCONF
+static const struct pinctrl_pin_desc tegra_dpaux_pins[] = {
+ PINCTRL_PIN(0, "DP_AUX_CHx_P"),
+ PINCTRL_PIN(1, "DP_AUX_CHx_N"),
+};
+
+static const unsigned tegra_dpaux_pin_numbers[] = { 0, 1 };
+
+static const char * const tegra_dpaux_groups[] = {
+ "dpaux-io",
+};
+
+static const char * const tegra_dpaux_functions[] = {
+ "aux",
+ "i2c",
+ "off",
+};
+
+static int tegra_dpaux_get_groups_count(struct pinctrl_dev *pinctrl)
+{
+ return ARRAY_SIZE(tegra_dpaux_groups);
+}
+
+static const char *tegra_dpaux_get_group_name(struct pinctrl_dev *pinctrl,
+ unsigned int group)
+{
+ return tegra_dpaux_groups[group];
+}
+
+static int tegra_dpaux_get_group_pins(struct pinctrl_dev *pinctrl,
+ unsigned group, const unsigned **pins,
+ unsigned *num_pins)
+{
+ *pins = tegra_dpaux_pin_numbers;
+ *num_pins = ARRAY_SIZE(tegra_dpaux_pin_numbers);
+
+ return 0;
+}
+
+static const struct pinctrl_ops tegra_dpaux_pinctrl_ops = {
+ .get_groups_count = tegra_dpaux_get_groups_count,
+ .get_group_name = tegra_dpaux_get_group_name,
+ .get_group_pins = tegra_dpaux_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_group,
+ .dt_free_map = pinconf_generic_dt_free_map,
+};
+
+static int tegra_dpaux_get_functions_count(struct pinctrl_dev *pinctrl)
+{
+ return ARRAY_SIZE(tegra_dpaux_functions);
+}
+
+static const char *tegra_dpaux_get_function_name(struct pinctrl_dev *pinctrl,
+ unsigned int function)
+{
+ return tegra_dpaux_functions[function];
+}
+
+static int tegra_dpaux_get_function_groups(struct pinctrl_dev *pinctrl,
+ unsigned int function,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ *num_groups = ARRAY_SIZE(tegra_dpaux_groups);
+ *groups = tegra_dpaux_groups;
+
+ return 0;
+}
+
+static int tegra_dpaux_set_mux(struct pinctrl_dev *pinctrl,
+ unsigned int function, unsigned int group)
+{
+ struct tegra_dpaux *dpaux = pinctrl_dev_get_drvdata(pinctrl);
+
+ return tegra_dpaux_pad_config(dpaux, function);
+}
+
+static const struct pinmux_ops tegra_dpaux_pinmux_ops = {
+ .get_functions_count = tegra_dpaux_get_functions_count,
+ .get_function_name = tegra_dpaux_get_function_name,
+ .get_function_groups = tegra_dpaux_get_function_groups,
+ .set_mux = tegra_dpaux_set_mux,
+};
+#endif
+
static int tegra_dpaux_probe(struct platform_device *pdev)
{
struct tegra_dpaux *dpaux;
@@ -294,11 +444,14 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
return -ENXIO;
}
- dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux");
- if (IS_ERR(dpaux->rst)) {
- dev_err(&pdev->dev, "failed to get reset control: %ld\n",
- PTR_ERR(dpaux->rst));
- return PTR_ERR(dpaux->rst);
+ if (!pdev->dev.pm_domain) {
+ dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux");
+ if (IS_ERR(dpaux->rst)) {
+ dev_err(&pdev->dev,
+ "failed to get reset control: %ld\n",
+ PTR_ERR(dpaux->rst));
+ return PTR_ERR(dpaux->rst);
+ }
}
dpaux->clk = devm_clk_get(&pdev->dev, NULL);
@@ -315,34 +468,37 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
return err;
}
- reset_control_deassert(dpaux->rst);
+ if (dpaux->rst)
+ reset_control_deassert(dpaux->rst);
dpaux->clk_parent = devm_clk_get(&pdev->dev, "parent");
if (IS_ERR(dpaux->clk_parent)) {
dev_err(&pdev->dev, "failed to get parent clock: %ld\n",
PTR_ERR(dpaux->clk_parent));
- return PTR_ERR(dpaux->clk_parent);
+ err = PTR_ERR(dpaux->clk_parent);
+ goto assert_reset;
}
err = clk_prepare_enable(dpaux->clk_parent);
if (err < 0) {
dev_err(&pdev->dev, "failed to enable parent clock: %d\n",
err);
- return err;
+ goto assert_reset;
}
err = clk_set_rate(dpaux->clk_parent, 270000000);
if (err < 0) {
dev_err(&pdev->dev, "failed to set clock to 270 MHz: %d\n",
err);
- return err;
+ goto disable_parent_clk;
}
dpaux->vdd = devm_regulator_get(&pdev->dev, "vdd");
if (IS_ERR(dpaux->vdd)) {
dev_err(&pdev->dev, "failed to get VDD supply: %ld\n",
PTR_ERR(dpaux->vdd));
- return PTR_ERR(dpaux->vdd);
+ err = PTR_ERR(dpaux->vdd);
+ goto disable_parent_clk;
}
err = devm_request_irq(dpaux->dev, dpaux->irq, tegra_dpaux_irq, 0,
@@ -350,7 +506,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
if (err < 0) {
dev_err(dpaux->dev, "failed to request IRQ#%u: %d\n",
dpaux->irq, err);
- return err;
+ goto disable_parent_clk;
}
disable_irq(dpaux->irq);
@@ -360,7 +516,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
err = drm_dp_aux_register(&dpaux->aux);
if (err < 0)
- return err;
+ goto disable_parent_clk;
/*
* Assume that by default the DPAUX/I2C pads will be used for HDMI,
@@ -370,16 +526,24 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
* is no possibility to perform the I2C mode configuration in the
* HDMI path.
*/
- value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
- value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
- tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
-
- value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_PADCTL);
- value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV |
- DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
- DPAUX_HYBRID_PADCTL_MODE_I2C;
- tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL);
+ err = tegra_dpaux_pad_config(dpaux, DPAUX_PADCTL_FUNC_I2C);
+ if (err < 0)
+ goto disable_parent_clk;
+#ifdef CONFIG_GENERIC_PINCONF
+ dpaux->desc.name = dev_name(&pdev->dev);
+ dpaux->desc.pins = tegra_dpaux_pins;
+ dpaux->desc.npins = ARRAY_SIZE(tegra_dpaux_pins);
+ dpaux->desc.pctlops = &tegra_dpaux_pinctrl_ops;
+ dpaux->desc.pmxops = &tegra_dpaux_pinmux_ops;
+ dpaux->desc.owner = THIS_MODULE;
+
+ dpaux->pinctrl = devm_pinctrl_register(&pdev->dev, &dpaux->desc, dpaux);
+ if (IS_ERR(dpaux->pinctrl)) {
+ dev_err(&pdev->dev, "failed to register pinctrl device\n");
+ err = PTR_ERR(dpaux->pinctrl);
+ goto disable_parent_clk;
+ }
+#endif
/* enable and clear all interrupts */
value = DPAUX_INTR_AUX_DONE | DPAUX_INTR_IRQ_EVENT |
DPAUX_INTR_UNPLUG_EVENT | DPAUX_INTR_PLUG_EVENT;
@@ -393,17 +557,24 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dpaux);
return 0;
+
+disable_parent_clk:
+ clk_disable_unprepare(dpaux->clk_parent);
+assert_reset:
+ if (dpaux->rst)
+ reset_control_assert(dpaux->rst);
+
+ clk_disable_unprepare(dpaux->clk);
+
+ return err;
}
static int tegra_dpaux_remove(struct platform_device *pdev)
{
struct tegra_dpaux *dpaux = platform_get_drvdata(pdev);
- u32 value;
/* make sure pads are powered down when not in use */
- value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
- value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
- tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
+ tegra_dpaux_pad_power_down(dpaux);
drm_dp_aux_unregister(&dpaux->aux);
@@ -414,7 +585,10 @@ static int tegra_dpaux_remove(struct platform_device *pdev)
cancel_work_sync(&dpaux->work);
clk_disable_unprepare(dpaux->clk_parent);
- reset_control_assert(dpaux->rst);
+
+ if (dpaux->rst)
+ reset_control_assert(dpaux->rst);
+
clk_disable_unprepare(dpaux->clk);
return 0;
@@ -528,30 +702,15 @@ enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux)
int drm_dp_aux_enable(struct drm_dp_aux *aux)
{
struct tegra_dpaux *dpaux = to_dpaux(aux);
- u32 value;
-
- value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
- DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) |
- DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) |
- DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV |
- DPAUX_HYBRID_PADCTL_MODE_AUX;
- tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL);
-
- value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
- value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
- tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
- return 0;
+ return tegra_dpaux_pad_config(dpaux, DPAUX_PADCTL_FUNC_AUX);
}
int drm_dp_aux_disable(struct drm_dp_aux *aux)
{
struct tegra_dpaux *dpaux = to_dpaux(aux);
- u32 value;
- value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
- value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
- tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
+ tegra_dpaux_pad_power_down(dpaux);
return 0;
}
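
The dpaux changes above register a tiny two-pin pinctrl device so the AUX/I2C pad mode can be selected through generic pin muxing rather than ad-hoc register writes from consumers. The essential wiring is a pinctrl_desc pointing at pinctrl_ops and pinmux_ops tables plus a devm_pinctrl_register() call, which returns an ERR_PTR on failure. A stripped-down sketch under those assumptions (the ops tables are elided; my_register_pinctrl and the pin names are illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>

static const struct pinctrl_pin_desc my_pins[] = {
	PINCTRL_PIN(0, "PAD_P"),
	PINCTRL_PIN(1, "PAD_N"),
};

/* pctl_ops/pmx_ops: the usual get_groups_count/get_group_name/... and
 * get_functions_count/set_mux callbacks, as in the diff above. */
static int my_register_pinctrl(struct device *dev, void *drvdata,
			       const struct pinctrl_ops *pctl_ops,
			       const struct pinmux_ops *pmx_ops)
{
	struct pinctrl_desc *desc;
	struct pinctrl_dev *pctl;

	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	desc->name = dev_name(dev);
	desc->pins = my_pins;
	desc->npins = ARRAY_SIZE(my_pins);
	desc->pctlops = pctl_ops;
	desc->pmxops = pmx_ops;
	desc->owner = THIS_MODULE;

	/* devm_pinctrl_register() returns an ERR_PTR on failure, not NULL */
	pctl = devm_pinctrl_register(dev, desc, drvdata);
	return PTR_ERR_OR_ZERO(pctl);
}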
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index b59c3bf0d..755264d9d 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -56,8 +56,8 @@ static void tegra_atomic_complete(struct tegra_drm *tegra,
*/
drm_atomic_helper_commit_modeset_disables(drm, state);
- drm_atomic_helper_commit_planes(drm, state, false);
drm_atomic_helper_commit_modeset_enables(drm, state);
+ drm_atomic_helper_commit_planes(drm, state, true);
drm_atomic_helper_wait_for_vblanks(drm, state);
@@ -93,7 +93,7 @@ static int tegra_atomic_commit(struct drm_device *drm,
* the software side now.
*/
- drm_atomic_helper_swap_state(drm, state);
+ drm_atomic_helper_swap_state(state, true);
if (nonblock)
tegra_atomic_schedule(tegra, state);
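
The drm.c hunks above track two core API changes: drm_atomic_helper_swap_state() now takes the state plus a stall flag instead of the device, and planes are committed after the modeset enables with active_only set, so plane updates only reach CRTCs that are actually running. A hedged sketch of a minimal commit path built from these helpers as of this kernel (real drivers, including this one, split the tail into a worker for nonblocking commits):

#include <drm/drm_atomic_helper.h>

static int example_atomic_commit(struct drm_device *drm,
				 struct drm_atomic_state *state)
{
	int err;

	err = drm_atomic_helper_prepare_planes(drm, state);
	if (err < 0)
		return err;

	/* new signature: the state plus a stall flag, no drm_device */
	drm_atomic_helper_swap_state(state, true);

	drm_atomic_helper_commit_modeset_disables(drm, state);
	drm_atomic_helper_commit_modeset_enables(drm, state);
	/* active_only = true: skip planes on CRTCs that are not running */
	drm_atomic_helper_commit_planes(drm, state, true);

	drm_atomic_helper_wait_for_vblanks(drm, state);
	drm_atomic_helper_cleanup_planes(drm, state);

	drm_atomic_state_free(state);

	return 0;
}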
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index f52d6cb24..0ddcce1b4 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -239,8 +239,6 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
void tegra_output_exit(struct tegra_output *output);
int tegra_output_connector_get_modes(struct drm_connector *connector);
-struct drm_encoder *
-tegra_output_connector_best_encoder(struct drm_connector *connector);
enum drm_connector_status
tegra_output_connector_detect(struct drm_connector *connector, bool force);
void tegra_output_connector_destroy(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index d1239ebc1..3dea1216b 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -13,6 +13,7 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/regulator/consumer.h>
@@ -677,6 +678,45 @@ static void tegra_dsi_ganged_disable(struct tegra_dsi *dsi)
tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_CONTROL);
}
+static int tegra_dsi_pad_enable(struct tegra_dsi *dsi)
+{
+ u32 value;
+
+ value = DSI_PAD_CONTROL_VS1_PULLDN(0) | DSI_PAD_CONTROL_VS1_PDIO(0);
+ tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_0);
+
+ return 0;
+}
+
+static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
+{
+ u32 value;
+
+ /*
+ * XXX Is this still needed? The module reset is deasserted right
+ * before this function is called.
+ */
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_2);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_3);
+ tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_4);
+
+ /* start calibration */
+ tegra_dsi_pad_enable(dsi);
+
+ value = DSI_PAD_SLEW_UP(0x7) | DSI_PAD_SLEW_DN(0x7) |
+ DSI_PAD_LP_UP(0x1) | DSI_PAD_LP_DN(0x1) |
+ DSI_PAD_OUT_CLK(0x0);
+ tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_2);
+
+ value = DSI_PAD_PREEMP_PD_CLK(0x3) | DSI_PAD_PREEMP_PU_CLK(0x3) |
+ DSI_PAD_PREEMP_PD(0x03) | DSI_PAD_PREEMP_PU(0x3);
+ tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_3);
+
+ return tegra_mipi_calibrate(dsi->mipi);
+}
+
static void tegra_dsi_set_timeout(struct tegra_dsi *dsi, unsigned long bclk,
unsigned int vrefresh)
{
@@ -794,13 +834,27 @@ tegra_dsi_connector_mode_valid(struct drm_connector *connector,
static const struct drm_connector_helper_funcs tegra_dsi_connector_helper_funcs = {
.get_modes = tegra_output_connector_get_modes,
.mode_valid = tegra_dsi_connector_mode_valid,
- .best_encoder = tegra_output_connector_best_encoder,
};
static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = {
.destroy = tegra_output_encoder_destroy,
};
+static void tegra_dsi_unprepare(struct tegra_dsi *dsi)
+{
+ int err;
+
+ if (dsi->slave)
+ tegra_dsi_unprepare(dsi->slave);
+
+ err = tegra_mipi_disable(dsi->mipi);
+ if (err < 0)
+ dev_err(dsi->dev, "failed to disable MIPI calibration: %d\n",
+ err);
+
+ pm_runtime_put(dsi->dev);
+}
+
static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
{
struct tegra_output *output = encoder_to_output(encoder);
@@ -837,7 +891,26 @@ static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
tegra_dsi_disable(dsi);
- return;
+ tegra_dsi_unprepare(dsi);
+}
+
+static void tegra_dsi_prepare(struct tegra_dsi *dsi)
+{
+ int err;
+
+ pm_runtime_get_sync(dsi->dev);
+
+ err = tegra_mipi_enable(dsi->mipi);
+ if (err < 0)
+ dev_err(dsi->dev, "failed to enable MIPI calibration: %d\n",
+ err);
+
+ err = tegra_dsi_pad_calibrate(dsi);
+ if (err < 0)
+ dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
+
+ if (dsi->slave)
+ tegra_dsi_prepare(dsi->slave);
}
static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
@@ -849,6 +922,8 @@ static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
struct tegra_dsi_state *state;
u32 value;
+ tegra_dsi_prepare(dsi);
+
state = tegra_dsi_get_state(dsi);
tegra_dsi_set_timeout(dsi, state->bclk, state->vrefresh);
@@ -876,8 +951,6 @@ static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
if (output->panel)
drm_panel_enable(output->panel);
-
- return;
}
static int
@@ -967,55 +1040,12 @@ static const struct drm_encoder_helper_funcs tegra_dsi_encoder_helper_funcs = {
.atomic_check = tegra_dsi_encoder_atomic_check,
};
-static int tegra_dsi_pad_enable(struct tegra_dsi *dsi)
-{
- u32 value;
-
- value = DSI_PAD_CONTROL_VS1_PULLDN(0) | DSI_PAD_CONTROL_VS1_PDIO(0);
- tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_0);
-
- return 0;
-}
-
-static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
-{
- u32 value;
-
- tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0);
- tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1);
- tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_2);
- tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_3);
- tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_4);
-
- /* start calibration */
- tegra_dsi_pad_enable(dsi);
-
- value = DSI_PAD_SLEW_UP(0x7) | DSI_PAD_SLEW_DN(0x7) |
- DSI_PAD_LP_UP(0x1) | DSI_PAD_LP_DN(0x1) |
- DSI_PAD_OUT_CLK(0x0);
- tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_2);
-
- value = DSI_PAD_PREEMP_PD_CLK(0x3) | DSI_PAD_PREEMP_PU_CLK(0x3) |
- DSI_PAD_PREEMP_PD(0x03) | DSI_PAD_PREEMP_PU(0x3);
- tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_3);
-
- return tegra_mipi_calibrate(dsi->mipi);
-}
-
static int tegra_dsi_init(struct host1x_client *client)
{
struct drm_device *drm = dev_get_drvdata(client->parent);
struct tegra_dsi *dsi = host1x_client_to_dsi(client);
int err;
- reset_control_deassert(dsi->rst);
-
- err = tegra_dsi_pad_calibrate(dsi);
- if (err < 0) {
- dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
- goto reset;
- }
-
/* Gangsters must not register their own outputs. */
if (!dsi->master) {
dsi->output.dev = client->dev;
@@ -1038,12 +1068,9 @@ static int tegra_dsi_init(struct host1x_client *client)
drm_connector_register(&dsi->output.connector);
err = tegra_output_init(drm, &dsi->output);
- if (err < 0) {
- dev_err(client->dev,
- "failed to initialize output: %d\n",
+ if (err < 0)
+ dev_err(dsi->dev, "failed to initialize output: %d\n",
err);
- goto reset;
- }
dsi->output.encoder.possible_crtcs = 0x3;
}
@@ -1055,10 +1082,6 @@ static int tegra_dsi_init(struct host1x_client *client)
}
return 0;
-
-reset:
- reset_control_assert(dsi->rst);
- return err;
}
static int tegra_dsi_exit(struct host1x_client *client)
@@ -1070,7 +1093,7 @@ static int tegra_dsi_exit(struct host1x_client *client)
if (IS_ENABLED(CONFIG_DEBUG_FS))
tegra_dsi_debugfs_exit(dsi);
- reset_control_assert(dsi->rst);
+ regulator_disable(dsi->vdd);
return 0;
}
@@ -1494,74 +1517,50 @@ static int tegra_dsi_probe(struct platform_device *pdev)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->lanes = 4;
- dsi->rst = devm_reset_control_get(&pdev->dev, "dsi");
- if (IS_ERR(dsi->rst))
- return PTR_ERR(dsi->rst);
+ if (!pdev->dev.pm_domain) {
+ dsi->rst = devm_reset_control_get(&pdev->dev, "dsi");
+ if (IS_ERR(dsi->rst))
+ return PTR_ERR(dsi->rst);
+ }
dsi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dsi->clk)) {
dev_err(&pdev->dev, "cannot get DSI clock\n");
- err = PTR_ERR(dsi->clk);
- goto reset;
- }
-
- err = clk_prepare_enable(dsi->clk);
- if (err < 0) {
- dev_err(&pdev->dev, "cannot enable DSI clock\n");
- goto reset;
+ return PTR_ERR(dsi->clk);
}
dsi->clk_lp = devm_clk_get(&pdev->dev, "lp");
if (IS_ERR(dsi->clk_lp)) {
dev_err(&pdev->dev, "cannot get low-power clock\n");
- err = PTR_ERR(dsi->clk_lp);
- goto disable_clk;
- }
-
- err = clk_prepare_enable(dsi->clk_lp);
- if (err < 0) {
- dev_err(&pdev->dev, "cannot enable low-power clock\n");
- goto disable_clk;
+ return PTR_ERR(dsi->clk_lp);
}
dsi->clk_parent = devm_clk_get(&pdev->dev, "parent");
if (IS_ERR(dsi->clk_parent)) {
dev_err(&pdev->dev, "cannot get parent clock\n");
- err = PTR_ERR(dsi->clk_parent);
- goto disable_clk_lp;
+ return PTR_ERR(dsi->clk_parent);
}
dsi->vdd = devm_regulator_get(&pdev->dev, "avdd-dsi-csi");
if (IS_ERR(dsi->vdd)) {
dev_err(&pdev->dev, "cannot get VDD supply\n");
- err = PTR_ERR(dsi->vdd);
- goto disable_clk_lp;
- }
-
- err = regulator_enable(dsi->vdd);
- if (err < 0) {
- dev_err(&pdev->dev, "cannot enable VDD supply\n");
- goto disable_clk_lp;
+ return PTR_ERR(dsi->vdd);
}
err = tegra_dsi_setup_clocks(dsi);
if (err < 0) {
dev_err(&pdev->dev, "cannot setup clocks\n");
- goto disable_vdd;
+ return err;
}
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dsi->regs = devm_ioremap_resource(&pdev->dev, regs);
- if (IS_ERR(dsi->regs)) {
- err = PTR_ERR(dsi->regs);
- goto disable_vdd;
- }
+ if (IS_ERR(dsi->regs))
+ return PTR_ERR(dsi->regs);
dsi->mipi = tegra_mipi_request(&pdev->dev);
- if (IS_ERR(dsi->mipi)) {
- err = PTR_ERR(dsi->mipi);
- goto disable_vdd;
- }
+ if (IS_ERR(dsi->mipi))
+ return PTR_ERR(dsi->mipi);
dsi->host.ops = &tegra_dsi_host_ops;
dsi->host.dev = &pdev->dev;
@@ -1572,6 +1571,9 @@ static int tegra_dsi_probe(struct platform_device *pdev)
goto mipi_free;
}
+ platform_set_drvdata(pdev, dsi);
+ pm_runtime_enable(&pdev->dev);
+
INIT_LIST_HEAD(&dsi->client.list);
dsi->client.ops = &dsi_client_ops;
dsi->client.dev = &pdev->dev;
@@ -1583,22 +1585,12 @@ static int tegra_dsi_probe(struct platform_device *pdev)
goto unregister;
}
- platform_set_drvdata(pdev, dsi);
-
return 0;
unregister:
mipi_dsi_host_unregister(&dsi->host);
mipi_free:
tegra_mipi_free(dsi->mipi);
-disable_vdd:
- regulator_disable(dsi->vdd);
-disable_clk_lp:
- clk_disable_unprepare(dsi->clk_lp);
-disable_clk:
- clk_disable_unprepare(dsi->clk);
-reset:
- reset_control_assert(dsi->rst);
return err;
}
@@ -1607,6 +1599,8 @@ static int tegra_dsi_remove(struct platform_device *pdev)
struct tegra_dsi *dsi = platform_get_drvdata(pdev);
int err;
+ pm_runtime_disable(&pdev->dev);
+
err = host1x_client_unregister(&dsi->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
@@ -1619,13 +1613,81 @@ static int tegra_dsi_remove(struct platform_device *pdev)
mipi_dsi_host_unregister(&dsi->host);
tegra_mipi_free(dsi->mipi);
- regulator_disable(dsi->vdd);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_dsi_suspend(struct device *dev)
+{
+ struct tegra_dsi *dsi = dev_get_drvdata(dev);
+ int err;
+
+ if (dsi->rst) {
+ err = reset_control_assert(dsi->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
+ }
+
+ usleep_range(1000, 2000);
+
clk_disable_unprepare(dsi->clk_lp);
clk_disable_unprepare(dsi->clk);
- reset_control_assert(dsi->rst);
+
+ regulator_disable(dsi->vdd);
+
+ return 0;
+}
+
+static int tegra_dsi_resume(struct device *dev)
+{
+ struct tegra_dsi *dsi = dev_get_drvdata(dev);
+ int err;
+
+ err = regulator_enable(dsi->vdd);
+ if (err < 0) {
+ dev_err(dsi->dev, "failed to enable VDD supply: %d\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(dsi->clk);
+ if (err < 0) {
+ dev_err(dev, "cannot enable DSI clock: %d\n", err);
+ goto disable_vdd;
+ }
+
+ err = clk_prepare_enable(dsi->clk_lp);
+ if (err < 0) {
+ dev_err(dev, "cannot enable low-power clock: %d\n", err);
+ goto disable_clk;
+ }
+
+ usleep_range(1000, 2000);
+
+ if (dsi->rst) {
+ err = reset_control_deassert(dsi->rst);
+ if (err < 0) {
+ dev_err(dev, "cannot assert reset: %d\n", err);
+ goto disable_clk_lp;
+ }
+ }
return 0;
+
+disable_clk_lp:
+ clk_disable_unprepare(dsi->clk_lp);
+disable_clk:
+ clk_disable_unprepare(dsi->clk);
+disable_vdd:
+ regulator_disable(dsi->vdd);
+ return err;
}
+#endif
+
+static const struct dev_pm_ops tegra_dsi_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_dsi_suspend, tegra_dsi_resume, NULL)
+};
static const struct of_device_id tegra_dsi_of_match[] = {
{ .compatible = "nvidia,tegra210-dsi", },
@@ -1640,6 +1702,7 @@ struct platform_driver tegra_dsi_driver = {
.driver = {
.name = "tegra-dsi",
.of_match_table = tegra_dsi_of_match,
+ .pm = &tegra_dsi_pm_ops,
},
.probe = tegra_dsi_probe,
.remove = tegra_dsi_remove,
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 1b12aa7a7..e6d71fa40 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -68,7 +68,7 @@ static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
struct tegra_bo *bo = fb->planes[i];
if (bo) {
- if (bo->pages && bo->vaddr)
+ if (bo->pages)
vunmap(bo->vaddr);
drm_gem_object_unreference_unlocked(&bo->gem);
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index b7ef4929e..cda0491ed 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -11,6 +11,7 @@
#include <linux/debugfs.h>
#include <linux/gpio.h>
#include <linux/hdmi.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
@@ -18,10 +19,14 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <sound/hda_verbs.h>
+
#include "hdmi.h"
#include "drm.h"
#include "dc.h"
+#define HDMI_ELD_BUFFER_SIZE 96
+
struct tmds_config {
unsigned int pclk;
u32 pll0;
@@ -39,6 +44,8 @@ struct tegra_hdmi_config {
u32 fuse_override_value;
bool has_sor_io_peak_current;
+ bool has_hda;
+ bool has_hbr;
};
struct tegra_hdmi {
@@ -60,7 +67,10 @@ struct tegra_hdmi {
const struct tegra_hdmi_config *config;
unsigned int audio_source;
- unsigned int audio_freq;
+ unsigned int audio_sample_rate;
+ unsigned int audio_channels;
+
+ unsigned int pixel_clock;
bool stereo;
bool dvi;
@@ -402,11 +412,11 @@ static const struct tmds_config tegra124_tmds_config[] = {
};
static const struct tegra_hdmi_audio_config *
-tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pclk)
+tegra_hdmi_get_audio_config(unsigned int sample_rate, unsigned int pclk)
{
const struct tegra_hdmi_audio_config *table;
- switch (audio_freq) {
+ switch (sample_rate) {
case 32000:
table = tegra_hdmi_audio_32k;
break;
@@ -476,44 +486,114 @@ static void tegra_hdmi_setup_audio_fs_tables(struct tegra_hdmi *hdmi)
}
}
-static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk)
+static void tegra_hdmi_write_aval(struct tegra_hdmi *hdmi, u32 value)
+{
+ static const struct {
+ unsigned int sample_rate;
+ unsigned int offset;
+ } regs[] = {
+ { 32000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320 },
+ { 44100, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441 },
+ { 48000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480 },
+ { 88200, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882 },
+ { 96000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960 },
+ { 176400, HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764 },
+ { 192000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920 },
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ if (regs[i].sample_rate == hdmi->audio_sample_rate) {
+ tegra_hdmi_writel(hdmi, value, regs[i].offset);
+ break;
+ }
+ }
+}
+
+static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi)
{
- struct device_node *node = hdmi->dev->of_node;
const struct tegra_hdmi_audio_config *config;
- unsigned int offset = 0;
- u32 value;
+ u32 source, value;
switch (hdmi->audio_source) {
case HDA:
- value = AUDIO_CNTRL0_SOURCE_SELECT_HDAL;
+ if (hdmi->config->has_hda)
+ source = SOR_AUDIO_CNTRL0_SOURCE_SELECT_HDAL;
+ else
+ return -EINVAL;
+
break;
case SPDIF:
- value = AUDIO_CNTRL0_SOURCE_SELECT_SPDIF;
+ if (hdmi->config->has_hda)
+ source = SOR_AUDIO_CNTRL0_SOURCE_SELECT_SPDIF;
+ else
+ source = AUDIO_CNTRL0_SOURCE_SELECT_SPDIF;
break;
default:
- value = AUDIO_CNTRL0_SOURCE_SELECT_AUTO;
+ if (hdmi->config->has_hda)
+ source = SOR_AUDIO_CNTRL0_SOURCE_SELECT_AUTO;
+ else
+ source = AUDIO_CNTRL0_SOURCE_SELECT_AUTO;
break;
}
- if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
- value |= AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
- AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0);
- tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
- } else {
- value |= AUDIO_CNTRL0_INJECT_NULLSMPL;
+ /*
+ * Tegra30 and later use a slightly modified version of the register
+ * layout to accommodate changes related to supporting HDA as the
+ * audio input source for HDMI. The source select field has moved to
+ * the SOR_AUDIO_CNTRL0 register, but the error tolerance and frames
+ * per block fields remain in the AUDIO_CNTRL0 register.
+ */
+ if (hdmi->config->has_hda) {
+ /*
+ * Inject null samples into the audio FIFO for every frame in
+ * which the codec did not receive any samples. This applies
+ * to stereo LPCM only.
+ *
+ * XXX: This seems to be a remnant of MCP days when this was
+ * used to work around issues with monitors not being able to
+ * play back system startup sounds early. It is possibly not
+ * needed on Linux at all.
+ */
+ if (hdmi->audio_channels == 2)
+ value = SOR_AUDIO_CNTRL0_INJECT_NULLSMPL;
+ else
+ value = 0;
+
+ value |= source;
+
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
+ }
- value = AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
- AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0);
- tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
+ /*
+ * On Tegra20, HDA is not a supported audio source and the source
+ * select field is part of the AUDIO_CNTRL0 register.
+ */
+ value = AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0) |
+ AUDIO_CNTRL0_ERROR_TOLERANCE(6);
+
+ if (!hdmi->config->has_hda)
+ value |= source;
+
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
+
+ /*
+ * Advertise support for High Bit-Rate on Tegra114 and later.
+ */
+ if (hdmi->config->has_hbr) {
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_AUDIO_SPARE0);
+ value |= SOR_AUDIO_SPARE0_HBR_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_SPARE0);
}
- config = tegra_hdmi_get_audio_config(hdmi->audio_freq, pclk);
+ config = tegra_hdmi_get_audio_config(hdmi->audio_sample_rate,
+ hdmi->pixel_clock);
if (!config) {
- dev_err(hdmi->dev, "cannot set audio to %u at %u pclk\n",
- hdmi->audio_freq, pclk);
+ dev_err(hdmi->dev,
+ "cannot set audio to %u Hz at %u Hz pixel clock\n",
+ hdmi->audio_sample_rate, hdmi->pixel_clock);
return -EINVAL;
}
@@ -526,8 +606,8 @@ static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk)
tegra_hdmi_writel(hdmi, ACR_SUBPACK_N(config->n) | ACR_ENABLE,
HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
- value = ACR_SUBPACK_CTS(config->cts);
- tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
+ tegra_hdmi_writel(hdmi, ACR_SUBPACK_CTS(config->cts),
+ HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
value = SPARE_HW_CTS | SPARE_FORCE_SW_CTS | SPARE_CTS_RESET_VAL(1);
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_SPARE);
@@ -536,43 +616,53 @@ static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk)
value &= ~AUDIO_N_RESETF;
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N);
- if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
- switch (hdmi->audio_freq) {
- case 32000:
- offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320;
- break;
+ if (hdmi->config->has_hda)
+ tegra_hdmi_write_aval(hdmi, config->aval);
- case 44100:
- offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441;
- break;
+ tegra_hdmi_setup_audio_fs_tables(hdmi);
- case 48000:
- offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480;
- break;
+ return 0;
+}
- case 88200:
- offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882;
- break;
+static void tegra_hdmi_disable_audio(struct tegra_hdmi *hdmi)
+{
+ u32 value;
- case 96000:
- offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960;
- break;
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ value &= ~GENERIC_CTRL_AUDIO;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+}
- case 176400:
- offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764;
- break;
+static void tegra_hdmi_enable_audio(struct tegra_hdmi *hdmi)
+{
+ u32 value;
- case 192000:
- offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920;
- break;
- }
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ value |= GENERIC_CTRL_AUDIO;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+}
- tegra_hdmi_writel(hdmi, config->aval, offset);
- }
+static void tegra_hdmi_write_eld(struct tegra_hdmi *hdmi)
+{
+ size_t length = drm_eld_size(hdmi->output.connector.eld), i;
+ u32 value;
- tegra_hdmi_setup_audio_fs_tables(hdmi);
+ for (i = 0; i < length; i++)
+ tegra_hdmi_writel(hdmi, i << 8 | hdmi->output.connector.eld[i],
+ HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
- return 0;
+ /*
+ * The HDA codec will always report an ELD buffer size of 96 bytes and
+ * the HDA codec driver will check that each byte read from the buffer
+ * is valid. Therefore every byte must be written, even if fewer than
+ * 96 bytes were parsed from the EDID.
+ */
+ for (i = length; i < HDMI_ELD_BUFFER_SIZE; i++)
+ tegra_hdmi_writel(hdmi, i << 8 | 0,
+ HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
+
+ value = SOR_AUDIO_HDA_PRESENSE_VALID | SOR_AUDIO_HDA_PRESENSE_PRESENT;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
}
static inline u32 tegra_hdmi_subpack(const u8 *ptr, size_t size)
@@ -644,12 +734,6 @@ static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
u8 buffer[17];
ssize_t err;
- if (hdmi->dvi) {
- tegra_hdmi_writel(hdmi, 0,
- HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
- return;
- }
-
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
if (err < 0) {
dev_err(hdmi->dev, "failed to setup AVI infoframe: %zd\n", err);
@@ -663,9 +747,24 @@ static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
}
tegra_hdmi_write_infopack(hdmi, buffer, err);
+}
+
+static void tegra_hdmi_disable_avi_infoframe(struct tegra_hdmi *hdmi)
+{
+ u32 value;
- tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
- HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+ value &= ~INFOFRAME_CTRL_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+}
+
+static void tegra_hdmi_enable_avi_infoframe(struct tegra_hdmi *hdmi)
+{
+ u32 value;
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+ value |= INFOFRAME_CTRL_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
}
static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
@@ -674,12 +773,6 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
u8 buffer[14];
ssize_t err;
- if (hdmi->dvi) {
- tegra_hdmi_writel(hdmi, 0,
- HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
- return;
- }
-
err = hdmi_audio_infoframe_init(&frame);
if (err < 0) {
dev_err(hdmi->dev, "failed to setup audio infoframe: %zd\n",
@@ -687,7 +780,7 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
return;
}
- frame.channels = 2;
+ frame.channels = hdmi->audio_channels;
err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
if (err < 0) {
@@ -703,9 +796,24 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
* bytes can be programmed.
*/
tegra_hdmi_write_infopack(hdmi, buffer, min_t(size_t, 10, err));
+}
- tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
- HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+static void tegra_hdmi_disable_audio_infoframe(struct tegra_hdmi *hdmi)
+{
+ u32 value;
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+ value &= ~INFOFRAME_CTRL_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+}
+
+static void tegra_hdmi_enable_audio_infoframe(struct tegra_hdmi *hdmi)
+{
+ u32 value;
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+ value |= INFOFRAME_CTRL_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
}
static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
@@ -713,14 +821,6 @@ static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
struct hdmi_vendor_infoframe frame;
u8 buffer[10];
ssize_t err;
- u32 value;
-
- if (!hdmi->stereo) {
- value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
- value &= ~GENERIC_CTRL_ENABLE;
- tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
- return;
- }
hdmi_vendor_infoframe_init(&frame);
frame.s3d_struct = HDMI_3D_STRUCTURE_FRAME_PACKING;
@@ -733,6 +833,20 @@ static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
}
tegra_hdmi_write_infopack(hdmi, buffer, err);
+}
+
+static void tegra_hdmi_disable_stereo_infoframe(struct tegra_hdmi *hdmi)
+{
+ u32 value;
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ value &= ~GENERIC_CTRL_ENABLE;
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+}
+
+static void tegra_hdmi_enable_stereo_infoframe(struct tegra_hdmi *hdmi)
+{
+ u32 value;
value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
value |= GENERIC_CTRL_ENABLE;
@@ -772,10 +886,25 @@ static bool tegra_output_is_hdmi(struct tegra_output *output)
return drm_detect_hdmi_monitor(edid);
}
+static enum drm_connector_status
+tegra_hdmi_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct tegra_output *output = connector_to_output(connector);
+ struct tegra_hdmi *hdmi = to_hdmi(output);
+ enum drm_connector_status status;
+
+ status = tegra_output_connector_detect(connector, force);
+ if (status == connector_status_connected)
+ return status;
+
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
+ return status;
+}
+
static const struct drm_connector_funcs tegra_hdmi_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
- .detect = tegra_output_connector_detect,
+ .detect = tegra_hdmi_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = tegra_output_connector_destroy,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -806,7 +935,6 @@ static const struct drm_connector_helper_funcs
tegra_hdmi_connector_helper_funcs = {
.get_modes = tegra_output_connector_get_modes,
.mode_valid = tegra_hdmi_connector_mode_valid,
- .best_encoder = tegra_output_connector_best_encoder,
};
static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = {
@@ -815,7 +943,9 @@ static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = {
static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
{
+ struct tegra_output *output = encoder_to_output(encoder);
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+ struct tegra_hdmi *hdmi = to_hdmi(output);
u32 value;
/*
@@ -829,6 +959,20 @@ static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
tegra_dc_commit(dc);
}
+
+ if (!hdmi->dvi) {
+ if (hdmi->stereo)
+ tegra_hdmi_disable_stereo_infoframe(hdmi);
+
+ tegra_hdmi_disable_audio_infoframe(hdmi);
+ tegra_hdmi_disable_avi_infoframe(hdmi);
+ tegra_hdmi_disable_audio(hdmi);
+ }
+
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_ENABLE);
+ tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_MASK);
+
+ pm_runtime_put(hdmi->dev);
}
static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
@@ -837,21 +981,28 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
unsigned int h_sync_width, h_front_porch, h_back_porch, i, rekey;
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
- struct device_node *node = output->dev->of_node;
struct tegra_hdmi *hdmi = to_hdmi(output);
- unsigned int pulse_start, div82, pclk;
+ unsigned int pulse_start, div82;
int retries = 1000;
u32 value;
int err;
- hdmi->dvi = !tegra_output_is_hdmi(output);
+ pm_runtime_get_sync(hdmi->dev);
- pclk = mode->clock * 1000;
+ /*
+ * Enable and unmask the HDA codec SCRATCH0 register interrupt. This
+ * is used for interoperability between the HDA codec driver and the
+ * HDMI driver.
+ */
+ tegra_hdmi_writel(hdmi, INT_CODEC_SCRATCH0, HDMI_NV_PDISP_INT_ENABLE);
+ tegra_hdmi_writel(hdmi, INT_CODEC_SCRATCH0, HDMI_NV_PDISP_INT_MASK);
+
+ hdmi->pixel_clock = mode->clock * 1000;
h_sync_width = mode->hsync_end - mode->hsync_start;
h_back_porch = mode->htotal - mode->hsync_end;
h_front_porch = mode->hsync_start - mode->hdisplay;
- err = clk_set_rate(hdmi->clk, pclk);
+ err = clk_set_rate(hdmi->clk, hdmi->pixel_clock);
if (err < 0) {
dev_err(hdmi->dev, "failed to set HDMI clock frequency: %d\n",
err);
@@ -910,17 +1061,15 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
value = SOR_REFCLK_DIV_INT(div82 >> 2) | SOR_REFCLK_DIV_FRAC(div82);
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_REFCLK);
+ hdmi->dvi = !tegra_output_is_hdmi(output);
if (!hdmi->dvi) {
- err = tegra_hdmi_setup_audio(hdmi, pclk);
+ err = tegra_hdmi_setup_audio(hdmi);
if (err < 0)
hdmi->dvi = true;
}
- if (of_device_is_compatible(node, "nvidia,tegra20-hdmi")) {
- /*
- * TODO: add ELD support
- */
- }
+ if (hdmi->config->has_hda)
+ tegra_hdmi_write_eld(hdmi);
rekey = HDMI_REKEY_DEFAULT;
value = HDMI_CTRL_REKEY(rekey);
@@ -932,20 +1081,17 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_CTRL);
- if (hdmi->dvi)
- tegra_hdmi_writel(hdmi, 0x0,
- HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
- else
- tegra_hdmi_writel(hdmi, GENERIC_CTRL_AUDIO,
- HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+ if (!hdmi->dvi) {
+ tegra_hdmi_setup_avi_infoframe(hdmi, mode);
+ tegra_hdmi_setup_audio_infoframe(hdmi);
- tegra_hdmi_setup_avi_infoframe(hdmi, mode);
- tegra_hdmi_setup_audio_infoframe(hdmi);
- tegra_hdmi_setup_stereo_infoframe(hdmi);
+ if (hdmi->stereo)
+ tegra_hdmi_setup_stereo_infoframe(hdmi);
+ }
/* TMDS CONFIG */
for (i = 0; i < hdmi->config->num_tmds; i++) {
- if (pclk <= hdmi->config->tmds[i].pclk) {
+ if (hdmi->pixel_clock <= hdmi->config->tmds[i].pclk) {
tegra_hdmi_setup_tmds(hdmi, &hdmi->config->tmds[i]);
break;
}
@@ -1032,6 +1178,15 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
tegra_dc_commit(dc);
+ if (!hdmi->dvi) {
+ tegra_hdmi_enable_avi_infoframe(hdmi);
+ tegra_hdmi_enable_audio_infoframe(hdmi);
+ tegra_hdmi_enable_audio(hdmi);
+
+ if (hdmi->stereo)
+ tegra_hdmi_enable_stereo_infoframe(hdmi);
+ }
+
/* TODO: add HDCP support */
}
@@ -1236,8 +1391,14 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG);
DUMP_REG(HDMI_NV_PDISP_KEY_SKEY_INDEX);
DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_SPARE0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH0);
+ DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH1);
DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
+ DUMP_REG(HDMI_NV_PDISP_INT_STATUS);
+ DUMP_REG(HDMI_NV_PDISP_INT_MASK);
+ DUMP_REG(HDMI_NV_PDISP_INT_ENABLE);
DUMP_REG(HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT);
#undef DUMP_REG
@@ -1361,14 +1522,6 @@ static int tegra_hdmi_init(struct host1x_client *client)
return err;
}
- err = clk_prepare_enable(hdmi->clk);
- if (err < 0) {
- dev_err(hdmi->dev, "failed to enable clock: %d\n", err);
- return err;
- }
-
- reset_control_deassert(hdmi->rst);
-
return 0;
}
@@ -1378,9 +1531,6 @@ static int tegra_hdmi_exit(struct host1x_client *client)
tegra_output_exit(&hdmi->output);
- reset_control_assert(hdmi->rst);
- clk_disable_unprepare(hdmi->clk);
-
regulator_disable(hdmi->vdd);
regulator_disable(hdmi->pll);
regulator_disable(hdmi->hdmi);
@@ -1402,6 +1552,8 @@ static const struct tegra_hdmi_config tegra20_hdmi_config = {
.fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT,
.fuse_override_value = 1 << 31,
.has_sor_io_peak_current = false,
+ .has_hda = false,
+ .has_hbr = false,
};
static const struct tegra_hdmi_config tegra30_hdmi_config = {
@@ -1410,6 +1562,8 @@ static const struct tegra_hdmi_config tegra30_hdmi_config = {
.fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT,
.fuse_override_value = 1 << 31,
.has_sor_io_peak_current = false,
+ .has_hda = true,
+ .has_hbr = false,
};
static const struct tegra_hdmi_config tegra114_hdmi_config = {
@@ -1418,6 +1572,8 @@ static const struct tegra_hdmi_config tegra114_hdmi_config = {
.fuse_override_offset = HDMI_NV_PDISP_SOR_PAD_CTLS0,
.fuse_override_value = 1 << 31,
.has_sor_io_peak_current = true,
+ .has_hda = true,
+ .has_hbr = true,
};
static const struct tegra_hdmi_config tegra124_hdmi_config = {
@@ -1426,6 +1582,8 @@ static const struct tegra_hdmi_config tegra124_hdmi_config = {
.fuse_override_offset = HDMI_NV_PDISP_SOR_PAD_CTLS0,
.fuse_override_value = 1 << 31,
.has_sor_io_peak_current = true,
+ .has_hda = true,
+ .has_hbr = true,
};
static const struct of_device_id tegra_hdmi_of_match[] = {
@@ -1437,6 +1595,67 @@ static const struct of_device_id tegra_hdmi_of_match[] = {
};
MODULE_DEVICE_TABLE(of, tegra_hdmi_of_match);
+static void hda_format_parse(unsigned int format, unsigned int *rate,
+ unsigned int *channels)
+{
+ unsigned int mul, div;
+
+ if (format & AC_FMT_BASE_44K)
+ *rate = 44100;
+ else
+ *rate = 48000;
+
+ mul = (format & AC_FMT_MULT_MASK) >> AC_FMT_MULT_SHIFT;
+ div = (format & AC_FMT_DIV_MASK) >> AC_FMT_DIV_SHIFT;
+
+ *rate = *rate * (mul + 1) / (div + 1);
+
+ *channels = (format & AC_FMT_CHAN_MASK) >> AC_FMT_CHAN_SHIFT;
+}
+
+static irqreturn_t tegra_hdmi_irq(int irq, void *data)
+{
+ struct tegra_hdmi *hdmi = data;
+ u32 value;
+ int err;
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_INT_STATUS);
+ tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_INT_STATUS);
+
+ if (value & INT_CODEC_SCRATCH0) {
+ unsigned int format;
+ u32 value;
+
+ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH0);
+
+ if (value & SOR_AUDIO_HDA_CODEC_SCRATCH0_VALID) {
+ unsigned int sample_rate, channels;
+
+ format = value & SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK;
+
+ hda_format_parse(format, &sample_rate, &channels);
+
+ hdmi->audio_sample_rate = sample_rate;
+ hdmi->audio_channels = channels;
+
+ err = tegra_hdmi_setup_audio(hdmi);
+ if (err < 0) {
+ tegra_hdmi_disable_audio_infoframe(hdmi);
+ tegra_hdmi_disable_audio(hdmi);
+ } else {
+ tegra_hdmi_setup_audio_infoframe(hdmi);
+ tegra_hdmi_enable_audio_infoframe(hdmi);
+ tegra_hdmi_enable_audio(hdmi);
+ }
+ } else {
+ tegra_hdmi_disable_audio_infoframe(hdmi);
+ tegra_hdmi_disable_audio(hdmi);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
static int tegra_hdmi_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
@@ -1454,8 +1673,10 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
hdmi->config = match->data;
hdmi->dev = &pdev->dev;
+
hdmi->audio_source = AUTO;
- hdmi->audio_freq = 44100;
+ hdmi->audio_sample_rate = 48000;
+ hdmi->audio_channels = 2;
hdmi->stereo = false;
hdmi->dvi = false;
@@ -1516,6 +1737,17 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
hdmi->irq = err;
+ err = devm_request_irq(hdmi->dev, hdmi->irq, tegra_hdmi_irq, 0,
+ dev_name(hdmi->dev), hdmi);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n",
+ hdmi->irq, err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, hdmi);
+ pm_runtime_enable(&pdev->dev);
+
INIT_LIST_HEAD(&hdmi->client.list);
hdmi->client.ops = &hdmi_client_ops;
hdmi->client.dev = &pdev->dev;
@@ -1527,8 +1759,6 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
return err;
}
- platform_set_drvdata(pdev, hdmi);
-
return 0;
}
@@ -1537,6 +1767,8 @@ static int tegra_hdmi_remove(struct platform_device *pdev)
struct tegra_hdmi *hdmi = platform_get_drvdata(pdev);
int err;
+ pm_runtime_disable(&pdev->dev);
+
err = host1x_client_unregister(&hdmi->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
@@ -1546,17 +1778,61 @@ static int tegra_hdmi_remove(struct platform_device *pdev)
tegra_output_remove(&hdmi->output);
- clk_disable_unprepare(hdmi->clk_parent);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_hdmi_suspend(struct device *dev)
+{
+ struct tegra_hdmi *hdmi = dev_get_drvdata(dev);
+ int err;
+
+ err = reset_control_assert(hdmi->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
+
+ usleep_range(1000, 2000);
+
clk_disable_unprepare(hdmi->clk);
return 0;
}
+static int tegra_hdmi_resume(struct device *dev)
+{
+ struct tegra_hdmi *hdmi = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(hdmi->clk);
+ if (err < 0) {
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ usleep_range(1000, 2000);
+
+ err = reset_control_deassert(hdmi->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to deassert reset: %d\n", err);
+ clk_disable_unprepare(hdmi->clk);
+ return err;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops tegra_hdmi_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_hdmi_suspend, tegra_hdmi_resume, NULL)
+};
+
struct platform_driver tegra_hdmi_driver = {
.driver = {
.name = "tegra-hdmi",
- .owner = THIS_MODULE,
.of_match_table = tegra_hdmi_of_match,
+ .pm = &tegra_hdmi_pm_ops,
},
.probe = tegra_hdmi_probe,
.remove = tegra_hdmi_remove,
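[Note on the hdmi.c changes above: clock and reset handling moves out of the host1x init/exit hooks into runtime PM callbacks, with the encoder enable/disable paths expected to bracket hardware access with pm_runtime_get_sync()/pm_runtime_put(). A minimal sketch of the pattern, assuming a hypothetical device with one clock and one reset line; the "foo" names are illustrative, not from the patch.]

/* Sketch only: mirrors the clk/reset ordering of tegra_hdmi_suspend/resume. */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

struct foo {
	struct clk *clk;
	struct reset_control *rst;
};

static int foo_runtime_suspend(struct device *dev)
{
	struct foo *foo = dev_get_drvdata(dev);
	int err;

	/* quiesce the block first, then gate its clock */
	err = reset_control_assert(foo->rst);
	if (err < 0)
		return err;

	usleep_range(1000, 2000);
	clk_disable_unprepare(foo->clk);
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	struct foo *foo = dev_get_drvdata(dev);
	int err;

	/* ungate the clock before releasing the reset */
	err = clk_prepare_enable(foo->clk);
	if (err < 0)
		return err;

	usleep_range(1000, 2000);

	err = reset_control_deassert(foo->rst);
	if (err < 0)
		clk_disable_unprepare(foo->clk);

	return err;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

[The ordering is deliberately symmetric: suspend asserts reset while the clock still runs, resume re-enables the clock before deasserting reset, and a failed deassert unwinds the clock so the device is never left half-resumed.]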
diff --git a/drivers/gpu/drm/tegra/hdmi.h b/drivers/gpu/drm/tegra/hdmi.h
index a88251438..2339f134a 100644
--- a/drivers/gpu/drm/tegra/hdmi.h
+++ b/drivers/gpu/drm/tegra/hdmi.h
@@ -468,9 +468,20 @@
#define HDMI_NV_PDISP_KEY_SKEY_INDEX 0xa3
#define HDMI_NV_PDISP_SOR_AUDIO_CNTRL0 0xac
-#define AUDIO_CNTRL0_INJECT_NULLSMPL (1 << 29)
+#define SOR_AUDIO_CNTRL0_SOURCE_SELECT_AUTO (0 << 20)
+#define SOR_AUDIO_CNTRL0_SOURCE_SELECT_SPDIF (1 << 20)
+#define SOR_AUDIO_CNTRL0_SOURCE_SELECT_HDAL (2 << 20)
+#define SOR_AUDIO_CNTRL0_INJECT_NULLSMPL (1 << 29)
+#define HDMI_NV_PDISP_SOR_AUDIO_SPARE0 0xae
+#define SOR_AUDIO_SPARE0_HBR_ENABLE (1 << 27)
+#define HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH0 0xba
+#define SOR_AUDIO_HDA_CODEC_SCRATCH0_VALID (1 << 30)
+#define SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK 0xffff
+#define HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH1 0xbb
#define HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR 0xbc
#define HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE 0xbd
+#define SOR_AUDIO_HDA_PRESENSE_VALID (1 << 1)
+#define SOR_AUDIO_HDA_PRESENSE_PRESENT (1 << 0)
#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320 0xbf
#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441 0xc0
@@ -481,6 +492,14 @@
#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920 0xc5
#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_DEFAULT 0xc5
+#define HDMI_NV_PDISP_INT_STATUS 0xcc
+#define INT_SCRATCH (1 << 3)
+#define INT_CP_REQUEST (1 << 2)
+#define INT_CODEC_SCRATCH1 (1 << 1)
+#define INT_CODEC_SCRATCH0 (1 << 0)
+#define HDMI_NV_PDISP_INT_MASK 0xcd
+#define HDMI_NV_PDISP_INT_ENABLE 0xce
+
#define HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT 0xd1
#define PEAK_CURRENT_LANE0(x) (((x) & 0x7f) << 0)
#define PEAK_CURRENT_LANE1(x) (((x) & 0x7f) << 8)
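[The new SCRATCH0 definitions in hdmi.h feed hda_format_parse(), which decodes the HDA stream-format word the audio codec leaves in the register. A worked example of the decoding, assuming the standard HDA format layout behind the AC_FMT_* macros: 44.1 kHz base bit at 14, 3-bit multiplier at 11, 3-bit divider at 8, 4-bit channel field at 0.]

#include <stdio.h>

/* Assumed HDA stream-format fields (AC_FMT_* equivalents). */
#define FMT_BASE_44K	(1 << 14)
#define FMT_MULT_SHIFT	11
#define FMT_MULT_MASK	(7 << FMT_MULT_SHIFT)
#define FMT_DIV_SHIFT	8
#define FMT_DIV_MASK	(7 << FMT_DIV_SHIFT)
#define FMT_CHAN_MASK	0xf

int main(void)
{
	unsigned int format = 0x0011;	/* 48 kHz base, channel field = 1 */
	unsigned int rate = (format & FMT_BASE_44K) ? 44100 : 48000;
	unsigned int mul = (format & FMT_MULT_MASK) >> FMT_MULT_SHIFT;
	unsigned int div = (format & FMT_DIV_MASK) >> FMT_DIV_SHIFT;

	rate = rate * (mul + 1) / (div + 1);

	/* prints "rate=48000 channel_field=1" for this format word */
	printf("rate=%u channel_field=%u\n", rate, format & FMT_CHAN_MASK);
	return 0;
}

[The HDA specification encodes the channel field as the channel count minus one; hda_format_parse() returns the raw field, as this sketch does.]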
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 46664b622..595d1ec3e 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -36,20 +36,13 @@ int tegra_output_connector_get_modes(struct drm_connector *connector)
if (edid) {
err = drm_add_edid_modes(connector, edid);
+ drm_edid_to_eld(connector, edid);
kfree(edid);
}
return err;
}
-struct drm_encoder *
-tegra_output_connector_best_encoder(struct drm_connector *connector)
-{
- struct tegra_output *output = connector_to_output(connector);
-
- return &output->encoder;
-}
-
enum drm_connector_status
tegra_output_connector_detect(struct drm_connector *connector, bool force)
{
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index e246334e0..a131b44e2 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -112,7 +112,6 @@ tegra_rgb_connector_mode_valid(struct drm_connector *connector,
static const struct drm_connector_helper_funcs tegra_rgb_connector_helper_funcs = {
.get_modes = tegra_output_connector_get_modes,
.mode_valid = tegra_rgb_connector_mode_valid,
- .best_encoder = tegra_output_connector_best_encoder,
};
static const struct drm_encoder_funcs tegra_rgb_encoder_funcs = {
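[Dropping tegra_output_connector_best_encoder() and the .best_encoder entries works because the atomic helpers treat .best_encoder as optional: when it is NULL they fall back to drm_atomic_helper_best_encoder(), which in the 4.8-era helpers resolves to the first encoder attached to the connector, exactly what the removed function returned. Roughly, as a sketch of the fallback rather than the patch's code:]

struct drm_encoder *default_best_encoder(struct drm_connector *connector)
{
	/* first (and for Tegra, only) encoder ID attached to the connector */
	return drm_encoder_find(connector->dev, connector->encoder_ids[0]);
}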
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 757c6e860..74d0540b8 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -7,11 +7,13 @@
*/
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/debugfs.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
@@ -149,6 +151,8 @@ struct tegra_sor_soc {
const struct tegra_sor_hdmi_settings *settings;
unsigned int num_settings;
+
+ const u8 *xbar_cfg;
};
struct tegra_sor;
@@ -169,7 +173,9 @@ struct tegra_sor {
struct reset_control *rst;
struct clk *clk_parent;
+ struct clk *clk_brick;
struct clk *clk_safe;
+ struct clk *clk_src;
struct clk *clk_dp;
struct clk *clk;
@@ -190,6 +196,18 @@ struct tegra_sor {
struct regulator *hdmi_supply;
};
+struct tegra_sor_state {
+ struct drm_connector_state base;
+
+ unsigned int bpc;
+};
+
+static inline struct tegra_sor_state *
+to_sor_state(struct drm_connector_state *state)
+{
+ return container_of(state, struct tegra_sor_state, base);
+}
+
struct tegra_sor_config {
u32 bits_per_pixel;
@@ -225,6 +243,118 @@ static inline void tegra_sor_writel(struct tegra_sor *sor, u32 value,
writel(value, sor->regs + (offset << 2));
}
+static int tegra_sor_set_parent_clock(struct tegra_sor *sor, struct clk *parent)
+{
+ int err;
+
+ clk_disable_unprepare(sor->clk);
+
+ err = clk_set_parent(sor->clk, parent);
+ if (err < 0)
+ return err;
+
+ err = clk_prepare_enable(sor->clk);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+struct tegra_clk_sor_brick {
+ struct clk_hw hw;
+ struct tegra_sor *sor;
+};
+
+static inline struct tegra_clk_sor_brick *to_brick(struct clk_hw *hw)
+{
+ return container_of(hw, struct tegra_clk_sor_brick, hw);
+}
+
+static const char * const tegra_clk_sor_brick_parents[] = {
+ "pll_d2_out0", "pll_dp"
+};
+
+static int tegra_clk_sor_brick_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct tegra_clk_sor_brick *brick = to_brick(hw);
+ struct tegra_sor *sor = brick->sor;
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+ value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
+
+ switch (index) {
+ case 0:
+ value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK;
+ break;
+
+ case 1:
+ value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
+ break;
+ }
+
+ tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
+
+ return 0;
+}
+
+static u8 tegra_clk_sor_brick_get_parent(struct clk_hw *hw)
+{
+ struct tegra_clk_sor_brick *brick = to_brick(hw);
+ struct tegra_sor *sor = brick->sor;
+ u8 parent = U8_MAX;
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+
+ switch (value & SOR_CLK_CNTRL_DP_CLK_SEL_MASK) {
+ case SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK:
+ case SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_PCLK:
+ parent = 0;
+ break;
+
+ case SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK:
+ case SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_DPCLK:
+ parent = 1;
+ break;
+ }
+
+ return parent;
+}
+
+static const struct clk_ops tegra_clk_sor_brick_ops = {
+ .set_parent = tegra_clk_sor_brick_set_parent,
+ .get_parent = tegra_clk_sor_brick_get_parent,
+};
+
+static struct clk *tegra_clk_sor_brick_register(struct tegra_sor *sor,
+ const char *name)
+{
+ struct tegra_clk_sor_brick *brick;
+ struct clk_init_data init;
+ struct clk *clk;
+
+ brick = devm_kzalloc(sor->dev, sizeof(*brick), GFP_KERNEL);
+ if (!brick)
+ return ERR_PTR(-ENOMEM);
+
+ brick->sor = sor;
+
+ init.name = name;
+ init.flags = 0;
+ init.parent_names = tegra_clk_sor_brick_parents;
+ init.num_parents = ARRAY_SIZE(tegra_clk_sor_brick_parents);
+ init.ops = &tegra_clk_sor_brick_ops;
+
+ brick->hw.init = &init;
+
+ clk = devm_clk_register(sor->dev, &brick->hw);
+ if (IS_ERR(clk))
+ kfree(brick);
+
+ return clk;
+}
+
static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
struct drm_dp_link *link)
{
@@ -569,10 +699,10 @@ static int tegra_sor_compute_params(struct tegra_sor *sor,
return false;
}
-static int tegra_sor_calc_config(struct tegra_sor *sor,
- const struct drm_display_mode *mode,
- struct tegra_sor_config *config,
- struct drm_dp_link *link)
+static int tegra_sor_compute_config(struct tegra_sor *sor,
+ const struct drm_display_mode *mode,
+ struct tegra_sor_config *config,
+ struct drm_dp_link *link)
{
const u64 f = 100000, link_rate = link->rate * 1000;
const u64 pclk = mode->clock * 1000;
@@ -661,6 +791,135 @@ static int tegra_sor_calc_config(struct tegra_sor *sor,
return 0;
}
+static void tegra_sor_apply_config(struct tegra_sor *sor,
+ const struct tegra_sor_config *config)
+{
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
+ value &= ~SOR_DP_LINKCTL_TU_SIZE_MASK;
+ value |= SOR_DP_LINKCTL_TU_SIZE(config->tu_size);
+ tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
+
+ value = tegra_sor_readl(sor, SOR_DP_CONFIG0);
+ value &= ~SOR_DP_CONFIG_WATERMARK_MASK;
+ value |= SOR_DP_CONFIG_WATERMARK(config->watermark);
+
+ value &= ~SOR_DP_CONFIG_ACTIVE_SYM_COUNT_MASK;
+ value |= SOR_DP_CONFIG_ACTIVE_SYM_COUNT(config->active_count);
+
+ value &= ~SOR_DP_CONFIG_ACTIVE_SYM_FRAC_MASK;
+ value |= SOR_DP_CONFIG_ACTIVE_SYM_FRAC(config->active_frac);
+
+ if (config->active_polarity)
+ value |= SOR_DP_CONFIG_ACTIVE_SYM_POLARITY;
+ else
+ value &= ~SOR_DP_CONFIG_ACTIVE_SYM_POLARITY;
+
+ value |= SOR_DP_CONFIG_ACTIVE_SYM_ENABLE;
+ value |= SOR_DP_CONFIG_DISPARITY_NEGATIVE;
+ tegra_sor_writel(sor, value, SOR_DP_CONFIG0);
+
+ value = tegra_sor_readl(sor, SOR_DP_AUDIO_HBLANK_SYMBOLS);
+ value &= ~SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK;
+ value |= config->hblank_symbols & 0xffff;
+ tegra_sor_writel(sor, value, SOR_DP_AUDIO_HBLANK_SYMBOLS);
+
+ value = tegra_sor_readl(sor, SOR_DP_AUDIO_VBLANK_SYMBOLS);
+ value &= ~SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK;
+ value |= config->vblank_symbols & 0xffff;
+ tegra_sor_writel(sor, value, SOR_DP_AUDIO_VBLANK_SYMBOLS);
+}
+
+static void tegra_sor_mode_set(struct tegra_sor *sor,
+ const struct drm_display_mode *mode,
+ struct tegra_sor_state *state)
+{
+ struct tegra_dc *dc = to_tegra_dc(sor->output.encoder.crtc);
+ unsigned int vbe, vse, hbe, hse, vbs, hbs;
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_STATE1);
+ value &= ~SOR_STATE_ASY_PIXELDEPTH_MASK;
+ value &= ~SOR_STATE_ASY_CRC_MODE_MASK;
+ value &= ~SOR_STATE_ASY_OWNER_MASK;
+
+ value |= SOR_STATE_ASY_CRC_MODE_COMPLETE |
+ SOR_STATE_ASY_OWNER(dc->pipe + 1);
+
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ value &= ~SOR_STATE_ASY_HSYNCPOL;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ value |= SOR_STATE_ASY_HSYNCPOL;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ value &= ~SOR_STATE_ASY_VSYNCPOL;
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ value |= SOR_STATE_ASY_VSYNCPOL;
+
+ switch (state->bpc) {
+ case 16:
+ value |= SOR_STATE_ASY_PIXELDEPTH_BPP_48_444;
+ break;
+
+ case 12:
+ value |= SOR_STATE_ASY_PIXELDEPTH_BPP_36_444;
+ break;
+
+ case 10:
+ value |= SOR_STATE_ASY_PIXELDEPTH_BPP_30_444;
+ break;
+
+ case 8:
+ value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
+ break;
+
+ case 6:
+ value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444;
+ break;
+
+ default:
+ value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
+ break;
+ }
+
+ tegra_sor_writel(sor, value, SOR_STATE1);
+
+ /*
+ * TODO: The video timing programming below doesn't seem to match the
+ * register definitions.
+ */
+
+ value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe));
+
+ /* sync end = sync width - 1 */
+ vse = mode->vsync_end - mode->vsync_start - 1;
+ hse = mode->hsync_end - mode->hsync_start - 1;
+
+ value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe));
+
+ /* blank end = sync end + back porch */
+ vbe = vse + (mode->vtotal - mode->vsync_end);
+ hbe = hse + (mode->htotal - mode->hsync_end);
+
+ value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe));
+
+ /* blank start = blank end + active */
+ vbs = vbe + mode->vdisplay;
+ hbs = hbe + mode->hdisplay;
+
+ value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
+ tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe));
+
+ /* XXX interlacing support */
+ tegra_sor_writel(sor, 0x001, SOR_HEAD_STATE5(dc->pipe));
+}
+
static int tegra_sor_detach(struct tegra_sor *sor)
{
unsigned long value, timeout;
@@ -733,7 +992,8 @@ static int tegra_sor_power_down(struct tegra_sor *sor)
if ((value & SOR_PWR_TRIGGER) != 0)
return -ETIMEDOUT;
- err = clk_set_parent(sor->clk, sor->clk_safe);
+ /* switch to safe parent clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
if (err < 0)
dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
@@ -1038,6 +1298,22 @@ static void tegra_sor_debugfs_exit(struct tegra_sor *sor)
sor->debugfs = NULL;
}
+static void tegra_sor_connector_reset(struct drm_connector *connector)
+{
+ struct tegra_sor_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return;
+
+ if (connector->state) {
+ __drm_atomic_helper_connector_destroy_state(connector->state);
+ kfree(connector->state);
+ }
+
+ __drm_atomic_helper_connector_reset(connector, &state->base);
+}
+
static enum drm_connector_status
tegra_sor_connector_detect(struct drm_connector *connector, bool force)
{
@@ -1050,13 +1326,28 @@ tegra_sor_connector_detect(struct drm_connector *connector, bool force)
return tegra_output_connector_detect(connector, force);
}
+static struct drm_connector_state *
+tegra_sor_connector_duplicate_state(struct drm_connector *connector)
+{
+ struct tegra_sor_state *state = to_sor_state(connector->state);
+ struct tegra_sor_state *copy;
+
+ copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
+ if (!copy)
+ return NULL;
+
+ __drm_atomic_helper_connector_duplicate_state(connector, &copy->base);
+
+ return &copy->base;
+}
+
static const struct drm_connector_funcs tegra_sor_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
- .reset = drm_atomic_helper_connector_reset,
+ .reset = tegra_sor_connector_reset,
.detect = tegra_sor_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = tegra_output_connector_destroy,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_duplicate_state = tegra_sor_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
@@ -1081,13 +1372,16 @@ static enum drm_mode_status
tegra_sor_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ /* HDMI 2.0 modes are not yet supported */
+ if (mode->clock > 340000)
+ return MODE_NOCLOCK;
+
return MODE_OK;
}
static const struct drm_connector_helper_funcs tegra_sor_connector_helper_funcs = {
.get_modes = tegra_sor_connector_get_modes,
.mode_valid = tegra_sor_connector_mode_valid,
- .best_encoder = tegra_output_connector_best_encoder,
};
static const struct drm_encoder_funcs tegra_sor_encoder_funcs = {
@@ -1141,8 +1435,7 @@ static void tegra_sor_edp_disable(struct drm_encoder *encoder)
if (output->panel)
drm_panel_unprepare(output->panel);
- reset_control_assert(sor->rst);
- clk_disable_unprepare(sor->clk);
+ pm_runtime_put(sor->dev);
}
#if 0
@@ -1192,19 +1485,18 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
- unsigned int vbe, vse, hbe, hse, vbs, hbs, i;
struct tegra_sor *sor = to_sor(output);
struct tegra_sor_config config;
+ struct tegra_sor_state *state;
struct drm_dp_link link;
u8 rate, lanes;
+ unsigned int i;
int err = 0;
u32 value;
- err = clk_prepare_enable(sor->clk);
- if (err < 0)
- dev_err(sor->dev, "failed to enable clock: %d\n", err);
+ state = to_sor_state(output->connector.state);
- reset_control_deassert(sor->rst);
+ pm_runtime_get_sync(sor->dev);
if (output->panel)
drm_panel_prepare(output->panel);
@@ -1219,17 +1511,17 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
return;
}
- err = clk_set_parent(sor->clk, sor->clk_safe);
+ /* switch to safe parent clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
if (err < 0)
dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
memset(&config, 0, sizeof(config));
- config.bits_per_pixel = output->connector.display_info.bpc * 3;
+ config.bits_per_pixel = state->bpc * 3;
- err = tegra_sor_calc_config(sor, mode, &config, &link);
+ err = tegra_sor_compute_config(sor, mode, &config, &link);
if (err < 0)
- dev_err(sor->dev, "failed to compute link configuration: %d\n",
- err);
+ dev_err(sor->dev, "failed to compute configuration: %d\n", err);
value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
@@ -1326,10 +1618,18 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
value &= ~SOR_PLL2_PORT_POWERDOWN;
tegra_sor_writel(sor, value, SOR_PLL2);
- /* switch to DP clock */
- err = clk_set_parent(sor->clk, sor->clk_dp);
+ /* XXX not in TRM */
+ for (value = 0, i = 0; i < 5; i++)
+ value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->soc->xbar_cfg[i]) |
+ SOR_XBAR_CTRL_LINK1_XSEL(i, i);
+
+ tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
+ tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
+
+ /* switch to DP parent clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_dp);
if (err < 0)
- dev_err(sor->dev, "failed to set DP parent clock: %d\n", err);
+ dev_err(sor->dev, "failed to set parent clock: %d\n", err);
/* power DP lanes */
value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
@@ -1375,13 +1675,11 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
value |= drm_dp_link_rate_to_bw_code(link.rate) << 2;
tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
- /* set linkctl */
+ tegra_sor_apply_config(sor, &config);
+
+ /* enable link */
value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
value |= SOR_DP_LINKCTL_ENABLE;
-
- value &= ~SOR_DP_LINKCTL_TU_SIZE_MASK;
- value |= SOR_DP_LINKCTL_TU_SIZE(config.tu_size);
-
value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
@@ -1394,35 +1692,6 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
tegra_sor_writel(sor, value, SOR_DP_TPG);
- value = tegra_sor_readl(sor, SOR_DP_CONFIG0);
- value &= ~SOR_DP_CONFIG_WATERMARK_MASK;
- value |= SOR_DP_CONFIG_WATERMARK(config.watermark);
-
- value &= ~SOR_DP_CONFIG_ACTIVE_SYM_COUNT_MASK;
- value |= SOR_DP_CONFIG_ACTIVE_SYM_COUNT(config.active_count);
-
- value &= ~SOR_DP_CONFIG_ACTIVE_SYM_FRAC_MASK;
- value |= SOR_DP_CONFIG_ACTIVE_SYM_FRAC(config.active_frac);
-
- if (config.active_polarity)
- value |= SOR_DP_CONFIG_ACTIVE_SYM_POLARITY;
- else
- value &= ~SOR_DP_CONFIG_ACTIVE_SYM_POLARITY;
-
- value |= SOR_DP_CONFIG_ACTIVE_SYM_ENABLE;
- value |= SOR_DP_CONFIG_DISPARITY_NEGATIVE;
- tegra_sor_writel(sor, value, SOR_DP_CONFIG0);
-
- value = tegra_sor_readl(sor, SOR_DP_AUDIO_HBLANK_SYMBOLS);
- value &= ~SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK;
- value |= config.hblank_symbols & 0xffff;
- tegra_sor_writel(sor, value, SOR_DP_AUDIO_HBLANK_SYMBOLS);
-
- value = tegra_sor_readl(sor, SOR_DP_AUDIO_VBLANK_SYMBOLS);
- value &= ~SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK;
- value |= config.vblank_symbols & 0xffff;
- tegra_sor_writel(sor, value, SOR_DP_AUDIO_VBLANK_SYMBOLS);
-
/* enable pad calibration logic */
value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
value |= SOR_DP_PADCTL_PAD_CAL_PD;
@@ -1478,75 +1747,19 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
if (err < 0)
dev_err(sor->dev, "failed to power up SOR: %d\n", err);
- /*
- * configure panel (24bpp, vsync-, hsync-, DP-A protocol, complete
- * raster, associate with display controller)
- */
- value = SOR_STATE_ASY_PROTOCOL_DP_A |
- SOR_STATE_ASY_CRC_MODE_COMPLETE |
- SOR_STATE_ASY_OWNER(dc->pipe + 1);
-
- if (mode->flags & DRM_MODE_FLAG_PHSYNC)
- value &= ~SOR_STATE_ASY_HSYNCPOL;
-
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- value |= SOR_STATE_ASY_HSYNCPOL;
-
- if (mode->flags & DRM_MODE_FLAG_PVSYNC)
- value &= ~SOR_STATE_ASY_VSYNCPOL;
-
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- value |= SOR_STATE_ASY_VSYNCPOL;
-
- switch (config.bits_per_pixel) {
- case 24:
- value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
- break;
-
- case 18:
- value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444;
- break;
-
- default:
- BUG();
- break;
- }
-
- tegra_sor_writel(sor, value, SOR_STATE1);
-
- /*
- * TODO: The video timing programming below doesn't seem to match the
- * register definitions.
- */
-
- value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe));
-
- vse = mode->vsync_end - mode->vsync_start - 1;
- hse = mode->hsync_end - mode->hsync_start - 1;
-
- value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe));
-
- vbe = vse + (mode->vsync_start - mode->vdisplay);
- hbe = hse + (mode->hsync_start - mode->hdisplay);
-
- value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe));
-
- vbs = vbe + mode->vdisplay;
- hbs = hbe + mode->hdisplay;
-
- value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe));
-
- tegra_sor_writel(sor, 0x1, SOR_HEAD_STATE5(dc->pipe));
-
/* CSTM (LVDS, link A/B, upper) */
value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B |
SOR_CSTM_UPPER;
tegra_sor_writel(sor, value, SOR_CSTM);
+ /* use DP-A protocol */
+ value = tegra_sor_readl(sor, SOR_STATE1);
+ value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
+ value |= SOR_STATE_ASY_PROTOCOL_DP_A;
+ tegra_sor_writel(sor, value, SOR_STATE1);
+
+ tegra_sor_mode_set(sor, mode, state);
+
/* PWM setup */
err = tegra_sor_setup_pwm(sor, 250);
if (err < 0)
@@ -1578,11 +1791,15 @@ tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct tegra_output *output = encoder_to_output(encoder);
+ struct tegra_sor_state *state = to_sor_state(conn_state);
struct tegra_dc *dc = to_tegra_dc(conn_state->crtc);
unsigned long pclk = crtc_state->mode.clock * 1000;
struct tegra_sor *sor = to_sor(output);
+ struct drm_display_info *info;
int err;
+ info = &output->connector.display_info;
+
err = tegra_dc_state_setup_clock(dc, crtc_state, sor->clk_parent,
pclk, 0);
if (err < 0) {
@@ -1590,6 +1807,18 @@ tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
return err;
}
+ switch (info->bpc) {
+ case 8:
+ case 6:
+ state->bpc = info->bpc;
+ break;
+
+ default:
+ DRM_DEBUG_KMS("%u bits-per-color not supported\n", info->bpc);
+ state->bpc = 8;
+ break;
+ }
+
return 0;
}
@@ -1752,9 +1981,7 @@ static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
if (err < 0)
dev_err(sor->dev, "failed to power off HDMI rail: %d\n", err);
- reset_control_assert(sor->rst);
- usleep_range(1000, 2000);
- clk_disable_unprepare(sor->clk);
+ pm_runtime_put(sor->dev);
}
static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
@@ -1762,26 +1989,21 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
struct tegra_output *output = encoder_to_output(encoder);
unsigned int h_ref_to_sync = 1, pulse_start, max_ac;
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
- unsigned int vbe, vse, hbe, hse, vbs, hbs, div;
struct tegra_sor_hdmi_settings *settings;
struct tegra_sor *sor = to_sor(output);
+ struct tegra_sor_state *state;
struct drm_display_mode *mode;
- struct drm_display_info *info;
+ unsigned int div, i;
u32 value;
int err;
+ state = to_sor_state(output->connector.state);
mode = &encoder->crtc->state->adjusted_mode;
- info = &output->connector.display_info;
- err = clk_prepare_enable(sor->clk);
- if (err < 0)
- dev_err(sor->dev, "failed to enable clock: %d\n", err);
+ pm_runtime_get_sync(sor->dev);
- usleep_range(1000, 2000);
-
- reset_control_deassert(sor->rst);
-
- err = clk_set_parent(sor->clk, sor->clk_safe);
+ /* switch to safe parent clock */
+ err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
if (err < 0)
dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
@@ -1877,22 +2099,20 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
value = SOR_REFCLK_DIV_INT(div) | SOR_REFCLK_DIV_FRAC(div);
tegra_sor_writel(sor, value, SOR_REFCLK);
- /* XXX don't hardcode */
- value = SOR_XBAR_CTRL_LINK1_XSEL(4, 4) |
- SOR_XBAR_CTRL_LINK1_XSEL(3, 3) |
- SOR_XBAR_CTRL_LINK1_XSEL(2, 2) |
- SOR_XBAR_CTRL_LINK1_XSEL(1, 1) |
- SOR_XBAR_CTRL_LINK1_XSEL(0, 0) |
- SOR_XBAR_CTRL_LINK0_XSEL(4, 4) |
- SOR_XBAR_CTRL_LINK0_XSEL(3, 3) |
- SOR_XBAR_CTRL_LINK0_XSEL(2, 0) |
- SOR_XBAR_CTRL_LINK0_XSEL(1, 1) |
- SOR_XBAR_CTRL_LINK0_XSEL(0, 2);
- tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
+ /* XXX not in TRM */
+ for (value = 0, i = 0; i < 5; i++)
+ value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->soc->xbar_cfg[i]) |
+ SOR_XBAR_CTRL_LINK1_XSEL(i, i);
tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
+ tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
- err = clk_set_parent(sor->clk, sor->clk_parent);
+ /* switch to parent clock */
+ err = clk_set_parent(sor->clk_src, sor->clk_parent);
+ if (err < 0)
+ dev_err(sor->dev, "failed to set source clock: %d\n", err);
+
+ err = tegra_sor_set_parent_clock(sor, sor->clk_src);
if (err < 0)
dev_err(sor->dev, "failed to set parent clock: %d\n", err);
@@ -2002,7 +2222,7 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
value &= ~DITHER_CONTROL_MASK;
value &= ~BASE_COLOR_SIZE_MASK;
- switch (info->bpc) {
+ switch (state->bpc) {
case 6:
value |= BASE_COLOR_SIZE_666;
break;
@@ -2012,7 +2232,8 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
break;
default:
- WARN(1, "%u bits-per-color not supported\n", info->bpc);
+ WARN(1, "%u bits-per-color not supported\n", state->bpc);
+ value |= BASE_COLOR_SIZE_888;
break;
}
@@ -2022,83 +2243,19 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
if (err < 0)
dev_err(sor->dev, "failed to power up SOR: %d\n", err);
- /* configure mode */
- value = tegra_sor_readl(sor, SOR_STATE1);
- value &= ~SOR_STATE_ASY_PIXELDEPTH_MASK;
- value &= ~SOR_STATE_ASY_CRC_MODE_MASK;
- value &= ~SOR_STATE_ASY_OWNER_MASK;
-
- value |= SOR_STATE_ASY_CRC_MODE_COMPLETE |
- SOR_STATE_ASY_OWNER(dc->pipe + 1);
-
- if (mode->flags & DRM_MODE_FLAG_PHSYNC)
- value &= ~SOR_STATE_ASY_HSYNCPOL;
-
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- value |= SOR_STATE_ASY_HSYNCPOL;
-
- if (mode->flags & DRM_MODE_FLAG_PVSYNC)
- value &= ~SOR_STATE_ASY_VSYNCPOL;
-
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- value |= SOR_STATE_ASY_VSYNCPOL;
-
- switch (info->bpc) {
- case 8:
- value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
- break;
-
- case 6:
- value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444;
- break;
-
- default:
- BUG();
- break;
- }
-
- tegra_sor_writel(sor, value, SOR_STATE1);
-
+ /* configure dynamic range of output */
value = tegra_sor_readl(sor, SOR_HEAD_STATE0(dc->pipe));
value &= ~SOR_HEAD_STATE_RANGECOMPRESS_MASK;
value &= ~SOR_HEAD_STATE_DYNRANGE_MASK;
tegra_sor_writel(sor, value, SOR_HEAD_STATE0(dc->pipe));
+ /* configure colorspace */
value = tegra_sor_readl(sor, SOR_HEAD_STATE0(dc->pipe));
value &= ~SOR_HEAD_STATE_COLORSPACE_MASK;
value |= SOR_HEAD_STATE_COLORSPACE_RGB;
tegra_sor_writel(sor, value, SOR_HEAD_STATE0(dc->pipe));
- /*
- * TODO: The video timing programming below doesn't seem to match the
- * register definitions.
- */
-
- value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe));
-
- /* sync end = sync width - 1 */
- vse = mode->vsync_end - mode->vsync_start - 1;
- hse = mode->hsync_end - mode->hsync_start - 1;
-
- value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe));
-
- /* blank end = sync end + back porch */
- vbe = vse + (mode->vtotal - mode->vsync_end);
- hbe = hse + (mode->htotal - mode->hsync_end);
-
- value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe));
-
- /* blank start = blank end + active */
- vbs = vbe + mode->vdisplay;
- hbs = hbe + mode->hdisplay;
-
- value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
- tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe));
-
- tegra_sor_writel(sor, 0x1, SOR_HEAD_STATE5(dc->pipe));
+ tegra_sor_mode_set(sor, mode, state);
tegra_sor_update(sor);
@@ -2196,10 +2353,13 @@ static int tegra_sor_init(struct host1x_client *client)
* XXX: Remove this reset once proper hand-over from firmware to
* kernel is possible.
*/
- err = reset_control_assert(sor->rst);
- if (err < 0) {
- dev_err(sor->dev, "failed to assert SOR reset: %d\n", err);
- return err;
+ if (sor->rst) {
+ err = reset_control_assert(sor->rst);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to assert SOR reset: %d\n",
+ err);
+ return err;
+ }
}
err = clk_prepare_enable(sor->clk);
@@ -2210,10 +2370,13 @@ static int tegra_sor_init(struct host1x_client *client)
usleep_range(1000, 3000);
- err = reset_control_deassert(sor->rst);
- if (err < 0) {
- dev_err(sor->dev, "failed to deassert SOR reset: %d\n", err);
- return err;
+ if (sor->rst) {
+ err = reset_control_deassert(sor->rst);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to deassert SOR reset: %d\n",
+ err);
+ return err;
+ }
}
err = clk_prepare_enable(sor->clk_safe);
@@ -2324,11 +2487,16 @@ static const struct tegra_sor_ops tegra_sor_hdmi_ops = {
.remove = tegra_sor_hdmi_remove,
};
+static const u8 tegra124_sor_xbar_cfg[5] = {
+ 0, 1, 2, 3, 4
+};
+
static const struct tegra_sor_soc tegra124_sor = {
.supports_edp = true,
.supports_lvds = true,
.supports_hdmi = false,
.supports_dp = false,
+ .xbar_cfg = tegra124_sor_xbar_cfg,
};
static const struct tegra_sor_soc tegra210_sor = {
@@ -2336,6 +2504,11 @@ static const struct tegra_sor_soc tegra210_sor = {
.supports_lvds = false,
.supports_hdmi = false,
.supports_dp = false,
+ .xbar_cfg = tegra124_sor_xbar_cfg,
+};
+
+static const u8 tegra210_sor_xbar_cfg[5] = {
+ 2, 1, 0, 3, 4
};
static const struct tegra_sor_soc tegra210_sor1 = {
@@ -2346,6 +2519,8 @@ static const struct tegra_sor_soc tegra210_sor1 = {
.num_settings = ARRAY_SIZE(tegra210_sor_hdmi_defaults),
.settings = tegra210_sor_hdmi_defaults,
+
+ .xbar_cfg = tegra210_sor_xbar_cfg,
};
static const struct of_device_id tegra_sor_of_match[] = {
@@ -2435,11 +2610,14 @@ static int tegra_sor_probe(struct platform_device *pdev)
goto remove;
}
- sor->rst = devm_reset_control_get(&pdev->dev, "sor");
- if (IS_ERR(sor->rst)) {
- err = PTR_ERR(sor->rst);
- dev_err(&pdev->dev, "failed to get reset control: %d\n", err);
- goto remove;
+ if (!pdev->dev.pm_domain) {
+ sor->rst = devm_reset_control_get(&pdev->dev, "sor");
+ if (IS_ERR(sor->rst)) {
+ err = PTR_ERR(sor->rst);
+ dev_err(&pdev->dev, "failed to get reset control: %d\n",
+ err);
+ goto remove;
+ }
}
sor->clk = devm_clk_get(&pdev->dev, NULL);
@@ -2449,6 +2627,16 @@ static int tegra_sor_probe(struct platform_device *pdev)
goto remove;
}
+ if (sor->soc->supports_hdmi || sor->soc->supports_dp) {
+ sor->clk_src = devm_clk_get(&pdev->dev, "source");
+ if (IS_ERR(sor->clk_src)) {
+ err = PTR_ERR(sor->clk_src);
+ dev_err(sor->dev, "failed to get source clock: %d\n",
+ err);
+ goto remove;
+ }
+ }
+
sor->clk_parent = devm_clk_get(&pdev->dev, "parent");
if (IS_ERR(sor->clk_parent)) {
err = PTR_ERR(sor->clk_parent);
@@ -2470,6 +2658,19 @@ static int tegra_sor_probe(struct platform_device *pdev)
goto remove;
}
+ platform_set_drvdata(pdev, sor);
+ pm_runtime_enable(&pdev->dev);
+
+ pm_runtime_get_sync(&pdev->dev);
+ sor->clk_brick = tegra_clk_sor_brick_register(sor, "sor1_brick");
+ pm_runtime_put(&pdev->dev);
+
+ if (IS_ERR(sor->clk_brick)) {
+ err = PTR_ERR(sor->clk_brick);
+ dev_err(&pdev->dev, "failed to register SOR clock: %d\n", err);
+ goto remove;
+ }
+
INIT_LIST_HEAD(&sor->client.list);
sor->client.ops = &sor_client_ops;
sor->client.dev = &pdev->dev;
@@ -2481,8 +2682,6 @@ static int tegra_sor_probe(struct platform_device *pdev)
goto remove;
}
- platform_set_drvdata(pdev, sor);
-
return 0;
remove:
@@ -2498,6 +2697,8 @@ static int tegra_sor_remove(struct platform_device *pdev)
struct tegra_sor *sor = platform_get_drvdata(pdev);
int err;
+ pm_runtime_disable(&pdev->dev);
+
err = host1x_client_unregister(&sor->client);
if (err < 0) {
dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
@@ -2516,10 +2717,62 @@ static int tegra_sor_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int tegra_sor_suspend(struct device *dev)
+{
+ struct tegra_sor *sor = dev_get_drvdata(dev);
+ int err;
+
+ if (sor->rst) {
+ err = reset_control_assert(sor->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ return err;
+ }
+ }
+
+ usleep_range(1000, 2000);
+
+ clk_disable_unprepare(sor->clk);
+
+ return 0;
+}
+
+static int tegra_sor_resume(struct device *dev)
+{
+ struct tegra_sor *sor = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(sor->clk);
+ if (err < 0) {
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ usleep_range(1000, 2000);
+
+ if (sor->rst) {
+ err = reset_control_deassert(sor->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to deassert reset: %d\n", err);
+ clk_disable_unprepare(sor->clk);
+ return err;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops tegra_sor_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_sor_suspend, tegra_sor_resume, NULL)
+};
+
struct platform_driver tegra_sor_driver = {
.driver = {
.name = "tegra-sor",
.of_match_table = tegra_sor_of_match,
+ .pm = &tegra_sor_pm_ops,
},
.probe = tegra_sor_probe,
.remove = tegra_sor_remove,
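[tegra_sor_mode_set() above derives the head-state timings directly from the DRM mode, per its comments: sync end = sync width - 1, blank end = sync end + back porch, blank start = blank end + active. A worked example with assumed CEA-861 1920x1080@60 timings, for illustration only:]

#include <stdio.h>

int main(void)
{
	/* assumed CEA-861 1080p60 mode */
	unsigned int hdisplay = 1920, hsync_start = 2008, hsync_end = 2052, htotal = 2200;
	unsigned int vdisplay = 1080, vsync_start = 1084, vsync_end = 1089, vtotal = 1125;

	unsigned int hse = hsync_end - hsync_start - 1;	/* 43 */
	unsigned int vse = vsync_end - vsync_start - 1;	/* 4 */
	unsigned int hbe = hse + (htotal - hsync_end);	/* 191 */
	unsigned int vbe = vse + (vtotal - vsync_end);	/* 40 */
	unsigned int hbs = hbe + hdisplay;		/* 2111 */
	unsigned int vbs = vbe + vdisplay;		/* 1120 */

	/* HEAD_STATE registers pack vertical in [30:16], horizontal in [14:0] */
	printf("STATE2=0x%08x STATE3=0x%08x STATE4=0x%08x\n",
	       ((vse & 0x7fff) << 16) | (hse & 0x7fff),
	       ((vbe & 0x7fff) << 16) | (hbe & 0x7fff),
	       ((vbs & 0x7fff) << 16) | (hbs & 0x7fff));
	return 0;
}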
diff --git a/drivers/gpu/drm/tegra/sor.h b/drivers/gpu/drm/tegra/sor.h
index 2d31d027e..865c73b48 100644
--- a/drivers/gpu/drm/tegra/sor.h
+++ b/drivers/gpu/drm/tegra/sor.h
@@ -27,6 +27,9 @@
#define SOR_STATE_ASY_PIXELDEPTH_MASK (0xf << 17)
#define SOR_STATE_ASY_PIXELDEPTH_BPP_18_444 (0x2 << 17)
#define SOR_STATE_ASY_PIXELDEPTH_BPP_24_444 (0x5 << 17)
+#define SOR_STATE_ASY_PIXELDEPTH_BPP_30_444 (0x6 << 17)
+#define SOR_STATE_ASY_PIXELDEPTH_BPP_36_444 (0x8 << 17)
+#define SOR_STATE_ASY_PIXELDEPTH_BPP_48_444 (0x9 << 17)
#define SOR_STATE_ASY_VSYNCPOL (1 << 13)
#define SOR_STATE_ASY_HSYNCPOL (1 << 12)
#define SOR_STATE_ASY_PROTOCOL_MASK (0xf << 8)
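[The tegra_clk_sor_brick_register() helper in sor.c above wraps the SOR_CLK_CNTRL pad-macro mux in a clk_hw, so the parent switch between pll_d2_out0 and pll_dp can go through the common clock framework rather than raw register writes. A hedged usage sketch; dp_parent is a placeholder for whichever struct clk corresponds to "pll_dp" on a given board:]

/* switch the registered brick mux to the DP clock via the CCF */
err = clk_set_parent(sor->clk_brick, dp_parent);
if (err < 0)
	dev_err(sor->dev, "failed to reparent SOR brick: %d\n", err);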
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
index f60a1ec84..28fed7e20 100644
--- a/drivers/gpu/drm/tilcdc/Kconfig
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -2,7 +2,6 @@ config DRM_TILCDC
tristate "DRM Support for TI LCDC Display Controller"
depends on DRM && OF && ARM
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 79027b1c6..107c8bd04 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -697,7 +697,7 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
- drm_handle_vblank(dev, 0);
+ drm_crtc_handle_vblank(crtc);
if (!skip_event) {
struct drm_pending_vblank_event *event;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 709bc9035..d27809372 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -541,7 +541,6 @@ static struct drm_driver tilcdc_driver = {
.load = tilcdc_load,
.unload = tilcdc_unload,
.lastclose = tilcdc_lastclose,
- .set_busid = drm_platform_set_busid,
.irq_handler = tilcdc_irq,
.irq_preinstall = tilcdc_irq_preinstall,
.irq_postinstall = tilcdc_irq_postinstall,
@@ -549,7 +548,7 @@ static struct drm_driver tilcdc_driver = {
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = tilcdc_enable_vblank,
.disable_vblank = tilcdc_disable_vblank,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a71cf98c6..42c074a9c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -146,10 +146,9 @@ static void ttm_bo_release_list(struct kref *list_kref)
BUG_ON(bo->mem.mm_node != NULL);
BUG_ON(!list_empty(&bo->lru));
BUG_ON(!list_empty(&bo->ddestroy));
-
- if (bo->ttm)
- ttm_tt_destroy(bo->ttm);
+ ttm_tt_destroy(bo->ttm);
atomic_dec(&bo->glob->bo_count);
+ fence_put(bo->moving);
if (bo->resv == &bo->ttm_resv)
reservation_object_fini(&bo->ttm_resv);
mutex_destroy(&bo->wu_mutex);
@@ -355,12 +354,14 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
- ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
+ ret = ttm_bo_move_ttm(bo, evict, interruptible, no_wait_gpu,
+ mem);
else if (bdev->driver->move)
ret = bdev->driver->move(bo, evict, interruptible,
no_wait_gpu, mem);
else
- ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
+ ret = ttm_bo_move_memcpy(bo, evict, interruptible,
+ no_wait_gpu, mem);
if (ret) {
if (bdev->driver->move_notify) {
@@ -396,8 +397,7 @@ moved:
out_err:
new_man = &bdev->man[bo->mem.mem_type];
- if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
- ttm_tt_unbind(bo->ttm);
+ if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) {
ttm_tt_destroy(bo->ttm);
bo->ttm = NULL;
}
@@ -418,11 +418,8 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
if (bo->bdev->driver->move_notify)
bo->bdev->driver->move_notify(bo, NULL);
- if (bo->ttm) {
- ttm_tt_unbind(bo->ttm);
- ttm_tt_destroy(bo->ttm);
- bo->ttm = NULL;
- }
+ ttm_tt_destroy(bo->ttm);
+ bo->ttm = NULL;
ttm_bo_mem_put(bo, &bo->mem);
ww_mutex_unlock (&bo->resv->lock);
@@ -688,15 +685,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
struct ttm_placement placement;
int ret = 0;
- ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
-
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS) {
- pr_err("Failed to expire sync object before buffer eviction\n");
- }
- goto out;
- }
-
lockdep_assert_held(&bo->resv->lock.base);
evict_mem = bo->mem;
@@ -720,7 +708,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
no_wait_gpu);
- if (ret) {
+ if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
pr_err("Buffer eviction failed\n");
ttm_bo_mem_put(bo, &evict_mem);
@@ -800,6 +788,34 @@ void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
EXPORT_SYMBOL(ttm_bo_mem_put);
/**
+ * Add the last move fence to the BO and reserve a new shared slot.
+ */
+static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
+ struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem)
+{
+ struct fence *fence;
+ int ret;
+
+ spin_lock(&man->move_lock);
+ fence = fence_get(man->move);
+ spin_unlock(&man->move_lock);
+
+ if (fence) {
+ reservation_object_add_shared_fence(bo->resv, fence);
+
+ ret = reservation_object_reserve_shared(bo->resv);
+ if (unlikely(ret))
+ return ret;
+
+ fence_put(bo->moving);
+ bo->moving = fence;
+ }
+
+ return 0;
+}
+
+/**
* Repeatedly evict memory from the LRU for @mem_type until we create enough
* space, or we've evicted everything and there isn't enough space.
*/
@@ -825,10 +841,8 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
if (unlikely(ret != 0))
return ret;
} while (1);
- if (mem->mm_node == NULL)
- return -ENOMEM;
mem->mem_type = mem_type;
- return 0;
+ return ttm_bo_add_move_fence(bo, man, mem);
}
static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
@@ -898,6 +912,10 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
bool has_erestartsys = false;
int i, ret;
+ ret = reservation_object_reserve_shared(bo->resv);
+ if (unlikely(ret))
+ return ret;
+
mem->mm_node = NULL;
for (i = 0; i < placement->num_placement; ++i) {
const struct ttm_place *place = &placement->placement[i];
@@ -931,9 +949,15 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
ret = (*man->func->get_node)(man, bo, place, mem);
if (unlikely(ret))
return ret;
-
- if (mem->mm_node)
+
+ if (mem->mm_node) {
+ ret = ttm_bo_add_move_fence(bo, man, mem);
+ if (unlikely(ret)) {
+ (*man->func->put_node)(man, mem);
+ return ret;
+ }
break;
+ }
}
if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
@@ -1000,20 +1024,6 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
lockdep_assert_held(&bo->resv->lock.base);
- /*
- * Don't wait for the BO on initial allocation. This is important when
- * the BO has an imported reservation object.
- */
- if (bo->mem.mem_type != TTM_PL_SYSTEM || bo->ttm != NULL) {
- /*
- * FIXME: It's possible to pipeline buffer moves.
- * Have the driver move function wait for idle when necessary,
- * instead of doing it here.
- */
- ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
- if (ret)
- return ret;
- }
mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT;
mem.page_alignment = bo->mem.page_alignment;
@@ -1166,7 +1176,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
bo->mem.page_alignment = page_alignment;
bo->mem.bus.io_reserved_vm = false;
bo->mem.bus.io_reserved_count = 0;
- bo->priv_flags = 0;
+ bo->moving = NULL;
bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
bo->persistent_swap_storage = persistent_swap_storage;
bo->acc_size = acc_size;
@@ -1278,6 +1288,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
{
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_bo_global *glob = bdev->glob;
+ struct fence *fence;
int ret;
/*
@@ -1298,6 +1309,23 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
}
spin_unlock(&glob->lru_lock);
+
+ spin_lock(&man->move_lock);
+ fence = fence_get(man->move);
+ spin_unlock(&man->move_lock);
+
+ if (fence) {
+ ret = fence_wait(fence, false);
+ fence_put(fence);
+ if (ret) {
+ if (allow_errors) {
+ return ret;
+ } else {
+ pr_err("Cleanup eviction failed\n");
+ }
+ }
+ }
+
return 0;
}
@@ -1317,6 +1345,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
mem_type);
return ret;
}
+ fence_put(man->move);
man->use_type = false;
man->has_type = false;
@@ -1362,6 +1391,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
man->io_reserve_fastpath = true;
man->use_io_reserve_lru = false;
mutex_init(&man->io_reserve_mutex);
+ spin_lock_init(&man->move_lock);
INIT_LIST_HEAD(&man->io_reserve_lru);
ret = bdev->driver->init_mem_type(bdev, type, man);
@@ -1380,6 +1410,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
man->size = p_size;
INIT_LIST_HEAD(&man->lru);
+ man->move = NULL;
return 0;
}
@@ -1573,47 +1604,17 @@ EXPORT_SYMBOL(ttm_bo_unmap_virtual);
int ttm_bo_wait(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait)
{
- struct reservation_object_list *fobj;
- struct reservation_object *resv;
- struct fence *excl;
- long timeout = 15 * HZ;
- int i;
-
- resv = bo->resv;
- fobj = reservation_object_get_list(resv);
- excl = reservation_object_get_excl(resv);
- if (excl) {
- if (!fence_is_signaled(excl)) {
- if (no_wait)
- return -EBUSY;
-
- timeout = fence_wait_timeout(excl,
- interruptible, timeout);
- }
- }
-
- for (i = 0; fobj && timeout > 0 && i < fobj->shared_count; ++i) {
- struct fence *fence;
- fence = rcu_dereference_protected(fobj->shared[i],
- reservation_object_held(resv));
-
- if (!fence_is_signaled(fence)) {
- if (no_wait)
- return -EBUSY;
-
- timeout = fence_wait_timeout(fence,
- interruptible, timeout);
- }
- }
+ long timeout = no_wait ? 0 : 15 * HZ;
+ timeout = reservation_object_wait_timeout_rcu(bo->resv, true,
+ interruptible, timeout);
if (timeout < 0)
return timeout;
if (timeout == 0)
return -EBUSY;
- reservation_object_add_excl_fence(resv, NULL);
- clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+ reservation_object_add_excl_fence(bo->resv, NULL);
return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
@@ -1683,14 +1684,9 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
ttm_bo_list_ref_sub(bo, put_count, true);
/**
- * Wait for GPU, then move to system cached.
+ * Move to system cached memory.
*/
- ret = ttm_bo_wait(bo, false, false);
-
- if (unlikely(ret != 0))
- goto out;
-
if ((bo->mem.placement & swap_placement) != swap_placement) {
struct ttm_mem_reg evict_mem;
@@ -1705,6 +1701,14 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
goto out;
}
+ /**
+ * Make sure BO is idle.
+ */
+
+ ret = ttm_bo_wait(bo, false, false);
+ if (unlikely(ret != 0))
+ goto out;
+
ttm_bo_unmap_virtual(bo);
/**
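[The ttm_bo_wait() rewrite above replaces the hand-rolled walk over the exclusive and shared fences with a single reservation_object_wait_timeout_rcu() call, leaning on its return convention: negative is an error, zero means nothing signaled within the timeout, positive is the remaining jiffies. Passing a zero timeout turns the call into a non-blocking poll, which is how no_wait is implemented. A condensed sketch of the mapping:]

/* sketch of the timeout convention the new ttm_bo_wait() relies on */
static int wait_resv(struct reservation_object *resv, bool intr, bool no_wait)
{
	long timeout = no_wait ? 0 : 15 * HZ;

	/* wait_all=true: shared fences count, not just the exclusive one */
	timeout = reservation_object_wait_timeout_rcu(resv, true, intr, timeout);
	if (timeout < 0)
		return timeout;		/* e.g. -ERESTARTSYS */
	if (timeout == 0)
		return -EBUSY;		/* timed out, or no_wait poll found it busy */

	return 0;			/* all fences signaled */
}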
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index d98315597..f157a9efd 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -45,7 +45,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
}
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- bool evict,
+ bool evict, bool interruptible,
bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
struct ttm_tt *ttm = bo->ttm;
@@ -53,6 +53,14 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
int ret;
if (old_mem->mem_type != TTM_PL_SYSTEM) {
+ ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ pr_err("Failed to expire sync object before unbinding TTM\n");
+ return ret;
+ }
+
ttm_tt_unbind(ttm);
ttm_bo_free_old_node(bo);
ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
@@ -321,7 +329,8 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
}
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_gpu,
+ bool evict, bool interruptible,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -337,6 +346,10 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
unsigned long add = 0;
int dir;
+ ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+ if (ret)
+ return ret;
+
ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
if (ret)
return ret;
@@ -401,8 +414,7 @@ out2:
*old_mem = *new_mem;
new_mem->mm_node = NULL;
- if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
- ttm_tt_unbind(ttm);
+ if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
ttm_tt_destroy(ttm);
bo->ttm = NULL;
}
@@ -462,6 +474,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
INIT_LIST_HEAD(&fbo->lru);
INIT_LIST_HEAD(&fbo->swap);
INIT_LIST_HEAD(&fbo->io_reserve_lru);
+ fbo->moving = NULL;
drm_vma_node_reset(&fbo->vma_node);
atomic_set(&fbo->cpu_writers, 0);
@@ -634,7 +647,6 @@ EXPORT_SYMBOL(ttm_bo_kunmap);
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
struct fence *fence,
bool evict,
- bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -649,9 +661,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
if (ret)
return ret;
- if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
- (bo->ttm != NULL)) {
- ttm_tt_unbind(bo->ttm);
+ if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
ttm_tt_destroy(bo->ttm);
bo->ttm = NULL;
}
@@ -665,7 +675,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
* operation has completed.
*/
- set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+ fence_put(bo->moving);
+ bo->moving = fence_get(fence);
ret = ttm_buffer_object_transfer(bo, &ghost_obj);
if (ret)
@@ -694,3 +705,95 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
+
+int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
+ struct fence *fence, bool evict,
+ struct ttm_mem_reg *new_mem)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_reg *old_mem = &bo->mem;
+
+ struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
+ struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];
+
+ int ret;
+
+ reservation_object_add_excl_fence(bo->resv, fence);
+
+ if (!evict) {
+ struct ttm_buffer_object *ghost_obj;
+
+ /**
+ * This should help pipeline ordinary buffer moves.
+ *
+ * Hang old buffer memory on a new buffer object,
+ * and leave it to be released when the GPU
+ * operation has completed.
+ */
+
+ fence_put(bo->moving);
+ bo->moving = fence_get(fence);
+
+ ret = ttm_buffer_object_transfer(bo, &ghost_obj);
+ if (ret)
+ return ret;
+
+ reservation_object_add_excl_fence(ghost_obj->resv, fence);
+
+ /**
+ * If we're not moving to fixed memory, the TTM object
+ * needs to stay alive. Otherwise hang it on the ghost
+ * bo to be unbound and destroyed.
+ */
+
+ if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
+ ghost_obj->ttm = NULL;
+ else
+ bo->ttm = NULL;
+
+ ttm_bo_unreserve(ghost_obj);
+ ttm_bo_unref(&ghost_obj);
+
+ } else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
+
+ /**
+ * The BO doesn't have a TTM that we need to bind/unbind. Just
+ * remember this eviction and free up the allocation.
+ */
+
+ spin_lock(&from->move_lock);
+ if (!from->move || fence_is_later(fence, from->move)) {
+ fence_put(from->move);
+ from->move = fence_get(fence);
+ }
+ spin_unlock(&from->move_lock);
+
+ ttm_bo_free_old_node(bo);
+
+ fence_put(bo->moving);
+ bo->moving = fence_get(fence);
+
+ } else {
+ /**
+ * Last resort, wait for the move to be completed.
+ *
+ * Should never happen in practice.
+ */
+
+ ret = ttm_bo_wait(bo, false, false);
+ if (ret)
+ return ret;
+
+ if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
+ ttm_tt_destroy(bo->ttm);
+ bo->ttm = NULL;
+ }
+ ttm_bo_free_old_node(bo);
+ }
+
+ *old_mem = *new_mem;
+ new_mem->mm_node = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL(ttm_bo_pipeline_move);
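[ttm_bo_pipeline_move() lets a driver return as soon as its copy engine has the move queued: the fence is attached to the reservation object and to bo->moving, and only the "last resort" branch ever blocks. A hypothetical driver ->move() path using it; foo_copy_submit() is a placeholder for the driver's copy-engine submission, not an API from the patch:]

static int foo_move_blit(struct ttm_buffer_object *bo, bool evict,
			 struct ttm_mem_reg *new_mem)
{
	struct fence *fence;
	int ret;

	/* queue the blit; returns the fence that signals on completion */
	ret = foo_copy_submit(bo, &bo->mem, new_mem, &fence);
	if (ret)
		return ret;

	/* hand the fence to TTM; no CPU-side wait on the common paths */
	ret = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
	fence_put(fence);
	return ret;
}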
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 3216878bc..a6ed9d5e5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -48,15 +48,14 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
{
int ret = 0;
- if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
+ if (likely(!bo->moving))
goto out_unlock;
/*
* Quick non-stalling check for idle.
*/
- ret = ttm_bo_wait(bo, false, true);
- if (likely(ret == 0))
- goto out_unlock;
+ if (fence_is_signaled(bo->moving))
+ goto out_clear;
/*
* If possible, avoid waiting for GPU with mmap_sem
@@ -68,17 +67,23 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
goto out_unlock;
up_read(&vma->vm_mm->mmap_sem);
- (void) ttm_bo_wait(bo, true, false);
+ (void) fence_wait(bo->moving, true);
goto out_unlock;
}
/*
* Ordinary wait.
*/
- ret = ttm_bo_wait(bo, true, false);
- if (unlikely(ret != 0))
+ ret = fence_wait(bo->moving, true);
+ if (unlikely(ret != 0)) {
ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
VM_FAULT_NOPAGE;
+ goto out_unlock;
+ }
+
+out_clear:
+ fence_put(bo->moving);
+ bo->moving = NULL;
out_unlock:
return ret;
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 5145da4bc..bc5aa573f 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -166,12 +166,10 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching);
void ttm_tt_destroy(struct ttm_tt *ttm)
{
- if (unlikely(ttm == NULL))
+ if (ttm == NULL)
return;
- if (ttm->state == tt_bound) {
- ttm_tt_unbind(ttm);
- }
+ ttm_tt_unbind(ttm);
if (ttm->state == tt_unbound)
ttm_tt_unpopulate(ttm);
@@ -298,7 +296,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
swap_storage = ttm->swap_storage;
BUG_ON(swap_storage == NULL);
- swap_space = file_inode(swap_storage)->i_mapping;
+ swap_space = swap_storage->f_mapping;
for (i = 0; i < ttm->num_pages; ++i) {
from_page = shmem_read_mapping_page(swap_space, i);
@@ -339,7 +337,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
if (!persistent_swap_storage) {
swap_storage = shmem_file_setup("ttm swap",
ttm->num_pages << PAGE_SHIFT,
- 0, 0);
+ 0);
if (IS_ERR(swap_storage)) {
pr_err("Failed allocating swap storage\n");
return PTR_ERR(swap_storage);
@@ -347,7 +345,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
} else
swap_storage = persistent_swap_storage;
- swap_space = file_inode(swap_storage)->i_mapping;
+ swap_space = swap_storage->f_mapping;
for (i = 0; i < ttm->num_pages; ++i) {
from_page = ttm->pages[i];
diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig
index 613ab0622..1616ec4f4 100644
--- a/drivers/gpu/drm/udl/Kconfig
+++ b/drivers/gpu/drm/udl/Kconfig
@@ -4,12 +4,7 @@ config DRM_UDL
depends on USB_SUPPORT
depends on USB_ARCH_HAS_HCD
select USB
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_DEFERRED_IO
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
help
This is a KMS driver for the USB displaylink video adapters.
Say M/Y to add support for these devices via drm/kms interfaces.
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index c20408940..17d34e0ed 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -94,7 +94,6 @@ static void udl_usb_disconnect(struct usb_interface *interface)
struct drm_device *dev = usb_get_intfdata(interface);
drm_kms_helper_poll_disable(dev);
- drm_connector_unregister_all(dev);
udl_fbdev_unplug(dev);
udl_drop_usb(dev);
drm_unplug_dev(dev);
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index d5df555ae..611b6b9bb 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -122,7 +122,7 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
return 0;
cmd = urb->transfer_buffer;
- for (i = y; i < height ; i++) {
+ for (i = y; i < y + height ; i++) {
const int line_offset = fb->base.pitches[0] * i;
const int byte_offset = line_offset + (x * bpp);
const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
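
The one-character loop fix above closes a real bounds bug: with the old condition `i < height`, a damage rectangle starting at y > 0 covered fewer than height scanlines, and none at all once y >= height. A standalone check with made-up coordinates:

/* Counts how many scanlines each loop bound visits for a damage
 * rectangle at y=100 with height=50; the numbers are arbitrary.
 */
#include <stdio.h>

int main(void)
{
    int y = 100, height = 50;
    int i, old = 0, fixed = 0;

    for (i = y; i < height; i++)        /* old, buggy bound */
        old++;
    for (i = y; i < y + height; i++)    /* fixed bound */
        fixed++;

    printf("old loop: %d lines, fixed loop: %d lines\n", old, fixed);
    return 0;   /* prints: old loop: 0 lines, fixed loop: 50 lines */
}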
@@ -203,6 +203,7 @@ static int udl_fb_open(struct fb_info *info, int user)
ufbdev->fb_count++;
+#ifdef CONFIG_DRM_FBDEV_EMULATION
if (fb_defio && (info->fbdefio == NULL)) {
/* enable defio at last moment if not disabled by client */
@@ -218,6 +219,7 @@ static int udl_fb_open(struct fb_info *info, int user)
info->fbdefio = fbdefio;
fb_deferred_io_init(info);
}
+#endif
pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n",
info->node, user, info, ufbdev->fb_count);
@@ -235,12 +237,14 @@ static int udl_fb_release(struct fb_info *info, int user)
ufbdev->fb_count--;
+#ifdef CONFIG_DRM_FBDEV_EMULATION
if ((ufbdev->fb_count == 0) && (info->fbdefio)) {
fb_deferred_io_cleanup(info);
kfree(info->fbdefio);
info->fbdefio = NULL;
info->fbops->fb_mmap = udl_fb_mmap;
}
+#endif
pr_warn("released /dev/fb%d user=%d count=%d\n",
info->node, user, ufbdev->fb_count);
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index b87afee44..f92ea9579 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -376,7 +376,7 @@ static int udl_crtc_page_flip(struct drm_crtc *crtc,
spin_lock_irqsave(&dev->event_lock, flags);
if (event)
- drm_send_vblank_event(dev, 0, event);
+ drm_crtc_send_vblank_event(crtc, event);
spin_unlock_irqrestore(&dev->event_lock, flags);
crtc->primary->fb = fb;
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index e5a9d3aaf..3f6704cf6 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -144,7 +144,7 @@ static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
return &vc4->bo_cache.size_list[page_index];
}
-void vc4_bo_cache_purge(struct drm_device *dev)
+static void vc4_bo_cache_purge(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -291,8 +291,6 @@ static void vc4_bo_cache_free_old(struct drm_device *dev)
/* Called on the last userspace/kernel unreference of the BO. Returns
* it to the BO cache if possible, otherwise frees it.
- *
- * Note that this is called with the struct_mutex held.
*/
void vc4_free_object(struct drm_gem_object *gem_bo)
{
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 0f18b76c7..8fc2b731b 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -46,12 +46,17 @@ struct vc4_crtc {
const struct vc4_crtc_data *data;
void __iomem *regs;
+ /* Timestamp at start of vblank irq - unaffected by lock delays. */
+ ktime_t t_vblank;
+
/* Which HVS channel we're using for our CRTC. */
int channel;
u8 lut_r[256];
u8 lut_g[256];
u8 lut_b[256];
+ /* Size in pixels of the COB memory allocated to this CRTC. */
+ u32 cob_size;
struct drm_pending_vblank_event *event;
};
@@ -146,6 +151,144 @@ int vc4_crtc_debugfs_regs(struct seq_file *m, void *unused)
}
#endif
+int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
+ unsigned int flags, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_crtc *vc4_crtc = vc4->crtc[crtc_id];
+ u32 val;
+ int fifo_lines;
+ int vblank_lines;
+ int ret = 0;
+
+ /*
+ * XXX Doesn't work well in interlaced mode yet, partially due
+ * to problems in vc4 kms or drm core interlaced mode handling,
+ * so disable for now in interlaced mode.
+ */
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return ret;
+
+ /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
+
+ /* Get optional system timestamp before query. */
+ if (stime)
+ *stime = ktime_get();
+
+ /*
+ * Read vertical scanline which is currently composed for our
+ * pixelvalve by the HVS, and also the scaler status.
+ */
+ val = HVS_READ(SCALER_DISPSTATX(vc4_crtc->channel));
+
+ /* Get optional system timestamp after query. */
+ if (etime)
+ *etime = ktime_get();
+
+ /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
+
+ /* Vertical position of hvs composed scanline. */
+ *vpos = VC4_GET_FIELD(val, SCALER_DISPSTATX_LINE);
+
+ /* No hpos info available. */
+ if (hpos)
+ *hpos = 0;
+
+ /* This is the offset we need for translating hvs -> pv scanout pos. */
+ fifo_lines = vc4_crtc->cob_size / mode->crtc_hdisplay;
+
+ if (fifo_lines > 0)
+ ret |= DRM_SCANOUTPOS_VALID;
+
+ /* HVS more than fifo_lines into frame for compositing? */
+ if (*vpos > fifo_lines) {
+ /*
+ * We are in active scanout and can get some meaningful results
+ * from HVS. The actual PV scanout cannot trail behind more
+ * than fifo_lines as that is the fifo's capacity. Assume that
+ * in active scanout the HVS and PV work in lockstep wrt. HVS
+ * refilling the fifo and PV consuming from the fifo, i.e.
+ * whenever the PV consumes and frees up a scanline in the
+ * fifo, the HVS will immediately refill it, therefore
+ * incrementing vpos. Therefore we choose HVS read position -
+ * fifo size in scanlines as an estimate of the real scanout
+ * position of the PV.
+ */
+ *vpos -= fifo_lines + 1;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ *vpos /= 2;
+
+ ret |= DRM_SCANOUTPOS_ACCURATE;
+ return ret;
+ }
+
+ /*
+ * Less: This happens when we are in vblank and the HVS, after getting
+ * the VSTART restart signal from the PV, just started refilling its
+ * fifo with new lines from the top-most lines of the new framebuffers.
+ * The PV does not scan out in vblank, so does not remove lines from
+ * the fifo, so the fifo will be full quickly and the HVS has to pause.
+ * We can't get meaningful readings wrt. scanline position of the PV
+ * and need to make things up in an approximate but consistent way.
+ */
+ ret |= DRM_SCANOUTPOS_IN_VBLANK;
+ vblank_lines = mode->crtc_vtotal - mode->crtc_vdisplay;
+
+ if (flags & DRM_CALLED_FROM_VBLIRQ) {
+ /*
+ * Assume the irq handler got called close to first
+ * line of vblank, so PV has about a full vblank's worth
+ * of scanlines to go, and as a base timestamp use the
+ * one taken at entry into vblank irq handler, so it
+ * is not affected by random delays due to lock
+ * contention on event_lock or vblank_time lock in
+ * the core.
+ */
+ *vpos = -vblank_lines;
+
+ if (stime)
+ *stime = vc4_crtc->t_vblank;
+ if (etime)
+ *etime = vc4_crtc->t_vblank;
+
+ /*
+ * If the HVS fifo is not yet full then we know for certain
+ * we are at the very beginning of vblank, as the hvs just
+ * started refilling, and the stime and etime timestamps
+ * truly correspond to start of vblank.
+ */
+ if ((val & SCALER_DISPSTATX_FULL) != SCALER_DISPSTATX_FULL)
+ ret |= DRM_SCANOUTPOS_ACCURATE;
+ } else {
+ /*
+ * No clue where we are inside vblank. Return a vpos of zero,
+ * which will cause calling code to just return the etime
+ * timestamp uncorrected. At least this is no worse than the
+ * standard fallback.
+ */
+ *vpos = 0;
+ }
+
+ return ret;
+}
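
The active-scanout arithmetic above can be checked in isolation: fifo_lines is the COB allocation divided by the horizontal resolution, and in active scanout the PV position is estimated as the HVS compose line minus that FIFO depth. A standalone re-computation with invented values for the COB size and the HVS line register:

/* Re-derives the HVS -> PV scanout translation with example numbers;
 * cob_size and the line value are invented, not real register reads.
 */
#include <stdio.h>

int main(void)
{
    unsigned int cob_size = 20480;          /* pixels of COB, example */
    unsigned int hdisplay = 1920;
    int fifo_lines = cob_size / hdisplay;   /* 10 lines of FIFO */
    int vpos = 400;                         /* SCALER_DISPSTATX_LINE, example */

    if (vpos > fifo_lines)
        vpos -= fifo_lines + 1;             /* estimated PV scanout line */

    printf("fifo_lines=%d, estimated PV vpos=%d\n", fifo_lines, vpos);
    return 0;   /* fifo_lines=10, estimated PV vpos=389 */
}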
+
+int vc4_crtc_get_vblank_timestamp(struct drm_device *dev, unsigned int crtc_id,
+ int *max_error, struct timeval *vblank_time,
+ unsigned flags)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_crtc *vc4_crtc = vc4->crtc[crtc_id];
+ struct drm_crtc *crtc = &vc4_crtc->base;
+ struct drm_crtc_state *state = crtc->state;
+
+ /* Helper routine in DRM core does all the work: */
+ return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc_id, max_error,
+ vblank_time, flags,
+ &state->adjusted_mode);
+}
+
static void vc4_crtc_destroy(struct drm_crtc *crtc)
{
drm_crtc_cleanup(crtc);
@@ -175,20 +318,22 @@ vc4_crtc_lut_load(struct drm_crtc *crtc)
HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]);
}
-static void
+static int
vc4_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
- uint32_t start, uint32_t size)
+ uint32_t size)
{
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
u32 i;
- for (i = start; i < start + size; i++) {
+ for (i = 0; i < size; i++) {
vc4_crtc->lut_r[i] = r[i] >> 8;
vc4_crtc->lut_g[i] = g[i] >> 8;
vc4_crtc->lut_b[i] = b[i] >> 8;
}
vc4_crtc_lut_load(crtc);
+
+ return 0;
}
static u32 vc4_get_fifo_full_level(u32 format)
@@ -395,6 +540,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_plane *plane;
unsigned long flags;
+ const struct drm_plane_state *plane_state;
u32 dlist_count = 0;
int ret;
@@ -404,18 +550,8 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
if (hweight32(state->connector_mask) > 1)
return -EINVAL;
- drm_atomic_crtc_state_for_each_plane(plane, state) {
- struct drm_plane_state *plane_state =
- state->state->plane_states[drm_plane_index(plane)];
-
- /* plane might not have changed, in which case take
- * current state:
- */
- if (!plane_state)
- plane_state = plane->state;
-
+ drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, state)
dlist_count += vc4_plane_dlist_size(plane_state);
- }
dlist_count++; /* Account for SCALER_CTL0_END. */
@@ -526,6 +662,7 @@ static irqreturn_t vc4_crtc_irq_handler(int irq, void *data)
irqreturn_t ret = IRQ_NONE;
if (stat & PV_INT_VFP_START) {
+ vc4_crtc->t_vblank = ktime_get();
CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START);
drm_crtc_handle_vblank(&vc4_crtc->base);
vc4_crtc_handle_page_flip(vc4_crtc);
@@ -730,6 +867,22 @@ static void vc4_set_crtc_possible_masks(struct drm_device *drm,
}
}
+static void
+vc4_crtc_get_cob_allocation(struct vc4_crtc *vc4_crtc)
+{
+ struct drm_device *drm = vc4_crtc->base.dev;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ u32 dispbase = HVS_READ(SCALER_DISPBASEX(vc4_crtc->channel));
+ /* Top/base are supposed to be 4-pixel aligned, but the
+ * Raspberry Pi firmware fills the low bits (which are
+ * presumably ignored).
+ */
+ u32 top = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_TOP) & ~3;
+ u32 base = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_BASE) & ~3;
+
+ vc4_crtc->cob_size = top - base + 4;
+}
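
vc4_crtc_get_cob_allocation() is plain mask-and-shift work on SCALER_DISPBASEX. The sketch below re-derives it in userspace with a locally defined FIELD_GET standing in for VC4_GET_FIELD and an invented register value, so the ~3 alignment rounding and the inclusive +4 can be verified:

/* TOP lives in bits 31:16, BASE in bits 15:0, both 4-pixel aligned;
 * the size is inclusive, hence the +4. The register value is made up.
 */
#include <stdint.h>
#include <stdio.h>

#define FIELD_GET(val, high, low) \
    (((val) >> (low)) & ((1u << ((high) - (low) + 1)) - 1))

int main(void)
{
    uint32_t dispbase = (0x4fffu << 16) | 0x0001; /* low bits filled by firmware */
    uint32_t top  = FIELD_GET(dispbase, 31, 16) & ~3u;
    uint32_t base = FIELD_GET(dispbase, 15, 0) & ~3u;

    printf("cob_size = %u pixels\n", top - base + 4); /* 20480 */
    return 0;
}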
+
static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -806,6 +959,8 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
crtc->cursor = cursor_plane;
}
+ vc4_crtc_get_cob_allocation(vc4_crtc);
+
CRTC_WRITE(PV_INTEN, 0);
CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START);
ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index 9817dbfa4..275fedbdb 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -208,14 +208,6 @@ static int vc4_dpi_connector_get_modes(struct drm_connector *connector)
return 0;
}
-static struct drm_encoder *
-vc4_dpi_connector_best_encoder(struct drm_connector *connector)
-{
- struct vc4_dpi_connector *dpi_connector =
- to_vc4_dpi_connector(connector);
- return dpi_connector->encoder;
-}
-
static const struct drm_connector_funcs vc4_dpi_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.detect = vc4_dpi_connector_detect,
@@ -228,7 +220,6 @@ static const struct drm_connector_funcs vc4_dpi_connector_funcs = {
static const struct drm_connector_helper_funcs vc4_dpi_connector_helper_funcs = {
.get_modes = vc4_dpi_connector_get_modes,
- .best_encoder = vc4_dpi_connector_best_encoder,
};
static struct drm_connector *vc4_dpi_connector_init(struct drm_device *dev,
@@ -236,14 +227,12 @@ static struct drm_connector *vc4_dpi_connector_init(struct drm_device *dev,
{
struct drm_connector *connector = NULL;
struct vc4_dpi_connector *dpi_connector;
- int ret = 0;
dpi_connector = devm_kzalloc(dev->dev, sizeof(*dpi_connector),
GFP_KERNEL);
- if (!dpi_connector) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (!dpi_connector)
+ return ERR_PTR(-ENOMEM);
+
connector = &dpi_connector->base;
dpi_connector->encoder = dpi->encoder;
@@ -260,12 +249,6 @@ static struct drm_connector *vc4_dpi_connector_init(struct drm_device *dev,
drm_mode_connector_attach_encoder(connector, dpi->encoder);
return connector;
-
- fail:
- if (connector)
- vc4_dpi_connector_destroy(connector);
-
- return ERR_PTR(ret);
}
static const struct drm_encoder_funcs vc4_dpi_encoder_funcs = {
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 250ed7e37..9ecef9385 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include "drm_fb_cma_helper.h"
#include "uapi/drm/vc4_drm.h"
@@ -43,12 +44,54 @@ void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index)
return map;
}
+static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct drm_vc4_get_param *args = data;
+ int ret;
+
+ if (args->pad != 0)
+ return -EINVAL;
+
+ switch (args->param) {
+ case DRM_VC4_PARAM_V3D_IDENT0:
+ ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
+ if (ret < 0)
+ return ret;
+ args->value = V3D_READ(V3D_IDENT0);
+ pm_runtime_put(&vc4->v3d->pdev->dev);
+ break;
+ case DRM_VC4_PARAM_V3D_IDENT1:
+ ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
+ if (ret < 0)
+ return ret;
+ args->value = V3D_READ(V3D_IDENT1);
+ pm_runtime_put(&vc4->v3d->pdev->dev);
+ break;
+ case DRM_VC4_PARAM_V3D_IDENT2:
+ ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
+ if (ret < 0)
+ return ret;
+ args->value = V3D_READ(V3D_IDENT2);
+ pm_runtime_put(&vc4->v3d->pdev->dev);
+ break;
+ case DRM_VC4_PARAM_SUPPORTS_BRANCHES:
+ args->value = true;
+ break;
+ default:
+ DRM_DEBUG("Unknown parameter %d\n", args->param);
+ return -EINVAL;
+ }
+
+ return 0;
+}
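
From userspace the new ioctl is reached as DRM_IOCTL_VC4_GET_PARAM. A minimal usage sketch follows; the <drm/vc4_drm.h> install path and the /dev/dri/card0 node are assumptions that differ between systems:

/* Query V3D_IDENT0 through VC4_GET_PARAM. Assumes the vc4 uapi header
 * is installed as <drm/vc4_drm.h> and that card0 is the vc4 device.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/vc4_drm.h>

int main(void)
{
    struct drm_vc4_get_param param;
    int fd = open("/dev/dri/card0", O_RDWR);

    if (fd < 0) {
        perror("open");
        return 1;
    }

    memset(&param, 0, sizeof(param));   /* pad must be zero */
    param.param = DRM_VC4_PARAM_V3D_IDENT0;

    if (ioctl(fd, DRM_IOCTL_VC4_GET_PARAM, &param) == 0)
        printf("V3D_IDENT0 = 0x%llx\n", (unsigned long long)param.value);
    else
        perror("VC4_GET_PARAM");

    close(fd);
    return 0;
}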
+
static void vc4_lastclose(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- if (vc4->fbdev)
- drm_fbdev_cma_restore_mode(vc4->fbdev);
+ drm_fbdev_cma_restore_mode(vc4->fbdev);
}
static const struct file_operations vc4_drm_fops = {
@@ -74,6 +117,7 @@ static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl,
DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(VC4_GET_PARAM, vc4_get_param_ioctl, DRM_RENDER_ALLOW),
};
static struct drm_driver vc4_drm_driver = {
@@ -92,6 +136,8 @@ static struct drm_driver vc4_drm_driver = {
.enable_vblank = vc4_enable_vblank,
.disable_vblank = vc4_disable_vblank,
.get_vblank_counter = drm_vblank_no_hw_counter,
+ .get_scanout_position = vc4_crtc_get_scanoutpos,
+ .get_vblank_timestamp = vc4_crtc_get_vblank_timestamp,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = vc4_debugfs_init,
@@ -99,7 +145,7 @@ static struct drm_driver vc4_drm_driver = {
#endif
.gem_create_object = vc4_create_object,
- .gem_free_object = vc4_free_object,
+ .gem_free_object_unlocked = vc4_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
@@ -176,7 +222,6 @@ static int vc4_drm_bind(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm;
- struct drm_connector *connector;
struct vc4_dev *vc4;
int ret = 0;
@@ -196,8 +241,6 @@ static int vc4_drm_bind(struct device *dev)
vc4_bo_cache_init(drm);
drm_mode_config_init(drm);
- if (ret)
- goto unref;
vc4_gem_init(drm);
@@ -211,27 +254,14 @@ static int vc4_drm_bind(struct device *dev)
if (ret < 0)
goto unbind_all;
- /* Connector registration has to occur after DRM device
- * registration, because it creates sysfs entries based on the
- * DRM device.
- */
- list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
- ret = drm_connector_register(connector);
- if (ret)
- goto unregister;
- }
-
vc4_kms_load(drm);
return 0;
-unregister:
- drm_dev_unregister(drm);
unbind_all:
component_unbind_all(dev, drm);
gem_destroy:
vc4_gem_destroy(drm);
-unref:
drm_dev_unref(drm);
vc4_bo_cache_destroy(drm);
return ret;
@@ -259,8 +289,8 @@ static const struct component_master_ops vc4_drm_ops = {
static struct platform_driver *const component_drivers[] = {
&vc4_hdmi_driver,
&vc4_dpi_driver,
- &vc4_crtc_driver,
&vc4_hvs_driver,
+ &vc4_crtc_driver,
&vc4_v3d_driver,
};
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 2e24616cd..428e24919 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -364,6 +364,9 @@ struct vc4_validated_shader_info {
uint32_t uniforms_src_size;
uint32_t num_texture_samples;
struct vc4_texture_sample_info *texture_samples;
+
+ uint32_t num_uniform_addr_offsets;
+ uint32_t *uniform_addr_offsets;
};
/**
@@ -424,6 +427,13 @@ extern struct platform_driver vc4_crtc_driver;
int vc4_enable_vblank(struct drm_device *dev, unsigned int crtc_id);
void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id);
int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
+int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
+ unsigned int flags, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
+int vc4_crtc_get_vblank_timestamp(struct drm_device *dev, unsigned int crtc_id,
+ int *max_error, struct timeval *vblank_time,
+ unsigned flags);
/* vc4_debugfs.c */
int vc4_debugfs_init(struct drm_minor *minor);
@@ -478,7 +488,7 @@ int vc4_kms_load(struct drm_device *dev);
struct drm_plane *vc4_plane_init(struct drm_device *dev,
enum drm_plane_type type);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
-u32 vc4_plane_dlist_size(struct drm_plane_state *state);
+u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
void vc4_plane_async_set_fb(struct drm_plane *plane,
struct drm_framebuffer *fb);
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 78ab08e8f..b262c5c26 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -53,10 +53,8 @@ vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
{
unsigned int i;
- mutex_lock(&dev->struct_mutex);
for (i = 0; i < state->user_state.bo_count; i++)
- drm_gem_object_unreference(state->bo[i]);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(state->bo[i]);
kfree(state);
}
@@ -536,8 +534,8 @@ vc4_cl_lookup_bos(struct drm_device *dev,
return -EINVAL;
}
- exec->bo = kcalloc(exec->bo_count, sizeof(struct drm_gem_cma_object *),
- GFP_KERNEL);
+ exec->bo = drm_calloc_large(exec->bo_count,
+ sizeof(struct drm_gem_cma_object *));
if (!exec->bo) {
DRM_ERROR("Failed to allocate validated BO pointers\n");
return -ENOMEM;
@@ -610,7 +608,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
* read the contents back for validation, and I think the
* bo->vaddr is uncached access.
*/
- temp = kmalloc(temp_size, GFP_KERNEL);
+ temp = drm_malloc_ab(temp_size, 1);
if (!temp) {
DRM_ERROR("Failed to allocate storage for copying "
"in bin/render CLs.\n");
@@ -677,7 +675,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
ret = vc4_validate_shader_recs(dev, exec);
fail:
- kfree(temp);
+ drm_free_large(temp);
return ret;
}
@@ -687,21 +685,18 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
struct vc4_dev *vc4 = to_vc4_dev(dev);
unsigned i;
- /* Need the struct lock for drm_gem_object_unreference(). */
- mutex_lock(&dev->struct_mutex);
if (exec->bo) {
for (i = 0; i < exec->bo_count; i++)
- drm_gem_object_unreference(&exec->bo[i]->base);
- kfree(exec->bo);
+ drm_gem_object_unreference_unlocked(&exec->bo[i]->base);
+ drm_free_large(exec->bo);
}
while (!list_empty(&exec->unref_list)) {
struct vc4_bo *bo = list_first_entry(&exec->unref_list,
struct vc4_bo, unref_head);
list_del(&bo->unref_head);
- drm_gem_object_unreference(&bo->base.base);
+ drm_gem_object_unreference_unlocked(&bo->base.base);
}
- mutex_unlock(&dev->struct_mutex);
mutex_lock(&vc4->power_lock);
if (--vc4->power_refcount == 0)
@@ -947,8 +942,8 @@ vc4_gem_destroy(struct drm_device *dev)
vc4->overflow_mem = NULL;
}
- vc4_bo_cache_destroy(dev);
-
if (vc4->hang_state)
vc4_free_hang_state(dev, vc4->hang_state);
+
+ vc4_bo_cache_destroy(dev);
}
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index fd2644d23..4452f3631 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -208,14 +208,6 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
return ret;
}
-static struct drm_encoder *
-vc4_hdmi_connector_best_encoder(struct drm_connector *connector)
-{
- struct vc4_hdmi_connector *hdmi_connector =
- to_vc4_hdmi_connector(connector);
- return hdmi_connector->encoder;
-}
-
static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.detect = vc4_hdmi_connector_detect,
@@ -228,7 +220,6 @@ static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs = {
.get_modes = vc4_hdmi_connector_get_modes,
- .best_encoder = vc4_hdmi_connector_best_encoder,
};
static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
@@ -465,12 +456,6 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(hdmi->hd_regs))
return PTR_ERR(hdmi->hd_regs);
- ddc_node = of_parse_phandle(dev->of_node, "ddc", 0);
- if (!ddc_node) {
- DRM_ERROR("Failed to find ddc node in device tree\n");
- return -ENODEV;
- }
-
hdmi->pixel_clock = devm_clk_get(dev, "pixel");
if (IS_ERR(hdmi->pixel_clock)) {
DRM_ERROR("Failed to get pixel clock\n");
@@ -482,7 +467,14 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(hdmi->hsm_clock);
}
+ ddc_node = of_parse_phandle(dev->of_node, "ddc", 0);
+ if (!ddc_node) {
+ DRM_ERROR("Failed to find ddc node in device tree\n");
+ return -ENODEV;
+ }
+
hdmi->ddc = of_find_i2c_adapter_by_node(ddc_node);
+ of_node_put(ddc_node);
if (!hdmi->ddc) {
DRM_DEBUG("Failed to get ddc i2c adapter by node\n");
return -EPROBE_DEFER;
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 861a623bc..4ac894d99 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -26,8 +26,7 @@ static void vc4_output_poll_changed(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- if (vc4->fbdev)
- drm_fbdev_cma_hotplug_event(vc4->fbdev);
+ drm_fbdev_cma_hotplug_event(vc4->fbdev);
}
struct vc4_commit {
@@ -111,6 +110,8 @@ static int vc4_atomic_commit(struct drm_device *dev,
int i;
uint64_t wait_seqno = 0;
struct vc4_commit *c;
+ struct drm_plane *plane;
+ struct drm_plane_state *new_state;
c = commit_init(state);
if (!c)
@@ -138,13 +139,7 @@ static int vc4_atomic_commit(struct drm_device *dev,
return ret;
}
- for (i = 0; i < dev->mode_config.num_total_plane; i++) {
- struct drm_plane *plane = state->planes[i];
- struct drm_plane_state *new_state = state->plane_states[i];
-
- if (!plane)
- continue;
-
+ for_each_plane_in_state(state, plane, new_state, i) {
if ((plane->state->fb != new_state->fb) && new_state->fb) {
struct drm_gem_cma_object *cma_bo =
drm_fb_cma_get_gem_obj(new_state->fb, 0);
@@ -160,7 +155,7 @@ static int vc4_atomic_commit(struct drm_device *dev,
* the software side now.
*/
- drm_atomic_helper_swap_state(dev, state);
+ drm_atomic_helper_swap_state(state, true);
/*
* Everything below can be run asynchronously without the need to grab
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 4037b52fd..29e4b400e 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -94,6 +94,14 @@ static const struct hvs_format {
.pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = true,
},
{
+ .drm = DRM_FORMAT_ABGR8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
+ .pixel_order = HVS_PIXEL_ORDER_ARGB, .has_alpha = true,
+ },
+ {
+ .drm = DRM_FORMAT_XBGR8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
+ .pixel_order = HVS_PIXEL_ORDER_ARGB, .has_alpha = false,
+ },
+ {
.drm = DRM_FORMAT_RGB565, .hvs = HVS_PIXEL_FORMAT_RGB565,
.pixel_order = HVS_PIXEL_ORDER_XRGB, .has_alpha = false,
},
@@ -690,9 +698,10 @@ u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist)
return vc4_state->dlist_count;
}
-u32 vc4_plane_dlist_size(struct drm_plane_state *state)
+u32 vc4_plane_dlist_size(const struct drm_plane_state *state)
{
- struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ const struct vc4_plane_state *vc4_state =
+ container_of(state, typeof(*vc4_state), base);
return vc4_state->dlist_count;
}
diff --git a/drivers/gpu/drm/vc4/vc4_qpu_defines.h b/drivers/gpu/drm/vc4/vc4_qpu_defines.h
index d5c2f3c85..f4e795a0d 100644
--- a/drivers/gpu/drm/vc4/vc4_qpu_defines.h
+++ b/drivers/gpu/drm/vc4/vc4_qpu_defines.h
@@ -70,7 +70,7 @@ enum qpu_raddr {
QPU_R_ELEM_QPU = 38,
QPU_R_NOP,
QPU_R_XY_PIXEL_COORD = 41,
- QPU_R_MS_REV_FLAGS = 41,
+ QPU_R_MS_REV_FLAGS = 42,
QPU_R_VPM = 48,
QPU_R_VPM_LD_BUSY,
QPU_R_VPM_LD_WAIT,
@@ -230,6 +230,15 @@ enum qpu_unpack_r4 {
#define QPU_COND_MUL_SHIFT 46
#define QPU_COND_MUL_MASK QPU_MASK(48, 46)
+#define QPU_BRANCH_COND_SHIFT 52
+#define QPU_BRANCH_COND_MASK QPU_MASK(55, 52)
+
+#define QPU_BRANCH_REL ((uint64_t)1 << 51)
+#define QPU_BRANCH_REG ((uint64_t)1 << 50)
+
+#define QPU_BRANCH_RADDR_A_SHIFT 45
+#define QPU_BRANCH_RADDR_A_MASK QPU_MASK(49, 45)
+
#define QPU_SF ((uint64_t)1 << 45)
#define QPU_WADDR_ADD_SHIFT 38
@@ -261,4 +270,10 @@ enum qpu_unpack_r4 {
#define QPU_OP_ADD_SHIFT 24
#define QPU_OP_ADD_MASK QPU_MASK(28, 24)
+#define QPU_LOAD_IMM_SHIFT 0
+#define QPU_LOAD_IMM_MASK QPU_MASK(31, 0)
+
+#define QPU_BRANCH_TARGET_SHIFT 0
+#define QPU_BRANCH_TARGET_MASK QPU_MASK(31, 0)
+
#endif /* VC4_QPU_DEFINES_H */
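
The new branch defines follow this header's existing MASK/SHIFT idiom. The standalone sketch below mirrors QPU_MASK() locally (an approximation of the kernel macro) to decode the 4-bit branch condition from an invented 64-bit instruction word:

/* Decodes the branch condition at bits 55:52 of a QPU instruction.
 * The macro body mirrors the header's idiom; the word is invented.
 */
#include <stdint.h>
#include <stdio.h>

#define QPU_MASK(high, low) \
    ((((uint64_t)1 << ((high) - (low) + 1)) - 1) << (low))

#define QPU_BRANCH_COND_SHIFT 52
#define QPU_BRANCH_COND_MASK  QPU_MASK(55, 52)

int main(void)
{
    uint64_t inst = (uint64_t)0x5 << QPU_BRANCH_COND_SHIFT; /* example */
    uint32_t cond = (uint32_t)((inst & QPU_BRANCH_COND_MASK)
                               >> QPU_BRANCH_COND_SHIFT);

    printf("branch cond field = %u\n", cond); /* prints 5 */
    return 0;
}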
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index f99eece4c..160942a91 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -366,7 +366,6 @@
# define SCALER_DISPBKGND_FILL BIT(24)
#define SCALER_DISPSTAT0 0x00000048
-#define SCALER_DISPBASE0 0x0000004c
# define SCALER_DISPSTATX_MODE_MASK VC4_MASK(31, 30)
# define SCALER_DISPSTATX_MODE_SHIFT 30
# define SCALER_DISPSTATX_MODE_DISABLED 0
@@ -375,6 +374,24 @@
# define SCALER_DISPSTATX_MODE_EOF 3
# define SCALER_DISPSTATX_FULL BIT(29)
# define SCALER_DISPSTATX_EMPTY BIT(28)
+# define SCALER_DISPSTATX_FRAME_COUNT_MASK VC4_MASK(17, 12)
+# define SCALER_DISPSTATX_FRAME_COUNT_SHIFT 12
+# define SCALER_DISPSTATX_LINE_MASK VC4_MASK(11, 0)
+# define SCALER_DISPSTATX_LINE_SHIFT 0
+
+#define SCALER_DISPBASE0 0x0000004c
+/* Last pixel in the COB (display FIFO memory) allocated to this HVS
+ * channel. Must be 4-pixel aligned (and thus 4 pixels less than the
+ * next COB base).
+ */
+# define SCALER_DISPBASEX_TOP_MASK VC4_MASK(31, 16)
+# define SCALER_DISPBASEX_TOP_SHIFT 16
+/* First pixel in the COB (display FIFO memory) allocated to this HVS
+ * channel. Must be 4-pixel aligned.
+ */
+# define SCALER_DISPBASEX_BASE_MASK VC4_MASK(15, 0)
+# define SCALER_DISPBASEX_BASE_SHIFT 0
+
#define SCALER_DISPCTRL1 0x00000050
#define SCALER_DISPBKGND1 0x00000054
#define SCALER_DISPBKGNDX(x) (SCALER_DISPBKGND0 + \
@@ -385,6 +402,9 @@
(x) * (SCALER_DISPSTAT1 - \
SCALER_DISPSTAT0))
#define SCALER_DISPBASE1 0x0000005c
+#define SCALER_DISPBASEX(x) (SCALER_DISPBASE0 + \
+ (x) * (SCALER_DISPBASE1 - \
+ SCALER_DISPBASE0))
#define SCALER_DISPCTRL2 0x00000060
#define SCALER_DISPCTRLX(x) (SCALER_DISPCTRL0 + \
(x) * (SCALER_DISPCTRL1 - \
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index 24c2c746e..9ce1d0adf 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -802,7 +802,7 @@ validate_gl_shader_rec(struct drm_device *dev,
uint32_t src_offset = *(uint32_t *)(pkt_u + o);
uint32_t *texture_handles_u;
void *uniform_data_u;
- uint32_t tex;
+ uint32_t tex, uni;
*(uint32_t *)(pkt_v + o) = bo[i]->paddr + src_offset;
@@ -840,6 +840,17 @@ validate_gl_shader_rec(struct drm_device *dev,
}
}
+ /* Fill in the uniform slots that need this shader's
+ * start-of-uniforms address (used for resetting the uniform
+ * stream in the presence of control flow).
+ */
+ for (uni = 0;
+ uni < validated_shader->num_uniform_addr_offsets;
+ uni++) {
+ uint32_t o = validated_shader->uniform_addr_offsets[uni];
+ ((uint32_t *)exec->uniforms_v)[o] = exec->uniforms_p;
+ }
+
*(uint32_t *)(pkt_v + o + 4) = exec->uniforms_p;
exec->uniforms_u += validated_shader->uniforms_src_size;
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index f67124b4c..2543cf5b8 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -39,7 +39,17 @@
#include "vc4_drv.h"
#include "vc4_qpu_defines.h"
+#define LIVE_REG_COUNT (32 + 32 + 4)
+
struct vc4_shader_validation_state {
+ /* Current IP being validated. */
+ uint32_t ip;
+
+ /* IP at the end of the BO; do not read shader[max_ip]. */
+ uint32_t max_ip;
+
+ uint64_t *shader;
+
struct vc4_texture_sample_info tmu_setup[2];
int tmu_write_count[2];
@@ -49,8 +59,30 @@ struct vc4_shader_validation_state {
*
* This is used for the validation of direct address memory reads.
*/
- uint32_t live_min_clamp_offsets[32 + 32 + 4];
- bool live_max_clamp_regs[32 + 32 + 4];
+ uint32_t live_min_clamp_offsets[LIVE_REG_COUNT];
+ bool live_max_clamp_regs[LIVE_REG_COUNT];
+ uint32_t live_immediates[LIVE_REG_COUNT];
+
+ /* Bitfield of which IPs are used as branch targets.
+ *
+ * Used for validation that the uniform stream is updated at the right
+ * points and clearing the texturing/clamping state.
+ */
+ unsigned long *branch_targets;
+
+ /* Set when entering a basic block, and cleared when the uniform
+ * address update is found. This is used to make sure that we don't
+ * read uniforms when the address is undefined.
+ */
+ bool needs_uniform_address_update;
+
+ /* Set when we find a backwards branch. If the branch is backwards,
+ * the target is probably doing an address reset to read uniforms,
+ * and so we need to be sure that a uniforms address is present in the
+ * stream, even if the shader didn't need to read uniforms in later
+ * basic blocks.
+ */
+ bool needs_uniform_address_for_loop;
};
static uint32_t
@@ -129,11 +161,11 @@ record_texture_sample(struct vc4_validated_shader_info *validated_shader,
}
static bool
-check_tmu_write(uint64_t inst,
- struct vc4_validated_shader_info *validated_shader,
+check_tmu_write(struct vc4_validated_shader_info *validated_shader,
struct vc4_shader_validation_state *validation_state,
bool is_mul)
{
+ uint64_t inst = validation_state->shader[validation_state->ip];
uint32_t waddr = (is_mul ?
QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
QPU_GET_FIELD(inst, QPU_WADDR_ADD));
@@ -162,7 +194,7 @@ check_tmu_write(uint64_t inst,
return false;
}
- /* We assert that the the clamped address is the first
+ /* We assert that the clamped address is the first
* argument, and the UBO base address is the second argument.
* This is arbitrary, but simpler than supporting flipping the
* two either way.
@@ -212,8 +244,14 @@ check_tmu_write(uint64_t inst,
/* Since direct uses a RADDR uniform reference, it will get counted in
* check_instruction_reads()
*/
- if (!is_direct)
+ if (!is_direct) {
+ if (validation_state->needs_uniform_address_update) {
+ DRM_ERROR("Texturing with undefined uniform address\n");
+ return false;
+ }
+
validated_shader->uniforms_size += 4;
+ }
if (submit) {
if (!record_texture_sample(validated_shader,
@@ -227,23 +265,144 @@ check_tmu_write(uint64_t inst,
return true;
}
+static bool require_uniform_address_uniform(struct vc4_validated_shader_info *validated_shader)
+{
+ uint32_t o = validated_shader->num_uniform_addr_offsets;
+ uint32_t num_uniforms = validated_shader->uniforms_size / 4;
+
+ validated_shader->uniform_addr_offsets =
+ krealloc(validated_shader->uniform_addr_offsets,
+ (o + 1) *
+ sizeof(*validated_shader->uniform_addr_offsets),
+ GFP_KERNEL);
+ if (!validated_shader->uniform_addr_offsets)
+ return false;
+
+ validated_shader->uniform_addr_offsets[o] = num_uniforms;
+ validated_shader->num_uniform_addr_offsets++;
+
+ return true;
+}
+
static bool
-check_reg_write(uint64_t inst,
- struct vc4_validated_shader_info *validated_shader,
+validate_uniform_address_write(struct vc4_validated_shader_info *validated_shader,
+ struct vc4_shader_validation_state *validation_state,
+ bool is_mul)
+{
+ uint64_t inst = validation_state->shader[validation_state->ip];
+ u32 add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
+ u32 raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
+ u32 raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
+ u32 add_lri = raddr_add_a_to_live_reg_index(inst);
+ /* We want our reset to be pointing at whatever uniform follows the
+ * uniforms base address.
+ */
+ u32 expected_offset = validated_shader->uniforms_size + 4;
+
+ /* We only support absolute uniform address changes, and we
+ * require that they be in the current basic block before any
+ * of its uniform reads.
+ *
+ * One could potentially emit more efficient QPU code, by
+ * noticing that (say) an if statement does uniform control
+ * flow for all threads and that the if reads the same number
+ * of uniforms on each side. However, this scheme is easy to
+ * validate so it's all we allow for now.
+ */
+ switch (QPU_GET_FIELD(inst, QPU_SIG)) {
+ case QPU_SIG_NONE:
+ case QPU_SIG_SCOREBOARD_UNLOCK:
+ case QPU_SIG_COLOR_LOAD:
+ case QPU_SIG_LOAD_TMU0:
+ case QPU_SIG_LOAD_TMU1:
+ break;
+ default:
+ DRM_ERROR("uniforms address change must be "
+ "normal math\n");
+ return false;
+ }
+
+ if (is_mul || QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
+ DRM_ERROR("Uniform address reset must be an ADD.\n");
+ return false;
+ }
+
+ if (QPU_GET_FIELD(inst, QPU_COND_ADD) != QPU_COND_ALWAYS) {
+ DRM_ERROR("Uniform address reset must be unconditional.\n");
+ return false;
+ }
+
+ if (QPU_GET_FIELD(inst, QPU_PACK) != QPU_PACK_A_NOP &&
+ !(inst & QPU_PM)) {
+ DRM_ERROR("No packing allowed on uniforms reset\n");
+ return false;
+ }
+
+ if (add_lri == -1) {
+ DRM_ERROR("First argument of uniform address write must be "
+ "an immediate value.\n");
+ return false;
+ }
+
+ if (validation_state->live_immediates[add_lri] != expected_offset) {
+ DRM_ERROR("Resetting uniforms with offset %db instead of %db\n",
+ validation_state->live_immediates[add_lri],
+ expected_offset);
+ return false;
+ }
+
+ if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
+ !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
+ DRM_ERROR("Second argument of uniform address write must be "
+ "a uniform.\n");
+ return false;
+ }
+
+ validation_state->needs_uniform_address_update = false;
+ validation_state->needs_uniform_address_for_loop = false;
+ return require_uniform_address_uniform(validated_shader);
+}
+
+static bool
+check_reg_write(struct vc4_validated_shader_info *validated_shader,
struct vc4_shader_validation_state *validation_state,
bool is_mul)
{
+ uint64_t inst = validation_state->shader[validation_state->ip];
uint32_t waddr = (is_mul ?
QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
QPU_GET_FIELD(inst, QPU_WADDR_ADD));
+ uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
+ bool ws = inst & QPU_WS;
+ bool is_b = is_mul ^ ws;
+ u32 lri = waddr_to_live_reg_index(waddr, is_b);
+
+ if (lri != -1) {
+ uint32_t cond_add = QPU_GET_FIELD(inst, QPU_COND_ADD);
+ uint32_t cond_mul = QPU_GET_FIELD(inst, QPU_COND_MUL);
+
+ if (sig == QPU_SIG_LOAD_IMM &&
+ QPU_GET_FIELD(inst, QPU_PACK) == QPU_PACK_A_NOP &&
+ ((is_mul && cond_mul == QPU_COND_ALWAYS) ||
+ (!is_mul && cond_add == QPU_COND_ALWAYS))) {
+ validation_state->live_immediates[lri] =
+ QPU_GET_FIELD(inst, QPU_LOAD_IMM);
+ } else {
+ validation_state->live_immediates[lri] = ~0;
+ }
+ }
switch (waddr) {
case QPU_W_UNIFORMS_ADDRESS:
- /* XXX: We'll probably need to support this for reladdr, but
- * it's definitely a security-related one.
- */
- DRM_ERROR("uniforms address load unsupported\n");
- return false;
+ if (is_b) {
+ DRM_ERROR("relative uniforms address change "
+ "unsupported\n");
+ return false;
+ }
+
+ return validate_uniform_address_write(validated_shader,
+ validation_state,
+ is_mul);
case QPU_W_TLB_COLOR_MS:
case QPU_W_TLB_COLOR_ALL:
@@ -261,7 +420,7 @@ check_reg_write(uint64_t inst,
case QPU_W_TMU1_T:
case QPU_W_TMU1_R:
case QPU_W_TMU1_B:
- return check_tmu_write(inst, validated_shader, validation_state,
+ return check_tmu_write(validated_shader, validation_state,
is_mul);
case QPU_W_HOST_INT:
@@ -294,10 +453,10 @@ check_reg_write(uint64_t inst,
}
static void
-track_live_clamps(uint64_t inst,
- struct vc4_validated_shader_info *validated_shader,
+track_live_clamps(struct vc4_validated_shader_info *validated_shader,
struct vc4_shader_validation_state *validation_state)
{
+ uint64_t inst = validation_state->shader[validation_state->ip];
uint32_t op_add = QPU_GET_FIELD(inst, QPU_OP_ADD);
uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
@@ -369,10 +528,10 @@ track_live_clamps(uint64_t inst,
}
static bool
-check_instruction_writes(uint64_t inst,
- struct vc4_validated_shader_info *validated_shader,
+check_instruction_writes(struct vc4_validated_shader_info *validated_shader,
struct vc4_shader_validation_state *validation_state)
{
+ uint64_t inst = validation_state->shader[validation_state->ip];
uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
bool ok;
@@ -382,20 +541,44 @@ check_instruction_writes(uint64_t inst,
return false;
}
- ok = (check_reg_write(inst, validated_shader, validation_state,
- false) &&
- check_reg_write(inst, validated_shader, validation_state,
- true));
+ ok = (check_reg_write(validated_shader, validation_state, false) &&
+ check_reg_write(validated_shader, validation_state, true));
- track_live_clamps(inst, validated_shader, validation_state);
+ track_live_clamps(validated_shader, validation_state);
return ok;
}
static bool
-check_instruction_reads(uint64_t inst,
- struct vc4_validated_shader_info *validated_shader)
+check_branch(uint64_t inst,
+ struct vc4_validated_shader_info *validated_shader,
+ struct vc4_shader_validation_state *validation_state,
+ int ip)
+{
+ int32_t branch_imm = QPU_GET_FIELD(inst, QPU_BRANCH_TARGET);
+ uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
+ uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
+
+ if ((int)branch_imm < 0)
+ validation_state->needs_uniform_address_for_loop = true;
+
+ /* We don't want to have to worry about validation of this, and
+ * there's no need for it.
+ */
+ if (waddr_add != QPU_W_NOP || waddr_mul != QPU_W_NOP) {
+ DRM_ERROR("branch instruction at %d wrote a register.\n",
+ validation_state->ip);
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+check_instruction_reads(struct vc4_validated_shader_info *validated_shader,
+ struct vc4_shader_validation_state *validation_state)
{
+ uint64_t inst = validation_state->shader[validation_state->ip];
uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
@@ -407,40 +590,204 @@ check_instruction_reads(uint64_t inst,
* already be OOM.
*/
validated_shader->uniforms_size += 4;
+
+ if (validation_state->needs_uniform_address_update) {
+ DRM_ERROR("Uniform read with undefined uniform "
+ "address\n");
+ return false;
+ }
}
return true;
}
+/* Make sure that all branches are absolute and point within the shader, and
+ * note their targets for later.
+ */
+static bool
+vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
+{
+ uint32_t max_branch_target = 0;
+ bool found_shader_end = false;
+ int ip;
+ int shader_end_ip = 0;
+ int last_branch = -2;
+
+ for (ip = 0; ip < validation_state->max_ip; ip++) {
+ uint64_t inst = validation_state->shader[ip];
+ int32_t branch_imm = QPU_GET_FIELD(inst, QPU_BRANCH_TARGET);
+ uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
+ uint32_t after_delay_ip = ip + 4;
+ uint32_t branch_target_ip;
+
+ if (sig == QPU_SIG_PROG_END) {
+ shader_end_ip = ip;
+ found_shader_end = true;
+ continue;
+ }
+
+ if (sig != QPU_SIG_BRANCH)
+ continue;
+
+ if (ip - last_branch < 4) {
+ DRM_ERROR("Branch at %d during delay slots\n", ip);
+ return false;
+ }
+ last_branch = ip;
+
+ if (inst & QPU_BRANCH_REG) {
+ DRM_ERROR("branching from register relative "
+ "not supported\n");
+ return false;
+ }
+
+ if (!(inst & QPU_BRANCH_REL)) {
+ DRM_ERROR("relative branching required\n");
+ return false;
+ }
+
+ /* The actual branch target is the instruction after the delay
+ * slots, plus whatever byte offset is in the low 32 bits of
+ * the instruction. Make sure we're not branching beyond the
+ * end of the shader object.
+ */
+ if (branch_imm % sizeof(inst) != 0) {
+ DRM_ERROR("branch target not aligned\n");
+ return false;
+ }
+
+ branch_target_ip = after_delay_ip + (branch_imm >> 3);
+ if (branch_target_ip >= validation_state->max_ip) {
+ DRM_ERROR("Branch at %d outside of shader (ip %d/%d)\n",
+ ip, branch_target_ip,
+ validation_state->max_ip);
+ return false;
+ }
+ set_bit(branch_target_ip, validation_state->branch_targets);
+
+ /* Make sure that the non-branching path is also not outside
+ * the shader.
+ */
+ if (after_delay_ip >= validation_state->max_ip) {
+ DRM_ERROR("Branch at %d continues past shader end "
+ "(%d/%d)\n",
+ ip, after_delay_ip, validation_state->max_ip);
+ return false;
+ }
+ set_bit(after_delay_ip, validation_state->branch_targets);
+ max_branch_target = max(max_branch_target, after_delay_ip);
+
+ /* There are two delay slots after program end is signaled
+ * that are still executed, then we're finished.
+ */
+ if (found_shader_end && ip == shader_end_ip + 2)
+ break;
+ }
+
+ if (max_branch_target > shader_end_ip) {
+ DRM_ERROR("Branch landed after QPU_SIG_PROG_END");
+ return false;
+ }
+
+ return true;
+}
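
The target computation above is worth checking by hand: the branch immediate is a byte offset relative to the instruction after the four delay slots, and QPU instructions are 8 bytes, hence the >> 3. A self-contained sketch with example values (like the kernel code, it relies on arithmetic right shift of a negative int):

/* Re-derives branch_target_ip as vc4_validate_branches() computes it:
 * target = (ip + 4 delay slots) + byte_offset / 8. Values are examples.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t ip = 10;                 /* branch instruction index */
    int32_t branch_imm = -64;         /* byte offset; a backwards loop */
    uint32_t after_delay_ip = ip + 4; /* QPU executes 4 delay slots */
    uint32_t target = after_delay_ip + (branch_imm >> 3);

    if (branch_imm % 8 != 0)
        printf("unaligned branch target\n");
    else
        printf("branch at %u lands at instruction %u\n", ip, target);
    return 0;   /* branch at 10 lands at instruction 6 */
}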
+
+/* Resets any known state for the shader, used when we may be branched to from
+ * multiple locations in the program (or at shader start).
+ */
+static void
+reset_validation_state(struct vc4_shader_validation_state *validation_state)
+{
+ int i;
+
+ for (i = 0; i < 8; i++)
+ validation_state->tmu_setup[i / 4].p_offset[i % 4] = ~0;
+
+ for (i = 0; i < LIVE_REG_COUNT; i++) {
+ validation_state->live_min_clamp_offsets[i] = ~0;
+ validation_state->live_max_clamp_regs[i] = false;
+ validation_state->live_immediates[i] = ~0;
+ }
+}
+
+static bool
+texturing_in_progress(struct vc4_shader_validation_state *validation_state)
+{
+ return (validation_state->tmu_write_count[0] != 0 ||
+ validation_state->tmu_write_count[1] != 0);
+}
+
+static bool
+vc4_handle_branch_target(struct vc4_shader_validation_state *validation_state)
+{
+ uint32_t ip = validation_state->ip;
+
+ if (!test_bit(ip, validation_state->branch_targets))
+ return true;
+
+ if (texturing_in_progress(validation_state)) {
+ DRM_ERROR("Branch target landed during TMU setup\n");
+ return false;
+ }
+
+ /* Reset our live values tracking, since this instruction may have
+ * multiple predecessors.
+ *
+ * One could potentially do analysis to determine that, for
+ * example, all predecessors have a live max clamp in the same
+ * register, but we don't bother with that.
+ */
+ reset_validation_state(validation_state);
+
+ /* Since we've entered a basic block from potentially multiple
+ * predecessors, we need the uniforms address to be updated before any
+ * uniforms are read. We require that after any branch point, the next
+ * uniform to be loaded is a uniform address offset. That uniform's
+ * offset will be marked by the uniform address register write
+ * validation, or by a one-off check at the end of the program.
+ */
+ validation_state->needs_uniform_address_update = true;
+
+ return true;
+}
+
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
{
bool found_shader_end = false;
int shader_end_ip = 0;
- uint32_t ip, max_ip;
- uint64_t *shader;
- struct vc4_validated_shader_info *validated_shader;
+ uint32_t ip;
+ struct vc4_validated_shader_info *validated_shader = NULL;
struct vc4_shader_validation_state validation_state;
- int i;
memset(&validation_state, 0, sizeof(validation_state));
+ validation_state.shader = shader_obj->vaddr;
+ validation_state.max_ip = shader_obj->base.size / sizeof(uint64_t);
- for (i = 0; i < 8; i++)
- validation_state.tmu_setup[i / 4].p_offset[i % 4] = ~0;
- for (i = 0; i < ARRAY_SIZE(validation_state.live_min_clamp_offsets); i++)
- validation_state.live_min_clamp_offsets[i] = ~0;
+ reset_validation_state(&validation_state);
- shader = shader_obj->vaddr;
- max_ip = shader_obj->base.size / sizeof(uint64_t);
+ validation_state.branch_targets =
+ kcalloc(BITS_TO_LONGS(validation_state.max_ip),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!validation_state.branch_targets)
+ goto fail;
validated_shader = kcalloc(1, sizeof(*validated_shader), GFP_KERNEL);
if (!validated_shader)
- return NULL;
+ goto fail;
+
+ if (!vc4_validate_branches(&validation_state))
+ goto fail;
- for (ip = 0; ip < max_ip; ip++) {
- uint64_t inst = shader[ip];
+ for (ip = 0; ip < validation_state.max_ip; ip++) {
+ uint64_t inst = validation_state.shader[ip];
uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
+ validation_state.ip = ip;
+
+ if (!vc4_handle_branch_target(&validation_state))
+ goto fail;
+
switch (sig) {
case QPU_SIG_NONE:
case QPU_SIG_WAIT_FOR_SCOREBOARD:
@@ -450,13 +797,14 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
case QPU_SIG_LOAD_TMU1:
case QPU_SIG_PROG_END:
case QPU_SIG_SMALL_IMM:
- if (!check_instruction_writes(inst, validated_shader,
+ if (!check_instruction_writes(validated_shader,
&validation_state)) {
DRM_ERROR("Bad write at ip %d\n", ip);
goto fail;
}
- if (!check_instruction_reads(inst, validated_shader))
+ if (!check_instruction_reads(validated_shader,
+ &validation_state))
goto fail;
if (sig == QPU_SIG_PROG_END) {
@@ -467,13 +815,18 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
break;
case QPU_SIG_LOAD_IMM:
- if (!check_instruction_writes(inst, validated_shader,
+ if (!check_instruction_writes(validated_shader,
&validation_state)) {
DRM_ERROR("Bad LOAD_IMM write at ip %d\n", ip);
goto fail;
}
break;
+ case QPU_SIG_BRANCH:
+ if (!check_branch(inst, validated_shader,
+ &validation_state, ip))
+ goto fail;
+ break;
default:
DRM_ERROR("Unsupported QPU signal %d at "
"instruction %d\n", sig, ip);
@@ -487,13 +840,28 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
break;
}
- if (ip == max_ip) {
+ if (ip == validation_state.max_ip) {
DRM_ERROR("shader failed to terminate before "
"shader BO end at %zd\n",
shader_obj->base.size);
goto fail;
}
+ /* If we did a backwards branch and we haven't emitted a uniforms
+ * reset since then, we still need the uniforms stream to have the
+ * uniforms address available so that the backwards branch can do its
+ * uniforms reset.
+ *
+ * We could potentially prove that the backwards branch doesn't
+ * contain any uses of uniforms until program exit, but that doesn't
+ * seem to be worth the trouble.
+ */
+ if (validation_state.needs_uniform_address_for_loop) {
+ if (!require_uniform_address_uniform(validated_shader))
+ goto fail;
+ validated_shader->uniforms_size += 4;
+ }
+
/* Again, no chance of integer overflow here because the worst case
* scenario is 8 bytes of uniforms plus handles per 8-byte
* instruction.
@@ -502,9 +870,12 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
(validated_shader->uniforms_size +
4 * validated_shader->num_texture_samples);
+ kfree(validation_state.branch_targets);
+
return validated_shader;
fail:
+ kfree(validation_state.branch_targets);
if (validated_shader) {
kfree(validated_shader->texture_samples);
kfree(validated_shader);
diff --git a/drivers/gpu/drm/vgem/Makefile b/drivers/gpu/drm/vgem/Makefile
index 3f4c7b842..bfcdea133 100644
--- a/drivers/gpu/drm/vgem/Makefile
+++ b/drivers/gpu/drm/vgem/Makefile
@@ -1,4 +1,4 @@
ccflags-y := -Iinclude/drm
-vgem-y := vgem_drv.o
+vgem-y := vgem_drv.o vgem_fence.o
obj-$(CONFIG_DRM_VGEM) += vgem.o
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 341f9be3d..c15bafb06 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -42,81 +42,38 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
-void vgem_gem_put_pages(struct drm_vgem_gem_object *obj)
-{
- drm_gem_put_pages(&obj->base, obj->pages, false, false);
- obj->pages = NULL;
-}
-
static void vgem_gem_free_object(struct drm_gem_object *obj)
{
struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
- drm_gem_free_mmap_offset(obj);
-
- if (vgem_obj->use_dma_buf && obj->dma_buf) {
- dma_buf_put(obj->dma_buf);
- obj->dma_buf = NULL;
- }
-
drm_gem_object_release(obj);
-
- if (vgem_obj->pages)
- vgem_gem_put_pages(vgem_obj);
-
- vgem_obj->pages = NULL;
-
kfree(vgem_obj);
}
-int vgem_gem_get_pages(struct drm_vgem_gem_object *obj)
-{
- struct page **pages;
-
- if (obj->pages || obj->use_dma_buf)
- return 0;
-
- pages = drm_gem_get_pages(&obj->base);
- if (IS_ERR(pages)) {
- return PTR_ERR(pages);
- }
-
- obj->pages = pages;
-
- return 0;
-}
-
static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_vgem_gem_object *obj = vma->vm_private_data;
- loff_t num_pages;
- pgoff_t page_offset;
- int ret;
-
/* We don't use vmf->pgoff since that has the fake offset */
- page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
- PAGE_SHIFT;
-
- num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);
-
- if (page_offset > num_pages)
- return VM_FAULT_SIGBUS;
-
- ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
- obj->pages[page_offset]);
- switch (ret) {
- case 0:
- return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- case -EBUSY:
- return VM_FAULT_RETRY;
- case -EFAULT:
- case -EINVAL:
- return VM_FAULT_SIGBUS;
- default:
- WARN_ON(1);
- return VM_FAULT_SIGBUS;
+ unsigned long vaddr = (unsigned long)vmf->virtual_address;
+ struct page *page;
+
+ page = shmem_read_mapping_page(file_inode(obj->base.filp)->i_mapping,
+ (vaddr - vma->vm_start) >> PAGE_SHIFT);
+ if (!IS_ERR(page)) {
+ vmf->page = page;
+ return 0;
+ } else switch (PTR_ERR(page)) {
+ case -ENOSPC:
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ case -EBUSY:
+ return VM_FAULT_RETRY;
+ case -EFAULT:
+ case -EINVAL:
+ return VM_FAULT_SIGBUS;
+ default:
+ WARN_ON_ONCE(PTR_ERR(page));
+ return VM_FAULT_SIGBUS;
}
}
@@ -126,6 +83,34 @@ static const struct vm_operations_struct vgem_gem_vm_ops = {
.close = drm_gem_vm_close,
};
+static int vgem_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct vgem_file *vfile;
+ int ret;
+
+ vfile = kzalloc(sizeof(*vfile), GFP_KERNEL);
+ if (!vfile)
+ return -ENOMEM;
+
+ file->driver_priv = vfile;
+
+ ret = vgem_fence_open(vfile);
+ if (ret) {
+ kfree(vfile);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vgem_preclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct vgem_file *vfile = file->driver_priv;
+
+ vgem_fence_close(vfile);
+ kfree(vfile);
+}
+
/* ioctls */
static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
@@ -134,57 +119,43 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
unsigned long size)
{
struct drm_vgem_gem_object *obj;
- struct drm_gem_object *gem_object;
- int err;
-
- size = roundup(size, PAGE_SIZE);
+ int ret;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
return ERR_PTR(-ENOMEM);
- gem_object = &obj->base;
-
- err = drm_gem_object_init(dev, gem_object, size);
- if (err)
- goto out;
-
- err = vgem_gem_get_pages(obj);
- if (err)
- goto out;
-
- err = drm_gem_handle_create(file, gem_object, handle);
- if (err)
- goto handle_out;
+ ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
+ if (ret)
+ goto err_free;
- drm_gem_object_unreference_unlocked(gem_object);
+ ret = drm_gem_handle_create(file, &obj->base, handle);
+ drm_gem_object_unreference_unlocked(&obj->base);
+ if (ret)
+ goto err;
- return gem_object;
+ return &obj->base;
-handle_out:
- drm_gem_object_release(gem_object);
-out:
+err_free:
kfree(obj);
- return ERR_PTR(err);
+err:
+ return ERR_PTR(ret);
}
static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
struct drm_gem_object *gem_object;
- uint64_t size;
- uint64_t pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
+ u64 pitch, size;
+ pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
size = args->height * pitch;
if (size == 0)
return -EINVAL;
gem_object = vgem_gem_create(dev, file, &args->handle, size);
-
- if (IS_ERR(gem_object)) {
- DRM_DEBUG_DRIVER("object creation failed\n");
+ if (IS_ERR(gem_object))
return PTR_ERR(gem_object);
- }
args->size = gem_object->size;
args->pitch = pitch;
@@ -194,26 +165,26 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
return 0;
}
-int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
- uint32_t handle, uint64_t *offset)
+static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset)
{
- int ret = 0;
struct drm_gem_object *obj;
+ int ret;
obj = drm_gem_object_lookup(file, handle);
if (!obj)
return -ENOENT;
+ if (!obj->filp) {
+ ret = -EINVAL;
+ goto unref;
+ }
+
ret = drm_gem_create_mmap_offset(obj);
if (ret)
goto unref;
- BUG_ON(!obj->filp);
-
- obj->filp->private_data = obj;
-
*offset = drm_vma_node_offset_addr(&obj->vma_node);
-
unref:
drm_gem_object_unreference_unlocked(obj);
@@ -221,26 +192,134 @@ unref:
}
static struct drm_ioctl_desc vgem_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
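
The two ioctls wired up here have a small uapi: attach returns an out_fence id and signal completes it. A hedged userspace round-trip follows; the <drm/vgem_drm.h> header path and the assumption that /dev/dri/card0 is the vgem node vary per system, and the dumb-buffer create is only there to obtain a valid handle:

/* Attach a write fence to a vgem BO, then signal it. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/drm.h>
#include <drm/vgem_drm.h>

int main(void)
{
    struct drm_mode_create_dumb create;
    struct drm_vgem_fence_attach attach;
    struct drm_vgem_fence_signal signal;
    int fd = open("/dev/dri/card0", O_RDWR);    /* assumed vgem node */

    if (fd < 0) {
        perror("open");
        return 1;
    }

    memset(&create, 0, sizeof(create));
    create.width = 64;
    create.height = 64;
    create.bpp = 32;
    if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create)) {
        perror("create dumb");
        return 1;
    }

    memset(&attach, 0, sizeof(attach));
    attach.handle = create.handle;
    attach.flags = VGEM_FENCE_WRITE;
    if (ioctl(fd, DRM_IOCTL_VGEM_FENCE_ATTACH, &attach)) {
        perror("fence attach");
        return 1;
    }

    /* An importer could now stall on the BO until the fence fires. */

    memset(&signal, 0, sizeof(signal));
    signal.fence = attach.out_fence;
    if (ioctl(fd, DRM_IOCTL_VGEM_FENCE_SIGNAL, &signal))
        perror("fence signal");

    close(fd);
    return 0;
}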
+static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ unsigned long flags = vma->vm_flags;
+ int ret;
+
+ ret = drm_gem_mmap(filp, vma);
+ if (ret)
+ return ret;
+
+ /* Keep the WC mapping set by drm_gem_mmap() but our pages
+ * are ordinary and not special.
+ */
+ vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
+ return 0;
+}
+
static const struct file_operations vgem_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
- .mmap = drm_gem_mmap,
+ .mmap = vgem_mmap,
.poll = drm_poll,
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
.release = drm_release,
};
+static int vgem_prime_pin(struct drm_gem_object *obj)
+{
+ long n_pages = obj->size >> PAGE_SHIFT;
+ struct page **pages;
+
+ /* Flush the object from the CPU cache so that importers can rely
+ * on coherent indirect access via the exported dma-address.
+ */
+ pages = drm_gem_get_pages(obj);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+
+ drm_clflush_pages(pages, n_pages);
+ drm_gem_put_pages(obj, pages, true, false);
+
+ return 0;
+}
+
+static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ struct sg_table *st;
+ struct page **pages;
+
+ pages = drm_gem_get_pages(obj);
+ if (IS_ERR(pages))
+ return ERR_CAST(pages);
+
+ st = drm_prime_pages_to_sg(pages, obj->size >> PAGE_SHIFT);
+ drm_gem_put_pages(obj, pages, false, false);
+
+ return st;
+}
+
+static void *vgem_prime_vmap(struct drm_gem_object *obj)
+{
+ long n_pages = obj->size >> PAGE_SHIFT;
+ struct page **pages;
+ void *addr;
+
+ pages = drm_gem_get_pages(obj);
+ if (IS_ERR(pages))
+ return NULL;
+
+ addr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
+ drm_gem_put_pages(obj, pages, false, false);
+
+ return addr;
+}
+
+static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+ vunmap(vaddr);
+}
+
+static int vgem_prime_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
+{
+ int ret;
+
+ if (obj->size < vma->vm_end - vma->vm_start)
+ return -EINVAL;
+
+ if (!obj->filp)
+ return -ENODEV;
+
+ ret = obj->filp->f_op->mmap(obj->filp, vma);
+ if (ret)
+ return ret;
+
+ fput(vma->vm_file);
+ vma->vm_file = get_file(obj->filp);
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+
+ return 0;
+}
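
With vgem this path is exercised by mmap'ing an exported dma-buf fd, at which point the DRM core calls back into vgem_prime_mmap() above. A helper sketch under assumptions (fd is a vgem node, handle a valid BO whose creation is omitted, and DRM_RDWR is needed for a writable mapping):

/* Export a GEM handle as a dma-buf fd and map it writably. */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <drm/drm.h>

static void *map_bo_via_prime(int fd, uint32_t handle, size_t size)
{
    struct drm_prime_handle args;
    void *ptr;

    memset(&args, 0, sizeof(args));
    args.handle = handle;
    args.flags = DRM_CLOEXEC | DRM_RDWR;
    if (ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args))
        return NULL;

    ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
               args.fd, 0);
    close(args.fd);     /* the mapping keeps the buffer alive */
    return ptr == MAP_FAILED ? NULL : ptr;
}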
+
static struct drm_driver vgem_driver = {
- .driver_features = DRIVER_GEM,
- .gem_free_object = vgem_gem_free_object,
+ .driver_features = DRIVER_GEM | DRIVER_PRIME,
+ .open = vgem_open,
+ .preclose = vgem_preclose,
+ .gem_free_object_unlocked = vgem_gem_free_object,
.gem_vm_ops = &vgem_gem_vm_ops,
.ioctls = vgem_ioctls,
+ .num_ioctls = ARRAY_SIZE(vgem_ioctls),
.fops = &vgem_driver_fops,
+
.dumb_create = vgem_gem_dumb_create,
.dumb_map_offset = vgem_gem_dumb_map,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .gem_prime_pin = vgem_prime_pin,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_get_sg_table = vgem_prime_get_sg_table,
+ .gem_prime_vmap = vgem_prime_vmap,
+ .gem_prime_vunmap = vgem_prime_vunmap,
+ .gem_prime_mmap = vgem_prime_mmap,
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -248,7 +327,7 @@ static struct drm_driver vgem_driver = {
.minor = DRIVER_MINOR,
};
-struct drm_device *vgem_device;
+static struct drm_device *vgem_device;
static int __init vgem_init(void)
{
@@ -260,10 +339,7 @@ static int __init vgem_init(void)
goto out;
}
- drm_dev_set_unique(vgem_device, "vgem");
-
ret = drm_dev_register(vgem_device, 0);
-
if (ret)
goto out_unref;
@@ -285,5 +361,6 @@ module_init(vgem_init);
module_exit(vgem_exit);
MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h
index e9f92f7ee..1f8798ad3 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.h
+++ b/drivers/gpu/drm/vgem/vgem_drv.h
@@ -32,15 +32,25 @@
#include <drm/drmP.h>
#include <drm/drm_gem.h>
+#include <uapi/drm/vgem_drm.h>
+
+struct vgem_file {
+ struct idr fence_idr;
+ struct mutex fence_mutex;
+};
+
#define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
struct drm_vgem_gem_object {
struct drm_gem_object base;
- struct page **pages;
- bool use_dma_buf;
};
-/* vgem_drv.c */
-extern void vgem_gem_put_pages(struct drm_vgem_gem_object *obj);
-extern int vgem_gem_get_pages(struct drm_vgem_gem_object *obj);
+int vgem_fence_open(struct vgem_file *file);
+int vgem_fence_attach_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *file);
+int vgem_fence_signal_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *file);
+void vgem_fence_close(struct vgem_file *file);
#endif
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
new file mode 100644
index 000000000..5c57c1ffa
--- /dev/null
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -0,0 +1,283 @@
+/*
+ * Copyright 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/reservation.h>
+
+#include "vgem_drv.h"
+
+#define VGEM_FENCE_TIMEOUT (10*HZ)
+
+struct vgem_fence {
+ struct fence base;
+ struct spinlock lock;
+ struct timer_list timer;
+};
+
+static const char *vgem_fence_get_driver_name(struct fence *fence)
+{
+ return "vgem";
+}
+
+static const char *vgem_fence_get_timeline_name(struct fence *fence)
+{
+ return "unbound";
+}
+
+static bool vgem_fence_signaled(struct fence *fence)
+{
+ return false;
+}
+
+static bool vgem_fence_enable_signaling(struct fence *fence)
+{
+ return true;
+}
+
+static void vgem_fence_release(struct fence *base)
+{
+ struct vgem_fence *fence = container_of(base, typeof(*fence), base);
+
+ del_timer_sync(&fence->timer);
+ fence_free(&fence->base);
+}
+
+static void vgem_fence_value_str(struct fence *fence, char *str, int size)
+{
+ snprintf(str, size, "%u", fence->seqno);
+}
+
+static void vgem_fence_timeline_value_str(struct fence *fence, char *str,
+ int size)
+{
+ snprintf(str, size, "%u", fence_is_signaled(fence) ? fence->seqno : 0);
+}
+
+static const struct fence_ops vgem_fence_ops = {
+ .get_driver_name = vgem_fence_get_driver_name,
+ .get_timeline_name = vgem_fence_get_timeline_name,
+ .enable_signaling = vgem_fence_enable_signaling,
+ .signaled = vgem_fence_signaled,
+ .wait = fence_default_wait,
+ .release = vgem_fence_release,
+
+ .fence_value_str = vgem_fence_value_str,
+ .timeline_value_str = vgem_fence_timeline_value_str,
+};
+
+static void vgem_fence_timeout(unsigned long data)
+{
+ struct vgem_fence *fence = (struct vgem_fence *)data;
+
+ fence_signal(&fence->base);
+}
+
+static struct fence *vgem_fence_create(struct vgem_file *vfile,
+ unsigned int flags)
+{
+ struct vgem_fence *fence;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return NULL;
+
+ spin_lock_init(&fence->lock);
+ fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
+ fence_context_alloc(1), 1);
+
+ setup_timer(&fence->timer, vgem_fence_timeout, (unsigned long)fence);
+
+ /* We force the fence to expire within 10s to prevent driver hangs */
+ mod_timer(&fence->timer, jiffies + VGEM_FENCE_TIMEOUT);
+
+ return &fence->base;
+}
+
+static int attach_dmabuf(struct drm_device *dev,
+ struct drm_gem_object *obj)
+{
+ struct dma_buf *dmabuf;
+
+ if (obj->dma_buf)
+ return 0;
+
+ dmabuf = dev->driver->gem_prime_export(dev, obj, 0);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ obj->dma_buf = dmabuf;
+ drm_gem_object_reference(obj);
+ return 0;
+}
+
+/*
+ * vgem_fence_attach_ioctl (DRM_IOCTL_VGEM_FENCE_ATTACH):
+ *
+ * Create and attach a fence to the vGEM handle. This fence is then exposed
+ * via the dma-buf reservation object and is visible to consumers of the exported
+ * dma-buf. If the flags contain VGEM_FENCE_WRITE, the fence indicates the
+ * vGEM buffer is being written to by the client and is exposed as an exclusive
+ * fence, otherwise the fence indicates the client is currently reading from the
+ * buffer and all future writes should wait for the client to signal its
+ * completion. Note that if a conflicting fence is already on the dma-buf (i.e.
+ * an exclusive fence when adding a read, or any fence when adding a write),
+ * -EBUSY is reported. Serialisation between operations should be handled
+ * by waiting upon the dma-buf.
+ *
+ * This returns the handle for the new fence that must be signaled within 10
+ * seconds (otherwise it will expire automatically). See
+ * vgem_fence_signal_ioctl (DRM_IOCTL_VGEM_FENCE_SIGNAL).
+ *
+ * If the vGEM handle does not exist, vgem_fence_attach_ioctl returns -ENOENT.
+ */
+int vgem_fence_attach_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *file)
+{
+ struct drm_vgem_fence_attach *arg = data;
+ struct vgem_file *vfile = file->driver_priv;
+ struct reservation_object *resv;
+ struct drm_gem_object *obj;
+ struct fence *fence;
+ int ret;
+
+ if (arg->flags & ~VGEM_FENCE_WRITE)
+ return -EINVAL;
+
+ if (arg->pad)
+ return -EINVAL;
+
+ obj = drm_gem_object_lookup(file, arg->handle);
+ if (!obj)
+ return -ENOENT;
+
+ ret = attach_dmabuf(dev, obj);
+ if (ret)
+ goto err;
+
+ fence = vgem_fence_create(vfile, arg->flags);
+ if (!fence) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /* Check for a conflicting fence */
+ resv = obj->dma_buf->resv;
+ if (!reservation_object_test_signaled_rcu(resv,
+ arg->flags & VGEM_FENCE_WRITE)) {
+ ret = -EBUSY;
+ goto err_fence;
+ }
+
+ /* Expose the fence via the dma-buf */
+ ret = 0;
+ mutex_lock(&resv->lock.base);
+ if (arg->flags & VGEM_FENCE_WRITE)
+ reservation_object_add_excl_fence(resv, fence);
+ else if ((ret = reservation_object_reserve_shared(resv)) == 0)
+ reservation_object_add_shared_fence(resv, fence);
+ mutex_unlock(&resv->lock.base);
+
+ /* Record the fence in our idr for later signaling */
+ if (ret == 0) {
+ mutex_lock(&vfile->fence_mutex);
+ ret = idr_alloc(&vfile->fence_idr, fence, 1, 0, GFP_KERNEL);
+ mutex_unlock(&vfile->fence_mutex);
+ if (ret > 0) {
+ arg->out_fence = ret;
+ ret = 0;
+ }
+ }
+err_fence:
+ if (ret) {
+ fence_signal(fence);
+ fence_put(fence);
+ }
+err:
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+}
+
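For context, a minimal userspace sketch of driving this ioctl. It assumes
libdrm installs this patch's uapi header as <drm/vgem_drm.h>, and that fd is
an open vGEM device with a GEM handle already created; the function name and
values are illustrative only:

	#include <errno.h>
	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/vgem_drm.h>

	/* Attach an exclusive (write) fence to a vGEM buffer and return
	 * the fence handle via *out_fence; it must be signaled within
	 * 10s or it expires on its own. */
	static int vgem_attach_write_fence(int fd, uint32_t handle,
					   uint32_t *out_fence)
	{
		struct drm_vgem_fence_attach attach;

		memset(&attach, 0, sizeof(attach));
		attach.handle = handle;
		attach.flags = VGEM_FENCE_WRITE;

		if (ioctl(fd, DRM_IOCTL_VGEM_FENCE_ATTACH, &attach))
			return -errno;	/* -EBUSY on a conflicting fence */

		*out_fence = attach.out_fence;
		return 0;
	}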
+/*
+ * vgem_fence_signal_ioctl (DRM_IOCTL_VGEM_FENCE_SIGNAL):
+ *
+ * Signal and consume a fence earlier attached to a vGEM handle using
+ * vgem_fence_attach_ioctl (DRM_IOCTL_VGEM_FENCE_ATTACH).
+ *
+ * All fences must be signaled within 10s of attachment, otherwise they
+ * will expire automatically (and vgem_fence_signal_ioctl returns -ETIMEDOUT).
+ *
+ * Signaling a fence indicates to all consumers of the dma-buf that the
+ * client has completed the operation associated with the fence, and that the
+ * buffer is then ready for consumption.
+ *
+ * If the fence does not exist (or has already been signaled by the client),
+ * vgem_fence_signal_ioctl returns -ENOENT.
+ */
+int vgem_fence_signal_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *file)
+{
+ struct vgem_file *vfile = file->driver_priv;
+ struct drm_vgem_fence_signal *arg = data;
+ struct fence *fence;
+ int ret = 0;
+
+ if (arg->flags)
+ return -EINVAL;
+
+ mutex_lock(&vfile->fence_mutex);
+ fence = idr_replace(&vfile->fence_idr, NULL, arg->fence);
+ mutex_unlock(&vfile->fence_mutex);
+ if (!fence)
+ return -ENOENT;
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
+
+ if (fence_is_signaled(fence))
+ ret = -ETIMEDOUT;
+
+ fence_signal(fence);
+ fence_put(fence);
+ return ret;
+}
+
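And the matching signal side, continuing the sketch above (same includes and
assumptions):

	/* Signal and consume a fence returned by vgem_attach_write_fence();
	 * -ETIMEDOUT means the 10s auto-expiry fired before we signaled. */
	static int vgem_signal_fence(int fd, uint32_t fence)
	{
		struct drm_vgem_fence_signal signal;

		memset(&signal, 0, sizeof(signal));
		signal.fence = fence;

		return ioctl(fd, DRM_IOCTL_VGEM_FENCE_SIGNAL, &signal) ? -errno : 0;
	}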
+int vgem_fence_open(struct vgem_file *vfile)
+{
+ mutex_init(&vfile->fence_mutex);
+ idr_init(&vfile->fence_idr);
+
+ return 0;
+}
+
+static int __vgem_fence_idr_fini(int id, void *p, void *data)
+{
+ fence_signal(p);
+ fence_put(p);
+ return 0;
+}
+
+void vgem_fence_close(struct vgem_file *vfile)
+{
+ idr_for_each(&vfile->fence_idr, __vgem_fence_idr_fini, vfile);
+ idr_destroy(&vfile->fence_idr);
+}
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index 4f20742e7..a04ef1c99 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -208,7 +208,7 @@ void via_reclaim_buffers_locked(struct drm_device *dev,
struct via_file_private *file_priv = file->driver_priv;
struct via_memblock *entry, *next;
- if (!(file->minor->master && file->master->lock.hw_lock))
+ if (!(dev->master && file->master->lock.hw_lock))
return;
drm_legacy_idlelock_take(&file->master->lock);
diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig
index 9983eadb8..e1afc3d3f 100644
--- a/drivers/gpu/drm/virtio/Kconfig
+++ b/drivers/gpu/drm/virtio/Kconfig
@@ -1,11 +1,7 @@
config DRM_VIRTIO_GPU
tristate "Virtio GPU driver"
depends on DRM && VIRTIO
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_TTM
help
This is the virtual GPU driver for virtio. It can be used with
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index d4305da88..4e192aa2d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -29,8 +29,8 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
-#define XRES_MIN 320
-#define YRES_MIN 200
+#define XRES_MIN 32
+#define YRES_MIN 32
#define XRES_DEF 1024
#define YRES_DEF 768
@@ -38,138 +38,11 @@
#define XRES_MAX 8192
#define YRES_MAX 8192
-static void
-virtio_gpu_hide_cursor(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_output *output)
-{
- output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
- output->cursor.resource_id = 0;
- virtio_gpu_cursor_ping(vgdev, output);
-}
-
-static int virtio_gpu_crtc_cursor_set(struct drm_crtc *crtc,
- struct drm_file *file_priv,
- uint32_t handle,
- uint32_t width,
- uint32_t height,
- int32_t hot_x, int32_t hot_y)
-{
- struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
- struct virtio_gpu_output *output =
- container_of(crtc, struct virtio_gpu_output, crtc);
- struct drm_gem_object *gobj = NULL;
- struct virtio_gpu_object *qobj = NULL;
- struct virtio_gpu_fence *fence = NULL;
- int ret = 0;
-
- if (handle == 0) {
- virtio_gpu_hide_cursor(vgdev, output);
- return 0;
- }
-
- /* lookup the cursor */
- gobj = drm_gem_object_lookup(file_priv, handle);
- if (gobj == NULL)
- return -ENOENT;
-
- qobj = gem_to_virtio_gpu_obj(gobj);
-
- if (!qobj->hw_res_handle) {
- ret = -EINVAL;
- goto out;
- }
-
- virtio_gpu_cmd_transfer_to_host_2d(vgdev, qobj->hw_res_handle, 0,
- cpu_to_le32(64),
- cpu_to_le32(64),
- 0, 0, &fence);
- ret = virtio_gpu_object_reserve(qobj, false);
- if (!ret) {
- reservation_object_add_excl_fence(qobj->tbo.resv,
- &fence->f);
- fence_put(&fence->f);
- virtio_gpu_object_unreserve(qobj);
- virtio_gpu_object_wait(qobj, false);
- }
-
- output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
- output->cursor.resource_id = cpu_to_le32(qobj->hw_res_handle);
- output->cursor.hot_x = cpu_to_le32(hot_x);
- output->cursor.hot_y = cpu_to_le32(hot_y);
- virtio_gpu_cursor_ping(vgdev, output);
- ret = 0;
-
-out:
- drm_gem_object_unreference_unlocked(gobj);
- return ret;
-}
-
-static int virtio_gpu_crtc_cursor_move(struct drm_crtc *crtc,
- int x, int y)
-{
- struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
- struct virtio_gpu_output *output =
- container_of(crtc, struct virtio_gpu_output, crtc);
-
- output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
- output->cursor.pos.x = cpu_to_le32(x);
- output->cursor.pos.y = cpu_to_le32(y);
- virtio_gpu_cursor_ping(vgdev, output);
- return 0;
-}
-
-static int virtio_gpu_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t flags)
-{
- struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
- struct virtio_gpu_output *output =
- container_of(crtc, struct virtio_gpu_output, crtc);
- struct drm_plane *plane = crtc->primary;
- struct virtio_gpu_framebuffer *vgfb;
- struct virtio_gpu_object *bo;
- unsigned long irqflags;
- uint32_t handle;
-
- plane->fb = fb;
- vgfb = to_virtio_gpu_framebuffer(plane->fb);
- bo = gem_to_virtio_gpu_obj(vgfb->obj);
- handle = bo->hw_res_handle;
-
- DRM_DEBUG("handle 0x%x%s, crtc %dx%d\n", handle,
- bo->dumb ? ", dumb" : "",
- crtc->mode.hdisplay, crtc->mode.vdisplay);
- if (bo->dumb) {
- virtio_gpu_cmd_transfer_to_host_2d
- (vgdev, handle, 0,
- cpu_to_le32(crtc->mode.hdisplay),
- cpu_to_le32(crtc->mode.vdisplay),
- 0, 0, NULL);
- }
- virtio_gpu_cmd_set_scanout(vgdev, output->index, handle,
- crtc->mode.hdisplay,
- crtc->mode.vdisplay, 0, 0);
- virtio_gpu_cmd_resource_flush(vgdev, handle, 0, 0,
- crtc->mode.hdisplay,
- crtc->mode.vdisplay);
-
- if (event) {
- spin_lock_irqsave(&crtc->dev->event_lock, irqflags);
- drm_send_vblank_event(crtc->dev, -1, event);
- spin_unlock_irqrestore(&crtc->dev->event_lock, irqflags);
- }
-
- return 0;
-}
-
static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
- .cursor_set2 = virtio_gpu_crtc_cursor_set,
- .cursor_move = virtio_gpu_crtc_cursor_move,
.set_config = drm_atomic_helper_set_config,
.destroy = drm_crtc_cleanup,
- .page_flip = virtio_gpu_page_flip,
+ .page_flip = drm_atomic_helper_page_flip,
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
@@ -180,8 +53,7 @@ static void virtio_gpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
struct virtio_gpu_framebuffer *virtio_gpu_fb
= to_virtio_gpu_framebuffer(fb);
- if (virtio_gpu_fb->obj)
- drm_gem_object_unreference_unlocked(virtio_gpu_fb->obj);
+ drm_gem_object_unreference_unlocked(virtio_gpu_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(virtio_gpu_fb);
}
@@ -267,6 +139,7 @@ static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
spin_lock_irqsave(&crtc->dev->event_lock, flags);
if (crtc->state->event)
drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
@@ -341,15 +214,6 @@ static int virtio_gpu_conn_mode_valid(struct drm_connector *connector,
return MODE_BAD;
}
-static struct drm_encoder*
-virtio_gpu_best_encoder(struct drm_connector *connector)
-{
- struct virtio_gpu_output *virtio_gpu_output =
- drm_connector_to_virtio_gpu_output(connector);
-
- return &virtio_gpu_output->enc;
-}
-
static const struct drm_encoder_helper_funcs virtio_gpu_enc_helper_funcs = {
.mode_set = virtio_gpu_enc_mode_set,
.enable = virtio_gpu_enc_enable,
@@ -359,7 +223,6 @@ static const struct drm_encoder_helper_funcs virtio_gpu_enc_helper_funcs = {
static const struct drm_connector_helper_funcs virtio_gpu_conn_helper_funcs = {
.get_modes = virtio_gpu_conn_get_modes,
.mode_valid = virtio_gpu_conn_mode_valid,
- .best_encoder = virtio_gpu_best_encoder,
};
static enum drm_connector_status virtio_gpu_conn_detect(
@@ -406,7 +269,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
struct drm_connector *connector = &output->conn;
struct drm_encoder *encoder = &output->enc;
struct drm_crtc *crtc = &output->crtc;
- struct drm_plane *plane;
+ struct drm_plane *primary, *cursor;
output->index = index;
if (index == 0) {
@@ -415,13 +278,17 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
output->info.r.height = cpu_to_le32(YRES_DEF);
}
- plane = virtio_gpu_plane_init(vgdev, index);
- if (IS_ERR(plane))
- return PTR_ERR(plane);
- drm_crtc_init_with_planes(dev, crtc, plane, NULL,
+ primary = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_PRIMARY, index);
+ if (IS_ERR(primary))
+ return PTR_ERR(primary);
+ cursor = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_CURSOR, index);
+ if (IS_ERR(cursor))
+ return PTR_ERR(cursor);
+ drm_crtc_init_with_planes(dev, crtc, primary, cursor,
&virtio_gpu_crtc_funcs, NULL);
drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs);
- plane->crtc = crtc;
+ primary->crtc = crtc;
+ cursor->crtc = crtc;
drm_connector_init(dev, connector, &virtio_gpu_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
@@ -458,14 +325,31 @@ virtio_gpu_user_framebuffer_create(struct drm_device *dev,
ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, mode_cmd, obj);
if (ret) {
kfree(virtio_gpu_fb);
- if (obj)
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_unreference_unlocked(obj);
return NULL;
}
return &virtio_gpu_fb->base;
}
+static void vgdev_atomic_commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+
+ drm_atomic_helper_commit_modeset_disables(dev, state);
+ drm_atomic_helper_commit_modeset_enables(dev, state);
+ drm_atomic_helper_commit_planes(dev, state, true);
+
+ drm_atomic_helper_commit_hw_done(state);
+
+ drm_atomic_helper_wait_for_vblanks(dev, state);
+ drm_atomic_helper_cleanup_planes(dev, state);
+}
+
+static struct drm_mode_config_helper_funcs virtio_mode_config_helpers = {
+ .atomic_commit_tail = vgdev_atomic_commit_tail,
+};
+
static const struct drm_mode_config_funcs virtio_gpu_mode_funcs = {
.fb_create = virtio_gpu_user_framebuffer_create,
.atomic_check = drm_atomic_helper_check,
@@ -477,7 +361,8 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
int i;
drm_mode_config_init(vgdev->ddev);
- vgdev->ddev->mode_config.funcs = (void *)&virtio_gpu_mode_funcs;
+ vgdev->ddev->mode_config.funcs = &virtio_gpu_mode_funcs;
+ vgdev->ddev->mode_config.helper_private = &virtio_mode_config_helpers;
/* modes will be validated against the framebuffer size */
vgdev->ddev->mode_config.min_width = XRES_MIN;
diff --git a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
index 88a39165e..7f0e93f87 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
@@ -27,16 +27,6 @@
#include "virtgpu_drv.h"
-int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master)
-{
- struct pci_dev *pdev = dev->pdev;
-
- if (pdev) {
- return drm_pci_set_busid(dev, master);
- }
- return 0;
-}
-
static void virtio_pci_kick_out_firmware_fb(struct pci_dev *pci_dev)
{
struct apertures_struct *ap;
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 3cc7afa77..c13f70cfc 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -117,7 +117,6 @@ static const struct file_operations virtio_gpu_driver_fops = {
static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
- .set_busid = drm_virtio_set_busid,
.load = virtio_gpu_driver_load,
.unload = virtio_gpu_driver_unload,
.open = virtio_gpu_driver_open,
@@ -143,7 +142,7 @@ static struct drm_driver driver = {
.gem_prime_vunmap = virtgpu_gem_prime_vunmap,
.gem_prime_mmap = virtgpu_gem_prime_mmap,
- .gem_free_object = virtio_gpu_gem_free_object,
+ .gem_free_object_unlocked = virtio_gpu_gem_free_object,
.gem_open_object = virtio_gpu_gem_object_open,
.gem_close_object = virtio_gpu_gem_object_close,
.fops = &virtio_gpu_driver_fops,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 0a54f43f8..b18ef3111 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -33,6 +33,7 @@
#include <drm/drmP.h>
#include <drm/drm_gem.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_crtc_helper.h>
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
@@ -48,7 +49,6 @@
#define DRIVER_PATCHLEVEL 1
/* virtgpu_drm_bus.c */
-int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master);
int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev);
struct virtio_gpu_object {
@@ -335,6 +335,7 @@ void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
/* virtio_gpu_plane.c */
struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
+ enum drm_plane_type type,
int index);
/* virtio_gpu_ttm.c */
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 70b44a234..925ca2520 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -38,6 +38,10 @@ static const uint32_t virtio_gpu_formats[] = {
DRM_FORMAT_ABGR8888,
};
+static const uint32_t virtio_gpu_cursor_formats[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
static void virtio_gpu_plane_destroy(struct drm_plane *plane)
{
kfree(plane);
@@ -58,16 +62,22 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
return 0;
}
-static void virtio_gpu_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
{
struct drm_device *dev = plane->dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
- struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(plane->crtc);
+ struct virtio_gpu_output *output = NULL;
struct virtio_gpu_framebuffer *vgfb;
struct virtio_gpu_object *bo;
uint32_t handle;
+ if (plane->state->crtc)
+ output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
+ if (old_state->crtc)
+ output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
+ WARN_ON(!output);
+
if (plane->state->fb) {
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
bo = gem_to_virtio_gpu_obj(vgfb->obj);
@@ -75,55 +85,149 @@ static void virtio_gpu_plane_atomic_update(struct drm_plane *plane,
if (bo->dumb) {
virtio_gpu_cmd_transfer_to_host_2d
(vgdev, handle, 0,
- cpu_to_le32(plane->state->crtc_w),
- cpu_to_le32(plane->state->crtc_h),
- plane->state->crtc_x, plane->state->crtc_y, NULL);
+ cpu_to_le32(plane->state->src_w >> 16),
+ cpu_to_le32(plane->state->src_h >> 16),
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16, NULL);
}
} else {
handle = 0;
}
- DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d\n", handle,
+ DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n", handle,
plane->state->crtc_w, plane->state->crtc_h,
- plane->state->crtc_x, plane->state->crtc_y);
+ plane->state->crtc_x, plane->state->crtc_y,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16);
virtio_gpu_cmd_set_scanout(vgdev, output->index, handle,
- plane->state->crtc_w,
- plane->state->crtc_h,
- plane->state->crtc_x,
- plane->state->crtc_y);
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16);
virtio_gpu_cmd_resource_flush(vgdev, handle,
- plane->state->crtc_x,
- plane->state->crtc_y,
- plane->state->crtc_w,
- plane->state->crtc_h);
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16);
}
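The >> 16 shifts above reflect that drm_plane_state's src_x/src_y/src_w/src_h
are 16.16 fixed-point values (whole pixels in the high 16 bits), while the
virtio commands take plain pixel counts. A standalone illustration of the
conversion:

	#include <stdint.h>

	/* DRM plane source coordinates are 16.16 fixed point; drop the
	 * fractional low 16 bits to get whole pixels. */
	static inline uint32_t fixed16_to_pixels(uint32_t v)
	{
		return v >> 16;
	}

	/* e.g. a 1024-pixel source width arrives as 1024 << 16 == 0x04000000 */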
+static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_device *dev = plane->dev;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_output *output = NULL;
+ struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_fence *fence = NULL;
+ struct virtio_gpu_object *bo = NULL;
+ uint32_t handle;
+ int ret = 0;
-static const struct drm_plane_helper_funcs virtio_gpu_plane_helper_funcs = {
+ if (plane->state->crtc)
+ output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
+ if (old_state->crtc)
+ output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
+ WARN_ON(!output);
+
+ if (plane->state->fb) {
+ vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+ bo = gem_to_virtio_gpu_obj(vgfb->obj);
+ handle = bo->hw_res_handle;
+ } else {
+ handle = 0;
+ }
+
+ if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
+ /* new cursor -- update & wait */
+ virtio_gpu_cmd_transfer_to_host_2d
+ (vgdev, handle, 0,
+ cpu_to_le32(plane->state->crtc_w),
+ cpu_to_le32(plane->state->crtc_h),
+ 0, 0, &fence);
+ ret = virtio_gpu_object_reserve(bo, false);
+ if (!ret) {
+ reservation_object_add_excl_fence(bo->tbo.resv,
+ &fence->f);
+ fence_put(&fence->f);
+ fence = NULL;
+ virtio_gpu_object_unreserve(bo);
+ virtio_gpu_object_wait(bo, false);
+ }
+ }
+
+ if (plane->state->fb != old_state->fb) {
+ DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
+ plane->state->crtc_x,
+ plane->state->crtc_y,
+ plane->state->fb ? plane->state->fb->hot_x : 0,
+ plane->state->fb ? plane->state->fb->hot_y : 0);
+ output->cursor.hdr.type =
+ cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
+ output->cursor.resource_id = cpu_to_le32(handle);
+ if (plane->state->fb) {
+ output->cursor.hot_x =
+ cpu_to_le32(plane->state->fb->hot_x);
+ output->cursor.hot_y =
+ cpu_to_le32(plane->state->fb->hot_y);
+ } else {
+ output->cursor.hot_x = cpu_to_le32(0);
+ output->cursor.hot_y = cpu_to_le32(0);
+ }
+ } else {
+ DRM_DEBUG("move +%d+%d\n",
+ plane->state->crtc_x,
+ plane->state->crtc_y);
+ output->cursor.hdr.type =
+ cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
+ }
+ output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
+ output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
+ virtio_gpu_cursor_ping(vgdev, output);
+}
+
+static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
+ .atomic_check = virtio_gpu_plane_atomic_check,
+ .atomic_update = virtio_gpu_primary_plane_update,
+};
+
+static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
.atomic_check = virtio_gpu_plane_atomic_check,
- .atomic_update = virtio_gpu_plane_atomic_update,
+ .atomic_update = virtio_gpu_cursor_plane_update,
};
struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
+ enum drm_plane_type type,
int index)
{
struct drm_device *dev = vgdev->ddev;
+ const struct drm_plane_helper_funcs *funcs;
struct drm_plane *plane;
- int ret;
+ const uint32_t *formats;
+ int ret, nformats;
plane = kzalloc(sizeof(*plane), GFP_KERNEL);
if (!plane)
return ERR_PTR(-ENOMEM);
+ if (type == DRM_PLANE_TYPE_CURSOR) {
+ formats = virtio_gpu_cursor_formats;
+ nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
+ funcs = &virtio_gpu_cursor_helper_funcs;
+ } else {
+ formats = virtio_gpu_formats;
+ nformats = ARRAY_SIZE(virtio_gpu_formats);
+ funcs = &virtio_gpu_primary_helper_funcs;
+ }
ret = drm_universal_plane_init(dev, plane, 1 << index,
&virtio_gpu_plane_funcs,
- virtio_gpu_formats,
- ARRAY_SIZE(virtio_gpu_formats),
- DRM_PLANE_TYPE_PRIMARY, NULL);
+ formats, nformats,
+ type, NULL);
if (ret)
goto err_plane_init;
- drm_plane_helper_add(plane, &virtio_gpu_plane_helper_funcs);
+ drm_plane_helper_add(plane, funcs);
return plane;
err_plane_init:
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index a05808156..80482ac5f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -375,6 +375,12 @@ static int virtio_gpu_bo_move(struct ttm_buffer_object *bo,
bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
+ int ret;
+
+ ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+ if (ret)
+ return ret;
+
virtio_gpu_move_null(bo, new_mem);
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 67cebb23c..aa04fb015 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -293,13 +293,10 @@ static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
struct vmw_cmdbuf_man *man = header->man;
u32 val;
- if (sizeof(header->handle) > 4)
- val = (header->handle >> 32);
- else
- val = 0;
+ val = upper_32_bits(header->handle);
vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
- val = (header->handle & 0xFFFFFFFFULL);
+ val = lower_32_bits(header->handle);
val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);
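upper_32_bits() and lower_32_bits() are the generic helpers from
<linux/kernel.h>; upper_32_bits() is written as a double 16-bit shift so it
stays well defined even when its argument is only 32 bits wide, which is what
the removed sizeof() guard was working around. A sketch of the split they
perform on the 64-bit header handle (illustrative helper, not part of the
patch):

	#include <linux/kernel.h>

	/* Split a 64-bit value into its two 32-bit halves. */
	static void split_handle(u64 handle, u32 *hi, u32 *lo)
	{
		*hi = upper_32_bits(handle);	/* ((handle >> 16) >> 16) */
		*lo = lower_32_bits(handle);	/* handle & 0xffffffff */
	}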
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 8d528fcf6..e8ae3dc47 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1053,15 +1053,14 @@ static struct vmw_master *vmw_master_check(struct drm_device *dev,
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
struct vmw_master *vmaster;
- if (file_priv->minor->type != DRM_MINOR_LEGACY ||
- !(flags & DRM_AUTH))
+ if (!drm_is_primary_client(file_priv) || !(flags & DRM_AUTH))
return NULL;
ret = mutex_lock_interruptible(&dev->master_mutex);
if (unlikely(ret != 0))
return ERR_PTR(-ERESTARTSYS);
- if (file_priv->is_master) {
+ if (drm_is_current_master(file_priv)) {
mutex_unlock(&dev->master_mutex);
return NULL;
}
@@ -1240,8 +1239,7 @@ static int vmw_master_set(struct drm_device *dev,
}
static void vmw_master_drop(struct drm_device *dev,
- struct drm_file *file_priv,
- bool from_release)
+ struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 89fb19443..74304b03f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -32,6 +32,7 @@
#include <drm/drmP.h>
#include <drm/vmwgfx_drm.h>
#include <drm/drm_hashtab.h>
+#include <drm/drm_auth.h>
#include <linux/suspend.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 1a1a87cbf..dc5beff2b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3625,9 +3625,7 @@ static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
(sw_context->cmd_bounce_size >> 1));
}
- if (sw_context->cmd_bounce != NULL)
- vfree(sw_context->cmd_bounce);
-
+ vfree(sw_context->cmd_bounce);
sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
if (sw_context->cmd_bounce == NULL) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index e959df6ed..26ac8e80a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -46,7 +46,7 @@ struct vmw_fence_manager {
bool goal_irq_on; /* Protected by @goal_irq_mutex */
bool seqno_valid; /* Protected by @lock, and may not be set to true
without the @goal_irq_mutex held. */
- unsigned ctx;
+ u64 ctx;
};
struct vmw_user_fence {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index e29da45a2..bf28ccc15 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1404,9 +1404,9 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
return 0;
}
-void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
- u16 *r, u16 *g, u16 *b,
- uint32_t start, uint32_t size)
+int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
+ u16 *r, u16 *g, u16 *b,
+ uint32_t size)
{
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
int i;
@@ -1418,6 +1418,8 @@ void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
}
+
+ return 0;
}
int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 57203212c..ff4803c10 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -195,9 +195,9 @@ struct vmw_display_unit {
void vmw_du_cleanup(struct vmw_display_unit *du);
void vmw_du_crtc_save(struct drm_crtc *crtc);
void vmw_du_crtc_restore(struct drm_crtc *crtc);
-void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
+int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
u16 *r, u16 *g, u16 *b,
- uint32_t start, uint32_t size);
+ uint32_t size);
int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height,
int32_t hot_x, int32_t hot_y);
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
index a18db4d53..c5d82a8a2 100644
--- a/drivers/gpu/host1x/cdma.c
+++ b/drivers/gpu/host1x/cdma.c
@@ -96,12 +96,12 @@ fail:
*/
static void host1x_pushbuffer_push(struct push_buffer *pb, u32 op1, u32 op2)
{
- u32 pos = pb->pos;
- u32 *p = (u32 *)((void *)pb->mapped + pos);
- WARN_ON(pos == pb->fence);
+ u32 *p = (u32 *)((void *)pb->mapped + pb->pos);
+
+ WARN_ON(pb->pos == pb->fence);
*(p++) = op1;
*(p++) = op2;
- pb->pos = (pos + 8) & (pb->size_bytes - 1);
+ pb->pos = (pb->pos + 8) & (pb->size_bytes - 1);
}
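The (pos + 8) & (pb->size_bytes - 1) advance works because the push buffer
size is a power of two, so the mask wraps the offset to zero at the end
without a branch. A minimal standalone sketch of the idiom:

	#include <stdint.h>

	/* Advance a ring-buffer byte offset by one 8-byte slot (two 32-bit
	 * opcodes), wrapping at the end. Requires size to be a power of
	 * two so (size - 1) is an all-ones mask. */
	static inline uint32_t ring_advance(uint32_t pos, uint32_t size)
	{
		return (pos + 8) & (size - 1);
	}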
/*
@@ -134,14 +134,19 @@ unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
enum cdma_event event)
{
for (;;) {
+ struct push_buffer *pb = &cdma->push_buffer;
unsigned int space;
- if (event == CDMA_EVENT_SYNC_QUEUE_EMPTY)
+ switch (event) {
+ case CDMA_EVENT_SYNC_QUEUE_EMPTY:
space = list_empty(&cdma->sync_queue) ? 1 : 0;
- else if (event == CDMA_EVENT_PUSH_BUFFER_SPACE) {
- struct push_buffer *pb = &cdma->push_buffer;
+ break;
+
+ case CDMA_EVENT_PUSH_BUFFER_SPACE:
space = host1x_pushbuffer_space(pb);
- } else {
+ break;
+
+ default:
WARN_ON(1);
return -EINVAL;
}
@@ -159,12 +164,14 @@ unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
mutex_lock(&cdma->lock);
continue;
}
+
cdma->event = event;
mutex_unlock(&cdma->lock);
down(&cdma->sem);
mutex_lock(&cdma->lock);
}
+
return 0;
}
@@ -234,6 +241,7 @@ static void update_cdma_locked(struct host1x_cdma *cdma)
/* Start timer on next pending syncpt */
if (job->timeout)
cdma_start_timer_locked(cdma, job);
+
break;
}
@@ -247,7 +255,9 @@ static void update_cdma_locked(struct host1x_cdma *cdma)
/* Pop push buffer slots */
if (job->num_slots) {
struct push_buffer *pb = &cdma->push_buffer;
+
host1x_pushbuffer_pop(pb, job->num_slots);
+
if (cdma->event == CDMA_EVENT_PUSH_BUFFER_SPACE)
signal = true;
}
@@ -269,11 +279,9 @@ static void update_cdma_locked(struct host1x_cdma *cdma)
void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma,
struct device *dev)
{
- u32 restart_addr;
- u32 syncpt_incrs;
- struct host1x_job *job = NULL;
- u32 syncpt_val;
struct host1x *host1x = cdma_to_host1x(cdma);
+ u32 restart_addr, syncpt_incrs, syncpt_val;
+ struct host1x_job *job = NULL;
syncpt_val = host1x_syncpt_load(cdma->timeout.syncpt);
@@ -342,9 +350,11 @@ void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma,
syncpt_val += syncpt_incrs;
}
- /* The following sumbits from the same client may be dependent on the
+ /*
+ * The following submits from the same client may be dependent on the
* failed submit and therefore they may fail. Force a small timeout
- * to make the queue cleanup faster */
+ * to make the queue cleanup faster.
+ */
list_for_each_entry_from(job, &cdma->sync_queue, list)
if (job->client == cdma->timeout.client)
@@ -375,6 +385,7 @@ int host1x_cdma_init(struct host1x_cdma *cdma)
err = host1x_pushbuffer_init(&cdma->push_buffer);
if (err)
return err;
+
return 0;
}
@@ -410,6 +421,7 @@ int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job)
/* init state on first submit with timeout value */
if (!cdma->timeout.initialized) {
int err;
+
err = host1x_hw_cdma_timeout_init(host1x, cdma,
job->syncpt_id);
if (err) {
@@ -418,6 +430,7 @@ int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job)
}
}
}
+
if (!cdma->running)
host1x_hw_cdma_start(host1x, cdma);
@@ -448,6 +461,7 @@ void host1x_cdma_push(struct host1x_cdma *cdma, u32 op1, u32 op2)
slots_free = host1x_cdma_wait_locked(cdma,
CDMA_EVENT_PUSH_BUFFER_SPACE);
}
+
cdma->slots_free = slots_free - 1;
cdma->slots_used++;
host1x_pushbuffer_push(pb, op1, op2);
diff --git a/drivers/gpu/host1x/channel.c b/drivers/gpu/host1x/channel.c
index b4ae3affb..8f437d924 100644
--- a/drivers/gpu/host1x/channel.c
+++ b/drivers/gpu/host1x/channel.c
@@ -83,9 +83,10 @@ EXPORT_SYMBOL(host1x_channel_put);
struct host1x_channel *host1x_channel_request(struct device *dev)
{
struct host1x *host = dev_get_drvdata(dev->parent);
- int max_channels = host->info->nb_channels;
+ unsigned int max_channels = host->info->nb_channels;
struct host1x_channel *channel = NULL;
- int index, err;
+ unsigned long index;
+ int err;
mutex_lock(&host->chlist_mutex);
diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c
index ee3d12b51..d9330fcc6 100644
--- a/drivers/gpu/host1x/debug.c
+++ b/drivers/gpu/host1x/debug.c
@@ -39,6 +39,7 @@ void host1x_debug_output(struct output *o, const char *fmt, ...)
va_start(args, fmt);
len = vsnprintf(o->buf, sizeof(o->buf), fmt, args);
va_end(args);
+
o->fn(o->ctx, o->buf, len);
}
@@ -48,13 +49,17 @@ static int show_channels(struct host1x_channel *ch, void *data, bool show_fifo)
struct output *o = data;
mutex_lock(&ch->reflock);
+
if (ch->refcount) {
mutex_lock(&ch->cdma.lock);
+
if (show_fifo)
host1x_hw_show_channel_fifo(m, ch, o);
+
host1x_hw_show_channel_cdma(m, ch, o);
mutex_unlock(&ch->cdma.lock);
}
+
mutex_unlock(&ch->reflock);
return 0;
@@ -62,22 +67,27 @@ static int show_channels(struct host1x_channel *ch, void *data, bool show_fifo)
static void show_syncpts(struct host1x *m, struct output *o)
{
- int i;
+ unsigned int i;
+
host1x_debug_output(o, "---- syncpts ----\n");
+
for (i = 0; i < host1x_syncpt_nb_pts(m); i++) {
u32 max = host1x_syncpt_read_max(m->syncpt + i);
u32 min = host1x_syncpt_load(m->syncpt + i);
+
if (!min && !max)
continue;
- host1x_debug_output(o, "id %d (%s) min %d max %d\n",
+
+ host1x_debug_output(o, "id %u (%s) min %d max %d\n",
i, m->syncpt[i].name, min, max);
}
for (i = 0; i < host1x_syncpt_nb_bases(m); i++) {
u32 base_val;
+
base_val = host1x_syncpt_load_wait_base(m->syncpt + i);
if (base_val)
- host1x_debug_output(o, "waitbase id %d val %d\n", i,
+ host1x_debug_output(o, "waitbase id %u val %d\n", i,
base_val);
}
@@ -114,7 +124,9 @@ static int host1x_debug_show_all(struct seq_file *s, void *unused)
.fn = write_to_seqfile,
.ctx = s
};
+
show_all(s->private, &o);
+
return 0;
}
@@ -124,7 +136,9 @@ static int host1x_debug_show(struct seq_file *s, void *unused)
.fn = write_to_seqfile,
.ctx = s
};
+
show_all_no_fifo(s->private, &o);
+
return 0;
}
@@ -134,10 +148,10 @@ static int host1x_debug_open_all(struct inode *inode, struct file *file)
}
static const struct file_operations host1x_debug_all_fops = {
- .open = host1x_debug_open_all,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+ .open = host1x_debug_open_all,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
static int host1x_debug_open(struct inode *inode, struct file *file)
@@ -146,10 +160,10 @@ static int host1x_debug_open(struct inode *inode, struct file *file)
}
static const struct file_operations host1x_debug_fops = {
- .open = host1x_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+ .open = host1x_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
static void host1x_debugfs_init(struct host1x *host1x)
@@ -201,6 +215,7 @@ void host1x_debug_dump(struct host1x *host1x)
struct output o = {
.fn = write_to_printk
};
+
show_all(host1x, &o);
}
@@ -209,5 +224,6 @@ void host1x_debug_dump_syncpts(struct host1x *host1x)
struct output o = {
.fn = write_to_printk
};
+
show_syncpts(host1x, &o);
}
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index ff348690d..a62317af7 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -63,13 +63,13 @@ u32 host1x_ch_readl(struct host1x_channel *ch, u32 r)
}
static const struct host1x_info host1x01_info = {
- .nb_channels = 8,
- .nb_pts = 32,
- .nb_mlocks = 16,
- .nb_bases = 8,
- .init = host1x01_init,
- .sync_offset = 0x3000,
- .dma_mask = DMA_BIT_MASK(32),
+ .nb_channels = 8,
+ .nb_pts = 32,
+ .nb_mlocks = 16,
+ .nb_bases = 8,
+ .init = host1x01_init,
+ .sync_offset = 0x3000,
+ .dma_mask = DMA_BIT_MASK(32),
};
static const struct host1x_info host1x02_info = {
@@ -102,7 +102,7 @@ static const struct host1x_info host1x05_info = {
.dma_mask = DMA_BIT_MASK(34),
};
-static struct of_device_id host1x_of_match[] = {
+static const struct of_device_id host1x_of_match[] = {
{ .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
{ .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
{ .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index dace12499..5220510f3 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -45,7 +45,7 @@ struct host1x_cdma_ops {
void (*start)(struct host1x_cdma *cdma);
void (*stop)(struct host1x_cdma *cdma);
void (*flush)(struct host1x_cdma *cdma);
- int (*timeout_init)(struct host1x_cdma *cdma, u32 syncpt_id);
+ int (*timeout_init)(struct host1x_cdma *cdma, unsigned int syncpt);
void (*timeout_destroy)(struct host1x_cdma *cdma);
void (*freeze)(struct host1x_cdma *cdma);
void (*resume)(struct host1x_cdma *cdma, u32 getptr);
@@ -82,21 +82,21 @@ struct host1x_intr_ops {
int (*init_host_sync)(struct host1x *host, u32 cpm,
void (*syncpt_thresh_work)(struct work_struct *work));
void (*set_syncpt_threshold)(
- struct host1x *host, u32 id, u32 thresh);
- void (*enable_syncpt_intr)(struct host1x *host, u32 id);
- void (*disable_syncpt_intr)(struct host1x *host, u32 id);
+ struct host1x *host, unsigned int id, u32 thresh);
+ void (*enable_syncpt_intr)(struct host1x *host, unsigned int id);
+ void (*disable_syncpt_intr)(struct host1x *host, unsigned int id);
void (*disable_all_syncpt_intrs)(struct host1x *host);
int (*free_syncpt_irq)(struct host1x *host);
};
struct host1x_info {
- int nb_channels; /* host1x: num channels supported */
- int nb_pts; /* host1x: num syncpoints supported */
- int nb_bases; /* host1x: num syncpoints supported */
- int nb_mlocks; /* host1x: number of mlocks */
- int (*init)(struct host1x *); /* initialize per SoC ops */
- int sync_offset;
- u64 dma_mask; /* mask of addressable memory */
+ unsigned int nb_channels; /* host1x: number of channels supported */
+ unsigned int nb_pts; /* host1x: number of syncpoints supported */
+ unsigned int nb_bases; /* host1x: number of syncpoint bases supported */
+ unsigned int nb_mlocks; /* host1x: number of mlocks supported */
+ int (*init)(struct host1x *host1x); /* initialize per SoC ops */
+ unsigned int sync_offset; /* offset of syncpoint registers */
+ u64 dma_mask; /* mask of addressable memory */
};
struct host1x {
@@ -109,7 +109,6 @@ struct host1x {
struct clk *clk;
struct mutex intr_mutex;
- struct workqueue_struct *intr_wq;
int intr_syncpt_irq;
const struct host1x_syncpt_ops *syncpt_op;
@@ -183,19 +182,20 @@ static inline int host1x_hw_intr_init_host_sync(struct host1x *host, u32 cpm,
}
static inline void host1x_hw_intr_set_syncpt_threshold(struct host1x *host,
- u32 id, u32 thresh)
+ unsigned int id,
+ u32 thresh)
{
host->intr_op->set_syncpt_threshold(host, id, thresh);
}
static inline void host1x_hw_intr_enable_syncpt_intr(struct host1x *host,
- u32 id)
+ unsigned int id)
{
host->intr_op->enable_syncpt_intr(host, id);
}
static inline void host1x_hw_intr_disable_syncpt_intr(struct host1x *host,
- u32 id)
+ unsigned int id)
{
host->intr_op->disable_syncpt_intr(host, id);
}
@@ -212,9 +212,9 @@ static inline int host1x_hw_intr_free_syncpt_irq(struct host1x *host)
static inline int host1x_hw_channel_init(struct host1x *host,
struct host1x_channel *channel,
- int chid)
+ unsigned int id)
{
- return host->channel_op->init(channel, host, chid);
+ return host->channel_op->init(channel, host, id);
}
static inline int host1x_hw_channel_submit(struct host1x *host,
@@ -243,9 +243,9 @@ static inline void host1x_hw_cdma_flush(struct host1x *host,
static inline int host1x_hw_cdma_timeout_init(struct host1x *host,
struct host1x_cdma *cdma,
- u32 syncpt_id)
+ unsigned int syncpt)
{
- return host->cdma_op->timeout_init(cdma, syncpt_id);
+ return host->cdma_op->timeout_init(cdma, syncpt);
}
static inline void host1x_hw_cdma_timeout_destroy(struct host1x *host,
diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c
index 305ea8f33..659c1bbfe 100644
--- a/drivers/gpu/host1x/hw/cdma_hw.c
+++ b/drivers/gpu/host1x/hw/cdma_hw.c
@@ -41,7 +41,7 @@ static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
{
struct host1x *host1x = cdma_to_host1x(cdma);
struct push_buffer *pb = &cdma->push_buffer;
- u32 i;
+ unsigned int i;
for (i = 0; i < syncpt_incrs; i++)
host1x_syncpt_incr(cdma->timeout.syncpt);
@@ -58,6 +58,7 @@ static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
&pb->phys, getptr);
getptr = (getptr + 8) & (pb->size_bytes - 1);
}
+
wmb();
}
@@ -162,12 +163,14 @@ static void cdma_stop(struct host1x_cdma *cdma)
struct host1x_channel *ch = cdma_to_channel(cdma);
mutex_lock(&cdma->lock);
+
if (cdma->running) {
host1x_cdma_wait_locked(cdma, CDMA_EVENT_SYNC_QUEUE_EMPTY);
host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
HOST1X_CHANNEL_DMACTRL);
cdma->running = false;
}
+
mutex_unlock(&cdma->lock);
}
@@ -213,11 +216,11 @@ static void cdma_resume(struct host1x_cdma *cdma, u32 getptr)
u32 cmdproc_stop;
dev_dbg(host1x->dev,
- "resuming channel (id %d, DMAGET restart = 0x%x)\n",
+ "resuming channel (id %u, DMAGET restart = 0x%x)\n",
ch->id, getptr);
cmdproc_stop = host1x_sync_readl(host1x, HOST1X_SYNC_CMDPROC_STOP);
- cmdproc_stop &= ~(BIT(ch->id));
+ cmdproc_stop &= ~BIT(ch->id);
host1x_sync_writel(host1x, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
cdma->torndown = false;
@@ -231,14 +234,11 @@ static void cdma_resume(struct host1x_cdma *cdma, u32 getptr)
*/
static void cdma_timeout_handler(struct work_struct *work)
{
+ u32 prev_cmdproc, cmdproc_stop, syncpt_val;
struct host1x_cdma *cdma;
struct host1x *host1x;
struct host1x_channel *ch;
- u32 syncpt_val;
-
- u32 prev_cmdproc, cmdproc_stop;
-
cdma = container_of(to_delayed_work(work), struct host1x_cdma,
timeout.wq);
host1x = cdma_to_host1x(cdma);
@@ -277,9 +277,9 @@ static void cdma_timeout_handler(struct work_struct *work)
return;
}
- dev_warn(host1x->dev, "%s: timeout: %d (%s), HW thresh %d, done %d\n",
- __func__, cdma->timeout.syncpt->id, cdma->timeout.syncpt->name,
- syncpt_val, cdma->timeout.syncpt_val);
+ dev_warn(host1x->dev, "%s: timeout: %u (%s), HW thresh %d, done %d\n",
+ __func__, cdma->timeout.syncpt->id, cdma->timeout.syncpt->name,
+ syncpt_val, cdma->timeout.syncpt_val);
/* stop HW, resetting channel/module */
host1x_hw_cdma_freeze(host1x, cdma);
@@ -291,7 +291,7 @@ static void cdma_timeout_handler(struct work_struct *work)
/*
* Init timeout resources
*/
-static int cdma_timeout_init(struct host1x_cdma *cdma, u32 syncpt_id)
+static int cdma_timeout_init(struct host1x_cdma *cdma, unsigned int syncpt)
{
INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler);
cdma->timeout.initialized = true;
@@ -306,6 +306,7 @@ static void cdma_timeout_destroy(struct host1x_cdma *cdma)
{
if (cdma->timeout.initialized)
cancel_delayed_work(&cdma->timeout.wq);
+
cdma->timeout.initialized = false;
}
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index 946c332c3..5e8df78b7 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -46,6 +46,7 @@ static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo,
*/
for (i = 0; i < words; i += TRACE_MAX_LENGTH) {
u32 num_words = min(words - i, TRACE_MAX_LENGTH);
+
offset += i * sizeof(u32);
trace_host1x_cdma_push_gather(dev_name(dev), bo,
@@ -66,6 +67,7 @@ static void submit_gathers(struct host1x_job *job)
struct host1x_job_gather *g = &job->gathers[i];
u32 op1 = host1x_opcode_gather(g->words);
u32 op2 = g->base + g->offset;
+
trace_write_gather(cdma, g->bo, g->offset, op1 & 0xffff);
host1x_cdma_push(cdma, op1, op2);
}
@@ -75,7 +77,8 @@ static inline void synchronize_syncpt_base(struct host1x_job *job)
{
struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
struct host1x_syncpt *sp = host->syncpt + job->syncpt_id;
- u32 id, value;
+ unsigned int id;
+ u32 value;
value = host1x_syncpt_read_max(sp);
id = sp->base->id;
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c
index cc3f1825c..7a4a3286e 100644
--- a/drivers/gpu/host1x/hw/debug_hw.c
+++ b/drivers/gpu/host1x/hw/debug_hw.c
@@ -40,8 +40,7 @@ enum {
static unsigned int show_channel_command(struct output *o, u32 val)
{
- unsigned mask;
- unsigned subop;
+ unsigned int mask, subop;
switch (val >> 28) {
case HOST1X_OPCODE_SETCLASS:
@@ -51,12 +50,11 @@ static unsigned int show_channel_command(struct output *o, u32 val)
val >> 6 & 0x3ff,
val >> 16 & 0xfff, mask);
return hweight8(mask);
- } else {
- host1x_debug_output(o, "SETCL(class=%03x)\n",
- val >> 6 & 0x3ff);
- return 0;
}
+ host1x_debug_output(o, "SETCL(class=%03x)\n", val >> 6 & 0x3ff);
+ return 0;
+
case HOST1X_OPCODE_INCR:
host1x_debug_output(o, "INCR(offset=%03x, [",
val >> 16 & 0xfff);
@@ -143,7 +141,8 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
struct host1x_job *job;
list_for_each_entry(job, &cdma->sync_queue, list) {
- int i;
+ unsigned int i;
+
host1x_debug_output(o, "\n%p: JOB, syncpt_id=%d, syncpt_val=%d, first_get=%08x, timeout=%d num_slots=%d, num_handles=%d\n",
job, job->syncpt_id, job->syncpt_end,
job->first_get, job->timeout,
@@ -190,7 +189,7 @@ static void host1x_debug_show_channel_cdma(struct host1x *host,
cbread = host1x_sync_readl(host, HOST1X_SYNC_CBREAD(ch->id));
cbstat = host1x_sync_readl(host, HOST1X_SYNC_CBSTAT(ch->id));
- host1x_debug_output(o, "%d-%s: ", ch->id, dev_name(ch->dev));
+ host1x_debug_output(o, "%u-%s: ", ch->id, dev_name(ch->dev));
if (HOST1X_CHANNEL_DMACTRL_DMASTOP_V(dmactrl) ||
!ch->cdma.push_buffer.mapped) {
@@ -200,14 +199,13 @@ static void host1x_debug_show_channel_cdma(struct host1x *host,
if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) == HOST1X_CLASS_HOST1X &&
HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
- HOST1X_UCLASS_WAIT_SYNCPT)
+ HOST1X_UCLASS_WAIT_SYNCPT)
host1x_debug_output(o, "waiting on syncpt %d val %d\n",
cbread >> 24, cbread & 0xffffff);
else if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) ==
- HOST1X_CLASS_HOST1X &&
- HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
- HOST1X_UCLASS_WAIT_SYNCPT_BASE) {
-
+ HOST1X_CLASS_HOST1X &&
+ HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
+ HOST1X_UCLASS_WAIT_SYNCPT_BASE) {
base = (cbread >> 16) & 0xff;
baseval =
host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(base));
@@ -236,7 +234,7 @@ static void host1x_debug_show_channel_fifo(struct host1x *host,
u32 val, rd_ptr, wr_ptr, start, end;
unsigned int data_count = 0;
- host1x_debug_output(o, "%d: fifo:\n", ch->id);
+ host1x_debug_output(o, "%u: fifo:\n", ch->id);
val = host1x_ch_readl(ch, HOST1X_CHANNEL_FIFOSTAT);
host1x_debug_output(o, "FIFOSTAT %08x\n", val);
@@ -290,20 +288,22 @@ static void host1x_debug_show_channel_fifo(struct host1x *host,
static void host1x_debug_show_mlocks(struct host1x *host, struct output *o)
{
- int i;
+ unsigned int i;
host1x_debug_output(o, "---- mlocks ----\n");
+
for (i = 0; i < host1x_syncpt_nb_mlocks(host); i++) {
u32 owner =
host1x_sync_readl(host, HOST1X_SYNC_MLOCK_OWNER(i));
if (HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(owner))
- host1x_debug_output(o, "%d: locked by channel %d\n",
+ host1x_debug_output(o, "%u: locked by channel %u\n",
i, HOST1X_SYNC_MLOCK_OWNER_CHID_V(owner));
else if (HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(owner))
- host1x_debug_output(o, "%d: locked by cpu\n", i);
+ host1x_debug_output(o, "%u: locked by cpu\n", i);
else
- host1x_debug_output(o, "%d: unlocked\n", i);
+ host1x_debug_output(o, "%u: unlocked\n", i);
}
+
host1x_debug_output(o, "\n");
}
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
index e1e31e9e6..dacb8009a 100644
--- a/drivers/gpu/host1x/hw/intr_hw.c
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -38,14 +38,14 @@ static void host1x_intr_syncpt_handle(struct host1x_syncpt *syncpt)
host1x_sync_writel(host, BIT_MASK(id),
HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(BIT_WORD(id)));
- queue_work(host->intr_wq, &syncpt->intr.work);
+ schedule_work(&syncpt->intr.work);
}
static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
{
struct host1x *host = dev_id;
unsigned long reg;
- int i, id;
+ unsigned int i, id;
for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); i++) {
reg = host1x_sync_readl(host,
@@ -62,7 +62,7 @@ static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host)
{
- u32 i;
+ unsigned int i;
for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); ++i) {
host1x_sync_writel(host, 0xffffffffu,
@@ -72,10 +72,12 @@ static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host)
}
}
-static int _host1x_intr_init_host_sync(struct host1x *host, u32 cpm,
- void (*syncpt_thresh_work)(struct work_struct *))
+static int
+_host1x_intr_init_host_sync(struct host1x *host, u32 cpm,
+ void (*syncpt_thresh_work)(struct work_struct *))
{
- int i, err;
+ unsigned int i;
+ int err;
host1x_hw_intr_disable_all_syncpt_intrs(host);
@@ -106,18 +108,21 @@ static int _host1x_intr_init_host_sync(struct host1x *host, u32 cpm,
}
static void _host1x_intr_set_syncpt_threshold(struct host1x *host,
- u32 id, u32 thresh)
+ unsigned int id,
+ u32 thresh)
{
host1x_sync_writel(host, thresh, HOST1X_SYNC_SYNCPT_INT_THRESH(id));
}
-static void _host1x_intr_enable_syncpt_intr(struct host1x *host, u32 id)
+static void _host1x_intr_enable_syncpt_intr(struct host1x *host,
+ unsigned int id)
{
host1x_sync_writel(host, BIT_MASK(id),
HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(BIT_WORD(id)));
}
-static void _host1x_intr_disable_syncpt_intr(struct host1x *host, u32 id)
+static void _host1x_intr_disable_syncpt_intr(struct host1x *host,
+ unsigned int id)
{
host1x_sync_writel(host, BIT_MASK(id),
HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(BIT_WORD(id)));
@@ -127,8 +132,13 @@ static void _host1x_intr_disable_syncpt_intr(struct host1x *host, u32 id)
static int _host1x_free_syncpt_irq(struct host1x *host)
{
+ unsigned int i;
+
devm_free_irq(host->dev, host->intr_syncpt_irq, host);
- flush_workqueue(host->intr_wq);
+
+ for (i = 0; i < host->info->nb_pts; i++)
+ cancel_work_sync(&host->syncpt[i].intr.work);
+
return 0;
}
diff --git a/drivers/gpu/host1x/hw/syncpt_hw.c b/drivers/gpu/host1x/hw/syncpt_hw.c
index 56e85395a..c93f74fcc 100644
--- a/drivers/gpu/host1x/hw/syncpt_hw.c
+++ b/drivers/gpu/host1x/hw/syncpt_hw.c
@@ -26,8 +26,9 @@
*/
static void syncpt_restore(struct host1x_syncpt *sp)
{
+ u32 min = host1x_syncpt_read_min(sp);
struct host1x *host = sp->host;
- int min = host1x_syncpt_read_min(sp);
+
host1x_sync_writel(host, min, HOST1X_SYNC_SYNCPT(sp->id));
}
@@ -37,6 +38,7 @@ static void syncpt_restore(struct host1x_syncpt *sp)
static void syncpt_restore_wait_base(struct host1x_syncpt *sp)
{
struct host1x *host = sp->host;
+
host1x_sync_writel(host, sp->base_val,
HOST1X_SYNC_SYNCPT_BASE(sp->id));
}
@@ -47,6 +49,7 @@ static void syncpt_restore_wait_base(struct host1x_syncpt *sp)
static void syncpt_read_wait_base(struct host1x_syncpt *sp)
{
struct host1x *host = sp->host;
+
sp->base_val =
host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(sp->id));
}
@@ -85,6 +88,7 @@ static int syncpt_cpu_incr(struct host1x_syncpt *sp)
if (!host1x_syncpt_client_managed(sp) &&
host1x_syncpt_idle(sp))
return -EINVAL;
+
host1x_sync_writel(host, BIT_MASK(sp->id),
HOST1X_SYNC_SYNCPT_CPU_INCR(reg_offset));
wmb();
@@ -95,10 +99,10 @@ static int syncpt_cpu_incr(struct host1x_syncpt *sp)
/* remove a wait pointed to by patch_addr */
static int syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
{
- u32 override = host1x_class_host_wait_syncpt(
- HOST1X_SYNCPT_RESERVED, 0);
+ u32 override = host1x_class_host_wait_syncpt(HOST1X_SYNCPT_RESERVED, 0);
*((u32 *)patch_addr) = override;
+
return 0;
}
diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c
index 2491bf82e..8b4fad0ab 100644
--- a/drivers/gpu/host1x/intr.c
+++ b/drivers/gpu/host1x/intr.c
@@ -122,18 +122,20 @@ static void action_submit_complete(struct host1x_waitlist *waiter)
static void action_wakeup(struct host1x_waitlist *waiter)
{
wait_queue_head_t *wq = waiter->data;
+
wake_up(wq);
}
static void action_wakeup_interruptible(struct host1x_waitlist *waiter)
{
wait_queue_head_t *wq = waiter->data;
+
wake_up_interruptible(wq);
}
typedef void (*action_handler)(struct host1x_waitlist *waiter);
-static action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
+static const action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
action_submit_complete,
action_wakeup,
action_wakeup_interruptible,
@@ -209,7 +211,7 @@ static void syncpt_thresh_work(struct work_struct *work)
host1x_syncpt_load(host->syncpt + id));
}
-int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
+int host1x_intr_add_action(struct host1x *host, unsigned int id, u32 thresh,
enum host1x_intr_action action, void *data,
struct host1x_waitlist *waiter, void **ref)
{
@@ -254,7 +256,7 @@ int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
return 0;
}
-void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref)
+void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref)
{
struct host1x_waitlist *waiter = ref;
struct host1x_syncpt *syncpt;
@@ -277,9 +279,6 @@ int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
mutex_init(&host->intr_mutex);
host->intr_syncpt_irq = irq_sync;
- host->intr_wq = create_workqueue("host_syncpt");
- if (!host->intr_wq)
- return -ENOMEM;
for (id = 0; id < nb_pts; ++id) {
struct host1x_syncpt *syncpt = host->syncpt + id;
@@ -288,7 +287,7 @@ int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
INIT_LIST_HEAD(&syncpt->intr.wait_head);
snprintf(syncpt->intr.thresh_irq_name,
sizeof(syncpt->intr.thresh_irq_name),
- "host1x_sp_%02d", id);
+ "host1x_sp_%02u", id);
}
host1x_intr_start(host);
@@ -299,7 +298,6 @@ int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
void host1x_intr_deinit(struct host1x *host)
{
host1x_intr_stop(host);
- destroy_workqueue(host->intr_wq);
}
void host1x_intr_start(struct host1x *host)
@@ -342,7 +340,7 @@ void host1x_intr_stop(struct host1x *host)
if (!list_empty(&syncpt[id].intr.wait_head)) {
/* output diagnostics */
mutex_unlock(&host->intr_mutex);
- pr_warn("%s cannot stop syncpt intr id=%d\n",
+ pr_warn("%s cannot stop syncpt intr id=%u\n",
__func__, id);
return;
}
diff --git a/drivers/gpu/host1x/intr.h b/drivers/gpu/host1x/intr.h
index 2b8adf016..1370c2bb7 100644
--- a/drivers/gpu/host1x/intr.h
+++ b/drivers/gpu/host1x/intr.h
@@ -75,7 +75,7 @@ struct host1x_waitlist {
*
* This is a non-blocking api.
*/
-int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
+int host1x_intr_add_action(struct host1x *host, unsigned int id, u32 thresh,
enum host1x_intr_action action, void *data,
struct host1x_waitlist *waiter, void **ref);
@@ -84,7 +84,7 @@ int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
* You must call this if you passed non-NULL as ref.
* @ref the ref returned from host1x_intr_add_action()
*/
-void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref);
+void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref);
/* Initialize host1x sync point interrupt */
int host1x_intr_init(struct host1x *host, unsigned int irq_sync);
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index b4515d544..a91b7c4a6 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -161,7 +161,7 @@ static int do_waitchks(struct host1x_job *job, struct host1x *host,
if (host1x_syncpt_is_expired(sp, wait->thresh)) {
dev_dbg(host->dev,
- "drop WAIT id %d (%s) thresh 0x%x, min 0x%x\n",
+ "drop WAIT id %u (%s) thresh 0x%x, min 0x%x\n",
wait->syncpt_id, sp->name, wait->thresh,
host1x_syncpt_read_min(sp));
@@ -464,6 +464,7 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
for (i = 0; i < job->num_gathers; i++) {
struct host1x_job_gather *g = &job->gathers[i];
+
size += g->words * sizeof(u32);
}
@@ -514,6 +515,7 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev)
bitmap_zero(waitchk_mask, host1x_syncpt_nb_pts(host));
for (i = 0; i < job->num_waitchk; i++) {
u32 syncpt_id = job->waitchk[i].syncpt_id;
+
if (syncpt_id < host1x_syncpt_nb_pts(host))
set_bit(syncpt_id, waitchk_mask);
}
@@ -571,14 +573,16 @@ void host1x_job_unpin(struct host1x_job *job)
for (i = 0; i < job->num_unpins; i++) {
struct host1x_job_unpin_data *unpin = &job->unpins[i];
+
host1x_bo_unpin(unpin->bo, unpin->sgt);
host1x_bo_put(unpin->bo);
}
+
job->num_unpins = 0;
if (job->gather_copy_size)
dma_free_wc(job->channel->dev, job->gather_copy_size,
- job->gather_copy_mapped, job->gather_copy);
+ job->gather_copy_mapped, job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);
diff --git a/drivers/gpu/host1x/mipi.c b/drivers/gpu/host1x/mipi.c
index 52a6fd224..e00809d99 100644
--- a/drivers/gpu/host1x/mipi.c
+++ b/drivers/gpu/host1x/mipi.c
@@ -242,20 +242,6 @@ struct tegra_mipi_device *tegra_mipi_request(struct device *device)
dev->pads = args.args[0];
dev->device = device;
- mutex_lock(&dev->mipi->lock);
-
- if (dev->mipi->usage_count++ == 0) {
- err = tegra_mipi_power_up(dev->mipi);
- if (err < 0) {
- dev_err(dev->mipi->dev,
- "failed to power up MIPI bricks: %d\n",
- err);
- return ERR_PTR(err);
- }
- }
-
- mutex_unlock(&dev->mipi->lock);
-
return dev;
put:
@@ -270,29 +256,42 @@ EXPORT_SYMBOL(tegra_mipi_request);
void tegra_mipi_free(struct tegra_mipi_device *device)
{
- int err;
+ platform_device_put(device->pdev);
+ kfree(device);
+}
+EXPORT_SYMBOL(tegra_mipi_free);
- mutex_lock(&device->mipi->lock);
+int tegra_mipi_enable(struct tegra_mipi_device *dev)
+{
+ int err = 0;
- if (--device->mipi->usage_count == 0) {
- err = tegra_mipi_power_down(device->mipi);
- if (err < 0) {
- /*
- * Not much that can be done here, so an error message
- * will have to do.
- */
- dev_err(device->mipi->dev,
- "failed to power down MIPI bricks: %d\n",
- err);
- }
- }
+ mutex_lock(&dev->mipi->lock);
- mutex_unlock(&device->mipi->lock);
+ if (dev->mipi->usage_count++ == 0)
+ err = tegra_mipi_power_up(dev->mipi);
+
+ mutex_unlock(&dev->mipi->lock);
+
+ return err;
- platform_device_put(device->pdev);
- kfree(device);
}
-EXPORT_SYMBOL(tegra_mipi_free);
+EXPORT_SYMBOL(tegra_mipi_enable);
+
+int tegra_mipi_disable(struct tegra_mipi_device *dev)
+{
+ int err = 0;
+
+ mutex_lock(&dev->mipi->lock);
+
+ if (--dev->mipi->usage_count == 0)
+ err = tegra_mipi_power_down(dev->mipi);
+
+ mutex_unlock(&dev->mipi->lock);
+
+ return err;
+}
+EXPORT_SYMBOL(tegra_mipi_disable);
static int tegra_mipi_wait(struct tegra_mipi *mipi)
{
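
The mipi.c rework above moves calibration-brick power handling out of tegra_mipi_request()/tegra_mipi_free() and into the new reference-counted tegra_mipi_enable()/tegra_mipi_disable() pair. A sketch of how a consumer would drive the new API; the surrounding driver and function names are hypothetical and error handling is trimmed:

    static int demo_dsi_power_on(struct device *dev,
    			     struct tegra_mipi_device **mipi)
    {
    	int err;

    	/* request no longer powers up the bricks */
    	*mipi = tegra_mipi_request(dev);
    	if (IS_ERR(*mipi))
    		return PTR_ERR(*mipi);

    	/* the first enabler powers the MIPI bricks up */
    	err = tegra_mipi_enable(*mipi);
    	if (err < 0) {
    		tegra_mipi_free(*mipi);
    		return err;
    	}

    	return 0;
    }

    static void demo_dsi_power_off(struct tegra_mipi_device *mipi)
    {
    	/* the last disabler powers the bricks back down */
    	tegra_mipi_disable(mipi);
    	tegra_mipi_free(mipi);
    }
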
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index 6b7fdc1e2..95589328a 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -73,7 +73,7 @@ static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
return NULL;
}
- name = kasprintf(GFP_KERNEL, "%02d-%s", sp->id,
+ name = kasprintf(GFP_KERNEL, "%02u-%s", sp->id,
dev ? dev_name(dev) : NULL);
if (!name)
return NULL;
@@ -110,12 +110,14 @@ EXPORT_SYMBOL(host1x_syncpt_incr_max);
void host1x_syncpt_restore(struct host1x *host)
{
struct host1x_syncpt *sp_base = host->syncpt;
- u32 i;
+ unsigned int i;
for (i = 0; i < host1x_syncpt_nb_pts(host); i++)
host1x_hw_syncpt_restore(host, sp_base + i);
+
for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
host1x_hw_syncpt_restore_wait_base(host, sp_base + i);
+
wmb();
}
@@ -126,7 +128,7 @@ void host1x_syncpt_restore(struct host1x *host)
void host1x_syncpt_save(struct host1x *host)
{
struct host1x_syncpt *sp_base = host->syncpt;
- u32 i;
+ unsigned int i;
for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
if (host1x_syncpt_client_managed(sp_base + i))
@@ -146,6 +148,7 @@ void host1x_syncpt_save(struct host1x *host)
u32 host1x_syncpt_load(struct host1x_syncpt *sp)
{
u32 val;
+
val = host1x_hw_syncpt_load(sp->host, sp);
trace_host1x_syncpt_load_min(sp->id, val);
@@ -157,10 +160,9 @@ u32 host1x_syncpt_load(struct host1x_syncpt *sp)
*/
u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp)
{
- u32 val;
host1x_hw_syncpt_load_wait_base(sp->host, sp);
- val = sp->base_val;
- return val;
+
+ return sp->base_val;
}
/*
@@ -179,6 +181,7 @@ EXPORT_SYMBOL(host1x_syncpt_incr);
static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh)
{
host1x_hw_syncpt_load(sp->host, sp);
+
return host1x_syncpt_is_expired(sp, thresh);
}
@@ -186,7 +189,7 @@ static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh)
* Main entrypoint for syncpoint value waits.
*/
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
- u32 *value)
+ u32 *value)
{
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
void *ref;
@@ -201,6 +204,7 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
if (host1x_syncpt_is_expired(sp, thresh)) {
if (value)
*value = host1x_syncpt_load(sp);
+
return 0;
}
@@ -209,6 +213,7 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
if (host1x_syncpt_is_expired(sp, thresh)) {
if (value)
*value = val;
+
goto done;
}
@@ -239,32 +244,42 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
/* wait for the syncpoint, or timeout, or signal */
while (timeout) {
long check = min_t(long, SYNCPT_CHECK_PERIOD, timeout);
- int remain = wait_event_interruptible_timeout(wq,
+ int remain;
+
+ remain = wait_event_interruptible_timeout(wq,
syncpt_load_min_is_expired(sp, thresh),
check);
if (remain > 0 || host1x_syncpt_is_expired(sp, thresh)) {
if (value)
*value = host1x_syncpt_load(sp);
+
err = 0;
+
break;
}
+
if (remain < 0) {
err = remain;
break;
}
+
timeout -= check;
+
if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
dev_warn(sp->host->dev,
- "%s: syncpoint id %d (%s) stuck waiting %d, timeout=%ld\n",
+ "%s: syncpoint id %u (%s) stuck waiting %d, timeout=%ld\n",
current->comm, sp->id, sp->name,
thresh, timeout);
host1x_debug_dump_syncpts(sp->host);
+
if (check_count == MAX_STUCK_CHECK_COUNT)
host1x_debug_dump(sp->host);
+
check_count++;
}
}
+
host1x_intr_put_ref(sp->host, sp->id, ref);
done:
@@ -279,7 +294,9 @@ bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh)
{
u32 current_val;
u32 future_val;
+
smp_rmb();
+
current_val = (u32)atomic_read(&sp->min_val);
future_val = (u32)atomic_read(&sp->max_val);
@@ -341,14 +358,14 @@ int host1x_syncpt_init(struct host1x *host)
{
struct host1x_syncpt_base *bases;
struct host1x_syncpt *syncpt;
- int i;
+ unsigned int i;
- syncpt = devm_kzalloc(host->dev, sizeof(*syncpt) * host->info->nb_pts,
+ syncpt = devm_kcalloc(host->dev, host->info->nb_pts, sizeof(*syncpt),
GFP_KERNEL);
if (!syncpt)
return -ENOMEM;
- bases = devm_kzalloc(host->dev, sizeof(*bases) * host->info->nb_bases,
+ bases = devm_kcalloc(host->dev, host->info->nb_bases, sizeof(*bases),
GFP_KERNEL);
if (!bases)
return -ENOMEM;
@@ -378,6 +395,7 @@ struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
unsigned long flags)
{
struct host1x *host = dev_get_drvdata(dev->parent);
+
return host1x_syncpt_alloc(host, dev, flags);
}
EXPORT_SYMBOL(host1x_syncpt_request);
@@ -398,8 +416,9 @@ EXPORT_SYMBOL(host1x_syncpt_free);
void host1x_syncpt_deinit(struct host1x *host)
{
- int i;
struct host1x_syncpt *sp = host->syncpt;
+ unsigned int i;
+
for (i = 0; i < host->info->nb_pts; i++, sp++)
kfree(sp->name);
}
@@ -407,10 +426,11 @@ void host1x_syncpt_deinit(struct host1x *host)
/*
* Read max. It indicates how many operations there are in queue, either in
* channel or in a software thread.
- * */
+ */
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
{
smp_rmb();
+
return (u32)atomic_read(&sp->max_val);
}
EXPORT_SYMBOL(host1x_syncpt_read_max);
@@ -421,6 +441,7 @@ EXPORT_SYMBOL(host1x_syncpt_read_max);
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
{
smp_rmb();
+
return (u32)atomic_read(&sp->min_val);
}
EXPORT_SYMBOL(host1x_syncpt_read_min);
@@ -431,25 +452,26 @@ u32 host1x_syncpt_read(struct host1x_syncpt *sp)
}
EXPORT_SYMBOL(host1x_syncpt_read);
-int host1x_syncpt_nb_pts(struct host1x *host)
+unsigned int host1x_syncpt_nb_pts(struct host1x *host)
{
return host->info->nb_pts;
}
-int host1x_syncpt_nb_bases(struct host1x *host)
+unsigned int host1x_syncpt_nb_bases(struct host1x *host)
{
return host->info->nb_bases;
}
-int host1x_syncpt_nb_mlocks(struct host1x *host)
+unsigned int host1x_syncpt_nb_mlocks(struct host1x *host)
{
return host->info->nb_mlocks;
}
-struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id)
+struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, unsigned int id)
{
if (host->info->nb_pts < id)
return NULL;
+
return host->syncpt + id;
}
EXPORT_SYMBOL(host1x_syncpt_get);
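
The devm_kcalloc() conversions above are the standard guard against multiplication overflow in array allocations: devm_kcalloc() fails cleanly if n * size would wrap, while the open-coded sizeof(*p) * n inside devm_kzalloc() can silently produce a short buffer. The idiom in sketch form, reusing the names from the hunk:

    /* before: overflow in the multiplication goes unnoticed */
    syncpt = devm_kzalloc(dev, sizeof(*syncpt) * nb_pts, GFP_KERNEL);

    /* after: returns NULL if nb_pts * sizeof(*syncpt) overflows */
    syncpt = devm_kcalloc(dev, nb_pts, sizeof(*syncpt), GFP_KERNEL);
    if (!syncpt)
    	return -ENOMEM;
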
diff --git a/drivers/gpu/host1x/syncpt.h b/drivers/gpu/host1x/syncpt.h
index 9056465ec..f71920510 100644
--- a/drivers/gpu/host1x/syncpt.h
+++ b/drivers/gpu/host1x/syncpt.h
@@ -37,7 +37,7 @@ struct host1x_syncpt_base {
};
struct host1x_syncpt {
- int id;
+ unsigned int id;
atomic_t min_val;
atomic_t max_val;
u32 base_val;
@@ -58,13 +58,13 @@ int host1x_syncpt_init(struct host1x *host);
void host1x_syncpt_deinit(struct host1x *host);
/* Return number of sync point supported. */
-int host1x_syncpt_nb_pts(struct host1x *host);
+unsigned int host1x_syncpt_nb_pts(struct host1x *host);
/* Return number of wait bases supported. */
-int host1x_syncpt_nb_bases(struct host1x *host);
+unsigned int host1x_syncpt_nb_bases(struct host1x *host);
/* Return number of mlocks supported. */
-int host1x_syncpt_nb_mlocks(struct host1x *host);
+unsigned int host1x_syncpt_nb_mlocks(struct host1x *host);
/*
* Check sync point sanity. If max is larger than min, there have too many
diff --git a/drivers/gpu/ipu-v3/ipu-dc.c b/drivers/gpu/ipu-v3/ipu-dc.c
index 2f29780e7..659475c1e 100644
--- a/drivers/gpu/ipu-v3/ipu-dc.c
+++ b/drivers/gpu/ipu-v3/ipu-dc.c
@@ -150,6 +150,9 @@ static void dc_write_tmpl(struct ipu_dc *dc, int word, u32 opcode, u32 operand,
static int ipu_bus_format_to_map(u32 fmt)
{
switch (fmt) {
+ default:
+ WARN_ON(1);
+ /* fall-through */
case MEDIA_BUS_FMT_RGB888_1X24:
return IPU_DC_MAP_RGB24;
case MEDIA_BUS_FMT_RGB565_1X16:
@@ -162,8 +165,6 @@ static int ipu_bus_format_to_map(u32 fmt)
return IPU_DC_MAP_LVDS666;
case MEDIA_BUS_FMT_BGR888_1X24:
return IPU_DC_MAP_BGR24;
- default:
- return -EINVAL;
}
}
@@ -178,10 +179,6 @@ int ipu_dc_init_sync(struct ipu_dc *dc, struct ipu_di *di, bool interlaced,
dc->di = ipu_di_get_num(di);
map = ipu_bus_format_to_map(bus_format);
- if (map < 0) {
- dev_dbg(priv->dev, "IPU_DISP: No MAP\n");
- return map;
- }
/*
* In interlaced mode we need more counters to create the asymmetric
diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c
index 359268e3a..a8d87ddd8 100644
--- a/drivers/gpu/ipu-v3/ipu-di.c
+++ b/drivers/gpu/ipu-v3/ipu-di.c
@@ -572,9 +572,6 @@ int ipu_di_init_sync_panel(struct ipu_di *di, struct ipu_di_signal_cfg *sig)
dev_dbg(di->ipu->dev, "disp %d: panel size = %d x %d\n",
di->id, sig->mode.hactive, sig->mode.vactive);
- if ((sig->mode.vsync_len == 0) || (sig->mode.hsync_len == 0))
- return -EINVAL;
-
dev_dbg(di->ipu->dev, "Clocks: IPU %luHz DI %luHz Needed %luHz\n",
clk_get_rate(di->clk_ipu),
clk_get_rate(di->clk_di),
diff --git a/drivers/gpu/ipu-v3/ipu-dmfc.c b/drivers/gpu/ipu-v3/ipu-dmfc.c
index 837b1ec22..42705bb5a 100644
--- a/drivers/gpu/ipu-v3/ipu-dmfc.c
+++ b/drivers/gpu/ipu-v3/ipu-dmfc.c
@@ -45,17 +45,6 @@
#define DMFC_DP_CHAN_6B_24 16
#define DMFC_DP_CHAN_6F_29 24
-#define DMFC_FIFO_SIZE_64 (3 << 3)
-#define DMFC_FIFO_SIZE_128 (2 << 3)
-#define DMFC_FIFO_SIZE_256 (1 << 3)
-#define DMFC_FIFO_SIZE_512 (0 << 3)
-
-#define DMFC_SEGMENT(x) ((x & 0x7) << 0)
-#define DMFC_BURSTSIZE_128 (0 << 6)
-#define DMFC_BURSTSIZE_64 (1 << 6)
-#define DMFC_BURSTSIZE_32 (2 << 6)
-#define DMFC_BURSTSIZE_16 (3 << 6)
-
struct dmfc_channel_data {
int ipu_channel;
unsigned long channel_reg;
@@ -104,9 +93,6 @@ struct ipu_dmfc_priv;
struct dmfc_channel {
unsigned slots;
- unsigned slotmask;
- unsigned segment;
- int burstsize;
struct ipu_soc *ipu;
struct ipu_dmfc_priv *priv;
const struct dmfc_channel_data *data;
@@ -117,7 +103,6 @@ struct ipu_dmfc_priv {
struct device *dev;
struct dmfc_channel channels[DMFC_NUM_CHANNELS];
struct mutex mutex;
- unsigned long bandwidth_per_slot;
void __iomem *base;
int use_count;
};
@@ -172,184 +157,6 @@ void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc)
}
EXPORT_SYMBOL_GPL(ipu_dmfc_disable_channel);
-static int ipu_dmfc_setup_channel(struct dmfc_channel *dmfc, int slots,
- int segment, int burstsize)
-{
- struct ipu_dmfc_priv *priv = dmfc->priv;
- u32 val, field;
-
- dev_dbg(priv->dev,
- "dmfc: using %d slots starting from segment %d for IPU channel %d\n",
- slots, segment, dmfc->data->ipu_channel);
-
- switch (slots) {
- case 1:
- field = DMFC_FIFO_SIZE_64;
- break;
- case 2:
- field = DMFC_FIFO_SIZE_128;
- break;
- case 4:
- field = DMFC_FIFO_SIZE_256;
- break;
- case 8:
- field = DMFC_FIFO_SIZE_512;
- break;
- default:
- return -EINVAL;
- }
-
- switch (burstsize) {
- case 16:
- field |= DMFC_BURSTSIZE_16;
- break;
- case 32:
- field |= DMFC_BURSTSIZE_32;
- break;
- case 64:
- field |= DMFC_BURSTSIZE_64;
- break;
- case 128:
- field |= DMFC_BURSTSIZE_128;
- break;
- }
-
- field |= DMFC_SEGMENT(segment);
-
- val = readl(priv->base + dmfc->data->channel_reg);
-
- val &= ~(0xff << dmfc->data->shift);
- val |= field << dmfc->data->shift;
-
- writel(val, priv->base + dmfc->data->channel_reg);
-
- dmfc->slots = slots;
- dmfc->segment = segment;
- dmfc->burstsize = burstsize;
- dmfc->slotmask = ((1 << slots) - 1) << segment;
-
- return 0;
-}
-
-static int dmfc_bandwidth_to_slots(struct ipu_dmfc_priv *priv,
- unsigned long bandwidth)
-{
- int slots = 1;
-
- while (slots * priv->bandwidth_per_slot < bandwidth)
- slots *= 2;
-
- return slots;
-}
-
-static int dmfc_find_slots(struct ipu_dmfc_priv *priv, int slots)
-{
- unsigned slotmask_need, slotmask_used = 0;
- int i, segment = 0;
-
- slotmask_need = (1 << slots) - 1;
-
- for (i = 0; i < DMFC_NUM_CHANNELS; i++)
- slotmask_used |= priv->channels[i].slotmask;
-
- while (slotmask_need <= 0xff) {
- if (!(slotmask_used & slotmask_need))
- return segment;
-
- slotmask_need <<= 1;
- segment++;
- }
-
- return -EBUSY;
-}
-
-void ipu_dmfc_free_bandwidth(struct dmfc_channel *dmfc)
-{
- struct ipu_dmfc_priv *priv = dmfc->priv;
- int i;
-
- dev_dbg(priv->dev, "dmfc: freeing %d slots starting from segment %d\n",
- dmfc->slots, dmfc->segment);
-
- mutex_lock(&priv->mutex);
-
- if (!dmfc->slots)
- goto out;
-
- dmfc->slotmask = 0;
- dmfc->slots = 0;
- dmfc->segment = 0;
-
- for (i = 0; i < DMFC_NUM_CHANNELS; i++)
- priv->channels[i].slotmask = 0;
-
- for (i = 0; i < DMFC_NUM_CHANNELS; i++) {
- if (priv->channels[i].slots > 0) {
- priv->channels[i].segment =
- dmfc_find_slots(priv, priv->channels[i].slots);
- priv->channels[i].slotmask =
- ((1 << priv->channels[i].slots) - 1) <<
- priv->channels[i].segment;
- }
- }
-
- for (i = 0; i < DMFC_NUM_CHANNELS; i++) {
- if (priv->channels[i].slots > 0)
- ipu_dmfc_setup_channel(&priv->channels[i],
- priv->channels[i].slots,
- priv->channels[i].segment,
- priv->channels[i].burstsize);
- }
-out:
- mutex_unlock(&priv->mutex);
-}
-EXPORT_SYMBOL_GPL(ipu_dmfc_free_bandwidth);
-
-int ipu_dmfc_alloc_bandwidth(struct dmfc_channel *dmfc,
- unsigned long bandwidth_pixel_per_second, int burstsize)
-{
- struct ipu_dmfc_priv *priv = dmfc->priv;
- int slots = dmfc_bandwidth_to_slots(priv, bandwidth_pixel_per_second);
- int segment = -1, ret = 0;
-
- dev_dbg(priv->dev, "dmfc: trying to allocate %ldMpixel/s for IPU channel %d\n",
- bandwidth_pixel_per_second / 1000000,
- dmfc->data->ipu_channel);
-
- ipu_dmfc_free_bandwidth(dmfc);
-
- mutex_lock(&priv->mutex);
-
- if (slots > 8) {
- ret = -EBUSY;
- goto out;
- }
-
- /* For the MEM_BG channel, first try to allocate twice the slots */
- if (dmfc->data->ipu_channel == IPUV3_CHANNEL_MEM_BG_SYNC)
- segment = dmfc_find_slots(priv, slots * 2);
- else if (slots < 2)
- /* Always allocate at least 128*4 bytes (2 slots) */
- slots = 2;
-
- if (segment >= 0)
- slots *= 2;
- else
- segment = dmfc_find_slots(priv, slots);
- if (segment < 0) {
- ret = -EBUSY;
- goto out;
- }
-
- ipu_dmfc_setup_channel(dmfc, slots, segment, burstsize);
-
-out:
- mutex_unlock(&priv->mutex);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(ipu_dmfc_alloc_bandwidth);
-
void ipu_dmfc_config_wait4eot(struct dmfc_channel *dmfc, int width)
{
struct ipu_dmfc_priv *priv = dmfc->priv;
@@ -384,7 +191,6 @@ EXPORT_SYMBOL_GPL(ipu_dmfc_get);
void ipu_dmfc_put(struct dmfc_channel *dmfc)
{
- ipu_dmfc_free_bandwidth(dmfc);
}
EXPORT_SYMBOL_GPL(ipu_dmfc_put);
@@ -412,20 +218,15 @@ int ipu_dmfc_init(struct ipu_soc *ipu, struct device *dev, unsigned long base,
priv->channels[i].priv = priv;
priv->channels[i].ipu = ipu;
priv->channels[i].data = &dmfcdata[i];
- }
-
- writel(0x0, priv->base + DMFC_WR_CHAN);
- writel(0x0, priv->base + DMFC_DP_CHAN);
- /*
- * We have a total bandwidth of clkrate * 4pixel divided
- * into 8 slots.
- */
- priv->bandwidth_per_slot = clk_get_rate(ipu_clk) * 4 / 8;
-
- dev_dbg(dev, "dmfc: 8 slots with %ldMpixel/s bandwidth each\n",
- priv->bandwidth_per_slot / 1000000);
+ if (dmfcdata[i].ipu_channel == IPUV3_CHANNEL_MEM_BG_SYNC ||
+ dmfcdata[i].ipu_channel == IPUV3_CHANNEL_MEM_FG_SYNC ||
+ dmfcdata[i].ipu_channel == IPUV3_CHANNEL_MEM_DC_SYNC)
+ priv->channels[i].slots = 2;
+ }
+ writel(0x00000050, priv->base + DMFC_WR_CHAN);
+ writel(0x00005654, priv->base + DMFC_DP_CHAN);
writel(0x202020f6, priv->base + DMFC_WR_CHAN_DEF);
writel(0x2020f6f6, priv->base + DMFC_DP_CHAN_DEF);
writel(0x00000003, priv->base + DMFC_GENERAL1);
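
The magic constants written to DMFC_WR_CHAN and DMFC_DP_CHAN above decode neatly with the per-channel field macros this patch removes, assuming the register layout is unchanged: one byte per channel, segment in bits 0-2, FIFO size in bits 3-4, burst size in bits 6-7. A sketch of the decode (the check function is illustrative only, not part of the patch):

    /* field macros as removed above */
    #define DMFC_FIFO_SIZE_128	(2 << 3)	/* two 64-word slots */
    #define DMFC_BURSTSIZE_64	(1 << 6)
    #define DMFC_SEGMENT(x)	(((x) & 0x7) << 0)

    static void dmfc_decode_check(void)
    {
    	/* 0x50: burst 64, 128-entry FIFO, segment 0 */
    	BUILD_BUG_ON(0x0050 != (DMFC_BURSTSIZE_64 |
    				DMFC_FIFO_SIZE_128 | DMFC_SEGMENT(0)));
    	/* 0x5654: the same field at segment 4 (low byte) and
    	 * segment 6 (high byte), i.e. two slots per channel */
    	BUILD_BUG_ON(0x5654 != (((DMFC_BURSTSIZE_64 | DMFC_FIFO_SIZE_128 |
    				  DMFC_SEGMENT(6)) << 8) |
    				(DMFC_BURSTSIZE_64 | DMFC_FIFO_SIZE_128 |
    				 DMFC_SEGMENT(4))));
    }

This lines up with the two slots the init loop now records for the BG/FG/DC sync channels.
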
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index cbd7c986d..5f962bfcb 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -30,6 +30,7 @@
#define pr_fmt(fmt) "vga_switcheroo: " fmt
+#include <linux/apple-gmux.h>
#include <linux/console.h>
#include <linux/debugfs.h>
#include <linux/fb.h>
@@ -51,9 +52,9 @@
*
* * muxed: Dual GPUs with a multiplexer chip to switch outputs between GPUs.
* * muxless: Dual GPUs but only one of them is connected to outputs.
- * The other one is merely used to offload rendering, its results
- * are copied over PCIe into the framebuffer. On Linux this is
- * supported with DRI PRIME.
+ * The other one is merely used to offload rendering, its results
+ * are copied over PCIe into the framebuffer. On Linux this is
+ * supported with DRI PRIME.
*
* Hybrid graphics started to appear in the late Naughties and were initially
* all muxed. Newer laptops moved to a muxless architecture for cost reasons.
@@ -308,7 +309,8 @@ static int register_client(struct pci_dev *pdev,
*
* Register vga client (GPU). Enable vga_switcheroo if another GPU and a
* handler have already registered. The power state of the client is assumed
- * to be ON.
+ * to be ON. Beforehand, vga_switcheroo_client_probe_defer() shall be called
+ * to ensure that all prerequisites are met.
*
* Return: 0 on success, -ENOMEM on memory allocation error.
*/
@@ -329,7 +331,8 @@ EXPORT_SYMBOL(vga_switcheroo_register_client);
* @id: client identifier
*
* Register audio client (audio device on a GPU). The power state of the
- * client is assumed to be ON.
+ * client is assumed to be ON. Beforehand, vga_switcheroo_client_probe_defer()
+ * shall be called to ensure that all prerequisites are met.
*
* Return: 0 on success, -ENOMEM on memory allocation error.
*/
@@ -376,6 +379,33 @@ find_active_client(struct list_head *head)
}
/**
+ * vga_switcheroo_client_probe_defer() - whether to defer probing a given client
+ * @pdev: client pci device
+ *
+ * Determine whether any prerequisites are not fulfilled to probe a given
+ * client. Drivers shall invoke this early on in their ->probe callback
+ * and return %-EPROBE_DEFER if it evaluates to %true. The client must
+ * not be registered before this function has been called.
+ *
+ * Return: %true if probing should be deferred, otherwise %false.
+ */
+bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev)
+{
+ if ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
+ /*
+ * apple-gmux is needed on pre-retina MacBook Pro
+ * to probe the panel if pdev is the inactive GPU.
+ */
+ if (apple_gmux_present() && pdev != vga_default_device() &&
+ !vgasr_priv.handler_flags)
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(vga_switcheroo_client_probe_defer);
+
+/**
* vga_switcheroo_get_client_state() - obtain power state of a given client
* @pdev: client pci device
*
@@ -530,21 +560,21 @@ EXPORT_SYMBOL(vga_switcheroo_unlock_ddc);
* * OFF: Power off the device not in use.
* * ON: Power on the device not in use.
* * IGD: Switch to the integrated graphics device.
- * Power on the integrated GPU if necessary, power off the discrete GPU.
- * Prerequisite is that no user space processes (e.g. Xorg, alsactl)
- * have opened device files of the GPUs or the audio client. If the
- * switch fails, the user may invoke lsof(8) or fuser(1) on /dev/dri/
- * and /dev/snd/controlC1 to identify processes blocking the switch.
+ * Power on the integrated GPU if necessary, power off the discrete GPU.
+ * Prerequisite is that no user space processes (e.g. Xorg, alsactl)
+ * have opened device files of the GPUs or the audio client. If the
+ * switch fails, the user may invoke lsof(8) or fuser(1) on /dev/dri/
+ * and /dev/snd/controlC1 to identify processes blocking the switch.
* * DIS: Switch to the discrete graphics device.
* * DIGD: Delayed switch to the integrated graphics device.
- * This will perform the switch once the last user space process has
- * closed the device files of the GPUs and the audio client.
+ * This will perform the switch once the last user space process has
+ * closed the device files of the GPUs and the audio client.
* * DDIS: Delayed switch to the discrete graphics device.
* * MIGD: Mux-only switch to the integrated graphics device.
- * Does not remap console or change the power state of either gpu.
- * If the integrated GPU is currently off, the screen will turn black.
- * If it is on, the screen will show whatever happens to be in VRAM.
- * Either way, the user has to blindly enter the command to switch back.
+ * Does not remap console or change the power state of either gpu.
+ * If the integrated GPU is currently off, the screen will turn black.
+ * If it is on, the screen will show whatever happens to be in VRAM.
+ * Either way, the user has to blindly enter the command to switch back.
* * MDIS: Mux-only switch to the discrete graphics device.
*
* For GPUs whose power state is controlled by the driver's runtime pm,
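
Per the new kerneldoc above, GPU (and GPU audio) drivers are expected to check for deferral at the very top of their probe routine, before registering with vga_switcheroo. A minimal sketch with a hypothetical driver name:

    static int demo_gpu_probe(struct pci_dev *pdev,
    			  const struct pci_device_id *ent)
    {
    	/* e.g. wait until apple-gmux has registered as the handler */
    	if (vga_switcheroo_client_probe_defer(pdev))
    		return -EPROBE_DEFER;

    	/* ... probe the device, then register the client ... */
    	return 0;
    }
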
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 5646ca4b9..78ac4811b 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -388,6 +388,21 @@ config HID_LCPOWER
---help---
Support for LC-Power RC1000MCE RF remote control.
+config HID_LED
+ tristate "Simple RGB LED support"
+ depends on HID
+ depends on LEDS_CLASS
+ ---help---
+ Support for simple RGB LED devices. Currently supported are:
+ - Riso Kagaku Webmail Notifier
+ - Dream Cheeky Webmail Notifier and Friends Alert
+ - ThingM blink(1)
+ - Delcom Visual Signal Indicator Generation 2
+ - Greynut Luxafor
+
+ To compile this driver as a module, choose M here: the
+ module will be called hid-led.
+
config HID_LENOVO
tristate "Lenovo / Thinkpad devices"
depends on HID
@@ -819,11 +834,11 @@ config HID_THINGM
tristate "ThingM blink(1) USB RGB LED"
depends on HID
depends on LEDS_CLASS
+ select HID_LED
---help---
- Support for the ThingM blink(1) USB RGB LED. This driver registers a
- Linux LED class instance, plus additional sysfs attributes to control
- RGB colors, fade time and playing. The device is exposed through hidraw
- to access other functions.
+ Support for the ThingM blink(1) USB RGB LED. This driver has been
+ merged into the generic hid led driver. Config symbol HID_THINGM
+ just selects HID_LED and will be removed soon.
config HID_THRUSTMASTER
tristate "ThrustMaster devices support"
@@ -936,6 +951,14 @@ config HID_SENSOR_CUSTOM_SENSOR
standard sensors.
Select this config option for custom/generic sensor support.
+config HID_ALPS
+ tristate "Alps HID device support"
+ depends on HID
+ ---help---
+ Support for Alps I2C HID touchpads and StickPointer.
+ Say Y here if you have an Alps touchpad over i2c-hid or usbhid
+ and want support for its special functionality.
+
endmenu
endif # HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index a2fb562de..fc4b2aa47 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -21,6 +21,7 @@ hid-wiimote-y := hid-wiimote-core.o hid-wiimote-modules.o
hid-wiimote-$(CONFIG_DEBUG_FS) += hid-wiimote-debug.o
obj-$(CONFIG_HID_A4TECH) += hid-a4tech.o
+obj-$(CONFIG_HID_ALPS) += hid-alps.o
obj-$(CONFIG_HID_ACRUX) += hid-axff.o
obj-$(CONFIG_HID_APPLE) += hid-apple.o
obj-$(CONFIG_HID_APPLEIR) += hid-appleir.o
@@ -90,12 +91,12 @@ obj-$(CONFIG_HID_SPEEDLINK) += hid-speedlink.o
obj-$(CONFIG_HID_STEELSERIES) += hid-steelseries.o
obj-$(CONFIG_HID_SUNPLUS) += hid-sunplus.o
obj-$(CONFIG_HID_GREENASIA) += hid-gaff.o
-obj-$(CONFIG_HID_THINGM) += hid-thingm.o
obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o
obj-$(CONFIG_HID_TIVO) += hid-tivo.o
obj-$(CONFIG_HID_TOPSEED) += hid-topseed.o
obj-$(CONFIG_HID_TWINHAN) += hid-twinhan.o
obj-$(CONFIG_HID_UCLOGIC) += hid-uclogic.o
+obj-$(CONFIG_HID_LED) += hid-led.o
obj-$(CONFIG_HID_XINMO) += hid-xinmo.o
obj-$(CONFIG_HID_ZEROPLUS) += hid-zpff.o
obj-$(CONFIG_HID_ZYDACRON) += hid-zydacron.o
diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c
new file mode 100644
index 000000000..048befde2
--- /dev/null
+++ b/drivers/hid/hid-alps.c
@@ -0,0 +1,506 @@
+/*
+ * Copyright (c) 2016 Masaki Ota <masaki.ota@jp.alps.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/hid.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/module.h>
+#include <asm/unaligned.h>
+#include "hid-ids.h"
+
+/* ALPS Device Product ID */
+#define HID_PRODUCT_ID_T3_BTNLESS 0xD0C0
+#define HID_PRODUCT_ID_COSMO 0x1202
+#define HID_PRODUCT_ID_U1_PTP_1 0x1207
+#define HID_PRODUCT_ID_U1 0x1209
+#define HID_PRODUCT_ID_U1_PTP_2 0x120A
+#define HID_PRODUCT_ID_U1_DUAL 0x120B
+#define HID_PRODUCT_ID_T4_BTNLESS 0x120C
+
+#define DEV_SINGLEPOINT 0x01
+#define DEV_DUALPOINT 0x02
+
+#define U1_MOUSE_REPORT_ID 0x01 /* Mouse data ReportID */
+#define U1_ABSOLUTE_REPORT_ID 0x03 /* Absolute data ReportID */
+#define U1_FEATURE_REPORT_ID 0x05 /* Feature ReportID */
+#define U1_SP_ABSOLUTE_REPORT_ID 0x06 /* SP Absolute data ReportID */
+
+#define U1_FEATURE_REPORT_LEN 0x08 /* Feature Report Length */
+#define U1_FEATURE_REPORT_LEN_ALL 0x0A
+#define U1_CMD_REGISTER_READ 0xD1
+#define U1_CMD_REGISTER_WRITE 0xD2
+
+#define U1_DEVTYPE_SP_SUPPORT 0x10 /* SP Support */
+#define U1_DISABLE_DEV 0x01
+#define U1_TP_ABS_MODE 0x02
+#define U1_SP_ABS_MODE 0x80
+
+#define ADDRESS_U1_DEV_CTRL_1 0x00800040
+#define ADDRESS_U1_DEVICE_TYP 0x00800043
+#define ADDRESS_U1_NUM_SENS_X 0x00800047
+#define ADDRESS_U1_NUM_SENS_Y 0x00800048
+#define ADDRESS_U1_PITCH_SENS_X 0x00800049
+#define ADDRESS_U1_PITCH_SENS_Y 0x0080004A
+#define ADDRESS_U1_RESO_DWN_ABS 0x0080004E
+#define ADDRESS_U1_PAD_BTN 0x00800052
+#define ADDRESS_U1_SP_BTN 0x0080009F
+
+#define MAX_TOUCHES 5
+
+/**
+ * struct u1_dev
+ *
+ * @input: pointer to the kernel input device
+ * @input2: pointer to the kernel input2 device
+ * @hdev: pointer to the struct hid_device
+ *
+ * @dev_ctrl: device control parameter
+ * @dev_type: device type
+ * @sen_line_num_x: number of sensor line of X
+ * @sen_line_num_y: number of sensor line of Y
+ * @pitch_x: sensor pitch of X
+ * @pitch_y: sensor pitch of Y
+ * @resolution: resolution
+ * @btn_info: button information
+ * @sp_btn_info: stick button information
+ * @x_active_len_mm: active area length of X (mm)
+ * @y_active_len_mm: active area length of Y (mm)
+ * @x_max: maximum x coordinate value
+ * @y_max: maximum y coordinate value
+ * @btn_cnt: number of buttons
+ * @sp_btn_cnt: number of stick buttons
+ */
+struct u1_dev {
+ struct input_dev *input;
+ struct input_dev *input2;
+ struct hid_device *hdev;
+
+ u8 dev_ctrl;
+ u8 dev_type;
+ u8 sen_line_num_x;
+ u8 sen_line_num_y;
+ u8 pitch_x;
+ u8 pitch_y;
+ u8 resolution;
+ u8 btn_info;
+ u8 sp_btn_info;
+ u32 x_active_len_mm;
+ u32 y_active_len_mm;
+ u32 x_max;
+ u32 y_max;
+ u32 btn_cnt;
+ u32 sp_btn_cnt;
+};
+
+static int u1_read_write_register(struct hid_device *hdev, u32 address,
+ u8 *read_val, u8 write_val, bool read_flag)
+{
+ int ret, i;
+ u8 check_sum;
+ u8 *input;
+ u8 *readbuf;
+
+ input = kzalloc(U1_FEATURE_REPORT_LEN, GFP_KERNEL);
+ if (!input)
+ return -ENOMEM;
+
+ input[0] = U1_FEATURE_REPORT_ID;
+ if (read_flag) {
+ input[1] = U1_CMD_REGISTER_READ;
+ input[6] = 0x00;
+ } else {
+ input[1] = U1_CMD_REGISTER_WRITE;
+ input[6] = write_val;
+ }
+
+ put_unaligned_le32(address, input + 2);
+
+ /* Calculate the checksum */
+ check_sum = U1_FEATURE_REPORT_LEN_ALL;
+ for (i = 0; i < U1_FEATURE_REPORT_LEN - 1; i++)
+ check_sum += input[i];
+
+ input[7] = check_sum;
+ ret = hid_hw_raw_request(hdev, U1_FEATURE_REPORT_ID, input,
+ U1_FEATURE_REPORT_LEN,
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed to read command (%d)\n", ret);
+ goto exit;
+ }
+
+ if (read_flag) {
+ readbuf = kzalloc(U1_FEATURE_REPORT_LEN, GFP_KERNEL);
+ if (!readbuf) {
+ kfree(input);
+ return -ENOMEM;
+ }
+
+ ret = hid_hw_raw_request(hdev, U1_FEATURE_REPORT_ID, readbuf,
+ U1_FEATURE_REPORT_LEN,
+ HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
+
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed to read register (%d)\n", ret);
+ kfree(readbuf);
+ goto exit;
+ }
+
+ *read_val = readbuf[6];
+
+ kfree(readbuf);
+ }
+
+ ret = 0;
+
+exit:
+ kfree(input);
+ return ret;
+}
+
+static int alps_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data, int size)
+{
+ unsigned int x, y, z;
+ int i;
+ short sp_x, sp_y;
+ struct u1_dev *hdata = hid_get_drvdata(hdev);
+
+ switch (data[0]) {
+ case U1_MOUSE_REPORT_ID:
+ break;
+ case U1_FEATURE_REPORT_ID:
+ break;
+ case U1_ABSOLUTE_REPORT_ID:
+ for (i = 0; i < MAX_TOUCHES; i++) {
+ u8 *contact = &data[i * 5];
+
+ x = get_unaligned_le16(contact + 3);
+ y = get_unaligned_le16(contact + 5);
+ z = contact[7] & 0x7F;
+
+ input_mt_slot(hdata->input, i);
+
+ if (z != 0) {
+ input_mt_report_slot_state(hdata->input,
+ MT_TOOL_FINGER, 1);
+ } else {
+ input_mt_report_slot_state(hdata->input,
+ MT_TOOL_FINGER, 0);
+ break;
+ }
+
+ input_report_abs(hdata->input, ABS_MT_POSITION_X, x);
+ input_report_abs(hdata->input, ABS_MT_POSITION_Y, y);
+ input_report_abs(hdata->input, ABS_MT_PRESSURE, z);
+
+ }
+
+ input_mt_sync_frame(hdata->input);
+
+ input_report_key(hdata->input, BTN_LEFT,
+ data[1] & 0x1);
+ input_report_key(hdata->input, BTN_RIGHT,
+ (data[1] & 0x2));
+ input_report_key(hdata->input, BTN_MIDDLE,
+ (data[1] & 0x4));
+
+ input_sync(hdata->input);
+
+ return 1;
+
+ case U1_SP_ABSOLUTE_REPORT_ID:
+ sp_x = get_unaligned_le16(data+2);
+ sp_y = get_unaligned_le16(data+4);
+
+ sp_x = sp_x / 8;
+ sp_y = sp_y / 8;
+
+ input_report_rel(hdata->input2, REL_X, sp_x);
+ input_report_rel(hdata->input2, REL_Y, sp_y);
+
+ input_report_key(hdata->input2, BTN_LEFT,
+ data[1] & 0x1);
+ input_report_key(hdata->input2, BTN_RIGHT,
+ (data[1] & 0x2));
+ input_report_key(hdata->input2, BTN_MIDDLE,
+ (data[1] & 0x4));
+
+ input_sync(hdata->input2);
+
+ return 1;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int alps_post_reset(struct hid_device *hdev)
+{
+ return u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1,
+ NULL, U1_TP_ABS_MODE, false);
+}
+
+static int alps_post_resume(struct hid_device *hdev)
+{
+ return u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1,
+ NULL, U1_TP_ABS_MODE, false);
+}
+#endif /* CONFIG_PM */
+
+static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
+{
+ struct u1_dev *data = hid_get_drvdata(hdev);
+ struct input_dev *input = hi->input, *input2;
+ struct u1_dev devInfo;
+ int ret;
+ int res_x, res_y, i;
+
+ data->input = input;
+
+ hid_dbg(hdev, "Opening low level driver\n");
+ ret = hid_hw_open(hdev);
+ if (ret)
+ return ret;
+
+ /* Allow incoming hid reports */
+ hid_device_io_start(hdev);
+
+ /* Device initialization */
+ ret = u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1,
+ &devInfo.dev_ctrl, 0, true);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed U1_DEV_CTRL_1 (%d)\n", ret);
+ goto exit;
+ }
+
+ devInfo.dev_ctrl &= ~U1_DISABLE_DEV;
+ devInfo.dev_ctrl |= U1_TP_ABS_MODE;
+ ret = u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1,
+ NULL, devInfo.dev_ctrl, false);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed to change TP mode (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = u1_read_write_register(hdev, ADDRESS_U1_NUM_SENS_X,
+ &devInfo.sen_line_num_x, 0, true);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed U1_NUM_SENS_X (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = u1_read_write_register(hdev, ADDRESS_U1_NUM_SENS_Y,
+ &devInfo.sen_line_num_y, 0, true);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed U1_NUM_SENS_Y (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = u1_read_write_register(hdev, ADDRESS_U1_PITCH_SENS_X,
+ &devInfo.pitch_x, 0, true);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed U1_PITCH_SENS_X (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = u1_read_write_register(hdev, ADDRESS_U1_PITCH_SENS_Y,
+ &devInfo.pitch_y, 0, true);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed U1_PITCH_SENS_Y (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = u1_read_write_register(hdev, ADDRESS_U1_RESO_DWN_ABS,
+ &devInfo.resolution, 0, true);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed U1_RESO_DWN_ABS (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = u1_read_write_register(hdev, ADDRESS_U1_PAD_BTN,
+ &devInfo.btn_info, 0, true);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed U1_PAD_BTN (%d)\n", ret);
+ goto exit;
+ }
+
+ /* Check StickPointer device */
+ ret = u1_read_write_register(hdev, ADDRESS_U1_DEVICE_TYP,
+ &devInfo.dev_type, 0, true);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed U1_DEVICE_TYP (%d)\n", ret);
+ goto exit;
+ }
+
+ devInfo.x_active_len_mm =
+ (devInfo.pitch_x * (devInfo.sen_line_num_x - 1)) / 10;
+ devInfo.y_active_len_mm =
+ (devInfo.pitch_y * (devInfo.sen_line_num_y - 1)) / 10;
+
+ devInfo.x_max =
+ (devInfo.resolution << 2) * (devInfo.sen_line_num_x - 1);
+ devInfo.y_max =
+ (devInfo.resolution << 2) * (devInfo.sen_line_num_y - 1);
+
+ __set_bit(EV_ABS, input->evbit);
+ input_set_abs_params(input, ABS_MT_POSITION_X, 1, devInfo.x_max, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 1, devInfo.y_max, 0, 0);
+
+ if (devInfo.x_active_len_mm && devInfo.y_active_len_mm) {
+ res_x = (devInfo.x_max - 1) / devInfo.x_active_len_mm;
+ res_y = (devInfo.y_max - 1) / devInfo.y_active_len_mm;
+
+ input_abs_set_res(input, ABS_MT_POSITION_X, res_x);
+ input_abs_set_res(input, ABS_MT_POSITION_Y, res_y);
+ }
+
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0, 64, 0, 0);
+
+ input_mt_init_slots(input, MAX_TOUCHES, INPUT_MT_POINTER);
+
+ __set_bit(EV_KEY, input->evbit);
+ if ((devInfo.btn_info & 0x0F) == (devInfo.btn_info & 0xF0) >> 4) {
+ devInfo.btn_cnt = (devInfo.btn_info & 0x0F);
+ } else {
+ /* Button pad */
+ devInfo.btn_cnt = 1;
+ __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+ }
+
+ for (i = 0; i < devInfo.btn_cnt; i++)
+ __set_bit(BTN_LEFT + i, input->keybit);
+
+ /* Stick device initialization */
+ if (devInfo.dev_type & U1_DEVTYPE_SP_SUPPORT) {
+
+ input2 = input_allocate_device();
+ if (!input2) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ data->input2 = input2;
+
+ devInfo.dev_ctrl |= U1_SP_ABS_MODE;
+ ret = u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1,
+ NULL, devInfo.dev_ctrl, false);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed SP mode (%d)\n", ret);
+ input_free_device(input2);
+ goto exit;
+ }
+
+ ret = u1_read_write_register(hdev, ADDRESS_U1_SP_BTN,
+ &devInfo.sp_btn_info, 0, true);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed U1_SP_BTN (%d)\n", ret);
+ input_free_device(input2);
+ goto exit;
+ }
+
+ input2->phys = input->phys;
+ input2->name = "DualPoint Stick";
+ input2->id.bustype = BUS_I2C;
+ input2->id.vendor = input->id.vendor;
+ input2->id.product = input->id.product;
+ input2->id.version = input->id.version;
+ input2->dev.parent = input->dev.parent;
+
+ __set_bit(EV_KEY, input2->evbit);
+ devInfo.sp_btn_cnt = (devInfo.sp_btn_info & 0x0F);
+ for (i = 0; i < devInfo.sp_btn_cnt; i++)
+ __set_bit(BTN_LEFT + i, input2->keybit);
+
+ __set_bit(EV_REL, input2->evbit);
+ __set_bit(REL_X, input2->relbit);
+ __set_bit(REL_Y, input2->relbit);
+ __set_bit(INPUT_PROP_POINTER, input2->propbit);
+ __set_bit(INPUT_PROP_POINTING_STICK, input2->propbit);
+
+ ret = input_register_device(data->input2);
+ if (ret) {
+ input_free_device(input2);
+ goto exit;
+ }
+ }
+
+exit:
+ hid_device_io_stop(hdev);
+ hid_hw_close(hdev);
+ return ret;
+}
+
+static int alps_input_mapping(struct hid_device *hdev,
+ struct hid_input *hi, struct hid_field *field,
+ struct hid_usage *usage, unsigned long **bit, int *max)
+{
+ return -1;
+}
+
+static int alps_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ struct u1_dev *data = NULL;
+ int ret;
+
+ data = devm_kzalloc(&hdev->dev, sizeof(struct u1_dev), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->hdev = hdev;
+ hid_set_drvdata(hdev, data);
+
+ hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "parse failed\n");
+ return ret;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret) {
+ hid_err(hdev, "hw start failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void alps_remove(struct hid_device *hdev)
+{
+ hid_hw_stop(hdev);
+}
+
+static const struct hid_device_id alps_id[] = {
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY,
+ USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, alps_id);
+
+static struct hid_driver alps_driver = {
+ .name = "hid-alps",
+ .id_table = alps_id,
+ .probe = alps_probe,
+ .remove = alps_remove,
+ .raw_event = alps_raw_event,
+ .input_mapping = alps_input_mapping,
+ .input_configured = alps_input_configured,
+#ifdef CONFIG_PM
+ .resume = alps_post_resume,
+ .reset_resume = alps_post_reset,
+#endif
+};
+
+module_hid_driver(alps_driver);
+
+MODULE_AUTHOR("Masaki Ota <masaki.ota@jp.alps.com>");
+MODULE_DESCRIPTION("ALPS HID driver");
+MODULE_LICENSE("GPL");
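
For reference, the register helper above is driven the same way throughout the driver: pass the register address plus read_flag=true and a buffer for a read, or read_flag=false and a value for a write. A sketch of both directions, reusing the constants defined in this file (a code fragment, not a complete function):

    u8 dev_type;
    int err;

    /* read the device-type register */
    err = u1_read_write_register(hdev, ADDRESS_U1_DEVICE_TYP,
    			     &dev_type, 0, true);
    if (!err && (dev_type & U1_DEVTYPE_SP_SUPPORT))
    	hid_info(hdev, "StickPointer present\n");

    /* write: switch the touchpad into absolute reporting mode */
    err = u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1,
    			     NULL, U1_TP_ABS_MODE, false);
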
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 884d82f91..2e0460822 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -474,6 +474,8 @@ static const struct hid_device_id apple_devices[] = {
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
+ .driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 8ea3a2636..08f53c7fd 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1772,6 +1772,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705) },
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) },
@@ -1851,6 +1852,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD) },
@@ -1877,8 +1879,11 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) },
@@ -1962,6 +1967,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
@@ -2008,6 +2014,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) },
#if IS_ENABLED(CONFIG_HID_ROCCAT)
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
@@ -2348,8 +2355,6 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x0004) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x000a) },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0401) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
@@ -2486,7 +2491,6 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DPAD) },
#endif
{ HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) },
- { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) },
{ }
};
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 3eec09a13..4ed9a4fdf 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -70,6 +70,9 @@
#define USB_VENDOR_ID_ALPS 0x0433
#define USB_DEVICE_ID_IBM_GAMEPAD 0x1101
+#define USB_VENDOR_ID_ALPS_JP 0x044E
+#define HID_DEVICE_ID_ALPS_U1_DUAL 0x120B
+
#define USB_VENDOR_ID_ANTON 0x1130
#define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101
@@ -142,6 +145,7 @@
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI 0x0255
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS 0x0257
+#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI 0x0267
#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
@@ -296,6 +300,9 @@
#define USB_VENDOR_ID_DEALEXTREAME 0x10c5
#define USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701 0x819a
+#define USB_VENDOR_ID_DELCOM 0x0fc5
+#define USB_DEVICE_ID_DELCOM_VISUAL_IND 0xb080
+
#define USB_VENDOR_ID_DELORME 0x1163
#define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100
#define USB_DEVICE_ID_DELORME_EM_LT20 0x0200
@@ -334,6 +341,8 @@
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
#define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34
+#define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004
+#define USB_DEVICE_ID_DREAM_CHEEKY_FA 0x000a
#define USB_VENDOR_ID_ELITEGROUP 0x03fc
#define USB_DEVICE_ID_ELITEGROUP_05D8 0x05d8
@@ -680,6 +689,7 @@
#define USB_DEVICE_ID_PICOLCD_BOOTLOADER 0xf002
#define USB_DEVICE_ID_PICK16F1454 0x0042
#define USB_DEVICE_ID_PICK16F1454_V2 0xf2f7
+#define USB_DEVICE_ID_LUXAFOR 0xf372
#define USB_VENDOR_ID_MICROSOFT 0x045e
#define USB_DEVICE_ID_SIDEWINDER_GV 0x003b
diff --git a/drivers/hid/hid-led.c b/drivers/hid/hid-led.c
new file mode 100644
index 000000000..d8d55f37b
--- /dev/null
+++ b/drivers/hid/hid-led.c
@@ -0,0 +1,523 @@
+/*
+ * Simple USB RGB LED driver
+ *
+ * Copyright 2016 Heiner Kallweit <hkallweit1@gmail.com>
+ * Based on drivers/hid/hid-thingm.c and
+ * drivers/usb/misc/usbled.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2.
+ */
+
+#include <linux/hid.h>
+#include <linux/hidraw.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+
+#include "hid-ids.h"
+
+enum hidled_report_type {
+ RAW_REQUEST,
+ OUTPUT_REPORT
+};
+
+enum hidled_type {
+ RISO_KAGAKU,
+ DREAM_CHEEKY,
+ THINGM,
+ DELCOM,
+ LUXAFOR,
+};
+
+static const unsigned char riso_kagaku_tbl[] = {
+/* R+2G+4B -> riso kagaku color index */
+ [0] = 0, /* black */
+ [1] = 2, /* red */
+ [2] = 1, /* green */
+ [3] = 5, /* yellow */
+ [4] = 3, /* blue */
+ [5] = 6, /* magenta */
+ [6] = 4, /* cyan */
+ [7] = 7 /* white */
+};
+
+#define RISO_KAGAKU_IX(r, g, b) riso_kagaku_tbl[((r)?1:0)+((g)?2:0)+((b)?4:0)]
+
+union delcom_packet {
+ __u8 data[8];
+ struct {
+ __u8 major_cmd;
+ __u8 minor_cmd;
+ __u8 data_lsb;
+ __u8 data_msb;
+ } tx;
+ struct {
+ __u8 cmd;
+ } rx;
+ struct {
+ __le16 family_code;
+ __le16 security_code;
+ __u8 fw_version;
+ } fw;
+};
+
+#define DELCOM_GREEN_LED 0
+#define DELCOM_RED_LED 1
+#define DELCOM_BLUE_LED 2
+
+struct hidled_device;
+struct hidled_rgb;
+
+struct hidled_config {
+ enum hidled_type type;
+ const char *name;
+ const char *short_name;
+ enum led_brightness max_brightness;
+ int num_leds;
+ size_t report_size;
+ enum hidled_report_type report_type;
+ int (*init)(struct hidled_device *ldev);
+ int (*write)(struct led_classdev *cdev, enum led_brightness br);
+};
+
+struct hidled_led {
+ struct led_classdev cdev;
+ struct hidled_rgb *rgb;
+ char name[32];
+};
+
+struct hidled_rgb {
+ struct hidled_device *ldev;
+ struct hidled_led red;
+ struct hidled_led green;
+ struct hidled_led blue;
+ u8 num;
+};
+
+struct hidled_device {
+ const struct hidled_config *config;
+ struct hid_device *hdev;
+ struct hidled_rgb *rgb;
+ struct mutex lock;
+};
+
+#define MAX_REPORT_SIZE 16
+
+#define to_hidled_led(arg) container_of(arg, struct hidled_led, cdev)
+
+static bool riso_kagaku_switch_green_blue;
+module_param(riso_kagaku_switch_green_blue, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(riso_kagaku_switch_green_blue,
+ "switch green and blue RGB component for Riso Kagaku devices");
+
+static int hidled_send(struct hidled_device *ldev, __u8 *buf)
+{
+ int ret;
+
+ mutex_lock(&ldev->lock);
+
+ if (ldev->config->report_type == RAW_REQUEST)
+ ret = hid_hw_raw_request(ldev->hdev, buf[0], buf,
+ ldev->config->report_size,
+ HID_FEATURE_REPORT,
+ HID_REQ_SET_REPORT);
+ else if (ldev->config->report_type == OUTPUT_REPORT)
+ ret = hid_hw_output_report(ldev->hdev, buf,
+ ldev->config->report_size);
+ else
+ ret = -EINVAL;
+
+ mutex_unlock(&ldev->lock);
+
+ if (ret < 0)
+ return ret;
+
+ return ret == ldev->config->report_size ? 0 : -EMSGSIZE;
+}
+
+/* reading data is supported for report type RAW_REQUEST only */
+static int hidled_recv(struct hidled_device *ldev, __u8 *buf)
+{
+ int ret;
+
+ if (ldev->config->report_type != RAW_REQUEST)
+ return -EINVAL;
+
+ mutex_lock(&ldev->lock);
+
+ ret = hid_hw_raw_request(ldev->hdev, buf[0], buf,
+ ldev->config->report_size,
+ HID_FEATURE_REPORT,
+ HID_REQ_SET_REPORT);
+ if (ret < 0)
+ goto err;
+
+ ret = hid_hw_raw_request(ldev->hdev, buf[0], buf,
+ ldev->config->report_size,
+ HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
+err:
+ mutex_unlock(&ldev->lock);
+
+ return ret < 0 ? ret : 0;
+}
+
+static u8 riso_kagaku_index(struct hidled_rgb *rgb)
+{
+ enum led_brightness r, g, b;
+
+ r = rgb->red.cdev.brightness;
+ g = rgb->green.cdev.brightness;
+ b = rgb->blue.cdev.brightness;
+
+ if (riso_kagaku_switch_green_blue)
+ return RISO_KAGAKU_IX(r, b, g);
+ else
+ return RISO_KAGAKU_IX(r, g, b);
+}
+
+static int riso_kagaku_write(struct led_classdev *cdev, enum led_brightness br)
+{
+ struct hidled_led *led = to_hidled_led(cdev);
+ struct hidled_rgb *rgb = led->rgb;
+ __u8 buf[MAX_REPORT_SIZE] = {};
+
+ buf[1] = riso_kagaku_index(rgb);
+
+ return hidled_send(rgb->ldev, buf);
+}
+
+static int dream_cheeky_write(struct led_classdev *cdev, enum led_brightness br)
+{
+ struct hidled_led *led = to_hidled_led(cdev);
+ struct hidled_rgb *rgb = led->rgb;
+ __u8 buf[MAX_REPORT_SIZE] = {};
+
+ buf[1] = rgb->red.cdev.brightness;
+ buf[2] = rgb->green.cdev.brightness;
+ buf[3] = rgb->blue.cdev.brightness;
+ buf[7] = 0x1a;
+ buf[8] = 0x05;
+
+ return hidled_send(rgb->ldev, buf);
+}
+
+static int dream_cheeky_init(struct hidled_device *ldev)
+{
+ __u8 buf[MAX_REPORT_SIZE] = {};
+
+ /* Dream Cheeky magic */
+ buf[1] = 0x1f;
+ buf[2] = 0x02;
+ buf[4] = 0x5f;
+ buf[7] = 0x1a;
+ buf[8] = 0x03;
+
+ return hidled_send(ldev, buf);
+}
+
+static int _thingm_write(struct led_classdev *cdev, enum led_brightness br,
+ u8 offset)
+{
+ struct hidled_led *led = to_hidled_led(cdev);
+ __u8 buf[MAX_REPORT_SIZE] = { 1, 'c' };
+
+ buf[2] = led->rgb->red.cdev.brightness;
+ buf[3] = led->rgb->green.cdev.brightness;
+ buf[4] = led->rgb->blue.cdev.brightness;
+ buf[7] = led->rgb->num + offset;
+
+ return hidled_send(led->rgb->ldev, buf);
+}
+
+static int thingm_write_v1(struct led_classdev *cdev, enum led_brightness br)
+{
+ return _thingm_write(cdev, br, 0);
+}
+
+static int thingm_write(struct led_classdev *cdev, enum led_brightness br)
+{
+ return _thingm_write(cdev, br, 1);
+}
+
+static const struct hidled_config hidled_config_thingm_v1 = {
+ .name = "ThingM blink(1) v1",
+ .short_name = "thingm",
+ .max_brightness = 255,
+ .num_leds = 1,
+ .report_size = 9,
+ .report_type = RAW_REQUEST,
+ .write = thingm_write_v1,
+};
+
+static int thingm_init(struct hidled_device *ldev)
+{
+ __u8 buf[MAX_REPORT_SIZE] = { 1, 'v' };
+ int ret;
+
+ ret = hidled_recv(ldev, buf);
+ if (ret)
+ return ret;
+
+ /* Check for firmware major version 1 */
+ if (buf[3] == '1')
+ ldev->config = &hidled_config_thingm_v1;
+
+ return 0;
+}
+
+static inline int delcom_get_lednum(const struct hidled_led *led)
+{
+ if (led == &led->rgb->red)
+ return DELCOM_RED_LED;
+ else if (led == &led->rgb->green)
+ return DELCOM_GREEN_LED;
+ else
+ return DELCOM_BLUE_LED;
+}
+
+static int delcom_enable_led(struct hidled_led *led)
+{
+ union delcom_packet dp = { .tx.major_cmd = 101, .tx.minor_cmd = 12 };
+
+ dp.tx.data_lsb = 1 << delcom_get_lednum(led);
+ dp.tx.data_msb = 0;
+
+ return hidled_send(led->rgb->ldev, dp.data);
+}
+
+static int delcom_set_pwm(struct hidled_led *led)
+{
+ union delcom_packet dp = { .tx.major_cmd = 101, .tx.minor_cmd = 34 };
+
+ dp.tx.data_lsb = delcom_get_lednum(led);
+ dp.tx.data_msb = led->cdev.brightness;
+
+ return hidled_send(led->rgb->ldev, dp.data);
+}
+
+static int delcom_write(struct led_classdev *cdev, enum led_brightness br)
+{
+ struct hidled_led *led = to_hidled_led(cdev);
+ int ret;
+
+	/*
+	 * Enable the LED.
+	 * We can't do this in the init function because the device
+	 * is internally reset later.
+	 */
+ ret = delcom_enable_led(led);
+ if (ret)
+ return ret;
+
+ return delcom_set_pwm(led);
+}
+
+static int delcom_init(struct hidled_device *ldev)
+{
+ union delcom_packet dp = { .rx.cmd = 104 };
+ int ret;
+
+ ret = hidled_recv(ldev, dp.data);
+ if (ret)
+ return ret;
+ /*
+	 * Several Delcom devices share the same USB VID/PID.
+	 * Check for family id 2, the Visual Signal Indicator.
+ */
+ return le16_to_cpu(dp.fw.family_code) == 2 ? 0 : -ENODEV;
+}
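The delcom_packet union is defined earlier in hid-led.c and is not visible in this hunk. As a hedged sketch inferred only from the accesses above (exact field widths are an assumption), it overlays command and firmware-reply views on the same raw report buffer:

	/* Sketch only -- the real definition lives earlier in hid-led.c. */
	union delcom_packet {
		__u8 data[MAX_REPORT_SIZE];	/* raw buffer for hidled_send/recv */
		struct {
			__u8 major_cmd;		/* 101 = write command */
			__u8 minor_cmd;		/* 12 = enable LEDs, 34 = set PWM */
			__u8 data_lsb;
			__u8 data_msb;
		} tx;
		struct {
			__u8 cmd;		/* 104 = read firmware info */
		} rx;
		struct {
			__le16 family_code;	/* checked against 2 in delcom_init() */
		} fw;
	};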
+
+static int luxafor_write(struct led_classdev *cdev, enum led_brightness br)
+{
+ struct hidled_led *led = to_hidled_led(cdev);
+ __u8 buf[MAX_REPORT_SIZE] = { [1] = 1 };
+
+ buf[2] = led->rgb->num + 1;
+ buf[3] = led->rgb->red.cdev.brightness;
+ buf[4] = led->rgb->green.cdev.brightness;
+ buf[5] = led->rgb->blue.cdev.brightness;
+
+ return hidled_send(led->rgb->ldev, buf);
+}
+
+static const struct hidled_config hidled_configs[] = {
+ {
+ .type = RISO_KAGAKU,
+ .name = "Riso Kagaku Webmail Notifier",
+ .short_name = "riso_kagaku",
+ .max_brightness = 1,
+ .num_leds = 1,
+ .report_size = 6,
+ .report_type = OUTPUT_REPORT,
+ .write = riso_kagaku_write,
+ },
+ {
+ .type = DREAM_CHEEKY,
+ .name = "Dream Cheeky Webmail Notifier",
+ .short_name = "dream_cheeky",
+ .max_brightness = 31,
+ .num_leds = 1,
+ .report_size = 9,
+ .report_type = RAW_REQUEST,
+ .init = dream_cheeky_init,
+ .write = dream_cheeky_write,
+ },
+ {
+ .type = THINGM,
+ .name = "ThingM blink(1)",
+ .short_name = "thingm",
+ .max_brightness = 255,
+ .num_leds = 2,
+ .report_size = 9,
+ .report_type = RAW_REQUEST,
+ .init = thingm_init,
+ .write = thingm_write,
+ },
+ {
+ .type = DELCOM,
+ .name = "Delcom Visual Signal Indicator G2",
+ .short_name = "delcom",
+ .max_brightness = 100,
+ .num_leds = 1,
+ .report_size = 8,
+ .report_type = RAW_REQUEST,
+ .init = delcom_init,
+ .write = delcom_write,
+ },
+ {
+ .type = LUXAFOR,
+ .name = "Greynut Luxafor",
+ .short_name = "luxafor",
+ .max_brightness = 255,
+ .num_leds = 6,
+ .report_size = 9,
+ .report_type = OUTPUT_REPORT,
+ .write = luxafor_write,
+ },
+};
+
+static int hidled_init_led(struct hidled_led *led, const char *color_name,
+ struct hidled_rgb *rgb, unsigned int minor)
+{
+ const struct hidled_config *config = rgb->ldev->config;
+
+ if (config->num_leds > 1)
+ snprintf(led->name, sizeof(led->name), "%s%u:%s:led%u",
+ config->short_name, minor, color_name, rgb->num);
+ else
+ snprintf(led->name, sizeof(led->name), "%s%u:%s",
+ config->short_name, minor, color_name);
+ led->cdev.name = led->name;
+ led->cdev.max_brightness = config->max_brightness;
+ led->cdev.brightness_set_blocking = config->write;
+ led->cdev.flags = LED_HW_PLUGGABLE;
+ led->rgb = rgb;
+
+ return devm_led_classdev_register(&rgb->ldev->hdev->dev, &led->cdev);
+}
+
+static int hidled_init_rgb(struct hidled_rgb *rgb, unsigned int minor)
+{
+ int ret;
+
+ /* Register the red diode */
+ ret = hidled_init_led(&rgb->red, "red", rgb, minor);
+ if (ret)
+ return ret;
+
+ /* Register the green diode */
+ ret = hidled_init_led(&rgb->green, "green", rgb, minor);
+ if (ret)
+ return ret;
+
+ /* Register the blue diode */
+ return hidled_init_led(&rgb->blue, "blue", rgb, minor);
+}
+
+static int hidled_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ struct hidled_device *ldev;
+ unsigned int minor;
+ int ret, i;
+
+ ldev = devm_kzalloc(&hdev->dev, sizeof(*ldev), GFP_KERNEL);
+ if (!ldev)
+ return -ENOMEM;
+
+ ret = hid_parse(hdev);
+ if (ret)
+ return ret;
+
+ ldev->hdev = hdev;
+ mutex_init(&ldev->lock);
+
+ for (i = 0; !ldev->config && i < ARRAY_SIZE(hidled_configs); i++)
+ if (hidled_configs[i].type == id->driver_data)
+ ldev->config = &hidled_configs[i];
+
+ if (!ldev->config)
+ return -EINVAL;
+
+ if (ldev->config->init) {
+ ret = ldev->config->init(ldev);
+ if (ret)
+ return ret;
+ }
+
+ ldev->rgb = devm_kcalloc(&hdev->dev, ldev->config->num_leds,
+ sizeof(struct hidled_rgb), GFP_KERNEL);
+ if (!ldev->rgb)
+ return -ENOMEM;
+
+ ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+ if (ret)
+ return ret;
+
+ minor = ((struct hidraw *) hdev->hidraw)->minor;
+
+ for (i = 0; i < ldev->config->num_leds; i++) {
+ ldev->rgb[i].ldev = ldev;
+ ldev->rgb[i].num = i;
+ ret = hidled_init_rgb(&ldev->rgb[i], minor);
+ if (ret) {
+ hid_hw_stop(hdev);
+ return ret;
+ }
+ }
+
+ hid_info(hdev, "%s initialized\n", ldev->config->name);
+
+ return 0;
+}
+
+static const struct hid_device_id hidled_table[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU,
+ USB_DEVICE_ID_RI_KA_WEBMAIL), .driver_data = RISO_KAGAKU },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY,
+ USB_DEVICE_ID_DREAM_CHEEKY_WN), .driver_data = DREAM_CHEEKY },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY,
+ USB_DEVICE_ID_DREAM_CHEEKY_FA), .driver_data = DREAM_CHEEKY },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THINGM,
+ USB_DEVICE_ID_BLINK1), .driver_data = THINGM },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DELCOM,
+ USB_DEVICE_ID_DELCOM_VISUAL_IND), .driver_data = DELCOM },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP,
+ USB_DEVICE_ID_LUXAFOR), .driver_data = LUXAFOR },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, hidled_table);
+
+static struct hid_driver hidled_driver = {
+ .name = "hid-led",
+ .probe = hidled_probe,
+ .id_table = hidled_table,
+};
+
+module_hid_driver(hidled_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Heiner Kallweit <hkallweit1@gmail.com>");
+MODULE_DESCRIPTION("Simple USB RGB LED driver");
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 2e021ba8f..b3ec4f2de 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -1020,6 +1020,7 @@ static int i2c_hid_probe(struct i2c_client *client,
pm_runtime_get_noresume(&client->dev);
pm_runtime_set_active(&client->dev);
pm_runtime_enable(&client->dev);
+ device_enable_async_suspend(&client->dev);
ret = i2c_hid_fetch_hid_descriptor(ihid);
if (ret < 0)
@@ -1106,6 +1107,14 @@ static int i2c_hid_remove(struct i2c_client *client)
return 0;
}
+static void i2c_hid_shutdown(struct i2c_client *client)
+{
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+
+ i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
+ free_irq(client->irq, ihid);
+}
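For context, a minimal sketch of how a .shutdown hook of this era plugs into an i2c_driver; all foo_* names are hypothetical, and only the .shutdown wiring mirrors the change above. Quiescing the chip and freeing the IRQ at reboot/poweroff prevents a late interrupt from touching a device that has already been put to sleep:

	#include <linux/i2c.h>
	#include <linux/module.h>

	static int foo_probe(struct i2c_client *client,
			     const struct i2c_device_id *id)
	{
		return 0;
	}

	static int foo_remove(struct i2c_client *client)
	{
		return 0;
	}

	/* Runs at reboot/poweroff, not at driver unbind */
	static void foo_shutdown(struct i2c_client *client)
	{
		/* put the chip into its lowest-power state here,
		 * analogous to i2c_hid_set_power() above (hypothetical) */
	}

	static struct i2c_driver foo_driver = {
		.driver		= { .name = "foo" },
		.probe		= foo_probe,
		.remove		= foo_remove,
		.shutdown	= foo_shutdown,
	};
	module_i2c_driver(foo_driver);

	MODULE_LICENSE("GPL");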
+
#ifdef CONFIG_PM_SLEEP
static int i2c_hid_suspend(struct device *dev)
{
@@ -1230,7 +1239,7 @@ static struct i2c_driver i2c_hid_driver = {
.probe = i2c_hid_probe,
.remove = i2c_hid_remove,
-
+ .shutdown = i2c_hid_shutdown,
.id_table = i2c_hid_id_table,
};
diff --git a/drivers/hsi/Makefile b/drivers/hsi/Makefile
index 360371e13..96944783d 100644
--- a/drivers/hsi/Makefile
+++ b/drivers/hsi/Makefile
@@ -1,7 +1,8 @@
#
# Makefile for HSI
#
-obj-$(CONFIG_HSI_BOARDINFO) += hsi_boardinfo.o
obj-$(CONFIG_HSI) += hsi.o
+hsi-objs := hsi_core.o
+hsi-$(CONFIG_HSI_BOARDINFO) += hsi_boardinfo.o
obj-y += controllers/
obj-y += clients/
diff --git a/drivers/hsi/clients/cmt_speech.c b/drivers/hsi/clients/cmt_speech.c
index 95638df73..3deef6cc7 100644
--- a/drivers/hsi/clients/cmt_speech.c
+++ b/drivers/hsi/clients/cmt_speech.c
@@ -444,8 +444,8 @@ static void cs_hsi_read_on_control_complete(struct hsi_msg *msg)
hi->control_state &= ~SSI_CHANNEL_STATE_READING;
if (msg->status == HSI_STATUS_ERROR) {
dev_err(&hi->cl->device, "Control RX error detected\n");
- cs_hsi_control_read_error(hi, msg);
spin_unlock(&hi->lock);
+ cs_hsi_control_read_error(hi, msg);
goto out;
}
dev_dbg(&hi->cl->device, "Read on control: %08X\n", cmd);
@@ -1275,7 +1275,7 @@ static int cs_char_mmap(struct file *file, struct vm_area_struct *vma)
if (vma->vm_end < vma->vm_start)
return -EINVAL;
- if (((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) != 1)
+ if (vma_pages(vma) != 1)
return -EINVAL;
vma->vm_flags |= VM_IO | VM_DONTDUMP | VM_DONTEXPAND;
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
index 6595d2091..6031cd146 100644
--- a/drivers/hsi/clients/ssi_protocol.c
+++ b/drivers/hsi/clients/ssi_protocol.c
@@ -88,6 +88,8 @@ void ssi_waketest(struct hsi_client *cl, unsigned int enable);
#define SSIP_READY_CMD SSIP_CMD(SSIP_READY, 0)
#define SSIP_SWBREAK_CMD SSIP_CMD(SSIP_SW_BREAK, 0)
+#define SSIP_WAKETEST_FLAG 0
+
/* Main state machine states */
enum {
INIT,
@@ -116,7 +118,7 @@ enum {
* @main_state: Main state machine
* @send_state: TX state machine
* @recv_state: RX state machine
- * @waketest: Flag to follow wake line test
+ * @flags: Flags, currently only used to follow wake line test
* @rxid: RX data id
* @txid: TX data id
* @txqueue_len: TX queue length
@@ -137,7 +139,7 @@ struct ssi_protocol {
unsigned int main_state;
unsigned int send_state;
unsigned int recv_state;
- unsigned int waketest:1;
+ unsigned long flags;
u8 rxid;
u8 txid;
unsigned int txqueue_len;
@@ -148,6 +150,7 @@ struct ssi_protocol {
struct net_device *netdev;
struct list_head txqueue;
struct list_head cmdqueue;
+ struct work_struct work;
struct hsi_client *cl;
struct list_head link;
atomic_t tx_usecnt;
@@ -405,15 +408,17 @@ static void ssip_reset(struct hsi_client *cl)
spin_lock_bh(&ssi->lock);
if (ssi->send_state != SEND_IDLE)
hsi_stop_tx(cl);
- if (ssi->waketest)
- ssi_waketest(cl, 0);
+ spin_unlock_bh(&ssi->lock);
+ if (test_and_clear_bit(SSIP_WAKETEST_FLAG, &ssi->flags))
+ ssi_waketest(cl, 0); /* FIXME: To be removed */
+ spin_lock_bh(&ssi->lock);
del_timer(&ssi->rx_wd);
del_timer(&ssi->tx_wd);
del_timer(&ssi->keep_alive);
ssi->main_state = 0;
ssi->send_state = 0;
ssi->recv_state = 0;
- ssi->waketest = 0;
+ ssi->flags = 0;
ssi->rxid = 0;
ssi->txid = 0;
list_for_each_safe(head, tmp, &ssi->txqueue) {
@@ -437,7 +442,8 @@ static void ssip_dump_state(struct hsi_client *cl)
dev_err(&cl->device, "Send state: %d\n", ssi->send_state);
dev_err(&cl->device, "CMT %s\n", (ssi->main_state == ACTIVE) ?
"Online" : "Offline");
- dev_err(&cl->device, "Wake test %d\n", ssi->waketest);
+ dev_err(&cl->device, "Wake test %d\n",
+ test_bit(SSIP_WAKETEST_FLAG, &ssi->flags));
dev_err(&cl->device, "Data RX id: %d\n", ssi->rxid);
dev_err(&cl->device, "Data TX id: %d\n", ssi->txid);
@@ -515,17 +521,17 @@ static void ssip_start_rx(struct hsi_client *cl)
dev_dbg(&cl->device, "RX start M(%d) R(%d)\n", ssi->main_state,
ssi->recv_state);
- spin_lock(&ssi->lock);
+ spin_lock_bh(&ssi->lock);
/*
* We can have two UP events in a row due to a short low
	 * high transition. Therefore we need to ignore the second UP event.
*/
if ((ssi->main_state != ACTIVE) || (ssi->recv_state == RECV_READY)) {
- spin_unlock(&ssi->lock);
+ spin_unlock_bh(&ssi->lock);
return;
}
ssip_set_rxstate(ssi, RECV_READY);
- spin_unlock(&ssi->lock);
+ spin_unlock_bh(&ssi->lock);
msg = ssip_claim_cmd(ssi);
ssip_set_cmd(msg, SSIP_READY_CMD);
@@ -539,10 +545,10 @@ static void ssip_stop_rx(struct hsi_client *cl)
struct ssi_protocol *ssi = hsi_client_drvdata(cl);
dev_dbg(&cl->device, "RX stop M(%d)\n", ssi->main_state);
- spin_lock(&ssi->lock);
+ spin_lock_bh(&ssi->lock);
if (likely(ssi->main_state == ACTIVE))
ssip_set_rxstate(ssi, RECV_IDLE);
- spin_unlock(&ssi->lock);
+ spin_unlock_bh(&ssi->lock);
}
static void ssip_free_strans(struct hsi_msg *msg)
@@ -559,9 +565,9 @@ static void ssip_strans_complete(struct hsi_msg *msg)
data = msg->context;
ssip_release_cmd(msg);
- spin_lock(&ssi->lock);
+ spin_lock_bh(&ssi->lock);
ssip_set_txstate(ssi, SENDING);
- spin_unlock(&ssi->lock);
+ spin_unlock_bh(&ssi->lock);
hsi_async_write(cl, data);
}
@@ -666,15 +672,17 @@ static void ssip_rx_bootinforeq(struct hsi_client *cl, u32 cmd)
/* Fall through */
case INIT:
case HANDSHAKE:
- spin_lock(&ssi->lock);
+ spin_lock_bh(&ssi->lock);
ssi->main_state = HANDSHAKE;
- if (!ssi->waketest) {
- ssi->waketest = 1;
+ spin_unlock_bh(&ssi->lock);
+
+ if (!test_and_set_bit(SSIP_WAKETEST_FLAG, &ssi->flags))
ssi_waketest(cl, 1); /* FIXME: To be removed */
- }
+
+ spin_lock_bh(&ssi->lock);
/* Start boot handshake watchdog */
mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
- spin_unlock(&ssi->lock);
+ spin_unlock_bh(&ssi->lock);
dev_dbg(&cl->device, "Send BOOTINFO_RESP\n");
if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID)
dev_warn(&cl->device, "boot info req verid mismatch\n");
@@ -696,14 +704,14 @@ static void ssip_rx_bootinforesp(struct hsi_client *cl, u32 cmd)
if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID)
dev_warn(&cl->device, "boot info resp verid mismatch\n");
- spin_lock(&ssi->lock);
+ spin_lock_bh(&ssi->lock);
if (ssi->main_state != ACTIVE)
/* Use tx_wd as a boot watchdog in non ACTIVE state */
mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
else
dev_dbg(&cl->device, "boot info resp ignored M(%d)\n",
ssi->main_state);
- spin_unlock(&ssi->lock);
+ spin_unlock_bh(&ssi->lock);
}
static void ssip_rx_waketest(struct hsi_client *cl, u32 cmd)
@@ -711,20 +719,22 @@ static void ssip_rx_waketest(struct hsi_client *cl, u32 cmd)
struct ssi_protocol *ssi = hsi_client_drvdata(cl);
unsigned int wkres = SSIP_PAYLOAD(cmd);
- spin_lock(&ssi->lock);
+ spin_lock_bh(&ssi->lock);
if (ssi->main_state != HANDSHAKE) {
dev_dbg(&cl->device, "wake lines test ignored M(%d)\n",
ssi->main_state);
- spin_unlock(&ssi->lock);
+ spin_unlock_bh(&ssi->lock);
return;
}
- if (ssi->waketest) {
- ssi->waketest = 0;
+ spin_unlock_bh(&ssi->lock);
+
+ if (test_and_clear_bit(SSIP_WAKETEST_FLAG, &ssi->flags))
ssi_waketest(cl, 0); /* FIXME: To be removed */
- }
+
+ spin_lock_bh(&ssi->lock);
ssi->main_state = ACTIVE;
del_timer(&ssi->tx_wd); /* Stop boot handshake timer */
- spin_unlock(&ssi->lock);
+ spin_unlock_bh(&ssi->lock);
dev_notice(&cl->device, "WAKELINES TEST %s\n",
wkres & SSIP_WAKETEST_FAILED ? "FAILED" : "OK");
@@ -741,20 +751,20 @@ static void ssip_rx_ready(struct hsi_client *cl)
{
struct ssi_protocol *ssi = hsi_client_drvdata(cl);
- spin_lock(&ssi->lock);
+ spin_lock_bh(&ssi->lock);
if (unlikely(ssi->main_state != ACTIVE)) {
dev_dbg(&cl->device, "READY on wrong state: S(%d) M(%d)\n",
ssi->send_state, ssi->main_state);
- spin_unlock(&ssi->lock);
+ spin_unlock_bh(&ssi->lock);
return;
}
if (ssi->send_state != WAIT4READY) {
dev_dbg(&cl->device, "Ignore spurious READY command\n");
- spin_unlock(&ssi->lock);
+ spin_unlock_bh(&ssi->lock);
return;
}
ssip_set_txstate(ssi, SEND_READY);
- spin_unlock(&ssi->lock);
+ spin_unlock_bh(&ssi->lock);
ssip_xmit(cl);
}
@@ -766,22 +776,22 @@ static void ssip_rx_strans(struct hsi_client *cl, u32 cmd)
int len = SSIP_PDU_LENGTH(cmd);
dev_dbg(&cl->device, "RX strans: %d frames\n", len);
- spin_lock(&ssi->lock);
+ spin_lock_bh(&ssi->lock);
if (unlikely(ssi->main_state != ACTIVE)) {
dev_err(&cl->device, "START TRANS wrong state: S(%d) M(%d)\n",
ssi->send_state, ssi->main_state);
- spin_unlock(&ssi->lock);
+ spin_unlock_bh(&ssi->lock);
return;
}
ssip_set_rxstate(ssi, RECEIVING);
if (unlikely(SSIP_MSG_ID(cmd) != ssi->rxid)) {
dev_err(&cl->device, "START TRANS id %d expected %d\n",
SSIP_MSG_ID(cmd), ssi->rxid);
- spin_unlock(&ssi->lock);
+ spin_unlock_bh(&ssi->lock);
goto out1;
}
ssi->rxid++;
- spin_unlock(&ssi->lock);
+ spin_unlock_bh(&ssi->lock);
skb = netdev_alloc_skb(ssi->netdev, len * 4);
if (unlikely(!skb)) {
dev_err(&cl->device, "No memory for rx skb\n");
@@ -849,7 +859,7 @@ static void ssip_swbreak_complete(struct hsi_msg *msg)
struct ssi_protocol *ssi = hsi_client_drvdata(cl);
ssip_release_cmd(msg);
- spin_lock(&ssi->lock);
+ spin_lock_bh(&ssi->lock);
if (list_empty(&ssi->txqueue)) {
if (atomic_read(&ssi->tx_usecnt)) {
ssip_set_txstate(ssi, SEND_READY);
@@ -857,9 +867,9 @@ static void ssip_swbreak_complete(struct hsi_msg *msg)
ssip_set_txstate(ssi, SEND_IDLE);
hsi_stop_tx(cl);
}
- spin_unlock(&ssi->lock);
+ spin_unlock_bh(&ssi->lock);
} else {
- spin_unlock(&ssi->lock);
+ spin_unlock_bh(&ssi->lock);
ssip_xmit(cl);
}
netif_wake_queue(ssi->netdev);
@@ -876,17 +886,17 @@ static void ssip_tx_data_complete(struct hsi_msg *msg)
ssip_error(cl);
goto out;
}
- spin_lock(&ssi->lock);
+ spin_lock_bh(&ssi->lock);
if (list_empty(&ssi->txqueue)) {
ssip_set_txstate(ssi, SENDING_SWBREAK);
- spin_unlock(&ssi->lock);
+ spin_unlock_bh(&ssi->lock);
cmsg = ssip_claim_cmd(ssi);
ssip_set_cmd(cmsg, SSIP_SWBREAK_CMD);
cmsg->complete = ssip_swbreak_complete;
dev_dbg(&cl->device, "Send SWBREAK\n");
hsi_async_write(cl, cmsg);
} else {
- spin_unlock(&ssi->lock);
+ spin_unlock_bh(&ssi->lock);
ssip_xmit(cl);
}
out:
@@ -926,11 +936,11 @@ static int ssip_pn_open(struct net_device *dev)
}
dev_dbg(&cl->device, "Configuring SSI port\n");
hsi_setup(cl);
- spin_lock_bh(&ssi->lock);
- if (!ssi->waketest) {
- ssi->waketest = 1;
+
+ if (!test_and_set_bit(SSIP_WAKETEST_FLAG, &ssi->flags))
ssi_waketest(cl, 1); /* FIXME: To be removed */
- }
+
+ spin_lock_bh(&ssi->lock);
ssi->main_state = HANDSHAKE;
spin_unlock_bh(&ssi->lock);
@@ -959,6 +969,15 @@ static int ssip_pn_set_mtu(struct net_device *dev, int new_mtu)
return 0;
}
+static void ssip_xmit_work(struct work_struct *work)
+{
+ struct ssi_protocol *ssi =
+ container_of(work, struct ssi_protocol, work);
+ struct hsi_client *cl = ssi->cl;
+
+ ssip_xmit(cl);
+}
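ssip_pn_xmit() runs in atomic (softirq) context; once the omap_ssi controller drops pm_runtime_irq_safe() later in this patch, the transmit path may sleep, so it is deferred to this work item. A minimal generic sketch of the pattern, with hypothetical foo_* names:

	#include <linux/workqueue.h>

	struct foo_dev {
		struct work_struct work;
	};

	static void foo_work_fn(struct work_struct *work)
	{
		struct foo_dev *fd = container_of(work, struct foo_dev, work);

		/* process context: sleeping operations are allowed here */
		(void)fd;
	}

	static void foo_init(struct foo_dev *fd)
	{
		INIT_WORK(&fd->work, foo_work_fn);
	}

	static void foo_kick(struct foo_dev *fd)
	{
		schedule_work(&fd->work);	/* safe from atomic context */
	}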
+
static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct hsi_client *cl = to_hsi_client(dev->dev.parent);
@@ -1011,7 +1030,7 @@ static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev)
dev_dbg(&cl->device, "Start TX on SEND READY qlen %d\n",
ssi->txqueue_len);
spin_unlock_bh(&ssi->lock);
- ssip_xmit(cl);
+ schedule_work(&ssi->work);
} else {
spin_unlock_bh(&ssi->lock);
}
@@ -1088,6 +1107,7 @@ static int ssi_protocol_probe(struct device *dev)
atomic_set(&ssi->tx_usecnt, 0);
hsi_client_set_drvdata(cl, ssi);
ssi->cl = cl;
+ INIT_WORK(&ssi->work, ssip_xmit_work);
ssi->channel_id_cmd = hsi_get_channel_id_by_name(cl, "mcsaab-control");
if (ssi->channel_id_cmd < 0) {
diff --git a/drivers/hsi/controllers/omap_ssi.h b/drivers/hsi/controllers/omap_ssi.h
index 7b4dec2c6..32ced0c8f 100644
--- a/drivers/hsi/controllers/omap_ssi.h
+++ b/drivers/hsi/controllers/omap_ssi.h
@@ -35,6 +35,8 @@
#define SSI_MAX_GDD_LCH 8
#define SSI_BYTES_TO_FRAMES(x) ((((x) - 1) >> 2) + 1)
+#define SSI_WAKE_EN 0
+
/**
* struct omap_ssm_ctx - OMAP synchronous serial module (TX/RX) context
* @mode: Bit transmission mode
@@ -71,13 +73,14 @@ struct omap_ssm_ctx {
* @txqueue: TX message queues
* @rxqueue: RX message queues
* @brkqueue: Queue of incoming HWBREAK requests (FRAME mode)
+ * @errqueue: Queue for failed messages
+ * @errqueue_work: Delayed work for failed messages
* @irq: IRQ number
* @wake_irq: IRQ number for incoming wake line (-1 if none)
* @wake_gpio: GPIO number for incoming wake line (-1 if none)
- * @pio_tasklet: Bottom half for PIO transfers and events
- * @wake_tasklet: Bottom half for incoming wake events
- * @wkin_cken: Keep track of clock references due to the incoming wake line
+ * @flags: flags to keep track of states
* @wk_refcount: Reference count for output wake line
+ * @work: worker for starting TX
* @sys_mpu_enable: Context for the interrupt enable register for irq 0
* @sst: Context for the synchronous serial transmitter
* @ssr: Context for the synchronous serial receiver
@@ -95,14 +98,15 @@ struct omap_ssi_port {
struct list_head txqueue[SSI_MAX_CHANNELS];
struct list_head rxqueue[SSI_MAX_CHANNELS];
struct list_head brkqueue;
+ struct list_head errqueue;
+ struct delayed_work errqueue_work;
unsigned int irq;
int wake_irq;
struct gpio_desc *wake_gpio;
- struct tasklet_struct pio_tasklet;
- struct tasklet_struct wake_tasklet;
bool wktest:1; /* FIXME: HACK to be removed */
- bool wkin_cken:1; /* Workaround */
+ unsigned long flags;
unsigned int wk_refcount;
+ struct work_struct work;
/* OMAP SSI port context */
u32 sys_mpu_enable; /* We use only one irq */
struct omap_ssm_ctx sst;
@@ -138,7 +142,6 @@ struct gdd_trn {
* @fck_rate: clock rate
* @loss_count: To follow if we need to restore context or not
* @max_speed: Maximum TX speed (Kb/s) set by the clients.
- * @sysconfig: SSI controller saved context
* @gdd_gcr: SSI GDD saved context
* @get_loss: Pointer to omap_pm_get_dev_context_loss_count, if any
* @port: Array of pointers of the ports of the controller
@@ -158,7 +161,6 @@ struct omap_ssi_controller {
u32 loss_count;
u32 max_speed;
/* OMAP SSI Controller context */
- u32 sysconfig;
u32 gdd_gcr;
int (*get_loss)(struct device *dev);
struct omap_ssi_port **port;
diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c
index a3e0febfb..9a29b34ed 100644
--- a/drivers/hsi/controllers/omap_ssi_core.c
+++ b/drivers/hsi/controllers/omap_ssi_core.c
@@ -58,7 +58,7 @@ static int ssi_debug_show(struct seq_file *m, void *p __maybe_unused)
seq_printf(m, "REVISION\t: 0x%08x\n", readl(sys + SSI_REVISION_REG));
seq_printf(m, "SYSCONFIG\t: 0x%08x\n", readl(sys + SSI_SYSCONFIG_REG));
seq_printf(m, "SYSSTATUS\t: 0x%08x\n", readl(sys + SSI_SYSSTATUS_REG));
- pm_runtime_put_sync(ssi->device.parent);
+ pm_runtime_put(ssi->device.parent);
return 0;
}
@@ -112,7 +112,7 @@ static int ssi_debug_gdd_show(struct seq_file *m, void *p __maybe_unused)
readw(gdd + SSI_GDD_CLNK_CTRL_REG(lch)));
}
- pm_runtime_put_sync(ssi->device.parent);
+ pm_runtime_put(ssi->device.parent);
return 0;
}
@@ -193,7 +193,7 @@ void ssi_waketest(struct hsi_client *cl, unsigned int enable)
} else {
writel_relaxed(SSI_WAKE(0),
omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
- pm_runtime_put_sync(ssi->device.parent);
+ pm_runtime_put(ssi->device.parent);
}
}
EXPORT_SYMBOL_GPL(ssi_waketest);
@@ -217,7 +217,7 @@ static void ssi_gdd_complete(struct hsi_controller *ssi, unsigned int lch)
if (msg->ttype == HSI_MSG_READ) {
dir = DMA_FROM_DEVICE;
val = SSI_DATAAVAILABLE(msg->channel);
- pm_runtime_put_sync(ssi->device.parent);
+ pm_runtime_put(omap_port->pdev);
} else {
dir = DMA_TO_DEVICE;
val = SSI_DATAACCEPT(msg->channel);
@@ -235,7 +235,9 @@ static void ssi_gdd_complete(struct hsi_controller *ssi, unsigned int lch)
spin_lock(&omap_port->lock);
list_del(&msg->link); /* Dequeue msg */
spin_unlock(&omap_port->lock);
- msg->complete(msg);
+
+ list_add_tail(&msg->link, &omap_port->errqueue);
+ schedule_delayed_work(&omap_port->errqueue_work, 0);
return;
}
spin_lock(&omap_port->lock);
@@ -255,7 +257,13 @@ static void ssi_gdd_tasklet(unsigned long dev)
unsigned int lch;
u32 status_reg;
- pm_runtime_get_sync(ssi->device.parent);
+ pm_runtime_get(ssi->device.parent);
+
+ if (!pm_runtime_active(ssi->device.parent)) {
+ dev_warn(ssi->device.parent, "ssi_gdd_tasklet called without runtime PM!\n");
+ pm_runtime_put(ssi->device.parent);
+ return;
+ }
status_reg = readl(sys + SSI_GDD_MPU_IRQ_STATUS_REG);
for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++) {
@@ -265,7 +273,7 @@ static void ssi_gdd_tasklet(unsigned long dev)
writel_relaxed(status_reg, sys + SSI_GDD_MPU_IRQ_STATUS_REG);
status_reg = readl(sys + SSI_GDD_MPU_IRQ_STATUS_REG);
- pm_runtime_put_sync(ssi->device.parent);
+ pm_runtime_put(ssi->device.parent);
if (status_reg)
tasklet_hi_schedule(&omap_ssi->gdd_tasklet);
@@ -312,7 +320,7 @@ static int ssi_clk_event(struct notifier_block *nb, unsigned long event,
continue;
/* Workaround for SWBREAK + CAwake down race in CMT */
- tasklet_disable(&omap_port->wake_tasklet);
+ disable_irq(omap_port->wake_irq);
/* stop all ssi communication */
pinctrl_pm_select_idle_state(omap_port->pdev);
@@ -338,7 +346,7 @@ static int ssi_clk_event(struct notifier_block *nb, unsigned long event,
/* resume ssi communication */
pinctrl_pm_select_default_state(omap_port->pdev);
- tasklet_enable(&omap_port->wake_tasklet);
+ enable_irq(omap_port->wake_irq);
}
break;
@@ -452,8 +460,6 @@ out_err:
static int ssi_hw_init(struct hsi_controller *ssi)
{
struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
- unsigned int i;
- u32 val;
int err;
err = pm_runtime_get_sync(ssi->device.parent);
@@ -461,27 +467,12 @@ static int ssi_hw_init(struct hsi_controller *ssi)
dev_err(&ssi->device, "runtime PM failed %d\n", err);
return err;
}
- /* Reseting SSI controller */
- writel_relaxed(SSI_SOFTRESET, omap_ssi->sys + SSI_SYSCONFIG_REG);
- val = readl(omap_ssi->sys + SSI_SYSSTATUS_REG);
- for (i = 0; ((i < 20) && !(val & SSI_RESETDONE)); i++) {
- msleep(20);
- val = readl(omap_ssi->sys + SSI_SYSSTATUS_REG);
- }
- if (!(val & SSI_RESETDONE)) {
- dev_err(&ssi->device, "SSI HW reset failed\n");
- pm_runtime_put_sync(ssi->device.parent);
- return -EIO;
- }
	/* Resetting GDD */
writel_relaxed(SSI_SWRESET, omap_ssi->gdd + SSI_GDD_GRST_REG);
/* Get FCK rate in KHz */
omap_ssi->fck_rate = DIV_ROUND_CLOSEST(ssi_get_clk_rate(ssi), 1000);
dev_dbg(&ssi->device, "SSI fck rate %lu KHz\n", omap_ssi->fck_rate);
- /* Set default PM settings */
- val = SSI_AUTOIDLE | SSI_SIDLEMODE_SMART | SSI_MIDLEMODE_SMART;
- writel_relaxed(val, omap_ssi->sys + SSI_SYSCONFIG_REG);
- omap_ssi->sysconfig = val;
+
writel_relaxed(SSI_CLK_AUTOGATING_ON, omap_ssi->sys + SSI_GDD_GCR_REG);
omap_ssi->gdd_gcr = SSI_CLK_AUTOGATING_ON;
pm_runtime_put_sync(ssi->device.parent);
@@ -552,7 +543,6 @@ static int ssi_probe(struct platform_device *pd)
if (err < 0)
goto out1;
- pm_runtime_irq_safe(&pd->dev);
pm_runtime_enable(&pd->dev);
err = ssi_hw_init(ssi);
diff --git a/drivers/hsi/controllers/omap_ssi_port.c b/drivers/hsi/controllers/omap_ssi_port.c
index 6b8f77397..7765de2f1 100644
--- a/drivers/hsi/controllers/omap_ssi_port.c
+++ b/drivers/hsi/controllers/omap_ssi_port.c
@@ -126,7 +126,7 @@ static int ssi_debug_port_show(struct seq_file *m, void *p __maybe_unused)
seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
readl(base + SSI_SSR_BUFFER_CH_REG(ch)));
}
- pm_runtime_put_sync(omap_port->pdev);
+ pm_runtime_put_autosuspend(omap_port->pdev);
return 0;
}
@@ -150,7 +150,7 @@ static int ssi_div_get(void *data, u64 *val)
pm_runtime_get_sync(omap_port->pdev);
*val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG);
- pm_runtime_put_sync(omap_port->pdev);
+ pm_runtime_put_autosuspend(omap_port->pdev);
return 0;
}
@@ -166,7 +166,7 @@ static int ssi_div_set(void *data, u64 val)
pm_runtime_get_sync(omap_port->pdev);
writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG);
omap_port->sst.divisor = val;
- pm_runtime_put_sync(omap_port->pdev);
+ pm_runtime_put_autosuspend(omap_port->pdev);
return 0;
}
@@ -193,6 +193,21 @@ static int ssi_debug_add_port(struct omap_ssi_port *omap_port,
}
#endif
+static void ssi_process_errqueue(struct work_struct *work)
+{
+ struct omap_ssi_port *omap_port;
+ struct list_head *head, *tmp;
+ struct hsi_msg *msg;
+
+ omap_port = container_of(work, struct omap_ssi_port, errqueue_work.work);
+
+ list_for_each_safe(head, tmp, &omap_port->errqueue) {
+ msg = list_entry(head, struct hsi_msg, link);
+ msg->complete(msg);
+ list_del(head);
+ }
+}
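The walk above pairs list_for_each_safe() with list_entry(); a hedged equivalent loop body using the list_for_each_entry_safe() helper, which also detaches each node first so the list stays consistent if the completion callback frees or requeues the message:

	struct hsi_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &omap_port->errqueue, link) {
		list_del(&msg->link);	/* detach first ... */
		msg->complete(msg);	/* ... callback may free msg */
	}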
+
static int ssi_claim_lch(struct hsi_msg *msg)
{
@@ -225,11 +240,21 @@ static int ssi_start_dma(struct hsi_msg *msg, int lch)
u32 d_addr;
u32 tmp;
+ /* Hold clocks during the transfer */
+ pm_runtime_get(omap_port->pdev);
+
+ if (!pm_runtime_active(omap_port->pdev)) {
+ dev_warn(&port->device, "ssi_start_dma called without runtime PM!\n");
+ pm_runtime_put_autosuspend(omap_port->pdev);
+ return -EREMOTEIO;
+ }
+
if (msg->ttype == HSI_MSG_READ) {
err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
DMA_FROM_DEVICE);
if (err < 0) {
dev_dbg(&ssi->device, "DMA map SG failed !\n");
+ pm_runtime_put_autosuspend(omap_port->pdev);
return err;
}
csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT |
@@ -246,6 +271,7 @@ static int ssi_start_dma(struct hsi_msg *msg, int lch)
DMA_TO_DEVICE);
if (err < 0) {
dev_dbg(&ssi->device, "DMA map SG failed !\n");
+ pm_runtime_put_autosuspend(omap_port->pdev);
return err;
}
csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT |
@@ -261,9 +287,6 @@ static int ssi_start_dma(struct hsi_msg *msg, int lch)
dev_dbg(&ssi->device, "lch %d cdsp %08x ccr %04x s_addr %08x d_addr %08x\n",
lch, csdp, ccr, s_addr, d_addr);
- /* Hold clocks during the transfer */
- pm_runtime_get_sync(omap_port->pdev);
-
writew_relaxed(csdp, gdd + SSI_GDD_CSDP_REG(lch));
writew_relaxed(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch));
writel_relaxed(d_addr, gdd + SSI_GDD_CDSA_REG(lch));
@@ -290,11 +313,18 @@ static int ssi_start_pio(struct hsi_msg *msg)
struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
u32 val;
- pm_runtime_get_sync(omap_port->pdev);
+ pm_runtime_get(omap_port->pdev);
+
+ if (!pm_runtime_active(omap_port->pdev)) {
+ dev_warn(&port->device, "ssi_start_pio called without runtime PM!\n");
+ pm_runtime_put_autosuspend(omap_port->pdev);
+ return -EREMOTEIO;
+ }
+
if (msg->ttype == HSI_MSG_WRITE) {
val = SSI_DATAACCEPT(msg->channel);
/* Hold clocks for pio writes */
- pm_runtime_get_sync(omap_port->pdev);
+ pm_runtime_get(omap_port->pdev);
} else {
val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED;
}
@@ -302,7 +332,7 @@ static int ssi_start_pio(struct hsi_msg *msg)
msg->ttype ? "write" : "read");
val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
- pm_runtime_put_sync(omap_port->pdev);
+ pm_runtime_put_autosuspend(omap_port->pdev);
msg->actual_len = 0;
msg->status = HSI_STATUS_PROCEEDING;
@@ -360,7 +390,8 @@ static int ssi_async_break(struct hsi_msg *msg)
spin_unlock_bh(&omap_port->lock);
}
out:
- pm_runtime_put_sync(omap_port->pdev);
+ pm_runtime_mark_last_busy(omap_port->pdev);
+ pm_runtime_put_autosuspend(omap_port->pdev);
return err;
}
@@ -388,6 +419,8 @@ static int ssi_async(struct hsi_msg *msg)
queue = &omap_port->rxqueue[msg->channel];
}
msg->status = HSI_STATUS_QUEUED;
+
+ pm_runtime_get_sync(omap_port->pdev);
spin_lock_bh(&omap_port->lock);
list_add_tail(&msg->link, queue);
err = ssi_start_transfer(queue);
@@ -396,6 +429,8 @@ static int ssi_async(struct hsi_msg *msg)
msg->status = HSI_STATUS_ERROR;
}
spin_unlock_bh(&omap_port->lock);
+ pm_runtime_mark_last_busy(omap_port->pdev);
+ pm_runtime_put_autosuspend(omap_port->pdev);
dev_dbg(&port->device, "msg status %d ttype %d ch %d\n",
msg->status, msg->ttype, msg->channel);
@@ -497,7 +532,8 @@ static int ssi_setup(struct hsi_client *cl)
omap_port->ssr.mode = cl->rx_cfg.mode;
out:
spin_unlock_bh(&omap_port->lock);
- pm_runtime_put_sync(omap_port->pdev);
+ pm_runtime_mark_last_busy(omap_port->pdev);
+ pm_runtime_put_autosuspend(omap_port->pdev);
return err;
}
@@ -528,7 +564,7 @@ static int ssi_flush(struct hsi_client *cl)
continue;
writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
if (msg->ttype == HSI_MSG_READ)
- pm_runtime_put_sync(omap_port->pdev);
+ pm_runtime_put_autosuspend(omap_port->pdev);
omap_ssi->gdd_trn[i].msg = NULL;
}
/* Flush all SST buffers */
@@ -552,7 +588,7 @@ static int ssi_flush(struct hsi_client *cl)
for (i = 0; i < omap_port->channels; i++) {
/* Release write clocks */
if (!list_empty(&omap_port->txqueue[i]))
- pm_runtime_put_sync(omap_port->pdev);
+ pm_runtime_put_autosuspend(omap_port->pdev);
ssi_flush_queue(&omap_port->txqueue[i], NULL);
ssi_flush_queue(&omap_port->rxqueue[i], NULL);
}
@@ -562,17 +598,28 @@ static int ssi_flush(struct hsi_client *cl)
pinctrl_pm_select_default_state(omap_port->pdev);
spin_unlock_bh(&omap_port->lock);
- pm_runtime_put_sync(omap_port->pdev);
+ pm_runtime_mark_last_busy(omap_port->pdev);
+ pm_runtime_put_autosuspend(omap_port->pdev);
return 0;
}
+static void start_tx_work(struct work_struct *work)
+{
+ struct omap_ssi_port *omap_port =
+ container_of(work, struct omap_ssi_port, work);
+ struct hsi_port *port = to_hsi_port(omap_port->dev);
+ struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+
+ pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */
+ writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
+}
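ssi_start_tx() runs under the wk_lock spinlock, and with pm_runtime_irq_safe() removed pm_runtime_get_sync() may sleep, so the clock grab and wake-line write move into this worker. The non-blocking counterpart used in the atomic paths of this patch follows roughly this shape (foo_* is hypothetical):

	#include <linux/pm_runtime.h>

	static int foo_touch_hw(struct device *dev)
	{
		pm_runtime_get(dev);		/* async: does not wait for resume */
		if (!pm_runtime_active(dev)) {
			/* not powered yet: drop the reference and let the
			 * caller retry from process context, rather than
			 * sleeping here */
			pm_runtime_put_autosuspend(dev);
			return -EAGAIN;
		}

		/* ... register access is safe while the reference is held ... */

		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
		return 0;
	}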
+
static int ssi_start_tx(struct hsi_client *cl)
{
struct hsi_port *port = hsi_get_port(cl);
struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
- struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
- struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount);
@@ -581,10 +628,10 @@ static int ssi_start_tx(struct hsi_client *cl)
spin_unlock_bh(&omap_port->wk_lock);
return 0;
}
- pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */
- writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
spin_unlock_bh(&omap_port->wk_lock);
+ schedule_work(&omap_port->work);
+
return 0;
}
@@ -604,9 +651,12 @@ static int ssi_stop_tx(struct hsi_client *cl)
return 0;
}
writel(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
- pm_runtime_put_sync(omap_port->pdev); /* Release clocks */
spin_unlock_bh(&omap_port->wk_lock);
+ pm_runtime_mark_last_busy(omap_port->pdev);
+ pm_runtime_put_autosuspend(omap_port->pdev); /* Release clocks */
+
+
return 0;
}
@@ -616,6 +666,7 @@ static void ssi_transfer(struct omap_ssi_port *omap_port,
struct hsi_msg *msg;
int err = -1;
+ pm_runtime_get(omap_port->pdev);
spin_lock_bh(&omap_port->lock);
while (err < 0) {
err = ssi_start_transfer(queue);
@@ -630,6 +681,8 @@ static void ssi_transfer(struct omap_ssi_port *omap_port,
}
}
spin_unlock_bh(&omap_port->lock);
+ pm_runtime_mark_last_busy(omap_port->pdev);
+ pm_runtime_put_autosuspend(omap_port->pdev);
}
static void ssi_cleanup_queues(struct hsi_client *cl)
@@ -658,7 +711,8 @@ static void ssi_cleanup_queues(struct hsi_client *cl)
txbufstate |= (1 << i);
status |= SSI_DATAACCEPT(i);
/* Release the clocks writes, also GDD ones */
- pm_runtime_put_sync(omap_port->pdev);
+ pm_runtime_mark_last_busy(omap_port->pdev);
+ pm_runtime_put_autosuspend(omap_port->pdev);
}
ssi_flush_queue(&omap_port->txqueue[i], cl);
}
@@ -712,8 +766,10 @@ static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl)
* Clock references for write will be handled in
* ssi_cleanup_queues
*/
- if (msg->ttype == HSI_MSG_READ)
- pm_runtime_put_sync(omap_port->pdev);
+ if (msg->ttype == HSI_MSG_READ) {
+ pm_runtime_mark_last_busy(omap_port->pdev);
+ pm_runtime_put_autosuspend(omap_port->pdev);
+ }
omap_ssi->gdd_trn[i].msg = NULL;
}
tmp = readl_relaxed(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
@@ -738,32 +794,30 @@ static int ssi_release(struct hsi_client *cl)
struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
- spin_lock_bh(&omap_port->lock);
pm_runtime_get_sync(omap_port->pdev);
+ spin_lock_bh(&omap_port->lock);
/* Stop all the pending DMA requests for that client */
ssi_cleanup_gdd(ssi, cl);
/* Now cleanup all the queues */
ssi_cleanup_queues(cl);
- pm_runtime_put_sync(omap_port->pdev);
/* If it is the last client of the port, do extra checks and cleanup */
if (port->claimed <= 1) {
/*
* Drop the clock reference for the incoming wake line
* if it is still kept high by the other side.
*/
- if (omap_port->wkin_cken) {
+ if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags))
pm_runtime_put_sync(omap_port->pdev);
- omap_port->wkin_cken = 0;
- }
- pm_runtime_get_sync(omap_port->pdev);
+ pm_runtime_get(omap_port->pdev);
/* Stop any SSI TX/RX without a client */
ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
omap_port->sst.mode = SSI_MODE_SLEEP;
omap_port->ssr.mode = SSI_MODE_SLEEP;
- pm_runtime_put_sync(omap_port->pdev);
+ pm_runtime_put(omap_port->pdev);
WARN_ON(omap_port->wk_refcount != 0);
}
spin_unlock_bh(&omap_port->lock);
+ pm_runtime_put_sync(omap_port->pdev);
return 0;
}
@@ -868,7 +922,7 @@ static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
u32 reg;
u32 val;
- spin_lock(&omap_port->lock);
+ spin_lock_bh(&omap_port->lock);
msg = list_first_entry(queue, struct hsi_msg, link);
if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) {
msg->actual_len = 0;
@@ -900,7 +954,7 @@ static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
(msg->ttype == HSI_MSG_WRITE))) {
writel(val, omap_ssi->sys +
SSI_MPU_STATUS_REG(port->num, 0));
- spin_unlock(&omap_port->lock);
+ spin_unlock_bh(&omap_port->lock);
return;
}
@@ -910,18 +964,19 @@ static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
reg = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
if (msg->ttype == HSI_MSG_WRITE) {
/* Release clocks for write transfer */
- pm_runtime_put_sync(omap_port->pdev);
+ pm_runtime_mark_last_busy(omap_port->pdev);
+ pm_runtime_put_autosuspend(omap_port->pdev);
}
reg &= ~val;
writel_relaxed(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
writel_relaxed(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
list_del(&msg->link);
- spin_unlock(&omap_port->lock);
+ spin_unlock_bh(&omap_port->lock);
msg->complete(msg);
ssi_transfer(omap_port, queue);
}
-static void ssi_pio_tasklet(unsigned long ssi_port)
+static irqreturn_t ssi_pio_thread(int irq, void *ssi_port)
{
struct hsi_port *port = (struct hsi_port *)ssi_port;
struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
@@ -932,41 +987,35 @@ static void ssi_pio_tasklet(unsigned long ssi_port)
u32 status_reg;
pm_runtime_get_sync(omap_port->pdev);
- status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
- status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));
- for (ch = 0; ch < omap_port->channels; ch++) {
- if (status_reg & SSI_DATAACCEPT(ch))
- ssi_pio_complete(port, &omap_port->txqueue[ch]);
- if (status_reg & SSI_DATAAVAILABLE(ch))
- ssi_pio_complete(port, &omap_port->rxqueue[ch]);
- }
- if (status_reg & SSI_BREAKDETECTED)
- ssi_break_complete(port);
- if (status_reg & SSI_ERROROCCURED)
- ssi_error(port);
+ do {
+ status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
+ status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));
- status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
- status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));
- pm_runtime_put_sync(omap_port->pdev);
+ for (ch = 0; ch < omap_port->channels; ch++) {
+ if (status_reg & SSI_DATAACCEPT(ch))
+ ssi_pio_complete(port, &omap_port->txqueue[ch]);
+ if (status_reg & SSI_DATAAVAILABLE(ch))
+ ssi_pio_complete(port, &omap_port->rxqueue[ch]);
+ }
+ if (status_reg & SSI_BREAKDETECTED)
+ ssi_break_complete(port);
+ if (status_reg & SSI_ERROROCCURED)
+ ssi_error(port);
- if (status_reg)
- tasklet_hi_schedule(&omap_port->pio_tasklet);
- else
- enable_irq(omap_port->irq);
-}
+ status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
+ status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));
-static irqreturn_t ssi_pio_isr(int irq, void *port)
-{
- struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
+ /* TODO: sleep if we retry? */
+ } while (status_reg);
- tasklet_hi_schedule(&omap_port->pio_tasklet);
- disable_irq_nosync(irq);
+ pm_runtime_mark_last_busy(omap_port->pdev);
+ pm_runtime_put_autosuspend(omap_port->pdev);
return IRQ_HANDLED;
}
-static void ssi_wake_tasklet(unsigned long ssi_port)
+static irqreturn_t ssi_wake_thread(int irq __maybe_unused, void *ssi_port)
{
struct hsi_port *port = (struct hsi_port *)ssi_port;
struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
@@ -981,12 +1030,8 @@ static void ssi_wake_tasklet(unsigned long ssi_port)
* This workaround will avoid breaking the clock reference
	 * count when such a situation occurs.
*/
- spin_lock(&omap_port->lock);
- if (!omap_port->wkin_cken) {
- omap_port->wkin_cken = 1;
+ if (!test_and_set_bit(SSI_WAKE_EN, &omap_port->flags))
pm_runtime_get_sync(omap_port->pdev);
- }
- spin_unlock(&omap_port->lock);
dev_dbg(&ssi->device, "Wake in high\n");
if (omap_port->wktest) { /* FIXME: HACK ! To be removed */
writel(SSI_WAKE(0),
@@ -1000,26 +1045,16 @@ static void ssi_wake_tasklet(unsigned long ssi_port)
omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
}
hsi_event(port, HSI_EVENT_STOP_RX);
- spin_lock(&omap_port->lock);
- if (omap_port->wkin_cken) {
- pm_runtime_put_sync(omap_port->pdev);
- omap_port->wkin_cken = 0;
+ if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags)) {
+ pm_runtime_mark_last_busy(omap_port->pdev);
+ pm_runtime_put_autosuspend(omap_port->pdev);
}
- spin_unlock(&omap_port->lock);
}
-}
-
-static irqreturn_t ssi_wake_isr(int irq __maybe_unused, void *ssi_port)
-{
- struct omap_ssi_port *omap_port = hsi_port_drvdata(ssi_port);
-
- tasklet_hi_schedule(&omap_port->wake_tasklet);
return IRQ_HANDLED;
}
-static int ssi_port_irq(struct hsi_port *port,
- struct platform_device *pd)
+static int ssi_port_irq(struct hsi_port *port, struct platform_device *pd)
{
struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
int err;
@@ -1030,18 +1065,15 @@ static int ssi_port_irq(struct hsi_port *port,
return err;
}
omap_port->irq = err;
- tasklet_init(&omap_port->pio_tasklet, ssi_pio_tasklet,
- (unsigned long)port);
- err = devm_request_irq(&port->device, omap_port->irq, ssi_pio_isr,
- 0, "mpu_irq0", port);
+ err = devm_request_threaded_irq(&port->device, omap_port->irq, NULL,
+ ssi_pio_thread, IRQF_ONESHOT, "SSI PORT", port);
if (err < 0)
dev_err(&port->device, "Request IRQ %d failed (%d)\n",
omap_port->irq, err);
return err;
}
-static int ssi_wake_irq(struct hsi_port *port,
- struct platform_device *pd)
+static int ssi_wake_irq(struct hsi_port *port, struct platform_device *pd)
{
struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
int cawake_irq;
@@ -1053,13 +1085,12 @@ static int ssi_wake_irq(struct hsi_port *port,
}
cawake_irq = gpiod_to_irq(omap_port->wake_gpio);
-
omap_port->wake_irq = cawake_irq;
- tasklet_init(&omap_port->wake_tasklet, ssi_wake_tasklet,
- (unsigned long)port);
- err = devm_request_irq(&port->device, cawake_irq, ssi_wake_isr,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "cawake", port);
+
+ err = devm_request_threaded_irq(&port->device, cawake_irq, NULL,
+ ssi_wake_thread,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "SSI cawake", port);
if (err < 0)
dev_err(&port->device, "Request Wake in IRQ %d failed %d\n",
cawake_irq, err);
@@ -1169,6 +1200,9 @@ static int ssi_port_probe(struct platform_device *pd)
omap_port->pdev = &pd->dev;
omap_port->port_id = port_id;
+ INIT_DEFERRABLE_WORK(&omap_port->errqueue_work, ssi_process_errqueue);
+ INIT_WORK(&omap_port->work, start_tx_work);
+
/* initialize HSI port */
port->async = ssi_async;
port->setup = ssi_setup;
@@ -1202,7 +1236,8 @@ static int ssi_port_probe(struct platform_device *pd)
spin_lock_init(&omap_port->wk_lock);
omap_port->dev = &port->device;
- pm_runtime_irq_safe(omap_port->pdev);
+ pm_runtime_use_autosuspend(omap_port->pdev);
+ pm_runtime_set_autosuspend_delay(omap_port->pdev, 250);
pm_runtime_enable(omap_port->pdev);
#ifdef CONFIG_DEBUG_FS
@@ -1234,10 +1269,9 @@ static int ssi_port_remove(struct platform_device *pd)
ssi_debug_remove_port(port);
#endif
- hsi_port_unregister_clients(port);
+ cancel_delayed_work_sync(&omap_port->errqueue_work);
- tasklet_kill(&omap_port->wake_tasklet);
- tasklet_kill(&omap_port->pio_tasklet);
+ hsi_port_unregister_clients(port);
port->async = hsi_dummy_msg;
port->setup = hsi_dummy_cl;
@@ -1248,6 +1282,8 @@ static int ssi_port_remove(struct platform_device *pd)
omap_ssi->port[omap_port->port_id] = NULL;
platform_set_drvdata(pd, NULL);
+
+ pm_runtime_dont_use_autosuspend(&pd->dev);
pm_runtime_disable(&pd->dev);
return 0;
diff --git a/drivers/hsi/hsi_core.c b/drivers/hsi/hsi_core.c
new file mode 100644
index 000000000..c2a2a9795
--- /dev/null
+++ b/drivers/hsi/hsi_core.c
@@ -0,0 +1,781 @@
+/*
+ * HSI core.
+ *
+ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
+ *
+ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+#include <linux/hsi/hsi.h>
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include <linux/kobject.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include "hsi_core.h"
+
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *a __maybe_unused, char *buf)
+{
+ return sprintf(buf, "hsi:%s\n", dev_name(dev));
+}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *hsi_bus_dev_attrs[] = {
+ &dev_attr_modalias.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(hsi_bus_dev);
+
+static int hsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ add_uevent_var(env, "MODALIAS=hsi:%s", dev_name(dev));
+
+ return 0;
+}
+
+static int hsi_bus_match(struct device *dev, struct device_driver *driver)
+{
+ if (of_driver_match_device(dev, driver))
+ return true;
+
+ if (strcmp(dev_name(dev), driver->name) == 0)
+ return true;
+
+ return false;
+}
+
+static struct bus_type hsi_bus_type = {
+ .name = "hsi",
+ .dev_groups = hsi_bus_dev_groups,
+ .match = hsi_bus_match,
+ .uevent = hsi_bus_uevent,
+};
+
+static void hsi_client_release(struct device *dev)
+{
+ struct hsi_client *cl = to_hsi_client(dev);
+
+ kfree(cl->tx_cfg.channels);
+ kfree(cl->rx_cfg.channels);
+ kfree(cl);
+}
+
+struct hsi_client *hsi_new_client(struct hsi_port *port,
+ struct hsi_board_info *info)
+{
+ struct hsi_client *cl;
+ size_t size;
+
+ cl = kzalloc(sizeof(*cl), GFP_KERNEL);
+ if (!cl)
+ goto err;
+
+ cl->tx_cfg = info->tx_cfg;
+ if (cl->tx_cfg.channels) {
+ size = cl->tx_cfg.num_channels * sizeof(*cl->tx_cfg.channels);
+ cl->tx_cfg.channels = kmemdup(info->tx_cfg.channels, size,
+ GFP_KERNEL);
+ if (!cl->tx_cfg.channels)
+ goto err_tx;
+ }
+
+ cl->rx_cfg = info->rx_cfg;
+ if (cl->rx_cfg.channels) {
+ size = cl->rx_cfg.num_channels * sizeof(*cl->rx_cfg.channels);
+ cl->rx_cfg.channels = kmemdup(info->rx_cfg.channels, size,
+ GFP_KERNEL);
+ if (!cl->rx_cfg.channels)
+ goto err_rx;
+ }
+
+ cl->device.bus = &hsi_bus_type;
+ cl->device.parent = &port->device;
+ cl->device.release = hsi_client_release;
+ dev_set_name(&cl->device, "%s", info->name);
+ cl->device.platform_data = info->platform_data;
+ if (info->archdata)
+ cl->device.archdata = *info->archdata;
+ if (device_register(&cl->device) < 0) {
+ pr_err("hsi: failed to register client: %s\n", info->name);
+ put_device(&cl->device);
+ }
+
+ return cl;
+err_rx:
+ kfree(cl->tx_cfg.channels);
+err_tx:
+ kfree(cl);
+err:
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(hsi_new_client);
+
+static void hsi_scan_board_info(struct hsi_controller *hsi)
+{
+ struct hsi_cl_info *cl_info;
+ struct hsi_port *p;
+
+ list_for_each_entry(cl_info, &hsi_board_list, list)
+ if (cl_info->info.hsi_id == hsi->id) {
+ p = hsi_find_port_num(hsi, cl_info->info.port);
+ if (!p)
+ continue;
+ hsi_new_client(p, &cl_info->info);
+ }
+}
+
+#ifdef CONFIG_OF
+static struct hsi_board_info hsi_char_dev_info = {
+ .name = "hsi_char",
+};
+
+static int hsi_of_property_parse_mode(struct device_node *client, char *name,
+ unsigned int *result)
+{
+ const char *mode;
+ int err;
+
+ err = of_property_read_string(client, name, &mode);
+ if (err < 0)
+ return err;
+
+ if (strcmp(mode, "stream") == 0)
+ *result = HSI_MODE_STREAM;
+ else if (strcmp(mode, "frame") == 0)
+ *result = HSI_MODE_FRAME;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int hsi_of_property_parse_flow(struct device_node *client, char *name,
+ unsigned int *result)
+{
+ const char *flow;
+ int err;
+
+ err = of_property_read_string(client, name, &flow);
+ if (err < 0)
+ return err;
+
+ if (strcmp(flow, "synchronized") == 0)
+ *result = HSI_FLOW_SYNC;
+ else if (strcmp(flow, "pipeline") == 0)
+ *result = HSI_FLOW_PIPE;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int hsi_of_property_parse_arb_mode(struct device_node *client,
+ char *name, unsigned int *result)
+{
+ const char *arb_mode;
+ int err;
+
+ err = of_property_read_string(client, name, &arb_mode);
+ if (err < 0)
+ return err;
+
+ if (strcmp(arb_mode, "round-robin") == 0)
+ *result = HSI_ARB_RR;
+ else if (strcmp(arb_mode, "priority") == 0)
+ *result = HSI_ARB_PRIO;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static void hsi_add_client_from_dt(struct hsi_port *port,
+ struct device_node *client)
+{
+ struct hsi_client *cl;
+ struct hsi_channel channel;
+ struct property *prop;
+ char name[32];
+ int length, cells, err, i, max_chan, mode;
+
+ cl = kzalloc(sizeof(*cl), GFP_KERNEL);
+ if (!cl)
+ return;
+
+ err = of_modalias_node(client, name, sizeof(name));
+ if (err)
+ goto err;
+
+ dev_set_name(&cl->device, "%s", name);
+
+ err = hsi_of_property_parse_mode(client, "hsi-mode", &mode);
+ if (err) {
+ err = hsi_of_property_parse_mode(client, "hsi-rx-mode",
+ &cl->rx_cfg.mode);
+ if (err)
+ goto err;
+
+ err = hsi_of_property_parse_mode(client, "hsi-tx-mode",
+ &cl->tx_cfg.mode);
+ if (err)
+ goto err;
+ } else {
+ cl->rx_cfg.mode = mode;
+ cl->tx_cfg.mode = mode;
+ }
+
+ err = of_property_read_u32(client, "hsi-speed-kbps",
+ &cl->tx_cfg.speed);
+ if (err)
+ goto err;
+ cl->rx_cfg.speed = cl->tx_cfg.speed;
+
+ err = hsi_of_property_parse_flow(client, "hsi-flow",
+ &cl->rx_cfg.flow);
+ if (err)
+ goto err;
+
+ err = hsi_of_property_parse_arb_mode(client, "hsi-arb-mode",
+ &cl->rx_cfg.arb_mode);
+ if (err)
+ goto err;
+
+ prop = of_find_property(client, "hsi-channel-ids", &length);
+ if (!prop) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ cells = length / sizeof(u32);
+
+ cl->rx_cfg.num_channels = cells;
+ cl->tx_cfg.num_channels = cells;
+
+ cl->rx_cfg.channels = kzalloc(cells * sizeof(channel), GFP_KERNEL);
+ if (!cl->rx_cfg.channels) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ cl->tx_cfg.channels = kzalloc(cells * sizeof(channel), GFP_KERNEL);
+ if (!cl->tx_cfg.channels) {
+ err = -ENOMEM;
+ goto err2;
+ }
+
+ max_chan = 0;
+ for (i = 0; i < cells; i++) {
+ err = of_property_read_u32_index(client, "hsi-channel-ids", i,
+ &channel.id);
+ if (err)
+ goto err3;
+
+ err = of_property_read_string_index(client, "hsi-channel-names",
+ i, &channel.name);
+ if (err)
+ channel.name = NULL;
+
+ if (channel.id > max_chan)
+ max_chan = channel.id;
+
+ cl->rx_cfg.channels[i] = channel;
+ cl->tx_cfg.channels[i] = channel;
+ }
+
+ cl->rx_cfg.num_hw_channels = max_chan + 1;
+ cl->tx_cfg.num_hw_channels = max_chan + 1;
+
+ cl->device.bus = &hsi_bus_type;
+ cl->device.parent = &port->device;
+ cl->device.release = hsi_client_release;
+ cl->device.of_node = client;
+
+ if (device_register(&cl->device) < 0) {
+ pr_err("hsi: failed to register client: %s\n", name);
+ put_device(&cl->device);
+ }
+
+ return;
+
+err3:
+ kfree(cl->tx_cfg.channels);
+err2:
+ kfree(cl->rx_cfg.channels);
+err:
+ kfree(cl);
+ pr_err("hsi client: missing or incorrect of property: err=%d\n", err);
+}
+
+void hsi_add_clients_from_dt(struct hsi_port *port, struct device_node *clients)
+{
+ struct device_node *child;
+
+ /* register hsi-char device */
+ hsi_new_client(port, &hsi_char_dev_info);
+
+ for_each_available_child_of_node(clients, child)
+ hsi_add_client_from_dt(port, child);
+}
+EXPORT_SYMBOL_GPL(hsi_add_clients_from_dt);
+#endif
+
+int hsi_remove_client(struct device *dev, void *data __maybe_unused)
+{
+ device_unregister(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hsi_remove_client);
+
+static int hsi_remove_port(struct device *dev, void *data __maybe_unused)
+{
+ device_for_each_child(dev, NULL, hsi_remove_client);
+ device_unregister(dev);
+
+ return 0;
+}
+
+static void hsi_controller_release(struct device *dev)
+{
+ struct hsi_controller *hsi = to_hsi_controller(dev);
+
+ kfree(hsi->port);
+ kfree(hsi);
+}
+
+static void hsi_port_release(struct device *dev)
+{
+ kfree(to_hsi_port(dev));
+}
+
+/**
+ * hsi_port_unregister_clients - Unregister all clients of an HSI port
+ * @port: The HSI port whose clients will be unregistered
+ */
+void hsi_port_unregister_clients(struct hsi_port *port)
+{
+ device_for_each_child(&port->device, NULL, hsi_remove_client);
+}
+EXPORT_SYMBOL_GPL(hsi_port_unregister_clients);
+
+/**
+ * hsi_unregister_controller - Unregister an HSI controller
+ * @hsi: The HSI controller to unregister
+ */
+void hsi_unregister_controller(struct hsi_controller *hsi)
+{
+ device_for_each_child(&hsi->device, NULL, hsi_remove_port);
+ device_unregister(&hsi->device);
+}
+EXPORT_SYMBOL_GPL(hsi_unregister_controller);
+
+/**
+ * hsi_register_controller - Register an HSI controller and its ports
+ * @hsi: The HSI controller to register
+ *
+ * Returns -errno on failure, 0 on success.
+ */
+int hsi_register_controller(struct hsi_controller *hsi)
+{
+ unsigned int i;
+ int err;
+
+ err = device_add(&hsi->device);
+ if (err < 0)
+ return err;
+ for (i = 0; i < hsi->num_ports; i++) {
+ hsi->port[i]->device.parent = &hsi->device;
+ err = device_add(&hsi->port[i]->device);
+ if (err < 0)
+ goto out;
+ }
+ /* Populate HSI bus with HSI clients */
+ hsi_scan_board_info(hsi);
+
+ return 0;
+out:
+ while (i-- > 0)
+ device_del(&hsi->port[i]->device);
+ device_del(&hsi->device);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(hsi_register_controller);
+
+/**
+ * hsi_register_client_driver - Register an HSI client to the HSI bus
+ * @drv: HSI client driver to register
+ *
+ * Returns -errno on failure, 0 on success.
+ */
+int hsi_register_client_driver(struct hsi_client_driver *drv)
+{
+ drv->driver.bus = &hsi_bus_type;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(hsi_register_client_driver);
+
+static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused)
+{
+ return 0;
+}
+
+static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
+{
+ return 0;
+}
+
+/**
+ * hsi_put_controller - Free an HSI controller
+ *
+ * @hsi: Pointer to the HSI controller to be freed
+ *
+ * HSI controller drivers should only use this function if they need
+ * to free their allocated hsi_controller structures before a successful
+ * call to hsi_register_controller. Other use is not allowed.
+ */
+void hsi_put_controller(struct hsi_controller *hsi)
+{
+ unsigned int i;
+
+ if (!hsi)
+ return;
+
+ for (i = 0; i < hsi->num_ports; i++)
+ if (hsi->port && hsi->port[i])
+ put_device(&hsi->port[i]->device);
+ put_device(&hsi->device);
+}
+EXPORT_SYMBOL_GPL(hsi_put_controller);
+
+/**
+ * hsi_alloc_controller - Allocate an HSI controller and its ports
+ * @n_ports: Number of ports on the HSI controller
+ * @flags: Kernel allocation flags
+ *
+ * Return NULL on failure or a pointer to an hsi_controller on success.
+ */
+struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags)
+{
+ struct hsi_controller *hsi;
+ struct hsi_port **port;
+ unsigned int i;
+
+ if (!n_ports)
+ return NULL;
+
+ hsi = kzalloc(sizeof(*hsi), flags);
+ if (!hsi)
+ return NULL;
+ port = kzalloc(sizeof(*port)*n_ports, flags);
+ if (!port) {
+ kfree(hsi);
+ return NULL;
+ }
+ hsi->num_ports = n_ports;
+ hsi->port = port;
+ hsi->device.release = hsi_controller_release;
+ device_initialize(&hsi->device);
+
+ for (i = 0; i < n_ports; i++) {
+ port[i] = kzalloc(sizeof(**port), flags);
+ if (port[i] == NULL)
+ goto out;
+ port[i]->num = i;
+ port[i]->async = hsi_dummy_msg;
+ port[i]->setup = hsi_dummy_cl;
+ port[i]->flush = hsi_dummy_cl;
+ port[i]->start_tx = hsi_dummy_cl;
+ port[i]->stop_tx = hsi_dummy_cl;
+ port[i]->release = hsi_dummy_cl;
+ mutex_init(&port[i]->lock);
+ BLOCKING_INIT_NOTIFIER_HEAD(&port[i]->n_head);
+ dev_set_name(&port[i]->device, "port%d", i);
+ hsi->port[i]->device.release = hsi_port_release;
+ device_initialize(&hsi->port[i]->device);
+ }
+
+ return hsi;
+out:
+ hsi_put_controller(hsi);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(hsi_alloc_controller);
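A hedged sketch of the allocate/register/error-path sequence the kernel-doc above implies, with hsi_put_controller() reserved for the pre-registration failure case (foo names are hypothetical):

	static int foo_setup_controller(struct device *parent)
	{
		struct hsi_controller *hsi;
		int err;

		hsi = hsi_alloc_controller(2, GFP_KERNEL);	/* two ports */
		if (!hsi)
			return -ENOMEM;

		hsi->device.parent = parent;
		dev_set_name(&hsi->device, "foo-hsi");

		err = hsi_register_controller(hsi);
		if (err < 0) {
			hsi_put_controller(hsi);	/* drops ports, then controller */
			return err;
		}

		/* torn down later via hsi_unregister_controller(hsi) */
		return 0;
	}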
+
+/**
+ * hsi_free_msg - Free an HSI message
+ * @msg: Pointer to the HSI message
+ *
+ * The client is responsible for freeing the buffers pointed to by the
+ * scatterlists.
+void hsi_free_msg(struct hsi_msg *msg)
+{
+ if (!msg)
+ return;
+ sg_free_table(&msg->sgt);
+ kfree(msg);
+}
+EXPORT_SYMBOL_GPL(hsi_free_msg);
+
+/**
+ * hsi_alloc_msg - Allocate an HSI message
+ * @nents: Number of memory entries
+ * @flags: Kernel allocation flags
+ *
+ * nents can be 0. This mainly makes sense for read transfers.
+ * In that case, HSI drivers will call the complete callback when
+ * there is data to be read without consuming it.
+ *
+ * Return NULL on failure or a pointer to an hsi_msg on success.
+ */
+struct hsi_msg *hsi_alloc_msg(unsigned int nents, gfp_t flags)
+{
+ struct hsi_msg *msg;
+ int err;
+
+ msg = kzalloc(sizeof(*msg), flags);
+ if (!msg)
+ return NULL;
+
+ if (!nents)
+ return msg;
+
+ err = sg_alloc_table(&msg->sgt, nents, flags);
+ if (unlikely(err)) {
+ kfree(msg);
+ msg = NULL;
+ }
+
+ return msg;
+}
+EXPORT_SYMBOL_GPL(hsi_alloc_msg);
+
+/**
+ * hsi_async - Submit an HSI transfer to the controller
+ * @cl: HSI client sending the transfer
+ * @msg: The HSI transfer passed to controller
+ *
+ * The HSI message must have the channel, ttype, complete and destructor
+ * fields set beforehand. If nents > 0, the client must also initialize
+ * the scatterlists to point to the buffers to write to or read from.
+ *
+ * HSI controllers rely on pre-allocated buffers from their clients and they
+ * do not allocate buffers on their own.
+ *
+ * Once the HSI message transfer finishes, the HSI controller calls the
+ * complete callback with the status and actual_len fields of the HSI message
+ * updated. The complete callback can be called before returning from
+ * hsi_async.
+ *
+ * Returns -errno on failure or 0 on success.
+ */
+int hsi_async(struct hsi_client *cl, struct hsi_msg *msg)
+{
+ struct hsi_port *port = hsi_get_port(cl);
+
+ if (!hsi_port_claimed(cl))
+ return -EACCES;
+
+ WARN_ON_ONCE(!msg->destructor || !msg->complete);
+ msg->cl = cl;
+
+ return port->async(msg);
+}
+EXPORT_SYMBOL_GPL(hsi_async);
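+
+/*
+ * Hypothetical client-side sketch (editorial illustration): allocating a
+ * one-entry write message and submitting it. The buffer, channel number
+ * and the my_complete/my_destructor callbacks are placeholders.
+ *
+ *	struct hsi_msg *msg = hsi_alloc_msg(1, GFP_KERNEL);
+ *
+ *	if (!msg)
+ *		return -ENOMEM;
+ *	sg_init_one(msg->sgt.sgl, buf, len);
+ *	msg->channel = channel;
+ *	msg->ttype = HSI_MSG_WRITE;
+ *	msg->complete = my_complete;
+ *	msg->destructor = my_destructor;
+ *	err = hsi_async(cl, msg);
+ */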
+
+/**
+ * hsi_claim_port - Claim the HSI client's port
+ * @cl: HSI client that wants to claim its port
+ * @share: Flag to indicate whether the client wants to share the port
+ *
+ * Returns -errno on failure, 0 on success.
+ */
+int hsi_claim_port(struct hsi_client *cl, unsigned int share)
+{
+ struct hsi_port *port = hsi_get_port(cl);
+ int err = 0;
+
+ mutex_lock(&port->lock);
+ if ((port->claimed) && (!port->shared || !share)) {
+ err = -EBUSY;
+ goto out;
+ }
+ if (!try_module_get(to_hsi_controller(port->device.parent)->owner)) {
+ err = -ENODEV;
+ goto out;
+ }
+ port->claimed++;
+ port->shared = !!share;
+ cl->pclaimed = 1;
+out:
+ mutex_unlock(&port->lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(hsi_claim_port);
+
+/**
+ * hsi_release_port - Release the HSI client's port
+ * @cl: HSI client which previously claimed its port
+ */
+void hsi_release_port(struct hsi_client *cl)
+{
+ struct hsi_port *port = hsi_get_port(cl);
+
+ mutex_lock(&port->lock);
+ /* Allow HW driver to do some cleanup */
+ port->release(cl);
+ if (cl->pclaimed)
+ port->claimed--;
+ BUG_ON(port->claimed < 0);
+ cl->pclaimed = 0;
+ if (!port->claimed)
+ port->shared = 0;
+ module_put(to_hsi_controller(port->device.parent)->owner);
+ mutex_unlock(&port->lock);
+}
+EXPORT_SYMBOL_GPL(hsi_release_port);
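+
+/*
+ * Editorial illustration of the expected claim/use/release pattern for a
+ * client driver (an exclusive claim with share == 0 is one option):
+ *
+ *	err = hsi_claim_port(cl, 0);
+ *	if (err < 0)
+ *		return err;
+ *	(setup and transfers via hsi_async() go here)
+ *	hsi_release_port(cl);
+ */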
+
+static int hsi_event_notifier_call(struct notifier_block *nb,
+ unsigned long event, void *data __maybe_unused)
+{
+ struct hsi_client *cl = container_of(nb, struct hsi_client, nb);
+
+ (*cl->ehandler)(cl, event);
+
+ return 0;
+}
+
+/**
+ * hsi_register_port_event - Register a client to receive port events
+ * @cl: HSI client that wants to receive port events
+ * @handler: Event handler callback
+ *
+ * Clients should register a callback to be able to receive
+ * events from the ports. Registration should happen after
+ * claiming the port.
+ * The handler can be called in interrupt context.
+ *
+ * Returns -errno on error, or 0 on success.
+ */
+int hsi_register_port_event(struct hsi_client *cl,
+ void (*handler)(struct hsi_client *, unsigned long))
+{
+ struct hsi_port *port = hsi_get_port(cl);
+
+ if (!handler || cl->ehandler)
+ return -EINVAL;
+ if (!hsi_port_claimed(cl))
+ return -EACCES;
+ cl->ehandler = handler;
+ cl->nb.notifier_call = hsi_event_notifier_call;
+
+ return blocking_notifier_chain_register(&port->n_head, &cl->nb);
+}
+EXPORT_SYMBOL_GPL(hsi_register_port_event);
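+
+/*
+ * Hypothetical sketch (editorial illustration): a minimal event handler
+ * registered after claiming the port. Because it may run in interrupt
+ * context, it must not sleep.
+ *
+ *	static void my_port_event(struct hsi_client *cl, unsigned long event)
+ *	{
+ *		switch (event) {
+ *		case HSI_EVENT_START_RX:
+ *			(prepare to receive)
+ *			break;
+ *		case HSI_EVENT_STOP_RX:
+ *			(stop receiving)
+ *			break;
+ *		}
+ *	}
+ *
+ *	err = hsi_register_port_event(cl, my_port_event);
+ */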
+
+/**
+ * hsi_unregister_port_event - Stop receiving port events for a client
+ * @cl: HSI client that wants to stop receiving port events
+ *
+ * Clients should call this function before releasing their associated
+ * port.
+ *
+ * Returns -errno on error, or 0 on success.
+ */
+int hsi_unregister_port_event(struct hsi_client *cl)
+{
+ struct hsi_port *port = hsi_get_port(cl);
+ int err;
+
+ WARN_ON(!hsi_port_claimed(cl));
+
+ err = blocking_notifier_chain_unregister(&port->n_head, &cl->nb);
+ if (!err)
+ cl->ehandler = NULL;
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(hsi_unregister_port_event);
+
+/**
+ * hsi_event - Notifies clients about port events
+ * @port: Port where the event occurred
+ * @event: The event type
+ *
+ * Clients should not be concerned about wake line behavior. However, due
+ * to a race condition in the HSI HW protocol, clients need to be notified
+ * about wake line changes, so they can implement a workaround for it.
+ *
+ * Events:
+ * HSI_EVENT_START_RX - Incoming wake line high
+ * HSI_EVENT_STOP_RX - Incoming wake line down
+ *
+ * Returns -errno on error, or 0 on success.
+ */
+int hsi_event(struct hsi_port *port, unsigned long event)
+{
+ return blocking_notifier_call_chain(&port->n_head, event, NULL);
+}
+EXPORT_SYMBOL_GPL(hsi_event);
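+
+/*
+ * Editorial illustration: a controller driver would typically call this
+ * from its wake-line interrupt handler, e.g.
+ *
+ *	hsi_event(port, HSI_EVENT_START_RX);
+ */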
+
+/**
+ * hsi_get_channel_id_by_name - acquire channel id by channel name
+ * @cl: HSI client, which uses the channel
+ * @name: name the channel is known under
+ *
+ * Clients can call this function to get the HSI channel IDs, similar to
+ * requesting IRQs or GPIOs by name. This function assumes the same
+ * channel configuration is used for RX and TX.
+ *
+ * Returns -errno on error or channel id on success.
+ */
+int hsi_get_channel_id_by_name(struct hsi_client *cl, char *name)
+{
+ int i;
+
+ if (!cl->rx_cfg.channels)
+ return -ENOENT;
+
+ for (i = 0; i < cl->rx_cfg.num_channels; i++)
+ if (!strcmp(cl->rx_cfg.channels[i].name, name))
+ return cl->rx_cfg.channels[i].id;
+
+ return -ENXIO;
+}
+EXPORT_SYMBOL_GPL(hsi_get_channel_id_by_name);
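+
+/*
+ * Hypothetical sketch (editorial illustration), assuming the client
+ * configuration defines a channel named "control":
+ *
+ *	int ch = hsi_get_channel_id_by_name(cl, "control");
+ *
+ *	if (ch < 0)
+ *		return ch;
+ *	msg->channel = ch;
+ */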
+
+static int __init hsi_init(void)
+{
+ return bus_register(&hsi_bus_type);
+}
+postcore_initcall(hsi_init);
+
+static void __exit hsi_exit(void)
+{
+ bus_unregister(&hsi_bus_type);
+}
+module_exit(hsi_exit);
+
+MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>");
+MODULE_DESCRIPTION("High-speed Synchronous Serial Interface (HSI) framework");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index ff940075b..eaf2f916d 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -486,6 +486,18 @@ config SENSORS_FSCHMD
This driver can also be built as a module. If so, the module
will be called fschmd.
+config SENSORS_FTSTEUTATES
+ tristate "Fujitsu Technology Solutions sensor chip Teutates"
+ depends on I2C && WATCHDOG
+ select WATCHDOG_CORE
+ help
+ If you say yes here you get support for the Fujitsu Technology
+ Solutions (FTS) sensor chip "Teutates" including support for
+ the integrated watchdog.
+
+ This driver can also be built as a module. If so, the module
+ will be called ftsteutates.
+
config SENSORS_GL518SM
tristate "Genesys Logic GL518SM"
depends on I2C
@@ -645,8 +657,8 @@ config SENSORS_JC42
temperature sensors, which are used on many DDR3 memory modules for
mobile devices and servers. Support will include, but not be limited
to, ADT7408, AT30TS00, CAT34TS02, CAT6095, MAX6604, MCP9804, MCP9805,
- MCP98242, MCP98243, MCP98244, MCP9843, SE97, SE98, STTS424(E),
- STTS2002, STTS3000, TSE2002, TSE2004, TS3000, and TS3001.
+ MCP9808, MCP98242, MCP98243, MCP98244, MCP9843, SE97, SE98,
+ STTS424(E), STTS2002, STTS3000, TSE2002, TSE2004, TS3000, and TS3001.
This driver can also be built as a module. If so, the module
will be called jc42.
@@ -958,6 +970,7 @@ config SENSORS_LM75
tristate "National Semiconductor LM75 and compatibles"
depends on I2C
depends on THERMAL || !THERMAL_OF
+ select REGMAP_I2C
help
If you say yes here you get support for one common type of
temperature sensor chip, with models including:
@@ -1265,6 +1278,17 @@ config SENSORS_SHT21
This driver can also be built as a module. If so, the module
will be called sht21.
+config SENSORS_SHT3x
+ tristate "Sensiron humidity and temperature sensors. SHT3x and compat."
+ depends on I2C
+ select CRC8
+ help
+ If you say yes here you get support for the Sensirion SHT30 and SHT31
+ humidity and temperature sensors.
+
+ This driver can also be built as a module. If so, the module
+ will be called sht3x.
+
config SENSORS_SHTC1
tristate "Sensiron humidity and temperature sensors. SHTC1 and compat."
depends on I2C
@@ -1514,6 +1538,17 @@ config SENSORS_INA2XX
This driver can also be built as a module. If so, the module
will be called ina2xx.
+config SENSORS_INA3221
+ tristate "Texas Instruments INA3221 Triple Power Monitor"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the TI INA3221 Triple Power
+ Monitor.
+
+ This driver can also be built as a module. If so, the module
+ will be called ina3221.
+
config SENSORS_TC74
tristate "Microchip TC74"
depends on I2C
@@ -1538,6 +1573,7 @@ config SENSORS_TMP102
tristate "Texas Instruments TMP102"
depends on I2C
depends on THERMAL || !THERMAL_OF
+ select REGMAP_I2C
help
If you say yes here you get support for Texas Instruments TMP102
sensor chips.
@@ -1561,7 +1597,7 @@ config SENSORS_TMP401
depends on I2C
help
If you say yes here you get support for Texas Instruments TMP401,
- TMP411, TMP431, TMP432 and TMP435 temperature sensor chips.
+ TMP411, TMP431, TMP432, TMP435, and TMP461 temperature sensor chips.
This driver can also be built as a module. If so, the module
will be called tmp401.
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 2ef5b7c4c..fe87d2895 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -62,6 +62,7 @@ obj-$(CONFIG_SENSORS_F71882FG) += f71882fg.o
obj-$(CONFIG_SENSORS_F75375S) += f75375s.o
obj-$(CONFIG_SENSORS_FAM15H_POWER) += fam15h_power.o
obj-$(CONFIG_SENSORS_FSCHMD) += fschmd.o
+obj-$(CONFIG_SENSORS_FTSTEUTATES) += ftsteutates.o
obj-$(CONFIG_SENSORS_G760A) += g760a.o
obj-$(CONFIG_SENSORS_G762) += g762.o
obj-$(CONFIG_SENSORS_GL518SM) += gl518sm.o
@@ -77,6 +78,7 @@ obj-$(CONFIG_SENSORS_IBMPOWERNV)+= ibmpowernv.o
obj-$(CONFIG_SENSORS_IIO_HWMON) += iio_hwmon.o
obj-$(CONFIG_SENSORS_INA209) += ina209.o
obj-$(CONFIG_SENSORS_INA2XX) += ina2xx.o
+obj-$(CONFIG_SENSORS_INA3221) += ina3221.o
obj-$(CONFIG_SENSORS_IT87) += it87.o
obj-$(CONFIG_SENSORS_JC42) += jc42.o
obj-$(CONFIG_SENSORS_JZ4740) += jz4740-hwmon.o
@@ -138,6 +140,7 @@ obj-$(CONFIG_SENSORS_SCH5627) += sch5627.o
obj-$(CONFIG_SENSORS_SCH5636) += sch5636.o
obj-$(CONFIG_SENSORS_SHT15) += sht15.o
obj-$(CONFIG_SENSORS_SHT21) += sht21.o
+obj-$(CONFIG_SENSORS_SHT3x) += sht3x.o
obj-$(CONFIG_SENSORS_SHTC1) += shtc1.o
obj-$(CONFIG_SENSORS_SIS5595) += sis5595.o
obj-$(CONFIG_SENSORS_SMM665) += smm665.o
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
index 202c1fbb3..8ea35932f 100644
--- a/drivers/hwmon/ad7314.c
+++ b/drivers/hwmon/ad7314.c
@@ -37,7 +37,6 @@ enum ad7314_variant {
struct ad7314_data {
struct spi_device *spi_dev;
- struct device *hwmon_dev;
u16 rx ____cacheline_aligned;
};
@@ -88,62 +87,30 @@ static ssize_t ad7314_show_temperature(struct device *dev,
}
}
-static ssize_t ad7314_show_name(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- return sprintf(buf, "%s\n", to_spi_device(dev)->modalias);
-}
-
-static DEVICE_ATTR(name, S_IRUGO, ad7314_show_name, NULL);
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
ad7314_show_temperature, NULL, 0);
-static struct attribute *ad7314_attributes[] = {
- &dev_attr_name.attr,
+static struct attribute *ad7314_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
NULL,
};
-static const struct attribute_group ad7314_group = {
- .attrs = ad7314_attributes,
-};
+ATTRIBUTE_GROUPS(ad7314);
static int ad7314_probe(struct spi_device *spi_dev)
{
- int ret;
struct ad7314_data *chip;
+ struct device *hwmon_dev;
chip = devm_kzalloc(&spi_dev->dev, sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
return -ENOMEM;
- spi_set_drvdata(spi_dev, chip);
-
- ret = sysfs_create_group(&spi_dev->dev.kobj, &ad7314_group);
- if (ret < 0)
- return ret;
-
- chip->hwmon_dev = hwmon_device_register(&spi_dev->dev);
- if (IS_ERR(chip->hwmon_dev)) {
- ret = PTR_ERR(chip->hwmon_dev);
- goto error_remove_group;
- }
chip->spi_dev = spi_dev;
-
- return 0;
-error_remove_group:
- sysfs_remove_group(&spi_dev->dev.kobj, &ad7314_group);
- return ret;
-}
-
-static int ad7314_remove(struct spi_device *spi_dev)
-{
- struct ad7314_data *chip = spi_get_drvdata(spi_dev);
-
- hwmon_device_unregister(chip->hwmon_dev);
- sysfs_remove_group(&spi_dev->dev.kobj, &ad7314_group);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(&spi_dev->dev,
+ spi_dev->modalias,
+ chip, ad7314_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static const struct spi_device_id ad7314_id[] = {
@@ -159,7 +126,6 @@ static struct spi_driver ad7314_driver = {
.name = "ad7314",
},
.probe = ad7314_probe,
- .remove = ad7314_remove,
.id_table = ad7314_id,
};
diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c
index 4fd9e4de1..59bd7b9e1 100644
--- a/drivers/hwmon/ads7871.c
+++ b/drivers/hwmon/ads7871.c
@@ -66,14 +66,12 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
-#include <linux/mutex.h>
#include <linux/delay.h>
#define DEVICE_NAME "ads7871"
struct ads7871_data {
- struct device *hwmon_dev;
- struct mutex update_lock;
+ struct spi_device *spi;
};
static int ads7871_read_reg8(struct spi_device *spi, int reg)
@@ -101,7 +99,8 @@ static int ads7871_write_reg8(struct spi_device *spi, int reg, u8 val)
static ssize_t show_voltage(struct device *dev,
struct device_attribute *da, char *buf)
{
- struct spi_device *spi = to_spi_device(dev);
+ struct ads7871_data *pdata = dev_get_drvdata(dev);
+ struct spi_device *spi = pdata->spi;
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int ret, val, i = 0;
uint8_t channel, mux_cnv;
@@ -139,12 +138,6 @@ static ssize_t show_voltage(struct device *dev,
}
}
-static ssize_t ads7871_show_name(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- return sprintf(buf, "%s\n", to_spi_device(dev)->modalias);
-}
-
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_voltage, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_voltage, NULL, 1);
static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_voltage, NULL, 2);
@@ -154,9 +147,7 @@ static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_voltage, NULL, 5);
static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_voltage, NULL, 6);
static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_voltage, NULL, 7);
-static DEVICE_ATTR(name, S_IRUGO, ads7871_show_name, NULL);
-
-static struct attribute *ads7871_attributes[] = {
+static struct attribute *ads7871_attrs[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in2_input.dev_attr.attr,
@@ -165,21 +156,18 @@ static struct attribute *ads7871_attributes[] = {
&sensor_dev_attr_in5_input.dev_attr.attr,
&sensor_dev_attr_in6_input.dev_attr.attr,
&sensor_dev_attr_in7_input.dev_attr.attr,
- &dev_attr_name.attr,
NULL
};
-static const struct attribute_group ads7871_group = {
- .attrs = ads7871_attributes,
-};
+ATTRIBUTE_GROUPS(ads7871);
static int ads7871_probe(struct spi_device *spi)
{
- int ret, err;
+ struct device *dev = &spi->dev;
+ int ret;
uint8_t val;
struct ads7871_data *pdata;
-
- dev_dbg(&spi->dev, "probe\n");
+ struct device *hwmon_dev;
/* Configure the SPI bus */
spi->mode = (SPI_MODE_0);
@@ -193,7 +181,7 @@ static int ads7871_probe(struct spi_device *spi)
ads7871_write_reg8(spi, REG_OSC_CONTROL, val);
ret = ads7871_read_reg8(spi, REG_OSC_CONTROL);
- dev_dbg(&spi->dev, "REG_OSC_CONTROL write:%x, read:%x\n", val, ret);
+ dev_dbg(dev, "REG_OSC_CONTROL write:%x, read:%x\n", val, ret);
/*
* because there is no other error checking on an SPI bus
* we need to make sure we really have a chip
@@ -201,46 +189,23 @@ static int ads7871_probe(struct spi_device *spi)
if (val != ret)
return -ENODEV;
- pdata = devm_kzalloc(&spi->dev, sizeof(struct ads7871_data),
- GFP_KERNEL);
+ pdata = devm_kzalloc(dev, sizeof(struct ads7871_data), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- err = sysfs_create_group(&spi->dev.kobj, &ads7871_group);
- if (err < 0)
- return err;
-
- spi_set_drvdata(spi, pdata);
+ pdata->spi = spi;
- pdata->hwmon_dev = hwmon_device_register(&spi->dev);
- if (IS_ERR(pdata->hwmon_dev)) {
- err = PTR_ERR(pdata->hwmon_dev);
- goto error_remove;
- }
-
- return 0;
-
-error_remove:
- sysfs_remove_group(&spi->dev.kobj, &ads7871_group);
- return err;
-}
-
-static int ads7871_remove(struct spi_device *spi)
-{
- struct ads7871_data *pdata = spi_get_drvdata(spi);
-
- hwmon_device_unregister(pdata->hwmon_dev);
- sysfs_remove_group(&spi->dev.kobj, &ads7871_group);
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, spi->modalias,
+ pdata,
+ ads7871_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static struct spi_driver ads7871_driver = {
.driver = {
.name = DEVICE_NAME,
},
-
.probe = ads7871_probe,
- .remove = ads7871_remove,
};
module_spi_driver(ads7871_driver);
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index 827c03703..fc1e65a26 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -30,12 +30,17 @@
#define ADT7411_REG_CFG1 0x18
#define ADT7411_CFG1_START_MONITOR (1 << 0)
+#define ADT7411_CFG1_RESERVED_BIT1 (1 << 1)
+#define ADT7411_CFG1_RESERVED_BIT3 (1 << 3)
#define ADT7411_REG_CFG2 0x19
#define ADT7411_CFG2_DISABLE_AVG (1 << 5)
#define ADT7411_REG_CFG3 0x1a
#define ADT7411_CFG3_ADC_CLK_225 (1 << 0)
+#define ADT7411_CFG3_RESERVED_BIT1 (1 << 1)
+#define ADT7411_CFG3_RESERVED_BIT2 (1 << 2)
+#define ADT7411_CFG3_RESERVED_BIT3 (1 << 3)
#define ADT7411_CFG3_REF_VDD (1 << 4)
#define ADT7411_REG_DEVICE_ID 0x4d
@@ -279,6 +284,45 @@ static int adt7411_detect(struct i2c_client *client,
return 0;
}
+static int adt7411_init_device(struct adt7411_data *data)
+{
+ int ret;
+ u8 val;
+
+ ret = i2c_smbus_read_byte_data(data->client, ADT7411_REG_CFG3);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * According to the datasheet, bits 1 and 2 must always be written as
+ * zero and bit 3 must always be written as one.
+ */
+ val = ret;
+ val &= ~(ADT7411_CFG3_RESERVED_BIT1 | ADT7411_CFG3_RESERVED_BIT2);
+ val |= ADT7411_CFG3_RESERVED_BIT3;
+
+ ret = i2c_smbus_write_byte_data(data->client, ADT7411_REG_CFG3, val);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, ADT7411_REG_CFG1);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * According to the datasheet, bit 1 must always be written as zero and
+ * bit 3 must always be written as one.
+ */
+ val = ret;
+ val &= ~ADT7411_CFG1_RESERVED_BIT1;
+ val |= ADT7411_CFG1_RESERVED_BIT3;
+
+ /* enable monitoring */
+ val |= ADT7411_CFG1_START_MONITOR;
+
+ return i2c_smbus_write_byte_data(data->client, ADT7411_REG_CFG1, val);
+}
+
static int adt7411_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -296,8 +340,7 @@ static int adt7411_probe(struct i2c_client *client,
mutex_init(&data->device_lock);
mutex_init(&data->update_lock);
- ret = adt7411_modify_bit(client, ADT7411_REG_CFG1,
- ADT7411_CFG1_START_MONITOR, 1);
+ ret = adt7411_init_device(data);
if (ret < 0)
return ret;
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 2ac87d553..acf9c0361 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -81,6 +81,7 @@ static bool disallow_fan_type_call;
#define I8K_HWMON_HAVE_TEMP4 (1 << 3)
#define I8K_HWMON_HAVE_FAN1 (1 << 4)
#define I8K_HWMON_HAVE_FAN2 (1 << 5)
+#define I8K_HWMON_HAVE_FAN3 (1 << 6)
MODULE_AUTHOR("Massimo Dal Zotto (dz@debian.org)");
MODULE_AUTHOR("Pali RohĂ¡r <pali.rohar@gmail.com>");
@@ -139,6 +140,14 @@ static int i8k_smm(struct smm_regs *regs)
int eax = regs->eax;
cpumask_var_t old_mask;
+#ifdef DEBUG
+ int ebx = regs->ebx;
+ unsigned long duration;
+ ktime_t calltime, delta, rettime;
+
+ calltime = ktime_get();
+#endif
+
/* SMM requires CPU 0 */
if (!alloc_cpumask_var(&old_mask, GFP_KERNEL))
return -ENOMEM;
@@ -210,6 +219,15 @@ static int i8k_smm(struct smm_regs *regs)
out:
set_cpus_allowed_ptr(current, old_mask);
free_cpumask_var(old_mask);
+
+#ifdef DEBUG
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ duration = ktime_to_ns(delta) >> 10;
+ pr_debug("smm(0x%.4x 0x%.4x) = 0x%.4x (took %7lu usecs)\n", eax, ebx,
+ (rc ? 0xffff : regs->eax & 0xffff), duration);
+#endif
+
return rc;
}
@@ -252,7 +270,7 @@ static int _i8k_get_fan_type(int fan)
static int i8k_get_fan_type(int fan)
{
/* I8K_SMM_GET_FAN_TYPE SMM call is expensive, so cache values */
- static int types[2] = { INT_MIN, INT_MIN };
+ static int types[3] = { INT_MIN, INT_MIN, INT_MIN };
if (types[fan] == INT_MIN)
types[fan] = _i8k_get_fan_type(fan);
@@ -719,6 +737,12 @@ static SENSOR_DEVICE_ATTR(fan2_label, S_IRUGO, i8k_hwmon_show_fan_label, NULL,
1);
static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, i8k_hwmon_show_pwm,
i8k_hwmon_set_pwm, 1);
+static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, i8k_hwmon_show_fan, NULL,
+ 2);
+static SENSOR_DEVICE_ATTR(fan3_label, S_IRUGO, i8k_hwmon_show_fan_label, NULL,
+ 2);
+static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR, i8k_hwmon_show_pwm,
+ i8k_hwmon_set_pwm, 2);
static struct attribute *i8k_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr, /* 0 */
@@ -735,6 +759,9 @@ static struct attribute *i8k_attrs[] = {
&sensor_dev_attr_fan2_input.dev_attr.attr, /* 11 */
&sensor_dev_attr_fan2_label.dev_attr.attr, /* 12 */
&sensor_dev_attr_pwm2.dev_attr.attr, /* 13 */
+ &sensor_dev_attr_fan3_input.dev_attr.attr, /* 14 */
+ &sensor_dev_attr_fan3_label.dev_attr.attr, /* 15 */
+ &sensor_dev_attr_pwm3.dev_attr.attr, /* 16 */
NULL
};
@@ -742,7 +769,7 @@ static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
int index)
{
if (disallow_fan_type_call &&
- (index == 9 || index == 12))
+ (index == 9 || index == 12 || index == 15))
return 0;
if (index >= 0 && index <= 1 &&
!(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
@@ -762,6 +789,9 @@ static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
if (index >= 11 && index <= 13 &&
!(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN2))
return 0;
+ if (index >= 14 && index <= 16 &&
+ !(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN3))
+ return 0;
return attr->mode;
}
@@ -807,6 +837,13 @@ static int __init i8k_init_hwmon(void)
if (err >= 0)
i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2;
+ /* Third fan attributes, if fan status or type is OK */
+ err = i8k_get_fan_status(2);
+ if (err < 0)
+ err = i8k_get_fan_type(2);
+ if (err >= 0)
+ i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN3;
+
i8k_hwmon_dev = hwmon_device_register_with_groups(NULL, "dell_smm",
NULL, i8k_groups);
if (IS_ERR(i8k_hwmon_dev)) {
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
index ada907164..f37fe2011 100644
--- a/drivers/hwmon/emc6w201.c
+++ b/drivers/hwmon/emc6w201.c
@@ -464,7 +464,7 @@ static int emc6w201_detect(struct i2c_client *client,
if (verstep < 0 || (verstep & 0xF0) != 0xB0)
return -ENODEV;
if ((verstep & 0x0F) > 2) {
- dev_dbg(&client->dev, "Unknwown EMC6W201 stepping %d\n",
+ dev_dbg(&client->dev, "Unknown EMC6W201 stepping %d\n",
verstep & 0x0F);
return -ENODEV;
}
diff --git a/drivers/hwmon/ftsteutates.c b/drivers/hwmon/ftsteutates.c
new file mode 100644
index 000000000..48633e541
--- /dev/null
+++ b/drivers/hwmon/ftsteutates.c
@@ -0,0 +1,819 @@
+/*
+ * Support for the FTS system monitoring chip "Teutates"
+ *
+ * Copyright (C) 2016 Fujitsu Technology Solutions GmbH,
+ * Thilo Cestonaro <thilo.cestonaro@ts.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/uaccess.h>
+#include <linux/watchdog.h>
+
+#define FTS_DEVICE_ID_REG 0x0000
+#define FTS_DEVICE_REVISION_REG 0x0001
+#define FTS_DEVICE_STATUS_REG 0x0004
+#define FTS_SATELLITE_STATUS_REG 0x0005
+#define FTS_EVENT_STATUS_REG 0x0006
+#define FTS_GLOBAL_CONTROL_REG 0x0007
+
+#define FTS_SENSOR_EVENT_REG 0x0010
+
+#define FTS_FAN_EVENT_REG 0x0014
+#define FTS_FAN_PRESENT_REG 0x0015
+
+#define FTS_POWER_ON_TIME_COUNTER_A 0x007A
+#define FTS_POWER_ON_TIME_COUNTER_B 0x007B
+#define FTS_POWER_ON_TIME_COUNTER_C 0x007C
+
+#define FTS_PAGE_SELECT_REG 0x007F
+
+#define FTS_WATCHDOG_TIME_PRESET 0x000B
+#define FTS_WATCHDOG_CONTROL 0x5081
+
+#define FTS_NO_FAN_SENSORS 0x08
+#define FTS_NO_TEMP_SENSORS 0x10
+#define FTS_NO_VOLT_SENSORS 0x04
+
+static struct i2c_device_id fts_id[] = {
+ { "ftsteutates", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, fts_id);
+
+enum WATCHDOG_RESOLUTION {
+ seconds = 1,
+ minutes = 60
+};
+
+struct fts_data {
+ struct i2c_client *client;
+ /* update sensor data lock */
+ struct mutex update_lock;
+ /* read/write register lock */
+ struct mutex access_lock;
+ unsigned long last_updated; /* in jiffies */
+ struct watchdog_device wdd;
+ enum WATCHDOG_RESOLUTION resolution;
+ bool valid; /* false until following fields are valid */
+
+ u8 volt[FTS_NO_VOLT_SENSORS];
+
+ u8 temp_input[FTS_NO_TEMP_SENSORS];
+ u8 temp_alarm;
+
+ u8 fan_present;
+ u8 fan_input[FTS_NO_FAN_SENSORS]; /* in rps */
+ u8 fan_source[FTS_NO_FAN_SENSORS];
+ u8 fan_alarm;
+};
+
+#define FTS_REG_FAN_INPUT(idx) ((idx) + 0x20)
+#define FTS_REG_FAN_SOURCE(idx) ((idx) + 0x30)
+#define FTS_REG_FAN_CONTROL(idx) (((idx) << 16) + 0x4881)
+
+#define FTS_REG_TEMP_INPUT(idx) ((idx) + 0x40)
+#define FTS_REG_TEMP_CONTROL(idx) (((idx) << 16) + 0x0681)
+
+#define FTS_REG_VOLT(idx) ((idx) + 0x18)
+
+/*****************************************************************************/
+/* I2C Helper functions */
+/*****************************************************************************/
+static int fts_read_byte(struct i2c_client *client, unsigned short reg)
+{
+ int ret;
+ unsigned char page = reg >> 8;
+ struct fts_data *data = dev_get_drvdata(&client->dev);
+
+ mutex_lock(&data->access_lock);
+
+ dev_dbg(&client->dev, "page select - page: 0x%.02x\n", page);
+ ret = i2c_smbus_write_byte_data(client, FTS_PAGE_SELECT_REG, page);
+ if (ret < 0)
+ goto error;
+
+ reg &= 0xFF;
+ ret = i2c_smbus_read_byte_data(client, reg);
+ dev_dbg(&client->dev, "read - reg: 0x%.02x: val: 0x%.02x\n", reg, ret);
+
+error:
+ mutex_unlock(&data->access_lock);
+ return ret;
+}
+
+static int fts_write_byte(struct i2c_client *client, unsigned short reg,
+ unsigned char value)
+{
+ int ret;
+ unsigned char page = reg >> 8;
+ struct fts_data *data = dev_get_drvdata(&client->dev);
+
+ mutex_lock(&data->access_lock);
+
+ dev_dbg(&client->dev, "page select - page: 0x%.02x\n", page);
+ ret = i2c_smbus_write_byte_data(client, FTS_PAGE_SELECT_REG, page);
+ if (ret < 0)
+ goto error;
+
+ reg &= 0xFF;
+ dev_dbg(&client->dev,
+ "write - reg: 0x%.02x: val: 0x%.02x\n", reg, value);
+ ret = i2c_smbus_write_byte_data(client, reg, value);
+
+error:
+ mutex_unlock(&data->access_lock);
+ return ret;
+}
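+
+/*
+ * Editorial note on the addressing scheme above: the 16-bit register
+ * constants encode the page in the high byte and the in-page offset in
+ * the low byte. FTS_WATCHDOG_CONTROL (0x5081), for example, selects page
+ * 0x50 through FTS_PAGE_SELECT_REG and then accesses offset 0x81.
+ */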
+
+/*****************************************************************************/
+/* Data Updater Helper function */
+/*****************************************************************************/
+static int fts_update_device(struct fts_data *data)
+{
+ int i;
+ int err = 0;
+
+ mutex_lock(&data->update_lock);
+ if (!time_after(jiffies, data->last_updated + 2 * HZ) && data->valid)
+ goto exit;
+
+ err = fts_read_byte(data->client, FTS_DEVICE_STATUS_REG);
+ if (err < 0)
+ goto exit;
+
+ data->valid = !!(err & 0x02); /* Data not ready yet */
+ if (unlikely(!data->valid)) {
+ err = -EAGAIN;
+ goto exit;
+ }
+
+ err = fts_read_byte(data->client, FTS_FAN_PRESENT_REG);
+ if (err < 0)
+ goto exit;
+ data->fan_present = err;
+
+ err = fts_read_byte(data->client, FTS_FAN_EVENT_REG);
+ if (err < 0)
+ goto exit;
+ data->fan_alarm = err;
+
+ for (i = 0; i < FTS_NO_FAN_SENSORS; i++) {
+ if (data->fan_present & BIT(i)) {
+ err = fts_read_byte(data->client, FTS_REG_FAN_INPUT(i));
+ if (err < 0)
+ goto exit;
+ data->fan_input[i] = err;
+
+ err = fts_read_byte(data->client,
+ FTS_REG_FAN_SOURCE(i));
+ if (err < 0)
+ goto exit;
+ data->fan_source[i] = err;
+ } else {
+ data->fan_input[i] = 0;
+ data->fan_source[i] = 0;
+ }
+ }
+
+ err = fts_read_byte(data->client, FTS_SENSOR_EVENT_REG);
+ if (err < 0)
+ goto exit;
+ data->temp_alarm = err;
+
+ for (i = 0; i < FTS_NO_TEMP_SENSORS; i++) {
+ err = fts_read_byte(data->client, FTS_REG_TEMP_INPUT(i));
+ if (err < 0)
+ goto exit;
+ data->temp_input[i] = err;
+ }
+
+ for (i = 0; i < FTS_NO_VOLT_SENSORS; i++) {
+ err = fts_read_byte(data->client, FTS_REG_VOLT(i));
+ if (err < 0)
+ goto exit;
+ data->volt[i] = err;
+ }
+ data->last_updated = jiffies;
+ err = 0;
+exit:
+ mutex_unlock(&data->update_lock);
+ return err;
+}
+
+/*****************************************************************************/
+/* Watchdog functions */
+/*****************************************************************************/
+static int fts_wd_set_resolution(struct fts_data *data,
+ enum WATCHDOG_RESOLUTION resolution)
+{
+ int ret;
+
+ if (data->resolution == resolution)
+ return 0;
+
+ ret = fts_read_byte(data->client, FTS_WATCHDOG_CONTROL);
+ if (ret < 0)
+ return ret;
+
+ if ((resolution == seconds && ret & BIT(1)) ||
+ (resolution == minutes && (ret & BIT(1)) == 0)) {
+ data->resolution = resolution;
+ return 0;
+ }
+
+ if (resolution == seconds)
+ ret |= BIT(1);
+ else
+ ret &= ~BIT(1);
+
+ ret = fts_write_byte(data->client, FTS_WATCHDOG_CONTROL, ret);
+ if (ret < 0)
+ return ret;
+
+ data->resolution = resolution;
+ return ret;
+}
+
+static int fts_wd_set_timeout(struct watchdog_device *wdd, unsigned int timeout)
+{
+ struct fts_data *data;
+ enum WATCHDOG_RESOLUTION resolution = seconds;
+ int ret;
+
+ data = watchdog_get_drvdata(wdd);
+ /*
+ * Switch the watchdog resolution to minutes if the timeout does not
+ * fit into a byte.
+ */
+ if (timeout > 0xFF) {
+ timeout = DIV_ROUND_UP(timeout, 60) * 60;
+ resolution = minutes;
+ }
+
+ ret = fts_wd_set_resolution(data, resolution);
+ if (ret < 0)
+ return ret;
+
+ wdd->timeout = timeout;
+ return 0;
+}
+
+static int fts_wd_start(struct watchdog_device *wdd)
+{
+ struct fts_data *data = watchdog_get_drvdata(wdd);
+
+ return fts_write_byte(data->client, FTS_WATCHDOG_TIME_PRESET,
+ wdd->timeout / (u8)data->resolution);
+}
+
+static int fts_wd_stop(struct watchdog_device *wdd)
+{
+ struct fts_data *data;
+
+ data = watchdog_get_drvdata(wdd);
+ return fts_write_byte(data->client, FTS_WATCHDOG_TIME_PRESET, 0);
+}
+
+static const struct watchdog_info fts_wd_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+ .identity = "FTS Teutates Hardware Watchdog",
+};
+
+static const struct watchdog_ops fts_wd_ops = {
+ .owner = THIS_MODULE,
+ .start = fts_wd_start,
+ .stop = fts_wd_stop,
+ .set_timeout = fts_wd_set_timeout,
+};
+
+static int fts_watchdog_init(struct fts_data *data)
+{
+ int timeout, ret;
+
+ watchdog_set_drvdata(&data->wdd, data);
+
+ timeout = fts_read_byte(data->client, FTS_WATCHDOG_TIME_PRESET);
+ if (timeout < 0)
+ return timeout;
+
+ /* watchdog not running, set timeout to a default of 60 sec. */
+ if (timeout == 0) {
+ ret = fts_wd_set_resolution(data, seconds);
+ if (ret < 0)
+ return ret;
+ data->wdd.timeout = 60;
+ } else {
+ ret = fts_read_byte(data->client, FTS_WATCHDOG_CONTROL);
+ if (ret < 0)
+ return ret;
+
+ data->resolution = ret & BIT(1) ? seconds : minutes;
+ data->wdd.timeout = timeout * (u8)data->resolution;
+ set_bit(WDOG_HW_RUNNING, &data->wdd.status);
+ }
+
+ /* Register our watchdog part */
+ data->wdd.info = &fts_wd_info;
+ data->wdd.ops = &fts_wd_ops;
+ data->wdd.parent = &data->client->dev;
+ data->wdd.min_timeout = 1;
+
+ /* max timeout 255 minutes. */
+ data->wdd.max_hw_heartbeat_ms = 0xFF * 60 * MSEC_PER_SEC;
+
+ return watchdog_register_device(&data->wdd);
+}
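+
+/*
+ * Editorial worked example: the preset register counts in the currently
+ * selected resolution, so the effective timeout is preset * resolution.
+ * A preset of 5 with minutes resolution therefore decodes to
+ * 5 * 60 = 300 seconds.
+ */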
+
+/*****************************************************************************/
+/* SysFS handler functions */
+/*****************************************************************************/
+static ssize_t show_in_value(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct fts_data *data = dev_get_drvdata(dev);
+ int index = to_sensor_dev_attr(devattr)->index;
+ int err;
+
+ err = fts_update_device(data);
+ if (err < 0)
+ return err;
+
+ return sprintf(buf, "%u\n", data->volt[index]);
+}
+
+static ssize_t show_temp_value(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct fts_data *data = dev_get_drvdata(dev);
+ int index = to_sensor_dev_attr(devattr)->index;
+ int err;
+
+ err = fts_update_device(data);
+ if (err < 0)
+ return err;
+
+ return sprintf(buf, "%u\n", data->temp_input[index]);
+}
+
+static ssize_t show_temp_fault(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct fts_data *data = dev_get_drvdata(dev);
+ int index = to_sensor_dev_attr(devattr)->index;
+ int err;
+
+ err = fts_update_device(data);
+ if (err < 0)
+ return err;
+
+ /* A temperature value of 00h indicates a sensor error */
+ return sprintf(buf, "%d\n", data->temp_input[index] == 0);
+}
+
+static ssize_t show_temp_alarm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct fts_data *data = dev_get_drvdata(dev);
+ int index = to_sensor_dev_attr(devattr)->index;
+ int err;
+
+ err = fts_update_device(data);
+ if (err < 0)
+ return err;
+
+ return sprintf(buf, "%u\n", !!(data->temp_alarm & BIT(index)));
+}
+
+static ssize_t
+clear_temp_alarm(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct fts_data *data = dev_get_drvdata(dev);
+ int index = to_sensor_dev_attr(devattr)->index;
+ long ret;
+
+ ret = fts_update_device(data);
+ if (ret < 0)
+ return ret;
+
+ if (kstrtoul(buf, 10, &ret) || ret != 0)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ ret = fts_read_byte(data->client, FTS_REG_TEMP_CONTROL(index));
+ if (ret < 0)
+ goto error;
+
+ ret = fts_write_byte(data->client, FTS_REG_TEMP_CONTROL(index),
+ ret | 0x1);
+ if (ret < 0)
+ goto error;
+
+ data->valid = false;
+error:
+ mutex_unlock(&data->update_lock);
+ return ret;
+}
+
+static ssize_t show_fan_value(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct fts_data *data = dev_get_drvdata(dev);
+ int index = to_sensor_dev_attr(devattr)->index;
+ int err;
+
+ err = fts_update_device(data);
+ if (err < 0)
+ return err;
+
+ return sprintf(buf, "%u\n", data->fan_input[index]);
+}
+
+static ssize_t show_fan_source(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct fts_data *data = dev_get_drvdata(dev);
+ int index = to_sensor_dev_attr(devattr)->index;
+ int err;
+
+ err = fts_update_device(data);
+ if (err < 0)
+ return err;
+
+ return sprintf(buf, "%u\n", data->fan_source[index]);
+}
+
+static ssize_t show_fan_alarm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct fts_data *data = dev_get_drvdata(dev);
+ int index = to_sensor_dev_attr(devattr)->index;
+ int err;
+
+ err = fts_update_device(data);
+ if (err < 0)
+ return err;
+
+ return sprintf(buf, "%d\n", !!(data->fan_alarm & BIT(index)));
+}
+
+static ssize_t
+clear_fan_alarm(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct fts_data *data = dev_get_drvdata(dev);
+ int index = to_sensor_dev_attr(devattr)->index;
+ long ret;
+
+ ret = fts_update_device(data);
+ if (ret < 0)
+ return ret;
+
+ if (kstrtoul(buf, 10, &ret) || ret != 0)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ ret = fts_read_byte(data->client, FTS_REG_FAN_CONTROL(index));
+ if (ret < 0)
+ goto error;
+
+ ret = fts_write_byte(data->client, FTS_REG_FAN_CONTROL(index),
+ ret | 0x1);
+ if (ret < 0)
+ goto error;
+
+ data->valid = false;
+error:
+ mutex_unlock(&data->update_lock);
+ return ret;
+}
+
+/*****************************************************************************/
+/* SysFS structs */
+/*****************************************************************************/
+
+/* Temperature sensors */
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_value, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp_value, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp_value, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp_value, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO, show_temp_value, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp6_input, S_IRUGO, show_temp_value, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp7_input, S_IRUGO, show_temp_value, NULL, 6);
+static SENSOR_DEVICE_ATTR(temp8_input, S_IRUGO, show_temp_value, NULL, 7);
+static SENSOR_DEVICE_ATTR(temp9_input, S_IRUGO, show_temp_value, NULL, 8);
+static SENSOR_DEVICE_ATTR(temp10_input, S_IRUGO, show_temp_value, NULL, 9);
+static SENSOR_DEVICE_ATTR(temp11_input, S_IRUGO, show_temp_value, NULL, 10);
+static SENSOR_DEVICE_ATTR(temp12_input, S_IRUGO, show_temp_value, NULL, 11);
+static SENSOR_DEVICE_ATTR(temp13_input, S_IRUGO, show_temp_value, NULL, 12);
+static SENSOR_DEVICE_ATTR(temp14_input, S_IRUGO, show_temp_value, NULL, 13);
+static SENSOR_DEVICE_ATTR(temp15_input, S_IRUGO, show_temp_value, NULL, 14);
+static SENSOR_DEVICE_ATTR(temp16_input, S_IRUGO, show_temp_value, NULL, 15);
+
+static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_temp_fault, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_temp_fault, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_temp_fault, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp5_fault, S_IRUGO, show_temp_fault, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp6_fault, S_IRUGO, show_temp_fault, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp7_fault, S_IRUGO, show_temp_fault, NULL, 6);
+static SENSOR_DEVICE_ATTR(temp8_fault, S_IRUGO, show_temp_fault, NULL, 7);
+static SENSOR_DEVICE_ATTR(temp9_fault, S_IRUGO, show_temp_fault, NULL, 8);
+static SENSOR_DEVICE_ATTR(temp10_fault, S_IRUGO, show_temp_fault, NULL, 9);
+static SENSOR_DEVICE_ATTR(temp11_fault, S_IRUGO, show_temp_fault, NULL, 10);
+static SENSOR_DEVICE_ATTR(temp12_fault, S_IRUGO, show_temp_fault, NULL, 11);
+static SENSOR_DEVICE_ATTR(temp13_fault, S_IRUGO, show_temp_fault, NULL, 12);
+static SENSOR_DEVICE_ATTR(temp14_fault, S_IRUGO, show_temp_fault, NULL, 13);
+static SENSOR_DEVICE_ATTR(temp15_fault, S_IRUGO, show_temp_fault, NULL, 14);
+static SENSOR_DEVICE_ATTR(temp16_fault, S_IRUGO, show_temp_fault, NULL, 15);
+
+static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+ clear_temp_alarm, 0);
+static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+ clear_temp_alarm, 1);
+static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+ clear_temp_alarm, 2);
+static SENSOR_DEVICE_ATTR(temp4_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+ clear_temp_alarm, 3);
+static SENSOR_DEVICE_ATTR(temp5_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+ clear_temp_alarm, 4);
+static SENSOR_DEVICE_ATTR(temp6_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+ clear_temp_alarm, 5);
+static SENSOR_DEVICE_ATTR(temp7_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+ clear_temp_alarm, 6);
+static SENSOR_DEVICE_ATTR(temp8_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+ clear_temp_alarm, 7);
+static SENSOR_DEVICE_ATTR(temp9_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+ clear_temp_alarm, 8);
+static SENSOR_DEVICE_ATTR(temp10_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+ clear_temp_alarm, 9);
+static SENSOR_DEVICE_ATTR(temp11_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+ clear_temp_alarm, 10);
+static SENSOR_DEVICE_ATTR(temp12_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+ clear_temp_alarm, 11);
+static SENSOR_DEVICE_ATTR(temp13_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+ clear_temp_alarm, 12);
+static SENSOR_DEVICE_ATTR(temp14_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+ clear_temp_alarm, 13);
+static SENSOR_DEVICE_ATTR(temp15_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+ clear_temp_alarm, 14);
+static SENSOR_DEVICE_ATTR(temp16_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+ clear_temp_alarm, 15);
+
+static struct attribute *fts_temp_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp3_input.dev_attr.attr,
+ &sensor_dev_attr_temp4_input.dev_attr.attr,
+ &sensor_dev_attr_temp5_input.dev_attr.attr,
+ &sensor_dev_attr_temp6_input.dev_attr.attr,
+ &sensor_dev_attr_temp7_input.dev_attr.attr,
+ &sensor_dev_attr_temp8_input.dev_attr.attr,
+ &sensor_dev_attr_temp9_input.dev_attr.attr,
+ &sensor_dev_attr_temp10_input.dev_attr.attr,
+ &sensor_dev_attr_temp11_input.dev_attr.attr,
+ &sensor_dev_attr_temp12_input.dev_attr.attr,
+ &sensor_dev_attr_temp13_input.dev_attr.attr,
+ &sensor_dev_attr_temp14_input.dev_attr.attr,
+ &sensor_dev_attr_temp15_input.dev_attr.attr,
+ &sensor_dev_attr_temp16_input.dev_attr.attr,
+
+ &sensor_dev_attr_temp1_fault.dev_attr.attr,
+ &sensor_dev_attr_temp2_fault.dev_attr.attr,
+ &sensor_dev_attr_temp3_fault.dev_attr.attr,
+ &sensor_dev_attr_temp4_fault.dev_attr.attr,
+ &sensor_dev_attr_temp5_fault.dev_attr.attr,
+ &sensor_dev_attr_temp6_fault.dev_attr.attr,
+ &sensor_dev_attr_temp7_fault.dev_attr.attr,
+ &sensor_dev_attr_temp8_fault.dev_attr.attr,
+ &sensor_dev_attr_temp9_fault.dev_attr.attr,
+ &sensor_dev_attr_temp10_fault.dev_attr.attr,
+ &sensor_dev_attr_temp11_fault.dev_attr.attr,
+ &sensor_dev_attr_temp12_fault.dev_attr.attr,
+ &sensor_dev_attr_temp13_fault.dev_attr.attr,
+ &sensor_dev_attr_temp14_fault.dev_attr.attr,
+ &sensor_dev_attr_temp15_fault.dev_attr.attr,
+ &sensor_dev_attr_temp16_fault.dev_attr.attr,
+
+ &sensor_dev_attr_temp1_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp4_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp5_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp6_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp7_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp8_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp9_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp10_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp11_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp12_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp13_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp14_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp15_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp16_alarm.dev_attr.attr,
+ NULL
+};
+
+/* Fans */
+static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan_value, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan_value, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, show_fan_value, NULL, 2);
+static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, show_fan_value, NULL, 3);
+static SENSOR_DEVICE_ATTR(fan5_input, S_IRUGO, show_fan_value, NULL, 4);
+static SENSOR_DEVICE_ATTR(fan6_input, S_IRUGO, show_fan_value, NULL, 5);
+static SENSOR_DEVICE_ATTR(fan7_input, S_IRUGO, show_fan_value, NULL, 6);
+static SENSOR_DEVICE_ATTR(fan8_input, S_IRUGO, show_fan_value, NULL, 7);
+
+static SENSOR_DEVICE_ATTR(fan1_source, S_IRUGO, show_fan_source, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan2_source, S_IRUGO, show_fan_source, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan3_source, S_IRUGO, show_fan_source, NULL, 2);
+static SENSOR_DEVICE_ATTR(fan4_source, S_IRUGO, show_fan_source, NULL, 3);
+static SENSOR_DEVICE_ATTR(fan5_source, S_IRUGO, show_fan_source, NULL, 4);
+static SENSOR_DEVICE_ATTR(fan6_source, S_IRUGO, show_fan_source, NULL, 5);
+static SENSOR_DEVICE_ATTR(fan7_source, S_IRUGO, show_fan_source, NULL, 6);
+static SENSOR_DEVICE_ATTR(fan8_source, S_IRUGO, show_fan_source, NULL, 7);
+
+static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO | S_IWUSR,
+ show_fan_alarm, clear_fan_alarm, 0);
+static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO | S_IWUSR,
+ show_fan_alarm, clear_fan_alarm, 1);
+static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO | S_IWUSR,
+ show_fan_alarm, clear_fan_alarm, 2);
+static SENSOR_DEVICE_ATTR(fan4_alarm, S_IRUGO | S_IWUSR,
+ show_fan_alarm, clear_fan_alarm, 3);
+static SENSOR_DEVICE_ATTR(fan5_alarm, S_IRUGO | S_IWUSR,
+ show_fan_alarm, clear_fan_alarm, 4);
+static SENSOR_DEVICE_ATTR(fan6_alarm, S_IRUGO | S_IWUSR,
+ show_fan_alarm, clear_fan_alarm, 5);
+static SENSOR_DEVICE_ATTR(fan7_alarm, S_IRUGO | S_IWUSR,
+ show_fan_alarm, clear_fan_alarm, 6);
+static SENSOR_DEVICE_ATTR(fan8_alarm, S_IRUGO | S_IWUSR,
+ show_fan_alarm, clear_fan_alarm, 7);
+
+static struct attribute *fts_fan_attrs[] = {
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
+ &sensor_dev_attr_fan2_input.dev_attr.attr,
+ &sensor_dev_attr_fan3_input.dev_attr.attr,
+ &sensor_dev_attr_fan4_input.dev_attr.attr,
+ &sensor_dev_attr_fan5_input.dev_attr.attr,
+ &sensor_dev_attr_fan6_input.dev_attr.attr,
+ &sensor_dev_attr_fan7_input.dev_attr.attr,
+ &sensor_dev_attr_fan8_input.dev_attr.attr,
+
+ &sensor_dev_attr_fan1_source.dev_attr.attr,
+ &sensor_dev_attr_fan2_source.dev_attr.attr,
+ &sensor_dev_attr_fan3_source.dev_attr.attr,
+ &sensor_dev_attr_fan4_source.dev_attr.attr,
+ &sensor_dev_attr_fan5_source.dev_attr.attr,
+ &sensor_dev_attr_fan6_source.dev_attr.attr,
+ &sensor_dev_attr_fan7_source.dev_attr.attr,
+ &sensor_dev_attr_fan8_source.dev_attr.attr,
+
+ &sensor_dev_attr_fan1_alarm.dev_attr.attr,
+ &sensor_dev_attr_fan2_alarm.dev_attr.attr,
+ &sensor_dev_attr_fan3_alarm.dev_attr.attr,
+ &sensor_dev_attr_fan4_alarm.dev_attr.attr,
+ &sensor_dev_attr_fan5_alarm.dev_attr.attr,
+ &sensor_dev_attr_fan6_alarm.dev_attr.attr,
+ &sensor_dev_attr_fan7_alarm.dev_attr.attr,
+ &sensor_dev_attr_fan8_alarm.dev_attr.attr,
+ NULL
+};
+
+/* Voltages */
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_in_value, NULL, 0);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_in_value, NULL, 1);
+static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_in_value, NULL, 2);
+static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_in_value, NULL, 3);
+static struct attribute *fts_voltage_attrs[] = {
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in3_input.dev_attr.attr,
+ &sensor_dev_attr_in4_input.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group fts_voltage_attr_group = {
+ .attrs = fts_voltage_attrs
+};
+
+static const struct attribute_group fts_temp_attr_group = {
+ .attrs = fts_temp_attrs
+};
+
+static const struct attribute_group fts_fan_attr_group = {
+ .attrs = fts_fan_attrs
+};
+
+static const struct attribute_group *fts_attr_groups[] = {
+ &fts_voltage_attr_group,
+ &fts_temp_attr_group,
+ &fts_fan_attr_group,
+ NULL
+};
+
+/*****************************************************************************/
+/* Module initialization / remove functions */
+/*****************************************************************************/
+static int fts_remove(struct i2c_client *client)
+{
+ struct fts_data *data = dev_get_drvdata(&client->dev);
+
+ watchdog_unregister_device(&data->wdd);
+ return 0;
+}
+
+static int fts_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ u8 revision;
+ struct fts_data *data;
+ int err;
+ s8 deviceid;
+ struct device *hwmon_dev;
+
+ if (client->addr != 0x73)
+ return -ENODEV;
+
+ /* Baseboard Management Controller check */
+ deviceid = i2c_smbus_read_byte_data(client, FTS_DEVICE_ID_REG);
+ if (deviceid > 0 && (deviceid & 0xF0) == 0x10) {
+ switch (deviceid & 0x0F) {
+ case 0x01:
+ break;
+ default:
+ dev_dbg(&client->dev,
+ "No Baseboard Management Controller\n");
+ return -ENODEV;
+ }
+ } else {
+ dev_dbg(&client->dev, "No fujitsu board\n");
+ return -ENODEV;
+ }
+
+ data = devm_kzalloc(&client->dev, sizeof(struct fts_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ mutex_init(&data->update_lock);
+ mutex_init(&data->access_lock);
+ data->client = client;
+ dev_set_drvdata(&client->dev, data);
+
+ err = i2c_smbus_read_byte_data(client, FTS_DEVICE_REVISION_REG);
+ if (err < 0)
+ return err;
+ revision = err;
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
+ "ftsteutates",
+ data,
+ fts_attr_groups);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
+
+ err = fts_watchdog_init(data);
+ if (err)
+ return err;
+
+ dev_info(&client->dev, "Detected FTS Teutates chip, revision: %d.%d\n",
+ (revision & 0xF0) >> 4, revision & 0x0F);
+ return 0;
+}
+
+/*****************************************************************************/
+/* Module Details */
+/*****************************************************************************/
+static struct i2c_driver fts_driver = {
+ .driver = {
+ .name = "ftsteutates",
+ },
+ .id_table = fts_id,
+ .probe = fts_probe,
+ .remove = fts_remove,
+};
+
+module_i2c_driver(fts_driver);
+
+MODULE_AUTHOR("Thilo Cestonaro <thilo.cestonaro@ts.fujitsu.com>");
+MODULE_DESCRIPTION("FTS Teutates driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
new file mode 100644
index 000000000..e6b49500c
--- /dev/null
+++ b/drivers/hwmon/ina3221.c
@@ -0,0 +1,445 @@
+/*
+ * INA3221 Triple Current/Voltage Monitor
+ *
+ * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#define INA3221_DRIVER_NAME "ina3221"
+
+#define INA3221_CONFIG 0x00
+#define INA3221_SHUNT1 0x01
+#define INA3221_BUS1 0x02
+#define INA3221_SHUNT2 0x03
+#define INA3221_BUS2 0x04
+#define INA3221_SHUNT3 0x05
+#define INA3221_BUS3 0x06
+#define INA3221_CRIT1 0x07
+#define INA3221_WARN1 0x08
+#define INA3221_CRIT2 0x09
+#define INA3221_WARN2 0x0a
+#define INA3221_CRIT3 0x0b
+#define INA3221_WARN3 0x0c
+#define INA3221_MASK_ENABLE 0x0f
+
+#define INA3221_CONFIG_MODE_SHUNT BIT(1)
+#define INA3221_CONFIG_MODE_BUS BIT(2)
+#define INA3221_CONFIG_MODE_CONTINUOUS BIT(3)
+
+#define INA3221_RSHUNT_DEFAULT 10000
+
+enum ina3221_fields {
+ /* Configuration */
+ F_RST,
+
+ /* Alert Flags */
+ F_WF3, F_WF2, F_WF1,
+ F_CF3, F_CF2, F_CF1,
+
+ /* sentinel */
+ F_MAX_FIELDS
+};
+
+static const struct reg_field ina3221_reg_fields[] = {
+ [F_RST] = REG_FIELD(INA3221_CONFIG, 15, 15),
+
+ [F_WF3] = REG_FIELD(INA3221_MASK_ENABLE, 3, 3),
+ [F_WF2] = REG_FIELD(INA3221_MASK_ENABLE, 4, 4),
+ [F_WF1] = REG_FIELD(INA3221_MASK_ENABLE, 5, 5),
+ [F_CF3] = REG_FIELD(INA3221_MASK_ENABLE, 7, 7),
+ [F_CF2] = REG_FIELD(INA3221_MASK_ENABLE, 8, 8),
+ [F_CF1] = REG_FIELD(INA3221_MASK_ENABLE, 9, 9),
+};
+
+enum ina3221_channels {
+ INA3221_CHANNEL1,
+ INA3221_CHANNEL2,
+ INA3221_CHANNEL3,
+ INA3221_NUM_CHANNELS
+};
+
+static const unsigned int register_channel[] = {
+ [INA3221_SHUNT1] = INA3221_CHANNEL1,
+ [INA3221_SHUNT2] = INA3221_CHANNEL2,
+ [INA3221_SHUNT3] = INA3221_CHANNEL3,
+ [INA3221_CRIT1] = INA3221_CHANNEL1,
+ [INA3221_CRIT2] = INA3221_CHANNEL2,
+ [INA3221_CRIT3] = INA3221_CHANNEL3,
+ [INA3221_WARN1] = INA3221_CHANNEL1,
+ [INA3221_WARN2] = INA3221_CHANNEL2,
+ [INA3221_WARN3] = INA3221_CHANNEL3,
+};
+
+/**
+ * struct ina3221_data - device specific information
+ * @regmap: Register map of the device
+ * @fields: Register fields of the device
+ * @shunt_resistors: Array of resistor values per channel
+ */
+struct ina3221_data {
+ struct regmap *regmap;
+ struct regmap_field *fields[F_MAX_FIELDS];
+ int shunt_resistors[INA3221_NUM_CHANNELS];
+};
+
+static int ina3221_read_value(struct ina3221_data *ina, unsigned int reg,
+ int *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(ina->regmap, reg, &regval);
+ if (ret)
+ return ret;
+
+ *val = sign_extend32(regval >> 3, 12);
+
+ return 0;
+}
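+
+/*
+ * Editorial note on the decoding above: the INA3221 stores a 13-bit
+ * signed result left-aligned in bits 15:3, hence the shift and sign
+ * extension. With the 8 mV/LSB bus-voltage scale, a raw register value
+ * of 0x7ff8 decodes to (0x7ff8 >> 3) * 8 = 4095 * 8 = 32760 mV.
+ */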
+
+static ssize_t ina3221_show_bus_voltage(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
+ struct ina3221_data *ina = dev_get_drvdata(dev);
+ unsigned int reg = sd_attr->index;
+ int val, voltage_mv, ret;
+
+ ret = ina3221_read_value(ina, reg, &val);
+ if (ret)
+ return ret;
+
+ voltage_mv = val * 8;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", voltage_mv);
+}
+
+static ssize_t ina3221_show_shunt_voltage(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
+ struct ina3221_data *ina = dev_get_drvdata(dev);
+ unsigned int reg = sd_attr->index;
+ int val, voltage_uv, ret;
+
+ ret = ina3221_read_value(ina, reg, &val);
+ if (ret)
+ return ret;
+ voltage_uv = val * 40;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", voltage_uv);
+}
+
+static ssize_t ina3221_show_current(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
+ struct ina3221_data *ina = dev_get_drvdata(dev);
+ unsigned int reg = sd_attr->index;
+ unsigned int channel = register_channel[reg];
+ int resistance_uo = ina->shunt_resistors[channel];
+ int val, current_ma, voltage_nv, ret;
+
+ ret = ina3221_read_value(ina, reg, &val);
+ if (ret)
+ return ret;
+ voltage_nv = val * 40000;
+
+ current_ma = DIV_ROUND_CLOSEST(voltage_nv, resistance_uo);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", current_ma);
+}
+
+static ssize_t ina3221_set_current(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
+ struct ina3221_data *ina = dev_get_drvdata(dev);
+ unsigned int reg = sd_attr->index;
+ unsigned int channel = register_channel[reg];
+ int resistance_uo = ina->shunt_resistors[channel];
+ int val, current_ma, voltage_uv, ret;
+
+ ret = kstrtoint(buf, 0, &current_ma);
+ if (ret)
+ return ret;
+
+ /* clamp current */
+ current_ma = clamp_val(current_ma,
+ INT_MIN / resistance_uo,
+ INT_MAX / resistance_uo);
+
+ voltage_uv = DIV_ROUND_CLOSEST(current_ma * resistance_uo, 1000);
+
+ /* clamp voltage */
+ voltage_uv = clamp_val(voltage_uv, -163800, 163800);
+
+ /* 1 / 40uV(scale) << 3(register shift) = 5 */
+ val = DIV_ROUND_CLOSEST(voltage_uv, 5) & 0xfff8;
+
+ ret = regmap_write(ina->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
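+
+/*
+ * Editorial worked example for the conversion above: with the default
+ * 10000 uOhm shunt, writing 1000 mA gives
+ * voltage_uv = 1000 * 10000 / 1000 = 10000 and val = 10000 / 5 = 2000
+ * (0x07d0), whose low three bits are already clear. Reading back yields
+ * 250 * 40000 / 10000 = 1000 mA again.
+ */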
+
+static ssize_t ina3221_show_shunt(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
+ struct ina3221_data *ina = dev_get_drvdata(dev);
+ unsigned int channel = sd_attr->index;
+ unsigned int resistance_uo;
+
+ resistance_uo = ina->shunt_resistors[channel];
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", resistance_uo);
+}
+
+static ssize_t ina3221_set_shunt(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
+ struct ina3221_data *ina = dev_get_drvdata(dev);
+ unsigned int channel = sd_attr->index;
+ int val;
+ int ret;
+
+ ret = kstrtoint(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ val = clamp_val(val, 1, INT_MAX);
+
+ ina->shunt_resistors[channel] = val;
+
+ return count;
+}
+
+static ssize_t ina3221_show_alert(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
+ struct ina3221_data *ina = dev_get_drvdata(dev);
+ unsigned int field = sd_attr->index;
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_field_read(ina->fields[field], &regval);
+ if (ret)
+ return ret;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", regval);
+}
+
+/* bus voltage */
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO,
+ ina3221_show_bus_voltage, NULL, INA3221_BUS1);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO,
+ ina3221_show_bus_voltage, NULL, INA3221_BUS2);
+static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO,
+ ina3221_show_bus_voltage, NULL, INA3221_BUS3);
+
+/* calculated current */
+static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO,
+ ina3221_show_current, NULL, INA3221_SHUNT1);
+static SENSOR_DEVICE_ATTR(curr2_input, S_IRUGO,
+ ina3221_show_current, NULL, INA3221_SHUNT2);
+static SENSOR_DEVICE_ATTR(curr3_input, S_IRUGO,
+ ina3221_show_current, NULL, INA3221_SHUNT3);
+
+/* shunt resistance */
+static SENSOR_DEVICE_ATTR(shunt1_resistor, S_IRUGO | S_IWUSR,
+ ina3221_show_shunt, ina3221_set_shunt, INA3221_CHANNEL1);
+static SENSOR_DEVICE_ATTR(shunt2_resistor, S_IRUGO | S_IWUSR,
+ ina3221_show_shunt, ina3221_set_shunt, INA3221_CHANNEL2);
+static SENSOR_DEVICE_ATTR(shunt3_resistor, S_IRUGO | S_IWUSR,
+ ina3221_show_shunt, ina3221_set_shunt, INA3221_CHANNEL3);
+
+/* critical current */
+static SENSOR_DEVICE_ATTR(curr1_crit, S_IRUGO | S_IWUSR,
+ ina3221_show_current, ina3221_set_current, INA3221_CRIT1);
+static SENSOR_DEVICE_ATTR(curr2_crit, S_IRUGO | S_IWUSR,
+ ina3221_show_current, ina3221_set_current, INA3221_CRIT2);
+static SENSOR_DEVICE_ATTR(curr3_crit, S_IRUGO | S_IWUSR,
+ ina3221_show_current, ina3221_set_current, INA3221_CRIT3);
+
+/* critical current alert */
+static SENSOR_DEVICE_ATTR(curr1_crit_alarm, S_IRUGO,
+ ina3221_show_alert, NULL, F_CF1);
+static SENSOR_DEVICE_ATTR(curr2_crit_alarm, S_IRUGO,
+ ina3221_show_alert, NULL, F_CF2);
+static SENSOR_DEVICE_ATTR(curr3_crit_alarm, S_IRUGO,
+ ina3221_show_alert, NULL, F_CF3);
+
+/* warning current */
+static SENSOR_DEVICE_ATTR(curr1_max, S_IRUGO | S_IWUSR,
+ ina3221_show_current, ina3221_set_current, INA3221_WARN1);
+static SENSOR_DEVICE_ATTR(curr2_max, S_IRUGO | S_IWUSR,
+ ina3221_show_current, ina3221_set_current, INA3221_WARN2);
+static SENSOR_DEVICE_ATTR(curr3_max, S_IRUGO | S_IWUSR,
+ ina3221_show_current, ina3221_set_current, INA3221_WARN3);
+
+/* warning current alert */
+static SENSOR_DEVICE_ATTR(curr1_max_alarm, S_IRUGO,
+ ina3221_show_alert, NULL, F_WF1);
+static SENSOR_DEVICE_ATTR(curr2_max_alarm, S_IRUGO,
+ ina3221_show_alert, NULL, F_WF2);
+static SENSOR_DEVICE_ATTR(curr3_max_alarm, S_IRUGO,
+ ina3221_show_alert, NULL, F_WF3);
+
+/* shunt voltage */
+static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO,
+ ina3221_show_shunt_voltage, NULL, INA3221_SHUNT1);
+static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO,
+ ina3221_show_shunt_voltage, NULL, INA3221_SHUNT2);
+static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO,
+ ina3221_show_shunt_voltage, NULL, INA3221_SHUNT3);
+
+static struct attribute *ina3221_attrs[] = {
+ /* channel 1 */
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_curr1_input.dev_attr.attr,
+ &sensor_dev_attr_shunt1_resistor.dev_attr.attr,
+ &sensor_dev_attr_curr1_crit.dev_attr.attr,
+ &sensor_dev_attr_curr1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_curr1_max.dev_attr.attr,
+ &sensor_dev_attr_curr1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_in4_input.dev_attr.attr,
+
+ /* channel 2 */
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_curr2_input.dev_attr.attr,
+ &sensor_dev_attr_shunt2_resistor.dev_attr.attr,
+ &sensor_dev_attr_curr2_crit.dev_attr.attr,
+ &sensor_dev_attr_curr2_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_curr2_max.dev_attr.attr,
+ &sensor_dev_attr_curr2_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_in5_input.dev_attr.attr,
+
+ /* channel 3 */
+ &sensor_dev_attr_in3_input.dev_attr.attr,
+ &sensor_dev_attr_curr3_input.dev_attr.attr,
+ &sensor_dev_attr_shunt3_resistor.dev_attr.attr,
+ &sensor_dev_attr_curr3_crit.dev_attr.attr,
+ &sensor_dev_attr_curr3_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_curr3_max.dev_attr.attr,
+ &sensor_dev_attr_curr3_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_in6_input.dev_attr.attr,
+
+ NULL,
+};
+ATTRIBUTE_GROUPS(ina3221);
+
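+/*
+ * Measurement and status registers must always be read from the
+ * hardware, so they are marked volatile to bypass the regmap cache.
+ */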
+static const struct regmap_range ina3221_yes_ranges[] = {
+ regmap_reg_range(INA3221_SHUNT1, INA3221_BUS3),
+ regmap_reg_range(INA3221_MASK_ENABLE, INA3221_MASK_ENABLE),
+};
+
+static const struct regmap_access_table ina3221_volatile_table = {
+ .yes_ranges = ina3221_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ina3221_yes_ranges),
+};
+
+static const struct regmap_config ina3221_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_table = &ina3221_volatile_table,
+};
+
+static int ina3221_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct ina3221_data *ina;
+ struct device *hwmon_dev;
+ int i, ret;
+
+ ina = devm_kzalloc(dev, sizeof(*ina), GFP_KERNEL);
+ if (!ina)
+ return -ENOMEM;
+
+ ina->regmap = devm_regmap_init_i2c(client, &ina3221_regmap_config);
+ if (IS_ERR(ina->regmap)) {
+ dev_err(dev, "Unable to allocate register map\n");
+ return PTR_ERR(ina->regmap);
+ }
+
+ for (i = 0; i < F_MAX_FIELDS; i++) {
+ ina->fields[i] = devm_regmap_field_alloc(dev,
+ ina->regmap,
+ ina3221_reg_fields[i]);
+ if (IS_ERR(ina->fields[i])) {
+ dev_err(dev, "Unable to allocate regmap fields\n");
+ return PTR_ERR(ina->fields[i]);
+ }
+ }
+
+ for (i = 0; i < INA3221_NUM_CHANNELS; i++)
+ ina->shunt_resistors[i] = INA3221_RSHUNT_DEFAULT;
+
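+ /* software-reset the chip so it starts from its power-on defaults */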
+ ret = regmap_field_write(ina->fields[F_RST], true);
+ if (ret) {
+ dev_err(dev, "Unable to reset device\n");
+ return ret;
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev,
+ client->name,
+ ina, ina3221_groups);
+ if (IS_ERR(hwmon_dev)) {
+ dev_err(dev, "Unable to register hwmon device\n");
+ return PTR_ERR(hwmon_dev);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id ina3221_of_match_table[] = {
+ { .compatible = "ti,ina3221", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ina3221_of_match_table);
+
+static const struct i2c_device_id ina3221_ids[] = {
+ { "ina3221", 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, ina3221_ids);
+
+static struct i2c_driver ina3221_i2c_driver = {
+ .probe = ina3221_probe,
+ .driver = {
+ .name = INA3221_DRIVER_NAME,
+ .of_match_table = ina3221_of_match_table,
+ },
+ .id_table = ina3221_ids,
+};
+module_i2c_driver(ina3221_i2c_driver);
+
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("Texas Instruments INA3221 HWMon Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 55bf47934..4667012b4 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -491,7 +491,7 @@ struct it87_sio_data {
struct it87_data {
const struct attribute_group *groups[7];
enum chips type;
- u16 features;
+ u32 features;
u8 peci_mask;
u8 old_peci_mask;
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 9887d3224..9d5f85f33 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -31,6 +31,7 @@
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
+#include <linux/of.h>
/* Addresses to scan */
static const unsigned short normal_i2c[] = {
@@ -104,6 +105,9 @@ static const unsigned short normal_i2c[] = {
#define MCP9804_DEVID 0x0200
#define MCP9804_DEVID_MASK 0xfffc
+#define MCP9808_DEVID 0x0400
+#define MCP9808_DEVID_MASK 0xfffc
+
#define MCP98242_DEVID 0x2000
#define MCP98242_DEVID_MASK 0xfffc
@@ -160,6 +164,7 @@ static struct jc42_chips jc42_chips[] = {
{ IDT_MANID, TS3001_DEVID, TS3001_DEVID_MASK },
{ MAX_MANID, MAX6604_DEVID, MAX6604_DEVID_MASK },
{ MCP_MANID, MCP9804_DEVID, MCP9804_DEVID_MASK },
+ { MCP_MANID, MCP9808_DEVID, MCP9808_DEVID_MASK },
{ MCP_MANID, MCP98242_DEVID, MCP98242_DEVID_MASK },
{ MCP_MANID, MCP98243_DEVID, MCP98243_DEVID_MASK },
{ MCP_MANID, MCP98244_DEVID, MCP98244_DEVID_MASK },
@@ -537,11 +542,20 @@ static const struct i2c_device_id jc42_id[] = {
};
MODULE_DEVICE_TABLE(i2c, jc42_id);
+#ifdef CONFIG_OF
+static const struct of_device_id jc42_of_ids[] = {
+ { .compatible = "jedec,jc-42.4-temp", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, jc42_of_ids);
+#endif
+
static struct i2c_driver jc42_driver = {
- .class = I2C_CLASS_SPD,
+ .class = I2C_CLASS_SPD | I2C_CLASS_HWMON,
.driver = {
.name = "jc42",
.pm = JC42_DEV_PM_OPS,
+ .of_match_table = of_match_ptr(jc42_of_ids),
},
.probe = jc42_probe,
.remove = jc42_remove,
diff --git a/drivers/hwmon/jz4740-hwmon.c b/drivers/hwmon/jz4740-hwmon.c
index df9b3447f..0621ee1b3 100644
--- a/drivers/hwmon/jz4740-hwmon.c
+++ b/drivers/hwmon/jz4740-hwmon.c
@@ -29,23 +29,13 @@
struct jz4740_hwmon {
void __iomem *base;
-
int irq;
-
const struct mfd_cell *cell;
- struct device *hwmon;
-
+ struct platform_device *pdev;
struct completion read_completion;
-
struct mutex lock;
};
-static ssize_t jz4740_hwmon_show_name(struct device *dev,
- struct device_attribute *dev_attr, char *buf)
-{
- return sprintf(buf, "jz4740\n");
-}
-
static irqreturn_t jz4740_hwmon_irq(int irq, void *data)
{
struct jz4740_hwmon *hwmon = data;
@@ -58,6 +48,7 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
struct jz4740_hwmon *hwmon = dev_get_drvdata(dev);
+ struct platform_device *pdev = hwmon->pdev;
struct completion *completion = &hwmon->read_completion;
long t;
unsigned long val;
@@ -68,7 +59,7 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
reinit_completion(completion);
enable_irq(hwmon->irq);
- hwmon->cell->enable(to_platform_device(dev));
+ hwmon->cell->enable(pdev);
t = wait_for_completion_interruptible_timeout(completion, HZ);
@@ -80,7 +71,7 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
ret = t ? t : -ETIMEDOUT;
}
- hwmon->cell->disable(to_platform_device(dev));
+ hwmon->cell->disable(pdev);
disable_irq(hwmon->irq);
mutex_unlock(&hwmon->lock);
@@ -88,26 +79,24 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
return ret;
}
-static DEVICE_ATTR(name, S_IRUGO, jz4740_hwmon_show_name, NULL);
static DEVICE_ATTR(in0_input, S_IRUGO, jz4740_hwmon_read_adcin, NULL);
-static struct attribute *jz4740_hwmon_attributes[] = {
- &dev_attr_name.attr,
+static struct attribute *jz4740_attrs[] = {
&dev_attr_in0_input.attr,
NULL
};
-static const struct attribute_group jz4740_hwmon_attr_group = {
- .attrs = jz4740_hwmon_attributes,
-};
+ATTRIBUTE_GROUPS(jz4740);
static int jz4740_hwmon_probe(struct platform_device *pdev)
{
int ret;
+ struct device *dev = &pdev->dev;
struct jz4740_hwmon *hwmon;
+ struct device *hwmon_dev;
struct resource *mem;
- hwmon = devm_kzalloc(&pdev->dev, sizeof(*hwmon), GFP_KERNEL);
+ hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL);
if (!hwmon)
return -ENOMEM;
@@ -125,12 +114,11 @@ static int jz4740_hwmon_probe(struct platform_device *pdev)
if (IS_ERR(hwmon->base))
return PTR_ERR(hwmon->base);
+ hwmon->pdev = pdev;
init_completion(&hwmon->read_completion);
mutex_init(&hwmon->lock);
- platform_set_drvdata(pdev, hwmon);
-
- ret = devm_request_irq(&pdev->dev, hwmon->irq, jz4740_hwmon_irq, 0,
+ ret = devm_request_irq(dev, hwmon->irq, jz4740_hwmon_irq, 0,
pdev->name, hwmon);
if (ret) {
dev_err(&pdev->dev, "Failed to request irq: %d\n", ret);
@@ -138,38 +126,13 @@ static int jz4740_hwmon_probe(struct platform_device *pdev)
}
disable_irq(hwmon->irq);
- ret = sysfs_create_group(&pdev->dev.kobj, &jz4740_hwmon_attr_group);
- if (ret) {
- dev_err(&pdev->dev, "Failed to create sysfs group: %d\n", ret);
- return ret;
- }
-
- hwmon->hwmon = hwmon_device_register(&pdev->dev);
- if (IS_ERR(hwmon->hwmon)) {
- ret = PTR_ERR(hwmon->hwmon);
- goto err_remove_file;
- }
-
- return 0;
-
-err_remove_file:
- sysfs_remove_group(&pdev->dev.kobj, &jz4740_hwmon_attr_group);
- return ret;
-}
-
-static int jz4740_hwmon_remove(struct platform_device *pdev)
-{
- struct jz4740_hwmon *hwmon = platform_get_drvdata(pdev);
-
- hwmon_device_unregister(hwmon->hwmon);
- sysfs_remove_group(&pdev->dev.kobj, &jz4740_hwmon_attr_group);
-
- return 0;
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, "jz4740", hwmon,
+ jz4740_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
static struct platform_driver jz4740_hwmon_driver = {
.probe = jz4740_hwmon_probe,
- .remove = jz4740_hwmon_remove,
.driver = {
.name = "jz4740-hwmon",
},
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 69166ab31..92f9d4bbf 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -26,8 +26,8 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
-#include <linux/mutex.h>
#include <linux/of.h>
+#include <linux/regmap.h>
#include <linux/thermal.h>
#include "lm75.h"
@@ -66,35 +66,21 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
/* The LM75 registers */
+#define LM75_REG_TEMP 0x00
#define LM75_REG_CONF 0x01
-static const u8 LM75_REG_TEMP[3] = {
- 0x00, /* input */
- 0x03, /* max */
- 0x02, /* hyst */
-};
+#define LM75_REG_HYST 0x02
+#define LM75_REG_MAX 0x03
/* Each client has this additional data */
struct lm75_data {
struct i2c_client *client;
- struct device *hwmon_dev;
- struct mutex update_lock;
+ struct regmap *regmap;
u8 orig_conf;
u8 resolution; /* In bits, between 9 and 12 */
u8 resolution_limits;
- char valid; /* !=0 if registers are valid */
- unsigned long last_updated; /* In jiffies */
- unsigned long sample_time; /* In jiffies */
- s16 temp[3]; /* Register values,
- 0 = input
- 1 = max
- 2 = hyst */
+ unsigned int sample_time; /* In ms */
};
-static int lm75_read_value(struct i2c_client *client, u8 reg);
-static int lm75_write_value(struct i2c_client *client, u8 reg, u16 value);
-static struct lm75_data *lm75_update_device(struct device *dev);
-
-
/*-----------------------------------------------------------------------*/
static inline long lm75_reg_to_mc(s16 temp, u8 resolution)
@@ -106,12 +92,15 @@ static inline long lm75_reg_to_mc(s16 temp, u8 resolution)
static int lm75_read_temp(void *dev, int *temp)
{
- struct lm75_data *data = lm75_update_device(dev);
+ struct lm75_data *data = dev_get_drvdata(dev);
+ unsigned int _temp;
+ int err;
- if (IS_ERR(data))
- return PTR_ERR(data);
+ err = regmap_read(data->regmap, LM75_REG_TEMP, &_temp);
+ if (err < 0)
+ return err;
- *temp = lm75_reg_to_mc(data->temp[0], data->resolution);
+ *temp = lm75_reg_to_mc(_temp, data->resolution);
return 0;
}
@@ -120,13 +109,15 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- struct lm75_data *data = lm75_update_device(dev);
+ struct lm75_data *data = dev_get_drvdata(dev);
+ unsigned int temp = 0;
+ int err;
- if (IS_ERR(data))
- return PTR_ERR(data);
+ err = regmap_read(data->regmap, attr->index, &temp);
+ if (err < 0)
+ return err;
- return sprintf(buf, "%ld\n", lm75_reg_to_mc(data->temp[attr->index],
- data->resolution));
+ return sprintf(buf, "%ld\n", lm75_reg_to_mc(temp, data->resolution));
}
static ssize_t set_temp(struct device *dev, struct device_attribute *da,
@@ -134,8 +125,6 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct lm75_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- int nr = attr->index;
long temp;
int error;
u8 resolution;
@@ -153,25 +142,36 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
else
resolution = data->resolution;
- mutex_lock(&data->update_lock);
temp = clamp_val(temp, LM75_TEMP_MIN, LM75_TEMP_MAX);
- data->temp[nr] = DIV_ROUND_CLOSEST(temp << (resolution - 8),
- 1000) << (16 - resolution);
- lm75_write_value(client, LM75_REG_TEMP[nr], data->temp[nr]);
- mutex_unlock(&data->update_lock);
+ temp = DIV_ROUND_CLOSEST(temp << (resolution - 8),
+ 1000) << (16 - resolution);
+ error = regmap_write(data->regmap, attr->index, temp);
+ if (error < 0)
+ return error;
+
return count;
}
+static ssize_t show_update_interval(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct lm75_data *data = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", data->sample_time);
+}
+
static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
- show_temp, set_temp, 1);
+ show_temp, set_temp, LM75_REG_MAX);
static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO,
- show_temp, set_temp, 2);
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
+ show_temp, set_temp, LM75_REG_HYST);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, LM75_REG_TEMP);
+static DEVICE_ATTR(update_interval, S_IRUGO, show_update_interval, NULL);
static struct attribute *lm75_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
+ &dev_attr_update_interval.attr,
NULL
};
@@ -185,12 +185,42 @@ static const struct thermal_zone_of_device_ops lm75_of_thermal_ops = {
/* device probe and removal */
+static bool lm75_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+ return reg != LM75_REG_TEMP;
+}
+
+static bool lm75_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ return reg == LM75_REG_TEMP;
+}
+
+static const struct regmap_config lm75_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = LM75_REG_MAX,
+ .writeable_reg = lm75_is_writeable_reg,
+ .volatile_reg = lm75_is_volatile_reg,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+ .cache_type = REGCACHE_RBTREE,
+ .use_single_rw = true,
+};
+
+static void lm75_remove(void *data)
+{
+ struct lm75_data *lm75 = data;
+ struct i2c_client *client = lm75->client;
+
+ i2c_smbus_write_byte_data(client, LM75_REG_CONF, lm75->orig_conf);
+}
+
static int
lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
+ struct device *hwmon_dev;
struct lm75_data *data;
- int status;
+ int status, err;
u8 set_mask, clr_mask;
int new;
enum lm75_type kind = id->driver_data;
@@ -204,8 +234,10 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
return -ENOMEM;
data->client = client;
- i2c_set_clientdata(client, data);
- mutex_init(&data->update_lock);
+
+ data->regmap = devm_regmap_init_i2c(client, &lm75_regmap_config);
+ if (IS_ERR(data->regmap))
+ return PTR_ERR(data->regmap);
/* Set to LM75 resolution (9 bits, 1/2 degree C) and range.
* Then tweak to be more precise when appropriate.
@@ -217,7 +249,7 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
case adt75:
clr_mask |= 1 << 5; /* not one-shot mode */
data->resolution = 12;
- data->sample_time = HZ / 8;
+ data->sample_time = MSEC_PER_SEC / 8;
break;
case ds1775:
case ds75:
@@ -225,35 +257,35 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
clr_mask |= 3 << 5;
set_mask |= 2 << 5; /* 11-bit mode */
data->resolution = 11;
- data->sample_time = HZ;
+ data->sample_time = MSEC_PER_SEC;
break;
case ds7505:
set_mask |= 3 << 5; /* 12-bit mode */
data->resolution = 12;
- data->sample_time = HZ / 4;
+ data->sample_time = MSEC_PER_SEC / 4;
break;
case g751:
case lm75:
case lm75a:
data->resolution = 9;
- data->sample_time = HZ / 2;
+ data->sample_time = MSEC_PER_SEC / 2;
break;
case lm75b:
data->resolution = 11;
- data->sample_time = HZ / 4;
+ data->sample_time = MSEC_PER_SEC / 4;
break;
case max6625:
data->resolution = 9;
- data->sample_time = HZ / 4;
+ data->sample_time = MSEC_PER_SEC / 4;
break;
case max6626:
data->resolution = 12;
data->resolution_limits = 9;
- data->sample_time = HZ / 4;
+ data->sample_time = MSEC_PER_SEC / 4;
break;
case tcn75:
data->resolution = 9;
- data->sample_time = HZ / 8;
+ data->sample_time = MSEC_PER_SEC / 8;
break;
case mcp980x:
data->resolution_limits = 9;
@@ -262,14 +294,14 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
case tmp101:
set_mask |= 3 << 5; /* 12-bit mode */
data->resolution = 12;
- data->sample_time = HZ;
+ data->sample_time = MSEC_PER_SEC;
clr_mask |= 1 << 7; /* not one-shot mode */
break;
case tmp112:
set_mask |= 3 << 5; /* 12-bit mode */
clr_mask |= 1 << 7; /* not one-shot mode */
data->resolution = 12;
- data->sample_time = HZ / 4;
+ data->sample_time = MSEC_PER_SEC / 4;
break;
case tmp105:
case tmp175:
@@ -278,17 +310,17 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
set_mask |= 3 << 5; /* 12-bit mode */
clr_mask |= 1 << 7; /* not one-shot mode */
data->resolution = 12;
- data->sample_time = HZ / 2;
+ data->sample_time = MSEC_PER_SEC / 2;
break;
case tmp75c:
clr_mask |= 1 << 5; /* not one-shot mode */
data->resolution = 12;
- data->sample_time = HZ / 4;
+ data->sample_time = MSEC_PER_SEC / 4;
break;
}
/* configure as specified */
- status = lm75_read_value(client, LM75_REG_CONF);
+ status = i2c_smbus_read_byte_data(client, LM75_REG_CONF);
if (status < 0) {
dev_dbg(dev, "Can't read config? %d\n", status);
return status;
@@ -297,30 +329,25 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
new = status & ~clr_mask;
new |= set_mask;
if (status != new)
- lm75_write_value(client, LM75_REG_CONF, new);
- dev_dbg(dev, "Config %02x\n", new);
+ i2c_smbus_write_byte_data(client, LM75_REG_CONF, new);
- data->hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
- data, lm75_groups);
- if (IS_ERR(data->hwmon_dev))
- return PTR_ERR(data->hwmon_dev);
+ err = devm_add_action_or_reset(dev, lm75_remove, data);
+ if (err)
+ return err;
- devm_thermal_zone_of_sensor_register(data->hwmon_dev, 0,
- data->hwmon_dev,
- &lm75_of_thermal_ops);
+ dev_dbg(dev, "Config %02x\n", new);
- dev_info(dev, "%s: sensor '%s'\n",
- dev_name(data->hwmon_dev), client->name);
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data, lm75_groups);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
- return 0;
-}
+ devm_thermal_zone_of_sensor_register(hwmon_dev, 0,
+ hwmon_dev,
+ &lm75_of_thermal_ops);
-static int lm75_remove(struct i2c_client *client)
-{
- struct lm75_data *data = i2c_get_clientdata(client);
+ dev_info(dev, "%s: sensor '%s'\n", dev_name(hwmon_dev), client->name);
- hwmon_device_unregister(data->hwmon_dev);
- lm75_write_value(client, LM75_REG_CONF, data->orig_conf);
return 0;
}
@@ -449,13 +476,13 @@ static int lm75_suspend(struct device *dev)
{
int status;
struct i2c_client *client = to_i2c_client(dev);
- status = lm75_read_value(client, LM75_REG_CONF);
+ status = i2c_smbus_read_byte_data(client, LM75_REG_CONF);
if (status < 0) {
dev_dbg(&client->dev, "Can't read config? %d\n", status);
return status;
}
status = status | LM75_SHUTDOWN;
- lm75_write_value(client, LM75_REG_CONF, status);
+ i2c_smbus_write_byte_data(client, LM75_REG_CONF, status);
return 0;
}
@@ -463,13 +490,13 @@ static int lm75_resume(struct device *dev)
{
int status;
struct i2c_client *client = to_i2c_client(dev);
- status = lm75_read_value(client, LM75_REG_CONF);
+ status = i2c_smbus_read_byte_data(client, LM75_REG_CONF);
if (status < 0) {
dev_dbg(&client->dev, "Can't read config? %d\n", status);
return status;
}
status = status & ~LM75_SHUTDOWN;
- lm75_write_value(client, LM75_REG_CONF, status);
+ i2c_smbus_write_byte_data(client, LM75_REG_CONF, status);
return 0;
}
@@ -489,73 +516,11 @@ static struct i2c_driver lm75_driver = {
.pm = LM75_DEV_PM_OPS,
},
.probe = lm75_probe,
- .remove = lm75_remove,
.id_table = lm75_ids,
.detect = lm75_detect,
.address_list = normal_i2c,
};
-/*-----------------------------------------------------------------------*/
-
-/* register access */
-
-/*
- * All registers are word-sized, except for the configuration register.
- * LM75 uses a high-byte first convention, which is exactly opposite to
- * the SMBus standard.
- */
-static int lm75_read_value(struct i2c_client *client, u8 reg)
-{
- if (reg == LM75_REG_CONF)
- return i2c_smbus_read_byte_data(client, reg);
- else
- return i2c_smbus_read_word_swapped(client, reg);
-}
-
-static int lm75_write_value(struct i2c_client *client, u8 reg, u16 value)
-{
- if (reg == LM75_REG_CONF)
- return i2c_smbus_write_byte_data(client, reg, value);
- else
- return i2c_smbus_write_word_swapped(client, reg, value);
-}
-
-static struct lm75_data *lm75_update_device(struct device *dev)
-{
- struct lm75_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- struct lm75_data *ret = data;
-
- mutex_lock(&data->update_lock);
-
- if (time_after(jiffies, data->last_updated + data->sample_time)
- || !data->valid) {
- int i;
- dev_dbg(&client->dev, "Starting lm75 update\n");
-
- for (i = 0; i < ARRAY_SIZE(data->temp); i++) {
- int status;
-
- status = lm75_read_value(client, LM75_REG_TEMP[i]);
- if (unlikely(status < 0)) {
- dev_dbg(dev,
- "LM75: Failed to read value: reg %d, error %d\n",
- LM75_REG_TEMP[i], status);
- ret = ERR_PTR(status);
- data->valid = 0;
- goto abort;
- }
- data->temp[i] = status;
- }
- data->last_updated = jiffies;
- data->valid = 1;
- }
-
-abort:
- mutex_unlock(&data->update_lock);
- return ret;
-}
-
module_i2c_driver(lm75_driver);
MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>");
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index e30a5939d..496e771b3 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -171,7 +171,6 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
#define SA56004_REG_R_LOCAL_TEMPL 0x22
-#define LM90_DEF_CONVRATE_RVAL 6 /* Def conversion rate register value */
#define LM90_MAX_CONVRATE_MS 16000 /* Maximum conversion rate in ms */
/* TMP451 registers */
@@ -366,11 +365,9 @@ enum lm90_temp11_reg_index {
struct lm90_data {
struct i2c_client *client;
- struct device *hwmon_dev;
const struct attribute_group *groups[6];
struct mutex update_lock;
- struct regulator *regulator;
- char valid; /* zero until following fields are valid */
+ bool valid; /* true if register values are valid */
unsigned long last_updated; /* in jiffies */
int kind;
u32 flags;
@@ -412,7 +409,7 @@ static inline s32 adm1032_write_byte(struct i2c_client *client, u8 value)
* because we don't want the address pointer to change between the write
* byte and the read byte transactions.
*/
-static int lm90_read_reg(struct i2c_client *client, u8 reg, u8 *value)
+static int lm90_read_reg(struct i2c_client *client, u8 reg)
{
int err;
@@ -423,20 +420,12 @@ static int lm90_read_reg(struct i2c_client *client, u8 reg, u8 *value)
} else
err = i2c_smbus_read_byte_data(client, reg);
- if (err < 0) {
- dev_warn(&client->dev, "Register %#02x read failed (%d)\n",
- reg, err);
- return err;
- }
- *value = err;
-
- return 0;
+ return err;
}
-static int lm90_read16(struct i2c_client *client, u8 regh, u8 regl, u16 *value)
+static int lm90_read16(struct i2c_client *client, u8 regh, u8 regl)
{
- int err;
- u8 oldh, newh, l;
+ int oldh, newh, l;
/*
* There is a trick here. We have to read two registers to have the
@@ -451,18 +440,21 @@ static int lm90_read16(struct i2c_client *client, u8 regh, u8 regl, u16 *value)
* we have to read the low byte again, and now we believe we have a
* correct reading.
*/
- if ((err = lm90_read_reg(client, regh, &oldh))
- || (err = lm90_read_reg(client, regl, &l))
- || (err = lm90_read_reg(client, regh, &newh)))
- return err;
+ oldh = lm90_read_reg(client, regh);
+ if (oldh < 0)
+ return oldh;
+ l = lm90_read_reg(client, regl);
+ if (l < 0)
+ return l;
+ newh = lm90_read_reg(client, regh);
+ if (newh < 0)
+ return newh;
if (oldh != newh) {
- err = lm90_read_reg(client, regl, &l);
- if (err)
- return err;
+ l = lm90_read_reg(client, regl);
+ if (l < 0)
+ return l;
}
- *value = (newh << 8) | l;
-
- return 0;
+ return (newh << 8) | l;
}
/*
@@ -473,20 +465,23 @@ static int lm90_read16(struct i2c_client *client, u8 regh, u8 regl, u16 *value)
* various registers have different meanings as a result of selecting a
* non-default remote channel.
*/
-static inline void lm90_select_remote_channel(struct i2c_client *client,
- struct lm90_data *data,
- int channel)
+static inline int lm90_select_remote_channel(struct i2c_client *client,
+ struct lm90_data *data,
+ int channel)
{
- u8 config;
+ int config;
if (data->kind == max6696) {
- lm90_read_reg(client, LM90_REG_R_CONFIG1, &config);
+ config = lm90_read_reg(client, LM90_REG_R_CONFIG1);
+ if (config < 0)
+ return config;
config &= ~0x08;
if (channel)
config |= 0x08;
i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1,
config);
}
+ return 0;
}
/*
@@ -513,118 +508,204 @@ static void lm90_set_convrate(struct i2c_client *client, struct lm90_data *data,
data->update_interval = DIV_ROUND_CLOSEST(update_interval, 64);
}
+static int lm90_update_limits(struct device *dev)
+{
+ struct lm90_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ int val;
+
+ val = lm90_read_reg(client, LM90_REG_R_LOCAL_CRIT);
+ if (val < 0)
+ return val;
+ data->temp8[LOCAL_CRIT] = val;
+
+ val = lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT);
+ if (val < 0)
+ return val;
+ data->temp8[REMOTE_CRIT] = val;
+
+ val = lm90_read_reg(client, LM90_REG_R_TCRIT_HYST);
+ if (val < 0)
+ return val;
+ data->temp_hyst = val;
+
+ val = lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH);
+ if (val < 0)
+ return val;
+ data->temp11[REMOTE_LOW] = val << 8;
+
+ if (data->flags & LM90_HAVE_REM_LIMIT_EXT) {
+ val = lm90_read_reg(client, LM90_REG_R_REMOTE_LOWL);
+ if (val < 0)
+ return val;
+ data->temp11[REMOTE_LOW] |= val;
+ }
+
+ val = lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHH);
+ if (val < 0)
+ return val;
+ data->temp11[REMOTE_HIGH] = val << 8;
+
+ if (data->flags & LM90_HAVE_REM_LIMIT_EXT) {
+ val = lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHL);
+ if (val < 0)
+ return val;
+ data->temp11[REMOTE_HIGH] |= val;
+ }
+
+ if (data->flags & LM90_HAVE_OFFSET) {
+ val = lm90_read16(client, LM90_REG_R_REMOTE_OFFSH,
+ LM90_REG_R_REMOTE_OFFSL);
+ if (val < 0)
+ return val;
+ data->temp11[REMOTE_OFFSET] = val;
+ }
+
+ if (data->flags & LM90_HAVE_EMERGENCY) {
+ val = lm90_read_reg(client, MAX6659_REG_R_LOCAL_EMERG);
+ if (val < 0)
+ return val;
+ data->temp8[LOCAL_EMERG] = val;
+
+ val = lm90_read_reg(client, MAX6659_REG_R_REMOTE_EMERG);
+ if (val < 0)
+ return val;
+ data->temp8[REMOTE_EMERG] = val;
+ }
+
+ if (data->kind == max6696) {
+ val = lm90_select_remote_channel(client, data, 1);
+ if (val < 0)
+ return val;
+
+ val = lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT);
+ if (val < 0)
+ return val;
+ data->temp8[REMOTE2_CRIT] = val;
+
+ val = lm90_read_reg(client, MAX6659_REG_R_REMOTE_EMERG);
+ if (val < 0)
+ return val;
+ data->temp8[REMOTE2_EMERG] = val;
+
+ val = lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH);
+ if (val < 0)
+ return val;
+ data->temp11[REMOTE2_LOW] = val << 8;
+
+ val = lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHH);
+ if (val < 0)
+ return val;
+ data->temp11[REMOTE2_HIGH] = val << 8;
+
+ lm90_select_remote_channel(client, data, 0);
+ }
+
+ return 0;
+}
+
static struct lm90_data *lm90_update_device(struct device *dev)
{
struct lm90_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
unsigned long next_update;
+ int val = 0;
mutex_lock(&data->update_lock);
+ if (!data->valid) {
+ val = lm90_update_limits(dev);
+ if (val < 0)
+ goto error;
+ }
+
next_update = data->last_updated +
msecs_to_jiffies(data->update_interval);
if (time_after(jiffies, next_update) || !data->valid) {
- u8 h, l;
- u8 alarms;
-
dev_dbg(&client->dev, "Updating lm90 data.\n");
- lm90_read_reg(client, LM90_REG_R_LOCAL_LOW,
- &data->temp8[LOCAL_LOW]);
- lm90_read_reg(client, LM90_REG_R_LOCAL_HIGH,
- &data->temp8[LOCAL_HIGH]);
- lm90_read_reg(client, LM90_REG_R_LOCAL_CRIT,
- &data->temp8[LOCAL_CRIT]);
- lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT,
- &data->temp8[REMOTE_CRIT]);
- lm90_read_reg(client, LM90_REG_R_TCRIT_HYST, &data->temp_hyst);
+
+ data->valid = false;
+
+ val = lm90_read_reg(client, LM90_REG_R_LOCAL_LOW);
+ if (val < 0)
+ goto error;
+ data->temp8[LOCAL_LOW] = val;
+
+ val = lm90_read_reg(client, LM90_REG_R_LOCAL_HIGH);
+ if (val < 0)
+ goto error;
+ data->temp8[LOCAL_HIGH] = val;
if (data->reg_local_ext) {
- lm90_read16(client, LM90_REG_R_LOCAL_TEMP,
- data->reg_local_ext,
- &data->temp11[LOCAL_TEMP]);
+ val = lm90_read16(client, LM90_REG_R_LOCAL_TEMP,
+ data->reg_local_ext);
+ if (val < 0)
+ goto error;
+ data->temp11[LOCAL_TEMP] = val;
} else {
- if (lm90_read_reg(client, LM90_REG_R_LOCAL_TEMP,
- &h) == 0)
- data->temp11[LOCAL_TEMP] = h << 8;
- }
- lm90_read16(client, LM90_REG_R_REMOTE_TEMPH,
- LM90_REG_R_REMOTE_TEMPL,
- &data->temp11[REMOTE_TEMP]);
-
- if (lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH, &h) == 0) {
- data->temp11[REMOTE_LOW] = h << 8;
- if ((data->flags & LM90_HAVE_REM_LIMIT_EXT)
- && lm90_read_reg(client, LM90_REG_R_REMOTE_LOWL,
- &l) == 0)
- data->temp11[REMOTE_LOW] |= l;
- }
- if (lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHH, &h) == 0) {
- data->temp11[REMOTE_HIGH] = h << 8;
- if ((data->flags & LM90_HAVE_REM_LIMIT_EXT)
- && lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHL,
- &l) == 0)
- data->temp11[REMOTE_HIGH] |= l;
+ val = lm90_read_reg(client, LM90_REG_R_LOCAL_TEMP);
+ if (val < 0)
+ goto error;
+ data->temp11[LOCAL_TEMP] = val << 8;
}
+ val = lm90_read16(client, LM90_REG_R_REMOTE_TEMPH,
+ LM90_REG_R_REMOTE_TEMPL);
+ if (val < 0)
+ goto error;
+ data->temp11[REMOTE_TEMP] = val;
- if (data->flags & LM90_HAVE_OFFSET) {
- if (lm90_read_reg(client, LM90_REG_R_REMOTE_OFFSH,
- &h) == 0
- && lm90_read_reg(client, LM90_REG_R_REMOTE_OFFSL,
- &l) == 0)
- data->temp11[REMOTE_OFFSET] = (h << 8) | l;
- }
- if (data->flags & LM90_HAVE_EMERGENCY) {
- lm90_read_reg(client, MAX6659_REG_R_LOCAL_EMERG,
- &data->temp8[LOCAL_EMERG]);
- lm90_read_reg(client, MAX6659_REG_R_REMOTE_EMERG,
- &data->temp8[REMOTE_EMERG]);
- }
- lm90_read_reg(client, LM90_REG_R_STATUS, &alarms);
- data->alarms = alarms; /* save as 16 bit value */
+ val = lm90_read_reg(client, LM90_REG_R_STATUS);
+ if (val < 0)
+ goto error;
+ data->alarms = val; /* lower 8 bits of alarms */
if (data->kind == max6696) {
- lm90_select_remote_channel(client, data, 1);
- lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT,
- &data->temp8[REMOTE2_CRIT]);
- lm90_read_reg(client, MAX6659_REG_R_REMOTE_EMERG,
- &data->temp8[REMOTE2_EMERG]);
- lm90_read16(client, LM90_REG_R_REMOTE_TEMPH,
- LM90_REG_R_REMOTE_TEMPL,
- &data->temp11[REMOTE2_TEMP]);
- if (!lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH, &h))
- data->temp11[REMOTE2_LOW] = h << 8;
- if (!lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHH, &h))
- data->temp11[REMOTE2_HIGH] = h << 8;
+ val = lm90_select_remote_channel(client, data, 1);
+ if (val < 0)
+ goto error;
+
+ val = lm90_read16(client, LM90_REG_R_REMOTE_TEMPH,
+ LM90_REG_R_REMOTE_TEMPL);
+ if (val < 0)
+ goto error;
+ data->temp11[REMOTE2_TEMP] = val;
+
lm90_select_remote_channel(client, data, 0);
- if (!lm90_read_reg(client, MAX6696_REG_R_STATUS2,
- &alarms))
- data->alarms |= alarms << 8;
+ val = lm90_read_reg(client, MAX6696_REG_R_STATUS2);
+ if (val < 0)
+ goto error;
+ data->alarms |= val << 8;
}
/*
* Re-enable ALERT# output if it was originally enabled and
* relevant alarms are all clear
*/
- if ((data->config_orig & 0x80) == 0
- && (data->alarms & data->alert_alarms) == 0) {
- u8 config;
+ if (!(data->config_orig & 0x80) &&
+ !(data->alarms & data->alert_alarms)) {
+ val = lm90_read_reg(client, LM90_REG_R_CONFIG1);
+ if (val < 0)
+ goto error;
- lm90_read_reg(client, LM90_REG_R_CONFIG1, &config);
- if (config & 0x80) {
+ if (val & 0x80) {
dev_dbg(&client->dev, "Re-enabling ALERT#\n");
i2c_smbus_write_byte_data(client,
LM90_REG_W_CONFIG1,
- config & ~0x80);
+ val & ~0x80);
}
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
+error:
mutex_unlock(&data->update_lock);
+ if (val < 0)
+ return ERR_PTR(val);
+
return data;
}
@@ -709,16 +790,14 @@ static inline int temp_from_u8_adt7461(struct lm90_data *data, u8 val)
{
if (data->flags & LM90_FLAG_ADT7461_EXT)
return (val - 64) * 1000;
- else
- return temp_from_s8(val);
+ return temp_from_s8(val);
}
static inline int temp_from_u16_adt7461(struct lm90_data *data, u16 val)
{
if (data->flags & LM90_FLAG_ADT7461_EXT)
return (val - 0x4000) / 64 * 250;
- else
- return temp_from_s16(val);
+ return temp_from_s16(val);
}
static u8 temp_to_u8_adt7461(struct lm90_data *data, long val)
@@ -729,13 +808,12 @@ static u8 temp_to_u8_adt7461(struct lm90_data *data, long val)
if (val >= 191000)
return 0xFF;
return (val + 500 + 64000) / 1000;
- } else {
- if (val <= 0)
- return 0;
- if (val >= 127000)
- return 127;
- return (val + 500) / 1000;
}
+ if (val <= 0)
+ return 0;
+ if (val >= 127000)
+ return 127;
+ return (val + 500) / 1000;
}
static u16 temp_to_u16_adt7461(struct lm90_data *data, long val)
@@ -746,13 +824,12 @@ static u16 temp_to_u16_adt7461(struct lm90_data *data, long val)
if (val >= 191750)
return 0xFFC0;
return (val + 64000 + 125) / 250 * 64;
- } else {
- if (val <= 0)
- return 0;
- if (val >= 127750)
- return 0x7FC0;
- return (val + 125) / 250 * 64;
}
+ if (val <= 0)
+ return 0;
+ if (val >= 127750)
+ return 0x7FC0;
+ return (val + 125) / 250 * 64;
}
/*
@@ -766,6 +843,9 @@ static ssize_t show_temp8(struct device *dev, struct device_attribute *devattr,
struct lm90_data *data = lm90_update_device(dev);
int temp;
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
if (data->kind == adt7461 || data->kind == tmp451)
temp = temp_from_u8_adt7461(data, data->temp8[attr->index]);
else if (data->kind == max6646)
@@ -832,6 +912,9 @@ static ssize_t show_temp11(struct device *dev, struct device_attribute *devattr,
struct lm90_data *data = lm90_update_device(dev);
int temp;
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
if (data->kind == adt7461 || data->kind == tmp451)
temp = temp_from_u16_adt7461(data, data->temp11[attr->index]);
else if (data->kind == max6646)
@@ -907,6 +990,9 @@ static ssize_t show_temphyst(struct device *dev,
struct lm90_data *data = lm90_update_device(dev);
int temp;
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
if (data->kind == adt7461 || data->kind == tmp451)
temp = temp_from_u8_adt7461(data, data->temp8[attr->index]);
else if (data->kind == max6646)
@@ -953,6 +1039,10 @@ static ssize_t show_alarms(struct device *dev, struct device_attribute *dummy,
char *buf)
{
struct lm90_data *data = lm90_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", data->alarms);
}
@@ -963,6 +1053,9 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute
struct lm90_data *data = lm90_update_device(dev);
int bitnr = attr->index;
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n", (data->alarms >> bitnr) & 1);
}
@@ -1404,8 +1497,11 @@ static int lm90_detect(struct i2c_client *client,
return 0;
}
-static void lm90_restore_conf(struct i2c_client *client, struct lm90_data *data)
+static void lm90_restore_conf(void *_data)
{
+ struct lm90_data *data = _data;
+ struct i2c_client *client = data->client;
+
/* Restore initial configuration */
i2c_smbus_write_byte_data(client, LM90_REG_W_CONVRATE,
data->convrate_orig);
@@ -1413,24 +1509,22 @@ static void lm90_restore_conf(struct i2c_client *client, struct lm90_data *data)
data->config_orig);
}
-static void lm90_init_client(struct i2c_client *client, struct lm90_data *data)
+static int lm90_init_client(struct i2c_client *client, struct lm90_data *data)
{
- u8 config, convrate;
+ int config, convrate;
- if (lm90_read_reg(client, LM90_REG_R_CONVRATE, &convrate) < 0) {
- dev_warn(&client->dev, "Failed to read convrate register!\n");
- convrate = LM90_DEF_CONVRATE_RVAL;
- }
+ convrate = lm90_read_reg(client, LM90_REG_R_CONVRATE);
+ if (convrate < 0)
+ return convrate;
data->convrate_orig = convrate;
/*
* Start the conversions.
*/
lm90_set_convrate(client, data, 500); /* 500ms; 2Hz conversion rate */
- if (lm90_read_reg(client, LM90_REG_R_CONFIG1, &config) < 0) {
- dev_warn(&client->dev, "Initialization failed!\n");
- return;
- }
+ config = lm90_read_reg(client, LM90_REG_R_CONFIG1);
+ if (config < 0)
+ return config;
data->config_orig = config;
/* Check Temperature Range Select */
@@ -1456,17 +1550,24 @@ static void lm90_init_client(struct i2c_client *client, struct lm90_data *data)
config &= 0xBF; /* run */
if (config != data->config_orig) /* Only write if changed */
i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1, config);
+
+ return devm_add_action_or_reset(&client->dev, lm90_restore_conf, data);
}
static bool lm90_is_tripped(struct i2c_client *client, u16 *status)
{
struct lm90_data *data = i2c_get_clientdata(client);
- u8 st, st2 = 0;
+ int st, st2 = 0;
- lm90_read_reg(client, LM90_REG_R_STATUS, &st);
+ st = lm90_read_reg(client, LM90_REG_R_STATUS);
+ if (st < 0)
+ return false;
- if (data->kind == max6696)
- lm90_read_reg(client, MAX6696_REG_R_STATUS2, &st2);
+ if (data->kind == max6696) {
+ st2 = lm90_read_reg(client, MAX6696_REG_R_STATUS2);
+ if (st2 < 0)
+ return false;
+ }
*status = st | (st2 << 8);
@@ -1506,6 +1607,16 @@ static irqreturn_t lm90_irq_thread(int irq, void *dev_id)
return IRQ_NONE;
}
+static void lm90_remove_pec(void *dev)
+{
+ device_remove_file(dev, &dev_attr_pec);
+}
+
+static void lm90_regulator_disable(void *regulator)
+{
+ regulator_disable(regulator);
+}
+
static int lm90_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1513,6 +1624,7 @@ static int lm90_probe(struct i2c_client *client,
struct i2c_adapter *adapter = to_i2c_adapter(dev->parent);
struct lm90_data *data;
struct regulator *regulator;
+ struct device *hwmon_dev;
int groups = 0;
int err;
@@ -1526,6 +1638,10 @@ static int lm90_probe(struct i2c_client *client,
return err;
}
+ err = devm_add_action_or_reset(dev, lm90_regulator_disable, regulator);
+ if (err)
+ return err;
+
data = devm_kzalloc(dev, sizeof(struct lm90_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -1534,8 +1650,6 @@ static int lm90_probe(struct i2c_client *client,
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
- data->regulator = regulator;
-
/* Set the device type */
data->kind = id->driver_data;
if (data->kind == adm1032) {
@@ -1557,7 +1671,11 @@ static int lm90_probe(struct i2c_client *client,
data->max_convrate = lm90_params[data->kind].max_convrate;
/* Initialize the LM90 chip */
- lm90_init_client(client, data);
+ err = lm90_init_client(client, data);
+ if (err < 0) {
+ dev_err(dev, "Failed to initialize device\n");
+ return err;
+ }
/* Register sysfs hooks */
data->groups[groups++] = &lm90_group;
@@ -1577,15 +1695,16 @@ static int lm90_probe(struct i2c_client *client,
if (client->flags & I2C_CLIENT_PEC) {
err = device_create_file(dev, &dev_attr_pec);
if (err)
- goto exit_restore;
+ return err;
+ err = devm_add_action_or_reset(dev, lm90_remove_pec, dev);
+ if (err)
+ return err;
}
- data->hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
- data, data->groups);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove_pec;
- }
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ data, data->groups);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
if (client->irq) {
dev_dbg(dev, "IRQ: %d\n", client->irq);
@@ -1595,39 +1714,21 @@ static int lm90_probe(struct i2c_client *client,
"lm90", client);
if (err < 0) {
dev_err(dev, "cannot request IRQ %d\n", client->irq);
- goto exit_unregister;
+ return err;
}
}
return 0;
-
-exit_unregister:
- hwmon_device_unregister(data->hwmon_dev);
-exit_remove_pec:
- device_remove_file(dev, &dev_attr_pec);
-exit_restore:
- lm90_restore_conf(client, data);
- regulator_disable(data->regulator);
-
- return err;
-}
-
-static int lm90_remove(struct i2c_client *client)
-{
- struct lm90_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- device_remove_file(&client->dev, &dev_attr_pec);
- lm90_restore_conf(client, data);
- regulator_disable(data->regulator);
-
- return 0;
}
-static void lm90_alert(struct i2c_client *client, unsigned int flag)
+static void lm90_alert(struct i2c_client *client, enum i2c_alert_protocol type,
+ unsigned int flag)
{
u16 alarms;
+ if (type != I2C_PROTOCOL_SMBUS_ALERT)
+ return;
+
if (lm90_is_tripped(client, &alarms)) {
/*
* Disable ALERT# output, because these chips don't implement
@@ -1636,13 +1737,16 @@ static void lm90_alert(struct i2c_client *client, unsigned int flag)
*/
struct lm90_data *data = i2c_get_clientdata(client);
- if ((data->flags & LM90_HAVE_BROKEN_ALERT)
- && (alarms & data->alert_alarms)) {
- u8 config;
+ if ((data->flags & LM90_HAVE_BROKEN_ALERT) &&
+ (alarms & data->alert_alarms)) {
+ int config;
+
dev_dbg(&client->dev, "Disabling ALERT#\n");
- lm90_read_reg(client, LM90_REG_R_CONFIG1, &config);
- i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1,
- config | 0x80);
+ config = lm90_read_reg(client, LM90_REG_R_CONFIG1);
+ if (config >= 0)
+ i2c_smbus_write_byte_data(client,
+ LM90_REG_W_CONFIG1,
+ config | 0x80);
}
} else {
dev_info(&client->dev, "Everything OK\n");
@@ -1655,7 +1759,6 @@ static struct i2c_driver lm90_driver = {
.name = "lm90",
},
.probe = lm90_probe,
- .remove = lm90_remove,
.alert = lm90_alert,
.id_table = lm90_id,
.detect = lm90_detect,
diff --git a/drivers/hwmon/sht3x.c b/drivers/hwmon/sht3x.c
new file mode 100644
index 000000000..6ea99cd6a
--- /dev/null
+++ b/drivers/hwmon/sht3x.c
@@ -0,0 +1,775 @@
+/* Sensirion SHT3x-DIS humidity and temperature sensor driver.
+ * The SHT3x comes in many different versions; this driver supports the
+ * I2C version only.
+ *
+ * Copyright (C) 2016 Sensirion AG, Switzerland
+ * Author: David Frey <david.frey@sensirion.com>
+ * Author: Pascal Sachs <pascal.sachs@sensirion.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/page.h>
+#include <linux/crc8.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/platform_data/sht3x.h>
+
+/* commands (high precision mode) */
+static const unsigned char sht3x_cmd_measure_blocking_hpm[] = { 0x2c, 0x06 };
+static const unsigned char sht3x_cmd_measure_nonblocking_hpm[] = { 0x24, 0x00 };
+
+/* commands (low power mode) */
+static const unsigned char sht3x_cmd_measure_blocking_lpm[] = { 0x2c, 0x10 };
+static const unsigned char sht3x_cmd_measure_nonblocking_lpm[] = { 0x24, 0x16 };
+
+/* commands for periodic mode */
+static const unsigned char sht3x_cmd_measure_periodic_mode[] = { 0xe0, 0x00 };
+static const unsigned char sht3x_cmd_break[] = { 0x30, 0x93 };
+
+/* commands for heater control */
+static const unsigned char sht3x_cmd_heater_on[] = { 0x30, 0x6d };
+static const unsigned char sht3x_cmd_heater_off[] = { 0x30, 0x66 };
+
+/* other commands */
+static const unsigned char sht3x_cmd_read_status_reg[] = { 0xf3, 0x2d };
+static const unsigned char sht3x_cmd_clear_status_reg[] = { 0x30, 0x41 };
+
+/* delays for non-blocking i2c commands, both in us */
+#define SHT3X_NONBLOCKING_WAIT_TIME_HPM 15000
+#define SHT3X_NONBLOCKING_WAIT_TIME_LPM 4000
+
+#define SHT3X_WORD_LEN 2
+#define SHT3X_CMD_LENGTH 2
+#define SHT3X_CRC8_LEN 1
+#define SHT3X_RESPONSE_LENGTH 6
+#define SHT3X_CRC8_POLYNOMIAL 0x31
+#define SHT3X_CRC8_INIT 0xFF
+#define SHT3X_MIN_TEMPERATURE -45000
+#define SHT3X_MAX_TEMPERATURE 130000
+#define SHT3X_MIN_HUMIDITY 0
+#define SHT3X_MAX_HUMIDITY 100000
+
+enum sht3x_chips {
+ sht3x,
+ sts3x,
+};
+
+enum sht3x_limits {
+ limit_max = 0,
+ limit_max_hyst,
+ limit_min,
+ limit_min_hyst,
+};
+
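+/*
+ * CRC-8 lookup table for SHT3X_CRC8_POLYNOMIAL; it must be populated
+ * (crc8_populate_msb()) before crc8() is first used.
+ */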
+DECLARE_CRC8_TABLE(sht3x_crc8_table);
+
+/* periodic measure commands (high precision mode) */
+static const char periodic_measure_commands_hpm[][SHT3X_CMD_LENGTH] = {
+ /* 0.5 measurements per second */
+ {0x20, 0x32},
+ /* 1 measurement per second */
+ {0x21, 0x30},
+ /* 2 measurements per second */
+ {0x22, 0x36},
+ /* 4 measurements per second */
+ {0x23, 0x34},
+ /* 10 measurements per second */
+ {0x27, 0x37},
+};
+
+/* periodic measure commands (low power mode) */
+static const char periodic_measure_commands_lpm[][SHT3X_CMD_LENGTH] = {
+ /* 0.5 measurements per second */
+ {0x20, 0x2f},
+ /* 1 measurement per second */
+ {0x21, 0x2d},
+ /* 2 measurements per second */
+ {0x22, 0x2b},
+ /* 4 measurements per second */
+ {0x23, 0x29},
+ /* 10 measurements per second */
+ {0x27, 0x2a},
+};
+
+struct sht3x_limit_commands {
+ const char read_command[SHT3X_CMD_LENGTH];
+ const char write_command[SHT3X_CMD_LENGTH];
+};
+
+static const struct sht3x_limit_commands limit_commands[] = {
+ /* temp1_max, humidity1_max */
+ [limit_max] = { {0xe1, 0x1f}, {0x61, 0x1d} },
+ /* temp1_max_hyst, humidity1_max_hyst */
+ [limit_max_hyst] = { {0xe1, 0x14}, {0x61, 0x16} },
+ /* temp1_min, humidity1_min */
+ [limit_min] = { {0xe1, 0x02}, {0x61, 0x00} },
+ /* temp1_min_hyst, humidity1_min_hyst */
+ [limit_min_hyst] = { {0xe1, 0x09}, {0x61, 0x0B} },
+};
+
+#define SHT3X_NUM_LIMIT_CMD ARRAY_SIZE(limit_commands)
+
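+/*
+ * Update interval in ms, indexed by operating mode:
+ * 0 = single shot, 1..5 = periodic at 0.5, 1, 2, 4 and 10 Hz.
+ */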
+static const u16 mode_to_update_interval[] = {
+ 0,
+ 2000,
+ 1000,
+ 500,
+ 250,
+ 100,
+};
+
+struct sht3x_data {
+ struct i2c_client *client;
+ struct mutex i2c_lock; /* lock for sending i2c commands */
+ struct mutex data_lock; /* lock for updating driver data */
+
+ u8 mode;
+ const unsigned char *command;
+ u32 wait_time; /* in us */
+ unsigned long last_update; /* last update in periodic mode */
+
+ struct sht3x_platform_data setup;
+
+ /*
+ * cached values for temperature and humidity and limits
+ * the limits arrays have the following order:
+ * max, max_hyst, min, min_hyst
+ */
+ int temperature;
+ int temperature_limits[SHT3X_NUM_LIMIT_CMD];
+ u32 humidity;
+ u32 humidity_limits[SHT3X_NUM_LIMIT_CMD];
+};
+
+static u8 get_mode_from_update_interval(u16 value)
+{
+ size_t index;
+ u8 number_of_modes = ARRAY_SIZE(mode_to_update_interval);
+
+ if (value == 0)
+ return 0;
+
+ /* find the next faster update interval, e.g. 300 ms maps to the 250 ms mode */
+ for (index = 1; index < number_of_modes; index++) {
+ if (mode_to_update_interval[index] <= value)
+ return index;
+ }
+
+ return number_of_modes - 1;
+}
+
+static int sht3x_read_from_command(struct i2c_client *client,
+ struct sht3x_data *data,
+ const char *command,
+ char *buf, int length, u32 wait_time)
+{
+ int ret;
+
+ mutex_lock(&data->i2c_lock);
+ ret = i2c_master_send(client, command, SHT3X_CMD_LENGTH);
+
+ if (ret != SHT3X_CMD_LENGTH) {
+ ret = ret < 0 ? ret : -EIO;
+ goto out;
+ }
+
+ if (wait_time)
+ usleep_range(wait_time, wait_time + 1000);
+
+ ret = i2c_master_recv(client, buf, length);
+ if (ret != length) {
+ ret = ret < 0 ? ret : -EIO;
+ goto out;
+ }
+
+ ret = 0;
+out:
+ mutex_unlock(&data->i2c_lock);
+ return ret;
+}
+
+static int sht3x_extract_temperature(u16 raw)
+{
+ /*
+ * From datasheet:
+ * T = -45 + 175 * ST / 2^16
+ * Adapted for integer fixed point (3 digit) arithmetic:
+ * T_milli = 175000 * ST / 65536 - 45000 = ((21875 * ST) >> 13) - 45000
+ */
+ return ((21875 * (int)raw) >> 13) - 45000;
+}
+
+static u32 sht3x_extract_humidity(u16 raw)
+{
+ /*
+ * From datasheet:
+ * RH = 100 * SRH / 2^16
+ * Adapted for integer fixed point (3 digit) arithmetic:
+ * RH_milli = 100000 * SRH / 65536 = (12500 * SRH) >> 13
+ */
+ return (12500 * (u32)raw) >> 13;
+}
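+
+/*
+ * Worked examples for the two conversions above (hypothetical raw words):
+ * a temperature word of 0x8000 converts to -45000 + 175000 / 2 = 42500
+ * (42.5 degrees C); a humidity word of 0x6666 (~0.4 * 2^16) converts to
+ * roughly 40000 milli-percent RH.
+ */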
+
+static struct sht3x_data *sht3x_update_client(struct device *dev)
+{
+ struct sht3x_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ u16 interval_ms = mode_to_update_interval[data->mode];
+ unsigned long interval_jiffies = msecs_to_jiffies(interval_ms);
+ unsigned char buf[SHT3X_RESPONSE_LENGTH];
+ u16 val;
+ int ret = 0;
+
+ mutex_lock(&data->data_lock);
+ /*
+ * Only update cached readings once per update interval in periodic
+ * mode. In single shot mode the sensor measures values on demand, so
+ * every time the sysfs interface is called, a measurement is triggered.
+ * In periodic mode, however, the measurement process is handled
+ * internally by the sensor and reading out sensor values only makes
+ * sense if a new reading is available.
+ */
+ if (time_after(jiffies, data->last_update + interval_jiffies)) {
+ ret = sht3x_read_from_command(client, data, data->command, buf,
+ sizeof(buf), data->wait_time);
+ if (ret)
+ goto out;
+
+ val = be16_to_cpup((__be16 *)buf);
+ data->temperature = sht3x_extract_temperature(val);
+ val = be16_to_cpup((__be16 *)(buf + 3));
+ data->humidity = sht3x_extract_humidity(val);
+ data->last_update = jiffies;
+ }
+
+out:
+ mutex_unlock(&data->data_lock);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return data;
+}
+
+/* sysfs attributes */
+static ssize_t temp1_input_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sht3x_data *data = sht3x_update_client(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%d\n", data->temperature);
+}
+
+static ssize_t humidity1_input_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sht3x_data *data = sht3x_update_client(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%u\n", data->humidity);
+}
+
+/*
+ * limits_update must only be called from probe or with data_lock held
+ */
+static int limits_update(struct sht3x_data *data)
+{
+ int ret;
+ u8 index;
+ int temperature;
+ u32 humidity;
+ u16 raw;
+ char buffer[SHT3X_RESPONSE_LENGTH];
+ const struct sht3x_limit_commands *commands;
+ struct i2c_client *client = data->client;
+
+ for (index = 0; index < SHT3X_NUM_LIMIT_CMD; index++) {
+ commands = &limit_commands[index];
+ ret = sht3x_read_from_command(client, data,
+ commands->read_command, buffer,
+ SHT3X_RESPONSE_LENGTH, 0);
+
+ if (ret)
+ return ret;
+
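+ /*
+ * Each limit word packs the humidity MSBs in bits 15:9 and the
+ * temperature MSBs in bits 8:0; shift/mask them back to full
+ * 16-bit range before converting.
+ */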
+ raw = be16_to_cpup((__be16 *)buffer);
+ temperature = sht3x_extract_temperature((raw & 0x01ff) << 7);
+ humidity = sht3x_extract_humidity(raw & 0xfe00);
+ data->temperature_limits[index] = temperature;
+ data->humidity_limits[index] = humidity;
+ }
+
+ return ret;
+}
+
+static ssize_t temp1_limit_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct sht3x_data *data = dev_get_drvdata(dev);
+ u8 index = to_sensor_dev_attr(attr)->index;
+ int temperature_limit = data->temperature_limits[index];
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", temperature_limit);
+}
+
+static ssize_t humidity1_limit_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct sht3x_data *data = dev_get_drvdata(dev);
+ u8 index = to_sensor_dev_attr(attr)->index;
+ u32 humidity_limit = data->humidity_limits[index];
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", humidity_limit);
+}
+
+/*
+ * limit_store must only be called with data_lock held
+ */
+static ssize_t limit_store(struct device *dev,
+ size_t count,
+ u8 index,
+ int temperature,
+ u32 humidity)
+{
+ char buffer[SHT3X_CMD_LENGTH + SHT3X_WORD_LEN + SHT3X_CRC8_LEN];
+ char *position = buffer;
+ int ret;
+ u16 raw;
+ struct sht3x_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ const struct sht3x_limit_commands *commands;
+
+ commands = &limit_commands[index];
+
+ memcpy(position, commands->write_command, SHT3X_CMD_LENGTH);
+ position += SHT3X_CMD_LENGTH;
+ /*
+ * ST = (T + 45) / 175 * 2^16
+ * SRH = RH / 100 * 2^16
+ * Adapted for fixed point arithmetic and packed the same way as
+ * limits_update() unpacks it: the temperature MSBs end up in bits
+ * 8:0 and the humidity MSBs in bits 15:9, hence the extra >> 7 on
+ * the temperature and the 0xfe00 mask on the humidity
+ * (24543 ~= 2^32 / 175000, 42950 ~= 2^32 / 100000).
+ */
+ raw = ((u32)(temperature + 45000) * 24543) >> (16 + 7);
+ raw |= ((humidity * 42950) >> 16) & 0xfe00;
+
+ *((__be16 *)position) = cpu_to_be16(raw);
+ position += SHT3X_WORD_LEN;
+ *position = crc8(sht3x_crc8_table,
+ position - SHT3X_WORD_LEN,
+ SHT3X_WORD_LEN,
+ SHT3X_CRC8_INIT);
+
+ mutex_lock(&data->i2c_lock);
+ ret = i2c_master_send(client, buffer, sizeof(buffer));
+ mutex_unlock(&data->i2c_lock);
+
+ if (ret != sizeof(buffer))
+ return ret < 0 ? ret : -EIO;
+
+ data->temperature_limits[index] = temperature;
+ data->humidity_limits[index] = humidity;
+ return count;
+}
+
+static ssize_t temp1_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int temperature;
+ int ret;
+ struct sht3x_data *data = dev_get_drvdata(dev);
+ u8 index = to_sensor_dev_attr(attr)->index;
+
+ ret = kstrtoint(buf, 0, &temperature);
+ if (ret)
+ return ret;
+
+ temperature = clamp_val(temperature, SHT3X_MIN_TEMPERATURE,
+ SHT3X_MAX_TEMPERATURE);
+ mutex_lock(&data->data_lock);
+ ret = limit_store(dev, count, index, temperature,
+ data->humidity_limits[index]);
+ mutex_unlock(&data->data_lock);
+
+ return ret;
+}
+
+static ssize_t humidity1_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ u32 humidity;
+ int ret;
+ struct sht3x_data *data = dev_get_drvdata(dev);
+ u8 index = to_sensor_dev_attr(attr)->index;
+
+ ret = kstrtou32(buf, 0, &humidity);
+ if (ret)
+ return ret;
+
+ humidity = clamp_val(humidity, SHT3X_MIN_HUMIDITY, SHT3X_MAX_HUMIDITY);
+ mutex_lock(&data->data_lock);
+ ret = limit_store(dev, count, index, data->temperature_limits[index],
+ humidity);
+ mutex_unlock(&data->data_lock);
+
+ return ret;
+}
+
+static void sht3x_select_command(struct sht3x_data *data)
+{
+ /*
+ * In blocking mode (clock stretching mode) the I2C bus
+ * is blocked for other traffic, thus the call to i2c_master_recv()
+ * will wait until the data is ready. For non-blocking mode, we
+ * have to wait ourselves.
+ */
+ if (data->mode > 0) {
+ data->command = sht3x_cmd_measure_periodic_mode;
+ data->wait_time = 0;
+ } else if (data->setup.blocking_io) {
+ data->command = data->setup.high_precision ?
+ sht3x_cmd_measure_blocking_hpm :
+ sht3x_cmd_measure_blocking_lpm;
+ data->wait_time = 0;
+ } else {
+ if (data->setup.high_precision) {
+ data->command = sht3x_cmd_measure_nonblocking_hpm;
+ data->wait_time = SHT3X_NONBLOCKING_WAIT_TIME_HPM;
+ } else {
+ data->command = sht3x_cmd_measure_nonblocking_lpm;
+ data->wait_time = SHT3X_NONBLOCKING_WAIT_TIME_LPM;
+ }
+ }
+}
+
+static int status_register_read(struct device *dev,
+ struct device_attribute *attr,
+ char *buffer, int length)
+{
+ int ret;
+ struct sht3x_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+
+ ret = sht3x_read_from_command(client, data, sht3x_cmd_read_status_reg,
+ buffer, length, 0);
+
+ return ret;
+}
+
+static ssize_t temp1_alarm_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ char buffer[SHT3X_WORD_LEN + SHT3X_CRC8_LEN];
+ int ret;
+
+ ret = status_register_read(dev, attr, buffer,
+ SHT3X_WORD_LEN + SHT3X_CRC8_LEN);
+ if (ret)
+ return ret;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", !!(buffer[0] & 0x04));
+}
+
+static ssize_t humidity1_alarm_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ char buffer[SHT3X_WORD_LEN + SHT3X_CRC8_LEN];
+ int ret;
+
+ ret = status_register_read(dev, attr, buffer,
+ SHT3X_WORD_LEN + SHT3X_CRC8_LEN);
+ if (ret)
+ return ret;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", !!(buffer[0] & 0x08));
+}
+
+static ssize_t heater_enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ char buffer[SHT3X_WORD_LEN + SHT3X_CRC8_LEN];
+ int ret;
+
+ ret = status_register_read(dev, attr, buffer,
+ SHT3X_WORD_LEN + SHT3X_CRC8_LEN);
+ if (ret)
+ return ret;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", !!(buffer[0] & 0x20));
+}
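
The three show() callbacks above each test one flag in the most significant byte of the 16-bit status word returned by status_register_read(); a condensed sketch of that decoding, with hypothetical mask names derived from the literals used above:

/* flags in the MSB of the SHT3x status word, per the callbacks above */
#define SHT3X_STATUS_ALERT_T    0x04    /* temperature tracking alert */
#define SHT3X_STATUS_ALERT_RH   0x08    /* humidity tracking alert */
#define SHT3X_STATUS_HEATER     0x20    /* internal heater enabled */

static bool sht3x_status_flag(const char *buffer, char mask)
{
        return buffer[0] & mask;
}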
+
+static ssize_t heater_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct sht3x_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ int ret;
+ bool status;
+
+ ret = kstrtobool(buf, &status);
+ if (ret)
+ return ret;
+
+ mutex_lock(&data->i2c_lock);
+
+ if (status)
+ ret = i2c_master_send(client, (char *)&sht3x_cmd_heater_on,
+ SHT3X_CMD_LENGTH);
+ else
+ ret = i2c_master_send(client, (char *)&sht3x_cmd_heater_off,
+ SHT3X_CMD_LENGTH);
+
+ mutex_unlock(&data->i2c_lock);
+
+ if (ret != SHT3X_CMD_LENGTH)
+ return ret < 0 ? ret : -EIO;
+
+ return count;
+}
+
+static ssize_t update_interval_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct sht3x_data *data = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ mode_to_update_interval[data->mode]);
+}
+
+static ssize_t update_interval_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ u16 update_interval;
+ u8 mode;
+ int ret;
+ const char *command;
+ struct sht3x_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+
+ ret = kstrtou16(buf, 0, &update_interval);
+ if (ret)
+ return ret;
+
+ mode = get_mode_from_update_interval(update_interval);
+
+ mutex_lock(&data->data_lock);
+ /* mode did not change */
+ if (mode == data->mode) {
+ mutex_unlock(&data->data_lock);
+ return count;
+ }
+
+ mutex_lock(&data->i2c_lock);
+ /*
+ * Abort periodic measure mode.
+ * To do any changes to the configuration while in periodic mode, we
+ * have to send a break command to the sensor, which then falls back
+ * to single shot (mode = 0).
+ */
+ if (data->mode > 0) {
+ ret = i2c_master_send(client, sht3x_cmd_break,
+ SHT3X_CMD_LENGTH);
+ if (ret != SHT3X_CMD_LENGTH)
+ goto out;
+ data->mode = 0;
+ }
+
+ if (mode > 0) {
+ if (data->setup.high_precision)
+ command = periodic_measure_commands_hpm[mode - 1];
+ else
+ command = periodic_measure_commands_lpm[mode - 1];
+
+ /* select mode */
+ ret = i2c_master_send(client, command, SHT3X_CMD_LENGTH);
+ if (ret != SHT3X_CMD_LENGTH)
+ goto out;
+ }
+
+ /* select mode and command */
+ data->mode = mode;
+ sht3x_select_command(data);
+
+out:
+ mutex_unlock(&data->i2c_lock);
+ mutex_unlock(&data->data_lock);
+ if (ret != SHT3X_CMD_LENGTH)
+ return ret < 0 ? ret : -EIO;
+
+ return count;
+}
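
From user space, switching modes is a plain write to the update_interval attribute; a minimal sketch, assuming the device landed on hwmon0 (the index varies per system, as may the set of accepted intervals):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/sys/class/hwmon/hwmon0/update_interval", O_WRONLY);

        if (fd < 0)
                return 1;
        write(fd, "1000", 4);   /* request one measurement per second */
        close(fd);
        return 0;
}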
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, temp1_input_show, NULL, 0);
+static SENSOR_DEVICE_ATTR(humidity1_input, S_IRUGO, humidity1_input_show,
+ NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR,
+ temp1_limit_show, temp1_limit_store,
+ limit_max);
+static SENSOR_DEVICE_ATTR(humidity1_max, S_IRUGO | S_IWUSR,
+ humidity1_limit_show, humidity1_limit_store,
+ limit_max);
+static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR,
+ temp1_limit_show, temp1_limit_store,
+ limit_max_hyst);
+static SENSOR_DEVICE_ATTR(humidity1_max_hyst, S_IRUGO | S_IWUSR,
+ humidity1_limit_show, humidity1_limit_store,
+ limit_max_hyst);
+static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO | S_IWUSR,
+ temp1_limit_show, temp1_limit_store,
+ limit_min);
+static SENSOR_DEVICE_ATTR(humidity1_min, S_IRUGO | S_IWUSR,
+ humidity1_limit_show, humidity1_limit_store,
+ limit_min);
+static SENSOR_DEVICE_ATTR(temp1_min_hyst, S_IRUGO | S_IWUSR,
+ temp1_limit_show, temp1_limit_store,
+ limit_min_hyst);
+static SENSOR_DEVICE_ATTR(humidity1_min_hyst, S_IRUGO | S_IWUSR,
+ humidity1_limit_show, humidity1_limit_store,
+ limit_min_hyst);
+static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, temp1_alarm_show, NULL, 0);
+static SENSOR_DEVICE_ATTR(humidity1_alarm, S_IRUGO, humidity1_alarm_show,
+ NULL, 0);
+static SENSOR_DEVICE_ATTR(heater_enable, S_IRUGO | S_IWUSR,
+ heater_enable_show, heater_enable_store, 0);
+static SENSOR_DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR,
+ update_interval_show, update_interval_store, 0);
+
+static struct attribute *sht3x_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_humidity1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
+ &sensor_dev_attr_humidity1_max.dev_attr.attr,
+ &sensor_dev_attr_humidity1_max_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp1_min.dev_attr.attr,
+ &sensor_dev_attr_temp1_min_hyst.dev_attr.attr,
+ &sensor_dev_attr_humidity1_min.dev_attr.attr,
+ &sensor_dev_attr_humidity1_min_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp1_alarm.dev_attr.attr,
+ &sensor_dev_attr_humidity1_alarm.dev_attr.attr,
+ &sensor_dev_attr_heater_enable.dev_attr.attr,
+ &sensor_dev_attr_update_interval.dev_attr.attr,
+ NULL
+};
+
+static struct attribute *sts3x_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(sht3x);
+ATTRIBUTE_GROUPS(sts3x);
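
ATTRIBUTE_GROUPS() is what produces the sht3x_groups and sts3x_groups arrays consumed in sht3x_probe() below; for sht3x it expands to roughly:

static const struct attribute_group sht3x_group = {
        .attrs = sht3x_attrs,
};

static const struct attribute_group *sht3x_groups[] = {
        &sht3x_group,
        NULL
};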
+
+static int sht3x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct sht3x_data *data;
+ struct device *hwmon_dev;
+ struct i2c_adapter *adap = client->adapter;
+ struct device *dev = &client->dev;
+ const struct attribute_group **attribute_groups;
+
+ /*
+ * We require full I2C support since the sht3x uses multi-byte reads
+ * and writes as well as multi-byte commands, which are not supported
+ * by the SMBus protocol.
+ */
+ if (!i2c_check_functionality(adap, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ ret = i2c_master_send(client, sht3x_cmd_clear_status_reg,
+ SHT3X_CMD_LENGTH);
+ if (ret != SHT3X_CMD_LENGTH)
+ return ret < 0 ? ret : -ENODEV;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->setup.blocking_io = false;
+ data->setup.high_precision = true;
+ data->mode = 0;
+ data->last_update = jiffies - msecs_to_jiffies(3000);
+ data->client = client;
+ crc8_populate_msb(sht3x_crc8_table, SHT3X_CRC8_POLYNOMIAL);
+
+ if (client->dev.platform_data)
+ data->setup = *(struct sht3x_platform_data *)dev->platform_data;
+
+ sht3x_select_command(data);
+
+ mutex_init(&data->i2c_lock);
+ mutex_init(&data->data_lock);
+
+ ret = limits_update(data);
+ if (ret)
+ return ret;
+
+ if (id->driver_data == sts3x)
+ attribute_groups = sts3x_groups;
+ else
+ attribute_groups = sht3x_groups;
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev,
+ client->name,
+ data,
+ attribute_groups);
+
+ if (IS_ERR(hwmon_dev))
+ dev_dbg(dev, "unable to register hwmon device\n");
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+/* device ID table */
+static const struct i2c_device_id sht3x_ids[] = {
+ {"sht3x", sht3x},
+ {"sts3x", sts3x},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, sht3x_ids);
+
+static struct i2c_driver sht3x_i2c_driver = {
+ .driver.name = "sht3x",
+ .probe = sht3x_probe,
+ .id_table = sht3x_ids,
+};
+
+module_i2c_driver(sht3x_i2c_driver);
+
+MODULE_AUTHOR("David Frey <david.frey@sensirion.com>");
+MODULE_AUTHOR("Pascal Sachs <pascal.sachs@sensirion.com>");
+MODULE_DESCRIPTION("Sensirion SHT3x humidity and temperature sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index f1e96fd7f..8479ac5eb 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -11,12 +11,9 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include <linux/delay.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -27,6 +24,7 @@
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/jiffies.h>
+#include <linux/regmap.h>
#include <linux/thermal.h>
#include <linux/of.h>
@@ -50,14 +48,23 @@
#define TMP102_TLOW_REG 0x02
#define TMP102_THIGH_REG 0x03
+#define TMP102_CONFREG_MASK (TMP102_CONF_SD | TMP102_CONF_TM | \
+ TMP102_CONF_POL | TMP102_CONF_F0 | \
+ TMP102_CONF_F1 | TMP102_CONF_OS | \
+ TMP102_CONF_EM | TMP102_CONF_AL | \
+ TMP102_CONF_CR0 | TMP102_CONF_CR1)
+
+#define TMP102_CONFIG_CLEAR (TMP102_CONF_SD | TMP102_CONF_OS | \
+ TMP102_CONF_CR0)
+#define TMP102_CONFIG_SET (TMP102_CONF_TM | TMP102_CONF_EM | \
+ TMP102_CONF_CR1)
+
+#define CONVERSION_TIME_MS 35 /* in milliseconds */
+
struct tmp102 {
- struct i2c_client *client;
- struct device *hwmon_dev;
- struct mutex lock;
+ struct regmap *regmap;
u16 config_orig;
- unsigned long last_update;
- int temp[3];
- bool first_time;
+ unsigned long ready_time;
};
/* convert left adjusted 13-bit TMP102 register value to milliCelsius */
@@ -72,44 +79,22 @@ static inline u16 tmp102_mC_to_reg(int val)
return (val * 128) / 1000;
}
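
The two conversion helpers round-trip exactly at the format's resolution; a standalone check, with tmp102_reg_to_mC() written out as assumed from the left-adjusted 13-bit format described above (the in-tree version may differ in detail):

#include <assert.h>
#include <stdint.h>

/* as above: millicelsius to left-adjusted 13-bit register value */
static inline uint16_t tmp102_mC_to_reg(int val)
{
        return (val * 128) / 1000;
}

/* assumed inverse: one LSB of the left-adjusted value is 1000/128 mC */
static inline int tmp102_reg_to_mC(int16_t val)
{
        return (val * 1000) / 128;
}

int main(void)
{
        assert(tmp102_mC_to_reg(1000) == 0x0080);
        assert(tmp102_reg_to_mC(0x0080) == 1000);
        return 0;
}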
-static const u8 tmp102_reg[] = {
- TMP102_TEMP_REG,
- TMP102_TLOW_REG,
- TMP102_THIGH_REG,
-};
-
-static struct tmp102 *tmp102_update_device(struct device *dev)
-{
- struct tmp102 *tmp102 = dev_get_drvdata(dev);
- struct i2c_client *client = tmp102->client;
-
- mutex_lock(&tmp102->lock);
- if (time_after(jiffies, tmp102->last_update + HZ / 3)) {
- int i;
- for (i = 0; i < ARRAY_SIZE(tmp102->temp); ++i) {
- int status = i2c_smbus_read_word_swapped(client,
- tmp102_reg[i]);
- if (status > -1)
- tmp102->temp[i] = tmp102_reg_to_mC(status);
- }
- tmp102->last_update = jiffies;
- tmp102->first_time = false;
- }
- mutex_unlock(&tmp102->lock);
- return tmp102;
-}
-
static int tmp102_read_temp(void *dev, int *temp)
{
- struct tmp102 *tmp102 = tmp102_update_device(dev);
+ struct tmp102 *tmp102 = dev_get_drvdata(dev);
+ unsigned int reg;
+ int ret;
- /* Is it too early even to return a conversion? */
- if (tmp102->first_time) {
+ if (time_before(jiffies, tmp102->ready_time)) {
dev_dbg(dev, "%s: Conversion not ready yet..\n", __func__);
return -EAGAIN;
}
- *temp = tmp102->temp[0];
+ ret = regmap_read(tmp102->regmap, TMP102_TEMP_REG, &reg);
+ if (ret < 0)
+ return ret;
+
+ *temp = tmp102_reg_to_mC(reg);
return 0;
}
@@ -119,13 +104,20 @@ static ssize_t tmp102_show_temp(struct device *dev,
char *buf)
{
struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
- struct tmp102 *tmp102 = tmp102_update_device(dev);
+ struct tmp102 *tmp102 = dev_get_drvdata(dev);
+ int regaddr = sda->index;
+ unsigned int reg;
+ int err;
- /* Is it too early even to return a read? */
- if (tmp102->first_time)
+ if (regaddr == TMP102_TEMP_REG &&
+ time_before(jiffies, tmp102->ready_time))
return -EAGAIN;
- return sprintf(buf, "%d\n", tmp102->temp[sda->index]);
+ err = regmap_read(tmp102->regmap, regaddr, &reg);
+ if (err < 0)
+ return err;
+
+ return sprintf(buf, "%d\n", tmp102_reg_to_mC(reg));
}
static ssize_t tmp102_set_temp(struct device *dev,
@@ -134,29 +126,26 @@ static ssize_t tmp102_set_temp(struct device *dev,
{
struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
struct tmp102 *tmp102 = dev_get_drvdata(dev);
- struct i2c_client *client = tmp102->client;
+ int reg = sda->index;
long val;
- int status;
+ int err;
if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
val = clamp_val(val, -256000, 255000);
- mutex_lock(&tmp102->lock);
- tmp102->temp[sda->index] = val;
- status = i2c_smbus_write_word_swapped(client, tmp102_reg[sda->index],
- tmp102_mC_to_reg(val));
- mutex_unlock(&tmp102->lock);
- return status ? : count;
+ err = regmap_write(tmp102->regmap, reg, tmp102_mC_to_reg(val));
+ return err ? : count;
}
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tmp102_show_temp, NULL , 0);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tmp102_show_temp, NULL,
+ TMP102_TEMP_REG);
static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO, tmp102_show_temp,
- tmp102_set_temp, 1);
+ tmp102_set_temp, TMP102_TLOW_REG);
static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, tmp102_show_temp,
- tmp102_set_temp, 2);
+ tmp102_set_temp, TMP102_THIGH_REG);
static struct attribute *tmp102_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
@@ -166,20 +155,46 @@ static struct attribute *tmp102_attrs[] = {
};
ATTRIBUTE_GROUPS(tmp102);
-#define TMP102_CONFIG (TMP102_CONF_TM | TMP102_CONF_EM | TMP102_CONF_CR1)
-#define TMP102_CONFIG_RD_ONLY (TMP102_CONF_R0 | TMP102_CONF_R1 | TMP102_CONF_AL)
-
static const struct thermal_zone_of_device_ops tmp102_of_thermal_ops = {
.get_temp = tmp102_read_temp,
};
+static void tmp102_restore_config(void *data)
+{
+ struct tmp102 *tmp102 = data;
+
+ regmap_write(tmp102->regmap, TMP102_CONF_REG, tmp102->config_orig);
+}
+
+static bool tmp102_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+ return reg != TMP102_TEMP_REG;
+}
+
+static bool tmp102_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ return reg == TMP102_TEMP_REG;
+}
+
+static const struct regmap_config tmp102_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = TMP102_THIGH_REG,
+ .writeable_reg = tmp102_is_writeable_reg,
+ .volatile_reg = tmp102_is_volatile_reg,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+ .cache_type = REGCACHE_RBTREE,
+ .use_single_rw = true,
+};
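
With this configuration, only the temperature register bypasses the rbtree cache, and it is also the only register the map refuses to write; schematically (error handling elided):

unsigned int val;

/* TEMP is volatile: every read goes out on the I2C bus */
regmap_read(tmp102->regmap, TMP102_TEMP_REG, &val);

/*
 * THIGH is cacheable: the first read fills the cache, later
 * reads are served from it without touching the bus.
 */
regmap_read(tmp102->regmap, TMP102_THIGH_REG, &val);

/* rejected: writeable_reg() returns false for TEMP */
regmap_write(tmp102->regmap, TMP102_TEMP_REG, 0);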
+
static int tmp102_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
struct tmp102 *tmp102;
- int status;
+ unsigned int regval;
+ int err;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_WORD_DATA)) {
@@ -193,101 +208,84 @@ static int tmp102_probe(struct i2c_client *client,
return -ENOMEM;
i2c_set_clientdata(client, tmp102);
- tmp102->client = client;
- status = i2c_smbus_read_word_swapped(client, TMP102_CONF_REG);
- if (status < 0) {
+ tmp102->regmap = devm_regmap_init_i2c(client, &tmp102_regmap_config);
+ if (IS_ERR(tmp102->regmap))
+ return PTR_ERR(tmp102->regmap);
+
+ err = regmap_read(tmp102->regmap, TMP102_CONF_REG, &regval);
+ if (err < 0) {
dev_err(dev, "error reading config register\n");
- return status;
+ return err;
}
- tmp102->config_orig = status;
- status = i2c_smbus_write_word_swapped(client, TMP102_CONF_REG,
- TMP102_CONFIG);
- if (status < 0) {
- dev_err(dev, "error writing config register\n");
- goto fail_restore_config;
+
+ if ((regval & ~TMP102_CONFREG_MASK) !=
+ (TMP102_CONF_R0 | TMP102_CONF_R1)) {
+ dev_err(dev, "unexpected config register value\n");
+ return -ENODEV;
}
- status = i2c_smbus_read_word_swapped(client, TMP102_CONF_REG);
- if (status < 0) {
- dev_err(dev, "error reading config register\n");
- goto fail_restore_config;
+
+ tmp102->config_orig = regval;
+
+ err = devm_add_action_or_reset(dev, tmp102_restore_config, tmp102);
+ if (err)
+ return err;
+
+ regval &= ~TMP102_CONFIG_CLEAR;
+ regval |= TMP102_CONFIG_SET;
+
+ err = regmap_write(tmp102->regmap, TMP102_CONF_REG, regval);
+ if (err < 0) {
+ dev_err(dev, "error writing config register\n");
+ return err;
}
- status &= ~TMP102_CONFIG_RD_ONLY;
- if (status != TMP102_CONFIG) {
- dev_err(dev, "config settings did not stick\n");
- status = -ENODEV;
- goto fail_restore_config;
+
+ tmp102->ready_time = jiffies;
+ if (tmp102->config_orig & TMP102_CONF_SD) {
+ /*
+ * Mark that we are not ready with data until the first
+ * conversion is complete
+ */
+ tmp102->ready_time += msecs_to_jiffies(CONVERSION_TIME_MS);
}
- tmp102->last_update = jiffies;
- /* Mark that we are not ready with data until conversion is complete */
- tmp102->first_time = true;
- mutex_init(&tmp102->lock);
- hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
- tmp102, tmp102_groups);
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ tmp102,
+ tmp102_groups);
if (IS_ERR(hwmon_dev)) {
dev_dbg(dev, "unable to register hwmon device\n");
- status = PTR_ERR(hwmon_dev);
- goto fail_restore_config;
+ return PTR_ERR(hwmon_dev);
}
- tmp102->hwmon_dev = hwmon_dev;
devm_thermal_zone_of_sensor_register(hwmon_dev, 0, hwmon_dev,
&tmp102_of_thermal_ops);
dev_info(dev, "initialized\n");
return 0;
-
-fail_restore_config:
- i2c_smbus_write_word_swapped(client, TMP102_CONF_REG,
- tmp102->config_orig);
- return status;
-}
-
-static int tmp102_remove(struct i2c_client *client)
-{
- struct tmp102 *tmp102 = i2c_get_clientdata(client);
-
- hwmon_device_unregister(tmp102->hwmon_dev);
-
- /* Stop monitoring if device was stopped originally */
- if (tmp102->config_orig & TMP102_CONF_SD) {
- int config;
-
- config = i2c_smbus_read_word_swapped(client, TMP102_CONF_REG);
- if (config >= 0)
- i2c_smbus_write_word_swapped(client, TMP102_CONF_REG,
- config | TMP102_CONF_SD);
- }
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
static int tmp102_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- int config;
-
- config = i2c_smbus_read_word_swapped(client, TMP102_CONF_REG);
- if (config < 0)
- return config;
+ struct tmp102 *tmp102 = i2c_get_clientdata(client);
- config |= TMP102_CONF_SD;
- return i2c_smbus_write_word_swapped(client, TMP102_CONF_REG, config);
+ return regmap_update_bits(tmp102->regmap, TMP102_CONF_REG,
+ TMP102_CONF_SD, TMP102_CONF_SD);
}
static int tmp102_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- int config;
+ struct tmp102 *tmp102 = i2c_get_clientdata(client);
+ int err;
+
+ err = regmap_update_bits(tmp102->regmap, TMP102_CONF_REG,
+ TMP102_CONF_SD, 0);
- config = i2c_smbus_read_word_swapped(client, TMP102_CONF_REG);
- if (config < 0)
- return config;
+ tmp102->ready_time = jiffies + msecs_to_jiffies(CONVERSION_TIME_MS);
- config &= ~TMP102_CONF_SD;
- return i2c_smbus_write_word_swapped(client, TMP102_CONF_REG, config);
+ return err;
}
#endif /* CONFIG_PM */
@@ -303,7 +301,6 @@ static struct i2c_driver tmp102_driver = {
.driver.name = DRIVER_NAME,
.driver.pm = &tmp102_dev_pm_ops,
.probe = tmp102_probe,
- .remove = tmp102_remove,
.id_table = tmp102_id,
};
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index ccf4cffe0..eeeed2c7d 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -47,7 +47,7 @@
static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4c, 0x4d,
0x4e, 0x4f, I2C_CLIENT_END };
-enum chips { tmp401, tmp411, tmp431, tmp432, tmp435 };
+enum chips { tmp401, tmp411, tmp431, tmp432, tmp435, tmp461 };
/*
* The TMP401 registers, note some registers have different addresses for
@@ -62,31 +62,34 @@ enum chips { tmp401, tmp411, tmp431, tmp432, tmp435 };
#define TMP401_MANUFACTURER_ID_REG 0xFE
#define TMP401_DEVICE_ID_REG 0xFF
-static const u8 TMP401_TEMP_MSB_READ[6][2] = {
+static const u8 TMP401_TEMP_MSB_READ[7][2] = {
{ 0x00, 0x01 }, /* temp */
{ 0x06, 0x08 }, /* low limit */
{ 0x05, 0x07 }, /* high limit */
{ 0x20, 0x19 }, /* therm (crit) limit */
{ 0x30, 0x34 }, /* lowest */
{ 0x32, 0x36 }, /* highest */
+ { 0, 0x11 }, /* offset */
};
-static const u8 TMP401_TEMP_MSB_WRITE[6][2] = {
+static const u8 TMP401_TEMP_MSB_WRITE[7][2] = {
{ 0, 0 }, /* temp (unused) */
{ 0x0C, 0x0E }, /* low limit */
{ 0x0B, 0x0D }, /* high limit */
{ 0x20, 0x19 }, /* therm (crit) limit */
{ 0x30, 0x34 }, /* lowest */
{ 0x32, 0x36 }, /* highest */
+ { 0, 0x11 }, /* offset */
};
-static const u8 TMP401_TEMP_LSB[6][2] = {
+static const u8 TMP401_TEMP_LSB[7][2] = {
{ 0x15, 0x10 }, /* temp */
{ 0x17, 0x14 }, /* low limit */
{ 0x16, 0x13 }, /* high limit */
{ 0, 0 }, /* therm (crit) limit (unused) */
{ 0x31, 0x35 }, /* lowest */
{ 0x33, 0x37 }, /* highest */
+ { 0, 0x12 }, /* offset */
};
static const u8 TMP432_TEMP_MSB_READ[4][3] = {
@@ -149,6 +152,7 @@ static const struct i2c_device_id tmp401_id[] = {
{ "tmp431", tmp431 },
{ "tmp432", tmp432 },
{ "tmp435", tmp435 },
+ { "tmp461", tmp461 },
{ }
};
MODULE_DEVICE_TABLE(i2c, tmp401_id);
@@ -170,7 +174,7 @@ struct tmp401_data {
/* register values */
u8 status[4];
u8 config;
- u16 temp[6][3];
+ u16 temp[7][3];
u8 temp_crit_hyst;
};
@@ -613,6 +617,22 @@ static const struct attribute_group tmp432_group = {
};
/*
+ * Additional features of the TMP461 chip.
+ * The TMP461 temperature offset for the remote channel.
+ */
+static SENSOR_DEVICE_ATTR_2(temp2_offset, S_IWUSR | S_IRUGO, show_temp,
+ store_temp, 6, 1);
+
+static struct attribute *tmp461_attributes[] = {
+ &sensor_dev_attr_temp2_offset.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group tmp461_group = {
+ .attrs = tmp461_attributes,
+};
+
+/*
* Begin non sysfs callback code (aka Real code)
*/
@@ -714,7 +734,7 @@ static int tmp401_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
static const char * const names[] = {
- "TMP401", "TMP411", "TMP431", "TMP432", "TMP435"
+ "TMP401", "TMP411", "TMP431", "TMP432", "TMP435", "TMP461"
};
struct device *dev = &client->dev;
struct device *hwmon_dev;
@@ -745,6 +765,9 @@ static int tmp401_probe(struct i2c_client *client,
if (data->kind == tmp432)
data->groups[groups++] = &tmp432_group;
+ if (data->kind == tmp461)
+ data->groups[groups++] = &tmp461_group;
+
hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
data, data->groups);
if (IS_ERR(hwmon_dev))
diff --git a/drivers/hwspinlock/qcom_hwspinlock.c b/drivers/hwspinlock/qcom_hwspinlock.c
index c752447fb..fa6880b80 100644
--- a/drivers/hwspinlock/qcom_hwspinlock.c
+++ b/drivers/hwspinlock/qcom_hwspinlock.c
@@ -98,6 +98,7 @@ static int qcom_hwspinlock_probe(struct platform_device *pdev)
}
regmap = syscon_node_to_regmap(syscon);
+ of_node_put(syscon);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index d83ab8267..2de4cad9c 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -51,6 +51,8 @@ module_param_named(boot_enable, boot_enable, int, S_IRUGO);
static int etm_count;
static struct etm_drvdata *etmdrvdata[NR_CPUS];
+static enum cpuhp_state hp_online;
+
/*
* Memory mapped writes to clear os lock are not supported on some processors
* and OS lock must be unlocked before any memory mapped access on such
@@ -481,8 +483,7 @@ static int etm_enable_sysfs(struct coresight_device *csdev)
/*
* Configure the ETM only if the CPU is online. If it isn't online
- * hw configuration will take place when 'CPU_STARTING' is received
- * in @etm_cpu_callback.
+ * hw configuration will take place on the local CPU during bring-up.
*/
if (cpu_online(drvdata->cpu)) {
ret = smp_call_function_single(drvdata->cpu,
@@ -641,47 +642,44 @@ static const struct coresight_ops etm_cs_ops = {
.source_ops = &etm_source_ops,
};
-static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
- void *hcpu)
+static int etm_online_cpu(unsigned int cpu)
{
- unsigned int cpu = (unsigned long)hcpu;
-
if (!etmdrvdata[cpu])
- goto out;
+ return 0;
- switch (action & (~CPU_TASKS_FROZEN)) {
- case CPU_STARTING:
- spin_lock(&etmdrvdata[cpu]->spinlock);
- if (!etmdrvdata[cpu]->os_unlock) {
- etm_os_unlock(etmdrvdata[cpu]);
- etmdrvdata[cpu]->os_unlock = true;
- }
-
- if (local_read(&etmdrvdata[cpu]->mode))
- etm_enable_hw(etmdrvdata[cpu]);
- spin_unlock(&etmdrvdata[cpu]->spinlock);
- break;
+ if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable)
+ coresight_enable(etmdrvdata[cpu]->csdev);
+ return 0;
+}
- case CPU_ONLINE:
- if (etmdrvdata[cpu]->boot_enable &&
- !etmdrvdata[cpu]->sticky_enable)
- coresight_enable(etmdrvdata[cpu]->csdev);
- break;
+static int etm_starting_cpu(unsigned int cpu)
+{
+ if (!etmdrvdata[cpu])
+ return 0;
- case CPU_DYING:
- spin_lock(&etmdrvdata[cpu]->spinlock);
- if (local_read(&etmdrvdata[cpu]->mode))
- etm_disable_hw(etmdrvdata[cpu]);
- spin_unlock(&etmdrvdata[cpu]->spinlock);
- break;
+ spin_lock(&etmdrvdata[cpu]->spinlock);
+ if (!etmdrvdata[cpu]->os_unlock) {
+ etm_os_unlock(etmdrvdata[cpu]);
+ etmdrvdata[cpu]->os_unlock = true;
}
-out:
- return NOTIFY_OK;
+
+ if (local_read(&etmdrvdata[cpu]->mode))
+ etm_enable_hw(etmdrvdata[cpu]);
+ spin_unlock(&etmdrvdata[cpu]->spinlock);
+ return 0;
}
-static struct notifier_block etm_cpu_notifier = {
- .notifier_call = etm_cpu_callback,
-};
+static int etm_dying_cpu(unsigned int cpu)
+{
+ if (!etmdrvdata[cpu])
+ return 0;
+
+ spin_lock(&etmdrvdata[cpu]->spinlock);
+ if (local_read(&etmdrvdata[cpu]->mode))
+ etm_disable_hw(etmdrvdata[cpu]);
+ spin_unlock(&etmdrvdata[cpu]->spinlock);
+ return 0;
+}
static bool etm_arch_supported(u8 arch)
{
@@ -806,9 +804,17 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
etm_init_arch_data, drvdata, 1))
dev_err(dev, "ETM arch init failed\n");
- if (!etm_count++)
- register_hotcpu_notifier(&etm_cpu_notifier);
-
+ if (!etm_count++) {
+ cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING,
+ "AP_ARM_CORESIGHT_STARTING",
+ etm_starting_cpu, etm_dying_cpu);
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "AP_ARM_CORESIGHT_ONLINE",
+ etm_online_cpu, NULL);
+ if (ret < 0)
+ goto err_arch_supported;
+ hp_online = ret;
+ }
put_online_cpus();
if (etm_arch_supported(drvdata->arch) == false) {
@@ -839,7 +845,6 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
pm_runtime_put(&adev->dev);
dev_info(dev, "%s initialized\n", (char *)id->data);
-
if (boot_enable) {
coresight_enable(drvdata->csdev);
drvdata->boot_enable = true;
@@ -848,8 +853,11 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
err_arch_supported:
- if (--etm_count == 0)
- unregister_hotcpu_notifier(&etm_cpu_notifier);
+ if (--etm_count == 0) {
+ cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
+ if (hp_online)
+ cpuhp_remove_state_nocalls(hp_online);
+ }
return ret;
}
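
The same conversion is applied to the ETMv4 driver below; the underlying cpuhp contract, reduced to a sketch with hypothetical names:

static enum cpuhp_state hp_online;

static int my_online_cpu(unsigned int cpu)
{
        /* runs in process context once @cpu is fully online */
        return 0;
}

static int my_register(void)
{
        /*
         * CPUHP_AP_ONLINE_DYN allocates a state slot dynamically and
         * returns its number (> 0), which must be kept for removal.
         */
        int ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
                                            "AP_MY_ONLINE",
                                            my_online_cpu, NULL);
        if (ret < 0)
                return ret;
        hp_online = ret;
        return 0;
}

static void my_unregister(void)
{
        cpuhp_remove_state_nocalls(hp_online);
}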
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 462f0dc15..1a5e0d14c 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -48,6 +48,8 @@ static int etm4_count;
static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
static void etm4_set_default(struct etmv4_config *config);
+static enum cpuhp_state hp_online;
+
static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
{
/* Writing any value to ETMOSLAR unlocks the trace registers */
@@ -673,47 +675,44 @@ void etm4_config_trace_mode(struct etmv4_config *config)
config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = addr_acc;
}
-static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action,
- void *hcpu)
+static int etm4_online_cpu(unsigned int cpu)
{
- unsigned int cpu = (unsigned long)hcpu;
-
if (!etmdrvdata[cpu])
- goto out;
-
- switch (action & (~CPU_TASKS_FROZEN)) {
- case CPU_STARTING:
- spin_lock(&etmdrvdata[cpu]->spinlock);
- if (!etmdrvdata[cpu]->os_unlock) {
- etm4_os_unlock(etmdrvdata[cpu]);
- etmdrvdata[cpu]->os_unlock = true;
- }
-
- if (local_read(&etmdrvdata[cpu]->mode))
- etm4_enable_hw(etmdrvdata[cpu]);
- spin_unlock(&etmdrvdata[cpu]->spinlock);
- break;
+ return 0;
- case CPU_ONLINE:
- if (etmdrvdata[cpu]->boot_enable &&
- !etmdrvdata[cpu]->sticky_enable)
- coresight_enable(etmdrvdata[cpu]->csdev);
- break;
+ if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable)
+ coresight_enable(etmdrvdata[cpu]->csdev);
+ return 0;
+}
- case CPU_DYING:
- spin_lock(&etmdrvdata[cpu]->spinlock);
- if (local_read(&etmdrvdata[cpu]->mode))
- etm4_disable_hw(etmdrvdata[cpu]);
- spin_unlock(&etmdrvdata[cpu]->spinlock);
- break;
+static int etm4_starting_cpu(unsigned int cpu)
+{
+ if (!etmdrvdata[cpu])
+ return 0;
+
+ spin_lock(&etmdrvdata[cpu]->spinlock);
+ if (!etmdrvdata[cpu]->os_unlock) {
+ etm4_os_unlock(etmdrvdata[cpu]);
+ etmdrvdata[cpu]->os_unlock = true;
}
-out:
- return NOTIFY_OK;
+
+ if (local_read(&etmdrvdata[cpu]->mode))
+ etm4_enable_hw(etmdrvdata[cpu]);
+ spin_unlock(&etmdrvdata[cpu]->spinlock);
+ return 0;
}
-static struct notifier_block etm4_cpu_notifier = {
- .notifier_call = etm4_cpu_callback,
-};
+static int etm4_dying_cpu(unsigned int cpu)
+{
+ if (!etmdrvdata[cpu])
+ return 0;
+
+ spin_lock(&etmdrvdata[cpu]->spinlock);
+ if (local_read(&etmdrvdata[cpu]->mode))
+ etm4_disable_hw(etmdrvdata[cpu]);
+ spin_unlock(&etmdrvdata[cpu]->spinlock);
+ return 0;
+}
static void etm4_init_trace_id(struct etmv4_drvdata *drvdata)
{
@@ -767,8 +766,17 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
etm4_init_arch_data, drvdata, 1))
dev_err(dev, "ETM arch init failed\n");
- if (!etm4_count++)
- register_hotcpu_notifier(&etm4_cpu_notifier);
+ if (!etm4_count++) {
+ cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT4_STARTING,
+ "AP_ARM_CORESIGHT4_STARTING",
+ etm4_starting_cpu, etm4_dying_cpu);
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "AP_ARM_CORESIGHT4_ONLINE",
+ etm4_online_cpu, NULL);
+ if (ret < 0)
+ goto err_arch_supported;
+ hp_online = ret;
+ }
put_online_cpus();
@@ -809,8 +817,11 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
err_arch_supported:
- if (--etm4_count == 0)
- unregister_hotcpu_notifier(&etm4_cpu_notifier);
+ if (--etm4_count == 0) {
+ cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT4_STARTING);
+ if (hp_online)
+ cpuhp_remove_state_nocalls(hp_online);
+ }
return ret;
}
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index 1be543e8e..6f0a51a2c 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -23,6 +23,7 @@
#include <linux/debugfs.h>
#include <linux/idr.h>
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include "intel_th.h"
@@ -67,23 +68,33 @@ static int intel_th_probe(struct device *dev)
hubdrv = to_intel_th_driver(hub->dev.driver);
+ pm_runtime_set_active(dev);
+ pm_runtime_no_callbacks(dev);
+ pm_runtime_enable(dev);
+
ret = thdrv->probe(to_intel_th_device(dev));
if (ret)
- return ret;
+ goto out_pm;
if (thdrv->attr_group) {
ret = sysfs_create_group(&thdev->dev.kobj, thdrv->attr_group);
- if (ret) {
- thdrv->remove(thdev);
-
- return ret;
- }
+ if (ret)
+ goto out;
}
if (thdev->type == INTEL_TH_OUTPUT &&
!intel_th_output_assigned(thdev))
+ /* does not talk to hardware */
ret = hubdrv->assign(hub, thdev);
+out:
+ if (ret)
+ thdrv->remove(thdev);
+
+out_pm:
+ if (ret)
+ pm_runtime_disable(dev);
+
return ret;
}
@@ -103,6 +114,8 @@ static int intel_th_remove(struct device *dev)
if (thdrv->attr_group)
sysfs_remove_group(&thdev->dev.kobj, thdrv->attr_group);
+ pm_runtime_get_sync(dev);
+
thdrv->remove(thdev);
if (intel_th_output_assigned(thdev)) {
@@ -110,9 +123,14 @@ static int intel_th_remove(struct device *dev)
to_intel_th_driver(dev->parent->driver);
if (hub->dev.driver)
+ /* does not talk to hardware */
hubdrv->unassign(hub, thdev);
}
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
return 0;
}
@@ -185,6 +203,7 @@ static int intel_th_output_activate(struct intel_th_device *thdev)
{
struct intel_th_driver *thdrv =
to_intel_th_driver_or_null(thdev->dev.driver);
+ int ret = 0;
if (!thdrv)
return -ENODEV;
@@ -192,12 +211,17 @@ static int intel_th_output_activate(struct intel_th_device *thdev)
if (!try_module_get(thdrv->driver.owner))
return -ENODEV;
+ pm_runtime_get_sync(&thdev->dev);
+
if (thdrv->activate)
- return thdrv->activate(thdev);
+ ret = thdrv->activate(thdev);
+ else
+ intel_th_trace_enable(thdev);
- intel_th_trace_enable(thdev);
+ if (ret)
+ pm_runtime_put(&thdev->dev);
- return 0;
+ return ret;
}
static void intel_th_output_deactivate(struct intel_th_device *thdev)
@@ -213,6 +237,7 @@ static void intel_th_output_deactivate(struct intel_th_device *thdev)
else
intel_th_trace_disable(thdev);
+ pm_runtime_put(&thdev->dev);
module_put(thdrv->driver.owner);
}
@@ -465,6 +490,38 @@ static struct intel_th_subdevice {
},
};
+#ifdef CONFIG_MODULES
+static void __intel_th_request_hub_module(struct work_struct *work)
+{
+ struct intel_th *th = container_of(work, struct intel_th,
+ request_module_work);
+
+ request_module("intel_th_%s", th->hub->name);
+}
+
+static int intel_th_request_hub_module(struct intel_th *th)
+{
+ INIT_WORK(&th->request_module_work, __intel_th_request_hub_module);
+ schedule_work(&th->request_module_work);
+
+ return 0;
+}
+
+static void intel_th_request_hub_module_flush(struct intel_th *th)
+{
+ flush_work(&th->request_module_work);
+}
+#else
+static inline int intel_th_request_hub_module(struct intel_th *th)
+{
+ return -EINVAL;
+}
+
+static inline void intel_th_request_hub_module_flush(struct intel_th *th)
+{
+}
+#endif /* CONFIG_MODULES */
+
static int intel_th_populate(struct intel_th *th, struct resource *devres,
unsigned int ndevres, int irq)
{
@@ -535,7 +592,7 @@ static int intel_th_populate(struct intel_th *th, struct resource *devres,
/* need switch driver to be loaded to enumerate the rest */
if (subdev->type == INTEL_TH_SWITCH && !req) {
th->hub = thdev;
- err = request_module("intel_th_%s", subdev->name);
+ err = intel_th_request_hub_module(th);
if (!err)
req++;
}
@@ -628,6 +685,10 @@ intel_th_alloc(struct device *dev, struct resource *devres,
dev_set_drvdata(dev, th);
+ pm_runtime_no_callbacks(dev);
+ pm_runtime_put(dev);
+ pm_runtime_allow(dev);
+
err = intel_th_populate(th, devres, ndevres, irq);
if (err)
goto err_chrdev;
@@ -635,6 +696,8 @@ intel_th_alloc(struct device *dev, struct resource *devres,
return th;
err_chrdev:
+ pm_runtime_forbid(dev);
+
__unregister_chrdev(th->major, 0, TH_POSSIBLE_OUTPUTS,
"intel_th/output");
@@ -652,12 +715,16 @@ void intel_th_free(struct intel_th *th)
{
int i;
+ intel_th_request_hub_module_flush(th);
for (i = 0; i < TH_SUBDEVICE_MAX; i++)
if (th->thdev[i] != th->hub)
intel_th_device_remove(th->thdev[i]);
intel_th_device_remove(th->hub);
+ pm_runtime_get_sync(th->dev);
+ pm_runtime_forbid(th->dev);
+
__unregister_chrdev(th->major, 0, TH_POSSIBLE_OUTPUTS,
"intel_th/output");
@@ -682,6 +749,7 @@ int intel_th_trace_enable(struct intel_th_device *thdev)
if (WARN_ON_ONCE(thdev->type != INTEL_TH_OUTPUT))
return -EINVAL;
+ pm_runtime_get_sync(&thdev->dev);
hubdrv->enable(hub, &thdev->output);
return 0;
@@ -702,6 +770,7 @@ int intel_th_trace_disable(struct intel_th_device *thdev)
return -EINVAL;
hubdrv->disable(hub, &thdev->output);
+ pm_runtime_put(&thdev->dev);
return 0;
}
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
index 9beea0b54..33e09369a 100644
--- a/drivers/hwtracing/intel_th/gth.c
+++ b/drivers/hwtracing/intel_th/gth.c
@@ -22,6 +22,7 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
+#include <linux/pm_runtime.h>
#include "intel_th.h"
#include "gth.h"
@@ -190,6 +191,11 @@ static ssize_t master_attr_store(struct device *dev,
if (old_port >= 0) {
gth->master[ma->master] = -1;
clear_bit(ma->master, gth->output[old_port].master);
+
+ /*
+ * if the port is active, program this setting; this
+ * implies that runtime PM is on
+ */
if (gth->output[old_port].output->active)
gth_master_set(gth, ma->master, -1);
}
@@ -204,7 +210,7 @@ static ssize_t master_attr_store(struct device *dev,
set_bit(ma->master, gth->output[port].master);
- /* if the port is active, program this setting */
+ /* if the port is active, program this setting, see above */
if (gth->output[port].output->active)
gth_master_set(gth, ma->master, port);
}
@@ -326,11 +332,15 @@ static ssize_t output_attr_show(struct device *dev,
struct gth_device *gth = oa->gth;
size_t count;
+ pm_runtime_get_sync(dev);
+
spin_lock(&gth->gth_lock);
count = snprintf(buf, PAGE_SIZE, "%x\n",
gth_output_parm_get(gth, oa->port, oa->parm));
spin_unlock(&gth->gth_lock);
+ pm_runtime_put(dev);
+
return count;
}
@@ -346,10 +356,14 @@ static ssize_t output_attr_store(struct device *dev,
if (kstrtouint(buf, 16, &config) < 0)
return -EINVAL;
+ pm_runtime_get_sync(dev);
+
spin_lock(&gth->gth_lock);
gth_output_parm_set(gth, oa->port, oa->parm, config);
spin_unlock(&gth->gth_lock);
+ pm_runtime_put(dev);
+
return count;
}
@@ -451,7 +465,7 @@ static int intel_th_output_attributes(struct gth_device *gth)
}
/**
- * intel_th_gth_disable() - enable tracing to an output device
+ * intel_th_gth_disable() - disable tracing to an output device
* @thdev: GTH device
* @output: output device's descriptor
*
diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h
index 0df22e306..4c195786b 100644
--- a/drivers/hwtracing/intel_th/intel_th.h
+++ b/drivers/hwtracing/intel_th/intel_th.h
@@ -114,6 +114,9 @@ intel_th_output_assigned(struct intel_th_device *thdev)
* @unassign: deassociate an output type device from an output port
* @enable: enable tracing for a given output device
* @disable: disable tracing for a given output device
+ * @irq: interrupt callback
+ * @activate: enable tracing on the output's side
+ * @deactivate: disable tracing on the output's side
* @fops: file operations for device nodes
* @attr_group: attributes provided by the driver
*
@@ -205,6 +208,9 @@ struct intel_th {
int id;
int major;
+#ifdef CONFIG_MODULES
+ struct work_struct request_module_work;
+#endif /* CONFIG_MODULES */
#ifdef CONFIG_INTEL_TH_DEBUG
struct dentry *dbg;
#endif
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 5e25c7eb3..0bba38423 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -80,6 +80,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1a8e),
.driver_data = (kernel_ulong_t)0,
},
+ {
+ /* Kaby Lake PCH-H */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6),
+ .driver_data = (kernel_ulong_t)0,
+ },
{ 0 },
};
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index ff31108b0..51f81d64c 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -15,6 +15,7 @@
* as defined in MIPI STPv2 specification.
*/
+#include <linux/pm_runtime.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -482,14 +483,40 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf,
return -EFAULT;
}
+ pm_runtime_get_sync(&stm->dev);
+
count = stm_write(stm->data, stmf->output.master, stmf->output.channel,
kbuf, count);
+ pm_runtime_mark_last_busy(&stm->dev);
+ pm_runtime_put_autosuspend(&stm->dev);
kfree(kbuf);
return count;
}
+static void stm_mmap_open(struct vm_area_struct *vma)
+{
+ struct stm_file *stmf = vma->vm_file->private_data;
+ struct stm_device *stm = stmf->stm;
+
+ pm_runtime_get(&stm->dev);
+}
+
+static void stm_mmap_close(struct vm_area_struct *vma)
+{
+ struct stm_file *stmf = vma->vm_file->private_data;
+ struct stm_device *stm = stmf->stm;
+
+ pm_runtime_mark_last_busy(&stm->dev);
+ pm_runtime_put_autosuspend(&stm->dev);
+}
+
+static const struct vm_operations_struct stm_mmap_vmops = {
+ .open = stm_mmap_open,
+ .close = stm_mmap_close,
+};
+
static int stm_char_mmap(struct file *file, struct vm_area_struct *vma)
{
struct stm_file *stmf = file->private_data;
@@ -514,8 +541,11 @@ static int stm_char_mmap(struct file *file, struct vm_area_struct *vma)
if (!phys)
return -EINVAL;
+ pm_runtime_get_sync(&stm->dev);
+
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_ops = &stm_mmap_vmops;
vm_iomap_memory(vma, phys, size);
return 0;
@@ -701,6 +731,17 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
if (err)
goto err_device;
+ /*
+ * Use delayed autosuspend to avoid bouncing back and forth
+ * on recurring character device writes, with an initial
+ * delay of 2 seconds.
+ */
+ pm_runtime_no_callbacks(&stm->dev);
+ pm_runtime_use_autosuspend(&stm->dev);
+ pm_runtime_set_autosuspend_delay(&stm->dev, 2000);
+ pm_runtime_set_suspended(&stm->dev);
+ pm_runtime_enable(&stm->dev);
+
return 0;
err_device:
@@ -724,6 +765,9 @@ void stm_unregister_device(struct stm_data *stm_data)
struct stm_source_device *src, *iter;
int i, ret;
+ pm_runtime_dont_use_autosuspend(&stm->dev);
+ pm_runtime_disable(&stm->dev);
+
mutex_lock(&stm->link_mutex);
list_for_each_entry_safe(src, iter, &stm->link_list, link_entry) {
ret = __stm_source_link_drop(src, stm);
@@ -878,6 +922,8 @@ static int __stm_source_link_drop(struct stm_source_device *src,
stm_output_free(link, &src->output);
list_del_init(&src->link_entry);
+ pm_runtime_mark_last_busy(&link->dev);
+ pm_runtime_put_autosuspend(&link->dev);
/* matches stm_find_device() from stm_source_link_store() */
stm_put_device(link);
rcu_assign_pointer(src->link, NULL);
@@ -971,8 +1017,11 @@ static ssize_t stm_source_link_store(struct device *dev,
if (!link)
return -EINVAL;
+ pm_runtime_get(&link->dev);
+
err = stm_source_link_add(src, link);
if (err) {
+ pm_runtime_put_autosuspend(&link->dev);
/* matches the stm_find_device() above */
stm_put_device(link);
}
@@ -1033,6 +1082,9 @@ int stm_source_register_device(struct device *parent,
if (err)
goto err;
+ pm_runtime_no_callbacks(&src->dev);
+ pm_runtime_forbid(&src->dev);
+
err = device_add(&src->dev);
if (err)
goto err;
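
The runtime-PM calls added throughout this file follow one pairing: a get before touching the hardware and a deferred put afterwards, so that back-to-back writes keep the device awake for the 2-second autosuspend window set up in stm_register_device(). The shape of the pairing, as used in stm_char_write():

pm_runtime_get_sync(&stm->dev);         /* resume if suspended */
/* ... perform the write to the STM hardware ... */
pm_runtime_mark_last_busy(&stm->dev);   /* restart the idle timer */
pm_runtime_put_autosuspend(&stm->dev);  /* suspend only after the delay */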
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 78fbee463..d223650a9 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -88,8 +88,8 @@ config I2C_SMBUS
tristate "SMBus-specific protocols" if !I2C_HELPER_AUTO
help
Say Y here if you want support for SMBus extensions to the I2C
- specification. At the moment, the only supported extension is
- the SMBus alert protocol.
+ specification. At the moment, two extensions are supported:
+ the SMBus Alert protocol and the SMBus Host Notify protocol.
This support is also available as a module. If so, the module
will be called i2c-smbus.
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index f167021b8..5c3993b26 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -91,6 +91,7 @@ config I2C_I801
tristate "Intel 82801 (ICH/PCH)"
depends on PCI
select CHECK_SIGNATURE if X86 && DMI
+ select I2C_SMBUS
help
If you say yes to this option, support will be included for the Intel
801 family of mainboard I2C interfaces. Specifically, the following
@@ -397,7 +398,7 @@ config I2C_BCM_KONA
config I2C_BRCMSTB
tristate "BRCM Settop I2C controller"
- depends on ARCH_BRCMSTB || COMPILE_TEST
+ depends on ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST
default y
help
If you say yes to this option, support will be included for the
@@ -490,7 +491,9 @@ config I2C_DESIGNWARE_PCI
config I2C_DESIGNWARE_BAYTRAIL
bool "Intel Baytrail I2C semaphore support"
- depends on I2C_DESIGNWARE_PLATFORM && IOSF_MBI=y && ACPI
+ depends on ACPI
+ depends on (I2C_DESIGNWARE_PLATFORM=m && IOSF_MBI) || \
+ (I2C_DESIGNWARE_PLATFORM=y && IOSF_MBI=y)
help
This driver enables managed host access to the PMIC I2C bus on select
Intel BayTrail platforms using the X-Powers AXP288 PMIC. It allows
@@ -635,7 +638,7 @@ config I2C_LPC2K
config I2C_MESON
tristate "Amlogic Meson I2C controller"
- depends on ARCH_MESON
+ depends on ARCH_MESON || COMPILE_TEST
help
If you say yes to this option, support will be included for the
I2C interface on the Amlogic Meson family of SoCs.
@@ -924,7 +927,7 @@ config I2C_UNIPHIER_F
config I2C_VERSATILE
tristate "ARM Versatile/Realview I2C bus support"
- depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS
+ depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST
select I2C_ALGOBIT
help
Say yes if you want to support the I2C serial bus on ARM's Versatile
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index f23372669..1bb97f658 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -38,6 +38,7 @@
#define AT91_I2C_TIMEOUT msecs_to_jiffies(100) /* transfer timeout */
#define AT91_I2C_DMA_THRESHOLD 8 /* enable DMA if transfer size is bigger than this threshold */
#define AUTOSUSPEND_TIMEOUT 2000
+#define AT91_I2C_MAX_ALT_CMD_DATA_SIZE 256
/* AT91 TWI register definitions */
#define AT91_TWI_CR 0x0000 /* Control Register */
@@ -141,6 +142,7 @@ struct at91_twi_dev {
unsigned twi_cwgr_reg;
struct at91_twi_pdata *pdata;
bool use_dma;
+ bool use_alt_cmd;
bool recv_len_abort;
u32 fifo_size;
struct at91_twi_dma dma;
@@ -269,7 +271,7 @@ static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
/* send stop when last byte has been written */
if (--dev->buf_len == 0)
- if (!dev->pdata->has_alt_cmd)
+ if (!dev->use_alt_cmd)
at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
dev_dbg(dev->dev, "wrote 0x%x, to go %d\n", *dev->buf, dev->buf_len);
@@ -292,7 +294,7 @@ static void at91_twi_write_data_dma_callback(void *data)
* we just have to enable TXCOMP one.
*/
at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
- if (!dev->pdata->has_alt_cmd)
+ if (!dev->use_alt_cmd)
at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
}
@@ -410,7 +412,7 @@ static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
}
/* send stop if second but last byte has been read */
- if (!dev->pdata->has_alt_cmd && dev->buf_len == 1)
+ if (!dev->use_alt_cmd && dev->buf_len == 1)
at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
dev_dbg(dev->dev, "read 0x%x, to go %d\n", *dev->buf, dev->buf_len);
@@ -426,7 +428,7 @@ static void at91_twi_read_data_dma_callback(void *data)
dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
dev->buf_len, DMA_FROM_DEVICE);
- if (!dev->pdata->has_alt_cmd) {
+ if (!dev->use_alt_cmd) {
/* The last two bytes have to be read without using dma */
dev->buf += dev->buf_len - 2;
dev->buf_len = 2;
@@ -443,7 +445,7 @@ static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
struct dma_chan *chan_rx = dma->chan_rx;
size_t buf_len;
- buf_len = (dev->pdata->has_alt_cmd) ? dev->buf_len : dev->buf_len - 2;
+ buf_len = (dev->use_alt_cmd) ? dev->buf_len : dev->buf_len - 2;
dma->direction = DMA_FROM_DEVICE;
/* Keep in mind that we won't use dma to read the last two bytes */
@@ -651,7 +653,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
unsigned start_flags = AT91_TWI_START;
/* if only one byte is to be read, immediately stop transfer */
- if (!has_alt_cmd && dev->buf_len <= 1 &&
+ if (!dev->use_alt_cmd && dev->buf_len <= 1 &&
!(dev->msg->flags & I2C_M_RECV_LEN))
start_flags |= AT91_TWI_STOP;
at91_twi_write(dev, AT91_TWI_CR, start_flags);
@@ -745,7 +747,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
int ret;
unsigned int_addr_flag = 0;
struct i2c_msg *m_start = msg;
- bool is_read, use_alt_cmd = false;
+ bool is_read;
dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);
@@ -768,14 +770,16 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
at91_twi_write(dev, AT91_TWI_IADR, internal_address);
}
+ dev->use_alt_cmd = false;
is_read = (m_start->flags & I2C_M_RD);
if (dev->pdata->has_alt_cmd) {
- if (m_start->len > 0) {
+ if (m_start->len > 0 &&
+ m_start->len < AT91_I2C_MAX_ALT_CMD_DATA_SIZE) {
at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMEN);
at91_twi_write(dev, AT91_TWI_ACR,
AT91_TWI_ACR_DATAL(m_start->len) |
((is_read) ? AT91_TWI_ACR_DIR : 0));
- use_alt_cmd = true;
+ dev->use_alt_cmd = true;
} else {
at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMDIS);
}
@@ -784,7 +788,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
at91_twi_write(dev, AT91_TWI_MMR,
(m_start->addr << 16) |
int_addr_flag |
- ((!use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0));
+ ((!dev->use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0));
dev->buf_len = m_start->len;
dev->buf = m_start->buf;
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 19c843828..95f7cac76 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -158,7 +158,7 @@ static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data)
if (status & BIT(IS_M_START_BUSY_SHIFT)) {
iproc_i2c->xfer_is_done = 1;
- complete_all(&iproc_i2c->done);
+ complete(&iproc_i2c->done);
}
writel(status, iproc_i2c->base + IS_OFFSET);
diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c
index ac9f47679..258cb9a40 100644
--- a/drivers/i2c/busses/i2c-bcm-kona.c
+++ b/drivers/i2c/busses/i2c-bcm-kona.c
@@ -229,7 +229,7 @@ static irqreturn_t bcm_kona_i2c_isr(int irq, void *devid)
dev->base + TXFCR_OFFSET);
writel(status & ~ISR_RESERVED_MASK, dev->base + ISR_OFFSET);
- complete_all(&dev->done);
+ complete(&dev->done);
return IRQ_HANDLED;
}
@@ -643,7 +643,7 @@ static int bcm_kona_i2c_xfer(struct i2c_adapter *adapter,
if (rc < 0) {
dev_err(dev->device,
"restart cmd failed rc = %d\n", rc);
- goto xfer_send_stop;
+ goto xfer_send_stop;
}
}
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index 818b051d2..d4f3239b5 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -253,7 +253,8 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
i2c_dev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(i2c_dev->clk)) {
- dev_err(&pdev->dev, "Could not get clock\n");
+ if (PTR_ERR(i2c_dev->clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Could not get clock\n");
return PTR_ERR(i2c_dev->clk);
}
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index 6a8cfc134..385b57bfc 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -228,7 +228,7 @@ static irqreturn_t brcmstb_i2c_isr(int irq, void *devid)
return IRQ_NONE;
brcmstb_i2c_enable_disable_irq(dev, INT_DISABLE);
- complete_all(&dev->done);
+ complete(&dev->done);
dev_dbg(dev->device, "isr handled");
return IRQ_HANDLED;
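
The complete_all() to complete() conversions in these interrupt handlers share one rationale: each transfer has exactly one waiter, and complete() leaves the completion reusable, whereas complete_all() saturates its done counter until the next reinit_completion(). The intended pairing, schematically:

/* submitter, once per transfer */
reinit_completion(&dev->done);
/* ... start the transfer ... */
if (!wait_for_completion_timeout(&dev->done, msecs_to_jiffies(100)))
        return -ETIMEDOUT;

/* interrupt handler: a single waiter, so complete(), not complete_all() */
complete(&dev->done);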
@@ -343,10 +343,9 @@ static int brcmstb_i2c_xfer_bsc_data(struct brcmstb_i2c_dev *dev,
struct bsc_regs *pi2creg = dev->bsc_regmap;
int no_ack = pmsg->flags & I2C_M_IGNORE_NAK;
int data_regsz = brcmstb_i2c_get_data_regsz(dev);
- int xfersz = brcmstb_i2c_get_xfersz(dev);
/* see if the transaction needs to check NACK conditions */
- if (no_ack || len <= xfersz) {
+ if (no_ack) {
cmd = (pmsg->flags & I2C_M_RD) ? CMD_RD_NOACK
: CMD_WR_NOACK;
pi2creg->ctlhi_reg |= BSC_CTLHI_REG_IGNORE_ACK_MASK;
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 90bbd9f9d..3c16a2f7c 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -767,7 +767,7 @@ static int cdns_i2c_setclk(unsigned long clk_in, struct cdns_i2c *id)
* depending on the scaling direction.
*
* Return: NOTIFY_STOP if the rate change should be aborted, NOTIFY_OK
- * to acknowedge the change, NOTIFY_DONE if the notification is
+ * to acknowledge the change, NOTIFY_DONE if the notification is
* considered irrelevant.
*/
static int cdns_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 99b54be6b..fcd973d51 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -367,13 +367,17 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
dev_dbg(dev->dev, "Fast-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
/* Configure SDA Hold Time if required */
- if (dev->sda_hold_time) {
- reg = dw_readl(dev, DW_IC_COMP_VERSION);
- if (reg >= DW_IC_SDA_HOLD_MIN_VERS)
+ reg = dw_readl(dev, DW_IC_COMP_VERSION);
+ if (reg >= DW_IC_SDA_HOLD_MIN_VERS) {
+ if (dev->sda_hold_time) {
dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD);
- else
- dev_warn(dev->dev,
- "Hardware too old to adjust SDA hold time.");
+ } else {
+ /* Keep previous hold time setting if no one set it */
+ dev->sda_hold_time = dw_readl(dev, DW_IC_SDA_HOLD);
+ }
+ } else {
+ dev_warn(dev->dev,
+ "Hardware too old to adjust SDA hold time.\n");
}
/* Configure Tx/Rx FIFO threshold levels */
@@ -663,7 +667,7 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
i2c_dw_xfer_init(dev);
/* wait for tx to complete */
- if (!wait_for_completion_timeout(&dev->cmd_complete, HZ)) {
+ if (!wait_for_completion_timeout(&dev->cmd_complete, adap->timeout)) {
dev_err(dev->dev, "controller timed out\n");
/* i2c_dw_init implicitly disables the adapter */
i2c_dw_init(dev);
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index cd409e7fb..38493a714 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -26,6 +26,7 @@
#define DW_IC_CON_MASTER 0x1
#define DW_IC_CON_SPEED_STD 0x2
#define DW_IC_CON_SPEED_FAST 0x4
+#define DW_IC_CON_SPEED_MASK 0x6
#define DW_IC_CON_10BITADDR_MASTER 0x10
#define DW_IC_CON_RESTART_EN 0x20
#define DW_IC_CON_SLAVE_DISABLE 0x40
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 7368be000..96f8230cd 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -6,7 +6,7 @@
* Copyright (C) 2006 Texas Instruments.
* Copyright (C) 2007 MontaVista Software Inc.
* Copyright (C) 2009 Provigent Ltd.
- * Copyright (C) 2011, 2015 Intel Corporation.
+ * Copyright (C) 2011, 2015, 2016 Intel Corporation.
*
* ----------------------------------------------------------------------------
*
@@ -23,31 +23,27 @@
*
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/acpi.h>
#include <linux/delay.h>
-#include <linux/i2c.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
-#include <linux/acpi.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
#include "i2c-designware-core.h"
#define DRIVER_NAME "i2c-designware-pci"
enum dw_pci_ctl_id_t {
- medfield_0,
- medfield_1,
- medfield_2,
- medfield_3,
- medfield_4,
- medfield_5,
-
+ medfield,
+ merrifield,
baytrail,
haswell,
};
@@ -68,6 +64,7 @@ struct dw_pci_controller {
u32 clk_khz;
u32 functionality;
struct dw_scl_sda_cfg *scl_sda_cfg;
+ int (*setup)(struct pci_dev *pdev, struct dw_pci_controller *c);
};
#define INTEL_MID_STD_CFG (DW_IC_CON_MASTER | \
@@ -80,6 +77,14 @@ struct dw_pci_controller {
I2C_FUNC_SMBUS_WORD_DATA | \
I2C_FUNC_SMBUS_I2C_BLOCK)
+/* Merrifield HCNT/LCNT/SDA hold time */
+static struct dw_scl_sda_cfg mrfld_config = {
+ .ss_hcnt = 0x2f8,
+ .fs_hcnt = 0x87,
+ .ss_lcnt = 0x37b,
+ .fs_lcnt = 0x10a,
+};
+
/* BayTrail HCNT/LCNT/SDA hold time */
static struct dw_scl_sda_cfg byt_config = {
.ss_hcnt = 0x200,
@@ -98,48 +103,60 @@ static struct dw_scl_sda_cfg hsw_config = {
.sda_hold = 0x9,
};
+static int mfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
+{
+ switch (pdev->device) {
+ case 0x0817:
+ c->bus_cfg &= ~DW_IC_CON_SPEED_MASK;
+ c->bus_cfg |= DW_IC_CON_SPEED_STD;
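+ /* fall through: 0x0817 also gets its bus number assigned below */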
+ case 0x0818:
+ case 0x0819:
+ c->bus_num = pdev->device - 0x817 + 3;
+ return 0;
+ case 0x082C:
+ case 0x082D:
+ case 0x082E:
+ c->bus_num = pdev->device - 0x82C + 0;
+ return 0;
+ }
+ return -ENODEV;
+}
+
+static int mrfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
+{
+ /*
+ * On Intel Merrifield the user-visible I2C buses are enumerated
+ * [1..7], so we add 1 to shift the default range. The first PCI
+ * slot carries four functions, which is why we add 0 for the first
+ * slot and 4 for the second.
+ */
+ switch (PCI_SLOT(pdev->devfn)) {
+ case 8:
+ c->bus_num = PCI_FUNC(pdev->devfn) + 0 + 1;
+ return 0;
+ case 9:
+ c->bus_num = PCI_FUNC(pdev->devfn) + 4 + 1;
+ return 0;
+ }
+ return -ENODEV;
+}
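
Concretely, mrfld_setup() maps PCI slot 8, functions 0-3 to buses 1-4, and slot 9, functions 0-2 to buses 5-7, covering the seven user-visible buses mentioned in the comment.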
+
static struct dw_pci_controller dw_pci_controllers[] = {
- [medfield_0] = {
- .bus_num = 0,
- .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
- .tx_fifo_depth = 32,
- .rx_fifo_depth = 32,
- .clk_khz = 25000,
- },
- [medfield_1] = {
- .bus_num = 1,
- .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
- .tx_fifo_depth = 32,
- .rx_fifo_depth = 32,
- .clk_khz = 25000,
- },
- [medfield_2] = {
- .bus_num = 2,
- .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
- .tx_fifo_depth = 32,
- .rx_fifo_depth = 32,
- .clk_khz = 25000,
- },
- [medfield_3] = {
- .bus_num = 3,
- .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_STD,
- .tx_fifo_depth = 32,
- .rx_fifo_depth = 32,
- .clk_khz = 25000,
- },
- [medfield_4] = {
- .bus_num = 4,
+ [medfield] = {
+ .bus_num = -1,
.bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
.tx_fifo_depth = 32,
.rx_fifo_depth = 32,
.clk_khz = 25000,
+ .setup = mfld_setup,
},
- [medfield_5] = {
- .bus_num = 5,
- .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
- .tx_fifo_depth = 32,
- .rx_fifo_depth = 32,
- .clk_khz = 25000,
+ [merrifield] = {
+ .bus_num = -1,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 64,
+ .rx_fifo_depth = 64,
+ .scl_sda_cfg = &mrfld_config,
+ .setup = mrfld_setup,
},
[baytrail] = {
.bus_num = -1,
@@ -190,7 +207,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
struct dw_i2c_dev *dev;
struct i2c_adapter *adap;
int r;
- struct dw_pci_controller *controller;
+ struct dw_pci_controller *controller;
struct dw_scl_sda_cfg *cfg;
if (id->driver_data >= ARRAY_SIZE(dw_pci_controllers)) {
@@ -224,6 +241,13 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
dev->base = pcim_iomap_table(pdev)[0];
dev->dev = &pdev->dev;
dev->irq = pdev->irq;
+
+ if (controller->setup) {
+ r = controller->setup(pdev, controller);
+ if (r)
+ return r;
+ }
+
dev->functionality = controller->functionality |
DW_DEFAULT_FUNCTIONALITY;
@@ -276,12 +300,15 @@ MODULE_ALIAS("i2c_designware-pci");
static const struct pci_device_id i2_designware_pci_ids[] = {
/* Medfield */
- { PCI_VDEVICE(INTEL, 0x0817), medfield_3 },
- { PCI_VDEVICE(INTEL, 0x0818), medfield_4 },
- { PCI_VDEVICE(INTEL, 0x0819), medfield_5 },
- { PCI_VDEVICE(INTEL, 0x082C), medfield_0 },
- { PCI_VDEVICE(INTEL, 0x082D), medfield_1 },
- { PCI_VDEVICE(INTEL, 0x082E), medfield_2 },
+ { PCI_VDEVICE(INTEL, 0x0817), medfield },
+ { PCI_VDEVICE(INTEL, 0x0818), medfield },
+ { PCI_VDEVICE(INTEL, 0x0819), medfield },
+ { PCI_VDEVICE(INTEL, 0x082C), medfield },
+ { PCI_VDEVICE(INTEL, 0x082D), medfield },
+ { PCI_VDEVICE(INTEL, 0x082E), medfield },
+ /* Merrifield */
+ { PCI_VDEVICE(INTEL, 0x1195), merrifield },
+ { PCI_VDEVICE(INTEL, 0x1196), merrifield },
/* Baytrail */
{ PCI_VDEVICE(INTEL, 0x0F41), baytrail },
{ PCI_VDEVICE(INTEL, 0x0F42), baytrail },
diff --git a/drivers/i2c/busses/i2c-elektor.c b/drivers/i2c/busses/i2c-elektor.c
index 92e8c0ce1..8af62fb3f 100644
--- a/drivers/i2c/busses/i2c-elektor.c
+++ b/drivers/i2c/busses/i2c-elektor.c
@@ -319,16 +319,6 @@ static struct isa_driver i2c_elektor_driver = {
},
};
-static int __init i2c_pcfisa_init(void)
-{
- return isa_register_driver(&i2c_elektor_driver, 1);
-}
-
-static void __exit i2c_pcfisa_exit(void)
-{
- isa_unregister_driver(&i2c_elektor_driver);
-}
-
MODULE_AUTHOR("Hans Berglund <hb@spacetec.no>");
MODULE_DESCRIPTION("I2C-Bus adapter routines for PCF8584 ISA bus adapter");
MODULE_LICENSE("GPL");
@@ -338,6 +328,4 @@ module_param(irq, int, 0);
module_param(clock, int, 0);
module_param(own, int, 0);
module_param(mmapped, int, 0);
-
-module_init(i2c_pcfisa_init);
-module_exit(i2c_pcfisa_exit);
+module_isa_driver(i2c_elektor_driver, 1);
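The module_isa_driver() helper from <linux/isa.h> generates the init/exit boilerplate removed above; roughly, the one-liner expands to the equivalent of this sketch (see module_driver() in <linux/device.h> for the exact mechanics):

/* Approximate expansion of module_isa_driver(i2c_elektor_driver, 1) */
static int __init i2c_elektor_driver_init(void)
{
	return isa_register_driver(&i2c_elektor_driver, 1);
}
module_init(i2c_elektor_driver_init);

static void __exit i2c_elektor_driver_exit(void)
{
	isa_unregister_driver(&i2c_elektor_driver);
}
module_exit(i2c_elektor_driver_exit);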
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 4a60ad214..5ef9b733d 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -72,6 +72,7 @@
* Block process call transaction no
* I2C block read transaction yes (doesn't use the block buffer)
* Slave mode no
+ * SMBus Host Notify yes
* Interrupt processing yes
*
* See the file Documentation/i2c/busses/i2c-i801 for details.
@@ -86,6 +87,7 @@
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/i2c.h>
+#include <linux/i2c-smbus.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/dmi.h>
@@ -96,8 +98,7 @@
#include <linux/platform_data/itco_wdt.h>
#include <linux/pm_runtime.h>
-#if (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE) && \
- defined CONFIG_DMI
+#if IS_ENABLED(CONFIG_I2C_MUX_GPIO) && defined CONFIG_DMI
#include <linux/gpio.h>
#include <linux/i2c-mux-gpio.h>
#endif
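IS_ENABLED() is true for both built-in and modular configurations, which is what lets a single test replace the pair of defined-checks:

/*
 * Before: (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE)
 * After:  IS_ENABLED(CONFIG_I2C_MUX_GPIO)
 *
 * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or
 * 'm', and to 0 otherwise (see <linux/kconfig.h>).
 */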
@@ -113,6 +114,10 @@
#define SMBPEC(p) (8 + (p)->smba) /* ICH3 and later */
#define SMBAUXSTS(p) (12 + (p)->smba) /* ICH4 and later */
#define SMBAUXCTL(p) (13 + (p)->smba) /* ICH4 and later */
+#define SMBSLVSTS(p) (16 + (p)->smba) /* ICH3 and later */
+#define SMBSLVCMD(p) (17 + (p)->smba) /* ICH3 and later */
+#define SMBNTFDADD(p) (20 + (p)->smba) /* ICH3 and later */
+#define SMBNTFDDAT(p) (22 + (p)->smba) /* ICH3 and later */
/* PCI Address Constants */
#define SMBBAR 4
@@ -144,6 +149,10 @@
/* TCO configuration bits for TCOCTL */
#define TCOCTL_EN 0x0100
+/* Auxiliary status register bits, ICH4+ only */
+#define SMBAUXSTS_CRCE 1
+#define SMBAUXSTS_STCO 2
+
/* Auxiliary control register bits, ICH4+ only */
#define SMBAUXCTL_CRC 1
#define SMBAUXCTL_E32B 2
@@ -177,6 +186,12 @@
#define SMBHSTSTS_INTR 0x02
#define SMBHSTSTS_HOST_BUSY 0x01
+/* Host Notify Status registers bits */
+#define SMBSLVSTS_HST_NTFY_STS 1
+
+/* Host Notify Command registers bits */
+#define SMBSLVCMD_HST_NTFY_INTREN 0x01
+
#define STATUS_ERROR_FLAGS (SMBHSTSTS_FAILED | SMBHSTSTS_BUS_ERR | \
SMBHSTSTS_DEV_ERR)
@@ -239,8 +254,7 @@ struct i801_priv {
int len;
u8 *data;
-#if (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE) && \
- defined CONFIG_DMI
+#if IS_ENABLED(CONFIG_I2C_MUX_GPIO) && defined CONFIG_DMI
const struct i801_mux_config *mux_drvdata;
struct platform_device *mux_pdev;
#endif
@@ -252,13 +266,17 @@ struct i801_priv {
*/
bool acpi_reserved;
struct mutex acpi_lock;
+ struct smbus_host_notify *host_notify;
};
+#define SMBHSTNTFY_SIZE 8
+
#define FEATURE_SMBUS_PEC (1 << 0)
#define FEATURE_BLOCK_BUFFER (1 << 1)
#define FEATURE_BLOCK_PROC (1 << 2)
#define FEATURE_I2C_BLOCK_READ (1 << 3)
#define FEATURE_IRQ (1 << 4)
+#define FEATURE_HOST_NOTIFY (1 << 5)
/* Not really a feature, but it's convenient to handle it as such */
#define FEATURE_IDF (1 << 15)
#define FEATURE_TCO (1 << 16)
@@ -269,6 +287,7 @@ static const char *i801_feature_names[] = {
"Block process call",
"I2C block read",
"Interrupt",
+ "SMBus Host Notify",
};
static unsigned int disable_features;
@@ -277,7 +296,8 @@ MODULE_PARM_DESC(disable_features, "Disable selected driver features:\n"
"\t\t 0x01 disable SMBus PEC\n"
"\t\t 0x02 disable the block buffer\n"
"\t\t 0x08 disable the I2C block read functionality\n"
- "\t\t 0x10 don't use interrupts ");
+ "\t\t 0x10 don't use interrupts\n"
+ "\t\t 0x20 disable SMBus Host Notify ");
/* Make sure the SMBus host is ready to start transmitting.
Return 0 if it is, -EBUSY if it is not. */
@@ -305,6 +325,29 @@ static int i801_check_pre(struct i801_priv *priv)
}
}
+ /*
+ * Clear CRC status if needed.
+ * During normal operation, i801_check_post() takes care
+ * of it after every operation. We do it here only in case
+ * the hardware was already in this state when the driver
+ * started.
+ */
+ if (priv->features & FEATURE_SMBUS_PEC) {
+ status = inb_p(SMBAUXSTS(priv)) & SMBAUXSTS_CRCE;
+ if (status) {
+ dev_dbg(&priv->pci_dev->dev,
+ "Clearing aux status flags (%02x)\n", status);
+ outb_p(status, SMBAUXSTS(priv));
+ status = inb_p(SMBAUXSTS(priv)) & SMBAUXSTS_CRCE;
+ if (status) {
+ dev_err(&priv->pci_dev->dev,
+ "Failed clearing aux status flags (%02x)\n",
+ status);
+ return -EBUSY;
+ }
+ }
+ }
+
return 0;
}
@@ -348,8 +391,30 @@ static int i801_check_post(struct i801_priv *priv, int status)
dev_err(&priv->pci_dev->dev, "Transaction failed\n");
}
if (status & SMBHSTSTS_DEV_ERR) {
- result = -ENXIO;
- dev_dbg(&priv->pci_dev->dev, "No response\n");
+ /*
+ * This may be a PEC error, check and clear it.
+ *
+ * AUXSTS is handled differently from HSTSTS.
+ * For HSTSTS, i801_isr() or i801_wait_intr()
+ * has already cleared the error bits in hardware,
+ * and we are passed a copy of the original value
+ * in "status".
+ * For AUXSTS, the hardware register is left
+ * for us to handle here.
+ * This is asymmetric, slightly iffy, but safe,
+ * since all this code is serialized and the CRCE
+ * bit is harmless as long as it's cleared before
+ * the next operation.
+ */
+ if ((priv->features & FEATURE_SMBUS_PEC) &&
+ (inb_p(SMBAUXSTS(priv)) & SMBAUXSTS_CRCE)) {
+ outb_p(SMBAUXSTS_CRCE, SMBAUXSTS(priv));
+ result = -EBADMSG;
+ dev_dbg(&priv->pci_dev->dev, "PEC error\n");
+ } else {
+ result = -ENXIO;
+ dev_dbg(&priv->pci_dev->dev, "No response\n");
+ }
}
if (status & SMBHSTSTS_BUS_ERR) {
result = -EAGAIN;
@@ -511,8 +576,23 @@ static void i801_isr_byte_done(struct i801_priv *priv)
outb_p(SMBHSTSTS_BYTE_DONE, SMBHSTSTS(priv));
}
+static irqreturn_t i801_host_notify_isr(struct i801_priv *priv)
+{
+ unsigned short addr;
+ unsigned int data;
+
+ addr = inb_p(SMBNTFDADD(priv)) >> 1;
+ data = inw_p(SMBNTFDDAT(priv));
+
+ i2c_handle_smbus_host_notify(priv->host_notify, addr, data);
+
+ /* clear Host Notify bit and return */
+ outb_p(SMBSLVSTS_HST_NTFY_STS, SMBSLVSTS(priv));
+ return IRQ_HANDLED;
+}
+
/*
- * There are two kinds of interrupts:
+ * There are three kinds of interrupts:
*
* 1) i801 signals transaction completion with one of these interrupts:
* INTR - Success
@@ -524,6 +604,8 @@ static void i801_isr_byte_done(struct i801_priv *priv)
*
* 2) For byte-by-byte (I2C read/write) transactions, one BYTE_DONE interrupt
 * occurs for each byte of a byte-by-byte transaction to prepare the next byte.
+ *
+ * 3) Host Notify interrupts
*/
static irqreturn_t i801_isr(int irq, void *dev_id)
{
@@ -536,6 +618,12 @@ static irqreturn_t i801_isr(int irq, void *dev_id)
if (!(pcists & SMBPCISTS_INTS))
return IRQ_NONE;
+ if (priv->features & FEATURE_HOST_NOTIFY) {
+ status = inb_p(SMBSLVSTS(priv));
+ if (status & SMBSLVSTS_HST_NTFY_STS)
+ return i801_host_notify_isr(priv);
+ }
+
status = inb_p(SMBHSTSTS(priv));
if (status & SMBHSTSTS_BYTE_DONE)
i801_isr_byte_done(priv);
@@ -547,7 +635,7 @@ static irqreturn_t i801_isr(int irq, void *dev_id)
status &= SMBHSTSTS_INTR | STATUS_ERROR_FLAGS;
if (status) {
outb_p(status, SMBHSTSTS(priv));
- priv->status |= status;
+ priv->status = status;
wake_up(&priv->waitq);
}
@@ -847,7 +935,28 @@ static u32 i801_func(struct i2c_adapter *adapter)
I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_WRITE_I2C_BLOCK |
((priv->features & FEATURE_SMBUS_PEC) ? I2C_FUNC_SMBUS_PEC : 0) |
((priv->features & FEATURE_I2C_BLOCK_READ) ?
- I2C_FUNC_SMBUS_READ_I2C_BLOCK : 0);
+ I2C_FUNC_SMBUS_READ_I2C_BLOCK : 0) |
+ ((priv->features & FEATURE_HOST_NOTIFY) ?
+ I2C_FUNC_SMBUS_HOST_NOTIFY : 0);
+}
+
+static int i801_enable_host_notify(struct i2c_adapter *adapter)
+{
+ struct i801_priv *priv = i2c_get_adapdata(adapter);
+
+ if (!(priv->features & FEATURE_HOST_NOTIFY))
+ return -ENOTSUPP;
+
+ if (!priv->host_notify)
+ priv->host_notify = i2c_setup_smbus_host_notify(adapter);
+ if (!priv->host_notify)
+ return -ENOMEM;
+
+ outb_p(SMBSLVCMD_HST_NTFY_INTREN, SMBSLVCMD(priv));
+ /* clear Host Notify bit to allow a new notification */
+ outb_p(SMBSLVSTS_HST_NTFY_STS, SMBSLVSTS(priv));
+
+ return 0;
}
static const struct i2c_algorithm smbus_algorithm = {
@@ -1022,8 +1131,7 @@ static void __init input_apanel_init(void) {}
static void i801_probe_optional_slaves(struct i801_priv *priv) {}
#endif /* CONFIG_X86 && CONFIG_DMI */
-#if (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE) && \
- defined CONFIG_DMI
+#if IS_ENABLED(CONFIG_I2C_MUX_GPIO) && defined CONFIG_DMI
static struct i801_mux_config i801_mux_config_asus_z8_d12 = {
.gpio_chip = "gpio_ich",
.values = { 0x02, 0x03 },
@@ -1379,6 +1487,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
priv->features |= FEATURE_SMBUS_PEC;
priv->features |= FEATURE_BLOCK_BUFFER;
priv->features |= FEATURE_TCO;
+ priv->features |= FEATURE_HOST_NOTIFY;
break;
case PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0:
@@ -1398,6 +1507,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
priv->features |= FEATURE_BLOCK_BUFFER;
/* fall through */
case PCI_DEVICE_ID_INTEL_82801CA_3:
+ priv->features |= FEATURE_HOST_NOTIFY;
+ /* fall through */
case PCI_DEVICE_ID_INTEL_82801BA_2:
case PCI_DEVICE_ID_INTEL_82801AB_3:
case PCI_DEVICE_ID_INTEL_82801AA_3:
@@ -1507,6 +1618,15 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
return err;
}
+ /*
+	 * Enable Host Notify for chips that support it.
+	 * This is done after i2c_add_adapter() so that we are sure the work
+	 * queue is not used if i2c_add_adapter() fails.
+ */
+ err = i801_enable_host_notify(&priv->adapter);
+ if (err && err != -ENOTSUPP)
+ dev_warn(&dev->dev, "Unable to enable SMBus Host Notify\n");
+
i801_probe_optional_slaves(priv);
/* We ignore errors - multiplexing is optional */
i801_add_mux(priv);
@@ -1553,6 +1673,14 @@ static int i801_suspend(struct device *dev)
static int i801_resume(struct device *dev)
{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct i801_priv *priv = pci_get_drvdata(pci_dev);
+ int err;
+
+ err = i801_enable_host_notify(&priv->adapter);
+ if (err && err != -ENOTSUPP)
+ dev_warn(dev, "Unable to enable SMBus Host Notify\n");
+
return 0;
}
#endif
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index ba14a863b..cd9872594 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -791,10 +791,6 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0x0);
- i2c->cmd = 0;
- memset(i2c->cmd_buf, 0, BUFSIZE);
- memset(i2c->data_buf, 0, BUFSIZE);
-
i2c->irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0,
dev_name(&pdev->dev), i2c);
diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c
index 71d3929ad..76e289809 100644
--- a/drivers/i2c/busses/i2c-meson.c
+++ b/drivers/i2c/busses/i2c-meson.c
@@ -211,7 +211,7 @@ static void meson_i2c_stop(struct meson_i2c *i2c)
meson_i2c_add_token(i2c, TOKEN_STOP);
} else {
i2c->state = STATE_IDLE;
- complete_all(&i2c->done);
+ complete(&i2c->done);
}
}
@@ -238,7 +238,7 @@ static irqreturn_t meson_i2c_irq(int irqno, void *dev_id)
dev_dbg(i2c->dev, "error bit set\n");
i2c->error = -ENXIO;
i2c->state = STATE_IDLE;
- complete_all(&i2c->done);
+ complete(&i2c->done);
goto out;
}
@@ -269,7 +269,7 @@ static irqreturn_t meson_i2c_irq(int irqno, void *dev_id)
break;
case STATE_STOP:
i2c->state = STATE_IDLE;
- complete_all(&i2c->done);
+ complete(&i2c->done);
break;
case STATE_IDLE:
break;
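The complete_all() to complete() conversion matters because complete_all() saturates the completion's done count, so reusing the completion for the next transfer would require an explicit reinit. With exactly one waiter per transfer, complete() is sufficient; a sketch of the pairing, assuming the driver reinitializes i2c->done before each transfer:

/* Waiter side, one waiter per transfer (sketch): */
reinit_completion(&i2c->done);
/* ... start transfer; the IRQ path calls complete(&i2c->done) ... */
if (!wait_for_completion_timeout(&i2c->done, i2c->adap.timeout))
	dev_dbg(i2c->dev, "transfer timed out\n");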
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index dfa7a4b4a..ac88a5241 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -379,6 +379,7 @@ static int ocores_i2c_of_probe(struct platform_device *pdev,
if (!clock_frequency_present) {
dev_err(&pdev->dev,
"Missing required parameter 'opencores,ip-clock-frequency'\n");
+ clk_disable_unprepare(i2c->clk);
return -ENODEV;
}
i2c->ip_clock_khz = clock_frequency / 1000;
@@ -467,20 +468,21 @@ static int ocores_i2c_probe(struct platform_device *pdev)
default:
dev_err(&pdev->dev, "Unsupported I/O width (%d)\n",
i2c->reg_io_width);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_clk;
}
}
ret = ocores_init(&pdev->dev, i2c);
if (ret)
- return ret;
+ goto err_clk;
init_waitqueue_head(&i2c->wait);
ret = devm_request_irq(&pdev->dev, irq, ocores_isr, 0,
pdev->name, i2c);
if (ret) {
dev_err(&pdev->dev, "Cannot claim IRQ\n");
- return ret;
+ goto err_clk;
}
/* hook up driver to tree */
@@ -494,7 +496,7 @@ static int ocores_i2c_probe(struct platform_device *pdev)
ret = i2c_add_adapter(&i2c->adap);
if (ret) {
dev_err(&pdev->dev, "Failed to add adapter\n");
- return ret;
+ goto err_clk;
}
/* add in known devices to the bus */
@@ -504,6 +506,10 @@ static int ocores_i2c_probe(struct platform_device *pdev)
}
return 0;
+
+err_clk:
+ clk_disable_unprepare(i2c->clk);
+ return ret;
}
static int ocores_i2c_remove(struct platform_device *pdev)
diff --git a/drivers/i2c/busses/i2c-opal.c b/drivers/i2c/busses/i2c-opal.c
index 75dd6d041..11e2a1fc1 100644
--- a/drivers/i2c/busses/i2c-opal.c
+++ b/drivers/i2c/busses/i2c-opal.c
@@ -71,7 +71,7 @@ static int i2c_opal_send_request(u32 bus_id, struct opal_i2c_request *req)
if (rc)
goto exit;
- rc = be64_to_cpu(msg.params[1]);
+ rc = opal_get_async_rc(msg);
if (rc != OPAL_SUCCESS) {
rc = i2c_opal_translate_error(rc);
goto exit;
diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c
index e0eb4ca01..ba88f17f6 100644
--- a/drivers/i2c/busses/i2c-pca-isa.c
+++ b/drivers/i2c/busses/i2c-pca-isa.c
@@ -193,23 +193,12 @@ static struct isa_driver pca_isa_driver = {
}
};
-static int __init pca_isa_init(void)
-{
- return isa_register_driver(&pca_isa_driver, 1);
-}
-
-static void __exit pca_isa_exit(void)
-{
- isa_unregister_driver(&pca_isa_driver);
-}
-
MODULE_AUTHOR("Ian Campbell <icampbell@arcom.com>");
MODULE_DESCRIPTION("ISA base PCA9564/PCA9665 driver");
MODULE_LICENSE("GPL");
module_param(base, ulong, 0);
MODULE_PARM_DESC(base, "I/O base address");
-
module_param(irq, int, 0);
MODULE_PARM_DESC(irq, "IRQ");
module_param(clock, int, 0);
@@ -220,6 +209,4 @@ MODULE_PARM_DESC(clock, "Clock rate in hertz.\n\t\t"
"\t\t\t\tFast: 100100 - 400099\n"
"\t\t\t\tFast+: 400100 - 10000099\n"
"\t\t\t\tTurbo: Up to 1265800");
-
-module_init(pca_isa_init);
-module_exit(pca_isa_exit);
+module_isa_driver(pca_isa_driver, 1);
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 57256bb42..a8497cfda 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -213,14 +213,16 @@ static irqreturn_t qup_i2c_interrupt(int irq, void *dev)
bus_err &= I2C_STATUS_ERROR_MASK;
qup_err &= QUP_STATUS_ERROR_FLAGS;
- if (qup_err) {
- /* Clear Error interrupt */
+ /* Clear the error bits in QUP_ERROR_FLAGS */
+ if (qup_err)
writel(qup_err, qup->base + QUP_ERROR_FLAGS);
- goto done;
- }
- if (bus_err) {
- /* Clear Error interrupt */
+ /* Clear the error bits in QUP_I2C_STATUS */
+ if (bus_err)
+ writel(bus_err, qup->base + QUP_I2C_STATUS);
+
+ /* Reset the QUP State in case of error */
+ if (qup_err || bus_err) {
writel(QUP_RESET_STATE, qup->base + QUP_STATE);
goto done;
}
@@ -310,6 +312,7 @@ static int qup_i2c_wait_ready(struct qup_i2c_dev *qup, int op, bool val,
u32 opflags;
u32 status;
u32 shift = __ffs(op);
+ int ret = 0;
len *= qup->one_byte_t;
/* timeout after a wait of twice the max time */
@@ -321,18 +324,28 @@ static int qup_i2c_wait_ready(struct qup_i2c_dev *qup, int op, bool val,
if (((opflags & op) >> shift) == val) {
if ((op == QUP_OUT_NOT_EMPTY) && qup->is_last) {
- if (!(status & I2C_STATUS_BUS_ACTIVE))
- return 0;
+ if (!(status & I2C_STATUS_BUS_ACTIVE)) {
+ ret = 0;
+ goto done;
+ }
} else {
- return 0;
+ ret = 0;
+ goto done;
}
}
- if (time_after(jiffies, timeout))
- return -ETIMEDOUT;
-
+ if (time_after(jiffies, timeout)) {
+ ret = -ETIMEDOUT;
+ goto done;
+ }
usleep_range(len, len * 2);
}
+
+done:
+ if (qup->bus_err || qup->qup_err)
+ ret = (qup->bus_err & QUP_I2C_NACK_FLAG) ? -ENXIO : -EIO;
+
+ return ret;
}
static void qup_i2c_set_write_mode_v2(struct qup_i2c_dev *qup,
@@ -585,8 +598,8 @@ static void qup_i2c_bam_cb(void *data)
}
static int qup_sg_set_buf(struct scatterlist *sg, void *buf,
- struct qup_i2c_tag *tg, unsigned int buflen,
- struct qup_i2c_dev *qup, int map, int dir)
+ unsigned int buflen, struct qup_i2c_dev *qup,
+ int dir)
{
int ret;
@@ -595,9 +608,6 @@ static int qup_sg_set_buf(struct scatterlist *sg, void *buf,
if (!ret)
return -EINVAL;
- if (!map)
- sg_dma_address(sg) = tg->addr + ((u8 *)buf - tg->start);
-
return 0;
}
@@ -649,37 +659,37 @@ static int qup_i2c_bam_do_xfer(struct qup_i2c_dev *qup, struct i2c_msg *msg,
u8 *tags;
while (idx < num) {
- blocks = (msg->len + limit) / limit;
- rem = msg->len % limit;
tx_len = 0, len = 0, i = 0;
qup->is_last = (idx == (num - 1));
qup_i2c_set_blk_data(qup, msg);
+ blocks = qup->blk.count;
+ rem = msg->len - (blocks - 1) * limit;
+
if (msg->flags & I2C_M_RD) {
rx_nents += (blocks * 2) + 1;
tx_nents += 1;
while (qup->blk.pos < blocks) {
- /* length set to '0' implies 256 bytes */
- tlen = (i == (blocks - 1)) ? rem : 0;
+ tlen = (i == (blocks - 1)) ? rem : limit;
tags = &qup->start_tag.start[off + len];
len += qup_i2c_set_tags(tags, qup, msg, 1);
+ qup->blk.data_len -= tlen;
/* scratch buf to read the start and len tags */
ret = qup_sg_set_buf(&qup->brx.sg[rx_buf++],
&qup->brx.tag.start[0],
- &qup->brx.tag,
- 2, qup, 0, 0);
+ 2, qup, DMA_FROM_DEVICE);
if (ret)
return ret;
ret = qup_sg_set_buf(&qup->brx.sg[rx_buf++],
&msg->buf[limit * i],
- NULL, tlen, qup,
- 1, DMA_FROM_DEVICE);
+ tlen, qup,
+ DMA_FROM_DEVICE);
if (ret)
return ret;
@@ -688,7 +698,7 @@ static int qup_i2c_bam_do_xfer(struct qup_i2c_dev *qup, struct i2c_msg *msg,
}
ret = qup_sg_set_buf(&qup->btx.sg[tx_buf++],
&qup->start_tag.start[off],
- &qup->start_tag, len, qup, 0, 0);
+ len, qup, DMA_TO_DEVICE);
if (ret)
return ret;
@@ -696,30 +706,28 @@ static int qup_i2c_bam_do_xfer(struct qup_i2c_dev *qup, struct i2c_msg *msg,
/* scratch buf to read the BAM EOT and FLUSH tags */
ret = qup_sg_set_buf(&qup->brx.sg[rx_buf++],
&qup->brx.tag.start[0],
- &qup->brx.tag, 2,
- qup, 0, 0);
+ 2, qup, DMA_FROM_DEVICE);
if (ret)
return ret;
} else {
tx_nents += (blocks * 2);
while (qup->blk.pos < blocks) {
- tlen = (i == (blocks - 1)) ? rem : 0;
+ tlen = (i == (blocks - 1)) ? rem : limit;
tags = &qup->start_tag.start[off + tx_len];
len = qup_i2c_set_tags(tags, qup, msg, 1);
+ qup->blk.data_len -= tlen;
ret = qup_sg_set_buf(&qup->btx.sg[tx_buf++],
- tags,
- &qup->start_tag, len,
- qup, 0, 0);
+ tags, len,
+ qup, DMA_TO_DEVICE);
if (ret)
return ret;
tx_len += len;
ret = qup_sg_set_buf(&qup->btx.sg[tx_buf++],
&msg->buf[limit * i],
- NULL, tlen, qup, 1,
- DMA_TO_DEVICE);
+ tlen, qup, DMA_TO_DEVICE);
if (ret)
return ret;
i++;
@@ -738,8 +746,7 @@ static int qup_i2c_bam_do_xfer(struct qup_i2c_dev *qup, struct i2c_msg *msg,
QUP_BAM_FLUSH_STOP;
ret = qup_sg_set_buf(&qup->btx.sg[tx_buf++],
&qup->btx.tag.start[0],
- &qup->btx.tag, len,
- qup, 0, 0);
+ len, qup, DMA_TO_DEVICE);
if (ret)
return ret;
tx_nents += 1;
@@ -801,39 +808,35 @@ static int qup_i2c_bam_do_xfer(struct qup_i2c_dev *qup, struct i2c_msg *msg,
}
if (ret || qup->bus_err || qup->qup_err) {
- if (qup->bus_err & QUP_I2C_NACK_FLAG) {
- msg--;
- dev_err(qup->dev, "NACK from %x\n", msg->addr);
- ret = -EIO;
+ if (qup_i2c_change_state(qup, QUP_RUN_STATE)) {
+ dev_err(qup->dev, "change to run state timed out");
+ goto desc_err;
+ }
- if (qup_i2c_change_state(qup, QUP_RUN_STATE)) {
- dev_err(qup->dev, "change to run state timed out");
- return ret;
- }
+ if (rx_nents)
+ writel(QUP_BAM_INPUT_EOT,
+ qup->base + QUP_OUT_FIFO_BASE);
- if (rx_nents)
- writel(QUP_BAM_INPUT_EOT,
- qup->base + QUP_OUT_FIFO_BASE);
+ writel(QUP_BAM_FLUSH_STOP, qup->base + QUP_OUT_FIFO_BASE);
- writel(QUP_BAM_FLUSH_STOP,
- qup->base + QUP_OUT_FIFO_BASE);
+ qup_i2c_flush(qup);
- qup_i2c_flush(qup);
+ /* wait for remaining interrupts to occur */
+ if (!wait_for_completion_timeout(&qup->xfer, HZ))
+ dev_err(qup->dev, "flush timed out\n");
- /* wait for remaining interrupts to occur */
- if (!wait_for_completion_timeout(&qup->xfer, HZ))
- dev_err(qup->dev, "flush timed out\n");
+ qup_i2c_rel_dma(qup);
- qup_i2c_rel_dma(qup);
- }
+ ret = (qup->bus_err & QUP_I2C_NACK_FLAG) ? -ENXIO : -EIO;
}
+desc_err:
dma_unmap_sg(qup->dev, qup->btx.sg, tx_nents, DMA_TO_DEVICE);
if (rx_nents)
dma_unmap_sg(qup->dev, qup->brx.sg, rx_nents,
DMA_FROM_DEVICE);
-desc_err:
+
return ret;
}
@@ -849,9 +852,6 @@ static int qup_i2c_bam_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
if (ret)
goto out;
- qup->bus_err = 0;
- qup->qup_err = 0;
-
writel(0, qup->base + QUP_MX_INPUT_CNT);
writel(0, qup->base + QUP_MX_OUTPUT_CNT);
@@ -889,12 +889,8 @@ static int qup_i2c_wait_for_complete(struct qup_i2c_dev *qup,
ret = -ETIMEDOUT;
}
- if (qup->bus_err || qup->qup_err) {
- if (qup->bus_err & QUP_I2C_NACK_FLAG) {
- dev_err(qup->dev, "NACK from %x\n", msg->addr);
- ret = -EIO;
- }
- }
+ if (qup->bus_err || qup->qup_err)
+ ret = (qup->bus_err & QUP_I2C_NACK_FLAG) ? -ENXIO : -EIO;
return ret;
}
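Mapping a NACK to -ENXIO rather than the generic -EIO follows Documentation/i2c/fault-codes, which reserves -ENXIO for an unacknowledged address; both the FIFO and BAM paths now share this pattern:

/* Shared error mapping (sketch of the logic introduced above):
 *   -ENXIO : device did not ACK (QUP_I2C_NACK_FLAG set)
 *   -EIO   : any other low-level bus or QUP error
 */
if (qup->bus_err || qup->qup_err)
	ret = (qup->bus_err & QUP_I2C_NACK_FLAG) ? -ENXIO : -EIO;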
@@ -1020,7 +1016,7 @@ static void qup_i2c_issue_read(struct qup_i2c_dev *qup, struct i2c_msg *msg)
{
u32 addr, len, val;
- addr = (msg->addr << 1) | 1;
+ addr = i2c_8bit_addr_from_msg(msg);
/* 0 is used to specify a length 256 (QUP_READ_LIMIT) */
len = (msg->len == QUP_READ_LIMIT) ? 0 : msg->len;
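i2c_8bit_addr_from_msg() is the generic helper for building the on-wire address byte; its behavior is roughly the following (see <linux/i2c.h> for the canonical inline):

static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg)
{
	/* 7-bit address in bits 7:1, read/write flag in bit 0 */
	return (msg->addr << 1) | (msg->flags & I2C_M_RD ? 1 : 0);
}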
@@ -1186,6 +1182,9 @@ static int qup_i2c_xfer(struct i2c_adapter *adap,
if (ret < 0)
goto out;
+ qup->bus_err = 0;
+ qup->qup_err = 0;
+
writel(1, qup->base + QUP_SW_RESET);
ret = qup_i2c_poll_state(qup, QUP_RESET_STATE);
if (ret)
@@ -1235,6 +1234,9 @@ static int qup_i2c_xfer_v2(struct i2c_adapter *adap,
struct qup_i2c_dev *qup = i2c_get_adapdata(adap);
int ret, len, idx = 0, use_dma = 0;
+ qup->bus_err = 0;
+ qup->qup_err = 0;
+
ret = pm_runtime_get_sync(qup->dev);
if (ret < 0)
goto out;
@@ -1409,27 +1411,21 @@ static int qup_i2c_probe(struct platform_device *pdev)
/* 2 tag bytes for each block + 5 for start, stop tags */
size = blocks * 2 + 5;
- qup->dpool = dma_pool_create("qup_i2c-dma-pool", &pdev->dev,
- size, 4, 0);
- qup->start_tag.start = dma_pool_alloc(qup->dpool, GFP_KERNEL,
- &qup->start_tag.addr);
+ qup->start_tag.start = devm_kzalloc(&pdev->dev,
+ size, GFP_KERNEL);
if (!qup->start_tag.start) {
ret = -ENOMEM;
goto fail_dma;
}
- qup->brx.tag.start = dma_pool_alloc(qup->dpool,
- GFP_KERNEL,
- &qup->brx.tag.addr);
+ qup->brx.tag.start = devm_kzalloc(&pdev->dev, 2, GFP_KERNEL);
if (!qup->brx.tag.start) {
ret = -ENOMEM;
goto fail_dma;
}
- qup->btx.tag.start = dma_pool_alloc(qup->dpool,
- GFP_KERNEL,
- &qup->btx.tag.addr);
+ qup->btx.tag.start = devm_kzalloc(&pdev->dev, 2, GFP_KERNEL);
if (!qup->btx.tag.start) {
ret = -ENOMEM;
goto fail_dma;
@@ -1568,13 +1564,6 @@ static int qup_i2c_remove(struct platform_device *pdev)
struct qup_i2c_dev *qup = platform_get_drvdata(pdev);
if (qup->is_dma) {
- dma_pool_free(qup->dpool, qup->start_tag.start,
- qup->start_tag.addr);
- dma_pool_free(qup->dpool, qup->brx.tag.start,
- qup->brx.tag.addr);
- dma_pool_free(qup->dpool, qup->btx.tag.start,
- qup->btx.tag.addr);
- dma_pool_destroy(qup->dpool);
dma_release_channel(qup->btx.dma);
dma_release_channel(qup->brx.dma);
}
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 52407f3c9..9bd849dac 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -378,7 +378,7 @@ static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
}
dma_addr = dma_map_single(chan->device->dev, buf, len, dir);
- if (dma_mapping_error(dev, dma_addr)) {
+ if (dma_mapping_error(chan->device->dev, dma_addr)) {
dev_dbg(dev, "dma map failed, using PIO\n");
return;
}
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 80bed02cd..5c5b7cada 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -58,6 +58,12 @@ enum {
#define REG_CON_LASTACK BIT(5) /* 1: send NACK after last received byte */
#define REG_CON_ACTACK BIT(6) /* 1: stop if NACK is received */
+#define REG_CON_TUNING_MASK GENMASK(15, 8)
+
+#define REG_CON_SDA_CFG(cfg) ((cfg) << 8)
+#define REG_CON_STA_CFG(cfg) ((cfg) << 12)
+#define REG_CON_STO_CFG(cfg) ((cfg) << 14)
+
/* REG_MRXADDR bits */
#define REG_MRXADDR_VALID(x) BIT(24 + (x)) /* [x*8+7:x*8] of MRX[R]ADDR valid */
@@ -75,6 +81,77 @@ enum {
#define WAIT_TIMEOUT 1000 /* ms */
#define DEFAULT_SCL_RATE (100 * 1000) /* Hz */
+/**
+ * struct i2c_spec_values:
+ * @min_hold_start_ns: min hold time (repeated) START condition
+ * @min_low_ns: min LOW period of the SCL clock
+ * @min_high_ns: min HIGH period of the SCL clock
+ * @min_setup_start_ns: min set-up time for a repeated START condition
+ * @max_data_hold_ns: max data hold time
+ * @min_data_setup_ns: min data set-up time
+ * @min_setup_stop_ns: min set-up time for STOP condition
+ * @min_hold_buffer_ns: min bus free time between a STOP and
+ * START condition
+ */
+struct i2c_spec_values {
+ unsigned long min_hold_start_ns;
+ unsigned long min_low_ns;
+ unsigned long min_high_ns;
+ unsigned long min_setup_start_ns;
+ unsigned long max_data_hold_ns;
+ unsigned long min_data_setup_ns;
+ unsigned long min_setup_stop_ns;
+ unsigned long min_hold_buffer_ns;
+};
+
+static const struct i2c_spec_values standard_mode_spec = {
+ .min_hold_start_ns = 4000,
+ .min_low_ns = 4700,
+ .min_high_ns = 4000,
+ .min_setup_start_ns = 4700,
+ .max_data_hold_ns = 3450,
+ .min_data_setup_ns = 250,
+ .min_setup_stop_ns = 4000,
+ .min_hold_buffer_ns = 4700,
+};
+
+static const struct i2c_spec_values fast_mode_spec = {
+ .min_hold_start_ns = 600,
+ .min_low_ns = 1300,
+ .min_high_ns = 600,
+ .min_setup_start_ns = 600,
+ .max_data_hold_ns = 900,
+ .min_data_setup_ns = 100,
+ .min_setup_stop_ns = 600,
+ .min_hold_buffer_ns = 1300,
+};
+
+static const struct i2c_spec_values fast_mode_plus_spec = {
+ .min_hold_start_ns = 260,
+ .min_low_ns = 500,
+ .min_high_ns = 260,
+ .min_setup_start_ns = 260,
+ .max_data_hold_ns = 400,
+ .min_data_setup_ns = 50,
+ .min_setup_stop_ns = 260,
+ .min_hold_buffer_ns = 500,
+};
+
+/**
+ * struct rk3x_i2c_calced_timings:
+ * @div_low: Divider output for low
+ * @div_high: Divider output for high
+ * @tuning: Used to adjust setup/hold data time,
+ * setup/hold start time and setup stop time for
+ * v1's calc_timings; the tuning should all be 0
+ * for old hardware using v0's calc_timings.
+ */
+struct rk3x_i2c_calced_timings {
+ unsigned long div_low;
+ unsigned long div_high;
+ unsigned int tuning;
+};
+
enum rk3x_i2c_state {
STATE_IDLE,
STATE_START,
@@ -85,11 +162,35 @@ enum rk3x_i2c_state {
/**
* @grf_offset: offset inside the grf regmap for setting the i2c type
+ * @calc_timings: Callback function for calculating i2c timing information
*/
struct rk3x_i2c_soc_data {
int grf_offset;
+ int (*calc_timings)(unsigned long, struct i2c_timings *,
+ struct rk3x_i2c_calced_timings *);
};
+/**
+ * struct rk3x_i2c - private data of the controller
+ * @adap: corresponding I2C adapter
+ * @dev: device for this controller
+ * @soc_data: related soc data struct
+ * @regs: virtual memory area
+ * @clk: function clk for rk3399 or function & Bus clks for others
+ * @pclk: Bus clk for rk3399
+ * @clk_rate_nb: i2c clk rate change notify
+ * @t: I2C known timing information
+ * @lock: spinlock for the i2c bus
+ * @wait: the waitqueue to wait for i2c transfer
+ * @busy: the condition for the event to wait for
+ * @msg: current i2c message
+ * @addr: addr of i2c slave device
+ * @mode: mode of i2c transfer
+ * @is_last_msg: flag determines whether it is the last msg in this transfer
+ * @state: state of i2c transfer
+ * @processed: byte length which has been sent or received
+ * @error: error code for i2c transfer
+ */
struct rk3x_i2c {
struct i2c_adapter adap;
struct device *dev;
@@ -98,6 +199,7 @@ struct rk3x_i2c {
/* Hardware resources */
void __iomem *regs;
struct clk *clk;
+ struct clk *pclk;
struct notifier_block clk_rate_nb;
/* Settings */
@@ -116,7 +218,7 @@ struct rk3x_i2c {
/* I2C state machine */
enum rk3x_i2c_state state;
- unsigned int processed; /* sent/received bytes */
+ unsigned int processed;
int error;
};
@@ -142,13 +244,12 @@ static inline void rk3x_i2c_clean_ipd(struct rk3x_i2c *i2c)
*/
static void rk3x_i2c_start(struct rk3x_i2c *i2c)
{
- u32 val;
+ u32 val = i2c_readl(i2c, REG_CON) & REG_CON_TUNING_MASK;
- rk3x_i2c_clean_ipd(i2c);
i2c_writel(i2c, REG_INT_START, REG_IEN);
/* enable adapter with correct mode, send START condition */
- val = REG_CON_EN | REG_CON_MOD(i2c->mode) | REG_CON_START;
+ val |= REG_CON_EN | REG_CON_MOD(i2c->mode) | REG_CON_START;
/* if we want to react to NACK, set ACTACK bit */
if (!(i2c->msg->flags & I2C_M_IGNORE_NAK))
@@ -189,7 +290,8 @@ static void rk3x_i2c_stop(struct rk3x_i2c *i2c, int error)
* get the intended effect by resetting its internal state
* and issuing an ordinary START.
*/
- i2c_writel(i2c, 0, REG_CON);
+ ctrl = i2c_readl(i2c, REG_CON) & REG_CON_TUNING_MASK;
+ i2c_writel(i2c, ctrl, REG_CON);
/* signal that we are finished with the current msg */
wake_up(&i2c->wait);
@@ -431,26 +533,37 @@ out:
}
/**
+ * Get timing values of I2C specification
+ *
+ * @speed: Desired SCL frequency
+ *
+ * Returns: Matched i2c spec values.
+ */
+static const struct i2c_spec_values *rk3x_i2c_get_spec(unsigned int speed)
+{
+ if (speed <= 100000)
+ return &standard_mode_spec;
+ else if (speed <= 400000)
+ return &fast_mode_spec;
+ else
+ return &fast_mode_plus_spec;
+}
+
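For example, a 400 kHz target selects the fast-mode table; a sketch of how the spec tables above are consumed:

/* Sketch: spec selection for a 400 kHz bus */
const struct i2c_spec_values *spec = rk3x_i2c_get_spec(400000);
/* spec->min_low_ns == 1300, spec->min_high_ns == 600 (fast-mode limits) */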
+/**
* Calculate divider values for desired SCL frequency
*
* @clk_rate: I2C input clock rate
- * @t: Known I2C timing information.
- * @div_low: Divider output for low
- * @div_high: Divider output for high
+ * @t: Known I2C timing information
+ * @t_calc: Calculated rk3x private timings that would be written into regs
*
* Returns: 0 on success, -EINVAL if the goal SCL rate is too slow. In that case
* a best-effort divider value is returned in divs. If the target rate is
* too high, we silently use the highest possible rate.
*/
-static int rk3x_i2c_calc_divs(unsigned long clk_rate,
- struct i2c_timings *t,
- unsigned long *div_low,
- unsigned long *div_high)
+static int rk3x_i2c_v0_calc_timings(unsigned long clk_rate,
+ struct i2c_timings *t,
+ struct rk3x_i2c_calced_timings *t_calc)
{
- unsigned long spec_min_low_ns, spec_min_high_ns;
- unsigned long spec_setup_start, spec_max_data_hold_ns;
- unsigned long data_hold_buffer_ns;
-
unsigned long min_low_ns, min_high_ns;
unsigned long max_low_ns, min_total_ns;
@@ -462,6 +575,8 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate,
unsigned long min_div_for_hold, min_total_div;
unsigned long extra_div, extra_low_div, ideal_low_div;
+ unsigned long data_hold_buffer_ns = 50;
+ const struct i2c_spec_values *spec;
int ret = 0;
/* Only support standard-mode and fast-mode */
@@ -484,22 +599,8 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate,
* This is because the i2c host on Rockchip holds the data line
* for half the low time.
*/
- if (t->bus_freq_hz <= 100000) {
- /* Standard-mode */
- spec_min_low_ns = 4700;
- spec_setup_start = 4700;
- spec_min_high_ns = 4000;
- spec_max_data_hold_ns = 3450;
- data_hold_buffer_ns = 50;
- } else {
- /* Fast-mode */
- spec_min_low_ns = 1300;
- spec_setup_start = 600;
- spec_min_high_ns = 600;
- spec_max_data_hold_ns = 900;
- data_hold_buffer_ns = 50;
- }
- min_high_ns = t->scl_rise_ns + spec_min_high_ns;
+ spec = rk3x_i2c_get_spec(t->bus_freq_hz);
+ min_high_ns = t->scl_rise_ns + spec->min_high_ns;
/*
* Timings for repeated start:
@@ -509,14 +610,14 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate,
* We need to account for those rules in picking our "high" time so
* we meet tSU;STA and tHD;STA times.
*/
- min_high_ns = max(min_high_ns,
- DIV_ROUND_UP((t->scl_rise_ns + spec_setup_start) * 1000, 875));
- min_high_ns = max(min_high_ns,
- DIV_ROUND_UP((t->scl_rise_ns + spec_setup_start +
- t->sda_fall_ns + spec_min_high_ns), 2));
-
- min_low_ns = t->scl_fall_ns + spec_min_low_ns;
- max_low_ns = spec_max_data_hold_ns * 2 - data_hold_buffer_ns;
+ min_high_ns = max(min_high_ns, DIV_ROUND_UP(
+ (t->scl_rise_ns + spec->min_setup_start_ns) * 1000, 875));
+ min_high_ns = max(min_high_ns, DIV_ROUND_UP(
+ (t->scl_rise_ns + spec->min_setup_start_ns + t->sda_fall_ns +
+ spec->min_high_ns), 2));
+
+ min_low_ns = t->scl_fall_ns + spec->min_low_ns;
+ max_low_ns = spec->max_data_hold_ns * 2 - data_hold_buffer_ns;
min_total_ns = min_low_ns + min_high_ns;
/* Adjust to avoid overflow */
@@ -552,8 +653,8 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate,
* Time needed to meet hold requirements is important.
* Just use that.
*/
- *div_low = min_low_div;
- *div_high = min_high_div;
+ t_calc->div_low = min_low_div;
+ t_calc->div_high = min_high_div;
} else {
/*
* We've got to distribute some time among the low and high
@@ -582,25 +683,186 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate,
/* Give low the "ideal" and give high whatever extra is left */
extra_low_div = ideal_low_div - min_low_div;
- *div_low = ideal_low_div;
- *div_high = min_high_div + (extra_div - extra_low_div);
+ t_calc->div_low = ideal_low_div;
+ t_calc->div_high = min_high_div + (extra_div - extra_low_div);
}
/*
* Adjust to the fact that the hardware has an implicit "+1".
* NOTE: Above calculations always produce div_low > 0 and div_high > 0.
*/
- *div_low = *div_low - 1;
- *div_high = *div_high - 1;
+ t_calc->div_low--;
+ t_calc->div_high--;
/* Maximum divider supported by hw is 0xffff */
- if (*div_low > 0xffff) {
- *div_low = 0xffff;
+ if (t_calc->div_low > 0xffff) {
+ t_calc->div_low = 0xffff;
ret = -EINVAL;
}
- if (*div_high > 0xffff) {
- *div_high = 0xffff;
+ if (t_calc->div_high > 0xffff) {
+ t_calc->div_high = 0xffff;
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/**
+ * Calculate timing values for desired SCL frequency
+ *
+ * @clk_rate: I2C input clock rate
+ * @t: Known I2C timing information
+ * @t_calc: Calculated rk3x private timings that would be written into regs
+ *
+ * Returns: 0 on success, -EINVAL if the goal SCL rate is too slow. In that case
+ * a best-effort divider value is returned in divs. If the target rate is
+ * too high, we silently use the highest possible rate.
+ * The following formulas are v1's method to calculate timings.
+ *
+ * l = divl + 1;
+ * h = divh + 1;
+ * s = sda_update_config + 1;
+ * u = start_setup_config + 1;
+ * p = stop_setup_config + 1;
+ * T = Tclk_i2c;
+ *
+ * tHigh = 8 * h * T;
+ * tLow = 8 * l * T;
+ *
+ * tHD;sda = (l * s + 1) * T;
+ * tSU;sda = [(8 - s) * l + 1] * T;
+ * tI2C = 8 * (l + h) * T;
+ *
+ * tSU;sta = (8h * u + 1) * T;
+ * tHD;sta = [8h * (u + 1) - 1] * T;
+ * tSU;sto = (8h * p + 1) * T;
+ */
+static int rk3x_i2c_v1_calc_timings(unsigned long clk_rate,
+ struct i2c_timings *t,
+ struct rk3x_i2c_calced_timings *t_calc)
+{
+ unsigned long min_low_ns, min_high_ns, min_total_ns;
+ unsigned long min_setup_start_ns, min_setup_data_ns;
+ unsigned long min_setup_stop_ns, max_hold_data_ns;
+
+ unsigned long clk_rate_khz, scl_rate_khz;
+
+ unsigned long min_low_div, min_high_div;
+
+ unsigned long min_div_for_hold, min_total_div;
+ unsigned long extra_div, extra_low_div;
+ unsigned long sda_update_cfg, stp_sta_cfg, stp_sto_cfg;
+
+ const struct i2c_spec_values *spec;
+ int ret = 0;
+
+ /* Support standard-mode, fast-mode and fast-mode plus */
+ if (WARN_ON(t->bus_freq_hz > 1000000))
+ t->bus_freq_hz = 1000000;
+
+ /* prevent scl_rate_khz from becoming 0 */
+ if (WARN_ON(t->bus_freq_hz < 1000))
+ t->bus_freq_hz = 1000;
+
+ /*
+ * min_low_ns: The minimum number of ns we need to hold low to
+ * meet I2C specification, should include fall time.
+ * min_high_ns: The minimum number of ns we need to hold high to
+ * meet I2C specification, should include rise time.
+ */
+ spec = rk3x_i2c_get_spec(t->bus_freq_hz);
+
+ /* calculate min-divh and min-divl */
+ clk_rate_khz = DIV_ROUND_UP(clk_rate, 1000);
+ scl_rate_khz = t->bus_freq_hz / 1000;
+ min_total_div = DIV_ROUND_UP(clk_rate_khz, scl_rate_khz * 8);
+
+ min_high_ns = t->scl_rise_ns + spec->min_high_ns;
+ min_high_div = DIV_ROUND_UP(clk_rate_khz * min_high_ns, 8 * 1000000);
+
+ min_low_ns = t->scl_fall_ns + spec->min_low_ns;
+ min_low_div = DIV_ROUND_UP(clk_rate_khz * min_low_ns, 8 * 1000000);
+
+ /*
+ * Final divh and divl must be greater than 0, otherwise the
+ * hardware would not output the i2c clk.
+ */
+ min_high_div = (min_high_div < 1) ? 2 : min_high_div;
+ min_low_div = (min_low_div < 1) ? 2 : min_low_div;
+
+ /* These are the min dividers needed for min hold times. */
+ min_div_for_hold = (min_low_div + min_high_div);
+ min_total_ns = min_low_ns + min_high_ns;
+
+ /*
+ * This is the maximum divider so we don't go over the maximum.
+ * We don't round up here (we round down) since this is a maximum.
+ */
+ if (min_div_for_hold >= min_total_div) {
+ /*
+ * Time needed to meet hold requirements is important.
+ * Just use that.
+ */
+ t_calc->div_low = min_low_div;
+ t_calc->div_high = min_high_div;
+ } else {
+ /*
+ * We've got to distribute some time among the low and high
+ * so we don't run too fast.
+ * We'll try to split things up by the scale of min_low_div and
+ * min_high_div, biasing slightly towards having a higher div
+ * for low (spend more time low).
+ */
+ extra_div = min_total_div - min_div_for_hold;
+ extra_low_div = DIV_ROUND_UP(min_low_div * extra_div,
+ min_div_for_hold);
+
+ t_calc->div_low = min_low_div + extra_low_div;
+ t_calc->div_high = min_high_div + (extra_div - extra_low_div);
+ }
+
+ /*
+	 * calculate sda data hold count by the rules; data_upd_st:3
+	 * is an appropriate value to reduce calculated times.
+ */
+ for (sda_update_cfg = 3; sda_update_cfg > 0; sda_update_cfg--) {
+ max_hold_data_ns = DIV_ROUND_UP((sda_update_cfg
+ * (t_calc->div_low) + 1)
+ * 1000000, clk_rate_khz);
+ min_setup_data_ns = DIV_ROUND_UP(((8 - sda_update_cfg)
+ * (t_calc->div_low) + 1)
+ * 1000000, clk_rate_khz);
+ if ((max_hold_data_ns < spec->max_data_hold_ns) &&
+ (min_setup_data_ns > spec->min_data_setup_ns))
+ break;
+ }
+
+ /* calculate setup start config */
+ min_setup_start_ns = t->scl_rise_ns + spec->min_setup_start_ns;
+ stp_sta_cfg = DIV_ROUND_UP(clk_rate_khz * min_setup_start_ns
+ - 1000000, 8 * 1000000 * (t_calc->div_high));
+
+ /* calculate setup stop config */
+ min_setup_stop_ns = t->scl_rise_ns + spec->min_setup_stop_ns;
+ stp_sto_cfg = DIV_ROUND_UP(clk_rate_khz * min_setup_stop_ns
+ - 1000000, 8 * 1000000 * (t_calc->div_high));
+
+ t_calc->tuning = REG_CON_SDA_CFG(--sda_update_cfg) |
+ REG_CON_STA_CFG(--stp_sta_cfg) |
+ REG_CON_STO_CFG(--stp_sto_cfg);
+
+ t_calc->div_low--;
+ t_calc->div_high--;
+
+ /* Maximum divider supported by hw is 0xffff */
+ if (t_calc->div_low > 0xffff) {
+ t_calc->div_low = 0xffff;
+ ret = -EINVAL;
+ }
+
+ if (t_calc->div_high > 0xffff) {
+ t_calc->div_high = 0xffff;
ret = -EINVAL;
}
@@ -610,19 +872,31 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate,
static void rk3x_i2c_adapt_div(struct rk3x_i2c *i2c, unsigned long clk_rate)
{
struct i2c_timings *t = &i2c->t;
- unsigned long div_low, div_high;
+ struct rk3x_i2c_calced_timings calc;
u64 t_low_ns, t_high_ns;
+ unsigned long flags;
+ u32 val;
int ret;
- ret = rk3x_i2c_calc_divs(clk_rate, t, &div_low, &div_high);
+ ret = i2c->soc_data->calc_timings(clk_rate, t, &calc);
WARN_ONCE(ret != 0, "Could not reach SCL freq %u", t->bus_freq_hz);
- clk_enable(i2c->clk);
- i2c_writel(i2c, (div_high << 16) | (div_low & 0xffff), REG_CLKDIV);
- clk_disable(i2c->clk);
+ clk_enable(i2c->pclk);
+
+ spin_lock_irqsave(&i2c->lock, flags);
+ val = i2c_readl(i2c, REG_CON);
+ val &= ~REG_CON_TUNING_MASK;
+ val |= calc.tuning;
+ i2c_writel(i2c, val, REG_CON);
+ i2c_writel(i2c, (calc.div_high << 16) | (calc.div_low & 0xffff),
+ REG_CLKDIV);
+ spin_unlock_irqrestore(&i2c->lock, flags);
- t_low_ns = div_u64(((u64)div_low + 1) * 8 * 1000000000, clk_rate);
- t_high_ns = div_u64(((u64)div_high + 1) * 8 * 1000000000, clk_rate);
+ clk_disable(i2c->pclk);
+
+ t_low_ns = div_u64(((u64)calc.div_low + 1) * 8 * 1000000000, clk_rate);
+ t_high_ns = div_u64(((u64)calc.div_high + 1) * 8 * 1000000000,
+ clk_rate);
dev_dbg(i2c->dev,
"CLK %lukhz, Req %uns, Act low %lluns high %lluns\n",
clk_rate / 1000,
@@ -644,7 +918,7 @@ static void rk3x_i2c_adapt_div(struct rk3x_i2c *i2c, unsigned long clk_rate)
* Code adapted from i2c-cadence.c.
*
* Return: NOTIFY_STOP if the rate change should be aborted, NOTIFY_OK
- * to acknowedge the change, NOTIFY_DONE if the notification is
+ * to acknowledge the change, NOTIFY_DONE if the notification is
* considered irrelevant.
*/
static int rk3x_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long
@@ -652,12 +926,17 @@ static int rk3x_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long
{
struct clk_notifier_data *ndata = data;
struct rk3x_i2c *i2c = container_of(nb, struct rk3x_i2c, clk_rate_nb);
- unsigned long div_low, div_high;
+ struct rk3x_i2c_calced_timings calc;
switch (event) {
case PRE_RATE_CHANGE:
- if (rk3x_i2c_calc_divs(ndata->new_rate, &i2c->t,
- &div_low, &div_high) != 0)
+ /*
+ * Try the calculation (but don't store the result) ahead of
+ * time to see if we need to block the clock change. Timings
+ * shouldn't actually take effect until rk3x_i2c_adapt_div().
+ */
+ if (i2c->soc_data->calc_timings(ndata->new_rate, &i2c->t,
+ &calc) != 0)
return NOTIFY_STOP;
/* scale up */
@@ -767,12 +1046,14 @@ static int rk3x_i2c_xfer(struct i2c_adapter *adap,
{
struct rk3x_i2c *i2c = (struct rk3x_i2c *)adap->algo_data;
unsigned long timeout, flags;
+ u32 val;
int ret = 0;
int i;
spin_lock_irqsave(&i2c->lock, flags);
clk_enable(i2c->clk);
+ clk_enable(i2c->pclk);
i2c->is_last_msg = false;
@@ -806,7 +1087,9 @@ static int rk3x_i2c_xfer(struct i2c_adapter *adap,
/* Force a STOP condition without interrupt */
i2c_writel(i2c, 0, REG_IEN);
- i2c_writel(i2c, REG_CON_EN | REG_CON_STOP, REG_CON);
+ val = i2c_readl(i2c, REG_CON) & REG_CON_TUNING_MASK;
+ val |= REG_CON_EN | REG_CON_STOP;
+ i2c_writel(i2c, val, REG_CON);
i2c->state = STATE_IDLE;
@@ -820,12 +1103,23 @@ static int rk3x_i2c_xfer(struct i2c_adapter *adap,
}
}
+ clk_disable(i2c->pclk);
clk_disable(i2c->clk);
+
spin_unlock_irqrestore(&i2c->lock, flags);
return ret < 0 ? ret : num;
}
+static __maybe_unused int rk3x_i2c_resume(struct device *dev)
+{
+ struct rk3x_i2c *i2c = dev_get_drvdata(dev);
+
+ rk3x_i2c_adapt_div(i2c, clk_get_rate(i2c->clk));
+
+ return 0;
+}
+
static u32 rk3x_i2c_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
@@ -836,17 +1130,52 @@ static const struct i2c_algorithm rk3x_i2c_algorithm = {
.functionality = rk3x_i2c_func,
};
-static struct rk3x_i2c_soc_data soc_data[3] = {
- { .grf_offset = 0x154 }, /* rk3066 */
- { .grf_offset = 0x0a4 }, /* rk3188 */
- { .grf_offset = -1 }, /* no I2C switching needed */
+static const struct rk3x_i2c_soc_data rk3066_soc_data = {
+ .grf_offset = 0x154,
+ .calc_timings = rk3x_i2c_v0_calc_timings,
+};
+
+static const struct rk3x_i2c_soc_data rk3188_soc_data = {
+ .grf_offset = 0x0a4,
+ .calc_timings = rk3x_i2c_v0_calc_timings,
+};
+
+static const struct rk3x_i2c_soc_data rk3228_soc_data = {
+ .grf_offset = -1,
+ .calc_timings = rk3x_i2c_v0_calc_timings,
+};
+
+static const struct rk3x_i2c_soc_data rk3288_soc_data = {
+ .grf_offset = -1,
+ .calc_timings = rk3x_i2c_v0_calc_timings,
+};
+
+static const struct rk3x_i2c_soc_data rk3399_soc_data = {
+ .grf_offset = -1,
+ .calc_timings = rk3x_i2c_v1_calc_timings,
};
static const struct of_device_id rk3x_i2c_match[] = {
- { .compatible = "rockchip,rk3066-i2c", .data = (void *)&soc_data[0] },
- { .compatible = "rockchip,rk3188-i2c", .data = (void *)&soc_data[1] },
- { .compatible = "rockchip,rk3228-i2c", .data = (void *)&soc_data[2] },
- { .compatible = "rockchip,rk3288-i2c", .data = (void *)&soc_data[2] },
+ {
+ .compatible = "rockchip,rk3066-i2c",
+ .data = (void *)&rk3066_soc_data
+ },
+ {
+ .compatible = "rockchip,rk3188-i2c",
+ .data = (void *)&rk3188_soc_data
+ },
+ {
+ .compatible = "rockchip,rk3228-i2c",
+ .data = (void *)&rk3228_soc_data
+ },
+ {
+ .compatible = "rockchip,rk3288-i2c",
+ .data = (void *)&rk3288_soc_data
+ },
+ {
+ .compatible = "rockchip,rk3399-i2c",
+ .data = (void *)&rk3399_soc_data
+ },
{},
};
MODULE_DEVICE_TABLE(of, rk3x_i2c_match);
@@ -886,12 +1215,6 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
spin_lock_init(&i2c->lock);
init_waitqueue_head(&i2c->wait);
- i2c->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(i2c->clk)) {
- dev_err(&pdev->dev, "cannot get clock\n");
- return PTR_ERR(i2c->clk);
- }
-
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(i2c->regs))
@@ -945,17 +1268,44 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, i2c);
+ if (i2c->soc_data->calc_timings == rk3x_i2c_v0_calc_timings) {
+ /* Only one clock to use for bus clock and peripheral clock */
+ i2c->clk = devm_clk_get(&pdev->dev, NULL);
+ i2c->pclk = i2c->clk;
+ } else {
+ i2c->clk = devm_clk_get(&pdev->dev, "i2c");
+ i2c->pclk = devm_clk_get(&pdev->dev, "pclk");
+ }
+
+ if (IS_ERR(i2c->clk)) {
+ ret = PTR_ERR(i2c->clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Can't get bus clk: %d\n", ret);
+ return ret;
+ }
+ if (IS_ERR(i2c->pclk)) {
+ ret = PTR_ERR(i2c->pclk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Can't get periph clk: %d\n", ret);
+ return ret;
+ }
+
ret = clk_prepare(i2c->clk);
if (ret < 0) {
- dev_err(&pdev->dev, "Could not prepare clock\n");
+ dev_err(&pdev->dev, "Can't prepare bus clk: %d\n", ret);
return ret;
}
+ ret = clk_prepare(i2c->pclk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Can't prepare periph clock: %d\n", ret);
+ goto err_clk;
+ }
i2c->clk_rate_nb.notifier_call = rk3x_i2c_clk_notifier_cb;
ret = clk_notifier_register(i2c->clk, &i2c->clk_rate_nb);
if (ret != 0) {
dev_err(&pdev->dev, "Unable to register clock notifier\n");
- goto err_clk;
+ goto err_pclk;
}
clk_rate = clk_get_rate(i2c->clk);
@@ -973,6 +1323,8 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
err_clk_notifier:
clk_notifier_unregister(i2c->clk, &i2c->clk_rate_nb);
+err_pclk:
+ clk_unprepare(i2c->pclk);
err_clk:
clk_unprepare(i2c->clk);
return ret;
@@ -985,17 +1337,21 @@ static int rk3x_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&i2c->adap);
clk_notifier_unregister(i2c->clk, &i2c->clk_rate_nb);
+ clk_unprepare(i2c->pclk);
clk_unprepare(i2c->clk);
return 0;
}
+static SIMPLE_DEV_PM_OPS(rk3x_i2c_pm_ops, NULL, rk3x_i2c_resume);
+
static struct platform_driver rk3x_i2c_driver = {
.probe = rk3x_i2c_probe,
.remove = rk3x_i2c_remove,
.driver = {
.name = "rk3x-i2c",
.of_match_table = rk3x_i2c_match,
+ .pm = &rk3x_i2c_pm_ops,
},
};
diff --git a/drivers/i2c/busses/i2c-robotfuzz-osif.c b/drivers/i2c/busses/i2c-robotfuzz-osif.c
index ced9c6a30..89d8b41b6 100644
--- a/drivers/i2c/busses/i2c-robotfuzz-osif.c
+++ b/drivers/i2c/busses/i2c-robotfuzz-osif.c
@@ -125,7 +125,7 @@ static struct i2c_algorithm osif_algorithm = {
#define USB_OSIF_VENDOR_ID 0x1964
#define USB_OSIF_PRODUCT_ID 0x0001
-static struct usb_device_id osif_table[] = {
+static const struct usb_device_id osif_table[] = {
{ USB_DEVICE(USB_OSIF_VENDOR_ID, USB_OSIF_PRODUCT_ID) },
{ }
};
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 6fb3e2645..05b1eeab9 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -610,7 +610,7 @@ static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd)
return;
dma_addr = dma_map_single(chan->device->dev, pd->msg->buf, pd->msg->len, dir);
- if (dma_mapping_error(pd->dev, dma_addr)) {
+ if (dma_mapping_error(chan->device->dev, dma_addr)) {
dev_dbg(pd->dev, "dma map failed, using PIO\n");
return;
}
diff --git a/drivers/i2c/busses/i2c-versatile.c b/drivers/i2c/busses/i2c-versatile.c
index 240637f01..c73d2d220 100644
--- a/drivers/i2c/busses/i2c-versatile.c
+++ b/drivers/i2c/busses/i2c-versatile.c
@@ -70,28 +70,14 @@ static int i2c_versatile_probe(struct platform_device *dev)
struct resource *r;
int ret;
+ i2c = devm_kzalloc(&dev->dev, sizeof(struct i2c_versatile), GFP_KERNEL);
+ if (!i2c)
+ return -ENOMEM;
+
r = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!r) {
- ret = -EINVAL;
- goto err_out;
- }
-
- if (!request_mem_region(r->start, resource_size(r), "versatile-i2c")) {
- ret = -EBUSY;
- goto err_out;
- }
-
- i2c = kzalloc(sizeof(struct i2c_versatile), GFP_KERNEL);
- if (!i2c) {
- ret = -ENOMEM;
- goto err_release;
- }
-
- i2c->base = ioremap(r->start, resource_size(r));
- if (!i2c->base) {
- ret = -ENOMEM;
- goto err_free;
- }
+ i2c->base = devm_ioremap_resource(&dev->dev, r);
+ if (IS_ERR(i2c->base))
+ return PTR_ERR(i2c->base);
writel(SCL | SDA, i2c->base + I2C_CONTROLS);
@@ -105,18 +91,12 @@ static int i2c_versatile_probe(struct platform_device *dev)
i2c->adap.nr = dev->id;
ret = i2c_bit_add_numbered_bus(&i2c->adap);
- if (ret >= 0) {
- platform_set_drvdata(dev, i2c);
- return 0;
- }
-
- iounmap(i2c->base);
- err_free:
- kfree(i2c);
- err_release:
- release_mem_region(r->start, resource_size(r));
- err_out:
- return ret;
+ if (ret < 0)
+ return ret;
+
+ platform_set_drvdata(dev, i2c);
+
+ return 0;
}
static int i2c_versatile_remove(struct platform_device *dev)
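The rewrite leans on managed (devm_*) resources: devm_kzalloc() and devm_ioremap_resource() tie the allocation and the mem-region/ioremap pair to the device's lifetime, so failure paths can simply return and the goto labels disappear. A generic sketch of the pattern (hypothetical foo_probe, not this driver):

static int foo_probe(struct platform_device *pdev)
{
	struct resource *r;
	void __iomem *base;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, r); /* request + ioremap */
	if (IS_ERR(base))
		return PTR_ERR(base); /* devm core unwinds automatically */

	return 0;
}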
diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c
index c941418f0..55a7bef1b 100644
--- a/drivers/i2c/busses/i2c-xlp9xx.c
+++ b/drivers/i2c/busses/i2c-xlp9xx.c
@@ -6,6 +6,7 @@
* warranty of any kind, whether express or implied.
*/
+#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/i2c.h>
#include <linux/init.h>
@@ -341,11 +342,10 @@ static struct i2c_algorithm xlp9xx_i2c_algo = {
static int xlp9xx_i2c_get_frequency(struct platform_device *pdev,
struct xlp9xx_i2c_dev *priv)
{
- struct device_node *np = pdev->dev.of_node;
u32 freq;
int err;
- err = of_property_read_u32(np, "clock-frequency", &freq);
+ err = device_property_read_u32(&pdev->dev, "clock-frequency", &freq);
if (err) {
freq = XLP9XX_I2C_DEFAULT_FREQ;
dev_dbg(&pdev->dev, "using default frequency %u\n", freq);
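device_property_read_u32() reads from whichever firmware node backs the device (DT or ACPI _DSD), so the same lookup now serves the ACPI match table added below; the fallback pattern is:

u32 freq;

/* Sketch: unified property lookup with a default on failure */
if (device_property_read_u32(&pdev->dev, "clock-frequency", &freq))
	freq = XLP9XX_I2C_DEFAULT_FREQ;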
@@ -429,12 +429,21 @@ static const struct of_device_id xlp9xx_i2c_of_match[] = {
{ /* sentinel */ },
};
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xlp9xx_i2c_acpi_ids[] = {
+ {"BRCM9007", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, xlp9xx_i2c_acpi_ids);
+#endif
+
static struct platform_driver xlp9xx_i2c_driver = {
.probe = xlp9xx_i2c_probe,
.remove = xlp9xx_i2c_remove,
.driver = {
.name = "xlp9xx-i2c",
.of_match_table = xlp9xx_i2c_of_match,
+ .acpi_match_table = ACPI_PTR(xlp9xx_i2c_acpi_ids),
},
};
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index af11b6589..da3a02ef4 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -27,6 +27,8 @@
I2C slave support (c) 2014 by Wolfram Sang <wsa@sang-engineering.com>
*/
+#define pr_fmt(fmt) "i2c-core: " fmt
+
#include <dt-bindings/i2c/i2c.h>
#include <asm/uaccess.h>
#include <linux/acpi.h>
@@ -107,12 +109,11 @@ struct acpi_i2c_lookup {
acpi_handle device_handle;
};
-static int acpi_i2c_find_address(struct acpi_resource *ares, void *data)
+static int acpi_i2c_fill_info(struct acpi_resource *ares, void *data)
{
struct acpi_i2c_lookup *lookup = data;
struct i2c_board_info *info = lookup->info;
struct acpi_resource_i2c_serialbus *sb;
- acpi_handle adapter_handle;
acpi_status status;
if (info->addr || ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
@@ -122,80 +123,102 @@ static int acpi_i2c_find_address(struct acpi_resource *ares, void *data)
if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_I2C)
return 1;
- /*
- * Extract the ResourceSource and make sure that the handle matches
- * with the I2C adapter handle.
- */
status = acpi_get_handle(lookup->device_handle,
sb->resource_source.string_ptr,
- &adapter_handle);
- if (ACPI_SUCCESS(status) && adapter_handle == lookup->adapter_handle) {
- info->addr = sb->slave_address;
- if (sb->access_mode == ACPI_I2C_10BIT_MODE)
- info->flags |= I2C_CLIENT_TEN;
- }
+ &lookup->adapter_handle);
+ if (!ACPI_SUCCESS(status))
+ return 1;
+
+ info->addr = sb->slave_address;
+ if (sb->access_mode == ACPI_I2C_10BIT_MODE)
+ info->flags |= I2C_CLIENT_TEN;
return 1;
}
-static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level,
- void *data, void **return_value)
+static int acpi_i2c_get_info(struct acpi_device *adev,
+ struct i2c_board_info *info,
+ acpi_handle *adapter_handle)
{
- struct i2c_adapter *adapter = data;
struct list_head resource_list;
- struct acpi_i2c_lookup lookup;
struct resource_entry *entry;
- struct i2c_board_info info;
- struct acpi_device *adev;
+ struct acpi_i2c_lookup lookup;
int ret;
- if (acpi_bus_get_device(handle, &adev))
- return AE_OK;
- if (acpi_bus_get_status(adev) || !adev->status.present)
- return AE_OK;
+ if (acpi_bus_get_status(adev) || !adev->status.present ||
+ acpi_device_enumerated(adev))
+ return -EINVAL;
- memset(&info, 0, sizeof(info));
- info.fwnode = acpi_fwnode_handle(adev);
+ memset(info, 0, sizeof(*info));
+ info->fwnode = acpi_fwnode_handle(adev);
memset(&lookup, 0, sizeof(lookup));
- lookup.adapter_handle = ACPI_HANDLE(&adapter->dev);
- lookup.device_handle = handle;
- lookup.info = &info;
+ lookup.device_handle = acpi_device_handle(adev);
+ lookup.info = info;
- /*
- * Look up for I2cSerialBus resource with ResourceSource that
- * matches with this adapter.
- */
+ /* Look up for I2cSerialBus resource */
INIT_LIST_HEAD(&resource_list);
ret = acpi_dev_get_resources(adev, &resource_list,
- acpi_i2c_find_address, &lookup);
+ acpi_i2c_fill_info, &lookup);
acpi_dev_free_resource_list(&resource_list);
- if (ret < 0 || !info.addr)
- return AE_OK;
+ if (ret < 0 || !info->addr)
+ return -EINVAL;
+
+ *adapter_handle = lookup.adapter_handle;
/* Then fill IRQ number if any */
ret = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
if (ret < 0)
- return AE_OK;
+ return -EINVAL;
resource_list_for_each_entry(entry, &resource_list) {
if (resource_type(entry->res) == IORESOURCE_IRQ) {
- info.irq = entry->res->start;
+ info->irq = entry->res->start;
break;
}
}
acpi_dev_free_resource_list(&resource_list);
+ strlcpy(info->type, dev_name(&adev->dev), sizeof(info->type));
+
+ return 0;
+}
+
+static void acpi_i2c_register_device(struct i2c_adapter *adapter,
+ struct acpi_device *adev,
+ struct i2c_board_info *info)
+{
adev->power.flags.ignore_parent = true;
- strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type));
- if (!i2c_new_device(adapter, &info)) {
+ acpi_device_set_enumerated(adev);
+
+ if (!i2c_new_device(adapter, info)) {
adev->power.flags.ignore_parent = false;
dev_err(&adapter->dev,
"failed to add I2C device %s from ACPI\n",
dev_name(&adev->dev));
}
+}
+
+static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level,
+ void *data, void **return_value)
+{
+ struct i2c_adapter *adapter = data;
+ struct acpi_device *adev;
+ acpi_handle adapter_handle;
+ struct i2c_board_info info;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+
+ if (acpi_i2c_get_info(adev, &info, &adapter_handle))
+ return AE_OK;
+
+ if (adapter_handle != ACPI_HANDLE(&adapter->dev))
+ return AE_OK;
+
+ acpi_i2c_register_device(adapter, adev, &info);
return AE_OK;
}
@@ -225,8 +248,80 @@ static void acpi_i2c_register_devices(struct i2c_adapter *adap)
dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
}
+static int acpi_i2c_match_adapter(struct device *dev, void *data)
+{
+ struct i2c_adapter *adapter = i2c_verify_adapter(dev);
+
+ if (!adapter)
+ return 0;
+
+ return ACPI_HANDLE(dev) == (acpi_handle)data;
+}
+
+static int acpi_i2c_match_device(struct device *dev, void *data)
+{
+ return ACPI_COMPANION(dev) == data;
+}
+
+static struct i2c_adapter *acpi_i2c_find_adapter_by_handle(acpi_handle handle)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&i2c_bus_type, NULL, handle,
+ acpi_i2c_match_adapter);
+ return dev ? i2c_verify_adapter(dev) : NULL;
+}
+
+static struct i2c_client *acpi_i2c_find_client_by_adev(struct acpi_device *adev)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&i2c_bus_type, NULL, adev, acpi_i2c_match_device);
+ return dev ? i2c_verify_client(dev) : NULL;
+}
+
+static int acpi_i2c_notify(struct notifier_block *nb, unsigned long value,
+ void *arg)
+{
+ struct acpi_device *adev = arg;
+ struct i2c_board_info info;
+ acpi_handle adapter_handle;
+ struct i2c_adapter *adapter;
+ struct i2c_client *client;
+
+ switch (value) {
+ case ACPI_RECONFIG_DEVICE_ADD:
+ if (acpi_i2c_get_info(adev, &info, &adapter_handle))
+ break;
+
+ adapter = acpi_i2c_find_adapter_by_handle(adapter_handle);
+ if (!adapter)
+ break;
+
+ acpi_i2c_register_device(adapter, adev, &info);
+ break;
+ case ACPI_RECONFIG_DEVICE_REMOVE:
+ if (!acpi_device_enumerated(adev))
+ break;
+
+ client = acpi_i2c_find_client_by_adev(adev);
+ if (!client)
+ break;
+
+ i2c_unregister_device(client);
+ put_device(&client->dev);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block i2c_acpi_notifier = {
+ .notifier_call = acpi_i2c_notify,
+};
#else /* CONFIG_ACPI */
static inline void acpi_i2c_register_devices(struct i2c_adapter *adap) { }
+extern struct notifier_block i2c_acpi_notifier;
#endif /* CONFIG_ACPI */
#ifdef CONFIG_ACPI_I2C_OPREGION
@@ -400,7 +495,8 @@ acpi_i2c_space_handler(u32 function, acpi_physical_address command,
break;
default:
- pr_info("protocol(0x%02x) is not supported.\n", accessor_type);
+ dev_warn(&adapter->dev, "protocol 0x%02x not supported for client 0x%02x\n",
+ accessor_type, client->addr);
ret = AE_BAD_PARAMETER;
goto err;
}
@@ -666,6 +762,47 @@ int i2c_recover_bus(struct i2c_adapter *adap)
}
EXPORT_SYMBOL_GPL(i2c_recover_bus);
+static void i2c_init_recovery(struct i2c_adapter *adap)
+{
+ struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
+ char *err_str;
+
+ if (!bri)
+ return;
+
+ if (!bri->recover_bus) {
+ err_str = "no recover_bus() found";
+ goto err;
+ }
+
+ /* Generic GPIO recovery */
+ if (bri->recover_bus == i2c_generic_gpio_recovery) {
+ if (!gpio_is_valid(bri->scl_gpio)) {
+ err_str = "invalid SCL gpio";
+ goto err;
+ }
+
+ if (gpio_is_valid(bri->sda_gpio))
+ bri->get_sda = get_sda_gpio_value;
+ else
+ bri->get_sda = NULL;
+
+ bri->get_scl = get_scl_gpio_value;
+ bri->set_scl = set_scl_gpio_value;
+ } else if (bri->recover_bus == i2c_generic_scl_recovery) {
+ /* Generic SCL recovery */
+ if (!bri->set_scl || !bri->get_scl) {
+ err_str = "no {get|set}_scl() found";
+ goto err;
+ }
+ }
+
+ return;
+ err:
+ dev_err(&adap->dev, "Not using recovery: %s\n", err_str);
+ adap->bus_recovery_info = NULL;
+}
+
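For reference, a minimal sketch of the recovery info an adapter driver would supply so that this helper wires up generic GPIO recovery; the foo_ name and the GPIO numbers are hypothetical:

static struct i2c_bus_recovery_info foo_recovery_info = {
	.recover_bus = i2c_generic_gpio_recovery,
	.scl_gpio    = 21,	/* hypothetical pin; must be gpio_is_valid() */
	.sda_gpio    = 22,	/* optional; enables get_sda() readback */
};

/* then, before i2c_add_adapter(): adap->bus_recovery_info = &foo_recovery_info; */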
static int i2c_device_probe(struct device *dev)
{
struct i2c_client *client = i2c_verify_client(dev);
@@ -1089,6 +1226,8 @@ void i2c_unregister_device(struct i2c_client *client)
{
if (client->dev.of_node)
of_node_clear_flag(client->dev.of_node, OF_POPULATED);
+ if (ACPI_COMPANION(&client->dev))
+ acpi_device_clear_enumerated(ACPI_COMPANION(&client->dev));
device_unregister(&client->dev);
}
EXPORT_SYMBOL_GPL(i2c_unregister_device);
@@ -1145,6 +1284,47 @@ struct i2c_client *i2c_new_dummy(struct i2c_adapter *adapter, u16 address)
}
EXPORT_SYMBOL_GPL(i2c_new_dummy);
+/**
+ * i2c_new_secondary_device - Helper to get the instantiated secondary address
+ * and create the associated device
+ * @client: Handle to the primary client
+ * @name: Handle to specify which secondary address to get
+ * @default_addr: Used as a fallback if no secondary address was specified
+ * Context: can sleep
+ *
+ * I2C clients can be composed of multiple I2C slaves bound together in a single
+ * component. The I2C client driver then binds to the master I2C slave and needs
+ * to create I2C dummy clients to communicate with all the other slaves.
+ *
+ * This function creates and returns an I2C dummy client whose I2C address is
+ * retrieved from the platform firmware based on the given slave name. If no
+ * address is specified by the firmware default_addr is used.
+ *
+ * On DT-based platforms the address is retrieved from the "reg" property entry
+ * cell whose "reg-names" value matches the slave name.
+ *
+ * This returns the new i2c client, which should be saved for later use with
+ * i2c_unregister_device(), or NULL to indicate an error.
+ */
+struct i2c_client *i2c_new_secondary_device(struct i2c_client *client,
+ const char *name,
+ u16 default_addr)
+{
+ struct device_node *np = client->dev.of_node;
+ u32 addr = default_addr;
+ int i;
+
+ if (np) {
+ i = of_property_match_string(np, "reg-names", name);
+ if (i >= 0)
+ of_property_read_u32_index(np, "reg", i, &addr);
+ }
+
+ dev_dbg(&client->adapter->dev, "Address for %s : 0x%x\n", name, addr);
+ return i2c_new_dummy(client->adapter, addr);
+}
+EXPORT_SYMBOL_GPL(i2c_new_secondary_device);
+
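As an illustration, with a hypothetical DT node declaring a named secondary address, a client driver could instantiate the extra slave like this (the binding, node and foo_ name are invented for the example):

/*
 * bridge@39 {
 *         reg = <0x39>, <0x3f>;
 *         reg-names = "main", "cec";
 * };
 */
static int foo_attach_cec(struct i2c_client *client)
{
	struct i2c_client *cec;

	/* picks 0x3f from "reg"/"reg-names"; falls back to 0x3f without DT */
	cec = i2c_new_secondary_device(client, "cec", 0x3f);
	if (!cec)
		return -ENODEV;

	/* ... use cec; i2c_unregister_device(cec) on teardown ... */
	return 0;
}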
/* ------------------------------------------------------------------------- */
/* I2C bus adapters -- one roots each I2C or SMBUS segment */
@@ -1513,7 +1693,7 @@ static int __process_new_adapter(struct device_driver *d, void *data)
static int i2c_register_adapter(struct i2c_adapter *adap)
{
- int res = 0;
+ int res = -EINVAL;
/* Can't register until after driver model init */
if (WARN_ON(!is_registered)) {
@@ -1522,15 +1702,12 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
}
/* Sanity checks */
- if (unlikely(adap->name[0] == '\0')) {
- pr_err("i2c-core: Attempt to register an adapter with "
- "no name!\n");
- return -EINVAL;
- }
- if (unlikely(!adap->algo)) {
- pr_err("i2c-core: Attempt to register adapter '%s' with "
- "no algo!\n", adap->name);
- return -EINVAL;
+ if (WARN(!adap->name[0], "i2c adapter has no name"))
+ goto out_list;
+
+ if (!adap->algo) {
+ pr_err("adapter '%s': no algo supplied!\n", adap->name);
+ goto out_list;
}
if (!adap->lock_bus) {
@@ -1552,8 +1729,10 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
adap->dev.bus = &i2c_bus_type;
adap->dev.type = &i2c_adapter_type;
res = device_register(&adap->dev);
- if (res)
+ if (res) {
+ pr_err("adapter '%s': can't register device (%d)\n", adap->name, res);
goto out_list;
+ }
dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name);
@@ -1569,41 +1748,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
"Failed to create compatibility class link\n");
#endif
- /* bus recovery specific initialization */
- if (adap->bus_recovery_info) {
- struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
-
- if (!bri->recover_bus) {
- dev_err(&adap->dev, "No recover_bus() found, not using recovery\n");
- adap->bus_recovery_info = NULL;
- goto exit_recovery;
- }
-
- /* Generic GPIO recovery */
- if (bri->recover_bus == i2c_generic_gpio_recovery) {
- if (!gpio_is_valid(bri->scl_gpio)) {
- dev_err(&adap->dev, "Invalid SCL gpio, not using recovery\n");
- adap->bus_recovery_info = NULL;
- goto exit_recovery;
- }
+ i2c_init_recovery(adap);
- if (gpio_is_valid(bri->sda_gpio))
- bri->get_sda = get_sda_gpio_value;
- else
- bri->get_sda = NULL;
-
- bri->get_scl = get_scl_gpio_value;
- bri->set_scl = set_scl_gpio_value;
- } else if (bri->recover_bus == i2c_generic_scl_recovery) {
- /* Generic SCL recovery */
- if (!bri->set_scl || !bri->get_scl) {
- dev_err(&adap->dev, "No {get|set}_scl() found, not using recovery\n");
- adap->bus_recovery_info = NULL;
- }
- }
- }
-
-exit_recovery:
/* create pre-declared device nodes */
of_i2c_register_devices(adap);
acpi_i2c_register_devices(adap);
@@ -1635,13 +1781,12 @@ out_list:
*/
static int __i2c_add_numbered_adapter(struct i2c_adapter *adap)
{
- int id;
+ int id;
mutex_lock(&core_lock);
- id = idr_alloc(&i2c_adapter_idr, adap, adap->nr, adap->nr + 1,
- GFP_KERNEL);
+ id = idr_alloc(&i2c_adapter_idr, adap, adap->nr, adap->nr + 1, GFP_KERNEL);
mutex_unlock(&core_lock);
- if (id < 0)
+ if (WARN(id < 0, "couldn't get idr"))
return id == -ENOSPC ? -EBUSY : id;
return i2c_register_adapter(adap);
@@ -1678,7 +1823,7 @@ int i2c_add_adapter(struct i2c_adapter *adapter)
id = idr_alloc(&i2c_adapter_idr, adapter,
__i2c_first_dynamic_bus_num, 0, GFP_KERNEL);
mutex_unlock(&core_lock);
- if (id < 0)
+ if (WARN(id < 0, "couldn't get idr"))
return id;
adapter->nr = id;
@@ -1776,8 +1921,7 @@ void i2c_del_adapter(struct i2c_adapter *adap)
found = idr_find(&i2c_adapter_idr, adap->nr);
mutex_unlock(&core_lock);
if (found != adap) {
- pr_debug("i2c-core: attempting to delete unregistered "
- "adapter [%s]\n", adap->name);
+ pr_debug("attempting to delete unregistered adapter [%s]\n", adap->name);
return;
}
@@ -1937,7 +2081,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
if (res)
return res;
- pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name);
+ pr_debug("driver [%s] registered\n", driver->driver.name);
INIT_LIST_HEAD(&driver->clients);
/* Walk the adapters that are already present */
@@ -1964,7 +2108,7 @@ void i2c_del_driver(struct i2c_driver *driver)
i2c_for_each_dev(driver, __process_removed_driver);
driver_unregister(&driver->driver);
- pr_debug("i2c-core: driver [%s] unregistered\n", driver->driver.name);
+ pr_debug("driver [%s] unregistered\n", driver->driver.name);
}
EXPORT_SYMBOL(i2c_del_driver);
@@ -2055,8 +2199,8 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
put_device(&adap->dev);
if (IS_ERR(client)) {
- pr_err("%s: failed to create for '%s'\n",
- __func__, rd->dn->full_name);
+ dev_err(&adap->dev, "failed to create client for '%s'\n",
+ rd->dn->full_name);
return notifier_from_errno(PTR_ERR(client));
}
break;
@@ -2117,6 +2261,8 @@ static int __init i2c_init(void)
if (IS_ENABLED(CONFIG_OF_DYNAMIC))
WARN_ON(of_reconfig_notifier_register(&i2c_of_notifier));
+ if (IS_ENABLED(CONFIG_ACPI))
+ WARN_ON(acpi_reconfig_notifier_register(&i2c_acpi_notifier));
return 0;
@@ -2132,6 +2278,8 @@ bus_err:
static void __exit i2c_exit(void)
{
+ if (IS_ENABLED(CONFIG_ACPI))
+ WARN_ON(acpi_reconfig_notifier_unregister(&i2c_acpi_notifier));
if (IS_ENABLED(CONFIG_OF_DYNAMIC))
WARN_ON(of_reconfig_notifier_unregister(&i2c_of_notifier));
i2c_del_driver(&dummy_driver);
@@ -2673,7 +2821,7 @@ static int i2c_smbus_check_pec(u8 cpec, struct i2c_msg *msg)
cpec = i2c_smbus_msg_pec(cpec, msg);
if (rpec != cpec) {
- pr_debug("i2c-core: Bad PEC 0x%02x vs. 0x%02x\n",
+ pr_debug("Bad PEC 0x%02x vs. 0x%02x\n",
rpec, cpec);
return -EBADMSG;
}
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 6ecfd7627..66f323fd3 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -485,13 +485,8 @@ static int i2cdev_open(struct inode *inode, struct file *file)
unsigned int minor = iminor(inode);
struct i2c_client *client;
struct i2c_adapter *adap;
- struct i2c_dev *i2c_dev;
-
- i2c_dev = i2c_dev_get_by_minor(minor);
- if (!i2c_dev)
- return -ENODEV;
- adap = i2c_get_adapter(i2c_dev->adap->nr);
+ adap = i2c_get_adapter(minor);
if (!adap)
return -ENODEV;
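This simplification relies on i2c-dev registering its character-device minors 1:1 with adapter numbers, so the detour through the i2c_dev list is unnecessary:

/* e.g. opening /dev/i2c-3 (minor 3) now resolves directly via
 * i2c_get_adapter(3). */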
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index abb55d3e7..b0d2679c6 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -33,7 +33,8 @@ struct i2c_smbus_alert {
struct alert_data {
unsigned short addr;
- u8 flag:1;
+ enum i2c_alert_protocol type;
+ unsigned int data;
};
/* If this is the alerting device, notify its driver */
@@ -56,7 +57,7 @@ static int smbus_do_alert(struct device *dev, void *addrp)
if (client->dev.driver) {
driver = to_i2c_driver(client->dev.driver);
if (driver->alert)
- driver->alert(client, data->flag);
+ driver->alert(client, data->type, data->data);
else
dev_warn(&client->dev, "no driver alert()!\n");
} else
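On the client side, the alert callback now receives the protocol and a payload instead of the single flag bit; a minimal sketch of a handler under the new signature (the foo_ names are hypothetical):

static void foo_alert(struct i2c_client *client,
		      enum i2c_alert_protocol protocol, unsigned int data)
{
	if (protocol != I2C_PROTOCOL_SMBUS_ALERT)
		return;

	/* for SMBALERT#, data carries the flag bit from the ARA response */
	dev_info(&client->dev, "SMBALERT#, flag %u\n", data);
}

static struct i2c_driver foo_driver = {
	.driver	= { .name = "foo" },
	.alert	= foo_alert,
};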
@@ -96,8 +97,9 @@ static void smbus_alert(struct work_struct *work)
if (status < 0)
break;
- data.flag = status & 1;
+ data.data = status & 1;
data.addr = status >> 1;
+ data.type = I2C_PROTOCOL_SMBUS_ALERT;
if (data.addr == prev_addr) {
dev_warn(&ara->dev, "Duplicate SMBALERT# from dev "
@@ -105,7 +107,7 @@ static void smbus_alert(struct work_struct *work)
break;
}
dev_dbg(&ara->dev, "SMBALERT# from dev 0x%02x, flag %d\n",
- data.addr, data.flag);
+ data.addr, data.data);
/* Notify driver for the device which issued the alert */
device_for_each_child(&ara->adapter->dev, &data,
@@ -239,6 +241,108 @@ int i2c_handle_smbus_alert(struct i2c_client *ara)
}
EXPORT_SYMBOL_GPL(i2c_handle_smbus_alert);
+static void smbus_host_notify_work(struct work_struct *work)
+{
+ struct alert_data alert;
+ struct i2c_adapter *adapter;
+ unsigned long flags;
+ u16 payload;
+ u8 addr;
+ struct smbus_host_notify *data;
+
+ data = container_of(work, struct smbus_host_notify, work);
+
+ spin_lock_irqsave(&data->lock, flags);
+ payload = data->payload;
+ addr = data->addr;
+ adapter = data->adapter;
+
+ /* clear the pending bit and release the spinlock */
+ data->pending = false;
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ if (!adapter || !addr)
+ return;
+
+ alert.type = I2C_PROTOCOL_SMBUS_HOST_NOTIFY;
+ alert.addr = addr;
+ alert.data = payload;
+
+ device_for_each_child(&adapter->dev, &alert, smbus_do_alert);
+}
+
+/**
+ * i2c_setup_smbus_host_notify - Allocate a new smbus_host_notify for the given
+ * I2C adapter.
+ * @adap: the adapter we want to associate a Host Notify function with
+ *
+ * Returns a struct smbus_host_notify pointer on success, and NULL on failure.
+ * The resulting smbus_host_notify must not be freed by the caller; it is a
+ * device-managed resource already.
+ */
+struct smbus_host_notify *i2c_setup_smbus_host_notify(struct i2c_adapter *adap)
+{
+ struct smbus_host_notify *host_notify;
+
+ host_notify = devm_kzalloc(&adap->dev, sizeof(struct smbus_host_notify),
+ GFP_KERNEL);
+ if (!host_notify)
+ return NULL;
+
+ host_notify->adapter = adap;
+
+ spin_lock_init(&host_notify->lock);
+ INIT_WORK(&host_notify->work, smbus_host_notify_work);
+
+ return host_notify;
+}
+EXPORT_SYMBOL_GPL(i2c_setup_smbus_host_notify);
+
+/**
+ * i2c_handle_smbus_host_notify - Forward a Host Notify event to the correct
+ * I2C client.
+ * @host_notify: the struct smbus_host_notify attached to the relevant adapter
+ * @addr: the I2C address of the notifying device
+ * @data: the payload of the notification
+ * Context: can't sleep
+ *
+ * Helper function to be called from an I2C bus driver's interrupt
+ * handler. It will schedule the Host Notify work, in turn calling the
+ * corresponding I2C device driver's alert function.
+ *
+ * host_notify should be a valid pointer previously returned by
+ * i2c_setup_smbus_host_notify().
+ */
+int i2c_handle_smbus_host_notify(struct smbus_host_notify *host_notify,
+ unsigned short addr, unsigned int data)
+{
+ unsigned long flags;
+ struct i2c_adapter *adapter;
+
+ if (!host_notify || !host_notify->adapter)
+ return -EINVAL;
+
+ adapter = host_notify->adapter;
+
+ spin_lock_irqsave(&host_notify->lock, flags);
+
+ if (host_notify->pending) {
+ spin_unlock_irqrestore(&host_notify->lock, flags);
+ dev_warn(&adapter->dev, "Host Notify already scheduled.\n");
+ return -EBUSY;
+ }
+
+ host_notify->payload = data;
+ host_notify->addr = addr;
+
+ /* Mark that there is a pending notification and release the lock */
+ host_notify->pending = true;
+ spin_unlock_irqrestore(&host_notify->lock, flags);
+
+ return schedule_work(&host_notify->work);
+}
+EXPORT_SYMBOL_GPL(i2c_handle_smbus_host_notify);
+
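Putting the two helpers together, a bus driver would allocate the context at probe time and forward events from its interrupt handler; a minimal sketch with hypothetical foo_ names and an invented status-register layout:

struct foo_priv {
	struct i2c_adapter adapter;
	struct smbus_host_notify *host_notify;
	void __iomem *base;
};

static int foo_init_host_notify(struct foo_priv *priv)
{
	priv->host_notify = i2c_setup_smbus_host_notify(&priv->adapter);
	return priv->host_notify ? 0 : -ENOMEM;
}

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	struct foo_priv *priv = dev_id;
	u32 sts = readl(priv->base + 0x10);	/* hypothetical status reg */

	if (sts & BIT(0))			/* hypothetical HN bit */
		i2c_handle_smbus_host_notify(priv->host_notify,
					     (sts >> 1) & 0x7f, sts >> 16);
	return IRQ_HANDLED;
}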
module_i2c_driver(smbalert_driver);
MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index 215ac87f6..b3893f628 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -37,8 +37,6 @@ struct i2c_demux_pinctrl_priv {
struct i2c_demux_pinctrl_chan chan[];
};
-static struct property status_okay = { .name = "status", .length = 3, .value = "ok" };
-
static int i2c_demux_master_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
struct i2c_demux_pinctrl_priv *priv = adap->algo_data;
@@ -107,6 +105,7 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne
of_changeset_revert(&priv->chan[new_chan].chgset);
err:
dev_err(priv->dev, "failed to setup demux-adapter %d (%d)\n", new_chan, ret);
+ priv->cur_chan = -EINVAL;
return ret;
}
@@ -192,6 +191,7 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct i2c_demux_pinctrl_priv *priv;
+ struct property *props;
int num_chan, i, j, err;
num_chan = of_count_phandle_with_args(np, "i2c-parent", NULL);
@@ -202,7 +202,10 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
priv = devm_kzalloc(&pdev->dev, sizeof(*priv)
+ num_chan * sizeof(struct i2c_demux_pinctrl_chan), GFP_KERNEL);
- if (!priv)
+
+ props = devm_kcalloc(&pdev->dev, num_chan, sizeof(*props), GFP_KERNEL);
+
+ if (!priv || !props)
return -ENOMEM;
err = of_property_read_string(np, "i2c-bus-name", &priv->bus_name);
@@ -220,8 +223,12 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
}
priv->chan[i].parent_np = adap_np;
+ props[i].name = devm_kstrdup(&pdev->dev, "status", GFP_KERNEL);
+ props[i].value = devm_kstrdup(&pdev->dev, "ok", GFP_KERNEL);
+ props[i].length = 3;
+
of_changeset_init(&priv->chan[i].chgset);
- of_changeset_update_property(&priv->chan[i].chgset, adap_np, &status_okay);
+ of_changeset_update_property(&priv->chan[i].chgset, adap_np, &props[i]);
}
priv->num_chan = num_chan;
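The per-channel copies matter because of_changeset_update_property() links the very struct property it is handed into the target node's property list; the removed shared instance could therefore only ever be attached safely once:

/* the removed pattern -- a single instance shared by every channel: */
static struct property status_okay = {
	.name = "status", .length = 3, .value = "ok"
};
/*
 * struct property embeds its ->next list link, so once it is spliced
 * into one node it cannot be attached to a second node (or a second
 * demux instance) without corrupting the list -- hence one
 * devm-allocated copy per channel above.
 */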
diff --git a/drivers/ide/cmd640.c b/drivers/ide/cmd640.c
index 70f0a2754..004243bd8 100644
--- a/drivers/ide/cmd640.c
+++ b/drivers/ide/cmd640.c
@@ -695,7 +695,7 @@ static const struct ide_port_info cmd640_port_info __initconst = {
.pio_mask = ATA_PIO5,
};
-static int cmd640x_init_one(unsigned long base, unsigned long ctl)
+static int __init cmd640x_init_one(unsigned long base, unsigned long ctl)
{
if (!request_region(base, 8, DRV_NAME)) {
printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
index f94baadbf..0ceae5cbd 100644
--- a/drivers/ide/hpt366.c
+++ b/drivers/ide/hpt366.c
@@ -1012,7 +1012,7 @@ static int init_chipset_hpt366(struct pci_dev *dev)
pci_read_config_dword(dev, 0x40, &itr1);
/* Detect PCI clock by looking at cmd_high_time. */
- switch((itr1 >> 8) & 0x07) {
+ switch ((itr1 >> 8) & 0x0f) {
case 0x09:
pci_clk = 40;
break;
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index ef907fd5b..bf9a2ad29 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1770,7 +1770,6 @@ static int ide_cd_probe(ide_drive_t *drive)
drive->driver_data = info;
g->minors = 1;
- g->driverfs_dev = &drive->gendev;
g->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE;
if (ide_cdrom_setup(drive)) {
put_device(&info->dev);
@@ -1780,7 +1779,7 @@ static int ide_cd_probe(ide_drive_t *drive)
ide_cd_read_toc(drive, &sense);
g->fops = &idecd_ops;
g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
- add_disk(g);
+ device_add_disk(&drive->gendev, g);
return 0;
out_free_disk:
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index 474173eb3..5887a7a09 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -459,9 +459,6 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi,
layer. the packet must be complete, as we do not
touch it at all. */
- if (cgc->data_direction == CGC_DATA_WRITE)
- flags |= REQ_WRITE;
-
if (cgc->sense)
memset(cgc->sense, 0, sizeof(struct request_sense));
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 05dbcce70..83679da0c 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -186,7 +186,7 @@ static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
BUG_ON(drive->dev_flags & IDE_DFLAG_BLOCKED);
BUG_ON(rq->cmd_type != REQ_TYPE_FS);
- ledtrig_ide_activity();
+ ledtrig_disk_activity();
pr_debug("%s: %sing: block=%llu, sectors=%u\n",
drive->name, rq_data_dir(rq) == READ ? "read" : "writ",
@@ -431,7 +431,7 @@ static int idedisk_prep_fn(struct request_queue *q, struct request *rq)
ide_drive_t *drive = q->queuedata;
struct ide_cmd *cmd;
- if (!(rq->cmd_flags & REQ_FLUSH))
+ if (req_op(rq) != REQ_OP_FLUSH)
return BLKPREP_OK;
if (rq->special) {
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 2fb5350c5..f079d8d1d 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -206,7 +206,7 @@ static void idefloppy_create_rw_cmd(ide_drive_t *drive,
memcpy(rq->cmd, pc->c, 12);
pc->rq = rq;
- if (rq->cmd_flags & REQ_WRITE)
+ if (cmd == WRITE)
pc->flags |= PC_FLAG_WRITING;
pc->flags |= PC_FLAG_DMA_OK;
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index 838996a00..e823394ed 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -412,12 +412,11 @@ static int ide_gd_probe(ide_drive_t *drive)
set_capacity(g, ide_gd_capacity(drive));
g->minors = IDE_DISK_MINORS;
- g->driverfs_dev = &drive->gendev;
g->flags |= GENHD_FL_EXT_DEVT;
if (drive->dev_flags & IDE_DFLAG_REMOVABLE)
g->flags = GENHD_FL_REMOVABLE;
g->fops = &ide_gd_ops;
- add_disk(g);
+ device_add_disk(&drive->gendev, g);
return 0;
out_free_disk:
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 12fa04997..9ecf4e35a 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -2052,12 +2052,12 @@ static int __init idetape_init(void)
error = driver_register(&idetape_driver.gen_driver);
if (error)
- goto out_free_driver;
+ goto out_free_chrdev;
return 0;
-out_free_driver:
- driver_unregister(&idetape_driver.gen_driver);
+out_free_chrdev:
+ unregister_chrdev(IDETAPE_MAJOR, "ht");
out_free_class:
class_destroy(idetape_sysfs_class);
out:
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index 7f0434f7e..0c5d3a994 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -707,6 +707,7 @@ set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
*timings = ((*timings) & ~TR_133_PIOREG_MDMA_MASK) | tr;
*timings2 = (*timings2) & ~TR_133_UDMAREG_UDMA_EN;
}
+ break;
case controller_un_ata6:
case controller_k2_ata6: {
/* 100Mhz cell */
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index c96649292..67ec58f9e 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -46,8 +46,6 @@
* to avoid complications with the lapic timer workaround.
* Have not seen issues with suspend, but may need same workaround here.
*
- * There is currently no kernel-based automatic probing/loading mechanism
- * if the driver is built as a module.
*/
/* un-comment DEBUG to enable pr_debug() statements */
@@ -60,8 +58,9 @@
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
#include <asm/mwait.h>
#include <asm/msr.h>
@@ -827,6 +826,35 @@ static struct cpuidle_state bxt_cstates[] = {
.enter = NULL }
};
+static struct cpuidle_state dnv_cstates[] = {
+ {
+ .name = "C1-DNV",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 2,
+ .target_residency = 2,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C1E-DNV",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01),
+ .exit_latency = 10,
+ .target_residency = 20,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C6-DNV",
+ .desc = "MWAIT 0x20",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 50,
+ .target_residency = 500,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .enter = NULL }
+};
+
/**
* intel_idle
* @dev: cpuidle_device
@@ -1016,45 +1044,50 @@ static const struct idle_cpu idle_cpu_bxt = {
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_dnv = {
+ .state_table = dnv_cstates,
+ .disable_promotion_to_c1e = true,
+};
+
#define ICPU(model, cpu) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
static const struct x86_cpu_id intel_idle_ids[] __initconst = {
- ICPU(0x1a, idle_cpu_nehalem),
- ICPU(0x1e, idle_cpu_nehalem),
- ICPU(0x1f, idle_cpu_nehalem),
- ICPU(0x25, idle_cpu_nehalem),
- ICPU(0x2c, idle_cpu_nehalem),
- ICPU(0x2e, idle_cpu_nehalem),
- ICPU(0x1c, idle_cpu_atom),
- ICPU(0x26, idle_cpu_lincroft),
- ICPU(0x2f, idle_cpu_nehalem),
- ICPU(0x2a, idle_cpu_snb),
- ICPU(0x2d, idle_cpu_snb),
- ICPU(0x36, idle_cpu_atom),
- ICPU(0x37, idle_cpu_byt),
- ICPU(0x4c, idle_cpu_cht),
- ICPU(0x3a, idle_cpu_ivb),
- ICPU(0x3e, idle_cpu_ivt),
- ICPU(0x3c, idle_cpu_hsw),
- ICPU(0x3f, idle_cpu_hsw),
- ICPU(0x45, idle_cpu_hsw),
- ICPU(0x46, idle_cpu_hsw),
- ICPU(0x4d, idle_cpu_avn),
- ICPU(0x3d, idle_cpu_bdw),
- ICPU(0x47, idle_cpu_bdw),
- ICPU(0x4f, idle_cpu_bdw),
- ICPU(0x56, idle_cpu_bdw),
- ICPU(0x4e, idle_cpu_skl),
- ICPU(0x5e, idle_cpu_skl),
- ICPU(0x8e, idle_cpu_skl),
- ICPU(0x9e, idle_cpu_skl),
- ICPU(0x55, idle_cpu_skx),
- ICPU(0x57, idle_cpu_knl),
- ICPU(0x5c, idle_cpu_bxt),
+ ICPU(INTEL_FAM6_NEHALEM_EP, idle_cpu_nehalem),
+ ICPU(INTEL_FAM6_NEHALEM, idle_cpu_nehalem),
+ ICPU(INTEL_FAM6_NEHALEM_G, idle_cpu_nehalem),
+ ICPU(INTEL_FAM6_WESTMERE, idle_cpu_nehalem),
+ ICPU(INTEL_FAM6_WESTMERE_EP, idle_cpu_nehalem),
+ ICPU(INTEL_FAM6_NEHALEM_EX, idle_cpu_nehalem),
+ ICPU(INTEL_FAM6_ATOM_PINEVIEW, idle_cpu_atom),
+ ICPU(INTEL_FAM6_ATOM_LINCROFT, idle_cpu_lincroft),
+ ICPU(INTEL_FAM6_WESTMERE_EX, idle_cpu_nehalem),
+ ICPU(INTEL_FAM6_SANDYBRIDGE, idle_cpu_snb),
+ ICPU(INTEL_FAM6_SANDYBRIDGE_X, idle_cpu_snb),
+ ICPU(INTEL_FAM6_ATOM_CEDARVIEW, idle_cpu_atom),
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT1, idle_cpu_byt),
+ ICPU(INTEL_FAM6_ATOM_AIRMONT, idle_cpu_cht),
+ ICPU(INTEL_FAM6_IVYBRIDGE, idle_cpu_ivb),
+ ICPU(INTEL_FAM6_IVYBRIDGE_X, idle_cpu_ivt),
+ ICPU(INTEL_FAM6_HASWELL_CORE, idle_cpu_hsw),
+ ICPU(INTEL_FAM6_HASWELL_X, idle_cpu_hsw),
+ ICPU(INTEL_FAM6_HASWELL_ULT, idle_cpu_hsw),
+ ICPU(INTEL_FAM6_HASWELL_GT3E, idle_cpu_hsw),
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT2, idle_cpu_avn),
+ ICPU(INTEL_FAM6_BROADWELL_CORE, idle_cpu_bdw),
+ ICPU(INTEL_FAM6_BROADWELL_GT3E, idle_cpu_bdw),
+ ICPU(INTEL_FAM6_BROADWELL_X, idle_cpu_bdw),
+ ICPU(INTEL_FAM6_BROADWELL_XEON_D, idle_cpu_bdw),
+ ICPU(INTEL_FAM6_SKYLAKE_MOBILE, idle_cpu_skl),
+ ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, idle_cpu_skl),
+ ICPU(INTEL_FAM6_KABYLAKE_MOBILE, idle_cpu_skl),
+ ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, idle_cpu_skl),
+ ICPU(INTEL_FAM6_SKYLAKE_X, idle_cpu_skx),
+ ICPU(INTEL_FAM6_XEON_PHI_KNL, idle_cpu_knl),
+ ICPU(INTEL_FAM6_ATOM_GOLDMONT, idle_cpu_bxt),
+ ICPU(INTEL_FAM6_ATOM_DENVERTON, idle_cpu_dnv),
{}
};
-MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
/*
* intel_idle_probe()
@@ -1154,7 +1187,10 @@ static unsigned long long irtl_2_usec(unsigned long long irtl)
{
unsigned long long ns;
- ns = irtl_ns_units[(irtl >> 10) & 0x3];
+ if (!irtl)
+ return 0;
+
+ ns = irtl_ns_units[(irtl >> 10) & 0x7];
return div64_u64((irtl & 0x3FF) * ns, 1000);
}
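The IRTL MSRs encode the limit as a 10-bit count (bits 9:0) scaled by a 3-bit time-unit index (bits 12:10), which is why the index mask grows from 0x3 to 0x7; a worked example, assuming the irtl_ns_units[] table defined earlier in this file maps index 2 to 1024 ns:

/*
 * e.g. an MSR value of 0x2864:
 *   unit  = irtl_ns_units[(0x2864 >> 10) & 0x7] = irtl_ns_units[2] = 1024 ns
 *   count = 0x2864 & 0x3FF = 100
 *   -> div64_u64(100 * 1024, 1000) = 102 usec
 */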
@@ -1167,43 +1203,39 @@ static unsigned long long irtl_2_usec(unsigned long long irtl)
static void bxt_idle_state_table_update(void)
{
unsigned long long msr;
+ unsigned int usec;
rdmsrl(MSR_PKGC6_IRTL, msr);
- if (msr) {
- unsigned int usec = irtl_2_usec(msr);
-
+ usec = irtl_2_usec(msr);
+ if (usec) {
bxt_cstates[2].exit_latency = usec;
bxt_cstates[2].target_residency = usec;
}
rdmsrl(MSR_PKGC7_IRTL, msr);
- if (msr) {
- unsigned int usec = irtl_2_usec(msr);
-
+ usec = irtl_2_usec(msr);
+ if (usec) {
bxt_cstates[3].exit_latency = usec;
bxt_cstates[3].target_residency = usec;
}
rdmsrl(MSR_PKGC8_IRTL, msr);
- if (msr) {
- unsigned int usec = irtl_2_usec(msr);
-
+ usec = irtl_2_usec(msr);
+ if (usec) {
bxt_cstates[4].exit_latency = usec;
bxt_cstates[4].target_residency = usec;
}
rdmsrl(MSR_PKGC9_IRTL, msr);
- if (msr) {
- unsigned int usec = irtl_2_usec(msr);
-
+ usec = irtl_2_usec(msr);
+ if (usec) {
bxt_cstates[5].exit_latency = usec;
bxt_cstates[5].target_residency = usec;
}
rdmsrl(MSR_PKGC10_IRTL, msr);
- if (msr) {
- unsigned int usec = irtl_2_usec(msr);
-
+ usec = irtl_2_usec(msr);
+ if (usec) {
bxt_cstates[6].exit_latency = usec;
bxt_cstates[6].target_residency = usec;
}
@@ -1261,13 +1293,13 @@ static void intel_idle_state_table_update(void)
{
switch (boot_cpu_data.x86_model) {
- case 0x3e: /* IVT */
+ case INTEL_FAM6_IVYBRIDGE_X:
ivt_idle_state_table_update();
break;
- case 0x5c: /* BXT */
+ case INTEL_FAM6_ATOM_GOLDMONT:
bxt_idle_state_table_update();
break;
- case 0x5e: /* SKL-H */
+ case INTEL_FAM6_SKYLAKE_DESKTOP:
sklh_idle_state_table_update();
break;
}
@@ -1415,34 +1447,12 @@ static int __init intel_idle_init(void)
return 0;
}
+device_initcall(intel_idle_init);
-static void __exit intel_idle_exit(void)
-{
- struct cpuidle_device *dev;
- int i;
-
- cpu_notifier_register_begin();
-
- if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
- on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
- __unregister_cpu_notifier(&cpu_hotplug_notifier);
-
- for_each_possible_cpu(i) {
- dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
- cpuidle_unregister_device(dev);
- }
-
- cpu_notifier_register_done();
-
- cpuidle_unregister_driver(&intel_idle_driver);
- free_percpu(intel_idle_cpuidle_devices);
-}
-
-module_init(intel_idle_init);
-module_exit(intel_idle_exit);
-
+/*
+ * We are not really modular, but we used to support that, meaning we also
+ * support "intel_idle.max_cstate=..." at boot and a read-only export of it
+ * at /sys/module/intel_idle/parameters/max_cstate -- so using module_param
+ * is the easiest way (currently) to continue doing that.
+ */
module_param(max_cstate, int, 0444);
-
-MODULE_AUTHOR("Len Brown <len.brown@intel.com>");
-MODULE_DESCRIPTION("Cpuidle driver for Intel Hardware v" INTEL_IDLE_VERSION);
-MODULE_LICENSE("GPL");
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index 505e921f0..6743b1819 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -46,6 +46,14 @@ config IIO_CONSUMERS_PER_TRIGGER
This value controls the maximum number of consumers that a
given trigger may handle. Default is 2.
+config IIO_SW_DEVICE
+ tristate "Enable software IIO device support"
+ select IIO_CONFIGFS
+ help
+ Provides IIO core support for software devices. A software
+ device can be created via configfs or directly by a driver
+ using the API provided.
+
config IIO_SW_TRIGGER
tristate "Enable software triggers support"
select IIO_CONFIGFS
diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile
index 20f649073..87e4c4369 100644
--- a/drivers/iio/Makefile
+++ b/drivers/iio/Makefile
@@ -8,6 +8,7 @@ industrialio-$(CONFIG_IIO_BUFFER) += industrialio-buffer.o
industrialio-$(CONFIG_IIO_TRIGGER) += industrialio-trigger.o
obj-$(CONFIG_IIO_CONFIGFS) += industrialio-configfs.o
+obj-$(CONFIG_IIO_SW_DEVICE) += industrialio-sw-device.o
obj-$(CONFIG_IIO_SW_TRIGGER) += industrialio-sw-trigger.o
obj-$(CONFIG_IIO_TRIGGERED_EVENT) += industrialio-triggered-event.o
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index e4a758cd7..78f148ea9 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -17,6 +17,18 @@ config BMA180
To compile this driver as a module, choose M here: the
module will be called bma180.
+config BMA220
+ tristate "Bosch BMA220 3-Axis Accelerometer Driver"
+ depends on SPI
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to add support for the Bosch BMA220 triaxial
+ acceleration sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bma220_spi.
+
config BMC150_ACCEL
tristate "Bosch BMC150 Accelerometer Driver"
select IIO_BUFFER
@@ -136,13 +148,23 @@ config MMA7455_SPI
To compile this driver as a module, choose M here: the module
will be called mma7455_spi.
+config MMA7660
+ tristate "Freescale MMA7660FC 3-Axis Accelerometer Driver"
+ depends on I2C
+ help
+ Say yes here to get support for the Freescale MMA7660FC 3-Axis
+ accelerometer.
+
+ Choosing M will build the driver as a module. If so, the module
+ will be called mma7660.
+
config MMA8452
- tristate "Freescale MMA8452Q and similar Accelerometers Driver"
+ tristate "Freescale / NXP MMA8452Q and similar Accelerometers Driver"
depends on I2C
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
- Say yes here to build support for the following Freescale 3-axis
+ Say yes here to build support for the following Freescale / NXP 3-axis
accelerometers: MMA8451Q, MMA8452Q, MMA8453Q, MMA8652FC, MMA8653FC,
FXLS8471Q.
@@ -214,7 +236,8 @@ config STK8312
config STK8BA50
tristate "Sensortek STK8BA50 3-Axis Accelerometer Driver"
depends on I2C
- depends on IIO_TRIGGER
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say yes here to get support for the Sensortek STK8BA50 3-axis
accelerometer.
diff --git a/drivers/iio/accel/Makefile b/drivers/iio/accel/Makefile
index 71b6794de..6cedbecca 100644
--- a/drivers/iio/accel/Makefile
+++ b/drivers/iio/accel/Makefile
@@ -4,6 +4,7 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_BMA180) += bma180.o
+obj-$(CONFIG_BMA220) += bma220_spi.o
obj-$(CONFIG_BMC150_ACCEL) += bmc150-accel-core.o
obj-$(CONFIG_BMC150_ACCEL_I2C) += bmc150-accel-i2c.o
obj-$(CONFIG_BMC150_ACCEL_SPI) += bmc150-accel-spi.o
@@ -15,6 +16,8 @@ obj-$(CONFIG_MMA7455) += mma7455_core.o
obj-$(CONFIG_MMA7455_I2C) += mma7455_i2c.o
obj-$(CONFIG_MMA7455_SPI) += mma7455_spi.o
+obj-$(CONFIG_MMA7660) += mma7660.o
+
obj-$(CONFIG_MMA8452) += mma8452.o
obj-$(CONFIG_MMA9551_CORE) += mma9551_core.o
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index f04b88406..e3f88ba5f 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -654,7 +654,7 @@ static irqreturn_t bma180_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct bma180_data *data = iio_priv(indio_dev);
- int64_t time_ns = iio_get_time_ns();
+ s64 time_ns = iio_get_time_ns(indio_dev);
int bit, ret, i = 0;
mutex_lock(&data->mutex);
diff --git a/drivers/iio/accel/bma220_spi.c b/drivers/iio/accel/bma220_spi.c
new file mode 100644
index 000000000..5099f295d
--- /dev/null
+++ b/drivers/iio/accel/bma220_spi.c
@@ -0,0 +1,338 @@
+/**
+ * BMA220 Digital triaxial acceleration sensor driver
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ */
+
+#include <linux/acpi.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/spi/spi.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define BMA220_REG_ID 0x00
+#define BMA220_REG_ACCEL_X 0x02
+#define BMA220_REG_ACCEL_Y 0x03
+#define BMA220_REG_ACCEL_Z 0x04
+#define BMA220_REG_RANGE 0x11
+#define BMA220_REG_SUSPEND 0x18
+
+#define BMA220_CHIP_ID 0xDD
+#define BMA220_READ_MASK 0x80
+#define BMA220_RANGE_MASK 0x03
+#define BMA220_DATA_SHIFT 2
+#define BMA220_SUSPEND_SLEEP 0xFF
+#define BMA220_SUSPEND_WAKE 0x00
+
+#define BMA220_DEVICE_NAME "bma220"
+#define BMA220_SCALE_AVAILABLE "0.623 1.248 2.491 4.983"
+
+#define BMA220_ACCEL_CHANNEL(index, reg, axis) { \
+ .type = IIO_ACCEL, \
+ .address = reg, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_index = index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 6, \
+ .storagebits = 8, \
+ .shift = BMA220_DATA_SHIFT, \
+ .endianness = IIO_CPU, \
+ }, \
+}
+
+enum bma220_axis {
+ AXIS_X,
+ AXIS_Y,
+ AXIS_Z,
+};
+
+static IIO_CONST_ATTR(in_accel_scale_available, BMA220_SCALE_AVAILABLE);
+
+static struct attribute *bma220_attributes[] = {
+ &iio_const_attr_in_accel_scale_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group bma220_attribute_group = {
+ .attrs = bma220_attributes,
+};
+
+static const int bma220_scale_table[][4] = {
+ {0, 623000}, {1, 248000}, {2, 491000}, {4, 983000}
+};
+
+struct bma220_data {
+ struct spi_device *spi_device;
+ struct mutex lock;
+ s8 buffer[16]; /* 3x8-bit channels + 5x8 padding + 8x8 timestamp */
+ u8 tx_buf[2] ____cacheline_aligned;
+};
+
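Spelled out, the layout the buffer comment above describes, and that iio_push_to_buffers_with_timestamp() relies on, is:

/*
 * buffer[0..2]  X, Y, Z samples (one signed byte each)
 * buffer[3..7]  padding so the timestamp is naturally aligned
 * buffer[8..15] s64 timestamp appended by
 *               iio_push_to_buffers_with_timestamp()
 */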
+static const struct iio_chan_spec bma220_channels[] = {
+ BMA220_ACCEL_CHANNEL(0, BMA220_REG_ACCEL_X, X),
+ BMA220_ACCEL_CHANNEL(1, BMA220_REG_ACCEL_Y, Y),
+ BMA220_ACCEL_CHANNEL(2, BMA220_REG_ACCEL_Z, Z),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
+};
+
+static inline int bma220_read_reg(struct spi_device *spi, u8 reg)
+{
+ return spi_w8r8(spi, reg | BMA220_READ_MASK);
+}
+
+static const unsigned long bma220_accel_scan_masks[] = {
+ BIT(AXIS_X) | BIT(AXIS_Y) | BIT(AXIS_Z),
+ 0
+};
+
+static irqreturn_t bma220_trigger_handler(int irq, void *p)
+{
+ int ret;
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct bma220_data *data = iio_priv(indio_dev);
+ struct spi_device *spi = data->spi_device;
+
+ mutex_lock(&data->lock);
+ data->tx_buf[0] = BMA220_REG_ACCEL_X | BMA220_READ_MASK;
+ ret = spi_write_then_read(spi, data->tx_buf, 1, data->buffer,
+ ARRAY_SIZE(bma220_channels) - 1);
+ if (ret < 0)
+ goto err;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ pf->timestamp);
+err:
+ mutex_unlock(&data->lock);
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int bma220_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret;
+ u8 range_idx;
+ struct bma220_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = bma220_read_reg(data->spi_device, chan->address);
+ if (ret < 0)
+ return -EINVAL;
+ *val = sign_extend32(ret >> BMA220_DATA_SHIFT, 5);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ ret = bma220_read_reg(data->spi_device, BMA220_REG_RANGE);
+ if (ret < 0)
+ return ret;
+ range_idx = ret & BMA220_RANGE_MASK;
+ *val = bma220_scale_table[range_idx][0];
+ *val2 = bma220_scale_table[range_idx][1];
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+
+ return -EINVAL;
+}
+
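The sample occupies bits 7:2 of the register, so the raw byte is shifted down and sign-extended from 6 bits; a worked example with a hypothetical raw value:

/*
 * raw byte 0xF4:  0xF4 >> BMA220_DATA_SHIFT = 0x3D = 0b111101
 * bit 5 is the sign bit, so sign_extend32(0x3D, 5) = 61 - 64 = -3
 */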
+static int bma220_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int i;
+ int ret;
+ int index = -1;
+ struct bma220_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ for (i = 0; i < ARRAY_SIZE(bma220_scale_table); i++)
+ if (val == bma220_scale_table[i][0] &&
+ val2 == bma220_scale_table[i][1]) {
+ index = i;
+ break;
+ }
+ if (index < 0)
+ return -EINVAL;
+
+ mutex_lock(&data->lock);
+ data->tx_buf[0] = BMA220_REG_RANGE;
+ data->tx_buf[1] = index;
+ ret = spi_write(data->spi_device, data->tx_buf,
+ sizeof(data->tx_buf));
+ if (ret < 0)
+ dev_err(&data->spi_device->dev,
+ "failed to set measurement range\n");
+ mutex_unlock(&data->lock);
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info bma220_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = bma220_read_raw,
+ .write_raw = bma220_write_raw,
+ .attrs = &bma220_attribute_group,
+};
+
+static int bma220_init(struct spi_device *spi)
+{
+ int ret;
+
+ ret = bma220_read_reg(spi, BMA220_REG_ID);
+ if (ret != BMA220_CHIP_ID)
+ return -ENODEV;
+
+ /* Make sure the chip is powered on */
+ ret = bma220_read_reg(spi, BMA220_REG_SUSPEND);
+ if (ret < 0)
+ return ret;
+ else if (ret == BMA220_SUSPEND_WAKE)
+ return bma220_read_reg(spi, BMA220_REG_SUSPEND);
+
+ return 0;
+}
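A note on the toggle-read idiom used here and mirrored in bma220_deinit() below, as implied by the driver's own suspend comment further down:

/*
 * Each read of BMA220_REG_SUSPEND toggles the power state and returns
 * the state the chip was in *before* the read.  If the first read
 * reports BMA220_SUSPEND_WAKE, the chip was awake and that read just
 * suspended it, so the second read toggles it back on.
 */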
+
+static int bma220_deinit(struct spi_device *spi)
+{
+ int ret;
+
+ /* Make sure the chip is powered off */
+ ret = bma220_read_reg(spi, BMA220_REG_SUSPEND);
+ if (ret < 0)
+ return ret;
+ else if (ret == BMA220_SUSPEND_SLEEP)
+ return bma220_read_reg(spi, BMA220_REG_SUSPEND);
+
+ return 0;
+}
+
+static int bma220_probe(struct spi_device *spi)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct bma220_data *data;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*data));
+ if (!indio_dev) {
+ dev_err(&spi->dev, "iio allocation failed!\n");
+ return -ENOMEM;
+ }
+
+ data = iio_priv(indio_dev);
+ data->spi_device = spi;
+ spi_set_drvdata(spi, indio_dev);
+ mutex_init(&data->lock);
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &bma220_info;
+ indio_dev->name = BMA220_DEVICE_NAME;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = bma220_channels;
+ indio_dev->num_channels = ARRAY_SIZE(bma220_channels);
+ indio_dev->available_scan_masks = bma220_accel_scan_masks;
+
+ ret = bma220_init(data->spi_device);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
+ bma220_trigger_handler, NULL);
+ if (ret < 0) {
+ dev_err(&spi->dev, "iio triggered buffer setup failed\n");
+ goto err_suspend;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&spi->dev, "iio_device_register failed\n");
+ iio_triggered_buffer_cleanup(indio_dev);
+ goto err_suspend;
+ }
+
+ return 0;
+
+err_suspend:
+ return bma220_deinit(spi);
+}
+
+static int bma220_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ return bma220_deinit(spi);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bma220_suspend(struct device *dev)
+{
+ struct bma220_data *data =
+ iio_priv(spi_get_drvdata(to_spi_device(dev)));
+
+ /* The chip can be suspended/woken up by a simple register read. */
+ return bma220_read_reg(data->spi_device, BMA220_REG_SUSPEND);
+}
+
+static int bma220_resume(struct device *dev)
+{
+ struct bma220_data *data =
+ iio_priv(spi_get_drvdata(to_spi_device(dev)));
+
+ return bma220_read_reg(data->spi_device, BMA220_REG_SUSPEND);
+}
+
+static SIMPLE_DEV_PM_OPS(bma220_pm_ops, bma220_suspend, bma220_resume);
+
+#define BMA220_PM_OPS (&bma220_pm_ops)
+#else
+#define BMA220_PM_OPS NULL
+#endif
+
+static const struct spi_device_id bma220_spi_id[] = {
+ {"bma220", 0},
+ {}
+};
+
+static const struct acpi_device_id bma220_acpi_id[] = {
+ {"BMA0220", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(spi, bma220_spi_id);
+
+static struct spi_driver bma220_driver = {
+ .driver = {
+ .name = "bma220_spi",
+ .pm = BMA220_PM_OPS,
+ .acpi_match_table = ACPI_PTR(bma220_acpi_id),
+ },
+ .probe = bma220_probe,
+ .remove = bma220_remove,
+ .id_table = bma220_spi_id,
+};
+
+module_spi_driver(bma220_driver);
+
+MODULE_AUTHOR("Tiberiu Breana <tiberiu.a.breana@intel.com>");
+MODULE_DESCRIPTION("BMA220 acceleration sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index 3833c83a4..59b380dbf 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -904,7 +904,7 @@ static int __bmc150_accel_fifo_flush(struct iio_dev *indio_dev,
*/
if (!irq) {
data->old_timestamp = data->timestamp;
- data->timestamp = iio_get_time_ns();
+ data->timestamp = iio_get_time_ns(indio_dev);
}
/*
@@ -1306,7 +1306,7 @@ static irqreturn_t bmc150_accel_irq_handler(int irq, void *private)
int i;
data->old_timestamp = data->timestamp;
- data->timestamp = iio_get_time_ns();
+ data->timestamp = iio_get_time_ns(indio_dev);
for (i = 0; i < BMC150_ACCEL_TRIGGERS; i++) {
if (data->triggers[i].enabled) {
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index bfe219a8b..765a72362 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -1129,7 +1129,7 @@ static irqreturn_t kxcjk1013_data_rdy_trig_poll(int irq, void *private)
struct iio_dev *indio_dev = private;
struct kxcjk1013_data *data = iio_priv(indio_dev);
- data->timestamp = iio_get_time_ns();
+ data->timestamp = iio_get_time_ns(indio_dev);
if (data->dready_trigger_on)
iio_trigger_poll(data->dready_trig);
diff --git a/drivers/iio/accel/mma7455_core.c b/drivers/iio/accel/mma7455_core.c
index c902f54c2..6551085be 100644
--- a/drivers/iio/accel/mma7455_core.c
+++ b/drivers/iio/accel/mma7455_core.c
@@ -97,7 +97,8 @@ static irqreturn_t mma7455_trigger_handler(int irq, void *p)
if (ret)
goto done;
- iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+ iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/accel/mma7660.c b/drivers/iio/accel/mma7660.c
new file mode 100644
index 000000000..0acdee516
--- /dev/null
+++ b/drivers/iio/accel/mma7660.c
@@ -0,0 +1,277 @@
+/**
+ * Freescale MMA7660FC 3-Axis Accelerometer
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * IIO driver for Freescale MMA7660FC; 7-bit I2C address: 0x4c.
+ */
+
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define MMA7660_DRIVER_NAME "mma7660"
+
+#define MMA7660_REG_XOUT 0x00
+#define MMA7660_REG_YOUT 0x01
+#define MMA7660_REG_ZOUT 0x02
+#define MMA7660_REG_OUT_BIT_ALERT BIT(6)
+
+#define MMA7660_REG_MODE 0x07
+#define MMA7660_REG_MODE_BIT_MODE BIT(0)
+#define MMA7660_REG_MODE_BIT_TON BIT(2)
+
+#define MMA7660_I2C_READ_RETRIES 5
+
+/*
+ * The accelerometer has one measurement range:
+ *
+ * -1.5g - +1.5g (6-bit, signed)
+ *
+ * scale = (1.5 + 1.5) * 9.81 / (2^6 - 1) = 0.467142857
+ */
+
+#define MMA7660_SCALE_AVAIL "0.467142857"
+
+const int mma7660_nscale = 467142857;
+
+#define MMA7660_CHANNEL(reg, axis) { \
+ .type = IIO_ACCEL, \
+ .address = reg, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+}
+
+static const struct iio_chan_spec mma7660_channels[] = {
+ MMA7660_CHANNEL(MMA7660_REG_XOUT, X),
+ MMA7660_CHANNEL(MMA7660_REG_YOUT, Y),
+ MMA7660_CHANNEL(MMA7660_REG_ZOUT, Z),
+};
+
+enum mma7660_mode {
+ MMA7660_MODE_STANDBY,
+ MMA7660_MODE_ACTIVE
+};
+
+struct mma7660_data {
+ struct i2c_client *client;
+ struct mutex lock;
+ enum mma7660_mode mode;
+};
+
+static IIO_CONST_ATTR(in_accel_scale_available, MMA7660_SCALE_AVAIL);
+
+static struct attribute *mma7660_attributes[] = {
+ &iio_const_attr_in_accel_scale_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group mma7660_attribute_group = {
+ .attrs = mma7660_attributes
+};
+
+static int mma7660_set_mode(struct mma7660_data *data,
+ enum mma7660_mode mode)
+{
+ int ret;
+ struct i2c_client *client = data->client;
+
+ if (mode == data->mode)
+ return 0;
+
+ ret = i2c_smbus_read_byte_data(client, MMA7660_REG_MODE);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to read sensor mode\n");
+ return ret;
+ }
+
+ if (mode == MMA7660_MODE_ACTIVE) {
+ ret &= ~MMA7660_REG_MODE_BIT_TON;
+ ret |= MMA7660_REG_MODE_BIT_MODE;
+ } else {
+ ret &= ~MMA7660_REG_MODE_BIT_TON;
+ ret &= ~MMA7660_REG_MODE_BIT_MODE;
+ }
+
+ ret = i2c_smbus_write_byte_data(client, MMA7660_REG_MODE, ret);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to change sensor mode\n");
+ return ret;
+ }
+
+ data->mode = mode;
+
+ return ret;
+}
+
+static int mma7660_read_accel(struct mma7660_data *data, u8 address)
+{
+ int ret, retries = MMA7660_I2C_READ_RETRIES;
+ struct i2c_client *client = data->client;
+
+ /*
+ * Read data. If the Alert bit is set, the register was read at
+ * the same time as the device was attempting to update the content.
+ * The solution is to read the register again. Do this only
+ * MMA7660_I2C_READ_RETRIES times to avoid spending too much time
+ * in the kernel.
+ */
+ do {
+ ret = i2c_smbus_read_byte_data(client, address);
+ if (ret < 0) {
+ dev_err(&client->dev, "register read failed\n");
+ return ret;
+ }
+ } while (retries-- > 0 && ret & MMA7660_REG_OUT_BIT_ALERT);
+
+ if (ret & MMA7660_REG_OUT_BIT_ALERT) {
+ dev_err(&client->dev, "all register read retries failed\n");
+ return -ETIMEDOUT;
+ }
+
+ return ret;
+}
+
+static int mma7660_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct mma7660_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&data->lock);
+ ret = mma7660_read_accel(data, chan->address);
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ return ret;
+ *val = sign_extend32(ret, 5);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = mma7660_nscale;
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info mma7660_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = mma7660_read_raw,
+ .attrs = &mma7660_attribute_group,
+};
+
+static int mma7660_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct mma7660_data *data;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev) {
+ dev_err(&client->dev, "iio allocation failed!\n");
+ return -ENOMEM;
+ }
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ i2c_set_clientdata(client, indio_dev);
+ mutex_init(&data->lock);
+ data->mode = MMA7660_MODE_STANDBY;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &mma7660_info;
+ indio_dev->name = MMA7660_DRIVER_NAME;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = mma7660_channels;
+ indio_dev->num_channels = ARRAY_SIZE(mma7660_channels);
+
+ ret = mma7660_set_mode(data, MMA7660_MODE_ACTIVE);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "device_register failed\n");
+ mma7660_set_mode(data, MMA7660_MODE_STANDBY);
+ }
+
+ return ret;
+}
+
+static int mma7660_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+
+ return mma7660_set_mode(iio_priv(indio_dev), MMA7660_MODE_STANDBY);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mma7660_suspend(struct device *dev)
+{
+ struct mma7660_data *data;
+
+ data = iio_priv(i2c_get_clientdata(to_i2c_client(dev)));
+
+ return mma7660_set_mode(data, MMA7660_MODE_STANDBY);
+}
+
+static int mma7660_resume(struct device *dev)
+{
+ struct mma7660_data *data;
+
+ data = iio_priv(i2c_get_clientdata(to_i2c_client(dev)));
+
+ return mma7660_set_mode(data, MMA7660_MODE_ACTIVE);
+}
+
+static SIMPLE_DEV_PM_OPS(mma7660_pm_ops, mma7660_suspend, mma7660_resume);
+
+#define MMA7660_PM_OPS (&mma7660_pm_ops)
+#else
+#define MMA7660_PM_OPS NULL
+#endif
+
+static const struct i2c_device_id mma7660_i2c_id[] = {
+ {"mma7660", 0},
+ {}
+};
+
+static const struct acpi_device_id mma7660_acpi_id[] = {
+ {"MMA7660", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, mma7660_acpi_id);
+
+static struct i2c_driver mma7660_driver = {
+ .driver = {
+ .name = "mma7660",
+ .pm = MMA7660_PM_OPS,
+ .acpi_match_table = ACPI_PTR(mma7660_acpi_id),
+ },
+ .probe = mma7660_probe,
+ .remove = mma7660_remove,
+ .id_table = mma7660_i2c_id,
+};
+
+module_i2c_driver(mma7660_driver);
+
+MODULE_AUTHOR("Constantin Musca <constantin.musca@intel.com>");
+MODULE_DESCRIPTION("Freescale MMA7660FC 3-Axis Accelerometer driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index e225d3c53..d41e1b588 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -1,22 +1,22 @@
/*
- * mma8452.c - Support for following Freescale 3-axis accelerometers:
+ * mma8452.c - Support for following Freescale / NXP 3-axis accelerometers:
*
- * MMA8451Q (14 bit)
- * MMA8452Q (12 bit)
- * MMA8453Q (10 bit)
- * MMA8652FC (12 bit)
- * MMA8653FC (10 bit)
- * FXLS8471Q (14 bit)
+ * device name digital output 7-bit I2C slave address (pin selectable)
+ * ---------------------------------------------------------------------
+ * MMA8451Q 14 bit 0x1c / 0x1d
+ * MMA8452Q 12 bit 0x1c / 0x1d
+ * MMA8453Q 10 bit 0x1c / 0x1d
+ * MMA8652FC 12 bit 0x1d
+ * MMA8653FC 10 bit 0x1d
+ * FXLS8471Q 14 bit 0x1e / 0x1d / 0x1c / 0x1f
*
- * Copyright 2015 Martin Kepplinger <martin.kepplinger@theobroma-systems.com>
+ * Copyright 2015 Martin Kepplinger <martink@posteo.de>
* Copyright 2014 Peter Meerwald <pmeerw@pmeerw.net>
*
* This file is subject to the terms and conditions of version 2 of
* the GNU General Public License. See the file COPYING in the main
* directory of this archive for more details.
*
- * 7-bit I2C slave address 0x1c/0x1d (pin selectable)
- *
* TODO: orientation events
*/
@@ -76,6 +76,8 @@
#define MMA8452_CTRL_DR_DEFAULT 0x4 /* 50 Hz sample frequency */
#define MMA8452_CTRL_REG2 0x2b
#define MMA8452_CTRL_REG2_RST BIT(6)
+#define MMA8452_CTRL_REG2_MODS_SHIFT 3
+#define MMA8452_CTRL_REG2_MODS_MASK 0x1b
#define MMA8452_CTRL_REG4 0x2d
#define MMA8452_CTRL_REG5 0x2e
#define MMA8452_OFF_X 0x2f
@@ -106,7 +108,7 @@ struct mma8452_data {
};
/**
- * struct mma_chip_info - chip specific data for Freescale's accelerometers
+ * struct mma_chip_info - chip specific data
* @chip_id: WHO_AM_I register's value
* @channels: struct iio_chan_spec matching the device's
* capabilities
@@ -257,20 +259,17 @@ static const int mma8452_samp_freq[8][2] = {
{6, 250000}, {1, 560000}
};
-/* Datasheet table 35 (step time vs sample frequency) */
-static const int mma8452_transient_time_step_us[8] = {
- 1250,
- 2500,
- 5000,
- 10000,
- 20000,
- 20000,
- 20000,
- 20000
+/* Datasheet table: step time "Relationship with the ODR" (sample frequency) */
+static const int mma8452_transient_time_step_us[4][8] = {
+ { 1250, 2500, 5000, 10000, 20000, 20000, 20000, 20000 }, /* normal */
+ { 1250, 2500, 5000, 10000, 20000, 80000, 80000, 80000 }, /* low power low noise */
+ { 1250, 2500, 2500, 2500, 2500, 2500, 2500, 2500 }, /* high resolution */
+ { 1250, 2500, 5000, 10000, 20000, 80000, 160000, 160000 } /* low power */
};
-/* Datasheet table 18 (normal mode) */
-static const int mma8452_hp_filter_cutoff[8][4][2] = {
+/* Datasheet table "High-Pass Filter Cutoff Options" */
+static const int mma8452_hp_filter_cutoff[4][8][4][2] = {
+ { /* normal */
{ {16, 0}, {8, 0}, {4, 0}, {2, 0} }, /* 800 Hz sample */
{ {16, 0}, {8, 0}, {4, 0}, {2, 0} }, /* 400 Hz sample */
{ {8, 0}, {4, 0}, {2, 0}, {1, 0} }, /* 200 Hz sample */
@@ -279,8 +278,61 @@ static const int mma8452_hp_filter_cutoff[8][4][2] = {
{ {2, 0}, {1, 0}, {0, 500000}, {0, 250000} }, /* 12.5 Hz sample */
{ {2, 0}, {1, 0}, {0, 500000}, {0, 250000} }, /* 6.25 Hz sample */
{ {2, 0}, {1, 0}, {0, 500000}, {0, 250000} } /* 1.56 Hz sample */
+ },
+ { /* low noise low power */
+ { {16, 0}, {8, 0}, {4, 0}, {2, 0} },
+ { {16, 0}, {8, 0}, {4, 0}, {2, 0} },
+ { {8, 0}, {4, 0}, {2, 0}, {1, 0} },
+ { {4, 0}, {2, 0}, {1, 0}, {0, 500000} },
+ { {2, 0}, {1, 0}, {0, 500000}, {0, 250000} },
+ { {0, 500000}, {0, 250000}, {0, 125000}, {0, 63000} },
+ { {0, 500000}, {0, 250000}, {0, 125000}, {0, 63000} },
+ { {0, 500000}, {0, 250000}, {0, 125000}, {0, 63000} }
+ },
+ { /* high resolution */
+ { {16, 0}, {8, 0}, {4, 0}, {2, 0} },
+ { {16, 0}, {8, 0}, {4, 0}, {2, 0} },
+ { {16, 0}, {8, 0}, {4, 0}, {2, 0} },
+ { {16, 0}, {8, 0}, {4, 0}, {2, 0} },
+ { {16, 0}, {8, 0}, {4, 0}, {2, 0} },
+ { {16, 0}, {8, 0}, {4, 0}, {2, 0} },
+ { {16, 0}, {8, 0}, {4, 0}, {2, 0} },
+ { {16, 0}, {8, 0}, {4, 0}, {2, 0} }
+ },
+ { /* low power */
+ { {16, 0}, {8, 0}, {4, 0}, {2, 0} },
+ { {8, 0}, {4, 0}, {2, 0}, {1, 0} },
+ { {4, 0}, {2, 0}, {1, 0}, {0, 500000} },
+ { {2, 0}, {1, 0}, {0, 500000}, {0, 250000} },
+ { {1, 0}, {0, 500000}, {0, 250000}, {0, 125000} },
+ { {0, 250000}, {0, 125000}, {0, 63000}, {0, 31000} },
+ { {0, 250000}, {0, 125000}, {0, 63000}, {0, 31000} },
+ { {0, 250000}, {0, 125000}, {0, 63000}, {0, 31000} }
+ }
};
+/* Datasheet table "MODS Oversampling modes averaging values at each ODR" */
+static const u16 mma8452_os_ratio[4][8] = {
+ /* 800 Hz, 400 Hz, ... , 1.56 Hz */
+ { 2, 4, 4, 4, 4, 16, 32, 128 }, /* normal */
+ { 2, 4, 4, 4, 4, 4, 8, 32 }, /* low power low noise */
+ { 2, 4, 8, 16, 32, 128, 256, 1024 }, /* high resolution */
+ { 2, 2, 2, 2, 2, 2, 4, 16 } /* low power */
+};
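All three mode-dependent tables added here share the same geometry: the row index is the MODS power mode read back from CTRL_REG2 (what mma8452_get_power_mode() returns below) and the column index is the current ODR. A minimal userspace sketch of the lookup, with the table values copied from above and made-up mode/ODR inputs:

    /* Sketch only: table copied from the driver, inputs are examples. */
    #include <stdio.h>

    static const unsigned short os_ratio[4][8] = {
            { 2, 4, 4,  4,  4,  16,  32,  128 },  /* normal */
            { 2, 4, 4,  4,  4,   4,   8,   32 },  /* low power low noise */
            { 2, 4, 8, 16, 32, 128, 256, 1024 },  /* high resolution */
            { 2, 2, 2,  2,  2,   2,   4,   16 }   /* low power */
    };

    int main(void)
    {
            int mode = 2;  /* as returned by mma8452_get_power_mode() */
            int odr = 4;   /* ODR index 4 = 50 Hz, the driver default */

            printf("oversampling ratio: %u\n", os_ratio[mode][odr]);
            return 0;
    }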
+
+static int mma8452_get_power_mode(struct mma8452_data *data)
+{
+ int reg;
+
+ reg = i2c_smbus_read_byte_data(data->client,
+ MMA8452_CTRL_REG2);
+ if (reg < 0)
+ return reg;
+
+ return ((reg & MMA8452_CTRL_REG2_MODS_MASK) >>
+ MMA8452_CTRL_REG2_MODS_SHIFT);
+}
+
static ssize_t mma8452_show_samp_freq_avail(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -306,10 +358,39 @@ static ssize_t mma8452_show_hp_cutoff_avail(struct device *dev,
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct mma8452_data *data = iio_priv(indio_dev);
+ int i, j;
+
+ i = mma8452_get_odr_index(data);
+ j = mma8452_get_power_mode(data);
+ if (j < 0)
+ return j;
+
+ return mma8452_show_int_plus_micros(buf, mma8452_hp_filter_cutoff[j][i],
+ ARRAY_SIZE(mma8452_hp_filter_cutoff[0][0]));
+}
+
+static ssize_t mma8452_show_os_ratio_avail(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct mma8452_data *data = iio_priv(indio_dev);
int i = mma8452_get_odr_index(data);
+ int j;
+ u16 val = 0;
+ size_t len = 0;
- return mma8452_show_int_plus_micros(buf, mma8452_hp_filter_cutoff[i],
- ARRAY_SIZE(mma8452_hp_filter_cutoff[0]));
+ for (j = 0; j < ARRAY_SIZE(mma8452_os_ratio); j++) {
+ if (val == mma8452_os_ratio[j][i])
+ continue;
+
+ val = mma8452_os_ratio[j][i];
+
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d ", val);
+ }
+ buf[len - 1] = '\n';
+
+ return len;
}
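Note that the loop above only suppresses a ratio equal to the one printed just before it; that is sufficient here because, in mma8452_os_ratio, equal values within a column only ever occur in consecutive rows. A self-contained sketch of the same deduplication over the 50 Hz column:

    #include <stdio.h>

    int main(void)
    {
            const unsigned short col[4] = { 4, 4, 32, 2 }; /* 50 Hz column */
            unsigned short last = 0;
            int j;

            for (j = 0; j < 4; j++) {
                    if (col[j] == last)
                            continue;       /* skip consecutive duplicate */
                    last = col[j];
                    printf("%u ", last);
            }
            printf("\n");                   /* prints: 4 32 2 */
            return 0;
    }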
static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(mma8452_show_samp_freq_avail);
@@ -317,6 +398,8 @@ static IIO_DEVICE_ATTR(in_accel_scale_available, S_IRUGO,
mma8452_show_scale_avail, NULL, 0);
static IIO_DEVICE_ATTR(in_accel_filter_high_pass_3db_frequency_available,
S_IRUGO, mma8452_show_hp_cutoff_avail, NULL, 0);
+static IIO_DEVICE_ATTR(in_accel_oversampling_ratio_available, S_IRUGO,
+ mma8452_show_os_ratio_avail, NULL, 0);
static int mma8452_get_samp_freq_index(struct mma8452_data *data,
int val, int val2)
@@ -335,24 +418,33 @@ static int mma8452_get_scale_index(struct mma8452_data *data, int val, int val2)
static int mma8452_get_hp_filter_index(struct mma8452_data *data,
int val, int val2)
{
- int i = mma8452_get_odr_index(data);
+ int i, j;
+
+ i = mma8452_get_odr_index(data);
+ j = mma8452_get_power_mode(data);
+ if (j < 0)
+ return j;
- return mma8452_get_int_plus_micros_index(mma8452_hp_filter_cutoff[i],
- ARRAY_SIZE(mma8452_hp_filter_cutoff[0]), val, val2);
+ return mma8452_get_int_plus_micros_index(mma8452_hp_filter_cutoff[j][i],
+ ARRAY_SIZE(mma8452_hp_filter_cutoff[0][0]), val, val2);
}
static int mma8452_read_hp_filter(struct mma8452_data *data, int *hz, int *uHz)
{
- int i, ret;
+ int j, i, ret;
ret = i2c_smbus_read_byte_data(data->client, MMA8452_HP_FILTER_CUTOFF);
if (ret < 0)
return ret;
i = mma8452_get_odr_index(data);
+ j = mma8452_get_power_mode(data);
+ if (j < 0)
+ return j;
+
ret &= MMA8452_HP_FILTER_CUTOFF_SEL_MASK;
- *hz = mma8452_hp_filter_cutoff[i][ret][0];
- *uHz = mma8452_hp_filter_cutoff[i][ret][1];
+ *hz = mma8452_hp_filter_cutoff[j][i][ret][0];
+ *uHz = mma8452_hp_filter_cutoff[j][i][ret][1];
return 0;
}
@@ -414,6 +506,15 @@ static int mma8452_read_raw(struct iio_dev *indio_dev,
}
return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ ret = mma8452_get_power_mode(data);
+ if (ret < 0)
+ return ret;
+
+ i = mma8452_get_odr_index(data);
+
+ *val = mma8452_os_ratio[ret][i];
+ return IIO_VAL_INT;
}
return -EINVAL;
@@ -480,6 +581,21 @@ fail:
return ret;
}
+static int mma8452_set_power_mode(struct mma8452_data *data, u8 mode)
+{
+ int reg;
+
+ reg = i2c_smbus_read_byte_data(data->client,
+ MMA8452_CTRL_REG2);
+ if (reg < 0)
+ return reg;
+
+ reg &= ~MMA8452_CTRL_REG2_MODS_MASK;
+ reg |= mode << MMA8452_CTRL_REG2_MODS_SHIFT;
+
+ return mma8452_change_config(data, MMA8452_CTRL_REG2, reg);
+}
+
/* returns >0 if in freefall mode, 0 if not or <0 if an error occurred */
static int mma8452_freefall_mode_enabled(struct mma8452_data *data)
{
@@ -518,11 +634,7 @@ static int mma8452_set_freefall_mode(struct mma8452_data *data, bool state)
val |= MMA8452_FF_MT_CFG_OAE;
}
- val = mma8452_change_config(data, chip->ev_cfg, val);
- if (val)
- return val;
-
- return 0;
+ return mma8452_change_config(data, chip->ev_cfg, val);
}
static int mma8452_set_hp_filter_frequency(struct mma8452_data *data,
@@ -597,6 +709,14 @@ static int mma8452_write_raw(struct iio_dev *indio_dev,
return mma8452_change_config(data, MMA8452_DATA_CFG,
data->data_cfg);
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ ret = mma8452_get_odr_index(data);
+
+ for (i = 0; i < ARRAY_SIZE(mma8452_os_ratio); i++) {
+ if (mma8452_os_ratio[i][ret] == val)
+ return mma8452_set_power_mode(data, i);
+ }
+
default:
return -EINVAL;
}
@@ -610,7 +730,7 @@ static int mma8452_read_thresh(struct iio_dev *indio_dev,
int *val, int *val2)
{
struct mma8452_data *data = iio_priv(indio_dev);
- int ret, us;
+ int ret, us, power_mode;
switch (info) {
case IIO_EV_INFO_VALUE:
@@ -629,7 +749,11 @@ static int mma8452_read_thresh(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
- us = ret * mma8452_transient_time_step_us[
+ power_mode = mma8452_get_power_mode(data);
+ if (power_mode < 0)
+ return power_mode;
+
+ us = ret * mma8452_transient_time_step_us[power_mode][
mma8452_get_odr_index(data)];
*val = us / USEC_PER_SEC;
*val2 = us % USEC_PER_SEC;
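With the power mode folded in, the reported period is just the transient step count scaled by the per-mode step time from the table above. For example, in normal mode at the default 50 Hz ODR the step is 20000 us, so a register count of 25 reads back as 0.5 s; a small sketch of the same arithmetic:

    #include <stdio.h>

    #define USEC_PER_SEC 1000000

    int main(void)
    {
            int step_us = 20000; /* normal mode, 50 Hz ODR, from the table */
            int count = 25;      /* example transient count register value */
            int us = count * step_us;

            printf("%d.%06d s\n", us / USEC_PER_SEC, us % USEC_PER_SEC);
            return 0;
    }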
@@ -677,8 +801,12 @@ static int mma8452_write_thresh(struct iio_dev *indio_dev,
val);
case IIO_EV_INFO_PERIOD:
+ ret = mma8452_get_power_mode(data);
+ if (ret < 0)
+ return ret;
+
steps = (val * USEC_PER_SEC + val2) /
- mma8452_transient_time_step_us[
+ mma8452_transient_time_step_us[ret][
mma8452_get_odr_index(data)];
if (steps < 0 || steps > 0xff)
@@ -785,7 +913,7 @@ static int mma8452_write_event_config(struct iio_dev *indio_dev,
static void mma8452_transient_interrupt(struct iio_dev *indio_dev)
{
struct mma8452_data *data = iio_priv(indio_dev);
- s64 ts = iio_get_time_ns();
+ s64 ts = iio_get_time_ns(indio_dev);
int src;
src = i2c_smbus_read_byte_data(data->client, data->chip_info->ev_src);
@@ -865,7 +993,7 @@ static irqreturn_t mma8452_trigger_handler(int irq, void *p)
goto done;
iio_push_to_buffers_with_timestamp(indio_dev, buffer,
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
@@ -978,7 +1106,8 @@ static struct attribute_group mma8452_event_attribute_group = {
BIT(IIO_CHAN_INFO_CALIBBIAS), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
BIT(IIO_CHAN_INFO_SCALE) | \
- BIT(IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY), \
+ BIT(IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
.scan_index = idx, \
.scan_type = { \
.sign = 's', \
@@ -998,7 +1127,8 @@ static struct attribute_group mma8452_event_attribute_group = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_CALIBBIAS), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
- BIT(IIO_CHAN_INFO_SCALE), \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
.scan_index = idx, \
.scan_type = { \
.sign = 's', \
@@ -1171,6 +1301,7 @@ static struct attribute *mma8452_attributes[] = {
&iio_dev_attr_sampling_frequency_available.dev_attr.attr,
&iio_dev_attr_in_accel_scale_available.dev_attr.attr,
&iio_dev_attr_in_accel_filter_high_pass_3db_frequency_available.dev_attr.attr,
+ &iio_dev_attr_in_accel_oversampling_ratio_available.dev_attr.attr,
NULL
};
@@ -1444,8 +1575,8 @@ static int mma8452_probe(struct i2c_client *client,
goto buffer_cleanup;
ret = mma8452_set_freefall_mode(data, false);
- if (ret)
- return ret;
+ if (ret < 0)
+ goto buffer_cleanup;
return 0;
@@ -1558,5 +1689,5 @@ static struct i2c_driver mma8452_driver = {
module_i2c_driver(mma8452_driver);
MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
-MODULE_DESCRIPTION("Freescale MMA8452 accelerometer driver");
+MODULE_DESCRIPTION("Freescale / NXP MMA8452 accelerometer driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/accel/mma9551.c b/drivers/iio/accel/mma9551.c
index d899a4d43..bf2704435 100644
--- a/drivers/iio/accel/mma9551.c
+++ b/drivers/iio/accel/mma9551.c
@@ -391,7 +391,7 @@ static irqreturn_t mma9551_event_handler(int irq, void *private)
iio_push_event(indio_dev,
IIO_MOD_EVENT_CODE(IIO_INCLI, 0, (mma_axis + 1),
IIO_EV_TYPE_ROC, IIO_EV_DIR_RISING),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
out:
mutex_unlock(&data->mutex);
diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c
index bb05f3efd..36bf19733 100644
--- a/drivers/iio/accel/mma9553.c
+++ b/drivers/iio/accel/mma9553.c
@@ -1001,7 +1001,7 @@ static irqreturn_t mma9553_irq_handler(int irq, void *private)
struct iio_dev *indio_dev = private;
struct mma9553_data *data = iio_priv(indio_dev);
- data->timestamp = iio_get_time_ns();
+ data->timestamp = iio_get_time_ns(indio_dev);
/*
* Since we only configure the interrupt pin when an
* event is enabled, we are sure we have at least
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
index 57f83a679..f8dfdb690 100644
--- a/drivers/iio/accel/st_accel.h
+++ b/drivers/iio/accel/st_accel.h
@@ -29,6 +29,7 @@
#define LSM330_ACCEL_DEV_NAME "lsm330_accel"
#define LSM303AGR_ACCEL_DEV_NAME "lsm303agr_accel"
#define LIS2DH12_ACCEL_DEV_NAME "lis2dh12_accel"
+#define LIS3L02DQ_ACCEL_DEV_NAME "lis3l02dq"
/**
* struct st_sensors_platform_data - default accel platform data
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 4d95bfc47..da3fb069e 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -215,6 +215,22 @@
#define ST_ACCEL_6_IHL_IRQ_MASK 0x80
#define ST_ACCEL_6_MULTIREAD_BIT true
+/* CUSTOM VALUES FOR SENSOR 7 */
+#define ST_ACCEL_7_ODR_ADDR 0x20
+#define ST_ACCEL_7_ODR_MASK 0x30
+#define ST_ACCEL_7_ODR_AVL_280HZ_VAL 0x00
+#define ST_ACCEL_7_ODR_AVL_560HZ_VAL 0x01
+#define ST_ACCEL_7_ODR_AVL_1120HZ_VAL 0x02
+#define ST_ACCEL_7_ODR_AVL_4480HZ_VAL 0x03
+#define ST_ACCEL_7_PW_ADDR 0x20
+#define ST_ACCEL_7_PW_MASK 0xc0
+#define ST_ACCEL_7_FS_AVL_2_GAIN IIO_G_TO_M_S_2(488)
+#define ST_ACCEL_7_BDU_ADDR 0x21
+#define ST_ACCEL_7_BDU_MASK 0x40
+#define ST_ACCEL_7_DRDY_IRQ_ADDR 0x21
+#define ST_ACCEL_7_DRDY_IRQ_INT1_MASK 0x04
+#define ST_ACCEL_7_MULTIREAD_BIT false
+
static const struct iio_chan_spec st_accel_8bit_channels[] = {
ST_SENSORS_LSM_CHANNELS(IIO_ACCEL,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
@@ -662,6 +678,54 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.multi_read_bit = ST_ACCEL_6_MULTIREAD_BIT,
.bootime = 2,
},
+ {
+ /* No WAI register present */
+ .sensors_supported = {
+ [0] = LIS3L02DQ_ACCEL_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_accel_12bit_channels,
+ .odr = {
+ .addr = ST_ACCEL_7_ODR_ADDR,
+ .mask = ST_ACCEL_7_ODR_MASK,
+ .odr_avl = {
+ { 280, ST_ACCEL_7_ODR_AVL_280HZ_VAL, },
+ { 560, ST_ACCEL_7_ODR_AVL_560HZ_VAL, },
+ { 1120, ST_ACCEL_7_ODR_AVL_1120HZ_VAL, },
+ { 4480, ST_ACCEL_7_ODR_AVL_4480HZ_VAL, },
+ },
+ },
+ .pw = {
+ .addr = ST_ACCEL_7_PW_ADDR,
+ .mask = ST_ACCEL_7_PW_MASK,
+ .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .enable_axis = {
+ .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+ .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+ },
+ .fs = {
+ .fs_avl = {
+ [0] = {
+ .num = ST_ACCEL_FS_AVL_2G,
+ .gain = ST_ACCEL_7_FS_AVL_2_GAIN,
+ },
+ },
+ },
+ /*
+ * The part has a BDU bit, but if it is set the data is
+ * never updated, so don't set it.
+ */
+ .bdu = {
+ },
+ .drdy_irq = {
+ .addr = ST_ACCEL_7_DRDY_IRQ_ADDR,
+ .mask_int1 = ST_ACCEL_7_DRDY_IRQ_INT1_MASK,
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ },
+ .multi_read_bit = ST_ACCEL_7_MULTIREAD_BIT,
+ .bootime = 2,
+ },
};
static int st_accel_read_raw(struct iio_dev *indio_dev,
@@ -758,13 +822,15 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
indio_dev->info = &accel_info;
mutex_init(&adata->tb.buf_lock);
- st_sensors_power_enable(indio_dev);
+ err = st_sensors_power_enable(indio_dev);
+ if (err)
+ return err;
err = st_sensors_check_device_support(indio_dev,
ARRAY_SIZE(st_accel_sensors_settings),
st_accel_sensors_settings);
if (err < 0)
- return err;
+ goto st_accel_power_off;
adata->num_data_channels = ST_ACCEL_NUMBER_DATA_CHANNELS;
adata->multiread_bit = adata->sensor_settings->multi_read_bit;
@@ -781,11 +847,11 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
err = st_sensors_init_sensor(indio_dev, adata->dev->platform_data);
if (err < 0)
- return err;
+ goto st_accel_power_off;
err = st_accel_allocate_ring(indio_dev);
if (err < 0)
- return err;
+ goto st_accel_power_off;
if (irq > 0) {
err = st_sensors_allocate_trigger(indio_dev,
@@ -808,6 +874,8 @@ st_accel_device_register_error:
st_sensors_deallocate_trigger(indio_dev);
st_accel_probe_trigger_error:
st_accel_deallocate_ring(indio_dev);
+st_accel_power_off:
+ st_sensors_power_disable(indio_dev);
return err;
}
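The new st_accel_power_off label follows the usual probe error-unwind idiom: every failure jumps to the label that releases everything acquired so far, and the labels run in reverse order of acquisition. A skeletal sketch of the shape, with hypothetical resource names:

    static int example_probe(struct device *dev)
    {
            int err;

            err = acquire_a(dev);           /* hypothetical */
            if (err)
                    return err;

            err = acquire_b(dev);           /* hypothetical */
            if (err)
                    goto err_a;

            err = acquire_c(dev);           /* hypothetical */
            if (err)
                    goto err_b;

            return 0;

    err_b:
            release_b(dev);                 /* undo in reverse order */
    err_a:
            release_a(dev);
            return err;
    }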
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index 7333ee9fb..e9d427a5d 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -80,6 +80,10 @@ static const struct of_device_id st_accel_of_match[] = {
.compatible = "st,h3lis331dl-accel",
.data = H3LIS331DL_DRIVER_NAME,
},
+ {
+ .compatible = "st,lis3l02dq",
+ .data = LIS3L02DQ_ACCEL_DEV_NAME,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_accel_of_match);
@@ -130,6 +134,7 @@ static const struct i2c_device_id st_accel_id_table[] = {
{ LSM330_ACCEL_DEV_NAME },
{ LSM303AGR_ACCEL_DEV_NAME },
{ LIS2DH12_ACCEL_DEV_NAME },
+ { LIS3L02DQ_ACCEL_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(i2c, st_accel_id_table);
diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c
index fcd5847a3..efd43941d 100644
--- a/drivers/iio/accel/st_accel_spi.c
+++ b/drivers/iio/accel/st_accel_spi.c
@@ -59,6 +59,7 @@ static const struct spi_device_id st_accel_id_table[] = {
{ LSM330_ACCEL_DEV_NAME },
{ LSM303AGR_ACCEL_DEV_NAME },
{ LIS2DH12_ACCEL_DEV_NAME },
+ { LIS3L02DQ_ACCEL_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(spi, st_accel_id_table);
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index f7232185d..767577298 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -153,6 +153,18 @@ config AXP288_ADC
To compile this driver as a module, choose M here: the module will be
called axp288_adc.
+config BCM_IPROC_ADC
+ tristate "Broadcom IPROC ADC driver"
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
+ depends on MFD_SYSCON
+ default ARCH_BCM_CYGNUS
+ help
+ Say Y here if you want to add support for the Broadcom iProc
+ static ADC controller.
+
+ The controller has 8 channels, and the driver allows the user
+ to read their voltage values. To compile this driver as a
+ module, choose M here: the module will be called bcm_iproc_adc.
+
config BERLIN2_ADC
tristate "Marvell Berlin2 ADC driver"
depends on ARCH_BERLIN
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 38638d46f..0ba0d500e 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_AD799X) += ad799x.o
obj-$(CONFIG_AT91_ADC) += at91_adc.o
obj-$(CONFIG_AT91_SAMA5D2_ADC) += at91-sama5d2_adc.o
obj-$(CONFIG_AXP288_ADC) += axp288_adc.o
+obj-$(CONFIG_BCM_IPROC_ADC) += bcm_iproc_adc.o
obj-$(CONFIG_BERLIN2_ADC) += berlin2-adc.o
obj-$(CONFIG_CC10001_ADC) += cc10001_adc.o
obj-$(CONFIG_DA9150_GPADC) += da9150-gpadc.o
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index 2123f0ac2..c0f6a98fd 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -154,12 +154,11 @@ static int ad7266_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- if (iio_buffer_enabled(indio_dev))
- return -EBUSY;
-
- ret = ad7266_read_single(st, val, chan->address);
+ ret = iio_device_claim_direct_mode(indio_dev);
if (ret)
return ret;
+ ret = ad7266_read_single(st, val, chan->address);
+ iio_device_release_direct_mode(indio_dev);
*val = (*val >> 2) & 0xfff;
if (chan->scan_type.sign == 's')
@@ -441,6 +440,7 @@ static int ad7266_probe(struct spi_device *spi)
st->spi = spi;
indio_dev->dev.parent = &spi->dev;
+ indio_dev->dev.of_node = spi->dev.of_node;
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &ad7266_info;
diff --git a/drivers/iio/adc/ad7291.c b/drivers/iio/adc/ad7291.c
index c0eabf156..1d90b0273 100644
--- a/drivers/iio/adc/ad7291.c
+++ b/drivers/iio/adc/ad7291.c
@@ -115,7 +115,7 @@ static irqreturn_t ad7291_event_handler(int irq, void *private)
u16 t_status, v_status;
u16 command;
int i;
- s64 timestamp = iio_get_time_ns();
+ s64 timestamp = iio_get_time_ns(indio_dev);
if (ad7291_i2c_read(chip, AD7291_T_ALERT_STATUS, &t_status))
return IRQ_HANDLED;
@@ -505,6 +505,7 @@ static int ad7291_probe(struct i2c_client *client,
indio_dev->num_channels = ARRAY_SIZE(ad7291_channels);
indio_dev->dev.parent = &client->dev;
+ indio_dev->dev.of_node = client->dev.of_node;
indio_dev->info = &ad7291_info;
indio_dev->modes = INDIO_DIRECT_MODE;
diff --git a/drivers/iio/adc/ad7298.c b/drivers/iio/adc/ad7298.c
index 62bb8f7ce..10ec8fce3 100644
--- a/drivers/iio/adc/ad7298.c
+++ b/drivers/iio/adc/ad7298.c
@@ -163,7 +163,7 @@ static irqreturn_t ad7298_trigger_handler(int irq, void *p)
goto done;
iio_push_to_buffers_with_timestamp(indio_dev, st->rx_buf,
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
@@ -315,6 +315,7 @@ static int ad7298_probe(struct spi_device *spi)
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->dev.parent = &spi->dev;
+ indio_dev->dev.of_node = spi->dev.of_node;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = ad7298_channels;
indio_dev->num_channels = ARRAY_SIZE(ad7298_channels);
diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
index be85c2a0a..b7ecf9aab 100644
--- a/drivers/iio/adc/ad7476.c
+++ b/drivers/iio/adc/ad7476.c
@@ -70,7 +70,7 @@ static irqreturn_t ad7476_trigger_handler(int irq, void *p)
goto done;
iio_push_to_buffers_with_timestamp(indio_dev, st->data,
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
@@ -106,12 +106,11 @@ static int ad7476_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev))
- ret = -EBUSY;
- else
- ret = ad7476_scan_direct(st);
- mutex_unlock(&indio_dev->mlock);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ ret = ad7476_scan_direct(st);
+ iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
@@ -228,6 +227,7 @@ static int ad7476_probe(struct spi_device *spi)
/* Establish that the iio_dev is a child of the spi device */
indio_dev->dev.parent = &spi->dev;
+ indio_dev->dev.of_node = spi->dev.of_node;
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = st->chip_info->channel;
diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c
index cf172d58c..1817ebf5a 100644
--- a/drivers/iio/adc/ad7791.c
+++ b/drivers/iio/adc/ad7791.c
@@ -272,30 +272,22 @@ static ssize_t ad7791_write_frequency(struct device *dev,
struct ad7791_state *st = iio_priv(indio_dev);
int i, ret;
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev)) {
- mutex_unlock(&indio_dev->mlock);
- return -EBUSY;
- }
- mutex_unlock(&indio_dev->mlock);
-
- ret = -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(ad7791_sample_freq_avail); i++) {
- if (sysfs_streq(ad7791_sample_freq_avail[i], buf)) {
-
- mutex_lock(&indio_dev->mlock);
- st->filter &= ~AD7791_FILTER_RATE_MASK;
- st->filter |= i;
- ad_sd_write_reg(&st->sd, AD7791_REG_FILTER,
- sizeof(st->filter), st->filter);
- mutex_unlock(&indio_dev->mlock);
- ret = 0;
+ for (i = 0; i < ARRAY_SIZE(ad7791_sample_freq_avail); i++)
+ if (sysfs_streq(ad7791_sample_freq_avail[i], buf))
break;
- }
- }
+ if (i == ARRAY_SIZE(ad7791_sample_freq_avail))
+ return -EINVAL;
+
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ st->filter &= ~AD7791_FILTER_RATE_MASK;
+ st->filter |= i;
+ ad_sd_write_reg(&st->sd, AD7791_REG_FILTER, sizeof(st->filter),
+ st->filter);
+ iio_device_release_direct_mode(indio_dev);
- return ret ? ret : len;
+ return len;
}
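This rewrite shows the pattern used throughout this series: instead of taking mlock and testing iio_buffer_enabled() by hand, a driver calls iio_device_claim_direct_mode(), which returns -EBUSY while a buffer is in use, and releases direct mode once the one-shot access is done. The skeleton, as a hedged sketch (do_hardware_access() is a hypothetical helper):

    #include <linux/iio/iio.h>

    static int example_direct_access(struct iio_dev *indio_dev)
    {
            int ret;

            /* Fails with -EBUSY if a buffer is currently enabled. */
            ret = iio_device_claim_direct_mode(indio_dev);
            if (ret)
                    return ret;

            ret = do_hardware_access(indio_dev);    /* hypothetical */

            iio_device_release_direct_mode(indio_dev);
            return ret;
    }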
static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
@@ -383,6 +375,7 @@ static int ad7791_probe(struct spi_device *spi)
spi_set_drvdata(spi, indio_dev);
indio_dev->dev.parent = &spi->dev;
+ indio_dev->dev.of_node = spi->dev.of_node;
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = st->info->channels;
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index 7b07bb651..847789bae 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -369,13 +369,6 @@ static ssize_t ad7793_write_frequency(struct device *dev,
long lval;
int i, ret;
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev)) {
- mutex_unlock(&indio_dev->mlock);
- return -EBUSY;
- }
- mutex_unlock(&indio_dev->mlock);
-
ret = kstrtol(buf, 10, &lval);
if (ret)
return ret;
@@ -383,20 +376,21 @@ static ssize_t ad7793_write_frequency(struct device *dev,
if (lval == 0)
return -EINVAL;
- ret = -EINVAL;
-
for (i = 0; i < 16; i++)
- if (lval == st->chip_info->sample_freq_avail[i]) {
- mutex_lock(&indio_dev->mlock);
- st->mode &= ~AD7793_MODE_RATE(-1);
- st->mode |= AD7793_MODE_RATE(i);
- ad_sd_write_reg(&st->sd, AD7793_REG_MODE,
- sizeof(st->mode), st->mode);
- mutex_unlock(&indio_dev->mlock);
- ret = 0;
- }
+ if (lval == st->chip_info->sample_freq_avail[i])
+ break;
+ if (i == 16)
+ return -EINVAL;
- return ret ? ret : len;
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ st->mode &= ~AD7793_MODE_RATE(-1);
+ st->mode |= AD7793_MODE_RATE(i);
+ ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode), st->mode);
+ iio_device_release_direct_mode(indio_dev);
+
+ return len;
}
static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
@@ -790,6 +784,7 @@ static int ad7793_probe(struct spi_device *spi)
spi_set_drvdata(spi, indio_dev);
indio_dev->dev.parent = &spi->dev;
+ indio_dev->dev.of_node = spi->dev.of_node;
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = st->chip_info->channels;
diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c
index 2d3c397e6..7a483bfbd 100644
--- a/drivers/iio/adc/ad7887.c
+++ b/drivers/iio/adc/ad7887.c
@@ -122,7 +122,7 @@ static irqreturn_t ad7887_trigger_handler(int irq, void *p)
goto done;
iio_push_to_buffers_with_timestamp(indio_dev, st->data,
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
@@ -156,12 +156,11 @@ static int ad7887_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev))
- ret = -EBUSY;
- else
- ret = ad7887_scan_direct(st, chan->address);
- mutex_unlock(&indio_dev->mlock);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ ret = ad7887_scan_direct(st, chan->address);
+ iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
@@ -265,6 +264,7 @@ static int ad7887_probe(struct spi_device *spi)
/* Establish that the iio_dev is a child of the spi device */
indio_dev->dev.parent = &spi->dev;
+ indio_dev->dev.of_node = spi->dev.of_node;
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->info = &ad7887_info;
indio_dev->modes = INDIO_DIRECT_MODE;
diff --git a/drivers/iio/adc/ad7923.c b/drivers/iio/adc/ad7923.c
index 45e29ccd8..77a675e11 100644
--- a/drivers/iio/adc/ad7923.c
+++ b/drivers/iio/adc/ad7923.c
@@ -181,7 +181,7 @@ static irqreturn_t ad7923_trigger_handler(int irq, void *p)
goto done;
iio_push_to_buffers_with_timestamp(indio_dev, st->rx_buf,
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
@@ -233,12 +233,11 @@ static int ad7923_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev))
- ret = -EBUSY;
- else
- ret = ad7923_scan_direct(st, chan->address);
- mutex_unlock(&indio_dev->mlock);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ ret = ad7923_scan_direct(st, chan->address);
+ iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
@@ -289,6 +288,7 @@ static int ad7923_probe(struct spi_device *spi)
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->dev.parent = &spi->dev;
+ indio_dev->dev.of_node = spi->dev.of_node;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = info->channels;
indio_dev->num_channels = info->num_channels;
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index 039622335..9704090b7 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -212,7 +212,7 @@ static irqreturn_t ad799x_trigger_handler(int irq, void *p)
goto out;
iio_push_to_buffers_with_timestamp(indio_dev, st->rx_buf,
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
out:
iio_trigger_notify_done(indio_dev->trig);
@@ -282,12 +282,11 @@ static int ad799x_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev))
- ret = -EBUSY;
- else
- ret = ad799x_scan_direct(st, chan->scan_index);
- mutex_unlock(&indio_dev->mlock);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ ret = ad799x_scan_direct(st, chan->scan_index);
+ iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
@@ -395,11 +394,9 @@ static int ad799x_write_event_config(struct iio_dev *indio_dev,
struct ad799x_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev)) {
- ret = -EBUSY;
- goto done;
- }
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
if (state)
st->config |= BIT(chan->scan_index) << AD799X_CHANNEL_SHIFT;
@@ -412,10 +409,7 @@ static int ad799x_write_event_config(struct iio_dev *indio_dev,
st->config &= ~AD7998_ALERT_EN;
ret = ad799x_write_config(st, st->config);
-
-done:
- mutex_unlock(&indio_dev->mlock);
-
+ iio_device_release_direct_mode(indio_dev);
return ret;
}
@@ -508,7 +502,7 @@ static irqreturn_t ad799x_event_handler(int irq, void *private)
(i >> 1),
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_FALLING),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
}
done:
@@ -813,6 +807,7 @@ static int ad799x_probe(struct i2c_client *client,
st->client = client;
indio_dev->dev.parent = &client->dev;
+ indio_dev->dev.of_node = client->dev.of_node;
indio_dev->name = id->name;
indio_dev->info = st->chip_config->info;
diff --git a/drivers/iio/adc/bcm_iproc_adc.c b/drivers/iio/adc/bcm_iproc_adc.c
new file mode 100644
index 000000000..21d38c8af
--- /dev/null
+++ b/drivers/iio/adc/bcm_iproc_adc.c
@@ -0,0 +1,644 @@
+/*
+ * Copyright 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation (the "GPL").
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 (GPLv2) for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 (GPLv2) along with this source code.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+#include <linux/iio/iio.h>
+
+/* The registers below are common to the IPROC ADC and touchscreen IP */
+#define IPROC_REGCTL1 0x00
+#define IPROC_REGCTL2 0x04
+#define IPROC_INTERRUPT_THRES 0x08
+#define IPROC_INTERRUPT_MASK 0x0c
+#define IPROC_INTERRUPT_STATUS 0x10
+#define IPROC_ANALOG_CONTROL 0x1c
+#define IPROC_CONTROLLER_STATUS 0x14
+#define IPROC_AUX_DATA 0x20
+#define IPROC_SOFT_BYPASS_CONTROL 0x38
+#define IPROC_SOFT_BYPASS_DATA 0x3C
+
+/* IPROC ADC Channel register offsets */
+#define IPROC_ADC_CHANNEL_REGCTL1 0x800
+#define IPROC_ADC_CHANNEL_REGCTL2 0x804
+#define IPROC_ADC_CHANNEL_STATUS 0x808
+#define IPROC_ADC_CHANNEL_INTERRUPT_STATUS 0x80c
+#define IPROC_ADC_CHANNEL_INTERRUPT_MASK 0x810
+#define IPROC_ADC_CHANNEL_DATA 0x814
+#define IPROC_ADC_CHANNEL_OFFSET 0x20
+
+/* Bit definitions for IPROC_REGCTL2 */
+#define IPROC_ADC_AUXIN_SCAN_ENA BIT(0)
+#define IPROC_ADC_PWR_LDO BIT(5)
+#define IPROC_ADC_PWR_ADC BIT(4)
+#define IPROC_ADC_PWR_BG BIT(3)
+#define IPROC_ADC_CONTROLLER_EN BIT(17)
+
+/* Bit definitions for IPROC_INTERRUPT_MASK and IPROC_INTERRUPT_STATUS */
+#define IPROC_ADC_AUXDATA_RDY_INTR BIT(3)
+#define IPROC_ADC_INTR 9
+#define IPROC_ADC_INTR_MASK (0xFF << IPROC_ADC_INTR)
+
+/* Bit definitions for IPROC_ANALOG_CONTROL */
+#define IPROC_ADC_CHANNEL_SEL 11
+#define IPROC_ADC_CHANNEL_SEL_MASK (0x7 << IPROC_ADC_CHANNEL_SEL)
+
+/* Bit definitions for IPROC_ADC_CHANNEL_REGCTL1 */
+#define IPROC_ADC_CHANNEL_ROUNDS 0x2
+#define IPROC_ADC_CHANNEL_ROUNDS_MASK (0x3F << IPROC_ADC_CHANNEL_ROUNDS)
+#define IPROC_ADC_CHANNEL_MODE 0x1
+#define IPROC_ADC_CHANNEL_MODE_MASK (0x1 << IPROC_ADC_CHANNEL_MODE)
+#define IPROC_ADC_CHANNEL_MODE_TDM 0x1
+#define IPROC_ADC_CHANNEL_MODE_SNAPSHOT 0x0
+#define IPROC_ADC_CHANNEL_ENABLE 0x0
+#define IPROC_ADC_CHANNEL_ENABLE_MASK 0x1
+
+/* Bit definitions for IPROC_ADC_CHANNEL_REGCTL2 */
+#define IPROC_ADC_CHANNEL_WATERMARK 0x0
+#define IPROC_ADC_CHANNEL_WATERMARK_MASK \
+ (0x3F << IPROC_ADC_CHANNEL_WATERMARK)
+
+#define IPROC_ADC_WATER_MARK_LEVEL 0x1
+
+/* Bit definitions for IPROC_ADC_CHANNEL_STATUS */
+#define IPROC_ADC_CHANNEL_DATA_LOST 0x0
+#define IPROC_ADC_CHANNEL_DATA_LOST_MASK \
+ (0x0 << IPROC_ADC_CHANNEL_DATA_LOST)
+#define IPROC_ADC_CHANNEL_VALID_ENTERIES 0x1
+#define IPROC_ADC_CHANNEL_VALID_ENTERIES_MASK \
+ (0xFF << IPROC_ADC_CHANNEL_VALID_ENTERIES)
+#define IPROC_ADC_CHANNEL_TOTAL_ENTERIES 0x9
+#define IPROC_ADC_CHANNEL_TOTAL_ENTERIES_MASK \
+ (0xFF << IPROC_ADC_CHANNEL_TOTAL_ENTERIES)
+
+/* Bit definitions for IPROC_ADC_CHANNEL_INTERRUPT_MASK */
+#define IPROC_ADC_CHANNEL_WTRMRK_INTR 0x0
+#define IPROC_ADC_CHANNEL_WTRMRK_INTR_MASK \
+ (0x1 << IPROC_ADC_CHANNEL_WTRMRK_INTR)
+#define IPROC_ADC_CHANNEL_FULL_INTR 0x1
+#define IPROC_ADC_CHANNEL_FULL_INTR_MASK \
+ (0x1 << IPROC_ADC_CHANNEL_FULL_INTR)
+#define IPROC_ADC_CHANNEL_EMPTY_INTR 0x2
+#define IPROC_ADC_CHANNEL_EMPTY_INTR_MASK \
+ (0x1 << IPROC_ADC_CHANNEL_EMPTY_INTR)
+
+#define IPROC_ADC_WATER_MARK_INTR_ENABLE 0x1
+
+/* Number of times to retry setting the interrupt mask register */
+#define IPROC_ADC_INTMASK_RETRY_ATTEMPTS 10
+
+#define IPROC_ADC_READ_TIMEOUT (HZ*2)
+
+#define iproc_adc_dbg_reg(dev, priv, reg) \
+do { \
+ u32 val; \
+ regmap_read(priv->regmap, reg, &val); \
+ dev_dbg(dev, "%20s= 0x%08x\n", #reg, val); \
+} while (0)
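The do { ... } while (0) wrapper is what lets the multi-statement macro above behave as a single statement. Without it, using the macro under an un-braced if would leave all but the first statement outside the conditional. A minimal illustration (BAD() and GOOD() are made-up names):

    #include <stdio.h>

    #define BAD(x)  printf("a: %d\n", (x)); printf("b: %d\n", (x))
    #define GOOD(x) do { printf("a: %d\n", (x)); \
                         printf("b: %d\n", (x)); } while (0)

    int main(void)
    {
            if (0)
                    BAD(1);  /* "b: 1" still prints: it escaped the if */
            if (0)
                    GOOD(1); /* nothing prints, as intended */
            return 0;
    }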
+
+struct iproc_adc_priv {
+ struct regmap *regmap;
+ struct clk *adc_clk;
+ struct mutex mutex;
+ int irqno;
+ int chan_val;
+ int chan_id;
+ struct completion completion;
+};
+
+static void iproc_adc_reg_dump(struct iio_dev *indio_dev)
+{
+ struct device *dev = &indio_dev->dev;
+ struct iproc_adc_priv *adc_priv = iio_priv(indio_dev);
+
+ iproc_adc_dbg_reg(dev, adc_priv, IPROC_REGCTL1);
+ iproc_adc_dbg_reg(dev, adc_priv, IPROC_REGCTL2);
+ iproc_adc_dbg_reg(dev, adc_priv, IPROC_INTERRUPT_THRES);
+ iproc_adc_dbg_reg(dev, adc_priv, IPROC_INTERRUPT_MASK);
+ iproc_adc_dbg_reg(dev, adc_priv, IPROC_INTERRUPT_STATUS);
+ iproc_adc_dbg_reg(dev, adc_priv, IPROC_CONTROLLER_STATUS);
+ iproc_adc_dbg_reg(dev, adc_priv, IPROC_ANALOG_CONTROL);
+ iproc_adc_dbg_reg(dev, adc_priv, IPROC_AUX_DATA);
+ iproc_adc_dbg_reg(dev, adc_priv, IPROC_SOFT_BYPASS_CONTROL);
+ iproc_adc_dbg_reg(dev, adc_priv, IPROC_SOFT_BYPASS_DATA);
+}
+
+static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data)
+{
+ u32 channel_intr_status;
+ u32 intr_status;
+ u32 intr_mask;
+ struct iio_dev *indio_dev = data;
+ struct iproc_adc_priv *adc_priv = iio_priv(indio_dev);
+
+ /*
+ * This interrupt is shared with the touchscreen driver.
+ * Make sure this interrupt is intended for us.
+ * Handle only ADC channel specific interrupts.
+ */
+ regmap_read(adc_priv->regmap, IPROC_INTERRUPT_STATUS, &intr_status);
+ regmap_read(adc_priv->regmap, IPROC_INTERRUPT_MASK, &intr_mask);
+ intr_status = intr_status & intr_mask;
+ channel_intr_status = (intr_status & IPROC_ADC_INTR_MASK) >>
+ IPROC_ADC_INTR;
+ if (channel_intr_status)
+ return IRQ_WAKE_THREAD;
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data)
+{
+ irqreturn_t retval = IRQ_NONE;
+ struct iproc_adc_priv *adc_priv;
+ struct iio_dev *indio_dev = data;
+ unsigned int valid_entries;
+ u32 intr_status;
+ u32 intr_channels;
+ u32 channel_status;
+ u32 ch_intr_status;
+
+ adc_priv = iio_priv(indio_dev);
+
+ regmap_read(adc_priv->regmap, IPROC_INTERRUPT_STATUS, &intr_status);
+ dev_dbg(&indio_dev->dev, "iproc_adc_interrupt_thread(),INTRPT_STS:%x\n",
+ intr_status);
+
+ intr_channels = (intr_status & IPROC_ADC_INTR_MASK) >> IPROC_ADC_INTR;
+ if (intr_channels) {
+ regmap_read(adc_priv->regmap,
+ IPROC_ADC_CHANNEL_INTERRUPT_STATUS +
+ IPROC_ADC_CHANNEL_OFFSET * adc_priv->chan_id,
+ &ch_intr_status);
+
+ if (ch_intr_status & IPROC_ADC_CHANNEL_WTRMRK_INTR_MASK) {
+ regmap_read(adc_priv->regmap,
+ IPROC_ADC_CHANNEL_STATUS +
+ IPROC_ADC_CHANNEL_OFFSET *
+ adc_priv->chan_id,
+ &channel_status);
+
+ valid_entries = ((channel_status &
+ IPROC_ADC_CHANNEL_VALID_ENTERIES_MASK) >>
+ IPROC_ADC_CHANNEL_VALID_ENTERIES);
+ if (valid_entries >= 1) {
+ regmap_read(adc_priv->regmap,
+ IPROC_ADC_CHANNEL_DATA +
+ IPROC_ADC_CHANNEL_OFFSET *
+ adc_priv->chan_id,
+ &adc_priv->chan_val);
+ complete(&adc_priv->completion);
+ } else {
+ dev_err(&indio_dev->dev,
+ "No data rcvd on channel %d\n",
+ adc_priv->chan_id);
+ }
+ regmap_write(adc_priv->regmap,
+ IPROC_ADC_CHANNEL_INTERRUPT_MASK +
+ IPROC_ADC_CHANNEL_OFFSET *
+ adc_priv->chan_id,
+ (ch_intr_status &
+ ~(IPROC_ADC_CHANNEL_WTRMRK_INTR_MASK)));
+ }
+ regmap_write(adc_priv->regmap,
+ IPROC_ADC_CHANNEL_INTERRUPT_STATUS +
+ IPROC_ADC_CHANNEL_OFFSET * adc_priv->chan_id,
+ ch_intr_status);
+ regmap_write(adc_priv->regmap, IPROC_INTERRUPT_STATUS,
+ intr_channels);
+ retval = IRQ_HANDLED;
+ }
+
+ return retval;
+}
+
+static int iproc_adc_do_read(struct iio_dev *indio_dev,
+ int channel,
+ u16 *p_adc_data)
+{
+ int read_len = 0;
+ u32 val;
+ u32 mask;
+ u32 val_check;
+ int failed_cnt = 0;
+ struct iproc_adc_priv *adc_priv = iio_priv(indio_dev);
+
+ mutex_lock(&adc_priv->mutex);
+
+ /*
+ * After a read is complete the ADC interrupts will be disabled so
+ * we can assume this section of code is safe from interrupts.
+ */
+ adc_priv->chan_val = -1;
+ adc_priv->chan_id = channel;
+
+ reinit_completion(&adc_priv->completion);
+ /* Clear any pending interrupt */
+ regmap_update_bits(adc_priv->regmap, IPROC_INTERRUPT_STATUS,
+ IPROC_ADC_INTR_MASK | IPROC_ADC_AUXDATA_RDY_INTR,
+ ((0x0 << channel) << IPROC_ADC_INTR) |
+ IPROC_ADC_AUXDATA_RDY_INTR);
+
+ /* Configure channel for snapshot mode and enable */
+ val = (BIT(IPROC_ADC_CHANNEL_ROUNDS) |
+ (IPROC_ADC_CHANNEL_MODE_SNAPSHOT << IPROC_ADC_CHANNEL_MODE) |
+ (0x1 << IPROC_ADC_CHANNEL_ENABLE));
+
+ mask = IPROC_ADC_CHANNEL_ROUNDS_MASK | IPROC_ADC_CHANNEL_MODE_MASK |
+ IPROC_ADC_CHANNEL_ENABLE_MASK;
+ regmap_update_bits(adc_priv->regmap, (IPROC_ADC_CHANNEL_REGCTL1 +
+ IPROC_ADC_CHANNEL_OFFSET * channel),
+ mask, val);
+
+ /* Set the Watermark for a channel */
+ regmap_update_bits(adc_priv->regmap, (IPROC_ADC_CHANNEL_REGCTL2 +
+ IPROC_ADC_CHANNEL_OFFSET * channel),
+ IPROC_ADC_CHANNEL_WATERMARK_MASK,
+ 0x1);
+
+ /* Enable water mark interrupt */
+ regmap_update_bits(adc_priv->regmap, (IPROC_ADC_CHANNEL_INTERRUPT_MASK +
+ IPROC_ADC_CHANNEL_OFFSET *
+ channel),
+ IPROC_ADC_CHANNEL_WTRMRK_INTR_MASK,
+ IPROC_ADC_WATER_MARK_INTR_ENABLE);
+ regmap_read(adc_priv->regmap, IPROC_INTERRUPT_MASK, &val);
+
+ /* Enable ADC interrupt for a channel */
+ val |= (BIT(channel) << IPROC_ADC_INTR);
+ regmap_write(adc_priv->regmap, IPROC_INTERRUPT_MASK, val);
+
+ /*
+ * There seems to be a very rare issue where writing to this register
+ * does not take effect. To work around the issue we will try multiple
+ * writes. In total we will spend about 10*10 = 100 us attempting this.
+ * Testing has shown that this may loop a few times, but we have never
+ * hit the full count.
+ */
+ regmap_read(adc_priv->regmap, IPROC_INTERRUPT_MASK, &val_check);
+ while (val_check != val) {
+ failed_cnt++;
+
+ if (failed_cnt > IPROC_ADC_INTMASK_RETRY_ATTEMPTS)
+ break;
+
+ udelay(10);
+ regmap_update_bits(adc_priv->regmap, IPROC_INTERRUPT_MASK,
+ IPROC_ADC_INTR_MASK,
+ ((0x1 << channel) <<
+ IPROC_ADC_INTR));
+
+ regmap_read(adc_priv->regmap, IPROC_INTERRUPT_MASK, &val_check);
+ }
+
+ if (failed_cnt) {
+ dev_dbg(&indio_dev->dev,
+ "IntMask failed (%d times)", failed_cnt);
+ if (failed_cnt > IPROC_ADC_INTMASK_RETRY_ATTEMPTS) {
+ dev_err(&indio_dev->dev,
+ "IntMask set failed. Read will likely fail.");
+ read_len = -EIO;
+ goto adc_err;
+ }
+ }
+ regmap_read(adc_priv->regmap, IPROC_INTERRUPT_MASK, &val_check);
+
+ if (wait_for_completion_timeout(&adc_priv->completion,
+ IPROC_ADC_READ_TIMEOUT) > 0) {
+
+ /* Only the lower 16 bits are relevant */
+ *p_adc_data = adc_priv->chan_val & 0xFFFF;
+ read_len = sizeof(*p_adc_data);
+
+ } else {
+ /*
+ * We never got the interrupt; something went wrong.
+ * The interrupt may still be coming, and we do not want
+ * that now. Let's disable the ADC interrupt and clear the
+ * status to put it back into a normal state.
+ */
+ read_len = -ETIMEDOUT;
+ goto adc_err;
+ }
+ mutex_unlock(&adc_priv->mutex);
+
+ return read_len;
+
+adc_err:
+ regmap_update_bits(adc_priv->regmap, IPROC_INTERRUPT_MASK,
+ IPROC_ADC_INTR_MASK,
+ ((0x0 << channel) << IPROC_ADC_INTR));
+
+ regmap_update_bits(adc_priv->regmap, IPROC_INTERRUPT_STATUS,
+ IPROC_ADC_INTR_MASK,
+ ((0x0 << channel) << IPROC_ADC_INTR));
+
+ dev_err(&indio_dev->dev, "Timed out waiting for ADC data!\n");
+ iproc_adc_reg_dump(indio_dev);
+ mutex_unlock(&adc_priv->mutex);
+
+ return read_len;
+}
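iproc_adc_do_read() and the threaded handler above hand off through a struct completion: the reader re-initialises it, arms the channel interrupt and sleeps in wait_for_completion_timeout(); the interrupt thread stores the sample in chan_val and signals with complete(). Stripped to its core, the handoff looks like this (a sketch, not the driver's exact code; the two helpers are hypothetical):

    /* Reader side (process context). */
    reinit_completion(&priv->completion);
    arm_channel_interrupt(priv);                    /* hypothetical */
    if (!wait_for_completion_timeout(&priv->completion,
                                     IPROC_ADC_READ_TIMEOUT))
            return -ETIMEDOUT;
    sample = priv->chan_val & 0xFFFF;

    /* Threaded interrupt side. */
    priv->chan_val = read_channel_data(priv);       /* hypothetical */
    complete(&priv->completion);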
+
+static int iproc_adc_enable(struct iio_dev *indio_dev)
+{
+ u32 val;
+ u32 channel_id;
+ struct iproc_adc_priv *adc_priv = iio_priv(indio_dev);
+ int ret;
+
+ /* Set i_amux = 3'b000, i.e. select channel 0 */
+ ret = regmap_update_bits(adc_priv->regmap, IPROC_ANALOG_CONTROL,
+ IPROC_ADC_CHANNEL_SEL_MASK, 0);
+ if (ret) {
+ dev_err(&indio_dev->dev,
+ "failed to write IPROC_ANALOG_CONTROL %d\n", ret);
+ return ret;
+ }
+ adc_priv->chan_val = -1;
+
+ /*
+ * PWR up LDO, ADC, and Band Gap (0 to enable)
+ * Also enable ADC controller (set high)
+ */
+ ret = regmap_read(adc_priv->regmap, IPROC_REGCTL2, &val);
+ if (ret) {
+ dev_err(&indio_dev->dev,
+ "failed to read IPROC_REGCTL2 %d\n", ret);
+ return ret;
+ }
+
+ val &= ~(IPROC_ADC_PWR_LDO | IPROC_ADC_PWR_ADC | IPROC_ADC_PWR_BG);
+
+ ret = regmap_write(adc_priv->regmap, IPROC_REGCTL2, val);
+ if (ret) {
+ dev_err(&indio_dev->dev,
+ "failed to write IPROC_REGCTL2 %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_read(adc_priv->regmap, IPROC_REGCTL2, &val);
+ if (ret) {
+ dev_err(&indio_dev->dev,
+ "failed to read IPROC_REGCTL2 %d\n", ret);
+ return ret;
+ }
+
+ val |= IPROC_ADC_CONTROLLER_EN;
+ ret = regmap_write(adc_priv->regmap, IPROC_REGCTL2, val);
+ if (ret) {
+ dev_err(&indio_dev->dev,
+ "failed to write IPROC_REGCTL2 %d\n", ret);
+ return ret;
+ }
+
+ for (channel_id = 0; channel_id < indio_dev->num_channels;
+ channel_id++) {
+ ret = regmap_write(adc_priv->regmap,
+ IPROC_ADC_CHANNEL_INTERRUPT_MASK +
+ IPROC_ADC_CHANNEL_OFFSET * channel_id, 0);
+ if (ret) {
+ dev_err(&indio_dev->dev,
+ "failed to write ADC_CHANNEL_INTERRUPT_MASK %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = regmap_write(adc_priv->regmap,
+ IPROC_ADC_CHANNEL_INTERRUPT_STATUS +
+ IPROC_ADC_CHANNEL_OFFSET * channel_id, 0);
+ if (ret) {
+ dev_err(&indio_dev->dev,
+ "failed to write ADC_CHANNEL_INTERRUPT_STATUS %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void iproc_adc_disable(struct iio_dev *indio_dev)
+{
+ u32 val;
+ int ret;
+ struct iproc_adc_priv *adc_priv = iio_priv(indio_dev);
+
+ ret = regmap_read(adc_priv->regmap, IPROC_REGCTL2, &val);
+ if (ret) {
+ dev_err(&indio_dev->dev,
+ "failed to read IPROC_REGCTL2 %d\n", ret);
+ return;
+ }
+
+ val &= ~IPROC_ADC_CONTROLLER_EN;
+ ret = regmap_write(adc_priv->regmap, IPROC_REGCTL2, val);
+ if (ret) {
+ dev_err(&indio_dev->dev,
+ "failed to write IPROC_REGCTL2 %d\n", ret);
+ return;
+ }
+}
+
+static int iproc_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask)
+{
+ u16 adc_data;
+ int err;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ err = iproc_adc_do_read(indio_dev, chan->channel, &adc_data);
+ if (err < 0)
+ return err;
+ *val = adc_data;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ *val = 1800;
+ *val2 = 10;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
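IIO_VAL_FRACTIONAL_LOG2 makes the core report *val / 2^*val2, so the scale above is 1800 mV / 1024, about 1.758 mV per LSB, i.e. a 10-bit converter with a 1.8 V reference. Converting a raw sample to microvolts the way userspace would:

    #include <stdio.h>

    int main(void)
    {
            long raw = 512;                  /* from in_voltageX_raw */
            long uv = raw * 1800000L / 1024; /* scale = 1800 / 2^10 mV */

            printf("%ld uV\n", uv);          /* 900000 uV = 0.9 V */
            return 0;
    }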
+
+static const struct iio_info iproc_adc_iio_info = {
+ .read_raw = &iproc_adc_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+#define IPROC_ADC_CHANNEL(_index, _id) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = _index, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .datasheet_name = _id, \
+}
+
+static const struct iio_chan_spec iproc_adc_iio_channels[] = {
+ IPROC_ADC_CHANNEL(0, "adc0"),
+ IPROC_ADC_CHANNEL(1, "adc1"),
+ IPROC_ADC_CHANNEL(2, "adc2"),
+ IPROC_ADC_CHANNEL(3, "adc3"),
+ IPROC_ADC_CHANNEL(4, "adc4"),
+ IPROC_ADC_CHANNEL(5, "adc5"),
+ IPROC_ADC_CHANNEL(6, "adc6"),
+ IPROC_ADC_CHANNEL(7, "adc7"),
+};
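Each IPROC_ADC_CHANNEL() entry is shorthand for a plain struct iio_chan_spec initializer; IPROC_ADC_CHANNEL(0, "adc0"), for instance, expands to:

    {
            .type = IIO_VOLTAGE,
            .indexed = 1,
            .channel = 0,
            .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
            .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
            .datasheet_name = "adc0",
    },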
+
+static int iproc_adc_probe(struct platform_device *pdev)
+{
+ struct iproc_adc_priv *adc_priv;
+ struct iio_dev *indio_dev = NULL;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev,
+ sizeof(*adc_priv));
+ if (!indio_dev) {
+ dev_err(&pdev->dev, "failed to allocate iio device\n");
+ return -ENOMEM;
+ }
+
+ adc_priv = iio_priv(indio_dev);
+ platform_set_drvdata(pdev, indio_dev);
+
+ mutex_init(&adc_priv->mutex);
+
+ init_completion(&adc_priv->completion);
+
+ adc_priv->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "adc-syscon");
+ if (IS_ERR(adc_priv->regmap)) {
+ dev_err(&pdev->dev, "failed to get handle for tsc syscon\n");
+ ret = PTR_ERR(adc_priv->regmap);
+ return ret;
+ }
+
+ adc_priv->adc_clk = devm_clk_get(&pdev->dev, "tsc_clk");
+ if (IS_ERR(adc_priv->adc_clk)) {
+ dev_err(&pdev->dev,
+ "failed getting clock tsc_clk\n");
+ ret = PTR_ERR(adc_priv->adc_clk);
+ return ret;
+ }
+
+ adc_priv->irqno = platform_get_irq(pdev, 0);
+ if (adc_priv->irqno <= 0) {
+ dev_err(&pdev->dev, "platform_get_irq failed\n");
+ ret = -ENODEV;
+ return ret;
+ }
+
+ ret = regmap_update_bits(adc_priv->regmap, IPROC_REGCTL2,
+ IPROC_ADC_AUXIN_SCAN_ENA, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to write IPROC_REGCTL2 %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, adc_priv->irqno,
+ iproc_adc_interrupt_thread,
+ iproc_adc_interrupt_handler,
+ IRQF_SHARED, "iproc-adc", indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "request_irq error %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(adc_priv->adc_clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "clk_prepare_enable failed %d\n", ret);
+ return ret;
+ }
+
+ ret = iproc_adc_enable(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable adc %d\n", ret);
+ goto err_adc_enable;
+ }
+
+ indio_dev->name = "iproc-static-adc";
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->dev.of_node = pdev->dev.of_node;
+ indio_dev->info = &iproc_adc_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = iproc_adc_iio_channels;
+ indio_dev->num_channels = ARRAY_SIZE(iproc_adc_iio_channels);
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "iio_device_register failed:err %d\n", ret);
+ goto err_clk;
+ }
+
+ return 0;
+
+err_clk:
+ iproc_adc_disable(indio_dev);
+err_adc_enable:
+ clk_disable_unprepare(adc_priv->adc_clk);
+
+ return ret;
+}
+
+static int iproc_adc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct iproc_adc_priv *adc_priv = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ iproc_adc_disable(indio_dev);
+ clk_disable_unprepare(adc_priv->adc_clk);
+
+ return 0;
+}
+
+static const struct of_device_id iproc_adc_of_match[] = {
+ {.compatible = "brcm,iproc-static-adc", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, iproc_adc_of_match);
+
+static struct platform_driver iproc_adc_driver = {
+ .probe = iproc_adc_probe,
+ .remove = iproc_adc_remove,
+ .driver = {
+ .name = "iproc-static-adc",
+ .of_match_table = of_match_ptr(iproc_adc_of_match),
+ },
+};
+module_platform_driver(iproc_adc_driver);
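module_platform_driver() generates the init/exit boilerplate the driver would otherwise spell out by hand; for this driver it is equivalent to:

    static int __init iproc_adc_driver_init(void)
    {
            return platform_driver_register(&iproc_adc_driver);
    }
    module_init(iproc_adc_driver_init);

    static void __exit iproc_adc_driver_exit(void)
    {
            platform_driver_unregister(&iproc_adc_driver);
    }
    module_exit(iproc_adc_driver_exit);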
+
+MODULE_DESCRIPTION("Broadcom iProc ADC controller driver");
+MODULE_AUTHOR("Raveendra Padasalagi <raveendra.padasalagi@broadcom.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/cc10001_adc.c b/drivers/iio/adc/cc10001_adc.c
index 8254f529b..91636c0ba 100644
--- a/drivers/iio/adc/cc10001_adc.c
+++ b/drivers/iio/adc/cc10001_adc.c
@@ -186,7 +186,7 @@ done:
if (!sample_invalid)
iio_push_to_buffers_with_timestamp(indio_dev, data,
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
diff --git a/drivers/iio/adc/hi8435.c b/drivers/iio/adc/hi8435.c
index c73c6c62a..678e8c7ea 100644
--- a/drivers/iio/adc/hi8435.c
+++ b/drivers/iio/adc/hi8435.c
@@ -400,7 +400,7 @@ static void hi8435_iio_push_event(struct iio_dev *idev, unsigned int val)
iio_push_event(idev,
IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, i,
IIO_EV_TYPE_THRESH, dir),
- iio_get_time_ns());
+ iio_get_time_ns(idev));
}
}
@@ -455,6 +455,7 @@ static int hi8435_probe(struct spi_device *spi)
mutex_init(&priv->lock);
idev->dev.parent = &spi->dev;
+ idev->dev.of_node = spi->dev.of_node;
idev->name = spi_get_device_id(spi)->name;
idev->modes = INDIO_DIRECT_MODE;
idev->info = &hi8435_info;
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index 502f2fbe8..955f3fdaf 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -465,7 +465,7 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev)
s64 time_a, time_b;
unsigned int alert;
- time_a = iio_get_time_ns();
+ time_a = iio_get_time_ns(indio_dev);
/*
* Because the timer thread and the chip conversion clock
@@ -504,7 +504,7 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev)
data[i++] = val;
}
- time_b = iio_get_time_ns();
+ time_b = iio_get_time_ns(indio_dev);
iio_push_to_buffers_with_timestamp(indio_dev,
(unsigned int *)data, time_a);
@@ -554,7 +554,7 @@ static int ina2xx_buffer_enable(struct iio_dev *indio_dev)
dev_dbg(&indio_dev->dev, "Async readout mode: %d\n",
chip->allow_async_readout);
- chip->prev_ns = iio_get_time_ns();
+ chip->prev_ns = iio_get_time_ns(indio_dev);
chip->task = kthread_run(ina2xx_capture_thread, (void *)indio_dev,
"%s:%d-%uus", indio_dev->name, indio_dev->id,
@@ -691,6 +691,7 @@ static int ina2xx_probe(struct i2c_client *client,
indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
indio_dev->dev.parent = &client->dev;
+ indio_dev->dev.of_node = client->dev.of_node;
indio_dev->channels = ina2xx_channels;
indio_dev->num_channels = ARRAY_SIZE(ina2xx_channels);
indio_dev->name = id->name;
diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c
index 41d495c60..712fbd2b1 100644
--- a/drivers/iio/adc/max1027.c
+++ b/drivers/iio/adc/max1027.c
@@ -426,6 +426,7 @@ static int max1027_probe(struct spi_device *spi)
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->dev.parent = &spi->dev;
+ indio_dev->dev.of_node = spi->dev.of_node;
indio_dev->info = &max1027_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = st->info->channels;
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index 998dc3caa..841a13c9b 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -25,6 +25,8 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -788,7 +790,7 @@ static irqreturn_t max1363_event_handler(int irq, void *private)
{
struct iio_dev *indio_dev = private;
struct max1363_state *st = iio_priv(indio_dev);
- s64 timestamp = iio_get_time_ns();
+ s64 timestamp = iio_get_time_ns(indio_dev);
unsigned long mask, loc;
u8 rx;
u8 tx[2] = { st->setupbyte,
@@ -1506,7 +1508,8 @@ static irqreturn_t max1363_trigger_handler(int irq, void *p)
if (b_sent < 0)
goto done_free;
- iio_push_to_buffers_with_timestamp(indio_dev, rxbuf, iio_get_time_ns());
+ iio_push_to_buffers_with_timestamp(indio_dev, rxbuf,
+ iio_get_time_ns(indio_dev));
done_free:
kfree(rxbuf);
@@ -1516,6 +1519,56 @@ done:
return IRQ_HANDLED;
}
+#ifdef CONFIG_OF
+
+#define MAX1363_COMPATIBLE(of_compatible, cfg) { \
+ .compatible = of_compatible, \
+ .data = &max1363_chip_info_tbl[cfg], \
+}
+
+static const struct of_device_id max1363_of_match[] = {
+ MAX1363_COMPATIBLE("maxim,max1361", max1361),
+ MAX1363_COMPATIBLE("maxim,max1362", max1362),
+ MAX1363_COMPATIBLE("maxim,max1363", max1363),
+ MAX1363_COMPATIBLE("maxim,max1364", max1364),
+ MAX1363_COMPATIBLE("maxim,max1036", max1036),
+ MAX1363_COMPATIBLE("maxim,max1037", max1037),
+ MAX1363_COMPATIBLE("maxim,max1038", max1038),
+ MAX1363_COMPATIBLE("maxim,max1039", max1039),
+ MAX1363_COMPATIBLE("maxim,max1136", max1136),
+ MAX1363_COMPATIBLE("maxim,max1137", max1137),
+ MAX1363_COMPATIBLE("maxim,max1138", max1138),
+ MAX1363_COMPATIBLE("maxim,max1139", max1139),
+ MAX1363_COMPATIBLE("maxim,max1236", max1236),
+ MAX1363_COMPATIBLE("maxim,max1237", max1237),
+ MAX1363_COMPATIBLE("maxim,max1238", max1238),
+ MAX1363_COMPATIBLE("maxim,max1239", max1239),
+ MAX1363_COMPATIBLE("maxim,max11600", max11600),
+ MAX1363_COMPATIBLE("maxim,max11601", max11601),
+ MAX1363_COMPATIBLE("maxim,max11602", max11602),
+ MAX1363_COMPATIBLE("maxim,max11603", max11603),
+ MAX1363_COMPATIBLE("maxim,max11604", max11604),
+ MAX1363_COMPATIBLE("maxim,max11605", max11605),
+ MAX1363_COMPATIBLE("maxim,max11606", max11606),
+ MAX1363_COMPATIBLE("maxim,max11607", max11607),
+ MAX1363_COMPATIBLE("maxim,max11608", max11608),
+ MAX1363_COMPATIBLE("maxim,max11609", max11609),
+ MAX1363_COMPATIBLE("maxim,max11610", max11610),
+ MAX1363_COMPATIBLE("maxim,max11611", max11611),
+ MAX1363_COMPATIBLE("maxim,max11612", max11612),
+ MAX1363_COMPATIBLE("maxim,max11613", max11613),
+ MAX1363_COMPATIBLE("maxim,max11614", max11614),
+ MAX1363_COMPATIBLE("maxim,max11615", max11615),
+ MAX1363_COMPATIBLE("maxim,max11616", max11616),
+ MAX1363_COMPATIBLE("maxim,max11617", max11617),
+ MAX1363_COMPATIBLE("maxim,max11644", max11644),
+ MAX1363_COMPATIBLE("maxim,max11645", max11645),
+ MAX1363_COMPATIBLE("maxim,max11646", max11646),
+ MAX1363_COMPATIBLE("maxim,max11647", max11647),
+ { /* sentinel */ }
+};
+#endif
+
static int max1363_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1523,6 +1576,7 @@ static int max1363_probe(struct i2c_client *client,
struct max1363_state *st;
struct iio_dev *indio_dev;
struct regulator *vref;
+ const struct of_device_id *match;
indio_dev = devm_iio_device_alloc(&client->dev,
sizeof(struct max1363_state));
@@ -1549,7 +1603,12 @@ static int max1363_probe(struct i2c_client *client,
/* this is only used for device removal purposes */
i2c_set_clientdata(client, indio_dev);
- st->chip_info = &max1363_chip_info_tbl[id->driver_data];
+ match = of_match_device(of_match_ptr(max1363_of_match),
+ &client->dev);
+ if (match)
+ st->chip_info = of_device_get_match_data(&client->dev);
+ else
+ st->chip_info = &max1363_chip_info_tbl[id->driver_data];
st->client = client;
st->vref_uv = st->chip_info->int_vref_mv * 1000;
@@ -1587,6 +1646,7 @@ static int max1363_probe(struct i2c_client *client,
/* Establish that the iio_dev is a child of the i2c device */
indio_dev->dev.parent = &client->dev;
+ indio_dev->dev.of_node = client->dev.of_node;
indio_dev->name = id->name;
indio_dev->channels = st->chip_info->channels;
indio_dev->num_channels = st->chip_info->num_channels;
@@ -1692,6 +1752,7 @@ MODULE_DEVICE_TABLE(i2c, max1363_id);
static struct i2c_driver max1363_driver = {
.driver = {
.name = "max1363",
+ .of_match_table = of_match_ptr(max1363_of_match),
},
.probe = max1363_probe,
.remove = max1363_remove,
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index a850ca7d1..634717ae1 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -308,6 +308,7 @@ static int mcp320x_probe(struct spi_device *spi)
adc->spi = spi;
indio_dev->dev.parent = &spi->dev;
+ indio_dev->dev.of_node = spi->dev.of_node;
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &mcp320x_info;
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
index d1172dc1e..254135e07 100644
--- a/drivers/iio/adc/mcp3422.c
+++ b/drivers/iio/adc/mcp3422.c
@@ -352,6 +352,7 @@ static int mcp3422_probe(struct i2c_client *client,
mutex_init(&adc->lock);
indio_dev->dev.parent = &client->dev;
+ indio_dev->dev.of_node = client->dev.of_node;
indio_dev->name = dev_name(&client->dev);
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &mcp3422_info;
diff --git a/drivers/iio/adc/mxs-lradc.c b/drivers/iio/adc/mxs-lradc.c
index ad26da1ed..b84d37c80 100644
--- a/drivers/iio/adc/mxs-lradc.c
+++ b/drivers/iio/adc/mxs-lradc.c
@@ -373,13 +373,6 @@ static u32 mxs_lradc_plate_mask(struct mxs_lradc *lradc)
return LRADC_CTRL0_MX28_PLATE_MASK;
}
-static u32 mxs_lradc_irq_en_mask(struct mxs_lradc *lradc)
-{
- if (lradc->soc == IMX23_LRADC)
- return LRADC_CTRL1_MX23_LRADC_IRQ_EN_MASK;
- return LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK;
-}
-
static u32 mxs_lradc_irq_mask(struct mxs_lradc *lradc)
{
if (lradc->soc == IMX23_LRADC)
@@ -1120,18 +1113,16 @@ static int mxs_lradc_ts_register(struct mxs_lradc *lradc)
{
struct input_dev *input;
struct device *dev = lradc->dev;
- int ret;
if (!lradc->use_touchscreen)
return 0;
- input = input_allocate_device();
+ input = devm_input_allocate_device(dev);
if (!input)
return -ENOMEM;
input->name = DRIVER_NAME;
input->id.bustype = BUS_HOST;
- input->dev.parent = dev;
input->open = mxs_lradc_ts_open;
input->close = mxs_lradc_ts_close;
@@ -1146,20 +1137,8 @@ static int mxs_lradc_ts_register(struct mxs_lradc *lradc)
lradc->ts_input = input;
input_set_drvdata(input, lradc);
- ret = input_register_device(input);
- if (ret)
- input_free_device(lradc->ts_input);
-
- return ret;
-}
-
-static void mxs_lradc_ts_unregister(struct mxs_lradc *lradc)
-{
- if (!lradc->use_touchscreen)
- return;
- mxs_lradc_disable_ts(lradc);
- input_unregister_device(lradc->ts_input);
+ return input_register_device(input);
}
/*
@@ -1510,7 +1489,9 @@ static void mxs_lradc_hw_stop(struct mxs_lradc *lradc)
{
int i;
- mxs_lradc_reg_clear(lradc, mxs_lradc_irq_en_mask(lradc), LRADC_CTRL1);
+ mxs_lradc_reg_clear(lradc,
+ lradc->buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET,
+ LRADC_CTRL1);
for (i = 0; i < LRADC_MAX_DELAY_CHANS; i++)
mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(i));
@@ -1721,13 +1702,11 @@ static int mxs_lradc_probe(struct platform_device *pdev)
ret = iio_device_register(iio);
if (ret) {
dev_err(dev, "Failed to register IIO device\n");
- goto err_ts;
+ return ret;
}
return 0;
-err_ts:
- mxs_lradc_ts_unregister(lradc);
err_ts_register:
mxs_lradc_hw_stop(lradc);
err_dev:
@@ -1745,7 +1724,6 @@ static int mxs_lradc_remove(struct platform_device *pdev)
struct mxs_lradc *lradc = iio_priv(iio);
iio_device_unregister(iio);
- mxs_lradc_ts_unregister(lradc);
mxs_lradc_hw_stop(lradc);
mxs_lradc_trigger_remove(iio);
iio_triggered_buffer_cleanup(iio);
diff --git a/drivers/iio/adc/nau7802.c b/drivers/iio/adc/nau7802.c
index e525aa647..db9b829cc 100644
--- a/drivers/iio/adc/nau7802.c
+++ b/drivers/iio/adc/nau7802.c
@@ -79,10 +79,29 @@ static const struct iio_chan_spec nau7802_chan_array[] = {
static const u16 nau7802_sample_freq_avail[] = {10, 20, 40, 80,
10, 10, 10, 320};
+static ssize_t nau7802_show_scales(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nau7802_state *st = iio_priv(dev_to_iio_dev(dev));
+ int i, len = 0;
+
+ for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "0.%09d ",
+ st->scale_avail[i]);
+
+ buf[len-1] = '\n';
+
+ return len;
+}
+
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("10 40 80 320");
+static IIO_DEVICE_ATTR(in_voltage_scale_available, S_IRUGO, nau7802_show_scales,
+ NULL, 0);
+
static struct attribute *nau7802_attributes[] = {
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_dev_attr_in_voltage_scale_available.dev_attr.attr,
NULL
};
@@ -414,6 +433,7 @@ static int nau7802_probe(struct i2c_client *client,
i2c_set_clientdata(client, indio_dev);
indio_dev->dev.parent = &client->dev;
+ indio_dev->dev.of_node = client->dev.of_node;
indio_dev->name = dev_name(&client->dev);
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &nau7802_info;
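nau7802_show_scales() above prints each available scale as a sub-1.0 fixed-point value: scale_avail[] holds nano-units and the "0.%09d" format zero-pads them to nine fractional digits. A small worked sketch of the same formatting (the values and names here are illustrative, not from the driver):

#include <linux/kernel.h>

static ssize_t foo_show_scales(char *buf, size_t size)
{
	static const int scale_navail[] = { 244140, 122070 };	/* nV/LSB */
	int i, len = 0;

	/* 244140 prints as "0.000244140", 122070 as "0.000122070" */
	for (i = 0; i < ARRAY_SIZE(scale_navail); i++)
		len += scnprintf(buf + len, size - len, "0.%09d ",
				 scale_navail[i]);

	buf[len - 1] = '\n';	/* replace the trailing space */
	return len;
}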
diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c
index 9fd032d9f..319172cf7 100644
--- a/drivers/iio/adc/ti-adc081c.c
+++ b/drivers/iio/adc/ti-adc081c.c
@@ -22,6 +22,7 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/acpi.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
@@ -138,7 +139,8 @@ static irqreturn_t adc081c_trigger_handler(int irq, void *p)
if (ret < 0)
goto out;
buf[0] = ret;
- iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+ iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_get_time_ns(indio_dev));
out:
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
@@ -149,12 +151,24 @@ static int adc081c_probe(struct i2c_client *client,
{
struct iio_dev *iio;
struct adc081c *adc;
- struct adcxx1c_model *model = &adcxx1c_models[id->driver_data];
+ struct adcxx1c_model *model;
int err;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
return -EOPNOTSUPP;
+ if (ACPI_COMPANION(&client->dev)) {
+ const struct acpi_device_id *ad_id;
+
+ ad_id = acpi_match_device(client->dev.driver->acpi_match_table,
+ &client->dev);
+ if (!ad_id)
+ return -ENODEV;
+ model = &adcxx1c_models[ad_id->driver_data];
+ } else {
+ model = &adcxx1c_models[id->driver_data];
+ }
+
iio = devm_iio_device_alloc(&client->dev, sizeof(*adc));
if (!iio)
return -ENOMEM;
@@ -172,6 +186,7 @@ static int adc081c_probe(struct i2c_client *client,
return err;
iio->dev.parent = &client->dev;
+ iio->dev.of_node = client->dev.of_node;
iio->name = dev_name(&client->dev);
iio->modes = INDIO_DIRECT_MODE;
iio->info = &adc081c_info;
@@ -231,10 +246,21 @@ static const struct of_device_id adc081c_of_match[] = {
MODULE_DEVICE_TABLE(of, adc081c_of_match);
#endif
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id adc081c_acpi_match[] = {
+ { "ADC081C", ADC081C },
+ { "ADC101C", ADC101C },
+ { "ADC121C", ADC121C },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, adc081c_acpi_match);
+#endif
+
static struct i2c_driver adc081c_driver = {
.driver = {
.name = "adc081c",
.of_match_table = of_match_ptr(adc081c_of_match),
+ .acpi_match_table = ACPI_PTR(adc081c_acpi_match),
},
.probe = adc081c_probe,
.remove = adc081c_remove,
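The ti-adc081c changes make the variant lookup work for ACPI-enumerated devices, where the i2c_device_id argument carries no useful driver_data; the ACPI match table is consulted instead. A stripped-down sketch of that dual-path lookup (hypothetical foo names, same pattern as the diff):

#include <linux/acpi.h>
#include <linux/i2c.h>

static long foo_get_variant(struct i2c_client *client,
			    const struct i2c_device_id *id)
{
	if (ACPI_COMPANION(&client->dev)) {
		const struct acpi_device_id *aid;

		aid = acpi_match_device(client->dev.driver->acpi_match_table,
					&client->dev);
		if (!aid)
			return -ENODEV;
		return aid->driver_data;	/* index into the model table */
	}

	return id->driver_data;			/* board-info/DT enumeration */
}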
diff --git a/drivers/iio/adc/ti-adc0832.c b/drivers/iio/adc/ti-adc0832.c
index 0afeac0c9..f4ba23eff 100644
--- a/drivers/iio/adc/ti-adc0832.c
+++ b/drivers/iio/adc/ti-adc0832.c
@@ -194,6 +194,7 @@ static int adc0832_probe(struct spi_device *spi)
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->dev.parent = &spi->dev;
+ indio_dev->dev.of_node = spi->dev.of_node;
indio_dev->info = &adc0832_info;
indio_dev->modes = INDIO_DIRECT_MODE;
diff --git a/drivers/iio/adc/ti-adc128s052.c b/drivers/iio/adc/ti-adc128s052.c
index bc58867d6..89dfbd31b 100644
--- a/drivers/iio/adc/ti-adc128s052.c
+++ b/drivers/iio/adc/ti-adc128s052.c
@@ -150,6 +150,7 @@ static int adc128_probe(struct spi_device *spi)
spi_set_drvdata(spi, indio_dev);
indio_dev->dev.parent = &spi->dev;
+ indio_dev->dev.of_node = spi->dev.of_node;
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &adc128_info;
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index fe96af605..066abaf80 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -55,6 +55,11 @@
#define ADS1015_DEFAULT_DATA_RATE 4
#define ADS1015_DEFAULT_CHAN 0
+enum {
+ ADS1015,
+ ADS1115,
+};
+
enum ads1015_channels {
ADS1015_AIN0_AIN1 = 0,
ADS1015_AIN0_AIN3,
@@ -71,6 +76,10 @@ static const unsigned int ads1015_data_rate[] = {
128, 250, 490, 920, 1600, 2400, 3300, 3300
};
+static const unsigned int ads1115_data_rate[] = {
+ 8, 16, 32, 64, 128, 250, 475, 860
+};
+
static const struct {
int scale;
int uscale;
@@ -101,6 +110,7 @@ static const struct {
.shift = 4, \
.endianness = IIO_CPU, \
}, \
+ .datasheet_name = "AIN"#_chan, \
}
#define ADS1015_V_DIFF_CHAN(_chan, _chan2, _addr) { \
@@ -121,6 +131,45 @@ static const struct {
.shift = 4, \
.endianness = IIO_CPU, \
}, \
+ .datasheet_name = "AIN"#_chan"-AIN"#_chan2, \
+}
+
+#define ADS1115_V_CHAN(_chan, _addr) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .address = _addr, \
+ .channel = _chan, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = _addr, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ }, \
+ .datasheet_name = "AIN"#_chan, \
+}
+
+#define ADS1115_V_DIFF_CHAN(_chan, _chan2, _addr) { \
+ .type = IIO_VOLTAGE, \
+ .differential = 1, \
+ .indexed = 1, \
+ .address = _addr, \
+ .channel = _chan, \
+ .channel2 = _chan2, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = _addr, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ }, \
+ .datasheet_name = "AIN"#_chan"-AIN"#_chan2, \
}
struct ads1015_data {
@@ -131,6 +180,8 @@ struct ads1015_data {
*/
struct mutex lock;
struct ads1015_channel_data channel_data[ADS1015_CHANNELS];
+
+ unsigned int *data_rate;
};
static bool ads1015_is_writeable_reg(struct device *dev, unsigned int reg)
@@ -157,6 +208,18 @@ static const struct iio_chan_spec ads1015_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP),
};
+static const struct iio_chan_spec ads1115_channels[] = {
+ ADS1115_V_DIFF_CHAN(0, 1, ADS1015_AIN0_AIN1),
+ ADS1115_V_DIFF_CHAN(0, 3, ADS1015_AIN0_AIN3),
+ ADS1115_V_DIFF_CHAN(1, 3, ADS1015_AIN1_AIN3),
+ ADS1115_V_DIFF_CHAN(2, 3, ADS1015_AIN2_AIN3),
+ ADS1115_V_CHAN(0, ADS1015_AIN0),
+ ADS1115_V_CHAN(1, ADS1015_AIN1),
+ ADS1115_V_CHAN(2, ADS1015_AIN2),
+ ADS1115_V_CHAN(3, ADS1015_AIN3),
+ IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP),
+};
+
static int ads1015_set_power_state(struct ads1015_data *data, bool on)
{
int ret;
@@ -196,7 +259,7 @@ int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val)
return ret;
if (change) {
- conv_time = DIV_ROUND_UP(USEC_PER_SEC, ads1015_data_rate[dr]);
+ conv_time = DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr]);
usleep_range(conv_time, conv_time + 1);
}
@@ -225,7 +288,8 @@ static irqreturn_t ads1015_trigger_handler(int irq, void *p)
buf[0] = res;
mutex_unlock(&data->lock);
- iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+ iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_get_time_ns(indio_dev));
err:
iio_trigger_notify_done(indio_dev->trig);
@@ -263,7 +327,7 @@ static int ads1015_set_data_rate(struct ads1015_data *data, int chan, int rate)
int i, ret, rindex = -1;
for (i = 0; i < ARRAY_SIZE(ads1015_data_rate); i++)
- if (ads1015_data_rate[i] == rate) {
+ if (data->data_rate[i] == rate) {
rindex = i;
break;
}
@@ -291,7 +355,9 @@ static int ads1015_read_raw(struct iio_dev *indio_dev,
mutex_lock(&indio_dev->mlock);
mutex_lock(&data->lock);
switch (mask) {
- case IIO_CHAN_INFO_RAW:
+ case IIO_CHAN_INFO_RAW: {
+ int shift = chan->scan_type.shift;
+
if (iio_buffer_enabled(indio_dev)) {
ret = -EBUSY;
break;
@@ -307,8 +373,7 @@ static int ads1015_read_raw(struct iio_dev *indio_dev,
break;
}
- /* 12 bit res, D0 is bit 4 in conversion register */
- *val = sign_extend32(*val >> 4, 11);
+ *val = sign_extend32(*val >> shift, 15 - shift);
ret = ads1015_set_power_state(data, false);
if (ret < 0)
@@ -316,6 +381,7 @@ static int ads1015_read_raw(struct iio_dev *indio_dev,
ret = IIO_VAL_INT;
break;
+ }
case IIO_CHAN_INFO_SCALE:
idx = data->channel_data[chan->address].pga;
*val = ads1015_scale[idx].scale;
@@ -324,7 +390,7 @@ static int ads1015_read_raw(struct iio_dev *indio_dev,
break;
case IIO_CHAN_INFO_SAMP_FREQ:
idx = data->channel_data[chan->address].data_rate;
- *val = ads1015_data_rate[idx];
+ *val = data->data_rate[idx];
ret = IIO_VAL_INT;
break;
default:
@@ -380,12 +446,15 @@ static const struct iio_buffer_setup_ops ads1015_buffer_setup_ops = {
};
static IIO_CONST_ATTR(scale_available, "3 2 1 0.5 0.25 0.125");
-static IIO_CONST_ATTR(sampling_frequency_available,
- "128 250 490 920 1600 2400 3300");
+
+static IIO_CONST_ATTR_NAMED(ads1015_sampling_frequency_available,
+ sampling_frequency_available, "128 250 490 920 1600 2400 3300");
+static IIO_CONST_ATTR_NAMED(ads1115_sampling_frequency_available,
+ sampling_frequency_available, "8 16 32 64 128 250 475 860");
static struct attribute *ads1015_attributes[] = {
&iio_const_attr_scale_available.dev_attr.attr,
- &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_const_attr_ads1015_sampling_frequency_available.dev_attr.attr,
NULL,
};
@@ -393,11 +462,28 @@ static const struct attribute_group ads1015_attribute_group = {
.attrs = ads1015_attributes,
};
-static const struct iio_info ads1015_info = {
+static struct attribute *ads1115_attributes[] = {
+ &iio_const_attr_scale_available.dev_attr.attr,
+ &iio_const_attr_ads1115_sampling_frequency_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ads1115_attribute_group = {
+ .attrs = ads1115_attributes,
+};
+
+static struct iio_info ads1015_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = ads1015_read_raw,
+ .write_raw = ads1015_write_raw,
+ .attrs = &ads1015_attribute_group,
+};
+
+static struct iio_info ads1115_info = {
.driver_module = THIS_MODULE,
.read_raw = ads1015_read_raw,
.write_raw = ads1015_write_raw,
- .attrs = &ads1015_attribute_group,
+ .attrs = &ads1115_attribute_group,
};
#ifdef CONFIG_OF
@@ -501,12 +587,25 @@ static int ads1015_probe(struct i2c_client *client,
mutex_init(&data->lock);
indio_dev->dev.parent = &client->dev;
- indio_dev->info = &ads1015_info;
+ indio_dev->dev.of_node = client->dev.of_node;
indio_dev->name = ADS1015_DRV_NAME;
- indio_dev->channels = ads1015_channels;
- indio_dev->num_channels = ARRAY_SIZE(ads1015_channels);
indio_dev->modes = INDIO_DIRECT_MODE;
+ switch (id->driver_data) {
+ case ADS1015:
+ indio_dev->channels = ads1015_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ads1015_channels);
+ indio_dev->info = &ads1015_info;
+ data->data_rate = (unsigned int *) &ads1015_data_rate;
+ break;
+ case ADS1115:
+ indio_dev->channels = ads1115_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ads1115_channels);
+ indio_dev->info = &ads1115_info;
+ data->data_rate = (unsigned int *) &ads1115_data_rate;
+ break;
+ }
+
/* we need to keep this ABI the same as used by hwmon ADS1015 driver */
ads1015_get_channels_config(client);
@@ -591,7 +690,8 @@ static const struct dev_pm_ops ads1015_pm_ops = {
};
static const struct i2c_device_id ads1015_id[] = {
- {"ads1015", 0},
+ {"ads1015", ADS1015},
+ {"ads1115", ADS1115},
{}
};
MODULE_DEVICE_TABLE(i2c, ads1015_id);
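The ads1015 driver gains ADS1115 support largely through two parameters: a per-chip data-rate table and the channel's scan_type shift. The rewritten raw read uses sign_extend32() with that shift so one code path handles both the 12-bit left-justified ADS1015 result and the full 16-bit ADS1115 result. A sketch of just that arithmetic (illustrative values):

#include <linux/bitops.h>
#include <linux/types.h>

/*
 * shift = 4 for the ADS1015 (12-bit result in bits 15:4),
 * shift = 0 for the ADS1115 (full 16-bit result).
 */
static int foo_cook_result(u16 raw, unsigned int shift)
{
	/* e.g. raw = 0x8000, shift = 4: 0x800 sign-extends to -2048 */
	return sign_extend32(raw >> shift, 15 - shift);
}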
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
index 03e907028..c40043990 100644
--- a/drivers/iio/adc/ti-ads8688.c
+++ b/drivers/iio/adc/ti-ads8688.c
@@ -421,6 +421,7 @@ static int ads8688_probe(struct spi_device *spi)
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->dev.parent = &spi->dev;
+ indio_dev->dev.of_node = spi->dev.of_node;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = st->chip_info->channels;
indio_dev->num_channels = st->chip_info->num_channels;
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 0470fc843..c3cfacca2 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -327,8 +327,7 @@ static int tiadc_channel_init(struct iio_dev *indio_dev, int channels)
int i;
indio_dev->num_channels = channels;
- chan_array = kcalloc(channels,
- sizeof(struct iio_chan_spec), GFP_KERNEL);
+ chan_array = kcalloc(channels, sizeof(*chan_array), GFP_KERNEL);
if (chan_array == NULL)
return -ENOMEM;
@@ -474,8 +473,7 @@ static int tiadc_probe(struct platform_device *pdev)
return -EINVAL;
}
- indio_dev = devm_iio_device_alloc(&pdev->dev,
- sizeof(struct tiadc_device));
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*indio_dev));
if (indio_dev == NULL) {
dev_err(&pdev->dev, "failed to allocate iio device\n");
return -ENOMEM;
@@ -539,8 +537,7 @@ static int tiadc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int tiadc_suspend(struct device *dev)
+static int __maybe_unused tiadc_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct tiadc_device *adc_dev = iio_priv(indio_dev);
@@ -558,7 +555,7 @@ static int tiadc_suspend(struct device *dev)
return 0;
}
-static int tiadc_resume(struct device *dev)
+static int __maybe_unused tiadc_resume(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct tiadc_device *adc_dev = iio_priv(indio_dev);
@@ -575,14 +572,7 @@ static int tiadc_resume(struct device *dev)
return 0;
}
-static const struct dev_pm_ops tiadc_pm_ops = {
- .suspend = tiadc_suspend,
- .resume = tiadc_resume,
-};
-#define TIADC_PM_OPS (&tiadc_pm_ops)
-#else
-#define TIADC_PM_OPS NULL
-#endif
+static SIMPLE_DEV_PM_OPS(tiadc_pm_ops, tiadc_suspend, tiadc_resume);
static const struct of_device_id ti_adc_dt_ids[] = {
{ .compatible = "ti,am3359-adc", },
@@ -593,7 +583,7 @@ MODULE_DEVICE_TABLE(of, ti_adc_dt_ids);
static struct platform_driver tiadc_driver = {
.driver = {
.name = "TI-am335x-adc",
- .pm = TIADC_PM_OPS,
+ .pm = &tiadc_pm_ops,
.of_match_table = ti_adc_dt_ids,
},
.probe = tiadc_probe,
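The ti_am335x_adc PM rework swaps the #ifdef CONFIG_PM block for SIMPLE_DEV_PM_OPS() plus __maybe_unused callbacks: when CONFIG_PM_SLEEP is off, the macro emits an empty ops table and the compiler discards the unreferenced functions without warnings. The whole idiom, reduced to a sketch with hypothetical names:

#include <linux/device.h>
#include <linux/pm.h>

static int __maybe_unused foo_suspend(struct device *dev)
{
	return 0;	/* quiesce the hardware here */
}

static int __maybe_unused foo_resume(struct device *dev)
{
	return 0;	/* restore the hardware here */
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

/* the driver struct can now reference &foo_pm_ops unconditionally */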
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 653bf1379..228a003ad 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -594,7 +594,8 @@ static irqreturn_t vf610_adc_isr(int irq, void *dev_id)
if (iio_buffer_enabled(indio_dev)) {
info->buffer[0] = info->value;
iio_push_to_buffers_with_timestamp(indio_dev,
- info->buffer, iio_get_time_ns());
+ info->buffer,
+ iio_get_time_ns(indio_dev));
iio_trigger_notify_done(indio_dev->trig);
} else
complete(&info->completion);
diff --git a/drivers/iio/adc/xilinx-xadc-events.c b/drivers/iio/adc/xilinx-xadc-events.c
index edcf3aabd..6d5c2a6f4 100644
--- a/drivers/iio/adc/xilinx-xadc-events.c
+++ b/drivers/iio/adc/xilinx-xadc-events.c
@@ -46,7 +46,7 @@ static void xadc_handle_event(struct iio_dev *indio_dev, unsigned int event)
iio_push_event(indio_dev,
IIO_UNMOD_EVENT_CODE(chan->type, chan->channel,
IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
} else {
/*
* For other channels we don't know whether it is an upper or
@@ -56,7 +56,7 @@ static void xadc_handle_event(struct iio_dev *indio_dev, unsigned int event)
iio_push_event(indio_dev,
IIO_UNMOD_EVENT_CODE(chan->type, chan->channel,
IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
}
}
diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
index 212cbedc7..dd99d273b 100644
--- a/drivers/iio/buffer/industrialio-buffer-dma.c
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -305,7 +305,7 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
queue->fileio.active_block = NULL;
spin_lock_irq(&queue->list_lock);
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
block = queue->fileio.blocks[i];
/* If we can't re-use it free it */
@@ -323,7 +323,7 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
INIT_LIST_HEAD(&queue->incoming);
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
if (queue->fileio.blocks[i]) {
block = queue->fileio.blocks[i];
if (block->state == IIO_BLOCK_STATE_DEAD) {
diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig
index f73290f84..4bcc025e8 100644
--- a/drivers/iio/chemical/Kconfig
+++ b/drivers/iio/chemical/Kconfig
@@ -5,15 +5,17 @@
menu "Chemical Sensors"
config ATLAS_PH_SENSOR
- tristate "Atlas Scientific OEM pH-SM sensor"
+ tristate "Atlas Scientific OEM SM sensors"
depends on I2C
select REGMAP_I2C
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
select IRQ_WORK
help
- Say Y here to build I2C interface support for the Atlas
- Scientific OEM pH-SM sensor.
+ Say Y here to build I2C interface support for the following
+ Atlas Scientific OEM SM sensors:
+ * pH SM sensor
+ * EC SM sensor
To compile this driver as module, choose M here: the
module will be called atlas-ph-sensor.
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
index 62b37cd8f..407f141a1 100644
--- a/drivers/iio/chemical/atlas-ph-sensor.c
+++ b/drivers/iio/chemical/atlas-ph-sensor.c
@@ -24,6 +24,7 @@
#include <linux/irq_work.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
+#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
@@ -43,29 +44,50 @@
#define ATLAS_REG_PWR_CONTROL 0x06
-#define ATLAS_REG_CALIB_STATUS 0x0d
-#define ATLAS_REG_CALIB_STATUS_MASK 0x07
-#define ATLAS_REG_CALIB_STATUS_LOW BIT(0)
-#define ATLAS_REG_CALIB_STATUS_MID BIT(1)
-#define ATLAS_REG_CALIB_STATUS_HIGH BIT(2)
+#define ATLAS_REG_PH_CALIB_STATUS 0x0d
+#define ATLAS_REG_PH_CALIB_STATUS_MASK 0x07
+#define ATLAS_REG_PH_CALIB_STATUS_LOW BIT(0)
+#define ATLAS_REG_PH_CALIB_STATUS_MID BIT(1)
+#define ATLAS_REG_PH_CALIB_STATUS_HIGH BIT(2)
-#define ATLAS_REG_TEMP_DATA 0x0e
+#define ATLAS_REG_EC_CALIB_STATUS 0x0f
+#define ATLAS_REG_EC_CALIB_STATUS_MASK 0x0f
+#define ATLAS_REG_EC_CALIB_STATUS_DRY BIT(0)
+#define ATLAS_REG_EC_CALIB_STATUS_SINGLE BIT(1)
+#define ATLAS_REG_EC_CALIB_STATUS_LOW BIT(2)
+#define ATLAS_REG_EC_CALIB_STATUS_HIGH BIT(3)
+
+#define ATLAS_REG_PH_TEMP_DATA 0x0e
#define ATLAS_REG_PH_DATA 0x16
+#define ATLAS_REG_EC_PROBE 0x08
+#define ATLAS_REG_EC_TEMP_DATA 0x10
+#define ATLAS_REG_EC_DATA 0x18
+#define ATLAS_REG_TDS_DATA 0x1c
+#define ATLAS_REG_PSS_DATA 0x20
+
#define ATLAS_PH_INT_TIME_IN_US 450000
+#define ATLAS_EC_INT_TIME_IN_US 650000
+
+enum {
+ ATLAS_PH_SM,
+ ATLAS_EC_SM,
+};
struct atlas_data {
struct i2c_client *client;
struct iio_trigger *trig;
+ struct atlas_device *chip;
struct regmap *regmap;
struct irq_work work;
- __be32 buffer[4]; /* 32-bit pH data + 32-bit pad + 64-bit timestamp */
+ __be32 buffer[6]; /* 96-bit data + 32-bit pad + 64-bit timestamp */
};
static const struct regmap_range atlas_volatile_ranges[] = {
regmap_reg_range(ATLAS_REG_INT_CONTROL, ATLAS_REG_INT_CONTROL),
regmap_reg_range(ATLAS_REG_PH_DATA, ATLAS_REG_PH_DATA + 4),
+ regmap_reg_range(ATLAS_REG_EC_DATA, ATLAS_REG_PSS_DATA + 4),
};
static const struct regmap_access_table atlas_volatile_table = {
@@ -80,13 +102,14 @@ static const struct regmap_config atlas_regmap_config = {
.val_bits = 8,
.volatile_table = &atlas_volatile_table,
- .max_register = ATLAS_REG_PH_DATA + 4,
+ .max_register = ATLAS_REG_PSS_DATA + 4,
.cache_type = REGCACHE_RBTREE,
};
-static const struct iio_chan_spec atlas_channels[] = {
+static const struct iio_chan_spec atlas_ph_channels[] = {
{
.type = IIO_PH,
+ .address = ATLAS_REG_PH_DATA,
.info_mask_separate =
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
.scan_index = 0,
@@ -100,7 +123,7 @@ static const struct iio_chan_spec atlas_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(1),
{
.type = IIO_TEMP,
- .address = ATLAS_REG_TEMP_DATA,
+ .address = ATLAS_REG_PH_TEMP_DATA,
.info_mask_separate =
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
.output = 1,
@@ -108,6 +131,142 @@ static const struct iio_chan_spec atlas_channels[] = {
},
};
+#define ATLAS_EC_CHANNEL(_idx, _addr) \
+ {\
+ .type = IIO_CONCENTRATION, \
+ .indexed = 1, \
+ .channel = _idx, \
+ .address = _addr, \
+ .info_mask_separate = \
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_index = _idx + 1, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 32, \
+ .storagebits = 32, \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+static const struct iio_chan_spec atlas_ec_channels[] = {
+ {
+ .type = IIO_ELECTRICALCONDUCTIVITY,
+ .address = ATLAS_REG_EC_DATA,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_BE,
+ },
+ },
+ ATLAS_EC_CHANNEL(0, ATLAS_REG_TDS_DATA),
+ ATLAS_EC_CHANNEL(1, ATLAS_REG_PSS_DATA),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
+ {
+ .type = IIO_TEMP,
+ .address = ATLAS_REG_EC_TEMP_DATA,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .output = 1,
+ .scan_index = -1
+ },
+};
+
+static int atlas_check_ph_calibration(struct atlas_data *data)
+{
+ struct device *dev = &data->client->dev;
+ int ret;
+ unsigned int val;
+
+ ret = regmap_read(data->regmap, ATLAS_REG_PH_CALIB_STATUS, &val);
+ if (ret)
+ return ret;
+
+ if (!(val & ATLAS_REG_PH_CALIB_STATUS_MASK)) {
+ dev_warn(dev, "device has not been calibrated\n");
+ return 0;
+ }
+
+ if (!(val & ATLAS_REG_PH_CALIB_STATUS_LOW))
+ dev_warn(dev, "device missing low point calibration\n");
+
+ if (!(val & ATLAS_REG_PH_CALIB_STATUS_MID))
+ dev_warn(dev, "device missing mid point calibration\n");
+
+ if (!(val & ATLAS_REG_PH_CALIB_STATUS_HIGH))
+ dev_warn(dev, "device missing high point calibration\n");
+
+ return 0;
+}
+
+static int atlas_check_ec_calibration(struct atlas_data *data)
+{
+ struct device *dev = &data->client->dev;
+ int ret;
+ unsigned int val;
+
+ ret = regmap_bulk_read(data->regmap, ATLAS_REG_EC_PROBE, &val, 2);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "probe set to K = %d.%.2d", be16_to_cpu(val) / 100,
+ be16_to_cpu(val) % 100);
+
+ ret = regmap_read(data->regmap, ATLAS_REG_EC_CALIB_STATUS, &val);
+ if (ret)
+ return ret;
+
+ if (!(val & ATLAS_REG_EC_CALIB_STATUS_MASK)) {
+ dev_warn(dev, "device has not been calibrated\n");
+ return 0;
+ }
+
+ if (!(val & ATLAS_REG_EC_CALIB_STATUS_DRY))
+ dev_warn(dev, "device missing dry point calibration\n");
+
+ if (val & ATLAS_REG_EC_CALIB_STATUS_SINGLE) {
+ dev_warn(dev, "device using single point calibration\n");
+ } else {
+ if (!(val & ATLAS_REG_EC_CALIB_STATUS_LOW))
+ dev_warn(dev, "device missing low point calibration\n");
+
+ if (!(val & ATLAS_REG_EC_CALIB_STATUS_HIGH))
+ dev_warn(dev, "device missing high point calibration\n");
+ }
+
+ return 0;
+}
+
+struct atlas_device {
+ const struct iio_chan_spec *channels;
+ int num_channels;
+ int data_reg;
+
+ int (*calibration)(struct atlas_data *data);
+ int delay;
+};
+
+static struct atlas_device atlas_devices[] = {
+ [ATLAS_PH_SM] = {
+ .channels = atlas_ph_channels,
+ .num_channels = 3,
+ .data_reg = ATLAS_REG_PH_DATA,
+ .calibration = &atlas_check_ph_calibration,
+ .delay = ATLAS_PH_INT_TIME_IN_US,
+ },
+ [ATLAS_EC_SM] = {
+ .channels = atlas_ec_channels,
+ .num_channels = 5,
+ .data_reg = ATLAS_REG_EC_DATA,
+ .calibration = &atlas_check_ec_calibration,
+ .delay = ATLAS_EC_INT_TIME_IN_US,
+ },
+
+};
+
static int atlas_set_powermode(struct atlas_data *data, int on)
{
return regmap_write(data->regmap, ATLAS_REG_PWR_CONTROL, on);
@@ -178,12 +337,13 @@ static irqreturn_t atlas_trigger_handler(int irq, void *private)
struct atlas_data *data = iio_priv(indio_dev);
int ret;
- ret = regmap_bulk_read(data->regmap, ATLAS_REG_PH_DATA,
- (u8 *) &data->buffer, sizeof(data->buffer[0]));
+ ret = regmap_bulk_read(data->regmap, data->chip->data_reg,
+ (u8 *) &data->buffer,
+ sizeof(__be32) * (data->chip->num_channels - 2));
if (!ret)
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
iio_trigger_notify_done(indio_dev->trig);
@@ -200,7 +360,7 @@ static irqreturn_t atlas_interrupt_handler(int irq, void *private)
return IRQ_HANDLED;
}
-static int atlas_read_ph_measurement(struct atlas_data *data, __be32 *val)
+static int atlas_read_measurement(struct atlas_data *data, int reg, __be32 *val)
{
struct device *dev = &data->client->dev;
int suspended = pm_runtime_suspended(dev);
@@ -213,11 +373,9 @@ static int atlas_read_ph_measurement(struct atlas_data *data, __be32 *val)
}
if (suspended)
- usleep_range(ATLAS_PH_INT_TIME_IN_US,
- ATLAS_PH_INT_TIME_IN_US + 100000);
+ usleep_range(data->chip->delay, data->chip->delay + 100000);
- ret = regmap_bulk_read(data->regmap, ATLAS_REG_PH_DATA,
- (u8 *) val, sizeof(*val));
+ ret = regmap_bulk_read(data->regmap, reg, (u8 *) val, sizeof(*val));
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
@@ -242,12 +400,15 @@ static int atlas_read_raw(struct iio_dev *indio_dev,
(u8 *) &reg, sizeof(reg));
break;
case IIO_PH:
+ case IIO_CONCENTRATION:
+ case IIO_ELECTRICALCONDUCTIVITY:
mutex_lock(&indio_dev->mlock);
if (iio_buffer_enabled(indio_dev))
ret = -EBUSY;
else
- ret = atlas_read_ph_measurement(data, &reg);
+ ret = atlas_read_measurement(data,
+ chan->address, &reg);
mutex_unlock(&indio_dev->mlock);
break;
@@ -271,6 +432,14 @@ static int atlas_read_raw(struct iio_dev *indio_dev,
*val = 1; /* 0.001 */
*val2 = 1000;
break;
+ case IIO_ELECTRICALCONDUCTIVITY:
+ *val = 1; /* 0.00001 */
+ *val2 = 100000;
+ break;
+ case IIO_CONCENTRATION:
+ *val = 0; /* 0.000000001 */
+ *val2 = 1000;
+ return IIO_VAL_INT_PLUS_NANO;
default:
return -EINVAL;
}
@@ -303,37 +472,26 @@ static const struct iio_info atlas_info = {
.write_raw = atlas_write_raw,
};
-static int atlas_check_calibration(struct atlas_data *data)
-{
- struct device *dev = &data->client->dev;
- int ret;
- unsigned int val;
-
- ret = regmap_read(data->regmap, ATLAS_REG_CALIB_STATUS, &val);
- if (ret)
- return ret;
-
- if (!(val & ATLAS_REG_CALIB_STATUS_MASK)) {
- dev_warn(dev, "device has not been calibrated\n");
- return 0;
- }
-
- if (!(val & ATLAS_REG_CALIB_STATUS_LOW))
- dev_warn(dev, "device missing low point calibration\n");
-
- if (!(val & ATLAS_REG_CALIB_STATUS_MID))
- dev_warn(dev, "device missing mid point calibration\n");
-
- if (!(val & ATLAS_REG_CALIB_STATUS_HIGH))
- dev_warn(dev, "device missing high point calibration\n");
+static const struct i2c_device_id atlas_id[] = {
+ { "atlas-ph-sm", ATLAS_PH_SM},
+ { "atlas-ec-sm", ATLAS_EC_SM},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, atlas_id);
- return 0;
+static const struct of_device_id atlas_dt_ids[] = {
+ { .compatible = "atlas,ph-sm", .data = (void *)ATLAS_PH_SM, },
+ { .compatible = "atlas,ec-sm", .data = (void *)ATLAS_EC_SM, },
+ { }
};
+MODULE_DEVICE_TABLE(of, atlas_dt_ids);
static int atlas_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct atlas_data *data;
+ struct atlas_device *chip;
+ const struct of_device_id *of_id;
struct iio_trigger *trig;
struct iio_dev *indio_dev;
int ret;
@@ -342,10 +500,16 @@ static int atlas_probe(struct i2c_client *client,
if (!indio_dev)
return -ENOMEM;
+ of_id = of_match_device(atlas_dt_ids, &client->dev);
+ if (!of_id)
+ chip = &atlas_devices[id->driver_data];
+ else
+ chip = &atlas_devices[(unsigned long)of_id->data];
+
indio_dev->info = &atlas_info;
indio_dev->name = ATLAS_DRV_NAME;
- indio_dev->channels = atlas_channels;
- indio_dev->num_channels = ARRAY_SIZE(atlas_channels);
+ indio_dev->channels = chip->channels;
+ indio_dev->num_channels = chip->num_channels;
indio_dev->modes = INDIO_BUFFER_SOFTWARE | INDIO_DIRECT_MODE;
indio_dev->dev.parent = &client->dev;
@@ -358,6 +522,7 @@ static int atlas_probe(struct i2c_client *client,
data = iio_priv(indio_dev);
data->client = client;
data->trig = trig;
+ data->chip = chip;
trig->dev.parent = indio_dev->dev.parent;
trig->ops = &atlas_interrupt_trigger_ops;
iio_trigger_set_drvdata(trig, indio_dev);
@@ -379,7 +544,7 @@ static int atlas_probe(struct i2c_client *client,
return -EINVAL;
}
- ret = atlas_check_calibration(data);
+ ret = chip->calibration(data);
if (ret)
return ret;
@@ -480,18 +645,6 @@ static const struct dev_pm_ops atlas_pm_ops = {
atlas_runtime_resume, NULL)
};
-static const struct i2c_device_id atlas_id[] = {
- { "atlas-ph-sm", 0 },
- {}
-};
-MODULE_DEVICE_TABLE(i2c, atlas_id);
-
-static const struct of_device_id atlas_dt_ids[] = {
- { .compatible = "atlas,ph-sm" },
- { }
-};
-MODULE_DEVICE_TABLE(of, atlas_dt_ids);
-
static struct i2c_driver atlas_driver = {
.driver = {
.name = ATLAS_DRV_NAME,
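atlas_probe() above selects the per-chip descriptor either from the OF match data or from the i2c id table; both carry the same index into atlas_devices[]. The dispatch, boiled down to a sketch (vendor,foo compatibles and helper name are hypothetical):

#include <linux/i2c.h>
#include <linux/of_device.h>

static const struct of_device_id foo_dt_ids[] = {
	{ .compatible = "vendor,foo-a", .data = (void *)0 },
	{ .compatible = "vendor,foo-b", .data = (void *)1 },
	{ }
};

static unsigned long foo_pick_variant(struct i2c_client *client,
				      const struct i2c_device_id *id)
{
	const struct of_device_id *of_id =
		of_match_device(foo_dt_ids, &client->dev);

	/* DT enumeration wins; otherwise trust the i2c id table */
	return of_id ? (unsigned long)of_id->data : id->driver_data;
}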
diff --git a/drivers/iio/common/st_sensors/st_sensors_buffer.c b/drivers/iio/common/st_sensors/st_sensors_buffer.c
index f1693dbeb..d06e728ce 100644
--- a/drivers/iio/common/st_sensors/st_sensors_buffer.c
+++ b/drivers/iio/common/st_sensors/st_sensors_buffer.c
@@ -22,34 +22,32 @@
#include <linux/iio/common/st_sensors.h>
-int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf)
+static int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf)
{
- int i, len;
- int total = 0;
+ int i;
struct st_sensor_data *sdata = iio_priv(indio_dev);
unsigned int num_data_channels = sdata->num_data_channels;
- for (i = 0; i < num_data_channels; i++) {
- unsigned int bytes_to_read;
-
- if (test_bit(i, indio_dev->active_scan_mask)) {
- bytes_to_read = indio_dev->channels[i].scan_type.storagebits >> 3;
- len = sdata->tf->read_multiple_byte(&sdata->tb,
- sdata->dev, indio_dev->channels[i].address,
- bytes_to_read,
- buf + total, sdata->multiread_bit);
-
- if (len < bytes_to_read)
- return -EIO;
-
- /* Advance the buffer pointer */
- total += len;
- }
+ for_each_set_bit(i, indio_dev->active_scan_mask, num_data_channels) {
+ const struct iio_chan_spec *channel = &indio_dev->channels[i];
+ unsigned int bytes_to_read = channel->scan_type.realbits >> 3;
+ unsigned int storage_bytes =
+ channel->scan_type.storagebits >> 3;
+
+ buf = PTR_ALIGN(buf, storage_bytes);
+ if (sdata->tf->read_multiple_byte(&sdata->tb, sdata->dev,
+ channel->address,
+ bytes_to_read, buf,
+ sdata->multiread_bit) <
+ bytes_to_read)
+ return -EIO;
+
+ /* Advance the buffer pointer */
+ buf += storage_bytes;
}
- return total;
+ return 0;
}
-EXPORT_SYMBOL(st_sensors_get_buffer_element);
irqreturn_t st_sensors_trigger_handler(int irq, void *p)
{
@@ -59,11 +57,16 @@ irqreturn_t st_sensors_trigger_handler(int irq, void *p)
struct st_sensor_data *sdata = iio_priv(indio_dev);
s64 timestamp;
- /* If we do timetamping here, do it before reading the values */
+ /*
+ * If we do timestamping here, do it before reading the values, because
+ * once we've read the values, new interrupts can occur (when using
+ * the hardware trigger) and the hw_timestamp may get updated.
+ * By storing it in a local variable first, we are safe.
+ */
if (sdata->hw_irq_trigger)
timestamp = sdata->hw_timestamp;
else
- timestamp = iio_get_time_ns();
+ timestamp = iio_get_time_ns(indio_dev);
len = st_sensors_get_buffer_element(indio_dev, sdata->buffer_data);
if (len < 0)
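The st_sensors buffer rewrite above reads realbits/8 bytes per enabled channel but stores each sample at a storagebits/8-aligned offset, which is the layout the IIO core demultiplexer expects; for_each_set_bit() walks only the enabled channels. The packing rule in isolation (a sketch, not the driver code):

#include <linux/bitops.h>
#include <linux/kernel.h>

static u8 *foo_pack_samples(u8 *buf, const unsigned long *scan_mask,
			    unsigned int num_channels,
			    unsigned int storage_bytes)
{
	unsigned int i;

	for_each_set_bit(i, scan_mask, num_channels) {
		buf = PTR_ALIGN(buf, storage_bytes);
		/* read channel i's realbits/8 bytes into buf here */
		buf += storage_bytes;
	}

	return buf;
}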
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 9e59c90f6..2d5282e05 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -228,7 +228,7 @@ int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable)
}
EXPORT_SYMBOL(st_sensors_set_axis_enable);
-void st_sensors_power_enable(struct iio_dev *indio_dev)
+int st_sensors_power_enable(struct iio_dev *indio_dev)
{
struct st_sensor_data *pdata = iio_priv(indio_dev);
int err;
@@ -237,18 +237,37 @@ void st_sensors_power_enable(struct iio_dev *indio_dev)
pdata->vdd = devm_regulator_get_optional(indio_dev->dev.parent, "vdd");
if (!IS_ERR(pdata->vdd)) {
err = regulator_enable(pdata->vdd);
- if (err != 0)
+ if (err != 0) {
dev_warn(&indio_dev->dev,
"Failed to enable specified Vdd supply\n");
+ return err;
+ }
+ } else {
+ err = PTR_ERR(pdata->vdd);
+ if (err != -ENODEV)
+ return err;
}
pdata->vdd_io = devm_regulator_get_optional(indio_dev->dev.parent, "vddio");
if (!IS_ERR(pdata->vdd_io)) {
err = regulator_enable(pdata->vdd_io);
- if (err != 0)
+ if (err != 0) {
dev_warn(&indio_dev->dev,
"Failed to enable specified Vdd_IO supply\n");
+ goto st_sensors_disable_vdd;
+ }
+ } else {
+ err = PTR_ERR(pdata->vdd_io);
+ if (err != -ENODEV)
+ goto st_sensors_disable_vdd;
}
+
+ return 0;
+
+st_sensors_disable_vdd:
+ if (!IS_ERR_OR_NULL(pdata->vdd))
+ regulator_disable(pdata->vdd);
+ return err;
}
EXPORT_SYMBOL(st_sensors_power_enable);
@@ -256,10 +275,10 @@ void st_sensors_power_disable(struct iio_dev *indio_dev)
{
struct st_sensor_data *pdata = iio_priv(indio_dev);
- if (!IS_ERR(pdata->vdd))
+ if (!IS_ERR_OR_NULL(pdata->vdd))
regulator_disable(pdata->vdd);
- if (!IS_ERR(pdata->vdd_io))
+ if (!IS_ERR_OR_NULL(pdata->vdd_io))
regulator_disable(pdata->vdd_io);
}
EXPORT_SYMBOL(st_sensors_power_disable);
@@ -471,7 +490,7 @@ static int st_sensors_read_axis_data(struct iio_dev *indio_dev,
int err;
u8 *outdata;
struct st_sensor_data *sdata = iio_priv(indio_dev);
- unsigned int byte_for_channel = ch->scan_type.storagebits >> 3;
+ unsigned int byte_for_channel = ch->scan_type.realbits >> 3;
outdata = kmalloc(byte_for_channel, GFP_KERNEL);
if (!outdata)
@@ -531,7 +550,7 @@ int st_sensors_check_device_support(struct iio_dev *indio_dev,
int num_sensors_list,
const struct st_sensor_settings *sensor_settings)
{
- int i, n, err;
+ int i, n, err = 0;
u8 wai;
struct st_sensor_data *sdata = iio_priv(indio_dev);
@@ -551,17 +570,21 @@ int st_sensors_check_device_support(struct iio_dev *indio_dev,
return -ENODEV;
}
- err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
- sensor_settings[i].wai_addr, &wai);
- if (err < 0) {
- dev_err(&indio_dev->dev, "failed to read Who-Am-I register.\n");
- return err;
- }
+ if (sensor_settings[i].wai_addr) {
+ err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
+ sensor_settings[i].wai_addr, &wai);
+ if (err < 0) {
+ dev_err(&indio_dev->dev,
+ "failed to read Who-Am-I register.\n");
+ return err;
+ }
- if (sensor_settings[i].wai != wai) {
- dev_err(&indio_dev->dev, "%s: WhoAmI mismatch (0x%x).\n",
- indio_dev->name, wai);
- return -EINVAL;
+ if (sensor_settings[i].wai != wai) {
+ dev_err(&indio_dev->dev,
+ "%s: WhoAmI mismatch (0x%x).\n",
+ indio_dev->name, wai);
+ return -EINVAL;
+ }
}
sdata->sensor_settings =
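st_sensors_power_enable() now distinguishes "supply not described" from real failures: devm_regulator_get_optional() returns ERR_PTR(-ENODEV) for the former, and only that error may be ignored; anything else, notably -EPROBE_DEFER, must be propagated. The contract in miniature (sketch with a hypothetical helper):

#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int foo_enable_optional_supply(struct device *dev)
{
	struct regulator *vdd;
	int err;

	vdd = devm_regulator_get_optional(dev, "vdd");
	if (IS_ERR(vdd)) {
		err = PTR_ERR(vdd);
		/* an absent supply is fine; -EPROBE_DEFER etc. is not */
		return err == -ENODEV ? 0 : err;
	}

	return regulator_enable(vdd);
}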
diff --git a/drivers/iio/common/st_sensors/st_sensors_i2c.c b/drivers/iio/common/st_sensors/st_sensors_i2c.c
index 98cfee296..b43aa3603 100644
--- a/drivers/iio/common/st_sensors/st_sensors_i2c.c
+++ b/drivers/iio/common/st_sensors/st_sensors_i2c.c
@@ -48,8 +48,8 @@ static int st_sensors_i2c_read_multiple_byte(
if (multiread_bit)
reg_addr |= ST_SENSORS_I2C_MULTIREAD;
- return i2c_smbus_read_i2c_block_data(to_i2c_client(dev),
- reg_addr, len, data);
+ return i2c_smbus_read_i2c_block_data_or_emulated(to_i2c_client(dev),
+ reg_addr, len, data);
}
static int st_sensors_i2c_write_byte(struct st_sensor_transfer_buffer *tb,
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
index 296e4ff19..e66f12ee8 100644
--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
@@ -18,6 +18,50 @@
#include "st_sensors_core.h"
/**
+ * st_sensors_new_samples_available() - check if more samples came in
+ * returns:
+ * 0 - no new samples available
+ * 1 - new samples available
+ * negative - error or unknown
+ */
+static int st_sensors_new_samples_available(struct iio_dev *indio_dev,
+ struct st_sensor_data *sdata)
+{
+ u8 status;
+ int ret;
+
+ /* How would I know if I can't check it? */
+ if (!sdata->sensor_settings->drdy_irq.addr_stat_drdy)
+ return -EINVAL;
+
+ /* No scan mask, no interrupt */
+ if (!indio_dev->active_scan_mask)
+ return 0;
+
+ ret = sdata->tf->read_byte(&sdata->tb, sdata->dev,
+ sdata->sensor_settings->drdy_irq.addr_stat_drdy,
+ &status);
+ if (ret < 0) {
+ dev_err(sdata->dev,
+ "error checking samples available\n");
+ return ret;
+ }
+ /*
+ * the lower bits of .active_scan_mask[0] are directly mapped
+ * to the channels on the sensor: either bit 0 for
+ * one-dimensional sensors, or e.g. x,y,z for accelerometers,
+ * gyroscopes or magnetometers. No sensor uses more than 3
+ * channels, so cut the other status bits here.
+ */
+ status &= 0x07;
+
+ if (status & (u8)indio_dev->active_scan_mask[0])
+ return 1;
+
+ return 0;
+}
+
+/**
* st_sensors_irq_handler() - top half of the IRQ-based triggers
* @irq: irq number
* @p: private handler data
@@ -29,7 +73,7 @@ irqreturn_t st_sensors_irq_handler(int irq, void *p)
struct st_sensor_data *sdata = iio_priv(indio_dev);
/* Get the time stamp as close in time as possible */
- sdata->hw_timestamp = iio_get_time_ns();
+ sdata->hw_timestamp = iio_get_time_ns(indio_dev);
return IRQ_WAKE_THREAD;
}
@@ -43,44 +87,43 @@ irqreturn_t st_sensors_irq_thread(int irq, void *p)
struct iio_trigger *trig = p;
struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
struct st_sensor_data *sdata = iio_priv(indio_dev);
- int ret;
/*
* If this trigger is backed by a hardware interrupt and we have a
- * status register, check if this IRQ came from us
+ * status register, check if this IRQ came from us. Note that we
+ * will also process the IRQ if st_sensors_new_samples_available()
+ * returns negative: if we can't check the status, we poll
+ * unconditionally.
*/
- if (sdata->sensor_settings->drdy_irq.addr_stat_drdy) {
- u8 status;
-
- ret = sdata->tf->read_byte(&sdata->tb, sdata->dev,
- sdata->sensor_settings->drdy_irq.addr_stat_drdy,
- &status);
- if (ret < 0) {
- dev_err(sdata->dev, "could not read channel status\n");
- goto out_poll;
- }
- /*
- * the lower bits of .active_scan_mask[0] is directly mapped
- * to the channels on the sensor: either bit 0 for
- * one-dimensional sensors, or e.g. x,y,z for accelerometers,
- * gyroscopes or magnetometers. No sensor use more than 3
- * channels, so cut the other status bits here.
- */
- status &= 0x07;
+ if (sdata->hw_irq_trigger &&
+ st_sensors_new_samples_available(indio_dev, sdata)) {
+ iio_trigger_poll_chained(p);
+ } else {
+ dev_dbg(sdata->dev, "spurious IRQ\n");
+ return IRQ_NONE;
+ }
- /*
- * If this was not caused by any channels on this sensor,
- * return IRQ_NONE
- */
- if (!indio_dev->active_scan_mask)
- return IRQ_NONE;
- if (!(status & (u8)indio_dev->active_scan_mask[0]))
- return IRQ_NONE;
+ /*
+ * If we have proper level IRQs the handler will be re-entered if
+ * the line is still active, so return here and come back in through
+ * the top half if need be.
+ */
+ if (!sdata->edge_irq)
+ return IRQ_HANDLED;
+
+ /*
+ * If we are using edge IRQs, new samples may have arrived while
+ * processing the IRQ and would be missed unless we pick them up
+ * here, so poll again. If the sensor delivery frequency is very
+ * high, this thread turns into a polled loop handler.
+ */
+ while (sdata->hw_irq_trigger &&
+ st_sensors_new_samples_available(indio_dev, sdata)) {
+ dev_dbg(sdata->dev, "more samples came in during polling\n");
+ sdata->hw_timestamp = iio_get_time_ns(indio_dev);
+ iio_trigger_poll_chained(p);
}
-out_poll:
- /* It's our IRQ: proceed to handle the register polling */
- iio_trigger_poll_chained(p);
return IRQ_HANDLED;
}
@@ -107,13 +150,18 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
* If the IRQ is triggered on falling edge, we need to mark the
* interrupt as active low, if the hardware supports this.
*/
- if (irq_trig == IRQF_TRIGGER_FALLING) {
+ switch (irq_trig) {
+ case IRQF_TRIGGER_FALLING:
+ case IRQF_TRIGGER_LOW:
if (!sdata->sensor_settings->drdy_irq.addr_ihl) {
dev_err(&indio_dev->dev,
- "falling edge specified for IRQ but hardware "
- "only support rising edge, will request "
- "rising edge\n");
- irq_trig = IRQF_TRIGGER_RISING;
+ "falling/low specified for IRQ "
+ "but hardware only support rising/high: "
+ "will request rising/high\n");
+ if (irq_trig == IRQF_TRIGGER_FALLING)
+ irq_trig = IRQF_TRIGGER_RISING;
+ if (irq_trig == IRQF_TRIGGER_LOW)
+ irq_trig = IRQF_TRIGGER_HIGH;
} else {
/* Set up INT active low i.e. falling edge */
err = st_sensors_write_data_with_mask(indio_dev,
@@ -122,20 +170,39 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
if (err < 0)
goto iio_trigger_free;
dev_info(&indio_dev->dev,
- "interrupts on the falling edge\n");
+ "interrupts on the falling edge or "
+ "active low level\n");
}
- } else if (irq_trig == IRQF_TRIGGER_RISING) {
+ break;
+ case IRQF_TRIGGER_RISING:
dev_info(&indio_dev->dev,
"interrupts on the rising edge\n");
-
- } else {
+ break;
+ case IRQF_TRIGGER_HIGH:
+ dev_info(&indio_dev->dev,
+ "interrupts active high level\n");
+ break;
+ default:
+ /* This is the most preferred mode, if possible */
dev_err(&indio_dev->dev,
- "unsupported IRQ trigger specified (%lx), only "
- "rising and falling edges supported, enforce "
+ "unsupported IRQ trigger specified (%lx), enforce "
"rising edge\n", irq_trig);
irq_trig = IRQF_TRIGGER_RISING;
}
+ /* Tell the interrupt handler that we're dealing with edges */
+ if (irq_trig == IRQF_TRIGGER_FALLING ||
+ irq_trig == IRQF_TRIGGER_RISING)
+ sdata->edge_irq = true;
+ else
+ /*
+ * If we're not using edges (i.e. level interrupts) we
+ * just mask off the IRQ, handle one interrupt, then
+ * if the line is still low, we return to the
+ * interrupt handler top half again and start over.
+ */
+ irq_trig |= IRQF_ONESHOT;
+
/*
* If the interrupt pin is Open Drain, by definition this
* means that the interrupt line may be shared with other
@@ -148,9 +215,6 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
sdata->sensor_settings->drdy_irq.addr_stat_drdy)
irq_trig |= IRQF_SHARED;
- /* Let's create an interrupt thread masking the hard IRQ here */
- irq_trig |= IRQF_ONESHOT;
-
err = request_threaded_irq(sdata->get_irq_data_ready(indio_dev),
st_sensors_irq_handler,
st_sensors_irq_thread,
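The trigger rework above keys the IRQ flags to the trigger type: level IRQs get IRQF_ONESHOT (the line stays masked until the thread finishes, and the top half re-enters if it is still asserted), while edge IRQs stay unmasked and the thread re-polls the data-ready status so samples arriving mid-handler are not lost. The flag policy alone looks roughly like this (sketch, hypothetical helper):

#include <linux/interrupt.h>

static unsigned long foo_adjust_irq_flags(unsigned long irq_trig,
					  bool *edge_irq)
{
	*edge_irq = (irq_trig == IRQF_TRIGGER_RISING ||
		     irq_trig == IRQF_TRIGGER_FALLING);

	/* level IRQs: mask until the thread completes, then re-evaluate */
	if (!*edge_irq)
		irq_trig |= IRQF_ONESHOT;

	return irq_trig;
}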
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index f7c71da42..ca814479f 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -248,11 +248,12 @@ config MCP4922
config STX104
tristate "Apex Embedded Systems STX104 DAC driver"
depends on X86 && ISA_BUS_API
+ select GPIOLIB
help
- Say yes here to build support for the 2-channel DAC on the Apex
- Embedded Systems STX104 integrated analog PC/104 card. The base port
- addresses for the devices may be configured via the "base" module
- parameter array.
+ Say yes here to build support for the 2-channel DAC and GPIO on the
+ Apex Embedded Systems STX104 integrated analog PC/104 card. The base
+ port addresses for the devices may be configured via the base array
+ module parameter.
config VF610_DAC
tristate "Vybrid vf610 DAC driver"
diff --git a/drivers/iio/dac/ad5421.c b/drivers/iio/dac/ad5421.c
index 968712be9..559061ab1 100644
--- a/drivers/iio/dac/ad5421.c
+++ b/drivers/iio/dac/ad5421.c
@@ -242,7 +242,7 @@ static irqreturn_t ad5421_fault_handler(int irq, void *data)
0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
}
if (events & AD5421_FAULT_UNDER_CURRENT) {
@@ -251,7 +251,7 @@ static irqreturn_t ad5421_fault_handler(int irq, void *data)
0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_FALLING),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
}
if (events & AD5421_FAULT_TEMP_OVER_140) {
@@ -260,7 +260,7 @@ static irqreturn_t ad5421_fault_handler(int irq, void *data)
0,
IIO_EV_TYPE_MAG,
IIO_EV_DIR_RISING),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
}
old_fault = fault;
diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c
index 4e4c20d6d..788b3d6fd 100644
--- a/drivers/iio/dac/ad5504.c
+++ b/drivers/iio/dac/ad5504.c
@@ -223,7 +223,7 @@ static irqreturn_t ad5504_event_handler(int irq, void *private)
0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING),
- iio_get_time_ns());
+ iio_get_time_ns((struct iio_dev *)private));
return IRQ_HANDLED;
}
diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c
index bfb350a85..0fde593ec 100644
--- a/drivers/iio/dac/ad5755.c
+++ b/drivers/iio/dac/ad5755.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
+#include <linux/of.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/platform_data/ad5755.h>
@@ -109,6 +110,51 @@ enum ad5755_type {
ID_AD5737,
};
+#ifdef CONFIG_OF
+static const int ad5755_dcdc_freq_table[][2] = {
+ { 250000, AD5755_DC_DC_FREQ_250kHZ },
+ { 410000, AD5755_DC_DC_FREQ_410kHZ },
+ { 650000, AD5755_DC_DC_FREQ_650kHZ }
+};
+
+static const int ad5755_dcdc_maxv_table[][2] = {
+ { 23000000, AD5755_DC_DC_MAXV_23V },
+ { 24500000, AD5755_DC_DC_MAXV_24V5 },
+ { 27000000, AD5755_DC_DC_MAXV_27V },
+ { 29500000, AD5755_DC_DC_MAXV_29V5 },
+};
+
+static const int ad5755_slew_rate_table[][2] = {
+ { 64000, AD5755_SLEW_RATE_64k },
+ { 32000, AD5755_SLEW_RATE_32k },
+ { 16000, AD5755_SLEW_RATE_16k },
+ { 8000, AD5755_SLEW_RATE_8k },
+ { 4000, AD5755_SLEW_RATE_4k },
+ { 2000, AD5755_SLEW_RATE_2k },
+ { 1000, AD5755_SLEW_RATE_1k },
+ { 500, AD5755_SLEW_RATE_500 },
+ { 250, AD5755_SLEW_RATE_250 },
+ { 125, AD5755_SLEW_RATE_125 },
+ { 64, AD5755_SLEW_RATE_64 },
+ { 32, AD5755_SLEW_RATE_32 },
+ { 16, AD5755_SLEW_RATE_16 },
+ { 8, AD5755_SLEW_RATE_8 },
+ { 4, AD5755_SLEW_RATE_4 },
+ { 0, AD5755_SLEW_RATE_0_5 },
+};
+
+static const int ad5755_slew_step_table[][2] = {
+ { 256, AD5755_SLEW_STEP_SIZE_256 },
+ { 128, AD5755_SLEW_STEP_SIZE_128 },
+ { 64, AD5755_SLEW_STEP_SIZE_64 },
+ { 32, AD5755_SLEW_STEP_SIZE_32 },
+ { 16, AD5755_SLEW_STEP_SIZE_16 },
+ { 4, AD5755_SLEW_STEP_SIZE_4 },
+ { 2, AD5755_SLEW_STEP_SIZE_2 },
+ { 1, AD5755_SLEW_STEP_SIZE_1 },
+};
+#endif
+
static int ad5755_write_unlocked(struct iio_dev *indio_dev,
unsigned int reg, unsigned int val)
{
@@ -556,6 +602,129 @@ static const struct ad5755_platform_data ad5755_default_pdata = {
},
};
+#ifdef CONFIG_OF
+static struct ad5755_platform_data *ad5755_parse_dt(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *pp;
+ struct ad5755_platform_data *pdata;
+ unsigned int tmp;
+ unsigned int tmparray[3];
+ int devnr, i;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ pdata->ext_dc_dc_compenstation_resistor =
+ of_property_read_bool(np, "adi,ext-dc-dc-compenstation-resistor");
+
+ if (!of_property_read_u32(np, "adi,dc-dc-phase", &tmp))
+ pdata->dc_dc_phase = tmp;
+ else
+ pdata->dc_dc_phase = AD5755_DC_DC_PHASE_ALL_SAME_EDGE;
+
+ pdata->dc_dc_freq = AD5755_DC_DC_FREQ_410kHZ;
+ if (!of_property_read_u32(np, "adi,dc-dc-freq-hz", &tmp)) {
+ for (i = 0; i < ARRAY_SIZE(ad5755_dcdc_freq_table); i++) {
+ if (tmp == ad5755_dcdc_freq_table[i][0]) {
+ pdata->dc_dc_freq = ad5755_dcdc_freq_table[i][1];
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(ad5755_dcdc_freq_table)) {
+ dev_err(dev,
+ "adi,dc-dc-freq out of range selecting 410kHz");
+ }
+ }
+
+ pdata->dc_dc_maxv = AD5755_DC_DC_MAXV_23V;
+ if (!of_property_read_u32(np, "adi,dc-dc-max-microvolt", &tmp)) {
+ for (i = 0; i < ARRAY_SIZE(ad5755_dcdc_maxv_table); i++) {
+ if (tmp == ad5755_dcdc_maxv_table[i][0]) {
+ pdata->dc_dc_maxv = ad5755_dcdc_maxv_table[i][1];
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(ad5755_dcdc_maxv_table)) {
+ dev_err(dev,
+ "adi,dc-dc-maxv out of range selecting 23V");
+ }
+ }
+
+ devnr = 0;
+ for_each_child_of_node(np, pp) {
+ if (devnr >= AD5755_NUM_CHANNELS) {
+ dev_err(dev,
+ "There are too many channels defined in DT\n");
+ goto error_out;
+ }
+
+ if (!of_property_read_u32(pp, "adi,mode", &tmp))
+ pdata->dac[devnr].mode = tmp;
+ else
+ pdata->dac[devnr].mode = AD5755_MODE_CURRENT_4mA_20mA;
+
+ pdata->dac[devnr].ext_current_sense_resistor =
+ of_property_read_bool(pp, "adi,ext-current-sense-resistor");
+
+ pdata->dac[devnr].enable_voltage_overrange =
+ of_property_read_bool(pp, "adi,enable-voltage-overrange");
+
+ if (!of_property_read_u32_array(pp, "adi,slew", tmparray, 3)) {
+ pdata->dac[devnr].slew.enable = tmparray[0];
+
+ pdata->dac[devnr].slew.rate = AD5755_SLEW_RATE_64k;
+ for (i = 0; i < ARRAY_SIZE(ad5755_slew_rate_table); i++) {
+ if (tmparray[1] == ad5755_slew_rate_table[i][0]) {
+ pdata->dac[devnr].slew.rate =
+ ad5755_slew_rate_table[i][1];
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(ad5755_slew_rate_table)) {
+ dev_err(dev,
+ "channel %d slew rate out of range selecting 64kHz",
+ devnr);
+ }
+
+ pdata->dac[devnr].slew.step_size = AD5755_SLEW_STEP_SIZE_1;
+ for (i = 0; i < ARRAY_SIZE(ad5755_slew_step_table); i++) {
+ if (tmparray[2] == ad5755_slew_step_table[i][0]) {
+ pdata->dac[devnr].slew.step_size =
+ ad5755_slew_step_table[i][1];
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(ad5755_slew_step_table)) {
+ dev_err(dev,
+ "channel %d slew step size out of range selecting 1 LSB",
+ devnr);
+ }
+ } else {
+ pdata->dac[devnr].slew.enable = false;
+ pdata->dac[devnr].slew.rate = AD5755_SLEW_RATE_64k;
+ pdata->dac[devnr].slew.step_size =
+ AD5755_SLEW_STEP_SIZE_1;
+ }
+ devnr++;
+ }
+
+ return pdata;
+
+ error_out:
+ devm_kfree(dev, pdata);
+ return NULL;
+}
+#else
+static
+struct ad5755_platform_data *ad5755_parse_dt(struct device *dev)
+{
+ return NULL;
+}
+#endif
+
static int ad5755_probe(struct spi_device *spi)
{
enum ad5755_type type = spi_get_device_id(spi)->driver_data;
@@ -583,8 +752,15 @@ static int ad5755_probe(struct spi_device *spi)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->num_channels = AD5755_NUM_CHANNELS;
- if (!pdata)
+ if (spi->dev.of_node)
+ pdata = ad5755_parse_dt(&spi->dev);
+ else
+ pdata = spi->dev.platform_data;
+
+ if (!pdata) {
+ dev_warn(&spi->dev, "no platform data? using default\n");
pdata = &ad5755_default_pdata;
+ }
ret = ad5755_init_channels(indio_dev, pdata);
if (ret)
@@ -607,6 +783,16 @@ static const struct spi_device_id ad5755_id[] = {
};
MODULE_DEVICE_TABLE(spi, ad5755_id);
+static const struct of_device_id ad5755_of_match[] = {
+ { .compatible = "adi,ad5755" },
+ { .compatible = "adi,ad5755-1" },
+ { .compatible = "adi,ad5757" },
+ { .compatible = "adi,ad5735" },
+ { .compatible = "adi,ad5737" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ad5755_of_match);
+
static struct spi_driver ad5755_driver = {
.driver = {
.name = "ad5755",
diff --git a/drivers/iio/dac/stx104.c b/drivers/iio/dac/stx104.c
index 279412208..bebbd0030 100644
--- a/drivers/iio/dac/stx104.c
+++ b/drivers/iio/dac/stx104.c
@@ -14,6 +14,7 @@
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/errno.h>
+#include <linux/gpio/driver.h>
#include <linux/iio/iio.h>
#include <linux/iio/types.h>
#include <linux/io.h>
@@ -21,6 +22,7 @@
#include <linux/isa.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/spinlock.h>
#define STX104_NUM_CHAN 2
@@ -49,6 +51,30 @@ struct stx104_iio {
unsigned base;
};
+/**
+ * struct stx104_gpio - GPIO device private data structure
+ * @chip: instance of the gpio_chip
+ * @lock: synchronization lock to prevent I/O race conditions
+ * @base: base port address of the GPIO device
+ * @out_state: output bits state
+ */
+struct stx104_gpio {
+ struct gpio_chip chip;
+ spinlock_t lock;
+ unsigned int base;
+ unsigned int out_state;
+};
+
+/**
+ * struct stx104_dev - STX104 device private data structure
+ * @indio_dev: IIO device
+ * @chip: instance of the gpio_chip
+ */
+struct stx104_dev {
+ struct iio_dev *indio_dev;
+ struct gpio_chip *chip;
+};
+
static int stx104_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val, int *val2, long mask)
{
@@ -88,15 +114,87 @@ static const struct iio_chan_spec stx104_channels[STX104_NUM_CHAN] = {
STX104_CHAN(1)
};
+static int stx104_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ /* GPIO 0-3 are input only, while the rest are output only */
+ if (offset < 4)
+ return 1;
+
+ return 0;
+}
+
+static int stx104_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ if (offset >= 4)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int stx104_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ if (offset < 4)
+ return -EINVAL;
+
+ chip->set(chip, offset, value);
+ return 0;
+}
+
+static int stx104_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct stx104_gpio *const stx104gpio = gpiochip_get_data(chip);
+
+ if (offset >= 4)
+ return -EINVAL;
+
+ return !!(inb(stx104gpio->base) & BIT(offset));
+}
+
+static void stx104_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct stx104_gpio *const stx104gpio = gpiochip_get_data(chip);
+ const unsigned int mask = BIT(offset) >> 4;
+ unsigned long flags;
+
+ if (offset < 4)
+ return;
+
+ spin_lock_irqsave(&stx104gpio->lock, flags);
+
+ if (value)
+ stx104gpio->out_state |= mask;
+ else
+ stx104gpio->out_state &= ~mask;
+
+ outb(stx104gpio->out_state, stx104gpio->base);
+
+ spin_unlock_irqrestore(&stx104gpio->lock, flags);
+}
+
static int stx104_probe(struct device *dev, unsigned int id)
{
struct iio_dev *indio_dev;
struct stx104_iio *priv;
+ struct stx104_gpio *stx104gpio;
+ struct stx104_dev *stx104dev;
+ int err;
indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
if (!indio_dev)
return -ENOMEM;
+ stx104gpio = devm_kzalloc(dev, sizeof(*stx104gpio), GFP_KERNEL);
+ if (!stx104gpio)
+ return -ENOMEM;
+
+ stx104dev = devm_kzalloc(dev, sizeof(*stx104dev), GFP_KERNEL);
+ if (!stx104dev)
+ return -ENOMEM;
+
if (!devm_request_region(dev, base[id], STX104_EXTENT,
dev_name(dev))) {
dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
@@ -117,14 +215,57 @@ static int stx104_probe(struct device *dev, unsigned int id)
outw(0, base[id] + 4);
outw(0, base[id] + 6);
- return devm_iio_device_register(dev, indio_dev);
+ stx104gpio->chip.label = dev_name(dev);
+ stx104gpio->chip.parent = dev;
+ stx104gpio->chip.owner = THIS_MODULE;
+ stx104gpio->chip.base = -1;
+ stx104gpio->chip.ngpio = 8;
+ stx104gpio->chip.get_direction = stx104_gpio_get_direction;
+ stx104gpio->chip.direction_input = stx104_gpio_direction_input;
+ stx104gpio->chip.direction_output = stx104_gpio_direction_output;
+ stx104gpio->chip.get = stx104_gpio_get;
+ stx104gpio->chip.set = stx104_gpio_set;
+ stx104gpio->base = base[id] + 3;
+ stx104gpio->out_state = 0x0;
+
+ spin_lock_init(&stx104gpio->lock);
+
+ stx104dev->indio_dev = indio_dev;
+ stx104dev->chip = &stx104gpio->chip;
+ dev_set_drvdata(dev, stx104dev);
+
+ err = gpiochip_add_data(&stx104gpio->chip, stx104gpio);
+ if (err) {
+ dev_err(dev, "GPIO registering failed (%d)\n", err);
+ return err;
+ }
+
+ err = iio_device_register(indio_dev);
+ if (err) {
+ dev_err(dev, "IIO device registering failed (%d)\n", err);
+ gpiochip_remove(&stx104gpio->chip);
+ return err;
+ }
+
+ return 0;
+}
+
+static int stx104_remove(struct device *dev, unsigned int id)
+{
+ struct stx104_dev *const stx104dev = dev_get_drvdata(dev);
+
+ iio_device_unregister(stx104dev->indio_dev);
+ gpiochip_remove(stx104dev->chip);
+
+ return 0;
}
static struct isa_driver stx104_driver = {
.probe = stx104_probe,
.driver = {
.name = "stx104"
- }
+ },
+ .remove = stx104_remove
};
module_isa_driver(stx104_driver, num_stx104);
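The stx104 changes add a second subsystem (GPIO) to an IIO driver, so devm_iio_device_register() is replaced with manual registration: the two registrations unwind in reverse order in stx104_remove(), and gpiochip_get_data() recovers the private state inside the GPIO callbacks. The registration/unwind skeleton, as a sketch:

#include <linux/gpio/driver.h>
#include <linux/iio/iio.h>

static int foo_register_both(struct device *dev, struct gpio_chip *chip,
			     void *gpio_priv, struct iio_dev *indio_dev)
{
	int err;

	err = gpiochip_add_data(chip, gpio_priv);	/* priv for callbacks */
	if (err)
		return err;

	err = iio_device_register(indio_dev);
	if (err) {
		gpiochip_remove(chip);		/* unwind in reverse order */
		return err;
	}

	return 0;
}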
diff --git a/drivers/iio/dummy/Kconfig b/drivers/iio/dummy/Kconfig
index 71805ced1..aa5824d96 100644
--- a/drivers/iio/dummy/Kconfig
+++ b/drivers/iio/dummy/Kconfig
@@ -10,6 +10,7 @@ config IIO_DUMMY_EVGEN
config IIO_SIMPLE_DUMMY
tristate "An example driver with no hardware requirements"
+ depends on IIO_SW_DEVICE
help
Driver intended mainly as documentation for how to write
a driver. May also be useful for testing userspace code
diff --git a/drivers/iio/dummy/iio_simple_dummy.c b/drivers/iio/dummy/iio_simple_dummy.c
index 43fe4ba7d..ad3410e52 100644
--- a/drivers/iio/dummy/iio_simple_dummy.c
+++ b/drivers/iio/dummy/iio_simple_dummy.c
@@ -17,26 +17,18 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/string.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
#include <linux/iio/buffer.h>
+#include <linux/iio/sw_device.h>
#include "iio_simple_dummy.h"
-/*
- * A few elements needed to fake a bus for this driver
- * Note instances parameter controls how many of these
- * dummy devices are registered.
- */
-static unsigned instances = 1;
-module_param(instances, uint, 0);
-
-/* Pointer array used to fake bus elements */
-static struct iio_dev **iio_dummy_devs;
-
-/* Fake a name for the part number, usually obtained from the id table */
-static const char *iio_dummy_part_number = "iio_dummy_part_no";
+static struct config_item_type iio_dummy_type = {
+ .ct_owner = THIS_MODULE,
+};
/**
* struct iio_dummy_accel_calibscale - realworld to register mapping
@@ -572,12 +564,18 @@ static int iio_dummy_init_device(struct iio_dev *indio_dev)
* const struct i2c_device_id *id)
* SPI: iio_dummy_probe(struct spi_device *spi)
*/
-static int iio_dummy_probe(int index)
+static struct iio_sw_device *iio_dummy_probe(const char *name)
{
int ret;
struct iio_dev *indio_dev;
struct iio_dummy_state *st;
+ struct iio_sw_device *swd;
+ swd = kzalloc(sizeof(*swd), GFP_KERNEL);
+ if (!swd) {
+ ret = -ENOMEM;
+ goto error_kzalloc;
+ }
/*
* Allocate an IIO device.
*
@@ -608,7 +606,7 @@ static int iio_dummy_probe(int index)
* i2c_set_clientdata(client, indio_dev);
* spi_set_drvdata(spi, indio_dev);
*/
- iio_dummy_devs[index] = indio_dev;
+ swd->device = indio_dev;
/*
* Set the device name.
@@ -619,7 +617,7 @@ static int iio_dummy_probe(int index)
* indio_dev->name = id->name;
* indio_dev->name = spi_get_device_id(spi)->name;
*/
- indio_dev->name = iio_dummy_part_number;
+ indio_dev->name = kstrdup(name, GFP_KERNEL);
/* Provide description of available channels */
indio_dev->channels = iio_dummy_channels;
@@ -646,7 +644,9 @@ static int iio_dummy_probe(int index)
if (ret < 0)
goto error_unconfigure_buffer;
- return 0;
+ iio_swd_group_init_type_name(swd, name, &iio_dummy_type);
+
+ return swd;
error_unconfigure_buffer:
iio_simple_dummy_unconfigure_buffer(indio_dev);
error_unregister_events:
@@ -654,16 +654,18 @@ error_unregister_events:
error_free_device:
iio_device_free(indio_dev);
error_ret:
- return ret;
+ kfree(swd);
+error_kzalloc:
+ return ERR_PTR(ret);
}
/**
* iio_dummy_remove() - device instance removal function
- * @index: device index.
+ * @swd: pointer to software IIO device abstraction
*
* Parameters follow those of iio_dummy_probe for buses.
*/
-static void iio_dummy_remove(int index)
+static int iio_dummy_remove(struct iio_sw_device *swd)
{
/*
* Get a pointer to the device instance iio_dev structure
@@ -671,7 +673,7 @@ static void iio_dummy_remove(int index)
* struct iio_dev *indio_dev = i2c_get_clientdata(client);
* struct iio_dev *indio_dev = spi_get_drvdata(spi);
*/
- struct iio_dev *indio_dev = iio_dummy_devs[index];
+ struct iio_dev *indio_dev = swd->device;
/* Unregister the device */
iio_device_unregister(indio_dev);
@@ -684,11 +686,13 @@ static void iio_dummy_remove(int index)
iio_simple_dummy_events_unregister(indio_dev);
/* Free all structures */
+ kfree(indio_dev->name);
iio_device_free(indio_dev);
-}
+ return 0;
+}
/**
- * iio_dummy_init() - device driver registration
+ * module_iio_sw_device_driver() - device driver registration
*
* Varies depending on bus type of the device. As there is no device
* here, call probe directly. For information on device registration
@@ -697,50 +701,18 @@ static void iio_dummy_remove(int index)
* spi:
* Documentation/spi/spi-summary
*/
-static __init int iio_dummy_init(void)
-{
- int i, ret;
-
- if (instances > 10) {
- instances = 1;
- return -EINVAL;
- }
-
- /* Fake a bus */
- iio_dummy_devs = kcalloc(instances, sizeof(*iio_dummy_devs),
- GFP_KERNEL);
- /* Here we have no actual device so call probe */
- for (i = 0; i < instances; i++) {
- ret = iio_dummy_probe(i);
- if (ret < 0)
- goto error_remove_devs;
- }
- return 0;
-
-error_remove_devs:
- while (i--)
- iio_dummy_remove(i);
-
- kfree(iio_dummy_devs);
- return ret;
-}
-module_init(iio_dummy_init);
+static const struct iio_sw_device_ops iio_dummy_device_ops = {
+ .probe = iio_dummy_probe,
+ .remove = iio_dummy_remove,
+};
-/**
- * iio_dummy_exit() - device driver removal
- *
- * Varies depending on bus type of the device.
- * As there is no device here, call remove directly.
- */
-static __exit void iio_dummy_exit(void)
-{
- int i;
+static struct iio_sw_device_type iio_dummy_device = {
+ .name = "dummy",
+ .owner = THIS_MODULE,
+ .ops = &iio_dummy_device_ops,
+};
- for (i = 0; i < instances; i++)
- iio_dummy_remove(i);
- kfree(iio_dummy_devs);
-}
-module_exit(iio_dummy_exit);
+module_iio_sw_device_driver(iio_dummy_device);
MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
MODULE_DESCRIPTION("IIO dummy driver");
diff --git a/drivers/iio/dummy/iio_simple_dummy_buffer.c b/drivers/iio/dummy/iio_simple_dummy_buffer.c
index cf44a6f79..b383892a5 100644
--- a/drivers/iio/dummy/iio_simple_dummy_buffer.c
+++ b/drivers/iio/dummy/iio_simple_dummy_buffer.c
@@ -85,7 +85,8 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
}
}
- iio_push_to_buffers_with_timestamp(indio_dev, data, iio_get_time_ns());
+ iio_push_to_buffers_with_timestamp(indio_dev, data,
+ iio_get_time_ns(indio_dev));
kfree(data);
diff --git a/drivers/iio/dummy/iio_simple_dummy_events.c b/drivers/iio/dummy/iio_simple_dummy_events.c
index 6eb600ff7..ed63ffd84 100644
--- a/drivers/iio/dummy/iio_simple_dummy_events.c
+++ b/drivers/iio/dummy/iio_simple_dummy_events.c
@@ -158,7 +158,7 @@ static irqreturn_t iio_simple_dummy_get_timestamp(int irq, void *private)
struct iio_dev *indio_dev = private;
struct iio_dummy_state *st = iio_priv(indio_dev);
- st->event_timestamp = iio_get_time_ns();
+ st->event_timestamp = iio_get_time_ns(indio_dev);
return IRQ_WAKE_THREAD;
}
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index 7ccc04406..f7fcfa886 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -50,6 +50,7 @@
#define BMG160_REG_PMU_BW 0x10
#define BMG160_NO_FILTER 0
#define BMG160_DEF_BW 100
+#define BMG160_REG_PMU_BW_RES BIT(7)
#define BMG160_REG_INT_MAP_0 0x17
#define BMG160_INT_MAP_0_BIT_ANY BIT(1)
@@ -100,7 +101,6 @@ struct bmg160_data {
struct iio_trigger *motion_trig;
struct mutex mutex;
s16 buffer[8];
- u8 bw_bits;
u32 dps_range;
int ev_enable_state;
int slope_thres;
@@ -117,13 +117,16 @@ enum bmg160_axis {
};
static const struct {
- int val;
+ int odr;
+ int filter;
int bw_bits;
-} bmg160_samp_freq_table[] = { {100, 0x07},
- {200, 0x06},
- {400, 0x03},
- {1000, 0x02},
- {2000, 0x01} };
+} bmg160_samp_freq_table[] = { {100, 32, 0x07},
+ {200, 64, 0x06},
+ {100, 12, 0x05},
+ {200, 23, 0x04},
+ {400, 47, 0x03},
+ {1000, 116, 0x02},
+ {2000, 230, 0x01} };
static const struct {
int scale;
@@ -153,7 +156,7 @@ static int bmg160_convert_freq_to_bit(int val)
int i;
for (i = 0; i < ARRAY_SIZE(bmg160_samp_freq_table); ++i) {
- if (bmg160_samp_freq_table[i].val == val)
+ if (bmg160_samp_freq_table[i].odr == val)
return bmg160_samp_freq_table[i].bw_bits;
}
@@ -176,7 +179,53 @@ static int bmg160_set_bw(struct bmg160_data *data, int val)
return ret;
}
- data->bw_bits = bw_bits;
+ return 0;
+}
+
+static int bmg160_get_filter(struct bmg160_data *data, int *val)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+ int i;
+ unsigned int bw_bits;
+
+ ret = regmap_read(data->regmap, BMG160_REG_PMU_BW, &bw_bits);
+ if (ret < 0) {
+ dev_err(dev, "Error reading reg_pmu_bw\n");
+ return ret;
+ }
+
+ /* Ignore the readonly reserved bit. */
+ bw_bits &= ~BMG160_REG_PMU_BW_RES;
+
+ for (i = 0; i < ARRAY_SIZE(bmg160_samp_freq_table); ++i) {
+ if (bmg160_samp_freq_table[i].bw_bits == bw_bits)
+ break;
+ }
+
+ *val = bmg160_samp_freq_table[i].filter;
+
+ return ret ? ret : IIO_VAL_INT;
+}
+
+
+static int bmg160_set_filter(struct bmg160_data *data, int val)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bmg160_samp_freq_table); ++i) {
+ if (bmg160_samp_freq_table[i].filter == val)
+ break;
+ }
+
+ ret = regmap_write(data->regmap, BMG160_REG_PMU_BW,
+ bmg160_samp_freq_table[i].bw_bits);
+ if (ret < 0) {
+ dev_err(dev, "Error writing reg_pmu_bw\n");
+ return ret;
+ }
return 0;
}
@@ -386,11 +435,23 @@ static int bmg160_setup_new_data_interrupt(struct bmg160_data *data,
static int bmg160_get_bw(struct bmg160_data *data, int *val)
{
+ struct device *dev = regmap_get_device(data->regmap);
int i;
+ unsigned int bw_bits;
+ int ret;
+
+ ret = regmap_read(data->regmap, BMG160_REG_PMU_BW, &bw_bits);
+ if (ret < 0) {
+ dev_err(dev, "Error reading reg_pmu_bw\n");
+ return ret;
+ }
+
+ /* Ignore the readonly reserved bit. */
+ bw_bits &= ~BMG160_REG_PMU_BW_RES;
for (i = 0; i < ARRAY_SIZE(bmg160_samp_freq_table); ++i) {
- if (bmg160_samp_freq_table[i].bw_bits == data->bw_bits) {
- *val = bmg160_samp_freq_table[i].val;
+ if (bmg160_samp_freq_table[i].bw_bits == bw_bits) {
+ *val = bmg160_samp_freq_table[i].odr;
return IIO_VAL_INT;
}
}
@@ -507,6 +568,8 @@ static int bmg160_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
} else
return -EINVAL;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ return bmg160_get_filter(data, val);
case IIO_CHAN_INFO_SCALE:
*val = 0;
switch (chan->type) {
@@ -571,6 +634,26 @@ static int bmg160_write_raw(struct iio_dev *indio_dev,
ret = bmg160_set_power_state(data, false);
mutex_unlock(&data->mutex);
return ret;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ if (val2)
+ return -EINVAL;
+
+ mutex_lock(&data->mutex);
+ ret = bmg160_set_power_state(data, true);
+ if (ret < 0) {
+ bmg160_set_power_state(data, false);
+ mutex_unlock(&data->mutex);
+ return ret;
+ }
+ ret = bmg160_set_filter(data, val);
+ if (ret < 0) {
+ bmg160_set_power_state(data, false);
+ mutex_unlock(&data->mutex);
+ return ret;
+ }
+ ret = bmg160_set_power_state(data, false);
+ mutex_unlock(&data->mutex);
+ return ret;
case IIO_CHAN_INFO_SCALE:
if (val)
return -EINVAL;
@@ -728,7 +811,8 @@ static const struct iio_event_spec bmg160_event = {
.channel2 = IIO_MOD_##_axis, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
- BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
.scan_index = AXIS_##_axis, \
.scan_type = { \
.sign = 's', \
@@ -885,25 +969,25 @@ static irqreturn_t bmg160_event_handler(int irq, void *private)
if (val & BMG160_ANY_MOTION_BIT_X)
iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
- 0,
- IIO_MOD_X,
- IIO_EV_TYPE_ROC,
- dir),
- iio_get_time_ns());
+ 0,
+ IIO_MOD_X,
+ IIO_EV_TYPE_ROC,
+ dir),
+ iio_get_time_ns(indio_dev));
if (val & BMG160_ANY_MOTION_BIT_Y)
iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
- 0,
- IIO_MOD_Y,
- IIO_EV_TYPE_ROC,
- dir),
- iio_get_time_ns());
+ 0,
+ IIO_MOD_Y,
+ IIO_EV_TYPE_ROC,
+ dir),
+ iio_get_time_ns(indio_dev));
if (val & BMG160_ANY_MOTION_BIT_Z)
iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
- 0,
- IIO_MOD_Z,
- IIO_EV_TYPE_ROC,
- dir),
- iio_get_time_ns());
+ 0,
+ IIO_MOD_Z,
+ IIO_EV_TYPE_ROC,
+ dir),
+ iio_get_time_ns(indio_dev));
ack_intr_status:
if (!data->dready_trigger_on) {
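With the cached bw_bits field gone, both getters read BMG160_REG_PMU_BW back from the hardware and mask off the read-only resolution bit before matching against the table. A combined, hypothetical helper showing just the decode step (the in-tree getters return the ODR and the filter separately):

static int bmg160_decode_bw(struct bmg160_data *data, int *odr, int *filter)
{
	unsigned int bw_bits;
	int ret;
	int i;

	ret = regmap_read(data->regmap, BMG160_REG_PMU_BW, &bw_bits);
	if (ret < 0)
		return ret;

	/* Bit 7 is a read-only reserved bit; ignore it when matching. */
	bw_bits &= ~BMG160_REG_PMU_BW_RES;

	for (i = 0; i < ARRAY_SIZE(bmg160_samp_freq_table); ++i) {
		if (bmg160_samp_freq_table[i].bw_bits == bw_bits) {
			*odr = bmg160_samp_freq_table[i].odr;
			*filter = bmg160_samp_freq_table[i].filter;
			return 0;
		}
	}

	return -EINVAL;
}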
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index a8012955a..aea034d8f 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -426,13 +426,15 @@ int st_gyro_common_probe(struct iio_dev *indio_dev)
indio_dev->info = &gyro_info;
mutex_init(&gdata->tb.buf_lock);
- st_sensors_power_enable(indio_dev);
+ err = st_sensors_power_enable(indio_dev);
+ if (err)
+ return err;
err = st_sensors_check_device_support(indio_dev,
ARRAY_SIZE(st_gyro_sensors_settings),
st_gyro_sensors_settings);
if (err < 0)
- return err;
+ goto st_gyro_power_off;
gdata->num_data_channels = ST_GYRO_NUMBER_DATA_CHANNELS;
gdata->multiread_bit = gdata->sensor_settings->multi_read_bit;
@@ -446,11 +448,11 @@ int st_gyro_common_probe(struct iio_dev *indio_dev)
err = st_sensors_init_sensor(indio_dev,
(struct st_sensors_platform_data *)&gyro_pdata);
if (err < 0)
- return err;
+ goto st_gyro_power_off;
err = st_gyro_allocate_ring(indio_dev);
if (err < 0)
- return err;
+ goto st_gyro_power_off;
if (irq > 0) {
err = st_sensors_allocate_trigger(indio_dev,
@@ -473,6 +475,8 @@ st_gyro_device_register_error:
st_sensors_deallocate_trigger(indio_dev);
st_gyro_probe_trigger_error:
st_gyro_deallocate_ring(indio_dev);
+st_gyro_power_off:
+ st_sensors_power_disable(indio_dev);
return err;
}
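The pattern of this fix: once st_sensors_power_enable() has succeeded, every failing exit from the probe path must funnel through a label that calls st_sensors_power_disable(), otherwise the regulators are left enabled. Reduced to a skeleton (do_setup() is a placeholder for any later probe step):

	err = st_sensors_power_enable(indio_dev);
	if (err)
		return err;

	err = do_setup(indio_dev);	/* any step after power-on */
	if (err)
		goto power_off;

	return 0;

power_off:
	st_sensors_power_disable(indio_dev);
	return err;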
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
index 88e43f87b..9a081465c 100644
--- a/drivers/iio/health/afe4403.c
+++ b/drivers/iio/health/afe4403.c
@@ -1,7 +1,7 @@
/*
* AFE4403 Heart Rate Monitors and Low-Cost Pulse Oximeters
*
- * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -39,127 +39,90 @@
#define AFE4403_TIAGAIN 0x20
#define AFE4403_TIA_AMB_GAIN 0x21
-/* AFE4403 GAIN register fields */
-#define AFE4403_TIAGAIN_RES_MASK GENMASK(2, 0)
-#define AFE4403_TIAGAIN_RES_SHIFT 0
-#define AFE4403_TIAGAIN_CAP_MASK GENMASK(7, 3)
-#define AFE4403_TIAGAIN_CAP_SHIFT 3
-
-/* AFE4403 LEDCNTRL register fields */
-#define AFE440X_LEDCNTRL_LED1_MASK GENMASK(15, 8)
-#define AFE440X_LEDCNTRL_LED1_SHIFT 8
-#define AFE440X_LEDCNTRL_LED2_MASK GENMASK(7, 0)
-#define AFE440X_LEDCNTRL_LED2_SHIFT 0
-#define AFE440X_LEDCNTRL_LED_RANGE_MASK GENMASK(17, 16)
-#define AFE440X_LEDCNTRL_LED_RANGE_SHIFT 16
-
-/* AFE4403 CONTROL2 register fields */
-#define AFE440X_CONTROL2_PWR_DWN_TX BIT(2)
-#define AFE440X_CONTROL2_EN_SLOW_DIAG BIT(8)
-#define AFE440X_CONTROL2_DIAG_OUT_TRI BIT(10)
-#define AFE440X_CONTROL2_TX_BRDG_MOD BIT(11)
-#define AFE440X_CONTROL2_TX_REF_MASK GENMASK(18, 17)
-#define AFE440X_CONTROL2_TX_REF_SHIFT 17
-
-/* AFE4404 NULL fields */
-#define NULL_MASK 0
-#define NULL_SHIFT 0
-
-/* AFE4403 LEDCNTRL values */
-#define AFE440X_LEDCNTRL_RANGE_TX_HALF 0x1
-#define AFE440X_LEDCNTRL_RANGE_TX_FULL 0x2
-#define AFE440X_LEDCNTRL_RANGE_TX_OFF 0x3
-
-/* AFE4403 CONTROL2 values */
-#define AFE440X_CONTROL2_TX_REF_025 0x0
-#define AFE440X_CONTROL2_TX_REF_050 0x1
-#define AFE440X_CONTROL2_TX_REF_100 0x2
-#define AFE440X_CONTROL2_TX_REF_075 0x3
-
-/* AFE4403 CONTROL3 values */
-#define AFE440X_CONTROL3_CLK_DIV_2 0x0
-#define AFE440X_CONTROL3_CLK_DIV_4 0x2
-#define AFE440X_CONTROL3_CLK_DIV_6 0x3
-#define AFE440X_CONTROL3_CLK_DIV_8 0x4
-#define AFE440X_CONTROL3_CLK_DIV_12 0x5
-#define AFE440X_CONTROL3_CLK_DIV_1 0x7
-
-/* AFE4403 TIAGAIN_CAP values */
-#define AFE4403_TIAGAIN_CAP_5_P 0x0
-#define AFE4403_TIAGAIN_CAP_10_P 0x1
-#define AFE4403_TIAGAIN_CAP_20_P 0x2
-#define AFE4403_TIAGAIN_CAP_30_P 0x3
-#define AFE4403_TIAGAIN_CAP_55_P 0x8
-#define AFE4403_TIAGAIN_CAP_155_P 0x10
-
-/* AFE4403 TIAGAIN_RES values */
-#define AFE4403_TIAGAIN_RES_500_K 0x0
-#define AFE4403_TIAGAIN_RES_250_K 0x1
-#define AFE4403_TIAGAIN_RES_100_K 0x2
-#define AFE4403_TIAGAIN_RES_50_K 0x3
-#define AFE4403_TIAGAIN_RES_25_K 0x4
-#define AFE4403_TIAGAIN_RES_10_K 0x5
-#define AFE4403_TIAGAIN_RES_1_M 0x6
-#define AFE4403_TIAGAIN_RES_NONE 0x7
+enum afe4403_fields {
+ /* Gains */
+ F_RF_LED1, F_CF_LED1,
+ F_RF_LED, F_CF_LED,
+
+ /* LED Current */
+ F_ILED1, F_ILED2,
+
+ /* sentinel */
+ F_MAX_FIELDS
+};
+
+static const struct reg_field afe4403_reg_fields[] = {
+ /* Gains */
+ [F_RF_LED1] = REG_FIELD(AFE4403_TIAGAIN, 0, 2),
+ [F_CF_LED1] = REG_FIELD(AFE4403_TIAGAIN, 3, 7),
+ [F_RF_LED] = REG_FIELD(AFE4403_TIA_AMB_GAIN, 0, 2),
+ [F_CF_LED] = REG_FIELD(AFE4403_TIA_AMB_GAIN, 3, 7),
+ /* LED Current */
+ [F_ILED1] = REG_FIELD(AFE440X_LEDCNTRL, 0, 7),
+ [F_ILED2] = REG_FIELD(AFE440X_LEDCNTRL, 8, 15),
+};
/**
- * struct afe4403_data
- * @dev - Device structure
- * @spi - SPI device handle
- * @regmap - Register map of the device
- * @regulator - Pointer to the regulator for the IC
- * @trig - IIO trigger for this device
- * @irq - ADC_RDY line interrupt number
+ * struct afe4403_data - AFE4403 device instance data
+ * @dev: Device structure
+ * @spi: SPI device handle
+ * @regmap: Register map of the device
+ * @fields: Register fields of the device
+ * @regulator: Pointer to the regulator for the IC
+ * @trig: IIO trigger for this device
+ * @irq: ADC_RDY line interrupt number
*/
struct afe4403_data {
struct device *dev;
struct spi_device *spi;
struct regmap *regmap;
+ struct regmap_field *fields[F_MAX_FIELDS];
struct regulator *regulator;
struct iio_trigger *trig;
int irq;
};
enum afe4403_chan_id {
+ LED2 = 1,
+ ALED2,
LED1,
ALED1,
- LED2,
- ALED2,
- LED1_ALED1,
LED2_ALED2,
- ILED1,
- ILED2,
+ LED1_ALED1,
};
-static const struct afe440x_reg_info afe4403_reg_info[] = {
- [LED1] = AFE440X_REG_INFO(AFE440X_LED1VAL, 0, NULL),
- [ALED1] = AFE440X_REG_INFO(AFE440X_ALED1VAL, 0, NULL),
- [LED2] = AFE440X_REG_INFO(AFE440X_LED2VAL, 0, NULL),
- [ALED2] = AFE440X_REG_INFO(AFE440X_ALED2VAL, 0, NULL),
- [LED1_ALED1] = AFE440X_REG_INFO(AFE440X_LED1_ALED1VAL, 0, NULL),
- [LED2_ALED2] = AFE440X_REG_INFO(AFE440X_LED2_ALED2VAL, 0, NULL),
- [ILED1] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE440X_LEDCNTRL_LED1),
- [ILED2] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE440X_LEDCNTRL_LED2),
+static const unsigned int afe4403_channel_values[] = {
+ [LED2] = AFE440X_LED2VAL,
+ [ALED2] = AFE440X_ALED2VAL,
+ [LED1] = AFE440X_LED1VAL,
+ [ALED1] = AFE440X_ALED1VAL,
+ [LED2_ALED2] = AFE440X_LED2_ALED2VAL,
+ [LED1_ALED1] = AFE440X_LED1_ALED1VAL,
+};
+
+static const unsigned int afe4403_channel_leds[] = {
+ [LED2] = F_ILED2,
+ [LED1] = F_ILED1,
};
static const struct iio_chan_spec afe4403_channels[] = {
/* ADC values */
- AFE440X_INTENSITY_CHAN(LED1, "led1", 0),
- AFE440X_INTENSITY_CHAN(ALED1, "led1_ambient", 0),
- AFE440X_INTENSITY_CHAN(LED2, "led2", 0),
- AFE440X_INTENSITY_CHAN(ALED2, "led2_ambient", 0),
- AFE440X_INTENSITY_CHAN(LED1_ALED1, "led1-led1_ambient", 0),
- AFE440X_INTENSITY_CHAN(LED2_ALED2, "led2-led2_ambient", 0),
+ AFE440X_INTENSITY_CHAN(LED2, 0),
+ AFE440X_INTENSITY_CHAN(ALED2, 0),
+ AFE440X_INTENSITY_CHAN(LED1, 0),
+ AFE440X_INTENSITY_CHAN(ALED1, 0),
+ AFE440X_INTENSITY_CHAN(LED2_ALED2, 0),
+ AFE440X_INTENSITY_CHAN(LED1_ALED1, 0),
/* LED current */
- AFE440X_CURRENT_CHAN(ILED1, "led1"),
- AFE440X_CURRENT_CHAN(ILED2, "led2"),
+ AFE440X_CURRENT_CHAN(LED2),
+ AFE440X_CURRENT_CHAN(LED1),
};
static const struct afe440x_val_table afe4403_res_table[] = {
{ 500000 }, { 250000 }, { 100000 }, { 50000 },
{ 25000 }, { 10000 }, { 1000000 }, { 0 },
};
-AFE440X_TABLE_ATTR(tia_resistance_available, afe4403_res_table);
+AFE440X_TABLE_ATTR(in_intensity_resistance_available, afe4403_res_table);
static const struct afe440x_val_table afe4403_cap_table[] = {
{ 0, 5000 }, { 0, 10000 }, { 0, 20000 }, { 0, 25000 },
@@ -171,7 +134,7 @@ static const struct afe440x_val_table afe4403_cap_table[] = {
{ 0, 205000 }, { 0, 210000 }, { 0, 220000 }, { 0, 225000 },
{ 0, 230000 }, { 0, 235000 }, { 0, 245000 }, { 0, 250000 },
};
-AFE440X_TABLE_ATTR(tia_capacitance_available, afe4403_cap_table);
+AFE440X_TABLE_ATTR(in_intensity_capacitance_available, afe4403_cap_table);
static ssize_t afe440x_show_register(struct device *dev,
struct device_attribute *attr,
@@ -180,38 +143,21 @@ static ssize_t afe440x_show_register(struct device *dev,
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct afe4403_data *afe = iio_priv(indio_dev);
struct afe440x_attr *afe440x_attr = to_afe440x_attr(attr);
- unsigned int reg_val, type;
+ unsigned int reg_val;
int vals[2];
- int ret, val_len;
+ int ret;
- ret = regmap_read(afe->regmap, afe440x_attr->reg, &reg_val);
+ ret = regmap_field_read(afe->fields[afe440x_attr->field], &reg_val);
if (ret)
return ret;
- reg_val &= afe440x_attr->mask;
- reg_val >>= afe440x_attr->shift;
-
- switch (afe440x_attr->type) {
- case SIMPLE:
- type = IIO_VAL_INT;
- val_len = 1;
- vals[0] = reg_val;
- break;
- case RESISTANCE:
- case CAPACITANCE:
- type = IIO_VAL_INT_PLUS_MICRO;
- val_len = 2;
- if (reg_val < afe440x_attr->table_size) {
- vals[0] = afe440x_attr->val_table[reg_val].integer;
- vals[1] = afe440x_attr->val_table[reg_val].fract;
- break;
- }
- return -EINVAL;
- default:
+ if (reg_val >= afe440x_attr->table_size)
return -EINVAL;
- }
- return iio_format_value(buf, type, val_len, vals);
+ vals[0] = afe440x_attr->val_table[reg_val].integer;
+ vals[1] = afe440x_attr->val_table[reg_val].fract;
+
+ return iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, 2, vals);
}
static ssize_t afe440x_store_register(struct device *dev,
@@ -227,48 +173,43 @@ static ssize_t afe440x_store_register(struct device *dev,
if (ret)
return ret;
- switch (afe440x_attr->type) {
- case SIMPLE:
- val = integer;
- break;
- case RESISTANCE:
- case CAPACITANCE:
- for (val = 0; val < afe440x_attr->table_size; val++)
- if (afe440x_attr->val_table[val].integer == integer &&
- afe440x_attr->val_table[val].fract == fract)
- break;
- if (val == afe440x_attr->table_size)
- return -EINVAL;
- break;
- default:
+ for (val = 0; val < afe440x_attr->table_size; val++)
+ if (afe440x_attr->val_table[val].integer == integer &&
+ afe440x_attr->val_table[val].fract == fract)
+ break;
+ if (val == afe440x_attr->table_size)
return -EINVAL;
- }
- ret = regmap_update_bits(afe->regmap, afe440x_attr->reg,
- afe440x_attr->mask,
- (val << afe440x_attr->shift));
+ ret = regmap_field_write(afe->fields[afe440x_attr->field], val);
if (ret)
return ret;
return count;
}
-static AFE440X_ATTR(tia_separate_en, AFE4403_TIAGAIN, AFE440X_TIAGAIN_ENSEPGAIN, SIMPLE, NULL, 0);
+static AFE440X_ATTR(in_intensity1_resistance, F_RF_LED, afe4403_res_table);
+static AFE440X_ATTR(in_intensity1_capacitance, F_CF_LED, afe4403_cap_table);
+
+static AFE440X_ATTR(in_intensity2_resistance, F_RF_LED, afe4403_res_table);
+static AFE440X_ATTR(in_intensity2_capacitance, F_CF_LED, afe4403_cap_table);
-static AFE440X_ATTR(tia_resistance1, AFE4403_TIAGAIN, AFE4403_TIAGAIN_RES, RESISTANCE, afe4403_res_table, ARRAY_SIZE(afe4403_res_table));
-static AFE440X_ATTR(tia_capacitance1, AFE4403_TIAGAIN, AFE4403_TIAGAIN_CAP, CAPACITANCE, afe4403_cap_table, ARRAY_SIZE(afe4403_cap_table));
+static AFE440X_ATTR(in_intensity3_resistance, F_RF_LED1, afe4403_res_table);
+static AFE440X_ATTR(in_intensity3_capacitance, F_CF_LED1, afe4403_cap_table);
-static AFE440X_ATTR(tia_resistance2, AFE4403_TIA_AMB_GAIN, AFE4403_TIAGAIN_RES, RESISTANCE, afe4403_res_table, ARRAY_SIZE(afe4403_res_table));
-static AFE440X_ATTR(tia_capacitance2, AFE4403_TIA_AMB_GAIN, AFE4403_TIAGAIN_RES, CAPACITANCE, afe4403_cap_table, ARRAY_SIZE(afe4403_cap_table));
+static AFE440X_ATTR(in_intensity4_resistance, F_RF_LED1, afe4403_res_table);
+static AFE440X_ATTR(in_intensity4_capacitance, F_CF_LED1, afe4403_cap_table);
static struct attribute *afe440x_attributes[] = {
- &afe440x_attr_tia_separate_en.dev_attr.attr,
- &afe440x_attr_tia_resistance1.dev_attr.attr,
- &afe440x_attr_tia_capacitance1.dev_attr.attr,
- &afe440x_attr_tia_resistance2.dev_attr.attr,
- &afe440x_attr_tia_capacitance2.dev_attr.attr,
- &dev_attr_tia_resistance_available.attr,
- &dev_attr_tia_capacitance_available.attr,
+ &dev_attr_in_intensity_resistance_available.attr,
+ &dev_attr_in_intensity_capacitance_available.attr,
+ &afe440x_attr_in_intensity1_resistance.dev_attr.attr,
+ &afe440x_attr_in_intensity1_capacitance.dev_attr.attr,
+ &afe440x_attr_in_intensity2_resistance.dev_attr.attr,
+ &afe440x_attr_in_intensity2_capacitance.dev_attr.attr,
+ &afe440x_attr_in_intensity3_resistance.dev_attr.attr,
+ &afe440x_attr_in_intensity3_capacitance.dev_attr.attr,
+ &afe440x_attr_in_intensity4_resistance.dev_attr.attr,
+ &afe440x_attr_in_intensity4_capacitance.dev_attr.attr,
NULL
};
@@ -309,35 +250,26 @@ static int afe4403_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct afe4403_data *afe = iio_priv(indio_dev);
- const struct afe440x_reg_info reg_info = afe4403_reg_info[chan->address];
+ unsigned int reg = afe4403_channel_values[chan->address];
+ unsigned int field = afe4403_channel_leds[chan->address];
int ret;
switch (chan->type) {
case IIO_INTENSITY:
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = afe4403_read(afe, reg_info.reg, val);
- if (ret)
- return ret;
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_OFFSET:
- ret = regmap_read(afe->regmap, reg_info.offreg,
- val);
+ ret = afe4403_read(afe, reg, val);
if (ret)
return ret;
- *val &= reg_info.mask;
- *val >>= reg_info.shift;
return IIO_VAL_INT;
}
break;
case IIO_CURRENT:
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = regmap_read(afe->regmap, reg_info.reg, val);
+ ret = regmap_field_read(afe->fields[field], val);
if (ret)
return ret;
- *val &= reg_info.mask;
- *val >>= reg_info.shift;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 0;
@@ -357,25 +289,13 @@ static int afe4403_write_raw(struct iio_dev *indio_dev,
int val, int val2, long mask)
{
struct afe4403_data *afe = iio_priv(indio_dev);
- const struct afe440x_reg_info reg_info = afe4403_reg_info[chan->address];
+ unsigned int field = afe4403_channel_leds[chan->address];
switch (chan->type) {
- case IIO_INTENSITY:
- switch (mask) {
- case IIO_CHAN_INFO_OFFSET:
- return regmap_update_bits(afe->regmap,
- reg_info.offreg,
- reg_info.mask,
- (val << reg_info.shift));
- }
- break;
case IIO_CURRENT:
switch (mask) {
case IIO_CHAN_INFO_RAW:
- return regmap_update_bits(afe->regmap,
- reg_info.reg,
- reg_info.mask,
- (val << reg_info.shift));
+ return regmap_field_write(afe->fields[field], val);
}
break;
default:
@@ -410,7 +330,7 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private)
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength) {
ret = spi_write_then_read(afe->spi,
- &afe4403_reg_info[bit].reg, 1,
+ &afe4403_channel_values[bit], 1,
rx, 3);
if (ret)
goto err;
@@ -472,12 +392,8 @@ static const struct iio_trigger_ops afe4403_trigger_ops = {
static const struct reg_sequence afe4403_reg_sequences[] = {
AFE4403_TIMING_PAIRS,
- { AFE440X_CONTROL1, AFE440X_CONTROL1_TIMEREN | 0x000007},
- { AFE4403_TIA_AMB_GAIN, AFE4403_TIAGAIN_RES_1_M },
- { AFE440X_LEDCNTRL, (0x14 << AFE440X_LEDCNTRL_LED1_SHIFT) |
- (0x14 << AFE440X_LEDCNTRL_LED2_SHIFT) },
- { AFE440X_CONTROL2, AFE440X_CONTROL2_TX_REF_050 <<
- AFE440X_CONTROL2_TX_REF_SHIFT },
+ { AFE440X_CONTROL1, AFE440X_CONTROL1_TIMEREN },
+ { AFE4403_TIAGAIN, AFE440X_TIAGAIN_ENSEPGAIN },
};
static const struct regmap_range afe4403_yes_ranges[] = {
@@ -498,13 +414,11 @@ static const struct regmap_config afe4403_regmap_config = {
.volatile_table = &afe4403_volatile_table,
};
-#ifdef CONFIG_OF
static const struct of_device_id afe4403_of_match[] = {
{ .compatible = "ti,afe4403", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, afe4403_of_match);
-#endif
static int __maybe_unused afe4403_suspend(struct device *dev)
{
@@ -553,7 +467,7 @@ static int afe4403_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
struct afe4403_data *afe;
- int ret;
+ int i, ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*afe));
if (!indio_dev)
@@ -572,6 +486,15 @@ static int afe4403_probe(struct spi_device *spi)
return PTR_ERR(afe->regmap);
}
+ for (i = 0; i < F_MAX_FIELDS; i++) {
+ afe->fields[i] = devm_regmap_field_alloc(afe->dev, afe->regmap,
+ afe4403_reg_fields[i]);
+ if (IS_ERR(afe->fields[i])) {
+ dev_err(afe->dev, "Unable to allocate regmap fields\n");
+ return PTR_ERR(afe->fields[i]);
+ }
+ }
+
afe->regulator = devm_regulator_get(afe->dev, "tx_sup");
if (IS_ERR(afe->regulator)) {
dev_err(afe->dev, "Unable to get regulator\n");
@@ -694,7 +617,7 @@ MODULE_DEVICE_TABLE(spi, afe4403_ids);
static struct spi_driver afe4403_spi_driver = {
.driver = {
.name = AFE4403_DRIVER_NAME,
- .of_match_table = of_match_ptr(afe4403_of_match),
+ .of_match_table = afe4403_of_match,
.pm = &afe4403_pm_ops,
},
.probe = afe4403_probe,
@@ -704,5 +627,5 @@ static struct spi_driver afe4403_spi_driver = {
module_spi_driver(afe4403_spi_driver);
MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
-MODULE_DESCRIPTION("TI AFE4403 Heart Rate and Pulse Oximeter");
+MODULE_DESCRIPTION("TI AFE4403 Heart Rate Monitor and Pulse Oximeter AFE");
MODULE_LICENSE("GPL v2");
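Both AFE440x drivers (this file and afe4404.c below) are converted from hand-rolled mask/shift bookkeeping to the regmap_field API: REG_FIELD() records the register plus the bit span once, and regmap_field_read()/regmap_field_write() handle the masking and shifting. The core of the pattern, shown with one hypothetical field:

static const struct reg_field demo_field = REG_FIELD(AFE4403_TIAGAIN, 0, 2);

	struct regmap_field *f;
	unsigned int val;
	int ret;

	f = devm_regmap_field_alloc(dev, regmap, demo_field);
	if (IS_ERR(f))
		return PTR_ERR(f);

	ret = regmap_field_read(f, &val);	/* value already shifted down */
	if (ret)
		return ret;

	ret = regmap_field_write(f, 0x5);	/* read-modify-write of bits 2:0 */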
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
index 5096a4643..45266404f 100644
--- a/drivers/iio/health/afe4404.c
+++ b/drivers/iio/health/afe4404.c
@@ -1,7 +1,7 @@
/*
* AFE4404 Heart Rate Monitors and Low-Cost Pulse Oximeters
*
- * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -48,118 +48,102 @@
#define AFE4404_AVG_LED2_ALED2VAL 0x3f
#define AFE4404_AVG_LED1_ALED1VAL 0x40
-/* AFE4404 GAIN register fields */
-#define AFE4404_TIA_GAIN_RES_MASK GENMASK(2, 0)
-#define AFE4404_TIA_GAIN_RES_SHIFT 0
-#define AFE4404_TIA_GAIN_CAP_MASK GENMASK(5, 3)
-#define AFE4404_TIA_GAIN_CAP_SHIFT 3
+/* AFE4404 CONTROL2 register fields */
+#define AFE440X_CONTROL2_OSC_ENABLE BIT(9)
-/* AFE4404 LEDCNTRL register fields */
-#define AFE4404_LEDCNTRL_ILED1_MASK GENMASK(5, 0)
-#define AFE4404_LEDCNTRL_ILED1_SHIFT 0
-#define AFE4404_LEDCNTRL_ILED2_MASK GENMASK(11, 6)
-#define AFE4404_LEDCNTRL_ILED2_SHIFT 6
-#define AFE4404_LEDCNTRL_ILED3_MASK GENMASK(17, 12)
-#define AFE4404_LEDCNTRL_ILED3_SHIFT 12
+enum afe4404_fields {
+ /* Gains */
+ F_TIA_GAIN_SEP, F_TIA_CF_SEP,
+ F_TIA_GAIN, TIA_CF,
-/* AFE4404 CONTROL2 register fields */
-#define AFE440X_CONTROL2_ILED_2X_MASK BIT(17)
-#define AFE440X_CONTROL2_ILED_2X_SHIFT 17
-
-/* AFE4404 CONTROL3 register fields */
-#define AFE440X_CONTROL3_OSC_ENABLE BIT(9)
-
-/* AFE4404 OFFDAC register current fields */
-#define AFE4404_OFFDAC_CURR_LED1_MASK GENMASK(9, 5)
-#define AFE4404_OFFDAC_CURR_LED1_SHIFT 5
-#define AFE4404_OFFDAC_CURR_LED2_MASK GENMASK(19, 15)
-#define AFE4404_OFFDAC_CURR_LED2_SHIFT 15
-#define AFE4404_OFFDAC_CURR_LED3_MASK GENMASK(4, 0)
-#define AFE4404_OFFDAC_CURR_LED3_SHIFT 0
-#define AFE4404_OFFDAC_CURR_ALED1_MASK GENMASK(14, 10)
-#define AFE4404_OFFDAC_CURR_ALED1_SHIFT 10
-#define AFE4404_OFFDAC_CURR_ALED2_MASK GENMASK(4, 0)
-#define AFE4404_OFFDAC_CURR_ALED2_SHIFT 0
-
-/* AFE4404 NULL fields */
-#define NULL_MASK 0
-#define NULL_SHIFT 0
-
-/* AFE4404 TIA_GAIN_CAP values */
-#define AFE4404_TIA_GAIN_CAP_5_P 0x0
-#define AFE4404_TIA_GAIN_CAP_2_5_P 0x1
-#define AFE4404_TIA_GAIN_CAP_10_P 0x2
-#define AFE4404_TIA_GAIN_CAP_7_5_P 0x3
-#define AFE4404_TIA_GAIN_CAP_20_P 0x4
-#define AFE4404_TIA_GAIN_CAP_17_5_P 0x5
-#define AFE4404_TIA_GAIN_CAP_25_P 0x6
-#define AFE4404_TIA_GAIN_CAP_22_5_P 0x7
-
-/* AFE4404 TIA_GAIN_RES values */
-#define AFE4404_TIA_GAIN_RES_500_K 0x0
-#define AFE4404_TIA_GAIN_RES_250_K 0x1
-#define AFE4404_TIA_GAIN_RES_100_K 0x2
-#define AFE4404_TIA_GAIN_RES_50_K 0x3
-#define AFE4404_TIA_GAIN_RES_25_K 0x4
-#define AFE4404_TIA_GAIN_RES_10_K 0x5
-#define AFE4404_TIA_GAIN_RES_1_M 0x6
-#define AFE4404_TIA_GAIN_RES_2_M 0x7
+ /* LED Current */
+ F_ILED1, F_ILED2, F_ILED3,
+
+ /* Offset DAC */
+ F_OFFDAC_AMB2, F_OFFDAC_LED1, F_OFFDAC_AMB1, F_OFFDAC_LED2,
+
+ /* sentinel */
+ F_MAX_FIELDS
+};
+
+static const struct reg_field afe4404_reg_fields[] = {
+ /* Gains */
+ [F_TIA_GAIN_SEP] = REG_FIELD(AFE4404_TIA_GAIN_SEP, 0, 2),
+ [F_TIA_CF_SEP] = REG_FIELD(AFE4404_TIA_GAIN_SEP, 3, 5),
+ [F_TIA_GAIN] = REG_FIELD(AFE4404_TIA_GAIN, 0, 2),
+ [TIA_CF] = REG_FIELD(AFE4404_TIA_GAIN, 3, 5),
+ /* LED Current */
+ [F_ILED1] = REG_FIELD(AFE440X_LEDCNTRL, 0, 5),
+ [F_ILED2] = REG_FIELD(AFE440X_LEDCNTRL, 6, 11),
+ [F_ILED3] = REG_FIELD(AFE440X_LEDCNTRL, 12, 17),
+ /* Offset DAC */
+ [F_OFFDAC_AMB2] = REG_FIELD(AFE4404_OFFDAC, 0, 4),
+ [F_OFFDAC_LED1] = REG_FIELD(AFE4404_OFFDAC, 5, 9),
+ [F_OFFDAC_AMB1] = REG_FIELD(AFE4404_OFFDAC, 10, 14),
+ [F_OFFDAC_LED2] = REG_FIELD(AFE4404_OFFDAC, 15, 19),
+};
/**
- * struct afe4404_data
- * @dev - Device structure
- * @regmap - Register map of the device
- * @regulator - Pointer to the regulator for the IC
- * @trig - IIO trigger for this device
- * @irq - ADC_RDY line interrupt number
+ * struct afe4404_data - AFE4404 device instance data
+ * @dev: Device structure
+ * @regmap: Register map of the device
+ * @fields: Register fields of the device
+ * @regulator: Pointer to the regulator for the IC
+ * @trig: IIO trigger for this device
+ * @irq: ADC_RDY line interrupt number
*/
struct afe4404_data {
struct device *dev;
struct regmap *regmap;
+ struct regmap_field *fields[F_MAX_FIELDS];
struct regulator *regulator;
struct iio_trigger *trig;
int irq;
};
enum afe4404_chan_id {
+ LED2 = 1,
+ ALED2,
LED1,
ALED1,
- LED2,
- ALED2,
- LED3,
- LED1_ALED1,
LED2_ALED2,
- ILED1,
- ILED2,
- ILED3,
+ LED1_ALED1,
+};
+
+static const unsigned int afe4404_channel_values[] = {
+ [LED2] = AFE440X_LED2VAL,
+ [ALED2] = AFE440X_ALED2VAL,
+ [LED1] = AFE440X_LED1VAL,
+ [ALED1] = AFE440X_ALED1VAL,
+ [LED2_ALED2] = AFE440X_LED2_ALED2VAL,
+ [LED1_ALED1] = AFE440X_LED1_ALED1VAL,
};
-static const struct afe440x_reg_info afe4404_reg_info[] = {
- [LED1] = AFE440X_REG_INFO(AFE440X_LED1VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_LED1),
- [ALED1] = AFE440X_REG_INFO(AFE440X_ALED1VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_ALED1),
- [LED2] = AFE440X_REG_INFO(AFE440X_LED2VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_LED2),
- [ALED2] = AFE440X_REG_INFO(AFE440X_ALED2VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_ALED2),
- [LED3] = AFE440X_REG_INFO(AFE440X_ALED2VAL, 0, NULL),
- [LED1_ALED1] = AFE440X_REG_INFO(AFE440X_LED1_ALED1VAL, 0, NULL),
- [LED2_ALED2] = AFE440X_REG_INFO(AFE440X_LED2_ALED2VAL, 0, NULL),
- [ILED1] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE4404_LEDCNTRL_ILED1),
- [ILED2] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE4404_LEDCNTRL_ILED2),
- [ILED3] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE4404_LEDCNTRL_ILED3),
+static const unsigned int afe4404_channel_leds[] = {
+ [LED2] = F_ILED2,
+ [ALED2] = F_ILED3,
+ [LED1] = F_ILED1,
+};
+
+static const unsigned int afe4404_channel_offdacs[] = {
+ [LED2] = F_OFFDAC_LED2,
+ [ALED2] = F_OFFDAC_AMB2,
+ [LED1] = F_OFFDAC_LED1,
+ [ALED1] = F_OFFDAC_AMB1,
};
static const struct iio_chan_spec afe4404_channels[] = {
/* ADC values */
- AFE440X_INTENSITY_CHAN(LED1, "led1", BIT(IIO_CHAN_INFO_OFFSET)),
- AFE440X_INTENSITY_CHAN(ALED1, "led1_ambient", BIT(IIO_CHAN_INFO_OFFSET)),
- AFE440X_INTENSITY_CHAN(LED2, "led2", BIT(IIO_CHAN_INFO_OFFSET)),
- AFE440X_INTENSITY_CHAN(ALED2, "led2_ambient", BIT(IIO_CHAN_INFO_OFFSET)),
- AFE440X_INTENSITY_CHAN(LED3, "led3", BIT(IIO_CHAN_INFO_OFFSET)),
- AFE440X_INTENSITY_CHAN(LED1_ALED1, "led1-led1_ambient", 0),
- AFE440X_INTENSITY_CHAN(LED2_ALED2, "led2-led2_ambient", 0),
+ AFE440X_INTENSITY_CHAN(LED2, BIT(IIO_CHAN_INFO_OFFSET)),
+ AFE440X_INTENSITY_CHAN(ALED2, BIT(IIO_CHAN_INFO_OFFSET)),
+ AFE440X_INTENSITY_CHAN(LED1, BIT(IIO_CHAN_INFO_OFFSET)),
+ AFE440X_INTENSITY_CHAN(ALED1, BIT(IIO_CHAN_INFO_OFFSET)),
+ AFE440X_INTENSITY_CHAN(LED2_ALED2, 0),
+ AFE440X_INTENSITY_CHAN(LED1_ALED1, 0),
/* LED current */
- AFE440X_CURRENT_CHAN(ILED1, "led1"),
- AFE440X_CURRENT_CHAN(ILED2, "led2"),
- AFE440X_CURRENT_CHAN(ILED3, "led3"),
+ AFE440X_CURRENT_CHAN(LED2),
+ AFE440X_CURRENT_CHAN(ALED2),
+ AFE440X_CURRENT_CHAN(LED1),
};
static const struct afe440x_val_table afe4404_res_table[] = {
@@ -172,7 +156,7 @@ static const struct afe440x_val_table afe4404_res_table[] = {
{ .integer = 1000000, .fract = 0 },
{ .integer = 2000000, .fract = 0 },
};
-AFE440X_TABLE_ATTR(tia_resistance_available, afe4404_res_table);
+AFE440X_TABLE_ATTR(in_intensity_resistance_available, afe4404_res_table);
static const struct afe440x_val_table afe4404_cap_table[] = {
{ .integer = 0, .fract = 5000 },
@@ -184,7 +168,7 @@ static const struct afe440x_val_table afe4404_cap_table[] = {
{ .integer = 0, .fract = 25000 },
{ .integer = 0, .fract = 22500 },
};
-AFE440X_TABLE_ATTR(tia_capacitance_available, afe4404_cap_table);
+AFE440X_TABLE_ATTR(in_intensity_capacitance_available, afe4404_cap_table);
static ssize_t afe440x_show_register(struct device *dev,
struct device_attribute *attr,
@@ -193,38 +177,21 @@ static ssize_t afe440x_show_register(struct device *dev,
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct afe4404_data *afe = iio_priv(indio_dev);
struct afe440x_attr *afe440x_attr = to_afe440x_attr(attr);
- unsigned int reg_val, type;
+ unsigned int reg_val;
int vals[2];
- int ret, val_len;
+ int ret;
- ret = regmap_read(afe->regmap, afe440x_attr->reg, &reg_val);
+ ret = regmap_field_read(afe->fields[afe440x_attr->field], &reg_val);
if (ret)
return ret;
- reg_val &= afe440x_attr->mask;
- reg_val >>= afe440x_attr->shift;
-
- switch (afe440x_attr->type) {
- case SIMPLE:
- type = IIO_VAL_INT;
- val_len = 1;
- vals[0] = reg_val;
- break;
- case RESISTANCE:
- case CAPACITANCE:
- type = IIO_VAL_INT_PLUS_MICRO;
- val_len = 2;
- if (reg_val < afe440x_attr->table_size) {
- vals[0] = afe440x_attr->val_table[reg_val].integer;
- vals[1] = afe440x_attr->val_table[reg_val].fract;
- break;
- }
- return -EINVAL;
- default:
+ if (reg_val >= afe440x_attr->table_size)
return -EINVAL;
- }
- return iio_format_value(buf, type, val_len, vals);
+ vals[0] = afe440x_attr->val_table[reg_val].integer;
+ vals[1] = afe440x_attr->val_table[reg_val].fract;
+
+ return iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, 2, vals);
}
static ssize_t afe440x_store_register(struct device *dev,
@@ -240,48 +207,43 @@ static ssize_t afe440x_store_register(struct device *dev,
if (ret)
return ret;
- switch (afe440x_attr->type) {
- case SIMPLE:
- val = integer;
- break;
- case RESISTANCE:
- case CAPACITANCE:
- for (val = 0; val < afe440x_attr->table_size; val++)
- if (afe440x_attr->val_table[val].integer == integer &&
- afe440x_attr->val_table[val].fract == fract)
- break;
- if (val == afe440x_attr->table_size)
- return -EINVAL;
- break;
- default:
+ for (val = 0; val < afe440x_attr->table_size; val++)
+ if (afe440x_attr->val_table[val].integer == integer &&
+ afe440x_attr->val_table[val].fract == fract)
+ break;
+ if (val == afe440x_attr->table_size)
return -EINVAL;
- }
- ret = regmap_update_bits(afe->regmap, afe440x_attr->reg,
- afe440x_attr->mask,
- (val << afe440x_attr->shift));
+ ret = regmap_field_write(afe->fields[afe440x_attr->field], val);
if (ret)
return ret;
return count;
}
-static AFE440X_ATTR(tia_separate_en, AFE4404_TIA_GAIN_SEP, AFE440X_TIAGAIN_ENSEPGAIN, SIMPLE, NULL, 0);
+static AFE440X_ATTR(in_intensity1_resistance, F_TIA_GAIN_SEP, afe4404_res_table);
+static AFE440X_ATTR(in_intensity1_capacitance, F_TIA_CF_SEP, afe4404_cap_table);
+
+static AFE440X_ATTR(in_intensity2_resistance, F_TIA_GAIN_SEP, afe4404_res_table);
+static AFE440X_ATTR(in_intensity2_capacitance, F_TIA_CF_SEP, afe4404_cap_table);
-static AFE440X_ATTR(tia_resistance1, AFE4404_TIA_GAIN, AFE4404_TIA_GAIN_RES, RESISTANCE, afe4404_res_table, ARRAY_SIZE(afe4404_res_table));
-static AFE440X_ATTR(tia_capacitance1, AFE4404_TIA_GAIN, AFE4404_TIA_GAIN_CAP, CAPACITANCE, afe4404_cap_table, ARRAY_SIZE(afe4404_cap_table));
+static AFE440X_ATTR(in_intensity3_resistance, F_TIA_GAIN, afe4404_res_table);
+static AFE440X_ATTR(in_intensity3_capacitance, TIA_CF, afe4404_cap_table);
-static AFE440X_ATTR(tia_resistance2, AFE4404_TIA_GAIN_SEP, AFE4404_TIA_GAIN_RES, RESISTANCE, afe4404_res_table, ARRAY_SIZE(afe4404_res_table));
-static AFE440X_ATTR(tia_capacitance2, AFE4404_TIA_GAIN_SEP, AFE4404_TIA_GAIN_CAP, CAPACITANCE, afe4404_cap_table, ARRAY_SIZE(afe4404_cap_table));
+static AFE440X_ATTR(in_intensity4_resistance, F_TIA_GAIN, afe4404_res_table);
+static AFE440X_ATTR(in_intensity4_capacitance, TIA_CF, afe4404_cap_table);
static struct attribute *afe440x_attributes[] = {
- &afe440x_attr_tia_separate_en.dev_attr.attr,
- &afe440x_attr_tia_resistance1.dev_attr.attr,
- &afe440x_attr_tia_capacitance1.dev_attr.attr,
- &afe440x_attr_tia_resistance2.dev_attr.attr,
- &afe440x_attr_tia_capacitance2.dev_attr.attr,
- &dev_attr_tia_resistance_available.attr,
- &dev_attr_tia_capacitance_available.attr,
+ &dev_attr_in_intensity_resistance_available.attr,
+ &dev_attr_in_intensity_capacitance_available.attr,
+ &afe440x_attr_in_intensity1_resistance.dev_attr.attr,
+ &afe440x_attr_in_intensity1_capacitance.dev_attr.attr,
+ &afe440x_attr_in_intensity2_resistance.dev_attr.attr,
+ &afe440x_attr_in_intensity2_capacitance.dev_attr.attr,
+ &afe440x_attr_in_intensity3_resistance.dev_attr.attr,
+ &afe440x_attr_in_intensity3_capacitance.dev_attr.attr,
+ &afe440x_attr_in_intensity4_resistance.dev_attr.attr,
+ &afe440x_attr_in_intensity4_capacitance.dev_attr.attr,
NULL
};
@@ -294,35 +256,32 @@ static int afe4404_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct afe4404_data *afe = iio_priv(indio_dev);
- const struct afe440x_reg_info reg_info = afe4404_reg_info[chan->address];
+ unsigned int value_reg = afe4404_channel_values[chan->address];
+ unsigned int led_field = afe4404_channel_leds[chan->address];
+ unsigned int offdac_field = afe4404_channel_offdacs[chan->address];
int ret;
switch (chan->type) {
case IIO_INTENSITY:
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = regmap_read(afe->regmap, reg_info.reg, val);
+ ret = regmap_read(afe->regmap, value_reg, val);
if (ret)
return ret;
return IIO_VAL_INT;
case IIO_CHAN_INFO_OFFSET:
- ret = regmap_read(afe->regmap, reg_info.offreg,
- val);
+ ret = regmap_field_read(afe->fields[offdac_field], val);
if (ret)
return ret;
- *val &= reg_info.mask;
- *val >>= reg_info.shift;
return IIO_VAL_INT;
}
break;
case IIO_CURRENT:
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = regmap_read(afe->regmap, reg_info.reg, val);
+ ret = regmap_field_read(afe->fields[led_field], val);
if (ret)
return ret;
- *val &= reg_info.mask;
- *val >>= reg_info.shift;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 0;
@@ -342,25 +301,20 @@ static int afe4404_write_raw(struct iio_dev *indio_dev,
int val, int val2, long mask)
{
struct afe4404_data *afe = iio_priv(indio_dev);
- const struct afe440x_reg_info reg_info = afe4404_reg_info[chan->address];
+ unsigned int led_field = afe4404_channel_leds[chan->address];
+ unsigned int offdac_field = afe4404_channel_offdacs[chan->address];
switch (chan->type) {
case IIO_INTENSITY:
switch (mask) {
case IIO_CHAN_INFO_OFFSET:
- return regmap_update_bits(afe->regmap,
- reg_info.offreg,
- reg_info.mask,
- (val << reg_info.shift));
+ return regmap_field_write(afe->fields[offdac_field], val);
}
break;
case IIO_CURRENT:
switch (mask) {
case IIO_CHAN_INFO_RAW:
- return regmap_update_bits(afe->regmap,
- reg_info.reg,
- reg_info.mask,
- (val << reg_info.shift));
+ return regmap_field_write(afe->fields[led_field], val);
}
break;
default:
@@ -387,7 +341,7 @@ static irqreturn_t afe4404_trigger_handler(int irq, void *private)
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength) {
- ret = regmap_read(afe->regmap, afe4404_reg_info[bit].reg,
+ ret = regmap_read(afe->regmap, afe4404_channel_values[bit],
&buffer[i++]);
if (ret)
goto err;
@@ -443,11 +397,8 @@ static const struct iio_trigger_ops afe4404_trigger_ops = {
static const struct reg_sequence afe4404_reg_sequences[] = {
AFE4404_TIMING_PAIRS,
{ AFE440X_CONTROL1, AFE440X_CONTROL1_TIMEREN },
- { AFE4404_TIA_GAIN, AFE4404_TIA_GAIN_RES_50_K },
- { AFE440X_LEDCNTRL, (0xf << AFE4404_LEDCNTRL_ILED1_SHIFT) |
- (0x3 << AFE4404_LEDCNTRL_ILED2_SHIFT) |
- (0x3 << AFE4404_LEDCNTRL_ILED3_SHIFT) },
- { AFE440X_CONTROL2, AFE440X_CONTROL3_OSC_ENABLE },
+ { AFE4404_TIA_GAIN_SEP, AFE440X_TIAGAIN_ENSEPGAIN },
+ { AFE440X_CONTROL2, AFE440X_CONTROL2_OSC_ENABLE },
};
static const struct regmap_range afe4404_yes_ranges[] = {
@@ -469,13 +420,11 @@ static const struct regmap_config afe4404_regmap_config = {
.volatile_table = &afe4404_volatile_table,
};
-#ifdef CONFIG_OF
static const struct of_device_id afe4404_of_match[] = {
{ .compatible = "ti,afe4404", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, afe4404_of_match);
-#endif
static int __maybe_unused afe4404_suspend(struct device *dev)
{
@@ -525,7 +474,7 @@ static int afe4404_probe(struct i2c_client *client,
{
struct iio_dev *indio_dev;
struct afe4404_data *afe;
- int ret;
+ int i, ret;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*afe));
if (!indio_dev)
@@ -543,6 +492,15 @@ static int afe4404_probe(struct i2c_client *client,
return PTR_ERR(afe->regmap);
}
+ for (i = 0; i < F_MAX_FIELDS; i++) {
+ afe->fields[i] = devm_regmap_field_alloc(afe->dev, afe->regmap,
+ afe4404_reg_fields[i]);
+ if (IS_ERR(afe->fields[i])) {
+ dev_err(afe->dev, "Unable to allocate regmap fields\n");
+ return PTR_ERR(afe->fields[i]);
+ }
+ }
+
afe->regulator = devm_regulator_get(afe->dev, "tx_sup");
if (IS_ERR(afe->regulator)) {
dev_err(afe->dev, "Unable to get regulator\n");
@@ -665,7 +623,7 @@ MODULE_DEVICE_TABLE(i2c, afe4404_ids);
static struct i2c_driver afe4404_i2c_driver = {
.driver = {
.name = AFE4404_DRIVER_NAME,
- .of_match_table = of_match_ptr(afe4404_of_match),
+ .of_match_table = afe4404_of_match,
.pm = &afe4404_pm_ops,
},
.probe = afe4404_probe,
@@ -675,5 +633,5 @@ static struct i2c_driver afe4404_i2c_driver = {
module_i2c_driver(afe4404_i2c_driver);
MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
-MODULE_DESCRIPTION("TI AFE4404 Heart Rate and Pulse Oximeter");
+MODULE_DESCRIPTION("TI AFE4404 Heart Rate Monitor and Pulse Oximeter AFE");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/health/afe440x.h b/drivers/iio/health/afe440x.h
index c671ab78a..1a0f24704 100644
--- a/drivers/iio/health/afe440x.h
+++ b/drivers/iio/health/afe440x.h
@@ -71,8 +71,7 @@
#define AFE440X_CONTROL1_TIMEREN BIT(8)
/* TIAGAIN register fields */
-#define AFE440X_TIAGAIN_ENSEPGAIN_MASK BIT(15)
-#define AFE440X_TIAGAIN_ENSEPGAIN_SHIFT 15
+#define AFE440X_TIAGAIN_ENSEPGAIN BIT(15)
/* CONTROL2 register fields */
#define AFE440X_CONTROL2_PDN_AFE BIT(0)
@@ -89,22 +88,7 @@
#define AFE440X_CONTROL0_WRITE 0x0
#define AFE440X_CONTROL0_READ 0x1
-struct afe440x_reg_info {
- unsigned int reg;
- unsigned int offreg;
- unsigned int shift;
- unsigned int mask;
-};
-
-#define AFE440X_REG_INFO(_reg, _offreg, _sm) \
- { \
- .reg = _reg, \
- .offreg = _offreg, \
- .shift = _sm ## _SHIFT, \
- .mask = _sm ## _MASK, \
- }
-
-#define AFE440X_INTENSITY_CHAN(_index, _name, _mask) \
+#define AFE440X_INTENSITY_CHAN(_index, _mask) \
{ \
.type = IIO_INTENSITY, \
.channel = _index, \
@@ -116,29 +100,23 @@ struct afe440x_reg_info {
.storagebits = 32, \
.endianness = IIO_CPU, \
}, \
- .extend_name = _name, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
_mask, \
+ .indexed = true, \
}
-#define AFE440X_CURRENT_CHAN(_index, _name) \
+#define AFE440X_CURRENT_CHAN(_index) \
{ \
.type = IIO_CURRENT, \
.channel = _index, \
.address = _index, \
- .scan_index = _index, \
- .extend_name = _name, \
+ .scan_index = -1, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_SCALE), \
+ .indexed = true, \
.output = true, \
}
-enum afe440x_reg_type {
- SIMPLE,
- RESISTANCE,
- CAPACITANCE,
-};
-
struct afe440x_val_table {
int integer;
int fract;
@@ -164,10 +142,7 @@ static DEVICE_ATTR_RO(_name)
struct afe440x_attr {
struct device_attribute dev_attr;
- unsigned int reg;
- unsigned int shift;
- unsigned int mask;
- enum afe440x_reg_type type;
+ unsigned int field;
const struct afe440x_val_table *val_table;
unsigned int table_size;
};
@@ -175,17 +150,14 @@ struct afe440x_attr {
#define to_afe440x_attr(_dev_attr) \
container_of(_dev_attr, struct afe440x_attr, dev_attr)
-#define AFE440X_ATTR(_name, _reg, _field, _type, _table, _size) \
+#define AFE440X_ATTR(_name, _field, _table) \
struct afe440x_attr afe440x_attr_##_name = { \
.dev_attr = __ATTR(_name, (S_IRUGO | S_IWUSR), \
afe440x_show_register, \
afe440x_store_register), \
- .reg = _reg, \
- .shift = _field ## _SHIFT, \
- .mask = _field ## _MASK, \
- .type = _type, \
+ .field = _field, \
.val_table = _table, \
- .table_size = _size, \
+ .table_size = ARRAY_SIZE(_table), \
}
#endif /* _AFE440X_H */
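Dropping .extend_name in favour of .indexed changes the sysfs naming scheme: an indexed intensity channel with .channel = LED2 (== 1) is exposed as in_intensity1_raw, where the old extend_name form produced names like in_intensity_led2_raw. This is why the gain attributes in the two drivers above were renamed to the in_intensityN_* pattern. For instance, with the new macro:

static const struct iio_chan_spec demo_chan =
	AFE440X_INTENSITY_CHAN(LED2, 0);
/* userspace sees in_intensity1_raw (LED2 == 1, .indexed = true) */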
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
index 738a86d9e..d04124345 100644
--- a/drivers/iio/humidity/Kconfig
+++ b/drivers/iio/humidity/Kconfig
@@ -6,6 +6,8 @@ menu "Humidity sensors"
config AM2315
tristate "Aosong AM2315 relative humidity and temperature sensor"
depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
If you say yes here you get support for the Aosong AM2315
relative humidity and ambient temperature sensor.
diff --git a/drivers/iio/humidity/am2315.c b/drivers/iio/humidity/am2315.c
index 0ebced557..ff96b6d0f 100644
--- a/drivers/iio/humidity/am2315.c
+++ b/drivers/iio/humidity/am2315.c
@@ -276,6 +276,7 @@ static const struct i2c_device_id am2315_i2c_id[] = {
{"am2315", 0},
{}
};
+MODULE_DEVICE_TABLE(i2c, am2315_i2c_id);
static const struct acpi_device_id am2315_acpi_id[] = {
{"AOS2315", 0},
diff --git a/drivers/iio/humidity/htu21.c b/drivers/iio/humidity/htu21.c
index 11cbc38b4..0fbbd8c40 100644
--- a/drivers/iio/humidity/htu21.c
+++ b/drivers/iio/humidity/htu21.c
@@ -236,6 +236,7 @@ static const struct i2c_device_id htu21_id[] = {
{"ms8607-humidity", MS8607},
{}
};
+MODULE_DEVICE_TABLE(i2c, htu21_id);
static struct i2c_driver htu21_driver = {
.probe = htu21_probe,
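Both humidity hunks add the same one-liner: MODULE_DEVICE_TABLE(i2c, ...) emits the module alias entries (i2c:am2315, i2c:htu21, ...) that let udev/modprobe autoload the driver when a matching I2C device is instantiated; without it the id table only matches if the module is already loaded. The shape, with a hypothetical table:

static const struct i2c_device_id foo_id[] = {
	{ "foo", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, foo_id);	/* exports the i2c:foo alias */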
diff --git a/drivers/iio/iio_core.h b/drivers/iio/iio_core.h
index 359883525..4c45488e3 100644
--- a/drivers/iio/iio_core.h
+++ b/drivers/iio/iio_core.h
@@ -79,4 +79,7 @@ void iio_device_unregister_eventset(struct iio_dev *indio_dev);
void iio_device_wakeup_eventset(struct iio_dev *indio_dev);
int iio_event_getfd(struct iio_dev *indio_dev);
+struct iio_event_interface;
+bool iio_event_enabled(const struct iio_event_interface *ev_int);
+
#endif
diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
index b8a290ec9..e0251b8c1 100644
--- a/drivers/iio/imu/bmi160/bmi160_core.c
+++ b/drivers/iio/imu/bmi160/bmi160_core.c
@@ -20,6 +20,7 @@
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/buffer.h>
+#include <linux/iio/sysfs.h>
#include "bmi160.h"
@@ -410,7 +411,8 @@ static irqreturn_t bmi160_trigger_handler(int irq, void *p)
buf[j++] = sample;
}
- iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+ iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
@@ -466,10 +468,36 @@ static int bmi160_write_raw(struct iio_dev *indio_dev,
return 0;
}
+static
+IIO_CONST_ATTR(in_accel_sampling_frequency_available,
+ "0.78125 1.5625 3.125 6.25 12.5 25 50 100 200 400 800 1600");
+static
+IIO_CONST_ATTR(in_anglvel_sampling_frequency_available,
+ "25 50 100 200 400 800 1600 3200");
+static
+IIO_CONST_ATTR(in_accel_scale_available,
+ "0.000598 0.001197 0.002394 0.004788");
+static
+IIO_CONST_ATTR(in_anglvel_scale_available,
+ "0.001065 0.000532 0.000266 0.000133 0.000066");
+
+static struct attribute *bmi160_attrs[] = {
+ &iio_const_attr_in_accel_sampling_frequency_available.dev_attr.attr,
+ &iio_const_attr_in_anglvel_sampling_frequency_available.dev_attr.attr,
+ &iio_const_attr_in_accel_scale_available.dev_attr.attr,
+ &iio_const_attr_in_anglvel_scale_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group bmi160_attrs_group = {
+ .attrs = bmi160_attrs,
+};
+
static const struct iio_info bmi160_info = {
.driver_module = THIS_MODULE,
.read_raw = bmi160_read_raw,
.write_raw = bmi160_write_raw,
+ .attrs = &bmi160_attrs_group,
};
static const char *bmi160_match_acpi_device(struct device *dev)
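IIO_CONST_ATTR() is a convenience from <linux/iio/sysfs.h> for read-only, fixed-string sysfs attributes; hanging the resulting attribute_group off iio_info.attrs makes the core register the files with the device. Approximately what one of the lines above expands to (a sketch based on the 4.8-era macro):

static struct iio_const_attr iio_const_attr_in_accel_scale_available = {
	.string = "0.000598 0.001197 0.002394 0.004788",
	.dev_attr = __ATTR(in_accel_scale_available, S_IRUGO,
			   iio_read_const_attr, NULL),
};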
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
index f756feecf..5483b2ea7 100644
--- a/drivers/iio/imu/inv_mpu6050/Kconfig
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -13,8 +13,8 @@ config INV_MPU6050_I2C
select INV_MPU6050_IIO
select REGMAP_I2C
help
- This driver supports the Invensense MPU6050/6500/9150 motion tracking
- devices over I2C.
+ This driver supports the Invensense MPU6050/6500/9150 and ICM20608
+ motion tracking devices over I2C.
This driver can be built as a module. The module will be called
inv-mpu6050-i2c.
@@ -24,7 +24,7 @@ config INV_MPU6050_SPI
select INV_MPU6050_IIO
select REGMAP_SPI
help
- This driver supports the Invensense MPU6000/6500/9150 motion tracking
- devices over SPI.
+ This driver supports the Invensense MPU6050/6500/9150 and ICM20608
+ motion tracking devices over SPI.
This driver can be built as a module. The module will be called
inv-mpu6050-spi.
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index ee40dae5a..b9fcbf18a 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -113,6 +113,12 @@ static const struct inv_mpu6050_hw hw_info[] = {
.reg = &reg_set_6050,
.config = &chip_config_6050,
},
+ {
+ .whoami = INV_ICM20608_WHOAMI_VALUE,
+ .name = "ICM20608",
+ .reg = &reg_set_6500,
+ .config = &chip_config_6050,
+ },
};
int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, u32 mask)
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index e1fd7fa53..19580d1db 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -170,6 +170,7 @@ static const struct i2c_device_id inv_mpu_id[] = {
{"mpu6050", INV_MPU6050},
{"mpu6500", INV_MPU6500},
{"mpu9150", INV_MPU9150},
+ {"icm20608", INV_ICM20608},
{}
};
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index 3bf8544cc..f0e8c5dd9 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -70,6 +70,7 @@ enum inv_devices {
INV_MPU6500,
INV_MPU6000,
INV_MPU9150,
+ INV_ICM20608,
INV_NUM_PARTS
};
@@ -225,6 +226,7 @@ struct inv_mpu6050_state {
#define INV_MPU6050_WHOAMI_VALUE 0x68
#define INV_MPU6500_WHOAMI_VALUE 0x70
#define INV_MPU9150_WHOAMI_VALUE 0x68
+#define INV_ICM20608_WHOAMI_VALUE 0xAF
/* scan element definition */
enum inv_mpu6050_scan {
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index d0700628e..3a9f3eac9 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -107,7 +107,7 @@ irqreturn_t inv_mpu6050_irq_handler(int irq, void *p)
struct inv_mpu6050_state *st = iio_priv(indio_dev);
s64 timestamp;
- timestamp = iio_get_time_ns();
+ timestamp = iio_get_time_ns(indio_dev);
kfifo_in_spinlocked(&st->timestamps, &timestamp, 1,
&st->time_stamp_lock);
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
index 190a4a51c..6e6476dfa 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
@@ -82,6 +82,7 @@ static const struct spi_device_id inv_mpu_id[] = {
{"mpu6000", INV_MPU6000},
{"mpu6500", INV_MPU6500},
{"mpu9150", INV_MPU9150},
+ {"icm20608", INV_ICM20608},
{}
};
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 2e6a42758..d2b889918 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -80,6 +80,7 @@ static const char * const iio_chan_type_name_spec[] = {
[IIO_RESISTANCE] = "resistance",
[IIO_PH] = "ph",
[IIO_UVINDEX] = "uvindex",
+ [IIO_ELECTRICALCONDUCTIVITY] = "electricalconductivity",
};
static const char * const iio_modifier_names[] = {
@@ -177,6 +178,86 @@ ssize_t iio_read_const_attr(struct device *dev,
}
EXPORT_SYMBOL(iio_read_const_attr);
+static int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id)
+{
+ int ret;
+ const struct iio_event_interface *ev_int = indio_dev->event_interface;
+
+ ret = mutex_lock_interruptible(&indio_dev->mlock);
+ if (ret)
+ return ret;
+ if ((ev_int && iio_event_enabled(ev_int)) ||
+ iio_buffer_enabled(indio_dev)) {
+ mutex_unlock(&indio_dev->mlock);
+ return -EBUSY;
+ }
+ indio_dev->clock_id = clock_id;
+ mutex_unlock(&indio_dev->mlock);
+
+ return 0;
+}
+
+/**
+ * iio_get_time_ns() - utility function to get a time stamp for events etc
+ * @indio_dev: device
+ */
+s64 iio_get_time_ns(const struct iio_dev *indio_dev)
+{
+ struct timespec tp;
+
+ switch (iio_device_get_clock(indio_dev)) {
+ case CLOCK_REALTIME:
+ ktime_get_real_ts(&tp);
+ break;
+ case CLOCK_MONOTONIC:
+ ktime_get_ts(&tp);
+ break;
+ case CLOCK_MONOTONIC_RAW:
+ getrawmonotonic(&tp);
+ break;
+ case CLOCK_REALTIME_COARSE:
+ tp = current_kernel_time();
+ break;
+ case CLOCK_MONOTONIC_COARSE:
+ tp = get_monotonic_coarse();
+ break;
+ case CLOCK_BOOTTIME:
+ get_monotonic_boottime(&tp);
+ break;
+ case CLOCK_TAI:
+ timekeeping_clocktai(&tp);
+ break;
+ default:
+ BUG();
+ }
+
+ return timespec_to_ns(&tp);
+}
+EXPORT_SYMBOL(iio_get_time_ns);
+
+/**
+ * iio_get_time_res() - utility function to get time stamp clock resolution in
+ * nano seconds.
+ * @indio_dev: device
+ */
+unsigned int iio_get_time_res(const struct iio_dev *indio_dev)
+{
+ switch (iio_device_get_clock(indio_dev)) {
+ case CLOCK_REALTIME:
+ case CLOCK_MONOTONIC:
+ case CLOCK_MONOTONIC_RAW:
+ case CLOCK_BOOTTIME:
+ case CLOCK_TAI:
+ return hrtimer_resolution;
+ case CLOCK_REALTIME_COARSE:
+ case CLOCK_MONOTONIC_COARSE:
+ return LOW_RES_NSEC;
+ default:
+ BUG();
+ }
+}
+EXPORT_SYMBOL(iio_get_time_res);
+
static int __init iio_init(void)
{
int ret;
@@ -988,11 +1069,91 @@ static ssize_t iio_show_dev_name(struct device *dev,
static DEVICE_ATTR(name, S_IRUGO, iio_show_dev_name, NULL);
+static ssize_t iio_show_timestamp_clock(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ const clockid_t clk = iio_device_get_clock(indio_dev);
+ const char *name;
+ ssize_t sz;
+
+ switch (clk) {
+ case CLOCK_REALTIME:
+ name = "realtime\n";
+ sz = sizeof("realtime\n");
+ break;
+ case CLOCK_MONOTONIC:
+ name = "monotonic\n";
+ sz = sizeof("monotonic\n");
+ break;
+ case CLOCK_MONOTONIC_RAW:
+ name = "monotonic_raw\n";
+ sz = sizeof("monotonic_raw\n");
+ break;
+ case CLOCK_REALTIME_COARSE:
+ name = "realtime_coarse\n";
+ sz = sizeof("realtime_coarse\n");
+ break;
+ case CLOCK_MONOTONIC_COARSE:
+ name = "monotonic_coarse\n";
+ sz = sizeof("monotonic_coarse\n");
+ break;
+ case CLOCK_BOOTTIME:
+ name = "boottime\n";
+ sz = sizeof("boottime\n");
+ break;
+ case CLOCK_TAI:
+ name = "tai\n";
+ sz = sizeof("tai\n");
+ break;
+ default:
+ BUG();
+ }
+
+ memcpy(buf, name, sz);
+ return sz;
+}
+
+static ssize_t iio_store_timestamp_clock(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ clockid_t clk;
+ int ret;
+
+ if (sysfs_streq(buf, "realtime"))
+ clk = CLOCK_REALTIME;
+ else if (sysfs_streq(buf, "monotonic"))
+ clk = CLOCK_MONOTONIC;
+ else if (sysfs_streq(buf, "monotonic_raw"))
+ clk = CLOCK_MONOTONIC_RAW;
+ else if (sysfs_streq(buf, "realtime_coarse"))
+ clk = CLOCK_REALTIME_COARSE;
+ else if (sysfs_streq(buf, "monotonic_coarse"))
+ clk = CLOCK_MONOTONIC_COARSE;
+ else if (sysfs_streq(buf, "boottime"))
+ clk = CLOCK_BOOTTIME;
+ else if (sysfs_streq(buf, "tai"))
+ clk = CLOCK_TAI;
+ else
+ return -EINVAL;
+
+ ret = iio_device_set_clock(dev_to_iio_dev(dev), clk);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static DEVICE_ATTR(current_timestamp_clock, S_IRUGO | S_IWUSR,
+ iio_show_timestamp_clock, iio_store_timestamp_clock);
+
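From userspace the new attribute is a plain sysfs file. A minimal sketch that selects the monotonic clock, assuming a device registered as iio:device0 (the index is an example, not part of this patch):

/* Hypothetical userspace helper; the sysfs path layout is the standard one. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path =
		"/sys/bus/iio/devices/iio:device0/current_timestamp_clock";
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return 1;
	/* Any of the names parsed by iio_store_timestamp_clock() works here */
	if (write(fd, "monotonic", strlen("monotonic")) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}

Reading the file back returns the current selection; the default is "realtime", since a zeroed clock_id is CLOCK_REALTIME.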
static int iio_device_register_sysfs(struct iio_dev *indio_dev)
{
int i, ret = 0, attrcount, attrn, attrcount_orig = 0;
struct iio_dev_attr *p;
- struct attribute **attr;
+ struct attribute **attr, *clk = NULL;
/* First count elements in any existing group */
if (indio_dev->info->attrs) {
@@ -1007,16 +1168,25 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
*/
if (indio_dev->channels)
for (i = 0; i < indio_dev->num_channels; i++) {
- ret = iio_device_add_channel_sysfs(indio_dev,
- &indio_dev
- ->channels[i]);
+ const struct iio_chan_spec *chan =
+ &indio_dev->channels[i];
+
+ if (chan->type == IIO_TIMESTAMP)
+ clk = &dev_attr_current_timestamp_clock.attr;
+
+ ret = iio_device_add_channel_sysfs(indio_dev, chan);
if (ret < 0)
goto error_clear_attrs;
attrcount += ret;
}
+ if (indio_dev->event_interface)
+ clk = &dev_attr_current_timestamp_clock.attr;
+
if (indio_dev->name)
attrcount++;
+ if (clk)
+ attrcount++;
indio_dev->chan_attr_group.attrs = kcalloc(attrcount + 1,
sizeof(indio_dev->chan_attr_group.attrs[0]),
@@ -1037,6 +1207,8 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
indio_dev->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr;
if (indio_dev->name)
indio_dev->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr;
+ if (clk)
+ indio_dev->chan_attr_group.attrs[attrn++] = clk;
indio_dev->groups[indio_dev->groupcounter++] =
&indio_dev->chan_attr_group;
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index cae332b1d..0ebfc923a 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -44,6 +44,11 @@ struct iio_event_interface {
struct mutex read_lock;
};
+bool iio_event_enabled(const struct iio_event_interface *ev_int)
+{
+ return !!test_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
+}
+
/**
* iio_push_event() - try to add event to the list for userspace reading
* @indio_dev: IIO device structure
@@ -60,7 +65,7 @@ int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
int copied;
/* Does anyone care? */
- if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
+ if (iio_event_enabled(ev_int)) {
ev.id = ev_code;
ev.timestamp = timestamp;
@@ -180,8 +185,14 @@ int iio_event_getfd(struct iio_dev *indio_dev)
if (ev_int == NULL)
return -ENODEV;
- if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags))
- return -EBUSY;
+ fd = mutex_lock_interruptible(&indio_dev->mlock);
+ if (fd)
+ return fd;
+
+ if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
+ fd = -EBUSY;
+ goto unlock;
+ }
iio_device_get(indio_dev);
@@ -194,6 +205,8 @@ int iio_event_getfd(struct iio_dev *indio_dev)
kfifo_reset_out(&ev_int->det_events);
}
+unlock:
+ mutex_unlock(&indio_dev->mlock);
return fd;
}
diff --git a/drivers/iio/industrialio-sw-device.c b/drivers/iio/industrialio-sw-device.c
new file mode 100644
index 000000000..81b49cfca
--- /dev/null
+++ b/drivers/iio/industrialio-sw-device.c
@@ -0,0 +1,182 @@
+/*
+ * The Industrial I/O core, software IIO devices functions
+ *
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kmod.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include <linux/iio/sw_device.h>
+#include <linux/iio/configfs.h>
+#include <linux/configfs.h>
+
+static struct config_group *iio_devices_group;
+static struct config_item_type iio_device_type_group_type;
+
+static struct config_item_type iio_devices_group_type = {
+ .ct_owner = THIS_MODULE,
+};
+
+static LIST_HEAD(iio_device_types_list);
+static DEFINE_MUTEX(iio_device_types_lock);
+
+static
+struct iio_sw_device_type *__iio_find_sw_device_type(const char *name,
+ unsigned len)
+{
+ struct iio_sw_device_type *d = NULL, *iter;
+
+ list_for_each_entry(iter, &iio_device_types_list, list)
+ if (!strcmp(iter->name, name)) {
+ d = iter;
+ break;
+ }
+
+ return d;
+}
+
+int iio_register_sw_device_type(struct iio_sw_device_type *d)
+{
+ struct iio_sw_device_type *iter;
+ int ret = 0;
+
+ mutex_lock(&iio_device_types_lock);
+ iter = __iio_find_sw_device_type(d->name, strlen(d->name));
+ if (iter)
+ ret = -EBUSY;
+ else
+ list_add_tail(&d->list, &iio_device_types_list);
+ mutex_unlock(&iio_device_types_lock);
+
+ if (ret)
+ return ret;
+
+ d->group = configfs_register_default_group(iio_devices_group, d->name,
+ &iio_device_type_group_type);
+ if (IS_ERR(d->group))
+ ret = PTR_ERR(d->group);
+
+ return ret;
+}
+EXPORT_SYMBOL(iio_register_sw_device_type);
+
+void iio_unregister_sw_device_type(struct iio_sw_device_type *dt)
+{
+ struct iio_sw_device_type *iter;
+
+ mutex_lock(&iio_device_types_lock);
+ iter = __iio_find_sw_device_type(dt->name, strlen(dt->name));
+ if (iter)
+ list_del(&dt->list);
+ mutex_unlock(&iio_device_types_lock);
+
+ configfs_unregister_default_group(dt->group);
+}
+EXPORT_SYMBOL(iio_unregister_sw_device_type);
+
+static
+struct iio_sw_device_type *iio_get_sw_device_type(const char *name)
+{
+ struct iio_sw_device_type *dt;
+
+ mutex_lock(&iio_device_types_lock);
+ dt = __iio_find_sw_device_type(name, strlen(name));
+ if (dt && !try_module_get(dt->owner))
+ dt = NULL;
+ mutex_unlock(&iio_device_types_lock);
+
+ return dt;
+}
+
+struct iio_sw_device *iio_sw_device_create(const char *type, const char *name)
+{
+ struct iio_sw_device *d;
+ struct iio_sw_device_type *dt;
+
+ dt = iio_get_sw_device_type(type);
+ if (!dt) {
+ pr_err("Invalid device type: %s\n", type);
+ return ERR_PTR(-EINVAL);
+ }
+ d = dt->ops->probe(name);
+ if (IS_ERR(d))
+ goto out_module_put;
+
+ d->device_type = dt;
+
+ return d;
+out_module_put:
+ module_put(dt->owner);
+ return d;
+}
+EXPORT_SYMBOL(iio_sw_device_create);
+
+void iio_sw_device_destroy(struct iio_sw_device *d)
+{
+ struct iio_sw_device_type *dt = d->device_type;
+
+ dt->ops->remove(d);
+ module_put(dt->owner);
+}
+EXPORT_SYMBOL(iio_sw_device_destroy);
+
+static struct config_group *device_make_group(struct config_group *group,
+ const char *name)
+{
+ struct iio_sw_device *d;
+
+ d = iio_sw_device_create(group->cg_item.ci_name, name);
+ if (IS_ERR(d))
+ return ERR_CAST(d);
+
+ config_item_set_name(&d->group.cg_item, "%s", name);
+
+ return &d->group;
+}
+
+static void device_drop_group(struct config_group *group,
+ struct config_item *item)
+{
+ struct iio_sw_device *d = to_iio_sw_device(item);
+
+ iio_sw_device_destroy(d);
+ config_item_put(item);
+}
+
+static struct configfs_group_operations device_ops = {
+ .make_group = &device_make_group,
+ .drop_item = &device_drop_group,
+};
+
+static struct config_item_type iio_device_type_group_type = {
+ .ct_group_ops = &device_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static int __init iio_sw_device_init(void)
+{
+ iio_devices_group =
+ configfs_register_default_group(&iio_configfs_subsys.su_group,
+ "devices",
+ &iio_devices_group_type);
+ return PTR_ERR_OR_ZERO(iio_devices_group);
+}
+module_init(iio_sw_device_init);
+
+static void __exit iio_sw_device_exit(void)
+{
+ configfs_unregister_default_group(iio_devices_group);
+}
+module_exit(iio_sw_device_exit);
+
+MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com>");
+MODULE_DESCRIPTION("Industrial I/O software devices support");
+MODULE_LICENSE("GPL v2");
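A provider registers a device type, and instances are then created from configfs. A minimal sketch, with all dummy_* names hypothetical and the probe/remove bodies elided, assuming the ops/type layout used by the code above:

/* Sketch of a software device type provider (names are hypothetical). */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/iio/sw_device.h>

static struct iio_sw_device *dummy_probe(const char *name)
{
	/* Allocate and register an iio_dev, wrap it in an iio_sw_device */
	return ERR_PTR(-ENOMEM);	/* placeholder */
}

static int dummy_remove(struct iio_sw_device *swd)
{
	/* Tear down whatever dummy_probe() created */
	return 0;
}

static const struct iio_sw_device_ops dummy_ops = {
	.probe = dummy_probe,
	.remove = dummy_remove,
};

static struct iio_sw_device_type dummy_type = {
	.name = "dummy",
	.owner = THIS_MODULE,
	.ops = &dummy_ops,
};

static int __init dummy_init(void)
{
	return iio_register_sw_device_type(&dummy_type);
}
module_init(dummy_init);

static void __exit dummy_exit(void)
{
	iio_unregister_sw_device_type(&dummy_type);
}
module_exit(dummy_exit);

MODULE_LICENSE("GPL v2");

With the type registered, an instance is created through configfs, e.g. mkdir /sys/kernel/config/iio/devices/dummy/my-dev (assuming configfs is mounted at /sys/kernel/config), which lands in device_make_group() above.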
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index 0c52dfe64..7ad82fdd3 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -64,10 +64,16 @@ static struct attribute *iio_trig_dev_attrs[] = {
};
ATTRIBUTE_GROUPS(iio_trig_dev);
+static struct iio_trigger *__iio_trigger_find_by_name(const char *name);
+
int iio_trigger_register(struct iio_trigger *trig_info)
{
int ret;
+ /* trig_info->ops is required for the module member */
+ if (!trig_info->ops)
+ return -EINVAL;
+
trig_info->id = ida_simple_get(&iio_trigger_ida, 0, 0, GFP_KERNEL);
if (trig_info->id < 0)
return trig_info->id;
@@ -82,11 +88,19 @@ int iio_trigger_register(struct iio_trigger *trig_info)
/* Add to list of available triggers held by the IIO core */
mutex_lock(&iio_trigger_list_lock);
+ if (__iio_trigger_find_by_name(trig_info->name)) {
+ pr_err("Duplicate trigger name '%s'\n", trig_info->name);
+ ret = -EEXIST;
+ goto error_device_del;
+ }
list_add_tail(&trig_info->list, &iio_trigger_list);
mutex_unlock(&iio_trigger_list_lock);
return 0;
+error_device_del:
+ mutex_unlock(&iio_trigger_list_lock);
+ device_del(&trig_info->dev);
error_unregister_id:
ida_simple_remove(&iio_trigger_ida, trig_info->id);
return ret;
@@ -105,6 +119,18 @@ void iio_trigger_unregister(struct iio_trigger *trig_info)
}
EXPORT_SYMBOL(iio_trigger_unregister);
+/* Search for trigger by name, assuming iio_trigger_list_lock held */
+static struct iio_trigger *__iio_trigger_find_by_name(const char *name)
+{
+ struct iio_trigger *iter;
+
+ list_for_each_entry(iter, &iio_trigger_list, list)
+ if (!strcmp(iter->name, name))
+ return iter;
+
+ return NULL;
+}
+
static struct iio_trigger *iio_trigger_find_by_name(const char *name,
size_t len)
{
@@ -164,8 +190,7 @@ EXPORT_SYMBOL(iio_trigger_poll_chained);
void iio_trigger_notify_done(struct iio_trigger *trig)
{
- if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
- trig->ops->try_reenable)
+ if (atomic_dec_and_test(&trig->use_count) && trig->ops->try_reenable)
if (trig->ops->try_reenable(trig))
/* Missed an interrupt so launch new poll now */
iio_trigger_poll(trig);
@@ -224,7 +249,7 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
goto out_put_irq;
/* Enable trigger in driver */
- if (trig->ops && trig->ops->set_trigger_state && notinuse) {
+ if (trig->ops->set_trigger_state && notinuse) {
ret = trig->ops->set_trigger_state(trig, true);
if (ret < 0)
goto out_free_irq;
@@ -249,7 +274,7 @@ static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
= (bitmap_weight(trig->pool,
CONFIG_IIO_CONSUMERS_PER_TRIGGER)
== 1);
- if (trig->ops && trig->ops->set_trigger_state && no_other_users) {
+ if (trig->ops->set_trigger_state && no_other_users) {
ret = trig->ops->set_trigger_state(trig, false);
if (ret)
return ret;
@@ -264,7 +289,7 @@ static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
irqreturn_t iio_pollfunc_store_time(int irq, void *p)
{
struct iio_poll_func *pf = p;
- pf->timestamp = iio_get_time_ns();
+ pf->timestamp = iio_get_time_ns(pf->indio_dev);
return IRQ_WAKE_THREAD;
}
EXPORT_SYMBOL(iio_pollfunc_store_time);
@@ -371,7 +396,7 @@ static ssize_t iio_trigger_write_current(struct device *dev,
return ret;
}
- if (trig && trig->ops && trig->ops->validate_device) {
+ if (trig && trig->ops->validate_device) {
ret = trig->ops->validate_device(trig, indio_dev);
if (ret)
return ret;
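Two behavioral changes for trigger providers follow from these hunks: a NULL trig_info->ops now fails registration, and duplicate trigger names are rejected. A minimal sketch of a conforming provider path (my_* names are hypothetical):

static const struct iio_trigger_ops my_trig_ops = {
	.owner = THIS_MODULE,
};

static int my_register_trigger(struct device *parent, int id)
{
	struct iio_trigger *trig;
	int ret;

	trig = iio_trigger_alloc("my-trig-%d", id);
	if (!trig)
		return -ENOMEM;

	trig->dev.parent = parent;
	trig->ops = &my_trig_ops;	/* mandatory now: NULL ops is -EINVAL */

	ret = iio_trigger_register(trig);	/* -EEXIST on a duplicate name */
	if (ret)
		iio_trigger_free(trig);
	return ret;
}

Making ops mandatory is also why the "trig->ops &&" checks could be dropped from the call sites in the hunks above.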
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 7c566f516..357494518 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -76,7 +76,6 @@ config BH1750
config BH1780
tristate "ROHM BH1780 ambient light sensor"
depends on I2C
- depends on !SENSORS_BH1780
help
Say Y here to build support for the ROHM BH1780GLI ambient
light sensor.
@@ -238,6 +237,8 @@ config MAX44000
tristate "MAX44000 Ambient and Infrared Proximity Sensor"
depends on I2C
select REGMAP_I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say Y here if you want to build support for Maxim Integrated's
MAX44000 ambient and infrared proximity sensor device.
diff --git a/drivers/iio/light/acpi-als.c b/drivers/iio/light/acpi-als.c
index 53201d99a..f0b47c501 100644
--- a/drivers/iio/light/acpi-als.c
+++ b/drivers/iio/light/acpi-als.c
@@ -118,7 +118,7 @@ static void acpi_als_notify(struct acpi_device *device, u32 event)
struct iio_dev *indio_dev = acpi_driver_data(device);
struct acpi_als *als = iio_priv(indio_dev);
s32 *buffer = als->evt_buffer;
- s64 time_ns = iio_get_time_ns();
+ s64 time_ns = iio_get_time_ns(indio_dev);
s32 val;
int ret;
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c
index 09ad5f1ce..0113fc843 100644
--- a/drivers/iio/light/adjd_s311.c
+++ b/drivers/iio/light/adjd_s311.c
@@ -118,7 +118,7 @@ static irqreturn_t adjd_s311_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct adjd_s311_data *data = iio_priv(indio_dev);
- s64 time_ns = iio_get_time_ns();
+ s64 time_ns = iio_get_time_ns(indio_dev);
int i, j = 0;
int ret = adjd_s311_req_data(indio_dev);
diff --git a/drivers/iio/light/apds9300.c b/drivers/iio/light/apds9300.c
index e1b9fa5a7..649b26f67 100644
--- a/drivers/iio/light/apds9300.c
+++ b/drivers/iio/light/apds9300.c
@@ -396,7 +396,7 @@ static irqreturn_t apds9300_interrupt_handler(int irq, void *private)
IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, 0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_EITHER),
- iio_get_time_ns());
+ iio_get_time_ns(dev_info));
apds9300_clear_intr(data);
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index 651d57b8a..a4304edc3 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -807,7 +807,7 @@ static irqreturn_t apds9960_interrupt_handler(int irq, void *private)
IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, 0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_EITHER),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
regmap_write(data->regmap, APDS9960_REG_CICLEAR, 1);
}
@@ -816,7 +816,7 @@ static irqreturn_t apds9960_interrupt_handler(int irq, void *private)
IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_EITHER),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
regmap_write(data->regmap, APDS9960_REG_PICLEAR, 1);
}
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
index c8d7b5ea7..9d66e89c5 100644
--- a/drivers/iio/light/cm36651.c
+++ b/drivers/iio/light/cm36651.c
@@ -268,7 +268,7 @@ static irqreturn_t cm36651_irq_handler(int irq, void *data)
CM36651_CMD_READ_RAW_PROXIMITY,
IIO_EV_TYPE_THRESH, ev_dir);
- iio_push_event(indio_dev, ev_code, iio_get_time_ns());
+ iio_push_event(indio_dev, ev_code, iio_get_time_ns(indio_dev));
return IRQ_HANDLED;
}
diff --git a/drivers/iio/light/gp2ap020a00f.c b/drivers/iio/light/gp2ap020a00f.c
index 6d41086f7..6ada9149f 100644
--- a/drivers/iio/light/gp2ap020a00f.c
+++ b/drivers/iio/light/gp2ap020a00f.c
@@ -851,7 +851,7 @@ static irqreturn_t gp2ap020a00f_prox_sensing_handler(int irq, void *data)
GP2AP020A00F_SCAN_MODE_PROXIMITY,
IIO_EV_TYPE_ROC,
IIO_EV_DIR_RISING),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
} else {
iio_push_event(indio_dev,
IIO_UNMOD_EVENT_CODE(
@@ -859,7 +859,7 @@ static irqreturn_t gp2ap020a00f_prox_sensing_handler(int irq, void *data)
GP2AP020A00F_SCAN_MODE_PROXIMITY,
IIO_EV_TYPE_ROC,
IIO_EV_DIR_FALLING),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
}
}
@@ -925,7 +925,7 @@ static irqreturn_t gp2ap020a00f_thresh_event_handler(int irq, void *data)
IIO_MOD_LIGHT_CLEAR,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
}
if (test_bit(GP2AP020A00F_FLAG_ALS_FALLING_EV, &priv->flags)) {
@@ -939,7 +939,7 @@ static irqreturn_t gp2ap020a00f_thresh_event_handler(int irq, void *data)
IIO_MOD_LIGHT_CLEAR,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_FALLING),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
}
}
@@ -1287,22 +1287,14 @@ static int gp2ap020a00f_read_raw(struct iio_dev *indio_dev,
struct gp2ap020a00f_data *data = iio_priv(indio_dev);
int err = -EINVAL;
- mutex_lock(&data->lock);
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- if (iio_buffer_enabled(indio_dev)) {
- err = -EBUSY;
- goto error_unlock;
- }
+ if (mask == IIO_CHAN_INFO_RAW) {
+ err = iio_device_claim_direct_mode(indio_dev);
+ if (err)
+ return err;
err = gp2ap020a00f_read_channel(data, chan, val);
- break;
+ iio_device_release_direct_mode(indio_dev);
}
-
-error_unlock:
- mutex_unlock(&data->lock);
-
return err < 0 ? err : IIO_VAL_INT;
}
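This conversion (repeated in isl29125, tcs3414 and tcs3472 below) replaces per-driver locks and open-coded iio_buffer_enabled() checks with the core's claim/release helpers. A minimal sketch of the shared pattern (my_read_channel is a hypothetical driver function):

static int my_read_raw_direct(struct iio_dev *indio_dev,
			      struct iio_chan_spec const *chan, int *val)
{
	int ret;

	/* Atomically guarantees no buffer is running; -EBUSY otherwise */
	ret = iio_device_claim_direct_mode(indio_dev);
	if (ret)
		return ret;

	ret = my_read_channel(iio_priv(indio_dev), chan, val);

	iio_device_release_direct_mode(indio_dev);
	return ret < 0 ? ret : IIO_VAL_INT;
}

Besides closing the race against buffer enable, this lets the drivers drop their private mutex entirely, as the struct changes below show.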
diff --git a/drivers/iio/light/isl29125.c b/drivers/iio/light/isl29125.c
index e2945a20e..1d2c0c8a1 100644
--- a/drivers/iio/light/isl29125.c
+++ b/drivers/iio/light/isl29125.c
@@ -44,13 +44,15 @@
#define ISL29125_MODE_B 0x3
#define ISL29125_MODE_RGB 0x5
+#define ISL29125_SENSING_RANGE_0 5722 /* 375 lux full range */
+#define ISL29125_SENSING_RANGE_1 152590 /* 10k lux full range */
+
#define ISL29125_MODE_RANGE BIT(3)
#define ISL29125_STATUS_CONV BIT(1)
struct isl29125_data {
struct i2c_client *client;
- struct mutex lock;
u8 conf1;
u16 buffer[8]; /* 3x 16-bit, padding, 8 bytes timestamp */
};
@@ -128,11 +130,11 @@ static int isl29125_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (iio_buffer_enabled(indio_dev))
- return -EBUSY;
- mutex_lock(&data->lock);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
ret = isl29125_read_data(data, chan->scan_index);
- mutex_unlock(&data->lock);
+ iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
*val = ret;
@@ -140,9 +142,9 @@ static int isl29125_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_SCALE:
*val = 0;
if (data->conf1 & ISL29125_MODE_RANGE)
- *val2 = 152590; /* 10k lux full range */
+ *val2 = ISL29125_SENSING_RANGE_1; /* 10k lux full range */
else
- *val2 = 5722; /* 375 lux full range */
+ *val2 = ISL29125_SENSING_RANGE_0; /* 375 lux full range */
return IIO_VAL_INT_PLUS_MICRO;
}
return -EINVAL;
@@ -158,9 +160,9 @@ static int isl29125_write_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_SCALE:
if (val != 0)
return -EINVAL;
- if (val2 == 152590)
+ if (val2 == ISL29125_SENSING_RANGE_1)
data->conf1 |= ISL29125_MODE_RANGE;
- else if (val2 == 5722)
+ else if (val2 == ISL29125_SENSING_RANGE_0)
data->conf1 &= ~ISL29125_MODE_RANGE;
else
return -EINVAL;
@@ -189,7 +191,7 @@ static irqreturn_t isl29125_trigger_handler(int irq, void *p)
}
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
@@ -259,7 +261,6 @@ static int isl29125_probe(struct i2c_client *client,
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
- mutex_init(&data->lock);
indio_dev->dev.parent = &client->dev;
indio_dev->info = &isl29125_info;
diff --git a/drivers/iio/light/jsa1212.c b/drivers/iio/light/jsa1212.c
index 99a62816c..e8a8931b4 100644
--- a/drivers/iio/light/jsa1212.c
+++ b/drivers/iio/light/jsa1212.c
@@ -325,9 +325,6 @@ static int jsa1212_probe(struct i2c_client *client,
struct regmap *regmap;
int ret;
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
- return -EOPNOTSUPP;
-
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
diff --git a/drivers/iio/light/lm3533-als.c b/drivers/iio/light/lm3533-als.c
index e56937c40..f409c2047 100644
--- a/drivers/iio/light/lm3533-als.c
+++ b/drivers/iio/light/lm3533-als.c
@@ -267,7 +267,7 @@ static irqreturn_t lm3533_als_isr(int irq, void *dev_id)
0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_EITHER),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
out:
return IRQ_HANDLED;
}
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 6bf89d8f3..3afc53a3d 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -1256,7 +1256,8 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p)
buf[j++] = psdata & LTR501_PS_DATA_MASK;
}
- iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+ iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
@@ -1282,14 +1283,14 @@ static irqreturn_t ltr501_interrupt_handler(int irq, void *private)
IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, 0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_EITHER),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
if (status & LTR501_STATUS_PS_INTR)
iio_push_event(indio_dev,
IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_EITHER),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
return IRQ_HANDLED;
}
diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c
index f17cb2ea1..6511b20a2 100644
--- a/drivers/iio/light/max44000.c
+++ b/drivers/iio/light/max44000.c
@@ -511,7 +511,8 @@ static irqreturn_t max44000_trigger_handler(int irq, void *p)
}
mutex_unlock(&data->lock);
- iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+ iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_get_time_ns(indio_dev));
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c
index b776c8ed4..78c9b3a64 100644
--- a/drivers/iio/light/opt3001.c
+++ b/drivers/iio/light/opt3001.c
@@ -713,13 +713,13 @@ static irqreturn_t opt3001_irq(int irq, void *_iio)
IIO_UNMOD_EVENT_CODE(IIO_LIGHT, 0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING),
- iio_get_time_ns());
+ iio_get_time_ns(iio));
if (ret & OPT3001_CONFIGURATION_FL)
iio_push_event(iio,
IIO_UNMOD_EVENT_CODE(IIO_LIGHT, 0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_FALLING),
- iio_get_time_ns());
+ iio_get_time_ns(iio));
} else if (ret & OPT3001_CONFIGURATION_CRF) {
ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_RESULT);
if (ret < 0) {
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index 9e847f8f4..45cf8b0a4 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -528,7 +528,7 @@ static irqreturn_t stk3310_irq_handler(int irq, void *private)
struct iio_dev *indio_dev = private;
struct stk3310_data *data = iio_priv(indio_dev);
- data->timestamp = iio_get_time_ns();
+ data->timestamp = iio_get_time_ns(indio_dev);
return IRQ_WAKE_THREAD;
}
diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c
index f90f8c591..a795afb76 100644
--- a/drivers/iio/light/tcs3414.c
+++ b/drivers/iio/light/tcs3414.c
@@ -53,7 +53,6 @@
struct tcs3414_data {
struct i2c_client *client;
- struct mutex lock;
u8 control;
u8 gain;
u8 timing;
@@ -134,16 +133,16 @@ static int tcs3414_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (iio_buffer_enabled(indio_dev))
- return -EBUSY;
- mutex_lock(&data->lock);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
ret = tcs3414_req_data(data);
if (ret < 0) {
- mutex_unlock(&data->lock);
+ iio_device_release_direct_mode(indio_dev);
return ret;
}
ret = i2c_smbus_read_word_data(data->client, chan->address);
- mutex_unlock(&data->lock);
+ iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
*val = ret;
@@ -217,7 +216,7 @@ static irqreturn_t tcs3414_trigger_handler(int irq, void *p)
}
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
@@ -288,7 +287,6 @@ static int tcs3414_probe(struct i2c_client *client,
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
- mutex_init(&data->lock);
indio_dev->dev.parent = &client->dev;
indio_dev->info = &tcs3414_info;
diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
index 1b530bf04..3aa71e34a 100644
--- a/drivers/iio/light/tcs3472.c
+++ b/drivers/iio/light/tcs3472.c
@@ -52,7 +52,6 @@
struct tcs3472_data {
struct i2c_client *client;
- struct mutex lock;
u8 enable;
u8 control;
u8 atime;
@@ -117,17 +116,16 @@ static int tcs3472_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (iio_buffer_enabled(indio_dev))
- return -EBUSY;
-
- mutex_lock(&data->lock);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
ret = tcs3472_req_data(data);
if (ret < 0) {
- mutex_unlock(&data->lock);
+ iio_device_release_direct_mode(indio_dev);
return ret;
}
ret = i2c_smbus_read_word_data(data->client, chan->address);
- mutex_unlock(&data->lock);
+ iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
*val = ret;
@@ -204,7 +202,7 @@ static irqreturn_t tcs3472_trigger_handler(int irq, void *p)
}
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
@@ -263,7 +261,6 @@ static int tcs3472_probe(struct i2c_client *client,
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
- mutex_init(&data->lock);
indio_dev->dev.parent = &client->dev;
indio_dev->info = &tcs3472_info;
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index 57b108c30..04598ae99 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -630,7 +630,7 @@ static irqreturn_t tsl2563_event_handler(int irq, void *private)
0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_EITHER),
- iio_get_time_ns());
+ iio_get_time_ns(dev_info));
/* clear the interrupt and push the event */
i2c_smbus_write_byte(chip->client, TSL2563_CMD | TSL2563_CLEARINT);
diff --git a/drivers/iio/light/us5182d.c b/drivers/iio/light/us5182d.c
index 45bc2f742..20c40f780 100644
--- a/drivers/iio/light/us5182d.c
+++ b/drivers/iio/light/us5182d.c
@@ -833,7 +833,7 @@ static irqreturn_t us5182d_irq_thread_handler(int irq, void *private)
dir = ret & US5182D_CFG0_PROX ? IIO_EV_DIR_RISING : IIO_EV_DIR_FALLING;
ev = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 1, IIO_EV_TYPE_THRESH, dir);
- iio_push_event(indio_dev, ev, iio_get_time_ns());
+ iio_push_event(indio_dev, ev, iio_get_time_ns(indio_dev));
ret = i2c_smbus_write_byte_data(data->client, US5182D_REG_CFG0,
ret & ~US5182D_CFG0_PX_IRQ);
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index 84e6559cc..1f842abcb 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -44,6 +44,7 @@ config BMC150_MAGN_I2C
This driver implements only the magnetometer part, which has
its own address and register map.
+ This driver also supports the Bosch BMC156 and BMM150 chips over I2C.
To compile this driver as a module, choose M here: the module will be
called bmc150_magn_i2c.
@@ -60,6 +61,7 @@ config BMC150_MAGN_SPI
This driver implements only the magnetometer part, which has
its own address and register map.
+ This driver also supports the Bosch BMC156 and BMM150 chips over SPI.
To compile this driver as a module, choose M here: the module will be
called bmc150_magn_spi.
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 609a2c401..af8606cc7 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -33,6 +33,7 @@
#include <linux/of_gpio.h>
#include <linux/acpi.h>
#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -379,37 +380,40 @@ struct ak8975_data {
u8 cntl_cache;
struct iio_mount_matrix orientation;
struct regulator *vdd;
+ struct regulator *vid;
};
/* Enable attached power regulator if any. */
-static int ak8975_power_on(struct i2c_client *client)
+static int ak8975_power_on(const struct ak8975_data *data)
{
- const struct iio_dev *indio_dev = i2c_get_clientdata(client);
- struct ak8975_data *data = iio_priv(indio_dev);
int ret;
- data->vdd = devm_regulator_get(&client->dev, "vdd");
- if (IS_ERR_OR_NULL(data->vdd)) {
- ret = PTR_ERR(data->vdd);
- if (ret == -ENODEV)
- ret = 0;
- } else {
- ret = regulator_enable(data->vdd);
+ ret = regulator_enable(data->vdd);
+ if (ret) {
+ dev_warn(&data->client->dev,
+ "Failed to enable specified Vdd supply\n");
+ return ret;
}
-
- if (ret)
- dev_err(&client->dev, "failed to enable Vdd supply: %d\n", ret);
- return ret;
+ ret = regulator_enable(data->vid);
+ if (ret) {
+ dev_warn(&data->client->dev,
+ "Failed to enable specified Vid supply\n");
+ return ret;
+ }
+ /*
+ * According to the datasheet the power supply rise time is 200us
+ * and the minimum wait time before mode setting is 100us, in
+ * total 300us. Add some margin and say minimum 500us here.
+ */
+ usleep_range(500, 1000);
+ return 0;
}
/* Disable attached power regulator if any. */
-static void ak8975_power_off(const struct i2c_client *client)
+static void ak8975_power_off(const struct ak8975_data *data)
{
- const struct iio_dev *indio_dev = i2c_get_clientdata(client);
- const struct ak8975_data *data = iio_priv(indio_dev);
-
- if (!IS_ERR_OR_NULL(data->vdd))
- regulator_disable(data->vdd);
+ regulator_disable(data->vid);
+ regulator_disable(data->vdd);
}
/*
@@ -430,8 +434,8 @@ static int ak8975_who_i_am(struct i2c_client *client,
* AK8975 | DEVICE_ID | NA
* AK8963 | DEVICE_ID | NA
*/
- ret = i2c_smbus_read_i2c_block_data(client, AK09912_REG_WIA1,
- 2, wia_val);
+ ret = i2c_smbus_read_i2c_block_data_or_emulated(
+ client, AK09912_REG_WIA1, 2, wia_val);
if (ret < 0) {
dev_err(&client->dev, "Error reading WIA\n");
return ret;
@@ -543,9 +547,9 @@ static int ak8975_setup(struct i2c_client *client)
}
/* Get asa data and store in the device data. */
- ret = i2c_smbus_read_i2c_block_data(client,
- data->def->ctrl_regs[ASA_BASE],
- 3, data->asa);
+ ret = i2c_smbus_read_i2c_block_data_or_emulated(
+ client, data->def->ctrl_regs[ASA_BASE],
+ 3, data->asa);
if (ret < 0) {
dev_err(&client->dev, "Not able to read asa data\n");
return ret;
@@ -686,22 +690,31 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
struct ak8975_data *data = iio_priv(indio_dev);
const struct i2c_client *client = data->client;
const struct ak_def *def = data->def;
+ u16 buff;
int ret;
+ pm_runtime_get_sync(&data->client->dev);
+
mutex_lock(&data->lock);
ret = ak8975_start_read_axis(data, client);
if (ret)
goto exit;
- ret = i2c_smbus_read_word_data(client, def->data_regs[index]);
+ ret = i2c_smbus_read_i2c_block_data_or_emulated(
+ client, def->data_regs[index],
+ sizeof(buff), (u8 *)&buff);
if (ret < 0)
goto exit;
mutex_unlock(&data->lock);
- /* Clamp to valid range. */
- *val = clamp_t(s16, ret, -def->range, def->range);
+ pm_runtime_mark_last_busy(&data->client->dev);
+ pm_runtime_put_autosuspend(&data->client->dev);
+
+ /* Swap bytes and convert to valid range. */
+ buff = le16_to_cpu(buff);
+ *val = clamp_t(s16, buff, -def->range, def->range);
return IIO_VAL_INT;
exit:
@@ -825,7 +838,8 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev)
buff[1] = clamp_t(s16, le16_to_cpu(buff[1]), -def->range, def->range);
buff[2] = clamp_t(s16, le16_to_cpu(buff[2]), -def->range, def->range);
- iio_push_to_buffers_with_timestamp(indio_dev, buff, iio_get_time_ns());
+ iio_push_to_buffers_with_timestamp(indio_dev, buff,
+ iio_get_time_ns(indio_dev));
return;
unlock:
@@ -919,7 +933,15 @@ static int ak8975_probe(struct i2c_client *client,
data->def = &ak_def_array[chipset];
- err = ak8975_power_on(client);
+ /* Fetch the regulators */
+ data->vdd = devm_regulator_get(&client->dev, "vdd");
+ if (IS_ERR(data->vdd))
+ return PTR_ERR(data->vdd);
+ data->vid = devm_regulator_get(&client->dev, "vid");
+ if (IS_ERR(data->vid))
+ return PTR_ERR(data->vid);
+
+ err = ak8975_power_on(data);
if (err)
return err;
@@ -959,26 +981,93 @@ static int ak8975_probe(struct i2c_client *client,
goto cleanup_buffer;
}
+ /* Enable runtime PM */
+ pm_runtime_get_noresume(&client->dev);
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ /*
+ * The device comes online in 500us, so add two orders of magnitude
+ * of delay before autosuspending: 50 ms.
+ */
+ pm_runtime_set_autosuspend_delay(&client->dev, 50);
+ pm_runtime_use_autosuspend(&client->dev);
+ pm_runtime_put(&client->dev);
+
return 0;
cleanup_buffer:
iio_triggered_buffer_cleanup(indio_dev);
power_off:
- ak8975_power_off(client);
+ ak8975_power_off(data);
return err;
}
static int ak8975_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ak8975_data *data = iio_priv(indio_dev);
+ pm_runtime_get_sync(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+ pm_runtime_disable(&client->dev);
iio_device_unregister(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
- ak8975_power_off(client);
+ ak8975_set_mode(data, POWER_DOWN);
+ ak8975_power_off(data);
return 0;
}
+#ifdef CONFIG_PM
+static int ak8975_runtime_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ak8975_data *data = iio_priv(indio_dev);
+ int ret;
+
+ /* Set the device in power down if it wasn't already */
+ ret = ak8975_set_mode(data, POWER_DOWN);
+ if (ret < 0) {
+ dev_err(&client->dev, "Error in setting power-down mode\n");
+ return ret;
+ }
+ /* Next cut the regulators */
+ ak8975_power_off(data);
+
+ return 0;
+}
+
+static int ak8975_runtime_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ak8975_data *data = iio_priv(indio_dev);
+ int ret;
+
+ /* Take up the regulators */
+ ak8975_power_on(data);
+ /*
+ * We come up in powered down mode, the reading routines will
+ * put us in the mode to read values later.
+ */
+ ret = ak8975_set_mode(data, POWER_DOWN);
+ if (ret < 0) {
+ dev_err(&client->dev, "Error in setting power-down mode\n");
+ return ret;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops ak8975_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(ak8975_runtime_suspend,
+ ak8975_runtime_resume, NULL)
+};
+
static const struct i2c_device_id ak8975_id[] = {
{"ak8975", AK8975},
{"ak8963", AK8963},
@@ -1006,6 +1095,7 @@ MODULE_DEVICE_TABLE(of, ak8975_of_match);
static struct i2c_driver ak8975_driver = {
.driver = {
.name = "ak8975",
+ .pm = &ak8975_dev_pm_ops,
.of_match_table = of_match_ptr(ak8975_of_match),
.acpi_match_table = ACPI_PTR(ak_acpi_match),
},
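The ak8975 read path now brackets each measurement with runtime PM: the regulators are cut after 50 ms of inactivity and transparently re-enabled on the next read. A minimal sketch of the bracket, with the measurement body elided, mirroring the calls added in ak8975_read_axis() above:

	pm_runtime_get_sync(&data->client->dev);  /* resume: power on, POWER_DOWN mode */

	/* ... trigger a measurement and read the result ... */

	pm_runtime_mark_last_busy(&data->client->dev);
	pm_runtime_put_autosuspend(&data->client->dev);  /* idle after the 50 ms delay */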
diff --git a/drivers/iio/magnetometer/bmc150_magn_i2c.c b/drivers/iio/magnetometer/bmc150_magn_i2c.c
index eddc7f0d0..ee0572258 100644
--- a/drivers/iio/magnetometer/bmc150_magn_i2c.c
+++ b/drivers/iio/magnetometer/bmc150_magn_i2c.c
@@ -2,6 +2,7 @@
* 3-axis magnetometer driver supporting the following I2C Bosch-Sensortec chips:
* - BMC150
* - BMC156
+ * - BMM150
*
* Copyright (c) 2016, Intel Corporation.
*
@@ -49,6 +50,7 @@ static int bmc150_magn_i2c_remove(struct i2c_client *client)
static const struct acpi_device_id bmc150_magn_acpi_match[] = {
{"BMC150B", 0},
{"BMC156B", 0},
+ {"BMM150B", 0},
{},
};
MODULE_DEVICE_TABLE(acpi, bmc150_magn_acpi_match);
@@ -56,6 +58,7 @@ MODULE_DEVICE_TABLE(acpi, bmc150_magn_acpi_match);
static const struct i2c_device_id bmc150_magn_i2c_id[] = {
{"bmc150_magn", 0},
{"bmc156_magn", 0},
+ {"bmm150_magn", 0},
{}
};
MODULE_DEVICE_TABLE(i2c, bmc150_magn_i2c_id);
diff --git a/drivers/iio/magnetometer/bmc150_magn_spi.c b/drivers/iio/magnetometer/bmc150_magn_spi.c
index c4c738a07..7d4152d4d 100644
--- a/drivers/iio/magnetometer/bmc150_magn_spi.c
+++ b/drivers/iio/magnetometer/bmc150_magn_spi.c
@@ -2,6 +2,7 @@
* 3-axis magnetometer driver supporting the following SPI Bosch-Sensortec chips:
* - BMC150
* - BMC156
+ * - BMM150
*
* Copyright (c) 2016, Intel Corporation.
*
@@ -41,6 +42,7 @@ static int bmc150_magn_spi_remove(struct spi_device *spi)
static const struct spi_device_id bmc150_magn_spi_id[] = {
{"bmc150_magn", 0},
{"bmc156_magn", 0},
+ {"bmm150_magn", 0},
{}
};
MODULE_DEVICE_TABLE(spi, bmc150_magn_spi_id);
@@ -48,6 +50,7 @@ MODULE_DEVICE_TABLE(spi, bmc150_magn_spi_id);
static const struct acpi_device_id bmc150_magn_acpi_match[] = {
{"BMC150B", 0},
{"BMC156B", 0},
+ {"BMM150B", 0},
{},
};
MODULE_DEVICE_TABLE(acpi, bmc150_magn_acpi_match);
diff --git a/drivers/iio/magnetometer/hmc5843_core.c b/drivers/iio/magnetometer/hmc5843_core.c
index 77882b466..ba3e2a374 100644
--- a/drivers/iio/magnetometer/hmc5843_core.c
+++ b/drivers/iio/magnetometer/hmc5843_core.c
@@ -451,7 +451,7 @@ static irqreturn_t hmc5843_trigger_handler(int irq, void *p)
goto done;
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index 261d51742..f2be4a049 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -261,7 +261,7 @@ static irqreturn_t mag3110_trigger_handler(int irq, void *p)
}
iio_push_to_buffers_with_timestamp(indio_dev, buffer,
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index 8250fc322..3e1f06b22 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -589,13 +589,15 @@ int st_magn_common_probe(struct iio_dev *indio_dev)
indio_dev->info = &magn_info;
mutex_init(&mdata->tb.buf_lock);
- st_sensors_power_enable(indio_dev);
+ err = st_sensors_power_enable(indio_dev);
+ if (err)
+ return err;
err = st_sensors_check_device_support(indio_dev,
ARRAY_SIZE(st_magn_sensors_settings),
st_magn_sensors_settings);
if (err < 0)
- return err;
+ goto st_magn_power_off;
mdata->num_data_channels = ST_MAGN_NUMBER_DATA_CHANNELS;
mdata->multiread_bit = mdata->sensor_settings->multi_read_bit;
@@ -608,11 +610,11 @@ int st_magn_common_probe(struct iio_dev *indio_dev)
err = st_sensors_init_sensor(indio_dev, NULL);
if (err < 0)
- return err;
+ goto st_magn_power_off;
err = st_magn_allocate_ring(indio_dev);
if (err < 0)
- return err;
+ goto st_magn_power_off;
if (irq > 0) {
err = st_sensors_allocate_trigger(indio_dev,
@@ -635,6 +637,8 @@ st_magn_device_register_error:
st_sensors_deallocate_trigger(indio_dev);
st_magn_probe_trigger_error:
st_magn_deallocate_ring(indio_dev);
+st_magn_power_off:
+ st_sensors_power_disable(indio_dev);
return err;
}
diff --git a/drivers/iio/potentiometer/Kconfig b/drivers/iio/potentiometer/Kconfig
index 6acb23810..2e9da1cf3 100644
--- a/drivers/iio/potentiometer/Kconfig
+++ b/drivers/iio/potentiometer/Kconfig
@@ -10,11 +10,22 @@ config DS1803
depends on I2C
help
Say yes here to build support for the Maxim Integrated DS1803
- digital potentiomenter chip.
+ digital potentiometer chip.
To compile this driver as a module, choose M here: the
module will be called ds1803.
+config MAX5487
+ tristate "Maxim MAX5487/MAX5488/MAX5489 Digital Potentiometer driver"
+ depends on SPI
+ help
+ Say yes here to build support for the Maxim
+ MAX5487, MAX5488, MAX5489 digital potentiometer
+ chips.
+
+ To compile this driver as a module, choose M here: the
+ module will be called max5487.
+
config MCP4131
tristate "Microchip MCP413X/414X/415X/416X/423X/424X/425X/426X Digital Potentiometer driver"
depends on SPI
@@ -28,7 +39,7 @@ config MCP4131
MCP4241, MCP4242,
MCP4251, MCP4252,
MCP4261, MCP4262,
- digital potentiomenter chips.
+ digital potentiometer chips.
To compile this driver as a module, choose M here: the
module will be called mcp4131.
@@ -38,9 +49,11 @@ config MCP4531
depends on I2C
help
Say yes here to build support for the Microchip
- MCP4531, MCP4532, MCP4551, MCP4552,
- MCP4631, MCP4632, MCP4651, MCP4652
- digital potentiomenter chips.
+ MCP4531, MCP4532, MCP4541, MCP4542,
+ MCP4551, MCP4552, MCP4561, MCP4562,
+ MCP4631, MCP4632, MCP4641, MCP4642,
+ MCP4651, MCP4652, MCP4661, MCP4662
+ digital potentiometer chips.
To compile this driver as a module, choose M here: the
module will be called mcp4531.
diff --git a/drivers/iio/potentiometer/Makefile b/drivers/iio/potentiometer/Makefile
index 6007faa2f..8adb58f38 100644
--- a/drivers/iio/potentiometer/Makefile
+++ b/drivers/iio/potentiometer/Makefile
@@ -4,6 +4,7 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_DS1803) += ds1803.o
+obj-$(CONFIG_MAX5487) += max5487.o
obj-$(CONFIG_MCP4131) += mcp4131.o
obj-$(CONFIG_MCP4531) += mcp4531.o
obj-$(CONFIG_TPL0102) += tpl0102.o
diff --git a/drivers/iio/potentiometer/max5487.c b/drivers/iio/potentiometer/max5487.c
new file mode 100644
index 000000000..6c50939a2
--- /dev/null
+++ b/drivers/iio/potentiometer/max5487.c
@@ -0,0 +1,161 @@
+/*
+ * max5487.c - Support for MAX5487, MAX5488, MAX5489 digital potentiometers
+ *
+ * Copyright (C) 2016 Cristina-Gabriela Moraru <cristina.moraru09@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/acpi.h>
+
+#include <linux/iio/sysfs.h>
+#include <linux/iio/iio.h>
+
+#define MAX5487_WRITE_WIPER_A (0x01 << 8)
+#define MAX5487_WRITE_WIPER_B (0x02 << 8)
+
+/* copy both wiper regs to NV regs */
+#define MAX5487_COPY_AB_TO_NV (0x23 << 8)
+/* copy both NV regs to wiper regs */
+#define MAX5487_COPY_NV_TO_AB (0x33 << 8)
+
+#define MAX5487_MAX_POS 255
+
+struct max5487_data {
+ struct spi_device *spi;
+ int kohms;
+};
+
+#define MAX5487_CHANNEL(ch, addr) { \
+ .type = IIO_RESISTANCE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = ch, \
+ .address = addr, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+}
+
+static const struct iio_chan_spec max5487_channels[] = {
+ MAX5487_CHANNEL(0, MAX5487_WRITE_WIPER_A),
+ MAX5487_CHANNEL(1, MAX5487_WRITE_WIPER_B),
+};
+
+static int max5487_write_cmd(struct spi_device *spi, u16 cmd)
+{
+ return spi_write(spi, (const void *) &cmd, sizeof(u16));
+}
+
+static int max5487_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct max5487_data *data = iio_priv(indio_dev);
+
+ if (mask != IIO_CHAN_INFO_SCALE)
+ return -EINVAL;
+
+ *val = 1000 * data->kohms;
+ *val2 = MAX5487_MAX_POS;
+
+ return IIO_VAL_FRACTIONAL;
+}
+
+static int max5487_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct max5487_data *data = iio_priv(indio_dev);
+
+ if (mask != IIO_CHAN_INFO_RAW)
+ return -EINVAL;
+
+ if (val < 0 || val > MAX5487_MAX_POS)
+ return -EINVAL;
+
+ return max5487_write_cmd(data->spi, chan->address | val);
+}
+
+static const struct iio_info max5487_info = {
+ .read_raw = max5487_read_raw,
+ .write_raw = max5487_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int max5487_spi_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct max5487_data *data;
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ dev_set_drvdata(&spi->dev, indio_dev);
+ data = iio_priv(indio_dev);
+
+ data->spi = spi;
+ data->kohms = id->driver_data;
+
+ indio_dev->info = &max5487_info;
+ indio_dev->name = id->name;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = max5487_channels;
+ indio_dev->num_channels = ARRAY_SIZE(max5487_channels);
+
+ /* restore both wiper regs from NV regs */
+ ret = max5487_write_cmd(data->spi, MAX5487_COPY_NV_TO_AB);
+ if (ret < 0)
+ return ret;
+
+ return iio_device_register(indio_dev);
+}
+
+static int max5487_spi_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(&spi->dev);
+
+ iio_device_unregister(indio_dev);
+
+ /* save both wiper regs to NV regs */
+ return max5487_write_cmd(spi, MAX5487_COPY_AB_TO_NV);
+}
+
+static const struct spi_device_id max5487_id[] = {
+ { "MAX5487", 10 },
+ { "MAX5488", 50 },
+ { "MAX5489", 100 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, max5487_id);
+
+static const struct acpi_device_id max5487_acpi_match[] = {
+ { "MAX5487", 10 },
+ { "MAX5488", 50 },
+ { "MAX5489", 100 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, max5487_acpi_match);
+
+static struct spi_driver max5487_driver = {
+ .driver = {
+ .name = "max5487",
+ .owner = THIS_MODULE,
+ .acpi_match_table = ACPI_PTR(max5487_acpi_match),
+ },
+ .id_table = max5487_id,
+ .probe = max5487_spi_probe,
+ .remove = max5487_spi_remove
+};
+module_spi_driver(max5487_driver);
+
+MODULE_AUTHOR("Cristina-Gabriela Moraru <cristina.moraru09@gmail.com>");
+MODULE_DESCRIPTION("max5487 SPI driver");
+MODULE_LICENSE("GPL v2");
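Worked example of the reported scale: max5487_read_raw() returns IIO_VAL_FRACTIONAL with *val = 1000 * kohms and *val2 = MAX5487_MAX_POS, i.e. the total resistance in ohms divided by the number of wiper steps. For the 10 kOhm MAX5487 that is 10000 / 255 ≈ 39.216 ohms per position, so writing raw value N through the indexed output attributes (out_resistance0_raw / out_resistance1_raw, per the usual channel naming) sets roughly N * 39.2 ohms.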
diff --git a/drivers/iio/potentiometer/mcp4531.c b/drivers/iio/potentiometer/mcp4531.c
index 3b72e1a59..13b6ae2fc 100644
--- a/drivers/iio/potentiometer/mcp4531.c
+++ b/drivers/iio/potentiometer/mcp4531.c
@@ -8,12 +8,20 @@
* DEVID #Wipers #Positions Resistor Opts (kOhm) i2c address
* mcp4531 1 129 5, 10, 50, 100 010111x
* mcp4532 1 129 5, 10, 50, 100 01011xx
+ * mcp4541 1 129 5, 10, 50, 100 010111x
+ * mcp4542 1 129 5, 10, 50, 100 01011xx
* mcp4551 1 257 5, 10, 50, 100 010111x
* mcp4552 1 257 5, 10, 50, 100 01011xx
+ * mcp4561 1 257 5, 10, 50, 100 010111x
+ * mcp4562 1 257 5, 10, 50, 100 01011xx
* mcp4631 2 129 5, 10, 50, 100 0101xxx
* mcp4632 2 129 5, 10, 50, 100 01011xx
+ * mcp4641 2 129 5, 10, 50, 100 0101xxx
+ * mcp4642 2 129 5, 10, 50, 100 01011xx
* mcp4651 2 257 5, 10, 50, 100 0101xxx
* mcp4652 2 257 5, 10, 50, 100 01011xx
+ * mcp4661 2 257 5, 10, 50, 100 0101xxx
+ * mcp4662 2 257 5, 10, 50, 100 01011xx
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
@@ -23,6 +31,8 @@
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/iio/iio.h>
@@ -37,18 +47,34 @@ enum mcp4531_type {
MCP453x_103,
MCP453x_503,
MCP453x_104,
+ MCP454x_502,
+ MCP454x_103,
+ MCP454x_503,
+ MCP454x_104,
MCP455x_502,
MCP455x_103,
MCP455x_503,
MCP455x_104,
+ MCP456x_502,
+ MCP456x_103,
+ MCP456x_503,
+ MCP456x_104,
MCP463x_502,
MCP463x_103,
MCP463x_503,
MCP463x_104,
+ MCP464x_502,
+ MCP464x_103,
+ MCP464x_503,
+ MCP464x_104,
MCP465x_502,
MCP465x_103,
MCP465x_503,
MCP465x_104,
+ MCP466x_502,
+ MCP466x_103,
+ MCP466x_503,
+ MCP466x_104,
};
static const struct mcp4531_cfg mcp4531_cfg[] = {
@@ -56,18 +82,34 @@ static const struct mcp4531_cfg mcp4531_cfg[] = {
[MCP453x_103] = { .wipers = 1, .max_pos = 128, .kohms = 10, },
[MCP453x_503] = { .wipers = 1, .max_pos = 128, .kohms = 50, },
[MCP453x_104] = { .wipers = 1, .max_pos = 128, .kohms = 100, },
+ [MCP454x_502] = { .wipers = 1, .max_pos = 128, .kohms = 5, },
+ [MCP454x_103] = { .wipers = 1, .max_pos = 128, .kohms = 10, },
+ [MCP454x_503] = { .wipers = 1, .max_pos = 128, .kohms = 50, },
+ [MCP454x_104] = { .wipers = 1, .max_pos = 128, .kohms = 100, },
[MCP455x_502] = { .wipers = 1, .max_pos = 256, .kohms = 5, },
[MCP455x_103] = { .wipers = 1, .max_pos = 256, .kohms = 10, },
[MCP455x_503] = { .wipers = 1, .max_pos = 256, .kohms = 50, },
[MCP455x_104] = { .wipers = 1, .max_pos = 256, .kohms = 100, },
+ [MCP456x_502] = { .wipers = 1, .max_pos = 256, .kohms = 5, },
+ [MCP456x_103] = { .wipers = 1, .max_pos = 256, .kohms = 10, },
+ [MCP456x_503] = { .wipers = 1, .max_pos = 256, .kohms = 50, },
+ [MCP456x_104] = { .wipers = 1, .max_pos = 256, .kohms = 100, },
[MCP463x_502] = { .wipers = 2, .max_pos = 128, .kohms = 5, },
[MCP463x_103] = { .wipers = 2, .max_pos = 128, .kohms = 10, },
[MCP463x_503] = { .wipers = 2, .max_pos = 128, .kohms = 50, },
[MCP463x_104] = { .wipers = 2, .max_pos = 128, .kohms = 100, },
+ [MCP464x_502] = { .wipers = 2, .max_pos = 128, .kohms = 5, },
+ [MCP464x_103] = { .wipers = 2, .max_pos = 128, .kohms = 10, },
+ [MCP464x_503] = { .wipers = 2, .max_pos = 128, .kohms = 50, },
+ [MCP464x_104] = { .wipers = 2, .max_pos = 128, .kohms = 100, },
[MCP465x_502] = { .wipers = 2, .max_pos = 256, .kohms = 5, },
[MCP465x_103] = { .wipers = 2, .max_pos = 256, .kohms = 10, },
[MCP465x_503] = { .wipers = 2, .max_pos = 256, .kohms = 50, },
[MCP465x_104] = { .wipers = 2, .max_pos = 256, .kohms = 100, },
+ [MCP466x_502] = { .wipers = 2, .max_pos = 256, .kohms = 5, },
+ [MCP466x_103] = { .wipers = 2, .max_pos = 256, .kohms = 10, },
+ [MCP466x_503] = { .wipers = 2, .max_pos = 256, .kohms = 50, },
+ [MCP466x_104] = { .wipers = 2, .max_pos = 256, .kohms = 100, },
};
#define MCP4531_WRITE (0 << 2)
@@ -148,12 +190,89 @@ static const struct iio_info mcp4531_info = {
.driver_module = THIS_MODULE,
};
+#ifdef CONFIG_OF
+
+#define MCP4531_COMPATIBLE(of_compatible, cfg) { \
+ .compatible = of_compatible, \
+ .data = &mcp4531_cfg[cfg], \
+}
+
+static const struct of_device_id mcp4531_of_match[] = {
+ MCP4531_COMPATIBLE("microchip,mcp4531-502", MCP453x_502),
+ MCP4531_COMPATIBLE("microchip,mcp4531-103", MCP453x_103),
+ MCP4531_COMPATIBLE("microchip,mcp4531-503", MCP453x_503),
+ MCP4531_COMPATIBLE("microchip,mcp4531-104", MCP453x_104),
+ MCP4531_COMPATIBLE("microchip,mcp4532-502", MCP453x_502),
+ MCP4531_COMPATIBLE("microchip,mcp4532-103", MCP453x_103),
+ MCP4531_COMPATIBLE("microchip,mcp4532-503", MCP453x_503),
+ MCP4531_COMPATIBLE("microchip,mcp4532-104", MCP453x_104),
+ MCP4531_COMPATIBLE("microchip,mcp4541-502", MCP454x_502),
+ MCP4531_COMPATIBLE("microchip,mcp4541-103", MCP454x_103),
+ MCP4531_COMPATIBLE("microchip,mcp4541-503", MCP454x_503),
+ MCP4531_COMPATIBLE("microchip,mcp4541-104", MCP454x_104),
+ MCP4531_COMPATIBLE("microchip,mcp4542-502", MCP454x_502),
+ MCP4531_COMPATIBLE("microchip,mcp4542-103", MCP454x_103),
+ MCP4531_COMPATIBLE("microchip,mcp4542-503", MCP454x_503),
+ MCP4531_COMPATIBLE("microchip,mcp4542-104", MCP454x_104),
+ MCP4531_COMPATIBLE("microchip,mcp4551-502", MCP455x_502),
+ MCP4531_COMPATIBLE("microchip,mcp4551-103", MCP455x_103),
+ MCP4531_COMPATIBLE("microchip,mcp4551-503", MCP455x_503),
+ MCP4531_COMPATIBLE("microchip,mcp4551-104", MCP455x_104),
+ MCP4531_COMPATIBLE("microchip,mcp4552-502", MCP455x_502),
+ MCP4531_COMPATIBLE("microchip,mcp4552-103", MCP455x_103),
+ MCP4531_COMPATIBLE("microchip,mcp4552-503", MCP455x_503),
+ MCP4531_COMPATIBLE("microchip,mcp4552-104", MCP455x_104),
+ MCP4531_COMPATIBLE("microchip,mcp4561-502", MCP456x_502),
+ MCP4531_COMPATIBLE("microchip,mcp4561-103", MCP456x_103),
+ MCP4531_COMPATIBLE("microchip,mcp4561-503", MCP456x_503),
+ MCP4531_COMPATIBLE("microchip,mcp4561-104", MCP456x_104),
+ MCP4531_COMPATIBLE("microchip,mcp4562-502", MCP456x_502),
+ MCP4531_COMPATIBLE("microchip,mcp4562-103", MCP456x_103),
+ MCP4531_COMPATIBLE("microchip,mcp4562-503", MCP456x_503),
+ MCP4531_COMPATIBLE("microchip,mcp4562-104", MCP456x_104),
+ MCP4531_COMPATIBLE("microchip,mcp4631-502", MCP463x_502),
+ MCP4531_COMPATIBLE("microchip,mcp4631-103", MCP463x_103),
+ MCP4531_COMPATIBLE("microchip,mcp4631-503", MCP463x_503),
+ MCP4531_COMPATIBLE("microchip,mcp4631-104", MCP463x_104),
+ MCP4531_COMPATIBLE("microchip,mcp4632-502", MCP463x_502),
+ MCP4531_COMPATIBLE("microchip,mcp4632-103", MCP463x_103),
+ MCP4531_COMPATIBLE("microchip,mcp4632-503", MCP463x_503),
+ MCP4531_COMPATIBLE("microchip,mcp4632-104", MCP463x_104),
+ MCP4531_COMPATIBLE("microchip,mcp4641-502", MCP464x_502),
+ MCP4531_COMPATIBLE("microchip,mcp4641-103", MCP464x_103),
+ MCP4531_COMPATIBLE("microchip,mcp4641-503", MCP464x_503),
+ MCP4531_COMPATIBLE("microchip,mcp4641-104", MCP464x_104),
+ MCP4531_COMPATIBLE("microchip,mcp4642-502", MCP464x_502),
+ MCP4531_COMPATIBLE("microchip,mcp4642-103", MCP464x_103),
+ MCP4531_COMPATIBLE("microchip,mcp4642-503", MCP464x_503),
+ MCP4531_COMPATIBLE("microchip,mcp4642-104", MCP464x_104),
+ MCP4531_COMPATIBLE("microchip,mcp4651-502", MCP465x_502),
+ MCP4531_COMPATIBLE("microchip,mcp4651-103", MCP465x_103),
+ MCP4531_COMPATIBLE("microchip,mcp4651-503", MCP465x_503),
+ MCP4531_COMPATIBLE("microchip,mcp4651-104", MCP465x_104),
+ MCP4531_COMPATIBLE("microchip,mcp4652-502", MCP465x_502),
+ MCP4531_COMPATIBLE("microchip,mcp4652-103", MCP465x_103),
+ MCP4531_COMPATIBLE("microchip,mcp4652-503", MCP465x_503),
+ MCP4531_COMPATIBLE("microchip,mcp4652-104", MCP465x_104),
+ MCP4531_COMPATIBLE("microchip,mcp4661-502", MCP466x_502),
+ MCP4531_COMPATIBLE("microchip,mcp4661-103", MCP466x_103),
+ MCP4531_COMPATIBLE("microchip,mcp4661-503", MCP466x_503),
+ MCP4531_COMPATIBLE("microchip,mcp4661-104", MCP466x_104),
+ MCP4531_COMPATIBLE("microchip,mcp4662-502", MCP466x_502),
+ MCP4531_COMPATIBLE("microchip,mcp4662-103", MCP466x_103),
+ MCP4531_COMPATIBLE("microchip,mcp4662-503", MCP466x_503),
+ MCP4531_COMPATIBLE("microchip,mcp4662-104", MCP466x_104),
+ { /* sentinel */ }
+};
+#endif
+
static int mcp4531_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
struct mcp4531_data *data;
struct iio_dev *indio_dev;
+ const struct of_device_id *match;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_WORD_DATA)) {
@@ -167,7 +286,12 @@ static int mcp4531_probe(struct i2c_client *client,
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
- data->cfg = &mcp4531_cfg[id->driver_data];
+
+ match = of_match_device(of_match_ptr(mcp4531_of_match), dev);
+ if (match)
+ data->cfg = of_device_get_match_data(dev);
+ else
+ data->cfg = &mcp4531_cfg[id->driver_data];
indio_dev->dev.parent = dev;
indio_dev->info = &mcp4531_info;
@@ -187,6 +311,14 @@ static const struct i2c_device_id mcp4531_id[] = {
{ "mcp4532-103", MCP453x_103 },
{ "mcp4532-503", MCP453x_503 },
{ "mcp4532-104", MCP453x_104 },
+ { "mcp4541-502", MCP454x_502 },
+ { "mcp4541-103", MCP454x_103 },
+ { "mcp4541-503", MCP454x_503 },
+ { "mcp4541-104", MCP454x_104 },
+ { "mcp4542-502", MCP454x_502 },
+ { "mcp4542-103", MCP454x_103 },
+ { "mcp4542-503", MCP454x_503 },
+ { "mcp4542-104", MCP454x_104 },
{ "mcp4551-502", MCP455x_502 },
{ "mcp4551-103", MCP455x_103 },
{ "mcp4551-503", MCP455x_503 },
@@ -195,6 +327,14 @@ static const struct i2c_device_id mcp4531_id[] = {
{ "mcp4552-103", MCP455x_103 },
{ "mcp4552-503", MCP455x_503 },
{ "mcp4552-104", MCP455x_104 },
+ { "mcp4561-502", MCP456x_502 },
+ { "mcp4561-103", MCP456x_103 },
+ { "mcp4561-503", MCP456x_503 },
+ { "mcp4561-104", MCP456x_104 },
+ { "mcp4562-502", MCP456x_502 },
+ { "mcp4562-103", MCP456x_103 },
+ { "mcp4562-503", MCP456x_503 },
+ { "mcp4562-104", MCP456x_104 },
{ "mcp4631-502", MCP463x_502 },
{ "mcp4631-103", MCP463x_103 },
{ "mcp4631-503", MCP463x_503 },
@@ -203,6 +343,14 @@ static const struct i2c_device_id mcp4531_id[] = {
{ "mcp4632-103", MCP463x_103 },
{ "mcp4632-503", MCP463x_503 },
{ "mcp4632-104", MCP463x_104 },
+ { "mcp4641-502", MCP464x_502 },
+ { "mcp4641-103", MCP464x_103 },
+ { "mcp4641-503", MCP464x_503 },
+ { "mcp4641-104", MCP464x_104 },
+ { "mcp4642-502", MCP464x_502 },
+ { "mcp4642-103", MCP464x_103 },
+ { "mcp4642-503", MCP464x_503 },
+ { "mcp4642-104", MCP464x_104 },
{ "mcp4651-502", MCP465x_502 },
{ "mcp4651-103", MCP465x_103 },
{ "mcp4651-503", MCP465x_503 },
@@ -211,6 +359,14 @@ static const struct i2c_device_id mcp4531_id[] = {
{ "mcp4652-103", MCP465x_103 },
{ "mcp4652-503", MCP465x_503 },
{ "mcp4652-104", MCP465x_104 },
+ { "mcp4661-502", MCP466x_502 },
+ { "mcp4661-103", MCP466x_103 },
+ { "mcp4661-503", MCP466x_503 },
+ { "mcp4661-104", MCP466x_104 },
+ { "mcp4662-502", MCP466x_502 },
+ { "mcp4662-103", MCP466x_103 },
+ { "mcp4662-503", MCP466x_503 },
+ { "mcp4662-104", MCP466x_104 },
{}
};
MODULE_DEVICE_TABLE(i2c, mcp4531_id);
@@ -218,6 +374,7 @@ MODULE_DEVICE_TABLE(i2c, mcp4531_id);
static struct i2c_driver mcp4531_driver = {
.driver = {
.name = "mcp4531",
+ .of_match_table = of_match_ptr(mcp4531_of_match),
},
.probe = mcp4531_probe,
.id_table = mcp4531_id,
diff --git a/drivers/iio/potentiometer/tpl0102.c b/drivers/iio/potentiometer/tpl0102.c
index 5c304d42d..7b6b54531 100644
--- a/drivers/iio/potentiometer/tpl0102.c
+++ b/drivers/iio/potentiometer/tpl0102.c
@@ -116,10 +116,6 @@ static int tpl0102_probe(struct i2c_client *client,
struct tpl0102_data *data;
struct iio_dev *indio_dev;
- if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_WORD_DATA))
- return -EOPNOTSUPP;
-
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index cda9f128f..d130cdc78 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -6,16 +6,33 @@
menu "Pressure sensors"
config BMP280
- tristate "Bosch Sensortec BMP180 and BMP280 pressure sensor driver"
- depends on I2C
+ tristate "Bosch Sensortec BMP180/BMP280 pressure sensor I2C driver"
+ depends on (I2C || SPI_MASTER)
depends on !(BMP085_I2C=y || BMP085_I2C=m)
- select REGMAP_I2C
+ depends on !(BMP085_SPI=y || BMP085_SPI=m)
+ select REGMAP
+ select BMP280_I2C if (I2C)
+ select BMP280_SPI if (SPI_MASTER)
help
Say yes here to build support for Bosch Sensortec BMP180 and BMP280
- pressure and temperature sensors.
+ pressure and temperature sensors. Also supports the BME280 with
+ an additional humidity sensor channel.
- To compile this driver as a module, choose M here: the module
- will be called bmp280.
+ To compile this driver as a module, choose M here: the core module
+ will be called bmp280 and you will also get bmp280-i2c for I2C
+ and/or bmp280-spi for SPI support.
+
+config BMP280_I2C
+ tristate
+ depends on BMP280
+ depends on I2C
+ select REGMAP_I2C
+
+config BMP280_SPI
+ tristate
+ depends on BMP280
+ depends on SPI_MASTER
+ select REGMAP
config HID_SENSOR_PRESS
depends on HID_SENSOR_HUB
@@ -130,7 +147,7 @@ config IIO_ST_PRESS
select IIO_TRIGGERED_BUFFER if (IIO_BUFFER)
help
Say yes here to build support for STMicroelectronics pressure
- sensors: LPS001WP, LPS25H, LPS331AP.
+ sensors: LPS001WP, LPS25H, LPS331AP, LPS22HB.
This driver can also be built as a module. If so, these modules
will be created:
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index 17d6e7afa..7f395bed5 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -4,6 +4,9 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_BMP280) += bmp280.o
+bmp280-objs := bmp280-core.o bmp280-regmap.o
+obj-$(CONFIG_BMP280_I2C) += bmp280-i2c.o
+obj-$(CONFIG_BMP280_SPI) += bmp280-spi.o
obj-$(CONFIG_HID_SENSOR_PRESS) += hid-sensor-press.o
obj-$(CONFIG_HP03) += hp03.o
obj-$(CONFIG_MPL115) += mpl115.o
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
new file mode 100644
index 000000000..e5a533cbd
--- /dev/null
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -0,0 +1,1119 @@
+/*
+ * Copyright (c) 2010 Christoph Mair <christoph.mair@gmail.com>
+ * Copyright (c) 2012 Bosch Sensortec GmbH
+ * Copyright (c) 2012 Unixphere AB
+ * Copyright (c) 2014 Intel Corporation
+ * Copyright (c) 2016 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Driver for Bosch Sensortec BMP180 and BMP280 digital pressure sensor.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Datasheet:
+ * https://ae-bst.resource.bosch.com/media/_tech/media/datasheets/BST-BMP180-DS000-121.pdf
+ * https://ae-bst.resource.bosch.com/media/_tech/media/datasheets/BST-BMP280-DS001-12.pdf
+ * https://ae-bst.resource.bosch.com/media/_tech/media/datasheets/BST-BME280_DS001-11.pdf
+ */
+
+#define pr_fmt(fmt) "bmp280: " fmt
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h> /* For irq_get_irq_data() */
+#include <linux/completion.h>
+#include <linux/pm_runtime.h>
+#include <linux/random.h>
+
+#include "bmp280.h"
+
+/*
+ * These enums are used for indexing into the array of calibration
+ * coefficients for BMP180.
+ */
+enum { AC1, AC2, AC3, AC4, AC5, AC6, B1, B2, MB, MC, MD };
+
+struct bmp180_calib {
+ s16 AC1;
+ s16 AC2;
+ s16 AC3;
+ u16 AC4;
+ u16 AC5;
+ u16 AC6;
+ s16 B1;
+ s16 B2;
+ s16 MB;
+ s16 MC;
+ s16 MD;
+};
+
+struct bmp280_data {
+ struct device *dev;
+ struct mutex lock;
+ struct regmap *regmap;
+ struct completion done;
+ bool use_eoc;
+ const struct bmp280_chip_info *chip_info;
+ struct bmp180_calib calib;
+ struct regulator *vddd;
+ struct regulator *vdda;
+ unsigned int start_up_time; /* in milliseconds */
+
+ /* log of base 2 of oversampling rate */
+ u8 oversampling_press;
+ u8 oversampling_temp;
+ u8 oversampling_humid;
+
+ /*
+ * Carryover value from temperature conversion, used in pressure
+ * calculation.
+ */
+ s32 t_fine;
+};
+
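+/*
+ * Per-variant operations and limits: each supported chip (BMP180, BMP280
+ * and BME280) provides one of these and the core dispatches through it.
+ */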
+struct bmp280_chip_info {
+ const int *oversampling_temp_avail;
+ int num_oversampling_temp_avail;
+
+ const int *oversampling_press_avail;
+ int num_oversampling_press_avail;
+
+ const int *oversampling_humid_avail;
+ int num_oversampling_humid_avail;
+
+ int (*chip_config)(struct bmp280_data *);
+ int (*read_temp)(struct bmp280_data *, int *);
+ int (*read_press)(struct bmp280_data *, int *, int *);
+ int (*read_humid)(struct bmp280_data *, int *, int *);
+};
+
+/*
+ * These enums are used for indexing into the array of compensation
+ * parameters for BMP280.
+ */
+enum { T1, T2, T3 };
+enum { P1, P2, P3, P4, P5, P6, P7, P8, P9 };
+
+static const struct iio_chan_spec bmp280_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ },
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ },
+ {
+ .type = IIO_HUMIDITYRELATIVE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ },
+};
+
+/*
+ * Returns humidity in percent, with a resolution of 1/1024 %RH. An output
+ * value of "47445" represents 47445/1024 = 46.333 %RH.
+ *
+ * Taken from BME280 datasheet, Section 4.2.3, "Compensation formula".
+ */
+static u32 bmp280_compensate_humidity(struct bmp280_data *data,
+ s32 adc_humidity)
+{
+ struct device *dev = data->dev;
+ unsigned int H1, H3, tmp;
+ int H2, H4, H5, H6, ret, var;
+
+ ret = regmap_read(data->regmap, BMP280_REG_COMP_H1, &H1);
+ if (ret < 0) {
+ dev_err(dev, "failed to read H1 comp value\n");
+ return ret;
+ }
+
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H2, &tmp, 2);
+ if (ret < 0) {
+ dev_err(dev, "failed to read H2 comp value\n");
+ return ret;
+ }
+ H2 = sign_extend32(le16_to_cpu(tmp), 15);
+
+ ret = regmap_read(data->regmap, BMP280_REG_COMP_H3, &H3);
+ if (ret < 0) {
+ dev_err(dev, "failed to read H3 comp value\n");
+ return ret;
+ }
+
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H4, &tmp, 2);
+ if (ret < 0) {
+ dev_err(dev, "failed to read H4 comp value\n");
+ return ret;
+ }
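+ /*
+ * Per the BME280 datasheet, H4 is a signed 12-bit value whose bits
+ * [11:4] live in register 0xE4 and bits [3:0] in the low nibble of
+ * register 0xE5.
+ */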
+ H4 = sign_extend32(((be16_to_cpu(tmp) >> 4) & 0xff0) |
+ (be16_to_cpu(tmp) & 0xf), 11);
+
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H5, &tmp, 2);
+ if (ret < 0) {
+ dev_err(dev, "failed to read H5 comp value\n");
+ return ret;
+ }
+ H5 = sign_extend32(((le16_to_cpu(tmp) >> 4) & 0xfff), 11);
+
+ ret = regmap_read(data->regmap, BMP280_REG_COMP_H6, &tmp);
+ if (ret < 0) {
+ dev_err(dev, "failed to read H6 comp value\n");
+ return ret;
+ }
+ H6 = sign_extend32(tmp, 7);
+
+ var = ((s32)data->t_fine) - 76800;
+ var = ((((adc_humidity << 14) - (H4 << 20) - (H5 * var)) + 16384) >> 15)
+ * (((((((var * H6) >> 10) * (((var * H3) >> 11) + 32768)) >> 10)
+ + 2097152) * H2 + 8192) >> 14);
+ var -= ((((var >> 15) * (var >> 15)) >> 7) * H1) >> 4;
+
+ return var >> 12;
+}
+
+/*
+ * Returns temperature in DegC, resolution is 0.01 DegC. Output value of
+ * "5123" equals 51.23 DegC. t_fine carries fine temperature as global
+ * value.
+ *
+ * Taken from datasheet, Section 3.11.3, "Compensation formula".
+ */
+static s32 bmp280_compensate_temp(struct bmp280_data *data,
+ s32 adc_temp)
+{
+ int ret;
+ s32 var1, var2;
+ __le16 buf[BMP280_COMP_TEMP_REG_COUNT / 2];
+
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_TEMP_START,
+ buf, BMP280_COMP_TEMP_REG_COUNT);
+ if (ret < 0) {
+ dev_err(data->dev,
+ "failed to read temperature calibration parameters\n");
+ return ret;
+ }
+
+ /*
+ * The double casts are necessary because le16_to_cpu returns an
+ * unsigned 16-bit value. Casting that value directly to a
+ * signed 32-bit will not do proper sign extension.
+ *
+ * Conversely, T1 and P1 are unsigned values, so they can be
+ * cast straight to the larger type.
+ */
+ var1 = (((adc_temp >> 3) - ((s32)le16_to_cpu(buf[T1]) << 1)) *
+ ((s32)(s16)le16_to_cpu(buf[T2]))) >> 11;
+ var2 = (((((adc_temp >> 4) - ((s32)le16_to_cpu(buf[T1]))) *
+ ((adc_temp >> 4) - ((s32)le16_to_cpu(buf[T1])))) >> 12) *
+ ((s32)(s16)le16_to_cpu(buf[T3]))) >> 14;
+ data->t_fine = var1 + var2;
+
+ return (data->t_fine * 5 + 128) >> 8;
+}
+
+/*
+ * Returns pressure in Pa as unsigned 32 bit integer in Q24.8 format (24
+ * integer bits and 8 fractional bits). Output value of "24674867"
+ * represents 24674867/256 = 96386.2 Pa = 963.862 hPa
+ *
+ * Taken from datasheet, Section 3.11.3, "Compensation formula".
+ */
+static u32 bmp280_compensate_press(struct bmp280_data *data,
+ s32 adc_press)
+{
+ int ret;
+ s64 var1, var2, p;
+ __le16 buf[BMP280_COMP_PRESS_REG_COUNT / 2];
+
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_PRESS_START,
+ buf, BMP280_COMP_PRESS_REG_COUNT);
+ if (ret < 0) {
+ dev_err(data->dev,
+ "failed to read pressure calibration parameters\n");
+ return ret;
+ }
+
+ var1 = ((s64)data->t_fine) - 128000;
+ var2 = var1 * var1 * (s64)(s16)le16_to_cpu(buf[P6]);
+ var2 += (var1 * (s64)(s16)le16_to_cpu(buf[P5])) << 17;
+ var2 += ((s64)(s16)le16_to_cpu(buf[P4])) << 35;
+ var1 = ((var1 * var1 * (s64)(s16)le16_to_cpu(buf[P3])) >> 8) +
+ ((var1 * (s64)(s16)le16_to_cpu(buf[P2])) << 12);
+ var1 = ((((s64)1) << 47) + var1) * ((s64)le16_to_cpu(buf[P1])) >> 33;
+
+ if (var1 == 0)
+ return 0;
+
+ p = ((((s64)1048576 - adc_press) << 31) - var2) * 3125;
+ p = div64_s64(p, var1);
+ var1 = (((s64)(s16)le16_to_cpu(buf[P9])) * (p >> 13) * (p >> 13)) >> 25;
+ var2 = (((s64)(s16)le16_to_cpu(buf[P8])) * p) >> 19;
+ p = ((p + var1 + var2) >> 8) + (((s64)(s16)le16_to_cpu(buf[P7])) << 4);
+
+ return (u32)p;
+}
+
+static int bmp280_read_temp(struct bmp280_data *data,
+ int *val)
+{
+ int ret;
+ __be32 tmp = 0;
+ s32 adc_temp, comp_temp;
+
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_TEMP_MSB,
+ (u8 *) &tmp, 3);
+ if (ret < 0) {
+ dev_err(data->dev, "failed to read temperature\n");
+ return ret;
+ }
+
+ adc_temp = be32_to_cpu(tmp) >> 12;
+ comp_temp = bmp280_compensate_temp(data, adc_temp);
+
+ /*
+ * val might be NULL if we're called by the read_press routine,
+ * which only cares about the carried-over t_fine value.
+ */
+ if (val) {
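+ /* comp_temp has a resolution of 0.01 degC, scale it to milli degC */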
+ *val = comp_temp * 10;
+ return IIO_VAL_INT;
+ }
+
+ return 0;
+}
+
+static int bmp280_read_press(struct bmp280_data *data,
+ int *val, int *val2)
+{
+ int ret;
+ __be32 tmp = 0;
+ s32 adc_press;
+ u32 comp_press;
+
+ /* Read and compensate temperature so we get a reading of t_fine. */
+ ret = bmp280_read_temp(data, NULL);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_PRESS_MSB,
+ (u8 *) &tmp, 3);
+ if (ret < 0) {
+ dev_err(data->dev, "failed to read pressure\n");
+ return ret;
+ }
+
+ adc_press = be32_to_cpu(tmp) >> 12;
+ comp_press = bmp280_compensate_press(data, adc_press);
+
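+ /* comp_press is Q24.8 in Pa, so 256 * 1000 scales it down to kPa */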
+ *val = comp_press;
+ *val2 = 256000;
+
+ return IIO_VAL_FRACTIONAL;
+}
+
+static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2)
+{
+ int ret;
+ __be16 tmp = 0;
+ s32 adc_humidity;
+ u32 comp_humidity;
+
+ /* Read and compensate temperature so we get a reading of t_fine. */
+ ret = bmp280_read_temp(data, NULL);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_HUMIDITY_MSB,
+ (u8 *) &tmp, 2);
+ if (ret < 0) {
+ dev_err(data->dev, "failed to read humidity\n");
+ return ret;
+ }
+
+ adc_humidity = be16_to_cpu(tmp);
+ comp_humidity = bmp280_compensate_humidity(data, adc_humidity);
+
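+ /* comp_humidity is scaled by 2^10, e.g. 47445 / 1024 = 46.333 %RH */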
+ *val = comp_humidity;
+ *val2 = 1024;
+
+ return IIO_VAL_FRACTIONAL;
+}
+
+static int bmp280_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret;
+ struct bmp280_data *data = iio_priv(indio_dev);
+
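+ /* Keep the device powered up for the duration of the read */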
+ pm_runtime_get_sync(data->dev);
+ mutex_lock(&data->lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ switch (chan->type) {
+ case IIO_HUMIDITYRELATIVE:
+ ret = data->chip_info->read_humid(data, val, val2);
+ break;
+ case IIO_PRESSURE:
+ ret = data->chip_info->read_press(data, val, val2);
+ break;
+ case IIO_TEMP:
+ ret = data->chip_info->read_temp(data, val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ switch (chan->type) {
+ case IIO_HUMIDITYRELATIVE:
+ *val = 1 << data->oversampling_humid;
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_PRESSURE:
+ *val = 1 << data->oversampling_press;
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_TEMP:
+ *val = 1 << data->oversampling_temp;
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&data->lock);
+ pm_runtime_mark_last_busy(data->dev);
+ pm_runtime_put_autosuspend(data->dev);
+
+ return ret;
+}
+
+static int bmp280_write_oversampling_ratio_humid(struct bmp280_data *data,
+ int val)
+{
+ int i;
+ const int *avail = data->chip_info->oversampling_humid_avail;
+ const int n = data->chip_info->num_oversampling_humid_avail;
+
+ for (i = 0; i < n; i++) {
+ if (avail[i] == val) {
+ data->oversampling_humid = ilog2(val);
+
+ return data->chip_info->chip_config(data);
+ }
+ }
+ return -EINVAL;
+}
+
+static int bmp280_write_oversampling_ratio_temp(struct bmp280_data *data,
+ int val)
+{
+ int i;
+ const int *avail = data->chip_info->oversampling_temp_avail;
+ const int n = data->chip_info->num_oversampling_temp_avail;
+
+ for (i = 0; i < n; i++) {
+ if (avail[i] == val) {
+ data->oversampling_temp = ilog2(val);
+
+ return data->chip_info->chip_config(data);
+ }
+ }
+ return -EINVAL;
+}
+
+static int bmp280_write_oversampling_ratio_press(struct bmp280_data *data,
+ int val)
+{
+ int i;
+ const int *avail = data->chip_info->oversampling_press_avail;
+ const int n = data->chip_info->num_oversampling_press_avail;
+
+ for (i = 0; i < n; i++) {
+ if (avail[i] == val) {
+ data->oversampling_press = ilog2(val);
+
+ return data->chip_info->chip_config(data);
+ }
+ }
+ return -EINVAL;
+}
+
+static int bmp280_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int ret = 0;
+ struct bmp280_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ pm_runtime_get_sync(data->dev);
+ mutex_lock(&data->lock);
+ switch (chan->type) {
+ case IIO_HUMIDITYRELATIVE:
+ ret = bmp280_write_oversampling_ratio_humid(data, val);
+ break;
+ case IIO_PRESSURE:
+ ret = bmp280_write_oversampling_ratio_press(data, val);
+ break;
+ case IIO_TEMP:
+ ret = bmp280_write_oversampling_ratio_temp(data, val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ mutex_unlock(&data->lock);
+ pm_runtime_mark_last_busy(data->dev);
+ pm_runtime_put_autosuspend(data->dev);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static ssize_t bmp280_show_avail(char *buf, const int *vals, const int n)
+{
+ size_t len = 0;
+ int i;
+
+ for (i = 0; i < n; i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d ", vals[i]);
+
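+ /* Overwrite the trailing space with a newline */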
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static ssize_t bmp280_show_temp_oversampling_avail(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bmp280_data *data = iio_priv(dev_to_iio_dev(dev));
+
+ return bmp280_show_avail(buf, data->chip_info->oversampling_temp_avail,
+ data->chip_info->num_oversampling_temp_avail);
+}
+
+static ssize_t bmp280_show_press_oversampling_avail(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bmp280_data *data = iio_priv(dev_to_iio_dev(dev));
+
+ return bmp280_show_avail(buf, data->chip_info->oversampling_press_avail,
+ data->chip_info->num_oversampling_press_avail);
+}
+
+static IIO_DEVICE_ATTR(in_temp_oversampling_ratio_available,
+ S_IRUGO, bmp280_show_temp_oversampling_avail, NULL, 0);
+
+static IIO_DEVICE_ATTR(in_pressure_oversampling_ratio_available,
+ S_IRUGO, bmp280_show_press_oversampling_avail, NULL, 0);
+
+static struct attribute *bmp280_attributes[] = {
+ &iio_dev_attr_in_temp_oversampling_ratio_available.dev_attr.attr,
+ &iio_dev_attr_in_pressure_oversampling_ratio_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group bmp280_attrs_group = {
+ .attrs = bmp280_attributes,
+};
+
+static const struct iio_info bmp280_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &bmp280_read_raw,
+ .write_raw = &bmp280_write_raw,
+ .attrs = &bmp280_attrs_group,
+};
+
+static int bmp280_chip_config(struct bmp280_data *data)
+{
+ int ret;
+ u8 osrs = BMP280_OSRS_TEMP_X(data->oversampling_temp + 1) |
+ BMP280_OSRS_PRESS_X(data->oversampling_press + 1);
+
+ ret = regmap_update_bits(data->regmap, BMP280_REG_CTRL_MEAS,
+ BMP280_OSRS_TEMP_MASK |
+ BMP280_OSRS_PRESS_MASK |
+ BMP280_MODE_MASK,
+ osrs | BMP280_MODE_NORMAL);
+ if (ret < 0) {
+ dev_err(data->dev,
+ "failed to write ctrl_meas register\n");
+ return ret;
+ }
+
+ ret = regmap_update_bits(data->regmap, BMP280_REG_CONFIG,
+ BMP280_FILTER_MASK,
+ BMP280_FILTER_4X);
+ if (ret < 0) {
+ dev_err(data->dev,
+ "failed to write config register\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static const int bmp280_oversampling_avail[] = { 1, 2, 4, 8, 16 };
+
+static const struct bmp280_chip_info bmp280_chip_info = {
+ .oversampling_temp_avail = bmp280_oversampling_avail,
+ .num_oversampling_temp_avail = ARRAY_SIZE(bmp280_oversampling_avail),
+
+ .oversampling_press_avail = bmp280_oversampling_avail,
+ .num_oversampling_press_avail = ARRAY_SIZE(bmp280_oversampling_avail),
+
+ .chip_config = bmp280_chip_config,
+ .read_temp = bmp280_read_temp,
+ .read_press = bmp280_read_press,
+};
+
+static int bme280_chip_config(struct bmp280_data *data)
+{
+ int ret = bmp280_chip_config(data);
+ u8 osrs = BMP280_OSRS_HUMIDITIY_X(data->oversampling_humid + 1);
+
+ if (ret < 0)
+ return ret;
+
+ return regmap_update_bits(data->regmap, BMP280_REG_CTRL_HUMIDITY,
+ BMP280_OSRS_HUMIDITY_MASK, osrs);
+}
+
+static const struct bmp280_chip_info bme280_chip_info = {
+ .oversampling_temp_avail = bmp280_oversampling_avail,
+ .num_oversampling_temp_avail = ARRAY_SIZE(bmp280_oversampling_avail),
+
+ .oversampling_press_avail = bmp280_oversampling_avail,
+ .num_oversampling_press_avail = ARRAY_SIZE(bmp280_oversampling_avail),
+
+ .oversampling_humid_avail = bmp280_oversampling_avail,
+ .num_oversampling_humid_avail = ARRAY_SIZE(bmp280_oversampling_avail),
+
+ .chip_config = bme280_chip_config,
+ .read_temp = bmp280_read_temp,
+ .read_press = bmp280_read_press,
+ .read_humid = bmp280_read_humid,
+};
+
+static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas)
+{
+ int ret;
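+ /* Maximum conversion times in us per oversampling setting (datasheet) */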
+ const int conversion_time_max[] = { 4500, 7500, 13500, 25500 };
+ unsigned int delay_us;
+ unsigned int ctrl;
+
+ if (data->use_eoc)
+ init_completion(&data->done);
+
+ ret = regmap_write(data->regmap, BMP280_REG_CTRL_MEAS, ctrl_meas);
+ if (ret)
+ return ret;
+
+ if (data->use_eoc) {
+ /*
+ * If we have a completion interrupt, use it and wait up to
+ * 100ms. The longest conversion time listed is 76.5 ms for
+ * advanced resolution mode.
+ */
+ ret = wait_for_completion_timeout(&data->done,
+ 1 + msecs_to_jiffies(100));
+ if (!ret)
+ dev_err(data->dev, "timeout waiting for completion\n");
+ } else {
+ if (ctrl_meas == BMP180_MEAS_TEMP)
+ delay_us = 4500;
+ else
+ delay_us =
+ conversion_time_max[data->oversampling_press];
+
+ usleep_range(delay_us, delay_us + 1000);
+ }
+
+ ret = regmap_read(data->regmap, BMP280_REG_CTRL_MEAS, &ctrl);
+ if (ret)
+ return ret;
+
+ /* The value of this bit resets to "0" after conversion is complete */
+ if (ctrl & BMP180_MEAS_SCO)
+ return -EIO;
+
+ return 0;
+}
+
+static int bmp180_read_adc_temp(struct bmp280_data *data, int *val)
+{
+ int ret;
+ __be16 tmp = 0;
+
+ ret = bmp180_measure(data, BMP180_MEAS_TEMP);
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_read(data->regmap, BMP180_REG_OUT_MSB, (u8 *)&tmp, 2);
+ if (ret)
+ return ret;
+
+ *val = be16_to_cpu(tmp);
+
+ return 0;
+}
+
+static int bmp180_read_calib(struct bmp280_data *data,
+ struct bmp180_calib *calib)
+{
+ int ret;
+ int i;
+ __be16 buf[BMP180_REG_CALIB_COUNT / 2];
+
+ ret = regmap_bulk_read(data->regmap, BMP180_REG_CALIB_START, buf,
+ sizeof(buf));
+
+ if (ret < 0)
+ return ret;
+
+ /* None of the words has the value 0 or 0xFFFF */
+ for (i = 0; i < ARRAY_SIZE(buf); i++) {
+ if (buf[i] == cpu_to_be16(0) || buf[i] == cpu_to_be16(0xffff))
+ return -EIO;
+ }
+
+ /* Toss the calibration data into the entropy pool */
+ add_device_randomness(buf, sizeof(buf));
+
+ calib->AC1 = be16_to_cpu(buf[AC1]);
+ calib->AC2 = be16_to_cpu(buf[AC2]);
+ calib->AC3 = be16_to_cpu(buf[AC3]);
+ calib->AC4 = be16_to_cpu(buf[AC4]);
+ calib->AC5 = be16_to_cpu(buf[AC5]);
+ calib->AC6 = be16_to_cpu(buf[AC6]);
+ calib->B1 = be16_to_cpu(buf[B1]);
+ calib->B2 = be16_to_cpu(buf[B2]);
+ calib->MB = be16_to_cpu(buf[MB]);
+ calib->MC = be16_to_cpu(buf[MC]);
+ calib->MD = be16_to_cpu(buf[MD]);
+
+ return 0;
+}
+
+/*
+ * Returns temperature in DegC, resolution is 0.1 DegC.
+ * t_fine carries the fine temperature as a global value.
+ *
+ * Taken from datasheet, Section 3.5, "Calculating pressure and temperature".
+ */
+static s32 bmp180_compensate_temp(struct bmp280_data *data, s32 adc_temp)
+{
+ s32 x1, x2;
+ struct bmp180_calib *calib = &data->calib;
+
+ x1 = ((adc_temp - calib->AC6) * calib->AC5) >> 15;
+ x2 = (calib->MC << 11) / (x1 + calib->MD);
+ data->t_fine = x1 + x2;
+
+ return (data->t_fine + 8) >> 4;
+}
+
+static int bmp180_read_temp(struct bmp280_data *data, int *val)
+{
+ int ret;
+ s32 adc_temp, comp_temp;
+
+ ret = bmp180_read_adc_temp(data, &adc_temp);
+ if (ret)
+ return ret;
+
+ comp_temp = bmp180_compensate_temp(data, adc_temp);
+
+ /*
+ * val might be NULL if we're called by the read_press routine,
+ * which only cares about the carried-over t_fine value.
+ */
+ if (val) {
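+ /* comp_temp has a resolution of 0.1 degC, scale it to milli degC */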
+ *val = comp_temp * 100;
+ return IIO_VAL_INT;
+ }
+
+ return 0;
+}
+
+static int bmp180_read_adc_press(struct bmp280_data *data, int *val)
+{
+ int ret;
+ __be32 tmp = 0;
+ u8 oss = data->oversampling_press;
+
+ ret = bmp180_measure(data, BMP180_MEAS_PRESS_X(oss));
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_read(data->regmap, BMP180_REG_OUT_MSB, (u8 *)&tmp, 3);
+ if (ret)
+ return ret;
+
+ *val = (be32_to_cpu(tmp) >> 8) >> (8 - oss);
+
+ return 0;
+}
+
+/*
+ * Returns pressure in Pa, resolution is 1 Pa.
+ *
+ * Taken from datasheet, Section 3.5, "Calculating pressure and temperature".
+ */
+static u32 bmp180_compensate_press(struct bmp280_data *data, s32 adc_press)
+{
+ s32 x1, x2, x3, p;
+ s32 b3, b6;
+ u32 b4, b7;
+ s32 oss = data->oversampling_press;
+ struct bmp180_calib *calib = &data->calib;
+
+ b6 = data->t_fine - 4000;
+ x1 = (calib->B2 * (b6 * b6 >> 12)) >> 11;
+ x2 = calib->AC2 * b6 >> 11;
+ x3 = x1 + x2;
+ b3 = ((((s32)calib->AC1 * 4 + x3) << oss) + 2) / 4;
+ x1 = calib->AC3 * b6 >> 13;
+ x2 = (calib->B1 * ((b6 * b6) >> 12)) >> 16;
+ x3 = (x1 + x2 + 2) >> 2;
+ b4 = calib->AC4 * (u32)(x3 + 32768) >> 15;
+ b7 = ((u32)adc_press - b3) * (50000 >> oss);
+ if (b7 < 0x80000000)
+ p = (b7 * 2) / b4;
+ else
+ p = (b7 / b4) * 2;
+
+ x1 = (p >> 8) * (p >> 8);
+ x1 = (x1 * 3038) >> 16;
+ x2 = (-7357 * p) >> 16;
+
+ return p + ((x1 + x2 + 3791) >> 4);
+}
+
+static int bmp180_read_press(struct bmp280_data *data,
+ int *val, int *val2)
+{
+ int ret;
+ s32 adc_press;
+ u32 comp_press;
+
+ /* Read and compensate temperature so we get a reading of t_fine. */
+ ret = bmp180_read_temp(data, NULL);
+ if (ret)
+ return ret;
+
+ ret = bmp180_read_adc_press(data, &adc_press);
+ if (ret)
+ return ret;
+
+ comp_press = bmp180_compensate_press(data, adc_press);
+
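+ /* comp_press is in Pa, dividing by 1000 yields the kPa IIO expects */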
+ *val = comp_press;
+ *val2 = 1000;
+
+ return IIO_VAL_FRACTIONAL;
+}
+
+static int bmp180_chip_config(struct bmp280_data *data)
+{
+ return 0;
+}
+
+static const int bmp180_oversampling_temp_avail[] = { 1 };
+static const int bmp180_oversampling_press_avail[] = { 1, 2, 4, 8 };
+
+static const struct bmp280_chip_info bmp180_chip_info = {
+ .oversampling_temp_avail = bmp180_oversampling_temp_avail,
+ .num_oversampling_temp_avail =
+ ARRAY_SIZE(bmp180_oversampling_temp_avail),
+
+ .oversampling_press_avail = bmp180_oversampling_press_avail,
+ .num_oversampling_press_avail =
+ ARRAY_SIZE(bmp180_oversampling_press_avail),
+
+ .chip_config = bmp180_chip_config,
+ .read_temp = bmp180_read_temp,
+ .read_press = bmp180_read_press,
+};
+
+static irqreturn_t bmp085_eoc_irq(int irq, void *d)
+{
+ struct bmp280_data *data = d;
+
+ complete(&data->done);
+
+ return IRQ_HANDLED;
+}
+
+static int bmp085_fetch_eoc_irq(struct device *dev,
+ const char *name,
+ int irq,
+ struct bmp280_data *data)
+{
+ unsigned long irq_trig;
+ int ret;
+
+ irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
+ if (irq_trig != IRQF_TRIGGER_RISING) {
+ dev_err(dev, "non-rising trigger given for EOC interrupt, "
+ "trying to enforce it\n");
+ irq_trig = IRQF_TRIGGER_RISING;
+ }
+ ret = devm_request_threaded_irq(dev,
+ irq,
+ bmp085_eoc_irq,
+ NULL,
+ irq_trig,
+ name,
+ data);
+ if (ret) {
+ /* Bail out without IRQ but keep the driver in place */
+ dev_err(dev, "unable to request DRDY IRQ\n");
+ return 0;
+ }
+
+ data->use_eoc = true;
+ return 0;
+}
+
+int bmp280_common_probe(struct device *dev,
+ struct regmap *regmap,
+ unsigned int chip,
+ const char *name,
+ int irq)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct bmp280_data *data;
+ unsigned int chip_id;
+ struct gpio_desc *gpiod;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ mutex_init(&data->lock);
+ data->dev = dev;
+
+ indio_dev->dev.parent = dev;
+ indio_dev->name = name;
+ indio_dev->channels = bmp280_channels;
+ indio_dev->info = &bmp280_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ switch (chip) {
+ case BMP180_CHIP_ID:
+ indio_dev->num_channels = 2;
+ data->chip_info = &bmp180_chip_info;
+ data->oversampling_press = ilog2(8);
+ data->oversampling_temp = ilog2(1);
+ data->start_up_time = 10;
+ break;
+ case BMP280_CHIP_ID:
+ indio_dev->num_channels = 2;
+ data->chip_info = &bmp280_chip_info;
+ data->oversampling_press = ilog2(16);
+ data->oversampling_temp = ilog2(2);
+ data->start_up_time = 2;
+ break;
+ case BME280_CHIP_ID:
+ indio_dev->num_channels = 3;
+ data->chip_info = &bme280_chip_info;
+ data->oversampling_press = ilog2(16);
+ data->oversampling_humid = ilog2(16);
+ data->oversampling_temp = ilog2(2);
+ data->start_up_time = 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Bring up regulators */
+ data->vddd = devm_regulator_get(dev, "vddd");
+ if (IS_ERR(data->vddd)) {
+ dev_err(dev, "failed to get VDDD regulator\n");
+ return PTR_ERR(data->vddd);
+ }
+ ret = regulator_enable(data->vddd);
+ if (ret) {
+ dev_err(dev, "failed to enable VDDD regulator\n");
+ return ret;
+ }
+ data->vdda = devm_regulator_get(dev, "vdda");
+ if (IS_ERR(data->vdda)) {
+ dev_err(dev, "failed to get VDDA regulator\n");
+ ret = PTR_ERR(data->vdda);
+ goto out_disable_vddd;
+ }
+ ret = regulator_enable(data->vdda);
+ if (ret) {
+ dev_err(dev, "failed to enable VDDA regulator\n");
+ goto out_disable_vddd;
+ }
+ /* Wait to make sure we started up properly */
+ mdelay(data->start_up_time);
+
+ /* Bring chip out of reset if there is an assigned GPIO line */
+ gpiod = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ /* Deassert the signal */
+ if (!IS_ERR(gpiod)) {
+ dev_info(dev, "release reset\n");
+ gpiod_set_value(gpiod, 0);
+ }
+
+ data->regmap = regmap;
+ ret = regmap_read(regmap, BMP280_REG_ID, &chip_id);
+ if (ret < 0)
+ goto out_disable_vdda;
+ if (chip_id != chip) {
+ dev_err(dev, "bad chip id: expected %x got %x\n",
+ chip, chip_id);
+ ret = -EINVAL;
+ goto out_disable_vdda;
+ }
+
+ ret = data->chip_info->chip_config(data);
+ if (ret < 0)
+ goto out_disable_vdda;
+
+ dev_set_drvdata(dev, indio_dev);
+
+ /*
+ * The BMP085 and BMP180 have calibration in an EEPROM, read it out
+ * at probe time. It will not change.
+ */
+ if (chip_id == BMP180_CHIP_ID) {
+ ret = bmp180_read_calib(data, &data->calib);
+ if (ret < 0) {
+ dev_err(data->dev,
+ "failed to read calibration coefficients\n");
+ goto out_disable_vdda;
+ }
+ }
+
+ /*
+ * Attempt to grab an optional EOC IRQ - only the BMP085 has this.
+ * However, as it happens, the BMP085 shares the chip ID of the BMP180,
+ * so we look for an IRQ if we have that.
+ */
+ if (irq > 0 || (chip_id == BMP180_CHIP_ID)) {
+ ret = bmp085_fetch_eoc_irq(dev, name, irq, data);
+ if (ret)
+ goto out_disable_vdda;
+ }
+
+ /* Enable runtime PM */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ /*
+ * Set autosuspend to two orders of magnitude larger than the
+ * start-up time.
+ */
+ pm_runtime_set_autosuspend_delay(dev, data->start_up_time * 100);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_put(dev);
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto out_runtime_pm_disable;
+
+ return 0;
+
+out_runtime_pm_disable:
+ pm_runtime_get_sync(data->dev);
+ pm_runtime_put_noidle(data->dev);
+ pm_runtime_disable(data->dev);
+out_disable_vdda:
+ regulator_disable(data->vdda);
+out_disable_vddd:
+ regulator_disable(data->vddd);
+ return ret;
+}
+EXPORT_SYMBOL(bmp280_common_probe);
+
+int bmp280_common_remove(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct bmp280_data *data = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ pm_runtime_get_sync(data->dev);
+ pm_runtime_put_noidle(data->dev);
+ pm_runtime_disable(data->dev);
+ regulator_disable(data->vdda);
+ regulator_disable(data->vddd);
+ return 0;
+}
+EXPORT_SYMBOL(bmp280_common_remove);
+
+#ifdef CONFIG_PM
+static int bmp280_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct bmp280_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = regulator_disable(data->vdda);
+ if (ret)
+ return ret;
+ return regulator_disable(data->vddd);
+}
+
+static int bmp280_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct bmp280_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = regulator_enable(data->vddd);
+ if (ret)
+ return ret;
+ ret = regulator_enable(data->vdda);
+ if (ret)
+ return ret;
+ msleep(data->start_up_time);
+ return data->chip_info->chip_config(data);
+}
+#endif /* CONFIG_PM */
+
+const struct dev_pm_ops bmp280_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(bmp280_runtime_suspend,
+ bmp280_runtime_resume, NULL)
+};
+EXPORT_SYMBOL(bmp280_dev_pm_ops);
+
+MODULE_AUTHOR("Vlad Dogaru <vlad.dogaru@intel.com>");
+MODULE_DESCRIPTION("Driver for Bosch Sensortec BMP180/BMP280 pressure and temperature sensor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/pressure/bmp280-i2c.c b/drivers/iio/pressure/bmp280-i2c.c
new file mode 100644
index 000000000..03742b15b
--- /dev/null
+++ b/drivers/iio/pressure/bmp280-i2c.c
@@ -0,0 +1,91 @@
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/acpi.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include "bmp280.h"
+
+static int bmp280_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct regmap *regmap;
+ const struct regmap_config *regmap_config;
+
+ switch (id->driver_data) {
+ case BMP180_CHIP_ID:
+ regmap_config = &bmp180_regmap_config;
+ break;
+ case BMP280_CHIP_ID:
+ case BME280_CHIP_ID:
+ regmap_config = &bmp280_regmap_config;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap = devm_regmap_init_i2c(client, regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "failed to allocate register map\n");
+ return PTR_ERR(regmap);
+ }
+
+ return bmp280_common_probe(&client->dev,
+ regmap,
+ id->driver_data,
+ id->name,
+ client->irq);
+}
+
+static int bmp280_i2c_remove(struct i2c_client *client)
+{
+ return bmp280_common_remove(&client->dev);
+}
+
+static const struct acpi_device_id bmp280_acpi_i2c_match[] = {
+ {"BMP0280", BMP280_CHIP_ID },
+ {"BMP0180", BMP180_CHIP_ID },
+ {"BMP0085", BMP180_CHIP_ID },
+ {"BME0280", BME280_CHIP_ID },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, bmp280_acpi_i2c_match);
+
+#ifdef CONFIG_OF
+static const struct of_device_id bmp280_of_i2c_match[] = {
+ { .compatible = "bosch,bme280", .data = (void *)BME280_CHIP_ID },
+ { .compatible = "bosch,bmp280", .data = (void *)BMP280_CHIP_ID },
+ { .compatible = "bosch,bmp180", .data = (void *)BMP180_CHIP_ID },
+ { .compatible = "bosch,bmp085", .data = (void *)BMP180_CHIP_ID },
+ { },
+};
+MODULE_DEVICE_TABLE(of, bmp280_of_i2c_match);
+#else
+#define bmp280_of_i2c_match NULL
+#endif
+
+static const struct i2c_device_id bmp280_i2c_id[] = {
+ {"bmp280", BMP280_CHIP_ID },
+ {"bmp180", BMP180_CHIP_ID },
+ {"bmp085", BMP180_CHIP_ID },
+ {"bme280", BME280_CHIP_ID },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, bmp280_i2c_id);
+
+static struct i2c_driver bmp280_i2c_driver = {
+ .driver = {
+ .name = "bmp280",
+ .acpi_match_table = ACPI_PTR(bmp280_acpi_i2c_match),
+ .of_match_table = of_match_ptr(bmp280_of_i2c_match),
+ .pm = &bmp280_dev_pm_ops,
+ },
+ .probe = bmp280_i2c_probe,
+ .remove = bmp280_i2c_remove,
+ .id_table = bmp280_i2c_id,
+};
+module_i2c_driver(bmp280_i2c_driver);
+
+MODULE_AUTHOR("Vlad Dogaru <vlad.dogaru@intel.com>");
+MODULE_DESCRIPTION("Driver for Bosch Sensortec BMP180/BMP280 pressure and temperature sensor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/pressure/bmp280-regmap.c b/drivers/iio/pressure/bmp280-regmap.c
new file mode 100644
index 000000000..6807113ec
--- /dev/null
+++ b/drivers/iio/pressure/bmp280-regmap.c
@@ -0,0 +1,84 @@
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "bmp280.h"
+
+static bool bmp180_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case BMP280_REG_CTRL_MEAS:
+ case BMP280_REG_RESET:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool bmp180_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case BMP180_REG_OUT_XLSB:
+ case BMP180_REG_OUT_LSB:
+ case BMP180_REG_OUT_MSB:
+ case BMP280_REG_CTRL_MEAS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+const struct regmap_config bmp180_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = BMP180_REG_OUT_XLSB,
+ .cache_type = REGCACHE_RBTREE,
+
+ .writeable_reg = bmp180_is_writeable_reg,
+ .volatile_reg = bmp180_is_volatile_reg,
+};
+EXPORT_SYMBOL(bmp180_regmap_config);
+
+static bool bmp280_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case BMP280_REG_CONFIG:
+ case BMP280_REG_CTRL_HUMIDITY:
+ case BMP280_REG_CTRL_MEAS:
+ case BMP280_REG_RESET:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool bmp280_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case BMP280_REG_HUMIDITY_LSB:
+ case BMP280_REG_HUMIDITY_MSB:
+ case BMP280_REG_TEMP_XLSB:
+ case BMP280_REG_TEMP_LSB:
+ case BMP280_REG_TEMP_MSB:
+ case BMP280_REG_PRESS_XLSB:
+ case BMP280_REG_PRESS_LSB:
+ case BMP280_REG_PRESS_MSB:
+ case BMP280_REG_STATUS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+const struct regmap_config bmp280_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = BMP280_REG_HUMIDITY_LSB,
+ .cache_type = REGCACHE_RBTREE,
+
+ .writeable_reg = bmp280_is_writeable_reg,
+ .volatile_reg = bmp280_is_volatile_reg,
+};
+EXPORT_SYMBOL(bmp280_regmap_config);
diff --git a/drivers/iio/pressure/bmp280-spi.c b/drivers/iio/pressure/bmp280-spi.c
new file mode 100644
index 000000000..17bc95586
--- /dev/null
+++ b/drivers/iio/pressure/bmp280-spi.c
@@ -0,0 +1,125 @@
+/*
+ * SPI interface for the BMP280 driver
+ *
+ * Inspired by the older BMP085 driver drivers/misc/bmp085-spi.c
+ */
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/err.h>
+#include <linux/regmap.h>
+
+#include "bmp280.h"
+
+static int bmp280_regmap_spi_write(void *context, const void *data,
+ size_t count)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+ u8 buf[2];
+
+ memcpy(buf, data, 2);
+ /*
+ * The SPI register address is the full register address with bit 7
+ * cleared; bit 7 is the RW flag and '0' marks the access as a write.
+ */
+ buf[0] &= ~0x80;
+
+ return spi_write_then_read(spi, buf, 2, NULL, 0);
+}
+
+static int bmp280_regmap_spi_read(void *context, const void *reg,
+ size_t reg_size, void *val, size_t val_size)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+
+ return spi_write_then_read(spi, reg, reg_size, val, val_size);
+}
+
+static struct regmap_bus bmp280_regmap_bus = {
+ .write = bmp280_regmap_spi_write,
+ .read = bmp280_regmap_spi_read,
+ .reg_format_endian_default = REGMAP_ENDIAN_BIG,
+ .val_format_endian_default = REGMAP_ENDIAN_BIG,
+};
+
+static int bmp280_spi_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct regmap *regmap;
+ const struct regmap_config *regmap_config;
+ int ret;
+
+ spi->bits_per_word = 8;
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(&spi->dev, "spi_setup failed!\n");
+ return ret;
+ }
+
+ switch (id->driver_data) {
+ case BMP180_CHIP_ID:
+ regmap_config = &bmp180_regmap_config;
+ break;
+ case BMP280_CHIP_ID:
+ case BME280_CHIP_ID:
+ regmap_config = &bmp280_regmap_config;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap = devm_regmap_init(&spi->dev,
+ &bmp280_regmap_bus,
+ &spi->dev,
+ regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&spi->dev, "failed to allocate register map\n");
+ return PTR_ERR(regmap);
+ }
+
+ return bmp280_common_probe(&spi->dev,
+ regmap,
+ id->driver_data,
+ id->name,
+ spi->irq);
+}
+
+static int bmp280_spi_remove(struct spi_device *spi)
+{
+ return bmp280_common_remove(&spi->dev);
+}
+
+static const struct of_device_id bmp280_of_spi_match[] = {
+ { .compatible = "bosch,bmp085", },
+ { .compatible = "bosch,bmp180", },
+ { .compatible = "bosch,bmp181", },
+ { .compatible = "bosch,bmp280", },
+ { .compatible = "bosch,bme280", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, bmp280_of_spi_match);
+
+static const struct spi_device_id bmp280_spi_id[] = {
+ { "bmp180", BMP180_CHIP_ID },
+ { "bmp181", BMP180_CHIP_ID },
+ { "bmp280", BMP280_CHIP_ID },
+ { "bme280", BME280_CHIP_ID },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, bmp280_spi_id);
+
+static struct spi_driver bmp280_spi_driver = {
+ .driver = {
+ .name = "bmp280",
+ .of_match_table = bmp280_of_spi_match,
+ .pm = &bmp280_dev_pm_ops,
+ },
+ .id_table = bmp280_spi_id,
+ .probe = bmp280_spi_probe,
+ .remove = bmp280_spi_remove,
+};
+module_spi_driver(bmp280_spi_driver);
+
+MODULE_DESCRIPTION("BMP280 SPI bus driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/pressure/bmp280.h b/drivers/iio/pressure/bmp280.h
new file mode 100644
index 000000000..2c770e13b
--- /dev/null
+++ b/drivers/iio/pressure/bmp280.h
@@ -0,0 +1,112 @@
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+/* BMP280 specific registers */
+#define BMP280_REG_HUMIDITY_LSB 0xFE
+#define BMP280_REG_HUMIDITY_MSB 0xFD
+#define BMP280_REG_TEMP_XLSB 0xFC
+#define BMP280_REG_TEMP_LSB 0xFB
+#define BMP280_REG_TEMP_MSB 0xFA
+#define BMP280_REG_PRESS_XLSB 0xF9
+#define BMP280_REG_PRESS_LSB 0xF8
+#define BMP280_REG_PRESS_MSB 0xF7
+
+#define BMP280_REG_CONFIG 0xF5
+#define BMP280_REG_CTRL_MEAS 0xF4
+#define BMP280_REG_STATUS 0xF3
+#define BMP280_REG_CTRL_HUMIDITY 0xF2
+
+/* Due to non-linear mapping and differing data sizes, we can't do a bulk read */
+#define BMP280_REG_COMP_H1 0xA1
+#define BMP280_REG_COMP_H2 0xE1
+#define BMP280_REG_COMP_H3 0xE3
+#define BMP280_REG_COMP_H4 0xE4
+#define BMP280_REG_COMP_H5 0xE5
+#define BMP280_REG_COMP_H6 0xE7
+
+#define BMP280_REG_COMP_TEMP_START 0x88
+#define BMP280_COMP_TEMP_REG_COUNT 6
+
+#define BMP280_REG_COMP_PRESS_START 0x8E
+#define BMP280_COMP_PRESS_REG_COUNT 18
+
+#define BMP280_FILTER_MASK (BIT(4) | BIT(3) | BIT(2))
+#define BMP280_FILTER_OFF 0
+#define BMP280_FILTER_2X BIT(2)
+#define BMP280_FILTER_4X BIT(3)
+#define BMP280_FILTER_8X (BIT(3) | BIT(2))
+#define BMP280_FILTER_16X BIT(4)
+
+#define BMP280_OSRS_HUMIDITY_MASK (BIT(2) | BIT(1) | BIT(0))
+#define BMP280_OSRS_HUMIDITIY_X(osrs_h) ((osrs_h) << 0)
+#define BMP280_OSRS_HUMIDITY_SKIP 0
+#define BMP280_OSRS_HUMIDITY_1X BMP280_OSRS_HUMIDITIY_X(1)
+#define BMP280_OSRS_HUMIDITY_2X BMP280_OSRS_HUMIDITIY_X(2)
+#define BMP280_OSRS_HUMIDITY_4X BMP280_OSRS_HUMIDITIY_X(3)
+#define BMP280_OSRS_HUMIDITY_8X BMP280_OSRS_HUMIDITIY_X(4)
+#define BMP280_OSRS_HUMIDITY_16X BMP280_OSRS_HUMIDITIY_X(5)
+
+#define BMP280_OSRS_TEMP_MASK (BIT(7) | BIT(6) | BIT(5))
+#define BMP280_OSRS_TEMP_SKIP 0
+#define BMP280_OSRS_TEMP_X(osrs_t) ((osrs_t) << 5)
+#define BMP280_OSRS_TEMP_1X BMP280_OSRS_TEMP_X(1)
+#define BMP280_OSRS_TEMP_2X BMP280_OSRS_TEMP_X(2)
+#define BMP280_OSRS_TEMP_4X BMP280_OSRS_TEMP_X(3)
+#define BMP280_OSRS_TEMP_8X BMP280_OSRS_TEMP_X(4)
+#define BMP280_OSRS_TEMP_16X BMP280_OSRS_TEMP_X(5)
+
+#define BMP280_OSRS_PRESS_MASK (BIT(4) | BIT(3) | BIT(2))
+#define BMP280_OSRS_PRESS_SKIP 0
+#define BMP280_OSRS_PRESS_X(osrs_p) ((osrs_p) << 2)
+#define BMP280_OSRS_PRESS_1X BMP280_OSRS_PRESS_X(1)
+#define BMP280_OSRS_PRESS_2X BMP280_OSRS_PRESS_X(2)
+#define BMP280_OSRS_PRESS_4X BMP280_OSRS_PRESS_X(3)
+#define BMP280_OSRS_PRESS_8X BMP280_OSRS_PRESS_X(4)
+#define BMP280_OSRS_PRESS_16X BMP280_OSRS_PRESS_X(5)
+
+#define BMP280_MODE_MASK (BIT(1) | BIT(0))
+#define BMP280_MODE_SLEEP 0
+#define BMP280_MODE_FORCED BIT(0)
+#define BMP280_MODE_NORMAL (BIT(1) | BIT(0))
+
+/* BMP180 specific registers */
+#define BMP180_REG_OUT_XLSB 0xF8
+#define BMP180_REG_OUT_LSB 0xF7
+#define BMP180_REG_OUT_MSB 0xF6
+
+#define BMP180_REG_CALIB_START 0xAA
+#define BMP180_REG_CALIB_COUNT 22
+
+#define BMP180_MEAS_SCO BIT(5)
+#define BMP180_MEAS_TEMP (0x0E | BMP180_MEAS_SCO)
+#define BMP180_MEAS_PRESS_X(oss) ((oss) << 6 | 0x14 | BMP180_MEAS_SCO)
+#define BMP180_MEAS_PRESS_1X BMP180_MEAS_PRESS_X(0)
+#define BMP180_MEAS_PRESS_2X BMP180_MEAS_PRESS_X(1)
+#define BMP180_MEAS_PRESS_4X BMP180_MEAS_PRESS_X(2)
+#define BMP180_MEAS_PRESS_8X BMP180_MEAS_PRESS_X(3)
+
+/* BMP180 and BMP280 common registers */
+#define BMP280_REG_CTRL_MEAS 0xF4
+#define BMP280_REG_RESET 0xE0
+#define BMP280_REG_ID 0xD0
+
+#define BMP180_CHIP_ID 0x55
+#define BMP280_CHIP_ID 0x58
+#define BME280_CHIP_ID 0x60
+#define BMP280_SOFT_RESET_VAL 0xB6
+
+/* Regmap configurations */
+extern const struct regmap_config bmp180_regmap_config;
+extern const struct regmap_config bmp280_regmap_config;
+
+/* Probe called from different transports */
+int bmp280_common_probe(struct device *dev,
+ struct regmap *regmap,
+ unsigned int chip,
+ const char *name,
+ int irq);
+int bmp280_common_remove(struct device *dev);
+
+/* PM ops */
+extern const struct dev_pm_ops bmp280_dev_pm_ops;
diff --git a/drivers/iio/pressure/hp206c.c b/drivers/iio/pressure/hp206c.c
index 90f2b6e4a..12f769e86 100644
--- a/drivers/iio/pressure/hp206c.c
+++ b/drivers/iio/pressure/hp206c.c
@@ -401,6 +401,7 @@ static const struct i2c_device_id hp206c_id[] = {
{"hp206c"},
{}
};
+MODULE_DEVICE_TABLE(i2c, hp206c_id);
#ifdef CONFIG_ACPI
static const struct acpi_device_id hp206c_acpi_match[] = {
diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c
index 01b2e0b18..6392d7b62 100644
--- a/drivers/iio/pressure/mpl3115.c
+++ b/drivers/iio/pressure/mpl3115.c
@@ -171,7 +171,7 @@ static irqreturn_t mpl3115_trigger_handler(int irq, void *p)
mutex_unlock(&data->lock);
iio_push_to_buffers_with_timestamp(indio_dev, buffer,
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index 76578b07b..feb41f82c 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -224,7 +224,8 @@ static irqreturn_t ms5611_trigger_handler(int irq, void *p)
if (ret < 0)
goto err;
- iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+ iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_get_time_ns(indio_dev));
err:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/pressure/ms5637.c b/drivers/iio/pressure/ms5637.c
index e68052c11..953ffbc0e 100644
--- a/drivers/iio/pressure/ms5637.c
+++ b/drivers/iio/pressure/ms5637.c
@@ -1,6 +1,6 @@
/*
- * ms5637.c - Support for Measurement-Specialties ms5637 and ms8607
- * pressure & temperature sensor
+ * ms5637.c - Support for Measurement-Specialties MS5637, MS5805,
+ * MS5837 and MS8607 pressure & temperature sensors
*
* Copyright (c) 2015 Measurement-Specialties
*
@@ -11,6 +11,10 @@
* Datasheet:
* http://www.meas-spec.com/downloads/MS5637-02BA03.pdf
* Datasheet:
+ * http://www.meas-spec.com/downloads/MS5805-02BA01.pdf
+ * Datasheet:
+ * http://www.meas-spec.com/downloads/MS5837-30BA.pdf
+ * Datasheet:
* http://www.meas-spec.com/downloads/MS8607-02BA01.pdf
*/
@@ -170,9 +174,12 @@ static int ms5637_probe(struct i2c_client *client,
static const struct i2c_device_id ms5637_id[] = {
{"ms5637", 0},
- {"ms8607-temppressure", 1},
+ {"ms5805", 0},
+ {"ms5837", 0},
+ {"ms8607-temppressure", 0},
{}
};
+MODULE_DEVICE_TABLE(i2c, ms5637_id);
static struct i2c_driver ms5637_driver = {
.probe = ms5637_probe,
diff --git a/drivers/iio/pressure/st_pressure.h b/drivers/iio/pressure/st_pressure.h
index f5f414900..903a21e46 100644
--- a/drivers/iio/pressure/st_pressure.h
+++ b/drivers/iio/pressure/st_pressure.h
@@ -17,6 +17,7 @@
#define LPS001WP_PRESS_DEV_NAME "lps001wp"
#define LPS25H_PRESS_DEV_NAME "lps25h"
#define LPS331AP_PRESS_DEV_NAME "lps331ap"
+#define LPS22HB_PRESS_DEV_NAME "lps22hb"
/**
* struct st_sensors_platform_data - default press platform data
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 92a118c3c..55df9a75e 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -28,6 +28,72 @@
#include <linux/iio/common/st_sensors.h>
#include "st_pressure.h"
+/*
+ * About determining pressure scaling factors
+ * ------------------------------------------
+ *
+ * Datasheets specify typical pressure sensitivity so that pressure is computed
+ * according to the following equation :
+ * pressure[mBar] = raw / sensitivity
+ * where :
+ * raw the 24 bits long raw sampled pressure
+ * sensitivity a scaling factor specified by the datasheet in LSB/mBar
+ *
+ * IIO ABI expects pressure to be expressed as kPascal, hence pressure should be
+ * computed according to :
+ * pressure[kPascal] = pressure[mBar] / 10
+ * = raw / (sensitivity * 10) (1)
+ *
+ * Finally, st_press_read_raw() returns pressure scaling factor as an
+ * IIO_VAL_INT_PLUS_NANO with a zero integral part and "gain" as decimal part.
+ * Therefore, from (1), "gain" becomes :
+ * gain = 10^9 / (sensitivity * 10)
+ * = 10^8 / sensitivity
+ *
+ * About determining temperature scaling factors and offsets
+ * ---------------------------------------------------------
+ *
+ * Datasheets specify typical temperature sensitivity and offset so that
+ * temperature is computed according to the following equation :
+ * temp[Celsius] = offset[Celsius] + (raw / sensitivity)
+ * where :
+ * raw the 16 bits long raw sampled temperature
+ * offset a constant specified by the datasheet in degree Celsius
+ * (sometimes zero)
+ * sensitivity a scaling factor specified by the datasheet in LSB/Celsius
+ *
+ * IIO ABI expects temperature to be expressed as milli degree Celsius such
+ * that user space should compute temperature according to :
+ * temp[mCelsius] = temp[Celsius] * 10^3
+ * = (offset[Celsius] + (raw / sensitivity)) * 10^3
+ * = ((offset[Celsius] * sensitivity) + raw) *
+ * (10^3 / sensitivity) (2)
+ *
+ * IIO ABI expects user space to apply offset and scaling factors to raw samples
+ * according to :
+ * temp[mCelsius] = (OFFSET + raw) * SCALE
+ * where :
+ * OFFSET an arbitrary constant exposed by device
+ * SCALE an arbitrary scaling factor exposed by device
+ *
+ * Matching OFFSET and SCALE with members of (2) gives :
+ * OFFSET = offset[Celsius] * sensitivity (3)
+ * SCALE = 10^3 / sensitivity (4)
+ *
+ * st_press_read_raw() returns temperature scaling factor as an
+ * IIO_VAL_FRACTIONAL with a 10^3 numerator and "gain2" as denominator.
+ * Therefore, from (4), "gain2" becomes :
+ * gain2 = sensitivity
+ *
+ * When declared within a channel, i.e. for a non-zero specified offset,
+ * st_press_read_raw() will return the latter as an IIO_VAL_FRACTIONAL such
+ * that :
+ * numerator = OFFSET * 10^3
+ * denominator = 10^3
+ * giving from (3):
+ * numerator = offset[Celsius] * 10^3 * sensitivity
+ * = offset[mCelsius] * gain2
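+ *
+ * For instance, with the default temperature sensitivity and offset
+ * declared below (480 LSB/Celsius, 42500 mCelsius) :
+ * gain2 = 480
+ * OFFSET = 42.5 * 480 = 20400
+ * so that user space computes :
+ * temp[mCelsius] = (20400 + raw) * 10^3 / 480
+ * = 42500 + raw * 10^3 / 480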
+ */
+
#define MCELSIUS_PER_CELSIUS 1000
/* Default pressure sensitivity */
@@ -39,8 +105,6 @@
#define ST_PRESS_LSB_PER_CELSIUS 480UL
#define ST_PRESS_MILLI_CELSIUS_OFFSET 42500UL
-#define ST_PRESS_NUMBER_DATA_CHANNELS 1
-
/* FULLSCALE */
#define ST_PRESS_FS_AVL_1100MB 1100
#define ST_PRESS_FS_AVL_1260MB 1260
@@ -48,7 +112,11 @@
#define ST_PRESS_1_OUT_XL_ADDR 0x28
#define ST_TEMP_1_OUT_L_ADDR 0x2b
-/* CUSTOM VALUES FOR LPS331AP SENSOR */
+/*
+ * CUSTOM VALUES FOR LPS331AP SENSOR
+ * See LPS331AP datasheet:
+ * http://www2.st.com/resource/en/datasheet/lps331ap.pdf
+ */
#define ST_PRESS_LPS331AP_WAI_EXP 0xbb
#define ST_PRESS_LPS331AP_ODR_ADDR 0x20
#define ST_PRESS_LPS331AP_ODR_MASK 0x70
@@ -71,7 +139,9 @@
#define ST_PRESS_LPS331AP_OD_IRQ_MASK 0x40
#define ST_PRESS_LPS331AP_MULTIREAD_BIT true
-/* CUSTOM VALUES FOR LPS001WP SENSOR */
+/*
+ * CUSTOM VALUES FOR THE OBSOLETE LPS001WP SENSOR
+ */
/* LPS001WP pressure resolution */
#define ST_PRESS_LPS001WP_LSB_PER_MBAR 16UL
@@ -94,7 +164,11 @@
#define ST_PRESS_LPS001WP_OUT_L_ADDR 0x28
#define ST_TEMP_LPS001WP_OUT_L_ADDR 0x2a
-/* CUSTOM VALUES FOR LPS25H SENSOR */
+/*
+ * CUSTOM VALUES FOR LPS25H SENSOR
+ * See LPS25H datasheet:
+ * http://www2.st.com/resource/en/datasheet/lps25h.pdf
+ */
#define ST_PRESS_LPS25H_WAI_EXP 0xbd
#define ST_PRESS_LPS25H_ODR_ADDR 0x20
#define ST_PRESS_LPS25H_ODR_MASK 0x70
@@ -117,27 +191,54 @@
#define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28
#define ST_TEMP_LPS25H_OUT_L_ADDR 0x2b
+/*
+ * CUSTOM VALUES FOR LPS22HB SENSOR
+ * See LPS22HB datasheet:
+ * http://www2.st.com/resource/en/datasheet/lps22hb.pdf
+ */
+
+/* LPS22HB temperature sensitivity */
+#define ST_PRESS_LPS22HB_LSB_PER_CELSIUS 100UL
+
+#define ST_PRESS_LPS22HB_WAI_EXP 0xb1
+#define ST_PRESS_LPS22HB_ODR_ADDR 0x10
+#define ST_PRESS_LPS22HB_ODR_MASK 0x70
+#define ST_PRESS_LPS22HB_ODR_AVL_1HZ_VAL 0x01
+#define ST_PRESS_LPS22HB_ODR_AVL_10HZ_VAL 0x02
+#define ST_PRESS_LPS22HB_ODR_AVL_25HZ_VAL 0x03
+#define ST_PRESS_LPS22HB_ODR_AVL_50HZ_VAL 0x04
+#define ST_PRESS_LPS22HB_ODR_AVL_75HZ_VAL 0x05
+#define ST_PRESS_LPS22HB_PW_ADDR 0x10
+#define ST_PRESS_LPS22HB_PW_MASK 0x70
+#define ST_PRESS_LPS22HB_BDU_ADDR 0x10
+#define ST_PRESS_LPS22HB_BDU_MASK 0x02
+#define ST_PRESS_LPS22HB_DRDY_IRQ_ADDR 0x12
+#define ST_PRESS_LPS22HB_DRDY_IRQ_INT1_MASK 0x04
+#define ST_PRESS_LPS22HB_DRDY_IRQ_INT2_MASK 0x08
+#define ST_PRESS_LPS22HB_IHL_IRQ_ADDR 0x12
+#define ST_PRESS_LPS22HB_IHL_IRQ_MASK 0x80
+#define ST_PRESS_LPS22HB_OD_IRQ_ADDR 0x12
+#define ST_PRESS_LPS22HB_OD_IRQ_MASK 0x40
+#define ST_PRESS_LPS22HB_MULTIREAD_BIT true
+
static const struct iio_chan_spec st_press_1_channels[] = {
{
.type = IIO_PRESSURE,
- .channel2 = IIO_NO_MOD,
.address = ST_PRESS_1_OUT_XL_ADDR,
- .scan_index = ST_SENSORS_SCAN_X,
+ .scan_index = 0,
.scan_type = {
.sign = 'u',
.realbits = 24,
- .storagebits = 24,
+ .storagebits = 32,
.endianness = IIO_LE,
},
.info_mask_separate =
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
- .modified = 0,
},
{
.type = IIO_TEMP,
- .channel2 = IIO_NO_MOD,
.address = ST_TEMP_1_OUT_L_ADDR,
- .scan_index = -1,
+ .scan_index = 1,
.scan_type = {
.sign = 'u',
.realbits = 16,
@@ -148,17 +249,15 @@ static const struct iio_chan_spec st_press_1_channels[] = {
BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_OFFSET),
- .modified = 0,
},
- IIO_CHAN_SOFT_TIMESTAMP(1)
+ IIO_CHAN_SOFT_TIMESTAMP(2)
};
static const struct iio_chan_spec st_press_lps001wp_channels[] = {
{
.type = IIO_PRESSURE,
- .channel2 = IIO_NO_MOD,
.address = ST_PRESS_LPS001WP_OUT_L_ADDR,
- .scan_index = ST_SENSORS_SCAN_X,
+ .scan_index = 0,
.scan_type = {
.sign = 'u',
.realbits = 16,
@@ -168,13 +267,11 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
.info_mask_separate =
BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_SCALE),
- .modified = 0,
},
{
.type = IIO_TEMP,
- .channel2 = IIO_NO_MOD,
.address = ST_TEMP_LPS001WP_OUT_L_ADDR,
- .scan_index = -1,
+ .scan_index = 1,
.scan_type = {
.sign = 'u',
.realbits = 16,
@@ -184,9 +281,42 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
.info_mask_separate =
BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_SCALE),
- .modified = 0,
},
- IIO_CHAN_SOFT_TIMESTAMP(1)
+ IIO_CHAN_SOFT_TIMESTAMP(2)
+};
+
+static const struct iio_chan_spec st_press_lps22hb_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .address = ST_PRESS_1_OUT_XL_ADDR,
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 24,
+ .storagebits = 32,
+ .endianness = IIO_LE,
+ },
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ },
+ {
+ .type = IIO_TEMP,
+ .address = ST_TEMP_1_OUT_L_ADDR,
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ },
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(2)
};
static const struct st_sensor_settings st_press_sensors_settings[] = {
@@ -346,6 +476,59 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.multi_read_bit = ST_PRESS_LPS25H_MULTIREAD_BIT,
.bootime = 2,
},
+ {
+ .wai = ST_PRESS_LPS22HB_WAI_EXP,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
+ .sensors_supported = {
+ [0] = LPS22HB_PRESS_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_press_lps22hb_channels,
+ .num_ch = ARRAY_SIZE(st_press_lps22hb_channels),
+ .odr = {
+ .addr = ST_PRESS_LPS22HB_ODR_ADDR,
+ .mask = ST_PRESS_LPS22HB_ODR_MASK,
+ .odr_avl = {
+ { 1, ST_PRESS_LPS22HB_ODR_AVL_1HZ_VAL, },
+ { 10, ST_PRESS_LPS22HB_ODR_AVL_10HZ_VAL, },
+ { 25, ST_PRESS_LPS22HB_ODR_AVL_25HZ_VAL, },
+ { 50, ST_PRESS_LPS22HB_ODR_AVL_50HZ_VAL, },
+ { 75, ST_PRESS_LPS22HB_ODR_AVL_75HZ_VAL, },
+ },
+ },
+ .pw = {
+ .addr = ST_PRESS_LPS22HB_PW_ADDR,
+ .mask = ST_PRESS_LPS22HB_PW_MASK,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .fs = {
+ .fs_avl = {
+ /*
+ * Pressure and temperature sensitivity values
+ * as defined in table 3 of LPS22HB datasheet.
+ */
+ [0] = {
+ .num = ST_PRESS_FS_AVL_1260MB,
+ .gain = ST_PRESS_KPASCAL_NANO_SCALE,
+ .gain2 = ST_PRESS_LPS22HB_LSB_PER_CELSIUS,
+ },
+ },
+ },
+ .bdu = {
+ .addr = ST_PRESS_LPS22HB_BDU_ADDR,
+ .mask = ST_PRESS_LPS22HB_BDU_MASK,
+ },
+ .drdy_irq = {
+ .addr = ST_PRESS_LPS22HB_DRDY_IRQ_ADDR,
+ .mask_int1 = ST_PRESS_LPS22HB_DRDY_IRQ_INT1_MASK,
+ .mask_int2 = ST_PRESS_LPS22HB_DRDY_IRQ_INT2_MASK,
+ .addr_ihl = ST_PRESS_LPS22HB_IHL_IRQ_ADDR,
+ .mask_ihl = ST_PRESS_LPS22HB_IHL_IRQ_MASK,
+ .addr_od = ST_PRESS_LPS22HB_OD_IRQ_ADDR,
+ .mask_od = ST_PRESS_LPS22HB_OD_IRQ_MASK,
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ },
+ .multi_read_bit = ST_PRESS_LPS22HB_MULTIREAD_BIT,
+ },
};
static int st_press_write_raw(struct iio_dev *indio_dev,
@@ -462,23 +645,30 @@ int st_press_common_probe(struct iio_dev *indio_dev)
indio_dev->info = &press_info;
mutex_init(&press_data->tb.buf_lock);
- st_sensors_power_enable(indio_dev);
+ err = st_sensors_power_enable(indio_dev);
+ if (err)
+ return err;
err = st_sensors_check_device_support(indio_dev,
ARRAY_SIZE(st_press_sensors_settings),
st_press_sensors_settings);
if (err < 0)
- return err;
-
- press_data->num_data_channels = ST_PRESS_NUMBER_DATA_CHANNELS;
+ goto st_press_power_off;
+
+ /*
+ * Skip the timestamping channel when declaring available channels to
+ * the common st_sensors layer. Look at st_sensors_get_buffer_element()
+ * to see how timestamps are explicitly pushed out as the last element
+ * of each sample block.
+ */
+ press_data->num_data_channels = press_data->sensor_settings->num_ch - 1;
press_data->multiread_bit = press_data->sensor_settings->multi_read_bit;
indio_dev->channels = press_data->sensor_settings->ch;
indio_dev->num_channels = press_data->sensor_settings->num_ch;
- if (press_data->sensor_settings->fs.addr != 0)
- press_data->current_fullscale =
- (struct st_sensor_fullscale_avl *)
- &press_data->sensor_settings->fs.fs_avl[0];
+ press_data->current_fullscale =
+ (struct st_sensor_fullscale_avl *)
+ &press_data->sensor_settings->fs.fs_avl[0];
press_data->odr = press_data->sensor_settings->odr.odr_avl[0].hz;
@@ -490,11 +680,11 @@ int st_press_common_probe(struct iio_dev *indio_dev)
err = st_sensors_init_sensor(indio_dev, press_data->dev->platform_data);
if (err < 0)
- return err;
+ goto st_press_power_off;
err = st_press_allocate_ring(indio_dev);
if (err < 0)
- return err;
+ goto st_press_power_off;
if (irq > 0) {
err = st_sensors_allocate_trigger(indio_dev,
@@ -517,6 +707,8 @@ st_press_device_register_error:
st_sensors_deallocate_trigger(indio_dev);
st_press_probe_trigger_error:
st_press_deallocate_ring(indio_dev);
+st_press_power_off:
+ st_sensors_power_disable(indio_dev);
return err;
}
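
[Editor's note] The reworked probe above is the standard kernel unwind idiom: each failure after st_sensors_power_enable() jumps to a single label that releases, in reverse order, everything acquired so far. A minimal runnable user-space analogue of that control-flow shape (malloc()/free() stand in for the power and ring resources; all names here are illustrative, not part of the driver):

#include <stdio.h>
#include <stdlib.h>

/* Same control flow as the reworked st_press_common_probe(): acquire,
 * then unwind through a label on any later failure. */
static int probe_like(int fail_second_step)
{
	char *power, *ring;
	int err = 0;

	power = malloc(16);	/* resource 1: nothing to undo if this fails */
	if (!power)
		return -1;

	ring = fail_second_step ? NULL : malloc(16);	/* resource 2 */
	if (!ring) {
		err = -1;
		goto power_off;	/* undo only what was already acquired */
	}

	free(ring);		/* success path */
power_off:
	free(power);
	return err;
}

int main(void)
{
	printf("ok=%d fail=%d\n", probe_like(0), probe_like(1));
	return 0;
}
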
diff --git a/drivers/iio/pressure/st_pressure_i2c.c b/drivers/iio/pressure/st_pressure_i2c.c
index 8fcf9766e..ed18701c6 100644
--- a/drivers/iio/pressure/st_pressure_i2c.c
+++ b/drivers/iio/pressure/st_pressure_i2c.c
@@ -32,6 +32,10 @@ static const struct of_device_id st_press_of_match[] = {
.compatible = "st,lps331ap-press",
.data = LPS331AP_PRESS_DEV_NAME,
},
+ {
+ .compatible = "st,lps22hb-press",
+ .data = LPS22HB_PRESS_DEV_NAME,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_press_of_match);
diff --git a/drivers/iio/pressure/st_pressure_spi.c b/drivers/iio/pressure/st_pressure_spi.c
index 40c0692ff..550508025 100644
--- a/drivers/iio/pressure/st_pressure_spi.c
+++ b/drivers/iio/pressure/st_pressure_spi.c
@@ -50,6 +50,7 @@ static const struct spi_device_id st_press_id_table[] = {
{ LPS001WP_PRESS_DEV_NAME },
{ LPS25H_PRESS_DEV_NAME },
{ LPS331AP_PRESS_DEV_NAME },
+ { LPS22HB_PRESS_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(spi, st_press_id_table);
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index a0aedda7d..5656deb17 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -231,10 +231,16 @@ static void as3935_event_work(struct work_struct *work)
{
struct as3935_state *st;
int val;
+ int ret;
st = container_of(work, struct as3935_state, work.work);
- as3935_read(st, AS3935_INT, &val);
+ ret = as3935_read(st, AS3935_INT, &val);
+ if (ret) {
+ dev_warn(&st->spi->dev, "read error\n");
+ return;
+ }
+
val &= AS3935_INT_MASK;
switch (val) {
@@ -242,7 +248,7 @@ static void as3935_event_work(struct work_struct *work)
iio_trigger_poll(st->trig);
break;
case AS3935_NOISE_INT:
- dev_warn(&st->spi->dev, "noise level is too high");
+ dev_warn(&st->spi->dev, "noise level is too high\n");
break;
}
}
@@ -346,7 +352,6 @@ static int as3935_probe(struct spi_device *spi)
st = iio_priv(indio_dev);
st->spi = spi;
- st->tune_cap = 0;
spi_set_drvdata(spi, indio_dev);
mutex_init(&st->lock);
@@ -468,4 +473,3 @@ module_spi_driver(as3935_driver);
MODULE_AUTHOR("Matt Ranostay <mranostay@gmail.com>");
MODULE_DESCRIPTION("AS3935 lightning sensor");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("spi:as3935");
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index 4f502386a..3141c3c16 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -203,22 +203,19 @@ static int lidar_read_raw(struct iio_dev *indio_dev,
struct lidar_data *data = iio_priv(indio_dev);
int ret = -EINVAL;
- mutex_lock(&indio_dev->mlock);
-
- if (iio_buffer_enabled(indio_dev) && mask == IIO_CHAN_INFO_RAW) {
- ret = -EBUSY;
- goto error_busy;
- }
-
switch (mask) {
case IIO_CHAN_INFO_RAW: {
u16 reg;
+ if (iio_device_claim_direct_mode(indio_dev))
+ return -EBUSY;
+
ret = lidar_get_measurement(data, &reg);
if (!ret) {
*val = reg;
ret = IIO_VAL_INT;
}
+ iio_device_release_direct_mode(indio_dev);
break;
}
case IIO_CHAN_INFO_SCALE:
@@ -228,9 +225,6 @@ static int lidar_read_raw(struct iio_dev *indio_dev,
break;
}
-error_busy:
- mutex_unlock(&indio_dev->mlock);
-
return ret;
}
@@ -244,7 +238,7 @@ static irqreturn_t lidar_trigger_handler(int irq, void *private)
ret = lidar_get_measurement(data, data->buffer);
if (!ret) {
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
} else if (ret != -EINVAL) {
dev_err(&data->client->dev, "cannot read LIDAR measurement");
}
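
[Editor's note] The lidar conversion above swaps open-coded indio_dev->mlock handling for the iio_device_claim_direct_mode()/iio_device_release_direct_mode() pair, which refuses raw reads while a buffer is enabled. A sketch of the resulting read_raw shape, assuming kernel context; my_get_measurement() is a hypothetical stand-in for the driver's own read helper:

static int my_read_raw(struct iio_dev *indio_dev,
		       struct iio_chan_spec const *chan,
		       int *val, int *val2, long mask)
{
	int ret;

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		/* Fails with -EBUSY while buffered capture is running. */
		ret = iio_device_claim_direct_mode(indio_dev);
		if (ret)
			return ret;
		ret = my_get_measurement(indio_dev, val);	/* hypothetical */
		/* Always release, success or error, to re-enable buffering. */
		iio_device_release_direct_mode(indio_dev);
		return ret ? ret : IIO_VAL_INT;
	default:
		return -EINVAL;
	}
}
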
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
index 66cd09a18..1d74b3aaf 100644
--- a/drivers/iio/proximity/sx9500.c
+++ b/drivers/iio/proximity/sx9500.c
@@ -492,7 +492,7 @@ static void sx9500_push_events(struct iio_dev *indio_dev)
dir = new_prox ? IIO_EV_DIR_FALLING : IIO_EV_DIR_RISING;
ev = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, chan,
IIO_EV_TYPE_THRESH, dir);
- iio_push_event(indio_dev, ev, iio_get_time_ns());
+ iio_push_event(indio_dev, ev, iio_get_time_ns(indio_dev));
data->prox_stat[chan] = new_prox;
}
}
@@ -669,7 +669,7 @@ static irqreturn_t sx9500_trigger_handler(int irq, void *private)
}
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
out:
mutex_unlock(&data->mutex);
diff --git a/drivers/iio/temperature/tsys02d.c b/drivers/iio/temperature/tsys02d.c
index ab6fe8f6f..c0a19a000 100644
--- a/drivers/iio/temperature/tsys02d.c
+++ b/drivers/iio/temperature/tsys02d.c
@@ -174,6 +174,7 @@ static const struct i2c_device_id tsys02d_id[] = {
{"tsys02d", 0},
{}
};
+MODULE_DEVICE_TABLE(i2c, tsys02d_id);
static struct i2c_driver tsys02d_driver = {
.probe = tsys02d_probe,
diff --git a/drivers/iio/trigger/Kconfig b/drivers/iio/trigger/Kconfig
index 519e6772f..809b2e7d5 100644
--- a/drivers/iio/trigger/Kconfig
+++ b/drivers/iio/trigger/Kconfig
@@ -24,6 +24,18 @@ config IIO_INTERRUPT_TRIGGER
To compile this driver as a module, choose M here: the
module will be called iio-trig-interrupt.
+config IIO_TIGHTLOOP_TRIGGER
+ tristate "A kthread based hammering loop trigger"
+ depends on IIO_SW_TRIGGER
+ help
+ An experimental trigger, used to allow sensors to be sampled as fast
+ as possible under the limitations of whatever else is going on.
+	  Uses a tight loop in a kthread. Will only work with lower-half-only
+	  trigger consumers.
+
+ To compile this driver as a module, choose M here: the
+ module will be called iio-trig-loop.
+
config IIO_SYSFS_TRIGGER
tristate "SYSFS trigger"
depends on SYSFS
diff --git a/drivers/iio/trigger/Makefile b/drivers/iio/trigger/Makefile
index fe06eb564..aab4dc233 100644
--- a/drivers/iio/trigger/Makefile
+++ b/drivers/iio/trigger/Makefile
@@ -7,3 +7,4 @@
obj-$(CONFIG_IIO_HRTIMER_TRIGGER) += iio-trig-hrtimer.o
obj-$(CONFIG_IIO_INTERRUPT_TRIGGER) += iio-trig-interrupt.o
obj-$(CONFIG_IIO_SYSFS_TRIGGER) += iio-trig-sysfs.o
+obj-$(CONFIG_IIO_TIGHTLOOP_TRIGGER) += iio-trig-loop.o
diff --git a/drivers/iio/trigger/iio-trig-loop.c b/drivers/iio/trigger/iio-trig-loop.c
new file mode 100644
index 000000000..dc6be28f9
--- /dev/null
+++ b/drivers/iio/trigger/iio-trig-loop.c
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2016 Jonathan Cameron <jic23@kernel.org>
+ *
+ * Licensed under the GPL-2.
+ *
+ * Based on a mashup of the hrtimer trigger and continuous sampling proposal of
+ * Gregor Boirie <gregor.boirie@parrot.com>
+ *
+ * Note this is still rather experimental and may eat babies.
+ *
+ * Todo
+ * * Protect against connection of devices that 'need' the top half
+ * handler.
+ * * Work out how to run top half handlers in this context if it is
+ * safe to do so (timestamp grabbing for example)
+ *
+ * Tested against a max1363. Used about 33% cpu for the thread and 20%
+ * for generic_buffer piping to /dev/null. Watermark set at 64 on a 128
+ * element kfifo buffer.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/irq_work.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/sw_trigger.h>
+
+struct iio_loop_info {
+ struct iio_sw_trigger swt;
+ struct task_struct *task;
+};
+
+static struct config_item_type iio_loop_type = {
+ .ct_owner = THIS_MODULE,
+};
+
+static int iio_loop_thread(void *data)
+{
+ struct iio_trigger *trig = data;
+
+ set_freezable();
+
+ do {
+ iio_trigger_poll_chained(trig);
+ } while (likely(!kthread_freezable_should_stop(NULL)));
+
+ return 0;
+}
+
+static int iio_loop_trigger_set_state(struct iio_trigger *trig, bool state)
+{
+ struct iio_loop_info *loop_trig = iio_trigger_get_drvdata(trig);
+
+ if (state) {
+ loop_trig->task = kthread_run(iio_loop_thread,
+ trig, trig->name);
+ if (unlikely(IS_ERR(loop_trig->task))) {
+ dev_err(&trig->dev,
+ "failed to create trigger loop thread\n");
+ return PTR_ERR(loop_trig->task);
+ }
+ } else {
+ kthread_stop(loop_trig->task);
+ }
+
+ return 0;
+}
+
+static const struct iio_trigger_ops iio_loop_trigger_ops = {
+ .set_trigger_state = iio_loop_trigger_set_state,
+ .owner = THIS_MODULE,
+};
+
+static struct iio_sw_trigger *iio_trig_loop_probe(const char *name)
+{
+ struct iio_loop_info *trig_info;
+ int ret;
+
+ trig_info = kzalloc(sizeof(*trig_info), GFP_KERNEL);
+ if (!trig_info)
+ return ERR_PTR(-ENOMEM);
+
+ trig_info->swt.trigger = iio_trigger_alloc("%s", name);
+ if (!trig_info->swt.trigger) {
+ ret = -ENOMEM;
+ goto err_free_trig_info;
+ }
+
+ iio_trigger_set_drvdata(trig_info->swt.trigger, trig_info);
+ trig_info->swt.trigger->ops = &iio_loop_trigger_ops;
+
+ ret = iio_trigger_register(trig_info->swt.trigger);
+ if (ret)
+ goto err_free_trigger;
+
+ iio_swt_group_init_type_name(&trig_info->swt, name, &iio_loop_type);
+
+ return &trig_info->swt;
+
+err_free_trigger:
+ iio_trigger_free(trig_info->swt.trigger);
+err_free_trig_info:
+ kfree(trig_info);
+
+ return ERR_PTR(ret);
+}
+
+static int iio_trig_loop_remove(struct iio_sw_trigger *swt)
+{
+ struct iio_loop_info *trig_info;
+
+ trig_info = iio_trigger_get_drvdata(swt->trigger);
+
+ iio_trigger_unregister(swt->trigger);
+ iio_trigger_free(swt->trigger);
+ kfree(trig_info);
+
+ return 0;
+}
+
+static const struct iio_sw_trigger_ops iio_trig_loop_ops = {
+ .probe = iio_trig_loop_probe,
+ .remove = iio_trig_loop_remove,
+};
+
+static struct iio_sw_trigger_type iio_trig_loop = {
+ .name = "loop",
+ .owner = THIS_MODULE,
+ .ops = &iio_trig_loop_ops,
+};
+
+module_iio_sw_trigger_driver(iio_trig_loop);
+
+MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
+MODULE_DESCRIPTION("Loop based trigger for the iio subsystem");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:iio-trig-loop");
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 2137adfbd..e9b7dc037 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -84,6 +84,7 @@ source "drivers/infiniband/ulp/iser/Kconfig"
source "drivers/infiniband/ulp/isert/Kconfig"
source "drivers/infiniband/sw/rdmavt/Kconfig"
+source "drivers/infiniband/sw/rxe/Kconfig"
source "drivers/infiniband/hw/hfi1/Kconfig"
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index ad1b1adcf..5f65a78b2 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -68,6 +68,7 @@ MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");
#define CMA_CM_RESPONSE_TIMEOUT 20
+#define CMA_QUERY_CLASSPORT_INFO_TIMEOUT 3000
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 18
@@ -162,6 +163,14 @@ struct rdma_bind_list {
unsigned short port;
};
+struct class_port_info_context {
+ struct ib_class_port_info *class_port_info;
+ struct ib_device *device;
+ struct completion done;
+ struct ib_sa_query *sa_query;
+ u8 port_num;
+};
+
static int cma_ps_alloc(struct net *net, enum rdma_port_space ps,
struct rdma_bind_list *bind_list, int snum)
{
@@ -306,6 +315,7 @@ struct cma_multicast {
struct sockaddr_storage addr;
struct kref mcref;
bool igmp_joined;
+ u8 join_state;
};
struct cma_work {
@@ -2452,18 +2462,24 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
if (addr->dev_addr.bound_dev_if) {
ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
- if (!ndev)
- return -ENODEV;
+ if (!ndev) {
+ ret = -ENODEV;
+ goto err2;
+ }
if (ndev->flags & IFF_LOOPBACK) {
dev_put(ndev);
- if (!id_priv->id.device->get_netdev)
- return -EOPNOTSUPP;
+ if (!id_priv->id.device->get_netdev) {
+ ret = -EOPNOTSUPP;
+ goto err2;
+ }
ndev = id_priv->id.device->get_netdev(id_priv->id.device,
id_priv->id.port_num);
- if (!ndev)
- return -ENODEV;
+ if (!ndev) {
+ ret = -ENODEV;
+ goto err2;
+ }
}
route->path_rec->net = &init_net;
@@ -3752,10 +3768,63 @@ static void cma_set_mgid(struct rdma_id_private *id_priv,
}
}
+static void cma_query_sa_classport_info_cb(int status,
+ struct ib_class_port_info *rec,
+ void *context)
+{
+ struct class_port_info_context *cb_ctx = context;
+
+ WARN_ON(!context);
+
+ if (status || !rec) {
+ pr_debug("RDMA CM: %s port %u failed query ClassPortInfo status: %d\n",
+ cb_ctx->device->name, cb_ctx->port_num, status);
+ goto out;
+ }
+
+ memcpy(cb_ctx->class_port_info, rec, sizeof(struct ib_class_port_info));
+
+out:
+ complete(&cb_ctx->done);
+}
+
+static int cma_query_sa_classport_info(struct ib_device *device, u8 port_num,
+ struct ib_class_port_info *class_port_info)
+{
+ struct class_port_info_context *cb_ctx;
+ int ret;
+
+ cb_ctx = kmalloc(sizeof(*cb_ctx), GFP_KERNEL);
+ if (!cb_ctx)
+ return -ENOMEM;
+
+ cb_ctx->device = device;
+ cb_ctx->class_port_info = class_port_info;
+ cb_ctx->port_num = port_num;
+ init_completion(&cb_ctx->done);
+
+ ret = ib_sa_classport_info_rec_query(&sa_client, device, port_num,
+ CMA_QUERY_CLASSPORT_INFO_TIMEOUT,
+ GFP_KERNEL, cma_query_sa_classport_info_cb,
+ cb_ctx, &cb_ctx->sa_query);
+ if (ret < 0) {
+ pr_err("RDMA CM: %s port %u failed to send ClassPortInfo query, ret: %d\n",
+ device->name, port_num, ret);
+ goto out;
+ }
+
+ wait_for_completion(&cb_ctx->done);
+
+out:
+ kfree(cb_ctx);
+ return ret;
+}
+
static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
struct cma_multicast *mc)
{
struct ib_sa_mcmember_rec rec;
+ struct ib_class_port_info class_port_info;
struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
ib_sa_comp_mask comp_mask;
int ret;
@@ -3774,7 +3843,24 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
rec.qkey = cpu_to_be32(id_priv->qkey);
rdma_addr_get_sgid(dev_addr, &rec.port_gid);
rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
- rec.join_state = 1;
+ rec.join_state = mc->join_state;
+
+ if (rec.join_state == BIT(SENDONLY_FULLMEMBER_JOIN)) {
+ ret = cma_query_sa_classport_info(id_priv->id.device,
+ id_priv->id.port_num,
+ &class_port_info);
+
+ if (ret)
+ return ret;
+
+ if (!(ib_get_cpi_capmask2(&class_port_info) &
+ IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT)) {
+ pr_warn("RDMA CM: %s port %u Unable to multicast join\n"
+ "RDMA CM: SM doesn't support Send Only Full Member option\n",
+ id_priv->id.device->name, id_priv->id.port_num);
+ return -EOPNOTSUPP;
+ }
+ }
comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
@@ -3843,6 +3929,9 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
struct sockaddr *addr = (struct sockaddr *)&mc->addr;
struct net_device *ndev = NULL;
enum ib_gid_type gid_type;
+ bool send_only;
+
+ send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
if (cma_zero_addr((struct sockaddr *)&mc->addr))
return -EINVAL;
@@ -3878,10 +3967,12 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
if (addr->sa_family == AF_INET) {
if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
- err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
- true);
- if (!err)
- mc->igmp_joined = true;
+ if (!send_only) {
+ err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
+ true);
+ if (!err)
+ mc->igmp_joined = true;
+ }
}
} else {
if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
@@ -3911,7 +4002,7 @@ out1:
}
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
- void *context)
+ u8 join_state, void *context)
{
struct rdma_id_private *id_priv;
struct cma_multicast *mc;
@@ -3930,6 +4021,7 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
mc->context = context;
mc->id_priv = id_priv;
mc->igmp_joined = false;
+ mc->join_state = join_state;
spin_lock(&id_priv->lock);
list_add(&mc->list, &id_priv->mc_list);
spin_unlock(&id_priv->lock);
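
[Editor's note] With the new join_state argument, an in-kernel consumer that only transmits to a group can ask for a send-only full-member join and fall back when the SM lacks support. A hedged fragment (id, addr and my_context come from the usual rdma_cm setup, which is not shown):

/* Sketch only: request a send-only full-member join first. */
ret = rdma_join_multicast(id, (struct sockaddr *)&addr,
			  BIT(SENDONLY_FULLMEMBER_JOIN), my_context);
if (ret == -EOPNOTSUPP)
	/* SM has no Send Only Full Member support; join as full member. */
	ret = rdma_join_multicast(id, (struct sockaddr *)&addr,
				  BIT(FULLMEMBER_JOIN), my_context);
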
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 5c155fa91..760ef603a 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -311,6 +311,15 @@ static int read_port_immutable(struct ib_device *device)
return 0;
}
+void ib_get_device_fw_str(struct ib_device *dev, char *str, size_t str_len)
+{
+ if (dev->get_dev_fw_str)
+ dev->get_dev_fw_str(dev, str, str_len);
+ else
+ str[0] = '\0';
+}
+EXPORT_SYMBOL(ib_get_device_fw_str);
+
/**
* ib_register_device - Register an IB device with IB core
* @device:Device to register
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index f0572049d..357624f8b 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -183,15 +183,14 @@ static void free_cm_id(struct iwcm_id_private *cm_id_priv)
/*
* Release a reference on cm_id. If the last reference is being
- * released, enable the waiting thread (in iw_destroy_cm_id) to
- * get woken up, and return 1 if a thread is already waiting.
+ * released, free the cm_id and return 1.
*/
static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{
BUG_ON(atomic_read(&cm_id_priv->refcount)==0);
if (atomic_dec_and_test(&cm_id_priv->refcount)) {
BUG_ON(!list_empty(&cm_id_priv->work_list));
- complete(&cm_id_priv->destroy_comp);
+ free_cm_id(cm_id_priv);
return 1;
}
@@ -208,19 +207,10 @@ static void add_ref(struct iw_cm_id *cm_id)
static void rem_ref(struct iw_cm_id *cm_id)
{
struct iwcm_id_private *cm_id_priv;
- int cb_destroy;
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
- /*
- * Test bit before deref in case the cm_id gets freed on another
- * thread.
- */
- cb_destroy = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
- if (iwcm_deref_id(cm_id_priv) && cb_destroy) {
- BUG_ON(!list_empty(&cm_id_priv->work_list));
- free_cm_id(cm_id_priv);
- }
+ (void)iwcm_deref_id(cm_id_priv);
}
static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);
@@ -370,6 +360,12 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
wait_event(cm_id_priv->connect_wait,
!test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));
+ /*
+ * Since we're deleting the cm_id, drop any events that
+ * might arrive before the last dereference.
+ */
+ set_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags);
+
spin_lock_irqsave(&cm_id_priv->lock, flags);
switch (cm_id_priv->state) {
case IW_CM_STATE_LISTEN:
@@ -433,13 +429,7 @@ void iw_destroy_cm_id(struct iw_cm_id *cm_id)
struct iwcm_id_private *cm_id_priv;
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
- BUG_ON(test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags));
-
destroy_cm_id(cm_id);
-
- wait_for_completion(&cm_id_priv->destroy_comp);
-
- free_cm_id(cm_id_priv);
}
EXPORT_SYMBOL(iw_destroy_cm_id);
@@ -809,10 +799,7 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
ret = cm_id->cm_handler(cm_id, iw_event);
if (ret) {
iw_cm_reject(cm_id, NULL, 0);
- set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
- destroy_cm_id(cm_id);
- if (atomic_read(&cm_id_priv->refcount)==0)
- free_cm_id(cm_id_priv);
+ iw_destroy_cm_id(cm_id);
}
out:
@@ -1000,7 +987,6 @@ static void cm_work_handler(struct work_struct *_work)
unsigned long flags;
int empty;
int ret = 0;
- int destroy_id;
spin_lock_irqsave(&cm_id_priv->lock, flags);
empty = list_empty(&cm_id_priv->work_list);
@@ -1013,20 +999,14 @@ static void cm_work_handler(struct work_struct *_work)
put_work(work);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- ret = process_event(cm_id_priv, &levent);
- if (ret) {
- set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
- destroy_cm_id(&cm_id_priv->id);
- }
- BUG_ON(atomic_read(&cm_id_priv->refcount)==0);
- destroy_id = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
- if (iwcm_deref_id(cm_id_priv)) {
- if (destroy_id) {
- BUG_ON(!list_empty(&cm_id_priv->work_list));
- free_cm_id(cm_id_priv);
- }
+ if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
+ ret = process_event(cm_id_priv, &levent);
+ if (ret)
+ destroy_cm_id(&cm_id_priv->id);
+ } else
+ pr_debug("dropping event %d\n", levent.event);
+ if (iwcm_deref_id(cm_id_priv))
return;
- }
if (empty)
return;
spin_lock_irqsave(&cm_id_priv->lock, flags);
diff --git a/drivers/infiniband/core/iwcm.h b/drivers/infiniband/core/iwcm.h
index 3f6cc8256..82c2cd1b0 100644
--- a/drivers/infiniband/core/iwcm.h
+++ b/drivers/infiniband/core/iwcm.h
@@ -56,7 +56,7 @@ struct iwcm_id_private {
struct list_head work_free_list;
};
-#define IWCM_F_CALLBACK_DESTROY 1
+#define IWCM_F_DROP_EVENTS 1
#define IWCM_F_CONNECT_WAIT 2
#endif /* IWCM_H */
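
[Editor's note] The iwcm rework above collapses the old destroy handshake (flag, completion, wait) into plain reference counting: whoever drops the last reference frees the cm_id, and late events are simply discarded via IWCM_F_DROP_EVENTS. The core of that pattern in a runnable user-space sketch (C11 atomics stand in for the kernel's atomic_t; the struct is illustrative):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refcount;
};

/* Mirrors the new iwcm_deref_id(): the last dropper frees the object
 * and reports that it did, so no destroy completion is needed. */
static int obj_deref(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcount, 1) == 1) {
		free(o);
		return 1;
	}
	return 0;
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));
	int a, b;

	atomic_init(&o->refcount, 2);
	a = obj_deref(o);	/* drops 2 -> 1, returns 0 */
	b = obj_deref(o);	/* last reference: frees, returns 1 */
	printf("%d %d\n", a, b);
	return 0;
}
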
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index b65e06c56..ade71e7f0 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -37,6 +37,7 @@
#define IWPM_MAPINFO_HASH_MASK (IWPM_MAPINFO_HASH_SIZE - 1)
#define IWPM_REMINFO_HASH_SIZE 64
#define IWPM_REMINFO_HASH_MASK (IWPM_REMINFO_HASH_SIZE - 1)
+#define IWPM_MSG_SIZE 512
static LIST_HEAD(iwpm_nlmsg_req_list);
static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock);
@@ -452,7 +453,7 @@ struct sk_buff *iwpm_create_nlmsg(u32 nl_op, struct nlmsghdr **nlh,
{
struct sk_buff *skb = NULL;
- skb = dev_alloc_skb(NLMSG_GOODSIZE);
+ skb = dev_alloc_skb(IWPM_MSG_SIZE);
if (!skb) {
pr_err("%s Unable to allocate skb\n", __func__);
goto create_nlmsg_exit;
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index a83ec28a1..51c79b2fb 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -93,18 +93,6 @@ enum {
struct mcast_member;
-/*
-* There are 4 types of join states:
-* FullMember, NonMember, SendOnlyNonMember, SendOnlyFullMember.
-*/
-enum {
- FULLMEMBER_JOIN,
- NONMEMBER_JOIN,
- SENDONLY_NONMEBER_JOIN,
- SENDONLY_FULLMEMBER_JOIN,
- NUM_JOIN_MEMBERSHIP_TYPES,
-};
-
struct mcast_group {
struct ib_sa_mcmember_rec rec;
struct rb_node node;
@@ -118,7 +106,6 @@ struct mcast_group {
atomic_t refcount;
enum mcast_group_state state;
struct ib_sa_query *query;
- int query_id;
u16 pkey_index;
u8 leave_state;
int retries;
@@ -352,11 +339,7 @@ static int send_join(struct mcast_group *group, struct mcast_member *member)
member->multicast.comp_mask,
3000, GFP_KERNEL, join_handler, group,
&group->query);
- if (ret >= 0) {
- group->query_id = ret;
- ret = 0;
- }
- return ret;
+ return (ret > 0) ? 0 : ret;
}
static int send_leave(struct mcast_group *group, u8 leave_state)
@@ -376,11 +359,7 @@ static int send_leave(struct mcast_group *group, u8 leave_state)
IB_SA_MCMEMBER_REC_JOIN_STATE,
3000, GFP_KERNEL, leave_handler,
group, &group->query);
- if (ret >= 0) {
- group->query_id = ret;
- ret = 0;
- }
- return ret;
+ return (ret > 0) ? 0 : ret;
}
static void join_group(struct mcast_group *group, struct mcast_member *member,
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index 9b8c20c82..10469b008 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -229,7 +229,10 @@ static void ibnl_rcv(struct sk_buff *skb)
int ibnl_unicast(struct sk_buff *skb, struct nlmsghdr *nlh,
__u32 pid)
{
- return nlmsg_unicast(nls, skb, pid);
+ int err;
+
+ err = netlink_unicast(nls, skb, pid, 0);
+ return (err < 0) ? err : 0;
}
EXPORT_SYMBOL(ibnl_unicast);
@@ -252,6 +255,7 @@ int __init ibnl_init(void)
return -ENOMEM;
}
+ nls->sk_sndtimeo = 10 * HZ;
return 0;
}
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index e95538650..b9bf7aa05 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -65,10 +65,17 @@ struct ib_sa_sm_ah {
u8 src_path_mask;
};
+struct ib_sa_classport_cache {
+ bool valid;
+ struct ib_class_port_info data;
+};
+
struct ib_sa_port {
struct ib_mad_agent *agent;
struct ib_sa_sm_ah *sm_ah;
struct work_struct update_task;
+ struct ib_sa_classport_cache classport_info;
+ spinlock_t classport_lock; /* protects class port info set */
spinlock_t ah_lock;
u8 port_num;
};
@@ -998,6 +1005,13 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
port->sm_ah = NULL;
spin_unlock_irqrestore(&port->ah_lock, flags);
+ if (event->event == IB_EVENT_SM_CHANGE ||
+ event->event == IB_EVENT_CLIENT_REREGISTER ||
+ event->event == IB_EVENT_LID_CHANGE) {
+ spin_lock_irqsave(&port->classport_lock, flags);
+ port->classport_info.valid = false;
+ spin_unlock_irqrestore(&port->classport_lock, flags);
+ }
queue_work(ib_wq, &sa_dev->port[event->element.port_num -
sa_dev->start_port].update_task);
}
@@ -1719,6 +1733,7 @@ static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
int status,
struct ib_sa_mad *mad)
{
+ unsigned long flags;
struct ib_sa_classport_info_query *query =
container_of(sa_query, struct ib_sa_classport_info_query, sa_query);
@@ -1728,6 +1743,16 @@ static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
ib_unpack(classport_info_rec_table,
ARRAY_SIZE(classport_info_rec_table),
mad->data, &rec);
+
+ spin_lock_irqsave(&sa_query->port->classport_lock, flags);
+ if (!status && !sa_query->port->classport_info.valid) {
+ memcpy(&sa_query->port->classport_info.data, &rec,
+ sizeof(sa_query->port->classport_info.data));
+
+ sa_query->port->classport_info.valid = true;
+ }
+ spin_unlock_irqrestore(&sa_query->port->classport_lock, flags);
+
query->callback(status, &rec, query->context);
} else {
query->callback(status, NULL, query->context);
@@ -1754,7 +1779,9 @@ int ib_sa_classport_info_rec_query(struct ib_sa_client *client,
struct ib_sa_port *port;
struct ib_mad_agent *agent;
struct ib_sa_mad *mad;
+ struct ib_class_port_info cached_class_port_info;
int ret;
+ unsigned long flags;
if (!sa_dev)
return -ENODEV;
@@ -1762,6 +1789,17 @@ int ib_sa_classport_info_rec_query(struct ib_sa_client *client,
port = &sa_dev->port[port_num - sa_dev->start_port];
agent = port->agent;
+ /* Use cached ClassPortInfo attribute if valid instead of sending mad */
+ spin_lock_irqsave(&port->classport_lock, flags);
+ if (port->classport_info.valid && callback) {
+ memcpy(&cached_class_port_info, &port->classport_info.data,
+ sizeof(cached_class_port_info));
+ spin_unlock_irqrestore(&port->classport_lock, flags);
+ callback(0, &cached_class_port_info, context);
+ return 0;
+ }
+ spin_unlock_irqrestore(&port->classport_lock, flags);
+
query = kzalloc(sizeof(*query), gfp_mask);
if (!query)
return -ENOMEM;
@@ -1885,6 +1923,9 @@ static void ib_sa_add_one(struct ib_device *device)
sa_dev->port[i].sm_ah = NULL;
sa_dev->port[i].port_num = i + s;
+ spin_lock_init(&sa_dev->port[i].classport_lock);
+ sa_dev->port[i].classport_info.valid = false;
+
sa_dev->port[i].agent =
ib_register_mad_agent(device, i + s, IB_QPT_GSI,
NULL, 0, send_handler,
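
[Editor's note] A detail worth noting in the classport cache above: the cached record is copied out while holding classport_lock, and the caller's callback runs only after the lock is dropped, so an arbitrarily slow callback never extends the critical section. The same copy-then-use shape as a runnable user-space analogue (a pthread mutex stands in for the spinlock; names are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct cpi_cache {
	pthread_mutex_t lock;
	int valid;
	char data[64];
};

/* Snapshot under the lock, run the callback outside it. */
static int cached_query(struct cpi_cache *c, void (*cb)(const char *))
{
	char snapshot[sizeof(c->data)];
	int hit;

	pthread_mutex_lock(&c->lock);
	hit = c->valid;
	if (hit)
		memcpy(snapshot, c->data, sizeof(snapshot));
	pthread_mutex_unlock(&c->lock);

	if (hit)
		cb(snapshot);	/* lock is no longer held here */
	return hit;
}

static void print_cb(const char *s) { printf("%s\n", s); }

int main(void)
{
	struct cpi_cache c = {
		PTHREAD_MUTEX_INITIALIZER, 1, "cached ClassPortInfo"
	};

	return cached_query(&c, print_cb) ? 0 : 1;
}
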
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 60df4f8e8..15defefec 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -38,6 +38,7 @@
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/netdevice.h>
+#include <linux/ethtool.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_pma.h>
@@ -1200,16 +1201,28 @@ static ssize_t set_node_desc(struct device *device,
return count;
}
+static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct ib_device *dev = container_of(device, struct ib_device, dev);
+
+ ib_get_device_fw_str(dev, buf, PAGE_SIZE);
+ strlcat(buf, "\n", PAGE_SIZE);
+ return strlen(buf);
+}
+
static DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
static DEVICE_ATTR(sys_image_guid, S_IRUGO, show_sys_image_guid, NULL);
static DEVICE_ATTR(node_guid, S_IRUGO, show_node_guid, NULL);
static DEVICE_ATTR(node_desc, S_IRUGO | S_IWUSR, show_node_desc, set_node_desc);
+static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static struct device_attribute *ib_class_attributes[] = {
&dev_attr_node_type,
&dev_attr_sys_image_guid,
&dev_attr_node_guid,
- &dev_attr_node_desc
+ &dev_attr_node_desc,
+ &dev_attr_fw_ver,
};
static void free_port_list_attributes(struct ib_device *device)
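
[Editor's note] The new fw_ver attribute is backed by the per-driver get_dev_fw_str callback that ib_get_device_fw_str() dispatches to. A provider hooks it up roughly like this (a sketch: my_hca_dev and its fw_* fields are hypothetical, only the callback signature comes from the patch):

static void my_get_dev_fw_str(struct ib_device *ibdev, char *str,
			      size_t str_len)
{
	struct my_hca_dev *dev =
		container_of(ibdev, struct my_hca_dev, ibdev);

	/* Emit whatever version encoding the hardware uses. */
	snprintf(str, str_len, "%u.%u.%u",
		 dev->fw_major, dev->fw_minor, dev->fw_patch);
}

/* ...during registration, before ib_register_device()... */
	dev->ibdev.get_dev_fw_str = my_get_dev_fw_str;
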
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index c0f3826ab..2825ece91 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -106,6 +106,7 @@ struct ucma_multicast {
int events_reported;
u64 uid;
+ u8 join_state;
struct list_head list;
struct sockaddr_storage addr;
};
@@ -1317,12 +1318,20 @@ static ssize_t ucma_process_join(struct ucma_file *file,
struct ucma_multicast *mc;
struct sockaddr *addr;
int ret;
+ u8 join_state;
if (out_len < sizeof(resp))
return -ENOSPC;
addr = (struct sockaddr *) &cmd->addr;
- if (cmd->reserved || !cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr)))
+ if (!cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr)))
+ return -EINVAL;
+
+ if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
+ join_state = BIT(FULLMEMBER_JOIN);
+ else if (cmd->join_flags == RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER)
+ join_state = BIT(SENDONLY_FULLMEMBER_JOIN);
+ else
return -EINVAL;
ctx = ucma_get_ctx(file, cmd->id);
@@ -1335,10 +1344,11 @@ static ssize_t ucma_process_join(struct ucma_file *file,
ret = -ENOMEM;
goto err1;
}
-
+ mc->join_state = join_state;
mc->uid = cmd->uid;
memcpy(&mc->addr, addr, cmd->addr_size);
- ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
+ ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
+ join_state, mc);
if (ret)
goto err2;
@@ -1382,7 +1392,7 @@ static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
join_cmd.uid = cmd.uid;
join_cmd.id = cmd.id;
join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr);
- join_cmd.reserved = 0;
+ join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);
return ucma_process_join(file, &join_cmd, out_len);
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index fe4d2e1a8..c68746ce6 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -37,7 +37,6 @@
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
-#include <linux/dma-attrs.h>
#include <linux/slab.h>
#include <rdma/ib_umem_odp.h>
@@ -92,12 +91,12 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
unsigned long npages;
int ret;
int i;
- DEFINE_DMA_ATTRS(attrs);
+ unsigned long dma_attrs = 0;
struct scatterlist *sg, *sg_list_start;
int need_release = 0;
if (dmasync)
- dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
+ dma_attrs |= DMA_ATTR_WRITE_BARRIER;
if (!size)
return ERR_PTR(-EINVAL);
@@ -215,7 +214,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
umem->sg_head.sgl,
umem->npages,
DMA_BIDIRECTIONAL,
- &attrs);
+ dma_attrs);
if (umem->nmap <= 0) {
ret = -ENOMEM;
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 9245e55de..df26a741c 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -163,6 +163,10 @@ struct ib_uqp_object {
struct ib_uxrcd_object *uxrcd;
};
+struct ib_uwq_object {
+ struct ib_uevent_object uevent;
+};
+
struct ib_ucq_object {
struct ib_uobject uobject;
struct ib_uverbs_file *uverbs_file;
@@ -182,6 +186,8 @@ extern struct idr ib_uverbs_qp_idr;
extern struct idr ib_uverbs_srq_idr;
extern struct idr ib_uverbs_xrcd_idr;
extern struct idr ib_uverbs_rule_idr;
+extern struct idr ib_uverbs_wq_idr;
+extern struct idr ib_uverbs_rwq_ind_tbl_idr;
void idr_remove_uobj(struct idr *idp, struct ib_uobject *uobj);
@@ -200,6 +206,7 @@ void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context);
void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr);
void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr);
+void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr);
void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr);
void ib_uverbs_event_handler(struct ib_event_handler *handler,
struct ib_event *event);
@@ -220,6 +227,7 @@ struct ib_uverbs_flow_spec {
struct ib_uverbs_flow_spec_eth eth;
struct ib_uverbs_flow_spec_ipv4 ipv4;
struct ib_uverbs_flow_spec_tcp_udp tcp_udp;
+ struct ib_uverbs_flow_spec_ipv6 ipv6;
};
};
@@ -276,5 +284,10 @@ IB_UVERBS_DECLARE_EX_CMD(destroy_flow);
IB_UVERBS_DECLARE_EX_CMD(query_device);
IB_UVERBS_DECLARE_EX_CMD(create_cq);
IB_UVERBS_DECLARE_EX_CMD(create_qp);
+IB_UVERBS_DECLARE_EX_CMD(create_wq);
+IB_UVERBS_DECLARE_EX_CMD(modify_wq);
+IB_UVERBS_DECLARE_EX_CMD(destroy_wq);
+IB_UVERBS_DECLARE_EX_CMD(create_rwq_ind_table);
+IB_UVERBS_DECLARE_EX_CMD(destroy_rwq_ind_table);
#endif /* UVERBS_H */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 825021d10..f66473181 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -57,6 +57,8 @@ static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" };
static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
+static struct uverbs_lock_class wq_lock_class = { .name = "WQ-uobj" };
+static struct uverbs_lock_class rwq_ind_table_lock_class = { .name = "IND_TBL-uobj" };
/*
* The ib_uobject locking scheme is as follows:
@@ -243,6 +245,27 @@ static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}
+static struct ib_wq *idr_read_wq(int wq_handle, struct ib_ucontext *context)
+{
+ return idr_read_obj(&ib_uverbs_wq_idr, wq_handle, context, 0);
+}
+
+static void put_wq_read(struct ib_wq *wq)
+{
+ put_uobj_read(wq->uobject);
+}
+
+static struct ib_rwq_ind_table *idr_read_rwq_indirection_table(int ind_table_handle,
+ struct ib_ucontext *context)
+{
+ return idr_read_obj(&ib_uverbs_rwq_ind_tbl_idr, ind_table_handle, context, 0);
+}
+
+static void put_rwq_indirection_table_read(struct ib_rwq_ind_table *ind_table)
+{
+ put_uobj_read(ind_table->uobject);
+}
+
static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
{
struct ib_uobject *uobj;
@@ -326,6 +349,8 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
INIT_LIST_HEAD(&ucontext->qp_list);
INIT_LIST_HEAD(&ucontext->srq_list);
INIT_LIST_HEAD(&ucontext->ah_list);
+ INIT_LIST_HEAD(&ucontext->wq_list);
+ INIT_LIST_HEAD(&ucontext->rwq_ind_tbl_list);
INIT_LIST_HEAD(&ucontext->xrcd_list);
INIT_LIST_HEAD(&ucontext->rule_list);
rcu_read_lock();
@@ -1750,6 +1775,8 @@ static int create_qp(struct ib_uverbs_file *file,
struct ib_qp_init_attr attr = {};
struct ib_uverbs_ex_create_qp_resp resp;
int ret;
+ struct ib_rwq_ind_table *ind_tbl = NULL;
+ bool has_sq = true;
if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
return -EPERM;
@@ -1761,6 +1788,32 @@ static int create_qp(struct ib_uverbs_file *file,
init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext,
&qp_lock_class);
down_write(&obj->uevent.uobject.mutex);
+ if (cmd_sz >= offsetof(typeof(*cmd), rwq_ind_tbl_handle) +
+ sizeof(cmd->rwq_ind_tbl_handle) &&
+ (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE)) {
+ ind_tbl = idr_read_rwq_indirection_table(cmd->rwq_ind_tbl_handle,
+ file->ucontext);
+ if (!ind_tbl) {
+ ret = -EINVAL;
+ goto err_put;
+ }
+
+ attr.rwq_ind_tbl = ind_tbl;
+ }
+
+ if ((cmd_sz >= offsetof(typeof(*cmd), reserved1) +
+ sizeof(cmd->reserved1)) && cmd->reserved1) {
+ ret = -EOPNOTSUPP;
+ goto err_put;
+ }
+
+ if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
+ ret = -EINVAL;
+ goto err_put;
+ }
+
+ if (ind_tbl && !cmd->max_send_wr)
+ has_sq = false;
if (cmd->qp_type == IB_QPT_XRC_TGT) {
xrcd = idr_read_xrcd(cmd->pd_handle, file->ucontext,
@@ -1784,20 +1837,24 @@ static int create_qp(struct ib_uverbs_file *file,
}
}
- if (cmd->recv_cq_handle != cmd->send_cq_handle) {
- rcq = idr_read_cq(cmd->recv_cq_handle,
- file->ucontext, 0);
- if (!rcq) {
- ret = -EINVAL;
- goto err_put;
+ if (!ind_tbl) {
+ if (cmd->recv_cq_handle != cmd->send_cq_handle) {
+ rcq = idr_read_cq(cmd->recv_cq_handle,
+ file->ucontext, 0);
+ if (!rcq) {
+ ret = -EINVAL;
+ goto err_put;
+ }
}
}
}
- scq = idr_read_cq(cmd->send_cq_handle, file->ucontext, !!rcq);
- rcq = rcq ?: scq;
+ if (has_sq)
+ scq = idr_read_cq(cmd->send_cq_handle, file->ucontext, !!rcq);
+ if (!ind_tbl)
+ rcq = rcq ?: scq;
pd = idr_read_pd(cmd->pd_handle, file->ucontext);
- if (!pd || !scq) {
+ if (!pd || (!scq && has_sq)) {
ret = -EINVAL;
goto err_put;
}
@@ -1864,16 +1921,20 @@ static int create_qp(struct ib_uverbs_file *file,
qp->send_cq = attr.send_cq;
qp->recv_cq = attr.recv_cq;
qp->srq = attr.srq;
+ qp->rwq_ind_tbl = ind_tbl;
qp->event_handler = attr.event_handler;
qp->qp_context = attr.qp_context;
qp->qp_type = attr.qp_type;
atomic_set(&qp->usecnt, 0);
atomic_inc(&pd->usecnt);
- atomic_inc(&attr.send_cq->usecnt);
+ if (attr.send_cq)
+ atomic_inc(&attr.send_cq->usecnt);
if (attr.recv_cq)
atomic_inc(&attr.recv_cq->usecnt);
if (attr.srq)
atomic_inc(&attr.srq->usecnt);
+ if (ind_tbl)
+ atomic_inc(&ind_tbl->usecnt);
}
qp->uobject = &obj->uevent.uobject;
@@ -1913,6 +1974,8 @@ static int create_qp(struct ib_uverbs_file *file,
put_cq_read(rcq);
if (srq)
put_srq_read(srq);
+ if (ind_tbl)
+ put_rwq_indirection_table_read(ind_tbl);
mutex_lock(&file->mutex);
list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
@@ -1940,6 +2003,8 @@ err_put:
put_cq_read(rcq);
if (srq)
put_srq_read(srq);
+ if (ind_tbl)
+ put_rwq_indirection_table_read(ind_tbl);
put_uobj_write(&obj->uevent.uobject);
return ret;
@@ -2033,7 +2098,7 @@ int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
if (err)
return err;
- if (cmd.comp_mask)
+ if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
return -EINVAL;
if (cmd.reserved)
@@ -3040,6 +3105,15 @@ static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
sizeof(struct ib_flow_ipv4_filter));
break;
+ case IB_FLOW_SPEC_IPV6:
+ ib_spec->ipv6.size = sizeof(struct ib_flow_spec_ipv6);
+ if (ib_spec->ipv6.size != kern_spec->ipv6.size)
+ return -EINVAL;
+ memcpy(&ib_spec->ipv6.val, &kern_spec->ipv6.val,
+ sizeof(struct ib_flow_ipv6_filter));
+ memcpy(&ib_spec->ipv6.mask, &kern_spec->ipv6.mask,
+ sizeof(struct ib_flow_ipv6_filter));
+ break;
case IB_FLOW_SPEC_TCP:
case IB_FLOW_SPEC_UDP:
ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
@@ -3056,6 +3130,445 @@ static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
return 0;
}
+int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
+ struct ib_udata *ucore,
+ struct ib_udata *uhw)
+{
+ struct ib_uverbs_ex_create_wq cmd = {};
+ struct ib_uverbs_ex_create_wq_resp resp = {};
+ struct ib_uwq_object *obj;
+ int err = 0;
+ struct ib_cq *cq;
+ struct ib_pd *pd;
+ struct ib_wq *wq;
+ struct ib_wq_init_attr wq_init_attr = {};
+ size_t required_cmd_sz;
+ size_t required_resp_len;
+
+ required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge);
+ required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn);
+
+ if (ucore->inlen < required_cmd_sz)
+ return -EINVAL;
+
+ if (ucore->outlen < required_resp_len)
+ return -ENOSPC;
+
+ if (ucore->inlen > sizeof(cmd) &&
+ !ib_is_udata_cleared(ucore, sizeof(cmd),
+ ucore->inlen - sizeof(cmd)))
+ return -EOPNOTSUPP;
+
+ err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
+ if (err)
+ return err;
+
+ if (cmd.comp_mask)
+ return -EOPNOTSUPP;
+
+ obj = kmalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext,
+ &wq_lock_class);
+ down_write(&obj->uevent.uobject.mutex);
+ pd = idr_read_pd(cmd.pd_handle, file->ucontext);
+ if (!pd) {
+ err = -EINVAL;
+ goto err_uobj;
+ }
+
+ cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
+ if (!cq) {
+ err = -EINVAL;
+ goto err_put_pd;
+ }
+
+ wq_init_attr.cq = cq;
+ wq_init_attr.max_sge = cmd.max_sge;
+ wq_init_attr.max_wr = cmd.max_wr;
+ wq_init_attr.wq_context = file;
+ wq_init_attr.wq_type = cmd.wq_type;
+ wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
+ obj->uevent.events_reported = 0;
+ INIT_LIST_HEAD(&obj->uevent.event_list);
+ wq = pd->device->create_wq(pd, &wq_init_attr, uhw);
+ if (IS_ERR(wq)) {
+ err = PTR_ERR(wq);
+ goto err_put_cq;
+ }
+
+ wq->uobject = &obj->uevent.uobject;
+ obj->uevent.uobject.object = wq;
+ wq->wq_type = wq_init_attr.wq_type;
+ wq->cq = cq;
+ wq->pd = pd;
+ wq->device = pd->device;
+ wq->wq_context = wq_init_attr.wq_context;
+ atomic_set(&wq->usecnt, 0);
+ atomic_inc(&pd->usecnt);
+ atomic_inc(&cq->usecnt);
+ err = idr_add_uobj(&ib_uverbs_wq_idr, &obj->uevent.uobject);
+ if (err)
+ goto destroy_wq;
+
+ memset(&resp, 0, sizeof(resp));
+ resp.wq_handle = obj->uevent.uobject.id;
+ resp.max_sge = wq_init_attr.max_sge;
+ resp.max_wr = wq_init_attr.max_wr;
+ resp.wqn = wq->wq_num;
+ resp.response_length = required_resp_len;
+ err = ib_copy_to_udata(ucore,
+ &resp, resp.response_length);
+ if (err)
+ goto err_copy;
+
+ put_pd_read(pd);
+ put_cq_read(cq);
+
+ mutex_lock(&file->mutex);
+ list_add_tail(&obj->uevent.uobject.list, &file->ucontext->wq_list);
+ mutex_unlock(&file->mutex);
+
+ obj->uevent.uobject.live = 1;
+ up_write(&obj->uevent.uobject.mutex);
+ return 0;
+
+err_copy:
+ idr_remove_uobj(&ib_uverbs_wq_idr, &obj->uevent.uobject);
+destroy_wq:
+ ib_destroy_wq(wq);
+err_put_cq:
+ put_cq_read(cq);
+err_put_pd:
+ put_pd_read(pd);
+err_uobj:
+ put_uobj_write(&obj->uevent.uobject);
+
+ return err;
+}
+
+int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
+ struct ib_udata *ucore,
+ struct ib_udata *uhw)
+{
+ struct ib_uverbs_ex_destroy_wq cmd = {};
+ struct ib_uverbs_ex_destroy_wq_resp resp = {};
+ struct ib_wq *wq;
+ struct ib_uobject *uobj;
+ struct ib_uwq_object *obj;
+ size_t required_cmd_sz;
+ size_t required_resp_len;
+ int ret;
+
+ required_cmd_sz = offsetof(typeof(cmd), wq_handle) + sizeof(cmd.wq_handle);
+ required_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
+
+ if (ucore->inlen < required_cmd_sz)
+ return -EINVAL;
+
+ if (ucore->outlen < required_resp_len)
+ return -ENOSPC;
+
+ if (ucore->inlen > sizeof(cmd) &&
+ !ib_is_udata_cleared(ucore, sizeof(cmd),
+ ucore->inlen - sizeof(cmd)))
+ return -EOPNOTSUPP;
+
+ ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
+ if (ret)
+ return ret;
+
+ if (cmd.comp_mask)
+ return -EOPNOTSUPP;
+
+ resp.response_length = required_resp_len;
+ uobj = idr_write_uobj(&ib_uverbs_wq_idr, cmd.wq_handle,
+ file->ucontext);
+ if (!uobj)
+ return -EINVAL;
+
+ wq = uobj->object;
+ obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
+ ret = ib_destroy_wq(wq);
+ if (!ret)
+ uobj->live = 0;
+
+ put_uobj_write(uobj);
+ if (ret)
+ return ret;
+
+ idr_remove_uobj(&ib_uverbs_wq_idr, uobj);
+
+ mutex_lock(&file->mutex);
+ list_del(&uobj->list);
+ mutex_unlock(&file->mutex);
+
+ ib_uverbs_release_uevent(file, &obj->uevent);
+ resp.events_reported = obj->uevent.events_reported;
+ put_uobj(uobj);
+
+ ret = ib_copy_to_udata(ucore, &resp, resp.response_length);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
+ struct ib_udata *ucore,
+ struct ib_udata *uhw)
+{
+ struct ib_uverbs_ex_modify_wq cmd = {};
+ struct ib_wq *wq;
+ struct ib_wq_attr wq_attr = {};
+ size_t required_cmd_sz;
+ int ret;
+
+ required_cmd_sz = offsetof(typeof(cmd), curr_wq_state) + sizeof(cmd.curr_wq_state);
+ if (ucore->inlen < required_cmd_sz)
+ return -EINVAL;
+
+ if (ucore->inlen > sizeof(cmd) &&
+ !ib_is_udata_cleared(ucore, sizeof(cmd),
+ ucore->inlen - sizeof(cmd)))
+ return -EOPNOTSUPP;
+
+ ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
+ if (ret)
+ return ret;
+
+ if (!cmd.attr_mask)
+ return -EINVAL;
+
+ if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE))
+ return -EINVAL;
+
+ wq = idr_read_wq(cmd.wq_handle, file->ucontext);
+ if (!wq)
+ return -EINVAL;
+
+ wq_attr.curr_wq_state = cmd.curr_wq_state;
+ wq_attr.wq_state = cmd.wq_state;
+ ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw);
+ put_wq_read(wq);
+ return ret;
+}
+
+int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
+ struct ib_udata *ucore,
+ struct ib_udata *uhw)
+{
+ struct ib_uverbs_ex_create_rwq_ind_table cmd = {};
+ struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {};
+ struct ib_uobject *uobj;
+ int err = 0;
+ struct ib_rwq_ind_table_init_attr init_attr = {};
+ struct ib_rwq_ind_table *rwq_ind_tbl;
+ struct ib_wq **wqs = NULL;
+ u32 *wqs_handles = NULL;
+ struct ib_wq *wq = NULL;
+ int i, j, num_read_wqs;
+ u32 num_wq_handles;
+ u32 expected_in_size;
+ size_t required_cmd_sz_header;
+ size_t required_resp_len;
+
+ required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) + sizeof(cmd.log_ind_tbl_size);
+ required_resp_len = offsetof(typeof(resp), ind_tbl_num) + sizeof(resp.ind_tbl_num);
+
+ if (ucore->inlen < required_cmd_sz_header)
+ return -EINVAL;
+
+ if (ucore->outlen < required_resp_len)
+ return -ENOSPC;
+
+ err = ib_copy_from_udata(&cmd, ucore, required_cmd_sz_header);
+ if (err)
+ return err;
+
+ ucore->inbuf += required_cmd_sz_header;
+ ucore->inlen -= required_cmd_sz_header;
+
+ if (cmd.comp_mask)
+ return -EOPNOTSUPP;
+
+ if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
+ return -EINVAL;
+
+ num_wq_handles = 1 << cmd.log_ind_tbl_size;
+ expected_in_size = num_wq_handles * sizeof(__u32);
+ if (num_wq_handles == 1)
+ /* input size for wq handles is u64 aligned */
+ expected_in_size += sizeof(__u32);
+
+ if (ucore->inlen < expected_in_size)
+ return -EINVAL;
+
+ if (ucore->inlen > expected_in_size &&
+ !ib_is_udata_cleared(ucore, expected_in_size,
+ ucore->inlen - expected_in_size))
+ return -EOPNOTSUPP;
+
+ wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
+ GFP_KERNEL);
+ if (!wqs_handles)
+ return -ENOMEM;
+
+ err = ib_copy_from_udata(wqs_handles, ucore,
+ num_wq_handles * sizeof(__u32));
+ if (err)
+ goto err_free;
+
+ wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
+ if (!wqs) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
+ num_read_wqs++) {
+ wq = idr_read_wq(wqs_handles[num_read_wqs], file->ucontext);
+ if (!wq) {
+ err = -EINVAL;
+ goto put_wqs;
+ }
+
+ wqs[num_read_wqs] = wq;
+ }
+
+ uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
+ if (!uobj) {
+ err = -ENOMEM;
+ goto put_wqs;
+ }
+
+ init_uobj(uobj, 0, file->ucontext, &rwq_ind_table_lock_class);
+ down_write(&uobj->mutex);
+ init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
+ init_attr.ind_tbl = wqs;
+ rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw);
+
+ if (IS_ERR(rwq_ind_tbl)) {
+ err = PTR_ERR(rwq_ind_tbl);
+ goto err_uobj;
+ }
+
+ rwq_ind_tbl->ind_tbl = wqs;
+ rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
+ rwq_ind_tbl->uobject = uobj;
+ uobj->object = rwq_ind_tbl;
+ rwq_ind_tbl->device = ib_dev;
+ atomic_set(&rwq_ind_tbl->usecnt, 0);
+
+ for (i = 0; i < num_wq_handles; i++)
+ atomic_inc(&wqs[i]->usecnt);
+
+ err = idr_add_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj);
+ if (err)
+ goto destroy_ind_tbl;
+
+ resp.ind_tbl_handle = uobj->id;
+ resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
+ resp.response_length = required_resp_len;
+
+ err = ib_copy_to_udata(ucore,
+ &resp, resp.response_length);
+ if (err)
+ goto err_copy;
+
+ kfree(wqs_handles);
+
+ for (j = 0; j < num_read_wqs; j++)
+ put_wq_read(wqs[j]);
+
+ mutex_lock(&file->mutex);
+ list_add_tail(&uobj->list, &file->ucontext->rwq_ind_tbl_list);
+ mutex_unlock(&file->mutex);
+
+ uobj->live = 1;
+
+ up_write(&uobj->mutex);
+ return 0;
+
+err_copy:
+ idr_remove_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj);
+destroy_ind_tbl:
+ ib_destroy_rwq_ind_table(rwq_ind_tbl);
+err_uobj:
+ put_uobj_write(uobj);
+put_wqs:
+ for (j = 0; j < num_read_wqs; j++)
+ put_wq_read(wqs[j]);
+err_free:
+ kfree(wqs_handles);
+ kfree(wqs);
+ return err;
+}
+
+int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
+ struct ib_udata *ucore,
+ struct ib_udata *uhw)
+{
+ struct ib_uverbs_ex_destroy_rwq_ind_table cmd = {};
+ struct ib_rwq_ind_table *rwq_ind_tbl;
+ struct ib_uobject *uobj;
+ int ret;
+ struct ib_wq **ind_tbl;
+ size_t required_cmd_sz;
+
+ required_cmd_sz = offsetof(typeof(cmd), ind_tbl_handle) + sizeof(cmd.ind_tbl_handle);
+
+ if (ucore->inlen < required_cmd_sz)
+ return -EINVAL;
+
+ if (ucore->inlen > sizeof(cmd) &&
+ !ib_is_udata_cleared(ucore, sizeof(cmd),
+ ucore->inlen - sizeof(cmd)))
+ return -EOPNOTSUPP;
+
+ ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
+ if (ret)
+ return ret;
+
+ if (cmd.comp_mask)
+ return -EOPNOTSUPP;
+
+ uobj = idr_write_uobj(&ib_uverbs_rwq_ind_tbl_idr, cmd.ind_tbl_handle,
+ file->ucontext);
+ if (!uobj)
+ return -EINVAL;
+ rwq_ind_tbl = uobj->object;
+ ind_tbl = rwq_ind_tbl->ind_tbl;
+
+ ret = ib_destroy_rwq_ind_table(rwq_ind_tbl);
+ if (!ret)
+ uobj->live = 0;
+
+ put_uobj_write(uobj);
+
+ if (ret)
+ return ret;
+
+ idr_remove_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj);
+
+ mutex_lock(&file->mutex);
+ list_del(&uobj->list);
+ mutex_unlock(&file->mutex);
+
+ put_uobj(uobj);
+ kfree(ind_tbl);
+ return ret;
+}
+
int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
struct ib_device *ib_dev,
struct ib_udata *ucore,
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 09d515763..0012fa58c 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -76,6 +76,8 @@ DEFINE_IDR(ib_uverbs_qp_idr);
DEFINE_IDR(ib_uverbs_srq_idr);
DEFINE_IDR(ib_uverbs_xrcd_idr);
DEFINE_IDR(ib_uverbs_rule_idr);
+DEFINE_IDR(ib_uverbs_wq_idr);
+DEFINE_IDR(ib_uverbs_rwq_ind_tbl_idr);
static DEFINE_SPINLOCK(map_lock);
static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
@@ -130,6 +132,11 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
[IB_USER_VERBS_EX_CMD_QUERY_DEVICE] = ib_uverbs_ex_query_device,
[IB_USER_VERBS_EX_CMD_CREATE_CQ] = ib_uverbs_ex_create_cq,
[IB_USER_VERBS_EX_CMD_CREATE_QP] = ib_uverbs_ex_create_qp,
+ [IB_USER_VERBS_EX_CMD_CREATE_WQ] = ib_uverbs_ex_create_wq,
+ [IB_USER_VERBS_EX_CMD_MODIFY_WQ] = ib_uverbs_ex_modify_wq,
+ [IB_USER_VERBS_EX_CMD_DESTROY_WQ] = ib_uverbs_ex_destroy_wq,
+ [IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL] = ib_uverbs_ex_create_rwq_ind_table,
+ [IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL] = ib_uverbs_ex_destroy_rwq_ind_table,
};
static void ib_uverbs_add_one(struct ib_device *device);
@@ -265,6 +272,27 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
kfree(uqp);
}
+ list_for_each_entry_safe(uobj, tmp, &context->rwq_ind_tbl_list, list) {
+ struct ib_rwq_ind_table *rwq_ind_tbl = uobj->object;
+ struct ib_wq **ind_tbl = rwq_ind_tbl->ind_tbl;
+
+ idr_remove_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj);
+ ib_destroy_rwq_ind_table(rwq_ind_tbl);
+ kfree(ind_tbl);
+ kfree(uobj);
+ }
+
+ list_for_each_entry_safe(uobj, tmp, &context->wq_list, list) {
+ struct ib_wq *wq = uobj->object;
+ struct ib_uwq_object *uwq =
+ container_of(uobj, struct ib_uwq_object, uevent.uobject);
+
+ idr_remove_uobj(&ib_uverbs_wq_idr, uobj);
+ ib_destroy_wq(wq);
+ ib_uverbs_release_uevent(file, &uwq->uevent);
+ kfree(uwq);
+ }
+
list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) {
struct ib_srq *srq = uobj->object;
struct ib_uevent_object *uevent =
@@ -568,6 +596,16 @@ void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
&uobj->events_reported);
}
+void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr)
+{
+ struct ib_uevent_object *uobj = container_of(event->element.wq->uobject,
+ struct ib_uevent_object, uobject);
+
+ ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
+ event->event, &uobj->event_list,
+ &uobj->events_reported);
+}
+
void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
{
struct ib_uevent_object *uobj;
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index e39a0b597..f2b776efa 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -758,6 +758,12 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
struct ib_qp *qp;
int ret;
+ if (qp_init_attr->rwq_ind_tbl &&
+ (qp_init_attr->recv_cq ||
+ qp_init_attr->srq || qp_init_attr->cap.max_recv_wr ||
+ qp_init_attr->cap.max_recv_sge))
+ return ERR_PTR(-EINVAL);
+
/*
* If the callers is using the RDMA API calculate the resources
* needed for the RDMA READ/WRITE operations.
@@ -775,6 +781,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
qp->real_qp = qp;
qp->uobject = NULL;
qp->qp_type = qp_init_attr->qp_type;
+ qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;
atomic_set(&qp->usecnt, 0);
qp->mrs_used = 0;
@@ -792,7 +799,8 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
qp->srq = NULL;
} else {
qp->recv_cq = qp_init_attr->recv_cq;
- atomic_inc(&qp_init_attr->recv_cq->usecnt);
+ if (qp_init_attr->recv_cq)
+ atomic_inc(&qp_init_attr->recv_cq->usecnt);
qp->srq = qp_init_attr->srq;
if (qp->srq)
atomic_inc(&qp_init_attr->srq->usecnt);
@@ -803,7 +811,10 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
qp->xrcd = NULL;
atomic_inc(&pd->usecnt);
- atomic_inc(&qp_init_attr->send_cq->usecnt);
+ if (qp_init_attr->send_cq)
+ atomic_inc(&qp_init_attr->send_cq->usecnt);
+ if (qp_init_attr->rwq_ind_tbl)
+ atomic_inc(&qp->rwq_ind_tbl->usecnt);
if (qp_init_attr->cap.max_rdma_ctxs) {
ret = rdma_rw_init_mrs(qp, qp_init_attr);
@@ -1292,6 +1303,7 @@ int ib_destroy_qp(struct ib_qp *qp)
struct ib_pd *pd;
struct ib_cq *scq, *rcq;
struct ib_srq *srq;
+ struct ib_rwq_ind_table *ind_tbl;
int ret;
WARN_ON_ONCE(qp->mrs_used > 0);
@@ -1306,6 +1318,7 @@ int ib_destroy_qp(struct ib_qp *qp)
scq = qp->send_cq;
rcq = qp->recv_cq;
srq = qp->srq;
+ ind_tbl = qp->rwq_ind_tbl;
if (!qp->uobject)
rdma_rw_cleanup_mrs(qp);
@@ -1320,6 +1333,8 @@ int ib_destroy_qp(struct ib_qp *qp)
atomic_dec(&rcq->usecnt);
if (srq)
atomic_dec(&srq->usecnt);
+ if (ind_tbl)
+ atomic_dec(&ind_tbl->usecnt);
}
return ret;
@@ -1567,6 +1582,150 @@ int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
}
EXPORT_SYMBOL(ib_dealloc_xrcd);
+/**
+ * ib_create_wq - Creates a WQ associated with the specified protection
+ * domain.
+ * @pd: The protection domain associated with the WQ.
+ * @wq_attr: A list of initial attributes required to create the
+ * WQ. If WQ creation succeeds, then the attributes are updated to
+ * the actual capabilities of the created WQ.
+ *
+ * wq_attr->max_wr and wq_attr->max_sge determine
+ * the requested size of the WQ, and are set to the actual values
+ * allocated on return.
+ * If ib_create_wq() succeeds, then max_wr and max_sge will always be
+ * at least as large as the requested values.
+ */
+struct ib_wq *ib_create_wq(struct ib_pd *pd,
+ struct ib_wq_init_attr *wq_attr)
+{
+ struct ib_wq *wq;
+
+ if (!pd->device->create_wq)
+ return ERR_PTR(-ENOSYS);
+
+ wq = pd->device->create_wq(pd, wq_attr, NULL);
+ if (!IS_ERR(wq)) {
+ wq->event_handler = wq_attr->event_handler;
+ wq->wq_context = wq_attr->wq_context;
+ wq->wq_type = wq_attr->wq_type;
+ wq->cq = wq_attr->cq;
+ wq->device = pd->device;
+ wq->pd = pd;
+ wq->uobject = NULL;
+ atomic_inc(&pd->usecnt);
+ atomic_inc(&wq_attr->cq->usecnt);
+ atomic_set(&wq->usecnt, 0);
+ }
+ return wq;
+}
+EXPORT_SYMBOL(ib_create_wq);
+
+/**
+ * ib_destroy_wq - Destroys the specified WQ.
+ * @wq: The WQ to destroy.
+ */
+int ib_destroy_wq(struct ib_wq *wq)
+{
+ int err;
+ struct ib_cq *cq = wq->cq;
+ struct ib_pd *pd = wq->pd;
+
+ if (atomic_read(&wq->usecnt))
+ return -EBUSY;
+
+ err = wq->device->destroy_wq(wq);
+ if (!err) {
+ atomic_dec(&pd->usecnt);
+ atomic_dec(&cq->usecnt);
+ }
+ return err;
+}
+EXPORT_SYMBOL(ib_destroy_wq);
+
+/**
+ * ib_modify_wq - Modifies the specified WQ.
+ * @wq: The WQ to modify.
+ * @wq_attr: On input, specifies the WQ attributes to modify.
+ * On output, the current values of selected WQ attributes are returned.
+ * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ
+ * are being modified.
+ */
+int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
+ u32 wq_attr_mask)
+{
+ int err;
+
+ if (!wq->device->modify_wq)
+ return -ENOSYS;
+
+ err = wq->device->modify_wq(wq, wq_attr, wq_attr_mask, NULL);
+ return err;
+}
+EXPORT_SYMBOL(ib_modify_wq);
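
[Editor's note] A hedged sketch of the expected state transition; the
attribute and mask names follow the ib_wq_attr definitions that accompany
this series, not something shown in this hunk.

/* Sketch: move a freshly created WQ from RESET to ready. */
static int example_ready_wq(struct ib_wq *wq)
{
	struct ib_wq_attr attr = {
		.wq_state      = IB_WQS_RDY,
		.curr_wq_state = IB_WQS_RESET,
	};

	return ib_modify_wq(wq, &attr, IB_WQ_STATE | IB_WQ_CUR_STATE);
}
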
+
+/**
+ * ib_create_rwq_ind_table - Creates a RQ Indirection Table.
+ * @device: The device on which to create the rwq indirection table.
+ * @init_attr: A list of initial attributes required to
+ * create the Indirection Table.
+ *
+ * Note: The lifetime of init_attr->ind_tbl must be at least as long as
+ * that of the created ib_rwq_ind_table object; the caller is responsible
+ * for allocating and freeing that memory.
+ */
+struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
+ struct ib_rwq_ind_table_init_attr *init_attr)
+{
+ struct ib_rwq_ind_table *rwq_ind_table;
+ int i;
+ u32 table_size;
+
+ if (!device->create_rwq_ind_table)
+ return ERR_PTR(-ENOSYS);
+
+ table_size = (1 << init_attr->log_ind_tbl_size);
+ rwq_ind_table = device->create_rwq_ind_table(device,
+ init_attr, NULL);
+ if (IS_ERR(rwq_ind_table))
+ return rwq_ind_table;
+
+ rwq_ind_table->ind_tbl = init_attr->ind_tbl;
+ rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size;
+ rwq_ind_table->device = device;
+ rwq_ind_table->uobject = NULL;
+ atomic_set(&rwq_ind_table->usecnt, 0);
+
+ for (i = 0; i < table_size; i++)
+ atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt);
+
+ return rwq_ind_table;
+}
+EXPORT_SYMBOL(ib_create_rwq_ind_table);
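
[Editor's note] A sketch of a caller honoring the lifetime rule above;
wqs[] is owned by the caller and must outlive the table, and the values
are illustrative only.

/* Sketch: build a four-entry indirection table over existing WQs. */
static struct ib_rwq_ind_table *
example_create_ind_table(struct ib_device *device, struct ib_wq **wqs)
{
	struct ib_rwq_ind_table_init_attr init_attr = {
		.log_ind_tbl_size = 2,	/* 1 << 2 == 4 entries */
		.ind_tbl          = wqs,
	};

	return ib_create_rwq_ind_table(device, &init_attr);
}
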
+
+/**
+ * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table.
+ * @rwq_ind_table: The Indirection Table to destroy.
+ */
+int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
+{
+ int err, i;
+ u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size);
+ struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl;
+
+ if (atomic_read(&rwq_ind_table->usecnt))
+ return -EBUSY;
+
+ err = rwq_ind_table->device->destroy_rwq_ind_table(rwq_ind_table);
+ if (!err) {
+ for (i = 0; i < table_size; i++)
+ atomic_dec(&ind_tbl[i]->usecnt);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(ib_destroy_rwq_ind_table);
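
[Editor's note] Teardown mirrors creation in reverse; a sketch, assuming
the four-WQ table from the creation example above.

/* Sketch: destroy the table first (usecnt must be zero), then its WQs. */
static int example_teardown(struct ib_rwq_ind_table *ind_tbl,
			    struct ib_wq **wqs, int n)
{
	int i, ret;

	ret = ib_destroy_rwq_ind_table(ind_tbl);
	if (ret)
		return ret;
	for (i = 0; i < n; i++)
		ib_destroy_wq(wqs[i]);
	return 0;
}
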
+
struct ib_flow *ib_create_flow(struct ib_qp *qp,
struct ib_flow_attr *flow_attr,
int domain)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 3e8431b5c..04bbf172a 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1396,10 +1396,10 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
state_set(&child_ep->com, CONNECTING);
child_ep->com.tdev = tdev;
child_ep->com.cm_id = NULL;
- child_ep->com.local_addr.sin_family = PF_INET;
+ child_ep->com.local_addr.sin_family = AF_INET;
child_ep->com.local_addr.sin_port = req->local_port;
child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;
- child_ep->com.remote_addr.sin_family = PF_INET;
+ child_ep->com.remote_addr.sin_family = AF_INET;
child_ep->com.remote_addr.sin_port = req->peer_port;
child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;
get_ep(&parent_ep->com);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index bb1a839d4..3edb80644 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1183,18 +1183,6 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
}
-static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
- ibdev.dev);
- struct ethtool_drvinfo info;
- struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
-
- PDBG("%s dev 0x%p\n", __func__, dev);
- lldev->ethtool_ops->get_drvinfo(lldev, &info);
- return sprintf(buf, "%s\n", info.fw_version);
-}
-
static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1334,13 +1322,11 @@ static int iwch_get_mib(struct ib_device *ibdev, struct rdma_hw_stats *stats,
}
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
static struct device_attribute *iwch_class_attributes[] = {
&dev_attr_hw_rev,
- &dev_attr_fw_ver,
&dev_attr_hca_type,
&dev_attr_board_id,
};
@@ -1362,6 +1348,18 @@ static int iwch_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
+static void get_dev_fw_ver_str(struct ib_device *ibdev, char *str,
+ size_t str_len)
+{
+ struct iwch_dev *iwch_dev = to_iwch_dev(ibdev);
+ struct ethtool_drvinfo info;
+ struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
+
+ PDBG("%s dev 0x%p\n", __func__, iwch_dev);
+ lldev->ethtool_ops->get_drvinfo(lldev, &info);
+ snprintf(str, str_len, "%s", info.fw_version);
+}
+
int iwch_register_device(struct iwch_dev *dev)
{
int ret;
@@ -1437,6 +1435,7 @@ int iwch_register_device(struct iwch_dev *dev)
dev->ibdev.get_hw_stats = iwch_get_mib;
dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;
dev->ibdev.get_port_immutable = iwch_port_immutable;
+ dev->ibdev.get_dev_fw_str = get_dev_fw_ver_str;
dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
if (!dev->ibdev.iwcm)
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index be00c4bc0..80f988984 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -294,6 +294,25 @@ static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
return;
}
+static int alloc_ep_skb_list(struct sk_buff_head *ep_skb_list, int size)
+{
+ struct sk_buff *skb;
+ unsigned int i;
+ size_t len;
+
+ len = roundup(sizeof(union cpl_wr_size), 16);
+ for (i = 0; i < size; i++) {
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ goto fail;
+ skb_queue_tail(ep_skb_list, skb);
+ }
+ return 0;
+fail:
+ skb_queue_purge(ep_skb_list);
+ return -ENOMEM;
+}
+
static void *alloc_ep(int size, gfp_t gfp)
{
struct c4iw_ep_common *epc;
@@ -314,6 +333,8 @@ static void remove_ep_tid(struct c4iw_ep *ep)
spin_lock_irqsave(&ep->com.dev->lock, flags);
_remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid, 0);
+ if (idr_is_empty(&ep->com.dev->hwtid_idr))
+ wake_up(&ep->com.dev->wait);
spin_unlock_irqrestore(&ep->com.dev->lock, flags);
}
@@ -384,6 +405,8 @@ void _c4iw_free_ep(struct kref *kref)
if (ep->mpa_skb)
kfree_skb(ep->mpa_skb);
}
+ if (!skb_queue_empty(&ep->com.ep_skb_list))
+ skb_queue_purge(&ep->com.ep_skb_list);
kfree(ep);
}
@@ -620,25 +643,27 @@ static void abort_arp_failure(void *handle, struct sk_buff *skb)
}
}
-static int send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
+static int send_flowc(struct c4iw_ep *ep)
{
- unsigned int flowclen = 80;
struct fw_flowc_wr *flowc;
+ struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
int i;
u16 vlan = ep->l2t->vlan;
int nparams;
+ if (WARN_ON(!skb))
+ return -ENOMEM;
+
if (vlan == CPL_L2T_VLAN_NONE)
nparams = 8;
else
nparams = 9;
- skb = get_skb(skb, flowclen, GFP_KERNEL);
- flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
+ flowc = (struct fw_flowc_wr *)__skb_put(skb, FLOWC_LEN);
flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
FW_FLOWC_WR_NPARAMS_V(nparams));
- flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(flowclen,
+ flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(FLOWC_LEN,
16)) | FW_WR_FLOWID_V(ep->hwtid));
flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
@@ -679,18 +704,16 @@ static int send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
return c4iw_ofld_send(&ep->com.dev->rdev, skb);
}
-static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
+static int send_halfclose(struct c4iw_ep *ep)
{
struct cpl_close_con_req *req;
- struct sk_buff *skb;
+ struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
int wrlen = roundup(sizeof *req, 16);
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
- skb = get_skb(NULL, wrlen, gfp);
- if (!skb) {
- printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
+ if (WARN_ON(!skb))
return -ENOMEM;
- }
+
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
@@ -701,26 +724,24 @@ static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
-static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
+static int send_abort(struct c4iw_ep *ep)
{
struct cpl_abort_req *req;
int wrlen = roundup(sizeof *req, 16);
+ struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list);
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
- skb = get_skb(skb, wrlen, gfp);
- if (!skb) {
- printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
- __func__);
+ if (WARN_ON(!req_skb))
return -ENOMEM;
- }
- set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
- t4_set_arp_err_handler(skb, ep, abort_arp_failure);
- req = (struct cpl_abort_req *) skb_put(skb, wrlen);
+
+ set_wr_txq(req_skb, CPL_PRIORITY_DATA, ep->txq_idx);
+ t4_set_arp_err_handler(req_skb, ep, abort_arp_failure);
+ req = (struct cpl_abort_req *)skb_put(req_skb, wrlen);
memset(req, 0, wrlen);
INIT_TP_WR(req, ep->hwtid);
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
req->cmd = CPL_ABORT_SEND_RST;
- return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+ return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t);
}
static void best_mtu(const unsigned short *mtus, unsigned short mtu,
@@ -992,9 +1013,19 @@ static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
mpa = (struct mpa_message *)(req + 1);
memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
- mpa->flags = (crc_enabled ? MPA_CRC : 0) |
- (markers_enabled ? MPA_MARKERS : 0) |
- (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
+
+ mpa->flags = 0;
+ if (crc_enabled)
+ mpa->flags |= MPA_CRC;
+ if (markers_enabled) {
+ mpa->flags |= MPA_MARKERS;
+ ep->mpa_attr.recv_marker_enabled = 1;
+ } else {
+ ep->mpa_attr.recv_marker_enabled = 0;
+ }
+ if (mpa_rev_to_use == 2)
+ mpa->flags |= MPA_ENHANCED_RDMA_CONN;
+
mpa->private_data_size = htons(ep->plen);
mpa->revision = mpa_rev_to_use;
if (mpa_rev_to_use == 1) {
@@ -1169,8 +1200,11 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
mpa = (struct mpa_message *)(req + 1);
memset(mpa, 0, sizeof(*mpa));
memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
- mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
- (markers_enabled ? MPA_MARKERS : 0);
+ mpa->flags = 0;
+ if (ep->mpa_attr.crc_enabled)
+ mpa->flags |= MPA_CRC;
+ if (ep->mpa_attr.recv_marker_enabled)
+ mpa->flags |= MPA_MARKERS;
mpa->revision = ep->mpa_attr.version;
mpa->private_data_size = htons(plen);
@@ -1248,7 +1282,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
set_bit(ACT_ESTAB, &ep->com.history);
/* start MPA negotiation */
- ret = send_flowc(ep, NULL);
+ ret = send_flowc(ep);
if (ret)
goto err;
if (ep->retry_with_mpa_v1)
@@ -1555,7 +1589,6 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
*/
__state_set(&ep->com, FPDU_MODE);
ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
- ep->mpa_attr.recv_marker_enabled = markers_enabled;
ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
ep->mpa_attr.version = mpa->revision;
ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
@@ -1796,8 +1829,12 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
(ep->mpa_pkt + sizeof(*mpa));
ep->ird = ntohs(mpa_v2_params->ird) &
MPA_V2_IRD_ORD_MASK;
+ ep->ird = min_t(u32, ep->ird,
+ cur_max_read_depth(ep->com.dev));
ep->ord = ntohs(mpa_v2_params->ord) &
MPA_V2_IRD_ORD_MASK;
+ ep->ord = min_t(u32, ep->ord,
+ cur_max_read_depth(ep->com.dev));
PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
ep->ord);
if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
@@ -2004,12 +2041,17 @@ static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
}
/*
- * Return whether a failed active open has allocated a TID
+ * Some of the error codes above implicitly indicate that there is no TID
+ * allocated as a result of an ACT_OPEN. We use this predicate to make
+ * that explicit.
*/
static inline int act_open_has_tid(int status)
{
- return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
- status != CPL_ERR_ARP_MISS;
+ return (status != CPL_ERR_TCAM_PARITY &&
+ status != CPL_ERR_TCAM_MISS &&
+ status != CPL_ERR_TCAM_FULL &&
+ status != CPL_ERR_CONN_EXIST_SYNRECV &&
+ status != CPL_ERR_CONN_EXIST);
}
/* Returns whether a CPL status conveys negative advice.
@@ -2077,8 +2119,10 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
}
ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
n, pdev, rt_tos2priority(tos));
- if (!ep->l2t)
+ if (!ep->l2t) {
+ dev_put(pdev);
goto out;
+ }
ep->mtu = pdev->mtu;
ep->tx_chan = cxgb4_port_chan(pdev);
ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
@@ -2130,6 +2174,7 @@ out:
static int c4iw_reconnect(struct c4iw_ep *ep)
{
int err = 0;
+ int size = 0;
struct sockaddr_in *laddr = (struct sockaddr_in *)
&ep->com.cm_id->m_local_addr;
struct sockaddr_in *raddr = (struct sockaddr_in *)
@@ -2145,6 +2190,21 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
init_timer(&ep->timer);
c4iw_init_wr_wait(&ep->com.wr_wait);
+ /* When the MPA revision differs between the nodes, the node with
+ * MPA_rev=2 retries the connection with MPA_rev 1 for the same EP
+ * through c4iw_reconnect(), and the EP is assigned a new tid for the
+ * new connection attempt. Because the same EP pointer is reused, some
+ * skbs were already consumed by the previous c4iw_connect(), leaving
+ * the EP with too few skbs for the reconnect and eventually triggering
+ * a BUG_ON() on an empty ep_skb_list in peer_abort(). Replenish the
+ * skbs that were already consumed.
+ */
+ size = (CN_MAX_CON_BUF - skb_queue_len(&ep->com.ep_skb_list));
+ if (alloc_ep_skb_list(&ep->com.ep_skb_list, size)) {
+ err = -ENOMEM;
+ goto fail1;
+ }
+
/*
* Allocate an active TID to initiate a TCP connection.
*/
@@ -2210,6 +2270,7 @@ fail2:
* response of 1st connect request.
*/
connect_reply_upcall(ep, -ECONNRESET);
+fail1:
c4iw_put_ep(&ep->com);
out:
return err;
@@ -2576,6 +2637,10 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
child_ep->mtu = peer_mss + hdrs;
+ skb_queue_head_init(&child_ep->com.ep_skb_list);
+ if (alloc_ep_skb_list(&child_ep->com.ep_skb_list, CN_MAX_CON_BUF))
+ goto fail;
+
state_set(&child_ep->com, CONNECTING);
child_ep->com.dev = dev;
child_ep->com.cm_id = NULL;
@@ -2640,6 +2705,8 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
(const u32 *)&sin6->sin6_addr.s6_addr, 1);
}
goto out;
+fail:
+ c4iw_put_ep(&child_ep->com);
reject:
reject_cr(dev, hwtid, skb);
if (parent_ep)
@@ -2670,7 +2737,7 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
ep->com.state = MPA_REQ_WAIT;
start_ep_timer(ep);
set_bit(PASS_ESTAB, &ep->com.history);
- ret = send_flowc(ep, skb);
+ ret = send_flowc(ep);
mutex_unlock(&ep->com.mutex);
if (ret)
c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
@@ -2871,10 +2938,8 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
}
mutex_unlock(&ep->com.mutex);
- rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
- if (!rpl_skb) {
- printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
- __func__);
+ rpl_skb = skb_dequeue(&ep->com.ep_skb_list);
+ if (WARN_ON(!rpl_skb)) {
release = 1;
goto out;
}
@@ -3025,9 +3090,9 @@ out:
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
- int err = 0;
- int disconnect = 0;
+ int abort;
struct c4iw_ep *ep = to_ep(cm_id);
+
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
mutex_lock(&ep->com.mutex);
@@ -3038,16 +3103,13 @@ int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
}
set_bit(ULP_REJECT, &ep->com.history);
if (mpa_rev == 0)
- disconnect = 2;
- else {
- err = send_mpa_reject(ep, pdata, pdata_len);
- disconnect = 1;
- }
+ abort = 1;
+ else
+ abort = send_mpa_reject(ep, pdata, pdata_len);
mutex_unlock(&ep->com.mutex);
- if (disconnect) {
- stop_ep_timer(ep);
- err = c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
- }
+
+ stop_ep_timer(ep);
+ c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL);
c4iw_put_ep(&ep->com);
return 0;
}
@@ -3082,7 +3144,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
if (conn_param->ord > ep->ird) {
if (RELAXED_IRD_NEGOTIATION) {
- ep->ord = ep->ird;
+ conn_param->ord = ep->ird;
} else {
ep->ird = conn_param->ird;
ep->ord = conn_param->ord;
@@ -3248,6 +3310,13 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
err = -ENOMEM;
goto out;
}
+
+ skb_queue_head_init(&ep->com.ep_skb_list);
+ if (alloc_ep_skb_list(&ep->com.ep_skb_list, CN_MAX_CON_BUF)) {
+ err = -ENOMEM;
+ goto fail1;
+ }
+
init_timer(&ep->timer);
ep->plen = conn_param->private_data_len;
if (ep->plen)
@@ -3266,7 +3335,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (!ep->com.qp) {
PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
err = -EINVAL;
- goto fail1;
+ goto fail2;
}
ref_qp(ep);
PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
@@ -3279,7 +3348,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (ep->atid == -1) {
printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
err = -ENOMEM;
- goto fail1;
+ goto fail2;
}
insert_handle(dev, &dev->atid_idr, ep, ep->atid);
@@ -3303,7 +3372,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) {
err = pick_local_ipaddrs(dev, cm_id);
if (err)
- goto fail1;
+ goto fail2;
}
/* find a route */
@@ -3323,7 +3392,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
err = pick_local_ip6addrs(dev, cm_id);
if (err)
- goto fail1;
+ goto fail2;
}
/* find a route */
@@ -3339,14 +3408,14 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (!ep->dst) {
printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
err = -EHOSTUNREACH;
- goto fail2;
+ goto fail3;
}
err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true,
ep->com.dev->rdev.lldi.adapter_type, cm_id->tos);
if (err) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
- goto fail3;
+ goto fail4;
}
PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
@@ -3362,13 +3431,15 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
goto out;
cxgb4_l2t_release(ep->l2t);
-fail3:
+fail4:
dst_release(ep->dst);
-fail2:
+fail3:
remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
-fail1:
+fail2:
+ skb_queue_purge(&ep->com.ep_skb_list);
deref_cm_id(&ep->com);
+fail1:
c4iw_put_ep(&ep->com);
out:
return err;
@@ -3461,6 +3532,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
err = -ENOMEM;
goto fail1;
}
+ skb_queue_head_init(&ep->com.ep_skb_list);
PDBG("%s ep %p\n", __func__, ep);
ep->com.cm_id = cm_id;
ref_cm_id(&ep->com);
@@ -3577,6 +3649,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
case MPA_REQ_RCVD:
case MPA_REP_SENT:
case FPDU_MODE:
+ case CONNECTING:
close = 1;
if (abrupt)
ep->com.state = ABORTING;
@@ -3621,10 +3694,10 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
if (abrupt) {
set_bit(EP_DISC_ABORT, &ep->com.history);
close_complete_upcall(ep, -ECONNRESET);
- ret = send_abort(ep, NULL, gfp);
+ ret = send_abort(ep);
} else {
set_bit(EP_DISC_CLOSE, &ep->com.history);
- ret = send_halfclose(ep, gfp);
+ ret = send_halfclose(ep);
}
if (ret) {
set_bit(EP_DISC_FAIL, &ep->com.history);
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index b0b955724..ac926c942 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -33,19 +33,15 @@
#include "iw_cxgb4.h"
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
- struct c4iw_dev_ucontext *uctx)
+ struct c4iw_dev_ucontext *uctx, struct sk_buff *skb)
{
struct fw_ri_res_wr *res_wr;
struct fw_ri_res *res;
int wr_len;
struct c4iw_wr_wait wr_wait;
- struct sk_buff *skb;
int ret;
wr_len = sizeof *res_wr + sizeof *res;
- skb = alloc_skb(wr_len, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
@@ -863,7 +859,9 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq)
ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
: NULL;
destroy_cq(&chp->rhp->rdev, &chp->cq,
- ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
+ ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
+ chp->destroy_skb);
+ chp->destroy_skb = NULL;
kfree(chp);
return 0;
}
@@ -879,7 +877,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
struct c4iw_cq *chp;
struct c4iw_create_cq_resp uresp;
struct c4iw_ucontext *ucontext = NULL;
- int ret;
+ int ret, wr_len;
size_t memsize, hwentries;
struct c4iw_mm_entry *mm, *mm2;
@@ -896,6 +894,13 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
if (!chp)
return ERR_PTR(-ENOMEM);
+ wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
+ chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
+ if (!chp->destroy_skb) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
if (ib_context)
ucontext = to_c4iw_ucontext(ib_context);
@@ -936,7 +941,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
ret = create_cq(&rhp->rdev, &chp->cq,
ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
if (ret)
- goto err1;
+ goto err2;
chp->rhp = rhp;
chp->cq.size--; /* status page */
@@ -947,15 +952,15 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
init_waitqueue_head(&chp->wait);
ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
if (ret)
- goto err2;
+ goto err3;
if (ucontext) {
mm = kmalloc(sizeof *mm, GFP_KERNEL);
if (!mm)
- goto err3;
+ goto err4;
mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
if (!mm2)
- goto err4;
+ goto err5;
uresp.qid_mask = rhp->rdev.cqmask;
uresp.cqid = chp->cq.cqid;
@@ -970,7 +975,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
ret = ib_copy_to_udata(udata, &uresp,
sizeof(uresp) - sizeof(uresp.reserved));
if (ret)
- goto err5;
+ goto err6;
mm->key = uresp.key;
mm->addr = virt_to_phys(chp->cq.queue);
@@ -986,15 +991,18 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
__func__, chp->cq.cqid, chp, chp->cq.size,
chp->cq.memsize, (unsigned long long) chp->cq.dma_addr);
return &chp->ibcq;
-err5:
+err6:
kfree(mm2);
-err4:
+err5:
kfree(mm);
-err3:
+err4:
remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
-err2:
+err3:
destroy_cq(&chp->rhp->rdev, &chp->cq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
+ chp->destroy_skb);
+err2:
+ kfree_skb(chp->destroy_skb);
err1:
kfree(chp);
return ERR_PTR(ret);
@@ -1008,15 +1016,15 @@ int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
struct c4iw_cq *chp;
- int ret;
+ int ret = 0;
unsigned long flag;
chp = to_c4iw_cq(ibcq);
spin_lock_irqsave(&chp->lock, flag);
- ret = t4_arm_cq(&chp->cq,
- (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
+ t4_arm_cq(&chp->cq,
+ (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
+ if (flags & IB_CQ_REPORT_MISSED_EVENTS)
+ ret = t4_cq_notempty(&chp->cq);
spin_unlock_irqrestore(&chp->lock, flag);
- if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
- ret = 0;
return ret;
}
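
[Editor's note] With this change c4iw_arm_cq() follows the documented
ib_req_notify_cq() contract for IB_CQ_REPORT_MISSED_EVENTS: a positive
return means completions may have been missed. A sketch of the consumer
loop that relies on it; handle_wc() is a hypothetical handler.

static void example_drain_and_rearm(struct ib_cq *cq)
{
	struct ib_wc wc;

	do {
		while (ib_poll_cq(cq, 1, &wc) > 0)
			handle_wc(&wc);	/* hypothetical consumer */
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
}
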
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index ae2e8b23d..3c4b2126e 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -317,7 +317,7 @@ static int qp_open(struct inode *inode, struct file *file)
idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
spin_unlock_irq(&qpd->devp->lock);
- qpd->bufsize = count * 128;
+ qpd->bufsize = count * 180;
qpd->buf = vmalloc(qpd->bufsize);
if (!qpd->buf) {
kfree(qpd);
@@ -872,9 +872,13 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
static void c4iw_dealloc(struct uld_ctx *ctx)
{
c4iw_rdev_close(&ctx->dev->rdev);
+ WARN_ON_ONCE(!idr_is_empty(&ctx->dev->cqidr));
idr_destroy(&ctx->dev->cqidr);
+ WARN_ON_ONCE(!idr_is_empty(&ctx->dev->qpidr));
idr_destroy(&ctx->dev->qpidr);
+ WARN_ON_ONCE(!idr_is_empty(&ctx->dev->mmidr));
idr_destroy(&ctx->dev->mmidr);
+ wait_event(ctx->dev->wait, idr_is_empty(&ctx->dev->hwtid_idr));
idr_destroy(&ctx->dev->hwtid_idr);
idr_destroy(&ctx->dev->stid_idr);
idr_destroy(&ctx->dev->atid_idr);
@@ -992,6 +996,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
mutex_init(&devp->rdev.stats.lock);
mutex_init(&devp->db_mutex);
INIT_LIST_HEAD(&devp->db_fc_list);
+ init_waitqueue_head(&devp->wait);
devp->avail_ird = devp->rdev.lldi.max_ird_adapter;
if (c4iw_debugfs_root) {
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index f6f34a75a..4b83b84f7 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -263,6 +263,7 @@ struct c4iw_dev {
struct idr stid_idr;
struct list_head db_fc_list;
u32 avail_ird;
+ wait_queue_head_t wait;
};
static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
@@ -384,6 +385,7 @@ struct c4iw_mr {
struct ib_mr ibmr;
struct ib_umem *umem;
struct c4iw_dev *rhp;
+ struct sk_buff *dereg_skb;
u64 kva;
struct tpt_attributes attr;
u64 *mpl;
@@ -400,6 +402,7 @@ static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
struct c4iw_mw {
struct ib_mw ibmw;
struct c4iw_dev *rhp;
+ struct sk_buff *dereg_skb;
u64 kva;
struct tpt_attributes attr;
};
@@ -412,6 +415,7 @@ static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
struct c4iw_cq {
struct ib_cq ibcq;
struct c4iw_dev *rhp;
+ struct sk_buff *destroy_skb;
struct t4_cq cq;
spinlock_t lock;
spinlock_t comp_handler_lock;
@@ -472,7 +476,7 @@ struct c4iw_qp {
struct t4_wq wq;
spinlock_t lock;
struct mutex mutex;
- atomic_t refcnt;
+ struct kref kref;
wait_queue_head_t wait;
struct timer_list timer;
int sq_sig_all;
@@ -789,10 +793,29 @@ enum c4iw_ep_history {
CM_ID_DEREFED = 28,
};
+enum conn_pre_alloc_buffers {
+ CN_ABORT_REQ_BUF,
+ CN_ABORT_RPL_BUF,
+ CN_CLOSE_CON_REQ_BUF,
+ CN_DESTROY_BUF,
+ CN_FLOWC_BUF,
+ CN_MAX_CON_BUF
+};
+
+#define FLOWC_LEN 80
+union cpl_wr_size {
+ struct cpl_abort_req abrt_req;
+ struct cpl_abort_rpl abrt_rpl;
+ struct fw_ri_wr ri_req;
+ struct cpl_close_con_req close_req;
+ char flowc_buf[FLOWC_LEN];
+};
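
[Editor's note] The union exists so that one skb size fits every control
message an endpoint may have to emit. A sketch of the sizing logic, which
mirrors alloc_ep_skb_list() earlier in this patch:

/* Sketch: one pre-allocated skb, big enough for any entry in the union. */
static struct sk_buff *example_prealloc_one(void)
{
	size_t len = roundup(sizeof(union cpl_wr_size), 16);

	return alloc_skb(len, GFP_KERNEL);
}
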
+
struct c4iw_ep_common {
struct iw_cm_id *cm_id;
struct c4iw_qp *qp;
struct c4iw_dev *dev;
+ struct sk_buff_head ep_skb_list;
enum c4iw_ep_state state;
struct kref kref;
struct mutex mutex;
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 55d0651ee..0b91b0f4d 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -59,9 +59,9 @@ static int mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
}
static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
- u32 len, dma_addr_t data, int wait)
+ u32 len, dma_addr_t data,
+ int wait, struct sk_buff *skb)
{
- struct sk_buff *skb;
struct ulp_mem_io *req;
struct ulptx_sgl *sgl;
u8 wr_len;
@@ -74,9 +74,11 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
c4iw_init_wr_wait(&wr_wait);
wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16);
- skb = alloc_skb(wr_len, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
+ if (!skb) {
+ skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb)
+ return -ENOMEM;
+ }
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
@@ -108,9 +110,8 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
}
static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
- void *data)
+ void *data, struct sk_buff *skb)
{
- struct sk_buff *skb;
struct ulp_mem_io *req;
struct ulptx_idata *sc;
u8 wr_len, *to_dp, *from_dp;
@@ -134,9 +135,11 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
wr_len = roundup(sizeof *req + sizeof *sc +
roundup(copy_len, T4_ULPTX_MIN_IO), 16);
- skb = alloc_skb(wr_len, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
+ if (!skb) {
+ skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb)
+ return -ENOMEM;
+ }
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
@@ -173,6 +176,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
(copy_len % T4_ULPTX_MIN_IO));
ret = c4iw_ofld_send(rdev, skb);
+ skb = NULL;
if (ret)
return ret;
len -= C4IW_MAX_INLINE_SIZE;
@@ -182,7 +186,8 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
return ret;
}
-static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
+static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len,
+ void *data, struct sk_buff *skb)
{
u32 remain = len;
u32 dmalen;
@@ -205,7 +210,7 @@ static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *
dmalen = T4_ULPTX_MAX_DMA;
remain -= dmalen;
ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr,
- !remain);
+ !remain, skb);
if (ret)
goto out;
addr += dmalen >> 5;
@@ -213,7 +218,7 @@ static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *
daddr += dmalen;
}
if (remain)
- ret = _c4iw_write_mem_inline(rdev, addr, remain, data);
+ ret = _c4iw_write_mem_inline(rdev, addr, remain, data, skb);
out:
dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE);
return ret;
@@ -224,23 +229,25 @@ out:
* If data is NULL, clear len byte of memory to zero.
*/
static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
- void *data)
+ void *data, struct sk_buff *skb)
{
if (is_t5(rdev->lldi.adapter_type) && use_dsgl) {
if (len > inline_threshold) {
- if (_c4iw_write_mem_dma(rdev, addr, len, data)) {
+ if (_c4iw_write_mem_dma(rdev, addr, len, data, skb)) {
printk_ratelimited(KERN_WARNING
"%s: dma map"
" failure (non fatal)\n",
pci_name(rdev->lldi.pdev));
return _c4iw_write_mem_inline(rdev, addr, len,
- data);
- } else
+ data, skb);
+ } else {
return 0;
+ }
} else
- return _c4iw_write_mem_inline(rdev, addr, len, data);
+ return _c4iw_write_mem_inline(rdev, addr,
+ len, data, skb);
} else
- return _c4iw_write_mem_inline(rdev, addr, len, data);
+ return _c4iw_write_mem_inline(rdev, addr, len, data, skb);
}
/*
@@ -253,7 +260,8 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
u32 *stag, u8 stag_state, u32 pdid,
enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
int bind_enabled, u32 zbva, u64 to,
- u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
+ u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr,
+ struct sk_buff *skb)
{
int err;
struct fw_ri_tpte tpt;
@@ -307,7 +315,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
}
err = write_adapter_mem(rdev, stag_idx +
(rdev->lldi.vr->stag.start >> 5),
- sizeof(tpt), &tpt);
+ sizeof(tpt), &tpt, skb);
if (reset_tpt_entry) {
c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
@@ -327,28 +335,29 @@ static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
__func__, pbl_addr, rdev->lldi.vr->pbl.start,
pbl_size);
- err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
+ err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl, NULL);
return err;
}
static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
- u32 pbl_addr)
+ u32 pbl_addr, struct sk_buff *skb)
{
return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
- pbl_size, pbl_addr);
+ pbl_size, pbl_addr, skb);
}
static int allocate_window(struct c4iw_rdev *rdev, u32 * stag, u32 pdid)
{
*stag = T4_STAG_UNSET;
return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
- 0UL, 0, 0, 0, 0);
+ 0UL, 0, 0, 0, 0, NULL);
}
-static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
+static int deallocate_window(struct c4iw_rdev *rdev, u32 stag,
+ struct sk_buff *skb)
{
return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
- 0);
+ 0, skb);
}
static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
@@ -356,7 +365,7 @@ static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
{
*stag = T4_STAG_UNSET;
return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
- 0UL, 0, 0, pbl_size, pbl_addr);
+ 0UL, 0, 0, pbl_size, pbl_addr, NULL);
}
static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
@@ -383,14 +392,16 @@ static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
mhp->attr.mw_bind_enable, mhp->attr.zbva,
mhp->attr.va_fbo, mhp->attr.len ?
mhp->attr.len : -1, shift - 12,
- mhp->attr.pbl_size, mhp->attr.pbl_addr);
+ mhp->attr.pbl_size, mhp->attr.pbl_addr, NULL);
if (ret)
return ret;
ret = finish_mem_reg(mhp, stag);
- if (ret)
+ if (ret) {
dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr);
+ mhp->attr.pbl_addr, mhp->dereg_skb);
+ mhp->dereg_skb = NULL;
+ }
return ret;
}
@@ -423,6 +434,12 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
if (!mhp)
return ERR_PTR(-ENOMEM);
+ mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
+ if (!mhp->dereg_skb) {
+ ret = -ENOMEM;
+ goto err0;
+ }
+
mhp->rhp = rhp;
mhp->attr.pdid = php->pdid;
mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
@@ -435,7 +452,8 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
FW_RI_STAG_NSMR, mhp->attr.perms,
- mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0);
+ mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0,
+ NULL);
if (ret)
goto err1;
@@ -445,8 +463,10 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
return &mhp->ibmr;
err2:
dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr);
+ mhp->attr.pbl_addr, mhp->dereg_skb);
err1:
+ kfree_skb(mhp->dereg_skb);
+err0:
kfree(mhp);
return ERR_PTR(ret);
}
@@ -481,11 +501,18 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mhp)
return ERR_PTR(-ENOMEM);
+ mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
+ if (!mhp->dereg_skb) {
+ kfree(mhp);
+ return ERR_PTR(-ENOMEM);
+ }
+
mhp->rhp = rhp;
mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
if (IS_ERR(mhp->umem)) {
err = PTR_ERR(mhp->umem);
+ kfree_skb(mhp->dereg_skb);
kfree(mhp);
return ERR_PTR(err);
}
@@ -550,6 +577,7 @@ err_pbl:
err:
ib_umem_release(mhp->umem);
+ kfree_skb(mhp->dereg_skb);
kfree(mhp);
return ERR_PTR(err);
}
@@ -572,11 +600,16 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
if (!mhp)
return ERR_PTR(-ENOMEM);
- ret = allocate_window(&rhp->rdev, &stag, php->pdid);
- if (ret) {
- kfree(mhp);
- return ERR_PTR(ret);
+
+ mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
+ if (!mhp->dereg_skb) {
+ ret = -ENOMEM;
+ goto free_mhp;
}
+
+ ret = allocate_window(&rhp->rdev, &stag, php->pdid);
+ if (ret)
+ goto free_skb;
mhp->rhp = rhp;
mhp->attr.pdid = php->pdid;
mhp->attr.type = FW_RI_STAG_MW;
@@ -584,12 +617,19 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
mmid = (stag) >> 8;
mhp->ibmw.rkey = stag;
if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
- deallocate_window(&rhp->rdev, mhp->attr.stag);
- kfree(mhp);
- return ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
+ goto dealloc_win;
}
PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
return &(mhp->ibmw);
+
+dealloc_win:
+ deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb);
+free_skb:
+ kfree_skb(mhp->dereg_skb);
+free_mhp:
+ kfree(mhp);
+ return ERR_PTR(ret);
}
int c4iw_dealloc_mw(struct ib_mw *mw)
@@ -602,7 +642,8 @@ int c4iw_dealloc_mw(struct ib_mw *mw)
rhp = mhp->rhp;
mmid = (mw->rkey) >> 8;
remove_handle(rhp, &rhp->mmidr, mmid);
- deallocate_window(&rhp->rdev, mhp->attr.stag);
+ deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb);
+ kfree_skb(mhp->dereg_skb);
kfree(mhp);
PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
return 0;
@@ -666,7 +707,7 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
return &(mhp->ibmr);
err3:
dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr);
+ mhp->attr.pbl_addr, mhp->dereg_skb);
err2:
c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
mhp->attr.pbl_size << 3);
@@ -717,7 +758,7 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr);
+ mhp->attr.pbl_addr, mhp->dereg_skb);
if (mhp->attr.pbl_size)
c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
mhp->attr.pbl_size << 3);
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index dd8a86b72..df127ce6b 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -409,20 +409,6 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
}
-static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
- ibdev.dev);
- PDBG("%s dev 0x%p\n", __func__, dev);
-
- return sprintf(buf, "%u.%u.%u.%u\n",
- FW_HDR_FW_VER_MAJOR_G(c4iw_dev->rdev.lldi.fw_vers),
- FW_HDR_FW_VER_MINOR_G(c4iw_dev->rdev.lldi.fw_vers),
- FW_HDR_FW_VER_MICRO_G(c4iw_dev->rdev.lldi.fw_vers),
- FW_HDR_FW_VER_BUILD_G(c4iw_dev->rdev.lldi.fw_vers));
-}
-
static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -502,13 +488,11 @@ static int c4iw_get_mib(struct ib_device *ibdev,
}
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
static struct device_attribute *c4iw_class_attributes[] = {
&dev_attr_hw_rev,
- &dev_attr_fw_ver,
&dev_attr_hca_type,
&dev_attr_board_id,
};
@@ -530,6 +514,20 @@ static int c4iw_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
+static void get_dev_fw_str(struct ib_device *dev, char *str,
+ size_t str_len)
+{
+ struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
+ ibdev);
+ PDBG("%s dev 0x%p\n", __func__, dev);
+
+ snprintf(str, str_len, "%u.%u.%u.%u",
+ FW_HDR_FW_VER_MAJOR_G(c4iw_dev->rdev.lldi.fw_vers),
+ FW_HDR_FW_VER_MINOR_G(c4iw_dev->rdev.lldi.fw_vers),
+ FW_HDR_FW_VER_MICRO_G(c4iw_dev->rdev.lldi.fw_vers),
+ FW_HDR_FW_VER_BUILD_G(c4iw_dev->rdev.lldi.fw_vers));
+}
+
int c4iw_register_device(struct c4iw_dev *dev)
{
int ret;
@@ -605,6 +603,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
dev->ibdev.get_hw_stats = c4iw_get_mib;
dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
dev->ibdev.get_port_immutable = c4iw_port_immutable;
+ dev->ibdev.get_dev_fw_str = get_dev_fw_str;
dev->ibdev.drain_sq = c4iw_drain_sq;
dev->ibdev.drain_rq = c4iw_drain_rq;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index e8993e49b..690435229 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -683,17 +683,25 @@ static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
return 0;
}
+static void _free_qp(struct kref *kref)
+{
+ struct c4iw_qp *qhp;
+
+ qhp = container_of(kref, struct c4iw_qp, kref);
+ PDBG("%s qhp %p\n", __func__, qhp);
+ kfree(qhp);
+}
+
void c4iw_qp_add_ref(struct ib_qp *qp)
{
PDBG("%s ib_qp %p\n", __func__, qp);
- atomic_inc(&(to_c4iw_qp(qp)->refcnt));
+ kref_get(&to_c4iw_qp(qp)->kref);
}
void c4iw_qp_rem_ref(struct ib_qp *qp)
{
PDBG("%s ib_qp %p\n", __func__, qp);
- if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
- wake_up(&(to_c4iw_qp(qp)->wait));
+ kref_put(&to_c4iw_qp(qp)->kref, _free_qp);
}
static void add_to_fc_list(struct list_head *head, struct list_head *entry)
@@ -1081,9 +1089,10 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
qhp->ep->hwtid);
- skb = alloc_skb(sizeof *wqe, gfp);
- if (!skb)
+ skb = skb_dequeue(&qhp->ep->com.ep_skb_list);
+ if (WARN_ON(!skb))
return;
+
set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
@@ -1202,9 +1211,10 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
ep->hwtid);
- skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
- if (!skb)
+ skb = skb_dequeue(&ep->com.ep_skb_list);
+ if (WARN_ON(!skb))
return -ENOMEM;
+
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
@@ -1592,8 +1602,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
wait_event(qhp->wait, !qhp->ep);
remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
- atomic_dec(&qhp->refcnt);
- wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
spin_lock_irq(&rhp->lock);
if (!list_empty(&qhp->db_fc_entry))
@@ -1606,8 +1614,9 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
destroy_qp(&rhp->rdev, &qhp->wq,
ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+ c4iw_qp_rem_ref(ib_qp);
+
PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
- kfree(qhp);
return 0;
}
@@ -1704,7 +1713,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
init_completion(&qhp->rq_drained);
mutex_init(&qhp->mutex);
init_waitqueue_head(&qhp->wait);
- atomic_set(&qhp->refcnt, 1);
+ kref_init(&qhp->kref);
ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
if (ret)
@@ -1896,12 +1905,20 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
return 0;
}
+static void move_qp_to_err(struct c4iw_qp *qp)
+{
+ struct c4iw_qp_attributes attrs = { .next_state = C4IW_QP_STATE_ERROR };
+
+ (void)c4iw_modify_qp(qp->rhp, qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+}
+
void c4iw_drain_sq(struct ib_qp *ibqp)
{
struct c4iw_qp *qp = to_c4iw_qp(ibqp);
unsigned long flag;
bool need_to_wait;
+ move_qp_to_err(qp);
spin_lock_irqsave(&qp->lock, flag);
need_to_wait = !t4_sq_empty(&qp->wq);
spin_unlock_irqrestore(&qp->lock, flag);
@@ -1916,6 +1933,7 @@ void c4iw_drain_rq(struct ib_qp *ibqp)
unsigned long flag;
bool need_to_wait;
+ move_qp_to_err(qp);
spin_lock_irqsave(&qp->lock, flag);
need_to_wait = !t4_rq_empty(&qp->wq);
spin_unlock_irqrestore(&qp->lock, flag);
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 6126bbe36..02173f431 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -634,6 +634,11 @@ static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
return (CQE_GENBIT(cqe) == cq->gen);
}
+static inline int t4_cq_notempty(struct t4_cq *cq)
+{
+ return cq->sw_in_use || t4_valid_cqe(cq, &cq->queue[cq->cidx]);
+}
+
static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
int ret;
diff --git a/drivers/infiniband/hw/hfi1/Kconfig b/drivers/infiniband/hw/hfi1/Kconfig
index f846fd51b..f6ea08817 100644
--- a/drivers/infiniband/hw/hfi1/Kconfig
+++ b/drivers/infiniband/hw/hfi1/Kconfig
@@ -1,8 +1,9 @@
config INFINIBAND_HFI1
tristate "Intel OPA Gen1 support"
- depends on X86_64 && INFINIBAND_RDMAVT
+ depends on X86_64 && INFINIBAND_RDMAVT && I2C
select MMU_NOTIFIER
select CRC32
+ select I2C_ALGOBIT
---help---
This is a low-level driver for Intel OPA Gen1 adapter.
config HFI1_DEBUG_SDMA_ORDER
diff --git a/drivers/infiniband/hw/hfi1/Makefile b/drivers/infiniband/hw/hfi1/Makefile
index 9b5382c94..0cf97a09b 100644
--- a/drivers/infiniband/hw/hfi1/Makefile
+++ b/drivers/infiniband/hw/hfi1/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o
hfi1-y := affinity.o chip.o device.o driver.o efivar.o \
eprom.o file_ops.o firmware.o \
init.o intr.o mad.o mmu_rb.o pcie.o pio.o pio_copy.o platform.o \
- qp.o qsfp.o rc.o ruc.o sdma.o sysfs.o trace.o twsi.o \
+ qp.o qsfp.o rc.o ruc.o sdma.o sysfs.o trace.o \
uc.o ud.o user_exp_rcv.o user_pages.o user_sdma.o verbs.o \
verbs_txreq.o
hfi1-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 14d7eeb09..0566393e5 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -53,6 +53,11 @@
#include "sdma.h"
#include "trace.h"
+struct hfi1_affinity_node_list node_affinity = {
+ .list = LIST_HEAD_INIT(node_affinity.list),
+ .lock = __SPIN_LOCK_UNLOCKED(&node_affinity.lock),
+};
+
/* Name of IRQ types, indexed by enum irq_type */
static const char * const irq_type_names[] = {
"SDMA",
@@ -61,6 +66,9 @@ static const char * const irq_type_names[] = {
"OTHER",
};
+/* Per NUMA node count of HFI devices */
+static unsigned int *hfi1_per_node_cntr;
+
static inline void init_cpu_mask_set(struct cpu_mask_set *set)
{
cpumask_clear(&set->mask);
@@ -69,47 +77,136 @@ static inline void init_cpu_mask_set(struct cpu_mask_set *set)
}
/* Initialize non-HT cpu cores mask */
-int init_real_cpu_mask(struct hfi1_devdata *dd)
+void init_real_cpu_mask(void)
{
- struct hfi1_affinity *info;
int possible, curr_cpu, i, ht;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- cpumask_clear(&info->real_cpu_mask);
+ cpumask_clear(&node_affinity.real_cpu_mask);
/* Start with cpu online mask as the real cpu mask */
- cpumask_copy(&info->real_cpu_mask, cpu_online_mask);
+ cpumask_copy(&node_affinity.real_cpu_mask, cpu_online_mask);
/*
* Remove HT cores from the real cpu mask. Do this in two steps below.
*/
- possible = cpumask_weight(&info->real_cpu_mask);
+ possible = cpumask_weight(&node_affinity.real_cpu_mask);
ht = cpumask_weight(topology_sibling_cpumask(
- cpumask_first(&info->real_cpu_mask)));
+ cpumask_first(&node_affinity.real_cpu_mask)));
/*
* Step 1. Skip over the first N HT siblings and use them as the
* "real" cores. Assumes that HT cores are not enumerated in
* succession (except in the single core case).
*/
- curr_cpu = cpumask_first(&info->real_cpu_mask);
+ curr_cpu = cpumask_first(&node_affinity.real_cpu_mask);
for (i = 0; i < possible / ht; i++)
- curr_cpu = cpumask_next(curr_cpu, &info->real_cpu_mask);
+ curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
/*
* Step 2. Remove the remaining HT siblings. Use cpumask_next() to
* skip any gaps.
*/
for (; i < possible; i++) {
- cpumask_clear_cpu(curr_cpu, &info->real_cpu_mask);
- curr_cpu = cpumask_next(curr_cpu, &info->real_cpu_mask);
+ cpumask_clear_cpu(curr_cpu, &node_affinity.real_cpu_mask);
+ curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
+ }
+}
+
+int node_affinity_init(void)
+{
+ int node;
+ struct pci_dev *dev = NULL;
+ const struct pci_device_id *ids = hfi1_pci_tbl;
+
+ cpumask_clear(&node_affinity.proc.used);
+ cpumask_copy(&node_affinity.proc.mask, cpu_online_mask);
+
+ node_affinity.proc.gen = 0;
+ node_affinity.num_core_siblings =
+ cpumask_weight(topology_sibling_cpumask(
+ cpumask_first(&node_affinity.proc.mask)
+ ));
+ node_affinity.num_online_nodes = num_online_nodes();
+ node_affinity.num_online_cpus = num_online_cpus();
+
+ /*
+ * The real cpu mask is part of the affinity struct but it has to be
+ * initialized early. It is needed to calculate the number of user
+ * contexts in set_up_context_variables().
+ */
+ init_real_cpu_mask();
+
+ hfi1_per_node_cntr = kcalloc(num_possible_nodes(),
+ sizeof(*hfi1_per_node_cntr), GFP_KERNEL);
+ if (!hfi1_per_node_cntr)
+ return -ENOMEM;
+
+ while (ids->vendor) {
+ dev = NULL;
+ while ((dev = pci_get_device(ids->vendor, ids->device, dev))) {
+ node = pcibus_to_node(dev->bus);
+ if (node < 0)
+ node = numa_node_id();
+
+ hfi1_per_node_cntr[node]++;
+ }
+ ids++;
}
- dd->affinity = info;
return 0;
}
+void node_affinity_destroy(void)
+{
+ struct list_head *pos, *q;
+ struct hfi1_affinity_node *entry;
+
+ spin_lock(&node_affinity.lock);
+ list_for_each_safe(pos, q, &node_affinity.list) {
+ entry = list_entry(pos, struct hfi1_affinity_node,
+ list);
+ list_del(pos);
+ kfree(entry);
+ }
+ spin_unlock(&node_affinity.lock);
+ kfree(hfi1_per_node_cntr);
+}
+
+static struct hfi1_affinity_node *node_affinity_allocate(int node)
+{
+ struct hfi1_affinity_node *entry;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return NULL;
+ entry->node = node;
+ INIT_LIST_HEAD(&entry->list);
+
+ return entry;
+}
+
+/*
+ * Append an entry to the list.
+ * Must be called with node_affinity.lock held.
+ */
+static void node_affinity_add_tail(struct hfi1_affinity_node *entry)
+{
+ list_add_tail(&entry->list, &node_affinity.list);
+}
+
+/* It must be called with node_affinity.lock held */
+static struct hfi1_affinity_node *node_affinity_lookup(int node)
+{
+ struct list_head *pos;
+ struct hfi1_affinity_node *entry;
+
+ list_for_each(pos, &node_affinity.list) {
+ entry = list_entry(pos, struct hfi1_affinity_node, list);
+ if (entry->node == node)
+ return entry;
+ }
+
+ return NULL;
+}
+
/*
* Interrupt affinity.
*
@@ -121,10 +218,10 @@ int init_real_cpu_mask(struct hfi1_devdata *dd)
* to the node relative 1 as necessary.
*
*/
-void hfi1_dev_affinity_init(struct hfi1_devdata *dd)
+int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
{
int node = pcibus_to_node(dd->pcidev->bus);
- struct hfi1_affinity *info = dd->affinity;
+ struct hfi1_affinity_node *entry;
const struct cpumask *local_mask;
int curr_cpu, possible, i;
@@ -132,56 +229,93 @@ void hfi1_dev_affinity_init(struct hfi1_devdata *dd)
node = numa_node_id();
dd->node = node;
- spin_lock_init(&info->lock);
-
- init_cpu_mask_set(&info->def_intr);
- init_cpu_mask_set(&info->rcv_intr);
- init_cpu_mask_set(&info->proc);
-
local_mask = cpumask_of_node(dd->node);
if (cpumask_first(local_mask) >= nr_cpu_ids)
local_mask = topology_core_cpumask(0);
- /* Use the "real" cpu mask of this node as the default */
- cpumask_and(&info->def_intr.mask, &info->real_cpu_mask, local_mask);
-
- /* fill in the receive list */
- possible = cpumask_weight(&info->def_intr.mask);
- curr_cpu = cpumask_first(&info->def_intr.mask);
- if (possible == 1) {
- /* only one CPU, everyone will use it */
- cpumask_set_cpu(curr_cpu, &info->rcv_intr.mask);
- } else {
- /*
- * Retain the first CPU in the default list for the control
- * context.
- */
- curr_cpu = cpumask_next(curr_cpu, &info->def_intr.mask);
- /*
- * Remove the remaining kernel receive queues from
- * the default list and add them to the receive list.
- */
- for (i = 0; i < dd->n_krcv_queues - 1; i++) {
- cpumask_clear_cpu(curr_cpu, &info->def_intr.mask);
- cpumask_set_cpu(curr_cpu, &info->rcv_intr.mask);
- curr_cpu = cpumask_next(curr_cpu, &info->def_intr.mask);
- if (curr_cpu >= nr_cpu_ids)
- break;
+
+ spin_lock(&node_affinity.lock);
+ entry = node_affinity_lookup(dd->node);
+ spin_unlock(&node_affinity.lock);
+
+ /*
+ * If this is the first time this NUMA node's affinity is used,
+ * create an entry in the global affinity structure and initialize it.
+ */
+ if (!entry) {
+ entry = node_affinity_allocate(node);
+ if (!entry) {
+ dd_dev_err(dd,
+ "Unable to allocate global affinity node\n");
+ return -ENOMEM;
}
- }
+ init_cpu_mask_set(&entry->def_intr);
+ init_cpu_mask_set(&entry->rcv_intr);
+ cpumask_clear(&entry->general_intr_mask);
+ /* Use the "real" cpu mask of this node as the default */
+ cpumask_and(&entry->def_intr.mask, &node_affinity.real_cpu_mask,
+ local_mask);
+
+ /* fill in the receive list */
+ possible = cpumask_weight(&entry->def_intr.mask);
+ curr_cpu = cpumask_first(&entry->def_intr.mask);
+
+ if (possible == 1) {
+ /* only one CPU, everyone will use it */
+ cpumask_set_cpu(curr_cpu, &entry->rcv_intr.mask);
+ cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
+ } else {
+ /*
+ * The general/control context will be the first CPU in
+ * the default list, so it is removed from the default
+ * list and added to the general interrupt list.
+ */
+ cpumask_clear_cpu(curr_cpu, &entry->def_intr.mask);
+ cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
+ curr_cpu = cpumask_next(curr_cpu,
+ &entry->def_intr.mask);
- cpumask_copy(&info->proc.mask, cpu_online_mask);
-}
+ /*
+ * Remove the remaining kernel receive queues from
+ * the default list and add them to the receive list.
+ */
+ for (i = 0;
+ i < (dd->n_krcv_queues - 1) *
+ hfi1_per_node_cntr[dd->node];
+ i++) {
+ cpumask_clear_cpu(curr_cpu,
+ &entry->def_intr.mask);
+ cpumask_set_cpu(curr_cpu,
+ &entry->rcv_intr.mask);
+ curr_cpu = cpumask_next(curr_cpu,
+ &entry->def_intr.mask);
+ if (curr_cpu >= nr_cpu_ids)
+ break;
+ }
-void hfi1_dev_affinity_free(struct hfi1_devdata *dd)
-{
- kfree(dd->affinity);
+ /*
+ * If there ends up being 0 CPU cores leftover for SDMA
+ * engines, use the same CPU cores as general/control
+ * context.
+ */
+ if (cpumask_weight(&entry->def_intr.mask) == 0)
+ cpumask_copy(&entry->def_intr.mask,
+ &entry->general_intr_mask);
+ }
+
+ spin_lock(&node_affinity.lock);
+ node_affinity_add_tail(entry);
+ spin_unlock(&node_affinity.lock);
+ }
+
+ return 0;
}
int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
{
int ret;
cpumask_var_t diff;
- struct cpu_mask_set *set;
+ struct hfi1_affinity_node *entry;
+ struct cpu_mask_set *set = NULL;
struct sdma_engine *sde = NULL;
struct hfi1_ctxtdata *rcd = NULL;
char extra[64];
@@ -194,22 +328,25 @@ int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
if (!ret)
return -ENOMEM;
+ spin_lock(&node_affinity.lock);
+ entry = node_affinity_lookup(dd->node);
+ spin_unlock(&node_affinity.lock);
+
switch (msix->type) {
case IRQ_SDMA:
sde = (struct sdma_engine *)msix->arg;
scnprintf(extra, 64, "engine %u", sde->this_idx);
- /* fall through */
+ set = &entry->def_intr;
+ break;
case IRQ_GENERAL:
- set = &dd->affinity->def_intr;
+ cpu = cpumask_first(&entry->general_intr_mask);
break;
case IRQ_RCVCTXT:
rcd = (struct hfi1_ctxtdata *)msix->arg;
- if (rcd->ctxt == HFI1_CTRL_CTXT) {
- set = &dd->affinity->def_intr;
- cpu = cpumask_first(&set->mask);
- } else {
- set = &dd->affinity->rcv_intr;
- }
+ if (rcd->ctxt == HFI1_CTRL_CTXT)
+ cpu = cpumask_first(&entry->general_intr_mask);
+ else
+ set = &entry->rcv_intr;
scnprintf(extra, 64, "ctxt %u", rcd->ctxt);
break;
default:
@@ -218,12 +355,12 @@ int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
}
/*
- * The control receive context is placed on a particular CPU, which
- * is set above. Skip accounting for it. Everything else finds its
- * CPU here.
+ * The general and control contexts are placed on a particular
+ * CPU, which is set above. Skip accounting for it. Everything else
+ * finds its CPU here.
*/
- if (cpu == -1) {
- spin_lock(&dd->affinity->lock);
+ if (cpu == -1 && set) {
+ spin_lock(&node_affinity.lock);
if (cpumask_equal(&set->mask, &set->used)) {
/*
* We've used up all the CPUs, bump up the generation
@@ -235,7 +372,7 @@ int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
cpumask_andnot(diff, &set->mask, &set->used);
cpu = cpumask_first(diff);
cpumask_set_cpu(cpu, &set->used);
- spin_unlock(&dd->affinity->lock);
+ spin_unlock(&node_affinity.lock);
}
switch (msix->type) {
@@ -263,43 +400,84 @@ void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
{
struct cpu_mask_set *set = NULL;
struct hfi1_ctxtdata *rcd;
+ struct hfi1_affinity_node *entry;
+
+ spin_lock(&node_affinity.lock);
+ entry = node_affinity_lookup(dd->node);
+ spin_unlock(&node_affinity.lock);
switch (msix->type) {
case IRQ_SDMA:
+ set = &entry->def_intr;
+ break;
case IRQ_GENERAL:
- set = &dd->affinity->def_intr;
+ /* Don't do accounting for general contexts */
break;
case IRQ_RCVCTXT:
rcd = (struct hfi1_ctxtdata *)msix->arg;
- /* only do accounting for non control contexts */
+ /* Don't do accounting for control contexts */
if (rcd->ctxt != HFI1_CTRL_CTXT)
- set = &dd->affinity->rcv_intr;
+ set = &entry->rcv_intr;
break;
default:
return;
}
if (set) {
- spin_lock(&dd->affinity->lock);
+ spin_lock(&node_affinity.lock);
cpumask_andnot(&set->used, &set->used, &msix->mask);
if (cpumask_empty(&set->used) && set->gen) {
set->gen--;
cpumask_copy(&set->used, &set->mask);
}
- spin_unlock(&dd->affinity->lock);
+ spin_unlock(&node_affinity.lock);
}
irq_set_affinity_hint(msix->msix.vector, NULL);
cpumask_clear(&msix->mask);
}
-int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
+/* This should be called with node_affinity.lock held */
+static void find_hw_thread_mask(uint hw_thread_no, cpumask_var_t hw_thread_mask,
+ struct hfi1_affinity_node_list *affinity)
{
- int cpu = -1, ret;
- cpumask_var_t diff, mask, intrs;
+ int possible, curr_cpu, i;
+ uint num_cores_per_socket = node_affinity.num_online_cpus /
+ affinity->num_core_siblings /
+ node_affinity.num_online_nodes;
+
+ cpumask_copy(hw_thread_mask, &affinity->proc.mask);
+ if (affinity->num_core_siblings > 0) {
+ /* Remove the other sibling HW threads; only one per core is needed here */
+ possible = cpumask_weight(hw_thread_mask);
+ curr_cpu = cpumask_first(hw_thread_mask);
+ for (i = 0;
+ i < num_cores_per_socket * node_affinity.num_online_nodes;
+ i++)
+ curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
+
+ for (; i < possible; i++) {
+ cpumask_clear_cpu(curr_cpu, hw_thread_mask);
+ curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
+ }
+
+ /* Identifying correct HW threads within physical cores */
+ cpumask_shift_left(hw_thread_mask, hw_thread_mask,
+ num_cores_per_socket *
+ node_affinity.num_online_nodes *
+ hw_thread_no);
+ }
+}
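A rough worked example of the arithmetic above, with illustrative figures rather than real topology: 32 online CPUs, 2 siblings per core and 2 online NUMA nodes give num_cores_per_socket = 32 / 2 / 2 = 8, so for hw_thread_no == 1 the mask is shifted left by 8 * 2 * 1 = 16 bits and the second HW thread of every physical core is selected:

    /* Illustrative only: mirrors the shift computed in find_hw_thread_mask() */
    unsigned cpus = 32, siblings = 2, nodes = 2, hw_thread_no = 1;
    unsigned cores_per_socket = cpus / siblings / nodes;      /* 8 */
    unsigned shift = cores_per_socket * nodes * hw_thread_no; /* 16 */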
+
+int hfi1_get_proc_affinity(int node)
+{
+ int cpu = -1, ret, i;
+ struct hfi1_affinity_node *entry;
+ cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask;
const struct cpumask *node_mask,
*proc_mask = tsk_cpus_allowed(current);
- struct cpu_mask_set *set = &dd->affinity->proc;
+ struct hfi1_affinity_node_list *affinity = &node_affinity;
+ struct cpu_mask_set *set = &affinity->proc;
/*
* check whether process/context affinity has already
@@ -325,22 +503,41 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
/*
* The process does not have a preset CPU affinity so find one to
- * recommend. We prefer CPUs on the same NUMA as the device.
+ * recommend using the following algorithm:
+ *
+ * For each user process that is opening a context on HFI Y:
+ * a) If all cores are filled, reinitialize the bitmask
+ * b) Fill real cores first, then HT cores (first set of HT
+ * cores on all physical cores, then second set of HT cores,
+ * and so on) in the following order:
+ *
+ * 1. Same NUMA node as HFI Y and not running an IRQ
+ * handler
+ * 2. Same NUMA node as HFI Y and running an IRQ handler
+ * 3. Different NUMA node from HFI Y and not running an IRQ
+ * handler
+ * 4. Different NUMA node from HFI Y and running an IRQ
+ * handler
+ * c) Mark core as filled in the bitmask. As user processes are
+ * done, clear cores from the bitmask.
*/
ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
if (!ret)
goto done;
- ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
+ ret = zalloc_cpumask_var(&hw_thread_mask, GFP_KERNEL);
if (!ret)
goto free_diff;
- ret = zalloc_cpumask_var(&intrs, GFP_KERNEL);
+ ret = zalloc_cpumask_var(&available_mask, GFP_KERNEL);
+ if (!ret)
+ goto free_hw_thread_mask;
+ ret = zalloc_cpumask_var(&intrs_mask, GFP_KERNEL);
if (!ret)
- goto free_mask;
+ goto free_available_mask;
- spin_lock(&dd->affinity->lock);
+ spin_lock(&affinity->lock);
/*
- * If we've used all available CPUs, clear the mask and start
+ * If we've used all available HW threads, clear the mask and start
* overloading.
*/
if (cpumask_equal(&set->mask, &set->used)) {
@@ -348,81 +545,204 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
cpumask_clear(&set->used);
}
- /* CPUs used by interrupt handlers */
- cpumask_copy(intrs, (dd->affinity->def_intr.gen ?
- &dd->affinity->def_intr.mask :
- &dd->affinity->def_intr.used));
- cpumask_or(intrs, intrs, (dd->affinity->rcv_intr.gen ?
- &dd->affinity->rcv_intr.mask :
- &dd->affinity->rcv_intr.used));
+ /*
+ * If NUMA node has CPUs used by interrupt handlers, include them in the
+ * interrupt handler mask.
+ */
+ entry = node_affinity_lookup(node);
+ if (entry) {
+ cpumask_copy(intrs_mask, (entry->def_intr.gen ?
+ &entry->def_intr.mask :
+ &entry->def_intr.used));
+ cpumask_or(intrs_mask, intrs_mask, (entry->rcv_intr.gen ?
+ &entry->rcv_intr.mask :
+ &entry->rcv_intr.used));
+ cpumask_or(intrs_mask, intrs_mask, &entry->general_intr_mask);
+ }
hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl",
- cpumask_pr_args(intrs));
+ cpumask_pr_args(intrs_mask));
+
+ cpumask_copy(hw_thread_mask, &set->mask);
/*
- * If we don't have a NUMA node requested, preference is towards
- * device NUMA node
+ * If HT cores are enabled, identify which HW threads within the
+ * physical cores should be used.
*/
- if (node == -1)
- node = dd->node;
+ if (affinity->num_core_siblings > 0) {
+ for (i = 0; i < affinity->num_core_siblings; i++) {
+ find_hw_thread_mask(i, hw_thread_mask, affinity);
+
+ /*
+ * If there's at least one available core for this HW
+ * thread number, stop looking for a core.
+ *
+ * diff will always be non-empty at least once in this
+ * loop, as the used mask is reset when
+ * (set->mask == set->used) before this loop.
+ */
+ cpumask_andnot(diff, hw_thread_mask, &set->used);
+ if (!cpumask_empty(diff))
+ break;
+ }
+ }
+ hfi1_cdbg(PROC, "Same available HW thread on all physical CPUs: %*pbl",
+ cpumask_pr_args(hw_thread_mask));
+
node_mask = cpumask_of_node(node);
- hfi1_cdbg(PROC, "device on NUMA %u, CPUs %*pbl", node,
+ hfi1_cdbg(PROC, "Device on NUMA %u, CPUs %*pbl", node,
cpumask_pr_args(node_mask));
- /* diff will hold all unused cpus */
- cpumask_andnot(diff, &set->mask, &set->used);
- hfi1_cdbg(PROC, "unused CPUs (all) %*pbl", cpumask_pr_args(diff));
-
- /* get cpumask of available CPUs on preferred NUMA */
- cpumask_and(mask, diff, node_mask);
- hfi1_cdbg(PROC, "available cpus on NUMA %*pbl", cpumask_pr_args(mask));
+ /* Get cpumask of available CPUs on preferred NUMA */
+ cpumask_and(available_mask, hw_thread_mask, node_mask);
+ cpumask_andnot(available_mask, available_mask, &set->used);
+ hfi1_cdbg(PROC, "Available CPUs on NUMA %u: %*pbl", node,
+ cpumask_pr_args(available_mask));
/*
* At first, we don't want to place processes on the same
- * CPUs as interrupt handlers.
+ * CPUs as interrupt handlers. Then, CPUs running interrupt
+ * handlers are used.
+ *
+ * 1) If diff is not empty, then there are CPUs not running
+ * non-interrupt handlers available, so diff gets copied
+ * over to available_mask.
+ * 2) If diff is empty, then all CPUs not running interrupt
+ * handlers are taken, so available_mask contains all
+ * available CPUs running interrupt handlers.
+ * 3) If available_mask is empty, then all CPUs on the
+ * preferred NUMA node are taken, so other NUMA nodes are
+ * used for process assignments using the same method as
+ * the preferred NUMA node.
*/
- cpumask_andnot(diff, mask, intrs);
+ cpumask_andnot(diff, available_mask, intrs_mask);
if (!cpumask_empty(diff))
- cpumask_copy(mask, diff);
+ cpumask_copy(available_mask, diff);
- /*
- * if we don't have a cpu on the preferred NUMA, get
- * the list of the remaining available CPUs
- */
- if (cpumask_empty(mask)) {
- cpumask_andnot(diff, &set->mask, &set->used);
- cpumask_andnot(mask, diff, node_mask);
+ /* If we don't have CPUs on the preferred node, use other NUMA nodes */
+ if (cpumask_empty(available_mask)) {
+ cpumask_andnot(available_mask, hw_thread_mask, &set->used);
+ /* Excluding preferred NUMA cores */
+ cpumask_andnot(available_mask, available_mask, node_mask);
+ hfi1_cdbg(PROC,
+ "Preferred NUMA node cores are taken, cores available in other NUMA nodes: %*pbl",
+ cpumask_pr_args(available_mask));
+
+ /*
+ * At first, we don't want to place processes on the same
+ * CPUs as interrupt handlers.
+ */
+ cpumask_andnot(diff, available_mask, intrs_mask);
+ if (!cpumask_empty(diff))
+ cpumask_copy(available_mask, diff);
}
- hfi1_cdbg(PROC, "possible CPUs for process %*pbl",
- cpumask_pr_args(mask));
+ hfi1_cdbg(PROC, "Possible CPUs for process: %*pbl",
+ cpumask_pr_args(available_mask));
- cpu = cpumask_first(mask);
+ cpu = cpumask_first(available_mask);
if (cpu >= nr_cpu_ids) /* empty */
cpu = -1;
else
cpumask_set_cpu(cpu, &set->used);
- spin_unlock(&dd->affinity->lock);
-
- free_cpumask_var(intrs);
-free_mask:
- free_cpumask_var(mask);
+ spin_unlock(&affinity->lock);
+ hfi1_cdbg(PROC, "Process assigned to CPU %d", cpu);
+
+ free_cpumask_var(intrs_mask);
+free_available_mask:
+ free_cpumask_var(available_mask);
+free_hw_thread_mask:
+ free_cpumask_var(hw_thread_mask);
free_diff:
free_cpumask_var(diff);
done:
return cpu;
}
-void hfi1_put_proc_affinity(struct hfi1_devdata *dd, int cpu)
+void hfi1_put_proc_affinity(int cpu)
{
- struct cpu_mask_set *set = &dd->affinity->proc;
+ struct hfi1_affinity_node_list *affinity = &node_affinity;
+ struct cpu_mask_set *set = &affinity->proc;
if (cpu < 0)
return;
- spin_lock(&dd->affinity->lock);
+ spin_lock(&affinity->lock);
cpumask_clear_cpu(cpu, &set->used);
+ hfi1_cdbg(PROC, "Returning CPU %d for future process assignment", cpu);
if (cpumask_empty(&set->used) && set->gen) {
set->gen--;
cpumask_copy(&set->used, &set->mask);
}
- spin_unlock(&dd->affinity->lock);
+ spin_unlock(&affinity->lock);
+}
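A minimal sketch of how callers are expected to pair these helpers, modeled on the file_ops.c hunks later in this patch (error handling elided; dd->node stands in for whatever NUMA hint the caller has):

    /* Sketch only -- see the real call sites in file_ops.c below. */
    int cpu = hfi1_get_proc_affinity(dd->node); /* prefer the device's NUMA node */
    if (cpu != -1)
        fd->rec_cpu_num = cpu;  /* record the pinned CPU for later release */
    ...
    hfi1_put_proc_affinity(fd->rec_cpu_num);    /* -1 is a harmless no-op */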
+
+/* Prevents concurrent reads and writes of the sdma_affinity attribute */
+static DEFINE_MUTEX(sdma_affinity_mutex);
+
+int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
+ size_t count)
+{
+ struct hfi1_affinity_node *entry;
+ cpumask_var_t mask;
+ int ret, i;
+
+ spin_lock(&node_affinity.lock);
+ entry = node_affinity_lookup(dd->node);
+ spin_unlock(&node_affinity.lock);
+
+ if (!entry)
+ return -EINVAL;
+
+ ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
+ if (!ret)
+ return -ENOMEM;
+
+ ret = cpulist_parse(buf, mask);
+ if (ret)
+ goto out;
+
+ if (!cpumask_subset(mask, cpu_online_mask) || cpumask_empty(mask)) {
+ dd_dev_warn(dd, "Invalid CPU mask\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ mutex_lock(&sdma_affinity_mutex);
+ /* reset the SDMA interrupt affinity details */
+ init_cpu_mask_set(&entry->def_intr);
+ cpumask_copy(&entry->def_intr.mask, mask);
+ /*
+ * Reassign the affinity for each SDMA interrupt.
+ */
+ for (i = 0; i < dd->num_msix_entries; i++) {
+ struct hfi1_msix_entry *msix;
+
+ msix = &dd->msix_entries[i];
+ if (msix->type != IRQ_SDMA)
+ continue;
+
+ ret = hfi1_get_irq_affinity(dd, msix);
+
+ if (ret)
+ break;
+ }
+ mutex_unlock(&sdma_affinity_mutex);
+out:
+ free_cpumask_var(mask);
+ return ret ? ret : strnlen(buf, PAGE_SIZE);
}
+int hfi1_get_sdma_affinity(struct hfi1_devdata *dd, char *buf)
+{
+ struct hfi1_affinity_node *entry;
+
+ spin_lock(&node_affinity.lock);
+ entry = node_affinity_lookup(dd->node);
+ spin_unlock(&node_affinity.lock);
+
+ if (!entry)
+ return -EINVAL;
+
+ mutex_lock(&sdma_affinity_mutex);
+ cpumap_print_to_pagebuf(true, buf, &entry->def_intr.mask);
+ mutex_unlock(&sdma_affinity_mutex);
+ return strnlen(buf, PAGE_SIZE);
+}
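The recurring bookkeeping in this file is the cpu_mask_set pattern: CPUs are handed out from mask into used, and once used catches up with mask the generation counter is bumped and used is cleared so allocation wraps around ("overloading"). A condensed sketch of that pattern, assuming a non-empty mask and using a plain bitmask in place of struct cpumask (illustrative only, not code from this patch):

    struct cpu_mask_set_sketch { unsigned long mask, used; unsigned gen; };

    /* Hand out the lowest unused CPU; wrap (and bump gen) when exhausted. */
    static int get_cpu_slot(struct cpu_mask_set_sketch *set)
    {
        unsigned long free;

        if (set->used == set->mask) {  /* all taken: start overloading */
            set->gen++;
            set->used = 0;
        }
        free = set->mask & ~set->used; /* cpumask_andnot() equivalent */
        set->used |= free & -free;     /* claim the lowest set bit */
        return __builtin_ctzl(free);   /* cpumask_first() equivalent */
    }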
diff --git a/drivers/infiniband/hw/hfi1/affinity.h b/drivers/infiniband/hw/hfi1/affinity.h
index 20f52fe74..8879cf7a8 100644
--- a/drivers/infiniband/hw/hfi1/affinity.h
+++ b/drivers/infiniband/hw/hfi1/affinity.h
@@ -73,7 +73,6 @@ struct cpu_mask_set {
struct hfi1_affinity {
struct cpu_mask_set def_intr;
struct cpu_mask_set rcv_intr;
- struct cpu_mask_set proc;
struct cpumask real_cpu_mask;
/* spin lock to protect affinity struct */
spinlock_t lock;
@@ -82,11 +81,9 @@ struct hfi1_affinity {
struct hfi1_msix_entry;
/* Initialize non-HT cpu cores mask */
-int init_real_cpu_mask(struct hfi1_devdata *);
+void init_real_cpu_mask(void);
/* Initialize driver affinity data */
-void hfi1_dev_affinity_init(struct hfi1_devdata *);
-/* Free driver affinity data */
-void hfi1_dev_affinity_free(struct hfi1_devdata *);
+int hfi1_dev_affinity_init(struct hfi1_devdata *);
/*
* Set IRQ affinity to a CPU. The function will determine the
* CPU and set the affinity to it.
@@ -101,8 +98,35 @@ void hfi1_put_irq_affinity(struct hfi1_devdata *, struct hfi1_msix_entry *);
* Determine a CPU affinity for a user process, if the process does not
* have an affinity set yet.
*/
-int hfi1_get_proc_affinity(struct hfi1_devdata *, int);
+int hfi1_get_proc_affinity(int);
/* Release a CPU used by a user process. */
-void hfi1_put_proc_affinity(struct hfi1_devdata *, int);
+void hfi1_put_proc_affinity(int);
+
+int hfi1_get_sdma_affinity(struct hfi1_devdata *dd, char *buf);
+int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
+ size_t count);
+
+struct hfi1_affinity_node {
+ int node;
+ struct cpu_mask_set def_intr;
+ struct cpu_mask_set rcv_intr;
+ struct cpumask general_intr_mask;
+ struct list_head list;
+};
+
+struct hfi1_affinity_node_list {
+ struct list_head list;
+ struct cpumask real_cpu_mask;
+ struct cpu_mask_set proc;
+ int num_core_siblings;
+ int num_online_nodes;
+ int num_online_cpus;
+ /* protect affinity node list */
+ spinlock_t lock;
+};
+
+int node_affinity_init(void);
+void node_affinity_destroy(void);
+extern struct hfi1_affinity_node_list node_affinity;
#endif /* _HFI1_AFFINITY_H */
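node_affinity_lookup() and node_affinity_add_tail() are called throughout the affinity.c hunks but their bodies fall outside this diff; given the list_head members declared above, a plausible shape for the lookup (an assumption, not code from this patch) is a linear scan under node_affinity.lock:

    /* Assumed shape of the lookup -- the actual body is not in these hunks. */
    static struct hfi1_affinity_node *node_affinity_lookup(int node)
    {
        struct list_head *pos;
        struct hfi1_affinity_node *entry;

        list_for_each(pos, &node_affinity.list) {
            entry = list_entry(pos, struct hfi1_affinity_node, list);
            if (entry->node == node)
                return entry;
        }
        return NULL;
    }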
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index dad4d0ebb..cc38004ce 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -63,6 +63,7 @@
#include "efivar.h"
#include "platform.h"
#include "aspm.h"
+#include "affinity.h"
#define NUM_IB_PORTS 1
@@ -121,6 +122,7 @@ struct flag_table {
#define SEC_SC_HALTED 0x4 /* per-context only */
#define SEC_SPC_FREEZE 0x8 /* per-HFI only */
+#define DEFAULT_KRCVQS 2
#define MIN_KERNEL_KCTXTS 2
#define FIRST_KERNEL_KCTXT 1
/* sizes for both the QP and RSM map tables */
@@ -238,6 +240,9 @@ struct flag_table {
/* all CceStatus sub-block RXE pause bits */
#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
+#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
+#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
+
/*
* CCE Error flags.
*/
@@ -3947,6 +3952,28 @@ static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
return dd->sw_send_dma_eng_err_status_cnt[0];
}
+static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
+ void *context, int vl, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ u64 val = 0;
+ u64 csr = entry->csr;
+
+ val = read_write_csr(dd, csr, mode, data);
+ if (mode == CNTR_MODE_R) {
+ val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
+ CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
+ } else if (mode == CNTR_MODE_W) {
+ dd->sw_rcv_bypass_packet_errors = 0;
+ } else {
+ dd_dev_err(dd, "Invalid cntr register access mode");
+ return 0;
+ }
+ return val;
+}
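The read path above performs a saturating add: the software count of dropped bypass packets is folded into the hardware receive-error counter, clamped at CNTR_MAX so the sum cannot wrap. With small illustrative numbers (pretend CNTR_MAX were 0xFF):

    /* Illustrative: csr reads 0xF0, software has 0x20 extra errors. */
    u64 val = 0xF0, sw = 0x20, max = 0xFF;
    val = val > max - sw ? max : val + sw;  /* 0xF0 > 0xDF, so val == 0xFF */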
+
#define def_access_sw_cpu(cntr) \
static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
void *context, int vl, int mode, u64 data) \
@@ -4020,7 +4047,8 @@ static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
CNTR_SYNTH),
-[C_DC_RCV_ERR] = DC_PERF_CNTR(DcRecvErr, DCC_ERR_PORTRCV_ERR_CNT, CNTR_SYNTH),
+[C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
+ access_dc_rcv_err_cnt),
[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
CNTR_SYNTH),
[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
@@ -8798,30 +8826,6 @@ static int write_tx_settings(struct hfi1_devdata *dd,
return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
}
-static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
-{
- u32 frame, version, prod_id;
- int ret, lane;
-
- /* 4 lanes */
- for (lane = 0; lane < 4; lane++) {
- ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame);
- if (ret) {
- dd_dev_err(dd,
- "Unable to read lane %d firmware details\n",
- lane);
- continue;
- }
- version = (frame >> SPICO_ROM_VERSION_SHIFT)
- & SPICO_ROM_VERSION_MASK;
- prod_id = (frame >> SPICO_ROM_PROD_ID_SHIFT)
- & SPICO_ROM_PROD_ID_MASK;
- dd_dev_info(dd,
- "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n",
- lane, version, prod_id);
- }
-}
-
/*
* Read an idle LCB message.
*
@@ -9187,17 +9191,24 @@ static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
unsigned long timeout;
/*
- * Check for QSFP interrupt for t_init (SFF 8679)
+ * Some QSFP cables have a quirk that asserts the IntN line as a side
+ * effect of power up on plug-in. We ignore this false positive
+ * interrupt until the module has finished powering up by waiting for
+ * a minimum timeout of the module inrush initialization time of
+ * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
+ * module have stabilized.
+ */
+ msleep(500);
+
+ /*
+ * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
*/
timeout = jiffies + msecs_to_jiffies(2000);
while (1) {
mask = read_csr(dd, dd->hfi1_id ?
ASIC_QSFP2_IN : ASIC_QSFP1_IN);
- if (!(mask & QSFP_HFI0_INT_N)) {
- write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR :
- ASIC_QSFP1_CLEAR, QSFP_HFI0_INT_N);
+ if (!(mask & QSFP_HFI0_INT_N))
break;
- }
if (time_after(jiffies, timeout)) {
dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
__func__);
@@ -9213,10 +9224,17 @@ static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
u64 mask;
mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
- if (enable)
+ if (enable) {
+ /*
+ * Clear the status register to avoid an immediate interrupt
+ * when we re-enable the IntN pin
+ */
+ write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
+ QSFP_HFI0_INT_N);
mask |= (u64)QSFP_HFI0_INT_N;
- else
+ } else {
mask &= ~(u64)QSFP_HFI0_INT_N;
+ }
write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
}
@@ -9472,6 +9490,78 @@ static void init_lcb(struct hfi1_devdata *dd)
write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
}
+/*
+ * Perform a test read on the QSFP. Return 0 on success, -ERRNO
+ * on error.
+ */
+static int test_qsfp_read(struct hfi1_pportdata *ppd)
+{
+ int ret;
+ u8 status;
+
+ /* report success if not a QSFP */
+ if (ppd->port_type != PORT_TYPE_QSFP)
+ return 0;
+
+ /* read byte 2, the status byte */
+ ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
+ if (ret < 0)
+ return ret;
+ if (ret != 1)
+ return -EIO;
+
+ return 0; /* success */
+}
+
+/*
+ * Values for QSFP retry.
+ *
+ * Give up after 10s (20 x 500ms). The overall timeout was determined
+ * empirically from experience on a large cluster.
+ */
+#define MAX_QSFP_RETRIES 20
+#define QSFP_RETRY_WAIT 500 /* msec */
+
+/*
+ * Try a QSFP read. If it fails, schedule a retry for later.
+ * Called on first link activation after driver load.
+ */
+static void try_start_link(struct hfi1_pportdata *ppd)
+{
+ if (test_qsfp_read(ppd)) {
+ /* read failed */
+ if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
+ dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
+ return;
+ }
+ dd_dev_info(ppd->dd,
+ "QSFP not responding, waiting and retrying %d\n",
+ (int)ppd->qsfp_retry_count);
+ ppd->qsfp_retry_count++;
+ queue_delayed_work(ppd->hfi1_wq, &ppd->start_link_work,
+ msecs_to_jiffies(QSFP_RETRY_WAIT));
+ return;
+ }
+ ppd->qsfp_retry_count = 0;
+
+ /*
+ * Tune the SerDes to a ballpark setting for optimal signal and bit
+ * error rate. Needs to be done before starting the link.
+ */
+ tune_serdes(ppd);
+ start_link(ppd);
+}
+
+/*
+ * Workqueue function to start the link after a delay.
+ */
+void handle_start_link(struct work_struct *work)
+{
+ struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
+ start_link_work.work);
+ try_start_link(ppd);
+}
+
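The start_link_work member and its initialization are added elsewhere in this patch, outside these hunks; the expected wiring, stated here as an assumption, is the usual delayed-work setup consumed by queue_delayed_work() above and flushed in hfi1_quiet_serdes() below:

    /* Assumed init site -- not shown in these hunks. */
    INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link);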
int bringup_serdes(struct hfi1_pportdata *ppd)
{
struct hfi1_devdata *dd = ppd->dd;
@@ -9507,14 +9597,8 @@ int bringup_serdes(struct hfi1_pportdata *ppd)
set_qsfp_int_n(ppd, 1);
}
- /*
- * Tune the SerDes to a ballpark setting for
- * optimal signal and bit error rate
- * Needs to be done before starting the link
- */
- tune_serdes(ppd);
-
- return start_link(ppd);
+ try_start_link(ppd);
+ return 0;
}
void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
@@ -9531,6 +9615,10 @@ void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
ppd->driver_link_ready = 0;
ppd->link_enabled = 0;
+ ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
+ flush_delayed_work(&ppd->start_link_work);
+ cancel_delayed_work_sync(&ppd->start_link_work);
+
ppd->offline_disabled_reason =
HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
@@ -9630,14 +9718,6 @@ void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
}
-int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
- struct hfi1_ctxt_info *kinfo)
-{
- kinfo->runtime_flags = (HFI1_MISC_GET() << HFI1_CAP_USER_SHIFT) |
- HFI1_CAP_UGET(MASK) | HFI1_CAP_KGET(K2U);
- return 0;
-}
-
struct hfi1_message_header *hfi1_get_msgheader(
struct hfi1_devdata *dd, __le32 *rhf_addr)
{
@@ -9890,6 +9970,131 @@ static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
return 0;
}
+static const char *state_completed_string(u32 completed)
+{
+ static const char * const state_completed[] = {
+ "EstablishComm",
+ "OptimizeEQ",
+ "VerifyCap"
+ };
+
+ if (completed < ARRAY_SIZE(state_completed))
+ return state_completed[completed];
+
+ return "unknown";
+}
+
+static const char all_lanes_dead_timeout_expired[] =
+ "All lanes were inactive – was the interconnect media removed?";
+static const char tx_out_of_policy[] =
+ "Passing lanes on local port do not meet the local link width policy";
+static const char no_state_complete[] =
+ "State timeout occurred before link partner completed the state";
+static const char * const state_complete_reasons[] = {
+ [0x00] = "Reason unknown",
+ [0x01] = "Link was halted by driver, refer to LinkDownReason",
+ [0x02] = "Link partner reported failure",
+ [0x10] = "Unable to achieve frame sync on any lane",
+ [0x11] =
+ "Unable to find a common bit rate with the link partner",
+ [0x12] =
+ "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
+ [0x13] =
+ "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
+ [0x14] = no_state_complete,
+ [0x15] =
+ "State timeout occurred before link partner identified equalization presets",
+ [0x16] =
+ "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
+ [0x17] = tx_out_of_policy,
+ [0x20] = all_lanes_dead_timeout_expired,
+ [0x21] =
+ "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
+ [0x22] = no_state_complete,
+ [0x23] =
+ "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
+ [0x24] = tx_out_of_policy,
+ [0x30] = all_lanes_dead_timeout_expired,
+ [0x31] =
+ "State timeout occurred waiting for host to process received frames",
+ [0x32] = no_state_complete,
+ [0x33] =
+ "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
+ [0x34] = tx_out_of_policy,
+};
+
+static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
+ u32 code)
+{
+ const char *str = NULL;
+
+ if (code < ARRAY_SIZE(state_complete_reasons))
+ str = state_complete_reasons[code];
+
+ if (str)
+ return str;
+ return "Reserved";
+}
+
+/* describe the given last state complete frame */
+static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
+ const char *prefix)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ u32 success;
+ u32 state;
+ u32 reason;
+ u32 lanes;
+
+ /*
+ * Decode frame:
+ * [ 0: 0] - success
+ * [ 3: 1] - state
+ * [ 7: 4] - next state timeout
+ * [15: 8] - reason code
+ * [31:16] - lanes
+ */
+ success = frame & 0x1;
+ state = (frame >> 1) & 0x7;
+ reason = (frame >> 8) & 0xff;
+ lanes = (frame >> 16) & 0xffff;
+
+ dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
+ prefix, frame);
+ dd_dev_err(dd, " last reported state state: %s (0x%x)\n",
+ state_completed_string(state), state);
+ dd_dev_err(dd, " state successfully completed: %s\n",
+ success ? "yes" : "no");
+ dd_dev_err(dd, " fail reason 0x%x: %s\n",
+ reason, state_complete_reason_code_string(ppd, reason));
+ dd_dev_err(dd, " passing lane mask: 0x%x", lanes);
+}
+
+/*
+ * Read the last state complete frames and explain them. This routine
+ * expects to be called if the link went down during link negotiation
+ * and initialization (LNI). That is, anywhere between polling and link up.
+ */
+static void check_lni_states(struct hfi1_pportdata *ppd)
+{
+ u32 last_local_state;
+ u32 last_remote_state;
+
+ read_last_local_state(ppd->dd, &last_local_state);
+ read_last_remote_state(ppd->dd, &last_remote_state);
+
+ /*
+ * Don't report anything if there is nothing to report. A value of
+ * 0 means the link was taken down while polling and there was no
+ * training in progress.
+ */
+ if (last_local_state == 0 && last_remote_state == 0)
+ return;
+
+ decode_state_complete(ppd, last_local_state, "transmitted");
+ decode_state_complete(ppd, last_remote_state, "received");
+}
+
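To make the bit layout documented in decode_state_complete() concrete, a hypothetical frame value (invented for illustration) decodes as follows:

    u32 frame = 0x000f2102;              /* hypothetical example */
    u32 success = frame & 0x1;           /* 0: state did not complete */
    u32 state = (frame >> 1) & 0x7;      /* 1: "OptimizeEQ" */
    u32 reason = (frame >> 8) & 0xff;    /* 0x21: unacceptable BER */
    u32 lanes = (frame >> 16) & 0xffff;  /* 0x000f: lanes 0-3 passing */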
/*
* Helper for set_link_state(). Do not call except from that routine.
* Expects ppd->hls_mutex to be held.
@@ -9902,8 +10107,6 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
{
struct hfi1_devdata *dd = ppd->dd;
u32 pstate, previous_state;
- u32 last_local_state;
- u32 last_remote_state;
int ret;
int do_transition;
int do_wait;
@@ -10003,12 +10206,7 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
} else if (previous_state
& (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
/* went down while attempting link up */
- /* byte 1 of last_*_state is the failure reason */
- read_last_local_state(dd, &last_local_state);
- read_last_remote_state(dd, &last_remote_state);
- dd_dev_err(dd,
- "LNI failure last states: local 0x%08x, remote 0x%08x\n",
- last_local_state, last_remote_state);
+ check_lni_states(ppd);
}
/* the active link width (downgrade) is 0 on link down */
@@ -11668,9 +11866,6 @@ static void free_cntrs(struct hfi1_devdata *dd)
dd->cntrnames = NULL;
}
-#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
-#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
-
static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
u64 *psval, void *context, int vl)
{
@@ -12325,37 +12520,6 @@ u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
return ib_pstate;
}
-/*
- * Read/modify/write ASIC_QSFP register bits as selected by mask
- * data: 0 or 1 in the positions depending on what needs to be written
- * dir: 0 for read, 1 for write
- * mask: select by setting
- * I2CCLK (bit 0)
- * I2CDATA (bit 1)
- */
-u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
- u32 mask)
-{
- u64 qsfp_oe, target_oe;
-
- target_oe = target ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
- if (mask) {
- /* We are writing register bits, so lock access */
- dir &= mask;
- data &= mask;
-
- qsfp_oe = read_csr(dd, target_oe);
- qsfp_oe = (qsfp_oe & ~(u64)mask) | (u64)dir;
- write_csr(dd, target_oe, qsfp_oe);
- }
- /* We are exclusively reading bits here, but it is unlikely
- * we'll get valid data when we set the direction of the pin
- * in the same call, so read should call this function again
- * to get valid data
- */
- return read_csr(dd, target ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
-}
-
#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
@@ -12771,7 +12935,7 @@ fail:
*/
static int set_up_context_variables(struct hfi1_devdata *dd)
{
- int num_kernel_contexts;
+ unsigned long num_kernel_contexts;
int total_contexts;
int ret;
unsigned ngroups;
@@ -12780,7 +12944,6 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
/*
* Kernel receive contexts:
- * - min of 2 or 1 context/numa (excluding control context)
* - Context 0 - control context (VL15/multicast/error)
* - Context 1 - first kernel context
* - Context 2 - second kernel context
@@ -12794,18 +12957,16 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
*/
num_kernel_contexts = n_krcvqs + 1;
else
- num_kernel_contexts = num_online_nodes() + 1;
- num_kernel_contexts =
- max_t(int, MIN_KERNEL_KCTXTS, num_kernel_contexts);
+ num_kernel_contexts = DEFAULT_KRCVQS + 1;
/*
* Every kernel receive context needs an ACK send context.
* one send context is allocated for each VL{0-7} and VL15
*/
if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
dd_dev_err(dd,
- "Reducing # kernel rcv contexts to: %d, from %d\n",
+ "Reducing # kernel rcv contexts to: %d, from %lu\n",
(int)(dd->chip_send_contexts - num_vls - 1),
- (int)num_kernel_contexts);
+ num_kernel_contexts);
num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
}
/*
@@ -12815,7 +12976,7 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
*/
if (num_user_contexts < 0)
num_user_contexts =
- cpumask_weight(&dd->affinity->real_cpu_mask);
+ cpumask_weight(&node_affinity.real_cpu_mask);
total_contexts = num_kernel_contexts + num_user_contexts;
@@ -14141,6 +14302,11 @@ static int init_asic_data(struct hfi1_devdata *dd)
}
dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
spin_unlock_irqrestore(&hfi1_devs_lock, flags);
+
+ /* first one through - set up i2c devices */
+ if (!peer)
+ ret = set_up_i2c(dd, dd->asic_data);
+
return ret;
}
@@ -14445,19 +14611,6 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
(dd->revision >> CCE_REVISION_SW_SHIFT)
& CCE_REVISION_SW_MASK);
- /*
- * The real cpu mask is part of the affinity struct but has to be
- * initialized earlier than the rest of the affinity struct because it
- * is needed to calculate the number of user contexts in
- * set_up_context_variables(). However, hfi1_dev_affinity_init(),
- * which initializes the rest of the affinity struct members,
- * depends on set_up_context_variables() for the number of kernel
- * contexts, so it cannot be called before set_up_context_variables().
- */
- ret = init_real_cpu_mask(dd);
- if (ret)
- goto bail_cleanup;
-
ret = set_up_context_variables(dd);
if (ret)
goto bail_cleanup;
@@ -14471,7 +14624,9 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
/* set up KDETH QP prefix in both RX and TX CSRs */
init_kdeth_qp(dd);
- hfi1_dev_affinity_init(dd);
+ ret = hfi1_dev_affinity_init(dd);
+ if (ret)
+ goto bail_cleanup;
/* send contexts must be set up before receive contexts */
ret = init_send_contexts(dd);
@@ -14508,8 +14663,14 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
/* set up LCB access - must be after set_up_interrupts() */
init_lcb_access(dd);
+ /*
+ * Serial number is created from the base guid:
+ * [27:24] = base guid [38:35]
+ * [23: 0] = base guid [23: 0]
+ */
snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
- dd->base_guid & 0xFFFFFF);
+ (dd->base_guid & 0xFFFFFF) |
+ ((dd->base_guid >> 11) & 0xF000000));
dd->oui1 = dd->base_guid >> 56 & 0xFF;
dd->oui2 = dd->base_guid >> 48 & 0xFF;
@@ -14518,7 +14679,6 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
if (ret)
goto bail_clear_intr;
- check_fabric_firmware_versions(dd);
thermal_init(dd);
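The serial-number hunk above packs base-GUID bits [38:35] into serial bits [27:24]; a right shift by 11 maps bit 35 onto bit 24 (35 - 11 = 24). A quick check with a hypothetical base GUID, value invented for illustration:

    u64 base_guid = 0x0000005800abcdefULL;   /* hypothetical */
    u64 lo = base_guid & 0xFFFFFF;           /* bits [23:0]  -> 0xabcdef */
    u64 hi = (base_guid >> 11) & 0xF000000;  /* bits [38:35] -> 0x0b000000 */
    /* serial value: hi | lo == 0x0babcdef */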
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index 66a327978..e29573769 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -640,6 +640,7 @@ extern uint platform_config_load;
/* SBus commands */
#define RESET_SBUS_RECEIVER 0x20
#define WRITE_SBUS_RECEIVER 0x21
+#define READ_SBUS_RECEIVER 0x22
void sbus_request(struct hfi1_devdata *dd,
u8 receiver_addr, u8 data_addr, u8 command, u32 data_in);
int sbus_request_slow(struct hfi1_devdata *dd,
@@ -705,6 +706,7 @@ void handle_link_up(struct work_struct *work);
void handle_link_down(struct work_struct *work);
void handle_link_downgrade(struct work_struct *work);
void handle_link_bounce(struct work_struct *work);
+void handle_start_link(struct work_struct *work);
void handle_sma_message(struct work_struct *work);
void reset_qsfp(struct hfi1_pportdata *ppd);
void qsfp_event(struct work_struct *work);
@@ -1336,10 +1338,6 @@ void hfi1_start_cleanup(struct hfi1_devdata *dd);
void hfi1_clear_tids(struct hfi1_ctxtdata *rcd);
struct hfi1_message_header *hfi1_get_msgheader(
struct hfi1_devdata *dd, __le32 *rhf_addr);
-int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
- struct hfi1_ctxt_info *kinfo);
-u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
- u32 mask);
int hfi1_init_ctxt(struct send_context *sc);
void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
u32 type, unsigned long pa, u16 order);
diff --git a/drivers/infiniband/hw/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h
index 8744de666..5b9993899 100644
--- a/drivers/infiniband/hw/hfi1/chip_registers.h
+++ b/drivers/infiniband/hw/hfi1/chip_registers.h
@@ -471,6 +471,10 @@
#define ASIC_STS_SBUS_RESULT (ASIC + 0x000000000010)
#define ASIC_STS_SBUS_RESULT_DONE_SMASK 0x1ull
#define ASIC_STS_SBUS_RESULT_RCV_DATA_VALID_SMASK 0x2ull
+#define ASIC_STS_SBUS_RESULT_RESULT_CODE_SHIFT 2
+#define ASIC_STS_SBUS_RESULT_RESULT_CODE_MASK 0x7ull
+#define ASIC_STS_SBUS_RESULT_DATA_OUT_SHIFT 32
+#define ASIC_STS_SBUS_RESULT_DATA_OUT_MASK 0xFFFFFFFFull
#define ASIC_STS_THERM (ASIC + 0x000000000058)
#define ASIC_STS_THERM_CRIT_TEMP_MASK 0x7FFull
#define ASIC_STS_THERM_CRIT_TEMP_SHIFT 18
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index a49cc88f0..5e9be16f6 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
@@ -59,6 +59,40 @@
static struct dentry *hfi1_dbg_root;
+/* wrappers to enforce srcu in seq file */
+static ssize_t hfi1_seq_read(
+ struct file *file,
+ char __user *buf,
+ size_t size,
+ loff_t *ppos)
+{
+ struct dentry *d = file->f_path.dentry;
+ int srcu_idx;
+ ssize_t r;
+
+ r = debugfs_use_file_start(d, &srcu_idx);
+ if (likely(!r))
+ r = seq_read(file, buf, size, ppos);
+ debugfs_use_file_finish(srcu_idx);
+ return r;
+}
+
+static loff_t hfi1_seq_lseek(
+ struct file *file,
+ loff_t offset,
+ int whence)
+{
+ struct dentry *d = file->f_path.dentry;
+ int srcu_idx;
+ loff_t r;
+
+ r = debugfs_use_file_start(d, &srcu_idx);
+ if (likely(!r))
+ r = seq_lseek(file, offset, whence);
+ debugfs_use_file_finish(srcu_idx);
+ return r;
+}
+
#define private2dd(file) (file_inode(file)->i_private)
#define private2ppd(file) (file_inode(file)->i_private)
@@ -87,8 +121,8 @@ static int _##name##_open(struct inode *inode, struct file *s) \
static const struct file_operations _##name##_file_ops = { \
.owner = THIS_MODULE, \
.open = _##name##_open, \
- .read = seq_read, \
- .llseek = seq_lseek, \
+ .read = hfi1_seq_read, \
+ .llseek = hfi1_seq_lseek, \
.release = seq_release \
}
@@ -105,11 +139,9 @@ do { \
DEBUGFS_FILE_CREATE(#name, parent, data, &_##name##_file_ops, S_IRUGO)
static void *_opcode_stats_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
{
struct hfi1_opcode_stats_perctx *opstats;
- rcu_read_lock();
if (*pos >= ARRAY_SIZE(opstats->stats))
return NULL;
return pos;
@@ -126,9 +158,7 @@ static void *_opcode_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
}
static void _opcode_stats_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
{
- rcu_read_unlock();
}
static int _opcode_stats_seq_show(struct seq_file *s, void *v)
@@ -285,12 +315,10 @@ DEBUGFS_SEQ_FILE_OPEN(qp_stats)
DEBUGFS_FILE_OPS(qp_stats);
static void *_sdes_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
{
struct hfi1_ibdev *ibd;
struct hfi1_devdata *dd;
- rcu_read_lock();
ibd = (struct hfi1_ibdev *)s->private;
dd = dd_from_dev(ibd);
if (!dd->per_sdma || *pos >= dd->num_sdma)
@@ -310,9 +338,7 @@ static void *_sdes_seq_next(struct seq_file *s, void *v, loff_t *pos)
}
static void _sdes_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
{
- rcu_read_unlock();
}
static int _sdes_seq_show(struct seq_file *s, void *v)
@@ -339,11 +365,9 @@ static ssize_t dev_counters_read(struct file *file, char __user *buf,
struct hfi1_devdata *dd;
ssize_t rval;
- rcu_read_lock();
dd = private2dd(file);
avail = hfi1_read_cntrs(dd, NULL, &counters);
rval = simple_read_from_buffer(buf, count, ppos, counters, avail);
- rcu_read_unlock();
return rval;
}
@@ -356,11 +380,9 @@ static ssize_t dev_names_read(struct file *file, char __user *buf,
struct hfi1_devdata *dd;
ssize_t rval;
- rcu_read_lock();
dd = private2dd(file);
avail = hfi1_read_cntrs(dd, &names, NULL);
rval = simple_read_from_buffer(buf, count, ppos, names, avail);
- rcu_read_unlock();
return rval;
}
@@ -383,11 +405,9 @@ static ssize_t portnames_read(struct file *file, char __user *buf,
struct hfi1_devdata *dd;
ssize_t rval;
- rcu_read_lock();
dd = private2dd(file);
avail = hfi1_read_portcntrs(dd->pport, &names, NULL);
rval = simple_read_from_buffer(buf, count, ppos, names, avail);
- rcu_read_unlock();
return rval;
}
@@ -400,11 +420,9 @@ static ssize_t portcntrs_debugfs_read(struct file *file, char __user *buf,
struct hfi1_pportdata *ppd;
ssize_t rval;
- rcu_read_lock();
ppd = private2ppd(file);
avail = hfi1_read_portcntrs(ppd, NULL, &counters);
rval = simple_read_from_buffer(buf, count, ppos, counters, avail);
- rcu_read_unlock();
return rval;
}
@@ -434,16 +452,13 @@ static ssize_t asic_flags_read(struct file *file, char __user *buf,
int used;
int i;
- rcu_read_lock();
ppd = private2ppd(file);
dd = ppd->dd;
size = PAGE_SIZE;
used = 0;
tmp = kmalloc(size, GFP_KERNEL);
- if (!tmp) {
- rcu_read_unlock();
+ if (!tmp)
return -ENOMEM;
- }
scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
used += scnprintf(tmp + used, size - used,
@@ -470,7 +485,6 @@ static ssize_t asic_flags_read(struct file *file, char __user *buf,
used += scnprintf(tmp + used, size - used, "Write bits to clear\n");
ret = simple_read_from_buffer(buf, count, ppos, tmp, used);
- rcu_read_unlock();
kfree(tmp);
return ret;
}
@@ -486,15 +500,12 @@ static ssize_t asic_flags_write(struct file *file, const char __user *buf,
u64 scratch0;
u64 clear;
- rcu_read_lock();
ppd = private2ppd(file);
dd = ppd->dd;
buff = kmalloc(count + 1, GFP_KERNEL);
- if (!buff) {
- ret = -ENOMEM;
- goto do_return;
- }
+ if (!buff)
+ return -ENOMEM;
ret = copy_from_user(buff, buf, count);
if (ret > 0) {
@@ -527,8 +538,6 @@ static ssize_t asic_flags_write(struct file *file, const char __user *buf,
do_free:
kfree(buff);
- do_return:
- rcu_read_unlock();
return ret;
}
@@ -542,18 +551,14 @@ static ssize_t qsfp_debugfs_dump(struct file *file, char __user *buf,
char *tmp;
int ret;
- rcu_read_lock();
ppd = private2ppd(file);
tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!tmp) {
- rcu_read_unlock();
+ if (!tmp)
return -ENOMEM;
- }
ret = qsfp_dump(ppd, tmp, PAGE_SIZE);
if (ret > 0)
ret = simple_read_from_buffer(buf, count, ppos, tmp, ret);
- rcu_read_unlock();
kfree(tmp);
return ret;
}
@@ -569,7 +574,6 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
int offset;
int total_written;
- rcu_read_lock();
ppd = private2ppd(file);
/* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */
@@ -577,16 +581,12 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
offset = *ppos & 0xffff;
/* explicitly reject invalid address 0 to catch cp and cat */
- if (i2c_addr == 0) {
- ret = -EINVAL;
- goto _return;
- }
+ if (i2c_addr == 0)
+ return -EINVAL;
buff = kmalloc(count, GFP_KERNEL);
- if (!buff) {
- ret = -ENOMEM;
- goto _return;
- }
+ if (!buff)
+ return -ENOMEM;
ret = copy_from_user(buff, buf, count);
if (ret > 0) {
@@ -606,8 +606,6 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
_free:
kfree(buff);
- _return:
- rcu_read_unlock();
return ret;
}
@@ -636,7 +634,6 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf,
int offset;
int total_read;
- rcu_read_lock();
ppd = private2ppd(file);
/* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */
@@ -644,16 +641,12 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf,
offset = *ppos & 0xffff;
/* explicitly reject invalid address 0 to catch cp and cat */
- if (i2c_addr == 0) {
- ret = -EINVAL;
- goto _return;
- }
+ if (i2c_addr == 0)
+ return -EINVAL;
buff = kmalloc(count, GFP_KERNEL);
- if (!buff) {
- ret = -ENOMEM;
- goto _return;
- }
+ if (!buff)
+ return -ENOMEM;
total_read = i2c_read(ppd, target, i2c_addr, offset, buff, count);
if (total_read < 0) {
@@ -673,8 +666,6 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf,
_free:
kfree(buff);
- _return:
- rcu_read_unlock();
return ret;
}
@@ -701,26 +692,20 @@ static ssize_t __qsfp_debugfs_write(struct file *file, const char __user *buf,
int ret;
int total_written;
- rcu_read_lock();
- if (*ppos + count > QSFP_PAGESIZE * 4) { /* base page + page00-page03 */
- ret = -EINVAL;
- goto _return;
- }
+ if (*ppos + count > QSFP_PAGESIZE * 4) /* base page + page00-page03 */
+ return -EINVAL;
ppd = private2ppd(file);
buff = kmalloc(count, GFP_KERNEL);
- if (!buff) {
- ret = -ENOMEM;
- goto _return;
- }
+ if (!buff)
+ return -ENOMEM;
ret = copy_from_user(buff, buf, count);
if (ret > 0) {
ret = -EFAULT;
goto _free;
}
-
total_written = qsfp_write(ppd, target, *ppos, buff, count);
if (total_written < 0) {
ret = total_written;
@@ -733,8 +718,6 @@ static ssize_t __qsfp_debugfs_write(struct file *file, const char __user *buf,
_free:
kfree(buff);
- _return:
- rcu_read_unlock();
return ret;
}
@@ -761,7 +744,6 @@ static ssize_t __qsfp_debugfs_read(struct file *file, char __user *buf,
int ret;
int total_read;
- rcu_read_lock();
if (*ppos + count > QSFP_PAGESIZE * 4) { /* base page + page00-page03 */
ret = -EINVAL;
goto _return;
@@ -794,7 +776,6 @@ static ssize_t __qsfp_debugfs_read(struct file *file, char __user *buf,
_free:
kfree(buff);
_return:
- rcu_read_unlock();
return ret;
}
@@ -1010,7 +991,6 @@ void hfi1_dbg_ibdev_exit(struct hfi1_ibdev *ibd)
debugfs_remove_recursive(ibd->hfi1_ibdev_dbg);
out:
ibd->hfi1_ibdev_dbg = NULL;
- synchronize_rcu();
}
/*
@@ -1035,9 +1015,7 @@ static const char * const hfi1_statnames[] = {
};
static void *_driver_stats_names_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
{
- rcu_read_lock();
if (*pos >= ARRAY_SIZE(hfi1_statnames))
return NULL;
return pos;
@@ -1055,9 +1033,7 @@ static void *_driver_stats_names_seq_next(
}
static void _driver_stats_names_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
{
- rcu_read_unlock();
}
static int _driver_stats_names_seq_show(struct seq_file *s, void *v)
@@ -1073,9 +1049,7 @@ DEBUGFS_SEQ_FILE_OPEN(driver_stats_names)
DEBUGFS_FILE_OPS(driver_stats_names);
static void *_driver_stats_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
{
- rcu_read_lock();
if (*pos >= ARRAY_SIZE(hfi1_statnames))
return NULL;
return pos;
@@ -1090,9 +1064,7 @@ static void *_driver_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
}
static void _driver_stats_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
{
- rcu_read_unlock();
}
static u64 hfi1_sps_ints(void)
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index c75b0ae68..303f10555 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -392,9 +392,7 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
u16 rlid;
u8 svc_type, sl, sc5;
- sc5 = (be16_to_cpu(rhdr->lrh[0]) >> 12) & 0xf;
- if (rhf_dc_info(packet->rhf))
- sc5 |= 0x10;
+ sc5 = hdr2sc(rhdr, packet->rhf);
sl = ibp->sc_to_sl[sc5];
lqpn = be32_to_cpu(bth[1]) & RVT_QPN_MASK;
@@ -450,14 +448,20 @@ static inline void init_packet(struct hfi1_ctxtdata *rcd,
packet->rcv_flags = 0;
}
-static void process_ecn(struct rvt_qp *qp, struct hfi1_ib_header *hdr,
- struct hfi1_other_headers *ohdr,
- u64 rhf, u32 bth1, struct ib_grh *grh)
+void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
+ bool do_cnp)
{
struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- u32 rqpn = 0;
- u16 rlid;
- u8 sc5, svc_type;
+ struct hfi1_ib_header *hdr = pkt->hdr;
+ struct hfi1_other_headers *ohdr = pkt->ohdr;
+ struct ib_grh *grh = NULL;
+ u32 rqpn = 0, bth1;
+ u16 rlid, dlid = be16_to_cpu(hdr->lrh[1]);
+ u8 sc, svc_type;
+ bool is_mcast = false;
+
+ if (pkt->rcv_flags & HFI1_HAS_GRH)
+ grh = &hdr->u.l.grh;
switch (qp->ibqp.qp_type) {
case IB_QPT_SMI:
@@ -466,6 +470,8 @@ static void process_ecn(struct rvt_qp *qp, struct hfi1_ib_header *hdr,
rlid = be16_to_cpu(hdr->lrh[3]);
rqpn = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK;
svc_type = IB_CC_SVCTYPE_UD;
+ is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
+ (dlid != be16_to_cpu(IB_LID_PERMISSIVE));
break;
case IB_QPT_UC:
rlid = qp->remote_ah_attr.dlid;
@@ -481,24 +487,23 @@ static void process_ecn(struct rvt_qp *qp, struct hfi1_ib_header *hdr,
return;
}
- sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
- if (rhf_dc_info(rhf))
- sc5 |= 0x10;
+ sc = hdr2sc((struct hfi1_message_header *)hdr, pkt->rhf);
- if (bth1 & HFI1_FECN_SMASK) {
+ bth1 = be32_to_cpu(ohdr->bth[1]);
+ if (do_cnp && (bth1 & HFI1_FECN_SMASK)) {
u16 pkey = (u16)be32_to_cpu(ohdr->bth[0]);
- u16 dlid = be16_to_cpu(hdr->lrh[1]);
- return_cnp(ibp, qp, rqpn, pkey, dlid, rlid, sc5, grh);
+ return_cnp(ibp, qp, rqpn, pkey, dlid, rlid, sc, grh);
}
- if (bth1 & HFI1_BECN_SMASK) {
+ if (!is_mcast && (bth1 & HFI1_BECN_SMASK)) {
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
u32 lqpn = bth1 & RVT_QPN_MASK;
- u8 sl = ibp->sc_to_sl[sc5];
+ u8 sl = ibp->sc_to_sl[sc];
process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
}
+
}
struct ps_mdata {
@@ -596,7 +601,6 @@ static void __prescan_rxq(struct hfi1_packet *packet)
struct rvt_qp *qp;
struct hfi1_ib_header *hdr;
struct hfi1_other_headers *ohdr;
- struct ib_grh *grh = NULL;
struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
u64 rhf = rhf_to_cpu(rhf_addr);
u32 etype = rhf_rcv_type(rhf), qpn, bth1;
@@ -616,14 +620,13 @@ static void __prescan_rxq(struct hfi1_packet *packet)
hfi1_get_msgheader(dd, rhf_addr);
lnh = be16_to_cpu(hdr->lrh[0]) & 3;
- if (lnh == HFI1_LRH_BTH) {
+ if (lnh == HFI1_LRH_BTH)
ohdr = &hdr->u.oth;
- } else if (lnh == HFI1_LRH_GRH) {
+ else if (lnh == HFI1_LRH_GRH)
ohdr = &hdr->u.l.oth;
- grh = &hdr->u.l.grh;
- } else {
+ else
goto next; /* just in case */
- }
+
bth1 = be32_to_cpu(ohdr->bth[1]);
is_ecn = !!(bth1 & (HFI1_FECN_SMASK | HFI1_BECN_SMASK));
@@ -639,7 +642,7 @@ static void __prescan_rxq(struct hfi1_packet *packet)
goto next;
}
- process_ecn(qp, hdr, ohdr, rhf, bth1, grh);
+ process_ecn(qp, packet, true);
rcu_read_unlock();
/* turn off BECN, FECN */
@@ -885,14 +888,15 @@ void set_all_slowpath(struct hfi1_devdata *dd)
}
static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd,
- struct hfi1_packet packet,
+ struct hfi1_packet *packet,
struct hfi1_devdata *dd)
{
struct work_struct *lsaw = &rcd->ppd->linkstate_active_work;
- struct hfi1_message_header *hdr = hfi1_get_msgheader(packet.rcd->dd,
- packet.rhf_addr);
+ struct hfi1_message_header *hdr = hfi1_get_msgheader(packet->rcd->dd,
+ packet->rhf_addr);
+ u8 etype = rhf_rcv_type(packet->rhf);
- if (hdr2sc(hdr, packet.rhf) != 0xf) {
+ if (etype == RHF_RCV_TYPE_IB && hdr2sc(hdr, packet->rhf) != 0xf) {
int hwstate = read_logical_state(dd);
if (hwstate != LSTATE_ACTIVE) {
@@ -976,7 +980,7 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
/* Auto activate link on non-SC15 packet receive */
if (unlikely(rcd->ppd->host_link_state ==
HLS_UP_ARMED) &&
- set_armed_to_active(rcd, packet, dd))
+ set_armed_to_active(rcd, &packet, dd))
goto bail;
last = process_rcv_packet(&packet, thread);
}
@@ -1362,6 +1366,7 @@ int process_receive_bypass(struct hfi1_packet *packet)
dd_dev_err(packet->rcd->dd,
"Bypass packets are not supported in normal operation. Dropping\n");
+ incr_cntr64(&packet->rcd->dd->sw_rcv_bypass_packet_errors);
return RHF_RCV_CONTINUE;
}
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index c702a0096..7e03ccd25 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -168,6 +168,7 @@ static inline int is_valid_mmap(u64 token)
static int hfi1_file_open(struct inode *inode, struct file *fp)
{
+ struct hfi1_filedata *fd;
struct hfi1_devdata *dd = container_of(inode->i_cdev,
struct hfi1_devdata,
user_cdev);
@@ -176,10 +177,18 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
kobject_get(&dd->kobj);
/* The real work is performed later in assign_ctxt() */
- fp->private_data = kzalloc(sizeof(struct hfi1_filedata), GFP_KERNEL);
- if (fp->private_data) /* no cpu affinity by default */
- ((struct hfi1_filedata *)fp->private_data)->rec_cpu_num = -1;
- return fp->private_data ? 0 : -ENOMEM;
+
+ fd = kzalloc(sizeof(*fd), GFP_KERNEL);
+
+ if (fd) {
+ fd->rec_cpu_num = -1; /* no cpu affinity by default */
+ fd->mm = current->mm;
+ atomic_inc(&fd->mm->mm_count);
+ }
+
+ fp->private_data = fd;
+
+ return fd ? 0 : -ENOMEM;
}
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
@@ -214,7 +223,7 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
ret = assign_ctxt(fp, &uinfo);
if (ret < 0)
return ret;
- setup_ctxt(fp);
+ ret = setup_ctxt(fp);
if (ret)
return ret;
ret = user_init(fp);
@@ -228,7 +237,7 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
sizeof(struct hfi1_base_info));
break;
case HFI1_IOCTL_CREDIT_UPD:
- if (uctxt && uctxt->sc)
+ if (uctxt)
sc_return_credits(uctxt->sc);
break;
@@ -392,41 +401,38 @@ static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
struct hfi1_user_sdma_pkt_q *pq = fd->pq;
struct hfi1_user_sdma_comp_q *cq = fd->cq;
- int ret = 0, done = 0, reqs = 0;
+ int done = 0, reqs = 0;
unsigned long dim = from->nr_segs;
- if (!cq || !pq) {
- ret = -EIO;
- goto done;
- }
+ if (!cq || !pq)
+ return -EIO;
- if (!iter_is_iovec(from) || !dim) {
- ret = -EINVAL;
- goto done;
- }
+ if (!iter_is_iovec(from) || !dim)
+ return -EINVAL;
hfi1_cdbg(SDMA, "SDMA request from %u:%u (%lu)",
fd->uctxt->ctxt, fd->subctxt, dim);
- if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
- ret = -ENOSPC;
- goto done;
- }
+ if (atomic_read(&pq->n_reqs) == pq->n_max_reqs)
+ return -ENOSPC;
while (dim) {
+ int ret;
unsigned long count = 0;
ret = hfi1_user_sdma_process_request(
kiocb->ki_filp, (struct iovec *)(from->iov + done),
dim, &count);
- if (ret)
- goto done;
+ if (ret) {
+ reqs = ret;
+ break;
+ }
dim -= count;
done += count;
reqs++;
}
-done:
- return ret ? ret : reqs;
+
+ return reqs;
}
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
@@ -718,7 +724,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
hfi1_user_sdma_free_queues(fdata);
/* release the cpu */
- hfi1_put_proc_affinity(dd, fdata->rec_cpu_num);
+ hfi1_put_proc_affinity(fdata->rec_cpu_num);
/*
* Clear any left over, unhandled events so the next process that
@@ -730,7 +736,6 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
if (--uctxt->cnt) {
uctxt->active_slaves &= ~(1 << fdata->subctxt);
- uctxt->subpid[fdata->subctxt] = 0;
mutex_unlock(&hfi1_mutex);
goto done;
}
@@ -756,7 +761,6 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
write_kctxt_csr(dd, uctxt->sc->hw_context, SEND_CTXT_CHECK_ENABLE,
hfi1_pkt_default_send_ctxt_mask(dd, uctxt->sc->type));
sc_disable(uctxt->sc);
- uctxt->pid = 0;
spin_unlock_irqrestore(&dd->uctxt_lock, flags);
dd->rcd[uctxt->ctxt] = NULL;
@@ -776,6 +780,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
mutex_unlock(&hfi1_mutex);
hfi1_free_ctxtdata(dd, uctxt);
done:
+ mmdrop(fdata->mm);
kobject_put(&dd->kobj);
kfree(fdata);
return 0;
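The open/close hunks pin the opener's mm_struct: atomic_inc(&fd->mm->mm_count) at open keeps the structure itself alive (not its address space) until the matching mmdrop() at close. A minimal sketch of the pairing; later kernels add an mmgrab() helper for exactly this increment, but 4.8 predates it:

    /* open path */
    fd->mm = current->mm;
    atomic_inc(&fd->mm->mm_count); /* ctxt may outlive the opening task */
    ...
    /* close path */
    mmdrop(fdata->mm);             /* balances the open-time increment */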
@@ -818,9 +823,10 @@ static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
ret = find_shared_ctxt(fp, uinfo);
if (ret < 0)
goto done_unlock;
- if (ret)
- fd->rec_cpu_num = hfi1_get_proc_affinity(
- fd->uctxt->dd, fd->uctxt->numa_id);
+ if (ret) {
+ fd->rec_cpu_num =
+ hfi1_get_proc_affinity(fd->uctxt->numa_id);
+ }
}
/*
@@ -895,7 +901,6 @@ static int find_shared_ctxt(struct file *fp,
}
fd->uctxt = uctxt;
fd->subctxt = uctxt->cnt++;
- uctxt->subpid[fd->subctxt] = current->pid;
uctxt->active_slaves |= 1 << fd->subctxt;
ret = 1;
goto done;
@@ -932,7 +937,11 @@ static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
if (ctxt == dd->num_rcv_contexts)
return -EBUSY;
- fd->rec_cpu_num = hfi1_get_proc_affinity(dd, -1);
+ /*
+ * If we don't have a NUMA node requested, preference is towards
+ * device NUMA node.
+ */
+ fd->rec_cpu_num = hfi1_get_proc_affinity(dd->node);
if (fd->rec_cpu_num != -1)
numa = cpu_to_node(fd->rec_cpu_num);
else
@@ -976,8 +985,7 @@ static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
return ret;
}
uctxt->userversion = uinfo->userversion;
- uctxt->pid = current->pid;
- uctxt->flags = HFI1_CAP_UGET(MASK);
+ uctxt->flags = hfi1_cap_mask; /* save current flag state */
init_waitqueue_head(&uctxt->wait);
strlcpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid));
@@ -1080,18 +1088,18 @@ static int user_init(struct file *fp)
hfi1_set_ctxt_jkey(uctxt->dd, uctxt->ctxt, uctxt->jkey);
rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
- if (HFI1_CAP_KGET_MASK(uctxt->flags, HDRSUPP))
+ if (HFI1_CAP_UGET_MASK(uctxt->flags, HDRSUPP))
rcvctrl_ops |= HFI1_RCVCTRL_TIDFLOW_ENB;
/*
* Ignore the bit in the flags for now until proper
* support for multiple packet per rcv array entry is
* added.
*/
- if (!HFI1_CAP_KGET_MASK(uctxt->flags, MULTI_PKT_EGR))
+ if (!HFI1_CAP_UGET_MASK(uctxt->flags, MULTI_PKT_EGR))
rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
- if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_EGR_FULL))
+ if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_EGR_FULL))
rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
- if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
+ if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
/*
* The RcvCtxtCtrl.TailUpd bit has to be explicitly written.
@@ -1099,7 +1107,7 @@ static int user_init(struct file *fp)
* uses of the chip or ctxt. Therefore, add the rcvctrl op
* for both cases.
*/
- if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL))
+ if (HFI1_CAP_UGET_MASK(uctxt->flags, DMA_RTAIL))
rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
else
rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_DIS;
@@ -1122,9 +1130,14 @@ static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
int ret = 0;
memset(&cinfo, 0, sizeof(cinfo));
- ret = hfi1_get_base_kinfo(uctxt, &cinfo);
- if (ret < 0)
- goto done;
+ cinfo.runtime_flags = (((uctxt->flags >> HFI1_CAP_MISC_SHIFT) &
+ HFI1_CAP_MISC_MASK) << HFI1_CAP_USER_SHIFT) |
+ HFI1_CAP_UGET_MASK(uctxt->flags, MASK) |
+ HFI1_CAP_KGET_MASK(uctxt->flags, K2U);
+ /* adjust flag if this fd is not able to cache */
+ if (!fd->handler)
+ cinfo.runtime_flags |= HFI1_CAP_TID_UNMAP; /* no caching */
+
cinfo.num_active = hfi1_count_active_units();
cinfo.unit = uctxt->dd->unit;
cinfo.ctxt = uctxt->ctxt;
@@ -1146,7 +1159,7 @@ static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, cinfo);
if (copy_to_user(ubase, &cinfo, sizeof(cinfo)))
ret = -EFAULT;
-done:
+
return ret;
}
diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
index cbd965cfa..9c223f933 100644
--- a/drivers/infiniband/hw/hfi1/firmware.c
+++ b/drivers/infiniband/hw/hfi1/firmware.c
@@ -206,6 +206,9 @@ static const struct firmware *platform_config;
/* the number of fabric SerDes on the SBus */
#define NUM_FABRIC_SERDES 4
+/* ASIC_STS_SBUS_RESULT.RESULT_CODE value */
+#define SBUS_READ_COMPLETE 0x4
+
/* SBus fabric SerDes addresses, one set per HFI */
static const u8 fabric_serdes_addrs[2][NUM_FABRIC_SERDES] = {
{ 0x01, 0x02, 0x03, 0x04 },
@@ -240,6 +243,7 @@ static const u8 all_pcie_serdes_broadcast = 0xe0;
static void dispose_one_firmware(struct firmware_details *fdet);
static int load_fabric_serdes_firmware(struct hfi1_devdata *dd,
struct firmware_details *fdet);
+static void dump_fw_version(struct hfi1_devdata *dd);
/*
* Read a single 64-bit value from 8051 data memory.
@@ -1079,6 +1083,44 @@ void sbus_request(struct hfi1_devdata *dd,
}
/*
+ * Read a value from the SBus.
+ *
+ * Requires the caller to be in fast mode
+ */
+static u32 sbus_read(struct hfi1_devdata *dd, u8 receiver_addr, u8 data_addr,
+ u32 data_in)
+{
+ u64 reg;
+ int retries;
+ int success = 0;
+ u32 result = 0;
+ u32 result_code = 0;
+
+ sbus_request(dd, receiver_addr, data_addr, READ_SBUS_RECEIVER, data_in);
+
+ for (retries = 0; retries < 100; retries++) {
+ usleep_range(1000, 1200); /* arbitrary */
+ reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
+ result_code = (reg >> ASIC_STS_SBUS_RESULT_RESULT_CODE_SHIFT)
+ & ASIC_STS_SBUS_RESULT_RESULT_CODE_MASK;
+ if (result_code != SBUS_READ_COMPLETE)
+ continue;
+
+ success = 1;
+ result = (reg >> ASIC_STS_SBUS_RESULT_DATA_OUT_SHIFT)
+ & ASIC_STS_SBUS_RESULT_DATA_OUT_MASK;
+ break;
+ }
+
+ if (!success) {
+ dd_dev_err(dd, "%s: read failed, result code 0x%x\n", __func__,
+ result_code);
+ }
+
+ return result;
+}
+
+/*
* Turn off the SBus and fabric serdes spicos.
*
* + Must be called with Sbus fast mode turned on.
@@ -1636,6 +1678,7 @@ int load_firmware(struct hfi1_devdata *dd)
return ret;
}
+ dump_fw_version(dd);
return 0;
}
@@ -2054,3 +2097,85 @@ void read_guid(struct hfi1_devdata *dd)
dd_dev_info(dd, "GUID %llx",
(unsigned long long)dd->base_guid);
}
+
+/* read and display firmware version info */
+static void dump_fw_version(struct hfi1_devdata *dd)
+{
+ u32 pcie_vers[NUM_PCIE_SERDES];
+ u32 fabric_vers[NUM_FABRIC_SERDES];
+ u32 sbus_vers;
+ int i;
+ int all_same;
+ int ret;
+ u8 rcv_addr;
+
+ ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
+ if (ret) {
+ dd_dev_err(dd, "Unable to acquire SBus to read firmware versions\n");
+ return;
+ }
+
+ /* set fast mode */
+ set_sbus_fast_mode(dd);
+
+ /* read version for SBus Master */
+ sbus_request(dd, SBUS_MASTER_BROADCAST, 0x02, WRITE_SBUS_RECEIVER, 0);
+ sbus_request(dd, SBUS_MASTER_BROADCAST, 0x07, WRITE_SBUS_RECEIVER, 0x1);
+ /* wait for interrupt to be processed */
+ usleep_range(10000, 11000);
+ sbus_vers = sbus_read(dd, SBUS_MASTER_BROADCAST, 0x08, 0x1);
+ dd_dev_info(dd, "SBus Master firmware version 0x%08x\n", sbus_vers);
+
+ /* read version for PCIe SerDes */
+ all_same = 1;
+ pcie_vers[0] = 0;
+ for (i = 0; i < NUM_PCIE_SERDES; i++) {
+ rcv_addr = pcie_serdes_addrs[dd->hfi1_id][i];
+ sbus_request(dd, rcv_addr, 0x03, WRITE_SBUS_RECEIVER, 0);
+ /* wait for interrupt to be processed */
+ usleep_range(10000, 11000);
+ pcie_vers[i] = sbus_read(dd, rcv_addr, 0x04, 0x0);
+ if (i > 0 && pcie_vers[0] != pcie_vers[i])
+ all_same = 0;
+ }
+
+ if (all_same) {
+ dd_dev_info(dd, "PCIe SerDes firmware version 0x%x\n",
+ pcie_vers[0]);
+ } else {
+ dd_dev_warn(dd, "PCIe SerDes do not have the same firmware version\n");
+ for (i = 0; i < NUM_PCIE_SERDES; i++) {
+ dd_dev_info(dd,
+ "PCIe SerDes lane %d firmware version 0x%x\n",
+ i, pcie_vers[i]);
+ }
+ }
+
+ /* read version for fabric SerDes */
+ all_same = 1;
+ fabric_vers[0] = 0;
+ for (i = 0; i < NUM_FABRIC_SERDES; i++) {
+ rcv_addr = fabric_serdes_addrs[dd->hfi1_id][i];
+ sbus_request(dd, rcv_addr, 0x03, WRITE_SBUS_RECEIVER, 0);
+ /* wait for interrupt to be processed */
+ usleep_range(10000, 11000);
+ fabric_vers[i] = sbus_read(dd, rcv_addr, 0x04, 0x0);
+ if (i > 0 && fabric_vers[0] != fabric_vers[i])
+ all_same = 0;
+ }
+
+ if (all_same) {
+ dd_dev_info(dd, "Fabric SerDes firmware version 0x%x\n",
+ fabric_vers[0]);
+ } else {
+ dd_dev_warn(dd, "Fabric SerDes do not have the same firmware version\n");
+ for (i = 0; i < NUM_FABRIC_SERDES; i++) {
+ dd_dev_info(dd,
+ "Fabric SerDes lane %d firmware version 0x%x\n",
+ i, fabric_vers[i]);
+ }
+ }
+
+ clear_sbus_fast_mode(dd);
+ release_chip_resource(dd, CR_SBUS);
+}
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 4417a0fd3..325ec2113 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -62,6 +62,8 @@
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
#include <rdma/rdma_vt.h>
#include "chip_registers.h"
@@ -253,7 +255,7 @@ struct hfi1_ctxtdata {
/* chip offset of PIO buffers for this ctxt */
u32 piobufs;
/* per-context configuration flags */
- u32 flags;
+ unsigned long flags;
/* per-context event flags for fileops/intr communication */
unsigned long event_flags;
/* WAIT_RCV that timed out, no interrupt */
@@ -268,9 +270,6 @@ struct hfi1_ctxtdata {
u32 urgent;
/* saved total number of polled urgent packets for poll edge trigger */
u32 urgent_poll;
- /* pid of process using this ctxt */
- pid_t pid;
- pid_t subpid[HFI1_MAX_SHARED_CTXTS];
/* same size as task_struct .comm[], command that opened context */
char comm[TASK_COMM_LEN];
/* so file ops can get at unit */
@@ -366,11 +365,6 @@ struct hfi1_packet {
u8 etype;
};
-static inline bool has_sc4_bit(struct hfi1_packet *p)
-{
- return !!rhf_dc_info(p->rhf);
-}
-
/*
* Private data for snoop/capture support.
*/
@@ -611,6 +605,7 @@ struct hfi1_pportdata {
struct work_struct freeze_work;
struct work_struct link_downgrade_work;
struct work_struct link_bounce_work;
+ struct delayed_work start_link_work;
/* host link state variables */
struct mutex hls_lock;
u32 host_link_state;
@@ -665,6 +660,7 @@ struct hfi1_pportdata {
u8 linkinit_reason;
u8 local_tx_rate; /* rate given to 8051 firmware */
u8 last_pstate; /* info only */
+ u8 qsfp_retry_count;
/* placeholders for IB MAD packet settings */
u8 overrun_threshold;
@@ -805,10 +801,19 @@ struct hfi1_temp {
u8 triggers; /* temperature triggers */
};
+struct hfi1_i2c_bus {
+ struct hfi1_devdata *controlling_dd; /* current controlling device */
+ struct i2c_adapter adapter; /* bus details */
+ struct i2c_algo_bit_data algo; /* bus algorithm details */
+ int num; /* bus number, 0 or 1 */
+};
+
/* common data between shared ASIC HFIs */
struct hfi1_asic_data {
struct hfi1_devdata *dds[2]; /* back pointers */
struct mutex asic_resource_mutex;
+ struct hfi1_i2c_bus *i2c_bus0;
+ struct hfi1_i2c_bus *i2c_bus1;
};
/* device data struct now contains only "general per-device" info.
@@ -1128,7 +1133,8 @@ struct hfi1_devdata {
NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS];
/* Software counter that aggregates all cce_err_status errors */
u64 sw_cce_err_status_aggregate;
-
+ /* Software counter that aggregates all bypass packet rcv errors */
+ u64 sw_rcv_bypass_packet_errors;
/* receive interrupt functions */
rhf_rcv_function_ptr *rhf_rcv_function_map;
rhf_rcv_function_ptr normal_rhf_rcv_functions[8];
@@ -1174,6 +1180,8 @@ struct hfi1_devdata {
/* 8051 firmware version helper */
#define dc8051_ver(a, b) ((a) << 8 | (b))
+#define dc8051_ver_maj(a) ((a & 0xff00) >> 8)
+#define dc8051_ver_min(a) (a & 0x00ff)
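/*
 * Editor's worked example (illustrative, not part of the patch): the
 * helpers pack and unpack an 8.8 major/minor version value.
 *
 *	dc8051_ver(1, 20)      == 0x0114
 *	dc8051_ver_maj(0x0114) == 1
 *	dc8051_ver_min(0x0114) == 20
 */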
/* f_put_tid types */
#define PT_EXPECTED 0
@@ -1182,6 +1190,7 @@ struct hfi1_devdata {
struct tid_rb_node;
struct mmu_rb_node;
+struct mmu_rb_handler;
/* Private data for file operations */
struct hfi1_filedata {
@@ -1192,7 +1201,7 @@ struct hfi1_filedata {
/* for cpu affinity; -1 if none */
int rec_cpu_num;
u32 tid_n_pinned;
- struct rb_root tid_rb_root;
+ struct mmu_rb_handler *handler;
struct tid_rb_node **entry_to_rb;
spinlock_t tid_lock; /* protect tid_[limit,used] counters */
u32 tid_limit;
@@ -1201,6 +1210,7 @@ struct hfi1_filedata {
u32 invalid_tid_idx;
/* protect invalid_tids array and invalid_tid_idx */
spinlock_t invalid_lock;
+ struct mm_struct *mm;
};
extern struct list_head hfi1_dev_list;
@@ -1234,6 +1244,8 @@ int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *, int);
int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *, int);
void set_all_slowpath(struct hfi1_devdata *dd);
+extern const struct pci_device_id hfi1_pci_tbl[];
+
/* receive packet handler dispositions */
#define RCV_PKT_OK 0x0 /* keep going */
#define RCV_PKT_LIMIT 0x1 /* stop, hit limit, start thread */
@@ -1259,12 +1271,29 @@ void receive_interrupt_work(struct work_struct *work);
static inline int hdr2sc(struct hfi1_message_header *hdr, u64 rhf)
{
return ((be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf) |
- ((!!(rhf & RHF_DC_INFO_SMASK)) << 4);
+ ((!!(rhf_dc_info(rhf))) << 4);
}
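/*
 * Editor's worked example (illustrative): hdr2sc() assembles the 5-bit
 * service class from two sources. With lrh[0] = 0x5abc in host order,
 * bits 15..12 give SC[3:0] = 0x5; a non-zero rhf_dc_info(rhf) supplies
 * SC[4], so the result is 0x5 | 0x10 = 0x15.
 */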
+#define HFI1_JKEY_WIDTH 16
+#define HFI1_JKEY_MASK (BIT(16) - 1)
+#define HFI1_ADMIN_JKEY_RANGE 32
+
+/*
+ * J_KEYs are split and allocated in the following groups:
+ * 0 - 31 - users with administrator privileges
+ * 32 - 63 - kernel protocols using KDETH packets
+ * 64 - 65535 - all other users using KDETH packets
+ */
static inline u16 generate_jkey(kuid_t uid)
{
- return from_kuid(current_user_ns(), uid) & 0xffff;
+ u16 jkey = from_kuid(current_user_ns(), uid) & HFI1_JKEY_MASK;
+
+ if (capable(CAP_SYS_ADMIN))
+ jkey &= HFI1_ADMIN_JKEY_RANGE - 1;
+ else if (jkey < 64)
+ jkey |= BIT(HFI1_JKEY_WIDTH - 1);
+
+ return jkey;
}
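/*
 * Editor's worked example of the grouping above (illustrative): a
 * CAP_SYS_ADMIN process with uid 1000 gets 1000 & 31 = 8, inside the
 * 0-31 administrator range. A non-admin uid of 40 would land in the
 * kernel range, so bit 15 is set instead: 40 | 0x8000 = 32808, inside
 * the 64-65535 range for all other users.
 */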
/*
@@ -1569,6 +1598,22 @@ static inline struct hfi1_ibport *to_iport(struct ib_device *ibdev, u8 port)
return &dd->pport[pidx].ibport_data;
}
+void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
+ bool do_cnp);
+static inline bool process_ecn(struct rvt_qp *qp, struct hfi1_packet *pkt,
+ bool do_cnp)
+{
+ struct hfi1_other_headers *ohdr = pkt->ohdr;
+ u32 bth1;
+
+ bth1 = be32_to_cpu(ohdr->bth[1]);
+ if (unlikely(bth1 & (HFI1_BECN_SMASK | HFI1_FECN_SMASK))) {
+ hfi1_process_ecn_slowpath(qp, pkt, do_cnp);
+ return bth1 & HFI1_FECN_SMASK;
+ }
+ return false;
+}
+
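/*
 * Presumed caller sketch (editor's illustration; "send_ack" is a
 * hypothetical label in a receive routine): the inline keeps the
 * common no-ECN case cheap, and a true return means FECN was set, so
 * the receive path should emit an ACK promptly to carry the
 * notification back to the sender.
 */
	bool is_fecn = process_ecn(qp, packet, true);

	if (is_fecn)
		goto send_ack;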
/*
* Return the indexed PKEY from the port PKEY table.
*/
@@ -1586,8 +1631,7 @@ static inline u16 hfi1_get_pkey(struct hfi1_ibport *ibp, unsigned index)
}
/*
- * Readers of cc_state must call get_cc_state() under rcu_read_lock().
- * Writers of cc_state must call get_cc_state() under cc_state_lock.
+ * Called by readers of cc_state only, must call under rcu_read_lock().
*/
static inline struct cc_state *get_cc_state(struct hfi1_pportdata *ppd)
{
@@ -1595,6 +1639,16 @@ static inline struct cc_state *get_cc_state(struct hfi1_pportdata *ppd)
}
/*
+ * Called by writers of cc_state only, must call under cc_state_lock.
+ */
+static inline
+struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd)
+{
+ return rcu_dereference_protected(ppd->cc_state,
+ lockdep_is_held(&ppd->cc_state_lock));
+}
+
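/*
 * Editor's sketch of the intended reader/writer pairing (names other
 * than the two accessors are illustrative):
 *
 *	rcu_read_lock();
 *	cc_state = get_cc_state(ppd);
 *	if (cc_state)
 *		...read fields...
 *	rcu_read_unlock();
 *
 *	spin_lock(&ppd->cc_state_lock);
 *	old = get_cc_state_protected(ppd);
 *	rcu_assign_pointer(ppd->cc_state, new);
 *	spin_unlock(&ppd->cc_state_lock);
 *	kfree_rcu(old, rcu);
 */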
+/*
* values for dd->flags (_device_ related flags)
*/
#define HFI1_INITTED 0x1 /* chip and driver up and initted */
@@ -1621,7 +1675,6 @@ static inline struct cc_state *get_cc_state(struct hfi1_pportdata *ppd)
struct hfi1_devdata *hfi1_init_dd(struct pci_dev *,
const struct pci_device_id *);
void hfi1_free_devdata(struct hfi1_devdata *);
-void cc_state_reclaim(struct rcu_head *rcu);
struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra);
/* LED beaconing functions */
@@ -1669,9 +1722,12 @@ void shutdown_led_override(struct hfi1_pportdata *ppd);
*/
#define DEFAULT_RCVHDR_ENTSIZE 32
-bool hfi1_can_pin_pages(struct hfi1_devdata *, u32, u32);
-int hfi1_acquire_user_pages(unsigned long, size_t, bool, struct page **);
-void hfi1_release_user_pages(struct mm_struct *, struct page **, size_t, bool);
+bool hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm,
+ u32 nlocked, u32 npages);
+int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr,
+ size_t npages, bool writable, struct page **pages);
+void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
+ size_t npages, bool dirty);
static inline void clear_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
{
@@ -1750,7 +1806,7 @@ extern unsigned int hfi1_max_mtu;
extern unsigned int hfi1_cu;
extern unsigned int user_credit_return_threshold;
extern int num_user_contexts;
-extern unsigned n_krcvqs;
+extern unsigned long n_krcvqs;
extern uint krcvqs[];
extern int krcvqsset;
extern uint kdeth_qp;
@@ -1947,4 +2003,55 @@ static inline u32 qsfp_resource(struct hfi1_devdata *dd)
int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp);
+#define DD_DEV_ENTRY(dd) __string(dev, dev_name(&(dd)->pcidev->dev))
+#define DD_DEV_ASSIGN(dd) __assign_str(dev, dev_name(&(dd)->pcidev->dev))
+
+#define packettype_name(etype) { RHF_RCV_TYPE_##etype, #etype }
+#define show_packettype(etype) \
+__print_symbolic(etype, \
+ packettype_name(EXPECTED), \
+ packettype_name(EAGER), \
+ packettype_name(IB), \
+ packettype_name(ERROR), \
+ packettype_name(BYPASS))
+
+#define ib_opcode_name(opcode) { IB_OPCODE_##opcode, #opcode }
+#define show_ib_opcode(opcode) \
+__print_symbolic(opcode, \
+ ib_opcode_name(RC_SEND_FIRST), \
+ ib_opcode_name(RC_SEND_MIDDLE), \
+ ib_opcode_name(RC_SEND_LAST), \
+ ib_opcode_name(RC_SEND_LAST_WITH_IMMEDIATE), \
+ ib_opcode_name(RC_SEND_ONLY), \
+ ib_opcode_name(RC_SEND_ONLY_WITH_IMMEDIATE), \
+ ib_opcode_name(RC_RDMA_WRITE_FIRST), \
+ ib_opcode_name(RC_RDMA_WRITE_MIDDLE), \
+ ib_opcode_name(RC_RDMA_WRITE_LAST), \
+ ib_opcode_name(RC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \
+ ib_opcode_name(RC_RDMA_WRITE_ONLY), \
+ ib_opcode_name(RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \
+ ib_opcode_name(RC_RDMA_READ_REQUEST), \
+ ib_opcode_name(RC_RDMA_READ_RESPONSE_FIRST), \
+ ib_opcode_name(RC_RDMA_READ_RESPONSE_MIDDLE), \
+ ib_opcode_name(RC_RDMA_READ_RESPONSE_LAST), \
+ ib_opcode_name(RC_RDMA_READ_RESPONSE_ONLY), \
+ ib_opcode_name(RC_ACKNOWLEDGE), \
+ ib_opcode_name(RC_ATOMIC_ACKNOWLEDGE), \
+ ib_opcode_name(RC_COMPARE_SWAP), \
+ ib_opcode_name(RC_FETCH_ADD), \
+ ib_opcode_name(UC_SEND_FIRST), \
+ ib_opcode_name(UC_SEND_MIDDLE), \
+ ib_opcode_name(UC_SEND_LAST), \
+ ib_opcode_name(UC_SEND_LAST_WITH_IMMEDIATE), \
+ ib_opcode_name(UC_SEND_ONLY), \
+ ib_opcode_name(UC_SEND_ONLY_WITH_IMMEDIATE), \
+ ib_opcode_name(UC_RDMA_WRITE_FIRST), \
+ ib_opcode_name(UC_RDMA_WRITE_MIDDLE), \
+ ib_opcode_name(UC_RDMA_WRITE_LAST), \
+ ib_opcode_name(UC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \
+ ib_opcode_name(UC_RDMA_WRITE_ONLY), \
+ ib_opcode_name(UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \
+ ib_opcode_name(UD_SEND_ONLY), \
+ ib_opcode_name(UD_SEND_ONLY_WITH_IMMEDIATE), \
+ ib_opcode_name(CNP))
#endif /* _HFI1_KERNEL_H */
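/*
 * Editor's sketch of how the helpers above are meant to be consumed
 * (hypothetical tracepoint; the driver's real trace events live in its
 * trace headers): DD_DEV_ENTRY/DD_DEV_ASSIGN record the device name
 * and show_packettype() renders the symbolic packet type.
 */
TRACE_EVENT(hfi1_demo_rcv,
	TP_PROTO(struct hfi1_devdata *dd, u32 etype),
	TP_ARGS(dd, etype),
	TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			 __field(u32, etype)),
	TP_fast_assign(DD_DEV_ASSIGN(dd);
		       __entry->etype = etype;),
	TP_printk("[%s] packet type %s", __get_str(dev),
		  show_packettype(__entry->etype))
);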
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index eed971ccd..384b43d2f 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -64,6 +64,7 @@
#include "debugfs.h"
#include "verbs.h"
#include "aspm.h"
+#include "affinity.h"
#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt
@@ -93,7 +94,7 @@ module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");
/* computed based on above array */
-unsigned n_krcvqs;
+unsigned long n_krcvqs;
static unsigned hfi1_rcvarr_split = 25;
module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
@@ -474,8 +475,9 @@ static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
{
- int i, size;
+ int i;
uint default_pkey_idx;
+ struct cc_state *cc_state;
ppd->dd = dd;
ppd->hw_pidx = hw_pidx;
@@ -498,6 +500,7 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
INIT_WORK(&ppd->sma_message_work, handle_sma_message);
INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
+ INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link);
INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);
@@ -526,9 +529,9 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
spin_lock_init(&ppd->cc_state_lock);
spin_lock_init(&ppd->cc_log_lock);
- size = sizeof(struct cc_state);
- RCU_INIT_POINTER(ppd->cc_state, kzalloc(size, GFP_KERNEL));
- if (!rcu_dereference(ppd->cc_state))
+ cc_state = kzalloc(sizeof(*cc_state), GFP_KERNEL);
+ RCU_INIT_POINTER(ppd->cc_state, cc_state);
+ if (!cc_state)
goto bail;
return;
@@ -972,39 +975,49 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
/*
* Release our hold on the shared asic data. If we are the last one,
- * free the structure. Must be holding hfi1_devs_lock.
+ * return the structure to be finalized outside the lock. Must be
+ * holding hfi1_devs_lock.
*/
-static void release_asic_data(struct hfi1_devdata *dd)
+static struct hfi1_asic_data *release_asic_data(struct hfi1_devdata *dd)
{
+ struct hfi1_asic_data *ad;
int other;
if (!dd->asic_data)
- return;
+ return NULL;
dd->asic_data->dds[dd->hfi1_id] = NULL;
other = dd->hfi1_id ? 0 : 1;
- if (!dd->asic_data->dds[other]) {
- /* we are the last holder, free it */
- kfree(dd->asic_data);
- }
+ ad = dd->asic_data;
dd->asic_data = NULL;
+ /* return NULL if the other dd still has a link */
+ return ad->dds[other] ? NULL : ad;
+}
+
+static void finalize_asic_data(struct hfi1_devdata *dd,
+ struct hfi1_asic_data *ad)
+{
+ clean_up_i2c(dd, ad);
+ kfree(ad);
}
static void __hfi1_free_devdata(struct kobject *kobj)
{
struct hfi1_devdata *dd =
container_of(kobj, struct hfi1_devdata, kobj);
+ struct hfi1_asic_data *ad;
unsigned long flags;
spin_lock_irqsave(&hfi1_devs_lock, flags);
idr_remove(&hfi1_unit_table, dd->unit);
list_del(&dd->list);
- release_asic_data(dd);
+ ad = release_asic_data(dd);
spin_unlock_irqrestore(&hfi1_devs_lock, flags);
+ if (ad)
+ finalize_asic_data(dd, ad);
free_platform_config(dd);
rcu_barrier(); /* wait for rcu callbacks to complete */
free_percpu(dd->int_counter);
free_percpu(dd->rcv_limit);
- hfi1_dev_affinity_free(dd);
free_percpu(dd->send_schedule);
rvt_dealloc_device(&dd->verbs_dev.rdi);
}
@@ -1162,7 +1175,7 @@ static int init_one(struct pci_dev *, const struct pci_device_id *);
#define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
#define PFX DRIVER_NAME ": "
-static const struct pci_device_id hfi1_pci_tbl[] = {
+const struct pci_device_id hfi1_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) },
{ 0, }
@@ -1198,6 +1211,10 @@ static int __init hfi1_mod_init(void)
if (ret)
goto bail;
+ ret = node_affinity_init();
+ if (ret)
+ goto bail;
+
/* validate max MTU before any devices start */
if (!valid_opa_max_mtu(hfi1_max_mtu)) {
pr_err("Invalid max_mtu 0x%x, using 0x%x instead\n",
@@ -1278,6 +1295,7 @@ module_init(hfi1_mod_init);
static void __exit hfi1_mod_cleanup(void)
{
pci_unregister_driver(&hfi1_pci_driver);
+ node_affinity_destroy();
hfi1_wss_exit();
hfi1_dbg_exit();
hfi1_cpulist_count = 0;
@@ -1311,12 +1329,12 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
hrtimer_cancel(&ppd->cca_timer[i].hrtimer);
spin_lock(&ppd->cc_state_lock);
- cc_state = get_cc_state(ppd);
+ cc_state = get_cc_state_protected(ppd);
RCU_INIT_POINTER(ppd->cc_state, NULL);
spin_unlock(&ppd->cc_state_lock);
if (cc_state)
- call_rcu(&cc_state->rcu, cc_state_reclaim);
+ kfree_rcu(cc_state, rcu);
}
free_credit_return(dd);
@@ -1760,8 +1778,8 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
hfi1_cdbg(PROC,
"ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n",
- rcd->ctxt, rcd->egrbufs.alloced, rcd->egrbufs.rcvtid_size,
- rcd->egrbufs.size);
+ rcd->ctxt, rcd->egrbufs.alloced,
+ rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);
/*
* Set the contexts rcv array head update threshold to the closest
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index fca07a1d6..7ffc14f21 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -588,7 +588,6 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
pi->port_phys_conf = (ppd->port_type & 0xf);
-#if PI_LED_ENABLE_SUP
pi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4;
pi->port_states.ledenable_offlinereason |=
ppd->is_sm_config_started << 5;
@@ -602,11 +601,6 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
pi->port_states.ledenable_offlinereason |= is_beaconing_active << 6;
pi->port_states.ledenable_offlinereason |=
ppd->offline_disabled_reason;
-#else
- pi->port_states.offline_reason = ppd->neighbor_normal << 4;
- pi->port_states.offline_reason |= ppd->is_sm_config_started << 5;
- pi->port_states.offline_reason |= ppd->offline_disabled_reason;
-#endif /* PI_LED_ENABLE_SUP */
pi->port_states.portphysstate_portstate =
(hfi1_ibphys_portstate(ppd) << 4) | state;
@@ -1752,17 +1746,11 @@ static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
if (start_of_sm_config && (lstate == IB_PORT_INIT))
ppd->is_sm_config_started = 1;
-#if PI_LED_ENABLE_SUP
psi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4;
psi->port_states.ledenable_offlinereason |=
ppd->is_sm_config_started << 5;
psi->port_states.ledenable_offlinereason |=
ppd->offline_disabled_reason;
-#else
- psi->port_states.offline_reason = ppd->neighbor_normal << 4;
- psi->port_states.offline_reason |= ppd->is_sm_config_started << 5;
- psi->port_states.offline_reason |= ppd->offline_disabled_reason;
-#endif /* PI_LED_ENABLE_SUP */
psi->port_states.portphysstate_portstate =
(hfi1_ibphys_portstate(ppd) << 4) | (lstate & 0xf);
@@ -1831,6 +1819,11 @@ static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data,
u32 len = OPA_AM_CI_LEN(am) + 1;
int ret;
+ if (dd->pport->port_type != PORT_TYPE_QSFP) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
#define __CI_PAGE_SIZE BIT(7) /* 128 bytes */
#define __CI_PAGE_MASK ~(__CI_PAGE_SIZE - 1)
#define __CI_PAGE_NUM(a) ((a) & __CI_PAGE_MASK)
@@ -2430,14 +2423,9 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
rsp->port_rcv_remote_physical_errors =
cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
CNTR_INVALID_VL));
- tmp = read_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL);
- tmp2 = tmp + read_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL);
- if (tmp2 < tmp) {
- /* overflow/wrapped */
- rsp->local_link_integrity_errors = cpu_to_be64(~0);
- } else {
- rsp->local_link_integrity_errors = cpu_to_be64(tmp2);
- }
+ rsp->local_link_integrity_errors =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RX_REPLAY,
+ CNTR_INVALID_VL));
tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
CNTR_INVALID_VL);
@@ -2499,6 +2487,9 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
idx_from_vl(vl)));
+ rsp->vls[vfi].port_vl_xmit_discards =
+ cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
+ idx_from_vl(vl)));
vlinfo++;
vfi++;
}
@@ -2529,9 +2520,8 @@ static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port,
error_counter_summary += read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
CNTR_INVALID_VL);
/* local link integrity must be right-shifted by the lli resolution */
- tmp = read_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL);
- tmp += read_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL);
- error_counter_summary += (tmp >> res_lli);
+ error_counter_summary += (read_dev_cntr(dd, C_DC_RX_REPLAY,
+ CNTR_INVALID_VL) >> res_lli);
/* link error recovery must be right-shifted by the ler resolution */
tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
tmp += read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL);
@@ -2614,7 +2604,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
u8 lq, num_vls;
u8 res_lli, res_ler;
u64 port_mask;
- unsigned long port_num;
+ u8 port_num;
unsigned long vl;
u32 vl_select_mask;
int vfi;
@@ -2648,9 +2638,9 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
*/
port_mask = be64_to_cpu(req->port_select_mask[3]);
port_num = find_first_bit((unsigned long *)&port_mask,
- sizeof(port_mask));
+ sizeof(port_mask) * 8);
- if ((u8)port_num != port) {
+ if (port_num != port) {
pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)pmp);
}
@@ -2800,14 +2790,9 @@ static void pma_get_opa_port_ectrs(struct ib_device *ibdev,
rsp->port_rcv_constraint_errors =
cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
CNTR_INVALID_VL));
- tmp = read_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL);
- tmp2 = tmp + read_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL);
- if (tmp2 < tmp) {
- /* overflow/wrapped */
- rsp->local_link_integrity_errors = cpu_to_be64(~0);
- } else {
- rsp->local_link_integrity_errors = cpu_to_be64(tmp2);
- }
+ rsp->local_link_integrity_errors =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RX_REPLAY,
+ CNTR_INVALID_VL));
rsp->excessive_buffer_overruns =
cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
}
@@ -2857,7 +2842,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
*/
port_mask = be64_to_cpu(req->port_select_mask[3]);
port_num = find_first_bit((unsigned long *)&port_mask,
- sizeof(port_mask));
+ sizeof(port_mask) * 8);
if (port_num != port) {
pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
@@ -2883,14 +2868,17 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff;
-
+ rsp->port_rcv_errors =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
vlinfo = &rsp->vls[0];
vfi = 0;
vl_select_mask = be32_to_cpu(req->vl_select_mask);
for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
8 * sizeof(req->vl_select_mask)) {
memset(vlinfo, 0, sizeof(*vlinfo));
- /* vlinfo->vls[vfi].port_vl_xmit_discards ??? */
+ rsp->vls[vfi].port_vl_xmit_discards =
+ cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
+ idx_from_vl(vl)));
vlinfo += 1;
vfi++;
}
@@ -3027,7 +3015,7 @@ static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
*/
port_mask = be64_to_cpu(req->port_select_mask[3]);
port_num = find_first_bit((unsigned long *)&port_mask,
- sizeof(port_mask));
+ sizeof(port_mask) * 8);
if (port_num != port) {
pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
@@ -3162,10 +3150,8 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
if (counter_select & CS_PORT_RCV_REMOTE_PHYSICAL_ERRORS)
write_dev_cntr(dd, C_DC_RMT_PHY_ERR, CNTR_INVALID_VL, 0);
- if (counter_select & CS_LOCAL_LINK_INTEGRITY_ERRORS) {
- write_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL, 0);
+ if (counter_select & CS_LOCAL_LINK_INTEGRITY_ERRORS)
write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0);
- }
if (counter_select & CS_LINK_ERROR_RECOVERY) {
write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
@@ -3223,7 +3209,9 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
/* if (counter_select & CS_PORT_MARK_FECN)
* write_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT + offset, 0);
*/
- /* port_vl_xmit_discards ??? */
+ if (counter_select & C_SW_XMIT_DSCD_VL)
+ write_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
+ idx_from_vl(vl), 0);
}
if (resp_len)
@@ -3264,7 +3252,7 @@ static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp,
*/
port_mask = be64_to_cpu(req->port_select_mask[3]);
port_num = find_first_bit((unsigned long *)&port_mask,
- sizeof(port_mask));
+ sizeof(port_mask) * 8);
if (port_num != port) {
pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
@@ -3392,7 +3380,7 @@ static void apply_cc_state(struct hfi1_pportdata *ppd)
*/
spin_lock(&ppd->cc_state_lock);
- old_cc_state = get_cc_state(ppd);
+ old_cc_state = get_cc_state_protected(ppd);
if (!old_cc_state) {
/* never active, or shutting down */
spin_unlock(&ppd->cc_state_lock);
@@ -3415,7 +3403,7 @@ static void apply_cc_state(struct hfi1_pportdata *ppd)
spin_unlock(&ppd->cc_state_lock);
- call_rcu(&old_cc_state->rcu, cc_state_reclaim);
+ kfree_rcu(old_cc_state, rcu);
}
static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
@@ -3570,13 +3558,6 @@ static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
return reply((struct ib_mad_hdr *)smp);
}
-void cc_state_reclaim(struct rcu_head *rcu)
-{
- struct cc_state *cc_state = container_of(rcu, struct cc_state, rcu);
-
- kfree(cc_state);
-}
-
static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
u32 *resp_len)
@@ -3960,7 +3941,6 @@ void clear_linkup_counters(struct hfi1_devdata *dd)
write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL, 0);
/* LocalLinkIntegrityErrors */
- write_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL, 0);
write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0);
/* ExcessiveBufferOverruns */
write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0);
diff --git a/drivers/infiniband/hw/hfi1/mad.h b/drivers/infiniband/hw/hfi1/mad.h
index 8b734aaae..5aa3fd1be 100644
--- a/drivers/infiniband/hw/hfi1/mad.h
+++ b/drivers/infiniband/hw/hfi1/mad.h
@@ -48,15 +48,8 @@
#define _HFI1_MAD_H
#include <rdma/ib_pma.h>
-#define USE_PI_LED_ENABLE 1 /*
- * use led enabled bit in struct
- * opa_port_states, if available
- */
#include <rdma/opa_smi.h>
#include <rdma/opa_port_info.h>
-#ifndef PI_LED_ENABLE_SUP
-#define PI_LED_ENABLE_SUP 0
-#endif
#include "opa_compat.h"
/*
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index b7a80aa1a..7ad30898f 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -53,19 +53,20 @@
#include "trace.h"
struct mmu_rb_handler {
- struct list_head list;
struct mmu_notifier mn;
- struct rb_root *root;
+ struct rb_root root;
+ void *ops_arg;
spinlock_t lock; /* protect the RB tree */
struct mmu_rb_ops *ops;
+ struct mm_struct *mm;
+ struct list_head lru_list;
+ struct work_struct del_work;
+ struct list_head del_list;
+ struct workqueue_struct *wq;
};
-static LIST_HEAD(mmu_rb_handlers);
-static DEFINE_SPINLOCK(mmu_rb_lock); /* protect mmu_rb_handlers list */
-
static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
-static struct mmu_rb_handler *find_mmu_handler(struct rb_root *);
static inline void mmu_notifier_page(struct mmu_notifier *, struct mm_struct *,
unsigned long);
static inline void mmu_notifier_range_start(struct mmu_notifier *,
@@ -76,6 +77,9 @@ static void mmu_notifier_mem_invalidate(struct mmu_notifier *,
unsigned long, unsigned long);
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
unsigned long, unsigned long);
+static void do_remove(struct mmu_rb_handler *handler,
+ struct list_head *del_list);
+static void handle_remove(struct work_struct *work);
static struct mmu_notifier_ops mn_opts = {
.invalidate_page = mmu_notifier_page,
@@ -95,73 +99,79 @@ static unsigned long mmu_node_last(struct mmu_rb_node *node)
return PAGE_ALIGN(node->addr + node->len) - 1;
}
-int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops)
+int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
+ struct mmu_rb_ops *ops,
+ struct workqueue_struct *wq,
+ struct mmu_rb_handler **handler)
{
struct mmu_rb_handler *handlr;
-
- if (!ops->invalidate)
- return -EINVAL;
+ int ret;
handlr = kmalloc(sizeof(*handlr), GFP_KERNEL);
if (!handlr)
return -ENOMEM;
- handlr->root = root;
+ handlr->root = RB_ROOT;
handlr->ops = ops;
+ handlr->ops_arg = ops_arg;
INIT_HLIST_NODE(&handlr->mn.hlist);
spin_lock_init(&handlr->lock);
handlr->mn.ops = &mn_opts;
- spin_lock(&mmu_rb_lock);
- list_add_tail_rcu(&handlr->list, &mmu_rb_handlers);
- spin_unlock(&mmu_rb_lock);
+ handlr->mm = mm;
+ INIT_WORK(&handlr->del_work, handle_remove);
+ INIT_LIST_HEAD(&handlr->del_list);
+ INIT_LIST_HEAD(&handlr->lru_list);
+ handlr->wq = wq;
+
+ ret = mmu_notifier_register(&handlr->mn, handlr->mm);
+ if (ret) {
+ kfree(handlr);
+ return ret;
+ }
- return mmu_notifier_register(&handlr->mn, current->mm);
+ *handler = handlr;
+ return 0;
}
-void hfi1_mmu_rb_unregister(struct rb_root *root)
+void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
{
- struct mmu_rb_handler *handler = find_mmu_handler(root);
+ struct mmu_rb_node *rbnode;
+ struct rb_node *node;
unsigned long flags;
-
- if (!handler)
- return;
+ struct list_head del_list;
/* Unregister first so we don't get any more notifications. */
- if (current->mm)
- mmu_notifier_unregister(&handler->mn, current->mm);
+ mmu_notifier_unregister(&handler->mn, handler->mm);
- spin_lock(&mmu_rb_lock);
- list_del_rcu(&handler->list);
- spin_unlock(&mmu_rb_lock);
- synchronize_rcu();
+ /*
+ * Make sure the wq delete handler is finished running. It will not
+ * be triggered once the mmu notifiers are unregistered above.
+ */
+ flush_work(&handler->del_work);
+
+ INIT_LIST_HEAD(&del_list);
spin_lock_irqsave(&handler->lock, flags);
- if (!RB_EMPTY_ROOT(root)) {
- struct rb_node *node;
- struct mmu_rb_node *rbnode;
-
- while ((node = rb_first(root))) {
- rbnode = rb_entry(node, struct mmu_rb_node, node);
- rb_erase(node, root);
- if (handler->ops->remove)
- handler->ops->remove(root, rbnode, NULL);
- }
+ while ((node = rb_first(&handler->root))) {
+ rbnode = rb_entry(node, struct mmu_rb_node, node);
+ rb_erase(node, &handler->root);
+ /* move from LRU list to delete list */
+ list_move(&rbnode->list, &del_list);
}
spin_unlock_irqrestore(&handler->lock, flags);
+ do_remove(handler, &del_list);
+
kfree(handler);
}
-int hfi1_mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode)
+int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
+ struct mmu_rb_node *mnode)
{
- struct mmu_rb_handler *handler = find_mmu_handler(root);
struct mmu_rb_node *node;
unsigned long flags;
int ret = 0;
- if (!handler)
- return -EINVAL;
-
spin_lock_irqsave(&handler->lock, flags);
hfi1_cdbg(MMU, "Inserting node addr 0x%llx, len %u", mnode->addr,
mnode->len);
@@ -170,12 +180,13 @@ int hfi1_mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode)
ret = -EINVAL;
goto unlock;
}
- __mmu_int_rb_insert(mnode, root);
+ __mmu_int_rb_insert(mnode, &handler->root);
+ list_add(&mnode->list, &handler->lru_list);
- if (handler->ops->insert) {
- ret = handler->ops->insert(root, mnode);
- if (ret)
- __mmu_int_rb_remove(mnode, root);
+ ret = handler->ops->insert(handler->ops_arg, mnode);
+ if (ret) {
+ __mmu_int_rb_remove(mnode, &handler->root);
+ list_del(&mnode->list); /* remove from LRU list */
}
unlock:
spin_unlock_irqrestore(&handler->lock, flags);
@@ -191,10 +202,10 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
hfi1_cdbg(MMU, "Searching for addr 0x%llx, len %u", addr, len);
if (!handler->ops->filter) {
- node = __mmu_int_rb_iter_first(handler->root, addr,
+ node = __mmu_int_rb_iter_first(&handler->root, addr,
(addr + len) - 1);
} else {
- for (node = __mmu_int_rb_iter_first(handler->root, addr,
+ for (node = __mmu_int_rb_iter_first(&handler->root, addr,
(addr + len) - 1);
node;
node = __mmu_int_rb_iter_next(node, addr,
@@ -206,82 +217,72 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
return node;
}
-/* Caller must *not* hold handler lock. */
-static void __mmu_rb_remove(struct mmu_rb_handler *handler,
- struct mmu_rb_node *node, struct mm_struct *mm)
-{
- unsigned long flags;
-
- /* Validity of handler and node pointers has been checked by caller. */
- hfi1_cdbg(MMU, "Removing node addr 0x%llx, len %u", node->addr,
- node->len);
- spin_lock_irqsave(&handler->lock, flags);
- __mmu_int_rb_remove(node, handler->root);
- spin_unlock_irqrestore(&handler->lock, flags);
-
- if (handler->ops->remove)
- handler->ops->remove(handler->root, node, mm);
-}
-
-struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr,
- unsigned long len)
+struct mmu_rb_node *hfi1_mmu_rb_extract(struct mmu_rb_handler *handler,
+ unsigned long addr, unsigned long len)
{
- struct mmu_rb_handler *handler = find_mmu_handler(root);
struct mmu_rb_node *node;
unsigned long flags;
- if (!handler)
- return ERR_PTR(-EINVAL);
-
spin_lock_irqsave(&handler->lock, flags);
node = __mmu_rb_search(handler, addr, len);
+ if (node) {
+ __mmu_int_rb_remove(node, &handler->root);
+ list_del(&node->list); /* remove from LRU list */
+ }
spin_unlock_irqrestore(&handler->lock, flags);
return node;
}
-struct mmu_rb_node *hfi1_mmu_rb_extract(struct rb_root *root,
- unsigned long addr, unsigned long len)
+void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
{
- struct mmu_rb_handler *handler = find_mmu_handler(root);
- struct mmu_rb_node *node;
+ struct mmu_rb_node *rbnode, *ptr;
+ struct list_head del_list;
unsigned long flags;
+ bool stop = false;
- if (!handler)
- return ERR_PTR(-EINVAL);
+ INIT_LIST_HEAD(&del_list);
spin_lock_irqsave(&handler->lock, flags);
- node = __mmu_rb_search(handler, addr, len);
- if (node)
- __mmu_int_rb_remove(node, handler->root);
+ list_for_each_entry_safe_reverse(rbnode, ptr, &handler->lru_list,
+ list) {
+ if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
+ &stop)) {
+ __mmu_int_rb_remove(rbnode, &handler->root);
+ /* move from LRU list to delete list */
+ list_move(&rbnode->list, &del_list);
+ }
+ if (stop)
+ break;
+ }
spin_unlock_irqrestore(&handler->lock, flags);
- return node;
+ while (!list_empty(&del_list)) {
+ rbnode = list_first_entry(&del_list, struct mmu_rb_node, list);
+ list_del(&rbnode->list);
+ handler->ops->remove(handler->ops_arg, rbnode);
+ }
}
-void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node)
+/*
+ * It is up to the caller to ensure that this function does not race with the
+ * mmu invalidate notifier, which may be calling the user's remove callback on
+ * 'node'.
+ */
+void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
+ struct mmu_rb_node *node)
{
- struct mmu_rb_handler *handler = find_mmu_handler(root);
-
- if (!handler || !node)
- return;
-
- __mmu_rb_remove(handler, node, NULL);
-}
+ unsigned long flags;
-static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root)
-{
- struct mmu_rb_handler *handler;
+ /* Validity of handler and node pointers has been checked by caller. */
+ hfi1_cdbg(MMU, "Removing node addr 0x%llx, len %u", node->addr,
+ node->len);
+ spin_lock_irqsave(&handler->lock, flags);
+ __mmu_int_rb_remove(node, &handler->root);
+ list_del(&node->list); /* remove from LRU list */
+ spin_unlock_irqrestore(&handler->lock, flags);
- rcu_read_lock();
- list_for_each_entry_rcu(handler, &mmu_rb_handlers, list) {
- if (handler->root == root)
- goto unlock;
- }
- handler = NULL;
-unlock:
- rcu_read_unlock();
- return handler;
+ handler->ops->remove(handler->ops_arg, node);
}
static inline void mmu_notifier_page(struct mmu_notifier *mn,
@@ -304,9 +305,10 @@ static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn,
{
struct mmu_rb_handler *handler =
container_of(mn, struct mmu_rb_handler, mn);
- struct rb_root *root = handler->root;
+ struct rb_root *root = &handler->root;
struct mmu_rb_node *node, *ptr = NULL;
unsigned long flags;
+ bool added = false;
spin_lock_irqsave(&handler->lock, flags);
for (node = __mmu_int_rb_iter_first(root, start, end - 1);
@@ -315,11 +317,53 @@ static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn,
ptr = __mmu_int_rb_iter_next(node, start, end - 1);
hfi1_cdbg(MMU, "Invalidating node addr 0x%llx, len %u",
node->addr, node->len);
- if (handler->ops->invalidate(root, node)) {
+ if (handler->ops->invalidate(handler->ops_arg, node)) {
__mmu_int_rb_remove(node, root);
- if (handler->ops->remove)
- handler->ops->remove(root, node, mm);
+ /* move from LRU list to delete list */
+ list_move(&node->list, &handler->del_list);
+ added = true;
}
}
spin_unlock_irqrestore(&handler->lock, flags);
+
+ if (added)
+ queue_work(handler->wq, &handler->del_work);
+}
+
+/*
+ * Call the remove function for the given handler and the list. This
+ * is expected to be called with a delete list extracted from handler.
+ * The caller should not be holding the handler lock.
+ */
+static void do_remove(struct mmu_rb_handler *handler,
+ struct list_head *del_list)
+{
+ struct mmu_rb_node *node;
+
+ while (!list_empty(del_list)) {
+ node = list_first_entry(del_list, struct mmu_rb_node, list);
+ list_del(&node->list);
+ handler->ops->remove(handler->ops_arg, node);
+ }
+}
+
+/*
+ * Work queue function to remove all nodes that have been queued up to
+ * be removed. The key feature is that mm->mmap_sem is not being held
+ * and the remove callback can sleep while taking it, if needed.
+ */
+static void handle_remove(struct work_struct *work)
+{
+ struct mmu_rb_handler *handler = container_of(work,
+ struct mmu_rb_handler,
+ del_work);
+ struct list_head del_list;
+ unsigned long flags;
+
+ /* remove anything that is queued to get removed */
+ spin_lock_irqsave(&handler->lock, flags);
+ list_replace_init(&handler->del_list, &del_list);
+ spin_unlock_irqrestore(&handler->lock, flags);
+
+ do_remove(handler, &del_list);
}
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.h b/drivers/infiniband/hw/hfi1/mmu_rb.h
index 7a57b9c49..754f6ebf1 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.h
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.h
@@ -54,23 +54,34 @@ struct mmu_rb_node {
unsigned long len;
unsigned long __last;
struct rb_node node;
+ struct list_head list;
};
+/*
+ * NOTE: filter, insert, invalidate, and evict must not sleep. Only remove is
+ * allowed to sleep.
+ */
struct mmu_rb_ops {
- bool (*filter)(struct mmu_rb_node *, unsigned long, unsigned long);
- int (*insert)(struct rb_root *, struct mmu_rb_node *);
- void (*remove)(struct rb_root *, struct mmu_rb_node *,
- struct mm_struct *);
- int (*invalidate)(struct rb_root *, struct mmu_rb_node *);
+ bool (*filter)(struct mmu_rb_node *node, unsigned long addr,
+ unsigned long len);
+ int (*insert)(void *ops_arg, struct mmu_rb_node *mnode);
+ void (*remove)(void *ops_arg, struct mmu_rb_node *mnode);
+ int (*invalidate)(void *ops_arg, struct mmu_rb_node *node);
+ int (*evict)(void *ops_arg, struct mmu_rb_node *mnode,
+ void *evict_arg, bool *stop);
};
-int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops);
-void hfi1_mmu_rb_unregister(struct rb_root *);
-int hfi1_mmu_rb_insert(struct rb_root *, struct mmu_rb_node *);
-void hfi1_mmu_rb_remove(struct rb_root *, struct mmu_rb_node *);
-struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *, unsigned long,
- unsigned long);
-struct mmu_rb_node *hfi1_mmu_rb_extract(struct rb_root *, unsigned long,
- unsigned long);
+int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
+ struct mmu_rb_ops *ops,
+ struct workqueue_struct *wq,
+ struct mmu_rb_handler **handler);
+void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler);
+int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
+ struct mmu_rb_node *mnode);
+void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg);
+void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
+ struct mmu_rb_node *mnode);
+struct mmu_rb_node *hfi1_mmu_rb_extract(struct mmu_rb_handler *handler,
+ unsigned long addr, unsigned long len);
#endif /* _HFI1_MMU_RB_H */
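/*
 * Editor's sketch of a minimal user of the reworked API (every
 * "demo_*" name is hypothetical): callers now own a private handler
 * and pass an opaque ops_arg instead of sharing a global rb_root.
 * Only .remove may sleep, per the NOTE in the header above.
 */
static int demo_insert(void *ops_arg, struct mmu_rb_node *mnode)
{
	return 0;		/* account for the newly pinned range */
}

static void demo_remove(void *ops_arg, struct mmu_rb_node *mnode)
{
	/* may sleep, e.g. to unpin pages */
}

static int demo_invalidate(void *ops_arg, struct mmu_rb_node *mnode)
{
	return 1;		/* non-zero: drop this node on invalidation */
}

static struct mmu_rb_ops demo_ops = {
	.insert     = demo_insert,
	.remove     = demo_remove,
	.invalidate = demo_invalidate,
};

/*
 * Registration, e.g. from a file-descriptor setup path with fd->mm and
 * a private workqueue wq:
 *
 *	ret = hfi1_mmu_rb_register(fd, fd->mm, &demo_ops, wq, &fd->handler);
 */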
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 0bac21e6a..89c68da1c 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -679,6 +679,10 @@ static uint pcie_pset = UNSET_PSET;
module_param(pcie_pset, uint, S_IRUGO);
MODULE_PARM_DESC(pcie_pset, "PCIe Eq Pset value to use, range is 0-10");
+static uint pcie_ctle = 1; /* discrete on, integrated off */
+module_param(pcie_ctle, uint, S_IRUGO);
+MODULE_PARM_DESC(pcie_ctle, "PCIe static CTLE mode, bit 0 - discrete on/off, bit 1 - integrated on/off");
+
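/*
 * Editor's note (illustrative values): pcie_ctle is a two-bit mask, so
 *	pcie_ctle=0 disables static CTLE everywhere,
 *	pcie_ctle=1 (the default) enables it for discrete HFIs only,
 *	pcie_ctle=3 enables it for both discrete and integrated devices,
 * e.g. loaded as "modprobe hfi1 pcie_ctle=3".
 */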
/* equalization columns */
#define PREC 0
#define ATTN 1
@@ -716,6 +720,36 @@ static const u8 integrated_preliminary_eq[11][3] = {
{ 0x00, 0x1e, 0x0a }, /* p10 */
};
+static const u8 discrete_ctle_tunings[11][4] = {
+ /* DC LF HF BW */
+ { 0x48, 0x0b, 0x04, 0x04 }, /* p0 */
+ { 0x60, 0x05, 0x0f, 0x0a }, /* p1 */
+ { 0x50, 0x09, 0x06, 0x06 }, /* p2 */
+ { 0x68, 0x05, 0x0f, 0x0a }, /* p3 */
+ { 0x80, 0x05, 0x0f, 0x0a }, /* p4 */
+ { 0x70, 0x05, 0x0f, 0x0a }, /* p5 */
+ { 0x68, 0x05, 0x0f, 0x0a }, /* p6 */
+ { 0x38, 0x0f, 0x00, 0x00 }, /* p7 */
+ { 0x48, 0x09, 0x06, 0x06 }, /* p8 */
+ { 0x60, 0x05, 0x0f, 0x0a }, /* p9 */
+ { 0x38, 0x0f, 0x00, 0x00 }, /* p10 */
+};
+
+static const u8 integrated_ctle_tunings[11][4] = {
+ /* DC LF HF BW */
+ { 0x38, 0x0f, 0x00, 0x00 }, /* p0 */
+ { 0x38, 0x0f, 0x00, 0x00 }, /* p1 */
+ { 0x38, 0x0f, 0x00, 0x00 }, /* p2 */
+ { 0x38, 0x0f, 0x00, 0x00 }, /* p3 */
+ { 0x58, 0x0a, 0x05, 0x05 }, /* p4 */
+ { 0x48, 0x0a, 0x05, 0x05 }, /* p5 */
+ { 0x40, 0x0a, 0x05, 0x05 }, /* p6 */
+ { 0x38, 0x0f, 0x00, 0x00 }, /* p7 */
+ { 0x38, 0x0f, 0x00, 0x00 }, /* p8 */
+ { 0x38, 0x09, 0x06, 0x06 }, /* p9 */
+ { 0x38, 0x0e, 0x01, 0x01 }, /* p10 */
+};
+
/* helper to format the value to write to hardware */
#define eq_value(pre, curr, post) \
((((u32)(pre)) << \
@@ -951,11 +985,14 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
u32 status, err;
int ret;
int do_retry, retry_count = 0;
+ int intnum = 0;
uint default_pset;
u16 target_vector, target_speed;
u16 lnkctl2, vendor;
u8 div;
const u8 (*eq)[3];
+ const u8 (*ctle_tunings)[4];
+ uint static_ctle_mode;
int return_error = 0;
/* PCIe Gen3 is for the ASIC only */
@@ -1089,6 +1126,9 @@ retry:
div = 3;
eq = discrete_preliminary_eq;
default_pset = DEFAULT_DISCRETE_PSET;
+ ctle_tunings = discrete_ctle_tunings;
+ /* bit 0 - discrete on/off */
+ static_ctle_mode = pcie_ctle & 0x1;
} else {
/* 400mV, FS=29, LF = 9 */
fs = 29;
@@ -1096,6 +1136,9 @@ retry:
div = 1;
eq = integrated_preliminary_eq;
default_pset = DEFAULT_MCP_PSET;
+ ctle_tunings = integrated_ctle_tunings;
+ /* bit 1 - integrated on/off */
+ static_ctle_mode = (pcie_ctle >> 1) & 0x1;
}
pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL101,
(fs <<
@@ -1135,16 +1178,33 @@ retry:
* step 5c: Program gasket interrupts
*/
/* set the Rx Bit Rate to REFCLK ratio */
- write_gasket_interrupt(dd, 0, 0x0006, 0x0050);
+ write_gasket_interrupt(dd, intnum++, 0x0006, 0x0050);
/* disable pCal for PCIe Gen3 RX equalization */
- write_gasket_interrupt(dd, 1, 0x0026, 0x5b01);
+ /* select adaptive or static CTLE */
+ write_gasket_interrupt(dd, intnum++, 0x0026,
+ 0x5b01 | (static_ctle_mode << 3));
/*
* Enable iCal for PCIe Gen3 RX equalization, and set which
* evaluation of RX_EQ_EVAL will launch the iCal procedure.
*/
- write_gasket_interrupt(dd, 2, 0x0026, 0x5202);
+ write_gasket_interrupt(dd, intnum++, 0x0026, 0x5202);
+
+ if (static_ctle_mode) {
+ /* apply static CTLE tunings */
+ u8 pcie_dc, pcie_lf, pcie_hf, pcie_bw;
+
+ pcie_dc = ctle_tunings[pcie_pset][0];
+ pcie_lf = ctle_tunings[pcie_pset][1];
+ pcie_hf = ctle_tunings[pcie_pset][2];
+ pcie_bw = ctle_tunings[pcie_pset][3];
+ write_gasket_interrupt(dd, intnum++, 0x0026, 0x0200 | pcie_dc);
+ write_gasket_interrupt(dd, intnum++, 0x0026, 0x0100 | pcie_lf);
+ write_gasket_interrupt(dd, intnum++, 0x0026, 0x0000 | pcie_hf);
+ write_gasket_interrupt(dd, intnum++, 0x0026, 0x5500 | pcie_bw);
+ }
+
/* terminate list */
- write_gasket_interrupt(dd, 3, 0x0000, 0x0000);
+ write_gasket_interrupt(dd, intnum++, 0x0000, 0x0000);
/*
* step 5d: program XMT margin
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index d4022450b..ac1bf4a73 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -1952,13 +1952,17 @@ int init_pervl_scs(struct hfi1_devdata *dd)
dd->vld[15].sc = sc_alloc(dd, SC_VL15,
dd->rcd[0]->rcvhdrqentsize, dd->node);
if (!dd->vld[15].sc)
- goto nomem;
+ return -ENOMEM;
+
hfi1_init_ctxt(dd->vld[15].sc);
dd->vld[15].mtu = enum_to_mtu(OPA_MTU_2048);
- dd->kernel_send_context = kmalloc_node(dd->num_send_contexts *
+ dd->kernel_send_context = kzalloc_node(dd->num_send_contexts *
sizeof(struct send_context *),
GFP_KERNEL, dd->node);
+ if (!dd->kernel_send_context)
+ goto freesc15;
+
dd->kernel_send_context[0] = dd->vld[15].sc;
for (i = 0; i < num_vls; i++) {
@@ -2010,12 +2014,21 @@ int init_pervl_scs(struct hfi1_devdata *dd)
if (pio_map_init(dd, ppd->port - 1, num_vls, NULL))
goto nomem;
return 0;
+
nomem:
- sc_free(dd->vld[15].sc);
- for (i = 0; i < num_vls; i++)
+ for (i = 0; i < num_vls; i++) {
sc_free(dd->vld[i].sc);
+ dd->vld[i].sc = NULL;
+ }
+
for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++)
sc_free(dd->kernel_send_context[i + 1]);
+
+ kfree(dd->kernel_send_context);
+ dd->kernel_send_context = NULL;
+
+freesc15:
+ sc_free(dd->vld[15].sc);
return -ENOMEM;
}
diff --git a/drivers/infiniband/hw/hfi1/pio_copy.c b/drivers/infiniband/hw/hfi1/pio_copy.c
index 8c25e1b58..3a1ef3056 100644
--- a/drivers/infiniband/hw/hfi1/pio_copy.c
+++ b/drivers/infiniband/hw/hfi1/pio_copy.c
@@ -771,6 +771,9 @@ void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes)
read_extra_bytes(pbuf, from, to_fill);
from += to_fill;
nbytes -= to_fill;
+ /* may not be enough valid bytes left to align */
+ if (extra > nbytes)
+ extra = nbytes;
/* ...now write carry */
dest = pbuf->start + (pbuf->qw_written * sizeof(u64));
@@ -798,6 +801,15 @@ void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes)
read_low_bytes(pbuf, from, extra);
from += extra;
nbytes -= extra;
+ /*
+ * If no bytes are left, return early - we are done.
+ * NOTE: This short-circuit is *required* because
+ * "extra" may have been reduced in size and "from"
+ * is not aligned, as required when leaving this
+ * if block.
+ */
+ if (nbytes == 0)
+ return;
}
/* at this point, from is QW aligned */
diff --git a/drivers/infiniband/hw/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c
index 1d09f767b..965c8aef0 100644
--- a/drivers/infiniband/hw/hfi1/platform.c
+++ b/drivers/infiniband/hw/hfi1/platform.c
@@ -537,20 +537,6 @@ static void apply_tunings(
u8 precur = 0, attn = 0, postcur = 0, external_device_config = 0;
u8 *cache = ppd->qsfp_info.cache;
- /* Enable external device config if channel is limiting active */
- read_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS,
- GENERAL_CONFIG, &config_data);
- config_data &= ~(0xff << ENABLE_EXT_DEV_CONFIG_SHIFT);
- config_data |= ((u32)limiting_active << ENABLE_EXT_DEV_CONFIG_SHIFT);
- ret = load_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS,
- GENERAL_CONFIG, config_data);
- if (ret != HCMD_SUCCESS)
- dd_dev_err(
- ppd->dd,
- "%s: Failed to set enable external device config\n",
- __func__);
-
- config_data = 0; /* re-init */
/* Pass tuning method to 8051 */
read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
&config_data);
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 995c89766..4e4d8317c 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -52,6 +52,7 @@
#include <linux/seq_file.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>
+#include <rdma/ib_verbs.h>
#include "hfi.h"
#include "qp.h"
@@ -115,6 +116,66 @@ static const u16 credit_table[31] = {
32768 /* 1E */
};
+const struct rvt_operation_params hfi1_post_parms[RVT_OPERATION_MAX] = {
+[IB_WR_RDMA_WRITE] = {
+ .length = sizeof(struct ib_rdma_wr),
+ .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
+},
+
+[IB_WR_RDMA_READ] = {
+ .length = sizeof(struct ib_rdma_wr),
+ .qpt_support = BIT(IB_QPT_RC),
+ .flags = RVT_OPERATION_ATOMIC,
+},
+
+[IB_WR_ATOMIC_CMP_AND_SWP] = {
+ .length = sizeof(struct ib_atomic_wr),
+ .qpt_support = BIT(IB_QPT_RC),
+ .flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
+},
+
+[IB_WR_ATOMIC_FETCH_AND_ADD] = {
+ .length = sizeof(struct ib_atomic_wr),
+ .qpt_support = BIT(IB_QPT_RC),
+ .flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
+},
+
+[IB_WR_RDMA_WRITE_WITH_IMM] = {
+ .length = sizeof(struct ib_rdma_wr),
+ .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
+},
+
+[IB_WR_SEND] = {
+ .length = sizeof(struct ib_send_wr),
+ .qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
+ BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
+},
+
+[IB_WR_SEND_WITH_IMM] = {
+ .length = sizeof(struct ib_send_wr),
+ .qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
+ BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
+},
+
+[IB_WR_REG_MR] = {
+ .length = sizeof(struct ib_reg_wr),
+ .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
+ .flags = RVT_OPERATION_LOCAL,
+},
+
+[IB_WR_LOCAL_INV] = {
+ .length = sizeof(struct ib_send_wr),
+ .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
+ .flags = RVT_OPERATION_LOCAL,
+},
+
+[IB_WR_SEND_WITH_INV] = {
+ .length = sizeof(struct ib_send_wr),
+ .qpt_support = BIT(IB_QPT_RC),
+},
+
+};
+
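/*
 * Presumed hookup (editor's note; done in the driver's verbs
 * registration path, not shown in this hunk): the table is handed to
 * rdmavt, which validates each posted work request's size and QP type
 * against these entries before queueing it, e.g.
 *
 *	dd->verbs_dev.rdi.post_parms = hfi1_post_parms;
 */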
static void flush_tx_list(struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
@@ -741,8 +802,9 @@ void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
priv->owner = qp;
- priv->s_hdr = kzalloc_node(sizeof(*priv->s_hdr), gfp, rdi->dparms.node);
- if (!priv->s_hdr) {
+ priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), gfp,
+ rdi->dparms.node);
+ if (!priv->s_ahg) {
kfree(priv);
return ERR_PTR(-ENOMEM);
}
@@ -755,7 +817,7 @@ void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
- kfree(priv->s_hdr);
+ kfree(priv->s_ahg);
kfree(priv);
}
diff --git a/drivers/infiniband/hw/hfi1/qp.h b/drivers/infiniband/hw/hfi1/qp.h
index e7bc8d6cf..587d84d65 100644
--- a/drivers/infiniband/hw/hfi1/qp.h
+++ b/drivers/infiniband/hw/hfi1/qp.h
@@ -54,6 +54,8 @@
extern unsigned int hfi1_qp_table_size;
+extern const struct rvt_operation_params hfi1_post_parms[];
+
/*
* free_ahg - clear ahg from QP
*/
@@ -61,7 +63,7 @@ static inline void clear_ahg(struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
- priv->s_hdr->ahgcount = 0;
+ priv->s_ahg->ahgcount = 0;
qp->s_flags &= ~(RVT_S_AHG_VALID | RVT_S_AHG_CLEAR);
if (priv->s_sde && qp->s_ahgidx >= 0)
sdma_ahg_free(priv->s_sde, qp->s_ahgidx);
diff --git a/drivers/infiniband/hw/hfi1/qsfp.c b/drivers/infiniband/hw/hfi1/qsfp.c
index 9fb561682..4e95ad810 100644
--- a/drivers/infiniband/hw/hfi1/qsfp.c
+++ b/drivers/infiniband/hw/hfi1/qsfp.c
@@ -50,46 +50,285 @@
#include <linux/vmalloc.h>
#include "hfi.h"
-#include "twsi.h"
+
+/* for the given bus number, return the CSR for reading an i2c line */
+static inline u32 i2c_in_csr(u32 bus_num)
+{
+ return bus_num ? ASIC_QSFP2_IN : ASIC_QSFP1_IN;
+}
+
+/* for the given bus number, return the CSR for writing an i2c line */
+static inline u32 i2c_oe_csr(u32 bus_num)
+{
+ return bus_num ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
+}
+
+static void hfi1_setsda(void *data, int state)
+{
+ struct hfi1_i2c_bus *bus = (struct hfi1_i2c_bus *)data;
+ struct hfi1_devdata *dd = bus->controlling_dd;
+ u64 reg;
+ u32 target_oe;
+
+ target_oe = i2c_oe_csr(bus->num);
+ reg = read_csr(dd, target_oe);
+ /*
+ * The OE bit value is inverted and connected to the pin. When
+ * OE is 0 the pin is left to be pulled up; when OE is 1
+ * the pin is driven low. This matches the "open drain" or "open
+ * collector" convention.
+ */
+ if (state)
+ reg &= ~QSFP_HFI0_I2CDAT;
+ else
+ reg |= QSFP_HFI0_I2CDAT;
+ write_csr(dd, target_oe, reg);
+ /* do a read to force the write into the chip */
+ (void)read_csr(dd, target_oe);
+}
+
+static void hfi1_setscl(void *data, int state)
+{
+ struct hfi1_i2c_bus *bus = (struct hfi1_i2c_bus *)data;
+ struct hfi1_devdata *dd = bus->controlling_dd;
+ u64 reg;
+ u32 target_oe;
+
+ target_oe = i2c_oe_csr(bus->num);
+ reg = read_csr(dd, target_oe);
+ /*
+ * The OE bit value is inverted and connected to the pin. When
+ * OE is 0 the pin is left to be pulled up; when OE is 1
+ * the pin is driven low. This matches the "open drain" or "open
+ * collector" convention.
+ */
+ if (state)
+ reg &= ~QSFP_HFI0_I2CCLK;
+ else
+ reg |= QSFP_HFI0_I2CCLK;
+ write_csr(dd, target_oe, reg);
+ /* do a read to force the write into the chip */
+ (void)read_csr(dd, target_oe);
+}
+
+static int hfi1_getsda(void *data)
+{
+ struct hfi1_i2c_bus *bus = (struct hfi1_i2c_bus *)data;
+ u64 reg;
+ u32 target_in;
+
+ hfi1_setsda(data, 1); /* clear OE so we do not pull line down */
+ udelay(2); /* 1us pull up + 250ns hold */
+
+ target_in = i2c_in_csr(bus->num);
+ reg = read_csr(bus->controlling_dd, target_in);
+ return !!(reg & QSFP_HFI0_I2CDAT);
+}
+
+static int hfi1_getscl(void *data)
+{
+ struct hfi1_i2c_bus *bus = (struct hfi1_i2c_bus *)data;
+ u64 reg;
+ u32 target_in;
+
+ hfi1_setscl(data, 1); /* clear OE so we do not pull line down */
+ udelay(2); /* 1us pull up + 250ns hold */
+
+ target_in = i2c_in_csr(bus->num);
+ reg = read_csr(bus->controlling_dd, target_in);
+ return !!(reg & QSFP_HFI0_I2CCLK);
+}
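/*
 * Editor's summary of the open-drain contract implemented above
 * (illustrative):
 *
 *	OE bit	pin
 *	0	released, pulled high externally
 *	1	driven low
 *
 * so driving a logical 1 clears OE, driving a 0 sets it, and a read
 * first releases the line, waits out the pull-up, then samples the
 * IN CSR.
 */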
/*
- * QSFP support for hfi driver, using "Two Wire Serial Interface" driver
- * in twsi.c
+ * Allocate and initialize the given i2c bus number.
+ * Returns NULL on failure.
*/
-#define I2C_MAX_RETRY 4
+static struct hfi1_i2c_bus *init_i2c_bus(struct hfi1_devdata *dd,
+ struct hfi1_asic_data *ad, int num)
+{
+ struct hfi1_i2c_bus *bus;
+ int ret;
+
+ bus = kzalloc(sizeof(*bus), GFP_KERNEL);
+ if (!bus)
+ return NULL;
+
+ bus->controlling_dd = dd;
+ bus->num = num; /* our bus number */
+
+ bus->algo.setsda = hfi1_setsda;
+ bus->algo.setscl = hfi1_setscl;
+ bus->algo.getsda = hfi1_getsda;
+ bus->algo.getscl = hfi1_getscl;
+ bus->algo.udelay = 5;
+ bus->algo.timeout = usecs_to_jiffies(50);
+ bus->algo.data = bus;
+
+ bus->adapter.owner = THIS_MODULE;
+ bus->adapter.algo_data = &bus->algo;
+ bus->adapter.dev.parent = &dd->pcidev->dev;
+ snprintf(bus->adapter.name, sizeof(bus->adapter.name),
+ "hfi1_i2c%d", num);
+
+ ret = i2c_bit_add_bus(&bus->adapter);
+ if (ret) {
+ dd_dev_info(dd, "%s: unable to add i2c bus %d, err %d\n",
+ __func__, num, ret);
+ kfree(bus);
+ return NULL;
+ }
+
+ return bus;
+}
/*
- * Raw i2c write. No set-up or lock checking.
+ * Initialize i2c buses.
+ * Return 0 on success, -errno on error.
*/
-static int __i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr,
- int offset, void *bp, int len)
+int set_up_i2c(struct hfi1_devdata *dd, struct hfi1_asic_data *ad)
{
- struct hfi1_devdata *dd = ppd->dd;
- int ret, cnt;
- u8 *buff = bp;
+ ad->i2c_bus0 = init_i2c_bus(dd, ad, 0);
+ ad->i2c_bus1 = init_i2c_bus(dd, ad, 1);
+ if (!ad->i2c_bus0 || !ad->i2c_bus1)
+ return -ENOMEM;
+ return 0;
+}
- cnt = 0;
- while (cnt < len) {
- int wlen = len - cnt;
+static void clean_i2c_bus(struct hfi1_i2c_bus *bus)
+{
+ if (bus) {
+ i2c_del_adapter(&bus->adapter);
+ kfree(bus);
+ }
+}
- ret = hfi1_twsi_blk_wr(dd, target, i2c_addr, offset,
- buff + cnt, wlen);
- if (ret) {
- /* hfi1_twsi_blk_wr() 1 for error, else 0 */
- return -EIO;
- }
- offset += wlen;
- cnt += wlen;
+void clean_up_i2c(struct hfi1_devdata *dd, struct hfi1_asic_data *ad)
+{
+ clean_i2c_bus(ad->i2c_bus0);
+ ad->i2c_bus0 = NULL;
+ clean_i2c_bus(ad->i2c_bus1);
+ ad->i2c_bus1 = NULL;
+}
+
+static int i2c_bus_write(struct hfi1_devdata *dd, struct hfi1_i2c_bus *i2c,
+ u8 slave_addr, int offset, int offset_size,
+ u8 *data, u16 len)
+{
+ int ret;
+ int num_msgs;
+ u8 offset_bytes[2];
+ struct i2c_msg msgs[2];
+
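+ /*
+ * Build the transfer: with no register offset, a single write
+ * message carries the data; with a 1- or 2-byte offset, the
+ * offset bytes are sent first and the data follows in the same
+ * transaction without a repeated START (I2C_M_NOSTART).
+ */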
+ switch (offset_size) {
+ case 0:
+ num_msgs = 1;
+ msgs[0].addr = slave_addr;
+ msgs[0].flags = 0;
+ msgs[0].len = len;
+ msgs[0].buf = data;
+ break;
+ case 2:
+ offset_bytes[1] = (offset >> 8) & 0xff;
+ /* fall through */
+ case 1:
+ num_msgs = 2;
+ offset_bytes[0] = offset & 0xff;
+
+ msgs[0].addr = slave_addr;
+ msgs[0].flags = 0;
+ msgs[0].len = offset_size;
+ msgs[0].buf = offset_bytes;
+
+ msgs[1].addr = slave_addr;
+ msgs[1].flags = I2C_M_NOSTART;
+ msgs[1].len = len;
+ msgs[1].buf = data;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ i2c->controlling_dd = dd;
+ ret = i2c_transfer(&i2c->adapter, msgs, num_msgs);
+ if (ret != num_msgs) {
+ dd_dev_err(dd, "%s: bus %d, i2c slave 0x%x, offset 0x%x, len 0x%x; write failed, ret %d\n",
+ __func__, i2c->num, slave_addr, offset, len, ret);
+ return ret < 0 ? ret : -EIO;
+ }
+ return 0;
+}
+
+static int i2c_bus_read(struct hfi1_devdata *dd, struct hfi1_i2c_bus *bus,
+ u8 slave_addr, int offset, int offset_size,
+ u8 *data, u16 len)
+{
+ int ret;
+ int num_msgs;
+ u8 offset_bytes[2];
+ struct i2c_msg msgs[2];
+
+ switch (offset_size) {
+ case 0:
+ num_msgs = 1;
+ msgs[0].addr = slave_addr;
+ msgs[0].flags = I2C_M_RD;
+ msgs[0].len = len;
+ msgs[0].buf = data;
+ break;
+ case 2:
+ offset_bytes[1] = (offset >> 8) & 0xff;
+ /* fall through */
+ case 1:
+ num_msgs = 2;
+ offset_bytes[0] = offset & 0xff;
+
+ msgs[0].addr = slave_addr;
+ msgs[0].flags = 0;
+ msgs[0].len = offset_size;
+ msgs[0].buf = offset_bytes;
+
+ msgs[1].addr = slave_addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
+ msgs[1].buf = data;
+ break;
+ default:
+ return -EINVAL;
}
- /* Must wait min 20us between qsfp i2c transactions */
- udelay(20);
+ bus->controlling_dd = dd;
+ ret = i2c_transfer(&bus->adapter, msgs, num_msgs);
+ if (ret != num_msgs) {
+ dd_dev_err(dd, "%s: bus %d, i2c slave 0x%x, offset 0x%x, len 0x%x; read failed, ret %d\n",
+ __func__, bus->num, slave_addr, offset, len, ret);
+ return ret < 0 ? ret : -EIO;
+ }
+ return 0;
+}
- return cnt;
+/*
+ * Raw i2c write. No set-up or lock checking.
+ *
+ * Return 0 on success, -errno on error.
+ */
+static int __i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr,
+ int offset, void *bp, int len)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ struct hfi1_i2c_bus *bus;
+ u8 slave_addr;
+ int offset_size;
+
+ bus = target ? dd->asic_data->i2c_bus1 : dd->asic_data->i2c_bus0;
+ slave_addr = (i2c_addr & 0xff) >> 1; /* convert to 7-bit addr */
+ offset_size = (i2c_addr >> 8) & 0x3;
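+ /*
+ * Example: i2c_addr 0x1a0 (the common 0xa0 QSFP address with a
+ * one-byte register offset) yields slave_addr 0x50 and
+ * offset_size 1.
+ */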
+ return i2c_bus_write(dd, bus, slave_addr, offset, offset_size, bp, len);
}
/*
* Caller must hold the i2c chain resource.
+ *
+ * Return number of bytes written, or -errno.
*/
int i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset,
void *bp, int len)
@@ -99,63 +338,36 @@ int i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset,
if (!check_chip_resource(ppd->dd, i2c_target(target), __func__))
return -EACCES;
- /* make sure the TWSI bus is in a sane state */
- ret = hfi1_twsi_reset(ppd->dd, target);
- if (ret) {
- hfi1_dev_porterr(ppd->dd, ppd->port,
- "I2C chain %d write interface reset failed\n",
- target);
+ ret = __i2c_write(ppd, target, i2c_addr, offset, bp, len);
+ if (ret)
return ret;
- }
- return __i2c_write(ppd, target, i2c_addr, offset, bp, len);
+ return len;
}
/*
* Raw i2c read. No set-up or lock checking.
+ *
+ * Return 0 on success, -errno on error.
*/
static int __i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr,
int offset, void *bp, int len)
{
struct hfi1_devdata *dd = ppd->dd;
- int ret, cnt, pass = 0;
- int orig_offset = offset;
-
- cnt = 0;
- while (cnt < len) {
- int rlen = len - cnt;
-
- ret = hfi1_twsi_blk_rd(dd, target, i2c_addr, offset,
- bp + cnt, rlen);
- /* Some QSFP's fail first try. Retry as experiment */
- if (ret && cnt == 0 && ++pass < I2C_MAX_RETRY)
- continue;
- if (ret) {
- /* hfi1_twsi_blk_rd() 1 for error, else 0 */
- ret = -EIO;
- goto exit;
- }
- offset += rlen;
- cnt += rlen;
- }
-
- ret = cnt;
-
-exit:
- if (ret < 0) {
- hfi1_dev_porterr(dd, ppd->port,
- "I2C chain %d read failed, addr 0x%x, offset 0x%x, len %d\n",
- target, i2c_addr, orig_offset, len);
- }
-
- /* Must wait min 20us between qsfp i2c transactions */
- udelay(20);
-
- return ret;
+ struct hfi1_i2c_bus *bus;
+ u8 slave_addr;
+ int offset_size;
+
+ bus = target ? dd->asic_data->i2c_bus1 : dd->asic_data->i2c_bus0;
+ slave_addr = (i2c_addr & 0xff) >> 1; /* convert to 7-bit addr */
+ offset_size = (i2c_addr >> 8) & 0x3;
+ return i2c_bus_read(dd, bus, slave_addr, offset, offset_size, bp, len);
}
/*
* Caller must hold the i2c chain resource.
+ *
+ * Return number of bytes read, or -errno.
*/
int i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset,
void *bp, int len)
@@ -165,16 +377,11 @@ int i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset,
if (!check_chip_resource(ppd->dd, i2c_target(target), __func__))
return -EACCES;
- /* make sure the TWSI bus is in a sane state */
- ret = hfi1_twsi_reset(ppd->dd, target);
- if (ret) {
- hfi1_dev_porterr(ppd->dd, ppd->port,
- "I2C chain %d read interface reset failed\n",
- target);
+ ret = __i2c_read(ppd, target, i2c_addr, offset, bp, len);
+ if (ret)
return ret;
- }
- return __i2c_read(ppd, target, i2c_addr, offset, bp, len);
+ return len;
}
/*
@@ -182,6 +389,8 @@ int i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset,
* by writing @addr = ((256 * n) + m)
*
* Caller must hold the i2c chain resource.
+ *
+ * Return number of bytes written or -errno.
*/
int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
int len)
@@ -189,21 +398,12 @@ int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
int count = 0;
int offset;
int nwrite;
- int ret;
+ int ret = 0;
u8 page;
if (!check_chip_resource(ppd->dd, i2c_target(target), __func__))
return -EACCES;
- /* make sure the TWSI bus is in a sane state */
- ret = hfi1_twsi_reset(ppd->dd, target);
- if (ret) {
- hfi1_dev_porterr(ppd->dd, ppd->port,
- "QSFP chain %d write interface reset failed\n",
- target);
- return ret;
- }
-
while (count < len) {
/*
* Set the qsfp page based on a zero-based address
@@ -213,11 +413,12 @@ int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
ret = __i2c_write(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE,
QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1);
- if (ret != 1) {
+ /* QSFPs require a 5-10msec delay after write operations */
+ mdelay(5);
+ if (ret) {
hfi1_dev_porterr(ppd->dd, ppd->port,
"QSFP chain %d can't write QSFP_PAGE_SELECT_BYTE: %d\n",
target, ret);
- ret = -EIO;
break;
}
@@ -229,11 +430,13 @@ int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
ret = __i2c_write(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE,
offset, bp + count, nwrite);
- if (ret <= 0) /* stop on error or nothing written */
+ /* QSFPs require a 5-10msec delay after write operations */
+ mdelay(5);
+ if (ret) /* stop on error */
break;
- count += ret;
- addr += ret;
+ count += nwrite;
+ addr += nwrite;
}
if (ret < 0)
@@ -243,7 +446,7 @@ int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
/*
* Perform a stand-alone single QSFP write. Acquire the resource, do the
- * read, then release the resource.
+ * write, then release the resource.
*/
int one_qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
int len)
@@ -266,6 +469,8 @@ int one_qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
* by reading @addr = ((256 * n) + m)
*
* Caller must hold the i2c chain resource.
+ *
+ * Return the number of bytes read or -errno.
*/
int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
int len)
@@ -273,21 +478,12 @@ int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
int count = 0;
int offset;
int nread;
- int ret;
+ int ret = 0;
u8 page;
if (!check_chip_resource(ppd->dd, i2c_target(target), __func__))
return -EACCES;
- /* make sure the TWSI bus is in a sane state */
- ret = hfi1_twsi_reset(ppd->dd, target);
- if (ret) {
- hfi1_dev_porterr(ppd->dd, ppd->port,
- "QSFP chain %d read interface reset failed\n",
- target);
- return ret;
- }
-
while (count < len) {
/*
* Set the qsfp page based on a zero-based address
@@ -296,11 +492,12 @@ int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
page = (u8)(addr / QSFP_PAGESIZE);
ret = __i2c_write(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE,
QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1);
- if (ret != 1) {
+ /* QSFPs require a 5-10msec delay after write operations */
+ mdelay(5);
+ if (ret) {
hfi1_dev_porterr(ppd->dd, ppd->port,
"QSFP chain %d can't write QSFP_PAGE_SELECT_BYTE: %d\n",
target, ret);
- ret = -EIO;
break;
}
@@ -310,15 +507,13 @@ int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
if (((addr % QSFP_RW_BOUNDARY) + nread) > QSFP_RW_BOUNDARY)
nread = QSFP_RW_BOUNDARY - (addr % QSFP_RW_BOUNDARY);
- /* QSFPs require a 5-10msec delay after write operations */
- mdelay(5);
ret = __i2c_read(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE,
offset, bp + count, nread);
- if (ret <= 0) /* stop on error or nothing read */
+ if (ret) /* stop on error */
break;
- count += ret;
- addr += ret;
+ count += nread;
+ addr += nread;
}
if (ret < 0)
@@ -511,8 +706,8 @@ int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr, u32 len,
u8 *data)
{
struct hfi1_pportdata *ppd;
- u32 excess_len = 0;
- int ret = 0;
+ u32 excess_len = len;
+ int ret = 0, offset = 0;
if (port_num > dd->num_pports || port_num < 1) {
dd_dev_info(dd, "%s: Invalid port number %d\n",
@@ -545,6 +740,34 @@ int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr, u32 len,
}
memcpy(data, &ppd->qsfp_info.cache[addr], len);
+
+ if (addr <= QSFP_MONITOR_VAL_END &&
+ (addr + len) >= QSFP_MONITOR_VAL_START) {
+ /* Overlap with the dynamic channel monitor range */
+ if (addr < QSFP_MONITOR_VAL_START) {
+ if (addr + len <= QSFP_MONITOR_VAL_END)
+ len = addr + len - QSFP_MONITOR_VAL_START;
+ else
+ len = QSFP_MONITOR_RANGE;
+ offset = QSFP_MONITOR_VAL_START - addr;
+ addr = QSFP_MONITOR_VAL_START;
+ } else if (addr == QSFP_MONITOR_VAL_START) {
+ offset = 0;
+ if (addr + len > QSFP_MONITOR_VAL_END)
+ len = QSFP_MONITOR_RANGE;
+ } else {
+ offset = 0;
+ if (addr + len > QSFP_MONITOR_VAL_END)
+ len = QSFP_MONITOR_VAL_END - addr + 1;
+ }
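+ /*
+ * Example: addr 20, len 10 overlaps [22, 81], so offset
+ * becomes 2, addr 22 and len 8 -- only the monitor bytes
+ * are refreshed below.
+ */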
+ /* Refresh the values of the dynamic monitors from the cable */
+ ret = one_qsfp_read(ppd, dd->hfi1_id, addr, data + offset, len);
+ if (ret != len) {
+ ret = -EAGAIN;
+ goto set_zeroes;
+ }
+ }
+
return 0;
set_zeroes:
diff --git a/drivers/infiniband/hw/hfi1/qsfp.h b/drivers/infiniband/hw/hfi1/qsfp.h
index dadc66c44..36cf52359 100644
--- a/drivers/infiniband/hw/hfi1/qsfp.h
+++ b/drivers/infiniband/hw/hfi1/qsfp.h
@@ -74,6 +74,9 @@
/* Defined fields that Intel requires of qualified cables */
/* Byte 0 is Identifier, not checked */
/* Byte 1 is reserved "status MSB" */
+#define QSFP_MONITOR_VAL_START 22
+#define QSFP_MONITOR_VAL_END 81
+#define QSFP_MONITOR_RANGE (QSFP_MONITOR_VAL_END - QSFP_MONITOR_VAL_START + 1)
#define QSFP_TX_CTRL_BYTE_OFFS 86
#define QSFP_PWR_CTRL_BYTE_OFFS 93
#define QSFP_CDR_CTRL_BYTE_OFFS 98
@@ -238,3 +241,6 @@ int one_qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
int len);
int one_qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
int len);
+struct hfi1_asic_data;
+int set_up_i2c(struct hfi1_devdata *dd, struct hfi1_asic_data *ad);
+void clean_up_i2c(struct hfi1_devdata *dd, struct hfi1_asic_data *ad);
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 792f15eb8..5da190e60 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -477,6 +477,37 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
qp->s_flags |= RVT_S_WAIT_FENCE;
goto bail;
}
+ /*
+ * Local operations are processed immediately
+ * after all prior requests have completed
+ */
+ if (wqe->wr.opcode == IB_WR_REG_MR ||
+ wqe->wr.opcode == IB_WR_LOCAL_INV) {
+ int local_ops = 0;
+ int err = 0;
+
+ if (qp->s_last != qp->s_cur)
+ goto bail;
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ if (++qp->s_tail == qp->s_size)
+ qp->s_tail = 0;
+ if (!(wqe->wr.send_flags &
+ RVT_SEND_COMPLETION_ONLY)) {
+ err = rvt_invalidate_rkey(
+ qp,
+ wqe->wr.ex.invalidate_rkey);
+ local_ops = 1;
+ }
+ hfi1_send_complete(qp, wqe,
+ err ? IB_WC_LOC_PROT_ERR
+ : IB_WC_SUCCESS);
+ if (local_ops)
+ atomic_dec(&qp->local_ops_pending);
+ qp->s_hdrwords = 0;
+ goto done_free_tx;
+ }
+
newreq = 1;
qp->s_psn = wqe->psn;
}
@@ -491,6 +522,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
switch (wqe->wr.opcode) {
case IB_WR_SEND:
case IB_WR_SEND_WITH_IMM:
+ case IB_WR_SEND_WITH_INV:
/* If no credit, return. */
if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
@@ -504,11 +536,17 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
}
if (wqe->wr.opcode == IB_WR_SEND) {
qp->s_state = OP(SEND_ONLY);
- } else {
+ } else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
/* Immediate data comes after the BTH */
ohdr->u.imm_data = wqe->wr.ex.imm_data;
hwords += 1;
+ } else {
+ qp->s_state = OP(SEND_ONLY_WITH_INVALIDATE);
+ /* Invalidate rkey comes after the BTH */
+ ohdr->u.ieth = cpu_to_be32(
+ wqe->wr.ex.invalidate_rkey);
+ hwords += 1;
}
if (wqe->wr.send_flags & IB_SEND_SOLICITED)
bth0 |= IB_BTH_SOLICITED;
@@ -671,11 +709,16 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
}
if (wqe->wr.opcode == IB_WR_SEND) {
qp->s_state = OP(SEND_LAST);
- } else {
+ } else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
/* Immediate data comes after the BTH */
ohdr->u.imm_data = wqe->wr.ex.imm_data;
hwords += 1;
+ } else {
+ qp->s_state = OP(SEND_LAST_WITH_INVALIDATE);
+ /* invalidate data comes after the BTH */
+ ohdr->u.ieth = cpu_to_be32(wqe->wr.ex.invalidate_rkey);
+ hwords += 1;
}
if (wqe->wr.send_flags & IB_SEND_SOLICITED)
bth0 |= IB_BTH_SOLICITED;
@@ -1047,7 +1090,7 @@ void hfi1_rc_timeout(unsigned long arg)
ibp->rvp.n_rc_timeouts++;
qp->s_flags &= ~RVT_S_TIMER;
del_timer(&qp->s_timer);
- trace_hfi1_rc_timeout(qp, qp->s_last_psn + 1);
+ trace_hfi1_timeout(qp, qp->s_last_psn + 1);
restart_rc(qp, qp->s_last_psn + 1, 1);
hfi1_schedule_send(qp);
}
@@ -1171,7 +1214,7 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_ib_header *hdr)
* If we were waiting for sends to complete before re-sending,
* and they are now complete, restart sending.
*/
- trace_hfi1_rc_sendcomplete(qp, psn);
+ trace_hfi1_sendcomplete(qp, psn);
if (qp->s_flags & RVT_S_WAIT_PSN &&
cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
qp->s_flags &= ~RVT_S_WAIT_PSN;
@@ -1567,7 +1610,7 @@ static void rc_rcv_resp(struct hfi1_ibport *ibp,
spin_lock_irqsave(&qp->s_lock, flags);
- trace_hfi1_rc_ack(qp, psn);
+ trace_hfi1_ack(qp, psn);
/* Ignore invalid responses. */
smp_read_barrier_depends(); /* see post_one_send */
@@ -1782,7 +1825,7 @@ static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data,
u8 i, prev;
int old_req;
- trace_hfi1_rc_rcv_error(qp, psn);
+ trace_hfi1_rcv_error(qp, psn);
if (diff > 0) {
/*
* Packet sequence error.
@@ -2086,7 +2129,6 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
u32 tlen = packet->tlen;
struct rvt_qp *qp = packet->qp;
struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
struct hfi1_other_headers *ohdr = packet->ohdr;
u32 bth0, opcode;
u32 hdrsize = packet->hlen;
@@ -2097,30 +2139,15 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
int diff;
struct ib_reth *reth;
unsigned long flags;
- u32 bth1;
int ret, is_fecn = 0;
int copy_last = 0;
+ u32 rkey;
bth0 = be32_to_cpu(ohdr->bth[0]);
if (hfi1_ruc_check_hdr(ibp, hdr, rcv_flags & HFI1_HAS_GRH, qp, bth0))
return;
- bth1 = be32_to_cpu(ohdr->bth[1]);
- if (unlikely(bth1 & (HFI1_BECN_SMASK | HFI1_FECN_SMASK))) {
- if (bth1 & HFI1_BECN_SMASK) {
- u16 rlid = qp->remote_ah_attr.dlid;
- u32 lqpn, rqpn;
-
- lqpn = qp->ibqp.qp_num;
- rqpn = qp->remote_qpn;
- process_becn(
- ppd,
- qp->remote_ah_attr.sl,
- rlid, lqpn, rqpn,
- IB_CC_SVCTYPE_RC);
- }
- is_fecn = bth1 & HFI1_FECN_SMASK;
- }
+ is_fecn = process_ecn(qp, packet, false);
psn = be32_to_cpu(ohdr->bth[2]);
opcode = (bth0 >> 24) & 0xff;
@@ -2154,7 +2181,8 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
case OP(SEND_MIDDLE):
if (opcode == OP(SEND_MIDDLE) ||
opcode == OP(SEND_LAST) ||
- opcode == OP(SEND_LAST_WITH_IMMEDIATE))
+ opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
+ opcode == OP(SEND_LAST_WITH_INVALIDATE))
break;
goto nack_inv;
@@ -2170,6 +2198,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
if (opcode == OP(SEND_MIDDLE) ||
opcode == OP(SEND_LAST) ||
opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
+ opcode == OP(SEND_LAST_WITH_INVALIDATE) ||
opcode == OP(RDMA_WRITE_MIDDLE) ||
opcode == OP(RDMA_WRITE_LAST) ||
opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
@@ -2218,6 +2247,7 @@ send_middle:
case OP(SEND_ONLY):
case OP(SEND_ONLY_WITH_IMMEDIATE):
+ case OP(SEND_ONLY_WITH_INVALIDATE):
ret = hfi1_rvt_get_rwqe(qp, 0);
if (ret < 0)
goto nack_op_err;
@@ -2226,12 +2256,22 @@ send_middle:
qp->r_rcv_len = 0;
if (opcode == OP(SEND_ONLY))
goto no_immediate_data;
+ if (opcode == OP(SEND_ONLY_WITH_INVALIDATE))
+ goto send_last_inv;
/* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */
case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
wc.ex.imm_data = ohdr->u.imm_data;
wc.wc_flags = IB_WC_WITH_IMM;
goto send_last;
+ case OP(SEND_LAST_WITH_INVALIDATE):
+send_last_inv:
+ rkey = be32_to_cpu(ohdr->u.ieth);
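+ /* a failed invalidate is not fatal: complete without the flag */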
+ if (rvt_invalidate_rkey(qp, rkey))
+ goto no_immediate_data;
+ wc.ex.invalidate_rkey = rkey;
+ wc.wc_flags = IB_WC_WITH_INVALIDATE;
+ goto send_last;
case OP(RDMA_WRITE_LAST):
copy_last = ibpd_to_rvtpd(qp->ibqp.pd)->user;
/* fall through */
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index a659aec3c..48d5094f9 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -372,6 +372,7 @@ static void ruc_loopback(struct rvt_qp *sqp)
int ret;
int copy_last = 0;
u32 to;
+ int local_ops = 0;
rcu_read_lock();
@@ -440,11 +441,31 @@ again:
sqp->s_sge.num_sge = wqe->wr.num_sge;
sqp->s_len = wqe->length;
switch (wqe->wr.opcode) {
+ case IB_WR_REG_MR:
+ goto send_comp;
+
+ case IB_WR_LOCAL_INV:
+ if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
+ if (rvt_invalidate_rkey(sqp,
+ wqe->wr.ex.invalidate_rkey))
+ send_status = IB_WC_LOC_PROT_ERR;
+ local_ops = 1;
+ }
+ goto send_comp;
+
+ case IB_WR_SEND_WITH_INV:
+ if (!rvt_invalidate_rkey(qp, wqe->wr.ex.invalidate_rkey)) {
+ wc.wc_flags = IB_WC_WITH_INVALIDATE;
+ wc.ex.invalidate_rkey = wqe->wr.ex.invalidate_rkey;
+ }
+ goto send;
+
case IB_WR_SEND_WITH_IMM:
wc.wc_flags = IB_WC_WITH_IMM;
wc.ex.imm_data = wqe->wr.ex.imm_data;
/* FALLTHROUGH */
case IB_WR_SEND:
+send:
ret = hfi1_rvt_get_rwqe(qp, 0);
if (ret < 0)
goto op_err;
@@ -583,6 +604,10 @@ send_comp:
flush_send:
sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
hfi1_send_complete(sqp, wqe, send_status);
+ if (local_ops) {
+ atomic_dec(&sqp->local_ops_pending);
+ local_ops = 0;
+ }
goto again;
rnr_nak:
@@ -683,10 +708,10 @@ u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
return sizeof(struct ib_grh) / sizeof(u32);
}
-#define BTH2_OFFSET (offsetof(struct hfi1_pio_header, hdr.u.oth.bth[2]) / 4)
+#define BTH2_OFFSET (offsetof(struct hfi1_sdma_header, hdr.u.oth.bth[2]) / 4)
/**
- * build_ahg - create ahg in s_hdr
+ * build_ahg - create ahg in s_ahg
* @qp: a pointer to QP
* @npsn: the next PSN for the request/response
*
@@ -708,19 +733,18 @@ static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
qp->s_ahgidx = sdma_ahg_alloc(priv->s_sde);
if (qp->s_ahgidx >= 0) {
qp->s_ahgpsn = npsn;
- priv->s_hdr->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
+ priv->s_ahg->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
/* save to protect a change in another thread */
- priv->s_hdr->sde = priv->s_sde;
- priv->s_hdr->ahgidx = qp->s_ahgidx;
+ priv->s_ahg->ahgidx = qp->s_ahgidx;
qp->s_flags |= RVT_S_AHG_VALID;
}
} else {
/* subsequent middle after valid */
if (qp->s_ahgidx >= 0) {
- priv->s_hdr->tx_flags |= SDMA_TXREQ_F_USE_AHG;
- priv->s_hdr->ahgidx = qp->s_ahgidx;
- priv->s_hdr->ahgcount++;
- priv->s_hdr->ahgdesc[0] =
+ priv->s_ahg->tx_flags |= SDMA_TXREQ_F_USE_AHG;
+ priv->s_ahg->ahgidx = qp->s_ahgidx;
+ priv->s_ahg->ahgcount++;
+ priv->s_ahg->ahgdesc[0] =
sdma_build_ahg_descriptor(
(__force u16)cpu_to_be16((u16)npsn),
BTH2_OFFSET,
@@ -728,8 +752,8 @@ static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
16);
if ((npsn & 0xffff0000) !=
(qp->s_ahgpsn & 0xffff0000)) {
- priv->s_hdr->ahgcount++;
- priv->s_hdr->ahgdesc[1] =
+ priv->s_ahg->ahgcount++;
+ priv->s_ahg->ahgdesc[1] =
sdma_build_ahg_descriptor(
(__force u16)cpu_to_be16(
(u16)(npsn >> 16)),
@@ -766,7 +790,7 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct hfi1_other_headers *ohdr,
}
lrh0 |= (priv->s_sc & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4;
/*
- * reset s_hdr/AHG fields
+ * reset s_ahg/AHG fields
*
* This ensures that the ahgentry/ahgcount
* are at a non-AHG default to protect
@@ -776,10 +800,9 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct hfi1_other_headers *ohdr,
* build_ahg() will modify as appropriate
* to use the AHG feature.
*/
- priv->s_hdr->tx_flags = 0;
- priv->s_hdr->ahgcount = 0;
- priv->s_hdr->ahgidx = 0;
- priv->s_hdr->sde = NULL;
+ priv->s_ahg->tx_flags = 0;
+ priv->s_ahg->ahgcount = 0;
+ priv->s_ahg->ahgidx = 0;
if (qp->s_mig_state == IB_MIG_MIGRATED)
bth0 |= IB_BTH_MIG_REQ;
else
@@ -890,7 +913,7 @@ void hfi1_do_send(struct rvt_qp *qp)
*/
if (hfi1_verbs_send(qp, &ps))
return;
- /* Record that s_hdr is empty. */
+ /* Record that s_ahg is empty. */
qp->s_hdrwords = 0;
/* allow other tasks to run */
if (unlikely(time_after(jiffies, timeout))) {
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index 91fc2aed6..74c84c655 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -49,6 +49,7 @@
#include "hfi.h"
#include "mad.h"
#include "trace.h"
+#include "affinity.h"
/*
* Start of per-port congestion control structures and support code
@@ -622,6 +623,27 @@ static ssize_t show_tempsense(struct device *device,
return ret;
}
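+/*
+ * Backing callbacks for the per-device "sdma_affinity" sysfs
+ * attribute (registered via DEVICE_ATTR below); reads report and
+ * writes update the SDMA engine CPU affinity mask.
+ */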
+static ssize_t show_sdma_affinity(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct hfi1_ibdev *dev =
+ container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
+ struct hfi1_devdata *dd = dd_from_dev(dev);
+
+ return hfi1_get_sdma_affinity(dd, buf);
+}
+
+static ssize_t store_sdma_affinity(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hfi1_ibdev *dev =
+ container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
+ struct hfi1_devdata *dd = dd_from_dev(dev);
+
+ return hfi1_set_sdma_affinity(dd, buf, count);
+}
+
/*
* end of per-unit (or driver, in some cases, but replicated
* per unit) functions
@@ -636,6 +658,8 @@ static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
+static DEVICE_ATTR(sdma_affinity, S_IWUSR | S_IRUGO, show_sdma_affinity,
+ store_sdma_affinity);
static struct device_attribute *hfi1_attributes[] = {
&dev_attr_hw_rev,
@@ -646,6 +670,7 @@ static struct device_attribute *hfi1_attributes[] = {
&dev_attr_boardversion,
&dev_attr_tempsense,
&dev_attr_chip_reset,
+ &dev_attr_sdma_affinity,
};
int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
diff --git a/drivers/infiniband/hw/hfi1/trace.h b/drivers/infiniband/hw/hfi1/trace.h
index 28c1d0832..92dc88f01 100644
--- a/drivers/infiniband/hw/hfi1/trace.h
+++ b/drivers/infiniband/hw/hfi1/trace.h
@@ -44,1329 +44,10 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
-#undef TRACE_SYSTEM_VAR
-#define TRACE_SYSTEM_VAR hfi1
-
-#if !defined(__HFI1_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define __HFI1_TRACE_H
-
-#include <linux/tracepoint.h>
-#include <linux/trace_seq.h>
-
-#include "hfi.h"
-#include "mad.h"
-#include "sdma.h"
-
-#define DD_DEV_ENTRY(dd) __string(dev, dev_name(&(dd)->pcidev->dev))
-#define DD_DEV_ASSIGN(dd) __assign_str(dev, dev_name(&(dd)->pcidev->dev))
-
-#define packettype_name(etype) { RHF_RCV_TYPE_##etype, #etype }
-#define show_packettype(etype) \
-__print_symbolic(etype, \
- packettype_name(EXPECTED), \
- packettype_name(EAGER), \
- packettype_name(IB), \
- packettype_name(ERROR), \
- packettype_name(BYPASS))
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hfi1_rx
-
-TRACE_EVENT(hfi1_rcvhdr,
- TP_PROTO(struct hfi1_devdata *dd,
- u32 ctxt,
- u64 eflags,
- u32 etype,
- u32 hlen,
- u32 tlen,
- u32 updegr,
- u32 etail
- ),
- TP_ARGS(dd, ctxt, eflags, etype, hlen, tlen, updegr, etail),
- TP_STRUCT__entry(DD_DEV_ENTRY(dd)
- __field(u64, eflags)
- __field(u32, ctxt)
- __field(u32, etype)
- __field(u32, hlen)
- __field(u32, tlen)
- __field(u32, updegr)
- __field(u32, etail)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(dd);
- __entry->eflags = eflags;
- __entry->ctxt = ctxt;
- __entry->etype = etype;
- __entry->hlen = hlen;
- __entry->tlen = tlen;
- __entry->updegr = updegr;
- __entry->etail = etail;
- ),
- TP_printk(
- "[%s] ctxt %d eflags 0x%llx etype %d,%s hlen %d tlen %d updegr %d etail %d",
- __get_str(dev),
- __entry->ctxt,
- __entry->eflags,
- __entry->etype, show_packettype(__entry->etype),
- __entry->hlen,
- __entry->tlen,
- __entry->updegr,
- __entry->etail
- )
-);
-
-TRACE_EVENT(hfi1_receive_interrupt,
- TP_PROTO(struct hfi1_devdata *dd, u32 ctxt),
- TP_ARGS(dd, ctxt),
- TP_STRUCT__entry(DD_DEV_ENTRY(dd)
- __field(u32, ctxt)
- __field(u8, slow_path)
- __field(u8, dma_rtail)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(dd);
- __entry->ctxt = ctxt;
- if (dd->rcd[ctxt]->do_interrupt ==
- &handle_receive_interrupt) {
- __entry->slow_path = 1;
- __entry->dma_rtail = 0xFF;
- } else if (dd->rcd[ctxt]->do_interrupt ==
- &handle_receive_interrupt_dma_rtail){
- __entry->dma_rtail = 1;
- __entry->slow_path = 0;
- } else if (dd->rcd[ctxt]->do_interrupt ==
- &handle_receive_interrupt_nodma_rtail) {
- __entry->dma_rtail = 0;
- __entry->slow_path = 0;
- }
- ),
- TP_printk("[%s] ctxt %d SlowPath: %d DmaRtail: %d",
- __get_str(dev),
- __entry->ctxt,
- __entry->slow_path,
- __entry->dma_rtail
- )
-);
-
-TRACE_EVENT(hfi1_exp_tid_reg,
- TP_PROTO(unsigned ctxt, u16 subctxt, u32 rarr,
- u32 npages, unsigned long va, unsigned long pa,
- dma_addr_t dma),
- TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma),
- TP_STRUCT__entry(
- __field(unsigned, ctxt)
- __field(u16, subctxt)
- __field(u32, rarr)
- __field(u32, npages)
- __field(unsigned long, va)
- __field(unsigned long, pa)
- __field(dma_addr_t, dma)
- ),
- TP_fast_assign(
- __entry->ctxt = ctxt;
- __entry->subctxt = subctxt;
- __entry->rarr = rarr;
- __entry->npages = npages;
- __entry->va = va;
- __entry->pa = pa;
- __entry->dma = dma;
- ),
- TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx, va:0x%lx dma:0x%llx",
- __entry->ctxt,
- __entry->subctxt,
- __entry->rarr,
- __entry->npages,
- __entry->pa,
- __entry->va,
- __entry->dma
- )
- );
-
-TRACE_EVENT(hfi1_exp_tid_unreg,
- TP_PROTO(unsigned ctxt, u16 subctxt, u32 rarr, u32 npages,
- unsigned long va, unsigned long pa, dma_addr_t dma),
- TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma),
- TP_STRUCT__entry(
- __field(unsigned, ctxt)
- __field(u16, subctxt)
- __field(u32, rarr)
- __field(u32, npages)
- __field(unsigned long, va)
- __field(unsigned long, pa)
- __field(dma_addr_t, dma)
- ),
- TP_fast_assign(
- __entry->ctxt = ctxt;
- __entry->subctxt = subctxt;
- __entry->rarr = rarr;
- __entry->npages = npages;
- __entry->va = va;
- __entry->pa = pa;
- __entry->dma = dma;
- ),
- TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx, va:0x%lx dma:0x%llx",
- __entry->ctxt,
- __entry->subctxt,
- __entry->rarr,
- __entry->npages,
- __entry->pa,
- __entry->va,
- __entry->dma
- )
- );
-
-TRACE_EVENT(hfi1_exp_tid_inval,
- TP_PROTO(unsigned ctxt, u16 subctxt, unsigned long va, u32 rarr,
- u32 npages, dma_addr_t dma),
- TP_ARGS(ctxt, subctxt, va, rarr, npages, dma),
- TP_STRUCT__entry(
- __field(unsigned, ctxt)
- __field(u16, subctxt)
- __field(unsigned long, va)
- __field(u32, rarr)
- __field(u32, npages)
- __field(dma_addr_t, dma)
- ),
- TP_fast_assign(
- __entry->ctxt = ctxt;
- __entry->subctxt = subctxt;
- __entry->va = va;
- __entry->rarr = rarr;
- __entry->npages = npages;
- __entry->dma = dma;
- ),
- TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx dma: 0x%llx",
- __entry->ctxt,
- __entry->subctxt,
- __entry->rarr,
- __entry->npages,
- __entry->va,
- __entry->dma
- )
- );
-
-TRACE_EVENT(hfi1_mmu_invalidate,
- TP_PROTO(unsigned ctxt, u16 subctxt, const char *type,
- unsigned long start, unsigned long end),
- TP_ARGS(ctxt, subctxt, type, start, end),
- TP_STRUCT__entry(
- __field(unsigned, ctxt)
- __field(u16, subctxt)
- __string(type, type)
- __field(unsigned long, start)
- __field(unsigned long, end)
- ),
- TP_fast_assign(
- __entry->ctxt = ctxt;
- __entry->subctxt = subctxt;
- __assign_str(type, type);
- __entry->start = start;
- __entry->end = end;
- ),
- TP_printk("[%3u:%02u] MMU Invalidate (%s) 0x%lx - 0x%lx",
- __entry->ctxt,
- __entry->subctxt,
- __get_str(type),
- __entry->start,
- __entry->end
- )
- );
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hfi1_tx
-
-TRACE_EVENT(hfi1_piofree,
- TP_PROTO(struct send_context *sc, int extra),
- TP_ARGS(sc, extra),
- TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
- __field(u32, sw_index)
- __field(u32, hw_context)
- __field(int, extra)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
- __entry->sw_index = sc->sw_index;
- __entry->hw_context = sc->hw_context;
- __entry->extra = extra;
- ),
- TP_printk("[%s] ctxt %u(%u) extra %d",
- __get_str(dev),
- __entry->sw_index,
- __entry->hw_context,
- __entry->extra
- )
-);
-
-TRACE_EVENT(hfi1_wantpiointr,
- TP_PROTO(struct send_context *sc, u32 needint, u64 credit_ctrl),
- TP_ARGS(sc, needint, credit_ctrl),
- TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
- __field(u32, sw_index)
- __field(u32, hw_context)
- __field(u32, needint)
- __field(u64, credit_ctrl)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
- __entry->sw_index = sc->sw_index;
- __entry->hw_context = sc->hw_context;
- __entry->needint = needint;
- __entry->credit_ctrl = credit_ctrl;
- ),
- TP_printk("[%s] ctxt %u(%u) on %d credit_ctrl 0x%llx",
- __get_str(dev),
- __entry->sw_index,
- __entry->hw_context,
- __entry->needint,
- (unsigned long long)__entry->credit_ctrl
- )
-);
-
-DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template,
- TP_PROTO(struct rvt_qp *qp, u32 flags),
- TP_ARGS(qp, flags),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
- __field(u32, qpn)
- __field(u32, flags)
- __field(u32, s_flags)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
- __entry->flags = flags;
- __entry->qpn = qp->ibqp.qp_num;
- __entry->s_flags = qp->s_flags;
- ),
- TP_printk(
- "[%s] qpn 0x%x flags 0x%x s_flags 0x%x",
- __get_str(dev),
- __entry->qpn,
- __entry->flags,
- __entry->s_flags
- )
-);
-
-DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpwakeup,
- TP_PROTO(struct rvt_qp *qp, u32 flags),
- TP_ARGS(qp, flags));
-
-DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpsleep,
- TP_PROTO(struct rvt_qp *qp, u32 flags),
- TP_ARGS(qp, flags));
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hfi1_ibhdrs
-
-u8 ibhdr_exhdr_len(struct hfi1_ib_header *hdr);
-const char *parse_everbs_hdrs(struct trace_seq *p, u8 opcode, void *ehdrs);
-
-#define __parse_ib_ehdrs(op, ehdrs) parse_everbs_hdrs(p, op, ehdrs)
-
-const char *parse_sdma_flags(struct trace_seq *p, u64 desc0, u64 desc1);
-
-#define __parse_sdma_flags(desc0, desc1) parse_sdma_flags(p, desc0, desc1)
-
-#define lrh_name(lrh) { HFI1_##lrh, #lrh }
-#define show_lnh(lrh) \
-__print_symbolic(lrh, \
- lrh_name(LRH_BTH), \
- lrh_name(LRH_GRH))
-
-#define ib_opcode_name(opcode) { IB_OPCODE_##opcode, #opcode }
-#define show_ib_opcode(opcode) \
-__print_symbolic(opcode, \
- ib_opcode_name(RC_SEND_FIRST), \
- ib_opcode_name(RC_SEND_MIDDLE), \
- ib_opcode_name(RC_SEND_LAST), \
- ib_opcode_name(RC_SEND_LAST_WITH_IMMEDIATE), \
- ib_opcode_name(RC_SEND_ONLY), \
- ib_opcode_name(RC_SEND_ONLY_WITH_IMMEDIATE), \
- ib_opcode_name(RC_RDMA_WRITE_FIRST), \
- ib_opcode_name(RC_RDMA_WRITE_MIDDLE), \
- ib_opcode_name(RC_RDMA_WRITE_LAST), \
- ib_opcode_name(RC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \
- ib_opcode_name(RC_RDMA_WRITE_ONLY), \
- ib_opcode_name(RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \
- ib_opcode_name(RC_RDMA_READ_REQUEST), \
- ib_opcode_name(RC_RDMA_READ_RESPONSE_FIRST), \
- ib_opcode_name(RC_RDMA_READ_RESPONSE_MIDDLE), \
- ib_opcode_name(RC_RDMA_READ_RESPONSE_LAST), \
- ib_opcode_name(RC_RDMA_READ_RESPONSE_ONLY), \
- ib_opcode_name(RC_ACKNOWLEDGE), \
- ib_opcode_name(RC_ATOMIC_ACKNOWLEDGE), \
- ib_opcode_name(RC_COMPARE_SWAP), \
- ib_opcode_name(RC_FETCH_ADD), \
- ib_opcode_name(RC_SEND_LAST_WITH_INVALIDATE), \
- ib_opcode_name(RC_SEND_ONLY_WITH_INVALIDATE), \
- ib_opcode_name(UC_SEND_FIRST), \
- ib_opcode_name(UC_SEND_MIDDLE), \
- ib_opcode_name(UC_SEND_LAST), \
- ib_opcode_name(UC_SEND_LAST_WITH_IMMEDIATE), \
- ib_opcode_name(UC_SEND_ONLY), \
- ib_opcode_name(UC_SEND_ONLY_WITH_IMMEDIATE), \
- ib_opcode_name(UC_RDMA_WRITE_FIRST), \
- ib_opcode_name(UC_RDMA_WRITE_MIDDLE), \
- ib_opcode_name(UC_RDMA_WRITE_LAST), \
- ib_opcode_name(UC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \
- ib_opcode_name(UC_RDMA_WRITE_ONLY), \
- ib_opcode_name(UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \
- ib_opcode_name(UD_SEND_ONLY), \
- ib_opcode_name(UD_SEND_ONLY_WITH_IMMEDIATE), \
- ib_opcode_name(CNP))
-
-#define LRH_PRN "vl %d lver %d sl %d lnh %d,%s dlid %.4x len %d slid %.4x"
-#define BTH_PRN \
- "op 0x%.2x,%s se %d m %d pad %d tver %d pkey 0x%.4x " \
- "f %d b %d qpn 0x%.6x a %d psn 0x%.8x"
-#define EHDR_PRN "%s"
-
-DECLARE_EVENT_CLASS(hfi1_ibhdr_template,
- TP_PROTO(struct hfi1_devdata *dd,
- struct hfi1_ib_header *hdr),
- TP_ARGS(dd, hdr),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd)
- /* LRH */
- __field(u8, vl)
- __field(u8, lver)
- __field(u8, sl)
- __field(u8, lnh)
- __field(u16, dlid)
- __field(u16, len)
- __field(u16, slid)
- /* BTH */
- __field(u8, opcode)
- __field(u8, se)
- __field(u8, m)
- __field(u8, pad)
- __field(u8, tver)
- __field(u16, pkey)
- __field(u8, f)
- __field(u8, b)
- __field(u32, qpn)
- __field(u8, a)
- __field(u32, psn)
- /* extended headers */
- __dynamic_array(u8, ehdrs, ibhdr_exhdr_len(hdr))
- ),
- TP_fast_assign(
- struct hfi1_other_headers *ohdr;
-
- DD_DEV_ASSIGN(dd);
- /* LRH */
- __entry->vl =
- (u8)(be16_to_cpu(hdr->lrh[0]) >> 12);
- __entry->lver =
- (u8)(be16_to_cpu(hdr->lrh[0]) >> 8) & 0xf;
- __entry->sl =
- (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
- __entry->lnh =
- (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
- __entry->dlid =
- be16_to_cpu(hdr->lrh[1]);
- /* allow for larger len */
- __entry->len =
- be16_to_cpu(hdr->lrh[2]);
- __entry->slid =
- be16_to_cpu(hdr->lrh[3]);
- /* BTH */
- if (__entry->lnh == HFI1_LRH_BTH)
- ohdr = &hdr->u.oth;
- else
- ohdr = &hdr->u.l.oth;
- __entry->opcode =
- (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
- __entry->se =
- (be32_to_cpu(ohdr->bth[0]) >> 23) & 1;
- __entry->m =
- (be32_to_cpu(ohdr->bth[0]) >> 22) & 1;
- __entry->pad =
- (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- __entry->tver =
- (be32_to_cpu(ohdr->bth[0]) >> 16) & 0xf;
- __entry->pkey =
- be32_to_cpu(ohdr->bth[0]) & 0xffff;
- __entry->f =
- (be32_to_cpu(ohdr->bth[1]) >> HFI1_FECN_SHIFT) &
- HFI1_FECN_MASK;
- __entry->b =
- (be32_to_cpu(ohdr->bth[1]) >> HFI1_BECN_SHIFT) &
- HFI1_BECN_MASK;
- __entry->qpn =
- be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
- __entry->a =
- (be32_to_cpu(ohdr->bth[2]) >> 31) & 1;
- /* allow for larger PSN */
- __entry->psn =
- be32_to_cpu(ohdr->bth[2]) & 0x7fffffff;
- /* extended headers */
- memcpy(__get_dynamic_array(ehdrs), &ohdr->u,
- ibhdr_exhdr_len(hdr));
- ),
- TP_printk("[%s] " LRH_PRN " " BTH_PRN " " EHDR_PRN,
- __get_str(dev),
- /* LRH */
- __entry->vl,
- __entry->lver,
- __entry->sl,
- __entry->lnh, show_lnh(__entry->lnh),
- __entry->dlid,
- __entry->len,
- __entry->slid,
- /* BTH */
- __entry->opcode, show_ib_opcode(__entry->opcode),
- __entry->se,
- __entry->m,
- __entry->pad,
- __entry->tver,
- __entry->pkey,
- __entry->f,
- __entry->b,
- __entry->qpn,
- __entry->a,
- __entry->psn,
- /* extended headers */
- __parse_ib_ehdrs(
- __entry->opcode,
- (void *)__get_dynamic_array(ehdrs))
- )
-);
-
-DEFINE_EVENT(hfi1_ibhdr_template, input_ibhdr,
- TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
- TP_ARGS(dd, hdr));
-
-DEFINE_EVENT(hfi1_ibhdr_template, pio_output_ibhdr,
- TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
- TP_ARGS(dd, hdr));
-
-DEFINE_EVENT(hfi1_ibhdr_template, ack_output_ibhdr,
- TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
- TP_ARGS(dd, hdr));
-
-DEFINE_EVENT(hfi1_ibhdr_template, sdma_output_ibhdr,
- TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
- TP_ARGS(dd, hdr));
-
-#define SNOOP_PRN \
- "slid %.4x dlid %.4x qpn 0x%.6x opcode 0x%.2x,%s " \
- "svc lvl %d pkey 0x%.4x [header = %d bytes] [data = %d bytes]"
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hfi1_snoop
-
-TRACE_EVENT(snoop_capture,
- TP_PROTO(struct hfi1_devdata *dd,
- int hdr_len,
- struct hfi1_ib_header *hdr,
- int data_len,
- void *data),
- TP_ARGS(dd, hdr_len, hdr, data_len, data),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd)
- __field(u16, slid)
- __field(u16, dlid)
- __field(u32, qpn)
- __field(u8, opcode)
- __field(u8, sl)
- __field(u16, pkey)
- __field(u32, hdr_len)
- __field(u32, data_len)
- __field(u8, lnh)
- __dynamic_array(u8, raw_hdr, hdr_len)
- __dynamic_array(u8, raw_pkt, data_len)
- ),
- TP_fast_assign(
- struct hfi1_other_headers *ohdr;
-
- __entry->lnh = (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
- if (__entry->lnh == HFI1_LRH_BTH)
- ohdr = &hdr->u.oth;
- else
- ohdr = &hdr->u.l.oth;
- DD_DEV_ASSIGN(dd);
- __entry->slid = be16_to_cpu(hdr->lrh[3]);
- __entry->dlid = be16_to_cpu(hdr->lrh[1]);
- __entry->qpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
- __entry->opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
- __entry->sl = (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
- __entry->pkey = be32_to_cpu(ohdr->bth[0]) & 0xffff;
- __entry->hdr_len = hdr_len;
- __entry->data_len = data_len;
- memcpy(__get_dynamic_array(raw_hdr), hdr, hdr_len);
- memcpy(__get_dynamic_array(raw_pkt), data, data_len);
- ),
- TP_printk(
- "[%s] " SNOOP_PRN,
- __get_str(dev),
- __entry->slid,
- __entry->dlid,
- __entry->qpn,
- __entry->opcode,
- show_ib_opcode(__entry->opcode),
- __entry->sl,
- __entry->pkey,
- __entry->hdr_len,
- __entry->data_len
- )
-);
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hfi1_ctxts
-
-#define UCTXT_FMT \
- "cred:%u, credaddr:0x%llx, piobase:0x%llx, rcvhdr_cnt:%u, " \
- "rcvbase:0x%llx, rcvegrc:%u, rcvegrb:0x%llx"
-TRACE_EVENT(hfi1_uctxtdata,
- TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt),
- TP_ARGS(dd, uctxt),
- TP_STRUCT__entry(DD_DEV_ENTRY(dd)
- __field(unsigned, ctxt)
- __field(u32, credits)
- __field(u64, hw_free)
- __field(u64, piobase)
- __field(u16, rcvhdrq_cnt)
- __field(u64, rcvhdrq_phys)
- __field(u32, eager_cnt)
- __field(u64, rcvegr_phys)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(dd);
- __entry->ctxt = uctxt->ctxt;
- __entry->credits = uctxt->sc->credits;
- __entry->hw_free = (u64)uctxt->sc->hw_free;
- __entry->piobase = (u64)uctxt->sc->base_addr;
- __entry->rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
- __entry->rcvhdrq_phys = uctxt->rcvhdrq_phys;
- __entry->eager_cnt = uctxt->egrbufs.alloced;
- __entry->rcvegr_phys =
- uctxt->egrbufs.rcvtids[0].phys;
- ),
- TP_printk("[%s] ctxt %u " UCTXT_FMT,
- __get_str(dev),
- __entry->ctxt,
- __entry->credits,
- __entry->hw_free,
- __entry->piobase,
- __entry->rcvhdrq_cnt,
- __entry->rcvhdrq_phys,
- __entry->eager_cnt,
- __entry->rcvegr_phys
- )
-);
-
-#define CINFO_FMT \
- "egrtids:%u, egr_size:%u, hdrq_cnt:%u, hdrq_size:%u, sdma_ring_size:%u"
-TRACE_EVENT(hfi1_ctxt_info,
- TP_PROTO(struct hfi1_devdata *dd, unsigned ctxt, unsigned subctxt,
- struct hfi1_ctxt_info cinfo),
- TP_ARGS(dd, ctxt, subctxt, cinfo),
- TP_STRUCT__entry(DD_DEV_ENTRY(dd)
- __field(unsigned, ctxt)
- __field(unsigned, subctxt)
- __field(u16, egrtids)
- __field(u16, rcvhdrq_cnt)
- __field(u16, rcvhdrq_size)
- __field(u16, sdma_ring_size)
- __field(u32, rcvegr_size)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(dd);
- __entry->ctxt = ctxt;
- __entry->subctxt = subctxt;
- __entry->egrtids = cinfo.egrtids;
- __entry->rcvhdrq_cnt = cinfo.rcvhdrq_cnt;
- __entry->rcvhdrq_size = cinfo.rcvhdrq_entsize;
- __entry->sdma_ring_size = cinfo.sdma_ring_size;
- __entry->rcvegr_size = cinfo.rcvegr_size;
- ),
- TP_printk("[%s] ctxt %u:%u " CINFO_FMT,
- __get_str(dev),
- __entry->ctxt,
- __entry->subctxt,
- __entry->egrtids,
- __entry->rcvegr_size,
- __entry->rcvhdrq_cnt,
- __entry->rcvhdrq_size,
- __entry->sdma_ring_size
- )
-);
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hfi1_sma
-
-#define BCT_FORMAT \
- "shared_limit %x vls 0-7 [%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x] 15 [%x,%x]"
-
-#define BCT(field) \
- be16_to_cpu( \
- ((struct buffer_control *)__get_dynamic_array(bct))->field \
- )
-
-DECLARE_EVENT_CLASS(hfi1_bct_template,
- TP_PROTO(struct hfi1_devdata *dd,
- struct buffer_control *bc),
- TP_ARGS(dd, bc),
- TP_STRUCT__entry(DD_DEV_ENTRY(dd)
- __dynamic_array(u8, bct, sizeof(*bc))
- ),
- TP_fast_assign(DD_DEV_ASSIGN(dd);
- memcpy(__get_dynamic_array(bct), bc,
- sizeof(*bc));
- ),
- TP_printk(BCT_FORMAT,
- BCT(overall_shared_limit),
-
- BCT(vl[0].dedicated),
- BCT(vl[0].shared),
-
- BCT(vl[1].dedicated),
- BCT(vl[1].shared),
-
- BCT(vl[2].dedicated),
- BCT(vl[2].shared),
-
- BCT(vl[3].dedicated),
- BCT(vl[3].shared),
-
- BCT(vl[4].dedicated),
- BCT(vl[4].shared),
-
- BCT(vl[5].dedicated),
- BCT(vl[5].shared),
-
- BCT(vl[6].dedicated),
- BCT(vl[6].shared),
-
- BCT(vl[7].dedicated),
- BCT(vl[7].shared),
-
- BCT(vl[15].dedicated),
- BCT(vl[15].shared)
- )
-);
-
-DEFINE_EVENT(hfi1_bct_template, bct_set,
- TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
- TP_ARGS(dd, bc));
-
-DEFINE_EVENT(hfi1_bct_template, bct_get,
- TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
- TP_ARGS(dd, bc));
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hfi1_sdma
-
-TRACE_EVENT(hfi1_sdma_descriptor,
- TP_PROTO(struct sdma_engine *sde,
- u64 desc0,
- u64 desc1,
- u16 e,
- void *descp),
- TP_ARGS(sde, desc0, desc1, e, descp),
- TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
- __field(void *, descp)
- __field(u64, desc0)
- __field(u64, desc1)
- __field(u16, e)
- __field(u8, idx)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
- __entry->desc0 = desc0;
- __entry->desc1 = desc1;
- __entry->idx = sde->this_idx;
- __entry->descp = descp;
- __entry->e = e;
- ),
- TP_printk(
- "[%s] SDE(%u) flags:%s addr:0x%016llx gen:%u len:%u d0:%016llx d1:%016llx to %p,%u",
- __get_str(dev),
- __entry->idx,
- __parse_sdma_flags(__entry->desc0, __entry->desc1),
- (__entry->desc0 >> SDMA_DESC0_PHY_ADDR_SHIFT) &
- SDMA_DESC0_PHY_ADDR_MASK,
- (u8)((__entry->desc1 >> SDMA_DESC1_GENERATION_SHIFT) &
- SDMA_DESC1_GENERATION_MASK),
- (u16)((__entry->desc0 >> SDMA_DESC0_BYTE_COUNT_SHIFT) &
- SDMA_DESC0_BYTE_COUNT_MASK),
- __entry->desc0,
- __entry->desc1,
- __entry->descp,
- __entry->e
- )
-);
-
-TRACE_EVENT(hfi1_sdma_engine_select,
- TP_PROTO(struct hfi1_devdata *dd, u32 sel, u8 vl, u8 idx),
- TP_ARGS(dd, sel, vl, idx),
- TP_STRUCT__entry(DD_DEV_ENTRY(dd)
- __field(u32, sel)
- __field(u8, vl)
- __field(u8, idx)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(dd);
- __entry->sel = sel;
- __entry->vl = vl;
- __entry->idx = idx;
- ),
- TP_printk("[%s] selecting SDE %u sel 0x%x vl %u",
- __get_str(dev),
- __entry->idx,
- __entry->sel,
- __entry->vl
- )
-);
-
-DECLARE_EVENT_CLASS(hfi1_sdma_engine_class,
- TP_PROTO(struct sdma_engine *sde, u64 status),
- TP_ARGS(sde, status),
- TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
- __field(u64, status)
- __field(u8, idx)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
- __entry->status = status;
- __entry->idx = sde->this_idx;
- ),
- TP_printk("[%s] SDE(%u) status %llx",
- __get_str(dev),
- __entry->idx,
- (unsigned long long)__entry->status
- )
-);
-
-DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_interrupt,
- TP_PROTO(struct sdma_engine *sde, u64 status),
- TP_ARGS(sde, status)
-);
-
-DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_progress,
- TP_PROTO(struct sdma_engine *sde, u64 status),
- TP_ARGS(sde, status)
-);
-
-DECLARE_EVENT_CLASS(hfi1_sdma_ahg_ad,
- TP_PROTO(struct sdma_engine *sde, int aidx),
- TP_ARGS(sde, aidx),
- TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
- __field(int, aidx)
- __field(u8, idx)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
- __entry->idx = sde->this_idx;
- __entry->aidx = aidx;
- ),
- TP_printk("[%s] SDE(%u) aidx %d",
- __get_str(dev),
- __entry->idx,
- __entry->aidx
- )
-);
-
-DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_allocate,
- TP_PROTO(struct sdma_engine *sde, int aidx),
- TP_ARGS(sde, aidx));
-
-DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_deallocate,
- TP_PROTO(struct sdma_engine *sde, int aidx),
- TP_ARGS(sde, aidx));
-
-#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
-TRACE_EVENT(hfi1_sdma_progress,
- TP_PROTO(struct sdma_engine *sde,
- u16 hwhead,
- u16 swhead,
- struct sdma_txreq *txp
- ),
- TP_ARGS(sde, hwhead, swhead, txp),
- TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
- __field(u64, sn)
- __field(u16, hwhead)
- __field(u16, swhead)
- __field(u16, txnext)
- __field(u16, tx_tail)
- __field(u16, tx_head)
- __field(u8, idx)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
- __entry->hwhead = hwhead;
- __entry->swhead = swhead;
- __entry->tx_tail = sde->tx_tail;
- __entry->tx_head = sde->tx_head;
- __entry->txnext = txp ? txp->next_descq_idx : ~0;
- __entry->idx = sde->this_idx;
- __entry->sn = txp ? txp->sn : ~0;
- ),
- TP_printk(
- "[%s] SDE(%u) sn %llu hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
- __get_str(dev),
- __entry->idx,
- __entry->sn,
- __entry->hwhead,
- __entry->swhead,
- __entry->txnext,
- __entry->tx_head,
- __entry->tx_tail
- )
-);
-#else
-TRACE_EVENT(hfi1_sdma_progress,
- TP_PROTO(struct sdma_engine *sde,
- u16 hwhead, u16 swhead,
- struct sdma_txreq *txp
- ),
- TP_ARGS(sde, hwhead, swhead, txp),
- TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
- __field(u16, hwhead)
- __field(u16, swhead)
- __field(u16, txnext)
- __field(u16, tx_tail)
- __field(u16, tx_head)
- __field(u8, idx)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
- __entry->hwhead = hwhead;
- __entry->swhead = swhead;
- __entry->tx_tail = sde->tx_tail;
- __entry->tx_head = sde->tx_head;
- __entry->txnext = txp ? txp->next_descq_idx : ~0;
- __entry->idx = sde->this_idx;
- ),
- TP_printk(
- "[%s] SDE(%u) hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
- __get_str(dev),
- __entry->idx,
- __entry->hwhead,
- __entry->swhead,
- __entry->txnext,
- __entry->tx_head,
- __entry->tx_tail
- )
-);
-#endif
-
-DECLARE_EVENT_CLASS(hfi1_sdma_sn,
- TP_PROTO(struct sdma_engine *sde, u64 sn),
- TP_ARGS(sde, sn),
- TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
- __field(u64, sn)
- __field(u8, idx)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
- __entry->sn = sn;
- __entry->idx = sde->this_idx;
- ),
- TP_printk("[%s] SDE(%u) sn %llu",
- __get_str(dev),
- __entry->idx,
- __entry->sn
- )
-);
-
-DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_out_sn,
- TP_PROTO(
- struct sdma_engine *sde,
- u64 sn
- ),
- TP_ARGS(sde, sn)
-);
-
-DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_in_sn,
- TP_PROTO(struct sdma_engine *sde, u64 sn),
- TP_ARGS(sde, sn)
-);
-
-#define USDMA_HDR_FORMAT \
- "[%s:%u:%u:%u] PBC=(0x%x 0x%x) LRH=(0x%x 0x%x) BTH=(0x%x 0x%x 0x%x) KDETH=(0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x) TIDVal=0x%x"
-
-TRACE_EVENT(hfi1_sdma_user_header,
- TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
- struct hfi1_pkt_header *hdr, u32 tidval),
- TP_ARGS(dd, ctxt, subctxt, req, hdr, tidval),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd)
- __field(u16, ctxt)
- __field(u8, subctxt)
- __field(u16, req)
- __field(__le32, pbc0)
- __field(__le32, pbc1)
- __field(__be32, lrh0)
- __field(__be32, lrh1)
- __field(__be32, bth0)
- __field(__be32, bth1)
- __field(__be32, bth2)
- __field(__le32, kdeth0)
- __field(__le32, kdeth1)
- __field(__le32, kdeth2)
- __field(__le32, kdeth3)
- __field(__le32, kdeth4)
- __field(__le32, kdeth5)
- __field(__le32, kdeth6)
- __field(__le32, kdeth7)
- __field(__le32, kdeth8)
- __field(u32, tidval)
- ),
- TP_fast_assign(
- __le32 *pbc = (__le32 *)hdr->pbc;
- __be32 *lrh = (__be32 *)hdr->lrh;
- __be32 *bth = (__be32 *)hdr->bth;
- __le32 *kdeth = (__le32 *)&hdr->kdeth;
-
- DD_DEV_ASSIGN(dd);
- __entry->ctxt = ctxt;
- __entry->subctxt = subctxt;
- __entry->req = req;
- __entry->pbc0 = pbc[0];
- __entry->pbc1 = pbc[1];
- __entry->lrh0 = be32_to_cpu(lrh[0]);
- __entry->lrh1 = be32_to_cpu(lrh[1]);
- __entry->bth0 = be32_to_cpu(bth[0]);
- __entry->bth1 = be32_to_cpu(bth[1]);
- __entry->bth2 = be32_to_cpu(bth[2]);
- __entry->kdeth0 = kdeth[0];
- __entry->kdeth1 = kdeth[1];
- __entry->kdeth2 = kdeth[2];
- __entry->kdeth3 = kdeth[3];
- __entry->kdeth4 = kdeth[4];
- __entry->kdeth5 = kdeth[5];
- __entry->kdeth6 = kdeth[6];
- __entry->kdeth7 = kdeth[7];
- __entry->kdeth8 = kdeth[8];
- __entry->tidval = tidval;
- ),
- TP_printk(USDMA_HDR_FORMAT,
- __get_str(dev),
- __entry->ctxt,
- __entry->subctxt,
- __entry->req,
- __entry->pbc1,
- __entry->pbc0,
- __entry->lrh0,
- __entry->lrh1,
- __entry->bth0,
- __entry->bth1,
- __entry->bth2,
- __entry->kdeth0,
- __entry->kdeth1,
- __entry->kdeth2,
- __entry->kdeth3,
- __entry->kdeth4,
- __entry->kdeth5,
- __entry->kdeth6,
- __entry->kdeth7,
- __entry->kdeth8,
- __entry->tidval
- )
- );
-
-#define SDMA_UREQ_FMT \
- "[%s:%u:%u] ver/op=0x%x, iovcnt=%u, npkts=%u, frag=%u, idx=%u"
-TRACE_EVENT(hfi1_sdma_user_reqinfo,
- TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i),
- TP_ARGS(dd, ctxt, subctxt, i),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd);
- __field(u16, ctxt)
- __field(u8, subctxt)
- __field(u8, ver_opcode)
- __field(u8, iovcnt)
- __field(u16, npkts)
- __field(u16, fragsize)
- __field(u16, comp_idx)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(dd);
- __entry->ctxt = ctxt;
- __entry->subctxt = subctxt;
- __entry->ver_opcode = i[0] & 0xff;
- __entry->iovcnt = (i[0] >> 8) & 0xff;
- __entry->npkts = i[1];
- __entry->fragsize = i[2];
- __entry->comp_idx = i[3];
- ),
- TP_printk(SDMA_UREQ_FMT,
- __get_str(dev),
- __entry->ctxt,
- __entry->subctxt,
- __entry->ver_opcode,
- __entry->iovcnt,
- __entry->npkts,
- __entry->fragsize,
- __entry->comp_idx
- )
- );
-
-#define usdma_complete_name(st) { st, #st }
-#define show_usdma_complete_state(st) \
- __print_symbolic(st, \
- usdma_complete_name(FREE), \
- usdma_complete_name(QUEUED), \
- usdma_complete_name(COMPLETE), \
- usdma_complete_name(ERROR))
-
-TRACE_EVENT(hfi1_sdma_user_completion,
- TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 idx,
- u8 state, int code),
- TP_ARGS(dd, ctxt, subctxt, idx, state, code),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd)
- __field(u16, ctxt)
- __field(u8, subctxt)
- __field(u16, idx)
- __field(u8, state)
- __field(int, code)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(dd);
- __entry->ctxt = ctxt;
- __entry->subctxt = subctxt;
- __entry->idx = idx;
- __entry->state = state;
- __entry->code = code;
- ),
- TP_printk("[%s:%u:%u:%u] SDMA completion state %s (%d)",
- __get_str(dev), __entry->ctxt, __entry->subctxt,
- __entry->idx, show_usdma_complete_state(__entry->state),
- __entry->code)
- );
-
-const char *print_u32_array(struct trace_seq *, u32 *, int);
-#define __print_u32_hex(arr, len) print_u32_array(p, arr, len)
-
-TRACE_EVENT(hfi1_sdma_user_header_ahg,
- TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
- u8 sde, u8 ahgidx, u32 *ahg, int len, u32 tidval),
- TP_ARGS(dd, ctxt, subctxt, req, sde, ahgidx, ahg, len, tidval),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd)
- __field(u16, ctxt)
- __field(u8, subctxt)
- __field(u16, req)
- __field(u8, sde)
- __field(u8, idx)
- __field(int, len)
- __field(u32, tidval)
- __array(u32, ahg, 10)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(dd);
- __entry->ctxt = ctxt;
- __entry->subctxt = subctxt;
- __entry->req = req;
- __entry->sde = sde;
- __entry->idx = ahgidx;
- __entry->len = len;
- __entry->tidval = tidval;
- memcpy(__entry->ahg, ahg, len * sizeof(u32));
- ),
- TP_printk("[%s:%u:%u:%u] (SDE%u/AHG%u) ahg[0-%d]=(%s) TIDVal=0x%x",
- __get_str(dev),
- __entry->ctxt,
- __entry->subctxt,
- __entry->req,
- __entry->sde,
- __entry->idx,
- __entry->len - 1,
- __print_u32_hex(__entry->ahg, __entry->len),
- __entry->tidval
- )
- );
-
-TRACE_EVENT(hfi1_sdma_state,
- TP_PROTO(struct sdma_engine *sde,
- const char *cstate,
- const char *nstate
- ),
- TP_ARGS(sde, cstate, nstate),
- TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
- __string(curstate, cstate)
- __string(newstate, nstate)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
- __assign_str(curstate, cstate);
- __assign_str(newstate, nstate);
- ),
- TP_printk("[%s] current state %s new state %s",
- __get_str(dev),
- __get_str(curstate),
- __get_str(newstate)
- )
-);
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hfi1_rc
-
-DECLARE_EVENT_CLASS(hfi1_rc_template,
- TP_PROTO(struct rvt_qp *qp, u32 psn),
- TP_ARGS(qp, psn),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
- __field(u32, qpn)
- __field(u32, s_flags)
- __field(u32, psn)
- __field(u32, s_psn)
- __field(u32, s_next_psn)
- __field(u32, s_sending_psn)
- __field(u32, s_sending_hpsn)
- __field(u32, r_psn)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
- __entry->qpn = qp->ibqp.qp_num;
- __entry->s_flags = qp->s_flags;
- __entry->psn = psn;
- __entry->s_psn = qp->s_psn;
- __entry->s_next_psn = qp->s_next_psn;
- __entry->s_sending_psn = qp->s_sending_psn;
- __entry->s_sending_hpsn = qp->s_sending_hpsn;
- __entry->r_psn = qp->r_psn;
- ),
- TP_printk(
- "[%s] qpn 0x%x s_flags 0x%x psn 0x%x s_psn 0x%x s_next_psn 0x%x s_sending_psn 0x%x sending_hpsn 0x%x r_psn 0x%x",
- __get_str(dev),
- __entry->qpn,
- __entry->s_flags,
- __entry->psn,
- __entry->s_psn,
- __entry->s_next_psn,
- __entry->s_sending_psn,
- __entry->s_sending_hpsn,
- __entry->r_psn
- )
-);
-
-DEFINE_EVENT(hfi1_rc_template, hfi1_rc_sendcomplete,
- TP_PROTO(struct rvt_qp *qp, u32 psn),
- TP_ARGS(qp, psn)
-);
-
-DEFINE_EVENT(hfi1_rc_template, hfi1_rc_ack,
- TP_PROTO(struct rvt_qp *qp, u32 psn),
- TP_ARGS(qp, psn)
-);
-
-DEFINE_EVENT(hfi1_rc_template, hfi1_rc_timeout,
- TP_PROTO(struct rvt_qp *qp, u32 psn),
- TP_ARGS(qp, psn)
-);
-
-DEFINE_EVENT(hfi1_rc_template, hfi1_rc_rcv_error,
- TP_PROTO(struct rvt_qp *qp, u32 psn),
- TP_ARGS(qp, psn)
-);
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hfi1_misc
-
-TRACE_EVENT(hfi1_interrupt,
- TP_PROTO(struct hfi1_devdata *dd, const struct is_table *is_entry,
- int src),
- TP_ARGS(dd, is_entry, src),
- TP_STRUCT__entry(DD_DEV_ENTRY(dd)
- __array(char, buf, 64)
- __field(int, src)
- ),
- TP_fast_assign(DD_DEV_ASSIGN(dd)
- is_entry->is_name(__entry->buf, 64,
- src - is_entry->start);
- __entry->src = src;
- ),
- TP_printk("[%s] source: %s [%d]", __get_str(dev), __entry->buf,
- __entry->src)
-);
-
-/*
- * Note:
- * This produces a REALLY ugly trace in the console output when the string is
- * too long.
- */
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hfi1_trace
-
-#define MAX_MSG_LEN 512
-
-DECLARE_EVENT_CLASS(hfi1_trace_template,
- TP_PROTO(const char *function, struct va_format *vaf),
- TP_ARGS(function, vaf),
- TP_STRUCT__entry(__string(function, function)
- __dynamic_array(char, msg, MAX_MSG_LEN)
- ),
- TP_fast_assign(__assign_str(function, function);
- WARN_ON_ONCE(vsnprintf
- (__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >=
- MAX_MSG_LEN);
- ),
- TP_printk("(%s) %s",
- __get_str(function),
- __get_str(msg))
-);
-
-/*
- * It may be nice to macroize the __hfi1_trace but the va_* stuff requires an
- * actual function to work and can not be in a macro.
- */
-#define __hfi1_trace_def(lvl) \
-void __hfi1_trace_##lvl(const char *funct, char *fmt, ...); \
- \
-DEFINE_EVENT(hfi1_trace_template, hfi1_ ##lvl, \
- TP_PROTO(const char *function, struct va_format *vaf), \
- TP_ARGS(function, vaf))
-
-#define __hfi1_trace_fn(lvl) \
-void __hfi1_trace_##lvl(const char *func, char *fmt, ...) \
-{ \
- struct va_format vaf = { \
- .fmt = fmt, \
- }; \
- va_list args; \
- \
- va_start(args, fmt); \
- vaf.va = &args; \
- trace_hfi1_ ##lvl(func, &vaf); \
- va_end(args); \
- return; \
-}
-
-/*
- * To create a new trace level simply define it below and as a __hfi1_trace_fn
- * in trace.c. This will create all the hooks for calling
- * hfi1_cdbg(LVL, fmt, ...); as well as take care of all
- * the debugfs stuff.
- */
-__hfi1_trace_def(PKT);
-__hfi1_trace_def(PROC);
-__hfi1_trace_def(SDMA);
-__hfi1_trace_def(LINKVERB);
-__hfi1_trace_def(DEBUG);
-__hfi1_trace_def(SNOOP);
-__hfi1_trace_def(CNTR);
-__hfi1_trace_def(PIO);
-__hfi1_trace_def(DC8051);
-__hfi1_trace_def(FIRMWARE);
-__hfi1_trace_def(RCVCTRL);
-__hfi1_trace_def(TID);
-__hfi1_trace_def(MMU);
-__hfi1_trace_def(IOCTL);
-
-#define hfi1_cdbg(which, fmt, ...) \
- __hfi1_trace_##which(__func__, fmt, ##__VA_ARGS__)
-
-#define hfi1_dbg(fmt, ...) \
- hfi1_cdbg(DEBUG, fmt, ##__VA_ARGS__)
-
-/*
- * Define HFI1_EARLY_DBG at compile time or here to enable early trace
- * messages. Do not check in an enablement for this.
- */
-
-#ifdef HFI1_EARLY_DBG
-#define hfi1_dbg_early(fmt, ...) \
- trace_printk(fmt, ##__VA_ARGS__)
-#else
-#define hfi1_dbg_early(fmt, ...)
-#endif
-
-#endif /* __HFI1_TRACE_H */
-
-#undef TRACE_INCLUDE_PATH
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace
-#include <trace/define_trace.h>
+#include "trace_dbg.h"
+#include "trace_misc.h"
+#include "trace_ctxts.h"
+#include "trace_ibhdrs.h"
+#include "trace_rc.h"
+#include "trace_rx.h"
+#include "trace_tx.h"
diff --git a/drivers/infiniband/hw/hfi1/trace_ctxts.h b/drivers/infiniband/hw/hfi1/trace_ctxts.h
new file mode 100644
index 000000000..31654bbac
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/trace_ctxts.h
@@ -0,0 +1,141 @@
+/*
+* Copyright(c) 2015, 2016 Intel Corporation.
+*
+* This file is provided under a dual BSD/GPLv2 license. When using or
+* redistributing this file, you may do so under either license.
+*
+* GPL LICENSE SUMMARY
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* BSD LICENSE
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* - Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* - Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* - Neither the name of Intel Corporation nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+#if !defined(__HFI1_TRACE_CTXTS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HFI1_TRACE_CTXTS_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "hfi.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_ctxts
+
+#define UCTXT_FMT \
+ "cred:%u, credaddr:0x%llx, piobase:0x%p, rcvhdr_cnt:%u, " \
+ "rcvbase:0x%llx, rcvegrc:%u, rcvegrb:0x%llx"
+TRACE_EVENT(hfi1_uctxtdata,
+ TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt),
+ TP_ARGS(dd, uctxt),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __field(unsigned int, ctxt)
+ __field(u32, credits)
+ __field(u64, hw_free)
+ __field(void __iomem *, piobase)
+ __field(u16, rcvhdrq_cnt)
+ __field(u64, rcvhdrq_phys)
+ __field(u32, eager_cnt)
+ __field(u64, rcvegr_phys)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ __entry->ctxt = uctxt->ctxt;
+ __entry->credits = uctxt->sc->credits;
+ __entry->hw_free = le64_to_cpu(*uctxt->sc->hw_free);
+ __entry->piobase = uctxt->sc->base_addr;
+ __entry->rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
+ __entry->rcvhdrq_phys = uctxt->rcvhdrq_phys;
+ __entry->eager_cnt = uctxt->egrbufs.alloced;
+ __entry->rcvegr_phys =
+ uctxt->egrbufs.rcvtids[0].phys;
+ ),
+ TP_printk("[%s] ctxt %u " UCTXT_FMT,
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->credits,
+ __entry->hw_free,
+ __entry->piobase,
+ __entry->rcvhdrq_cnt,
+ __entry->rcvhdrq_phys,
+ __entry->eager_cnt,
+ __entry->rcvegr_phys
+ )
+);
+
+#define CINFO_FMT \
+ "egrtids:%u, egr_size:%u, hdrq_cnt:%u, hdrq_size:%u, sdma_ring_size:%u"
+TRACE_EVENT(hfi1_ctxt_info,
+ TP_PROTO(struct hfi1_devdata *dd, unsigned int ctxt,
+ unsigned int subctxt,
+ struct hfi1_ctxt_info cinfo),
+ TP_ARGS(dd, ctxt, subctxt, cinfo),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __field(unsigned int, ctxt)
+ __field(unsigned int, subctxt)
+ __field(u16, egrtids)
+ __field(u16, rcvhdrq_cnt)
+ __field(u16, rcvhdrq_size)
+ __field(u16, sdma_ring_size)
+ __field(u32, rcvegr_size)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->egrtids = cinfo.egrtids;
+ __entry->rcvhdrq_cnt = cinfo.rcvhdrq_cnt;
+ __entry->rcvhdrq_size = cinfo.rcvhdrq_entsize;
+ __entry->sdma_ring_size = cinfo.sdma_ring_size;
+ __entry->rcvegr_size = cinfo.rcvegr_size;
+ ),
+ TP_printk("[%s] ctxt %u:%u " CINFO_FMT,
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->egrtids,
+ __entry->rcvegr_size,
+ __entry->rcvhdrq_cnt,
+ __entry->rcvhdrq_size,
+ __entry->sdma_ring_size
+ )
+);
+
+#endif /* __HFI1_TRACE_CTXTS_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_ctxts
+#include <trace/define_trace.h>
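
Each TRACE_EVENT() above expands into a trace_<event>() helper that compiles to a near no-op unless the event is enabled through tracefs. A minimal, illustrative call site for the context-data event (the wrapper function is made up; the real caller lives in the driver's context-initialization path):

	/* Illustrative only; assumes an initialized dd and uctxt. */
	static void report_new_ctxt(struct hfi1_devdata *dd,
				    struct hfi1_ctxtdata *uctxt)
	{
		/* Fires the hfi1_ctxts:hfi1_uctxtdata event when enabled. */
		trace_hfi1_uctxtdata(dd, uctxt);
	}
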
diff --git a/drivers/infiniband/hw/hfi1/trace_dbg.h b/drivers/infiniband/hw/hfi1/trace_dbg.h
new file mode 100644
index 000000000..0e7d92953
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/trace_dbg.h
@@ -0,0 +1,155 @@
+/*
+* Copyright(c) 2015, 2016 Intel Corporation.
+*
+* This file is provided under a dual BSD/GPLv2 license. When using or
+* redistributing this file, you may do so under either license.
+*
+* GPL LICENSE SUMMARY
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* BSD LICENSE
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* - Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* - Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* - Neither the name of Intel Corporation nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+#if !defined(__HFI1_TRACE_EXTRA_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HFI1_TRACE_EXTRA_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "hfi.h"
+
+/*
+ * Note:
+ * This produces a REALLY ugly trace in the console output when the string is
+ * too long.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_dbg
+
+#define MAX_MSG_LEN 512
+
+DECLARE_EVENT_CLASS(hfi1_trace_template,
+ TP_PROTO(const char *function, struct va_format *vaf),
+ TP_ARGS(function, vaf),
+ TP_STRUCT__entry(__string(function, function)
+ __dynamic_array(char, msg, MAX_MSG_LEN)
+ ),
+ TP_fast_assign(__assign_str(function, function);
+ WARN_ON_ONCE(vsnprintf
+ (__get_dynamic_array(msg),
+ MAX_MSG_LEN, vaf->fmt,
+ *vaf->va) >=
+ MAX_MSG_LEN);
+ ),
+ TP_printk("(%s) %s",
+ __get_str(function),
+ __get_str(msg))
+);
+
+/*
+ * It might be nice to macroize __hfi1_trace, but the va_* handling requires
+ * an actual function and cannot live in a macro.
+ */
+#define __hfi1_trace_def(lvl) \
+void __hfi1_trace_##lvl(const char *funct, char *fmt, ...); \
+ \
+DEFINE_EVENT(hfi1_trace_template, hfi1_ ##lvl, \
+ TP_PROTO(const char *function, struct va_format *vaf), \
+ TP_ARGS(function, vaf))
+
+#define __hfi1_trace_fn(lvl) \
+void __hfi1_trace_##lvl(const char *func, char *fmt, ...) \
+{ \
+ struct va_format vaf = { \
+ .fmt = fmt, \
+ }; \
+ va_list args; \
+ \
+ va_start(args, fmt); \
+ vaf.va = &args; \
+ trace_hfi1_ ##lvl(func, &vaf); \
+ va_end(args); \
+ return; \
+}
+
+/*
+ * To create a new trace level, define it below and add a matching
+ * __hfi1_trace_fn in trace.c. This creates all the hooks for calling
+ * hfi1_cdbg(LVL, fmt, ...) and takes care of the debugfs plumbing.
+ */
+__hfi1_trace_def(PKT);
+__hfi1_trace_def(PROC);
+__hfi1_trace_def(SDMA);
+__hfi1_trace_def(LINKVERB);
+__hfi1_trace_def(DEBUG);
+__hfi1_trace_def(SNOOP);
+__hfi1_trace_def(CNTR);
+__hfi1_trace_def(PIO);
+__hfi1_trace_def(DC8051);
+__hfi1_trace_def(FIRMWARE);
+__hfi1_trace_def(RCVCTRL);
+__hfi1_trace_def(TID);
+__hfi1_trace_def(MMU);
+__hfi1_trace_def(IOCTL);
+
+#define hfi1_cdbg(which, fmt, ...) \
+ __hfi1_trace_##which(__func__, fmt, ##__VA_ARGS__)
+
+#define hfi1_dbg(fmt, ...) \
+ hfi1_cdbg(DEBUG, fmt, ##__VA_ARGS__)
+
+/*
+ * Define HFI1_EARLY_DBG at compile time or here to enable early trace
+ * messages. Do not check in code with this enabled.
+ */
+
+#ifdef HFI1_EARLY_DBG
+#define hfi1_dbg_early(fmt, ...) \
+ trace_printk(fmt, ##__VA_ARGS__)
+#else
+#define hfi1_dbg_early(fmt, ...)
+#endif
+
+#endif /* __HFI1_TRACE_EXTRA_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_dbg
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/hw/hfi1/trace_ibhdrs.h b/drivers/infiniband/hw/hfi1/trace_ibhdrs.h
new file mode 100644
index 000000000..c3e41aed0
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/trace_ibhdrs.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#if !defined(__HFI1_TRACE_IBHDRS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HFI1_TRACE_IBHDRS_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "hfi.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_ibhdrs
+
+u8 ibhdr_exhdr_len(struct hfi1_ib_header *hdr);
+const char *parse_everbs_hdrs(struct trace_seq *p, u8 opcode, void *ehdrs);
+
+#define __parse_ib_ehdrs(op, ehdrs) parse_everbs_hdrs(p, op, ehdrs)
+
+#define lrh_name(lrh) { HFI1_##lrh, #lrh }
+#define show_lnh(lrh) \
+__print_symbolic(lrh, \
+ lrh_name(LRH_BTH), \
+ lrh_name(LRH_GRH))
+
+#define LRH_PRN "vl %d lver %d sl %d lnh %d,%s dlid %.4x len %d slid %.4x"
+#define BTH_PRN \
+ "op 0x%.2x,%s se %d m %d pad %d tver %d pkey 0x%.4x " \
+ "f %d b %d qpn 0x%.6x a %d psn 0x%.8x"
+#define EHDR_PRN "%s"
+
+DECLARE_EVENT_CLASS(hfi1_ibhdr_template,
+ TP_PROTO(struct hfi1_devdata *dd,
+ struct hfi1_ib_header *hdr),
+ TP_ARGS(dd, hdr),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd)
+ /* LRH */
+ __field(u8, vl)
+ __field(u8, lver)
+ __field(u8, sl)
+ __field(u8, lnh)
+ __field(u16, dlid)
+ __field(u16, len)
+ __field(u16, slid)
+ /* BTH */
+ __field(u8, opcode)
+ __field(u8, se)
+ __field(u8, m)
+ __field(u8, pad)
+ __field(u8, tver)
+ __field(u16, pkey)
+ __field(u8, f)
+ __field(u8, b)
+ __field(u32, qpn)
+ __field(u8, a)
+ __field(u32, psn)
+ /* extended headers */
+ __dynamic_array(u8, ehdrs, ibhdr_exhdr_len(hdr))
+ ),
+ TP_fast_assign(
+ struct hfi1_other_headers *ohdr;
+
+ DD_DEV_ASSIGN(dd);
+ /* LRH */
+ __entry->vl =
+ (u8)(be16_to_cpu(hdr->lrh[0]) >> 12);
+ __entry->lver =
+ (u8)(be16_to_cpu(hdr->lrh[0]) >> 8) & 0xf;
+ __entry->sl =
+ (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
+ __entry->lnh =
+ (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
+ __entry->dlid =
+ be16_to_cpu(hdr->lrh[1]);
+ /* allow for larger len */
+ __entry->len =
+ be16_to_cpu(hdr->lrh[2]);
+ __entry->slid =
+ be16_to_cpu(hdr->lrh[3]);
+ /* BTH */
+ if (__entry->lnh == HFI1_LRH_BTH)
+ ohdr = &hdr->u.oth;
+ else
+ ohdr = &hdr->u.l.oth;
+ __entry->opcode =
+ (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
+ __entry->se =
+ (be32_to_cpu(ohdr->bth[0]) >> 23) & 1;
+ __entry->m =
+ (be32_to_cpu(ohdr->bth[0]) >> 22) & 1;
+ __entry->pad =
+ (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ __entry->tver =
+ (be32_to_cpu(ohdr->bth[0]) >> 16) & 0xf;
+ __entry->pkey =
+ be32_to_cpu(ohdr->bth[0]) & 0xffff;
+ __entry->f =
+ (be32_to_cpu(ohdr->bth[1]) >> HFI1_FECN_SHIFT) &
+ HFI1_FECN_MASK;
+ __entry->b =
+ (be32_to_cpu(ohdr->bth[1]) >> HFI1_BECN_SHIFT) &
+ HFI1_BECN_MASK;
+ __entry->qpn =
+ be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
+ __entry->a =
+ (be32_to_cpu(ohdr->bth[2]) >> 31) & 1;
+ /* allow for larger PSN */
+ __entry->psn =
+ be32_to_cpu(ohdr->bth[2]) & 0x7fffffff;
+ /* extended headers */
+ memcpy(__get_dynamic_array(ehdrs), &ohdr->u,
+ ibhdr_exhdr_len(hdr));
+ ),
+ TP_printk("[%s] " LRH_PRN " " BTH_PRN " " EHDR_PRN,
+ __get_str(dev),
+ /* LRH */
+ __entry->vl,
+ __entry->lver,
+ __entry->sl,
+ __entry->lnh, show_lnh(__entry->lnh),
+ __entry->dlid,
+ __entry->len,
+ __entry->slid,
+ /* BTH */
+ __entry->opcode, show_ib_opcode(__entry->opcode),
+ __entry->se,
+ __entry->m,
+ __entry->pad,
+ __entry->tver,
+ __entry->pkey,
+ __entry->f,
+ __entry->b,
+ __entry->qpn,
+ __entry->a,
+ __entry->psn,
+ /* extended headers */
+ __parse_ib_ehdrs(
+ __entry->opcode,
+ (void *)__get_dynamic_array(ehdrs))
+ )
+);
+
+DEFINE_EVENT(hfi1_ibhdr_template, input_ibhdr,
+ TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
+ TP_ARGS(dd, hdr));
+
+DEFINE_EVENT(hfi1_ibhdr_template, pio_output_ibhdr,
+ TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
+ TP_ARGS(dd, hdr));
+
+DEFINE_EVENT(hfi1_ibhdr_template, ack_output_ibhdr,
+ TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
+ TP_ARGS(dd, hdr));
+
+DEFINE_EVENT(hfi1_ibhdr_template, sdma_output_ibhdr,
+ TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
+ TP_ARGS(dd, hdr));
+
+#endif /* __HFI1_TRACE_IBHDRS_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_ibhdrs
+#include <trace/define_trace.h>
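
The LRH decode in the class above unpacks the first 16-bit word as VL in bits 15:12, LVer in 11:8, SL in 7:4, and LNH in 1:0. The same extraction in isolation (the sample word in the comment is made up):

	#include <linux/types.h>

	/* e.g. lrh0 == 0x1232 -> vl 1, lver 2, sl 3, lnh 2 (LRH_BTH). */
	static void decode_lrh0(u16 lrh0, u8 *vl, u8 *lver, u8 *sl, u8 *lnh)
	{
		*vl   = lrh0 >> 12;		/* bits 15:12 */
		*lver = (lrh0 >> 8) & 0xf;	/* bits 11:8  */
		*sl   = (lrh0 >> 4) & 0xf;	/* bits 7:4   */
		*lnh  = lrh0 & 3;		/* bits 1:0   */
	}
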
diff --git a/drivers/infiniband/hw/hfi1/trace_misc.h b/drivers/infiniband/hw/hfi1/trace_misc.h
new file mode 100644
index 000000000..d308454af
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/trace_misc.h
@@ -0,0 +1,81 @@
+/*
+* Copyright(c) 2015, 2016 Intel Corporation.
+*
+* This file is provided under a dual BSD/GPLv2 license. When using or
+* redistributing this file, you may do so under either license.
+*
+* GPL LICENSE SUMMARY
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* BSD LICENSE
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* - Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* - Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* - Neither the name of Intel Corporation nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+#if !defined(__HFI1_TRACE_MISC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HFI1_TRACE_MISC_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "hfi.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_misc
+
+TRACE_EVENT(hfi1_interrupt,
+ TP_PROTO(struct hfi1_devdata *dd, const struct is_table *is_entry,
+ int src),
+ TP_ARGS(dd, is_entry, src),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __array(char, buf, 64)
+ __field(int, src)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd)
+ is_entry->is_name(__entry->buf, 64,
+ src - is_entry->start);
+ __entry->src = src;
+ ),
+ TP_printk("[%s] source: %s [%d]", __get_str(dev), __entry->buf,
+ __entry->src)
+);
+
+#endif /* __HFI1_TRACE_MISC_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_misc
+#include <trace/define_trace.h>
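
The hfi1_interrupt event formats the source name at trace time through the is_table entry's is_name callback, writing into the fixed 64-byte buffer. A hypothetical callback of the shape the call above expects (the name and format are invented; the real tables live in the chip code):

	#include <linux/kernel.h>

	/* Hypothetical formatter matching the is_name usage above. */
	static char *demo_is_name(char *buf, size_t size, unsigned int source)
	{
		snprintf(buf, size, "DemoInt%u", source);
		return buf;
	}
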
diff --git a/drivers/infiniband/hw/hfi1/trace_rc.h b/drivers/infiniband/hw/hfi1/trace_rc.h
new file mode 100644
index 000000000..5ea5005f9
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/trace_rc.h
@@ -0,0 +1,123 @@
+/*
+* Copyright(c) 2015, 2016 Intel Corporation.
+*
+* This file is provided under a dual BSD/GPLv2 license. When using or
+* redistributing this file, you may do so under either license.
+*
+* GPL LICENSE SUMMARY
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* BSD LICENSE
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+*
+* - Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* - Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in
+* the documentation and/or other materials provided with the
+* distribution.
+* - Neither the name of Intel Corporation nor the names of its
+* contributors may be used to endorse or promote products derived
+* from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+#if !defined(__HFI1_TRACE_RC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HFI1_TRACE_RC_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "hfi.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_rc
+
+DECLARE_EVENT_CLASS(hfi1_rc_template,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u32, s_flags)
+ __field(u32, psn)
+ __field(u32, s_psn)
+ __field(u32, s_next_psn)
+ __field(u32, s_sending_psn)
+ __field(u32, s_sending_hpsn)
+ __field(u32, r_psn)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->s_flags = qp->s_flags;
+ __entry->psn = psn;
+ __entry->s_psn = qp->s_psn;
+ __entry->s_next_psn = qp->s_next_psn;
+ __entry->s_sending_psn = qp->s_sending_psn;
+ __entry->s_sending_hpsn = qp->s_sending_hpsn;
+ __entry->r_psn = qp->r_psn;
+ ),
+ TP_printk(
+ "[%s] qpn 0x%x s_flags 0x%x psn 0x%x s_psn 0x%x s_next_psn 0x%x s_sending_psn 0x%x sending_hpsn 0x%x r_psn 0x%x",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->s_flags,
+ __entry->psn,
+ __entry->s_psn,
+ __entry->s_next_psn,
+ __entry->s_sending_psn,
+ __entry->s_sending_hpsn,
+ __entry->r_psn
+ )
+);
+
+DEFINE_EVENT(hfi1_rc_template, hfi1_sendcomplete,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+DEFINE_EVENT(hfi1_rc_template, hfi1_ack,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+DEFINE_EVENT(hfi1_rc_template, hfi1_timeout,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+DEFINE_EVENT(hfi1_rc_template, hfi1_rcv_error,
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn)
+);
+
+#endif /* __HFI1_TRACE_RC_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_rc
+#include <trace/define_trace.h>
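
As elsewhere, each DEFINE_EVENT() above yields a trace_<name>() helper sharing the class's fields. A sketch of firing the timeout event from an assumed retransmit-timer path (the wrapper is illustrative; the real call sites are in the RC send/ack code):

	/* Illustrative only. */
	static void on_rc_timeout(struct rvt_qp *qp)
	{
		trace_hfi1_timeout(qp, qp->s_last_psn + 1);
	}
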
diff --git a/drivers/infiniband/hw/hfi1/trace_rx.h b/drivers/infiniband/hw/hfi1/trace_rx.h
new file mode 100644
index 000000000..9ba1f615e
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/trace_rx.h
@@ -0,0 +1,322 @@
+/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#if !defined(__HFI1_TRACE_RX_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HFI1_TRACE_RX_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "hfi.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_rx
+
+TRACE_EVENT(hfi1_rcvhdr,
+ TP_PROTO(struct hfi1_devdata *dd,
+ u32 ctxt,
+ u64 eflags,
+ u32 etype,
+ u32 hlen,
+ u32 tlen,
+ u32 updegr,
+ u32 etail
+ ),
+ TP_ARGS(dd, ctxt, eflags, etype, hlen, tlen, updegr, etail),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __field(u64, eflags)
+ __field(u32, ctxt)
+ __field(u32, etype)
+ __field(u32, hlen)
+ __field(u32, tlen)
+ __field(u32, updegr)
+ __field(u32, etail)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ __entry->eflags = eflags;
+ __entry->ctxt = ctxt;
+ __entry->etype = etype;
+ __entry->hlen = hlen;
+ __entry->tlen = tlen;
+ __entry->updegr = updegr;
+ __entry->etail = etail;
+ ),
+ TP_printk(
+ "[%s] ctxt %d eflags 0x%llx etype %d,%s hlen %d tlen %d updegr %d etail %d",
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->eflags,
+ __entry->etype, show_packettype(__entry->etype),
+ __entry->hlen,
+ __entry->tlen,
+ __entry->updegr,
+ __entry->etail
+ )
+);
+
+TRACE_EVENT(hfi1_receive_interrupt,
+ TP_PROTO(struct hfi1_devdata *dd, u32 ctxt),
+ TP_ARGS(dd, ctxt),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __field(u32, ctxt)
+ __field(u8, slow_path)
+ __field(u8, dma_rtail)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ if (dd->rcd[ctxt]->do_interrupt ==
+ &handle_receive_interrupt) {
+ __entry->slow_path = 1;
+ __entry->dma_rtail = 0xFF;
+ } else if (dd->rcd[ctxt]->do_interrupt ==
+ &handle_receive_interrupt_dma_rtail) {
+ __entry->dma_rtail = 1;
+ __entry->slow_path = 0;
+ } else if (dd->rcd[ctxt]->do_interrupt ==
+ &handle_receive_interrupt_nodma_rtail) {
+ __entry->dma_rtail = 0;
+ __entry->slow_path = 0;
+ }
+ ),
+ TP_printk("[%s] ctxt %d SlowPath: %d DmaRtail: %d",
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->slow_path,
+ __entry->dma_rtail
+ )
+);
+
+TRACE_EVENT(hfi1_exp_tid_reg,
+ TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr,
+ u32 npages, unsigned long va, unsigned long pa,
+ dma_addr_t dma),
+ TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma),
+ TP_STRUCT__entry(
+ __field(unsigned int, ctxt)
+ __field(u16, subctxt)
+ __field(u32, rarr)
+ __field(u32, npages)
+ __field(unsigned long, va)
+ __field(unsigned long, pa)
+ __field(dma_addr_t, dma)
+ ),
+ TP_fast_assign(
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->rarr = rarr;
+ __entry->npages = npages;
+ __entry->va = va;
+ __entry->pa = pa;
+ __entry->dma = dma;
+ ),
+ TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx, va:0x%lx dma:0x%llx",
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->rarr,
+ __entry->npages,
+ __entry->pa,
+ __entry->va,
+ __entry->dma
+ )
+ );
+
+TRACE_EVENT(hfi1_exp_tid_unreg,
+ TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages,
+ unsigned long va, unsigned long pa, dma_addr_t dma),
+ TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma),
+ TP_STRUCT__entry(
+ __field(unsigned int, ctxt)
+ __field(u16, subctxt)
+ __field(u32, rarr)
+ __field(u32, npages)
+ __field(unsigned long, va)
+ __field(unsigned long, pa)
+ __field(dma_addr_t, dma)
+ ),
+ TP_fast_assign(
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->rarr = rarr;
+ __entry->npages = npages;
+ __entry->va = va;
+ __entry->pa = pa;
+ __entry->dma = dma;
+ ),
+ TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx, va:0x%lx dma:0x%llx",
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->rarr,
+ __entry->npages,
+ __entry->pa,
+ __entry->va,
+ __entry->dma
+ )
+ );
+
+TRACE_EVENT(hfi1_exp_tid_inval,
+ TP_PROTO(unsigned int ctxt, u16 subctxt, unsigned long va, u32 rarr,
+ u32 npages, dma_addr_t dma),
+ TP_ARGS(ctxt, subctxt, va, rarr, npages, dma),
+ TP_STRUCT__entry(
+ __field(unsigned int, ctxt)
+ __field(u16, subctxt)
+ __field(unsigned long, va)
+ __field(u32, rarr)
+ __field(u32, npages)
+ __field(dma_addr_t, dma)
+ ),
+ TP_fast_assign(
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->va = va;
+ __entry->rarr = rarr;
+ __entry->npages = npages;
+ __entry->dma = dma;
+ ),
+ TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx dma: 0x%llx",
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->rarr,
+ __entry->npages,
+ __entry->va,
+ __entry->dma
+ )
+ );
+
+TRACE_EVENT(hfi1_mmu_invalidate,
+ TP_PROTO(unsigned int ctxt, u16 subctxt, const char *type,
+ unsigned long start, unsigned long end),
+ TP_ARGS(ctxt, subctxt, type, start, end),
+ TP_STRUCT__entry(
+ __field(unsigned int, ctxt)
+ __field(u16, subctxt)
+ __string(type, type)
+ __field(unsigned long, start)
+ __field(unsigned long, end)
+ ),
+ TP_fast_assign(
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __assign_str(type, type);
+ __entry->start = start;
+ __entry->end = end;
+ ),
+ TP_printk("[%3u:%02u] MMU Invalidate (%s) 0x%lx - 0x%lx",
+ __entry->ctxt,
+ __entry->subctxt,
+ __get_str(type),
+ __entry->start,
+ __entry->end
+ )
+ );
+
+#define SNOOP_PRN \
+ "slid %.4x dlid %.4x qpn 0x%.6x opcode 0x%.2x,%s " \
+ "svc lvl %d pkey 0x%.4x [header = %d bytes] [data = %d bytes]"
+
+TRACE_EVENT(snoop_capture,
+ TP_PROTO(struct hfi1_devdata *dd,
+ int hdr_len,
+ struct hfi1_ib_header *hdr,
+ int data_len,
+ void *data),
+ TP_ARGS(dd, hdr_len, hdr, data_len, data),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd)
+ __field(u16, slid)
+ __field(u16, dlid)
+ __field(u32, qpn)
+ __field(u8, opcode)
+ __field(u8, sl)
+ __field(u16, pkey)
+ __field(u32, hdr_len)
+ __field(u32, data_len)
+ __field(u8, lnh)
+ __dynamic_array(u8, raw_hdr, hdr_len)
+ __dynamic_array(u8, raw_pkt, data_len)
+ ),
+ TP_fast_assign(
+ struct hfi1_other_headers *ohdr;
+
+ __entry->lnh = (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
+ if (__entry->lnh == HFI1_LRH_BTH)
+ ohdr = &hdr->u.oth;
+ else
+ ohdr = &hdr->u.l.oth;
+ DD_DEV_ASSIGN(dd);
+ __entry->slid = be16_to_cpu(hdr->lrh[3]);
+ __entry->dlid = be16_to_cpu(hdr->lrh[1]);
+ __entry->qpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
+ __entry->opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
+ __entry->sl = (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
+ __entry->pkey = be32_to_cpu(ohdr->bth[0]) & 0xffff;
+ __entry->hdr_len = hdr_len;
+ __entry->data_len = data_len;
+ memcpy(__get_dynamic_array(raw_hdr), hdr, hdr_len);
+ memcpy(__get_dynamic_array(raw_pkt), data, data_len);
+ ),
+ TP_printk(
+ "[%s] " SNOOP_PRN,
+ __get_str(dev),
+ __entry->slid,
+ __entry->dlid,
+ __entry->qpn,
+ __entry->opcode,
+ show_ib_opcode(__entry->opcode),
+ __entry->sl,
+ __entry->pkey,
+ __entry->hdr_len,
+ __entry->data_len
+ )
+);
+
+#endif /* __HFI1_TRACE_RX_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_rx
+#include <trace/define_trace.h>
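
hfi1_receive_interrupt above infers the receive mode by comparing the context's do_interrupt handler against the three known handlers rather than storing a separate flag. The same classification outside the tracepoint, as a sketch:

	/* Sketch of the classification logic in hfi1_receive_interrupt. */
	static void classify_rcv_path(struct hfi1_ctxtdata *rcd,
				      u8 *slow_path, u8 *dma_rtail)
	{
		if (rcd->do_interrupt == &handle_receive_interrupt) {
			*slow_path = 1;		/* generic, checks everything */
			*dma_rtail = 0xFF;	/* not applicable on slow path */
		} else if (rcd->do_interrupt ==
			   &handle_receive_interrupt_dma_rtail) {
			*slow_path = 0;
			*dma_rtail = 1;		/* tail pointer DMA'd by HW */
		} else {
			*slow_path = 0;
			*dma_rtail = 0;		/* tail read from the chip */
		}
	}
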
diff --git a/drivers/infiniband/hw/hfi1/trace_tx.h b/drivers/infiniband/hw/hfi1/trace_tx.h
new file mode 100644
index 000000000..415d6be42
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/trace_tx.h
@@ -0,0 +1,642 @@
+/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#if !defined(__HFI1_TRACE_TX_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HFI1_TRACE_TX_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "hfi.h"
+#include "mad.h"
+#include "sdma.h"
+
+const char *parse_sdma_flags(struct trace_seq *p, u64 desc0, u64 desc1);
+
+#define __parse_sdma_flags(desc0, desc1) parse_sdma_flags(p, desc0, desc1)
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_tx
+
+TRACE_EVENT(hfi1_piofree,
+ TP_PROTO(struct send_context *sc, int extra),
+ TP_ARGS(sc, extra),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
+ __field(u32, sw_index)
+ __field(u32, hw_context)
+ __field(int, extra)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
+ __entry->sw_index = sc->sw_index;
+ __entry->hw_context = sc->hw_context;
+ __entry->extra = extra;
+ ),
+ TP_printk("[%s] ctxt %u(%u) extra %d",
+ __get_str(dev),
+ __entry->sw_index,
+ __entry->hw_context,
+ __entry->extra
+ )
+);
+
+TRACE_EVENT(hfi1_wantpiointr,
+ TP_PROTO(struct send_context *sc, u32 needint, u64 credit_ctrl),
+ TP_ARGS(sc, needint, credit_ctrl),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
+ __field(u32, sw_index)
+ __field(u32, hw_context)
+ __field(u32, needint)
+ __field(u64, credit_ctrl)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
+ __entry->sw_index = sc->sw_index;
+ __entry->hw_context = sc->hw_context;
+ __entry->needint = needint;
+ __entry->credit_ctrl = credit_ctrl;
+ ),
+ TP_printk("[%s] ctxt %u(%u) on %d credit_ctrl 0x%llx",
+ __get_str(dev),
+ __entry->sw_index,
+ __entry->hw_context,
+ __entry->needint,
+ (unsigned long long)__entry->credit_ctrl
+ )
+);
+
+DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template,
+ TP_PROTO(struct rvt_qp *qp, u32 flags),
+ TP_ARGS(qp, flags),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u32, flags)
+ __field(u32, s_flags)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
+ __entry->flags = flags;
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->s_flags = qp->s_flags;
+ ),
+ TP_printk(
+ "[%s] qpn 0x%x flags 0x%x s_flags 0x%x",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->flags,
+ __entry->s_flags
+ )
+);
+
+DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpwakeup,
+ TP_PROTO(struct rvt_qp *qp, u32 flags),
+ TP_ARGS(qp, flags));
+
+DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpsleep,
+ TP_PROTO(struct rvt_qp *qp, u32 flags),
+ TP_ARGS(qp, flags));
+
+TRACE_EVENT(hfi1_sdma_descriptor,
+ TP_PROTO(struct sdma_engine *sde,
+ u64 desc0,
+ u64 desc1,
+ u16 e,
+ void *descp),
+ TP_ARGS(sde, desc0, desc1, e, descp),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
+ __field(void *, descp)
+ __field(u64, desc0)
+ __field(u64, desc1)
+ __field(u16, e)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
+ __entry->desc0 = desc0;
+ __entry->desc1 = desc1;
+ __entry->idx = sde->this_idx;
+ __entry->descp = descp;
+ __entry->e = e;
+ ),
+ TP_printk(
+ "[%s] SDE(%u) flags:%s addr:0x%016llx gen:%u len:%u d0:%016llx d1:%016llx to %p,%u",
+ __get_str(dev),
+ __entry->idx,
+ __parse_sdma_flags(__entry->desc0, __entry->desc1),
+ (__entry->desc0 >> SDMA_DESC0_PHY_ADDR_SHIFT) &
+ SDMA_DESC0_PHY_ADDR_MASK,
+ (u8)((__entry->desc1 >> SDMA_DESC1_GENERATION_SHIFT) &
+ SDMA_DESC1_GENERATION_MASK),
+ (u16)((__entry->desc0 >> SDMA_DESC0_BYTE_COUNT_SHIFT) &
+ SDMA_DESC0_BYTE_COUNT_MASK),
+ __entry->desc0,
+ __entry->desc1,
+ __entry->descp,
+ __entry->e
+ )
+);
+
+TRACE_EVENT(hfi1_sdma_engine_select,
+ TP_PROTO(struct hfi1_devdata *dd, u32 sel, u8 vl, u8 idx),
+ TP_ARGS(dd, sel, vl, idx),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __field(u32, sel)
+ __field(u8, vl)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ __entry->sel = sel;
+ __entry->vl = vl;
+ __entry->idx = idx;
+ ),
+ TP_printk("[%s] selecting SDE %u sel 0x%x vl %u",
+ __get_str(dev),
+ __entry->idx,
+ __entry->sel,
+ __entry->vl
+ )
+);
+
+DECLARE_EVENT_CLASS(hfi1_sdma_engine_class,
+ TP_PROTO(struct sdma_engine *sde, u64 status),
+ TP_ARGS(sde, status),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
+ __field(u64, status)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
+ __entry->status = status;
+ __entry->idx = sde->this_idx;
+ ),
+ TP_printk("[%s] SDE(%u) status %llx",
+ __get_str(dev),
+ __entry->idx,
+ (unsigned long long)__entry->status
+ )
+);
+
+DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_interrupt,
+ TP_PROTO(struct sdma_engine *sde, u64 status),
+ TP_ARGS(sde, status)
+);
+
+DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_progress,
+ TP_PROTO(struct sdma_engine *sde, u64 status),
+ TP_ARGS(sde, status)
+);
+
+DECLARE_EVENT_CLASS(hfi1_sdma_ahg_ad,
+ TP_PROTO(struct sdma_engine *sde, int aidx),
+ TP_ARGS(sde, aidx),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
+ __field(int, aidx)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
+ __entry->idx = sde->this_idx;
+ __entry->aidx = aidx;
+ ),
+ TP_printk("[%s] SDE(%u) aidx %d",
+ __get_str(dev),
+ __entry->idx,
+ __entry->aidx
+ )
+);
+
+DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_allocate,
+ TP_PROTO(struct sdma_engine *sde, int aidx),
+ TP_ARGS(sde, aidx));
+
+DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_deallocate,
+ TP_PROTO(struct sdma_engine *sde, int aidx),
+ TP_ARGS(sde, aidx));
+
+#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
+TRACE_EVENT(hfi1_sdma_progress,
+ TP_PROTO(struct sdma_engine *sde,
+ u16 hwhead,
+ u16 swhead,
+ struct sdma_txreq *txp
+ ),
+ TP_ARGS(sde, hwhead, swhead, txp),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
+ __field(u64, sn)
+ __field(u16, hwhead)
+ __field(u16, swhead)
+ __field(u16, txnext)
+ __field(u16, tx_tail)
+ __field(u16, tx_head)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
+ __entry->hwhead = hwhead;
+ __entry->swhead = swhead;
+ __entry->tx_tail = sde->tx_tail;
+ __entry->tx_head = sde->tx_head;
+ __entry->txnext = txp ? txp->next_descq_idx : ~0;
+ __entry->idx = sde->this_idx;
+ __entry->sn = txp ? txp->sn : ~0;
+ ),
+ TP_printk(
+ "[%s] SDE(%u) sn %llu hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
+ __get_str(dev),
+ __entry->idx,
+ __entry->sn,
+ __entry->hwhead,
+ __entry->swhead,
+ __entry->txnext,
+ __entry->tx_head,
+ __entry->tx_tail
+ )
+);
+#else
+TRACE_EVENT(hfi1_sdma_progress,
+ TP_PROTO(struct sdma_engine *sde,
+ u16 hwhead, u16 swhead,
+ struct sdma_txreq *txp
+ ),
+ TP_ARGS(sde, hwhead, swhead, txp),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
+ __field(u16, hwhead)
+ __field(u16, swhead)
+ __field(u16, txnext)
+ __field(u16, tx_tail)
+ __field(u16, tx_head)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
+ __entry->hwhead = hwhead;
+ __entry->swhead = swhead;
+ __entry->tx_tail = sde->tx_tail;
+ __entry->tx_head = sde->tx_head;
+ __entry->txnext = txp ? txp->next_descq_idx : ~0;
+ __entry->idx = sde->this_idx;
+ ),
+ TP_printk(
+ "[%s] SDE(%u) hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
+ __get_str(dev),
+ __entry->idx,
+ __entry->hwhead,
+ __entry->swhead,
+ __entry->txnext,
+ __entry->tx_head,
+ __entry->tx_tail
+ )
+);
+#endif
+
+DECLARE_EVENT_CLASS(hfi1_sdma_sn,
+ TP_PROTO(struct sdma_engine *sde, u64 sn),
+ TP_ARGS(sde, sn),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
+ __field(u64, sn)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
+ __entry->sn = sn;
+ __entry->idx = sde->this_idx;
+ ),
+ TP_printk("[%s] SDE(%u) sn %llu",
+ __get_str(dev),
+ __entry->idx,
+ __entry->sn
+ )
+);
+
+DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_out_sn,
+ TP_PROTO(
+ struct sdma_engine *sde,
+ u64 sn
+ ),
+ TP_ARGS(sde, sn)
+);
+
+DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_in_sn,
+ TP_PROTO(struct sdma_engine *sde, u64 sn),
+ TP_ARGS(sde, sn)
+);
+
+#define USDMA_HDR_FORMAT \
+ "[%s:%u:%u:%u] PBC=(0x%x 0x%x) LRH=(0x%x 0x%x) BTH=(0x%x 0x%x 0x%x) KDETH=(0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x) TIDVal=0x%x"
+
+TRACE_EVENT(hfi1_sdma_user_header,
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
+ struct hfi1_pkt_header *hdr, u32 tidval),
+ TP_ARGS(dd, ctxt, subctxt, req, hdr, tidval),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd)
+ __field(u16, ctxt)
+ __field(u8, subctxt)
+ __field(u16, req)
+ __field(u32, pbc0)
+ __field(u32, pbc1)
+ __field(u32, lrh0)
+ __field(u32, lrh1)
+ __field(u32, bth0)
+ __field(u32, bth1)
+ __field(u32, bth2)
+ __field(u32, kdeth0)
+ __field(u32, kdeth1)
+ __field(u32, kdeth2)
+ __field(u32, kdeth3)
+ __field(u32, kdeth4)
+ __field(u32, kdeth5)
+ __field(u32, kdeth6)
+ __field(u32, kdeth7)
+ __field(u32, kdeth8)
+ __field(u32, tidval)
+ ),
+ TP_fast_assign(
+ __le32 *pbc = (__le32 *)hdr->pbc;
+ __be32 *lrh = (__be32 *)hdr->lrh;
+ __be32 *bth = (__be32 *)hdr->bth;
+ __le32 *kdeth = (__le32 *)&hdr->kdeth;
+
+ DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->req = req;
+ __entry->pbc0 = le32_to_cpu(pbc[0]);
+ __entry->pbc1 = le32_to_cpu(pbc[1]);
+ __entry->lrh0 = be32_to_cpu(lrh[0]);
+ __entry->lrh1 = be32_to_cpu(lrh[1]);
+ __entry->bth0 = be32_to_cpu(bth[0]);
+ __entry->bth1 = be32_to_cpu(bth[1]);
+ __entry->bth2 = be32_to_cpu(bth[2]);
+ __entry->kdeth0 = le32_to_cpu(kdeth[0]);
+ __entry->kdeth1 = le32_to_cpu(kdeth[1]);
+ __entry->kdeth2 = le32_to_cpu(kdeth[2]);
+ __entry->kdeth3 = le32_to_cpu(kdeth[3]);
+ __entry->kdeth4 = le32_to_cpu(kdeth[4]);
+ __entry->kdeth5 = le32_to_cpu(kdeth[5]);
+ __entry->kdeth6 = le32_to_cpu(kdeth[6]);
+ __entry->kdeth7 = le32_to_cpu(kdeth[7]);
+ __entry->kdeth8 = le32_to_cpu(kdeth[8]);
+ __entry->tidval = tidval;
+ ),
+ TP_printk(USDMA_HDR_FORMAT,
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->req,
+ __entry->pbc1,
+ __entry->pbc0,
+ __entry->lrh0,
+ __entry->lrh1,
+ __entry->bth0,
+ __entry->bth1,
+ __entry->bth2,
+ __entry->kdeth0,
+ __entry->kdeth1,
+ __entry->kdeth2,
+ __entry->kdeth3,
+ __entry->kdeth4,
+ __entry->kdeth5,
+ __entry->kdeth6,
+ __entry->kdeth7,
+ __entry->kdeth8,
+ __entry->tidval
+ )
+);
+
+#define SDMA_UREQ_FMT \
+ "[%s:%u:%u] ver/op=0x%x, iovcnt=%u, npkts=%u, frag=%u, idx=%u"
+TRACE_EVENT(hfi1_sdma_user_reqinfo,
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i),
+ TP_ARGS(dd, ctxt, subctxt, i),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd)
+ __field(u16, ctxt)
+ __field(u8, subctxt)
+ __field(u8, ver_opcode)
+ __field(u8, iovcnt)
+ __field(u16, npkts)
+ __field(u16, fragsize)
+ __field(u16, comp_idx)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->ver_opcode = i[0] & 0xff;
+ __entry->iovcnt = (i[0] >> 8) & 0xff;
+ __entry->npkts = i[1];
+ __entry->fragsize = i[2];
+ __entry->comp_idx = i[3];
+ ),
+ TP_printk(SDMA_UREQ_FMT,
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->ver_opcode,
+ __entry->iovcnt,
+ __entry->npkts,
+ __entry->fragsize,
+ __entry->comp_idx
+ )
+);
+
+#define usdma_complete_name(st) { st, #st }
+#define show_usdma_complete_state(st) \
+ __print_symbolic(st, \
+ usdma_complete_name(FREE), \
+ usdma_complete_name(QUEUED), \
+ usdma_complete_name(COMPLETE), \
+ usdma_complete_name(ERROR))
+
+TRACE_EVENT(hfi1_sdma_user_completion,
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 idx,
+ u8 state, int code),
+ TP_ARGS(dd, ctxt, subctxt, idx, state, code),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd)
+ __field(u16, ctxt)
+ __field(u8, subctxt)
+ __field(u16, idx)
+ __field(u8, state)
+ __field(int, code)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->idx = idx;
+ __entry->state = state;
+ __entry->code = code;
+ ),
+ TP_printk("[%s:%u:%u:%u] SDMA completion state %s (%d)",
+ __get_str(dev), __entry->ctxt, __entry->subctxt,
+ __entry->idx, show_usdma_complete_state(__entry->state),
+ __entry->code)
+);
+
+const char *print_u32_array(struct trace_seq *, u32 *, int);
+#define __print_u32_hex(arr, len) print_u32_array(p, arr, len)
+
+TRACE_EVENT(hfi1_sdma_user_header_ahg,
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
+ u8 sde, u8 ahgidx, u32 *ahg, int len, u32 tidval),
+ TP_ARGS(dd, ctxt, subctxt, req, sde, ahgidx, ahg, len, tidval),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd)
+ __field(u16, ctxt)
+ __field(u8, subctxt)
+ __field(u16, req)
+ __field(u8, sde)
+ __field(u8, idx)
+ __field(int, len)
+ __field(u32, tidval)
+ __array(u32, ahg, 10)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->req = req;
+ __entry->sde = sde;
+ __entry->idx = ahgidx;
+ __entry->len = len;
+ __entry->tidval = tidval;
+ memcpy(__entry->ahg, ahg, len * sizeof(u32));
+ ),
+ TP_printk("[%s:%u:%u:%u] (SDE%u/AHG%u) ahg[0-%d]=(%s) TIDVal=0x%x",
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->req,
+ __entry->sde,
+ __entry->idx,
+ __entry->len - 1,
+ __print_u32_hex(__entry->ahg, __entry->len),
+ __entry->tidval
+ )
+);
+
+TRACE_EVENT(hfi1_sdma_state,
+ TP_PROTO(struct sdma_engine *sde,
+ const char *cstate,
+ const char *nstate
+ ),
+ TP_ARGS(sde, cstate, nstate),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
+ __string(curstate, cstate)
+ __string(newstate, nstate)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
+ __assign_str(curstate, cstate);
+ __assign_str(newstate, nstate);
+ ),
+ TP_printk("[%s] current state %s new state %s",
+ __get_str(dev),
+ __get_str(curstate),
+ __get_str(newstate)
+ )
+);
+
+#define BCT_FORMAT \
+ "shared_limit %x vls 0-7 [%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x] 15 [%x,%x]"
+
+#define BCT(field) \
+ be16_to_cpu( \
+ ((struct buffer_control *)__get_dynamic_array(bct))->field \
+ )
+
+DECLARE_EVENT_CLASS(hfi1_bct_template,
+ TP_PROTO(struct hfi1_devdata *dd,
+ struct buffer_control *bc),
+ TP_ARGS(dd, bc),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __dynamic_array(u8, bct, sizeof(*bc))
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ memcpy(__get_dynamic_array(bct), bc,
+ sizeof(*bc));
+ ),
+ TP_printk(BCT_FORMAT,
+ BCT(overall_shared_limit),
+
+ BCT(vl[0].dedicated),
+ BCT(vl[0].shared),
+
+ BCT(vl[1].dedicated),
+ BCT(vl[1].shared),
+
+ BCT(vl[2].dedicated),
+ BCT(vl[2].shared),
+
+ BCT(vl[3].dedicated),
+ BCT(vl[3].shared),
+
+ BCT(vl[4].dedicated),
+ BCT(vl[4].shared),
+
+ BCT(vl[5].dedicated),
+ BCT(vl[5].shared),
+
+ BCT(vl[6].dedicated),
+ BCT(vl[6].shared),
+
+ BCT(vl[7].dedicated),
+ BCT(vl[7].shared),
+
+ BCT(vl[15].dedicated),
+ BCT(vl[15].shared)
+ )
+);
+
+DEFINE_EVENT(hfi1_bct_template, bct_set,
+ TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
+ TP_ARGS(dd, bc));
+
+DEFINE_EVENT(hfi1_bct_template, bct_get,
+ TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
+ TP_ARGS(dd, bc));
+
+#endif /* __HFI1_TRACE_TX_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_tx
+#include <trace/define_trace.h>
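
hfi1_sdma_descriptor prints the physical address, generation, and byte count by shifting and masking the stored descriptor words with the SDMA_DESC0_*/SDMA_DESC1_* constants from sdma.h. The same decode in isolation:

	/* Field extraction as used in the TP_printk() above. */
	static void decode_sdma_desc(u64 desc0, u64 desc1)
	{
		u64 addr = (desc0 >> SDMA_DESC0_PHY_ADDR_SHIFT) &
			   SDMA_DESC0_PHY_ADDR_MASK;
		u16 len  = (desc0 >> SDMA_DESC0_BYTE_COUNT_SHIFT) &
			   SDMA_DESC0_BYTE_COUNT_MASK;
		u8  gen  = (desc1 >> SDMA_DESC1_GENERATION_SHIFT) &
			   SDMA_DESC1_GENERATION_MASK;

		/* addr/len/gen now match the trace output fields. */
		(void)addr; (void)len; (void)gen;
	}
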
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index df773d433..a726d96d1 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -119,6 +119,31 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
goto bail;
}
/*
+ * Local operations are processed immediately
+ * after all prior requests have completed.
+ */
+ if (wqe->wr.opcode == IB_WR_REG_MR ||
+ wqe->wr.opcode == IB_WR_LOCAL_INV) {
+ int local_ops = 0;
+ int err = 0;
+
+ if (qp->s_last != qp->s_cur)
+ goto bail;
+ if (++qp->s_cur == qp->s_size)
+ qp->s_cur = 0;
+ if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
+ err = rvt_invalidate_rkey(
+ qp, wqe->wr.ex.invalidate_rkey);
+ local_ops = 1;
+ }
+ hfi1_send_complete(qp, wqe, err ? IB_WC_LOC_PROT_ERR
+ : IB_WC_SUCCESS);
+ if (local_ops)
+ atomic_dec(&qp->local_ops_pending);
+ qp->s_hdrwords = 0;
+ goto done_free_tx;
+ }
+ /*
* Start a new request.
*/
qp->s_psn = wqe->psn;
@@ -294,46 +319,12 @@ void hfi1_uc_rcv(struct hfi1_packet *packet)
struct ib_reth *reth;
int has_grh = rcv_flags & HFI1_HAS_GRH;
int ret;
- u32 bth1;
bth0 = be32_to_cpu(ohdr->bth[0]);
if (hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, bth0))
return;
- bth1 = be32_to_cpu(ohdr->bth[1]);
- if (unlikely(bth1 & (HFI1_BECN_SMASK | HFI1_FECN_SMASK))) {
- if (bth1 & HFI1_BECN_SMASK) {
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- u32 rqpn, lqpn;
- u16 rlid = be16_to_cpu(hdr->lrh[3]);
- u8 sl, sc5;
-
- lqpn = bth1 & RVT_QPN_MASK;
- rqpn = qp->remote_qpn;
-
- sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
- sl = ibp->sc_to_sl[sc5];
-
- process_becn(ppd, sl, rlid, lqpn, rqpn,
- IB_CC_SVCTYPE_UC);
- }
-
- if (bth1 & HFI1_FECN_SMASK) {
- struct ib_grh *grh = NULL;
- u16 pkey = (u16)be32_to_cpu(ohdr->bth[0]);
- u16 slid = be16_to_cpu(hdr->lrh[3]);
- u16 dlid = be16_to_cpu(hdr->lrh[1]);
- u32 src_qp = qp->remote_qpn;
- u8 sc5;
-
- sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
- if (has_grh)
- grh = &hdr->u.l.grh;
-
- return_cnp(ibp, qp, src_qp, pkey, dlid, slid, sc5,
- grh);
- }
- }
+ process_ecn(qp, packet, true);
psn = be32_to_cpu(ohdr->bth[2]);
opcode = (bth0 >> 24) & 0xff;
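
The hunk above collapses the open-coded BECN/FECN handling into the shared process_ecn() helper; the boolean tells the helper whether a CNP may be returned for a forward ECN. A rough sketch of the contract inferred from the code it replaces (the body is an assumption, not the driver's implementation):

	/* Assumed shape; see the driver sources for the real helper. */
	static void process_ecn_sketch(struct rvt_qp *qp,
				       struct hfi1_packet *pkt, bool do_cnp)
	{
		u32 bth1 = be32_to_cpu(pkt->ohdr->bth[1]);

		if (bth1 & HFI1_BECN_SMASK) {
			/* feed the congestion-control engine (process_becn()) */
		}
		if (do_cnp && (bth1 & HFI1_FECN_SMASK)) {
			/* answer the forward ECN with a CNP (return_cnp()) */
		}
	}
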
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index be91f6fa1..f01e8e1d6 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -184,8 +184,12 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
}
if (ah_attr->ah_flags & IB_AH_GRH) {
- hfi1_copy_sge(&qp->r_sge, &ah_attr->grh,
- sizeof(struct ib_grh), 1, 0);
+ struct ib_grh grh;
+ struct ib_global_route grd = ah_attr->grh;
+
+ hfi1_make_grh(ibp, &grh, &grd, 0, 0);
+ hfi1_copy_sge(&qp->r_sge, &grh,
+ sizeof(grh), 1, 0);
wc.wc_flags |= IB_WC_GRH;
} else {
hfi1_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
@@ -430,10 +434,9 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
qp->qkey : wqe->ud_wr.remote_qkey);
ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
/* disarm any ahg */
- priv->s_hdr->ahgcount = 0;
- priv->s_hdr->ahgidx = 0;
- priv->s_hdr->tx_flags = 0;
- priv->s_hdr->sde = NULL;
+ priv->s_ahg->ahgcount = 0;
+ priv->s_ahg->ahgidx = 0;
+ priv->s_ahg->tx_flags = 0;
/* pbc */
ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
@@ -665,13 +668,13 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
struct hfi1_other_headers *ohdr = packet->ohdr;
int opcode;
u32 hdrsize = packet->hlen;
- u32 pad;
struct ib_wc wc;
u32 qkey;
u32 src_qp;
u16 dlid, pkey;
int mgmt_pkey_idx = -1;
struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
struct hfi1_ib_header *hdr = packet->hdr;
u32 rcv_flags = packet->rcv_flags;
void *data = packet->ebuf;
@@ -680,52 +683,33 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
bool has_grh = rcv_flags & HFI1_HAS_GRH;
u8 sc5 = hdr2sc((struct hfi1_message_header *)hdr, packet->rhf);
u32 bth1;
- int is_mcast;
- struct ib_grh *grh = NULL;
+ u8 sl_from_sc, sl;
+ u16 slid;
+ u8 extra_bytes;
qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK;
dlid = be16_to_cpu(hdr->lrh[1]);
- is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
- (dlid != be16_to_cpu(IB_LID_PERMISSIVE));
bth1 = be32_to_cpu(ohdr->bth[1]);
- if (unlikely(bth1 & HFI1_BECN_SMASK)) {
- /*
- * In pre-B0 h/w the CNP_OPCODE is handled via an
- * error path.
- */
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- u32 lqpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
- u8 sl;
-
- sl = ibp->sc_to_sl[sc5];
-
- process_becn(ppd, sl, 0, lqpn, 0, IB_CC_SVCTYPE_UD);
- }
+ slid = be16_to_cpu(hdr->lrh[3]);
+ pkey = (u16)be32_to_cpu(ohdr->bth[0]);
+ sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
+ extra_bytes = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ extra_bytes += (SIZE_OF_CRC << 2);
+ sl_from_sc = ibp->sc_to_sl[sc5];
- /*
- * The opcode is in the low byte when its in network order
- * (top byte when in host order).
- */
opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
opcode &= 0xff;
- pkey = (u16)be32_to_cpu(ohdr->bth[0]);
-
- if (!is_mcast && (opcode != IB_OPCODE_CNP) && bth1 & HFI1_FECN_SMASK) {
- u16 slid = be16_to_cpu(hdr->lrh[3]);
-
- return_cnp(ibp, qp, src_qp, pkey, dlid, slid, sc5, grh);
- }
+ process_ecn(qp, packet, (opcode != IB_OPCODE_CNP));
/*
* Get the number of bytes the message was padded by
* and drop incomplete packets.
*/
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- if (unlikely(tlen < (hdrsize + pad + 4)))
+ if (unlikely(tlen < (hdrsize + extra_bytes)))
goto drop;
- tlen -= hdrsize + pad + 4;
+ tlen -= hdrsize + extra_bytes;
/*
* Check that the permissive LID is only used on QP0
@@ -736,10 +720,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
hdr->lrh[3] == IB_LID_PERMISSIVE))
goto drop;
if (qp->ibqp.qp_num > 1) {
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- u16 slid;
-
- slid = be16_to_cpu(hdr->lrh[3]);
if (unlikely(rcv_pkey_check(ppd, pkey, sc5, slid))) {
/*
* Traps will not be sent for packets dropped
@@ -748,12 +728,9 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
* IB spec (release 1.3, section 10.9.4)
*/
hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY,
- pkey,
- (be16_to_cpu(hdr->lrh[0]) >> 4) &
- 0xF,
+ pkey, sl,
src_qp, qp->ibqp.qp_num,
- be16_to_cpu(hdr->lrh[3]),
- be16_to_cpu(hdr->lrh[1]));
+ slid, dlid);
return;
}
} else {
@@ -763,22 +740,18 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
goto drop;
}
if (unlikely(qkey != qp->qkey)) {
- hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_Q_KEY, qkey,
- (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
+ hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_Q_KEY, qkey, sl,
src_qp, qp->ibqp.qp_num,
- be16_to_cpu(hdr->lrh[3]),
- be16_to_cpu(hdr->lrh[1]));
+ slid, dlid);
return;
}
/* Drop invalid MAD packets (see 13.5.3.1). */
if (unlikely(qp->ibqp.qp_num == 1 &&
- (tlen > 2048 ||
- (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
+ (tlen > 2048 || (sc5 == 0xF))))
goto drop;
} else {
/* Received on QP0, and so by definition, this is an SMP */
struct opa_smp *smp = (struct opa_smp *)data;
- u16 slid = be16_to_cpu(hdr->lrh[3]);
if (opa_smp_check(ibp, pkey, sc5, qp, slid, smp))
goto drop;
@@ -861,7 +834,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
qp->ibqp.qp_type == IB_QPT_SMI) {
if (mgmt_pkey_idx < 0) {
if (net_ratelimit()) {
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
struct hfi1_devdata *dd = ppd->dd;
dd_dev_err(dd, "QP type %d mgmt_pkey_idx < 0 and packet not dropped???\n",
@@ -874,8 +846,8 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
wc.pkey_index = 0;
}
- wc.slid = be16_to_cpu(hdr->lrh[3]);
- wc.sl = ibp->sc_to_sl[sc5];
+ wc.slid = slid;
+ wc.sl = sl_from_sc;
/*
* Save the LMC lower bits if the destination LID is a unicast LID.
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index 1b640a35b..64d265254 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -82,24 +82,25 @@ struct tid_pageset {
((unsigned long)vaddr & PAGE_MASK)) >> PAGE_SHIFT))
static void unlock_exp_tids(struct hfi1_ctxtdata *, struct exp_tid_set *,
- struct rb_root *);
+ struct hfi1_filedata *);
static u32 find_phys_blocks(struct page **, unsigned, struct tid_pageset *);
static int set_rcvarray_entry(struct file *, unsigned long, u32,
struct tid_group *, struct page **, unsigned);
-static int mmu_rb_insert(struct rb_root *, struct mmu_rb_node *);
-static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *,
- struct mm_struct *);
-static int mmu_rb_invalidate(struct rb_root *, struct mmu_rb_node *);
+static int tid_rb_insert(void *, struct mmu_rb_node *);
+static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
+ struct tid_rb_node *tnode);
+static void tid_rb_remove(void *, struct mmu_rb_node *);
+static int tid_rb_invalidate(void *, struct mmu_rb_node *);
static int program_rcvarray(struct file *, unsigned long, struct tid_group *,
struct tid_pageset *, unsigned, u16, struct page **,
u32 *, unsigned *, unsigned *);
static int unprogram_rcvarray(struct file *, u32, struct tid_group **);
-static void clear_tid_node(struct hfi1_filedata *, u16, struct tid_rb_node *);
+static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node);
static struct mmu_rb_ops tid_rb_ops = {
- .insert = mmu_rb_insert,
- .remove = mmu_rb_remove,
- .invalidate = mmu_rb_invalidate
+ .insert = tid_rb_insert,
+ .remove = tid_rb_remove,
+ .invalidate = tid_rb_invalidate
};
static inline u32 rcventry2tidinfo(u32 rcventry)
@@ -162,7 +163,6 @@ int hfi1_user_exp_rcv_init(struct file *fp)
spin_lock_init(&fd->tid_lock);
spin_lock_init(&fd->invalid_lock);
- fd->tid_rb_root = RB_ROOT;
if (!uctxt->subctxt_cnt || !fd->subctxt) {
exp_tid_group_init(&uctxt->tid_group_list);
@@ -197,7 +197,7 @@ int hfi1_user_exp_rcv_init(struct file *fp)
if (!fd->entry_to_rb)
return -ENOMEM;
- if (!HFI1_CAP_IS_USET(TID_UNMAP)) {
+ if (!HFI1_CAP_UGET_MASK(uctxt->flags, TID_UNMAP)) {
fd->invalid_tid_idx = 0;
fd->invalid_tids = kzalloc(uctxt->expected_count *
sizeof(u32), GFP_KERNEL);
@@ -208,15 +208,15 @@ int hfi1_user_exp_rcv_init(struct file *fp)
/*
* Register MMU notifier callbacks. If the registration
- * fails, continue but turn off the TID caching for
- * all user contexts.
+ * fails, continue without TID caching for this context.
*/
- ret = hfi1_mmu_rb_register(&fd->tid_rb_root, &tid_rb_ops);
+ ret = hfi1_mmu_rb_register(fd, fd->mm, &tid_rb_ops,
+ dd->pport->hfi1_wq,
+ &fd->handler);
if (ret) {
dd_dev_info(dd,
"Failed MMU notifier registration %d\n",
ret);
- HFI1_CAP_USET(TID_UNMAP);
ret = 0;
}
}
@@ -235,7 +235,7 @@ int hfi1_user_exp_rcv_init(struct file *fp)
* init.
*/
spin_lock(&fd->tid_lock);
- if (uctxt->subctxt_cnt && !HFI1_CAP_IS_USET(TID_UNMAP)) {
+ if (uctxt->subctxt_cnt && fd->handler) {
u16 remainder;
fd->tid_limit = uctxt->expected_count / uctxt->subctxt_cnt;
@@ -261,18 +261,16 @@ int hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
* The notifier would have been removed when the process's mm
* was freed.
*/
- if (!HFI1_CAP_IS_USET(TID_UNMAP))
- hfi1_mmu_rb_unregister(&fd->tid_rb_root);
+ if (fd->handler)
+ hfi1_mmu_rb_unregister(fd->handler);
kfree(fd->invalid_tids);
if (!uctxt->cnt) {
if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
- unlock_exp_tids(uctxt, &uctxt->tid_full_list,
- &fd->tid_rb_root);
+ unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
- unlock_exp_tids(uctxt, &uctxt->tid_used_list,
- &fd->tid_rb_root);
+ unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
list_for_each_entry_safe(grp, gptr, &uctxt->tid_group_list.list,
list) {
list_del_init(&grp->list);
@@ -399,12 +397,12 @@ int hfi1_user_exp_rcv_setup(struct file *fp, struct hfi1_tid_info *tinfo)
* pages, accept the amount pinned so far and program only that.
* User space knows how to deal with partially programmed buffers.
*/
- if (!hfi1_can_pin_pages(dd, fd->tid_n_pinned, npages)) {
+ if (!hfi1_can_pin_pages(dd, fd->mm, fd->tid_n_pinned, npages)) {
ret = -ENOMEM;
goto bail;
}
- pinned = hfi1_acquire_user_pages(vaddr, npages, true, pages);
+ pinned = hfi1_acquire_user_pages(fd->mm, vaddr, npages, true, pages);
if (pinned <= 0) {
ret = pinned;
goto bail;
@@ -559,7 +557,7 @@ nomem:
* for example), unpin all unmapped pages so we can pin them next time.
*/
if (mapped_pages != pinned) {
- hfi1_release_user_pages(current->mm, &pages[mapped_pages],
+ hfi1_release_user_pages(fd->mm, &pages[mapped_pages],
pinned - mapped_pages,
false);
fd->tid_n_pinned -= pinned - mapped_pages;
@@ -829,7 +827,6 @@ static int set_rcvarray_entry(struct file *fp, unsigned long vaddr,
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct tid_rb_node *node;
struct hfi1_devdata *dd = uctxt->dd;
- struct rb_root *root = &fd->tid_rb_root;
dma_addr_t phys;
/*
@@ -861,10 +858,10 @@ static int set_rcvarray_entry(struct file *fp, unsigned long vaddr,
node->freed = false;
memcpy(node->pages, pages, sizeof(struct page *) * npages);
- if (HFI1_CAP_IS_USET(TID_UNMAP))
- ret = mmu_rb_insert(root, &node->mmu);
+ if (!fd->handler)
+ ret = tid_rb_insert(fd, &node->mmu);
else
- ret = hfi1_mmu_rb_insert(root, &node->mmu);
+ ret = hfi1_mmu_rb_insert(fd->handler, &node->mmu);
if (ret) {
hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d",
@@ -904,19 +901,19 @@ static int unprogram_rcvarray(struct file *fp, u32 tidinfo,
node = fd->entry_to_rb[rcventry];
if (!node || node->rcventry != (uctxt->expected_base + rcventry))
return -EBADF;
- if (HFI1_CAP_IS_USET(TID_UNMAP))
- mmu_rb_remove(&fd->tid_rb_root, &node->mmu, NULL);
- else
- hfi1_mmu_rb_remove(&fd->tid_rb_root, &node->mmu);
if (grp)
*grp = node->grp;
- clear_tid_node(fd, fd->subctxt, node);
+
+ if (!fd->handler)
+ cacheless_tid_rb_remove(fd, node);
+ else
+ hfi1_mmu_rb_remove(fd->handler, &node->mmu);
+
return 0;
}
-static void clear_tid_node(struct hfi1_filedata *fd, u16 subctxt,
- struct tid_rb_node *node)
+static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
{
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
@@ -934,7 +931,7 @@ static void clear_tid_node(struct hfi1_filedata *fd, u16 subctxt,
pci_unmap_single(dd->pcidev, node->dma_addr, node->mmu.len,
PCI_DMA_FROMDEVICE);
- hfi1_release_user_pages(current->mm, node->pages, node->npages, true);
+ hfi1_release_user_pages(fd->mm, node->pages, node->npages, true);
fd->tid_n_pinned -= node->npages;
node->grp->used--;
@@ -949,12 +946,15 @@ static void clear_tid_node(struct hfi1_filedata *fd, u16 subctxt,
kfree(node);
}
+/*
+ * As a simple helper for hfi1_user_exp_rcv_free, this function deals with
+ * clearing nodes in the non-cached case.
+ */
static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
- struct exp_tid_set *set, struct rb_root *root)
+ struct exp_tid_set *set,
+ struct hfi1_filedata *fd)
{
struct tid_group *grp, *ptr;
- struct hfi1_filedata *fd = container_of(root, struct hfi1_filedata,
- tid_rb_root);
int i;
list_for_each_entry_safe(grp, ptr, &set->list, list) {
@@ -969,22 +969,23 @@ static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
uctxt->expected_base];
if (!node || node->rcventry != rcventry)
continue;
- if (HFI1_CAP_IS_USET(TID_UNMAP))
- mmu_rb_remove(&fd->tid_rb_root,
- &node->mmu, NULL);
- else
- hfi1_mmu_rb_remove(&fd->tid_rb_root,
- &node->mmu);
- clear_tid_node(fd, -1, node);
+
+ cacheless_tid_rb_remove(fd, node);
}
}
}
}
-static int mmu_rb_invalidate(struct rb_root *root, struct mmu_rb_node *mnode)
+/*
+ * Always return 0 from this function. A non-zero return indicates that the
+ * remove operation will be called and that memory should be unpinned.
+ * However, the driver cannot unpin out from under PSM. Instead, retain the
+ * memory (by returning 0) and inform PSM that the memory is going away. PSM
+ * will call back later when it has removed the memory from its list.
+ */
+static int tid_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
{
- struct hfi1_filedata *fdata =
- container_of(root, struct hfi1_filedata, tid_rb_root);
+ struct hfi1_filedata *fdata = arg;
struct hfi1_ctxtdata *uctxt = fdata->uctxt;
struct tid_rb_node *node =
container_of(mnode, struct tid_rb_node, mmu);
@@ -1025,10 +1026,9 @@ static int mmu_rb_invalidate(struct rb_root *root, struct mmu_rb_node *mnode)
return 0;
}
-static int mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *node)
+static int tid_rb_insert(void *arg, struct mmu_rb_node *node)
{
- struct hfi1_filedata *fdata =
- container_of(root, struct hfi1_filedata, tid_rb_root);
+ struct hfi1_filedata *fdata = arg;
struct tid_rb_node *tnode =
container_of(node, struct tid_rb_node, mmu);
u32 base = fdata->uctxt->expected_base;
@@ -1037,14 +1037,20 @@ static int mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *node)
return 0;
}
-static void mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node,
- struct mm_struct *mm)
+static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
+ struct tid_rb_node *tnode)
{
- struct hfi1_filedata *fdata =
- container_of(root, struct hfi1_filedata, tid_rb_root);
- struct tid_rb_node *tnode =
- container_of(node, struct tid_rb_node, mmu);
u32 base = fdata->uctxt->expected_base;
fdata->entry_to_rb[tnode->rcventry - base] = NULL;
+ clear_tid_node(fdata, tnode);
+}
+
+static void tid_rb_remove(void *arg, struct mmu_rb_node *node)
+{
+ struct hfi1_filedata *fdata = arg;
+ struct tid_rb_node *tnode =
+ container_of(node, struct tid_rb_node, mmu);
+
+ cacheless_tid_rb_remove(fdata, tnode);
}
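The callbacks in this file now take an opaque void *arg instead of recovering the owning structure with container_of() on an embedded rb_root. The argument is supplied once at hfi1_mmu_rb_register() time and handed back to every callback, so one ops table can serve different owners. A compilable model of the pattern (names are stand-ins, not driver API):

#include <stdio.h>

struct mmu_rb_node_model { unsigned long addr, len; };

struct mmu_rb_ops_model {
        int  (*insert)(void *arg, struct mmu_rb_node_model *node);
        void (*remove)(void *arg, struct mmu_rb_node_model *node);
        int  (*invalidate)(void *arg, struct mmu_rb_node_model *node);
};

struct owner { const char *name; };

static int owner_insert(void *arg, struct mmu_rb_node_model *n)
{
        struct owner *o = arg;          /* the registered argument, directly */
        printf("%s: insert [%#lx, +%lu)\n", o->name, n->addr, n->len);
        return 0;
}

int main(void)
{
        struct mmu_rb_ops_model ops = { .insert = owner_insert };
        struct owner fd = { "filedata" };
        struct mmu_rb_node_model node = { 0x1000, 4096 };

        ops.insert(&fd, &node);         /* handler passes arg back verbatim */
        return 0;
}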
diff --git a/drivers/infiniband/hw/hfi1/user_pages.c b/drivers/infiniband/hw/hfi1/user_pages.c
index 88e10b5f5..20f4ddcac 100644
--- a/drivers/infiniband/hw/hfi1/user_pages.c
+++ b/drivers/infiniband/hw/hfi1/user_pages.c
@@ -68,7 +68,8 @@ MODULE_PARM_DESC(cache_size, "Send and receive side cache size limit (in MB)");
* could keep caching buffers.
*
*/
-bool hfi1_can_pin_pages(struct hfi1_devdata *dd, u32 nlocked, u32 npages)
+bool hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm,
+ u32 nlocked, u32 npages)
{
unsigned long ulimit = rlimit(RLIMIT_MEMLOCK), pinned, cache_limit,
size = (cache_size * (1UL << 20)); /* convert to bytes */
@@ -89,9 +90,9 @@ bool hfi1_can_pin_pages(struct hfi1_devdata *dd, u32 nlocked, u32 npages)
/* Convert to number of pages */
size = DIV_ROUND_UP(size, PAGE_SIZE);
- down_read(&current->mm->mmap_sem);
- pinned = current->mm->pinned_vm;
- up_read(&current->mm->mmap_sem);
+ down_read(&mm->mmap_sem);
+ pinned = mm->pinned_vm;
+ up_read(&mm->mmap_sem);
/* First, check the absolute limit against all pinned pages. */
if (pinned + npages >= ulimit && !can_lock)
@@ -100,8 +101,8 @@ bool hfi1_can_pin_pages(struct hfi1_devdata *dd, u32 nlocked, u32 npages)
return ((nlocked + npages) <= size) || can_lock;
}
-int hfi1_acquire_user_pages(unsigned long vaddr, size_t npages, bool writable,
- struct page **pages)
+int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages,
+ bool writable, struct page **pages)
{
int ret;
@@ -109,9 +110,9 @@ int hfi1_acquire_user_pages(unsigned long vaddr, size_t npages, bool writable,
if (ret < 0)
return ret;
- down_write(&current->mm->mmap_sem);
- current->mm->pinned_vm += ret;
- up_write(&current->mm->mmap_sem);
+ down_write(&mm->mmap_sem);
+ mm->pinned_vm += ret;
+ up_write(&mm->mmap_sem);
return ret;
}
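Both helpers above now take an explicit struct mm_struct rather than dereferencing current->mm, so pages pinned on behalf of a process are charged to (and later uncharged from) the same mm even when the call happens from a context where current is a different task. A userspace model of that accounting discipline, with stand-in types:

#include <pthread.h>
#include <stdio.h>

struct mm_model {
        pthread_rwlock_t mmap_sem;
        unsigned long pinned_vm;
};

static void charge_pinned(struct mm_model *mm, unsigned long npages)
{
        pthread_rwlock_wrlock(&mm->mmap_sem);
        mm->pinned_vm += npages;
        pthread_rwlock_unlock(&mm->mmap_sem);
}

static void uncharge_pinned(struct mm_model *mm, unsigned long npages)
{
        pthread_rwlock_wrlock(&mm->mmap_sem);
        mm->pinned_vm -= npages;
        pthread_rwlock_unlock(&mm->mmap_sem);
}

int main(void)
{
        struct mm_model mm = { .mmap_sem = PTHREAD_RWLOCK_INITIALIZER };

        charge_pinned(&mm, 16);
        uncharge_pinned(&mm, 16);       /* always the same, explicit mm */
        printf("pinned_vm = %lu\n", mm.pinned_vm);
        return 0;
}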
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 47ffd273e..1694037d1 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -114,6 +114,8 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12
#define KDETH_HCRC_LOWER_SHIFT 24
#define KDETH_HCRC_LOWER_MASK 0xff
+#define AHG_KDETH_INTR_SHIFT 12
+
#define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
#define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)
@@ -145,7 +147,7 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12
/* Last packet in the request */
#define TXREQ_FLAGS_REQ_LAST_PKT BIT(0)
-#define SDMA_REQ_IN_USE 0
+/* SDMA request flag bits */
#define SDMA_REQ_FOR_THREAD 1
#define SDMA_REQ_SEND_DONE 2
#define SDMA_REQ_HAVE_AHG 3
@@ -183,16 +185,18 @@ struct user_sdma_iovec {
struct sdma_mmu_node *node;
};
-#define SDMA_CACHE_NODE_EVICT 0
-
struct sdma_mmu_node {
struct mmu_rb_node rb;
- struct list_head list;
struct hfi1_user_sdma_pkt_q *pq;
atomic_t refcount;
struct page **pages;
unsigned npages;
- unsigned long flags;
+};
+
+/* evict operation argument */
+struct evict_data {
+ u32 cleared; /* count evicted so far */
+ u32 target; /* target count to evict */
};
struct user_sdma_request {
@@ -305,14 +309,16 @@ static int defer_packet_queue(
unsigned seq);
static void activate_packet_queue(struct iowait *, int);
static bool sdma_rb_filter(struct mmu_rb_node *, unsigned long, unsigned long);
-static int sdma_rb_insert(struct rb_root *, struct mmu_rb_node *);
-static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *,
- struct mm_struct *);
-static int sdma_rb_invalidate(struct rb_root *, struct mmu_rb_node *);
+static int sdma_rb_insert(void *, struct mmu_rb_node *);
+static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
+ void *arg2, bool *stop);
+static void sdma_rb_remove(void *, struct mmu_rb_node *);
+static int sdma_rb_invalidate(void *, struct mmu_rb_node *);
static struct mmu_rb_ops sdma_rb_ops = {
.filter = sdma_rb_filter,
.insert = sdma_rb_insert,
+ .evict = sdma_rb_evict,
.remove = sdma_rb_remove,
.invalidate = sdma_rb_invalidate
};
@@ -397,6 +403,11 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
if (!pq->reqs)
goto pq_reqs_nomem;
+ memsize = BITS_TO_LONGS(hfi1_sdma_comp_ring_size) * sizeof(long);
+ pq->req_in_use = kzalloc(memsize, GFP_KERNEL);
+ if (!pq->req_in_use)
+ goto pq_reqs_no_in_use;
+
INIT_LIST_HEAD(&pq->list);
pq->dd = dd;
pq->ctxt = uctxt->ctxt;
@@ -405,9 +416,8 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
pq->state = SDMA_PKT_Q_INACTIVE;
atomic_set(&pq->n_reqs, 0);
init_waitqueue_head(&pq->wait);
- pq->sdma_rb_root = RB_ROOT;
- INIT_LIST_HEAD(&pq->evict);
- spin_lock_init(&pq->evict_lock);
+ atomic_set(&pq->n_locked, 0);
+ pq->mm = fd->mm;
iowait_init(&pq->busy, 0, NULL, defer_packet_queue,
activate_packet_queue, NULL);
@@ -437,7 +447,8 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
cq->nentries = hfi1_sdma_comp_ring_size;
fd->cq = cq;
- ret = hfi1_mmu_rb_register(&pq->sdma_rb_root, &sdma_rb_ops);
+ ret = hfi1_mmu_rb_register(pq, pq->mm, &sdma_rb_ops, dd->pport->hfi1_wq,
+ &pq->handler);
if (ret) {
dd_dev_err(dd, "Failed to register with MMU %d", ret);
goto done;
@@ -453,6 +464,8 @@ cq_comps_nomem:
cq_nomem:
kmem_cache_destroy(pq->txreq_cache);
pq_txreq_nomem:
+ kfree(pq->req_in_use);
+pq_reqs_no_in_use:
kfree(pq->reqs);
pq_reqs_nomem:
kfree(pq);
@@ -472,8 +485,9 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd)
hfi1_cdbg(SDMA, "[%u:%u:%u] Freeing user SDMA queues", uctxt->dd->unit,
uctxt->ctxt, fd->subctxt);
pq = fd->pq;
- hfi1_mmu_rb_unregister(&pq->sdma_rb_root);
if (pq) {
+ if (pq->handler)
+ hfi1_mmu_rb_unregister(pq->handler);
spin_lock_irqsave(&uctxt->sdma_qlock, flags);
if (!list_empty(&pq->list))
list_del_init(&pq->list);
@@ -484,6 +498,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd)
pq->wait,
(ACCESS_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
kfree(pq->reqs);
+ kfree(pq->req_in_use);
kmem_cache_destroy(pq->txreq_cache);
kfree(pq);
fd->pq = NULL;
@@ -496,10 +511,31 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd)
return 0;
}
+static u8 dlid_to_selector(u16 dlid)
+{
+ static u8 mapping[256];
+ static int initialized;
+ static u8 next;
+ int hash;
+
+ if (!initialized) {
+ memset(mapping, 0xFF, 256);
+ initialized = 1;
+ }
+
+ hash = ((dlid >> 8) ^ dlid) & 0xFF;
+ if (mapping[hash] == 0xFF) {
+ mapping[hash] = next;
+ next = (next + 1) & 0x7F;
+ }
+
+ return mapping[hash];
+}
+
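dlid_to_selector() hands out a stable 7-bit selector per DLID hash bucket: traffic to one destination keeps the same selector (and so tends to stay on one SDMA engine), while new destinations rotate through selectors. A userspace copy of the helper showing the behavior:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Verbatim logic from the hunk above, lifted out for illustration. */
static uint8_t dlid_to_selector(uint16_t dlid)
{
        static uint8_t mapping[256];
        static int initialized;
        static uint8_t next;
        int hash;

        if (!initialized) {
                memset(mapping, 0xFF, 256);
                initialized = 1;
        }

        hash = ((dlid >> 8) ^ dlid) & 0xFF;
        if (mapping[hash] == 0xFF) {
                mapping[hash] = next;
                next = (next + 1) & 0x7F;
        }
        return mapping[hash];
}

int main(void)
{
        /* Same DLID -> same selector; a new DLID gets the next slot. */
        printf("%u %u %u\n", dlid_to_selector(0x1234),
               dlid_to_selector(0x1234), dlid_to_selector(0x4321));
        return 0;
}

The selector is later added to ctxt + subctxt before the VL-based engine selection, so it only biases which engine a flow lands on.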
int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
unsigned long dim, unsigned long *count)
{
- int ret = 0, i = 0;
+ int ret = 0, i;
struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_user_sdma_pkt_q *pq = fd->pq;
@@ -511,6 +547,8 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
struct user_sdma_request *req;
u8 opcode, sc, vl;
int req_queued = 0;
+ u16 dlid;
+ u8 selector;
if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
hfi1_cdbg(
@@ -529,30 +567,48 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, fd->subctxt,
(u16 *)&info);
- if (cq->comps[info.comp_idx].status == QUEUED ||
- test_bit(SDMA_REQ_IN_USE, &pq->reqs[info.comp_idx].flags)) {
- hfi1_cdbg(SDMA, "[%u:%u:%u] Entry %u is in QUEUED state",
- dd->unit, uctxt->ctxt, fd->subctxt,
- info.comp_idx);
- return -EBADSLT;
+
+ if (info.comp_idx >= hfi1_sdma_comp_ring_size) {
+ hfi1_cdbg(SDMA,
+ "[%u:%u:%u:%u] Invalid comp index",
+ dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
+ return -EINVAL;
}
+
+ /*
+ * Sanity check the header I/O vector count: at least one vector (the
+ * header) is required, and the count cannot exceed the actual I/O vector count.
+ */
+ if (req_iovcnt(info.ctrl) < 1 || req_iovcnt(info.ctrl) > dim) {
+ hfi1_cdbg(SDMA,
+ "[%u:%u:%u:%u] Invalid iov count %d, dim %ld",
+ dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx,
+ req_iovcnt(info.ctrl), dim);
+ return -EINVAL;
+ }
+
if (!info.fragsize) {
hfi1_cdbg(SDMA,
"[%u:%u:%u:%u] Request does not specify fragsize",
dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
return -EINVAL;
}
+
+ /* Try to claim the request. */
+ if (test_and_set_bit(info.comp_idx, pq->req_in_use)) {
+ hfi1_cdbg(SDMA, "[%u:%u:%u] Entry %u is in use",
+ dd->unit, uctxt->ctxt, fd->subctxt,
+ info.comp_idx);
+ return -EBADSLT;
+ }
/*
- * We've done all the safety checks that we can up to this point,
- * "allocate" the request entry.
+ * All safety checks have been done and this request has been claimed.
*/
hfi1_cdbg(SDMA, "[%u:%u:%u] Using req/comp entry %u\n", dd->unit,
uctxt->ctxt, fd->subctxt, info.comp_idx);
req = pq->reqs + info.comp_idx;
memset(req, 0, sizeof(*req));
- /* Mark the request as IN_USE before we start filling it in. */
- set_bit(SDMA_REQ_IN_USE, &req->flags);
- req->data_iovs = req_iovcnt(info.ctrl) - 1;
+ req->data_iovs = req_iovcnt(info.ctrl) - 1; /* subtract header vector */
req->pq = pq;
req->cq = cq;
req->status = -1;
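Replacing the per-request SDMA_REQ_IN_USE flag with the pq-wide req_in_use bitmap turns the claim into a single atomic test_and_set_bit(): the first caller to set the bit owns the slot, and a concurrent caller gets -EBADSLT without ever reading a half-initialized request. A small model of the protocol using C11 atomics (the kernel uses test_and_set_bit()/clear_bit()):

#include <stdatomic.h>
#include <stdbool.h>

#define RING_SIZE 128
static atomic_bool req_in_use_model[RING_SIZE];

static bool claim_req(unsigned idx)
{
        /* exchange returns the previous value; false means we won the slot */
        return !atomic_exchange(&req_in_use_model[idx], true);
}

static void release_req(unsigned idx)
{
        atomic_store(&req_in_use_model[idx], false);  /* mirrors clear_bit() */
}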
@@ -560,13 +616,22 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
memcpy(&req->info, &info, sizeof(info));
- if (req_opcode(info.ctrl) == EXPECTED)
+ if (req_opcode(info.ctrl) == EXPECTED) {
+ /* expected must have a TID info and at least one data vector */
+ if (req->data_iovs < 2) {
+ SDMA_DBG(req,
+ "Not enough vectors for expected request");
+ ret = -EINVAL;
+ goto free_req;
+ }
req->data_iovs--;
+ }
if (!info.npkts || req->data_iovs > MAX_VECTORS_PER_REQ) {
SDMA_DBG(req, "Too many vectors (%u/%u)", req->data_iovs,
MAX_VECTORS_PER_REQ);
- return -EINVAL;
+ ret = -EINVAL;
+ goto free_req;
}
/* Copy the header from the user buffer */
ret = copy_from_user(&req->hdr, iovec[idx].iov_base + sizeof(info),
@@ -634,7 +699,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
idx++;
/* Save all the IO vector structures */
- while (i < req->data_iovs) {
+ for (i = 0; i < req->data_iovs; i++) {
INIT_LIST_HEAD(&req->iovs[i].list);
memcpy(&req->iovs[i].iov, iovec + idx++, sizeof(struct iovec));
ret = pin_vector_pages(req, &req->iovs[i]);
@@ -642,7 +707,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
req->status = ret;
goto free_req;
}
- req->data_len += req->iovs[i++].iov.iov_len;
+ req->data_len += req->iovs[i].iov.iov_len;
}
SDMA_DBG(req, "total data length %u", req->data_len);
@@ -686,9 +751,13 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
idx++;
}
+ dlid = be16_to_cpu(req->hdr.lrh[1]);
+ selector = dlid_to_selector(dlid);
+
/* Have to select the engine */
req->sde = sdma_select_engine_vl(dd,
- (u32)(uctxt->ctxt + fd->subctxt),
+ (u32)(uctxt->ctxt + fd->subctxt +
+ selector),
vl);
if (!req->sde || !sdma_running(req->sde)) {
ret = -ECOMM;
@@ -766,14 +835,21 @@ static inline u32 compute_data_length(struct user_sdma_request *req,
* The size of the data of the first packet is in the header
* template. However, it includes the header and ICRC, which need
* to be subtracted.
+ * The minimum packet data length representable in a header is 4 bytes,
+ * so when the requested data length is less than 4 bytes there is only
+ * one packet, and its data length equals the request's data length.
* The size of the remaining packets is the minimum of the frag
* size (MTU) or remaining data in the request.
*/
u32 len;
if (!req->seqnum) {
- len = ((be16_to_cpu(req->hdr.lrh[2]) << 2) -
- (sizeof(tx->hdr) - 4));
+ if (req->data_len < sizeof(u32))
+ len = req->data_len;
+ else
+ len = ((be16_to_cpu(req->hdr.lrh[2]) << 2) -
+ (sizeof(tx->hdr) - 4));
} else if (req_opcode(req->info.ctrl) == EXPECTED) {
u32 tidlen = EXP_TID_GET(req->tids[req->tididx], LEN) *
PAGE_SIZE;
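To make the new comment concrete: the LRH carries the packet length in 4-byte words, so a 1-3 byte request cannot be expressed through the header math and is taken as a single short packet. An illustrative calculation (header size and LRH values are made up for the example, not taken from the driver):

#include <assert.h>
#include <stdint.h>

static uint32_t first_pkt_len(uint32_t data_len, uint16_t lrh2_dwords,
                              uint32_t hdr_size)
{
        if (data_len < sizeof(uint32_t))        /* 1..3 byte request */
                return data_len;                /* single short packet */
        /* otherwise derive it from the header, minus header and ICRC */
        return (lrh2_dwords << 2) - (hdr_size - 4);
}

int main(void)
{
        assert(first_pkt_len(3, 0, 32) == 3);          /* sub-dword request */
        assert(first_pkt_len(4096, 1031, 32) == 4096); /* header-derived */
        return 0;
}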
@@ -803,6 +879,13 @@ static inline u32 compute_data_length(struct user_sdma_request *req,
return len;
}
+static inline u32 pad_len(u32 len)
+{
+ if (len & (sizeof(u32) - 1))
+ len += sizeof(u32) - (len & (sizeof(u32) - 1));
+ return len;
+}
+
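pad_len() rounds a payload length up to the next 4-byte boundary so the LRH length (counted in dwords) stays consistent; it now feeds get_lrh_len() at both call sites further down. A sketch with the branch-free equivalent and a few checks:

#include <assert.h>
#include <stdint.h>

/* pad_len(len) rounds len up to a multiple of 4; the closed form is
 * (len + 3) & ~3u. */
static uint32_t pad_len_model(uint32_t len)
{
        return (len + 3) & ~3u;
}

int main(void)
{
        assert(pad_len_model(0) == 0);
        assert(pad_len_model(1) == 4);
        assert(pad_len_model(4) == 4);
        assert(pad_len_model(4097) == 4100);
        return 0;
}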
static inline u32 get_lrh_len(struct hfi1_pkt_header hdr, u32 len)
{
/* (Size of complete header - size of PBC) + 4B ICRC + data length */
@@ -894,7 +977,8 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
if (test_bit(SDMA_REQ_HAVE_AHG, &req->flags)) {
if (!req->seqnum) {
u16 pbclen = le16_to_cpu(req->hdr.pbc[0]);
- u32 lrhlen = get_lrh_len(req->hdr, datalen);
+ u32 lrhlen = get_lrh_len(req->hdr,
+ pad_len(datalen));
/*
* Copy the request header into the tx header
* because the HW needs a cacheline-aligned
@@ -1048,39 +1132,24 @@ static inline int num_user_pages(const struct iovec *iov)
static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
{
- u32 cleared = 0;
- struct sdma_mmu_node *node, *ptr;
- struct list_head to_evict = LIST_HEAD_INIT(to_evict);
-
- spin_lock(&pq->evict_lock);
- list_for_each_entry_safe_reverse(node, ptr, &pq->evict, list) {
- /* Make sure that no one is still using the node. */
- if (!atomic_read(&node->refcount)) {
- set_bit(SDMA_CACHE_NODE_EVICT, &node->flags);
- list_del_init(&node->list);
- list_add(&node->list, &to_evict);
- cleared += node->npages;
- if (cleared >= npages)
- break;
- }
- }
- spin_unlock(&pq->evict_lock);
-
- list_for_each_entry_safe(node, ptr, &to_evict, list)
- hfi1_mmu_rb_remove(&pq->sdma_rb_root, &node->rb);
+ struct evict_data evict_data;
- return cleared;
+ evict_data.cleared = 0;
+ evict_data.target = npages;
+ hfi1_mmu_rb_evict(pq->handler, &evict_data);
+ return evict_data.cleared;
}
static int pin_vector_pages(struct user_sdma_request *req,
- struct user_sdma_iovec *iovec) {
+ struct user_sdma_iovec *iovec)
+{
int ret = 0, pinned, npages, cleared;
struct page **pages;
struct hfi1_user_sdma_pkt_q *pq = req->pq;
struct sdma_mmu_node *node = NULL;
struct mmu_rb_node *rb_node;
- rb_node = hfi1_mmu_rb_extract(&pq->sdma_rb_root,
+ rb_node = hfi1_mmu_rb_extract(pq->handler,
(unsigned long)iovec->iov.iov_base,
iovec->iov.iov_len);
if (rb_node && !IS_ERR(rb_node))
@@ -1096,7 +1165,6 @@ static int pin_vector_pages(struct user_sdma_request *req,
node->rb.addr = (unsigned long)iovec->iov.iov_base;
node->pq = pq;
atomic_set(&node->refcount, 0);
- INIT_LIST_HEAD(&node->list);
}
npages = num_user_pages(&iovec->iov);
@@ -1111,28 +1179,14 @@ static int pin_vector_pages(struct user_sdma_request *req,
npages -= node->npages;
- /*
- * If rb_node is NULL, it means that this is brand new node
- * and, therefore not on the eviction list.
- * If, however, the rb_node is non-NULL, it means that the
- * node is already in RB tree and, therefore on the eviction
- * list (nodes are unconditionally inserted in the eviction
- * list). In that case, we have to remove the node prior to
- * calling the eviction function in order to prevent it from
- * freeing this node.
- */
- if (rb_node) {
- spin_lock(&pq->evict_lock);
- list_del_init(&node->list);
- spin_unlock(&pq->evict_lock);
- }
retry:
- if (!hfi1_can_pin_pages(pq->dd, pq->n_locked, npages)) {
+ if (!hfi1_can_pin_pages(pq->dd, pq->mm,
+ atomic_read(&pq->n_locked), npages)) {
cleared = sdma_cache_evict(pq, npages);
if (cleared >= npages)
goto retry;
}
- pinned = hfi1_acquire_user_pages(
+ pinned = hfi1_acquire_user_pages(pq->mm,
((unsigned long)iovec->iov.iov_base +
(node->npages * PAGE_SIZE)), npages, 0,
pages + node->npages);
@@ -1142,7 +1196,7 @@ retry:
goto bail;
}
if (pinned != npages) {
- unpin_vector_pages(current->mm, pages, node->npages,
+ unpin_vector_pages(pq->mm, pages, node->npages,
pinned);
ret = -EFAULT;
goto bail;
@@ -1152,28 +1206,22 @@ retry:
node->pages = pages;
node->npages += pinned;
npages = node->npages;
- spin_lock(&pq->evict_lock);
- list_add(&node->list, &pq->evict);
- pq->n_locked += pinned;
- spin_unlock(&pq->evict_lock);
+ atomic_add(pinned, &pq->n_locked);
}
iovec->pages = node->pages;
iovec->npages = npages;
iovec->node = node;
- ret = hfi1_mmu_rb_insert(&req->pq->sdma_rb_root, &node->rb);
+ ret = hfi1_mmu_rb_insert(req->pq->handler, &node->rb);
if (ret) {
- spin_lock(&pq->evict_lock);
- if (!list_empty(&node->list))
- list_del(&node->list);
- pq->n_locked -= node->npages;
- spin_unlock(&pq->evict_lock);
+ atomic_sub(node->npages, &pq->n_locked);
+ iovec->node = NULL;
goto bail;
}
return 0;
bail:
if (rb_node)
- unpin_vector_pages(current->mm, node->pages, 0, node->npages);
+ unpin_vector_pages(pq->mm, node->pages, 0, node->npages);
kfree(node);
return ret;
}
@@ -1181,7 +1229,7 @@ bail:
static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
unsigned start, unsigned npages)
{
- hfi1_release_user_pages(mm, pages + start, npages, 0);
+ hfi1_release_user_pages(mm, pages + start, npages, false);
kfree(pages);
}
@@ -1192,16 +1240,14 @@ static int check_header_template(struct user_sdma_request *req,
/*
* Perform safety checks for any type of packet:
* - transfer size is multiple of 64 bytes
- * - packet length is multiple of 4bytes
- * - entire request length is multiple of 4bytes
+ * - packet length is multiple of 4 bytes
* - packet length is not larger than MTU size
*
* These checks are only done for the first packet of the
* transfer since the header is "given" to us by user space.
* For the remainder of the packets we compute the values.
*/
- if (req->info.fragsize % PIO_BLOCK_SIZE ||
- lrhlen & 0x3 || req->data_len & 0x3 ||
+ if (req->info.fragsize % PIO_BLOCK_SIZE || lrhlen & 0x3 ||
lrhlen > get_lrh_len(*hdr, req->info.fragsize))
return -EINVAL;
@@ -1263,7 +1309,7 @@ static int set_txreq_header(struct user_sdma_request *req,
struct hfi1_pkt_header *hdr = &tx->hdr;
u16 pbclen;
int ret;
- u32 tidval = 0, lrhlen = get_lrh_len(*hdr, datalen);
+ u32 tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen));
/* Copy the header template to the request before modification */
memcpy(hdr, &req->hdr, sizeof(*hdr));
@@ -1374,7 +1420,7 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
struct hfi1_user_sdma_pkt_q *pq = req->pq;
struct hfi1_pkt_header *hdr = &req->hdr;
u16 pbclen = le16_to_cpu(hdr->pbc[0]);
- u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, len);
+ u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(len));
if (PBC2LRH(pbclen) != lrhlen) {
/* PBC.PbcLengthDWs */
@@ -1436,7 +1482,8 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
/* Clear KDETH.SH on last packet */
if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT)) {
val |= cpu_to_le16(KDETH_GET(hdr->kdeth.ver_tid_offset,
- INTR) >> 16);
+ INTR) <<
+ AHG_KDETH_INTR_SHIFT);
val &= cpu_to_le16(~(1U << 13));
AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val);
} else {
@@ -1534,14 +1581,14 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
continue;
if (unpin)
- hfi1_mmu_rb_remove(&req->pq->sdma_rb_root,
+ hfi1_mmu_rb_remove(req->pq->handler,
&node->rb);
else
atomic_dec(&node->refcount);
}
}
kfree(req->tids);
- clear_bit(SDMA_REQ_IN_USE, &req->flags);
+ clear_bit(req->info.comp_idx, req->pq->req_in_use);
}
static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
@@ -1564,7 +1611,7 @@ static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
return (bool)(node->addr == addr);
}
-static int sdma_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode)
+static int sdma_rb_insert(void *arg, struct mmu_rb_node *mnode)
{
struct sdma_mmu_node *node =
container_of(mnode, struct sdma_mmu_node, rb);
@@ -1573,48 +1620,45 @@ static int sdma_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode)
return 0;
}
-static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode,
- struct mm_struct *mm)
+/*
+ * Return 1 to remove the node from the rb tree and call the remove op.
+ *
+ * Called with the rb tree lock held.
+ */
+static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
+ void *evict_arg, bool *stop)
+{
+ struct sdma_mmu_node *node =
+ container_of(mnode, struct sdma_mmu_node, rb);
+ struct evict_data *evict_data = evict_arg;
+
+ /* is this node still being used? */
+ if (atomic_read(&node->refcount))
+ return 0; /* keep this node */
+
+ /* this node will be evicted, add its pages to our count */
+ evict_data->cleared += node->npages;
+
+ /* have enough pages been cleared? */
+ if (evict_data->cleared >= evict_data->target)
+ *stop = true;
+
+ return 1; /* remove this node */
+}
+
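sdma_rb_evict() supplies the per-node policy for the generic evict walk: keep nodes that are still referenced, count the pages of evictable nodes, and set *stop once the target is met. A standalone model of the walk plus callback (stand-in types; the real walk is inside the mmu_rb handler):

#include <stdbool.h>
#include <stdio.h>

struct node_model { int refcount; unsigned npages; };
struct evict_data_model { unsigned cleared, target; };

static int evict_cb(struct node_model *n, struct evict_data_model *ed,
                    bool *stop)
{
        if (n->refcount)
                return 0;               /* still in use: keep */
        ed->cleared += n->npages;       /* will be removed */
        if (ed->cleared >= ed->target)
                *stop = true;
        return 1;
}

int main(void)
{
        struct node_model nodes[] = { {1, 8}, {0, 16}, {0, 32} };
        struct evict_data_model ed = { .cleared = 0, .target = 20 };
        bool stop = false;

        for (unsigned i = 0; i < 3 && !stop; i++)
                if (evict_cb(&nodes[i], &ed, &stop))
                        printf("evict node %u (%u pages)\n", i,
                               nodes[i].npages);
        return 0;
}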
+static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode)
{
struct sdma_mmu_node *node =
container_of(mnode, struct sdma_mmu_node, rb);
- spin_lock(&node->pq->evict_lock);
- /*
- * We've been called by the MMU notifier but this node has been
- * scheduled for eviction. The eviction function will take care
- * of freeing this node.
- * We have to take the above lock first because we are racing
- * against the setting of the bit in the eviction function.
- */
- if (mm && test_bit(SDMA_CACHE_NODE_EVICT, &node->flags)) {
- spin_unlock(&node->pq->evict_lock);
- return;
- }
+ atomic_sub(node->npages, &node->pq->n_locked);
- if (!list_empty(&node->list))
- list_del(&node->list);
- node->pq->n_locked -= node->npages;
- spin_unlock(&node->pq->evict_lock);
+ unpin_vector_pages(node->pq->mm, node->pages, 0, node->npages);
- /*
- * If mm is set, we are being called by the MMU notifier and we
- * should not pass a mm_struct to unpin_vector_page(). This is to
- * prevent a deadlock when hfi1_release_user_pages() attempts to
- * take the mmap_sem, which the MMU notifier has already taken.
- */
- unpin_vector_pages(mm ? NULL : current->mm, node->pages, 0,
- node->npages);
- /*
- * If called by the MMU notifier, we have to adjust the pinned
- * page count ourselves.
- */
- if (mm)
- mm->pinned_vm -= node->npages;
kfree(node);
}
-static int sdma_rb_invalidate(struct rb_root *root, struct mmu_rb_node *mnode)
+static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
{
struct sdma_mmu_node *node =
container_of(mnode, struct sdma_mmu_node, rb);
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
index b9240e351..39001714f 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.h
+++ b/drivers/infiniband/hw/hfi1/user_sdma.h
@@ -63,14 +63,14 @@ struct hfi1_user_sdma_pkt_q {
struct hfi1_devdata *dd;
struct kmem_cache *txreq_cache;
struct user_sdma_request *reqs;
+ unsigned long *req_in_use;
struct iowait busy;
unsigned state;
wait_queue_head_t wait;
unsigned long unpinned;
- struct rb_root sdma_rb_root;
- u32 n_locked;
- struct list_head evict;
- spinlock_t evict_lock; /* protect evict and n_locked */
+ struct mmu_rb_handler *handler;
+ atomic_t n_locked;
+ struct mm_struct *mm;
};
struct hfi1_user_sdma_comp_q {
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 849c4b939..2b3595409 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -306,7 +306,10 @@ const enum ib_wc_opcode ib_hfi1_wc_opcode[] = {
[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
- [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
+ [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
+ [IB_WR_SEND_WITH_INV] = IB_WC_SEND,
+ [IB_WR_LOCAL_INV] = IB_WC_LOCAL_INV,
+ [IB_WR_REG_MR] = IB_WC_REG_MR
};
/*
@@ -378,6 +381,8 @@ static const opcode_handler opcode_handler_tbl[256] = {
[IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = &hfi1_rc_rcv,
[IB_OPCODE_RC_COMPARE_SWAP] = &hfi1_rc_rcv,
[IB_OPCODE_RC_FETCH_ADD] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = &hfi1_rc_rcv,
+ [IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = &hfi1_rc_rcv,
/* UC */
[IB_OPCODE_UC_SEND_FIRST] = &hfi1_uc_rcv,
[IB_OPCODE_UC_SEND_MIDDLE] = &hfi1_uc_rcv,
@@ -540,19 +545,15 @@ void hfi1_skip_sge(struct rvt_sge_state *ss, u32 length, int release)
/*
* Make sure the QP is ready and able to accept the given opcode.
*/
-static inline int qp_ok(int opcode, struct hfi1_packet *packet)
+static inline opcode_handler qp_ok(int opcode, struct hfi1_packet *packet)
{
- struct hfi1_ibport *ibp;
-
if (!(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK))
- goto dropit;
+ return NULL;
if (((opcode & RVT_OPCODE_QP_MASK) == packet->qp->allowed_ops) ||
(opcode == IB_OPCODE_CNP))
- return 1;
-dropit:
- ibp = &packet->rcd->ppd->ibport_data;
- ibp->rvp.n_pkt_drops++;
- return 0;
+ return opcode_handler_tbl[opcode];
+
+ return NULL;
}
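qp_ok() now returns the opcode's handler (or NULL) instead of a boolean, so hfi1_ib_rcv() indexes opcode_handler_tbl exactly once and the drop counting moves to the caller, as the two hunks below show. The shape of the table-driven dispatch, as a compilable sketch with an illustrative opcode value:

#include <stdio.h>

typedef void (*opcode_handler)(int opcode);

static void rc_rcv(int op) { printf("RC handler, opcode %#x\n", op); }

/* 256-entry table: unset opcodes stay NULL and are counted as drops. */
static const opcode_handler handler_tbl[256] = {
        [0x04] = rc_rcv,                /* opcode value is illustrative */
};

static opcode_handler lookup(int opcode, int qp_state_ok)
{
        if (!qp_state_ok)
                return NULL;            /* caller bumps n_pkt_drops */
        return handler_tbl[opcode & 0xff];
}

int main(void)
{
        opcode_handler h = lookup(0x04, 1);
        if (h)
                h(0x04);
        return 0;
}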
/**
@@ -571,6 +572,7 @@ void hfi1_ib_rcv(struct hfi1_packet *packet)
struct hfi1_pportdata *ppd = rcd->ppd;
struct hfi1_ibport *ibp = &ppd->ibport_data;
struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi;
+ opcode_handler packet_handler;
unsigned long flags;
u32 qp_num;
int lnh;
@@ -616,8 +618,11 @@ void hfi1_ib_rcv(struct hfi1_packet *packet)
list_for_each_entry_rcu(p, &mcast->qp_list, list) {
packet->qp = p->qp;
spin_lock_irqsave(&packet->qp->r_lock, flags);
- if (likely((qp_ok(opcode, packet))))
- opcode_handler_tbl[opcode](packet);
+ packet_handler = qp_ok(opcode, packet);
+ if (likely(packet_handler))
+ packet_handler(packet);
+ else
+ ibp->rvp.n_pkt_drops++;
spin_unlock_irqrestore(&packet->qp->r_lock, flags);
}
/*
@@ -634,8 +639,11 @@ void hfi1_ib_rcv(struct hfi1_packet *packet)
goto drop;
}
spin_lock_irqsave(&packet->qp->r_lock, flags);
- if (likely((qp_ok(opcode, packet))))
- opcode_handler_tbl[opcode](packet);
+ packet_handler = qp_ok(opcode, packet);
+ if (likely(packet_handler))
+ packet_handler(packet);
+ else
+ ibp->rvp.n_pkt_drops++;
spin_unlock_irqrestore(&packet->qp->r_lock, flags);
rcu_read_unlock();
}
@@ -808,19 +816,19 @@ static int build_verbs_tx_desc(
struct rvt_sge_state *ss,
u32 length,
struct verbs_txreq *tx,
- struct ahg_ib_header *ahdr,
+ struct hfi1_ahg_info *ahg_info,
u64 pbc)
{
int ret = 0;
- struct hfi1_pio_header *phdr = &tx->phdr;
+ struct hfi1_sdma_header *phdr = &tx->phdr;
u16 hdrbytes = tx->hdr_dwords << 2;
- if (!ahdr->ahgcount) {
+ if (!ahg_info->ahgcount) {
ret = sdma_txinit_ahg(
&tx->txreq,
- ahdr->tx_flags,
+ ahg_info->tx_flags,
hdrbytes + length,
- ahdr->ahgidx,
+ ahg_info->ahgidx,
0,
NULL,
0,
@@ -838,11 +846,11 @@ static int build_verbs_tx_desc(
} else {
ret = sdma_txinit_ahg(
&tx->txreq,
- ahdr->tx_flags,
+ ahg_info->tx_flags,
length,
- ahdr->ahgidx,
- ahdr->ahgcount,
- ahdr->ahgdesc,
+ ahg_info->ahgidx,
+ ahg_info->ahgcount,
+ ahg_info->ahgdesc,
hdrbytes,
verbs_sdma_complete);
if (ret)
@@ -860,7 +868,7 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
u64 pbc)
{
struct hfi1_qp_priv *priv = qp->priv;
- struct ahg_ib_header *ahdr = priv->s_hdr;
+ struct hfi1_ahg_info *ahg_info = priv->s_ahg;
u32 hdrwords = qp->s_hdrwords;
struct rvt_sge_state *ss = qp->s_cur_sge;
u32 len = qp->s_cur_size;
@@ -888,7 +896,7 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
plen);
}
tx->wqe = qp->s_wqe;
- ret = build_verbs_tx_desc(tx->sde, ss, len, tx, ahdr, pbc);
+ ret = build_verbs_tx_desc(tx->sde, ss, len, tx, ahg_info, pbc);
if (unlikely(ret))
goto bail_build;
}
@@ -1291,19 +1299,24 @@ int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
{
struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
+ u16 ver = dd->dc8051_ver;
memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props));
+ rdi->dparms.props.fw_ver = ((u64)(dc8051_ver_maj(ver)) << 16) |
+ (u64)dc8051_ver_min(ver);
rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
- IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
+ IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE |
+ IB_DEVICE_MEM_MGT_EXTENSIONS;
rdi->dparms.props.page_size_cap = PAGE_SIZE;
rdi->dparms.props.vendor_id = dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3;
rdi->dparms.props.vendor_part_id = dd->pcidev->device;
rdi->dparms.props.hw_ver = dd->minrev;
rdi->dparms.props.sys_image_guid = ib_hfi1_sys_image_guid;
- rdi->dparms.props.max_mr_size = ~0ULL;
+ rdi->dparms.props.max_mr_size = U64_MAX;
+ rdi->dparms.props.max_fast_reg_page_list_len = UINT_MAX;
rdi->dparms.props.max_qp = hfi1_max_qps;
rdi->dparms.props.max_qp_wr = hfi1_max_qp_wrs;
rdi->dparms.props.max_sge = hfi1_max_sges;
@@ -1567,6 +1580,17 @@ static void init_ibport(struct hfi1_pportdata *ppd)
RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
}
+static void hfi1_get_dev_fw_str(struct ib_device *ibdev, char *str,
+ size_t str_len)
+{
+ struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
+ struct hfi1_ibdev *dev = dev_from_rdi(rdi);
+ u16 ver = dd_from_dev(dev)->dc8051_ver;
+
+ snprintf(str, str_len, "%u.%u", dc8051_ver_maj(ver),
+ dc8051_ver_min(ver));
+}
+
/**
* hfi1_register_ib_device - register our device with the infiniband core
* @dd: the device data structure
@@ -1613,6 +1637,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
/* keep process mad in the driver */
ibdev->process_mad = hfi1_process_mad;
+ ibdev->get_dev_fw_str = hfi1_get_dev_fw_str;
strncpy(ibdev->node_desc, init_utsname()->nodename,
sizeof(ibdev->node_desc));
@@ -1680,6 +1705,9 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
dd->verbs_dev.rdi.dparms.npkeys = hfi1_get_npkeys(dd);
+ /* post send table */
+ dd->verbs_dev.rdi.post_parms = hfi1_post_parms;
+
ppd = dd->pport;
for (i = 0; i < dd->num_pports; i++, ppd++)
rvt_init_port(&dd->verbs_dev.rdi,
@@ -1730,8 +1758,7 @@ void hfi1_cnp_rcv(struct hfi1_packet *packet)
struct rvt_qp *qp = packet->qp;
u32 lqpn, rqpn = 0;
u16 rlid = 0;
- u8 sl, sc5, sc4_bit, svc_type;
- bool sc4_set = has_sc4_bit(packet);
+ u8 sl, sc5, svc_type;
switch (packet->qp->ibqp.qp_type) {
case IB_QPT_UC:
@@ -1754,9 +1781,7 @@ void hfi1_cnp_rcv(struct hfi1_packet *packet)
return;
}
- sc4_bit = sc4_set << 4;
- sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
- sc5 |= sc4_bit;
+ sc5 = hdr2sc((struct hfi1_message_header *)hdr, packet->rhf);
sl = ibp->sc_to_sl[sc5];
lqpn = qp->ibqp.qp_num;
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index 488356775..d1b101c54 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -178,16 +178,14 @@ struct hfi1_ib_header {
} u;
} __packed;
-struct ahg_ib_header {
- struct sdma_engine *sde;
+struct hfi1_ahg_info {
u32 ahgdesc[2];
u16 tx_flags;
u8 ahgcount;
u8 ahgidx;
- struct hfi1_ib_header ibh;
};
-struct hfi1_pio_header {
+struct hfi1_sdma_header {
__le64 pbc;
struct hfi1_ib_header hdr;
} __packed;
@@ -197,7 +195,7 @@ struct hfi1_pio_header {
* pair is made common
*/
struct hfi1_qp_priv {
- struct ahg_ib_header *s_hdr; /* next header to send */
+ struct hfi1_ahg_info *s_ahg; /* ahg info for next header */
struct sdma_engine *s_sde; /* current sde */
struct send_context *s_sendcontext; /* current sendcontext */
u8 s_sc; /* SC[0..4] for next packet */
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
index a1d6e0807..566089759 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.h
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
@@ -56,7 +56,7 @@
#include "iowait.h"
struct verbs_txreq {
- struct hfi1_pio_header phdr;
+ struct hfi1_sdma_header phdr;
struct sdma_txreq txreq;
struct rvt_qp *qp;
struct rvt_swqe *wqe;
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index b738acdb9..8ec09e470 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -232,7 +232,7 @@ struct i40iw_device {
struct i40e_client *client;
struct i40iw_hw hw;
struct i40iw_cm_core cm_core;
- unsigned long *mem_resources;
+ u8 *mem_resources;
unsigned long *allocated_qps;
unsigned long *allocated_cqs;
unsigned long *allocated_mrs;
@@ -435,8 +435,8 @@ static inline int i40iw_alloc_resource(struct i40iw_device *iwdev,
*next = resource_num + 1;
if (*next == max_resources)
*next = 0;
- spin_unlock_irqrestore(&iwdev->resource_lock, flags);
*req_resource_num = resource_num;
+ spin_unlock_irqrestore(&iwdev->resource_lock, flags);
return 0;
}
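The one-line move above tightens lock scope: *req_resource_num is now written while resource_lock is still held, so the allocation and its publication are atomic with respect to the allocator state. A model of the corrected ordering (pthread mutex standing in for the spinlock):

#include <pthread.h>

static pthread_mutex_t resource_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned next_res;

static int alloc_resource_model(unsigned max, unsigned *out)
{
        pthread_mutex_lock(&resource_lock);
        unsigned n = next_res;
        if (n >= max) {
                pthread_mutex_unlock(&resource_lock);
                return -1;
        }
        next_res = (n + 1) % max;
        *out = n;                       /* publish under the lock */
        pthread_mutex_unlock(&resource_lock);
        return 0;
}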
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index d2fa72516..7ca063857 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -535,8 +535,8 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
buf += hdr_len;
}
- if (pd_len)
- memcpy(buf, pdata->addr, pd_len);
+ if (pdata && pdata->addr)
+ memcpy(buf, pdata->addr, pdata->size);
atomic_set(&sqbuf->refcount, 1);
@@ -1567,12 +1567,12 @@ static enum i40iw_status_code i40iw_del_multiple_qhash(
ret = i40iw_manage_qhash(iwdev, cm_info,
I40IW_QHASH_TYPE_TCP_SYN,
I40IW_QHASH_MANAGE_TYPE_DELETE, NULL, false);
- kfree(child_listen_node);
- cm_parent_listen_node->cm_core->stats_listen_nodes_destroyed++;
i40iw_debug(&iwdev->sc_dev,
I40IW_DEBUG_CM,
"freed pointer = %p\n",
child_listen_node);
+ kfree(child_listen_node);
+ cm_parent_listen_node->cm_core->stats_listen_nodes_destroyed++;
}
spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
@@ -3347,26 +3347,6 @@ int i40iw_cm_disconn(struct i40iw_qp *iwqp)
}
/**
- * i40iw_loopback_nop - Send a nop
- * @qp: associated hw qp
- */
-static void i40iw_loopback_nop(struct i40iw_sc_qp *qp)
-{
- u64 *wqe;
- u64 header;
-
- wqe = qp->qp_uk.sq_base->elem;
- set_64bit_val(wqe, 0, 0);
- set_64bit_val(wqe, 8, 0);
- set_64bit_val(wqe, 16, 0);
-
- header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
- LS_64(0, I40IWQPSQ_SIGCOMPL) |
- LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
- set_64bit_val(wqe, 24, header);
-}
-
-/**
* i40iw_qp_disconnect - free qp and close cm
* @iwqp: associate qp for the connection
*/
@@ -3638,7 +3618,7 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
} else {
if (iwqp->page)
iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
- i40iw_loopback_nop(&iwqp->sc_qp);
+ dev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp, NULL, 0, 0);
}
if (iwqp->page)
diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
index bd942da91..2fac1db0e 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_d.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
@@ -1557,6 +1557,9 @@ enum i40iw_alignment {
#define I40IW_RING_MOVE_TAIL(_ring) \
(_ring).tail = ((_ring).tail + 1) % (_ring).size
+#define I40IW_RING_MOVE_HEAD_NOCHECK(_ring) \
+ (_ring).head = ((_ring).head + 1) % (_ring).size
+
#define I40IW_RING_MOVE_TAIL_BY_COUNT(_ring, _count) \
(_ring).tail = ((_ring).tail + (_count)) % (_ring).size
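I40IW_RING_MOVE_HEAD_NOCHECK mirrors the tail macro: advance modulo size with no fullness check, which suits the CQ poll path changed below since completions are hardware-produced rather than software-posted. Expansion example:

#include <assert.h>

struct ring_model { unsigned head, tail, size; };

#define RING_MOVE_HEAD_NOCHECK(r) ((r).head = ((r).head + 1) % (r).size)

int main(void)
{
        struct ring_model cq = { .head = 2, .tail = 0, .size = 3 };

        RING_MOVE_HEAD_NOCHECK(cq);     /* 2 -> 0: wraps at size */
        assert(cq.head == 0);
        return 0;
}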
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index 3ee0cad96..0c92a40b3 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -265,6 +265,7 @@ void i40iw_next_iw_state(struct i40iw_qp *iwqp,
info.dont_send_fin = false;
if (iwqp->sc_qp.term_flags && (state == I40IW_QP_STATE_ERROR))
info.reset_tcp_conn = true;
+ iwqp->hw_iwarp_state = state;
i40iw_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 6e9081380..445e230d5 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -100,7 +100,7 @@ static struct notifier_block i40iw_net_notifier = {
.notifier_call = i40iw_net_event
};
-static int i40iw_notifiers_registered;
+static atomic_t i40iw_notifiers_registered;
/**
* i40iw_find_i40e_handler - find a handler given a client info
@@ -1342,12 +1342,11 @@ exit:
*/
static void i40iw_register_notifiers(void)
{
- if (!i40iw_notifiers_registered) {
+ if (atomic_inc_return(&i40iw_notifiers_registered) == 1) {
register_inetaddr_notifier(&i40iw_inetaddr_notifier);
register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
register_netevent_notifier(&i40iw_net_notifier);
}
- i40iw_notifiers_registered++;
}
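Switching i40iw_notifiers_registered to an atomic_t makes the register-once/unregister-last logic safe without a lock: atomic_inc_return() == 1 identifies the first registrant, and the matching atomic_dec_return() == 0 in the teardown hunk below identifies the last. The same pattern in C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int registered;

static void register_notifiers_model(void)
{
        /* fetch_add == 0  <=>  kernel's atomic_inc_return() == 1 */
        if (atomic_fetch_add(&registered, 1) == 0)
                printf("first user: register hooks\n");
}

static void unregister_notifiers_model(void)
{
        /* fetch_sub == 1  <=>  kernel's atomic_dec_return() == 0 */
        if (atomic_fetch_sub(&registered, 1) == 1)
                printf("last user: unregister hooks\n");
}

int main(void)
{
        register_notifiers_model();
        register_notifiers_model();     /* second open: no-op */
        unregister_notifiers_model();   /* still one user: no-op */
        unregister_notifiers_model();   /* last close: unregister */
        return 0;
}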
/**
@@ -1429,8 +1428,7 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del
i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
/* fallthrough */
case INET_NOTIFIER:
- if (i40iw_notifiers_registered > 0) {
- i40iw_notifiers_registered--;
+ if (!atomic_dec_return(&i40iw_notifiers_registered)) {
unregister_netevent_notifier(&i40iw_net_notifier);
unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
@@ -1558,6 +1556,10 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
enum i40iw_status_code status;
struct i40iw_handler *hdl;
+ hdl = i40iw_find_netdev(ldev->netdev);
+ if (hdl)
+ return 0;
+
hdl = kzalloc(sizeof(*hdl), GFP_KERNEL);
if (!hdl)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index e9c6e82af..c62d354f7 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -1025,6 +1025,8 @@ static void i40iw_ieq_compl_pfpdu(struct i40iw_puda_rsrc *ieq,
u16 txoffset, bufoffset;
buf = i40iw_puda_get_listbuf(pbufl);
+ if (!buf)
+ return;
nextseqnum = buf->seqnum + fpdu_len;
txbuf->totallen = buf->hdrlen + fpdu_len;
txbuf->data = (u8 *)txbuf->mem.va + buf->hdrlen;
@@ -1048,6 +1050,8 @@ static void i40iw_ieq_compl_pfpdu(struct i40iw_puda_rsrc *ieq,
fpdu_len -= buf->datalen;
i40iw_puda_ret_bufpool(ieq, buf);
buf = i40iw_puda_get_listbuf(pbufl);
+ if (!buf)
+ return;
bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
} while (1);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h
index 16cc61720..2b1a04e9c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_type.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_type.h
@@ -667,7 +667,7 @@ struct i40iw_tcp_offload_info {
bool time_stamp;
u8 cwnd_inc_limit;
bool drop_ooo_seg;
- bool dup_ack_thresh;
+ u8 dup_ack_thresh;
u8 ttl;
u8 src_mac_addr_idx;
bool avoid_stretch_ack;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
index e35faea88..4d28c3cb0 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_uk.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c
@@ -291,9 +291,9 @@ static enum i40iw_status_code i40iw_rdma_write(struct i40iw_qp_uk *qp,
i40iw_set_fragment(wqe, 0, op_info->lo_sg_list);
- for (i = 1; i < op_info->num_lo_sges; i++) {
- byte_off = 32 + (i - 1) * 16;
+ for (i = 1, byte_off = 32; i < op_info->num_lo_sges; i++) {
i40iw_set_fragment(wqe, byte_off, &op_info->lo_sg_list[i]);
+ byte_off += 16;
}
wmb(); /* make sure WQE is populated before valid bit is set */
@@ -401,9 +401,9 @@ static enum i40iw_status_code i40iw_send(struct i40iw_qp_uk *qp,
i40iw_set_fragment(wqe, 0, op_info->sg_list);
- for (i = 1; i < op_info->num_sges; i++) {
- byte_off = 32 + (i - 1) * 16;
+ for (i = 1, byte_off = 32; i < op_info->num_sges; i++) {
i40iw_set_fragment(wqe, byte_off, &op_info->sg_list[i]);
+ byte_off += 16;
}
wmb(); /* make sure WQE is populated before valid bit is set */
@@ -685,9 +685,9 @@ static enum i40iw_status_code i40iw_post_receive(struct i40iw_qp_uk *qp,
i40iw_set_fragment(wqe, 0, info->sg_list);
- for (i = 1; i < info->num_sges; i++) {
- byte_off = 32 + (i - 1) * 16;
+ for (i = 1, byte_off = 32; i < info->num_sges; i++) {
i40iw_set_fragment(wqe, byte_off, &info->sg_list[i]);
+ byte_off += 16;
}
wmb(); /* make sure WQE is populated before valid bit is set */
@@ -753,8 +753,7 @@ static enum i40iw_status_code i40iw_cq_post_entries(struct i40iw_cq_uk *cq,
* @post_cq: update cq tail
*/
static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
- struct i40iw_cq_poll_info *info,
- bool post_cq)
+ struct i40iw_cq_poll_info *info)
{
u64 comp_ctx, qword0, qword2, qword3, wqe_qword;
u64 *cqe, *sw_wqe;
@@ -762,7 +761,6 @@ static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
struct i40iw_ring *pring = NULL;
u32 wqe_idx, q_type, array_idx = 0;
enum i40iw_status_code ret_code = 0;
- enum i40iw_status_code ret_code2 = 0;
bool move_cq_head = true;
u8 polarity;
u8 addl_wqes = 0;
@@ -870,19 +868,14 @@ exit:
move_cq_head = false;
if (move_cq_head) {
- I40IW_RING_MOVE_HEAD(cq->cq_ring, ret_code2);
-
- if (ret_code2 && !ret_code)
- ret_code = ret_code2;
+ I40IW_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
if (I40IW_RING_GETCURRENT_HEAD(cq->cq_ring) == 0)
cq->polarity ^= 1;
- if (post_cq) {
- I40IW_RING_MOVE_TAIL(cq->cq_ring);
- set_64bit_val(cq->shadow_area, 0,
- I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
- }
+ I40IW_RING_MOVE_TAIL(cq->cq_ring);
+ set_64bit_val(cq->shadow_area, 0,
+ I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
} else {
if (info->is_srq)
return ret_code;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_user.h b/drivers/infiniband/hw/i40iw/i40iw_user.h
index 4627646fe..276bcefff 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_user.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_user.h
@@ -327,7 +327,7 @@ struct i40iw_cq_ops {
void (*iw_cq_request_notification)(struct i40iw_cq_uk *,
enum i40iw_completion_notify);
enum i40iw_status_code (*iw_cq_poll_completion)(struct i40iw_cq_uk *,
- struct i40iw_cq_poll_info *, bool);
+ struct i40iw_cq_poll_info *);
enum i40iw_status_code (*iw_cq_post_entries)(struct i40iw_cq_uk *, u8 count);
void (*iw_cq_clean)(void *, struct i40iw_cq_uk *);
};
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 0e8db0a35..6fd043b1d 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -673,8 +673,11 @@ enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw,
{
if (!mem)
return I40IW_ERR_PARAM;
+ /*
+ * mem->va points to the parent of mem, so both mem and mem->va
+ * cannot be touched once mem->va is freed
+ */
kfree(mem->va);
- mem->va = NULL;
return 0;
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 283b64c94..6329c971c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -529,7 +529,7 @@ static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
status = i40iw_get_wqe_shift(rq_size, ukinfo->max_rq_frag_cnt, 0, &rqshift);
if (status)
- return -ENOSYS;
+ return -ENOMEM;
sqdepth = sq_size << sqshift;
rqdepth = rq_size << rqshift;
@@ -671,7 +671,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
if (init_attr->qp_type != IB_QPT_RC) {
- err_code = -ENOSYS;
+ err_code = -EINVAL;
goto error;
}
if (iwdev->push_mode)
@@ -794,7 +794,6 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
return &iwqp->ibqp;
error:
i40iw_free_qp_resources(iwdev, iwqp, qp_num);
- kfree(mem);
return ERR_PTR(err_code);
}
@@ -1840,6 +1839,7 @@ struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *pd,
iwmr->ibmr.lkey = stag;
iwmr->page_cnt = 1;
iwmr->pgaddrmem[0] = addr;
+ iwmr->length = size;
status = i40iw_hwreg_mr(iwdev, iwmr, access);
if (status) {
i40iw_free_stag(iwdev, stag);
@@ -1863,7 +1863,7 @@ static struct ib_mr *i40iw_get_dma_mr(struct ib_pd *pd, int acc)
{
u64 kva = 0;
- return i40iw_reg_phys_mr(pd, 0, 0xffffffffffULL, acc, &kva);
+ return i40iw_reg_phys_mr(pd, 0, 0, acc, &kva);
}
/**
@@ -1925,8 +1925,7 @@ static int i40iw_dereg_mr(struct ib_mr *ib_mr)
}
if (iwpbl->pbl_allocated)
i40iw_free_pble(iwdev->pble_rsrc, palloc);
- kfree(iwpbl->iwmr);
- iwpbl->iwmr = NULL;
+ kfree(iwmr);
return 0;
}
@@ -1975,18 +1974,6 @@ static ssize_t i40iw_show_rev(struct device *dev,
}
/**
- * i40iw_show_fw_ver
- */
-static ssize_t i40iw_show_fw_ver(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- u32 firmware_version = I40IW_FW_VERSION;
-
- return sprintf(buf, "%u.%u\n", firmware_version,
- (firmware_version & 0x000000ff));
-}
-
-/**
* i40iw_show_hca
*/
static ssize_t i40iw_show_hca(struct device *dev,
@@ -2006,13 +1993,11 @@ static ssize_t i40iw_show_board(struct device *dev,
}
static DEVICE_ATTR(hw_rev, S_IRUGO, i40iw_show_rev, NULL);
-static DEVICE_ATTR(fw_ver, S_IRUGO, i40iw_show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, i40iw_show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, i40iw_show_board, NULL);
static struct device_attribute *i40iw_dev_attributes[] = {
&dev_attr_hw_rev,
- &dev_attr_fw_ver,
&dev_attr_hca_type,
&dev_attr_board_id
};
@@ -2091,8 +2076,12 @@ static int i40iw_post_send(struct ib_qp *ibqp,
ret = ukqp->ops.iw_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
}
- if (ret)
- err = -EIO;
+ if (ret) {
+ if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
+ err = -ENOMEM;
+ else
+ err = -EINVAL;
+ }
break;
case IB_WR_RDMA_WRITE:
info.op_type = I40IW_OP_TYPE_RDMA_WRITE;
@@ -2113,8 +2102,12 @@ static int i40iw_post_send(struct ib_qp *ibqp,
ret = ukqp->ops.iw_rdma_write(ukqp, &info, false);
}
- if (ret)
- err = -EIO;
+ if (ret) {
+ if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
+ err = -ENOMEM;
+ else
+ err = -EINVAL;
+ }
break;
case IB_WR_RDMA_READ_WITH_INV:
inv_stag = true;
@@ -2132,15 +2125,19 @@ static int i40iw_post_send(struct ib_qp *ibqp,
info.op.rdma_read.lo_addr.stag = ib_wr->sg_list->lkey;
info.op.rdma_read.lo_addr.len = ib_wr->sg_list->length;
ret = ukqp->ops.iw_rdma_read(ukqp, &info, inv_stag, false);
- if (ret)
- err = -EIO;
+ if (ret) {
+ if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
+ err = -ENOMEM;
+ else
+ err = -EINVAL;
+ }
break;
case IB_WR_LOCAL_INV:
info.op_type = I40IW_OP_TYPE_INV_STAG;
info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
ret = ukqp->ops.iw_stag_local_invalidate(ukqp, &info, true);
if (ret)
- err = -EIO;
+ err = -ENOMEM;
break;
case IB_WR_REG_MR:
{
@@ -2174,7 +2171,7 @@ static int i40iw_post_send(struct ib_qp *ibqp,
ret = dev->iw_priv_qp_ops->iw_mr_fast_register(&iwqp->sc_qp, &info, true);
if (ret)
- err = -EIO;
+ err = -ENOMEM;
break;
}
default:
@@ -2214,6 +2211,7 @@ static int i40iw_post_recv(struct ib_qp *ibqp,
struct i40iw_sge sg_list[I40IW_MAX_WQ_FRAGMENT_COUNT];
enum i40iw_status_code ret = 0;
unsigned long flags;
+ int err = 0;
iwqp = (struct i40iw_qp *)ibqp;
ukqp = &iwqp->sc_qp.qp_uk;
@@ -2228,6 +2226,10 @@ static int i40iw_post_recv(struct ib_qp *ibqp,
ret = ukqp->ops.iw_post_receive(ukqp, &post_recv);
if (ret) {
i40iw_pr_err(" post_recv err %d\n", ret);
+ if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
+ err = -ENOMEM;
+ else
+ err = -EINVAL;
*bad_wr = ib_wr;
goto out;
}
@@ -2235,9 +2237,7 @@ static int i40iw_post_recv(struct ib_qp *ibqp,
}
out:
spin_unlock_irqrestore(&iwqp->lock, flags);
- if (ret)
- return -ENOSYS;
- return 0;
+ return err;
}
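The post-send hunks and the post-receive hunk above all repeat the same status-to-errno mapping. A minimal sketch of that mapping as a standalone helper (the helper name is ours, purely illustrative; the enum value is the driver's):

/*
 * Illustrative sketch only -- not part of the patch. Models the
 * status-to-errno mapping the hunks above introduce.
 */
static int i40iw_status_to_errno(enum i40iw_status_code ret)
{
	if (!ret)
		return 0;
	/* ring full: tell the ULP to back off and retry later */
	if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
		return -ENOMEM;
	/* anything else is a malformed or unsupported request */
	return -EINVAL;
}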
/**
@@ -2264,7 +2264,7 @@ static int i40iw_poll_cq(struct ib_cq *ibcq,
spin_lock_irqsave(&iwcq->lock, flags);
while (cqe_count < num_entries) {
- ret = ukcq->ops.iw_cq_poll_completion(ukcq, &cq_poll_info, true);
+ ret = ukcq->ops.iw_cq_poll_completion(ukcq, &cq_poll_info);
if (ret == I40IW_ERR_QUEUE_EMPTY) {
break;
} else if (ret == I40IW_ERR_QUEUE_DESTROYED) {
@@ -2437,6 +2437,15 @@ static const char * const i40iw_hw_stat_names[] = {
"iwRdmaInv"
};
+static void i40iw_get_dev_fw_str(struct ib_device *dev, char *str,
+ size_t str_len)
+{
+ u32 firmware_version = I40IW_FW_VERSION;
+
+ snprintf(str, str_len, "%u.%u", firmware_version,
+ (firmware_version & 0x000000ff));
+}
+
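The fw_ver sysfs attribute removed earlier in this file is not lost: registering get_dev_fw_str lets the rdma core render a uniform fw_ver entry for every device. A sketch of what the core-side consumer plausibly looks like (function name and buffer size are assumptions, not taken from this patch):

static ssize_t show_fw_ver(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);
	char str[64] = "";

	/* ask the provider to format its firmware version, if it can */
	if (dev->get_dev_fw_str)
		dev->get_dev_fw_str(dev, str, sizeof(str));
	return sprintf(buf, "%s\n", str);
}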
/**
* i40iw_alloc_hw_stats - Allocate a hw stats structure
* @ibdev: device pointer from stack
@@ -2528,7 +2537,7 @@ static int i40iw_modify_port(struct ib_device *ibdev,
int port_modify_mask,
struct ib_port_modify *props)
{
- return 0;
+ return -ENOSYS;
}
/**
@@ -2660,6 +2669,7 @@ static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev
memcpy(iwibdev->ibdev.iwcm->ifname, netdev->name,
sizeof(iwibdev->ibdev.iwcm->ifname));
iwibdev->ibdev.get_port_immutable = i40iw_port_immutable;
+ iwibdev->ibdev.get_dev_fw_str = i40iw_get_dev_fw_str;
iwibdev->ibdev.poll_cq = i40iw_poll_cq;
iwibdev->ibdev.req_notify_cq = i40iw_req_notify_cq;
iwibdev->ibdev.post_send = i40iw_post_send;
@@ -2723,7 +2733,7 @@ int i40iw_register_rdma_device(struct i40iw_device *iwdev)
iwdev->iwibdev = i40iw_init_rdma_device(iwdev);
if (!iwdev->iwibdev)
- return -ENOSYS;
+ return -ENOMEM;
iwibdev = iwdev->iwibdev;
ret = ib_register_device(&iwibdev->ibdev, NULL);
@@ -2748,5 +2758,5 @@ error:
kfree(iwdev->iwibdev->ibdev.iwcm);
iwdev->iwibdev->ibdev.iwcm = NULL;
ib_dealloc_device(&iwdev->iwibdev->ibdev);
- return -ENOSYS;
+ return ret;
}
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 9f8b516eb..5df63daca 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -288,7 +288,7 @@ static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
if (cq->resize_buf)
return -EBUSY;
- cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
+ cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_KERNEL);
if (!cq->resize_buf)
return -ENOMEM;
@@ -316,7 +316,7 @@ static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq
if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
return -EFAULT;
- cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
+ cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_KERNEL);
if (!cq->resize_buf)
return -ENOMEM;
@@ -576,8 +576,8 @@ static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
checksum == cpu_to_be16(0xffff);
}
-static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
- unsigned tail, struct mlx4_cqe *cqe, int is_eth)
+static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
+ unsigned tail, struct mlx4_cqe *cqe, int is_eth)
{
struct mlx4_ib_proxy_sqp_hdr *hdr;
@@ -600,8 +600,6 @@ static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct
wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32);
wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
}
-
- return 0;
}
static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries,
@@ -689,12 +687,6 @@ repoll:
is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
MLX4_CQE_OPCODE_ERROR;
- if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
- is_send)) {
- pr_warn("Completion for NOP opcode detected!\n");
- return -EINVAL;
- }
-
/* Resize CQ in progress */
if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
if (cq->resize_buf) {
@@ -720,12 +712,6 @@ repoll:
*/
mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
be32_to_cpu(cqe->vlan_my_qpn));
- if (unlikely(!mqp)) {
- pr_warn("CQ %06x with entry for unknown QPN %06x\n",
- cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
- return -EINVAL;
- }
-
*cur_qp = to_mibqp(mqp);
}
@@ -738,11 +724,6 @@ repoll:
/* SRQ is also in the radix tree */
msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
srq_num);
- if (unlikely(!msrq)) {
- pr_warn("CQ %06x with entry for unknown SRQN %06x\n",
- cq->mcq.cqn, srq_num);
- return -EINVAL;
- }
}
if (is_send) {
@@ -852,9 +833,11 @@ repoll:
if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
if ((*cur_qp)->mlx4_ib_qp_type &
(MLX4_IB_QPT_PROXY_SMI_OWNER |
- MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
- return use_tunnel_data(*cur_qp, cq, wc, tail,
- cqe, is_eth);
+ MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) {
+ use_tunnel_data(*cur_qp, cq, wc, tail, cqe,
+ is_eth);
+ return 0;
+ }
}
wc->slid = be16_to_cpu(cqe->rlid);
@@ -891,7 +874,6 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
struct mlx4_ib_qp *cur_qp = NULL;
unsigned long flags;
int npolled;
- int err = 0;
struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);
spin_lock_irqsave(&cq->lock, flags);
@@ -901,8 +883,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
}
for (npolled = 0; npolled < num_entries; ++npolled) {
- err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
- if (err)
+ if (mlx4_ib_poll_one(cq, &cur_qp, wc + npolled))
break;
}
@@ -911,10 +892,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
out:
spin_unlock_irqrestore(&cq->lock, flags);
- if (err == 0 || err == -EAGAIN)
- return npolled;
- else
- return err;
+ return npolled;
}
int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 9c2e53d28..0f21c3a25 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1128,6 +1128,27 @@ void handle_port_mgmt_change_event(struct work_struct *work)
/* Generate GUID changed event */
if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) {
+ if (mlx4_is_master(dev->dev)) {
+ union ib_gid gid;
+ int err = 0;
+
+ if (!eqe->event.port_mgmt_change.params.port_info.gid_prefix)
+ err = __mlx4_ib_query_gid(&dev->ib_dev, port, 0, &gid, 1);
+ else
+ gid.global.subnet_prefix =
+ eqe->event.port_mgmt_change.params.port_info.gid_prefix;
+ if (err) {
+ pr_warn("Could not change QP1 subnet prefix for port %d: query_gid error (%d)\n",
+ port, err);
+ } else {
+ pr_debug("Changing QP1 subnet prefix for port %d. old=0x%llx. new=0x%llx\n",
+ port,
+ (u64)atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix),
+ be64_to_cpu(gid.global.subnet_prefix));
+ atomic64_set(&dev->sriov.demux[port - 1].subnet_prefix,
+ be64_to_cpu(gid.global.subnet_prefix));
+ }
+ }
mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
/*if master, notify all slaves*/
if (mlx4_is_master(dev->dev))
@@ -2202,6 +2223,8 @@ int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
if (err)
goto demux_err;
dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
+ atomic64_set(&dev->sriov.demux[i].subnet_prefix,
+ be64_to_cpu(gid.global.subnet_prefix));
err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
&dev->sriov.sqps[i]);
if (err)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 42a46078d..87ba9bca4 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -2025,16 +2025,6 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr,
return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
}
-static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct mlx4_ib_dev *dev =
- container_of(device, struct mlx4_ib_dev, ib_dev.dev);
- return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
- (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
- (int) dev->dev->caps.fw_ver & 0xffff);
-}
-
static ssize_t show_rev(struct device *device, struct device_attribute *attr,
char *buf)
{
@@ -2053,17 +2043,207 @@ static ssize_t show_board(struct device *device, struct device_attribute *attr,
}
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
static struct device_attribute *mlx4_class_attributes[] = {
&dev_attr_hw_rev,
- &dev_attr_fw_ver,
&dev_attr_hca_type,
&dev_attr_board_id
};
+struct diag_counter {
+ const char *name;
+ u32 offset;
+};
+
+#define DIAG_COUNTER(_name, _offset) \
+ { .name = #_name, .offset = _offset }
+
+static const struct diag_counter diag_basic[] = {
+ DIAG_COUNTER(rq_num_lle, 0x00),
+ DIAG_COUNTER(sq_num_lle, 0x04),
+ DIAG_COUNTER(rq_num_lqpoe, 0x08),
+ DIAG_COUNTER(sq_num_lqpoe, 0x0C),
+ DIAG_COUNTER(rq_num_lpe, 0x18),
+ DIAG_COUNTER(sq_num_lpe, 0x1C),
+ DIAG_COUNTER(rq_num_wrfe, 0x20),
+ DIAG_COUNTER(sq_num_wrfe, 0x24),
+ DIAG_COUNTER(sq_num_mwbe, 0x2C),
+ DIAG_COUNTER(sq_num_bre, 0x34),
+ DIAG_COUNTER(sq_num_rire, 0x44),
+ DIAG_COUNTER(rq_num_rire, 0x48),
+ DIAG_COUNTER(sq_num_rae, 0x4C),
+ DIAG_COUNTER(rq_num_rae, 0x50),
+ DIAG_COUNTER(sq_num_roe, 0x54),
+ DIAG_COUNTER(sq_num_tree, 0x5C),
+ DIAG_COUNTER(sq_num_rree, 0x64),
+ DIAG_COUNTER(rq_num_rnr, 0x68),
+ DIAG_COUNTER(sq_num_rnr, 0x6C),
+ DIAG_COUNTER(rq_num_oos, 0x100),
+ DIAG_COUNTER(sq_num_oos, 0x104),
+};
+
+static const struct diag_counter diag_ext[] = {
+ DIAG_COUNTER(rq_num_dup, 0x130),
+ DIAG_COUNTER(sq_num_to, 0x134),
+};
+
+static const struct diag_counter diag_device_only[] = {
+ DIAG_COUNTER(num_cqovf, 0x1A0),
+ DIAG_COUNTER(rq_num_udsdprd, 0x118),
+};
+
+static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev,
+ u8 port_num)
+{
+ struct mlx4_ib_dev *dev = to_mdev(ibdev);
+ struct mlx4_ib_diag_counters *diag = dev->diag_counters;
+
+ if (!diag[!!port_num].name)
+ return NULL;
+
+ return rdma_alloc_hw_stats_struct(diag[!!port_num].name,
+ diag[!!port_num].num_counters,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
+static int mlx4_ib_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u8 port, int index)
+{
+ struct mlx4_ib_dev *dev = to_mdev(ibdev);
+ struct mlx4_ib_diag_counters *diag = dev->diag_counters;
+ u32 hw_value[ARRAY_SIZE(diag_device_only) +
+ ARRAY_SIZE(diag_ext) + ARRAY_SIZE(diag_basic)] = {};
+ int ret;
+ int i;
+
+ ret = mlx4_query_diag_counters(dev->dev,
+ MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS,
+ diag[!!port].offset, hw_value,
+ diag[!!port].num_counters, port);
+
+ if (ret)
+ return ret;
+
+ for (i = 0; i < diag[!!port].num_counters; i++)
+ stats->value[i] = hw_value[i];
+
+ return diag[!!port].num_counters;
+}
+
+static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
+ const char ***name,
+ u32 **offset,
+ u32 *num,
+ bool port)
+{
+ u32 num_counters;
+
+ num_counters = ARRAY_SIZE(diag_basic);
+
+ if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT)
+ num_counters += ARRAY_SIZE(diag_ext);
+
+ if (!port)
+ num_counters += ARRAY_SIZE(diag_device_only);
+
+ *name = kcalloc(num_counters, sizeof(**name), GFP_KERNEL);
+ if (!*name)
+ return -ENOMEM;
+
+ *offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL);
+ if (!*offset)
+ goto err_name;
+
+ *num = num_counters;
+
+ return 0;
+
+err_name:
+ kfree(*name);
+ return -ENOMEM;
+}
+
+static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev,
+ const char **name,
+ u32 *offset,
+ bool port)
+{
+ int i;
+ int j;
+
+ for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) {
+ name[i] = diag_basic[i].name;
+ offset[i] = diag_basic[i].offset;
+ }
+
+ if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) {
+ for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) {
+ name[j] = diag_ext[i].name;
+ offset[j] = diag_ext[i].offset;
+ }
+ }
+
+ if (!port) {
+ for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) {
+ name[j] = diag_device_only[i].name;
+ offset[j] = diag_device_only[i].offset;
+ }
+ }
+}
+
+static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
+{
+ struct mlx4_ib_diag_counters *diag = ibdev->diag_counters;
+ int i;
+ int ret;
+ bool per_port = !!(ibdev->dev->caps.flags2 &
+ MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);
+
+ if (mlx4_is_slave(ibdev->dev))
+ return 0;
+
+ for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
+ /* i == 1 means we are building port counters */
+ if (i && !per_port)
+ continue;
+
+ ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name,
+ &diag[i].offset,
+ &diag[i].num_counters, i);
+ if (ret)
+ goto err_alloc;
+
+ mlx4_ib_fill_diag_counters(ibdev, diag[i].name,
+ diag[i].offset, i);
+ }
+
+ ibdev->ib_dev.get_hw_stats = mlx4_ib_get_hw_stats;
+ ibdev->ib_dev.alloc_hw_stats = mlx4_ib_alloc_hw_stats;
+
+ return 0;
+
+err_alloc:
+ if (i) {
+ kfree(diag[i - 1].name);
+ kfree(diag[i - 1].offset);
+ }
+
+ return ret;
+}
+
+static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev)
+{
+ int i;
+
+ for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
+ kfree(ibdev->diag_counters[i].offset);
+ kfree(ibdev->diag_counters[i].name);
+ }
+}
+
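Once alloc_hw_stats/get_hw_stats are wired up in mlx4_ib_alloc_diag_counters() above, the rdma core should expose these counters read-only under sysfs, typically /sys/class/infiniband/<dev>/ports/<port>/hw_counters/ for the per-port set and the device directory for the device-only set, with reads throttled by the lifespan passed to rdma_alloc_hw_stats_struct().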
#define MLX4_IB_INVALID_MAC ((u64)-1)
static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
struct net_device *dev,
@@ -2280,6 +2460,17 @@ static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
+static void get_fw_ver_str(struct ib_device *device, char *str,
+ size_t str_len)
+{
+ struct mlx4_ib_dev *dev =
+ container_of(device, struct mlx4_ib_dev, ib_dev);
+ snprintf(str, str_len, "%d.%d.%d",
+ (int) (dev->dev->caps.fw_ver >> 32),
+ (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
+ (int) dev->dev->caps.fw_ver & 0xffff);
+}
+
static void *mlx4_ib_add(struct mlx4_dev *dev)
{
struct mlx4_ib_dev *ibdev;
@@ -2413,6 +2604,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach;
ibdev->ib_dev.process_mad = mlx4_ib_process_mad;
ibdev->ib_dev.get_port_immutable = mlx4_port_immutable;
+ ibdev->ib_dev.get_dev_fw_str = get_fw_ver_str;
ibdev->ib_dev.disassociate_ucontext = mlx4_ib_disassociate_ucontext;
if (!mlx4_is_slave(ibdev->dev)) {
@@ -2555,9 +2747,12 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
- if (ib_register_device(&ibdev->ib_dev, NULL))
+ if (mlx4_ib_alloc_diag_counters(ibdev))
goto err_steer_free_bitmap;
+ if (ib_register_device(&ibdev->ib_dev, NULL))
+ goto err_diag_counters;
+
if (mlx4_ib_mad_init(ibdev))
goto err_reg;
@@ -2623,6 +2818,9 @@ err_mad:
err_reg:
ib_unregister_device(&ibdev->ib_dev);
+err_diag_counters:
+ mlx4_ib_diag_cleanup(ibdev);
+
err_steer_free_bitmap:
kfree(ibdev->ib_uc_qpns_bitmap);
@@ -2726,6 +2924,7 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
mlx4_ib_close_sriov(ibdev);
mlx4_ib_mad_cleanup(ibdev);
ib_unregister_device(&ibdev->ib_dev);
+ mlx4_ib_diag_cleanup(ibdev);
if (ibdev->iboe.nb.notifier_call) {
if (unregister_netdevice_notifier(&ibdev->iboe.nb))
pr_warn("failure unregistering notifier\n");
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 8f7ad0791..097bfcc4e 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -489,7 +489,7 @@ static u8 get_leave_state(struct mcast_group *group)
if (!group->members[i])
leave_state |= (1 << i);
- return leave_state & (group->rec.scope_join_state & 7);
+ return leave_state & (group->rec.scope_join_state & 0xf);
}
static int join_group(struct mcast_group *group, int slave, u8 join_mask)
@@ -564,8 +564,8 @@ static void mlx4_ib_mcg_timeout_handler(struct work_struct *work)
} else
mcg_warn_group(group, "DRIVER BUG\n");
} else if (group->state == MCAST_LEAVE_SENT) {
- if (group->rec.scope_join_state & 7)
- group->rec.scope_join_state &= 0xf8;
+ if (group->rec.scope_join_state & 0xf)
+ group->rec.scope_join_state &= 0xf0;
group->state = MCAST_IDLE;
mutex_unlock(&group->lock);
if (release_group(group, 1))
@@ -605,7 +605,7 @@ static int handle_leave_req(struct mcast_group *group, u8 leave_mask,
static int handle_join_req(struct mcast_group *group, u8 join_mask,
struct mcast_req *req)
{
- u8 group_join_state = group->rec.scope_join_state & 7;
+ u8 group_join_state = group->rec.scope_join_state & 0xf;
int ref = 0;
u16 status;
struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
@@ -690,8 +690,8 @@ static void mlx4_ib_mcg_work_handler(struct work_struct *work)
u8 cur_join_state;
resp_join_state = ((struct ib_sa_mcmember_data *)
- group->response_sa_mad.data)->scope_join_state & 7;
- cur_join_state = group->rec.scope_join_state & 7;
+ group->response_sa_mad.data)->scope_join_state & 0xf;
+ cur_join_state = group->rec.scope_join_state & 0xf;
if (method == IB_MGMT_METHOD_GET_RESP) {
/* successful join */
@@ -710,7 +710,7 @@ process_requests:
req = list_first_entry(&group->pending_list, struct mcast_req,
group_list);
sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
- req_join_state = sa_data->scope_join_state & 0x7;
+ req_join_state = sa_data->scope_join_state & 0xf;
/* For a leave request, we will immediately answer the VF, and
* update our internal counters. The actual leave will be sent
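The mask widening from 0x7 to 0xf throughout mcg.c is systematic: scope_join_state packs the MGID scope in the high nibble and the join state in the low nibble, and the old 3-bit mask dropped bit 3 of the join state (the send-only full-member flag in later IBTA revisions). A sketch of the layout (macro names are ours):

#define MCG_SCOPE(sjs)      (((sjs) >> 4) & 0xf) /* MGID scope, high nibble */
#define MCG_JOIN_STATE(sjs) ((sjs) & 0xf)        /* join state, low nibble  */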
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 29acda249..686ab48ff 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -448,7 +448,7 @@ struct mlx4_ib_demux_ctx {
struct workqueue_struct *wq;
struct workqueue_struct *ud_wq;
spinlock_t ud_lock;
- __be64 subnet_prefix;
+ atomic64_t subnet_prefix;
__be64 guid_cache[128];
struct mlx4_ib_dev *dev;
/* the following lock protects both mcg_table and mcg_mgid0_list */
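With subnet_prefix now an atomic64_t holding the CPU-endian value, readers and the port-event handler no longer need a lock; byte order is converted only at the cache boundary, as in this sketch (the accessor name is ours, mirroring the patch's usage in qp.c and mad.c):

static inline __be64 demux_subnet_prefix(struct mlx4_ib_demux_ctx *ctx)
{
	/* stored CPU-endian; convert on the way out */
	return cpu_to_be64(atomic64_read(&ctx->subnet_prefix));
}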
@@ -549,6 +549,14 @@ struct mlx4_ib_counters {
u32 default_counter;
};
+#define MLX4_DIAG_COUNTERS_TYPES 2
+
+struct mlx4_ib_diag_counters {
+ const char **name;
+ u32 *offset;
+ u32 num_counters;
+};
+
struct mlx4_ib_dev {
struct ib_device ib_dev;
struct mlx4_dev *dev;
@@ -585,6 +593,7 @@ struct mlx4_ib_dev {
/* protect resources needed as part of reset flow */
spinlock_t reset_flow_resource_lock;
struct list_head qp_list;
+ struct mlx4_ib_diag_counters diag_counters[MLX4_DIAG_COUNTERS_TYPES];
};
struct ib_event_work {
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 8db8405c1..7fb9629bd 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -232,7 +232,7 @@ static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size)
}
} else {
ctrl = buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
- s = (ctrl->fence_size & 0x3f) << 4;
+ s = (ctrl->qpn_vlan.fence_size & 0x3f) << 4;
for (i = 64; i < s; i += 64) {
wqe = buf + i;
*wqe = cpu_to_be32(0xffffffff);
@@ -264,7 +264,7 @@ static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size)
inl->byte_count = cpu_to_be32(1 << 31 | (size - s - sizeof *inl));
}
ctrl->srcrb_flags = 0;
- ctrl->fence_size = size / 16;
+ ctrl->qpn_vlan.fence_size = size / 16;
/*
* Make sure descriptor is fully written before setting ownership bit
* (because HW can start executing as soon as we do).
@@ -1992,7 +1992,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
ctrl = get_send_wqe(qp, i);
ctrl->owner_opcode = cpu_to_be32(1 << 31);
if (qp->sq_max_wqes_per_wr == 1)
- ctrl->fence_size = 1 << (qp->sq.wqe_shift - 4);
+ ctrl->qpn_vlan.fence_size =
+ 1 << (qp->sq.wqe_shift - 4);
stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift);
}
@@ -2492,24 +2493,27 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
sqp->ud_header.grh.flow_label =
ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit;
- if (is_eth)
+ if (is_eth) {
memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16);
- else {
- if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
- /* When multi-function is enabled, the ib_core gid
- * indexes don't necessarily match the hw ones, so
- * we must use our own cache */
- sqp->ud_header.grh.source_gid.global.subnet_prefix =
- to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
- subnet_prefix;
- sqp->ud_header.grh.source_gid.global.interface_id =
- to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
- guid_cache[ah->av.ib.gid_index];
- } else
- ib_get_cached_gid(ib_dev,
- be32_to_cpu(ah->av.ib.port_pd) >> 24,
- ah->av.ib.gid_index,
- &sqp->ud_header.grh.source_gid, NULL);
+ } else {
+ if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
+ /* When multi-function is enabled, the ib_core gid
+ * indexes don't necessarily match the hw ones, so
+ * we must use our own cache
+ */
+ sqp->ud_header.grh.source_gid.global.subnet_prefix =
+ cpu_to_be64(atomic64_read(&(to_mdev(ib_dev)->sriov.
+ demux[sqp->qp.port - 1].
+ subnet_prefix)));
+ sqp->ud_header.grh.source_gid.global.interface_id =
+ to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
+ guid_cache[ah->av.ib.gid_index];
+ } else {
+ ib_get_cached_gid(ib_dev,
+ be32_to_cpu(ah->av.ib.port_pd) >> 24,
+ ah->av.ib.gid_index,
+ &sqp->ud_header.grh.source_gid, NULL);
+ }
}
memcpy(sqp->ud_header.grh.destination_gid.raw,
ah->av.ib.dgid, 16);
@@ -3169,8 +3173,8 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
wmb();
*lso_wqe = lso_hdr_sz;
- ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
- MLX4_WQE_CTRL_FENCE : 0) | size;
+ ctrl->qpn_vlan.fence_size = (wr->send_flags & IB_SEND_FENCE ?
+ MLX4_WQE_CTRL_FENCE : 0) | size;
/*
* Make sure descriptor is fully written before
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 9c0e67bd2..e4fac9292 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -424,6 +424,83 @@ static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
item->key = be32_to_cpu(cqe->mkey);
}
+static void sw_send_comp(struct mlx5_ib_qp *qp, int num_entries,
+ struct ib_wc *wc, int *npolled)
+{
+ struct mlx5_ib_wq *wq;
+ unsigned int cur;
+ unsigned int idx;
+ int np;
+ int i;
+
+ wq = &qp->sq;
+ cur = wq->head - wq->tail;
+ np = *npolled;
+
+ if (cur == 0)
+ return;
+
+ for (i = 0; i < cur && np < num_entries; i++) {
+ idx = wq->last_poll & (wq->wqe_cnt - 1);
+ wc->wr_id = wq->wrid[idx];
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
+ wq->tail++;
+ np++;
+ wc->qp = &qp->ibqp;
+ wc++;
+ wq->last_poll = wq->w_list[idx].next;
+ }
+ *npolled = np;
+}
+
+static void sw_recv_comp(struct mlx5_ib_qp *qp, int num_entries,
+ struct ib_wc *wc, int *npolled)
+{
+ struct mlx5_ib_wq *wq;
+ unsigned int cur;
+ int np;
+ int i;
+
+ wq = &qp->rq;
+ cur = wq->head - wq->tail;
+ np = *npolled;
+
+ if (cur == 0)
+ return;
+
+ for (i = 0; i < cur && np < num_entries; i++) {
+ wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
+ wq->tail++;
+ np++;
+ wc->qp = &qp->ibqp;
+ wc++;
+ }
+ *npolled = np;
+}
+
+static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
+ struct ib_wc *wc, int *npolled)
+{
+ struct mlx5_ib_qp *qp;
+
+ *npolled = 0;
+ /* Find uncompleted WQEs belonging to that cq and return mimicked flush completions for them */
+ list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
+ sw_send_comp(qp, num_entries, wc + *npolled, npolled);
+ if (*npolled >= num_entries)
+ return;
+ }
+
+ list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
+ sw_recv_comp(qp, num_entries, wc + *npolled, npolled);
+ if (*npolled >= num_entries)
+ return;
+ }
+}
+
static int mlx5_poll_one(struct mlx5_ib_cq *cq,
struct mlx5_ib_qp **cur_qp,
struct ib_wc *wc)
@@ -476,12 +553,6 @@ repoll:
* from the table.
*/
mqp = __mlx5_qp_lookup(dev->mdev, qpn);
- if (unlikely(!mqp)) {
- mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n",
- cq->mcq.cqn, qpn);
- return -EINVAL;
- }
-
*cur_qp = to_mibqp(mqp);
}
@@ -542,13 +613,6 @@ repoll:
read_lock(&dev->mdev->priv.mkey_table.lock);
mmkey = __mlx5_mr_lookup(dev->mdev,
mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
- if (unlikely(!mmkey)) {
- read_unlock(&dev->mdev->priv.mkey_table.lock);
- mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n",
- cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey));
- return -EINVAL;
- }
-
mr = to_mibmr(mmkey);
get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
mr->sig->sig_err_exists = true;
@@ -594,31 +658,32 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
struct mlx5_ib_cq *cq = to_mcq(ibcq);
struct mlx5_ib_qp *cur_qp = NULL;
+ struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
+ struct mlx5_core_dev *mdev = dev->mdev;
unsigned long flags;
int soft_polled = 0;
int npolled;
- int err = 0;
spin_lock_irqsave(&cq->lock, flags);
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ mlx5_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
+ goto out;
+ }
if (unlikely(!list_empty(&cq->wc_list)))
soft_polled = poll_soft_wc(cq, num_entries, wc);
for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
- err = mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled);
- if (err)
+ if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
break;
}
if (npolled)
mlx5_cq_set_ci(&cq->mcq);
-
+out:
spin_unlock_irqrestore(&cq->lock, flags);
- if (err == 0 || err == -EAGAIN)
- return soft_polled + npolled;
- else
- return err;
+ return soft_polled + npolled;
}
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
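The early exit added above covers the case where the device has hit an internal error: hardware will never produce another CQE, so mlx5_ib_poll_sw_comp() synthesizes IB_WC_WR_FLUSH_ERR completions from the software send/receive queues tracked via the new cq_send_list/cq_recv_list, letting consumers drain their outstanding work requests.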
@@ -843,6 +908,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
cq->resize_buf = NULL;
cq->resize_umem = NULL;
cq->create_flags = attr->flags;
+ INIT_LIST_HEAD(&cq->list_send_qp);
+ INIT_LIST_HEAD(&cq->list_recv_qp);
if (context) {
err = create_cq_user(dev, udata, context, cq, entries,
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index b48ad8531..e19537cf4 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -37,16 +37,17 @@
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
-#include <linux/io-mapping.h>
#if defined(CONFIG_X86)
#include <asm/pat.h>
#endif
#include <linux/sched.h>
+#include <linux/delay.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
+#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <linux/in.h>
@@ -287,7 +288,9 @@ __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
- return !MLX5_CAP_GEN(dev->mdev, ib_virt);
+ if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
+ return !MLX5_CAP_GEN(dev->mdev, ib_virt);
+ return 0;
}
enum {
@@ -457,8 +460,17 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
int max_rq_sg;
int max_sq_sg;
u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
+ struct mlx5_ib_query_device_resp resp = {};
+ size_t resp_len;
+ u64 max_tso;
- if (uhw->inlen || uhw->outlen)
+ resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
+ if (uhw->outlen && uhw->outlen < resp_len)
+ return -EINVAL;
+ else
+ resp.response_length = resp_len;
+
+ if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
return -EINVAL;
memset(props, 0, sizeof(*props));
@@ -511,10 +523,21 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (MLX5_CAP_GEN(mdev, block_lb_mc))
props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
- if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
- (MLX5_CAP_ETH(dev->mdev, csum_cap)))
+ if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads)) {
+ if (MLX5_CAP_ETH(mdev, csum_cap))
props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
+ if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
+ max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
+ if (max_tso) {
+ resp.tso_caps.max_tso = 1 << max_tso;
+ resp.tso_caps.supported_qpts |=
+ 1 << IB_QPT_RAW_PACKET;
+ resp.response_length += sizeof(resp.tso_caps);
+ }
+ }
+ }
+
if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
props->device_cap_flags |= IB_DEVICE_UD_TSO;
@@ -576,6 +599,13 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (!mlx5_core_is_pf(mdev))
props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;
+ if (uhw->outlen) {
+ err = ib_copy_to_udata(uhw, &resp, resp.response_length);
+
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -983,6 +1013,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
goto out_uars;
}
+ INIT_LIST_HEAD(&context->vma_private_list);
INIT_LIST_HEAD(&context->db_page_list);
mutex_init(&context->db_page_mutex);
@@ -992,6 +1023,11 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
if (field_avail(typeof(resp), cqe_version, udata->outlen))
resp.response_length += sizeof(resp.cqe_version);
+ if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
+ resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE;
+ resp.response_length += sizeof(resp.cmds_supp_uhw);
+ }
+
/*
* We don't want to expose information from the PCI bar that is located
* after 4096 bytes, so if the arch only supports larger pages, let's
@@ -1006,8 +1042,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
offsetof(struct mlx5_init_seg, internal_timer_h) %
PAGE_SIZE;
resp.response_length += sizeof(resp.hca_core_clock_offset) +
- sizeof(resp.reserved2) +
- sizeof(resp.reserved3);
+ sizeof(resp.reserved2);
}
err = ib_copy_to_udata(udata, &resp, resp.response_length);
@@ -1086,6 +1121,125 @@ static int get_index(unsigned long offset)
return get_arg(offset);
}
+static void mlx5_ib_vma_open(struct vm_area_struct *area)
+{
+ /* vma_open is called when a new VMA is created on top of our VMA. This
+ * is done through either mremap flow or split_vma (usually due to
+ * mlock, madvise, munmap, etc.) We do not support a clone of the VMA,
+ * as this VMA is strongly hardware related. Therefore we set the
+ * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
+ * calling us again and trying to do incorrect actions. We assume that
+ * the original VMA is exactly a single page, so no "splitting"
+ * operation will happen to it.
+ */
+ area->vm_ops = NULL;
+}
+
+static void mlx5_ib_vma_close(struct vm_area_struct *area)
+{
+ struct mlx5_ib_vma_private_data *mlx5_ib_vma_priv_data;
+
+ /* It's guaranteed that all VMAs opened on a FD are closed before the
+ * file itself is closed, therefore no sync is needed with the regular
+ * closing flow. (e.g. mlx5 ib_dealloc_ucontext)
+ * However, we do need to synchronize with vma accesses made by
+ * mlx5_ib_disassociate_ucontext.
+ * The close operation is usually called under mm->mmap_sem except when
+ * process is exiting.
+ * The exiting case is handled explicitly as part of
+ * mlx5_ib_disassociate_ucontext.
+ */
+ mlx5_ib_vma_priv_data = (struct mlx5_ib_vma_private_data *)area->vm_private_data;
+
+ /* set the vma context pointer to null in the mlx5_ib driver's
+ * private data to protect against a race in
+ * mlx5_ib_disassociate_ucontext().
+ */
+ mlx5_ib_vma_priv_data->vma = NULL;
+ list_del(&mlx5_ib_vma_priv_data->list);
+ kfree(mlx5_ib_vma_priv_data);
+}
+
+static const struct vm_operations_struct mlx5_ib_vm_ops = {
+ .open = mlx5_ib_vma_open,
+ .close = mlx5_ib_vma_close
+};
+
+static int mlx5_ib_set_vma_data(struct vm_area_struct *vma,
+ struct mlx5_ib_ucontext *ctx)
+{
+ struct mlx5_ib_vma_private_data *vma_prv;
+ struct list_head *vma_head = &ctx->vma_private_list;
+
+ vma_prv = kzalloc(sizeof(*vma_prv), GFP_KERNEL);
+ if (!vma_prv)
+ return -ENOMEM;
+
+ vma_prv->vma = vma;
+ vma->vm_private_data = vma_prv;
+ vma->vm_ops = &mlx5_ib_vm_ops;
+
+ list_add(&vma_prv->list, vma_head);
+
+ return 0;
+}
+
+static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
+{
+ int ret;
+ struct vm_area_struct *vma;
+ struct mlx5_ib_vma_private_data *vma_private, *n;
+ struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
+ struct task_struct *owning_process = NULL;
+ struct mm_struct *owning_mm = NULL;
+
+ owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
+ if (!owning_process)
+ return;
+
+ owning_mm = get_task_mm(owning_process);
+ if (!owning_mm) {
+ pr_info("no mm, disassociate ucontext is pending task termination\n");
+ while (1) {
+ put_task_struct(owning_process);
+ usleep_range(1000, 2000);
+ owning_process = get_pid_task(ibcontext->tgid,
+ PIDTYPE_PID);
+ if (!owning_process ||
+ owning_process->state == TASK_DEAD) {
+ pr_info("disassociate ucontext done, task was terminated\n");
+ /* if the task was already dead, we still need
+ * to release its task struct.
+ */
+ if (owning_process)
+ put_task_struct(owning_process);
+ return;
+ }
+ }
+ }
+
+ /* need to protect from a race on closing the vma as part of
+ * mlx5_ib_vma_close.
+ */
+ down_read(&owning_mm->mmap_sem);
+ list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
+ list) {
+ vma = vma_private->vma;
+ ret = zap_vma_ptes(vma, vma->vm_start,
+ PAGE_SIZE);
+ WARN_ONCE(ret, "%s: zap_vma_ptes failed", __func__);
+ /* context going to be destroyed, should
+ * not access ops any more.
+ */
+ vma->vm_ops = NULL;
+ list_del(&vma_private->list);
+ kfree(vma_private);
+ }
+ up_read(&owning_mm->mmap_sem);
+ mmput(owning_mm);
+ put_task_struct(owning_process);
+}
+
static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
{
switch (cmd) {
@@ -1101,8 +1255,10 @@ static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
}
static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
- struct vm_area_struct *vma, struct mlx5_uuar_info *uuari)
+ struct vm_area_struct *vma,
+ struct mlx5_ib_ucontext *context)
{
+ struct mlx5_uuar_info *uuari = &context->uuari;
int err;
unsigned long idx;
phys_addr_t pfn, pa;
@@ -1152,14 +1308,13 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
mlx5_ib_dbg(dev, "mapped %s at 0x%lx, PA %pa\n", mmap_cmd2str(cmd),
vma->vm_start, &pa);
- return 0;
+ return mlx5_ib_set_vma_data(vma, context);
}
static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
{
struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
- struct mlx5_uuar_info *uuari = &context->uuari;
unsigned long command;
phys_addr_t pfn;
@@ -1168,7 +1323,7 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
case MLX5_IB_MMAP_WC_PAGE:
case MLX5_IB_MMAP_NC_PAGE:
case MLX5_IB_MMAP_REGULAR_PAGE:
- return uar_mmap(dev, command, vma, uuari);
+ return uar_mmap(dev, command, vma, context);
case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
return -ENOSYS;
@@ -1275,6 +1430,13 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
dmac_47_16),
ib_spec->eth.val.dst_mac);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ smac_47_16),
+ ib_spec->eth.mask.src_mac);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+ smac_47_16),
+ ib_spec->eth.val.src_mac);
+
if (ib_spec->eth.mask.vlan_tag) {
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
vlan_tag, 1);
@@ -1331,6 +1493,32 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
&ib_spec->ipv4.val.dst_ip,
sizeof(ib_spec->ipv4.val.dst_ip));
break;
+ case IB_FLOW_SPEC_IPV6:
+ if (ib_spec->size != sizeof(ib_spec->ipv6))
+ return -EINVAL;
+
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+ ethertype, 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ ethertype, ETH_P_IPV6);
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &ib_spec->ipv6.mask.src_ip,
+ sizeof(ib_spec->ipv6.mask.src_ip));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &ib_spec->ipv6.val.src_ip,
+ sizeof(ib_spec->ipv6.val.src_ip));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &ib_spec->ipv6.mask.dst_ip,
+ sizeof(ib_spec->ipv6.mask.dst_ip));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &ib_spec->ipv6.val.dst_ip,
+ sizeof(ib_spec->ipv6.val.dst_ip));
+ break;
case IB_FLOW_SPEC_TCP:
if (ib_spec->size != sizeof(ib_spec->tcp_udp))
return -EINVAL;
@@ -1528,21 +1716,18 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
{
struct mlx5_flow_table *ft = ft_prio->flow_table;
struct mlx5_ib_flow_handler *handler;
+ struct mlx5_flow_spec *spec;
void *ib_flow = flow_attr + 1;
- u8 match_criteria_enable = 0;
unsigned int spec_index;
- u32 *match_c;
- u32 *match_v;
u32 action;
int err = 0;
if (!is_valid_attr(flow_attr))
return ERR_PTR(-EINVAL);
- match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
- match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ spec = mlx5_vzalloc(sizeof(*spec));
handler = kzalloc(sizeof(*handler), GFP_KERNEL);
- if (!handler || !match_c || !match_v) {
+ if (!handler || !spec) {
err = -ENOMEM;
goto free;
}
@@ -1550,7 +1735,8 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
INIT_LIST_HEAD(&handler->list);
for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
- err = parse_flow_attr(match_c, match_v, ib_flow);
+ err = parse_flow_attr(spec->match_criteria,
+ spec->match_value, ib_flow);
if (err < 0)
goto free;
@@ -1558,11 +1744,11 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
}
/* Outer header support only */
- match_criteria_enable = (!outer_header_zero(match_c)) << 0;
+ spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria))
+ << 0;
action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
- handler->rule = mlx5_add_flow_rule(ft, match_criteria_enable,
- match_c, match_v,
+ handler->rule = mlx5_add_flow_rule(ft, spec,
action,
MLX5_FS_DEFAULT_FLOW_TAG,
dst);
@@ -1578,8 +1764,7 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
free:
if (err)
kfree(handler);
- kfree(match_c);
- kfree(match_v);
+ kvfree(spec);
return err ? ERR_PTR(err) : handler;
}
@@ -1673,6 +1858,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
int domain)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_ib_qp *mqp = to_mqp(qp);
struct mlx5_ib_flow_handler *handler = NULL;
struct mlx5_flow_destination *dst = NULL;
struct mlx5_ib_flow_prio *ft_prio;
@@ -1699,7 +1885,10 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
}
dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- dst->tir_num = to_mqp(qp)->raw_packet_qp.rq.tirn;
+ if (mqp->flags & MLX5_IB_QP_RSS)
+ dst->tir_num = mqp->rss_qp.tirn;
+ else
+ dst->tir_num = mqp->raw_packet_qp.rq.tirn;
if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) {
@@ -1804,15 +1993,6 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr,
return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
}
-static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct mlx5_ib_dev *dev =
- container_of(device, struct mlx5_ib_dev, ib_dev.dev);
- return sprintf(buf, "%d.%d.%04d\n", fw_rev_maj(dev->mdev),
- fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
-}
-
static ssize_t show_rev(struct device *device, struct device_attribute *attr,
char *buf)
{
@@ -1831,7 +2011,6 @@ static ssize_t show_board(struct device *device, struct device_attribute *attr,
}
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
static DEVICE_ATTR(fw_pages, S_IRUGO, show_fw_pages, NULL);
@@ -1839,7 +2018,6 @@ static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL);
static struct device_attribute *mlx5_class_attributes[] = {
&dev_attr_hw_rev,
- &dev_attr_fw_ver,
&dev_attr_hca_type,
&dev_attr_board_id,
&dev_attr_fw_pages,
@@ -1857,6 +2035,65 @@ static void pkey_change_handler(struct work_struct *work)
mutex_unlock(&ports->devr->mutex);
}
+static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
+{
+ struct mlx5_ib_qp *mqp;
+ struct mlx5_ib_cq *send_mcq, *recv_mcq;
+ struct mlx5_core_cq *mcq;
+ struct list_head cq_armed_list;
+ unsigned long flags_qp;
+ unsigned long flags_cq;
+ unsigned long flags;
+
+ INIT_LIST_HEAD(&cq_armed_list);
+
+ /* Go over the qp list residing on that ibdev, syncing with qp create/destroy. */
+ spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
+ list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
+ spin_lock_irqsave(&mqp->sq.lock, flags_qp);
+ if (mqp->sq.tail != mqp->sq.head) {
+ send_mcq = to_mcq(mqp->ibqp.send_cq);
+ spin_lock_irqsave(&send_mcq->lock, flags_cq);
+ if (send_mcq->mcq.comp &&
+ mqp->ibqp.send_cq->comp_handler) {
+ if (!send_mcq->mcq.reset_notify_added) {
+ send_mcq->mcq.reset_notify_added = 1;
+ list_add_tail(&send_mcq->mcq.reset_notify,
+ &cq_armed_list);
+ }
+ }
+ spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
+ }
+ spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
+ spin_lock_irqsave(&mqp->rq.lock, flags_qp);
+ /* no handling is needed for SRQ */
+ if (!mqp->ibqp.srq) {
+ if (mqp->rq.tail != mqp->rq.head) {
+ recv_mcq = to_mcq(mqp->ibqp.recv_cq);
+ spin_lock_irqsave(&recv_mcq->lock, flags_cq);
+ if (recv_mcq->mcq.comp &&
+ mqp->ibqp.recv_cq->comp_handler) {
+ if (!recv_mcq->mcq.reset_notify_added) {
+ recv_mcq->mcq.reset_notify_added = 1;
+ list_add_tail(&recv_mcq->mcq.reset_notify,
+ &cq_armed_list);
+ }
+ }
+ spin_unlock_irqrestore(&recv_mcq->lock,
+ flags_cq);
+ }
+ }
+ spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
+ }
+ /* At this point all in-flight post-send work requests have been flushed
+ * by the lock/unlock pairs above; now arm all involved CQs.
+ */
+ list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
+ mcq->comp(mcq);
+ }
+ spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
+}
+
static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
enum mlx5_dev_event event, unsigned long param)
{
@@ -1869,6 +2106,7 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
case MLX5_DEV_EVENT_SYS_ERROR:
ibdev->ib_active = false;
ibev.event = IB_EVENT_DEVICE_FATAL;
+ mlx5_ib_handle_internal_error(ibdev);
break;
case MLX5_DEV_EVENT_PORT_UP:
@@ -2275,6 +2513,15 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
+static void get_dev_fw_str(struct ib_device *ibdev, char *str,
+ size_t str_len)
+{
+ struct mlx5_ib_dev *dev =
+ container_of(ibdev, struct mlx5_ib_dev, ib_dev);
+ snprintf(str, str_len, "%d.%d.%04d", fw_rev_maj(dev->mdev),
+ fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
+}
+
static int mlx5_enable_roce(struct mlx5_ib_dev *dev)
{
int err;
@@ -2301,6 +2548,113 @@ static void mlx5_disable_roce(struct mlx5_ib_dev *dev)
unregister_netdevice_notifier(&dev->roce.nb);
}
+static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev)
+{
+ unsigned int i;
+
+ for (i = 0; i < dev->num_ports; i++)
+ mlx5_core_dealloc_q_counter(dev->mdev,
+ dev->port[i].q_cnt_id);
+}
+
+static int mlx5_ib_alloc_q_counters(struct mlx5_ib_dev *dev)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < dev->num_ports; i++) {
+ ret = mlx5_core_alloc_q_counter(dev->mdev,
+ &dev->port[i].q_cnt_id);
+ if (ret) {
+ mlx5_ib_warn(dev,
+ "couldn't allocate queue counter for port %d, err %d\n",
+ i + 1, ret);
+ goto dealloc_counters;
+ }
+ }
+
+ return 0;
+
+dealloc_counters:
+ while (--i >= 0)
+ mlx5_core_dealloc_q_counter(dev->mdev,
+ dev->port[i].q_cnt_id);
+
+ return ret;
+}
+
+static const char * const names[] = {
+ "rx_write_requests",
+ "rx_read_requests",
+ "rx_atomic_requests",
+ "out_of_buffer",
+ "out_of_sequence",
+ "duplicate_request",
+ "rnr_nak_retry_err",
+ "packet_seq_err",
+ "implied_nak_seq_err",
+ "local_ack_timeout_err",
+};
+
+static const size_t stats_offsets[] = {
+ MLX5_BYTE_OFF(query_q_counter_out, rx_write_requests),
+ MLX5_BYTE_OFF(query_q_counter_out, rx_read_requests),
+ MLX5_BYTE_OFF(query_q_counter_out, rx_atomic_requests),
+ MLX5_BYTE_OFF(query_q_counter_out, out_of_buffer),
+ MLX5_BYTE_OFF(query_q_counter_out, out_of_sequence),
+ MLX5_BYTE_OFF(query_q_counter_out, duplicate_request),
+ MLX5_BYTE_OFF(query_q_counter_out, rnr_nak_retry_err),
+ MLX5_BYTE_OFF(query_q_counter_out, packet_seq_err),
+ MLX5_BYTE_OFF(query_q_counter_out, implied_nak_seq_err),
+ MLX5_BYTE_OFF(query_q_counter_out, local_ack_timeout_err),
+};
+
+static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
+ u8 port_num)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(names) != ARRAY_SIZE(stats_offsets));
+
+ /* We support only per-port stats */
+ if (port_num == 0)
+ return NULL;
+
+ return rdma_alloc_hw_stats_struct(names, ARRAY_SIZE(names),
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
+static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u8 port, int index)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
+ void *out;
+ __be32 val;
+ int ret;
+ int i;
+
+ if (!port || !stats)
+ return -ENOSYS;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return -ENOMEM;
+
+ ret = mlx5_core_query_q_counter(dev->mdev,
+ dev->port[port - 1].q_cnt_id, 0,
+ out, outlen);
+ if (ret)
+ goto free;
+
+ for (i = 0; i < ARRAY_SIZE(names); i++) {
+ val = *(__be32 *)(out + stats_offsets[i]);
+ stats->value[i] = (u64)be32_to_cpu(val);
+ }
+free:
+ kvfree(out);
+ return ARRAY_SIZE(names);
+}
+
static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
{
struct mlx5_ib_dev *dev;
@@ -2323,10 +2677,15 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->mdev = mdev;
+ dev->port = kcalloc(MLX5_CAP_GEN(mdev, num_ports), sizeof(*dev->port),
+ GFP_KERNEL);
+ if (!dev->port)
+ goto err_dealloc;
+
rwlock_init(&dev->roce.netdev_lock);
err = get_port_caps(dev);
if (err)
- goto err_dealloc;
+ goto err_free_port;
if (mlx5_use_mad_ifc(dev))
get_ext_port_caps(dev);
@@ -2421,6 +2780,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.map_mr_sg = mlx5_ib_map_mr_sg;
dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
dev->ib_dev.get_port_immutable = mlx5_port_immutable;
+ dev->ib_dev.get_dev_fw_str = get_dev_fw_str;
if (mlx5_core_is_pf(mdev)) {
dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config;
dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state;
@@ -2428,6 +2788,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.set_vf_guid = mlx5_ib_set_vf_guid;
}
+ dev->ib_dev.disassociate_ucontext = mlx5_ib_disassociate_ucontext;
+
mlx5_ib_internal_fill_odp_caps(dev);
if (MLX5_CAP_GEN(mdev, imaicl)) {
@@ -2438,6 +2800,12 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
}
+ if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt) &&
+ MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
+ dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats;
+ dev->ib_dev.alloc_hw_stats = mlx5_ib_alloc_hw_stats;
+ }
+
if (MLX5_CAP_GEN(mdev, xrc)) {
dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
@@ -2450,9 +2818,19 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
IB_LINK_LAYER_ETHERNET) {
dev->ib_dev.create_flow = mlx5_ib_create_flow;
dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow;
+ dev->ib_dev.create_wq = mlx5_ib_create_wq;
+ dev->ib_dev.modify_wq = mlx5_ib_modify_wq;
+ dev->ib_dev.destroy_wq = mlx5_ib_destroy_wq;
+ dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table;
+ dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table;
dev->ib_dev.uverbs_ex_cmd_mask |=
(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
- (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
+ (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW) |
+ (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
+ (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
+ (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
+ (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
+ (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
}
err = init_node_data(dev);
if (err)
@@ -2460,6 +2838,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
mutex_init(&dev->flow_db.lock);
mutex_init(&dev->cap_mask_mutex);
+ INIT_LIST_HEAD(&dev->qp_list);
+ spin_lock_init(&dev->reset_flow_resource_lock);
if (ll == IB_LINK_LAYER_ETHERNET) {
err = mlx5_enable_roce(dev);
@@ -2475,10 +2855,14 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
if (err)
goto err_rsrc;
- err = ib_register_device(&dev->ib_dev, NULL);
+ err = mlx5_ib_alloc_q_counters(dev);
if (err)
goto err_odp;
+ err = ib_register_device(&dev->ib_dev, NULL);
+ if (err)
+ goto err_q_cnt;
+
err = create_umr_res(dev);
if (err)
goto err_dev;
@@ -2500,6 +2884,9 @@ err_umrc:
err_dev:
ib_unregister_device(&dev->ib_dev);
+err_q_cnt:
+ mlx5_ib_dealloc_q_counters(dev);
+
err_odp:
mlx5_ib_odp_remove_one(dev);
@@ -2510,6 +2897,9 @@ err_disable_roce:
if (ll == IB_LINK_LAYER_ETHERNET)
mlx5_disable_roce(dev);
+err_free_port:
+ kfree(dev->port);
+
err_dealloc:
ib_dealloc_device((struct ib_device *)dev);
@@ -2522,11 +2912,13 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);
ib_unregister_device(&dev->ib_dev);
+ mlx5_ib_dealloc_q_counters(dev);
destroy_umrc_res(dev);
mlx5_ib_odp_remove_one(dev);
destroy_dev_resources(&dev->devr);
if (ll == IB_LINK_LAYER_ETHERNET)
mlx5_disable_roce(dev);
+ kfree(dev->port);
ib_dealloc_device(&dev->ib_dev);
}
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 40df2cca0..996b54e36 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -71,7 +71,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
addr = addr >> page_shift;
tmp = (unsigned long)addr;
- m = find_first_bit(&tmp, sizeof(tmp));
+ m = find_first_bit(&tmp, BITS_PER_LONG);
skip = 1 << m;
mask = skip - 1;
i = 0;
@@ -81,7 +81,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
for (k = 0; k < len; k++) {
if (!(i & mask)) {
tmp = (unsigned long)pfn;
- m = min_t(unsigned long, m, find_first_bit(&tmp, sizeof(tmp)));
+ m = min_t(unsigned long, m, find_first_bit(&tmp, BITS_PER_LONG));
skip = 1 << m;
mask = skip - 1;
base = pfn;
@@ -89,7 +89,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
} else {
if (base + p != pfn) {
tmp = (unsigned long)p;
- m = find_first_bit(&tmp, sizeof(tmp));
+ m = find_first_bit(&tmp, BITS_PER_LONG);
skip = 1 << m;
mask = skip - 1;
base = pfn;
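The mem.c hunks above correct a units bug: find_first_bit() takes its size argument in bits, so passing sizeof(tmp) (8 on a 64-bit build) silently truncated the scan to the low byte. A small demonstration of the difference (the wrapper function name is ours):

static void find_first_bit_units_demo(void)
{
	unsigned long tmp = 1UL << 20;

	find_first_bit(&tmp, sizeof(tmp));	/* wrong: scans bits 0..7 only, returns 8 */
	find_first_bit(&tmp, BITS_PER_LONG);	/* right: scans the whole word, returns 20 */
}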
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index c4a982582..95146f4aa 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -105,6 +105,11 @@ enum {
MLX5_CQE_VERSION_V1,
};
+struct mlx5_ib_vma_private_data {
+ struct list_head list;
+ struct vm_area_struct *vma;
+};
+
struct mlx5_ib_ucontext {
struct ib_ucontext ibucontext;
struct list_head db_page_list;
@@ -116,6 +121,7 @@ struct mlx5_ib_ucontext {
u8 cqe_version;
/* Transport Domain number */
u32 tdn;
+ struct list_head vma_private_list;
};
static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
@@ -217,12 +223,41 @@ struct mlx5_ib_wq {
void *qend;
};
+struct mlx5_ib_rwq {
+ struct ib_wq ibwq;
+ u32 rqn;
+ u32 rq_num_pas;
+ u32 log_rq_stride;
+ u32 log_rq_size;
+ u32 rq_page_offset;
+ u32 log_page_size;
+ struct ib_umem *umem;
+ size_t buf_size;
+ unsigned int page_shift;
+ int create_type;
+ struct mlx5_db db;
+ u32 user_index;
+ u32 wqe_count;
+ u32 wqe_shift;
+ int wq_sig;
+};
+
enum {
MLX5_QP_USER,
MLX5_QP_KERNEL,
MLX5_QP_EMPTY
};
+enum {
+ MLX5_WQ_USER,
+ MLX5_WQ_KERNEL
+};
+
+struct mlx5_ib_rwq_ind_table {
+ struct ib_rwq_ind_table ib_rwq_ind_tbl;
+ u32 rqtn;
+};
+
/*
* Connect-IB can trigger up to four concurrent pagefaults
* per-QP.
@@ -266,6 +301,10 @@ struct mlx5_ib_qp_trans {
u8 resp_depth;
};
+struct mlx5_ib_rss_qp {
+ u32 tirn;
+};
+
struct mlx5_ib_rq {
struct mlx5_ib_qp_base base;
struct mlx5_ib_wq *rq;
@@ -294,6 +333,7 @@ struct mlx5_ib_qp {
union {
struct mlx5_ib_qp_trans trans_qp;
struct mlx5_ib_raw_packet_qp raw_packet_qp;
+ struct mlx5_ib_rss_qp rss_qp;
};
struct mlx5_buf buf;
@@ -340,6 +380,9 @@ struct mlx5_ib_qp {
spinlock_t disable_page_faults_lock;
struct mlx5_ib_pfault pagefaults[MLX5_IB_PAGEFAULT_CONTEXTS];
#endif
+ struct list_head qps_list;
+ struct list_head cq_recv_list;
+ struct list_head cq_send_list;
};
struct mlx5_ib_cq_buf {
@@ -359,6 +402,7 @@ enum mlx5_ib_qp_flags {
/* QP uses 1 as its source QP number */
MLX5_IB_QP_SQPN_QP1 = 1 << 6,
MLX5_IB_QP_CAP_SCATTER_FCS = 1 << 7,
+ MLX5_IB_QP_RSS = 1 << 8,
};
struct mlx5_umr_wr {
@@ -401,6 +445,8 @@ struct mlx5_ib_cq {
struct mlx5_ib_cq_buf *resize_buf;
struct ib_umem *resize_umem;
int cqe_size;
+ struct list_head list_send_qp;
+ struct list_head list_recv_qp;
u32 create_flags;
struct list_head wc_list;
enum ib_cq_notify_flags notify_flags;
@@ -546,6 +592,10 @@ struct mlx5_ib_resources {
struct mutex mutex;
};
+struct mlx5_ib_port {
+ u16 q_cnt_id;
+};
+
struct mlx5_roce {
/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
* netdev pointer
@@ -581,6 +631,11 @@ struct mlx5_ib_dev {
struct srcu_struct mr_srcu;
#endif
struct mlx5_ib_flow_db flow_db;
+ /* protect resources needed as part of reset flow */
+ spinlock_t reset_flow_resource_lock;
+ struct list_head qp_list;
+ /* Array with num_ports elements */
+ struct mlx5_ib_port *port;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -628,6 +683,16 @@ static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}
+static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
+{
+ return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
+}
+
+static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
+{
+ return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
+}
+
static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
return container_of(msrq, struct mlx5_ib_srq, msrq);
@@ -762,6 +827,16 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
struct ib_mr_status *mr_status);
+struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
+ struct ib_wq_init_attr *init_attr,
+ struct ib_udata *udata);
+int mlx5_ib_destroy_wq(struct ib_wq *wq);
+int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
+ u32 wq_attr_mask, struct ib_udata *udata);
+struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
+ struct ib_rwq_ind_table_init_attr *init_attr,
+ struct ib_udata *udata);
+int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
extern struct workqueue_struct *mlx5_ib_page_fault_wq;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 8cf2ce505..4b021305c 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1193,12 +1193,16 @@ error:
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
+ struct mlx5_core_dev *mdev = dev->mdev;
struct umr_common *umrc = &dev->umrc;
struct mlx5_ib_umr_context umr_context;
struct mlx5_umr_wr umrwr = {};
struct ib_send_wr *bad;
int err;
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ return 0;
+
mlx5_ib_init_umr_context(&umr_context);
umrwr.wr.wr_cqe = &umr_context.cqe;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index ce0a7ab35..affc3f659 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -77,6 +77,10 @@ struct mlx5_wqe_eth_pad {
u8 rsvd0[16];
};
+static void get_cqs(enum ib_qp_type qp_type,
+ struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
+ struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq);
+
static int is_qp0(enum ib_qp_type qp_type)
{
return qp_type == IB_QPT_SMI;
@@ -609,6 +613,11 @@ static int to_mlx5_st(enum ib_qp_type type)
}
}
+static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
+ struct mlx5_ib_cq *recv_cq);
+static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
+ struct mlx5_ib_cq *recv_cq);
+
static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
{
return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
@@ -649,6 +658,71 @@ err_umem:
return err;
}
+static void destroy_user_rq(struct ib_pd *pd, struct mlx5_ib_rwq *rwq)
+{
+ struct mlx5_ib_ucontext *context;
+
+ context = to_mucontext(pd->uobject->context);
+ mlx5_ib_db_unmap_user(context, &rwq->db);
+ if (rwq->umem)
+ ib_umem_release(rwq->umem);
+}
+
+static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_rwq *rwq,
+ struct mlx5_ib_create_wq *ucmd)
+{
+ struct mlx5_ib_ucontext *context;
+ int page_shift = 0;
+ int npages;
+ int ncont = 0;
+ int err;
+
+ if (!ucmd->buf_addr)
+ return -EINVAL;
+
+ context = to_mucontext(pd->uobject->context);
+ rwq->umem = ib_umem_get(pd->uobject->context, ucmd->buf_addr,
+ rwq->buf_size, 0, 0);
+ if (IS_ERR(rwq->umem)) {
+ mlx5_ib_dbg(dev, "umem_get failed\n");
+ err = PTR_ERR(rwq->umem);
+ return err;
+ }
+
+ mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, &npages, &page_shift,
+ &ncont, NULL);
+ err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift,
+ &rwq->rq_page_offset);
+ if (err) {
+ mlx5_ib_warn(dev, "bad offset\n");
+ goto err_umem;
+ }
+
+ rwq->rq_num_pas = ncont;
+ rwq->page_shift = page_shift;
+ rwq->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+ rwq->wq_sig = !!(ucmd->flags & MLX5_WQ_FLAG_SIGNATURE);
+
+ mlx5_ib_dbg(dev, "addr 0x%llx, size %zu, npages %d, page_shift %d, ncont %d, offset %d\n",
+ (unsigned long long)ucmd->buf_addr, rwq->buf_size,
+ npages, page_shift, ncont, rwq->rq_page_offset);
+
+ err = mlx5_ib_db_map_user(context, ucmd->db_addr, &rwq->db);
+ if (err) {
+ mlx5_ib_dbg(dev, "map failed\n");
+ goto err_umem;
+ }
+
+ rwq->create_type = MLX5_WQ_USER;
+ return 0;
+
+err_umem:
+ ib_umem_release(rwq->umem);
+ return err;
+}
+
static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct mlx5_ib_qp *qp, struct ib_udata *udata,
struct ib_qp_init_attr *attr,
@@ -1201,6 +1275,188 @@ static void raw_packet_qp_copy_info(struct mlx5_ib_qp *qp,
rq->doorbell = &qp->db;
}
+static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
+{
+ mlx5_core_destroy_tir(dev->mdev, qp->rss_qp.tirn);
+}
+
+static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ struct ib_pd *pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct ib_uobject *uobj = pd->uobject;
+ struct ib_ucontext *ucontext = uobj->context;
+ struct mlx5_ib_ucontext *mucontext = to_mucontext(ucontext);
+ struct mlx5_ib_create_qp_resp resp = {};
+ int inlen;
+ int err;
+ u32 *in;
+ void *tirc;
+ void *hfso;
+ u32 selected_fields = 0;
+ size_t min_resp_len;
+ u32 tdn = mucontext->tdn;
+ struct mlx5_ib_create_qp_rss ucmd = {};
+ size_t required_cmd_sz;
+
+ if (init_attr->qp_type != IB_QPT_RAW_PACKET)
+ return -EOPNOTSUPP;
+
+ if (init_attr->create_flags || init_attr->send_cq)
+ return -EINVAL;
+
+ min_resp_len = offsetof(typeof(resp), uuar_index) + sizeof(resp.uuar_index);
+ if (udata->outlen < min_resp_len)
+ return -EINVAL;
+
+ required_cmd_sz = offsetof(typeof(ucmd), reserved1) + sizeof(ucmd.reserved1);
+ if (udata->inlen < required_cmd_sz) {
+ mlx5_ib_dbg(dev, "invalid inlen\n");
+ return -EINVAL;
+ }
+
+ if (udata->inlen > sizeof(ucmd) &&
+ !ib_is_udata_cleared(udata, sizeof(ucmd),
+ udata->inlen - sizeof(ucmd))) {
+ mlx5_ib_dbg(dev, "inlen is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
+ mlx5_ib_dbg(dev, "copy failed\n");
+ return -EFAULT;
+ }
+
+ if (ucmd.comp_mask) {
+ mlx5_ib_dbg(dev, "invalid comp mask\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (memchr_inv(ucmd.reserved, 0, sizeof(ucmd.reserved)) || ucmd.reserved1) {
+ mlx5_ib_dbg(dev, "invalid reserved\n");
+ return -EOPNOTSUPP;
+ }
+
+ err = ib_copy_to_udata(udata, &resp, min_resp_len);
+ if (err) {
+ mlx5_ib_dbg(dev, "copy failed\n");
+ return -EINVAL;
+ }
+
+ inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+ MLX5_SET(tirc, tirc, disp_type,
+ MLX5_TIRC_DISP_TYPE_INDIRECT);
+ MLX5_SET(tirc, tirc, indirect_table,
+ init_attr->rwq_ind_tbl->ind_tbl_num);
+ MLX5_SET(tirc, tirc, transport_domain, tdn);
+
+ hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+ switch (ucmd.rx_hash_function) {
+ case MLX5_RX_HASH_FUNC_TOEPLITZ:
+ {
+ void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
+ size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key);
+
+ if (len != ucmd.rx_key_len) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
+ MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+ memcpy(rss_key, ucmd.rx_hash_key, len);
+ break;
+ }
+ default:
+ err = -EOPNOTSUPP;
+ goto err;
+ }
+
+ if (!ucmd.rx_hash_fields_mask) {
+ /* special case when this TIR serves as steering entry without hashing */
+ if (!init_attr->rwq_ind_tbl->log_ind_tbl_size)
+ goto create_tir;
+ err = -EINVAL;
+ goto err;
+ }
+
+ if (((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
+ (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) &&
+ ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
+ (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ /* If none of IPV4 & IPV6 SRC/DST was set - this bit field is ignored */
+ if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
+ (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4))
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
+ (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+
+ if (((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
+ (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP)) &&
+ ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
+ (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ /* If none of TCP & UDP SRC/DST was set - this bit field is ignored */
+ if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
+ (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_TCP);
+ else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
+ (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_UDP);
+
+ if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
+ (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6))
+ selected_fields |= MLX5_HASH_FIELD_SEL_SRC_IP;
+
+ if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4) ||
+ (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
+ selected_fields |= MLX5_HASH_FIELD_SEL_DST_IP;
+
+ if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
+ (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP))
+ selected_fields |= MLX5_HASH_FIELD_SEL_L4_SPORT;
+
+ if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP) ||
+ (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
+ selected_fields |= MLX5_HASH_FIELD_SEL_L4_DPORT;
+
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields);
+
+create_tir:
+ err = mlx5_core_create_tir(dev->mdev, in, inlen, &qp->rss_qp.tirn);
+
+ if (err)
+ goto err;
+
+ kvfree(in);
+ /* qpn is reserved for that QP */
+ qp->trans_qp.base.mqp.qpn = 0;
+ qp->flags |= MLX5_IB_QP_RSS;
+ return 0;
+
+err:
+ kvfree(in);
+ return err;
+}
+
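
The validation in create_rss_raw_qp_tir() enforces that a single TIR describes one L3 and one L4 protocol: the IPv4 and IPv6 hash fields are mutually exclusive, as are the TCP and UDP port fields. A minimal standalone sketch of that exclusivity check (not part of the patch; the MLX5_RX_HASH_* flags are the ABI bits added to user.h later in this diff):

static bool rx_hash_fields_valid(u64 fields)
{
	u64 ipv4 = MLX5_RX_HASH_SRC_IPV4 | MLX5_RX_HASH_DST_IPV4;
	u64 ipv6 = MLX5_RX_HASH_SRC_IPV6 | MLX5_RX_HASH_DST_IPV6;
	u64 tcp = MLX5_RX_HASH_SRC_PORT_TCP | MLX5_RX_HASH_DST_PORT_TCP;
	u64 udp = MLX5_RX_HASH_SRC_PORT_UDP | MLX5_RX_HASH_DST_PORT_UDP;

	if ((fields & ipv4) && (fields & ipv6))
		return false;	/* one TIR hashes a single L3 protocol */
	if ((fields & tcp) && (fields & udp))
		return false;	/* and a single L4 protocol */
	return true;
}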
static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata, struct mlx5_ib_qp *qp)
@@ -1211,6 +1467,9 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct mlx5_ib_create_qp_resp resp;
struct mlx5_create_qp_mbox_in *in;
struct mlx5_ib_create_qp ucmd;
+ struct mlx5_ib_cq *send_cq;
+ struct mlx5_ib_cq *recv_cq;
+ unsigned long flags;
int inlen = sizeof(*in);
int err;
u32 uidx = MLX5_IB_DEFAULT_UIDX;
@@ -1227,6 +1486,14 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
+ if (init_attr->rwq_ind_tbl) {
+ if (!udata)
+ return -ENOSYS;
+
+ err = create_rss_raw_qp_tir(dev, qp, pd, init_attr, udata);
+ return err;
+ }
+
if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
if (!MLX5_CAP_GEN(mdev, block_lb_mc)) {
mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
@@ -1460,6 +1727,23 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
base->container_mibqp = qp;
base->mqp.event = mlx5_ib_qp_event;
+ get_cqs(init_attr->qp_type, init_attr->send_cq, init_attr->recv_cq,
+ &send_cq, &recv_cq);
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ mlx5_ib_lock_cqs(send_cq, recv_cq);
+ /* Maintain device-to-QP access, needed for further handling via the
+ * reset flow
+ */
+ list_add_tail(&qp->qps_list, &dev->qp_list);
+ /* Maintain CQ-to-QP access, needed for further handling via the reset
+ * flow
+ */
+ if (send_cq)
+ list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
+ if (recv_cq)
+ list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
+ mlx5_ib_unlock_cqs(send_cq, recv_cq);
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+
return 0;
err_create:
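
The two list insertions above are the create-side half of the reset-flow bookkeeping: destroy_qp_common() later removes the QP from the same lists under the same locks, so the internal-error handler can walk dev->qp_list and the per-CQ lists against a stable snapshot. The pairing, condensed (names from this patch):

/* create */
spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
mlx5_ib_lock_cqs(send_cq, recv_cq);
list_add_tail(&qp->qps_list, &dev->qp_list);
mlx5_ib_unlock_cqs(send_cq, recv_cq);
spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
/* destroy: identical lock order, list_del() instead of list_add_tail() */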
@@ -1478,23 +1762,23 @@ static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv
if (send_cq) {
if (recv_cq) {
if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
- spin_lock_irq(&send_cq->lock);
+ spin_lock(&send_cq->lock);
spin_lock_nested(&recv_cq->lock,
SINGLE_DEPTH_NESTING);
} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
- spin_lock_irq(&send_cq->lock);
+ spin_lock(&send_cq->lock);
__acquire(&recv_cq->lock);
} else {
- spin_lock_irq(&recv_cq->lock);
+ spin_lock(&recv_cq->lock);
spin_lock_nested(&send_cq->lock,
SINGLE_DEPTH_NESTING);
}
} else {
- spin_lock_irq(&send_cq->lock);
+ spin_lock(&send_cq->lock);
__acquire(&recv_cq->lock);
}
} else if (recv_cq) {
- spin_lock_irq(&recv_cq->lock);
+ spin_lock(&recv_cq->lock);
__acquire(&send_cq->lock);
} else {
__acquire(&send_cq->lock);
@@ -1509,21 +1793,21 @@ static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *re
if (recv_cq) {
if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
spin_unlock(&recv_cq->lock);
- spin_unlock_irq(&send_cq->lock);
+ spin_unlock(&send_cq->lock);
} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
__release(&recv_cq->lock);
- spin_unlock_irq(&send_cq->lock);
+ spin_unlock(&send_cq->lock);
} else {
spin_unlock(&send_cq->lock);
- spin_unlock_irq(&recv_cq->lock);
+ spin_unlock(&recv_cq->lock);
}
} else {
__release(&recv_cq->lock);
- spin_unlock_irq(&send_cq->lock);
+ spin_unlock(&send_cq->lock);
}
} else if (recv_cq) {
__release(&send_cq->lock);
- spin_unlock_irq(&recv_cq->lock);
+ spin_unlock(&recv_cq->lock);
} else {
__release(&recv_cq->lock);
__release(&send_cq->lock);
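
These two hunks drop the irq-disabling lock variants because every caller now takes dev->reset_flow_resource_lock with spin_lock_irqsave() first, so interrupts are already off by the time the CQ locks are taken. The CQ locks themselves are always acquired in ascending cqn order, which gives all CPUs one global order and rules out AB-BA deadlock. A minimal sketch of the ordering idiom, assuming two distinct CQs:

static void lock_two_cqs(struct mlx5_ib_cq *a, struct mlx5_ib_cq *b)
{
	/* lower cqn first: every path agrees on the same order */
	if (a->mcq.cqn < b->mcq.cqn) {
		spin_lock(&a->lock);
		spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock(&b->lock);
		spin_lock_nested(&a->lock, SINGLE_DEPTH_NESTING);
	}
}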
@@ -1535,17 +1819,18 @@ static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
return to_mpd(qp->ibqp.pd);
}
-static void get_cqs(struct mlx5_ib_qp *qp,
+static void get_cqs(enum ib_qp_type qp_type,
+ struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
{
- switch (qp->ibqp.qp_type) {
+ switch (qp_type) {
case IB_QPT_XRC_TGT:
*send_cq = NULL;
*recv_cq = NULL;
break;
case MLX5_IB_QPT_REG_UMR:
case IB_QPT_XRC_INI:
- *send_cq = to_mcq(qp->ibqp.send_cq);
+ *send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
*recv_cq = NULL;
break;
@@ -1557,8 +1842,8 @@ static void get_cqs(struct mlx5_ib_qp *qp,
case IB_QPT_RAW_IPV6:
case IB_QPT_RAW_ETHERTYPE:
case IB_QPT_RAW_PACKET:
- *send_cq = to_mcq(qp->ibqp.send_cq);
- *recv_cq = to_mcq(qp->ibqp.recv_cq);
+ *send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
+ *recv_cq = ib_recv_cq ? to_mcq(ib_recv_cq) : NULL;
break;
case IB_QPT_MAX:
@@ -1577,8 +1862,14 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
struct mlx5_ib_cq *send_cq, *recv_cq;
struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
struct mlx5_modify_qp_mbox_in *in;
+ unsigned long flags;
int err;
+ if (qp->ibqp.rwq_ind_tbl) {
+ destroy_rss_raw_qp_tir(dev, qp);
+ return;
+ }
+
base = qp->ibqp.qp_type == IB_QPT_RAW_PACKET ?
&qp->raw_packet_qp.rq.base :
&qp->trans_qp.base;
@@ -1602,17 +1893,28 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
base->mqp.qpn);
}
- get_cqs(qp, &send_cq, &recv_cq);
+ get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
+ &send_cq, &recv_cq);
+
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ mlx5_ib_lock_cqs(send_cq, recv_cq);
+ /* del from lists under both locks above to protect reset flow paths */
+ list_del(&qp->qps_list);
+ if (send_cq)
+ list_del(&qp->cq_send_list);
+
+ if (recv_cq)
+ list_del(&qp->cq_recv_list);
if (qp->create_type == MLX5_QP_KERNEL) {
- mlx5_ib_lock_cqs(send_cq, recv_cq);
__mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
if (send_cq != recv_cq)
__mlx5_ib_cq_clean(send_cq, base->mqp.qpn,
NULL);
- mlx5_ib_unlock_cqs(send_cq, recv_cq);
}
+ mlx5_ib_unlock_cqs(send_cq, recv_cq);
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
destroy_raw_packet_qp(dev, qp);
@@ -2300,7 +2602,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
}
pd = get_pd(qp);
- get_cqs(qp, &send_cq, &recv_cq);
+ get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
+ &send_cq, &recv_cq);
context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
@@ -2349,6 +2652,15 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
else
sqd_event = 0;
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
+ qp->port) - 1;
+ struct mlx5_ib_port *mibport = &dev->port[port_num];
+
+ context->qp_counter_set_usr_page |=
+ cpu_to_be32((u32)(mibport->q_cnt_id) << 24);
+ }
+
if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
context->sq_crq_size |= cpu_to_be16(1 << 4);
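
The new RESET-to-INIT hook above stamps the per-port counter-set id into the top byte of the big-endian qp_counter_set_usr_page word; judging by the field name, the low bits carry the UAR page index. A hedged sketch of the packing, with the layout assumed from the shift used above:

static __be32 pack_counter_set(u8 q_cnt_id, u32 uar_page)
{
	/* counter set id in bits 31:24, page index assumed in bits 23:0 */
	return cpu_to_be32(((u32)q_cnt_id << 24) | (uar_page & 0xffffff));
}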
@@ -2439,6 +2751,9 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int port;
enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED;
+ if (ibqp->rwq_ind_tbl)
+ return -ENOSYS;
+
if (unlikely(ibqp->qp_type == IB_QPT_GSI))
return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask);
@@ -3344,12 +3659,8 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
struct ib_send_wr *wr, unsigned *idx,
int *size, int nreq)
{
- int err = 0;
-
- if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
- err = -ENOMEM;
- return err;
- }
+ if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
+ return -ENOMEM;
*idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
*seg = mlx5_get_send_wqe(qp, *idx);
@@ -3365,7 +3676,7 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
*seg += sizeof(**ctrl);
*size = sizeof(**ctrl) / 16;
- return err;
+ return 0;
}
static void finish_wqe(struct mlx5_ib_qp *qp,
@@ -3397,6 +3708,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
{
struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_ib_qp *qp;
struct mlx5_ib_mr *mr;
struct mlx5_wqe_data_seg *dpseg;
@@ -3424,6 +3736,13 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
spin_lock_irqsave(&qp->sq.lock, flags);
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ err = -EIO;
+ *bad_wr = wr;
+ nreq = 0;
+ goto out;
+ }
+
for (nreq = 0; wr; nreq++, wr = wr->next) {
if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
mlx5_ib_warn(dev, "\n");
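
The early exit added above (and repeated in post_recv and post_srq_recv below) makes the post verbs fail fast once the device has entered MLX5_DEVICE_STATE_INTERNAL_ERROR: nothing is queued, the first unposted WR is reported through *bad_wr, and nreq = 0 keeps the shared out: path from ringing the doorbell. The shape of the pattern:

spin_lock_irqsave(&qp->sq.lock, flags);
if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
	err = -EIO;	/* device is dead; don't touch the queue */
	*bad_wr = wr;	/* nothing was consumed */
	nreq = 0;	/* suppress the doorbell in out: */
	goto out;
}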
@@ -3436,7 +3755,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
num_sge = wr->num_sge;
if (unlikely(num_sge > qp->sq.max_gs)) {
mlx5_ib_warn(dev, "\n");
- err = -ENOMEM;
+ err = -EINVAL;
*bad_wr = wr;
goto out;
}
@@ -3725,6 +4044,8 @@ int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
struct mlx5_ib_qp *qp = to_mqp(ibqp);
struct mlx5_wqe_data_seg *scat;
struct mlx5_rwqe_sig *sig;
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
unsigned long flags;
int err = 0;
int nreq;
@@ -3736,6 +4057,13 @@ int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
spin_lock_irqsave(&qp->rq.lock, flags);
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ err = -EIO;
+ *bad_wr = wr;
+ nreq = 0;
+ goto out;
+ }
+
ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
for (nreq = 0; wr; nreq++, wr = wr->next) {
@@ -4055,6 +4383,9 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int err = 0;
u8 raw_packet_qp_state;
+ if (ibqp->rwq_ind_tbl)
+ return -ENOSYS;
+
if (unlikely(ibqp->qp_type == IB_QPT_GSI))
return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask,
qp_init_attr);
@@ -4164,3 +4495,322 @@ int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
return 0;
}
+
+static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
+ struct ib_wq_init_attr *init_attr)
+{
+ struct mlx5_ib_dev *dev;
+ __be64 *rq_pas0;
+ void *in;
+ void *rqc;
+ void *wq;
+ int inlen;
+ int err;
+
+ dev = to_mdev(pd->device);
+
+ inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas;
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
+ MLX5_SET(rqc, rqc, mem_rq_type,
+ MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
+ MLX5_SET(rqc, rqc, user_index, rwq->user_index);
+ MLX5_SET(rqc, rqc, cqn, to_mcq(init_attr->cq)->mcq.cqn);
+ MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
+ MLX5_SET(rqc, rqc, flush_in_error_en, 1);
+ wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+ MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
+ MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride);
+ MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size);
+ MLX5_SET(wq, wq, pd, to_mpd(pd)->pdn);
+ MLX5_SET(wq, wq, page_offset, rwq->rq_page_offset);
+ MLX5_SET(wq, wq, log_wq_pg_sz, rwq->log_page_size);
+ MLX5_SET(wq, wq, wq_signature, rwq->wq_sig);
+ MLX5_SET64(wq, wq, dbr_addr, rwq->db.dma);
+ rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
+ mlx5_ib_populate_pas(dev, rwq->umem, rwq->page_shift, rq_pas0, 0);
+ err = mlx5_core_create_rq(dev->mdev, in, inlen, &rwq->rqn);
+ kvfree(in);
+ return err;
+}
+
+static int set_user_rq_size(struct mlx5_ib_dev *dev,
+ struct ib_wq_init_attr *wq_init_attr,
+ struct mlx5_ib_create_wq *ucmd,
+ struct mlx5_ib_rwq *rwq)
+{
+ /* Sanity check RQ size before proceeding */
+ if (wq_init_attr->max_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_wq_sz)))
+ return -EINVAL;
+
+ if (!ucmd->rq_wqe_count)
+ return -EINVAL;
+
+ rwq->wqe_count = ucmd->rq_wqe_count;
+ rwq->wqe_shift = ucmd->rq_wqe_shift;
+ rwq->buf_size = (rwq->wqe_count << rwq->wqe_shift);
+ rwq->log_rq_stride = rwq->wqe_shift;
+ rwq->log_rq_size = ilog2(rwq->wqe_count);
+ return 0;
+}
+
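
set_user_rq_size() derives everything from the two user-supplied values: the buffer size is count << shift, and the log sizes feed straight into the RQ context built in create_rq(). A worked example with hypothetical values:

u32 wqe_count = 256;	/* ucmd->rq_wqe_count */
u32 wqe_shift = 7;	/* 128-byte stride */
size_t buf_size = (size_t)wqe_count << wqe_shift;	/* 32768 bytes */
u32 log_rq_stride = wqe_shift;				/* 7 */
u32 log_rq_size = ilog2(wqe_count);			/* 8 */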
+static int prepare_user_rq(struct ib_pd *pd,
+ struct ib_wq_init_attr *init_attr,
+ struct ib_udata *udata,
+ struct mlx5_ib_rwq *rwq)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_ib_create_wq ucmd = {};
+ int err;
+ size_t required_cmd_sz;
+
+ required_cmd_sz = offsetof(typeof(ucmd), reserved) + sizeof(ucmd.reserved);
+ if (udata->inlen < required_cmd_sz) {
+ mlx5_ib_dbg(dev, "invalid inlen\n");
+ return -EINVAL;
+ }
+
+ if (udata->inlen > sizeof(ucmd) &&
+ !ib_is_udata_cleared(udata, sizeof(ucmd),
+ udata->inlen - sizeof(ucmd))) {
+ mlx5_ib_dbg(dev, "inlen is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
+ mlx5_ib_dbg(dev, "copy failed\n");
+ return -EFAULT;
+ }
+
+ if (ucmd.comp_mask) {
+ mlx5_ib_dbg(dev, "invalid comp mask\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (ucmd.reserved) {
+ mlx5_ib_dbg(dev, "invalid reserved\n");
+ return -EOPNOTSUPP;
+ }
+
+ err = set_user_rq_size(dev, init_attr, &ucmd, rwq);
+ if (err) {
+ mlx5_ib_dbg(dev, "err %d\n", err);
+ return err;
+ }
+
+ err = create_user_rq(dev, pd, rwq, &ucmd);
+ if (err) {
+ mlx5_ib_dbg(dev, "err %d\n", err);
+ return err;
+ }
+
+ rwq->user_index = ucmd.user_index;
+ return 0;
+}
+
+struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
+ struct ib_wq_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev;
+ struct mlx5_ib_rwq *rwq;
+ struct mlx5_ib_create_wq_resp resp = {};
+ size_t min_resp_len;
+ int err;
+
+ if (!udata)
+ return ERR_PTR(-ENOSYS);
+
+ min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
+ if (udata->outlen && udata->outlen < min_resp_len)
+ return ERR_PTR(-EINVAL);
+
+ dev = to_mdev(pd->device);
+ switch (init_attr->wq_type) {
+ case IB_WQT_RQ:
+ rwq = kzalloc(sizeof(*rwq), GFP_KERNEL);
+ if (!rwq)
+ return ERR_PTR(-ENOMEM);
+ err = prepare_user_rq(pd, init_attr, udata, rwq);
+ if (err)
+ goto err;
+ err = create_rq(rwq, pd, init_attr);
+ if (err)
+ goto err_user_rq;
+ break;
+ default:
+ mlx5_ib_dbg(dev, "unsupported wq type %d\n",
+ init_attr->wq_type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ rwq->ibwq.wq_num = rwq->rqn;
+ rwq->ibwq.state = IB_WQS_RESET;
+ if (udata->outlen) {
+ resp.response_length = offsetof(typeof(resp), response_length) +
+ sizeof(resp.response_length);
+ err = ib_copy_to_udata(udata, &resp, resp.response_length);
+ if (err)
+ goto err_copy;
+ }
+
+ return &rwq->ibwq;
+
+err_copy:
+ mlx5_core_destroy_rq(dev->mdev, rwq->rqn);
+err_user_rq:
+ destroy_user_rq(pd, rwq);
+err:
+ kfree(rwq);
+ return ERR_PTR(err);
+}
+
+int mlx5_ib_destroy_wq(struct ib_wq *wq)
+{
+ struct mlx5_ib_dev *dev = to_mdev(wq->device);
+ struct mlx5_ib_rwq *rwq = to_mrwq(wq);
+
+ mlx5_core_destroy_rq(dev->mdev, rwq->rqn);
+ destroy_user_rq(wq->pd, rwq);
+ kfree(rwq);
+
+ return 0;
+}
+
+struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
+ struct ib_rwq_ind_table_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_ib_rwq_ind_table *rwq_ind_tbl;
+ int sz = 1 << init_attr->log_ind_tbl_size;
+ struct mlx5_ib_create_rwq_ind_tbl_resp resp = {};
+ size_t min_resp_len;
+ int inlen;
+ int err;
+ int i;
+ u32 *in;
+ void *rqtc;
+
+ if (udata->inlen > 0 &&
+ !ib_is_udata_cleared(udata, 0,
+ udata->inlen))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
+ if (udata->outlen && udata->outlen < min_resp_len)
+ return ERR_PTR(-EINVAL);
+
+ rwq_ind_tbl = kzalloc(sizeof(*rwq_ind_tbl), GFP_KERNEL);
+ if (!rwq_ind_tbl)
+ return ERR_PTR(-ENOMEM);
+
+ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
+
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
+ MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
+
+ for (i = 0; i < sz; i++)
+ MLX5_SET(rqtc, rqtc, rq_num[i], init_attr->ind_tbl[i]->wq_num);
+
+ err = mlx5_core_create_rqt(dev->mdev, in, inlen, &rwq_ind_tbl->rqtn);
+ kvfree(in);
+
+ if (err)
+ goto err;
+
+ rwq_ind_tbl->ib_rwq_ind_tbl.ind_tbl_num = rwq_ind_tbl->rqtn;
+ if (udata->outlen) {
+ resp.response_length = offsetof(typeof(resp), response_length) +
+ sizeof(resp.response_length);
+ err = ib_copy_to_udata(udata, &resp, resp.response_length);
+ if (err)
+ goto err_copy;
+ }
+
+ return &rwq_ind_tbl->ib_rwq_ind_tbl;
+
+err_copy:
+ mlx5_core_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn);
+err:
+ kfree(rwq_ind_tbl);
+ return ERR_PTR(err);
+}
+
+int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
+{
+ struct mlx5_ib_rwq_ind_table *rwq_ind_tbl = to_mrwq_ind_table(ib_rwq_ind_tbl);
+ struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_tbl->device);
+
+ mlx5_core_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn);
+
+ kfree(rwq_ind_tbl);
+ return 0;
+}
+
+int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
+ u32 wq_attr_mask, struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(wq->device);
+ struct mlx5_ib_rwq *rwq = to_mrwq(wq);
+ struct mlx5_ib_modify_wq ucmd = {};
+ size_t required_cmd_sz;
+ int curr_wq_state;
+ int wq_state;
+ int inlen;
+ int err;
+ void *rqc;
+ void *in;
+
+ required_cmd_sz = offsetof(typeof(ucmd), reserved) + sizeof(ucmd.reserved);
+ if (udata->inlen < required_cmd_sz)
+ return -EINVAL;
+
+ if (udata->inlen > sizeof(ucmd) &&
+ !ib_is_udata_cleared(udata, sizeof(ucmd),
+ udata->inlen - sizeof(ucmd)))
+ return -EOPNOTSUPP;
+
+ if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)))
+ return -EFAULT;
+
+ if (ucmd.comp_mask || ucmd.reserved)
+ return -EOPNOTSUPP;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
+ curr_wq_state = (wq_attr_mask & IB_WQ_CUR_STATE) ?
+ wq_attr->curr_wq_state : wq->state;
+ wq_state = (wq_attr_mask & IB_WQ_STATE) ?
+ wq_attr->wq_state : curr_wq_state;
+ if (curr_wq_state == IB_WQS_ERR)
+ curr_wq_state = MLX5_RQC_STATE_ERR;
+ if (wq_state == IB_WQS_ERR)
+ wq_state = MLX5_RQC_STATE_ERR;
+ MLX5_SET(modify_rq_in, in, rq_state, curr_wq_state);
+ MLX5_SET(rqc, rqc, state, wq_state);
+
+ err = mlx5_core_modify_rq(dev->mdev, rwq->rqn, in, inlen);
+ kvfree(in);
+ if (!err)
+ rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state;
+
+ return err;
+}
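
In mlx5_ib_modify_wq() above, the IB WQ states pass through to the RQ hardware states unchanged except for IB_WQS_ERR, whose numeric value differs from MLX5_RQC_STATE_ERR and is translated explicitly. The implied mapping as a helper (a sketch; the in-line form above is what the patch ships):

static int ib_wqs_to_rqc_state(enum ib_wq_state state)
{
	/* RESET and RDY line up numerically; only ERR needs remapping */
	return state == IB_WQS_ERR ? MLX5_RQC_STATE_ERR : (int)state;
}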
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 3b2ddd64a..ed6ac5235 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -74,14 +74,12 @@ static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type)
}
static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
- struct mlx5_create_srq_mbox_in **in,
- struct ib_udata *udata, int buf_size, int *inlen,
- int is_xrc)
+ struct mlx5_srq_attr *in,
+ struct ib_udata *udata, int buf_size)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_create_srq ucmd = {};
size_t ucmdlen;
- void *xsrqc;
int err;
int npages;
int page_shift;
@@ -104,7 +102,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
udata->inlen - sizeof(ucmd)))
return -EINVAL;
- if (is_xrc) {
+ if (in->type == IB_SRQT_XRC) {
err = get_srq_user_index(to_mucontext(pd->uobject->context),
&ucmd, udata->inlen, &uidx);
if (err)
@@ -130,14 +128,13 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
goto err_umem;
}
- *inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
- *in = mlx5_vzalloc(*inlen);
- if (!(*in)) {
+ in->pas = mlx5_vzalloc(sizeof(*in->pas) * ncont);
+ if (!in->pas) {
err = -ENOMEM;
goto err_umem;
}
- mlx5_ib_populate_pas(dev, srq->umem, page_shift, (*in)->pas, 0);
+ mlx5_ib_populate_pas(dev, srq->umem, page_shift, in->pas, 0);
err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context),
ucmd.db_addr, &srq->db);
@@ -146,20 +143,16 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
goto err_in;
}
- (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
- (*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26);
-
- if ((MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) &&
- is_xrc){
- xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
- xrc_srq_context_entry);
- MLX5_SET(xrc_srqc, xsrqc, user_index, uidx);
- }
+ in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+ in->page_offset = offset;
+ if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
+ in->type == IB_SRQT_XRC)
+ in->user_index = uidx;
return 0;
err_in:
- kvfree(*in);
+ kvfree(in->pas);
err_umem:
ib_umem_release(srq->umem);
@@ -168,15 +161,13 @@ err_umem:
}
static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
- struct mlx5_create_srq_mbox_in **in, int buf_size,
- int *inlen, int is_xrc)
+ struct mlx5_srq_attr *in, int buf_size)
{
int err;
int i;
struct mlx5_wqe_srq_next_seg *next;
int page_shift;
int npages;
- void *xsrqc;
err = mlx5_db_alloc(dev->mdev, &srq->db);
if (err) {
@@ -204,13 +195,12 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
npages = DIV_ROUND_UP(srq->buf.npages, 1 << (page_shift - PAGE_SHIFT));
mlx5_ib_dbg(dev, "buf_size %d, page_shift %d, npages %d, calc npages %d\n",
buf_size, page_shift, srq->buf.npages, npages);
- *inlen = sizeof(**in) + sizeof(*(*in)->pas) * npages;
- *in = mlx5_vzalloc(*inlen);
- if (!*in) {
+ in->pas = mlx5_vzalloc(sizeof(*in->pas) * npages);
+ if (!in->pas) {
err = -ENOMEM;
goto err_buf;
}
- mlx5_fill_page_array(&srq->buf, (*in)->pas);
+ mlx5_fill_page_array(&srq->buf, in->pas);
srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL);
if (!srq->wrid) {
@@ -221,20 +211,15 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
}
srq->wq_sig = !!srq_signature;
- (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
-
- if ((MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) &&
- is_xrc){
- xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
- xrc_srq_context_entry);
- /* 0xffffff means we ask to work with cqe version 0 */
- MLX5_SET(xrc_srqc, xsrqc, user_index, MLX5_IB_DEFAULT_UIDX);
- }
+ in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+ if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
+ in->type == IB_SRQT_XRC)
+ in->user_index = MLX5_IB_DEFAULT_UIDX;
return 0;
err_in:
- kvfree(*in);
+ kvfree(in->pas);
err_buf:
mlx5_buf_free(dev->mdev, &srq->buf);
@@ -267,10 +252,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
int desc_size;
int buf_size;
int err;
- struct mlx5_create_srq_mbox_in *uninitialized_var(in);
- int uninitialized_var(inlen);
- int is_xrc;
- u32 flgs, xrcdn;
+ struct mlx5_srq_attr in = {0};
__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
/* Sanity check SRQ size before proceeding */
@@ -302,14 +284,10 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
srq->msrq.max_avail_gather);
- is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
-
if (pd->uobject)
- err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen,
- is_xrc);
+ err = create_srq_user(pd, srq, &in, udata, buf_size);
else
- err = create_srq_kernel(dev, srq, &in, buf_size, &inlen,
- is_xrc);
+ err = create_srq_kernel(dev, srq, &in, buf_size);
if (err) {
mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
@@ -317,23 +295,23 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
goto err_srq;
}
- in->ctx.state_log_sz = ilog2(srq->msrq.max);
- flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24;
- xrcdn = 0;
- if (is_xrc) {
- xrcdn = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
- in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(init_attr->ext.xrc.cq)->mcq.cqn);
+ in.type = init_attr->srq_type;
+ in.log_size = ilog2(srq->msrq.max);
+ in.wqe_shift = srq->msrq.wqe_shift - 4;
+ if (srq->wq_sig)
+ in.flags |= MLX5_SRQ_FLAG_WQ_SIG;
+ if (init_attr->srq_type == IB_SRQT_XRC) {
+ in.xrcd = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
+ in.cqn = to_mcq(init_attr->ext.xrc.cq)->mcq.cqn;
} else if (init_attr->srq_type == IB_SRQT_BASIC) {
- xrcdn = to_mxrcd(dev->devr.x0)->xrcdn;
- in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(dev->devr.c0)->mcq.cqn);
+ in.xrcd = to_mxrcd(dev->devr.x0)->xrcdn;
+ in.cqn = to_mcq(dev->devr.c0)->mcq.cqn;
}
- in->ctx.flags_xrcd = cpu_to_be32((flgs & 0xFF000000) | (xrcdn & 0xFFFFFF));
-
- in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn);
- in->ctx.db_record = cpu_to_be64(srq->db.dma);
- err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen, is_xrc);
- kvfree(in);
+ in.pd = to_mpd(pd)->pdn;
+ in.db_record = srq->db.dma;
+ err = mlx5_core_create_srq(dev->mdev, &srq->msrq, &in);
+ kvfree(in.pas);
if (err) {
mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
goto err_usr_kern_srq;
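
The SRQ rework above replaces per-caller big-endian mailbox packing with a CPU-endian struct mlx5_srq_attr that mlx5_core_create_srq() serializes itself. A condensed sketch of the new calling convention (max_wqes, wqe_shift, pdn, db_dma, mdev and msrq are hypothetical stand-ins):

struct mlx5_srq_attr in = {0};
int err;

in.type = IB_SRQT_BASIC;
in.log_size = ilog2(max_wqes);
in.wqe_shift = wqe_shift - 4;
in.pd = pdn;
in.db_record = db_dma;
/* in.pas is filled by create_srq_user()/create_srq_kernel() */
err = mlx5_core_create_srq(mdev, &msrq, &in);
kvfree(in.pas);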
@@ -401,7 +379,7 @@ int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
struct mlx5_ib_srq *srq = to_msrq(ibsrq);
int ret;
- struct mlx5_query_srq_mbox_out *out;
+ struct mlx5_srq_attr *out;
out = kzalloc(sizeof(*out), GFP_KERNEL);
if (!out)
@@ -411,7 +389,7 @@ int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
if (ret)
goto out_box;
- srq_attr->srq_limit = be16_to_cpu(out->ctx.lwm);
+ srq_attr->srq_limit = out->lwm;
srq_attr->max_wr = srq->msrq.max - 1;
srq_attr->max_sge = srq->msrq.max_gs;
@@ -458,6 +436,8 @@ int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
struct mlx5_ib_srq *srq = to_msrq(ibsrq);
struct mlx5_wqe_srq_next_seg *next;
struct mlx5_wqe_data_seg *scat;
+ struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
unsigned long flags;
int err = 0;
int nreq;
@@ -465,6 +445,12 @@ int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
spin_lock_irqsave(&srq->lock, flags);
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ err = -EIO;
+ *bad_wr = wr;
+ goto out;
+ }
+
for (nreq = 0; wr; nreq++, wr = wr->next) {
if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
err = -EINVAL;
@@ -507,7 +493,7 @@ int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
*srq->db.db = cpu_to_be32(srq->wqe_ctr);
}
-
+out:
spin_unlock_irqrestore(&srq->lock, flags);
return err;
diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h
index 61bc308bb..188dac430 100644
--- a/drivers/infiniband/hw/mlx5/user.h
+++ b/drivers/infiniband/hw/mlx5/user.h
@@ -46,6 +46,10 @@ enum {
MLX5_SRQ_FLAG_SIGNATURE = 1 << 0,
};
+enum {
+ MLX5_WQ_FLAG_SIGNATURE = 1 << 0,
+};
+
/* Increment this value if any changes that break userspace ABI
* compatibility are made.
@@ -79,6 +83,10 @@ enum mlx5_ib_alloc_ucontext_resp_mask {
MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
};
+enum mlx5_user_cmds_supp_uhw {
+ MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0,
+};
+
struct mlx5_ib_alloc_ucontext_resp {
__u32 qp_tab_size;
__u32 bf_reg_size;
@@ -94,8 +102,8 @@ struct mlx5_ib_alloc_ucontext_resp {
__u32 comp_mask;
__u32 response_length;
__u8 cqe_version;
- __u8 reserved2;
- __u16 reserved3;
+ __u8 cmds_supp_uhw;
+ __u16 reserved2;
__u64 hca_core_clock_offset;
};
@@ -103,6 +111,22 @@ struct mlx5_ib_alloc_pd_resp {
__u32 pdn;
};
+struct mlx5_ib_tso_caps {
+ __u32 max_tso; /* Maximum tso payload size in bytes */
+
+ /* The corresponding bit is set if a qp type from
+ * 'enum ib_qp_type' is supported, e.g.
+ * supported_qpts |= 1 << IB_QPT_UD
+ */
+ __u32 supported_qpts;
+};
+
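
supported_qpts is a bitmap indexed by enum ib_qp_type, as the comment above notes. A short userspace-style consumer sketch with hypothetical values:

struct mlx5_ib_tso_caps caps = {
	.max_tso = 65536,		/* hypothetical */
	.supported_qpts = 1 << IB_QPT_UD,
};

if (caps.supported_qpts & (1 << IB_QPT_UD))
	printf("UD TSO payload up to %u bytes\n", caps.max_tso);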
+struct mlx5_ib_query_device_resp {
+ __u32 comp_mask;
+ __u32 response_length;
+ struct mlx5_ib_tso_caps tso_caps;
+};
+
struct mlx5_ib_create_cq {
__u64 buf_addr;
__u64 db_addr;
@@ -148,6 +172,40 @@ struct mlx5_ib_create_qp {
__u64 sq_buf_addr;
};
+/* RX Hash function flags */
+enum mlx5_rx_hash_function_flags {
+ MLX5_RX_HASH_FUNC_TOEPLITZ = 1 << 0,
+};
+
+/*
+ * RX Hash flags: these flags select which fields of an incoming packet
+ * participate in RX Hash. Each flag represents a certain packet field;
+ * when the flag is set, the field it represents is included in the
+ * RX Hash calculation.
+ * Note: *IPV4 and *IPV6 flags can't be enabled together on the same QP,
+ * and *TCP and *UDP flags can't be enabled together on the same QP.
+ */
+enum mlx5_rx_hash_fields {
+ MLX5_RX_HASH_SRC_IPV4 = 1 << 0,
+ MLX5_RX_HASH_DST_IPV4 = 1 << 1,
+ MLX5_RX_HASH_SRC_IPV6 = 1 << 2,
+ MLX5_RX_HASH_DST_IPV6 = 1 << 3,
+ MLX5_RX_HASH_SRC_PORT_TCP = 1 << 4,
+ MLX5_RX_HASH_DST_PORT_TCP = 1 << 5,
+ MLX5_RX_HASH_SRC_PORT_UDP = 1 << 6,
+ MLX5_RX_HASH_DST_PORT_UDP = 1 << 7
+};
+
+struct mlx5_ib_create_qp_rss {
+ __u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
+ __u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
+ __u8 rx_key_len; /* valid only for Toeplitz */
+ __u8 reserved[6];
+ __u8 rx_hash_key[128]; /* valid only for Toeplitz */
+ __u32 comp_mask;
+ __u32 reserved1;
+};
+
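
A sketch of how a userspace provider might fill this command for Toeplitz hashing over IPv4 addresses and TCP ports; the 40-byte key length is an assumption that must match the device's rx_hash_toeplitz_key field size, which the kernel verifies in create_rss_raw_qp_tir():

struct mlx5_ib_create_qp_rss cmd = {0};
static const __u8 key[40] = { /* per-flow secret key */ };

cmd.rx_hash_function = MLX5_RX_HASH_FUNC_TOEPLITZ;
cmd.rx_hash_fields_mask = MLX5_RX_HASH_SRC_IPV4 | MLX5_RX_HASH_DST_IPV4 |
	MLX5_RX_HASH_SRC_PORT_TCP | MLX5_RX_HASH_DST_PORT_TCP;
cmd.rx_key_len = sizeof(key);
memcpy(cmd.rx_hash_key, key, sizeof(key));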
struct mlx5_ib_create_qp_resp {
__u32 uuar_index;
};
@@ -159,6 +217,32 @@ struct mlx5_ib_alloc_mw {
__u16 reserved2;
};
+struct mlx5_ib_create_wq {
+ __u64 buf_addr;
+ __u64 db_addr;
+ __u32 rq_wqe_count;
+ __u32 rq_wqe_shift;
+ __u32 user_index;
+ __u32 flags;
+ __u32 comp_mask;
+ __u32 reserved;
+};
+
+struct mlx5_ib_create_wq_resp {
+ __u32 response_length;
+ __u32 reserved;
+};
+
+struct mlx5_ib_create_rwq_ind_tbl_resp {
+ __u32 response_length;
+ __u32 reserved;
+};
+
+struct mlx5_ib_modify_wq {
+ __u32 comp_mask;
+ __u32 reserved;
+};
+
static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
struct mlx5_ib_create_qp *ucmd,
int inlen,
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 9866c35cc..da2335f7f 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1081,16 +1081,6 @@ static ssize_t show_rev(struct device *device, struct device_attribute *attr,
return sprintf(buf, "%x\n", dev->rev_id);
}
-static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct mthca_dev *dev =
- container_of(device, struct mthca_dev, ib_dev.dev);
- return sprintf(buf, "%d.%d.%d\n", (int) (dev->fw_ver >> 32),
- (int) (dev->fw_ver >> 16) & 0xffff,
- (int) dev->fw_ver & 0xffff);
-}
-
static ssize_t show_hca(struct device *device, struct device_attribute *attr,
char *buf)
{
@@ -1120,13 +1110,11 @@ static ssize_t show_board(struct device *device, struct device_attribute *attr,
}
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
static struct device_attribute *mthca_dev_attributes[] = {
&dev_attr_hw_rev,
- &dev_attr_fw_ver,
&dev_attr_hca_type,
&dev_attr_board_id
};
@@ -1187,6 +1175,17 @@ static int mthca_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
+static void get_dev_fw_str(struct ib_device *device, char *str,
+ size_t str_len)
+{
+ struct mthca_dev *dev =
+ container_of(device, struct mthca_dev, ib_dev);
+ snprintf(str, str_len, "%d.%d.%d",
+ (int) (dev->fw_ver >> 32),
+ (int) (dev->fw_ver >> 16) & 0xffff,
+ (int) dev->fw_ver & 0xffff);
+}
+
int mthca_register_device(struct mthca_dev *dev)
{
int ret;
@@ -1266,6 +1265,7 @@ int mthca_register_device(struct mthca_dev *dev)
dev->ib_dev.reg_user_mr = mthca_reg_user_mr;
dev->ib_dev.dereg_mr = mthca_dereg_mr;
dev->ib_dev.get_port_immutable = mthca_port_immutable;
+ dev->ib_dev.get_dev_fw_str = get_dev_fw_str;
if (dev->mthca_flags & MTHCA_FLAG_FMR) {
dev->ib_dev.alloc_fmr = mthca_alloc_fmr;
diff --git a/drivers/infiniband/hw/mthca/mthca_reset.c b/drivers/infiniband/hw/mthca/mthca_reset.c
index 74c6a9426..6727af27c 100644
--- a/drivers/infiniband/hw/mthca/mthca_reset.c
+++ b/drivers/infiniband/hw/mthca/mthca_reset.c
@@ -98,7 +98,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENOMEM;
mthca_err(mdev, "Couldn't allocate memory to save HCA "
"PCI header, aborting.\n");
- goto out;
+ goto put_dev;
}
for (i = 0; i < 64; ++i) {
@@ -108,7 +108,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENODEV;
mthca_err(mdev, "Couldn't save HCA "
"PCI header, aborting.\n");
- goto out;
+ goto free_hca;
}
}
@@ -121,7 +121,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENOMEM;
mthca_err(mdev, "Couldn't allocate memory to save HCA "
"bridge PCI header, aborting.\n");
- goto out;
+ goto free_hca;
}
for (i = 0; i < 64; ++i) {
@@ -131,7 +131,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENODEV;
mthca_err(mdev, "Couldn't save HCA bridge "
"PCI header, aborting.\n");
- goto out;
+ goto free_bh;
}
}
bridge_pcix_cap = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
@@ -139,7 +139,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENODEV;
mthca_err(mdev, "Couldn't locate HCA bridge "
"PCI-X capability, aborting.\n");
- goto out;
+ goto free_bh;
}
}
@@ -152,7 +152,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENOMEM;
mthca_err(mdev, "Couldn't map HCA reset register, "
"aborting.\n");
- goto out;
+ goto free_bh;
}
writel(MTHCA_RESET_VALUE, reset);
@@ -172,7 +172,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENODEV;
mthca_err(mdev, "Couldn't access HCA after reset, "
"aborting.\n");
- goto out;
+ goto free_bh;
}
if (v != 0xffffffff)
@@ -184,7 +184,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENODEV;
mthca_err(mdev, "PCI device did not come back after reset, "
"aborting.\n");
- goto out;
+ goto free_bh;
}
good:
@@ -195,14 +195,14 @@ good:
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA bridge Upstream "
"split transaction control, aborting.\n");
- goto out;
+ goto free_bh;
}
if (pci_write_config_dword(bridge, bridge_pcix_cap + 0xc,
bridge_header[(bridge_pcix_cap + 0xc) / 4])) {
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA bridge Downstream "
"split transaction control, aborting.\n");
- goto out;
+ goto free_bh;
}
/*
* Bridge control register is at 0x3e, so we'll
@@ -216,7 +216,7 @@ good:
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA bridge reg %x, "
"aborting.\n", i);
- goto out;
+ goto free_bh;
}
}
@@ -225,7 +225,7 @@ good:
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA bridge COMMAND, "
"aborting.\n");
- goto out;
+ goto free_bh;
}
}
@@ -235,7 +235,7 @@ good:
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA PCI-X "
"command register, aborting.\n");
- goto out;
+ goto free_bh;
}
}
@@ -246,7 +246,7 @@ good:
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA PCI Express "
"Device Control register, aborting.\n");
- goto out;
+ goto free_bh;
}
linkctl = hca_header[(hca_pcie_cap + PCI_EXP_LNKCTL) / 4];
if (pcie_capability_write_word(mdev->pdev, PCI_EXP_LNKCTL,
@@ -254,7 +254,7 @@ good:
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA PCI Express "
"Link control register, aborting.\n");
- goto out;
+ goto free_bh;
}
}
@@ -266,7 +266,7 @@ good:
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA reg %x, "
"aborting.\n", i);
- goto out;
+ goto free_bh;
}
}
@@ -275,14 +275,12 @@ good:
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA COMMAND, "
"aborting.\n");
- goto out;
}
-
-out:
- if (bridge)
- pci_dev_put(bridge);
+free_bh:
kfree(bridge_header);
+free_hca:
kfree(hca_header);
-
+put_dev:
+ pci_dev_put(bridge);
return err;
}
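
The relabelled error paths above give mthca_reset() a classic layered unwind: each label releases exactly what was live when its step failed, instead of one out: label that freed everything conditionally. The shape, reduced to a self-contained sketch (kfree(NULL) and pci_dev_put(NULL) are no-ops, which is why the success path can fall through all three labels):

static int reset_demo(struct pci_dev *pdev)
{
	struct pci_dev *bridge = NULL;	/* may remain NULL, as in mthca */
	u32 *hca_header, *bridge_header = NULL;
	int err = 0;

	hca_header = kmalloc(256, GFP_KERNEL);
	if (!hca_header) {
		err = -ENOMEM;
		goto put_dev;
	}
	bridge_header = kmalloc(256, GFP_KERNEL);
	if (!bridge_header) {
		err = -ENOMEM;
		goto free_hca;
	}
	if (pci_channel_offline(pdev)) {	/* stand-in failing step */
		err = -ENODEV;
		goto free_bh;
	}
free_bh:
	kfree(bridge_header);
free_hca:
	kfree(hca_header);
put_dev:
	pci_dev_put(bridge);
	return err;
}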
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 464d6da5f..bd6912573 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -2606,23 +2606,6 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
/**
- * show_fw_ver
- */
-static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct nes_ib_device *nesibdev =
- container_of(dev, struct nes_ib_device, ibdev.dev);
- struct nes_vnic *nesvnic = nesibdev->nesvnic;
-
- nes_debug(NES_DBG_INIT, "\n");
- return sprintf(buf, "%u.%u\n",
- (nesvnic->nesdev->nesadapter->firmware_version >> 16),
- (nesvnic->nesdev->nesadapter->firmware_version & 0x000000ff));
-}
-
-
-/**
* show_hca
*/
static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
@@ -2645,13 +2628,11 @@ static ssize_t show_board(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
static struct device_attribute *nes_dev_attributes[] = {
&dev_attr_hw_rev,
- &dev_attr_fw_ver,
&dev_attr_hca_type,
&dev_attr_board_id
};
@@ -3703,6 +3684,19 @@ static int nes_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
+static void get_dev_fw_str(struct ib_device *dev, char *str,
+ size_t str_len)
+{
+ struct nes_ib_device *nesibdev =
+ container_of(dev, struct nes_ib_device, ibdev);
+ struct nes_vnic *nesvnic = nesibdev->nesvnic;
+
+ nes_debug(NES_DBG_INIT, "\n");
+ snprintf(str, str_len, "%u.%u",
+ (nesvnic->nesdev->nesadapter->firmware_version >> 16),
+ (nesvnic->nesdev->nesadapter->firmware_version & 0x000000ff));
+}
+
/**
* nes_init_ofa_device
*/
@@ -3802,6 +3796,7 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
nesibdev->ibdev.iwcm->create_listen = nes_create_listen;
nesibdev->ibdev.iwcm->destroy_listen = nes_destroy_listen;
nesibdev->ibdev.get_port_immutable = nes_port_immutable;
+ nesibdev->ibdev.get_dev_fw_str = get_dev_fw_str;
memcpy(nesibdev->ibdev.iwcm->ifname, netdev->name,
sizeof(nesibdev->ibdev.iwcm->ifname));
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 16740dcb8..67fc0b685 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1156,18 +1156,18 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->max_srq =
(rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET;
- attr->max_send_sge = ((rsp->max_write_send_sge &
+ attr->max_send_sge = ((rsp->max_recv_send_sge &
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT);
- attr->max_recv_sge = (rsp->max_write_send_sge &
- OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
- OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT;
+ attr->max_recv_sge = (rsp->max_recv_send_sge &
+ OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT;
attr->max_srq_sge = (rsp->max_srq_rqe_sge &
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
- attr->max_rdma_sge = (rsp->max_write_send_sge &
- OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK) >>
- OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT;
+ attr->max_rdma_sge = (rsp->max_wr_rd_sge &
+ OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_SHIFT;
attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
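
The ocrdma fix above matters because several 16-bit limits share one 32-bit mailbox word; reading the receive limit with the send mask silently returned the wrong field. The mask/shift idiom, using the layout from the ocrdma_sli.h hunk below:

u32 word = 0x00200020;			/* hypothetical max_recv_send_sge */
u16 max_send = word & 0xFFFF;		/* SEND_SGE: shift 0 */
u16 max_recv = (word >> 16) & 0xFFFF;	/* RECV_SGE: shift 16 */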
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 3d75f65ce..07d0c6c5b 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -107,6 +107,14 @@ static int ocrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
+static void get_dev_fw_str(struct ib_device *device, char *str,
+ size_t str_len)
+{
+ struct ocrdma_dev *dev = get_ocrdma_dev(device);
+
+ snprintf(str, str_len, "%s", &dev->attr.fw_ver[0]);
+}
+
static int ocrdma_register_device(struct ocrdma_dev *dev)
{
strlcpy(dev->ibdev.name, "ocrdma%d", IB_DEVICE_NAME_MAX);
@@ -193,6 +201,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
dev->ibdev.process_mad = ocrdma_process_mad;
dev->ibdev.get_port_immutable = ocrdma_port_immutable;
+ dev->ibdev.get_dev_fw_str = get_dev_fw_str;
if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
dev->ibdev.uverbs_cmd_mask |=
@@ -262,14 +271,6 @@ static ssize_t show_rev(struct device *device, struct device_attribute *attr,
return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->nic_info.pdev->vendor);
}
-static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct ocrdma_dev *dev = dev_get_drvdata(device);
-
- return scnprintf(buf, PAGE_SIZE, "%s\n", &dev->attr.fw_ver[0]);
-}
-
static ssize_t show_hca_type(struct device *device,
struct device_attribute *attr, char *buf)
{
@@ -279,12 +280,10 @@ static ssize_t show_hca_type(struct device *device,
}
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL);
static struct device_attribute *ocrdma_attributes[] = {
&dev_attr_hw_rev,
- &dev_attr_fw_ver,
&dev_attr_hca_type
};
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 0efc9662c..37df4481b 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -554,9 +554,9 @@ enum {
OCRDMA_MBX_QUERY_CFG_L3_TYPE_MASK = 0x18,
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT = 0,
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK = 0xFFFF,
- OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT = 16,
- OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK = 0xFFFF <<
- OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT,
+ OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT = 16,
+ OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_MASK = 0xFFFF <<
+ OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT,
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT = 0,
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK = 0xFFFF,
@@ -612,6 +612,8 @@ enum {
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET = 0,
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK = 0xFFFF <<
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET,
+ OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_SHIFT = 0,
+ OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_MASK = 0xFFFF,
};
struct ocrdma_mbx_query_config {
@@ -619,7 +621,7 @@ struct ocrdma_mbx_query_config {
struct ocrdma_mbx_rsp rsp;
u32 qp_srq_cq_ird_ord;
u32 max_pd_ca_ack_delay;
- u32 max_write_send_sge;
+ u32 max_recv_send_sge;
u32 max_ird_ord_per_qp;
u32 max_shared_ird_ord;
u32 max_mr;
@@ -639,6 +641,8 @@ struct ocrdma_mbx_query_config {
u32 max_wqes_rqes_per_q;
u32 max_cq_cqes_per_cq;
u32 max_srq_rqe_sge;
+ u32 max_wr_rd_sge;
+ u32 ird_pgsz_num_pages;
};
struct ocrdma_fw_ver_rsp {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index b1a3d91fe..0aa854737 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -125,8 +125,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
IB_DEVICE_SYS_IMAGE_GUID |
IB_DEVICE_LOCAL_DMA_LKEY |
IB_DEVICE_MEM_MGT_EXTENSIONS;
- attr->max_sge = dev->attr.max_send_sge;
- attr->max_sge_rd = attr->max_sge;
+ attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_recv_sge);
+ attr->max_sge_rd = dev->attr.max_rdma_sge;
attr->max_cq = dev->attr.max_cq;
attr->max_cqe = dev->attr.max_cqe;
attr->max_mr = dev->attr.max_mr;
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index fcdf37913..c3edc033f 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -328,26 +328,12 @@ static ssize_t flash_write(struct file *file, const char __user *buf,
pos = *ppos;
- if (pos != 0) {
- ret = -EINVAL;
- goto bail;
- }
-
- if (count != sizeof(struct qib_flash)) {
- ret = -EINVAL;
- goto bail;
- }
-
- tmp = kmalloc(count, GFP_KERNEL);
- if (!tmp) {
- ret = -ENOMEM;
- goto bail;
- }
+ if (pos != 0 || count != sizeof(struct qib_flash))
+ return -EINVAL;
- if (copy_from_user(tmp, buf, count)) {
- ret = -EFAULT;
- goto bail_tmp;
- }
+ tmp = memdup_user(buf, count);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
dd = private2dd(file);
if (qib_eeprom_write(dd, pos, tmp, count)) {
@@ -361,8 +347,6 @@ static ssize_t flash_write(struct file *file, const char __user *buf,
bail_tmp:
kfree(tmp);
-
-bail:
return ret;
}
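
The qib_fs change above is the standard memdup_user() conversion: one call replaces the kmalloc()/copy_from_user() pair and collapses the two failure modes into a single ERR_PTR. The idiom on its own:

static ssize_t write_demo(const char __user *buf, size_t count)
{
	void *tmp = memdup_user(buf, count);

	if (IS_ERR(tmp))
		return PTR_ERR(tmp);	/* -ENOMEM or -EFAULT */
	/* ... consume tmp ... */
	kfree(tmp);
	return count;
}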
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 7119a7da2..f9b8cd235 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -106,6 +106,49 @@ static u32 credit_table[31] = {
32768 /* 1E */
};
+const struct rvt_operation_params qib_post_parms[RVT_OPERATION_MAX] = {
+[IB_WR_RDMA_WRITE] = {
+ .length = sizeof(struct ib_rdma_wr),
+ .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
+},
+
+[IB_WR_RDMA_READ] = {
+ .length = sizeof(struct ib_rdma_wr),
+ .qpt_support = BIT(IB_QPT_RC),
+ .flags = RVT_OPERATION_ATOMIC,
+},
+
+[IB_WR_ATOMIC_CMP_AND_SWP] = {
+ .length = sizeof(struct ib_atomic_wr),
+ .qpt_support = BIT(IB_QPT_RC),
+ .flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
+},
+
+[IB_WR_ATOMIC_FETCH_AND_ADD] = {
+ .length = sizeof(struct ib_atomic_wr),
+ .qpt_support = BIT(IB_QPT_RC),
+ .flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
+},
+
+[IB_WR_RDMA_WRITE_WITH_IMM] = {
+ .length = sizeof(struct ib_rdma_wr),
+ .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
+},
+
+[IB_WR_SEND] = {
+ .length = sizeof(struct ib_send_wr),
+ .qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
+ BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
+},
+
+[IB_WR_SEND_WITH_IMM] = {
+ .length = sizeof(struct ib_send_wr),
+ .qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
+ BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
+},
+
+};
+
static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
gfp_t gfp)
{
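
qib_post_parms above is a table indexed by WR opcode: each entry records the expected WR length, a bitmap of QP types allowed to use the opcode, and flags such as RVT_OPERATION_ATOMIC. Hooked into rdi->post_parms (see the qib_verbs.c hunk below), it lets rdmavt validate posted work requests by lookup instead of per-driver switch statements. A hedged sketch of such a check; the exact rdmavt-side code is not in this diff:

static bool wr_supported(const struct rvt_operation_params *parms,
			 const struct ib_send_wr *wr,
			 enum ib_qp_type qpt)
{
	/* caller must bounds-check wr->opcode against RVT_OPERATION_MAX */
	const struct rvt_operation_params *p = &parms[wr->opcode];

	/* zero-filled entries mark opcodes the driver never registered */
	return p->length && (p->qpt_support & BIT(qpt));
}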
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 846e6c726..10d062561 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -169,8 +169,12 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
}
if (ah_attr->ah_flags & IB_AH_GRH) {
- qib_copy_sge(&qp->r_sge, &ah_attr->grh,
- sizeof(struct ib_grh), 1);
+ struct ib_grh grh;
+ struct ib_global_route grd = ah_attr->grh;
+
+ qib_make_grh(ibp, &grh, &grd, 0, 0);
+ qib_copy_sge(&qp->r_sge, &grh,
+ sizeof(grh), 1);
wc.wc_flags |= IB_WC_GRH;
} else
qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index cbf6200e6..fd1dfbce5 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1582,6 +1582,8 @@ static void qib_fill_device_attr(struct qib_devdata *dd)
rdi->dparms.props.max_total_mcast_qp_attach =
rdi->dparms.props.max_mcast_qp_attach *
rdi->dparms.props.max_mcast_grp;
+ /* post send table */
+ dd->verbs_dev.rdi.post_parms = qib_post_parms;
}
/**
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 4f878151f..736ced684 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -497,4 +497,6 @@ extern unsigned int ib_qib_max_srq_wrs;
extern const u32 ib_qib_rnr_table[];
+extern const struct rvt_operation_params qib_post_parms[];
+
#endif /* QIB_VERBS_H */
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index 565c881a4..0a89a9555 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -331,6 +331,21 @@ static int usnic_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
+static void usnic_get_dev_fw_str(struct ib_device *device,
+ char *str,
+ size_t str_len)
+{
+ struct usnic_ib_dev *us_ibdev =
+ container_of(device, struct usnic_ib_dev, ib_dev);
+ struct ethtool_drvinfo info;
+
+ mutex_lock(&us_ibdev->usdev_lock);
+ us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
+ mutex_unlock(&us_ibdev->usdev_lock);
+
+ snprintf(str, str_len, "%s", info.fw_version);
+}
+
/* Start of PF discovery section */
static void *usnic_ib_device_add(struct pci_dev *dev)
{
@@ -414,6 +429,7 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
us_ibdev->ib_dev.req_notify_cq = usnic_ib_req_notify_cq;
us_ibdev->ib_dev.get_dma_mr = usnic_ib_get_dma_mr;
us_ibdev->ib_dev.get_port_immutable = usnic_port_immutable;
+ us_ibdev->ib_dev.get_dev_fw_str = usnic_get_dev_fw_str;
if (ib_register_device(&us_ibdev->ib_dev, NULL))
@@ -648,7 +664,8 @@ static int __init usnic_ib_init(void)
return err;
}
- if (pci_register_driver(&usnic_ib_pci_driver)) {
+ err = pci_register_driver(&usnic_ib_pci_driver);
+ if (err) {
usnic_err("Unable to register with PCI\n");
goto out_umem_fini;
}
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
index 3412ea061..80ef3f899 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
@@ -45,21 +45,6 @@
#include "usnic_ib_verbs.h"
#include "usnic_log.h"
-static ssize_t usnic_ib_show_fw_ver(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct usnic_ib_dev *us_ibdev =
- container_of(device, struct usnic_ib_dev, ib_dev.dev);
- struct ethtool_drvinfo info;
-
- mutex_lock(&us_ibdev->usdev_lock);
- us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
- mutex_unlock(&us_ibdev->usdev_lock);
-
- return scnprintf(buf, PAGE_SIZE, "%s\n", info.fw_version);
-}
-
static ssize_t usnic_ib_show_board(struct device *device,
struct device_attribute *attr,
char *buf)
@@ -192,7 +177,6 @@ usnic_ib_show_cq_per_vf(struct device *device, struct device_attribute *attr,
us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ]);
}
-static DEVICE_ATTR(fw_ver, S_IRUGO, usnic_ib_show_fw_ver, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, usnic_ib_show_board, NULL);
static DEVICE_ATTR(config, S_IRUGO, usnic_ib_show_config, NULL);
static DEVICE_ATTR(iface, S_IRUGO, usnic_ib_show_iface, NULL);
@@ -201,7 +185,6 @@ static DEVICE_ATTR(qp_per_vf, S_IRUGO, usnic_ib_show_qp_per_vf, NULL);
static DEVICE_ATTR(cq_per_vf, S_IRUGO, usnic_ib_show_cq_per_vf, NULL);
static struct device_attribute *usnic_class_attributes[] = {
- &dev_attr_fw_ver,
&dev_attr_board_id,
&dev_attr_config,
&dev_attr_iface,
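
Taken together, the two usnic hunks move the firmware-version string from a driver-private fw_ver sysfs attribute to the core's get_dev_fw_str() callback, so the RDMA core can expose fw_ver uniformly for every device. A sketch of the pattern for a hypothetical driver (foo_dev and its fw_* fields are invented for illustration; the callback signature matches the one used above):

#include <rdma/ib_verbs.h>

struct foo_dev {
	struct ib_device ib_dev;
	u16 fw_major, fw_minor, fw_patch;
};

static void foo_get_dev_fw_str(struct ib_device *device, char *str,
			       size_t str_len)
{
	struct foo_dev *fdev = container_of(device, struct foo_dev, ib_dev);

	snprintf(str, str_len, "%u.%u.%u",
		 fdev->fw_major, fdev->fw_minor, fdev->fw_patch);
}

/* at device registration time:
 *	fdev->ib_dev.get_dev_fw_str = foo_get_dev_fw_str;
 */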
diff --git a/drivers/infiniband/sw/Makefile b/drivers/infiniband/sw/Makefile
index 988b6a010..8b095b27d 100644
--- a/drivers/infiniband/sw/Makefile
+++ b/drivers/infiniband/sw/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_INFINIBAND_RDMAVT) += rdmavt/
+obj-$(CONFIG_RDMA_RXE) += rxe/
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 0f4d4500f..46b649700 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -140,6 +140,7 @@ static int rvt_init_mregion(struct rvt_mregion *mr, struct ib_pd *pd,
init_completion(&mr->comp);
/* count returning the ptr to user */
atomic_set(&mr->refcount, 1);
+ atomic_set(&mr->lkey_invalid, 0);
mr->pd = pd;
mr->max_segs = count;
return 0;
@@ -293,7 +294,7 @@ static void __rvt_free_mr(struct rvt_mr *mr)
{
rvt_deinit_mregion(&mr->mr);
rvt_free_lkey(&mr->mr);
- vfree(mr);
+ kfree(mr);
}
/**
@@ -480,6 +481,123 @@ struct ib_mr *rvt_alloc_mr(struct ib_pd *pd,
}
/**
+ * rvt_set_page - page assignment function called by ib_sg_to_pages
+ * @ibmr: memory region
+ * @addr: dma address of mapped page
+ *
+ * Return: 0 on success
+ */
+static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct rvt_mr *mr = to_imr(ibmr);
+ u32 ps = 1 << mr->mr.page_shift;
+ u32 mapped_segs = mr->mr.length >> mr->mr.page_shift;
+ int m, n;
+
+ if (unlikely(mapped_segs == mr->mr.max_segs))
+ return -ENOMEM;
+
+ if (mr->mr.length == 0) {
+ mr->mr.user_base = addr;
+ mr->mr.iova = addr;
+ }
+
+ m = mapped_segs / RVT_SEGSZ;
+ n = mapped_segs % RVT_SEGSZ;
+ mr->mr.map[m]->segs[n].vaddr = (void *)addr;
+ mr->mr.map[m]->segs[n].length = ps;
+ mr->mr.length += ps;
+
+ return 0;
+}
+
+/**
+ * rvt_map_mr_sg - map an sg list to the memory region
+ * @ibmr: memory region
+ * @sg: dma mapped scatterlist
+ * @sg_nents: number of entries in sg
+ * @sg_offset: offset in bytes into sg
+ *
+ * Return: number of sg elements mapped to the memory region
+ */
+int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset)
+{
+ struct rvt_mr *mr = to_imr(ibmr);
+
+ mr->mr.length = 0;
+ mr->mr.page_shift = PAGE_SHIFT;
+ return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
+ rvt_set_page);
+}
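
rvt_map_mr_sg() is the driver half of the fast-registration path: the core helper ib_sg_to_pages() walks the DMA-mapped scatterlist and calls rvt_set_page() once per PAGE_SIZE chunk, filling the map[]/segs[] array. A hedged sketch of the ULP half (ulp_register_sgl is a hypothetical helper; mr and sgl are assumed to be allocated and DMA-mapped by the caller):

#include <rdma/ib_verbs.h>

static int ulp_register_sgl(struct ib_mr *mr, struct scatterlist *sgl,
			    int sg_nents)
{
	unsigned int sg_offset = 0;
	/* invokes the driver's map_mr_sg hook -- rvt_map_mr_sg() above */
	int n = ib_map_mr_sg(mr, sgl, sg_nents, &sg_offset, PAGE_SIZE);

	/* fewer mapped entries than requested means the MR was too small */
	return n == sg_nents ? 0 : -EINVAL;
}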
+
+/**
+ * rvt_fast_reg_mr - fast register physical MR
+ * @qp: the queue pair where the work request comes from
+ * @ibmr: the memory region to be registered
+ * @key: updated key for this memory region
+ * @access: access flags for this memory region
+ *
+ * Returns 0 on success.
+ */
+int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
+ int access)
+{
+ struct rvt_mr *mr = to_imr(ibmr);
+
+ if (qp->ibqp.pd != mr->mr.pd)
+ return -EACCES;
+
+ /* not applicable to dma MR or user MR */
+ if (!mr->mr.lkey || mr->umem)
+ return -EINVAL;
+
+ if ((key & 0xFFFFFF00) != (mr->mr.lkey & 0xFFFFFF00))
+ return -EINVAL;
+
+ ibmr->lkey = key;
+ ibmr->rkey = key;
+ mr->mr.lkey = key;
+ mr->mr.access_flags = access;
+ atomic_set(&mr->mr.lkey_invalid, 0);
+
+ return 0;
+}
+EXPORT_SYMBOL(rvt_fast_reg_mr);
+
+/**
+ * rvt_invalidate_rkey - invalidate an MR rkey
+ * @qp: queue pair associated with the invalidate op
+ * @rkey: rkey to invalidate
+ *
+ * Returns 0 on success.
+ */
+int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey)
+{
+ struct rvt_dev_info *dev = ib_to_rvt(qp->ibqp.device);
+ struct rvt_lkey_table *rkt = &dev->lkey_table;
+ struct rvt_mregion *mr;
+
+ if (rkey == 0)
+ return -EINVAL;
+
+ rcu_read_lock();
+ mr = rcu_dereference(
+ rkt->table[(rkey >> (32 - dev->dparms.lkey_table_size))]);
+ if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
+ goto bail;
+
+ atomic_set(&mr->lkey_invalid, 1);
+ rcu_read_unlock();
+ return 0;
+
+bail:
+ rcu_read_unlock();
+ return -EINVAL;
+}
+EXPORT_SYMBOL(rvt_invalidate_rkey);
+
+/**
* rvt_alloc_fmr - allocate a fast memory region
* @pd: the protection domain for this memory region
* @mr_access_flags: access flags for this memory region
@@ -682,7 +800,8 @@ int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
}
mr = rcu_dereference(
rkt->table[(sge->lkey >> (32 - dev->dparms.lkey_table_size))]);
- if (unlikely(!mr || mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
+ if (unlikely(!mr || atomic_read(&mr->lkey_invalid) ||
+ mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
goto bail;
off = sge->addr - mr->user_base;
@@ -782,7 +901,8 @@ int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
mr = rcu_dereference(
rkt->table[(rkey >> (32 - dev->dparms.lkey_table_size))]);
- if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
+ if (unlikely(!mr || atomic_read(&mr->lkey_invalid) ||
+ mr->lkey != rkey || qp->ibqp.pd != mr->pd))
goto bail;
off = vaddr - mr->iova;
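
The new lkey_invalid flag gives IB_WR_LOCAL_INV cheap semantics: invalidation flips an atomic flag instead of tearing the MR down, rvt_lkey_ok()/rvt_rkey_ok() reject flagged keys, and rvt_fast_reg_mr() clears the flag to revalidate. A userspace sketch of the same idea (plain C11 atomics in place of the kernel's RCU-protected table):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct mregion {
	uint32_t lkey;
	atomic_int lkey_invalid;
};

/* lookup-side check, analogous to rvt_lkey_ok()/rvt_rkey_ok() */
static bool key_ok(struct mregion *mr, uint32_t key)
{
	return mr && mr->lkey == key && !atomic_load(&mr->lkey_invalid);
}

/* local-invalidate side, analogous to rvt_invalidate_rkey() */
static void key_invalidate(struct mregion *mr)
{
	atomic_store(&mr->lkey_invalid, 1);
}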
diff --git a/drivers/infiniband/sw/rdmavt/mr.h b/drivers/infiniband/sw/rdmavt/mr.h
index 69380512c..132800ee0 100644
--- a/drivers/infiniband/sw/rdmavt/mr.h
+++ b/drivers/infiniband/sw/rdmavt/mr.h
@@ -82,6 +82,8 @@ int rvt_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *rvt_alloc_mr(struct ib_pd *pd,
enum ib_mr_type mr_type,
u32 max_num_sg);
+int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset);
struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
struct ib_fmr_attr *fmr_attr);
int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 41ba7e9ca..870b4f212 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -435,8 +435,7 @@ static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
for (n = 0; n < rvt_max_atomic(rdi); n++) {
struct rvt_ack_entry *e = &qp->s_ack_queue[n];
- if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
- e->rdma_sge.mr) {
+ if (e->rdma_sge.mr) {
rvt_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
@@ -584,6 +583,7 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
qp->r_rq.wq->tail = 0;
}
qp->r_sge.num_sge = 0;
+ atomic_set(&qp->s_reserved_used, 0);
}
/**
@@ -613,6 +613,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
void *priv = NULL;
gfp_t gfp;
+ size_t sqsize;
if (!rdi)
return ERR_PTR(-EINVAL);
@@ -643,7 +644,9 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
init_attr->cap.max_recv_wr == 0)
return ERR_PTR(-EINVAL);
}
-
+ sqsize =
+ init_attr->cap.max_send_wr + 1 +
+ rdi->dparms.reserved_operations;
switch (init_attr->qp_type) {
case IB_QPT_SMI:
case IB_QPT_GSI:
@@ -658,11 +661,11 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
sizeof(struct rvt_swqe);
if (gfp == GFP_NOIO)
swq = __vmalloc(
- (init_attr->cap.max_send_wr + 1) * sz,
+ sqsize * sz,
gfp | __GFP_ZERO, PAGE_KERNEL);
else
swq = vzalloc_node(
- (init_attr->cap.max_send_wr + 1) * sz,
+ sqsize * sz,
rdi->dparms.node);
if (!swq)
return ERR_PTR(-ENOMEM);
@@ -741,13 +744,14 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
spin_lock_init(&qp->s_lock);
spin_lock_init(&qp->r_rq.lock);
atomic_set(&qp->refcount, 0);
+ atomic_set(&qp->local_ops_pending, 0);
init_waitqueue_head(&qp->wait);
init_timer(&qp->s_timer);
qp->s_timer.data = (unsigned long)qp;
INIT_LIST_HEAD(&qp->rspwait);
qp->state = IB_QPS_RESET;
qp->s_wq = swq;
- qp->s_size = init_attr->cap.max_send_wr + 1;
+ qp->s_size = sqsize;
qp->s_avail = init_attr->cap.max_send_wr;
qp->s_max_sge = init_attr->cap.max_send_sge;
if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
@@ -869,7 +873,8 @@ bail_qpn:
free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
bail_rq_wq:
- vfree(qp->r_rq.wq);
+ if (!qp->ip)
+ vfree(qp->r_rq.wq);
bail_driver_priv:
rdi->driver_f.qp_priv_free(rdi, qp);
@@ -1332,7 +1337,8 @@ int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
attr->dest_qp_num = qp->remote_qpn;
attr->qp_access_flags = qp->qp_access_flags;
- attr->cap.max_send_wr = qp->s_size - 1;
+ attr->cap.max_send_wr = qp->s_size - 1 -
+ rdi->dparms.reserved_operations;
attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
attr->cap.max_send_sge = qp->s_max_sge;
attr->cap.max_recv_sge = qp->r_rq.max_sge;
@@ -1440,25 +1446,116 @@ int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
}
/**
- * qp_get_savail - return number of avail send entries
+ * rvt_qp_valid_operation - validate post send wr request
+ * @qp - the qp
+ * @post_parms - the post send table for the driver
+ * @wr - the work request
*
+ * The routine validates the operation against the driver's post
+ * send table and returns the length of the work request, which
+ * may extend beyond struct ib_send_wr. Operation-dependent flags
+ * key the atomic operation checks.
+ *
+ * There is an exception for UD QPs: the PD is validated, and the
+ * length is overridden to include the additional UD-specific
+ * length.
+ *
+ * Returns a negative error or the length of the work request
+ * for building the swqe.
+ */
+static inline int rvt_qp_valid_operation(
+ struct rvt_qp *qp,
+ const struct rvt_operation_params *post_parms,
+ struct ib_send_wr *wr)
+{
+ int len;
+
+ if (wr->opcode >= RVT_OPERATION_MAX || !post_parms[wr->opcode].length)
+ return -EINVAL;
+ if (!(post_parms[wr->opcode].qpt_support & BIT(qp->ibqp.qp_type)))
+ return -EINVAL;
+ if ((post_parms[wr->opcode].flags & RVT_OPERATION_PRIV) &&
+ ibpd_to_rvtpd(qp->ibqp.pd)->user)
+ return -EINVAL;
+ if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC_SGE &&
+ (wr->num_sge == 0 ||
+ wr->sg_list[0].length < sizeof(u64) ||
+ wr->sg_list[0].addr & (sizeof(u64) - 1)))
+ return -EINVAL;
+ if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC &&
+ !qp->s_max_rd_atomic)
+ return -EINVAL;
+ len = post_parms[wr->opcode].length;
+ /* UD specific */
+ if (qp->ibqp.qp_type != IB_QPT_UC &&
+ qp->ibqp.qp_type != IB_QPT_RC) {
+ if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
+ return -EINVAL;
+ len = sizeof(struct ib_ud_wr);
+ }
+ return len;
+}
+
+/**
+ * rvt_qp_is_avail - determine queue capacity
* @qp - the qp
+ * @rdi - the rdmavt device
+ * @reserved_op - is this a reserved operation
*
* This assumes the s_hlock is held but the s_last
* qp variable is uncontrolled.
+ *
+ * For non-reserved operations, qp->s_avail
+ * may be changed.
+ *
+ * The return value is zero or -ENOMEM.
*/
-static inline u32 qp_get_savail(struct rvt_qp *qp)
+static inline int rvt_qp_is_avail(
+ struct rvt_qp *qp,
+ struct rvt_dev_info *rdi,
+ bool reserved_op)
{
u32 slast;
- u32 ret;
-
+ u32 avail;
+ u32 reserved_used;
+
+ /* see rvt_qp_wqe_unreserve() */
+ smp_mb__before_atomic();
+ reserved_used = atomic_read(&qp->s_reserved_used);
+ if (unlikely(reserved_op)) {
+ /* see rvt_qp_wqe_unreserve() */
+ smp_mb__before_atomic();
+ if (reserved_used >= rdi->dparms.reserved_operations)
+ return -ENOMEM;
+ return 0;
+ }
+ /* non-reserved operations */
+ if (likely(qp->s_avail))
+ return 0;
smp_read_barrier_depends(); /* see rc.c */
slast = ACCESS_ONCE(qp->s_last);
if (qp->s_head >= slast)
- ret = qp->s_size - (qp->s_head - slast);
+ avail = qp->s_size - (qp->s_head - slast);
else
- ret = slast - qp->s_head;
- return ret - 1;
+ avail = slast - qp->s_head;
+
+ /* see rvt_qp_wqe_unreserve() */
+ smp_mb__before_atomic();
+ reserved_used = atomic_read(&qp->s_reserved_used);
+ avail = avail - 1 -
+ (rdi->dparms.reserved_operations - reserved_used);
+ /* ensure we don't assign a negative s_avail */
+ if ((s32)avail <= 0)
+ return -ENOMEM;
+ qp->s_avail = avail;
+ if (WARN_ON(qp->s_avail >
+ (qp->s_size - 1 - rdi->dparms.reserved_operations)))
+ rvt_pr_err(rdi,
+ "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
+ qp->ibqp.qp_num, qp->s_size, qp->s_avail,
+ qp->s_head, qp->s_tail, qp->s_cur,
+ qp->s_acked, qp->s_last);
+ return 0;
}
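
The availability math in rvt_qp_is_avail() treats the send queue as a ring of s_size slots with one slot always left empty, minus whatever part of the reserved_operations quota is still unused. A plain-C worked example of the same arithmetic:

#include <stdio.h>

int main(void)
{
	/* s_size = 16 slots, head = 10, last = 4, 2 reserved operations
	 * of which 1 is in use: 16 - (10 - 4) = 10 raw slots, minus the
	 * always-empty slot and the unused reserved slot leaves 8.
	 */
	unsigned s_size = 16, head = 10, last = 4;
	unsigned reserved = 2, reserved_used = 1;
	unsigned raw = head >= last ? s_size - (head - last) : last - head;
	int avail = (int)(raw - 1 - (reserved - reserved_used));

	printf("postable entries: %d\n", avail); /* prints 8 */
	return 0;
}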
/**
@@ -1480,49 +1577,64 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
u8 log_pmtu;
int ret;
+ size_t cplen;
+ bool reserved_op;
+ int local_ops_delayed = 0;
+
+ BUILD_BUG_ON(IB_QPT_MAX >= (sizeof(u32) * BITS_PER_BYTE));
/* IB spec says that num_sge == 0 is OK. */
if (unlikely(wr->num_sge > qp->s_max_sge))
return -EINVAL;
+ ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr);
+ if (ret < 0)
+ return ret;
+ cplen = ret;
+
/*
- * Don't allow RDMA reads or atomic operations on UC or
- * undefined operations.
- * Make sure buffer is large enough to hold the result for atomics.
+ * Local operations include fast register and local invalidate.
+ * Fast register needs to be processed immediately because the
+ * registered lkey may be used by following work requests and the
+ * lkey needs to be valid at the time those requests are posted.
+ * Local invalidate can be processed immediately if fencing is
+ * not required and no previous local invalidate ops are pending.
+ * Signaled local operations that have been processed immediately
+ * need to have requests with "completion only" flags set posted
+ * to the send queue in order to generate completions.
*/
- if (qp->ibqp.qp_type == IB_QPT_UC) {
- if ((unsigned)wr->opcode >= IB_WR_RDMA_READ)
- return -EINVAL;
- } else if (qp->ibqp.qp_type != IB_QPT_RC) {
- /* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
- if (wr->opcode != IB_WR_SEND &&
- wr->opcode != IB_WR_SEND_WITH_IMM)
- return -EINVAL;
- /* Check UD destination address PD */
- if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
+ if ((rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL)) {
+ switch (wr->opcode) {
+ case IB_WR_REG_MR:
+ ret = rvt_fast_reg_mr(qp,
+ reg_wr(wr)->mr,
+ reg_wr(wr)->key,
+ reg_wr(wr)->access);
+ if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
+ return ret;
+ break;
+ case IB_WR_LOCAL_INV:
+ if ((wr->send_flags & IB_SEND_FENCE) ||
+ atomic_read(&qp->local_ops_pending)) {
+ local_ops_delayed = 1;
+ } else {
+ ret = rvt_invalidate_rkey(
+ qp, wr->ex.invalidate_rkey);
+ if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
+ return ret;
+ }
+ break;
+ default:
return -EINVAL;
- } else if ((unsigned)wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) {
- return -EINVAL;
- } else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
- (wr->num_sge == 0 ||
- wr->sg_list[0].length < sizeof(u64) ||
- wr->sg_list[0].addr & (sizeof(u64) - 1))) {
- return -EINVAL;
- } else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) {
- return -EINVAL;
+ }
}
+
+ reserved_op = rdi->post_parms[wr->opcode].flags &
+ RVT_OPERATION_USE_RESERVE;
/* check for avail */
- if (unlikely(!qp->s_avail)) {
- qp->s_avail = qp_get_savail(qp);
- if (WARN_ON(qp->s_avail > (qp->s_size - 1)))
- rvt_pr_err(rdi,
- "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
- qp->ibqp.qp_num, qp->s_size, qp->s_avail,
- qp->s_head, qp->s_tail, qp->s_cur,
- qp->s_acked, qp->s_last);
- if (!qp->s_avail)
- return -ENOMEM;
- }
+ ret = rvt_qp_is_avail(qp, rdi, reserved_op);
+ if (ret)
+ return ret;
next = qp->s_head + 1;
if (next >= qp->s_size)
next = 0;
@@ -1531,18 +1643,8 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
pd = ibpd_to_rvtpd(qp->ibqp.pd);
wqe = rvt_get_swqe_ptr(qp, qp->s_head);
- if (qp->ibqp.qp_type != IB_QPT_UC &&
- qp->ibqp.qp_type != IB_QPT_RC)
- memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
- else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
- wr->opcode == IB_WR_RDMA_WRITE ||
- wr->opcode == IB_WR_RDMA_READ)
- memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
- else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
- wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
- memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
- else
- memcpy(&wqe->wr, wr, sizeof(wqe->wr));
+ /* cplen has length from above */
+ memcpy(&wqe->wr, wr, cplen);
wqe->length = 0;
j = 0;
@@ -1585,14 +1687,29 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
}
- wqe->ssn = qp->s_ssn++;
- wqe->psn = qp->s_next_psn;
- wqe->lpsn = wqe->psn +
- (wqe->length ? ((wqe->length - 1) >> log_pmtu) : 0);
- qp->s_next_psn = wqe->lpsn + 1;
+ if (rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL) {
+ if (local_ops_delayed)
+ atomic_inc(&qp->local_ops_pending);
+ else
+ wqe->wr.send_flags |= RVT_SEND_COMPLETION_ONLY;
+ wqe->ssn = 0;
+ wqe->psn = 0;
+ wqe->lpsn = 0;
+ } else {
+ wqe->ssn = qp->s_ssn++;
+ wqe->psn = qp->s_next_psn;
+ wqe->lpsn = wqe->psn +
+ (wqe->length ?
+ ((wqe->length - 1) >> log_pmtu) :
+ 0);
+ qp->s_next_psn = wqe->lpsn + 1;
+ }
trace_rvt_post_one_wr(qp, wqe);
+ if (unlikely(reserved_op))
+ rvt_qp_wqe_reserve(qp, wqe);
+ else
+ qp->s_avail--;
smp_wmb(); /* see request builders */
- qp->s_avail--;
qp->s_head = next;
return 0;
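
The local-operations comment in the hunk above is best read next to a typical caller: a ULP chains an IB_WR_REG_MR in front of a send that uses the freshly registered key in a single post, which only works because rvt_post_one_wr() processes the registration immediately. A hedged sketch (ulp_reg_and_send is hypothetical; qp, mr and new_key are assumed valid; this is generic verbs code, not rdmavt-specific):

#include <rdma/ib_verbs.h>

static int ulp_reg_and_send(struct ib_qp *qp, struct ib_mr *mr, u32 new_key)
{
	struct ib_send_wr send = {
		.opcode = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
	};
	struct ib_reg_wr reg = {
		.wr = { .opcode = IB_WR_REG_MR, .next = &send },
		.mr = mr,
		.key = new_key,
		.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ,
	};
	struct ib_send_wr *bad;

	/* the lkey registered by the first WR must already be valid when
	 * the chained send is built, hence the immediate processing.
	 */
	return ib_post_send(qp, &reg.wr, &bad);
}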
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 30c4fda7a..d430c2f7c 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -370,6 +370,7 @@ enum {
REG_USER_MR,
DEREG_MR,
ALLOC_MR,
+ MAP_MR_SG,
ALLOC_FMR,
MAP_PHYS_FMR,
UNMAP_FMR,
@@ -528,7 +529,8 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
post_send),
rvt_post_send))
if (!rdi->driver_f.schedule_send ||
- !rdi->driver_f.do_send)
+ !rdi->driver_f.do_send ||
+ !rdi->post_parms)
return -EINVAL;
break;
@@ -633,6 +635,12 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
rvt_alloc_mr);
break;
+ case MAP_MR_SG:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ map_mr_sg),
+ rvt_map_mr_sg);
+ break;
+
case MAP_PHYS_FMR:
check_driver_override(rdi, offsetof(struct ib_device,
map_phys_fmr),
diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig
new file mode 100644
index 000000000..1e4e628fe
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/Kconfig
@@ -0,0 +1,24 @@
+config RDMA_RXE
+ tristate "Software RDMA over Ethernet (RoCE) driver"
+ depends on INET && PCI && INFINIBAND
+ depends on NET_UDP_TUNNEL
+ ---help---
+ This driver implements the InfiniBand RDMA transport over
+ the Linux network stack. It enables a system with a
+ standard Ethernet adapter to interoperate with a RoCE
+ adapter or with another system running the RXE driver.
+ Documentation on InfiniBand and RoCE can be downloaded at
+ www.infinibandta.org and www.openfabrics.org. (See also
+ siw which is a similar software driver for iWARP.)
+
+ The driver is split into two layers, one interfaces with the
+ Linux RDMA stack and implements a kernel or user space
+ verbs API. The user space verbs API requires a support
+ library named librxe which is loaded by the generic user
+ space verbs API, libibverbs. The other layer interfaces
+ with the Linux network stack at layer 3.
+
+	To configure and work with the soft-RoCE driver, see the
+	"configure Soft-RoCE (RXE)" section of the following wiki page:
+
+ https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home
diff --git a/drivers/infiniband/sw/rxe/Makefile b/drivers/infiniband/sw/rxe/Makefile
new file mode 100644
index 000000000..3b3fb9d1c
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/Makefile
@@ -0,0 +1,24 @@
+obj-$(CONFIG_RDMA_RXE) += rdma_rxe.o
+
+rdma_rxe-y := \
+ rxe.o \
+ rxe_comp.o \
+ rxe_req.o \
+ rxe_resp.o \
+ rxe_recv.o \
+ rxe_pool.o \
+ rxe_queue.o \
+ rxe_verbs.o \
+ rxe_av.o \
+ rxe_srq.o \
+ rxe_qp.o \
+ rxe_cq.o \
+ rxe_mr.o \
+ rxe_dma.o \
+ rxe_opcode.o \
+ rxe_mmap.o \
+ rxe_icrc.o \
+ rxe_mcast.o \
+ rxe_task.o \
+ rxe_net.o \
+ rxe_sysfs.o
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
new file mode 100644
index 000000000..ddd59270f
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -0,0 +1,405 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "rxe.h"
+#include "rxe_loc.h"
+
+MODULE_AUTHOR("Bob Pearson, Frank Zago, John Groves, Kamal Heib");
+MODULE_DESCRIPTION("Soft RDMA transport");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION("0.2");
+
+/* free resources for all ports on a device */
+static void rxe_cleanup_ports(struct rxe_dev *rxe)
+{
+ kfree(rxe->port.pkey_tbl);
+ rxe->port.pkey_tbl = NULL;
+}
+
+/* free resources for a rxe device; all objects created for this
+ * device must have been destroyed
+ */
+static void rxe_cleanup(struct rxe_dev *rxe)
+{
+ rxe_pool_cleanup(&rxe->uc_pool);
+ rxe_pool_cleanup(&rxe->pd_pool);
+ rxe_pool_cleanup(&rxe->ah_pool);
+ rxe_pool_cleanup(&rxe->srq_pool);
+ rxe_pool_cleanup(&rxe->qp_pool);
+ rxe_pool_cleanup(&rxe->cq_pool);
+ rxe_pool_cleanup(&rxe->mr_pool);
+ rxe_pool_cleanup(&rxe->mw_pool);
+ rxe_pool_cleanup(&rxe->mc_grp_pool);
+ rxe_pool_cleanup(&rxe->mc_elem_pool);
+
+ rxe_cleanup_ports(rxe);
+}
+
+/* called when all references have been dropped */
+void rxe_release(struct kref *kref)
+{
+ struct rxe_dev *rxe = container_of(kref, struct rxe_dev, ref_cnt);
+
+ rxe_cleanup(rxe);
+ ib_dealloc_device(&rxe->ib_dev);
+}
+
+void rxe_dev_put(struct rxe_dev *rxe)
+{
+ kref_put(&rxe->ref_cnt, rxe_release);
+}
+EXPORT_SYMBOL_GPL(rxe_dev_put);
+
+/* initialize rxe device parameters */
+static int rxe_init_device_param(struct rxe_dev *rxe)
+{
+ rxe->max_inline_data = RXE_MAX_INLINE_DATA;
+
+ rxe->attr.fw_ver = RXE_FW_VER;
+ rxe->attr.max_mr_size = RXE_MAX_MR_SIZE;
+ rxe->attr.page_size_cap = RXE_PAGE_SIZE_CAP;
+ rxe->attr.vendor_id = RXE_VENDOR_ID;
+ rxe->attr.vendor_part_id = RXE_VENDOR_PART_ID;
+ rxe->attr.hw_ver = RXE_HW_VER;
+ rxe->attr.max_qp = RXE_MAX_QP;
+ rxe->attr.max_qp_wr = RXE_MAX_QP_WR;
+ rxe->attr.device_cap_flags = RXE_DEVICE_CAP_FLAGS;
+ rxe->attr.max_sge = RXE_MAX_SGE;
+ rxe->attr.max_sge_rd = RXE_MAX_SGE_RD;
+ rxe->attr.max_cq = RXE_MAX_CQ;
+ rxe->attr.max_cqe = (1 << RXE_MAX_LOG_CQE) - 1;
+ rxe->attr.max_mr = RXE_MAX_MR;
+ rxe->attr.max_pd = RXE_MAX_PD;
+ rxe->attr.max_qp_rd_atom = RXE_MAX_QP_RD_ATOM;
+ rxe->attr.max_ee_rd_atom = RXE_MAX_EE_RD_ATOM;
+ rxe->attr.max_res_rd_atom = RXE_MAX_RES_RD_ATOM;
+ rxe->attr.max_qp_init_rd_atom = RXE_MAX_QP_INIT_RD_ATOM;
+ rxe->attr.max_ee_init_rd_atom = RXE_MAX_EE_INIT_RD_ATOM;
+ rxe->attr.atomic_cap = RXE_ATOMIC_CAP;
+ rxe->attr.max_ee = RXE_MAX_EE;
+ rxe->attr.max_rdd = RXE_MAX_RDD;
+ rxe->attr.max_mw = RXE_MAX_MW;
+ rxe->attr.max_raw_ipv6_qp = RXE_MAX_RAW_IPV6_QP;
+ rxe->attr.max_raw_ethy_qp = RXE_MAX_RAW_ETHY_QP;
+ rxe->attr.max_mcast_grp = RXE_MAX_MCAST_GRP;
+ rxe->attr.max_mcast_qp_attach = RXE_MAX_MCAST_QP_ATTACH;
+ rxe->attr.max_total_mcast_qp_attach = RXE_MAX_TOT_MCAST_QP_ATTACH;
+ rxe->attr.max_ah = RXE_MAX_AH;
+ rxe->attr.max_fmr = RXE_MAX_FMR;
+ rxe->attr.max_map_per_fmr = RXE_MAX_MAP_PER_FMR;
+ rxe->attr.max_srq = RXE_MAX_SRQ;
+ rxe->attr.max_srq_wr = RXE_MAX_SRQ_WR;
+ rxe->attr.max_srq_sge = RXE_MAX_SRQ_SGE;
+ rxe->attr.max_fast_reg_page_list_len = RXE_MAX_FMR_PAGE_LIST_LEN;
+ rxe->attr.max_pkeys = RXE_MAX_PKEYS;
+ rxe->attr.local_ca_ack_delay = RXE_LOCAL_CA_ACK_DELAY;
+
+ rxe->max_ucontext = RXE_MAX_UCONTEXT;
+
+ return 0;
+}
+
+/* initialize port attributes */
+static int rxe_init_port_param(struct rxe_port *port)
+{
+ port->attr.state = RXE_PORT_STATE;
+ port->attr.max_mtu = RXE_PORT_MAX_MTU;
+ port->attr.active_mtu = RXE_PORT_ACTIVE_MTU;
+ port->attr.gid_tbl_len = RXE_PORT_GID_TBL_LEN;
+ port->attr.port_cap_flags = RXE_PORT_PORT_CAP_FLAGS;
+ port->attr.max_msg_sz = RXE_PORT_MAX_MSG_SZ;
+ port->attr.bad_pkey_cntr = RXE_PORT_BAD_PKEY_CNTR;
+ port->attr.qkey_viol_cntr = RXE_PORT_QKEY_VIOL_CNTR;
+ port->attr.pkey_tbl_len = RXE_PORT_PKEY_TBL_LEN;
+ port->attr.lid = RXE_PORT_LID;
+ port->attr.sm_lid = RXE_PORT_SM_LID;
+ port->attr.lmc = RXE_PORT_LMC;
+ port->attr.max_vl_num = RXE_PORT_MAX_VL_NUM;
+ port->attr.sm_sl = RXE_PORT_SM_SL;
+ port->attr.subnet_timeout = RXE_PORT_SUBNET_TIMEOUT;
+ port->attr.init_type_reply = RXE_PORT_INIT_TYPE_REPLY;
+ port->attr.active_width = RXE_PORT_ACTIVE_WIDTH;
+ port->attr.active_speed = RXE_PORT_ACTIVE_SPEED;
+ port->attr.phys_state = RXE_PORT_PHYS_STATE;
+ port->mtu_cap =
+ ib_mtu_enum_to_int(RXE_PORT_ACTIVE_MTU);
+ port->subnet_prefix = cpu_to_be64(RXE_PORT_SUBNET_PREFIX);
+
+ return 0;
+}
+
+/* initialize port state, noting the IB convention that HCA ports are always
+ * numbered from 1
+ */
+static int rxe_init_ports(struct rxe_dev *rxe)
+{
+ struct rxe_port *port = &rxe->port;
+
+ rxe_init_port_param(port);
+
+ if (!port->attr.pkey_tbl_len || !port->attr.gid_tbl_len)
+ return -EINVAL;
+
+ port->pkey_tbl = kcalloc(port->attr.pkey_tbl_len,
+ sizeof(*port->pkey_tbl), GFP_KERNEL);
+
+ if (!port->pkey_tbl)
+ return -ENOMEM;
+
+ port->pkey_tbl[0] = 0xffff;
+ port->port_guid = rxe->ifc_ops->port_guid(rxe);
+
+ spin_lock_init(&port->port_lock);
+
+ return 0;
+}
+
+/* init pools of managed objects */
+static int rxe_init_pools(struct rxe_dev *rxe)
+{
+ int err;
+
+ err = rxe_pool_init(rxe, &rxe->uc_pool, RXE_TYPE_UC,
+ rxe->max_ucontext);
+ if (err)
+ goto err1;
+
+ err = rxe_pool_init(rxe, &rxe->pd_pool, RXE_TYPE_PD,
+ rxe->attr.max_pd);
+ if (err)
+ goto err2;
+
+ err = rxe_pool_init(rxe, &rxe->ah_pool, RXE_TYPE_AH,
+ rxe->attr.max_ah);
+ if (err)
+ goto err3;
+
+ err = rxe_pool_init(rxe, &rxe->srq_pool, RXE_TYPE_SRQ,
+ rxe->attr.max_srq);
+ if (err)
+ goto err4;
+
+ err = rxe_pool_init(rxe, &rxe->qp_pool, RXE_TYPE_QP,
+ rxe->attr.max_qp);
+ if (err)
+ goto err5;
+
+ err = rxe_pool_init(rxe, &rxe->cq_pool, RXE_TYPE_CQ,
+ rxe->attr.max_cq);
+ if (err)
+ goto err6;
+
+ err = rxe_pool_init(rxe, &rxe->mr_pool, RXE_TYPE_MR,
+ rxe->attr.max_mr);
+ if (err)
+ goto err7;
+
+ err = rxe_pool_init(rxe, &rxe->mw_pool, RXE_TYPE_MW,
+ rxe->attr.max_mw);
+ if (err)
+ goto err8;
+
+ err = rxe_pool_init(rxe, &rxe->mc_grp_pool, RXE_TYPE_MC_GRP,
+ rxe->attr.max_mcast_grp);
+ if (err)
+ goto err9;
+
+ err = rxe_pool_init(rxe, &rxe->mc_elem_pool, RXE_TYPE_MC_ELEM,
+ rxe->attr.max_total_mcast_qp_attach);
+ if (err)
+ goto err10;
+
+ return 0;
+
+err10:
+ rxe_pool_cleanup(&rxe->mc_grp_pool);
+err9:
+ rxe_pool_cleanup(&rxe->mw_pool);
+err8:
+ rxe_pool_cleanup(&rxe->mr_pool);
+err7:
+ rxe_pool_cleanup(&rxe->cq_pool);
+err6:
+ rxe_pool_cleanup(&rxe->qp_pool);
+err5:
+ rxe_pool_cleanup(&rxe->srq_pool);
+err4:
+ rxe_pool_cleanup(&rxe->ah_pool);
+err3:
+ rxe_pool_cleanup(&rxe->pd_pool);
+err2:
+ rxe_pool_cleanup(&rxe->uc_pool);
+err1:
+ return err;
+}
+
+/* initialize rxe device state */
+static int rxe_init(struct rxe_dev *rxe)
+{
+ int err;
+
+ /* init default device parameters */
+ rxe_init_device_param(rxe);
+
+ err = rxe_init_ports(rxe);
+ if (err)
+ goto err1;
+
+ err = rxe_init_pools(rxe);
+ if (err)
+ goto err2;
+
+ /* init pending mmap list */
+ spin_lock_init(&rxe->mmap_offset_lock);
+ spin_lock_init(&rxe->pending_lock);
+ INIT_LIST_HEAD(&rxe->pending_mmaps);
+ INIT_LIST_HEAD(&rxe->list);
+
+ mutex_init(&rxe->usdev_lock);
+
+ return 0;
+
+err2:
+ rxe_cleanup_ports(rxe);
+err1:
+ return err;
+}
+
+int rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
+{
+ struct rxe_port *port = &rxe->port;
+ enum ib_mtu mtu;
+
+ mtu = eth_mtu_int_to_enum(ndev_mtu);
+
+ /* Make sure that the new MTU is in range */
+ mtu = mtu ? min_t(enum ib_mtu, mtu, RXE_PORT_MAX_MTU) : IB_MTU_256;
+
+ port->attr.active_mtu = mtu;
+ port->mtu_cap = ib_mtu_enum_to_int(mtu);
+
+ return 0;
+}
+EXPORT_SYMBOL(rxe_set_mtu);
+
+/* called by the ifc layer to create a new rxe device.
+ * The caller should allocate memory for rxe by calling ib_alloc_device.
+ */
+int rxe_add(struct rxe_dev *rxe, unsigned int mtu)
+{
+ int err;
+
+ kref_init(&rxe->ref_cnt);
+
+ err = rxe_init(rxe);
+ if (err)
+ goto err1;
+
+ err = rxe_set_mtu(rxe, mtu);
+ if (err)
+ goto err1;
+
+ err = rxe_register_device(rxe);
+ if (err)
+ goto err1;
+
+ return 0;
+
+err1:
+ rxe_dev_put(rxe);
+ return err;
+}
+EXPORT_SYMBOL(rxe_add);
+
+/* called by the ifc layer to remove a device */
+void rxe_remove(struct rxe_dev *rxe)
+{
+ rxe_unregister_device(rxe);
+
+ rxe_dev_put(rxe);
+}
+EXPORT_SYMBOL(rxe_remove);
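
rxe_add() and rxe_remove() hang the whole device lifetime off one kref: every error path and the normal remove path end in rxe_dev_put(), and rxe_release() runs exactly once when the last reference drops. A userspace sketch of that refcount pattern (C11 atomics standing in for the kernel's kref):

#include <stdatomic.h>
#include <stdio.h>

struct obj { atomic_int ref; };

static void obj_release(struct obj *o) { printf("released\n"); }

static void obj_get(struct obj *o) { atomic_fetch_add(&o->ref, 1); }

static void obj_put(struct obj *o)
{
	/* fetch_sub returns the old value: old == 1 means we dropped
	 * the last reference, so teardown happens exactly once
	 */
	if (atomic_fetch_sub(&o->ref, 1) == 1)
		obj_release(o);
}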
+
+static int __init rxe_module_init(void)
+{
+ int err;
+
+ /* initialize slab caches for managed objects */
+ err = rxe_cache_init();
+ if (err) {
+ pr_err("rxe: unable to init object pools\n");
+ return err;
+ }
+
+ err = rxe_net_ipv4_init();
+ if (err) {
+ pr_err("rxe: unable to init ipv4 tunnel\n");
+ rxe_cache_exit();
+ goto exit;
+ }
+
+ err = rxe_net_ipv6_init();
+ if (err) {
+ pr_err("rxe: unable to init ipv6 tunnel\n");
+ rxe_cache_exit();
+ goto exit;
+ }
+
+ err = register_netdevice_notifier(&rxe_net_notifier);
+ if (err) {
+ pr_err("rxe: Failed to rigister netdev notifier\n");
+ goto exit;
+ }
+
+ pr_info("rxe: loaded\n");
+
+ return 0;
+
+exit:
+ rxe_release_udp_tunnel(recv_sockets.sk4);
+ rxe_release_udp_tunnel(recv_sockets.sk6);
+ return err;
+}
+
+static void __exit rxe_module_exit(void)
+{
+ rxe_remove_all();
+ rxe_net_exit();
+ rxe_cache_exit();
+
+ pr_info("rxe: unloaded\n");
+}
+
+module_init(rxe_module_init);
+module_exit(rxe_module_exit);
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
new file mode 100644
index 000000000..12c71c549
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef RXE_H
+#define RXE_H
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/crc32.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_pack.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_cache.h>
+#include <rdma/ib_addr.h>
+
+#include "rxe_net.h"
+#include "rxe_opcode.h"
+#include "rxe_hdr.h"
+#include "rxe_param.h"
+#include "rxe_verbs.h"
+
+#define RXE_UVERBS_ABI_VERSION (1)
+
+#define IB_PHYS_STATE_LINK_UP (5)
+#define IB_PHYS_STATE_LINK_DOWN (3)
+
+#define RXE_ROCE_V2_SPORT (0xc000)
+
+int rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
+
+int rxe_add(struct rxe_dev *rxe, unsigned int mtu);
+void rxe_remove(struct rxe_dev *rxe);
+void rxe_remove_all(void);
+
+int rxe_rcv(struct sk_buff *skb);
+
+void rxe_dev_put(struct rxe_dev *rxe);
+struct rxe_dev *net_to_rxe(struct net_device *ndev);
+struct rxe_dev *get_rxe_by_name(const char *name);
+
+void rxe_port_up(struct rxe_dev *rxe);
+void rxe_port_down(struct rxe_dev *rxe);
+
+#endif /* RXE_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c
new file mode 100644
index 000000000..5c9474212
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_av.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "rxe.h"
+#include "rxe_loc.h"
+
+int rxe_av_chk_attr(struct rxe_dev *rxe, struct ib_ah_attr *attr)
+{
+ struct rxe_port *port;
+
+ if (attr->port_num != 1) {
+ pr_info("rxe: invalid port_num = %d\n", attr->port_num);
+ return -EINVAL;
+ }
+
+ port = &rxe->port;
+
+ if (attr->ah_flags & IB_AH_GRH) {
+ if (attr->grh.sgid_index >= port->attr.gid_tbl_len) {
+ pr_info("rxe: invalid sgid index = %d\n",
+ attr->grh.sgid_index);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int rxe_av_from_attr(struct rxe_dev *rxe, u8 port_num,
+ struct rxe_av *av, struct ib_ah_attr *attr)
+{
+ memset(av, 0, sizeof(*av));
+ memcpy(&av->grh, &attr->grh, sizeof(attr->grh));
+ av->port_num = port_num;
+ return 0;
+}
+
+int rxe_av_to_attr(struct rxe_dev *rxe, struct rxe_av *av,
+ struct ib_ah_attr *attr)
+{
+ memcpy(&attr->grh, &av->grh, sizeof(av->grh));
+ attr->port_num = av->port_num;
+ return 0;
+}
+
+int rxe_av_fill_ip_info(struct rxe_dev *rxe,
+ struct rxe_av *av,
+ struct ib_ah_attr *attr,
+ struct ib_gid_attr *sgid_attr,
+ union ib_gid *sgid)
+{
+ rdma_gid2ip(&av->sgid_addr._sockaddr, sgid);
+ rdma_gid2ip(&av->dgid_addr._sockaddr, &attr->grh.dgid);
+ av->network_type = ib_gid_to_network_type(sgid_attr->gid_type, sgid);
+
+ return 0;
+}
+
+struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt)
+{
+ if (!pkt || !pkt->qp)
+ return NULL;
+
+ if (qp_type(pkt->qp) == IB_QPT_RC || qp_type(pkt->qp) == IB_QPT_UC)
+ return &pkt->qp->pri_av;
+
+ return (pkt->wqe) ? &pkt->wqe->av : NULL;
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
new file mode 100644
index 000000000..1c59ef2c6
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -0,0 +1,747 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+
+#include "rxe.h"
+#include "rxe_loc.h"
+#include "rxe_queue.h"
+#include "rxe_task.h"
+
+enum comp_state {
+ COMPST_GET_ACK,
+ COMPST_GET_WQE,
+ COMPST_COMP_WQE,
+ COMPST_COMP_ACK,
+ COMPST_CHECK_PSN,
+ COMPST_CHECK_ACK,
+ COMPST_READ,
+ COMPST_ATOMIC,
+ COMPST_WRITE_SEND,
+ COMPST_UPDATE_COMP,
+ COMPST_ERROR_RETRY,
+ COMPST_RNR_RETRY,
+ COMPST_ERROR,
+ COMPST_EXIT, /* We have an issue, and we want to rerun the completer */
+ COMPST_DONE, /* The completer finished successfully */
+};
+
+static char *comp_state_name[] = {
+ [COMPST_GET_ACK] = "GET ACK",
+ [COMPST_GET_WQE] = "GET WQE",
+ [COMPST_COMP_WQE] = "COMP WQE",
+ [COMPST_COMP_ACK] = "COMP ACK",
+ [COMPST_CHECK_PSN] = "CHECK PSN",
+ [COMPST_CHECK_ACK] = "CHECK ACK",
+ [COMPST_READ] = "READ",
+ [COMPST_ATOMIC] = "ATOMIC",
+ [COMPST_WRITE_SEND] = "WRITE/SEND",
+ [COMPST_UPDATE_COMP] = "UPDATE COMP",
+ [COMPST_ERROR_RETRY] = "ERROR RETRY",
+ [COMPST_RNR_RETRY] = "RNR RETRY",
+ [COMPST_ERROR] = "ERROR",
+ [COMPST_EXIT] = "EXIT",
+ [COMPST_DONE] = "DONE",
+};
+
+static unsigned long rnrnak_usec[32] = {
+ [IB_RNR_TIMER_655_36] = 655360,
+ [IB_RNR_TIMER_000_01] = 10,
+ [IB_RNR_TIMER_000_02] = 20,
+ [IB_RNR_TIMER_000_03] = 30,
+ [IB_RNR_TIMER_000_04] = 40,
+ [IB_RNR_TIMER_000_06] = 60,
+ [IB_RNR_TIMER_000_08] = 80,
+ [IB_RNR_TIMER_000_12] = 120,
+ [IB_RNR_TIMER_000_16] = 160,
+ [IB_RNR_TIMER_000_24] = 240,
+ [IB_RNR_TIMER_000_32] = 320,
+ [IB_RNR_TIMER_000_48] = 480,
+ [IB_RNR_TIMER_000_64] = 640,
+ [IB_RNR_TIMER_000_96] = 960,
+ [IB_RNR_TIMER_001_28] = 1280,
+ [IB_RNR_TIMER_001_92] = 1920,
+ [IB_RNR_TIMER_002_56] = 2560,
+ [IB_RNR_TIMER_003_84] = 3840,
+ [IB_RNR_TIMER_005_12] = 5120,
+ [IB_RNR_TIMER_007_68] = 7680,
+ [IB_RNR_TIMER_010_24] = 10240,
+ [IB_RNR_TIMER_015_36] = 15360,
+ [IB_RNR_TIMER_020_48] = 20480,
+ [IB_RNR_TIMER_030_72] = 30720,
+ [IB_RNR_TIMER_040_96] = 40960,
+ [IB_RNR_TIMER_061_44] = 61440,
+ [IB_RNR_TIMER_081_92] = 81920,
+ [IB_RNR_TIMER_122_88] = 122880,
+ [IB_RNR_TIMER_163_84] = 163840,
+ [IB_RNR_TIMER_245_76] = 245760,
+ [IB_RNR_TIMER_327_68] = 327680,
+ [IB_RNR_TIMER_491_52] = 491520,
+};
+
+static inline unsigned long rnrnak_jiffies(u8 timeout)
+{
+ return max_t(unsigned long,
+ usecs_to_jiffies(rnrnak_usec[timeout]), 1);
+}
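
The rnrnak_usec[] table decodes the 5-bit IBTA RNR-NAK timer field into microseconds, and rnrnak_jiffies() clamps the conversion so sub-jiffy delays still arm the timer for at least one tick. A plain-C worked example (usecs_to_jiffies() is simplified here; the kernel version also rounds up):

#include <stdio.h>

#define HZ 250

static unsigned long usecs_to_jiffies(unsigned long us)
{
	return (us * HZ + 999999) / 1000000; /* round up */
}

int main(void)
{
	unsigned long us = 10240; /* IB_RNR_TIMER_010_24 = 10.24 ms */
	unsigned long j = usecs_to_jiffies(us);

	if (j < 1)
		j = 1; /* the max_t() guard in rnrnak_jiffies() */
	printf("%lu us -> %lu jiffies\n", us, j); /* 3 jiffies at HZ=250 */
	return 0;
}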
+
+static enum ib_wc_opcode wr_to_wc_opcode(enum ib_wr_opcode opcode)
+{
+ switch (opcode) {
+ case IB_WR_RDMA_WRITE: return IB_WC_RDMA_WRITE;
+ case IB_WR_RDMA_WRITE_WITH_IMM: return IB_WC_RDMA_WRITE;
+ case IB_WR_SEND: return IB_WC_SEND;
+ case IB_WR_SEND_WITH_IMM: return IB_WC_SEND;
+ case IB_WR_RDMA_READ: return IB_WC_RDMA_READ;
+ case IB_WR_ATOMIC_CMP_AND_SWP: return IB_WC_COMP_SWAP;
+ case IB_WR_ATOMIC_FETCH_AND_ADD: return IB_WC_FETCH_ADD;
+ case IB_WR_LSO: return IB_WC_LSO;
+ case IB_WR_SEND_WITH_INV: return IB_WC_SEND;
+ case IB_WR_RDMA_READ_WITH_INV: return IB_WC_RDMA_READ;
+ case IB_WR_LOCAL_INV: return IB_WC_LOCAL_INV;
+ case IB_WR_REG_MR: return IB_WC_REG_MR;
+
+ default:
+ return 0xff;
+ }
+}
+
+void retransmit_timer(unsigned long data)
+{
+ struct rxe_qp *qp = (struct rxe_qp *)data;
+
+ if (qp->valid) {
+ qp->comp.timeout = 1;
+ rxe_run_task(&qp->comp.task, 1);
+ }
+}
+
+void rxe_comp_queue_pkt(struct rxe_dev *rxe, struct rxe_qp *qp,
+ struct sk_buff *skb)
+{
+ int must_sched;
+
+ skb_queue_tail(&qp->resp_pkts, skb);
+
+ must_sched = skb_queue_len(&qp->resp_pkts) > 1;
+ rxe_run_task(&qp->comp.task, must_sched);
+}
+
+static inline enum comp_state get_wqe(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt,
+ struct rxe_send_wqe **wqe_p)
+{
+ struct rxe_send_wqe *wqe;
+
+ /* we come here whether or not we found a response packet to see if
+ * there are any posted WQEs
+ */
+ wqe = queue_head(qp->sq.queue);
+ *wqe_p = wqe;
+
+ /* no WQE or requester has not started it yet */
+ if (!wqe || wqe->state == wqe_state_posted)
+ return pkt ? COMPST_DONE : COMPST_EXIT;
+
+ /* WQE does not require an ack */
+ if (wqe->state == wqe_state_done)
+ return COMPST_COMP_WQE;
+
+ /* WQE caused an error */
+ if (wqe->state == wqe_state_error)
+ return COMPST_ERROR;
+
+ /* we have a WQE, if we also have an ack check its PSN */
+ return pkt ? COMPST_CHECK_PSN : COMPST_EXIT;
+}
+
+static inline void reset_retry_counters(struct rxe_qp *qp)
+{
+ qp->comp.retry_cnt = qp->attr.retry_cnt;
+ qp->comp.rnr_retry = qp->attr.rnr_retry;
+}
+
+static inline enum comp_state check_psn(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt,
+ struct rxe_send_wqe *wqe)
+{
+ s32 diff;
+
+ /* check to see if response is past the oldest WQE. if it is, complete
+ * send/write or error read/atomic
+ */
+ diff = psn_compare(pkt->psn, wqe->last_psn);
+ if (diff > 0) {
+ if (wqe->state == wqe_state_pending) {
+ if (wqe->mask & WR_ATOMIC_OR_READ_MASK)
+ return COMPST_ERROR_RETRY;
+
+ reset_retry_counters(qp);
+ return COMPST_COMP_WQE;
+ } else {
+ return COMPST_DONE;
+ }
+ }
+
+ /* compare response packet to expected response */
+ diff = psn_compare(pkt->psn, qp->comp.psn);
+ if (diff < 0) {
+ /* the response is most likely a retried packet: if it matches an
+ * uncompleted WQE, complete it; otherwise ignore it
+ */
+ if (pkt->psn == wqe->last_psn)
+ return COMPST_COMP_ACK;
+ else
+ return COMPST_DONE;
+ } else if ((diff > 0) && (wqe->mask & WR_ATOMIC_OR_READ_MASK)) {
+ return COMPST_ERROR_RETRY;
+ } else {
+ return COMPST_CHECK_ACK;
+ }
+}
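
check_psn() relies on serial-number arithmetic over the 24-bit PSN space: shifting the 24-bit difference into the top of an s32 makes wrapped sequence numbers compare correctly. A plain-C illustration mirroring that arithmetic (the psn_compare() helper itself lives in the rxe headers):

#include <stdio.h>
#include <stdint.h>

static int psn_compare(uint32_t a, uint32_t b)
{
	/* the 24-bit difference lands in the sign-carrying top bits */
	return (int32_t)((a - b) << 8);
}

int main(void)
{
	/* PSN 2 is "after" PSN 0xfffffe across the 24-bit wrap */
	printf("%d\n", psn_compare(2, 0xfffffe) > 0); /* 1: newer */
	printf("%d\n", psn_compare(0xfffffe, 2) > 0); /* 0: older */
	return 0;
}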
+
+static inline enum comp_state check_ack(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt,
+ struct rxe_send_wqe *wqe)
+{
+ unsigned int mask = pkt->mask;
+ u8 syn;
+
+ /* Check the sequence only */
+ switch (qp->comp.opcode) {
+ case -1:
+ /* Will catch all *_ONLY cases. */
+ if (!(mask & RXE_START_MASK))
+ return COMPST_ERROR;
+
+ break;
+
+ case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST:
+ case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
+ if (pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE &&
+ pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST) {
+ return COMPST_ERROR;
+ }
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ /* Check operation validity. */
+ switch (pkt->opcode) {
+ case IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST:
+ case IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST:
+ case IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY:
+ syn = aeth_syn(pkt);
+
+ if ((syn & AETH_TYPE_MASK) != AETH_ACK)
+ return COMPST_ERROR;
+
+ /* Fall through (IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE
+ * doesn't have an AETH)
+ */
+ case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
+ if (wqe->wr.opcode != IB_WR_RDMA_READ &&
+ wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV) {
+ return COMPST_ERROR;
+ }
+ reset_retry_counters(qp);
+ return COMPST_READ;
+
+ case IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE:
+ syn = aeth_syn(pkt);
+
+ if ((syn & AETH_TYPE_MASK) != AETH_ACK)
+ return COMPST_ERROR;
+
+ if (wqe->wr.opcode != IB_WR_ATOMIC_CMP_AND_SWP &&
+ wqe->wr.opcode != IB_WR_ATOMIC_FETCH_AND_ADD)
+ return COMPST_ERROR;
+ reset_retry_counters(qp);
+ return COMPST_ATOMIC;
+
+ case IB_OPCODE_RC_ACKNOWLEDGE:
+ syn = aeth_syn(pkt);
+ switch (syn & AETH_TYPE_MASK) {
+ case AETH_ACK:
+ reset_retry_counters(qp);
+ return COMPST_WRITE_SEND;
+
+ case AETH_RNR_NAK:
+ return COMPST_RNR_RETRY;
+
+ case AETH_NAK:
+ switch (syn) {
+ case AETH_NAK_PSN_SEQ_ERROR:
+ /* a nak implicitly acks all packets with psns
+ * before
+ */
+ if (psn_compare(pkt->psn, qp->comp.psn) > 0) {
+ qp->comp.psn = pkt->psn;
+ if (qp->req.wait_psn) {
+ qp->req.wait_psn = 0;
+ rxe_run_task(&qp->req.task, 1);
+ }
+ }
+ return COMPST_ERROR_RETRY;
+
+ case AETH_NAK_INVALID_REQ:
+ wqe->status = IB_WC_REM_INV_REQ_ERR;
+ return COMPST_ERROR;
+
+ case AETH_NAK_REM_ACC_ERR:
+ wqe->status = IB_WC_REM_ACCESS_ERR;
+ return COMPST_ERROR;
+
+ case AETH_NAK_REM_OP_ERR:
+ wqe->status = IB_WC_REM_OP_ERR;
+ return COMPST_ERROR;
+
+ default:
+ pr_warn("unexpected nak %x\n", syn);
+ wqe->status = IB_WC_REM_OP_ERR;
+ return COMPST_ERROR;
+ }
+
+ default:
+ return COMPST_ERROR;
+ }
+ break;
+
+ default:
+ pr_warn("unexpected opcode\n");
+ }
+
+ return COMPST_ERROR;
+}
+
+static inline enum comp_state do_read(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt,
+ struct rxe_send_wqe *wqe)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ int ret;
+
+ ret = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE,
+ &wqe->dma, payload_addr(pkt),
+ payload_size(pkt), to_mem_obj, NULL);
+ if (ret)
+ return COMPST_ERROR;
+
+ if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK))
+ return COMPST_COMP_ACK;
+ else
+ return COMPST_UPDATE_COMP;
+}
+
+static inline enum comp_state do_atomic(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt,
+ struct rxe_send_wqe *wqe)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ int ret;
+
+ u64 atomic_orig = atmack_orig(pkt);
+
+ ret = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE,
+ &wqe->dma, &atomic_orig,
+ sizeof(u64), to_mem_obj, NULL);
+ if (ret)
+ return COMPST_ERROR;
+ else
+ return COMPST_COMP_ACK;
+}
+
+static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+ struct rxe_cqe *cqe)
+{
+ memset(cqe, 0, sizeof(*cqe));
+
+ if (!qp->is_user) {
+ struct ib_wc *wc = &cqe->ibwc;
+
+ wc->wr_id = wqe->wr.wr_id;
+ wc->status = wqe->status;
+ wc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
+ wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
+ wc->wc_flags = IB_WC_WITH_IMM;
+ wc->byte_len = wqe->dma.length;
+ wc->qp = &qp->ibqp;
+ } else {
+ struct ib_uverbs_wc *uwc = &cqe->uibwc;
+
+ uwc->wr_id = wqe->wr.wr_id;
+ uwc->status = wqe->status;
+ uwc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
+ wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
+ uwc->wc_flags = IB_WC_WITH_IMM;
+ uwc->byte_len = wqe->dma.length;
+ uwc->qp_num = qp->ibqp.qp_num;
+ }
+}
+
+static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+{
+ struct rxe_cqe cqe;
+
+ if ((qp->sq_sig_type == IB_SIGNAL_ALL_WR) ||
+ (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
+ (qp->req.state == QP_STATE_ERROR)) {
+ make_send_cqe(qp, wqe, &cqe);
+ rxe_cq_post(qp->scq, &cqe, 0);
+ }
+
+ advance_consumer(qp->sq.queue);
+
+ /*
+ * we completed something so let req run again
+ * if it is trying to fence
+ */
+ if (qp->req.wait_fence) {
+ qp->req.wait_fence = 0;
+ rxe_run_task(&qp->req.task, 1);
+ }
+}
+
+static inline enum comp_state complete_ack(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt,
+ struct rxe_send_wqe *wqe)
+{
+ unsigned long flags;
+
+ if (wqe->has_rd_atomic) {
+ wqe->has_rd_atomic = 0;
+ atomic_inc(&qp->req.rd_atomic);
+ if (qp->req.need_rd_atomic) {
+ qp->comp.timeout_retry = 0;
+ qp->req.need_rd_atomic = 0;
+ rxe_run_task(&qp->req.task, 1);
+ }
+ }
+
+ if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
+ /* state_lock used by requester & completer */
+ spin_lock_irqsave(&qp->state_lock, flags);
+ if ((qp->req.state == QP_STATE_DRAIN) &&
+ (qp->comp.psn == qp->req.psn)) {
+ qp->req.state = QP_STATE_DRAINED;
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+
+ if (qp->ibqp.event_handler) {
+ struct ib_event ev;
+
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_SQ_DRAINED;
+ qp->ibqp.event_handler(&ev,
+ qp->ibqp.qp_context);
+ }
+ } else {
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+ }
+ }
+
+ do_complete(qp, wqe);
+
+ if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
+ return COMPST_UPDATE_COMP;
+ else
+ return COMPST_DONE;
+}
+
+static inline enum comp_state complete_wqe(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt,
+ struct rxe_send_wqe *wqe)
+{
+ qp->comp.opcode = -1;
+
+ if (pkt) {
+ if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
+ qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
+
+ if (qp->req.wait_psn) {
+ qp->req.wait_psn = 0;
+ rxe_run_task(&qp->req.task, 1);
+ }
+ }
+
+ do_complete(qp, wqe);
+
+ return COMPST_GET_WQE;
+}
+
+int rxe_completer(void *arg)
+{
+ struct rxe_qp *qp = (struct rxe_qp *)arg;
+ struct rxe_send_wqe *wqe = NULL;
+ struct sk_buff *skb = NULL;
+ struct rxe_pkt_info *pkt = NULL;
+ enum comp_state state;
+
+ if (!qp->valid) {
+ while ((skb = skb_dequeue(&qp->resp_pkts))) {
+ rxe_drop_ref(qp);
+ kfree_skb(skb);
+ }
+ skb = NULL;
+ pkt = NULL;
+
+ while (queue_head(qp->sq.queue))
+ advance_consumer(qp->sq.queue);
+
+ goto exit;
+ }
+
+ if (qp->req.state == QP_STATE_ERROR) {
+ while ((skb = skb_dequeue(&qp->resp_pkts))) {
+ rxe_drop_ref(qp);
+ kfree_skb(skb);
+ }
+ skb = NULL;
+ pkt = NULL;
+
+ while ((wqe = queue_head(qp->sq.queue))) {
+ wqe->status = IB_WC_WR_FLUSH_ERR;
+ do_complete(qp, wqe);
+ }
+
+ goto exit;
+ }
+
+ if (qp->req.state == QP_STATE_RESET) {
+ while ((skb = skb_dequeue(&qp->resp_pkts))) {
+ rxe_drop_ref(qp);
+ kfree_skb(skb);
+ }
+ skb = NULL;
+ pkt = NULL;
+
+ while (queue_head(qp->sq.queue))
+ advance_consumer(qp->sq.queue);
+
+ goto exit;
+ }
+
+ if (qp->comp.timeout) {
+ qp->comp.timeout_retry = 1;
+ qp->comp.timeout = 0;
+ } else {
+ qp->comp.timeout_retry = 0;
+ }
+
+ if (qp->req.need_retry)
+ goto exit;
+
+ state = COMPST_GET_ACK;
+
+ while (1) {
+ pr_debug("state = %s\n", comp_state_name[state]);
+ switch (state) {
+ case COMPST_GET_ACK:
+ skb = skb_dequeue(&qp->resp_pkts);
+ if (skb) {
+ pkt = SKB_TO_PKT(skb);
+ qp->comp.timeout_retry = 0;
+ }
+ state = COMPST_GET_WQE;
+ break;
+
+ case COMPST_GET_WQE:
+ state = get_wqe(qp, pkt, &wqe);
+ break;
+
+ case COMPST_CHECK_PSN:
+ state = check_psn(qp, pkt, wqe);
+ break;
+
+ case COMPST_CHECK_ACK:
+ state = check_ack(qp, pkt, wqe);
+ break;
+
+ case COMPST_READ:
+ state = do_read(qp, pkt, wqe);
+ break;
+
+ case COMPST_ATOMIC:
+ state = do_atomic(qp, pkt, wqe);
+ break;
+
+ case COMPST_WRITE_SEND:
+ if (wqe->state == wqe_state_pending &&
+ wqe->last_psn == pkt->psn)
+ state = COMPST_COMP_ACK;
+ else
+ state = COMPST_UPDATE_COMP;
+ break;
+
+ case COMPST_COMP_ACK:
+ state = complete_ack(qp, pkt, wqe);
+ break;
+
+ case COMPST_COMP_WQE:
+ state = complete_wqe(qp, pkt, wqe);
+ break;
+
+ case COMPST_UPDATE_COMP:
+ if (pkt->mask & RXE_END_MASK)
+ qp->comp.opcode = -1;
+ else
+ qp->comp.opcode = pkt->opcode;
+
+ if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
+ qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
+
+ if (qp->req.wait_psn) {
+ qp->req.wait_psn = 0;
+ rxe_run_task(&qp->req.task, 1);
+ }
+
+ state = COMPST_DONE;
+ break;
+
+ case COMPST_DONE:
+ if (pkt) {
+ rxe_drop_ref(pkt->qp);
+ kfree_skb(skb);
+ }
+ goto done;
+
+ case COMPST_EXIT:
+ if (qp->comp.timeout_retry && wqe) {
+ state = COMPST_ERROR_RETRY;
+ break;
+ }
+
+ /* re-arm the retransmit timer if
+ * (1) QP is type RC
+ * (2) the QP is alive
+ * (3) there is a packet sent by the requester that
+ * might be acked (we still might get spurious
+ * timeouts but try to keep them as few as possible)
+ * (4) the timeout parameter is set
+ */
+ if ((qp_type(qp) == IB_QPT_RC) &&
+ (qp->req.state == QP_STATE_READY) &&
+ (psn_compare(qp->req.psn, qp->comp.psn) > 0) &&
+ qp->qp_timeout_jiffies)
+ mod_timer(&qp->retrans_timer,
+ jiffies + qp->qp_timeout_jiffies);
+ goto exit;
+
+ case COMPST_ERROR_RETRY:
+ /* we come here if the retry timer fired and we did
+ * not receive a response packet. try to retry the send
+ * queue if that makes sense and the limits have not
+ * been exceeded. remember that some timeouts are
+ * spurious since we do not reset the timer but kick
+ * it down the road or let it expire
+ */
+
+ /* there is nothing to retry in this case */
+ if (!wqe || (wqe->state == wqe_state_posted))
+ goto exit;
+
+ if (qp->comp.retry_cnt > 0) {
+ if (qp->comp.retry_cnt != 7)
+ qp->comp.retry_cnt--;
+
+ /* no point in retrying if we have already
+ * seen the last ack that the requester could
+ * have caused
+ */
+ if (psn_compare(qp->req.psn,
+ qp->comp.psn) > 0) {
+ /* tell the requester to retry the
+ * send queue next time around
+ */
+ qp->req.need_retry = 1;
+ rxe_run_task(&qp->req.task, 1);
+ }
+
+ if (pkt) {
+ rxe_drop_ref(pkt->qp);
+ kfree_skb(skb);
+ }
+
+ goto exit;
+
+ } else {
+ wqe->status = IB_WC_RETRY_EXC_ERR;
+ state = COMPST_ERROR;
+ }
+ break;
+
+ case COMPST_RNR_RETRY:
+ if (qp->comp.rnr_retry > 0) {
+ if (qp->comp.rnr_retry != 7)
+ qp->comp.rnr_retry--;
+
+ qp->req.need_retry = 1;
+ pr_debug("set rnr nak timer\n");
+ mod_timer(&qp->rnr_nak_timer,
+ jiffies + rnrnak_jiffies(aeth_syn(pkt)
+ & ~AETH_TYPE_MASK));
+ goto exit;
+ } else {
+ wqe->status = IB_WC_RNR_RETRY_EXC_ERR;
+ state = COMPST_ERROR;
+ }
+ break;
+
+ case COMPST_ERROR:
+ do_complete(qp, wqe);
+ rxe_qp_error(qp);
+
+ if (pkt) {
+ rxe_drop_ref(pkt->qp);
+ kfree_skb(skb);
+ }
+
+ goto exit;
+ }
+ }
+
+exit:
+ /* we come here if we are done with processing and want the task to
+ * exit from the loop calling us
+ */
+ return -EAGAIN;
+
+done:
+ /* we come here if we have processed a packet and want the task to
+ * call us again to see if there is anything else to do
+ */
+ return 0;
+}
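
The return values of rxe_completer() encode its contract with the task layer: 0 means "a packet was consumed, call me again", -EAGAIN means "nothing left, stop iterating". A userspace sketch of a driver loop honoring that contract (illustrative only; the kernel's rxe_task machinery also handles rescheduling and concurrency):

#include <errno.h>

static int run_task(int (*handler)(void *), void *arg)
{
	int ret;

	/* keep calling while the handler reports more work (0) */
	do {
		ret = handler(arg);
	} while (ret == 0);

	/* -EAGAIN is the normal "park me" exit, not an error */
	return ret == -EAGAIN ? 0 : ret;
}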
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
new file mode 100644
index 000000000..e5e6a5e7d
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "rxe.h"
+#include "rxe_loc.h"
+#include "rxe_queue.h"
+
+int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
+ int cqe, int comp_vector, struct ib_udata *udata)
+{
+ int count;
+
+ if (cqe <= 0) {
+ pr_warn("cqe(%d) <= 0\n", cqe);
+ goto err1;
+ }
+
+ if (cqe > rxe->attr.max_cqe) {
+ pr_warn("cqe(%d) > max_cqe(%d)\n",
+ cqe, rxe->attr.max_cqe);
+ goto err1;
+ }
+
+ if (cq) {
+ count = queue_count(cq->queue);
+ if (cqe < count) {
+			pr_warn("cqe(%d) < current # elements in queue (%d)\n",
+ cqe, count);
+ goto err1;
+ }
+ }
+
+ return 0;
+
+err1:
+ return -EINVAL;
+}
+
+static void rxe_send_complete(unsigned long data)
+{
+ struct rxe_cq *cq = (struct rxe_cq *)data;
+
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+}
+
+int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
+ int comp_vector, struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ int err;
+
+ cq->queue = rxe_queue_init(rxe, &cqe,
+ sizeof(struct rxe_cqe));
+ if (!cq->queue) {
+ pr_warn("unable to create cq\n");
+ return -ENOMEM;
+ }
+
+ err = do_mmap_info(rxe, udata, false, context, cq->queue->buf,
+ cq->queue->buf_size, &cq->queue->ip);
+ if (err) {
+ kvfree(cq->queue->buf);
+ kfree(cq->queue);
+ return err;
+ }
+
+ if (udata)
+ cq->is_user = 1;
+
+ tasklet_init(&cq->comp_task, rxe_send_complete, (unsigned long)cq);
+
+ spin_lock_init(&cq->cq_lock);
+ cq->ibcq.cqe = cqe;
+ return 0;
+}
+
+int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe, struct ib_udata *udata)
+{
+ int err;
+
+ err = rxe_queue_resize(cq->queue, (unsigned int *)&cqe,
+ sizeof(struct rxe_cqe),
+ cq->queue->ip ? cq->queue->ip->context : NULL,
+ udata, NULL, &cq->cq_lock);
+ if (!err)
+ cq->ibcq.cqe = cqe;
+
+ return err;
+}
+
+int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
+{
+ struct ib_event ev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+
+ if (unlikely(queue_full(cq->queue))) {
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+ if (cq->ibcq.event_handler) {
+ ev.device = cq->ibcq.device;
+ ev.element.cq = &cq->ibcq;
+ ev.event = IB_EVENT_CQ_ERR;
+ cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
+ }
+
+ return -EBUSY;
+ }
+
+ memcpy(producer_addr(cq->queue), cqe, sizeof(*cqe));
+
+ /* make sure all changes to the CQ are written before we update the
+ * producer pointer
+ */
+ smp_wmb();
+
+ advance_producer(cq->queue);
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+ if ((cq->notify == IB_CQ_NEXT_COMP) ||
+ (cq->notify == IB_CQ_SOLICITED && solicited)) {
+ cq->notify = 0;
+ tasklet_schedule(&cq->comp_task);
+ }
+
+ return 0;
+}
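+
+/* A minimal sketch (hypothetical, not part of the driver) of the consumer
+ * side that pairs with the smp_wmb() above: the poller must observe the
+ * producer index before it reads the CQE contents, hence the smp_rmb().
+ * Assumes the queue helpers from rxe_queue.h.
+ */
+static inline int example_cq_peek(struct rxe_cq *cq, struct rxe_cqe *cqe)
+{
+	if (queue_empty(cq->queue))
+		return -EAGAIN;
+
+	smp_rmb();	/* pairs with smp_wmb() in rxe_cq_post() */
+
+	memcpy(cqe, consumer_addr(cq->queue), sizeof(*cqe));
+	advance_consumer(cq->queue);
+	return 0;
+}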
+
+void rxe_cq_cleanup(void *arg)
+{
+ struct rxe_cq *cq = arg;
+
+ if (cq->queue)
+ rxe_queue_cleanup(cq->queue);
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_dma.c b/drivers/infiniband/sw/rxe/rxe_dma.c
new file mode 100644
index 000000000..7634c1a81
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_dma.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "rxe.h"
+#include "rxe_loc.h"
+
+#define DMA_BAD_ADDER ((u64)0)
+
+static int rxe_mapping_error(struct ib_device *dev, u64 dma_addr)
+{
+ return dma_addr == DMA_BAD_ADDER;
+}
+
+static u64 rxe_dma_map_single(struct ib_device *dev,
+ void *cpu_addr, size_t size,
+ enum dma_data_direction direction)
+{
+ WARN_ON(!valid_dma_direction(direction));
+ return (uintptr_t)cpu_addr;
+}
+
+static void rxe_dma_unmap_single(struct ib_device *dev,
+ u64 addr, size_t size,
+ enum dma_data_direction direction)
+{
+ WARN_ON(!valid_dma_direction(direction));
+}
+
+static u64 rxe_dma_map_page(struct ib_device *dev,
+ struct page *page,
+ unsigned long offset,
+ size_t size, enum dma_data_direction direction)
+{
+ u64 addr;
+
+ WARN_ON(!valid_dma_direction(direction));
+
+ if (offset + size > PAGE_SIZE) {
+ addr = DMA_BAD_ADDER;
+ goto done;
+ }
+
+ addr = (uintptr_t)page_address(page);
+ if (addr)
+ addr += offset;
+
+done:
+ return addr;
+}
+
+static void rxe_dma_unmap_page(struct ib_device *dev,
+ u64 addr, size_t size,
+ enum dma_data_direction direction)
+{
+ WARN_ON(!valid_dma_direction(direction));
+}
+
+static int rxe_map_sg(struct ib_device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction direction)
+{
+ struct scatterlist *sg;
+ u64 addr;
+ int i;
+ int ret = nents;
+
+ WARN_ON(!valid_dma_direction(direction));
+
+ for_each_sg(sgl, sg, nents, i) {
+ addr = (uintptr_t)page_address(sg_page(sg));
+ if (!addr) {
+ ret = 0;
+ break;
+ }
+ sg->dma_address = addr + sg->offset;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ sg->dma_length = sg->length;
+#endif
+ }
+
+ return ret;
+}
+
+static void rxe_unmap_sg(struct ib_device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ WARN_ON(!valid_dma_direction(direction));
+}
+
+static void rxe_sync_single_for_cpu(struct ib_device *dev,
+ u64 addr,
+ size_t size, enum dma_data_direction dir)
+{
+}
+
+static void rxe_sync_single_for_device(struct ib_device *dev,
+ u64 addr,
+ size_t size, enum dma_data_direction dir)
+{
+}
+
+static void *rxe_dma_alloc_coherent(struct ib_device *dev, size_t size,
+ u64 *dma_handle, gfp_t flag)
+{
+ struct page *p;
+ void *addr = NULL;
+
+ p = alloc_pages(flag, get_order(size));
+ if (p)
+ addr = page_address(p);
+
+ if (dma_handle)
+ *dma_handle = (uintptr_t)addr;
+
+ return addr;
+}
+
+static void rxe_dma_free_coherent(struct ib_device *dev, size_t size,
+ void *cpu_addr, u64 dma_handle)
+{
+ free_pages((unsigned long)cpu_addr, get_order(size));
+}
+
+struct ib_dma_mapping_ops rxe_dma_mapping_ops = {
+ .mapping_error = rxe_mapping_error,
+ .map_single = rxe_dma_map_single,
+ .unmap_single = rxe_dma_unmap_single,
+ .map_page = rxe_dma_map_page,
+ .unmap_page = rxe_dma_unmap_page,
+ .map_sg = rxe_map_sg,
+ .unmap_sg = rxe_unmap_sg,
+ .sync_single_for_cpu = rxe_sync_single_for_cpu,
+ .sync_single_for_device = rxe_sync_single_for_device,
+ .alloc_coherent = rxe_dma_alloc_coherent,
+ .free_coherent = rxe_dma_free_coherent
+};
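+
+/* Since rxe is a software device, the "DMA" ops above are identity
+ * mappings: a kernel virtual address is handed back unchanged as the dma
+ * address. A hypothetical sketch of how an ULP reaches them; the ib_dma_*
+ * wrappers dispatch through dev->dma_ops when it is set.
+ */
+static inline u64 example_map_buf(struct ib_device *dev, void *buf,
+				  size_t len)
+{
+	/* resolves to rxe_dma_map_single(), i.e. (uintptr_t)buf */
+	return ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
+}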
diff --git a/drivers/infiniband/sw/rxe/rxe_hdr.h b/drivers/infiniband/sw/rxe/rxe_hdr.h
new file mode 100644
index 000000000..d57b5e956
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_hdr.h
@@ -0,0 +1,952 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef RXE_HDR_H
+#define RXE_HDR_H
+
+/* Information extracted from a packet and stored in the control block (cb)
+ * of its sk_buff for received packets. The cb array is 48 bytes, so this
+ * struct must not grow beyond that.
+ */
+struct rxe_pkt_info {
+ struct rxe_dev *rxe; /* device that owns packet */
+ struct rxe_qp *qp; /* qp that owns packet */
+ struct rxe_send_wqe *wqe; /* send wqe */
+ u8 *hdr; /* points to bth */
+ u32 mask; /* useful info about pkt */
+ u32 psn; /* bth psn of packet */
+ u16 pkey_index; /* partition of pkt */
+ u16 paylen; /* length of bth - icrc */
+ u8 port_num; /* port pkt received on */
+ u8 opcode; /* bth opcode of packet */
+ u8 offset; /* bth offset from pkt->hdr */
+};
+
+/* These macros should be used only on received skbs */
+#define SKB_TO_PKT(skb) ((struct rxe_pkt_info *)(skb)->cb)
+#define PKT_TO_SKB(pkt) container_of((void *)(pkt), struct sk_buff, cb)
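+
+/* A minimal usage sketch (hypothetical, not part of the driver): receive
+ * paths stash per-packet state in skb->cb and recover it later without a
+ * separate allocation.
+ */
+static inline void example_stash_pkt(struct sk_buff *skb, struct rxe_qp *qp)
+{
+	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
+
+	BUILD_BUG_ON(sizeof(struct rxe_pkt_info) >
+		     sizeof(((struct sk_buff *)0)->cb));
+	pkt->qp = qp;
+}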
+
+/*
+ * IBA header types and methods
+ *
+ * Some of these are for reference and completeness only, since rxe does
+ * not currently support the RD transport. Most of this could be moved
+ * into IB core; ib_pack.h has part of this but is incomplete.
+ *
+ * Header specific routines insert/extract values to/from headers. The
+ * routines named __hhh_(set_)fff() take a pointer to a hhh header and
+ * get (set) the fff field. The routines named hhh_(set_)fff() take a
+ * packet info struct and find the header and field based on the opcode
+ * in the packet. Conversion between network and cpu byte order is also
+ * done.
+ */
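+
+/*
+ * For example (illustrative only): given a raw pointer to a BTH,
+ *
+ *	psn = __bth_psn(bth);
+ *
+ * reads the field directly, while
+ *
+ *	psn = bth_psn(pkt);
+ *
+ * first locates the BTH at pkt->hdr + pkt->offset and then reads it.
+ */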
+
+#define RXE_ICRC_SIZE (4)
+#define RXE_MAX_HDR_LENGTH (80)
+
+/******************************************************************************
+ * Base Transport Header
+ ******************************************************************************/
+struct rxe_bth {
+ u8 opcode;
+ u8 flags;
+ __be16 pkey;
+ __be32 qpn;
+ __be32 apsn;
+};
+
+#define BTH_TVER (0)
+#define BTH_DEF_PKEY (0xffff)
+
+#define BTH_SE_MASK (0x80)
+#define BTH_MIG_MASK (0x40)
+#define BTH_PAD_MASK (0x30)
+#define BTH_TVER_MASK (0x0f)
+#define BTH_FECN_MASK (0x80000000)
+#define BTH_BECN_MASK (0x40000000)
+#define BTH_RESV6A_MASK (0x3f000000)
+#define BTH_QPN_MASK (0x00ffffff)
+#define BTH_ACK_MASK (0x80000000)
+#define BTH_RESV7_MASK (0x7f000000)
+#define BTH_PSN_MASK (0x00ffffff)
+
+static inline u8 __bth_opcode(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return bth->opcode;
+}
+
+static inline void __bth_set_opcode(void *arg, u8 opcode)
+{
+ struct rxe_bth *bth = arg;
+
+ bth->opcode = opcode;
+}
+
+static inline u8 __bth_se(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return 0 != (BTH_SE_MASK & bth->flags);
+}
+
+static inline void __bth_set_se(void *arg, int se)
+{
+ struct rxe_bth *bth = arg;
+
+ if (se)
+ bth->flags |= BTH_SE_MASK;
+ else
+ bth->flags &= ~BTH_SE_MASK;
+}
+
+static inline u8 __bth_mig(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return 0 != (BTH_MIG_MASK & bth->flags);
+}
+
+static inline void __bth_set_mig(void *arg, u8 mig)
+{
+ struct rxe_bth *bth = arg;
+
+ if (mig)
+ bth->flags |= BTH_MIG_MASK;
+ else
+ bth->flags &= ~BTH_MIG_MASK;
+}
+
+static inline u8 __bth_pad(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return (BTH_PAD_MASK & bth->flags) >> 4;
+}
+
+static inline void __bth_set_pad(void *arg, u8 pad)
+{
+ struct rxe_bth *bth = arg;
+
+ bth->flags = (BTH_PAD_MASK & (pad << 4)) |
+ (~BTH_PAD_MASK & bth->flags);
+}
+
+static inline u8 __bth_tver(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return BTH_TVER_MASK & bth->flags;
+}
+
+static inline void __bth_set_tver(void *arg, u8 tver)
+{
+ struct rxe_bth *bth = arg;
+
+ bth->flags = (BTH_TVER_MASK & tver) |
+ (~BTH_TVER_MASK & bth->flags);
+}
+
+static inline u16 __bth_pkey(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return be16_to_cpu(bth->pkey);
+}
+
+static inline void __bth_set_pkey(void *arg, u16 pkey)
+{
+ struct rxe_bth *bth = arg;
+
+ bth->pkey = cpu_to_be16(pkey);
+}
+
+static inline u32 __bth_qpn(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return BTH_QPN_MASK & be32_to_cpu(bth->qpn);
+}
+
+static inline void __bth_set_qpn(void *arg, u32 qpn)
+{
+ struct rxe_bth *bth = arg;
+ u32 resvqpn = be32_to_cpu(bth->qpn);
+
+ bth->qpn = cpu_to_be32((BTH_QPN_MASK & qpn) |
+ (~BTH_QPN_MASK & resvqpn));
+}
+
+static inline int __bth_fecn(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return 0 != (cpu_to_be32(BTH_FECN_MASK) & bth->qpn);
+}
+
+static inline void __bth_set_fecn(void *arg, int fecn)
+{
+ struct rxe_bth *bth = arg;
+
+ if (fecn)
+ bth->qpn |= cpu_to_be32(BTH_FECN_MASK);
+ else
+ bth->qpn &= ~cpu_to_be32(BTH_FECN_MASK);
+}
+
+static inline int __bth_becn(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return 0 != (cpu_to_be32(BTH_BECN_MASK) & bth->qpn);
+}
+
+static inline void __bth_set_becn(void *arg, int becn)
+{
+ struct rxe_bth *bth = arg;
+
+ if (becn)
+ bth->qpn |= cpu_to_be32(BTH_BECN_MASK);
+ else
+ bth->qpn &= ~cpu_to_be32(BTH_BECN_MASK);
+}
+
+static inline u8 __bth_resv6a(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return (BTH_RESV6A_MASK & be32_to_cpu(bth->qpn)) >> 24;
+}
+
+static inline void __bth_set_resv6a(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ bth->qpn = cpu_to_be32(~BTH_RESV6A_MASK);
+}
+
+static inline int __bth_ack(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return 0 != (cpu_to_be32(BTH_ACK_MASK) & bth->apsn);
+}
+
+static inline void __bth_set_ack(void *arg, int ack)
+{
+ struct rxe_bth *bth = arg;
+
+ if (ack)
+ bth->apsn |= cpu_to_be32(BTH_ACK_MASK);
+ else
+ bth->apsn &= ~cpu_to_be32(BTH_ACK_MASK);
+}
+
+static inline void __bth_set_resv7(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ bth->apsn &= ~cpu_to_be32(BTH_RESV7_MASK);
+}
+
+static inline u32 __bth_psn(void *arg)
+{
+ struct rxe_bth *bth = arg;
+
+ return BTH_PSN_MASK & be32_to_cpu(bth->apsn);
+}
+
+static inline void __bth_set_psn(void *arg, u32 psn)
+{
+ struct rxe_bth *bth = arg;
+ u32 apsn = be32_to_cpu(bth->apsn);
+
+ bth->apsn = cpu_to_be32((BTH_PSN_MASK & psn) |
+ (~BTH_PSN_MASK & apsn));
+}
+
+static inline u8 bth_opcode(struct rxe_pkt_info *pkt)
+{
+ return __bth_opcode(pkt->hdr + pkt->offset);
+}
+
+static inline void bth_set_opcode(struct rxe_pkt_info *pkt, u8 opcode)
+{
+ __bth_set_opcode(pkt->hdr + pkt->offset, opcode);
+}
+
+static inline u8 bth_se(struct rxe_pkt_info *pkt)
+{
+ return __bth_se(pkt->hdr + pkt->offset);
+}
+
+static inline void bth_set_se(struct rxe_pkt_info *pkt, int se)
+{
+ __bth_set_se(pkt->hdr + pkt->offset, se);
+}
+
+static inline u8 bth_mig(struct rxe_pkt_info *pkt)
+{
+ return __bth_mig(pkt->hdr + pkt->offset);
+}
+
+static inline void bth_set_mig(struct rxe_pkt_info *pkt, u8 mig)
+{
+ __bth_set_mig(pkt->hdr + pkt->offset, mig);
+}
+
+static inline u8 bth_pad(struct rxe_pkt_info *pkt)
+{
+ return __bth_pad(pkt->hdr + pkt->offset);
+}
+
+static inline void bth_set_pad(struct rxe_pkt_info *pkt, u8 pad)
+{
+ __bth_set_pad(pkt->hdr + pkt->offset, pad);
+}
+
+static inline u8 bth_tver(struct rxe_pkt_info *pkt)
+{
+ return __bth_tver(pkt->hdr + pkt->offset);
+}
+
+static inline void bth_set_tver(struct rxe_pkt_info *pkt, u8 tver)
+{
+ __bth_set_tver(pkt->hdr + pkt->offset, tver);
+}
+
+static inline u16 bth_pkey(struct rxe_pkt_info *pkt)
+{
+ return __bth_pkey(pkt->hdr + pkt->offset);
+}
+
+static inline void bth_set_pkey(struct rxe_pkt_info *pkt, u16 pkey)
+{
+ __bth_set_pkey(pkt->hdr + pkt->offset, pkey);
+}
+
+static inline u32 bth_qpn(struct rxe_pkt_info *pkt)
+{
+ return __bth_qpn(pkt->hdr + pkt->offset);
+}
+
+static inline void bth_set_qpn(struct rxe_pkt_info *pkt, u32 qpn)
+{
+ __bth_set_qpn(pkt->hdr + pkt->offset, qpn);
+}
+
+static inline int bth_fecn(struct rxe_pkt_info *pkt)
+{
+ return __bth_fecn(pkt->hdr + pkt->offset);
+}
+
+static inline void bth_set_fecn(struct rxe_pkt_info *pkt, int fecn)
+{
+ __bth_set_fecn(pkt->hdr + pkt->offset, fecn);
+}
+
+static inline int bth_becn(struct rxe_pkt_info *pkt)
+{
+ return __bth_becn(pkt->hdr + pkt->offset);
+}
+
+static inline void bth_set_becn(struct rxe_pkt_info *pkt, int becn)
+{
+ __bth_set_becn(pkt->hdr + pkt->offset, becn);
+}
+
+static inline u8 bth_resv6a(struct rxe_pkt_info *pkt)
+{
+ return __bth_resv6a(pkt->hdr + pkt->offset);
+}
+
+static inline void bth_set_resv6a(struct rxe_pkt_info *pkt)
+{
+ __bth_set_resv6a(pkt->hdr + pkt->offset);
+}
+
+static inline int bth_ack(struct rxe_pkt_info *pkt)
+{
+ return __bth_ack(pkt->hdr + pkt->offset);
+}
+
+static inline void bth_set_ack(struct rxe_pkt_info *pkt, int ack)
+{
+ __bth_set_ack(pkt->hdr + pkt->offset, ack);
+}
+
+static inline void bth_set_resv7(struct rxe_pkt_info *pkt)
+{
+ __bth_set_resv7(pkt->hdr + pkt->offset);
+}
+
+static inline u32 bth_psn(struct rxe_pkt_info *pkt)
+{
+ return __bth_psn(pkt->hdr + pkt->offset);
+}
+
+static inline void bth_set_psn(struct rxe_pkt_info *pkt, u32 psn)
+{
+ __bth_set_psn(pkt->hdr + pkt->offset, psn);
+}
+
+static inline void bth_init(struct rxe_pkt_info *pkt, u8 opcode, int se,
+ int mig, int pad, u16 pkey, u32 qpn, int ack_req,
+ u32 psn)
+{
+ struct rxe_bth *bth = (struct rxe_bth *)(pkt->hdr + pkt->offset);
+
+ bth->opcode = opcode;
+ bth->flags = (pad << 4) & BTH_PAD_MASK;
+ if (se)
+ bth->flags |= BTH_SE_MASK;
+ if (mig)
+ bth->flags |= BTH_MIG_MASK;
+ bth->pkey = cpu_to_be16(pkey);
+ bth->qpn = cpu_to_be32(qpn & BTH_QPN_MASK);
+ psn &= BTH_PSN_MASK;
+ if (ack_req)
+ psn |= BTH_ACK_MASK;
+ bth->apsn = cpu_to_be32(psn);
+}
+
+/******************************************************************************
+ * Reliable Datagram Extended Transport Header
+ ******************************************************************************/
+struct rxe_rdeth {
+ __be32 een;
+};
+
+#define RDETH_EEN_MASK (0x00ffffff)
+
+static inline u32 __rdeth_een(void *arg)
+{
+ struct rxe_rdeth *rdeth = arg;
+
+ return RDETH_EEN_MASK & be32_to_cpu(rdeth->een);
+}
+
+static inline void __rdeth_set_een(void *arg, u32 een)
+{
+ struct rxe_rdeth *rdeth = arg;
+
+ rdeth->een = cpu_to_be32(RDETH_EEN_MASK & een);
+}
+
+static inline u32 rdeth_een(struct rxe_pkt_info *pkt)
+{
+ return __rdeth_een(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_RDETH]);
+}
+
+static inline void rdeth_set_een(struct rxe_pkt_info *pkt, u32 een)
+{
+ __rdeth_set_een(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_RDETH], een);
+}
+
+/******************************************************************************
+ * Datagram Extended Transport Header
+ ******************************************************************************/
+struct rxe_deth {
+ __be32 qkey;
+ __be32 sqp;
+};
+
+#define GSI_QKEY (0x80010000)
+#define DETH_SQP_MASK (0x00ffffff)
+
+static inline u32 __deth_qkey(void *arg)
+{
+ struct rxe_deth *deth = arg;
+
+ return be32_to_cpu(deth->qkey);
+}
+
+static inline void __deth_set_qkey(void *arg, u32 qkey)
+{
+ struct rxe_deth *deth = arg;
+
+ deth->qkey = cpu_to_be32(qkey);
+}
+
+static inline u32 __deth_sqp(void *arg)
+{
+ struct rxe_deth *deth = arg;
+
+ return DETH_SQP_MASK & be32_to_cpu(deth->sqp);
+}
+
+static inline void __deth_set_sqp(void *arg, u32 sqp)
+{
+ struct rxe_deth *deth = arg;
+
+ deth->sqp = cpu_to_be32(DETH_SQP_MASK & sqp);
+}
+
+static inline u32 deth_qkey(struct rxe_pkt_info *pkt)
+{
+ return __deth_qkey(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_DETH]);
+}
+
+static inline void deth_set_qkey(struct rxe_pkt_info *pkt, u32 qkey)
+{
+ __deth_set_qkey(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_DETH], qkey);
+}
+
+static inline u32 deth_sqp(struct rxe_pkt_info *pkt)
+{
+ return __deth_sqp(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_DETH]);
+}
+
+static inline void deth_set_sqp(struct rxe_pkt_info *pkt, u32 sqp)
+{
+ __deth_set_sqp(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_DETH], sqp);
+}
+
+/******************************************************************************
+ * RDMA Extended Transport Header
+ ******************************************************************************/
+struct rxe_reth {
+ __be64 va;
+ __be32 rkey;
+ __be32 len;
+};
+
+static inline u64 __reth_va(void *arg)
+{
+ struct rxe_reth *reth = arg;
+
+ return be64_to_cpu(reth->va);
+}
+
+static inline void __reth_set_va(void *arg, u64 va)
+{
+ struct rxe_reth *reth = arg;
+
+ reth->va = cpu_to_be64(va);
+}
+
+static inline u32 __reth_rkey(void *arg)
+{
+ struct rxe_reth *reth = arg;
+
+ return be32_to_cpu(reth->rkey);
+}
+
+static inline void __reth_set_rkey(void *arg, u32 rkey)
+{
+ struct rxe_reth *reth = arg;
+
+ reth->rkey = cpu_to_be32(rkey);
+}
+
+static inline u32 __reth_len(void *arg)
+{
+ struct rxe_reth *reth = arg;
+
+ return be32_to_cpu(reth->len);
+}
+
+static inline void __reth_set_len(void *arg, u32 len)
+{
+ struct rxe_reth *reth = arg;
+
+ reth->len = cpu_to_be32(len);
+}
+
+static inline u64 reth_va(struct rxe_pkt_info *pkt)
+{
+ return __reth_va(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_RETH]);
+}
+
+static inline void reth_set_va(struct rxe_pkt_info *pkt, u64 va)
+{
+ __reth_set_va(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_RETH], va);
+}
+
+static inline u32 reth_rkey(struct rxe_pkt_info *pkt)
+{
+ return __reth_rkey(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_RETH]);
+}
+
+static inline void reth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
+{
+ __reth_set_rkey(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_RETH], rkey);
+}
+
+static inline u32 reth_len(struct rxe_pkt_info *pkt)
+{
+ return __reth_len(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_RETH]);
+}
+
+static inline void reth_set_len(struct rxe_pkt_info *pkt, u32 len)
+{
+ __reth_set_len(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_RETH], len);
+}
+
+/******************************************************************************
+ * Atomic Extended Transport Header
+ ******************************************************************************/
+struct rxe_atmeth {
+ __be64 va;
+ __be32 rkey;
+ __be64 swap_add;
+ __be64 comp;
+} __attribute__((__packed__));
+
+static inline u64 __atmeth_va(void *arg)
+{
+ struct rxe_atmeth *atmeth = arg;
+
+ return be64_to_cpu(atmeth->va);
+}
+
+static inline void __atmeth_set_va(void *arg, u64 va)
+{
+ struct rxe_atmeth *atmeth = arg;
+
+ atmeth->va = cpu_to_be64(va);
+}
+
+static inline u32 __atmeth_rkey(void *arg)
+{
+ struct rxe_atmeth *atmeth = arg;
+
+ return be32_to_cpu(atmeth->rkey);
+}
+
+static inline void __atmeth_set_rkey(void *arg, u32 rkey)
+{
+ struct rxe_atmeth *atmeth = arg;
+
+ atmeth->rkey = cpu_to_be32(rkey);
+}
+
+static inline u64 __atmeth_swap_add(void *arg)
+{
+ struct rxe_atmeth *atmeth = arg;
+
+ return be64_to_cpu(atmeth->swap_add);
+}
+
+static inline void __atmeth_set_swap_add(void *arg, u64 swap_add)
+{
+ struct rxe_atmeth *atmeth = arg;
+
+ atmeth->swap_add = cpu_to_be64(swap_add);
+}
+
+static inline u64 __atmeth_comp(void *arg)
+{
+ struct rxe_atmeth *atmeth = arg;
+
+ return be64_to_cpu(atmeth->comp);
+}
+
+static inline void __atmeth_set_comp(void *arg, u64 comp)
+{
+ struct rxe_atmeth *atmeth = arg;
+
+ atmeth->comp = cpu_to_be64(comp);
+}
+
+static inline u64 atmeth_va(struct rxe_pkt_info *pkt)
+{
+ return __atmeth_va(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
+}
+
+static inline void atmeth_set_va(struct rxe_pkt_info *pkt, u64 va)
+{
+ __atmeth_set_va(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_ATMETH], va);
+}
+
+static inline u32 atmeth_rkey(struct rxe_pkt_info *pkt)
+{
+ return __atmeth_rkey(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
+}
+
+static inline void atmeth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
+{
+ __atmeth_set_rkey(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_ATMETH], rkey);
+}
+
+static inline u64 atmeth_swap_add(struct rxe_pkt_info *pkt)
+{
+ return __atmeth_swap_add(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
+}
+
+static inline void atmeth_set_swap_add(struct rxe_pkt_info *pkt, u64 swap_add)
+{
+ __atmeth_set_swap_add(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_ATMETH], swap_add);
+}
+
+static inline u64 atmeth_comp(struct rxe_pkt_info *pkt)
+{
+ return __atmeth_comp(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
+}
+
+static inline void atmeth_set_comp(struct rxe_pkt_info *pkt, u64 comp)
+{
+ __atmeth_set_comp(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_ATMETH], comp);
+}
+
+/******************************************************************************
+ * Ack Extended Transport Header
+ ******************************************************************************/
+struct rxe_aeth {
+ __be32 smsn;
+};
+
+#define AETH_SYN_MASK (0xff000000)
+#define AETH_MSN_MASK (0x00ffffff)
+
+enum aeth_syndrome {
+ AETH_TYPE_MASK = 0xe0,
+ AETH_ACK = 0x00,
+ AETH_RNR_NAK = 0x20,
+ AETH_RSVD = 0x40,
+ AETH_NAK = 0x60,
+ AETH_ACK_UNLIMITED = 0x1f,
+ AETH_NAK_PSN_SEQ_ERROR = 0x60,
+ AETH_NAK_INVALID_REQ = 0x61,
+ AETH_NAK_REM_ACC_ERR = 0x62,
+ AETH_NAK_REM_OP_ERR = 0x63,
+ AETH_NAK_INV_RD_REQ = 0x64,
+};
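+
+/* For illustration (hypothetical helpers, not part of the driver): the top
+ * three bits of the syndrome select the ack type and the low five bits
+ * carry type-specific data, e.g. the RNR timer field for RNR NAKs or the
+ * error code for NAKs, which is how the completer consumes it.
+ */
+static inline u8 example_aeth_type(u8 syn)
+{
+	return syn & AETH_TYPE_MASK;	/* AETH_ACK, AETH_RNR_NAK, ... */
+}
+
+static inline u8 example_aeth_value(u8 syn)
+{
+	return syn & ~AETH_TYPE_MASK;	/* e.g. the rnr timer field */
+}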
+
+static inline u8 __aeth_syn(void *arg)
+{
+ struct rxe_aeth *aeth = arg;
+
+ return (AETH_SYN_MASK & be32_to_cpu(aeth->smsn)) >> 24;
+}
+
+static inline void __aeth_set_syn(void *arg, u8 syn)
+{
+ struct rxe_aeth *aeth = arg;
+ u32 smsn = be32_to_cpu(aeth->smsn);
+
+ aeth->smsn = cpu_to_be32((AETH_SYN_MASK & (syn << 24)) |
+ (~AETH_SYN_MASK & smsn));
+}
+
+static inline u32 __aeth_msn(void *arg)
+{
+ struct rxe_aeth *aeth = arg;
+
+ return AETH_MSN_MASK & be32_to_cpu(aeth->smsn);
+}
+
+static inline void __aeth_set_msn(void *arg, u32 msn)
+{
+ struct rxe_aeth *aeth = arg;
+ u32 smsn = be32_to_cpu(aeth->smsn);
+
+ aeth->smsn = cpu_to_be32((AETH_MSN_MASK & msn) |
+ (~AETH_MSN_MASK & smsn));
+}
+
+static inline u8 aeth_syn(struct rxe_pkt_info *pkt)
+{
+ return __aeth_syn(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_AETH]);
+}
+
+static inline void aeth_set_syn(struct rxe_pkt_info *pkt, u8 syn)
+{
+ __aeth_set_syn(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_AETH], syn);
+}
+
+static inline u32 aeth_msn(struct rxe_pkt_info *pkt)
+{
+ return __aeth_msn(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_AETH]);
+}
+
+static inline void aeth_set_msn(struct rxe_pkt_info *pkt, u32 msn)
+{
+ __aeth_set_msn(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_AETH], msn);
+}
+
+/******************************************************************************
+ * Atomic Ack Extended Transport Header
+ ******************************************************************************/
+struct rxe_atmack {
+ __be64 orig;
+};
+
+static inline u64 __atmack_orig(void *arg)
+{
+ struct rxe_atmack *atmack = arg;
+
+ return be64_to_cpu(atmack->orig);
+}
+
+static inline void __atmack_set_orig(void *arg, u64 orig)
+{
+ struct rxe_atmack *atmack = arg;
+
+ atmack->orig = cpu_to_be64(orig);
+}
+
+static inline u64 atmack_orig(struct rxe_pkt_info *pkt)
+{
+ return __atmack_orig(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_ATMACK]);
+}
+
+static inline void atmack_set_orig(struct rxe_pkt_info *pkt, u64 orig)
+{
+ __atmack_set_orig(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_ATMACK], orig);
+}
+
+/******************************************************************************
+ * Immediate Extended Transport Header
+ ******************************************************************************/
+struct rxe_immdt {
+ __be32 imm;
+};
+
+static inline __be32 __immdt_imm(void *arg)
+{
+ struct rxe_immdt *immdt = arg;
+
+ return immdt->imm;
+}
+
+static inline void __immdt_set_imm(void *arg, __be32 imm)
+{
+ struct rxe_immdt *immdt = arg;
+
+ immdt->imm = imm;
+}
+
+static inline __be32 immdt_imm(struct rxe_pkt_info *pkt)
+{
+ return __immdt_imm(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_IMMDT]);
+}
+
+static inline void immdt_set_imm(struct rxe_pkt_info *pkt, __be32 imm)
+{
+ __immdt_set_imm(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_IMMDT], imm);
+}
+
+/******************************************************************************
+ * Invalidate Extended Transport Header
+ ******************************************************************************/
+struct rxe_ieth {
+ __be32 rkey;
+};
+
+static inline u32 __ieth_rkey(void *arg)
+{
+ struct rxe_ieth *ieth = arg;
+
+ return be32_to_cpu(ieth->rkey);
+}
+
+static inline void __ieth_set_rkey(void *arg, u32 rkey)
+{
+ struct rxe_ieth *ieth = arg;
+
+ ieth->rkey = cpu_to_be32(rkey);
+}
+
+static inline u32 ieth_rkey(struct rxe_pkt_info *pkt)
+{
+ return __ieth_rkey(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_IETH]);
+}
+
+static inline void ieth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
+{
+ __ieth_set_rkey(pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_IETH], rkey);
+}
+
+enum rxe_hdr_length {
+ RXE_BTH_BYTES = sizeof(struct rxe_bth),
+ RXE_DETH_BYTES = sizeof(struct rxe_deth),
+ RXE_IMMDT_BYTES = sizeof(struct rxe_immdt),
+ RXE_RETH_BYTES = sizeof(struct rxe_reth),
+ RXE_AETH_BYTES = sizeof(struct rxe_aeth),
+ RXE_ATMACK_BYTES = sizeof(struct rxe_atmack),
+ RXE_ATMETH_BYTES = sizeof(struct rxe_atmeth),
+ RXE_IETH_BYTES = sizeof(struct rxe_ieth),
+ RXE_RDETH_BYTES = sizeof(struct rxe_rdeth),
+};
+
+static inline size_t header_size(struct rxe_pkt_info *pkt)
+{
+ return pkt->offset + rxe_opcode[pkt->opcode].length;
+}
+
+static inline void *payload_addr(struct rxe_pkt_info *pkt)
+{
+ return pkt->hdr + pkt->offset
+ + rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD];
+}
+
+static inline size_t payload_size(struct rxe_pkt_info *pkt)
+{
+ return pkt->paylen - rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD]
+ - bth_pad(pkt) - RXE_ICRC_SIZE;
+}
+
+#endif /* RXE_HDR_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_icrc.c b/drivers/infiniband/sw/rxe/rxe_icrc.c
new file mode 100644
index 000000000..413b56b23
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_icrc.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "rxe.h"
+#include "rxe_loc.h"
+
+/* Compute a partial ICRC for all the IB transport headers. */
+u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb)
+{
+ unsigned int bth_offset = 0;
+ struct iphdr *ip4h = NULL;
+ struct ipv6hdr *ip6h = NULL;
+ struct udphdr *udph;
+ struct rxe_bth *bth;
+ int crc;
+ int length;
+ int hdr_size = sizeof(struct udphdr) +
+ (skb->protocol == htons(ETH_P_IP) ?
+ sizeof(struct iphdr) : sizeof(struct ipv6hdr));
+	/* the pseudo header buffer is sized for the ipv6 header since it is
+	 * bigger than the ipv4 header
+	 */
+ u8 pshdr[sizeof(struct udphdr) +
+ sizeof(struct ipv6hdr) +
+ RXE_BTH_BYTES];
+
+	/* This seed is the result of computing a CRC with a seed of
+	 * 0xffffffff and 8 bytes of 0xff representing a masked LRH.
+	 */
+ crc = 0xdebb20e3;
+
+ if (skb->protocol == htons(ETH_P_IP)) { /* IPv4 */
+ memcpy(pshdr, ip_hdr(skb), hdr_size);
+ ip4h = (struct iphdr *)pshdr;
+ udph = (struct udphdr *)(ip4h + 1);
+
+ ip4h->ttl = 0xff;
+ ip4h->check = CSUM_MANGLED_0;
+ ip4h->tos = 0xff;
+ } else { /* IPv6 */
+ memcpy(pshdr, ipv6_hdr(skb), hdr_size);
+ ip6h = (struct ipv6hdr *)pshdr;
+ udph = (struct udphdr *)(ip6h + 1);
+
+ memset(ip6h->flow_lbl, 0xff, sizeof(ip6h->flow_lbl));
+ ip6h->priority = 0xf;
+ ip6h->hop_limit = 0xff;
+ }
+ udph->check = CSUM_MANGLED_0;
+
+ bth_offset += hdr_size;
+
+ memcpy(&pshdr[bth_offset], pkt->hdr, RXE_BTH_BYTES);
+ bth = (struct rxe_bth *)&pshdr[bth_offset];
+
+ /* exclude bth.resv8a */
+ bth->qpn |= cpu_to_be32(~BTH_QPN_MASK);
+
+ length = hdr_size + RXE_BTH_BYTES;
+ crc = crc32_le(crc, pshdr, length);
+
+ /* And finish to compute the CRC on the remainder of the headers. */
+ crc = crc32_le(crc, pkt->hdr + RXE_BTH_BYTES,
+ rxe_opcode[pkt->opcode].length - RXE_BTH_BYTES);
+ return crc;
+}
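+
+/* Illustrative only (hypothetical helper, not part of the driver): the
+ * fixed seed above can be re-derived at run time by running crc32_le()
+ * over eight 0xff bytes with an all-ones initial value.
+ */
+static void __maybe_unused rxe_icrc_seed_check(void)
+{
+	u8 masked_lrh[8];
+
+	memset(masked_lrh, 0xff, sizeof(masked_lrh));
+	WARN_ON(crc32_le(0xffffffff, masked_lrh, sizeof(masked_lrh)) !=
+		0xdebb20e3);
+}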
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
new file mode 100644
index 000000000..4a5484ef6
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -0,0 +1,286 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef RXE_LOC_H
+#define RXE_LOC_H
+
+/* rxe_av.c */
+
+int rxe_av_chk_attr(struct rxe_dev *rxe, struct ib_ah_attr *attr);
+
+int rxe_av_from_attr(struct rxe_dev *rxe, u8 port_num,
+ struct rxe_av *av, struct ib_ah_attr *attr);
+
+int rxe_av_to_attr(struct rxe_dev *rxe, struct rxe_av *av,
+ struct ib_ah_attr *attr);
+
+int rxe_av_fill_ip_info(struct rxe_dev *rxe,
+ struct rxe_av *av,
+ struct ib_ah_attr *attr,
+ struct ib_gid_attr *sgid_attr,
+ union ib_gid *sgid);
+
+struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt);
+
+/* rxe_cq.c */
+int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
+ int cqe, int comp_vector, struct ib_udata *udata);
+
+int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
+ int comp_vector, struct ib_ucontext *context,
+ struct ib_udata *udata);
+
+int rxe_cq_resize_queue(struct rxe_cq *cq, int new_cqe, struct ib_udata *udata);
+
+int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);
+
+void rxe_cq_cleanup(void *arg);
+
+/* rxe_mcast.c */
+int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
+ struct rxe_mc_grp **grp_p);
+
+int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
+ struct rxe_mc_grp *grp);
+
+int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
+ union ib_gid *mgid);
+
+void rxe_drop_all_mcast_groups(struct rxe_qp *qp);
+
+void rxe_mc_cleanup(void *arg);
+
+/* rxe_mmap.c */
+struct rxe_mmap_info {
+ struct list_head pending_mmaps;
+ struct ib_ucontext *context;
+ struct kref ref;
+ void *obj;
+
+ struct mminfo info;
+};
+
+void rxe_mmap_release(struct kref *ref);
+
+struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *dev,
+ u32 size,
+ struct ib_ucontext *context,
+ void *obj);
+
+int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+
+/* rxe_mr.c */
+enum copy_direction {
+ to_mem_obj,
+ from_mem_obj,
+};
+
+int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd,
+ int access, struct rxe_mem *mem);
+
+int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
+ u64 length, u64 iova, int access, struct ib_udata *udata,
+ struct rxe_mem *mr);
+
+int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd,
+ int max_pages, struct rxe_mem *mem);
+
+int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr,
+ int length, enum copy_direction dir, u32 *crcp);
+
+int copy_data(struct rxe_dev *rxe, struct rxe_pd *pd, int access,
+ struct rxe_dma_info *dma, void *addr, int length,
+ enum copy_direction dir, u32 *crcp);
+
+void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length);
+
+enum lookup_type {
+ lookup_local,
+ lookup_remote,
+};
+
+struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
+ enum lookup_type type);
+
+int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length);
+
+int rxe_mem_map_pages(struct rxe_dev *rxe, struct rxe_mem *mem,
+ u64 *page, int num_pages, u64 iova);
+
+void rxe_mem_cleanup(void *arg);
+
+int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
+
+/* rxe_qp.c */
+int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init);
+
+int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
+ struct ib_qp_init_attr *init, struct ib_udata *udata,
+ struct ib_pd *ibpd);
+
+int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init);
+
+int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
+ struct ib_qp_attr *attr, int mask);
+
+int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr,
+ int mask, struct ib_udata *udata);
+
+int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask);
+
+void rxe_qp_error(struct rxe_qp *qp);
+
+void rxe_qp_destroy(struct rxe_qp *qp);
+
+void rxe_qp_cleanup(void *arg);
+
+static inline int qp_num(struct rxe_qp *qp)
+{
+ return qp->ibqp.qp_num;
+}
+
+static inline enum ib_qp_type qp_type(struct rxe_qp *qp)
+{
+ return qp->ibqp.qp_type;
+}
+
+static inline enum ib_qp_state qp_state(struct rxe_qp *qp)
+{
+ return qp->attr.qp_state;
+}
+
+static inline int qp_mtu(struct rxe_qp *qp)
+{
+ if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
+ return qp->attr.path_mtu;
+ else
+ return RXE_PORT_MAX_MTU;
+}
+
+static inline int rcv_wqe_size(int max_sge)
+{
+ return sizeof(struct rxe_recv_wqe) +
+ max_sge * sizeof(struct ib_sge);
+}
+
+void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res);
+
+static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
+{
+ qp->resp.res_head++;
+ if (unlikely(qp->resp.res_head == qp->attr.max_rd_atomic))
+ qp->resp.res_head = 0;
+}
+
+void retransmit_timer(unsigned long data);
+void rnr_nak_timer(unsigned long data);
+
+void dump_qp(struct rxe_qp *qp);
+
+/* rxe_srq.c */
+#define IB_SRQ_INIT_MASK (~IB_SRQ_LIMIT)
+
+int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
+ struct ib_srq_attr *attr, enum ib_srq_attr_mask mask);
+
+int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
+ struct ib_srq_init_attr *init,
+ struct ib_ucontext *context, struct ib_udata *udata);
+
+int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
+ struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
+ struct ib_udata *udata);
+
+extern struct ib_dma_mapping_ops rxe_dma_mapping_ops;
+
+void rxe_release(struct kref *kref);
+
+int rxe_completer(void *arg);
+int rxe_requester(void *arg);
+int rxe_responder(void *arg);
+
+u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb);
+
+void rxe_resp_queue_pkt(struct rxe_dev *rxe,
+ struct rxe_qp *qp, struct sk_buff *skb);
+
+void rxe_comp_queue_pkt(struct rxe_dev *rxe,
+ struct rxe_qp *qp, struct sk_buff *skb);
+
+static inline unsigned wr_opcode_mask(int opcode, struct rxe_qp *qp)
+{
+ return rxe_wr_opcode_info[opcode].mask[qp->ibqp.qp_type];
+}
+
+static inline int rxe_xmit_packet(struct rxe_dev *rxe, struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt, struct sk_buff *skb)
+{
+ int err;
+ int is_request = pkt->mask & RXE_REQ_MASK;
+
+ if ((is_request && (qp->req.state != QP_STATE_READY)) ||
+ (!is_request && (qp->resp.state != QP_STATE_READY))) {
+ pr_info("Packet dropped. QP is not in ready state\n");
+ goto drop;
+ }
+
+ if (pkt->mask & RXE_LOOPBACK_MASK) {
+ memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));
+ err = rxe->ifc_ops->loopback(skb);
+ } else {
+ err = rxe->ifc_ops->send(rxe, pkt, skb);
+ }
+
+ if (err) {
+ rxe->xmit_errors++;
+ return err;
+ }
+
+ atomic_inc(&qp->skb_out);
+
+ if ((qp_type(qp) != IB_QPT_RC) &&
+ (pkt->mask & RXE_END_MASK)) {
+ pkt->wqe->state = wqe_state_done;
+ rxe_run_task(&qp->comp.task, 1);
+ }
+
+ goto done;
+
+drop:
+ kfree_skb(skb);
+ err = 0;
+done:
+ return err;
+}
+
+#endif /* RXE_LOC_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_mcast.c b/drivers/infiniband/sw/rxe/rxe_mcast.c
new file mode 100644
index 000000000..fa95544ca
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_mcast.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "rxe.h"
+#include "rxe_loc.h"
+
+int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
+ struct rxe_mc_grp **grp_p)
+{
+ int err;
+ struct rxe_mc_grp *grp;
+
+ if (rxe->attr.max_mcast_qp_attach == 0) {
+ err = -EINVAL;
+ goto err1;
+ }
+
+ grp = rxe_pool_get_key(&rxe->mc_grp_pool, mgid);
+ if (grp)
+ goto done;
+
+ grp = rxe_alloc(&rxe->mc_grp_pool);
+ if (!grp) {
+ err = -ENOMEM;
+ goto err1;
+ }
+
+ INIT_LIST_HEAD(&grp->qp_list);
+ spin_lock_init(&grp->mcg_lock);
+ grp->rxe = rxe;
+
+ rxe_add_key(grp, mgid);
+
+ err = rxe->ifc_ops->mcast_add(rxe, mgid);
+ if (err)
+ goto err2;
+
+done:
+ *grp_p = grp;
+ return 0;
+
+err2:
+ rxe_drop_ref(grp);
+err1:
+ return err;
+}
+
+int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
+ struct rxe_mc_grp *grp)
+{
+ int err;
+ struct rxe_mc_elem *elem;
+
+	/* check to see if the qp is already a member of the group */
+ spin_lock_bh(&qp->grp_lock);
+ spin_lock_bh(&grp->mcg_lock);
+ list_for_each_entry(elem, &grp->qp_list, qp_list) {
+ if (elem->qp == qp) {
+ err = 0;
+ goto out;
+ }
+ }
+
+ if (grp->num_qp >= rxe->attr.max_mcast_qp_attach) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ elem = rxe_alloc(&rxe->mc_elem_pool);
+ if (!elem) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* each qp holds a ref on the grp */
+ rxe_add_ref(grp);
+
+ grp->num_qp++;
+ elem->qp = qp;
+ elem->grp = grp;
+
+ list_add(&elem->qp_list, &grp->qp_list);
+ list_add(&elem->grp_list, &qp->grp_list);
+
+ err = 0;
+out:
+ spin_unlock_bh(&grp->mcg_lock);
+ spin_unlock_bh(&qp->grp_lock);
+ return err;
+}
+
+int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
+ union ib_gid *mgid)
+{
+ struct rxe_mc_grp *grp;
+ struct rxe_mc_elem *elem, *tmp;
+
+ grp = rxe_pool_get_key(&rxe->mc_grp_pool, mgid);
+ if (!grp)
+ goto err1;
+
+ spin_lock_bh(&qp->grp_lock);
+ spin_lock_bh(&grp->mcg_lock);
+
+ list_for_each_entry_safe(elem, tmp, &grp->qp_list, qp_list) {
+ if (elem->qp == qp) {
+ list_del(&elem->qp_list);
+ list_del(&elem->grp_list);
+ grp->num_qp--;
+
+ spin_unlock_bh(&grp->mcg_lock);
+ spin_unlock_bh(&qp->grp_lock);
+ rxe_drop_ref(elem);
+ rxe_drop_ref(grp); /* ref held by QP */
+ rxe_drop_ref(grp); /* ref from get_key */
+ return 0;
+ }
+ }
+
+ spin_unlock_bh(&grp->mcg_lock);
+ spin_unlock_bh(&qp->grp_lock);
+ rxe_drop_ref(grp); /* ref from get_key */
+err1:
+ return -EINVAL;
+}
+
+void rxe_drop_all_mcast_groups(struct rxe_qp *qp)
+{
+ struct rxe_mc_grp *grp;
+ struct rxe_mc_elem *elem;
+
+ while (1) {
+ spin_lock_bh(&qp->grp_lock);
+ if (list_empty(&qp->grp_list)) {
+ spin_unlock_bh(&qp->grp_lock);
+ break;
+ }
+ elem = list_first_entry(&qp->grp_list, struct rxe_mc_elem,
+ grp_list);
+ list_del(&elem->grp_list);
+ spin_unlock_bh(&qp->grp_lock);
+
+ grp = elem->grp;
+ spin_lock_bh(&grp->mcg_lock);
+ list_del(&elem->qp_list);
+ grp->num_qp--;
+ spin_unlock_bh(&grp->mcg_lock);
+ rxe_drop_ref(grp);
+ rxe_drop_ref(elem);
+ }
+}
+
+void rxe_mc_cleanup(void *arg)
+{
+ struct rxe_mc_grp *grp = arg;
+ struct rxe_dev *rxe = grp->rxe;
+
+ rxe_drop_key(grp);
+ rxe->ifc_ops->mcast_delete(rxe, &grp->mgid);
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c
new file mode 100644
index 000000000..54b3c7c99
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_mmap.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <asm/pgtable.h>
+
+#include "rxe.h"
+#include "rxe_loc.h"
+#include "rxe_queue.h"
+
+void rxe_mmap_release(struct kref *ref)
+{
+ struct rxe_mmap_info *ip = container_of(ref,
+ struct rxe_mmap_info, ref);
+ struct rxe_dev *rxe = to_rdev(ip->context->device);
+
+ spin_lock_bh(&rxe->pending_lock);
+
+ if (!list_empty(&ip->pending_mmaps))
+ list_del(&ip->pending_mmaps);
+
+ spin_unlock_bh(&rxe->pending_lock);
+
+ vfree(ip->obj); /* buf */
+ kfree(ip);
+}
+
+/*
+ * open and close keep track of how many times the memory region is mapped,
+ * to avoid releasing it while it is still in use.
+ */
+static void rxe_vma_open(struct vm_area_struct *vma)
+{
+ struct rxe_mmap_info *ip = vma->vm_private_data;
+
+ kref_get(&ip->ref);
+}
+
+static void rxe_vma_close(struct vm_area_struct *vma)
+{
+ struct rxe_mmap_info *ip = vma->vm_private_data;
+
+ kref_put(&ip->ref, rxe_mmap_release);
+}
+
+static struct vm_operations_struct rxe_vm_ops = {
+ .open = rxe_vma_open,
+ .close = rxe_vma_close,
+};
+
+/**
+ * rxe_mmap - create a new mmap region
+ * @context: the IB user context of the process making the mmap() call
+ * @vma: the VMA to be initialized
+ * Return zero if the mmap is OK. Otherwise, return an errno.
+ */
+int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+ struct rxe_dev *rxe = to_rdev(context->device);
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long size = vma->vm_end - vma->vm_start;
+ struct rxe_mmap_info *ip, *pp;
+ int ret;
+
+ /*
+ * Search the device's list of objects waiting for a mmap call.
+ * Normally, this list is very short since a call to create a
+ * CQ, QP, or SRQ is soon followed by a call to mmap().
+ */
+ spin_lock_bh(&rxe->pending_lock);
+ list_for_each_entry_safe(ip, pp, &rxe->pending_mmaps, pending_mmaps) {
+ if (context != ip->context || (__u64)offset != ip->info.offset)
+ continue;
+
+ /* Don't allow a mmap larger than the object. */
+ if (size > ip->info.size) {
+ pr_err("mmap region is larger than the object!\n");
+ spin_unlock_bh(&rxe->pending_lock);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ goto found_it;
+ }
+ pr_warn("unable to find pending mmap info\n");
+ spin_unlock_bh(&rxe->pending_lock);
+ ret = -EINVAL;
+ goto done;
+
+found_it:
+ list_del_init(&ip->pending_mmaps);
+ spin_unlock_bh(&rxe->pending_lock);
+
+ ret = remap_vmalloc_range(vma, ip->obj, 0);
+ if (ret) {
+ pr_err("rxe: err %d from remap_vmalloc_range\n", ret);
+ goto done;
+ }
+
+ vma->vm_ops = &rxe_vm_ops;
+ vma->vm_private_data = ip;
+ rxe_vma_open(vma);
+done:
+ return ret;
+}
+
+/*
+ * Allocate information for rxe_mmap
+ */
+struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe,
+ u32 size,
+ struct ib_ucontext *context,
+ void *obj)
+{
+ struct rxe_mmap_info *ip;
+
+ ip = kmalloc(sizeof(*ip), GFP_KERNEL);
+ if (!ip)
+ return NULL;
+
+ size = PAGE_ALIGN(size);
+
+ spin_lock_bh(&rxe->mmap_offset_lock);
+
+ if (rxe->mmap_offset == 0)
+ rxe->mmap_offset = PAGE_SIZE;
+
+ ip->info.offset = rxe->mmap_offset;
+ rxe->mmap_offset += size;
+
+ spin_unlock_bh(&rxe->mmap_offset_lock);
+
+ INIT_LIST_HEAD(&ip->pending_mmaps);
+ ip->info.size = size;
+ ip->context = context;
+ ip->obj = obj;
+ kref_init(&ip->ref);
+
+ return ip;
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
new file mode 100644
index 000000000..f3dab6574
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -0,0 +1,643 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "rxe.h"
+#include "rxe_loc.h"
+
+/*
+ * lfsr (linear feedback shift register) with period 255
+ */
+static u8 rxe_get_key(void)
+{
+ static unsigned key = 1;
+
+ key = key << 1;
+
+ key |= (0 != (key & 0x100)) ^ (0 != (key & 0x10))
+ ^ (0 != (key & 0x80)) ^ (0 != (key & 0x40));
+
+ key &= 0xff;
+
+ return key;
+}
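+
+/* Illustrative only (hypothetical helper, not part of the driver): a quick
+ * self-check of the period-255 claim above. Starting from any generated
+ * value, the same value must not reappear until 255 further steps.
+ */
+static void __maybe_unused rxe_key_period_check(void)
+{
+	u8 first = rxe_get_key();
+	unsigned int period = 0;
+
+	do {
+		period++;
+	} while (rxe_get_key() != first);
+
+	WARN_ON(period != 255);
+}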
+
+int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
+{
+ switch (mem->type) {
+ case RXE_MEM_TYPE_DMA:
+ return 0;
+
+ case RXE_MEM_TYPE_MR:
+ case RXE_MEM_TYPE_FMR:
+ return ((iova < mem->iova) ||
+ ((iova + length) > (mem->iova + mem->length))) ?
+ -EFAULT : 0;
+
+ default:
+ return -EFAULT;
+ }
+}
+
+#define IB_ACCESS_REMOTE (IB_ACCESS_REMOTE_READ \
+ | IB_ACCESS_REMOTE_WRITE \
+ | IB_ACCESS_REMOTE_ATOMIC)
+
+static void rxe_mem_init(int access, struct rxe_mem *mem)
+{
+ u32 lkey = mem->pelem.index << 8 | rxe_get_key();
+ u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0;
+
+ if (mem->pelem.pool->type == RXE_TYPE_MR) {
+ mem->ibmr.lkey = lkey;
+ mem->ibmr.rkey = rkey;
+ }
+
+ mem->lkey = lkey;
+ mem->rkey = rkey;
+ mem->state = RXE_MEM_STATE_INVALID;
+ mem->type = RXE_MEM_TYPE_NONE;
+ mem->map_shift = ilog2(RXE_BUF_PER_MAP);
+}
+
+void rxe_mem_cleanup(void *arg)
+{
+ struct rxe_mem *mem = arg;
+ int i;
+
+ if (mem->umem)
+ ib_umem_release(mem->umem);
+
+ if (mem->map) {
+ for (i = 0; i < mem->num_map; i++)
+ kfree(mem->map[i]);
+
+ kfree(mem->map);
+ }
+}
+
+static int rxe_mem_alloc(struct rxe_dev *rxe, struct rxe_mem *mem, int num_buf)
+{
+ int i;
+ int num_map;
+ struct rxe_map **map = mem->map;
+
+ num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;
+
+ mem->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
+ if (!mem->map)
+ goto err1;
+
+ for (i = 0; i < num_map; i++) {
+ mem->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
+ if (!mem->map[i])
+ goto err2;
+ }
+
+ WARN_ON(!is_power_of_2(RXE_BUF_PER_MAP));
+
+ mem->map_shift = ilog2(RXE_BUF_PER_MAP);
+ mem->map_mask = RXE_BUF_PER_MAP - 1;
+
+ mem->num_buf = num_buf;
+ mem->num_map = num_map;
+ mem->max_buf = num_map * RXE_BUF_PER_MAP;
+
+ return 0;
+
+err2:
+ for (i--; i >= 0; i--)
+ kfree(mem->map[i]);
+
+ kfree(mem->map);
+err1:
+ return -ENOMEM;
+}
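+
+/* Worked example (editor's illustration): with RXE_BUF_PER_MAP == 256
+ * (the value is assumed here for illustration), num_buf == 300 yields
+ *
+ *	num_map = (300 + 255) / 256 = 2
+ *	max_buf = 2 * 256 = 512
+ *
+ * i.e. two map pages, the second only partially used, with room for up
+ * to 512 buffers.
+ */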
+
+int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd,
+ int access, struct rxe_mem *mem)
+{
+ rxe_mem_init(access, mem);
+
+ mem->pd = pd;
+ mem->access = access;
+ mem->state = RXE_MEM_STATE_VALID;
+ mem->type = RXE_MEM_TYPE_DMA;
+
+ return 0;
+}
+
+int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
+ u64 length, u64 iova, int access, struct ib_udata *udata,
+ struct rxe_mem *mem)
+{
+ int entry;
+ struct rxe_map **map;
+ struct rxe_phys_buf *buf = NULL;
+ struct ib_umem *umem;
+ struct scatterlist *sg;
+ int num_buf;
+ void *vaddr;
+ int err;
+
+ umem = ib_umem_get(pd->ibpd.uobject->context, start, length, access, 0);
+ if (IS_ERR(umem)) {
+ pr_warn("err %d from rxe_umem_get\n",
+ (int)PTR_ERR(umem));
+ err = -EINVAL;
+ goto err1;
+ }
+
+ mem->umem = umem;
+ num_buf = umem->nmap;
+
+ rxe_mem_init(access, mem);
+
+ err = rxe_mem_alloc(rxe, mem, num_buf);
+ if (err) {
+ pr_warn("err %d from rxe_mem_alloc\n", err);
+ ib_umem_release(umem);
+ goto err1;
+ }
+
+ WARN_ON(!is_power_of_2(umem->page_size));
+
+ mem->page_shift = ilog2(umem->page_size);
+ mem->page_mask = umem->page_size - 1;
+
+ num_buf = 0;
+ map = mem->map;
+ if (length > 0) {
+ buf = map[0]->buf;
+
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+ vaddr = page_address(sg_page(sg));
+ if (!vaddr) {
+ pr_warn("null vaddr\n");
+ err = -ENOMEM;
+ goto err1;
+ }
+
+ buf->addr = (uintptr_t)vaddr;
+ buf->size = umem->page_size;
+ num_buf++;
+ buf++;
+
+ if (num_buf >= RXE_BUF_PER_MAP) {
+ map++;
+ buf = map[0]->buf;
+ num_buf = 0;
+ }
+ }
+ }
+
+ mem->pd = pd;
+ mem->access = access;
+ mem->length = length;
+ mem->iova = iova;
+ mem->va = start;
+ mem->offset = ib_umem_offset(umem);
+ mem->state = RXE_MEM_STATE_VALID;
+ mem->type = RXE_MEM_TYPE_MR;
+
+ return 0;
+
+err1:
+ return err;
+}
+
+int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd,
+ int max_pages, struct rxe_mem *mem)
+{
+ int err;
+
+ rxe_mem_init(0, mem);
+
+ /* In fastreg, we also set the rkey */
+ mem->ibmr.rkey = mem->ibmr.lkey;
+
+ err = rxe_mem_alloc(rxe, mem, max_pages);
+ if (err)
+ goto err1;
+
+ mem->pd = pd;
+ mem->max_buf = max_pages;
+ mem->state = RXE_MEM_STATE_FREE;
+ mem->type = RXE_MEM_TYPE_MR;
+
+ return 0;
+
+err1:
+ return err;
+}
+
+static void lookup_iova(
+ struct rxe_mem *mem,
+ u64 iova,
+ int *m_out,
+ int *n_out,
+ size_t *offset_out)
+{
+ size_t offset = iova - mem->iova + mem->offset;
+ int map_index;
+ int buf_index;
+ u64 length;
+
+ if (likely(mem->page_shift)) {
+ *offset_out = offset & mem->page_mask;
+ offset >>= mem->page_shift;
+ *n_out = offset & mem->map_mask;
+ *m_out = offset >> mem->map_shift;
+ } else {
+ map_index = 0;
+ buf_index = 0;
+
+ length = mem->map[map_index]->buf[buf_index].size;
+
+ while (offset >= length) {
+ offset -= length;
+ buf_index++;
+
+ if (buf_index == RXE_BUF_PER_MAP) {
+ map_index++;
+ buf_index = 0;
+ }
+ length = mem->map[map_index]->buf[buf_index].size;
+ }
+
+ *m_out = map_index;
+ *n_out = buf_index;
+ *offset_out = offset;
+ }
+}
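+
+/* Worked example (editor's illustration): assuming 4K pages
+ * (page_shift == 12, page_mask == 0xfff) and RXE_BUF_PER_MAP == 256
+ * (map_shift == 8, map_mask == 0xff), a region offset of 0x123456
+ * resolves as
+ *
+ *	*offset_out = 0x123456 & 0xfff = 0x456
+ *	page index  = 0x123456 >> 12   = 0x123
+ *	*n_out      = 0x123 & 0xff     = 0x23	(buffer within the map)
+ *	*m_out      = 0x123 >> 8       = 0x1	(map within the map table)
+ */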
+
+void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length)
+{
+ size_t offset;
+ int m, n;
+ void *addr;
+
+ if (mem->state != RXE_MEM_STATE_VALID) {
+ pr_warn("mem not in valid state\n");
+ addr = NULL;
+ goto out;
+ }
+
+ if (!mem->map) {
+ addr = (void *)(uintptr_t)iova;
+ goto out;
+ }
+
+ if (mem_check_range(mem, iova, length)) {
+ pr_warn("range violation\n");
+ addr = NULL;
+ goto out;
+ }
+
+ lookup_iova(mem, iova, &m, &n, &offset);
+
+ if (offset + length > mem->map[m]->buf[n].size) {
+ pr_warn("crosses page boundary\n");
+ addr = NULL;
+ goto out;
+ }
+
+ addr = (void *)(uintptr_t)mem->map[m]->buf[n].addr + offset;
+
+out:
+ return addr;
+}
+
+/* copy data from a range (vaddr, vaddr+length-1) to or from
+ * a mem object starting at iova. Compute an incremental crc32
+ * value if crcp is not NULL. Caller must hold a reference to mem.
+ */
+int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
+ enum copy_direction dir, u32 *crcp)
+{
+ int err;
+ int bytes;
+ u8 *va;
+ struct rxe_map **map;
+ struct rxe_phys_buf *buf;
+ int m;
+ int i;
+ size_t offset;
+ u32 crc = crcp ? (*crcp) : 0;
+
+ if (mem->type == RXE_MEM_TYPE_DMA) {
+ u8 *src, *dest;
+
+ src = (dir == to_mem_obj) ?
+ addr : ((void *)(uintptr_t)iova);
+
+ dest = (dir == to_mem_obj) ?
+ ((void *)(uintptr_t)iova) : addr;
+
+ if (crcp)
+ *crcp = crc32_le(*crcp, src, length);
+
+ memcpy(dest, src, length);
+
+ return 0;
+ }
+
+ WARN_ON(!mem->map);
+
+ err = mem_check_range(mem, iova, length);
+ if (err) {
+ err = -EFAULT;
+ goto err1;
+ }
+
+ lookup_iova(mem, iova, &m, &i, &offset);
+
+ map = mem->map + m;
+ buf = map[0]->buf + i;
+
+ while (length > 0) {
+ u8 *src, *dest;
+
+ va = (u8 *)(uintptr_t)buf->addr + offset;
+ src = (dir == to_mem_obj) ? addr : va;
+ dest = (dir == to_mem_obj) ? va : addr;
+
+ bytes = buf->size - offset;
+
+ if (bytes > length)
+ bytes = length;
+
+ if (crcp)
+ crc = crc32_le(crc, src, bytes);
+
+ memcpy(dest, src, bytes);
+
+ length -= bytes;
+ addr += bytes;
+
+ offset = 0;
+ buf++;
+ i++;
+
+ if (i == RXE_BUF_PER_MAP) {
+ i = 0;
+ map++;
+ buf = map[0]->buf;
+ }
+ }
+
+ if (crcp)
+ *crcp = crc;
+
+ return 0;
+
+err1:
+ return err;
+}
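+
+/* Editor's sketch of the incremental-CRC property relied on above:
+ * feeding a buffer to crc32_le() in chunks while carrying the running
+ * value matches one call over the whole range,
+ *
+ *	u32 crc = seed;
+ *
+ *	crc = crc32_le(crc, buf, n1);
+ *	crc = crc32_le(crc, buf + n1, n2);
+ *	// crc == crc32_le(seed, buf, n1 + n2)
+ *
+ * which is why rxe_mem_copy() can thread *crcp through the per-buffer
+ * copies of a scattered region.
+ */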
+
+/* copy data in or out of a wqe, i.e. sg list
+ * under the control of a dma descriptor
+ */
+int copy_data(
+ struct rxe_dev *rxe,
+ struct rxe_pd *pd,
+ int access,
+ struct rxe_dma_info *dma,
+ void *addr,
+ int length,
+ enum copy_direction dir,
+ u32 *crcp)
+{
+ int bytes;
+ struct rxe_sge *sge = &dma->sge[dma->cur_sge];
+ int offset = dma->sge_offset;
+ int resid = dma->resid;
+ struct rxe_mem *mem = NULL;
+ u64 iova;
+ int err;
+
+ if (length == 0)
+ return 0;
+
+ if (length > resid) {
+ err = -EINVAL;
+ goto err2;
+ }
+
+ if (sge->length && (offset < sge->length)) {
+ mem = lookup_mem(pd, access, sge->lkey, lookup_local);
+ if (!mem) {
+ err = -EINVAL;
+ goto err1;
+ }
+ }
+
+ while (length > 0) {
+ bytes = length;
+
+ if (offset >= sge->length) {
+ if (mem) {
+ rxe_drop_ref(mem);
+ mem = NULL;
+ }
+ sge++;
+ dma->cur_sge++;
+ offset = 0;
+
+ if (dma->cur_sge >= dma->num_sge) {
+ err = -ENOSPC;
+ goto err2;
+ }
+
+ if (sge->length) {
+ mem = lookup_mem(pd, access, sge->lkey,
+ lookup_local);
+ if (!mem) {
+ err = -EINVAL;
+ goto err1;
+ }
+ } else {
+ continue;
+ }
+ }
+
+ if (bytes > sge->length - offset)
+ bytes = sge->length - offset;
+
+ if (bytes > 0) {
+ iova = sge->addr + offset;
+
+ err = rxe_mem_copy(mem, iova, addr, bytes, dir, crcp);
+ if (err)
+ goto err2;
+
+ offset += bytes;
+ resid -= bytes;
+ length -= bytes;
+ addr += bytes;
+ }
+ }
+
+ dma->sge_offset = offset;
+ dma->resid = resid;
+
+ if (mem)
+ rxe_drop_ref(mem);
+
+ return 0;
+
+err2:
+ if (mem)
+ rxe_drop_ref(mem);
+err1:
+ return err;
+}
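+
+/* Worked example (editor's illustration): given two 0x1000-byte SGEs
+ * and dma == { .cur_sge = 0, .sge_offset = 0, .resid = 0x2000 }, a
+ * copy_data() call with length == 0x1800 consumes all of sge[0] and
+ * half of sge[1], leaving
+ *
+ *	dma->cur_sge    == 1
+ *	dma->sge_offset == 0x800
+ *	dma->resid      == 0x800
+ *
+ * so the next call resumes mid-SGE exactly where this one stopped.
+ */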
+
+int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
+{
+ struct rxe_sge *sge = &dma->sge[dma->cur_sge];
+ int offset = dma->sge_offset;
+ int resid = dma->resid;
+
+ while (length) {
+ unsigned int bytes;
+
+ if (offset >= sge->length) {
+ sge++;
+ dma->cur_sge++;
+ offset = 0;
+ if (dma->cur_sge >= dma->num_sge)
+ return -ENOSPC;
+ }
+
+ bytes = length;
+
+ if (bytes > sge->length - offset)
+ bytes = sge->length - offset;
+
+ offset += bytes;
+ resid -= bytes;
+ length -= bytes;
+ }
+
+ dma->sge_offset = offset;
+ dma->resid = resid;
+
+ return 0;
+}
+
+/* (1) find the mem (mr or mw) corresponding to lkey/rkey
+ * depending on lookup_type
+ * (2) verify that the (qp) pd matches the mem pd
+ * (3) verify that the mem can support the requested access
+ * (4) verify that mem state is valid
+ */
+struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
+ enum lookup_type type)
+{
+ struct rxe_mem *mem;
+ struct rxe_dev *rxe = to_rdev(pd->ibpd.device);
+ int index = key >> 8;
+
+ if (index >= RXE_MIN_MR_INDEX && index <= RXE_MAX_MR_INDEX) {
+ mem = rxe_pool_get_index(&rxe->mr_pool, index);
+ if (!mem)
+ goto err1;
+ } else {
+ goto err1;
+ }
+
+ if ((type == lookup_local && mem->lkey != key) ||
+ (type == lookup_remote && mem->rkey != key))
+ goto err2;
+
+ if (mem->pd != pd)
+ goto err2;
+
+ if (access && !(access & mem->access))
+ goto err2;
+
+ if (mem->state != RXE_MEM_STATE_VALID)
+ goto err2;
+
+ return mem;
+
+err2:
+ rxe_drop_ref(mem);
+err1:
+ return NULL;
+}
+
+int rxe_mem_map_pages(struct rxe_dev *rxe, struct rxe_mem *mem,
+ u64 *page, int num_pages, u64 iova)
+{
+ int i;
+ int num_buf;
+ int err;
+ struct rxe_map **map;
+ struct rxe_phys_buf *buf;
+ int page_size;
+
+ if (num_pages > mem->max_buf) {
+ err = -EINVAL;
+ goto err1;
+ }
+
+ num_buf = 0;
+ page_size = 1 << mem->page_shift;
+ map = mem->map;
+ buf = map[0]->buf;
+
+ for (i = 0; i < num_pages; i++) {
+ buf->addr = *page++;
+ buf->size = page_size;
+ buf++;
+ num_buf++;
+
+ if (num_buf == RXE_BUF_PER_MAP) {
+ map++;
+ buf = map[0]->buf;
+ num_buf = 0;
+ }
+ }
+
+ mem->iova = iova;
+ mem->va = iova;
+ mem->length = num_pages << mem->page_shift;
+ mem->state = RXE_MEM_STATE_VALID;
+
+ return 0;
+
+err1:
+ return err;
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
new file mode 100644
index 000000000..eedf2f1ca
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -0,0 +1,703 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <net/udp_tunnel.h>
+#include <net/sch_generic.h>
+#include <linux/netfilter.h>
+#include <rdma/ib_addr.h>
+
+#include "rxe.h"
+#include "rxe_net.h"
+#include "rxe_loc.h"
+
+static LIST_HEAD(rxe_dev_list);
+static DEFINE_SPINLOCK(dev_list_lock); /* spinlock for device list */
+
+struct rxe_dev *net_to_rxe(struct net_device *ndev)
+{
+ struct rxe_dev *rxe;
+ struct rxe_dev *found = NULL;
+
+ spin_lock_bh(&dev_list_lock);
+ list_for_each_entry(rxe, &rxe_dev_list, list) {
+ if (rxe->ndev == ndev) {
+ found = rxe;
+ break;
+ }
+ }
+ spin_unlock_bh(&dev_list_lock);
+
+ return found;
+}
+
+struct rxe_dev *get_rxe_by_name(const char *name)
+{
+ struct rxe_dev *rxe;
+ struct rxe_dev *found = NULL;
+
+ spin_lock_bh(&dev_list_lock);
+ list_for_each_entry(rxe, &rxe_dev_list, list) {
+ if (!strcmp(name, rxe->ib_dev.name)) {
+ found = rxe;
+ break;
+ }
+ }
+ spin_unlock_bh(&dev_list_lock);
+ return found;
+}
+
+struct rxe_recv_sockets recv_sockets;
+
+static __be64 rxe_mac_to_eui64(struct net_device *ndev)
+{
+ unsigned char *mac_addr = ndev->dev_addr;
+ __be64 eui64;
+ unsigned char *dst = (unsigned char *)&eui64;
+
+ dst[0] = mac_addr[0] ^ 2;
+ dst[1] = mac_addr[1];
+ dst[2] = mac_addr[2];
+ dst[3] = 0xff;
+ dst[4] = 0xfe;
+ dst[5] = mac_addr[3];
+ dst[6] = mac_addr[4];
+ dst[7] = mac_addr[5];
+
+ return eui64;
+}
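+
+/* Worked example (editor's note): this is the usual MAC-48 to EUI-64
+ * expansion (flip the universal/local bit, splice in ff:fe), so
+ * 00:11:22:33:44:55 maps to 02:11:22:ff:fe:33:44:55.
+ */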
+
+static __be64 node_guid(struct rxe_dev *rxe)
+{
+ return rxe_mac_to_eui64(rxe->ndev);
+}
+
+static __be64 port_guid(struct rxe_dev *rxe)
+{
+ return rxe_mac_to_eui64(rxe->ndev);
+}
+
+static struct device *dma_device(struct rxe_dev *rxe)
+{
+ struct net_device *ndev;
+
+ ndev = rxe->ndev;
+
+ if (ndev->priv_flags & IFF_802_1Q_VLAN)
+ ndev = vlan_dev_real_dev(ndev);
+
+ return ndev->dev.parent;
+}
+
+static int mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
+{
+ int err;
+ unsigned char ll_addr[ETH_ALEN];
+
+ ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
+ err = dev_mc_add(rxe->ndev, ll_addr);
+
+ return err;
+}
+
+static int mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)
+{
+ int err;
+ unsigned char ll_addr[ETH_ALEN];
+
+ ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
+ err = dev_mc_del(rxe->ndev, ll_addr);
+
+ return err;
+}
+
+static struct dst_entry *rxe_find_route4(struct net_device *ndev,
+ struct in_addr *saddr,
+ struct in_addr *daddr)
+{
+ struct rtable *rt;
+	struct flowi4 fl;
+
+ memset(&fl, 0, sizeof(fl));
+ fl.flowi4_oif = ndev->ifindex;
+ memcpy(&fl.saddr, saddr, sizeof(*saddr));
+ memcpy(&fl.daddr, daddr, sizeof(*daddr));
+ fl.flowi4_proto = IPPROTO_UDP;
+
+ rt = ip_route_output_key(&init_net, &fl);
+ if (IS_ERR(rt)) {
+ pr_err_ratelimited("no route to %pI4\n", &daddr->s_addr);
+ return NULL;
+ }
+
+ return &rt->dst;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static struct dst_entry *rxe_find_route6(struct net_device *ndev,
+ struct in6_addr *saddr,
+ struct in6_addr *daddr)
+{
+ struct dst_entry *ndst;
+	struct flowi6 fl6;
+
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_oif = ndev->ifindex;
+ memcpy(&fl6.saddr, saddr, sizeof(*saddr));
+ memcpy(&fl6.daddr, daddr, sizeof(*daddr));
+ fl6.flowi6_proto = IPPROTO_UDP;
+
+ if (unlikely(ipv6_stub->ipv6_dst_lookup(sock_net(recv_sockets.sk6->sk),
+ recv_sockets.sk6->sk, &ndst, &fl6))) {
+ pr_err_ratelimited("no route to %pI6\n", daddr);
+ goto put;
+ }
+
+ if (unlikely(ndst->error)) {
+ pr_err("no route to %pI6\n", daddr);
+ goto put;
+ }
+
+ return ndst;
+put:
+ dst_release(ndst);
+ return NULL;
+}
+
+#else
+
+static struct dst_entry *rxe_find_route6(struct net_device *ndev,
+ struct in6_addr *saddr,
+ struct in6_addr *daddr)
+{
+ return NULL;
+}
+
+#endif
+
+static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+{
+ struct udphdr *udph;
+ struct net_device *ndev = skb->dev;
+ struct rxe_dev *rxe = net_to_rxe(ndev);
+ struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
+
+ if (!rxe)
+ goto drop;
+
+ if (skb_linearize(skb)) {
+ pr_err("skb_linearize failed\n");
+ goto drop;
+ }
+
+ udph = udp_hdr(skb);
+ pkt->rxe = rxe;
+ pkt->port_num = 1;
+ pkt->hdr = (u8 *)(udph + 1);
+ pkt->mask = RXE_GRH_MASK;
+ pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph);
+
+ return rxe_rcv(skb);
+drop:
+ kfree_skb(skb);
+ return 0;
+}
+
+static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
+ bool ipv6)
+{
+ int err;
+ struct socket *sock;
+ struct udp_port_cfg udp_cfg;
+ struct udp_tunnel_sock_cfg tnl_cfg;
+
+ memset(&udp_cfg, 0, sizeof(udp_cfg));
+
+ if (ipv6) {
+ udp_cfg.family = AF_INET6;
+ udp_cfg.ipv6_v6only = 1;
+ } else {
+ udp_cfg.family = AF_INET;
+ }
+
+ udp_cfg.local_udp_port = port;
+
+ /* Create UDP socket */
+ err = udp_sock_create(net, &udp_cfg, &sock);
+ if (err < 0) {
+ pr_err("failed to create udp socket. err = %d\n", err);
+ return ERR_PTR(err);
+ }
+
+	/* zero the whole cfg: it has callbacks we do not set below */
+	memset(&tnl_cfg, 0, sizeof(tnl_cfg));
+	tnl_cfg.sk_user_data = NULL;
+ tnl_cfg.encap_type = 1;
+ tnl_cfg.encap_rcv = rxe_udp_encap_recv;
+ tnl_cfg.encap_destroy = NULL;
+
+ /* Setup UDP tunnel */
+ setup_udp_tunnel_sock(net, sock, &tnl_cfg);
+
+ return sock;
+}
+
+void rxe_release_udp_tunnel(struct socket *sk)
+{
+ if (sk)
+ udp_tunnel_sock_release(sk);
+}
+
+static void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port,
+ __be16 dst_port)
+{
+ struct udphdr *udph;
+
+ __skb_push(skb, sizeof(*udph));
+ skb_reset_transport_header(skb);
+ udph = udp_hdr(skb);
+
+ udph->dest = dst_port;
+ udph->source = src_port;
+ udph->len = htons(skb->len);
+ udph->check = 0;
+}
+
+static void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb,
+ __be32 saddr, __be32 daddr, __u8 proto,
+ __u8 tos, __u8 ttl, __be16 df, bool xnet)
+{
+ struct iphdr *iph;
+
+ skb_scrub_packet(skb, xnet);
+
+ skb_clear_hash(skb);
+ skb_dst_set(skb, dst);
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+
+ skb_push(skb, sizeof(struct iphdr));
+ skb_reset_network_header(skb);
+
+ iph = ip_hdr(skb);
+
+ iph->version = IPVERSION;
+ iph->ihl = sizeof(struct iphdr) >> 2;
+ iph->frag_off = df;
+ iph->protocol = proto;
+ iph->tos = tos;
+ iph->daddr = daddr;
+ iph->saddr = saddr;
+ iph->ttl = ttl;
+ __ip_select_ident(dev_net(dst->dev), iph,
+ skb_shinfo(skb)->gso_segs ?: 1);
+ iph->tot_len = htons(skb->len);
+ ip_send_check(iph);
+}
+
+static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb,
+ struct in6_addr *saddr, struct in6_addr *daddr,
+ __u8 proto, __u8 prio, __u8 ttl)
+{
+ struct ipv6hdr *ip6h;
+
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+ IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED
+ | IPSKB_REROUTED);
+ skb_dst_set(skb, dst);
+
+ __skb_push(skb, sizeof(*ip6h));
+ skb_reset_network_header(skb);
+ ip6h = ipv6_hdr(skb);
+ ip6_flow_hdr(ip6h, prio, htonl(0));
+ ip6h->payload_len = htons(skb->len);
+ ip6h->nexthdr = proto;
+ ip6h->hop_limit = ttl;
+ ip6h->daddr = *daddr;
+ ip6h->saddr = *saddr;
+ ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
+}
+
+static int prepare4(struct rxe_dev *rxe, struct sk_buff *skb, struct rxe_av *av)
+{
+ struct dst_entry *dst;
+ bool xnet = false;
+ __be16 df = htons(IP_DF);
+ struct in_addr *saddr = &av->sgid_addr._sockaddr_in.sin_addr;
+ struct in_addr *daddr = &av->dgid_addr._sockaddr_in.sin_addr;
+ struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
+
+ dst = rxe_find_route4(rxe->ndev, saddr, daddr);
+ if (!dst) {
+ pr_err("Host not reachable\n");
+ return -EHOSTUNREACH;
+ }
+
+ if (!memcmp(saddr, daddr, sizeof(*daddr)))
+ pkt->mask |= RXE_LOOPBACK_MASK;
+
+ prepare_udp_hdr(skb, htons(RXE_ROCE_V2_SPORT),
+ htons(ROCE_V2_UDP_DPORT));
+
+ prepare_ipv4_hdr(dst, skb, saddr->s_addr, daddr->s_addr, IPPROTO_UDP,
+ av->grh.traffic_class, av->grh.hop_limit, df, xnet);
+ return 0;
+}
+
+static int prepare6(struct rxe_dev *rxe, struct sk_buff *skb, struct rxe_av *av)
+{
+ struct dst_entry *dst;
+ struct in6_addr *saddr = &av->sgid_addr._sockaddr_in6.sin6_addr;
+ struct in6_addr *daddr = &av->dgid_addr._sockaddr_in6.sin6_addr;
+ struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
+
+ dst = rxe_find_route6(rxe->ndev, saddr, daddr);
+ if (!dst) {
+ pr_err("Host not reachable\n");
+ return -EHOSTUNREACH;
+ }
+
+ if (!memcmp(saddr, daddr, sizeof(*daddr)))
+ pkt->mask |= RXE_LOOPBACK_MASK;
+
+ prepare_udp_hdr(skb, htons(RXE_ROCE_V2_SPORT),
+ htons(ROCE_V2_UDP_DPORT));
+
+ prepare_ipv6_hdr(dst, skb, saddr, daddr, IPPROTO_UDP,
+ av->grh.traffic_class,
+ av->grh.hop_limit);
+ return 0;
+}
+
+static int prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb, u32 *crc)
+{
+ int err = 0;
+ struct rxe_av *av = rxe_get_av(pkt);
+
+ if (av->network_type == RDMA_NETWORK_IPV4)
+ err = prepare4(rxe, skb, av);
+ else if (av->network_type == RDMA_NETWORK_IPV6)
+ err = prepare6(rxe, skb, av);
+
+ *crc = rxe_icrc_hdr(pkt, skb);
+
+ return err;
+}
+
+static void rxe_skb_tx_dtor(struct sk_buff *skb)
+{
+ struct sock *sk = skb->sk;
+ struct rxe_qp *qp = sk->sk_user_data;
+ int skb_out = atomic_dec_return(&qp->skb_out);
+
+ if (unlikely(qp->need_req_skb &&
+ skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
+ rxe_run_task(&qp->req.task, 1);
+}
+
+static int send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb)
+{
+ struct sk_buff *nskb;
+ struct rxe_av *av;
+ int err;
+
+ av = rxe_get_av(pkt);
+
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ return -ENOMEM;
+
+ nskb->destructor = rxe_skb_tx_dtor;
+ nskb->sk = pkt->qp->sk->sk;
+
+ if (av->network_type == RDMA_NETWORK_IPV4) {
+ err = ip_local_out(dev_net(skb_dst(skb)->dev), nskb->sk, nskb);
+ } else if (av->network_type == RDMA_NETWORK_IPV6) {
+ err = ip6_local_out(dev_net(skb_dst(skb)->dev), nskb->sk, nskb);
+ } else {
+ pr_err("Unknown layer 3 protocol: %d\n", av->network_type);
+ kfree_skb(nskb);
+ return -EINVAL;
+ }
+
+ if (unlikely(net_xmit_eval(err))) {
+ pr_debug("error sending packet: %d\n", err);
+ return -EAGAIN;
+ }
+
+ kfree_skb(skb);
+
+ return 0;
+}
+
+static int loopback(struct sk_buff *skb)
+{
+ return rxe_rcv(skb);
+}
+
+static inline int addr_same(struct rxe_dev *rxe, struct rxe_av *av)
+{
+ return rxe->port.port_guid == av->grh.dgid.global.interface_id;
+}
+
+static struct sk_buff *init_packet(struct rxe_dev *rxe, struct rxe_av *av,
+ int paylen, struct rxe_pkt_info *pkt)
+{
+ unsigned int hdr_len;
+ struct sk_buff *skb;
+
+ if (av->network_type == RDMA_NETWORK_IPV4)
+ hdr_len = ETH_HLEN + sizeof(struct udphdr) +
+ sizeof(struct iphdr);
+ else
+ hdr_len = ETH_HLEN + sizeof(struct udphdr) +
+ sizeof(struct ipv6hdr);
+
+ skb = alloc_skb(paylen + hdr_len + LL_RESERVED_SPACE(rxe->ndev),
+ GFP_ATOMIC);
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(rxe->ndev));
+
+ skb->dev = rxe->ndev;
+ if (av->network_type == RDMA_NETWORK_IPV4)
+ skb->protocol = htons(ETH_P_IP);
+ else
+ skb->protocol = htons(ETH_P_IPV6);
+
+ pkt->rxe = rxe;
+ pkt->port_num = 1;
+ pkt->hdr = skb_put(skb, paylen);
+ pkt->mask |= RXE_GRH_MASK;
+
+ memset(pkt->hdr, 0, paylen);
+
+ return skb;
+}
+
+/*
+ * this is required by rxe_cfg to match rxe devices in
+ * /sys/class/infiniband with their underlying ethernet devices
+ */
+static char *parent_name(struct rxe_dev *rxe, unsigned int port_num)
+{
+ return rxe->ndev->name;
+}
+
+static enum rdma_link_layer link_layer(struct rxe_dev *rxe,
+ unsigned int port_num)
+{
+ return IB_LINK_LAYER_ETHERNET;
+}
+
+static struct rxe_ifc_ops ifc_ops = {
+ .node_guid = node_guid,
+ .port_guid = port_guid,
+ .dma_device = dma_device,
+ .mcast_add = mcast_add,
+ .mcast_delete = mcast_delete,
+ .prepare = prepare,
+ .send = send,
+ .loopback = loopback,
+ .init_packet = init_packet,
+ .parent_name = parent_name,
+ .link_layer = link_layer,
+};
+
+struct rxe_dev *rxe_net_add(struct net_device *ndev)
+{
+ int err;
+ struct rxe_dev *rxe = NULL;
+
+ rxe = (struct rxe_dev *)ib_alloc_device(sizeof(*rxe));
+ if (!rxe)
+ return NULL;
+
+ rxe->ifc_ops = &ifc_ops;
+ rxe->ndev = ndev;
+
+ err = rxe_add(rxe, ndev->mtu);
+ if (err) {
+ ib_dealloc_device(&rxe->ib_dev);
+ return NULL;
+ }
+
+ spin_lock_bh(&dev_list_lock);
+	list_add_tail(&rxe->list, &rxe_dev_list);
+ spin_unlock_bh(&dev_list_lock);
+ return rxe;
+}
+
+void rxe_remove_all(void)
+{
+ spin_lock_bh(&dev_list_lock);
+ while (!list_empty(&rxe_dev_list)) {
+ struct rxe_dev *rxe =
+ list_first_entry(&rxe_dev_list, struct rxe_dev, list);
+
+ list_del(&rxe->list);
+ spin_unlock_bh(&dev_list_lock);
+ rxe_remove(rxe);
+ spin_lock_bh(&dev_list_lock);
+ }
+ spin_unlock_bh(&dev_list_lock);
+}
+EXPORT_SYMBOL(rxe_remove_all);
+
+static void rxe_port_event(struct rxe_dev *rxe,
+ enum ib_event_type event)
+{
+ struct ib_event ev;
+
+ ev.device = &rxe->ib_dev;
+ ev.element.port_num = 1;
+ ev.event = event;
+
+ ib_dispatch_event(&ev);
+}
+
+/* Caller must hold net_info_lock */
+void rxe_port_up(struct rxe_dev *rxe)
+{
+ struct rxe_port *port;
+
+ port = &rxe->port;
+ port->attr.state = IB_PORT_ACTIVE;
+ port->attr.phys_state = IB_PHYS_STATE_LINK_UP;
+
+ rxe_port_event(rxe, IB_EVENT_PORT_ACTIVE);
+ pr_info("rxe: set %s active\n", rxe->ib_dev.name);
+}
+
+/* Caller must hold net_info_lock */
+void rxe_port_down(struct rxe_dev *rxe)
+{
+ struct rxe_port *port;
+
+ port = &rxe->port;
+ port->attr.state = IB_PORT_DOWN;
+ port->attr.phys_state = IB_PHYS_STATE_LINK_DOWN;
+
+ rxe_port_event(rxe, IB_EVENT_PORT_ERR);
+ pr_info("rxe: set %s down\n", rxe->ib_dev.name);
+}
+
+static int rxe_notify(struct notifier_block *not_blk,
+ unsigned long event,
+ void *arg)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(arg);
+ struct rxe_dev *rxe = net_to_rxe(ndev);
+
+ if (!rxe)
+ goto out;
+
+ switch (event) {
+ case NETDEV_UNREGISTER:
+ list_del(&rxe->list);
+ rxe_remove(rxe);
+ break;
+ case NETDEV_UP:
+ rxe_port_up(rxe);
+ break;
+ case NETDEV_DOWN:
+ rxe_port_down(rxe);
+ break;
+ case NETDEV_CHANGEMTU:
+ pr_info("rxe: %s changed mtu to %d\n", ndev->name, ndev->mtu);
+ rxe_set_mtu(rxe, ndev->mtu);
+ break;
+ case NETDEV_REBOOT:
+ case NETDEV_CHANGE:
+ case NETDEV_GOING_DOWN:
+ case NETDEV_CHANGEADDR:
+ case NETDEV_CHANGENAME:
+ case NETDEV_FEAT_CHANGE:
+ default:
+ pr_info("rxe: ignoring netdev event = %ld for %s\n",
+ event, ndev->name);
+ break;
+ }
+out:
+ return NOTIFY_OK;
+}
+
+struct notifier_block rxe_net_notifier = {
+ .notifier_call = rxe_notify,
+};
+
+int rxe_net_ipv4_init(void)
+{
+ recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net,
+ htons(ROCE_V2_UDP_DPORT), false);
+ if (IS_ERR(recv_sockets.sk4)) {
+ recv_sockets.sk4 = NULL;
+ pr_err("rxe: Failed to create IPv4 UDP tunnel\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+int rxe_net_ipv6_init(void)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+
+ recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
+ htons(ROCE_V2_UDP_DPORT), true);
+ if (IS_ERR(recv_sockets.sk6)) {
+ recv_sockets.sk6 = NULL;
+ pr_err("rxe: Failed to create IPv6 UDP tunnel\n");
+ return -1;
+ }
+#endif
+ return 0;
+}
+
+void rxe_net_exit(void)
+{
+ rxe_release_udp_tunnel(recv_sockets.sk6);
+ rxe_release_udp_tunnel(recv_sockets.sk4);
+ unregister_netdevice_notifier(&rxe_net_notifier);
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_net.h b/drivers/infiniband/sw/rxe/rxe_net.h
new file mode 100644
index 000000000..0daf7f09e
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_net.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef RXE_NET_H
+#define RXE_NET_H
+
+#include <net/sock.h>
+#include <net/if_inet6.h>
+#include <linux/module.h>
+
+struct rxe_recv_sockets {
+ struct socket *sk4;
+ struct socket *sk6;
+};
+
+extern struct rxe_recv_sockets recv_sockets;
+extern struct notifier_block rxe_net_notifier;
+void rxe_release_udp_tunnel(struct socket *sk);
+
+struct rxe_dev *rxe_net_add(struct net_device *ndev);
+
+int rxe_net_ipv4_init(void);
+int rxe_net_ipv6_init(void);
+void rxe_net_exit(void);
+
+#endif /* RXE_NET_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.c b/drivers/infiniband/sw/rxe/rxe_opcode.c
new file mode 100644
index 000000000..61927c165
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_opcode.c
@@ -0,0 +1,961 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_pack.h>
+#include "rxe_opcode.h"
+#include "rxe_hdr.h"
+
+/* useful information about work request opcodes and pkt opcodes in
+ * table form
+ */
+struct rxe_wr_opcode_info rxe_wr_opcode_info[] = {
+ [IB_WR_RDMA_WRITE] = {
+ .name = "IB_WR_RDMA_WRITE",
+ .mask = {
+ [IB_QPT_RC] = WR_INLINE_MASK | WR_WRITE_MASK,
+ [IB_QPT_UC] = WR_INLINE_MASK | WR_WRITE_MASK,
+ },
+ },
+ [IB_WR_RDMA_WRITE_WITH_IMM] = {
+ .name = "IB_WR_RDMA_WRITE_WITH_IMM",
+ .mask = {
+ [IB_QPT_RC] = WR_INLINE_MASK | WR_WRITE_MASK,
+ [IB_QPT_UC] = WR_INLINE_MASK | WR_WRITE_MASK,
+ },
+ },
+ [IB_WR_SEND] = {
+ .name = "IB_WR_SEND",
+ .mask = {
+ [IB_QPT_SMI] = WR_INLINE_MASK | WR_SEND_MASK,
+ [IB_QPT_GSI] = WR_INLINE_MASK | WR_SEND_MASK,
+ [IB_QPT_RC] = WR_INLINE_MASK | WR_SEND_MASK,
+ [IB_QPT_UC] = WR_INLINE_MASK | WR_SEND_MASK,
+ [IB_QPT_UD] = WR_INLINE_MASK | WR_SEND_MASK,
+ },
+ },
+ [IB_WR_SEND_WITH_IMM] = {
+ .name = "IB_WR_SEND_WITH_IMM",
+ .mask = {
+ [IB_QPT_SMI] = WR_INLINE_MASK | WR_SEND_MASK,
+ [IB_QPT_GSI] = WR_INLINE_MASK | WR_SEND_MASK,
+ [IB_QPT_RC] = WR_INLINE_MASK | WR_SEND_MASK,
+ [IB_QPT_UC] = WR_INLINE_MASK | WR_SEND_MASK,
+ [IB_QPT_UD] = WR_INLINE_MASK | WR_SEND_MASK,
+ },
+ },
+ [IB_WR_RDMA_READ] = {
+ .name = "IB_WR_RDMA_READ",
+ .mask = {
+ [IB_QPT_RC] = WR_READ_MASK,
+ },
+ },
+ [IB_WR_ATOMIC_CMP_AND_SWP] = {
+ .name = "IB_WR_ATOMIC_CMP_AND_SWP",
+ .mask = {
+ [IB_QPT_RC] = WR_ATOMIC_MASK,
+ },
+ },
+ [IB_WR_ATOMIC_FETCH_AND_ADD] = {
+ .name = "IB_WR_ATOMIC_FETCH_AND_ADD",
+ .mask = {
+ [IB_QPT_RC] = WR_ATOMIC_MASK,
+ },
+ },
+ [IB_WR_LSO] = {
+ .name = "IB_WR_LSO",
+ .mask = {
+ /* not supported */
+ },
+ },
+ [IB_WR_SEND_WITH_INV] = {
+ .name = "IB_WR_SEND_WITH_INV",
+ .mask = {
+ [IB_QPT_RC] = WR_INLINE_MASK | WR_SEND_MASK,
+ [IB_QPT_UC] = WR_INLINE_MASK | WR_SEND_MASK,
+ [IB_QPT_UD] = WR_INLINE_MASK | WR_SEND_MASK,
+ },
+ },
+ [IB_WR_RDMA_READ_WITH_INV] = {
+ .name = "IB_WR_RDMA_READ_WITH_INV",
+ .mask = {
+ [IB_QPT_RC] = WR_READ_MASK,
+ },
+ },
+ [IB_WR_LOCAL_INV] = {
+ .name = "IB_WR_LOCAL_INV",
+ .mask = {
+ [IB_QPT_RC] = WR_REG_MASK,
+ },
+ },
+ [IB_WR_REG_MR] = {
+ .name = "IB_WR_REG_MR",
+ .mask = {
+ [IB_QPT_RC] = WR_REG_MASK,
+ },
+ },
+};
+
+struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
+ [IB_OPCODE_RC_SEND_FIRST] = {
+ .name = "IB_OPCODE_RC_SEND_FIRST",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK
+ | RXE_SEND_MASK | RXE_START_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_SEND_MIDDLE] = {
+ .name = "IB_OPCODE_RC_SEND_MIDDLE]",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK
+ | RXE_MIDDLE_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_SEND_LAST] = {
+ .name = "IB_OPCODE_RC_SEND_LAST",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK
+ | RXE_SEND_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE",
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_IMMDT] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_SEND_ONLY] = {
+ .name = "IB_OPCODE_RC_SEND_ONLY",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK
+ | RXE_RWR_MASK | RXE_SEND_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE",
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_IMMDT] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_WRITE_FIRST] = {
+ .name = "IB_OPCODE_RC_RDMA_WRITE_FIRST",
+ .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_WRITE_MASK | RXE_START_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = {
+ .name = "IB_OPCODE_RC_RDMA_WRITE_MIDDLE",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK
+ | RXE_MIDDLE_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_WRITE_LAST] = {
+ .name = "IB_OPCODE_RC_RDMA_WRITE_LAST",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK
+ | RXE_END_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE",
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK
+ | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_IMMDT] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_WRITE_ONLY] = {
+ .name = "IB_OPCODE_RC_RDMA_WRITE_ONLY",
+ .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_WRITE_MASK | RXE_START_MASK
+ | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE",
+ .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK
+ | RXE_REQ_MASK | RXE_WRITE_MASK
+ | RXE_COMP_MASK | RXE_RWR_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RETH] = RXE_BTH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES
+ + RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RETH_BYTES
+ + RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_READ_REQUEST] = {
+ .name = "IB_OPCODE_RC_RDMA_READ_REQUEST",
+ .mask = RXE_RETH_MASK | RXE_REQ_MASK | RXE_READ_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = {
+ .name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST",
+ .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK
+ | RXE_START_MASK,
+ .length = RXE_BTH_BYTES + RXE_AETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_AETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_AETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = {
+ .name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE",
+ .mask = RXE_PAYLOAD_MASK | RXE_ACK_MASK | RXE_MIDDLE_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = {
+ .name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST",
+ .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK
+ | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_AETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_AETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_AETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = {
+ .name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY",
+ .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_AETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_AETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_AETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_ACKNOWLEDGE] = {
+ .name = "IB_OPCODE_RC_ACKNOWLEDGE",
+ .mask = RXE_AETH_MASK | RXE_ACK_MASK | RXE_START_MASK
+ | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_AETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_AETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_AETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = {
+ .name = "IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE",
+ .mask = RXE_AETH_MASK | RXE_ATMACK_MASK | RXE_ACK_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_AETH] = RXE_BTH_BYTES,
+ [RXE_ATMACK] = RXE_BTH_BYTES
+ + RXE_AETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_ATMACK_BYTES + RXE_AETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_COMPARE_SWAP] = {
+ .name = "IB_OPCODE_RC_COMPARE_SWAP",
+ .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_ATMETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_ATMETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_FETCH_ADD] = {
+ .name = "IB_OPCODE_RC_FETCH_ADD",
+ .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_ATMETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_ATMETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = {
+ .name = "IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE",
+ .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_IETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_IETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = {
+ .name = "IB_OPCODE_RC_SEND_ONLY_INV",
+ .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
+ | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_IETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_IETH_BYTES,
+ }
+ },
+
+ /* UC */
+ [IB_OPCODE_UC_SEND_FIRST] = {
+ .name = "IB_OPCODE_UC_SEND_FIRST",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK
+ | RXE_SEND_MASK | RXE_START_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_SEND_MIDDLE] = {
+ .name = "IB_OPCODE_UC_SEND_MIDDLE",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK
+ | RXE_MIDDLE_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_SEND_LAST] = {
+ .name = "IB_OPCODE_UC_SEND_LAST",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK
+ | RXE_SEND_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE",
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_IMMDT] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_SEND_ONLY] = {
+ .name = "IB_OPCODE_UC_SEND_ONLY",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK
+ | RXE_RWR_MASK | RXE_SEND_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE",
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_IMMDT] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_RDMA_WRITE_FIRST] = {
+ .name = "IB_OPCODE_UC_RDMA_WRITE_FIRST",
+ .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_WRITE_MASK | RXE_START_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RETH_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = {
+ .name = "IB_OPCODE_UC_RDMA_WRITE_MIDDLE",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK
+ | RXE_MIDDLE_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_RDMA_WRITE_LAST] = {
+ .name = "IB_OPCODE_UC_RDMA_WRITE_LAST",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK
+ | RXE_END_MASK,
+ .length = RXE_BTH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE",
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK
+ | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_IMMDT] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_RDMA_WRITE_ONLY] = {
+ .name = "IB_OPCODE_UC_RDMA_WRITE_ONLY",
+ .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_WRITE_MASK | RXE_START_MASK
+ | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RETH_BYTES,
+ }
+ },
+ [IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE",
+ .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK
+ | RXE_REQ_MASK | RXE_WRITE_MASK
+ | RXE_COMP_MASK | RXE_RWR_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RETH] = RXE_BTH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES
+ + RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RETH_BYTES
+ + RXE_IMMDT_BYTES,
+ }
+ },
+
+ /* RD */
+ [IB_OPCODE_RD_SEND_FIRST] = {
+ .name = "IB_OPCODE_RD_SEND_FIRST",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
+ | RXE_REQ_MASK | RXE_RWR_MASK | RXE_SEND_MASK
+ | RXE_START_MASK,
+ .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_SEND_MIDDLE] = {
+ .name = "IB_OPCODE_RD_SEND_MIDDLE",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
+ | RXE_REQ_MASK | RXE_SEND_MASK
+ | RXE_MIDDLE_MASK,
+ .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_SEND_LAST] = {
+ .name = "IB_OPCODE_RD_SEND_LAST",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
+ | RXE_REQ_MASK | RXE_COMP_MASK | RXE_SEND_MASK
+ | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_SEND_LAST_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_RD_SEND_LAST_WITH_IMMEDIATE",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK
+ | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_COMP_MASK | RXE_SEND_MASK
+ | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES
+ + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES
+ + RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_SEND_ONLY] = {
+ .name = "IB_OPCODE_RD_SEND_ONLY",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
+ | RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK
+ | RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_SEND_ONLY_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_RD_SEND_ONLY_WITH_IMMEDIATE",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK
+ | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES
+ + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES
+ + RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_WRITE_FIRST] = {
+ .name = "IB_OPCODE_RD_RDMA_WRITE_FIRST",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK
+ | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_WRITE_MASK | RXE_START_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES
+ + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_RETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES
+ + RXE_RETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_WRITE_MIDDLE] = {
+ .name = "IB_OPCODE_RD_RDMA_WRITE_MIDDLE",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
+ | RXE_REQ_MASK | RXE_WRITE_MASK
+ | RXE_MIDDLE_MASK,
+ .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_WRITE_LAST] = {
+ .name = "IB_OPCODE_RD_RDMA_WRITE_LAST",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
+ | RXE_REQ_MASK | RXE_WRITE_MASK
+ | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_WRITE_LAST_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_RD_RDMA_WRITE_LAST_WITH_IMMEDIATE",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK
+ | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK
+ | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES
+ + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES
+ + RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_WRITE_ONLY] = {
+ .name = "IB_OPCODE_RD_RDMA_WRITE_ONLY",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK
+ | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_WRITE_MASK | RXE_START_MASK
+ | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES
+ + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_RETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES
+ + RXE_RETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_RD_RDMA_WRITE_ONLY_WITH_IMMEDIATE",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK
+ | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK
+ | RXE_REQ_MASK | RXE_WRITE_MASK
+ | RXE_COMP_MASK | RXE_RWR_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES
+ + RXE_DETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_RETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES
+ + RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES
+ + RXE_RETH_BYTES
+ + RXE_IMMDT_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_READ_REQUEST] = {
+ .name = "IB_OPCODE_RD_RDMA_READ_REQUEST",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK
+ | RXE_REQ_MASK | RXE_READ_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES
+ + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_RETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RETH_BYTES
+ + RXE_DETH_BYTES
+ + RXE_RDETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_READ_RESPONSE_FIRST] = {
+ .name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_FIRST",
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK
+ | RXE_PAYLOAD_MASK | RXE_ACK_MASK
+ | RXE_START_MASK,
+ .length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_AETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_READ_RESPONSE_MIDDLE] = {
+ .name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_MIDDLE",
+ .mask = RXE_RDETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK
+ | RXE_MIDDLE_MASK,
+ .length = RXE_BTH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_READ_RESPONSE_LAST] = {
+ .name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_LAST",
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK
+ | RXE_ACK_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_AETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_RDMA_READ_RESPONSE_ONLY] = {
+ .name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_ONLY",
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK
+ | RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_AETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_ACKNOWLEDGE] = {
+ .name = "IB_OPCODE_RD_ACKNOWLEDGE",
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ACK_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_ATOMIC_ACKNOWLEDGE] = {
+ .name = "IB_OPCODE_RD_ATOMIC_ACKNOWLEDGE",
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ATMACK_MASK
+ | RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES
+ + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_ATMACK] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_AETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_COMPARE_SWAP] = {
+ .name = "RD_COMPARE_SWAP",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK
+ | RXE_REQ_MASK | RXE_ATOMIC_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES
+ + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_ATMETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES,
+			[RXE_PAYLOAD]	= RXE_BTH_BYTES
+						+ RXE_ATMETH_BYTES
+						+ RXE_DETH_BYTES
+						+ RXE_RDETH_BYTES,
+ }
+ },
+ [IB_OPCODE_RD_FETCH_ADD] = {
+ .name = "IB_OPCODE_RD_FETCH_ADD",
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK
+ | RXE_REQ_MASK | RXE_ATOMIC_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES
+ + RXE_RDETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_RDETH] = RXE_BTH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES,
+ [RXE_ATMETH] = RXE_BTH_BYTES
+ + RXE_RDETH_BYTES
+ + RXE_DETH_BYTES,
+			[RXE_PAYLOAD]	= RXE_BTH_BYTES
+						+ RXE_ATMETH_BYTES
+						+ RXE_DETH_BYTES
+						+ RXE_RDETH_BYTES,
+ }
+ },
+
+ /* UD */
+ [IB_OPCODE_UD_SEND_ONLY] = {
+ .name = "IB_OPCODE_UD_SEND_ONLY",
+ .mask = RXE_DETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
+ | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_DETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_DETH] = RXE_BTH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_DETH_BYTES,
+ }
+ },
+ [IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = {
+ .name = "IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE",
+ .mask = RXE_DETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK
+ | RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK
+ | RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES,
+ .offset = {
+ [RXE_BTH] = 0,
+ [RXE_DETH] = RXE_BTH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES
+ + RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES
+ + RXE_DETH_BYTES
+ + RXE_IMMDT_BYTES,
+ }
+ },
+
+};
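+
+/* Usage sketch (editor's illustration): given a received packet's BTH
+ * opcode, the table above locates every header fragment without any
+ * per-opcode parsing code. handle_reth() and pkt are hypothetical
+ * stand-ins (pkt as a struct rxe_pkt_info, as used in rxe_net.c):
+ *
+ *	struct rxe_opcode_info *info = &rxe_opcode[opcode];
+ *	u8 *payload = pkt->hdr + info->offset[RXE_PAYLOAD];
+ *
+ *	if (info->mask & RXE_RETH_MASK)
+ *		handle_reth(pkt->hdr + info->offset[RXE_RETH]);
+ */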
diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.h b/drivers/infiniband/sw/rxe/rxe_opcode.h
new file mode 100644
index 000000000..307604e9c
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_opcode.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef RXE_OPCODE_H
+#define RXE_OPCODE_H
+
+/*
+ * contains header bit mask definitions and header lengths,
+ * and declarations of the rxe_opcode_info and
+ * rxe_wr_opcode_info structs
+ */
+
+enum rxe_wr_mask {
+ WR_INLINE_MASK = BIT(0),
+ WR_ATOMIC_MASK = BIT(1),
+ WR_SEND_MASK = BIT(2),
+ WR_READ_MASK = BIT(3),
+ WR_WRITE_MASK = BIT(4),
+ WR_LOCAL_MASK = BIT(5),
+ WR_REG_MASK = BIT(6),
+
+ WR_READ_OR_WRITE_MASK = WR_READ_MASK | WR_WRITE_MASK,
+ WR_READ_WRITE_OR_SEND_MASK = WR_READ_OR_WRITE_MASK | WR_SEND_MASK,
+ WR_WRITE_OR_SEND_MASK = WR_WRITE_MASK | WR_SEND_MASK,
+ WR_ATOMIC_OR_READ_MASK = WR_ATOMIC_MASK | WR_READ_MASK,
+};
+
+#define WR_MAX_QPT (8)
+
+struct rxe_wr_opcode_info {
+ char *name;
+ enum rxe_wr_mask mask[WR_MAX_QPT];
+};
+
+extern struct rxe_wr_opcode_info rxe_wr_opcode_info[];
+
+enum rxe_hdr_type {
+ RXE_LRH,
+ RXE_GRH,
+ RXE_BTH,
+ RXE_RETH,
+ RXE_AETH,
+ RXE_ATMETH,
+ RXE_ATMACK,
+ RXE_IETH,
+ RXE_RDETH,
+ RXE_DETH,
+ RXE_IMMDT,
+ RXE_PAYLOAD,
+ NUM_HDR_TYPES
+};
+
+enum rxe_hdr_mask {
+ RXE_LRH_MASK = BIT(RXE_LRH),
+ RXE_GRH_MASK = BIT(RXE_GRH),
+ RXE_BTH_MASK = BIT(RXE_BTH),
+ RXE_IMMDT_MASK = BIT(RXE_IMMDT),
+ RXE_RETH_MASK = BIT(RXE_RETH),
+ RXE_AETH_MASK = BIT(RXE_AETH),
+ RXE_ATMETH_MASK = BIT(RXE_ATMETH),
+ RXE_ATMACK_MASK = BIT(RXE_ATMACK),
+ RXE_IETH_MASK = BIT(RXE_IETH),
+ RXE_RDETH_MASK = BIT(RXE_RDETH),
+ RXE_DETH_MASK = BIT(RXE_DETH),
+ RXE_PAYLOAD_MASK = BIT(RXE_PAYLOAD),
+
+ RXE_REQ_MASK = BIT(NUM_HDR_TYPES + 0),
+ RXE_ACK_MASK = BIT(NUM_HDR_TYPES + 1),
+ RXE_SEND_MASK = BIT(NUM_HDR_TYPES + 2),
+ RXE_WRITE_MASK = BIT(NUM_HDR_TYPES + 3),
+ RXE_READ_MASK = BIT(NUM_HDR_TYPES + 4),
+ RXE_ATOMIC_MASK = BIT(NUM_HDR_TYPES + 5),
+
+ RXE_RWR_MASK = BIT(NUM_HDR_TYPES + 6),
+ RXE_COMP_MASK = BIT(NUM_HDR_TYPES + 7),
+
+ RXE_START_MASK = BIT(NUM_HDR_TYPES + 8),
+ RXE_MIDDLE_MASK = BIT(NUM_HDR_TYPES + 9),
+ RXE_END_MASK = BIT(NUM_HDR_TYPES + 10),
+
+ RXE_LOOPBACK_MASK = BIT(NUM_HDR_TYPES + 12),
+
+ RXE_READ_OR_ATOMIC = (RXE_READ_MASK | RXE_ATOMIC_MASK),
+ RXE_WRITE_OR_SEND = (RXE_WRITE_MASK | RXE_SEND_MASK),
+};
+
+#define OPCODE_NONE (-1)
+#define RXE_NUM_OPCODE 256
+
+struct rxe_opcode_info {
+ char *name;
+ enum rxe_hdr_mask mask;
+ int length;
+ int offset[NUM_HDR_TYPES];
+};
+
+extern struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE];
+
+#endif /* RXE_OPCODE_H */
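
The rxe_hdr_mask enum packs two kinds of information into one word: the low NUM_HDR_TYPES bits record which headers are present in a packet, and the higher bits classify behavior (request vs. ack, send/write/read/atomic, first/middle/last fragment). A hedged sketch of the intended usage; the helper names are illustrative only:

    static inline bool sketch_is_request(u8 opcode)
    {
            /* behavior bit: set for requester-side packets */
            return rxe_opcode[opcode].mask & RXE_REQ_MASK;
    }

    static inline bool sketch_carries_immediate(u8 opcode)
    {
            /* header-presence bit: an ImmDt header follows the BTH */
            return rxe_opcode[opcode].mask & RXE_IMMDT_MASK;
    }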
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
new file mode 100644
index 000000000..f459c43a7
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef RXE_PARAM_H
+#define RXE_PARAM_H
+
+static inline enum ib_mtu rxe_mtu_int_to_enum(int mtu)
+{
+ if (mtu < 256)
+ return 0;
+ else if (mtu < 512)
+ return IB_MTU_256;
+ else if (mtu < 1024)
+ return IB_MTU_512;
+ else if (mtu < 2048)
+ return IB_MTU_1024;
+ else if (mtu < 4096)
+ return IB_MTU_2048;
+ else
+ return IB_MTU_4096;
+}
+
+/* Find the IB MTU enum for a given network MTU. */
+static inline enum ib_mtu eth_mtu_int_to_enum(int mtu)
+{
+ mtu -= RXE_MAX_HDR_LENGTH;
+
+ return rxe_mtu_int_to_enum(mtu);
+}
+
+/* default/initial rxe device parameter settings */
+enum rxe_device_param {
+ RXE_FW_VER = 0,
+ RXE_MAX_MR_SIZE = -1ull,
+ RXE_PAGE_SIZE_CAP = 0xfffff000,
+ RXE_VENDOR_ID = 0,
+ RXE_VENDOR_PART_ID = 0,
+ RXE_HW_VER = 0,
+ RXE_MAX_QP = 0x10000,
+ RXE_MAX_QP_WR = 0x4000,
+ RXE_MAX_INLINE_DATA = 400,
+ RXE_DEVICE_CAP_FLAGS = IB_DEVICE_BAD_PKEY_CNTR
+ | IB_DEVICE_BAD_QKEY_CNTR
+ | IB_DEVICE_AUTO_PATH_MIG
+ | IB_DEVICE_CHANGE_PHY_PORT
+ | IB_DEVICE_UD_AV_PORT_ENFORCE
+ | IB_DEVICE_PORT_ACTIVE_EVENT
+ | IB_DEVICE_SYS_IMAGE_GUID
+ | IB_DEVICE_RC_RNR_NAK_GEN
+ | IB_DEVICE_SRQ_RESIZE
+ | IB_DEVICE_MEM_MGT_EXTENSIONS,
+ RXE_MAX_SGE = 32,
+ RXE_MAX_SGE_RD = 32,
+ RXE_MAX_CQ = 16384,
+ RXE_MAX_LOG_CQE = 13,
+ RXE_MAX_MR = 2 * 1024,
+ RXE_MAX_PD = 0x7ffc,
+ RXE_MAX_QP_RD_ATOM = 128,
+ RXE_MAX_EE_RD_ATOM = 0,
+ RXE_MAX_RES_RD_ATOM = 0x3f000,
+ RXE_MAX_QP_INIT_RD_ATOM = 128,
+ RXE_MAX_EE_INIT_RD_ATOM = 0,
+ RXE_ATOMIC_CAP = 1,
+ RXE_MAX_EE = 0,
+ RXE_MAX_RDD = 0,
+ RXE_MAX_MW = 0,
+ RXE_MAX_RAW_IPV6_QP = 0,
+ RXE_MAX_RAW_ETHY_QP = 0,
+ RXE_MAX_MCAST_GRP = 8192,
+ RXE_MAX_MCAST_QP_ATTACH = 56,
+ RXE_MAX_TOT_MCAST_QP_ATTACH = 0x70000,
+ RXE_MAX_AH = 100,
+ RXE_MAX_FMR = 0,
+ RXE_MAX_MAP_PER_FMR = 0,
+ RXE_MAX_SRQ = 960,
+ RXE_MAX_SRQ_WR = 0x4000,
+ RXE_MIN_SRQ_WR = 1,
+ RXE_MAX_SRQ_SGE = 27,
+ RXE_MIN_SRQ_SGE = 1,
+ RXE_MAX_FMR_PAGE_LIST_LEN = 512,
+ RXE_MAX_PKEYS = 64,
+ RXE_LOCAL_CA_ACK_DELAY = 15,
+
+ RXE_MAX_UCONTEXT = 512,
+
+ RXE_NUM_PORT = 1,
+ RXE_NUM_COMP_VECTORS = 1,
+
+ RXE_MIN_QP_INDEX = 16,
+ RXE_MAX_QP_INDEX = 0x00020000,
+
+ RXE_MIN_SRQ_INDEX = 0x00020001,
+ RXE_MAX_SRQ_INDEX = 0x00040000,
+
+ RXE_MIN_MR_INDEX = 0x00000001,
+ RXE_MAX_MR_INDEX = 0x00040000,
+ RXE_MIN_MW_INDEX = 0x00040001,
+ RXE_MAX_MW_INDEX = 0x00060000,
+ RXE_MAX_PKT_PER_ACK = 64,
+
+ RXE_MAX_UNACKED_PSNS = 128,
+
+ /* Max inflight SKBs per queue pair */
+ RXE_INFLIGHT_SKBS_PER_QP_HIGH = 64,
+ RXE_INFLIGHT_SKBS_PER_QP_LOW = 16,
+
+ /* Delay before calling arbiter timer */
+ RXE_NSEC_ARB_TIMER_DELAY = 200,
+};
+
+/* default/initial rxe port parameters */
+enum rxe_port_param {
+ RXE_PORT_STATE = IB_PORT_DOWN,
+ RXE_PORT_MAX_MTU = IB_MTU_4096,
+ RXE_PORT_ACTIVE_MTU = IB_MTU_256,
+ RXE_PORT_GID_TBL_LEN = 1024,
+ RXE_PORT_PORT_CAP_FLAGS = RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP,
+ RXE_PORT_MAX_MSG_SZ = 0x800000,
+ RXE_PORT_BAD_PKEY_CNTR = 0,
+ RXE_PORT_QKEY_VIOL_CNTR = 0,
+ RXE_PORT_LID = 0,
+ RXE_PORT_SM_LID = 0,
+ RXE_PORT_SM_SL = 0,
+ RXE_PORT_LMC = 0,
+ RXE_PORT_MAX_VL_NUM = 1,
+ RXE_PORT_SUBNET_TIMEOUT = 0,
+ RXE_PORT_INIT_TYPE_REPLY = 0,
+ RXE_PORT_ACTIVE_WIDTH = IB_WIDTH_1X,
+ RXE_PORT_ACTIVE_SPEED = 1,
+ RXE_PORT_PKEY_TBL_LEN = 64,
+ RXE_PORT_PHYS_STATE = 2,
+ RXE_PORT_SUBNET_PREFIX = 0xfe80000000000000ULL,
+};
+
+/* default/initial port info parameters */
+enum rxe_port_info_param {
+ RXE_PORT_INFO_VL_CAP = 4, /* 1-8 */
+ RXE_PORT_INFO_MTU_CAP = 5, /* 4096 */
+ RXE_PORT_INFO_OPER_VL = 1, /* 1 */
+};
+
+#endif /* RXE_PARAM_H */
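
eth_mtu_int_to_enum() subtracts the worst-case RoCE header overhead before classifying, so the advertised IB MTU is the largest enum whose payload still fits in one network frame. A worked sketch, assuming RXE_MAX_HDR_LENGTH (defined elsewhere in the driver) is under 476 bytes:

    enum ib_mtu m;

    m = eth_mtu_int_to_enum(1500);  /* 1500 - hdr lands in [1024, 2048) -> IB_MTU_1024 */
    m = eth_mtu_int_to_enum(9000);  /* jumbo frames leave >= 4096 bytes -> IB_MTU_4096 */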
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
new file mode 100644
index 000000000..6bac0717c
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -0,0 +1,502 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "rxe.h"
+#include "rxe_loc.h"
+
+/* info about object pools
+ * note that mr and mw share a single index space
+ * so that one can map an lkey to the correct type of object
+ */
+struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
+ [RXE_TYPE_UC] = {
+ .name = "rxe-uc",
+ .size = sizeof(struct rxe_ucontext),
+ },
+ [RXE_TYPE_PD] = {
+ .name = "rxe-pd",
+ .size = sizeof(struct rxe_pd),
+ },
+ [RXE_TYPE_AH] = {
+ .name = "rxe-ah",
+ .size = sizeof(struct rxe_ah),
+ .flags = RXE_POOL_ATOMIC,
+ },
+ [RXE_TYPE_SRQ] = {
+ .name = "rxe-srq",
+ .size = sizeof(struct rxe_srq),
+ .flags = RXE_POOL_INDEX,
+ .min_index = RXE_MIN_SRQ_INDEX,
+ .max_index = RXE_MAX_SRQ_INDEX,
+ },
+ [RXE_TYPE_QP] = {
+ .name = "rxe-qp",
+ .size = sizeof(struct rxe_qp),
+ .cleanup = rxe_qp_cleanup,
+ .flags = RXE_POOL_INDEX,
+ .min_index = RXE_MIN_QP_INDEX,
+ .max_index = RXE_MAX_QP_INDEX,
+ },
+ [RXE_TYPE_CQ] = {
+ .name = "rxe-cq",
+ .size = sizeof(struct rxe_cq),
+ .cleanup = rxe_cq_cleanup,
+ },
+ [RXE_TYPE_MR] = {
+ .name = "rxe-mr",
+ .size = sizeof(struct rxe_mem),
+ .cleanup = rxe_mem_cleanup,
+ .flags = RXE_POOL_INDEX,
+ .max_index = RXE_MAX_MR_INDEX,
+ .min_index = RXE_MIN_MR_INDEX,
+ },
+ [RXE_TYPE_MW] = {
+ .name = "rxe-mw",
+ .size = sizeof(struct rxe_mem),
+ .flags = RXE_POOL_INDEX,
+ .max_index = RXE_MAX_MW_INDEX,
+ .min_index = RXE_MIN_MW_INDEX,
+ },
+ [RXE_TYPE_MC_GRP] = {
+ .name = "rxe-mc_grp",
+ .size = sizeof(struct rxe_mc_grp),
+ .cleanup = rxe_mc_cleanup,
+ .flags = RXE_POOL_KEY,
+ .key_offset = offsetof(struct rxe_mc_grp, mgid),
+ .key_size = sizeof(union ib_gid),
+ },
+ [RXE_TYPE_MC_ELEM] = {
+ .name = "rxe-mc_elem",
+ .size = sizeof(struct rxe_mc_elem),
+ .flags = RXE_POOL_ATOMIC,
+ },
+};
+
+static inline char *pool_name(struct rxe_pool *pool)
+{
+ return rxe_type_info[pool->type].name;
+}
+
+static inline struct kmem_cache *pool_cache(struct rxe_pool *pool)
+{
+ return rxe_type_info[pool->type].cache;
+}
+
+static inline enum rxe_elem_type rxe_type(void *arg)
+{
+ struct rxe_pool_entry *elem = arg;
+
+ return elem->pool->type;
+}
+
+int rxe_cache_init(void)
+{
+ int err;
+ int i;
+ size_t size;
+ struct rxe_type_info *type;
+
+ for (i = 0; i < RXE_NUM_TYPES; i++) {
+ type = &rxe_type_info[i];
+ size = ALIGN(type->size, RXE_POOL_ALIGN);
+ type->cache = kmem_cache_create(type->name, size,
+ RXE_POOL_ALIGN,
+ RXE_POOL_CACHE_FLAGS, NULL);
+ if (!type->cache) {
+ pr_err("Unable to init kmem cache for %s\n",
+ type->name);
+ err = -ENOMEM;
+ goto err1;
+ }
+ }
+
+ return 0;
+
+err1:
+	while (--i >= 0) {
+		type = &rxe_type_info[i];	/* was left stale by the loop above */
+		kmem_cache_destroy(type->cache);
+		type->cache = NULL;
+	}
+
+ return err;
+}
+
+void rxe_cache_exit(void)
+{
+ int i;
+ struct rxe_type_info *type;
+
+ for (i = 0; i < RXE_NUM_TYPES; i++) {
+ type = &rxe_type_info[i];
+ kmem_cache_destroy(type->cache);
+ type->cache = NULL;
+ }
+}
+
+static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
+{
+ int err = 0;
+ size_t size;
+
+ if ((max - min + 1) < pool->max_elem) {
+ pr_warn("not enough indices for max_elem\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ pool->max_index = max;
+ pool->min_index = min;
+
+ size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
+ pool->table = kmalloc(size, GFP_KERNEL);
+ if (!pool->table) {
+ pr_warn("no memory for bit table\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ pool->table_size = size;
+ bitmap_zero(pool->table, max - min + 1);
+
+out:
+ return err;
+}
+
+int rxe_pool_init(
+ struct rxe_dev *rxe,
+ struct rxe_pool *pool,
+ enum rxe_elem_type type,
+ unsigned max_elem)
+{
+ int err = 0;
+ size_t size = rxe_type_info[type].size;
+
+ memset(pool, 0, sizeof(*pool));
+
+ pool->rxe = rxe;
+ pool->type = type;
+ pool->max_elem = max_elem;
+ pool->elem_size = ALIGN(size, RXE_POOL_ALIGN);
+ pool->flags = rxe_type_info[type].flags;
+ pool->tree = RB_ROOT;
+ pool->cleanup = rxe_type_info[type].cleanup;
+
+ atomic_set(&pool->num_elem, 0);
+
+ kref_init(&pool->ref_cnt);
+
+ spin_lock_init(&pool->pool_lock);
+
+ if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
+ err = rxe_pool_init_index(pool,
+ rxe_type_info[type].max_index,
+ rxe_type_info[type].min_index);
+ if (err)
+ goto out;
+ }
+
+ if (rxe_type_info[type].flags & RXE_POOL_KEY) {
+ pool->key_offset = rxe_type_info[type].key_offset;
+ pool->key_size = rxe_type_info[type].key_size;
+ }
+
+ pool->state = rxe_pool_valid;
+
+out:
+ return err;
+}
+
+static void rxe_pool_release(struct kref *kref)
+{
+ struct rxe_pool *pool = container_of(kref, struct rxe_pool, ref_cnt);
+
+ pool->state = rxe_pool_invalid;
+ kfree(pool->table);
+}
+
+static void rxe_pool_put(struct rxe_pool *pool)
+{
+ kref_put(&pool->ref_cnt, rxe_pool_release);
+}
+
+int rxe_pool_cleanup(struct rxe_pool *pool)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->pool_lock, flags);
+ pool->state = rxe_pool_invalid;
+ if (atomic_read(&pool->num_elem) > 0)
+ pr_warn("%s pool destroyed with unfree'd elem\n",
+ pool_name(pool));
+ spin_unlock_irqrestore(&pool->pool_lock, flags);
+
+ rxe_pool_put(pool);
+
+ return 0;
+}
+
+static u32 alloc_index(struct rxe_pool *pool)
+{
+ u32 index;
+ u32 range = pool->max_index - pool->min_index + 1;
+
+ index = find_next_zero_bit(pool->table, range, pool->last);
+ if (index >= range)
+ index = find_first_zero_bit(pool->table, range);
+
+ set_bit(index, pool->table);
+ pool->last = index;
+ return index + pool->min_index;
+}
+
+static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
+{
+ struct rb_node **link = &pool->tree.rb_node;
+ struct rb_node *parent = NULL;
+ struct rxe_pool_entry *elem;
+
+ while (*link) {
+ parent = *link;
+ elem = rb_entry(parent, struct rxe_pool_entry, node);
+
+ if (elem->index == new->index) {
+ pr_warn("element already exists!\n");
+ goto out;
+ }
+
+ if (elem->index > new->index)
+ link = &(*link)->rb_left;
+ else
+ link = &(*link)->rb_right;
+ }
+
+ rb_link_node(&new->node, parent, link);
+ rb_insert_color(&new->node, &pool->tree);
+out:
+ return;
+}
+
+static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
+{
+ struct rb_node **link = &pool->tree.rb_node;
+ struct rb_node *parent = NULL;
+ struct rxe_pool_entry *elem;
+ int cmp;
+
+ while (*link) {
+ parent = *link;
+ elem = rb_entry(parent, struct rxe_pool_entry, node);
+
+ cmp = memcmp((u8 *)elem + pool->key_offset,
+ (u8 *)new + pool->key_offset, pool->key_size);
+
+ if (cmp == 0) {
+ pr_warn("key already exists!\n");
+ goto out;
+ }
+
+ if (cmp > 0)
+ link = &(*link)->rb_left;
+ else
+ link = &(*link)->rb_right;
+ }
+
+ rb_link_node(&new->node, parent, link);
+ rb_insert_color(&new->node, &pool->tree);
+out:
+ return;
+}
+
+void rxe_add_key(void *arg, void *key)
+{
+ struct rxe_pool_entry *elem = arg;
+ struct rxe_pool *pool = elem->pool;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->pool_lock, flags);
+ memcpy((u8 *)elem + pool->key_offset, key, pool->key_size);
+ insert_key(pool, elem);
+ spin_unlock_irqrestore(&pool->pool_lock, flags);
+}
+
+void rxe_drop_key(void *arg)
+{
+ struct rxe_pool_entry *elem = arg;
+ struct rxe_pool *pool = elem->pool;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->pool_lock, flags);
+ rb_erase(&elem->node, &pool->tree);
+ spin_unlock_irqrestore(&pool->pool_lock, flags);
+}
+
+void rxe_add_index(void *arg)
+{
+ struct rxe_pool_entry *elem = arg;
+ struct rxe_pool *pool = elem->pool;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->pool_lock, flags);
+ elem->index = alloc_index(pool);
+ insert_index(pool, elem);
+ spin_unlock_irqrestore(&pool->pool_lock, flags);
+}
+
+void rxe_drop_index(void *arg)
+{
+ struct rxe_pool_entry *elem = arg;
+ struct rxe_pool *pool = elem->pool;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->pool_lock, flags);
+ clear_bit(elem->index - pool->min_index, pool->table);
+ rb_erase(&elem->node, &pool->tree);
+ spin_unlock_irqrestore(&pool->pool_lock, flags);
+}
+
+void *rxe_alloc(struct rxe_pool *pool)
+{
+ struct rxe_pool_entry *elem;
+ unsigned long flags;
+
+ might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
+
+ spin_lock_irqsave(&pool->pool_lock, flags);
+ if (pool->state != rxe_pool_valid) {
+ spin_unlock_irqrestore(&pool->pool_lock, flags);
+ return NULL;
+ }
+ kref_get(&pool->ref_cnt);
+ spin_unlock_irqrestore(&pool->pool_lock, flags);
+
+ kref_get(&pool->rxe->ref_cnt);
+
+ if (atomic_inc_return(&pool->num_elem) > pool->max_elem) {
+ atomic_dec(&pool->num_elem);
+ rxe_dev_put(pool->rxe);
+ rxe_pool_put(pool);
+ return NULL;
+ }
+
+	elem = kmem_cache_zalloc(pool_cache(pool),
+				 (pool->flags & RXE_POOL_ATOMIC) ?
+				 GFP_ATOMIC : GFP_KERNEL);
+	if (!elem) {
+		/* undo the accounting and references taken above */
+		atomic_dec(&pool->num_elem);
+		rxe_dev_put(pool->rxe);
+		rxe_pool_put(pool);
+		return NULL;
+	}
+
+ elem->pool = pool;
+ kref_init(&elem->ref_cnt);
+
+ return elem;
+}
+
+void rxe_elem_release(struct kref *kref)
+{
+ struct rxe_pool_entry *elem =
+ container_of(kref, struct rxe_pool_entry, ref_cnt);
+ struct rxe_pool *pool = elem->pool;
+
+ if (pool->cleanup)
+ pool->cleanup(elem);
+
+ kmem_cache_free(pool_cache(pool), elem);
+ atomic_dec(&pool->num_elem);
+ rxe_dev_put(pool->rxe);
+ rxe_pool_put(pool);
+}
+
+void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
+{
+ struct rb_node *node = NULL;
+ struct rxe_pool_entry *elem = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->pool_lock, flags);
+
+ if (pool->state != rxe_pool_valid)
+ goto out;
+
+ node = pool->tree.rb_node;
+
+ while (node) {
+ elem = rb_entry(node, struct rxe_pool_entry, node);
+
+ if (elem->index > index)
+ node = node->rb_left;
+ else if (elem->index < index)
+ node = node->rb_right;
+ else
+ break;
+ }
+
+ if (node)
+ kref_get(&elem->ref_cnt);
+
+out:
+ spin_unlock_irqrestore(&pool->pool_lock, flags);
+ return node ? (void *)elem : NULL;
+}
+
+void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
+{
+ struct rb_node *node = NULL;
+ struct rxe_pool_entry *elem = NULL;
+ int cmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->pool_lock, flags);
+
+ if (pool->state != rxe_pool_valid)
+ goto out;
+
+ node = pool->tree.rb_node;
+
+ while (node) {
+ elem = rb_entry(node, struct rxe_pool_entry, node);
+
+ cmp = memcmp((u8 *)elem + pool->key_offset,
+ key, pool->key_size);
+
+ if (cmp > 0)
+ node = node->rb_left;
+ else if (cmp < 0)
+ node = node->rb_right;
+ else
+ break;
+ }
+
+ if (node)
+ kref_get(&elem->ref_cnt);
+
+out:
+ spin_unlock_irqrestore(&pool->pool_lock, flags);
+	return node ? (void *)elem : NULL;
+}
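
Both lookup routines return the object with a reference already taken under the pool lock, so a hit can never race with the final put. A hedged usage sketch; `pool` and `index` are illustrative placeholders:

    struct rxe_pool_entry *elem;

    elem = rxe_pool_get_index(pool, index);
    if (elem) {
            /* safe to use: the lookup took a reference for us */
            kref_put(&elem->ref_cnt, rxe_elem_release);  /* drop it when done */
    }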
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
new file mode 100644
index 000000000..4d04830ad
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef RXE_POOL_H
+#define RXE_POOL_H
+
+#define RXE_POOL_ALIGN (16)
+#define RXE_POOL_CACHE_FLAGS (0)
+
+enum rxe_pool_flags {
+ RXE_POOL_ATOMIC = BIT(0),
+ RXE_POOL_INDEX = BIT(1),
+ RXE_POOL_KEY = BIT(2),
+};
+
+enum rxe_elem_type {
+ RXE_TYPE_UC,
+ RXE_TYPE_PD,
+ RXE_TYPE_AH,
+ RXE_TYPE_SRQ,
+ RXE_TYPE_QP,
+ RXE_TYPE_CQ,
+ RXE_TYPE_MR,
+ RXE_TYPE_MW,
+ RXE_TYPE_MC_GRP,
+ RXE_TYPE_MC_ELEM,
+ RXE_NUM_TYPES, /* keep me last */
+};
+
+struct rxe_type_info {
+ char *name;
+ size_t size;
+ void (*cleanup)(void *obj);
+ enum rxe_pool_flags flags;
+ u32 max_index;
+ u32 min_index;
+ size_t key_offset;
+ size_t key_size;
+ struct kmem_cache *cache;
+};
+
+extern struct rxe_type_info rxe_type_info[];
+
+enum rxe_pool_state {
+ rxe_pool_invalid,
+ rxe_pool_valid,
+};
+
+struct rxe_pool_entry {
+ struct rxe_pool *pool;
+ struct kref ref_cnt;
+ struct list_head list;
+
+ /* only used if indexed or keyed */
+ struct rb_node node;
+ u32 index;
+};
+
+struct rxe_pool {
+ struct rxe_dev *rxe;
+ spinlock_t pool_lock; /* pool spinlock */
+ size_t elem_size;
+ struct kref ref_cnt;
+ void (*cleanup)(void *obj);
+ enum rxe_pool_state state;
+ enum rxe_pool_flags flags;
+ enum rxe_elem_type type;
+
+ unsigned int max_elem;
+ atomic_t num_elem;
+
+ /* only used if indexed or keyed */
+ struct rb_root tree;
+ unsigned long *table;
+ size_t table_size;
+ u32 max_index;
+ u32 min_index;
+ u32 last;
+ size_t key_offset;
+ size_t key_size;
+};
+
+/* initialize slab caches for managed objects */
+int rxe_cache_init(void);
+
+/* cleanup slab caches for managed objects */
+void rxe_cache_exit(void);
+
+/* initialize a pool of objects with a given limit on the
+ * number of elements; gets parameters from rxe_type_info.
+ * pool elements will be allocated out of a slab cache
+ */
+int rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
+ enum rxe_elem_type type, u32 max_elem);
+
+/* free resources from object pool */
+int rxe_pool_cleanup(struct rxe_pool *pool);
+
+/* allocate an object from pool */
+void *rxe_alloc(struct rxe_pool *pool);
+
+/* assign an index to an indexed object and insert object into
+ * pool's rb tree
+ */
+void rxe_add_index(void *elem);
+
+/* drop an index and remove object from rb tree */
+void rxe_drop_index(void *elem);
+
+/* assign a key to a keyed object and insert object into
+ * pool's rb tree
+ */
+void rxe_add_key(void *elem, void *key);
+
+/* remove elem from rb tree */
+void rxe_drop_key(void *elem);
+
+/* lookup an indexed object from index. takes a reference on object */
+void *rxe_pool_get_index(struct rxe_pool *pool, u32 index);
+
+/* lookup keyed object from key. takes a reference on the object */
+void *rxe_pool_get_key(struct rxe_pool *pool, void *key);
+
+/* cleanup an object when all references are dropped */
+void rxe_elem_release(struct kref *kref);
+
+/* take a reference on an object */
+#define rxe_add_ref(elem) kref_get(&(elem)->pelem.ref_cnt)
+
+/* drop a reference on an object */
+#define rxe_drop_ref(elem) kref_put(&(elem)->pelem.ref_cnt, rxe_elem_release)
+
+#endif /* RXE_POOL_H */
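
Taken together, the API above implies a fixed object lifecycle. A minimal sketch for an indexed type, assuming a valid `rxe` device and eliding error handling; note that real pool objects embed struct rxe_pool_entry as a `pelem` member, which the ref macros rely on:

    struct rxe_pool pool;
    struct rxe_qp *qp;

    rxe_pool_init(rxe, &pool, RXE_TYPE_QP, RXE_MAX_QP);

    qp = rxe_alloc(&pool);   /* zeroed slab object, one reference held */
    rxe_add_index(qp);       /* assign an index, insert into the rb tree */

    /* ... object is now visible via rxe_pool_get_index() ... */

    rxe_drop_index(qp);
    rxe_drop_ref(qp);        /* final put runs the rxe_qp_cleanup() hook */

    rxe_pool_cleanup(&pool);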
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
new file mode 100644
index 000000000..22ba24f2a
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -0,0 +1,851 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include "rxe.h"
+#include "rxe_loc.h"
+#include "rxe_queue.h"
+#include "rxe_task.h"
+
+char *rxe_qp_state_name[] = {
+ [QP_STATE_RESET] = "RESET",
+ [QP_STATE_INIT] = "INIT",
+ [QP_STATE_READY] = "READY",
+ [QP_STATE_DRAIN] = "DRAIN",
+ [QP_STATE_DRAINED] = "DRAINED",
+ [QP_STATE_ERROR] = "ERROR",
+};
+
+static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
+ int has_srq)
+{
+ if (cap->max_send_wr > rxe->attr.max_qp_wr) {
+ pr_warn("invalid send wr = %d > %d\n",
+ cap->max_send_wr, rxe->attr.max_qp_wr);
+ goto err1;
+ }
+
+ if (cap->max_send_sge > rxe->attr.max_sge) {
+ pr_warn("invalid send sge = %d > %d\n",
+ cap->max_send_sge, rxe->attr.max_sge);
+ goto err1;
+ }
+
+ if (!has_srq) {
+ if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
+ pr_warn("invalid recv wr = %d > %d\n",
+ cap->max_recv_wr, rxe->attr.max_qp_wr);
+ goto err1;
+ }
+
+ if (cap->max_recv_sge > rxe->attr.max_sge) {
+ pr_warn("invalid recv sge = %d > %d\n",
+ cap->max_recv_sge, rxe->attr.max_sge);
+ goto err1;
+ }
+ }
+
+ if (cap->max_inline_data > rxe->max_inline_data) {
+ pr_warn("invalid max inline data = %d > %d\n",
+ cap->max_inline_data, rxe->max_inline_data);
+ goto err1;
+ }
+
+ return 0;
+
+err1:
+ return -EINVAL;
+}
+
+int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
+{
+ struct ib_qp_cap *cap = &init->cap;
+ struct rxe_port *port;
+ int port_num = init->port_num;
+
+ if (!init->recv_cq || !init->send_cq) {
+ pr_warn("missing cq\n");
+ goto err1;
+ }
+
+ if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
+ goto err1;
+
+ if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) {
+ if (port_num != 1) {
+ pr_warn("invalid port = %d\n", port_num);
+ goto err1;
+ }
+
+ port = &rxe->port;
+
+ if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) {
+ pr_warn("SMI QP exists for port %d\n", port_num);
+ goto err1;
+ }
+
+ if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
+ pr_warn("GSI QP exists for port %d\n", port_num);
+ goto err1;
+ }
+ }
+
+ return 0;
+
+err1:
+ return -EINVAL;
+}
+
+static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
+{
+ qp->resp.res_head = 0;
+ qp->resp.res_tail = 0;
+ qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);
+
+ if (!qp->resp.resources)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void free_rd_atomic_resources(struct rxe_qp *qp)
+{
+ if (qp->resp.resources) {
+ int i;
+
+ for (i = 0; i < qp->attr.max_rd_atomic; i++) {
+ struct resp_res *res = &qp->resp.resources[i];
+
+ free_rd_atomic_resource(qp, res);
+ }
+ kfree(qp->resp.resources);
+ qp->resp.resources = NULL;
+ }
+}
+
+void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
+{
+ if (res->type == RXE_ATOMIC_MASK) {
+ rxe_drop_ref(qp);
+ kfree_skb(res->atomic.skb);
+ } else if (res->type == RXE_READ_MASK) {
+ if (res->read.mr)
+ rxe_drop_ref(res->read.mr);
+ }
+ res->type = 0;
+}
+
+static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
+{
+ int i;
+ struct resp_res *res;
+
+ if (qp->resp.resources) {
+ for (i = 0; i < qp->attr.max_rd_atomic; i++) {
+ res = &qp->resp.resources[i];
+ free_rd_atomic_resource(qp, res);
+ }
+ }
+}
+
+static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
+ struct ib_qp_init_attr *init)
+{
+ struct rxe_port *port;
+ u32 qpn;
+
+ qp->sq_sig_type = init->sq_sig_type;
+ qp->attr.path_mtu = 1;
+ qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);
+
+ qpn = qp->pelem.index;
+ port = &rxe->port;
+
+ switch (init->qp_type) {
+ case IB_QPT_SMI:
+ qp->ibqp.qp_num = 0;
+ port->qp_smi_index = qpn;
+ qp->attr.port_num = init->port_num;
+ break;
+
+ case IB_QPT_GSI:
+ qp->ibqp.qp_num = 1;
+ port->qp_gsi_index = qpn;
+ qp->attr.port_num = init->port_num;
+ break;
+
+ default:
+ qp->ibqp.qp_num = qpn;
+ break;
+ }
+
+ INIT_LIST_HEAD(&qp->grp_list);
+
+ skb_queue_head_init(&qp->send_pkts);
+
+ spin_lock_init(&qp->grp_lock);
+ spin_lock_init(&qp->state_lock);
+
+ atomic_set(&qp->ssn, 0);
+ atomic_set(&qp->skb_out, 0);
+}
+
+static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
+ struct ib_qp_init_attr *init,
+ struct ib_ucontext *context, struct ib_udata *udata)
+{
+ int err;
+ int wqe_size;
+
+ err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
+ if (err < 0)
+ return err;
+ qp->sk->sk->sk_user_data = qp;
+
+ qp->sq.max_wr = init->cap.max_send_wr;
+ qp->sq.max_sge = init->cap.max_send_sge;
+ qp->sq.max_inline = init->cap.max_inline_data;
+
+ wqe_size = max_t(int, sizeof(struct rxe_send_wqe) +
+ qp->sq.max_sge * sizeof(struct ib_sge),
+ sizeof(struct rxe_send_wqe) +
+ qp->sq.max_inline);
+
+ qp->sq.queue = rxe_queue_init(rxe,
+ &qp->sq.max_wr,
+ wqe_size);
+ if (!qp->sq.queue)
+ return -ENOMEM;
+
+ err = do_mmap_info(rxe, udata, true,
+ context, qp->sq.queue->buf,
+ qp->sq.queue->buf_size, &qp->sq.queue->ip);
+
+ if (err) {
+ kvfree(qp->sq.queue->buf);
+ kfree(qp->sq.queue);
+ return err;
+ }
+
+ qp->req.wqe_index = producer_index(qp->sq.queue);
+ qp->req.state = QP_STATE_RESET;
+ qp->req.opcode = -1;
+ qp->comp.opcode = -1;
+
+ spin_lock_init(&qp->sq.sq_lock);
+ skb_queue_head_init(&qp->req_pkts);
+
+ rxe_init_task(rxe, &qp->req.task, qp,
+ rxe_requester, "req");
+ rxe_init_task(rxe, &qp->comp.task, qp,
+ rxe_completer, "comp");
+
+ init_timer(&qp->rnr_nak_timer);
+ qp->rnr_nak_timer.function = rnr_nak_timer;
+ qp->rnr_nak_timer.data = (unsigned long)qp;
+
+ init_timer(&qp->retrans_timer);
+ qp->retrans_timer.function = retransmit_timer;
+ qp->retrans_timer.data = (unsigned long)qp;
+ qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
+
+ return 0;
+}
+
+static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
+ struct ib_qp_init_attr *init,
+ struct ib_ucontext *context, struct ib_udata *udata)
+{
+ int err;
+ int wqe_size;
+
+ if (!qp->srq) {
+ qp->rq.max_wr = init->cap.max_recv_wr;
+ qp->rq.max_sge = init->cap.max_recv_sge;
+
+ wqe_size = rcv_wqe_size(qp->rq.max_sge);
+
+ pr_debug("max_wr = %d, max_sge = %d, wqe_size = %d\n",
+ qp->rq.max_wr, qp->rq.max_sge, wqe_size);
+
+ qp->rq.queue = rxe_queue_init(rxe,
+ &qp->rq.max_wr,
+ wqe_size);
+ if (!qp->rq.queue)
+ return -ENOMEM;
+
+ err = do_mmap_info(rxe, udata, false, context,
+ qp->rq.queue->buf,
+ qp->rq.queue->buf_size,
+ &qp->rq.queue->ip);
+ if (err) {
+ kvfree(qp->rq.queue->buf);
+ kfree(qp->rq.queue);
+ return err;
+ }
+ }
+
+ spin_lock_init(&qp->rq.producer_lock);
+ spin_lock_init(&qp->rq.consumer_lock);
+
+ skb_queue_head_init(&qp->resp_pkts);
+
+ rxe_init_task(rxe, &qp->resp.task, qp,
+ rxe_responder, "resp");
+
+ qp->resp.opcode = OPCODE_NONE;
+ qp->resp.msn = 0;
+ qp->resp.state = QP_STATE_RESET;
+
+ return 0;
+}
+
+/* called by the create qp verb */
+int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
+ struct ib_qp_init_attr *init, struct ib_udata *udata,
+ struct ib_pd *ibpd)
+{
+ int err;
+ struct rxe_cq *rcq = to_rcq(init->recv_cq);
+ struct rxe_cq *scq = to_rcq(init->send_cq);
+ struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
+ struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;
+
+ rxe_add_ref(pd);
+ rxe_add_ref(rcq);
+ rxe_add_ref(scq);
+ if (srq)
+ rxe_add_ref(srq);
+
+ qp->pd = pd;
+ qp->rcq = rcq;
+ qp->scq = scq;
+ qp->srq = srq;
+
+ rxe_qp_init_misc(rxe, qp, init);
+
+ err = rxe_qp_init_req(rxe, qp, init, context, udata);
+ if (err)
+ goto err1;
+
+ err = rxe_qp_init_resp(rxe, qp, init, context, udata);
+ if (err)
+ goto err2;
+
+ qp->attr.qp_state = IB_QPS_RESET;
+ qp->valid = 1;
+
+ return 0;
+
+err2:
+ rxe_queue_cleanup(qp->sq.queue);
+err1:
+ if (srq)
+ rxe_drop_ref(srq);
+ rxe_drop_ref(scq);
+ rxe_drop_ref(rcq);
+ rxe_drop_ref(pd);
+
+ return err;
+}
+
+/* called by the query qp verb */
+int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
+{
+ init->event_handler = qp->ibqp.event_handler;
+ init->qp_context = qp->ibqp.qp_context;
+ init->send_cq = qp->ibqp.send_cq;
+ init->recv_cq = qp->ibqp.recv_cq;
+ init->srq = qp->ibqp.srq;
+
+ init->cap.max_send_wr = qp->sq.max_wr;
+ init->cap.max_send_sge = qp->sq.max_sge;
+ init->cap.max_inline_data = qp->sq.max_inline;
+
+ if (!qp->srq) {
+ init->cap.max_recv_wr = qp->rq.max_wr;
+ init->cap.max_recv_sge = qp->rq.max_sge;
+ }
+
+ init->sq_sig_type = qp->sq_sig_type;
+
+ init->qp_type = qp->ibqp.qp_type;
+ init->port_num = 1;
+
+ return 0;
+}
+
+/* called by the modify qp verb, this routine checks all the parameters before
+ * making any changes
+ */
+int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
+ struct ib_qp_attr *attr, int mask)
+{
+ enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
+ attr->cur_qp_state : qp->attr.qp_state;
+ enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
+ attr->qp_state : cur_state;
+
+ if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask,
+ IB_LINK_LAYER_ETHERNET)) {
+ pr_warn("invalid mask or state for qp\n");
+ goto err1;
+ }
+
+ if (mask & IB_QP_STATE) {
+ if (cur_state == IB_QPS_SQD) {
+ if (qp->req.state == QP_STATE_DRAIN &&
+ new_state != IB_QPS_ERR)
+ goto err1;
+ }
+ }
+
+ if (mask & IB_QP_PORT) {
+ if (attr->port_num != 1) {
+ pr_warn("invalid port %d\n", attr->port_num);
+ goto err1;
+ }
+ }
+
+ if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
+ goto err1;
+
+ if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
+ goto err1;
+
+ if (mask & IB_QP_ALT_PATH) {
+ if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
+ goto err1;
+ if (attr->alt_port_num != 1) {
+ pr_warn("invalid alt port %d\n", attr->alt_port_num);
+ goto err1;
+ }
+ if (attr->alt_timeout > 31) {
+ pr_warn("invalid QP alt timeout %d > 31\n",
+ attr->alt_timeout);
+ goto err1;
+ }
+ }
+
+ if (mask & IB_QP_PATH_MTU) {
+ struct rxe_port *port = &rxe->port;
+
+ enum ib_mtu max_mtu = port->attr.max_mtu;
+ enum ib_mtu mtu = attr->path_mtu;
+
+ if (mtu > max_mtu) {
+ pr_debug("invalid mtu (%d) > (%d)\n",
+ ib_mtu_enum_to_int(mtu),
+ ib_mtu_enum_to_int(max_mtu));
+ goto err1;
+ }
+ }
+
+ if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
+ if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
+ pr_warn("invalid max_rd_atomic %d > %d\n",
+ attr->max_rd_atomic,
+ rxe->attr.max_qp_rd_atom);
+ goto err1;
+ }
+ }
+
+ if (mask & IB_QP_TIMEOUT) {
+ if (attr->timeout > 31) {
+ pr_warn("invalid QP timeout %d > 31\n",
+ attr->timeout);
+ goto err1;
+ }
+ }
+
+ return 0;
+
+err1:
+ return -EINVAL;
+}
+
+/* move the qp to the reset state */
+static void rxe_qp_reset(struct rxe_qp *qp)
+{
+ /* stop tasks from running */
+ rxe_disable_task(&qp->resp.task);
+
+ /* stop request/comp */
+ if (qp->sq.queue) {
+ if (qp_type(qp) == IB_QPT_RC)
+ rxe_disable_task(&qp->comp.task);
+ rxe_disable_task(&qp->req.task);
+ }
+
+ /* move qp to the reset state */
+ qp->req.state = QP_STATE_RESET;
+ qp->resp.state = QP_STATE_RESET;
+
+	/* let the state machines reset themselves, drain work and packet
+	 * queues, etc.
+ */
+ __rxe_do_task(&qp->resp.task);
+
+ if (qp->sq.queue) {
+ __rxe_do_task(&qp->comp.task);
+ __rxe_do_task(&qp->req.task);
+ }
+
+ /* cleanup attributes */
+ atomic_set(&qp->ssn, 0);
+ qp->req.opcode = -1;
+ qp->req.need_retry = 0;
+ qp->req.noack_pkts = 0;
+ qp->resp.msn = 0;
+ qp->resp.opcode = -1;
+ qp->resp.drop_msg = 0;
+ qp->resp.goto_error = 0;
+ qp->resp.sent_psn_nak = 0;
+
+ if (qp->resp.mr) {
+ rxe_drop_ref(qp->resp.mr);
+ qp->resp.mr = NULL;
+ }
+
+ cleanup_rd_atomic_resources(qp);
+
+ /* reenable tasks */
+ rxe_enable_task(&qp->resp.task);
+
+ if (qp->sq.queue) {
+ if (qp_type(qp) == IB_QPT_RC)
+ rxe_enable_task(&qp->comp.task);
+
+ rxe_enable_task(&qp->req.task);
+ }
+}
+
+/* drain the send queue */
+static void rxe_qp_drain(struct rxe_qp *qp)
+{
+ if (qp->sq.queue) {
+ if (qp->req.state != QP_STATE_DRAINED) {
+ qp->req.state = QP_STATE_DRAIN;
+ if (qp_type(qp) == IB_QPT_RC)
+ rxe_run_task(&qp->comp.task, 1);
+ else
+ __rxe_do_task(&qp->comp.task);
+ rxe_run_task(&qp->req.task, 1);
+ }
+ }
+}
+
+/* move the qp to the error state */
+void rxe_qp_error(struct rxe_qp *qp)
+{
+ qp->req.state = QP_STATE_ERROR;
+ qp->resp.state = QP_STATE_ERROR;
+
+ /* drain work and packet queues */
+ rxe_run_task(&qp->resp.task, 1);
+
+ if (qp_type(qp) == IB_QPT_RC)
+ rxe_run_task(&qp->comp.task, 1);
+ else
+ __rxe_do_task(&qp->comp.task);
+ rxe_run_task(&qp->req.task, 1);
+}
+
+/* called by the modify qp verb */
+int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
+ struct ib_udata *udata)
+{
+ int err;
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ union ib_gid sgid;
+ struct ib_gid_attr sgid_attr;
+
+ if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
+ int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);
+
+ free_rd_atomic_resources(qp);
+
+ err = alloc_rd_atomic_resources(qp, max_rd_atomic);
+ if (err)
+ return err;
+
+ qp->attr.max_rd_atomic = max_rd_atomic;
+ atomic_set(&qp->req.rd_atomic, max_rd_atomic);
+ }
+
+ if (mask & IB_QP_CUR_STATE)
+ qp->attr.cur_qp_state = attr->qp_state;
+
+ if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
+ qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;
+
+ if (mask & IB_QP_ACCESS_FLAGS)
+ qp->attr.qp_access_flags = attr->qp_access_flags;
+
+ if (mask & IB_QP_PKEY_INDEX)
+ qp->attr.pkey_index = attr->pkey_index;
+
+ if (mask & IB_QP_PORT)
+ qp->attr.port_num = attr->port_num;
+
+ if (mask & IB_QP_QKEY)
+ qp->attr.qkey = attr->qkey;
+
+ if (mask & IB_QP_AV) {
+ ib_get_cached_gid(&rxe->ib_dev, 1,
+ attr->ah_attr.grh.sgid_index, &sgid,
+ &sgid_attr);
+ rxe_av_from_attr(rxe, attr->port_num, &qp->pri_av,
+ &attr->ah_attr);
+ rxe_av_fill_ip_info(rxe, &qp->pri_av, &attr->ah_attr,
+ &sgid_attr, &sgid);
+ if (sgid_attr.ndev)
+ dev_put(sgid_attr.ndev);
+ }
+
+ if (mask & IB_QP_ALT_PATH) {
+ ib_get_cached_gid(&rxe->ib_dev, 1,
+ attr->alt_ah_attr.grh.sgid_index, &sgid,
+ &sgid_attr);
+
+ rxe_av_from_attr(rxe, attr->alt_port_num, &qp->alt_av,
+ &attr->alt_ah_attr);
+ rxe_av_fill_ip_info(rxe, &qp->alt_av, &attr->alt_ah_attr,
+ &sgid_attr, &sgid);
+ if (sgid_attr.ndev)
+ dev_put(sgid_attr.ndev);
+
+ qp->attr.alt_port_num = attr->alt_port_num;
+ qp->attr.alt_pkey_index = attr->alt_pkey_index;
+ qp->attr.alt_timeout = attr->alt_timeout;
+ }
+
+ if (mask & IB_QP_PATH_MTU) {
+ qp->attr.path_mtu = attr->path_mtu;
+ qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
+ }
+
+ if (mask & IB_QP_TIMEOUT) {
+ qp->attr.timeout = attr->timeout;
+ if (attr->timeout == 0) {
+ qp->qp_timeout_jiffies = 0;
+ } else {
+ /* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
+ int j = nsecs_to_jiffies(4096ULL << attr->timeout);
+
+ qp->qp_timeout_jiffies = j ? j : 1;
+ }
+ }
+
+ if (mask & IB_QP_RETRY_CNT) {
+ qp->attr.retry_cnt = attr->retry_cnt;
+ qp->comp.retry_cnt = attr->retry_cnt;
+ pr_debug("set retry count = %d\n", attr->retry_cnt);
+ }
+
+ if (mask & IB_QP_RNR_RETRY) {
+ qp->attr.rnr_retry = attr->rnr_retry;
+ qp->comp.rnr_retry = attr->rnr_retry;
+ pr_debug("set rnr retry count = %d\n", attr->rnr_retry);
+ }
+
+ if (mask & IB_QP_RQ_PSN) {
+ qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
+ qp->resp.psn = qp->attr.rq_psn;
+ pr_debug("set resp psn = 0x%x\n", qp->resp.psn);
+ }
+
+ if (mask & IB_QP_MIN_RNR_TIMER) {
+ qp->attr.min_rnr_timer = attr->min_rnr_timer;
+ pr_debug("set min rnr timer = 0x%x\n",
+ attr->min_rnr_timer);
+ }
+
+ if (mask & IB_QP_SQ_PSN) {
+ qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
+ qp->req.psn = qp->attr.sq_psn;
+ qp->comp.psn = qp->attr.sq_psn;
+ pr_debug("set req psn = 0x%x\n", qp->req.psn);
+ }
+
+ if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+ qp->attr.max_dest_rd_atomic =
+ __roundup_pow_of_two(attr->max_dest_rd_atomic);
+ }
+
+ if (mask & IB_QP_PATH_MIG_STATE)
+ qp->attr.path_mig_state = attr->path_mig_state;
+
+ if (mask & IB_QP_DEST_QPN)
+ qp->attr.dest_qp_num = attr->dest_qp_num;
+
+ if (mask & IB_QP_STATE) {
+ qp->attr.qp_state = attr->qp_state;
+
+ switch (attr->qp_state) {
+ case IB_QPS_RESET:
+ pr_debug("qp state -> RESET\n");
+ rxe_qp_reset(qp);
+ break;
+
+ case IB_QPS_INIT:
+ pr_debug("qp state -> INIT\n");
+ qp->req.state = QP_STATE_INIT;
+ qp->resp.state = QP_STATE_INIT;
+ break;
+
+ case IB_QPS_RTR:
+ pr_debug("qp state -> RTR\n");
+ qp->resp.state = QP_STATE_READY;
+ break;
+
+ case IB_QPS_RTS:
+ pr_debug("qp state -> RTS\n");
+ qp->req.state = QP_STATE_READY;
+ break;
+
+ case IB_QPS_SQD:
+ pr_debug("qp state -> SQD\n");
+ rxe_qp_drain(qp);
+ break;
+
+ case IB_QPS_SQE:
+ pr_warn("qp state -> SQE !!?\n");
+ /* Not possible from modify_qp. */
+ break;
+
+ case IB_QPS_ERR:
+ pr_debug("qp state -> ERR\n");
+ rxe_qp_error(qp);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/* called by the query qp verb */
+int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+
+ *attr = qp->attr;
+
+ attr->rq_psn = qp->resp.psn;
+ attr->sq_psn = qp->req.psn;
+
+ attr->cap.max_send_wr = qp->sq.max_wr;
+ attr->cap.max_send_sge = qp->sq.max_sge;
+ attr->cap.max_inline_data = qp->sq.max_inline;
+
+ if (!qp->srq) {
+ attr->cap.max_recv_wr = qp->rq.max_wr;
+ attr->cap.max_recv_sge = qp->rq.max_sge;
+ }
+
+ rxe_av_to_attr(rxe, &qp->pri_av, &attr->ah_attr);
+ rxe_av_to_attr(rxe, &qp->alt_av, &attr->alt_ah_attr);
+
+ if (qp->req.state == QP_STATE_DRAIN) {
+ attr->sq_draining = 1;
+ /* applications that get this state
+ * typically spin on it. yield the
+ * processor
+ */
+ cond_resched();
+ } else {
+ attr->sq_draining = 0;
+ }
+
+ pr_debug("attr->sq_draining = %d\n", attr->sq_draining);
+
+ return 0;
+}
+
+/* called by the destroy qp verb */
+void rxe_qp_destroy(struct rxe_qp *qp)
+{
+ qp->valid = 0;
+ qp->qp_timeout_jiffies = 0;
+ rxe_cleanup_task(&qp->resp.task);
+
+ del_timer_sync(&qp->retrans_timer);
+ del_timer_sync(&qp->rnr_nak_timer);
+
+ rxe_cleanup_task(&qp->req.task);
+ if (qp_type(qp) == IB_QPT_RC)
+ rxe_cleanup_task(&qp->comp.task);
+
+ /* flush out any receive wr's or pending requests */
+ __rxe_do_task(&qp->req.task);
+ if (qp->sq.queue) {
+ __rxe_do_task(&qp->comp.task);
+ __rxe_do_task(&qp->req.task);
+ }
+}
+
+/* called when the last reference to the qp is dropped */
+void rxe_qp_cleanup(void *arg)
+{
+ struct rxe_qp *qp = arg;
+
+ rxe_drop_all_mcast_groups(qp);
+
+ if (qp->sq.queue)
+ rxe_queue_cleanup(qp->sq.queue);
+
+ if (qp->srq)
+ rxe_drop_ref(qp->srq);
+
+ if (qp->rq.queue)
+ rxe_queue_cleanup(qp->rq.queue);
+
+ if (qp->scq)
+ rxe_drop_ref(qp->scq);
+ if (qp->rcq)
+ rxe_drop_ref(qp->rcq);
+ if (qp->pd)
+ rxe_drop_ref(qp->pd);
+
+ if (qp->resp.mr) {
+ rxe_drop_ref(qp->resp.mr);
+ qp->resp.mr = NULL;
+ }
+
+ free_rd_atomic_resources(qp);
+
+ kernel_sock_shutdown(qp->sk, SHUT_RDWR);
+}
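
One detail in rxe_qp_from_attr() deserves a worked example: the IB timeout attribute is an exponent, not a duration. The QP timeout is 4.096 us * 2^attr->timeout, computed above as (4096ULL << attr->timeout) nanoseconds before conversion to jiffies:

    u64 ns14 = 4096ULL << 14;   /* 67,108,864 ns, about 67 ms            */
    u64 ns31 = 4096ULL << 31;   /* about 8796 s, the largest encodable   */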
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
new file mode 100644
index 000000000..08274254e
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_queue.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/vmalloc.h>
+#include "rxe.h"
+#include "rxe_loc.h"
+#include "rxe_queue.h"
+
+int do_mmap_info(struct rxe_dev *rxe,
+ struct ib_udata *udata,
+ bool is_req,
+ struct ib_ucontext *context,
+ struct rxe_queue_buf *buf,
+ size_t buf_size,
+ struct rxe_mmap_info **ip_p)
+{
+ int err;
+ u32 len, offset;
+ struct rxe_mmap_info *ip = NULL;
+
+ if (udata) {
+ if (is_req) {
+ len = udata->outlen - sizeof(struct mminfo);
+ offset = sizeof(struct mminfo);
+ } else {
+ len = udata->outlen;
+ offset = 0;
+ }
+
+ if (len < sizeof(ip->info))
+ goto err1;
+
+ ip = rxe_create_mmap_info(rxe, buf_size, context, buf);
+ if (!ip)
+ goto err1;
+
+ err = copy_to_user(udata->outbuf + offset, &ip->info,
+ sizeof(ip->info));
+ if (err)
+ goto err2;
+
+ spin_lock_bh(&rxe->pending_lock);
+ list_add(&ip->pending_mmaps, &rxe->pending_mmaps);
+ spin_unlock_bh(&rxe->pending_lock);
+ }
+
+ *ip_p = ip;
+
+ return 0;
+
+err2:
+ kfree(ip);
+err1:
+ return -EINVAL;
+}
+
+struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
+ int *num_elem,
+ unsigned int elem_size)
+{
+ struct rxe_queue *q;
+ size_t buf_size;
+ unsigned int num_slots;
+
+ /* num_elem == 0 is allowed, but uninteresting */
+ if (*num_elem < 0)
+ goto err1;
+
+ q = kmalloc(sizeof(*q), GFP_KERNEL);
+ if (!q)
+ goto err1;
+
+ q->rxe = rxe;
+
+ /* used in resize, only need to copy used part of queue */
+ q->elem_size = elem_size;
+
+ /* pad element up to at least a cacheline and always a power of 2 */
+ if (elem_size < cache_line_size())
+ elem_size = cache_line_size();
+ elem_size = roundup_pow_of_two(elem_size);
+
+ q->log2_elem_size = order_base_2(elem_size);
+
+ num_slots = *num_elem + 1;
+ num_slots = roundup_pow_of_two(num_slots);
+ q->index_mask = num_slots - 1;
+
+ buf_size = sizeof(struct rxe_queue_buf) + num_slots * elem_size;
+
+ q->buf = vmalloc_user(buf_size);
+ if (!q->buf)
+ goto err2;
+
+ q->buf->log2_elem_size = q->log2_elem_size;
+ q->buf->index_mask = q->index_mask;
+
+ q->buf_size = buf_size;
+
+ *num_elem = num_slots - 1;
+ return q;
+
+err2:
+ kfree(q);
+err1:
+ return NULL;
+}
+
+/* copies elements from original q to new q and then swaps the contents of the
+ * two q headers. This is so that if anyone is holding a pointer to q it will
+ * still work
+ */
+static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q,
+ unsigned int num_elem)
+{
+ if (!queue_empty(q) && (num_elem < queue_count(q)))
+ return -EINVAL;
+
+ while (!queue_empty(q)) {
+ memcpy(producer_addr(new_q), consumer_addr(q),
+ new_q->elem_size);
+ advance_producer(new_q);
+ advance_consumer(q);
+ }
+
+ swap(*q, *new_q);
+
+ return 0;
+}
+
+int rxe_queue_resize(struct rxe_queue *q,
+ unsigned int *num_elem_p,
+ unsigned int elem_size,
+ struct ib_ucontext *context,
+ struct ib_udata *udata,
+ spinlock_t *producer_lock,
+ spinlock_t *consumer_lock)
+{
+ struct rxe_queue *new_q;
+ unsigned int num_elem = *num_elem_p;
+ int err;
+ unsigned long flags = 0, flags1;
+
+ new_q = rxe_queue_init(q->rxe, &num_elem, elem_size);
+ if (!new_q)
+ return -ENOMEM;
+
+ err = do_mmap_info(new_q->rxe, udata, false, context, new_q->buf,
+ new_q->buf_size, &new_q->ip);
+ if (err) {
+ vfree(new_q->buf);
+ kfree(new_q);
+ goto err1;
+ }
+
+ spin_lock_irqsave(consumer_lock, flags1);
+
+ if (producer_lock) {
+ spin_lock_irqsave(producer_lock, flags);
+ err = resize_finish(q, new_q, num_elem);
+ spin_unlock_irqrestore(producer_lock, flags);
+ } else {
+ err = resize_finish(q, new_q, num_elem);
+ }
+
+ spin_unlock_irqrestore(consumer_lock, flags1);
+
+ rxe_queue_cleanup(new_q); /* new/old dep on err */
+ if (err)
+ goto err1;
+
+ *num_elem_p = num_elem;
+ return 0;
+
+err1:
+ return err;
+}
+
+void rxe_queue_cleanup(struct rxe_queue *q)
+{
+ if (q->ip)
+ kref_put(&q->ip->ref, rxe_mmap_release);
+ else
+ vfree(q->buf);
+
+ kfree(q);
+}
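
rxe_queue_init() reserves one slot for full/empty disambiguation and rounds the slot count up to a power of two, so callers may get back a larger depth than they asked for. A worked sketch:

    int num_elem = 64;
    struct rxe_queue *q;

    q = rxe_queue_init(rxe, &num_elem, sizeof(struct rxe_send_wqe));
    /* num_slots = roundup_pow_of_two(64 + 1) = 128; num_elem is now 127 */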
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h
new file mode 100644
index 000000000..239fd609c
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_queue.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef RXE_QUEUE_H
+#define RXE_QUEUE_H
+
+/* implements a simple circular buffer that can optionally be
+ * shared between user space and the kernel and can be resized.
+ *
+ * the requested element size is rounded up to a power of 2
+ * and the number of elements in the buffer is also rounded
+ * up to a power of 2. Since the queue is empty when the
+ * producer and consumer indices match, the maximum capacity
+ * of the queue is one less than the number of element slots.
+ */
+
+/* this data structure is shared between user space and kernel
+ * space for those cases where the queue is shared. It contains
+ * the producer and consumer indices. It also contains a copy
+ * of the queue size parameters for user space to use, but the
+ * kernel must use the parameters in the rxe_queue struct.
+ * this MUST MATCH the corresponding librxe struct.
+ * for performance reasons, arrange to have the producer and
+ * consumer indices in separate cache lines.
+ * the kernel should always mask the indices to avoid accessing
+ * memory outside of the data area
+ */
+struct rxe_queue_buf {
+ __u32 log2_elem_size;
+ __u32 index_mask;
+ __u32 pad_1[30];
+ __u32 producer_index;
+ __u32 pad_2[31];
+ __u32 consumer_index;
+ __u32 pad_3[31];
+ __u8 data[0];
+};
+
+struct rxe_queue {
+ struct rxe_dev *rxe;
+ struct rxe_queue_buf *buf;
+ struct rxe_mmap_info *ip;
+ size_t buf_size;
+ size_t elem_size;
+ unsigned int log2_elem_size;
+ unsigned int index_mask;
+};
+
+int do_mmap_info(struct rxe_dev *rxe,
+ struct ib_udata *udata,
+ bool is_req,
+ struct ib_ucontext *context,
+ struct rxe_queue_buf *buf,
+ size_t buf_size,
+ struct rxe_mmap_info **ip_p);
+
+struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
+ int *num_elem,
+ unsigned int elem_size);
+
+int rxe_queue_resize(struct rxe_queue *q,
+ unsigned int *num_elem_p,
+ unsigned int elem_size,
+ struct ib_ucontext *context,
+ struct ib_udata *udata,
+ /* Protect producers while resizing queue */
+ spinlock_t *producer_lock,
+ /* Protect consumers while resizing queue */
+ spinlock_t *consumer_lock);
+
+void rxe_queue_cleanup(struct rxe_queue *queue);
+
+static inline int next_index(struct rxe_queue *q, int index)
+{
+ return (index + 1) & q->buf->index_mask;
+}
+
+static inline int queue_empty(struct rxe_queue *q)
+{
+ return ((q->buf->producer_index - q->buf->consumer_index)
+ & q->index_mask) == 0;
+}
+
+static inline int queue_full(struct rxe_queue *q)
+{
+ return ((q->buf->producer_index + 1 - q->buf->consumer_index)
+ & q->index_mask) == 0;
+}
+
+static inline void advance_producer(struct rxe_queue *q)
+{
+ q->buf->producer_index = (q->buf->producer_index + 1)
+ & q->index_mask;
+}
+
+static inline void advance_consumer(struct rxe_queue *q)
+{
+ q->buf->consumer_index = (q->buf->consumer_index + 1)
+ & q->index_mask;
+}
+
+static inline void *producer_addr(struct rxe_queue *q)
+{
+ return q->buf->data + ((q->buf->producer_index & q->index_mask)
+ << q->log2_elem_size);
+}
+
+static inline void *consumer_addr(struct rxe_queue *q)
+{
+ return q->buf->data + ((q->buf->consumer_index & q->index_mask)
+ << q->log2_elem_size);
+}
+
+static inline unsigned int producer_index(struct rxe_queue *q)
+{
+ return q->buf->producer_index;
+}
+
+static inline unsigned int consumer_index(struct rxe_queue *q)
+{
+ return q->buf->consumer_index;
+}
+
+static inline void *addr_from_index(struct rxe_queue *q, unsigned int index)
+{
+ return q->buf->data + ((index & q->index_mask)
+ << q->buf->log2_elem_size);
+}
+
+static inline unsigned int index_from_addr(const struct rxe_queue *q,
+ const void *addr)
+{
+ return (((u8 *)addr - q->buf->data) >> q->log2_elem_size)
+ & q->index_mask;
+}
+
+static inline unsigned int queue_count(const struct rxe_queue *q)
+{
+ return (q->buf->producer_index - q->buf->consumer_index)
+ & q->index_mask;
+}
+
+static inline void *queue_head(struct rxe_queue *q)
+{
+ return queue_empty(q) ? NULL : consumer_addr(q);
+}
+
+#endif /* RXE_QUEUE_H */
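
Because the slot count is a power of two, the producer/consumer arithmetic works across wraparound with a single mask and no branches. A standalone user-space sketch of the same index math, with values chosen to show a wrapped producer:

    #include <stdio.h>

    int main(void)
    {
            unsigned int mask = 7;           /* 8 slots, capacity 7     */
            unsigned int prod = 1, cons = 3; /* producer has wrapped    */

            printf("count = %u\n", (prod - cons) & mask);            /* 6 */
            printf("full  = %d\n", ((prod + 1 - cons) & mask) == 0); /* 0 */
            return 0;
    }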
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
new file mode 100644
index 000000000..144d2f129
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -0,0 +1,420 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+
+#include "rxe.h"
+#include "rxe_loc.h"
+
+static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+ struct rxe_qp *qp)
+{
+ if (unlikely(!qp->valid))
+ goto err1;
+
+ switch (qp_type(qp)) {
+ case IB_QPT_RC:
+ if (unlikely((pkt->opcode & IB_OPCODE_RC) != 0)) {
+ pr_warn_ratelimited("bad qp type\n");
+ goto err1;
+ }
+ break;
+ case IB_QPT_UC:
+ if (unlikely(!(pkt->opcode & IB_OPCODE_UC))) {
+ pr_warn_ratelimited("bad qp type\n");
+ goto err1;
+ }
+ break;
+ case IB_QPT_UD:
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ if (unlikely(!(pkt->opcode & IB_OPCODE_UD))) {
+ pr_warn_ratelimited("bad qp type\n");
+ goto err1;
+ }
+ break;
+ default:
+ pr_warn_ratelimited("unsupported qp type\n");
+ goto err1;
+ }
+
+ if (pkt->mask & RXE_REQ_MASK) {
+ if (unlikely(qp->resp.state != QP_STATE_READY))
+ goto err1;
+ } else if (unlikely(qp->req.state < QP_STATE_READY ||
+ qp->req.state > QP_STATE_DRAINED)) {
+ goto err1;
+ }
+
+ return 0;
+
+err1:
+ return -EINVAL;
+}
+
+static void set_bad_pkey_cntr(struct rxe_port *port)
+{
+ spin_lock_bh(&port->port_lock);
+ port->attr.bad_pkey_cntr = min((u32)0xffff,
+ port->attr.bad_pkey_cntr + 1);
+ spin_unlock_bh(&port->port_lock);
+}
+
+static void set_qkey_viol_cntr(struct rxe_port *port)
+{
+ spin_lock_bh(&port->port_lock);
+ port->attr.qkey_viol_cntr = min((u32)0xffff,
+ port->attr.qkey_viol_cntr + 1);
+ spin_unlock_bh(&port->port_lock);
+}
+
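+/* Check the packet's P_Key and, for UD/GSI, its Q_Key. The GSI QP
+ * (qpn 1) may match any P_Key in the port table; other QPs must match
+ * the entry at their configured pkey_index. The SMI QP (qpn 0) is
+ * exempt from the P_Key check.
+ */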
+static int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+ u32 qpn, struct rxe_qp *qp)
+{
+ int i;
+ int found_pkey = 0;
+ struct rxe_port *port = &rxe->port;
+ u16 pkey = bth_pkey(pkt);
+
+ pkt->pkey_index = 0;
+
+ if (qpn == 1) {
+ for (i = 0; i < port->attr.pkey_tbl_len; i++) {
+ if (pkey_match(pkey, port->pkey_tbl[i])) {
+ pkt->pkey_index = i;
+ found_pkey = 1;
+ break;
+ }
+ }
+
+ if (!found_pkey) {
+ pr_warn_ratelimited("bad pkey = 0x%x\n", pkey);
+ set_bad_pkey_cntr(port);
+ goto err1;
+ }
+ } else if (qpn != 0) {
+ if (unlikely(!pkey_match(pkey,
+ port->pkey_tbl[qp->attr.pkey_index]
+ ))) {
+ pr_warn_ratelimited("bad pkey = 0x%0x\n", pkey);
+ set_bad_pkey_cntr(port);
+ goto err1;
+ }
+ pkt->pkey_index = qp->attr.pkey_index;
+ }
+
+ if ((qp_type(qp) == IB_QPT_UD || qp_type(qp) == IB_QPT_GSI) &&
+ qpn != 0 && pkt->mask) {
+ u32 qkey = (qpn == 1) ? GSI_QKEY : qp->attr.qkey;
+
+ if (unlikely(deth_qkey(pkt) != qkey)) {
+ pr_warn_ratelimited("bad qkey, got 0x%x expected 0x%x for qpn 0x%x\n",
+ deth_qkey(pkt), qkey, qpn);
+ set_qkey_viol_cntr(port);
+ goto err1;
+ }
+ }
+
+ return 0;
+
+err1:
+ return -EINVAL;
+}
+
+static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+ struct rxe_qp *qp)
+{
+ struct sk_buff *skb = PKT_TO_SKB(pkt);
+
+ if (qp_type(qp) != IB_QPT_RC && qp_type(qp) != IB_QPT_UC)
+ goto done;
+
+ if (unlikely(pkt->port_num != qp->attr.port_num)) {
+ pr_warn_ratelimited("port %d != qp port %d\n",
+ pkt->port_num, qp->attr.port_num);
+ goto err1;
+ }
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct in_addr *saddr =
+ &qp->pri_av.sgid_addr._sockaddr_in.sin_addr;
+ struct in_addr *daddr =
+ &qp->pri_av.dgid_addr._sockaddr_in.sin_addr;
+
+ if (ip_hdr(skb)->daddr != saddr->s_addr) {
+ pr_warn_ratelimited("dst addr %pI4 != qp source addr %pI4\n",
+ &ip_hdr(skb)->daddr,
+ &saddr->s_addr);
+ goto err1;
+ }
+
+ if (ip_hdr(skb)->saddr != daddr->s_addr) {
+ pr_warn_ratelimited("source addr %pI4 != qp dst addr %pI4\n",
+ &ip_hdr(skb)->saddr,
+ &daddr->s_addr);
+ goto err1;
+ }
+
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ struct in6_addr *saddr =
+ &qp->pri_av.sgid_addr._sockaddr_in6.sin6_addr;
+ struct in6_addr *daddr =
+ &qp->pri_av.dgid_addr._sockaddr_in6.sin6_addr;
+
+ if (memcmp(&ipv6_hdr(skb)->daddr, saddr, sizeof(*saddr))) {
+ pr_warn_ratelimited("dst addr %pI6 != qp source addr %pI6\n",
+ &ipv6_hdr(skb)->daddr, saddr);
+ goto err1;
+ }
+
+ if (memcmp(&ipv6_hdr(skb)->saddr, daddr, sizeof(*daddr))) {
+ pr_warn_ratelimited("source addr %pI6 != qp dst addr %pI6\n",
+ &ipv6_hdr(skb)->saddr, daddr);
+ goto err1;
+ }
+ }
+
+done:
+ return 0;
+
+err1:
+ return -EINVAL;
+}
+
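+/* Validate the base transport header and, for unicast packets, resolve
+ * the destination QP; QPN 0 and 1 map to the port's SMI and GSI QPs.
+ * On success a reference to the QP is held in pkt->qp.
+ */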
+static int hdr_check(struct rxe_pkt_info *pkt)
+{
+ struct rxe_dev *rxe = pkt->rxe;
+ struct rxe_port *port = &rxe->port;
+ struct rxe_qp *qp = NULL;
+ u32 qpn = bth_qpn(pkt);
+ int index;
+ int err;
+
+ if (unlikely(bth_tver(pkt) != BTH_TVER)) {
+ pr_warn_ratelimited("bad tver\n");
+ goto err1;
+ }
+
+ if (qpn != IB_MULTICAST_QPN) {
+ index = (qpn == 0) ? port->qp_smi_index :
+ ((qpn == 1) ? port->qp_gsi_index : qpn);
+ qp = rxe_pool_get_index(&rxe->qp_pool, index);
+ if (unlikely(!qp)) {
+ pr_warn_ratelimited("no qp matches qpn 0x%x\n", qpn);
+ goto err1;
+ }
+
+ err = check_type_state(rxe, pkt, qp);
+ if (unlikely(err))
+ goto err2;
+
+ err = check_addr(rxe, pkt, qp);
+ if (unlikely(err))
+ goto err2;
+
+ err = check_keys(rxe, pkt, qpn, qp);
+ if (unlikely(err))
+ goto err2;
+ } else {
+ if (unlikely((pkt->mask & RXE_GRH_MASK) == 0)) {
+ pr_warn_ratelimited("no grh for mcast qpn\n");
+ goto err1;
+ }
+ }
+
+ pkt->qp = qp;
+ return 0;
+
+err2:
+ if (qp)
+ rxe_drop_ref(qp);
+err1:
+ return -EINVAL;
+}
+
+static inline void rxe_rcv_pkt(struct rxe_dev *rxe,
+ struct rxe_pkt_info *pkt,
+ struct sk_buff *skb)
+{
+ if (pkt->mask & RXE_REQ_MASK)
+ rxe_resp_queue_pkt(rxe, pkt->qp, skb);
+ else
+ rxe_comp_queue_pkt(rxe, pkt->qp, skb);
+}
+
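+/* Deliver a multicast packet to every QP attached to the group,
+ * cloning the skb for all but the last QP in the list.
+ */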
+static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
+{
+ struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
+ struct rxe_mc_grp *mcg;
+ struct sk_buff *skb_copy;
+ struct rxe_mc_elem *mce;
+ struct rxe_qp *qp;
+ union ib_gid dgid;
+ int err;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
+ (struct in6_addr *)&dgid);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ memcpy(&dgid, &ipv6_hdr(skb)->daddr, sizeof(dgid));
+
+ /* lookup mcast group corresponding to mgid, takes a ref */
+ mcg = rxe_pool_get_key(&rxe->mc_grp_pool, &dgid);
+ if (!mcg)
+ goto err1; /* mcast group not registered */
+
+ spin_lock_bh(&mcg->mcg_lock);
+
+ list_for_each_entry(mce, &mcg->qp_list, qp_list) {
+ qp = mce->qp;
+ pkt = SKB_TO_PKT(skb);
+
+ /* validate qp for incoming packet */
+ err = check_type_state(rxe, pkt, qp);
+ if (err)
+ continue;
+
+ err = check_keys(rxe, pkt, bth_qpn(pkt), qp);
+ if (err)
+ continue;
+
+ /* if *not* the last qp in the list
+ * make a copy of the skb to post to the next qp
+ */
+ skb_copy = (mce->qp_list.next != &mcg->qp_list) ?
+ skb_clone(skb, GFP_ATOMIC) : NULL;
+
+ pkt->qp = qp;
+ rxe_add_ref(qp);
+ rxe_rcv_pkt(rxe, pkt, skb);
+
+ skb = skb_copy;
+ if (!skb)
+ break;
+ }
+
+ spin_unlock_bh(&mcg->mcg_lock);
+
+ rxe_drop_ref(mcg); /* drop ref from rxe_pool_get_key. */
+
+err1:
+ if (skb)
+ kfree_skb(skb);
+}
+
+static int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
+{
+ union ib_gid dgid;
+ union ib_gid *pdgid;
+ u16 index;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
+ (struct in6_addr *)&dgid);
+ pdgid = &dgid;
+ } else {
+ pdgid = (union ib_gid *)&ipv6_hdr(skb)->daddr;
+ }
+
+ return ib_find_cached_gid_by_port(&rxe->ib_dev, pdgid,
+ IB_GID_TYPE_ROCE_UDP_ENCAP,
+ 1, rxe->ndev, &index);
+}
+
+/* rxe_rcv is called from the interface driver */
+int rxe_rcv(struct sk_buff *skb)
+{
+ int err;
+ struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
+ struct rxe_dev *rxe = pkt->rxe;
+ __be32 *icrcp;
+ u32 calc_icrc, pack_icrc;
+
+ pkt->offset = 0;
+
+ if (unlikely(skb->len < pkt->offset + RXE_BTH_BYTES))
+ goto drop;
+
+ if (unlikely(rxe_match_dgid(rxe, skb) < 0)) {
+ pr_warn_ratelimited("failed matching dgid\n");
+ goto drop;
+ }
+
+ pkt->opcode = bth_opcode(pkt);
+ pkt->psn = bth_psn(pkt);
+ pkt->qp = NULL;
+ pkt->mask |= rxe_opcode[pkt->opcode].mask;
+
+ if (unlikely(skb->len < header_size(pkt)))
+ goto drop;
+
+ err = hdr_check(pkt);
+ if (unlikely(err))
+ goto drop;
+
+ /* Verify ICRC */
+ icrcp = (__be32 *)(pkt->hdr + pkt->paylen - RXE_ICRC_SIZE);
+ pack_icrc = be32_to_cpu(*icrcp);
+
+ calc_icrc = rxe_icrc_hdr(pkt, skb);
+ calc_icrc = crc32_le(calc_icrc, (u8 *)payload_addr(pkt), payload_size(pkt));
+ calc_icrc = cpu_to_be32(~calc_icrc);
+ if (unlikely(calc_icrc != pack_icrc)) {
+ char saddr[40]; /* "%pI6" expands to up to 39 chars */
+
+ if (skb->protocol == htons(ETH_P_IPV6))
+ sprintf(saddr, "%pI6", &ipv6_hdr(skb)->saddr);
+ else if (skb->protocol == htons(ETH_P_IP))
+ sprintf(saddr, "%pI4", &ip_hdr(skb)->saddr);
+ else
+ sprintf(saddr, "unknown");
+
+ pr_warn_ratelimited("bad ICRC from %s\n", saddr);
+ goto drop;
+ }
+
+ if (unlikely(bth_qpn(pkt) == IB_MULTICAST_QPN))
+ rxe_rcv_mcast_pkt(rxe, skb);
+ else
+ rxe_rcv_pkt(rxe, pkt, skb);
+
+ return 0;
+
+drop:
+ if (pkt->qp)
+ rxe_drop_ref(pkt->qp);
+
+ kfree_skb(skb);
+ return 0;
+}
+EXPORT_SYMBOL(rxe_rcv);
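rxe_rcv() accepts a packet only if recomputing the CRC over the (masked) headers and payload and complementing it reproduces the trailing 32-bit ICRC, mirroring fill_packet() on the send side, which stores ~crc. A standalone sketch of that store-complement/recompute-compare convention, using a bitwise CRC-32 over the reflected 0xEDB88320 polynomial (the crc32_le family); the spec-defined seed, the header-field masking done by rxe_icrc_hdr(), and byte-order handling are deliberately omitted here:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bitwise CRC-32 over the reflected polynomial 0xEDB88320, the same
 * family as the kernel's crc32_le().
 */
static uint32_t crc32_le_sw(uint32_t crc, const uint8_t *p, size_t len)
{
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
	}
	return crc;
}

int main(void)
{
	uint8_t pkt[64] = "example payload";
	uint32_t icrc, check;

	/* sender side, as in fill_packet(): store the complement */
	icrc = ~crc32_le_sw(~0u, pkt, 60);
	memcpy(pkt + 60, &icrc, sizeof(icrc));

	/* receiver side, as in rxe_rcv(): recompute and compare */
	check = ~crc32_le_sw(~0u, pkt, 60);
	memcpy(&icrc, pkt + 60, sizeof(icrc));
	printf("icrc %s\n", check == icrc ? "ok" : "bad");
	return 0;
}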
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
new file mode 100644
index 000000000..13a848a51
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -0,0 +1,757 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+
+#include "rxe.h"
+#include "rxe_loc.h"
+#include "rxe_queue.h"
+
+static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+ unsigned opcode);
+
+static inline void retry_first_write_send(struct rxe_qp *qp,
+ struct rxe_send_wqe *wqe,
+ unsigned mask, int npsn)
+{
+ int i;
+
+ for (i = 0; i < npsn; i++) {
+ int to_send = (wqe->dma.resid > qp->mtu) ?
+ qp->mtu : wqe->dma.resid;
+
+ qp->req.opcode = next_opcode(qp, wqe,
+ wqe->wr.opcode);
+
+ if (wqe->wr.send_flags & IB_SEND_INLINE) {
+ wqe->dma.resid -= to_send;
+ wqe->dma.sge_offset += to_send;
+ } else {
+ advance_dma_data(&wqe->dma, to_send);
+ }
+ if (mask & WR_WRITE_MASK)
+ wqe->iova += qp->mtu;
+ }
+}
+
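+/* Rewind the requester to the completer's PSN and mark every
+ * unacknowledged wqe as posted so it is sent again; the first wqe may
+ * be resumed part-way through a message.
+ */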
+static void req_retry(struct rxe_qp *qp)
+{
+ struct rxe_send_wqe *wqe;
+ unsigned int wqe_index;
+ unsigned int mask;
+ int npsn;
+ int first = 1;
+
+ wqe = queue_head(qp->sq.queue);
+ npsn = (qp->comp.psn - wqe->first_psn) & BTH_PSN_MASK;
+
+ qp->req.wqe_index = consumer_index(qp->sq.queue);
+ qp->req.psn = qp->comp.psn;
+ qp->req.opcode = -1;
+
+ for (wqe_index = consumer_index(qp->sq.queue);
+ wqe_index != producer_index(qp->sq.queue);
+ wqe_index = next_index(qp->sq.queue, wqe_index)) {
+ wqe = addr_from_index(qp->sq.queue, wqe_index);
+ mask = wr_opcode_mask(wqe->wr.opcode, qp);
+
+ if (wqe->state == wqe_state_posted)
+ break;
+
+ if (wqe->state == wqe_state_done)
+ continue;
+
+ wqe->iova = (mask & WR_ATOMIC_MASK) ?
+ wqe->wr.wr.atomic.remote_addr :
+ (mask & WR_READ_OR_WRITE_MASK) ?
+ wqe->wr.wr.rdma.remote_addr :
+ 0;
+
+ if (!first || (mask & WR_READ_MASK) == 0) {
+ wqe->dma.resid = wqe->dma.length;
+ wqe->dma.cur_sge = 0;
+ wqe->dma.sge_offset = 0;
+ }
+
+ if (first) {
+ first = 0;
+
+ if (mask & WR_WRITE_OR_SEND_MASK)
+ retry_first_write_send(qp, wqe, mask, npsn);
+
+ if (mask & WR_READ_MASK)
+ wqe->iova += npsn * qp->mtu;
+ }
+
+ wqe->state = wqe_state_posted;
+ }
+}
+
+void rnr_nak_timer(unsigned long data)
+{
+ struct rxe_qp *qp = (struct rxe_qp *)data;
+
+ pr_debug("rnr nak timer fired\n");
+ rxe_run_task(&qp->req.task, 1);
+}
+
+static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
+{
+ struct rxe_send_wqe *wqe = queue_head(qp->sq.queue);
+ unsigned long flags;
+
+ if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
+ /* check to see if we are drained;
+ * state_lock used by requester and completer
+ */
+ spin_lock_irqsave(&qp->state_lock, flags);
+ do {
+ if (qp->req.state != QP_STATE_DRAIN) {
+ /* comp just finished */
+ spin_unlock_irqrestore(&qp->state_lock,
+ flags);
+ break;
+ }
+
+ if (wqe && ((qp->req.wqe_index !=
+ consumer_index(qp->sq.queue)) ||
+ (wqe->state != wqe_state_posted))) {
+ /* comp not done yet */
+ spin_unlock_irqrestore(&qp->state_lock,
+ flags);
+ break;
+ }
+
+ qp->req.state = QP_STATE_DRAINED;
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+
+ if (qp->ibqp.event_handler) {
+ struct ib_event ev;
+
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_SQ_DRAINED;
+ qp->ibqp.event_handler(&ev,
+ qp->ibqp.qp_context);
+ }
+ } while (0);
+ }
+
+ if (qp->req.wqe_index == producer_index(qp->sq.queue))
+ return NULL;
+
+ wqe = addr_from_index(qp->sq.queue, qp->req.wqe_index);
+
+ if (unlikely((qp->req.state == QP_STATE_DRAIN ||
+ qp->req.state == QP_STATE_DRAINED) &&
+ (wqe->state != wqe_state_processing)))
+ return NULL;
+
+ if (unlikely((wqe->wr.send_flags & IB_SEND_FENCE) &&
+ (qp->req.wqe_index != consumer_index(qp->sq.queue)))) {
+ qp->req.wait_fence = 1;
+ return NULL;
+ }
+
+ wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
+ return wqe;
+}
+
+static int next_opcode_rc(struct rxe_qp *qp, unsigned opcode, int fits)
+{
+ switch (opcode) {
+ case IB_WR_RDMA_WRITE:
+ if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
+ qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
+ return fits ?
+ IB_OPCODE_RC_RDMA_WRITE_LAST :
+ IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
+ else
+ return fits ?
+ IB_OPCODE_RC_RDMA_WRITE_ONLY :
+ IB_OPCODE_RC_RDMA_WRITE_FIRST;
+
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
+ qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
+ return fits ?
+ IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
+ IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
+ else
+ return fits ?
+ IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
+ IB_OPCODE_RC_RDMA_WRITE_FIRST;
+
+ case IB_WR_SEND:
+ if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
+ qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
+ return fits ?
+ IB_OPCODE_RC_SEND_LAST :
+ IB_OPCODE_RC_SEND_MIDDLE;
+ else
+ return fits ?
+ IB_OPCODE_RC_SEND_ONLY :
+ IB_OPCODE_RC_SEND_FIRST;
+
+ case IB_WR_SEND_WITH_IMM:
+ if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
+ qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
+ return fits ?
+ IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE :
+ IB_OPCODE_RC_SEND_MIDDLE;
+ else
+ return fits ?
+ IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE :
+ IB_OPCODE_RC_SEND_FIRST;
+
+ case IB_WR_RDMA_READ:
+ return IB_OPCODE_RC_RDMA_READ_REQUEST;
+
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ return IB_OPCODE_RC_COMPARE_SWAP;
+
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ return IB_OPCODE_RC_FETCH_ADD;
+
+ case IB_WR_SEND_WITH_INV:
+ if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
+ qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
+ return fits ? IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE :
+ IB_OPCODE_RC_SEND_MIDDLE;
+ else
+ return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
+ IB_OPCODE_RC_SEND_FIRST;
+ case IB_WR_REG_MR:
+ case IB_WR_LOCAL_INV:
+ return opcode;
+ }
+
+ return -EINVAL;
+}
+
+static int next_opcode_uc(struct rxe_qp *qp, unsigned opcode, int fits)
+{
+ switch (opcode) {
+ case IB_WR_RDMA_WRITE:
+ if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
+ qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
+ return fits ?
+ IB_OPCODE_UC_RDMA_WRITE_LAST :
+ IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
+ else
+ return fits ?
+ IB_OPCODE_UC_RDMA_WRITE_ONLY :
+ IB_OPCODE_UC_RDMA_WRITE_FIRST;
+
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
+ qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
+ return fits ?
+ IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
+ IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
+ else
+ return fits ?
+ IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
+ IB_OPCODE_UC_RDMA_WRITE_FIRST;
+
+ case IB_WR_SEND:
+ if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
+ qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
+ return fits ?
+ IB_OPCODE_UC_SEND_LAST :
+ IB_OPCODE_UC_SEND_MIDDLE;
+ else
+ return fits ?
+ IB_OPCODE_UC_SEND_ONLY :
+ IB_OPCODE_UC_SEND_FIRST;
+
+ case IB_WR_SEND_WITH_IMM:
+ if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
+ qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
+ return fits ?
+ IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE :
+ IB_OPCODE_UC_SEND_MIDDLE;
+ else
+ return fits ?
+ IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE :
+ IB_OPCODE_UC_SEND_FIRST;
+ }
+
+ return -EINVAL;
+}
+
+static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+ unsigned opcode)
+{
+ int fits = (wqe->dma.resid <= qp->mtu);
+
+ switch (qp_type(qp)) {
+ case IB_QPT_RC:
+ return next_opcode_rc(qp, opcode, fits);
+
+ case IB_QPT_UC:
+ return next_opcode_uc(qp, opcode, fits);
+
+ case IB_QPT_SMI:
+ case IB_QPT_UD:
+ case IB_QPT_GSI:
+ switch (opcode) {
+ case IB_WR_SEND:
+ return IB_OPCODE_UD_SEND_ONLY;
+
+ case IB_WR_SEND_WITH_IMM:
+ return IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
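+/* Claim one read/atomic initiator credit for this wqe; returns -EAGAIN
+ * (leaving need_rd_atomic set) when qp->req.rd_atomic is exhausted.
+ */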
+static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+{
+ int depth;
+
+ if (wqe->has_rd_atomic)
+ return 0;
+
+ qp->req.need_rd_atomic = 1;
+ depth = atomic_dec_return(&qp->req.rd_atomic);
+
+ if (depth >= 0) {
+ qp->req.need_rd_atomic = 0;
+ wqe->has_rd_atomic = 1;
+ return 0;
+ }
+
+ atomic_inc(&qp->req.rd_atomic);
+ return -EAGAIN;
+}
+
+static inline int get_mtu(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ struct rxe_port *port;
+ struct rxe_av *av;
+
+ if ((qp_type(qp) == IB_QPT_RC) || (qp_type(qp) == IB_QPT_UC))
+ return qp->mtu;
+
+ av = &wqe->av;
+ port = &rxe->port;
+
+ return port->mtu_cap;
+}
+
+static struct sk_buff *init_req_packet(struct rxe_qp *qp,
+ struct rxe_send_wqe *wqe,
+ int opcode, int payload,
+ struct rxe_pkt_info *pkt)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ struct rxe_port *port = &rxe->port;
+ struct sk_buff *skb;
+ struct rxe_send_wr *ibwr = &wqe->wr;
+ struct rxe_av *av;
+ int pad = (-payload) & 0x3;
+ int paylen;
+ int solicited;
+ u16 pkey;
+ u32 qp_num;
+ int ack_req;
+
+ /* length from start of bth to end of icrc */
+ paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;
+
+ /* pkt->hdr, rxe, port_num and mask are initialized in ifc
+ * layer
+ */
+ pkt->opcode = opcode;
+ pkt->qp = qp;
+ pkt->psn = qp->req.psn;
+ pkt->mask = rxe_opcode[opcode].mask;
+ pkt->paylen = paylen;
+ pkt->offset = 0;
+ pkt->wqe = wqe;
+
+ /* init skb */
+ av = rxe_get_av(pkt);
+ skb = rxe->ifc_ops->init_packet(rxe, av, paylen, pkt);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* init bth */
+ solicited = (ibwr->send_flags & IB_SEND_SOLICITED) &&
+ (pkt->mask & RXE_END_MASK) &&
+ ((pkt->mask & (RXE_SEND_MASK)) ||
+ (pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
+ (RXE_WRITE_MASK | RXE_IMMDT_MASK));
+
+ pkey = (qp_type(qp) == IB_QPT_GSI) ?
+ port->pkey_tbl[ibwr->wr.ud.pkey_index] :
+ port->pkey_tbl[qp->attr.pkey_index];
+
+ qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
+ qp->attr.dest_qp_num;
+
+ ack_req = ((pkt->mask & RXE_END_MASK) ||
+ (qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
+ if (ack_req)
+ qp->req.noack_pkts = 0;
+
+ bth_init(pkt, pkt->opcode, solicited, 0, pad, pkey, qp_num,
+ ack_req, pkt->psn);
+
+ /* init optional headers */
+ if (pkt->mask & RXE_RETH_MASK) {
+ reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
+ reth_set_va(pkt, wqe->iova);
+ reth_set_len(pkt, wqe->dma.length);
+ }
+
+ if (pkt->mask & RXE_IMMDT_MASK)
+ immdt_set_imm(pkt, ibwr->ex.imm_data);
+
+ if (pkt->mask & RXE_IETH_MASK)
+ ieth_set_rkey(pkt, ibwr->ex.invalidate_rkey);
+
+ if (pkt->mask & RXE_ATMETH_MASK) {
+ atmeth_set_va(pkt, wqe->iova);
+ if (opcode == IB_OPCODE_RC_COMPARE_SWAP ||
+ opcode == IB_OPCODE_RD_COMPARE_SWAP) {
+ atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap);
+ atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add);
+ } else {
+ atmeth_set_swap_add(pkt, ibwr->wr.atomic.compare_add);
+ }
+ atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);
+ }
+
+ if (pkt->mask & RXE_DETH_MASK) {
+ if (qp->ibqp.qp_num == 1)
+ deth_set_qkey(pkt, GSI_QKEY);
+ else
+ deth_set_qkey(pkt, ibwr->wr.ud.remote_qkey);
+ deth_set_sqp(pkt, qp->ibqp.qp_num);
+ }
+
+ return skb;
+}
+
+static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+ struct rxe_pkt_info *pkt, struct sk_buff *skb,
+ int paylen)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ u32 crc = 0;
+ u32 *p;
+ int err;
+
+ err = rxe->ifc_ops->prepare(rxe, pkt, skb, &crc);
+ if (err)
+ return err;
+
+ if (pkt->mask & RXE_WRITE_OR_SEND) {
+ if (wqe->wr.send_flags & IB_SEND_INLINE) {
+ u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];
+
+ crc = crc32_le(crc, tmp, paylen);
+
+ memcpy(payload_addr(pkt), tmp, paylen);
+
+ wqe->dma.resid -= paylen;
+ wqe->dma.sge_offset += paylen;
+ } else {
+ err = copy_data(rxe, qp->pd, 0, &wqe->dma,
+ payload_addr(pkt), paylen,
+ from_mem_obj,
+ &crc);
+ if (err)
+ return err;
+ }
+ }
+ p = payload_addr(pkt) + paylen + bth_pad(pkt);
+
+ *p = ~crc;
+
+ return 0;
+}
+
+static void update_wqe_state(struct rxe_qp *qp,
+ struct rxe_send_wqe *wqe,
+ struct rxe_pkt_info *pkt)
+{
+ if (pkt->mask & RXE_END_MASK) {
+ if (qp_type(qp) == IB_QPT_RC)
+ wqe->state = wqe_state_pending;
+ } else {
+ wqe->state = wqe_state_processing;
+ }
+}
+
+static void update_wqe_psn(struct rxe_qp *qp,
+ struct rxe_send_wqe *wqe,
+ struct rxe_pkt_info *pkt,
+ int payload)
+{
+ /* number of packets left to send including current one */
+ int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;
+
+ /* handle zero length packet case */
+ if (num_pkt == 0)
+ num_pkt = 1;
+
+ if (pkt->mask & RXE_START_MASK) {
+ wqe->first_psn = qp->req.psn;
+ wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK;
+ }
+
+ if (pkt->mask & RXE_READ_MASK)
+ qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
+ else
+ qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
+}
+
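+/* Snapshot the wqe and PSN state so that a failed transmit in
+ * rxe_requester() can be rolled back without the completer seeing a
+ * half-updated wqe.
+ */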
+static void save_state(struct rxe_send_wqe *wqe,
+ struct rxe_qp *qp,
+ struct rxe_send_wqe *rollback_wqe,
+ struct rxe_qp *rollback_qp)
+{
+ rollback_wqe->state = wqe->state;
+ rollback_wqe->first_psn = wqe->first_psn;
+ rollback_wqe->last_psn = wqe->last_psn;
+ rollback_qp->req.psn = qp->req.psn;
+}
+
+static void rollback_state(struct rxe_send_wqe *wqe,
+ struct rxe_qp *qp,
+ struct rxe_send_wqe *rollback_wqe,
+ struct rxe_qp *rollback_qp)
+{
+ wqe->state = rollback_wqe->state;
+ wqe->first_psn = rollback_wqe->first_psn;
+ wqe->last_psn = rollback_wqe->last_psn;
+ qp->req.psn = rollback_qp->req.psn;
+}
+
+static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+ struct rxe_pkt_info *pkt, int payload)
+{
+ qp->req.opcode = pkt->opcode;
+
+ if (pkt->mask & RXE_END_MASK)
+ qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);
+
+ qp->need_req_skb = 0;
+
+ if (qp->qp_timeout_jiffies && !timer_pending(&qp->retrans_timer))
+ mod_timer(&qp->retrans_timer,
+ jiffies + qp->qp_timeout_jiffies);
+}
+
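+/* Requester work-loop: take the next send wqe, fragment it into
+ * packets and transmit them, pacing and retrying as required. Returns
+ * -EAGAIN when there is nothing more to do for now.
+ */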
+int rxe_requester(void *arg)
+{
+ struct rxe_qp *qp = (struct rxe_qp *)arg;
+ struct rxe_pkt_info pkt;
+ struct sk_buff *skb;
+ struct rxe_send_wqe *wqe;
+ unsigned mask;
+ int payload;
+ int mtu;
+ int opcode;
+ int ret;
+ struct rxe_qp rollback_qp;
+ struct rxe_send_wqe rollback_wqe;
+
+next_wqe:
+ if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
+ goto exit;
+
+ if (unlikely(qp->req.state == QP_STATE_RESET)) {
+ qp->req.wqe_index = consumer_index(qp->sq.queue);
+ qp->req.opcode = -1;
+ qp->req.need_rd_atomic = 0;
+ qp->req.wait_psn = 0;
+ qp->req.need_retry = 0;
+ goto exit;
+ }
+
+ if (unlikely(qp->req.need_retry)) {
+ req_retry(qp);
+ qp->req.need_retry = 0;
+ }
+
+ wqe = req_next_wqe(qp);
+ if (unlikely(!wqe))
+ goto exit;
+
+ if (wqe->mask & WR_REG_MASK) {
+ if (wqe->wr.opcode == IB_WR_LOCAL_INV) {
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ struct rxe_mem *rmr;
+
+ rmr = rxe_pool_get_index(&rxe->mr_pool,
+ wqe->wr.ex.invalidate_rkey >> 8);
+ if (!rmr) {
+ pr_err("No mr for key %#x\n", wqe->wr.ex.invalidate_rkey);
+ wqe->state = wqe_state_error;
+ wqe->status = IB_WC_MW_BIND_ERR;
+ goto exit;
+ }
+ rmr->state = RXE_MEM_STATE_FREE;
+ wqe->state = wqe_state_done;
+ wqe->status = IB_WC_SUCCESS;
+ } else if (wqe->wr.opcode == IB_WR_REG_MR) {
+ struct rxe_mem *rmr = to_rmr(wqe->wr.wr.reg.mr);
+
+ rmr->state = RXE_MEM_STATE_VALID;
+ rmr->access = wqe->wr.wr.reg.access;
+ rmr->lkey = wqe->wr.wr.reg.key;
+ rmr->rkey = wqe->wr.wr.reg.key;
+ wqe->state = wqe_state_done;
+ wqe->status = IB_WC_SUCCESS;
+ } else {
+ goto exit;
+ }
+ qp->req.wqe_index = next_index(qp->sq.queue,
+ qp->req.wqe_index);
+ goto next_wqe;
+ }
+
+ if (unlikely(qp_type(qp) == IB_QPT_RC &&
+ qp->req.psn > (qp->comp.psn + RXE_MAX_UNACKED_PSNS))) {
+ qp->req.wait_psn = 1;
+ goto exit;
+ }
+
+ /* Limit the number of inflight SKBs per QP */
+ if (unlikely(atomic_read(&qp->skb_out) >
+ RXE_INFLIGHT_SKBS_PER_QP_HIGH)) {
+ qp->need_req_skb = 1;
+ goto exit;
+ }
+
+ opcode = next_opcode(qp, wqe, wqe->wr.opcode);
+ if (unlikely(opcode < 0)) {
+ wqe->status = IB_WC_LOC_QP_OP_ERR;
+ goto exit;
+ }
+
+ mask = rxe_opcode[opcode].mask;
+ if (unlikely(mask & RXE_READ_OR_ATOMIC)) {
+ if (check_init_depth(qp, wqe))
+ goto exit;
+ }
+
+ mtu = get_mtu(qp, wqe);
+ payload = (mask & RXE_WRITE_OR_SEND) ? wqe->dma.resid : 0;
+ if (payload > mtu) {
+ if (qp_type(qp) == IB_QPT_UD) {
+ /* C10-93.1.1: If the total sum of all the buffer lengths specified for a
+ * UD message exceeds the MTU of the port as returned by QueryHCA, the CI
+ * shall not emit any packets for this message. Further, the CI shall not
+ * generate an error due to this condition.
+ */
+
+ /* fake a successful UD send */
+ wqe->first_psn = qp->req.psn;
+ wqe->last_psn = qp->req.psn;
+ qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
+ qp->req.opcode = IB_OPCODE_UD_SEND_ONLY;
+ qp->req.wqe_index = next_index(qp->sq.queue,
+ qp->req.wqe_index);
+ wqe->state = wqe_state_done;
+ wqe->status = IB_WC_SUCCESS;
+ goto complete;
+ }
+ payload = mtu;
+ }
+
+ skb = init_req_packet(qp, wqe, opcode, payload, &pkt);
+ if (unlikely(!skb)) {
+ pr_err("Failed allocating skb\n");
+ goto err;
+ }
+
+ if (fill_packet(qp, wqe, &pkt, skb, payload)) {
+ pr_debug("Error during fill packet\n");
+ goto err;
+ }
+
+ /*
+ * To prevent a race on wqe access between requester and completer,
+ * wqe members state and psn need to be set before calling
+ * rxe_xmit_packet().
+ * Otherwise, completer might initiate an unjustified retry flow.
+ */
+ save_state(wqe, qp, &rollback_wqe, &rollback_qp);
+ update_wqe_state(qp, wqe, &pkt);
+ update_wqe_psn(qp, wqe, &pkt, payload);
+ ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
+ if (ret) {
+ qp->need_req_skb = 1;
+ kfree_skb(skb);
+ skb = NULL; /* the err path would otherwise free it twice */
+
+ rollback_state(wqe, qp, &rollback_wqe, &rollback_qp);
+
+ if (ret == -EAGAIN) {
+ rxe_run_task(&qp->req.task, 1);
+ goto exit;
+ }
+
+ goto err;
+ }
+
+ update_state(qp, wqe, &pkt, payload);
+
+ goto next_wqe;
+
+err:
+ kfree_skb(skb);
+ wqe->status = IB_WC_LOC_PROT_ERR;
+ wqe->state = wqe_state_error;
+
+complete:
+ if (qp_type(qp) != IB_QPT_RC) {
+ while (rxe_completer(qp) == 0)
+ ;
+ }
+
+ return 0;
+
+exit:
+ return -EAGAIN;
+}
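next_opcode_rc() and next_opcode_uc() above fragment one work request into a FIRST/MIDDLE/.../LAST packet train, keyed on the previous opcode and on whether the remaining payload fits in a single MTU. A stripped-down sketch of that walk for an RC SEND (standalone C with shortened opcode names, not driver code):

#include <stdio.h>

enum { SEND_FIRST, SEND_MIDDLE, SEND_LAST, SEND_ONLY };

static const char *name[] = { "FIRST", "MIDDLE", "LAST", "ONLY" };

/* Mirror of the next_opcode_rc() SEND logic: continue a started
 * message with MIDDLE/LAST, otherwise open one with ONLY/FIRST.
 */
static int next_send_opcode(int prev, int fits)
{
	if (prev == SEND_FIRST || prev == SEND_MIDDLE)
		return fits ? SEND_LAST : SEND_MIDDLE;
	return fits ? SEND_ONLY : SEND_FIRST;
}

int main(void)
{
	int resid = 2500, mtu = 1024, prev = -1;

	while (resid > 0) {
		int fits = resid <= mtu;

		prev = next_send_opcode(prev, fits);
		printf("SEND_%s (%d bytes)\n", name[prev],
		       fits ? resid : mtu);
		resid -= mtu;
	}
	return 0;	/* prints FIRST, MIDDLE, LAST for 2500/1024 */
}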
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
new file mode 100644
index 000000000..3e0f0f2ba
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -0,0 +1,1381 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+
+#include "rxe.h"
+#include "rxe_loc.h"
+#include "rxe_queue.h"
+
+enum resp_states {
+ RESPST_NONE,
+ RESPST_GET_REQ,
+ RESPST_CHK_PSN,
+ RESPST_CHK_OP_SEQ,
+ RESPST_CHK_OP_VALID,
+ RESPST_CHK_RESOURCE,
+ RESPST_CHK_LENGTH,
+ RESPST_CHK_RKEY,
+ RESPST_EXECUTE,
+ RESPST_READ_REPLY,
+ RESPST_COMPLETE,
+ RESPST_ACKNOWLEDGE,
+ RESPST_CLEANUP,
+ RESPST_DUPLICATE_REQUEST,
+ RESPST_ERR_MALFORMED_WQE,
+ RESPST_ERR_UNSUPPORTED_OPCODE,
+ RESPST_ERR_MISALIGNED_ATOMIC,
+ RESPST_ERR_PSN_OUT_OF_SEQ,
+ RESPST_ERR_MISSING_OPCODE_FIRST,
+ RESPST_ERR_MISSING_OPCODE_LAST_C,
+ RESPST_ERR_MISSING_OPCODE_LAST_D1E,
+ RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
+ RESPST_ERR_RNR,
+ RESPST_ERR_RKEY_VIOLATION,
+ RESPST_ERR_LENGTH,
+ RESPST_ERR_CQ_OVERFLOW,
+ RESPST_ERROR,
+ RESPST_RESET,
+ RESPST_DONE,
+ RESPST_EXIT,
+};
+
+static char *resp_state_name[] = {
+ [RESPST_NONE] = "NONE",
+ [RESPST_GET_REQ] = "GET_REQ",
+ [RESPST_CHK_PSN] = "CHK_PSN",
+ [RESPST_CHK_OP_SEQ] = "CHK_OP_SEQ",
+ [RESPST_CHK_OP_VALID] = "CHK_OP_VALID",
+ [RESPST_CHK_RESOURCE] = "CHK_RESOURCE",
+ [RESPST_CHK_LENGTH] = "CHK_LENGTH",
+ [RESPST_CHK_RKEY] = "CHK_RKEY",
+ [RESPST_EXECUTE] = "EXECUTE",
+ [RESPST_READ_REPLY] = "READ_REPLY",
+ [RESPST_COMPLETE] = "COMPLETE",
+ [RESPST_ACKNOWLEDGE] = "ACKNOWLEDGE",
+ [RESPST_CLEANUP] = "CLEANUP",
+ [RESPST_DUPLICATE_REQUEST] = "DUPLICATE_REQUEST",
+ [RESPST_ERR_MALFORMED_WQE] = "ERR_MALFORMED_WQE",
+ [RESPST_ERR_UNSUPPORTED_OPCODE] = "ERR_UNSUPPORTED_OPCODE",
+ [RESPST_ERR_MISALIGNED_ATOMIC] = "ERR_MISALIGNED_ATOMIC",
+ [RESPST_ERR_PSN_OUT_OF_SEQ] = "ERR_PSN_OUT_OF_SEQ",
+ [RESPST_ERR_MISSING_OPCODE_FIRST] = "ERR_MISSING_OPCODE_FIRST",
+ [RESPST_ERR_MISSING_OPCODE_LAST_C] = "ERR_MISSING_OPCODE_LAST_C",
+ [RESPST_ERR_MISSING_OPCODE_LAST_D1E] = "ERR_MISSING_OPCODE_LAST_D1E",
+ [RESPST_ERR_TOO_MANY_RDMA_ATM_REQ] = "ERR_TOO_MANY_RDMA_ATM_REQ",
+ [RESPST_ERR_RNR] = "ERR_RNR",
+ [RESPST_ERR_RKEY_VIOLATION] = "ERR_RKEY_VIOLATION",
+ [RESPST_ERR_LENGTH] = "ERR_LENGTH",
+ [RESPST_ERR_CQ_OVERFLOW] = "ERR_CQ_OVERFLOW",
+ [RESPST_ERROR] = "ERROR",
+ [RESPST_RESET] = "RESET",
+ [RESPST_DONE] = "DONE",
+ [RESPST_EXIT] = "EXIT",
+};
+
+/* rxe_recv calls here to add a request packet to the input queue */
+void rxe_resp_queue_pkt(struct rxe_dev *rxe, struct rxe_qp *qp,
+ struct sk_buff *skb)
+{
+ int must_sched;
+ struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
+
+ skb_queue_tail(&qp->req_pkts, skb);
+
+ must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
+ (skb_queue_len(&qp->req_pkts) > 1);
+
+ rxe_run_task(&qp->resp.task, must_sched);
+}
+
+static inline enum resp_states get_req(struct rxe_qp *qp,
+ struct rxe_pkt_info **pkt_p)
+{
+ struct sk_buff *skb;
+
+ if (qp->resp.state == QP_STATE_ERROR) {
+ skb = skb_dequeue(&qp->req_pkts);
+ if (skb) {
+ /* drain request packet queue */
+ rxe_drop_ref(qp);
+ kfree_skb(skb);
+ return RESPST_GET_REQ;
+ }
+
+ /* go drain recv wr queue */
+ return RESPST_CHK_RESOURCE;
+ }
+
+ skb = skb_peek(&qp->req_pkts);
+ if (!skb)
+ return RESPST_EXIT;
+
+ *pkt_p = SKB_TO_PKT(skb);
+
+ return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN;
+}
+
+static enum resp_states check_psn(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ int diff = psn_compare(pkt->psn, qp->resp.psn);
+
+ switch (qp_type(qp)) {
+ case IB_QPT_RC:
+ if (diff > 0) {
+ if (qp->resp.sent_psn_nak)
+ return RESPST_CLEANUP;
+
+ qp->resp.sent_psn_nak = 1;
+ return RESPST_ERR_PSN_OUT_OF_SEQ;
+
+ } else if (diff < 0) {
+ return RESPST_DUPLICATE_REQUEST;
+ }
+
+ if (qp->resp.sent_psn_nak)
+ qp->resp.sent_psn_nak = 0;
+
+ break;
+
+ case IB_QPT_UC:
+ if (qp->resp.drop_msg || diff != 0) {
+ if (pkt->mask & RXE_START_MASK) {
+ qp->resp.drop_msg = 0;
+ return RESPST_CHK_OP_SEQ;
+ }
+
+ qp->resp.drop_msg = 1;
+ return RESPST_CLEANUP;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return RESPST_CHK_OP_SEQ;
+}
+
+static enum resp_states check_op_seq(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ switch (qp_type(qp)) {
+ case IB_QPT_RC:
+ switch (qp->resp.opcode) {
+ case IB_OPCODE_RC_SEND_FIRST:
+ case IB_OPCODE_RC_SEND_MIDDLE:
+ switch (pkt->opcode) {
+ case IB_OPCODE_RC_SEND_MIDDLE:
+ case IB_OPCODE_RC_SEND_LAST:
+ case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
+ case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
+ return RESPST_CHK_OP_VALID;
+ default:
+ return RESPST_ERR_MISSING_OPCODE_LAST_C;
+ }
+
+ case IB_OPCODE_RC_RDMA_WRITE_FIRST:
+ case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
+ switch (pkt->opcode) {
+ case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
+ case IB_OPCODE_RC_RDMA_WRITE_LAST:
+ case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
+ return RESPST_CHK_OP_VALID;
+ default:
+ return RESPST_ERR_MISSING_OPCODE_LAST_C;
+ }
+
+ default:
+ switch (pkt->opcode) {
+ case IB_OPCODE_RC_SEND_MIDDLE:
+ case IB_OPCODE_RC_SEND_LAST:
+ case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
+ case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
+ case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
+ case IB_OPCODE_RC_RDMA_WRITE_LAST:
+ case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
+ return RESPST_ERR_MISSING_OPCODE_FIRST;
+ default:
+ return RESPST_CHK_OP_VALID;
+ }
+ }
+ break;
+
+ case IB_QPT_UC:
+ switch (qp->resp.opcode) {
+ case IB_OPCODE_UC_SEND_FIRST:
+ case IB_OPCODE_UC_SEND_MIDDLE:
+ switch (pkt->opcode) {
+ case IB_OPCODE_UC_SEND_MIDDLE:
+ case IB_OPCODE_UC_SEND_LAST:
+ case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
+ return RESPST_CHK_OP_VALID;
+ default:
+ return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
+ }
+
+ case IB_OPCODE_UC_RDMA_WRITE_FIRST:
+ case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
+ switch (pkt->opcode) {
+ case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
+ case IB_OPCODE_UC_RDMA_WRITE_LAST:
+ case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
+ return RESPST_CHK_OP_VALID;
+ default:
+ return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
+ }
+
+ default:
+ switch (pkt->opcode) {
+ case IB_OPCODE_UC_SEND_MIDDLE:
+ case IB_OPCODE_UC_SEND_LAST:
+ case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
+ case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
+ case IB_OPCODE_UC_RDMA_WRITE_LAST:
+ case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
+ qp->resp.drop_msg = 1;
+ return RESPST_CLEANUP;
+ default:
+ return RESPST_CHK_OP_VALID;
+ }
+ }
+ break;
+
+ default:
+ return RESPST_CHK_OP_VALID;
+ }
+}
+
+static enum resp_states check_op_valid(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ switch (qp_type(qp)) {
+ case IB_QPT_RC:
+ if (((pkt->mask & RXE_READ_MASK) &&
+ !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) ||
+ ((pkt->mask & RXE_WRITE_MASK) &&
+ !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) ||
+ ((pkt->mask & RXE_ATOMIC_MASK) &&
+ !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) {
+ return RESPST_ERR_UNSUPPORTED_OPCODE;
+ }
+
+ break;
+
+ case IB_QPT_UC:
+ if ((pkt->mask & RXE_WRITE_MASK) &&
+ !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) {
+ qp->resp.drop_msg = 1;
+ return RESPST_CLEANUP;
+ }
+
+ break;
+
+ case IB_QPT_UD:
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ break;
+
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ return RESPST_CHK_RESOURCE;
+}
+
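+/* Pop the next recv wqe from the SRQ, copying it into the QP, and
+ * raise IB_EVENT_SRQ_LIMIT_REACHED once the queue drops below the
+ * armed limit.
+ */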
+static enum resp_states get_srq_wqe(struct rxe_qp *qp)
+{
+ struct rxe_srq *srq = qp->srq;
+ struct rxe_queue *q = srq->rq.queue;
+ struct rxe_recv_wqe *wqe;
+ struct ib_event ev;
+
+ if (srq->error)
+ return RESPST_ERR_RNR;
+
+ spin_lock_bh(&srq->rq.consumer_lock);
+
+ wqe = queue_head(q);
+ if (!wqe) {
+ spin_unlock_bh(&srq->rq.consumer_lock);
+ return RESPST_ERR_RNR;
+ }
+
+ /* note kernel and user space recv wqes have the same size */
+ memcpy(&qp->resp.srq_wqe, wqe, sizeof(qp->resp.srq_wqe));
+
+ qp->resp.wqe = &qp->resp.srq_wqe.wqe;
+ advance_consumer(q);
+
+ if (srq->limit && srq->ibsrq.event_handler &&
+ (queue_count(q) < srq->limit)) {
+ srq->limit = 0;
+ goto event;
+ }
+
+ spin_unlock_bh(&srq->rq.consumer_lock);
+ return RESPST_CHK_LENGTH;
+
+event:
+ spin_unlock_bh(&srq->rq.consumer_lock);
+ ev.device = qp->ibqp.device;
+ ev.element.srq = qp->ibqp.srq;
+ ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ srq->ibsrq.event_handler(&ev, srq->ibsrq.srq_context);
+ return RESPST_CHK_LENGTH;
+}
+
+static enum resp_states check_resource(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ struct rxe_srq *srq = qp->srq;
+
+ if (qp->resp.state == QP_STATE_ERROR) {
+ if (qp->resp.wqe) {
+ qp->resp.status = IB_WC_WR_FLUSH_ERR;
+ return RESPST_COMPLETE;
+ } else if (!srq) {
+ qp->resp.wqe = queue_head(qp->rq.queue);
+ if (qp->resp.wqe) {
+ qp->resp.status = IB_WC_WR_FLUSH_ERR;
+ return RESPST_COMPLETE;
+ } else {
+ return RESPST_EXIT;
+ }
+ } else {
+ return RESPST_EXIT;
+ }
+ }
+
+ if (pkt->mask & RXE_READ_OR_ATOMIC) {
+ /* it is the requester's job not to send
+ * too many read/atomic ops; we just
+ * recycle the responder resource queue
+ */
+ if (likely(qp->attr.max_rd_atomic > 0))
+ return RESPST_CHK_LENGTH;
+ else
+ return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ;
+ }
+
+ if (pkt->mask & RXE_RWR_MASK) {
+ if (srq)
+ return get_srq_wqe(qp);
+
+ qp->resp.wqe = queue_head(qp->rq.queue);
+ return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR;
+ }
+
+ return RESPST_CHK_LENGTH;
+}
+
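+/* Length checking is not implemented yet; every QP type falls through
+ * to the rkey check.
+ */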
+static enum resp_states check_length(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ switch (qp_type(qp)) {
+ case IB_QPT_RC:
+ return RESPST_CHK_RKEY;
+
+ case IB_QPT_UC:
+ return RESPST_CHK_RKEY;
+
+ default:
+ return RESPST_CHK_RKEY;
+ }
+}
+
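+/* Look up and validate the memory region referenced by the rkey:
+ * access rights, the va/length range and, for writes, the per-packet
+ * length and padding. On success qp->resp.mr holds a reference to the
+ * region.
+ */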
+static enum resp_states check_rkey(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ struct rxe_mem *mem;
+ u64 va;
+ u32 rkey;
+ u32 resid;
+ u32 pktlen;
+ int mtu = qp->mtu;
+ enum resp_states state;
+ int access;
+
+ if (pkt->mask & (RXE_READ_MASK | RXE_WRITE_MASK)) {
+ if (pkt->mask & RXE_RETH_MASK) {
+ qp->resp.va = reth_va(pkt);
+ qp->resp.rkey = reth_rkey(pkt);
+ qp->resp.resid = reth_len(pkt);
+ }
+ access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
+ : IB_ACCESS_REMOTE_WRITE;
+ } else if (pkt->mask & RXE_ATOMIC_MASK) {
+ qp->resp.va = atmeth_va(pkt);
+ qp->resp.rkey = atmeth_rkey(pkt);
+ qp->resp.resid = sizeof(u64);
+ access = IB_ACCESS_REMOTE_ATOMIC;
+ } else {
+ return RESPST_EXECUTE;
+ }
+
+ va = qp->resp.va;
+ rkey = qp->resp.rkey;
+ resid = qp->resp.resid;
+ pktlen = payload_size(pkt);
+
+ mem = lookup_mem(qp->pd, access, rkey, lookup_remote);
+ if (!mem) {
+ state = RESPST_ERR_RKEY_VIOLATION;
+ goto err1;
+ }
+
+ if (unlikely(mem->state == RXE_MEM_STATE_FREE)) {
+ state = RESPST_ERR_RKEY_VIOLATION;
+ goto err1;
+ }
+
+ if (mem_check_range(mem, va, resid)) {
+ state = RESPST_ERR_RKEY_VIOLATION;
+ goto err2;
+ }
+
+ if (pkt->mask & RXE_WRITE_MASK) {
+ if (resid > mtu) {
+ if (pktlen != mtu || bth_pad(pkt)) {
+ state = RESPST_ERR_LENGTH;
+ goto err2;
+ }
+
+ resid = mtu;
+ } else {
+ if (pktlen != resid) {
+ state = RESPST_ERR_LENGTH;
+ goto err2;
+ }
+ if ((bth_pad(pkt) != (0x3 & (-resid)))) {
+ /* This may not be exactly the right error
+ * class, but nothing else fits.
+ */
+ state = RESPST_ERR_LENGTH;
+ goto err2;
+ }
+ }
+ }
+
+ WARN_ON(qp->resp.mr);
+
+ qp->resp.mr = mem;
+ return RESPST_EXECUTE;
+
+err2:
+ rxe_drop_ref(mem);
+err1:
+ return state;
+}
+
+static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
+ int data_len)
+{
+ int err;
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+
+ err = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
+ data_addr, data_len, to_mem_obj, NULL);
+ if (unlikely(err))
+ return (err == -ENOSPC) ? RESPST_ERR_LENGTH
+ : RESPST_ERR_MALFORMED_WQE;
+
+ return RESPST_NONE;
+}
+
+static enum resp_states write_data_in(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ enum resp_states rc = RESPST_NONE;
+ int err;
+ int data_len = payload_size(pkt);
+
+ err = rxe_mem_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt),
+ data_len, to_mem_obj, NULL);
+ if (err) {
+ rc = RESPST_ERR_RKEY_VIOLATION;
+ goto out;
+ }
+
+ qp->resp.va += data_len;
+ qp->resp.resid -= data_len;
+
+out:
+ return rc;
+}
+
+/* Guarantee atomicity of atomic operations at the machine level. */
+static DEFINE_SPINLOCK(atomic_ops_lock);
+
+static enum resp_states process_atomic(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ u64 iova = atmeth_va(pkt);
+ u64 *vaddr;
+ enum resp_states ret;
+ struct rxe_mem *mr = qp->resp.mr;
+
+ if (mr->state != RXE_MEM_STATE_VALID) {
+ ret = RESPST_ERR_RKEY_VIOLATION;
+ goto out;
+ }
+
+ vaddr = iova_to_vaddr(mr, iova, sizeof(u64));
+
+ /* check vaddr is 8 bytes aligned. */
+ if (!vaddr || (uintptr_t)vaddr & 7) {
+ ret = RESPST_ERR_MISALIGNED_ATOMIC;
+ goto out;
+ }
+
+ spin_lock_bh(&atomic_ops_lock);
+
+ qp->resp.atomic_orig = *vaddr;
+
+ if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP ||
+ pkt->opcode == IB_OPCODE_RD_COMPARE_SWAP) {
+ if (*vaddr == atmeth_comp(pkt))
+ *vaddr = atmeth_swap_add(pkt);
+ } else {
+ *vaddr += atmeth_swap_add(pkt);
+ }
+
+ spin_unlock_bh(&atomic_ops_lock);
+
+ ret = RESPST_NONE;
+out:
+ return ret;
+}
+
+static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt,
+ struct rxe_pkt_info *ack,
+ int opcode,
+ int payload,
+ u32 psn,
+ u8 syndrome,
+ u32 *crcp)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ struct sk_buff *skb;
+ u32 crc = 0;
+ u32 *p;
+ int paylen;
+ int pad;
+ int err;
+
+ /*
+ * allocate packet
+ */
+ pad = (-payload) & 0x3;
+ paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;
+
+ skb = rxe->ifc_ops->init_packet(rxe, &qp->pri_av, paylen, ack);
+ if (!skb)
+ return NULL;
+
+ ack->qp = qp;
+ ack->opcode = opcode;
+ ack->mask = rxe_opcode[opcode].mask;
+ ack->offset = pkt->offset;
+ ack->paylen = paylen;
+
+ /* fill in bth using the request packet headers */
+ memcpy(ack->hdr, pkt->hdr, pkt->offset + RXE_BTH_BYTES);
+
+ bth_set_opcode(ack, opcode);
+ bth_set_qpn(ack, qp->attr.dest_qp_num);
+ bth_set_pad(ack, pad);
+ bth_set_se(ack, 0);
+ bth_set_psn(ack, psn);
+ bth_set_ack(ack, 0);
+ ack->psn = psn;
+
+ if (ack->mask & RXE_AETH_MASK) {
+ aeth_set_syn(ack, syndrome);
+ aeth_set_msn(ack, qp->resp.msn);
+ }
+
+ if (ack->mask & RXE_ATMACK_MASK)
+ atmack_set_orig(ack, qp->resp.atomic_orig);
+
+ err = rxe->ifc_ops->prepare(rxe, ack, skb, &crc);
+ if (err) {
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ if (crcp) {
+ /* CRC computation will be continued by the caller */
+ *crcp = crc;
+ } else {
+ p = payload_addr(ack) + payload + bth_pad(ack);
+ *p = ~crc;
+ }
+
+ return skb;
+}
+
+/* RDMA read response. If res is not NULL, then we have a current RDMA request
+ * being processed or replayed.
+ */
+static enum resp_states read_reply(struct rxe_qp *qp,
+ struct rxe_pkt_info *req_pkt)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ struct rxe_pkt_info ack_pkt;
+ struct sk_buff *skb;
+ int mtu = qp->mtu;
+ enum resp_states state;
+ int payload;
+ int opcode;
+ int err;
+ struct resp_res *res = qp->resp.res;
+ u32 icrc;
+ u32 *p;
+
+ if (!res) {
+ /* This is the first time we process this request;
+ * get a resource.
+ */
+ res = &qp->resp.resources[qp->resp.res_head];
+
+ free_rd_atomic_resource(qp, res);
+ rxe_advance_resp_resource(qp);
+
+ res->type = RXE_READ_MASK;
+
+ res->read.va = qp->resp.va;
+ res->read.va_org = qp->resp.va;
+
+ res->first_psn = req_pkt->psn;
+ res->last_psn = req_pkt->psn +
+ (reth_len(req_pkt) + mtu - 1) /
+ mtu - 1;
+ res->cur_psn = req_pkt->psn;
+
+ res->read.resid = qp->resp.resid;
+ res->read.length = qp->resp.resid;
+ res->read.rkey = qp->resp.rkey;
+
+ /* note res inherits the reference to mr from qp */
+ res->read.mr = qp->resp.mr;
+ qp->resp.mr = NULL;
+
+ qp->resp.res = res;
+ res->state = rdatm_res_state_new;
+ }
+
+ if (res->state == rdatm_res_state_new) {
+ if (res->read.resid <= mtu)
+ opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
+ else
+ opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
+ } else {
+ if (res->read.resid > mtu)
+ opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
+ else
+ opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST;
+ }
+
+ res->state = rdatm_res_state_next;
+
+ payload = min_t(int, res->read.resid, mtu);
+
+ skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload,
+ res->cur_psn, AETH_ACK_UNLIMITED, &icrc);
+ if (!skb)
+ return RESPST_ERR_RNR;
+
+ err = rxe_mem_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
+ payload, from_mem_obj, &icrc);
+ if (err)
+ pr_err("Failed copying memory\n");
+
+ p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt);
+ *p = ~icrc;
+
+ err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
+ if (err) {
+ pr_err("Failed sending RDMA reply.\n");
+ kfree_skb(skb);
+ return RESPST_ERR_RNR;
+ }
+
+ res->read.va += payload;
+ res->read.resid -= payload;
+ res->cur_psn = (res->cur_psn + 1) & BTH_PSN_MASK;
+
+ if (res->read.resid > 0) {
+ state = RESPST_DONE;
+ } else {
+ qp->resp.res = NULL;
+ qp->resp.opcode = -1;
+ qp->resp.psn = res->cur_psn;
+ state = RESPST_CLEANUP;
+ }
+
+ return state;
+}
+
+/* Execute a new request. A retried request never reaches this function
+ * (sends and writes are discarded, and reads and atomics are retried
+ * elsewhere).
+ */
+static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
+{
+ enum resp_states err;
+
+ if (pkt->mask & RXE_SEND_MASK) {
+ if (qp_type(qp) == IB_QPT_UD ||
+ qp_type(qp) == IB_QPT_SMI ||
+ qp_type(qp) == IB_QPT_GSI) {
+ union rdma_network_hdr hdr;
+ struct sk_buff *skb = PKT_TO_SKB(pkt);
+
+ memset(&hdr, 0, sizeof(hdr));
+ if (skb->protocol == htons(ETH_P_IP))
+ memcpy(&hdr.roce4grh, ip_hdr(skb), sizeof(hdr.roce4grh));
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ memcpy(&hdr.ibgrh, ipv6_hdr(skb), sizeof(hdr.ibgrh));
+
+ err = send_data_in(qp, &hdr, sizeof(hdr));
+ if (err)
+ return err;
+ }
+ err = send_data_in(qp, payload_addr(pkt), payload_size(pkt));
+ if (err)
+ return err;
+ } else if (pkt->mask & RXE_WRITE_MASK) {
+ err = write_data_in(qp, pkt);
+ if (err)
+ return err;
+ } else if (pkt->mask & RXE_READ_MASK) {
+ /* For RDMA Read we can increment the msn now. See C9-148. */
+ qp->resp.msn++;
+ return RESPST_READ_REPLY;
+ } else if (pkt->mask & RXE_ATOMIC_MASK) {
+ err = process_atomic(qp, pkt);
+ if (err)
+ return err;
+ } else
+ /* Unreachable */
+ WARN_ON(1);
+
+ /* We successfully processed this new request. */
+ qp->resp.msn++;
+
+ /* next expected psn, read handles this separately */
+ qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
+
+ qp->resp.opcode = pkt->opcode;
+ qp->resp.status = IB_WC_SUCCESS;
+
+ if (pkt->mask & RXE_COMP_MASK)
+ return RESPST_COMPLETE;
+ else if (qp_type(qp) == IB_QPT_RC)
+ return RESPST_ACKNOWLEDGE;
+ else
+ return RESPST_CLEANUP;
+}
+
+static enum resp_states do_complete(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ struct rxe_cqe cqe;
+ struct ib_wc *wc = &cqe.ibwc;
+ struct ib_uverbs_wc *uwc = &cqe.uibwc;
+ struct rxe_recv_wqe *wqe = qp->resp.wqe;
+
+ if (unlikely(!wqe))
+ return RESPST_CLEANUP;
+
+ memset(&cqe, 0, sizeof(cqe));
+
+ wc->wr_id = wqe->wr_id;
+ wc->status = qp->resp.status;
+ wc->qp = &qp->ibqp;
+
+ /* fields after status are not required for errors */
+ if (wc->status == IB_WC_SUCCESS) {
+ wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
+ pkt->mask & RXE_WRITE_MASK) ?
+ IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
+ wc->vendor_err = 0;
+ wc->byte_len = wqe->dma.length - wqe->dma.resid;
+
+ /* fields after byte_len are different between kernel and user
+ * space
+ */
+ if (qp->rcq->is_user) {
+ uwc->wc_flags = IB_WC_GRH;
+
+ if (pkt->mask & RXE_IMMDT_MASK) {
+ uwc->wc_flags |= IB_WC_WITH_IMM;
+ uwc->ex.imm_data =
+ (__u32 __force)immdt_imm(pkt);
+ }
+
+ if (pkt->mask & RXE_IETH_MASK) {
+ uwc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ uwc->ex.invalidate_rkey = ieth_rkey(pkt);
+ }
+
+ uwc->qp_num = qp->ibqp.qp_num;
+
+ if (pkt->mask & RXE_DETH_MASK)
+ uwc->src_qp = deth_sqp(pkt);
+
+ uwc->port_num = qp->attr.port_num;
+ } else {
+ struct sk_buff *skb = PKT_TO_SKB(pkt);
+
+ wc->wc_flags = IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE;
+ if (skb->protocol == htons(ETH_P_IP))
+ wc->network_hdr_type = RDMA_NETWORK_IPV4;
+ else
+ wc->network_hdr_type = RDMA_NETWORK_IPV6;
+
+ if (pkt->mask & RXE_IMMDT_MASK) {
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ wc->ex.imm_data = immdt_imm(pkt);
+ }
+
+ if (pkt->mask & RXE_IETH_MASK) {
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ struct rxe_mem *rmr;
+
+ wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ wc->ex.invalidate_rkey = ieth_rkey(pkt);
+
+ rmr = rxe_pool_get_index(&rxe->mr_pool,
+ wc->ex.invalidate_rkey >> 8);
+ if (unlikely(!rmr)) {
+ pr_err("Bad rkey %#x invalidation\n", wc->ex.invalidate_rkey);
+ return RESPST_ERROR;
+ }
+ rmr->state = RXE_MEM_STATE_FREE;
+ }
+
+ wc->qp = &qp->ibqp;
+
+ if (pkt->mask & RXE_DETH_MASK)
+ wc->src_qp = deth_sqp(pkt);
+
+ wc->port_num = qp->attr.port_num;
+ }
+ }
+
+ /* we hold a copy of the wqe for srq and a reference into the queue for !srq */
+ if (!qp->srq)
+ advance_consumer(qp->rq.queue);
+
+ qp->resp.wqe = NULL;
+
+ if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
+ return RESPST_ERR_CQ_OVERFLOW;
+
+ if (qp->resp.state == QP_STATE_ERROR)
+ return RESPST_CHK_RESOURCE;
+
+ if (!pkt)
+ return RESPST_DONE;
+ else if (qp_type(qp) == IB_QPT_RC)
+ return RESPST_ACKNOWLEDGE;
+ else
+ return RESPST_CLEANUP;
+}
+
+static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
+ u8 syndrome, u32 psn)
+{
+ int err = 0;
+ struct rxe_pkt_info ack_pkt;
+ struct sk_buff *skb;
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+
+ skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
+ 0, psn, syndrome, NULL);
+ if (!skb) {
+ err = -ENOMEM;
+ goto err1;
+ }
+
+ err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
+ if (err) {
+ pr_err_ratelimited("Failed sending ack\n");
+ kfree_skb(skb);
+ }
+
+err1:
+ return err;
+}
+
+static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
+ u8 syndrome)
+{
+ int rc = 0;
+ struct rxe_pkt_info ack_pkt;
+ struct sk_buff *skb;
+ struct sk_buff *skb_copy;
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ struct resp_res *res;
+
+ skb = prepare_ack_packet(qp, pkt, &ack_pkt,
+ IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn,
+ syndrome, NULL);
+ if (!skb) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ skb_copy = skb_clone(skb, GFP_ATOMIC);
+ if (skb_copy)
+ rxe_add_ref(qp); /* for the new SKB */
+ else {
+ pr_warn("Could not clone atomic response\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ res = &qp->resp.resources[qp->resp.res_head];
+ free_rd_atomic_resource(qp, res);
+ rxe_advance_resp_resource(qp);
+
+ memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(skb->cb));
+
+ res->type = RXE_ATOMIC_MASK;
+ res->atomic.skb = skb;
+ res->first_psn = ack_pkt.psn;
+ res->last_psn = ack_pkt.psn;
+ res->cur_psn = ack_pkt.psn;
+
+ rc = rxe_xmit_packet(rxe, qp, &ack_pkt, skb_copy);
+ if (rc) {
+ pr_err_ratelimited("Failed sending ack\n");
+ rxe_drop_ref(qp);
+ kfree_skb(skb_copy);
+ }
+
+out:
+ return rc;
+}
+
+static enum resp_states acknowledge(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ if (qp_type(qp) != IB_QPT_RC)
+ return RESPST_CLEANUP;
+
+ if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
+ send_ack(qp, pkt, qp->resp.aeth_syndrome, pkt->psn);
+ else if (pkt->mask & RXE_ATOMIC_MASK)
+ send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED);
+ else if (bth_ack(pkt))
+ send_ack(qp, pkt, AETH_ACK_UNLIMITED, pkt->psn);
+
+ return RESPST_CLEANUP;
+}
+
+static enum resp_states cleanup(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ struct sk_buff *skb;
+
+ if (pkt) {
+ skb = skb_dequeue(&qp->req_pkts);
+ rxe_drop_ref(qp);
+ kfree_skb(skb);
+ }
+
+ if (qp->resp.mr) {
+ rxe_drop_ref(qp->resp.mr);
+ qp->resp.mr = NULL;
+ }
+
+ return RESPST_DONE;
+}
+
+static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn)
+{
+ int i;
+
+ for (i = 0; i < qp->attr.max_rd_atomic; i++) {
+ struct resp_res *res = &qp->resp.resources[i];
+
+ if (res->type == 0)
+ continue;
+
+ if (psn_compare(psn, res->first_psn) >= 0 &&
+ psn_compare(psn, res->last_psn) <= 0) {
+ return res;
+ }
+ }
+
+ return NULL;
+}
+
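+/* Handle a packet whose PSN is behind qp->resp.psn: re-ack duplicate
+ * sends and writes, replay RDMA read replies from the saved resource,
+ * and retransmit saved atomic results.
+ */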
+static enum resp_states duplicate_request(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
+{
+ enum resp_states rc;
+
+ if (pkt->mask & RXE_SEND_MASK ||
+ pkt->mask & RXE_WRITE_MASK) {
+ /* SEND or WRITE. Ack again and cleanup. C9-105. */
+ if (bth_ack(pkt))
+ send_ack(qp, pkt, AETH_ACK_UNLIMITED, qp->resp.psn - 1);
+ rc = RESPST_CLEANUP;
+ goto out;
+ } else if (pkt->mask & RXE_READ_MASK) {
+ struct resp_res *res;
+
+ res = find_resource(qp, pkt->psn);
+ if (!res) {
+ /* Resource not found. Class D error. Drop the
+ * request.
+ */
+ rc = RESPST_CLEANUP;
+ goto out;
+ } else {
+ /* Ensure this new request is the same as the previous
+ * one or a subset of it.
+ */
+ u64 iova = reth_va(pkt);
+ u32 resid = reth_len(pkt);
+
+ if (iova < res->read.va_org ||
+ resid > res->read.length ||
+ (iova + resid) > (res->read.va_org +
+ res->read.length)) {
+ rc = RESPST_CLEANUP;
+ goto out;
+ }
+
+ if (reth_rkey(pkt) != res->read.rkey) {
+ rc = RESPST_CLEANUP;
+ goto out;
+ }
+
+ res->cur_psn = pkt->psn;
+ res->state = (pkt->psn == res->first_psn) ?
+ rdatm_res_state_new :
+ rdatm_res_state_replay;
+
+ /* Reset the resource, except length. */
+ res->read.va_org = iova;
+ res->read.va = iova;
+ res->read.resid = resid;
+
+ /* Replay the RDMA read reply. */
+ qp->resp.res = res;
+ rc = RESPST_READ_REPLY;
+ goto out;
+ }
+ } else {
+ struct resp_res *res;
+
+ /* Find the operation in our list of responder resources. */
+ res = find_resource(qp, pkt->psn);
+ if (res) {
+ struct sk_buff *skb_copy;
+
+ skb_copy = skb_clone(res->atomic.skb, GFP_ATOMIC);
+ if (skb_copy) {
+ rxe_add_ref(qp); /* for the new SKB */
+ } else {
+ pr_warn("Couldn't clone atomic resp\n");
+ rc = RESPST_CLEANUP;
+ goto out;
+ }
+
+ /* Resend the result. */
+ rc = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp,
+ pkt, skb_copy);
+ if (rc) {
+ pr_err("Failed resending result. This flow is not handled - skb ignored\n");
+ kfree_skb(skb_copy);
+ rc = RESPST_CLEANUP;
+ goto out;
+ }
+ }
+
+ /* Either the resource was not found (Class D error)
+ * or the result was resent; in both cases drop the request.
+ */
+ rc = RESPST_CLEANUP;
+ goto out;
+ }
+out:
+ return rc;
+}
+
+/* Process a Class A or C error. Both are treated the same in this implementation. */
+static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome,
+ enum ib_wc_status status)
+{
+ qp->resp.aeth_syndrome = syndrome;
+ qp->resp.status = status;
+
+ /* indicate that we should go through the ERROR state */
+ qp->resp.goto_error = 1;
+}
+
+static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
+{
+ /* UC */
+ if (qp->srq) {
+ /* Class E */
+ qp->resp.drop_msg = 1;
+ if (qp->resp.wqe) {
+ qp->resp.status = IB_WC_REM_INV_REQ_ERR;
+ return RESPST_COMPLETE;
+ } else {
+ return RESPST_CLEANUP;
+ }
+ } else {
+		/* Class D1. This packet may be the start of a
+		 * new message and could be valid. The previous
+		 * message is invalid and ignored; reset the
+		 * recv wr to its original state.
+		 */
+ if (qp->resp.wqe) {
+ qp->resp.wqe->dma.resid = qp->resp.wqe->dma.length;
+ qp->resp.wqe->dma.cur_sge = 0;
+ qp->resp.wqe->dma.sge_offset = 0;
+ qp->resp.opcode = -1;
+ }
+
+ if (qp->resp.mr) {
+ rxe_drop_ref(qp->resp.mr);
+ qp->resp.mr = NULL;
+ }
+
+ return RESPST_CLEANUP;
+ }
+}
+
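+/* Responder state machine. Drain request packets from qp->req_pkts and
+ * walk each one through the check/execute/complete/acknowledge states
+ * until the packet is finished or there is nothing left to do.
+ */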
+int rxe_responder(void *arg)
+{
+ struct rxe_qp *qp = (struct rxe_qp *)arg;
+ enum resp_states state;
+ struct rxe_pkt_info *pkt = NULL;
+ int ret = 0;
+
+ qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;
+
+ if (!qp->valid) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ switch (qp->resp.state) {
+ case QP_STATE_RESET:
+ state = RESPST_RESET;
+ break;
+
+ default:
+ state = RESPST_GET_REQ;
+ break;
+ }
+
+ while (1) {
+ pr_debug("state = %s\n", resp_state_name[state]);
+ switch (state) {
+ case RESPST_GET_REQ:
+ state = get_req(qp, &pkt);
+ break;
+ case RESPST_CHK_PSN:
+ state = check_psn(qp, pkt);
+ break;
+ case RESPST_CHK_OP_SEQ:
+ state = check_op_seq(qp, pkt);
+ break;
+ case RESPST_CHK_OP_VALID:
+ state = check_op_valid(qp, pkt);
+ break;
+ case RESPST_CHK_RESOURCE:
+ state = check_resource(qp, pkt);
+ break;
+ case RESPST_CHK_LENGTH:
+ state = check_length(qp, pkt);
+ break;
+ case RESPST_CHK_RKEY:
+ state = check_rkey(qp, pkt);
+ break;
+ case RESPST_EXECUTE:
+ state = execute(qp, pkt);
+ break;
+ case RESPST_COMPLETE:
+ state = do_complete(qp, pkt);
+ break;
+ case RESPST_READ_REPLY:
+ state = read_reply(qp, pkt);
+ break;
+ case RESPST_ACKNOWLEDGE:
+ state = acknowledge(qp, pkt);
+ break;
+ case RESPST_CLEANUP:
+ state = cleanup(qp, pkt);
+ break;
+ case RESPST_DUPLICATE_REQUEST:
+ state = duplicate_request(qp, pkt);
+ break;
+ case RESPST_ERR_PSN_OUT_OF_SEQ:
+ /* RC only - Class B. Drop packet. */
+ send_ack(qp, pkt, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn);
+ state = RESPST_CLEANUP;
+ break;
+
+ case RESPST_ERR_TOO_MANY_RDMA_ATM_REQ:
+ case RESPST_ERR_MISSING_OPCODE_FIRST:
+ case RESPST_ERR_MISSING_OPCODE_LAST_C:
+ case RESPST_ERR_UNSUPPORTED_OPCODE:
+ case RESPST_ERR_MISALIGNED_ATOMIC:
+ /* RC Only - Class C. */
+ do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
+ IB_WC_REM_INV_REQ_ERR);
+ state = RESPST_COMPLETE;
+ break;
+
+ case RESPST_ERR_MISSING_OPCODE_LAST_D1E:
+ state = do_class_d1e_error(qp);
+ break;
+ case RESPST_ERR_RNR:
+ if (qp_type(qp) == IB_QPT_RC) {
+ /* RC - class B */
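+				/* the value bits of the AETH syndrome
+				 * carry the min RNR timer
+				 */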
+ send_ack(qp, pkt, AETH_RNR_NAK |
+ (~AETH_TYPE_MASK &
+ qp->attr.min_rnr_timer),
+ pkt->psn);
+ } else {
+ /* UD/UC - class D */
+ qp->resp.drop_msg = 1;
+ }
+ state = RESPST_CLEANUP;
+ break;
+
+ case RESPST_ERR_RKEY_VIOLATION:
+ if (qp_type(qp) == IB_QPT_RC) {
+ /* Class C */
+ do_class_ac_error(qp, AETH_NAK_REM_ACC_ERR,
+ IB_WC_REM_ACCESS_ERR);
+ state = RESPST_COMPLETE;
+ } else {
+ qp->resp.drop_msg = 1;
+ if (qp->srq) {
+ /* UC/SRQ Class D */
+ qp->resp.status = IB_WC_REM_ACCESS_ERR;
+ state = RESPST_COMPLETE;
+ } else {
+ /* UC/non-SRQ Class E. */
+ state = RESPST_CLEANUP;
+ }
+ }
+ break;
+
+ case RESPST_ERR_LENGTH:
+ if (qp_type(qp) == IB_QPT_RC) {
+ /* Class C */
+ do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
+ IB_WC_REM_INV_REQ_ERR);
+ state = RESPST_COMPLETE;
+ } else if (qp->srq) {
+ /* UC/UD - class E */
+ qp->resp.status = IB_WC_REM_INV_REQ_ERR;
+ state = RESPST_COMPLETE;
+ } else {
+ /* UC/UD - class D */
+ qp->resp.drop_msg = 1;
+ state = RESPST_CLEANUP;
+ }
+ break;
+
+ case RESPST_ERR_MALFORMED_WQE:
+ /* All, Class A. */
+ do_class_ac_error(qp, AETH_NAK_REM_OP_ERR,
+ IB_WC_LOC_QP_OP_ERR);
+ state = RESPST_COMPLETE;
+ break;
+
+ case RESPST_ERR_CQ_OVERFLOW:
+ /* All - Class G */
+ state = RESPST_ERROR;
+ break;
+
+ case RESPST_DONE:
+ if (qp->resp.goto_error) {
+ state = RESPST_ERROR;
+ break;
+ }
+
+ goto done;
+
+ case RESPST_EXIT:
+ if (qp->resp.goto_error) {
+ state = RESPST_ERROR;
+ break;
+ }
+
+ goto exit;
+
+ case RESPST_RESET: {
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&qp->req_pkts))) {
+ rxe_drop_ref(qp);
+ kfree_skb(skb);
+ }
+
+ while (!qp->srq && qp->rq.queue &&
+ queue_head(qp->rq.queue))
+ advance_consumer(qp->rq.queue);
+
+ qp->resp.wqe = NULL;
+ goto exit;
+ }
+
+ case RESPST_ERROR:
+ qp->resp.goto_error = 0;
+ pr_warn("qp#%d moved to error state\n", qp_num(qp));
+ rxe_qp_error(qp);
+ goto exit;
+
+ default:
+ WARN_ON(1);
+ }
+ }
+
+exit:
+ ret = -EAGAIN;
+done:
+ return ret;
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
new file mode 100644
index 000000000..2a6e3cd2d
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "rxe.h"
+#include "rxe_loc.h"
+#include "rxe_queue.h"
+
+int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
+ struct ib_srq_attr *attr, enum ib_srq_attr_mask mask)
+{
+ if (srq && srq->error) {
+ pr_warn("srq in error state\n");
+ goto err1;
+ }
+
+ if (mask & IB_SRQ_MAX_WR) {
+ if (attr->max_wr > rxe->attr.max_srq_wr) {
+ pr_warn("max_wr(%d) > max_srq_wr(%d)\n",
+ attr->max_wr, rxe->attr.max_srq_wr);
+ goto err1;
+ }
+
+ if (attr->max_wr <= 0) {
+ pr_warn("max_wr(%d) <= 0\n", attr->max_wr);
+ goto err1;
+ }
+
+ if (srq && srq->limit && (attr->max_wr < srq->limit)) {
+ pr_warn("max_wr (%d) < srq->limit (%d)\n",
+ attr->max_wr, srq->limit);
+ goto err1;
+ }
+
+ if (attr->max_wr < RXE_MIN_SRQ_WR)
+ attr->max_wr = RXE_MIN_SRQ_WR;
+ }
+
+ if (mask & IB_SRQ_LIMIT) {
+ if (attr->srq_limit > rxe->attr.max_srq_wr) {
+ pr_warn("srq_limit(%d) > max_srq_wr(%d)\n",
+ attr->srq_limit, rxe->attr.max_srq_wr);
+ goto err1;
+ }
+
+ if (srq && (attr->srq_limit > srq->rq.queue->buf->index_mask)) {
+ pr_warn("srq_limit (%d) > cur limit(%d)\n",
+ attr->srq_limit,
+ srq->rq.queue->buf->index_mask);
+ goto err1;
+ }
+ }
+
+ if (mask == IB_SRQ_INIT_MASK) {
+ if (attr->max_sge > rxe->attr.max_srq_sge) {
+ pr_warn("max_sge(%d) > max_srq_sge(%d)\n",
+ attr->max_sge, rxe->attr.max_srq_sge);
+ goto err1;
+ }
+
+ if (attr->max_sge < RXE_MIN_SRQ_SGE)
+ attr->max_sge = RXE_MIN_SRQ_SGE;
+ }
+
+ return 0;
+
+err1:
+ return -EINVAL;
+}
+
+int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
+ struct ib_srq_init_attr *init,
+ struct ib_ucontext *context, struct ib_udata *udata)
+{
+ int err;
+ int srq_wqe_size;
+ struct rxe_queue *q;
+
+ srq->ibsrq.event_handler = init->event_handler;
+ srq->ibsrq.srq_context = init->srq_context;
+ srq->limit = init->attr.srq_limit;
+ srq->srq_num = srq->pelem.index;
+ srq->rq.max_wr = init->attr.max_wr;
+ srq->rq.max_sge = init->attr.max_sge;
+
+ srq_wqe_size = rcv_wqe_size(srq->rq.max_sge);
+
+ spin_lock_init(&srq->rq.producer_lock);
+ spin_lock_init(&srq->rq.consumer_lock);
+
+ q = rxe_queue_init(rxe, &srq->rq.max_wr,
+ srq_wqe_size);
+ if (!q) {
+ pr_warn("unable to allocate queue for srq\n");
+ return -ENOMEM;
+ }
+
+ srq->rq.queue = q;
+
+	err = do_mmap_info(rxe, udata, false, context, q->buf,
+			   q->buf_size, &q->ip);
+	if (err)
+		goto err1;
+
+	if (udata && udata->outlen >= sizeof(struct mminfo) + sizeof(u32)) {
+		if (copy_to_user(udata->outbuf + sizeof(struct mminfo),
+				 &srq->srq_num, sizeof(u32))) {
+			err = -EFAULT;
+			goto err1;
+		}
+	}
+	return 0;
+
+err1:
+	/* don't leak the queue when setup fails partway through */
+	rxe_queue_cleanup(q);
+	srq->rq.queue = NULL;
+	return err;
+}
+
+int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
+ struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
+ struct ib_udata *udata)
+{
+ int err;
+ struct rxe_queue *q = srq->rq.queue;
+ struct mminfo mi = { .offset = 1, .size = 0};
+
+ if (mask & IB_SRQ_MAX_WR) {
+ /* Check that we can write the mminfo struct to user space */
+ if (udata && udata->inlen >= sizeof(__u64)) {
+ __u64 mi_addr;
+
+ /* Get address of user space mminfo struct */
+ err = ib_copy_from_udata(&mi_addr, udata,
+ sizeof(mi_addr));
+ if (err)
+ goto err1;
+
+ udata->outbuf = (void __user *)(unsigned long)mi_addr;
+ udata->outlen = sizeof(mi);
+
+ if (!access_ok(VERIFY_WRITE,
+ (void __user *)udata->outbuf,
+ udata->outlen)) {
+ err = -EFAULT;
+ goto err1;
+ }
+ }
+
+ err = rxe_queue_resize(q, (unsigned int *)&attr->max_wr,
+ rcv_wqe_size(srq->rq.max_sge),
+ srq->rq.queue->ip ?
+ srq->rq.queue->ip->context :
+ NULL,
+ udata, &srq->rq.producer_lock,
+ &srq->rq.consumer_lock);
+ if (err)
+ goto err2;
+ }
+
+ if (mask & IB_SRQ_LIMIT)
+ srq->limit = attr->srq_limit;
+
+ return 0;
+
+err2:
+ rxe_queue_cleanup(q);
+ srq->rq.queue = NULL;
+err1:
+ return err;
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_sysfs.c b/drivers/infiniband/sw/rxe/rxe_sysfs.c
new file mode 100644
index 000000000..cf8e77800
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_sysfs.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "rxe.h"
+#include "rxe_net.h"
+
+/* Copy argument and remove trailing newline. Return the new length. */
+static int sanitize_arg(const char *val, char *intf, int intf_len)
+{
+ int len;
+
+ if (!val)
+ return 0;
+
+ /* Remove newline. */
+ for (len = 0; len < intf_len - 1 && val[len] && val[len] != '\n'; len++)
+ intf[len] = val[len];
+ intf[len] = 0;
+
+ if (len == 0 || (val[len] != 0 && val[len] != '\n'))
+ return 0;
+
+ return len;
+}
+
+static void rxe_set_port_state(struct net_device *ndev)
+{
+ struct rxe_dev *rxe = net_to_rxe(ndev);
+ bool is_up = netif_running(ndev) && netif_carrier_ok(ndev);
+
+ if (!rxe)
+ goto out;
+
+ if (is_up)
+ rxe_port_up(rxe);
+ else
+ rxe_port_down(rxe); /* down for unknown state */
+out:
+ return;
+}
+
+static int rxe_param_set_add(const char *val, const struct kernel_param *kp)
+{
+ int len;
+ int err = 0;
+ char intf[32];
+ struct net_device *ndev = NULL;
+ struct rxe_dev *rxe;
+
+ len = sanitize_arg(val, intf, sizeof(intf));
+ if (!len) {
+ pr_err("rxe: add: invalid interface name\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ ndev = dev_get_by_name(&init_net, intf);
+ if (!ndev) {
+ pr_err("interface %s not found\n", intf);
+ err = -EINVAL;
+ goto err;
+ }
+
+ if (net_to_rxe(ndev)) {
+ pr_err("rxe: already configured on %s\n", intf);
+ err = -EINVAL;
+ goto err;
+ }
+
+ rxe = rxe_net_add(ndev);
+ if (!rxe) {
+ pr_err("rxe: failed to add %s\n", intf);
+ err = -EINVAL;
+ goto err;
+ }
+
+ rxe_set_port_state(ndev);
+ pr_info("rxe: added %s to %s\n", rxe->ib_dev.name, intf);
+err:
+ if (ndev)
+ dev_put(ndev);
+ return err;
+}
+
+static int rxe_param_set_remove(const char *val, const struct kernel_param *kp)
+{
+ int len;
+ char intf[32];
+ struct rxe_dev *rxe;
+
+ len = sanitize_arg(val, intf, sizeof(intf));
+ if (!len) {
+		pr_err("rxe: remove: invalid interface name\n");
+ return -EINVAL;
+ }
+
+ if (strncmp("all", intf, len) == 0) {
+		pr_info("rxe_sys: remove all\n");
+ rxe_remove_all();
+ return 0;
+ }
+
+ rxe = get_rxe_by_name(intf);
+
+ if (!rxe) {
+ pr_err("rxe: not configured on %s\n", intf);
+ return -EINVAL;
+ }
+
+ list_del(&rxe->list);
+ rxe_remove(rxe);
+
+ return 0;
+}
+
+static const struct kernel_param_ops rxe_add_ops = {
+ .set = rxe_param_set_add,
+};
+
+static const struct kernel_param_ops rxe_remove_ops = {
+ .set = rxe_param_set_remove,
+};
+
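+/*
+ * Write-only module parameters for creating and destroying rxe devices
+ * from user space. For example (assuming the module is loaded as
+ * rdma_rxe and eth0 is the target interface):
+ *
+ *	echo eth0 > /sys/module/rdma_rxe/parameters/add
+ *	echo eth0 > /sys/module/rdma_rxe/parameters/remove
+ */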
+module_param_cb(add, &rxe_add_ops, NULL, 0200);
+MODULE_PARM_DESC(add, "Create RXE device over network interface");
+module_param_cb(remove, &rxe_remove_ops, NULL, 0200);
+MODULE_PARM_DESC(remove, "Remove RXE device from network interface");
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
new file mode 100644
index 000000000..1e19bf828
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/hardirq.h>
+
+#include "rxe_task.h"
+
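+/* Call the task function directly in a loop until it returns nonzero.
+ * This bypasses the tasklet and the state machine, so it is only safe
+ * when the tasklet cannot run concurrently (e.g. while it is disabled).
+ */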
+int __rxe_do_task(struct rxe_task *task)
+{
+ int ret;
+
+ while ((ret = task->func(task->arg)) == 0)
+ ;
+
+ task->ret = ret;
+
+ return ret;
+}
+
+/*
+ * This locking guards against a race where a second caller sees the
+ * task as already running but arrives just after the last call to
+ * func; without it that caller's work could be missed.
+ */
+void rxe_do_task(unsigned long data)
+{
+ int cont;
+ int ret;
+ unsigned long flags;
+ struct rxe_task *task = (struct rxe_task *)data;
+
+ spin_lock_irqsave(&task->state_lock, flags);
+ switch (task->state) {
+ case TASK_STATE_START:
+ task->state = TASK_STATE_BUSY;
+ spin_unlock_irqrestore(&task->state_lock, flags);
+ break;
+
+ case TASK_STATE_BUSY:
+ task->state = TASK_STATE_ARMED;
+		/* fall through */
+ case TASK_STATE_ARMED:
+ spin_unlock_irqrestore(&task->state_lock, flags);
+ return;
+
+ default:
+ spin_unlock_irqrestore(&task->state_lock, flags);
+ pr_warn("bad state = %d in rxe_do_task\n", task->state);
+ return;
+ }
+
+ do {
+ cont = 0;
+ ret = task->func(task->arg);
+
+ spin_lock_irqsave(&task->state_lock, flags);
+ switch (task->state) {
+ case TASK_STATE_BUSY:
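+			/* func returns 0 while it still has work to do;
+			 * a nonzero return means it is done for now
+			 */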
+ if (ret)
+ task->state = TASK_STATE_START;
+ else
+ cont = 1;
+ break;
+
+		/* someone tried to run the task since the last time we called
+ * func, so we will call one more time regardless of the
+ * return value
+ */
+ case TASK_STATE_ARMED:
+ task->state = TASK_STATE_BUSY;
+ cont = 1;
+ break;
+
+ default:
+ pr_warn("bad state = %d in rxe_do_task\n",
+ task->state);
+ }
+ spin_unlock_irqrestore(&task->state_lock, flags);
+ } while (cont);
+
+ task->ret = ret;
+}
+
+int rxe_init_task(void *obj, struct rxe_task *task,
+ void *arg, int (*func)(void *), char *name)
+{
+ task->obj = obj;
+ task->arg = arg;
+ task->func = func;
+ snprintf(task->name, sizeof(task->name), "%s", name);
+
+ tasklet_init(&task->tasklet, rxe_do_task, (unsigned long)task);
+
+ task->state = TASK_STATE_START;
+ spin_lock_init(&task->state_lock);
+
+ return 0;
+}
+
+void rxe_cleanup_task(struct rxe_task *task)
+{
+ tasklet_kill(&task->tasklet);
+}
+
+void rxe_run_task(struct rxe_task *task, int sched)
+{
+ if (sched)
+ tasklet_schedule(&task->tasklet);
+ else
+ rxe_do_task((unsigned long)task);
+}
+
+void rxe_disable_task(struct rxe_task *task)
+{
+ tasklet_disable(&task->tasklet);
+}
+
+void rxe_enable_task(struct rxe_task *task)
+{
+ tasklet_enable(&task->tasklet);
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_task.h b/drivers/infiniband/sw/rxe/rxe_task.h
new file mode 100644
index 000000000..d14aa6dae
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_task.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef RXE_TASK_H
+#define RXE_TASK_H
+
+enum {
+ TASK_STATE_START = 0,
+ TASK_STATE_BUSY = 1,
+ TASK_STATE_ARMED = 2,
+};
+
+/*
+ * data structure to describe a 'task' which is a short
+ * function that returns 0 as long as it needs to be
+ * called again.
+ */
+struct rxe_task {
+ void *obj;
+ struct tasklet_struct tasklet;
+ int state;
+ spinlock_t state_lock; /* spinlock for task state */
+ void *arg;
+ int (*func)(void *arg);
+ int ret;
+ char name[16];
+};
+
+/*
+ * init rxe_task structure
+ *	arg  => parameter to pass to func
+ *	func => function to call until it returns != 0
+ */
+int rxe_init_task(void *obj, struct rxe_task *task,
+ void *arg, int (*func)(void *), char *name);
+
+/* cleanup task */
+void rxe_cleanup_task(struct rxe_task *task);
+
+/*
+ * raw call to func in loop without any checking
+ * can call when tasklets are disabled
+ */
+int __rxe_do_task(struct rxe_task *task);
+
+/*
+ * common function called by any of the main tasklets
+ * If there is any chance that there is additional
+ * work to do someone must reschedule the task before
+ * leaving
+ */
+void rxe_do_task(unsigned long data);
+
+/* Run a task inline or schedule it to run as a tasklet; the decision
+ * to run or to schedule is based on the sched parameter.
+ */
+void rxe_run_task(struct rxe_task *task, int sched);
+
+/* keep a task from scheduling */
+void rxe_disable_task(struct rxe_task *task);
+
+/* allow task to run */
+void rxe_enable_task(struct rxe_task *task);
+
+#endif /* RXE_TASK_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
new file mode 100644
index 000000000..4552be960
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -0,0 +1,1330 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "rxe.h"
+#include "rxe_loc.h"
+#include "rxe_queue.h"
+
+static int rxe_query_device(struct ib_device *dev,
+ struct ib_device_attr *attr,
+ struct ib_udata *uhw)
+{
+ struct rxe_dev *rxe = to_rdev(dev);
+
+ if (uhw->inlen || uhw->outlen)
+ return -EINVAL;
+
+ *attr = rxe->attr;
+ return 0;
+}
+
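+/* Map an Ethernet link speed in Mb/s onto the nearest IB width/speed
+ * pair for reporting in the port attributes.
+ */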
+static void rxe_eth_speed_to_ib_speed(int speed, u8 *active_speed,
+ u8 *active_width)
+{
+ if (speed <= 1000) {
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_SDR;
+ } else if (speed <= 10000) {
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_FDR10;
+ } else if (speed <= 20000) {
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_DDR;
+ } else if (speed <= 30000) {
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_QDR;
+ } else if (speed <= 40000) {
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_FDR10;
+ } else {
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_EDR;
+ }
+}
+
+static int rxe_query_port(struct ib_device *dev,
+ u8 port_num, struct ib_port_attr *attr)
+{
+ struct rxe_dev *rxe = to_rdev(dev);
+ struct rxe_port *port;
+ u32 speed;
+
+ if (unlikely(port_num != 1)) {
+ pr_warn("invalid port_number %d\n", port_num);
+ goto err1;
+ }
+
+ port = &rxe->port;
+
+ *attr = port->attr;
+
+ mutex_lock(&rxe->usdev_lock);
+ if (rxe->ndev->ethtool_ops->get_link_ksettings) {
+ struct ethtool_link_ksettings ks;
+
+ rxe->ndev->ethtool_ops->get_link_ksettings(rxe->ndev, &ks);
+ speed = ks.base.speed;
+ } else if (rxe->ndev->ethtool_ops->get_settings) {
+ struct ethtool_cmd cmd;
+
+ rxe->ndev->ethtool_ops->get_settings(rxe->ndev, &cmd);
+ speed = cmd.speed;
+ } else {
+ pr_warn("%s speed is unknown, defaulting to 1000\n", rxe->ndev->name);
+ speed = 1000;
+ }
+ rxe_eth_speed_to_ib_speed(speed, &attr->active_speed, &attr->active_width);
+ mutex_unlock(&rxe->usdev_lock);
+
+ return 0;
+
+err1:
+ return -EINVAL;
+}
+
+static int rxe_query_gid(struct ib_device *device,
+ u8 port_num, int index, union ib_gid *gid)
+{
+ int ret;
+
+	if (index >= RXE_PORT_GID_TBL_LEN)
+ return -EINVAL;
+
+ ret = ib_get_cached_gid(device, port_num, index, gid, NULL);
+ if (ret == -EAGAIN) {
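+		/* an unpopulated GID table slot reads back as the zero GID */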
+ memcpy(gid, &zgid, sizeof(*gid));
+ return 0;
+ }
+
+ return ret;
+}
+
+static int rxe_add_gid(struct ib_device *device, u8 port_num,
+		       unsigned int index, const union ib_gid *gid,
+		       const struct ib_gid_attr *attr, void **context)
+{
+ if (index >= RXE_PORT_GID_TBL_LEN)
+ return -EINVAL;
+ return 0;
+}
+
+static int rxe_del_gid(struct ib_device *device, u8 port_num,
+		       unsigned int index, void **context)
+{
+ if (index >= RXE_PORT_GID_TBL_LEN)
+ return -EINVAL;
+ return 0;
+}
+
+static struct net_device *rxe_get_netdev(struct ib_device *device,
+ u8 port_num)
+{
+ struct rxe_dev *rxe = to_rdev(device);
+
+ if (rxe->ndev) {
+ dev_hold(rxe->ndev);
+ return rxe->ndev;
+ }
+
+ return NULL;
+}
+
+static int rxe_query_pkey(struct ib_device *device,
+ u8 port_num, u16 index, u16 *pkey)
+{
+ struct rxe_dev *rxe = to_rdev(device);
+ struct rxe_port *port;
+
+ if (unlikely(port_num != 1)) {
+ dev_warn(device->dma_device, "invalid port_num = %d\n",
+ port_num);
+ goto err1;
+ }
+
+ port = &rxe->port;
+
+ if (unlikely(index >= port->attr.pkey_tbl_len)) {
+ dev_warn(device->dma_device, "invalid index = %d\n",
+ index);
+ goto err1;
+ }
+
+ *pkey = port->pkey_tbl[index];
+ return 0;
+
+err1:
+ return -EINVAL;
+}
+
+static int rxe_modify_device(struct ib_device *dev,
+ int mask, struct ib_device_modify *attr)
+{
+ struct rxe_dev *rxe = to_rdev(dev);
+
+ if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
+ rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);
+
+ if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
+ memcpy(rxe->ib_dev.node_desc,
+ attr->node_desc, sizeof(rxe->ib_dev.node_desc));
+ }
+
+ return 0;
+}
+
+static int rxe_modify_port(struct ib_device *dev,
+ u8 port_num, int mask, struct ib_port_modify *attr)
+{
+ struct rxe_dev *rxe = to_rdev(dev);
+ struct rxe_port *port;
+
+ if (unlikely(port_num != 1)) {
+ pr_warn("invalid port_num = %d\n", port_num);
+ goto err1;
+ }
+
+ port = &rxe->port;
+
+ port->attr.port_cap_flags |= attr->set_port_cap_mask;
+ port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;
+
+ if (mask & IB_PORT_RESET_QKEY_CNTR)
+ port->attr.qkey_viol_cntr = 0;
+
+ return 0;
+
+err1:
+ return -EINVAL;
+}
+
+static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
+ u8 port_num)
+{
+ struct rxe_dev *rxe = to_rdev(dev);
+
+ return rxe->ifc_ops->link_layer(rxe, port_num);
+}
+
+static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
+ struct ib_udata *udata)
+{
+ struct rxe_dev *rxe = to_rdev(dev);
+ struct rxe_ucontext *uc;
+
+ uc = rxe_alloc(&rxe->uc_pool);
+ return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
+}
+
+static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
+{
+ struct rxe_ucontext *uc = to_ruc(ibuc);
+
+ rxe_drop_ref(uc);
+ return 0;
+}
+
+static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
+ struct ib_port_immutable *immutable)
+{
+ int err;
+ struct ib_port_attr attr;
+
+ err = rxe_query_port(dev, port_num, &attr);
+ if (err)
+ return err;
+
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+
+ return 0;
+}
+
+static struct ib_pd *rxe_alloc_pd(struct ib_device *dev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct rxe_dev *rxe = to_rdev(dev);
+ struct rxe_pd *pd;
+
+ pd = rxe_alloc(&rxe->pd_pool);
+ return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
+}
+
+static int rxe_dealloc_pd(struct ib_pd *ibpd)
+{
+ struct rxe_pd *pd = to_rpd(ibpd);
+
+ rxe_drop_ref(pd);
+ return 0;
+}
+
+static int rxe_init_av(struct rxe_dev *rxe, struct ib_ah_attr *attr,
+ struct rxe_av *av)
+{
+ int err;
+ union ib_gid sgid;
+ struct ib_gid_attr sgid_attr;
+
+ err = ib_get_cached_gid(&rxe->ib_dev, attr->port_num,
+ attr->grh.sgid_index, &sgid,
+ &sgid_attr);
+ if (err) {
+ pr_err("Failed to query sgid. err = %d\n", err);
+ return err;
+ }
+
+ err = rxe_av_from_attr(rxe, attr->port_num, av, attr);
+ if (!err)
+ err = rxe_av_fill_ip_info(rxe, av, attr, &sgid_attr, &sgid);
+
+ if (sgid_attr.ndev)
+ dev_put(sgid_attr.ndev);
+ return err;
+}
+
+static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
+{
+ int err;
+ struct rxe_dev *rxe = to_rdev(ibpd->device);
+ struct rxe_pd *pd = to_rpd(ibpd);
+ struct rxe_ah *ah;
+
+ err = rxe_av_chk_attr(rxe, attr);
+ if (err)
+ goto err1;
+
+ ah = rxe_alloc(&rxe->ah_pool);
+ if (!ah) {
+ err = -ENOMEM;
+ goto err1;
+ }
+
+ rxe_add_ref(pd);
+ ah->pd = pd;
+
+ err = rxe_init_av(rxe, attr, &ah->av);
+ if (err)
+ goto err2;
+
+ return &ah->ibah;
+
+err2:
+ rxe_drop_ref(pd);
+ rxe_drop_ref(ah);
+err1:
+ return ERR_PTR(err);
+}
+
+static int rxe_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
+{
+ int err;
+ struct rxe_dev *rxe = to_rdev(ibah->device);
+ struct rxe_ah *ah = to_rah(ibah);
+
+ err = rxe_av_chk_attr(rxe, attr);
+ if (err)
+ return err;
+
+ err = rxe_init_av(rxe, attr, &ah->av);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int rxe_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
+{
+ struct rxe_dev *rxe = to_rdev(ibah->device);
+ struct rxe_ah *ah = to_rah(ibah);
+
+ rxe_av_to_attr(rxe, &ah->av, attr);
+ return 0;
+}
+
+static int rxe_destroy_ah(struct ib_ah *ibah)
+{
+ struct rxe_ah *ah = to_rah(ibah);
+
+ rxe_drop_ref(ah->pd);
+ rxe_drop_ref(ah);
+ return 0;
+}
+
+static int post_one_recv(struct rxe_rq *rq, struct ib_recv_wr *ibwr)
+{
+ int err;
+ int i;
+ u32 length;
+ struct rxe_recv_wqe *recv_wqe;
+ int num_sge = ibwr->num_sge;
+
+ if (unlikely(queue_full(rq->queue))) {
+ err = -ENOMEM;
+ goto err1;
+ }
+
+ if (unlikely(num_sge > rq->max_sge)) {
+ err = -EINVAL;
+ goto err1;
+ }
+
+ length = 0;
+ for (i = 0; i < num_sge; i++)
+ length += ibwr->sg_list[i].length;
+
+ recv_wqe = producer_addr(rq->queue);
+ recv_wqe->wr_id = ibwr->wr_id;
+ recv_wqe->num_sge = num_sge;
+
+ memcpy(recv_wqe->dma.sge, ibwr->sg_list,
+ num_sge * sizeof(struct ib_sge));
+
+ recv_wqe->dma.length = length;
+ recv_wqe->dma.resid = length;
+ recv_wqe->dma.num_sge = num_sge;
+ recv_wqe->dma.cur_sge = 0;
+ recv_wqe->dma.sge_offset = 0;
+
+ /* make sure all changes to the work queue are written before we
+ * update the producer pointer
+ */
+ smp_wmb();
+
+ advance_producer(rq->queue);
+ return 0;
+
+err1:
+ return err;
+}
+
+static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
+ struct ib_srq_init_attr *init,
+ struct ib_udata *udata)
+{
+ int err;
+ struct rxe_dev *rxe = to_rdev(ibpd->device);
+ struct rxe_pd *pd = to_rpd(ibpd);
+ struct rxe_srq *srq;
+ struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;
+
+ err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
+ if (err)
+ goto err1;
+
+ srq = rxe_alloc(&rxe->srq_pool);
+ if (!srq) {
+ err = -ENOMEM;
+ goto err1;
+ }
+
+ rxe_add_index(srq);
+ rxe_add_ref(pd);
+ srq->pd = pd;
+
+ err = rxe_srq_from_init(rxe, srq, init, context, udata);
+ if (err)
+ goto err2;
+
+ return &srq->ibsrq;
+
+err2:
+ rxe_drop_ref(pd);
+ rxe_drop_index(srq);
+ rxe_drop_ref(srq);
+err1:
+ return ERR_PTR(err);
+}
+
+static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask mask,
+ struct ib_udata *udata)
+{
+ int err;
+ struct rxe_srq *srq = to_rsrq(ibsrq);
+ struct rxe_dev *rxe = to_rdev(ibsrq->device);
+
+ err = rxe_srq_chk_attr(rxe, srq, attr, mask);
+ if (err)
+ goto err1;
+
+ err = rxe_srq_from_attr(rxe, srq, attr, mask, udata);
+ if (err)
+ goto err1;
+
+ return 0;
+
+err1:
+ return err;
+}
+
+static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
+{
+ struct rxe_srq *srq = to_rsrq(ibsrq);
+
+ if (srq->error)
+ return -EINVAL;
+
+ attr->max_wr = srq->rq.queue->buf->index_mask;
+ attr->max_sge = srq->rq.max_sge;
+ attr->srq_limit = srq->limit;
+ return 0;
+}
+
+static int rxe_destroy_srq(struct ib_srq *ibsrq)
+{
+ struct rxe_srq *srq = to_rsrq(ibsrq);
+
+ if (srq->rq.queue)
+ rxe_queue_cleanup(srq->rq.queue);
+
+ rxe_drop_ref(srq->pd);
+ rxe_drop_index(srq);
+ rxe_drop_ref(srq);
+
+ return 0;
+}
+
+static int rxe_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ int err = 0;
+ unsigned long flags;
+ struct rxe_srq *srq = to_rsrq(ibsrq);
+
+ spin_lock_irqsave(&srq->rq.producer_lock, flags);
+
+ while (wr) {
+ err = post_one_recv(&srq->rq, wr);
+ if (unlikely(err))
+ break;
+ wr = wr->next;
+ }
+
+ spin_unlock_irqrestore(&srq->rq.producer_lock, flags);
+
+ if (err)
+ *bad_wr = wr;
+
+ return err;
+}
+
+static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
+ struct ib_qp_init_attr *init,
+ struct ib_udata *udata)
+{
+ int err;
+ struct rxe_dev *rxe = to_rdev(ibpd->device);
+ struct rxe_pd *pd = to_rpd(ibpd);
+ struct rxe_qp *qp;
+
+ err = rxe_qp_chk_init(rxe, init);
+ if (err)
+ goto err1;
+
+ qp = rxe_alloc(&rxe->qp_pool);
+ if (!qp) {
+ err = -ENOMEM;
+ goto err1;
+ }
+
+	if (udata) {
+		if (udata->inlen) {
+			err = -EINVAL;
+			/* drop the ref taken by rxe_alloc() above */
+			rxe_drop_ref(qp);
+			goto err1;
+		}
+		qp->is_user = 1;
+	}
+
+ rxe_add_index(qp);
+
+ err = rxe_qp_from_init(rxe, qp, pd, init, udata, ibpd);
+ if (err)
+ goto err2;
+
+ return &qp->ibqp;
+
+err2:
+ rxe_drop_index(qp);
+ rxe_drop_ref(qp);
+err1:
+ return ERR_PTR(err);
+}
+
+static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int mask, struct ib_udata *udata)
+{
+ int err;
+ struct rxe_dev *rxe = to_rdev(ibqp->device);
+ struct rxe_qp *qp = to_rqp(ibqp);
+
+ err = rxe_qp_chk_attr(rxe, qp, attr, mask);
+ if (err)
+ goto err1;
+
+ err = rxe_qp_from_attr(qp, attr, mask, udata);
+ if (err)
+ goto err1;
+
+ return 0;
+
+err1:
+ return err;
+}
+
+static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int mask, struct ib_qp_init_attr *init)
+{
+ struct rxe_qp *qp = to_rqp(ibqp);
+
+ rxe_qp_to_init(qp, init);
+ rxe_qp_to_attr(qp, attr, mask);
+
+ return 0;
+}
+
+static int rxe_destroy_qp(struct ib_qp *ibqp)
+{
+ struct rxe_qp *qp = to_rqp(ibqp);
+
+ rxe_qp_destroy(qp);
+ rxe_drop_index(qp);
+ rxe_drop_ref(qp);
+ return 0;
+}
+
+static int validate_send_wr(struct rxe_qp *qp, struct ib_send_wr *ibwr,
+ unsigned int mask, unsigned int length)
+{
+ int num_sge = ibwr->num_sge;
+ struct rxe_sq *sq = &qp->sq;
+
+ if (unlikely(num_sge > sq->max_sge))
+ goto err1;
+
+ if (unlikely(mask & WR_ATOMIC_MASK)) {
+ if (length < 8)
+ goto err1;
+
+ if (atomic_wr(ibwr)->remote_addr & 0x7)
+ goto err1;
+ }
+
+ if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
+ (length > sq->max_inline)))
+ goto err1;
+
+ return 0;
+
+err1:
+ return -EINVAL;
+}
+
+static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
+ struct ib_send_wr *ibwr)
+{
+ wr->wr_id = ibwr->wr_id;
+ wr->num_sge = ibwr->num_sge;
+ wr->opcode = ibwr->opcode;
+ wr->send_flags = ibwr->send_flags;
+
+ if (qp_type(qp) == IB_QPT_UD ||
+ qp_type(qp) == IB_QPT_SMI ||
+ qp_type(qp) == IB_QPT_GSI) {
+ wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
+ wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
+ if (qp_type(qp) == IB_QPT_GSI)
+ wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
+ if (wr->opcode == IB_WR_SEND_WITH_IMM)
+ wr->ex.imm_data = ibwr->ex.imm_data;
+ } else {
+ switch (wr->opcode) {
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ wr->ex.imm_data = ibwr->ex.imm_data;
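+			/* fall through - the rdma fields below are
+			 * needed for WRITE_WITH_IMM too
+			 */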
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_WRITE:
+ wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
+ wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
+ break;
+ case IB_WR_SEND_WITH_IMM:
+ wr->ex.imm_data = ibwr->ex.imm_data;
+ break;
+ case IB_WR_SEND_WITH_INV:
+ wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
+ break;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ wr->wr.atomic.remote_addr =
+ atomic_wr(ibwr)->remote_addr;
+ wr->wr.atomic.compare_add =
+ atomic_wr(ibwr)->compare_add;
+ wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
+ wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
+ break;
+ case IB_WR_LOCAL_INV:
+ wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
+ break;
+ case IB_WR_REG_MR:
+ wr->wr.reg.mr = reg_wr(ibwr)->mr;
+ wr->wr.reg.key = reg_wr(ibwr)->key;
+ wr->wr.reg.access = reg_wr(ibwr)->access;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
+ unsigned int mask, unsigned int length,
+ struct rxe_send_wqe *wqe)
+{
+ int num_sge = ibwr->num_sge;
+ struct ib_sge *sge;
+ int i;
+ u8 *p;
+
+ init_send_wr(qp, &wqe->wr, ibwr);
+
+ if (qp_type(qp) == IB_QPT_UD ||
+ qp_type(qp) == IB_QPT_SMI ||
+ qp_type(qp) == IB_QPT_GSI)
+ memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));
+
+ if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
+ p = wqe->dma.inline_data;
+
+ sge = ibwr->sg_list;
+ for (i = 0; i < num_sge; i++, sge++) {
+			if (qp->is_user) {
+				if (copy_from_user(p, (void __user *)
+						   (uintptr_t)sge->addr,
+						   sge->length))
+					return -EFAULT;
+			} else {
+				memcpy(p, (void *)(uintptr_t)sge->addr,
+				       sge->length);
+			}
+
+ p += sge->length;
+ }
+ } else if (mask & WR_REG_MASK) {
+ wqe->mask = mask;
+ wqe->state = wqe_state_posted;
+ return 0;
+	} else {
+		memcpy(wqe->dma.sge, ibwr->sg_list,
+		       num_sge * sizeof(struct ib_sge));
+	}
+
+ wqe->iova = (mask & WR_ATOMIC_MASK) ?
+ atomic_wr(ibwr)->remote_addr :
+ rdma_wr(ibwr)->remote_addr;
+ wqe->mask = mask;
+ wqe->dma.length = length;
+ wqe->dma.resid = length;
+ wqe->dma.num_sge = num_sge;
+ wqe->dma.cur_sge = 0;
+ wqe->dma.sge_offset = 0;
+ wqe->state = wqe_state_posted;
+ wqe->ssn = atomic_add_return(1, &qp->ssn);
+
+ return 0;
+}
+
+static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
+ unsigned mask, u32 length)
+{
+ int err;
+ struct rxe_sq *sq = &qp->sq;
+ struct rxe_send_wqe *send_wqe;
+ unsigned long flags;
+
+ err = validate_send_wr(qp, ibwr, mask, length);
+ if (err)
+ return err;
+
+ spin_lock_irqsave(&qp->sq.sq_lock, flags);
+
+ if (unlikely(queue_full(sq->queue))) {
+ err = -ENOMEM;
+ goto err1;
+ }
+
+ send_wqe = producer_addr(sq->queue);
+
+ err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
+ if (unlikely(err))
+ goto err1;
+
+ /*
+ * make sure all changes to the work queue are
+ * written before we update the producer pointer
+ */
+ smp_wmb();
+
+ advance_producer(sq->queue);
+ spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
+
+ return 0;
+
+err1:
+ spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
+ return err;
+}
+
+static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ int err = 0;
+ struct rxe_qp *qp = to_rqp(ibqp);
+ unsigned int mask;
+ unsigned int length = 0;
+ int i;
+ int must_sched;
+
+ if (unlikely(!qp->valid)) {
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ if (unlikely(qp->req.state < QP_STATE_READY)) {
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ while (wr) {
+ mask = wr_opcode_mask(wr->opcode, qp);
+ if (unlikely(!mask)) {
+ err = -EINVAL;
+ *bad_wr = wr;
+ break;
+ }
+
+ if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
+ !(mask & WR_INLINE_MASK))) {
+ err = -EINVAL;
+ *bad_wr = wr;
+ break;
+ }
+
+ length = 0;
+ for (i = 0; i < wr->num_sge; i++)
+ length += wr->sg_list[i].length;
+
+ err = post_one_send(qp, wr, mask, length);
+
+ if (err) {
+ *bad_wr = wr;
+ break;
+ }
+ wr = wr->next;
+ }
+
+	/*
+	 * Must schedule for a GSI QP because ib_send_mad() holds an irq
+	 * lock and the requester calls ip_local_out_sk(), which takes a
+	 * spin_lock_bh.
+	 */
+ must_sched = (qp_type(qp) == IB_QPT_GSI) ||
+ (queue_count(qp->sq.queue) > 1);
+
+ rxe_run_task(&qp->req.task, must_sched);
+
+ return err;
+}
+
+static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ int err = 0;
+ struct rxe_qp *qp = to_rqp(ibqp);
+ struct rxe_rq *rq = &qp->rq;
+ unsigned long flags;
+
+ if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
+ *bad_wr = wr;
+ err = -EINVAL;
+ goto err1;
+ }
+
+ if (unlikely(qp->srq)) {
+ *bad_wr = wr;
+ err = -EINVAL;
+ goto err1;
+ }
+
+ spin_lock_irqsave(&rq->producer_lock, flags);
+
+ while (wr) {
+ err = post_one_recv(rq, wr);
+ if (unlikely(err)) {
+ *bad_wr = wr;
+ break;
+ }
+ wr = wr->next;
+ }
+
+ spin_unlock_irqrestore(&rq->producer_lock, flags);
+
+err1:
+ return err;
+}
+
+static struct ib_cq *rxe_create_cq(struct ib_device *dev,
+ const struct ib_cq_init_attr *attr,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ int err;
+ struct rxe_dev *rxe = to_rdev(dev);
+ struct rxe_cq *cq;
+
+ if (attr->flags)
+ return ERR_PTR(-EINVAL);
+
+ err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector, udata);
+ if (err)
+ goto err1;
+
+ cq = rxe_alloc(&rxe->cq_pool);
+ if (!cq) {
+ err = -ENOMEM;
+ goto err1;
+ }
+
+ err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
+ context, udata);
+ if (err)
+ goto err2;
+
+ return &cq->ibcq;
+
+err2:
+ rxe_drop_ref(cq);
+err1:
+ return ERR_PTR(err);
+}
+
+static int rxe_destroy_cq(struct ib_cq *ibcq)
+{
+ struct rxe_cq *cq = to_rcq(ibcq);
+
+ rxe_drop_ref(cq);
+ return 0;
+}
+
+static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
+{
+ int err;
+ struct rxe_cq *cq = to_rcq(ibcq);
+ struct rxe_dev *rxe = to_rdev(ibcq->device);
+
+ err = rxe_cq_chk_attr(rxe, cq, cqe, 0, udata);
+ if (err)
+ goto err1;
+
+ err = rxe_cq_resize_queue(cq, cqe, udata);
+ if (err)
+ goto err1;
+
+ return 0;
+
+err1:
+ return err;
+}
+
+static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+ int i;
+ struct rxe_cq *cq = to_rcq(ibcq);
+ struct rxe_cqe *cqe;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+ for (i = 0; i < num_entries; i++) {
+ cqe = queue_head(cq->queue);
+ if (!cqe)
+ break;
+
+ memcpy(wc++, &cqe->ibwc, sizeof(*wc));
+ advance_consumer(cq->queue);
+ }
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+ return i;
+}
+
+static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
+{
+ struct rxe_cq *cq = to_rcq(ibcq);
+ int count = queue_count(cq->queue);
+
+ return (count > wc_cnt) ? wc_cnt : count;
+}
+
+static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+{
+ struct rxe_cq *cq = to_rcq(ibcq);
+
+ if (cq->notify != IB_CQ_NEXT_COMP)
+ cq->notify = flags & IB_CQ_SOLICITED_MASK;
+
+ return 0;
+}
+
+static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
+{
+ struct rxe_dev *rxe = to_rdev(ibpd->device);
+ struct rxe_pd *pd = to_rpd(ibpd);
+ struct rxe_mem *mr;
+ int err;
+
+ mr = rxe_alloc(&rxe->mr_pool);
+ if (!mr) {
+ err = -ENOMEM;
+ goto err1;
+ }
+
+ rxe_add_index(mr);
+
+ rxe_add_ref(pd);
+
+ err = rxe_mem_init_dma(rxe, pd, access, mr);
+ if (err)
+ goto err2;
+
+ return &mr->ibmr;
+
+err2:
+ rxe_drop_ref(pd);
+ rxe_drop_index(mr);
+ rxe_drop_ref(mr);
+err1:
+ return ERR_PTR(err);
+}
+
+static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
+ u64 start,
+ u64 length,
+ u64 iova,
+ int access, struct ib_udata *udata)
+{
+ int err;
+ struct rxe_dev *rxe = to_rdev(ibpd->device);
+ struct rxe_pd *pd = to_rpd(ibpd);
+ struct rxe_mem *mr;
+
+ mr = rxe_alloc(&rxe->mr_pool);
+ if (!mr) {
+ err = -ENOMEM;
+ goto err2;
+ }
+
+ rxe_add_index(mr);
+
+ rxe_add_ref(pd);
+
+ err = rxe_mem_init_user(rxe, pd, start, length, iova,
+ access, udata, mr);
+ if (err)
+ goto err3;
+
+ return &mr->ibmr;
+
+err3:
+ rxe_drop_ref(pd);
+ rxe_drop_index(mr);
+ rxe_drop_ref(mr);
+err2:
+ return ERR_PTR(err);
+}
+
+static int rxe_dereg_mr(struct ib_mr *ibmr)
+{
+ struct rxe_mem *mr = to_rmr(ibmr);
+
+ mr->state = RXE_MEM_STATE_ZOMBIE;
+ rxe_drop_ref(mr->pd);
+ rxe_drop_index(mr);
+ rxe_drop_ref(mr);
+ return 0;
+}
+
+static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
+ enum ib_mr_type mr_type,
+ u32 max_num_sg)
+{
+ struct rxe_dev *rxe = to_rdev(ibpd->device);
+ struct rxe_pd *pd = to_rpd(ibpd);
+ struct rxe_mem *mr;
+ int err;
+
+ if (mr_type != IB_MR_TYPE_MEM_REG)
+ return ERR_PTR(-EINVAL);
+
+ mr = rxe_alloc(&rxe->mr_pool);
+ if (!mr) {
+ err = -ENOMEM;
+ goto err1;
+ }
+
+ rxe_add_index(mr);
+
+ rxe_add_ref(pd);
+
+ err = rxe_mem_init_fast(rxe, pd, max_num_sg, mr);
+ if (err)
+ goto err2;
+
+ return &mr->ibmr;
+
+err2:
+ rxe_drop_ref(pd);
+ rxe_drop_index(mr);
+ rxe_drop_ref(mr);
+err1:
+ return ERR_PTR(err);
+}
+
+static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct rxe_mem *mr = to_rmr(ibmr);
+ struct rxe_map *map;
+ struct rxe_phys_buf *buf;
+
+ if (unlikely(mr->nbuf == mr->num_buf))
+ return -ENOMEM;
+
+ map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
+ buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];
+
+ buf->addr = addr;
+ buf->size = ibmr->page_size;
+ mr->nbuf++;
+
+ return 0;
+}
+
+static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset)
+{
+ struct rxe_mem *mr = to_rmr(ibmr);
+ int n;
+
+ mr->nbuf = 0;
+
+ n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);
+
+ mr->va = ibmr->iova;
+ mr->iova = ibmr->iova;
+ mr->length = ibmr->length;
+ mr->page_shift = ilog2(ibmr->page_size);
+ mr->page_mask = ibmr->page_size - 1;
+ mr->offset = mr->iova & mr->page_mask;
+
+ return n;
+}
+
+static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
+{
+ int err;
+ struct rxe_dev *rxe = to_rdev(ibqp->device);
+ struct rxe_qp *qp = to_rqp(ibqp);
+ struct rxe_mc_grp *grp;
+
+ /* takes a ref on grp if successful */
+ err = rxe_mcast_get_grp(rxe, mgid, &grp);
+ if (err)
+ return err;
+
+ err = rxe_mcast_add_grp_elem(rxe, qp, grp);
+
+ rxe_drop_ref(grp);
+ return err;
+}
+
+static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
+{
+ struct rxe_dev *rxe = to_rdev(ibqp->device);
+ struct rxe_qp *qp = to_rqp(ibqp);
+
+ return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
+}
+
+static ssize_t rxe_show_parent(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct rxe_dev *rxe = container_of(device, struct rxe_dev,
+ ib_dev.dev);
+ char *name;
+
+ name = rxe->ifc_ops->parent_name(rxe, 1);
+ return snprintf(buf, 16, "%s\n", name);
+}
+
+static DEVICE_ATTR(parent, S_IRUGO, rxe_show_parent, NULL);
+
+static struct device_attribute *rxe_dev_attributes[] = {
+ &dev_attr_parent,
+};
+
+int rxe_register_device(struct rxe_dev *rxe)
+{
+ int err;
+ int i;
+ struct ib_device *dev = &rxe->ib_dev;
+
+ strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
+ strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));
+
+ dev->owner = THIS_MODULE;
+ dev->node_type = RDMA_NODE_IB_CA;
+ dev->phys_port_cnt = 1;
+ dev->num_comp_vectors = RXE_NUM_COMP_VECTORS;
+ dev->dma_device = rxe->ifc_ops->dma_device(rxe);
+ dev->local_dma_lkey = 0;
+ dev->node_guid = rxe->ifc_ops->node_guid(rxe);
+ dev->dma_ops = &rxe_dma_mapping_ops;
+
+ dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
+ dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
+ | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
+ | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
+ | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
+ | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
+ | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
+ | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
+ | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
+ | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
+ | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
+ | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
+ | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
+ | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
+ | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
+ | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
+ | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
+ | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
+ | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
+ | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
+ | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
+ | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
+ | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
+ | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
+ | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
+ | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
+ | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
+ | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
+ | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
+ | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
+ | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
+ | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
+ ;
+
+ dev->query_device = rxe_query_device;
+ dev->modify_device = rxe_modify_device;
+ dev->query_port = rxe_query_port;
+ dev->modify_port = rxe_modify_port;
+ dev->get_link_layer = rxe_get_link_layer;
+ dev->query_gid = rxe_query_gid;
+ dev->get_netdev = rxe_get_netdev;
+ dev->add_gid = rxe_add_gid;
+ dev->del_gid = rxe_del_gid;
+ dev->query_pkey = rxe_query_pkey;
+ dev->alloc_ucontext = rxe_alloc_ucontext;
+ dev->dealloc_ucontext = rxe_dealloc_ucontext;
+ dev->mmap = rxe_mmap;
+ dev->get_port_immutable = rxe_port_immutable;
+ dev->alloc_pd = rxe_alloc_pd;
+ dev->dealloc_pd = rxe_dealloc_pd;
+ dev->create_ah = rxe_create_ah;
+ dev->modify_ah = rxe_modify_ah;
+ dev->query_ah = rxe_query_ah;
+ dev->destroy_ah = rxe_destroy_ah;
+ dev->create_srq = rxe_create_srq;
+ dev->modify_srq = rxe_modify_srq;
+ dev->query_srq = rxe_query_srq;
+ dev->destroy_srq = rxe_destroy_srq;
+ dev->post_srq_recv = rxe_post_srq_recv;
+ dev->create_qp = rxe_create_qp;
+ dev->modify_qp = rxe_modify_qp;
+ dev->query_qp = rxe_query_qp;
+ dev->destroy_qp = rxe_destroy_qp;
+ dev->post_send = rxe_post_send;
+ dev->post_recv = rxe_post_recv;
+ dev->create_cq = rxe_create_cq;
+ dev->destroy_cq = rxe_destroy_cq;
+ dev->resize_cq = rxe_resize_cq;
+ dev->poll_cq = rxe_poll_cq;
+ dev->peek_cq = rxe_peek_cq;
+ dev->req_notify_cq = rxe_req_notify_cq;
+ dev->get_dma_mr = rxe_get_dma_mr;
+ dev->reg_user_mr = rxe_reg_user_mr;
+ dev->dereg_mr = rxe_dereg_mr;
+ dev->alloc_mr = rxe_alloc_mr;
+ dev->map_mr_sg = rxe_map_mr_sg;
+ dev->attach_mcast = rxe_attach_mcast;
+ dev->detach_mcast = rxe_detach_mcast;
+
+ err = ib_register_device(dev, NULL);
+ if (err) {
+ pr_warn("rxe_register_device failed, err = %d\n", err);
+ goto err1;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
+ err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
+ if (err) {
+ pr_warn("device_create_file failed, i = %d, err = %d\n",
+ i, err);
+ goto err2;
+ }
+ }
+
+ return 0;
+
+err2:
+ ib_unregister_device(dev);
+err1:
+ return err;
+}
+
+int rxe_unregister_device(struct rxe_dev *rxe)
+{
+ int i;
+ struct ib_device *dev = &rxe->ib_dev;
+
+ for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i)
+ device_remove_file(&dev->dev, rxe_dev_attributes[i]);
+
+ ib_unregister_device(dev);
+
+ return 0;
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
new file mode 100644
index 000000000..cac1d52a0
--- /dev/null
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -0,0 +1,480 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
+ * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef RXE_VERBS_H
+#define RXE_VERBS_H
+
+#include <linux/interrupt.h>
+#include <rdma/rdma_user_rxe.h>
+#include "rxe_pool.h"
+#include "rxe_task.h"
+
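+/* Two P_Keys match when their low 15 bits are equal and nonzero, and at
+ * least one of them has the full-membership bit (bit 15) set.
+ */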
+static inline int pkey_match(u16 key1, u16 key2)
+{
+ return (((key1 & 0x7fff) != 0) &&
+ ((key1 & 0x7fff) == (key2 & 0x7fff)) &&
+ ((key1 & 0x8000) || (key2 & 0x8000))) ? 1 : 0;
+}
+
+/* Return >0 if psn_a > psn_b
+ * 0 if psn_a == psn_b
+ * <0 if psn_a < psn_b
+ */
+static inline int psn_compare(u32 psn_a, u32 psn_b)
+{
+ s32 diff;
+
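+	/* PSNs are 24 bits wide; shifting the difference into the top of
+	 * an s32 makes bit 23 the sign bit, so the comparison stays
+	 * correct across wraparound.
+	 */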
+ diff = (psn_a - psn_b) << 8;
+ return diff;
+}
+
+struct rxe_ucontext {
+ struct rxe_pool_entry pelem;
+ struct ib_ucontext ibuc;
+};
+
+struct rxe_pd {
+ struct rxe_pool_entry pelem;
+ struct ib_pd ibpd;
+};
+
+struct rxe_ah {
+ struct rxe_pool_entry pelem;
+ struct ib_ah ibah;
+ struct rxe_pd *pd;
+ struct rxe_av av;
+};
+
+struct rxe_cqe {
+ union {
+ struct ib_wc ibwc;
+ struct ib_uverbs_wc uibwc;
+ };
+};
+
+struct rxe_cq {
+ struct rxe_pool_entry pelem;
+ struct ib_cq ibcq;
+ struct rxe_queue *queue;
+ spinlock_t cq_lock;
+ u8 notify;
+ int is_user;
+ struct tasklet_struct comp_task;
+};
+
+enum wqe_state {
+ wqe_state_posted,
+ wqe_state_processing,
+ wqe_state_pending,
+ wqe_state_done,
+ wqe_state_error,
+};
+
+struct rxe_sq {
+ int max_wr;
+ int max_sge;
+ int max_inline;
+ spinlock_t sq_lock; /* guard queue */
+ struct rxe_queue *queue;
+};
+
+struct rxe_rq {
+ int max_wr;
+ int max_sge;
+ spinlock_t producer_lock; /* guard queue producer */
+ spinlock_t consumer_lock; /* guard queue consumer */
+ struct rxe_queue *queue;
+};
+
+struct rxe_srq {
+ struct rxe_pool_entry pelem;
+ struct ib_srq ibsrq;
+ struct rxe_pd *pd;
+ struct rxe_rq rq;
+ u32 srq_num;
+
+ int limit;
+ int error;
+};
+
+enum rxe_qp_state {
+ QP_STATE_RESET,
+ QP_STATE_INIT,
+ QP_STATE_READY,
+ QP_STATE_DRAIN, /* req only */
+ QP_STATE_DRAINED, /* req only */
+ QP_STATE_ERROR
+};
+
+extern char *rxe_qp_state_name[];
+
+struct rxe_req_info {
+ enum rxe_qp_state state;
+ int wqe_index;
+ u32 psn;
+ int opcode;
+ atomic_t rd_atomic;
+ int wait_fence;
+ int need_rd_atomic;
+ int wait_psn;
+ int need_retry;
+ int noack_pkts;
+ struct rxe_task task;
+};
+
+struct rxe_comp_info {
+ u32 psn;
+ int opcode;
+ int timeout;
+ int timeout_retry;
+ u32 retry_cnt;
+ u32 rnr_retry;
+ struct rxe_task task;
+};
+
+enum rdatm_res_state {
+ rdatm_res_state_next,
+ rdatm_res_state_new,
+ rdatm_res_state_replay,
+};
+
+struct resp_res {
+ int type;
+ u32 first_psn;
+ u32 last_psn;
+ u32 cur_psn;
+ enum rdatm_res_state state;
+
+ union {
+ struct {
+ struct sk_buff *skb;
+ } atomic;
+ struct {
+ struct rxe_mem *mr;
+ u64 va_org;
+ u32 rkey;
+ u32 length;
+ u64 va;
+ u32 resid;
+ } read;
+ };
+};
+
+struct rxe_resp_info {
+ enum rxe_qp_state state;
+ u32 msn;
+ u32 psn;
+ int opcode;
+ int drop_msg;
+ int goto_error;
+ int sent_psn_nak;
+ enum ib_wc_status status;
+ u8 aeth_syndrome;
+
+ /* Receive only */
+ struct rxe_recv_wqe *wqe;
+
+ /* RDMA read / atomic only */
+ u64 va;
+ struct rxe_mem *mr;
+ u32 resid;
+ u32 rkey;
+ u64 atomic_orig;
+
+ /* SRQ only */
+ struct {
+ struct rxe_recv_wqe wqe;
+ struct ib_sge sge[RXE_MAX_SGE];
+ } srq_wqe;
+
+ /* Responder resources. It's a circular list where the oldest
+ * resource is dropped first.
+ */
+ struct resp_res *resources;
+ unsigned int res_head;
+ unsigned int res_tail;
+ struct resp_res *res;
+ struct rxe_task task;
+};
+
+struct rxe_qp {
+ struct rxe_pool_entry pelem;
+ struct ib_qp ibqp;
+ struct ib_qp_attr attr;
+ unsigned int valid;
+ unsigned int mtu;
+ int is_user;
+
+ struct rxe_pd *pd;
+ struct rxe_srq *srq;
+ struct rxe_cq *scq;
+ struct rxe_cq *rcq;
+
+ enum ib_sig_type sq_sig_type;
+
+ struct rxe_sq sq;
+ struct rxe_rq rq;
+
+ struct socket *sk;
+
+ struct rxe_av pri_av;
+ struct rxe_av alt_av;
+
+ /* list of mcast groups qp has joined (for cleanup) */
+ struct list_head grp_list;
+ spinlock_t grp_lock; /* guard grp_list */
+
+ struct sk_buff_head req_pkts;
+ struct sk_buff_head resp_pkts;
+ struct sk_buff_head send_pkts;
+
+ struct rxe_req_info req;
+ struct rxe_comp_info comp;
+ struct rxe_resp_info resp;
+
+ atomic_t ssn;
+ atomic_t skb_out;
+ int need_req_skb;
+
+ /* Timer for retransmitting packets when ACKs have been lost. RC
+ * only. The requester sets it when it is not already
+ * running. The responder resets it whenever an ack is
+ * received.
+ */
+ struct timer_list retrans_timer;
+ u64 qp_timeout_jiffies;
+
+ /* Timer for handling RNR NAKS. */
+ struct timer_list rnr_nak_timer;
+
+ spinlock_t state_lock; /* guard requester and completer */
+};
+
+enum rxe_mem_state {
+ RXE_MEM_STATE_ZOMBIE,
+ RXE_MEM_STATE_INVALID,
+ RXE_MEM_STATE_FREE,
+ RXE_MEM_STATE_VALID,
+};
+
+enum rxe_mem_type {
+ RXE_MEM_TYPE_NONE,
+ RXE_MEM_TYPE_DMA,
+ RXE_MEM_TYPE_MR,
+ RXE_MEM_TYPE_FMR,
+ RXE_MEM_TYPE_MW,
+};
+
+#define RXE_BUF_PER_MAP (PAGE_SIZE / sizeof(struct rxe_phys_buf))
+
+struct rxe_phys_buf {
+ u64 addr;
+ u64 size;
+};
+
+struct rxe_map {
+ struct rxe_phys_buf buf[RXE_BUF_PER_MAP];
+};
+
+struct rxe_mem {
+ struct rxe_pool_entry pelem;
+ union {
+ struct ib_mr ibmr;
+ struct ib_mw ibmw;
+ };
+
+ struct rxe_pd *pd;
+ struct ib_umem *umem;
+
+ u32 lkey;
+ u32 rkey;
+
+ enum rxe_mem_state state;
+ enum rxe_mem_type type;
+ u64 va;
+ u64 iova;
+ size_t length;
+ u32 offset;
+ int access;
+
+ int page_shift;
+ int page_mask;
+ int map_shift;
+ int map_mask;
+
+ u32 num_buf;
+ u32 nbuf;
+
+ u32 max_buf;
+ u32 num_map;
+
+ struct rxe_map **map;
+};
+
+struct rxe_mc_grp {
+ struct rxe_pool_entry pelem;
+ spinlock_t mcg_lock; /* guard group */
+ struct rxe_dev *rxe;
+ struct list_head qp_list;
+ union ib_gid mgid;
+ int num_qp;
+ u32 qkey;
+ u16 pkey;
+};
+
+struct rxe_mc_elem {
+ struct rxe_pool_entry pelem;
+ struct list_head qp_list;
+ struct list_head grp_list;
+ struct rxe_qp *qp;
+ struct rxe_mc_grp *grp;
+};
+
+struct rxe_port {
+ struct ib_port_attr attr;
+ u16 *pkey_tbl;
+ __be64 port_guid;
+ __be64 subnet_prefix;
+ spinlock_t port_lock; /* guard port */
+ unsigned int mtu_cap;
+ /* special QPs */
+ u32 qp_smi_index;
+ u32 qp_gsi_index;
+};
+
+/* callbacks from rdma_rxe to network interface layer */
+struct rxe_ifc_ops {
+ void (*release)(struct rxe_dev *rxe);
+ __be64 (*node_guid)(struct rxe_dev *rxe);
+ __be64 (*port_guid)(struct rxe_dev *rxe);
+ struct device *(*dma_device)(struct rxe_dev *rxe);
+ int (*mcast_add)(struct rxe_dev *rxe, union ib_gid *mgid);
+ int (*mcast_delete)(struct rxe_dev *rxe, union ib_gid *mgid);
+ int (*prepare)(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb, u32 *crc);
+ int (*send)(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb);
+ int (*loopback)(struct sk_buff *skb);
+ struct sk_buff *(*init_packet)(struct rxe_dev *rxe, struct rxe_av *av,
+ int paylen, struct rxe_pkt_info *pkt);
+ char *(*parent_name)(struct rxe_dev *rxe, unsigned int port_num);
+ enum rdma_link_layer (*link_layer)(struct rxe_dev *rxe,
+ unsigned int port_num);
+};
+
+struct rxe_dev {
+ struct ib_device ib_dev;
+ struct ib_device_attr attr;
+ int max_ucontext;
+ int max_inline_data;
+ struct kref ref_cnt;
+ struct mutex usdev_lock;
+
+ struct rxe_ifc_ops *ifc_ops;
+
+ struct net_device *ndev;
+
+ int xmit_errors;
+
+ struct rxe_pool uc_pool;
+ struct rxe_pool pd_pool;
+ struct rxe_pool ah_pool;
+ struct rxe_pool srq_pool;
+ struct rxe_pool qp_pool;
+ struct rxe_pool cq_pool;
+ struct rxe_pool mr_pool;
+ struct rxe_pool mw_pool;
+ struct rxe_pool mc_grp_pool;
+ struct rxe_pool mc_elem_pool;
+
+ spinlock_t pending_lock; /* guard pending_mmaps */
+ struct list_head pending_mmaps;
+
+ spinlock_t mmap_offset_lock; /* guard mmap_offset */
+ int mmap_offset;
+
+ struct rxe_port port;
+ struct list_head list;
+};
+
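+/* Helpers that map an embedded ib_* object back to its enclosing rxe
+ * structure; each one tolerates a NULL pointer.
+ */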
+static inline struct rxe_dev *to_rdev(struct ib_device *dev)
+{
+ return dev ? container_of(dev, struct rxe_dev, ib_dev) : NULL;
+}
+
+static inline struct rxe_ucontext *to_ruc(struct ib_ucontext *uc)
+{
+ return uc ? container_of(uc, struct rxe_ucontext, ibuc) : NULL;
+}
+
+static inline struct rxe_pd *to_rpd(struct ib_pd *pd)
+{
+ return pd ? container_of(pd, struct rxe_pd, ibpd) : NULL;
+}
+
+static inline struct rxe_ah *to_rah(struct ib_ah *ah)
+{
+ return ah ? container_of(ah, struct rxe_ah, ibah) : NULL;
+}
+
+static inline struct rxe_srq *to_rsrq(struct ib_srq *srq)
+{
+ return srq ? container_of(srq, struct rxe_srq, ibsrq) : NULL;
+}
+
+static inline struct rxe_qp *to_rqp(struct ib_qp *qp)
+{
+ return qp ? container_of(qp, struct rxe_qp, ibqp) : NULL;
+}
+
+static inline struct rxe_cq *to_rcq(struct ib_cq *cq)
+{
+ return cq ? container_of(cq, struct rxe_cq, ibcq) : NULL;
+}
+
+static inline struct rxe_mem *to_rmr(struct ib_mr *mr)
+{
+ return mr ? container_of(mr, struct rxe_mem, ibmr) : NULL;
+}
+
+static inline struct rxe_mem *to_rmw(struct ib_mw *mw)
+{
+ return mw ? container_of(mw, struct rxe_mem, ibmw) : NULL;
+}
+
+int rxe_register_device(struct rxe_dev *rxe);
+int rxe_unregister_device(struct rxe_dev *rxe);
+
+void rxe_mc_cleanup(void *arg);
+
+#endif /* RXE_VERBS_H */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 4f7d9b48d..9dbfcc0ab 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -478,6 +478,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
struct ipoib_ah *address, u32 qpn);
void ipoib_reap_ah(struct work_struct *work);
+struct ipoib_path *__path_find(struct net_device *dev, void *gid);
void ipoib_mark_paths_invalid(struct net_device *dev);
void ipoib_flush_paths(struct net_device *dev);
int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 951d9abcc..4ad297d3d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1318,6 +1318,8 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
}
}
+#define QPN_AND_OPTIONS_OFFSET 4
+
static void ipoib_cm_tx_start(struct work_struct *work)
{
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
@@ -1326,6 +1328,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
struct ipoib_neigh *neigh;
struct ipoib_cm_tx *p;
unsigned long flags;
+ struct ipoib_path *path;
int ret;
struct ib_sa_path_rec pathrec;
@@ -1338,7 +1341,19 @@ static void ipoib_cm_tx_start(struct work_struct *work)
p = list_entry(priv->cm.start_list.next, typeof(*p), list);
list_del_init(&p->list);
neigh = p->neigh;
+
qpn = IPOIB_QPN(neigh->daddr);
+ /*
+ * As long as the search is done under these two locks,
+ * path existence indicates its validity.
+ */
+ path = __path_find(dev, neigh->daddr + QPN_AND_OPTIONS_OFFSET);
+ if (!path) {
+ pr_info("%s: ignoring invalid path %pI6\n",
+ __func__,
+ neigh->daddr + QPN_AND_OPTIONS_OFFSET);
+ goto free_neigh;
+ }
memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1350,6 +1365,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
spin_lock_irqsave(&priv->lock, flags);
if (ret) {
+free_neigh:
neigh = p->neigh;
if (neigh) {
neigh->cm = NULL;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 1502199c8..7b6d40ff1 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -62,10 +62,8 @@ static void ipoib_get_drvinfo(struct net_device *netdev,
{
struct ipoib_dev_priv *priv = netdev_priv(netdev);
- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "%d.%d.%d", (int)(priv->ca->attrs.fw_ver >> 32),
- (int)(priv->ca->attrs.fw_ver >> 16) & 0xffff,
- (int)priv->ca->attrs.fw_ver & 0xffff);
+ ib_get_device_fw_str(priv->ca, drvinfo->fw_version,
+ sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, dev_name(priv->ca->dma_device),
sizeof(drvinfo->bus_info));
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index dc6d241b9..be11d5d5b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -1161,8 +1161,17 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
}
if (level == IPOIB_FLUSH_LIGHT) {
+ int oper_up;
ipoib_mark_paths_invalid(dev);
+ /* Set IPoIB operation as down to prevent races between
+ * the flush flow, which leaves the MCG, and on-the-fly joins,
+ * which can happen during that time. The mcast restart task
+ * should deal with any join requests we missed.
+ */
+ oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
ipoib_mcast_dev_flush(dev);
+ if (oper_up)
+ set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
ipoib_flush_ah(dev);
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 5f58c41ef..cc1c1b062 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -485,7 +485,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
return -EINVAL;
}
-static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
+struct ipoib_path *__path_find(struct net_device *dev, void *gid)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct rb_node *n = priv->path_tree.rb_node;
@@ -1967,8 +1967,7 @@ int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
priv->hca_caps = hca->attrs.device_cap_flags;
if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
- priv->dev->hw_features = NETIF_F_SG |
- NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
+ priv->dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
if (priv->hca_caps & IB_DEVICE_UD_TSO)
priv->dev->hw_features |= NETIF_F_TSO;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 1e7cbbaa1..c55ecb2c3 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -135,7 +135,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
.cap = {
.max_send_wr = ipoib_sendq_size,
.max_recv_wr = ipoib_recvq_size,
- .max_send_sge = 1,
+ .max_send_sge = min_t(u32, priv->ca->attrs.max_sge,
+ MAX_SKB_FRAGS + 1),
.max_recv_sge = IPOIB_UD_RX_SG
},
.sq_sig_type = IB_SIGNAL_ALL_WR,
@@ -205,10 +206,6 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
if (priv->hca_caps & IB_DEVICE_MANAGED_FLOW_STEERING)
init_attr.create_flags |= IB_QP_CREATE_NETIF_QP;
- if (dev->features & NETIF_F_SG)
- init_attr.cap.max_send_sge =
- min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1);
-
priv->qp = ib_create_qp(priv->pd, &init_attr);
if (IS_ERR(priv->qp)) {
printk(KERN_WARNING "%s: failed to create QP\n", ca->name);
@@ -234,6 +231,9 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
priv->rx_wr.next = NULL;
priv->rx_wr.sg_list = priv->rx_sge;
+ if (init_attr.cap.max_send_sge > 1)
+ dev->features |= NETIF_F_SG;
+
priv->max_send_sge = init_attr.cap.max_send_sge;
return 0;
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index a990c0420..cae9bbcc2 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -137,8 +137,6 @@ isert_create_qp(struct isert_conn *isert_conn,
attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
attr.cap.max_rdma_ctxs = ISCSI_DEF_XMIT_CMDS_MAX;
attr.cap.max_send_sge = device->ib_device->attrs.max_sge;
- isert_conn->max_sge = min(device->ib_device->attrs.max_sge,
- device->ib_device->attrs.max_sge_rd);
attr.cap.max_recv_sge = 1;
attr.sq_sig_type = IB_SIGNAL_REQ_WR;
attr.qp_type = IB_QPT_RC;
@@ -405,6 +403,7 @@ isert_init_conn(struct isert_conn *isert_conn)
INIT_LIST_HEAD(&isert_conn->node);
init_completion(&isert_conn->login_comp);
init_completion(&isert_conn->login_req_comp);
+ init_waitqueue_head(&isert_conn->rem_wait);
kref_init(&isert_conn->kref);
mutex_init(&isert_conn->mutex);
INIT_WORK(&isert_conn->release_work, isert_release_work);
@@ -450,7 +449,7 @@ isert_alloc_login_buf(struct isert_conn *isert_conn,
isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
if (!isert_conn->login_rsp_buf) {
- isert_err("Unable to allocate isert_conn->login_rspbuf\n");
+ ret = -ENOMEM;
goto out_unmap_login_req_buf;
}
@@ -580,7 +579,8 @@ isert_connect_release(struct isert_conn *isert_conn)
BUG_ON(!device);
isert_free_rx_descriptors(isert_conn);
- if (isert_conn->cm_id)
+ if (isert_conn->cm_id &&
+ !isert_conn->dev_removed)
rdma_destroy_id(isert_conn->cm_id);
if (isert_conn->qp) {
@@ -595,7 +595,10 @@ isert_connect_release(struct isert_conn *isert_conn)
isert_device_put(device);
- kfree(isert_conn);
+ if (isert_conn->dev_removed)
+ wake_up_interruptible(&isert_conn->rem_wait);
+ else
+ kfree(isert_conn);
}
static void
@@ -755,6 +758,7 @@ static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
struct isert_np *isert_np = cma_id->context;
+ struct isert_conn *isert_conn;
int ret = 0;
isert_info("%s (%d): status %d id %p np %p\n",
@@ -775,10 +779,21 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
break;
case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */
case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */
- case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
ret = isert_disconnected_handler(cma_id, event->event);
break;
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ isert_conn = cma_id->qp->qp_context;
+ isert_conn->dev_removed = true;
+ isert_disconnected_handler(cma_id, event->event);
+ wait_event_interruptible(isert_conn->rem_wait,
+ isert_conn->state == ISER_CONN_DOWN);
+ kfree(isert_conn);
+ /*
+ * return non-zero from the callback to destroy
+ * the rdma cm id
+ */
+ return 1;
case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
case RDMA_CM_EVENT_CONNECT_ERROR:
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index e512ba941..c02ada57d 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -138,7 +138,6 @@ struct isert_conn {
u32 responder_resources;
u32 initiator_depth;
bool pi_support;
- u32 max_sge;
struct iser_rx_desc *login_req_buf;
char *login_rsp_buf;
u64 login_req_dma;
@@ -159,6 +158,8 @@ struct isert_conn {
struct work_struct release_work;
bool logout_posted;
bool snd_w_inv;
+ wait_queue_head_t rem_wait;
+ bool dev_removed;
};
#define ISERT_MAX_CQ 64
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 9a3b954e8..883bbfe08 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -522,6 +522,11 @@ static int srpt_refresh_port(struct srpt_port *sport)
if (ret)
goto err_query_port;
+ snprintf(sport->port_guid, sizeof(sport->port_guid),
+ "0x%016llx%016llx",
+ be64_to_cpu(sport->gid.global.subnet_prefix),
+ be64_to_cpu(sport->gid.global.interface_id));
+
if (!sport->mad_agent) {
memset(&reg_req, 0, sizeof(reg_req));
reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
@@ -2262,7 +2267,7 @@ static void srpt_queue_response(struct se_cmd *cmd)
container_of(cmd, struct srpt_send_ioctx, cmd);
struct srpt_rdma_ch *ch = ioctx->ch;
struct srpt_device *sdev = ch->sport->sdev;
- struct ib_send_wr send_wr, *first_wr = NULL, *bad_wr;
+ struct ib_send_wr send_wr, *first_wr = &send_wr, *bad_wr;
struct ib_sge sge;
enum srpt_command_state state;
unsigned long flags;
@@ -2303,11 +2308,8 @@ static void srpt_queue_response(struct se_cmd *cmd)
struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp,
- ch->sport->port, NULL,
- first_wr ? first_wr : &send_wr);
+ ch->sport->port, NULL, first_wr);
}
- } else {
- first_wr = &send_wr;
}
if (state != SRPT_STATE_MGMT)
@@ -2551,10 +2553,6 @@ static void srpt_add_one(struct ib_device *device)
sdev->device->name, i);
goto err_ring;
}
- snprintf(sport->port_guid, sizeof(sport->port_guid),
- "0x%016llx%016llx",
- be64_to_cpu(sport->gid.global.subnet_prefix),
- be64_to_cpu(sport->gid.global.interface_id));
}
spin_lock(&srpt_dev_lock);
diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c
index 54fce56c8..a1bbec9cd 100644
--- a/drivers/input/input-mt.c
+++ b/drivers/input/input-mt.c
@@ -218,8 +218,23 @@ void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count)
}
input_event(dev, EV_KEY, BTN_TOUCH, count > 0);
- if (use_count)
+
+ if (use_count) {
+ if (count == 0 &&
+ !test_bit(ABS_MT_DISTANCE, dev->absbit) &&
+ test_bit(ABS_DISTANCE, dev->absbit) &&
+ input_abs_get_val(dev, ABS_DISTANCE) != 0) {
+ /*
+ * Force reporting BTN_TOOL_FINGER for devices that
+ * only report general hover (and not per-contact
+ * distance) when contact is in proximity but not
+ * on the surface.
+ */
+ count = 1;
+ }
+
input_mt_report_finger_count(dev, count);
+ }
if (oldest) {
int x = input_mt_get_value(oldest, ABS_MT_POSITION_X);
diff --git a/drivers/input/input.c b/drivers/input/input.c
index b87ffbd45..d95c34ee5 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -153,8 +153,6 @@ static void input_pass_values(struct input_dev *dev,
rcu_read_unlock();
- add_input_randomness(vals->type, vals->code, vals->value);
-
/* trigger auto repeat for key events */
if (test_bit(EV_REP, dev->evbit) && test_bit(EV_KEY, dev->evbit)) {
for (v = vals; v != vals + count; v++) {
@@ -371,9 +369,10 @@ static int input_get_disposition(struct input_dev *dev,
static void input_handle_event(struct input_dev *dev,
unsigned int type, unsigned int code, int value)
{
- int disposition;
+ int disposition = input_get_disposition(dev, type, code, &value);
- disposition = input_get_disposition(dev, type, code, &value);
+ if (disposition != INPUT_IGNORE_EVENT && type != EV_SYN)
+ add_input_randomness(type, code, value);
if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
dev->event(dev, type, code, value);
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index e819a60b4..b30ab7e7a 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -979,6 +979,12 @@ static const struct input_device_id joydev_ids[] = {
.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
INPUT_DEVICE_ID_MATCH_ABSBIT,
.evbit = { BIT_MASK(EV_ABS) },
+ .absbit = { BIT_MASK(ABS_Z) },
+ },
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+ INPUT_DEVICE_ID_MATCH_ABSBIT,
+ .evbit = { BIT_MASK(EV_ABS) },
.absbit = { BIT_MASK(ABS_WHEEL) },
},
{
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index a529a4535..83af17ad0 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -115,6 +115,10 @@ static bool sticks_to_null;
module_param(sticks_to_null, bool, S_IRUGO);
MODULE_PARM_DESC(sticks_to_null, "Do not map sticks at all for unknown pads");
+static bool auto_poweroff = true;
+module_param(auto_poweroff, bool, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(auto_poweroff, "Power off wireless controllers on suspend");
+
static const struct xpad_device {
u16 idVendor;
u16 idProduct;
@@ -1248,6 +1252,36 @@ static void xpad_stop_input(struct usb_xpad *xpad)
usb_kill_urb(xpad->irq_in);
}
+static void xpad360w_poweroff_controller(struct usb_xpad *xpad)
+{
+ unsigned long flags;
+ struct xpad_output_packet *packet =
+ &xpad->out_packets[XPAD_OUT_CMD_IDX];
+
+ spin_lock_irqsave(&xpad->odata_lock, flags);
+
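+ /* 12-byte power-off command: 0x00 0x00 0x08 0xC0 followed by
+ * zero padding.
+ */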
+ packet->data[0] = 0x00;
+ packet->data[1] = 0x00;
+ packet->data[2] = 0x08;
+ packet->data[3] = 0xC0;
+ packet->data[4] = 0x00;
+ packet->data[5] = 0x00;
+ packet->data[6] = 0x00;
+ packet->data[7] = 0x00;
+ packet->data[8] = 0x00;
+ packet->data[9] = 0x00;
+ packet->data[10] = 0x00;
+ packet->data[11] = 0x00;
+ packet->len = 12;
+ packet->pending = true;
+
+ /* Reset the sequence so we send out poweroff now */
+ xpad->last_out_packet = -1;
+ xpad_try_sending_next_out_packet(xpad);
+
+ spin_unlock_irqrestore(&xpad->odata_lock, flags);
+}
+
static int xpad360w_start_input(struct usb_xpad *xpad)
{
int error;
@@ -1590,6 +1624,15 @@ static int xpad_suspend(struct usb_interface *intf, pm_message_t message)
* or goes away.
*/
xpad360w_stop_input(xpad);
+
+ /*
+ * The wireless adapter is going off now, so the
+ * gamepads are going to become disconnected.
+ * Unless explicitly disabled, power them down
+ * so they don't just sit there flashing.
+ */
+ if (auto_poweroff && xpad->pad_present)
+ xpad360w_poweroff_controller(xpad);
} else {
mutex_lock(&input->mutex);
if (input->users)
diff --git a/drivers/input/keyboard/clps711x-keypad.c b/drivers/input/keyboard/clps711x-keypad.c
index b637f1af8..997e3e97f 100644
--- a/drivers/input/keyboard/clps711x-keypad.c
+++ b/drivers/input/keyboard/clps711x-keypad.c
@@ -101,7 +101,7 @@ static int clps711x_keypad_probe(struct platform_device *pdev)
return -ENOMEM;
priv->syscon =
- syscon_regmap_lookup_by_compatible("cirrus,clps711x-syscon1");
+ syscon_regmap_lookup_by_compatible("cirrus,ep7209-syscon1");
if (IS_ERR(priv->syscon))
return PTR_ERR(priv->syscon);
@@ -181,7 +181,7 @@ static int clps711x_keypad_remove(struct platform_device *pdev)
}
static const struct of_device_id clps711x_keypad_of_match[] = {
- { .compatible = "cirrus,clps711x-keypad", },
+ { .compatible = "cirrus,ep7209-keypad", },
{ }
};
MODULE_DEVICE_TABLE(of, clps711x_keypad_of_match);
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index b01966dc7..4b0878f35 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -186,7 +186,7 @@ static irqreturn_t cros_ec_keyb_irq(int irq, void *data)
if (ret >= 0)
cros_ec_keyb_process(ckdev, kb_state, ret);
else
- dev_err(ec->dev, "failed to get keyboard state: %d\n", ret);
+ dev_err(ckdev->dev, "failed to get keyboard state: %d\n", ret);
return IRQ_HANDLED;
}
@@ -236,7 +236,7 @@ static void cros_ec_keyb_compute_valid_keys(struct cros_ec_keyb *ckdev)
static int cros_ec_keyb_probe(struct platform_device *pdev)
{
struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
- struct device *dev = ec->dev;
+ struct device *dev = &pdev->dev;
struct cros_ec_keyb *ckdev;
struct input_dev *idev;
struct device_node *np;
@@ -246,23 +246,22 @@ static int cros_ec_keyb_probe(struct platform_device *pdev)
if (!np)
return -ENODEV;
- ckdev = devm_kzalloc(&pdev->dev, sizeof(*ckdev), GFP_KERNEL);
+ ckdev = devm_kzalloc(dev, sizeof(*ckdev), GFP_KERNEL);
if (!ckdev)
return -ENOMEM;
- err = matrix_keypad_parse_of_params(&pdev->dev, &ckdev->rows,
- &ckdev->cols);
+ err = matrix_keypad_parse_of_params(dev, &ckdev->rows, &ckdev->cols);
if (err)
return err;
- ckdev->valid_keys = devm_kzalloc(&pdev->dev, ckdev->cols, GFP_KERNEL);
+ ckdev->valid_keys = devm_kzalloc(dev, ckdev->cols, GFP_KERNEL);
if (!ckdev->valid_keys)
return -ENOMEM;
- ckdev->old_kb_state = devm_kzalloc(&pdev->dev, ckdev->cols, GFP_KERNEL);
+ ckdev->old_kb_state = devm_kzalloc(dev, ckdev->cols, GFP_KERNEL);
if (!ckdev->old_kb_state)
return -ENOMEM;
- idev = devm_input_allocate_device(&pdev->dev);
+ idev = devm_input_allocate_device(dev);
if (!idev)
return -ENOMEM;
@@ -273,7 +272,7 @@ static int cros_ec_keyb_probe(struct platform_device *pdev)
ckdev->ec = ec;
ckdev->dev = dev;
- dev_set_drvdata(&pdev->dev, ckdev);
+ dev_set_drvdata(dev, ckdev);
idev->name = CROS_EC_DEV_NAME;
idev->phys = ec->phys_name;
@@ -282,7 +281,7 @@ static int cros_ec_keyb_probe(struct platform_device *pdev)
idev->id.bustype = BUS_VIRTUAL;
idev->id.version = 1;
idev->id.product = 0;
- idev->dev.parent = &pdev->dev;
+ idev->dev.parent = dev;
idev->open = cros_ec_keyb_open;
idev->close = cros_ec_keyb_close;
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index e92dfd888..ec0070e97 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -32,7 +32,7 @@
#define TC3589x_PULL_DOWN_MASK 0x1
#define TC3589x_PULL_UP_MASK 0x2
#define TC3589x_PULLUP_ALL_MASK 0xAA
-#define TC3589x_IO_PULL_VAL(index, mask) ((mask)<<((index)%4)*2))
+#define TC3589x_IO_PULL_VAL(index, mask) ((mask)<<((index)%4)*2)
/* Bit masks for IOCFG register */
#define IOCFG_BALLCFG 0x01
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index 29485bc42..0c07e1023 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -552,7 +552,7 @@ static int tegra_kbc_parse_dt(struct tegra_kbc *kbc)
if (!num_rows || !num_cols || ((num_rows + num_cols) > KBC_MAX_GPIO)) {
dev_err(kbc->dev,
- "keypad rows/columns not porperly specified\n");
+ "keypad rows/columns not properly specified\n");
return -EINVAL;
}
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 1f2337abc..efb0ca871 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -82,6 +82,20 @@ config INPUT_ARIZONA_HAPTICS
To compile this driver as a module, choose M here: the
module will be called arizona-haptics.
+config INPUT_ATMEL_CAPTOUCH
+ tristate "Atmel Capacitive Touch Button Driver"
+ depends on OF || COMPILE_TEST
+ depends on I2C
+ help
+ Say Y here if an Atmel Capacitive Touch Button device which
+ implements the "captouch" protocol is connected to the I2C bus.
+ Typically this device consists of an Atmel touch sensor controlled
+ by an ATmegaXX MCU running firmware based on the Qtouch library.
+ One should find an "atmel,captouch" node in the board-specific DTS.
+
+ To compile this driver as a module, choose M here: the
+ module will be called atmel_captouch.
+
config INPUT_BMA150
tristate "BMA150/SMB380 acceleration sensor support"
depends on I2C
@@ -796,4 +810,13 @@ config INPUT_DRV2667_HAPTICS
To compile this driver as a module, choose M here: the
module will be called drv2667-haptics.
+config INPUT_HISI_POWERKEY
+ tristate "Hisilicon PMIC ONKEY support"
+ depends on ARCH_HISI || COMPILE_TEST
+ help
+ Say Y here to enable support for the Hisilicon PMIC power key (ONKEY).
+
+ To compile this driver as a module, choose M here: the
+ module will be called hisi_powerkey.
+
endif
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 0357a088c..6a1e5e20f 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_INPUT_APANEL) += apanel.o
obj-$(CONFIG_INPUT_ARIZONA_HAPTICS) += arizona-haptics.o
obj-$(CONFIG_INPUT_ATI_REMOTE2) += ati_remote2.o
obj-$(CONFIG_INPUT_ATLAS_BTNS) += atlas_btns.o
+obj-$(CONFIG_INPUT_ATMEL_CAPTOUCH) += atmel_captouch.o
obj-$(CONFIG_INPUT_BFIN_ROTARY) += bfin_rotary.o
obj-$(CONFIG_INPUT_BMA150) += bma150.o
obj-$(CONFIG_INPUT_CM109) += cm109.o
@@ -34,6 +35,7 @@ obj-$(CONFIG_INPUT_DRV2667_HAPTICS) += drv2667.o
obj-$(CONFIG_INPUT_GP2A) += gp2ap002a00f.o
obj-$(CONFIG_INPUT_GPIO_BEEPER) += gpio-beeper.o
obj-$(CONFIG_INPUT_GPIO_TILT_POLLED) += gpio_tilt_polled.o
+obj-$(CONFIG_INPUT_HISI_POWERKEY) += hisi_powerkey.o
obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
obj-$(CONFIG_INPUT_IMS_PCU) += ims-pcu.o
obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o
diff --git a/drivers/input/misc/apanel.c b/drivers/input/misc/apanel.c
index a8d2b8db4..53630afab 100644
--- a/drivers/input/misc/apanel.c
+++ b/drivers/input/misc/apanel.c
@@ -297,7 +297,7 @@ static int __init apanel_init(void)
if (slave != i2c_addr) {
pr_notice(APANEL ": only one SMBus slave "
- "address supported, skiping device...\n");
+ "address supported, skipping device...\n");
continue;
}
diff --git a/drivers/input/misc/atmel_captouch.c b/drivers/input/misc/atmel_captouch.c
new file mode 100644
index 000000000..941265415
--- /dev/null
+++ b/drivers/input/misc/atmel_captouch.c
@@ -0,0 +1,290 @@
+/*
+ * Atmel Atmegaxx Capacitive Touch Button Driver
+ *
+ * Copyright (C) 2016 Google, inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * The HW used to develop this captouch driver happens to be based on
+ * an Atmega88PA part and to use QtouchADC for sensing touch, but that
+ * is incidental. Calling this driver "captouch" is simply an arbitrary
+ * way to distinguish the protocol it supports from other atmel/qtouch
+ * drivers.
+ *
+ * The captouch driver supports a newer/different version of the I2C
+ * registers/commands than the qt1070.c driver.
+ * Don't let the similarity of the general driver structure fool you.
+ *
+ * For raw i2c access from userspace, use i2cset/i2cget
+ * to poke at /dev/i2c-N devices.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+
+/* Maximum number of buttons supported */
+#define MAX_NUM_OF_BUTTONS 8
+
+/* Registers */
+#define REG_KEY1_THRESHOLD 0x02
+#define REG_KEY2_THRESHOLD 0x03
+#define REG_KEY3_THRESHOLD 0x04
+#define REG_KEY4_THRESHOLD 0x05
+
+#define REG_KEY1_REF_H 0x20
+#define REG_KEY1_REF_L 0x21
+#define REG_KEY2_REF_H 0x22
+#define REG_KEY2_REF_L 0x23
+#define REG_KEY3_REF_H 0x24
+#define REG_KEY3_REF_L 0x25
+#define REG_KEY4_REF_H 0x26
+#define REG_KEY4_REF_L 0x27
+
+#define REG_KEY1_DLT_H 0x30
+#define REG_KEY1_DLT_L 0x31
+#define REG_KEY2_DLT_H 0x32
+#define REG_KEY2_DLT_L 0x33
+#define REG_KEY3_DLT_H 0x34
+#define REG_KEY3_DLT_L 0x35
+#define REG_KEY4_DLT_H 0x36
+#define REG_KEY4_DLT_L 0x37
+
+#define REG_KEY_STATE 0x3C
+
+/*
+ * @client: I2C slave device client pointer
+ * @input: Input device pointer
+ * @num_btn: Number of buttons
+ * @keycodes: map of button# to KeyCode
+ * @prev_btn: Previous key state to detect button "press" or "release"
+ * @xfer_buf: I2C transfer buffer
+ */
+struct atmel_captouch_device {
+ struct i2c_client *client;
+ struct input_dev *input;
+ u32 num_btn;
+ u32 keycodes[MAX_NUM_OF_BUTTONS];
+ u8 prev_btn;
+ u8 xfer_buf[8] ____cacheline_aligned;
+};
+
+/*
+ * Read from I2C slave device
+ * The protocol is that the client has to provide both the register address
+ * and the length, and while reading back the device would prepend the data
+ * with address and length for verification.
+ */
+static int atmel_read(struct atmel_captouch_device *capdev,
+ u8 reg, u8 *data, size_t len)
+{
+ struct i2c_client *client = capdev->client;
+ struct device *dev = &client->dev;
+ struct i2c_msg msg[2];
+ int err;
+
+ if (len > sizeof(capdev->xfer_buf) - 2)
+ return -EINVAL;
+
+ capdev->xfer_buf[0] = reg;
+ capdev->xfer_buf[1] = len;
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].buf = capdev->xfer_buf;
+ msg[0].len = 2;
+
+ msg[1].addr = client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].buf = capdev->xfer_buf;
+ msg[1].len = len + 2;
+
+ err = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (err != ARRAY_SIZE(msg))
+ return err < 0 ? err : -EIO;
+
+ if (capdev->xfer_buf[0] != reg) {
+ dev_err(dev,
+ "I2C read error: register address does not match (%#02x vs %02x)\n",
+ capdev->xfer_buf[0], reg);
+ return -ECOMM;
+ }
+
+ memcpy(data, &capdev->xfer_buf[2], len);
+
+ return 0;
+}
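+
+/*
+ * For example, reading the single key-state byte (as done in the ISR
+ * below) puts {REG_KEY_STATE, 0x01} on the wire and expects the device
+ * to answer {REG_KEY_STATE, 0x01, <state>}:
+ *
+ * u8 state;
+ * int err = atmel_read(capdev, REG_KEY_STATE, &state, 1);
+ */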
+
+/*
+ * Handle an interrupt and report the key changes to the input system.
+ * Multiple buttons may be reported as touched at once; whether that
+ * actually happens depends on the device.
+ */
+static irqreturn_t atmel_captouch_isr(int irq, void *data)
+{
+ struct atmel_captouch_device *capdev = data;
+ struct device *dev = &capdev->client->dev;
+ int error;
+ int i;
+ u8 new_btn;
+ u8 changed_btn;
+
+ error = atmel_read(capdev, REG_KEY_STATE, &new_btn, 1);
+ if (error) {
+ dev_err(dev, "failed to read button state: %d\n", error);
+ goto out;
+ }
+
+ dev_dbg(dev, "%s: button state %#02x\n", __func__, new_btn);
+
+ changed_btn = new_btn ^ capdev->prev_btn;
+ capdev->prev_btn = new_btn;
+
+ for (i = 0; i < capdev->num_btn; i++) {
+ if (changed_btn & BIT(i))
+ input_report_key(capdev->input,
+ capdev->keycodes[i],
+ new_btn & BIT(i));
+ }
+
+ input_sync(capdev->input);
+
+out:
+ return IRQ_HANDLED;
+}
+
+/*
+ * Probe function to setup the device, input system and interrupt
+ */
+static int atmel_captouch_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct atmel_captouch_device *capdev;
+ struct device *dev = &client->dev;
+ struct device_node *node;
+ int i;
+ int err;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA |
+ I2C_FUNC_SMBUS_I2C_BLOCK)) {
+ dev_err(dev, "needed i2c functionality is not supported\n");
+ return -EINVAL;
+ }
+
+ capdev = devm_kzalloc(dev, sizeof(*capdev), GFP_KERNEL);
+ if (!capdev)
+ return -ENOMEM;
+
+ capdev->client = client;
+ i2c_set_clientdata(client, capdev);
+
+ err = atmel_read(capdev, REG_KEY_STATE,
+ &capdev->prev_btn, sizeof(capdev->prev_btn));
+ if (err) {
+ dev_err(dev, "failed to read initial button state: %d\n", err);
+ return err;
+ }
+
+ capdev->input = devm_input_allocate_device(dev);
+ if (!capdev->input) {
+ dev_err(dev, "failed to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ capdev->input->id.bustype = BUS_I2C;
+ capdev->input->id.product = 0x880A;
+ capdev->input->id.version = 0;
+ capdev->input->name = "ATMegaXX Capacitive Button Controller";
+ __set_bit(EV_KEY, capdev->input->evbit);
+
+ node = dev->of_node;
+ if (!node) {
+ dev_err(dev, "failed to find matching node in device tree\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_bool(node, "autorepeat"))
+ __set_bit(EV_REP, capdev->input->evbit);
+
+ capdev->num_btn = of_property_count_u32_elems(node, "linux,keycodes");
+ if (capdev->num_btn > MAX_NUM_OF_BUTTONS)
+ capdev->num_btn = MAX_NUM_OF_BUTTONS;
+
+ err = of_property_read_u32_array(node, "linux,keycodes",
+ capdev->keycodes,
+ capdev->num_btn);
+ if (err) {
+ dev_err(dev,
+ "failed to read linux,keycode property: %d\n", err);
+ return err;
+ }
+
+ for (i = 0; i < capdev->num_btn; i++)
+ __set_bit(capdev->keycodes[i], capdev->input->keybit);
+
+ capdev->input->keycode = capdev->keycodes;
+ capdev->input->keycodesize = sizeof(capdev->keycodes[0]);
+ capdev->input->keycodemax = capdev->num_btn;
+
+ err = input_register_device(capdev->input);
+ if (err)
+ return err;
+
+ err = devm_request_threaded_irq(dev, client->irq,
+ NULL, atmel_captouch_isr,
+ IRQF_ONESHOT,
+ "atmel_captouch", capdev);
+ if (err) {
+ dev_err(dev, "failed to request irq %d: %d\n",
+ client->irq, err);
+ return err;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id atmel_captouch_of_id[] = {
+ {
+ .compatible = "atmel,captouch",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, atmel_captouch_of_id);
+#endif
+
+static const struct i2c_device_id atmel_captouch_id[] = {
+ { "atmel_captouch", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, atmel_captouch_id);
+
+static struct i2c_driver atmel_captouch_driver = {
+ .probe = atmel_captouch_probe,
+ .id_table = atmel_captouch_id,
+ .driver = {
+ .name = "atmel_captouch",
+ .of_match_table = of_match_ptr(atmel_captouch_of_id),
+ },
+};
+module_i2c_driver(atmel_captouch_driver);
+
+/* Module information */
+MODULE_AUTHOR("Hung-yu Wu <hywu@google.com>");
+MODULE_DESCRIPTION("Atmel ATmegaXX Capacitance Touch Sensor I2C Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/misc/hisi_powerkey.c b/drivers/input/misc/hisi_powerkey.c
new file mode 100644
index 000000000..675539c52
--- /dev/null
+++ b/drivers/input/misc/hisi_powerkey.c
@@ -0,0 +1,142 @@
+/*
+ * Hisilicon PMIC powerkey driver
+ *
+ * Copyright (C) 2013 Hisilicon Ltd.
+ * Copyright (C) 2015, 2016 Linaro Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/reboot.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+
+/* the held interrupt will trigger after 4 seconds */
+#define MAX_HELD_TIME (4 * MSEC_PER_SEC)
+
+static irqreturn_t hi65xx_power_press_isr(int irq, void *q)
+{
+ struct input_dev *input = q;
+
+ pm_wakeup_event(input->dev.parent, MAX_HELD_TIME);
+ input_report_key(input, KEY_POWER, 1);
+ input_sync(input);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t hi65xx_power_release_isr(int irq, void *q)
+{
+ struct input_dev *input = q;
+
+ pm_wakeup_event(input->dev.parent, MAX_HELD_TIME);
+ input_report_key(input, KEY_POWER, 0);
+ input_sync(input);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t hi65xx_restart_toggle_isr(int irq, void *q)
+{
+ struct input_dev *input = q;
+ int value = test_bit(KEY_RESTART, input->key);
+
+ pm_wakeup_event(input->dev.parent, MAX_HELD_TIME);
+ input_report_key(input, KEY_RESTART, !value);
+ input_sync(input);
+
+ return IRQ_HANDLED;
+}
+
+static const struct {
+ const char *name;
+ irqreturn_t (*handler)(int irq, void *q);
+} hi65xx_irq_info[] = {
+ { "down", hi65xx_power_press_isr },
+ { "up", hi65xx_power_release_isr },
+ { "hold 4s", hi65xx_restart_toggle_isr },
+};
+
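+/* The PMIC exposes three interrupt lines, looked up by name below:
+ * "down" fires on power-key press, "up" on release, and "hold 4s"
+ * when the key has been held long enough to request a restart.
+ */
+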
+static int hi65xx_powerkey_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct input_dev *input;
+ int irq, i, error;
+
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input) {
+ dev_err(&pdev->dev, "failed to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ input->phys = "hisi_on/input0";
+ input->name = "HISI 65xx PowerOn Key";
+
+ input_set_capability(input, EV_KEY, KEY_POWER);
+ input_set_capability(input, EV_KEY, KEY_RESTART);
+
+ for (i = 0; i < ARRAY_SIZE(hi65xx_irq_info); i++) {
+
+ irq = platform_get_irq_byname(pdev, hi65xx_irq_info[i].name);
+ if (irq < 0) {
+ error = irq;
+ dev_err(dev, "couldn't get irq %s: %d\n",
+ hi65xx_irq_info[i].name, error);
+ return error;
+ }
+
+ error = devm_request_any_context_irq(dev, irq,
+ hi65xx_irq_info[i].handler,
+ IRQF_ONESHOT,
+ hi65xx_irq_info[i].name,
+ input);
+ if (error < 0) {
+ dev_err(dev, "couldn't request irq %s: %d\n",
+ hi65xx_irq_info[i].name, error);
+ return error;
+ }
+ }
+
+ error = input_register_device(input);
+ if (error) {
+ dev_err(&pdev->dev, "failed to register input device: %d\n",
+ error);
+ return error;
+ }
+
+ device_init_wakeup(&pdev->dev, 1);
+
+ return 0;
+}
+
+static int hi65xx_powerkey_remove(struct platform_device *pdev)
+{
+ device_init_wakeup(&pdev->dev, 0);
+
+ return 0;
+}
+
+static struct platform_driver hi65xx_powerkey_driver = {
+ .driver = {
+ .name = "hi65xx-powerkey",
+ },
+ .probe = hi65xx_powerkey_probe,
+ .remove = hi65xx_powerkey_remove,
+};
+module_platform_driver(hi65xx_powerkey_driver);
+
+MODULE_AUTHOR("Zhiliang Xue <xuezhiliang@huawei.com>");
+MODULE_DESCRIPTION("Hisilicon PMIC power key driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/misc/regulator-haptic.c b/drivers/input/misc/regulator-haptic.c
index a804705eb..2e8f80193 100644
--- a/drivers/input/misc/regulator-haptic.c
+++ b/drivers/input/misc/regulator-haptic.c
@@ -124,7 +124,7 @@ regulator_haptic_parse_dt(struct device *dev, struct regulator_haptic *haptic)
node = dev->of_node;
if(!node) {
- dev_err(dev, "Missing dveice tree data\n");
+ dev_err(dev, "Missing device tree data\n");
return -EINVAL;
}
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
index c7fc8d4fb..1588aecaf 100644
--- a/drivers/input/misc/rotary_encoder.c
+++ b/drivers/input/misc/rotary_encoder.c
@@ -28,6 +28,11 @@
#define DRV_NAME "rotary-encoder"
+enum rotary_encoder_encoding {
+ ROTENC_GRAY,
+ ROTENC_BINARY,
+};
+
struct rotary_encoder {
struct input_dev *input;
@@ -37,6 +42,7 @@ struct rotary_encoder {
u32 axis;
bool relative_axis;
bool rollover;
+ enum rotary_encoder_encoding encoding;
unsigned int pos;
@@ -57,8 +63,9 @@ static unsigned int rotary_encoder_get_state(struct rotary_encoder *encoder)
for (i = 0; i < encoder->gpios->ndescs; ++i) {
int val = gpiod_get_value_cansleep(encoder->gpios->desc[i]);
+
/* convert from gray encoding to normal */
- if (ret & 1)
+ if (encoder->encoding == ROTENC_GRAY && ret & 1)
val = !val;
ret = ret << 1 | val;
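+ /* (Gray decode, MSB first: each binary bit is the gray bit XOR
+ * the previously decoded binary bit, which is held in the low
+ * bit of ret; when that bit is 1, the current gray bit flips.)
+ */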
@@ -213,6 +220,20 @@ static int rotary_encoder_probe(struct platform_device *pdev)
encoder->rollover =
device_property_read_bool(dev, "rotary-encoder,rollover");
+ if (!device_property_present(dev, "rotary-encoder,encoding") ||
+ !device_property_match_string(dev, "rotary-encoder,encoding",
+ "gray")) {
+ dev_info(dev, "gray");
+ encoder->encoding = ROTENC_GRAY;
+ } else if (!device_property_match_string(dev, "rotary-encoder,encoding",
+ "binary")) {
+ dev_info(dev, "binary");
+ encoder->encoding = ROTENC_BINARY;
+ } else {
+ dev_err(dev, "unknown encoding setting\n");
+ return -EINVAL;
+ }
+
device_property_read_u32(dev, "linux,axis", &encoder->axis);
encoder->relative_axis =
device_property_read_bool(dev, "rotary-encoder,relative-axis");
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 0a9ad2cfb..227fbd2db 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -130,8 +130,8 @@ static int xenkbd_probe(struct xenbus_device *dev,
if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-abs-pointer", "%d", &abs) < 0)
abs = 0;
if (abs) {
- ret = xenbus_printf(XBT_NIL, dev->nodename,
- "request-abs-pointer", "1");
+ ret = xenbus_write(XBT_NIL, dev->nodename,
+ "request-abs-pointer", "1");
if (ret) {
pr_warning("xenkbd: can't request abs-pointer");
abs = 0;
@@ -327,8 +327,8 @@ InitWait:
if (ret < 0)
val = 0;
if (val) {
- ret = xenbus_printf(XBT_NIL, info->xbdev->nodename,
- "request-abs-pointer", "1");
+ ret = xenbus_write(XBT_NIL, info->xbdev->nodename,
+ "request-abs-pointer", "1");
if (ret)
pr_warning("xenkbd: can't request abs-pointer");
}
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index be5b399da..08e252a42 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -222,12 +222,8 @@ static int elantech_write_reg(struct psmouse *psmouse, unsigned char reg,
*/
static void elantech_packet_dump(struct psmouse *psmouse)
{
- int i;
-
- psmouse_printk(KERN_DEBUG, psmouse, "PS/2 packet [");
- for (i = 0; i < psmouse->pktsize; i++)
- printk("%s0x%02x ", i ? ", " : " ", psmouse->packet[i]);
- printk("]\n");
+ psmouse_printk(KERN_DEBUG, psmouse, "PS/2 packet [%*ph]\n",
+ psmouse->pktsize, psmouse->packet);
}
/*
@@ -1708,7 +1704,7 @@ int elantech_init(struct psmouse *psmouse)
snprintf(etd->tp_phys, sizeof(etd->tp_phys), "%s/input1",
psmouse->ps2dev.serio->phys);
tp_dev->phys = etd->tp_phys;
- tp_dev->name = "Elantech PS/2 TrackPoint";
+ tp_dev->name = "ETPS/2 Elantech TrackPoint";
tp_dev->id.bustype = BUS_I8042;
tp_dev->id.vendor = 0x0002;
tp_dev->id.product = PSMOUSE_ELANTECH;
diff --git a/drivers/input/mouse/lifebook.c b/drivers/input/mouse/lifebook.c
index e5ed21682..13d324cef 100644
--- a/drivers/input/mouse/lifebook.c
+++ b/drivers/input/mouse/lifebook.c
@@ -287,7 +287,7 @@ static int lifebook_create_relative_device(struct psmouse *psmouse)
"%s/input1", psmouse->ps2dev.serio->phys);
dev2->phys = priv->phys;
- dev2->name = "PS/2 Touchpad";
+ dev2->name = "LBPS/2 Fujitsu Lifebook Touchpad";
dev2->id.bustype = BUS_I8042;
dev2->id.vendor = 0x0002;
dev2->id.product = PSMOUSE_LIFEBOOK;
diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c
index 253df96be..a73580654 100644
--- a/drivers/input/rmi4/rmi_bus.c
+++ b/drivers/input/rmi4/rmi_bus.c
@@ -232,10 +232,7 @@ err_put_device:
void rmi_unregister_function(struct rmi_function *fn)
{
device_del(&fn->dev);
-
- if (fn->dev.of_node)
- of_node_put(fn->dev.of_node);
-
+ of_node_put(fn->dev.of_node);
put_device(&fn->dev);
}
diff --git a/drivers/input/rmi4/rmi_f01.c b/drivers/input/rmi4/rmi_f01.c
index eb362bc71..fac81fc9b 100644
--- a/drivers/input/rmi4/rmi_f01.c
+++ b/drivers/input/rmi4/rmi_f01.c
@@ -81,26 +81,26 @@ struct f01_basic_properties {
* This bit disables whatever sleep mode may be selected by the sleep_mode
* field and forces the device to run at full power without sleeping.
*/
-#define RMI_F01_CRTL0_NOSLEEP_BIT BIT(2)
+#define RMI_F01_CTRL0_NOSLEEP_BIT BIT(2)
/*
* When this bit is set, the touch controller employs a noise-filtering
* algorithm designed for use with a connected battery charger.
*/
-#define RMI_F01_CRTL0_CHARGER_BIT BIT(5)
+#define RMI_F01_CTRL0_CHARGER_BIT BIT(5)
/*
* Sets the report rate for the device. The effect of this setting is
* highly product dependent. Check the spec sheet for your particular
* touch sensor.
*/
-#define RMI_F01_CRTL0_REPORTRATE_BIT BIT(6)
+#define RMI_F01_CTRL0_REPORTRATE_BIT BIT(6)
/*
* Written by the host as an indicator that the device has been
* successfully configured.
*/
-#define RMI_F01_CRTL0_CONFIGURED_BIT BIT(7)
+#define RMI_F01_CTRL0_CONFIGURED_BIT BIT(7)
/**
* @ctrl0 - see the bit definitions above.
@@ -330,10 +330,10 @@ static int rmi_f01_probe(struct rmi_function *fn)
case RMI_F01_NOSLEEP_DEFAULT:
break;
case RMI_F01_NOSLEEP_OFF:
- f01->device_control.ctrl0 &= ~RMI_F01_CRTL0_NOSLEEP_BIT;
+ f01->device_control.ctrl0 &= ~RMI_F01_CTRL0_NOSLEEP_BIT;
break;
case RMI_F01_NOSLEEP_ON:
- f01->device_control.ctrl0 |= RMI_F01_CRTL0_NOSLEEP_BIT;
+ f01->device_control.ctrl0 |= RMI_F01_CTRL0_NOSLEEP_BIT;
break;
}
@@ -349,7 +349,7 @@ static int rmi_f01_probe(struct rmi_function *fn)
f01->device_control.ctrl0 &= ~RMI_F01_CTRL0_SLEEP_MODE_MASK;
}
- f01->device_control.ctrl0 |= RMI_F01_CRTL0_CONFIGURED_BIT;
+ f01->device_control.ctrl0 |= RMI_F01_CTRL0_CONFIGURED_BIT;
error = rmi_write(rmi_dev, fn->fd.control_base_addr,
f01->device_control.ctrl0);
@@ -535,8 +535,8 @@ static int rmi_f01_suspend(struct rmi_function *fn)
int error;
f01->old_nosleep =
- f01->device_control.ctrl0 & RMI_F01_CRTL0_NOSLEEP_BIT;
- f01->device_control.ctrl0 &= ~RMI_F01_CRTL0_NOSLEEP_BIT;
+ f01->device_control.ctrl0 & RMI_F01_CTRL0_NOSLEEP_BIT;
+ f01->device_control.ctrl0 &= ~RMI_F01_CTRL0_NOSLEEP_BIT;
f01->device_control.ctrl0 &= ~RMI_F01_CTRL0_SLEEP_MODE_MASK;
if (device_may_wakeup(fn->rmi_dev->xport->dev))
@@ -549,7 +549,7 @@ static int rmi_f01_suspend(struct rmi_function *fn)
if (error) {
dev_err(&fn->dev, "Failed to write sleep mode: %d.\n", error);
if (f01->old_nosleep)
- f01->device_control.ctrl0 |= RMI_F01_CRTL0_NOSLEEP_BIT;
+ f01->device_control.ctrl0 |= RMI_F01_CTRL0_NOSLEEP_BIT;
f01->device_control.ctrl0 &= ~RMI_F01_CTRL0_SLEEP_MODE_MASK;
f01->device_control.ctrl0 |= RMI_SLEEP_MODE_NORMAL;
return error;
@@ -564,7 +564,7 @@ static int rmi_f01_resume(struct rmi_function *fn)
int error;
if (f01->old_nosleep)
- f01->device_control.ctrl0 |= RMI_F01_CRTL0_NOSLEEP_BIT;
+ f01->device_control.ctrl0 |= RMI_F01_CTRL0_NOSLEEP_BIT;
f01->device_control.ctrl0 &= ~RMI_F01_CTRL0_SLEEP_MODE_MASK;
f01->device_control.ctrl0 |= RMI_SLEEP_MODE_NORMAL;
diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c
index ec8a10d53..20c7134b3 100644
--- a/drivers/input/rmi4/rmi_f11.c
+++ b/drivers/input/rmi4/rmi_f11.c
@@ -530,8 +530,8 @@ static void rmi_f11_rel_pos_report(struct f11_data *f11, u8 n_finger)
struct f11_2d_data *data = &f11->data;
s8 x, y;
- x = data->rel_pos[n_finger * 2];
- y = data->rel_pos[n_finger * 2 + 1];
+ x = data->rel_pos[n_finger * RMI_F11_REL_BYTES];
+ y = data->rel_pos[n_finger * RMI_F11_REL_BYTES + 1];
rmi_2d_sensor_rel_report(sensor, x, y);
}
@@ -1241,7 +1241,6 @@ static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits)
struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
struct f11_data *f11 = dev_get_drvdata(&fn->dev);
u16 data_base_addr = fn->fd.data_base_addr;
- u16 data_base_addr_offset = 0;
int error;
if (rmi_dev->xport->attn_data) {
@@ -1251,8 +1250,7 @@ static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits)
rmi_dev->xport->attn_size -= f11->sensor.attn_size;
} else {
error = rmi_read_block(rmi_dev,
- data_base_addr + data_base_addr_offset,
- f11->sensor.data_pkt,
+ data_base_addr, f11->sensor.data_pkt,
f11->sensor.pkt_size);
if (error < 0)
return error;
@@ -1260,7 +1258,6 @@ static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits)
rmi_f11_finger_handler(f11, &f11->sensor, irq_bits,
drvdata->num_of_irq_regs);
- data_base_addr_offset += f11->sensor.pkt_size;
return 0;
}
diff --git a/drivers/input/rmi4/rmi_f12.c b/drivers/input/rmi4/rmi_f12.c
index 88e91559c..332c02f0b 100644
--- a/drivers/input/rmi4/rmi_f12.c
+++ b/drivers/input/rmi4/rmi_f12.c
@@ -27,7 +27,6 @@ enum rmi_f12_object_type {
};
struct f12_data {
- struct rmi_function *fn;
struct rmi_2d_sensor sensor;
struct rmi_2d_sensor_platform_data sensor_pdata;
diff --git a/drivers/input/rmi4/rmi_i2c.c b/drivers/input/rmi4/rmi_i2c.c
index a96a326b5..6f2e0e4f0 100644
--- a/drivers/input/rmi4/rmi_i2c.c
+++ b/drivers/input/rmi4/rmi_i2c.c
@@ -11,6 +11,8 @@
#include <linux/rmi.h>
#include <linux/irq.h>
#include <linux/of.h>
+#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
#include "rmi_driver.h"
#define BUFFER_SIZE_INCREMENT 32
@@ -37,6 +39,9 @@ struct rmi_i2c_xport {
u8 *tx_buf;
size_t tx_buf_size;
+
+ struct regulator_bulk_data supplies[2];
+ u32 startup_delay;
};
#define RMI_PAGE_SELECT_REGISTER 0xff
@@ -246,6 +251,24 @@ static int rmi_i2c_probe(struct i2c_client *client,
return -ENODEV;
}
+ rmi_i2c->supplies[0].supply = "vdd";
+ rmi_i2c->supplies[1].supply = "vio";
+ retval = devm_regulator_bulk_get(&client->dev,
+ ARRAY_SIZE(rmi_i2c->supplies),
+ rmi_i2c->supplies);
+ if (retval < 0)
+ return retval;
+
+ retval = regulator_bulk_enable(ARRAY_SIZE(rmi_i2c->supplies),
+ rmi_i2c->supplies);
+ if (retval < 0)
+ return retval;
+
+ of_property_read_u32(client->dev.of_node, "syna,startup-delay-ms",
+ &rmi_i2c->startup_delay);
+
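+ /* If "syna,startup-delay-ms" is absent, startup_delay is left at
+ * its zero-initialized default (assuming the transport struct was
+ * allocated with kzalloc), making the msleep() below a no-op.
+ */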
+ msleep(rmi_i2c->startup_delay);
+
rmi_i2c->client = client;
mutex_init(&rmi_i2c->page_mutex);
@@ -286,6 +309,8 @@ static int rmi_i2c_remove(struct i2c_client *client)
struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client);
rmi_unregister_transport_device(&rmi_i2c->xport);
+ regulator_bulk_disable(ARRAY_SIZE(rmi_i2c->supplies),
+ rmi_i2c->supplies);
return 0;
}
@@ -308,6 +333,10 @@ static int rmi_i2c_suspend(struct device *dev)
dev_warn(dev, "Failed to enable irq for wake: %d\n",
ret);
}
+
+ regulator_bulk_disable(ARRAY_SIZE(rmi_i2c->supplies),
+ rmi_i2c->supplies);
+
return ret;
}
@@ -317,6 +346,13 @@ static int rmi_i2c_resume(struct device *dev)
struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client);
int ret;
+ ret = regulator_bulk_enable(ARRAY_SIZE(rmi_i2c->supplies),
+ rmi_i2c->supplies);
+ if (ret)
+ return ret;
+
+ msleep(rmi_i2c->startup_delay);
+
enable_irq(rmi_i2c->irq);
if (device_may_wakeup(&client->dev)) {
ret = disable_irq_wake(rmi_i2c->irq);
@@ -346,6 +382,9 @@ static int rmi_i2c_runtime_suspend(struct device *dev)
disable_irq(rmi_i2c->irq);
+ regulator_bulk_disable(ARRAY_SIZE(rmi_i2c->supplies),
+ rmi_i2c->supplies);
+
return 0;
}
@@ -355,6 +394,13 @@ static int rmi_i2c_runtime_resume(struct device *dev)
struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client);
int ret;
+ ret = regulator_bulk_enable(ARRAY_SIZE(rmi_i2c->supplies),
+ rmi_i2c->supplies);
+ if (ret)
+ return ret;
+
+ msleep(rmi_i2c->startup_delay);
+
enable_irq(rmi_i2c->irq);
ret = rmi_driver_resume(rmi_i2c->xport.rmi_dev);
diff --git a/drivers/input/serio/ams_delta_serio.c b/drivers/input/serio/ams_delta_serio.c
index 45887e312..3df501c34 100644
--- a/drivers/input/serio/ams_delta_serio.c
+++ b/drivers/input/serio/ams_delta_serio.c
@@ -56,7 +56,7 @@ static int check_data(int data)
/* it should be odd */
if (!(parity & 0x01)) {
dev_warn(&ams_delta_serio->dev,
- "paritiy check failed, data=0x%X parity=0x%X\n",
+ "parity check failed, data=0x%X parity=0x%X\n",
data, parity);
return SERIO_PARITY;
}
diff --git a/drivers/input/tablet/Kconfig b/drivers/input/tablet/Kconfig
index 623bb9e0d..a2b9f9742 100644
--- a/drivers/input/tablet/Kconfig
+++ b/drivers/input/tablet/Kconfig
@@ -73,6 +73,21 @@ config TABLET_USB_KBTAB
To compile this driver as a module, choose M here: the
module will be called kbtab.
+config TABLET_USB_PEGASUS
+ tristate "Pegasus Mobile Notetaker Pen input tablet support"
+ depends on USB_ARCH_HAS_HCD
+ select USB
+ help
+ Say Y here if you want to use the Pegasus Mobile Notetaker,
+ also known as:
+ Genie e-note The Notetaker,
+ Staedtler Digital ballpoint pen 990 01,
+ IRISnotes Express or
+ NEWLink Digital Note Taker.
+
+ To compile this driver as a module, choose M here: the
+ module will be called pegasus_notetaker.
+
config TABLET_SERIAL_WACOM4
tristate "Wacom protocol 4 serial tablet support"
select SERIO
diff --git a/drivers/input/tablet/Makefile b/drivers/input/tablet/Makefile
index 2e130101c..200fc4e11 100644
--- a/drivers/input/tablet/Makefile
+++ b/drivers/input/tablet/Makefile
@@ -8,4 +8,5 @@ obj-$(CONFIG_TABLET_USB_AIPTEK) += aiptek.o
obj-$(CONFIG_TABLET_USB_GTCO) += gtco.o
obj-$(CONFIG_TABLET_USB_HANWANG) += hanwang.o
obj-$(CONFIG_TABLET_USB_KBTAB) += kbtab.o
+obj-$(CONFIG_TABLET_USB_PEGASUS) += pegasus_notetaker.o
obj-$(CONFIG_TABLET_SERIAL_WACOM4) += wacom_serial4.o
diff --git a/drivers/input/tablet/pegasus_notetaker.c b/drivers/input/tablet/pegasus_notetaker.c
new file mode 100644
index 000000000..949dacc78
--- /dev/null
+++ b/drivers/input/tablet/pegasus_notetaker.c
@@ -0,0 +1,450 @@
+/*
+ * Pegasus Mobile Notetaker Pen input tablet driver
+ *
+ * Copyright (c) 2016 Martin Kepplinger <martink@posteo.de>
+ */
+
+/*
+ * request packet (control endpoint):
+ * |-------------------------------------|
+ * | Report ID | Nr of bytes | command |
+ * | (1 byte) | (1 byte) | (n bytes) |
+ * |-------------------------------------|
+ * | 0x02 | n | |
+ * |-------------------------------------|
+ *
+ * data packet after set xy mode command, 0x80 0xb5 0x02 0x01
+ * and pen is in range:
+ *
+ * byte byte name value (bits)
+ * --------------------------------------------
+ * 0 status 0 1 0 0 0 0 X X
+ * 1 color 0 0 0 0 H 0 S T
+ * 2 X low
+ * 3 X high
+ * 4 Y low
+ * 5 Y high
+ *
+ * X X battery state:
+ * no state reported 0x00
+ * battery low 0x01
+ * battery good 0x02
+ *
+ * H Hovering
+ * S Switch 1 (pen button)
+ * T Tip
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/usb/input.h>
+#include <linux/slab.h>
+
+/* USB HID defines */
+#define USB_REQ_GET_REPORT 0x01
+#define USB_REQ_SET_REPORT 0x09
+
+#define USB_VENDOR_ID_PEGASUSTECH 0x0e20
+#define USB_DEVICE_ID_PEGASUS_NOTETAKER_EN100 0x0101
+
+/* device specific defines */
+#define NOTETAKER_REPORT_ID 0x02
+#define NOTETAKER_SET_CMD 0x80
+#define NOTETAKER_SET_MODE 0xb5
+
+#define NOTETAKER_LED_MOUSE 0x02
+#define PEN_MODE_XY 0x01
+
+#define SPECIAL_COMMAND 0x80
+#define BUTTON_PRESSED 0xb5
+#define COMMAND_VERSION 0xa9
+
+/* in xy data packet */
+#define BATTERY_NO_REPORT 0x40
+#define BATTERY_LOW 0x41
+#define BATTERY_GOOD 0x42
+#define PEN_BUTTON_PRESSED BIT(1)
+#define PEN_TIP BIT(0)
+
+struct pegasus {
+ unsigned char *data;
+ u8 data_len;
+ dma_addr_t data_dma;
+ struct input_dev *dev;
+ struct usb_device *usbdev;
+ struct usb_interface *intf;
+ struct urb *irq;
+ char name[128];
+ char phys[64];
+ struct work_struct init;
+};
+
+static int pegasus_control_msg(struct pegasus *pegasus, u8 *data, int len)
+{
+ const int sizeof_buf = len + 2;
+ int result;
+ int error;
+ u8 *cmd_buf;
+
+ cmd_buf = kmalloc(sizeof_buf, GFP_KERNEL);
+ if (!cmd_buf)
+ return -ENOMEM;
+
+ cmd_buf[0] = NOTETAKER_REPORT_ID;
+ cmd_buf[1] = len;
+ memcpy(cmd_buf + 2, data, len);
+
+ result = usb_control_msg(pegasus->usbdev,
+ usb_sndctrlpipe(pegasus->usbdev, 0),
+ USB_REQ_SET_REPORT,
+ USB_TYPE_VENDOR | USB_DIR_OUT,
+ 0, 0, cmd_buf, sizeof_buf,
+ USB_CTRL_SET_TIMEOUT);
+
+ kfree(cmd_buf);
+
+ if (unlikely(result != sizeof_buf)) {
+ error = result < 0 ? result : -EIO;
+ dev_err(&pegasus->usbdev->dev, "control msg error: %d\n",
+ error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int pegasus_set_mode(struct pegasus *pegasus, u8 mode, u8 led)
+{
+ u8 cmd[] = { NOTETAKER_SET_CMD, NOTETAKER_SET_MODE, led, mode };
+
+ return pegasus_control_msg(pegasus, cmd, sizeof(cmd));
+}
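+
+/*
+ * For example, pegasus_set_mode(pegasus, PEN_MODE_XY, NOTETAKER_LED_MOUSE)
+ * goes out on the control endpoint as {0x02, 0x04, 0x80, 0xb5, 0x02, 0x01}:
+ * report ID, byte count, then the set-xy-mode command shown in the
+ * header comment above.
+ */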
+
+static void pegasus_parse_packet(struct pegasus *pegasus)
+{
+ unsigned char *data = pegasus->data;
+ struct input_dev *dev = pegasus->dev;
+ u16 x, y;
+
+ switch (data[0]) {
+ case SPECIAL_COMMAND:
+ /* device button pressed */
+ if (data[1] == BUTTON_PRESSED)
+ schedule_work(&pegasus->init);
+
+ break;
+
+ /* xy data */
+ case BATTERY_LOW:
+ dev_warn_once(&dev->dev, "Pen battery low\n");
+ /* fall through */
+
+ case BATTERY_NO_REPORT:
+ case BATTERY_GOOD:
+ x = le16_to_cpup((__le16 *)&data[2]);
+ y = le16_to_cpup((__le16 *)&data[4]);
+
+ /* pen-up event */
+ if (x == 0 && y == 0)
+ break;
+
+ input_report_key(dev, BTN_TOUCH, data[1] & PEN_TIP);
+ input_report_key(dev, BTN_RIGHT, data[1] & PEN_BUTTON_PRESSED);
+ input_report_key(dev, BTN_TOOL_PEN, 1);
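+		/* X is signed: the axis origin sits mid-pad, matching the
+		 * -1500..1500 ABS_X range set up in pegasus_probe().
+		 */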
+ input_report_abs(dev, ABS_X, (s16)x);
+ input_report_abs(dev, ABS_Y, y);
+
+ input_sync(dev);
+ break;
+
+ default:
+ dev_warn_once(&pegasus->usbdev->dev,
+ "unknown answer from device\n");
+ }
+}
+
+static void pegasus_irq(struct urb *urb)
+{
+ struct pegasus *pegasus = urb->context;
+ struct usb_device *dev = pegasus->usbdev;
+ int retval;
+
+ switch (urb->status) {
+ case 0:
+ pegasus_parse_packet(pegasus);
+ usb_mark_last_busy(pegasus->usbdev);
+ break;
+
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ dev_err(&dev->dev, "%s - urb shutting down with status: %d",
+ __func__, urb->status);
+ return;
+
+ default:
+ dev_err(&dev->dev, "%s - nonzero urb status received: %d",
+ __func__, urb->status);
+ break;
+ }
+
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
+ if (retval)
+ dev_err(&dev->dev, "%s - usb_submit_urb failed with result %d",
+ __func__, retval);
+}
+
+static void pegasus_init(struct work_struct *work)
+{
+ struct pegasus *pegasus = container_of(work, struct pegasus, init);
+ int error;
+
+ error = pegasus_set_mode(pegasus, PEN_MODE_XY, NOTETAKER_LED_MOUSE);
+ if (error)
+ dev_err(&pegasus->usbdev->dev, "pegasus_set_mode error: %d\n",
+ error);
+}
+
+static int pegasus_open(struct input_dev *dev)
+{
+ struct pegasus *pegasus = input_get_drvdata(dev);
+ int error;
+
+ error = usb_autopm_get_interface(pegasus->intf);
+ if (error)
+ return error;
+
+ pegasus->irq->dev = pegasus->usbdev;
+ if (usb_submit_urb(pegasus->irq, GFP_KERNEL)) {
+ error = -EIO;
+ goto err_autopm_put;
+ }
+
+ error = pegasus_set_mode(pegasus, PEN_MODE_XY, NOTETAKER_LED_MOUSE);
+ if (error)
+ goto err_kill_urb;
+
+ return 0;
+
+err_kill_urb:
+ usb_kill_urb(pegasus->irq);
+ cancel_work_sync(&pegasus->init);
+err_autopm_put:
+ usb_autopm_put_interface(pegasus->intf);
+ return error;
+}
+
+static void pegasus_close(struct input_dev *dev)
+{
+ struct pegasus *pegasus = input_get_drvdata(dev);
+
+ usb_kill_urb(pegasus->irq);
+ cancel_work_sync(&pegasus->init);
+ usb_autopm_put_interface(pegasus->intf);
+}
+
+static int pegasus_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *dev = interface_to_usbdev(intf);
+ struct usb_endpoint_descriptor *endpoint;
+ struct pegasus *pegasus;
+ struct input_dev *input_dev;
+ int error;
+ int pipe;
+
+ /* We control interface 0 */
+ if (intf->cur_altsetting->desc.bInterfaceNumber >= 1)
+ return -ENODEV;
+
+ /* Sanity check that the device has an endpoint */
+ if (intf->altsetting[0].desc.bNumEndpoints < 1) {
+ dev_err(&intf->dev, "Invalid number of endpoints\n");
+ return -EINVAL;
+ }
+
+ endpoint = &intf->cur_altsetting->endpoint[0].desc;
+
+ pegasus = kzalloc(sizeof(*pegasus), GFP_KERNEL);
+ input_dev = input_allocate_device();
+ if (!pegasus || !input_dev) {
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ pegasus->usbdev = dev;
+ pegasus->dev = input_dev;
+ pegasus->intf = intf;
+
+ pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
+ pegasus->data_len = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
+
+ pegasus->data = usb_alloc_coherent(dev, pegasus->data_len, GFP_KERNEL,
+ &pegasus->data_dma);
+ if (!pegasus->data) {
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ pegasus->irq = usb_alloc_urb(0, GFP_KERNEL);
+ if (!pegasus->irq) {
+ error = -ENOMEM;
+ goto err_free_dma;
+ }
+
+ usb_fill_int_urb(pegasus->irq, dev, pipe,
+ pegasus->data, pegasus->data_len,
+ pegasus_irq, pegasus, endpoint->bInterval);
+
+ pegasus->irq->transfer_dma = pegasus->data_dma;
+ pegasus->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ if (dev->manufacturer)
+ strlcpy(pegasus->name, dev->manufacturer,
+ sizeof(pegasus->name));
+
+ if (dev->product) {
+ if (dev->manufacturer)
+ strlcat(pegasus->name, " ", sizeof(pegasus->name));
+ strlcat(pegasus->name, dev->product, sizeof(pegasus->name));
+ }
+
+ if (!strlen(pegasus->name))
+ snprintf(pegasus->name, sizeof(pegasus->name),
+ "USB Pegasus Device %04x:%04x",
+ le16_to_cpu(dev->descriptor.idVendor),
+ le16_to_cpu(dev->descriptor.idProduct));
+
+ usb_make_path(dev, pegasus->phys, sizeof(pegasus->phys));
+ strlcat(pegasus->phys, "/input0", sizeof(pegasus->phys));
+
+ INIT_WORK(&pegasus->init, pegasus_init);
+
+ usb_set_intfdata(intf, pegasus);
+
+ input_dev->name = pegasus->name;
+ input_dev->phys = pegasus->phys;
+ usb_to_input_id(dev, &input_dev->id);
+ input_dev->dev.parent = &intf->dev;
+
+ input_set_drvdata(input_dev, pegasus);
+
+ input_dev->open = pegasus_open;
+ input_dev->close = pegasus_close;
+
+ __set_bit(EV_ABS, input_dev->evbit);
+ __set_bit(EV_KEY, input_dev->evbit);
+
+ __set_bit(ABS_X, input_dev->absbit);
+ __set_bit(ABS_Y, input_dev->absbit);
+
+ __set_bit(BTN_TOUCH, input_dev->keybit);
+ __set_bit(BTN_RIGHT, input_dev->keybit);
+ __set_bit(BTN_TOOL_PEN, input_dev->keybit);
+
+ __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+ __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
+ input_set_abs_params(input_dev, ABS_X, -1500, 1500, 8, 0);
+ input_set_abs_params(input_dev, ABS_Y, 1600, 3000, 8, 0);
+
+ error = input_register_device(pegasus->dev);
+ if (error)
+ goto err_free_urb;
+
+ return 0;
+
+err_free_urb:
+ usb_free_urb(pegasus->irq);
+err_free_dma:
+ usb_free_coherent(dev, pegasus->data_len,
+ pegasus->data, pegasus->data_dma);
+err_free_mem:
+ input_free_device(input_dev);
+ kfree(pegasus);
+ usb_set_intfdata(intf, NULL);
+
+ return error;
+}
+
+static void pegasus_disconnect(struct usb_interface *intf)
+{
+ struct pegasus *pegasus = usb_get_intfdata(intf);
+
+ input_unregister_device(pegasus->dev);
+
+ usb_free_urb(pegasus->irq);
+ usb_free_coherent(interface_to_usbdev(intf),
+ pegasus->data_len, pegasus->data,
+ pegasus->data_dma);
+
+ kfree(pegasus);
+ usb_set_intfdata(intf, NULL);
+}
+
+static int pegasus_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct pegasus *pegasus = usb_get_intfdata(intf);
+
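+	/* dev->mutex is the input core's device mutex; holding it
+	 * serializes suspend against pegasus_open()/pegasus_close().
+	 */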
+ mutex_lock(&pegasus->dev->mutex);
+ usb_kill_urb(pegasus->irq);
+ cancel_work_sync(&pegasus->init);
+ mutex_unlock(&pegasus->dev->mutex);
+
+ return 0;
+}
+
+static int pegasus_resume(struct usb_interface *intf)
+{
+ struct pegasus *pegasus = usb_get_intfdata(intf);
+ int retval = 0;
+
+ mutex_lock(&pegasus->dev->mutex);
+ if (pegasus->dev->users && usb_submit_urb(pegasus->irq, GFP_NOIO) < 0)
+ retval = -EIO;
+ mutex_unlock(&pegasus->dev->mutex);
+
+ return retval;
+}
+
+static int pegasus_reset_resume(struct usb_interface *intf)
+{
+ struct pegasus *pegasus = usb_get_intfdata(intf);
+ int retval = 0;
+
+ mutex_lock(&pegasus->dev->mutex);
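+	/* The tablet loses its mode setting across a reset, so re-send
+	 * it before restarting the interrupt URB.
+	 */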
+ if (pegasus->dev->users) {
+ retval = pegasus_set_mode(pegasus, PEN_MODE_XY,
+ NOTETAKER_LED_MOUSE);
+ if (!retval && usb_submit_urb(pegasus->irq, GFP_NOIO) < 0)
+ retval = -EIO;
+ }
+ mutex_unlock(&pegasus->dev->mutex);
+
+ return retval;
+}
+
+static const struct usb_device_id pegasus_ids[] = {
+ { USB_DEVICE(USB_VENDOR_ID_PEGASUSTECH,
+ USB_DEVICE_ID_PEGASUS_NOTETAKER_EN100) },
+ { }
+};
+MODULE_DEVICE_TABLE(usb, pegasus_ids);
+
+static struct usb_driver pegasus_driver = {
+ .name = "pegasus_notetaker",
+ .probe = pegasus_probe,
+ .disconnect = pegasus_disconnect,
+ .suspend = pegasus_suspend,
+ .resume = pegasus_resume,
+ .reset_resume = pegasus_reset_resume,
+ .id_table = pegasus_ids,
+ .supports_autosuspend = 1,
+};
+
+module_usb_driver(pegasus_driver);
+
+MODULE_AUTHOR("Martin Kepplinger <martink@posteo.de>");
+MODULE_DESCRIPTION("Pegasus Mobile Notetaker Pen tablet driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 8ecdc38fd..2fb1f430a 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -632,7 +632,7 @@ config TOUCHSCREEN_EDT_FT5X06
config TOUCHSCREEN_MIGOR
tristate "Renesas MIGO-R touchscreen"
- depends on SH_MIGOR && I2C
+ depends on (SH_MIGOR || COMPILE_TEST) && I2C
help
Say Y here to enable MIGO-R touchscreen support.
@@ -1046,6 +1046,44 @@ config TOUCHSCREEN_PCAP
To compile this driver as a module, choose M here: the
module will be called pcap_ts.
+config TOUCHSCREEN_RM_TS
+ tristate "Raydium I2C Touchscreen"
+ depends on I2C
+ depends on GPIOLIB || COMPILE_TEST
+ help
+	  Say Y here if you have a Raydium series I2C touchscreen,
+	  such as the RM32380, connected to your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called raydium_i2c_ts.
+
+config TOUCHSCREEN_SILEAD
+ tristate "Silead I2C touchscreen"
+ depends on I2C
+ help
+ Say Y here if you have the Silead touchscreen connected to
+ your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called silead.
+
+config TOUCHSCREEN_SIS_I2C
+ tristate "SiS 9200 family I2C touchscreen"
+ depends on I2C
+ select CRC_ITU_T
+ depends on GPIOLIB || COMPILE_TEST
+ help
+	  This enables support for the SiS 9200 family of I2C-based touchscreens.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sis_i2c.
+
config TOUCHSCREEN_ST1232
tristate "Sitronix ST1232 touchscreen controllers"
depends on I2C
@@ -1094,6 +1132,19 @@ config TOUCHSCREEN_SUR40
To compile this driver as a module, choose M here: the
module will be called sur40.
+config TOUCHSCREEN_SURFACE3_SPI
+ tristate "Ntrig/Microsoft Surface 3 SPI touchscreen"
+ depends on SPI
+ depends on GPIOLIB || COMPILE_TEST
+ help
+ Say Y here if you have the Ntrig/Microsoft SPI touchscreen
+ controller chip as found on the Surface 3 in your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called surface3_spi.
+
config TOUCHSCREEN_SX8654
tristate "Semtech SX8654 touchscreen"
depends on I2C
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index f42975e71..b4373d6be 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -62,11 +62,15 @@ obj-$(CONFIG_TOUCHSCREEN_USB_COMPOSITE) += usbtouchscreen.o
obj-$(CONFIG_TOUCHSCREEN_PCAP) += pcap_ts.o
obj-$(CONFIG_TOUCHSCREEN_PENMOUNT) += penmount.o
obj-$(CONFIG_TOUCHSCREEN_PIXCIR) += pixcir_i2c_ts.o
+obj-$(CONFIG_TOUCHSCREEN_RM_TS) += raydium_i2c_ts.o
obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o
+obj-$(CONFIG_TOUCHSCREEN_SILEAD) += silead.o
+obj-$(CONFIG_TOUCHSCREEN_SIS_I2C) += sis_i2c.o
obj-$(CONFIG_TOUCHSCREEN_ST1232) += st1232.o
obj-$(CONFIG_TOUCHSCREEN_STMPE) += stmpe-ts.o
obj-$(CONFIG_TOUCHSCREEN_SUN4I) += sun4i-ts.o
obj-$(CONFIG_TOUCHSCREEN_SUR40) += sur40.o
+obj-$(CONFIG_TOUCHSCREEN_SURFACE3_SPI) += surface3_spi.o
obj-$(CONFIG_TOUCHSCREEN_TI_AM335X_TSC) += ti_am335x_tsc.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213) += touchit213.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHRIGHT) += touchright.o
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index e4bf1103e..e16a44667 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -595,7 +595,7 @@ struct ad7879 *ad7879_probe(struct device *dev, u8 devid, unsigned int irq,
} else {
input_set_abs_params(input_dev, ABS_X, 0, MAX_12BIT, 0, 0);
input_set_abs_params(input_dev, ABS_Y, 0, MAX_12BIT, 0, 0);
- touchscreen_parse_properties(input_dev, false);
+ touchscreen_parse_properties(input_dev, false, NULL);
if (!input_abs_get_max(input_dev, ABS_PRESSURE)) {
dev_err(dev, "Touchscreen pressure is not specified\n");
return ERR_PTR(-EINVAL);
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index a61b2153a..1ce3ecbe3 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -1473,7 +1473,6 @@ static int ads7846_remove(struct spi_device *spi)
ads784x_hwmon_unregister(spi, ts);
- regulator_disable(ts->reg);
regulator_put(ts->reg);
if (!ts->get_pendown_state) {
diff --git a/drivers/input/touchscreen/chipone_icn8318.c b/drivers/input/touchscreen/chipone_icn8318.c
index 22a6fead8..0bf14067c 100644
--- a/drivers/input/touchscreen/chipone_icn8318.c
+++ b/drivers/input/touchscreen/chipone_icn8318.c
@@ -17,6 +17,7 @@
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -52,11 +53,7 @@ struct icn8318_data {
struct i2c_client *client;
struct input_dev *input;
struct gpio_desc *wake_gpio;
- u32 max_x;
- u32 max_y;
- bool invert_x;
- bool invert_y;
- bool swap_x_y;
+ struct touchscreen_properties prop;
};
static int icn8318_read_touch_data(struct i2c_client *client,
@@ -91,7 +88,7 @@ static irqreturn_t icn8318_irq(int irq, void *dev_id)
struct icn8318_data *data = dev_id;
struct device *dev = &data->client->dev;
struct icn8318_touch_data touch_data;
- int i, ret, x, y;
+ int i, ret;
ret = icn8318_read_touch_data(data->client, &touch_data);
if (ret < 0) {
@@ -124,22 +121,9 @@ static irqreturn_t icn8318_irq(int irq, void *dev_id)
if (!act)
continue;
- x = be16_to_cpu(touch->x);
- y = be16_to_cpu(touch->y);
-
- if (data->invert_x)
- x = data->max_x - x;
-
- if (data->invert_y)
- y = data->max_y - y;
-
- if (!data->swap_x_y) {
- input_event(data->input, EV_ABS, ABS_MT_POSITION_X, x);
- input_event(data->input, EV_ABS, ABS_MT_POSITION_Y, y);
- } else {
- input_event(data->input, EV_ABS, ABS_MT_POSITION_X, y);
- input_event(data->input, EV_ABS, ABS_MT_POSITION_Y, x);
- }
+ touchscreen_report_pos(data->input, &data->prop,
+ be16_to_cpu(touch->x),
+ be16_to_cpu(touch->y), true);
}
input_mt_sync_frame(data->input);
@@ -200,10 +184,8 @@ static int icn8318_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
- struct device_node *np = dev->of_node;
struct icn8318_data *data;
struct input_dev *input;
- u32 fuzz_x = 0, fuzz_y = 0;
int error;
if (!client->irq) {
@@ -223,19 +205,6 @@ static int icn8318_probe(struct i2c_client *client,
return error;
}
- if (of_property_read_u32(np, "touchscreen-size-x", &data->max_x) ||
- of_property_read_u32(np, "touchscreen-size-y", &data->max_y)) {
- dev_err(dev, "Error touchscreen-size-x and/or -y missing\n");
- return -EINVAL;
- }
-
- /* Optional */
- of_property_read_u32(np, "touchscreen-fuzz-x", &fuzz_x);
- of_property_read_u32(np, "touchscreen-fuzz-y", &fuzz_y);
- data->invert_x = of_property_read_bool(np, "touchscreen-inverted-x");
- data->invert_y = of_property_read_bool(np, "touchscreen-inverted-y");
- data->swap_x_y = of_property_read_bool(np, "touchscreen-swapped-x-y");
-
input = devm_input_allocate_device(dev);
if (!input)
return -ENOMEM;
@@ -246,16 +215,14 @@ static int icn8318_probe(struct i2c_client *client,
input->close = icn8318_stop;
input->dev.parent = dev;
- if (!data->swap_x_y) {
- input_set_abs_params(input, ABS_MT_POSITION_X, 0,
- data->max_x, fuzz_x, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y, 0,
- data->max_y, fuzz_y, 0);
- } else {
- input_set_abs_params(input, ABS_MT_POSITION_X, 0,
- data->max_y, fuzz_y, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y, 0,
- data->max_x, fuzz_x, 0);
+ input_set_capability(input, EV_ABS, ABS_MT_POSITION_X);
+ input_set_capability(input, EV_ABS, ABS_MT_POSITION_Y);
+
+ touchscreen_parse_properties(input, true, &data->prop);
+ if (!input_abs_get_max(input, ABS_MT_POSITION_X) ||
+ !input_abs_get_max(input, ABS_MT_POSITION_Y)) {
+ dev_err(dev, "Error touchscreen-size-x and/or -y missing\n");
+ return -EINVAL;
}
error = input_mt_init_slots(input, ICN8318_MAX_TOUCHES,
diff --git a/drivers/input/touchscreen/cyttsp_core.c b/drivers/input/touchscreen/cyttsp_core.c
index 91cda8f81..79381cc17 100644
--- a/drivers/input/touchscreen/cyttsp_core.c
+++ b/drivers/input/touchscreen/cyttsp_core.c
@@ -657,7 +657,7 @@ struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops,
input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_X);
input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_Y);
- touchscreen_parse_properties(input_dev, true);
+ touchscreen_parse_properties(input_dev, true, NULL);
error = input_mt_init_slots(input_dev, CY_MAX_ID, 0);
if (error) {
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 23fbe382d..703e295a3 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -86,6 +86,7 @@ struct edt_reg_addr {
struct edt_ft5x06_ts_data {
struct i2c_client *client;
struct input_dev *input;
+ struct touchscreen_properties prop;
u16 num_x;
u16 num_y;
@@ -246,8 +247,8 @@ static irqreturn_t edt_ft5x06_ts_isr(int irq, void *dev_id)
if (!down)
continue;
- input_report_abs(tsdata->input, ABS_MT_POSITION_X, x);
- input_report_abs(tsdata->input, ABS_MT_POSITION_Y, y);
+ touchscreen_report_pos(tsdata->input, &tsdata->prop, x, y,
+ true);
}
input_mt_report_pointer_emulation(tsdata->input, true);
@@ -972,7 +973,7 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
input_set_abs_params(input, ABS_MT_POSITION_Y,
0, tsdata->num_y * 64 - 1, 0, 0);
- touchscreen_parse_properties(input, true);
+ touchscreen_parse_properties(input, true, &tsdata->prop);
error = input_mt_init_slots(input, tsdata->max_support_points,
INPUT_MT_DIRECT);
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index ddf694b9f..fe4848bd1 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -169,7 +169,7 @@ static ssize_t ili210x_calibrate(struct device *dev,
return count;
}
-static DEVICE_ATTR(calibrate, 0644, NULL, ili210x_calibrate);
+static DEVICE_ATTR(calibrate, S_IWUSR, NULL, ili210x_calibrate);
static struct attribute *ili210x_attributes[] = {
&dev_attr_calibrate.attr,
diff --git a/drivers/input/touchscreen/migor_ts.c b/drivers/input/touchscreen/migor_ts.c
index c038db93e..02fb11985 100644
--- a/drivers/input/touchscreen/migor_ts.c
+++ b/drivers/input/touchscreen/migor_ts.c
@@ -202,7 +202,7 @@ static int migor_ts_remove(struct i2c_client *client)
return 0;
}
-static int migor_ts_suspend(struct device *dev)
+static int __maybe_unused migor_ts_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct migor_ts_priv *priv = i2c_get_clientdata(client);
@@ -213,7 +213,7 @@ static int migor_ts_suspend(struct device *dev)
return 0;
}
-static int migor_ts_resume(struct device *dev)
+static int __maybe_unused migor_ts_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct migor_ts_priv *priv = i2c_get_clientdata(client);
@@ -230,7 +230,7 @@ static const struct i2c_device_id migor_ts_id[] = {
{ "migor_ts", 0 },
{ }
};
-MODULE_DEVICE_TABLE(i2c, migor_ts);
+MODULE_DEVICE_TABLE(i2c, migor_ts_id);
static struct i2c_driver migor_ts_driver = {
.driver = {
diff --git a/drivers/input/touchscreen/of_touchscreen.c b/drivers/input/touchscreen/of_touchscreen.c
index bb6f2fe14..8d7f9c8f2 100644
--- a/drivers/input/touchscreen/of_touchscreen.c
+++ b/drivers/input/touchscreen/of_touchscreen.c
@@ -55,12 +55,16 @@ static void touchscreen_set_params(struct input_dev *dev,
* @input: input device that should be parsed
* @multitouch: specifies whether parsed properties should be applied to
* single-touch or multi-touch axes
+ * @prop: pointer to a struct touchscreen_properties into which to store
+ *	axis swap and invert info for use with touchscreen_report_pos();
+ * or %NULL
*
* This function parses common DT properties for touchscreens and setups the
* input device accordingly. The function keeps previously set up default
* values if no value is specified via DT.
*/
-void touchscreen_parse_properties(struct input_dev *input, bool multitouch)
+void touchscreen_parse_properties(struct input_dev *input, bool multitouch,
+ struct touchscreen_properties *prop)
{
struct device *dev = input->dev.parent;
unsigned int axis;
@@ -104,5 +108,80 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch)
&fuzz);
if (data_present)
touchscreen_set_params(input, axis, maximum, fuzz);
+
+ if (!prop)
+ return;
+
+ axis = multitouch ? ABS_MT_POSITION_X : ABS_X;
+
+ prop->max_x = input_abs_get_max(input, axis);
+ prop->max_y = input_abs_get_max(input, axis + 1);
+ prop->invert_x =
+ device_property_read_bool(dev, "touchscreen-inverted-x");
+ prop->invert_y =
+ device_property_read_bool(dev, "touchscreen-inverted-y");
+ prop->swap_x_y =
+ device_property_read_bool(dev, "touchscreen-swapped-x-y");
+
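+	/* Swapped axes swap the X/Y ranges as well, so swap the absinfo
+	 * slots here; touchscreen_report_pos() then only needs to swap
+	 * the reported coordinates.
+	 */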
+ if (prop->swap_x_y)
+ swap(input->absinfo[axis], input->absinfo[axis + 1]);
}
EXPORT_SYMBOL(touchscreen_parse_properties);
+
+static void
+touchscreen_apply_prop_to_x_y(const struct touchscreen_properties *prop,
+ unsigned int *x, unsigned int *y)
+{
+ if (prop->invert_x)
+ *x = prop->max_x - *x;
+
+ if (prop->invert_y)
+ *y = prop->max_y - *y;
+
+ if (prop->swap_x_y)
+ swap(*x, *y);
+}
+
+/**
+ * touchscreen_set_mt_pos - Set input_mt_pos coordinates
+ * @pos: input_mt_pos to set coordinates of
+ * @prop: pointer to a struct touchscreen_properties
+ * @x: X coordinate to store in pos
+ * @y: Y coordinate to store in pos
+ *
+ * Adjust the passed in x and y values applying any axis inversion and
+ * swapping requested in the passed in touchscreen_properties and store
+ * the result in a struct input_mt_pos.
+ */
+void touchscreen_set_mt_pos(struct input_mt_pos *pos,
+ const struct touchscreen_properties *prop,
+ unsigned int x, unsigned int y)
+{
+ touchscreen_apply_prop_to_x_y(prop, &x, &y);
+ pos->x = x;
+ pos->y = y;
+}
+EXPORT_SYMBOL(touchscreen_set_mt_pos);
+
+/**
+ * touchscreen_report_pos - Report touchscreen coordinates
+ * @input: input_device to report coordinates for
+ * @prop: pointer to a struct touchscreen_properties
+ * @x: X coordinate to report
+ * @y: Y coordinate to report
+ * @multitouch: Report coordinates on single-touch or multi-touch axes
+ *
+ * Adjust the passed in x and y values applying any axis inversion and
+ * swapping requested in the passed in touchscreen_properties and then
+ * report the resulting coordinates on the input_dev's x and y axis.
+ */
+void touchscreen_report_pos(struct input_dev *input,
+ const struct touchscreen_properties *prop,
+ unsigned int x, unsigned int y,
+ bool multitouch)
+{
+ touchscreen_apply_prop_to_x_y(prop, &x, &y);
+ input_report_abs(input, multitouch ? ABS_MT_POSITION_X : ABS_X, x);
+ input_report_abs(input, multitouch ? ABS_MT_POSITION_Y : ABS_Y, y);
+}
+EXPORT_SYMBOL(touchscreen_report_pos);
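+
+/*
+ * Typical usage (a sketch, not part of this patch; "priv" stands for
+ * whatever driver-private struct holds the touchscreen_properties):
+ * parse the properties once at probe time, then feed raw hardware
+ * coordinates through touchscreen_report_pos() from the IRQ handler:
+ *
+ *	touchscreen_parse_properties(input, true, &priv->prop);
+ *	...
+ *	touchscreen_report_pos(input, &priv->prop, raw_x, raw_y, true);
+ */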
diff --git a/drivers/input/touchscreen/pixcir_i2c_ts.c b/drivers/input/touchscreen/pixcir_i2c_ts.c
index 09523a3d3..d159e14f4 100644
--- a/drivers/input/touchscreen/pixcir_i2c_ts.c
+++ b/drivers/input/touchscreen/pixcir_i2c_ts.c
@@ -27,9 +27,9 @@
#include <linux/input/touchscreen.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
-/*#include <linux/of.h>*/
#include <linux/of_device.h>
#include <linux/platform_data/pixcir_i2c_ts.h>
+#include <asm/unaligned.h>
#define PIXCIR_MAX_SLOTS 5 /* Max fingers supported by driver */
@@ -41,19 +41,15 @@ struct pixcir_i2c_ts_data {
struct gpio_desc *gpio_enable;
struct gpio_desc *gpio_wake;
const struct pixcir_i2c_chip_data *chip;
+ struct touchscreen_properties prop;
int max_fingers; /* Max fingers supported in this instance */
bool running;
};
-struct pixcir_touch {
- int x;
- int y;
- int id;
-};
-
struct pixcir_report_data {
int num_touches;
- struct pixcir_touch touches[PIXCIR_MAX_SLOTS];
+ struct input_mt_pos pos[PIXCIR_MAX_SLOTS];
+ int ids[PIXCIR_MAX_SLOTS];
};
static void pixcir_ts_parse(struct pixcir_i2c_ts_data *tsdata,
@@ -98,11 +94,11 @@ static void pixcir_ts_parse(struct pixcir_i2c_ts_data *tsdata,
bufptr = &rdbuf[2];
for (i = 0; i < touch; i++) {
- report->touches[i].x = (bufptr[1] << 8) | bufptr[0];
- report->touches[i].y = (bufptr[3] << 8) | bufptr[2];
-
+ touchscreen_set_mt_pos(&report->pos[i], &tsdata->prop,
+ get_unaligned_le16(bufptr),
+ get_unaligned_le16(bufptr + 2));
if (chip->has_hw_ids) {
- report->touches[i].id = bufptr[4];
+ report->ids[i] = bufptr[4];
bufptr = bufptr + 5;
} else {
bufptr = bufptr + 4;
@@ -113,9 +109,7 @@ static void pixcir_ts_parse(struct pixcir_i2c_ts_data *tsdata,
static void pixcir_ts_report(struct pixcir_i2c_ts_data *ts,
struct pixcir_report_data *report)
{
- struct input_mt_pos pos[PIXCIR_MAX_SLOTS];
int slots[PIXCIR_MAX_SLOTS];
- struct pixcir_touch *touch;
int n, i, slot;
struct device *dev = &ts->client->dev;
const struct pixcir_i2c_chip_data *chip = ts->chip;
@@ -124,24 +118,16 @@ static void pixcir_ts_report(struct pixcir_i2c_ts_data *ts,
if (n > PIXCIR_MAX_SLOTS)
n = PIXCIR_MAX_SLOTS;
- if (!ts->chip->has_hw_ids) {
- for (i = 0; i < n; i++) {
- touch = &report->touches[i];
- pos[i].x = touch->x;
- pos[i].y = touch->y;
- }
-
- input_mt_assign_slots(ts->input, slots, pos, n, 0);
- }
+ if (!ts->chip->has_hw_ids)
+ input_mt_assign_slots(ts->input, slots, report->pos, n, 0);
for (i = 0; i < n; i++) {
- touch = &report->touches[i];
-
if (chip->has_hw_ids) {
- slot = input_mt_get_slot_by_key(ts->input, touch->id);
+ slot = input_mt_get_slot_by_key(ts->input,
+ report->ids[i]);
if (slot < 0) {
dev_dbg(dev, "no free slot for id 0x%x\n",
- touch->id);
+ report->ids[i]);
continue;
}
} else {
@@ -149,14 +135,15 @@ static void pixcir_ts_report(struct pixcir_i2c_ts_data *ts,
}
input_mt_slot(ts->input, slot);
- input_mt_report_slot_state(ts->input,
- MT_TOOL_FINGER, true);
+ input_mt_report_slot_state(ts->input, MT_TOOL_FINGER, true);
- input_event(ts->input, EV_ABS, ABS_MT_POSITION_X, touch->x);
- input_event(ts->input, EV_ABS, ABS_MT_POSITION_Y, touch->y);
+ input_report_abs(ts->input, ABS_MT_POSITION_X,
+ report->pos[i].x);
+ input_report_abs(ts->input, ABS_MT_POSITION_Y,
+ report->pos[i].y);
dev_dbg(dev, "%d: slot %d, x %d, y %d\n",
- i, slot, touch->x, touch->y);
+ i, slot, report->pos[i].x, report->pos[i].y);
}
input_mt_sync_frame(ts->input);
@@ -515,7 +502,7 @@ static int pixcir_i2c_ts_probe(struct i2c_client *client,
} else {
input_set_capability(input, EV_ABS, ABS_MT_POSITION_X);
input_set_capability(input, EV_ABS, ABS_MT_POSITION_Y);
- touchscreen_parse_properties(input, true);
+ touchscreen_parse_properties(input, true, &tsdata->prop);
if (!input_abs_get_max(input, ABS_MT_POSITION_X) ||
!input_abs_get_max(input, ABS_MT_POSITION_Y)) {
dev_err(dev, "Touchscreen size is not specified\n");
diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c
new file mode 100644
index 000000000..99f4df287
--- /dev/null
+++ b/drivers/input/touchscreen/raydium_i2c_ts.c
@@ -0,0 +1,1238 @@
+/*
+ * Raydium touchscreen I2C driver.
+ *
+ * Copyright (C) 2012-2014, Raydium Semiconductor Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Raydium reserves the right to make changes without further notice
+ * to the materials described herein. Raydium does not assume any
+ * liability arising out of the application described herein.
+ *
+ * Contact Raydium Semiconductor Corporation at www.rad-ic.com
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+
+/* Slave I2C mode */
+#define RM_BOOT_BLDR 0x02
+#define RM_BOOT_MAIN 0x03
+
+/* I2C bootloader commands */
+#define RM_CMD_BOOT_PAGE_WRT 0x0B /* send bl page write */
+#define RM_CMD_BOOT_WRT 0x11 /* send bl write */
+#define RM_CMD_BOOT_ACK 0x22 /* send ack */
+#define RM_CMD_BOOT_CHK 0x33 /* send data check */
+#define RM_CMD_BOOT_READ 0x44 /* send wait bl data ready */
+
+#define RM_BOOT_RDY 0xFF /* bl data ready */
+
+/* I2C main commands */
+#define RM_CMD_QUERY_BANK 0x2B
+#define RM_CMD_DATA_BANK 0x4D
+#define RM_CMD_ENTER_SLEEP 0x4E
+#define RM_CMD_BANK_SWITCH 0xAA
+
+#define RM_RESET_MSG_ADDR 0x40000004
+
+#define RM_MAX_READ_SIZE 56
+#define RM_PACKET_CRC_SIZE 2
+
+/* Touch relative info */
+#define RM_MAX_RETRIES 3
+#define RM_MAX_TOUCH_NUM 10
+#define RM_BOOT_DELAY_MS 100
+
+/* Offsets in contact data */
+#define RM_CONTACT_STATE_POS 0
+#define RM_CONTACT_X_POS 1
+#define RM_CONTACT_Y_POS 3
+#define RM_CONTACT_PRESSURE_POS 5
+#define RM_CONTACT_WIDTH_X_POS 6
+#define RM_CONTACT_WIDTH_Y_POS 7
+
+/* Bootloader relative info */
+#define RM_BL_WRT_CMD_SIZE 3 /* bl flash wrt cmd size */
+#define RM_BL_WRT_PKG_SIZE 32 /* bl wrt pkg size */
+#define RM_BL_WRT_LEN (RM_BL_WRT_PKG_SIZE + RM_BL_WRT_CMD_SIZE)
+#define RM_FW_PAGE_SIZE 128
+#define RM_MAX_FW_RETRIES 30
+#define RM_MAX_FW_SIZE 0xD000
+
+#define RM_POWERON_DELAY_USEC 500
+#define RM_RESET_DELAY_MSEC 50
+
+enum raydium_bl_cmd {
+ BL_HEADER = 0,
+ BL_PAGE_STR,
+ BL_PKG_IDX,
+ BL_DATA_STR,
+};
+
+enum raydium_bl_ack {
+ RAYDIUM_ACK_NULL = 0,
+ RAYDIUM_WAIT_READY,
+ RAYDIUM_PATH_READY,
+};
+
+enum raydium_boot_mode {
+ RAYDIUM_TS_MAIN = 0,
+ RAYDIUM_TS_BLDR,
+};
+
+/* Response to RM_CMD_DATA_BANK request */
+struct raydium_data_info {
+ __le32 data_bank_addr;
+ u8 pkg_size;
+ u8 tp_info_size;
+};
+
+struct raydium_info {
+ __le32 hw_ver; /* device version */
+ u8 main_ver;
+ u8 sub_ver;
+ __le16 ft_ver; /* test version */
+ u8 x_num;
+ u8 y_num;
+ __le16 x_max;
+ __le16 y_max;
+ u8 x_res; /* units/mm */
+ u8 y_res; /* units/mm */
+};
+
+/* struct raydium_data - represents state of Raydium touchscreen device */
+struct raydium_data {
+ struct i2c_client *client;
+ struct input_dev *input;
+
+ struct regulator *avdd;
+ struct regulator *vccio;
+ struct gpio_desc *reset_gpio;
+
+ struct raydium_info info;
+
+ struct mutex sysfs_mutex;
+
+ u8 *report_data;
+
+ u32 data_bank_addr;
+ u8 report_size;
+ u8 contact_size;
+ u8 pkg_size;
+
+ enum raydium_boot_mode boot_mode;
+
+ bool wake_irq_enabled;
+};
+
+static int raydium_i2c_send(struct i2c_client *client,
+ u8 addr, const void *data, size_t len)
+{
+ u8 *buf;
+ int tries = 0;
+ int ret;
+
+ buf = kmalloc(len + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf[0] = addr;
+ memcpy(buf + 1, data, len);
+
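+	/* Retry the whole transfer up to RM_MAX_RETRIES times; anything
+	 * short of len + 1 bytes written counts as a failure.
+	 */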
+ do {
+ ret = i2c_master_send(client, buf, len + 1);
+ if (likely(ret == len + 1))
+ break;
+
+ msleep(20);
+ } while (++tries < RM_MAX_RETRIES);
+
+ kfree(buf);
+
+ if (unlikely(ret != len + 1)) {
+ if (ret >= 0)
+ ret = -EIO;
+ dev_err(&client->dev, "%s failed: %d\n", __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int raydium_i2c_read(struct i2c_client *client,
+ u8 addr, void *data, size_t len)
+{
+ struct i2c_msg xfer[] = {
+ {
+ .addr = client->addr,
+ .len = 1,
+ .buf = &addr,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = data,
+ }
+ };
+ int ret;
+
+ ret = i2c_transfer(client->adapter, xfer, ARRAY_SIZE(xfer));
+ if (unlikely(ret != ARRAY_SIZE(xfer)))
+ return ret < 0 ? ret : -EIO;
+
+ return 0;
+}
+
+static int raydium_i2c_read_message(struct i2c_client *client,
+ u32 addr, void *data, size_t len)
+{
+ __be32 be_addr;
+ size_t xfer_len;
+ int error;
+
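+	/*
+	 * The controller accepts at most RM_MAX_READ_SIZE bytes per read,
+	 * so split large transfers and re-issue the bank-switch command
+	 * with the advancing 32-bit address before each chunk.
+	 */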
+ while (len) {
+ xfer_len = min_t(size_t, len, RM_MAX_READ_SIZE);
+
+ be_addr = cpu_to_be32(addr);
+
+ error = raydium_i2c_send(client, RM_CMD_BANK_SWITCH,
+ &be_addr, sizeof(be_addr));
+ if (!error)
+ error = raydium_i2c_read(client, addr & 0xff,
+ data, xfer_len);
+ if (error)
+ return error;
+
+ len -= xfer_len;
+ data += xfer_len;
+ addr += xfer_len;
+ }
+
+ return 0;
+}
+
+static int raydium_i2c_send_message(struct i2c_client *client,
+ u32 addr, const void *data, size_t len)
+{
+ __be32 be_addr = cpu_to_be32(addr);
+ int error;
+
+ error = raydium_i2c_send(client, RM_CMD_BANK_SWITCH,
+ &be_addr, sizeof(be_addr));
+ if (!error)
+ error = raydium_i2c_send(client, addr & 0xff, data, len);
+
+ return error;
+}
+
+static int raydium_i2c_sw_reset(struct i2c_client *client)
+{
+ const u8 soft_rst_cmd = 0x01;
+ int error;
+
+ error = raydium_i2c_send_message(client, RM_RESET_MSG_ADDR,
+ &soft_rst_cmd, sizeof(soft_rst_cmd));
+ if (error) {
+ dev_err(&client->dev, "software reset failed: %d\n", error);
+ return error;
+ }
+
+ msleep(RM_RESET_DELAY_MSEC);
+
+ return 0;
+}
+
+static int raydium_i2c_query_ts_info(struct raydium_data *ts)
+{
+ struct i2c_client *client = ts->client;
+ struct raydium_data_info data_info;
+ __le32 query_bank_addr;
+
+ int error, retry_cnt;
+
+ for (retry_cnt = 0; retry_cnt < RM_MAX_RETRIES; retry_cnt++) {
+ error = raydium_i2c_read(client, RM_CMD_DATA_BANK,
+ &data_info, sizeof(data_info));
+ if (error)
+ continue;
+
+ /*
+ * Warn the user if we already allocated memory for reports and
+ * the size changed later (due to a firmware update?); keep the
+ * old size in that case.
+ */
+ if (ts->report_data && ts->pkg_size != data_info.pkg_size) {
+ dev_warn(&client->dev,
+ "report size changes, was: %d, new: %d\n",
+ ts->pkg_size, data_info.pkg_size);
+ } else {
+ ts->pkg_size = data_info.pkg_size;
+ ts->report_size = ts->pkg_size - RM_PACKET_CRC_SIZE;
+ }
+
+ ts->contact_size = data_info.tp_info_size;
+ ts->data_bank_addr = le32_to_cpu(data_info.data_bank_addr);
+
+ dev_dbg(&client->dev,
+ "data_bank_addr: %#08x, report_size: %d, contact_size: %d\n",
+ ts->data_bank_addr, ts->report_size, ts->contact_size);
+
+ error = raydium_i2c_read(client, RM_CMD_QUERY_BANK,
+ &query_bank_addr,
+ sizeof(query_bank_addr));
+ if (error)
+ continue;
+
+ error = raydium_i2c_read_message(client,
+ le32_to_cpu(query_bank_addr),
+ &ts->info, sizeof(ts->info));
+ if (error)
+ continue;
+
+ return 0;
+ }
+
+ dev_err(&client->dev, "failed to query device parameters: %d\n", error);
+ return error;
+}
+
+static int raydium_i2c_check_fw_status(struct raydium_data *ts)
+{
+ struct i2c_client *client = ts->client;
+ static const u8 bl_ack = 0x62;
+ static const u8 main_ack = 0x66;
+ u8 buf[4];
+ int error;
+
+ error = raydium_i2c_read(client, RM_CMD_BOOT_READ, buf, sizeof(buf));
+ if (!error) {
+ if (buf[0] == bl_ack)
+ ts->boot_mode = RAYDIUM_TS_BLDR;
+ else if (buf[0] == main_ack)
+ ts->boot_mode = RAYDIUM_TS_MAIN;
+ return 0;
+ }
+
+ return error;
+}
+
+static int raydium_i2c_initialize(struct raydium_data *ts)
+{
+ struct i2c_client *client = ts->client;
+ int error, retry_cnt;
+
+ for (retry_cnt = 0; retry_cnt < RM_MAX_RETRIES; retry_cnt++) {
+ /* Wait for Hello packet */
+ msleep(RM_BOOT_DELAY_MS);
+
+ error = raydium_i2c_check_fw_status(ts);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to read 'hello' packet: %d\n", error);
+ continue;
+ }
+
+ if (ts->boot_mode == RAYDIUM_TS_BLDR ||
+ ts->boot_mode == RAYDIUM_TS_MAIN) {
+ break;
+ }
+ }
+
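+	/* No valid hello packet was seen: assume the chip is stuck in the
+	 * bootloader so that a firmware update can still be attempted.
+	 */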
+ if (error)
+ ts->boot_mode = RAYDIUM_TS_BLDR;
+
+ if (ts->boot_mode == RAYDIUM_TS_BLDR) {
+ ts->info.hw_ver = cpu_to_le32(0xffffffffUL);
+ ts->info.main_ver = 0xff;
+ ts->info.sub_ver = 0xff;
+ } else {
+ raydium_i2c_query_ts_info(ts);
+ }
+
+ return error;
+}
+
+static int raydium_i2c_bl_chk_state(struct i2c_client *client,
+ enum raydium_bl_ack state)
+{
+ static const u8 ack_ok[] = { 0xFF, 0x39, 0x30, 0x30, 0x54 };
+ u8 rbuf[sizeof(ack_ok)];
+ u8 retry;
+ int error;
+
+ for (retry = 0; retry < RM_MAX_FW_RETRIES; retry++) {
+ switch (state) {
+ case RAYDIUM_ACK_NULL:
+ return 0;
+
+ case RAYDIUM_WAIT_READY:
+ error = raydium_i2c_read(client, RM_CMD_BOOT_CHK,
+ &rbuf[0], 1);
+ if (!error && rbuf[0] == RM_BOOT_RDY)
+ return 0;
+
+ break;
+
+ case RAYDIUM_PATH_READY:
+ error = raydium_i2c_read(client, RM_CMD_BOOT_CHK,
+ rbuf, sizeof(rbuf));
+ if (!error && !memcmp(rbuf, ack_ok, sizeof(ack_ok)))
+ return 0;
+
+ break;
+
+ default:
+ dev_err(&client->dev, "%s: invalid target state %d\n",
+ __func__, state);
+ return -EINVAL;
+ }
+
+ msleep(20);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int raydium_i2c_write_object(struct i2c_client *client,
+ const void *data, size_t len,
+ enum raydium_bl_ack state)
+{
+ int error;
+
+ error = raydium_i2c_send(client, RM_CMD_BOOT_WRT, data, len);
+ if (error) {
+ dev_err(&client->dev, "WRT obj command failed: %d\n",
+ error);
+ return error;
+ }
+
+ error = raydium_i2c_send(client, RM_CMD_BOOT_ACK, NULL, 0);
+ if (error) {
+ dev_err(&client->dev, "Ack obj command failed: %d\n", error);
+ return error;
+ }
+
+ error = raydium_i2c_bl_chk_state(client, state);
+ if (error) {
+ dev_err(&client->dev, "BL check state failed: %d\n", error);
+ return error;
+ }
+ return 0;
+}
+
+static int raydium_i2c_boot_trigger(struct i2c_client *client)
+{
+ static const u8 cmd[7][6] = {
+ { 0x08, 0x0C, 0x09, 0x00, 0x50, 0xD7 },
+ { 0x08, 0x04, 0x09, 0x00, 0x50, 0xA5 },
+ { 0x08, 0x04, 0x09, 0x00, 0x50, 0x00 },
+ { 0x08, 0x04, 0x09, 0x00, 0x50, 0xA5 },
+ { 0x08, 0x0C, 0x09, 0x00, 0x50, 0x00 },
+ { 0x06, 0x01, 0x00, 0x00, 0x00, 0x00 },
+ { 0x02, 0xA2, 0x00, 0x00, 0x00, 0x00 },
+ };
+ int i;
+ int error;
+
+ for (i = 0; i < 7; i++) {
+ error = raydium_i2c_write_object(client, cmd[i], sizeof(cmd[i]),
+ RAYDIUM_WAIT_READY);
+ if (error) {
+ dev_err(&client->dev,
+ "boot trigger failed at step %d: %d\n",
+ i, error);
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+static int raydium_i2c_fw_trigger(struct i2c_client *client)
+{
+ static const u8 cmd[5][11] = {
+ { 0, 0x09, 0x71, 0x0C, 0x09, 0x00, 0x50, 0xD7, 0, 0, 0 },
+ { 0, 0x09, 0x71, 0x04, 0x09, 0x00, 0x50, 0xA5, 0, 0, 0 },
+ { 0, 0x09, 0x71, 0x04, 0x09, 0x00, 0x50, 0x00, 0, 0, 0 },
+ { 0, 0x09, 0x71, 0x04, 0x09, 0x00, 0x50, 0xA5, 0, 0, 0 },
+ { 0, 0x09, 0x71, 0x0C, 0x09, 0x00, 0x50, 0x00, 0, 0, 0 },
+ };
+ int i;
+ int error;
+
+ for (i = 0; i < 5; i++) {
+ error = raydium_i2c_write_object(client, cmd[i], sizeof(cmd[i]),
+ RAYDIUM_ACK_NULL);
+ if (error) {
+ dev_err(&client->dev,
+ "fw trigger failed at step %d: %d\n",
+ i, error);
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+static int raydium_i2c_check_path(struct i2c_client *client)
+{
+ static const u8 cmd[] = { 0x09, 0x00, 0x09, 0x00, 0x50, 0x10, 0x00 };
+ int error;
+
+ error = raydium_i2c_write_object(client, cmd, sizeof(cmd),
+ RAYDIUM_PATH_READY);
+ if (error) {
+ dev_err(&client->dev, "check path command failed: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int raydium_i2c_enter_bl(struct i2c_client *client)
+{
+ static const u8 cal_cmd[] = { 0x00, 0x01, 0x52 };
+ int error;
+
+ error = raydium_i2c_write_object(client, cal_cmd, sizeof(cal_cmd),
+ RAYDIUM_ACK_NULL);
+ if (error) {
+ dev_err(&client->dev, "enter bl command failed: %d\n", error);
+ return error;
+ }
+
+ msleep(RM_BOOT_DELAY_MS);
+ return 0;
+}
+
+static int raydium_i2c_leave_bl(struct i2c_client *client)
+{
+ static const u8 leave_cmd[] = { 0x05, 0x00 };
+ int error;
+
+ error = raydium_i2c_write_object(client, leave_cmd, sizeof(leave_cmd),
+ RAYDIUM_ACK_NULL);
+ if (error) {
+ dev_err(&client->dev, "leave bl command failed: %d\n", error);
+ return error;
+ }
+
+ msleep(RM_BOOT_DELAY_MS);
+ return 0;
+}
+
+static int raydium_i2c_write_checksum(struct i2c_client *client,
+ size_t length, u16 checksum)
+{
+ u8 checksum_cmd[] = { 0x00, 0x05, 0x6D, 0x00, 0x00, 0x00, 0x00 };
+ int error;
+
+ put_unaligned_le16(length, &checksum_cmd[3]);
+ put_unaligned_le16(checksum, &checksum_cmd[5]);
+
+ error = raydium_i2c_write_object(client,
+ checksum_cmd, sizeof(checksum_cmd),
+ RAYDIUM_ACK_NULL);
+ if (error) {
+ dev_err(&client->dev, "failed to write checksum: %d\n",
+ error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int raydium_i2c_disable_watch_dog(struct i2c_client *client)
+{
+ static const u8 cmd[] = { 0x0A, 0xAA };
+ int error;
+
+ error = raydium_i2c_write_object(client, cmd, sizeof(cmd),
+ RAYDIUM_WAIT_READY);
+ if (error) {
+ dev_err(&client->dev, "disable watchdog command failed: %d\n",
+ error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int raydium_i2c_fw_write_page(struct i2c_client *client,
+ u16 page_idx, const void *data, size_t len)
+{
+ u8 buf[RM_BL_WRT_LEN];
+ size_t xfer_len;
+ int error;
+ int i;
+
+ BUILD_BUG_ON((RM_FW_PAGE_SIZE % RM_BL_WRT_PKG_SIZE) != 0);
+
+ for (i = 0; i < RM_FW_PAGE_SIZE / RM_BL_WRT_PKG_SIZE; i++) {
+ buf[BL_HEADER] = RM_CMD_BOOT_PAGE_WRT;
+ buf[BL_PAGE_STR] = page_idx ? 0xff : 0;
+ buf[BL_PKG_IDX] = i + 1;
+
+ xfer_len = min_t(size_t, len, RM_BL_WRT_PKG_SIZE);
+ memcpy(&buf[BL_DATA_STR], data, xfer_len);
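+		/* Pad a short final chunk with 0xff so that a full
+		 * RM_BL_WRT_PKG_SIZE package is always sent.
+		 */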
+ if (len < RM_BL_WRT_PKG_SIZE)
+ memset(&buf[BL_DATA_STR + xfer_len], 0xff,
+ RM_BL_WRT_PKG_SIZE - xfer_len);
+
+ error = raydium_i2c_write_object(client, buf, RM_BL_WRT_LEN,
+ RAYDIUM_WAIT_READY);
+ if (error) {
+ dev_err(&client->dev,
+ "page write command failed for page %d, chunk %d: %d\n",
+ page_idx, i, error);
+ return error;
+ }
+
+ data += xfer_len;
+ len -= xfer_len;
+ }
+
+ return error;
+}
+
+static u16 raydium_calc_chksum(const u8 *buf, u16 len)
+{
+ u16 checksum = 0;
+ u16 i;
+
+ for (i = 0; i < len; i++)
+ checksum += buf[i];
+
+ return checksum;
+}
+
+static int raydium_i2c_do_update_firmware(struct raydium_data *ts,
+ const struct firmware *fw)
+{
+ struct i2c_client *client = ts->client;
+ const void *data;
+ size_t data_len;
+ size_t len;
+ int page_nr;
+ int i;
+ int error;
+ u16 fw_checksum;
+
+ if (fw->size == 0 || fw->size > RM_MAX_FW_SIZE) {
+ dev_err(&client->dev, "Invalid firmware length\n");
+ return -EINVAL;
+ }
+
+ error = raydium_i2c_check_fw_status(ts);
+ if (error) {
+ dev_err(&client->dev, "Unable to access IC %d\n", error);
+ return error;
+ }
+
+ if (ts->boot_mode == RAYDIUM_TS_MAIN) {
+ for (i = 0; i < RM_MAX_RETRIES; i++) {
+ error = raydium_i2c_enter_bl(client);
+ if (!error) {
+ error = raydium_i2c_check_fw_status(ts);
+ if (error) {
+ dev_err(&client->dev,
+ "unable to access IC: %d\n",
+ error);
+ return error;
+ }
+
+ if (ts->boot_mode == RAYDIUM_TS_BLDR)
+ break;
+ }
+ }
+
+ if (ts->boot_mode == RAYDIUM_TS_MAIN) {
+ dev_err(&client->dev,
+ "failied to jump to boot loader: %d\n",
+ error);
+ return -EIO;
+ }
+ }
+
+ error = raydium_i2c_disable_watch_dog(client);
+ if (error)
+ return error;
+
+ error = raydium_i2c_check_path(client);
+ if (error)
+ return error;
+
+ error = raydium_i2c_boot_trigger(client);
+ if (error) {
+ dev_err(&client->dev, "send boot trigger fail: %d\n", error);
+ return error;
+ }
+
+ msleep(RM_BOOT_DELAY_MS);
+
+ data = fw->data;
+ data_len = fw->size;
+ page_nr = 0;
+
+ while (data_len) {
+ len = min_t(size_t, data_len, RM_FW_PAGE_SIZE);
+
+ error = raydium_i2c_fw_write_page(client, page_nr++, data, len);
+ if (error)
+ return error;
+
+ msleep(20);
+
+ data += len;
+ data_len -= len;
+ }
+
+ error = raydium_i2c_leave_bl(client);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to leave boot loader: %d\n", error);
+ return error;
+ }
+
+ dev_dbg(&client->dev, "left boot loader mode\n");
+ msleep(RM_BOOT_DELAY_MS);
+
+ error = raydium_i2c_check_fw_status(ts);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to check fw status after write: %d\n",
+ error);
+ return error;
+ }
+
+ if (ts->boot_mode != RAYDIUM_TS_MAIN) {
+ dev_err(&client->dev,
+ "failed to switch to main fw after writing firmware: %d\n",
+ error);
+ return -EINVAL;
+ }
+
+ error = raydium_i2c_fw_trigger(client);
+ if (error) {
+ dev_err(&client->dev, "failed to trigger fw: %d\n", error);
+ return error;
+ }
+
+ fw_checksum = raydium_calc_chksum(fw->data, fw->size);
+
+ error = raydium_i2c_write_checksum(client, fw->size, fw_checksum);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static int raydium_i2c_fw_update(struct raydium_data *ts)
+{
+ struct i2c_client *client = ts->client;
+ const struct firmware *fw = NULL;
+ const char *fw_file = "/*(DEBLOBBED)*/";
+ int error;
+
+ error = reject_firmware(&fw, fw_file, &client->dev);
+ if (error) {
+ dev_err(&client->dev, "Unable to open firmware %s\n", fw_file);
+ return error;
+ }
+
+ disable_irq(client->irq);
+
+ error = raydium_i2c_do_update_firmware(ts, fw);
+ if (error) {
+ dev_err(&client->dev, "firmware update failed: %d\n", error);
+ ts->boot_mode = RAYDIUM_TS_BLDR;
+ goto out_enable_irq;
+ }
+
+ error = raydium_i2c_initialize(ts);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to initialize device after firmware update: %d\n",
+ error);
+ ts->boot_mode = RAYDIUM_TS_BLDR;
+ goto out_enable_irq;
+ }
+
+ ts->boot_mode = RAYDIUM_TS_MAIN;
+
+out_enable_irq:
+ enable_irq(client->irq);
+ msleep(100);
+
+ release_firmware(fw);
+
+ return error;
+}
+
+static void raydium_mt_event(struct raydium_data *ts)
+{
+ int i;
+
+ for (i = 0; i < ts->report_size / ts->contact_size; i++) {
+ u8 *contact = &ts->report_data[ts->contact_size * i];
+ bool state = contact[RM_CONTACT_STATE_POS];
+ u8 wx, wy;
+
+ input_mt_slot(ts->input, i);
+ input_mt_report_slot_state(ts->input, MT_TOOL_FINGER, state);
+
+ if (!state)
+ continue;
+
+ input_report_abs(ts->input, ABS_MT_POSITION_X,
+ get_unaligned_le16(&contact[RM_CONTACT_X_POS]));
+ input_report_abs(ts->input, ABS_MT_POSITION_Y,
+ get_unaligned_le16(&contact[RM_CONTACT_Y_POS]));
+ input_report_abs(ts->input, ABS_MT_PRESSURE,
+ contact[RM_CONTACT_PRESSURE_POS]);
+
+ wx = contact[RM_CONTACT_WIDTH_X_POS];
+ wy = contact[RM_CONTACT_WIDTH_Y_POS];
+
+ input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR, max(wx, wy));
+ input_report_abs(ts->input, ABS_MT_TOUCH_MINOR, min(wx, wy));
+ }
+
+ input_mt_sync_frame(ts->input);
+ input_sync(ts->input);
+}
+
+static irqreturn_t raydium_i2c_irq(int irq, void *_dev)
+{
+ struct raydium_data *ts = _dev;
+ int error;
+ u16 fw_crc;
+ u16 calc_crc;
+
+ if (ts->boot_mode != RAYDIUM_TS_MAIN)
+ goto out;
+
+ error = raydium_i2c_read_message(ts->client, ts->data_bank_addr,
+ ts->report_data, ts->pkg_size);
+ if (error)
+ goto out;
+
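+	/* The last RM_PACKET_CRC_SIZE bytes of the package carry a simple
+	 * additive checksum (see raydium_calc_chksum()) over the report.
+	 */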
+ fw_crc = get_unaligned_le16(&ts->report_data[ts->report_size]);
+ calc_crc = raydium_calc_chksum(ts->report_data, ts->report_size);
+ if (unlikely(fw_crc != calc_crc)) {
+ dev_warn(&ts->client->dev,
+ "%s: invalid packet crc %#04x vs %#04x\n",
+ __func__, calc_crc, fw_crc);
+ goto out;
+ }
+
+ raydium_mt_event(ts);
+
+out:
+ return IRQ_HANDLED;
+}
+
+static ssize_t raydium_i2c_fw_ver_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct raydium_data *ts = i2c_get_clientdata(client);
+
+ return sprintf(buf, "%d.%d\n", ts->info.main_ver, ts->info.sub_ver);
+}
+
+static ssize_t raydium_i2c_hw_ver_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct raydium_data *ts = i2c_get_clientdata(client);
+
+ return sprintf(buf, "%#04x\n", le32_to_cpu(ts->info.hw_ver));
+}
+
+static ssize_t raydium_i2c_boot_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct raydium_data *ts = i2c_get_clientdata(client);
+
+ return sprintf(buf, "%s\n",
+ ts->boot_mode == RAYDIUM_TS_MAIN ?
+ "Normal" : "Recovery");
+}
+
+static ssize_t raydium_i2c_update_fw_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct raydium_data *ts = i2c_get_clientdata(client);
+ int error;
+
+ error = mutex_lock_interruptible(&ts->sysfs_mutex);
+ if (error)
+ return error;
+
+ error = raydium_i2c_fw_update(ts);
+
+ mutex_unlock(&ts->sysfs_mutex);
+
+ return error ?: count;
+}
+
+static ssize_t raydium_i2c_calibrate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct raydium_data *ts = i2c_get_clientdata(client);
+ static const u8 cal_cmd[] = { 0x00, 0x01, 0x9E };
+ int error;
+
+ error = mutex_lock_interruptible(&ts->sysfs_mutex);
+ if (error)
+ return error;
+
+ error = raydium_i2c_write_object(client, cal_cmd, sizeof(cal_cmd),
+ RAYDIUM_WAIT_READY);
+ if (error)
+ dev_err(&client->dev, "calibrate command failed: %d\n", error);
+
+ mutex_unlock(&ts->sysfs_mutex);
+ return error ?: count;
+}
+
+static DEVICE_ATTR(fw_version, S_IRUGO, raydium_i2c_fw_ver_show, NULL);
+static DEVICE_ATTR(hw_version, S_IRUGO, raydium_i2c_hw_ver_show, NULL);
+static DEVICE_ATTR(boot_mode, S_IRUGO, raydium_i2c_boot_mode_show, NULL);
+static DEVICE_ATTR(update_fw, S_IWUSR, NULL, raydium_i2c_update_fw_store);
+static DEVICE_ATTR(calibrate, S_IWUSR, NULL, raydium_i2c_calibrate_store);
+
+static struct attribute *raydium_i2c_attributes[] = {
+ &dev_attr_update_fw.attr,
+ &dev_attr_boot_mode.attr,
+ &dev_attr_fw_version.attr,
+ &dev_attr_hw_version.attr,
+ &dev_attr_calibrate.attr,
+ NULL
+};
+
+static struct attribute_group raydium_i2c_attribute_group = {
+ .attrs = raydium_i2c_attributes,
+};
+
+static void raydium_i2c_remove_sysfs_group(void *_data)
+{
+ struct raydium_data *ts = _data;
+
+ sysfs_remove_group(&ts->client->dev.kobj, &raydium_i2c_attribute_group);
+}
+
+static int raydium_i2c_power_on(struct raydium_data *ts)
+{
+ int error;
+
+ if (!ts->reset_gpio)
+ return 0;
+
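+	/* Assert reset while the supplies ramp up, then release it and
+	 * give the controller RM_RESET_DELAY_MSEC to boot.
+	 */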
+ gpiod_set_value_cansleep(ts->reset_gpio, 1);
+
+ error = regulator_enable(ts->avdd);
+ if (error) {
+ dev_err(&ts->client->dev,
+ "failed to enable avdd regulator: %d\n", error);
+ goto release_reset_gpio;
+ }
+
+ error = regulator_enable(ts->vccio);
+ if (error) {
+ regulator_disable(ts->avdd);
+ dev_err(&ts->client->dev,
+ "failed to enable vccio regulator: %d\n", error);
+ goto release_reset_gpio;
+ }
+
+ udelay(RM_POWERON_DELAY_USEC);
+
+release_reset_gpio:
+ gpiod_set_value_cansleep(ts->reset_gpio, 0);
+
+ if (error)
+ return error;
+
+ msleep(RM_RESET_DELAY_MSEC);
+
+ return 0;
+}
+
+static void raydium_i2c_power_off(void *_data)
+{
+ struct raydium_data *ts = _data;
+
+ if (ts->reset_gpio) {
+ gpiod_set_value_cansleep(ts->reset_gpio, 1);
+ regulator_disable(ts->vccio);
+ regulator_disable(ts->avdd);
+ }
+}
+
+static int raydium_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ union i2c_smbus_data dummy;
+ struct raydium_data *ts;
+ int error;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev,
+ "i2c check functionality error (need I2C_FUNC_I2C)\n");
+ return -ENXIO;
+ }
+
+ ts = devm_kzalloc(&client->dev, sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ mutex_init(&ts->sysfs_mutex);
+
+ ts->client = client;
+ i2c_set_clientdata(client, ts);
+
+ ts->avdd = devm_regulator_get(&client->dev, "avdd");
+ if (IS_ERR(ts->avdd)) {
+ error = PTR_ERR(ts->avdd);
+ if (error != -EPROBE_DEFER)
+ dev_err(&client->dev,
+ "Failed to get 'avdd' regulator: %d\n", error);
+ return error;
+ }
+
+ ts->vccio = devm_regulator_get(&client->dev, "vccio");
+ if (IS_ERR(ts->vccio)) {
+ error = PTR_ERR(ts->vccio);
+ if (error != -EPROBE_DEFER)
+ dev_err(&client->dev,
+ "Failed to get 'vccio' regulator: %d\n", error);
+ return error;
+ }
+
+ ts->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(ts->reset_gpio)) {
+ error = PTR_ERR(ts->reset_gpio);
+ if (error != -EPROBE_DEFER)
+ dev_err(&client->dev,
+ "failed to get reset gpio: %d\n", error);
+ return error;
+ }
+
+ error = raydium_i2c_power_on(ts);
+ if (error)
+ return error;
+
+ error = devm_add_action(&client->dev, raydium_i2c_power_off, ts);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to install power off action: %d\n", error);
+ raydium_i2c_power_off(ts);
+ return error;
+ }
+
+ /* Make sure there is something at this address */
+ if (i2c_smbus_xfer(client->adapter, client->addr, 0,
+ I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE, &dummy) < 0) {
+ dev_err(&client->dev, "nothing at this address\n");
+ return -ENXIO;
+ }
+
+ error = raydium_i2c_initialize(ts);
+ if (error) {
+ dev_err(&client->dev, "failed to initialize: %d\n", error);
+ return error;
+ }
+
+ ts->report_data = devm_kmalloc(&client->dev,
+ ts->pkg_size, GFP_KERNEL);
+ if (!ts->report_data)
+ return -ENOMEM;
+
+ ts->input = devm_input_allocate_device(&client->dev);
+ if (!ts->input) {
+ dev_err(&client->dev, "Failed to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ ts->input->name = "Raydium Touchscreen";
+ ts->input->id.bustype = BUS_I2C;
+
+ input_set_drvdata(ts->input, ts);
+
+ input_set_abs_params(ts->input, ABS_MT_POSITION_X,
+ 0, le16_to_cpu(ts->info.x_max), 0, 0);
+ input_set_abs_params(ts->input, ABS_MT_POSITION_Y,
+ 0, le16_to_cpu(ts->info.y_max), 0, 0);
+ input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->info.x_res);
+ input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->info.y_res);
+
+ input_set_abs_params(ts->input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
+ input_set_abs_params(ts->input, ABS_MT_PRESSURE, 0, 255, 0, 0);
+
+ error = input_mt_init_slots(ts->input, RM_MAX_TOUCH_NUM,
+ INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to initialize MT slots: %d\n", error);
+ return error;
+ }
+
+ error = input_register_device(ts->input);
+ if (error) {
+ dev_err(&client->dev,
+ "unable to register input device: %d\n", error);
+ return error;
+ }
+
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, raydium_i2c_irq,
+ IRQF_ONESHOT, client->name, ts);
+ if (error) {
+ dev_err(&client->dev, "Failed to register interrupt\n");
+ return error;
+ }
+
+ error = sysfs_create_group(&client->dev.kobj,
+ &raydium_i2c_attribute_group);
+ if (error) {
+ dev_err(&client->dev, "failed to create sysfs attributes: %d\n",
+ error);
+ return error;
+ }
+
+ error = devm_add_action(&client->dev,
+ raydium_i2c_remove_sysfs_group, ts);
+ if (error) {
+ raydium_i2c_remove_sysfs_group(ts);
+ dev_err(&client->dev,
+ "Failed to add sysfs cleanup action: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static void __maybe_unused raydium_enter_sleep(struct i2c_client *client)
+{
+ static const u8 sleep_cmd[] = { 0x5A, 0xff, 0x00, 0x0f };
+ int error;
+
+ error = raydium_i2c_send(client, RM_CMD_ENTER_SLEEP,
+ sleep_cmd, sizeof(sleep_cmd));
+ if (error)
+ dev_err(&client->dev,
+ "sleep command failed: %d\n", error);
+}
+
+static int __maybe_unused raydium_i2c_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct raydium_data *ts = i2c_get_clientdata(client);
+
+ /* Sleep is not available in BLDR recovery mode */
+ if (ts->boot_mode != RAYDIUM_TS_MAIN)
+ return -EBUSY;
+
+ disable_irq(client->irq);
+
+ if (device_may_wakeup(dev)) {
+ raydium_enter_sleep(client);
+
+ ts->wake_irq_enabled = (enable_irq_wake(client->irq) == 0);
+ } else {
+ raydium_i2c_power_off(ts);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused raydium_i2c_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct raydium_data *ts = i2c_get_clientdata(client);
+
+ if (device_may_wakeup(dev)) {
+ if (ts->wake_irq_enabled)
+ disable_irq_wake(client->irq);
+ raydium_i2c_sw_reset(client);
+ } else {
+ raydium_i2c_power_on(ts);
+ raydium_i2c_initialize(ts);
+ }
+
+ enable_irq(client->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(raydium_i2c_pm_ops,
+ raydium_i2c_suspend, raydium_i2c_resume);
+
+static const struct i2c_device_id raydium_i2c_id[] = {
+ { "raydium_i2c" , 0 },
+ { "rm32380", 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, raydium_i2c_id);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id raydium_acpi_id[] = {
+ { "RAYD0001", 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(acpi, raydium_acpi_id);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id raydium_of_match[] = {
+ { .compatible = "raydium,rm32380", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, raydium_of_match);
+#endif
+
+static struct i2c_driver raydium_i2c_driver = {
+ .probe = raydium_i2c_probe,
+ .id_table = raydium_i2c_id,
+ .driver = {
+ .name = "raydium_ts",
+ .pm = &raydium_i2c_pm_ops,
+ .acpi_match_table = ACPI_PTR(raydium_acpi_id),
+ .of_match_table = of_match_ptr(raydium_of_match),
+ },
+};
+module_i2c_driver(raydium_i2c_driver);
+
+MODULE_AUTHOR("Raydium");
+MODULE_DESCRIPTION("Raydium I2c Touchscreen driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
new file mode 100644
index 000000000..7b817fdc3
--- /dev/null
+++ b/drivers/input/touchscreen/silead.c
@@ -0,0 +1,566 @@
+/* -------------------------------------------------------------------------
+ * Copyright (C) 2014-2015, Intel Corporation
+ *
+ * Derived from:
+ * gslX68X.c
+ * Copyright (C) 2010-2015, Shanghai Sileadinc Co.Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ * -------------------------------------------------------------------------
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/interrupt.h>
+#include <linux/gpio/consumer.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
+#include <linux/pm.h>
+#include <linux/irq.h>
+
+#include <asm/unaligned.h>
+
+#define SILEAD_TS_NAME "silead_ts"
+
+#define SILEAD_REG_RESET 0xE0
+#define SILEAD_REG_DATA 0x80
+#define SILEAD_REG_TOUCH_NR 0x80
+#define SILEAD_REG_POWER 0xBC
+#define SILEAD_REG_CLOCK 0xE4
+#define SILEAD_REG_STATUS 0xB0
+#define SILEAD_REG_ID 0xFC
+#define SILEAD_REG_MEM_CHECK 0xB0
+
+#define SILEAD_STATUS_OK 0x5A5A5A5A
+#define SILEAD_TS_DATA_LEN 44
+#define SILEAD_CLOCK 0x04
+
+#define SILEAD_CMD_RESET 0x88
+#define SILEAD_CMD_START 0x00
+
+#define SILEAD_POINT_DATA_LEN 0x04
+#define SILEAD_POINT_Y_OFF 0x00
+#define SILEAD_POINT_Y_MSB_OFF 0x01
+#define SILEAD_POINT_X_OFF 0x02
+#define SILEAD_POINT_X_MSB_OFF 0x03
+#define SILEAD_TOUCH_ID_MASK 0xF0
+
+#define SILEAD_CMD_SLEEP_MIN 10000
+#define SILEAD_CMD_SLEEP_MAX 20000
+#define SILEAD_POWER_SLEEP 20
+#define SILEAD_STARTUP_SLEEP 30
+
+#define SILEAD_MAX_FINGERS 10
+
+enum silead_ts_power {
+ SILEAD_POWER_ON = 1,
+ SILEAD_POWER_OFF = 0
+};
+
+struct silead_ts_data {
+ struct i2c_client *client;
+ struct gpio_desc *gpio_power;
+ struct input_dev *input;
+ char fw_name[64];
+ struct touchscreen_properties prop;
+ u32 max_fingers;
+ u32 chip_id;
+ struct input_mt_pos pos[SILEAD_MAX_FINGERS];
+ int slots[SILEAD_MAX_FINGERS];
+ int id[SILEAD_MAX_FINGERS];
+};
+
+struct silead_fw_data {
+ u32 offset;
+ u32 val;
+};
+
+static int silead_ts_request_input_dev(struct silead_ts_data *data)
+{
+ struct device *dev = &data->client->dev;
+ int error;
+
+ data->input = devm_input_allocate_device(dev);
+ if (!data->input) {
+ dev_err(dev,
+ "Failed to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ input_set_abs_params(data->input, ABS_MT_POSITION_X, 0, 4095, 0, 0);
+ input_set_abs_params(data->input, ABS_MT_POSITION_Y, 0, 4095, 0, 0);
+ touchscreen_parse_properties(data->input, true, &data->prop);
+
+ input_mt_init_slots(data->input, data->max_fingers,
+ INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED |
+ INPUT_MT_TRACK);
+
+ data->input->name = SILEAD_TS_NAME;
+ data->input->phys = "input/ts";
+ data->input->id.bustype = BUS_I2C;
+
+ error = input_register_device(data->input);
+ if (error) {
+ dev_err(dev, "Failed to register input device: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static void silead_ts_set_power(struct i2c_client *client,
+ enum silead_ts_power state)
+{
+ struct silead_ts_data *data = i2c_get_clientdata(client);
+
+ if (data->gpio_power) {
+ gpiod_set_value_cansleep(data->gpio_power, state);
+ msleep(SILEAD_POWER_SLEEP);
+ }
+}
+
+static void silead_ts_read_data(struct i2c_client *client)
+{
+ struct silead_ts_data *data = i2c_get_clientdata(client);
+ struct input_dev *input = data->input;
+ struct device *dev = &client->dev;
+ u8 *bufp, buf[SILEAD_TS_DATA_LEN];
+ int touch_nr, error, i;
+
+ error = i2c_smbus_read_i2c_block_data(client, SILEAD_REG_DATA,
+ SILEAD_TS_DATA_LEN, buf);
+ if (error < 0) {
+ dev_err(dev, "Data read error %d\n", error);
+ return;
+ }
+
+ touch_nr = buf[0];
+ if (touch_nr > data->max_fingers) {
+ dev_warn(dev, "More touches reported then supported %d > %d\n",
+ touch_nr, data->max_fingers);
+ touch_nr = data->max_fingers;
+ }
+
+ bufp = buf + SILEAD_POINT_DATA_LEN;
+ for (i = 0; i < touch_nr; i++, bufp += SILEAD_POINT_DATA_LEN) {
+ /* Bits 4-7 are the touch id */
+ data->id[i] = (bufp[SILEAD_POINT_X_MSB_OFF] &
+ SILEAD_TOUCH_ID_MASK) >> 4;
+ touchscreen_set_mt_pos(&data->pos[i], &data->prop,
+ get_unaligned_le16(&bufp[SILEAD_POINT_X_OFF]) & 0xfff,
+ get_unaligned_le16(&bufp[SILEAD_POINT_Y_OFF]) & 0xfff);
+ }
+
+ input_mt_assign_slots(input, data->slots, data->pos, touch_nr, 0);
+
+ for (i = 0; i < touch_nr; i++) {
+ input_mt_slot(input, data->slots[i]);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
+ input_report_abs(input, ABS_MT_POSITION_X, data->pos[i].x);
+ input_report_abs(input, ABS_MT_POSITION_Y, data->pos[i].y);
+
+ dev_dbg(dev, "x=%d y=%d hw_id=%d sw_id=%d\n", data->pos[i].x,
+ data->pos[i].y, data->id[i], data->slots[i]);
+ }
+
+ input_mt_sync_frame(input);
+ input_sync(input);
+}
+
+static int silead_ts_init(struct i2c_client *client)
+{
+ struct silead_ts_data *data = i2c_get_clientdata(client);
+ int error;
+
+ error = i2c_smbus_write_byte_data(client, SILEAD_REG_RESET,
+ SILEAD_CMD_RESET);
+ if (error)
+ goto i2c_write_err;
+ usleep_range(SILEAD_CMD_SLEEP_MIN, SILEAD_CMD_SLEEP_MAX);
+
+ error = i2c_smbus_write_byte_data(client, SILEAD_REG_TOUCH_NR,
+ data->max_fingers);
+ if (error)
+ goto i2c_write_err;
+ usleep_range(SILEAD_CMD_SLEEP_MIN, SILEAD_CMD_SLEEP_MAX);
+
+ error = i2c_smbus_write_byte_data(client, SILEAD_REG_CLOCK,
+ SILEAD_CLOCK);
+ if (error)
+ goto i2c_write_err;
+ usleep_range(SILEAD_CMD_SLEEP_MIN, SILEAD_CMD_SLEEP_MAX);
+
+ error = i2c_smbus_write_byte_data(client, SILEAD_REG_RESET,
+ SILEAD_CMD_START);
+ if (error)
+ goto i2c_write_err;
+ usleep_range(SILEAD_CMD_SLEEP_MIN, SILEAD_CMD_SLEEP_MAX);
+
+ return 0;
+
+i2c_write_err:
+ dev_err(&client->dev, "Registers clear error %d\n", error);
+ return error;
+}
+
+static int silead_ts_reset(struct i2c_client *client)
+{
+ int error;
+
+ error = i2c_smbus_write_byte_data(client, SILEAD_REG_RESET,
+ SILEAD_CMD_RESET);
+ if (error)
+ goto i2c_write_err;
+ usleep_range(SILEAD_CMD_SLEEP_MIN, SILEAD_CMD_SLEEP_MAX);
+
+ error = i2c_smbus_write_byte_data(client, SILEAD_REG_CLOCK,
+ SILEAD_CLOCK);
+ if (error)
+ goto i2c_write_err;
+ usleep_range(SILEAD_CMD_SLEEP_MIN, SILEAD_CMD_SLEEP_MAX);
+
+ error = i2c_smbus_write_byte_data(client, SILEAD_REG_POWER,
+ SILEAD_CMD_START);
+ if (error)
+ goto i2c_write_err;
+ usleep_range(SILEAD_CMD_SLEEP_MIN, SILEAD_CMD_SLEEP_MAX);
+
+ return 0;
+
+i2c_write_err:
+ dev_err(&client->dev, "Chip reset error %d\n", error);
+ return error;
+}
+
+static int silead_ts_startup(struct i2c_client *client)
+{
+ int error;
+
+ error = i2c_smbus_write_byte_data(client, SILEAD_REG_RESET, 0x00);
+ if (error) {
+ dev_err(&client->dev, "Startup error %d\n", error);
+ return error;
+ }
+
+ msleep(SILEAD_STARTUP_SLEEP);
+
+ return 0;
+}
+
+static int silead_ts_load_fw(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct silead_ts_data *data = i2c_get_clientdata(client);
+ unsigned int fw_size, i;
+ const struct firmware *fw;
+ struct silead_fw_data *fw_data;
+ int error;
+
+ dev_dbg(dev, "Firmware file name: %s", data->fw_name);
+
+ error = reject_firmware(&fw, data->fw_name, dev);
+ if (error) {
+ dev_err(dev, "Firmware request error %d\n", error);
+ return error;
+ }
+
+ fw_size = fw->size / sizeof(*fw_data);
+ fw_data = (struct silead_fw_data *)fw->data;
+
+ for (i = 0; i < fw_size; i++) {
+ error = i2c_smbus_write_i2c_block_data(client,
+ fw_data[i].offset,
+ 4,
+ (u8 *)&fw_data[i].val);
+ if (error) {
+ dev_err(dev, "Firmware load error %d\n", error);
+ break;
+ }
+ }
+
+ release_firmware(fw);
+ return error ?: 0;
+}
+
+static u32 silead_ts_get_status(struct i2c_client *client)
+{
+ int error;
+ __le32 status;
+
+ error = i2c_smbus_read_i2c_block_data(client, SILEAD_REG_STATUS,
+ sizeof(status), (u8 *)&status);
+ if (error < 0) {
+ dev_err(&client->dev, "Status read error %d\n", error);
+ return error;
+ }
+
+ return le32_to_cpu(status);
+}
+
+static int silead_ts_get_id(struct i2c_client *client)
+{
+ struct silead_ts_data *data = i2c_get_clientdata(client);
+ __le32 chip_id;
+ int error;
+
+ error = i2c_smbus_read_i2c_block_data(client, SILEAD_REG_ID,
+ sizeof(chip_id), (u8 *)&chip_id);
+ if (error < 0) {
+ dev_err(&client->dev, "Chip ID read error %d\n", error);
+ return error;
+ }
+
+ data->chip_id = le32_to_cpu(chip_id);
+ dev_info(&client->dev, "Silead chip ID: 0x%8X", data->chip_id);
+
+ return 0;
+}
+
+static int silead_ts_setup(struct i2c_client *client)
+{
+ int error;
+ u32 status;
+
+ silead_ts_set_power(client, SILEAD_POWER_OFF);
+ silead_ts_set_power(client, SILEAD_POWER_ON);
+
+ error = silead_ts_get_id(client);
+ if (error)
+ return error;
+
+ error = silead_ts_init(client);
+ if (error)
+ return error;
+
+ error = silead_ts_reset(client);
+ if (error)
+ return error;
+
+ error = silead_ts_load_fw(client);
+ if (error)
+ return error;
+
+ error = silead_ts_startup(client);
+ if (error)
+ return error;
+
+ status = silead_ts_get_status(client);
+ if (status != SILEAD_STATUS_OK) {
+ dev_err(&client->dev,
+ "Initialization error, status: 0x%X\n", status);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static irqreturn_t silead_ts_threaded_irq_handler(int irq, void *id)
+{
+ struct silead_ts_data *data = id;
+ struct i2c_client *client = data->client;
+
+ silead_ts_read_data(client);
+
+ return IRQ_HANDLED;
+}
+
+static void silead_ts_read_props(struct i2c_client *client)
+{
+ struct silead_ts_data *data = i2c_get_clientdata(client);
+ struct device *dev = &client->dev;
+ const char *str;
+ int error;
+
+ error = device_property_read_u32(dev, "silead,max-fingers",
+ &data->max_fingers);
+ if (error) {
+ dev_dbg(dev, "Max fingers read error %d\n", error);
+ data->max_fingers = 5; /* Most devices handle up to 5 fingers */
+ }
+
+ error = device_property_read_string(dev, "firmware-name", &str);
+ if (!error)
+ /*(DEBLOBBED)*/;
+ else
+ dev_dbg(dev, "Firmware file name read error. Using default.");
+}
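(Editorial aside: silead_ts_read_props() treats both properties as optional — any read error falls back to a default instead of failing the probe. A minimal sketch of that pattern, assuming only the generic device-property API; names are illustrative:

        static u32 example_read_max_fingers(struct device *dev)
        {
                u32 val;

                /* fall back to the driver's default on any read error */
                if (device_property_read_u32(dev, "silead,max-fingers", &val))
                        val = 5;

                return val;
        })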
+
+#ifdef CONFIG_ACPI
+static int silead_ts_set_default_fw_name(struct silead_ts_data *data,
+ const struct i2c_device_id *id)
+{
+ const struct acpi_device_id *acpi_id;
+ struct device *dev = &data->client->dev;
+ int i;
+
+ if (ACPI_HANDLE(dev)) {
+ acpi_id = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!acpi_id)
+ return -ENODEV;
+
+ snprintf(data->fw_name, sizeof(data->fw_name),
+ "/*(DEBLOBBED)*/", acpi_id->id);
+
+ for (i = 0; i < strlen(data->fw_name); i++)
+ data->fw_name[i] = tolower(data->fw_name[i]);
+ } else {
+ snprintf(data->fw_name, sizeof(data->fw_name),
+ "/*(DEBLOBBED)*/", id->name);
+ }
+
+ return 0;
+}
+#else
+static int silead_ts_set_default_fw_name(struct silead_ts_data *data,
+ const struct i2c_device_id *id)
+{
+ snprintf(data->fw_name, sizeof(data->fw_name),
+ "/*(DEBLOBBED)*/", id->name);
+ return 0;
+}
+#endif
+
+static int silead_ts_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct silead_ts_data *data;
+ struct device *dev = &client->dev;
+ int error;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_I2C |
+ I2C_FUNC_SMBUS_READ_I2C_BLOCK |
+ I2C_FUNC_SMBUS_WRITE_I2C_BLOCK)) {
+ dev_err(dev, "I2C functionality check failed\n");
+ return -ENXIO;
+ }
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, data);
+ data->client = client;
+
+ error = silead_ts_set_default_fw_name(data, id);
+ if (error)
+ return error;
+
+ silead_ts_read_props(client);
+
+ /* We must have the IRQ provided by the DT or ACPI subsystem */
+ if (client->irq <= 0)
+ return -ENODEV;
+
+ /* Power GPIO pin */
+ data->gpio_power = devm_gpiod_get_optional(dev, "power", GPIOD_OUT_LOW);
+ if (IS_ERR(data->gpio_power)) {
+ if (PTR_ERR(data->gpio_power) != -EPROBE_DEFER)
+ dev_err(dev, "Shutdown GPIO request failed\n");
+ return PTR_ERR(data->gpio_power);
+ }
+
+ error = silead_ts_setup(client);
+ if (error)
+ return error;
+
+ error = silead_ts_request_input_dev(data);
+ if (error)
+ return error;
+
+ error = devm_request_threaded_irq(dev, client->irq,
+ NULL, silead_ts_threaded_irq_handler,
+ IRQF_ONESHOT, client->name, data);
+ if (error) {
+ if (error != -EPROBE_DEFER)
+ dev_err(dev, "IRQ request failed %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused silead_ts_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ silead_ts_set_power(client, SILEAD_POWER_OFF);
+ return 0;
+}
+
+static int __maybe_unused silead_ts_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int error, status;
+
+ silead_ts_set_power(client, SILEAD_POWER_ON);
+
+ error = silead_ts_reset(client);
+ if (error)
+ return error;
+
+ error = silead_ts_startup(client);
+ if (error)
+ return error;
+
+ status = silead_ts_get_status(client);
+ if (status != SILEAD_STATUS_OK) {
+ dev_err(dev, "Resume error, status: 0x%02x\n", status);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(silead_ts_pm, silead_ts_suspend, silead_ts_resume);
+
+static const struct i2c_device_id silead_ts_id[] = {
+ { "gsl1680", 0 },
+ { "gsl1688", 0 },
+ { "gsl3670", 0 },
+ { "gsl3675", 0 },
+ { "gsl3692", 0 },
+ { "mssl1680", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, silead_ts_id);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id silead_ts_acpi_match[] = {
+ { "GSL1680", 0 },
+ { "GSL1688", 0 },
+ { "GSL3670", 0 },
+ { "GSL3675", 0 },
+ { "GSL3692", 0 },
+ { "MSSL1680", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, silead_ts_acpi_match);
+#endif
+
+static struct i2c_driver silead_ts_driver = {
+ .probe = silead_ts_probe,
+ .id_table = silead_ts_id,
+ .driver = {
+ .name = SILEAD_TS_NAME,
+ .acpi_match_table = ACPI_PTR(silead_ts_acpi_match),
+ .pm = &silead_ts_pm,
+ },
+};
+module_i2c_driver(silead_ts_driver);
+
+MODULE_AUTHOR("Robert Dolca <robert.dolca@intel.com>");
+MODULE_DESCRIPTION("Silead I2C touchscreen driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/sis_i2c.c b/drivers/input/touchscreen/sis_i2c.c
new file mode 100644
index 000000000..8d93f8c9a
--- /dev/null
+++ b/drivers/input/touchscreen/sis_i2c.c
@@ -0,0 +1,413 @@
+/*
+ * Touch Screen driver for SiS 9200 family I2C Touch panels
+ *
+ * Copyright (C) 2015 SiS, Inc.
+ * Copyright (C) 2016 Nextfour Group
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/crc-itu-t.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/interrupt.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+
+#define SIS_I2C_NAME "sis_i2c_ts"
+
+/*
+ * The I2C packet format:
+ * le16 byte count
+ * u8 Report ID
+ * <contact data - variable length>
+ * u8 Number of contacts
+ * le16 Scan Time (optional)
+ * le16 CRC
+ *
+ * One touch point information consists of 6+ bytes, the order is:
+ * u8 contact state
+ * u8 finger id
+ * le16 x axis
+ * le16 y axis
+ * u8 contact width (optional)
+ * u8 contact height (optional)
+ * u8 pressure (optional)
+ *
+ * The maximum amount of data transmitted in one shot is 64 bytes. If the
+ * controller needs to report more contacts than fit in one packet, it sends
+ * the true number of contacts in the first packet and 0 as the number of
+ * contacts in the second packet.
+ */
+
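(Editorial aside: a compact sketch of the framing rules just described for the plain touch-report case, assuming the same crc-itu-t and unaligned-access helpers the driver includes — the CRC covers everything after the 16-bit length word, minus the trailing CRC itself:

        static int example_check_frame(const u8 *buf, unsigned int buflen)
        {
                u16 len, crc, pkg_crc;

                len = get_unaligned_le16(&buf[0]);
                if (len < 10 || len > buflen)
                        return -EINVAL;

                /* skip the length word, exclude the trailing CRC */
                crc = crc_itu_t(0, buf + 2, len - 4);
                pkg_crc = get_unaligned_le16(&buf[len - 2]);

                return crc == pkg_crc ? 0 : -EIO;
        })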
+#define SIS_MAX_PACKET_SIZE 64
+
+#define SIS_PKT_LEN_OFFSET 0
+#define SIS_PKT_REPORT_OFFSET 2 /* Report ID/type */
+#define SIS_PKT_CONTACT_OFFSET 3 /* First contact */
+
+#define SIS_SCAN_TIME_LEN 2
+
+/* Supported report types */
+#define SIS_ALL_IN_ONE_PACKAGE 0x10
+#define SIS_PKT_IS_TOUCH(x) (((x) & 0x0f) == 0x01)
+#define SIS_PKT_IS_HIDI2C(x) (((x) & 0x0f) == 0x06)
+
+/* Contact properties within report */
+#define SIS_PKT_HAS_AREA(x) ((x) & BIT(4))
+#define SIS_PKT_HAS_PRESSURE(x) ((x) & BIT(5))
+#define SIS_PKT_HAS_SCANTIME(x) ((x) & BIT(6))
+
+/* Contact size */
+#define SIS_BASE_LEN_PER_CONTACT 6
+#define SIS_AREA_LEN_PER_CONTACT 2
+#define SIS_PRESSURE_LEN_PER_CONTACT 1
+
+/* Offsets within contact data */
+#define SIS_CONTACT_STATUS_OFFSET 0
+#define SIS_CONTACT_ID_OFFSET 1 /* Contact ID */
+#define SIS_CONTACT_X_OFFSET 2
+#define SIS_CONTACT_Y_OFFSET 4
+#define SIS_CONTACT_WIDTH_OFFSET 6
+#define SIS_CONTACT_HEIGHT_OFFSET 7
+#define SIS_CONTACT_PRESSURE_OFFSET(id) (SIS_PKT_HAS_AREA(id) ? 8 : 6)
+
+/* Individual contact state */
+#define SIS_STATUS_UP 0x0
+#define SIS_STATUS_DOWN 0x3
+
+/* Touchscreen parameters */
+#define SIS_MAX_FINGERS 10
+#define SIS_MAX_X 4095
+#define SIS_MAX_Y 4095
+#define SIS_MAX_PRESSURE 255
+
+/* Resolution diagonal: ((SIS_MAX_X^2) + (SIS_MAX_Y^2))^0.5 ~= 5792 */
+#define SIS_AREA_LENGTH_LONGER 5792
+#define SIS_AREA_LENGTH_SHORT 5792
+#define SIS_AREA_UNIT (5792 / 32)
+
+struct sis_ts_data {
+ struct i2c_client *client;
+ struct input_dev *input;
+
+ struct gpio_desc *attn_gpio;
+ struct gpio_desc *reset_gpio;
+
+ u8 packet[SIS_MAX_PACKET_SIZE];
+};
+
+static int sis_read_packet(struct i2c_client *client, u8 *buf,
+ unsigned int *num_contacts,
+ unsigned int *contact_size)
+{
+ int count_idx;
+ int ret;
+ u16 len;
+ u16 crc, pkg_crc;
+ u8 report_id;
+
+ ret = i2c_master_recv(client, buf, SIS_MAX_PACKET_SIZE);
+ if (ret <= 0)
+ return -EIO;
+
+ len = get_unaligned_le16(&buf[SIS_PKT_LEN_OFFSET]);
+ if (len > SIS_MAX_PACKET_SIZE) {
+ dev_err(&client->dev,
+ "%s: invalid packet length (%d vs %d)\n",
+ __func__, len, SIS_MAX_PACKET_SIZE);
+ return -E2BIG;
+ }
+
+ if (len < 10)
+ return -EINVAL;
+
+ report_id = buf[SIS_PKT_REPORT_OFFSET];
+ count_idx = len - 1;
+ *contact_size = SIS_BASE_LEN_PER_CONTACT;
+
+ if (report_id != SIS_ALL_IN_ONE_PACKAGE) {
+ if (SIS_PKT_IS_TOUCH(report_id)) {
+ /*
+ * Calculate the CRC, skipping the packet length
+ * at the beginning and the CRC transmitted
+ * at the end of the packet.
+ */
+ crc = crc_itu_t(0, buf + 2, len - 2 - 2);
+ pkg_crc = get_unaligned_le16(&buf[len - 2]);
+
+ if (crc != pkg_crc) {
+ dev_err(&client->dev,
+ "%s: CRC Error (%d vs %d)\n",
+ __func__, crc, pkg_crc);
+ return -EINVAL;
+ }
+
+ count_idx -= 2;
+
+ } else if (!SIS_PKT_IS_HIDI2C(report_id)) {
+ dev_err(&client->dev,
+ "%s: invalid packet ID %#02x\n",
+ __func__, report_id);
+ return -EINVAL;
+ }
+
+ if (SIS_PKT_HAS_SCANTIME(report_id))
+ count_idx -= SIS_SCAN_TIME_LEN;
+
+ if (SIS_PKT_HAS_AREA(report_id))
+ *contact_size += SIS_AREA_LEN_PER_CONTACT;
+ if (SIS_PKT_HAS_PRESSURE(report_id))
+ *contact_size += SIS_PRESSURE_LEN_PER_CONTACT;
+ }
+
+ *num_contacts = buf[count_idx];
+ return 0;
+}
+
+static int sis_ts_report_contact(struct sis_ts_data *ts, const u8 *data, u8 id)
+{
+ struct input_dev *input = ts->input;
+ int slot;
+ u8 status = data[SIS_CONTACT_STATUS_OFFSET];
+ u8 pressure;
+ u8 height, width;
+ u16 x, y;
+
+ if (status != SIS_STATUS_DOWN && status != SIS_STATUS_UP) {
+ dev_err(&ts->client->dev, "Unexpected touch status: %#02x\n",
+ data[SIS_CONTACT_STATUS_OFFSET]);
+ return -EINVAL;
+ }
+
+ slot = input_mt_get_slot_by_key(input, data[SIS_CONTACT_ID_OFFSET]);
+ if (slot < 0)
+ return -ENOENT;
+
+ input_mt_slot(input, slot);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER,
+ status == SIS_STATUS_DOWN);
+
+ if (status == SIS_STATUS_DOWN) {
+ pressure = height = width = 1;
+ if (id != SIS_ALL_IN_ONE_PACKAGE) {
+ if (SIS_PKT_HAS_AREA(id)) {
+ width = data[SIS_CONTACT_WIDTH_OFFSET];
+ height = data[SIS_CONTACT_HEIGHT_OFFSET];
+ }
+
+ if (SIS_PKT_HAS_PRESSURE(id))
+ pressure =
+ data[SIS_CONTACT_PRESSURE_OFFSET(id)];
+ }
+
+ x = get_unaligned_le16(&data[SIS_CONTACT_X_OFFSET]);
+ y = get_unaligned_le16(&data[SIS_CONTACT_Y_OFFSET]);
+
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR,
+ width * SIS_AREA_UNIT);
+ input_report_abs(input, ABS_MT_TOUCH_MINOR,
+ height * SIS_AREA_UNIT);
+ input_report_abs(input, ABS_MT_PRESSURE, pressure);
+ input_report_abs(input, ABS_MT_POSITION_X, x);
+ input_report_abs(input, ABS_MT_POSITION_Y, y);
+ }
+
+ return 0;
+}
+
+static void sis_ts_handle_packet(struct sis_ts_data *ts)
+{
+ const u8 *contact;
+ unsigned int num_to_report = 0;
+ unsigned int num_contacts;
+ unsigned int num_reported;
+ unsigned int contact_size;
+ int error;
+ u8 report_id;
+
+ do {
+ error = sis_read_packet(ts->client, ts->packet,
+ &num_contacts, &contact_size);
+ if (error)
+ break;
+
+ if (num_to_report == 0) {
+ num_to_report = num_contacts;
+ } else if (num_contacts != 0) {
+ dev_err(&ts->client->dev,
+ "%s: nonzero (%d) point count in tail packet\n",
+ __func__, num_contacts);
+ break;
+ }
+
+ report_id = ts->packet[SIS_PKT_REPORT_OFFSET];
+ contact = &ts->packet[SIS_PKT_CONTACT_OFFSET];
+ num_reported = 0;
+
+ while (num_to_report > 0) {
+ error = sis_ts_report_contact(ts, contact, report_id);
+ if (error)
+ break;
+
+ contact += contact_size;
+ num_to_report--;
+ num_reported++;
+
+ if (report_id != SIS_ALL_IN_ONE_PACKAGE &&
+ num_reported >= 5) {
+ /*
+ * The remaining contacts are sent
+ * in the second packet.
+ */
+ break;
+ }
+ }
+ } while (num_to_report > 0);
+
+ input_mt_sync_frame(ts->input);
+ input_sync(ts->input);
+}
+
+static irqreturn_t sis_ts_irq_handler(int irq, void *dev_id)
+{
+ struct sis_ts_data *ts = dev_id;
+
+ do {
+ sis_ts_handle_packet(ts);
+ } while (ts->attn_gpio && gpiod_get_value_cansleep(ts->attn_gpio));
+
+ return IRQ_HANDLED;
+}
+
+static void sis_ts_reset(struct sis_ts_data *ts)
+{
+ if (ts->reset_gpio) {
+ /* Get out of reset */
+ usleep_range(1000, 2000);
+ gpiod_set_value(ts->reset_gpio, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value(ts->reset_gpio, 0);
+ msleep(100);
+ }
+}
+
+static int sis_ts_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct sis_ts_data *ts;
+ struct input_dev *input;
+ int error;
+
+ ts = devm_kzalloc(&client->dev, sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ ts->client = client;
+ i2c_set_clientdata(client, ts);
+
+ ts->attn_gpio = devm_gpiod_get_optional(&client->dev,
+ "attn", GPIOD_IN);
+ if (IS_ERR(ts->attn_gpio)) {
+ error = PTR_ERR(ts->attn_gpio);
+ if (error != -EPROBE_DEFER)
+ dev_err(&client->dev,
+ "Failed to get attention GPIO: %d\n", error);
+ return error;
+ }
+
+ ts->reset_gpio = devm_gpiod_get_optional(&client->dev,
+ "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ts->reset_gpio)) {
+ error = PTR_ERR(ts->reset_gpio);
+ if (error != -EPROBE_DEFER)
+ dev_err(&client->dev,
+ "Failed to get reset GPIO: %d\n", error);
+ return error;
+ }
+
+ sis_ts_reset(ts);
+
+ ts->input = input = devm_input_allocate_device(&client->dev);
+ if (!input) {
+ dev_err(&client->dev, "Failed to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ input->name = "SiS Touchscreen";
+ input->id.bustype = BUS_I2C;
+
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, SIS_MAX_X, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, SIS_MAX_Y, 0, 0);
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0, SIS_MAX_PRESSURE, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MAJOR,
+ 0, SIS_AREA_LENGTH_LONGER, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MINOR,
+ 0, SIS_AREA_LENGTH_SHORT, 0, 0);
+
+ error = input_mt_init_slots(input, SIS_MAX_FINGERS, INPUT_MT_DIRECT);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to initialize MT slots: %d\n", error);
+ return error;
+ }
+
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, sis_ts_irq_handler,
+ IRQF_ONESHOT,
+ client->name, ts);
+ if (error) {
+ dev_err(&client->dev, "Failed to request IRQ: %d\n", error);
+ return error;
+ }
+
+ error = input_register_device(ts->input);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to register input device: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id sis_ts_dt_ids[] = {
+ { .compatible = "sis,9200-ts" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sis_ts_dt_ids);
+#endif
+
+static const struct i2c_device_id sis_ts_id[] = {
+ { SIS_I2C_NAME, 0 },
+ { "9200-ts", 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, sis_ts_id);
+
+static struct i2c_driver sis_ts_driver = {
+ .driver = {
+ .name = SIS_I2C_NAME,
+ .of_match_table = of_match_ptr(sis_ts_dt_ids),
+ },
+ .probe = sis_ts_probe,
+ .id_table = sis_ts_id,
+};
+module_i2c_driver(sis_ts_driver);
+
+MODULE_DESCRIPTION("SiS 9200 Family Touchscreen Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Mika Penttilä <mika.penttila@nextfour.com>");
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index b7e8c11a6..4ea475775 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -151,7 +151,6 @@ struct sur40_state {
struct mutex lock;
struct vb2_queue queue;
- struct vb2_alloc_ctx *alloc_ctx;
struct list_head buf_list;
spinlock_t qlock;
int sequence;
@@ -580,19 +579,13 @@ static int sur40_probe(struct usb_interface *interface,
sur40->queue = sur40_queue;
sur40->queue.drv_priv = sur40;
sur40->queue.lock = &sur40->lock;
+ sur40->queue.dev = sur40->dev;
/* initialize the queue */
error = vb2_queue_init(&sur40->queue);
if (error)
goto err_unreg_v4l2;
- sur40->alloc_ctx = vb2_dma_sg_init_ctx(sur40->dev);
- if (IS_ERR(sur40->alloc_ctx)) {
- dev_err(sur40->dev, "Can't allocate buffer context");
- error = PTR_ERR(sur40->alloc_ctx);
- goto err_unreg_v4l2;
- }
-
sur40->vdev = sur40_video_device;
sur40->vdev.v4l2_dev = &sur40->v4l2;
sur40->vdev.lock = &sur40->lock;
@@ -633,7 +626,6 @@ static void sur40_disconnect(struct usb_interface *interface)
video_unregister_device(&sur40->vdev);
v4l2_device_unregister(&sur40->v4l2);
- vb2_dma_sg_cleanup_ctx(sur40->alloc_ctx);
input_unregister_polled_device(sur40->input);
input_free_polled_device(sur40->input);
@@ -653,13 +645,10 @@ static void sur40_disconnect(struct usb_interface *interface)
*/
static int sur40_queue_setup(struct vb2_queue *q,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
- struct sur40_state *sur40 = vb2_get_drv_priv(q);
-
if (q->num_buffers + *nbuffers < 3)
*nbuffers = 3 - q->num_buffers;
- alloc_ctxs[0] = sur40->alloc_ctx;
if (*nplanes)
return sizes[0] < sur40_video_format.sizeimage ? -EINVAL : 0;
@@ -794,7 +783,6 @@ static int sur40_vidioc_enum_fmt(struct file *file, void *priv,
{
if (f->index != 0)
return -EINVAL;
- strlcpy(f->description, "8-bit greyscale", sizeof(f->description));
f->pixelformat = V4L2_PIX_FMT_GREY;
f->flags = 0;
return 0;
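(Editorial aside: the sur40 hunks above track the videobuf2 API change in this kernel — the per-queue allocation context objects are gone, and the queue carries the DMA device directly. A sketch of the new-style setup, assuming videobuf2-core as of this release:

        static int example_init_queue(struct vb2_queue *q, struct device *dev,
                                      struct mutex *lock, void *priv)
        {
                q->drv_priv = priv;
                q->lock = lock;
                q->dev = dev;   /* replaces vb2_dma_sg_init_ctx()/alloc_ctxs[] */

                return vb2_queue_init(q);
        })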
diff --git a/drivers/input/touchscreen/surface3_spi.c b/drivers/input/touchscreen/surface3_spi.c
new file mode 100644
index 000000000..e12fb9b63
--- /dev/null
+++ b/drivers/input/touchscreen/surface3_spi.c
@@ -0,0 +1,427 @@
+/*
+ * Driver for Ntrig/Microsoft Touchscreens over SPI
+ *
+ * Copyright (c) 2016 Red Hat Inc.
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ */
+
+#include <linux/kernel.h>
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/acpi.h>
+
+#include <asm/unaligned.h>
+
+#define SURFACE3_PACKET_SIZE 264
+
+#define SURFACE3_REPORT_TOUCH 0xd2
+#define SURFACE3_REPORT_PEN 0x16
+
+struct surface3_ts_data {
+ struct spi_device *spi;
+ struct gpio_desc *gpiod_rst[2];
+ struct input_dev *input_dev;
+ struct input_dev *pen_input_dev;
+ int pen_tool;
+
+ u8 rd_buf[SURFACE3_PACKET_SIZE] ____cacheline_aligned;
+};
+
+struct surface3_ts_data_finger {
+ u8 status;
+ __le16 tracking_id;
+ __le16 x;
+ __le16 cx;
+ __le16 y;
+ __le16 cy;
+ __le16 width;
+ __le16 height;
+ u32 padding;
+} __packed;
+
+struct surface3_ts_data_pen {
+ u8 status;
+ __le16 x;
+ __le16 y;
+ __le16 pressure;
+ u8 padding;
+} __packed;
+
+static int surface3_spi_read(struct surface3_ts_data *ts_data)
+{
+ struct spi_device *spi = ts_data->spi;
+
+ memset(ts_data->rd_buf, 0, sizeof(ts_data->rd_buf));
+ return spi_read(spi, ts_data->rd_buf, sizeof(ts_data->rd_buf));
+}
+
+static void surface3_spi_report_touch(struct surface3_ts_data *ts_data,
+ struct surface3_ts_data_finger *finger)
+{
+ int st = finger->status & 0x01;
+ int slot;
+
+ slot = input_mt_get_slot_by_key(ts_data->input_dev,
+ get_unaligned_le16(&finger->tracking_id));
+ if (slot < 0)
+ return;
+
+ input_mt_slot(ts_data->input_dev, slot);
+ input_mt_report_slot_state(ts_data->input_dev, MT_TOOL_FINGER, st);
+ if (st) {
+ input_report_abs(ts_data->input_dev,
+ ABS_MT_POSITION_X,
+ get_unaligned_le16(&finger->x));
+ input_report_abs(ts_data->input_dev,
+ ABS_MT_POSITION_Y,
+ get_unaligned_le16(&finger->y));
+ input_report_abs(ts_data->input_dev,
+ ABS_MT_WIDTH_MAJOR,
+ get_unaligned_le16(&finger->width));
+ input_report_abs(ts_data->input_dev,
+ ABS_MT_WIDTH_MINOR,
+ get_unaligned_le16(&finger->height));
+ }
+}
+
+static void surface3_spi_process_touch(struct surface3_ts_data *ts_data, u8 *data)
+{
+ u16 timestamp;
+ unsigned int i;
+
+ timestamp = get_unaligned_le16(&data[15]);
+
+ for (i = 0; i < 13; i++) {
+ struct surface3_ts_data_finger *finger;
+
+ finger = (struct surface3_ts_data_finger *)&data[17 +
+ i * sizeof(struct surface3_ts_data_finger)];
+
+ /*
+ * When bit 4 (0x10) of status is set, it marks the end of the report:
+ * - touch present: 0xe7
+ * - touch released: 0xe4
+ * - nothing valuable: 0xff
+ */
+ if (finger->status & 0x10)
+ break;
+
+ surface3_spi_report_touch(ts_data, finger);
+ }
+
+ input_mt_sync_frame(ts_data->input_dev);
+ input_sync(ts_data->input_dev);
+}
+
+static void surface3_spi_report_pen(struct surface3_ts_data *ts_data,
+ struct surface3_ts_data_pen *pen)
+{
+ struct input_dev *dev = ts_data->pen_input_dev;
+ int st = pen->status;
+ int prox = st & 0x01;
+ int rubber = st & 0x18;
+ int tool = (prox && rubber) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
+
+ /* fake proximity out to switch tools */
+ if (ts_data->pen_tool != tool) {
+ input_report_key(dev, ts_data->pen_tool, 0);
+ input_sync(dev);
+ ts_data->pen_tool = tool;
+ }
+
+ input_report_key(dev, BTN_TOUCH, st & 0x12);
+
+ input_report_key(dev, ts_data->pen_tool, prox);
+
+ if (st) {
+ input_report_key(dev,
+ BTN_STYLUS,
+ st & 0x04);
+
+ input_report_abs(dev,
+ ABS_X,
+ get_unaligned_le16(&pen->x));
+ input_report_abs(dev,
+ ABS_Y,
+ get_unaligned_le16(&pen->y));
+ input_report_abs(dev,
+ ABS_PRESSURE,
+ get_unaligned_le16(&pen->pressure));
+ }
+}
+
+static void surface3_spi_process_pen(struct surface3_ts_data *ts_data, u8 *data)
+{
+ struct surface3_ts_data_pen *pen;
+
+ pen = (struct surface3_ts_data_pen *)&data[15];
+
+ surface3_spi_report_pen(ts_data, pen);
+ input_sync(ts_data->pen_input_dev);
+}
+
+static void surface3_spi_process(struct surface3_ts_data *ts_data)
+{
+ const char header[] = {
+ 0xff, 0xff, 0xff, 0xff, 0xa5, 0x5a, 0xe7, 0x7e, 0x01
+ };
+ u8 *data = ts_data->rd_buf;
+
+ if (memcmp(header, data, sizeof(header)))
+ dev_err(&ts_data->spi->dev,
+ "%s header error: %*ph, ignoring...\n",
+ __func__, (int)sizeof(header), data);
+
+ switch (data[9]) {
+ case SURFACE3_REPORT_TOUCH:
+ surface3_spi_process_touch(ts_data, data);
+ break;
+ case SURFACE3_REPORT_PEN:
+ surface3_spi_process_pen(ts_data, data);
+ break;
+ default:
+ dev_err(&ts_data->spi->dev,
+ "%s unknown packet type: %x, ignoring...\n",
+ __func__, data[9]);
+ break;
+ }
+}
+
+static irqreturn_t surface3_spi_irq_handler(int irq, void *dev_id)
+{
+ struct surface3_ts_data *data = dev_id;
+
+ if (surface3_spi_read(data))
+ return IRQ_HANDLED;
+
+ dev_dbg(&data->spi->dev, "%s received -> %*ph\n",
+ __func__, SURFACE3_PACKET_SIZE, data->rd_buf);
+ surface3_spi_process(data);
+
+ return IRQ_HANDLED;
+}
+
+static void surface3_spi_power(struct surface3_ts_data *data, bool on)
+{
+ gpiod_set_value(data->gpiod_rst[0], on);
+ gpiod_set_value(data->gpiod_rst[1], on);
+ /* let the device settle a little */
+ msleep(20);
+}
+
+/**
+ * surface3_spi_get_gpio_config - Get GPIO config from ACPI/DT
+ *
+ * @data: surface3_ts_data pointer
+ */
+static int surface3_spi_get_gpio_config(struct surface3_ts_data *data)
+{
+ int error;
+ struct device *dev;
+ struct gpio_desc *gpiod;
+ int i;
+
+ dev = &data->spi->dev;
+
+ /* Get the reset lines GPIO pin number */
+ for (i = 0; i < 2; i++) {
+ gpiod = devm_gpiod_get_index(dev, NULL, i, GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod)) {
+ error = PTR_ERR(gpiod);
+ if (error != -EPROBE_DEFER)
+ dev_err(dev,
+ "Failed to get power GPIO %d: %d\n",
+ i,
+ error);
+ return error;
+ }
+
+ data->gpiod_rst[i] = gpiod;
+ }
+
+ return 0;
+}
+
+static int surface3_spi_create_touch_input(struct surface3_ts_data *data)
+{
+ struct input_dev *input;
+ int error;
+
+ input = devm_input_allocate_device(&data->spi->dev);
+ if (!input)
+ return -ENOMEM;
+
+ data->input_dev = input;
+
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, 9600, 0, 0);
+ input_abs_set_res(input, ABS_MT_POSITION_X, 40);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, 7200, 0, 0);
+ input_abs_set_res(input, ABS_MT_POSITION_Y, 48);
+ input_set_abs_params(input, ABS_MT_WIDTH_MAJOR, 0, 1024, 0, 0);
+ input_set_abs_params(input, ABS_MT_WIDTH_MINOR, 0, 1024, 0, 0);
+ input_mt_init_slots(input, 10, INPUT_MT_DIRECT);
+
+ input->name = "Surface3 SPI Capacitive TouchScreen";
+ input->phys = "input/ts";
+ input->id.bustype = BUS_SPI;
+ input->id.vendor = 0x045e; /* Microsoft */
+ input->id.product = 0x0001;
+ input->id.version = 0x0000;
+
+ error = input_register_device(input);
+ if (error) {
+ dev_err(&data->spi->dev,
+ "Failed to register input device: %d", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int surface3_spi_create_pen_input(struct surface3_ts_data *data)
+{
+ struct input_dev *input;
+ int error;
+
+ input = devm_input_allocate_device(&data->spi->dev);
+ if (!input)
+ return -ENOMEM;
+
+ data->pen_input_dev = input;
+ data->pen_tool = BTN_TOOL_PEN;
+
+ __set_bit(INPUT_PROP_DIRECT, input->propbit);
+ __set_bit(INPUT_PROP_POINTER, input->propbit);
+ input_set_abs_params(input, ABS_X, 0, 9600, 0, 0);
+ input_abs_set_res(input, ABS_X, 40);
+ input_set_abs_params(input, ABS_Y, 0, 7200, 0, 0);
+ input_abs_set_res(input, ABS_Y, 48);
+ input_set_abs_params(input, ABS_PRESSURE, 0, 1024, 0, 0);
+ input_set_capability(input, EV_KEY, BTN_TOUCH);
+ input_set_capability(input, EV_KEY, BTN_STYLUS);
+ input_set_capability(input, EV_KEY, BTN_TOOL_PEN);
+ input_set_capability(input, EV_KEY, BTN_TOOL_RUBBER);
+
+ input->name = "Surface3 SPI Pen Input";
+ input->phys = "input/ts";
+ input->id.bustype = BUS_SPI;
+ input->id.vendor = 0x045e; /* Microsoft */
+ input->id.product = 0x0002;
+ input->id.version = 0x0000;
+
+ error = input_register_device(input);
+ if (error) {
+ dev_err(&data->spi->dev,
+ "Failed to register input device: %d", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int surface3_spi_probe(struct spi_device *spi)
+{
+ struct surface3_ts_data *data;
+ int error;
+
+ /* Set up SPI */
+ spi->bits_per_word = 8;
+ spi->mode = SPI_MODE_0;
+ error = spi_setup(spi);
+ if (error)
+ return error;
+
+ data = devm_kzalloc(&spi->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->spi = spi;
+ spi_set_drvdata(spi, data);
+
+ error = surface3_spi_get_gpio_config(data);
+ if (error)
+ return error;
+
+ surface3_spi_power(data, true);
+ surface3_spi_power(data, false);
+ surface3_spi_power(data, true);
+
+ error = surface3_spi_create_touch_input(data);
+ if (error)
+ return error;
+
+ error = surface3_spi_create_pen_input(data);
+ if (error)
+ return error;
+
+ error = devm_request_threaded_irq(&spi->dev, spi->irq,
+ NULL, surface3_spi_irq_handler,
+ IRQF_ONESHOT,
+ "Surface3-irq", data);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static int __maybe_unused surface3_spi_suspend(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct surface3_ts_data *data = spi_get_drvdata(spi);
+
+ disable_irq(data->spi->irq);
+
+ surface3_spi_power(data, false);
+
+ return 0;
+}
+
+static int __maybe_unused surface3_spi_resume(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct surface3_ts_data *data = spi_get_drvdata(spi);
+
+ surface3_spi_power(data, true);
+
+ enable_irq(data->spi->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(surface3_spi_pm_ops,
+ surface3_spi_suspend,
+ surface3_spi_resume);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id surface3_spi_acpi_match[] = {
+ { "MSHW0037", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, surface3_spi_acpi_match);
+#endif
+
+static struct spi_driver surface3_spi_driver = {
+ .driver = {
+ .name = "Surface3-spi",
+ .acpi_match_table = ACPI_PTR(surface3_spi_acpi_match),
+ .pm = &surface3_spi_pm_ops,
+ },
+ .probe = surface3_spi_probe,
+};
+
+module_spi_driver(surface3_spi_driver);
+
+MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
+MODULE_DESCRIPTION("Surface 3 SPI touchscreen driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index 8b3f15ca7..7953381d9 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -406,7 +406,7 @@ static int titsc_probe(struct platform_device *pdev)
int err;
/* Allocate memory for device */
- ts_dev = kzalloc(sizeof(struct titsc), GFP_KERNEL);
+ ts_dev = kzalloc(sizeof(*ts_dev), GFP_KERNEL);
input_dev = input_allocate_device();
if (!ts_dev || !input_dev) {
dev_err(&pdev->dev, "failed to allocate memory.\n");
diff --git a/drivers/input/touchscreen/tsc200x-core.c b/drivers/input/touchscreen/tsc200x-core.c
index dfa7f1c4f..b7059ed88 100644
--- a/drivers/input/touchscreen/tsc200x-core.c
+++ b/drivers/input/touchscreen/tsc200x-core.c
@@ -568,7 +568,7 @@ int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
input_set_abs_params(input_dev, ABS_PRESSURE, 0, max_p, fudge_p, 0);
if (np)
- touchscreen_parse_properties(input_dev, false);
+ touchscreen_parse_properties(input_dev, false, NULL);
input_dev->open = tsc200x_open;
input_dev->close = tsc200x_close;
diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
index b6fc4bde7..85e95725d 100644
--- a/drivers/input/touchscreen/wacom_w8001.c
+++ b/drivers/input/touchscreen/wacom_w8001.c
@@ -518,13 +518,21 @@ static int w8001_setup_touch(struct w8001 *w8001, char *basename,
w8001->pktlen = W8001_PKTLEN_TOUCH2FG;
__set_bit(BTN_TOOL_DOUBLETAP, dev->keybit);
- input_mt_init_slots(dev, 2, 0);
+ error = input_mt_init_slots(dev, 2, 0);
+ if (error) {
+ dev_err(&w8001->serio->dev,
+ "failed to initialize MT slots: %d\n", error);
+ return error;
+ }
+
input_set_abs_params(dev, ABS_MT_POSITION_X,
0, touch.x, 0, 0);
input_set_abs_params(dev, ABS_MT_POSITION_Y,
0, touch.y, 0, 0);
input_set_abs_params(dev, ABS_MT_TOOL_TYPE,
0, MT_TOOL_MAX, 0, 0);
+ input_abs_set_res(dev, ABS_MT_POSITION_X, touch.panel_res);
+ input_abs_set_res(dev, ABS_MT_POSITION_Y, touch.panel_res);
strlcat(basename, " 2FG", basename_sz);
if (w8001->max_pen_x && w8001->max_pen_y)
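(Editorial aside: the two input_abs_set_res() calls added above matter because resolution on absolute axes is reported to userspace in units per millimetre, letting clients convert touch coordinates into physical distance. A minimal sketch of the pairing:

        /* illustrative: range plus resolution for one MT axis */
        static void example_setup_axis(struct input_dev *dev,
                                       int max, int units_per_mm)
        {
                input_set_abs_params(dev, ABS_MT_POSITION_X, 0, max, 0, 0);
                input_abs_set_res(dev, ABS_MT_POSITION_X, units_per_mm);
        })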
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index ad0860383..d432ca828 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -89,8 +89,8 @@ config MSM_IOMMU
bool "MSM IOMMU Support"
depends on ARM
depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST
- depends on BROKEN
select IOMMU_API
+ select IOMMU_IO_PGTABLE_ARMV7S
help
Support for the IOMMUs found on certain Qualcomm SOCs.
These IOMMUs allow virtualization of the address space used by most
@@ -111,6 +111,7 @@ config AMD_IOMMU
select PCI_PRI
select PCI_PASID
select IOMMU_API
+ select IOMMU_IOVA
depends on X86_64 && PCI && ACPI
---help---
With this option you can enable support for AMD IOMMU hardware in
@@ -343,4 +344,22 @@ config MTK_IOMMU
If unsure, say N here.
+config MTK_IOMMU_V1
+ bool "MTK IOMMU Version 1 (M4U gen1) Support"
+ depends on ARM
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select ARM_DMA_USE_IOMMU
+ select IOMMU_API
+ select MEMORY
+ select MTK_SMI
+ select COMMON_CLK_MT2701_MMSYS
+ select COMMON_CLK_MT2701_IMGSYS
+ select COMMON_CLK_MT2701_VDECSYS
+ help
+ Support for the M4U on certain Mediatek SoCs. The M4U generation 1 HW
+ is the Multimedia Memory Management Unit. This option enables remapping
+ of DMA memory accesses for the multimedia subsystem.
+
+ If unsure, say N here.
+
endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index c6edb31bf..195f7b997 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
obj-$(CONFIG_IOMMU_IOVA) += iova.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
-obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
+obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
@@ -18,6 +18,7 @@ obj-$(CONFIG_INTEL_IOMMU_SVM) += intel-svm.o
obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
obj-$(CONFIG_MTK_IOMMU) += mtk_iommu.o
+obj-$(CONFIG_MTK_IOMMU_V1) += mtk_iommu_v1.o
obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 2511c8b6a..96de97a46 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -21,6 +21,7 @@
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/amba/bus.h>
+#include <linux/platform_device.h>
#include <linux/pci-ats.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
@@ -38,6 +39,7 @@
#include <linux/dma-contiguous.h>
#include <linux/irqdomain.h>
#include <linux/percpu.h>
+#include <linux/iova.h>
#include <asm/irq_remapping.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
@@ -56,6 +58,17 @@
#define LOOP_TIMEOUT 100000
+/* IO virtual address start page frame number */
+#define IOVA_START_PFN (1)
+#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
+#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
+
+/* Reserved IOVA ranges */
+#define MSI_RANGE_START (0xfee00000)
+#define MSI_RANGE_END (0xfeefffff)
+#define HT_RANGE_START (0xfd00000000ULL)
+#define HT_RANGE_END (0xffffffffffULL)
+
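(Editorial aside: elsewhere in this patch, outside the hunks shown here, these constants feed the reserved IOVA domain that every dma_ops domain copies at allocation time, so the ranges are never handed out for DMA. A hedged sketch of how such a reservation looks with the iova API:

        static void example_reserve_ranges(struct iova_domain *iovad)
        {
                /* MSI window and HyperTransport range stay off-limits */
                reserve_iova(iovad, IOVA_PFN(MSI_RANGE_START),
                             IOVA_PFN(MSI_RANGE_END));
                reserve_iova(iovad, IOVA_PFN(HT_RANGE_START),
                             IOVA_PFN(HT_RANGE_END));
        })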
/*
* This bitmap is used to advertise the page sizes our hardware support
* to the IOMMU core, which will then use this information to split
@@ -76,6 +89,25 @@ LIST_HEAD(ioapic_map);
LIST_HEAD(hpet_map);
LIST_HEAD(acpihid_map);
+#define FLUSH_QUEUE_SIZE 256
+
+struct flush_queue_entry {
+ unsigned long iova_pfn;
+ unsigned long pages;
+ struct dma_ops_domain *dma_dom;
+};
+
+struct flush_queue {
+ spinlock_t lock;
+ unsigned next;
+ struct flush_queue_entry *entries;
+};
+
+DEFINE_PER_CPU(struct flush_queue, flush_queue);
+
+static atomic_t queue_timer_on;
+static struct timer_list queue_timer;
+
/*
* Domain for untranslated devices - only allocated
* if iommu=pt passed on kernel cmd line.
@@ -121,44 +153,19 @@ static int protection_domain_init(struct protection_domain *domain);
static void detach_device(struct device *dev);
/*
- * For dynamic growth the aperture size is split into ranges of 128MB of
- * DMA address space each. This struct represents one such range.
- */
-struct aperture_range {
-
- spinlock_t bitmap_lock;
-
- /* address allocation bitmap */
- unsigned long *bitmap;
- unsigned long offset;
- unsigned long next_bit;
-
- /*
- * Array of PTE pages for the aperture. In this array we save all the
- * leaf pages of the domain page table used for the aperture. This way
- * we don't need to walk the page table to find a specific PTE. We can
- * just calculate its address in constant time.
- */
- u64 *pte_pages[64];
-};
-
-/*
* Data container for a dma_ops specific protection domain
*/
struct dma_ops_domain {
/* generic protection domain information */
struct protection_domain domain;
- /* size of the aperture for the mappings */
- unsigned long aperture_size;
-
- /* aperture index we start searching for free addresses */
- u32 __percpu *next_index;
-
- /* address space relevant data */
- struct aperture_range *aperture[APERTURE_MAX_RANGES];
+ /* IOVA RB-Tree */
+ struct iova_domain iovad;
};
+static struct iova_domain reserved_iova_ranges;
+static struct lock_class_key reserved_rbtree_key;
+
/****************************************************************************
*
* Helper functions
@@ -224,6 +231,12 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
return container_of(dom, struct protection_domain, domain);
}
+static struct dma_ops_domain *to_dma_ops_domain(struct protection_domain *domain)
+{
+ BUG_ON(domain->flags != PD_DMA_OPS_MASK);
+ return container_of(domain, struct dma_ops_domain, domain);
+}
+
static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
@@ -391,43 +404,6 @@ static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
}
/*
- * This function actually applies the mapping to the page table of the
- * dma_ops domain.
- */
-static void alloc_unity_mapping(struct dma_ops_domain *dma_dom,
- struct unity_map_entry *e)
-{
- u64 addr;
-
- for (addr = e->address_start; addr < e->address_end;
- addr += PAGE_SIZE) {
- if (addr < dma_dom->aperture_size)
- __set_bit(addr >> PAGE_SHIFT,
- dma_dom->aperture[0]->bitmap);
- }
-}
-
-/*
- * Inits the unity mappings required for a specific device
- */
-static void init_unity_mappings_for_device(struct device *dev,
- struct dma_ops_domain *dma_dom)
-{
- struct unity_map_entry *e;
- int devid;
-
- devid = get_device_id(dev);
- if (devid < 0)
- return;
-
- list_for_each_entry(e, &amd_iommu_unity_map, list) {
- if (!(devid >= e->devid_start && devid <= e->devid_end))
- continue;
- alloc_unity_mapping(dma_dom, e);
- }
-}
-
-/*
* This function checks if the driver got a valid device from the caller to
* avoid dereferencing invalid pointers.
*/
@@ -454,24 +430,12 @@ static bool check_device(struct device *dev)
static void init_iommu_group(struct device *dev)
{
- struct dma_ops_domain *dma_domain;
- struct iommu_domain *domain;
struct iommu_group *group;
group = iommu_group_get_for_dev(dev);
if (IS_ERR(group))
return;
- domain = iommu_group_default_domain(group);
- if (!domain)
- goto out;
-
- if (to_pdomain(domain)->flags == PD_DMA_OPS_MASK) {
- dma_domain = to_pdomain(domain)->priv;
- init_unity_mappings_for_device(dev, dma_domain);
- }
-
-out:
iommu_group_put(group);
}
@@ -1222,7 +1186,7 @@ static void domain_flush_complete(struct protection_domain *domain)
int i;
for (i = 0; i < amd_iommus_present; ++i) {
- if (!domain->dev_iommu[i])
+ if (domain && !domain->dev_iommu[i])
continue;
/*
@@ -1399,8 +1363,9 @@ static u64 *fetch_pte(struct protection_domain *domain,
static int iommu_map_page(struct protection_domain *dom,
unsigned long bus_addr,
unsigned long phys_addr,
+ unsigned long page_size,
int prot,
- unsigned long page_size)
+ gfp_t gfp)
{
u64 __pte, *pte;
int i, count;
@@ -1412,7 +1377,7 @@ static int iommu_map_page(struct protection_domain *dom,
return -EINVAL;
count = PAGE_SIZE_PTE_COUNT(page_size);
- pte = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
+ pte = alloc_pte(dom, bus_addr, page_size, NULL, gfp);
if (!pte)
return -ENOMEM;
@@ -1476,320 +1441,37 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
/****************************************************************************
*
* The next functions belong to the address allocator for the dma_ops
- * interface functions. They work like the allocators in the other IOMMU
- * drivers. Its basically a bitmap which marks the allocated pages in
- * the aperture. Maybe it could be enhanced in the future to a more
- * efficient allocator.
+ * interface functions.
*
****************************************************************************/
-/*
- * The address allocator core functions.
- *
- * called with domain->lock held
- */
-
-/*
- * Used to reserve address ranges in the aperture (e.g. for exclusion
- * ranges.
- */
-static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
- unsigned long start_page,
- unsigned int pages)
-{
- unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;
-
- if (start_page + pages > last_page)
- pages = last_page - start_page;
-
- for (i = start_page; i < start_page + pages; ++i) {
- int index = i / APERTURE_RANGE_PAGES;
- int page = i % APERTURE_RANGE_PAGES;
- __set_bit(page, dom->aperture[index]->bitmap);
- }
-}
-
-/*
- * This function is used to add a new aperture range to an existing
- * aperture in case of dma_ops domain allocation or address allocation
- * failure.
- */
-static int alloc_new_range(struct dma_ops_domain *dma_dom,
- bool populate, gfp_t gfp)
-{
- int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
- unsigned long i, old_size, pte_pgsize;
- struct aperture_range *range;
- struct amd_iommu *iommu;
- unsigned long flags;
-
-#ifdef CONFIG_IOMMU_STRESS
- populate = false;
-#endif
-
- if (index >= APERTURE_MAX_RANGES)
- return -ENOMEM;
-
- range = kzalloc(sizeof(struct aperture_range), gfp);
- if (!range)
- return -ENOMEM;
-
- range->bitmap = (void *)get_zeroed_page(gfp);
- if (!range->bitmap)
- goto out_free;
-
- range->offset = dma_dom->aperture_size;
-
- spin_lock_init(&range->bitmap_lock);
-
- if (populate) {
- unsigned long address = dma_dom->aperture_size;
- int i, num_ptes = APERTURE_RANGE_PAGES / 512;
- u64 *pte, *pte_page;
-
- for (i = 0; i < num_ptes; ++i) {
- pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE,
- &pte_page, gfp);
- if (!pte)
- goto out_free;
-
- range->pte_pages[i] = pte_page;
-
- address += APERTURE_RANGE_SIZE / 64;
- }
- }
-
- spin_lock_irqsave(&dma_dom->domain.lock, flags);
-
- /* First take the bitmap_lock and then publish the range */
- spin_lock(&range->bitmap_lock);
-
- old_size = dma_dom->aperture_size;
- dma_dom->aperture[index] = range;
- dma_dom->aperture_size += APERTURE_RANGE_SIZE;
-
- /* Reserve address range used for MSI messages */
- if (old_size < MSI_ADDR_BASE_LO &&
- dma_dom->aperture_size > MSI_ADDR_BASE_LO) {
- unsigned long spage;
- int pages;
-
- pages = iommu_num_pages(MSI_ADDR_BASE_LO, 0x10000, PAGE_SIZE);
- spage = MSI_ADDR_BASE_LO >> PAGE_SHIFT;
-
- dma_ops_reserve_addresses(dma_dom, spage, pages);
- }
-
- /* Initialize the exclusion range if necessary */
- for_each_iommu(iommu) {
- if (iommu->exclusion_start &&
- iommu->exclusion_start >= dma_dom->aperture[index]->offset
- && iommu->exclusion_start < dma_dom->aperture_size) {
- unsigned long startpage;
- int pages = iommu_num_pages(iommu->exclusion_start,
- iommu->exclusion_length,
- PAGE_SIZE);
- startpage = iommu->exclusion_start >> PAGE_SHIFT;
- dma_ops_reserve_addresses(dma_dom, startpage, pages);
- }
- }
-
- /*
- * Check for areas already mapped as present in the new aperture
- * range and mark those pages as reserved in the allocator. Such
- * mappings may already exist as a result of requested unity
- * mappings for devices.
- */
- for (i = dma_dom->aperture[index]->offset;
- i < dma_dom->aperture_size;
- i += pte_pgsize) {
- u64 *pte = fetch_pte(&dma_dom->domain, i, &pte_pgsize);
- if (!pte || !IOMMU_PTE_PRESENT(*pte))
- continue;
-
- dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT,
- pte_pgsize >> 12);
- }
-
- update_domain(&dma_dom->domain);
-
- spin_unlock(&range->bitmap_lock);
-
- spin_unlock_irqrestore(&dma_dom->domain.lock, flags);
-
- return 0;
-
-out_free:
- update_domain(&dma_dom->domain);
-
- free_page((unsigned long)range->bitmap);
-
- kfree(range);
-
- return -ENOMEM;
-}
-
-static dma_addr_t dma_ops_aperture_alloc(struct dma_ops_domain *dom,
- struct aperture_range *range,
- unsigned long pages,
- unsigned long dma_mask,
- unsigned long boundary_size,
- unsigned long align_mask,
- bool trylock)
-{
- unsigned long offset, limit, flags;
- dma_addr_t address;
- bool flush = false;
-
- offset = range->offset >> PAGE_SHIFT;
- limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset,
- dma_mask >> PAGE_SHIFT);
-
- if (trylock) {
- if (!spin_trylock_irqsave(&range->bitmap_lock, flags))
- return -1;
- } else {
- spin_lock_irqsave(&range->bitmap_lock, flags);
- }
-
- address = iommu_area_alloc(range->bitmap, limit, range->next_bit,
- pages, offset, boundary_size, align_mask);
- if (address == -1) {
- /* Nothing found, retry one time */
- address = iommu_area_alloc(range->bitmap, limit,
- 0, pages, offset, boundary_size,
- align_mask);
- flush = true;
- }
-
- if (address != -1)
- range->next_bit = address + pages;
-
- spin_unlock_irqrestore(&range->bitmap_lock, flags);
-
- if (flush) {
- domain_flush_tlb(&dom->domain);
- domain_flush_complete(&dom->domain);
- }
-
- return address;
-}
-
-static unsigned long dma_ops_area_alloc(struct device *dev,
- struct dma_ops_domain *dom,
- unsigned int pages,
- unsigned long align_mask,
- u64 dma_mask)
-{
- unsigned long boundary_size, mask;
- unsigned long address = -1;
- bool first = true;
- u32 start, i;
-
- preempt_disable();
-
- mask = dma_get_seg_boundary(dev);
-
-again:
- start = this_cpu_read(*dom->next_index);
-
- /* Sanity check - is it really necessary? */
- if (unlikely(start > APERTURE_MAX_RANGES)) {
- start = 0;
- this_cpu_write(*dom->next_index, 0);
- }
-
- boundary_size = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT :
- 1UL << (BITS_PER_LONG - PAGE_SHIFT);
-
- for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
- struct aperture_range *range;
- int index;
-
- index = (start + i) % APERTURE_MAX_RANGES;
-
- range = dom->aperture[index];
-
- if (!range || range->offset >= dma_mask)
- continue;
-
- address = dma_ops_aperture_alloc(dom, range, pages,
- dma_mask, boundary_size,
- align_mask, first);
- if (address != -1) {
- address = range->offset + (address << PAGE_SHIFT);
- this_cpu_write(*dom->next_index, index);
- break;
- }
- }
-
- if (address == -1 && first) {
- first = false;
- goto again;
- }
- preempt_enable();
-
- return address;
-}
-
-static unsigned long dma_ops_alloc_addresses(struct device *dev,
- struct dma_ops_domain *dom,
- unsigned int pages,
- unsigned long align_mask,
- u64 dma_mask)
+static unsigned long dma_ops_alloc_iova(struct device *dev,
+ struct dma_ops_domain *dma_dom,
+ unsigned int pages, u64 dma_mask)
{
- unsigned long address = -1;
+ unsigned long pfn = 0;
- while (address == -1) {
- address = dma_ops_area_alloc(dev, dom, pages,
- align_mask, dma_mask);
-
- if (address == -1 && alloc_new_range(dom, false, GFP_ATOMIC))
- break;
- }
+ pages = __roundup_pow_of_two(pages);
- if (unlikely(address == -1))
- address = DMA_ERROR_CODE;
+ if (dma_mask > DMA_BIT_MASK(32))
+ pfn = alloc_iova_fast(&dma_dom->iovad, pages,
+ IOVA_PFN(DMA_BIT_MASK(32)));
- WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);
+ if (!pfn)
+ pfn = alloc_iova_fast(&dma_dom->iovad, pages, IOVA_PFN(dma_mask));
- return address;
+ return (pfn << PAGE_SHIFT);
}
-/*
- * The address free function.
- *
- * called with domain->lock held
- */
-static void dma_ops_free_addresses(struct dma_ops_domain *dom,
- unsigned long address,
- unsigned int pages)
+static void dma_ops_free_iova(struct dma_ops_domain *dma_dom,
+ unsigned long address,
+ unsigned int pages)
{
- unsigned i = address >> APERTURE_RANGE_SHIFT;
- struct aperture_range *range = dom->aperture[i];
- unsigned long flags;
-
- BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL);
-
-#ifdef CONFIG_IOMMU_STRESS
- if (i < 4)
- return;
-#endif
-
- if (amd_iommu_unmap_flush) {
- domain_flush_tlb(&dom->domain);
- domain_flush_complete(&dom->domain);
- }
-
- address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
-
- spin_lock_irqsave(&range->bitmap_lock, flags);
- if (address + pages > range->next_bit)
- range->next_bit = address + pages;
- bitmap_clear(range->bitmap, address, pages);
- spin_unlock_irqrestore(&range->bitmap_lock, flags);
+ pages = __roundup_pow_of_two(pages);
+ address >>= PAGE_SHIFT;
+ free_iova_fast(&dma_dom->iovad, address, pages);
}
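(Editorial aside: dma_ops_alloc_iova() and dma_ops_free_iova() apply the same __roundup_pow_of_two() to the page count — the iova fast path caches ranges by size, so both sides must round identically or freed ranges would be recycled at the wrong size. A usage sketch:

        static int example_roundtrip(struct device *dev,
                                     struct dma_ops_domain *dom)
        {
                unsigned long addr;

                /* 5 pages rounds up to 8 inside the allocator */
                addr = dma_ops_alloc_iova(dev, dom, 5, DMA_BIT_MASK(32));
                if (!addr)
                        return -ENOMEM;

                /* the free side rounds 5 to 8 again, keeping both in sync */
                dma_ops_free_iova(dom, addr, 5);

                return 0;
        })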
/****************************************************************************
@@ -1963,44 +1645,18 @@ static void free_gcr3_table(struct protection_domain *domain)
*/
static void dma_ops_domain_free(struct dma_ops_domain *dom)
{
- int i;
-
if (!dom)
return;
- free_percpu(dom->next_index);
-
del_domain_from_list(&dom->domain);
- free_pagetable(&dom->domain);
+ put_iova_domain(&dom->iovad);
- for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
- if (!dom->aperture[i])
- continue;
- free_page((unsigned long)dom->aperture[i]->bitmap);
- kfree(dom->aperture[i]);
- }
+ free_pagetable(&dom->domain);
kfree(dom);
}
-static int dma_ops_domain_alloc_apertures(struct dma_ops_domain *dma_dom,
- int max_apertures)
-{
- int ret, i, apertures;
-
- apertures = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
- ret = 0;
-
- for (i = apertures; i < max_apertures; ++i) {
- ret = alloc_new_range(dma_dom, false, GFP_KERNEL);
- if (ret)
- break;
- }
-
- return ret;
-}
-
/*
* Allocates a new protection domain usable for the dma_ops functions.
* It also initializes the page table and the address allocator data
@@ -2009,7 +1665,6 @@ static int dma_ops_domain_alloc_apertures(struct dma_ops_domain *dma_dom,
static struct dma_ops_domain *dma_ops_domain_alloc(void)
{
struct dma_ops_domain *dma_dom;
- int cpu;
dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
if (!dma_dom)
@@ -2018,30 +1673,19 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void)
if (protection_domain_init(&dma_dom->domain))
goto free_dma_dom;
- dma_dom->next_index = alloc_percpu(u32);
- if (!dma_dom->next_index)
- goto free_dma_dom;
-
- dma_dom->domain.mode = PAGE_MODE_2_LEVEL;
+ dma_dom->domain.mode = PAGE_MODE_3_LEVEL;
dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
dma_dom->domain.flags = PD_DMA_OPS_MASK;
- dma_dom->domain.priv = dma_dom;
if (!dma_dom->domain.pt_root)
goto free_dma_dom;
- add_domain_to_list(&dma_dom->domain);
-
- if (alloc_new_range(dma_dom, true, GFP_KERNEL))
- goto free_dma_dom;
+ init_iova_domain(&dma_dom->iovad, PAGE_SIZE,
+ IOVA_START_PFN, DMA_32BIT_PFN);
- /*
- * mark the first page as allocated so we never return 0 as
- * a valid dma-address. So we can use 0 as error value
- */
- dma_dom->aperture[0]->bitmap[0] = 1;
+ /* Initialize reserved ranges */
+ copy_reserved_iova(&reserved_iova_ranges, &dma_dom->iovad);
- for_each_possible_cpu(cpu)
- *per_cpu_ptr(dma_dom->next_index, cpu) = 0;
+ add_domain_to_list(&dma_dom->domain);
return dma_dom;
@@ -2484,6 +2128,92 @@ static struct iommu_group *amd_iommu_device_group(struct device *dev)
*
*****************************************************************************/
+static void __queue_flush(struct flush_queue *queue)
+{
+ struct protection_domain *domain;
+ unsigned long flags;
+ int idx;
+
+ /* First flush TLB of all known domains */
+ spin_lock_irqsave(&amd_iommu_pd_lock, flags);
+ list_for_each_entry(domain, &amd_iommu_pd_list, list)
+ domain_flush_tlb(domain);
+ spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
+
+ /* Wait until flushes have completed */
+ domain_flush_complete(NULL);
+
+ for (idx = 0; idx < queue->next; ++idx) {
+ struct flush_queue_entry *entry;
+
+ entry = queue->entries + idx;
+
+ free_iova_fast(&entry->dma_dom->iovad,
+ entry->iova_pfn,
+ entry->pages);
+
+ /* Not really necessary, just to make sure we catch any bugs */
+ entry->dma_dom = NULL;
+ }
+
+ queue->next = 0;
+}
+
+static void queue_flush_all(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct flush_queue *queue;
+ unsigned long flags;
+
+ queue = per_cpu_ptr(&flush_queue, cpu);
+ spin_lock_irqsave(&queue->lock, flags);
+ if (queue->next > 0)
+ __queue_flush(queue);
+ spin_unlock_irqrestore(&queue->lock, flags);
+ }
+}
+
+static void queue_flush_timeout(unsigned long unused)
+{
+ atomic_set(&queue_timer_on, 0);
+ queue_flush_all();
+}
+
+static void queue_add(struct dma_ops_domain *dma_dom,
+ unsigned long address, unsigned long pages)
+{
+ struct flush_queue_entry *entry;
+ struct flush_queue *queue;
+ unsigned long flags;
+ int idx;
+
+ pages = __roundup_pow_of_two(pages);
+ address >>= PAGE_SHIFT;
+
+ queue = get_cpu_ptr(&flush_queue);
+ spin_lock_irqsave(&queue->lock, flags);
+
+ if (queue->next == FLUSH_QUEUE_SIZE)
+ __queue_flush(queue);
+
+ idx = queue->next++;
+ entry = queue->entries + idx;
+
+ entry->iova_pfn = address;
+ entry->pages = pages;
+ entry->dma_dom = dma_dom;
+
+ spin_unlock_irqrestore(&queue->lock, flags);
+
+ if (atomic_cmpxchg(&queue_timer_on, 0, 1) == 0)
+ mod_timer(&queue_timer, jiffies + msecs_to_jiffies(10));
+
+ put_cpu_ptr(&flush_queue);
+}
+
+
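queue_add(), __queue_flush() and queue_flush_timeout() together form a deferred-invalidation scheme: freed ranges are parked on a per-CPU queue and only handed back to the IOVA allocator once a single global TLB flush covers the whole batch (on a full queue, or after the 10ms timer). A minimal self-contained sketch of the pattern — my_flush_all() and my_free() are hypothetical stand-ins for the domain flush and free_iova_fast():

#define Q_SIZE 256

struct lazy_queue {
	spinlock_t lock;
	unsigned int next;
	unsigned long pfns[Q_SIZE];
};

static void lazy_free(struct lazy_queue *q, unsigned long pfn)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->next == Q_SIZE) {
		my_flush_all();			/* one flush covers the batch */
		while (q->next)
			my_free(q->pfns[--q->next]);
	}
	q->pfns[q->next++] = pfn;
	spin_unlock_irqrestore(&q->lock, flags);
	/* a timer (10ms above) bounds how long a stale entry may linger */
}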
/*
* In the dma_ops path we only have the struct device. This function
* finds the corresponding IOMMU, the protection domain and the
@@ -2494,16 +2224,11 @@ static struct iommu_group *amd_iommu_device_group(struct device *dev)
static struct protection_domain *get_domain(struct device *dev)
{
struct protection_domain *domain;
- struct iommu_domain *io_domain;
if (!check_device(dev))
return ERR_PTR(-EINVAL);
- io_domain = iommu_get_domain_for_dev(dev);
- if (!io_domain)
- return NULL;
-
- domain = to_pdomain(io_domain);
+ domain = get_dev_data(dev)->domain;
if (!dma_ops_domain(domain))
return ERR_PTR(-EBUSY);
@@ -2538,94 +2263,17 @@ static void update_domain(struct protection_domain *domain)
domain->updated = false;
}
-/*
- * This function fetches the PTE for a given address in the aperture
- */
-static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
- unsigned long address)
+static int dir2prot(enum dma_data_direction direction)
{
- struct aperture_range *aperture;
- u64 *pte, *pte_page;
-
- aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
- if (!aperture)
- return NULL;
-
- pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
- if (!pte) {
- pte = alloc_pte(&dom->domain, address, PAGE_SIZE, &pte_page,
- GFP_ATOMIC);
- aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
- } else
- pte += PM_LEVEL_INDEX(0, address);
-
- update_domain(&dom->domain);
-
- return pte;
-}
-
-/*
- * This is the generic map function. It maps one 4kb page at paddr to
- * the given address in the DMA address space for the domain.
- */
-static dma_addr_t dma_ops_domain_map(struct dma_ops_domain *dom,
- unsigned long address,
- phys_addr_t paddr,
- int direction)
-{
- u64 *pte, __pte;
-
- WARN_ON(address > dom->aperture_size);
-
- paddr &= PAGE_MASK;
-
- pte = dma_ops_get_pte(dom, address);
- if (!pte)
- return DMA_ERROR_CODE;
-
- __pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;
-
if (direction == DMA_TO_DEVICE)
- __pte |= IOMMU_PTE_IR;
+ return IOMMU_PROT_IR;
else if (direction == DMA_FROM_DEVICE)
- __pte |= IOMMU_PTE_IW;
+ return IOMMU_PROT_IW;
else if (direction == DMA_BIDIRECTIONAL)
- __pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;
-
- WARN_ON_ONCE(*pte);
-
- *pte = __pte;
-
- return (dma_addr_t)address;
-}
-
-/*
- * The generic unmapping function for on page in the DMA address space.
- */
-static void dma_ops_domain_unmap(struct dma_ops_domain *dom,
- unsigned long address)
-{
- struct aperture_range *aperture;
- u64 *pte;
-
- if (address >= dom->aperture_size)
- return;
-
- aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
- if (!aperture)
- return;
-
- pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
- if (!pte)
- return;
-
- pte += PM_LEVEL_INDEX(0, address);
-
- WARN_ON_ONCE(!*pte);
-
- *pte = 0ULL;
+ return IOMMU_PROT_IW | IOMMU_PROT_IR;
+ else
+ return 0;
}
-
/*
* This function contains common code for mapping of a physically
* contiguous memory region into DMA address space. It is used by all
@@ -2636,32 +2284,29 @@ static dma_addr_t __map_single(struct device *dev,
struct dma_ops_domain *dma_dom,
phys_addr_t paddr,
size_t size,
- int dir,
- bool align,
+ enum dma_data_direction direction,
u64 dma_mask)
{
dma_addr_t offset = paddr & ~PAGE_MASK;
dma_addr_t address, start, ret;
unsigned int pages;
- unsigned long align_mask = 0;
+ int prot = 0;
int i;
pages = iommu_num_pages(paddr, size, PAGE_SIZE);
paddr &= PAGE_MASK;
- if (align)
- align_mask = (1UL << get_order(size)) - 1;
-
- address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
- dma_mask);
-
+ address = dma_ops_alloc_iova(dev, dma_dom, pages, dma_mask);
if (address == DMA_ERROR_CODE)
goto out;
+ prot = dir2prot(direction);
+
start = address;
for (i = 0; i < pages; ++i) {
- ret = dma_ops_domain_map(dma_dom, start, paddr, dir);
- if (ret == DMA_ERROR_CODE)
+ ret = iommu_map_page(&dma_dom->domain, start, paddr,
+ PAGE_SIZE, prot, GFP_ATOMIC);
+ if (ret)
goto out_unmap;
paddr += PAGE_SIZE;
@@ -2681,10 +2326,13 @@ out_unmap:
for (--i; i >= 0; --i) {
start -= PAGE_SIZE;
- dma_ops_domain_unmap(dma_dom, start);
+ iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE);
}
- dma_ops_free_addresses(dma_dom, address, pages);
+ domain_flush_tlb(&dma_dom->domain);
+ domain_flush_complete(&dma_dom->domain);
+
+ dma_ops_free_iova(dma_dom, address, pages);
return DMA_ERROR_CODE;
}
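The out_unmap path above is the standard partial-failure shape: undo exactly the pages already mapped, flush so no stale IOTLB entry survives, then return the IOVA range. Reduced to its skeleton (map_one(), unmap_one() and flush_and_free() are hypothetical helpers):

	for (i = 0; i < pages; ++i)
		if (map_one(start + i * PAGE_SIZE))
			goto rollback;
	return start;

rollback:
	while (i--)				/* only pages 0..i-1 were mapped */
		unmap_one(start + i * PAGE_SIZE);
	flush_and_free(start, pages);		/* flush before the range is reused */
	return DMA_ERROR_CODE;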
@@ -2702,21 +2350,23 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
dma_addr_t i, start;
unsigned int pages;
- if ((dma_addr == DMA_ERROR_CODE) ||
- (dma_addr + size > dma_dom->aperture_size))
- return;
-
flush_addr = dma_addr;
pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
dma_addr &= PAGE_MASK;
start = dma_addr;
for (i = 0; i < pages; ++i) {
- dma_ops_domain_unmap(dma_dom, start);
+ iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE);
start += PAGE_SIZE;
}
- dma_ops_free_addresses(dma_dom, dma_addr, pages);
+ if (amd_iommu_unmap_flush) {
+ dma_ops_free_iova(dma_dom, dma_addr, pages);
+ domain_flush_tlb(&dma_dom->domain);
+ domain_flush_complete(&dma_dom->domain);
+ } else {
+ queue_add(dma_dom, dma_addr, pages);
+ }
}
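To put rough numbers on the two branches: a device unmapping 10,000 buffers per second in strict mode (amd_iommu_unmap_flush set) pays 10,000 synchronous flush-and-wait cycles, while in lazy mode the same load is absorbed by the timer-driven flush — at most ~100 per second for the 10ms window above — plus any full-queue flushes; the price is that a freed IOVA remains translatable in the IOTLB until the next batch flush.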
/*
@@ -2725,10 +2375,11 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
static dma_addr_t map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
phys_addr_t paddr = page_to_phys(page) + offset;
struct protection_domain *domain;
+ struct dma_ops_domain *dma_dom;
u64 dma_mask;
domain = get_domain(dev);
@@ -2738,24 +2389,53 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
return DMA_ERROR_CODE;
dma_mask = *dev->dma_mask;
+ dma_dom = to_dma_ops_domain(domain);
- return __map_single(dev, domain->priv, paddr, size, dir, false,
- dma_mask);
+ return __map_single(dev, dma_dom, paddr, size, dir, dma_mask);
}
/*
* The exported unmap_single function for dma_ops.
*/
static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
struct protection_domain *domain;
+ struct dma_ops_domain *dma_dom;
domain = get_domain(dev);
if (IS_ERR(domain))
return;
- __unmap_single(domain->priv, dma_addr, size, dir);
+ dma_dom = to_dma_ops_domain(domain);
+
+ __unmap_single(dma_dom, dma_addr, size, dir);
+}
+
+static int sg_num_pages(struct device *dev,
+ struct scatterlist *sglist,
+ int nelems)
+{
+ unsigned long mask, boundary_size;
+ struct scatterlist *s;
+ int i, npages = 0;
+
+ mask = dma_get_seg_boundary(dev);
+ boundary_size = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT :
+ 1UL << (BITS_PER_LONG - PAGE_SHIFT);
+
+ for_each_sg(sglist, s, nelems, i) {
+ int p, n;
+
+ s->dma_address = npages << PAGE_SHIFT;
+ p = npages % boundary_size;
+ n = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);
+ if (p + n > boundary_size)
+ npages += boundary_size - p;
+ npages += n;
+ }
+
+ return npages;
}
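A worked pass through sg_num_pages(), assuming 4K pages and a 64K segment boundary (boundary_size = 16): for two segments of 5 and 3 pages, the first gets dma_address 0 and leaves npages = 5; the second starts at p = 5, and since 5 + 3 <= 16 no padding is needed, so it gets dma_address 5 << PAGE_SHIFT and the function returns 8. Had the second segment needed 13 pages, 5 + 13 > 16 would first pad npages up to 16 so the accounting keeps segments from straddling a boundary the device cannot cross.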
/*
@@ -2763,46 +2443,79 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
* lists).
*/
static int map_sg(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ int nelems, enum dma_data_direction direction,
+ unsigned long attrs)
{
+ int mapped_pages = 0, npages = 0, prot = 0, i;
struct protection_domain *domain;
- int i;
+ struct dma_ops_domain *dma_dom;
struct scatterlist *s;
- phys_addr_t paddr;
- int mapped_elems = 0;
+ unsigned long address;
u64 dma_mask;
domain = get_domain(dev);
if (IS_ERR(domain))
return 0;
+ dma_dom = to_dma_ops_domain(domain);
dma_mask = *dev->dma_mask;
+ npages = sg_num_pages(dev, sglist, nelems);
+
+ address = dma_ops_alloc_iova(dev, dma_dom, npages, dma_mask);
+ if (address == DMA_ERROR_CODE)
+ goto out_err;
+
+ prot = dir2prot(direction);
+
+ /* Map all sg entries */
for_each_sg(sglist, s, nelems, i) {
- paddr = sg_phys(s);
+ int j, pages = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);
- s->dma_address = __map_single(dev, domain->priv,
- paddr, s->length, dir, false,
- dma_mask);
+ for (j = 0; j < pages; ++j) {
+ unsigned long bus_addr, phys_addr;
+ int ret;
- if (s->dma_address) {
- s->dma_length = s->length;
- mapped_elems++;
- } else
- goto unmap;
+ bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
+ phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT);
+ ret = iommu_map_page(domain, bus_addr, phys_addr, PAGE_SIZE, prot, GFP_ATOMIC);
+ if (ret)
+ goto out_unmap;
+
+ mapped_pages += 1;
+ }
+ }
+
+ /* Everything is mapped - write the right values into s->dma_address */
+ for_each_sg(sglist, s, nelems, i) {
+ s->dma_address += address + s->offset;
+ s->dma_length = s->length;
}
- return mapped_elems;
+ return nelems;
+
+out_unmap:
+ pr_err("%s: IOMMU mapping error in map_sg (io-pages: %d)\n",
+ dev_name(dev), npages);
+
+ for_each_sg(sglist, s, nelems, i) {
+ int j, pages = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);
+
+ for (j = 0; j < pages; ++j) {
+ unsigned long bus_addr;
+
+ bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
+ iommu_unmap_page(domain, bus_addr, PAGE_SIZE);
-unmap:
- for_each_sg(sglist, s, mapped_elems, i) {
- if (s->dma_address)
- __unmap_single(domain->priv, s->dma_address,
- s->dma_length, dir);
- s->dma_address = s->dma_length = 0;
+			if (--mapped_pages == 0)
+ goto out_free_iova;
+ }
}
+out_free_iova:
+ free_iova_fast(&dma_dom->iovad, address, npages);
+
+out_err:
return 0;
}
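The payoff of sg_num_pages() is visible here: one IOVA allocation covers the entire list, each s->dma_address temporarily holds a page offset into that range, and the final loop turns it into a bus address via s->dma_address += address + s->offset. With a range base of, say, 0x100000, the two example segments above would land at 0x100000 and 0x105000 plus their intra-page offsets — and unmap_sg() below can recompute the page count from the list and release the whole range in one call.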
@@ -2812,21 +2525,22 @@ unmap:
*/
static void unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct protection_domain *domain;
- struct scatterlist *s;
- int i;
+ struct dma_ops_domain *dma_dom;
+ unsigned long startaddr;
+ int npages = 2;
domain = get_domain(dev);
if (IS_ERR(domain))
return;
- for_each_sg(sglist, s, nelems, i) {
- __unmap_single(domain->priv, s->dma_address,
- s->dma_length, dir);
- s->dma_address = s->dma_length = 0;
- }
+ startaddr = sg_dma_address(sglist) & PAGE_MASK;
+ dma_dom = to_dma_ops_domain(domain);
+ npages = sg_num_pages(dev, sglist, nelems);
+
+ __unmap_single(dma_dom, startaddr, npages << PAGE_SHIFT, dir);
}
/*
@@ -2834,10 +2548,11 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
*/
static void *alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
u64 dma_mask = dev->coherent_dma_mask;
struct protection_domain *domain;
+ struct dma_ops_domain *dma_dom;
struct page *page;
domain = get_domain(dev);
@@ -2848,6 +2563,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
} else if (IS_ERR(domain))
return NULL;
+ dma_dom = to_dma_ops_domain(domain);
size = PAGE_ALIGN(size);
dma_mask = dev->coherent_dma_mask;
flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
@@ -2867,8 +2583,8 @@ static void *alloc_coherent(struct device *dev, size_t size,
if (!dma_mask)
dma_mask = *dev->dma_mask;
- *dma_addr = __map_single(dev, domain->priv, page_to_phys(page),
- size, DMA_BIDIRECTIONAL, true, dma_mask);
+ *dma_addr = __map_single(dev, dma_dom, page_to_phys(page),
+ size, DMA_BIDIRECTIONAL, dma_mask);
if (*dma_addr == DMA_ERROR_CODE)
goto out_free;
@@ -2888,9 +2604,10 @@ out_free:
*/
static void free_coherent(struct device *dev, size_t size,
void *virt_addr, dma_addr_t dma_addr,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct protection_domain *domain;
+ struct dma_ops_domain *dma_dom;
struct page *page;
page = virt_to_page(virt_addr);
@@ -2900,7 +2617,9 @@ static void free_coherent(struct device *dev, size_t size,
if (IS_ERR(domain))
goto free_mem;
- __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
+ dma_dom = to_dma_ops_domain(domain);
+
+ __unmap_single(dma_dom, dma_addr, size, DMA_BIDIRECTIONAL);
free_mem:
if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
@@ -2916,48 +2635,92 @@ static int amd_iommu_dma_supported(struct device *dev, u64 mask)
return check_device(dev);
}
-static int set_dma_mask(struct device *dev, u64 mask)
+static struct dma_map_ops amd_iommu_dma_ops = {
+ .alloc = alloc_coherent,
+ .free = free_coherent,
+ .map_page = map_page,
+ .unmap_page = unmap_page,
+ .map_sg = map_sg,
+ .unmap_sg = unmap_sg,
+ .dma_supported = amd_iommu_dma_supported,
+};
+
+static int init_reserved_iova_ranges(void)
{
- struct protection_domain *domain;
- int max_apertures = 1;
+ struct pci_dev *pdev = NULL;
+ struct iova *val;
- domain = get_domain(dev);
- if (IS_ERR(domain))
- return PTR_ERR(domain);
+ init_iova_domain(&reserved_iova_ranges, PAGE_SIZE,
+ IOVA_START_PFN, DMA_32BIT_PFN);
- if (mask == DMA_BIT_MASK(64))
- max_apertures = 8;
- else if (mask > DMA_BIT_MASK(32))
- max_apertures = 4;
+ lockdep_set_class(&reserved_iova_ranges.iova_rbtree_lock,
+ &reserved_rbtree_key);
+
+ /* MSI memory range */
+ val = reserve_iova(&reserved_iova_ranges,
+ IOVA_PFN(MSI_RANGE_START), IOVA_PFN(MSI_RANGE_END));
+ if (!val) {
+ pr_err("Reserving MSI range failed\n");
+ return -ENOMEM;
+ }
+
+ /* HT memory range */
+ val = reserve_iova(&reserved_iova_ranges,
+ IOVA_PFN(HT_RANGE_START), IOVA_PFN(HT_RANGE_END));
+ if (!val) {
+ pr_err("Reserving HT range failed\n");
+ return -ENOMEM;
+ }
/*
- * To prevent lock contention it doesn't make sense to allocate more
- * apertures than online cpus
+ * Memory used for PCI resources
+ * FIXME: Check whether we can reserve the PCI-hole completely
*/
- if (max_apertures > num_online_cpus())
- max_apertures = num_online_cpus();
+ for_each_pci_dev(pdev) {
+ int i;
- if (dma_ops_domain_alloc_apertures(domain->priv, max_apertures))
- dev_err(dev, "Can't allocate %d iommu apertures\n",
- max_apertures);
+ for (i = 0; i < PCI_NUM_RESOURCES; ++i) {
+ struct resource *r = &pdev->resource[i];
+
+ if (!(r->flags & IORESOURCE_MEM))
+ continue;
+
+ val = reserve_iova(&reserved_iova_ranges,
+ IOVA_PFN(r->start),
+ IOVA_PFN(r->end));
+ if (!val) {
+ pr_err("Reserve pci-resource range failed\n");
+ return -ENOMEM;
+ }
+ }
+ }
return 0;
}
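These reservations are made once in the global reserved_iova_ranges domain and then propagated into every new DMA domain by the copy_reserved_iova() call in dma_ops_domain_alloc() above, so per-domain allocators can never hand out the MSI, HT or PCI windows. Adding one more carve-out follows the same shape — the range below is hypothetical:

	/* hypothetical: keep a firmware-claimed window out of DMA use */
	val = reserve_iova(&reserved_iova_ranges,
			   IOVA_PFN(0xfed00000), IOVA_PFN(0xfed00fff));
	if (!val) {
		pr_err("Reserving firmware range failed\n");
		return -ENOMEM;
	}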
-static struct dma_map_ops amd_iommu_dma_ops = {
- .alloc = alloc_coherent,
- .free = free_coherent,
- .map_page = map_page,
- .unmap_page = unmap_page,
- .map_sg = map_sg,
- .unmap_sg = unmap_sg,
- .dma_supported = amd_iommu_dma_supported,
- .set_dma_mask = set_dma_mask,
-};
-
int __init amd_iommu_init_api(void)
{
- int err = 0;
+ int ret, cpu, err = 0;
+
+ ret = iova_cache_get();
+ if (ret)
+ return ret;
+
+ ret = init_reserved_iova_ranges();
+ if (ret)
+ return ret;
+
+ for_each_possible_cpu(cpu) {
+ struct flush_queue *queue = per_cpu_ptr(&flush_queue, cpu);
+
+ queue->entries = kzalloc(FLUSH_QUEUE_SIZE *
+ sizeof(*queue->entries),
+ GFP_KERNEL);
+ if (!queue->entries)
+ goto out_put_iova;
+
+ spin_lock_init(&queue->lock);
+ }
err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
if (err)
@@ -2967,11 +2730,26 @@ int __init amd_iommu_init_api(void)
if (err)
return err;
#endif
+ err = bus_set_iommu(&platform_bus_type, &amd_iommu_ops);
+ if (err)
+ return err;
return 0;
+
+out_put_iova:
+ for_each_possible_cpu(cpu) {
+ struct flush_queue *queue = per_cpu_ptr(&flush_queue, cpu);
+
+ kfree(queue->entries);
+ }
+
+ return -ENOMEM;
}
int __init amd_iommu_init_dma_ops(void)
{
+ setup_timer(&queue_timer, queue_flush_timeout, 0);
+ atomic_set(&queue_timer_on, 0);
+
swiotlb = iommu_pass_through ? 1 : 0;
iommu_detected = 1;
@@ -2990,6 +2768,7 @@ int __init amd_iommu_init_dma_ops(void)
pr_info("AMD-Vi: Lazy IO/TLB flushing enabled\n");
return 0;
+
}
/*****************************************************************************
@@ -3126,7 +2905,14 @@ static void amd_iommu_domain_free(struct iommu_domain *dom)
switch (dom->type) {
case IOMMU_DOMAIN_DMA:
- dma_dom = domain->priv;
+ /*
+ * First make sure the domain is no longer referenced from the
+ * flush queue
+ */
+ queue_flush_all();
+
+ /* Now release the domain */
+ dma_dom = to_dma_ops_domain(domain);
dma_ops_domain_free(dma_dom);
break;
default:
@@ -3208,7 +2994,7 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
prot |= IOMMU_PROT_IW;
mutex_lock(&domain->api_lock);
- ret = iommu_map_page(domain, iova, paddr, prot, page_size);
+ ret = iommu_map_page(domain, iova, paddr, page_size, prot, GFP_KERNEL);
mutex_unlock(&domain->api_lock);
return ret;
@@ -3310,6 +3096,19 @@ static void amd_iommu_put_dm_regions(struct device *dev,
kfree(entry);
}
+static void amd_iommu_apply_dm_region(struct device *dev,
+ struct iommu_domain *domain,
+ struct iommu_dm_region *region)
+{
+ struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain));
+ unsigned long start, end;
+
+ start = IOVA_PFN(region->start);
+ end = IOVA_PFN(region->start + region->length);
+
+ WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL);
+}
+
static const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
.domain_alloc = amd_iommu_domain_alloc,
@@ -3325,6 +3124,7 @@ static const struct iommu_ops amd_iommu_ops = {
.device_group = amd_iommu_device_group,
.get_dm_regions = amd_iommu_get_dm_regions,
.put_dm_regions = amd_iommu_put_dm_regions,
+ .apply_dm_region = amd_iommu_apply_dm_region,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
};
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 590956ac7..caf5e3822 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -421,7 +421,6 @@ struct protection_domain {
bool updated; /* complete domain flush required */
unsigned dev_cnt; /* devices assigned to this domain */
unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
- void *priv; /* private data */
};
/*
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 56999d2fa..594849a3a 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -538,8 +538,7 @@ static void do_fault(struct work_struct *work)
if (access_error(vma, fault))
goto out;
- ret = handle_mm_fault(mm, vma, address, flags);
-
+ ret = handle_mm_fault(vma, address, flags);
out:
up_read(&mm->mmap_sem);
@@ -961,7 +960,7 @@ static int __init amd_iommu_v2_init(void)
spin_lock_init(&state_lock);
ret = -ENOMEM;
- iommu_wq = create_workqueue("amd_iommu_v2");
+ iommu_wq = alloc_workqueue("amd_iommu_v2", WQ_MEM_RECLAIM, 0);
if (iommu_wq == NULL)
goto out;
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 46ba2b64b..641e88761 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -2690,6 +2690,8 @@ static int __init arm_smmu_init(void)
if (ret)
return ret;
+ pci_request_acs();
+
return bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
}
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 31422d440..2db74ebc3 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -967,8 +967,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
* handler seeing a half-initialised domain state.
*/
irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
- ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
- "arm-smmu-context-fault", domain);
+ ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
+ IRQF_SHARED, "arm-smmu-context-fault", domain);
if (ret < 0) {
dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
cfg->irptndx, irq);
@@ -1008,7 +1008,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
if (cfg->irptndx != INVALID_IRPTNDX) {
irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
- free_irq(irq, domain);
+ devm_free_irq(smmu->dev, irq, domain);
}
free_io_pgtable_ops(smmu_domain->pgtbl_ops);
@@ -1966,15 +1966,15 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
}
for (i = 0; i < smmu->num_global_irqs; ++i) {
- err = request_irq(smmu->irqs[i],
- arm_smmu_global_fault,
- IRQF_SHARED,
- "arm-smmu global fault",
- smmu);
+ err = devm_request_irq(smmu->dev, smmu->irqs[i],
+ arm_smmu_global_fault,
+ IRQF_SHARED,
+ "arm-smmu global fault",
+ smmu);
if (err) {
dev_err(dev, "failed to request global IRQ %d (%u)\n",
i, smmu->irqs[i]);
- goto out_free_irqs;
+ goto out_put_masters;
}
}
@@ -1986,10 +1986,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
arm_smmu_device_reset(smmu);
return 0;
-out_free_irqs:
- while (i--)
- free_irq(smmu->irqs[i], smmu);
-
out_put_masters:
for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
struct arm_smmu_master *master
@@ -2030,7 +2026,7 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
dev_err(dev, "removing device with active domains!\n");
for (i = 0; i < smmu->num_global_irqs; ++i)
- free_irq(smmu->irqs[i], smmu);
+ devm_free_irq(smmu->dev, smmu->irqs[i], smmu);
/* Turn the thing off */
writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
@@ -2076,8 +2072,10 @@ static int __init arm_smmu_init(void)
#endif
#ifdef CONFIG_PCI
- if (!iommu_present(&pci_bus_type))
+ if (!iommu_present(&pci_bus_type)) {
+ pci_request_acs();
bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
+ }
#endif
return 0;
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 97a23082e..00c8a08d5 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -152,12 +152,15 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
}
}
-static struct iova *__alloc_iova(struct iova_domain *iovad, size_t size,
+static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
dma_addr_t dma_limit)
{
+ struct iova_domain *iovad = domain->iova_cookie;
unsigned long shift = iova_shift(iovad);
unsigned long length = iova_align(iovad, size) >> shift;
+ if (domain->geometry.force_aperture)
+ dma_limit = min(dma_limit, domain->geometry.aperture_end);
/*
* Enforce size-alignment to be safe - there could perhaps be an
* attribute to control this per-device, or at least per-domain...
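The new force_aperture clamp matters when the domain's translation regime is narrower than the device: a domain with aperture_end = 0xffffffff (32-bit output) under a device advertising DMA_BIT_MASK(48) would otherwise be offered IOVAs its page tables cannot express. After dma_limit = min(dma_limit, domain->geometry.aperture_end), the 48-bit limit 0xffffffffffff collapses to 0xffffffff and every allocation stays inside the aperture.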
@@ -287,7 +290,7 @@ void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
* or NULL on failure.
*/
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
- struct dma_attrs *attrs, int prot, dma_addr_t *handle,
+ unsigned long attrs, int prot, dma_addr_t *handle,
void (*flush_page)(struct device *, const void *, phys_addr_t))
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
@@ -307,7 +310,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
} else {
size = ALIGN(size, min_size);
}
- if (dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs))
+ if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
alloc_sizes = min_size;
count = PAGE_ALIGN(size) >> PAGE_SHIFT;
@@ -315,7 +318,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
if (!pages)
return NULL;
- iova = __alloc_iova(iovad, size, dev->coherent_dma_mask);
+ iova = __alloc_iova(domain, size, dev->coherent_dma_mask);
if (!iova)
goto out_free_pages;
@@ -387,7 +390,7 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
phys_addr_t phys = page_to_phys(page) + offset;
size_t iova_off = iova_offset(iovad, phys);
size_t len = iova_align(iovad, size + iova_off);
- struct iova *iova = __alloc_iova(iovad, len, dma_get_mask(dev));
+ struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev));
if (!iova)
return DMA_ERROR_CODE;
@@ -401,7 +404,7 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
}
void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
}
@@ -539,7 +542,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
prev = s;
}
- iova = __alloc_iova(iovad, iova_len, dma_get_mask(dev));
+ iova = __alloc_iova(domain, iova_len, dma_get_mask(dev));
if (!iova)
goto out_restore_sg;
@@ -561,7 +564,7 @@ out_restore_sg:
}
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
/*
* The scatterlist segments are mapped into a single
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 7330a66e2..58470f5ce 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -241,8 +241,20 @@ int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
if (!dmar_match_pci_path(info, scope->bus, path, level))
continue;
- if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT) ^
- (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL)) {
+ /*
+ * We expect devices with endpoint scope to have normal PCI
+ * headers, and devices with bridge scope to have bridge PCI
+ * headers. However PCI NTB devices may be listed in the
+ * DMAR table with bridge scope, even though they have a
+ * normal PCI header. NTB devices are identified by class
+	 * "BRIDGE_OTHER" (0680h) - we don't declare a scope mismatch
+ * for this special case.
+ */
+ if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
+ info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
+ (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
+ (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
+ info->dev->class >> 8 != PCI_CLASS_BRIDGE_OTHER))) {
pr_warn("Device scope type does not match for %s\n",
pci_name(info->dev));
return -EINVAL;
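Spelling the new predicate out as a table (hdr = info->dev->hdr_type, class compared as info->dev->class >> 8):

  scope     hdr       class                    result
  ENDPOINT  NORMAL    any                      match
  ENDPOINT  bridge    any                      mismatch
  BRIDGE    bridge    any                      match
  BRIDGE    NORMAL    PCI_CLASS_BRIDGE_OTHER   match (NTB carve-out)
  BRIDGE    NORMAL    anything else            mismatch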
@@ -1155,8 +1167,6 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
(unsigned long long)qi->desc[index].high);
memcpy(&qi->desc[index], &qi->desc[wait_index],
sizeof(struct qi_desc));
- __iommu_flush_cache(iommu, &qi->desc[index],
- sizeof(struct qi_desc));
writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
return -EINVAL;
}
@@ -1231,9 +1241,6 @@ restart:
hw[wait_index] = wait_desc;
- __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
- __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));
-
qi->free_head = (qi->free_head + 2) % QI_LENGTH;
qi->free_cnt -= 2;
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index e27e3b7df..33dcc29ec 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -54,6 +54,10 @@ typedef u32 sysmmu_pte_t;
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)
+#ifdef CONFIG_BIG_ENDIAN
+#warning "revisit driver if we can enable big-endian ptes"
+#endif
+
/*
* v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces
* v5.0 introduced support for 36bit physical address space by shifting
@@ -322,14 +326,27 @@ static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
__sysmmu_tlb_invalidate(data);
}
+static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data)
+{
+ BUG_ON(clk_prepare_enable(data->clk_master));
+ BUG_ON(clk_prepare_enable(data->clk));
+ BUG_ON(clk_prepare_enable(data->pclk));
+ BUG_ON(clk_prepare_enable(data->aclk));
+}
+
+static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
+{
+ clk_disable_unprepare(data->aclk);
+ clk_disable_unprepare(data->pclk);
+ clk_disable_unprepare(data->clk);
+ clk_disable_unprepare(data->clk_master);
+}
+
static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
u32 ver;
- clk_enable(data->clk_master);
- clk_enable(data->clk);
- clk_enable(data->pclk);
- clk_enable(data->aclk);
+ __sysmmu_enable_clocks(data);
ver = readl(data->sfrbase + REG_MMU_VERSION);
@@ -342,10 +359,7 @@ static void __sysmmu_get_version(struct sysmmu_drvdata *data)
dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));
- clk_disable(data->aclk);
- clk_disable(data->pclk);
- clk_disable(data->clk);
- clk_disable(data->clk_master);
+ __sysmmu_disable_clocks(data);
}
static void show_fault_information(struct sysmmu_drvdata *data,
@@ -427,10 +441,7 @@ static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
writel(0, data->sfrbase + REG_MMU_CFG);
- clk_disable(data->aclk);
- clk_disable(data->pclk);
- clk_disable(data->clk);
- clk_disable(data->clk_master);
+ __sysmmu_disable_clocks(data);
}
static bool __sysmmu_disable(struct sysmmu_drvdata *data)
@@ -475,10 +486,7 @@ static void __sysmmu_init_config(struct sysmmu_drvdata *data)
static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
{
- clk_enable(data->clk_master);
- clk_enable(data->clk);
- clk_enable(data->pclk);
- clk_enable(data->aclk);
+ __sysmmu_enable_clocks(data);
writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
@@ -488,6 +496,12 @@ static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
+ /*
+	 * The SYSMMU driver keeps the master's clock enabled only for a
+	 * short time while accessing the registers. For address translation
+	 * during DMA transactions it relies on the client driver to enable
+	 * it.
+ */
clk_disable(data->clk_master);
}
@@ -524,16 +538,15 @@ static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
{
unsigned long flags;
- clk_enable(data->clk_master);
spin_lock_irqsave(&data->lock, flags);
- if (is_sysmmu_active(data)) {
- if (data->version >= MAKE_MMU_VER(3, 3))
- __sysmmu_tlb_invalidate_entry(data, iova, 1);
+ if (is_sysmmu_active(data) && data->version >= MAKE_MMU_VER(3, 3)) {
+ clk_enable(data->clk_master);
+ __sysmmu_tlb_invalidate_entry(data, iova, 1);
+ clk_disable(data->clk_master);
}
spin_unlock_irqrestore(&data->lock, flags);
- clk_disable(data->clk_master);
}
static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
@@ -572,6 +585,8 @@ static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
spin_unlock_irqrestore(&data->lock, flags);
}
+static struct iommu_ops exynos_iommu_ops;
+
static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
int irq, ret;
@@ -602,37 +617,22 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
}
data->clk = devm_clk_get(dev, "sysmmu");
- if (!IS_ERR(data->clk)) {
- ret = clk_prepare(data->clk);
- if (ret) {
- dev_err(dev, "Failed to prepare clk\n");
- return ret;
- }
- } else {
+ if (PTR_ERR(data->clk) == -ENOENT)
data->clk = NULL;
- }
+ else if (IS_ERR(data->clk))
+ return PTR_ERR(data->clk);
data->aclk = devm_clk_get(dev, "aclk");
- if (!IS_ERR(data->aclk)) {
- ret = clk_prepare(data->aclk);
- if (ret) {
- dev_err(dev, "Failed to prepare aclk\n");
- return ret;
- }
- } else {
+ if (PTR_ERR(data->aclk) == -ENOENT)
data->aclk = NULL;
- }
+ else if (IS_ERR(data->aclk))
+ return PTR_ERR(data->aclk);
data->pclk = devm_clk_get(dev, "pclk");
- if (!IS_ERR(data->pclk)) {
- ret = clk_prepare(data->pclk);
- if (ret) {
- dev_err(dev, "Failed to prepare pclk\n");
- return ret;
- }
- } else {
+ if (PTR_ERR(data->pclk) == -ENOENT)
data->pclk = NULL;
- }
+ else if (IS_ERR(data->pclk))
+ return PTR_ERR(data->pclk);
if (!data->clk && (!data->aclk || !data->pclk)) {
dev_err(dev, "Failed to get device clock(s)!\n");
@@ -640,15 +640,10 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
}
data->clk_master = devm_clk_get(dev, "master");
- if (!IS_ERR(data->clk_master)) {
- ret = clk_prepare(data->clk_master);
- if (ret) {
- dev_err(dev, "Failed to prepare master's clk\n");
- return ret;
- }
- } else {
+ if (PTR_ERR(data->clk_master) == -ENOENT)
data->clk_master = NULL;
- }
+ else if (IS_ERR(data->clk_master))
+ return PTR_ERR(data->clk_master);
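The pattern repeated four times above is the usual idiom for an optional clock (what later kernels wrap as devm_clk_get_optional()): -ENOENT means the clock simply is not wired up and NULL is substituted — the clk API treats a NULL clock as a no-op — while every other error, including -EPROBE_DEFER, is propagated. Distilled:

	clk = devm_clk_get(dev, "name");
	if (PTR_ERR(clk) == -ENOENT)
		clk = NULL;		/* optional: clk_prepare_enable(NULL) == 0 */
	else if (IS_ERR(clk))
		return PTR_ERR(clk);	/* real failure, incl. -EPROBE_DEFER */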
data->sysmmu = dev;
spin_lock_init(&data->lock);
@@ -665,6 +660,8 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
+ of_iommu_set_ops(dev->of_node, &exynos_iommu_ops);
+
return 0;
}
@@ -717,7 +714,7 @@ static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
DMA_TO_DEVICE);
- *ent = val;
+ *ent = cpu_to_le32(val);
dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
DMA_TO_DEVICE);
}
@@ -1358,7 +1355,6 @@ static int __init exynos_iommu_of_setup(struct device_node *np)
if (!dma_dev)
dma_dev = &pdev->dev;
- of_iommu_set_ops(np, &exynos_iommu_ops);
return 0;
}
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 50b639ba3..ebb5bf3dd 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1672,7 +1672,7 @@ static int iommu_init_domains(struct intel_iommu *iommu)
return -ENOMEM;
}
- size = ((ndomains >> 8) + 1) * sizeof(struct dmar_domain **);
+ size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
iommu->domains = kzalloc(size, GFP_KERNEL);
if (iommu->domains) {
@@ -1737,7 +1737,7 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
static void free_dmar_iommu(struct intel_iommu *iommu)
{
if ((iommu->domains) && (iommu->domain_ids)) {
- int elems = (cap_ndoms(iommu->cap) >> 8) + 1;
+ int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
int i;
for (i = 0; i < elems; i++)
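The arithmetic here fixes an over-count at exact multiples of 256: the old (ndomains >> 8) + 1 gives 2 for ndomains = 256, whereas ALIGN(256, 256) >> 8 = 1 — exactly ceil(ndomains / 256). For non-multiples both agree (e.g. ndomains = 255 yields 1 either way). Using the same expression in free_dmar_iommu() keeps the free loop iterating over precisely the slots that iommu_init_domains() allocated.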
@@ -3552,7 +3552,7 @@ error:
static dma_addr_t intel_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return __intel_map_single(dev, page_to_phys(page) + offset, size,
dir, *dev->dma_mask);
@@ -3711,14 +3711,14 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
intel_unmap(dev, dev_addr, size);
}
static void *intel_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct page *page = NULL;
int order;
@@ -3764,7 +3764,7 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
}
static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
int order;
struct page *page = virt_to_page(vaddr);
@@ -3779,7 +3779,7 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
unsigned long nrpages = 0;
@@ -3808,7 +3808,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
}
static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
int i;
struct dmar_domain *domain;
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index d9939fa9b..8ebb3530a 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -583,7 +583,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
if (access_error(vma, req))
goto invalid;
- ret = handle_mm_fault(svm->mm, vma, address,
+ ret = handle_mm_fault(vma, address,
req->wr_req ? FAULT_FLAG_WRITE : 0);
if (ret & VM_FAULT_ERROR)
goto invalid;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 3000051f4..b06d93594 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -34,8 +34,7 @@
#include <trace/events/iommu.h>
static struct kset *iommu_group_kset;
-static struct ida iommu_group_ida;
-static struct mutex iommu_group_mutex;
+static DEFINE_IDA(iommu_group_ida);
struct iommu_callback_data {
const struct iommu_ops *ops;
@@ -144,9 +143,7 @@ static void iommu_group_release(struct kobject *kobj)
if (group->iommu_data_release)
group->iommu_data_release(group->iommu_data);
- mutex_lock(&iommu_group_mutex);
- ida_remove(&iommu_group_ida, group->id);
- mutex_unlock(&iommu_group_mutex);
+ ida_simple_remove(&iommu_group_ida, group->id);
if (group->default_domain)
iommu_domain_free(group->default_domain);
@@ -186,26 +183,17 @@ struct iommu_group *iommu_group_alloc(void)
INIT_LIST_HEAD(&group->devices);
BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
- mutex_lock(&iommu_group_mutex);
-
-again:
- if (unlikely(0 == ida_pre_get(&iommu_group_ida, GFP_KERNEL))) {
+ ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
+ if (ret < 0) {
kfree(group);
- mutex_unlock(&iommu_group_mutex);
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(ret);
}
-
- if (-EAGAIN == ida_get_new(&iommu_group_ida, &group->id))
- goto again;
-
- mutex_unlock(&iommu_group_mutex);
+ group->id = ret;
ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
NULL, "%d", group->id);
if (ret) {
- mutex_lock(&iommu_group_mutex);
- ida_remove(&iommu_group_ida, group->id);
- mutex_unlock(&iommu_group_mutex);
+ ida_simple_remove(&iommu_group_ida, group->id);
kfree(group);
return ERR_PTR(ret);
}
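ida_simple_get()/ida_simple_remove() fold the old preload-retry-mutex dance into single calls with internal locking. The allocation side in miniature (end = 0 means no upper bound):

	int id = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);

	if (id < 0)
		return ERR_PTR(id);	/* -ENOMEM or -ENOSPC */
	/* ... use id ... */
	ida_simple_remove(&iommu_group_ida, id);	/* on release */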
@@ -348,6 +336,9 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
list_for_each_entry(entry, &mappings, list) {
dma_addr_t start, end, addr;
+ if (domain->ops->apply_dm_region)
+ domain->ops->apply_dm_region(dev, domain, entry);
+
start = ALIGN(entry->start, pg_size);
end = ALIGN(entry->start + entry->length, pg_size);
@@ -1483,9 +1474,6 @@ static int __init iommu_init(void)
{
iommu_group_kset = kset_create_and_add("iommu_groups",
NULL, kernel_kobj);
- ida_init(&iommu_group_ida);
- mutex_init(&iommu_group_mutex);
-
BUG_ON(!iommu_group_kset);
return 0;
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index e321fa517..b09692bb5 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -27,32 +27,35 @@
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/of_iommu.h>
#include <asm/cacheflush.h>
#include <asm/sizes.h>
#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"
+#include "io-pgtable.h"
#define MRC(reg, processor, op1, crn, crm, op2) \
__asm__ __volatile__ ( \
" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
: "=r" (reg))
-#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0)
-#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1)
-
/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
-static int msm_iommu_tex_class[4];
-
DEFINE_SPINLOCK(msm_iommu_lock);
+static LIST_HEAD(qcom_iommu_devices);
+static struct iommu_ops msm_iommu_ops;
struct msm_priv {
- unsigned long *pgtable;
struct list_head list_attached;
struct iommu_domain domain;
+ struct io_pgtable_cfg cfg;
+ struct io_pgtable_ops *iop;
+ struct device *dev;
+ spinlock_t pgtlock; /* pagetable lock */
};
static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
@@ -60,67 +63,183 @@ static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
return container_of(dom, struct msm_priv, domain);
}
-static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
+static int __enable_clocks(struct msm_iommu_dev *iommu)
{
int ret;
- ret = clk_enable(drvdata->pclk);
+ ret = clk_enable(iommu->pclk);
if (ret)
goto fail;
- if (drvdata->clk) {
- ret = clk_enable(drvdata->clk);
+ if (iommu->clk) {
+ ret = clk_enable(iommu->clk);
if (ret)
- clk_disable(drvdata->pclk);
+ clk_disable(iommu->pclk);
}
fail:
return ret;
}
-static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
+static void __disable_clocks(struct msm_iommu_dev *iommu)
{
- clk_disable(drvdata->clk);
- clk_disable(drvdata->pclk);
+ if (iommu->clk)
+ clk_disable(iommu->clk);
+ clk_disable(iommu->pclk);
}
-static int __flush_iotlb(struct iommu_domain *domain)
+static void msm_iommu_reset(void __iomem *base, int ncb)
{
- struct msm_priv *priv = to_msm_priv(domain);
- struct msm_iommu_drvdata *iommu_drvdata;
- struct msm_iommu_ctx_drvdata *ctx_drvdata;
- int ret = 0;
-#ifndef CONFIG_IOMMU_PGTABLES_L2
- unsigned long *fl_table = priv->pgtable;
- int i;
-
- if (!list_empty(&priv->list_attached)) {
- dmac_flush_range(fl_table, fl_table + SZ_16K);
-
- for (i = 0; i < NUM_FL_PTE; i++)
- if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) {
- void *sl_table = __va(fl_table[i] &
- FL_BASE_MASK);
- dmac_flush_range(sl_table, sl_table + SZ_4K);
- }
+ int ctx;
+
+ SET_RPUE(base, 0);
+ SET_RPUEIE(base, 0);
+ SET_ESRRESTORE(base, 0);
+ SET_TBE(base, 0);
+ SET_CR(base, 0);
+ SET_SPDMBE(base, 0);
+ SET_TESTBUSCR(base, 0);
+ SET_TLBRSW(base, 0);
+ SET_GLOBAL_TLBIALL(base, 0);
+ SET_RPU_ACR(base, 0);
+ SET_TLBLKCRWE(base, 1);
+
+ for (ctx = 0; ctx < ncb; ctx++) {
+ SET_BPRCOSH(base, ctx, 0);
+ SET_BPRCISH(base, ctx, 0);
+ SET_BPRCNSH(base, ctx, 0);
+ SET_BPSHCFG(base, ctx, 0);
+ SET_BPMTCFG(base, ctx, 0);
+ SET_ACTLR(base, ctx, 0);
+ SET_SCTLR(base, ctx, 0);
+ SET_FSRRESTORE(base, ctx, 0);
+ SET_TTBR0(base, ctx, 0);
+ SET_TTBR1(base, ctx, 0);
+ SET_TTBCR(base, ctx, 0);
+ SET_BFBCR(base, ctx, 0);
+ SET_PAR(base, ctx, 0);
+ SET_FAR(base, ctx, 0);
+ SET_CTX_TLBIALL(base, ctx, 0);
+ SET_TLBFLPTER(base, ctx, 0);
+ SET_TLBSLPTER(base, ctx, 0);
+ SET_TLBLKCR(base, ctx, 0);
+ SET_CONTEXTIDR(base, ctx, 0);
}
-#endif
+}
- list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
+static void __flush_iotlb(void *cookie)
+{
+ struct msm_priv *priv = cookie;
+ struct msm_iommu_dev *iommu = NULL;
+ struct msm_iommu_ctx_dev *master;
+ int ret = 0;
+
+ list_for_each_entry(iommu, &priv->list_attached, dom_node) {
+ ret = __enable_clocks(iommu);
+ if (ret)
+ goto fail;
- BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);
+ list_for_each_entry(master, &iommu->ctx_list, list)
+ SET_CTX_TLBIALL(iommu->base, master->num, 0);
- iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
- BUG_ON(!iommu_drvdata);
+ __disable_clocks(iommu);
+ }
+fail:
+ return;
+}
- ret = __enable_clocks(iommu_drvdata);
+static void __flush_iotlb_range(unsigned long iova, size_t size,
+ size_t granule, bool leaf, void *cookie)
+{
+ struct msm_priv *priv = cookie;
+ struct msm_iommu_dev *iommu = NULL;
+ struct msm_iommu_ctx_dev *master;
+ int ret = 0;
+ int temp_size;
+
+ list_for_each_entry(iommu, &priv->list_attached, dom_node) {
+ ret = __enable_clocks(iommu);
if (ret)
goto fail;
- SET_CTX_TLBIALL(iommu_drvdata->base, ctx_drvdata->num, 0);
- __disable_clocks(iommu_drvdata);
+ list_for_each_entry(master, &iommu->ctx_list, list) {
+ temp_size = size;
+ do {
+ iova &= TLBIVA_VA;
+ iova |= GET_CONTEXTIDR_ASID(iommu->base,
+ master->num);
+ SET_TLBIVA(iommu->base, master->num, iova);
+ iova += granule;
+ } while (temp_size -= granule);
+ }
+
+ __disable_clocks(iommu);
}
+
fail:
- return ret;
+ return;
+}
+
+static void __flush_iotlb_sync(void *cookie)
+{
+ /*
+	 * Nothing is needed here; the barrier that guarantees
+	 * completion of the TLB sync operation is implicitly
+	 * taken care of when the IOMMU client does a writel before
+	 * kick-starting the other master.
+ */
+}
+
+static const struct iommu_gather_ops msm_iommu_gather_ops = {
+ .tlb_flush_all = __flush_iotlb,
+ .tlb_add_flush = __flush_iotlb_range,
+ .tlb_sync = __flush_iotlb_sync,
+};
+
+static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
+{
+ int idx;
+
+ do {
+ idx = find_next_zero_bit(map, end, start);
+ if (idx == end)
+ return -ENOSPC;
+ } while (test_and_set_bit(idx, map));
+
+ return idx;
+}
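msm_iommu_alloc_ctx() is a tiny lock-free allocator: find_next_zero_bit() proposes a free context bank and test_and_set_bit() claims it atomically, looping if another CPU raced in between. Usage in miniature (context_map and ncb as in the driver):

	int ctx = msm_iommu_alloc_ctx(iommu->context_map, 0, iommu->ncb);

	if (ctx < 0)			/* -ENOSPC: every bank is taken */
		return ctx;
	/* ... program the context bank ... */
	msm_iommu_free_ctx(iommu->context_map, ctx);	/* clear_bit() on release */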
+
+static void msm_iommu_free_ctx(unsigned long *map, int idx)
+{
+ clear_bit(idx, map);
+}
+
+static void config_mids(struct msm_iommu_dev *iommu,
+ struct msm_iommu_ctx_dev *master)
+{
+ int mid, ctx, i;
+
+ for (i = 0; i < master->num_mids; i++) {
+ mid = master->mids[i];
+ ctx = master->num;
+
+ SET_M2VCBR_N(iommu->base, mid, 0);
+ SET_CBACR_N(iommu->base, ctx, 0);
+
+ /* Set VMID = 0 */
+ SET_VMID(iommu->base, mid, 0);
+
+ /* Set the context number for that MID to this context */
+ SET_CBNDX(iommu->base, mid, ctx);
+
+		/* Set MID associated with this context bank to 0 */
+ SET_CBVMID(iommu->base, ctx, 0);
+
+ /* Set the ASID for TLB tagging for this context */
+ SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx);
+
+ /* Set security bit override to be Non-secure */
+ SET_NSCFG(iommu->base, mid, 3);
+ }
}
static void __reset_context(void __iomem *base, int ctx)
@@ -143,15 +262,17 @@ static void __reset_context(void __iomem *base, int ctx)
SET_TLBFLPTER(base, ctx, 0);
SET_TLBSLPTER(base, ctx, 0);
SET_TLBLKCR(base, ctx, 0);
- SET_PRRR(base, ctx, 0);
- SET_NMRR(base, ctx, 0);
}
-static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
+static void __program_context(void __iomem *base, int ctx,
+ struct msm_priv *priv)
{
- unsigned int prrr, nmrr;
__reset_context(base, ctx);
+ /* Turn on TEX Remap */
+ SET_TRE(base, ctx, 1);
+ SET_AFE(base, ctx, 1);
+
/* Set up HTW mode */
/* TLB miss configuration: perform HTW on miss */
SET_TLBMCFG(base, ctx, 0x3);
@@ -159,8 +280,13 @@ static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
/* V2P configuration: HTW for access */
SET_V2PCFG(base, ctx, 0x3);
- SET_TTBCR(base, ctx, 0);
- SET_TTBR0_PA(base, ctx, (pgtable >> 14));
+ SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
+ SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[0]);
+ SET_TTBR1(base, ctx, priv->cfg.arm_v7s_cfg.ttbr[1]);
+
+ /* Set prrr and nmrr */
+ SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
+ SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr);
/* Invalidate the TLB for this context */
SET_CTX_TLBIALL(base, ctx, 0);
@@ -179,38 +305,9 @@ static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
SET_RCOSH(base, ctx, 1);
SET_RCNSH(base, ctx, 1);
- /* Turn on TEX Remap */
- SET_TRE(base, ctx, 1);
-
- /* Set TEX remap attributes */
- RCP15_PRRR(prrr);
- RCP15_NMRR(nmrr);
- SET_PRRR(base, ctx, prrr);
- SET_NMRR(base, ctx, nmrr);
-
/* Turn on BFB prefetch */
SET_BFBDFE(base, ctx, 1);
-#ifdef CONFIG_IOMMU_PGTABLES_L2
- /* Configure page tables as inner-cacheable and shareable to reduce
- * the TLB miss penalty.
- */
- SET_TTBR0_SH(base, ctx, 1);
- SET_TTBR1_SH(base, ctx, 1);
-
- SET_TTBR0_NOS(base, ctx, 1);
- SET_TTBR1_NOS(base, ctx, 1);
-
- SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
- SET_TTBR0_IRGNL(base, ctx, 1);
-
- SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
- SET_TTBR1_IRGNL(base, ctx, 1);
-
- SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
- SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
-#endif
-
/* Enable the MMU */
SET_M(base, ctx, 1);
}
@@ -227,13 +324,6 @@ static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
goto fail_nomem;
INIT_LIST_HEAD(&priv->list_attached);
- priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL,
- get_order(SZ_16K));
-
- if (!priv->pgtable)
- goto fail_nomem;
-
- memset(priv->pgtable, 0, SZ_16K);
priv->domain.geometry.aperture_start = 0;
priv->domain.geometry.aperture_end = (1ULL << 32) - 1;
@@ -250,304 +340,137 @@ static void msm_iommu_domain_free(struct iommu_domain *domain)
{
struct msm_priv *priv;
unsigned long flags;
- unsigned long *fl_table;
- int i;
spin_lock_irqsave(&msm_iommu_lock, flags);
priv = to_msm_priv(domain);
+ kfree(priv);
+ spin_unlock_irqrestore(&msm_iommu_lock, flags);
+}
- fl_table = priv->pgtable;
-
- for (i = 0; i < NUM_FL_PTE; i++)
- if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
- free_page((unsigned long) __va(((fl_table[i]) &
- FL_BASE_MASK)));
+static int msm_iommu_domain_config(struct msm_priv *priv)
+{
+ spin_lock_init(&priv->pgtlock);
+
+ priv->cfg = (struct io_pgtable_cfg) {
+ .quirks = IO_PGTABLE_QUIRK_TLBI_ON_MAP,
+ .pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
+ .ias = 32,
+ .oas = 32,
+ .tlb = &msm_iommu_gather_ops,
+ .iommu_dev = priv->dev,
+ };
+
+ priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
+ if (!priv->iop) {
+ dev_err(priv->dev, "Failed to allocate pgtable\n");
+ return -EINVAL;
+ }
- free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
- priv->pgtable = NULL;
+ msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;
- kfree(priv);
- spin_unlock_irqrestore(&msm_iommu_lock, flags);
+ return 0;
}
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
- struct msm_priv *priv;
- struct msm_iommu_ctx_dev *ctx_dev;
- struct msm_iommu_drvdata *iommu_drvdata;
- struct msm_iommu_ctx_drvdata *ctx_drvdata;
- struct msm_iommu_ctx_drvdata *tmp_drvdata;
int ret = 0;
unsigned long flags;
+ struct msm_iommu_dev *iommu;
+ struct msm_priv *priv = to_msm_priv(domain);
+ struct msm_iommu_ctx_dev *master;
- spin_lock_irqsave(&msm_iommu_lock, flags);
-
- priv = to_msm_priv(domain);
-
- if (!dev) {
- ret = -EINVAL;
- goto fail;
- }
-
- iommu_drvdata = dev_get_drvdata(dev->parent);
- ctx_drvdata = dev_get_drvdata(dev);
- ctx_dev = dev->platform_data;
-
- if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) {
- ret = -EINVAL;
- goto fail;
- }
-
- if (!list_empty(&ctx_drvdata->attached_elm)) {
- ret = -EBUSY;
- goto fail;
- }
+ priv->dev = dev;
+ msm_iommu_domain_config(priv);
- list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
- if (tmp_drvdata == ctx_drvdata) {
- ret = -EBUSY;
- goto fail;
+ spin_lock_irqsave(&msm_iommu_lock, flags);
+ list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
+ master = list_first_entry(&iommu->ctx_list,
+ struct msm_iommu_ctx_dev,
+ list);
+ if (master->of_node == dev->of_node) {
+ ret = __enable_clocks(iommu);
+ if (ret)
+ goto fail;
+
+ list_for_each_entry(master, &iommu->ctx_list, list) {
+ if (master->num) {
+ dev_err(dev, "domain already attached");
+ ret = -EEXIST;
+ goto fail;
+ }
+ master->num =
+ msm_iommu_alloc_ctx(iommu->context_map,
+ 0, iommu->ncb);
+ if (IS_ERR_VALUE(master->num)) {
+ ret = -ENODEV;
+ goto fail;
+ }
+ config_mids(iommu, master);
+ __program_context(iommu->base, master->num,
+ priv);
+ }
+ __disable_clocks(iommu);
+ list_add(&iommu->dom_node, &priv->list_attached);
}
-
- ret = __enable_clocks(iommu_drvdata);
- if (ret)
- goto fail;
-
- __program_context(iommu_drvdata->base, ctx_dev->num,
- __pa(priv->pgtable));
-
- __disable_clocks(iommu_drvdata);
- list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
- ret = __flush_iotlb(domain);
+ }
fail:
spin_unlock_irqrestore(&msm_iommu_lock, flags);
+
return ret;
}
static void msm_iommu_detach_dev(struct iommu_domain *domain,
struct device *dev)
{
- struct msm_priv *priv;
- struct msm_iommu_ctx_dev *ctx_dev;
- struct msm_iommu_drvdata *iommu_drvdata;
- struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ struct msm_priv *priv = to_msm_priv(domain);
unsigned long flags;
+ struct msm_iommu_dev *iommu;
+ struct msm_iommu_ctx_dev *master;
int ret;
- spin_lock_irqsave(&msm_iommu_lock, flags);
- priv = to_msm_priv(domain);
-
- if (!dev)
- goto fail;
-
- iommu_drvdata = dev_get_drvdata(dev->parent);
- ctx_drvdata = dev_get_drvdata(dev);
- ctx_dev = dev->platform_data;
-
- if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
- goto fail;
-
- ret = __flush_iotlb(domain);
- if (ret)
- goto fail;
-
- ret = __enable_clocks(iommu_drvdata);
- if (ret)
- goto fail;
+ free_io_pgtable_ops(priv->iop);
- __reset_context(iommu_drvdata->base, ctx_dev->num);
- __disable_clocks(iommu_drvdata);
- list_del_init(&ctx_drvdata->attached_elm);
+ spin_lock_irqsave(&msm_iommu_lock, flags);
+ list_for_each_entry(iommu, &priv->list_attached, dom_node) {
+ ret = __enable_clocks(iommu);
+ if (ret)
+ goto fail;
+ list_for_each_entry(master, &iommu->ctx_list, list) {
+ msm_iommu_free_ctx(iommu->context_map, master->num);
+ __reset_context(iommu->base, master->num);
+ }
+ __disable_clocks(iommu);
+ }
fail:
spin_unlock_irqrestore(&msm_iommu_lock, flags);
}
-static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
+static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t pa, size_t len, int prot)
{
- struct msm_priv *priv;
+ struct msm_priv *priv = to_msm_priv(domain);
unsigned long flags;
- unsigned long *fl_table;
- unsigned long *fl_pte;
- unsigned long fl_offset;
- unsigned long *sl_table;
- unsigned long *sl_pte;
- unsigned long sl_offset;
- unsigned int pgprot;
- int ret = 0, tex, sh;
-
- spin_lock_irqsave(&msm_iommu_lock, flags);
-
- sh = (prot & MSM_IOMMU_ATTR_SH) ? 1 : 0;
- tex = msm_iommu_tex_class[prot & MSM_IOMMU_CP_MASK];
-
- if (tex < 0 || tex > NUM_TEX_CLASS - 1) {
- ret = -EINVAL;
- goto fail;
- }
-
- priv = to_msm_priv(domain);
-
- fl_table = priv->pgtable;
-
- if (len != SZ_16M && len != SZ_1M &&
- len != SZ_64K && len != SZ_4K) {
- pr_debug("Bad size: %d\n", len);
- ret = -EINVAL;
- goto fail;
- }
-
- if (!fl_table) {
- pr_debug("Null page table\n");
- ret = -EINVAL;
- goto fail;
- }
-
- if (len == SZ_16M || len == SZ_1M) {
- pgprot = sh ? FL_SHARED : 0;
- pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
- pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
- pgprot |= tex & 0x04 ? FL_TEX0 : 0;
- } else {
- pgprot = sh ? SL_SHARED : 0;
- pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
- pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
- pgprot |= tex & 0x04 ? SL_TEX0 : 0;
- }
-
- fl_offset = FL_OFFSET(va); /* Upper 12 bits */
- fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */
-
- if (len == SZ_16M) {
- int i = 0;
- for (i = 0; i < 16; i++)
- *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION |
- FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT |
- FL_SHARED | FL_NG | pgprot;
- }
-
- if (len == SZ_1M)
- *fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | FL_NG |
- FL_TYPE_SECT | FL_SHARED | pgprot;
-
- /* Need a 2nd level table */
- if ((len == SZ_4K || len == SZ_64K) && (*fl_pte) == 0) {
- unsigned long *sl;
- sl = (unsigned long *) __get_free_pages(GFP_ATOMIC,
- get_order(SZ_4K));
-
- if (!sl) {
- pr_debug("Could not allocate second level table\n");
- ret = -ENOMEM;
- goto fail;
- }
-
- memset(sl, 0, SZ_4K);
- *fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | FL_TYPE_TABLE);
- }
-
- sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
- sl_offset = SL_OFFSET(va);
- sl_pte = sl_table + sl_offset;
-
-
- if (len == SZ_4K)
- *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 | SL_NG |
- SL_SHARED | SL_TYPE_SMALL | pgprot;
-
- if (len == SZ_64K) {
- int i;
+ int ret;
- for (i = 0; i < 16; i++)
- *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 |
- SL_NG | SL_AP1 | SL_SHARED | SL_TYPE_LARGE | pgprot;
- }
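+	/* PTE manipulation is delegated to the io-pgtable ops behind
+	 * priv->iop; only the locking lives here.
+	 */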
+ spin_lock_irqsave(&priv->pgtlock, flags);
+ ret = priv->iop->map(priv->iop, iova, pa, len, prot);
+ spin_unlock_irqrestore(&priv->pgtlock, flags);
- ret = __flush_iotlb(domain);
-fail:
- spin_unlock_irqrestore(&msm_iommu_lock, flags);
return ret;
}
-static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
- size_t len)
+static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
+ size_t len)
{
- struct msm_priv *priv;
+ struct msm_priv *priv = to_msm_priv(domain);
unsigned long flags;
- unsigned long *fl_table;
- unsigned long *fl_pte;
- unsigned long fl_offset;
- unsigned long *sl_table;
- unsigned long *sl_pte;
- unsigned long sl_offset;
- int i, ret = 0;
-
- spin_lock_irqsave(&msm_iommu_lock, flags);
-
- priv = to_msm_priv(domain);
- fl_table = priv->pgtable;
+ spin_lock_irqsave(&priv->pgtlock, flags);
+ len = priv->iop->unmap(priv->iop, iova, len);
+ spin_unlock_irqrestore(&priv->pgtlock, flags);
- if (len != SZ_16M && len != SZ_1M &&
- len != SZ_64K && len != SZ_4K) {
- pr_debug("Bad length: %d\n", len);
- goto fail;
- }
-
- if (!fl_table) {
- pr_debug("Null page table\n");
- goto fail;
- }
-
- fl_offset = FL_OFFSET(va); /* Upper 12 bits */
- fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */
-
- if (*fl_pte == 0) {
- pr_debug("First level PTE is 0\n");
- goto fail;
- }
-
- /* Unmap supersection */
- if (len == SZ_16M)
- for (i = 0; i < 16; i++)
- *(fl_pte+i) = 0;
-
- if (len == SZ_1M)
- *fl_pte = 0;
-
- sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
- sl_offset = SL_OFFSET(va);
- sl_pte = sl_table + sl_offset;
-
- if (len == SZ_64K) {
- for (i = 0; i < 16; i++)
- *(sl_pte+i) = 0;
- }
-
- if (len == SZ_4K)
- *sl_pte = 0;
-
- if (len == SZ_4K || len == SZ_64K) {
- int used = 0;
-
- for (i = 0; i < NUM_SL_PTE; i++)
- if (sl_table[i])
- used = 1;
- if (!used) {
- free_page((unsigned long)sl_table);
- *fl_pte = 0;
- }
- }
-
- ret = __flush_iotlb(domain);
-
-fail:
- spin_unlock_irqrestore(&msm_iommu_lock, flags);
-
- /* the IOMMU API requires us to return how many bytes were unmapped */
- len = ret ? 0 : len;
return len;
}
@@ -555,47 +478,46 @@ static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t va)
{
struct msm_priv *priv;
- struct msm_iommu_drvdata *iommu_drvdata;
- struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ struct msm_iommu_dev *iommu;
+ struct msm_iommu_ctx_dev *master;
unsigned int par;
unsigned long flags;
- void __iomem *base;
phys_addr_t ret = 0;
- int ctx;
spin_lock_irqsave(&msm_iommu_lock, flags);
priv = to_msm_priv(domain);
- if (list_empty(&priv->list_attached))
- goto fail;
+ iommu = list_first_entry(&priv->list_attached,
+ struct msm_iommu_dev, dom_node);
- ctx_drvdata = list_entry(priv->list_attached.next,
- struct msm_iommu_ctx_drvdata, attached_elm);
- iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
+ if (list_empty(&iommu->ctx_list))
+ goto fail;
- base = iommu_drvdata->base;
- ctx = ctx_drvdata->num;
+ master = list_first_entry(&iommu->ctx_list,
+ struct msm_iommu_ctx_dev, list);
+ if (!master)
+ goto fail;
- ret = __enable_clocks(iommu_drvdata);
+ ret = __enable_clocks(iommu);
if (ret)
goto fail;
/* Invalidate context TLB */
- SET_CTX_TLBIALL(base, ctx, 0);
- SET_V2PPR(base, ctx, va & V2Pxx_VA);
+ SET_CTX_TLBIALL(iommu->base, master->num, 0);
+ SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);
- par = GET_PAR(base, ctx);
+ par = GET_PAR(iommu->base, master->num);
/* We are dealing with a supersection */
- if (GET_NOFAULT_SS(base, ctx))
+ if (GET_NOFAULT_SS(iommu->base, master->num))
ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
else /* Upper 20 bits from PAR, lower 12 from VA */
ret = (par & 0xFFFFF000) | (va & 0x00000FFF);
- if (GET_FAULT(base, ctx))
+ if (GET_FAULT(iommu->base, master->num))
ret = 0;
- __disable_clocks(iommu_drvdata);
+ __disable_clocks(iommu);
fail:
spin_unlock_irqrestore(&msm_iommu_lock, flags);
return ret;
@@ -629,49 +551,92 @@ static void print_ctx_regs(void __iomem *base, int ctx)
GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
pr_err("SCTLR = %08x ACTLR = %08x\n",
GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
- pr_err("PRRR = %08x NMRR = %08x\n",
- GET_PRRR(base, ctx), GET_NMRR(base, ctx));
+}
+
+static void insert_iommu_master(struct device *dev,
+ struct msm_iommu_dev **iommu,
+ struct of_phandle_args *spec)
+{
+ struct msm_iommu_ctx_dev *master = dev->archdata.iommu;
+ int sid;
+
+ if (list_empty(&(*iommu)->ctx_list)) {
+ master = kzalloc(sizeof(*master), GFP_ATOMIC);
+ master->of_node = dev->of_node;
+ list_add(&master->list, &(*iommu)->ctx_list);
+ dev->archdata.iommu = master;
+ }
+
+ for (sid = 0; sid < master->num_mids; sid++)
+ if (master->mids[sid] == spec->args[0]) {
+ dev_warn(dev, "Stream ID 0x%hx repeated; ignoring\n",
+ sid);
+ return;
+ }
+
+ master->mids[master->num_mids++] = spec->args[0];
+}
+
+static int qcom_iommu_of_xlate(struct device *dev,
+ struct of_phandle_args *spec)
+{
+ struct msm_iommu_dev *iommu;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&msm_iommu_lock, flags);
+ list_for_each_entry(iommu, &qcom_iommu_devices, dev_node)
+ if (iommu->dev->of_node == spec->np)
+ break;
+
+ if (!iommu || iommu->dev->of_node != spec->np) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ insert_iommu_master(dev, &iommu, spec);
+fail:
+ spin_unlock_irqrestore(&msm_iommu_lock, flags);
+
+ return ret;
}
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
- struct msm_iommu_drvdata *drvdata = dev_id;
- void __iomem *base;
+ struct msm_iommu_dev *iommu = dev_id;
unsigned int fsr;
int i, ret;
spin_lock(&msm_iommu_lock);
- if (!drvdata) {
+ if (!iommu) {
pr_err("Invalid device ID in context interrupt handler\n");
goto fail;
}
- base = drvdata->base;
-
pr_err("Unexpected IOMMU page fault!\n");
- pr_err("base = %08x\n", (unsigned int) base);
+ pr_err("base = %08x\n", (unsigned int)iommu->base);
- ret = __enable_clocks(drvdata);
+ ret = __enable_clocks(iommu);
if (ret)
goto fail;
- for (i = 0; i < drvdata->ncb; i++) {
- fsr = GET_FSR(base, i);
+ for (i = 0; i < iommu->ncb; i++) {
+ fsr = GET_FSR(iommu->base, i);
if (fsr) {
pr_err("Fault occurred in context %d.\n", i);
pr_err("Interesting registers:\n");
- print_ctx_regs(base, i);
- SET_FSR(base, i, 0x4000000F);
+ print_ctx_regs(iommu->base, i);
+ SET_FSR(iommu->base, i, 0x4000000F);
}
}
- __disable_clocks(drvdata);
+ __disable_clocks(iommu);
fail:
spin_unlock(&msm_iommu_lock);
return 0;
}
-static const struct iommu_ops msm_iommu_ops = {
+static struct iommu_ops msm_iommu_ops = {
.capable = msm_iommu_capable,
.domain_alloc = msm_iommu_domain_alloc,
.domain_free = msm_iommu_domain_free,
@@ -682,54 +647,163 @@ static const struct iommu_ops msm_iommu_ops = {
.map_sg = default_iommu_map_sg,
.iova_to_phys = msm_iommu_iova_to_phys,
.pgsize_bitmap = MSM_IOMMU_PGSIZES,
+ .of_xlate = qcom_iommu_of_xlate,
};
-static int __init get_tex_class(int icp, int ocp, int mt, int nos)
+static int msm_iommu_probe(struct platform_device *pdev)
{
- int i = 0;
- unsigned int prrr = 0;
- unsigned int nmrr = 0;
- int c_icp, c_ocp, c_mt, c_nos;
-
- RCP15_PRRR(prrr);
- RCP15_NMRR(nmrr);
-
- for (i = 0; i < NUM_TEX_CLASS; i++) {
- c_nos = PRRR_NOS(prrr, i);
- c_mt = PRRR_MT(prrr, i);
- c_icp = NMRR_ICP(nmrr, i);
- c_ocp = NMRR_OCP(nmrr, i);
-
- if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
- return i;
+ struct resource *r;
+ struct msm_iommu_dev *iommu;
+ int ret, par, val;
+
+ iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
+ if (!iommu)
+ return -ENODEV;
+
+ iommu->dev = &pdev->dev;
+ INIT_LIST_HEAD(&iommu->ctx_list);
+
+ iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
+ if (IS_ERR(iommu->pclk)) {
+ dev_err(iommu->dev, "could not get smmu_pclk\n");
+ return PTR_ERR(iommu->pclk);
+ }
+
+ ret = clk_prepare(iommu->pclk);
+ if (ret) {
+ dev_err(iommu->dev, "could not prepare smmu_pclk\n");
+ return ret;
+ }
+
+ iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
+ if (IS_ERR(iommu->clk)) {
+ dev_err(iommu->dev, "could not get iommu_clk\n");
+ clk_unprepare(iommu->pclk);
+ return PTR_ERR(iommu->clk);
+ }
+
+ ret = clk_prepare(iommu->clk);
+ if (ret) {
+ dev_err(iommu->dev, "could not prepare iommu_clk\n");
+ clk_unprepare(iommu->pclk);
+ return ret;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ iommu->base = devm_ioremap_resource(iommu->dev, r);
+ if (IS_ERR(iommu->base)) {
+ dev_err(iommu->dev, "could not get iommu base\n");
+ ret = PTR_ERR(iommu->base);
+ goto fail;
}
- return -ENODEV;
+ iommu->irq = platform_get_irq(pdev, 0);
+ if (iommu->irq < 0) {
+ dev_err(iommu->dev, "could not get iommu irq\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
+ if (ret) {
+ dev_err(iommu->dev, "could not get ncb\n");
+ goto fail;
+ }
+ iommu->ncb = val;
+
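+	/* Sanity check: translate address 0 through context 0 and
+	 * verify that PAR latches a non-zero result.
+	 */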
+ msm_iommu_reset(iommu->base, iommu->ncb);
+ SET_M(iommu->base, 0, 1);
+ SET_PAR(iommu->base, 0, 0);
+ SET_V2PCFG(iommu->base, 0, 1);
+ SET_V2PPR(iommu->base, 0, 0);
+ par = GET_PAR(iommu->base, 0);
+ SET_V2PCFG(iommu->base, 0, 0);
+ SET_M(iommu->base, 0, 0);
+
+ if (!par) {
+ pr_err("Invalid PAR value detected\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
+ msm_iommu_fault_handler,
+ IRQF_ONESHOT | IRQF_SHARED,
+ "msm_iommu_secure_irpt_handler",
+ iommu);
+ if (ret) {
+ pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
+ goto fail;
+ }
+
+ list_add(&iommu->dev_node, &qcom_iommu_devices);
+ of_iommu_set_ops(pdev->dev.of_node, &msm_iommu_ops);
+
+ pr_info("device mapped at %p, irq %d with %d ctx banks\n",
+ iommu->base, iommu->irq, iommu->ncb);
+
+ return ret;
+fail:
+ clk_unprepare(iommu->clk);
+ clk_unprepare(iommu->pclk);
+ return ret;
+}
+
+static const struct of_device_id msm_iommu_dt_match[] = {
+ { .compatible = "qcom,apq8064-iommu" },
+ {}
+};
+
+static int msm_iommu_remove(struct platform_device *pdev)
+{
+ struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);
+
+ clk_unprepare(iommu->clk);
+ clk_unprepare(iommu->pclk);
+ return 0;
}
-static void __init setup_iommu_tex_classes(void)
+static struct platform_driver msm_iommu_driver = {
+ .driver = {
+ .name = "msm_iommu",
+ .of_match_table = msm_iommu_dt_match,
+ },
+ .probe = msm_iommu_probe,
+ .remove = msm_iommu_remove,
+};
+
+static int __init msm_iommu_driver_init(void)
{
- msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
- get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);
+ int ret;
- msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
- get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);
+ ret = platform_driver_register(&msm_iommu_driver);
+ if (ret != 0)
+ pr_err("Failed to register IOMMU driver\n");
- msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
- get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);
+ return ret;
+}
- msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
- get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
+static void __exit msm_iommu_driver_exit(void)
+{
+ platform_driver_unregister(&msm_iommu_driver);
}
+subsys_initcall(msm_iommu_driver_init);
+module_exit(msm_iommu_driver_exit);
+
static int __init msm_iommu_init(void)
{
- setup_iommu_tex_classes();
bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
return 0;
}
-subsys_initcall(msm_iommu_init);
+static int __init msm_iommu_of_setup(struct device_node *np)
+{
+ msm_iommu_init();
+ return 0;
+}
+
+IOMMU_OF_DECLARE(msm_iommu_of, "qcom,apq8064-iommu", msm_iommu_of_setup);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");
diff --git a/drivers/iommu/msm_iommu.h b/drivers/iommu/msm_iommu.h
index 5c7c955e6..4ca25d50d 100644
--- a/drivers/iommu/msm_iommu.h
+++ b/drivers/iommu/msm_iommu.h
@@ -42,74 +42,53 @@
*/
#define MAX_NUM_MIDS 32
+/* Maximum number of context banks that can be present in IOMMU */
+#define IOMMU_MAX_CBS 128
+
/**
* struct msm_iommu_dev - a single IOMMU hardware instance
- * name Human-readable name given to this IOMMU HW instance
* ncb Number of context banks present on this IOMMU HW instance
+ * dev: IOMMU device
+ * irq: Interrupt number
+ * clk: The bus clock for this IOMMU hardware instance
+ * pclk: The clock for the IOMMU bus interconnect
+ * dev_node: list head in qcom_iommu_device_list
+ * dom_node: list head for domain
+ * ctx_list: list of 'struct msm_iommu_ctx_dev'
+ * context_map: Bitmap to track allocated context banks
*/
struct msm_iommu_dev {
- const char *name;
+ void __iomem *base;
int ncb;
+ struct device *dev;
+ int irq;
+ struct clk *clk;
+ struct clk *pclk;
+ struct list_head dev_node;
+ struct list_head dom_node;
+ struct list_head ctx_list;
+ DECLARE_BITMAP(context_map, IOMMU_MAX_CBS);
};
/**
* struct msm_iommu_ctx_dev - an IOMMU context bank instance
- * name Human-readable name given to this context bank
+ * of_node node pointer of the client device
* num Index of this context bank within the hardware
* mids List of Machine IDs that are to be mapped into this context
* bank, terminated by -1. The MID is a set of signals on the
* AXI bus that identifies the function associated with a specific
* memory request. (See ARM spec).
+ * num_mids Total number of mids
+ * list list head in ctx_list
*/
struct msm_iommu_ctx_dev {
- const char *name;
+ struct device_node *of_node;
int num;
int mids[MAX_NUM_MIDS];
+ int num_mids;
+ struct list_head list;
};
-
-/**
- * struct msm_iommu_drvdata - A single IOMMU hardware instance
- * @base: IOMMU config port base address (VA)
- * @ncb The number of contexts on this IOMMU
- * @irq: Interrupt number
- * @clk: The bus clock for this IOMMU hardware instance
- * @pclk: The clock for the IOMMU bus interconnect
- *
- * A msm_iommu_drvdata holds the global driver data about a single piece
- * of an IOMMU hardware instance.
- */
-struct msm_iommu_drvdata {
- void __iomem *base;
- int irq;
- int ncb;
- struct clk *clk;
- struct clk *pclk;
-};
-
-/**
- * struct msm_iommu_ctx_drvdata - an IOMMU context bank instance
- * @num: Hardware context number of this context
- * @pdev: Platform device associated wit this HW instance
- * @attached_elm: List element for domains to track which devices are
- * attached to them
- *
- * A msm_iommu_ctx_drvdata holds the driver data for a single context bank
- * within each IOMMU hardware instance
- */
-struct msm_iommu_ctx_drvdata {
- int num;
- struct platform_device *pdev;
- struct list_head attached_elm;
-};
-
-/*
- * Look up an IOMMU context device by its context name. NULL if none found.
- * Useful for testing and drivers that do not yet fully have IOMMU stuff in
- * their platform devices.
- */
-struct device *msm_iommu_get_ctx(const char *ctx_name);
-
/*
* Interrupt handler for the IOMMU context fault interrupt. Hooking the
* interrupt is not supported in the API yet, but this will print an error
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index c3043d875..b12c12d74 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -34,7 +34,7 @@
#include <dt-bindings/memory/mt8173-larb-port.h>
#include <soc/mediatek/smi.h>
-#include "io-pgtable.h"
+#include "mtk_iommu.h"
#define REG_MMU_PT_BASE_ADDR 0x000
@@ -93,20 +93,6 @@
#define MTK_PROTECT_PA_ALIGN 128
-struct mtk_iommu_suspend_reg {
- u32 standard_axi_mode;
- u32 dcm_dis;
- u32 ctrl_reg;
- u32 int_control0;
- u32 int_main_control;
-};
-
-struct mtk_iommu_client_priv {
- struct list_head client;
- unsigned int mtk_m4u_id;
- struct device *m4udev;
-};
-
struct mtk_iommu_domain {
spinlock_t pgtlock; /* lock for page table */
@@ -116,19 +102,6 @@ struct mtk_iommu_domain {
struct iommu_domain domain;
};
-struct mtk_iommu_data {
- void __iomem *base;
- int irq;
- struct device *dev;
- struct clk *bclk;
- phys_addr_t protect_base; /* protect memory base */
- struct mtk_iommu_suspend_reg reg;
- struct mtk_iommu_domain *m4u_dom;
- struct iommu_group *m4u_group;
- struct mtk_smi_iommu smi_imu; /* SMI larb iommu info */
- bool enable_4GB;
-};
-
static struct iommu_ops mtk_iommu_ops;
static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
@@ -455,7 +428,6 @@ static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
if (!dev->archdata.iommu) {
/* Get the m4u device */
m4updev = of_find_device_by_node(args->np);
- of_node_put(args->np);
if (WARN_ON(!m4updev))
return -EINVAL;
@@ -552,25 +524,6 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
return 0;
}
-static int compare_of(struct device *dev, void *data)
-{
- return dev->of_node == data;
-}
-
-static int mtk_iommu_bind(struct device *dev)
-{
- struct mtk_iommu_data *data = dev_get_drvdata(dev);
-
- return component_bind_all(dev, &data->smi_imu);
-}
-
-static void mtk_iommu_unbind(struct device *dev)
-{
- struct mtk_iommu_data *data = dev_get_drvdata(dev);
-
- component_unbind_all(dev, &data->smi_imu);
-}
-
static const struct component_master_ops mtk_iommu_com_ops = {
.bind = mtk_iommu_bind,
.unbind = mtk_iommu_unbind,
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
new file mode 100644
index 000000000..3dab13b4a
--- /dev/null
+++ b/drivers/iommu/mtk_iommu.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Honghui Zhang <honghui.zhang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MTK_IOMMU_H_
+#define _MTK_IOMMU_H_
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <soc/mediatek/smi.h>
+
+#include "io-pgtable.h"
+
+struct mtk_iommu_suspend_reg {
+ u32 standard_axi_mode;
+ u32 dcm_dis;
+ u32 ctrl_reg;
+ u32 int_control0;
+ u32 int_main_control;
+};
+
+struct mtk_iommu_client_priv {
+ struct list_head client;
+ unsigned int mtk_m4u_id;
+ struct device *m4udev;
+};
+
+struct mtk_iommu_domain;
+
+struct mtk_iommu_data {
+ void __iomem *base;
+ int irq;
+ struct device *dev;
+ struct clk *bclk;
+ phys_addr_t protect_base; /* protect memory base */
+ struct mtk_iommu_suspend_reg reg;
+ struct mtk_iommu_domain *m4u_dom;
+ struct iommu_group *m4u_group;
+ struct mtk_smi_iommu smi_imu; /* SMI larb iommu info */
+ bool enable_4GB;
+};
+
+static inline int compare_of(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static inline int mtk_iommu_bind(struct device *dev)
+{
+ struct mtk_iommu_data *data = dev_get_drvdata(dev);
+
+ return component_bind_all(dev, &data->smi_imu);
+}
+
+static inline void mtk_iommu_unbind(struct device *dev)
+{
+ struct mtk_iommu_data *data = dev_get_drvdata(dev);
+
+ component_unbind_all(dev, &data->smi_imu);
+}
+
+#endif
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
new file mode 100644
index 000000000..b8aeb0768
--- /dev/null
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -0,0 +1,727 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Honghui Zhang <honghui.zhang@mediatek.com>
+ *
+ * Based on driver/iommu/mtk_iommu.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/bootmem.h>
+#include <linux/bug.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/dma-iommu.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/kmemleak.h>
+#include <linux/list.h>
+#include <linux/of_address.h>
+#include <linux/of_iommu.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <asm/barrier.h>
+#include <asm/dma-iommu.h>
+#include <linux/module.h>
+#include <dt-bindings/memory/mt2701-larb-port.h>
+#include <soc/mediatek/smi.h>
+#include "mtk_iommu.h"
+
+#define REG_MMU_PT_BASE_ADDR 0x000
+
+#define F_ALL_INVLD 0x2
+#define F_MMU_INV_RANGE 0x1
+#define F_INVLD_EN0 BIT(0)
+#define F_INVLD_EN1 BIT(1)
+
+#define F_MMU_FAULT_VA_MSK 0xfffff000
+#define MTK_PROTECT_PA_ALIGN 128
+
+#define REG_MMU_CTRL_REG 0x210
+#define F_MMU_CTRL_COHERENT_EN BIT(8)
+#define REG_MMU_IVRP_PADDR 0x214
+#define REG_MMU_INT_CONTROL 0x220
+#define F_INT_TRANSLATION_FAULT BIT(0)
+#define F_INT_MAIN_MULTI_HIT_FAULT BIT(1)
+#define F_INT_INVALID_PA_FAULT BIT(2)
+#define F_INT_ENTRY_REPLACEMENT_FAULT BIT(3)
+#define F_INT_TABLE_WALK_FAULT BIT(4)
+#define F_INT_TLB_MISS_FAULT BIT(5)
+#define F_INT_PFH_DMA_FIFO_OVERFLOW BIT(6)
+#define F_INT_MISS_DMA_FIFO_OVERFLOW BIT(7)
+
+#define F_MMU_TF_PROTECT_SEL(prot) (((prot) & 0x3) << 5)
+#define F_INT_CLR_BIT BIT(12)
+
+#define REG_MMU_FAULT_ST 0x224
+#define REG_MMU_FAULT_VA 0x228
+#define REG_MMU_INVLD_PA 0x22C
+#define REG_MMU_INT_ID 0x388
+#define REG_MMU_INVALIDATE 0x5c0
+#define REG_MMU_INVLD_START_A 0x5c4
+#define REG_MMU_INVLD_END_A 0x5c8
+
+#define REG_MMU_INV_SEL 0x5d8
+#define REG_MMU_STANDARD_AXI_MODE 0x5e8
+
+#define REG_MMU_DCM 0x5f0
+#define F_MMU_DCM_ON BIT(1)
+#define REG_MMU_CPE_DONE 0x60c
+#define F_DESC_VALID 0x2
+#define F_DESC_NONSEC BIT(3)
+#define MT2701_M4U_TF_LARB(TF) (6 - (((TF) >> 13) & 0x7))
+#define MT2701_M4U_TF_PORT(TF) (((TF) >> 8) & 0xF)
+/* MTK generation one IOMMU HW only supports 4K mappings */
+#define MT2701_IOMMU_PAGE_SHIFT 12
+#define MT2701_IOMMU_PAGE_SIZE (1UL << MT2701_IOMMU_PAGE_SHIFT)
+
+/*
+ * The MTK m4u supports a 4GB iova address space but only 4K page
+ * mappings, so the pagetable size must be exactly 4M.
+ */
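+/*
+ * That is: (4GB / 4K) = 2^20 entries, at sizeof(u32) = 4 bytes per
+ * entry, giving a 2^22-byte (4M) flat table.
+ */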
+#define M2701_IOMMU_PGT_SIZE SZ_4M
+
+struct mtk_iommu_domain {
+ spinlock_t pgtlock; /* lock for page table */
+ struct iommu_domain domain;
+ u32 *pgt_va;
+ dma_addr_t pgt_pa;
+ struct mtk_iommu_data *data;
+};
+
+static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct mtk_iommu_domain, domain);
+}
+
+static const int mt2701_m4u_in_larb[] = {
+ LARB0_PORT_OFFSET, LARB1_PORT_OFFSET,
+ LARB2_PORT_OFFSET, LARB3_PORT_OFFSET
+};
+
+static inline int mt2701_m4u_to_larb(int id)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(mt2701_m4u_in_larb) - 1; i >= 0; i--)
+ if (id >= mt2701_m4u_in_larb[i])
+ return i;
+
+ return 0;
+}
+
+static inline int mt2701_m4u_to_port(int id)
+{
+ int larb = mt2701_m4u_to_larb(id);
+
+ return id - mt2701_m4u_in_larb[larb];
+}
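+/*
+ * Worked example with hypothetical offsets {0, 8, 16, 24} (the real
+ * values come from LARBx_PORT_OFFSET in the dt-bindings header):
+ * id 18 falls in larb 2 (offset 16) and decodes to port 18 - 16 = 2.
+ */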
+
+static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
+{
+ writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
+ data->base + REG_MMU_INV_SEL);
+ writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
+ wmb(); /* Make sure the TLB flush-all has completed */
+}
+
+static void mtk_iommu_tlb_flush_range(struct mtk_iommu_data *data,
+ unsigned long iova, size_t size)
+{
+ int ret;
+ u32 tmp;
+
+ writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
+ data->base + REG_MMU_INV_SEL);
+ writel_relaxed(iova & F_MMU_FAULT_VA_MSK,
+ data->base + REG_MMU_INVLD_START_A);
+ writel_relaxed((iova + size - 1) & F_MMU_FAULT_VA_MSK,
+ data->base + REG_MMU_INVLD_END_A);
+ writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE);
+
+ ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
+ tmp, tmp != 0, 10, 100000);
+ if (ret) {
+ dev_warn(data->dev,
+ "Partial TLB flush timed out, falling back to full flush\n");
+ mtk_iommu_tlb_flush_all(data);
+ }
+ /* Clear the CPE status */
+ writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
+}
+
+static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
+{
+ struct mtk_iommu_data *data = dev_id;
+ struct mtk_iommu_domain *dom = data->m4u_dom;
+ u32 int_state, regval, fault_iova, fault_pa;
+ unsigned int fault_larb, fault_port;
+
+ /* Read error information from registers */
+ int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST);
+ fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA);
+
+ fault_iova &= F_MMU_FAULT_VA_MSK;
+ fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA);
+ regval = readl_relaxed(data->base + REG_MMU_INT_ID);
+ fault_larb = MT2701_M4U_TF_LARB(regval);
+ fault_port = MT2701_M4U_TF_PORT(regval);
+
+	/*
+	 * The MTK v1 IOMMU HW cannot determine whether a fault was a read
+	 * or a write, so report every fault as a read fault.
+	 */
+ if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
+ IOMMU_FAULT_READ))
+ dev_err_ratelimited(data->dev,
+ "fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d\n",
+ int_state, fault_iova, fault_pa,
+ fault_larb, fault_port);
+
+ /* Interrupt clear */
+ regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL);
+ regval |= F_INT_CLR_BIT;
+ writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL);
+
+ mtk_iommu_tlb_flush_all(data);
+
+ return IRQ_HANDLED;
+}
+
+static void mtk_iommu_config(struct mtk_iommu_data *data,
+ struct device *dev, bool enable)
+{
+ struct mtk_iommu_client_priv *head, *cur, *next;
+ struct mtk_smi_larb_iommu *larb_mmu;
+ unsigned int larbid, portid;
+
+ head = dev->archdata.iommu;
+ list_for_each_entry_safe(cur, next, &head->client, client) {
+ larbid = mt2701_m4u_to_larb(cur->mtk_m4u_id);
+ portid = mt2701_m4u_to_port(cur->mtk_m4u_id);
+ larb_mmu = &data->smi_imu.larb_imu[larbid];
+
+ dev_dbg(dev, "%s iommu port: %d\n",
+ enable ? "enable" : "disable", portid);
+
+ if (enable)
+ larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
+ else
+ larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
+ }
+}
+
+static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
+{
+ struct mtk_iommu_domain *dom = data->m4u_dom;
+
+ spin_lock_init(&dom->pgtlock);
+
+ dom->pgt_va = dma_zalloc_coherent(data->dev,
+ M2701_IOMMU_PGT_SIZE,
+ &dom->pgt_pa, GFP_KERNEL);
+ if (!dom->pgt_va)
+ return -ENOMEM;
+
+ writel(dom->pgt_pa, data->base + REG_MMU_PT_BASE_ADDR);
+
+ dom->data = data;
+
+ return 0;
+}
+
+static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
+{
+ struct mtk_iommu_domain *dom;
+
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
+ dom = kzalloc(sizeof(*dom), GFP_KERNEL);
+ if (!dom)
+ return NULL;
+
+ return &dom->domain;
+}
+
+static void mtk_iommu_domain_free(struct iommu_domain *domain)
+{
+ struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ struct mtk_iommu_data *data = dom->data;
+
+ dma_free_coherent(data->dev, M2701_IOMMU_PGT_SIZE,
+ dom->pgt_va, dom->pgt_pa);
+ kfree(to_mtk_domain(domain));
+}
+
+static int mtk_iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
+ struct mtk_iommu_data *data;
+ int ret;
+
+ if (!priv)
+ return -ENODEV;
+
+ data = dev_get_drvdata(priv->m4udev);
+ if (!data->m4u_dom) {
+ data->m4u_dom = dom;
+ ret = mtk_iommu_domain_finalise(data);
+ if (ret) {
+ data->m4u_dom = NULL;
+ return ret;
+ }
+ }
+
+ mtk_iommu_config(data, dev, true);
+ return 0;
+}
+
+static void mtk_iommu_detach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
+ struct mtk_iommu_data *data;
+
+ if (!priv)
+ return;
+
+ data = dev_get_drvdata(priv->m4udev);
+ mtk_iommu_config(data, dev, false);
+}
+
+static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot)
+{
+ struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;
+ unsigned long flags;
+ unsigned int i;
+ u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
+ u32 pabase = (u32)paddr;
+ int map_size = 0;
+
+ spin_lock_irqsave(&dom->pgtlock, flags);
+ for (i = 0; i < page_num; i++) {
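+		/* A valid entry here means the range overlaps an existing
+		 * mapping: undo the descriptors written so far by this call
+		 * and fail with -EEXIST below.
+		 */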
+ if (pgt_base_iova[i]) {
+ memset(pgt_base_iova, 0, i * sizeof(u32));
+ break;
+ }
+ pgt_base_iova[i] = pabase | F_DESC_VALID | F_DESC_NONSEC;
+ pabase += MT2701_IOMMU_PAGE_SIZE;
+ map_size += MT2701_IOMMU_PAGE_SIZE;
+ }
+
+ spin_unlock_irqrestore(&dom->pgtlock, flags);
+
+ mtk_iommu_tlb_flush_range(dom->data, iova, size);
+
+ return map_size == size ? 0 : -EEXIST;
+}
+
+static size_t mtk_iommu_unmap(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ unsigned long flags;
+ u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
+ unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;
+
+ spin_lock_irqsave(&dom->pgtlock, flags);
+ memset(pgt_base_iova, 0, page_num * sizeof(u32));
+ spin_unlock_irqrestore(&dom->pgtlock, flags);
+
+ mtk_iommu_tlb_flush_range(dom->data, iova, size);
+
+ return size;
+}
+
+static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ unsigned long flags;
+ phys_addr_t pa;
+
+ spin_lock_irqsave(&dom->pgtlock, flags);
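+	/* One flat u32 descriptor per 4K page; mask off the flag bits
+	 * to recover the physical address.
+	 */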
+ pa = *(dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT));
+ pa = pa & (~(MT2701_IOMMU_PAGE_SIZE - 1));
+ spin_unlock_irqrestore(&dom->pgtlock, flags);
+
+ return pa;
+}
+
+/*
+ * MTK generation one IOMMU HW supports only a single iommu domain;
+ * all client devices share the same iova address space.
+ */
+static int mtk_iommu_create_mapping(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct mtk_iommu_client_priv *head, *priv, *next;
+ struct platform_device *m4updev;
+ struct dma_iommu_mapping *mtk_mapping;
+ struct device *m4udev;
+ int ret;
+
+ if (args->args_count != 1) {
+ dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
+ args->args_count);
+ return -EINVAL;
+ }
+
+ if (!dev->archdata.iommu) {
+ /* Get the m4u device */
+ m4updev = of_find_device_by_node(args->np);
+ if (WARN_ON(!m4updev))
+ return -EINVAL;
+
+ head = kzalloc(sizeof(*head), GFP_KERNEL);
+ if (!head)
+ return -ENOMEM;
+
+ dev->archdata.iommu = head;
+ INIT_LIST_HEAD(&head->client);
+ head->m4udev = &m4updev->dev;
+ } else {
+ head = dev->archdata.iommu;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto err_free_mem;
+ }
+ priv->mtk_m4u_id = args->args[0];
+ list_add_tail(&priv->client, &head->client);
+
+ m4udev = head->m4udev;
+ mtk_mapping = m4udev->archdata.iommu;
+ if (!mtk_mapping) {
+		/* The MTK IOMMU supports a 4GB iova address space. */
+ mtk_mapping = arm_iommu_create_mapping(&platform_bus_type,
+ 0, 1ULL << 32);
+ if (IS_ERR(mtk_mapping)) {
+ ret = PTR_ERR(mtk_mapping);
+ goto err_free_mem;
+ }
+ m4udev->archdata.iommu = mtk_mapping;
+ }
+
+ ret = arm_iommu_attach_device(dev, mtk_mapping);
+ if (ret)
+ goto err_release_mapping;
+
+ return 0;
+
+err_release_mapping:
+ arm_iommu_release_mapping(mtk_mapping);
+ m4udev->archdata.iommu = NULL;
+err_free_mem:
+ list_for_each_entry_safe(priv, next, &head->client, client)
+ kfree(priv);
+ kfree(head);
+ dev->archdata.iommu = NULL;
+ return ret;
+}
+
+static int mtk_iommu_add_device(struct device *dev)
+{
+ struct iommu_group *group;
+ struct of_phandle_args iommu_spec;
+ struct of_phandle_iterator it;
+ int err;
+
+ of_for_each_phandle(&it, err, dev->of_node, "iommus",
+ "#iommu-cells", 0) {
+ int count = of_phandle_iterator_args(&it, iommu_spec.args,
+ MAX_PHANDLE_ARGS);
+ iommu_spec.np = of_node_get(it.node);
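+/*
+ * Record the stream ID from @spec on the IOMMU's master list,
+ * allocating a master structure for @dev if the IOMMU has none yet.
+ */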
+ iommu_spec.args_count = count;
+
+ mtk_iommu_create_mapping(dev, &iommu_spec);
+ of_node_put(iommu_spec.np);
+ }
+
+ if (!dev->archdata.iommu) /* Not an iommu client device */
+ return -ENODEV;
+
+ group = iommu_group_get_for_dev(dev);
+ if (IS_ERR(group))
+ return PTR_ERR(group);
+
+ iommu_group_put(group);
+ return 0;
+}
+
+static void mtk_iommu_remove_device(struct device *dev)
+{
+ struct mtk_iommu_client_priv *head, *cur, *next;
+
+ head = dev->archdata.iommu;
+ if (!head)
+ return;
+
+ list_for_each_entry_safe(cur, next, &head->client, client) {
+ list_del(&cur->client);
+ kfree(cur);
+ }
+ kfree(head);
+ dev->archdata.iommu = NULL;
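+	/* Find the registered IOMMU instance matching the phandle node. */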
+
+ iommu_group_remove_device(dev);
+}
+
+static struct iommu_group *mtk_iommu_device_group(struct device *dev)
+{
+ struct mtk_iommu_data *data;
+ struct mtk_iommu_client_priv *priv;
+
+ priv = dev->archdata.iommu;
+ if (!priv)
+ return ERR_PTR(-ENODEV);
+
+ /* All the client devices are in the same m4u iommu-group */
+ data = dev_get_drvdata(priv->m4udev);
+ if (!data->m4u_group) {
+ data->m4u_group = iommu_group_alloc();
+ if (IS_ERR(data->m4u_group))
+ dev_err(dev, "Failed to allocate M4U IOMMU group\n");
+ }
+ return data->m4u_group;
+}
+
+static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
+{
+ u32 regval;
+ int ret;
+
+ ret = clk_prepare_enable(data->bclk);
+ if (ret) {
+ dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
+ return ret;
+ }
+
+ regval = F_MMU_CTRL_COHERENT_EN | F_MMU_TF_PROTECT_SEL(2);
+ writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);
+
+ regval = F_INT_TRANSLATION_FAULT |
+ F_INT_MAIN_MULTI_HIT_FAULT |
+ F_INT_INVALID_PA_FAULT |
+ F_INT_ENTRY_REPLACEMENT_FAULT |
+ F_INT_TABLE_WALK_FAULT |
+ F_INT_TLB_MISS_FAULT |
+ F_INT_PFH_DMA_FIFO_OVERFLOW |
+ F_INT_MISS_DMA_FIFO_OVERFLOW;
+ writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL);
+
+	/* Protect memory; HW writes here on a translation fault */
+ writel_relaxed(data->protect_base,
+ data->base + REG_MMU_IVRP_PADDR);
+
+ writel_relaxed(F_MMU_DCM_ON, data->base + REG_MMU_DCM);
+
+ if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
+ dev_name(data->dev), (void *)data)) {
+ writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
+ clk_disable_unprepare(data->bclk);
+ dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static struct iommu_ops mtk_iommu_ops = {
+ .domain_alloc = mtk_iommu_domain_alloc,
+ .domain_free = mtk_iommu_domain_free,
+ .attach_dev = mtk_iommu_attach_device,
+ .detach_dev = mtk_iommu_detach_device,
+ .map = mtk_iommu_map,
+ .unmap = mtk_iommu_unmap,
+ .map_sg = default_iommu_map_sg,
+ .iova_to_phys = mtk_iommu_iova_to_phys,
+ .add_device = mtk_iommu_add_device,
+ .remove_device = mtk_iommu_remove_device,
+ .device_group = mtk_iommu_device_group,
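+	/* ~0UL << 12: every power-of-two page size of 4K or larger. */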
+ .pgsize_bitmap = ~0UL << MT2701_IOMMU_PAGE_SHIFT,
+};
+
+static const struct of_device_id mtk_iommu_of_ids[] = {
+ { .compatible = "mediatek,mt2701-m4u", },
+ {}
+};
+
+static const struct component_master_ops mtk_iommu_com_ops = {
+ .bind = mtk_iommu_bind,
+ .unbind = mtk_iommu_unbind,
+};
+
+static int mtk_iommu_probe(struct platform_device *pdev)
+{
+ struct mtk_iommu_data *data;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct component_match *match = NULL;
+ struct of_phandle_args larb_spec;
+ struct of_phandle_iterator it;
+ void *protect;
+ int larb_nr, ret, err;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = dev;
+
+	/* Protect memory. HW accesses this region on a translation fault. */
+ protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2,
+ GFP_KERNEL | GFP_DMA);
+ if (!protect)
+ return -ENOMEM;
+ data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(data->base))
+ return PTR_ERR(data->base);
+
+ data->irq = platform_get_irq(pdev, 0);
+ if (data->irq < 0)
+ return data->irq;
+
+ data->bclk = devm_clk_get(dev, "bclk");
+ if (IS_ERR(data->bclk))
+ return PTR_ERR(data->bclk);
+
+ larb_nr = 0;
+ of_for_each_phandle(&it, err, dev->of_node,
+ "mediatek,larbs", NULL, 0) {
+ struct platform_device *plarbdev;
+ int count = of_phandle_iterator_args(&it, larb_spec.args,
+ MAX_PHANDLE_ARGS);
+
+ if (count)
+ continue;
+
+ larb_spec.np = of_node_get(it.node);
+ if (!of_device_is_available(larb_spec.np))
+ continue;
+
+ plarbdev = of_find_device_by_node(larb_spec.np);
+ of_node_put(larb_spec.np);
+ if (!plarbdev) {
+ plarbdev = of_platform_device_create(
+ larb_spec.np, NULL,
+ platform_bus_type.dev_root);
+ if (!plarbdev)
+ return -EPROBE_DEFER;
+ }
+
+ data->smi_imu.larb_imu[larb_nr].dev = &plarbdev->dev;
+ component_match_add(dev, &match, compare_of, larb_spec.np);
+ larb_nr++;
+ }
+
+ data->smi_imu.larb_nr = larb_nr;
+
+ platform_set_drvdata(pdev, data);
+
+ ret = mtk_iommu_hw_init(data);
+ if (ret)
+ return ret;
+
+ if (!iommu_present(&platform_bus_type))
+ bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
+
+ return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
+}
+
+static int mtk_iommu_remove(struct platform_device *pdev)
+{
+ struct mtk_iommu_data *data = platform_get_drvdata(pdev);
+
+ if (iommu_present(&platform_bus_type))
+ bus_set_iommu(&platform_bus_type, NULL);
+
+ clk_disable_unprepare(data->bclk);
+ devm_free_irq(&pdev->dev, data->irq, data);
+ component_master_del(&pdev->dev, &mtk_iommu_com_ops);
+ return 0;
+}
+
+static int __maybe_unused mtk_iommu_suspend(struct device *dev)
+{
+ struct mtk_iommu_data *data = dev_get_drvdata(dev);
+ struct mtk_iommu_suspend_reg *reg = &data->reg;
+ void __iomem *base = data->base;
+
+ reg->standard_axi_mode = readl_relaxed(base +
+ REG_MMU_STANDARD_AXI_MODE);
+ reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM);
+ reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
+ reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL);
+ return 0;
+}
+
+static int __maybe_unused mtk_iommu_resume(struct device *dev)
+{
+ struct mtk_iommu_data *data = dev_get_drvdata(dev);
+ struct mtk_iommu_suspend_reg *reg = &data->reg;
+ void __iomem *base = data->base;
+
+ writel_relaxed(data->m4u_dom->pgt_pa, base + REG_MMU_PT_BASE_ADDR);
+ writel_relaxed(reg->standard_axi_mode,
+ base + REG_MMU_STANDARD_AXI_MODE);
+ writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM);
+ writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
+ writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL);
+ writel_relaxed(data->protect_base, base + REG_MMU_IVRP_PADDR);
+ return 0;
+}
+
+static const struct dev_pm_ops mtk_iommu_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
+};
+
+static struct platform_driver mtk_iommu_driver = {
+ .probe = mtk_iommu_probe,
+ .remove = mtk_iommu_remove,
+ .driver = {
+ .name = "mtk-iommu",
+ .of_match_table = mtk_iommu_of_ids,
+ .pm = &mtk_iommu_pm_ops,
+ }
+};
+
+static int __init m4u_init(void)
+{
+ return platform_driver_register(&mtk_iommu_driver);
+}
+
+static void __exit m4u_exit(void)
+{
+ return platform_driver_unregister(&mtk_iommu_driver);
+}
+
+subsys_initcall(m4u_init);
+module_exit(m4u_exit);
+
+MODULE_DESCRIPTION("IOMMU API for MTK architected m4u v1 implementations");
+MODULE_AUTHOR("Honghui Zhang <honghui.zhang@mediatek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index af499aea0..57f23eaaa 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -174,7 +174,7 @@ err_put_node:
return NULL;
}
-void __init of_iommu_init(void)
+static int __init of_iommu_init(void)
{
struct device_node *np;
const struct of_device_id *match, *matches = &__iommu_of_table;
@@ -186,4 +186,7 @@ void __init of_iommu_init(void)
pr_err("Failed to initialise IOMMU %s\n",
of_node_full_name(np));
}
+
+ return 0;
}
+postcore_initcall_sync(of_iommu_init);
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 25b4627cb..9afcbf79f 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -4,11 +4,10 @@
* published by the Free Software Foundation.
*/
-#include <asm/cacheflush.h>
-#include <asm/pgtable.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/dma-iommu.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -77,7 +76,9 @@
struct rk_iommu_domain {
struct list_head iommus;
+ struct platform_device *pdev;
u32 *dt; /* page directory table */
+ dma_addr_t dt_dma;
spinlock_t iommus_lock; /* lock for iommus list */
spinlock_t dt_lock; /* lock for modifying page directory table */
@@ -93,14 +94,12 @@ struct rk_iommu {
struct iommu_domain *domain; /* domain to which iommu is attached */
};
-static inline void rk_table_flush(u32 *va, unsigned int count)
+static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
+ unsigned int count)
{
- phys_addr_t pa_start = virt_to_phys(va);
- phys_addr_t pa_end = virt_to_phys(va + count);
- size_t size = pa_end - pa_start;
+ size_t size = count * sizeof(u32); /* count of u32 entries */
- __cpuc_flush_dcache_area(va, size);
- outer_flush_range(pa_start, pa_end);
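+	/* The tables now live in DMA-mapped memory; push CPU writes out
+	 * to the IOMMU before it walks them.
+	 */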
+ dma_sync_single_for_device(&dom->pdev->dev, dma, size, DMA_TO_DEVICE);
}
static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
@@ -183,10 +182,9 @@ static inline bool rk_dte_is_pt_valid(u32 dte)
return dte & RK_DTE_PT_VALID;
}
-static u32 rk_mk_dte(u32 *pt)
+static inline u32 rk_mk_dte(dma_addr_t pt_dma)
{
- phys_addr_t pt_phys = virt_to_phys(pt);
- return (pt_phys & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
+ return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
}
/*
@@ -603,13 +601,16 @@ static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
dma_addr_t iova)
{
+ struct device *dev = &rk_domain->pdev->dev;
u32 *page_table, *dte_addr;
- u32 dte;
+ u32 dte_index, dte;
phys_addr_t pt_phys;
+ dma_addr_t pt_dma;
assert_spin_locked(&rk_domain->dt_lock);
- dte_addr = &rk_domain->dt[rk_iova_dte_index(iova)];
+ dte_index = rk_iova_dte_index(iova);
+ dte_addr = &rk_domain->dt[dte_index];
dte = *dte_addr;
if (rk_dte_is_pt_valid(dte))
goto done;
@@ -618,19 +619,27 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
if (!page_table)
return ERR_PTR(-ENOMEM);
- dte = rk_mk_dte(page_table);
- *dte_addr = dte;
+ pt_dma = dma_map_single(dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, pt_dma)) {
+ dev_err(dev, "DMA mapping error while allocating page table\n");
+ free_page((unsigned long)page_table);
+ return ERR_PTR(-ENOMEM);
+ }
- rk_table_flush(page_table, NUM_PT_ENTRIES);
- rk_table_flush(dte_addr, 1);
+ dte = rk_mk_dte(pt_dma);
+ *dte_addr = dte;
+ rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
+ rk_table_flush(rk_domain,
+ rk_domain->dt_dma + dte_index * sizeof(u32), 1);
done:
pt_phys = rk_dte_pt_address(dte);
return (u32 *)phys_to_virt(pt_phys);
}
static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
- u32 *pte_addr, dma_addr_t iova, size_t size)
+ u32 *pte_addr, dma_addr_t pte_dma,
+ size_t size)
{
unsigned int pte_count;
unsigned int pte_total = size / SPAGE_SIZE;
@@ -645,14 +654,14 @@ static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
pte_addr[pte_count] = rk_mk_pte_invalid(pte);
}
- rk_table_flush(pte_addr, pte_count);
+ rk_table_flush(rk_domain, pte_dma, pte_count);
return pte_count * SPAGE_SIZE;
}
static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
- dma_addr_t iova, phys_addr_t paddr, size_t size,
- int prot)
+ dma_addr_t pte_dma, dma_addr_t iova,
+ phys_addr_t paddr, size_t size, int prot)
{
unsigned int pte_count;
unsigned int pte_total = size / SPAGE_SIZE;
@@ -671,7 +680,7 @@ static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
paddr += SPAGE_SIZE;
}
- rk_table_flush(pte_addr, pte_count);
+ rk_table_flush(rk_domain, pte_dma, pte_total);
/*
* Zap the first and last iova to evict from iotlb any previously
@@ -684,7 +693,8 @@ static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
return 0;
unwind:
/* Unmap the range of iovas that we just mapped */
- rk_iommu_unmap_iova(rk_domain, pte_addr, iova, pte_count * SPAGE_SIZE);
+ rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
+ pte_count * SPAGE_SIZE);
iova += pte_count * SPAGE_SIZE;
page_phys = rk_pte_page_address(pte_addr[pte_count]);
@@ -699,8 +709,9 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
{
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
- dma_addr_t iova = (dma_addr_t)_iova;
+ dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
u32 *page_table, *pte_addr;
+ u32 dte_index, pte_index;
int ret;
spin_lock_irqsave(&rk_domain->dt_lock, flags);
@@ -718,8 +729,13 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
return PTR_ERR(page_table);
}
- pte_addr = &page_table[rk_iova_pte_index(iova)];
- ret = rk_iommu_map_iova(rk_domain, pte_addr, iova, paddr, size, prot);
+ dte_index = rk_domain->dt[rk_iova_dte_index(iova)];
+ pte_index = rk_iova_pte_index(iova);
+ pte_addr = &page_table[pte_index];
+ pte_dma = rk_dte_pt_address(dte_index) + pte_index * sizeof(u32);
+ ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
+ paddr, size, prot);
+
spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
return ret;
@@ -730,7 +746,7 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
{
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
- dma_addr_t iova = (dma_addr_t)_iova;
+ dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
phys_addr_t pt_phys;
u32 dte;
u32 *pte_addr;
@@ -754,7 +770,8 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
pt_phys = rk_dte_pt_address(dte);
pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
- unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, iova, size);
+ pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
+ unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);
spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
@@ -787,7 +804,6 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
int ret, i;
- phys_addr_t dte_addr;
/*
* Allow 'virtual devices' (e.g., drm) to attach to domain.
@@ -807,14 +823,14 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
iommu->domain = domain;
- ret = devm_request_irq(dev, iommu->irq, rk_iommu_irq,
+ ret = devm_request_irq(iommu->dev, iommu->irq, rk_iommu_irq,
IRQF_SHARED, dev_name(dev), iommu);
if (ret)
return ret;
- dte_addr = virt_to_phys(rk_domain->dt);
for (i = 0; i < iommu->num_mmu; i++) {
- rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr);
+ rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
+ rk_domain->dt_dma);
rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
}
@@ -860,7 +876,7 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
}
rk_iommu_disable_stall(iommu);
- devm_free_irq(dev, iommu->irq, iommu);
+ devm_free_irq(iommu->dev, iommu->irq, iommu);
iommu->domain = NULL;
@@ -870,14 +886,30 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
{
struct rk_iommu_domain *rk_domain;
+ struct platform_device *pdev;
+ struct device *iommu_dev;
- if (type != IOMMU_DOMAIN_UNMANAGED)
+ if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
return NULL;
- rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
- if (!rk_domain)
+	/* Register a pdev per domain, so the DMA API can rely on this
+	 * *dev even when a virtual master doesn't have an iommu slave.
+	 */
+ pdev = platform_device_register_simple("rk_iommu_domain",
+ PLATFORM_DEVID_AUTO, NULL, 0);
+ if (IS_ERR(pdev))
return NULL;
+ rk_domain = devm_kzalloc(&pdev->dev, sizeof(*rk_domain), GFP_KERNEL);
+ if (!rk_domain)
+ goto err_unreg_pdev;
+
+ rk_domain->pdev = pdev;
+
+ if (type == IOMMU_DOMAIN_DMA &&
+ iommu_get_dma_cookie(&rk_domain->domain))
+ goto err_unreg_pdev;
+
/*
* rk32xx iommus use a 2 level pagetable.
* Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
@@ -885,18 +917,36 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
*/
rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
if (!rk_domain->dt)
- goto err_dt;
+ goto err_put_cookie;
+
+ iommu_dev = &pdev->dev;
+ rk_domain->dt_dma = dma_map_single(iommu_dev, rk_domain->dt,
+ SPAGE_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(iommu_dev, rk_domain->dt_dma)) {
+ dev_err(iommu_dev, "DMA map error for DT\n");
+ goto err_free_dt;
+ }
- rk_table_flush(rk_domain->dt, NUM_DT_ENTRIES);
+ rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES);
spin_lock_init(&rk_domain->iommus_lock);
spin_lock_init(&rk_domain->dt_lock);
INIT_LIST_HEAD(&rk_domain->iommus);
+ rk_domain->domain.geometry.aperture_start = 0;
+ rk_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
+ rk_domain->domain.geometry.force_aperture = true;
+
return &rk_domain->domain;
-err_dt:
- kfree(rk_domain);
+err_free_dt:
+ free_page((unsigned long)rk_domain->dt);
+err_put_cookie:
+ if (type == IOMMU_DOMAIN_DMA)
+ iommu_put_dma_cookie(&rk_domain->domain);
+err_unreg_pdev:
+ platform_device_unregister(pdev);
+
return NULL;
}
@@ -912,12 +962,20 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
if (rk_dte_is_pt_valid(dte)) {
phys_addr_t pt_phys = rk_dte_pt_address(dte);
u32 *page_table = phys_to_virt(pt_phys);
+ dma_unmap_single(&rk_domain->pdev->dev, pt_phys,
+ SPAGE_SIZE, DMA_TO_DEVICE);
free_page((unsigned long)page_table);
}
}
+ dma_unmap_single(&rk_domain->pdev->dev, rk_domain->dt_dma,
+ SPAGE_SIZE, DMA_TO_DEVICE);
free_page((unsigned long)rk_domain->dt);
- kfree(rk_domain);
+
+ if (domain->type == IOMMU_DOMAIN_DMA)
+ iommu_put_dma_cookie(&rk_domain->domain);
+
+ platform_device_unregister(rk_domain->pdev);
}
static bool rk_iommu_is_dev_iommu_master(struct device *dev)
@@ -1022,17 +1080,43 @@ static const struct iommu_ops rk_iommu_ops = {
.detach_dev = rk_iommu_detach_device,
.map = rk_iommu_map,
.unmap = rk_iommu_unmap,
+ .map_sg = default_iommu_map_sg,
.add_device = rk_iommu_add_device,
.remove_device = rk_iommu_remove_device,
.iova_to_phys = rk_iommu_iova_to_phys,
.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
};
+static int rk_iommu_domain_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
+ if (!dev->dma_parms)
+ return -ENOMEM;
+
+ /* Set dma_ops for dev, otherwise it would be dummy_dma_ops */
+ arch_setup_dma_ops(dev, 0, DMA_BIT_MASK(32), NULL, false);
+
+ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+ dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+
+ return 0;
+}
+
+static struct platform_driver rk_iommu_domain_driver = {
+ .probe = rk_iommu_domain_probe,
+ .driver = {
+ .name = "rk_iommu_domain",
+ },
+};
+
static int rk_iommu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rk_iommu *iommu;
struct resource *res;
+ int num_res = pdev->num_resources;
int i;
iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
@@ -1042,12 +1126,13 @@ static int rk_iommu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, iommu);
iommu->dev = dev;
iommu->num_mmu = 0;
- iommu->bases = devm_kzalloc(dev, sizeof(*iommu->bases) * iommu->num_mmu,
+
+ iommu->bases = devm_kzalloc(dev, sizeof(*iommu->bases) * num_res,
GFP_KERNEL);
if (!iommu->bases)
return -ENOMEM;
- for (i = 0; i < pdev->num_resources; i++) {
+ for (i = 0; i < num_res; i++) {
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
if (!res)
continue;
@@ -1103,11 +1188,19 @@ static int __init rk_iommu_init(void)
if (ret)
return ret;
- return platform_driver_register(&rk_iommu_driver);
+ ret = platform_driver_register(&rk_iommu_domain_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&rk_iommu_driver);
+ if (ret)
+ platform_driver_unregister(&rk_iommu_domain_driver);
+ return ret;
}
static void __exit rk_iommu_exit(void)
{
platform_driver_unregister(&rk_iommu_driver);
+ platform_driver_unregister(&rk_iommu_domain_driver);
}
subsys_initcall(rk_iommu_init);
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index fa33c50b0..7f8728984 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -8,6 +8,12 @@ config ARM_GIC
select IRQ_DOMAIN_HIERARCHY
select MULTI_IRQ_HANDLER
+config ARM_GIC_PM
+ bool
+ depends on PM
+ select ARM_GIC
+ select PM_CLK
+
config ARM_GIC_MAX_NR
int
default 2 if ARCH_REALVIEW
@@ -15,9 +21,9 @@ config ARM_GIC_MAX_NR
config ARM_GIC_V2M
bool
- depends on ARM_GIC
- depends on PCI && PCI_MSI
- select PCI_MSI_IRQ_DOMAIN
+ depends on PCI
+ select ARM_GIC
+ select PCI_MSI
config GIC_NON_BANKED
bool
@@ -31,7 +37,8 @@ config ARM_GIC_V3
config ARM_GIC_V3_ITS
bool
- select PCI_MSI_IRQ_DOMAIN
+ depends on PCI
+ depends on PCI_MSI
config ARM_NVIC
bool
@@ -56,13 +63,13 @@ config ARM_VIC_NR
config ARMADA_370_XP_IRQ
bool
select GENERIC_IRQ_CHIP
- select PCI_MSI_IRQ_DOMAIN if PCI_MSI
+ select PCI_MSI if PCI
config ALPINE_MSI
bool
- depends on PCI && PCI_MSI
+ depends on PCI
+ select PCI_MSI
select GENERIC_IRQ_CHIP
- select PCI_MSI_IRQ_DOMAIN
config ATMEL_AIC_IRQ
bool
@@ -111,7 +118,6 @@ config HISILICON_IRQ_MBIGEN
bool
select ARM_GIC_V3
select ARM_GIC_V3_ITS
- select GENERIC_MSI_IRQ_DOMAIN
config IMGPDC_IRQ
bool
@@ -244,12 +250,10 @@ config IRQ_MXS
config MVEBU_ODMI
bool
- select GENERIC_MSI_IRQ_DOMAIN
config LS_SCFG_MSI
def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE
depends on PCI && PCI_MSI
- select PCI_MSI_IRQ_DOMAIN
config PARTITION_PERCPU
bool
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 38853a187..4c203b6b8 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o
obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi-nmi.o
obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o
obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o
+obj-$(CONFIG_ARM_GIC_PM) += irq-gic-pm.o
obj-$(CONFIG_REALVIEW_DT) += irq-gic-realview.o
obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o
obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o
@@ -69,3 +70,4 @@ obj-$(CONFIG_PIC32_EVIC) += irq-pic32-evic.o
obj-$(CONFIG_MVEBU_ODMI) += irq-mvebu-odmi.o
obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o
obj-$(CONFIG_EZNPS_GIC) += irq-eznps.o
+obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o
diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c
index ead15be2d..b78a169c9 100644
--- a/drivers/irqchip/exynos-combiner.c
+++ b/drivers/irqchip/exynos-combiner.c
@@ -55,14 +55,14 @@ static void combiner_mask_irq(struct irq_data *data)
{
u32 mask = 1 << (data->hwirq % 32);
- __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
+ writel_relaxed(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
}
static void combiner_unmask_irq(struct irq_data *data)
{
u32 mask = 1 << (data->hwirq % 32);
- __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
+ writel_relaxed(mask, combiner_base(data) + COMBINER_ENABLE_SET);
}
static void combiner_handle_cascade_irq(struct irq_desc *desc)
@@ -75,7 +75,7 @@ static void combiner_handle_cascade_irq(struct irq_desc *desc)
chained_irq_enter(chip, desc);
spin_lock(&irq_controller_lock);
- status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
+ status = readl_relaxed(chip_data->base + COMBINER_INT_STATUS);
spin_unlock(&irq_controller_lock);
status &= chip_data->irq_mask;
@@ -135,7 +135,7 @@ static void __init combiner_init_one(struct combiner_chip_data *combiner_data,
combiner_data->parent_irq = irq;
/* Disable all interrupts */
- __raw_writel(combiner_data->irq_mask, base + COMBINER_ENABLE_CLEAR);
+ writel_relaxed(combiner_data->irq_mask, base + COMBINER_ENABLE_CLEAR);
}
static int combiner_irq_domain_xlate(struct irq_domain *d,
@@ -218,7 +218,7 @@ static int combiner_suspend(void)
for (i = 0; i < max_nr; i++)
combiner_data[i].pm_save =
- __raw_readl(combiner_data[i].base + COMBINER_ENABLE_SET);
+ readl_relaxed(combiner_data[i].base + COMBINER_ENABLE_SET);
return 0;
}
@@ -235,9 +235,9 @@ static void combiner_resume(void)
int i;
for (i = 0; i < max_nr; i++) {
- __raw_writel(combiner_data[i].irq_mask,
+ writel_relaxed(combiner_data[i].irq_mask,
combiner_data[i].base + COMBINER_ENABLE_CLEAR);
- __raw_writel(combiner_data[i].pm_save,
+ writel_relaxed(combiner_data[i].pm_save,
combiner_data[i].base + COMBINER_ENABLE_SET);
}
}
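
The exynos-combiner hunks above replace the __raw_* MMIO accessors with their *_relaxed counterparts. A minimal sketch of the three write flavours involved, assuming 'base' is an ioremap()ed register block:

    #include <linux/io.h>

    static void mmio_write_flavours(void __iomem *base, u32 val)
    {
            __raw_writel(val, base);   /* native-endian store, no barrier */
            writel_relaxed(val, base); /* little-endian store, no barrier */
            writel(val, base);         /* little-endian store + DMA write barrier */
    }

The relaxed form keeps the byte-swap that big-endian kernels need for little-endian peripherals while still avoiding the ordering barrier of plain writel(), which is why it is the usual replacement for __raw_writel() in irqchip code.
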
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index e7dc6cbda..8bcee65a0 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -345,38 +345,20 @@ static void armada_mpic_send_doorbell(const struct cpumask *mask,
ARMADA_370_XP_SW_TRIG_INT_OFFS);
}
-static int armada_xp_mpic_secondary_init(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int armada_xp_mpic_starting_cpu(unsigned int cpu)
{
- if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) {
- armada_xp_mpic_perf_init();
- armada_xp_mpic_smp_cpu_init();
- }
-
- return NOTIFY_OK;
+ armada_xp_mpic_perf_init();
+ armada_xp_mpic_smp_cpu_init();
+ return 0;
}
-static struct notifier_block armada_370_xp_mpic_cpu_notifier = {
- .notifier_call = armada_xp_mpic_secondary_init,
- .priority = 100,
-};
-
-static int mpic_cascaded_secondary_init(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int mpic_cascaded_starting_cpu(unsigned int cpu)
{
- if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) {
- armada_xp_mpic_perf_init();
- enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);
- }
-
- return NOTIFY_OK;
+ armada_xp_mpic_perf_init();
+ enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);
+ return 0;
}
-
-static struct notifier_block mpic_cascaded_cpu_notifier = {
- .notifier_call = mpic_cascaded_secondary_init,
- .priority = 100,
-};
-#endif /* CONFIG_SMP */
+#endif
static const struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
.map = armada_370_xp_mpic_irq_map,
@@ -541,7 +523,7 @@ static void armada_370_xp_mpic_resume(void)
writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}
-struct syscore_ops armada_370_xp_mpic_syscore_ops = {
+static struct syscore_ops armada_370_xp_mpic_syscore_ops = {
.suspend = armada_370_xp_mpic_suspend,
.resume = armada_370_xp_mpic_resume,
};
@@ -595,11 +577,15 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
set_handle_irq(armada_370_xp_handle_irq);
#ifdef CONFIG_SMP
set_smp_cross_call(armada_mpic_send_doorbell);
- register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier);
+ cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING,
+ "AP_IRQ_ARMADA_XP_STARTING",
+ armada_xp_mpic_starting_cpu, NULL);
#endif
} else {
#ifdef CONFIG_SMP
- register_cpu_notifier(&mpic_cascaded_cpu_notifier);
+ cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_CASC_STARTING,
+ "AP_IRQ_ARMADA_CASC_STARTING",
+ mpic_cascaded_starting_cpu, NULL);
#endif
irq_set_chained_handler(parent_irq,
armada_370_xp_mpic_handle_cascade_irq);
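
This hunk is part of the tree-wide conversion from register_cpu_notifier() to the cpuhp state machine: the CPU_STARTING/CPU_STARTING_FROZEN action switch disappears and each event gets its own callback, registered against a fixed hotplug state. A minimal sketch of the replacement API, reusing the state referenced in the hunk (the my_* names are illustrative):

    #include <linux/cpuhotplug.h>

    /* Runs on the incoming CPU in the STARTING section, IRQs off;
     * returning non-zero aborts the bring-up of that CPU. */
    static int my_starting_cpu(unsigned int cpu)
    {
            /* per-CPU interrupt-controller setup goes here */
            return 0;
    }

    static int __init my_register(void)
    {
            /* _nocalls: do not invoke the callback for CPUs already online */
            return cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING,
                                             "AP_IRQ_ARMADA_XP_STARTING",
                                             my_starting_cpu, NULL);
    }

The plain cpuhp_setup_state() variant (used by the bcm2836 and hip04 hunks below) additionally runs the startup callback on every CPU that is already online, which is why those conversions can drop their explicit boot-CPU calls.
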
diff --git a/drivers/irqchip/irq-aspeed-vic.c b/drivers/irqchip/irq-aspeed-vic.c
new file mode 100644
index 000000000..d24451d5b
--- /dev/null
+++ b/drivers/irqchip/irq-aspeed-vic.c
@@ -0,0 +1,230 @@
+/*
+ * Copyright (C) 2015 - Ben Herrenschmidt, IBM Corp.
+ *
+ * Driver for Aspeed "new" VIC as found in SoC generation 3 and later
+ *
+ * Based on irq-vic.c:
+ *
+ * Copyright (C) 1999 - 2003 ARM Limited
+ * Copyright (C) 2000 Deep Blue Solutions Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/syscore_ops.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include <asm/exception.h>
+#include <asm/irq.h>
+
+/* These definitions correspond to the "new mapping" of the
+ * register set that interleaves "high" and "low". The offsets
+ * below are for the "low" register; add 4 to get to the high one.
+ */
+#define AVIC_IRQ_STATUS 0x00
+#define AVIC_FIQ_STATUS 0x08
+#define AVIC_RAW_STATUS 0x10
+#define AVIC_INT_SELECT 0x18
+#define AVIC_INT_ENABLE 0x20
+#define AVIC_INT_ENABLE_CLR 0x28
+#define AVIC_INT_TRIGGER 0x30
+#define AVIC_INT_TRIGGER_CLR 0x38
+#define AVIC_INT_SENSE 0x40
+#define AVIC_INT_DUAL_EDGE 0x48
+#define AVIC_INT_EVENT 0x50
+#define AVIC_EDGE_CLR 0x58
+#define AVIC_EDGE_STATUS 0x60
+
+#define NUM_IRQS 64
+
+struct aspeed_vic {
+ void __iomem *base;
+ u32 edge_sources[2];
+ struct irq_domain *dom;
+};
+static struct aspeed_vic *system_avic;
+
+static void vic_init_hw(struct aspeed_vic *vic)
+{
+ u32 sense;
+
+ /* Disable all interrupts */
+ writel(0xffffffff, vic->base + AVIC_INT_ENABLE_CLR);
+ writel(0xffffffff, vic->base + AVIC_INT_ENABLE_CLR + 4);
+
+ /* Make sure no soft trigger is on */
+ writel(0xffffffff, vic->base + AVIC_INT_TRIGGER_CLR);
+ writel(0xffffffff, vic->base + AVIC_INT_TRIGGER_CLR + 4);
+
+ /* Set everything to be IRQ */
+ writel(0, vic->base + AVIC_INT_SELECT);
+ writel(0, vic->base + AVIC_INT_SELECT + 4);
+
+ /* Some interrupts have a programmable high/low level trigger
+ * (4 GPIO direct inputs); for now we assume this was configured
+ * by firmware. We just read back which ones are edge-triggered.
+ */
+ sense = readl(vic->base + AVIC_INT_SENSE);
+ vic->edge_sources[0] = ~sense;
+ sense = readl(vic->base + AVIC_INT_SENSE + 4);
+ vic->edge_sources[1] = ~sense;
+
+ /* Clear edge detection latches */
+ writel(0xffffffff, vic->base + AVIC_EDGE_CLR);
+ writel(0xffffffff, vic->base + AVIC_EDGE_CLR + 4);
+}
+
+static void __exception_irq_entry avic_handle_irq(struct pt_regs *regs)
+{
+ struct aspeed_vic *vic = system_avic;
+ u32 stat, irq;
+
+ for (;;) {
+ irq = 0;
+ stat = readl_relaxed(vic->base + AVIC_IRQ_STATUS);
+ if (!stat) {
+ stat = readl_relaxed(vic->base + AVIC_IRQ_STATUS + 4);
+ irq = 32;
+ }
+ if (stat == 0)
+ break;
+ irq += ffs(stat) - 1;
+ handle_domain_irq(vic->dom, irq, regs);
+ }
+}
+
+static void avic_ack_irq(struct irq_data *d)
+{
+ struct aspeed_vic *vic = irq_data_get_irq_chip_data(d);
+ unsigned int sidx = d->hwirq >> 5;
+ unsigned int sbit = 1u << (d->hwirq & 0x1f);
+
+ /* Clear edge latch for edge interrupts, nop for level */
+ if (vic->edge_sources[sidx] & sbit)
+ writel(sbit, vic->base + AVIC_EDGE_CLR + sidx * 4);
+}
+
+static void avic_mask_irq(struct irq_data *d)
+{
+ struct aspeed_vic *vic = irq_data_get_irq_chip_data(d);
+ unsigned int sidx = d->hwirq >> 5;
+ unsigned int sbit = 1u << (d->hwirq & 0x1f);
+
+ writel(sbit, vic->base + AVIC_INT_ENABLE_CLR + sidx * 4);
+}
+
+static void avic_unmask_irq(struct irq_data *d)
+{
+ struct aspeed_vic *vic = irq_data_get_irq_chip_data(d);
+ unsigned int sidx = d->hwirq >> 5;
+ unsigned int sbit = 1u << (d->hwirq & 0x1f);
+
+ writel(sbit, vic->base + AVIC_INT_ENABLE + sidx * 4);
+}
+
+/* For level irq, faster than going through a nop "ack" and mask */
+static void avic_mask_ack_irq(struct irq_data *d)
+{
+ struct aspeed_vic *vic = irq_data_get_irq_chip_data(d);
+ unsigned int sidx = d->hwirq >> 5;
+ unsigned int sbit = 1u << (d->hwirq & 0x1f);
+
+ /* First mask */
+ writel(sbit, vic->base + AVIC_INT_ENABLE_CLR + sidx * 4);
+
+ /* Then clear edge latch for edge interrupts */
+ if (vic->edge_sources[sidx] & sbit)
+ writel(sbit, vic->base + AVIC_EDGE_CLR + sidx * 4);
+}
+
+static struct irq_chip avic_chip = {
+ .name = "AVIC",
+ .irq_ack = avic_ack_irq,
+ .irq_mask = avic_mask_irq,
+ .irq_unmask = avic_unmask_irq,
+ .irq_mask_ack = avic_mask_ack_irq,
+};
+
+static int avic_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct aspeed_vic *vic = d->host_data;
+ unsigned int sidx = hwirq >> 5;
+ unsigned int sbit = 1u << (hwirq & 0x1f);
+
+ /* Check if interrupt exists */
+ if (sidx > 1)
+ return -EPERM;
+
+ if (vic->edge_sources[sidx] & sbit)
+ irq_set_chip_and_handler(irq, &avic_chip, handle_edge_irq);
+ else
+ irq_set_chip_and_handler(irq, &avic_chip, handle_level_irq);
+ irq_set_chip_data(irq, vic);
+ irq_set_probe(irq);
+ return 0;
+}
+
+static struct irq_domain_ops avic_dom_ops = {
+ .map = avic_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+static int __init avic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ void __iomem *regs;
+ struct aspeed_vic *vic;
+
+ if (WARN(parent, "non-root Aspeed VIC not supported"))
+ return -EINVAL;
+ if (WARN(system_avic, "duplicate Aspeed VIC not supported"))
+ return -EINVAL;
+
+ regs = of_iomap(node, 0);
+ if (WARN_ON(!regs))
+ return -EIO;
+
+ vic = kzalloc(sizeof(struct aspeed_vic), GFP_KERNEL);
+ if (WARN_ON(!vic)) {
+ iounmap(regs);
+ return -ENOMEM;
+ }
+ vic->base = regs;
+
+ /* Initialize sources, all masked */
+ vic_init_hw(vic);
+
+ /* Ready to receive interrupts */
+ system_avic = vic;
+ set_handle_irq(avic_handle_irq);
+
+ /* Register our domain */
+ vic->dom = irq_domain_add_simple(node, NUM_IRQS, 0,
+ &avic_dom_ops, vic);
+
+ return 0;
+}
+
+IRQCHIP_DECLARE(aspeed_new_vic, "aspeed,ast2400-vic", avic_of_init);
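
Because the AVIC's register map interleaves each pair as a low word (IRQs 0-31) and a high word (IRQs 32-63), every handler in this new driver splits the hwirq into a word index and a bit. That arithmetic, factored into sketch helpers for illustration:

    #include <linux/io.h>

    /* hwirq 0..63 -> 32-bit word within a register pair, and bit therein */
    static inline void __iomem *avic_word(void __iomem *base,
                                          unsigned long offset,
                                          unsigned int hwirq)
    {
            return base + offset + (hwirq >> 5) * 4; /* +4 selects the high word */
    }

    static inline u32 avic_word_bit(unsigned int hwirq)
    {
            return 1u << (hwirq & 0x1f);
    }

For example, masking hwirq 40 writes bit 8 to AVIC_INT_ENABLE_CLR + 4, exactly the sidx/sbit computation in avic_mask_irq() above.
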
diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c
index bf9cc5f2e..44d7c38dd 100644
--- a/drivers/irqchip/irq-bcm2835.c
+++ b/drivers/irqchip/irq-bcm2835.c
@@ -52,7 +52,6 @@
#include <linux/irqdomain.h>
#include <asm/exception.h>
-#include <asm/mach/irq.h>
/* Put the bank and irq (32 bits) into the hwirq */
#define MAKE_HWIRQ(b, n) ((b << 5) | (n))
@@ -242,7 +241,7 @@ static void __exception_irq_entry bcm2835_handle_irq(
u32 hwirq;
while ((hwirq = get_next_armctrl_hwirq()) != ~0)
- handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs);
+ handle_domain_irq(intc.domain, hwirq, regs);
}
static void bcm2836_chained_handle_irq(struct irq_desc *desc)
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index 72ff1d5c5..d96b2c947 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -180,7 +180,7 @@ __exception_irq_entry bcm2836_arm_irqchip_handle_irq(struct pt_regs *regs)
} else if (stat) {
u32 hwirq = ffs(stat) - 1;
- handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs);
+ handle_domain_irq(intc.domain, hwirq, regs);
}
}
@@ -202,30 +202,23 @@ static void bcm2836_arm_irqchip_send_ipi(const struct cpumask *mask,
}
}
-/* Unmasks the IPI on the CPU when it's online. */
-static int bcm2836_arm_irqchip_cpu_notify(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int bcm2836_cpu_starting(unsigned int cpu)
{
- unsigned int cpu = (unsigned long)hcpu;
- unsigned int int_reg = LOCAL_MAILBOX_INT_CONTROL0;
- unsigned int mailbox = 0;
-
- if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
- bcm2836_arm_irqchip_unmask_per_cpu_irq(int_reg, mailbox, cpu);
- else if (action == CPU_DYING)
- bcm2836_arm_irqchip_mask_per_cpu_irq(int_reg, mailbox, cpu);
-
- return NOTIFY_OK;
+ bcm2836_arm_irqchip_unmask_per_cpu_irq(LOCAL_MAILBOX_INT_CONTROL0, 0,
+ cpu);
+ return 0;
}
-static struct notifier_block bcm2836_arm_irqchip_cpu_notifier = {
- .notifier_call = bcm2836_arm_irqchip_cpu_notify,
- .priority = 100,
-};
+static int bcm2836_cpu_dying(unsigned int cpu)
+{
+ bcm2836_arm_irqchip_mask_per_cpu_irq(LOCAL_MAILBOX_INT_CONTROL0, 0,
+ cpu);
+ return 0;
+}
#ifdef CONFIG_ARM
-int __init bcm2836_smp_boot_secondary(unsigned int cpu,
- struct task_struct *idle)
+static int __init bcm2836_smp_boot_secondary(unsigned int cpu,
+ struct task_struct *idle)
{
unsigned long secondary_startup_phys =
(unsigned long)virt_to_phys((void *)secondary_startup);
@@ -251,10 +244,9 @@ bcm2836_arm_irqchip_smp_init(void)
{
#ifdef CONFIG_SMP
/* Unmask IPIs to the boot CPU. */
- bcm2836_arm_irqchip_cpu_notify(&bcm2836_arm_irqchip_cpu_notifier,
- CPU_STARTING,
- (void *)(uintptr_t)smp_processor_id());
- register_cpu_notifier(&bcm2836_arm_irqchip_cpu_notifier);
+ cpuhp_setup_state(CPUHP_AP_IRQ_BCM2836_STARTING,
+ "AP_IRQ_BCM2836_STARTING", bcm2836_cpu_starting,
+ bcm2836_cpu_dying);
set_smp_cross_call(bcm2836_arm_irqchip_send_ipi);
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index 61b18ab33..0ec92631e 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -215,7 +215,7 @@ static int __init bcm7120_l2_intc_iomap_3380(struct device_node *dn,
return 0;
}
-int __init bcm7120_l2_intc_probe(struct device_node *dn,
+static int __init bcm7120_l2_intc_probe(struct device_node *dn,
struct device_node *parent,
int (*iomap_regs_fn)(struct device_node *,
struct bcm7120_l2_intc_data *),
@@ -339,15 +339,15 @@ out_unmap:
return ret;
}
-int __init bcm7120_l2_intc_probe_7120(struct device_node *dn,
- struct device_node *parent)
+static int __init bcm7120_l2_intc_probe_7120(struct device_node *dn,
+ struct device_node *parent)
{
return bcm7120_l2_intc_probe(dn, parent, bcm7120_l2_intc_iomap_7120,
"BCM7120 L2");
}
-int __init bcm7120_l2_intc_probe_3380(struct device_node *dn,
- struct device_node *parent)
+static int __init bcm7120_l2_intc_probe_3380(struct device_node *dn,
+ struct device_node *parent)
{
return bcm7120_l2_intc_probe(dn, parent, bcm7120_l2_intc_iomap_3380,
"BCM3380 L2");
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 65cd341f3..1d4a5b46d 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -112,8 +112,8 @@ static void brcmstb_l2_intc_resume(struct irq_data *d)
irq_gc_unlock(gc);
}
-int __init brcmstb_l2_intc_of_init(struct device_node *np,
- struct device_node *parent)
+static int __init brcmstb_l2_intc_of_init(struct device_node *np,
+ struct device_node *parent)
{
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
struct brcmstb_l2_intc_data *data;
diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c
index 2223b3f15..f913f4db7 100644
--- a/drivers/irqchip/irq-clps711x.c
+++ b/drivers/irqchip/irq-clps711x.c
@@ -234,5 +234,5 @@ static int __init clps711x_intc_init_dt(struct device_node *np,
return _clps711x_intc_init(np, res.start, resource_size(&res));
}
-IRQCHIP_DECLARE(clps711x, "cirrus,clps711x-intc", clps711x_intc_init_dt);
+IRQCHIP_DECLARE(clps711x, "cirrus,ep7209-intc", clps711x_intc_init_dt);
#endif
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
index 89e7423f0..9ae71804b 100644
--- a/drivers/irqchip/irq-gic-common.c
+++ b/drivers/irqchip/irq-gic-common.c
@@ -90,8 +90,8 @@ int gic_configure_irq(unsigned int irq, unsigned int type,
return ret;
}
-void __init gic_dist_config(void __iomem *base, int gic_irqs,
- void (*sync_access)(void))
+void gic_dist_config(void __iomem *base, int gic_irqs,
+ void (*sync_access)(void))
{
unsigned int i;
diff --git a/drivers/irqchip/irq-gic-pm.c b/drivers/irqchip/irq-gic-pm.c
new file mode 100644
index 000000000..4cbffba3f
--- /dev/null
+++ b/drivers/irqchip/irq-gic-pm.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2016 NVIDIA CORPORATION, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/irqchip/arm-gic.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+struct gic_clk_data {
+ unsigned int num_clocks;
+ const char *const *clocks;
+};
+
+static int gic_runtime_resume(struct device *dev)
+{
+ struct gic_chip_data *gic = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_clk_resume(dev);
+ if (ret)
+ return ret;
+
+ /*
+ * On the very first resume, the driver data pointer will
+ * still be NULL. This is intentional: no state has been
+ * saved yet, so there is nothing to restore; just return.
+ */
+ if (!gic)
+ return 0;
+
+ gic_dist_restore(gic);
+ gic_cpu_restore(gic);
+
+ return 0;
+}
+
+static int gic_runtime_suspend(struct device *dev)
+{
+ struct gic_chip_data *gic = dev_get_drvdata(dev);
+
+ gic_dist_save(gic);
+ gic_cpu_save(gic);
+
+ return pm_clk_suspend(dev);
+}
+
+static int gic_get_clocks(struct device *dev, const struct gic_clk_data *data)
+{
+ struct clk *clk;
+ unsigned int i;
+ int ret;
+
+ if (!dev || !data)
+ return -EINVAL;
+
+ ret = pm_clk_create(dev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < data->num_clocks; i++) {
+ clk = of_clk_get_by_name(dev->of_node, data->clocks[i]);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to get clock %s\n",
+ data->clocks[i]);
+ ret = PTR_ERR(clk);
+ goto error;
+ }
+
+ ret = pm_clk_add_clk(dev, clk);
+ if (ret) {
+ dev_err(dev, "failed to add clock at index %d\n", i);
+ clk_put(clk);
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ pm_clk_destroy(dev);
+
+ return ret;
+}
+
+static int gic_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct gic_clk_data *data;
+ struct gic_chip_data *gic;
+ int ret, irq;
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
+ dev_err(&pdev->dev, "no device match found\n");
+ return -ENODEV;
+ }
+
+ irq = irq_of_parse_and_map(dev->of_node, 0);
+ if (!irq) {
+ dev_err(dev, "no parent interrupt found!\n");
+ return -EINVAL;
+ }
+
+ ret = gic_get_clocks(dev, data);
+ if (ret)
+ goto irq_dispose;
+
+ pm_runtime_enable(dev);
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ goto rpm_disable;
+
+ ret = gic_of_init_child(dev, &gic, irq);
+ if (ret)
+ goto rpm_put;
+
+ platform_set_drvdata(pdev, gic);
+
+ pm_runtime_put(dev);
+
+ dev_info(dev, "GIC IRQ controller registered\n");
+
+ return 0;
+
+rpm_put:
+ pm_runtime_put_sync(dev);
+rpm_disable:
+ pm_runtime_disable(dev);
+ pm_clk_destroy(dev);
+irq_dispose:
+ irq_dispose_mapping(irq);
+
+ return ret;
+}
+
+static const struct dev_pm_ops gic_pm_ops = {
+ SET_RUNTIME_PM_OPS(gic_runtime_suspend,
+ gic_runtime_resume, NULL)
+};
+
+static const char * const gic400_clocks[] = {
+ "clk",
+};
+
+static const struct gic_clk_data gic400_data = {
+ .num_clocks = ARRAY_SIZE(gic400_clocks),
+ .clocks = gic400_clocks,
+};
+
+static const struct of_device_id gic_match[] = {
+ { .compatible = "nvidia,tegra210-agic", .data = &gic400_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, gic_match);
+
+static struct platform_driver gic_driver = {
+ .probe = gic_probe,
+ .driver = {
+ .name = "gic",
+ .of_match_table = gic_match,
+ .pm = &gic_pm_ops,
+ }
+};
+
+builtin_platform_driver(gic_driver);
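
One subtlety in the probe path above: pm_runtime_get_sync() can invoke gic_runtime_resume() before platform_set_drvdata() has run, which is why the resume callback tolerates a NULL pointer. The pattern in isolation, as a sketch with hypothetical my_* names:

    #include <linux/device.h>
    #include <linux/pm_clock.h>

    struct my_ctx;                          /* saved hardware state */
    static void my_restore_context(struct my_ctx *ctx);

    static int my_runtime_resume(struct device *dev)
    {
            struct my_ctx *ctx = dev_get_drvdata(dev);
            int ret = pm_clk_resume(dev);   /* safe even before drvdata is set */

            if (ret)
                    return ret;
            if (!ctx)                       /* first resume, from probe:
                                             * nothing saved to restore */
                    return 0;
            my_restore_context(ctx);
            return 0;
    }
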
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index ad0d2960b..35eb7ac5d 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -24,6 +24,7 @@
#include <linux/of_pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/irqchip/arm-gic.h>
/*
* MSI_TYPER:
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 5eb1f9e17..36b9c28a5 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -56,13 +56,14 @@ struct its_collection {
};
/*
- * The ITS_BASER structure - contains memory information and cached
- * value of BASER register configuration.
+ * The ITS_BASER structure - contains memory information, cached
+ * value of BASER register configuration and ITS page size.
*/
struct its_baser {
void *base;
u64 val;
u32 order;
+ u32 psz;
};
/*
@@ -824,180 +825,241 @@ static const char *its_base_type_string[] = {
[GITS_BASER_TYPE_RESERVED7] = "Reserved (7)",
};
-static void its_free_tables(struct its_node *its)
+static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
{
- int i;
+ u32 idx = baser - its->tables;
- for (i = 0; i < GITS_BASER_NR_REGS; i++) {
- if (its->tables[i].base) {
- free_pages((unsigned long)its->tables[i].base,
- its->tables[i].order);
- its->tables[i].base = NULL;
- }
- }
+ return readq_relaxed(its->base + GITS_BASER + (idx << 3));
}
-static int its_alloc_tables(const char *node_name, struct its_node *its)
+static void its_write_baser(struct its_node *its, struct its_baser *baser,
+ u64 val)
{
- int err;
- int i;
- int psz = SZ_64K;
- u64 shr = GITS_BASER_InnerShareable;
- u64 cache;
- u64 typer;
- u32 ids;
+ u32 idx = baser - its->tables;
- if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) {
- /*
- * erratum 22375: only alloc 8MB table size
- * erratum 24313: ignore memory access type
- */
- cache = 0;
- ids = 0x14; /* 20 bits, 8MB */
- } else {
- cache = GITS_BASER_WaWb;
- typer = readq_relaxed(its->base + GITS_TYPER);
- ids = GITS_TYPER_DEVBITS(typer);
+ writeq_relaxed(val, its->base + GITS_BASER + (idx << 3));
+ baser->val = its_read_baser(its, baser);
+}
+
+static int its_setup_baser(struct its_node *its, struct its_baser *baser,
+ u64 cache, u64 shr, u32 psz, u32 order,
+ bool indirect)
+{
+ u64 val = its_read_baser(its, baser);
+ u64 esz = GITS_BASER_ENTRY_SIZE(val);
+ u64 type = GITS_BASER_TYPE(val);
+ u32 alloc_pages;
+ void *base;
+ u64 tmp;
+
+retry_alloc_baser:
+ alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
+ if (alloc_pages > GITS_BASER_PAGES_MAX) {
+ pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
+ &its->phys_base, its_base_type_string[type],
+ alloc_pages, GITS_BASER_PAGES_MAX);
+ alloc_pages = GITS_BASER_PAGES_MAX;
+ order = get_order(GITS_BASER_PAGES_MAX * psz);
}
- its->device_ids = ids;
+ base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!base)
+ return -ENOMEM;
- for (i = 0; i < GITS_BASER_NR_REGS; i++) {
- u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
- u64 type = GITS_BASER_TYPE(val);
- u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
- int order = get_order(psz);
- int alloc_pages;
- u64 tmp;
- void *base;
+retry_baser:
+ val = (virt_to_phys(base) |
+ (type << GITS_BASER_TYPE_SHIFT) |
+ ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
+ ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
+ cache |
+ shr |
+ GITS_BASER_VALID);
+
+ val |= indirect ? GITS_BASER_INDIRECT : 0x0;
+
+ switch (psz) {
+ case SZ_4K:
+ val |= GITS_BASER_PAGE_SIZE_4K;
+ break;
+ case SZ_16K:
+ val |= GITS_BASER_PAGE_SIZE_16K;
+ break;
+ case SZ_64K:
+ val |= GITS_BASER_PAGE_SIZE_64K;
+ break;
+ }
- if (type == GITS_BASER_TYPE_NONE)
- continue;
+ its_write_baser(its, baser, val);
+ tmp = baser->val;
+ if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
/*
- * Allocate as many entries as required to fit the
- * range of device IDs that the ITS can grok... The ID
- * space being incredibly sparse, this results in a
- * massive waste of memory.
- *
- * For other tables, only allocate a single page.
+ * Shareability didn't stick. Just use
+ * whatever the read reported, which is likely
+ * to be the only thing this redistributor
+ * supports. If that's zero, make it
+ * non-cacheable as well.
*/
- if (type == GITS_BASER_TYPE_DEVICE) {
- /*
- * 'order' was initialized earlier to the default page
- * granule of the the ITS. We can't have an allocation
- * smaller than that. If the requested allocation
- * is smaller, round up to the default page granule.
- */
- order = max(get_order((1UL << ids) * entry_size),
- order);
- if (order >= MAX_ORDER) {
- order = MAX_ORDER - 1;
- pr_warn("%s: Device Table too large, reduce its page order to %u\n",
- node_name, order);
- }
+ shr = tmp & GITS_BASER_SHAREABILITY_MASK;
+ if (!shr) {
+ cache = GITS_BASER_nC;
+ __flush_dcache_area(base, PAGE_ORDER_TO_SIZE(order));
}
+ goto retry_baser;
+ }
-retry_alloc_baser:
- alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
- if (alloc_pages > GITS_BASER_PAGES_MAX) {
- alloc_pages = GITS_BASER_PAGES_MAX;
- order = get_order(GITS_BASER_PAGES_MAX * psz);
- pr_warn("%s: Device Table too large, reduce its page order to %u (%u pages)\n",
- node_name, order, alloc_pages);
- }
-
- base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
- if (!base) {
- err = -ENOMEM;
- goto out_free;
- }
-
- its->tables[i].base = base;
- its->tables[i].order = order;
-
-retry_baser:
- val = (virt_to_phys(base) |
- (type << GITS_BASER_TYPE_SHIFT) |
- ((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
- cache |
- shr |
- GITS_BASER_VALID);
+ if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
+ /*
+ * Page size didn't stick. Let's try a smaller
+ * size and retry. If we reach 4K, then
+ * something is horribly wrong...
+ */
+ free_pages((unsigned long)base, order);
+ baser->base = NULL;
switch (psz) {
- case SZ_4K:
- val |= GITS_BASER_PAGE_SIZE_4K;
- break;
case SZ_16K:
- val |= GITS_BASER_PAGE_SIZE_16K;
- break;
+ psz = SZ_4K;
+ goto retry_alloc_baser;
case SZ_64K:
- val |= GITS_BASER_PAGE_SIZE_64K;
- break;
+ psz = SZ_16K;
+ goto retry_alloc_baser;
}
+ }
- val |= alloc_pages - 1;
- its->tables[i].val = val;
+ if (val != tmp) {
+ pr_err("ITS@%pa: %s doesn't stick: %lx %lx\n",
+ &its->phys_base, its_base_type_string[type],
+ (unsigned long) val, (unsigned long) tmp);
+ free_pages((unsigned long)base, order);
+ return -ENXIO;
+ }
+
+ baser->order = order;
+ baser->base = base;
+ baser->psz = psz;
+ tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
+
+ pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
+ &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / tmp),
+ its_base_type_string[type],
+ (unsigned long)virt_to_phys(base),
+ indirect ? "indirect" : "flat", (int)esz,
+ psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
- writeq_relaxed(val, its->base + GITS_BASER + i * 8);
- tmp = readq_relaxed(its->base + GITS_BASER + i * 8);
+ return 0;
+}
+
+static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser,
+ u32 psz, u32 *order)
+{
+ u64 esz = GITS_BASER_ENTRY_SIZE(its_read_baser(its, baser));
+ u64 val = GITS_BASER_InnerShareable | GITS_BASER_WaWb;
+ u32 ids = its->device_ids;
+ u32 new_order = *order;
+ bool indirect = false;
- if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
+ /* No need to enable Indirection if memory requirement < (psz * 2) bytes */
+ if ((esz << ids) > (psz * 2)) {
+ /*
+ * Find out whether hw supports a single or two-level table by
+ * reading the bit at offset '62' after writing '1' to it.
+ */
+ its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
+ indirect = !!(baser->val & GITS_BASER_INDIRECT);
+
+ if (indirect) {
/*
- * Shareability didn't stick. Just use
- * whatever the read reported, which is likely
- * to be the only thing this redistributor
- * supports. If that's zero, make it
- * non-cacheable as well.
+ * The size of a lvl2 table equals the ITS page size 'psz'.
+ * To size the lvl1 table, subtract from 'ids' (reported by
+ * the ITS hardware) the ID bits resolved by one lvl2 page,
+ * i.e. ilog2(psz / esz); the lvl1 table then needs one
+ * GITS_LVL1_ENTRY_SIZE entry per remaining ID.
*/
- shr = tmp & GITS_BASER_SHAREABILITY_MASK;
- if (!shr) {
- cache = GITS_BASER_nC;
- __flush_dcache_area(base, PAGE_ORDER_TO_SIZE(order));
- }
- goto retry_baser;
+ ids -= ilog2(psz / esz);
+ esz = GITS_LVL1_ENTRY_SIZE;
}
+ }
- if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
- /*
- * Page size didn't stick. Let's try a smaller
- * size and retry. If we reach 4K, then
- * something is horribly wrong...
- */
- free_pages((unsigned long)base, order);
- its->tables[i].base = NULL;
+ /*
+ * Allocate as many entries as required to fit the
+ * range of device IDs that the ITS can grok... The ID
+ * space being incredibly sparse, this results in a
+ * massive waste of memory if the two-level device table
+ * feature is not supported by the hardware.
+ */
+ new_order = max_t(u32, get_order(esz << ids), new_order);
+ if (new_order >= MAX_ORDER) {
+ new_order = MAX_ORDER - 1;
+ ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / esz);
+ pr_warn("ITS@%pa: Device Table too large, reduce ids %u->%u\n",
+ &its->phys_base, its->device_ids, ids);
+ }
- switch (psz) {
- case SZ_16K:
- psz = SZ_4K;
- goto retry_alloc_baser;
- case SZ_64K:
- psz = SZ_16K;
- goto retry_alloc_baser;
- }
- }
+ *order = new_order;
+
+ return indirect;
+}
+
+static void its_free_tables(struct its_node *its)
+{
+ int i;
- if (val != tmp) {
- pr_err("ITS: %s: GITS_BASER%d doesn't stick: %lx %lx\n",
- node_name, i,
- (unsigned long) val, (unsigned long) tmp);
- err = -ENXIO;
- goto out_free;
+ for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+ if (its->tables[i].base) {
+ free_pages((unsigned long)its->tables[i].base,
+ its->tables[i].order);
+ its->tables[i].base = NULL;
}
+ }
+}
+
+static int its_alloc_tables(struct its_node *its)
+{
+ u64 typer = readq_relaxed(its->base + GITS_TYPER);
+ u32 ids = GITS_TYPER_DEVBITS(typer);
+ u64 shr = GITS_BASER_InnerShareable;
+ u64 cache = GITS_BASER_WaWb;
+ u32 psz = SZ_64K;
+ int err, i;
- pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n",
- (int)(PAGE_ORDER_TO_SIZE(order) / entry_size),
- its_base_type_string[type],
- (unsigned long)virt_to_phys(base),
- psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
+ if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) {
+ /*
+ * erratum 22375: only alloc 8MB table size
+ * erratum 24313: ignore memory access type
+ */
+ cache = GITS_BASER_nCnB;
+ ids = 0x14; /* 20 bits, 8MB */
}
- return 0;
+ its->device_ids = ids;
-out_free:
- its_free_tables(its);
+ for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+ struct its_baser *baser = its->tables + i;
+ u64 val = its_read_baser(its, baser);
+ u64 type = GITS_BASER_TYPE(val);
+ u32 order = get_order(psz);
+ bool indirect = false;
- return err;
+ if (type == GITS_BASER_TYPE_NONE)
+ continue;
+
+ if (type == GITS_BASER_TYPE_DEVICE)
+ indirect = its_parse_baser_device(its, baser, psz, &order);
+
+ err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
+ if (err < 0) {
+ its_free_tables(its);
+ return err;
+ }
+
+ /* Update settings which will be used for next BASERn */
+ psz = baser->psz;
+ cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
+ shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
+ }
+
+ return 0;
}
static int its_alloc_collections(struct its_node *its)
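
The rewritten its_setup_baser() negotiates GITS_BASERn attributes by writing the desired value and reading it back: a field the implementation treats as read-only simply does not stick, and the code then falls back (shareability to whatever was read, page size down the 64K -> 16K -> 4K ladder). The probe-by-writeback idea in isolation, as a sketch:

    #include <linux/io.h>

    /* Returns true if the 'mask' bits of 'want' survived the write. */
    static bool baser_field_stuck(void __iomem *reg, u64 want, u64 mask)
    {
            u64 got;

            writeq_relaxed(want, reg);
            got = readq_relaxed(reg);
            return !((want ^ got) & mask);
    }

its_setup_baser() applies exactly this test to GITS_BASER_SHAREABILITY_MASK and GITS_BASER_PAGE_SIZE_MASK before declaring the table programmed.
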
@@ -1185,10 +1247,57 @@ static struct its_baser *its_get_baser(struct its_node *its, u32 type)
return NULL;
}
+static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
+{
+ struct its_baser *baser;
+ struct page *page;
+ u32 esz, idx;
+ __le64 *table;
+
+ baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
+
+ /* Don't allow device id that exceeds ITS hardware limit */
+ if (!baser)
+ return (ilog2(dev_id) < its->device_ids);
+
+ /* Don't allow device id that exceeds single, flat table limit */
+ esz = GITS_BASER_ENTRY_SIZE(baser->val);
+ if (!(baser->val & GITS_BASER_INDIRECT))
+ return (dev_id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
+
+ /* Compute 1st level table index & check if that exceeds table limit */
+ idx = dev_id >> ilog2(baser->psz / esz);
+ if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
+ return false;
+
+ table = baser->base;
+
+ /* Allocate memory for 2nd level table */
+ if (!table[idx]) {
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz));
+ if (!page)
+ return false;
+
+ /* Flush Lvl2 table to PoC if hw doesn't support coherency */
+ if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
+ __flush_dcache_area(page_address(page), baser->psz);
+
+ table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
+
+ /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
+ if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
+ __flush_dcache_area(table + idx, GITS_LVL1_ENTRY_SIZE);
+
+ /* Ensure updated table contents are visible to ITS hardware */
+ dsb(sy);
+ }
+
+ return true;
+}
+
static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
int nvecs)
{
- struct its_baser *baser;
struct its_device *dev;
unsigned long *lpi_map;
unsigned long flags;
@@ -1199,14 +1308,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
int nr_ites;
int sz;
- baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
-
- /* Don't allow 'dev_id' that exceeds single, flat table limit */
- if (baser) {
- if (dev_id >= (PAGE_ORDER_TO_SIZE(baser->order) /
- GITS_BASER_ENTRY_SIZE(baser->val)))
- return NULL;
- } else if (ilog2(dev_id) >= its->device_ids)
+ if (!its_alloc_device_table(its, dev_id))
return NULL;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -1443,7 +1545,12 @@ static int its_force_quiescent(void __iomem *base)
u32 val;
val = readl_relaxed(base + GITS_CTLR);
- if (val & GITS_CTLR_QUIESCENT)
+ /*
+ * The GIC architecture specification requires the ITS to be both
+ * disabled and quiescent for writes to GITS_BASER<n> or
+ * GITS_CBASER to not have UNPREDICTABLE results.
+ */
+ if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
return 0;
/* Disable the generation of all interrupts to this ITS */
@@ -1569,7 +1676,7 @@ static int __init its_probe(struct device_node *node,
its_enable_quirks(its);
- err = its_alloc_tables(node->full_name, its);
+ err = its_alloc_tables(its);
if (err)
goto out_free_cmd;
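
With the two-level Device Table enabled, the lvl1 entry for a dev_id points to an on-demand lvl2 page of psz bytes, each holding psz/esz device entries, and only the lvl2 pages actually referenced get allocated. The index arithmetic from its_alloc_device_table(), restated as a sketch:

    #include <linux/types.h>

    /* device ids resolved by one lvl2 page */
    static inline u32 its_ids_per_lvl2(u32 psz, u32 esz)
    {
            return psz / esz;
    }

    /* lvl1 slot for a device id; matches 'dev_id >> ilog2(psz / esz)' */
    static inline u32 its_lvl1_idx(u32 dev_id, u32 psz, u32 esz)
    {
            return dev_id / its_ids_per_lvl2(psz, esz);
    }

With psz = SZ_64K and esz = 8 bytes, dev_ids 0..8191 share lvl1 slot 0, so a sparse ID space no longer forces one giant flat allocation.
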
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 2c5ba0e70..da6c0ba61 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -538,27 +538,17 @@ static void gic_cpu_init(void)
}
#ifdef CONFIG_SMP
-static int gic_secondary_init(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+
+static int gic_starting_cpu(unsigned int cpu)
{
- if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
- gic_cpu_init();
- return NOTIFY_OK;
+ gic_cpu_init();
+ return 0;
}
-/*
- * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
- * priority because the GIC needs to be up before the ARM generic timers.
- */
-static struct notifier_block gic_cpu_notifier = {
- .notifier_call = gic_secondary_init,
- .priority = 100,
-};
-
static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
unsigned long cluster_id)
{
- int cpu = *base_cpu;
+ int next_cpu, cpu = *base_cpu;
unsigned long mpidr = cpu_logical_map(cpu);
u16 tlist = 0;
@@ -572,9 +562,10 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
tlist |= 1 << (mpidr & 0xf);
- cpu = cpumask_next(cpu, mask);
- if (cpu >= nr_cpu_ids)
+ next_cpu = cpumask_next(cpu, mask);
+ if (next_cpu >= nr_cpu_ids)
goto out;
+ cpu = next_cpu;
mpidr = cpu_logical_map(cpu);
@@ -634,7 +625,9 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
static void gic_smp_init(void)
{
set_smp_cross_call(gic_raise_softirq);
- register_cpu_notifier(&gic_cpu_notifier);
+ cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GICV3_STARTING,
+ "AP_IRQ_GICV3_STARTING", gic_starting_cpu,
+ NULL);
}
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
@@ -675,13 +668,20 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
#endif
#ifdef CONFIG_CPU_PM
+/* Check whether it's a single security state view */
+static bool gic_dist_security_disabled(void)
+{
+ return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
+}
+
static int gic_cpu_pm_notifier(struct notifier_block *self,
unsigned long cmd, void *v)
{
if (cmd == CPU_PM_EXIT) {
- gic_enable_redist(true);
+ if (gic_dist_security_disabled())
+ gic_enable_redist(true);
gic_cpu_sys_reg_init();
- } else if (cmd == CPU_PM_ENTER) {
+ } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
gic_write_grpen1(0);
gic_enable_redist(false);
}
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index fbc4ae2af..390fac59c 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -75,7 +75,7 @@ struct gic_chip_data {
void __iomem *raw_dist_base;
void __iomem *raw_cpu_base;
u32 percpu_offset;
-#ifdef CONFIG_CPU_PM
+#if defined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM)
u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
u32 saved_spi_active[DIV_ROUND_UP(1020, 32)];
u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
@@ -449,7 +449,7 @@ static void gic_cpu_if_up(struct gic_chip_data *gic)
}
-static void __init gic_dist_init(struct gic_chip_data *gic)
+static void gic_dist_init(struct gic_chip_data *gic)
{
unsigned int i;
u32 cpumask;
@@ -528,14 +528,14 @@ int gic_cpu_if_down(unsigned int gic_nr)
return 0;
}
-#ifdef CONFIG_CPU_PM
+#if defined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM)
/*
* Saves the GIC distributor registers during suspend or idle. Must be called
* with interrupts disabled but before powering down the GIC. After calling
* this function, no interrupts will be delivered by the GIC, and another
* platform-specific wakeup source must be enabled.
*/
-static void gic_dist_save(struct gic_chip_data *gic)
+void gic_dist_save(struct gic_chip_data *gic)
{
unsigned int gic_irqs;
void __iomem *dist_base;
@@ -574,7 +574,7 @@ static void gic_dist_save(struct gic_chip_data *gic)
* handled normally, but any edge interrupts that occurred will not be seen by
* the GIC and need to be handled by the platform-specific wakeup source.
*/
-static void gic_dist_restore(struct gic_chip_data *gic)
+void gic_dist_restore(struct gic_chip_data *gic)
{
unsigned int gic_irqs;
unsigned int i;
@@ -620,7 +620,7 @@ static void gic_dist_restore(struct gic_chip_data *gic)
writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
}
-static void gic_cpu_save(struct gic_chip_data *gic)
+void gic_cpu_save(struct gic_chip_data *gic)
{
int i;
u32 *ptr;
@@ -650,7 +650,7 @@ static void gic_cpu_save(struct gic_chip_data *gic)
}
-static void gic_cpu_restore(struct gic_chip_data *gic)
+void gic_cpu_restore(struct gic_chip_data *gic)
{
int i;
u32 *ptr;
@@ -727,7 +727,7 @@ static struct notifier_block gic_notifier_block = {
.notifier_call = gic_notifier,
};
-static int __init gic_pm_init(struct gic_chip_data *gic)
+static int gic_pm_init(struct gic_chip_data *gic)
{
gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
sizeof(u32));
@@ -757,7 +757,7 @@ free_ppi_enable:
return -ENOMEM;
}
#else
-static int __init gic_pm_init(struct gic_chip_data *gic)
+static int gic_pm_init(struct gic_chip_data *gic)
{
return 0;
}
@@ -769,6 +769,13 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
int cpu;
unsigned long flags, map = 0;
+ if (unlikely(nr_cpu_ids == 1)) {
+ /* Only one CPU? let's do a self-IPI... */
+ writel_relaxed(2 << 24 | irq,
+ gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+ return;
+ }
+
raw_spin_lock_irqsave(&irq_controller_lock, flags);
/* Convert our logical CPU mask into a physical one. */
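
The new single-CPU fast path writes GIC_DIST_SOFTINT (the GICv2 GICD_SGIR register) directly with '2 << 24 | irq'. Bits [25:24] of that register are the TargetListFilter; the encoding, spelled out as a sketch:

    #include <linux/types.h>

    /* GICD_SGIR TargetListFilter values (GICv2 architecture) */
    #define SGIR_TLF_TARGET_LIST  (0 << 24) /* CPUs named in bits [23:16] */
    #define SGIR_TLF_ALL_BUT_SELF (1 << 24) /* every CPU except the requester */
    #define SGIR_TLF_SELF         (2 << 24) /* the requesting CPU only */

    static inline u32 gic_sgir_self(unsigned int sgi) /* sgi = 0..15 */
    {
            return SGIR_TLF_SELF | sgi;
    }

Using the self-only filter lets the uniprocessor case skip the CPU-map lookup and the irq_controller_lock entirely.
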
@@ -984,25 +991,12 @@ static int gic_irq_domain_translate(struct irq_domain *d,
return -EINVAL;
}
-#ifdef CONFIG_SMP
-static int gic_secondary_init(struct notifier_block *nfb, unsigned long action,
- void *hcpu)
+static int gic_starting_cpu(unsigned int cpu)
{
- if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
- gic_cpu_init(&gic_data[0]);
- return NOTIFY_OK;
+ gic_cpu_init(&gic_data[0]);
+ return 0;
}
-/*
- * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
- * priority because the GIC needs to be up before the ARM generic timers.
- */
-static struct notifier_block gic_cpu_notifier = {
- .notifier_call = gic_secondary_init,
- .priority = 100,
-};
-#endif
-
static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
@@ -1032,32 +1026,31 @@ static const struct irq_domain_ops gic_irq_domain_ops = {
.unmap = gic_irq_domain_unmap,
};
-static int __init __gic_init_bases(struct gic_chip_data *gic, int irq_start,
- struct fwnode_handle *handle)
+static void gic_init_chip(struct gic_chip_data *gic, struct device *dev,
+ const char *name, bool use_eoimode1)
{
- irq_hw_number_t hwirq_base;
- int gic_irqs, irq_base, i, ret;
-
- if (WARN_ON(!gic || gic->domain))
- return -EINVAL;
-
/* Initialize irq_chip */
gic->chip = gic_chip;
+ gic->chip.name = name;
+ gic->chip.parent_device = dev;
- if (static_key_true(&supports_deactivate) && gic == &gic_data[0]) {
+ if (use_eoimode1) {
gic->chip.irq_mask = gic_eoimode1_mask_irq;
gic->chip.irq_eoi = gic_eoimode1_eoi_irq;
gic->chip.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity;
- gic->chip.name = kasprintf(GFP_KERNEL, "GICv2");
- } else {
- gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d",
- (int)(gic - &gic_data[0]));
}
#ifdef CONFIG_SMP
if (gic == &gic_data[0])
gic->chip.irq_set_affinity = gic_set_affinity;
#endif
+}
+
+static int gic_init_bases(struct gic_chip_data *gic, int irq_start,
+ struct fwnode_handle *handle)
+{
+ irq_hw_number_t hwirq_base;
+ int gic_irqs, irq_base, ret;
if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
/* Franken-GIC without banked registers... */
@@ -1138,6 +1131,36 @@ static int __init __gic_init_bases(struct gic_chip_data *gic, int irq_start,
goto error;
}
+ gic_dist_init(gic);
+ ret = gic_cpu_init(gic);
+ if (ret)
+ goto error;
+
+ ret = gic_pm_init(gic);
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
+ free_percpu(gic->dist_base.percpu_base);
+ free_percpu(gic->cpu_base.percpu_base);
+ }
+
+ return ret;
+}
+
+static int __init __gic_init_bases(struct gic_chip_data *gic,
+ int irq_start,
+ struct fwnode_handle *handle)
+{
+ char *name;
+ int i, ret;
+
+ if (WARN_ON(!gic || gic->domain))
+ return -EINVAL;
+
if (gic == &gic_data[0]) {
/*
* Initialize the CPU interface map to all CPUs.
@@ -1148,31 +1171,26 @@ static int __init __gic_init_bases(struct gic_chip_data *gic, int irq_start,
gic_cpu_map[i] = 0xff;
#ifdef CONFIG_SMP
set_smp_cross_call(gic_raise_softirq);
- register_cpu_notifier(&gic_cpu_notifier);
#endif
+ cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
+ "AP_IRQ_GIC_STARTING",
+ gic_starting_cpu, NULL);
set_handle_irq(gic_handle_irq);
if (static_key_true(&supports_deactivate))
pr_info("GIC: Using split EOI/Deactivate mode\n");
}
- gic_dist_init(gic);
- ret = gic_cpu_init(gic);
- if (ret)
- goto error;
-
- ret = gic_pm_init(gic);
- if (ret)
- goto error;
-
- return 0;
-
-error:
- if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
- free_percpu(gic->dist_base.percpu_base);
- free_percpu(gic->cpu_base.percpu_base);
+ if (static_key_true(&supports_deactivate) && gic == &gic_data[0]) {
+ name = kasprintf(GFP_KERNEL, "GICv2");
+ gic_init_chip(gic, NULL, name, true);
+ } else {
+ name = kasprintf(GFP_KERNEL, "GIC-%d", (int)(gic-&gic_data[0]));
+ gic_init_chip(gic, NULL, name, false);
}
- kfree(gic->chip.name);
+ ret = gic_init_bases(gic, irq_start, handle);
+ if (ret)
+ kfree(name);
return ret;
}
@@ -1250,7 +1268,7 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
return true;
}
-static int __init gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
+static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
{
if (!gic || !node)
return -EINVAL;
@@ -1274,6 +1292,34 @@ error:
return -ENOMEM;
}
+int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
+{
+ int ret;
+
+ if (!dev || !dev->of_node || !gic || !irq)
+ return -EINVAL;
+
+ *gic = devm_kzalloc(dev, sizeof(**gic), GFP_KERNEL);
+ if (!*gic)
+ return -ENOMEM;
+
+ gic_init_chip(*gic, dev, dev->of_node->name, false);
+
+ ret = gic_of_setup(*gic, dev->of_node);
+ if (ret)
+ return ret;
+
+ ret = gic_init_bases(*gic, -1, &dev->of_node->fwnode);
+ if (ret) {
+ gic_teardown(*gic);
+ return ret;
+ }
+
+ irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq, *gic);
+
+ return 0;
+}
+
static void __init gic_of_setup_kvm_info(struct device_node *node)
{
int ret;
@@ -1353,7 +1399,11 @@ IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
IRQCHIP_DECLARE(pl390, "arm,pl390", gic_of_init);
-
+#else
+int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
+{
+ return -ENOTSUPP;
+}
#endif
#ifdef CONFIG_ACPI
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index 9e25d8ce0..021b0e083 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -342,26 +342,12 @@ static int hip04_irq_domain_xlate(struct irq_domain *d,
return ret;
}
-#ifdef CONFIG_SMP
-static int hip04_irq_secondary_init(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
+static int hip04_irq_starting_cpu(unsigned int cpu)
{
- if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
- hip04_irq_cpu_init(&hip04_data);
- return NOTIFY_OK;
+ hip04_irq_cpu_init(&hip04_data);
+ return 0;
}
-/*
- * Notifier for enabling the INTC CPU interface. Set an arbitrarily high
- * priority because the GIC needs to be up before the ARM generic timers.
- */
-static struct notifier_block hip04_irq_cpu_notifier = {
- .notifier_call = hip04_irq_secondary_init,
- .priority = 100,
-};
-#endif
-
static const struct irq_domain_ops hip04_irq_domain_ops = {
.map = hip04_irq_domain_map,
.xlate = hip04_irq_domain_xlate,
@@ -417,13 +403,12 @@ hip04_of_init(struct device_node *node, struct device_node *parent)
#ifdef CONFIG_SMP
set_smp_cross_call(hip04_raise_softirq);
- register_cpu_notifier(&hip04_irq_cpu_notifier);
#endif
set_handle_irq(hip04_handle_irq);
hip04_irq_dist_init(&hip04_data);
- hip04_irq_cpu_init(&hip04_data);
-
+ cpuhp_setup_state(CPUHP_AP_IRQ_HIP04_STARTING, "AP_IRQ_HIP04_STARTING",
+ hip04_irq_starting_cpu, NULL);
return 0;
}
IRQCHIP_DECLARE(hip04_intc, "hisilicon,hip04-intc", hip04_of_init);
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 6acf69ee0..618569640 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -359,7 +359,7 @@ static void gic_handle_shared_int(bool chained)
pending_reg += gic_reg_step;
intrmask_reg += gic_reg_step;
- if (!config_enabled(CONFIG_64BIT) || mips_cm_is64)
+ if (!IS_ENABLED(CONFIG_64BIT) || mips_cm_is64)
continue;
pending[i] |= (u64)gic_read(pending_reg) << 32;
@@ -1047,12 +1047,14 @@ static void __init __gic_init(unsigned long gic_base_addr,
&gic_irq_domain_ops, NULL);
if (!gic_irq_domain)
panic("Failed to add GIC IRQ domain");
+ gic_irq_domain->name = "mips-gic-irq";
gic_dev_domain = irq_domain_add_hierarchy(gic_irq_domain, 0,
GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
node, &gic_dev_domain_ops, NULL);
if (!gic_dev_domain)
panic("Failed to add GIC DEV domain");
+ gic_dev_domain->name = "mips-gic-dev";
gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
IRQ_DOMAIN_FLAG_IPI_PER_CPU,
@@ -1061,6 +1063,7 @@ static void __init __gic_init(unsigned long gic_base_addr,
if (!gic_ipi_domain)
panic("Failed to add GIC IPI domain");
+ gic_ipi_domain->name = "mips-gic-ipi";
gic_ipi_domain->bus_token = DOMAIN_BUS_IPI;
if (node &&
diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c
index 9d1bcfc33..b04a8ac6e 100644
--- a/drivers/irqchip/irq-omap-intc.c
+++ b/drivers/irqchip/irq-omap-intc.c
@@ -23,6 +23,8 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/irqchip/irq-omap-intc.h>
+
/* Define these here for now until we drop all board-files */
#define OMAP24XX_IC_BASE 0x480fe000
#define OMAP34XX_IC_BASE 0x48200000
diff --git a/drivers/irqchip/irq-s3c24xx.c b/drivers/irqchip/irq-s3c24xx.c
index 5dc5a760c..c25ce5af0 100644
--- a/drivers/irqchip/irq-s3c24xx.c
+++ b/drivers/irqchip/irq-s3c24xx.c
@@ -92,9 +92,9 @@ static void s3c_irq_mask(struct irq_data *data)
unsigned long mask;
unsigned int irqno;
- mask = __raw_readl(intc->reg_mask);
+ mask = readl_relaxed(intc->reg_mask);
mask |= (1UL << irq_data->offset);
- __raw_writel(mask, intc->reg_mask);
+ writel_relaxed(mask, intc->reg_mask);
if (parent_intc) {
parent_data = &parent_intc->irqs[irq_data->parent_irq];
@@ -119,9 +119,9 @@ static void s3c_irq_unmask(struct irq_data *data)
unsigned long mask;
unsigned int irqno;
- mask = __raw_readl(intc->reg_mask);
+ mask = readl_relaxed(intc->reg_mask);
mask &= ~(1UL << irq_data->offset);
- __raw_writel(mask, intc->reg_mask);
+ writel_relaxed(mask, intc->reg_mask);
if (parent_intc) {
irqno = irq_find_mapping(parent_intc->domain,
@@ -136,9 +136,9 @@ static inline void s3c_irq_ack(struct irq_data *data)
struct s3c_irq_intc *intc = irq_data->intc;
unsigned long bitval = 1UL << irq_data->offset;
- __raw_writel(bitval, intc->reg_pending);
+ writel_relaxed(bitval, intc->reg_pending);
if (intc->reg_intpnd)
- __raw_writel(bitval, intc->reg_intpnd);
+ writel_relaxed(bitval, intc->reg_intpnd);
}
static int s3c_irq_type(struct irq_data *data, unsigned int type)
@@ -172,9 +172,9 @@ static int s3c_irqext_type_set(void __iomem *gpcon_reg,
unsigned long newvalue = 0, value;
/* Set the GPIO to external interrupt mode */
- value = __raw_readl(gpcon_reg);
+ value = readl_relaxed(gpcon_reg);
value = (value & ~(3 << gpcon_offset)) | (0x02 << gpcon_offset);
- __raw_writel(value, gpcon_reg);
+ writel_relaxed(value, gpcon_reg);
/* Set the external interrupt to pointed trigger type */
switch (type)
@@ -208,9 +208,9 @@ static int s3c_irqext_type_set(void __iomem *gpcon_reg,
return -EINVAL;
}
- value = __raw_readl(extint_reg);
+ value = readl_relaxed(extint_reg);
value = (value & ~(7 << extint_offset)) | (newvalue << extint_offset);
- __raw_writel(value, extint_reg);
+ writel_relaxed(value, extint_reg);
return 0;
}
@@ -315,8 +315,8 @@ static void s3c_irq_demux(struct irq_desc *desc)
chained_irq_enter(chip, desc);
- src = __raw_readl(sub_intc->reg_pending);
- msk = __raw_readl(sub_intc->reg_mask);
+ src = readl_relaxed(sub_intc->reg_pending);
+ msk = readl_relaxed(sub_intc->reg_mask);
src &= ~msk;
src &= irq_data->sub_bits;
@@ -337,7 +337,7 @@ static inline int s3c24xx_handle_intc(struct s3c_irq_intc *intc,
int pnd;
int offset;
- pnd = __raw_readl(intc->reg_intpnd);
+ pnd = readl_relaxed(intc->reg_intpnd);
if (!pnd)
return false;
@@ -352,7 +352,7 @@ static inline int s3c24xx_handle_intc(struct s3c_irq_intc *intc,
*
* Thanks to Klaus, Shannon, et al for helping to debug this problem
*/
- offset = __raw_readl(intc->reg_intpnd + 4);
+ offset = readl_relaxed(intc->reg_intpnd + 4);
/* Find the bit manually, when the offset is wrong.
* The pending register only ever contains the one bit of the next
@@ -406,7 +406,7 @@ int s3c24xx_set_fiq(unsigned int irq, bool on)
intmod = 0;
}
- __raw_writel(intmod, S3C2410_INTMOD);
+ writel_relaxed(intmod, S3C2410_INTMOD);
return 0;
}
@@ -508,14 +508,14 @@ static void s3c24xx_clear_intc(struct s3c_irq_intc *intc)
last = 0;
for (i = 0; i < 4; i++) {
- pend = __raw_readl(reg_source);
+ pend = readl_relaxed(reg_source);
if (pend == 0 || pend == last)
break;
- __raw_writel(pend, intc->reg_pending);
+ writel_relaxed(pend, intc->reg_pending);
if (intc->reg_intpnd)
- __raw_writel(pend, intc->reg_intpnd);
+ writel_relaxed(pend, intc->reg_intpnd);
pr_info("irq: clearing pending status %08x\n", (int)pend);
last = pend;
diff --git a/drivers/irqchip/irq-sirfsoc.c b/drivers/irqchip/irq-sirfsoc.c
index 10cb21b9b..e1336848a 100644
--- a/drivers/irqchip/irq-sirfsoc.c
+++ b/drivers/irqchip/irq-sirfsoc.c
@@ -29,6 +29,11 @@
static struct irq_domain *sirfsoc_irqdomain;
+static void __iomem *sirfsoc_irq_get_regbase(void)
+{
+ return (void __iomem __force *)sirfsoc_irqdomain->host_data;
+}
+
static __init void sirfsoc_alloc_gc(void __iomem *base)
{
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
@@ -53,7 +58,7 @@ static __init void sirfsoc_alloc_gc(void __iomem *base)
static void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs)
{
- void __iomem *base = sirfsoc_irqdomain->host_data;
+ void __iomem *base = sirfsoc_irq_get_regbase();
u32 irqstat;
irqstat = readl_relaxed(base + SIRFSOC_INIT_IRQ_ID);
@@ -94,7 +99,7 @@ static struct sirfsoc_irq_status sirfsoc_irq_st;
static int sirfsoc_irq_suspend(void)
{
- void __iomem *base = sirfsoc_irqdomain->host_data;
+ void __iomem *base = sirfsoc_irq_get_regbase();
sirfsoc_irq_st.mask0 = readl_relaxed(base + SIRFSOC_INT_RISC_MASK0);
sirfsoc_irq_st.mask1 = readl_relaxed(base + SIRFSOC_INT_RISC_MASK1);
@@ -106,7 +111,7 @@ static int sirfsoc_irq_suspend(void)
static void sirfsoc_irq_resume(void)
{
- void __iomem *base = sirfsoc_irqdomain->host_data;
+ void __iomem *base = sirfsoc_irq_get_regbase();
writel_relaxed(sirfsoc_irq_st.mask0, base + SIRFSOC_INT_RISC_MASK0);
writel_relaxed(sirfsoc_irq_st.mask1, base + SIRFSOC_INT_RISC_MASK1);
diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c
index e902f081e..3973a14bb 100644
--- a/drivers/irqchip/irq-tegra.c
+++ b/drivers/irqchip/irq-tegra.c
@@ -90,7 +90,7 @@ static struct tegra_ictlr_info *lic;
static inline void tegra_ictlr_write_mask(struct irq_data *d, unsigned long reg)
{
- void __iomem *base = d->chip_data;
+ void __iomem *base = (void __iomem __force *)d->chip_data;
u32 mask;
mask = BIT(d->hwirq % 32);
@@ -266,7 +266,7 @@ static int tegra_ictlr_domain_alloc(struct irq_domain *domain,
irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
&tegra_ictlr_chip,
- info->base[ictlr]);
+ (void __force *)info->base[ictlr]);
}
parent_fwspec = *fwspec;
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index b956dfffe..f811a7de5 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -167,7 +167,7 @@ static int vic_suspend(void)
return 0;
}
-struct syscore_ops vic_syscore_ops = {
+static struct syscore_ops vic_syscore_ops = {
.suspend = vic_suspend,
.resume = vic_resume,
};
@@ -517,7 +517,8 @@ int __init vic_init_cascaded(void __iomem *base, unsigned int parent_irq,
EXPORT_SYMBOL_GPL(vic_init_cascaded);
#ifdef CONFIG_OF
-int __init vic_of_init(struct device_node *node, struct device_node *parent)
+static int __init vic_of_init(struct device_node *node,
+ struct device_node *parent)
{
void __iomem *regs;
u32 interrupt_mask = ~0;
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
index a2e0ed6c9..32f34511c 100644
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ b/drivers/isdn/hardware/eicon/divasmain.c
@@ -445,32 +445,32 @@ void divasa_unmap_pci_bar(void __iomem *bar)
/*********************************************************
** I/O port access
*********************************************************/
-byte __inline__ inpp(void __iomem *addr)
+inline byte inpp(void __iomem *addr)
{
return (inb((unsigned long) addr));
}
-word __inline__ inppw(void __iomem *addr)
+inline word inppw(void __iomem *addr)
{
return (inw((unsigned long) addr));
}
-void __inline__ inppw_buffer(void __iomem *addr, void *P, int length)
+inline void inppw_buffer(void __iomem *addr, void *P, int length)
{
insw((unsigned long) addr, (word *) P, length >> 1);
}
-void __inline__ outppw_buffer(void __iomem *addr, void *P, int length)
+inline void outppw_buffer(void __iomem *addr, void *P, int length)
{
outsw((unsigned long) addr, (word *) P, length >> 1);
}
-void __inline__ outppw(void __iomem *addr, word w)
+inline void outppw(void __iomem *addr, word w)
{
outw(w, (unsigned long) addr);
}
-void __inline__ outpp(void __iomem *addr, word p)
+inline void outpp(void __iomem *addr, word p)
{
outb(p, (unsigned long) addr);
}
diff --git a/drivers/isdn/hardware/eicon/platform.h b/drivers/isdn/hardware/eicon/platform.h
index b2edb7590..62e2073c3 100644
--- a/drivers/isdn/hardware/eicon/platform.h
+++ b/drivers/isdn/hardware/eicon/platform.h
@@ -203,7 +203,7 @@ void PCIread(byte bus, byte func, int offset, void *data, int length, void *pci_
/*
** I/O Port utilities
*/
-int diva_os_register_io_port(void *adapter, int register, unsigned long port,
+int diva_os_register_io_port(void *adapter, int reg, unsigned long port,
unsigned long length, const char *name, int id);
/*
** I/O port access abstraction
@@ -271,13 +271,13 @@ void diva_os_get_time(dword *sec, dword *usec);
** atomic operation, fake because we use threads
*/
typedef int diva_os_atomic_t;
-static diva_os_atomic_t __inline__
+static inline diva_os_atomic_t
diva_os_atomic_increment(diva_os_atomic_t *pv)
{
*pv += 1;
return (*pv);
}
-static diva_os_atomic_t __inline__
+static inline diva_os_atomic_t
diva_os_atomic_decrement(diva_os_atomic_t *pv)
{
*pv -= 1;
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 5ae28340a..9dcc9b13d 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -228,6 +228,20 @@ config LEDS_LP3944
To compile this driver as a module, choose M here: the
module will be called leds-lp3944.
+config LEDS_LP3952
+ tristate "LED Support for TI LP3952 2 channel LED driver"
+ depends on LEDS_CLASS
+ depends on I2C
+ depends on ACPI
+ depends on GPIOLIB
+ select REGMAP_I2C
+ help
+ This option enables support for LEDs connected to the Texas
+ Instruments LP3952 LED driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called leds-lp3952.
+
config LEDS_LP55XX_COMMON
tristate "Common Driver for TI/National LP5521/5523/55231/5562/8501"
depends on LEDS_LP5521 || LEDS_LP5523 || LEDS_LP5562 || LEDS_LP8501
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index cb2013df5..0684c865a 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o
obj-$(CONFIG_LEDS_GPIO_REGISTER) += leds-gpio-register.o
obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o
+obj-$(CONFIG_LEDS_LP3952) += leds-lp3952.o
obj-$(CONFIG_LEDS_LP55XX_COMMON) += leds-lp55xx-common.o
obj-$(CONFIG_LEDS_LP5521) += leds-lp5521.o
obj-$(CONFIG_LEDS_LP5523) += leds-lp5523.o
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 55fa65e1a..c92702a68 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -60,6 +60,8 @@ ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr,
goto unlock;
}
}
+ /* we come here only if buf matches no trigger */
+ ret = -EINVAL;
up_read(&triggers_list_lock);
unlock:
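The two added lines close a silent-success hole: `ret` otherwise still held the byte count when the loop matched no trigger, so writing a bogus trigger name via sysfs reported success. A self-contained sketch of the same default-to-error idiom, using a plain string table rather than the real trigger list:

#include <linux/errno.h>
#include <linux/string.h>

static int demo_lookup(const char *name, const char * const tbl[], int n)
{
	int i, ret = -EINVAL;	/* assume no match until proven otherwise */

	for (i = 0; i < n; i++) {
		if (sysfs_streq(name, tbl[i])) {
			ret = i;	/* found: report the index */
			break;
		}
	}
	return ret;	/* falling out of the loop means no match */
}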
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 8229f063b..9b991d46e 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -165,6 +165,7 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
return ERR_PTR(-ENOMEM);
device_for_each_child_node(dev, child) {
+ struct gpio_led_data *led_dat = &priv->leds[priv->num_leds];
struct gpio_led led = {};
const char *state = NULL;
@@ -205,12 +206,12 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
if (fwnode_property_present(child, "panic-indicator"))
led.panic_indicator = 1;
- ret = create_gpio_led(&led, &priv->leds[priv->num_leds],
- dev, NULL);
+ ret = create_gpio_led(&led, led_dat, dev, NULL);
if (ret < 0) {
fwnode_handle_put(child);
goto err;
}
+ led_dat->cdev.dev->of_node = np;
priv->num_leds++;
}
diff --git a/drivers/leds/leds-hp6xx.c b/drivers/leds/leds-hp6xx.c
index a6b8db0e2..137969fce 100644
--- a/drivers/leds/leds-hp6xx.c
+++ b/drivers/leds/leds-hp6xx.c
@@ -50,7 +50,7 @@ static struct led_classdev hp6xx_red_led = {
static struct led_classdev hp6xx_green_led = {
.name = "hp6xx:green",
- .default_trigger = "ide-disk",
+ .default_trigger = "disk-activity",
.brightness_set = hp6xxled_green_set,
.flags = LED_CORE_SUSPENDRESUME,
};
diff --git a/drivers/leds/leds-is31fl32xx.c b/drivers/leds/leds-is31fl32xx.c
index c901d132d..478844c5c 100644
--- a/drivers/leds/leds-is31fl32xx.c
+++ b/drivers/leds/leds-is31fl32xx.c
@@ -422,7 +422,7 @@ err:
return ret;
}
-static const struct of_device_id of_is31fl31xx_match[] = {
+static const struct of_device_id of_is31fl32xx_match[] = {
{ .compatible = "issi,is31fl3236", .data = &is31fl3236_cdef, },
{ .compatible = "issi,is31fl3235", .data = &is31fl3235_cdef, },
{ .compatible = "issi,is31fl3218", .data = &is31fl3218_cdef, },
@@ -432,7 +432,7 @@ static const struct of_device_id of_is31fl31xx_match[] = {
{},
};
-MODULE_DEVICE_TABLE(of, of_is31fl31xx_match);
+MODULE_DEVICE_TABLE(of, of_is31fl32xx_match);
static int is31fl32xx_probe(struct i2c_client *client,
const struct i2c_device_id *id)
@@ -444,7 +444,7 @@ static int is31fl32xx_probe(struct i2c_client *client,
int count;
int ret = 0;
- of_dev_id = of_match_device(of_is31fl31xx_match, dev);
+ of_dev_id = of_match_device(of_is31fl32xx_match, dev);
if (!of_dev_id)
return -EINVAL;
@@ -482,23 +482,29 @@ static int is31fl32xx_remove(struct i2c_client *client)
}
/*
- * i2c-core requires that id_table be non-NULL, even though
- * it is not used for DeviceTree based instantiation.
+ * i2c-core (and modalias) requires that id_table be properly filled,
+ * even though it is not used for DeviceTree based instantiation.
*/
-static const struct i2c_device_id is31fl31xx_id[] = {
+static const struct i2c_device_id is31fl32xx_id[] = {
+ { "is31fl3236" },
+ { "is31fl3235" },
+ { "is31fl3218" },
+ { "sn3218" },
+ { "is31fl3216" },
+ { "sn3216" },
{},
};
-MODULE_DEVICE_TABLE(i2c, is31fl31xx_id);
+MODULE_DEVICE_TABLE(i2c, is31fl32xx_id);
static struct i2c_driver is31fl32xx_driver = {
.driver = {
.name = "is31fl32xx",
- .of_match_table = of_is31fl31xx_match,
+ .of_match_table = of_is31fl32xx_match,
},
.probe = is31fl32xx_probe,
.remove = is31fl32xx_remove,
- .id_table = is31fl31xx_id,
+ .id_table = is31fl32xx_id,
};
module_i2c_driver(is31fl32xx_driver);
diff --git a/drivers/leds/leds-lp3952.c b/drivers/leds/leds-lp3952.c
new file mode 100644
index 000000000..a73c8ff08
--- /dev/null
+++ b/drivers/leds/leds-lp3952.c
@@ -0,0 +1,301 @@
+/*
+ * LED driver for TI lp3952 controller
+ *
+ * Copyright (C) 2016, DAQRI, LLC.
+ * Author: Tony Makkiel <tony.makkiel@daqri.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/leds-lp3952.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/reboot.h>
+#include <linux/regmap.h>
+
+static int lp3952_register_write(struct i2c_client *client, u8 reg, u8 val)
+{
+ int ret;
+ struct lp3952_led_array *priv = i2c_get_clientdata(client);
+
+ ret = regmap_write(priv->regmap, reg, val);
+
+ if (ret)
+ dev_err(&client->dev, "%s: reg 0x%x, val 0x%x, err %d\n",
+ __func__, reg, val, ret);
+ return ret;
+}
+
+static void lp3952_on_off(struct lp3952_led_array *priv,
+ enum lp3952_leds led_id, bool on)
+{
+ int ret, val;
+
+ dev_dbg(&priv->client->dev, "%s LED %d to %d\n", __func__, led_id, on);
+
+ val = 1 << led_id;
+ if (led_id == LP3952_LED_ALL)
+ val = LP3952_LED_MASK_ALL;
+
+ ret = regmap_update_bits(priv->regmap, LP3952_REG_LED_CTRL, val,
+ on ? val : 0);
+ if (ret)
+ dev_err(&priv->client->dev, "%s, Error %d\n", __func__, ret);
+}
+
+/*
+ * Using Imax to control brightness. There are 4 possible
+ * settings: 25, 50, 75 and 100% of Imax. Valid values are
+ * 0-4, with 0 meaning off.
+ */
+static int lp3952_set_brightness(struct led_classdev *cdev,
+ enum led_brightness value)
+{
+ unsigned int reg, shift_val;
+ struct lp3952_ctrl_hdl *led = container_of(cdev,
+ struct lp3952_ctrl_hdl,
+ cdev);
+ struct lp3952_led_array *priv = (struct lp3952_led_array *)led->priv;
+
+ dev_dbg(cdev->dev, "Brightness request: %d on %d\n", value,
+ led->channel);
+
+ if (value == LED_OFF) {
+ lp3952_on_off(priv, led->channel, false);
+ return 0;
+ }
+
+ if (led->channel > LP3952_RED_1) {
+ dev_err(cdev->dev, " %s Invalid LED requested", __func__);
+ return -EINVAL;
+ }
+
+ if (led->channel >= LP3952_BLUE_1) {
+ reg = LP3952_REG_RGB1_MAX_I_CTRL;
+ shift_val = (led->channel - LP3952_BLUE_1) * 2;
+ } else {
+ reg = LP3952_REG_RGB2_MAX_I_CTRL;
+ shift_val = led->channel * 2;
+ }
+
+ /* Enable the LED in case it is not enabled already */
+ lp3952_on_off(priv, led->channel, true);
+
+ return regmap_update_bits(priv->regmap, reg, 3 << shift_val,
+ --value << shift_val);
+}
+
+static int lp3952_get_label(struct device *dev, const char *label, char *dest)
+{
+ int ret;
+ const char *str;
+
+ ret = device_property_read_string(dev, label, &str);
+ if (!ret)
+ strncpy(dest, str, LP3952_LABEL_MAX_LEN);
+
+ return ret;
+}
+
+static int lp3952_register_led_classdev(struct lp3952_led_array *priv)
+{
+ int i, acpi_ret, ret = -ENODEV;
+ static const char *led_name_hdl[LP3952_LED_ALL] = {
+ "blue2",
+ "green2",
+ "red2",
+ "blue1",
+ "green1",
+ "red1"
+ };
+
+ for (i = 0; i < LP3952_LED_ALL; i++) {
+ acpi_ret = lp3952_get_label(&priv->client->dev, led_name_hdl[i],
+ priv->leds[i].name);
+ if (acpi_ret)
+ continue;
+
+ priv->leds[i].cdev.name = priv->leds[i].name;
+ priv->leds[i].cdev.brightness = LED_OFF;
+ priv->leds[i].cdev.max_brightness = LP3952_BRIGHT_MAX;
+ priv->leds[i].cdev.brightness_set_blocking =
+ lp3952_set_brightness;
+ priv->leds[i].channel = i;
+ priv->leds[i].priv = priv;
+
+ ret = devm_led_classdev_register(&priv->client->dev,
+ &priv->leds[i].cdev);
+ if (ret < 0) {
+ dev_err(&priv->client->dev,
+ "couldn't register LED %s\n",
+ priv->leds[i].cdev.name);
+ break;
+ }
+ }
+ return ret;
+}
+
+static int lp3952_set_pattern_gen_cmd(struct lp3952_led_array *priv,
+ u8 cmd_index, u8 r, u8 g, u8 b,
+ enum lp3952_tt tt, enum lp3952_cet cet)
+{
+ int ret;
+ struct ptrn_gen_cmd line = {
+ {
+ {
+ .r = r,
+ .g = g,
+ .b = b,
+ .cet = cet,
+ .tt = tt
+ }
+ }
+ };
+
+ if (cmd_index >= LP3952_CMD_REG_COUNT)
+ return -EINVAL;
+
+ ret = lp3952_register_write(priv->client,
+ LP3952_REG_CMD_0 + cmd_index * 2,
+ line.bytes.msb);
+ if (ret)
+ return ret;
+
+ return lp3952_register_write(priv->client,
+ LP3952_REG_CMD_0 + cmd_index * 2 + 1,
+ line.bytes.lsb);
+}
+
+static int lp3952_configure(struct lp3952_led_array *priv)
+{
+ int ret;
+
+ /* Disable any LEDs left on from a previous configuration. */
+ ret = lp3952_register_write(priv->client, LP3952_REG_LED_CTRL, 0);
+ if (ret)
+ return ret;
+
+ /* Enable RGB pattern generation with looping */
+ ret = lp3952_register_write(priv->client, LP3952_REG_PAT_GEN_CTRL,
+ LP3952_PATRN_LOOP | LP3952_PATRN_GEN_EN);
+ if (ret)
+ return ret;
+
+ /* Set bit 6 (active mode) and select both LED sets, bits [1:0] */
+ ret = lp3952_register_write(priv->client, LP3952_REG_ENABLES,
+ LP3952_ACTIVE_MODE | LP3952_INT_B00ST_LDR);
+ if (ret)
+ return ret;
+
+ /* Set Cmd1 for RGB intensity, command execution time and transition time */
+ return lp3952_set_pattern_gen_cmd(priv, 0, I46, I71, I100, TT0,
+ CET197);
+}
+
+static const struct regmap_config lp3952_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = REG_MAX,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int lp3952_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int status;
+ struct lp3952_led_array *priv;
+
+ priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->client = client;
+
+ priv->enable_gpio = devm_gpiod_get(&client->dev, "nrst",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->enable_gpio)) {
+ status = PTR_ERR(priv->enable_gpio);
+ dev_err(&client->dev, "Failed to enable gpio: %d\n", status);
+ return status;
+ }
+
+ priv->regmap = devm_regmap_init_i2c(client, &lp3952_regmap);
+ if (IS_ERR(priv->regmap)) {
+ int err = PTR_ERR(priv->regmap);
+
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ err);
+ return err;
+ }
+
+ i2c_set_clientdata(client, priv);
+
+ status = lp3952_configure(priv);
+ if (status) {
+ dev_err(&client->dev, "Probe failed. Device not found (%d)\n",
+ status);
+ return status;
+ }
+
+ status = lp3952_register_led_classdev(priv);
+ if (status) {
+ dev_err(&client->dev, "Unable to register led_classdev: %d\n",
+ status);
+ return status;
+ }
+
+ return 0;
+}
+
+static int lp3952_remove(struct i2c_client *client)
+{
+ struct lp3952_led_array *priv;
+
+ priv = i2c_get_clientdata(client);
+ lp3952_on_off(priv, LP3952_LED_ALL, false);
+ gpiod_set_value(priv->enable_gpio, 0);
+
+ return 0;
+}
+
+static const struct i2c_device_id lp3952_id[] = {
+ {LP3952_NAME, 0},
+ {}
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id lp3952_acpi_match[] = {
+ {"TXNW3952", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, lp3952_acpi_match);
+#endif
+
+static struct i2c_driver lp3952_i2c_driver = {
+ .driver = {
+ .name = LP3952_NAME,
+ .acpi_match_table = ACPI_PTR(lp3952_acpi_match),
+ },
+ .probe = lp3952_probe,
+ .remove = lp3952_remove,
+ .id_table = lp3952_id,
+};
+
+module_i2c_driver(lp3952_i2c_driver);
+
+MODULE_AUTHOR("Tony Makkiel <tony.makkiel@daqri.com>");
+MODULE_DESCRIPTION("lp3952 I2C LED controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index e3d3b1aaa..09a7cffbc 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -21,6 +21,8 @@
#include <linux/workqueue.h>
#include <linux/leds-pca9532.h>
#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
/* m = num_leds*/
#define PCA9532_REG_INPUT(i) ((i) >> 3)
@@ -86,9 +88,22 @@ static const struct pca9532_chip_info pca9532_chip_info_tbl[] = {
},
};
+#ifdef CONFIG_OF
+static const struct of_device_id of_pca9532_leds_match[] = {
+ { .compatible = "nxp,pca9530", .data = (void *)pca9530 },
+ { .compatible = "nxp,pca9531", .data = (void *)pca9531 },
+ { .compatible = "nxp,pca9532", .data = (void *)pca9532 },
+ { .compatible = "nxp,pca9533", .data = (void *)pca9533 },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, of_pca9532_leds_match);
+#endif
+
static struct i2c_driver pca9532_driver = {
.driver = {
.name = "leds-pca953x",
+ .of_match_table = of_match_ptr(of_pca9532_leds_match),
},
.probe = pca9532_probe,
.remove = pca9532_remove,
@@ -354,6 +369,7 @@ static int pca9532_configure(struct i2c_client *client,
led->state = pled->state;
led->name = pled->name;
led->ldev.name = led->name;
+ led->ldev.default_trigger = led->default_trigger;
led->ldev.brightness = LED_OFF;
led->ldev.brightness_set_blocking =
pca9532_set_brightness;
@@ -432,15 +448,66 @@ exit:
return err;
}
+static struct pca9532_platform_data *
+pca9532_of_populate_pdata(struct device *dev, struct device_node *np)
+{
+ struct pca9532_platform_data *pdata;
+ struct device_node *child;
+ const struct of_device_id *match;
+ int devid, maxleds;
+ int i = 0;
+
+ match = of_match_device(of_pca9532_leds_match, dev);
+ if (!match)
+ return ERR_PTR(-ENODEV);
+
+ devid = (int)(uintptr_t)match->data;
+ maxleds = pca9532_chip_info_tbl[devid].num_leds;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ for_each_child_of_node(np, child) {
+ if (of_property_read_string(child, "label",
+ &pdata->leds[i].name))
+ pdata->leds[i].name = child->name;
+ of_property_read_u32(child, "type", &pdata->leds[i].type);
+ of_property_read_string(child, "linux,default-trigger",
+ &pdata->leds[i].default_trigger);
+ if (++i >= maxleds) {
+ of_node_put(child);
+ break;
+ }
+ }
+
+ return pdata;
+}
+
static int pca9532_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ int devid;
struct pca9532_data *data = i2c_get_clientdata(client);
struct pca9532_platform_data *pca9532_pdata =
dev_get_platdata(&client->dev);
-
- if (!pca9532_pdata)
- return -EIO;
+ struct device_node *np = client->dev.of_node;
+
+ if (!pca9532_pdata) {
+ if (np) {
+ pca9532_pdata =
+ pca9532_of_populate_pdata(&client->dev, np);
+ if (IS_ERR(pca9532_pdata))
+ return PTR_ERR(pca9532_pdata);
+ } else {
+ dev_err(&client->dev, "no platform data\n");
+ return -EINVAL;
+ }
+ devid = (int)(uintptr_t)of_match_device(
+ of_pca9532_leds_match, &client->dev)->data;
+ } else {
+ devid = id->driver_data;
+ }
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE_DATA))
@@ -450,7 +517,7 @@ static int pca9532_probe(struct i2c_client *client,
if (!data)
return -ENOMEM;
- data->chip_info = &pca9532_chip_info_tbl[id->driver_data];
+ data->chip_info = &pca9532_chip_info_tbl[devid];
dev_info(&client->dev, "setting platform data\n");
i2c_set_clientdata(client, data);
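The probe rework relies on a common OF pattern: the chip variant travels through the `void *data` member of the match table and is recovered with a `uintptr_t` cast, never dereferenced. A minimal sketch with hypothetical compatibles:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_device.h>

static const struct of_device_id demo_match[] = {
	{ .compatible = "vendor,chip-a", .data = (void *)0 },
	{ .compatible = "vendor,chip-b", .data = (void *)1 },
	{ },
};

static int demo_variant(struct device *dev)
{
	const struct of_device_id *m = of_match_device(demo_match, dev);

	if (!m)
		return -ENODEV;
	/* the pointer only carries an enum value, cast it straight back */
	return (int)(uintptr_t)m->data;
}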
diff --git a/drivers/leds/leds-powernv.c b/drivers/leds/leds-powernv.c
index dfb8bd390..b2a98c7b5 100644
--- a/drivers/leds/leds-powernv.c
+++ b/drivers/leds/leds-powernv.c
@@ -118,7 +118,7 @@ static int powernv_led_set(struct powernv_led_data *powernv_led,
goto out_token;
}
- rc = be64_to_cpu(msg.params[1]);
+ rc = opal_get_async_rc(msg);
if (rc != OPAL_SUCCESS)
dev_err(dev, "%s : OAPL async call returned failed [rc=%d]\n",
__func__, rc);
diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
index 9893d9113..3f9ddb9fa 100644
--- a/drivers/leds/trigger/Kconfig
+++ b/drivers/leds/trigger/Kconfig
@@ -33,12 +33,12 @@ config LEDS_TRIGGER_ONESHOT
If unsure, say Y.
-config LEDS_TRIGGER_IDE_DISK
- bool "LED IDE Disk Trigger"
- depends on IDE_GD_ATA
+config LEDS_TRIGGER_DISK
+ bool "LED Disk Trigger"
+ depends on IDE_GD_ATA || ATA
depends on LEDS_TRIGGERS
help
- This allows LEDs to be controlled by IDE disk activity.
+ This allows LEDs to be controlled by disk activity.
If unsure, say Y.
config LEDS_TRIGGER_MTD
diff --git a/drivers/leds/trigger/Makefile b/drivers/leds/trigger/Makefile
index 8cc64a4f4..a72c43cff 100644
--- a/drivers/leds/trigger/Makefile
+++ b/drivers/leds/trigger/Makefile
@@ -1,6 +1,6 @@
obj-$(CONFIG_LEDS_TRIGGER_TIMER) += ledtrig-timer.o
obj-$(CONFIG_LEDS_TRIGGER_ONESHOT) += ledtrig-oneshot.o
-obj-$(CONFIG_LEDS_TRIGGER_IDE_DISK) += ledtrig-ide-disk.o
+obj-$(CONFIG_LEDS_TRIGGER_DISK) += ledtrig-disk.o
obj-$(CONFIG_LEDS_TRIGGER_MTD) += ledtrig-mtd.o
obj-$(CONFIG_LEDS_TRIGGER_HEARTBEAT) += ledtrig-heartbeat.o
obj-$(CONFIG_LEDS_TRIGGER_BACKLIGHT) += ledtrig-backlight.o
diff --git a/drivers/leds/trigger/ledtrig-cpu.c b/drivers/leds/trigger/ledtrig-cpu.c
index 938467fb8..22f0634dd 100644
--- a/drivers/leds/trigger/ledtrig-cpu.c
+++ b/drivers/leds/trigger/ledtrig-cpu.c
@@ -92,29 +92,22 @@ static struct syscore_ops ledtrig_cpu_syscore_ops = {
.resume = ledtrig_cpu_syscore_resume,
};
-static int ledtrig_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
+static int ledtrig_online_cpu(unsigned int cpu)
{
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_STARTING:
- ledtrig_cpu(CPU_LED_START);
- break;
- case CPU_DYING:
- ledtrig_cpu(CPU_LED_STOP);
- break;
- }
-
- return NOTIFY_OK;
+ ledtrig_cpu(CPU_LED_START);
+ return 0;
}
-
-static struct notifier_block ledtrig_cpu_nb = {
- .notifier_call = ledtrig_cpu_notify,
-};
+static int ledtrig_prepare_down_cpu(unsigned int cpu)
+{
+ ledtrig_cpu(CPU_LED_STOP);
+ return 0;
+}
static int __init ledtrig_cpu_init(void)
{
int cpu;
+ int ret;
/* Supports up to 9999 cpu cores */
BUILD_BUG_ON(CONFIG_NR_CPUS > 9999);
@@ -133,7 +126,12 @@ static int __init ledtrig_cpu_init(void)
}
register_syscore_ops(&ledtrig_cpu_syscore_ops);
- register_cpu_notifier(&ledtrig_cpu_nb);
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_LEDTRIG_STARTING",
+ ledtrig_online_cpu, ledtrig_prepare_down_cpu);
+ if (ret < 0)
+ pr_err("CPU hotplug notifier for ledtrig-cpu could not be registered: %d\n",
+ ret);
pr_info("ledtrig-cpu: registered to indicate activity on CPUs\n");
diff --git a/drivers/leds/trigger/ledtrig-disk.c b/drivers/leds/trigger/ledtrig-disk.c
new file mode 100644
index 000000000..cd525b412
--- /dev/null
+++ b/drivers/leds/trigger/ledtrig-disk.c
@@ -0,0 +1,41 @@
+/*
+ * LED Disk Activity Trigger
+ *
+ * Copyright 2006 Openedhand Ltd.
+ *
+ * Author: Richard Purdie <rpurdie@openedhand.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/leds.h>
+
+#define BLINK_DELAY 30
+
+DEFINE_LED_TRIGGER(ledtrig_disk);
+DEFINE_LED_TRIGGER(ledtrig_ide);
+
+void ledtrig_disk_activity(void)
+{
+ unsigned long blink_delay = BLINK_DELAY;
+
+ led_trigger_blink_oneshot(ledtrig_disk,
+ &blink_delay, &blink_delay, 0);
+ led_trigger_blink_oneshot(ledtrig_ide,
+ &blink_delay, &blink_delay, 0);
+}
+EXPORT_SYMBOL(ledtrig_disk_activity);
+
+static int __init ledtrig_disk_init(void)
+{
+ led_trigger_register_simple("disk-activity", &ledtrig_disk);
+ led_trigger_register_simple("ide-disk", &ledtrig_ide);
+
+ return 0;
+}
+device_initcall(ledtrig_disk_init);
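The new trigger needs only one hook on the producer side; every LED whose trigger is set to "disk-activity" (or the legacy "ide-disk") then blinks once per call. A hedged consumer sketch with hypothetical names:

#include <linux/leds.h>

/* e.g. called from a block driver's request-completion path;
 * userspace binds a LED with:
 *   echo disk-activity > /sys/class/leds/<led>/trigger */
static void demo_request_done(void)
{
	ledtrig_disk_activity();	/* one-shot BLINK_DELAY ms blink */
}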
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index 85a339030..61c68a1f0 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -27,11 +27,13 @@ config NVM_DEBUG
It is required to create/remove targets without IOCTLs.
config NVM_GENNVM
- tristate "Generic NVM manager for Open-Channel SSDs"
+ tristate "General Non-Volatile Memory Manager for Open-Channel SSDs"
---help---
- NVM media manager for Open-Channel SSDs that offload management
- functionality to device, while keeping data placement and garbage
- collection decisions on the host.
+ Non-volatile memory media manager for Open-Channel SSDs that implements
+ physical media metadata management and block provisioning API.
+
+ This is the standard media manager for using Open-Channel SSDs, and
+ is required for targets to be instantiated.
config NVM_RRPC
tristate "Round-robin Hybrid Open-Channel SSD target"
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 160c1a683..9ebd2cfbd 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -18,8 +18,6 @@
*
*/
-#include <linux/blkdev.h>
-#include <linux/blk-mq.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
@@ -28,46 +26,42 @@
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>
-#include <uapi/linux/lightnvm.h>
static LIST_HEAD(nvm_tgt_types);
+static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_mgrs);
static LIST_HEAD(nvm_devices);
-static LIST_HEAD(nvm_targets);
static DECLARE_RWSEM(nvm_lock);
-static struct nvm_target *nvm_find_target(const char *name)
+struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
{
- struct nvm_target *tgt;
+ struct nvm_tgt_type *tmp, *tt = NULL;
- list_for_each_entry(tgt, &nvm_targets, list)
- if (!strcmp(name, tgt->disk->disk_name))
- return tgt;
+ if (lock)
+ down_write(&nvm_tgtt_lock);
- return NULL;
-}
-
-static struct nvm_tgt_type *nvm_find_target_type(const char *name)
-{
- struct nvm_tgt_type *tt;
-
- list_for_each_entry(tt, &nvm_tgt_types, list)
- if (!strcmp(name, tt->name))
- return tt;
+ list_for_each_entry(tmp, &nvm_tgt_types, list)
+ if (!strcmp(name, tmp->name)) {
+ tt = tmp;
+ break;
+ }
- return NULL;
+ if (lock)
+ up_write(&nvm_tgtt_lock);
+ return tt;
}
+EXPORT_SYMBOL(nvm_find_target_type);
int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
int ret = 0;
- down_write(&nvm_lock);
- if (nvm_find_target_type(tt->name))
+ down_write(&nvm_tgtt_lock);
+ if (nvm_find_target_type(tt->name, 0))
ret = -EEXIST;
else
list_add(&tt->list, &nvm_tgt_types);
- up_write(&nvm_lock);
+ up_write(&nvm_tgtt_lock);
return ret;
}
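The new `lock` argument exists because `nvm_tgtt_lock` is an ordinary, non-recursive rwsem: external callers pass 1 so the type-list walk is protected, while nvm_register_tgt_type() already write-holds the semaphore for its duplicate check and must pass 0. Condensed from the hunks above:

static int demo_register(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	/* semaphore already write-held: lock=0, or this deadlocks */
	if (nvm_find_target_type(tt->name, 0))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);
	return ret;
}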
@@ -110,7 +104,7 @@ static struct nvmm_type *nvm_find_mgr_type(const char *name)
return NULL;
}
-struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
+static struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
{
struct nvmm_type *mt;
int ret;
@@ -182,20 +176,6 @@ static struct nvm_dev *nvm_find_nvm_dev(const char *name)
return NULL;
}
-struct nvm_block *nvm_get_blk_unlocked(struct nvm_dev *dev, struct nvm_lun *lun,
- unsigned long flags)
-{
- return dev->mt->get_blk_unlocked(dev, lun, flags);
-}
-EXPORT_SYMBOL(nvm_get_blk_unlocked);
-
-/* Assumes that all valid pages have already been moved on release to bm */
-void nvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk)
-{
- return dev->mt->put_blk_unlocked(dev, blk);
-}
-EXPORT_SYMBOL(nvm_put_blk_unlocked);
-
struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun,
unsigned long flags)
{
@@ -210,6 +190,12 @@ void nvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
}
EXPORT_SYMBOL(nvm_put_blk);
+void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
+{
+ return dev->mt->mark_blk(dev, ppa, type);
+}
+EXPORT_SYMBOL(nvm_mark_blk);
+
int nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
return dev->mt->submit_io(dev, rqd);
@@ -251,9 +237,10 @@ void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
EXPORT_SYMBOL(nvm_generic_to_addr_mode);
int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
- struct ppa_addr *ppas, int nr_ppas, int vblk)
+ const struct ppa_addr *ppas, int nr_ppas, int vblk)
{
int i, plane_cnt, pl_idx;
+ struct ppa_addr ppa;
if ((!vblk || dev->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
rqd->nr_ppas = nr_ppas;
@@ -278,8 +265,9 @@ int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
for (i = 0; i < nr_ppas; i++) {
for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
- ppas[i].g.pl = pl_idx;
- rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppas[i];
+ ppa = ppas[i];
+ ppa.g.pl = pl_idx;
+ rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
}
}
}
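A worked example makes the plane expansion and the reason for the stack copy concrete (assumed geometry, plane_cnt = 2 and nr_ppas = 2):

/*
 * The expanded list is laid out plane-major:
 *
 *   rqd->ppa_list[0] = ppas[0], pl = 0
 *   rqd->ppa_list[1] = ppas[1], pl = 0
 *   rqd->ppa_list[2] = ppas[0], pl = 1
 *   rqd->ppa_list[3] = ppas[1], pl = 1
 *
 * Stamping pl into the local copy keeps the caller's (now const)
 * array intact; the old code mutated ppas[] in place and left every
 * entry's plane field forced to plane_cnt - 1 after the loop.
 */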
@@ -337,7 +325,7 @@ static void nvm_end_io_sync(struct nvm_rq *rqd)
complete(waiting);
}
-int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
+static int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
int flags, void *buf, int len)
{
DECLARE_COMPLETION_ONSTACK(wait);
@@ -367,7 +355,9 @@ int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
/* Prevent hang_check timer from firing at us during very long I/O */
hang_check = sysctl_hung_task_timeout_secs;
if (hang_check)
- while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
+ while (!wait_for_completion_io_timeout(&wait,
+ hang_check * (HZ/2)))
+ ;
else
wait_for_completion_io(&wait);
@@ -510,7 +500,8 @@ static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
/* The lower page table encoding consists of a list of bytes, where each
* has a lower and an upper half. The first half byte maintains the
* increment value and every value after is an offset added to the
- * previous incrementation value */
+ * previous increment value
+ */
dev->lptbl[0] = mlc->pairs[0] & 0xF;
for (i = 1; i < dev->lps_per_blk; i++) {
p = mlc->pairs[i >> 1];
@@ -596,42 +587,11 @@ err_fmtype:
return ret;
}
-static void nvm_remove_target(struct nvm_target *t)
-{
- struct nvm_tgt_type *tt = t->type;
- struct gendisk *tdisk = t->disk;
- struct request_queue *q = tdisk->queue;
-
- lockdep_assert_held(&nvm_lock);
-
- del_gendisk(tdisk);
- blk_cleanup_queue(q);
-
- if (tt->exit)
- tt->exit(tdisk->private_data);
-
- put_disk(tdisk);
-
- list_del(&t->list);
- kfree(t);
-}
-
static void nvm_free_mgr(struct nvm_dev *dev)
{
- struct nvm_target *tgt, *tmp;
-
if (!dev->mt)
return;
- down_write(&nvm_lock);
- list_for_each_entry_safe(tgt, tmp, &nvm_targets, list) {
- if (tgt->dev != dev)
- continue;
-
- nvm_remove_target(tgt);
- }
- up_write(&nvm_lock);
-
dev->mt->unregister_mgr(dev);
dev->mt = NULL;
}
@@ -778,91 +738,6 @@ void nvm_unregister(char *disk_name)
}
EXPORT_SYMBOL(nvm_unregister);
-static const struct block_device_operations nvm_fops = {
- .owner = THIS_MODULE,
-};
-
-static int nvm_create_target(struct nvm_dev *dev,
- struct nvm_ioctl_create *create)
-{
- struct nvm_ioctl_create_simple *s = &create->conf.s;
- struct request_queue *tqueue;
- struct gendisk *tdisk;
- struct nvm_tgt_type *tt;
- struct nvm_target *t;
- void *targetdata;
-
- if (!dev->mt) {
- pr_info("nvm: device has no media manager registered.\n");
- return -ENODEV;
- }
-
- down_write(&nvm_lock);
- tt = nvm_find_target_type(create->tgttype);
- if (!tt) {
- pr_err("nvm: target type %s not found\n", create->tgttype);
- up_write(&nvm_lock);
- return -EINVAL;
- }
-
- t = nvm_find_target(create->tgtname);
- if (t) {
- pr_err("nvm: target name already exists.\n");
- up_write(&nvm_lock);
- return -EINVAL;
- }
- up_write(&nvm_lock);
-
- t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
- if (!t)
- return -ENOMEM;
-
- tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
- if (!tqueue)
- goto err_t;
- blk_queue_make_request(tqueue, tt->make_rq);
-
- tdisk = alloc_disk(0);
- if (!tdisk)
- goto err_queue;
-
- sprintf(tdisk->disk_name, "%s", create->tgtname);
- tdisk->flags = GENHD_FL_EXT_DEVT;
- tdisk->major = 0;
- tdisk->first_minor = 0;
- tdisk->fops = &nvm_fops;
- tdisk->queue = tqueue;
-
- targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end);
- if (IS_ERR(targetdata))
- goto err_init;
-
- tdisk->private_data = targetdata;
- tqueue->queuedata = targetdata;
-
- blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);
-
- set_capacity(tdisk, tt->capacity(targetdata));
- add_disk(tdisk);
-
- t->type = tt;
- t->disk = tdisk;
- t->dev = dev;
-
- down_write(&nvm_lock);
- list_add_tail(&t->list, &nvm_targets);
- up_write(&nvm_lock);
-
- return 0;
-err_init:
- put_disk(tdisk);
-err_queue:
- blk_cleanup_queue(tqueue);
-err_t:
- kfree(t);
- return -ENOMEM;
-}
-
static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
struct nvm_dev *dev;
@@ -871,11 +746,17 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
down_write(&nvm_lock);
dev = nvm_find_nvm_dev(create->dev);
up_write(&nvm_lock);
+
if (!dev) {
pr_err("nvm: device not found\n");
return -EINVAL;
}
+ if (!dev->mt) {
+ pr_info("nvm: device has no media manager registered.\n");
+ return -ENODEV;
+ }
+
if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
pr_err("nvm: config type not valid\n");
return -EINVAL;
@@ -888,25 +769,7 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
return -EINVAL;
}
- return nvm_create_target(dev, create);
-}
-
-static int __nvm_configure_remove(struct nvm_ioctl_remove *remove)
-{
- struct nvm_target *t;
-
- down_write(&nvm_lock);
- t = nvm_find_target(remove->tgtname);
- if (!t) {
- pr_err("nvm: target \"%s\" doesn't exist.\n", remove->tgtname);
- up_write(&nvm_lock);
- return -EINVAL;
- }
-
- nvm_remove_target(t);
- up_write(&nvm_lock);
-
- return 0;
+ return dev->mt->create_tgt(dev, create);
}
#ifdef CONFIG_NVM_DEBUG
@@ -941,8 +804,9 @@ static int nvm_configure_show(const char *val)
static int nvm_configure_remove(const char *val)
{
struct nvm_ioctl_remove remove;
+ struct nvm_dev *dev;
char opcode;
- int ret;
+ int ret = 0;
ret = sscanf(val, "%c %256s", &opcode, remove.tgtname);
if (ret != 2) {
@@ -952,7 +816,13 @@ static int nvm_configure_remove(const char *val)
remove.flags = 0;
- return __nvm_configure_remove(&remove);
+ list_for_each_entry(dev, &nvm_devices, devices) {
+ ret = dev->mt->remove_tgt(dev, &remove);
+ if (!ret)
+ break;
+ }
+
+ return ret;
}
static int nvm_configure_create(const char *val)
@@ -1149,6 +1019,8 @@ static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
struct nvm_ioctl_remove remove;
+ struct nvm_dev *dev;
+ int ret = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -1163,7 +1035,13 @@ static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
return -EINVAL;
}
- return __nvm_configure_remove(&remove);
+ list_for_each_entry(dev, &nvm_devices, devices) {
+ ret = dev->mt->remove_tgt(dev, &remove);
+ if (!ret)
+ break;
+ }
+
+ return ret;
}
static void nvm_setup_nvm_sb_info(struct nvm_sb_info *info)
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index ec9fb6876..b74174c6d 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -15,22 +15,160 @@
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
* USA.
*
- * Implementation of a generic nvm manager for Open-Channel SSDs.
+ * Implementation of a general nvm manager for Open-Channel SSDs.
*/
#include "gennvm.h"
-static int gennvm_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len)
+static struct nvm_target *gen_find_target(struct gen_dev *gn, const char *name)
{
- struct gen_nvm *gn = dev->mp;
- struct gennvm_area *area, *prev, *next;
+ struct nvm_target *tgt;
+
+ list_for_each_entry(tgt, &gn->targets, list)
+ if (!strcmp(name, tgt->disk->disk_name))
+ return tgt;
+
+ return NULL;
+}
+
+static const struct block_device_operations gen_fops = {
+ .owner = THIS_MODULE,
+};
+
+static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
+{
+ struct gen_dev *gn = dev->mp;
+ struct nvm_ioctl_create_simple *s = &create->conf.s;
+ struct request_queue *tqueue;
+ struct gendisk *tdisk;
+ struct nvm_tgt_type *tt;
+ struct nvm_target *t;
+ void *targetdata;
+
+ tt = nvm_find_target_type(create->tgttype, 1);
+ if (!tt) {
+ pr_err("nvm: target type %s not found\n", create->tgttype);
+ return -EINVAL;
+ }
+
+ mutex_lock(&gn->lock);
+ t = gen_find_target(gn, create->tgtname);
+ if (t) {
+ pr_err("nvm: target name already exists.\n");
+ mutex_unlock(&gn->lock);
+ return -EINVAL;
+ }
+ mutex_unlock(&gn->lock);
+
+ t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
+ if (!t)
+ return -ENOMEM;
+
+ tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
+ if (!tqueue)
+ goto err_t;
+ blk_queue_make_request(tqueue, tt->make_rq);
+
+ tdisk = alloc_disk(0);
+ if (!tdisk)
+ goto err_queue;
+
+ sprintf(tdisk->disk_name, "%s", create->tgtname);
+ tdisk->flags = GENHD_FL_EXT_DEVT;
+ tdisk->major = 0;
+ tdisk->first_minor = 0;
+ tdisk->fops = &gen_fops;
+ tdisk->queue = tqueue;
+
+ targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end);
+ if (IS_ERR(targetdata))
+ goto err_init;
+
+ tdisk->private_data = targetdata;
+ tqueue->queuedata = targetdata;
+
+ blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);
+
+ set_capacity(tdisk, tt->capacity(targetdata));
+ add_disk(tdisk);
+
+ t->type = tt;
+ t->disk = tdisk;
+ t->dev = dev;
+
+ mutex_lock(&gn->lock);
+ list_add_tail(&t->list, &gn->targets);
+ mutex_unlock(&gn->lock);
+
+ return 0;
+err_init:
+ put_disk(tdisk);
+err_queue:
+ blk_cleanup_queue(tqueue);
+err_t:
+ kfree(t);
+ return -ENOMEM;
+}
+
+static void __gen_remove_target(struct nvm_target *t)
+{
+ struct nvm_tgt_type *tt = t->type;
+ struct gendisk *tdisk = t->disk;
+ struct request_queue *q = tdisk->queue;
+
+ del_gendisk(tdisk);
+ blk_cleanup_queue(q);
+
+ if (tt->exit)
+ tt->exit(tdisk->private_data);
+
+ put_disk(tdisk);
+
+ list_del(&t->list);
+ kfree(t);
+}
+
+/**
+ * gen_remove_tgt - Removes a target from the media manager
+ * @dev: device
+ * @remove: ioctl structure with target name to remove.
+ *
+ * Returns:
+ * 0: on success
+ * 1: if the target was not found
+ * <0: on error
+ */
+static int gen_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
+{
+ struct gen_dev *gn = dev->mp;
+ struct nvm_target *t;
+
+ if (!gn)
+ return 1;
+
+ mutex_lock(&gn->lock);
+ t = gen_find_target(gn, remove->tgtname);
+ if (!t) {
+ mutex_unlock(&gn->lock);
+ return 1;
+ }
+ __gen_remove_target(t);
+ mutex_unlock(&gn->lock);
+
+ return 0;
+}
+
+static int gen_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len)
+{
+ struct gen_dev *gn = dev->mp;
+ struct gen_area *area, *prev, *next;
sector_t begin = 0;
sector_t max_sectors = (dev->sec_size * dev->total_secs) >> 9;
if (len > max_sectors)
return -EINVAL;
- area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL);
+ area = kmalloc(sizeof(struct gen_area), GFP_KERNEL);
if (!area)
return -ENOMEM;
@@ -64,10 +202,10 @@ static int gennvm_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len)
return 0;
}
-static void gennvm_put_area(struct nvm_dev *dev, sector_t begin)
+static void gen_put_area(struct nvm_dev *dev, sector_t begin)
{
- struct gen_nvm *gn = dev->mp;
- struct gennvm_area *area;
+ struct gen_dev *gn = dev->mp;
+ struct gen_area *area;
spin_lock(&dev->lock);
list_for_each_entry(area, &gn->area_list, list) {
@@ -82,27 +220,27 @@ static void gennvm_put_area(struct nvm_dev *dev, sector_t begin)
spin_unlock(&dev->lock);
}
-static void gennvm_blocks_free(struct nvm_dev *dev)
+static void gen_blocks_free(struct nvm_dev *dev)
{
- struct gen_nvm *gn = dev->mp;
+ struct gen_dev *gn = dev->mp;
struct gen_lun *lun;
int i;
- gennvm_for_each_lun(gn, lun, i) {
+ gen_for_each_lun(gn, lun, i) {
if (!lun->vlun.blocks)
break;
vfree(lun->vlun.blocks);
}
}
-static void gennvm_luns_free(struct nvm_dev *dev)
+static void gen_luns_free(struct nvm_dev *dev)
{
- struct gen_nvm *gn = dev->mp;
+ struct gen_dev *gn = dev->mp;
kfree(gn->luns);
}
-static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
+static int gen_luns_init(struct nvm_dev *dev, struct gen_dev *gn)
{
struct gen_lun *lun;
int i;
@@ -111,7 +249,7 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
if (!gn->luns)
return -ENOMEM;
- gennvm_for_each_lun(gn, lun, i) {
+ gen_for_each_lun(gn, lun, i) {
spin_lock_init(&lun->vlun.lock);
INIT_LIST_HEAD(&lun->free_list);
INIT_LIST_HEAD(&lun->used_list);
@@ -122,14 +260,11 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
lun->vlun.lun_id = i % dev->luns_per_chnl;
lun->vlun.chnl_id = i / dev->luns_per_chnl;
lun->vlun.nr_free_blocks = dev->blks_per_lun;
- lun->vlun.nr_open_blocks = 0;
- lun->vlun.nr_closed_blocks = 0;
- lun->vlun.nr_bad_blocks = 0;
}
return 0;
}
-static int gennvm_block_bb(struct gen_nvm *gn, struct ppa_addr ppa,
+static int gen_block_bb(struct gen_dev *gn, struct ppa_addr ppa,
u8 *blks, int nr_blks)
{
struct nvm_dev *dev = gn->dev;
@@ -149,17 +284,16 @@ static int gennvm_block_bb(struct gen_nvm *gn, struct ppa_addr ppa,
blk = &lun->vlun.blocks[i];
list_move_tail(&blk->list, &lun->bb_list);
- lun->vlun.nr_bad_blocks++;
lun->vlun.nr_free_blocks--;
}
return 0;
}
-static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
+static int gen_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
{
struct nvm_dev *dev = private;
- struct gen_nvm *gn = dev->mp;
+ struct gen_dev *gn = dev->mp;
u64 elba = slba + nlb;
struct gen_lun *lun;
struct nvm_block *blk;
@@ -167,7 +301,7 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
int lun_id;
if (unlikely(elba > dev->total_secs)) {
- pr_err("gennvm: L2P data from device is out of bounds!\n");
+ pr_err("gen: L2P data from device is out of bounds!\n");
return -EINVAL;
}
@@ -175,7 +309,7 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
u64 pba = le64_to_cpu(entries[i]);
if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
- pr_err("gennvm: L2P data entry is out of bounds!\n");
+ pr_err("gen: L2P data entry is out of bounds!\n");
return -EINVAL;
}
@@ -200,16 +334,15 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
* block state. The block is assumed to be open.
*/
list_move_tail(&blk->list, &lun->used_list);
- blk->state = NVM_BLK_ST_OPEN;
+ blk->state = NVM_BLK_ST_TGT;
lun->vlun.nr_free_blocks--;
- lun->vlun.nr_open_blocks++;
}
}
return 0;
}
-static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
+static int gen_blocks_init(struct nvm_dev *dev, struct gen_dev *gn)
{
struct gen_lun *lun;
struct nvm_block *block;
@@ -222,7 +355,7 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
if (!blks)
return -ENOMEM;
- gennvm_for_each_lun(gn, lun, lun_iter) {
+ gen_for_each_lun(gn, lun, lun_iter) {
lun->vlun.blocks = vzalloc(sizeof(struct nvm_block) *
dev->blks_per_lun);
if (!lun->vlun.blocks) {
@@ -256,20 +389,20 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
ret = nvm_get_bb_tbl(dev, ppa, blks);
if (ret)
- pr_err("gennvm: could not get BB table\n");
+ pr_err("gen: could not get BB table\n");
- ret = gennvm_block_bb(gn, ppa, blks, nr_blks);
+ ret = gen_block_bb(gn, ppa, blks, nr_blks);
if (ret)
- pr_err("gennvm: BB table map failed\n");
+ pr_err("gen: BB table map failed\n");
}
}
if ((dev->identity.dom & NVM_RSP_L2P) && dev->ops->get_l2p_tbl) {
ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_secs,
- gennvm_block_map, dev);
+ gen_block_map, dev);
if (ret) {
- pr_err("gennvm: could not read L2P table.\n");
- pr_warn("gennvm: default block initialization");
+ pr_err("gen: could not read L2P table.\n");
+ pr_warn("gen: default block initialization");
}
}
@@ -277,67 +410,79 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
return 0;
}
-static void gennvm_free(struct nvm_dev *dev)
+static void gen_free(struct nvm_dev *dev)
{
- gennvm_blocks_free(dev);
- gennvm_luns_free(dev);
+ gen_blocks_free(dev);
+ gen_luns_free(dev);
kfree(dev->mp);
dev->mp = NULL;
}
-static int gennvm_register(struct nvm_dev *dev)
+static int gen_register(struct nvm_dev *dev)
{
- struct gen_nvm *gn;
+ struct gen_dev *gn;
int ret;
if (!try_module_get(THIS_MODULE))
return -ENODEV;
- gn = kzalloc(sizeof(struct gen_nvm), GFP_KERNEL);
+ gn = kzalloc(sizeof(struct gen_dev), GFP_KERNEL);
if (!gn)
return -ENOMEM;
gn->dev = dev;
gn->nr_luns = dev->nr_luns;
INIT_LIST_HEAD(&gn->area_list);
+ mutex_init(&gn->lock);
+ INIT_LIST_HEAD(&gn->targets);
dev->mp = gn;
- ret = gennvm_luns_init(dev, gn);
+ ret = gen_luns_init(dev, gn);
if (ret) {
- pr_err("gennvm: could not initialize luns\n");
+ pr_err("gen: could not initialize luns\n");
goto err;
}
- ret = gennvm_blocks_init(dev, gn);
+ ret = gen_blocks_init(dev, gn);
if (ret) {
- pr_err("gennvm: could not initialize blocks\n");
+ pr_err("gen: could not initialize blocks\n");
goto err;
}
return 1;
err:
- gennvm_free(dev);
+ gen_free(dev);
module_put(THIS_MODULE);
return ret;
}
-static void gennvm_unregister(struct nvm_dev *dev)
+static void gen_unregister(struct nvm_dev *dev)
{
- gennvm_free(dev);
+ struct gen_dev *gn = dev->mp;
+ struct nvm_target *t, *tmp;
+
+ mutex_lock(&gn->lock);
+ list_for_each_entry_safe(t, tmp, &gn->targets, list) {
+ if (t->dev != dev)
+ continue;
+ __gen_remove_target(t);
+ }
+ mutex_unlock(&gn->lock);
+
+ gen_free(dev);
module_put(THIS_MODULE);
}
-static struct nvm_block *gennvm_get_blk_unlocked(struct nvm_dev *dev,
+static struct nvm_block *gen_get_blk(struct nvm_dev *dev,
struct nvm_lun *vlun, unsigned long flags)
{
struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
struct nvm_block *blk = NULL;
int is_gc = flags & NVM_IOTYPE_GC;
- assert_spin_locked(&vlun->lock);
-
+ spin_lock(&vlun->lock);
if (list_empty(&lun->free_list)) {
- pr_err_ratelimited("gennvm: lun %u have no free pages available",
+ pr_err_ratelimited("gen: lun %u have no free pages available",
lun->vlun.id);
goto out;
}
@@ -346,88 +491,58 @@ static struct nvm_block *gennvm_get_blk_unlocked(struct nvm_dev *dev,
goto out;
blk = list_first_entry(&lun->free_list, struct nvm_block, list);
- list_move_tail(&blk->list, &lun->used_list);
- blk->state = NVM_BLK_ST_OPEN;
+ list_move_tail(&blk->list, &lun->used_list);
+ blk->state = NVM_BLK_ST_TGT;
lun->vlun.nr_free_blocks--;
- lun->vlun.nr_open_blocks++;
-
out:
- return blk;
-}
-
-static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
- struct nvm_lun *vlun, unsigned long flags)
-{
- struct nvm_block *blk;
-
- spin_lock(&vlun->lock);
- blk = gennvm_get_blk_unlocked(dev, vlun, flags);
spin_unlock(&vlun->lock);
return blk;
}
-static void gennvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk)
+static void gen_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
struct nvm_lun *vlun = blk->lun;
struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
- assert_spin_locked(&vlun->lock);
-
- if (blk->state & NVM_BLK_ST_OPEN) {
- list_move_tail(&blk->list, &lun->free_list);
- lun->vlun.nr_open_blocks--;
- lun->vlun.nr_free_blocks++;
- blk->state = NVM_BLK_ST_FREE;
- } else if (blk->state & NVM_BLK_ST_CLOSED) {
+ spin_lock(&vlun->lock);
+ if (blk->state & NVM_BLK_ST_TGT) {
list_move_tail(&blk->list, &lun->free_list);
- lun->vlun.nr_closed_blocks--;
lun->vlun.nr_free_blocks++;
blk->state = NVM_BLK_ST_FREE;
} else if (blk->state & NVM_BLK_ST_BAD) {
list_move_tail(&blk->list, &lun->bb_list);
- lun->vlun.nr_bad_blocks++;
blk->state = NVM_BLK_ST_BAD;
} else {
WARN_ON_ONCE(1);
- pr_err("gennvm: erroneous block type (%lu -> %u)\n",
+ pr_err("gen: erroneous block type (%lu -> %u)\n",
blk->id, blk->state);
list_move_tail(&blk->list, &lun->bb_list);
- lun->vlun.nr_bad_blocks++;
- blk->state = NVM_BLK_ST_BAD;
}
-}
-
-static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
-{
- struct nvm_lun *vlun = blk->lun;
-
- spin_lock(&vlun->lock);
- gennvm_put_blk_unlocked(dev, blk);
spin_unlock(&vlun->lock);
}
-static void gennvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
+static void gen_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
{
- struct gen_nvm *gn = dev->mp;
+ struct gen_dev *gn = dev->mp;
struct gen_lun *lun;
struct nvm_block *blk;
- pr_debug("gennvm: ppa (ch: %u lun: %u blk: %u pg: %u) -> %u\n",
+ pr_debug("gen: ppa (ch: %u lun: %u blk: %u pg: %u) -> %u\n",
ppa.g.ch, ppa.g.lun, ppa.g.blk, ppa.g.pg, type);
if (unlikely(ppa.g.ch > dev->nr_chnls ||
ppa.g.lun > dev->luns_per_chnl ||
ppa.g.blk > dev->blks_per_lun)) {
WARN_ON_ONCE(1);
- pr_err("gennvm: ppa broken (ch: %u > %u lun: %u > %u blk: %u > %u",
+ pr_err("gen: ppa broken (ch: %u > %u lun: %u > %u blk: %u > %u",
ppa.g.ch, dev->nr_chnls,
ppa.g.lun, dev->luns_per_chnl,
ppa.g.blk, dev->blks_per_lun);
return;
}
- lun = &gn->luns[ppa.g.lun * ppa.g.ch];
+ lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];
blk = &lun->vlun.blocks[ppa.g.blk];
/* will be moved to bb list on put_blk from target */
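The index fix in gen_mark_blk() can be checked against gen_luns_init(), which lays luns[] out channel-major (lun_id = i % luns_per_chnl, chnl_id = i / luns_per_chnl). A worked example with luns_per_chnl = 4:

/*
 * For ppa (ch = 1, lun = 2):
 *
 *   new: luns_per_chnl * ch + lun = 4 * 1 + 2 = 6   (correct slot)
 *   old: lun * ch                 = 2 * 1     = 2   (ch 0, lun 2)
 *
 * The old expression also mapped every lun on channel 0 to index 0,
 * so bad-block marks could land on the wrong lun entirely.
 */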
@@ -435,9 +550,9 @@ static void gennvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
}
/*
- * mark block bad in gennvm. It is expected that the target recovers separately
+ * mark block bad in gen. It is expected that the target recovers separately
*/
-static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
+static void gen_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
{
int bit = -1;
int max_secs = dev->ops->max_phys_sect;
@@ -447,25 +562,25 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
/* look up blocks and mark them as bad */
if (rqd->nr_ppas == 1) {
- gennvm_mark_blk(dev, rqd->ppa_addr, NVM_BLK_ST_BAD);
+ gen_mark_blk(dev, rqd->ppa_addr, NVM_BLK_ST_BAD);
return;
}
while ((bit = find_next_bit(comp_bits, max_secs, bit + 1)) < max_secs)
- gennvm_mark_blk(dev, rqd->ppa_list[bit], NVM_BLK_ST_BAD);
+ gen_mark_blk(dev, rqd->ppa_list[bit], NVM_BLK_ST_BAD);
}
-static void gennvm_end_io(struct nvm_rq *rqd)
+static void gen_end_io(struct nvm_rq *rqd)
{
struct nvm_tgt_instance *ins = rqd->ins;
if (rqd->error == NVM_RSP_ERR_FAILWRITE)
- gennvm_mark_blk_bad(rqd->dev, rqd);
+ gen_mark_blk_bad(rqd->dev, rqd);
ins->tt->end_io(rqd);
}
-static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
+static int gen_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
if (!dev->ops->submit_io)
return -ENODEV;
@@ -474,11 +589,11 @@ static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
nvm_generic_to_addr_mode(dev, rqd);
rqd->dev = dev;
- rqd->end_io = gennvm_end_io;
+ rqd->end_io = gen_end_io;
return dev->ops->submit_io(dev, rqd);
}
-static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
+static int gen_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
unsigned long flags)
{
struct ppa_addr addr = block_to_ppa(dev, blk);
@@ -486,19 +601,19 @@ static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
return nvm_erase_ppa(dev, &addr, 1);
}
-static int gennvm_reserve_lun(struct nvm_dev *dev, int lunid)
+static int gen_reserve_lun(struct nvm_dev *dev, int lunid)
{
return test_and_set_bit(lunid, dev->lun_map);
}
-static void gennvm_release_lun(struct nvm_dev *dev, int lunid)
+static void gen_release_lun(struct nvm_dev *dev, int lunid)
{
WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
}
-static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
+static struct nvm_lun *gen_get_lun(struct nvm_dev *dev, int lunid)
{
- struct gen_nvm *gn = dev->mp;
+ struct gen_dev *gn = dev->mp;
if (unlikely(lunid >= dev->nr_luns))
return NULL;
@@ -506,66 +621,62 @@ static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
return &gn->luns[lunid].vlun;
}
-static void gennvm_lun_info_print(struct nvm_dev *dev)
+static void gen_lun_info_print(struct nvm_dev *dev)
{
- struct gen_nvm *gn = dev->mp;
+ struct gen_dev *gn = dev->mp;
struct gen_lun *lun;
unsigned int i;
- gennvm_for_each_lun(gn, lun, i) {
+ gen_for_each_lun(gn, lun, i) {
spin_lock(&lun->vlun.lock);
- pr_info("%s: lun%8u\t%u\t%u\t%u\t%u\n",
- dev->name, i,
- lun->vlun.nr_free_blocks,
- lun->vlun.nr_open_blocks,
- lun->vlun.nr_closed_blocks,
- lun->vlun.nr_bad_blocks);
+ pr_info("%s: lun%8u\t%u\n", dev->name, i,
+ lun->vlun.nr_free_blocks);
spin_unlock(&lun->vlun.lock);
}
}
-static struct nvmm_type gennvm = {
+static struct nvmm_type gen = {
.name = "gennvm",
.version = {0, 1, 0},
- .register_mgr = gennvm_register,
- .unregister_mgr = gennvm_unregister,
+ .register_mgr = gen_register,
+ .unregister_mgr = gen_unregister,
- .get_blk_unlocked = gennvm_get_blk_unlocked,
- .put_blk_unlocked = gennvm_put_blk_unlocked,
+ .create_tgt = gen_create_tgt,
+ .remove_tgt = gen_remove_tgt,
- .get_blk = gennvm_get_blk,
- .put_blk = gennvm_put_blk,
+ .get_blk = gen_get_blk,
+ .put_blk = gen_put_blk,
- .submit_io = gennvm_submit_io,
- .erase_blk = gennvm_erase_blk,
+ .submit_io = gen_submit_io,
+ .erase_blk = gen_erase_blk,
- .mark_blk = gennvm_mark_blk,
+ .mark_blk = gen_mark_blk,
- .get_lun = gennvm_get_lun,
- .reserve_lun = gennvm_reserve_lun,
- .release_lun = gennvm_release_lun,
- .lun_info_print = gennvm_lun_info_print,
+ .get_lun = gen_get_lun,
+ .reserve_lun = gen_reserve_lun,
+ .release_lun = gen_release_lun,
+ .lun_info_print = gen_lun_info_print,
- .get_area = gennvm_get_area,
- .put_area = gennvm_put_area,
+ .get_area = gen_get_area,
+ .put_area = gen_put_area,
};
-static int __init gennvm_module_init(void)
+static int __init gen_module_init(void)
{
- return nvm_register_mgr(&gennvm);
+ return nvm_register_mgr(&gen);
}
-static void gennvm_module_exit(void)
+static void gen_module_exit(void)
{
- nvm_unregister_mgr(&gennvm);
+ nvm_unregister_mgr(&gen);
}
-module_init(gennvm_module_init);
-module_exit(gennvm_module_exit);
+module_init(gen_module_init);
+module_exit(gen_module_exit);
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Generic media manager for Open-Channel SSDs");
+MODULE_DESCRIPTION("General media manager for Open-Channel SSDs");
diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
index 04d7c23cf..8ecfa817d 100644
--- a/drivers/lightnvm/gennvm.h
+++ b/drivers/lightnvm/gennvm.h
@@ -34,20 +34,24 @@ struct gen_lun {
*/
};
-struct gen_nvm {
+struct gen_dev {
struct nvm_dev *dev;
int nr_luns;
struct gen_lun *luns;
struct list_head area_list;
+
+ struct mutex lock;
+ struct list_head targets;
};
-struct gennvm_area {
+struct gen_area {
struct list_head list;
sector_t begin;
sector_t end; /* end is excluded */
};
-#define gennvm_for_each_lun(bm, lun, i) \
+
+#define gen_for_each_lun(bm, lun, i) \
for ((i) = 0, lun = &(bm)->luns[0]; \
(i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 2103e97a9..37fcaadbf 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -48,7 +48,7 @@ static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
}
static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
- unsigned len)
+ unsigned int len)
{
sector_t i;
@@ -96,10 +96,13 @@ static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
struct nvm_rq *rqd;
- do {
+ while (1) {
rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
+ if (rqd)
+ break;
+
schedule();
- } while (!rqd);
+ }
if (IS_ERR(rqd)) {
pr_err("rrpc: unable to acquire inflight IO\n");
@@ -172,39 +175,32 @@ static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
}
/* requires lun->lock taken */
-static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *rblk)
+static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *new_rblk,
+ struct rrpc_block **cur_rblk)
{
struct rrpc *rrpc = rlun->rrpc;
- BUG_ON(!rblk);
-
- if (rlun->cur) {
- spin_lock(&rlun->cur->lock);
- WARN_ON(!block_is_full(rrpc, rlun->cur));
- spin_unlock(&rlun->cur->lock);
+ if (*cur_rblk) {
+ spin_lock(&(*cur_rblk)->lock);
+ WARN_ON(!block_is_full(rrpc, *cur_rblk));
+ spin_unlock(&(*cur_rblk)->lock);
}
- rlun->cur = rblk;
+ *cur_rblk = new_rblk;
}
static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
unsigned long flags)
{
- struct nvm_lun *lun = rlun->parent;
struct nvm_block *blk;
struct rrpc_block *rblk;
- spin_lock(&lun->lock);
- blk = nvm_get_blk_unlocked(rrpc->dev, rlun->parent, flags);
+ blk = nvm_get_blk(rrpc->dev, rlun->parent, flags);
if (!blk) {
pr_err("nvm: rrpc: cannot get new block from media manager\n");
- spin_unlock(&lun->lock);
return NULL;
}
rblk = rrpc_get_rblk(rlun, blk->id);
- list_add_tail(&rblk->list, &rlun->open_list);
- spin_unlock(&lun->lock);
-
blk->priv = rblk;
bitmap_zero(rblk->invalid_pages, rrpc->dev->sec_per_blk);
rblk->next_page = 0;
@@ -216,13 +212,7 @@ static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
{
- struct rrpc_lun *rlun = rblk->rlun;
- struct nvm_lun *lun = rlun->parent;
-
- spin_lock(&lun->lock);
- nvm_put_blk_unlocked(rrpc->dev, rblk->parent);
- list_del(&rblk->list);
- spin_unlock(&lun->lock);
+ nvm_put_blk(rrpc->dev, rblk->parent);
}
static void rrpc_put_blks(struct rrpc *rrpc)
@@ -342,7 +332,7 @@ try:
/* Perform read to do GC */
bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
- bio->bi_rw = READ;
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
bio->bi_private = &wait;
bio->bi_end_io = rrpc_end_sync_bio;
@@ -364,7 +354,7 @@ try:
reinit_completion(&wait);
bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
- bio->bi_rw = WRITE;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio->bi_private = &wait;
bio->bi_end_io = rrpc_end_sync_bio;
@@ -508,21 +498,11 @@ static void rrpc_gc_queue(struct work_struct *work)
struct rrpc *rrpc = gcb->rrpc;
struct rrpc_block *rblk = gcb->rblk;
struct rrpc_lun *rlun = rblk->rlun;
- struct nvm_lun *lun = rblk->parent->lun;
- struct nvm_block *blk = rblk->parent;
spin_lock(&rlun->lock);
list_add_tail(&rblk->prio, &rlun->prio_list);
spin_unlock(&rlun->lock);
- spin_lock(&lun->lock);
- lun->nr_open_blocks--;
- lun->nr_closed_blocks++;
- blk->state &= ~NVM_BLK_ST_OPEN;
- blk->state |= NVM_BLK_ST_CLOSED;
- list_move_tail(&rblk->list, &rlun->closed_list);
- spin_unlock(&lun->lock);
-
mempool_free(gcb, rrpc->gcb_pool);
pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
rblk->parent->id);
@@ -596,21 +576,20 @@ out:
return addr;
}
-/* Simple round-robin Logical to physical address translation.
- *
- * Retrieve the mapping using the active append point. Then update the ap for
- * the next write to the disk.
+/* Map logical address to a physical page. The mapping implements a round robin
+ * approach and allocates a page from the next lun available.
*
- * Returns rrpc_addr with the physical address and block. Remember to return to
- * rrpc->addr_cache when request is finished.
+ * Returns rrpc_addr with the physical address and block. Returns NULL if no
+ * blocks in the next rlun are available.
*/
static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
int is_gc)
{
struct rrpc_lun *rlun;
- struct rrpc_block *rblk;
+ struct rrpc_block *rblk, **cur_rblk;
struct nvm_lun *lun;
u64 paddr;
+ int gc_force = 0;
rlun = rrpc_get_lun_rr(rrpc, is_gc);
lun = rlun->parent;
@@ -618,41 +597,65 @@ static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)
return NULL;
- spin_lock(&rlun->lock);
+ /*
+ * Page allocation steps:
+ * 1. Try to allocate a new page from the current rblk.
+ * 2a. On success, proceed to map it in and return.
+ * 2b. On failure, first try to allocate a new block from the media
+ * manager, and then retry step 1. Retry until the normal block
+ * pool is exhausted.
+ * 3. If exhausted, and the garbage collector is requesting the block,
+ * fall back to the reserved block and retry step 1.
+ * If this fails as well, or the request does not come from GC,
+ * report that no block could be retrieved and let the caller
+ * handle further processing.
+ */
+ spin_lock(&rlun->lock);
+ cur_rblk = &rlun->cur;
rblk = rlun->cur;
retry:
paddr = rrpc_alloc_addr(rrpc, rblk);
- if (paddr == ADDR_EMPTY) {
- rblk = rrpc_get_blk(rrpc, rlun, 0);
- if (rblk) {
- rrpc_set_lun_cur(rlun, rblk);
- goto retry;
- }
+ if (paddr != ADDR_EMPTY)
+ goto done;
- if (is_gc) {
- /* retry from emergency gc block */
- paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
- if (paddr == ADDR_EMPTY) {
- rblk = rrpc_get_blk(rrpc, rlun, 1);
- if (!rblk) {
- pr_err("rrpc: no more blocks");
- goto err;
- }
-
- rlun->gc_cur = rblk;
- paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
- }
- rblk = rlun->gc_cur;
- }
+ if (!list_empty(&rlun->wblk_list)) {
+new_blk:
+ rblk = list_first_entry(&rlun->wblk_list, struct rrpc_block,
+ prio);
+ rrpc_set_lun_cur(rlun, rblk, cur_rblk);
+ list_del(&rblk->prio);
+ goto retry;
+ }
+ spin_unlock(&rlun->lock);
+
+ rblk = rrpc_get_blk(rrpc, rlun, gc_force);
+ if (rblk) {
+ spin_lock(&rlun->lock);
+ list_add_tail(&rblk->prio, &rlun->wblk_list);
+ /*
+ * Another thread might already have added a new block;
+ * therefore, make sure that one is used instead of the
+ * one just added.
+ */
+ goto new_blk;
}
+ if (unlikely(is_gc) && !gc_force) {
+ /* retry from emergency gc block */
+ cur_rblk = &rlun->gc_cur;
+ rblk = rlun->gc_cur;
+ gc_force = 1;
+ spin_lock(&rlun->lock);
+ goto retry;
+ }
+
+ pr_err("rrpc: failed to allocate new block\n");
+ return NULL;
+done:
spin_unlock(&rlun->lock);
return rrpc_update_map(rrpc, laddr, rblk, paddr);
-err:
- spin_unlock(&rlun->lock);
- return NULL;
}
static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
@@ -850,14 +853,14 @@ static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
return NVM_IO_ERR;
}
- if (bio_rw(bio) == WRITE)
+ if (bio_op(bio) == REQ_OP_WRITE)
return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
npages);
return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
}
- if (bio_rw(bio) == WRITE)
+ if (bio_op(bio) == REQ_OP_WRITE)
return rrpc_write_rq(rrpc, bio, rqd, flags);
return rrpc_read_rq(rrpc, bio, rqd, flags);
@@ -908,7 +911,7 @@ static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
struct nvm_rq *rqd;
int err;
- if (bio->bi_rw & REQ_DISCARD) {
+ if (bio_op(bio) == REQ_OP_DISCARD) {
rrpc_discard(rrpc, bio);
return BLK_QC_T_NONE;
}
@@ -1196,8 +1199,7 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
rlun->rrpc = rrpc;
INIT_LIST_HEAD(&rlun->prio_list);
- INIT_LIST_HEAD(&rlun->open_list);
- INIT_LIST_HEAD(&rlun->closed_list);
+ INIT_LIST_HEAD(&rlun->wblk_list);
INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
spin_lock_init(&rlun->lock);
@@ -1338,14 +1340,13 @@ static int rrpc_luns_configure(struct rrpc *rrpc)
rblk = rrpc_get_blk(rrpc, rlun, 0);
if (!rblk)
goto err;
-
- rrpc_set_lun_cur(rlun, rblk);
+ rrpc_set_lun_cur(rlun, rblk, &rlun->cur);
/* Emergency gc block */
rblk = rrpc_get_blk(rrpc, rlun, 1);
if (!rblk)
goto err;
- rlun->gc_cur = rblk;
+ rrpc_set_lun_cur(rlun, rblk, &rlun->gc_cur);
}
return 0;
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index 87e84b5fc..5e87d52cb 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -56,7 +56,6 @@ struct rrpc_block {
struct nvm_block *parent;
struct rrpc_lun *rlun;
struct list_head prio;
- struct list_head list;
#define MAX_INVALID_PAGES_STORAGE 8
/* Bitmap for invalid page entries */
@@ -77,13 +76,7 @@ struct rrpc_lun {
struct rrpc_block *blocks; /* Reference to block allocation */
struct list_head prio_list; /* Blocks that may be GC'ed */
- struct list_head open_list; /* In-use open blocks. These are blocks
- * that can be both written to and read
- * from
- */
- struct list_head closed_list; /* In-use closed blocks. These are
- * blocks that can _only_ be read from
- */
+ struct list_head wblk_list; /* Queued blocks to be written to */
struct work_struct ws_gc;
@@ -188,7 +181,7 @@ static inline int request_intersects(struct rrpc_inflight_rq *r,
}
static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
- unsigned pages, struct rrpc_inflight_rq *r)
+ unsigned int pages, struct rrpc_inflight_rq *r)
{
sector_t laddr_end = laddr + pages - 1;
struct rrpc_inflight_rq *rtmp;
@@ -213,7 +206,7 @@ static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
}
static inline int rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
- unsigned pages,
+ unsigned int pages,
struct rrpc_inflight_rq *r)
{
BUG_ON((laddr + pages) > rrpc->nr_sects);
diff --git a/drivers/lightnvm/sysblk.c b/drivers/lightnvm/sysblk.c
index 994697ac7..a75bd28aa 100644
--- a/drivers/lightnvm/sysblk.c
+++ b/drivers/lightnvm/sysblk.c
@@ -39,7 +39,8 @@ static inline int scan_ppa_idx(int row, int blkid)
return (row * MAX_BLKS_PR_SYSBLK) + blkid;
}
-void nvm_sysblk_to_cpu(struct nvm_sb_info *info, struct nvm_system_block *sb)
+static void nvm_sysblk_to_cpu(struct nvm_sb_info *info,
+ struct nvm_system_block *sb)
{
info->seqnr = be32_to_cpu(sb->seqnr);
info->erase_cnt = be32_to_cpu(sb->erase_cnt);
@@ -48,7 +49,8 @@ void nvm_sysblk_to_cpu(struct nvm_sb_info *info, struct nvm_system_block *sb)
info->fs_ppa.ppa = be64_to_cpu(sb->fs_ppa);
}
-void nvm_cpu_to_sysblk(struct nvm_system_block *sb, struct nvm_sb_info *info)
+static void nvm_cpu_to_sysblk(struct nvm_system_block *sb,
+ struct nvm_sb_info *info)
{
sb->magic = cpu_to_be32(NVM_SYSBLK_MAGIC);
sb->seqnr = cpu_to_be32(info->seqnr);
@@ -86,7 +88,7 @@ static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas)
return nr_rows;
}
-void nvm_setup_sysblk_scan(struct nvm_dev *dev, struct sysblk_scan *s,
+static void nvm_setup_sysblk_scan(struct nvm_dev *dev, struct sysblk_scan *s,
struct ppa_addr *sysblk_ppas)
{
memset(s, 0, sizeof(struct sysblk_scan));
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index c043885ac..61fbb6424 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -96,19 +96,18 @@ config ADB_PMU_LED
Support the front LED on Power/iBooks as a generic LED that can
be triggered by any of the supported triggers. To get the
behaviour of the old CONFIG_BLK_DEV_IDE_PMAC_BLINK, select this
- and the ide-disk LED trigger and configure appropriately through
- sysfs.
+ and the disk LED trigger and configure appropriately through sysfs.
-config ADB_PMU_LED_IDE
- bool "Use front LED as IDE LED by default"
+config ADB_PMU_LED_DISK
+ bool "Use front LED as DISK LED by default"
depends on ADB_PMU_LED
depends on LEDS_CLASS
depends on IDE_GD_ATA
select LEDS_TRIGGERS
- select LEDS_TRIGGER_IDE_DISK
+ select LEDS_TRIGGER_DISK
help
- This option makes the front LED default to the IDE trigger
- so that it blinks on IDE activity.
+ This option makes the front LED default to the disk trigger
+ so that it blinks on disk activity.
config PMAC_SMU
bool "Support for SMU based PowerMacs"
diff --git a/drivers/macintosh/ams/ams-i2c.c b/drivers/macintosh/ams/ams-i2c.c
index 978eda8d6..8a3ba5651 100644
--- a/drivers/macintosh/ams/ams-i2c.c
+++ b/drivers/macintosh/ams/ams-i2c.c
@@ -73,7 +73,6 @@ MODULE_DEVICE_TABLE(i2c, ams_id);
static struct i2c_driver ams_i2c_driver = {
.driver = {
.name = "ams",
- .owner = THIS_MODULE,
},
.probe = ams_i2c_probe,
.remove = ams_i2c_remove,
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index d531f8044..d6f72c826 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -38,6 +38,7 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
+#include <linux/memblock.h>
#include <asm/byteorder.h>
#include <asm/io.h>
@@ -99,6 +100,7 @@ static DEFINE_MUTEX(smu_mutex);
static struct smu_device *smu;
static DEFINE_MUTEX(smu_part_access);
static int smu_irq_inited;
+static unsigned long smu_cmdbuf_abs;
static void smu_i2c_retry(unsigned long data);
@@ -479,8 +481,13 @@ int __init smu_init (void)
printk(KERN_INFO "SMU: Driver %s %s\n", VERSION, AUTHOR);
+ /*
+ * SMU based G5s need some memory below 2Gb. Thankfully this is
+ * called at a time when memblock is still available.
+ */
+ smu_cmdbuf_abs = memblock_alloc_base(4096, 4096, 0x80000000UL);
if (smu_cmdbuf_abs == 0) {
- printk(KERN_ERR "SMU: Command buffer not allocated !\n");
+ printk(KERN_ERR "SMU: Command buffer allocation failed !\n");
ret = -EINVAL;
goto fail_np;
}
diff --git a/drivers/macintosh/via-pmu-led.c b/drivers/macintosh/via-pmu-led.c
index 19c371809..ae067ab23 100644
--- a/drivers/macintosh/via-pmu-led.c
+++ b/drivers/macintosh/via-pmu-led.c
@@ -73,8 +73,8 @@ static void pmu_led_set(struct led_classdev *led_cdev,
static struct led_classdev pmu_led = {
.name = "pmu-led::front",
-#ifdef CONFIG_ADB_PMU_LED_IDE
- .default_trigger = "ide-disk",
+#ifdef CONFIG_ADB_PMU_LED_DISK
+ .default_trigger = "disk-activity",
#endif
.brightness_set = pmu_led_set,
};
diff --git a/drivers/macintosh/windfarm_pm112.c b/drivers/macintosh/windfarm_pm112.c
index 3024685e4..96d16fca6 100644
--- a/drivers/macintosh/windfarm_pm112.c
+++ b/drivers/macintosh/windfarm_pm112.c
@@ -668,7 +668,6 @@ static struct platform_driver wf_pm112_driver = {
.remove = wf_pm112_remove,
.driver = {
.name = "windfarm",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/macintosh/windfarm_pm72.c b/drivers/macintosh/windfarm_pm72.c
index 2f506b9d5..e88cfb36a 100644
--- a/drivers/macintosh/windfarm_pm72.c
+++ b/drivers/macintosh/windfarm_pm72.c
@@ -789,7 +789,6 @@ static struct platform_driver wf_pm72_driver = {
.remove = wf_pm72_remove,
.driver = {
.name = "windfarm",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/macintosh/windfarm_rm31.c b/drivers/macintosh/windfarm_rm31.c
index 82fc86a90..bdfcb8a8b 100644
--- a/drivers/macintosh/windfarm_rm31.c
+++ b/drivers/macintosh/windfarm_rm31.c
@@ -682,7 +682,6 @@ static struct platform_driver wf_rm31_driver = {
.remove = wf_rm31_remove,
.driver = {
.name = "windfarm",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 530592375..7817d40d8 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -123,4 +123,14 @@ config XGENE_SLIMPRO_MBOX
It is used to send short messages between ARM64-bit cores and
the SLIMpro Management Engine, primarily for PM. Say Y here if you
want to use the APM X-Gene SLIMpro IPCM support.
+
+config BCM_PDC_MBOX
+ tristate "Broadcom PDC Mailbox"
+ depends on ARM64 || COMPILE_TEST
+ depends on HAS_DMA
+ default ARCH_BCM_IPROC
+ help
+ Mailbox implementation for the Broadcom PDC ring manager,
+ which provides access to various offload engines on Broadcom
+ SoCs. Say Y here if you want to use the Broadcom PDC.
endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 0be3e742b..66c38e300 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -25,3 +25,5 @@ obj-$(CONFIG_TI_MESSAGE_MANAGER) += ti-msgmgr.o
obj-$(CONFIG_XGENE_SLIMPRO_MBOX) += mailbox-xgene-slimpro.o
obj-$(CONFIG_HI6220_MBOX) += hi6220-mailbox.o
+
+obj-$(CONFIG_BCM_PDC_MBOX) += bcm-pdc-mailbox.o
diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c
new file mode 100644
index 000000000..c19dd820e
--- /dev/null
+++ b/drivers/mailbox/bcm-pdc-mailbox.c
@@ -0,0 +1,1532 @@
+/*
+ * Copyright 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation (the "GPL").
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 (GPLv2) for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 (GPLv2) along with this source code.
+ */
+
+/*
+ * Broadcom PDC Mailbox Driver
+ * The PDC provides a ring based programming interface to one or more hardware
+ * offload engines. For example, the PDC driver works with both SPU-M and SPU2
+ * cryptographic offload hardware. In some chips the PDC is referred to as MDE.
+ *
+ * The PDC driver registers with the Linux mailbox framework as a mailbox
+ * controller, once for each PDC instance. Ring 0 for each PDC is registered as
+ * a mailbox channel. The PDC driver uses interrupts to determine when data
+ * transfers to and from an offload engine are complete. The PDC driver uses
+ * threaded IRQs so that response messages are handled outside of interrupt
+ * context.
+ *
+ * The PDC driver allows multiple messages to be pending in the descriptor
+ * rings. The tx_msg_start descriptor index indicates where the last message
+ * starts. The txin_numd value at this index indicates how many descriptor
+ * indexes make up the message. Similar state is kept on the receive side. When
+ * an rx interrupt indicates a response is ready, the PDC driver processes numd
+ * descriptors from the tx and rx ring, thus processing one response at a time.
+ */
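+
+/*
+ * Illustrative example of the bookkeeping described above (hypothetical
+ * values, not taken from the commit): if a request arrives as a three-entry
+ * source scatterlist, pdc_send_data() writes three tx descriptors starting
+ * at tx_msg_start and records txin_numd[tx_msg_start] = 3. When the rx
+ * interrupt fires, pdc_receive() reclaims those three descriptors as one
+ * message before advancing txin past them.
+ */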
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox/brcm-message.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+
+#define PDC_SUCCESS 0
+
+#define RING_ENTRY_SIZE sizeof(struct dma64dd)
+
+/* # entries in PDC dma ring */
+#define PDC_RING_ENTRIES 128
+#define PDC_RING_SIZE (PDC_RING_ENTRIES * RING_ENTRY_SIZE)
+/* Rings are 8k aligned */
+#define RING_ALIGN_ORDER 13
+#define RING_ALIGN BIT(RING_ALIGN_ORDER)
+
+#define RX_BUF_ALIGN_ORDER 5
+#define RX_BUF_ALIGN BIT(RX_BUF_ALIGN_ORDER)
+
+/* descriptor bumping macros */
+#define XXD(x, max_mask) ((x) & (max_mask))
+#define TXD(x, max_mask) XXD((x), (max_mask))
+#define RXD(x, max_mask) XXD((x), (max_mask))
+#define NEXTTXD(i, max_mask) TXD((i) + 1, (max_mask))
+#define PREVTXD(i, max_mask) TXD((i) - 1, (max_mask))
+#define NEXTRXD(i, max_mask) RXD((i) + 1, (max_mask))
+#define PREVRXD(i, max_mask) RXD((i) - 1, (max_mask))
+#define NTXDACTIVE(h, t, max_mask) TXD((t) - (h), (max_mask))
+#define NRXDACTIVE(h, t, max_mask) RXD((t) - (h), (max_mask))
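+
+/*
+ * Worked example of the bumping macros (hypothetical indexes): the ring
+ * holds PDC_RING_ENTRIES = 128 entries, so the index mask is 127. With
+ * txin = 120 and txout = 5, NTXDACTIVE(120, 5, 127) = (5 - 120) & 127 = 13
+ * descriptors outstanding across the wrap of the ring, and
+ * NEXTTXD(127, 127) wraps back to index 0.
+ */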
+
+/* Length of BCM header at start of SPU msg, in bytes */
+#define BCM_HDR_LEN 8
+
+/*
+ * PDC driver reserves ringset 0 on each SPU for its own use. The driver does
+ * not currently support use of multiple ringsets on a single PDC engine.
+ */
+#define PDC_RINGSET 0
+
+/*
+ * Interrupt mask and status definitions. Enable interrupts for tx and rx on
+ * ring 0
+ */
+#define PDC_XMTINT_0 (24 + PDC_RINGSET)
+#define PDC_RCVINT_0 (16 + PDC_RINGSET)
+#define PDC_XMTINTEN_0 BIT(PDC_XMTINT_0)
+#define PDC_RCVINTEN_0 BIT(PDC_RCVINT_0)
+#define PDC_INTMASK (PDC_XMTINTEN_0 | PDC_RCVINTEN_0)
+#define PDC_LAZY_FRAMECOUNT 1
+#define PDC_LAZY_TIMEOUT 10000
+#define PDC_LAZY_INT (PDC_LAZY_TIMEOUT | (PDC_LAZY_FRAMECOUNT << 24))
+#define PDC_INTMASK_OFFSET 0x24
+#define PDC_INTSTATUS_OFFSET 0x20
+#define PDC_RCVLAZY0_OFFSET (0x30 + 4 * PDC_RINGSET)
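+
+/*
+ * For reference, the lazy-interrupt word derived from the values above:
+ * the frame count occupies bits 31:24 and the timeout the low bits, so
+ * PDC_LAZY_INT = 10000 | (1 << 24) = 0x01002710, i.e. interrupt after one
+ * received frame or after the timeout, whichever comes first.
+ */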
+
+/*
+ * For SPU2, configure MDE_CKSUM_CONTROL to write 17 bytes of metadata
+ * before frame
+ */
+#define PDC_SPU2_RESP_HDR_LEN 17
+#define PDC_CKSUM_CTRL BIT(27)
+#define PDC_CKSUM_CTRL_OFFSET 0x400
+
+#define PDC_SPUM_RESP_HDR_LEN 32
+
+/*
+ * Sets the following bits for write to transmit control reg:
+ * 0 - XmtEn - enable activity on the tx channel
+ * 11 - PtyChkDisable - parity check is disabled
+ * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
+ */
+#define PDC_TX_CTL 0x000C0801
+
+/*
+ * Sets the following bits for write to receive control reg:
+ * 0 - RcvEn - enable activity on the rx channel
+ * 7:1 - RcvOffset - size in bytes of status region at start of rx frame buf
+ * 9 - SepRxHdrDescEn - place start of new frames only in descriptors
+ * that have StartOfFrame set
+ * 10 - OflowContinue - on rx FIFO overflow, clear rx fifo, discard all
+ * remaining bytes in current frame, report error
+ * in rx frame status for current frame
+ * 11 - PtyChkDisable - parity check is disabled
+ * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
+ */
+#define PDC_RX_CTL 0x000C0E01
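+
+/*
+ * Example with a hypothetical status length: pdc_hw_init() adds
+ * (rx_status_len << 1) to this value so the length lands in the RcvOffset
+ * field, bits 7:1. For a 32-byte SPU-M status region, 32 << 1 = 0x40 and
+ * the receive control word becomes 0x000C0E41.
+ */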
+
+#define CRYPTO_D64_RS0_CD_MASK ((PDC_RING_ENTRIES * RING_ENTRY_SIZE) - 1)
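+
+/*
+ * Example with a hypothetical register value: pdc_receive() masks the rx
+ * status0 register with this constant (128 * 16 - 1 = 0x7ff) to get the
+ * current-descriptor byte offset, then divides by RING_ENTRY_SIZE. A
+ * masked value of 0x640 corresponds to descriptor index 0x640 / 16 = 100.
+ */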
+
+/* descriptor flags */
+#define D64_CTRL1_EOT BIT(28) /* end of descriptor table */
+#define D64_CTRL1_IOC BIT(29) /* interrupt on complete */
+#define D64_CTRL1_EOF BIT(30) /* end of frame */
+#define D64_CTRL1_SOF BIT(31) /* start of frame */
+
+#define RX_STATUS_OVERFLOW 0x00800000
+#define RX_STATUS_LEN 0x0000FFFF
+
+#define PDC_TXREGS_OFFSET 0x200
+#define PDC_RXREGS_OFFSET 0x220
+
+/* Maximum size buffer the DMA engine can handle */
+#define PDC_DMA_BUF_MAX 16384
+
+struct pdc_dma_map {
+ void *ctx; /* opaque context associated with frame */
+};
+
+/* dma descriptor */
+struct dma64dd {
+ u32 ctrl1; /* misc control bits */
+ u32 ctrl2; /* buffer count and address extension */
+ u32 addrlow; /* memory address of the data buffer, bits 31:0 */
+ u32 addrhigh; /* memory address of the data buffer, bits 63:32 */
+};
+
+/* dma registers per channel(xmt or rcv) */
+struct dma64_regs {
+ u32 control; /* enable, et al */
+ u32 ptr; /* last descriptor posted to chip */
+ u32 addrlow; /* descriptor ring base address low 32-bits */
+ u32 addrhigh; /* descriptor ring base address bits 63:32 */
+ u32 status0; /* last rx descriptor written by hw */
+ u32 status1; /* driver does not use */
+};
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif /* PAD */
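+
+/*
+ * Example of the PAD expansion: a member declared as "u32 PAD[2];" on,
+ * say, source line 230 expands to "u32 pad230[2];", giving each reserved
+ * register hole a unique field name.
+ */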
+
+/* dma registers. matches hw layout. */
+struct dma64 {
+ struct dma64_regs dmaxmt; /* dma tx */
+ u32 PAD[2];
+ struct dma64_regs dmarcv; /* dma rx */
+ u32 PAD[2];
+};
+
+/* PDC registers */
+struct pdc_regs {
+ u32 devcontrol; /* 0x000 */
+ u32 devstatus; /* 0x004 */
+ u32 PAD;
+ u32 biststatus; /* 0x00c */
+ u32 PAD[4];
+ u32 intstatus; /* 0x020 */
+ u32 intmask; /* 0x024 */
+ u32 gptimer; /* 0x028 */
+
+ u32 PAD;
+ u32 intrcvlazy_0; /* 0x030 */
+ u32 intrcvlazy_1; /* 0x034 */
+ u32 intrcvlazy_2; /* 0x038 */
+ u32 intrcvlazy_3; /* 0x03c */
+
+ u32 PAD[48];
+ u32 removed_intrecvlazy; /* 0x100 */
+ u32 flowctlthresh; /* 0x104 */
+ u32 wrrthresh; /* 0x108 */
+ u32 gmac_idle_cnt_thresh; /* 0x10c */
+
+ u32 PAD[4];
+ u32 ifioaccessaddr; /* 0x120 */
+ u32 ifioaccessbyte; /* 0x124 */
+ u32 ifioaccessdata; /* 0x128 */
+
+ u32 PAD[21];
+ u32 phyaccess; /* 0x180 */
+ u32 PAD;
+ u32 phycontrol; /* 0x188 */
+ u32 txqctl; /* 0x18c */
+ u32 rxqctl; /* 0x190 */
+ u32 gpioselect; /* 0x194 */
+ u32 gpio_output_en; /* 0x198 */
+ u32 PAD; /* 0x19c */
+ u32 txq_rxq_mem_ctl; /* 0x1a0 */
+ u32 memory_ecc_status; /* 0x1a4 */
+ u32 serdes_ctl; /* 0x1a8 */
+ u32 serdes_status0; /* 0x1ac */
+ u32 serdes_status1; /* 0x1b0 */
+ u32 PAD[11]; /* 0x1b4-1dc */
+ u32 clk_ctl_st; /* 0x1e0 */
+ u32 hw_war; /* 0x1e4 */
+ u32 pwrctl; /* 0x1e8 */
+ u32 PAD[5];
+
+#define PDC_NUM_DMA_RINGS 4
+ struct dma64 dmaregs[PDC_NUM_DMA_RINGS]; /* 0x0200 - 0x2fc */
+
+ /* more registers follow, but we don't use them */
+};
+
+/* structure for allocating/freeing DMA rings */
+struct pdc_ring_alloc {
+ dma_addr_t dmabase; /* DMA address of start of ring */
+ void *vbase; /* base kernel virtual address of ring */
+ u32 size; /* ring allocation size in bytes */
+};
+
+/* PDC state structure */
+struct pdc_state {
+ /* synchronize access to this PDC state structure */
+ spinlock_t pdc_lock;
+
+ /* Index of the PDC whose state is in this structure instance */
+ u8 pdc_idx;
+
+ /* Platform device for this PDC instance */
+ struct platform_device *pdev;
+
+ /*
+ * Each PDC instance has a mailbox controller. PDC receives request
+ * messages through mailboxes, and sends response messages through the
+ * mailbox framework.
+ */
+ struct mbox_controller mbc;
+
+ unsigned int pdc_irq;
+
+ /*
+ * Last interrupt status read from PDC device. Saved in interrupt
+ * handler so the handler can clear the interrupt in the device,
+ * and the interrupt thread called later can know which interrupt
+ * bits are active.
+ */
+ unsigned long intstatus;
+
+ /* Number of bytes of receive status prior to each rx frame */
+ u32 rx_status_len;
+ /* Whether a BCM header is prepended to each frame */
+ bool use_bcm_hdr;
+ /* Sum of length of BCM header and rx status header */
+ u32 pdc_resp_hdr_len;
+
+ /* The base virtual address of DMA hw registers */
+ void __iomem *pdc_reg_vbase;
+
+ /* Pool for allocation of DMA rings */
+ struct dma_pool *ring_pool;
+
+ /* Pool for allocation of metadata buffers for response messages */
+ struct dma_pool *rx_buf_pool;
+
+ /*
+ * The base virtual address of DMA tx/rx descriptor rings. Corresponding
+ * DMA address and size of ring allocation.
+ */
+ struct pdc_ring_alloc tx_ring_alloc;
+ struct pdc_ring_alloc rx_ring_alloc;
+
+ struct pdc_regs *regs; /* start of PDC registers */
+
+ struct dma64_regs *txregs_64; /* dma tx engine registers */
+ struct dma64_regs *rxregs_64; /* dma rx engine registers */
+
+ /*
+ * Arrays of PDC_RING_ENTRIES descriptors
+ * To use multiple ringsets, this needs to be extended
+ */
+ struct dma64dd *txd_64; /* tx descriptor ring */
+ struct dma64dd *rxd_64; /* rx descriptor ring */
+
+ /* descriptor ring sizes */
+ u32 ntxd; /* # tx descriptors */
+ u32 nrxd; /* # rx descriptors */
+ u32 nrxpost; /* # rx buffers to keep posted */
+ u32 ntxpost; /* max number of tx buffers that can be posted */
+
+ /*
+ * Index of next tx descriptor to reclaim. That is, the descriptor
+ * index of the oldest tx buffer for which the host has yet to process
+ * the corresponding response.
+ */
+ u32 txin;
+
+ /*
+ * Index of the first transmit descriptor for the sequence of
+ * message fragments currently under construction. Used to build up
+ * the txin_numd count for a message. Updated to txout when the host
+ * starts a new sequence of tx buffers for a new message.
+ */
+ u32 tx_msg_start;
+
+ /* Index of next tx descriptor to post. */
+ u32 txout;
+
+ /*
+ * Number of tx descriptors associated with the message that starts
+ * at this tx descriptor index.
+ */
+ u32 txin_numd[PDC_RING_ENTRIES];
+
+ /*
+ * Index of next rx descriptor to reclaim. This is the index of
+ * the next descriptor whose data has yet to be processed by the host.
+ */
+ u32 rxin;
+
+ /*
+ * Index of the first receive descriptor for the sequence of
+ * message fragments currently under construction. Used to build up
+ * the rxin_numd count for a message. Updated to rxout when the host
+ * starts a new sequence of rx buffers for a new message.
+ */
+ u32 rx_msg_start;
+
+ /*
+ * Saved value of current hardware rx descriptor index.
+ * The last rx buffer written by the hw is the index previous to
+ * this one.
+ */
+ u32 last_rx_curr;
+
+ /* Index of next rx descriptor to post. */
+ u32 rxout;
+
+ /*
+ * opaque context associated with frame that starts at each
+ * rx ring index.
+ */
+ void *rxp_ctx[PDC_RING_ENTRIES];
+
+ /*
+ * Scatterlists used to form request and reply frames beginning at a
+ * given ring index. Retained in order to unmap each sg after reply
+ * is processed
+ */
+ struct scatterlist *src_sg[PDC_RING_ENTRIES];
+ struct scatterlist *dst_sg[PDC_RING_ENTRIES];
+
+ /*
+ * Number of rx descriptors associated with the message that starts
+ * at this descriptor index. Not set for every index. For example,
+ * if descriptor index i points to a scatterlist with 4 entries, then
+ * the next three descriptor indexes don't have a value set.
+ */
+ u32 rxin_numd[PDC_RING_ENTRIES];
+
+ void *resp_hdr[PDC_RING_ENTRIES];
+ dma_addr_t resp_hdr_daddr[PDC_RING_ENTRIES];
+
+ struct dentry *debugfs_stats; /* debug FS stats file for this PDC */
+
+ /* counters */
+ u32 pdc_requests; /* number of request messages submitted */
+ u32 pdc_replies; /* number of reply messages received */
+ u32 txnobuf; /* count of tx ring full */
+ u32 rxnobuf; /* count of rx ring full */
+ u32 rx_oflow; /* count of rx overflows */
+};
+
+/* Global variables */
+
+struct pdc_globals {
+ /* Actual number of SPUs in hardware, as reported by device tree */
+ u32 num_spu;
+};
+
+static struct pdc_globals pdcg;
+
+/* top level debug FS directory for PDC driver */
+static struct dentry *debugfs_dir;
+
+static ssize_t pdc_debugfs_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *offp)
+{
+ struct pdc_state *pdcs;
+ char *buf;
+ ssize_t ret, out_offset, out_count;
+
+ out_count = 512;
+
+ buf = kmalloc(out_count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pdcs = filp->private_data;
+ out_offset = 0;
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "SPU %u stats:\n", pdcs->pdc_idx);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "PDC requests............%u\n",
+ pdcs->pdc_requests);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "PDC responses...........%u\n",
+ pdcs->pdc_replies);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "Tx err ring full........%u\n",
+ pdcs->txnobuf);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "Rx err ring full........%u\n",
+ pdcs->rxnobuf);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "Receive overflow........%u\n",
+ pdcs->rx_oflow);
+
+ if (out_offset > out_count)
+ out_offset = out_count;
+
+ ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations pdc_debugfs_stats = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = pdc_debugfs_read,
+};
+
+/**
+ * pdc_setup_debugfs() - Create the debug FS directories. If the top-level
+ * directory has not yet been created, create it now. Create a stats file in
+ * this directory for a SPU.
+ * @pdcs: PDC state structure
+ */
+static void pdc_setup_debugfs(struct pdc_state *pdcs)
+{
+ char spu_stats_name[16];
+
+ if (!debugfs_initialized())
+ return;
+
+ snprintf(spu_stats_name, 16, "pdc%d_stats", pdcs->pdc_idx);
+ if (!debugfs_dir)
+ debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+ pdcs->debugfs_stats = debugfs_create_file(spu_stats_name, S_IRUSR,
+ debugfs_dir, pdcs,
+ &pdc_debugfs_stats);
+}
+
+static void pdc_free_debugfs(void)
+{
+ if (debugfs_dir && simple_empty(debugfs_dir)) {
+ debugfs_remove_recursive(debugfs_dir);
+ debugfs_dir = NULL;
+ }
+}
+
+/**
+ * pdc_build_rxd() - Build DMA descriptor to receive SPU result.
+ * @pdcs: PDC state for SPU that will generate result
+ * @dma_addr: DMA address of buffer that descriptor is being built for
+ * @buf_len: Length of the receive buffer, in bytes
+ * @flags: Flags to be stored in descriptor
+ */
+static inline void
+pdc_build_rxd(struct pdc_state *pdcs, dma_addr_t dma_addr,
+ u32 buf_len, u32 flags)
+{
+ struct device *dev = &pdcs->pdev->dev;
+
+ dev_dbg(dev,
+ "Writing rx descriptor for PDC %u at index %u with length %u. flags %#x\n",
+ pdcs->pdc_idx, pdcs->rxout, buf_len, flags);
+
+ iowrite32(lower_32_bits(dma_addr),
+ (void *)&pdcs->rxd_64[pdcs->rxout].addrlow);
+ iowrite32(upper_32_bits(dma_addr),
+ (void *)&pdcs->rxd_64[pdcs->rxout].addrhigh);
+ iowrite32(flags, (void *)&pdcs->rxd_64[pdcs->rxout].ctrl1);
+ iowrite32(buf_len, (void *)&pdcs->rxd_64[pdcs->rxout].ctrl2);
+ /* bump ring index and return */
+ pdcs->rxout = NEXTRXD(pdcs->rxout, pdcs->nrxpost);
+}
+
+/**
+ * pdc_build_txd() - Build a DMA descriptor to transmit a SPU request to
+ * hardware.
+ * @pdcs: PDC state for the SPU that will process this request
+ * @dma_addr: DMA address of packet to be transmitted
+ * @buf_len: Length of tx buffer, in bytes
+ * @flags: Flags to be stored in descriptor
+ */
+static inline void
+pdc_build_txd(struct pdc_state *pdcs, dma_addr_t dma_addr, u32 buf_len,
+ u32 flags)
+{
+ struct device *dev = &pdcs->pdev->dev;
+
+ dev_dbg(dev,
+ "Writing tx descriptor for PDC %u at index %u with length %u, flags %#x\n",
+ pdcs->pdc_idx, pdcs->txout, buf_len, flags);
+
+ iowrite32(lower_32_bits(dma_addr),
+ (void *)&pdcs->txd_64[pdcs->txout].addrlow);
+ iowrite32(upper_32_bits(dma_addr),
+ (void *)&pdcs->txd_64[pdcs->txout].addrhigh);
+ iowrite32(flags, (void *)&pdcs->txd_64[pdcs->txout].ctrl1);
+ iowrite32(buf_len, (void *)&pdcs->txd_64[pdcs->txout].ctrl2);
+
+ /* bump ring index and return */
+ pdcs->txout = NEXTTXD(pdcs->txout, pdcs->ntxpost);
+}
+
+/**
+ * pdc_receive() - Receive a response message from a given SPU.
+ * @pdcs: PDC state for the SPU to receive from
+ * @mssg: mailbox message to be returned to client. This function sets the
+ * context pointer on the message to help the client associate the
+ * response with a request.
+ *
+ * When the return code indicates success, the response message is available in
+ * the receive buffers provided prior to submission of the request.
+ *
+ * Return: PDC_SUCCESS if one or more receive descriptors was processed
+ * -EAGAIN indicates that no response message is available
+ * -EIO an error occurred
+ */
+static int
+pdc_receive(struct pdc_state *pdcs, struct brcm_message *mssg)
+{
+ struct device *dev = &pdcs->pdev->dev;
+ u32 len, rx_status;
+ u32 num_frags;
+ int i;
+ u8 *resp_hdr; /* virtual addr of start of resp message DMA header */
+ u32 frags_rdy; /* number of fragments ready to read */
+ u32 rx_idx; /* ring index of start of receive frame */
+ dma_addr_t resp_hdr_daddr;
+
+ spin_lock(&pdcs->pdc_lock);
+
+ /*
+ * return if a complete response message is not yet ready.
+ * rxin_numd[rxin] is the number of fragments in the next msg
+ * to read.
+ */
+ frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr, pdcs->nrxpost);
+ if ((frags_rdy == 0) || (frags_rdy < pdcs->rxin_numd[pdcs->rxin])) {
+ /* See if the hw has written more fragments than we know */
+ pdcs->last_rx_curr =
+ (ioread32((void *)&pdcs->rxregs_64->status0) &
+ CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE;
+ frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr,
+ pdcs->nrxpost);
+ if ((frags_rdy == 0) ||
+ (frags_rdy < pdcs->rxin_numd[pdcs->rxin])) {
+ /* No response ready */
+ spin_unlock(&pdcs->pdc_lock);
+ return -EAGAIN;
+ }
+ /* can't read descriptors/data until write index is read */
+ rmb();
+ }
+
+ num_frags = pdcs->txin_numd[pdcs->txin];
+ dma_unmap_sg(dev, pdcs->src_sg[pdcs->txin],
+ sg_nents(pdcs->src_sg[pdcs->txin]), DMA_TO_DEVICE);
+
+ for (i = 0; i < num_frags; i++)
+ pdcs->txin = NEXTTXD(pdcs->txin, pdcs->ntxpost);
+
+ dev_dbg(dev, "PDC %u reclaimed %d tx descriptors",
+ pdcs->pdc_idx, num_frags);
+
+ rx_idx = pdcs->rxin;
+ num_frags = pdcs->rxin_numd[rx_idx];
+ /* Return opaque context with result */
+ mssg->ctx = pdcs->rxp_ctx[rx_idx];
+ pdcs->rxp_ctx[rx_idx] = NULL;
+ resp_hdr = pdcs->resp_hdr[rx_idx];
+ resp_hdr_daddr = pdcs->resp_hdr_daddr[rx_idx];
+ dma_unmap_sg(dev, pdcs->dst_sg[rx_idx],
+ sg_nents(pdcs->dst_sg[rx_idx]), DMA_FROM_DEVICE);
+
+ for (i = 0; i < num_frags; i++)
+ pdcs->rxin = NEXTRXD(pdcs->rxin, pdcs->nrxpost);
+
+ spin_unlock(&pdcs->pdc_lock);
+
+ dev_dbg(dev, "PDC %u reclaimed %d rx descriptors",
+ pdcs->pdc_idx, num_frags);
+
+ dev_dbg(dev,
+ "PDC %u txin %u, txout %u, rxin %u, rxout %u, last_rx_curr %u\n",
+ pdcs->pdc_idx, pdcs->txin, pdcs->txout, pdcs->rxin,
+ pdcs->rxout, pdcs->last_rx_curr);
+
+ if (pdcs->pdc_resp_hdr_len == PDC_SPUM_RESP_HDR_LEN) {
+ /*
+ * For SPU-M, get length of response msg and rx overflow status.
+ */
+ rx_status = *((u32 *)resp_hdr);
+ len = rx_status & RX_STATUS_LEN;
+ dev_dbg(dev,
+ "SPU response length %u bytes", len);
+ if (unlikely(((rx_status & RX_STATUS_OVERFLOW) || (!len)))) {
+ if (rx_status & RX_STATUS_OVERFLOW) {
+ dev_err_ratelimited(dev,
+ "crypto receive overflow");
+ pdcs->rx_oflow++;
+ } else {
+ dev_info_ratelimited(dev, "crypto rx len = 0");
+ }
+ /* free the response header buffer before reporting the error */
+ dma_pool_free(pdcs->rx_buf_pool, resp_hdr, resp_hdr_daddr);
+ return -EIO;
+ }
+ }
+
+ dma_pool_free(pdcs->rx_buf_pool, resp_hdr, resp_hdr_daddr);
+
+ pdcs->pdc_replies++;
+ /* if we read one or more rx descriptors, claim success */
+ if (num_frags > 0)
+ return PDC_SUCCESS;
+ else
+ return -EIO;
+}
+
+/**
+ * pdc_tx_list_sg_add() - Add the buffers in a scatterlist to the transmit
+ * descriptors for a given SPU. The scatterlist buffers contain the data for a
+ * SPU request message.
+ * @pdcs: PDC state for the SPU that will process this request
+ * @sg: Scatterlist whose buffers contain part of the SPU request
+ *
+ * If a scatterlist buffer is larger than PDC_DMA_BUF_MAX, multiple descriptors
+ * are written for that buffer, each <= PDC_DMA_BUF_MAX bytes in length.
+ *
+ * Return: PDC_SUCCESS if successful
+ * < 0 otherwise
+ */
+static int pdc_tx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
+{
+ u32 flags = 0;
+ u32 eot;
+ u32 tx_avail;
+
+ /*
+ * Num descriptors needed. Conservatively assume we need a descriptor
+ * for every entry in sg.
+ */
+ u32 num_desc;
+ u32 desc_w = 0; /* Number of tx descriptors written */
+ u32 bufcnt; /* Number of bytes of buffer pointed to by descriptor */
+ dma_addr_t databufptr; /* DMA address to put in descriptor */
+
+ num_desc = (u32)sg_nents(sg);
+
+ /* check whether enough tx descriptors are available */
+ tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
+ pdcs->ntxpost);
+ if (unlikely(num_desc > tx_avail)) {
+ pdcs->txnobuf++;
+ return -ENOSPC;
+ }
+
+ /* build tx descriptors */
+ if (pdcs->tx_msg_start == pdcs->txout) {
+ /* Start of frame */
+ pdcs->txin_numd[pdcs->tx_msg_start] = 0;
+ pdcs->src_sg[pdcs->txout] = sg;
+ flags = D64_CTRL1_SOF;
+ }
+
+ while (sg) {
+ if (unlikely(pdcs->txout == (pdcs->ntxd - 1)))
+ eot = D64_CTRL1_EOT;
+ else
+ eot = 0;
+
+ /*
+ * If sg buffer larger than PDC limit, split across
+ * multiple descriptors
+ */
+ bufcnt = sg_dma_len(sg);
+ databufptr = sg_dma_address(sg);
+ while (bufcnt > PDC_DMA_BUF_MAX) {
+ pdc_build_txd(pdcs, databufptr, PDC_DMA_BUF_MAX,
+ flags | eot);
+ desc_w++;
+ bufcnt -= PDC_DMA_BUF_MAX;
+ databufptr += PDC_DMA_BUF_MAX;
+ if (unlikely(pdcs->txout == (pdcs->ntxd - 1)))
+ eot = D64_CTRL1_EOT;
+ else
+ eot = 0;
+ }
+ sg = sg_next(sg);
+ if (!sg)
+ /* Writing last descriptor for frame */
+ flags |= (D64_CTRL1_EOF | D64_CTRL1_IOC);
+ pdc_build_txd(pdcs, databufptr, bufcnt, flags | eot);
+ desc_w++;
+ /* Clear start of frame after first descriptor */
+ flags &= ~D64_CTRL1_SOF;
+ }
+ pdcs->txin_numd[pdcs->tx_msg_start] += desc_w;
+
+ return PDC_SUCCESS;
+}
+
+/**
+ * pdc_tx_list_final() - Initiate DMA transfer of last frame written to tx
+ * ring.
+ * @pdcs: PDC state for SPU to process the request
+ *
+ * Sets the index of the last descriptor written in both the rx and tx ring.
+ *
+ * Return: PDC_SUCCESS
+ */
+static int pdc_tx_list_final(struct pdc_state *pdcs)
+{
+ /*
+ * write barrier to ensure all register writes are complete
+ * before chip starts to process new request
+ */
+ wmb();
+ iowrite32(pdcs->rxout << 4, (void *)&pdcs->rxregs_64->ptr);
+ iowrite32(pdcs->txout << 4, (void *)&pdcs->txregs_64->ptr);
+ pdcs->pdc_requests++;
+
+ return PDC_SUCCESS;
+}
+
+/**
+ * pdc_rx_list_init() - Start a new receive descriptor list for a given PDC.
+ * @pdcs: PDC state for SPU handling request
+ * @dst_sg: scatterlist providing rx buffers for response to be returned to
+ * mailbox client
+ * @ctx: Opaque context for this request
+ *
+ * Posts a single receive descriptor to hold the metadata that precedes a
+ * response. For example, with SPU-M, the metadata is a 32-byte DMA header and
+ * an 8-byte BCM header. Moves the msg_start descriptor indexes for both tx and
+ * rx to indicate the start of a new message.
+ *
+ * Return: PDC_SUCCESS if successful
+ * < 0 if an error (e.g., rx ring is full)
+ */
+static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg,
+ void *ctx)
+{
+ u32 flags = 0;
+ u32 rx_avail;
+ u32 rx_pkt_cnt = 1; /* Adding a single rx buffer */
+ dma_addr_t daddr;
+ void *vaddr;
+
+ rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
+ pdcs->nrxpost);
+ if (unlikely(rx_pkt_cnt > rx_avail)) {
+ pdcs->rxnobuf++;
+ return -ENOSPC;
+ }
+
+ /* allocate a buffer for the dma rx status */
+ vaddr = dma_pool_zalloc(pdcs->rx_buf_pool, GFP_ATOMIC, &daddr);
+ if (!vaddr)
+ return -ENOMEM;
+
+ /*
+ * Update msg_start indexes for both tx and rx to indicate the start
+ * of a new sequence of descriptor indexes that contain the fragments
+ * of the same message.
+ */
+ pdcs->rx_msg_start = pdcs->rxout;
+ pdcs->tx_msg_start = pdcs->txout;
+
+ /* This is always the first descriptor in the receive sequence */
+ flags = D64_CTRL1_SOF;
+ pdcs->rxin_numd[pdcs->rx_msg_start] = 1;
+
+ if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
+ flags |= D64_CTRL1_EOT;
+
+ pdcs->rxp_ctx[pdcs->rxout] = ctx;
+ pdcs->dst_sg[pdcs->rxout] = dst_sg;
+ pdcs->resp_hdr[pdcs->rxout] = vaddr;
+ pdcs->resp_hdr_daddr[pdcs->rxout] = daddr;
+ pdc_build_rxd(pdcs, daddr, pdcs->pdc_resp_hdr_len, flags);
+ return PDC_SUCCESS;
+}
+
+/**
+ * pdc_rx_list_sg_add() - Add the buffers in a scatterlist to the receive
+ * descriptors for a given SPU. The caller must have already DMA mapped the
+ * scatterlist.
+ * @pdcs: PDC state for the SPU the response buffers are for
+ * @sg: Scatterlist whose buffers are added to the receive ring
+ *
+ * If a receive buffer in the scatterlist is larger than PDC_DMA_BUF_MAX,
+ * multiple receive descriptors are written, each with a buffer <=
+ * PDC_DMA_BUF_MAX.
+ *
+ * Return: PDC_SUCCESS if successful
+ * < 0 otherwise (e.g., receive ring is full)
+ */
+static int pdc_rx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
+{
+ u32 flags = 0;
+ u32 rx_avail;
+
+ /*
+ * Num descriptors needed. Conservatively assume we need a descriptor
+ * for every entry from our starting point in the scatterlist.
+ */
+ u32 num_desc;
+ u32 desc_w = 0; /* Number of rx descriptors written */
+ u32 bufcnt; /* Number of bytes of buffer pointed to by descriptor */
+ dma_addr_t databufptr; /* DMA address to put in descriptor */
+
+ num_desc = (u32)sg_nents(sg);
+
+ rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
+ pdcs->nrxpost);
+ if (unlikely(num_desc > rx_avail)) {
+ pdcs->rxnobuf++;
+ return -ENOSPC;
+ }
+
+ while (sg) {
+ if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
+ flags = D64_CTRL1_EOT;
+ else
+ flags = 0;
+
+ /*
+ * If sg buffer larger than PDC limit, split across
+ * multiple descriptors
+ */
+ bufcnt = sg_dma_len(sg);
+ databufptr = sg_dma_address(sg);
+ while (bufcnt > PDC_DMA_BUF_MAX) {
+ pdc_build_rxd(pdcs, databufptr, PDC_DMA_BUF_MAX, flags);
+ desc_w++;
+ bufcnt -= PDC_DMA_BUF_MAX;
+ databufptr += PDC_DMA_BUF_MAX;
+ if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
+ flags = D64_CTRL1_EOT;
+ else
+ flags = 0;
+ }
+ pdc_build_rxd(pdcs, databufptr, bufcnt, flags);
+ desc_w++;
+ sg = sg_next(sg);
+ }
+ pdcs->rxin_numd[pdcs->rx_msg_start] += desc_w;
+
+ return PDC_SUCCESS;
+}
+
+/**
+ * pdc_irq_handler() - Interrupt handler called in interrupt context.
+ * @irq: Interrupt number that has fired
+ * @cookie: PDC state for DMA engine that generated the interrupt
+ *
+ * We have to clear the device interrupt status flags here, so we cache the
+ * status for later use in the thread function. Other than that, just return
+ * IRQ_WAKE_THREAD to invoke the thread function.
+ *
+ * Return: IRQ_WAKE_THREAD if interrupt is ours
+ * IRQ_NONE otherwise
+ */
+static irqreturn_t pdc_irq_handler(int irq, void *cookie)
+{
+ struct pdc_state *pdcs = cookie;
+ u32 intstatus = ioread32(pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);
+
+ if (intstatus & PDC_XMTINTEN_0)
+ set_bit(PDC_XMTINT_0, &pdcs->intstatus);
+ if (intstatus & PDC_RCVINTEN_0)
+ set_bit(PDC_RCVINT_0, &pdcs->intstatus);
+
+ /* Clear interrupt flags in device */
+ iowrite32(intstatus, pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);
+
+ /* Wakeup IRQ thread */
+ if (pdcs && (irq == pdcs->pdc_irq) && (intstatus & PDC_INTMASK))
+ return IRQ_WAKE_THREAD;
+
+ return IRQ_NONE;
+}
+
+/**
+ * pdc_irq_thread() - Function invoked on deferred thread when a DMA tx has
+ * completed or data is available to receive.
+ * @irq: Interrupt number
+ * @cookie: PDC state for PDC that generated the interrupt
+ *
+ * On DMA tx complete, notify the mailbox client. On DMA rx complete, process
+ * as many SPU response messages as are available and send each to the mailbox
+ * client.
+ *
+ * Return: IRQ_HANDLED if we recognized and handled the interrupt
+ * IRQ_NONE otherwise
+ */
+static irqreturn_t pdc_irq_thread(int irq, void *cookie)
+{
+ struct pdc_state *pdcs = cookie;
+ struct mbox_controller *mbc;
+ struct mbox_chan *chan;
+ bool tx_int;
+ bool rx_int;
+ int rx_status;
+ struct brcm_message mssg;
+
+ tx_int = test_and_clear_bit(PDC_XMTINT_0, &pdcs->intstatus);
+ rx_int = test_and_clear_bit(PDC_RCVINT_0, &pdcs->intstatus);
+
+ if (pdcs && (tx_int || rx_int)) {
+ dev_dbg(&pdcs->pdev->dev,
+ "%s() got irq %d with tx_int %s, rx_int %s",
+ __func__, irq,
+ tx_int ? "set" : "clear", rx_int ? "set" : "clear");
+
+ mbc = &pdcs->mbc;
+ chan = &mbc->chans[0];
+
+ if (tx_int) {
+ dev_dbg(&pdcs->pdev->dev, "%s(): tx done", __func__);
+ /* only one frame in flight at a time */
+ mbox_chan_txdone(chan, PDC_SUCCESS);
+ }
+ if (rx_int) {
+ while (1) {
+ /* Could be many frames ready */
+ memset(&mssg, 0, sizeof(mssg));
+ mssg.type = BRCM_MESSAGE_SPU;
+ rx_status = pdc_receive(pdcs, &mssg);
+ if (rx_status >= 0) {
+ dev_dbg(&pdcs->pdev->dev,
+ "%s(): invoking client rx cb",
+ __func__);
+ mbox_chan_received_data(chan, &mssg);
+ } else {
+ dev_dbg(&pdcs->pdev->dev,
+ "%s(): no SPU response available",
+ __func__);
+ break;
+ }
+ }
+ }
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+/**
+ * pdc_ring_init() - Allocate DMA rings and initialize constant fields of
+ * descriptors in one ringset.
+ * @pdcs: PDC instance state
+ * @ringset: index of ringset being used
+ *
+ * Return: PDC_SUCCESS if ring initialized
+ * < 0 otherwise
+ */
+static int pdc_ring_init(struct pdc_state *pdcs, int ringset)
+{
+ int i;
+ int err = PDC_SUCCESS;
+ struct dma64 *dma_reg;
+ struct device *dev = &pdcs->pdev->dev;
+ struct pdc_ring_alloc tx;
+ struct pdc_ring_alloc rx;
+
+ /* Allocate tx ring */
+ tx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &tx.dmabase);
+ if (!tx.vbase) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ /* Allocate rx ring */
+ rx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &rx.dmabase);
+ if (!rx.vbase) {
+ err = -ENOMEM;
+ goto fail_dealloc;
+ }
+
+ dev_dbg(dev, " - base DMA addr of tx ring %pad", &tx.dmabase);
+ dev_dbg(dev, " - base virtual addr of tx ring %p", tx.vbase);
+ dev_dbg(dev, " - base DMA addr of rx ring %pad", &rx.dmabase);
+ dev_dbg(dev, " - base virtual addr of rx ring %p", rx.vbase);
+
+ /* lock after ring allocation to avoid scheduling while atomic */
+ spin_lock(&pdcs->pdc_lock);
+
+ memcpy(&pdcs->tx_ring_alloc, &tx, sizeof(tx));
+ memcpy(&pdcs->rx_ring_alloc, &rx, sizeof(rx));
+
+ pdcs->rxin = 0;
+ pdcs->rx_msg_start = 0;
+ pdcs->last_rx_curr = 0;
+ pdcs->rxout = 0;
+ pdcs->txin = 0;
+ pdcs->tx_msg_start = 0;
+ pdcs->txout = 0;
+
+ /* Set descriptor array base addresses */
+ pdcs->txd_64 = (struct dma64dd *)pdcs->tx_ring_alloc.vbase;
+ pdcs->rxd_64 = (struct dma64dd *)pdcs->rx_ring_alloc.vbase;
+
+ /* Tell device the base DMA address of each ring */
+ dma_reg = &pdcs->regs->dmaregs[ringset];
+ iowrite32(lower_32_bits(pdcs->tx_ring_alloc.dmabase),
+ (void *)&dma_reg->dmaxmt.addrlow);
+ iowrite32(upper_32_bits(pdcs->tx_ring_alloc.dmabase),
+ (void *)&dma_reg->dmaxmt.addrhigh);
+
+ iowrite32(lower_32_bits(pdcs->rx_ring_alloc.dmabase),
+ (void *)&dma_reg->dmarcv.addrlow);
+ iowrite32(upper_32_bits(pdcs->rx_ring_alloc.dmabase),
+ (void *)&dma_reg->dmarcv.addrhigh);
+
+ /* Initialize descriptors */
+ for (i = 0; i < PDC_RING_ENTRIES; i++) {
+ /* Every tx descriptor can be used for start of frame. */
+ if (i != pdcs->ntxpost) {
+ iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF,
+ (void *)&pdcs->txd_64[i].ctrl1);
+ } else {
+ /* Last descriptor in ringset. Set End of Table. */
+ iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF |
+ D64_CTRL1_EOT,
+ (void *)&pdcs->txd_64[i].ctrl1);
+ }
+
+ /* Every rx descriptor can be used for start of frame */
+ if (i != pdcs->nrxpost) {
+ iowrite32(D64_CTRL1_SOF,
+ (void *)&pdcs->rxd_64[i].ctrl1);
+ } else {
+ /* Last descriptor in ringset. Set End of Table. */
+ iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOT,
+ (void *)&pdcs->rxd_64[i].ctrl1);
+ }
+ }
+ spin_unlock(&pdcs->pdc_lock);
+ return PDC_SUCCESS;
+
+fail_dealloc:
+ dma_pool_free(pdcs->ring_pool, tx.vbase, tx.dmabase);
+done:
+ return err;
+}
+
+static void pdc_ring_free(struct pdc_state *pdcs)
+{
+ if (pdcs->tx_ring_alloc.vbase) {
+ dma_pool_free(pdcs->ring_pool, pdcs->tx_ring_alloc.vbase,
+ pdcs->tx_ring_alloc.dmabase);
+ pdcs->tx_ring_alloc.vbase = NULL;
+ }
+
+ if (pdcs->rx_ring_alloc.vbase) {
+ dma_pool_free(pdcs->ring_pool, pdcs->rx_ring_alloc.vbase,
+ pdcs->rx_ring_alloc.dmabase);
+ pdcs->rx_ring_alloc.vbase = NULL;
+ }
+}
+
+/**
+ * pdc_send_data() - mailbox send_data function
+ * @chan: The mailbox channel on which the data is sent. The channel
+ * corresponds to a DMA ringset.
+ * @data: The mailbox message to be sent. The message must be a
+ * brcm_message structure.
+ *
+ * This function is registered as the send_data function for the mailbox
+ * controller. From the destination scatterlist in the mailbox message, it
+ * creates a sequence of receive descriptors in the rx ring. From the source
+ * scatterlist, it creates a sequence of transmit descriptors in the tx ring.
+ * After creating the descriptors, it writes the rx ptr and tx ptr registers to
+ * initiate the DMA transfer.
+ *
+ * This function does the DMA map and unmap of the src and dst scatterlists in
+ * the mailbox message.
+ *
+ * Return: 0 if successful
+ * -ENOTSUPP if the mailbox message is a type this driver does not
+ * support
+ * < 0 if an error
+ */
+static int pdc_send_data(struct mbox_chan *chan, void *data)
+{
+ struct pdc_state *pdcs = chan->con_priv;
+ struct device *dev = &pdcs->pdev->dev;
+ struct brcm_message *mssg = data;
+ int err = PDC_SUCCESS;
+ int src_nent;
+ int dst_nent;
+ int nent;
+
+ if (mssg->type != BRCM_MESSAGE_SPU)
+ return -ENOTSUPP;
+
+ src_nent = sg_nents(mssg->spu.src);
+ if (src_nent) {
+ nent = dma_map_sg(dev, mssg->spu.src, src_nent, DMA_TO_DEVICE);
+ if (nent == 0)
+ return -EIO;
+ }
+
+ dst_nent = sg_nents(mssg->spu.dst);
+ if (dst_nent) {
+ nent = dma_map_sg(dev, mssg->spu.dst, dst_nent,
+ DMA_FROM_DEVICE);
+ if (nent == 0) {
+ dma_unmap_sg(dev, mssg->spu.src, src_nent,
+ DMA_TO_DEVICE);
+ return -EIO;
+ }
+ }
+
+ spin_lock(&pdcs->pdc_lock);
+
+ /* Create rx descriptors to catch the SPU response */
+ err = pdc_rx_list_init(pdcs, mssg->spu.dst, mssg->ctx);
+ err |= pdc_rx_list_sg_add(pdcs, mssg->spu.dst);
+
+ /* Create tx descriptors to submit SPU request */
+ err |= pdc_tx_list_sg_add(pdcs, mssg->spu.src);
+ err |= pdc_tx_list_final(pdcs); /* initiate transfer */
+
+ spin_unlock(&pdcs->pdc_lock);
+
+ if (err)
+ dev_err(&pdcs->pdev->dev,
+ "%s failed with error %d", __func__, err);
+
+ return err;
+}
+
+static int pdc_startup(struct mbox_chan *chan)
+{
+ return pdc_ring_init(chan->con_priv, PDC_RINGSET);
+}
+
+static void pdc_shutdown(struct mbox_chan *chan)
+{
+ struct pdc_state *pdcs = chan->con_priv;
+
+ if (!pdcs)
+ return;
+
+ dev_dbg(&pdcs->pdev->dev,
+ "Shutdown mailbox channel for PDC %u", pdcs->pdc_idx);
+ pdc_ring_free(pdcs);
+}
+
+/**
+ * pdc_hw_init() - Use the given initialization parameters to initialize the
+ * state for one of the PDCs.
+ * @pdcs: state of the PDC
+ */
+static
+void pdc_hw_init(struct pdc_state *pdcs)
+{
+ struct platform_device *pdev;
+ struct device *dev;
+ struct dma64 *dma_reg;
+ int ringset = PDC_RINGSET;
+
+ pdev = pdcs->pdev;
+ dev = &pdev->dev;
+
+ dev_dbg(dev, "PDC %u initial values:", pdcs->pdc_idx);
+ dev_dbg(dev, "state structure: %p",
+ pdcs);
+ dev_dbg(dev, " - base virtual addr of hw regs %p",
+ pdcs->pdc_reg_vbase);
+
+ /* initialize data structures */
+ pdcs->regs = (struct pdc_regs *)pdcs->pdc_reg_vbase;
+ pdcs->txregs_64 = (struct dma64_regs *)
+ (void *)(((u8 *)pdcs->pdc_reg_vbase) +
+ PDC_TXREGS_OFFSET + (sizeof(struct dma64) * ringset));
+ pdcs->rxregs_64 = (struct dma64_regs *)
+ (void *)(((u8 *)pdcs->pdc_reg_vbase) +
+ PDC_RXREGS_OFFSET + (sizeof(struct dma64) * ringset));
+
+ pdcs->ntxd = PDC_RING_ENTRIES;
+ pdcs->nrxd = PDC_RING_ENTRIES;
+ pdcs->ntxpost = PDC_RING_ENTRIES - 1;
+ pdcs->nrxpost = PDC_RING_ENTRIES - 1;
+ pdcs->regs->intmask = 0;
+
+ dma_reg = &pdcs->regs->dmaregs[ringset];
+ iowrite32(0, (void *)&dma_reg->dmaxmt.ptr);
+ iowrite32(0, (void *)&dma_reg->dmarcv.ptr);
+
+ iowrite32(PDC_TX_CTL, (void *)&dma_reg->dmaxmt.control);
+
+ iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
+ (void *)&dma_reg->dmarcv.control);
+
+ if (pdcs->pdc_resp_hdr_len == PDC_SPU2_RESP_HDR_LEN)
+ iowrite32(PDC_CKSUM_CTRL,
+ pdcs->pdc_reg_vbase + PDC_CKSUM_CTRL_OFFSET);
+}
+
+/**
+ * pdc_rx_buf_pool_create() - Create a pool of receive buffers used to catch
+ * the metadata header returned with each response message.
+ * @pdcs: PDC state structure
+ *
+ * The metadata is not returned to the mailbox client. So the PDC driver
+ * manages these buffers.
+ *
+ * Return: PDC_SUCCESS
+ * -ENOMEM if pool creation fails
+ */
+static int pdc_rx_buf_pool_create(struct pdc_state *pdcs)
+{
+ struct platform_device *pdev;
+ struct device *dev;
+
+ pdev = pdcs->pdev;
+ dev = &pdev->dev;
+
+ pdcs->pdc_resp_hdr_len = pdcs->rx_status_len;
+ if (pdcs->use_bcm_hdr)
+ pdcs->pdc_resp_hdr_len += BCM_HDR_LEN;
+
+ pdcs->rx_buf_pool = dma_pool_create("pdc rx bufs", dev,
+ pdcs->pdc_resp_hdr_len,
+ RX_BUF_ALIGN, 0);
+ if (!pdcs->rx_buf_pool)
+ return -ENOMEM;
+
+ return PDC_SUCCESS;
+}
+
+/**
+ * pdc_interrupts_init() - Initialize the interrupt configuration for a PDC and
+ * specify a threaded IRQ handler for deferred handling of interrupts outside of
+ * interrupt context.
+ * @pdcs: PDC state
+ *
+ * Set the interrupt mask for transmit and receive done.
+ * Set the lazy interrupt frame count to generate an interrupt for just one
+ * packet.
+ *
+ * Return: PDC_SUCCESS
+ * <0 if threaded irq request fails
+ */
+static int pdc_interrupts_init(struct pdc_state *pdcs)
+{
+ struct platform_device *pdev = pdcs->pdev;
+ struct device *dev = &pdev->dev;
+ struct device_node *dn = pdev->dev.of_node;
+ int err;
+
+ pdcs->intstatus = 0;
+
+ /* interrupt configuration */
+ iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
+ iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase + PDC_RCVLAZY0_OFFSET);
+
+ /* read irq from device tree */
+ pdcs->pdc_irq = irq_of_parse_and_map(dn, 0);
+ dev_dbg(dev, "pdc device %s irq %u for pdcs %p",
+ dev_name(dev), pdcs->pdc_irq, pdcs);
+ err = devm_request_threaded_irq(dev, pdcs->pdc_irq,
+ pdc_irq_handler,
+ pdc_irq_thread, 0, dev_name(dev), pdcs);
+ if (err) {
+ dev_err(dev, "threaded tx IRQ %u request failed with err %d\n",
+ pdcs->pdc_irq, err);
+ return err;
+ }
+ return PDC_SUCCESS;
+}
+
+static const struct mbox_chan_ops pdc_mbox_chan_ops = {
+ .send_data = pdc_send_data,
+ .startup = pdc_startup,
+ .shutdown = pdc_shutdown
+};
+
+/**
+ * pdc_mb_init() - Initialize the mailbox controller.
+ * @pdcs: PDC state
+ *
+ * Each PDC is a mailbox controller. Each ringset is a mailbox channel. Kernel
+ * driver only uses one ringset and thus one mb channel. PDC uses the transmit
+ * complete interrupt to determine when a mailbox message has successfully been
+ * transmitted.
+ *
+ * Return: 0 on success
+ * < 0 if there is an allocation or registration failure
+ */
+static int pdc_mb_init(struct pdc_state *pdcs)
+{
+ struct device *dev = &pdcs->pdev->dev;
+ struct mbox_controller *mbc;
+ int chan_index;
+ int err;
+
+ mbc = &pdcs->mbc;
+ mbc->dev = dev;
+ mbc->ops = &pdc_mbox_chan_ops;
+ mbc->num_chans = 1;
+ mbc->chans = devm_kcalloc(dev, mbc->num_chans, sizeof(*mbc->chans),
+ GFP_KERNEL);
+ if (!mbc->chans)
+ return -ENOMEM;
+
+ mbc->txdone_irq = true;
+ mbc->txdone_poll = false;
+ for (chan_index = 0; chan_index < mbc->num_chans; chan_index++)
+ mbc->chans[chan_index].con_priv = pdcs;
+
+ /* Register mailbox controller */
+ err = mbox_controller_register(mbc);
+ if (err) {
+ dev_crit(dev,
+ "Failed to register PDC mailbox controller. Error %d.",
+ err);
+ return err;
+ }
+ return 0;
+}
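+
+/*
+ * Minimal sketch of the client side of this controller (illustrative only;
+ * the mbox_client setup, scatterlists, and request context are placeholders
+ * rather than part of this driver):
+ *
+ * struct mbox_chan *chan = mbox_request_channel(&client, 0);
+ * struct brcm_message msg = {
+ * .type = BRCM_MESSAGE_SPU,
+ * .spu.src = src_sg, // request scatterlist, already built
+ * .spu.dst = dst_sg, // response scatterlist
+ * .ctx = my_request, // echoed back by pdc_receive()
+ * };
+ * mbox_send_message(chan, &msg);
+ */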
+
+/**
+ * pdc_dt_read() - Read application-specific data from device tree.
+ * @pdev: Platform device
+ * @pdcs: PDC state
+ *
+ * Reads the number of bytes of receive status that precede each received frame.
+ * Reads whether transmit and received frames should be preceded by an 8-byte
+ * BCM header.
+ *
+ * Return: 0 if successful
+ * -ENODEV if device not available
+ */
+static int pdc_dt_read(struct platform_device *pdev, struct pdc_state *pdcs)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *dn = pdev->dev.of_node;
+ int err;
+
+ err = of_property_read_u32(dn, "brcm,rx-status-len",
+ &pdcs->rx_status_len);
+ if (err < 0)
+ dev_err(dev,
+ "%s failed to get DMA receive status length from device tree",
+ __func__);
+
+ pdcs->use_bcm_hdr = of_property_read_bool(dn, "brcm,use-bcm-hdr");
+
+ return 0;
+}
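+
+/*
+ * Example of a device tree node this function parses (property values are
+ * illustrative, not taken from a real board file):
+ *
+ * pdc0: iproc-pdc-mbox@612c0000 {
+ * compatible = "brcm,iproc-pdc-mbox";
+ * reg = <0x612c0000 0x445>;
+ * interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
+ * #mbox-cells = <1>;
+ * brcm,rx-status-len = <32>;
+ * brcm,use-bcm-hdr;
+ * };
+ */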
+
+/**
+ * pdc_probe() - Probe function for PDC driver.
+ * @pdev: PDC platform device
+ *
+ * Reserve and map register regions defined in device tree.
+ * Allocate and initialize tx and rx DMA rings.
+ * Initialize a mailbox controller for each PDC.
+ *
+ * Return: 0 if successful
+ * < 0 if an error
+ */
+static int pdc_probe(struct platform_device *pdev)
+{
+ int err = 0;
+ struct device *dev = &pdev->dev;
+ struct resource *pdc_regs;
+ struct pdc_state *pdcs;
+
+ /* PDC state for one SPU */
+ pdcs = devm_kzalloc(dev, sizeof(*pdcs), GFP_KERNEL);
+ if (!pdcs) {
+ err = -ENOMEM;
+ goto cleanup;
+ }
+
+ spin_lock_init(&pdcs->pdc_lock);
+ pdcs->pdev = pdev;
+ platform_set_drvdata(pdev, pdcs);
+ pdcs->pdc_idx = pdcg.num_spu;
+ pdcg.num_spu++;
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_warn(dev, "PDC device cannot perform DMA. Error %d.", err);
+ goto cleanup;
+ }
+
+ /* Create DMA pool for tx ring */
+ pdcs->ring_pool = dma_pool_create("pdc rings", dev, PDC_RING_SIZE,
+ RING_ALIGN, 0);
+ if (!pdcs->ring_pool) {
+ err = -ENOMEM;
+ goto cleanup;
+ }
+
+ err = pdc_dt_read(pdev, pdcs);
+ if (err)
+ goto cleanup_ring_pool;
+
+ pdc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!pdc_regs) {
+ err = -ENODEV;
+ goto cleanup_ring_pool;
+ }
+ dev_dbg(dev, "PDC register region res.start = %pa, res.end = %pa",
+ &pdc_regs->start, &pdc_regs->end);
+
+ pdcs->pdc_reg_vbase = devm_ioremap_resource(&pdev->dev, pdc_regs);
+ if (IS_ERR(pdcs->pdc_reg_vbase)) {
+ err = PTR_ERR(pdcs->pdc_reg_vbase);
+ dev_err(&pdev->dev, "Failed to map registers: %d\n", err);
+ goto cleanup_ring_pool;
+ }
+
+ /* create rx buffer pool after dt read to know how big buffers are */
+ err = pdc_rx_buf_pool_create(pdcs);
+ if (err)
+ goto cleanup_ring_pool;
+
+ pdc_hw_init(pdcs);
+
+ err = pdc_interrupts_init(pdcs);
+ if (err)
+ goto cleanup_buf_pool;
+
+ /* Initialize mailbox controller */
+ err = pdc_mb_init(pdcs);
+ if (err)
+ goto cleanup_buf_pool;
+
+ pdcs->debugfs_stats = NULL;
+ pdc_setup_debugfs(pdcs);
+
+ dev_dbg(dev, "pdc_probe() successful");
+ return PDC_SUCCESS;
+
+cleanup_buf_pool:
+ dma_pool_destroy(pdcs->rx_buf_pool);
+
+cleanup_ring_pool:
+ dma_pool_destroy(pdcs->ring_pool);
+
+cleanup:
+ return err;
+}
+
+static int pdc_remove(struct platform_device *pdev)
+{
+ struct pdc_state *pdcs = platform_get_drvdata(pdev);
+
+ pdc_free_debugfs();
+
+ mbox_controller_unregister(&pdcs->mbc);
+
+ dma_pool_destroy(pdcs->rx_buf_pool);
+ dma_pool_destroy(pdcs->ring_pool);
+ return 0;
+}
+
+static const struct of_device_id pdc_mbox_of_match[] = {
+ {.compatible = "brcm,iproc-pdc-mbox"},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, pdc_mbox_of_match);
+
+static struct platform_driver pdc_mbox_driver = {
+ .probe = pdc_probe,
+ .remove = pdc_remove,
+ .driver = {
+ .name = "brcm-iproc-pdc-mbox",
+ .of_match_table = of_match_ptr(pdc_mbox_of_match),
+ },
+};
+module_platform_driver(pdc_mbox_driver);
+
+MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom PDC mailbox driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
index 58d04726c..9ca96e9db 100644
--- a/drivers/mailbox/mailbox-test.c
+++ b/drivers/mailbox/mailbox-test.c
@@ -133,6 +133,7 @@ static ssize_t mbox_test_message_write(struct file *filp,
out:
kfree(tdev->signal);
kfree(tdev->message);
+ tdev->signal = NULL;
return ret < 0 ? ret : count;
}
diff --git a/drivers/mailbox/pl320-ipc.c b/drivers/mailbox/pl320-ipc.c
index f80acb36f..2dbed8709 100644
--- a/drivers/mailbox/pl320-ipc.c
+++ b/drivers/mailbox/pl320-ipc.c
@@ -58,29 +58,29 @@ static ATOMIC_NOTIFIER_HEAD(ipc_notifier);
static inline void set_destination(int source, int mbox)
{
- __raw_writel(CHAN_MASK(source), ipc_base + IPCMxDSET(mbox));
- __raw_writel(CHAN_MASK(source), ipc_base + IPCMxMSET(mbox));
+ writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxDSET(mbox));
+ writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxMSET(mbox));
}
static inline void clear_destination(int source, int mbox)
{
- __raw_writel(CHAN_MASK(source), ipc_base + IPCMxDCLEAR(mbox));
- __raw_writel(CHAN_MASK(source), ipc_base + IPCMxMCLEAR(mbox));
+ writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxDCLEAR(mbox));
+ writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxMCLEAR(mbox));
}
static void __ipc_send(int mbox, u32 *data)
{
int i;
for (i = 0; i < 7; i++)
- __raw_writel(data[i], ipc_base + IPCMxDR(mbox, i));
- __raw_writel(0x1, ipc_base + IPCMxSEND(mbox));
+ writel_relaxed(data[i], ipc_base + IPCMxDR(mbox, i));
+ writel_relaxed(0x1, ipc_base + IPCMxSEND(mbox));
}
static u32 __ipc_rcv(int mbox, u32 *data)
{
int i;
for (i = 0; i < 7; i++)
- data[i] = __raw_readl(ipc_base + IPCMxDR(mbox, i));
+ data[i] = readl_relaxed(ipc_base + IPCMxDR(mbox, i));
return data[1];
}
@@ -112,15 +112,15 @@ static irqreturn_t ipc_handler(int irq, void *dev)
u32 irq_stat;
u32 data[7];
- irq_stat = __raw_readl(ipc_base + IPCMMIS(1));
+ irq_stat = readl_relaxed(ipc_base + IPCMMIS(1));
if (irq_stat & MBOX_MASK(IPC_TX_MBOX)) {
- __raw_writel(0, ipc_base + IPCMxSEND(IPC_TX_MBOX));
+ writel_relaxed(0, ipc_base + IPCMxSEND(IPC_TX_MBOX));
complete(&ipc_completion);
}
if (irq_stat & MBOX_MASK(IPC_RX_MBOX)) {
__ipc_rcv(IPC_RX_MBOX, data);
atomic_notifier_call_chain(&ipc_notifier, data[0], data + 1);
- __raw_writel(2, ipc_base + IPCMxSEND(IPC_RX_MBOX));
+ writel_relaxed(2, ipc_base + IPCMxSEND(IPC_RX_MBOX));
}
return IRQ_HANDLED;
@@ -146,7 +146,7 @@ static int pl320_probe(struct amba_device *adev, const struct amba_id *id)
if (ipc_base == NULL)
return -ENOMEM;
- __raw_writel(0, ipc_base + IPCMxSEND(IPC_TX_MBOX));
+ writel_relaxed(0, ipc_base + IPCMxSEND(IPC_TX_MBOX));
ipc_irq = adev->irq[0];
ret = request_irq(ipc_irq, ipc_handler, 0, dev_name(&adev->dev), NULL);
@@ -154,20 +154,20 @@ static int pl320_probe(struct amba_device *adev, const struct amba_id *id)
goto err;
/* Init slow mailbox */
- __raw_writel(CHAN_MASK(A9_SOURCE),
- ipc_base + IPCMxSOURCE(IPC_TX_MBOX));
- __raw_writel(CHAN_MASK(M3_SOURCE),
- ipc_base + IPCMxDSET(IPC_TX_MBOX));
- __raw_writel(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE),
- ipc_base + IPCMxMSET(IPC_TX_MBOX));
+ writel_relaxed(CHAN_MASK(A9_SOURCE),
+ ipc_base + IPCMxSOURCE(IPC_TX_MBOX));
+ writel_relaxed(CHAN_MASK(M3_SOURCE),
+ ipc_base + IPCMxDSET(IPC_TX_MBOX));
+ writel_relaxed(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE),
+ ipc_base + IPCMxMSET(IPC_TX_MBOX));
/* Init receive mailbox */
- __raw_writel(CHAN_MASK(M3_SOURCE),
- ipc_base + IPCMxSOURCE(IPC_RX_MBOX));
- __raw_writel(CHAN_MASK(A9_SOURCE),
- ipc_base + IPCMxDSET(IPC_RX_MBOX));
- __raw_writel(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE),
- ipc_base + IPCMxMSET(IPC_RX_MBOX));
+ writel_relaxed(CHAN_MASK(M3_SOURCE),
+ ipc_base + IPCMxSOURCE(IPC_RX_MBOX));
+ writel_relaxed(CHAN_MASK(A9_SOURCE),
+ ipc_base + IPCMxDSET(IPC_RX_MBOX));
+ writel_relaxed(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE),
+ ipc_base + IPCMxMSET(IPC_RX_MBOX));
return 0;
err:
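
The conversion above swaps __raw_writel(), which stores in native byte order with no barriers, for writel_relaxed(), which still omits the barrier but byte-swaps as needed so the little-endian MMIO registers stay correct on big-endian kernels. A short sketch of the distinction, assuming an ioremap()ed base:

/* Sketch; 'base' is assumed to come from ioremap(). */
#include <linux/io.h>

static void kick_mailbox(void __iomem *base)
{
        /*
         * Little-endian register write that is also correct on
         * big-endian kernels; "relaxed" means no barrier is implied.
         */
        writel_relaxed(0x1, base);

        /*
         * The non-relaxed form adds the barrier, for writes that must
         * be ordered against prior memory accesses (e.g. DMA buffers).
         */
        writel(0x1, base);
}
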
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 52ba8dd82..3cbda1af8 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -3,7 +3,8 @@
#
dm-mod-y += dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \
- dm-ioctl.o dm-io.o dm-kcopyd.o dm-sysfs.o dm-stats.o
+ dm-ioctl.o dm-io.o dm-kcopyd.o dm-sysfs.o dm-stats.o \
+ dm-rq.o
dm-multipath-y += dm-path-selector.o dm-mpath.o
dm-snapshot-y += dm-snap.o dm-exception-store.o dm-snap-transient.o \
dm-snap-persistent.o
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index eab505ee0..76f7534d1 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -294,10 +294,10 @@ static void bch_btree_node_read(struct btree *b)
closure_init_stack(&cl);
bio = bch_bbio_alloc(b->c);
- bio->bi_rw = REQ_META|READ_SYNC;
bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
bio->bi_end_io = btree_node_read_endio;
bio->bi_private = &cl;
+ bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);
bch_bio_map(bio, b->keys.set[0].data);
@@ -396,8 +396,8 @@ static void do_btree_node_write(struct btree *b)
b->bio->bi_end_io = btree_node_write_endio;
b->bio->bi_private = cl;
- b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA;
b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
+ bio_set_op_attrs(b->bio, REQ_OP_WRITE, REQ_META|WRITE_SYNC|REQ_FUA);
bch_bio_map(b->bio, i);
/*
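
This hunk is the first of many in the series that split the old bi_rw word into a request operation (REQ_OP_*) plus flags; bio_set_op_attrs() fills both at once. A minimal sketch of the 4.8-era accessors, assuming the standard block headers:

/* Minimal sketch of the 4.8-era bio op/flags accessors. */
#include <linux/bio.h>

static void prep_meta_read(struct bio *bio)
{
        /* Replaces the old:  bio->bi_rw = REQ_META|READ_SYNC; */
        bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | READ_SYNC);

        /* The operation and the flags can then be queried separately. */
        if (bio_op(bio) == REQ_OP_READ && (bio->bi_opf & REQ_META))
                pr_debug("metadata read\n");
}
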
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index 9eaf1d6e8..864e673ae 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -112,7 +112,7 @@ bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
EXPORT_SYMBOL(closure_wait);
/**
- * closure_sync - sleep until a closure a closure has nothing left to wait on
+ * closure_sync - sleep until a closure has nothing left to wait on
*
* Sleeps until the refcount hits 1 - the thread that's running the closure owns
* the last refcount.
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index 782cc2c8a..9b2fe2d3e 100644
--- a/drivers/md/bcache/closure.h
+++ b/drivers/md/bcache/closure.h
@@ -31,7 +31,8 @@
* passing it, as you might expect, the function to run when nothing is pending
* and the workqueue to run that function out of.
*
- * continue_at() also, critically, is a macro that returns the calling function.
+ * continue_at() also, critically, requires a 'return' immediately following the
+ * location where this macro is referenced, to return to the calling function.
* There's good reason for this.
*
* To safely use closures asynchronously, they must always have a refcount while
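
As the corrected comment says, nothing may execute after continue_at(): the closure may already be running elsewhere. A minimal sketch of the calling convention, where example_done() and the workqueue pointer wq are hypothetical:

/* Sketch of the convention; assumes bcache's "closure.h". */
#include "closure.h"

static struct workqueue_struct *wq;     /* hypothetical workqueue */

static void example_done(struct closure *cl)
{
        closure_return(cl);             /* drop the final refcount */
}

static void example_work(struct closure *cl)
{
        /* ... one stage of asynchronous work ... */
        continue_at(cl, example_done, wq);
        return;         /* required: nothing may run after continue_at() */
}
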
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 8b1f1d5c1..c28df1647 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -52,9 +52,10 @@ void bch_btree_verify(struct btree *b)
bio->bi_bdev = PTR_CACHE(b->c, &b->key, 0)->bdev;
bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9;
+ bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);
bch_bio_map(bio, sorted);
- submit_bio_wait(REQ_META|READ_SYNC, bio);
+ submit_bio_wait(bio);
bch_bbio_free(bio, b->c);
memcpy(ondisk, sorted, KEY_SIZE(&v->key) << 9);
@@ -113,11 +114,12 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
check = bio_clone(bio, GFP_NOIO);
if (!check)
return;
+ bio_set_op_attrs(check, REQ_OP_READ, READ_SYNC);
if (bio_alloc_pages(check, GFP_NOIO))
goto out_put;
- submit_bio_wait(READ_SYNC, check);
+ submit_bio_wait(check);
bio_for_each_segment(bv, bio, iter) {
void *p1 = kmap_atomic(bv.bv_page);
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 86a0bb871..e97b0acf7 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -25,7 +25,6 @@ struct bio *bch_bbio_alloc(struct cache_set *c)
struct bio *bio = &b->bio;
bio_init(bio);
- bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET;
bio->bi_max_vecs = bucket_pages(c);
bio->bi_io_vec = bio->bi_inline_vecs;
@@ -111,7 +110,7 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
struct bbio *b = container_of(bio, struct bbio, bio);
struct cache *ca = PTR_CACHE(c, &b->key, 0);
- unsigned threshold = bio->bi_rw & REQ_WRITE
+ unsigned threshold = op_is_write(bio_op(bio))
? c->congested_write_threshold_us
: c->congested_read_threshold_us;
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 29eba7219..6925023e1 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -54,11 +54,11 @@ reread: left = ca->sb.bucket_size - offset;
bio_reset(bio);
bio->bi_iter.bi_sector = bucket + offset;
bio->bi_bdev = ca->bdev;
- bio->bi_rw = READ;
bio->bi_iter.bi_size = len << 9;
bio->bi_end_io = journal_read_endio;
bio->bi_private = &cl;
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
bch_bio_map(bio, data);
closure_bio_submit(bio, &cl);
@@ -418,7 +418,7 @@ static void journal_discard_work(struct work_struct *work)
struct journal_device *ja =
container_of(work, struct journal_device, discard_work);
- submit_bio(0, &ja->discard_bio);
+ submit_bio(&ja->discard_bio);
}
static void do_journal_discard(struct cache *ca)
@@ -449,10 +449,10 @@ static void do_journal_discard(struct cache *ca)
atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
bio_init(bio);
+ bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
bio->bi_iter.bi_sector = bucket_to_sector(ca->set,
ca->sb.d[ja->discard_idx]);
bio->bi_bdev = ca->bdev;
- bio->bi_rw = REQ_WRITE|REQ_DISCARD;
bio->bi_max_vecs = 1;
bio->bi_io_vec = bio->bi_inline_vecs;
bio->bi_iter.bi_size = bucket_bytes(ca);
@@ -626,11 +626,12 @@ static void journal_write_unlocked(struct closure *cl)
bio_reset(bio);
bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
bio->bi_bdev = ca->bdev;
- bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
bio->bi_iter.bi_size = sectors << 9;
bio->bi_end_io = journal_write_endio;
bio->bi_private = w;
+ bio_set_op_attrs(bio, REQ_OP_WRITE,
+ REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
bch_bio_map(bio, w->data);
trace_bcache_journal_write(bio);
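
With this series submit_bio() drops its (rw, bio) pair: the operation and flags are carried on the bio itself and must be set before submission, as journal_write_unlocked() now does. A minimal sketch:

/* Minimal sketch of the single-argument submit path. */
#include <linux/bio.h>

static void submit_journal_like(struct bio *bio)
{
        bio_set_op_attrs(bio, REQ_OP_WRITE,
                         REQ_SYNC | REQ_META | REQ_PREFLUSH | REQ_FUA);
        submit_bio(bio);                /* no more (rw, bio) pair */
}
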
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index b929fc944..1881319f2 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -163,7 +163,7 @@ static void read_moving(struct cache_set *c)
moving_init(io);
bio = &io->bio.bio;
- bio->bi_rw = READ;
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
bio->bi_end_io = read_moving_endio;
if (bio_alloc_pages(bio, GFP_KERNEL))
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 25fa8445b..4b177fe11 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -205,10 +205,10 @@ static void bch_data_insert_start(struct closure *cl)
return bch_data_invalidate(cl);
/*
- * Journal writes are marked REQ_FLUSH; if the original write was a
+ * Journal writes are marked REQ_PREFLUSH; if the original write was a
* flush, it'll wait on the journal write.
*/
- bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);
+ bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);
do {
unsigned i;
@@ -253,7 +253,7 @@ static void bch_data_insert_start(struct closure *cl)
trace_bcache_cache_insert(k);
bch_keylist_push(&op->insert_keys);
- n->bi_rw |= REQ_WRITE;
+ bio_set_op_attrs(n, REQ_OP_WRITE, 0);
bch_submit_bbio(n, op->c, k, 0);
} while (n != bio);
@@ -378,12 +378,12 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
- (bio->bi_rw & REQ_DISCARD))
+ (bio_op(bio) == REQ_OP_DISCARD))
goto skip;
if (mode == CACHE_MODE_NONE ||
(mode == CACHE_MODE_WRITEAROUND &&
- (bio->bi_rw & REQ_WRITE)))
+ op_is_write(bio_op(bio))))
goto skip;
if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
@@ -404,8 +404,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
if (!congested &&
mode == CACHE_MODE_WRITEBACK &&
- (bio->bi_rw & REQ_WRITE) &&
- (bio->bi_rw & REQ_SYNC))
+ op_is_write(bio_op(bio)) &&
+ (bio->bi_opf & REQ_SYNC))
goto rescale;
spin_lock(&dc->io_lock);
@@ -657,7 +657,7 @@ static inline struct search *search_alloc(struct bio *bio,
s->cache_miss = NULL;
s->d = d;
s->recoverable = 1;
- s->write = (bio->bi_rw & REQ_WRITE) != 0;
+ s->write = op_is_write(bio_op(bio));
s->read_dirty_data = 0;
s->start_time = jiffies;
@@ -668,7 +668,7 @@ static inline struct search *search_alloc(struct bio *bio,
s->iop.write_prio = 0;
s->iop.error = 0;
s->iop.flags = 0;
- s->iop.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
+ s->iop.flush_journal = (bio->bi_opf & (REQ_PREFLUSH|REQ_FUA)) != 0;
s->iop.wq = bcache_wq;
return s;
@@ -796,8 +796,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
goto out_submit;
}
- if (!(bio->bi_rw & REQ_RAHEAD) &&
- !(bio->bi_rw & REQ_META) &&
+ if (!(bio->bi_opf & REQ_RAHEAD) &&
+ !(bio->bi_opf & REQ_META) &&
s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
reada = min_t(sector_t, dc->readahead >> 9,
bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));
@@ -899,7 +899,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
* But check_overlapping drops dirty keys for which io hasn't started,
* so we still want to call it.
*/
- if (bio->bi_rw & REQ_DISCARD)
+ if (bio_op(bio) == REQ_OP_DISCARD)
s->iop.bypass = true;
if (should_writeback(dc, s->orig_bio,
@@ -913,22 +913,22 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
s->iop.bio = s->orig_bio;
bio_get(s->iop.bio);
- if (!(bio->bi_rw & REQ_DISCARD) ||
+ if ((bio_op(bio) != REQ_OP_DISCARD) ||
blk_queue_discard(bdev_get_queue(dc->bdev)))
closure_bio_submit(bio, cl);
} else if (s->iop.writeback) {
bch_writeback_add(dc);
s->iop.bio = bio;
- if (bio->bi_rw & REQ_FLUSH) {
+ if (bio->bi_opf & REQ_PREFLUSH) {
/* Also need to send a flush to the backing device */
struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
dc->disk.bio_split);
- flush->bi_rw = WRITE_FLUSH;
flush->bi_bdev = bio->bi_bdev;
flush->bi_end_io = request_endio;
flush->bi_private = cl;
+ bio_set_op_attrs(flush, REQ_OP_WRITE, WRITE_FLUSH);
closure_bio_submit(flush, cl);
}
@@ -992,7 +992,7 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q,
cached_dev_read(dc, s);
}
} else {
- if ((bio->bi_rw & REQ_DISCARD) &&
+ if ((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(dc->bdev)))
bio_endio(bio);
else
@@ -1103,7 +1103,7 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q,
&KEY(d->id, bio->bi_iter.bi_sector, 0),
&KEY(d->id, bio_end_sector(bio), 0));
- s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0;
+ s->iop.bypass = (bio_op(bio) == REQ_OP_DISCARD) != 0;
s->iop.writeback = true;
s->iop.bio = bio;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 5d3b2318c..849ad441c 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -134,7 +134,6 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
case BCACHE_SB_VERSION_CDEV:
case BCACHE_SB_VERSION_CDEV_WITH_UUID:
sb->nbuckets = le64_to_cpu(s->nbuckets);
- sb->block_size = le16_to_cpu(s->block_size);
sb->bucket_size = le16_to_cpu(s->bucket_size);
sb->nr_in_set = le16_to_cpu(s->nr_in_set);
@@ -212,8 +211,8 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)
unsigned i;
bio->bi_iter.bi_sector = SB_SECTOR;
- bio->bi_rw = REQ_SYNC|REQ_META;
bio->bi_iter.bi_size = SB_SIZE;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
bch_bio_map(bio, NULL);
out->offset = cpu_to_le64(sb->offset);
@@ -238,7 +237,7 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)
pr_debug("ver %llu, flags %llu, seq %llu",
sb->version, sb->flags, sb->seq);
- submit_bio(REQ_WRITE, bio);
+ submit_bio(bio);
}
static void bch_write_bdev_super_unlock(struct closure *cl)
@@ -333,7 +332,7 @@ static void uuid_io_unlock(struct closure *cl)
up(&c->uuid_write_mutex);
}
-static void uuid_io(struct cache_set *c, unsigned long rw,
+static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
struct bkey *k, struct closure *parent)
{
struct closure *cl = &c->uuid_write;
@@ -348,21 +347,22 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
for (i = 0; i < KEY_PTRS(k); i++) {
struct bio *bio = bch_bbio_alloc(c);
- bio->bi_rw = REQ_SYNC|REQ_META|rw;
+ bio->bi_opf = REQ_SYNC | REQ_META | op_flags;
bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
bio->bi_end_io = uuid_endio;
bio->bi_private = cl;
+ bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
bch_bio_map(bio, c->uuids);
bch_submit_bbio(bio, c, k, i);
- if (!(rw & WRITE))
+ if (op != REQ_OP_WRITE)
break;
}
bch_extent_to_text(buf, sizeof(buf), k);
- pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read", buf);
+ pr_debug("%s UUIDs at %s", op == REQ_OP_WRITE ? "wrote" : "read", buf);
for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
if (!bch_is_zero(u->uuid, 16))
@@ -381,7 +381,7 @@ static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
return "bad uuid pointer";
bkey_copy(&c->uuid_bucket, k);
- uuid_io(c, READ_SYNC, k, cl);
+ uuid_io(c, REQ_OP_READ, READ_SYNC, k, cl);
if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
struct uuid_entry_v0 *u0 = (void *) c->uuids;
@@ -426,7 +426,7 @@ static int __uuid_write(struct cache_set *c)
return 1;
SET_KEY_SIZE(&k.key, c->sb.bucket_size);
- uuid_io(c, REQ_WRITE, &k.key, &cl);
+ uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
closure_sync(&cl);
bkey_copy(&c->uuid_bucket, &k.key);
@@ -498,7 +498,8 @@ static void prio_endio(struct bio *bio)
closure_put(&ca->prio);
}
-static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
+static void prio_io(struct cache *ca, uint64_t bucket, int op,
+ unsigned long op_flags)
{
struct closure *cl = &ca->prio;
struct bio *bio = bch_bbio_alloc(ca->set);
@@ -507,11 +508,11 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
bio->bi_bdev = ca->bdev;
- bio->bi_rw = REQ_SYNC|REQ_META|rw;
bio->bi_iter.bi_size = bucket_bytes(ca);
bio->bi_end_io = prio_endio;
bio->bi_private = ca;
+ bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
bch_bio_map(bio, ca->disk_buckets);
closure_bio_submit(bio, &ca->prio);
@@ -557,7 +558,7 @@ void bch_prio_write(struct cache *ca)
BUG_ON(bucket == -1);
mutex_unlock(&ca->set->bucket_lock);
- prio_io(ca, bucket, REQ_WRITE);
+ prio_io(ca, bucket, REQ_OP_WRITE, 0);
mutex_lock(&ca->set->bucket_lock);
ca->prio_buckets[i] = bucket;
@@ -599,7 +600,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)
ca->prio_last_buckets[bucket_nr] = bucket;
bucket_nr++;
- prio_io(ca, bucket, READ_SYNC);
+ prio_io(ca, bucket, REQ_OP_READ, READ_SYNC);
if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
pr_warn("bad csum reading priorities");
@@ -759,7 +760,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
if (!d->nr_stripes ||
d->nr_stripes > INT_MAX ||
d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) {
- pr_err("nr_stripes too large");
+ pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)",
+ (unsigned)d->nr_stripes);
return -ENOMEM;
}
@@ -1518,7 +1520,8 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
!(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
!(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
!(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
- !(c->moving_gc_wq = create_workqueue("bcache_gc")) ||
+ !(c->moving_gc_wq = alloc_workqueue("bcache_gc",
+ WQ_MEM_RECLAIM, 0)) ||
bch_journal_alloc(c) ||
bch_btree_cache_alloc(c) ||
bch_open_buckets_alloc(c) ||
@@ -1803,7 +1806,7 @@ void bch_cache_release(struct kobject *kobj)
module_put(THIS_MODULE);
}
-static int cache_alloc(struct cache_sb *sb, struct cache *ca)
+static int cache_alloc(struct cache *ca)
{
size_t free;
struct bucket *b;
@@ -1842,7 +1845,7 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
struct block_device *bdev, struct cache *ca)
{
char name[BDEVNAME_SIZE];
- const char *err = NULL;
+ const char *err = NULL; /* must be set for any error case */
int ret = 0;
memcpy(&ca->sb, sb, sizeof(struct cache_sb));
@@ -1858,9 +1861,14 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
if (blk_queue_discard(bdev_get_queue(ca->bdev)))
ca->discard = CACHE_DISCARD(&ca->sb);
- ret = cache_alloc(sb, ca);
- if (ret != 0)
+ ret = cache_alloc(ca);
+ if (ret != 0) {
+ if (ret == -ENOMEM)
+ err = "cache_alloc(): -ENOMEM";
+ else
+ err = "cache_alloc(): unknown error";
goto err;
+ }
if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) {
err = "error calling kobject_add";
@@ -2097,7 +2105,7 @@ static int __init bcache_init(void)
return bcache_major;
}
- if (!(bcache_wq = create_workqueue("bcache")) ||
+ if (!(bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0)) ||
!(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
sysfs_create_files(bcache_kobj, files) ||
bch_request_init() ||
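
The two workqueue conversions in this file move off the deprecated create_workqueue() wrapper. Passing WQ_MEM_RECLAIM explicitly keeps the rescuer thread that I/O-path work needs under memory pressure, and a max_active of 0 selects the default concurrency instead of the wrapper's single in-flight item. A sketch:

/* Sketch of the conversion above. */
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int example_init(void)
{
        /*
         * WQ_MEM_RECLAIM keeps a rescuer thread so queued work can
         * still make progress under memory pressure; max_active of 0
         * selects the default concurrency limit.
         */
        example_wq = alloc_workqueue("example", WQ_MEM_RECLAIM, 0);
        return example_wq ? 0 : -ENOMEM;
}
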
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 60123677b..d9fd2a62e 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -182,7 +182,7 @@ static void write_dirty(struct closure *cl)
struct keybuf_key *w = io->bio.bi_private;
dirty_init(w);
- io->bio.bi_rw = WRITE;
+ bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
io->bio.bi_iter.bi_sector = KEY_START(&w->key);
io->bio.bi_bdev = io->dc->bdev;
io->bio.bi_end_io = dirty_endio;
@@ -251,10 +251,10 @@ static void read_dirty(struct cached_dev *dc)
io->dc = dc;
dirty_init(w);
+ bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
io->bio.bi_bdev = PTR_CACHE(dc->disk.c,
&w->key, 0)->bdev;
- io->bio.bi_rw = READ;
io->bio.bi_end_io = read_dirty_endio;
if (bio_alloc_pages(&io->bio, GFP_KERNEL))
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index 073a042ae..301eaf565 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -57,7 +57,7 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
if (would_skip)
return false;
- return bio->bi_rw & REQ_SYNC ||
+ return bio->bi_opf & REQ_SYNC ||
in_use <= CUTOFF_WRITEBACK;
}
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index d8129ec93..13041ee37 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -162,7 +162,7 @@ static int read_sb_page(struct mddev *mddev, loff_t offset,
if (sync_page_io(rdev, target,
roundup(size, bdev_logical_block_size(rdev->bdev)),
- page, READ, true)) {
+ page, REQ_OP_READ, 0, true)) {
page->index = index;
return 0;
}
@@ -297,7 +297,7 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait)
atomic_inc(&bitmap->pending_writes);
set_buffer_locked(bh);
set_buffer_mapped(bh);
- submit_bh(WRITE | REQ_SYNC, bh);
+ submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
bh = bh->b_this_page;
}
@@ -392,7 +392,7 @@ static int read_page(struct file *file, unsigned long index,
atomic_inc(&bitmap->pending_writes);
set_buffer_locked(bh);
set_buffer_mapped(bh);
- submit_bh(READ, bh);
+ submit_bh(REQ_OP_READ, 0, bh);
}
block++;
bh = bh->b_this_page;
@@ -2183,19 +2183,29 @@ location_show(struct mddev *mddev, char *page)
static ssize_t
location_store(struct mddev *mddev, const char *buf, size_t len)
{
+ int rv;
+ rv = mddev_lock(mddev);
+ if (rv)
+ return rv;
if (mddev->pers) {
- if (!mddev->pers->quiesce)
- return -EBUSY;
- if (mddev->recovery || mddev->sync_thread)
- return -EBUSY;
+ if (!mddev->pers->quiesce) {
+ rv = -EBUSY;
+ goto out;
+ }
+ if (mddev->recovery || mddev->sync_thread) {
+ rv = -EBUSY;
+ goto out;
+ }
}
if (mddev->bitmap || mddev->bitmap_info.file ||
mddev->bitmap_info.offset) {
/* bitmap already configured. Only option is to clear it */
- if (strncmp(buf, "none", 4) != 0)
- return -EBUSY;
+ if (strncmp(buf, "none", 4) != 0) {
+ rv = -EBUSY;
+ goto out;
+ }
if (mddev->pers) {
mddev->pers->quiesce(mddev, 1);
bitmap_destroy(mddev);
@@ -2214,21 +2224,25 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
/* nothing to be done */;
else if (strncmp(buf, "file:", 5) == 0) {
/* Not supported yet */
- return -EINVAL;
+ rv = -EINVAL;
+ goto out;
} else {
- int rv;
if (buf[0] == '+')
rv = kstrtoll(buf+1, 10, &offset);
else
rv = kstrtoll(buf, 10, &offset);
if (rv)
- return rv;
- if (offset == 0)
- return -EINVAL;
+ goto out;
+ if (offset == 0) {
+ rv = -EINVAL;
+ goto out;
+ }
if (mddev->bitmap_info.external == 0 &&
mddev->major_version == 0 &&
- offset != mddev->bitmap_info.default_offset)
- return -EINVAL;
+ offset != mddev->bitmap_info.default_offset) {
+ rv = -EINVAL;
+ goto out;
+ }
mddev->bitmap_info.offset = offset;
if (mddev->pers) {
struct bitmap *bitmap;
@@ -2245,7 +2259,7 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
mddev->pers->quiesce(mddev, 0);
if (rv) {
bitmap_destroy(mddev);
- return rv;
+ goto out;
}
}
}
@@ -2257,6 +2271,11 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
set_bit(MD_CHANGE_DEVS, &mddev->flags);
md_wakeup_thread(mddev->thread);
}
+ rv = 0;
+out:
+ mddev_unlock(mddev);
+ if (rv)
+ return rv;
return len;
}
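
location_store() is reshaped above so that every failure path after mddev_lock() succeeds funnels through one unlock site. A minimal sketch of that single-exit shape, with hypothetical example_lock()/example_unlock()/validate() helpers:

/*
 * Sketch of the single-exit shape; 'struct obj', example_lock(),
 * example_unlock() and validate() are all hypothetical.
 */
#include <linux/types.h>

static ssize_t example_store(struct obj *o, const char *buf, size_t len)
{
        int rv;

        rv = example_lock(o);
        if (rv)
                return rv;      /* the only return that skips the unlock */

        rv = validate(o, buf);
        if (rv)
                goto out;       /* every later failure unlocks first */

        /* ... apply the new setting ... */
        rv = 0;
out:
        example_unlock(o);
        return rv ? rv : len;
}
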
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index cd77216be..8625040ba 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -574,7 +574,8 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
{
int r;
struct dm_io_request io_req = {
- .bi_rw = rw,
+ .bi_op = rw,
+ .bi_op_flags = 0,
.notify.fn = dmio_complete,
.notify.context = b,
.client = b->c->dm_io,
@@ -634,6 +635,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
* the dm_buffer's inline bio is local to bufio.
*/
b->bio.bi_private = end_io;
+ bio_set_op_attrs(&b->bio, rw, 0);
/*
* We assume that if len >= PAGE_SIZE ptr is page-aligned.
@@ -660,7 +662,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
ptr += PAGE_SIZE;
} while (len > 0);
- submit_bio(rw, &b->bio);
+ submit_bio(&b->bio);
}
static void submit_io(struct dm_buffer *b, int rw, sector_t block,
@@ -1326,7 +1328,8 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
struct dm_io_request io_req = {
- .bi_rw = WRITE_FLUSH,
+ .bi_op = REQ_OP_WRITE,
+ .bi_op_flags = WRITE_FLUSH,
.mem.type = DM_IO_KMEM,
.mem.ptr.addr = NULL,
.client = c->dm_io,
@@ -1876,7 +1879,7 @@ static int __init dm_bufio_init(void)
__cache_size_refresh();
mutex_unlock(&dm_bufio_clients_lock);
- dm_bufio_wq = create_singlethread_workqueue("dm_bufio_cache");
+ dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
if (!dm_bufio_wq)
return -ENOMEM;
diff --git a/drivers/md/dm-builtin.c b/drivers/md/dm-builtin.c
index 6c9049c51..f09277187 100644
--- a/drivers/md/dm-builtin.c
+++ b/drivers/md/dm-builtin.c
@@ -1,4 +1,4 @@
-#include "dm.h"
+#include "dm-core.h"
/*
* The kobject release method must not be placed in the module itself,
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index ee0510f9a..59b2c5056 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -788,7 +788,8 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
spin_lock_irqsave(&cache->lock, flags);
if (cache->need_tick_bio &&
- !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) {
+ !(bio->bi_opf & (REQ_FUA | REQ_PREFLUSH)) &&
+ bio_op(bio) != REQ_OP_DISCARD) {
pb->tick = true;
cache->need_tick_bio = false;
}
@@ -829,7 +830,7 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
static int bio_triggers_commit(struct cache *cache, struct bio *bio)
{
- return bio->bi_rw & (REQ_FLUSH | REQ_FUA);
+ return bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
}
/*
@@ -851,7 +852,7 @@ static void inc_ds(struct cache *cache, struct bio *bio,
static bool accountable_bio(struct cache *cache, struct bio *bio)
{
return ((bio->bi_bdev == cache->origin_dev->bdev) &&
- !(bio->bi_rw & REQ_DISCARD));
+ bio_op(bio) != REQ_OP_DISCARD);
}
static void accounted_begin(struct cache *cache, struct bio *bio)
@@ -1067,7 +1068,8 @@ static void dec_io_migrations(struct cache *cache)
static bool discard_or_flush(struct bio *bio)
{
- return bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD);
+ return bio_op(bio) == REQ_OP_DISCARD ||
+ bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
}
static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
@@ -1612,8 +1614,8 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
remap_to_cache(cache, bio, 0);
/*
- * REQ_FLUSH is not directed at any particular block so we don't
- * need to inc_ds(). REQ_FUA's are split into a write + REQ_FLUSH
+ * REQ_PREFLUSH is not directed at any particular block so we don't
+ * need to inc_ds(). REQ_FUA's are split into a write + REQ_PREFLUSH
* by dm-core.
*/
issue(cache, bio);
@@ -1978,9 +1980,9 @@ static void process_deferred_bios(struct cache *cache)
bio = bio_list_pop(&bios);
- if (bio->bi_rw & REQ_FLUSH)
+ if (bio->bi_opf & REQ_PREFLUSH)
process_flush_bio(cache, bio);
- else if (bio->bi_rw & REQ_DISCARD)
+ else if (bio_op(bio) == REQ_OP_DISCARD)
process_discard_bio(cache, &structs, bio);
else
process_bio(cache, &structs, bio);
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
new file mode 100644
index 000000000..40ceba1fe
--- /dev/null
+++ b/drivers/md/dm-core.h
@@ -0,0 +1,149 @@
+/*
+ * Internal header file _only_ for device mapper core
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
+ *
+ * This file is released under the LGPL.
+ */
+
+#ifndef DM_CORE_INTERNAL_H
+#define DM_CORE_INTERNAL_H
+
+#include <linux/kthread.h>
+#include <linux/ktime.h>
+#include <linux/blk-mq.h>
+
+#include <trace/events/block.h>
+
+#include "dm.h"
+
+#define DM_RESERVED_MAX_IOS 1024
+
+struct dm_kobject_holder {
+ struct kobject kobj;
+ struct completion completion;
+};
+
+/*
+ * DM core internal structure that is used directly by dm.c and dm-rq.c
+ * DM targets must _not_ dereference a mapped_device to directly access its members!
+ */
+struct mapped_device {
+ struct srcu_struct io_barrier;
+ struct mutex suspend_lock;
+
+ /*
+ * The current mapping (struct dm_table *).
+ * Use dm_get_live_table{_fast} or take suspend_lock for
+ * dereference.
+ */
+ void __rcu *map;
+
+ struct list_head table_devices;
+ struct mutex table_devices_lock;
+
+ unsigned long flags;
+
+ struct request_queue *queue;
+ int numa_node_id;
+
+ unsigned type;
+ /* Protect queue and type against concurrent access. */
+ struct mutex type_lock;
+
+ atomic_t holders;
+ atomic_t open_count;
+
+ struct dm_target *immutable_target;
+ struct target_type *immutable_target_type;
+
+ struct gendisk *disk;
+ char name[16];
+
+ void *interface_ptr;
+
+ /*
+ * A list of ios that arrived while we were suspended.
+ */
+ atomic_t pending[2];
+ wait_queue_head_t wait;
+ struct work_struct work;
+ spinlock_t deferred_lock;
+ struct bio_list deferred;
+
+ /*
+ * Event handling.
+ */
+ wait_queue_head_t eventq;
+ atomic_t event_nr;
+ atomic_t uevent_seq;
+ struct list_head uevent_list;
+ spinlock_t uevent_lock; /* Protect access to uevent_list */
+
+ /* the number of internal suspends */
+ unsigned internal_suspend_count;
+
+ /*
+ * Processing queue (flush)
+ */
+ struct workqueue_struct *wq;
+
+ /*
+ * io objects are allocated from here.
+ */
+ mempool_t *io_pool;
+ mempool_t *rq_pool;
+
+ struct bio_set *bs;
+
+ /*
+ * freeze/thaw support require holding onto a super block
+ */
+ struct super_block *frozen_sb;
+
+ /* forced geometry settings */
+ struct hd_geometry geometry;
+
+ struct block_device *bdev;
+
+ /* kobject and completion */
+ struct dm_kobject_holder kobj_holder;
+
+ /* zero-length flush that will be cloned and submitted to targets */
+ struct bio flush_bio;
+
+ struct dm_stats stats;
+
+ struct kthread_worker kworker;
+ struct task_struct *kworker_task;
+
+ /* for request-based merge heuristic in dm_request_fn() */
+ unsigned seq_rq_merge_deadline_usecs;
+ int last_rq_rw;
+ sector_t last_rq_pos;
+ ktime_t last_rq_start_time;
+
+ /* for blk-mq request-based DM support */
+ struct blk_mq_tag_set *tag_set;
+ bool use_blk_mq:1;
+ bool init_tio_pdu:1;
+};
+
+void dm_init_md_queue(struct mapped_device *md);
+void dm_init_normal_md_queue(struct mapped_device *md);
+int md_in_flight(struct mapped_device *md);
+void disable_write_same(struct mapped_device *md);
+
+static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
+{
+ return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
+}
+
+unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);
+
+static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
+{
+ return !maxlen || strlen(result) + 1 >= maxlen;
+}
+
+#endif
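
dm_get_completion_from_kobject() above is the classic container_of() accessor: given a pointer to an embedded member, recover the enclosing structure. A self-contained sketch of the same pattern with a hypothetical struct:

/* Self-contained sketch of the container_of() accessor pattern. */
#include <linux/kernel.h>
#include <linux/list.h>

struct holder {
        int id;
        struct list_head node;  /* embedded member handed to other code */
};

static struct holder *holder_from_node(struct list_head *node)
{
        /* Subtracts offsetof(struct holder, node) from the pointer. */
        return container_of(node, struct holder, node);
}
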
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index cfe28cf66..874295757 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -181,7 +181,7 @@ struct crypt_config {
u8 key[0];
};
-#define MIN_IOS 16
+#define MIN_IOS 64
static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
@@ -683,7 +683,7 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc,
u8 *data)
{
struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
- u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
+ __le64 sector = cpu_to_le64(dmreq->iv_sector);
u8 buf[TCW_WHITENING_SIZE];
SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
int i, r;
@@ -722,7 +722,7 @@ static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
struct dm_crypt_request *dmreq)
{
struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
- u64 sector = cpu_to_le64((u64)dmreq->iv_sector);
+ __le64 sector = cpu_to_le64(dmreq->iv_sector);
u8 *src;
int r = 0;
@@ -1136,7 +1136,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
clone->bi_private = io;
clone->bi_end_io = crypt_endio;
clone->bi_bdev = cc->dev->bdev;
- clone->bi_rw = io->base_bio->bi_rw;
+ bio_set_op_attrs(clone, bio_op(io->base_bio), io->base_bio->bi_opf);
}
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
@@ -1911,11 +1911,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
struct crypt_config *cc = ti->private;
/*
- * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
- * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
- * - for REQ_DISCARD caller must use flush if IO ordering matters
+ * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
+ * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
+ * - for REQ_OP_DISCARD caller must use flush if IO ordering matters
*/
- if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
+ bio_op(bio) == REQ_OP_DISCARD)) {
bio->bi_bdev = cc->dev->bdev;
if (bio_sectors(bio))
bio->bi_iter.bi_sector = cc->start +
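
The two dm-crypt hunks changing u64 to __le64 are endianness-annotation fixes: cpu_to_le64() returns a __le64, so storing it in a plain u64 trips sparse's bitwise-type checking even though the bytes are identical. A minimal sketch of the corrected idiom:

/* Minimal sketch of the corrected annotation. */
#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

static void fill_iv(u8 *iv, u64 sector_number)
{
        __le64 sector = cpu_to_le64(sector_number);     /* not plain u64 */

        memcpy(iv, &sector, sizeof(sector));    /* on-disk layout is LE */
}
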
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index 665bf3285..bf2b2676c 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1540,9 +1540,9 @@ static int era_map(struct dm_target *ti, struct bio *bio)
remap_to_origin(era, bio);
/*
- * REQ_FLUSH bios carry no data, so we're not interested in them.
+ * REQ_PREFLUSH bios carry no data, so we're not interested in them.
*/
- if (!(bio->bi_rw & REQ_FLUSH) &&
+ if (!(bio->bi_opf & REQ_PREFLUSH) &&
(bio_data_dir(bio) == WRITE) &&
!metadata_current_marked(era->md, block)) {
defer_bio(era, bio);
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 4eb5c67b1..6a2e8dd44 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -16,7 +16,7 @@
#define DM_MSG_PREFIX "flakey"
#define all_corrupt_bio_flags_match(bio, fc) \
- (((bio)->bi_rw & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
+ (((bio)->bi_opf & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
/*
* Flakey: Used for testing only, simulates intermittent,
@@ -266,9 +266,9 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
data[fc->corrupt_bio_byte - 1] = fc->corrupt_bio_value;
DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
- "(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n",
+ "(rw=%c bi_opf=%u bi_sector=%llu cur_bytes=%u)\n",
bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
- (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw,
+ (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,
(unsigned long long)bio->bi_iter.bi_sector, bio_bytes);
}
}
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 06d426eb5..0bf1a12e3 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -5,7 +5,7 @@
* This file is released under the GPL.
*/
-#include "dm.h"
+#include "dm-core.h"
#include <linux/device-mapper.h>
@@ -278,8 +278,9 @@ static void km_dp_init(struct dpages *dp, void *data)
/*-----------------------------------------------------------------
* IO routines that accept a list of pages.
*---------------------------------------------------------------*/
-static void do_region(int rw, unsigned region, struct dm_io_region *where,
- struct dpages *dp, struct io *io)
+static void do_region(int op, int op_flags, unsigned region,
+ struct dm_io_region *where, struct dpages *dp,
+ struct io *io)
{
struct bio *bio;
struct page *page;
@@ -295,24 +296,25 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
/*
* Reject unsupported discard and write same requests.
*/
- if (rw & REQ_DISCARD)
+ if (op == REQ_OP_DISCARD)
special_cmd_max_sectors = q->limits.max_discard_sectors;
- else if (rw & REQ_WRITE_SAME)
+ else if (op == REQ_OP_WRITE_SAME)
special_cmd_max_sectors = q->limits.max_write_same_sectors;
- if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) {
+ if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_SAME) &&
+ special_cmd_max_sectors == 0) {
dec_count(io, region, -EOPNOTSUPP);
return;
}
/*
- * where->count may be zero if rw holds a flush and we need to
+ * where->count may be zero if op holds a flush and we need to
* send a zero-sized flush.
*/
do {
/*
* Allocate a suitably sized-bio.
*/
- if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME))
+ if ((op == REQ_OP_DISCARD) || (op == REQ_OP_WRITE_SAME))
num_bvecs = 1;
else
num_bvecs = min_t(int, BIO_MAX_PAGES,
@@ -322,13 +324,14 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
bio->bi_bdev = where->bdev;
bio->bi_end_io = endio;
+ bio_set_op_attrs(bio, op, op_flags);
store_io_and_region_in_bio(bio, io, region);
- if (rw & REQ_DISCARD) {
+ if (op == REQ_OP_DISCARD) {
num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
remaining -= num_sectors;
- } else if (rw & REQ_WRITE_SAME) {
+ } else if (op == REQ_OP_WRITE_SAME) {
/*
* WRITE SAME only uses a single page.
*/
@@ -355,11 +358,11 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
}
atomic_inc(&io->count);
- submit_bio(rw, bio);
+ submit_bio(bio);
} while (remaining);
}
-static void dispatch_io(int rw, unsigned int num_regions,
+static void dispatch_io(int op, int op_flags, unsigned int num_regions,
struct dm_io_region *where, struct dpages *dp,
struct io *io, int sync)
{
@@ -369,7 +372,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
BUG_ON(num_regions > DM_IO_MAX_REGIONS);
if (sync)
- rw |= REQ_SYNC;
+ op_flags |= REQ_SYNC;
/*
* For multiple regions we need to be careful to rewind
@@ -377,8 +380,8 @@ static void dispatch_io(int rw, unsigned int num_regions,
*/
for (i = 0; i < num_regions; i++) {
*dp = old_pages;
- if (where[i].count || (rw & REQ_FLUSH))
- do_region(rw, i, where + i, dp, io);
+ if (where[i].count || (op_flags & REQ_PREFLUSH))
+ do_region(op, op_flags, i, where + i, dp, io);
}
/*
@@ -402,13 +405,13 @@ static void sync_io_complete(unsigned long error, void *context)
}
static int sync_io(struct dm_io_client *client, unsigned int num_regions,
- struct dm_io_region *where, int rw, struct dpages *dp,
- unsigned long *error_bits)
+ struct dm_io_region *where, int op, int op_flags,
+ struct dpages *dp, unsigned long *error_bits)
{
struct io *io;
struct sync_io sio;
- if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
+ if (num_regions > 1 && !op_is_write(op)) {
WARN_ON(1);
return -EIO;
}
@@ -425,7 +428,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
io->vma_invalidate_address = dp->vma_invalidate_address;
io->vma_invalidate_size = dp->vma_invalidate_size;
- dispatch_io(rw, num_regions, where, dp, io, 1);
+ dispatch_io(op, op_flags, num_regions, where, dp, io, 1);
wait_for_completion_io(&sio.wait);
@@ -436,12 +439,12 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
}
static int async_io(struct dm_io_client *client, unsigned int num_regions,
- struct dm_io_region *where, int rw, struct dpages *dp,
- io_notify_fn fn, void *context)
+ struct dm_io_region *where, int op, int op_flags,
+ struct dpages *dp, io_notify_fn fn, void *context)
{
struct io *io;
- if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
+ if (num_regions > 1 && !op_is_write(op)) {
WARN_ON(1);
fn(1, context);
return -EIO;
@@ -457,7 +460,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
io->vma_invalidate_address = dp->vma_invalidate_address;
io->vma_invalidate_size = dp->vma_invalidate_size;
- dispatch_io(rw, num_regions, where, dp, io, 0);
+ dispatch_io(op, op_flags, num_regions, where, dp, io, 0);
return 0;
}
@@ -480,7 +483,7 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
case DM_IO_VMA:
flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
- if ((io_req->bi_rw & RW_MASK) == READ) {
+ if (io_req->bi_op == REQ_OP_READ) {
dp->vma_invalidate_address = io_req->mem.ptr.vma;
dp->vma_invalidate_size = size;
}
@@ -502,9 +505,9 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
* New collapsed (a)synchronous interface.
*
* If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
- * the queue with blk_unplug() some time later or set REQ_SYNC in io_req->bi_rw.
- * If you fail to do one of these, the IO will be submitted to the disk after
- * q->unplug_delay, which defaults to 3ms in blk-settings.c.
+ * the queue with blk_unplug() some time later or set REQ_SYNC in
+ * io_req->bi_opf. If you fail to do one of these, the IO will be submitted to
+ * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
*/
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
struct dm_io_region *where, unsigned long *sync_error_bits)
@@ -518,10 +521,12 @@ int dm_io(struct dm_io_request *io_req, unsigned num_regions,
if (!io_req->notify.fn)
return sync_io(io_req->client, num_regions, where,
- io_req->bi_rw, &dp, sync_error_bits);
+ io_req->bi_op, io_req->bi_op_flags, &dp,
+ sync_error_bits);
- return async_io(io_req->client, num_regions, where, io_req->bi_rw,
- &dp, io_req->notify.fn, io_req->notify.context);
+ return async_io(io_req->client, num_regions, where, io_req->bi_op,
+ io_req->bi_op_flags, &dp, io_req->notify.fn,
+ io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
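
Callers of dm_io() now fill the split bi_op/bi_op_flags pair instead of one bi_rw word, as the dm-bufio and dm-log hunks in this patch do. A sketch of a synchronous flush request, where client and where are hypothetical arguments:

/* Sketch; 'client' and 'where' are hypothetical arguments. */
#include <linux/dm-io.h>

static int issue_flush(struct dm_io_client *client,
                       struct dm_io_region *where)
{
        struct dm_io_request io_req = {
                .bi_op          = REQ_OP_WRITE,
                .bi_op_flags    = WRITE_FLUSH,  /* was: .bi_rw = WRITE_FLUSH */
                .mem.type       = DM_IO_KMEM,
                .mem.ptr.addr   = NULL,
                .client         = client,
                .notify.fn      = NULL,         /* NULL => synchronous */
        };

        return dm_io(&io_req, 1, where, NULL);
}
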
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 2c7ca258c..966eb4b61 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -5,7 +5,7 @@
* This file is released under the GPL.
*/
-#include "dm.h"
+#include "dm-core.h"
#include <linux/module.h>
#include <linux/vmalloc.h>
@@ -1267,6 +1267,15 @@ static int populate_table(struct dm_table *table,
return dm_table_complete(table);
}
+static bool is_valid_type(unsigned cur, unsigned new)
+{
+ if (cur == new ||
+ (cur == DM_TYPE_BIO_BASED && new == DM_TYPE_DAX_BIO_BASED))
+ return true;
+
+ return false;
+}
+
static int table_load(struct dm_ioctl *param, size_t param_size)
{
int r;
@@ -1309,7 +1318,7 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
DMWARN("unable to set up device queue for new table.");
goto err_unlock_md_type;
}
- } else if (dm_get_md_type(md) != dm_table_get_type(t)) {
+ } else if (!is_valid_type(dm_get_md_type(md), dm_table_get_type(t))) {
DMWARN("can't change device type after initial table load.");
r = -EINVAL;
goto err_unlock_md_type;
@@ -1670,8 +1679,7 @@ static int check_version(unsigned int cmd, struct dm_ioctl __user *user)
return r;
}
-#define DM_PARAMS_KMALLOC 0x0001 /* Params alloced with kmalloc */
-#define DM_PARAMS_VMALLOC 0x0002 /* Params alloced with vmalloc */
+#define DM_PARAMS_MALLOC 0x0001 /* Params allocated with kvmalloc() */
#define DM_WIPE_BUFFER 0x0010 /* Wipe input buffer before returning from ioctl */
static void free_params(struct dm_ioctl *param, size_t param_size, int param_flags)
@@ -1679,10 +1687,8 @@ static void free_params(struct dm_ioctl *param, size_t param_size, int param_fla
if (param_flags & DM_WIPE_BUFFER)
memset(param, 0, param_size);
- if (param_flags & DM_PARAMS_KMALLOC)
- kfree(param);
- if (param_flags & DM_PARAMS_VMALLOC)
- vfree(param);
+ if (param_flags & DM_PARAMS_MALLOC)
+ kvfree(param);
}
static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kernel,
@@ -1714,19 +1720,14 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
* Use kmalloc() rather than vmalloc() when we can.
*/
dmi = NULL;
- if (param_kernel->data_size <= KMALLOC_MAX_SIZE) {
+ if (param_kernel->data_size <= KMALLOC_MAX_SIZE)
dmi = kmalloc(param_kernel->data_size, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
- if (dmi)
- *param_flags |= DM_PARAMS_KMALLOC;
- }
if (!dmi) {
unsigned noio_flag;
noio_flag = memalloc_noio_save();
dmi = __vmalloc(param_kernel->data_size, GFP_NOIO | __GFP_HIGH | __GFP_HIGHMEM, PAGE_KERNEL);
memalloc_noio_restore(noio_flag);
- if (dmi)
- *param_flags |= DM_PARAMS_VMALLOC;
}
if (!dmi) {
@@ -1735,6 +1736,8 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
return -ENOMEM;
}
+ *param_flags |= DM_PARAMS_MALLOC;
+
if (copy_from_user(dmi, user, param_kernel->data_size))
goto bad;
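
The ioctl path above can collapse its two allocation flags into one because kvfree() releases either a kmalloc()ed or a vmalloc()ed pointer. A sketch of the try-kmalloc-then-fall-back-to-vmalloc pattern with the single teardown:

/* Sketch of the pattern with a single kvfree() teardown. */
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

static void *alloc_params(size_t size)
{
        void *p = NULL;

        if (size <= KMALLOC_MAX_SIZE)
                p = kmalloc(size, GFP_NOIO | __GFP_NOWARN);
        if (!p)
                p = __vmalloc(size, GFP_NOIO | __GFP_HIGHMEM, PAGE_KERNEL);
        return p;       /* either way, release with kvfree(p) */
}
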
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 1452ed9aa..9e9d04cb7 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -26,7 +26,7 @@
#include <linux/device-mapper.h>
#include <linux/dm-kcopyd.h>
-#include "dm.h"
+#include "dm-core.h"
#define SUB_JOB_SIZE 128
#define SPLIT_COUNT 8
@@ -465,7 +465,7 @@ static void complete_io(unsigned long error, void *context)
io_job_finish(kc->throttle);
if (error) {
- if (job->rw & WRITE)
+ if (op_is_write(job->rw))
job->write_err |= error;
else
job->read_err = 1;
@@ -477,7 +477,7 @@ static void complete_io(unsigned long error, void *context)
}
}
- if (job->rw & WRITE)
+ if (op_is_write(job->rw))
push(&kc->complete_jobs, job);
else {
@@ -496,7 +496,8 @@ static int run_io_job(struct kcopyd_job *job)
{
int r;
struct dm_io_request io_req = {
- .bi_rw = job->rw,
+ .bi_op = job->rw,
+ .bi_op_flags = 0,
.mem.type = DM_IO_PAGE_LIST,
.mem.ptr.pl = job->pages,
.mem.offset = 0,
@@ -550,7 +551,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
if (r < 0) {
/* error this rogue job */
- if (job->rw & WRITE)
+ if (op_is_write(job->rw))
job->write_err = (unsigned long) -1L;
else
job->read_err = 1;
@@ -734,7 +735,7 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
/*
* Use WRITE SAME to optimize zeroing if all dests support it.
*/
- job->rw = WRITE | REQ_WRITE_SAME;
+ job->rw = REQ_OP_WRITE_SAME;
for (i = 0; i < job->num_dests; i++)
if (!bdev_write_same(job->dests[i].bdev)) {
job->rw = WRITE;
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 05c35aacb..4788b0b98 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -141,9 +141,27 @@ static int linear_iterate_devices(struct dm_target *ti,
return fn(ti, lc->dev, lc->start, ti->len, data);
}
+static long linear_direct_access(struct dm_target *ti, sector_t sector,
+ void **kaddr, pfn_t *pfn, long size)
+{
+ struct linear_c *lc = ti->private;
+ struct block_device *bdev = lc->dev->bdev;
+ struct blk_dax_ctl dax = {
+ .sector = linear_map_sector(ti, sector),
+ .size = size,
+ };
+ long ret;
+
+ ret = bdev_direct_access(bdev, &dax);
+ *kaddr = dax.addr;
+ *pfn = dax.pfn;
+
+ return ret;
+}
+
static struct target_type linear_target = {
.name = "linear",
- .version = {1, 2, 1},
+ .version = {1, 3, 0},
.module = THIS_MODULE,
.ctr = linear_ctr,
.dtr = linear_dtr,
@@ -151,6 +169,7 @@ static struct target_type linear_target = {
.status = linear_status,
.prepare_ioctl = linear_prepare_ioctl,
.iterate_devices = linear_iterate_devices,
+ .direct_access = linear_direct_access,
};
int __init dm_linear_init(void)
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index d8f8cc85f..49e4d8d45 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -205,6 +205,7 @@ static int write_metadata(struct log_writes_c *lc, void *entry,
bio->bi_bdev = lc->logdev->bdev;
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
page = alloc_page(GFP_KERNEL);
if (!page) {
@@ -226,7 +227,7 @@ static int write_metadata(struct log_writes_c *lc, void *entry,
DMERR("Couldn't add page to the log block");
goto error_bio;
}
- submit_bio(WRITE, bio);
+ submit_bio(bio);
return 0;
error_bio:
bio_put(bio);
@@ -259,7 +260,7 @@ static int log_one_block(struct log_writes_c *lc,
sector++;
atomic_inc(&lc->io_blocks);
- bio = bio_alloc(GFP_KERNEL, block->vec_cnt);
+ bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt, BIO_MAX_PAGES));
if (!bio) {
DMERR("Couldn't alloc log bio");
goto error;
@@ -269,6 +270,7 @@ static int log_one_block(struct log_writes_c *lc,
bio->bi_bdev = lc->logdev->bdev;
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
for (i = 0; i < block->vec_cnt; i++) {
/*
@@ -279,8 +281,8 @@ static int log_one_block(struct log_writes_c *lc,
block->vecs[i].bv_len, 0);
if (ret != block->vecs[i].bv_len) {
atomic_inc(&lc->io_blocks);
- submit_bio(WRITE, bio);
- bio = bio_alloc(GFP_KERNEL, block->vec_cnt - i);
+ submit_bio(bio);
+ bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt - i, BIO_MAX_PAGES));
if (!bio) {
DMERR("Couldn't alloc log bio");
goto error;
@@ -290,6 +292,7 @@ static int log_one_block(struct log_writes_c *lc,
bio->bi_bdev = lc->logdev->bdev;
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
ret = bio_add_page(bio, block->vecs[i].bv_page,
block->vecs[i].bv_len, 0);
@@ -301,7 +304,7 @@ static int log_one_block(struct log_writes_c *lc,
}
sector += block->vecs[i].bv_len >> SECTOR_SHIFT;
}
- submit_bio(WRITE, bio);
+ submit_bio(bio);
out:
kfree(block->data);
kfree(block);
@@ -552,9 +555,9 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
struct bio_vec bv;
size_t alloc_size;
int i = 0;
- bool flush_bio = (bio->bi_rw & REQ_FLUSH);
- bool fua_bio = (bio->bi_rw & REQ_FUA);
- bool discard_bio = (bio->bi_rw & REQ_DISCARD);
+ bool flush_bio = (bio->bi_opf & REQ_PREFLUSH);
+ bool fua_bio = (bio->bi_opf & REQ_FUA);
+ bool discard_bio = (bio_op(bio) == REQ_OP_DISCARD);
pb->block = NULL;
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 627d19186..07fc1ad42 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -291,9 +291,10 @@ static void header_from_disk(struct log_header_core *core, struct log_header_dis
core->nr_regions = le64_to_cpu(disk->nr_regions);
}
-static int rw_header(struct log_c *lc, int rw)
+static int rw_header(struct log_c *lc, int op)
{
- lc->io_req.bi_rw = rw;
+ lc->io_req.bi_op = op;
+ lc->io_req.bi_op_flags = 0;
return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
}
@@ -306,7 +307,8 @@ static int flush_header(struct log_c *lc)
.count = 0,
};
- lc->io_req.bi_rw = WRITE_FLUSH;
+ lc->io_req.bi_op = REQ_OP_WRITE;
+ lc->io_req.bi_op_flags = WRITE_FLUSH;
return dm_io(&lc->io_req, 1, &null_location, NULL);
}
@@ -315,7 +317,7 @@ static int read_header(struct log_c *log)
{
int r;
- r = rw_header(log, READ);
+ r = rw_header(log, REQ_OP_READ);
if (r)
return r;
@@ -629,7 +631,7 @@ static int disk_resume(struct dm_dirty_log *log)
header_to_disk(&lc->header, lc->disk_header);
/* write the new header */
- r = rw_header(lc, WRITE);
+ r = rw_header(lc, REQ_OP_WRITE);
if (!r) {
r = flush_header(lc);
if (r)
@@ -697,7 +699,7 @@ static int disk_flush(struct dm_dirty_log *log)
log_clear_bit(lc, lc->clean_bits, i);
}
- r = rw_header(lc, WRITE);
+ r = rw_header(lc, REQ_OP_WRITE);
if (r)
fail_log_device(lc);
else {
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 52baf8a5b..ac734e5bb 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -7,7 +7,8 @@
#include <linux/device-mapper.h>
-#include "dm.h"
+#include "dm-rq.h"
+#include "dm-bio-record.h"
#include "dm-path-selector.h"
#include "dm-uevent.h"
@@ -89,6 +90,8 @@ struct multipath {
atomic_t pg_init_in_progress; /* Only one pg_init allowed at once */
atomic_t pg_init_count; /* Number of times pg_init called */
+ unsigned queue_mode;
+
/*
* We must use a mempool of dm_mpath_io structs so that we
* can resubmit bios on error.
@@ -97,10 +100,13 @@ struct multipath {
struct mutex work_mutex;
struct work_struct trigger_event;
+
+ struct work_struct process_queued_bios;
+ struct bio_list queued_bios;
};
/*
- * Context information attached to each bio we process.
+ * Context information attached to each io we process.
*/
struct dm_mpath_io {
struct pgpath *pgpath;
@@ -114,6 +120,7 @@ static struct kmem_cache *_mpio_cache;
static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);
+static void process_queued_bios(struct work_struct *work);
/*-----------------------------------------------
* Multipath state flags.
@@ -185,7 +192,7 @@ static void free_priority_group(struct priority_group *pg,
kfree(pg);
}
-static struct multipath *alloc_multipath(struct dm_target *ti, bool use_blk_mq)
+static struct multipath *alloc_multipath(struct dm_target *ti)
{
struct multipath *m;
@@ -203,15 +210,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti, bool use_blk_mq)
mutex_init(&m->work_mutex);
m->mpio_pool = NULL;
- if (!use_blk_mq) {
- unsigned min_ios = dm_get_reserved_rq_based_ios();
-
- m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
- if (!m->mpio_pool) {
- kfree(m);
- return NULL;
- }
- }
+ m->queue_mode = DM_TYPE_NONE;
m->ti = ti;
ti->private = m;
@@ -220,6 +219,39 @@ static struct multipath *alloc_multipath(struct dm_target *ti, bool use_blk_mq)
return m;
}
+static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
+{
+ if (m->queue_mode == DM_TYPE_NONE) {
+ /*
+ * Default to request-based.
+ */
+ if (dm_use_blk_mq(dm_table_get_md(ti->table)))
+ m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
+ else
+ m->queue_mode = DM_TYPE_REQUEST_BASED;
+ }
+
+ if (m->queue_mode == DM_TYPE_REQUEST_BASED) {
+ unsigned min_ios = dm_get_reserved_rq_based_ios();
+
+ m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
+ if (!m->mpio_pool)
+ return -ENOMEM;
+ } else if (m->queue_mode == DM_TYPE_BIO_BASED) {
+ INIT_WORK(&m->process_queued_bios, process_queued_bios);
+ /*
+ * bio-based doesn't support any direct scsi_dh management;
+ * it just discovers if a scsi_dh is attached.
+ */
+ set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
+ }
+
+ dm_table_set_type(ti->table, m->queue_mode);
+
+ return 0;
+}
+
static void free_multipath(struct multipath *m)
{
struct priority_group *pg, *tmp;
@@ -272,6 +304,41 @@ static void clear_request_fn_mpio(struct multipath *m, union map_info *info)
}
}
+static size_t multipath_per_bio_data_size(void)
+{
+ return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
+}
+
+static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
+{
+ return dm_per_bio_data(bio, multipath_per_bio_data_size());
+}
+
+static struct dm_bio_details *get_bio_details_from_bio(struct bio *bio)
+{
+ /* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
+ struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
+ void *bio_details = mpio + 1;
+
+ return bio_details;
+}
+
+static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p,
+ struct dm_bio_details **bio_details_p)
+{
+ struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
+ struct dm_bio_details *bio_details = get_bio_details_from_bio(bio);
+
+ memset(mpio, 0, sizeof(*mpio));
+ memset(bio_details, 0, sizeof(*bio_details));
+ dm_bio_record(bio_details, bio);
+
+ if (mpio_p)
+ *mpio_p = mpio;
+ if (bio_details_p)
+ *bio_details_p = bio_details;
+}
+
/*-----------------------------------------------
* Path selection
*-----------------------------------------------*/
@@ -431,16 +498,40 @@ failed:
* and multipath_resume() calls and we have no need to check
* for the DMF_NOFLUSH_SUSPENDING flag.
*/
-static int must_push_back(struct multipath *m)
+static bool __must_push_back(struct multipath *m)
{
- return (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) ||
- ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) !=
- test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) &&
- dm_noflush_suspending(m->ti)));
+ return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) !=
+ test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) &&
+ dm_noflush_suspending(m->ti));
+}
+
+static bool must_push_back_rq(struct multipath *m)
+{
+ bool r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&m->lock, flags);
+ r = (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) ||
+ __must_push_back(m));
+ spin_unlock_irqrestore(&m->lock, flags);
+
+ return r;
+}
+
+static bool must_push_back_bio(struct multipath *m)
+{
+ bool r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&m->lock, flags);
+ r = __must_push_back(m);
+ spin_unlock_irqrestore(&m->lock, flags);
+
+ return r;
}
/*
- * Map cloned requests
+ * Map cloned requests (request-based multipath)
*/
static int __multipath_map(struct dm_target *ti, struct request *clone,
union map_info *map_context,
@@ -459,7 +550,7 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
pgpath = choose_pgpath(m, nr_bytes);
if (!pgpath) {
- if (!must_push_back(m))
+ if (!must_push_back_rq(m))
r = -EIO; /* Failed */
return r;
} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
@@ -530,6 +621,108 @@ static void multipath_release_clone(struct request *clone)
}
/*
+ * Map cloned bios (bio-based multipath)
+ */
+static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_mpath_io *mpio)
+{
+ size_t nr_bytes = bio->bi_iter.bi_size;
+ struct pgpath *pgpath;
+ unsigned long flags;
+ bool queue_io;
+
+ /* Do we need to select a new pgpath? */
+ pgpath = lockless_dereference(m->current_pgpath);
+ queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
+ if (!pgpath || !queue_io)
+ pgpath = choose_pgpath(m, nr_bytes);
+
+ if ((pgpath && queue_io) ||
+ (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
+ /* Queue for the daemon to resubmit */
+ spin_lock_irqsave(&m->lock, flags);
+ bio_list_add(&m->queued_bios, bio);
+ spin_unlock_irqrestore(&m->lock, flags);
+ /* PG_INIT_REQUIRED cannot be set without QUEUE_IO */
+ if (queue_io || test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
+ pg_init_all_paths(m);
+ else if (!queue_io)
+ queue_work(kmultipathd, &m->process_queued_bios);
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ if (!pgpath) {
+ if (!must_push_back_bio(m))
+ return -EIO;
+ return DM_MAPIO_REQUEUE;
+ }
+
+ mpio->pgpath = pgpath;
+ mpio->nr_bytes = nr_bytes;
+
+ bio->bi_error = 0;
+ bio->bi_bdev = pgpath->path.dev->bdev;
+ bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
+
+ if (pgpath->pg->ps.type->start_io)
+ pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
+ &pgpath->path,
+ nr_bytes);
+ return DM_MAPIO_REMAPPED;
+}
+
+static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
+{
+ struct multipath *m = ti->private;
+ struct dm_mpath_io *mpio = NULL;
+
+ multipath_init_per_bio_data(bio, &mpio, NULL);
+
+ return __multipath_map_bio(m, bio, mpio);
+}
+
+static void process_queued_bios_list(struct multipath *m)
+{
+ if (m->queue_mode == DM_TYPE_BIO_BASED)
+ queue_work(kmultipathd, &m->process_queued_bios);
+}
+
+static void process_queued_bios(struct work_struct *work)
+{
+ int r;
+ unsigned long flags;
+ struct bio *bio;
+ struct bio_list bios;
+ struct blk_plug plug;
+ struct multipath *m =
+ container_of(work, struct multipath, process_queued_bios);
+
+ bio_list_init(&bios);
+
+ spin_lock_irqsave(&m->lock, flags);
+
+ if (bio_list_empty(&m->queued_bios)) {
+ spin_unlock_irqrestore(&m->lock, flags);
+ return;
+ }
+
+ bio_list_merge(&bios, &m->queued_bios);
+ bio_list_init(&m->queued_bios);
+
+ spin_unlock_irqrestore(&m->lock, flags);
+
+ blk_start_plug(&plug);
+ while ((bio = bio_list_pop(&bios))) {
+ r = __multipath_map_bio(m, bio, get_mpio_from_bio(bio));
+ if (r < 0 || r == DM_MAPIO_REQUEUE) {
+ bio->bi_error = r;
+ bio_endio(bio);
+ } else if (r == DM_MAPIO_REMAPPED)
+ generic_make_request(bio);
+ }
+ blk_finish_plug(&plug);
+}
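
process_queued_bios() follows the standard drain pattern: splice the shared list into a private one under the spinlock, then issue the bios locklessly under a block plug so consecutive submissions can merge. A generic sketch of the pattern (struct and helper names hypothetical):

#include <linux/bio.h>
#include <linux/spinlock.h>

struct drain_ctx {
	spinlock_t lock;
	struct bio_list queued;
};

static void drain_sketch(struct drain_ctx *c)
{
	struct bio_list local;
	struct bio *bio;
	unsigned long flags;

	bio_list_init(&local);
	spin_lock_irqsave(&c->lock, flags);
	bio_list_merge(&local, &c->queued);	/* steal the whole list at once */
	bio_list_init(&c->queued);		/* producers see an empty list again */
	spin_unlock_irqrestore(&c->lock, flags);

	while ((bio = bio_list_pop(&local)))
		generic_make_request(bio);	/* per-bio work happens unlocked */
}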
+
+/*
* If we run out of usable paths, should we queue I/O or error it?
*/
static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
@@ -557,8 +750,10 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
spin_unlock_irqrestore(&m->lock, flags);
- if (!queue_if_no_path)
+ if (!queue_if_no_path) {
dm_table_run_md_queue_async(m->ti->table);
+ process_queued_bios_list(m);
+ }
return 0;
}
@@ -798,6 +993,12 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
if (!hw_argc)
return 0;
+ if (m->queue_mode == DM_TYPE_BIO_BASED) {
+ dm_consume_args(as, hw_argc);
+ DMERR("bio-based multipath doesn't allow hardware handler args");
+ return 0;
+ }
+
m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
if (hw_argc > 1) {
@@ -833,7 +1034,7 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
const char *arg_name;
static struct dm_arg _args[] = {
- {0, 6, "invalid number of feature args"},
+ {0, 8, "invalid number of feature args"},
{1, 50, "pg_init_retries must be between 1 and 50"},
{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
};
@@ -873,6 +1074,24 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
continue;
}
+ if (!strcasecmp(arg_name, "queue_mode") &&
+ (argc >= 1)) {
+ const char *queue_mode_name = dm_shift_arg(as);
+
+ if (!strcasecmp(queue_mode_name, "bio"))
+ m->queue_mode = DM_TYPE_BIO_BASED;
+ else if (!strcasecmp(queue_mode_name, "rq"))
+ m->queue_mode = DM_TYPE_REQUEST_BASED;
+ else if (!strcasecmp(queue_mode_name, "mq"))
+ m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
+ else {
+ ti->error = "Unknown 'queue_mode' requested";
+ r = -EINVAL;
+ }
+ argc--;
+ continue;
+ }
+
ti->error = "Unrecognised multipath feature request";
r = -EINVAL;
} while (argc && !r);
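
Since 'queue_mode <bio|rq|mq>' is a feature argument pair, the maximum number of feature args grows from 6 to 8 above. For illustration only (device numbers, priority-group layout and path-selector arguments are placeholders, not taken from this patch), a table line requesting bio-based multipath could look like:

0 2097152 multipath 2 queue_mode bio 0 1 1 round-robin 0 1 1 8:16 1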
@@ -880,8 +1099,7 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
return r;
}
-static int multipath_ctr(struct dm_target *ti, unsigned int argc,
- char **argv)
+static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
/* target arguments */
static struct dm_arg _args[] = {
@@ -894,12 +1112,11 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
struct dm_arg_set as;
unsigned pg_count = 0;
unsigned next_pg_num;
- bool use_blk_mq = dm_use_blk_mq(dm_table_get_md(ti->table));
as.argc = argc;
as.argv = argv;
- m = alloc_multipath(ti, use_blk_mq);
+ m = alloc_multipath(ti);
if (!m) {
ti->error = "can't allocate multipath";
return -EINVAL;
@@ -909,6 +1126,10 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
if (r)
goto bad;
+ r = alloc_multipath_stage2(ti, m);
+ if (r)
+ goto bad;
+
r = parse_hw_handler(&as, m);
if (r)
goto bad;
@@ -958,7 +1179,9 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
ti->num_write_same_bios = 1;
- if (use_blk_mq)
+ if (m->queue_mode == DM_TYPE_BIO_BASED)
+ ti->per_io_data_size = multipath_per_bio_data_size();
+ else if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
ti->per_io_data_size = sizeof(struct dm_mpath_io);
return 0;
@@ -1083,8 +1306,10 @@ static int reinstate_path(struct pgpath *pgpath)
out:
spin_unlock_irqrestore(&m->lock, flags);
- if (run_queue)
+ if (run_queue) {
dm_table_run_md_queue_async(m->ti->table);
+ process_queued_bios_list(m);
+ }
return r;
}
@@ -1281,6 +1506,8 @@ static void pg_init_done(void *data, int errors)
}
clear_bit(MPATHF_QUEUE_IO, &m->flags);
+ process_queued_bios_list(m);
+
/*
* Wake up any thread waiting to suspend.
*/
@@ -1328,7 +1555,7 @@ static int do_end_io(struct multipath *m, struct request *clone,
* during end I/O handling, since those clone requests don't have
* bio clones. If we queue them inside the multipath target,
* we need to make bio clones, that requires memory allocation.
- * (See drivers/md/dm.c:end_clone_bio() about why the clone requests
+ * (See drivers/md/dm-rq.c:end_clone_bio() about why the clone requests
* don't have bio clones.)
* Instead of queueing the clone request here, we queue the original
* request into dm core, which will remake a clone request and
@@ -1347,7 +1574,7 @@ static int do_end_io(struct multipath *m, struct request *clone,
if (!atomic_read(&m->nr_valid_paths)) {
if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
- if (!must_push_back(m))
+ if (!must_push_back_rq(m))
r = -EIO;
} else {
if (error == -EBADE)
@@ -1381,6 +1608,64 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
return r;
}
+static int do_end_io_bio(struct multipath *m, struct bio *clone,
+ int error, struct dm_mpath_io *mpio)
+{
+ unsigned long flags;
+
+ if (!error)
+ return 0; /* I/O complete */
+
+ if (noretry_error(error))
+ return error;
+
+ if (mpio->pgpath)
+ fail_path(mpio->pgpath);
+
+ if (!atomic_read(&m->nr_valid_paths)) {
+ if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
+ if (!must_push_back_bio(m))
+ return -EIO;
+ return DM_ENDIO_REQUEUE;
+ } else {
+ if (error == -EBADE)
+ return error;
+ }
+ }
+
+ /* Queue for the daemon to resubmit */
+ dm_bio_restore(get_bio_details_from_bio(clone), clone);
+
+ spin_lock_irqsave(&m->lock, flags);
+ bio_list_add(&m->queued_bios, clone);
+ spin_unlock_irqrestore(&m->lock, flags);
+ if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
+ queue_work(kmultipathd, &m->process_queued_bios);
+
+ return DM_ENDIO_INCOMPLETE;
+}
+
+static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, int error)
+{
+ struct multipath *m = ti->private;
+ struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
+ struct pgpath *pgpath;
+ struct path_selector *ps;
+ int r;
+
+ BUG_ON(!mpio);
+
+ r = do_end_io_bio(m, clone, error, mpio);
+ pgpath = mpio->pgpath;
+ if (pgpath) {
+ ps = &pgpath->pg->ps;
+ if (ps->type->end_io)
+ ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
+ }
+
+ return r;
+}
+
/*
* Suspend can't complete until all the I/O is processed so if
* the last path fails we must error any remaining I/O.
@@ -1409,12 +1694,14 @@ static void multipath_postsuspend(struct dm_target *ti)
static void multipath_resume(struct dm_target *ti)
{
struct multipath *m = ti->private;
+ unsigned long flags;
+ spin_lock_irqsave(&m->lock, flags);
if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags))
set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
else
clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
- smp_mb__after_atomic();
+ spin_unlock_irqrestore(&m->lock, flags);
}
/*
@@ -1454,7 +1741,9 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
(m->pg_init_retries > 0) * 2 +
(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
- test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags));
+ test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) +
+ (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2);
+
if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
DMEMIT("queue_if_no_path ");
if (m->pg_init_retries)
@@ -1463,6 +1752,16 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
DMEMIT("retain_attached_hw_handler ");
+ if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
+ switch(m->queue_mode) {
+ case DM_TYPE_BIO_BASED:
+ DMEMIT("queue_mode bio ");
+ break;
+ case DM_TYPE_MQ_REQUEST_BASED:
+ DMEMIT("queue_mode mq ");
+ break;
+ }
+ }
}
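
With the emission above, a request-based table reports the same feature string as before, while the non-default modes add the pair. An illustrative STATUSTYPE_TABLE feature prefix for a bio-based table would be "2 queue_mode bio" (argument count first, then the pair), which keeps older userspace parsing intact for the default mode.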
if (!m->hw_handler_name || type == STATUSTYPE_INFO)
@@ -1642,6 +1941,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
pg_init_all_paths(m);
dm_table_run_md_queue_async(m->ti->table);
+ process_queued_bios_list(m);
}
/*
@@ -1748,7 +2048,7 @@ static int multipath_busy(struct dm_target *ti)
*---------------------------------------------------------------*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 11, 0},
+ .version = {1, 12, 0},
.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
.module = THIS_MODULE,
.ctr = multipath_ctr,
@@ -1757,6 +2057,8 @@ static struct target_type multipath_target = {
.clone_and_map_rq = multipath_clone_and_map,
.release_clone_rq = multipath_release_clone,
.rq_end_io = multipath_end_io,
+ .map = multipath_map_bio,
+ .end_io = multipath_end_io_bio,
.presuspend = multipath_presuspend,
.postsuspend = multipath_postsuspend,
.resume = multipath_resume,
@@ -1771,14 +2073,14 @@ static int __init dm_multipath_init(void)
{
int r;
- /* allocate a slab for the dm_ios */
+ /* allocate a slab for the dm_mpath_ios */
_mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
if (!_mpio_cache)
return -ENOMEM;
r = dm_register_target(&multipath_target);
if (r < 0) {
- DMERR("register failed %d", r);
+ DMERR("request-based register failed %d", r);
r = -EINVAL;
goto bad_register_target;
}
@@ -1804,10 +2106,6 @@ static int __init dm_multipath_init(void)
goto bad_alloc_kmpath_handlerd;
}
- DMINFO("version %u.%u.%u loaded",
- multipath_target.version[0], multipath_target.version[1],
- multipath_target.version[2]);
-
return 0;
bad_alloc_kmpath_handlerd:
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 52532745a..8abde6b8c 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2010-2011 Neil Brown
- * Copyright (C) 2010-2015 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2010-2016 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*/
@@ -17,7 +17,12 @@
#include <linux/device-mapper.h>
#define DM_MSG_PREFIX "raid"
-#define MAX_RAID_DEVICES 253 /* raid4/5/6 limit */
+#define MAX_RAID_DEVICES 253 /* md-raid kernel limit */
+
+/*
+ * Minimum sectors of free reshape space per raid device
+ */
+#define MIN_FREE_RESHAPE_SPACE to_sector(4*4096)
static bool devices_handle_discard_safely = false;
@@ -25,12 +30,12 @@ static bool devices_handle_discard_safely = false;
* The following flags are used by dm-raid.c to set up the array state.
* They must be cleared before md_run is called.
*/
-#define FirstUse 10 /* rdev flag */
+#define FirstUse 10 /* rdev flag */
struct raid_dev {
/*
* Two DM devices, one to hold metadata and one to hold the
- * actual data/parity. The reason for this is to not confuse
+ * actual data/parity. The reason for this is to not confuse
* ti->len and give more flexibility in altering size and
* characteristics.
*
@@ -46,25 +51,174 @@ struct raid_dev {
};
/*
+ * Bits for establishing rs->ctr_flags
+ *
+ * 1 = no flag value
+ * 2 = flag with value
+ */
+#define __CTR_FLAG_SYNC 0 /* 1 */ /* Not with raid0! */
+#define __CTR_FLAG_NOSYNC 1 /* 1 */ /* Not with raid0! */
+#define __CTR_FLAG_REBUILD 2 /* 2 */ /* Not with raid0! */
+#define __CTR_FLAG_DAEMON_SLEEP 3 /* 2 */ /* Not with raid0! */
+#define __CTR_FLAG_MIN_RECOVERY_RATE 4 /* 2 */ /* Not with raid0! */
+#define __CTR_FLAG_MAX_RECOVERY_RATE 5 /* 2 */ /* Not with raid0! */
+#define __CTR_FLAG_MAX_WRITE_BEHIND 6 /* 2 */ /* Only with raid1! */
+#define __CTR_FLAG_WRITE_MOSTLY 7 /* 2 */ /* Only with raid1! */
+#define __CTR_FLAG_STRIPE_CACHE 8 /* 2 */ /* Only with raid4/5/6! */
+#define __CTR_FLAG_REGION_SIZE 9 /* 2 */ /* Not with raid0! */
+#define __CTR_FLAG_RAID10_COPIES 10 /* 2 */ /* Only with raid10 */
+#define __CTR_FLAG_RAID10_FORMAT 11 /* 2 */ /* Only with raid10 */
+/* New for v1.9.0 */
+#define __CTR_FLAG_DELTA_DISKS 12 /* 2 */ /* Only with reshapable raid1/4/5/6/10! */
+#define __CTR_FLAG_DATA_OFFSET 13 /* 2 */ /* Only with reshapable raid4/5/6/10! */
+#define __CTR_FLAG_RAID10_USE_NEAR_SETS 14 /* 2 */ /* Only with raid10! */
+
+/*
* Flags for rs->ctr_flags field.
*/
-#define CTR_FLAG_SYNC 0x1
-#define CTR_FLAG_NOSYNC 0x2
-#define CTR_FLAG_REBUILD 0x4
-#define CTR_FLAG_DAEMON_SLEEP 0x8
-#define CTR_FLAG_MIN_RECOVERY_RATE 0x10
-#define CTR_FLAG_MAX_RECOVERY_RATE 0x20
-#define CTR_FLAG_MAX_WRITE_BEHIND 0x40
-#define CTR_FLAG_STRIPE_CACHE 0x80
-#define CTR_FLAG_REGION_SIZE 0x100
-#define CTR_FLAG_RAID10_COPIES 0x200
-#define CTR_FLAG_RAID10_FORMAT 0x400
+#define CTR_FLAG_SYNC (1 << __CTR_FLAG_SYNC)
+#define CTR_FLAG_NOSYNC (1 << __CTR_FLAG_NOSYNC)
+#define CTR_FLAG_REBUILD (1 << __CTR_FLAG_REBUILD)
+#define CTR_FLAG_DAEMON_SLEEP (1 << __CTR_FLAG_DAEMON_SLEEP)
+#define CTR_FLAG_MIN_RECOVERY_RATE (1 << __CTR_FLAG_MIN_RECOVERY_RATE)
+#define CTR_FLAG_MAX_RECOVERY_RATE (1 << __CTR_FLAG_MAX_RECOVERY_RATE)
+#define CTR_FLAG_MAX_WRITE_BEHIND (1 << __CTR_FLAG_MAX_WRITE_BEHIND)
+#define CTR_FLAG_WRITE_MOSTLY (1 << __CTR_FLAG_WRITE_MOSTLY)
+#define CTR_FLAG_STRIPE_CACHE (1 << __CTR_FLAG_STRIPE_CACHE)
+#define CTR_FLAG_REGION_SIZE (1 << __CTR_FLAG_REGION_SIZE)
+#define CTR_FLAG_RAID10_COPIES (1 << __CTR_FLAG_RAID10_COPIES)
+#define CTR_FLAG_RAID10_FORMAT (1 << __CTR_FLAG_RAID10_FORMAT)
+#define CTR_FLAG_DELTA_DISKS (1 << __CTR_FLAG_DELTA_DISKS)
+#define CTR_FLAG_DATA_OFFSET (1 << __CTR_FLAG_DATA_OFFSET)
+#define CTR_FLAG_RAID10_USE_NEAR_SETS (1 << __CTR_FLAG_RAID10_USE_NEAR_SETS)
+
+/*
+ * Definitions of various constructor flags to
+ * be used in checks of valid / invalid flags
+ * per raid level.
+ */
+/* Define any/all sync flags */
+#define CTR_FLAGS_ANY_SYNC (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC)
+
+/* Define flags for options without argument (e.g. 'nosync') */
+#define CTR_FLAG_OPTIONS_NO_ARGS (CTR_FLAGS_ANY_SYNC | \
+ CTR_FLAG_RAID10_USE_NEAR_SETS)
+
+/* Define flags for options with one argument (e.g. 'delta_disks +2') */
+#define CTR_FLAG_OPTIONS_ONE_ARG (CTR_FLAG_REBUILD | \
+ CTR_FLAG_WRITE_MOSTLY | \
+ CTR_FLAG_DAEMON_SLEEP | \
+ CTR_FLAG_MIN_RECOVERY_RATE | \
+ CTR_FLAG_MAX_RECOVERY_RATE | \
+ CTR_FLAG_MAX_WRITE_BEHIND | \
+ CTR_FLAG_STRIPE_CACHE | \
+ CTR_FLAG_REGION_SIZE | \
+ CTR_FLAG_RAID10_COPIES | \
+ CTR_FLAG_RAID10_FORMAT | \
+ CTR_FLAG_DELTA_DISKS | \
+ CTR_FLAG_DATA_OFFSET)
+
+/* Valid options definitions per raid level... */
+
+/* "raid0" does only accept data offset */
+#define RAID0_VALID_FLAGS (CTR_FLAG_DATA_OFFSET)
+
+/* "raid1" does not accept stripe cache, data offset, delta_disks or any raid10 options */
+#define RAID1_VALID_FLAGS (CTR_FLAGS_ANY_SYNC | \
+ CTR_FLAG_REBUILD | \
+ CTR_FLAG_WRITE_MOSTLY | \
+ CTR_FLAG_DAEMON_SLEEP | \
+ CTR_FLAG_MIN_RECOVERY_RATE | \
+ CTR_FLAG_MAX_RECOVERY_RATE | \
+ CTR_FLAG_MAX_WRITE_BEHIND | \
+ CTR_FLAG_REGION_SIZE | \
+ CTR_FLAG_DELTA_DISKS | \
+ CTR_FLAG_DATA_OFFSET)
+
+/* "raid10" does not accept any raid1 or stripe cache options */
+#define RAID10_VALID_FLAGS (CTR_FLAGS_ANY_SYNC | \
+ CTR_FLAG_REBUILD | \
+ CTR_FLAG_DAEMON_SLEEP | \
+ CTR_FLAG_MIN_RECOVERY_RATE | \
+ CTR_FLAG_MAX_RECOVERY_RATE | \
+ CTR_FLAG_REGION_SIZE | \
+ CTR_FLAG_RAID10_COPIES | \
+ CTR_FLAG_RAID10_FORMAT | \
+ CTR_FLAG_DELTA_DISKS | \
+ CTR_FLAG_DATA_OFFSET | \
+ CTR_FLAG_RAID10_USE_NEAR_SETS)
+
+/*
+ * "raid4/5/6" do not accept any raid1 or raid10 specific options
+ *
+ * "raid6" does not accept "nosync", because it is not guaranteed
+ * that both parity and q-syndrome are being written properly with
+ * any writes
+ */
+#define RAID45_VALID_FLAGS (CTR_FLAGS_ANY_SYNC | \
+ CTR_FLAG_REBUILD | \
+ CTR_FLAG_DAEMON_SLEEP | \
+ CTR_FLAG_MIN_RECOVERY_RATE | \
+ CTR_FLAG_MAX_RECOVERY_RATE | \
+ CTR_FLAG_MAX_WRITE_BEHIND | \
+ CTR_FLAG_STRIPE_CACHE | \
+ CTR_FLAG_REGION_SIZE | \
+ CTR_FLAG_DELTA_DISKS | \
+ CTR_FLAG_DATA_OFFSET)
+
+#define RAID6_VALID_FLAGS (CTR_FLAG_SYNC | \
+ CTR_FLAG_REBUILD | \
+ CTR_FLAG_DAEMON_SLEEP | \
+ CTR_FLAG_MIN_RECOVERY_RATE | \
+ CTR_FLAG_MAX_RECOVERY_RATE | \
+ CTR_FLAG_MAX_WRITE_BEHIND | \
+ CTR_FLAG_STRIPE_CACHE | \
+ CTR_FLAG_REGION_SIZE | \
+ CTR_FLAG_DELTA_DISKS | \
+ CTR_FLAG_DATA_OFFSET)
+/* ...valid options definitions per raid level */
+
+/*
+ * Flags for rs->runtime_flags field
+ * (RT_FLAG prefix meaning "runtime flag")
+ *
+ * These are all internal and used to define runtime state,
+ * e.g. to prevent another resume from preresume processing
+ * the raid set all over again.
+ */
+#define RT_FLAG_RS_PRERESUMED 0
+#define RT_FLAG_RS_RESUMED 1
+#define RT_FLAG_RS_BITMAP_LOADED 2
+#define RT_FLAG_UPDATE_SBS 3
+#define RT_FLAG_RESHAPE_RS 4
+
+/* Number of 64-bit array elements needed for rebuild/failed disk bits */
+#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
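
Sanity check of the sizing arithmetic above, written out (assuming sizeof(uint64_t) == 8):

/*
 * (253 + (8 * 8 - 1)) / 8 / 8  =  (253 + 63) / 8 / 8
 *                              =  316 / 8 / 8  =  39 / 8  =  4 elements
 * 4 elements * 64 bits = 256 bits >= 253 possible raid devices
 */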
+
+/*
+ * raid set level, layout and chunk sectors backup/restore
+ */
+struct rs_layout {
+ int new_level;
+ int new_layout;
+ int new_chunk_sectors;
+};
struct raid_set {
struct dm_target *ti;
uint32_t bitmap_loaded;
- uint32_t ctr_flags;
+ uint32_t stripe_cache_entries;
+ unsigned long ctr_flags;
+ unsigned long runtime_flags;
+
+ uint64_t rebuild_disks[DISKS_ARRAY_ELEMS];
+
+ int raid_disks;
+ int delta_disks;
+ int data_offset;
+ int raid10_copies;
+ int requested_bitmap_chunk_sectors;
struct mddev md;
struct raid_type *raid_type;
@@ -73,82 +227,446 @@ struct raid_set {
struct raid_dev dev[0];
};
+static void rs_config_backup(struct raid_set *rs, struct rs_layout *l)
+{
+ struct mddev *mddev = &rs->md;
+
+ l->new_level = mddev->new_level;
+ l->new_layout = mddev->new_layout;
+ l->new_chunk_sectors = mddev->new_chunk_sectors;
+}
+
+static void rs_config_restore(struct raid_set *rs, struct rs_layout *l)
+{
+ struct mddev *mddev = &rs->md;
+
+ mddev->new_level = l->new_level;
+ mddev->new_layout = l->new_layout;
+ mddev->new_chunk_sectors = l->new_chunk_sectors;
+}
+
+/* raid10 algorithms (i.e. formats) */
+#define ALGORITHM_RAID10_DEFAULT 0
+#define ALGORITHM_RAID10_NEAR 1
+#define ALGORITHM_RAID10_OFFSET 2
+#define ALGORITHM_RAID10_FAR 3
+
/* Supported raid types and properties. */
static struct raid_type {
const char *name; /* RAID algorithm. */
const char *descr; /* Descriptor text for logging. */
- const unsigned parity_devs; /* # of parity devices. */
- const unsigned minimal_devs; /* minimal # of devices in set. */
- const unsigned level; /* RAID level. */
- const unsigned algorithm; /* RAID algorithm. */
+ const unsigned int parity_devs; /* # of parity devices. */
+ const unsigned int minimal_devs;/* minimal # of devices in set. */
+ const unsigned int level; /* RAID level. */
+ const unsigned int algorithm; /* RAID algorithm. */
} raid_types[] = {
- {"raid0", "RAID0 (striping)", 0, 2, 0, 0 /* NONE */},
- {"raid1", "RAID1 (mirroring)", 0, 2, 1, 0 /* NONE */},
- {"raid10", "RAID10 (striped mirrors)", 0, 2, 10, UINT_MAX /* Varies */},
- {"raid4", "RAID4 (dedicated parity disk)", 1, 2, 5, ALGORITHM_PARITY_0},
- {"raid5_la", "RAID5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC},
- {"raid5_ra", "RAID5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC},
- {"raid5_ls", "RAID5 (left symmetric)", 1, 2, 5, ALGORITHM_LEFT_SYMMETRIC},
- {"raid5_rs", "RAID5 (right symmetric)", 1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC},
- {"raid6_zr", "RAID6 (zero restart)", 2, 4, 6, ALGORITHM_ROTATING_ZERO_RESTART},
- {"raid6_nr", "RAID6 (N restart)", 2, 4, 6, ALGORITHM_ROTATING_N_RESTART},
- {"raid6_nc", "RAID6 (N continue)", 2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE}
+ {"raid0", "raid0 (striping)", 0, 2, 0, 0 /* NONE */},
+ {"raid1", "raid1 (mirroring)", 0, 2, 1, 0 /* NONE */},
+ {"raid10_far", "raid10 far (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_FAR},
+ {"raid10_offset", "raid10 offset (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_OFFSET},
+ {"raid10_near", "raid10 near (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_NEAR},
+ {"raid10", "raid10 (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_DEFAULT},
+ {"raid4", "raid4 (dedicated last parity disk)", 1, 2, 4, ALGORITHM_PARITY_N}, /* raid4 layout = raid5_n */
+ {"raid5_n", "raid5 (dedicated last parity disk)", 1, 2, 5, ALGORITHM_PARITY_N},
+ {"raid5_ls", "raid5 (left symmetric)", 1, 2, 5, ALGORITHM_LEFT_SYMMETRIC},
+ {"raid5_rs", "raid5 (right symmetric)", 1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC},
+ {"raid5_la", "raid5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC},
+ {"raid5_ra", "raid5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC},
+ {"raid6_zr", "raid6 (zero restart)", 2, 4, 6, ALGORITHM_ROTATING_ZERO_RESTART},
+ {"raid6_nr", "raid6 (N restart)", 2, 4, 6, ALGORITHM_ROTATING_N_RESTART},
+ {"raid6_nc", "raid6 (N continue)", 2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE},
+ {"raid6_n_6", "raid6 (dedicated parity/Q n/6)", 2, 4, 6, ALGORITHM_PARITY_N_6},
+ {"raid6_ls_6", "raid6 (left symmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_LEFT_SYMMETRIC_6},
+ {"raid6_rs_6", "raid6 (right symmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_RIGHT_SYMMETRIC_6},
+ {"raid6_la_6", "raid6 (left asymmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_LEFT_ASYMMETRIC_6},
+ {"raid6_ra_6", "raid6 (right asymmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_RIGHT_ASYMMETRIC_6}
+};
+
+/* True, if @v is in inclusive range [@min, @max] */
+static bool __within_range(long v, long min, long max)
+{
+ return v >= min && v <= max;
+}
+
+/* All table line arguments are defined here */
+static struct arg_name_flag {
+ const unsigned long flag;
+ const char *name;
+} __arg_name_flags[] = {
+ { CTR_FLAG_SYNC, "sync"},
+ { CTR_FLAG_NOSYNC, "nosync"},
+ { CTR_FLAG_REBUILD, "rebuild"},
+ { CTR_FLAG_DAEMON_SLEEP, "daemon_sleep"},
+ { CTR_FLAG_MIN_RECOVERY_RATE, "min_recovery_rate"},
+ { CTR_FLAG_MAX_RECOVERY_RATE, "max_recovery_rate"},
+ { CTR_FLAG_MAX_WRITE_BEHIND, "max_write_behind"},
+ { CTR_FLAG_WRITE_MOSTLY, "write_mostly"},
+ { CTR_FLAG_STRIPE_CACHE, "stripe_cache"},
+ { CTR_FLAG_REGION_SIZE, "region_size"},
+ { CTR_FLAG_RAID10_COPIES, "raid10_copies"},
+ { CTR_FLAG_RAID10_FORMAT, "raid10_format"},
+ { CTR_FLAG_DATA_OFFSET, "data_offset"},
+ { CTR_FLAG_DELTA_DISKS, "delta_disks"},
+ { CTR_FLAG_RAID10_USE_NEAR_SETS, "raid10_use_near_sets"},
};
-static char *raid10_md_layout_to_format(int layout)
+/* Return argument name string for given @flag */
+static const char *dm_raid_arg_name_by_flag(const uint32_t flag)
+{
+ if (hweight32(flag) == 1) {
+ struct arg_name_flag *anf = __arg_name_flags + ARRAY_SIZE(__arg_name_flags);
+
+ while (anf-- > __arg_name_flags)
+ if (flag & anf->flag)
+ return anf->name;
+
+ } else
+ DMERR("%s called with more than one flag!", __func__);
+
+ return NULL;
+}
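
Usage note for the lookup above: exactly one bit must be set, so dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD) returns "rebuild", whereas an OR of several CTR_FLAG_* values takes the DMERR branch and returns NULL.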
+
+/*
+ * Bool helpers to test for various raid levels of a raid set.
+ * They test the level as reported by the superblock rather than
+ * the level of the raid_type requested via the constructor.
+ */
+/* Return true, if raid set in @rs is raid0 */
+static bool rs_is_raid0(struct raid_set *rs)
+{
+ return !rs->md.level;
+}
+
+/* Return true, if raid set in @rs is raid1 */
+static bool rs_is_raid1(struct raid_set *rs)
+{
+ return rs->md.level == 1;
+}
+
+/* Return true, if raid set in @rs is raid10 */
+static bool rs_is_raid10(struct raid_set *rs)
+{
+ return rs->md.level == 10;
+}
+
+/* Return true, if raid set in @rs is level 6 */
+static bool rs_is_raid6(struct raid_set *rs)
+{
+ return rs->md.level == 6;
+}
+
+/* Return true, if raid set in @rs is level 4, 5 or 6 */
+static bool rs_is_raid456(struct raid_set *rs)
+{
+ return __within_range(rs->md.level, 4, 6);
+}
+
+/* Return true, if raid set in @rs is reshapable */
+static bool __is_raid10_far(int layout);
+static bool rs_is_reshapable(struct raid_set *rs)
+{
+ return rs_is_raid456(rs) ||
+ (rs_is_raid10(rs) && !__is_raid10_far(rs->md.new_layout));
+}
+
+/* Return true, if raid set in @rs is recovering */
+static bool rs_is_recovering(struct raid_set *rs)
+{
+ return rs->md.recovery_cp < rs->dev[0].rdev.sectors;
+}
+
+/* Return true, if raid set in @rs is reshaping */
+static bool rs_is_reshaping(struct raid_set *rs)
+{
+ return rs->md.reshape_position != MaxSector;
+}
+
+/*
+ * bool helpers to test for various raid levels of a raid type @rt
+ */
+
+/* Return true, if raid type in @rt is raid0 */
+static bool rt_is_raid0(struct raid_type *rt)
+{
+ return !rt->level;
+}
+
+/* Return true, if raid type in @rt is raid1 */
+static bool rt_is_raid1(struct raid_type *rt)
+{
+ return rt->level == 1;
+}
+
+/* Return true, if raid type in @rt is raid10 */
+static bool rt_is_raid10(struct raid_type *rt)
+{
+ return rt->level == 10;
+}
+
+/* Return true, if raid type in @rt is raid4/5 */
+static bool rt_is_raid45(struct raid_type *rt)
+{
+ return __within_range(rt->level, 4, 5);
+}
+
+/* Return true, if raid type in @rt is raid6 */
+static bool rt_is_raid6(struct raid_type *rt)
+{
+ return rt->level == 6;
+}
+
+/* Return true, if raid type in @rt is raid4/5/6 */
+static bool rt_is_raid456(struct raid_type *rt)
+{
+ return __within_range(rt->level, 4, 6);
+}
+/* END: raid level bools */
+
+/* Return valid ctr flags for the raid level of @rs */
+static unsigned long __valid_flags(struct raid_set *rs)
+{
+ if (rt_is_raid0(rs->raid_type))
+ return RAID0_VALID_FLAGS;
+ else if (rt_is_raid1(rs->raid_type))
+ return RAID1_VALID_FLAGS;
+ else if (rt_is_raid10(rs->raid_type))
+ return RAID10_VALID_FLAGS;
+ else if (rt_is_raid45(rs->raid_type))
+ return RAID45_VALID_FLAGS;
+ else if (rt_is_raid6(rs->raid_type))
+ return RAID6_VALID_FLAGS;
+
+ return 0;
+}
+
+/*
+ * Check for valid flags set on @rs
+ *
+ * Has to be called after parsing of the ctr flags!
+ */
+static int rs_check_for_valid_flags(struct raid_set *rs)
+{
+ if (rs->ctr_flags & ~__valid_flags(rs)) {
+ rs->ti->error = "Invalid flags combination";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* MD raid10 bit definitions and helpers */
+#define RAID10_OFFSET (1 << 16) /* stripes with data copies are adjacent on devices */
+#define RAID10_BROCKEN_USE_FAR_SETS (1 << 17) /* Broken in raid10.c: use sets instead of whole stripe rotation */
+#define RAID10_USE_FAR_SETS (1 << 18) /* Use sets instead of whole stripe rotation */
+#define RAID10_FAR_COPIES_SHIFT 8 /* raid10 # far copies shift (2nd byte of layout) */
+
+/* Return md raid10 near copies for @layout */
+static unsigned int __raid10_near_copies(int layout)
+{
+ return layout & 0xFF;
+}
+
+/* Return md raid10 far copies for @layout */
+static unsigned int __raid10_far_copies(int layout)
+{
+ return __raid10_near_copies(layout >> RAID10_FAR_COPIES_SHIFT);
+}
+
+/* Return true if md raid10 offset for @layout */
+static bool __is_raid10_offset(int layout)
+{
+ return !!(layout & RAID10_OFFSET);
+}
+
+/* Return true if md raid10 near for @layout */
+static bool __is_raid10_near(int layout)
+{
+ return !__is_raid10_offset(layout) && __raid10_near_copies(layout) > 1;
+}
+
+/* Return true if md raid10 far for @layout */
+static bool __is_raid10_far(int layout)
+{
+ return !__is_raid10_offset(layout) && __raid10_far_copies(layout) > 1;
+}
+
+/* Return md raid10 layout string for @layout */
+static const char *raid10_md_layout_to_format(int layout)
{
/*
- * Bit 16 and 17 stand for "offset" and "use_far_sets"
+ * Bit 16 stands for "offset"
+ * (i.e. adjacent stripes hold copies)
+ *
* Refer to MD's raid10.c for details
*/
- if ((layout & 0x10000) && (layout & 0x20000))
+ if (__is_raid10_offset(layout))
return "offset";
- if ((layout & 0xFF) > 1)
+ if (__raid10_near_copies(layout) > 1)
return "near";
+ WARN_ON(__raid10_far_copies(layout) < 2);
+
return "far";
}
-static unsigned raid10_md_layout_to_copies(int layout)
+/* Return md raid10 algorithm for @name */
+static int raid10_name_to_format(const char *name)
+{
+ if (!strcasecmp(name, "near"))
+ return ALGORITHM_RAID10_NEAR;
+ else if (!strcasecmp(name, "offset"))
+ return ALGORITHM_RAID10_OFFSET;
+ else if (!strcasecmp(name, "far"))
+ return ALGORITHM_RAID10_FAR;
+
+ return -EINVAL;
+}
+
+/* Return md raid10 copies for @layout */
+static unsigned int raid10_md_layout_to_copies(int layout)
{
- if ((layout & 0xFF) > 1)
- return layout & 0xFF;
- return (layout >> 8) & 0xFF;
+ return max(__raid10_near_copies(layout), __raid10_far_copies(layout));
}
-static int raid10_format_to_md_layout(char *format, unsigned copies)
+/* Return md raid10 format id for @format string */
+static int raid10_format_to_md_layout(struct raid_set *rs,
+ unsigned int algorithm,
+ unsigned int copies)
{
- unsigned n = 1, f = 1;
+ unsigned int n = 1, f = 1, r = 0;
- if (!strcasecmp("near", format))
+ /*
+ * MD resilience flaw:
+ *
+ * enabling use_far_sets for far/offset formats causes copies
+ * to be colocated on the same devs together with their origins!
+ *
+ * -> disable it for now in the definition above
+ */
+ if (algorithm == ALGORITHM_RAID10_DEFAULT ||
+ algorithm == ALGORITHM_RAID10_NEAR)
n = copies;
- else
+
+ else if (algorithm == ALGORITHM_RAID10_OFFSET) {
f = copies;
+ r = RAID10_OFFSET;
+ if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
+ r |= RAID10_USE_FAR_SETS;
- if (!strcasecmp("offset", format))
- return 0x30000 | (f << 8) | n;
+ } else if (algorithm == ALGORITHM_RAID10_FAR) {
+ f = copies;
+ r = !RAID10_OFFSET;
+ if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
+ r |= RAID10_USE_FAR_SETS;
- if (!strcasecmp("far", format))
- return 0x20000 | (f << 8) | n;
+ } else
+ return -EINVAL;
- return (f << 8) | n;
+ return r | (f << RAID10_FAR_COPIES_SHIFT) | n;
}
+/* END: MD raid10 bit definitions and helpers */
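
Worked examples of the encoding above, computed from the definitions (raid10_use_near_sets not given, so USE_FAR_SETS is set for far/offset):

/*
 * near,   2 copies:  (1 << 8) | 2                                   = 0x00102
 * far,    2 copies:  RAID10_USE_FAR_SETS | (2 << 8) | 1             = 0x40201
 * offset, 2 copies:  RAID10_OFFSET | RAID10_USE_FAR_SETS
 *                      | (2 << 8) | 1                               = 0x50201
 *
 * Decoding: __raid10_near_copies() reads the low byte,
 * __raid10_far_copies() the second byte.
 */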
-static struct raid_type *get_raid_type(char *name)
+/* Check for any of the raid10 algorithms */
+static bool __got_raid10(struct raid_type *rtp, const int layout)
{
- int i;
+ if (rtp->level == 10) {
+ switch (rtp->algorithm) {
+ case ALGORITHM_RAID10_DEFAULT:
+ case ALGORITHM_RAID10_NEAR:
+ return __is_raid10_near(layout);
+ case ALGORITHM_RAID10_OFFSET:
+ return __is_raid10_offset(layout);
+ case ALGORITHM_RAID10_FAR:
+ return __is_raid10_far(layout);
+ default:
+ break;
+ }
+ }
- for (i = 0; i < ARRAY_SIZE(raid_types); i++)
- if (!strcmp(raid_types[i].name, name))
- return &raid_types[i];
+ return false;
+}
+
+/* Return raid_type for @name */
+static struct raid_type *get_raid_type(const char *name)
+{
+ struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types);
+
+ while (rtp-- > raid_types)
+ if (!strcasecmp(rtp->name, name))
+ return rtp;
return NULL;
}
-static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *raid_type, unsigned raid_devs)
+/* Return raid_type derived from @level and @layout */
+static struct raid_type *get_raid_type_by_ll(const int level, const int layout)
+{
+ struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types);
+
+ while (rtp-- > raid_types) {
+ /* RAID10 special checks based on @layout flags/properties */
+ if (rtp->level == level &&
+ (__got_raid10(rtp, layout) || rtp->algorithm == layout))
+ return rtp;
+ }
+
+ return NULL;
+}
+
+/*
+ * Conditionally change bdev capacity of @rs
+ * in case of a disk add/remove reshape
+ */
+static void rs_set_capacity(struct raid_set *rs)
+{
+ struct mddev *mddev = &rs->md;
+ struct md_rdev *rdev;
+ struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table));
+
+ /*
+ * raid10 sets rdev->sector to the device size, which
+ * is unintended in case of out-of-place reshaping
+ */
+ rdev_for_each(rdev, mddev)
+ rdev->sectors = mddev->dev_sectors;
+
+ set_capacity(gendisk, mddev->array_sectors);
+ revalidate_disk(gendisk);
+}
+
+/*
+ * Set the mddev properties in @rs to the current
+ * ones retrieved from the freshest superblock
+ */
+static void rs_set_cur(struct raid_set *rs)
{
- unsigned i;
+ struct mddev *mddev = &rs->md;
+
+ mddev->new_level = mddev->level;
+ mddev->new_layout = mddev->layout;
+ mddev->new_chunk_sectors = mddev->chunk_sectors;
+}
+
+/*
+ * Set the mddev properties in @rs to the new
+ * ones requested by the ctr
+ */
+static void rs_set_new(struct raid_set *rs)
+{
+ struct mddev *mddev = &rs->md;
+
+ mddev->level = mddev->new_level;
+ mddev->layout = mddev->new_layout;
+ mddev->chunk_sectors = mddev->new_chunk_sectors;
+ mddev->raid_disks = rs->raid_disks;
+ mddev->delta_disks = 0;
+}
+
+static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *raid_type,
+ unsigned int raid_devs)
+{
+ unsigned int i;
struct raid_set *rs;
if (raid_devs <= raid_type->parity_devs) {
@@ -164,15 +682,19 @@ static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *ra
mddev_init(&rs->md);
+ rs->raid_disks = raid_devs;
+ rs->delta_disks = 0;
+
rs->ti = ti;
rs->raid_type = raid_type;
+ rs->stripe_cache_entries = 256;
rs->md.raid_disks = raid_devs;
rs->md.level = raid_type->level;
rs->md.new_level = rs->md.level;
rs->md.layout = raid_type->algorithm;
rs->md.new_layout = rs->md.layout;
rs->md.delta_disks = 0;
- rs->md.recovery_cp = 0;
+ rs->md.recovery_cp = MaxSector;
for (i = 0; i < raid_devs; i++)
md_rdev_init(&rs->dev[i].rdev);
@@ -189,11 +711,11 @@ static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *ra
return rs;
}
-static void context_free(struct raid_set *rs)
+static void raid_set_free(struct raid_set *rs)
{
int i;
- for (i = 0; i < rs->md.raid_disks; i++) {
+ for (i = 0; i < rs->raid_disks; i++) {
if (rs->dev[i].meta_dev)
dm_put_device(rs->ti, rs->dev[i].meta_dev);
md_rdev_clear(&rs->dev[i].rdev);
@@ -218,16 +740,22 @@ static void context_free(struct raid_set *rs)
* <meta_dev> -
*
* This code parses those words. If there is a failure,
- * the caller must use context_free to unwind the operations.
+ * the caller must use raid_set_free() to unwind the operations.
*/
-static int dev_parms(struct raid_set *rs, char **argv)
+static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as)
{
int i;
int rebuild = 0;
int metadata_available = 0;
- int ret = 0;
+ int r = 0;
+ const char *arg;
+
+ /* Shift off the number-of-raid-devices argument to get to the dev pairs */
+ arg = dm_shift_arg(as);
+ if (!arg)
+ return -EINVAL;
- for (i = 0; i < rs->md.raid_disks; i++, argv += 2) {
+ for (i = 0; i < rs->raid_disks; i++) {
rs->dev[i].rdev.raid_disk = i;
rs->dev[i].meta_dev = NULL;
@@ -240,39 +768,49 @@ static int dev_parms(struct raid_set *rs, char **argv)
rs->dev[i].rdev.data_offset = 0;
rs->dev[i].rdev.mddev = &rs->md;
- if (strcmp(argv[0], "-")) {
- ret = dm_get_device(rs->ti, argv[0],
- dm_table_get_mode(rs->ti->table),
- &rs->dev[i].meta_dev);
- rs->ti->error = "RAID metadata device lookup failure";
- if (ret)
- return ret;
+ arg = dm_shift_arg(as);
+ if (!arg)
+ return -EINVAL;
+
+ if (strcmp(arg, "-")) {
+ r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
+ &rs->dev[i].meta_dev);
+ if (r) {
+ rs->ti->error = "RAID metadata device lookup failure";
+ return r;
+ }
rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
- if (!rs->dev[i].rdev.sb_page)
+ if (!rs->dev[i].rdev.sb_page) {
+ rs->ti->error = "Failed to allocate superblock page";
return -ENOMEM;
+ }
}
- if (!strcmp(argv[1], "-")) {
+ arg = dm_shift_arg(as);
+ if (!arg)
+ return -EINVAL;
+
+ if (!strcmp(arg, "-")) {
if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
(!rs->dev[i].rdev.recovery_offset)) {
rs->ti->error = "Drive designated for rebuild not specified";
return -EINVAL;
}
- rs->ti->error = "No data device supplied with metadata device";
- if (rs->dev[i].meta_dev)
+ if (rs->dev[i].meta_dev) {
+ rs->ti->error = "No data device supplied with metadata device";
return -EINVAL;
+ }
continue;
}
- ret = dm_get_device(rs->ti, argv[1],
- dm_table_get_mode(rs->ti->table),
- &rs->dev[i].data_dev);
- if (ret) {
+ r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
+ &rs->dev[i].data_dev);
+ if (r) {
rs->ti->error = "RAID device lookup failure";
- return ret;
+ return r;
}
if (rs->dev[i].meta_dev) {
@@ -280,7 +818,7 @@ static int dev_parms(struct raid_set *rs, char **argv)
rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
}
rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
- list_add(&rs->dev[i].rdev.same_set, &rs->md.disks);
+ list_add_tail(&rs->dev[i].rdev.same_set, &rs->md.disks);
if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
rebuild++;
}
@@ -301,8 +839,7 @@ static int dev_parms(struct raid_set *rs, char **argv)
*
* User could specify 'nosync' option if desperate.
*/
- DMERR("Unable to rebuild drive while array is not in-sync");
- rs->ti->error = "RAID device lookup failure";
+ rs->ti->error = "Unable to rebuild drive while array is not in-sync";
return -EINVAL;
}
@@ -323,9 +860,12 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
{
unsigned long min_region_size = rs->ti->len / (1 << 21);
+ if (rs_is_raid0(rs))
+ return 0;
+
if (!region_size) {
/*
- * Choose a reasonable default. All figures in sectors.
+ * Choose a reasonable default. All figures in sectors.
*/
if (min_region_size > (1 << 13)) {
/* If not a power of 2, make it the next power of 2 */
@@ -366,7 +906,7 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
/*
* Convert sectors to bytes.
*/
- rs->md.bitmap_info.chunksize = (region_size << 9);
+ rs->md.bitmap_info.chunksize = to_bytes(region_size);
return 0;
}
@@ -382,9 +922,9 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
*/
static int validate_raid_redundancy(struct raid_set *rs)
{
- unsigned i, rebuild_cnt = 0;
- unsigned rebuilds_per_group = 0, copies, d;
- unsigned group_size, last_group_start;
+ unsigned int i, rebuild_cnt = 0;
+ unsigned int rebuilds_per_group = 0, copies;
+ unsigned int group_size, last_group_start;
for (i = 0; i < rs->md.raid_disks; i++)
if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
@@ -392,6 +932,8 @@ static int validate_raid_redundancy(struct raid_set *rs)
rebuild_cnt++;
switch (rs->raid_type->level) {
+ case 0:
+ break;
case 1:
if (rebuild_cnt >= rs->md.raid_disks)
goto too_many;
@@ -403,7 +945,7 @@ static int validate_raid_redundancy(struct raid_set *rs)
goto too_many;
break;
case 10:
- copies = raid10_md_layout_to_copies(rs->md.layout);
+ copies = raid10_md_layout_to_copies(rs->md.new_layout);
if (rebuild_cnt < copies)
break;
@@ -417,17 +959,16 @@ static int validate_raid_redundancy(struct raid_set *rs)
* simple case where the number of devices is a multiple of the
* number of copies, we must also handle cases where the number
* of devices is not a multiple of the number of copies.
- * E.g. dev1 dev2 dev3 dev4 dev5
- * A A B B C
- * C D D E E
+ * E.g. dev1 dev2 dev3 dev4 dev5
+ * A A B B C
+ * C D D E E
*/
- if (!strcmp("near", raid10_md_layout_to_format(rs->md.layout))) {
- for (i = 0; i < rs->md.raid_disks * copies; i++) {
+ if (__is_raid10_near(rs->md.new_layout)) {
+ for (i = 0; i < rs->md.raid_disks; i++) {
if (!(i % copies))
rebuilds_per_group = 0;
- d = i % rs->md.raid_disks;
- if ((!rs->dev[d].rdev.sb_page ||
- !test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
+ if ((!rs->dev[i].rdev.sb_page ||
+ !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
(++rebuilds_per_group >= copies))
goto too_many;
}
@@ -442,7 +983,7 @@ static int validate_raid_redundancy(struct raid_set *rs)
* use the 'use_far_sets' variant.)
*
* This check is somewhat complicated by the need to account
- * for arrays that are not a multiple of (far) copies. This
+ * for arrays that are not a multiple of (far) copies. This
* results in the need to treat the last (potentially larger)
* set differently.
*/
@@ -475,42 +1016,48 @@ too_many:
*
* Argument definitions
* <chunk_size> The number of sectors per disk that
- * will form the "stripe"
+ * will form the "stripe"
* [[no]sync] Force or prevent recovery of the
- * entire array
+ * entire array
* [rebuild <idx>] Rebuild the drive indicated by the index
* [daemon_sleep <ms>] Time between bitmap daemon work to
- * clear bits
+ * clear bits
* [min_recovery_rate <kB/sec/disk>] Throttle RAID initialization
* [max_recovery_rate <kB/sec/disk>] Throttle RAID initialization
* [write_mostly <idx>] Indicate a write mostly drive via index
* [max_write_behind <sectors>] See '-write-behind=' (man mdadm)
* [stripe_cache <sectors>] Stripe cache size for higher RAIDs
- * [region_size <sectors>] Defines granularity of bitmap
+ * [region_size <sectors>] Defines granularity of bitmap
*
* RAID10-only options:
- * [raid10_copies <# copies>] Number of copies. (Default: 2)
+ * [raid10_copies <# copies>] Number of copies. (Default: 2)
* [raid10_format <near|far|offset>] Layout algorithm. (Default: near)
*/
-static int parse_raid_params(struct raid_set *rs, char **argv,
- unsigned num_raid_params)
-{
- char *raid10_format = "near";
- unsigned raid10_copies = 2;
- unsigned i;
- unsigned long value, region_size = 0;
- sector_t sectors_per_dev = rs->ti->len;
+static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
+ unsigned int num_raid_params)
+{
+ int value, raid10_format = ALGORITHM_RAID10_DEFAULT;
+ unsigned int raid10_copies = 2;
+ unsigned int i, write_mostly = 0;
+ unsigned int region_size = 0;
sector_t max_io_len;
- char *key;
+ const char *arg, *key;
+ struct raid_dev *rd;
+ struct raid_type *rt = rs->raid_type;
+
+ arg = dm_shift_arg(as);
+ num_raid_params--; /* Account for chunk_size argument */
+
+ if (kstrtoint(arg, 10, &value) < 0) {
+ rs->ti->error = "Bad numerical argument given for chunk_size";
+ return -EINVAL;
+ }
/*
* First, parse the in-order required arguments
* "chunk_size" is the only argument of this type.
*/
- if ((kstrtoul(argv[0], 10, &value) < 0)) {
- rs->ti->error = "Bad chunk size";
- return -EINVAL;
- } else if (rs->raid_type->level == 1) {
+ if (rt_is_raid1(rt)) {
if (value)
DMERR("Ignoring chunk size parameter for RAID 1");
value = 0;
@@ -523,8 +1070,6 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
}
rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;
- argv++;
- num_raid_params--;
/*
* We set each individual device as In_sync with a completed
@@ -532,18 +1077,18 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
* replacement then one of the following cases applies:
*
* 1) User specifies 'rebuild'.
- * - Device is reset when param is read.
+ * - Device is reset when param is read.
* 2) A new device is supplied.
- * - No matching superblock found, resets device.
+ * - No matching superblock found, resets device.
* 3) Device failure was transient and returns on reload.
- * - Failure noticed, resets device for bitmap replay.
+ * - Failure noticed, resets device for bitmap replay.
* 4) Device hadn't completed recovery after previous failure.
- * - Superblock is read and overrides recovery_offset.
+ * - Superblock is read and overrides recovery_offset.
*
* What is found in the superblocks of the devices is always
* authoritative, unless 'rebuild' or '[no]sync' was specified.
*/
- for (i = 0; i < rs->md.raid_disks; i++) {
+ for (i = 0; i < rs->raid_disks; i++) {
set_bit(In_sync, &rs->dev[i].rdev.flags);
rs->dev[i].rdev.recovery_offset = MaxSector;
}
@@ -552,72 +1097,112 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
* Second, parse the unordered optional arguments
*/
for (i = 0; i < num_raid_params; i++) {
- if (!strcasecmp(argv[i], "nosync")) {
- rs->md.recovery_cp = MaxSector;
- rs->ctr_flags |= CTR_FLAG_NOSYNC;
+ key = dm_shift_arg(as);
+ if (!key) {
+ rs->ti->error = "Not enough raid parameters given";
+ return -EINVAL;
+ }
+
+ if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC))) {
+ if (test_and_set_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
+ rs->ti->error = "Only one 'nosync' argument allowed";
+ return -EINVAL;
+ }
+ continue;
+ }
+ if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_SYNC))) {
+ if (test_and_set_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) {
+ rs->ti->error = "Only one 'sync' argument allowed";
+ return -EINVAL;
+ }
continue;
}
- if (!strcasecmp(argv[i], "sync")) {
- rs->md.recovery_cp = 0;
- rs->ctr_flags |= CTR_FLAG_SYNC;
+ if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_USE_NEAR_SETS))) {
+ if (test_and_set_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) {
+ rs->ti->error = "Only one 'raid10_use_new_sets' argument allowed";
+ return -EINVAL;
+ }
continue;
}
- /* The rest of the optional arguments come in key/value pairs */
- if ((i + 1) >= num_raid_params) {
+ arg = dm_shift_arg(as);
+ i++; /* Account for the argument pairs */
+ if (!arg) {
rs->ti->error = "Wrong number of raid parameters given";
return -EINVAL;
}
- key = argv[i++];
+ /*
+ * Parameters that take a string value are checked here.
+ */
- /* Parameters that take a string value are checked here. */
- if (!strcasecmp(key, "raid10_format")) {
- if (rs->raid_type->level != 10) {
+ if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT))) {
+ if (test_and_set_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags)) {
+ rs->ti->error = "Only one 'raid10_format' argument pair allowed";
+ return -EINVAL;
+ }
+ if (!rt_is_raid10(rt)) {
rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
return -EINVAL;
}
- if (strcmp("near", argv[i]) &&
- strcmp("far", argv[i]) &&
- strcmp("offset", argv[i])) {
+ raid10_format = raid10_name_to_format(arg);
+ if (raid10_format < 0) {
rs->ti->error = "Invalid 'raid10_format' value given";
- return -EINVAL;
+ return raid10_format;
}
- raid10_format = argv[i];
- rs->ctr_flags |= CTR_FLAG_RAID10_FORMAT;
continue;
}
- if (kstrtoul(argv[i], 10, &value) < 0) {
+ if (kstrtoint(arg, 10, &value) < 0) {
rs->ti->error = "Bad numerical argument given in raid params";
return -EINVAL;
}
- /* Parameters that take a numeric value are checked here */
- if (!strcasecmp(key, "rebuild")) {
- if (value >= rs->md.raid_disks) {
+ if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD))) {
+ /*
+ * "rebuild" is being passed in by userspace to provide
+ * indexes of replaced devices and to set up additional
+ * devices on raid level takeover.
+ */
+ if (!__within_range(value, 0, rs->raid_disks - 1)) {
rs->ti->error = "Invalid rebuild index given";
return -EINVAL;
}
- clear_bit(In_sync, &rs->dev[value].rdev.flags);
- rs->dev[value].rdev.recovery_offset = 0;
- rs->ctr_flags |= CTR_FLAG_REBUILD;
- } else if (!strcasecmp(key, "write_mostly")) {
- if (rs->raid_type->level != 1) {
+
+ if (test_and_set_bit(value, (void *) rs->rebuild_disks)) {
+ rs->ti->error = "rebuild for this index already given";
+ return -EINVAL;
+ }
+
+ rd = rs->dev + value;
+ clear_bit(In_sync, &rd->rdev.flags);
+ clear_bit(Faulty, &rd->rdev.flags);
+ rd->rdev.recovery_offset = 0;
+ set_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags);
+ } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY))) {
+ if (!rt_is_raid1(rt)) {
rs->ti->error = "write_mostly option is only valid for RAID1";
return -EINVAL;
}
- if (value >= rs->md.raid_disks) {
- rs->ti->error = "Invalid write_mostly drive index given";
+
+ if (!__within_range(value, 0, rs->md.raid_disks - 1)) {
+ rs->ti->error = "Invalid write_mostly index given";
return -EINVAL;
}
+
+ write_mostly++;
set_bit(WriteMostly, &rs->dev[value].rdev.flags);
- } else if (!strcasecmp(key, "max_write_behind")) {
- if (rs->raid_type->level != 1) {
+ set_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags);
+ } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_WRITE_BEHIND))) {
+ if (!rt_is_raid1(rt)) {
rs->ti->error = "max_write_behind option is only valid for RAID1";
return -EINVAL;
}
- rs->ctr_flags |= CTR_FLAG_MAX_WRITE_BEHIND;
+
+ if (test_and_set_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags)) {
+ rs->ti->error = "Only one max_write_behind argument pair allowed";
+ return -EINVAL;
+ }
/*
* In device-mapper, we specify things in sectors, but
@@ -628,64 +1213,121 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
rs->ti->error = "Max write-behind limit out of range";
return -EINVAL;
}
+
rs->md.bitmap_info.max_write_behind = value;
- } else if (!strcasecmp(key, "daemon_sleep")) {
- rs->ctr_flags |= CTR_FLAG_DAEMON_SLEEP;
+ } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP))) {
+ if (test_and_set_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags)) {
+ rs->ti->error = "Only one daemon_sleep argument pair allowed";
+ return -EINVAL;
+ }
if (!value || (value > MAX_SCHEDULE_TIMEOUT)) {
rs->ti->error = "daemon sleep period out of range";
return -EINVAL;
}
rs->md.bitmap_info.daemon_sleep = value;
- } else if (!strcasecmp(key, "stripe_cache")) {
- rs->ctr_flags |= CTR_FLAG_STRIPE_CACHE;
+ } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET))) {
+ /* Userspace passes new data_offset after having extended the data image LV */
+ if (test_and_set_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) {
+ rs->ti->error = "Only one data_offset argument pair allowed";
+ return -EINVAL;
+ }
+ /* Ensure sensible data offset */
+ if (value < 0 ||
+ (value && (value < MIN_FREE_RESHAPE_SPACE || value % to_sector(PAGE_SIZE)))) {
+ rs->ti->error = "Bogus data_offset value";
+ return -EINVAL;
+ }
+ rs->data_offset = value;
+ } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DELTA_DISKS))) {
+ /* Define the +/-# of disks to add to/remove from the given raid set */
+ if (test_and_set_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) {
+ rs->ti->error = "Only one delta_disks argument pair allowed";
+ return -EINVAL;
+ }
+ /* Ensure we stay within MAX_RAID_DEVICES and the raid type's minimal_devs */
+ if (!__within_range(abs(value), 1, MAX_RAID_DEVICES - rt->minimal_devs)) {
+ rs->ti->error = "Too many delta_disk requested";
+ return -EINVAL;
+ }
- /*
- * In device-mapper, we specify things in sectors, but
- * MD records this value in kB
- */
- value /= 2;
+ rs->delta_disks = value;
+ } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_STRIPE_CACHE))) {
+ if (test_and_set_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags)) {
+ rs->ti->error = "Only one stripe_cache argument pair allowed";
+ return -EINVAL;
+ }
- if ((rs->raid_type->level != 5) &&
- (rs->raid_type->level != 6)) {
+ if (!rt_is_raid456(rt)) {
rs->ti->error = "Inappropriate argument: stripe_cache";
return -EINVAL;
}
- if (raid5_set_cache_size(&rs->md, (int)value)) {
- rs->ti->error = "Bad stripe_cache size";
+
+ rs->stripe_cache_entries = value;
+ } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE))) {
+ if (test_and_set_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags)) {
+ rs->ti->error = "Only one min_recovery_rate argument pair allowed";
return -EINVAL;
}
- } else if (!strcasecmp(key, "min_recovery_rate")) {
- rs->ctr_flags |= CTR_FLAG_MIN_RECOVERY_RATE;
if (value > INT_MAX) {
rs->ti->error = "min_recovery_rate out of range";
return -EINVAL;
}
rs->md.sync_speed_min = (int)value;
- } else if (!strcasecmp(key, "max_recovery_rate")) {
- rs->ctr_flags |= CTR_FLAG_MAX_RECOVERY_RATE;
+ } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE))) {
+ if (test_and_set_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags)) {
+ rs->ti->error = "Only one max_recovery_rate argument pair allowed";
+ return -EINVAL;
+ }
if (value > INT_MAX) {
rs->ti->error = "max_recovery_rate out of range";
return -EINVAL;
}
rs->md.sync_speed_max = (int)value;
- } else if (!strcasecmp(key, "region_size")) {
- rs->ctr_flags |= CTR_FLAG_REGION_SIZE;
+ } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_REGION_SIZE))) {
+ if (test_and_set_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags)) {
+ rs->ti->error = "Only one region_size argument pair allowed";
+ return -EINVAL;
+ }
+
region_size = value;
- } else if (!strcasecmp(key, "raid10_copies") &&
- (rs->raid_type->level == 10)) {
- if ((value < 2) || (value > 0xFF)) {
+ rs->requested_bitmap_chunk_sectors = value;
+ } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_COPIES))) {
+ if (test_and_set_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags)) {
+ rs->ti->error = "Only one raid10_copies argument pair allowed";
+ return -EINVAL;
+ }
+
+ if (!__within_range(value, 2, rs->md.raid_disks)) {
rs->ti->error = "Bad value for 'raid10_copies'";
return -EINVAL;
}
- rs->ctr_flags |= CTR_FLAG_RAID10_COPIES;
+
raid10_copies = value;
} else {
DMERR("Unable to parse RAID parameter: %s", key);
- rs->ti->error = "Unable to parse RAID parameters";
+ rs->ti->error = "Unable to parse RAID parameter";
return -EINVAL;
}
}
+ if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) &&
+ test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
+ rs->ti->error = "sync and nosync are mutually exclusive";
+ return -EINVAL;
+ }
+
+ if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) &&
+ (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) ||
+ test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))) {
+ rs->ti->error = "sync/nosync and rebuild are mutually exclusive";
+ return -EINVAL;
+ }
+
+ if (write_mostly >= rs->md.raid_disks) {
+ rs->ti->error = "Can't set all raid1 devices to write_mostly";
+ return -EINVAL;
+ }
+
if (validate_region_size(rs, region_size))
return -EINVAL;
@@ -697,47 +1339,193 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
if (dm_set_target_max_io_len(rs->ti, max_io_len))
return -EINVAL;
- if (rs->raid_type->level == 10) {
+ if (rt_is_raid10(rt)) {
if (raid10_copies > rs->md.raid_disks) {
rs->ti->error = "Not enough devices to satisfy specification";
return -EINVAL;
}
- /*
- * If the format is not "near", we only support
- * two copies at the moment.
- */
- if (strcmp("near", raid10_format) && (raid10_copies > 2)) {
- rs->ti->error = "Too many copies for given RAID10 format.";
+ rs->md.new_layout = raid10_format_to_md_layout(rs, raid10_format, raid10_copies);
+ if (rs->md.new_layout < 0) {
+ rs->ti->error = "Error getting raid10 format";
+ return rs->md.new_layout;
+ }
+
+ rt = get_raid_type_by_ll(10, rs->md.new_layout);
+ if (!rt) {
+ rs->ti->error = "Failed to recognize new raid10 layout";
return -EINVAL;
}
- /* (Len * #mirrors) / #devices */
- sectors_per_dev = rs->ti->len * raid10_copies;
- sector_div(sectors_per_dev, rs->md.raid_disks);
-
- rs->md.layout = raid10_format_to_md_layout(raid10_format,
- raid10_copies);
- rs->md.new_layout = rs->md.layout;
- } else if ((!rs->raid_type->level || rs->raid_type->level > 1) &&
- sector_div(sectors_per_dev,
- (rs->md.raid_disks - rs->raid_type->parity_devs))) {
- rs->ti->error = "Target length not divisible by number of data devices";
- return -EINVAL;
+ if ((rt->algorithm == ALGORITHM_RAID10_DEFAULT ||
+ rt->algorithm == ALGORITHM_RAID10_NEAR) &&
+ test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) {
+ rs->ti->error = "RAID10 format 'near' and 'raid10_use_near_sets' are incompatible";
+ return -EINVAL;
+ }
}
- rs->md.dev_sectors = sectors_per_dev;
+
+ rs->raid10_copies = raid10_copies;
/* Assume there are no metadata devices until the drives are parsed */
rs->md.persistent = 0;
rs->md.external = 1;
+ /* Check, if any invalid ctr arguments have been passed in for the raid level */
+ return rs_check_for_valid_flags(rs);
+}
+
+/* Set raid4/5/6 cache size */
+static int rs_set_raid456_stripe_cache(struct raid_set *rs)
+{
+ int r;
+ struct r5conf *conf;
+ struct mddev *mddev = &rs->md;
+ uint32_t min_stripes = max(mddev->chunk_sectors, mddev->new_chunk_sectors) / 2;
+ uint32_t nr_stripes = rs->stripe_cache_entries;
+
+ if (!rt_is_raid456(rs->raid_type)) {
+ rs->ti->error = "Inappropriate raid level; cannot change stripe_cache size";
+ return -EINVAL;
+ }
+
+ if (nr_stripes < min_stripes) {
+ DMINFO("Adjusting requested %u stripe cache entries to %u to suit stripe size",
+ nr_stripes, min_stripes);
+ nr_stripes = min_stripes;
+ }
+
+ conf = mddev->private;
+ if (!conf) {
+ rs->ti->error = "Cannot change stripe_cache size on inactive RAID set";
+ return -EINVAL;
+ }
+
+ /* Try setting number of stripes in raid456 stripe cache */
+ if (conf->min_nr_stripes != nr_stripes) {
+ r = raid5_set_cache_size(mddev, nr_stripes);
+ if (r) {
+ rs->ti->error = "Failed to set raid4/5/6 stripe cache size";
+ return r;
+ }
+
+ DMINFO("%u stripe cache entries", nr_stripes);
+ }
+
+ return 0;
+}
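
The clamp above derives min_stripes from the larger of the current and new chunk size (in 512-byte sectors), halved, and never shrinks a larger user request. A standalone userspace sketch of the same arithmetic; the chunk sizes and the requested entry count are invented for illustration, not taken from this patch:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t max_u32(uint32_t a, uint32_t b) { return a > b ? a : b; }

    int main(void)
    {
            /* Hypothetical: 128 KiB current chunk, 256 KiB new chunk */
            uint32_t chunk_sectors = 256, new_chunk_sectors = 512;
            uint32_t requested = 64;        /* stripe_cache entries from the ctr */

            uint32_t min_stripes = max_u32(chunk_sectors, new_chunk_sectors) / 2;
            uint32_t nr_stripes = requested < min_stripes ? min_stripes : requested;

            printf("requested %u -> using %u entries\n", requested, nr_stripes);
            return 0;
    }

With these numbers the request of 64 entries is raised to 256, matching the DMINFO adjustment path above.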
+
+/* Return # of data stripes kept in the mddev of @rs (i.e. as recorded in the superblock) */
+static unsigned int mddev_data_stripes(struct raid_set *rs)
+{
+ return rs->md.raid_disks - rs->raid_type->parity_devs;
+}
+
+/* Return # of data stripes of @rs (i.e. as of ctr) */
+static unsigned int rs_data_stripes(struct raid_set *rs)
+{
+ return rs->raid_disks - rs->raid_type->parity_devs;
+}
+
+/* Calculate the sectors per device and per array used for @rs */
+static int rs_set_dev_and_array_sectors(struct raid_set *rs, bool use_mddev)
+{
+ int delta_disks;
+ unsigned int data_stripes;
+ struct mddev *mddev = &rs->md;
+ struct md_rdev *rdev;
+ sector_t array_sectors = rs->ti->len, dev_sectors = rs->ti->len;
+
+ if (use_mddev) {
+ delta_disks = mddev->delta_disks;
+ data_stripes = mddev_data_stripes(rs);
+ } else {
+ delta_disks = rs->delta_disks;
+ data_stripes = rs_data_stripes(rs);
+ }
+
+ /* Special raid1 case w/o delta_disks support (yet) */
+ if (rt_is_raid1(rs->raid_type))
+ ;
+ else if (rt_is_raid10(rs->raid_type)) {
+ if (rs->raid10_copies < 2 ||
+ delta_disks < 0) {
+ rs->ti->error = "Bogus raid10 data copies or delta disks";
+ return -EINVAL;
+ }
+
+ dev_sectors *= rs->raid10_copies;
+ if (sector_div(dev_sectors, data_stripes))
+ goto bad;
+
+ array_sectors = (data_stripes + delta_disks) * dev_sectors;
+ if (sector_div(array_sectors, rs->raid10_copies))
+ goto bad;
+
+ } else if (sector_div(dev_sectors, data_stripes))
+ goto bad;
+
+ else
+ /* Striped layouts */
+ array_sectors = (data_stripes + delta_disks) * dev_sectors;
+
+ rdev_for_each(rdev, mddev)
+ rdev->sectors = dev_sectors;
+
+ mddev->array_sectors = array_sectors;
+ mddev->dev_sectors = dev_sectors;
+
return 0;
+bad:
+ rs->ti->error = "Target length not divisible by number of data devices";
+ return -EINVAL;
+}
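
For raid10 the function divides twice: dev_sectors is the per-device share of ti->len scaled by the copy count, and array_sectors folds any delta_disks back in per copy. A minimal userspace sketch of the same arithmetic for a hypothetical 6-disk raid10 with 2 copies and a 12 GiB target (all values invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical example values, not taken from the patch */
            uint64_t ti_len = 12ULL << 21;  /* 12 GiB in 512-byte sectors */
            uint64_t copies = 2, data_stripes = 6, delta_disks = 0;
            uint64_t dev_sectors, array_sectors;

            /* dev_sectors = ti->len * copies / data_stripes (must divide evenly) */
            dev_sectors = ti_len * copies;
            if (dev_sectors % data_stripes)
                    return 1;       /* "Target length not divisible ..." */
            dev_sectors /= data_stripes;

            /* array_sectors folds any delta_disks back in, per copy */
            array_sectors = (data_stripes + delta_disks) * dev_sectors;
            if (array_sectors % copies)
                    return 1;
            array_sectors /= copies;

            printf("dev=%llu array=%llu sectors\n",
                   (unsigned long long)dev_sectors,
                   (unsigned long long)array_sectors);
            return 0;
    }

With delta_disks == 0 the computed array size comes back out equal to the target length, as expected.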
+
+/* Setup recovery on @rs */
+static void __rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
+{
+ /* raid0 does not recover */
+ if (rs_is_raid0(rs))
+ rs->md.recovery_cp = MaxSector;
+ /*
+ * A raid6 set has to be recovered either
+ * completely or for the grown part to
+ * ensure proper parity and Q-Syndrome
+ */
+ else if (rs_is_raid6(rs))
+ rs->md.recovery_cp = dev_sectors;
+ /*
+ * Other raid set types may skip recovery
+ * depending on the 'nosync' flag.
+ */
+ else
+ rs->md.recovery_cp = test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)
+ ? MaxSector : dev_sectors;
+}
+
+/* Setup recovery on @rs based on raid type, device size and 'nosync' flag */
+static void rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
+{
+ if (!dev_sectors)
+ /* New raid set or 'sync' flag provided */
+ __rs_setup_recovery(rs, 0);
+ else if (dev_sectors == MaxSector)
+ /* Prevent recovery */
+ __rs_setup_recovery(rs, MaxSector);
+ else if (rs->dev[0].rdev.sectors < dev_sectors)
+ /* Grown raid set */
+ __rs_setup_recovery(rs, rs->dev[0].rdev.sectors);
+ else
+ __rs_setup_recovery(rs, MaxSector);
}
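
The two helpers above reduce the caller's intent to a single starting sector. This freestanding sketch keeps only rs_setup_recovery()'s dispatch (the per-level adjustments of __rs_setup_recovery() are deliberately left out), with MaxSector modelled as ~0ULL:

    #include <stdint.h>

    #define MAX_SECTOR (~0ULL)      /* stand-in for the kernel's MaxSector */

    /* Sketch of rs_setup_recovery()'s dispatch: pick the recovery start */
    uint64_t recovery_start(uint64_t dev_sectors, uint64_t cur_sectors)
    {
            if (!dev_sectors)
                    return 0;               /* new set or 'sync' -> full recovery */
            if (dev_sectors == MAX_SECTOR)
                    return MAX_SECTOR;      /* prevent recovery */
            if (cur_sectors < dev_sectors)
                    return cur_sectors;     /* grown set -> recover the grown part */
            return MAX_SECTOR;              /* unchanged -> nothing to recover */
    }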
static void do_table_event(struct work_struct *ws)
{
struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);
+ smp_rmb(); /* Make sure we access the most recent mddev properties */
+ if (!rs_is_reshaping(rs))
+ rs_set_capacity(rs);
dm_table_event(rs->ti->table);
}
@@ -749,19 +1537,225 @@ static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
}
/*
+ * Make sure a valid takeover (level switch) is being requested on @rs
+ *
+ * Conversions of raid sets from one MD personality to another
+ * have to conform to restrictions which are enforced here.
+ */
+static int rs_check_takeover(struct raid_set *rs)
+{
+ struct mddev *mddev = &rs->md;
+ unsigned int near_copies;
+
+ if (rs->md.degraded) {
+ rs->ti->error = "Can't takeover degraded raid set";
+ return -EPERM;
+ }
+
+ if (rs_is_reshaping(rs)) {
+ rs->ti->error = "Can't takeover reshaping raid set";
+ return -EPERM;
+ }
+
+ switch (mddev->level) {
+ case 0:
+ /* raid0 -> raid1/5 with one disk */
+ if ((mddev->new_level == 1 || mddev->new_level == 5) &&
+ mddev->raid_disks == 1)
+ return 0;
+
+ /* raid0 -> raid10 */
+ if (mddev->new_level == 10 &&
+ !(rs->raid_disks % mddev->raid_disks))
+ return 0;
+
+ /* raid0 with multiple disks -> raid4/5/6 */
+ if (__within_range(mddev->new_level, 4, 6) &&
+ mddev->new_layout == ALGORITHM_PARITY_N &&
+ mddev->raid_disks > 1)
+ return 0;
+
+ break;
+
+ case 10:
+ /* Can't takeover raid10_offset! */
+ if (__is_raid10_offset(mddev->layout))
+ break;
+
+ near_copies = __raid10_near_copies(mddev->layout);
+
+ /* raid10* -> raid0 */
+ if (mddev->new_level == 0) {
+ /* Can takeover raid10_near with raid disks divisible by data copies! */
+ if (near_copies > 1 &&
+ !(mddev->raid_disks % near_copies)) {
+ mddev->raid_disks /= near_copies;
+ mddev->delta_disks = mddev->raid_disks;
+ return 0;
+ }
+
+ /* Can takeover raid10_far */
+ if (near_copies == 1 &&
+ __raid10_far_copies(mddev->layout) > 1)
+ return 0;
+
+ break;
+ }
+
+ /* raid10_{near,far} -> raid1 */
+ if (mddev->new_level == 1 &&
+ max(near_copies, __raid10_far_copies(mddev->layout)) == mddev->raid_disks)
+ return 0;
+
+ /* raid10_{near,far} with 2 disks -> raid4/5 */
+ if (__within_range(mddev->new_level, 4, 5) &&
+ mddev->raid_disks == 2)
+ return 0;
+ break;
+
+ case 1:
+ /* raid1 with 2 disks -> raid4/5 */
+ if (__within_range(mddev->new_level, 4, 5) &&
+ mddev->raid_disks == 2) {
+ mddev->degraded = 1;
+ return 0;
+ }
+
+ /* raid1 -> raid0 */
+ if (mddev->new_level == 0 &&
+ mddev->raid_disks == 1)
+ return 0;
+
+ /* raid1 -> raid10 */
+ if (mddev->new_level == 10)
+ return 0;
+ break;
+
+ case 4:
+ /* raid4 -> raid0 */
+ if (mddev->new_level == 0)
+ return 0;
+
+ /* raid4 -> raid1/5 with 2 disks */
+ if ((mddev->new_level == 1 || mddev->new_level == 5) &&
+ mddev->raid_disks == 2)
+ return 0;
+
+ /* raid4 -> raid5/6 with parity N */
+ if (__within_range(mddev->new_level, 5, 6) &&
+ mddev->layout == ALGORITHM_PARITY_N)
+ return 0;
+ break;
+
+ case 5:
+ /* raid5 with parity N -> raid0 */
+ if (mddev->new_level == 0 &&
+ mddev->layout == ALGORITHM_PARITY_N)
+ return 0;
+
+ /* raid5 with parity N -> raid4 */
+ if (mddev->new_level == 4 &&
+ mddev->layout == ALGORITHM_PARITY_N)
+ return 0;
+
+ /* raid5 with 2 disks -> raid1/4/10 */
+ if ((mddev->new_level == 1 || mddev->new_level == 4 || mddev->new_level == 10) &&
+ mddev->raid_disks == 2)
+ return 0;
+
+ /* raid5_* -> raid6_*_6 with Q-Syndrome N (e.g. raid5_ra -> raid6_ra_6) */
+ if (mddev->new_level == 6 &&
+ ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
+ __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC_6, ALGORITHM_RIGHT_SYMMETRIC_6)))
+ return 0;
+ break;
+
+ case 6:
+ /* raid6 with parity N -> raid0 */
+ if (mddev->new_level == 0 &&
+ mddev->layout == ALGORITHM_PARITY_N)
+ return 0;
+
+ /* raid6 with parity N -> raid4 */
+ if (mddev->new_level == 4 &&
+ mddev->layout == ALGORITHM_PARITY_N)
+ return 0;
+
+ /* raid6_*_n with Q-Syndrome N -> raid5_* */
+ if (mddev->new_level == 5 &&
+ ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
+ __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC, ALGORITHM_RIGHT_SYMMETRIC)))
+ return 0;
+
+ default:
+ break;
+ }
+
+ rs->ti->error = "takeover not possible";
+ return -EINVAL;
+}
+
+/* True if @rs requested to be taken over */
+static bool rs_takeover_requested(struct raid_set *rs)
+{
+ return rs->md.new_level != rs->md.level;
+}
+
+/* True if @rs is requested to reshape by ctr */
+static bool rs_reshape_requested(struct raid_set *rs)
+{
+ bool change;
+ struct mddev *mddev = &rs->md;
+
+ if (rs_takeover_requested(rs))
+ return false;
+
+ if (!mddev->level)
+ return false;
+
+ change = mddev->new_layout != mddev->layout ||
+ mddev->new_chunk_sectors != mddev->chunk_sectors ||
+ rs->delta_disks;
+
+ /* Historical case to support raid1 reshape without delta disks */
+ if (mddev->level == 1) {
+ if (rs->delta_disks)
+ return !!rs->delta_disks;
+
+ return !change &&
+ mddev->raid_disks != rs->raid_disks;
+ }
+
+ if (mddev->level == 10)
+ return change &&
+ !__is_raid10_far(mddev->new_layout) &&
+ rs->delta_disks >= 0;
+
+ return change;
+}
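
Takeover and reshape are disjoint requests: a level change is a takeover, while layout/chunk/disk-count changes at the same level request a reshape. A compact sketch of that classification with the struct members flattened into parameters (the raid1 and raid10 special cases handled above are deliberately omitted):

    enum request { REQ_NONE, REQ_TAKEOVER, REQ_RESHAPE };

    /* Classify a table load; raid1/raid10 special cases omitted */
    enum request classify(int level, int new_level, int layout, int new_layout,
                          unsigned int chunk, unsigned int new_chunk,
                          int delta_disks)
    {
            if (new_level != level)
                    return REQ_TAKEOVER;    /* a level switch always wins */
            if (new_layout != layout || new_chunk != chunk || delta_disks)
                    return REQ_RESHAPE;
            return REQ_NONE;
    }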
+
+/* Features */
+#define FEATURE_FLAG_SUPPORTS_V190 0x1 /* Supports extended superblock */
+
+/* State flags for sb->flags */
+#define SB_FLAG_RESHAPE_ACTIVE 0x1
+#define SB_FLAG_RESHAPE_BACKWARDS 0x2
+
+/*
* This structure is never routinely used by userspace, unlike md superblocks.
* Devices with this superblock should only ever be accessed via device-mapper.
*/
#define DM_RAID_MAGIC 0x64526D44
struct dm_raid_superblock {
__le32 magic; /* "DmRd" */
- __le32 features; /* Used to indicate possible future changes */
+ __le32 compat_features; /* Used to indicate compatible features (like 1.9.0 ondisk metadata extension) */
- __le32 num_devices; /* Number of devices in this array. (Max 64) */
- __le32 array_position; /* The position of this drive in the array */
+ __le32 num_devices; /* Number of devices in this raid set. (Max 64) */
+ __le32 array_position; /* The position of this drive in the raid set */
__le64 events; /* Incremented by md when superblock updated */
- __le64 failed_devices; /* Bit field of devices to indicate failures */
+ __le64 failed_devices; /* Pre 1.9.0 part of bit field of devices to */
+ /* indicate failures (see extension below) */
/*
* This offset tracks the progress of the repair or replacement of
@@ -770,21 +1764,95 @@ struct dm_raid_superblock {
__le64 disk_recovery_offset;
/*
- * This offset tracks the progress of the initial array
+ * This offset tracks the progress of the initial raid set
* synchronisation/parity calculation.
*/
__le64 array_resync_offset;
/*
- * RAID characteristics
+ * raid characteristics
*/
__le32 level;
__le32 layout;
__le32 stripe_sectors;
- /* Remainder of a logical block is zero-filled when writing (see super_sync()). */
+ /********************************************************************
+ * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!!
+ *
+ * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that these exist
+ */
+
+ __le32 flags; /* Flags defining array states for reshaping */
+
+ /*
+ * This offset tracks the progress of a raid
+ * set reshape in order to be able to restart it
+ */
+ __le64 reshape_position;
+
+ /*
+ * These define the properties of the array in case of an interrupted reshape
+ */
+ __le32 new_level;
+ __le32 new_layout;
+ __le32 new_stripe_sectors;
+ __le32 delta_disks;
+
+ __le64 array_sectors; /* Array size in sectors */
+
+ /*
+ * Sector offsets to data on devices (reshaping).
+ * Needed to support out of place reshaping, thus
+ * not writing over any stripes whilst converting
+ * them from old to new layout
+ */
+ __le64 data_offset;
+ __le64 new_data_offset;
+
+ __le64 sectors; /* Used device size in sectors */
+
+ /*
+ * Additional bit field of devices indicating failures to support
+ * up to 256 devices with the 1.9.0 on-disk metadata format
+ */
+ __le64 extended_failed_devices[DISKS_ARRAY_ELEMS - 1];
+
+ __le32 incompat_features; /* Used to indicate any incompatible features */
+
+ /* Always set rest up to logical block size to 0 when writing (see get_metadata_device() below). */
} __packed;
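
All members are stored little endian, so any out-of-kernel reader has to convert explicitly. A minimal sketch of validating the magic and testing the v1.9.0 feature bit in the first eight bytes of a raw metadata block, using glibc's le32toh(); the constants are copied from the definitions above, while the helper name is invented:

    #include <endian.h>
    #include <stdint.h>
    #include <string.h>

    #define DM_RAID_MAGIC              0x64526D44
    #define FEATURE_FLAG_SUPPORTS_V190 0x1

    /* Return the v1.9.0 bit of compat_features, or -1 if the magic is wrong */
    int check_dm_raid_sb(const unsigned char *block)
    {
            uint32_t magic, compat;

            memcpy(&magic, block, sizeof(magic));           /* __le32 magic */
            memcpy(&compat, block + 4, sizeof(compat));     /* __le32 compat_features */

            if (le32toh(magic) != DM_RAID_MAGIC)
                    return -1;      /* not a dm-raid superblock */
            return le32toh(compat) & FEATURE_FLAG_SUPPORTS_V190;
    }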
+/*
+ * Check for reshape constraints on raid set @rs:
+ *
+ * - reshape function non-existent
+ * - degraded set
+ * - ongoing recovery
+ * - ongoing reshape
+ *
+ * Returns 0 if no constraint is violated, or -EPERM with
+ * rs->ti->error set to a message naming the violated constraint
+ */
+static int rs_check_reshape(struct raid_set *rs)
+{
+ struct mddev *mddev = &rs->md;
+
+ if (!mddev->pers || !mddev->pers->check_reshape)
+ rs->ti->error = "Reshape not supported";
+ else if (mddev->degraded)
+ rs->ti->error = "Can't reshape degraded raid set";
+ else if (rs_is_recovering(rs))
+ rs->ti->error = "Convert request on recovering raid set prohibited";
+ else if (rs_is_reshaping(rs))
+ rs->ti->error = "raid set already reshaping!";
+ else if (!(rs_is_raid1(rs) || rs_is_raid10(rs) || rs_is_raid456(rs)))
+ rs->ti->error = "Reshaping only supported for raid1/4/5/6/10";
+ else
+ return 0;
+
+ return -EPERM;
+}
+
static int read_disk_sb(struct md_rdev *rdev, int size)
{
BUG_ON(!rdev->sb_page);
@@ -792,7 +1860,7 @@ static int read_disk_sb(struct md_rdev *rdev, int size)
if (rdev->sb_loaded)
return 0;
- if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, 1)) {
+ if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true)) {
DMERR("Failed to read superblock of device at position %d",
rdev->raid_disk);
md_error(rdev->mddev, rdev);
@@ -804,31 +1872,67 @@ static int read_disk_sb(struct md_rdev *rdev, int size)
return 0;
}
+static void sb_retrieve_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices)
+{
+ failed_devices[0] = le64_to_cpu(sb->failed_devices);
+ memset(failed_devices + 1, 0, sizeof(sb->extended_failed_devices));
+
+ if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) {
+ int i = ARRAY_SIZE(sb->extended_failed_devices);
+
+ while (i--)
+ failed_devices[i+1] = le64_to_cpu(sb->extended_failed_devices[i]);
+ }
+}
+
+static void sb_update_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices)
+{
+ int i = ARRAY_SIZE(sb->extended_failed_devices);
+
+ sb->failed_devices = cpu_to_le64(failed_devices[0]);
+ while (i--)
+ sb->extended_failed_devices[i] = cpu_to_le64(failed_devices[i+1]);
+}
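
The failed-device state is a 256-bit bitmap split across the legacy failed_devices word and the extended array. This userspace round-trip sketch mirrors the pack/unpack of the two helpers above minus the endian conversion, assuming DISKS_ARRAY_ELEMS is 4 (4 x 64 = 256 devices):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    #define DISKS_ARRAY_ELEMS 4     /* assumed: 4 * 64 bits = 256 devices */

    struct sb_bits {                /* the two superblock members involved */
            uint64_t failed_devices;
            uint64_t extended_failed_devices[DISKS_ARRAY_ELEMS - 1];
    };

    /* Mirror of sb_update_failed_devices(), minus the endian conversion */
    void pack(struct sb_bits *sb, const uint64_t *bits)
    {
            sb->failed_devices = bits[0];
            memcpy(sb->extended_failed_devices, bits + 1,
                   sizeof(sb->extended_failed_devices));
    }

    /* Mirror of sb_retrieve_failed_devices() for a v1.9.0 superblock */
    void unpack(const struct sb_bits *sb, uint64_t *bits)
    {
            bits[0] = sb->failed_devices;
            memcpy(bits + 1, sb->extended_failed_devices,
                   sizeof(sb->extended_failed_devices));
    }

    int main(void)
    {
            uint64_t in[DISKS_ARRAY_ELEMS] = { 0 }, out[DISKS_ARRAY_ELEMS];
            struct sb_bits sb;

            in[70 / 64] |= 1ULL << (70 % 64);       /* mark device 70 failed */
            pack(&sb, in);
            unpack(&sb, out);
            assert(out[70 / 64] & (1ULL << (70 % 64)));
            return 0;
    }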
+
+/*
+ * Synchronize the superblock members with the raid set properties
+ *
+ * All superblock data is little endian.
+ */
static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
{
- int i;
- uint64_t failed_devices;
+ bool update_failed_devices = false;
+ unsigned int i;
+ uint64_t failed_devices[DISKS_ARRAY_ELEMS];
struct dm_raid_superblock *sb;
struct raid_set *rs = container_of(mddev, struct raid_set, md);
+ /* No metadata device, no superblock */
+ if (!rdev->meta_bdev)
+ return;
+
+ BUG_ON(!rdev->sb_page);
+
sb = page_address(rdev->sb_page);
- failed_devices = le64_to_cpu(sb->failed_devices);
- for (i = 0; i < mddev->raid_disks; i++)
- if (!rs->dev[i].data_dev ||
- test_bit(Faulty, &(rs->dev[i].rdev.flags)))
- failed_devices |= (1ULL << i);
+ sb_retrieve_failed_devices(sb, failed_devices);
- memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));
+ for (i = 0; i < rs->raid_disks; i++)
+ if (!rs->dev[i].data_dev || test_bit(Faulty, &rs->dev[i].rdev.flags)) {
+ update_failed_devices = true;
+ set_bit(i, (void *) failed_devices);
+ }
+
+ if (update_failed_devices)
+ sb_update_failed_devices(sb, failed_devices);
sb->magic = cpu_to_le32(DM_RAID_MAGIC);
- sb->features = cpu_to_le32(0); /* No features yet */
+ sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190);
sb->num_devices = cpu_to_le32(mddev->raid_disks);
sb->array_position = cpu_to_le32(rdev->raid_disk);
sb->events = cpu_to_le64(mddev->events);
- sb->failed_devices = cpu_to_le64(failed_devices);
sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);
@@ -836,6 +1940,34 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->level = cpu_to_le32(mddev->level);
sb->layout = cpu_to_le32(mddev->layout);
sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);
+
+ sb->new_level = cpu_to_le32(mddev->new_level);
+ sb->new_layout = cpu_to_le32(mddev->new_layout);
+ sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors);
+
+ sb->delta_disks = cpu_to_le32(mddev->delta_disks);
+
+ smp_rmb(); /* Make sure we access most recent reshape position */
+ sb->reshape_position = cpu_to_le64(mddev->reshape_position);
+ if (le64_to_cpu(sb->reshape_position) != MaxSector) {
+ /* Flag ongoing reshape */
+ sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE);
+
+ if (mddev->delta_disks < 0 || mddev->reshape_backwards)
+ sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_BACKWARDS);
+ } else {
+ /* Clear reshape flags */
+ sb->flags &= ~(cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE|SB_FLAG_RESHAPE_BACKWARDS));
+ }
+
+ sb->array_sectors = cpu_to_le64(mddev->array_sectors);
+ sb->data_offset = cpu_to_le64(rdev->data_offset);
+ sb->new_data_offset = cpu_to_le64(rdev->new_data_offset);
+ sb->sectors = cpu_to_le64(rdev->sectors);
+ sb->incompat_features = cpu_to_le32(0);
+
+ /* Zero out the rest of the payload after the size of the superblock */
+ memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));
}
/*
@@ -848,7 +1980,7 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
*/
static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
{
- int ret;
+ int r;
struct dm_raid_superblock *sb;
struct dm_raid_superblock *refsb;
uint64_t events_sb, events_refsb;
@@ -860,9 +1992,9 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
return -EINVAL;
}
- ret = read_disk_sb(rdev, rdev->sb_size);
- if (ret)
- return ret;
+ r = read_disk_sb(rdev, rdev->sb_size);
+ if (r)
+ return r;
sb = page_address(rdev->sb_page);
@@ -876,6 +2008,7 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
super_sync(rdev->mddev, rdev);
set_bit(FirstUse, &rdev->flags);
+ sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190);
/* Force writing of superblocks to disk */
set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags);
@@ -895,129 +2028,212 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
return (events_sb > events_refsb) ? 1 : 0;
}
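
super_load() returns 1 when the device under inspection has a higher event count than the current reference, so the caller ends up holding the freshest superblock. The same election, sketched over a plain array of already byte-swapped counters (the helper name is invented):

    #include <stddef.h>
    #include <stdint.h>

    /* Return the index of the superblock with the highest event count */
    size_t pick_freshest(const uint64_t *events, size_t n)
    {
            size_t best = 0;

            for (size_t i = 1; i < n; i++)
                    if (events[i] > events[best])
                            best = i;       /* later events win; ties keep the first */
            return best;
    }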
-static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
+static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
{
int role;
- struct raid_set *rs = container_of(mddev, struct raid_set, md);
+ unsigned int d;
+ struct mddev *mddev = &rs->md;
uint64_t events_sb;
- uint64_t failed_devices;
+ uint64_t failed_devices[DISKS_ARRAY_ELEMS];
struct dm_raid_superblock *sb;
- uint32_t new_devs = 0;
- uint32_t rebuilds = 0;
+ uint32_t new_devs = 0, rebuild_and_new = 0, rebuilds = 0;
struct md_rdev *r;
struct dm_raid_superblock *sb2;
sb = page_address(rdev->sb_page);
events_sb = le64_to_cpu(sb->events);
- failed_devices = le64_to_cpu(sb->failed_devices);
/*
* Initialise to 1 if this is a new superblock.
*/
mddev->events = events_sb ? : 1;
+ mddev->reshape_position = MaxSector;
+
/*
- * Reshaping is not currently allowed
+ * Reshaping is supported, i.e. reshape_position is valid
+ * in the superblock and the superblock content is authoritative.
*/
- if (le32_to_cpu(sb->level) != mddev->level) {
- DMERR("Reshaping arrays not yet supported. (RAID level change)");
- return -EINVAL;
- }
- if (le32_to_cpu(sb->layout) != mddev->layout) {
- DMERR("Reshaping arrays not yet supported. (RAID layout change)");
- DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
- DMERR(" Old layout: %s w/ %d copies",
- raid10_md_layout_to_format(le32_to_cpu(sb->layout)),
- raid10_md_layout_to_copies(le32_to_cpu(sb->layout)));
- DMERR(" New layout: %s w/ %d copies",
- raid10_md_layout_to_format(mddev->layout),
- raid10_md_layout_to_copies(mddev->layout));
- return -EINVAL;
- }
- if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
- DMERR("Reshaping arrays not yet supported. (stripe sectors change)");
- return -EINVAL;
- }
+ if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) {
+ /* Superblock is authoritative wrt given raid set layout! */
+ mddev->raid_disks = le32_to_cpu(sb->num_devices);
+ mddev->level = le32_to_cpu(sb->level);
+ mddev->layout = le32_to_cpu(sb->layout);
+ mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors);
+ mddev->new_level = le32_to_cpu(sb->new_level);
+ mddev->new_layout = le32_to_cpu(sb->new_layout);
+ mddev->new_chunk_sectors = le32_to_cpu(sb->new_stripe_sectors);
+ mddev->delta_disks = le32_to_cpu(sb->delta_disks);
+ mddev->array_sectors = le64_to_cpu(sb->array_sectors);
+
+ /* raid was reshaping and got interrupted */
+ if (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_ACTIVE) {
+ if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) {
+ DMERR("Reshape requested but raid set is still reshaping");
+ return -EINVAL;
+ }
- /* We can only change the number of devices in RAID1 right now */
- if ((rs->raid_type->level != 1) &&
- (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
- DMERR("Reshaping arrays not yet supported. (device count change)");
- return -EINVAL;
+ if (mddev->delta_disks < 0 ||
+ (!mddev->delta_disks && (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_BACKWARDS)))
+ mddev->reshape_backwards = 1;
+ else
+ mddev->reshape_backwards = 0;
+
+ mddev->reshape_position = le64_to_cpu(sb->reshape_position);
+ rs->raid_type = get_raid_type_by_ll(mddev->level, mddev->layout);
+ }
+
+ } else {
+ /*
+ * No takeover/reshaping, because we don't have the extended v1.9.0 metadata
+ */
+ if (le32_to_cpu(sb->level) != mddev->level) {
+ DMERR("Reshaping/takeover raid sets not yet supported. (raid level/stripes/size change)");
+ return -EINVAL;
+ }
+ if (le32_to_cpu(sb->layout) != mddev->layout) {
+ DMERR("Reshaping raid sets not yet supported. (raid layout change)");
+ DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
+ DMERR(" Old layout: %s w/ %d copies",
+ raid10_md_layout_to_format(le32_to_cpu(sb->layout)),
+ raid10_md_layout_to_copies(le32_to_cpu(sb->layout)));
+ DMERR(" New layout: %s w/ %d copies",
+ raid10_md_layout_to_format(mddev->layout),
+ raid10_md_layout_to_copies(mddev->layout));
+ return -EINVAL;
+ }
+ if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
+ DMERR("Reshaping raid sets not yet supported. (stripe sectors change)");
+ return -EINVAL;
+ }
+
+ /* We can only change the number of devices in raid1 with old (i.e. pre 1.0.7) metadata */
+ if (!rt_is_raid1(rs->raid_type) &&
+ (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
+ DMERR("Reshaping raid sets not yet supported. (device count change from %u to %u)",
+ sb->num_devices, mddev->raid_disks);
+ return -EINVAL;
+ }
+
+ /* Table line is checked vs. authoritative superblock */
+ rs_set_new(rs);
}
- if (!(rs->ctr_flags & (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC)))
+ if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);
/*
* During load, we set FirstUse if a new superblock was written.
* There are two reasons we might not have a superblock:
- * 1) The array is brand new - in which case, all of the
- * devices must have their In_sync bit set. Also,
+ * 1) The raid set is brand new - in which case, all of the
+ * devices must have their In_sync bit set. Also,
* recovery_cp must be 0, unless forced.
- * 2) This is a new device being added to an old array
+ * 2) This is a new device being added to an old raid set
* and the new device needs to be rebuilt - in which
* case the In_sync bit will /not/ be set and
* recovery_cp must be MaxSector.
+ * 3) This is/are a new device(s) being added to an old
+ * raid set during takeover to a higher raid level
+ * to provide capacity for redundancy or during reshape
+ * to add capacity to grow the raid set.
*/
+ d = 0;
rdev_for_each(r, mddev) {
+ if (test_bit(FirstUse, &r->flags))
+ new_devs++;
+
if (!test_bit(In_sync, &r->flags)) {
- DMINFO("Device %d specified for rebuild: "
- "Clearing superblock", r->raid_disk);
+ DMINFO("Device %d specified for rebuild; clearing superblock",
+ r->raid_disk);
rebuilds++;
- } else if (test_bit(FirstUse, &r->flags))
- new_devs++;
+
+ if (test_bit(FirstUse, &r->flags))
+ rebuild_and_new++;
+ }
+
+ d++;
}
- if (!rebuilds) {
- if (new_devs == mddev->raid_disks) {
- DMINFO("Superblocks created for new array");
+ if (new_devs == rs->raid_disks || !rebuilds) {
+ /* Replace a broken device */
+ if (new_devs == 1 && !rs->delta_disks)
+ ;
+ if (new_devs == rs->raid_disks) {
+ DMINFO("Superblocks created for new raid set");
set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
- } else if (new_devs) {
- DMERR("New device injected "
- "into existing array without 'rebuild' "
- "parameter specified");
+ } else if (new_devs != rebuilds &&
+ new_devs != rs->delta_disks) {
+ DMERR("New device injected into existing raid set without "
+ "'delta_disks' or 'rebuild' parameter specified");
return -EINVAL;
}
- } else if (new_devs) {
- DMERR("'rebuild' devices cannot be "
- "injected into an array with other first-time devices");
- return -EINVAL;
- } else if (mddev->recovery_cp != MaxSector) {
- DMERR("'rebuild' specified while array is not in-sync");
+ } else if (new_devs && new_devs != rebuilds) {
+ DMERR("%u 'rebuild' devices cannot be injected into"
+ " a raid set with %u other first-time devices",
+ rebuilds, new_devs);
return -EINVAL;
+ } else if (rebuilds) {
+ if (rebuild_and_new && rebuilds != rebuild_and_new) {
+ DMERR("new device%s provided without 'rebuild'",
+ new_devs > 1 ? "s" : "");
+ return -EINVAL;
+ } else if (rs_is_recovering(rs)) {
+ DMERR("'rebuild' specified while raid set is not in-sync (recovery_cp=%llu)",
+ (unsigned long long) mddev->recovery_cp);
+ return -EINVAL;
+ } else if (rs_is_reshaping(rs)) {
+ DMERR("'rebuild' specified while raid set is being reshaped (reshape_position=%llu)",
+ (unsigned long long) mddev->reshape_position);
+ return -EINVAL;
+ }
}
/*
* Now we set the Faulty bit for those devices that are
* recorded in the superblock as failed.
*/
+ sb_retrieve_failed_devices(sb, failed_devices);
rdev_for_each(r, mddev) {
if (!r->sb_page)
continue;
sb2 = page_address(r->sb_page);
sb2->failed_devices = 0;
+ memset(sb2->extended_failed_devices, 0, sizeof(sb2->extended_failed_devices));
/*
* Check for any device re-ordering.
*/
if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) {
role = le32_to_cpu(sb2->array_position);
+ if (role < 0)
+ continue;
+
if (role != r->raid_disk) {
- if (rs->raid_type->level != 1) {
- rs->ti->error = "Cannot change device "
- "positions in RAID array";
+ if (__is_raid10_near(mddev->layout)) {
+ if (mddev->raid_disks % __raid10_near_copies(mddev->layout) ||
+ rs->raid_disks % rs->raid10_copies) {
+ rs->ti->error =
+ "Cannot change raid10 near set to odd # of devices!";
+ return -EINVAL;
+ }
+
+ sb2->array_position = cpu_to_le32(r->raid_disk);
+
+ } else if (!(rs_is_raid10(rs) && rt_is_raid0(rs->raid_type)) &&
+ !(rs_is_raid0(rs) && rt_is_raid10(rs->raid_type)) &&
+ !rt_is_raid1(rs->raid_type)) {
+ rs->ti->error = "Cannot change device positions in raid set";
return -EINVAL;
}
- DMINFO("RAID1 device #%d now at position #%d",
- role, r->raid_disk);
+
+ DMINFO("raid device #%d now at position #%d", role, r->raid_disk);
}
/*
* Partial recovery is performed on
* returning failed devices.
*/
- if (failed_devices & (1 << role))
+ if (test_bit(role, (void *) failed_devices))
set_bit(Faulty, &r->flags);
}
}
@@ -1028,41 +2244,60 @@ static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
{
struct mddev *mddev = &rs->md;
- struct dm_raid_superblock *sb = page_address(rdev->sb_page);
+ struct dm_raid_superblock *sb;
+
+ if (rs_is_raid0(rs) || !rdev->sb_page)
+ return 0;
+
+ sb = page_address(rdev->sb_page);
/*
* If mddev->events is not set, we know we have not yet initialized
* the array.
*/
- if (!mddev->events && super_init_validation(mddev, rdev))
+ if (!mddev->events && super_init_validation(rs, rdev))
return -EINVAL;
- if (le32_to_cpu(sb->features)) {
- rs->ti->error = "Unable to assemble array: No feature flags supported yet";
+ if (le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {
+ rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags";
+ return -EINVAL;
+ }
+
+ if (sb->incompat_features) {
+ rs->ti->error = "Unable to assemble array: No incompatible feature flags supported yet";
return -EINVAL;
}
/* Enable bitmap creation for RAID levels != 0 */
- mddev->bitmap_info.offset = (rs->raid_type->level) ? to_sector(4096) : 0;
+ mddev->bitmap_info.offset = rt_is_raid0(rs->raid_type) ? 0 : to_sector(4096);
rdev->mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
- if (!test_bit(FirstUse, &rdev->flags)) {
+ if (!test_and_clear_bit(FirstUse, &rdev->flags)) {
+ /* Retrieve device size stored in superblock to be prepared for shrink */
+ rdev->sectors = le64_to_cpu(sb->sectors);
rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
- if (rdev->recovery_offset != MaxSector)
- clear_bit(In_sync, &rdev->flags);
+ if (rdev->recovery_offset == MaxSector)
+ set_bit(In_sync, &rdev->flags);
+ /*
+ * If no reshape in progress -> we're recovering single
+ * disk(s) and have to set the device(s) to out-of-sync
+ */
+ else if (!rs_is_reshaping(rs))
+ clear_bit(In_sync, &rdev->flags); /* Mandatory for recovery */
}
/*
* If a device comes back, set it as not In_sync and no longer faulty.
*/
- if (test_bit(Faulty, &rdev->flags)) {
- clear_bit(Faulty, &rdev->flags);
+ if (test_and_clear_bit(Faulty, &rdev->flags)) {
+ rdev->recovery_offset = 0;
clear_bit(In_sync, &rdev->flags);
rdev->saved_raid_disk = rdev->raid_disk;
- rdev->recovery_offset = 0;
}
- clear_bit(FirstUse, &rdev->flags);
+ /* Reshape support -> restore respective data offsets */
+ rdev->data_offset = le64_to_cpu(sb->data_offset);
+ rdev->new_data_offset = le64_to_cpu(sb->new_data_offset);
return 0;
}
@@ -1072,7 +2307,7 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
*/
static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
{
- int ret;
+ int r;
struct raid_dev *dev;
struct md_rdev *rdev, *tmp, *freshest;
struct mddev *mddev = &rs->md;
@@ -1082,30 +2317,35 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
/*
* Skipping super_load due to CTR_FLAG_SYNC will cause
* the array to undergo initialization again as
- * though it were new. This is the intended effect
+ * though it were new. This is the intended effect
* of the "sync" directive.
*
* When reshaping capability is added, we must ensure
* that the "sync" directive is disallowed during the
* reshape.
*/
- rdev->sectors = to_sector(i_size_read(rdev->bdev->bd_inode));
-
- if (rs->ctr_flags & CTR_FLAG_SYNC)
+ if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
continue;
if (!rdev->meta_bdev)
continue;
- ret = super_load(rdev, freshest);
+ r = super_load(rdev, freshest);
- switch (ret) {
+ switch (r) {
case 1:
freshest = rdev;
break;
case 0:
break;
default:
+ /*
+ * We have to keep any raid0 data/metadata device pairs or
+ * the MD raid0 personality will fail to start the array.
+ */
+ if (rs_is_raid0(rs))
+ continue;
+
dev = container_of(rdev, struct raid_dev, rdev);
if (dev->meta_dev)
dm_put_device(ti, dev->meta_dev);
@@ -1148,25 +2388,334 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
* Validation of the freshest device provides the source of
* validation for the remaining devices.
*/
- ti->error = "Unable to assemble array: Invalid superblocks";
+ rs->ti->error = "Unable to assemble array: Invalid superblocks";
if (super_validate(rs, freshest))
return -EINVAL;
rdev_for_each(rdev, mddev)
if ((rdev != freshest) && super_validate(rs, rdev))
return -EINVAL;
+ return 0;
+}
+
+/*
+ * Adjust data_offset and new_data_offset on all disk members of @rs
+ * for out of place reshaping if requested by contructor
+ *
+ * We need free space at the beginning of each raid disk for forward
+ * and at the end for backward reshapes which userspace has to provide
+ * via remapping/reordering of space.
+ */
+static int rs_adjust_data_offsets(struct raid_set *rs)
+{
+ sector_t data_offset = 0, new_data_offset = 0;
+ struct md_rdev *rdev;
+
+ /* Constructor did not request data offset change */
+ if (!test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) {
+ if (!rs_is_reshapable(rs))
+ goto out;
+
+ return 0;
+ }
+
+ /* HM FIXME: get InSync raid_dev? */
+ rdev = &rs->dev[0].rdev;
+
+ if (rs->delta_disks < 0) {
+ /*
+ * Removing disks (reshaping backwards):
+ *
+ * - before reshape: data is at offset 0 and free space
+ * is at end of each component LV
+ *
+ * - after reshape: data is at offset rs->data_offset != 0 on each component LV
+ */
+ data_offset = 0;
+ new_data_offset = rs->data_offset;
+
+ } else if (rs->delta_disks > 0) {
+ /*
+ * Adding disks (reshaping forwards):
+ *
+ * - before reshape: data is at offset rs->data_offset != 0 and
+ * free space is at begin of each component LV
+ *
+ * - after reshape: data is at offset 0 on each component LV
+ */
+ data_offset = rs->data_offset;
+ new_data_offset = 0;
+
+ } else {
+ /*
+ * User space passes in 0 for data offset after having removed reshape space
+ *
+ * - or - (data offset != 0)
+ *
+ * Changing RAID layout or chunk size -> toggle offsets
+ *
+ * - before reshape: data is at offset rs->data_offset == 0 and
+ * free space is at end of each component LV
+ * -or-
+ * data is at offset rs->data_offset != 0 and
+ * free space is at begin of each component LV
+ *
+ * - after reshape: data is at offset 0 if it was at offset != 0
+ * or at offset != 0 if it was at offset 0
+ * on each component LV
+ *
+ */
+ data_offset = rs->data_offset ? rdev->data_offset : 0;
+ new_data_offset = data_offset ? 0 : rs->data_offset;
+ set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
+ }
+
+ /*
+ * Make sure we got a minimum amount of free sectors per device
+ */
+ if (rs->data_offset &&
+ to_sector(i_size_read(rdev->bdev->bd_inode)) - rdev->sectors < MIN_FREE_RESHAPE_SPACE) {
+ rs->ti->error = data_offset ? "No space for forward reshape" :
+ "No space for backward reshape";
+ return -ENOSPC;
+ }
+out:
+ /* Adjust data offsets on all rdevs */
+ rdev_for_each(rdev, &rs->md) {
+ rdev->data_offset = data_offset;
+ rdev->new_data_offset = new_data_offset;
+ }
+
+ return 0;
+}
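
For the layout/chunk-size-only case the helper simply toggles which end of each component LV holds the reshape headroom. A toy sketch of that toggle, with ctr_offset standing in for rs->data_offset and cur for rdev->data_offset; the printed forward/backward labels follow the rule rs_setup_reshape() applies later:

    #include <stdint.h>
    #include <stdio.h>

    /* Toggle data offsets: whichever side held data now holds free space */
    void toggle_offsets(uint64_t ctr_offset, uint64_t cur,
                        uint64_t *data, uint64_t *new_data)
    {
            *data = ctr_offset ? cur : 0;
            *new_data = *data ? 0 : ctr_offset;
    }

    int main(void)
    {
            uint64_t data, new_data;

            toggle_offsets(8192, 8192, &data, &new_data);
            printf("forward:  %llu -> %llu\n",
                   (unsigned long long)data, (unsigned long long)new_data);
            toggle_offsets(8192, 0, &data, &new_data);
            printf("backward: %llu -> %llu\n",
                   (unsigned long long)data, (unsigned long long)new_data);
            return 0;
    }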
+
+/* Userspace reordered disks -> adjust raid_disk indexes in @rs */
+static void __reorder_raid_disk_indexes(struct raid_set *rs)
+{
+ int i = 0;
+ struct md_rdev *rdev;
+
+ rdev_for_each(rdev, &rs->md) {
+ rdev->raid_disk = i++;
+ rdev->saved_raid_disk = rdev->new_raid_disk = -1;
+ }
+}
+
+/*
+ * Setup @rs for takeover by a different raid level
+ */
+static int rs_setup_takeover(struct raid_set *rs)
+{
+ struct mddev *mddev = &rs->md;
+ struct md_rdev *rdev;
+ unsigned int d = mddev->raid_disks = rs->raid_disks;
+ sector_t new_data_offset = rs->dev[0].rdev.data_offset ? 0 : rs->data_offset;
+
+ if (rt_is_raid10(rs->raid_type)) {
+ if (mddev->level == 0) {
+ /* Userspace reordered disks -> adjust raid_disk indexes */
+ __reorder_raid_disk_indexes(rs);
+
+ /* raid0 -> raid10_far layout */
+ mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_FAR,
+ rs->raid10_copies);
+ } else if (mddev->level == 1)
+ /* raid1 -> raid10_near layout */
+ mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
+ rs->raid_disks);
+ else
+ return -EINVAL;
+
+ }
+
+ clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
+ mddev->recovery_cp = MaxSector;
+
+ while (d--) {
+ rdev = &rs->dev[d].rdev;
+
+ if (test_bit(d, (void *) rs->rebuild_disks)) {
+ clear_bit(In_sync, &rdev->flags);
+ clear_bit(Faulty, &rdev->flags);
+ mddev->recovery_cp = rdev->recovery_offset = 0;
+ /* Bitmap has to be created when we do an "up" takeover */
+ set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
+ }
+
+ rdev->new_data_offset = new_data_offset;
+ }
+
+ return 0;
+}
+
+/* Prepare @rs for reshape */
+static int rs_prepare_reshape(struct raid_set *rs)
+{
+ bool reshape;
+ struct mddev *mddev = &rs->md;
+
+ if (rs_is_raid10(rs)) {
+ if (rs->raid_disks != mddev->raid_disks &&
+ __is_raid10_near(mddev->layout) &&
+ rs->raid10_copies &&
+ rs->raid10_copies != __raid10_near_copies(mddev->layout)) {
+ /*
+ * The number of raid disks has to be a multiple of the number
+ * of data copies to allow this conversion.
+ *
+ * This is actually not a reshape; it is a
+ * rebuild of any additional mirrors per group
+ */
+ if (rs->raid_disks % rs->raid10_copies) {
+ rs->ti->error = "Can't reshape raid10 mirror groups";
+ return -EINVAL;
+ }
+
+ /* Userspace reordered disks to add/remove mirrors -> adjust raid_disk indexes */
+ __reorder_raid_disk_indexes(rs);
+ mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
+ rs->raid10_copies);
+ mddev->new_layout = mddev->layout;
+ reshape = false;
+ } else
+ reshape = true;
+
+ } else if (rs_is_raid456(rs))
+ reshape = true;
+
+ else if (rs_is_raid1(rs)) {
+ if (rs->delta_disks) {
+ /* Process raid1 via delta_disks */
+ mddev->degraded = rs->delta_disks < 0 ? -rs->delta_disks : rs->delta_disks;
+ reshape = true;
+ } else {
+ /* Process raid1 without delta_disks */
+ mddev->raid_disks = rs->raid_disks;
+ reshape = false;
+ }
+ } else {
+ rs->ti->error = "Called with bogus raid type";
+ return -EINVAL;
+ }
+
+ if (reshape) {
+ set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags);
+ set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
+ } else if (mddev->raid_disks < rs->raid_disks)
+ /* Create new superblocks and bitmaps, if any new disks */
+ set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
return 0;
}
/*
+ * Set up @rs for a reshape request to:
+ * - change raid layout
+ * - change chunk size
+ * - add disks
+ * - remove disks
+ */
+static int rs_setup_reshape(struct raid_set *rs)
+{
+ int r = 0;
+ unsigned int cur_raid_devs, d;
+ struct mddev *mddev = &rs->md;
+ struct md_rdev *rdev;
+
+ mddev->delta_disks = rs->delta_disks;
+ cur_raid_devs = mddev->raid_disks;
+
+ /* Ignore impossible layout change whilst adding/removing disks */
+ if (mddev->delta_disks &&
+ mddev->layout != mddev->new_layout) {
+ DMINFO("Ignoring invalid layout change with delta_disks=%d", rs->delta_disks);
+ mddev->new_layout = mddev->layout;
+ }
+
+ /*
+ * Adjust array size:
+ *
+ * - in case of adding disks, array size has
+ * to grow after the disk adding reshape,
+ * which'll happen in the event handler;
+ * reshape will happen forward, so space has to
+ * be available at the beginning of each disk
+ *
+ * - in case of removing disks, array size
+ * has to shrink before starting the reshape,
+ * which'll happen here;
+ * reshape will happen backward, so space has to
+ * be available at the end of each disk
+ *
+ * - data_offset and new_data_offset are
+ * adjusted for aforementioned out of place
+ * reshaping based on userspace passing in
+ * the "data_offset <sectors>" key/value
+ * pair via the constructor
+ */
+
+ /* Add disk(s) */
+ if (rs->delta_disks > 0) {
+ /* Prepare disks for check in raid4/5/6/10 {check|start}_reshape */
+ for (d = cur_raid_devs; d < rs->raid_disks; d++) {
+ rdev = &rs->dev[d].rdev;
+ clear_bit(In_sync, &rdev->flags);
+
+ /*
+ * saved_raid_disk needs to be -1, or recovery_offset will be set to 0
+ * by md, which'll store that erroneously in the superblock on reshape
+ */
+ rdev->saved_raid_disk = -1;
+ rdev->raid_disk = d;
+
+ rdev->sectors = mddev->dev_sectors;
+ rdev->recovery_offset = rs_is_raid1(rs) ? 0 : MaxSector;
+ }
+
+ mddev->reshape_backwards = 0; /* adding disks -> forward reshape */
+
+ /* Remove disk(s) */
+ } else if (rs->delta_disks < 0) {
+ r = rs_set_dev_and_array_sectors(rs, true);
+ mddev->reshape_backwards = 1; /* removing disk(s) -> backward reshape */
+
+ /* Change layout and/or chunk size */
+ } else {
+ /*
+ * Reshape layout (e.g. raid5_ls -> raid5_n) and/or chunk size:
+ *
+ * keeping number of disks and do layout change ->
+ *
+ * toggle reshape_backward depending on data_offset:
+ *
+ * - free space upfront -> reshape forward
+ *
+ * - free space at the end -> reshape backward
+ *
+ *
+ * This utilizes free reshape space, avoiding the need
+ * for userspace to move (parts of) LV segments in
+ * case of a layout/chunksize change; for disk
+ * adding/removing, reshape space has to be at
+ * the proper address (see above with delta_disks):
+ *
+ * add disk(s) -> begin
+ * remove disk(s)-> end
+ */
+ mddev->reshape_backwards = rs->dev[0].rdev.data_offset ? 0 : 1;
+ }
+
+ return r;
+}
+
+/*
* Enable/disable discard support on RAID set depending on
* RAID level and discard properties of underlying RAID members.
*/
-static void configure_discard_support(struct dm_target *ti, struct raid_set *rs)
+static void configure_discard_support(struct raid_set *rs)
{
int i;
bool raid456;
+ struct dm_target *ti = rs->ti;
/* Assume discards not supported until after checks below. */
ti->discards_supported = false;
@@ -1174,7 +2723,7 @@ static void configure_discard_support(struct dm_target *ti, struct raid_set *rs)
/* RAID level 4,5,6 require discard_zeroes_data for data integrity! */
raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6);
- for (i = 0; i < rs->md.raid_disks; i++) {
+ for (i = 0; i < rs->raid_disks; i++) {
struct request_queue *q;
if (!rs->dev[i].rdev.bdev)
@@ -1207,118 +2756,251 @@ static void configure_discard_support(struct dm_target *ti, struct raid_set *rs)
}
/*
- * Construct a RAID4/5/6 mapping:
+ * Construct a RAID0/1/10/4/5/6 mapping:
* Args:
- * <raid_type> <#raid_params> <raid_params> \
- * <#raid_devs> { <meta_dev1> <dev1> .. <meta_devN> <devN> }
+ * <raid_type> <#raid_params> <raid_params>{0,} \
+ * <#raid_devs> [<meta_dev1> <dev1>]{1,}
*
- * <raid_params> varies by <raid_type>. See 'parse_raid_params' for
+ * <raid_params> varies by <raid_type>. See 'parse_raid_params' for
* details on possible <raid_params>.
+ *
+ * Userspace is free to initialize the metadata devices, hence the superblocks, to
+ * enforce recreation based on the passed-in table parameters.
+ *
*/
-static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
+static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
- int ret;
+ int r;
+ bool resize;
struct raid_type *rt;
- unsigned long num_raid_params, num_raid_devs;
+ unsigned int num_raid_params, num_raid_devs;
+ sector_t calculated_dev_sectors;
struct raid_set *rs = NULL;
-
- /* Must have at least <raid_type> <#raid_params> */
- if (argc < 2) {
- ti->error = "Too few arguments";
+ const char *arg;
+ struct rs_layout rs_layout;
+ struct dm_arg_set as = { argc, argv }, as_nrd;
+ struct dm_arg _args[] = {
+ { 0, as.argc, "Cannot understand number of raid parameters" },
+ { 1, 254, "Cannot understand number of raid devices parameters" }
+ };
+
+ /* Must have <raid_type> */
+ arg = dm_shift_arg(&as);
+ if (!arg) {
+ ti->error = "No arguments";
return -EINVAL;
}
- /* raid type */
- rt = get_raid_type(argv[0]);
+ rt = get_raid_type(arg);
if (!rt) {
ti->error = "Unrecognised raid_type";
return -EINVAL;
}
- argc--;
- argv++;
-
- /* number of RAID parameters */
- if (kstrtoul(argv[0], 10, &num_raid_params) < 0) {
- ti->error = "Cannot understand number of RAID parameters";
- return -EINVAL;
- }
- argc--;
- argv++;
- /* Skip over RAID params for now and find out # of devices */
- if (num_raid_params >= argc) {
- ti->error = "Arguments do not agree with counts given";
+ /* Must have <#raid_params> */
+ if (dm_read_arg_group(_args, &as, &num_raid_params, &ti->error))
return -EINVAL;
- }
- if ((kstrtoul(argv[num_raid_params], 10, &num_raid_devs) < 0) ||
- (num_raid_devs > MAX_RAID_DEVICES)) {
- ti->error = "Cannot understand number of raid devices";
+ /* number of raid device tuples <meta_dev data_dev> */
+ as_nrd = as;
+ dm_consume_args(&as_nrd, num_raid_params);
+ _args[1].max = (as_nrd.argc - 1) / 2;
+ if (dm_read_arg(_args + 1, &as_nrd, &num_raid_devs, &ti->error))
return -EINVAL;
- }
- argc -= num_raid_params + 1; /* +1: we already have num_raid_devs */
- if (argc != (num_raid_devs * 2)) {
- ti->error = "Supplied RAID devices does not match the count given";
+ if (!__within_range(num_raid_devs, 1, MAX_RAID_DEVICES)) {
+ ti->error = "Invalid number of supplied raid devices";
return -EINVAL;
}
- rs = context_alloc(ti, rt, (unsigned)num_raid_devs);
+ rs = raid_set_alloc(ti, rt, num_raid_devs);
if (IS_ERR(rs))
return PTR_ERR(rs);
- ret = parse_raid_params(rs, argv, (unsigned)num_raid_params);
- if (ret)
+ r = parse_raid_params(rs, &as, num_raid_params);
+ if (r)
goto bad;
- argv += num_raid_params + 1;
-
- ret = dev_parms(rs, argv);
- if (ret)
+ r = parse_dev_params(rs, &as);
+ if (r)
goto bad;
rs->md.sync_super = super_sync;
- ret = analyse_superblocks(ti, rs);
- if (ret)
+
+ /*
+ * Calculate the ctr requested array and device sizes to allow
+ * for superblock analysis, which needs the device sizes defined.
+ *
+ * Any existing superblock will overwrite the array and device sizes
+ */
+ r = rs_set_dev_and_array_sectors(rs, false);
+ if (r)
+ goto bad;
+
+ calculated_dev_sectors = rs->dev[0].rdev.sectors;
+
+ /*
+ * Backup any new raid set level, layout, ...
+ * requested to be able to compare to superblock
+ * members for conversion decisions.
+ */
+ rs_config_backup(rs, &rs_layout);
+
+ r = analyse_superblocks(ti, rs);
+ if (r)
goto bad;
+ resize = calculated_dev_sectors != rs->dev[0].rdev.sectors;
+
INIT_WORK(&rs->md.event_work, do_table_event);
ti->private = rs;
ti->num_flush_bios = 1;
+ /* Restore any requested new layout for conversion decision */
+ rs_config_restore(rs, &rs_layout);
+
/*
- * Disable/enable discard support on RAID set.
+ * Now that we have any superblock metadata available,
+ * check for new, recovering, reshaping, to be taken over,
+ * to be reshaped or an existing, unchanged raid set to
+ * run in sequence.
*/
- configure_discard_support(ti, rs);
+ if (test_bit(MD_ARRAY_FIRST_USE, &rs->md.flags)) {
+ /* A new raid6 set has to be recovered to ensure proper parity and Q-Syndrome */
+ if (rs_is_raid6(rs) &&
+ test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
+ ti->error = "'nosync' not allowed for new raid6 set";
+ r = -EINVAL;
+ goto bad;
+ }
+ rs_setup_recovery(rs, 0);
+ set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
+ rs_set_new(rs);
+ } else if (rs_is_recovering(rs)) {
+ /* A recovering raid set may be resized */
+ ; /* skip setup rs */
+ } else if (rs_is_reshaping(rs)) {
+ /* Have to reject size change request during reshape */
+ if (resize) {
+ ti->error = "Can't resize a reshaping raid set";
+ r = -EPERM;
+ goto bad;
+ }
+ /* skip setup rs */
+ } else if (rs_takeover_requested(rs)) {
+ if (rs_is_reshaping(rs)) {
+ ti->error = "Can't takeover a reshaping raid set";
+ r = -EPERM;
+ goto bad;
+ }
+
+ /*
+ * If a takeover is needed, userspace sets any additional
+ * devices to rebuild and we can check for a valid request here.
+ *
+ * If acceptable, set the level to the new requested
+ * one, prohibit requesting recovery, allow the raid
+ * set to run and store superblocks during resume.
+ */
+ r = rs_check_takeover(rs);
+ if (r)
+ goto bad;
+
+ r = rs_setup_takeover(rs);
+ if (r)
+ goto bad;
+
+ set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
+ /* Takeover ain't recovery, so disable recovery */
+ rs_setup_recovery(rs, MaxSector);
+ rs_set_new(rs);
+ } else if (rs_reshape_requested(rs)) {
+ /*
+ * We can only prepare for a reshape here, because the
+ * raid set needs to run to provide the respective reshape
+ * check functions via its MD personality instance.
+ *
+ * So do the reshape check after md_run() succeeded.
+ */
+ r = rs_prepare_reshape(rs);
+ if (r)
+ return r;
+
+ /* Reshaping ain't recovery, so disable recovery */
+ rs_setup_recovery(rs, MaxSector);
+ rs_set_cur(rs);
+ } else {
+ /* May not set recovery when a device rebuild is requested */
+ if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
+ rs_setup_recovery(rs, MaxSector);
+ set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
+ } else
+ rs_setup_recovery(rs, test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) ?
+ 0 : (resize ? calculated_dev_sectors : MaxSector));
+ rs_set_cur(rs);
+ }
+
+ /* If constructor requested it, change data and new_data offsets */
+ r = rs_adjust_data_offsets(rs);
+ if (r)
+ goto bad;
+
+ /* Start raid set read-only and assumed clean to change in raid_resume() */
+ rs->md.ro = 1;
+ rs->md.in_sync = 1;
+ set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
/* Has to be held on running the array */
mddev_lock_nointr(&rs->md);
- ret = md_run(&rs->md);
+ r = md_run(&rs->md);
rs->md.in_sync = 0; /* Assume already marked dirty */
- mddev_unlock(&rs->md);
- if (ret) {
- ti->error = "Fail to run raid array";
+ if (r) {
+ ti->error = "Failed to run raid array";
+ mddev_unlock(&rs->md);
goto bad;
}
- if (ti->len != rs->md.array_sectors) {
- ti->error = "Array size does not match requested target length";
- ret = -EINVAL;
- goto size_mismatch;
- }
rs->callbacks.congested_fn = raid_is_congested;
dm_table_add_target_callbacks(ti->table, &rs->callbacks);
mddev_suspend(&rs->md);
+
+ /* Try to adjust the raid4/5/6 stripe cache size to the stripe size */
+ if (rs_is_raid456(rs)) {
+ r = rs_set_raid456_stripe_cache(rs);
+ if (r)
+ goto bad_stripe_cache;
+ }
+
+ /* Now do an early reshape check */
+ if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
+ r = rs_check_reshape(rs);
+ if (r)
+ goto bad_check_reshape;
+
+ /* Restore new, ctr requested layout to perform check */
+ rs_config_restore(rs, &rs_layout);
+
+ if (rs->md.pers->start_reshape) {
+ r = rs->md.pers->check_reshape(&rs->md);
+ if (r) {
+ ti->error = "Reshape check failed";
+ goto bad_check_reshape;
+ }
+ }
+ }
+
+ mddev_unlock(&rs->md);
return 0;
-size_mismatch:
+bad_stripe_cache:
+bad_check_reshape:
md_stop(&rs->md);
bad:
- context_free(rs);
+ raid_set_free(rs);
- return ret;
+ return r;
}
static void raid_dtr(struct dm_target *ti)
@@ -1327,7 +3009,7 @@ static void raid_dtr(struct dm_target *ti)
list_del_init(&rs->callbacks.list);
md_stop(&rs->md);
- context_free(rs);
+ raid_set_free(rs);
}
static int raid_map(struct dm_target *ti, struct bio *bio)
@@ -1335,11 +3017,23 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
struct raid_set *rs = ti->private;
struct mddev *mddev = &rs->md;
+ /*
+ * If we're reshaping to add disk(s), ti->len and
+ * mddev->array_sectors will differ during the process
+ * (ti->len > mddev->array_sectors), so we have to requeue
+ * bios with addresses > mddev->array_sectors here, or
+ * accesses past the EOD of the component data images
+ * will occur and error the raid set.
+ */
+ if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
+ return DM_MAPIO_REQUEUE;
+
mddev->pers->make_request(mddev, bio);
return DM_MAPIO_SUBMITTED;
}
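
The guard added to raid_map() compares the bio's end sector against the possibly still-growing array size. A self-contained model of the same boundary test; struct bio is kernel-internal, so a bio is reduced here to a start sector plus length:

    #include <stdbool.h>
    #include <stdint.h>

    /* Model a bio as (start sector, length in sectors) */
    struct toy_bio { uint64_t sector, len; };

    /* true -> requeue instead of touching sectors past the current array end */
    bool must_requeue(const struct toy_bio *bio, uint64_t array_sectors)
    {
            return bio->sector + bio->len > array_sectors;
    }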
+/* Return string describing the current sync action of @mddev */
static const char *decipher_sync_action(struct mddev *mddev)
{
if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
@@ -1365,195 +3059,260 @@ static const char *decipher_sync_action(struct mddev *mddev)
return "idle";
}
-static void raid_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+/*
+ * Return status string @rdev
+ *
+ * Status characters:
+ *
+ * 'D' = Dead/Failed device
+ * 'a' = Alive but not in-sync
+ * 'A' = Alive and in-sync
+ */
+static const char *__raid_dev_status(struct md_rdev *rdev, bool array_in_sync)
{
- struct raid_set *rs = ti->private;
- unsigned raid_param_cnt = 1; /* at least 1 for chunksize */
- unsigned sz = 0;
- int i, array_in_sync = 0;
- sector_t sync;
+ if (test_bit(Faulty, &rdev->flags))
+ return "D";
+ else if (!array_in_sync || !test_bit(In_sync, &rdev->flags))
+ return "a";
+ else
+ return "A";
+}
- switch (type) {
- case STATUSTYPE_INFO:
- DMEMIT("%s %d ", rs->raid_type->name, rs->md.raid_disks);
+/* Helper to return resync/reshape progress for @rs and @array_in_sync */
+static sector_t rs_get_progress(struct raid_set *rs,
+ sector_t resync_max_sectors, bool *array_in_sync)
+{
+ sector_t r, recovery_cp, curr_resync_completed;
+ struct mddev *mddev = &rs->md;
- if (rs->raid_type->level) {
- if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery))
- sync = rs->md.curr_resync_completed;
- else
- sync = rs->md.recovery_cp;
-
- if (sync >= rs->md.resync_max_sectors) {
- /*
- * Sync complete.
- */
- array_in_sync = 1;
- sync = rs->md.resync_max_sectors;
- } else if (test_bit(MD_RECOVERY_REQUESTED, &rs->md.recovery)) {
- /*
- * If "check" or "repair" is occurring, the array has
- * undergone and initial sync and the health characters
- * should not be 'a' anymore.
- */
- array_in_sync = 1;
+ curr_resync_completed = mddev->curr_resync_completed ?: mddev->recovery_cp;
+ recovery_cp = mddev->recovery_cp;
+ *array_in_sync = false;
+
+ if (rs_is_raid0(rs)) {
+ r = resync_max_sectors;
+ *array_in_sync = true;
+
+ } else {
+ r = mddev->reshape_position;
+
+ /* Reshape is relative to the array size */
+ if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
+ r != MaxSector) {
+ if (r == MaxSector) {
+ *array_in_sync = true;
+ r = resync_max_sectors;
} else {
- /*
- * The array may be doing an initial sync, or it may
- * be rebuilding individual components. If all the
- * devices are In_sync, then it is the array that is
- * being initialized.
- */
- for (i = 0; i < rs->md.raid_disks; i++)
- if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
- array_in_sync = 1;
+ /* Got to reverse on backward reshape */
+ if (mddev->reshape_backwards)
+ r = mddev->array_sectors - r;
+
+ /* Divide by the # of data stripes */
+ sector_div(r, mddev_data_stripes(rs));
}
+
+ /* Sync is relative to the component device size */
+ } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+ r = curr_resync_completed;
+ else
+ r = recovery_cp;
+
+ if (r == MaxSector) {
+ /*
+ * Sync complete.
+ */
+ *array_in_sync = true;
+ r = resync_max_sectors;
+ } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
+ /*
+ * If "check" or "repair" is occurring, the raid set has
+ * undergone an initial sync and the health characters
+ * should not be 'a' anymore.
+ */
+ *array_in_sync = true;
} else {
- /* RAID0 */
- array_in_sync = 1;
- sync = rs->md.resync_max_sectors;
- }
+ struct md_rdev *rdev;
- /*
- * Status characters:
- * 'D' = Dead/Failed device
- * 'a' = Alive but not in-sync
- * 'A' = Alive and in-sync
- */
- for (i = 0; i < rs->md.raid_disks; i++) {
- if (test_bit(Faulty, &rs->dev[i].rdev.flags))
- DMEMIT("D");
- else if (!array_in_sync ||
- !test_bit(In_sync, &rs->dev[i].rdev.flags))
- DMEMIT("a");
- else
- DMEMIT("A");
+ /*
+ * The raid set may be doing an initial sync, or it may
+ * be rebuilding individual components. If all the
+ * devices are In_sync, then it is the raid set that is
+ * being initialized.
+ */
+ rdev_for_each(rdev, mddev)
+ if (!test_bit(In_sync, &rdev->flags))
+ *array_in_sync = true;
+#if 0
+ r = 0; /* HM FIXME: TESTME: https://bugzilla.redhat.com/show_bug.cgi?id=1210637 ? */
+#endif
}
+ }
+
+ return r;
+}
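
During a reshape the reported progress is the reshape position mapped into per-data-stripe sectors, reversed first when reshaping backwards. A standalone sketch of that mapping with invented numbers, and with sector_div() replaced by plain 64-bit division:

    #include <stdint.h>
    #include <stdio.h>

    /* Map a reshape position to array-relative progress */
    uint64_t reshape_progress(uint64_t pos, uint64_t array_sectors,
                              uint64_t data_stripes, int backwards)
    {
            if (backwards)
                    pos = array_sectors - pos;      /* reverse for backward reshape */
            return pos / data_stripes;              /* per data stripe */
    }

    int main(void)
    {
            /* Hypothetical 4-data-stripe set, 8 GiB array, 2 GiB position */
            uint64_t array = 8ULL << 21, pos = 2ULL << 21;

            printf("forward:  %llu\n", (unsigned long long)
                   reshape_progress(pos, array, 4, 0));
            printf("backward: %llu\n", (unsigned long long)
                   reshape_progress(pos, array, 4, 1));
            return 0;
    }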
+
+/* Helper to return @dev name or "-" if !@dev */
+static const char *__get_dev_name(struct dm_dev *dev)
+{
+ return dev ? dev->name : "-";
+}
+
+static void raid_status(struct dm_target *ti, status_type_t type,
+ unsigned int status_flags, char *result, unsigned int maxlen)
+{
+ struct raid_set *rs = ti->private;
+ struct mddev *mddev = &rs->md;
+ struct r5conf *conf = mddev->private;
+ int i, max_nr_stripes = conf ? conf->max_nr_stripes : 0;
+ bool array_in_sync;
+ unsigned int raid_param_cnt = 1; /* at least 1 for chunksize */
+ unsigned int sz = 0;
+ unsigned int rebuild_disks;
+ unsigned int write_mostly_params = 0;
+ sector_t progress, resync_max_sectors, resync_mismatches;
+ const char *sync_action;
+ struct raid_type *rt;
+ struct md_rdev *rdev;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ /* *Should* always succeed */
+ rt = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
+ if (!rt)
+ return;
+
+ DMEMIT("%s %d ", rt->name, mddev->raid_disks);
+
+ /* Access most recent mddev properties for status output */
+ smp_rmb();
+ /* Get sensible max sectors even if raid set not yet started */
+ resync_max_sectors = test_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags) ?
+ mddev->resync_max_sectors : mddev->dev_sectors;
+ progress = rs_get_progress(rs, resync_max_sectors, &array_in_sync);
+ resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
+ atomic64_read(&mddev->resync_mismatches) : 0;
+ sync_action = decipher_sync_action(&rs->md);
+
+ /* HM FIXME: do we want another state char for raid0? It shows 'D' or 'A' now */
+ rdev_for_each(rdev, mddev)
+ DMEMIT(__raid_dev_status(rdev, array_in_sync));
/*
- * In-sync ratio:
+ * In-sync/Reshape ratio:
* The in-sync ratio shows the progress of:
- * - Initializing the array
- * - Rebuilding a subset of devices of the array
+ * - Initializing the raid set
+ * - Rebuilding a subset of devices of the raid set
* The user can distinguish between the two by referring
* to the status characters.
+ *
+ * The reshape ratio shows the progress of
+ * changing the raid layout or the number of
+ * disks of a raid set
*/
- DMEMIT(" %llu/%llu",
- (unsigned long long) sync,
- (unsigned long long) rs->md.resync_max_sectors);
+ DMEMIT(" %llu/%llu", (unsigned long long) progress,
+ (unsigned long long) resync_max_sectors);
/*
+ * v1.5.0+:
+ *
* Sync action:
- * See Documentation/device-mapper/dm-raid.c for
+ * See Documentation/device-mapper/dm-raid.txt for
* information on each of these states.
*/
- DMEMIT(" %s", decipher_sync_action(&rs->md));
+ DMEMIT(" %s", sync_action);
/*
+ * v1.5.0+:
+ *
* resync_mismatches/mismatch_cnt
* This field shows the number of discrepancies found when
- * performing a "check" of the array.
+ * performing a "check" of the raid set.
*/
- DMEMIT(" %llu",
- (strcmp(rs->md.last_sync_action, "check")) ? 0 :
- (unsigned long long)
- atomic64_read(&rs->md.resync_mismatches));
- break;
- case STATUSTYPE_TABLE:
- /* The string you would use to construct this array */
- for (i = 0; i < rs->md.raid_disks; i++) {
- if ((rs->ctr_flags & CTR_FLAG_REBUILD) &&
- rs->dev[i].data_dev &&
- !test_bit(In_sync, &rs->dev[i].rdev.flags))
- raid_param_cnt += 2; /* for rebuilds */
- if (rs->dev[i].data_dev &&
- test_bit(WriteMostly, &rs->dev[i].rdev.flags))
- raid_param_cnt += 2;
- }
-
- raid_param_cnt += (hweight32(rs->ctr_flags & ~CTR_FLAG_REBUILD) * 2);
- if (rs->ctr_flags & (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC))
- raid_param_cnt--;
-
- DMEMIT("%s %u %u", rs->raid_type->name,
- raid_param_cnt, rs->md.chunk_sectors);
-
- if ((rs->ctr_flags & CTR_FLAG_SYNC) &&
- (rs->md.recovery_cp == MaxSector))
- DMEMIT(" sync");
- if (rs->ctr_flags & CTR_FLAG_NOSYNC)
- DMEMIT(" nosync");
-
- for (i = 0; i < rs->md.raid_disks; i++)
- if ((rs->ctr_flags & CTR_FLAG_REBUILD) &&
- rs->dev[i].data_dev &&
- !test_bit(In_sync, &rs->dev[i].rdev.flags))
- DMEMIT(" rebuild %u", i);
-
- if (rs->ctr_flags & CTR_FLAG_DAEMON_SLEEP)
- DMEMIT(" daemon_sleep %lu",
- rs->md.bitmap_info.daemon_sleep);
-
- if (rs->ctr_flags & CTR_FLAG_MIN_RECOVERY_RATE)
- DMEMIT(" min_recovery_rate %d", rs->md.sync_speed_min);
-
- if (rs->ctr_flags & CTR_FLAG_MAX_RECOVERY_RATE)
- DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max);
-
- for (i = 0; i < rs->md.raid_disks; i++)
- if (rs->dev[i].data_dev &&
- test_bit(WriteMostly, &rs->dev[i].rdev.flags))
- DMEMIT(" write_mostly %u", i);
-
- if (rs->ctr_flags & CTR_FLAG_MAX_WRITE_BEHIND)
- DMEMIT(" max_write_behind %lu",
- rs->md.bitmap_info.max_write_behind);
-
- if (rs->ctr_flags & CTR_FLAG_STRIPE_CACHE) {
- struct r5conf *conf = rs->md.private;
-
- /* convert from kiB to sectors */
- DMEMIT(" stripe_cache %d",
- conf ? conf->max_nr_stripes * 2 : 0);
- }
-
- if (rs->ctr_flags & CTR_FLAG_REGION_SIZE)
- DMEMIT(" region_size %lu",
- rs->md.bitmap_info.chunksize >> 9);
+ DMEMIT(" %llu", (unsigned long long) resync_mismatches);
- if (rs->ctr_flags & CTR_FLAG_RAID10_COPIES)
- DMEMIT(" raid10_copies %u",
- raid10_md_layout_to_copies(rs->md.layout));
-
- if (rs->ctr_flags & CTR_FLAG_RAID10_FORMAT)
- DMEMIT(" raid10_format %s",
- raid10_md_layout_to_format(rs->md.layout));
-
- DMEMIT(" %d", rs->md.raid_disks);
- for (i = 0; i < rs->md.raid_disks; i++) {
- if (rs->dev[i].meta_dev)
- DMEMIT(" %s", rs->dev[i].meta_dev->name);
- else
- DMEMIT(" -");
+ /*
+ * v1.9.0+:
+ *
+	 * data_offset (needed for out-of-place reshaping)
+	 * This field shows the data offset into the data
+	 * image LV where the first stripe's data starts.
+ *
+ * We keep data_offset equal on all raid disks of the set,
+ * so retrieving it from the first raid disk is sufficient.
+ */
+ DMEMIT(" %llu", (unsigned long long) rs->dev[0].rdev.data_offset);
+ break;
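+
+		/*
+		 * Illustrative example (not emitted by the code itself): for a
+		 * healthy, fully synced 4-device raid10 set, the resulting
+		 * STATUSTYPE_INFO line could read
+		 *
+		 *	raid10 4 AAAA 2097152/2097152 idle 0 0
+		 *
+		 * i.e. raid type, #devices, per-device health characters,
+		 * in-sync ratio, sync action, mismatch count and data_offset.
+		 */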
- if (rs->dev[i].data_dev)
- DMEMIT(" %s", rs->dev[i].data_dev->name);
- else
- DMEMIT(" -");
- }
+ case STATUSTYPE_TABLE:
+ /* Report the table line string you would use to construct this raid set */
+
+ /* Calculate raid parameter count */
+ for (i = 0; i < rs->raid_disks; i++)
+ if (test_bit(WriteMostly, &rs->dev[i].rdev.flags))
+ write_mostly_params += 2;
+ rebuild_disks = memweight(rs->rebuild_disks, DISKS_ARRAY_ELEMS * sizeof(*rs->rebuild_disks));
+ raid_param_cnt += rebuild_disks * 2 +
+ write_mostly_params +
+ hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_NO_ARGS) +
+ hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_ONE_ARG) * 2;
+ /* Emit table line */
+ DMEMIT("%s %u %u", rs->raid_type->name, raid_param_cnt, mddev->new_chunk_sectors);
+ if (test_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags))
+ DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT),
+ raid10_md_layout_to_format(mddev->layout));
+ if (test_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags))
+ DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_COPIES),
+ raid10_md_layout_to_copies(mddev->layout));
+ if (test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
+ DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC));
+ if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
+ DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_SYNC));
+ if (test_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags))
+ DMEMIT(" %s %llu", dm_raid_arg_name_by_flag(CTR_FLAG_REGION_SIZE),
+ (unsigned long long) to_sector(mddev->bitmap_info.chunksize));
+ if (test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags))
+ DMEMIT(" %s %llu", dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET),
+ (unsigned long long) rs->data_offset);
+ if (test_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags))
+ DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP),
+ mddev->bitmap_info.daemon_sleep);
+ if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags))
+ DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_DELTA_DISKS),
+ max(rs->delta_disks, mddev->delta_disks));
+ if (test_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags))
+ DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_STRIPE_CACHE),
+ max_nr_stripes);
+ if (rebuild_disks)
+ for (i = 0; i < rs->raid_disks; i++)
+ if (test_bit(rs->dev[i].rdev.raid_disk, (void *) rs->rebuild_disks))
+ DMEMIT(" %s %u", dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD),
+ rs->dev[i].rdev.raid_disk);
+ if (write_mostly_params)
+ for (i = 0; i < rs->raid_disks; i++)
+ if (test_bit(WriteMostly, &rs->dev[i].rdev.flags))
+ DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY),
+ rs->dev[i].rdev.raid_disk);
+ if (test_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags))
+ DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_WRITE_BEHIND),
+ mddev->bitmap_info.max_write_behind);
+ if (test_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags))
+ DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE),
+ mddev->sync_speed_max);
+ if (test_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags))
+ DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE),
+ mddev->sync_speed_min);
+ DMEMIT(" %d", rs->raid_disks);
+ for (i = 0; i < rs->raid_disks; i++)
+ DMEMIT(" %s %s", __get_dev_name(rs->dev[i].meta_dev),
+ __get_dev_name(rs->dev[i].data_dev));
}
}
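+
+/*
+ * Illustrative example (assumed device numbers): the STATUSTYPE_TABLE
+ * output above reproduces the constructor line; a 3-device raid5 with a
+ * 128-sector chunk and no optional arguments could emit
+ *
+ *	raid5_ls 1 128 3 - 254:1 - 254:2 - 254:3
+ *
+ * i.e. raid type, parameter count, chunk sectors, optional arguments
+ * (none here), #devices and one <meta_dev data_dev> pair per device,
+ * with "-" standing in for an absent metadata device.
+ */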
-static int raid_message(struct dm_target *ti, unsigned argc, char **argv)
+static int raid_message(struct dm_target *ti, unsigned int argc, char **argv)
{
struct raid_set *rs = ti->private;
struct mddev *mddev = &rs->md;
- if (!strcasecmp(argv[0], "reshape")) {
- DMERR("Reshape not supported.");
- return -EINVAL;
- }
-
if (!mddev->pers || !mddev->pers->sync_request)
return -EINVAL;
@@ -1571,11 +3330,10 @@ static int raid_message(struct dm_target *ti, unsigned argc, char **argv)
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
return -EBUSY;
else if (!strcasecmp(argv[0], "resync"))
- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- else if (!strcasecmp(argv[0], "recover")) {
+ ; /* MD_RECOVERY_NEEDED set below */
+ else if (!strcasecmp(argv[0], "recover"))
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- } else {
+ else {
if (!strcasecmp(argv[0], "check"))
set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
else if (!!strcasecmp(argv[0], "repair"))
@@ -1588,11 +3346,11 @@ static int raid_message(struct dm_target *ti, unsigned argc, char **argv)
* canceling read-auto mode
*/
mddev->ro = 0;
- if (!mddev->suspended)
+ if (!mddev->suspended && mddev->sync_thread)
md_wakeup_thread(mddev->sync_thread);
}
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- if (!mddev->suspended)
+ if (!mddev->suspended && mddev->thread)
md_wakeup_thread(mddev->thread);
return 0;
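+
+/*
+ * Illustrative usage (hypothetical device name): the messages handled
+ * above arrive via the device-mapper message ioctl, e.g.
+ *
+ *	dmsetup message my_raid 0 check
+ *	dmsetup message my_raid 0 resync
+ *
+ * The handler maps these onto the corresponding MD_RECOVERY_* flags and
+ * finally raises MD_RECOVERY_NEEDED.
+ */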
@@ -1602,28 +3360,27 @@ static int raid_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
struct raid_set *rs = ti->private;
- unsigned i;
- int ret = 0;
+ unsigned int i;
+ int r = 0;
- for (i = 0; !ret && i < rs->md.raid_disks; i++)
+ for (i = 0; !r && i < rs->md.raid_disks; i++)
if (rs->dev[i].data_dev)
- ret = fn(ti,
+ r = fn(ti,
rs->dev[i].data_dev,
0, /* No offset on data devs */
rs->md.dev_sectors,
data);
- return ret;
+ return r;
}
static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
struct raid_set *rs = ti->private;
- unsigned chunk_size = rs->md.chunk_sectors << 9;
- struct r5conf *conf = rs->md.private;
+ unsigned int chunk_size = to_bytes(rs->md.chunk_sectors);
blk_limits_io_min(limits, chunk_size);
- blk_limits_io_opt(limits, chunk_size * (conf->raid_disks - conf->max_degraded));
+ blk_limits_io_opt(limits, chunk_size * mddev_data_stripes(rs));
}
static void raid_presuspend(struct dm_target *ti)
@@ -1637,21 +3394,33 @@ static void raid_postsuspend(struct dm_target *ti)
{
struct raid_set *rs = ti->private;
- mddev_suspend(&rs->md);
+ if (!rs->md.suspended)
+ mddev_suspend(&rs->md);
+
+ rs->md.ro = 1;
}
static void attempt_restore_of_faulty_devices(struct raid_set *rs)
{
int i;
- uint64_t failed_devices, cleared_failed_devices = 0;
+ uint64_t cleared_failed_devices[DISKS_ARRAY_ELEMS];
unsigned long flags;
+ bool cleared = false;
struct dm_raid_superblock *sb;
+ struct mddev *mddev = &rs->md;
struct md_rdev *r;
+ /* RAID personalities have to provide hot add/remove methods or we need to bail out. */
+ if (!mddev->pers || !mddev->pers->hot_add_disk || !mddev->pers->hot_remove_disk)
+ return;
+
+ memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices));
+
for (i = 0; i < rs->md.raid_disks; i++) {
r = &rs->dev[i].rdev;
if (test_bit(Faulty, &r->flags) && r->sb_page &&
- sync_page_io(r, 0, r->sb_size, r->sb_page, READ, 1)) {
+ sync_page_io(r, 0, r->sb_size, r->sb_page,
+ REQ_OP_READ, 0, true)) {
DMINFO("Faulty %s device #%d has readable super block."
" Attempting to revive it.",
rs->raid_type->name, i);
@@ -1660,13 +3429,13 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs)
* Faulty bit may be set, but sometimes the array can
* be suspended before the personalities can respond
* by removing the device from the array (i.e. calling
- * 'hot_remove_disk'). If they haven't yet removed
+ * 'hot_remove_disk'). If they haven't yet removed
* the failed device, its 'raid_disk' number will be
* '>= 0' - meaning we must call this function
* ourselves.
*/
if ((r->raid_disk >= 0) &&
- (r->mddev->pers->hot_remove_disk(r->mddev, r) != 0))
+ (mddev->pers->hot_remove_disk(mddev, r) != 0))
/* Failed to revive this device, try next */
continue;
@@ -1676,54 +3445,208 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs)
clear_bit(Faulty, &r->flags);
clear_bit(WriteErrorSeen, &r->flags);
clear_bit(In_sync, &r->flags);
- if (r->mddev->pers->hot_add_disk(r->mddev, r)) {
+ if (mddev->pers->hot_add_disk(mddev, r)) {
r->raid_disk = -1;
r->saved_raid_disk = -1;
r->flags = flags;
} else {
r->recovery_offset = 0;
- cleared_failed_devices |= 1 << i;
+ set_bit(i, (void *) cleared_failed_devices);
+ cleared = true;
}
}
}
- if (cleared_failed_devices) {
+
+ /* If any failed devices could be cleared, update all sbs failed_devices bits */
+ if (cleared) {
+ uint64_t failed_devices[DISKS_ARRAY_ELEMS];
+
rdev_for_each(r, &rs->md) {
sb = page_address(r->sb_page);
- failed_devices = le64_to_cpu(sb->failed_devices);
- failed_devices &= ~cleared_failed_devices;
- sb->failed_devices = cpu_to_le64(failed_devices);
+ sb_retrieve_failed_devices(sb, failed_devices);
+
+ for (i = 0; i < DISKS_ARRAY_ELEMS; i++)
+ failed_devices[i] &= ~cleared_failed_devices[i];
+
+ sb_update_failed_devices(sb, failed_devices);
}
}
}
-static void raid_resume(struct dm_target *ti)
+static int __load_dirty_region_bitmap(struct raid_set *rs)
{
- struct raid_set *rs = ti->private;
+ int r = 0;
+
+ /* Try loading the bitmap unless "raid0", which does not have one */
+ if (!rs_is_raid0(rs) &&
+ !test_and_set_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) {
+ r = bitmap_load(&rs->md);
+ if (r)
+ DMERR("Failed to load bitmap");
+ }
- if (rs->raid_type->level) {
- set_bit(MD_CHANGE_DEVS, &rs->md.flags);
+ return r;
+}
- if (!rs->bitmap_loaded) {
- bitmap_load(&rs->md);
- rs->bitmap_loaded = 1;
- } else {
- /*
- * A secondary resume while the device is active.
- * Take this opportunity to check whether any failed
- * devices are reachable again.
- */
- attempt_restore_of_faulty_devices(rs);
+/* Enforce updating all superblocks */
+static void rs_update_sbs(struct raid_set *rs)
+{
+ struct mddev *mddev = &rs->md;
+ int ro = mddev->ro;
+
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ mddev->ro = 0;
+ md_update_sb(mddev, 1);
+ mddev->ro = ro;
+}
+
+/*
+ * Reshape changes the raid algorithm of @rs to a new one within the
+ * same personality (e.g. raid6_zr -> raid6_nc), changes the stripe
+ * size, adds/removes disks (thus growing/shrinking the set) or
+ * resizes the set
+ *
+ * Call mddev_lock_nointr() before!
+ */
+static int rs_start_reshape(struct raid_set *rs)
+{
+ int r;
+ struct mddev *mddev = &rs->md;
+ struct md_personality *pers = mddev->pers;
+
+ r = rs_setup_reshape(rs);
+ if (r)
+ return r;
+
+	/* Need to be resumed to be able to start a reshape; recovery stays frozen until raid_resume() though */
+ if (mddev->suspended)
+ mddev_resume(mddev);
+
+ /*
+	 * Check any reshape constraints enforced by the personality
+	 *
+	 * May as well already kick the reshape off so that
+	 * pers->start_reshape() becomes optional.
+ */
+ r = pers->check_reshape(mddev);
+ if (r) {
+ rs->ti->error = "pers->check_reshape() failed";
+ return r;
+ }
+
+ /*
+	 * Personality may not provide a start_reshape method, in which
+	 * case check_reshape() above has already covered everything
+ */
+ if (pers->start_reshape) {
+ r = pers->start_reshape(mddev);
+ if (r) {
+ rs->ti->error = "pers->start_reshape() failed";
+ return r;
}
+ }
+
+ /* Suspend because a resume will happen in raid_resume() */
+ if (!mddev->suspended)
+ mddev_suspend(mddev);
+
+ /*
+	 * Now that the reshape has been set up, update the superblocks
+	 * to reflect it so that a table reload will access proper
+	 * superblock content in the ctr.
+ */
+ rs_update_sbs(rs);
+
+ return 0;
+}
+
+static int raid_preresume(struct dm_target *ti)
+{
+ int r;
+ struct raid_set *rs = ti->private;
+ struct mddev *mddev = &rs->md;
+
+ /* This is a resume after a suspend of the set -> it's already started */
+ if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags))
+ return 0;
+
+ /*
+	 * The superblocks need to be updated on disk if the
+	 * array is new or new devices got added (and thus were
+	 * zeroed out by userspace); otherwise __load_dirty_region_bitmap
+	 * will overwrite them in core with old data or fail.
+ */
+ if (test_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags))
+ rs_update_sbs(rs);
+
+ /*
+ * Disable/enable discard support on raid set after any
+	 * conversion, because devices may have been added
+ */
+ configure_discard_support(rs);
+
+ /* Load the bitmap from disk unless raid0 */
+ r = __load_dirty_region_bitmap(rs);
+ if (r)
+ return r;
+
+ /* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) */
+ if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) &&
+ mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) {
+ r = bitmap_resize(mddev->bitmap, mddev->dev_sectors,
+ to_bytes(rs->requested_bitmap_chunk_sectors), 0);
+ if (r)
+ DMERR("Failed to resize bitmap");
+ }
+
+ /* Check for any resize/reshape on @rs and adjust/initiate */
+ /* Be prepared for mddev_resume() in raid_resume() */
+ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) {
+ set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ mddev->resync_min = mddev->recovery_cp;
+ }
- clear_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
+ rs_set_capacity(rs);
+
+ /* Check for any reshape request unless new raid set */
+ if (test_and_clear_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
+ /* Initiate a reshape. */
+ mddev_lock_nointr(mddev);
+ r = rs_start_reshape(rs);
+ mddev_unlock(mddev);
+ if (r)
+ DMWARN("Failed to check/start reshape, continuing without change");
+ r = 0;
}
- mddev_resume(&rs->md);
+ return r;
+}
+
+static void raid_resume(struct dm_target *ti)
+{
+ struct raid_set *rs = ti->private;
+ struct mddev *mddev = &rs->md;
+
+ if (test_and_set_bit(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) {
+ /*
+ * A secondary resume while the device is active.
+ * Take this opportunity to check whether any failed
+ * devices are reachable again.
+ */
+ attempt_restore_of_faulty_devices(rs);
+ }
+
+ mddev->ro = 0;
+ mddev->in_sync = 0;
+
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+
+ if (mddev->suspended)
+ mddev_resume(mddev);
}
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 8, 0},
+ .version = {1, 9, 0},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
@@ -1734,6 +3657,7 @@ static struct target_type raid_target = {
.io_hints = raid_io_hints,
.presuspend = raid_presuspend,
.postsuspend = raid_postsuspend,
+ .preresume = raid_preresume,
.resume = raid_resume,
};
@@ -1758,11 +3682,13 @@ module_param(devices_handle_discard_safely, bool, 0644);
MODULE_PARM_DESC(devices_handle_discard_safely,
"Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
-MODULE_DESCRIPTION(DM_NAME " raid4/5/6 target");
+MODULE_DESCRIPTION(DM_NAME " raid0/1/10/4/5/6 target");
+MODULE_ALIAS("dm-raid0");
MODULE_ALIAS("dm-raid1");
MODULE_ALIAS("dm-raid10");
MODULE_ALIAS("dm-raid4");
MODULE_ALIAS("dm-raid5");
MODULE_ALIAS("dm-raid6");
MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>");
+MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index b3ccf1e0d..bdf1606f6 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -260,7 +260,8 @@ static int mirror_flush(struct dm_target *ti)
struct dm_io_region io[ms->nr_mirrors];
struct mirror *m;
struct dm_io_request io_req = {
- .bi_rw = WRITE_FLUSH,
+ .bi_op = REQ_OP_WRITE,
+ .bi_op_flags = WRITE_FLUSH,
.mem.type = DM_IO_KMEM,
.mem.ptr.addr = NULL,
.client = ms->io_client,
@@ -527,7 +528,7 @@ static void read_callback(unsigned long error, void *context)
DMWARN_LIMIT("Read failure on mirror device %s. "
"Trying alternative device.",
m->dev->name);
- queue_bio(m->ms, bio, bio_rw(bio));
+ queue_bio(m->ms, bio, bio_data_dir(bio));
return;
}
@@ -541,7 +542,8 @@ static void read_async_bio(struct mirror *m, struct bio *bio)
{
struct dm_io_region io;
struct dm_io_request io_req = {
- .bi_rw = READ,
+ .bi_op = REQ_OP_READ,
+ .bi_op_flags = 0,
.mem.type = DM_IO_BIO,
.mem.ptr.bio = bio,
.notify.fn = read_callback,
@@ -624,7 +626,7 @@ static void write_callback(unsigned long error, void *context)
* If the bio is discard, return an error, but do not
* degrade the array.
*/
- if (bio->bi_rw & REQ_DISCARD) {
+ if (bio_op(bio) == REQ_OP_DISCARD) {
bio->bi_error = -EOPNOTSUPP;
bio_endio(bio);
return;
@@ -654,7 +656,8 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
struct dm_io_region io[ms->nr_mirrors], *dest = io;
struct mirror *m;
struct dm_io_request io_req = {
- .bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA),
+ .bi_op = REQ_OP_WRITE,
+ .bi_op_flags = bio->bi_opf & WRITE_FLUSH_FUA,
.mem.type = DM_IO_BIO,
.mem.ptr.bio = bio,
.notify.fn = write_callback,
@@ -662,8 +665,8 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
.client = ms->io_client,
};
- if (bio->bi_rw & REQ_DISCARD) {
- io_req.bi_rw |= REQ_DISCARD;
+ if (bio_op(bio) == REQ_OP_DISCARD) {
+ io_req.bi_op = REQ_OP_DISCARD;
io_req.mem.type = DM_IO_KMEM;
io_req.mem.ptr.addr = NULL;
}
@@ -701,8 +704,8 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
bio_list_init(&requeue);
while ((bio = bio_list_pop(writes))) {
- if ((bio->bi_rw & REQ_FLUSH) ||
- (bio->bi_rw & REQ_DISCARD)) {
+ if ((bio->bi_opf & REQ_PREFLUSH) ||
+ (bio_op(bio) == REQ_OP_DISCARD)) {
bio_list_add(&sync, bio);
continue;
}
@@ -1190,7 +1193,7 @@ static void mirror_dtr(struct dm_target *ti)
*/
static int mirror_map(struct dm_target *ti, struct bio *bio)
{
- int r, rw = bio_rw(bio);
+ int r, rw = bio_data_dir(bio);
struct mirror *m;
struct mirror_set *ms = ti->private;
struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
@@ -1214,7 +1217,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
* If region is not in-sync queue the bio.
*/
if (!r || (r == -EWOULDBLOCK)) {
- if (rw == READA)
+ if (bio->bi_opf & REQ_RAHEAD)
return -EWOULDBLOCK;
queue_bio(ms, bio, rw);
@@ -1239,7 +1242,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
{
- int rw = bio_rw(bio);
+ int rw = bio_data_dir(bio);
struct mirror_set *ms = (struct mirror_set *) ti->private;
struct mirror *m = NULL;
struct dm_bio_details *bd = NULL;
@@ -1250,7 +1253,8 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
* We need to dec pending if this was a write.
*/
if (rw == WRITE) {
- if (!(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD)))
+ if (!(bio->bi_opf & REQ_PREFLUSH) &&
+ bio_op(bio) != REQ_OP_DISCARD)
dm_rh_dec(ms->rh, bio_record->write_region);
return error;
}
@@ -1258,7 +1262,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
if (error == -EOPNOTSUPP)
goto out;
- if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
+ if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
goto out;
if (unlikely(error)) {
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 74cb7b991..85c32b22a 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -398,12 +398,12 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
region_t region = dm_rh_bio_to_region(rh, bio);
int recovering = 0;
- if (bio->bi_rw & REQ_FLUSH) {
+ if (bio->bi_opf & REQ_PREFLUSH) {
rh->flush_failure = 1;
return;
}
- if (bio->bi_rw & REQ_DISCARD)
+ if (bio_op(bio) == REQ_OP_DISCARD)
return;
/* We must inform the log that the sync count has changed. */
@@ -526,7 +526,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
struct bio *bio;
for (bio = bios->head; bio; bio = bio->bi_next) {
- if (bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))
+ if (bio->bi_opf & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)
continue;
rh_inc(rh, dm_rh_bio_to_region(rh, bio));
}
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
new file mode 100644
index 000000000..1ca7463e8
--- /dev/null
+++ b/drivers/md/dm-rq.c
@@ -0,0 +1,988 @@
+/*
+ * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-core.h"
+#include "dm-rq.h"
+
+#include <linux/elevator.h> /* for rq_end_sector() */
+#include <linux/blk-mq.h>
+
+#define DM_MSG_PREFIX "core-rq"
+
+#define DM_MQ_NR_HW_QUEUES 1
+#define DM_MQ_QUEUE_DEPTH 2048
+static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
+static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;
+
+/*
+ * Number of IOs reserved in request-based DM's mempools; set by the user.
+ */
+#define RESERVED_REQUEST_BASED_IOS 256
+static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
+
+#ifdef CONFIG_DM_MQ_DEFAULT
+static bool use_blk_mq = true;
+#else
+static bool use_blk_mq = false;
+#endif
+
+bool dm_use_blk_mq_default(void)
+{
+ return use_blk_mq;
+}
+
+bool dm_use_blk_mq(struct mapped_device *md)
+{
+ return md->use_blk_mq;
+}
+EXPORT_SYMBOL_GPL(dm_use_blk_mq);
+
+unsigned dm_get_reserved_rq_based_ios(void)
+{
+ return __dm_get_module_param(&reserved_rq_based_ios,
+ RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
+}
+EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
+
+static unsigned dm_get_blk_mq_nr_hw_queues(void)
+{
+ return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
+}
+
+static unsigned dm_get_blk_mq_queue_depth(void)
+{
+ return __dm_get_module_param(&dm_mq_queue_depth,
+ DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
+}
+
+int dm_request_based(struct mapped_device *md)
+{
+ return blk_queue_stackable(md->queue);
+}
+
+static void dm_old_start_queue(struct request_queue *q)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (blk_queue_stopped(q))
+ blk_start_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+void dm_start_queue(struct request_queue *q)
+{
+ if (!q->mq_ops)
+ dm_old_start_queue(q);
+ else {
+ queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, q);
+ blk_mq_start_stopped_hw_queues(q, true);
+ blk_mq_kick_requeue_list(q);
+ }
+}
+
+static void dm_old_stop_queue(struct request_queue *q)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (blk_queue_stopped(q)) {
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ return;
+ }
+
+ blk_stop_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+void dm_stop_queue(struct request_queue *q)
+{
+ if (!q->mq_ops)
+ dm_old_stop_queue(q);
+ else {
+ spin_lock_irq(q->queue_lock);
+ queue_flag_set(QUEUE_FLAG_STOPPED, q);
+ spin_unlock_irq(q->queue_lock);
+
+ blk_mq_cancel_requeue_work(q);
+ blk_mq_stop_hw_queues(q);
+ }
+}
+
+static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
+ gfp_t gfp_mask)
+{
+ return mempool_alloc(md->io_pool, gfp_mask);
+}
+
+static void free_old_rq_tio(struct dm_rq_target_io *tio)
+{
+ mempool_free(tio, tio->md->io_pool);
+}
+
+static struct request *alloc_old_clone_request(struct mapped_device *md,
+ gfp_t gfp_mask)
+{
+ return mempool_alloc(md->rq_pool, gfp_mask);
+}
+
+static void free_old_clone_request(struct mapped_device *md, struct request *rq)
+{
+ mempool_free(rq, md->rq_pool);
+}
+
+/*
+ * Partial completion handling for request-based dm
+ */
+static void end_clone_bio(struct bio *clone)
+{
+ struct dm_rq_clone_bio_info *info =
+ container_of(clone, struct dm_rq_clone_bio_info, clone);
+ struct dm_rq_target_io *tio = info->tio;
+ struct bio *bio = info->orig;
+ unsigned int nr_bytes = info->orig->bi_iter.bi_size;
+ int error = clone->bi_error;
+
+ bio_put(clone);
+
+ if (tio->error)
+ /*
+ * An error has already been detected on the request.
+		 * Once an error has occurred, just let clone->end_io() handle
+ * the remainder.
+ */
+ return;
+ else if (error) {
+ /*
+		 * Don't report the error to the upper layer yet.
+		 * The error-handling decision is made by the target driver
+		 * when the request is completed.
+ */
+ tio->error = error;
+ return;
+ }
+
+ /*
+ * I/O for the bio successfully completed.
+	 * Report the data completion to the upper layer.
+ */
+
+ /*
+ * bios are processed from the head of the list.
+ * So the completing bio should always be rq->bio.
+	 * If it's not, something is wrong.
+ */
+ if (tio->orig->bio != bio)
+ DMERR("bio completion is going in the middle of the request");
+
+ /*
+ * Update the original request.
+ * Do not use blk_end_request() here, because it may complete
+ * the original request before the clone, and break the ordering.
+ */
+ blk_update_request(tio->orig, 0, nr_bytes);
+}
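+
+/*
+ * Worked example of the partial completion above (illustrative): if the
+ * original request carries bios A and B and A's clone completes first,
+ * end_clone_bio() calls blk_update_request(tio->orig, 0, len(A)), which
+ * advances the original request past A while keeping it active until B's
+ * clone (and then clone->end_io) finishes.
+ */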
+
+static struct dm_rq_target_io *tio_from_request(struct request *rq)
+{
+ return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special);
+}
+
+static void rq_end_stats(struct mapped_device *md, struct request *orig)
+{
+ if (unlikely(dm_stats_used(&md->stats))) {
+ struct dm_rq_target_io *tio = tio_from_request(orig);
+ tio->duration_jiffies = jiffies - tio->duration_jiffies;
+ dm_stats_account_io(&md->stats, rq_data_dir(orig),
+ blk_rq_pos(orig), tio->n_sectors, true,
+ tio->duration_jiffies, &tio->stats_aux);
+ }
+}
+
+/*
+ * Don't touch any member of the md after calling this function because
+ * the md may be freed in dm_put() at the end of this function.
+ * Or do dm_get() before calling this function and dm_put() later.
+ */
+static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
+{
+ atomic_dec(&md->pending[rw]);
+
+ /* nudge anyone waiting on suspend queue */
+ if (!md_in_flight(md))
+ wake_up(&md->wait);
+
+ /*
+ * Run this off this callpath, as drivers could invoke end_io while
+ * inside their request_fn (and holding the queue lock). Calling
+ * back into ->request_fn() could deadlock attempting to grab the
+ * queue lock again.
+ */
+ if (!md->queue->mq_ops && run_queue)
+ blk_run_queue_async(md->queue);
+
+ /*
+	 * dm_put() must be at the end of this function. See the comment above.
+ */
+ dm_put(md);
+}
+
+static void free_rq_clone(struct request *clone)
+{
+ struct dm_rq_target_io *tio = clone->end_io_data;
+ struct mapped_device *md = tio->md;
+
+ blk_rq_unprep_clone(clone);
+
+ /*
+ * It is possible for a clone_old_rq() allocated clone to
+ * get passed in -- it may not yet have a request_queue.
+ * This is known to occur if the error target replaces
+ * a multipath target that has a request_fn queue stacked
+ * on blk-mq queue(s).
+ */
+ if (clone->q && clone->q->mq_ops)
+ /* stacked on blk-mq queue(s) */
+ tio->ti->type->release_clone_rq(clone);
+ else if (!md->queue->mq_ops)
+ /* request_fn queue stacked on request_fn queue(s) */
+ free_old_clone_request(md, clone);
+
+ if (!md->queue->mq_ops)
+ free_old_rq_tio(tio);
+}
+
+/*
+ * Complete the clone and the original request.
+ * Must be called without the clone's queue lock held;
+ * see end_clone_request() for more details.
+ */
+static void dm_end_request(struct request *clone, int error)
+{
+ int rw = rq_data_dir(clone);
+ struct dm_rq_target_io *tio = clone->end_io_data;
+ struct mapped_device *md = tio->md;
+ struct request *rq = tio->orig;
+
+ if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
+ rq->errors = clone->errors;
+ rq->resid_len = clone->resid_len;
+
+ if (rq->sense)
+ /*
+ * We are using the sense buffer of the original
+ * request.
+ * So setting the length of the sense data is enough.
+ */
+ rq->sense_len = clone->sense_len;
+ }
+
+ free_rq_clone(clone);
+ rq_end_stats(md, rq);
+ if (!rq->q->mq_ops)
+ blk_end_request_all(rq, error);
+ else
+ blk_mq_end_request(rq, error);
+ rq_completed(md, rw, true);
+}
+
+static void dm_unprep_request(struct request *rq)
+{
+ struct dm_rq_target_io *tio = tio_from_request(rq);
+ struct request *clone = tio->clone;
+
+ if (!rq->q->mq_ops) {
+ rq->special = NULL;
+ rq->cmd_flags &= ~REQ_DONTPREP;
+ }
+
+ if (clone)
+ free_rq_clone(clone);
+ else if (!tio->md->queue->mq_ops)
+ free_old_rq_tio(tio);
+}
+
+/*
+ * Requeue the original request of a clone.
+ */
+static void dm_old_requeue_request(struct request *rq)
+{
+ struct request_queue *q = rq->q;
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_requeue_request(q, rq);
+ blk_run_queue_async(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+static void dm_mq_requeue_request(struct request *rq)
+{
+ struct request_queue *q = rq->q;
+ unsigned long flags;
+
+ blk_mq_requeue_request(rq);
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (!blk_queue_stopped(q))
+ blk_mq_kick_requeue_list(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+static void dm_requeue_original_request(struct mapped_device *md,
+ struct request *rq)
+{
+ int rw = rq_data_dir(rq);
+
+ rq_end_stats(md, rq);
+ dm_unprep_request(rq);
+
+ if (!rq->q->mq_ops)
+ dm_old_requeue_request(rq);
+ else
+ dm_mq_requeue_request(rq);
+
+ rq_completed(md, rw, false);
+}
+
+static void dm_done(struct request *clone, int error, bool mapped)
+{
+ int r = error;
+ struct dm_rq_target_io *tio = clone->end_io_data;
+ dm_request_endio_fn rq_end_io = NULL;
+
+ if (tio->ti) {
+ rq_end_io = tio->ti->type->rq_end_io;
+
+ if (mapped && rq_end_io)
+ r = rq_end_io(tio->ti, clone, error, &tio->info);
+ }
+
+ if (unlikely(r == -EREMOTEIO && (req_op(clone) == REQ_OP_WRITE_SAME) &&
+ !clone->q->limits.max_write_same_sectors))
+ disable_write_same(tio->md);
+
+ if (r <= 0)
+ /* The target wants to complete the I/O */
+ dm_end_request(clone, r);
+ else if (r == DM_ENDIO_INCOMPLETE)
+ /* The target will handle the I/O */
+ return;
+ else if (r == DM_ENDIO_REQUEUE)
+ /* The target wants to requeue the I/O */
+ dm_requeue_original_request(tio->md, tio->orig);
+ else {
+ DMWARN("unimplemented target endio return value: %d", r);
+ BUG();
+ }
+}
+
+/*
+ * Request completion handler for request-based dm
+ */
+static void dm_softirq_done(struct request *rq)
+{
+ bool mapped = true;
+ struct dm_rq_target_io *tio = tio_from_request(rq);
+ struct request *clone = tio->clone;
+ int rw;
+
+ if (!clone) {
+ rq_end_stats(tio->md, rq);
+ rw = rq_data_dir(rq);
+ if (!rq->q->mq_ops) {
+ blk_end_request_all(rq, tio->error);
+ rq_completed(tio->md, rw, false);
+ free_old_rq_tio(tio);
+ } else {
+ blk_mq_end_request(rq, tio->error);
+ rq_completed(tio->md, rw, false);
+ }
+ return;
+ }
+
+ if (rq->cmd_flags & REQ_FAILED)
+ mapped = false;
+
+ dm_done(clone, tio->error, mapped);
+}
+
+/*
+ * Complete the clone and the original request with the error status
+ * through softirq context.
+ */
+static void dm_complete_request(struct request *rq, int error)
+{
+ struct dm_rq_target_io *tio = tio_from_request(rq);
+
+ tio->error = error;
+ if (!rq->q->mq_ops)
+ blk_complete_request(rq);
+ else
+ blk_mq_complete_request(rq, error);
+}
+
+/*
+ * Complete the not-mapped clone and the original request with the error status
+ * through softirq context.
+ * Target's rq_end_io() function isn't called.
+ * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
+ */
+static void dm_kill_unmapped_request(struct request *rq, int error)
+{
+ rq->cmd_flags |= REQ_FAILED;
+ dm_complete_request(rq, error);
+}
+
+/*
+ * Called with the clone's queue lock held (in the case of .request_fn)
+ */
+static void end_clone_request(struct request *clone, int error)
+{
+ struct dm_rq_target_io *tio = clone->end_io_data;
+
+ if (!clone->q->mq_ops) {
+ /*
+		 * This just cleans up the information of the queue in which
+		 * the clone was dispatched.
+		 * The clone is *NOT* actually freed here because it is allocated
+		 * from dm's own mempool (REQ_ALLOCED isn't set).
+ */
+ __blk_put_request(clone->q, clone);
+ }
+
+ /*
+ * Actual request completion is done in a softirq context which doesn't
+ * hold the clone's queue lock. Otherwise, deadlock could occur because:
+ * - another request may be submitted by the upper level driver
+ * of the stacking during the completion
+ * - the submission which requires queue lock may be done
+ * against this clone's queue
+ */
+ dm_complete_request(tio->orig, error);
+}
+
+static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
+{
+ int r;
+
+ if (blk_queue_io_stat(clone->q))
+ clone->cmd_flags |= REQ_IO_STAT;
+
+ clone->start_time = jiffies;
+ r = blk_insert_cloned_request(clone->q, clone);
+ if (r)
+ /* must complete clone in terms of original request */
+ dm_complete_request(rq, r);
+}
+
+static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
+ void *data)
+{
+ struct dm_rq_target_io *tio = data;
+ struct dm_rq_clone_bio_info *info =
+ container_of(bio, struct dm_rq_clone_bio_info, clone);
+
+ info->orig = bio_orig;
+ info->tio = tio;
+ bio->bi_end_io = end_clone_bio;
+
+ return 0;
+}
+
+static int setup_clone(struct request *clone, struct request *rq,
+ struct dm_rq_target_io *tio, gfp_t gfp_mask)
+{
+ int r;
+
+ r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
+ dm_rq_bio_constructor, tio);
+ if (r)
+ return r;
+
+ clone->cmd = rq->cmd;
+ clone->cmd_len = rq->cmd_len;
+ clone->sense = rq->sense;
+ clone->end_io = end_clone_request;
+ clone->end_io_data = tio;
+
+ tio->clone = clone;
+
+ return 0;
+}
+
+static struct request *clone_old_rq(struct request *rq, struct mapped_device *md,
+ struct dm_rq_target_io *tio, gfp_t gfp_mask)
+{
+ /*
+ * Create clone for use with .request_fn request_queue
+ */
+ struct request *clone;
+
+ clone = alloc_old_clone_request(md, gfp_mask);
+ if (!clone)
+ return NULL;
+
+ blk_rq_init(NULL, clone);
+ if (setup_clone(clone, rq, tio, gfp_mask)) {
+ /* -ENOMEM */
+ free_old_clone_request(md, clone);
+ return NULL;
+ }
+
+ return clone;
+}
+
+static void map_tio_request(struct kthread_work *work);
+
+static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
+ struct mapped_device *md)
+{
+ tio->md = md;
+ tio->ti = NULL;
+ tio->clone = NULL;
+ tio->orig = rq;
+ tio->error = 0;
+ /*
+ * Avoid initializing info for blk-mq; it passes
+ * target-specific data through info.ptr
+ * (see: dm_mq_init_request)
+ */
+ if (!md->init_tio_pdu)
+ memset(&tio->info, 0, sizeof(tio->info));
+ if (md->kworker_task)
+ init_kthread_work(&tio->work, map_tio_request);
+}
+
+static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq,
+ struct mapped_device *md,
+ gfp_t gfp_mask)
+{
+ struct dm_rq_target_io *tio;
+ int srcu_idx;
+ struct dm_table *table;
+
+ tio = alloc_old_rq_tio(md, gfp_mask);
+ if (!tio)
+ return NULL;
+
+ init_tio(tio, rq, md);
+
+ table = dm_get_live_table(md, &srcu_idx);
+ /*
+ * Must clone a request if this .request_fn DM device
+ * is stacked on .request_fn device(s).
+ */
+ if (!dm_table_all_blk_mq_devices(table)) {
+ if (!clone_old_rq(rq, md, tio, gfp_mask)) {
+ dm_put_live_table(md, srcu_idx);
+ free_old_rq_tio(tio);
+ return NULL;
+ }
+ }
+ dm_put_live_table(md, srcu_idx);
+
+ return tio;
+}
+
+/*
+ * Called with the queue lock held.
+ */
+static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
+{
+ struct mapped_device *md = q->queuedata;
+ struct dm_rq_target_io *tio;
+
+ if (unlikely(rq->special)) {
+ DMWARN("Already has something in rq->special.");
+ return BLKPREP_KILL;
+ }
+
+ tio = dm_old_prep_tio(rq, md, GFP_ATOMIC);
+ if (!tio)
+ return BLKPREP_DEFER;
+
+ rq->special = tio;
+ rq->cmd_flags |= REQ_DONTPREP;
+
+ return BLKPREP_OK;
+}
+
+/*
+ * Returns:
+ * 0 : the request has been processed
+ * DM_MAPIO_REQUEUE : the original request needs to be requeued
+ * < 0 : the request was completed due to failure
+ */
+static int map_request(struct dm_rq_target_io *tio, struct request *rq,
+ struct mapped_device *md)
+{
+ int r;
+ struct dm_target *ti = tio->ti;
+ struct request *clone = NULL;
+
+ if (tio->clone) {
+ clone = tio->clone;
+ r = ti->type->map_rq(ti, clone, &tio->info);
+ } else {
+ r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
+ if (r < 0) {
+ /* The target wants to complete the I/O */
+ dm_kill_unmapped_request(rq, r);
+ return r;
+ }
+ if (r != DM_MAPIO_REMAPPED)
+ return r;
+ if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
+ /* -ENOMEM */
+ ti->type->release_clone_rq(clone);
+ return DM_MAPIO_REQUEUE;
+ }
+ }
+
+ switch (r) {
+ case DM_MAPIO_SUBMITTED:
+ /* The target has taken the I/O to submit by itself later */
+ break;
+ case DM_MAPIO_REMAPPED:
+ /* The target has remapped the I/O so dispatch it */
+ trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
+ blk_rq_pos(rq));
+ dm_dispatch_clone_request(clone, rq);
+ break;
+ case DM_MAPIO_REQUEUE:
+ /* The target wants to requeue the I/O */
+ dm_requeue_original_request(md, tio->orig);
+ break;
+ default:
+ if (r > 0) {
+ DMWARN("unimplemented target map return value: %d", r);
+ BUG();
+ }
+
+ /* The target wants to complete the I/O */
+ dm_kill_unmapped_request(rq, r);
+ return r;
+ }
+
+ return 0;
+}
+
+static void dm_start_request(struct mapped_device *md, struct request *orig)
+{
+ if (!orig->q->mq_ops)
+ blk_start_request(orig);
+ else
+ blk_mq_start_request(orig);
+ atomic_inc(&md->pending[rq_data_dir(orig)]);
+
+ if (md->seq_rq_merge_deadline_usecs) {
+ md->last_rq_pos = rq_end_sector(orig);
+ md->last_rq_rw = rq_data_dir(orig);
+ md->last_rq_start_time = ktime_get();
+ }
+
+ if (unlikely(dm_stats_used(&md->stats))) {
+ struct dm_rq_target_io *tio = tio_from_request(orig);
+ tio->duration_jiffies = jiffies;
+ tio->n_sectors = blk_rq_sectors(orig);
+ dm_stats_account_io(&md->stats, rq_data_dir(orig),
+ blk_rq_pos(orig), tio->n_sectors, false, 0,
+ &tio->stats_aux);
+ }
+
+ /*
+ * Hold the md reference here for the in-flight I/O.
+	 * We can't rely on the reference count held by the device opener,
+	 * because the device may be closed during request completion
+	 * once all bios have completed.
+ * See the comment in rq_completed() too.
+ */
+ dm_get(md);
+}
+
+static void map_tio_request(struct kthread_work *work)
+{
+ struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
+ struct request *rq = tio->orig;
+ struct mapped_device *md = tio->md;
+
+ if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
+ dm_requeue_original_request(md, rq);
+}
+
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
+{
+ return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
+}
+
+#define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000
+
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
+ const char *buf, size_t count)
+{
+ unsigned deadline;
+
+ if (dm_get_md_type(md) != DM_TYPE_REQUEST_BASED)
+ return count;
+
+ if (kstrtouint(buf, 10, &deadline))
+ return -EINVAL;
+
+ if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
+ deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;
+
+ md->seq_rq_merge_deadline_usecs = deadline;
+
+ return count;
+}
+
+static bool dm_old_request_peeked_before_merge_deadline(struct mapped_device *md)
+{
+ ktime_t kt_deadline;
+
+ if (!md->seq_rq_merge_deadline_usecs)
+ return false;
+
+ kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
+ kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);
+
+ return !ktime_after(ktime_get(), kt_deadline);
+}
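+
+/*
+ * Illustrative usage (assumed sysfs path): the deadline is tuned via the
+ * sysfs attribute backed by the show/store helpers above, e.g.
+ *
+ *	echo 500 > /sys/block/dm-0/dm/rq_based_seq_io_merge_deadline
+ *
+ * A value of 0 (the default set in dm_old_init_request_queue) disables
+ * the merge heuristic entirely.
+ */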
+
+/*
+ * q->request_fn for old request-based dm.
+ * Called with the queue lock held.
+ */
+static void dm_old_request_fn(struct request_queue *q)
+{
+ struct mapped_device *md = q->queuedata;
+ struct dm_target *ti = md->immutable_target;
+ struct request *rq;
+ struct dm_rq_target_io *tio;
+ sector_t pos = 0;
+
+ if (unlikely(!ti)) {
+ int srcu_idx;
+ struct dm_table *map = dm_get_live_table(md, &srcu_idx);
+
+ ti = dm_table_find_target(map, pos);
+ dm_put_live_table(md, srcu_idx);
+ }
+
+ /*
+ * For suspend, check blk_queue_stopped() and increment
+	 * ->pending within a single queue_lock so as not to increment the
+ * number of in-flight I/Os after the queue is stopped in
+ * dm_suspend().
+ */
+ while (!blk_queue_stopped(q)) {
+ rq = blk_peek_request(q);
+ if (!rq)
+ return;
+
+ /* always use block 0 to find the target for flushes for now */
+ pos = 0;
+ if (req_op(rq) != REQ_OP_FLUSH)
+ pos = blk_rq_pos(rq);
+
+ if ((dm_old_request_peeked_before_merge_deadline(md) &&
+ md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
+ md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
+ (ti->type->busy && ti->type->busy(ti))) {
+ blk_delay_queue(q, 10);
+ return;
+ }
+
+ dm_start_request(md, rq);
+
+ tio = tio_from_request(rq);
+ /* Establish tio->ti before queuing work (map_tio_request) */
+ tio->ti = ti;
+ queue_kthread_work(&md->kworker, &tio->work);
+ BUG_ON(!irqs_disabled());
+ }
+}
+
+/*
+ * Fully initialize a .request_fn request-based queue.
+ */
+int dm_old_init_request_queue(struct mapped_device *md)
+{
+ /* Fully initialize the queue */
+ if (!blk_init_allocated_queue(md->queue, dm_old_request_fn, NULL))
+ return -EINVAL;
+
+ /* disable dm_old_request_fn's merge heuristic by default */
+ md->seq_rq_merge_deadline_usecs = 0;
+
+ dm_init_normal_md_queue(md);
+ blk_queue_softirq_done(md->queue, dm_softirq_done);
+ blk_queue_prep_rq(md->queue, dm_old_prep_fn);
+
+ /* Initialize the request-based DM worker thread */
+ init_kthread_worker(&md->kworker);
+ md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
+ "kdmwork-%s", dm_device_name(md));
+ if (IS_ERR(md->kworker_task))
+ return PTR_ERR(md->kworker_task);
+
+ elv_register_queue(md->queue);
+
+ return 0;
+}
+
+static int dm_mq_init_request(void *data, struct request *rq,
+ unsigned int hctx_idx, unsigned int request_idx,
+ unsigned int numa_node)
+{
+ struct mapped_device *md = data;
+ struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
+
+ /*
+ * Must initialize md member of tio, otherwise it won't
+ * be available in dm_mq_queue_rq.
+ */
+ tio->md = md;
+
+ if (md->init_tio_pdu) {
+ /* target-specific per-io data is immediately after the tio */
+ tio->info.ptr = tio + 1;
+ }
+
+ return 0;
+}
+
+static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct request *rq = bd->rq;
+ struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
+ struct mapped_device *md = tio->md;
+ struct dm_target *ti = md->immutable_target;
+
+ if (unlikely(!ti)) {
+ int srcu_idx;
+ struct dm_table *map = dm_get_live_table(md, &srcu_idx);
+
+ ti = dm_table_find_target(map, 0);
+ dm_put_live_table(md, srcu_idx);
+ }
+
+ /*
+ * On suspend dm_stop_queue() handles stopping the blk-mq
+ * request_queue BUT: even though the hw_queues are marked
+ * BLK_MQ_S_STOPPED at that point there is still a race that
+	 * allows block/blk-mq.c to call ->queue_rq against an
+ * hctx that it really shouldn't. The following check guards
+ * against this rarity (albeit _not_ race-free).
+ */
+ if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
+ return BLK_MQ_RQ_QUEUE_BUSY;
+
+ if (ti->type->busy && ti->type->busy(ti))
+ return BLK_MQ_RQ_QUEUE_BUSY;
+
+ dm_start_request(md, rq);
+
+ /* Init tio using md established in .init_request */
+ init_tio(tio, rq, md);
+
+ /*
+ * Establish tio->ti before calling map_request().
+ */
+ tio->ti = ti;
+
+ /* Direct call is fine since .queue_rq allows allocations */
+ if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
+ /* Undo dm_start_request() before requeuing */
+ rq_end_stats(md, rq);
+ rq_completed(md, rq_data_dir(rq), false);
+ return BLK_MQ_RQ_QUEUE_BUSY;
+ }
+
+ return BLK_MQ_RQ_QUEUE_OK;
+}
+
+static struct blk_mq_ops dm_mq_ops = {
+ .queue_rq = dm_mq_queue_rq,
+ .map_queue = blk_mq_map_queue,
+ .complete = dm_softirq_done,
+ .init_request = dm_mq_init_request,
+};
+
+int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
+{
+ struct request_queue *q;
+ struct dm_target *immutable_tgt;
+ int err;
+
+ if (!dm_table_all_blk_mq_devices(t)) {
+ DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
+ return -EINVAL;
+ }
+
+ md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
+ if (!md->tag_set)
+ return -ENOMEM;
+
+ md->tag_set->ops = &dm_mq_ops;
+ md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
+ md->tag_set->numa_node = md->numa_node_id;
+ md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
+ md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
+ md->tag_set->driver_data = md;
+
+ md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
+ immutable_tgt = dm_table_get_immutable_target(t);
+ if (immutable_tgt && immutable_tgt->per_io_data_size) {
+ /* any target-specific per-io data is immediately after the tio */
+ md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
+ md->init_tio_pdu = true;
+ }
+
+ err = blk_mq_alloc_tag_set(md->tag_set);
+ if (err)
+ goto out_kfree_tag_set;
+
+ q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
+ if (IS_ERR(q)) {
+ err = PTR_ERR(q);
+ goto out_tag_set;
+ }
+ dm_init_md_queue(md);
+
+ /* backfill 'mq' sysfs registration normally done in blk_register_queue */
+ blk_mq_register_disk(md->disk);
+
+ return 0;
+
+out_tag_set:
+ blk_mq_free_tag_set(md->tag_set);
+out_kfree_tag_set:
+ kfree(md->tag_set);
+
+ return err;
+}
+
+void dm_mq_cleanup_mapped_device(struct mapped_device *md)
+{
+ if (md->tag_set) {
+ blk_mq_free_tag_set(md->tag_set);
+ kfree(md->tag_set);
+ }
+}
+
+module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
+
+module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");
+
+module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");
+
+module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");
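+
+/*
+ * Illustrative usage (assumed module name): these are ordinary module
+ * parameters, e.g.
+ *
+ *	modprobe dm_mod use_blk_mq=Y dm_mq_queue_depth=4096
+ *
+ * and, being marked S_IWUSR, they can also be adjusted at runtime under
+ * /sys/module/dm_mod/parameters/.
+ */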
diff --git a/drivers/md/dm-rq.h b/drivers/md/dm-rq.h
new file mode 100644
index 000000000..9e6f0a377
--- /dev/null
+++ b/drivers/md/dm-rq.h
@@ -0,0 +1,64 @@
+/*
+ * Internal header file for device mapper
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
+ *
+ * This file is released under the LGPL.
+ */
+
+#ifndef DM_RQ_INTERNAL_H
+#define DM_RQ_INTERNAL_H
+
+#include <linux/bio.h>
+#include <linux/kthread.h>
+
+#include "dm-stats.h"
+
+struct mapped_device;
+
+/*
+ * One of these is allocated per request.
+ */
+struct dm_rq_target_io {
+ struct mapped_device *md;
+ struct dm_target *ti;
+ struct request *orig, *clone;
+ struct kthread_work work;
+ int error;
+ union map_info info;
+ struct dm_stats_aux stats_aux;
+ unsigned long duration_jiffies;
+ unsigned n_sectors;
+};
+
+/*
+ * For request-based dm - the bio clones we allocate are embedded in these
+ * structs.
+ *
+ * We allocate these with bio_alloc_bioset, using the front_pad parameter when
+ * the bioset is created - this means the bio has to come at the end of the
+ * struct.
+ */
+struct dm_rq_clone_bio_info {
+ struct bio *orig;
+ struct dm_rq_target_io *tio;
+ struct bio clone;
+};
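+
+/*
+ * Illustrative sketch (assumed call site, not part of this header): such
+ * a bioset is created with front_pad sized so that the clone bio sits at
+ * the end of its containing struct:
+ *
+ *	bs = bioset_create(pool_size,
+ *			   offsetof(struct dm_rq_clone_bio_info, clone));
+ *
+ * container_of(clone, struct dm_rq_clone_bio_info, clone) then recovers
+ * the info struct from a completing clone bio, as end_clone_bio() does.
+ */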
+
+bool dm_use_blk_mq_default(void);
+bool dm_use_blk_mq(struct mapped_device *md);
+
+int dm_old_init_request_queue(struct mapped_device *md);
+int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t);
+void dm_mq_cleanup_mapped_device(struct mapped_device *md);
+
+void dm_start_queue(struct request_queue *q);
+void dm_stop_queue(struct request_queue *q);
+
+unsigned dm_get_reserved_rq_based_ios(void);
+
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf);
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
+ const char *buf, size_t count);
+
+#endif
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 4d3909393..b8cf956b5 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -226,8 +226,8 @@ static void do_metadata(struct work_struct *work)
/*
* Read or write a chunk aligned and sized block of data from a device.
*/
-static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
- int metadata)
+static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int op,
+ int op_flags, int metadata)
{
struct dm_io_region where = {
.bdev = dm_snap_cow(ps->store->snap)->bdev,
@@ -235,7 +235,8 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
.count = ps->store->chunk_size,
};
struct dm_io_request io_req = {
- .bi_rw = rw,
+ .bi_op = op,
+ .bi_op_flags = op_flags,
.mem.type = DM_IO_VMA,
.mem.ptr.vma = area,
.client = ps->io_client,
@@ -281,14 +282,14 @@ static void skip_metadata(struct pstore *ps)
* Read or write a metadata area. Remembering to skip the first
* chunk which holds the header.
*/
-static int area_io(struct pstore *ps, int rw)
+static int area_io(struct pstore *ps, int op, int op_flags)
{
int r;
chunk_t chunk;
chunk = area_location(ps, ps->current_area);
- r = chunk_io(ps, ps->area, chunk, rw, 0);
+ r = chunk_io(ps, ps->area, chunk, op, op_flags, 0);
if (r)
return r;
@@ -302,7 +303,8 @@ static void zero_memory_area(struct pstore *ps)
static int zero_disk_area(struct pstore *ps, chunk_t area)
{
- return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0);
+ return chunk_io(ps, ps->zero_area, area_location(ps, area),
+ REQ_OP_WRITE, 0, 0);
}
static int read_header(struct pstore *ps, int *new_snapshot)
@@ -334,7 +336,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
if (r)
return r;
- r = chunk_io(ps, ps->header_area, 0, READ, 1);
+ r = chunk_io(ps, ps->header_area, 0, REQ_OP_READ, 0, 1);
if (r)
goto bad;
@@ -395,7 +397,7 @@ static int write_header(struct pstore *ps)
dh->version = cpu_to_le32(ps->version);
dh->chunk_size = cpu_to_le32(ps->store->chunk_size);
- return chunk_io(ps, ps->header_area, 0, WRITE, 1);
+ return chunk_io(ps, ps->header_area, 0, REQ_OP_WRITE, 0, 1);
}
/*
@@ -739,7 +741,7 @@ static void persistent_commit_exception(struct dm_exception_store *store,
/*
* Commit exceptions to disk.
*/
- if (ps->valid && area_io(ps, WRITE_FLUSH_FUA))
+ if (ps->valid && area_io(ps, REQ_OP_WRITE, WRITE_FLUSH_FUA))
ps->valid = 0;
/*
@@ -779,7 +781,7 @@ static int persistent_prepare_merge(struct dm_exception_store *store,
return 0;
ps->current_area--;
- r = area_io(ps, READ);
+ r = area_io(ps, REQ_OP_READ, 0);
if (r < 0)
return r;
ps->current_committed = ps->exceptions_per_area;
@@ -816,7 +818,7 @@ static int persistent_commit_merge(struct dm_exception_store *store,
for (i = 0; i < nr_merged; i++)
clear_exception(ps, ps->current_committed - 1 - i);
- r = area_io(ps, WRITE_FLUSH_FUA);
+ r = area_io(ps, REQ_OP_WRITE, WRITE_FLUSH_FUA);
if (r < 0)
return r;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 70bb0e8b6..c65feeada 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1680,7 +1680,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
init_tracked_chunk(bio);
- if (bio->bi_rw & REQ_FLUSH) {
+ if (bio->bi_opf & REQ_PREFLUSH) {
bio->bi_bdev = s->cow->bdev;
return DM_MAPIO_REMAPPED;
}
@@ -1696,7 +1696,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
* to copy an exception */
down_write(&s->lock);
- if (!s->valid || (unlikely(s->snapshot_overflowed) && bio_rw(bio) == WRITE)) {
+ if (!s->valid || (unlikely(s->snapshot_overflowed) &&
+ bio_data_dir(bio) == WRITE)) {
r = -EIO;
goto out_unlock;
}
@@ -1713,7 +1714,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
* flags so we should only get this if we are
* writeable.
*/
- if (bio_rw(bio) == WRITE) {
+ if (bio_data_dir(bio) == WRITE) {
pe = __lookup_pending_exception(s, chunk);
if (!pe) {
up_write(&s->lock);
@@ -1799,7 +1800,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
init_tracked_chunk(bio);
- if (bio->bi_rw & REQ_FLUSH) {
+ if (bio->bi_opf & REQ_PREFLUSH) {
if (!dm_bio_get_target_bio_nr(bio))
bio->bi_bdev = s->origin->bdev;
else
@@ -1819,7 +1820,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
e = dm_lookup_exception(&s->complete, chunk);
if (e) {
/* Queue writes overlapping with chunks being merged */
- if (bio_rw(bio) == WRITE &&
+ if (bio_data_dir(bio) == WRITE &&
chunk >= s->first_merging_chunk &&
chunk < (s->first_merging_chunk +
s->num_merging_chunks)) {
@@ -1831,7 +1832,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
remap_exception(s, e, bio, chunk);
- if (bio_rw(bio) == WRITE)
+ if (bio_data_dir(bio) == WRITE)
track_chunk(s, bio, chunk);
goto out_unlock;
}
@@ -1839,7 +1840,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
redirect_to_origin:
bio->bi_bdev = s->origin->bdev;
- if (bio_rw(bio) == WRITE) {
+ if (bio_data_dir(bio) == WRITE) {
up_write(&s->lock);
return do_origin(s->origin, bio);
}
@@ -2285,10 +2286,10 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
bio->bi_bdev = o->dev->bdev;
- if (unlikely(bio->bi_rw & REQ_FLUSH))
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH))
return DM_MAPIO_REMAPPED;
- if (bio_rw(bio) != WRITE)
+ if (bio_data_dir(bio) != WRITE)
return DM_MAPIO_REMAPPED;
available_sectors = o->split_boundary -
@@ -2301,6 +2302,13 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
return do_origin(o->dev, bio);
}
+static long origin_direct_access(struct dm_target *ti, sector_t sector,
+ void **kaddr, pfn_t *pfn, long size)
+{
+ DMWARN("device does not support dax.");
+ return -EIO;
+}
+
/*
* Set the target "max_io_len" field to the minimum of all the snapshots'
* chunk sizes.
@@ -2360,6 +2368,7 @@ static struct target_type origin_target = {
.postsuspend = origin_postsuspend,
.status = origin_status,
.iterate_devices = origin_iterate_devices,
+ .direct_access = origin_direct_access,
};
static struct target_type snapshot_target = {
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 8289804cc..38b05f23b 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -10,7 +10,7 @@
#include <linux/module.h>
#include <linux/device-mapper.h>
-#include "dm.h"
+#include "dm-core.h"
#include "dm-stats.h"
#define DM_MSG_PREFIX "stats"
@@ -514,11 +514,10 @@ static void dm_stat_round(struct dm_stat *s, struct dm_stat_shared *shared,
}
static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
- unsigned long bi_rw, sector_t len,
+ int idx, sector_t len,
struct dm_stats_aux *stats_aux, bool end,
unsigned long duration_jiffies)
{
- unsigned long idx = bi_rw & REQ_WRITE;
struct dm_stat_shared *shared = &s->stat_shared[entry];
struct dm_stat_percpu *p;
@@ -584,7 +583,7 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
#endif
}
-static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw,
+static void __dm_stat_bio(struct dm_stat *s, int bi_rw,
sector_t bi_sector, sector_t end_sector,
bool end, unsigned long duration_jiffies,
struct dm_stats_aux *stats_aux)
@@ -645,8 +644,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
last = raw_cpu_ptr(stats->last);
stats_aux->merged =
(bi_sector == (ACCESS_ONCE(last->last_sector) &&
- ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
- (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
+ ((bi_rw == WRITE) ==
+ (ACCESS_ONCE(last->last_rw) == WRITE))
));
ACCESS_ONCE(last->last_sector) = end_sector;
ACCESS_ONCE(last->last_rw) = bi_rw;
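/*
 * [Editorial note] The stats slot index used to be derived by masking
 * bi_rw with REQ_WRITE; callers now pass the direction explicitly, so
 * idx is simply READ (0) or WRITE (1).  The dm.c side of this patch
 * follows suit; sketch of the expected call, for orientation:
 *
 *	dm_stats_account_io(&md->stats, bio_data_dir(bio),
 *			    bio->bi_iter.bi_sector, bio_sectors(bio),
 *			    false, 0, &io->stats_aux);
 *
 * The "merged" heuristic above accordingly compares the write-ness of
 * successive requests instead of masking REQ_WRITE | REQ_DISCARD.
 */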
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 797ddb900..28193a57b 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -286,14 +286,14 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
uint32_t stripe;
unsigned target_bio_nr;
- if (bio->bi_rw & REQ_FLUSH) {
+ if (bio->bi_opf & REQ_PREFLUSH) {
target_bio_nr = dm_bio_get_target_bio_nr(bio);
BUG_ON(target_bio_nr >= sc->stripes);
bio->bi_bdev = sc->stripe[target_bio_nr].dev->bdev;
return DM_MAPIO_REMAPPED;
}
- if (unlikely(bio->bi_rw & REQ_DISCARD) ||
- unlikely(bio->bi_rw & REQ_WRITE_SAME)) {
+ if (unlikely(bio_op(bio) == REQ_OP_DISCARD) ||
+ unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) {
target_bio_nr = dm_bio_get_target_bio_nr(bio);
BUG_ON(target_bio_nr >= sc->stripes);
return stripe_map_range(sc, bio, target_bio_nr);
@@ -308,6 +308,29 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
+static long stripe_direct_access(struct dm_target *ti, sector_t sector,
+ void **kaddr, pfn_t *pfn, long size)
+{
+ struct stripe_c *sc = ti->private;
+ uint32_t stripe;
+ struct block_device *bdev;
+ struct blk_dax_ctl dax = {
+ .size = size,
+ };
+ long ret;
+
+ stripe_map_sector(sc, sector, &stripe, &dax.sector);
+
+ dax.sector += sc->stripe[stripe].physical_start;
+ bdev = sc->stripe[stripe].dev->bdev;
+
+ ret = bdev_direct_access(bdev, &dax);
+ *kaddr = dax.addr;
+ *pfn = dax.pfn;
+
+ return ret;
+}
+
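/*
 * [Editorial note] blk_dax_ctl is an in/out argument block: the caller
 * fills .sector and .size, bdev_direct_access() fills .addr/.pfn and
 * returns the number of contiguous bytes actually mappable (or a
 * negative errno).  Hedged usage sketch:
 *
 *	struct blk_dax_ctl dax = { .sector = s, .size = len };
 *	long avail = bdev_direct_access(bdev, &dax);
 *
 *	if (avail < 0)
 *		return avail;	// e.g. -EOPNOTSUPP without DAX support
 *	// dax.addr is the kernel mapping, dax.pfn the backing frame
 *
 * stripe_direct_access() above therefore only has to translate the
 * sector (stripe_map_sector() plus the stripe's physical_start) before
 * delegating.
 */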
/*
* Stripe status:
*
@@ -360,7 +383,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
if (!error)
return 0; /* I/O complete */
- if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
+ if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
return error;
if (error == -EOPNOTSUPP)
@@ -416,7 +439,7 @@ static void stripe_io_hints(struct dm_target *ti,
static struct target_type stripe_target = {
.name = "striped",
- .version = {1, 5, 1},
+ .version = {1, 6, 0},
.module = THIS_MODULE,
.ctr = stripe_ctr,
.dtr = stripe_dtr,
@@ -425,6 +448,7 @@ static struct target_type stripe_target = {
.status = stripe_status,
.iterate_devices = stripe_iterate_devices,
.io_hints = stripe_io_hints,
+ .direct_access = stripe_direct_access,
};
int __init dm_stripe_init(void)
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index 7e818f5f1..c209b8a19 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -6,7 +6,8 @@
#include <linux/sysfs.h>
#include <linux/dm-ioctl.h>
-#include "dm.h"
+#include "dm-core.h"
+#include "dm-rq.h"
struct dm_sysfs_attr {
struct attribute attr;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 626a5ec04..3e407a9cd 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -5,7 +5,7 @@
* This file is released under the GPL.
*/
-#include "dm.h"
+#include "dm-core.h"
#include <linux/module.h>
#include <linux/vmalloc.h>
@@ -43,8 +43,10 @@ struct dm_table {
struct dm_target *targets;
struct target_type *immutable_target_type;
- unsigned integrity_supported:1;
- unsigned singleton:1;
+
+ bool integrity_supported:1;
+ bool singleton:1;
+ bool all_blk_mq:1;
/*
* Indicates the rw permissions for the new logical
@@ -206,6 +208,7 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
return -ENOMEM;
}
+ t->type = DM_TYPE_NONE;
t->mode = mode;
t->md = md;
*result = t;
@@ -703,7 +706,7 @@ int dm_table_add_target(struct dm_table *t, const char *type,
dm_device_name(t->md), type);
return -EINVAL;
}
- t->singleton = 1;
+ t->singleton = true;
}
if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
@@ -824,22 +827,70 @@ void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
}
EXPORT_SYMBOL(dm_consume_args);
+static bool __table_type_bio_based(unsigned table_type)
+{
+ return (table_type == DM_TYPE_BIO_BASED ||
+ table_type == DM_TYPE_DAX_BIO_BASED);
+}
+
static bool __table_type_request_based(unsigned table_type)
{
return (table_type == DM_TYPE_REQUEST_BASED ||
table_type == DM_TYPE_MQ_REQUEST_BASED);
}
-static int dm_table_set_type(struct dm_table *t)
+void dm_table_set_type(struct dm_table *t, unsigned type)
+{
+ t->type = type;
+}
+EXPORT_SYMBOL_GPL(dm_table_set_type);
+
+static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+
+ return q && blk_queue_dax(q);
+}
+
+static bool dm_table_supports_dax(struct dm_table *t)
+{
+ struct dm_target *ti;
+ unsigned i = 0;
+
+ /* Ensure that all targets support DAX. */
+ while (i < dm_table_get_num_targets(t)) {
+ ti = dm_table_get_target(t, i++);
+
+ if (!ti->type->direct_access)
+ return false;
+
+ if (!ti->type->iterate_devices ||
+ !ti->type->iterate_devices(ti, device_supports_dax, NULL))
+ return false;
+ }
+
+ return true;
+}
+
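/*
 * [Editorial note] This is the standard dm "query every underlying
 * device" pattern: the target's .iterate_devices invokes the callback
 * once per dm_dev it holds, and the table code requires every target
 * to answer true.  A hypothetical callback of the same shape (names
 * invented for illustration):
 *
 *	static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
 *				    sector_t start, sector_t len, void *data)
 *	{
 *		struct request_queue *q = bdev_get_queue(dev->bdev);
 *
 *		return q && blk_queue_nonrot(q);
 *	}
 *
 * Here the probed property is QUEUE_FLAG_DAX, via blk_queue_dax().
 */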
+static int dm_table_determine_type(struct dm_table *t)
{
unsigned i;
unsigned bio_based = 0, request_based = 0, hybrid = 0;
- bool use_blk_mq = false;
+ bool verify_blk_mq = false;
struct dm_target *tgt;
struct dm_dev_internal *dd;
- struct list_head *devices;
+ struct list_head *devices = dm_table_get_devices(t);
unsigned live_md_type = dm_get_md_type(t->md);
+ if (t->type != DM_TYPE_NONE) {
+ /* target already set the table's type */
+ if (t->type == DM_TYPE_BIO_BASED)
+ return 0;
+ BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
+ goto verify_rq_based;
+ }
+
for (i = 0; i < t->num_targets; i++) {
tgt = t->targets + i;
if (dm_target_hybrid(tgt))
@@ -871,11 +922,27 @@ static int dm_table_set_type(struct dm_table *t)
if (bio_based) {
/* We must use this table as bio-based */
t->type = DM_TYPE_BIO_BASED;
+ if (dm_table_supports_dax(t) ||
+ (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED))
+ t->type = DM_TYPE_DAX_BIO_BASED;
return 0;
}
BUG_ON(!request_based); /* No targets in this table */
+ if (list_empty(devices) && __table_type_request_based(live_md_type)) {
+ /* inherit live MD type */
+ t->type = live_md_type;
+ return 0;
+ }
+
+ /*
+ * The only way to establish DM_TYPE_MQ_REQUEST_BASED is by
+ * having a compatible target use dm_table_set_type.
+ */
+ t->type = DM_TYPE_REQUEST_BASED;
+
+verify_rq_based:
/*
* Request-based dm supports only tables that have a single target now.
* To support multiple targets, request splitting support is needed,
@@ -888,7 +955,6 @@ static int dm_table_set_type(struct dm_table *t)
}
/* Non-request-stackable devices can't be used for request-based dm */
- devices = dm_table_get_devices(t);
list_for_each_entry(dd, devices, list) {
struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
@@ -899,10 +965,10 @@ static int dm_table_set_type(struct dm_table *t)
}
if (q->mq_ops)
- use_blk_mq = true;
+ verify_blk_mq = true;
}
- if (use_blk_mq) {
+ if (verify_blk_mq) {
/* verify _all_ devices in the table are blk-mq devices */
list_for_each_entry(dd, devices, list)
if (!bdev_get_queue(dd->dm_dev->bdev)->mq_ops) {
@@ -910,14 +976,9 @@ static int dm_table_set_type(struct dm_table *t)
" are blk-mq request-stackable");
return -EINVAL;
}
- t->type = DM_TYPE_MQ_REQUEST_BASED;
- } else if (list_empty(devices) && __table_type_request_based(live_md_type)) {
- /* inherit live MD type */
- t->type = live_md_type;
-
- } else
- t->type = DM_TYPE_REQUEST_BASED;
+ t->all_blk_mq = true;
+ }
return 0;
}
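/*
 * [Editorial note] The renamed function now distinguishes three entry
 * states: a type pre-set by a target via dm_table_set_type() (only
 * verified here), a table of bio-based targets (possibly upgraded to
 * DM_TYPE_DAX_BIO_BASED), and a request-based table, which must pass
 * the single-target and stackability checks under verify_rq_based.
 * The expected caller of the new export is a request-based target's
 * constructor; a hedged sketch (dm-mpath is the likely in-tree user):
 *
 *	static int my_ctr(struct dm_target *ti, unsigned argc, char **argv)
 *	{
 *		...
 *		dm_table_set_type(ti->table, DM_TYPE_MQ_REQUEST_BASED);
 *		return 0;
 *	}
 */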
@@ -956,14 +1017,19 @@ struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
return NULL;
}
+bool dm_table_bio_based(struct dm_table *t)
+{
+ return __table_type_bio_based(dm_table_get_type(t));
+}
+
bool dm_table_request_based(struct dm_table *t)
{
return __table_type_request_based(dm_table_get_type(t));
}
-bool dm_table_mq_request_based(struct dm_table *t)
+bool dm_table_all_blk_mq_devices(struct dm_table *t)
{
- return dm_table_get_type(t) == DM_TYPE_MQ_REQUEST_BASED;
+ return t->all_blk_mq;
}
static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
@@ -978,7 +1044,7 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
return -EINVAL;
}
- if (type == DM_TYPE_BIO_BASED)
+ if (__table_type_bio_based(type))
for (i = 0; i < t->num_targets; i++) {
tgt = t->targets + i;
per_io_data_size = max(per_io_data_size, tgt->per_io_data_size);
@@ -1106,7 +1172,7 @@ static int dm_table_register_integrity(struct dm_table *t)
return 0;
if (!integrity_profile_exists(dm_disk(md))) {
- t->integrity_supported = 1;
+ t->integrity_supported = true;
/*
* Register integrity profile during table load; we can do
* this because the final profile must match during resume.
@@ -1129,7 +1195,7 @@ static int dm_table_register_integrity(struct dm_table *t)
}
/* Preserve existing integrity profile */
- t->integrity_supported = 1;
+ t->integrity_supported = true;
return 0;
}
@@ -1141,9 +1207,9 @@ int dm_table_complete(struct dm_table *t)
{
int r;
- r = dm_table_set_type(t);
+ r = dm_table_determine_type(t);
if (r) {
- DMERR("unable to set table type");
+ DMERR("unable to determine table type");
return r;
}
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index a317dd884..710ae28fd 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -4,7 +4,7 @@
* This file is released under the GPL.
*/
-#include "dm.h"
+#include "dm-core.h"
#include <linux/module.h>
#include <linux/init.h>
@@ -148,9 +148,15 @@ static void io_err_release_clone_rq(struct request *clone)
{
}
+static long io_err_direct_access(struct dm_target *ti, sector_t sector,
+ void **kaddr, pfn_t *pfn, long size)
+{
+ return -EIO;
+}
+
static struct target_type error_target = {
.name = "error",
- .version = {1, 4, 0},
+ .version = {1, 5, 0},
.features = DM_TARGET_WILDCARD,
.ctr = io_err_ctr,
.dtr = io_err_dtr,
@@ -158,6 +164,7 @@ static struct target_type error_target = {
.map_rq = io_err_map_rq,
.clone_and_map_rq = io_err_clone_and_map_rq,
.release_clone_rq = io_err_release_clone_rq,
+ .direct_access = io_err_direct_access,
};
int __init dm_target_init(void)
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 43824d733..a15091a0d 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1677,6 +1677,36 @@ int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *resu
return r;
}
+int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
+{
+ int r = 0;
+
+ down_write(&pmd->root_lock);
+ for (; b != e; b++) {
+ r = dm_sm_inc_block(pmd->data_sm, b);
+ if (r)
+ break;
+ }
+ up_write(&pmd->root_lock);
+
+ return r;
+}
+
+int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
+{
+ int r = 0;
+
+ down_write(&pmd->root_lock);
+ for (; b != e; b++) {
+ r = dm_sm_dec_block(pmd->data_sm, b);
+ if (r)
+ break;
+ }
+ up_write(&pmd->root_lock);
+
+ return r;
+}
+
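/*
 * [Editorial note] Both helpers operate on the half-open block range
 * [b, e) of the *data* space map, under the metadata root_lock.  Note
 * the early break: on error, blocks preceding the failure keep their
 * adjusted reference counts; the thin-pool callers treat any failure
 * as a metadata failure (metadata_operation_failed), so no rollback is
 * attempted.  Hedged usage, matching the passdown code added to
 * dm-thin.c later in this patch:
 *
 *	dm_block_t data_end = m->data_block + (m->virt_end - m->virt_begin);
 *
 *	dm_pool_inc_data_range(pmd, m->data_block, data_end); // before discard
 *	...passdown bio completes...
 *	dm_pool_dec_data_range(pmd, m->data_block, data_end); // after discard
 */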
bool dm_thin_changed_this_transaction(struct dm_thin_device *td)
{
int r;
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index a938babe4..35e954ea2 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -197,6 +197,9 @@ int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
+int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
+int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
+
/*
* Returns -ENOSPC if the new size is too small and already allocated
* blocks would be lost.
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index fc803d50f..d1c05c12a 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -253,6 +253,7 @@ struct pool {
struct bio_list deferred_flush_bios;
struct list_head prepared_mappings;
struct list_head prepared_discards;
+ struct list_head prepared_discards_pt2;
struct list_head active_thins;
struct dm_deferred_set *shared_read_ds;
@@ -269,6 +270,7 @@ struct pool {
process_mapping_fn process_prepared_mapping;
process_mapping_fn process_prepared_discard;
+ process_mapping_fn process_prepared_discard_pt2;
struct dm_bio_prison_cell **cell_sort_array;
};
@@ -360,7 +362,7 @@ static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t da
sector_t len = block_to_sectors(tc->pool, data_e - data_b);
return __blkdev_issue_discard(tc->pool_dev->bdev, s, len,
- GFP_NOWAIT, REQ_WRITE | REQ_DISCARD, &op->bio);
+ GFP_NOWAIT, 0, &op->bio);
}
static void end_discard(struct discard_op *op, int r)
@@ -371,7 +373,8 @@ static void end_discard(struct discard_op *op, int r)
* need to wait for the chain to complete.
*/
bio_chain(op->bio, op->parent_bio);
- submit_bio(REQ_WRITE | REQ_DISCARD, op->bio);
+ bio_set_op_attrs(op->bio, REQ_OP_DISCARD, 0);
+ submit_bio(op->bio);
}
blk_finish_plug(&op->plug);
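/*
 * [Editorial note] Two 4.8 block-layer API changes meet here: the
 * old rw/type argument (REQ_WRITE | REQ_DISCARD) is gone from
 * __blkdev_issue_discard(), whose fifth argument is now only the
 * BLKDEV_DISCARD_* flags (0 above), and submit_bio() lost its rw
 * argument because the operation is encoded in the bio itself.
 * Sketch of the new idiom:
 *
 *	bio_set_op_attrs(bio, REQ_OP_DISCARD, 0); // op + flags into bi_opf
 *	submit_bio(bio);                          // no rw parameter
 */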
@@ -696,7 +699,7 @@ static void remap_to_origin(struct thin_c *tc, struct bio *bio)
static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
- return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
+ return (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) &&
dm_thin_changed_this_transaction(tc->td);
}
@@ -704,7 +707,7 @@ static void inc_all_io_entry(struct pool *pool, struct bio *bio)
{
struct dm_thin_endio_hook *h;
- if (bio->bi_rw & REQ_DISCARD)
+ if (bio_op(bio) == REQ_OP_DISCARD)
return;
h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -867,7 +870,8 @@ static void __inc_remap_and_issue_cell(void *context,
struct bio *bio;
while ((bio = bio_list_pop(&cell->bios))) {
- if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA))
+ if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
+ bio_op(bio) == REQ_OP_DISCARD)
bio_list_add(&info->defer_bios, bio);
else {
inc_all_io_entry(info->tc->pool, bio);
@@ -999,7 +1003,8 @@ static void process_prepared_discard_no_passdown(struct dm_thin_new_mapping *m)
/*----------------------------------------------------------------*/
-static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m)
+static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m,
+ struct bio *discard_parent)
{
/*
* We've already unmapped this range of blocks, but before we
@@ -1012,7 +1017,7 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;
struct discard_op op;
- begin_discard(&op, tc, m->bio);
+ begin_discard(&op, tc, discard_parent);
while (b != end) {
/* find start of unmapped run */
for (; b < end; b++) {
@@ -1047,28 +1052,101 @@ out:
end_discard(&op, r);
}
-static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
+static void queue_passdown_pt2(struct dm_thin_new_mapping *m)
+{
+ unsigned long flags;
+ struct pool *pool = m->tc->pool;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ list_add_tail(&m->list, &pool->prepared_discards_pt2);
+ spin_unlock_irqrestore(&pool->lock, flags);
+ wake_worker(pool);
+}
+
+static void passdown_endio(struct bio *bio)
+{
+ /*
+ * It doesn't matter if the passdown discard failed; we still want
+ * to unmap (we ignore err).
+ */
+ queue_passdown_pt2(bio->bi_private);
+}
+
+static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
{
int r;
struct thin_c *tc = m->tc;
struct pool *pool = tc->pool;
+ struct bio *discard_parent;
+ dm_block_t data_end = m->data_block + (m->virt_end - m->virt_begin);
+ /*
+ * Only this thread allocates blocks, so we can be sure that the
+ * newly unmapped blocks will not be allocated before the end of
+ * the function.
+ */
r = dm_thin_remove_range(tc->td, m->virt_begin, m->virt_end);
if (r) {
metadata_operation_failed(pool, "dm_thin_remove_range", r);
bio_io_error(m->bio);
+ cell_defer_no_holder(tc, m->cell);
+ mempool_free(m, pool->mapping_pool);
+ return;
+ }
- } else if (m->maybe_shared) {
- passdown_double_checking_shared_status(m);
+ discard_parent = bio_alloc(GFP_NOIO, 1);
+ if (!discard_parent) {
+ DMWARN("%s: unable to allocate top level discard bio for passdown. Skipping passdown.",
+ dm_device_name(tc->pool->pool_md));
+ queue_passdown_pt2(m);
} else {
- struct discard_op op;
- begin_discard(&op, tc, m->bio);
- r = issue_discard(&op, m->data_block,
- m->data_block + (m->virt_end - m->virt_begin));
- end_discard(&op, r);
+ discard_parent->bi_end_io = passdown_endio;
+ discard_parent->bi_private = m;
+
+ if (m->maybe_shared)
+ passdown_double_checking_shared_status(m, discard_parent);
+ else {
+ struct discard_op op;
+
+ begin_discard(&op, tc, discard_parent);
+ r = issue_discard(&op, m->data_block, data_end);
+ end_discard(&op, r);
+ }
}
+ /*
+ * Increment the unmapped blocks. This prevents a race between the
+ * passdown io and reallocation of freed blocks.
+ */
+ r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
+ if (r) {
+ metadata_operation_failed(pool, "dm_pool_inc_data_range", r);
+ bio_io_error(m->bio);
+ cell_defer_no_holder(tc, m->cell);
+ mempool_free(m, pool->mapping_pool);
+ return;
+ }
+}
+
+static void process_prepared_discard_passdown_pt2(struct dm_thin_new_mapping *m)
+{
+ int r;
+ struct thin_c *tc = m->tc;
+ struct pool *pool = tc->pool;
+
+ /*
+ * The passdown has completed, so now we can decrement all those
+ * unmapped blocks.
+ */
+ r = dm_pool_dec_data_range(pool->pmd, m->data_block,
+ m->data_block + (m->virt_end - m->virt_begin));
+ if (r) {
+ metadata_operation_failed(pool, "dm_pool_dec_data_range", r);
+ bio_io_error(m->bio);
+ } else
+ bio_endio(m->bio);
+
cell_defer_no_holder(tc, m->cell);
mempool_free(m, pool->mapping_pool);
}
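/*
 * [Editorial note] Sequencing of the reworked passdown, as far as it
 * can be read from this hunk:
 *
 *  1. pt1 removes the virtual mapping, then takes an extra reference
 *     on every affected data block (dm_pool_inc_data_range) so the
 *     allocator cannot reuse them while the discard is in flight;
 *  2. all passdown discards are chained beneath discard_parent, whose
 *     completion handler (passdown_endio) queues the mapping on
 *     prepared_discards_pt2 regardless of discard status;
 *  3. pt2 runs from the worker, drops the references
 *     (dm_pool_dec_data_range) and ends the original bio.
 *
 * The extra refcount is what closes the race, mentioned in the comment
 * above, between the passdown I/O and reallocation of the freed blocks.
 */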
@@ -1639,7 +1717,8 @@ static void __remap_and_issue_shared_cell(void *context,
while ((bio = bio_list_pop(&cell->bios))) {
if ((bio_data_dir(bio) == WRITE) ||
- (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)))
+ (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
+ bio_op(bio) == REQ_OP_DISCARD))
bio_list_add(&info->defer_bios, bio);
else {
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -2028,7 +2107,7 @@ static void process_thin_deferred_bios(struct thin_c *tc)
break;
}
- if (bio->bi_rw & REQ_DISCARD)
+ if (bio_op(bio) == REQ_OP_DISCARD)
pool->process_discard(tc, bio);
else
pool->process_bio(tc, bio);
@@ -2115,7 +2194,7 @@ static void process_thin_deferred_cells(struct thin_c *tc)
return;
}
- if (cell->holder->bi_rw & REQ_DISCARD)
+ if (bio_op(cell->holder) == REQ_OP_DISCARD)
pool->process_discard_cell(tc, cell);
else
pool->process_cell(tc, cell);
@@ -2212,6 +2291,8 @@ static void do_worker(struct work_struct *ws)
throttle_work_update(&pool->throttle);
process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
throttle_work_update(&pool->throttle);
+ process_prepared(pool, &pool->prepared_discards_pt2, &pool->process_prepared_discard_pt2);
+ throttle_work_update(&pool->throttle);
process_deferred_bios(pool);
throttle_work_complete(&pool->throttle);
}
@@ -2340,7 +2421,8 @@ static void set_discard_callbacks(struct pool *pool)
if (passdown_enabled(pt)) {
pool->process_discard_cell = process_discard_cell_passdown;
- pool->process_prepared_discard = process_prepared_discard_passdown;
+ pool->process_prepared_discard = process_prepared_discard_passdown_pt1;
+ pool->process_prepared_discard_pt2 = process_prepared_discard_passdown_pt2;
} else {
pool->process_discard_cell = process_discard_cell_no_passdown;
pool->process_prepared_discard = process_prepared_discard_no_passdown;
@@ -2553,7 +2635,8 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_SUBMITTED;
}
- if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
+ if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
+ bio_op(bio) == REQ_OP_DISCARD) {
thin_defer_bio_with_throttle(tc, bio);
return DM_MAPIO_SUBMITTED;
}
@@ -2826,6 +2909,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
bio_list_init(&pool->deferred_flush_bios);
INIT_LIST_HEAD(&pool->prepared_mappings);
INIT_LIST_HEAD(&pool->prepared_discards);
+ INIT_LIST_HEAD(&pool->prepared_discards_pt2);
INIT_LIST_HEAD(&pool->active_thins);
pool->low_water_triggered = false;
pool->suspended = true;
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index 766bc9300..b616f11d8 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -35,16 +35,19 @@ static int zero_ctr(struct dm_target *ti, unsigned int argc, char **argv)
*/
static int zero_map(struct dm_target *ti, struct bio *bio)
{
- switch(bio_rw(bio)) {
- case READ:
+ switch (bio_op(bio)) {
+ case REQ_OP_READ:
+ if (bio->bi_opf & REQ_RAHEAD) {
+ /* readahead of null bytes only wastes buffer cache */
+ return -EIO;
+ }
zero_fill_bio(bio);
break;
- case READA:
- /* readahead of null bytes only wastes buffer cache */
- return -EIO;
- case WRITE:
+ case REQ_OP_WRITE:
/* writes get silently dropped */
break;
+ default:
+ return -EIO;
}
bio_endio(bio);
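/*
 * [Editorial note] In 4.8, bio->bi_rw became bio->bi_opf: the request
 * operation (REQ_OP_READ, REQ_OP_WRITE, REQ_OP_DISCARD, ...) lives in
 * the top bits and is read with bio_op(), while modifier flags such as
 * REQ_PREFLUSH, REQ_FUA and REQ_RAHEAD remain bits to be masked.
 * Sketch of the resulting dispatch style used throughout this patch:
 *
 *	switch (bio_op(bio)) {		// exactly one operation per bio
 *	case REQ_OP_DISCARD:
 *		...
 *	}
 *	if (bio->bi_opf & REQ_PREFLUSH)	// flags may accompany any op
 *		...
 */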
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index fd40bcb39..fa9b1cb44 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -5,13 +5,13 @@
* This file is released under the GPL.
*/
-#include "dm.h"
+#include "dm-core.h"
+#include "dm-rq.h"
#include "dm-uevent.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
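/*
 * [Editorial note] The large removals in this file are relocations,
 * not deletions: struct mapped_device and friends move to the new
 * dm-core.h, and the request-based (.request_fn and blk-mq) support
 * code reappears in dm-rq.c/dm-rq.h, which the two added #includes
 * pull in.  dm.c is left with the bio-based path plus the shared
 * plumbing.
 */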
@@ -20,14 +20,8 @@
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
-#include <linux/kthread.h>
-#include <linux/ktime.h>
-#include <linux/elevator.h> /* for rq_end_sector() */
-#include <linux/blk-mq.h>
#include <linux/pr.h>
-#include <trace/events/block.h>
-
#define DM_MSG_PREFIX "core"
#ifdef CONFIG_PRINTK
@@ -63,7 +57,6 @@ static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
static struct workqueue_struct *deferred_remove_workqueue;
/*
- * For bio-based dm.
* One of these is allocated per bio.
*/
struct dm_io {
@@ -76,36 +69,6 @@ struct dm_io {
struct dm_stats_aux stats_aux;
};
-/*
- * For request-based dm.
- * One of these is allocated per request.
- */
-struct dm_rq_target_io {
- struct mapped_device *md;
- struct dm_target *ti;
- struct request *orig, *clone;
- struct kthread_work work;
- int error;
- union map_info info;
- struct dm_stats_aux stats_aux;
- unsigned long duration_jiffies;
- unsigned n_sectors;
-};
-
-/*
- * For request-based dm - the bio clones we allocate are embedded in these
- * structs.
- *
- * We allocate these with bio_alloc_bioset, using the front_pad parameter when
- * the bioset is created - this means the bio has to come at the end of the
- * struct.
- */
-struct dm_rq_clone_bio_info {
- struct bio *orig;
- struct dm_rq_target_io *tio;
- struct bio clone;
-};
-
#define MINOR_ALLOCED ((void *)-1)
/*
@@ -120,130 +83,9 @@ struct dm_rq_clone_bio_info {
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
-/*
- * Work processed by per-device workqueue.
- */
-struct mapped_device {
- struct srcu_struct io_barrier;
- struct mutex suspend_lock;
-
- /*
- * The current mapping (struct dm_table *).
- * Use dm_get_live_table{_fast} or take suspend_lock for
- * dereference.
- */
- void __rcu *map;
-
- struct list_head table_devices;
- struct mutex table_devices_lock;
-
- unsigned long flags;
-
- struct request_queue *queue;
- int numa_node_id;
-
- unsigned type;
- /* Protect queue and type against concurrent access. */
- struct mutex type_lock;
-
- atomic_t holders;
- atomic_t open_count;
-
- struct dm_target *immutable_target;
- struct target_type *immutable_target_type;
-
- struct gendisk *disk;
- char name[16];
-
- void *interface_ptr;
-
- /*
- * A list of ios that arrived while we were suspended.
- */
- atomic_t pending[2];
- wait_queue_head_t wait;
- struct work_struct work;
- spinlock_t deferred_lock;
- struct bio_list deferred;
-
- /*
- * Event handling.
- */
- wait_queue_head_t eventq;
- atomic_t event_nr;
- atomic_t uevent_seq;
- struct list_head uevent_list;
- spinlock_t uevent_lock; /* Protect access to uevent_list */
-
- /* the number of internal suspends */
- unsigned internal_suspend_count;
-
- /*
- * Processing queue (flush)
- */
- struct workqueue_struct *wq;
-
- /*
- * io objects are allocated from here.
- */
- mempool_t *io_pool;
- mempool_t *rq_pool;
-
- struct bio_set *bs;
-
- /*
- * freeze/thaw support require holding onto a super block
- */
- struct super_block *frozen_sb;
-
- /* forced geometry settings */
- struct hd_geometry geometry;
-
- struct block_device *bdev;
-
- /* kobject and completion */
- struct dm_kobject_holder kobj_holder;
-
- /* zero-length flush that will be cloned and submitted to targets */
- struct bio flush_bio;
-
- struct dm_stats stats;
-
- struct kthread_worker kworker;
- struct task_struct *kworker_task;
-
- /* for request-based merge heuristic in dm_request_fn() */
- unsigned seq_rq_merge_deadline_usecs;
- int last_rq_rw;
- sector_t last_rq_pos;
- ktime_t last_rq_start_time;
-
- /* for blk-mq request-based DM support */
- struct blk_mq_tag_set *tag_set;
- bool use_blk_mq:1;
- bool init_tio_pdu:1;
-};
-
-#ifdef CONFIG_DM_MQ_DEFAULT
-static bool use_blk_mq = true;
-#else
-static bool use_blk_mq = false;
-#endif
-
-#define DM_MQ_NR_HW_QUEUES 1
-#define DM_MQ_QUEUE_DEPTH 2048
#define DM_NUMA_NODE NUMA_NO_NODE
-
-static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
-static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;
static int dm_numa_node = DM_NUMA_NODE;
-bool dm_use_blk_mq(struct mapped_device *md)
-{
- return md->use_blk_mq;
-}
-EXPORT_SYMBOL_GPL(dm_use_blk_mq);
-
/*
* For mempools pre-allocation at the table loading time.
*/
@@ -259,9 +101,6 @@ struct table_device {
struct dm_dev dm_dev;
};
-#define RESERVED_BIO_BASED_IOS 16
-#define RESERVED_REQUEST_BASED_IOS 256
-#define RESERVED_MAX_IOS 1024
static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_cache;
@@ -269,13 +108,9 @@ static struct kmem_cache *_rq_cache;
/*
* Bio-based DM's mempools' reserved IOs set by the user.
*/
+#define RESERVED_BIO_BASED_IOS 16
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
-/*
- * Request-based DM's mempools' reserved IOs set by the user.
- */
-static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
-
static int __dm_get_module_param_int(int *module_param, int min, int max)
{
int param = ACCESS_ONCE(*module_param);
@@ -297,8 +132,8 @@ static int __dm_get_module_param_int(int *module_param, int min, int max)
return param;
}
-static unsigned __dm_get_module_param(unsigned *module_param,
- unsigned def, unsigned max)
+unsigned __dm_get_module_param(unsigned *module_param,
+ unsigned def, unsigned max)
{
unsigned param = ACCESS_ONCE(*module_param);
unsigned modified_param = 0;
@@ -319,28 +154,10 @@ static unsigned __dm_get_module_param(unsigned *module_param,
unsigned dm_get_reserved_bio_based_ios(void)
{
return __dm_get_module_param(&reserved_bio_based_ios,
- RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
+ RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
-unsigned dm_get_reserved_rq_based_ios(void)
-{
- return __dm_get_module_param(&reserved_rq_based_ios,
- RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
-}
-EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
-
-static unsigned dm_get_blk_mq_nr_hw_queues(void)
-{
- return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
-}
-
-static unsigned dm_get_blk_mq_queue_depth(void)
-{
- return __dm_get_module_param(&dm_mq_queue_depth,
- DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
-}
-
static unsigned dm_get_numa_node(void)
{
return __dm_get_module_param_int(&dm_numa_node,
@@ -679,29 +496,7 @@ static void free_tio(struct dm_target_io *tio)
bio_put(&tio->clone);
}
-static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
- gfp_t gfp_mask)
-{
- return mempool_alloc(md->io_pool, gfp_mask);
-}
-
-static void free_old_rq_tio(struct dm_rq_target_io *tio)
-{
- mempool_free(tio, tio->md->io_pool);
-}
-
-static struct request *alloc_old_clone_request(struct mapped_device *md,
- gfp_t gfp_mask)
-{
- return mempool_alloc(md->rq_pool, gfp_mask);
-}
-
-static void free_old_clone_request(struct mapped_device *md, struct request *rq)
-{
- mempool_free(rq, md->rq_pool);
-}
-
-static int md_in_flight(struct mapped_device *md)
+int md_in_flight(struct mapped_device *md)
{
return atomic_read(&md->pending[READ]) +
atomic_read(&md->pending[WRITE]);
@@ -723,8 +518,9 @@ static void start_io_acct(struct dm_io *io)
atomic_inc_return(&md->pending[rw]));
if (unlikely(dm_stats_used(&md->stats)))
- dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
- bio_sectors(bio), false, 0, &io->stats_aux);
+ dm_stats_account_io(&md->stats, bio_data_dir(bio),
+ bio->bi_iter.bi_sector, bio_sectors(bio),
+ false, 0, &io->stats_aux);
}
static void end_io_acct(struct dm_io *io)
@@ -738,8 +534,9 @@ static void end_io_acct(struct dm_io *io)
generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time);
if (unlikely(dm_stats_used(&md->stats)))
- dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
- bio_sectors(bio), true, duration, &io->stats_aux);
+ dm_stats_account_io(&md->stats, bio_data_dir(bio),
+ bio->bi_iter.bi_sector, bio_sectors(bio),
+ true, duration, &io->stats_aux);
/*
* After this is decremented the bio must not be touched if it is
@@ -1001,12 +798,12 @@ static void dec_pending(struct dm_io *io, int error)
if (io_error == DM_ENDIO_REQUEUE)
return;
- if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
+ if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
/*
* Preflush done for flush with data, reissue
- * without REQ_FLUSH.
+ * without REQ_PREFLUSH.
*/
- bio->bi_rw &= ~REQ_FLUSH;
+ bio->bi_opf &= ~REQ_PREFLUSH;
queue_io(md, bio);
} else {
/* done with normal IO or empty flush */
@@ -1017,7 +814,7 @@ static void dec_pending(struct dm_io *io, int error)
}
}
-static void disable_write_same(struct mapped_device *md)
+void disable_write_same(struct mapped_device *md)
{
struct queue_limits *limits = dm_get_queue_limits(md);
@@ -1051,7 +848,7 @@ static void clone_endio(struct bio *bio)
}
}
- if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) &&
+ if (unlikely(r == -EREMOTEIO && (bio_op(bio) == REQ_OP_WRITE_SAME) &&
!bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
disable_write_same(md);
@@ -1060,371 +857,6 @@ static void clone_endio(struct bio *bio)
}
/*
- * Partial completion handling for request-based dm
- */
-static void end_clone_bio(struct bio *clone)
-{
- struct dm_rq_clone_bio_info *info =
- container_of(clone, struct dm_rq_clone_bio_info, clone);
- struct dm_rq_target_io *tio = info->tio;
- struct bio *bio = info->orig;
- unsigned int nr_bytes = info->orig->bi_iter.bi_size;
- int error = clone->bi_error;
-
- bio_put(clone);
-
- if (tio->error)
- /*
- * An error has already been detected on the request.
- * Once error occurred, just let clone->end_io() handle
- * the remainder.
- */
- return;
- else if (error) {
- /*
- * Don't notice the error to the upper layer yet.
- * The error handling decision is made by the target driver,
- * when the request is completed.
- */
- tio->error = error;
- return;
- }
-
- /*
- * I/O for the bio successfully completed.
- * Notice the data completion to the upper layer.
- */
-
- /*
- * bios are processed from the head of the list.
- * So the completing bio should always be rq->bio.
- * If it's not, something wrong is happening.
- */
- if (tio->orig->bio != bio)
- DMERR("bio completion is going in the middle of the request");
-
- /*
- * Update the original request.
- * Do not use blk_end_request() here, because it may complete
- * the original request before the clone, and break the ordering.
- */
- blk_update_request(tio->orig, 0, nr_bytes);
-}
-
-static struct dm_rq_target_io *tio_from_request(struct request *rq)
-{
- return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special);
-}
-
-static void rq_end_stats(struct mapped_device *md, struct request *orig)
-{
- if (unlikely(dm_stats_used(&md->stats))) {
- struct dm_rq_target_io *tio = tio_from_request(orig);
- tio->duration_jiffies = jiffies - tio->duration_jiffies;
- dm_stats_account_io(&md->stats, orig->cmd_flags, blk_rq_pos(orig),
- tio->n_sectors, true, tio->duration_jiffies,
- &tio->stats_aux);
- }
-}
-
-/*
- * Don't touch any member of the md after calling this function because
- * the md may be freed in dm_put() at the end of this function.
- * Or do dm_get() before calling this function and dm_put() later.
- */
-static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
-{
- atomic_dec(&md->pending[rw]);
-
- /* nudge anyone waiting on suspend queue */
- if (!md_in_flight(md))
- wake_up(&md->wait);
-
- /*
- * Run this off this callpath, as drivers could invoke end_io while
- * inside their request_fn (and holding the queue lock). Calling
- * back into ->request_fn() could deadlock attempting to grab the
- * queue lock again.
- */
- if (!md->queue->mq_ops && run_queue)
- blk_run_queue_async(md->queue);
-
- /*
- * dm_put() must be at the end of this function. See the comment above
- */
- dm_put(md);
-}
-
-static void free_rq_clone(struct request *clone)
-{
- struct dm_rq_target_io *tio = clone->end_io_data;
- struct mapped_device *md = tio->md;
-
- blk_rq_unprep_clone(clone);
-
- if (md->type == DM_TYPE_MQ_REQUEST_BASED)
- /* stacked on blk-mq queue(s) */
- tio->ti->type->release_clone_rq(clone);
- else if (!md->queue->mq_ops)
- /* request_fn queue stacked on request_fn queue(s) */
- free_old_clone_request(md, clone);
-
- if (!md->queue->mq_ops)
- free_old_rq_tio(tio);
-}
-
-/*
- * Complete the clone and the original request.
- * Must be called without clone's queue lock held,
- * see end_clone_request() for more details.
- */
-static void dm_end_request(struct request *clone, int error)
-{
- int rw = rq_data_dir(clone);
- struct dm_rq_target_io *tio = clone->end_io_data;
- struct mapped_device *md = tio->md;
- struct request *rq = tio->orig;
-
- if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
- rq->errors = clone->errors;
- rq->resid_len = clone->resid_len;
-
- if (rq->sense)
- /*
- * We are using the sense buffer of the original
- * request.
- * So setting the length of the sense data is enough.
- */
- rq->sense_len = clone->sense_len;
- }
-
- free_rq_clone(clone);
- rq_end_stats(md, rq);
- if (!rq->q->mq_ops)
- blk_end_request_all(rq, error);
- else
- blk_mq_end_request(rq, error);
- rq_completed(md, rw, true);
-}
-
-static void dm_unprep_request(struct request *rq)
-{
- struct dm_rq_target_io *tio = tio_from_request(rq);
- struct request *clone = tio->clone;
-
- if (!rq->q->mq_ops) {
- rq->special = NULL;
- rq->cmd_flags &= ~REQ_DONTPREP;
- }
-
- if (clone)
- free_rq_clone(clone);
- else if (!tio->md->queue->mq_ops)
- free_old_rq_tio(tio);
-}
-
-/*
- * Requeue the original request of a clone.
- */
-static void dm_old_requeue_request(struct request *rq)
-{
- struct request_queue *q = rq->q;
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- blk_requeue_request(q, rq);
- blk_run_queue_async(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
-static void dm_mq_requeue_request(struct request *rq)
-{
- struct request_queue *q = rq->q;
- unsigned long flags;
-
- blk_mq_requeue_request(rq);
- spin_lock_irqsave(q->queue_lock, flags);
- if (!blk_queue_stopped(q))
- blk_mq_kick_requeue_list(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
-static void dm_requeue_original_request(struct mapped_device *md,
- struct request *rq)
-{
- int rw = rq_data_dir(rq);
-
- rq_end_stats(md, rq);
- dm_unprep_request(rq);
-
- if (!rq->q->mq_ops)
- dm_old_requeue_request(rq);
- else
- dm_mq_requeue_request(rq);
-
- rq_completed(md, rw, false);
-}
-
-static void dm_old_stop_queue(struct request_queue *q)
-{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- if (blk_queue_stopped(q)) {
- spin_unlock_irqrestore(q->queue_lock, flags);
- return;
- }
-
- blk_stop_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
-static void dm_stop_queue(struct request_queue *q)
-{
- if (!q->mq_ops)
- dm_old_stop_queue(q);
- else
- blk_mq_stop_hw_queues(q);
-}
-
-static void dm_old_start_queue(struct request_queue *q)
-{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- if (blk_queue_stopped(q))
- blk_start_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
-static void dm_start_queue(struct request_queue *q)
-{
- if (!q->mq_ops)
- dm_old_start_queue(q);
- else {
- blk_mq_start_stopped_hw_queues(q, true);
- blk_mq_kick_requeue_list(q);
- }
-}
-
-static void dm_done(struct request *clone, int error, bool mapped)
-{
- int r = error;
- struct dm_rq_target_io *tio = clone->end_io_data;
- dm_request_endio_fn rq_end_io = NULL;
-
- if (tio->ti) {
- rq_end_io = tio->ti->type->rq_end_io;
-
- if (mapped && rq_end_io)
- r = rq_end_io(tio->ti, clone, error, &tio->info);
- }
-
- if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME) &&
- !clone->q->limits.max_write_same_sectors))
- disable_write_same(tio->md);
-
- if (r <= 0)
- /* The target wants to complete the I/O */
- dm_end_request(clone, r);
- else if (r == DM_ENDIO_INCOMPLETE)
- /* The target will handle the I/O */
- return;
- else if (r == DM_ENDIO_REQUEUE)
- /* The target wants to requeue the I/O */
- dm_requeue_original_request(tio->md, tio->orig);
- else {
- DMWARN("unimplemented target endio return value: %d", r);
- BUG();
- }
-}
-
-/*
- * Request completion handler for request-based dm
- */
-static void dm_softirq_done(struct request *rq)
-{
- bool mapped = true;
- struct dm_rq_target_io *tio = tio_from_request(rq);
- struct request *clone = tio->clone;
- int rw;
-
- if (!clone) {
- rq_end_stats(tio->md, rq);
- rw = rq_data_dir(rq);
- if (!rq->q->mq_ops) {
- blk_end_request_all(rq, tio->error);
- rq_completed(tio->md, rw, false);
- free_old_rq_tio(tio);
- } else {
- blk_mq_end_request(rq, tio->error);
- rq_completed(tio->md, rw, false);
- }
- return;
- }
-
- if (rq->cmd_flags & REQ_FAILED)
- mapped = false;
-
- dm_done(clone, tio->error, mapped);
-}
-
-/*
- * Complete the clone and the original request with the error status
- * through softirq context.
- */
-static void dm_complete_request(struct request *rq, int error)
-{
- struct dm_rq_target_io *tio = tio_from_request(rq);
-
- tio->error = error;
- if (!rq->q->mq_ops)
- blk_complete_request(rq);
- else
- blk_mq_complete_request(rq, error);
-}
-
-/*
- * Complete the not-mapped clone and the original request with the error status
- * through softirq context.
- * Target's rq_end_io() function isn't called.
- * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
- */
-static void dm_kill_unmapped_request(struct request *rq, int error)
-{
- rq->cmd_flags |= REQ_FAILED;
- dm_complete_request(rq, error);
-}
-
-/*
- * Called with the clone's queue lock held (in the case of .request_fn)
- */
-static void end_clone_request(struct request *clone, int error)
-{
- struct dm_rq_target_io *tio = clone->end_io_data;
-
- if (!clone->q->mq_ops) {
- /*
- * For just cleaning up the information of the queue in which
- * the clone was dispatched.
- * The clone is *NOT* freed actually here because it is alloced
- * from dm own mempool (REQ_ALLOCED isn't set).
- */
- __blk_put_request(clone->q, clone);
- }
-
- /*
- * Actual request completion is done in a softirq context which doesn't
- * hold the clone's queue lock. Otherwise, deadlock could occur because:
- * - another request may be submitted by the upper level driver
- * of the stacking during the completion
- * - the submission which requires queue lock may be done
- * against this clone's queue
- */
- dm_complete_request(tio->orig, error);
-}
-
-/*
* Return maximum size of I/O possible at the supplied sector up to the current
* target boundary.
*/
@@ -1473,9 +905,36 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
+static long dm_blk_direct_access(struct block_device *bdev, sector_t sector,
+ void **kaddr, pfn_t *pfn, long size)
+{
+ struct mapped_device *md = bdev->bd_disk->private_data;
+ struct dm_table *map;
+ struct dm_target *ti;
+ int srcu_idx;
+ long len, ret = -EIO;
+
+ map = dm_get_live_table(md, &srcu_idx);
+ if (!map)
+ goto out;
+
+ ti = dm_table_find_target(map, sector);
+ if (!dm_target_is_valid(ti))
+ goto out;
+
+ len = max_io_len(sector, ti) << SECTOR_SHIFT;
+ size = min(len, size);
+
+ if (ti->type->direct_access)
+ ret = ti->type->direct_access(ti, sector, kaddr, pfn, size);
+out:
+ dm_put_live_table(md, srcu_idx);
+ return min(ret, size);
+}
+
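/*
 * [Editorial note] This is the block_device_operations entry point
 * that fs/dax.c reaches via bdev_direct_access().  Flow: look up the
 * live table under SRCU, find the target owning the sector, clamp the
 * request to the target boundary (max_io_len), then delegate to the
 * target's .direct_access.  The final min(ret, size) both limits the
 * reported length to what was asked for and, because a negative ret
 * compares below any positive size, propagates errors such as the
 * -EIO returned by the stub implementations added in this patch.
 */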
/*
* A target may call dm_accept_partial_bio only from the map routine. It is
- * allowed for all bio types except REQ_FLUSH.
+ * allowed for all bio types except REQ_PREFLUSH.
*
* dm_accept_partial_bio informs the dm that the target only wants to process
* additional n_sectors sectors of the bio and the rest of the data should be
@@ -1505,7 +964,7 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
- BUG_ON(bio->bi_rw & REQ_FLUSH);
+ BUG_ON(bio->bi_opf & REQ_PREFLUSH);
BUG_ON(bi_size > *tio->len_ptr);
BUG_ON(n_sectors > bi_size);
*tio->len_ptr -= bi_size - n_sectors;
@@ -1746,9 +1205,9 @@ static int __split_and_process_non_flush(struct clone_info *ci)
unsigned len;
int r;
- if (unlikely(bio->bi_rw & REQ_DISCARD))
+ if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
return __send_discard(ci);
- else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
+ else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
return __send_write_same(ci);
ti = dm_table_find_target(ci->map, ci->sector);
@@ -1793,7 +1252,7 @@ static void __split_and_process_bio(struct mapped_device *md,
start_io_acct(ci.io);
- if (bio->bi_rw & REQ_FLUSH) {
+ if (bio->bi_opf & REQ_PREFLUSH) {
ci.bio = &ci.md->flush_bio;
ci.sector_count = 0;
error = __send_empty_flush(&ci);
@@ -1831,7 +1290,7 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
dm_put_live_table(md, srcu_idx);
- if (bio_rw(bio) != READA)
+ if (!(bio->bi_opf & REQ_RAHEAD))
queue_io(md, bio);
else
bio_io_error(bio);
@@ -1843,352 +1302,6 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
return BLK_QC_T_NONE;
}
-int dm_request_based(struct mapped_device *md)
-{
- return blk_queue_stackable(md->queue);
-}
-
-static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
-{
- int r;
-
- if (blk_queue_io_stat(clone->q))
- clone->cmd_flags |= REQ_IO_STAT;
-
- clone->start_time = jiffies;
- r = blk_insert_cloned_request(clone->q, clone);
- if (r)
- /* must complete clone in terms of original request */
- dm_complete_request(rq, r);
-}
-
-static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
- void *data)
-{
- struct dm_rq_target_io *tio = data;
- struct dm_rq_clone_bio_info *info =
- container_of(bio, struct dm_rq_clone_bio_info, clone);
-
- info->orig = bio_orig;
- info->tio = tio;
- bio->bi_end_io = end_clone_bio;
-
- return 0;
-}
-
-static int setup_clone(struct request *clone, struct request *rq,
- struct dm_rq_target_io *tio, gfp_t gfp_mask)
-{
- int r;
-
- r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
- dm_rq_bio_constructor, tio);
- if (r)
- return r;
-
- clone->cmd = rq->cmd;
- clone->cmd_len = rq->cmd_len;
- clone->sense = rq->sense;
- clone->end_io = end_clone_request;
- clone->end_io_data = tio;
-
- tio->clone = clone;
-
- return 0;
-}
-
-static struct request *clone_old_rq(struct request *rq, struct mapped_device *md,
- struct dm_rq_target_io *tio, gfp_t gfp_mask)
-{
- /*
- * Create clone for use with .request_fn request_queue
- */
- struct request *clone;
-
- clone = alloc_old_clone_request(md, gfp_mask);
- if (!clone)
- return NULL;
-
- blk_rq_init(NULL, clone);
- if (setup_clone(clone, rq, tio, gfp_mask)) {
- /* -ENOMEM */
- free_old_clone_request(md, clone);
- return NULL;
- }
-
- return clone;
-}
-
-static void map_tio_request(struct kthread_work *work);
-
-static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
- struct mapped_device *md)
-{
- tio->md = md;
- tio->ti = NULL;
- tio->clone = NULL;
- tio->orig = rq;
- tio->error = 0;
- /*
- * Avoid initializing info for blk-mq; it passes
- * target-specific data through info.ptr
- * (see: dm_mq_init_request)
- */
- if (!md->init_tio_pdu)
- memset(&tio->info, 0, sizeof(tio->info));
- if (md->kworker_task)
- init_kthread_work(&tio->work, map_tio_request);
-}
-
-static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq,
- struct mapped_device *md,
- gfp_t gfp_mask)
-{
- struct dm_rq_target_io *tio;
- int srcu_idx;
- struct dm_table *table;
-
- tio = alloc_old_rq_tio(md, gfp_mask);
- if (!tio)
- return NULL;
-
- init_tio(tio, rq, md);
-
- table = dm_get_live_table(md, &srcu_idx);
- /*
- * Must clone a request if this .request_fn DM device
- * is stacked on .request_fn device(s).
- */
- if (!dm_table_mq_request_based(table)) {
- if (!clone_old_rq(rq, md, tio, gfp_mask)) {
- dm_put_live_table(md, srcu_idx);
- free_old_rq_tio(tio);
- return NULL;
- }
- }
- dm_put_live_table(md, srcu_idx);
-
- return tio;
-}
-
-/*
- * Called with the queue lock held.
- */
-static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
-{
- struct mapped_device *md = q->queuedata;
- struct dm_rq_target_io *tio;
-
- if (unlikely(rq->special)) {
- DMWARN("Already has something in rq->special.");
- return BLKPREP_KILL;
- }
-
- tio = dm_old_prep_tio(rq, md, GFP_ATOMIC);
- if (!tio)
- return BLKPREP_DEFER;
-
- rq->special = tio;
- rq->cmd_flags |= REQ_DONTPREP;
-
- return BLKPREP_OK;
-}
-
-/*
- * Returns:
- * 0 : the request has been processed
- * DM_MAPIO_REQUEUE : the original request needs to be requeued
- * < 0 : the request was completed due to failure
- */
-static int map_request(struct dm_rq_target_io *tio, struct request *rq,
- struct mapped_device *md)
-{
- int r;
- struct dm_target *ti = tio->ti;
- struct request *clone = NULL;
-
- if (tio->clone) {
- clone = tio->clone;
- r = ti->type->map_rq(ti, clone, &tio->info);
- } else {
- r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
- if (r < 0) {
- /* The target wants to complete the I/O */
- dm_kill_unmapped_request(rq, r);
- return r;
- }
- if (r != DM_MAPIO_REMAPPED)
- return r;
- if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
- /* -ENOMEM */
- ti->type->release_clone_rq(clone);
- return DM_MAPIO_REQUEUE;
- }
- }
-
- switch (r) {
- case DM_MAPIO_SUBMITTED:
- /* The target has taken the I/O to submit by itself later */
- break;
- case DM_MAPIO_REMAPPED:
- /* The target has remapped the I/O so dispatch it */
- trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
- blk_rq_pos(rq));
- dm_dispatch_clone_request(clone, rq);
- break;
- case DM_MAPIO_REQUEUE:
- /* The target wants to requeue the I/O */
- dm_requeue_original_request(md, tio->orig);
- break;
- default:
- if (r > 0) {
- DMWARN("unimplemented target map return value: %d", r);
- BUG();
- }
-
- /* The target wants to complete the I/O */
- dm_kill_unmapped_request(rq, r);
- return r;
- }
-
- return 0;
-}
-
-static void map_tio_request(struct kthread_work *work)
-{
- struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
- struct request *rq = tio->orig;
- struct mapped_device *md = tio->md;
-
- if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
- dm_requeue_original_request(md, rq);
-}
-
-static void dm_start_request(struct mapped_device *md, struct request *orig)
-{
- if (!orig->q->mq_ops)
- blk_start_request(orig);
- else
- blk_mq_start_request(orig);
- atomic_inc(&md->pending[rq_data_dir(orig)]);
-
- if (md->seq_rq_merge_deadline_usecs) {
- md->last_rq_pos = rq_end_sector(orig);
- md->last_rq_rw = rq_data_dir(orig);
- md->last_rq_start_time = ktime_get();
- }
-
- if (unlikely(dm_stats_used(&md->stats))) {
- struct dm_rq_target_io *tio = tio_from_request(orig);
- tio->duration_jiffies = jiffies;
- tio->n_sectors = blk_rq_sectors(orig);
- dm_stats_account_io(&md->stats, orig->cmd_flags, blk_rq_pos(orig),
- tio->n_sectors, false, 0, &tio->stats_aux);
- }
-
- /*
- * Hold the md reference here for the in-flight I/O.
- * We can't rely on the reference count by device opener,
- * because the device may be closed during the request completion
- * when all bios are completed.
- * See the comment in rq_completed() too.
- */
- dm_get(md);
-}
-
-#define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000
-
-ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
-{
- return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
-}
-
-ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
- const char *buf, size_t count)
-{
- unsigned deadline;
-
- if (!dm_request_based(md) || md->use_blk_mq)
- return count;
-
- if (kstrtouint(buf, 10, &deadline))
- return -EINVAL;
-
- if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
- deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;
-
- md->seq_rq_merge_deadline_usecs = deadline;
-
- return count;
-}
-
-static bool dm_request_peeked_before_merge_deadline(struct mapped_device *md)
-{
- ktime_t kt_deadline;
-
- if (!md->seq_rq_merge_deadline_usecs)
- return false;
-
- kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
- kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);
-
- return !ktime_after(ktime_get(), kt_deadline);
-}
-
-/*
- * q->request_fn for request-based dm.
- * Called with the queue lock held.
- */
-static void dm_request_fn(struct request_queue *q)
-{
- struct mapped_device *md = q->queuedata;
- struct dm_target *ti = md->immutable_target;
- struct request *rq;
- struct dm_rq_target_io *tio;
- sector_t pos = 0;
-
- if (unlikely(!ti)) {
- int srcu_idx;
- struct dm_table *map = dm_get_live_table(md, &srcu_idx);
-
- ti = dm_table_find_target(map, pos);
- dm_put_live_table(md, srcu_idx);
- }
-
- /*
- * For suspend, check blk_queue_stopped() and increment
- * ->pending within a single queue_lock not to increment the
- * number of in-flight I/Os after the queue is stopped in
- * dm_suspend().
- */
- while (!blk_queue_stopped(q)) {
- rq = blk_peek_request(q);
- if (!rq)
- return;
-
- /* always use block 0 to find the target for flushes for now */
- pos = 0;
- if (!(rq->cmd_flags & REQ_FLUSH))
- pos = blk_rq_pos(rq);
-
- if ((dm_request_peeked_before_merge_deadline(md) &&
- md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
- md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
- (ti->type->busy && ti->type->busy(ti))) {
- blk_delay_queue(q, 10);
- return;
- }
-
- dm_start_request(md, rq);
-
- tio = tio_from_request(rq);
- /* Establish tio->ti before queuing work (map_tio_request) */
- tio->ti = ti;
- queue_kthread_work(&md->kworker, &tio->work);
- BUG_ON(!irqs_disabled());
- }
-}
-
static int dm_any_congested(void *congested_data, int bdi_bits)
{
int r = bdi_bits;
@@ -2266,7 +1379,7 @@ static const struct block_device_operations dm_blk_dops;
static void dm_wq_work(struct work_struct *work);
-static void dm_init_md_queue(struct mapped_device *md)
+void dm_init_md_queue(struct mapped_device *md)
{
/*
* Request-based dm devices cannot be stacked on top of bio-based dm
@@ -2287,7 +1400,7 @@ static void dm_init_md_queue(struct mapped_device *md)
md->queue->backing_dev_info.congested_data = md;
}
-static void dm_init_normal_md_queue(struct mapped_device *md)
+void dm_init_normal_md_queue(struct mapped_device *md)
{
md->use_blk_mq = false;
dm_init_md_queue(md);
@@ -2327,6 +1440,8 @@ static void cleanup_mapped_device(struct mapped_device *md)
bdput(md->bdev);
md->bdev = NULL;
}
+
+ dm_mq_cleanup_mapped_device(md);
}
/*
@@ -2360,7 +1475,7 @@ static struct mapped_device *alloc_dev(int minor)
goto bad_io_barrier;
md->numa_node_id = numa_node_id;
- md->use_blk_mq = use_blk_mq;
+ md->use_blk_mq = dm_use_blk_mq_default();
md->init_tio_pdu = false;
md->type = DM_TYPE_NONE;
mutex_init(&md->suspend_lock);
@@ -2412,7 +1527,7 @@ static struct mapped_device *alloc_dev(int minor)
bio_init(&md->flush_bio);
md->flush_bio.bi_bdev = md->bdev;
- md->flush_bio.bi_rw = WRITE_FLUSH;
+ bio_set_op_attrs(&md->flush_bio, REQ_OP_WRITE, WRITE_FLUSH);
dm_stats_init(&md->stats);
@@ -2445,10 +1560,6 @@ static void free_dev(struct mapped_device *md)
unlock_fs(md);
cleanup_mapped_device(md);
- if (md->tag_set) {
- blk_mq_free_tag_set(md->tag_set);
- kfree(md->tag_set);
- }
free_table_devices(&md->table_devices);
dm_stats_cleanup(&md->stats);
@@ -2464,7 +1575,7 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
if (md->bs) {
/* The md already has necessary mempools. */
- if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
+ if (dm_table_bio_based(t)) {
/*
* Reload bioset because front_pad may have changed
* because a different table was loaded.
@@ -2654,176 +1765,15 @@ struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);
-static void dm_old_init_rq_based_worker_thread(struct mapped_device *md)
-{
- /* Initialize the request-based DM worker thread */
- init_kthread_worker(&md->kworker);
- md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
- "kdmwork-%s", dm_device_name(md));
-}
-
-/*
- * Fully initialize a .request_fn request-based queue.
- */
-static int dm_old_init_request_queue(struct mapped_device *md)
-{
- /* Fully initialize the queue */
- if (!blk_init_allocated_queue(md->queue, dm_request_fn, NULL))
- return -EINVAL;
-
- /* disable dm_request_fn's merge heuristic by default */
- md->seq_rq_merge_deadline_usecs = 0;
-
- dm_init_normal_md_queue(md);
- blk_queue_softirq_done(md->queue, dm_softirq_done);
- blk_queue_prep_rq(md->queue, dm_old_prep_fn);
-
- dm_old_init_rq_based_worker_thread(md);
-
- elv_register_queue(md->queue);
-
- return 0;
-}
-
-static int dm_mq_init_request(void *data, struct request *rq,
- unsigned int hctx_idx, unsigned int request_idx,
- unsigned int numa_node)
-{
- struct mapped_device *md = data;
- struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
-
- /*
- * Must initialize md member of tio, otherwise it won't
- * be available in dm_mq_queue_rq.
- */
- tio->md = md;
-
- if (md->init_tio_pdu) {
- /* target-specific per-io data is immediately after the tio */
- tio->info.ptr = tio + 1;
- }
-
- return 0;
-}
-
-static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
-{
- struct request *rq = bd->rq;
- struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
- struct mapped_device *md = tio->md;
- struct dm_target *ti = md->immutable_target;
-
- if (unlikely(!ti)) {
- int srcu_idx;
- struct dm_table *map = dm_get_live_table(md, &srcu_idx);
-
- ti = dm_table_find_target(map, 0);
- dm_put_live_table(md, srcu_idx);
- }
-
- if (ti->type->busy && ti->type->busy(ti))
- return BLK_MQ_RQ_QUEUE_BUSY;
-
- dm_start_request(md, rq);
-
- /* Init tio using md established in .init_request */
- init_tio(tio, rq, md);
-
- /*
- * Establish tio->ti before queuing work (map_tio_request)
- * or making direct call to map_request().
- */
- tio->ti = ti;
-
- /* Direct call is fine since .queue_rq allows allocations */
- if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
- /* Undo dm_start_request() before requeuing */
- rq_end_stats(md, rq);
- rq_completed(md, rq_data_dir(rq), false);
- return BLK_MQ_RQ_QUEUE_BUSY;
- }
-
- return BLK_MQ_RQ_QUEUE_OK;
-}
-
-static struct blk_mq_ops dm_mq_ops = {
- .queue_rq = dm_mq_queue_rq,
- .map_queue = blk_mq_map_queue,
- .complete = dm_softirq_done,
- .init_request = dm_mq_init_request,
-};
-
-static int dm_mq_init_request_queue(struct mapped_device *md,
- struct dm_target *immutable_tgt)
-{
- struct request_queue *q;
- int err;
-
- if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) {
- DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
- return -EINVAL;
- }
-
- md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
- if (!md->tag_set)
- return -ENOMEM;
-
- md->tag_set->ops = &dm_mq_ops;
- md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
- md->tag_set->numa_node = md->numa_node_id;
- md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
- md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
- md->tag_set->driver_data = md;
-
- md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
- if (immutable_tgt && immutable_tgt->per_io_data_size) {
- /* any target-specific per-io data is immediately after the tio */
- md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
- md->init_tio_pdu = true;
- }
-
- err = blk_mq_alloc_tag_set(md->tag_set);
- if (err)
- goto out_kfree_tag_set;
-
- q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
- if (IS_ERR(q)) {
- err = PTR_ERR(q);
- goto out_tag_set;
- }
- dm_init_md_queue(md);
-
- /* backfill 'mq' sysfs registration normally done in blk_register_queue */
- blk_mq_register_disk(md->disk);
-
- return 0;
-
-out_tag_set:
- blk_mq_free_tag_set(md->tag_set);
-out_kfree_tag_set:
- kfree(md->tag_set);
-
- return err;
-}
-
-static unsigned filter_md_type(unsigned type, struct mapped_device *md)
-{
- if (type == DM_TYPE_BIO_BASED)
- return type;
-
- return !md->use_blk_mq ? DM_TYPE_REQUEST_BASED : DM_TYPE_MQ_REQUEST_BASED;
-}
-
/*
* Setup the DM device's queue based on md's type
*/
int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
{
int r;
- unsigned md_type = filter_md_type(dm_get_md_type(md), md);
+ unsigned type = dm_get_md_type(md);
- switch (md_type) {
+ switch (type) {
case DM_TYPE_REQUEST_BASED:
r = dm_old_init_request_queue(md);
if (r) {
@@ -2832,13 +1782,14 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
}
break;
case DM_TYPE_MQ_REQUEST_BASED:
- r = dm_mq_init_request_queue(md, dm_table_get_immutable_target(t));
+ r = dm_mq_init_request_queue(md, t);
if (r) {
DMERR("Cannot initialize queue for request-based dm-mq mapped device");
return r;
}
break;
case DM_TYPE_BIO_BASED:
+ case DM_TYPE_DAX_BIO_BASED:
dm_init_normal_md_queue(md);
blk_queue_make_request(md->queue, dm_make_request);
/*
@@ -2847,6 +1798,9 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
*/
bioset_free(md->queue->bio_split);
md->queue->bio_split = NULL;
+
+ if (type == DM_TYPE_DAX_BIO_BASED)
+ queue_flag_set_unlocked(QUEUE_FLAG_DAX, md->queue);
break;
}
@@ -3541,10 +2495,9 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
if (!pools)
return NULL;
- type = filter_md_type(type, md);
-
switch (type) {
case DM_TYPE_BIO_BASED:
+ case DM_TYPE_DAX_BIO_BASED:
cachep = _io_cache;
pool_size = dm_get_reserved_bio_based_ios();
front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
@@ -3601,26 +2554,76 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
kfree(pools);
}
-static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
- u32 flags)
+struct dm_pr {
+ u64 old_key;
+ u64 new_key;
+ u32 flags;
+ bool fail_early;
+};
+
+static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
+ void *data)
{
struct mapped_device *md = bdev->bd_disk->private_data;
- const struct pr_ops *ops;
- fmode_t mode;
- int r;
+ struct dm_table *table;
+ struct dm_target *ti;
+ int ret = -ENOTTY, srcu_idx;
- r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
- if (r < 0)
- return r;
+ table = dm_get_live_table(md, &srcu_idx);
+ if (!table || !dm_table_get_size(table))
+ goto out;
- ops = bdev->bd_disk->fops->pr_ops;
- if (ops && ops->pr_register)
- r = ops->pr_register(bdev, old_key, new_key, flags);
- else
- r = -EOPNOTSUPP;
+ /* We only support devices that have a single target */
+ if (dm_table_get_num_targets(table) != 1)
+ goto out;
+ ti = dm_table_get_target(table, 0);
- bdput(bdev);
- return r;
+ ret = -EINVAL;
+ if (!ti->type->iterate_devices)
+ goto out;
+
+ ret = ti->type->iterate_devices(ti, fn, data);
+out:
+ dm_put_live_table(md, srcu_idx);
+ return ret;
+}
+
+/*
+ * For register / unregister we need to manually call out to every path.
+ */
+static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct dm_pr *pr = data;
+ const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
+
+ if (!ops || !ops->pr_register)
+ return -EOPNOTSUPP;
+ return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
+}
+
+static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
+ u32 flags)
+{
+ struct dm_pr pr = {
+ .old_key = old_key,
+ .new_key = new_key,
+ .flags = flags,
+ .fail_early = true,
+ };
+ int ret;
+
+ ret = dm_call_pr(bdev, __dm_pr_register, &pr);
+ if (ret && new_key) {
+ /* unregister all paths if we failed to register any path */
+ pr.old_key = new_key;
+ pr.new_key = 0;
+ pr.flags = 0;
+ pr.fail_early = false;
+ dm_call_pr(bdev, __dm_pr_register, &pr);
+ }
+
+ return ret;
}
static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
@@ -3721,6 +2724,7 @@ static const struct block_device_operations dm_blk_dops = {
.open = dm_blk_open,
.release = dm_blk_close,
.ioctl = dm_blk_ioctl,
+ .direct_access = dm_blk_direct_access,
.getgeo = dm_blk_getgeo,
.pr_ops = &dm_pr_ops,
.owner = THIS_MODULE
@@ -3738,18 +2742,6 @@ MODULE_PARM_DESC(major, "The major number of the device mapper");
module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
-module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
-
-module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");
-
-module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");
-
-module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");
-
module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
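The dm_pr_register() rework above routes persistent-reservation registration through the new dm_call_pr() helper, which fans the operation out to every underlying path via the target's iterate_devices hook; if registering a new key fails on any path, the same iterator is re-run with the new key as old_key and a zero new key so the paths that did succeed are unregistered again. A minimal user-space sketch of that register-then-roll-back shape (pr_register, register_all and the device array are illustrative stand-ins, not the kernel API):

#include <stdio.h>

/* hypothetical stand-in for one path's pr_ops->pr_register */
static int pr_register(int dev, unsigned long long old_key,
		       unsigned long long new_key)
{
	if (dev == 2 && new_key)	/* simulate one path failing */
		return -1;
	printf("dev %d: old=%llu new=%llu\n", dev, old_key, new_key);
	return 0;
}

static int register_all(const int *devs, int n,
			unsigned long long old_key,
			unsigned long long new_key)
{
	int i, ret = 0;

	for (i = 0; i < n; i++) {
		ret = pr_register(devs[i], old_key, new_key);
		if (ret)
			break;
	}
	if (ret && new_key) {
		/* roll back: unregister every path with the new key */
		for (i = 0; i < n; i++)
			pr_register(devs[i], new_key, 0);
	}
	return ret;
}

int main(void)
{
	const int devs[] = { 0, 1, 2 };

	return register_all(devs, 3, 0, 42) ? 1 : 0;
}

The rollback pass deliberately ignores per-path errors, mirroring pr.fail_early = false in the hunk above.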
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 13a758ec0..f0aad08b9 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -13,6 +13,7 @@
#include <linux/fs.h>
#include <linux/device-mapper.h>
#include <linux/list.h>
+#include <linux/moduleparam.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/hdreg.h>
@@ -33,14 +34,6 @@
#define DM_STATUS_NOFLUSH_FLAG (1 << 0)
/*
- * Type of table and mapped_device's mempool
- */
-#define DM_TYPE_NONE 0
-#define DM_TYPE_BIO_BASED 1
-#define DM_TYPE_REQUEST_BASED 2
-#define DM_TYPE_MQ_REQUEST_BASED 3
-
-/*
* List of devices that a metadevice uses and should open/close.
*/
struct dm_dev_internal {
@@ -75,8 +68,9 @@ unsigned dm_table_get_type(struct dm_table *t);
struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
+bool dm_table_bio_based(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);
-bool dm_table_mq_request_based(struct dm_table *t);
+bool dm_table_all_blk_mq_devices(struct dm_table *t);
void dm_table_free_md_mempools(struct dm_table *t);
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
@@ -161,16 +155,6 @@ void dm_interface_exit(void);
/*
* sysfs interface
*/
-struct dm_kobject_holder {
- struct kobject kobj;
- struct completion completion;
-};
-
-static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
-{
- return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
-}
-
int dm_sysfs_init(struct mapped_device *md);
void dm_sysfs_exit(struct mapped_device *md);
struct kobject *dm_kobject(struct mapped_device *md);
@@ -212,8 +196,6 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
void dm_internal_suspend(struct mapped_device *md);
void dm_internal_resume(struct mapped_device *md);
-bool dm_use_blk_mq(struct mapped_device *md);
-
int dm_io_init(void);
void dm_io_exit(void);
@@ -228,18 +210,8 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
void dm_free_md_mempools(struct dm_md_mempools *pools);
/*
- * Helpers that are used by DM core
+ * Various helpers
*/
unsigned dm_get_reserved_bio_based_ios(void);
-unsigned dm_get_reserved_rq_based_ios(void);
-
-static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
-{
- return !maxlen || strlen(result) + 1 >= maxlen;
-}
-
-ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf);
-ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
- const char *buf, size_t count);
#endif
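The dm_kobject_holder block dropped from this header is the usual container_of construction: recover the enclosing structure from a pointer to an embedded member (the helper is presumably relocated to a private header rather than deleted from the tree). A self-contained sketch of the idiom, using only standard C:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct completion { int done; };
struct kobject { const char *name; };

struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};

/* same shape as dm_get_completion_from_kobject() */
static struct completion *get_completion(struct kobject *kobj)
{
	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}

int main(void)
{
	struct dm_kobject_holder h = { { "md" }, { 1 } };

	printf("done=%d\n", get_completion(&h.kobj)->done);
	return 0;
}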
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index b7fe7e9fc..86f5d4359 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -221,7 +221,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
struct bio *split;
sector_t start_sector, end_sector, data_offset;
- if (unlikely(bio->bi_rw & REQ_FLUSH)) {
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
md_flush_request(mddev, bio);
return;
}
@@ -252,7 +252,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
split->bi_iter.bi_sector = split->bi_iter.bi_sector -
start_sector + data_offset;
- if (unlikely((split->bi_rw & REQ_DISCARD) &&
+ if (unlikely((bio_op(split) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
/* Just ignore it */
bio_endio(split);
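linear_make_request() above shows both halves of the 4.8 bio interface change: flag tests move from bio->bi_rw to bio->bi_opf (REQ_FLUSH becomes REQ_PREFLUSH), and the operation is tested with bio_op() rather than by masking bits such as REQ_DISCARD, because op and flags now share one field with the op in dedicated bits. A rough user-space model of that packing (the shift position and the flag values are invented for illustration; the kernel's actual layout differs):

#include <assert.h>

enum req_op { REQ_OP_READ = 0, REQ_OP_WRITE = 1, REQ_OP_DISCARD = 3 };
#define OP_SHIFT	24		/* illustrative, not the kernel's */
#define REQ_SYNC	(1u << 0)
#define REQ_PREFLUSH	(1u << 1)

struct bio { unsigned int bi_opf; };

static unsigned int bio_op(const struct bio *bio)
{
	return bio->bi_opf >> OP_SHIFT;
}

static void bio_set_op_attrs(struct bio *bio, enum req_op op,
			     unsigned int flags)
{
	bio->bi_opf = ((unsigned int)op << OP_SHIFT) | flags;
}

int main(void)
{
	struct bio bio;

	bio_set_op_attrs(&bio, REQ_OP_DISCARD, REQ_SYNC);
	assert(bio_op(&bio) == REQ_OP_DISCARD);	/* op test, as in the hunk */
	assert(bio.bi_opf & REQ_SYNC);		/* flag test */
	assert(!(bio.bi_opf & REQ_PREFLUSH));
	return 0;
}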
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 41573f1f6..34a840d9d 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -834,8 +834,10 @@ static int join(struct mddev *mddev, int nodes)
goto err;
}
cinfo->ack_lockres = lockres_init(mddev, "ack", ack_bast, 0);
- if (!cinfo->ack_lockres)
+ if (!cinfo->ack_lockres) {
+ ret = -ENOMEM;
goto err;
+ }
/* get sync CR lock on ACK. */
if (dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR))
pr_err("md-cluster: failed to get a sync CR lock on ACK!(%d)\n",
@@ -849,8 +851,10 @@ static int join(struct mddev *mddev, int nodes)
pr_info("md-cluster: Joined cluster %s slot %d\n", str, cinfo->slot_number);
snprintf(str, 64, "bitmap%04d", cinfo->slot_number - 1);
cinfo->bitmap_lockres = lockres_init(mddev, str, NULL, 1);
- if (!cinfo->bitmap_lockres)
+ if (!cinfo->bitmap_lockres) {
+ ret = -ENOMEM;
goto err;
+ }
if (dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW)) {
pr_err("Failed to get bitmap lock\n");
ret = -EINVAL;
@@ -858,8 +862,10 @@ static int join(struct mddev *mddev, int nodes)
}
cinfo->resync_lockres = lockres_init(mddev, "resync", NULL, 0);
- if (!cinfo->resync_lockres)
+ if (!cinfo->resync_lockres) {
+ ret = -ENOMEM;
goto err;
+ }
return 0;
err:
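The three join() hunks above fix one recurring bug: when lockres_init() returned NULL, the code jumped to the err label with ret still holding 0, so an allocation failure could be reported to the caller as success. A reduced, runnable rendering of the pattern and the fix (lockres_init is a stub here; only the placement of ret = -ENOMEM mirrors the patch):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static void *lockres_init(int fail)
{
	return fail ? NULL : malloc(1);
}

static int join(int fail)
{
	int ret = 0;
	void *ack = NULL, *bitmap = NULL;

	ack = lockres_init(0);
	if (!ack) {
		ret = -ENOMEM;	/* the fix: set ret before the goto */
		goto err;
	}
	bitmap = lockres_init(fail);
	if (!bitmap) {
		ret = -ENOMEM;
		goto err;
	}
	free(ack);
	free(bitmap);
	return 0;
err:
	free(ack);
	free(bitmap);
	return ret;
}

int main(void)
{
	printf("join(fail)=%d\n", join(1));	/* -12, no longer 0 */
	return 0;
}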
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0678a0a95..915e84d63 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -285,7 +285,7 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
*/
sectors = bio_sectors(bio);
/* bio could be mergeable after passing to underlayer */
- bio->bi_rw &= ~REQ_NOMERGE;
+ bio->bi_opf &= ~REQ_NOMERGE;
mddev->pers->make_request(mddev, bio);
cpu = part_stat_lock();
@@ -394,8 +394,9 @@ static void submit_flushes(struct work_struct *ws)
bi->bi_end_io = md_end_flush;
bi->bi_private = rdev;
bi->bi_bdev = rdev->bdev;
+ bio_set_op_attrs(bi, REQ_OP_WRITE, WRITE_FLUSH);
atomic_inc(&mddev->flush_pending);
- submit_bio(WRITE_FLUSH, bi);
+ submit_bio(bi);
rcu_read_lock();
rdev_dec_pending(rdev, mddev);
}
@@ -413,7 +414,7 @@ static void md_submit_flush_data(struct work_struct *ws)
/* an empty barrier - all done */
bio_endio(bio);
else {
- bio->bi_rw &= ~REQ_FLUSH;
+ bio->bi_opf &= ~REQ_PREFLUSH;
mddev->pers->make_request(mddev, bio);
}
@@ -742,9 +743,10 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
bio_add_page(bio, page, size, 0);
bio->bi_private = rdev;
bio->bi_end_io = super_written;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH_FUA);
atomic_inc(&mddev->pending_writes);
- submit_bio(WRITE_FLUSH_FUA, bio);
+ submit_bio(bio);
}
void md_super_wait(struct mddev *mddev)
@@ -754,13 +756,14 @@ void md_super_wait(struct mddev *mddev)
}
int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
- struct page *page, int rw, bool metadata_op)
+ struct page *page, int op, int op_flags, bool metadata_op)
{
struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
int ret;
bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
rdev->meta_bdev : rdev->bdev;
+ bio_set_op_attrs(bio, op, op_flags);
if (metadata_op)
bio->bi_iter.bi_sector = sector + rdev->sb_start;
else if (rdev->mddev->reshape_position != MaxSector &&
@@ -770,7 +773,8 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
else
bio->bi_iter.bi_sector = sector + rdev->data_offset;
bio_add_page(bio, page, size, 0);
- submit_bio_wait(rw, bio);
+
+ submit_bio_wait(bio);
ret = !bio->bi_error;
bio_put(bio);
@@ -785,7 +789,7 @@ static int read_disk_sb(struct md_rdev *rdev, int size)
if (rdev->sb_loaded)
return 0;
- if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
+ if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true))
goto fail;
rdev->sb_loaded = 1;
return 0;
@@ -1471,7 +1475,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
return -EINVAL;
bb_sector = (long long)offset;
if (!sync_page_io(rdev, bb_sector, sectors << 9,
- rdev->bb_page, READ, true))
+ rdev->bb_page, REQ_OP_READ, 0, true))
return -EIO;
bbp = (u64 *)page_address(rdev->bb_page);
rdev->badblocks.shift = sb->bblog_shift;
@@ -1600,11 +1604,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
mddev->new_chunk_sectors = mddev->chunk_sectors;
}
- if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL) {
+ if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
set_bit(MD_HAS_JOURNAL, &mddev->flags);
- if (mddev->recovery_cp == MaxSector)
- set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
- }
} else if (mddev->pers == NULL) {
/* Insist of good event counter while assembling, except for
* spares (which don't need an event count) */
@@ -2478,8 +2479,7 @@ static int add_bound_rdev(struct md_rdev *rdev)
if (add_journal)
mddev_resume(mddev);
if (err) {
- unbind_rdev_from_array(rdev);
- export_rdev(rdev);
+ md_kick_rdev_from_array(rdev);
return err;
}
}
@@ -2596,6 +2596,10 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
else
err = -EBUSY;
} else if (cmd_match(buf, "remove")) {
+ if (rdev->mddev->pers) {
+ clear_bit(Blocked, &rdev->flags);
+ remove_and_add_spares(rdev->mddev, rdev);
+ }
if (rdev->raid_disk >= 0)
err = -EBUSY;
else {
@@ -3172,8 +3176,7 @@ int md_rdev_init(struct md_rdev *rdev)
rdev->data_offset = 0;
rdev->new_data_offset = 0;
rdev->sb_events = 0;
- rdev->last_read_error.tv_sec = 0;
- rdev->last_read_error.tv_nsec = 0;
+ rdev->last_read_error = 0;
rdev->sb_loaded = 0;
rdev->bb_page = NULL;
atomic_set(&rdev->nr_pending, 0);
@@ -3579,6 +3582,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
mddev->to_remove = &md_redundancy_group;
}
+ module_put(oldpers->owner);
+
rdev_for_each(rdev, mddev) {
if (rdev->raid_disk < 0)
continue;
@@ -3936,6 +3941,8 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
} else
err = -EBUSY;
}
+ if (!err)
+ sysfs_notify_dirent_safe(mddev->sysfs_state);
spin_unlock(&mddev->lock);
return err ?: len;
}
@@ -4187,7 +4194,8 @@ size_store(struct mddev *mddev, const char *buf, size_t len)
return err;
if (mddev->pers) {
err = update_size(mddev, sectors);
- md_update_sb(mddev, 1);
+ if (err == 0)
+ md_update_sb(mddev, 1);
} else {
if (mddev->dev_sectors == 0 ||
mddev->dev_sectors > sectors)
@@ -5840,6 +5848,9 @@ static int get_array_info(struct mddev *mddev, void __user *arg)
working++;
if (test_bit(In_sync, &rdev->flags))
insync++;
+ else if (test_bit(Journal, &rdev->flags))
+ /* TODO: add journal count to md_u.h */
+ ;
else
spare++;
}
@@ -7805,6 +7816,7 @@ void md_do_sync(struct md_thread *thread)
if (ret)
goto skip;
+ set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags);
if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
@@ -7846,6 +7858,7 @@ void md_do_sync(struct md_thread *thread)
*/
do {
+ int mddev2_minor = -1;
mddev->curr_resync = 2;
try_again:
@@ -7875,10 +7888,14 @@ void md_do_sync(struct md_thread *thread)
prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
mddev2->curr_resync >= mddev->curr_resync) {
- printk(KERN_INFO "md: delaying %s of %s"
- " until %s has finished (they"
- " share one or more physical units)\n",
- desc, mdname(mddev), mdname(mddev2));
+ if (mddev2_minor != mddev2->md_minor) {
+ mddev2_minor = mddev2->md_minor;
+ printk(KERN_INFO "md: delaying %s of %s"
+ " until %s has finished (they"
+ " share one or more physical units)\n",
+ desc, mdname(mddev),
+ mdname(mddev2));
+ }
mddev_put(mddev2);
if (signal_pending(current))
flush_signals(current);
@@ -8143,18 +8160,11 @@ void md_do_sync(struct md_thread *thread)
}
}
skip:
- if (mddev_is_clustered(mddev) &&
- ret == 0) {
- /* set CHANGE_PENDING here since maybe another
- * update is needed, so other nodes are informed */
- set_mask_bits(&mddev->flags, 0,
- BIT(MD_CHANGE_PENDING) | BIT(MD_CHANGE_DEVS));
- md_wakeup_thread(mddev->thread);
- wait_event(mddev->sb_wait,
- !test_bit(MD_CHANGE_PENDING, &mddev->flags));
- md_cluster_ops->resync_finish(mddev);
- } else
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ /* Set CHANGE_PENDING here since another update may be needed, so
+ * other nodes are informed. This is harmless for non-clustered
+ * raid */
+ set_mask_bits(&mddev->flags, 0,
+ BIT(MD_CHANGE_PENDING) | BIT(MD_CHANGE_DEVS));
spin_lock(&mddev->lock);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
@@ -8180,15 +8190,34 @@ static int remove_and_add_spares(struct mddev *mddev,
struct md_rdev *rdev;
int spares = 0;
int removed = 0;
+ bool remove_some = false;
- rdev_for_each(rdev, mddev)
+ rdev_for_each(rdev, mddev) {
if ((this == NULL || rdev == this) &&
rdev->raid_disk >= 0 &&
!test_bit(Blocked, &rdev->flags) &&
- (test_bit(Faulty, &rdev->flags) ||
+ test_bit(Faulty, &rdev->flags) &&
+ atomic_read(&rdev->nr_pending)==0) {
+ /* Faulty non-Blocked devices with nr_pending == 0
+ * never get nr_pending incremented,
+ * never get Faulty cleared, and never get Blocked set.
+ * So we can synchronize_rcu now rather than once per device
+ */
+ remove_some = true;
+ set_bit(RemoveSynchronized, &rdev->flags);
+ }
+ }
+
+ if (remove_some)
+ synchronize_rcu();
+ rdev_for_each(rdev, mddev) {
+ if ((this == NULL || rdev == this) &&
+ rdev->raid_disk >= 0 &&
+ !test_bit(Blocked, &rdev->flags) &&
+ ((test_bit(RemoveSynchronized, &rdev->flags) ||
(!test_bit(In_sync, &rdev->flags) &&
!test_bit(Journal, &rdev->flags))) &&
- atomic_read(&rdev->nr_pending)==0) {
+ atomic_read(&rdev->nr_pending)==0)) {
if (mddev->pers->hot_remove_disk(
mddev, rdev) == 0) {
sysfs_unlink_rdev(mddev, rdev);
@@ -8196,6 +8225,10 @@ static int remove_and_add_spares(struct mddev *mddev,
removed++;
}
}
+ if (remove_some && test_bit(RemoveSynchronized, &rdev->flags))
+ clear_bit(RemoveSynchronized, &rdev->flags);
+ }
+
if (removed && mddev->kobj.sd)
sysfs_notify(&mddev->kobj, NULL, "degraded");
@@ -8243,16 +8276,13 @@ no_add:
static void md_start_sync(struct work_struct *ws)
{
struct mddev *mddev = container_of(ws, struct mddev, del_work);
- int ret = 0;
mddev->sync_thread = md_register_thread(md_do_sync,
mddev,
"resync");
if (!mddev->sync_thread) {
- if (!(mddev_is_clustered(mddev) && ret == -EAGAIN))
- printk(KERN_ERR "%s: could not start resync"
- " thread...\n",
- mdname(mddev));
+ printk(KERN_ERR "%s: could not start resync thread...\n",
+ mdname(mddev));
/* leave the spares where they are, it shouldn't hurt */
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
@@ -8498,6 +8528,11 @@ void md_reap_sync_thread(struct mddev *mddev)
rdev->saved_raid_disk = -1;
md_update_sb(mddev, 1);
+ /* MD_CHANGE_PENDING should be cleared by md_update_sb, so we can
+ * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
+ * clustered raid */
+ if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
+ md_cluster_ops->resync_finish(mddev);
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
@@ -8795,6 +8830,7 @@ EXPORT_SYMBOL(md_reload_sb);
* at boot time.
*/
+static DEFINE_MUTEX(detected_devices_mutex);
static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
struct list_head list;
@@ -8808,7 +8844,9 @@ void md_autodetect_dev(dev_t dev)
node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
if (node_detected_dev) {
node_detected_dev->dev = dev;
+ mutex_lock(&detected_devices_mutex);
list_add_tail(&node_detected_dev->list, &all_detected_devices);
+ mutex_unlock(&detected_devices_mutex);
} else {
printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
@@ -8827,6 +8865,7 @@ static void autostart_arrays(int part)
printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
+ mutex_lock(&detected_devices_mutex);
while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
i_scanned++;
node_detected_dev = list_entry(all_detected_devices.next,
@@ -8845,6 +8884,7 @@ static void autostart_arrays(int part)
list_add(&rdev->same_set, &pending_raid_disks);
i_passed++;
}
+ mutex_unlock(&detected_devices_mutex);
printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
i_scanned, i_passed);
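The remove_and_add_spares() rework above batches RCU grace periods: a first pass marks every Faulty, non-Blocked device with nr_pending == 0 using the new RemoveSynchronized flag, a single synchronize_rcu() then covers all of them, and a second pass performs the removals, so an array shedding many failed devices pays for one grace period rather than one per device. A schematic user-space rendering of that two-pass shape (synchronize_rcu() is a stub; plain bools stand in for the kernel's atomic flag bits):

#include <stdbool.h>
#include <stdio.h>

static void synchronize_rcu(void)	/* stand-in for the kernel primitive */
{
	puts("synchronize_rcu()");	/* expensive: once, not per device */
}

struct rdev { bool faulty, blocked, remove_synchronized; int nr_pending; };

static void remove_spares(struct rdev *devs, int n)
{
	bool remove_some = false;
	int i;

	for (i = 0; i < n; i++)		/* pass 1: mark candidates */
		if (devs[i].faulty && !devs[i].blocked &&
		    devs[i].nr_pending == 0) {
			devs[i].remove_synchronized = true;
			remove_some = true;
		}

	if (remove_some)
		synchronize_rcu();	/* one grace period for all marked */

	for (i = 0; i < n; i++)		/* pass 2: actually remove */
		if (devs[i].remove_synchronized) {
			printf("removing dev %d\n", i);
			devs[i].remove_synchronized = false;
		}
}

int main(void)
{
	struct rdev devs[] = {
		{ true,  false, false, 0 },
		{ false, false, false, 1 },
		{ true,  false, false, 0 },
	};

	remove_spares(devs, 3);
	return 0;
}

The raid1, raid10 and multipath *_remove_disk() hunks later in this patch consume the flag: when RemoveSynchronized is already set they skip their own synchronize_rcu() call.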
diff --git a/drivers/md/md.h b/drivers/md/md.h
index b5c4be73e..20c667579 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -99,7 +99,7 @@ struct md_rdev {
atomic_t read_errors; /* number of consecutive read errors that
* we have tried to ignore.
*/
- struct timespec last_read_error; /* monotonic time since our
+ time64_t last_read_error; /* monotonic time of our
* last read error
*/
atomic_t corrected_errors; /* number of corrected read errors,
@@ -163,6 +163,11 @@ enum flag_bits {
* than other devices in the array
*/
ClusterRemove,
+ RemoveSynchronized, /* synchronize_rcu() was called after
+ * this device was known to be faulty,
+ * so it is safe to remove without
+ * another synchronize_rcu() call.
+ */
};
static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
@@ -204,6 +209,9 @@ struct mddev {
#define MD_RELOAD_SB 7 /* Reload the superblock because another node
* updated it.
*/
+#define MD_CLUSTER_RESYNC_LOCKED 8 /* cluster raid only: this node holds
+ * the resync lock and must release it
+ * when the resync finishes */
int suspended;
atomic_t active_io;
@@ -424,7 +432,7 @@ struct mddev {
/* Generic flush handling.
* The last to finish preflush schedules a worker to submit
- * the rest of the request (without the REQ_FLUSH flag).
+ * the rest of the request (without the REQ_PREFLUSH flag).
*/
struct bio *flush_bio;
atomic_t flush_pending;
@@ -618,7 +626,8 @@ extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
sector_t sector, int size, struct page *page);
extern void md_super_wait(struct mddev *mddev);
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
- struct page *page, int rw, bool metadata_op);
+ struct page *page, int op, int op_flags,
+ bool metadata_op);
extern void md_do_sync(struct md_thread *thread);
extern void md_new_event(struct mddev *mddev);
extern int md_allow_write(struct mddev *mddev);
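The sync_page_io() prototype change above splits the old combined rw argument into an op plus op_flags pair, matching bio_set_op_attrs(); every caller in the md.c and raid hunks is updated the same way (READ becomes REQ_OP_READ, 0). A stub-based sketch of the new call shape (this sync_page_io is a printf stand-in, not the kernel helper):

#include <stdio.h>

enum { REQ_OP_READ = 0, REQ_OP_WRITE = 1 };
#define REQ_SYNC (1u << 0)

/* stand-in with the new split signature: op + op_flags */
static int sync_page_io(const char *dev, long long sector, int size,
			void *page, int op, unsigned int op_flags,
			int metadata_op)
{
	printf("%s: sector=%lld size=%d op=%d flags=%#x meta=%d\n",
	       dev, sector, size, op, op_flags, metadata_op);
	return 1;	/* nonzero means success, as in the kernel helper */
}

int main(void)
{
	char page[512];

	/* old: sync_page_io(rdev, 0, 512, page, READ, true)
	 * new: the op and its flags are separate arguments */
	if (!sync_page_io("sda", 0, (int)sizeof(page), page,
			  REQ_OP_READ, 0, 1))
		return 1;
	sync_page_io("sda", 8, (int)sizeof(page), page,
		     REQ_OP_WRITE, REQ_SYNC, 0);
	return 0;
}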
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index dd483bb2e..673efbd6f 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -43,7 +43,8 @@ static int multipath_map (struct mpconf *conf)
rcu_read_lock();
for (i = 0; i < disks; i++) {
struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev);
- if (rdev && test_bit(In_sync, &rdev->flags)) {
+ if (rdev && test_bit(In_sync, &rdev->flags) &&
+ !test_bit(Faulty, &rdev->flags)) {
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
return i;
@@ -90,7 +91,7 @@ static void multipath_end_request(struct bio *bio)
if (!bio->bi_error)
multipath_end_bh_io(mp_bh, 0);
- else if (!(bio->bi_rw & REQ_RAHEAD)) {
+ else if (!(bio->bi_opf & REQ_RAHEAD)) {
/*
* oops, IO error:
*/
@@ -111,7 +112,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
struct multipath_bh * mp_bh;
struct multipath_info *multipath;
- if (unlikely(bio->bi_rw & REQ_FLUSH)) {
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
md_flush_request(mddev, bio);
return;
}
@@ -134,24 +135,26 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
mp_bh->bio.bi_bdev = multipath->rdev->bdev;
- mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
+ mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT;
mp_bh->bio.bi_end_io = multipath_end_request;
mp_bh->bio.bi_private = mp_bh;
generic_make_request(&mp_bh->bio);
return;
}
-static void multipath_status (struct seq_file *seq, struct mddev *mddev)
+static void multipath_status(struct seq_file *seq, struct mddev *mddev)
{
struct mpconf *conf = mddev->private;
int i;
seq_printf (seq, " [%d/%d] [", conf->raid_disks,
conf->raid_disks - mddev->degraded);
- for (i = 0; i < conf->raid_disks; i++)
- seq_printf (seq, "%s",
- conf->multipaths[i].rdev &&
- test_bit(In_sync, &conf->multipaths[i].rdev->flags) ? "U" : "_");
+ rcu_read_lock();
+ for (i = 0; i < conf->raid_disks; i++) {
+ struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev);
+ seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
+ }
+ rcu_read_unlock();
seq_printf (seq, "]");
}
@@ -295,12 +298,14 @@ static int multipath_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
goto abort;
}
p->rdev = NULL;
- synchronize_rcu();
- if (atomic_read(&rdev->nr_pending)) {
- /* lost the race, try later */
- err = -EBUSY;
- p->rdev = rdev;
- goto abort;
+ if (!test_bit(RemoveSynchronized, &rdev->flags)) {
+ synchronize_rcu();
+ if (atomic_read(&rdev->nr_pending)) {
+ /* lost the race, try later */
+ err = -EBUSY;
+ p->rdev = rdev;
+ goto abort;
+ }
}
err = md_integrity_register(mddev);
}
@@ -355,7 +360,7 @@ static void multipathd(struct md_thread *thread)
bio->bi_iter.bi_sector +=
conf->multipaths[mp_bh->path].rdev->data_offset;
bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
- bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
+ bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
bio->bi_end_io = multipath_end_request;
bio->bi_private = mp_bh;
generic_make_request(bio);
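The multipath_status() hunk above fixes an unprotected traversal: the seq_file path read conf->multipaths[i].rdev twice with no locking, so a racing removal could free the rdev between the NULL check and the In_sync test. The fix brackets the loop with rcu_read_lock()/rcu_read_unlock() and reads the pointer once through rcu_dereference() into a local. The nearest user-space analogue is a single atomic pointer load per slot, as in this sketch (C11 atomics stand in for the kernel's RCU accessors):

#include <stdatomic.h>
#include <stdio.h>

struct rdev { int in_sync; };

/* an updater would atomic_store these slots; readers only load */
static _Atomic(struct rdev *) mirrors[4];

static void print_status(void)
{
	int i;

	for (i = 0; i < 4; i++) {
		/* rcu_dereference() analogue: one snapshot, then use it */
		struct rdev *rdev = atomic_load(&mirrors[i]);

		putchar(rdev && rdev->in_sync ? 'U' : '_');
	}
	putchar('\n');
}

int main(void)
{
	static struct rdev a = { 1 }, b = { 0 };

	atomic_store(&mirrors[0], &a);
	atomic_store(&mirrors[2], &b);
	print_status();		/* prints U___ */
	return 0;
}

The raid10_status() hunk further down makes the identical change for the raid10 personality.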
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index ea3d3b656..2cc187780 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -429,7 +429,14 @@ static int dm_btree_lookup_next_single(struct dm_btree_info *info, dm_block_t ro
if (flags & INTERNAL_NODE) {
i = lower_bound(n, key);
- if (i < 0 || i >= nr_entries) {
+ if (i < 0) {
+ /*
+ * avoid early -ENODATA return when all entries are
+ * higher than the search @key.
+ */
+ i = 0;
+ }
+ if (i >= nr_entries) {
r = -ENODATA;
goto out;
}
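The dm-btree hunk above separates the two ways lower_bound() can miss in an internal node: a negative index means every entry is greater than the search key, and the lookup should descend through slot 0 to reach the first key above it; only an index beyond the node is a genuine -ENODATA. A compact demonstration (this lower_bound is an ordinary binary search over a sorted array, assumed to match the btree helper's contract):

#include <errno.h>
#include <stdio.h>

/* index of the greatest element <= key, or -1 if all are greater */
static int lower_bound(const int *keys, int n, int key)
{
	int lo = 0, hi = n - 1, best = -1;

	while (lo <= hi) {
		int mid = lo + (hi - lo) / 2;

		if (keys[mid] <= key) {
			best = mid;
			lo = mid + 1;
		} else {
			hi = mid - 1;
		}
	}
	return best;
}

static int lookup_next_slot(const int *keys, int n, int key)
{
	int i = lower_bound(keys, n, key);

	if (i < 0)
		i = 0;		/* the fix: descend via the first child */
	if (i >= n)
		return -ENODATA;
	return i;
}

int main(void)
{
	const int keys[] = { 10, 20, 30 };

	printf("slot=%d\n", lookup_next_slot(keys, 3, 5));  /* 0, not an error */
	printf("slot=%d\n", lookup_next_slot(keys, 3, 25)); /* 1 */
	return 0;
}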
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 34783a3c8..258986a26 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -458,7 +458,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
struct md_rdev *tmp_dev;
struct bio *split;
- if (unlikely(bio->bi_rw & REQ_FLUSH)) {
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
md_flush_request(mddev, bio);
return;
}
@@ -488,7 +488,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
split->bi_iter.bi_sector = sector + zone->dev_start +
tmp_dev->data_offset;
- if (unlikely((split->bi_rw & REQ_DISCARD) &&
+ if (unlikely((bio_op(split) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
/* Just ignore it */
bio_endio(split);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index c7c8cde0a..21dc00eb1 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -319,14 +319,13 @@ static void raid1_end_read_request(struct bio *bio)
{
int uptodate = !bio->bi_error;
struct r1bio *r1_bio = bio->bi_private;
- int mirror;
struct r1conf *conf = r1_bio->mddev->private;
+ struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;
- mirror = r1_bio->read_disk;
/*
* this branch is our 'one mirror IO has finished' event handler:
*/
- update_head_pos(mirror, r1_bio);
+ update_head_pos(r1_bio->read_disk, r1_bio);
if (uptodate)
set_bit(R1BIO_Uptodate, &r1_bio->state);
@@ -339,14 +338,14 @@ static void raid1_end_read_request(struct bio *bio)
spin_lock_irqsave(&conf->device_lock, flags);
if (r1_bio->mddev->degraded == conf->raid_disks ||
(r1_bio->mddev->degraded == conf->raid_disks-1 &&
- test_bit(In_sync, &conf->mirrors[mirror].rdev->flags)))
+ test_bit(In_sync, &rdev->flags)))
uptodate = 1;
spin_unlock_irqrestore(&conf->device_lock, flags);
}
if (uptodate) {
raid_end_bio_io(r1_bio);
- rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
+ rdev_dec_pending(rdev, conf->mddev);
} else {
/*
* oops, read error:
@@ -356,7 +355,7 @@ static void raid1_end_read_request(struct bio *bio)
KERN_ERR "md/raid1:%s: %s: "
"rescheduling sector %llu\n",
mdname(conf->mddev),
- bdevname(conf->mirrors[mirror].rdev->bdev,
+ bdevname(rdev->bdev,
b),
(unsigned long long)r1_bio->sector);
set_bit(R1BIO_ReadError, &r1_bio->state);
@@ -403,20 +402,18 @@ static void r1_bio_write_done(struct r1bio *r1_bio)
static void raid1_end_write_request(struct bio *bio)
{
struct r1bio *r1_bio = bio->bi_private;
- int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
+ int behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
struct r1conf *conf = r1_bio->mddev->private;
struct bio *to_put = NULL;
-
- mirror = find_bio_disk(r1_bio, bio);
+ int mirror = find_bio_disk(r1_bio, bio);
+ struct md_rdev *rdev = conf->mirrors[mirror].rdev;
/*
* 'one mirror IO has finished' event handler:
*/
if (bio->bi_error) {
- set_bit(WriteErrorSeen,
- &conf->mirrors[mirror].rdev->flags);
- if (!test_and_set_bit(WantReplacement,
- &conf->mirrors[mirror].rdev->flags))
+ set_bit(WriteErrorSeen, &rdev->flags);
+ if (!test_and_set_bit(WantReplacement, &rdev->flags))
set_bit(MD_RECOVERY_NEEDED, &
conf->mddev->recovery);
@@ -445,13 +442,12 @@ static void raid1_end_write_request(struct bio *bio)
* before rdev->recovery_offset, but for simplicity we don't
* check this here.
*/
- if (test_bit(In_sync, &conf->mirrors[mirror].rdev->flags) &&
- !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))
+ if (test_bit(In_sync, &rdev->flags) &&
+ !test_bit(Faulty, &rdev->flags))
set_bit(R1BIO_Uptodate, &r1_bio->state);
/* Maybe we can clear some bad blocks. */
- if (is_badblock(conf->mirrors[mirror].rdev,
- r1_bio->sector, r1_bio->sectors,
+ if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
&first_bad, &bad_sectors)) {
r1_bio->bios[mirror] = IO_MADE_GOOD;
set_bit(R1BIO_MadeGood, &r1_bio->state);
@@ -459,7 +455,7 @@ static void raid1_end_write_request(struct bio *bio)
}
if (behind) {
- if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
+ if (test_bit(WriteMostly, &rdev->flags))
atomic_dec(&r1_bio->behind_remaining);
/*
@@ -483,8 +479,7 @@ static void raid1_end_write_request(struct bio *bio)
}
}
if (r1_bio->bios[mirror] == NULL)
- rdev_dec_pending(conf->mirrors[mirror].rdev,
- conf->mddev);
+ rdev_dec_pending(rdev, conf->mddev);
/*
* Let's see if all mirrored write operations have finished
@@ -689,13 +684,6 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
if (!rdev)
goto retry;
atomic_inc(&rdev->nr_pending);
- if (test_bit(Faulty, &rdev->flags)) {
- /* cannot risk returning a device that failed
- * before we inc'ed nr_pending
- */
- rdev_dec_pending(rdev, conf->mddev);
- goto retry;
- }
sectors = best_good_sectors;
if (conf->mirrors[best_disk].next_seq_sect != this_sector)
@@ -759,7 +747,7 @@ static void flush_pending_writes(struct r1conf *conf)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
bio->bi_next = NULL;
- if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+ if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
bio_endio(bio);
@@ -1033,7 +1021,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
bio->bi_next = NULL;
- if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+ if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
bio_endio(bio);
@@ -1053,12 +1041,11 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
int i, disks;
struct bitmap *bitmap;
unsigned long flags;
+ const int op = bio_op(bio);
const int rw = bio_data_dir(bio);
- const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
- const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
- const unsigned long do_discard = (bio->bi_rw
- & (REQ_DISCARD | REQ_SECURE));
- const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
+ const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
+ const unsigned long do_flush_fua = (bio->bi_opf &
+ (REQ_PREFLUSH | REQ_FUA));
struct md_rdev *blocked_rdev;
struct blk_plug_cb *cb;
struct raid1_plug_cb *plug = NULL;
@@ -1106,7 +1093,7 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
bitmap = mddev->bitmap;
/*
- * make_request() can abort the operation when READA is being
+ * make_request() can abort the operation when read-ahead is being
* used and no empty request is available.
*
*/
@@ -1166,7 +1153,7 @@ read_again:
mirror->rdev->data_offset;
read_bio->bi_bdev = mirror->rdev->bdev;
read_bio->bi_end_io = raid1_end_read_request;
- read_bio->bi_rw = READ | do_sync;
+ bio_set_op_attrs(read_bio, op, do_sync);
read_bio->bi_private = r1_bio;
if (max_sectors < r1_bio->sectors) {
@@ -1376,8 +1363,7 @@ read_again:
conf->mirrors[i].rdev->data_offset);
mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
mbio->bi_end_io = raid1_end_write_request;
- mbio->bi_rw =
- WRITE | do_flush_fua | do_sync | do_discard | do_same;
+ bio_set_op_attrs(mbio, op, do_flush_fua | do_sync);
mbio->bi_private = r1_bio;
atomic_inc(&r1_bio->remaining);
@@ -1668,13 +1654,16 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
goto abort;
}
p->rdev = NULL;
- synchronize_rcu();
- if (atomic_read(&rdev->nr_pending)) {
- /* lost the race, try later */
- err = -EBUSY;
- p->rdev = rdev;
- goto abort;
- } else if (conf->mirrors[conf->raid_disks + number].rdev) {
+ if (!test_bit(RemoveSynchronized, &rdev->flags)) {
+ synchronize_rcu();
+ if (atomic_read(&rdev->nr_pending)) {
+ /* lost the race, try later */
+ err = -EBUSY;
+ p->rdev = rdev;
+ goto abort;
+ }
+ }
+ if (conf->mirrors[conf->raid_disks + number].rdev) {
/* We just removed a device that is being replaced.
* Move down the replacement. We drain all IO before
* doing this to avoid confusion.
@@ -1721,11 +1710,9 @@ static void end_sync_write(struct bio *bio)
struct r1bio *r1_bio = bio->bi_private;
struct mddev *mddev = r1_bio->mddev;
struct r1conf *conf = mddev->private;
- int mirror=0;
sector_t first_bad;
int bad_sectors;
-
- mirror = find_bio_disk(r1_bio, bio);
+ struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
if (!uptodate) {
sector_t sync_blocks = 0;
@@ -1738,16 +1725,12 @@ static void end_sync_write(struct bio *bio)
s += sync_blocks;
sectors_to_go -= sync_blocks;
} while (sectors_to_go > 0);
- set_bit(WriteErrorSeen,
- &conf->mirrors[mirror].rdev->flags);
- if (!test_and_set_bit(WantReplacement,
- &conf->mirrors[mirror].rdev->flags))
+ set_bit(WriteErrorSeen, &rdev->flags);
+ if (!test_and_set_bit(WantReplacement, &rdev->flags))
set_bit(MD_RECOVERY_NEEDED, &
mddev->recovery);
set_bit(R1BIO_WriteError, &r1_bio->state);
- } else if (is_badblock(conf->mirrors[mirror].rdev,
- r1_bio->sector,
- r1_bio->sectors,
+ } else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
&first_bad, &bad_sectors) &&
!is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
r1_bio->sector,
@@ -1771,7 +1754,7 @@ static void end_sync_write(struct bio *bio)
static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
int sectors, struct page *page, int rw)
{
- if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
+ if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
/* success */
return 1;
if (rw == WRITE) {
@@ -1825,7 +1808,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
rdev = conf->mirrors[d].rdev;
if (sync_page_io(rdev, sect, s<<9,
bio->bi_io_vec[idx].bv_page,
- READ, false)) {
+ REQ_OP_READ, 0, false)) {
success = 1;
break;
}
@@ -2030,7 +2013,7 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
continue;
- wbio->bi_rw = WRITE;
+ bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
wbio->bi_end_io = end_sync_write;
atomic_inc(&r1_bio->remaining);
md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
@@ -2074,29 +2057,30 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
s = PAGE_SIZE >> 9;
do {
- /* Note: no rcu protection needed here
- * as this is synchronous in the raid1d thread
- * which is the thread that might remove
- * a device. If raid1d ever becomes multi-threaded....
- */
sector_t first_bad;
int bad_sectors;
- rdev = conf->mirrors[d].rdev;
+ rcu_read_lock();
+ rdev = rcu_dereference(conf->mirrors[d].rdev);
if (rdev &&
(test_bit(In_sync, &rdev->flags) ||
(!test_bit(Faulty, &rdev->flags) &&
rdev->recovery_offset >= sect + s)) &&
is_badblock(rdev, sect, s,
- &first_bad, &bad_sectors) == 0 &&
- sync_page_io(rdev, sect, s<<9,
- conf->tmppage, READ, false))
- success = 1;
- else {
- d++;
- if (d == conf->raid_disks * 2)
- d = 0;
- }
+ &first_bad, &bad_sectors) == 0) {
+ atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
+ if (sync_page_io(rdev, sect, s<<9,
+ conf->tmppage, REQ_OP_READ, 0, false))
+ success = 1;
+ rdev_dec_pending(rdev, mddev);
+ if (success)
+ break;
+ } else
+ rcu_read_unlock();
+ d++;
+ if (d == conf->raid_disks * 2)
+ d = 0;
} while (!success && d != read_disk);
if (!success) {
@@ -2112,11 +2096,17 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
if (d==0)
d = conf->raid_disks * 2;
d--;
- rdev = conf->mirrors[d].rdev;
+ rcu_read_lock();
+ rdev = rcu_dereference(conf->mirrors[d].rdev);
if (rdev &&
- !test_bit(Faulty, &rdev->flags))
+ !test_bit(Faulty, &rdev->flags)) {
+ atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
r1_sync_page_io(rdev, sect, s,
conf->tmppage, WRITE);
+ rdev_dec_pending(rdev, mddev);
+ } else
+ rcu_read_unlock();
}
d = start;
while (d != read_disk) {
@@ -2124,9 +2114,12 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
if (d==0)
d = conf->raid_disks * 2;
d--;
- rdev = conf->mirrors[d].rdev;
+ rcu_read_lock();
+ rdev = rcu_dereference(conf->mirrors[d].rdev);
if (rdev &&
!test_bit(Faulty, &rdev->flags)) {
+ atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
if (r1_sync_page_io(rdev, sect, s,
conf->tmppage, READ)) {
atomic_add(s, &rdev->corrected_errors);
@@ -2135,10 +2128,12 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
"(%d sectors at %llu on %s)\n",
mdname(mddev), s,
(unsigned long long)(sect +
- rdev->data_offset),
+ rdev->data_offset),
bdevname(rdev->bdev, b));
}
- }
+ rdev_dec_pending(rdev, mddev);
+ } else
+ rcu_read_unlock();
}
sectors -= s;
sect += s;
@@ -2201,14 +2196,15 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
wbio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
}
- wbio->bi_rw = WRITE;
+ bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
wbio->bi_iter.bi_sector = r1_bio->sector;
wbio->bi_iter.bi_size = r1_bio->sectors << 9;
bio_trim(wbio, sector - r1_bio->sector, sectors);
wbio->bi_iter.bi_sector += rdev->data_offset;
wbio->bi_bdev = rdev->bdev;
- if (submit_bio_wait(WRITE, wbio) < 0)
+
+ if (submit_bio_wait(wbio) < 0)
/* failure! */
ok = rdev_set_badblocks(rdev, sector,
sectors, 0)
@@ -2322,7 +2318,7 @@ read_more:
raid_end_bio_io(r1_bio);
} else {
const unsigned long do_sync
- = r1_bio->master_bio->bi_rw & REQ_SYNC;
+ = r1_bio->master_bio->bi_opf & REQ_SYNC;
if (bio) {
r1_bio->bios[r1_bio->read_disk] =
mddev->ro ? IO_BLOCKED : NULL;
@@ -2343,7 +2339,7 @@ read_more:
bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
bio->bi_end_io = raid1_end_read_request;
- bio->bi_rw = READ | do_sync;
+ bio_set_op_attrs(bio, REQ_OP_READ, do_sync);
bio->bi_private = r1_bio;
if (max_sectors < r1_bio->sectors) {
/* Drat - have to split this up more */
@@ -2535,6 +2531,13 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
return sync_blocks;
}
+ /*
+ * If there is non-resync activity waiting for a turn, then let it
+ * through before starting on this new sync request.
+ */
+ if (conf->nr_waiting)
+ schedule_timeout_uninterruptible(1);
+
/* we are incrementing sector_nr below. To be safe, we check against
* sector_nr + two times RESYNC_SECTORS
*/
@@ -2571,7 +2574,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
if (i < conf->raid_disks)
still_degraded = 1;
} else if (!test_bit(In_sync, &rdev->flags)) {
- bio->bi_rw = WRITE;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio->bi_end_io = end_sync_write;
write_targets ++;
} else {
@@ -2598,7 +2601,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
if (disk < 0)
disk = i;
}
- bio->bi_rw = READ;
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
bio->bi_end_io = end_sync_read;
read_targets++;
} else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
@@ -2610,7 +2613,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
* if we are doing resync or repair. Otherwise, leave
* this device alone for this sync request.
*/
- bio->bi_rw = WRITE;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio->bi_end_io = end_sync_write;
write_targets++;
}
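fix_read_error() in the raid1 hunks above drops its old assumption of running only in the single raid1d thread: each probe now takes rcu_read_lock(), pins the chosen device by incrementing nr_pending, drops the lock, and only then issues the sleeping synchronous I/O, releasing the pin with rdev_dec_pending() afterwards. The pin-then-unlock shape, sketched with C11 atomics standing in for the kernel primitives:

#include <stdatomic.h>
#include <stdio.h>

struct rdev { atomic_int nr_pending; };

static _Atomic(struct rdev *) slot;	/* RCU-protected pointer stand-in */

static void slow_sync_io(struct rdev *rdev)
{
	/* may sleep, so it must not run under rcu_read_lock() */
	printf("io with nr_pending=%d\n", atomic_load(&rdev->nr_pending));
}

static void fix_read_error(void)
{
	/* rcu_read_lock() */
	struct rdev *rdev = atomic_load(&slot);	/* rcu_dereference() */

	if (!rdev)
		return;
	atomic_fetch_add(&rdev->nr_pending, 1);	/* pin before unlocking */
	/* rcu_read_unlock(): the pin keeps the device from being freed */
	slow_sync_io(rdev);
	atomic_fetch_sub(&rdev->nr_pending, 1);	/* rdev_dec_pending() */
}

int main(void)
{
	static struct rdev r;

	atomic_store(&slot, &r);
	fix_read_error();
	return 0;
}

If the slot is NULL the sketch simply returns with no pin taken, just as the kernel loop advances to the next mirror after rcu_read_unlock().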
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c7de2a53e..be1a9fca3 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -707,7 +707,6 @@ static struct md_rdev *read_balance(struct r10conf *conf,
raid10_find_phys(conf, r10_bio);
rcu_read_lock();
-retry:
sectors = r10_bio->sectors;
best_slot = -1;
best_rdev = NULL;
@@ -804,13 +803,6 @@ retry:
if (slot >= 0) {
atomic_inc(&rdev->nr_pending);
- if (test_bit(Faulty, &rdev->flags)) {
- /* Cannot risk returning a device that failed
- * before we inc'ed nr_pending
- */
- rdev_dec_pending(rdev, conf->mddev);
- goto retry;
- }
r10_bio->read_slot = slot;
} else
rdev = NULL;
@@ -865,7 +857,7 @@ static void flush_pending_writes(struct r10conf *conf)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
bio->bi_next = NULL;
- if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+ if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
bio_endio(bio);
@@ -913,7 +905,7 @@ static void raise_barrier(struct r10conf *conf, int force)
/* Now wait for all pending IO to complete */
wait_event_lock_irq(conf->wait_barrier,
- !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
+ !atomic_read(&conf->nr_pending) && conf->barrier < RESYNC_DEPTH,
conf->resync_lock);
spin_unlock_irq(&conf->resync_lock);
@@ -944,23 +936,23 @@ static void wait_barrier(struct r10conf *conf)
*/
wait_event_lock_irq(conf->wait_barrier,
!conf->barrier ||
- (conf->nr_pending &&
+ (atomic_read(&conf->nr_pending) &&
current->bio_list &&
!bio_list_empty(current->bio_list)),
conf->resync_lock);
conf->nr_waiting--;
+ if (!conf->nr_waiting)
+ wake_up(&conf->wait_barrier);
}
- conf->nr_pending++;
+ atomic_inc(&conf->nr_pending);
spin_unlock_irq(&conf->resync_lock);
}
static void allow_barrier(struct r10conf *conf)
{
- unsigned long flags;
- spin_lock_irqsave(&conf->resync_lock, flags);
- conf->nr_pending--;
- spin_unlock_irqrestore(&conf->resync_lock, flags);
- wake_up(&conf->wait_barrier);
+ if ((atomic_dec_and_test(&conf->nr_pending)) ||
+ (conf->array_freeze_pending))
+ wake_up(&conf->wait_barrier);
}
static void freeze_array(struct r10conf *conf, int extra)
@@ -978,13 +970,15 @@ static void freeze_array(struct r10conf *conf, int extra)
* we continue.
*/
spin_lock_irq(&conf->resync_lock);
+ conf->array_freeze_pending++;
conf->barrier++;
conf->nr_waiting++;
wait_event_lock_irq_cmd(conf->wait_barrier,
- conf->nr_pending == conf->nr_queued+extra,
+ atomic_read(&conf->nr_pending) == conf->nr_queued+extra,
conf->resync_lock,
flush_pending_writes(conf));
+ conf->array_freeze_pending--;
spin_unlock_irq(&conf->resync_lock);
}
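The barrier hunks above convert conf->nr_pending from a spinlock-protected int into an atomic_t, taking the lock round-trip out of allow_barrier() entirely: atomic_dec_and_test() both drops the reference and reports whether the count hit zero, which is exactly when a freezer (tracked by the new array_freeze_pending counter) may need waking. A compressed model of that dec-and-test wake-up (pthread primitives stand in for the kernel waitqueue; build with -lpthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int nr_pending;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_barrier = PTHREAD_COND_INITIALIZER;

static void allow_barrier(void)
{
	/* lock-free fast path; wake waiters only when the count hits 0 */
	if (atomic_fetch_sub(&nr_pending, 1) == 1) {
		pthread_mutex_lock(&lock);
		pthread_cond_broadcast(&wait_barrier);
		pthread_mutex_unlock(&lock);
	}
}

static void freeze_array(void)
{
	pthread_mutex_lock(&lock);
	while (atomic_load(&nr_pending) != 0)
		pthread_cond_wait(&wait_barrier, &lock);
	pthread_mutex_unlock(&lock);
	puts("array frozen");
}

static void *io_done(void *arg)
{
	(void)arg;
	allow_barrier();
	return NULL;
}

int main(void)
{
	pthread_t t;

	atomic_store(&nr_pending, 1);	/* one I/O in flight */
	pthread_create(&t, NULL, io_done, NULL);
	freeze_array();
	pthread_join(t, NULL);
	return 0;
}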
@@ -1041,7 +1035,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
bio->bi_next = NULL;
- if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+ if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
/* Just ignore it */
bio_endio(bio);
@@ -1058,12 +1052,10 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
struct r10bio *r10_bio;
struct bio *read_bio;
int i;
+ const int op = bio_op(bio);
const int rw = bio_data_dir(bio);
- const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
- const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
- const unsigned long do_discard = (bio->bi_rw
- & (REQ_DISCARD | REQ_SECURE));
- const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
+ const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
+ const unsigned long do_fua = (bio->bi_opf & REQ_FUA);
unsigned long flags;
struct md_rdev *blocked_rdev;
struct blk_plug_cb *cb;
@@ -1072,6 +1064,8 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
int max_sectors;
int sectors;
+ md_write_start(mddev, bio);
+
/*
* Register the new request and wait if the reconstruction
* thread has put up a bar for new requests.
@@ -1156,7 +1150,7 @@ read_again:
choose_data_offset(r10_bio, rdev);
read_bio->bi_bdev = rdev->bdev;
read_bio->bi_end_io = raid10_end_read_request;
- read_bio->bi_rw = READ | do_sync;
+ bio_set_op_attrs(read_bio, op, do_sync);
read_bio->bi_private = r10_bio;
if (max_sectors < r10_bio->sectors) {
@@ -1363,8 +1357,7 @@ retry_write:
rdev));
mbio->bi_bdev = rdev->bdev;
mbio->bi_end_io = raid10_end_write_request;
- mbio->bi_rw =
- WRITE | do_sync | do_fua | do_discard | do_same;
+ bio_set_op_attrs(mbio, op, do_sync | do_fua);
mbio->bi_private = r10_bio;
atomic_inc(&r10_bio->remaining);
@@ -1406,8 +1399,7 @@ retry_write:
r10_bio, rdev));
mbio->bi_bdev = rdev->bdev;
mbio->bi_end_io = raid10_end_write_request;
- mbio->bi_rw =
- WRITE | do_sync | do_fua | do_discard | do_same;
+ bio_set_op_attrs(mbio, op, do_sync | do_fua);
mbio->bi_private = r10_bio;
atomic_inc(&r10_bio->remaining);
@@ -1450,13 +1442,11 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
struct bio *split;
- if (unlikely(bio->bi_rw & REQ_FLUSH)) {
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
md_flush_request(mddev, bio);
return;
}
- md_write_start(mddev, bio);
-
do {
/*
@@ -1503,10 +1493,12 @@ static void raid10_status(struct seq_file *seq, struct mddev *mddev)
}
seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
conf->geo.raid_disks - mddev->degraded);
- for (i = 0; i < conf->geo.raid_disks; i++)
- seq_printf(seq, "%s",
- conf->mirrors[i].rdev &&
- test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
+ rcu_read_lock();
+ for (i = 0; i < conf->geo.raid_disks; i++) {
+ struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
+ seq_printf(seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
+ }
+ rcu_read_unlock();
seq_printf(seq, "]");
}
@@ -1604,7 +1596,7 @@ static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
static void print_conf(struct r10conf *conf)
{
int i;
- struct raid10_info *tmp;
+ struct md_rdev *rdev;
printk(KERN_DEBUG "RAID10 conf printout:\n");
if (!conf) {
@@ -1614,14 +1606,16 @@ static void print_conf(struct r10conf *conf)
printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
conf->geo.raid_disks);
+ /* This is only called with ->reconfig_mutex held, so
+ * rcu protection of rdev is not needed */
for (i = 0; i < conf->geo.raid_disks; i++) {
char b[BDEVNAME_SIZE];
- tmp = conf->mirrors + i;
- if (tmp->rdev)
+ rdev = conf->mirrors[i].rdev;
+ if (rdev)
printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
- i, !test_bit(In_sync, &tmp->rdev->flags),
- !test_bit(Faulty, &tmp->rdev->flags),
- bdevname(tmp->rdev->bdev,b));
+ i, !test_bit(In_sync, &rdev->flags),
+ !test_bit(Faulty, &rdev->flags),
+ bdevname(rdev->bdev,b));
}
}
@@ -1770,7 +1764,7 @@ static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
err = -EBUSY;
goto abort;
}
- /* Only remove faulty devices if recovery
+ /* Only remove non-faulty devices if recovery
* is not possible.
*/
if (!test_bit(Faulty, &rdev->flags) &&
@@ -1782,13 +1776,16 @@ static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
goto abort;
}
*rdevp = NULL;
- synchronize_rcu();
- if (atomic_read(&rdev->nr_pending)) {
- /* lost the race, try later */
- err = -EBUSY;
- *rdevp = rdev;
- goto abort;
- } else if (p->replacement) {
+ if (!test_bit(RemoveSynchronized, &rdev->flags)) {
+ synchronize_rcu();
+ if (atomic_read(&rdev->nr_pending)) {
+ /* lost the race, try later */
+ err = -EBUSY;
+ *rdevp = rdev;
+ goto abort;
+ }
+ }
+ if (p->replacement) {
/* We must have just cleared 'rdev' */
p->rdev = p->replacement;
clear_bit(Replacement, &p->replacement->flags);
@@ -1992,10 +1989,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
tbio->bi_vcnt = vcnt;
tbio->bi_iter.bi_size = fbio->bi_iter.bi_size;
- tbio->bi_rw = WRITE;
tbio->bi_private = r10_bio;
tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
tbio->bi_end_io = end_sync_write;
+ bio_set_op_attrs(tbio, REQ_OP_WRITE, 0);
bio_copy_data(tbio, fbio);
@@ -2078,7 +2075,7 @@ static void fix_recovery_read_error(struct r10bio *r10_bio)
addr,
s << 9,
bio->bi_io_vec[idx].bv_page,
- READ, false);
+ REQ_OP_READ, 0, false);
if (ok) {
rdev = conf->mirrors[dw].rdev;
addr = r10_bio->devs[1].addr + sect;
@@ -2086,7 +2083,7 @@ static void fix_recovery_read_error(struct r10bio *r10_bio)
addr,
s << 9,
bio->bi_io_vec[idx].bv_page,
- WRITE, false);
+ REQ_OP_WRITE, 0, false);
if (!ok) {
set_bit(WriteErrorSeen, &rdev->flags);
if (!test_and_set_bit(WantReplacement,
@@ -2175,21 +2172,20 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
*/
static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
{
- struct timespec cur_time_mon;
+ long cur_time_mon;
unsigned long hours_since_last;
unsigned int read_errors = atomic_read(&rdev->read_errors);
- ktime_get_ts(&cur_time_mon);
+ cur_time_mon = ktime_get_seconds();
- if (rdev->last_read_error.tv_sec == 0 &&
- rdev->last_read_error.tv_nsec == 0) {
+ if (rdev->last_read_error == 0) {
/* first time we've seen a read error */
rdev->last_read_error = cur_time_mon;
return;
}
- hours_since_last = (cur_time_mon.tv_sec -
- rdev->last_read_error.tv_sec) / 3600;
+ hours_since_last = (long)(cur_time_mon -
+ rdev->last_read_error) / 3600;
rdev->last_read_error = cur_time_mon;
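check_decay_read_errors() above replaces struct timespec with plain seconds from ktime_get_seconds(), reducing the hour calculation to one 64-bit subtraction and a divide. The function goes on to decay the stored error count by the elapsed hours; the shift-based halving in this sketch reflects how the surrounding kernel code applies it, but that line sits outside the hunk shown, so treat it as an assumption:

#include <stdint.h>
#include <stdio.h>

typedef int64_t time64_t;	/* stand-in for the kernel typedef */

static unsigned int decay_read_errors(unsigned int read_errors,
				      time64_t now, time64_t *last)
{
	long hours_since_last;

	if (*last == 0) {	/* first read error ever seen */
		*last = now;
		return read_errors;
	}
	hours_since_last = (long)(now - *last) / 3600;
	*last = now;
	if (hours_since_last >= 32)	/* don't shift past the width */
		return 0;
	return read_errors >> hours_since_last; /* halve once per hour */
}

int main(void)
{
	time64_t last = 1000;
	unsigned int errs = decay_read_errors(40, 1000 + 2 * 3600, &last);

	printf("decayed=%u last=%lld\n", errs, (long long)last); /* 10 8200 */
	return 0;
}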
@@ -2213,7 +2209,7 @@ static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
&& (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
return -1;
- if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
+ if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
/* success */
return 1;
if (rw == WRITE) {
@@ -2268,7 +2264,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
printk(KERN_NOTICE
"md/raid10:%s: %s: Failing raid device\n",
mdname(mddev), b);
- md_error(mddev, conf->mirrors[d].rdev);
+ md_error(mddev, rdev);
r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
return;
}
@@ -2291,6 +2287,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
rdev = rcu_dereference(conf->mirrors[d].rdev);
if (rdev &&
test_bit(In_sync, &rdev->flags) &&
+ !test_bit(Faulty, &rdev->flags) &&
is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
&first_bad, &bad_sectors) == 0) {
atomic_inc(&rdev->nr_pending);
@@ -2299,7 +2296,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
r10_bio->devs[sl].addr +
sect,
s<<9,
- conf->tmppage, READ, false);
+ conf->tmppage,
+ REQ_OP_READ, 0, false);
rdev_dec_pending(rdev, mddev);
rcu_read_lock();
if (success)
@@ -2343,6 +2341,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
d = r10_bio->devs[sl].devnum;
rdev = rcu_dereference(conf->mirrors[d].rdev);
if (!rdev ||
+ test_bit(Faulty, &rdev->flags) ||
!test_bit(In_sync, &rdev->flags))
continue;
@@ -2382,6 +2381,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
d = r10_bio->devs[sl].devnum;
rdev = rcu_dereference(conf->mirrors[d].rdev);
if (!rdev ||
+ test_bit(Faulty, &rdev->flags) ||
!test_bit(In_sync, &rdev->flags))
continue;
@@ -2465,18 +2465,21 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
while (sect_to_write) {
struct bio *wbio;
+ sector_t wsector;
if (sectors > sect_to_write)
sectors = sect_to_write;
/* Write at 'sector' for 'sectors' */
wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
- wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
- choose_data_offset(r10_bio, rdev) +
- (sector - r10_bio->sector));
+ wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
+ wbio->bi_iter.bi_sector = wsector +
+ choose_data_offset(r10_bio, rdev);
wbio->bi_bdev = rdev->bdev;
- if (submit_bio_wait(WRITE, wbio) < 0)
+ bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
+
+ if (submit_bio_wait(wbio) < 0)
/* Failure! */
- ok = rdev_set_badblocks(rdev, sector,
+ ok = rdev_set_badblocks(rdev, wsector,
sectors, 0)
&& ok;
@@ -2531,7 +2534,7 @@ read_more:
return;
}
- do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
+ do_sync = (r10_bio->master_bio->bi_opf & REQ_SYNC);
slot = r10_bio->read_slot;
printk_ratelimited(
KERN_ERR
@@ -2548,7 +2551,7 @@ read_more:
bio->bi_iter.bi_sector = r10_bio->devs[slot].addr
+ choose_data_offset(r10_bio, rdev);
bio->bi_bdev = rdev->bdev;
- bio->bi_rw = READ | do_sync;
+ bio_set_op_attrs(bio, REQ_OP_READ, do_sync);
bio->bi_private = r10_bio;
bio->bi_end_io = raid10_end_read_request;
if (max_sectors < r10_bio->sectors) {
@@ -2877,11 +2880,14 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
/* Completed a full sync so the replacements
* are now fully recovered.
*/
- for (i = 0; i < conf->geo.raid_disks; i++)
- if (conf->mirrors[i].replacement)
- conf->mirrors[i].replacement
- ->recovery_offset
- = MaxSector;
+ rcu_read_lock();
+ for (i = 0; i < conf->geo.raid_disks; i++) {
+ struct md_rdev *rdev =
+ rcu_dereference(conf->mirrors[i].replacement);
+ if (rdev)
+ rdev->recovery_offset = MaxSector;
+ }
+ rcu_read_unlock();
}
conf->fullsync = 0;
}
@@ -2912,6 +2918,13 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
max_sector > (sector_nr | chunk_mask))
max_sector = (sector_nr | chunk_mask) + 1;
+ /*
+ * If there is non-resync activity waiting for a turn, then let it
+ * through before starting on this new sync request.
+ */
+ if (conf->nr_waiting)
+ schedule_timeout_uninterruptible(1);
+
/* Again, very different code for resync and recovery.
* Both must result in an r10bio with a list of bios that
* have bi_end_io, bi_sector, bi_bdev set,
@@ -2940,14 +2953,20 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
int must_sync;
int any_working;
struct raid10_info *mirror = &conf->mirrors[i];
+ struct md_rdev *mrdev, *mreplace;
- if ((mirror->rdev == NULL ||
- test_bit(In_sync, &mirror->rdev->flags))
- &&
- (mirror->replacement == NULL ||
- test_bit(Faulty,
- &mirror->replacement->flags)))
+ rcu_read_lock();
+ mrdev = rcu_dereference(mirror->rdev);
+ mreplace = rcu_dereference(mirror->replacement);
+
+ if ((mrdev == NULL ||
+ test_bit(Faulty, &mrdev->flags) ||
+ test_bit(In_sync, &mrdev->flags)) &&
+ (mreplace == NULL ||
+ test_bit(Faulty, &mreplace->flags))) {
+ rcu_read_unlock();
continue;
+ }
still_degraded = 0;
/* want to reconstruct this device */
@@ -2957,8 +2976,11 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
/* last stripe is not complete - don't
* try to recover this sector.
*/
+ rcu_read_unlock();
continue;
}
+ if (mreplace && test_bit(Faulty, &mreplace->flags))
+ mreplace = NULL;
/* Unless we are doing a full sync, or a replacement
* we only need to recover the block if it is set in
* the bitmap
@@ -2968,14 +2990,19 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
if (sync_blocks < max_sync)
max_sync = sync_blocks;
if (!must_sync &&
- mirror->replacement == NULL &&
+ mreplace == NULL &&
!conf->fullsync) {
/* yep, skip the sync_blocks here, but don't assume
* that there will never be anything to do here
*/
chunks_skipped = -1;
+ rcu_read_unlock();
continue;
}
+ atomic_inc(&mrdev->nr_pending);
+ if (mreplace)
+ atomic_inc(&mreplace->nr_pending);
+ rcu_read_unlock();
r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
r10_bio->state = 0;
@@ -2994,12 +3021,15 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
/* Need to check if the array will still be
* degraded
*/
- for (j = 0; j < conf->geo.raid_disks; j++)
- if (conf->mirrors[j].rdev == NULL ||
- test_bit(Faulty, &conf->mirrors[j].rdev->flags)) {
+ rcu_read_lock();
+ for (j = 0; j < conf->geo.raid_disks; j++) {
+ struct md_rdev *rdev = rcu_dereference(
+ conf->mirrors[j].rdev);
+ if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
still_degraded = 1;
break;
}
+ }
must_sync = bitmap_start_sync(mddev->bitmap, sect,
&sync_blocks, still_degraded);
@@ -3009,15 +3039,15 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
int k;
int d = r10_bio->devs[j].devnum;
sector_t from_addr, to_addr;
- struct md_rdev *rdev;
+ struct md_rdev *rdev =
+ rcu_dereference(conf->mirrors[d].rdev);
sector_t sector, first_bad;
int bad_sectors;
- if (!conf->mirrors[d].rdev ||
- !test_bit(In_sync, &conf->mirrors[d].rdev->flags))
+ if (!rdev ||
+ !test_bit(In_sync, &rdev->flags))
continue;
/* This is where we read from */
any_working = 1;
- rdev = conf->mirrors[d].rdev;
sector = r10_bio->devs[j].addr;
if (is_badblock(rdev, sector, max_sync,
@@ -3038,7 +3068,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
biolist = bio;
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_read;
- bio->bi_rw = READ;
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
from_addr = r10_bio->devs[j].addr;
bio->bi_iter.bi_sector = from_addr +
rdev->data_offset;
@@ -3056,18 +3086,17 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
r10_bio->devs[1].devnum = i;
r10_bio->devs[1].addr = to_addr;
- rdev = mirror->rdev;
- if (!test_bit(In_sync, &rdev->flags)) {
+ if (!test_bit(In_sync, &mrdev->flags)) {
bio = r10_bio->devs[1].bio;
bio_reset(bio);
bio->bi_next = biolist;
biolist = bio;
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_write;
- bio->bi_rw = WRITE;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio->bi_iter.bi_sector = to_addr
- + rdev->data_offset;
- bio->bi_bdev = rdev->bdev;
+ + mrdev->data_offset;
+ bio->bi_bdev = mrdev->bdev;
atomic_inc(&r10_bio->remaining);
} else
r10_bio->devs[1].bio->bi_end_io = NULL;
@@ -3076,8 +3105,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bio = r10_bio->devs[1].repl_bio;
if (bio)
bio->bi_end_io = NULL;
- rdev = mirror->replacement;
- /* Note: if rdev != NULL, then bio
+ /* Note: if mreplace != NULL, then bio
* cannot be NULL as r10buf_pool_alloc will
* have allocated it.
* So the second test here is pointless.
@@ -3085,21 +3113,22 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
* this comment keeps human reviewers
* happy.
*/
- if (rdev == NULL || bio == NULL ||
- test_bit(Faulty, &rdev->flags))
+ if (mreplace == NULL || bio == NULL ||
+ test_bit(Faulty, &mreplace->flags))
break;
bio_reset(bio);
bio->bi_next = biolist;
biolist = bio;
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_write;
- bio->bi_rw = WRITE;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio->bi_iter.bi_sector = to_addr +
- rdev->data_offset;
- bio->bi_bdev = rdev->bdev;
+ mreplace->data_offset;
+ bio->bi_bdev = mreplace->bdev;
atomic_inc(&r10_bio->remaining);
break;
}
+ rcu_read_unlock();
if (j == conf->copies) {
/* Cannot recover, so abort the recovery or
* record a bad block */
@@ -3112,15 +3141,15 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
if (r10_bio->devs[k].devnum == i)
break;
if (!test_bit(In_sync,
- &mirror->rdev->flags)
+ &mrdev->flags)
&& !rdev_set_badblocks(
- mirror->rdev,
+ mrdev,
r10_bio->devs[k].addr,
max_sync, 0))
any_working = 0;
- if (mirror->replacement &&
+ if (mreplace &&
!rdev_set_badblocks(
- mirror->replacement,
+ mreplace,
r10_bio->devs[k].addr,
max_sync, 0))
any_working = 0;
@@ -3138,8 +3167,14 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
if (rb2)
atomic_dec(&rb2->remaining);
r10_bio = rb2;
+ rdev_dec_pending(mrdev, mddev);
+ if (mreplace)
+ rdev_dec_pending(mreplace, mddev);
break;
}
+ rdev_dec_pending(mrdev, mddev);
+ if (mreplace)
+ rdev_dec_pending(mreplace, mddev);
}
if (biolist == NULL) {
while (r10_bio) {
@@ -3184,6 +3219,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
int d = r10_bio->devs[i].devnum;
sector_t first_bad, sector;
int bad_sectors;
+ struct md_rdev *rdev;
if (r10_bio->devs[i].repl_bio)
r10_bio->devs[i].repl_bio->bi_end_io = NULL;
@@ -3191,12 +3227,14 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bio = r10_bio->devs[i].bio;
bio_reset(bio);
bio->bi_error = -EIO;
- if (conf->mirrors[d].rdev == NULL ||
- test_bit(Faulty, &conf->mirrors[d].rdev->flags))
+ rcu_read_lock();
+ rdev = rcu_dereference(conf->mirrors[d].rdev);
+ if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
+ rcu_read_unlock();
continue;
+ }
sector = r10_bio->devs[i].addr;
- if (is_badblock(conf->mirrors[d].rdev,
- sector, max_sync,
+ if (is_badblock(rdev, sector, max_sync,
&first_bad, &bad_sectors)) {
if (first_bad > sector)
max_sync = first_bad - sector;
@@ -3204,25 +3242,28 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bad_sectors -= (sector - first_bad);
if (max_sync > bad_sectors)
max_sync = bad_sectors;
+ rcu_read_unlock();
continue;
}
}
- atomic_inc(&conf->mirrors[d].rdev->nr_pending);
+ atomic_inc(&rdev->nr_pending);
atomic_inc(&r10_bio->remaining);
bio->bi_next = biolist;
biolist = bio;
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_read;
- bio->bi_rw = READ;
- bio->bi_iter.bi_sector = sector +
- conf->mirrors[d].rdev->data_offset;
- bio->bi_bdev = conf->mirrors[d].rdev->bdev;
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
+ bio->bi_iter.bi_sector = sector + rdev->data_offset;
+ bio->bi_bdev = rdev->bdev;
count++;
- if (conf->mirrors[d].replacement == NULL ||
- test_bit(Faulty,
- &conf->mirrors[d].replacement->flags))
+ rdev = rcu_dereference(conf->mirrors[d].replacement);
+ if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
+ rcu_read_unlock();
continue;
+ }
+ atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
/* Need to set up for writing to the replacement */
bio = r10_bio->devs[i].repl_bio;
@@ -3230,15 +3271,13 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_error = -EIO;
sector = r10_bio->devs[i].addr;
- atomic_inc(&conf->mirrors[d].rdev->nr_pending);
bio->bi_next = biolist;
biolist = bio;
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_write;
- bio->bi_rw = WRITE;
- bio->bi_iter.bi_sector = sector +
- conf->mirrors[d].replacement->data_offset;
- bio->bi_bdev = conf->mirrors[d].replacement->bdev;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+ bio->bi_iter.bi_sector = sector + rdev->data_offset;
+ bio->bi_bdev = rdev->bdev;
count++;
}
@@ -3505,6 +3544,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
spin_lock_init(&conf->resync_lock);
init_waitqueue_head(&conf->wait_barrier);
+ atomic_set(&conf->nr_pending, 0);
conf->thread = md_register_thread(raid10d, mddev, "raid10");
if (!conf->thread)
@@ -4320,7 +4360,7 @@ read_more:
+ rdev->data_offset);
read_bio->bi_private = r10_bio;
read_bio->bi_end_io = end_sync_read;
- read_bio->bi_rw = READ;
+ bio_set_op_attrs(read_bio, REQ_OP_READ, 0);
read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
read_bio->bi_error = 0;
read_bio->bi_vcnt = 0;
@@ -4334,15 +4374,16 @@ read_more:
blist = read_bio;
read_bio->bi_next = NULL;
+ rcu_read_lock();
for (s = 0; s < conf->copies*2; s++) {
struct bio *b;
int d = r10_bio->devs[s/2].devnum;
struct md_rdev *rdev2;
if (s&1) {
- rdev2 = conf->mirrors[d].replacement;
+ rdev2 = rcu_dereference(conf->mirrors[d].replacement);
b = r10_bio->devs[s/2].repl_bio;
} else {
- rdev2 = conf->mirrors[d].rdev;
+ rdev2 = rcu_dereference(conf->mirrors[d].rdev);
b = r10_bio->devs[s/2].bio;
}
if (!rdev2 || test_bit(Faulty, &rdev2->flags))
@@ -4354,7 +4395,7 @@ read_more:
rdev2->new_data_offset;
b->bi_private = r10_bio;
b->bi_end_io = end_reshape_write;
- b->bi_rw = WRITE;
+ bio_set_op_attrs(b, REQ_OP_WRITE, 0);
b->bi_next = blist;
blist = b;
}
@@ -4387,6 +4428,7 @@ read_more:
nr_sectors += len >> 9;
}
bio_full:
+ rcu_read_unlock();
r10_bio->sectors = nr_sectors;
/* Now submit the read */
@@ -4438,16 +4480,20 @@ static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
struct bio *b;
int d = r10_bio->devs[s/2].devnum;
struct md_rdev *rdev;
+ rcu_read_lock();
if (s&1) {
- rdev = conf->mirrors[d].replacement;
+ rdev = rcu_dereference(conf->mirrors[d].replacement);
b = r10_bio->devs[s/2].repl_bio;
} else {
- rdev = conf->mirrors[d].rdev;
+ rdev = rcu_dereference(conf->mirrors[d].rdev);
b = r10_bio->devs[s/2].bio;
}
- if (!rdev || test_bit(Faulty, &rdev->flags))
+ if (!rdev || test_bit(Faulty, &rdev->flags)) {
+ rcu_read_unlock();
continue;
+ }
atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
md_sync_acct(b->bi_bdev, r10_bio->sectors);
atomic_inc(&r10_bio->remaining);
b->bi_next = NULL;
@@ -4508,9 +4554,10 @@ static int handle_reshape_read_error(struct mddev *mddev,
if (s > (PAGE_SIZE >> 9))
s = PAGE_SIZE >> 9;
+ rcu_read_lock();
while (!success) {
int d = r10b->devs[slot].devnum;
- struct md_rdev *rdev = conf->mirrors[d].rdev;
+ struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
sector_t addr;
if (rdev == NULL ||
test_bit(Faulty, &rdev->flags) ||
@@ -4518,11 +4565,15 @@ static int handle_reshape_read_error(struct mddev *mddev,
goto failed;
addr = r10b->devs[slot].addr + idx * PAGE_SIZE;
+ atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
success = sync_page_io(rdev,
addr,
s << 9,
bvec[idx].bv_page,
- READ, false);
+ REQ_OP_READ, 0, false);
+ rdev_dec_pending(rdev, mddev);
+ rcu_read_lock();
if (success)
break;
failed:
@@ -4532,6 +4583,7 @@ static int handle_reshape_read_error(struct mddev *mddev,
if (slot == first_slot)
break;
}
+ rcu_read_unlock();
if (!success) {
/* couldn't read this block, must give up */
set_bit(MD_RECOVERY_INTR,
@@ -4601,16 +4653,18 @@ static void raid10_finish_reshape(struct mddev *mddev)
}
} else {
int d;
+ rcu_read_lock();
for (d = conf->geo.raid_disks ;
d < conf->geo.raid_disks - mddev->delta_disks;
d++) {
- struct md_rdev *rdev = conf->mirrors[d].rdev;
+ struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
if (rdev)
clear_bit(In_sync, &rdev->flags);
- rdev = conf->mirrors[d].replacement;
+ rdev = rcu_dereference(conf->mirrors[d].replacement);
if (rdev)
clear_bit(In_sync, &rdev->flags);
}
+ rcu_read_unlock();
}
mddev->layout = mddev->new_layout;
mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 6fc2c7575..18ec1f7a9 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -64,10 +64,11 @@ struct r10conf {
int pending_count;
spinlock_t resync_lock;
- int nr_pending;
+ atomic_t nr_pending;
int nr_waiting;
int nr_queued;
int barrier;
+ int array_freeze_pending;
sector_t next_resync;
int fullsync; /* set to 1 if a full sync is needed,
* (fresh device added).
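
Switching nr_pending to an atomic_t lets the request fast path count in-flight I/O without taking resync_lock; only the barrier manipulation paths still serialize on the spinlock. A sketch of the fast path this enables, assuming the usual wait_barrier()/allow_barrier() pairing in raid10.c:

    /* fast path: no barrier active, account without resync_lock */
    atomic_inc(&conf->nr_pending);
    /* ... build and submit the request ... */
    atomic_dec(&conf->nr_pending);
    wake_up(&conf->wait_barrier);   /* a waiting resync may now proceed */
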
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index e889e2deb..1b1ab4a1d 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -96,7 +96,6 @@ struct r5l_log {
spinlock_t no_space_stripes_lock;
bool need_cache_flush;
- bool in_teardown;
};
/*
@@ -254,14 +253,14 @@ static void r5l_submit_current_io(struct r5l_log *log)
__r5l_set_io_unit_state(io, IO_UNIT_IO_START);
spin_unlock_irqrestore(&log->io_list_lock, flags);
- submit_bio(WRITE, io->current_bio);
+ submit_bio(io->current_bio);
}
static struct bio *r5l_bio_alloc(struct r5l_log *log)
{
struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, log->bs);
- bio->bi_rw = WRITE;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio->bi_bdev = log->rdev->bdev;
bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;
@@ -373,7 +372,7 @@ static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
io->current_bio = r5l_bio_alloc(log);
bio_chain(io->current_bio, prev);
- submit_bio(WRITE, prev);
+ submit_bio(prev);
}
if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
@@ -536,7 +535,7 @@ int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
bio_endio(bio);
return 0;
}
- bio->bi_rw &= ~REQ_FLUSH;
+ bio->bi_opf &= ~REQ_PREFLUSH;
return -EAGAIN;
}
@@ -686,7 +685,8 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log)
bio_reset(&log->flush_bio);
log->flush_bio.bi_bdev = log->rdev->bdev;
log->flush_bio.bi_end_io = r5l_log_flush_endio;
- submit_bio(WRITE_FLUSH, &log->flush_bio);
+ bio_set_op_attrs(&log->flush_bio, REQ_OP_WRITE, WRITE_FLUSH);
+ submit_bio(&log->flush_bio);
}
static void r5l_write_super(struct r5l_log *log, sector_t cp);
@@ -703,31 +703,22 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log,
mddev = log->rdev->mddev;
/*
- * This is to avoid a deadlock. r5l_quiesce holds reconfig_mutex and
- * wait for this thread to finish. This thread waits for
- * MD_CHANGE_PENDING clear, which is supposed to be done in
- * md_check_recovery(). md_check_recovery() tries to get
- * reconfig_mutex. Since r5l_quiesce already holds the mutex,
- * md_check_recovery() fails, so the PENDING never get cleared. The
- * in_teardown check workaround this issue.
+ * Discard can zero data, so before discarding we must make sure the
+ * superblock is updated to the new log tail. Updating the superblock
+ * (either by calling md_update_sb() directly or via the md thread)
+ * must hold the reconfig mutex. On the other hand, raid5_quiesce is
+ * called with reconfig_mutex held. The first step of raid5_quiesce()
+ * is waiting for all IO to finish, hence waiting for the reclaim
+ * thread, while the reclaim thread is calling this function and
+ * waiting for the reconfig mutex. So there is a deadlock. We work
+ * around this issue with a trylock.
+ * FIXME: we could miss a discard if we can't take the reconfig mutex
*/
- if (!log->in_teardown) {
- set_mask_bits(&mddev->flags, 0,
- BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
- md_wakeup_thread(mddev->thread);
- wait_event(mddev->sb_wait,
- !test_bit(MD_CHANGE_PENDING, &mddev->flags) ||
- log->in_teardown);
- /*
- * r5l_quiesce could run after in_teardown check and hold
- * mutex first. Superblock might get updated twice.
- */
- if (log->in_teardown)
- md_update_sb(mddev, 1);
- } else {
- WARN_ON(!mddev_is_locked(mddev));
- md_update_sb(mddev, 1);
- }
+ set_mask_bits(&mddev->flags, 0,
+ BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
+ if (!mddev_trylock(mddev))
+ return;
+ md_update_sb(mddev, 1);
+ mddev_unlock(mddev);
/* discard IO error really doesn't matter, ignore it */
if (log->last_checkpoint < end) {
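
The cycle that the new comment describes, and that mddev_trylock() breaks, can be drawn as a two-party wait loop (a schematic only, not code from the patch):

    /*
     * raid5_quiesce()                     reclaim thread
     * ---------------                     ------------------------------------
     * holds reconfig_mutex                r5l_write_super_and_discard_space()
     * waits for in-flight IO,   <-------  needs reconfig_mutex to update
     * i.e. for reclaim to stop  ------->  the superblock: blocked on quiesce
     *
     * mddev_trylock() fails instead of blocking, so reclaim skips the
     * superblock update (and hence the discard) rather than closing the loop.
     */
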
@@ -826,7 +817,6 @@ void r5l_quiesce(struct r5l_log *log, int state)
if (!log || state == 2)
return;
if (state == 0) {
- log->in_teardown = 0;
/*
* This is a special case for hotadd. In suspend, the array has
* no journal. In resume, journal is initialized as well as the
@@ -837,11 +827,6 @@ void r5l_quiesce(struct r5l_log *log, int state)
log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
log->rdev->mddev, "reclaim");
} else if (state == 1) {
- /*
- * at this point all stripes are finished, so io_unit is at
- * least in STRIPE_END state
- */
- log->in_teardown = 1;
/* make sure r5l_write_super_and_discard_space exits */
mddev = log->rdev->mddev;
wake_up(&mddev->sb_wait);
@@ -881,7 +866,8 @@ static int r5l_read_meta_block(struct r5l_log *log,
struct r5l_meta_block *mb;
u32 crc, stored_crc;
- if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, READ, false))
+ if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, REQ_OP_READ, 0,
+ false))
return -EIO;
mb = page_address(page);
@@ -926,7 +912,8 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
&disk_index, sh);
sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
- sh->dev[disk_index].page, READ, false);
+ sh->dev[disk_index].page, REQ_OP_READ, 0,
+ false);
sh->dev[disk_index].log_checksum =
le32_to_cpu(payload->checksum[0]);
set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
@@ -934,7 +921,8 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
} else {
disk_index = sh->pd_idx;
sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
- sh->dev[disk_index].page, READ, false);
+ sh->dev[disk_index].page, REQ_OP_READ, 0,
+ false);
sh->dev[disk_index].log_checksum =
le32_to_cpu(payload->checksum[0]);
set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
@@ -944,7 +932,7 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
sync_page_io(log->rdev,
r5l_ring_add(log, *log_offset, BLOCK_SECTORS),
PAGE_SIZE, sh->dev[disk_index].page,
- READ, false);
+ REQ_OP_READ, 0, false);
sh->dev[disk_index].log_checksum =
le32_to_cpu(payload->checksum[1]);
set_bit(R5_Wantwrite,
@@ -986,11 +974,13 @@ static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
rdev = rcu_dereference(conf->disks[disk_index].rdev);
if (rdev)
sync_page_io(rdev, stripe_sect, PAGE_SIZE,
- sh->dev[disk_index].page, WRITE, false);
+ sh->dev[disk_index].page, REQ_OP_WRITE, 0,
+ false);
rrdev = rcu_dereference(conf->disks[disk_index].replacement);
if (rrdev)
sync_page_io(rrdev, stripe_sect, PAGE_SIZE,
- sh->dev[disk_index].page, WRITE, false);
+ sh->dev[disk_index].page, REQ_OP_WRITE, 0,
+ false);
}
raid5_release_stripe(sh);
return 0;
@@ -1062,7 +1052,8 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
mb->checksum = cpu_to_le32(crc);
- if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, WRITE_FUA, false)) {
+ if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
+ WRITE_FUA, false)) {
__free_page(page);
return -EIO;
}
@@ -1137,7 +1128,7 @@ static int r5l_load_log(struct r5l_log *log)
if (!page)
return -ENOMEM;
- if (!sync_page_io(rdev, cp, PAGE_SIZE, page, READ, false)) {
+ if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, 0, false)) {
ret = -EIO;
goto ioerr;
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8959e6dd3..ee7fc3701 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -659,6 +659,7 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
{
struct stripe_head *sh;
int hash = stripe_hash_locks_hash(sector);
+ int inc_empty_inactive_list_flag;
pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
@@ -703,7 +704,12 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
atomic_inc(&conf->active_stripes);
BUG_ON(list_empty(&sh->lru) &&
!test_bit(STRIPE_EXPANDING, &sh->state));
+ inc_empty_inactive_list_flag = 0;
+ if (!list_empty(conf->inactive_list + hash))
+ inc_empty_inactive_list_flag = 1;
list_del_init(&sh->lru);
+ if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag)
+ atomic_inc(&conf->empty_inactive_list_nr);
if (sh->group) {
sh->group->stripes_cnt--;
sh->group = NULL;
@@ -762,6 +768,7 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
sector_t head_sector, tmp_sec;
int hash;
int dd_idx;
+ int inc_empty_inactive_list_flag;
/* Don't cross chunks, so stripe pd_idx/qd_idx is the same */
tmp_sec = sh->sector;
@@ -779,7 +786,12 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
atomic_inc(&conf->active_stripes);
BUG_ON(list_empty(&head->lru) &&
!test_bit(STRIPE_EXPANDING, &head->state));
+ inc_empty_inactive_list_flag = 0;
+ if (!list_empty(conf->inactive_list + hash))
+ inc_empty_inactive_list_flag = 1;
list_del_init(&head->lru);
+ if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag)
+ atomic_inc(&conf->empty_inactive_list_nr);
if (head->group) {
head->group->stripes_cnt--;
head->group = NULL;
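
Both hunks apply the same accounting fix: empty_inactive_list_nr must be bumped exactly when this list_del_init() turns a previously non-empty inactive list empty, so the "was non-empty" state has to be sampled before the removal. A minimal sketch of the idiom, assuming the hashed list array as in r5conf:

    int was_nonempty = !list_empty(conf->inactive_list + hash);

    list_del_init(&sh->lru);
    /* count the transition to empty only if this removal caused it */
    if (was_nonempty && list_empty(conf->inactive_list + hash))
            atomic_inc(&conf->empty_inactive_list_nr);
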
@@ -806,7 +818,8 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
dd_idx = 0;
while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
dd_idx++;
- if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw)
+ if (head->dev[dd_idx].towrite->bi_opf != sh->dev[dd_idx].towrite->bi_opf ||
+ bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite))
goto unlock_out;
if (head->batch_head) {
@@ -891,29 +904,28 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
if (r5l_write_stripe(conf->log, sh) == 0)
return;
for (i = disks; i--; ) {
- int rw;
+ int op, op_flags = 0;
int replace_only = 0;
struct bio *bi, *rbi;
struct md_rdev *rdev, *rrdev = NULL;
sh = head_sh;
if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
+ op = REQ_OP_WRITE;
if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
- rw = WRITE_FUA;
- else
- rw = WRITE;
+ op_flags = WRITE_FUA;
if (test_bit(R5_Discard, &sh->dev[i].flags))
- rw |= REQ_DISCARD;
+ op = REQ_OP_DISCARD;
} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
- rw = READ;
+ op = REQ_OP_READ;
else if (test_and_clear_bit(R5_WantReplace,
&sh->dev[i].flags)) {
- rw = WRITE;
+ op = REQ_OP_WRITE;
replace_only = 1;
} else
continue;
if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags))
- rw |= REQ_SYNC;
+ op_flags |= REQ_SYNC;
again:
bi = &sh->dev[i].req;
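
The rewritten loop derives an explicit (op, op_flags) pair from the stripe-device flags, so discard becomes its own operation rather than a flag OR'ed into a write. A condensed sketch of the mapping (the want_* booleans are stand-ins for the R5_* test_and_clear_bit() checks above):

    int op, op_flags = 0;

    if (want_write) {
            op = REQ_OP_WRITE;
            if (want_fua)
                    op_flags = WRITE_FUA;   /* a flag bundle, not an op */
            if (want_discard)
                    op = REQ_OP_DISCARD;    /* discard is now an operation */
    } else {
            op = REQ_OP_READ;
    }
    /* op_is_write() is true for both REQ_OP_WRITE and REQ_OP_DISCARD */
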
@@ -927,7 +939,7 @@ again:
rdev = rrdev;
rrdev = NULL;
}
- if (rw & WRITE) {
+ if (op_is_write(op)) {
if (replace_only)
rdev = NULL;
if (rdev == rrdev)
@@ -953,7 +965,7 @@ again:
* need to check for writes. We never accept write errors
* on the replacement, so we don't to check rrdev.
*/
- while ((rw & WRITE) && rdev &&
+ while (op_is_write(op) && rdev &&
test_bit(WriteErrorSeen, &rdev->flags)) {
sector_t first_bad;
int bad_sectors;
@@ -993,17 +1005,16 @@ again:
set_bit(STRIPE_IO_STARTED, &sh->state);
- bio_reset(bi);
bi->bi_bdev = rdev->bdev;
- bi->bi_rw = rw;
- bi->bi_end_io = (rw & WRITE)
+ bio_set_op_attrs(bi, op, op_flags);
+ bi->bi_end_io = op_is_write(op)
? raid5_end_write_request
: raid5_end_read_request;
bi->bi_private = sh;
- pr_debug("%s: for %llu schedule op %ld on disc %d\n",
+ pr_debug("%s: for %llu schedule op %d on disc %d\n",
__func__, (unsigned long long)sh->sector,
- bi->bi_rw, i);
+ bi->bi_opf, i);
atomic_inc(&sh->count);
if (sh != head_sh)
atomic_inc(&head_sh->count);
@@ -1014,7 +1025,7 @@ again:
bi->bi_iter.bi_sector = (sh->sector
+ rdev->data_offset);
if (test_bit(R5_ReadNoMerge, &head_sh->dev[i].flags))
- bi->bi_rw |= REQ_NOMERGE;
+ bi->bi_opf |= REQ_NOMERGE;
if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
@@ -1027,7 +1038,7 @@ again:
* If this is discard request, set bi_vcnt 0. We don't
* want to confuse SCSI because SCSI will replace payload
*/
- if (rw & REQ_DISCARD)
+ if (op == REQ_OP_DISCARD)
bi->bi_vcnt = 0;
if (rrdev)
set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
@@ -1045,17 +1056,16 @@ again:
set_bit(STRIPE_IO_STARTED, &sh->state);
- bio_reset(rbi);
rbi->bi_bdev = rrdev->bdev;
- rbi->bi_rw = rw;
- BUG_ON(!(rw & WRITE));
+ bio_set_op_attrs(rbi, op, op_flags);
+ BUG_ON(!op_is_write(op));
rbi->bi_end_io = raid5_end_write_request;
rbi->bi_private = sh;
- pr_debug("%s: for %llu schedule op %ld on "
+ pr_debug("%s: for %llu schedule op %d on "
"replacement disc %d\n",
__func__, (unsigned long long)sh->sector,
- rbi->bi_rw, i);
+ rbi->bi_opf, i);
atomic_inc(&sh->count);
if (sh != head_sh)
atomic_inc(&head_sh->count);
@@ -1076,7 +1086,7 @@ again:
* If this is discard request, set bi_vcnt 0. We don't
* want to confuse SCSI because SCSI will replace payload
*/
- if (rw & REQ_DISCARD)
+ if (op == REQ_OP_DISCARD)
rbi->bi_vcnt = 0;
if (conf->mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
@@ -1085,10 +1095,10 @@ again:
generic_make_request(rbi);
}
if (!rdev && !rrdev) {
- if (rw & WRITE)
+ if (op_is_write(op))
set_bit(STRIPE_DEGRADED, &sh->state);
- pr_debug("skip op %ld on disc %d for sector %llu\n",
- bi->bi_rw, i, (unsigned long long)sh->sector);
+ pr_debug("skip op %d on disc %d for sector %llu\n",
+ bi->bi_opf, i, (unsigned long long)sh->sector);
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
}
@@ -1619,11 +1629,11 @@ again:
while (wbi && wbi->bi_iter.bi_sector <
dev->sector + STRIPE_SECTORS) {
- if (wbi->bi_rw & REQ_FUA)
+ if (wbi->bi_opf & REQ_FUA)
set_bit(R5_WantFUA, &dev->flags);
- if (wbi->bi_rw & REQ_SYNC)
+ if (wbi->bi_opf & REQ_SYNC)
set_bit(R5_SyncIO, &dev->flags);
- if (wbi->bi_rw & REQ_DISCARD)
+ if (bio_op(wbi) == REQ_OP_DISCARD)
set_bit(R5_Discard, &dev->flags);
else {
tx = async_copy_data(1, wbi, &dev->page,
@@ -1978,9 +1988,11 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
put_cpu();
}
-static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp)
+static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
+ int disks)
{
struct stripe_head *sh;
+ int i;
sh = kmem_cache_zalloc(sc, gfp);
if (sh) {
@@ -1989,6 +2001,17 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp)
INIT_LIST_HEAD(&sh->batch_list);
INIT_LIST_HEAD(&sh->lru);
atomic_set(&sh->count, 1);
+ for (i = 0; i < disks; i++) {
+ struct r5dev *dev = &sh->dev[i];
+
+ bio_init(&dev->req);
+ dev->req.bi_io_vec = &dev->vec;
+ dev->req.bi_max_vecs = 1;
+
+ bio_init(&dev->rreq);
+ dev->rreq.bi_io_vec = &dev->rvec;
+ dev->rreq.bi_max_vecs = 1;
+ }
}
return sh;
}
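
Initializing the embedded bios here pairs with the bio_reset() calls added to the end_io handlers below: each stripe_head's req/rreq bios are set up once at allocation and reset after every use, replacing the per-build re-initialization removed from raid5_build_block(). A sketch of the resulting lifecycle:

    /* at allocation (alloc_stripe): one-time setup of the embedded bio */
    bio_init(&dev->req);
    dev->req.bi_io_vec = &dev->vec;
    dev->req.bi_max_vecs = 1;

    /* at completion (raid5_end_read_request / raid5_end_write_request): */
    bio_reset(bi);          /* clear state, keep the embedded storage */
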
@@ -1996,7 +2019,7 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
{
struct stripe_head *sh;
- sh = alloc_stripe(conf->slab_cache, gfp);
+ sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size);
if (!sh)
return 0;
@@ -2167,7 +2190,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
mutex_lock(&conf->cache_size_mutex);
for (i = conf->max_nr_stripes; i; i--) {
- nsh = alloc_stripe(sc, GFP_KERNEL);
+ nsh = alloc_stripe(sc, GFP_KERNEL, newsize);
if (!nsh)
break;
@@ -2299,6 +2322,7 @@ static void raid5_end_read_request(struct bio * bi)
(unsigned long long)sh->sector, i, atomic_read(&sh->count),
bi->bi_error);
if (i == disks) {
+ bio_reset(bi);
BUG();
return;
}
@@ -2399,6 +2423,7 @@ static void raid5_end_read_request(struct bio * bi)
}
}
rdev_dec_pending(rdev, conf->mddev);
+ bio_reset(bi);
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
raid5_release_stripe(sh);
@@ -2436,6 +2461,7 @@ static void raid5_end_write_request(struct bio *bi)
(unsigned long long)sh->sector, i, atomic_read(&sh->count),
bi->bi_error);
if (i == disks) {
+ bio_reset(bi);
BUG();
return;
}
@@ -2472,6 +2498,7 @@ static void raid5_end_write_request(struct bio *bi)
if (sh->batch_head && bi->bi_error && !replacement)
set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
+ bio_reset(bi);
if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
@@ -2485,16 +2512,6 @@ static void raid5_build_block(struct stripe_head *sh, int i, int previous)
{
struct r5dev *dev = &sh->dev[i];
- bio_init(&dev->req);
- dev->req.bi_io_vec = &dev->vec;
- dev->req.bi_max_vecs = 1;
- dev->req.bi_private = sh;
-
- bio_init(&dev->rreq);
- dev->rreq.bi_io_vec = &dev->rvec;
- dev->rreq.bi_max_vecs = 1;
- dev->rreq.bi_private = sh;
-
dev->flags = 0;
dev->sector = raid5_compute_blocknr(sh, i, previous);
}
@@ -3080,7 +3097,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
struct md_rdev *rdev;
rcu_read_lock();
rdev = rcu_dereference(conf->disks[i].rdev);
- if (rdev && test_bit(In_sync, &rdev->flags))
+ if (rdev && test_bit(In_sync, &rdev->flags) &&
+ !test_bit(Faulty, &rdev->flags))
atomic_inc(&rdev->nr_pending);
else
rdev = NULL;
@@ -3210,15 +3228,16 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
/* During recovery devices cannot be removed, so
* locking and refcounting of rdevs is not needed
*/
+ rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
- struct md_rdev *rdev = conf->disks[i].rdev;
+ struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev
&& !test_bit(Faulty, &rdev->flags)
&& !test_bit(In_sync, &rdev->flags)
&& !rdev_set_badblocks(rdev, sh->sector,
STRIPE_SECTORS, 0))
abort = 1;
- rdev = conf->disks[i].replacement;
+ rdev = rcu_dereference(conf->disks[i].replacement);
if (rdev
&& !test_bit(Faulty, &rdev->flags)
&& !test_bit(In_sync, &rdev->flags)
@@ -3226,6 +3245,7 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
STRIPE_SECTORS, 0))
abort = 1;
}
+ rcu_read_unlock();
if (abort)
conf->recovery_disabled =
conf->mddev->recovery_disabled;
@@ -3237,15 +3257,16 @@ static int want_replace(struct stripe_head *sh, int disk_idx)
{
struct md_rdev *rdev;
int rv = 0;
- /* Doing recovery so rcu locking not required */
- rdev = sh->raid_conf->disks[disk_idx].replacement;
+
+ rcu_read_lock();
+ rdev = rcu_dereference(sh->raid_conf->disks[disk_idx].replacement);
if (rdev
&& !test_bit(Faulty, &rdev->flags)
&& !test_bit(In_sync, &rdev->flags)
&& (rdev->recovery_offset <= sh->sector
|| rdev->mddev->recovery_cp <= sh->sector))
rv = 1;
-
+ rcu_read_unlock();
return rv;
}
@@ -3600,7 +3621,7 @@ static void handle_stripe_dirtying(struct r5conf *conf,
pr_debug("for sector %llu, rmw=%d rcw=%d\n",
(unsigned long long)sh->sector, rmw, rcw);
set_bit(STRIPE_HANDLE, &sh->state);
- if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_ENABLE_RMW)) && rmw > 0) {
+ if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_PREFER_RMW)) && rmw > 0) {
/* prefer read-modify-write, but need to get some data */
if (conf->mddev->queue)
blk_add_trace_msg(conf->mddev->queue,
@@ -3627,7 +3648,7 @@ static void handle_stripe_dirtying(struct r5conf *conf,
}
}
}
- if ((rcw < rmw || (rcw == rmw && conf->rmw_level != PARITY_ENABLE_RMW)) && rcw > 0) {
+ if ((rcw < rmw || (rcw == rmw && conf->rmw_level != PARITY_PREFER_RMW)) && rcw > 0) {
/* want reconstruct write, but need to get some data */
int qread =0;
rcw = 0;
@@ -4624,7 +4645,9 @@ finish:
}
if (!bio_list_empty(&s.return_bi)) {
- if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags)) {
+ if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags) &&
+ (s.failed <= conf->max_degraded ||
+ conf->mddev->external == 0)) {
spin_lock_irq(&conf->device_lock);
bio_list_merge(&conf->return_bi, &s.return_bi);
spin_unlock_irq(&conf->device_lock);
@@ -5150,7 +5173,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
DEFINE_WAIT(w);
bool do_prepare;
- if (unlikely(bi->bi_rw & REQ_FLUSH)) {
+ if (unlikely(bi->bi_opf & REQ_PREFLUSH)) {
int ret = r5l_handle_flush_request(conf->log, bi);
if (ret == 0)
@@ -5176,7 +5199,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
return;
}
- if (unlikely(bi->bi_rw & REQ_DISCARD)) {
+ if (unlikely(bio_op(bi) == REQ_OP_DISCARD)) {
make_discard_request(mddev, bi);
return;
}
@@ -5233,7 +5256,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
(unsigned long long)logical_sector);
sh = raid5_get_active_stripe(conf, new_sector, previous,
- (bi->bi_rw&RWA_MASK), 0);
+ (bi->bi_opf & REQ_RAHEAD), 0);
if (sh) {
if (unlikely(previous)) {
/* expansion might have moved on while waiting for a
@@ -5301,7 +5324,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
set_bit(STRIPE_HANDLE, &sh->state);
clear_bit(STRIPE_DELAYED, &sh->state);
if ((!sh->batch_head || sh == sh->batch_head) &&
- (bi->bi_rw & REQ_SYNC) &&
+ (bi->bi_opf & REQ_SYNC) &&
!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
atomic_inc(&conf->preread_active_stripes);
release_stripe_plug(mddev, sh);
@@ -6616,6 +6639,16 @@ static struct r5conf *setup_conf(struct mddev *mddev)
}
conf->min_nr_stripes = NR_STRIPES;
+ if (mddev->reshape_position != MaxSector) {
+ int stripes = max_t(int,
+ ((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4,
+ ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4);
+ conf->min_nr_stripes = max(NR_STRIPES, stripes);
+ if (conf->min_nr_stripes != NR_STRIPES)
+ printk(KERN_INFO
+ "md/raid:%s: force stripe size %d for reshape\n",
+ mdname(mddev), conf->min_nr_stripes);
+ }
memory = conf->min_nr_stripes * (sizeof(struct stripe_head) +
max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS);
@@ -6822,11 +6855,14 @@ static int raid5_run(struct mddev *mddev)
if (IS_ERR(conf))
return PTR_ERR(conf);
- if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !journal_dev) {
- printk(KERN_ERR "md/raid:%s: journal disk is missing, force array readonly\n",
- mdname(mddev));
- mddev->ro = 1;
- set_disk_ro(mddev->gendisk, 1);
+ if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
+ if (!journal_dev) {
+ pr_err("md/raid:%s: journal disk is missing, force array readonly\n",
+ mdname(mddev));
+ mddev->ro = 1;
+ set_disk_ro(mddev->gendisk, 1);
+ } else if (mddev->recovery_cp == MaxSector)
+ set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
}
conf->min_offset_diff = min_offset_diff;
@@ -7066,10 +7102,12 @@ static void raid5_status(struct seq_file *seq, struct mddev *mddev)
seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
conf->chunk_sectors / 2, mddev->layout);
seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
- for (i = 0; i < conf->raid_disks; i++)
- seq_printf (seq, "%s",
- conf->disks[i].rdev &&
- test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
+ rcu_read_lock();
+ for (i = 0; i < conf->raid_disks; i++) {
+ struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
+ seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
+ }
+ rcu_read_unlock();
seq_printf (seq, "]");
}
@@ -7191,12 +7229,15 @@ static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
goto abort;
}
*rdevp = NULL;
- synchronize_rcu();
- if (atomic_read(&rdev->nr_pending)) {
- /* lost the race, try later */
- err = -EBUSY;
- *rdevp = rdev;
- } else if (p->replacement) {
+ if (!test_bit(RemoveSynchronized, &rdev->flags)) {
+ synchronize_rcu();
+ if (atomic_read(&rdev->nr_pending)) {
+ /* lost the race, try later */
+ err = -EBUSY;
+ *rdevp = rdev;
+ }
+ }
+ if (p->replacement) {
/* We must have just cleared 'rdev' */
p->rdev = p->replacement;
clear_bit(Replacement, &p->replacement->flags);
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index a8518fb3b..962f2a9a6 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -80,6 +80,9 @@ config MEDIA_RC_SUPPORT
Say Y when you have a TV or an IR device.
+config MEDIA_CEC_EDID
+ bool
+
#
# Media controller
# Selectable only for webcam/grabbers, as other drivers don't use it
diff --git a/drivers/media/Makefile b/drivers/media/Makefile
index e608bbce0..081a7866f 100644
--- a/drivers/media/Makefile
+++ b/drivers/media/Makefile
@@ -2,6 +2,10 @@
# Makefile for the kernel multimedia device drivers.
#
+ifeq ($(CONFIG_MEDIA_CEC_EDID),y)
+ obj-$(CONFIG_MEDIA_SUPPORT) += cec-edid.o
+endif
+
media-objs := media-device.o media-devnode.o media-entity.o
#
diff --git a/drivers/media/cec-edid.c b/drivers/media/cec-edid.c
new file mode 100644
index 000000000..5719b991e
--- /dev/null
+++ b/drivers/media/cec-edid.c
@@ -0,0 +1,171 @@
+/*
+ * cec-edid - HDMI Consumer Electronics Control EDID & CEC helper functions
+ *
+ * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <media/cec-edid.h>
+
+/*
+ * This EDID is expected to be CEA-861 compliant, which means that there are
+ * at least two blocks and one or more of the extensions blocks are CEA-861
+ * blocks.
+ *
+ * The returned location is guaranteed to be < size - 1.
+ */
+static unsigned int cec_get_edid_spa_location(const u8 *edid, unsigned int size)
+{
+ unsigned int blocks = size / 128;
+ unsigned int block;
+ u8 d;
+
+ /* Sanity check: at least 2 blocks and a multiple of the block size */
+ if (blocks < 2 || size % 128)
+ return 0;
+
+ /*
+ * If the EDID declares fewer extension blocks than the buffer size
+ * implies, then trim 'blocks'. The EDID may declare more extension
+ * blocks than the buffer holds, since some hardware can only read
+ * e.g. 256 bytes of the EDID even though more blocks are present.
+ * The first CEA-861 extension block should normally be in block 1
+ * anyway.
+ */
+ if (edid[0x7e] + 1 < blocks)
+ blocks = edid[0x7e] + 1;
+
+ for (block = 1; block < blocks; block++) {
+ unsigned int offset = block * 128;
+
+ /* Skip any non-CEA-861 extension blocks */
+ if (edid[offset] != 0x02 || edid[offset + 1] != 0x03)
+ continue;
+
+ /* search Vendor Specific Data Block (tag 3) */
+ d = edid[offset + 2] & 0x7f;
+ /* Check if there are Data Blocks */
+ if (d <= 4)
+ continue;
+ if (d > 4) {
+ unsigned int i = offset + 4;
+ unsigned int end = offset + d;
+
+ /* Note: 'end' is always < 'size' */
+ do {
+ u8 tag = edid[i] >> 5;
+ u8 len = edid[i] & 0x1f;
+
+ if (tag == 3 && len >= 5 && i + len <= end &&
+ edid[i + 1] == 0x03 &&
+ edid[i + 2] == 0x0c &&
+ edid[i + 3] == 0x00)
+ return i + 4;
+ i += len + 1;
+ } while (i < end);
+ }
+ }
+ return 0;
+}
+
+u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
+ unsigned int *offset)
+{
+ unsigned int loc = cec_get_edid_spa_location(edid, size);
+
+ if (offset)
+ *offset = loc;
+ if (loc == 0)
+ return CEC_PHYS_ADDR_INVALID;
+ return (edid[loc] << 8) | edid[loc + 1];
+}
+EXPORT_SYMBOL_GPL(cec_get_edid_phys_addr);
+
+void cec_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr)
+{
+ unsigned int loc = cec_get_edid_spa_location(edid, size);
+ u8 sum = 0;
+ unsigned int i;
+
+ if (loc == 0)
+ return;
+ edid[loc] = phys_addr >> 8;
+ edid[loc + 1] = phys_addr & 0xff;
+ loc &= ~0x7f;
+
+ /* update the checksum */
+ for (i = loc; i < loc + 127; i++)
+ sum += edid[i];
+ edid[i] = 256 - sum;
+}
+EXPORT_SYMBOL_GPL(cec_set_edid_phys_addr);
+
+u16 cec_phys_addr_for_input(u16 phys_addr, u8 input)
+{
+ /* Check if input is sane */
+ if (WARN_ON(input == 0 || input > 0xf))
+ return CEC_PHYS_ADDR_INVALID;
+
+ if (phys_addr == 0)
+ return input << 12;
+
+ if ((phys_addr & 0x0fff) == 0)
+ return phys_addr | (input << 8);
+
+ if ((phys_addr & 0x00ff) == 0)
+ return phys_addr | (input << 4);
+
+ if ((phys_addr & 0x000f) == 0)
+ return phys_addr | input;
+
+ /*
+ * All nibbles are used so no valid physical addresses can be assigned
+ * to the input.
+ */
+ return CEC_PHYS_ADDR_INVALID;
+}
+EXPORT_SYMBOL_GPL(cec_phys_addr_for_input);
+
+int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port)
+{
+ int i;
+
+ if (parent)
+ *parent = phys_addr;
+ if (port)
+ *port = 0;
+ if (phys_addr == CEC_PHYS_ADDR_INVALID)
+ return 0;
+ for (i = 0; i < 16; i += 4)
+ if (phys_addr & (0xf << i))
+ break;
+ if (i == 16)
+ return 0;
+ if (parent)
+ *parent = phys_addr & (0xfff0 << i);
+ if (port)
+ *port = (phys_addr >> i) & 0xf;
+ for (i += 4; i < 16; i += 4)
+ if ((phys_addr & (0xf << i)) == 0)
+ return -EINVAL;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cec_phys_addr_validate);
+
+MODULE_AUTHOR("Hans Verkuil <hans.verkuil@cisco.com>");
+MODULE_DESCRIPTION("CEC EDID helper functions");
+MODULE_LICENSE("GPL");
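
Taken together, these helpers parse, rewrite and derive HDMI physical addresses. A hedged usage sketch (the edid buffer and the concrete addresses are made up for illustration):

    u16 pa, input_pa, parent, port;
    unsigned int offset;

    /* read the source physical address out of a CEA-861 EDID */
    pa = cec_get_edid_phys_addr(edid, size, &offset);
    if (pa == CEC_PHYS_ADDR_INVALID)
            return;

    /* a device at 1.0.0.0 exposing input 2 hands out 1.2.0.0 */
    input_pa = cec_phys_addr_for_input(0x1000, 2);  /* == 0x1200 */

    /* 1.2.0.0 is valid: parent is 1.0.0.0, port is 2 */
    if (cec_phys_addr_validate(0x1200, &parent, &port) == 0)
            ;       /* parent == 0x1000, port == 2 */
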
diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
index cf1dadd0b..3ec3cebe6 100644
--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
@@ -777,7 +777,7 @@ static void precalculate_color(struct tpg_data *tpg, int k)
* Remember that r, g and b are still in the 0 - 0xff0 range.
*/
if (tpg->real_rgb_range == V4L2_DV_RGB_RANGE_LIMITED &&
- tpg->rgb_range == V4L2_DV_RGB_RANGE_FULL) {
+ tpg->rgb_range == V4L2_DV_RGB_RANGE_FULL && !tpg->is_yuv) {
/*
* Convert from full range (which is what r, g and b are)
* to limited range (which is the 'real' RGB range), which
@@ -787,7 +787,7 @@ static void precalculate_color(struct tpg_data *tpg, int k)
g = (g * 219) / 255 + (16 << 4);
b = (b * 219) / 255 + (16 << 4);
} else if (tpg->real_rgb_range != V4L2_DV_RGB_RANGE_LIMITED &&
- tpg->rgb_range == V4L2_DV_RGB_RANGE_LIMITED) {
+ tpg->rgb_range == V4L2_DV_RGB_RANGE_LIMITED && !tpg->is_yuv) {
/*
* Clamp r, g and b to the limited range and convert to full
* range since that's what we deliver.
diff --git a/drivers/media/dvb-core/demux.h b/drivers/media/dvb-core/demux.h
index 6d3b95b89..4b4c1da20 100644
--- a/drivers/media/dvb-core/demux.h
+++ b/drivers/media/dvb-core/demux.h
@@ -1,6 +1,10 @@
/*
* demux.h
*
+ * The Kernel Digital TV Demux kABI defines a driver-internal interface for
+ * registering a low-level, hardware-specific driver with a hardware-independent
+ * demux layer.
+ *
* Copyright (c) 2002 Convergence GmbH
*
* based on code:
@@ -32,49 +36,6 @@
#include <linux/time.h>
#include <linux/dvb/dmx.h>
-/**
- * DOC: Digital TV Demux
- *
- * The Kernel Digital TV Demux kABI defines a driver-internal interface for
- * registering low-level, hardware specific driver to a hardware independent
- * demux layer. It is only of interest for Digital TV device driver writers.
- * The header file for this kABI is named demux.h and located in
- * drivers/media/dvb-core.
- *
- * The demux kABI should be implemented for each demux in the system. It is
- * used to select the TS source of a demux and to manage the demux resources.
- * When the demux client allocates a resource via the demux kABI, it receives
- * a pointer to the kABI of that resource.
- *
- * Each demux receives its TS input from a DVB front-end or from memory, as
- * set via this demux kABI. In a system with more than one front-end, the kABI
- * can be used to select one of the DVB front-ends as a TS source for a demux,
- * unless this is fixed in the HW platform.
- *
- * The demux kABI only controls front-ends regarding to their connections with
- * demuxes; the kABI used to set the other front-end parameters, such as
- * tuning, are defined via the Digital TV Frontend kABI.
- *
- * The functions that implement the abstract interface demux should be defined
- * static or module private and registered to the Demux core for external
- * access. It is not necessary to implement every function in the struct
- * &dmx_demux. For example, a demux interface might support Section filtering,
- * but not PES filtering. The kABI client is expected to check the value of any
- * function pointer before calling the function: the value of NULL means
- * that the function is not available.
- *
- * Whenever the functions of the demux API modify shared data, the
- * possibilities of lost update and race condition problems should be
- * addressed, e.g. by protecting parts of code with mutexes.
- *
- * Note that functions called from a bottom half context must not sleep.
- * Even a simple memory allocation without using %GFP_ATOMIC can result in a
- * kernel thread being put to sleep if swapping is needed. For example, the
- * Linux Kernel calls the functions of a network device interface from a
- * bottom half context. Thus, if a demux kABI function is called from network
- * device code, the function must not sleep.
- */
-
/*
* Common definitions
*/
@@ -104,7 +65,7 @@
*/
/**
- * enum ts_filter_type - filter type bitmap for dmx_ts_feed.set()
+ * enum ts_filter_type - filter type bitmap for dmx_ts_feed.set\(\)
*
* @TS_PACKET: Send TS packets (188 bytes) to callback (default).
* @TS_PAYLOAD_ONLY: In case TS_PACKET is set, only send the TS payload
@@ -143,7 +104,7 @@ struct dmx_ts_feed {
int type,
enum dmx_ts_pes pes_type,
size_t circular_buffer_size,
- struct timespec timeout);
+ ktime_t timeout);
int (*start_filtering)(struct dmx_ts_feed *feed);
int (*stop_filtering)(struct dmx_ts_feed *feed);
};
@@ -231,30 +192,6 @@ struct dmx_section_feed {
};
/**
- * DOC: Demux Callback
- *
- * This kernel-space API comprises the callback functions that deliver filtered
- * data to the demux client. Unlike the other DVB kABIs, these functions are
- * provided by the client and called from the demux code.
- *
- * The function pointers of this abstract interface are not packed into a
- * structure as in the other demux APIs, because the callback functions are
- * registered and used independent of each other. As an example, it is possible
- * for the API client to provide several callback functions for receiving TS
- * packets and no callbacks for PES packets or sections.
- *
- * The functions that implement the callback API need not be re-entrant: when
- * a demux driver calls one of these functions, the driver is not allowed to
- * call the function again before the original call returns. If a callback is
- * triggered by a hardware interrupt, it is recommended to use the Linux
- * bottom half mechanism or start a tasklet instead of making the callback
- * function call directly from a hardware interrupt.
- *
- * This mechanism is implemented by dmx_ts_cb() and dmx_section_cb()
- * callbacks.
- */
-
-/**
* typedef dmx_ts_cb - DVB demux TS filter callback function prototype
*
* @buffer1: Pointer to the start of the filtered TS packets.
@@ -402,7 +339,7 @@ struct dmx_frontend {
* @DMX_SECTION_FILTERING: set if section filtering is supported;
* @DMX_MEMORY_BASED_FILTERING: set if write() available.
*
- * Those flags are OR'ed in the &dmx_demux.&capabilities field
+ * Those flags are OR'ed in the &dmx_demux.capabilities field
*/
enum dmx_demux_caps {
DMX_TS_FILTERING = 1,
@@ -442,10 +379,10 @@ enum dmx_demux_caps {
* @open is called and decrement it when @close is called.
* The @demux function parameter contains a pointer to the demux API and
* instance data.
- * It returns
- * 0 on success;
- * -EUSERS, if maximum usage count was reached;
- * -EINVAL, on bad parameter.
+ * It returns:
+ * 0 on success;
+ * -EUSERS, if maximum usage count was reached;
+ * -EINVAL, on bad parameter.
*
* @close: This function reserves the demux for use by the caller and, if
* necessary, initializes the demux. When the demux is no longer needed,
@@ -455,10 +392,10 @@ enum dmx_demux_caps {
* @open is called and decrement it when @close is called.
* The @demux function parameter contains a pointer to the demux API and
* instance data.
- * It returns
- * 0 on success;
- * -ENODEV, if demux was not in use (e. g. no users);
- * -EINVAL, on bad parameter.
+ * It returns:
+ * 0 on success;
+ * -ENODEV, if demux was not in use (e. g. no users);
+ * -EINVAL, on bad parameter.
*
* @write: This function provides the demux driver with a memory buffer
* containing TS packets. Instead of receiving TS packets from the DVB
@@ -473,12 +410,12 @@ enum dmx_demux_caps {
* The @buf function parameter contains a pointer to the TS data in
* kernel-space memory.
* The @count function parameter contains the length of the TS data.
- * It returns
- * 0 on success;
- * -ERESTARTSYS, if mutex lock was interrupted;
- * -EINTR, if a signal handling is pending;
- * -ENODEV, if demux was removed;
- * -EINVAL, on bad parameter.
+ * It returns:
+ * 0 on success;
+ * -ERESTARTSYS, if mutex lock was interrupted;
+ * -EINTR, if signal handling is pending;
+ * -ENODEV, if demux was removed;
+ * -EINVAL, on bad parameter.
*
* @allocate_ts_feed: Allocates a new TS feed, which is used to filter the TS
* packets carrying a certain PID. The TS feed normally corresponds to a
@@ -489,11 +426,11 @@ enum dmx_demux_caps {
* instance data.
* The @callback function parameter contains a pointer to the callback
* function for passing received TS packet.
- * It returns
- * 0 on success;
- * -ERESTARTSYS, if mutex lock was interrupted;
- * -EBUSY, if no more TS feeds is available;
- * -EINVAL, on bad parameter.
+ * It returns:
+ * 0 on success;
+ * -ERESTARTSYS, if mutex lock was interrupted;
+ * -EBUSY, if no more TS feeds are available;
+ * -EINVAL, on bad parameter.
*
* @release_ts_feed: Releases the resources allocated with @allocate_ts_feed.
* Any filtering in progress on the TS feed should be stopped before
@@ -502,9 +439,9 @@ enum dmx_demux_caps {
* instance data.
* The @feed function parameter contains a pointer to the TS feed API and
* instance data.
- * It returns
- * 0 on success;
- * -EINVAL on bad parameter.
+ * It returns:
+ * 0 on success;
+ * -EINVAL on bad parameter.
*
* @allocate_section_feed: Allocates a new section feed, i.e. a demux resource
* for filtering and receiving sections. On platforms with hardware
@@ -520,10 +457,10 @@ enum dmx_demux_caps {
* instance data.
* The @callback function parameter contains a pointer to the callback
* function for passing received TS packet.
- * It returns
- * 0 on success;
- * -EBUSY, if no more TS feeds is available;
- * -EINVAL, on bad parameter.
+ * It returns:
+ * 0 on success;
+ * -EBUSY, if no more TS feeds are available;
+ * -EINVAL, on bad parameter.
*
* @release_section_feed: Releases the resources allocated with
* @allocate_section_feed, including allocated filters. Any filtering in
@@ -533,9 +470,9 @@ enum dmx_demux_caps {
* instance data.
* The @feed function parameter contains a pointer to the TS feed API and
* instance data.
- * It returns
- * 0 on success;
- * -EINVAL, on bad parameter.
+ * It returns:
+ * 0 on success;
+ * -EINVAL, on bad parameter.
*
* @add_frontend: Registers a connectivity between a demux and a front-end,
* i.e., indicates that the demux can be connected via a call to
@@ -549,9 +486,9 @@ enum dmx_demux_caps {
* instance data.
* The @frontend function parameter contains a pointer to the front-end
* instance data.
- * It returns
- * 0 on success;
- * -EINVAL, on bad parameter.
+ * It returns:
+ * 0 on success;
+ * -EINVAL, on bad parameter.
*
* @remove_frontend: Indicates that the given front-end, registered by a call
* to @add_frontend, can no longer be connected as a TS source by this
@@ -565,10 +502,10 @@ enum dmx_demux_caps {
* instance data.
* The @frontend function parameter contains a pointer to the front-end
* instance data.
- * It returns
- * 0 on success;
- * -ENODEV, if the front-end was not found,
- * -EINVAL, on bad parameter.
+ * It returns:
+ * 0 on success;
+ * -ENODEV, if the front-end was not found,
+ * -EINVAL, on bad parameter.
*
* @get_frontends: Provides the APIs of the front-ends that have been
* registered for this demux. Any of the front-ends obtained with this
@@ -592,17 +529,17 @@ enum dmx_demux_caps {
* instance data.
* The @frontend function parameter contains a pointer to the front-end
* instance data.
- * It returns
- * 0 on success;
- * -EINVAL, on bad parameter.
+ * It returns:
+ * 0 on success;
+ * -EINVAL, on bad parameter.
*
* @disconnect_frontend: Disconnects the demux and a front-end previously
* connected by a @connect_frontend call.
* The @demux function parameter contains a pointer to the demux API and
* instance data.
- * It returns
- * 0 on success;
- * -EINVAL on bad parameter.
+ * It returns:
+ * 0 on success;
+ * -EINVAL on bad parameter.
*
* @get_pes_pids: Get the PIDs for DMX_PES_AUDIO0, DMX_PES_VIDEO0,
* DMX_PES_TELETEXT0, DMX_PES_SUBTITLE0 and DMX_PES_PCR0.
@@ -610,9 +547,9 @@ enum dmx_demux_caps {
* instance data.
* The @pids function parameter contains an array with five u16 elements
* where the PIDs will be stored.
- * It returns
- * 0 on success;
- * -EINVAL on bad parameter.
+ * It returns:
+ * 0 on success;
+ * -EINVAL on bad parameter.
*/
struct dmx_demux {
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index a168cbe1c..7b67e1dd9 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -556,7 +556,7 @@ static int dvb_dmxdev_start_feed(struct dmxdev *dmxdev,
struct dmxdev_filter *filter,
struct dmxdev_feed *feed)
{
- struct timespec timeout = { 0 };
+ ktime_t timeout = ktime_set(0, 0);
struct dmx_pes_filter_params *para = &filter->params.pes;
dmx_output_t otype;
int ret;
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
index f82cd1ff4..b5b5b195e 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb-core/dvb_ca_en50221.c
@@ -123,6 +123,7 @@ struct dvb_ca_slot {
/* Private CA-interface information */
struct dvb_ca_private {
+ struct kref refcount;
/* pointer back to the public data structure */
struct dvb_ca_en50221 *pub;
@@ -161,6 +162,34 @@ struct dvb_ca_private {
struct mutex ioctl_mutex;
};
+static void dvb_ca_private_free(struct dvb_ca_private *ca)
+{
+ unsigned int i;
+
+ dvb_unregister_device(ca->dvbdev);
+ for (i = 0; i < ca->slot_count; i++)
+ vfree(ca->slot_info[i].rx_buffer.data);
+
+ kfree(ca->slot_info);
+ kfree(ca);
+}
+
+static void dvb_ca_private_release(struct kref *ref)
+{
+ struct dvb_ca_private *ca = container_of(ref, struct dvb_ca_private, refcount);
+ dvb_ca_private_free(ca);
+}
+
+static void dvb_ca_private_get(struct dvb_ca_private *ca)
+{
+ kref_get(&ca->refcount);
+}
+
+static void dvb_ca_private_put(struct dvb_ca_private *ca)
+{
+ kref_put(&ca->refcount, dvb_ca_private_release);
+}
+
static void dvb_ca_en50221_thread_wakeup(struct dvb_ca_private *ca);
static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * ebuf, int ecount);
static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, u8 * ebuf, int ecount);
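
The new refcount closes a use-after-free: previously dvb_ca_en50221_release() freed the private data even if a file handle was still open. With the kref, the driver holds one reference from kref_init(), every open() takes another, and the structure is freed only when the last reference is dropped. A schematic of the lifetime rules (the hooks appear further below):

    kref_init(&ca->refcount);       /* driver's reference, count = 1 */

    dvb_ca_private_get(ca);         /* each open() pins the structure */
    dvb_ca_private_put(ca);         /* each release() drops one pin */

    /* dvb_ca_en50221_release(): drop the driver's reference; the free
     * happens only after the last file handle is closed as well */
    dvb_ca_private_put(ca);
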
@@ -1558,6 +1587,8 @@ static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file)
dvb_ca_en50221_thread_update_delay(ca);
dvb_ca_en50221_thread_wakeup(ca);
+ dvb_ca_private_get(ca);
+
return 0;
}
@@ -1586,6 +1617,8 @@ static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file)
module_put(ca->pub->owner);
+ dvb_ca_private_put(ca);
+
return err;
}
@@ -1681,6 +1714,7 @@ int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter,
ret = -ENOMEM;
goto exit;
}
+ kref_init(&ca->refcount);
ca->pub = pubca;
ca->flags = flags;
ca->slot_count = slot_count;
@@ -1759,10 +1793,7 @@ void dvb_ca_en50221_release(struct dvb_ca_en50221 *pubca)
for (i = 0; i < ca->slot_count; i++) {
dvb_ca_en50221_slot_shutdown(ca, i);
- vfree(ca->slot_info[i].rx_buffer.data);
}
- kfree(ca->slot_info);
- dvb_unregister_device(ca->dvbdev);
- kfree(ca);
+ dvb_ca_private_put(ca);
pubca->private = NULL;
}
diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c
index 0cc5e9351..a0cf7b0d0 100644
--- a/drivers/media/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb-core/dvb_demux.c
@@ -398,28 +398,23 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
int dvr_done = 0;
if (dvb_demux_speedcheck) {
- struct timespec cur_time, delta_time;
+ ktime_t cur_time;
u64 speed_bytes, speed_timedelta;
demux->speed_pkts_cnt++;
/* show speed every SPEED_PKTS_INTERVAL packets */
if (!(demux->speed_pkts_cnt % SPEED_PKTS_INTERVAL)) {
- cur_time = current_kernel_time();
+ cur_time = ktime_get();
- if (demux->speed_last_time.tv_sec != 0 &&
- demux->speed_last_time.tv_nsec != 0) {
- delta_time = timespec_sub(cur_time,
- demux->speed_last_time);
+ if (ktime_to_ns(demux->speed_last_time) != 0) {
speed_bytes = (u64)demux->speed_pkts_cnt
* 188 * 8;
/* convert to 1024 basis */
speed_bytes = 1000 * div64_u64(speed_bytes,
1024);
- speed_timedelta =
- (u64)timespec_to_ns(&delta_time);
- speed_timedelta = div64_u64(speed_timedelta,
- 1000000); /* nsec -> usec */
+ speed_timedelta = ktime_ms_delta(cur_time,
+ demux->speed_last_time);
printk(KERN_INFO "TS speed %llu Kbits/sec \n",
div64_u64(speed_bytes,
speed_timedelta));
@@ -666,7 +661,7 @@ out:
static int dmx_ts_feed_set(struct dmx_ts_feed *ts_feed, u16 pid, int ts_type,
enum dmx_ts_pes pes_type,
- size_t circular_buffer_size, struct timespec timeout)
+ size_t circular_buffer_size, ktime_t timeout)
{
struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
struct dvb_demux *demux = feed->demux;
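
The speed check above swaps struct timespec arithmetic for ktime_t, a single 64-bit nanosecond value, so the delta comes straight from ktime_ms_delta() instead of timespec_sub() followed by a ns-to-us division. A minimal sketch of measuring a rate this way (the byte counter is an assumption for illustration):

    u64 bytes = 0;
    ktime_t start = ktime_get();

    /* ... account each TS packet: bytes += 188; ... */

    s64 ms = ktime_ms_delta(ktime_get(), start);    /* elapsed, in ms */
    if (ms > 0)     /* bytes per millisecond == kilobytes per second */
            pr_info("rate %llu kB/s\n", div64_u64(bytes, ms));
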
diff --git a/drivers/media/dvb-core/dvb_demux.h b/drivers/media/dvb-core/dvb_demux.h
index ae7fc33c3..5ed3cab4a 100644
--- a/drivers/media/dvb-core/dvb_demux.h
+++ b/drivers/media/dvb-core/dvb_demux.h
@@ -83,7 +83,7 @@ struct dvb_demux_feed {
u8 *buffer;
int buffer_size;
- struct timespec timeout;
+ ktime_t timeout;
struct dvb_demux_filter *filter;
int ts_type;
@@ -134,7 +134,7 @@ struct dvb_demux {
uint8_t *cnt_storage; /* for TS continuity check */
- struct timespec speed_last_time; /* for TS speed check */
+ ktime_t speed_last_time; /* for TS speed check */
uint32_t speed_pkts_cnt; /* for TS speed check */
};
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index c0142614c..be99c8dbc 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -99,6 +99,7 @@ MODULE_PARM_DESC(dvb_mfe_wait_time, "Wait up to <mfe_wait_time> seconds on open(
static DEFINE_MUTEX(frontend_mutex);
struct dvb_frontend_private {
+ struct kref refcount;
/* thread/frontend values */
struct dvb_device *dvbdev;
@@ -137,6 +138,23 @@ struct dvb_frontend_private {
#endif
};
+static void dvb_frontend_private_free(struct kref *ref)
+{
+ struct dvb_frontend_private *fepriv =
+ container_of(ref, struct dvb_frontend_private, refcount);
+ kfree(fepriv);
+}
+
+static void dvb_frontend_private_put(struct dvb_frontend_private *fepriv)
+{
+ kref_put(&fepriv->refcount, dvb_frontend_private_free);
+}
+
+static void dvb_frontend_private_get(struct dvb_frontend_private *fepriv)
+{
+ kref_get(&fepriv->refcount);
+}
+
static void dvb_frontend_wakeup(struct dvb_frontend *fe);
static int dtv_get_frontend(struct dvb_frontend *fe,
struct dtv_frontend_properties *c,
@@ -2543,6 +2561,8 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
fepriv->events.eventr = fepriv->events.eventw = 0;
}
+ dvb_frontend_private_get(fepriv);
+
if (adapter->mfe_shared)
mutex_unlock (&adapter->mfe_lock);
return ret;
@@ -2591,6 +2611,8 @@ static int dvb_frontend_release(struct inode *inode, struct file *file)
fe->ops.ts_bus_ctrl(fe, 0);
}
+ dvb_frontend_private_put(fepriv);
+
return ret;
}
@@ -2679,6 +2701,8 @@ int dvb_register_frontend(struct dvb_adapter* dvb,
}
fepriv = fe->frontend_priv;
+ kref_init(&fepriv->refcount);
+
sema_init(&fepriv->sem, 1);
init_waitqueue_head (&fepriv->wait_queue);
init_waitqueue_head (&fepriv->events.wait_queue);
@@ -2713,18 +2737,11 @@ int dvb_unregister_frontend(struct dvb_frontend* fe)
mutex_lock(&frontend_mutex);
dvb_frontend_stop (fe);
- mutex_unlock(&frontend_mutex);
-
- if (fepriv->dvbdev->users < -1)
- wait_event(fepriv->dvbdev->wait_queue,
- fepriv->dvbdev->users==-1);
-
- mutex_lock(&frontend_mutex);
dvb_unregister_device (fepriv->dvbdev);
/* fe is invalid now */
- kfree(fepriv);
mutex_unlock(&frontend_mutex);
+ dvb_frontend_private_put(fepriv);
return 0;
}
EXPORT_SYMBOL(dvb_unregister_frontend);
diff --git a/drivers/media/dvb-core/dvb_frontend.h b/drivers/media/dvb-core/dvb_frontend.h
index 9592573a0..fb6e84811 100644
--- a/drivers/media/dvb-core/dvb_frontend.h
+++ b/drivers/media/dvb-core/dvb_frontend.h
@@ -1,6 +1,10 @@
/*
* dvb_frontend.h
*
+ * The Digital TV Frontend kABI defines a driver-internal interface for
+ * registering a low-level, hardware-specific driver with a hardware-independent
+ * frontend layer.
+ *
* Copyright (C) 2001 convergence integrated media GmbH
* Copyright (C) 2004 convergence GmbH
*
@@ -42,29 +46,6 @@
#include "dvbdev.h"
-/**
- * DOC: Digital TV Frontend
- *
- * The Digital TV Frontend kABI defines a driver-internal interface for
- * registering low-level, hardware specific driver to a hardware independent
- * frontend layer. It is only of interest for Digital TV device driver writers.
- * The header file for this API is named dvb_frontend.h and located in
- * drivers/media/dvb-core.
- *
- * Before using the Digital TV frontend core, the bridge driver should attach
- * the frontend demod, tuner and SEC devices and call dvb_register_frontend(),
- * in order to register the new frontend at the subsystem. At device
- * detach/removal, the bridge driver should call dvb_unregister_frontend() to
- * remove the frontend from the core and then dvb_frontend_detach() to free the
- * memory allocated by the frontend drivers.
- *
- * The drivers should also call dvb_frontend_suspend() as part of their
- * handler for the &device_driver.suspend(), and dvb_frontend_resume() as
- * part of their handler for &device_driver.resume().
- *
- * A few other optional functions are provided to handle some special cases.
- */
-
/*
* Maximum number of Delivery systems per frontend. It
* should be smaller or equal to 32
@@ -406,7 +387,7 @@ struct dtv_frontend_properties;
* FE_DISHNETWORK_SEND_LEGACY_CMD ioctl (only Satellite).
* Drivers should not use this, except when the DVB
* core emulation fails to provide proper support (e.g.
- * if set_voltage() takes more than 8ms to work), and
+ * if @set_voltage takes more than 8ms to work), and
* when backward compatibility with this legacy API is
* required.
* @i2c_gate_ctrl: controls the I2C gate. Newer drivers should use I2C
@@ -741,13 +722,13 @@ void dvb_frontend_detach(struct dvb_frontend *fe);
* This function prepares a Digital TV frontend to suspend.
*
* In order to prepare the tuner to suspend, if
- * &dvb_frontend_ops.tuner_ops.suspend() is available, it calls it. Otherwise,
- * it will call &dvb_frontend_ops.tuner_ops.sleep(), if available.
+ * &dvb_frontend_ops.tuner_ops.suspend\(\) is available, it calls it. Otherwise,
+ * it will call &dvb_frontend_ops.tuner_ops.sleep\(\), if available.
*
- * It will also call &dvb_frontend_ops.sleep() to put the demod to suspend.
+ * It will also call &dvb_frontend_ops.sleep\(\) to put the demod to suspend.
*
- * The drivers should also call dvb_frontend_suspend() as part of their
- * handler for the &device_driver.suspend().
+ * The drivers should also call dvb_frontend_suspend\(\) as part of their
+ * handler for the &device_driver.suspend\(\).
*/
int dvb_frontend_suspend(struct dvb_frontend *fe);
@@ -758,17 +739,17 @@ int dvb_frontend_suspend(struct dvb_frontend *fe);
*
* This function resumes the usual operation of the tuner after resume.
*
- * In order to resume the frontend, it calls the demod &dvb_frontend_ops.init().
+ * In order to resume the frontend, it calls the demod &dvb_frontend_ops.init\(\).
*
- * If &dvb_frontend_ops.tuner_ops.resume() is available, It, it calls it.
- * Otherwise,t will call &dvb_frontend_ops.tuner_ops.init(), if available.
+ * If &dvb_frontend_ops.tuner_ops.resume\(\) is available, it calls it.
+ * Otherwise, it will call &dvb_frontend_ops.tuner_ops.init\(\), if available.
*
* Once tuner and demods are resumed, it will enforce that the SEC voltage and
* tone are restored to their previous values and wake up the frontend's
* kthread in order to retune the frontend.
*
* The drivers should also call dvb_frontend_resume() as part of their
- * handler for the &device_driver.resume().
+ * handler for the &device_driver.resume\(\).
*/
int dvb_frontend_resume(struct dvb_frontend *fe);
@@ -777,7 +758,7 @@ int dvb_frontend_resume(struct dvb_frontend *fe);
*
* @fe: pointer to the frontend struct
*
- * Calls &dvb_frontend_ops.init() and &dvb_frontend_ops.tuner_ops.init(),
+ * Calls &dvb_frontend_ops.init\(\) and &dvb_frontend_ops.tuner_ops.init\(\),
* and resets SEC tone and voltage (for Satellite systems).
*
* NOTE: Currently, this function is used only by one driver (budget-av).
@@ -799,14 +780,14 @@ void dvb_frontend_reinitialise(struct dvb_frontend *fe);
* satellite subsystem.
*
* It's used internally by the DVB frontend core, in order to emulate
- * %FE_DISHNETWORK_SEND_LEGACY_CMD using the &dvb_frontend_ops.set_voltage()
+ * %FE_DISHNETWORK_SEND_LEGACY_CMD using the &dvb_frontend_ops.set_voltage\(\)
* callback.
*
* NOTE: it should not be used by the drivers, as the emulation for the
* legacy callback is provided by the Kernel. The only situation where this
* should be done in the drivers is when there are bugs in the hardware that
* would prevent the core emulation from working. In such cases, the driver would
- * be writing a &dvb_frontend_ops.dishnetwork_send_legacy_command() and
+ * be writing a &dvb_frontend_ops.dishnetwork_send_legacy_command\(\) and
* calling this function directly.
*/
void dvb_frontend_sleep_until(ktime_t *waketime, u32 add_usec);
diff --git a/drivers/media/dvb-core/dvb_math.h b/drivers/media/dvb-core/dvb_math.h
index 34dc1df03..2f0326674 100644
--- a/drivers/media/dvb-core/dvb_math.h
+++ b/drivers/media/dvb-core/dvb_math.h
@@ -30,11 +30,15 @@
* @value: The value (must be != 0)
*
* to use rational values you can use the following method:
+ *
* intlog2(value) = intlog2(value * 2^x) - x * 2^24
*
* Some usecase examples:
+ *
* intlog2(8) will give 3 << 24 = 3 * 2^24
+ *
* intlog2(9) will give 3 << 24 + ... = 3.16... * 2^24
+ *
* intlog2(1.5) = intlog2(3) - 2^24 = 0.584... * 2^24
*
*
@@ -48,10 +52,13 @@ extern unsigned int intlog2(u32 value);
* @value: The value (must be != 0)
*
* to use rational values you can use the following method:
+ *
* intlog10(value) = intlog10(value * 10^x) - x * 2^24
*
* A use-case example:
+ *
* intlog10(1000) will give 3 << 24 = 3 * 2^24
+ *
* due to the implementation intlog10(1000) might be not exactly 3 * 2^24
*
* look at intlog2 for similar examples
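Both helpers hand back logarithms as 32-bit fixed-point values scaled by 2^24, which is what makes the rational-argument tricks above work: adding or subtracting n * 2^24 shifts the argument by a factor of 2^n (or 10^n). A small userspace model of the convention; the float-based intlog2_model() only builds reference values, the kernel routines are pure integer:

#include <math.h>
#include <stdint.h>
#include <stdio.h>

#define FX_ONE (1u << 24)	/* intlog2()/intlog10() scale: log * 2^24 */

static uint32_t intlog2_model(uint32_t v)
{
	return (uint32_t)(log2((double)v) * FX_ONE + 0.5);
}

int main(void)
{
	/* intlog2(8) = 3 * 2^24 exactly */
	printf("intlog2(8)   = %u (3 * 2^24 = %u)\n",
	       intlog2_model(8), 3u * FX_ONE);

	/* intlog2(1.5) = intlog2(3) - 1 * 2^24 = 0.584... * 2^24 */
	printf("intlog2(1.5) = %u\n", intlog2_model(3) - FX_ONE);
	return 0;
}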
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index ce6a711b4..9914f69a4 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -997,7 +997,7 @@ static int dvb_net_feed_start(struct net_device *dev)
netdev_dbg(dev, "start filtering\n");
priv->secfeed->start_filtering(priv->secfeed);
} else if (priv->feedtype == DVB_NET_FEEDTYPE_ULE) {
- struct timespec timeout = { 0, 10000000 }; // 10 msec
+ ktime_t timeout = ns_to_ktime(10 * NSEC_PER_MSEC);
/* we have payloads encapsulated in TS */
netdev_dbg(dev, "alloc tsfeed\n");
diff --git a/drivers/media/dvb-core/dvb_ringbuffer.h b/drivers/media/dvb-core/dvb_ringbuffer.h
index 3ebc2d34b..8af642399 100644
--- a/drivers/media/dvb-core/dvb_ringbuffer.h
+++ b/drivers/media/dvb-core/dvb_ringbuffer.h
@@ -150,9 +150,6 @@ extern ssize_t dvb_ringbuffer_pkt_write(struct dvb_ringbuffer *rbuf, u8* buf,
/**
* dvb_ringbuffer_pkt_read_user - Read from a packet in the ringbuffer.
- * Note: unlike dvb_ringbuffer_read(), this does NOT update the read pointer
- * in the ringbuffer. You must use dvb_ringbuffer_pkt_dispose() to mark a
- * packet as no longer required.
*
* @rbuf: Ringbuffer concerned.
* @idx: Packet index as returned by dvb_ringbuffer_pkt_next().
@@ -161,9 +158,17 @@ extern ssize_t dvb_ringbuffer_pkt_write(struct dvb_ringbuffer *rbuf, u8* buf,
* @len: Size of destination buffer.
*
* returns Number of bytes read, or -EFAULT.
+ *
+ * .. note::
+ *
+ * unlike dvb_ringbuffer_read(), this does **NOT** update the read pointer
+ * in the ringbuffer. You must use dvb_ringbuffer_pkt_dispose() to mark a
+ * packet as no longer required.
*/
-extern ssize_t dvb_ringbuffer_pkt_read_user(struct dvb_ringbuffer *rbuf, size_t idx,
- int offset, u8 __user *buf, size_t len);
+extern ssize_t dvb_ringbuffer_pkt_read_user(struct dvb_ringbuffer *rbuf,
+ size_t idx,
+ int offset, u8 __user *buf,
+ size_t len);
/**
* dvb_ringbuffer_pkt_read - Read from a packet in the ringbuffer.
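The relocated note is the crux of the packet API: reading never advances the ring's read pointer, disposal does. A hedged kernel-side sketch of the intended loop, modelled on the dvb_ca_en50221.c usage (drain_packets() and its parameters are invented; the dvb_ringbuffer_* calls are the ones declared in this header):

#include "dvb_ringbuffer.h"

static void drain_packets(struct dvb_ringbuffer *rb, u8 *dst, size_t dstlen)
{
	size_t pktlen;
	ssize_t idx = dvb_ringbuffer_pkt_next(rb, -1, &pktlen);

	while (idx != -1) {
		if (pktlen <= dstlen)
			dvb_ringbuffer_pkt_read(rb, idx, 0, dst, pktlen);
		/* disposal, not reading, is what frees ring space */
		dvb_ringbuffer_pkt_dispose(rb, idx);
		idx = dvb_ringbuffer_pkt_next(rb, -1, &pktlen);
	}
}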
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index a82f77c49..c645aa81f 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -73,6 +73,14 @@ config DVB_SI2165
Say Y when you want to support this frontend.
+config DVB_MN88472
+ tristate "Panasonic MN88472"
+ depends on DVB_CORE && I2C
+ select REGMAP_I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ Say Y when you want to support this frontend.
+
config DVB_MN88473
tristate "Panasonic MN88473"
depends on DVB_CORE && I2C
@@ -853,6 +861,13 @@ config DVB_ASCOT2E
help
Say Y when you want to support this frontend.
+config DVB_HELENE
+ tristate "Sony HELENE Sat/Ter tuner (CXD2858ER)"
+ depends on DVB_CORE && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ Say Y when you want to support this frontend.
+
comment "Tools to develop new frontends"
config DVB_DUMMY_FE
diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile
index eb7191f42..e90165ad3 100644
--- a/drivers/media/dvb-frontends/Makefile
+++ b/drivers/media/dvb-frontends/Makefile
@@ -95,6 +95,7 @@ obj-$(CONFIG_DVB_STV0900) += stv0900.o
obj-$(CONFIG_DVB_STV090x) += stv090x.o
obj-$(CONFIG_DVB_STV6110x) += stv6110x.o
obj-$(CONFIG_DVB_M88DS3103) += m88ds3103.o
+obj-$(CONFIG_DVB_MN88472) += mn88472.o
obj-$(CONFIG_DVB_MN88473) += mn88473.o
obj-$(CONFIG_DVB_ISL6423) += isl6423.o
obj-$(CONFIG_DVB_EC100) += ec100.o
@@ -123,3 +124,4 @@ obj-$(CONFIG_DVB_AS102_FE) += as102_fe.o
obj-$(CONFIG_DVB_TC90522) += tc90522.o
obj-$(CONFIG_DVB_HORUS3A) += horus3a.o
obj-$(CONFIG_DVB_ASCOT2E) += ascot2e.o
+obj-$(CONFIG_DVB_HELENE) += helene.o
diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c
index efebe5ce2..9a8157a5f 100644
--- a/drivers/media/dvb-frontends/af9033.c
+++ b/drivers/media/dvb-frontends/af9033.c
@@ -41,7 +41,6 @@ struct af9033_dev {
u64 post_bit_count;
u64 error_block_count;
u64 total_block_count;
- struct delayed_work stat_work;
};
/* write multiple registers */
@@ -468,8 +467,6 @@ static int af9033_init(struct dvb_frontend *fe)
c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
c->post_bit_error.len = 1;
c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
- /* start statistics polling */
- schedule_delayed_work(&dev->stat_work, msecs_to_jiffies(2000));
return 0;
@@ -485,9 +482,6 @@ static int af9033_sleep(struct dvb_frontend *fe)
int ret, i;
u8 tmp;
- /* stop statistics polling */
- cancel_delayed_work_sync(&dev->stat_work);
-
ret = af9033_wr_reg(dev, 0x80004c, 1);
if (ret < 0)
goto err;
@@ -821,36 +815,39 @@ err:
static int af9033_read_status(struct dvb_frontend *fe, enum fe_status *status)
{
struct af9033_dev *dev = fe->demodulator_priv;
- int ret;
- u8 tmp;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret, i, tmp = 0;
+ u8 u8tmp, buf[7];
+
+ dev_dbg(&dev->client->dev, "\n");
*status = 0;
/* radio channel status, 0=no result, 1=has signal, 2=no signal */
- ret = af9033_rd_reg(dev, 0x800047, &tmp);
+ ret = af9033_rd_reg(dev, 0x800047, &u8tmp);
if (ret < 0)
goto err;
/* has signal */
- if (tmp == 0x01)
+ if (u8tmp == 0x01)
*status |= FE_HAS_SIGNAL;
- if (tmp != 0x02) {
+ if (u8tmp != 0x02) {
/* TPS lock */
- ret = af9033_rd_reg_mask(dev, 0x80f5a9, &tmp, 0x01);
+ ret = af9033_rd_reg_mask(dev, 0x80f5a9, &u8tmp, 0x01);
if (ret < 0)
goto err;
- if (tmp)
+ if (u8tmp)
*status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
FE_HAS_VITERBI;
/* full lock */
- ret = af9033_rd_reg_mask(dev, 0x80f999, &tmp, 0x01);
+ ret = af9033_rd_reg_mask(dev, 0x80f999, &u8tmp, 0x01);
if (ret < 0)
goto err;
- if (tmp)
+ if (u8tmp)
*status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
FE_HAS_VITERBI | FE_HAS_SYNC |
FE_HAS_LOCK;
@@ -858,6 +855,148 @@ static int af9033_read_status(struct dvb_frontend *fe, enum fe_status *status)
dev->fe_status = *status;
+ /* signal strength */
+ if (dev->fe_status & FE_HAS_SIGNAL) {
+ if (dev->is_af9035) {
+ ret = af9033_rd_reg(dev, 0x80004a, &u8tmp);
+ if (ret)
+ goto err;
+ tmp = -u8tmp * 1000;
+ } else {
+ ret = af9033_rd_reg(dev, 0x8000f7, &u8tmp);
+ if (ret)
+ goto err;
+ tmp = (u8tmp - 100) * 1000;
+ }
+
+ c->strength.len = 1;
+ c->strength.stat[0].scale = FE_SCALE_DECIBEL;
+ c->strength.stat[0].svalue = tmp;
+ } else {
+ c->strength.len = 1;
+ c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
+
+ /* CNR */
+ if (dev->fe_status & FE_HAS_VITERBI) {
+ u32 snr_val, snr_lut_size;
+ const struct val_snr *snr_lut = NULL;
+
+ /* read value */
+ ret = af9033_rd_regs(dev, 0x80002c, buf, 3);
+ if (ret)
+ goto err;
+
+ snr_val = (buf[2] << 16) | (buf[1] << 8) | (buf[0] << 0);
+
+ /* read superframe number */
+ ret = af9033_rd_reg(dev, 0x80f78b, &u8tmp);
+ if (ret)
+ goto err;
+
+ if (u8tmp)
+ snr_val /= u8tmp;
+
+ /* read current transmission mode */
+ ret = af9033_rd_reg(dev, 0x80f900, &u8tmp);
+ if (ret)
+ goto err;
+
+ switch ((u8tmp >> 0) & 3) {
+ case 0:
+ snr_val *= 4;
+ break;
+ case 1:
+ snr_val *= 1;
+ break;
+ case 2:
+ snr_val *= 2;
+ break;
+ default:
+ snr_val *= 0;
+ break;
+ }
+
+ /* read current modulation */
+ ret = af9033_rd_reg(dev, 0x80f903, &u8tmp);
+ if (ret)
+ goto err;
+
+ switch ((u8tmp >> 0) & 3) {
+ case 0:
+ snr_lut_size = ARRAY_SIZE(qpsk_snr_lut);
+ snr_lut = qpsk_snr_lut;
+ break;
+ case 1:
+ snr_lut_size = ARRAY_SIZE(qam16_snr_lut);
+ snr_lut = qam16_snr_lut;
+ break;
+ case 2:
+ snr_lut_size = ARRAY_SIZE(qam64_snr_lut);
+ snr_lut = qam64_snr_lut;
+ break;
+ default:
+ snr_lut_size = 0;
+ tmp = 0;
+ break;
+ }
+
+ for (i = 0; i < snr_lut_size; i++) {
+ tmp = snr_lut[i].snr * 1000;
+ if (snr_val < snr_lut[i].val)
+ break;
+ }
+
+ c->cnr.len = 1;
+ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ c->cnr.stat[0].svalue = tmp;
+ } else {
+ c->cnr.len = 1;
+ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
+
+ /* UCB/PER/BER */
+ if (dev->fe_status & FE_HAS_LOCK) {
+ /* outer FEC, 204 byte packets */
+ u16 abort_packet_count, rsd_packet_count;
+ /* inner FEC, bits */
+ u32 rsd_bit_err_count;
+
+ /*
+ * Packet count used for measurement is 10000
+ * (rsd_packet_count). Maybe it should be increased?
+ */
+
+ ret = af9033_rd_regs(dev, 0x800032, buf, 7);
+ if (ret)
+ goto err;
+
+ abort_packet_count = (buf[1] << 8) | (buf[0] << 0);
+ rsd_bit_err_count = (buf[4] << 16) | (buf[3] << 8) | buf[2];
+ rsd_packet_count = (buf[6] << 8) | (buf[5] << 0);
+
+ dev->error_block_count += abort_packet_count;
+ dev->total_block_count += rsd_packet_count;
+ dev->post_bit_error += rsd_bit_err_count;
+ dev->post_bit_count += rsd_packet_count * 204 * 8;
+
+ c->block_count.len = 1;
+ c->block_count.stat[0].scale = FE_SCALE_COUNTER;
+ c->block_count.stat[0].uvalue = dev->total_block_count;
+
+ c->block_error.len = 1;
+ c->block_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->block_error.stat[0].uvalue = dev->error_block_count;
+
+ c->post_bit_count.len = 1;
+ c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_count.stat[0].uvalue = dev->post_bit_count;
+
+ c->post_bit_error.len = 1;
+ c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_error.stat[0].uvalue = dev->post_bit_error;
+ }
+
return 0;
err:
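The CNR branch folded in above maps the averaged register value through a per-modulation lookup table: the loop keeps each entry's SNR (in dB, scaled to mdB) until the measured value drops below that entry's threshold, so readings beyond the last threshold saturate at the last entry. The scan, stand-alone, with illustrative thresholds (the real qpsk/qam16/qam64 tables live in af9033_priv.h):

#include <stdint.h>
#include <stdio.h>

struct val_snr {
	uint32_t val;	/* raw register threshold */
	uint8_t snr;	/* SNR in dB below that threshold */
};

static const struct val_snr demo_lut[] = {
	{ 0x0b4771, 0 }, { 0x0c1aed, 1 }, { 0x0d0d27, 2 }, { 0x0e4d19, 3 },
};

int main(void)
{
	uint32_t snr_val = 0x0c5000;	/* pretend averaged readout */
	int tmp = 0;
	size_t i;

	for (i = 0; i < sizeof(demo_lut) / sizeof(demo_lut[0]); i++) {
		tmp = demo_lut[i].snr * 1000;	/* dB -> mdB */
		if (snr_val < demo_lut[i].val)
			break;
	}
	printf("CNR = %d mdB\n", tmp);	/* prints 2000 for this input */
	return 0;
}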
@@ -1059,159 +1198,6 @@ err:
return ret;
}
-static void af9033_stat_work(struct work_struct *work)
-{
- struct af9033_dev *dev = container_of(work, struct af9033_dev, stat_work.work);
- struct dtv_frontend_properties *c = &dev->fe.dtv_property_cache;
- int ret, tmp, i, len;
- u8 u8tmp, buf[7];
-
- dev_dbg(&dev->client->dev, "\n");
-
- /* signal strength */
- if (dev->fe_status & FE_HAS_SIGNAL) {
- if (dev->is_af9035) {
- ret = af9033_rd_reg(dev, 0x80004a, &u8tmp);
- tmp = -u8tmp * 1000;
- } else {
- ret = af9033_rd_reg(dev, 0x8000f7, &u8tmp);
- tmp = (u8tmp - 100) * 1000;
- }
- if (ret)
- goto err;
-
- c->strength.len = 1;
- c->strength.stat[0].scale = FE_SCALE_DECIBEL;
- c->strength.stat[0].svalue = tmp;
- } else {
- c->strength.len = 1;
- c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
- }
-
- /* CNR */
- if (dev->fe_status & FE_HAS_VITERBI) {
- u32 snr_val;
- const struct val_snr *snr_lut;
-
- /* read value */
- ret = af9033_rd_regs(dev, 0x80002c, buf, 3);
- if (ret)
- goto err;
-
- snr_val = (buf[2] << 16) | (buf[1] << 8) | (buf[0] << 0);
-
- /* read superframe number */
- ret = af9033_rd_reg(dev, 0x80f78b, &u8tmp);
- if (ret)
- goto err;
-
- if (u8tmp)
- snr_val /= u8tmp;
-
- /* read current transmission mode */
- ret = af9033_rd_reg(dev, 0x80f900, &u8tmp);
- if (ret)
- goto err;
-
- switch ((u8tmp >> 0) & 3) {
- case 0:
- snr_val *= 4;
- break;
- case 1:
- snr_val *= 1;
- break;
- case 2:
- snr_val *= 2;
- break;
- default:
- goto err_schedule_delayed_work;
- }
-
- /* read current modulation */
- ret = af9033_rd_reg(dev, 0x80f903, &u8tmp);
- if (ret)
- goto err;
-
- switch ((u8tmp >> 0) & 3) {
- case 0:
- len = ARRAY_SIZE(qpsk_snr_lut);
- snr_lut = qpsk_snr_lut;
- break;
- case 1:
- len = ARRAY_SIZE(qam16_snr_lut);
- snr_lut = qam16_snr_lut;
- break;
- case 2:
- len = ARRAY_SIZE(qam64_snr_lut);
- snr_lut = qam64_snr_lut;
- break;
- default:
- goto err_schedule_delayed_work;
- }
-
- for (i = 0; i < len; i++) {
- tmp = snr_lut[i].snr * 1000;
- if (snr_val < snr_lut[i].val)
- break;
- }
-
- c->cnr.len = 1;
- c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
- c->cnr.stat[0].svalue = tmp;
- } else {
- c->cnr.len = 1;
- c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
- }
-
- /* UCB/PER/BER */
- if (dev->fe_status & FE_HAS_LOCK) {
- /* outer FEC, 204 byte packets */
- u16 abort_packet_count, rsd_packet_count;
- /* inner FEC, bits */
- u32 rsd_bit_err_count;
-
- /*
- * Packet count used for measurement is 10000
- * (rsd_packet_count). Maybe it should be increased?
- */
-
- ret = af9033_rd_regs(dev, 0x800032, buf, 7);
- if (ret)
- goto err;
-
- abort_packet_count = (buf[1] << 8) | (buf[0] << 0);
- rsd_bit_err_count = (buf[4] << 16) | (buf[3] << 8) | buf[2];
- rsd_packet_count = (buf[6] << 8) | (buf[5] << 0);
-
- dev->error_block_count += abort_packet_count;
- dev->total_block_count += rsd_packet_count;
- dev->post_bit_error += rsd_bit_err_count;
- dev->post_bit_count += rsd_packet_count * 204 * 8;
-
- c->block_count.len = 1;
- c->block_count.stat[0].scale = FE_SCALE_COUNTER;
- c->block_count.stat[0].uvalue = dev->total_block_count;
-
- c->block_error.len = 1;
- c->block_error.stat[0].scale = FE_SCALE_COUNTER;
- c->block_error.stat[0].uvalue = dev->error_block_count;
-
- c->post_bit_count.len = 1;
- c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
- c->post_bit_count.stat[0].uvalue = dev->post_bit_count;
-
- c->post_bit_error.len = 1;
- c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
- c->post_bit_error.stat[0].uvalue = dev->post_bit_error;
- }
-
-err_schedule_delayed_work:
- schedule_delayed_work(&dev->stat_work, msecs_to_jiffies(2000));
- return;
-err:
- dev_dbg(&dev->client->dev, "failed=%d\n", ret);
-}
-
static struct dvb_frontend_ops af9033_ops = {
.delsys = { SYS_DVBT },
.info = {
@@ -1272,7 +1258,6 @@ static int af9033_probe(struct i2c_client *client,
/* setup the state */
dev->client = client;
- INIT_DELAYED_WORK(&dev->stat_work, af9033_stat_work);
memcpy(&dev->cfg, cfg, sizeof(struct af9033_config));
if (dev->cfg.clock != 12000000) {
@@ -1372,9 +1357,6 @@ static int af9033_remove(struct i2c_client *client)
dev_dbg(&dev->client->dev, "\n");
- /* stop statistics polling */
- cancel_delayed_work_sync(&dev->stat_work);
-
dev->fe.ops.release = NULL;
dev->fe.demodulator_priv = NULL;
kfree(dev);
@@ -1391,6 +1373,7 @@ MODULE_DEVICE_TABLE(i2c, af9033_id_table);
static struct i2c_driver af9033_driver = {
.driver = {
.name = "af9033",
+ .suppress_bind_attrs = true,
},
.probe = af9033_probe,
.remove = af9033_remove,
diff --git a/drivers/media/dvb-frontends/ascot2e.c b/drivers/media/dvb-frontends/ascot2e.c
index f770f6a2c..8cc8c4597 100644
--- a/drivers/media/dvb-frontends/ascot2e.c
+++ b/drivers/media/dvb-frontends/ascot2e.c
@@ -132,7 +132,7 @@ static int ascot2e_write_regs(struct ascot2e_priv *priv,
}
};
- if (len + 1 >= sizeof(buf)) {
+ if (len + 1 > sizeof(buf)) {
dev_warn(&priv->i2c->dev,"wr reg=%04x: len=%d is too big!\n",
reg, len + 1);
return -E2BIG;
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
index 900186ba8..ffe88bc6b 100644
--- a/drivers/media/dvb-frontends/cxd2841er.c
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -1,7 +1,9 @@
/*
* cxd2841er.c
*
- * Sony CXD2441ER digital demodulator driver
+ * Sony digital demodulator driver for
+ * CXD2841ER - DVB-S/S2/T/T2/C/C2
+ * CXD2854ER - DVB-S/S2/T/T2/C/C2, ISDB-T/S
*
* Copyright 2012 Sony Corporation
* Copyright (C) 2014 NetUP Inc.
@@ -34,6 +36,16 @@
#include "cxd2841er_priv.h"
#define MAX_WRITE_REGSIZE 16
+#define LOG2_E_100X 144
+
+/* DVB-C constellation */
+enum sony_dvbc_constellation_t {
+ SONY_DVBC_CONSTELLATION_16QAM,
+ SONY_DVBC_CONSTELLATION_32QAM,
+ SONY_DVBC_CONSTELLATION_64QAM,
+ SONY_DVBC_CONSTELLATION_128QAM,
+ SONY_DVBC_CONSTELLATION_256QAM
+};
enum cxd2841er_state {
STATE_SHUTDOWN = 0,
@@ -51,6 +63,8 @@ struct cxd2841er_priv {
const struct cxd2841er_config *config;
enum cxd2841er_state state;
u8 system;
+ enum cxd2841er_xtal xtal;
+ enum fe_caps caps;
};
static const struct cxd2841er_cnr_data s_cn_data[] = {
@@ -188,6 +202,9 @@ static const struct cxd2841er_cnr_data s2_cn_data[] = {
};
#define MAKE_IFFREQ_CONFIG(iffreq) ((u32)(((iffreq)/41.0)*16777216.0 + 0.5))
+#define MAKE_IFFREQ_CONFIG_XTAL(xtal, iffreq) ((xtal == SONY_XTAL_24000) ? \
+ (u32)(((iffreq)/48.0)*16777216.0 + 0.5) : \
+ (u32)(((iffreq)/41.0)*16777216.0 + 0.5))
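MAKE_IFFREQ_CONFIG_XTAL() turns an IF frequency in MHz into the demod's 24-bit register value: the IF is divided by 48.0 for the 24 MHz crystal (41.0 otherwise, as in the old macro) and scaled by 2^24, with +0.5 for rounding. Worked example for the 4.80 MHz IF programmed in the 8 MHz paths later in this patch:

#include <stdint.h>
#include <stdio.h>

/* same computation as MAKE_IFFREQ_CONFIG_XTAL(); iffreq in MHz */
static uint32_t iffreq_config(double divisor, double iffreq)
{
	return (uint32_t)(iffreq / divisor * 16777216.0 + 0.5);
}

int main(void)
{
	uint32_t v = iffreq_config(48.0, 4.80);	/* 24 MHz crystal */

	/* split into the three bytes written starting at register 0xB6 */
	printf("iffreq = %u -> %02X %02X %02X\n", (unsigned)v,
	       (unsigned)(v >> 16) & 0xff, (unsigned)(v >> 8) & 0xff,
	       (unsigned)v & 0xff);	/* 1677722 -> 19 99 9A */
	return 0;
}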
static void cxd2841er_i2c_debug(struct cxd2841er_priv *priv,
u8 addr, u8 reg, u8 write,
@@ -217,7 +234,7 @@ static int cxd2841er_write_regs(struct cxd2841er_priv *priv,
};
if (len + 1 >= sizeof(buf)) {
- dev_warn(&priv->i2c->dev,"wr reg=%04x: len=%d is too big!\n",
+ dev_warn(&priv->i2c->dev, "wr reg=%04x: len=%d is too big!\n",
reg, len + 1);
return -E2BIG;
}
@@ -282,6 +299,7 @@ static int cxd2841er_read_regs(struct cxd2841er_priv *priv,
KBUILD_MODNAME, ret, i2c_addr, reg);
return ret;
}
+ cxd2841er_i2c_debug(priv, i2c_addr, reg, 0, val, len);
return 0;
}
@@ -427,6 +445,15 @@ static int cxd2841er_sleep_tc_to_active_t2_band(struct cxd2841er_priv *priv,
static int cxd2841er_sleep_tc_to_active_c_band(struct cxd2841er_priv *priv,
u32 bandwidth);
+static int cxd2841er_sleep_tc_to_active_i(struct cxd2841er_priv *priv,
+ u32 bandwidth);
+
+static int cxd2841er_active_i_to_sleep_tc(struct cxd2841er_priv *priv);
+
+static int cxd2841er_sleep_tc_to_shutdown(struct cxd2841er_priv *priv);
+
+static int cxd2841er_shutdown_to_sleep_tc(struct cxd2841er_priv *priv);
+
static int cxd2841er_retune_active(struct cxd2841er_priv *priv,
struct dtv_frontend_properties *p)
{
@@ -454,7 +481,13 @@ static int cxd2841er_retune_active(struct cxd2841er_priv *priv,
priv, p->bandwidth_hz);
case SYS_DVBC_ANNEX_A:
return cxd2841er_sleep_tc_to_active_c_band(
- priv, 8000000);
+ priv, p->bandwidth_hz);
+ case SYS_ISDBT:
+ cxd2841er_active_i_to_sleep_tc(priv);
+ cxd2841er_sleep_tc_to_shutdown(priv);
+ cxd2841er_shutdown_to_sleep_tc(priv);
+ return cxd2841er_sleep_tc_to_active_i(
+ priv, p->bandwidth_hz);
}
}
dev_dbg(&priv->i2c->dev, "%s(): invalid delivery system %d\n",
@@ -669,6 +702,45 @@ static int cxd2841er_active_c_to_sleep_tc(struct cxd2841er_priv *priv)
return 0;
}
+static int cxd2841er_active_i_to_sleep_tc(struct cxd2841er_priv *priv)
+{
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ if (priv->state != STATE_ACTIVE_TC) {
+ dev_err(&priv->i2c->dev, "%s(): invalid state %d\n",
+ __func__, priv->state);
+ return -EINVAL;
+ }
+ /* Set SLV-T Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+ /* disable TS output */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0xc3, 0x01);
+ /* enable Hi-Z setting 1 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x80, 0x3f);
+ /* enable Hi-Z setting 2 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x81, 0xff);
+
+ /* TODO: Cancel demod parameter */
+
+ /* Set SLV-X Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
+ /* disable ADC 1 */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x18, 0x01);
+ /* Set SLV-T Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+ /* Disable ADC 2 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x43, 0x0a);
+ /* Disable ADC 3 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x41, 0x0a);
+ /* Disable ADC clock */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x30, 0x00);
+ /* Disable RF level monitor */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x2f, 0x00);
+ /* Disable demod clock */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x2c, 0x00);
+ priv->state = STATE_SLEEP_TC;
+ return 0;
+}
+
static int cxd2841er_shutdown_to_sleep_s(struct cxd2841er_priv *priv)
{
dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
@@ -686,8 +758,25 @@ static int cxd2841er_shutdown_to_sleep_s(struct cxd2841er_priv *priv)
cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
/* Set demod SW reset */
cxd2841er_write_reg(priv, I2C_SLVX, 0x10, 0x01);
- /* Set X'tal clock to 20.5Mhz */
- cxd2841er_write_reg(priv, I2C_SLVX, 0x14, 0x00);
+
+ switch (priv->xtal) {
+ case SONY_XTAL_20500:
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x14, 0x00);
+ break;
+ case SONY_XTAL_24000:
+ /* Select demod frequency */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x12, 0x00);
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x14, 0x03);
+ break;
+ case SONY_XTAL_41000:
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x14, 0x01);
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s(): invalid demod xtal %d\n",
+ __func__, priv->xtal);
+ return -EINVAL;
+ }
+
/* Set demod mode */
cxd2841er_write_reg(priv, I2C_SLVX, 0x17, 0x0a);
/* Clear demod SW reset */
@@ -712,6 +801,8 @@ static int cxd2841er_shutdown_to_sleep_s(struct cxd2841er_priv *priv)
static int cxd2841er_shutdown_to_sleep_tc(struct cxd2841er_priv *priv)
{
+ u8 data = 0;
+
dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
if (priv->state != STATE_SHUTDOWN) {
dev_dbg(&priv->i2c->dev, "%s(): invalid demod state %d\n",
@@ -727,9 +818,24 @@ static int cxd2841er_shutdown_to_sleep_tc(struct cxd2841er_priv *priv)
cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
/* Set demod SW reset */
cxd2841er_write_reg(priv, I2C_SLVX, 0x10, 0x01);
- /* Set X'tal clock to 20.5Mhz */
+ /* Select ADC clock mode */
cxd2841er_write_reg(priv, I2C_SLVX, 0x13, 0x00);
- cxd2841er_write_reg(priv, I2C_SLVX, 0x14, 0x00);
+
+ switch (priv->xtal) {
+ case SONY_XTAL_20500:
+ data = 0x0;
+ break;
+ case SONY_XTAL_24000:
+ /* Select demod frequency */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x12, 0x00);
+ data = 0x3;
+ break;
+ case SONY_XTAL_41000:
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x12, 0x00);
+ data = 0x1;
+ break;
+ }
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x14, data);
/* Clear demod SW reset */
cxd2841er_write_reg(priv, I2C_SLVX, 0x10, 0x00);
usleep_range(1000, 2000);
@@ -809,11 +915,14 @@ static void cxd2841er_set_ts_clock_mode(struct cxd2841er_priv *priv,
static u8 cxd2841er_chip_id(struct cxd2841er_priv *priv)
{
- u8 chip_id;
+ u8 chip_id = 0;
dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
- cxd2841er_write_reg(priv, I2C_SLVT, 0, 0);
- cxd2841er_read_reg(priv, I2C_SLVT, 0xfd, &chip_id);
+ if (cxd2841er_write_reg(priv, I2C_SLVT, 0, 0) == 0)
+ cxd2841er_read_reg(priv, I2C_SLVT, 0xfd, &chip_id);
+ else if (cxd2841er_write_reg(priv, I2C_SLVX, 0, 0) == 0)
+ cxd2841er_read_reg(priv, I2C_SLVX, 0xfd, &chip_id);
+
return chip_id;
}
@@ -896,6 +1005,25 @@ static int cxd2841er_read_status_c(struct cxd2841er_priv *priv, u8 *tslock)
return 0;
}
+static int cxd2841er_read_status_i(struct cxd2841er_priv *priv,
+ u8 *sync, u8 *tslock, u8 *unlock)
+{
+ u8 data = 0;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ if (priv->state != STATE_ACTIVE_TC)
+ return -EINVAL;
+ /* Set SLV-T Bank : 0x60 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x60);
+ cxd2841er_read_reg(priv, I2C_SLVT, 0x10, &data);
+ dev_dbg(&priv->i2c->dev,
+ "%s(): lock=0x%x\n", __func__, data);
+ *sync = ((data & 0x02) ? 1 : 0);
+ *tslock = ((data & 0x01) ? 1 : 0);
+ *unlock = ((data & 0x10) ? 1 : 0);
+ return 0;
+}
+
static int cxd2841er_read_status_tc(struct dvb_frontend *fe,
enum fe_status *status)
{
@@ -921,6 +1049,20 @@ static int cxd2841er_read_status_tc(struct dvb_frontend *fe,
FE_HAS_SYNC;
if (tslock)
*status |= FE_HAS_LOCK;
+ } else if (priv->system == SYS_ISDBT) {
+ ret = cxd2841er_read_status_i(
+ priv, &sync, &tslock, &unlock);
+ if (ret)
+ goto done;
+ if (unlock)
+ goto done;
+ if (sync)
+ *status = FE_HAS_SIGNAL |
+ FE_HAS_CARRIER |
+ FE_HAS_VITERBI |
+ FE_HAS_SYNC;
+ if (tslock)
+ *status |= FE_HAS_LOCK;
} else if (priv->system == SYS_DVBC_ANNEX_A) {
ret = cxd2841er_read_status_c(priv, &tslock);
if (ret)
@@ -997,6 +1139,76 @@ static int cxd2841er_get_carrier_offset_s_s2(struct cxd2841er_priv *priv,
return 0;
}
+static int cxd2841er_get_carrier_offset_i(struct cxd2841er_priv *priv,
+ u32 bandwidth, int *offset)
+{
+ u8 data[4];
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ if (priv->state != STATE_ACTIVE_TC) {
+ dev_dbg(&priv->i2c->dev, "%s(): invalid state %d\n",
+ __func__, priv->state);
+ return -EINVAL;
+ }
+ if (priv->system != SYS_ISDBT) {
+ dev_dbg(&priv->i2c->dev, "%s(): invalid delivery system %d\n",
+ __func__, priv->system);
+ return -EINVAL;
+ }
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x60);
+ cxd2841er_read_regs(priv, I2C_SLVT, 0x4c, data, sizeof(data));
+ *offset = -1 * sign_extend32(
+ ((u32)(data[0] & 0x1F) << 24) | ((u32)data[1] << 16) |
+ ((u32)data[2] << 8) | (u32)data[3], 29);
+
+ switch (bandwidth) {
+ case 6000000:
+ *offset = -1 * ((*offset) * 8/264);
+ break;
+ case 7000000:
+ *offset = -1 * ((*offset) * 8/231);
+ break;
+ case 8000000:
+ *offset = -1 * ((*offset) * 8/198);
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s(): invalid bandwidth %d\n",
+ __func__, bandwidth);
+ return -EINVAL;
+ }
+
+ dev_dbg(&priv->i2c->dev, "%s(): bandwidth %d offset %d\n",
+ __func__, bandwidth, *offset);
+
+ return 0;
+}
+
+static int cxd2841er_get_carrier_offset_t(struct cxd2841er_priv *priv,
+ u32 bandwidth, int *offset)
+{
+ u8 data[4];
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ if (priv->state != STATE_ACTIVE_TC) {
+ dev_dbg(&priv->i2c->dev, "%s(): invalid state %d\n",
+ __func__, priv->state);
+ return -EINVAL;
+ }
+ if (priv->system != SYS_DVBT) {
+ dev_dbg(&priv->i2c->dev, "%s(): invalid delivery system %d\n",
+ __func__, priv->system);
+ return -EINVAL;
+ }
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+ cxd2841er_read_regs(priv, I2C_SLVT, 0x4c, data, sizeof(data));
+ *offset = -1 * sign_extend32(
+ ((u32)(data[0] & 0x1F) << 24) | ((u32)data[1] << 16) |
+ ((u32)data[2] << 8) | (u32)data[3], 29);
+ *offset *= (bandwidth / 1000000);
+ *offset /= 235;
+ return 0;
+}
+
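Both new carrier-offset helpers assemble a two's-complement value from four register bytes, sign-extend it, negate it, and scale it by the channel bandwidth. The same arithmetic stand-alone, with sign_extend32() written out the way the kernel defines it and the register bytes invented:

#include <stdint.h>
#include <stdio.h>

/* userspace copy of the kernel's sign_extend32(value, sign-bit index) */
static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	uint8_t data[4] = { 0x00, 0x01, 0x00, 0x00 };	/* invented readout */
	int offset;

	offset = -1 * sign_extend32(((uint32_t)(data[0] & 0x1f) << 24) |
				    ((uint32_t)data[1] << 16) |
				    ((uint32_t)data[2] << 8) |
				    (uint32_t)data[3], 29);

	/* DVB-T scaling from the hunk above: * bw_mhz, / 235 */
	offset *= 8;
	offset /= 235;
	printf("carrier offset = %d\n", offset);	/* -2231 */
	return 0;
}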
static int cxd2841er_get_carrier_offset_t2(struct cxd2841er_priv *priv,
u32 bandwidth, int *offset)
{
@@ -1060,6 +1272,24 @@ static int cxd2841er_get_carrier_offset_c(struct cxd2841er_priv *priv,
return 0;
}
+static int cxd2841er_read_packet_errors_c(
+ struct cxd2841er_priv *priv, u32 *penum)
+{
+ u8 data[3];
+
+ *penum = 0;
+ if (priv->state != STATE_ACTIVE_TC) {
+ dev_dbg(&priv->i2c->dev, "%s(): invalid state %d\n",
+ __func__, priv->state);
+ return -EINVAL;
+ }
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x40);
+ cxd2841er_read_regs(priv, I2C_SLVT, 0xea, data, sizeof(data));
+ if (data[2] & 0x01)
+ *penum = ((u32)data[0] << 8) | (u32)data[1];
+ return 0;
+}
+
static int cxd2841er_read_packet_errors_t(
struct cxd2841er_priv *priv, u32 *penum)
{
@@ -1096,11 +1326,85 @@ static int cxd2841er_read_packet_errors_t2(
return 0;
}
-static u32 cxd2841er_mon_read_ber_s(struct cxd2841er_priv *priv)
+static int cxd2841er_read_packet_errors_i(
+ struct cxd2841er_priv *priv, u32 *penum)
+{
+ u8 data[2];
+
+ *penum = 0;
+ if (priv->state != STATE_ACTIVE_TC) {
+ dev_dbg(&priv->i2c->dev, "%s(): invalid state %d\n",
+ __func__, priv->state);
+ return -EINVAL;
+ }
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x60);
+ cxd2841er_read_regs(priv, I2C_SLVT, 0xA1, data, 1);
+
+ if (!(data[0] & 0x01))
+ return 0;
+
+ /* Layer A */
+ cxd2841er_read_regs(priv, I2C_SLVT, 0xA2, data, sizeof(data));
+ *penum = ((u32)data[0] << 8) | (u32)data[1];
+
+ /* Layer B */
+ cxd2841er_read_regs(priv, I2C_SLVT, 0xA4, data, sizeof(data));
+ *penum += ((u32)data[0] << 8) | (u32)data[1];
+
+ /* Layer C */
+ cxd2841er_read_regs(priv, I2C_SLVT, 0xA6, data, sizeof(data));
+ *penum += ((u32)data[0] << 8) | (u32)data[1];
+
+ return 0;
+}
+
+static int cxd2841er_read_ber_c(struct cxd2841er_priv *priv,
+ u32 *bit_error, u32 *bit_count)
+{
+ u8 data[3];
+ u32 bit_err, period_exp;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ if (priv->state != STATE_ACTIVE_TC) {
+ dev_dbg(&priv->i2c->dev, "%s(): invalid state %d\n",
+ __func__, priv->state);
+ return -EINVAL;
+ }
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x40);
+ cxd2841er_read_regs(priv, I2C_SLVT, 0x62, data, sizeof(data));
+ if (!(data[0] & 0x80)) {
+ dev_dbg(&priv->i2c->dev,
+ "%s(): no valid BER data\n", __func__);
+ return -EINVAL;
+ }
+ bit_err = ((u32)(data[0] & 0x3f) << 16) |
+ ((u32)data[1] << 8) |
+ (u32)data[2];
+ cxd2841er_read_reg(priv, I2C_SLVT, 0x60, data);
+ period_exp = data[0] & 0x1f;
+
+ if ((period_exp <= 11) && (bit_err > (1 << period_exp) * 204 * 8)) {
+ dev_dbg(&priv->i2c->dev,
+ "%s(): period_exp(%u) or bit_err(%u) not in range. no valid BER data\n",
+ __func__, period_exp, bit_err);
+ return -EINVAL;
+ }
+
+ dev_dbg(&priv->i2c->dev,
+ "%s(): period_exp(%u) or bit_err(%u) count=%d\n",
+ __func__, period_exp, bit_err,
+ ((1 << period_exp) * 204 * 8));
+
+ *bit_error = bit_err;
+ *bit_count = ((1 << period_exp) * 204 * 8);
+
+ return 0;
+}
+
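cxd2841er_read_ber_c() deliberately stops short of computing a BER: it returns the raw error count plus the size of the measurement window, (1 << period_exp) * 204 * 8 bits, and leaves the division to the stats layer. The arithmetic, stand-alone, with invented register values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t period_exp = 8;	/* register 0x60, low 5 bits */
	uint32_t bit_error = 1234;	/* registers 0x62..0x64 */

	/* window: 2^period_exp packets of 204 bytes */
	uint64_t bit_count = (1ull << period_exp) * 204 * 8;

	/* BER in multiples of 1e-9, rounded to nearest */
	uint64_t ber_e9 = (bit_error * 1000000000ull + bit_count / 2)
			  / bit_count;

	printf("BER = %" PRIu64 "e-9 (%u errors in %" PRIu64 " bits)\n",
	       ber_e9, bit_error, bit_count);
	return 0;
}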
+static int cxd2841er_mon_read_ber_s(struct cxd2841er_priv *priv,
+ u32 *bit_error, u32 *bit_count)
{
u8 data[11];
- u32 bit_error, bit_count;
- u32 temp_q, temp_r;
/* Set SLV-T Bank : 0xA0 */
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xa0);
@@ -1116,40 +1420,30 @@ static u32 cxd2841er_mon_read_ber_s(struct cxd2841er_priv *priv)
*/
cxd2841er_read_regs(priv, I2C_SLVT, 0x35, data, 11);
if (data[0] & 0x01) {
- bit_error = ((u32)(data[1] & 0x3F) << 16) |
- ((u32)(data[2] & 0xFF) << 8) |
- (u32)(data[3] & 0xFF);
- bit_count = ((u32)(data[8] & 0x3F) << 16) |
- ((u32)(data[9] & 0xFF) << 8) |
- (u32)(data[10] & 0xFF);
- /*
- * BER = bitError / bitCount
- * = (bitError * 10^7) / bitCount
- * = ((bitError * 625 * 125 * 128) / bitCount
- */
- if ((bit_count == 0) || (bit_error > bit_count)) {
+ *bit_error = ((u32)(data[1] & 0x3F) << 16) |
+ ((u32)(data[2] & 0xFF) << 8) |
+ (u32)(data[3] & 0xFF);
+ *bit_count = ((u32)(data[8] & 0x3F) << 16) |
+ ((u32)(data[9] & 0xFF) << 8) |
+ (u32)(data[10] & 0xFF);
+ if ((*bit_count == 0) || (*bit_error > *bit_count)) {
dev_dbg(&priv->i2c->dev,
"%s(): invalid bit_error %d, bit_count %d\n",
- __func__, bit_error, bit_count);
- return 0;
+ __func__, *bit_error, *bit_count);
+ return -EINVAL;
}
- temp_q = div_u64_rem(10000000ULL * bit_error,
- bit_count, &temp_r);
- if (bit_count != 1 && temp_r >= bit_count / 2)
- temp_q++;
- return temp_q;
+ return 0;
}
dev_dbg(&priv->i2c->dev, "%s(): no data available\n", __func__);
- return 0;
+ return -EINVAL;
}
-static u32 cxd2841er_mon_read_ber_s2(struct cxd2841er_priv *priv)
+static int cxd2841er_mon_read_ber_s2(struct cxd2841er_priv *priv,
+ u32 *bit_error, u32 *bit_count)
{
u8 data[5];
- u32 bit_error, period;
- u32 temp_q, temp_r;
- u32 result = 0;
+ u32 period;
/* Set SLV-T Bank : 0xB2 */
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xb2);
@@ -1164,10 +1458,10 @@ static u32 cxd2841er_mon_read_ber_s2(struct cxd2841er_priv *priv)
cxd2841er_read_regs(priv, I2C_SLVT, 0x30, data, 5);
if (data[0] & 0x01) {
/* Bit error count */
- bit_error = ((u32)(data[1] & 0x0F) << 24) |
- ((u32)(data[2] & 0xFF) << 16) |
- ((u32)(data[3] & 0xFF) << 8) |
- (u32)(data[4] & 0xFF);
+ *bit_error = ((u32)(data[1] & 0x0F) << 24) |
+ ((u32)(data[2] & 0xFF) << 16) |
+ ((u32)(data[3] & 0xFF) << 8) |
+ (u32)(data[4] & 0xFF);
/* Set SLV-T Bank : 0xA0 */
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xa0);
@@ -1177,40 +1471,30 @@ static u32 cxd2841er_mon_read_ber_s2(struct cxd2841er_priv *priv)
if (period == 0) {
dev_dbg(&priv->i2c->dev,
"%s(): period is 0\n", __func__);
- return 0;
+ return -EINVAL;
}
- if (bit_error > (period * 64800)) {
+ if (*bit_error > (period * 64800)) {
dev_dbg(&priv->i2c->dev,
"%s(): invalid bit_err 0x%x period 0x%x\n",
- __func__, bit_error, period);
- return 0;
+ __func__, *bit_error, period);
+ return -EINVAL;
}
- /*
- * BER = bitError / (period * 64800)
- * = (bitError * 10^7) / (period * 64800)
- * = (bitError * 10^5) / (period * 648)
- * = (bitError * 12500) / (period * 81)
- * = (bitError * 10) * 1250 / (period * 81)
- */
- temp_q = div_u64_rem(12500ULL * bit_error,
- period * 81, &temp_r);
- if (temp_r >= period * 40)
- temp_q++;
- result = temp_q;
+ *bit_count = period * 64800;
+
+ return 0;
} else {
dev_dbg(&priv->i2c->dev,
"%s(): no data available\n", __func__);
}
- return result;
+ return -EINVAL;
}
-static int cxd2841er_read_ber_t2(struct cxd2841er_priv *priv, u32 *ber)
+static int cxd2841er_read_ber_t2(struct cxd2841er_priv *priv,
+ u32 *bit_error, u32 *bit_count)
{
u8 data[4];
- u32 div, q, r;
- u32 bit_err, period_exp, n_ldpc;
+ u32 period_exp, n_ldpc;
- *ber = 0;
if (priv->state != STATE_ACTIVE_TC) {
dev_dbg(&priv->i2c->dev,
"%s(): invalid state %d\n", __func__, priv->state);
@@ -1221,40 +1505,44 @@ static int cxd2841er_read_ber_t2(struct cxd2841er_priv *priv, u32 *ber)
if (!(data[0] & 0x10)) {
dev_dbg(&priv->i2c->dev,
"%s(): no valid BER data\n", __func__);
- return 0;
+ return -EINVAL;
}
- bit_err = ((u32)(data[0] & 0x0f) << 24) |
- ((u32)data[1] << 16) |
- ((u32)data[2] << 8) |
- (u32)data[3];
+ *bit_error = ((u32)(data[0] & 0x0f) << 24) |
+ ((u32)data[1] << 16) |
+ ((u32)data[2] << 8) |
+ (u32)data[3];
cxd2841er_read_reg(priv, I2C_SLVT, 0x6f, data);
period_exp = data[0] & 0x0f;
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x22);
cxd2841er_read_reg(priv, I2C_SLVT, 0x5e, data);
n_ldpc = ((data[0] & 0x03) == 0 ? 16200 : 64800);
- if (bit_err > ((1U << period_exp) * n_ldpc)) {
+ if (*bit_error > ((1U << period_exp) * n_ldpc)) {
dev_dbg(&priv->i2c->dev,
"%s(): invalid BER value\n", __func__);
return -EINVAL;
}
+
+ /*
+ * FIXME: the right thing would be to return bit_error untouched,
+ * but, as we don't know the scale returned by the counters, let's
+ * at least preserve BER = bit_error/bit_count.
+ */
if (period_exp >= 4) {
- div = (1U << (period_exp - 4)) * (n_ldpc / 200);
- q = div_u64_rem(3125ULL * bit_err, div, &r);
+ *bit_count = (1U << (period_exp - 4)) * (n_ldpc / 200);
+ *bit_error *= 3125ULL;
} else {
- div = (1U << period_exp) * (n_ldpc / 200);
- q = div_u64_rem(50000ULL * bit_err, div, &r);
+ *bit_count = (1U << period_exp) * (n_ldpc / 200);
+ *bit_error *= 50000ULL;
}
- *ber = (r >= div / 2) ? q + 1 : q;
return 0;
}
-static int cxd2841er_read_ber_t(struct cxd2841er_priv *priv, u32 *ber)
+static int cxd2841er_read_ber_t(struct cxd2841er_priv *priv,
+ u32 *bit_error, u32 *bit_count)
{
u8 data[2];
- u32 div, q, r;
- u32 bit_err, period;
+ u32 period;
- *ber = 0;
if (priv->state != STATE_ACTIVE_TC) {
dev_dbg(&priv->i2c->dev,
"%s(): invalid state %d\n", __func__, priv->state);
@@ -1268,16 +1556,22 @@ static int cxd2841er_read_ber_t(struct cxd2841er_priv *priv, u32 *ber)
return 0;
}
cxd2841er_read_regs(priv, I2C_SLVT, 0x22, data, sizeof(data));
- bit_err = ((u32)data[0] << 8) | (u32)data[1];
+ *bit_error = ((u32)data[0] << 8) | (u32)data[1];
cxd2841er_read_reg(priv, I2C_SLVT, 0x6f, data);
period = ((data[0] & 0x07) == 0) ? 256 : (4096 << (data[0] & 0x07));
- div = period / 128;
- q = div_u64_rem(78125ULL * bit_err, div, &r);
- *ber = (r >= div / 2) ? q + 1 : q;
+
+ /*
+ * FIXME: the right thing would be to return bit_error untouched,
+ * but, as we don't know the scale returned by the counters, let's
+ * at least preserve BER = bit_error/bit_count.
+ */
+ *bit_count = period / 128;
+ *bit_error *= 78125ULL;
return 0;
}
-static u32 cxd2841er_dvbs_read_snr(struct cxd2841er_priv *priv, u8 delsys)
+static u32 cxd2841er_dvbs_read_snr(struct cxd2841er_priv *priv,
+ u8 delsys, u32 *snr)
{
u8 data[3];
u32 res = 0, value;
@@ -1335,9 +1629,71 @@ static u32 cxd2841er_dvbs_read_snr(struct cxd2841er_priv *priv, u8 delsys)
} else {
dev_dbg(&priv->i2c->dev,
"%s(): no data available\n", __func__);
+ return -EINVAL;
}
done:
- return res;
+ *snr = res;
+ return 0;
+}
+
+static uint32_t sony_log(uint32_t x)
+{
+ return (((10000>>8)*(intlog2(x)>>16) + LOG2_E_100X/2)/LOG2_E_100X);
+}
+
+static int cxd2841er_read_snr_c(struct cxd2841er_priv *priv, u32 *snr)
+{
+ u32 reg;
+ u8 data[2];
+ enum sony_dvbc_constellation_t qam = SONY_DVBC_CONSTELLATION_16QAM;
+
+ *snr = 0;
+ if (priv->state != STATE_ACTIVE_TC) {
+ dev_dbg(&priv->i2c->dev,
+ "%s(): invalid state %d\n",
+ __func__, priv->state);
+ return -EINVAL;
+ }
+
+ /*
+ * Freeze registers: ensure multiple separate register reads
+ * are from the same snapshot
+ */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x01, 0x01);
+
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x40);
+ cxd2841er_read_regs(priv, I2C_SLVT, 0x19, data, 1);
+ qam = (enum sony_dvbc_constellation_t) (data[0] & 0x07);
+ cxd2841er_read_regs(priv, I2C_SLVT, 0x4C, data, 2);
+
+ reg = ((u32)(data[0]&0x1f) << 8) | (u32)data[1];
+ if (reg == 0) {
+ dev_dbg(&priv->i2c->dev,
+ "%s(): reg value out of range\n", __func__);
+ return 0;
+ }
+
+ switch (qam) {
+ case SONY_DVBC_CONSTELLATION_16QAM:
+ case SONY_DVBC_CONSTELLATION_64QAM:
+ case SONY_DVBC_CONSTELLATION_256QAM:
+ /* SNR(dB) = -9.50 * ln(IREG_SNR_ESTIMATE / (24320)) */
+ if (reg < 126)
+ reg = 126;
+ *snr = -95 * (int32_t)sony_log(reg) + 95941;
+ break;
+ case SONY_DVBC_CONSTELLATION_32QAM:
+ case SONY_DVBC_CONSTELLATION_128QAM:
+ /* SNR(dB) = -8.75 * ln(IREG_SNR_ESTIMATE / (20800)) */
+ if (reg < 69)
+ reg = 69;
+ *snr = -88 * (int32_t)sony_log(reg) + 86999;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
}
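sony_log() is a fixed-point natural log in hundredths: intlog2(x) returns log2(x) * 2^24, the >>16 leaves log2(x) * 256, and the (10000>>8) = 39 multiplier with the division by LOG2_E_100X = 144 ≈ log2(e) * 100 converts that to roughly 100 * ln(x). That is why the 16/64/256-QAM constant works out: -95 * sony_log(reg) + 95941 ≈ -9500 * ln(reg) + 9500 * ln(24320), i.e. SNR(mdB) = -9500 * ln(reg / 24320). A stand-alone check (the float intlog2 model only builds reference values; the sample register value is invented):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

#define LOG2_E_100X 144

static uint32_t intlog2_model(uint32_t x)	/* log2(x) * 2^24 */
{
	return (uint32_t)(log2((double)x) * 16777216.0);
}

static uint32_t sony_log(uint32_t x)		/* ~ 100 * ln(x) */
{
	return ((10000 >> 8) * (intlog2_model(x) >> 16) + LOG2_E_100X / 2)
	       / LOG2_E_100X;
}

int main(void)
{
	uint32_t reg = 5000;	/* invented IREG_SNR_ESTIMATE readout */
	int32_t snr_mdb = -95 * (int32_t)sony_log(reg) + 95941;

	printf("sony_log(%u) = %u, 100*ln = %.1f\n",
	       reg, sony_log(reg), 100.0 * log((double)reg));
	printf("SNR = %d mdB, exact %.0f mdB\n", snr_mdb,
	       -9500.0 * log(reg / 24320.0));
	return 0;
}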
static int cxd2841er_read_snr_t(struct cxd2841er_priv *priv, u32 *snr)
@@ -1391,6 +1747,52 @@ static int cxd2841er_read_snr_t2(struct cxd2841er_priv *priv, u32 *snr)
return 0;
}
+static int cxd2841er_read_snr_i(struct cxd2841er_priv *priv, u32 *snr)
+{
+ u32 reg;
+ u8 data[2];
+
+ *snr = 0;
+ if (priv->state != STATE_ACTIVE_TC) {
+ dev_dbg(&priv->i2c->dev,
+ "%s(): invalid state %d\n", __func__,
+ priv->state);
+ return -EINVAL;
+ }
+
+ /* Freeze all registers */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x01, 0x01);
+
+
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x60);
+ cxd2841er_read_regs(priv, I2C_SLVT, 0x28, data, sizeof(data));
+ reg = ((u32)data[0] << 8) | (u32)data[1];
+ if (reg == 0) {
+ dev_dbg(&priv->i2c->dev,
+ "%s(): reg value out of range\n", __func__);
+ return 0;
+ }
+ if (reg > 4996)
+ reg = 4996;
+ *snr = 100 * intlog10(reg) - 9031;
+ return 0;
+}
+
+static u16 cxd2841er_read_agc_gain_c(struct cxd2841er_priv *priv,
+ u8 delsys)
+{
+ u8 data[2];
+
+ cxd2841er_write_reg(
+ priv, I2C_SLVT, 0x00, 0x40);
+ cxd2841er_read_regs(priv, I2C_SLVT, 0x49, data, 2);
+ dev_dbg(&priv->i2c->dev,
+ "%s(): AGC value=%u\n",
+ __func__, (((u16)data[0] & 0x0F) << 8) |
+ (u16)(data[1] & 0xFF));
+ return ((((u16)data[0] & 0x0F) << 8) | (u16)(data[1] & 0xFF)) << 4;
+}
+
static u16 cxd2841er_read_agc_gain_t_t2(struct cxd2841er_priv *priv,
u8 delsys)
{
@@ -1399,6 +1801,26 @@ static u16 cxd2841er_read_agc_gain_t_t2(struct cxd2841er_priv *priv,
cxd2841er_write_reg(
priv, I2C_SLVT, 0x00, (delsys == SYS_DVBT ? 0x10 : 0x20));
cxd2841er_read_regs(priv, I2C_SLVT, 0x26, data, 2);
+ dev_dbg(&priv->i2c->dev,
+ "%s(): AGC value=%u\n",
+ __func__, (((u16)data[0] & 0x0F) << 8) |
+ (u16)(data[1] & 0xFF));
+ return ((((u16)data[0] & 0x0F) << 8) | (u16)(data[1] & 0xFF)) << 4;
+}
+
+static u16 cxd2841er_read_agc_gain_i(struct cxd2841er_priv *priv,
+ u8 delsys)
+{
+ u8 data[2];
+
+ cxd2841er_write_reg(
+ priv, I2C_SLVT, 0x00, 0x60);
+ cxd2841er_read_regs(priv, I2C_SLVT, 0x26, data, 2);
+
+ dev_dbg(&priv->i2c->dev,
+ "%s(): AGC value=%u\n",
+ __func__, (((u16)data[0] & 0x0F) << 8) |
+ (u16)(data[1] & 0xFF));
return ((((u16)data[0] & 0x0F) << 8) | (u16)(data[1] & 0xFF)) << 4;
}
@@ -1417,101 +1839,170 @@ static u16 cxd2841er_read_agc_gain_s(struct cxd2841er_priv *priv)
return ((((u16)data[0] & 0x1F) << 8) | (u16)(data[1] & 0xFF)) << 3;
}
-static int cxd2841er_read_ber(struct dvb_frontend *fe, u32 *ber)
+static void cxd2841er_read_ber(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct cxd2841er_priv *priv = fe->demodulator_priv;
+ u32 ret, bit_error = 0, bit_count = 0;
dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
- *ber = 0;
switch (p->delivery_system) {
+ case SYS_DVBC_ANNEX_A:
+ case SYS_DVBC_ANNEX_B:
+ case SYS_DVBC_ANNEX_C:
+ ret = cxd2841er_read_ber_c(priv, &bit_error, &bit_count);
+ break;
case SYS_DVBS:
- *ber = cxd2841er_mon_read_ber_s(priv);
+ ret = cxd2841er_mon_read_ber_s(priv, &bit_error, &bit_count);
break;
case SYS_DVBS2:
- *ber = cxd2841er_mon_read_ber_s2(priv);
+ ret = cxd2841er_mon_read_ber_s2(priv, &bit_error, &bit_count);
break;
case SYS_DVBT:
- return cxd2841er_read_ber_t(priv, ber);
+ ret = cxd2841er_read_ber_t(priv, &bit_error, &bit_count);
+ break;
case SYS_DVBT2:
- return cxd2841er_read_ber_t2(priv, ber);
- default:
- *ber = 0;
+ ret = cxd2841er_read_ber_t2(priv, &bit_error, &bit_count);
break;
+ default:
+ p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ return;
+ }
+
+ if (!ret) {
+ p->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ p->post_bit_error.stat[0].uvalue += bit_error;
+ p->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ p->post_bit_count.stat[0].uvalue += bit_count;
+ } else {
+ p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
}
- return 0;
}
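Since the driver now only accumulates FE_SCALE_COUNTER statistics instead of returning a pre-scaled BER, userspace is expected to fetch the two counters and divide. A hedged sketch against the DVBv5 uapi (error handling trimmed; the device path is an assumption):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/dvb/frontend.h>

int main(void)
{
	struct dtv_property props[2] = {
		{ .cmd = DTV_STAT_POST_ERROR_BIT_COUNT },
		{ .cmd = DTV_STAT_POST_TOTAL_BIT_COUNT },
	};
	struct dtv_properties cmd = { .num = 2, .props = props };
	int fd = open("/dev/dvb/adapter0/frontend0", O_RDONLY | O_NONBLOCK);

	if (fd < 0 || ioctl(fd, FE_GET_PROPERTY, &cmd) < 0)
		return 1;

	/* counters are cumulative; a real client would diff two samples */
	if (props[0].u.st.stat[0].scale == FE_SCALE_COUNTER &&
	    props[1].u.st.stat[0].scale == FE_SCALE_COUNTER &&
	    props[1].u.st.stat[0].uvalue)
		printf("post BER = %llu / %llu\n",
		       (unsigned long long)props[0].u.st.stat[0].uvalue,
		       (unsigned long long)props[1].u.st.stat[0].uvalue);
	return 0;
}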
-static int cxd2841er_read_signal_strength(struct dvb_frontend *fe,
- u16 *strength)
+static void cxd2841er_read_signal_strength(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct cxd2841er_priv *priv = fe->demodulator_priv;
+ s32 strength;
dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
switch (p->delivery_system) {
case SYS_DVBT:
case SYS_DVBT2:
- *strength = 65535 - cxd2841er_read_agc_gain_t_t2(
- priv, p->delivery_system);
+ strength = cxd2841er_read_agc_gain_t_t2(priv,
+ p->delivery_system);
+ p->strength.stat[0].scale = FE_SCALE_DECIBEL;
+ /* Formula was empirically determined at 410 MHz */
+ p->strength.stat[0].uvalue = strength * 366 / 100 - 89520;
+ break;
+ case SYS_DVBC_ANNEX_A:
+ case SYS_DVBC_ANNEX_B:
+ case SYS_DVBC_ANNEX_C:
+ strength = cxd2841er_read_agc_gain_c(priv,
+ p->delivery_system);
+ p->strength.stat[0].scale = FE_SCALE_DECIBEL;
+ /*
+ * Formula was empirically determined via linear regression,
+ * using frequencies: 175 MHz, 410 MHz and 800 MHz, and a
+ * stream modulated with QAM64
+ */
+ p->strength.stat[0].uvalue = strength * 4045 / 1000 - 85224;
+ break;
+ case SYS_ISDBT:
+ strength = cxd2841er_read_agc_gain_i(priv, p->delivery_system);
+ p->strength.stat[0].scale = FE_SCALE_DECIBEL;
+ /*
+ * Formula was empirically determined via linear regression,
+ * using frequencies: 175 MHz, 410 MHz and 800 MHz.
+ */
+ p->strength.stat[0].uvalue = strength * 3775 / 1000 - 90185;
break;
case SYS_DVBS:
case SYS_DVBS2:
- *strength = 65535 - cxd2841er_read_agc_gain_s(priv);
+ strength = 65535 - cxd2841er_read_agc_gain_s(priv);
+ p->strength.stat[0].scale = FE_SCALE_RELATIVE;
+ p->strength.stat[0].uvalue = strength;
break;
default:
- *strength = 0;
+ p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
break;
}
- return 0;
}
-static int cxd2841er_read_snr(struct dvb_frontend *fe, u16 *snr)
+static void cxd2841er_read_snr(struct dvb_frontend *fe)
{
u32 tmp = 0;
+ int ret = 0;
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct cxd2841er_priv *priv = fe->demodulator_priv;
dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
switch (p->delivery_system) {
+ case SYS_DVBC_ANNEX_A:
+ case SYS_DVBC_ANNEX_B:
+ case SYS_DVBC_ANNEX_C:
+ ret = cxd2841er_read_snr_c(priv, &tmp);
+ break;
case SYS_DVBT:
- cxd2841er_read_snr_t(priv, &tmp);
+ ret = cxd2841er_read_snr_t(priv, &tmp);
break;
case SYS_DVBT2:
- cxd2841er_read_snr_t2(priv, &tmp);
+ ret = cxd2841er_read_snr_t2(priv, &tmp);
+ break;
+ case SYS_ISDBT:
+ ret = cxd2841er_read_snr_i(priv, &tmp);
break;
case SYS_DVBS:
case SYS_DVBS2:
- tmp = cxd2841er_dvbs_read_snr(priv, p->delivery_system);
+ ret = cxd2841er_dvbs_read_snr(priv, p->delivery_system, &tmp);
break;
default:
dev_dbg(&priv->i2c->dev, "%s(): unknown delivery system %d\n",
__func__, p->delivery_system);
- break;
+ p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ return;
+ }
+
+ if (!ret) {
+ p->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ p->cnr.stat[0].svalue = tmp;
+ } else {
+ p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
}
- *snr = tmp & 0xffff;
- return 0;
}
-static int cxd2841er_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
+static void cxd2841er_read_ucblocks(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct cxd2841er_priv *priv = fe->demodulator_priv;
+ u32 ucblocks;
dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
switch (p->delivery_system) {
+ case SYS_DVBC_ANNEX_A:
+ case SYS_DVBC_ANNEX_B:
+ case SYS_DVBC_ANNEX_C:
+ cxd2841er_read_packet_errors_c(priv, &ucblocks);
+ break;
case SYS_DVBT:
- cxd2841er_read_packet_errors_t(priv, ucblocks);
+ cxd2841er_read_packet_errors_t(priv, &ucblocks);
break;
case SYS_DVBT2:
- cxd2841er_read_packet_errors_t2(priv, ucblocks);
+ cxd2841er_read_packet_errors_t2(priv, &ucblocks);
break;
- default:
- *ucblocks = 0;
+ case SYS_ISDBT:
+ cxd2841er_read_packet_errors_i(priv, &ucblocks);
break;
+ default:
+ p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ return;
}
dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
- return 0;
+
+ p->block_error.stat[0].scale = FE_SCALE_COUNTER;
+ p->block_error.stat[0].uvalue = ucblocks;
}
static int cxd2841er_dvbt2_set_profile(
@@ -1524,15 +2015,18 @@ static int cxd2841er_dvbt2_set_profile(
switch (profile) {
case DVBT2_PROFILE_BASE:
tune_mode = 0x01;
- seq_not2d_time = 12;
+ /* Set early unlock time */
+ seq_not2d_time = (priv->xtal == SONY_XTAL_24000)?0x0E:0x0C;
break;
case DVBT2_PROFILE_LITE:
tune_mode = 0x05;
- seq_not2d_time = 40;
+ /* Set early unlock time */
+ seq_not2d_time = (priv->xtal == SONY_XTAL_24000)?0x2E:0x28;
break;
case DVBT2_PROFILE_ANY:
tune_mode = 0x00;
- seq_not2d_time = 40;
+ /* Set early unlock time */
+ seq_not2d_time = (priv->xtal == SONY_XTAL_24000)?0x2E:0x28;
break;
default:
return -EINVAL;
@@ -1574,254 +2068,617 @@ static int cxd2841er_sleep_tc_to_active_t2_band(struct cxd2841er_priv *priv,
u32 bandwidth)
{
u32 iffreq;
- u8 b20_9f[5];
- u8 b10_a6[14];
- u8 b10_b6[3];
- u8 b10_d7;
+ u8 data[MAX_WRITE_REGSIZE];
+
+ const uint8_t nominalRate8bw[3][5] = {
+ /* TRCG Nominal Rate [37:0] */
+ {0x11, 0xF0, 0x00, 0x00, 0x00}, /* 20.5MHz XTal */
+ {0x15, 0x00, 0x00, 0x00, 0x00}, /* 24MHz XTal */
+ {0x11, 0xF0, 0x00, 0x00, 0x00} /* 41MHz XTal */
+ };
+
+ const uint8_t nominalRate7bw[3][5] = {
+ /* TRCG Nominal Rate [37:0] */
+ {0x14, 0x80, 0x00, 0x00, 0x00}, /* 20.5MHz XTal */
+ {0x18, 0x00, 0x00, 0x00, 0x00}, /* 24MHz XTal */
+ {0x14, 0x80, 0x00, 0x00, 0x00} /* 41MHz XTal */
+ };
+
+ const uint8_t nominalRate6bw[3][5] = {
+ /* TRCG Nominal Rate [37:0] */
+ {0x17, 0xEA, 0xAA, 0xAA, 0xAA}, /* 20.5MHz XTal */
+ {0x1C, 0x00, 0x00, 0x00, 0x00}, /* 24MHz XTal */
+ {0x17, 0xEA, 0xAA, 0xAA, 0xAA} /* 41MHz XTal */
+ };
+
+ const uint8_t nominalRate5bw[3][5] = {
+ /* TRCG Nominal Rate [37:0] */
+ {0x1C, 0xB3, 0x33, 0x33, 0x33}, /* 20.5MHz XTal */
+ {0x21, 0x99, 0x99, 0x99, 0x99}, /* 24MHz XTal */
+ {0x1C, 0xB3, 0x33, 0x33, 0x33} /* 41MHz XTal */
+ };
+
+ const uint8_t nominalRate17bw[3][5] = {
+ /* TRCG Nominal Rate [37:0] */
+ {0x58, 0xE2, 0xAF, 0xE0, 0xBC}, /* 20.5MHz XTal */
+ {0x68, 0x0F, 0xA2, 0x32, 0xD0}, /* 24MHz XTal */
+ {0x58, 0xE2, 0xAF, 0xE0, 0xBC} /* 41MHz XTal */
+ };
+
+ const uint8_t itbCoef8bw[3][14] = {
+ {0x26, 0xAF, 0x06, 0xCD, 0x13, 0xBB, 0x28, 0xBA,
+ 0x23, 0xA9, 0x1F, 0xA8, 0x2C, 0xC8}, /* 20.5MHz XTal */
+ {0x2F, 0xBA, 0x28, 0x9B, 0x28, 0x9D, 0x28, 0xA1,
+ 0x29, 0xA5, 0x2A, 0xAC, 0x29, 0xB5}, /* 24MHz XTal */
+ {0x26, 0xAF, 0x06, 0xCD, 0x13, 0xBB, 0x28, 0xBA,
+ 0x23, 0xA9, 0x1F, 0xA8, 0x2C, 0xC8} /* 41MHz XTal */
+ };
+
+ const uint8_t itbCoef7bw[3][14] = {
+ {0x2C, 0xBD, 0x02, 0xCF, 0x04, 0xF8, 0x23, 0xA6,
+ 0x29, 0xB0, 0x26, 0xA9, 0x21, 0xA5}, /* 20.5MHz XTal */
+ {0x30, 0xB1, 0x29, 0x9A, 0x28, 0x9C, 0x28, 0xA0,
+ 0x29, 0xA2, 0x2B, 0xA6, 0x2B, 0xAD}, /* 24MHz XTal */
+ {0x2C, 0xBD, 0x02, 0xCF, 0x04, 0xF8, 0x23, 0xA6,
+ 0x29, 0xB0, 0x26, 0xA9, 0x21, 0xA5} /* 41MHz XTal */
+ };
+
+ const uint8_t itbCoef6bw[3][14] = {
+ {0x27, 0xA7, 0x28, 0xB3, 0x02, 0xF0, 0x01, 0xE8,
+ 0x00, 0xCF, 0x00, 0xE6, 0x23, 0xA4}, /* 20.5MHz XTal */
+ {0x31, 0xA8, 0x29, 0x9B, 0x27, 0x9C, 0x28, 0x9E,
+ 0x29, 0xA4, 0x29, 0xA2, 0x29, 0xA8}, /* 24MHz XTal */
+ {0x27, 0xA7, 0x28, 0xB3, 0x02, 0xF0, 0x01, 0xE8,
+ 0x00, 0xCF, 0x00, 0xE6, 0x23, 0xA4} /* 41MHz XTal */
+ };
+
+ const uint8_t itbCoef5bw[3][14] = {
+ {0x27, 0xA7, 0x28, 0xB3, 0x02, 0xF0, 0x01, 0xE8,
+ 0x00, 0xCF, 0x00, 0xE6, 0x23, 0xA4}, /* 20.5MHz XTal */
+ {0x31, 0xA8, 0x29, 0x9B, 0x27, 0x9C, 0x28, 0x9E,
+ 0x29, 0xA4, 0x29, 0xA2, 0x29, 0xA8}, /* 24MHz XTal */
+ {0x27, 0xA7, 0x28, 0xB3, 0x02, 0xF0, 0x01, 0xE8,
+ 0x00, 0xCF, 0x00, 0xE6, 0x23, 0xA4} /* 41MHz XTal */
+ };
+
+ const uint8_t itbCoef17bw[3][14] = {
+ {0x25, 0xA0, 0x36, 0x8D, 0x2E, 0x94, 0x28, 0x9B,
+ 0x32, 0x90, 0x2C, 0x9D, 0x29, 0x99}, /* 20.5MHz XTal */
+ {0x33, 0x8E, 0x2B, 0x97, 0x2D, 0x95, 0x37, 0x8B,
+ 0x30, 0x97, 0x2D, 0x9A, 0x21, 0xA4}, /* 24MHz XTal */
+ {0x25, 0xA0, 0x36, 0x8D, 0x2E, 0x94, 0x28, 0x9B,
+ 0x32, 0x90, 0x2C, 0x9D, 0x29, 0x99} /* 41MHz XTal */
+ };
+
+ /* Set SLV-T Bank : 0x20 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x20);
- dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
switch (bandwidth) {
case 8000000:
- /* bank 0x20, reg 0x9f */
- b20_9f[0] = 0x11;
- b20_9f[1] = 0xf0;
- b20_9f[2] = 0x00;
- b20_9f[3] = 0x00;
- b20_9f[4] = 0x00;
- /* bank 0x10, reg 0xa6 */
- b10_a6[0] = 0x26;
- b10_a6[1] = 0xaf;
- b10_a6[2] = 0x06;
- b10_a6[3] = 0xcd;
- b10_a6[4] = 0x13;
- b10_a6[5] = 0xbb;
- b10_a6[6] = 0x28;
- b10_a6[7] = 0xba;
- b10_a6[8] = 0x23;
- b10_a6[9] = 0xa9;
- b10_a6[10] = 0x1f;
- b10_a6[11] = 0xa8;
- b10_a6[12] = 0x2c;
- b10_a6[13] = 0xc8;
- iffreq = MAKE_IFFREQ_CONFIG(4.80);
- b10_d7 = 0x00;
+ /* <Timing Recovery setting> */
+ cxd2841er_write_regs(priv, I2C_SLVT,
+ 0x9F, nominalRate8bw[priv->xtal], 5);
+
+ /* Set SLV-T Bank : 0x27 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x27);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT,
+ 0x7a, 0x00, 0x0f);
+
+ /* Set SLV-T Bank : 0x10 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+
+ /* Group delay equaliser settings for
+ * ASCOT2D, ASCOT2E and ASCOT3 tuners
+ */
+ cxd2841er_write_regs(priv, I2C_SLVT,
+ 0xA6, itbCoef8bw[priv->xtal], 14);
+ /* <IF freq setting> */
+ iffreq = MAKE_IFFREQ_CONFIG_XTAL(priv->xtal, 4.80);
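+ /* split the 24-bit IF frequency code into bytes, MSB first */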
+ data[0] = (u8)((iffreq >> 16) & 0xff);
+ data[1] = (u8)((iffreq >> 8) & 0xff);
+ data[2] = (u8)(iffreq & 0xff);
+ cxd2841er_write_regs(priv, I2C_SLVT, 0xB6, data, 3);
+ /* System bandwidth setting */
+ cxd2841er_set_reg_bits(
+ priv, I2C_SLVT, 0xD7, 0x00, 0x07);
break;
case 7000000:
- /* bank 0x20, reg 0x9f */
- b20_9f[0] = 0x14;
- b20_9f[1] = 0x80;
- b20_9f[2] = 0x00;
- b20_9f[3] = 0x00;
- b20_9f[4] = 0x00;
- /* bank 0x10, reg 0xa6 */
- b10_a6[0] = 0x2C;
- b10_a6[1] = 0xBD;
- b10_a6[2] = 0x02;
- b10_a6[3] = 0xCF;
- b10_a6[4] = 0x04;
- b10_a6[5] = 0xF8;
- b10_a6[6] = 0x23;
- b10_a6[7] = 0xA6;
- b10_a6[8] = 0x29;
- b10_a6[9] = 0xB0;
- b10_a6[10] = 0x26;
- b10_a6[11] = 0xA9;
- b10_a6[12] = 0x21;
- b10_a6[13] = 0xA5;
- iffreq = MAKE_IFFREQ_CONFIG(4.2);
- b10_d7 = 0x02;
+ /* <Timing Recovery setting> */
+ cxd2841er_write_regs(priv, I2C_SLVT,
+ 0x9F, nominalRate7bw[priv->xtal], 5);
+
+ /* Set SLV-T Bank : 0x27 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x27);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT,
+ 0x7a, 0x00, 0x0f);
+
+ /* Set SLV-T Bank : 0x10 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+
+ /* Group delay equaliser settings for
+ * ASCOT2D, ASCOT2E and ASCOT3 tuners
+ */
+ cxd2841er_write_regs(priv, I2C_SLVT,
+ 0xA6, itbCoef7bw[priv->xtal], 14);
+ /* <IF freq setting> */
+ iffreq = MAKE_IFFREQ_CONFIG_XTAL(priv->xtal, 4.20);
+ data[0] = (u8)((iffreq >> 16) & 0xff);
+ data[1] = (u8)((iffreq >> 8) & 0xff);
+ data[2] = (u8)(iffreq & 0xff);
+ cxd2841er_write_regs(priv, I2C_SLVT, 0xB6, data, 3);
+ /* System bandwidth setting */
+ cxd2841er_set_reg_bits(
+ priv, I2C_SLVT, 0xD7, 0x02, 0x07);
break;
case 6000000:
- /* bank 0x20, reg 0x9f */
- b20_9f[0] = 0x17;
- b20_9f[1] = 0xEA;
- b20_9f[2] = 0xAA;
- b20_9f[3] = 0xAA;
- b20_9f[4] = 0xAA;
- /* bank 0x10, reg 0xa6 */
- b10_a6[0] = 0x27;
- b10_a6[1] = 0xA7;
- b10_a6[2] = 0x28;
- b10_a6[3] = 0xB3;
- b10_a6[4] = 0x02;
- b10_a6[5] = 0xF0;
- b10_a6[6] = 0x01;
- b10_a6[7] = 0xE8;
- b10_a6[8] = 0x00;
- b10_a6[9] = 0xCF;
- b10_a6[10] = 0x00;
- b10_a6[11] = 0xE6;
- b10_a6[12] = 0x23;
- b10_a6[13] = 0xA4;
- iffreq = MAKE_IFFREQ_CONFIG(3.6);
- b10_d7 = 0x04;
+ /* <Timing Recovery setting> */
+ cxd2841er_write_regs(priv, I2C_SLVT,
+ 0x9F, nominalRate6bw[priv->xtal], 5);
+
+ /* Set SLV-T Bank : 0x27 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x27);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT,
+ 0x7a, 0x00, 0x0f);
+
+ /* Set SLV-T Bank : 0x10 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+
+ /* Group delay equaliser settings for
+ * ASCOT2D, ASCOT2E and ASCOT3 tuners
+ */
+ cxd2841er_write_regs(priv, I2C_SLVT,
+ 0xA6, itbCoef6bw[priv->xtal], 14);
+ /* <IF freq setting> */
+ iffreq = MAKE_IFFREQ_CONFIG_XTAL(priv->xtal, 3.60);
+ data[0] = (u8)((iffreq >> 16) & 0xff);
+ data[1] = (u8)((iffreq >> 8) & 0xff);
+ data[2] = (u8)(iffreq & 0xff);
+ cxd2841er_write_regs(priv, I2C_SLVT, 0xB6, data, 3);
+ /* System bandwidth setting */
+ cxd2841er_set_reg_bits(
+ priv, I2C_SLVT, 0xD7, 0x04, 0x07);
break;
case 5000000:
- /* bank 0x20, reg 0x9f */
- b20_9f[0] = 0x1C;
- b20_9f[1] = 0xB3;
- b20_9f[2] = 0x33;
- b20_9f[3] = 0x33;
- b20_9f[4] = 0x33;
- /* bank 0x10, reg 0xa6 */
- b10_a6[0] = 0x27;
- b10_a6[1] = 0xA7;
- b10_a6[2] = 0x28;
- b10_a6[3] = 0xB3;
- b10_a6[4] = 0x02;
- b10_a6[5] = 0xF0;
- b10_a6[6] = 0x01;
- b10_a6[7] = 0xE8;
- b10_a6[8] = 0x00;
- b10_a6[9] = 0xCF;
- b10_a6[10] = 0x00;
- b10_a6[11] = 0xE6;
- b10_a6[12] = 0x23;
- b10_a6[13] = 0xA4;
- iffreq = MAKE_IFFREQ_CONFIG(3.6);
- b10_d7 = 0x06;
+ /* <Timing Recovery setting> */
+ cxd2841er_write_regs(priv, I2C_SLVT,
+ 0x9F, nominalRate5bw[priv->xtal], 5);
+
+ /* Set SLV-T Bank : 0x27 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x27);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT,
+ 0x7a, 0x00, 0x0f);
+
+ /* Set SLV-T Bank : 0x10 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+
+ /* Group delay equaliser settings for
+ * ASCOT2D, ASCOT2E and ASCOT3 tuners
+ */
+ cxd2841er_write_regs(priv, I2C_SLVT,
+ 0xA6, itbCoef5bw[priv->xtal], 14);
+ /* <IF freq setting> */
+ iffreq = MAKE_IFFREQ_CONFIG_XTAL(priv->xtal, 3.60);
+ data[0] = (u8)((iffreq >> 16) & 0xff);
+ data[1] = (u8)((iffreq >> 8) & 0xff);
+ data[2] = (u8)(iffreq & 0xff);
+ cxd2841er_write_regs(priv, I2C_SLVT, 0xB6, data, 3);
+ /* System bandwidth setting */
+ cxd2841er_set_reg_bits(
+ priv, I2C_SLVT, 0xD7, 0x06, 0x07);
break;
case 1712000:
- /* bank 0x20, reg 0x9f */
- b20_9f[0] = 0x58;
- b20_9f[1] = 0xE2;
- b20_9f[2] = 0xAF;
- b20_9f[3] = 0xE0;
- b20_9f[4] = 0xBC;
- /* bank 0x10, reg 0xa6 */
- b10_a6[0] = 0x25;
- b10_a6[1] = 0xA0;
- b10_a6[2] = 0x36;
- b10_a6[3] = 0x8D;
- b10_a6[4] = 0x2E;
- b10_a6[5] = 0x94;
- b10_a6[6] = 0x28;
- b10_a6[7] = 0x9B;
- b10_a6[8] = 0x32;
- b10_a6[9] = 0x90;
- b10_a6[10] = 0x2C;
- b10_a6[11] = 0x9D;
- b10_a6[12] = 0x29;
- b10_a6[13] = 0x99;
- iffreq = MAKE_IFFREQ_CONFIG(3.5);
- b10_d7 = 0x03;
+ /* <Timing Recovery setting> */
+ cxd2841er_write_regs(priv, I2C_SLVT,
+ 0x9F, nominalRate17bw[priv->xtal], 5);
+
+ /* Set SLV-T Bank : 0x27 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x27);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT,
+ 0x7a, 0x03, 0x0f);
+
+ /* Set SLV-T Bank : 0x10 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+
+ /* Group delay equaliser settings for
+ * ASCOT2D, ASCOT2E and ASCOT3 tuners
+ */
+ cxd2841er_write_regs(priv, I2C_SLVT,
+ 0xA6, itbCoef17bw[priv->xtal], 14);
+ /* <IF freq setting> */
+ iffreq = MAKE_IFFREQ_CONFIG_XTAL(priv->xtal, 3.50);
+ data[0] = (u8)((iffreq >> 16) & 0xff);
+ data[1] = (u8)((iffreq >> 8) & 0xff);
+ data[2] = (u8)(iffreq & 0xff);
+ cxd2841er_write_regs(priv, I2C_SLVT, 0xB6, data, 3);
+ /* System bandwidth setting */
+ cxd2841er_set_reg_bits(
+ priv, I2C_SLVT, 0xD7, 0x03, 0x07);
break;
default:
return -EINVAL;
}
- /* Set SLV-T Bank : 0x20 */
- cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x20);
- cxd2841er_write_regs(priv, I2C_SLVT, 0x9f, b20_9f, sizeof(b20_9f));
- /* Set SLV-T Bank : 0x27 */
- cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x27);
- cxd2841er_set_reg_bits(
- priv, I2C_SLVT, 0x7a,
- (bandwidth == 1712000 ? 0x03 : 0x00), 0x0f);
- /* Set SLV-T Bank : 0x10 */
- cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
- /* Group delay equaliser sett. for ASCOT2E */
- cxd2841er_write_regs(priv, I2C_SLVT, 0xa6, b10_a6, sizeof(b10_a6));
- /* <IF freq setting> */
- b10_b6[0] = (u8) ((iffreq >> 16) & 0xff);
- b10_b6[1] = (u8)((iffreq >> 8) & 0xff);
- b10_b6[2] = (u8)(iffreq & 0xff);
- cxd2841er_write_regs(priv, I2C_SLVT, 0xb6, b10_b6, sizeof(b10_b6));
- /* System bandwidth setting */
- cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xd7, b10_d7, 0x07);
return 0;
}
static int cxd2841er_sleep_tc_to_active_t_band(
struct cxd2841er_priv *priv, u32 bandwidth)
{
- u8 b13_9c[2] = { 0x01, 0x14 };
- u8 bw8mhz_b10_9f[] = { 0x11, 0xF0, 0x00, 0x00, 0x00 };
- u8 bw8mhz_b10_a6[] = { 0x26, 0xAF, 0x06, 0xCD, 0x13, 0xBB,
- 0x28, 0xBA, 0x23, 0xA9, 0x1F, 0xA8, 0x2C, 0xC8 };
- u8 bw8mhz_b10_d9[] = { 0x01, 0xE0 };
- u8 bw8mhz_b17_38[] = { 0x01, 0x02 };
- u8 bw7mhz_b10_9f[] = { 0x14, 0x80, 0x00, 0x00, 0x00 };
- u8 bw7mhz_b10_a6[] = { 0x2C, 0xBD, 0x02, 0xCF, 0x04, 0xF8,
- 0x23, 0xA6, 0x29, 0xB0, 0x26, 0xA9, 0x21, 0xA5 };
- u8 bw7mhz_b10_d9[] = { 0x12, 0xF8 };
- u8 bw7mhz_b17_38[] = { 0x00, 0x03 };
- u8 bw6mhz_b10_9f[] = { 0x17, 0xEA, 0xAA, 0xAA, 0xAA };
- u8 bw6mhz_b10_a6[] = { 0x27, 0xA7, 0x28, 0xB3, 0x02, 0xF0,
- 0x01, 0xE8, 0x00, 0xCF, 0x00, 0xE6, 0x23, 0xA4 };
- u8 bw6mhz_b10_d9[] = { 0x1F, 0xDC };
- u8 bw6mhz_b17_38[] = { 0x00, 0x03 };
- u8 bw5mhz_b10_9f[] = { 0x1C, 0xB3, 0x33, 0x33, 0x33 };
- u8 bw5mhz_b10_a6[] = { 0x27, 0xA7, 0x28, 0xB3, 0x02, 0xF0,
- 0x01, 0xE8, 0x00, 0xCF, 0x00, 0xE6, 0x23, 0xA4 };
- u8 bw5mhz_b10_d9[] = { 0x26, 0x3C };
- u8 bw5mhz_b17_38[] = { 0x00, 0x03 };
- u8 b10_b6[3];
- u8 d7val;
+ u8 data[MAX_WRITE_REGSIZE];
u32 iffreq;
- u8 *b10_9f;
- u8 *b10_a6;
- u8 *b10_d9;
- u8 *b17_38;
+ u8 nominalRate8bw[3][5] = {
+ /* TRCG Nominal Rate [37:0] */
+ {0x11, 0xF0, 0x00, 0x00, 0x00}, /* 20.5MHz XTal */
+ {0x15, 0x00, 0x00, 0x00, 0x00}, /* 24MHz XTal */
+ {0x11, 0xF0, 0x00, 0x00, 0x00} /* 41MHz XTal */
+ };
+ u8 nominalRate7bw[3][5] = {
+ /* TRCG Nominal Rate [37:0] */
+ {0x14, 0x80, 0x00, 0x00, 0x00}, /* 20.5MHz XTal */
+ {0x18, 0x00, 0x00, 0x00, 0x00}, /* 24MHz XTal */
+ {0x14, 0x80, 0x00, 0x00, 0x00} /* 41MHz XTal */
+ };
+ u8 nominalRate6bw[3][5] = {
+ /* TRCG Nominal Rate [37:0] */
+ {0x17, 0xEA, 0xAA, 0xAA, 0xAA}, /* 20.5MHz XTal */
+ {0x1C, 0x00, 0x00, 0x00, 0x00}, /* 24MHz XTal */
+ {0x17, 0xEA, 0xAA, 0xAA, 0xAA} /* 41MHz XTal */
+ };
+ u8 nominalRate5bw[3][5] = {
+ /* TRCG Nominal Rate [37:0] */
+ {0x1C, 0xB3, 0x33, 0x33, 0x33}, /* 20.5MHz XTal */
+ {0x21, 0x99, 0x99, 0x99, 0x99}, /* 24MHz XTal */
+ {0x1C, 0xB3, 0x33, 0x33, 0x33} /* 41MHz XTal */
+ };
- dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ u8 itbCoef8bw[3][14] = {
+ {0x26, 0xAF, 0x06, 0xCD, 0x13, 0xBB, 0x28, 0xBA, 0x23, 0xA9,
+ 0x1F, 0xA8, 0x2C, 0xC8}, /* 20.5MHz XTal */
+ {0x2F, 0xBA, 0x28, 0x9B, 0x28, 0x9D, 0x28, 0xA1, 0x29, 0xA5,
+ 0x2A, 0xAC, 0x29, 0xB5}, /* 24MHz XTal */
+ {0x26, 0xAF, 0x06, 0xCD, 0x13, 0xBB, 0x28, 0xBA, 0x23, 0xA9,
+ 0x1F, 0xA8, 0x2C, 0xC8} /* 41MHz XTal */
+ };
+ u8 itbCoef7bw[3][14] = {
+ {0x2C, 0xBD, 0x02, 0xCF, 0x04, 0xF8, 0x23, 0xA6, 0x29, 0xB0,
+ 0x26, 0xA9, 0x21, 0xA5}, /* 20.5MHz XTal */
+ {0x30, 0xB1, 0x29, 0x9A, 0x28, 0x9C, 0x28, 0xA0, 0x29, 0xA2,
+ 0x2B, 0xA6, 0x2B, 0xAD}, /* 24MHz XTal */
+ {0x2C, 0xBD, 0x02, 0xCF, 0x04, 0xF8, 0x23, 0xA6, 0x29, 0xB0,
+ 0x26, 0xA9, 0x21, 0xA5} /* 41MHz XTal */
+ };
+ u8 itbCoef6bw[3][14] = {
+ {0x27, 0xA7, 0x28, 0xB3, 0x02, 0xF0, 0x01, 0xE8, 0x00, 0xCF,
+ 0x00, 0xE6, 0x23, 0xA4}, /* 20.5MHz XTal */
+ {0x31, 0xA8, 0x29, 0x9B, 0x27, 0x9C, 0x28, 0x9E, 0x29, 0xA4,
+ 0x29, 0xA2, 0x29, 0xA8}, /* 24MHz XTal */
+ {0x27, 0xA7, 0x28, 0xB3, 0x02, 0xF0, 0x01, 0xE8, 0x00, 0xCF,
+ 0x00, 0xE6, 0x23, 0xA4} /* 41MHz XTal */
+ };
+ u8 itbCoef5bw[3][14] = {
+ {0x27, 0xA7, 0x28, 0xB3, 0x02, 0xF0, 0x01, 0xE8, 0x00, 0xCF,
+ 0x00, 0xE6, 0x23, 0xA4}, /* 20.5MHz XTal */
+ {0x31, 0xA8, 0x29, 0x9B, 0x27, 0x9C, 0x28, 0x9E, 0x29, 0xA4,
+ 0x29, 0xA2, 0x29, 0xA8}, /* 24MHz XTal */
+ {0x27, 0xA7, 0x28, 0xB3, 0x02, 0xF0, 0x01, 0xE8, 0x00, 0xCF,
+ 0x00, 0xE6, 0x23, 0xA4} /* 41MHz XTal */
+ };
+
+ /* Set SLV-T Bank : 0x13 */
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x13);
/* Echo performance optimization setting */
- cxd2841er_write_regs(priv, I2C_SLVT, 0x9c, b13_9c, sizeof(b13_9c));
+ data[0] = 0x01;
+ data[1] = 0x14;
+ cxd2841er_write_regs(priv, I2C_SLVT, 0x9C, data, 2);
+
+ /* Set SLV-T Bank : 0x10 */
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
switch (bandwidth) {
case 8000000:
- b10_9f = bw8mhz_b10_9f;
- b10_a6 = bw8mhz_b10_a6;
- b10_d9 = bw8mhz_b10_d9;
- b17_38 = bw8mhz_b17_38;
- d7val = 0;
- iffreq = MAKE_IFFREQ_CONFIG(4.80);
+ /* <Timing Recovery setting> */
+ cxd2841er_write_regs(priv, I2C_SLVT,
+ 0x9F, nominalRate8bw[priv->xtal], 5);
+ /* Group delay equaliser settings for
+ * ASCOT2D, ASCOT2E and ASCOT3 tuners
+ */
+ cxd2841er_write_regs(priv, I2C_SLVT,
+ 0xA6, itbCoef8bw[priv->xtal], 14);
+ /* <IF freq setting> */
+ iffreq = MAKE_IFFREQ_CONFIG_XTAL(priv->xtal, 4.80);
+ data[0] = (u8)((iffreq >> 16) & 0xff);
+ data[1] = (u8)((iffreq >> 8) & 0xff);
+ data[2] = (u8)(iffreq & 0xff);
+ cxd2841er_write_regs(priv, I2C_SLVT, 0xB6, data, 3);
+ /* System bandwidth setting */
+ cxd2841er_set_reg_bits(
+ priv, I2C_SLVT, 0xD7, 0x00, 0x07);
+
+ /* Demod core latency setting */
+ if (priv->xtal == SONY_XTAL_24000) {
+ data[0] = 0x15;
+ data[1] = 0x28;
+ } else {
+ data[0] = 0x01;
+ data[1] = 0xE0;
+ }
+ cxd2841er_write_regs(priv, I2C_SLVT, 0xD9, data, 2);
+
+ /* Notch filter setting */
+ data[0] = 0x01;
+ data[1] = 0x02;
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x17);
+ cxd2841er_write_regs(priv, I2C_SLVT, 0x38, data, 2);
break;
case 7000000:
- b10_9f = bw7mhz_b10_9f;
- b10_a6 = bw7mhz_b10_a6;
- b10_d9 = bw7mhz_b10_d9;
- b17_38 = bw7mhz_b17_38;
- d7val = 2;
- iffreq = MAKE_IFFREQ_CONFIG(4.20);
+ /* <Timing Recovery setting> */
+ cxd2841er_write_regs(priv, I2C_SLVT,
+ 0x9F, nominalRate7bw[priv->xtal], 5);
+ /* Group delay equaliser settings for
+ * ASCOT2D, ASCOT2E and ASCOT3 tuners
+ */
+ cxd2841er_write_regs(priv, I2C_SLVT,
+ 0xA6, itbCoef7bw[priv->xtal], 14);
+ /* <IF freq setting> */
+ iffreq = MAKE_IFFREQ_CONFIG_XTAL(priv->xtal, 4.20);
+ data[0] = (u8)((iffreq >> 16) & 0xff);
+ data[1] = (u8)((iffreq >> 8) & 0xff);
+ data[2] = (u8)(iffreq & 0xff);
+ cxd2841er_write_regs(priv, I2C_SLVT, 0xB6, data, 3);
+ /* System bandwidth setting */
+ cxd2841er_set_reg_bits(
+ priv, I2C_SLVT, 0xD7, 0x02, 0x07);
+
+ /* Demod core latency setting */
+ if (priv->xtal == SONY_XTAL_24000) {
+ data[0] = 0x1F;
+ data[1] = 0xF8;
+ } else {
+ data[0] = 0x12;
+ data[1] = 0xF8;
+ }
+ cxd2841er_write_regs(priv, I2C_SLVT, 0xD9, data, 2);
+
+ /* Notch filter setting */
+ data[0] = 0x00;
+ data[1] = 0x03;
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x17);
+ cxd2841er_write_regs(priv, I2C_SLVT, 0x38, data, 2);
break;
case 6000000:
- b10_9f = bw6mhz_b10_9f;
- b10_a6 = bw6mhz_b10_a6;
- b10_d9 = bw6mhz_b10_d9;
- b17_38 = bw6mhz_b17_38;
- d7val = 4;
- iffreq = MAKE_IFFREQ_CONFIG(3.60);
+ /* <Timing Recovery setting> */
+ cxd2841er_write_regs(priv, I2C_SLVT,
+ 0x9F, nominalRate6bw[priv->xtal], 5);
+ /* Group delay equaliser settings for
+ * ASCOT2D, ASCOT2E and ASCOT3 tuners
+ */
+ cxd2841er_write_regs(priv, I2C_SLVT,
+ 0xA6, itbCoef6bw[priv->xtal], 14);
+ /* <IF freq setting> */
+ iffreq = MAKE_IFFREQ_CONFIG_XTAL(priv->xtal, 3.60);
+ data[0] = (u8)((iffreq >> 16) & 0xff);
+ data[1] = (u8)((iffreq >> 8) & 0xff);
+ data[2] = (u8)(iffreq & 0xff);
+ cxd2841er_write_regs(priv, I2C_SLVT, 0xB6, data, 3);
+ /* System bandwidth setting */
+ cxd2841er_set_reg_bits(
+ priv, I2C_SLVT, 0xD7, 0x04, 0x07);
+
+ /* Demod core latency setting */
+ if (priv->xtal == SONY_XTAL_24000) {
+ data[0] = 0x25;
+ data[1] = 0x4C;
+ } else {
+ data[0] = 0x1F;
+ data[1] = 0xDC;
+ }
+ cxd2841er_write_regs(priv, I2C_SLVT, 0xD9, data, 2);
+
+ /* Notch filter setting */
+ data[0] = 0x00;
+ data[1] = 0x03;
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x17);
+ cxd2841er_write_regs(priv, I2C_SLVT, 0x38, data, 2);
break;
case 5000000:
- b10_9f = bw5mhz_b10_9f;
- b10_a6 = bw5mhz_b10_a6;
- b10_d9 = bw5mhz_b10_d9;
- b17_38 = bw5mhz_b17_38;
- d7val = 6;
- iffreq = MAKE_IFFREQ_CONFIG(3.60);
+ /* <Timing Recovery setting> */
+ cxd2841er_write_regs(priv, I2C_SLVT,
+ 0x9F, nominalRate5bw[priv->xtal], 5);
+ /* Group delay equaliser settings for
+ * ASCOT2D, ASCOT2E and ASCOT3 tuners
+ */
+ cxd2841er_write_regs(priv, I2C_SLVT,
+ 0xA6, itbCoef5bw[priv->xtal], 14);
+ /* <IF freq setting> */
+ iffreq = MAKE_IFFREQ_CONFIG_XTAL(priv->xtal, 3.60);
+ data[0] = (u8)((iffreq >> 16) & 0xff);
+ data[1] = (u8)((iffreq >> 8) & 0xff);
+ data[2] = (u8)(iffreq & 0xff);
+ cxd2841er_write_regs(priv, I2C_SLVT, 0xB6, data, 3);
+ /* System bandwidth setting */
+ cxd2841er_set_reg_bits(
+ priv, I2C_SLVT, 0xD7, 0x06, 0x07);
+
+ /* Demod core latency setting */
+ if (priv->xtal == SONY_XTAL_24000) {
+ data[0] = 0x2C;
+ data[1] = 0xC2;
+ } else {
+ data[0] = 0x26;
+ data[1] = 0x3C;
+ }
+ cxd2841er_write_regs(priv, I2C_SLVT, 0xD9, data, 2);
+
+ /* Notch filter setting */
+ data[0] = 0x00;
+ data[1] = 0x03;
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x17);
+ cxd2841er_write_regs(priv, I2C_SLVT, 0x38, data, 2);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
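+/* Per-bandwidth ISDB-T tuning tables and register writes; 7MHz and
+ * 8MHz are only usable with the 24MHz XTAL (checked below)
+ */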
+static int cxd2841er_sleep_tc_to_active_i_band(
+ struct cxd2841er_priv *priv, u32 bandwidth)
+{
+ u32 iffreq;
+ u8 data[3];
+
+ /* TRCG Nominal Rate */
+ u8 nominalRate8bw[3][5] = {
+ {0x00, 0x00, 0x00, 0x00, 0x00}, /* 20.5MHz XTal */
+ {0x11, 0xB8, 0x00, 0x00, 0x00}, /* 24MHz XTal */
+ {0x00, 0x00, 0x00, 0x00, 0x00} /* 41MHz XTal */
+ };
+
+ u8 nominalRate7bw[3][5] = {
+ {0x00, 0x00, 0x00, 0x00, 0x00}, /* 20.5MHz XTal */
+ {0x14, 0x40, 0x00, 0x00, 0x00}, /* 24MHz XTal */
+ {0x00, 0x00, 0x00, 0x00, 0x00} /* 41MHz XTal */
+ };
+
+ u8 nominalRate6bw[3][5] = {
+ {0x14, 0x2E, 0x00, 0x00, 0x00}, /* 20.5MHz XTal */
+ {0x17, 0xA0, 0x00, 0x00, 0x00}, /* 24MHz XTal */
+ {0x14, 0x2E, 0x00, 0x00, 0x00} /* 41MHz XTal */
+ };
+
+ u8 itbCoef8bw[3][14] = {
+ {0x00}, /* 20.5MHz XTal */
+ {0x2F, 0xBA, 0x28, 0x9B, 0x28, 0x9D, 0x28, 0xA1, 0x29,
+ 0xA5, 0x2A, 0xAC, 0x29, 0xB5}, /* 24MHz Xtal */
+ {0x0}, /* 41MHz XTal */
+ };
+
+ u8 itbCoef7bw[3][14] = {
+ {0x00}, /* 20.5MHz XTal */
+ {0x30, 0xB1, 0x29, 0x9A, 0x28, 0x9C, 0x28, 0xA0, 0x29,
+ 0xA2, 0x2B, 0xA6, 0x2B, 0xAD}, /* 24MHz Xtal */
+ {0x00}, /* 41MHz XTal */
+ };
+
+ u8 itbCoef6bw[3][14] = {
+ {0x27, 0xA7, 0x28, 0xB3, 0x02, 0xF0, 0x01, 0xE8, 0x00,
+ 0xCF, 0x00, 0xE6, 0x23, 0xA4}, /* 20.5MHz XTal */
+ {0x31, 0xA8, 0x29, 0x9B, 0x27, 0x9C, 0x28, 0x9E, 0x29,
+ 0xA4, 0x29, 0xA2, 0x29, 0xA8}, /* 24MHz Xtal */
+ {0x27, 0xA7, 0x28, 0xB3, 0x02, 0xF0, 0x01, 0xE8, 0x00,
+ 0xCF, 0x00, 0xE6, 0x23, 0xA4}, /* 41MHz XTal */
+ };
+
+ dev_dbg(&priv->i2c->dev, "%s() bandwidth=%u\n", __func__, bandwidth);
+ /* Set SLV-T Bank : 0x10 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+
+ /* The 20.5MHz and 41MHz XTALs are not supported for
+ * ISDB-T 7MHz and 8MHz bandwidths
+ */
+ if (priv->xtal != SONY_XTAL_24000 && bandwidth > 6000000) {
+ dev_err(&priv->i2c->dev,
+ "%s(): bandwidth %u is supported only with a 24MHz xtal\n",
+ __func__, bandwidth);
+ return -EINVAL;
+ }
+
+ switch (bandwidth) {
+ case 8000000:
+ /* TRCG Nominal Rate */
+ cxd2841er_write_regs(priv, I2C_SLVT,
+ 0x9F, nominalRate8bw[priv->xtal], 5);
+ /* Group delay equaliser settings optimized for ASCOT tuners */
+ cxd2841er_write_regs(priv, I2C_SLVT,
+ 0xA6, itbCoef8bw[priv->xtal], 14);
+
+ /* IF freq setting */
+ iffreq = MAKE_IFFREQ_CONFIG_XTAL(priv->xtal, 4.75);
+ data[0] = (u8)((iffreq >> 16) & 0xff);
+ data[1] = (u8)((iffreq >> 8) & 0xff);
+ data[2] = (u8)(iffreq & 0xff);
+ cxd2841er_write_regs(priv, I2C_SLVT, 0xB6, data, 3);
+
+ /* System bandwidth setting */
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xD7, 0x00, 0x07);
+
+ /* Demod core latency setting */
+ data[0] = 0x13;
+ data[1] = 0xFC;
+ cxd2841er_write_regs(priv, I2C_SLVT, 0xD9, data, 2);
+
+ /* Acquisition optimization setting */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x12);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x71, 0x03, 0x07);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x15);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0xBE, 0x03);
+ break;
+ case 7000000:
+ /* TRCG Nominal Rate */
+ cxd2841er_write_regs(priv, I2C_SLVT,
+ 0x9F, nominalRate7bw[priv->xtal], 5);
+ /* Group delay equaliser settings optimized for ASCOT tuners */
+ cxd2841er_write_regs(priv, I2C_SLVT,
+ 0xA6, itbCoef7bw[priv->xtal], 14);
+
+ /* IF freq setting */
+ iffreq = MAKE_IFFREQ_CONFIG_XTAL(priv->xtal, 4.15);
+ data[0] = (u8)((iffreq >> 16) & 0xff);
+ data[1] = (u8)((iffreq >> 8) & 0xff);
+ data[2] = (u8)(iffreq & 0xff);
+ cxd2841er_write_regs(priv, I2C_SLVT, 0xB6, data, 3);
+
+ /* System bandwidth setting */
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xD7, 0x02, 0x07);
+
+ /* Demod core latency setting */
+ data[0] = 0x1A;
+ data[1] = 0xFA;
+ cxd2841er_write_regs(priv, I2C_SLVT, 0xD9, data, 2);
+
+ /* Acquisition optimization setting */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x12);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x71, 0x03, 0x07);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x15);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0xBE, 0x02);
+ break;
+ case 6000000:
+ /* TRCG Nominal Rate */
+ cxd2841er_write_regs(priv, I2C_SLVT,
+ 0x9F, nominalRate6bw[priv->xtal], 5);
+ /* Group delay equaliser settings optimized for ASCOT tuners */
+ cxd2841er_write_regs(priv, I2C_SLVT,
+ 0xA6, itbCoef6bw[priv->xtal], 14);
+
+ /* IF freq setting */
+ iffreq = MAKE_IFFREQ_CONFIG_XTAL(priv->xtal, 3.55);
+ data[0] = (u8)((iffreq >> 16) & 0xff);
+ data[1] = (u8)((iffreq >> 8) & 0xff);
+ data[2] = (u8)(iffreq & 0xff);
+ cxd2841er_write_regs(priv, I2C_SLVT, 0xB6, data, 3);
+
+ /* System bandwidth setting */
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xD7, 0x04, 0x07);
+
+ /* Demod core latency setting */
+ if (priv->xtal == SONY_XTAL_24000) {
+ data[0] = 0x1F;
+ data[1] = 0x79;
+ } else {
+ data[0] = 0x1A;
+ data[1] = 0xE2;
+ }
+ cxd2841er_write_regs(priv, I2C_SLVT, 0xD9, data, 2);
+
+ /* Acquisition optimization setting */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x12);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x71, 0x07, 0x07);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x15);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0xBE, 0x02);
break;
default:
dev_dbg(&priv->i2c->dev, "%s(): invalid bandwidth %d\n",
- __func__, bandwidth);
+ __func__, bandwidth);
return -EINVAL;
}
- /* <IF freq setting> */
- b10_b6[0] = (u8) ((iffreq >> 16) & 0xff);
- b10_b6[1] = (u8)((iffreq >> 8) & 0xff);
- b10_b6[2] = (u8)(iffreq & 0xff);
- cxd2841er_write_regs(
- priv, I2C_SLVT, 0x9f, b10_9f, sizeof(bw8mhz_b10_9f));
- cxd2841er_write_regs(
- priv, I2C_SLVT, 0xa6, b10_a6, sizeof(bw8mhz_b10_a6));
- cxd2841er_write_regs(priv, I2C_SLVT, 0xb6, b10_b6, sizeof(b10_b6));
- cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xd7, d7val, 0x7);
- cxd2841er_write_regs(
- priv, I2C_SLVT, 0xd9, b10_d9, sizeof(bw8mhz_b10_d9));
- cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x17);
- cxd2841er_write_regs(
- priv, I2C_SLVT, 0x38, b17_38, sizeof(bw8mhz_b17_38));
return 0;
}
@@ -1837,7 +2694,7 @@ static int cxd2841er_sleep_tc_to_active_c_band(struct cxd2841er_priv *priv,
u8 b10_b6[3];
u32 iffreq;
- dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ dev_dbg(&priv->i2c->dev, "%s() bw=%d\n", __func__, bandwidth);
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
switch (bandwidth) {
case 8000000:
@@ -1854,7 +2711,7 @@ static int cxd2841er_sleep_tc_to_active_c_band(struct cxd2841er_priv *priv,
iffreq = MAKE_IFFREQ_CONFIG(3.7);
break;
default:
- dev_dbg(&priv->i2c->dev, "%s(): unsupported bandwidth %d\n",
+ dev_err(&priv->i2c->dev, "%s(): unsupported bandwidth %d\n",
__func__, bandwidth);
return -EINVAL;
}
@@ -1902,6 +2759,7 @@ static int cxd2841er_sleep_tc_to_active_t(struct cxd2841er_priv *priv,
u32 bandwidth)
{
u8 data[2] = { 0x09, 0x54 };
+ u8 data24m[3] = {0xDC, 0x6C, 0x00}; /* applied only with the 24MHz XTAL */
dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
cxd2841er_set_ts_clock_mode(priv, SYS_DVBT);
@@ -1919,7 +2777,11 @@ static int cxd2841er_sleep_tc_to_active_t(struct cxd2841er_priv *priv,
cxd2841er_write_reg(priv, I2C_SLVT, 0x30, 0x00);
/* Enable ADC 1 */
cxd2841er_write_reg(priv, I2C_SLVT, 0x41, 0x1a);
- /* xtal freq 20.5MHz */
+ /* Enable ADC 2 & 3 */
+ if (priv->xtal == SONY_XTAL_41000) {
+ data[0] = 0x0A;
+ data[1] = 0xD4;
+ }
cxd2841er_write_regs(priv, I2C_SLVT, 0x43, data, 2);
/* Enable ADC 4 */
cxd2841er_write_reg(priv, I2C_SLVX, 0x18, 0x00);
@@ -1947,6 +2809,15 @@ static int cxd2841er_sleep_tc_to_active_t(struct cxd2841er_priv *priv,
/* TSIF setting */
cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xce, 0x01, 0x01);
cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xcf, 0x01, 0x01);
+
+ if (priv->xtal == SONY_XTAL_24000) {
+ /* Set SLV-T Bank : 0x10 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0xBF, 0x60);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x18);
+ cxd2841er_write_regs(priv, I2C_SLVT, 0x24, data24m, 3);
+ }
+
cxd2841er_sleep_tc_to_active_t_band(priv, bandwidth);
/* Set SLV-T Bank : 0x00 */
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
@@ -1961,7 +2832,7 @@ static int cxd2841er_sleep_tc_to_active_t(struct cxd2841er_priv *priv,
static int cxd2841er_sleep_tc_to_active_t2(struct cxd2841er_priv *priv,
u32 bandwidth)
{
- u8 data[2] = { 0x09, 0x54 };
+ u8 data[MAX_WRITE_REGSIZE];
dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
cxd2841er_set_ts_clock_mode(priv, SYS_DVBT2);
@@ -1974,12 +2845,21 @@ static int cxd2841er_sleep_tc_to_active_t2(struct cxd2841er_priv *priv,
/* Enable demod clock */
cxd2841er_write_reg(priv, I2C_SLVT, 0x2c, 0x01);
/* Disable RF level monitor */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x59, 0x00);
cxd2841er_write_reg(priv, I2C_SLVT, 0x2f, 0x00);
/* Enable ADC clock */
cxd2841er_write_reg(priv, I2C_SLVT, 0x30, 0x00);
/* Enable ADC 1 */
cxd2841er_write_reg(priv, I2C_SLVT, 0x41, 0x1a);
- /* xtal freq 20.5MHz */
+
+ if (priv->xtal == SONY_XTAL_41000) {
+ data[0] = 0x0A;
+ data[1] = 0xD4;
+ } else {
+ data[0] = 0x09;
+ data[1] = 0x54;
+ }
+
cxd2841er_write_regs(priv, I2C_SLVT, 0x43, data, 2);
/* Enable ADC 4 */
cxd2841er_write_reg(priv, I2C_SLVX, 0x18, 0x00);
@@ -2002,6 +2882,10 @@ static int cxd2841er_sleep_tc_to_active_t2(struct cxd2841er_priv *priv,
/* Set SLV-T Bank : 0x2b */
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x2b);
cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x76, 0x20, 0x70);
+ /* Set SLV-T Bank : 0x23 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x23);
+ /* L1 Control setting */
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xE6, 0x00, 0x03);
/* Set SLV-T Bank : 0x00 */
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
/* TSIF setting */
@@ -2020,6 +2904,72 @@ static int cxd2841er_sleep_tc_to_active_t2(struct cxd2841er_priv *priv,
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x2b);
cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x11, 0x20, 0x3f);
+ /* 24MHz Xtal setting */
+ if (priv->xtal == SONY_XTAL_24000) {
+ /* Set SLV-T Bank : 0x11 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x11);
+ data[0] = 0xEB;
+ data[1] = 0x03;
+ data[2] = 0x3B;
+ cxd2841er_write_regs(priv, I2C_SLVT, 0x33, data, 3);
+
+ /* Set SLV-T Bank : 0x20 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x20);
+ data[0] = 0x5E;
+ data[1] = 0x5E;
+ data[2] = 0x47;
+ cxd2841er_write_regs(priv, I2C_SLVT, 0x95, data, 3);
+
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x99, 0x18);
+
+ data[0] = 0x3F;
+ data[1] = 0xFF;
+ cxd2841er_write_regs(priv, I2C_SLVT, 0xD9, data, 2);
+
+ /* Set SLV-T Bank : 0x24 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x24);
+ data[0] = 0x0B;
+ data[1] = 0x72;
+ cxd2841er_write_regs(priv, I2C_SLVT, 0x34, data, 2);
+
+ data[0] = 0x93;
+ data[1] = 0xF3;
+ data[2] = 0x00;
+ cxd2841er_write_regs(priv, I2C_SLVT, 0xD2, data, 3);
+
+ data[0] = 0x05;
+ data[1] = 0xB8;
+ data[2] = 0xD8;
+ cxd2841er_write_regs(priv, I2C_SLVT, 0xDD, data, 3);
+
+ cxd2841er_write_reg(priv, I2C_SLVT, 0xE0, 0x00);
+
+ /* Set SLV-T Bank : 0x25 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x25);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0xED, 0x60);
+
+ /* Set SLV-T Bank : 0x27 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x27);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0xFA, 0x34);
+
+ /* Set SLV-T Bank : 0x2B */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x2B);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x4B, 0x2F);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x9E, 0x0E);
+
+ /* Set SLV-T Bank : 0x2D */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x2D);
+ data[0] = 0x89;
+ data[1] = 0x89;
+ cxd2841er_write_regs(priv, I2C_SLVT, 0x24, data, 2);
+
+ /* Set SLV-T Bank : 0x5E */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x5E);
+ data[0] = 0x24;
+ data[1] = 0x95;
+ cxd2841er_write_regs(priv, I2C_SLVT, 0x8C, data, 2);
+ }
+
cxd2841er_sleep_tc_to_active_t2_band(priv, bandwidth);
/* Set SLV-T Bank : 0x00 */
@@ -2032,6 +2982,84 @@ static int cxd2841er_sleep_tc_to_active_t2(struct cxd2841er_priv *priv,
return 0;
}
+/* ISDB-Tb part */
+static int cxd2841er_sleep_tc_to_active_i(struct cxd2841er_priv *priv,
+ u32 bandwidth)
+{
+ u8 data[2] = { 0x09, 0x54 };
+ u8 data24m[2] = {0x60, 0x00};
+ u8 data24m2[3] = {0xB7, 0x1B, 0x00};
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ cxd2841er_set_ts_clock_mode(priv, SYS_DVBT);
+ /* Set SLV-X Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x00, 0x00);
+ /* Set demod mode: 0x06 selects ISDB-T */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x17, 0x06);
+ /* Set SLV-T Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+ /* Enable demod clock */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x2c, 0x01);
+ /* Enable RF level monitor */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x2f, 0x01);
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x59, 0x01);
+ /* Enable ADC clock */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x30, 0x00);
+ /* Enable ADC 1 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x41, 0x1a);
+ /* xtal freq 20.5MHz or 24MHz */
+ cxd2841er_write_regs(priv, I2C_SLVT, 0x43, data, 2);
+ /* Enable ADC 4 */
+ cxd2841er_write_reg(priv, I2C_SLVX, 0x18, 0x00);
+ /* ASCOT setting ON */
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xa5, 0x01, 0x01);
+ /* FEC Auto Recovery setting */
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x30, 0x01, 0x01);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x31, 0x00, 0x01);
+ /* ISDB-T initial setting */
+ /* Set SLV-T Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xce, 0x00, 0x01);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xcf, 0x00, 0x01);
+ /* Set SLV-T Bank : 0x10 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x69, 0x04, 0x07);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x6B, 0x03, 0x07);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x9D, 0x50, 0xFF);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xD3, 0x06, 0x1F);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xED, 0x00, 0x01);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xE2, 0xCE, 0x80);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xF2, 0x13, 0x10);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xDE, 0x2E, 0x3F);
+ /* Set SLV-T Bank : 0x15 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x15);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xDE, 0x02, 0x03);
+ /* Set SLV-T Bank : 0x1E */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x1E);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x73, 0x68, 0xFF);
+ /* Set SLV-T Bank : 0x63 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x63);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x81, 0x00, 0x01);
+
+ /* for xtal 24MHz */
+ /* Set SLV-T Bank : 0x10 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
+ cxd2841er_write_regs(priv, I2C_SLVT, 0xBF, data24m, 2);
+ /* Set SLV-T Bank : 0x60 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x60);
+ cxd2841er_write_regs(priv, I2C_SLVT, 0xA8, data24m2, 3);
+
+ cxd2841er_sleep_tc_to_active_i_band(priv, bandwidth);
+ /* Set SLV-T Bank : 0x00 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
+ /* Disable HiZ Setting 1 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x80, 0x28);
+ /* Disable HiZ Setting 2 */
+ cxd2841er_write_reg(priv, I2C_SLVT, 0x81, 0x00);
+ priv->state = STATE_ACTIVE_TC;
+ return 0;
+}
+
static int cxd2841er_sleep_tc_to_active_c(struct cxd2841er_priv *priv,
u32 bandwidth)
{
@@ -2079,7 +3107,7 @@ static int cxd2841er_sleep_tc_to_active_c(struct cxd2841er_priv *priv,
cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xce, 0x01, 0x01);
cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xcf, 0x01, 0x01);
- cxd2841er_sleep_tc_to_active_c_band(priv, 8000000);
+ cxd2841er_sleep_tc_to_active_c_band(priv, bandwidth);
/* Set SLV-T Bank : 0x00 */
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
/* Disable HiZ Setting 1 */
@@ -2094,8 +3122,6 @@ static int cxd2841er_get_frontend(struct dvb_frontend *fe,
struct dtv_frontend_properties *p)
{
enum fe_status status = 0;
- u16 strength = 0, snr = 0;
- u32 errors = 0, ber = 0;
struct cxd2841er_priv *priv = fe->demodulator_priv;
dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
@@ -2104,32 +3130,18 @@ static int cxd2841er_get_frontend(struct dvb_frontend *fe,
else if (priv->state == STATE_ACTIVE_TC)
cxd2841er_read_status_tc(fe, &status);
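+ /* the read_* helpers now fill the DVBv5 stats cache directly */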
+ cxd2841er_read_signal_strength(fe);
+
if (status & FE_HAS_LOCK) {
- cxd2841er_read_signal_strength(fe, &strength);
- p->strength.len = 1;
- p->strength.stat[0].scale = FE_SCALE_RELATIVE;
- p->strength.stat[0].uvalue = strength;
- cxd2841er_read_snr(fe, &snr);
- p->cnr.len = 1;
- p->cnr.stat[0].scale = FE_SCALE_DECIBEL;
- p->cnr.stat[0].svalue = snr;
- cxd2841er_read_ucblocks(fe, &errors);
- p->block_error.len = 1;
- p->block_error.stat[0].scale = FE_SCALE_COUNTER;
- p->block_error.stat[0].uvalue = errors;
- cxd2841er_read_ber(fe, &ber);
- p->post_bit_error.len = 1;
- p->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
- p->post_bit_error.stat[0].uvalue = ber;
+ cxd2841er_read_snr(fe);
+ cxd2841er_read_ucblocks(fe);
+
+ cxd2841er_read_ber(fe);
} else {
- p->strength.len = 1;
- p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
- p->cnr.len = 1;
p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
- p->block_error.len = 1;
p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
- p->post_bit_error.len = 1;
p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
}
return 0;
}
@@ -2142,10 +3154,10 @@ static int cxd2841er_set_frontend_s(struct dvb_frontend *fe)
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
u32 symbol_rate = p->symbol_rate/1000;
- dev_dbg(&priv->i2c->dev, "%s(): %s frequency=%d symbol_rate=%d\n",
+ dev_dbg(&priv->i2c->dev, "%s(): %s frequency=%d symbol_rate=%d xtal=%d\n",
__func__,
(p->delivery_system == SYS_DVBS ? "DVB-S" : "DVB-S2"),
- p->frequency, symbol_rate);
+ p->frequency, symbol_rate, priv->xtal);
switch (priv->state) {
case STATE_SLEEP_S:
ret = cxd2841er_sleep_s_to_active_s(
@@ -2189,6 +3201,13 @@ static int cxd2841er_set_frontend_s(struct dvb_frontend *fe)
__func__, carr_offset);
}
done:
+ /* Reset stats */
+ p->strength.stat[0].scale = FE_SCALE_RELATIVE;
+ p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
return ret;
}
@@ -2199,7 +3218,8 @@ static int cxd2841er_set_frontend_tc(struct dvb_frontend *fe)
struct cxd2841er_priv *priv = fe->demodulator_priv;
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
- dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ dev_dbg(&priv->i2c->dev, "%s() delivery_system=%d bandwidth_hz=%d\n",
+ __func__, p->delivery_system, p->bandwidth_hz);
if (p->delivery_system == SYS_DVBT) {
priv->system = SYS_DVBT;
switch (priv->state) {
@@ -2233,9 +3253,33 @@ static int cxd2841er_set_frontend_tc(struct dvb_frontend *fe)
__func__, priv->state);
ret = -EINVAL;
}
+ } else if (p->delivery_system == SYS_ISDBT) {
+ priv->system = SYS_ISDBT;
+ switch (priv->state) {
+ case STATE_SLEEP_TC:
+ ret = cxd2841er_sleep_tc_to_active_i(
+ priv, p->bandwidth_hz);
+ break;
+ case STATE_ACTIVE_TC:
+ ret = cxd2841er_retune_active(priv, p);
+ break;
+ default:
+ dev_dbg(&priv->i2c->dev, "%s(): invalid state %d\n",
+ __func__, priv->state);
+ ret = -EINVAL;
+ }
} else if (p->delivery_system == SYS_DVBC_ANNEX_A ||
p->delivery_system == SYS_DVBC_ANNEX_C) {
priv->system = SYS_DVBC_ANNEX_A;
+ /* correct bandwidth */
+ if (p->bandwidth_hz != 6000000 &&
+ p->bandwidth_hz != 7000000 &&
+ p->bandwidth_hz != 8000000) {
+ p->bandwidth_hz = 8000000;
+ dev_dbg(&priv->i2c->dev, "%s(): forcing bandwidth to %d\n",
+ __func__, p->bandwidth_hz);
+ }
+
switch (priv->state) {
case STATE_SLEEP_TC:
ret = cxd2841er_sleep_tc_to_active_c(
@@ -2321,7 +3365,8 @@ static int cxd2841er_tune_tc(struct dvb_frontend *fe,
struct cxd2841er_priv *priv = fe->demodulator_priv;
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
- dev_dbg(&priv->i2c->dev, "%s(): re_tune %d\n", __func__, re_tune);
+ dev_dbg(&priv->i2c->dev, "%s(): re_tune %d bandwidth=%d\n", __func__,
+ re_tune, p->bandwidth_hz);
if (re_tune) {
ret = cxd2841er_set_frontend_tc(fe);
if (ret)
@@ -2329,15 +3374,32 @@ static int cxd2841er_tune_tc(struct dvb_frontend *fe,
cxd2841er_read_status_tc(fe, status);
if (*status & FE_HAS_LOCK) {
switch (priv->system) {
+ case SYS_ISDBT:
+ ret = cxd2841er_get_carrier_offset_i(
+ priv, p->bandwidth_hz,
+ &carrier_offset);
+ if (ret)
+ return ret;
+ break;
case SYS_DVBT:
+ ret = cxd2841er_get_carrier_offset_t(
+ priv, p->bandwidth_hz,
+ &carrier_offset);
+ if (ret)
+ return ret;
+ break;
case SYS_DVBT2:
ret = cxd2841er_get_carrier_offset_t2(
priv, p->bandwidth_hz,
&carrier_offset);
+ if (ret)
+ return ret;
break;
case SYS_DVBC_ANNEX_A:
ret = cxd2841er_get_carrier_offset_c(
priv, &carrier_offset);
+ if (ret)
+ return ret;
break;
default:
dev_dbg(&priv->i2c->dev,
@@ -2345,8 +3407,6 @@ static int cxd2841er_tune_tc(struct dvb_frontend *fe,
__func__, priv->system);
return -EINVAL;
}
- if (ret)
- return ret;
dev_dbg(&priv->i2c->dev, "%s(): carrier offset %d\n",
__func__, carrier_offset);
p->frequency += carrier_offset;
@@ -2382,6 +3442,9 @@ static int cxd2841er_sleep_tc(struct dvb_frontend *fe)
case SYS_DVBT2:
cxd2841er_active_t2_to_sleep_tc(priv);
break;
+ case SYS_ISDBT:
+ cxd2841er_active_i_to_sleep_tc(priv);
+ break;
case SYS_DVBC_ANNEX_A:
cxd2841er_active_c_to_sleep_tc(priv);
break;
@@ -2512,23 +3575,57 @@ static enum dvbfe_algo cxd2841er_get_algo(struct dvb_frontend *fe)
return DVBFE_ALGO_HW;
}
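+/* Initialise the DVBv5 statistics cache: signal strength is reported
+ * as a relative value, everything else starts as "not available"
+ */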
+static void cxd2841er_init_stats(struct dvb_frontend *fe)
+{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+
+ p->strength.len = 1;
+ p->strength.stat[0].scale = FE_SCALE_RELATIVE;
+ p->cnr.len = 1;
+ p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->block_error.len = 1;
+ p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->post_bit_error.len = 1;
+ p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->post_bit_count.len = 1;
+ p->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+}
+
static int cxd2841er_init_s(struct dvb_frontend *fe)
{
struct cxd2841er_priv *priv = fe->demodulator_priv;
+ /* Sanity check: force the demod into the SHUTDOWN state first */
+ if (priv->state == STATE_SLEEP_S) {
+ dev_dbg(&priv->i2c->dev, "%s() forcing sleep->shutdown\n",
+ __func__);
+ cxd2841er_sleep_s_to_shutdown(priv);
+ } else if (priv->state == STATE_ACTIVE_S) {
+ dev_dbg(&priv->i2c->dev, "%s() forcing active->sleep->shutdown\n",
+ __func__);
+ cxd2841er_active_s_to_sleep_s(priv);
+ cxd2841er_sleep_s_to_shutdown(priv);
+ }
+
dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
cxd2841er_shutdown_to_sleep_s(priv);
/* SONY_DEMOD_CONFIG_SAT_IFAGCNEG set to 1 */
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0xa0);
cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xb9, 0x01, 0x01);
+
+ cxd2841er_init_stats(fe);
+
return 0;
}
static int cxd2841er_init_tc(struct dvb_frontend *fe)
{
struct cxd2841er_priv *priv = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
- dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ dev_dbg(&priv->i2c->dev, "%s() bandwidth_hz=%d\n",
+ __func__, p->bandwidth_hz);
cxd2841er_shutdown_to_sleep_tc(priv);
/* SONY_DEMOD_CONFIG_IFAGCNEG = 1 */
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
@@ -2538,12 +3635,14 @@ static int cxd2841er_init_tc(struct dvb_frontend *fe)
/* SONY_DEMOD_CONFIG_PARALLEL_SEL = 1 */
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xc4, 0x00, 0x80);
+
+ cxd2841er_init_stats(fe);
+
return 0;
}
static struct dvb_frontend_ops cxd2841er_dvbs_s2_ops;
-static struct dvb_frontend_ops cxd2841er_dvbt_t2_ops;
-static struct dvb_frontend_ops cxd2841er_dvbc_ops;
+static struct dvb_frontend_ops cxd2841er_t_c_ops;
static struct dvb_frontend *cxd2841er_attach(struct cxd2841er_config *cfg,
struct i2c_adapter *i2c,
@@ -2551,6 +3650,7 @@ static struct dvb_frontend *cxd2841er_attach(struct cxd2841er_config *cfg,
{
u8 chip_id = 0;
const char *type;
+ const char *name;
struct cxd2841er_priv *priv = NULL;
/* allocate memory for the internal state */
@@ -2561,46 +3661,49 @@ static struct dvb_frontend *cxd2841er_attach(struct cxd2841er_config *cfg,
priv->config = cfg;
priv->i2c_addr_slvx = (cfg->i2c_addr + 4) >> 1;
priv->i2c_addr_slvt = (cfg->i2c_addr) >> 1;
- /* create dvb_frontend */
- switch (system) {
- case SYS_DVBS:
- memcpy(&priv->frontend.ops,
- &cxd2841er_dvbs_s2_ops,
- sizeof(struct dvb_frontend_ops));
- type = "S/S2";
- break;
- case SYS_DVBT:
- memcpy(&priv->frontend.ops,
- &cxd2841er_dvbt_t2_ops,
- sizeof(struct dvb_frontend_ops));
- type = "T/T2";
- break;
- case SYS_DVBC_ANNEX_A:
- memcpy(&priv->frontend.ops,
- &cxd2841er_dvbc_ops,
- sizeof(struct dvb_frontend_ops));
- type = "C/C2";
- break;
- default:
- kfree(priv);
- return NULL;
- }
+ priv->xtal = cfg->xtal;
priv->frontend.demodulator_priv = priv;
dev_info(&priv->i2c->dev,
- "%s(): attaching CXD2841ER DVB-%s frontend\n",
- __func__, type);
- dev_info(&priv->i2c->dev,
"%s(): I2C adapter %p SLVX addr %x SLVT addr %x\n",
__func__, priv->i2c,
priv->i2c_addr_slvx, priv->i2c_addr_slvt);
chip_id = cxd2841er_chip_id(priv);
- if (chip_id != CXD2841ER_CHIP_ID) {
+ switch (chip_id) {
+ case CXD2841ER_CHIP_ID:
+ snprintf(cxd2841er_t_c_ops.info.name, 128,
+ "Sony CXD2841ER DVB-T/T2/C demodulator");
+ name = "CXD2841ER";
+ break;
+ case CXD2854ER_CHIP_ID:
+ snprintf(cxd2841er_t_c_ops.info.name, 128,
+ "Sony CXD2854ER DVB-T/T2/C and ISDB-T demodulator");
+ cxd2841er_t_c_ops.delsys[3] = SYS_ISDBT;
+ name = "CXD2854ER";
+ break;
+ default:
dev_err(&priv->i2c->dev, "%s(): invalid chip ID 0x%02x\n",
- __func__, chip_id);
+ __func__, chip_id);
priv->frontend.demodulator_priv = NULL;
kfree(priv);
return NULL;
}
+
+ /* create dvb_frontend */
+ if (system == SYS_DVBS) {
+ memcpy(&priv->frontend.ops,
+ &cxd2841er_dvbs_s2_ops,
+ sizeof(struct dvb_frontend_ops));
+ type = "S/S2";
+ } else {
+ memcpy(&priv->frontend.ops,
+ &cxd2841er_t_c_ops,
+ sizeof(struct dvb_frontend_ops));
+ type = "T/T2/C/ISDB-T";
+ }
+
+ dev_info(&priv->i2c->dev,
+ "%s(): attaching %s DVB-%s frontend\n",
+ __func__, name, type);
dev_info(&priv->i2c->dev, "%s(): chip ID 0x%02x OK.\n",
__func__, chip_id);
return &priv->frontend;
@@ -2613,19 +3716,12 @@ struct dvb_frontend *cxd2841er_attach_s(struct cxd2841er_config *cfg,
}
EXPORT_SYMBOL(cxd2841er_attach_s);
-struct dvb_frontend *cxd2841er_attach_t(struct cxd2841er_config *cfg,
+struct dvb_frontend *cxd2841er_attach_t_c(struct cxd2841er_config *cfg,
struct i2c_adapter *i2c)
{
- return cxd2841er_attach(cfg, i2c, SYS_DVBT);
+ return cxd2841er_attach(cfg, i2c, 0);
}
-EXPORT_SYMBOL(cxd2841er_attach_t);
-
-struct dvb_frontend *cxd2841er_attach_c(struct cxd2841er_config *cfg,
- struct i2c_adapter *i2c)
-{
- return cxd2841er_attach(cfg, i2c, SYS_DVBC_ANNEX_A);
-}
-EXPORT_SYMBOL(cxd2841er_attach_c);
+EXPORT_SYMBOL(cxd2841er_attach_t_c);
static struct dvb_frontend_ops cxd2841er_dvbs_s2_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2 },
@@ -2655,10 +3751,10 @@ static struct dvb_frontend_ops cxd2841er_dvbs_s2_ops = {
.tune = cxd2841er_tune_s
};
-static struct dvb_frontend_ops cxd2841er_dvbt_t2_ops = {
- .delsys = { SYS_DVBT, SYS_DVBT2 },
+static struct dvb_frontend_ops cxd2841er_t_c_ops = {
+ .delsys = { SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_A },
.info = {
- .name = "Sony CXD2841ER DVB-T/T2 demodulator",
+ .name = "", /* will set in attach function */
.caps = FE_CAN_FEC_1_2 |
FE_CAN_FEC_2_3 |
FE_CAN_FEC_3_4 |
@@ -2691,37 +3787,6 @@ static struct dvb_frontend_ops cxd2841er_dvbt_t2_ops = {
.get_frontend_algo = cxd2841er_get_algo
};
-static struct dvb_frontend_ops cxd2841er_dvbc_ops = {
- .delsys = { SYS_DVBC_ANNEX_A },
- .info = {
- .name = "Sony CXD2841ER DVB-C demodulator",
- .caps = FE_CAN_FEC_1_2 |
- FE_CAN_FEC_2_3 |
- FE_CAN_FEC_3_4 |
- FE_CAN_FEC_5_6 |
- FE_CAN_FEC_7_8 |
- FE_CAN_FEC_AUTO |
- FE_CAN_QAM_16 |
- FE_CAN_QAM_32 |
- FE_CAN_QAM_64 |
- FE_CAN_QAM_128 |
- FE_CAN_QAM_256 |
- FE_CAN_QAM_AUTO |
- FE_CAN_INVERSION_AUTO,
- .frequency_min = 42000000,
- .frequency_max = 1002000000
- },
- .init = cxd2841er_init_tc,
- .sleep = cxd2841er_sleep_tc,
- .release = cxd2841er_release,
- .set_frontend = cxd2841er_set_frontend_tc,
- .get_frontend = cxd2841er_get_frontend,
- .read_status = cxd2841er_read_status_tc,
- .tune = cxd2841er_tune_tc,
- .i2c_gate_ctrl = cxd2841er_i2c_gate_ctrl,
- .get_frontend_algo = cxd2841er_get_algo,
-};
-
-MODULE_DESCRIPTION("Sony CXD2841ER DVB-C/C2/T/T2/S/S2 demodulator driver");
-MODULE_AUTHOR("Sergey Kozlov <serjk@netup.ru>");
+MODULE_DESCRIPTION("Sony CXD2841ER/CXD2854ER DVB-C/C2/T/T2/S/S2 demodulator driver");
+MODULE_AUTHOR("Sergey Kozlov <serjk@netup.ru>, Abylay Ospan <aospan@netup.ru>");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/cxd2841er.h b/drivers/media/dvb-frontends/cxd2841er.h
index 3472bdd58..62ad5f073 100644
--- a/drivers/media/dvb-frontends/cxd2841er.h
+++ b/drivers/media/dvb-frontends/cxd2841er.h
@@ -25,41 +25,39 @@
#include <linux/kconfig.h>
#include <linux/dvb/frontend.h>
+enum cxd2841er_xtal {
+ SONY_XTAL_20500, /* 20.5 MHz */
+ SONY_XTAL_24000, /* 24 MHz */
+ SONY_XTAL_41000 /* 41 MHz */
+};
+
struct cxd2841er_config {
u8 i2c_addr;
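+ /* frequency of the demod crystal; must match the fitted XTAL */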
+ enum cxd2841er_xtal xtal;
};
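+
+/*
+ * Minimal attach sketch (illustrative only: the i2c adapter pointer and
+ * the 0xc8 demod I2C address are board-specific assumptions):
+ *
+ *	static struct cxd2841er_config demod_cfg = {
+ *		.i2c_addr = 0xc8,
+ *		.xtal = SONY_XTAL_24000,
+ *	};
+ *	fe = cxd2841er_attach_t_c(&demod_cfg, i2c_adapter);
+ */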
#if IS_REACHABLE(CONFIG_DVB_CXD2841ER)
extern struct dvb_frontend *cxd2841er_attach_s(struct cxd2841er_config *cfg,
struct i2c_adapter *i2c);
-extern struct dvb_frontend *cxd2841er_attach_t(struct cxd2841er_config *cfg,
- struct i2c_adapter *i2c);
-
-extern struct dvb_frontend *cxd2841er_attach_c(struct cxd2841er_config *cfg,
+extern struct dvb_frontend *cxd2841er_attach_t_c(struct cxd2841er_config *cfg,
struct i2c_adapter *i2c);
#else
static inline struct dvb_frontend *cxd2841er_attach_s(
struct cxd2841er_config *cfg,
struct i2c_adapter *i2c)
{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ pr_warn("%s: driver disabled by Kconfig\n", __func__);
return NULL;
}
-static inline struct dvb_frontend *cxd2841er_attach_t(
+static inline struct dvb_frontend *cxd2841er_attach_t_c(
struct cxd2841er_config *cfg, struct i2c_adapter *i2c)
{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ pr_warn("%s: driver disabled by Kconfig\n", __func__);
return NULL;
}
-static inline struct dvb_frontend *cxd2841er_attach_c(
- struct cxd2841er_config *cfg, struct i2c_adapter *i2c)
-{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
- return NULL;
-}
#endif
#endif
diff --git a/drivers/media/dvb-frontends/cxd2841er_priv.h b/drivers/media/dvb-frontends/cxd2841er_priv.h
index 33e2f4952..0bbce4511 100644
--- a/drivers/media/dvb-frontends/cxd2841er_priv.h
+++ b/drivers/media/dvb-frontends/cxd2841er_priv.h
@@ -26,6 +26,7 @@
#define I2C_SLVT 1
#define CXD2841ER_CHIP_ID 0xa7
+#define CXD2854ER_CHIP_ID 0xc1
#define CXD2841ER_DVBS_POLLING_INVL 10
diff --git a/drivers/media/dvb-frontends/dib0090.c b/drivers/media/dvb-frontends/dib0090.c
index d879dc060..14c403254 100644
--- a/drivers/media/dvb-frontends/dib0090.c
+++ b/drivers/media/dvb-frontends/dib0090.c
@@ -797,6 +797,8 @@ static const u16 bb_ramp_pwm_normal[] = {
(0 << 9) | 400, /* BB_RAMP6 */
};
+#if 0
+/* Currently unused */
static const u16 bb_ramp_pwm_boost[] = {
550, /* max BB gain in 10th of dB */
8, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> BB_RAMP2 */
@@ -806,6 +808,7 @@ static const u16 bb_ramp_pwm_boost[] = {
(2 << 9) | 208, /* BB_RAMP5 = 29dB */
(0 << 9) | 440, /* BB_RAMP6 */
};
+#endif
static const u16 rf_ramp_pwm_cband[] = {
314, /* max RF gain in 10th of dB */
@@ -849,6 +852,8 @@ static const u16 rf_ramp_pwm_uhf[] = {
(0 << 10) | 580, /* GAIN_4_2, LNA 4 */
};
+#if 0
+/* Currently unused */
static const u16 rf_ramp_pwm_sband[] = {
253, /* max RF gain in 10th of dB */
38, /* ramp_slope = 1dB of gain -> clock_ticks_per_db = clk_khz / ramp_slope -> RF_RAMP2 */
@@ -862,6 +867,7 @@ static const u16 rf_ramp_pwm_sband[] = {
(0 << 10) | 0, /* GAIN_4_1, LNA 4 = 0dB */
(0 << 10) | 0, /* GAIN_4_2, LNA 4 */
};
+#endif
struct slope {
s16 range;
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c
index a45aee6a3..65a09c66b 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.c
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c
@@ -1240,12 +1240,15 @@ static u32 frac_times1e6(u32 N, u32 D)
* and rounded. For calc used formula: 16*10^(prescaleGain[dB]/20).
*
*/
+#if 0
+/* Currently unused, as we lack support for analog TV */
static const u16 nicam_presc_table_val[43] = {
1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4,
5, 5, 6, 6, 7, 8, 9, 10, 11, 13, 14, 16,
18, 20, 23, 25, 28, 32, 36, 40, 45,
51, 57, 64, 71, 80, 90, 101, 113, 127
};
+#endif
/*============================================================================*/
/*== END HELPER FUNCTIONS ==*/
diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c
index cae93c393..c5505d98b 100644
--- a/drivers/media/dvb-frontends/ds3000.c
+++ b/drivers/media/dvb-frontends/ds3000.c
@@ -957,6 +957,15 @@ static int ds3000_set_frontend(struct dvb_frontend *fe)
/* enable ac coupling */
ds3000_writereg(state, 0x25, 0x8a);
+ if ((c->symbol_rate < ds3000_ops.info.symbol_rate_min) ||
+ (c->symbol_rate > ds3000_ops.info.symbol_rate_max)) {
+ dprintk("%s() symbol_rate %u out of range (%u ... %u)\n",
+ __func__, c->symbol_rate,
+ ds3000_ops.info.symbol_rate_min,
+ ds3000_ops.info.symbol_rate_max);
+ return -EINVAL;
+ }
+
/* enhance symbol rate performance */
if ((c->symbol_rate / 1000) <= 5000) {
value = 29777 / (c->symbol_rate / 1000) + 1;
diff --git a/drivers/media/dvb-frontends/helene.c b/drivers/media/dvb-frontends/helene.c
new file mode 100644
index 000000000..97a898274
--- /dev/null
+++ b/drivers/media/dvb-frontends/helene.c
@@ -0,0 +1,1042 @@
+/*
+ * helene.c
+ *
+ * Sony HELENE DVB-S/S2 DVB-T/T2 DVB-C/C2 ISDB-T/S tuner driver (CXD2858ER)
+ *
+ * Copyright 2012 Sony Corporation
+ * Copyright (C) 2014 NetUP Inc.
+ * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/dvb/frontend.h>
+#include <linux/types.h>
+#include "helene.h"
+#include "dvb_frontend.h"
+
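+/* maximum number of data bytes in a single I2C write */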
+#define MAX_WRITE_REGSIZE 20
+
+enum helene_state {
+ STATE_UNKNOWN,
+ STATE_SLEEP,
+ STATE_ACTIVE
+};
+
+struct helene_priv {
+ u32 frequency;
+ u8 i2c_address;
+ struct i2c_adapter *i2c;
+ enum helene_state state;
+ void *set_tuner_data;
+ int (*set_tuner)(void *, int);
+ enum helene_xtal xtal;
+};
+
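+/* true for systems that can use the tuner's internal loop filter,
+ * i.e. everything except DVB-C/C2
+ */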
+#define TERR_INTERNAL_LOOPFILTER_AVAILABLE(tv_system) \
+ (((tv_system) != SONY_HELENE_DTV_DVBC_6) && \
+ ((tv_system) != SONY_HELENE_DTV_DVBC_8) && \
+ ((tv_system) != SONY_HELENE_DTV_DVBC2_6) && \
+ ((tv_system) != SONY_HELENE_DTV_DVBC2_8))
+
+#define HELENE_AUTO 0xff
+#define HELENE_OFFSET(ofs) ((u8)(ofs) & 0x1F)
+#define HELENE_BW_6 0x00
+#define HELENE_BW_7 0x01
+#define HELENE_BW_8 0x02
+#define HELENE_BW_1_7 0x03
+
+enum helene_tv_system_t {
+ SONY_HELENE_TV_SYSTEM_UNKNOWN,
+ /* Terrestrial Analog */
+ SONY_HELENE_ATV_MN_EIAJ,
+ /**< System-M (Japan) (IF: Fp=5.75MHz in default) */
+ SONY_HELENE_ATV_MN_SAP,
+ /**< System-M (US) (IF: Fp=5.75MHz in default) */
+ SONY_HELENE_ATV_MN_A2,
+ /**< System-M (Korea) (IF: Fp=5.9MHz in default) */
+ SONY_HELENE_ATV_BG,
+ /**< System-B/G (IF: Fp=7.3MHz in default) */
+ SONY_HELENE_ATV_I,
+ /**< System-I (IF: Fp=7.85MHz in default) */
+ SONY_HELENE_ATV_DK,
+ /**< System-D/K (IF: Fp=7.85MHz in default) */
+ SONY_HELENE_ATV_L,
+ /**< System-L (IF: Fp=7.85MHz in default) */
+ SONY_HELENE_ATV_L_DASH,
+ /**< System-L DASH (IF: Fp=2.2MHz in default) */
+ /* Terrestrial/Cable Digital */
+ SONY_HELENE_DTV_8VSB,
+ /**< ATSC 8VSB (IF: Fc=3.7MHz in default) */
+ SONY_HELENE_DTV_QAM,
+ /**< US QAM (IF: Fc=3.7MHz in default) */
+ SONY_HELENE_DTV_ISDBT_6,
+ /**< ISDB-T 6MHzBW (IF: Fc=3.55MHz in default) */
+ SONY_HELENE_DTV_ISDBT_7,
+ /**< ISDB-T 7MHzBW (IF: Fc=4.15MHz in default) */
+ SONY_HELENE_DTV_ISDBT_8,
+ /**< ISDB-T 8MHzBW (IF: Fc=4.75MHz in default) */
+ SONY_HELENE_DTV_DVBT_5,
+ /**< DVB-T 5MHzBW (IF: Fc=3.6MHz in default) */
+ SONY_HELENE_DTV_DVBT_6,
+ /**< DVB-T 6MHzBW (IF: Fc=3.6MHz in default) */
+ SONY_HELENE_DTV_DVBT_7,
+ /**< DVB-T 7MHzBW (IF: Fc=4.2MHz in default) */
+ SONY_HELENE_DTV_DVBT_8,
+ /**< DVB-T 8MHzBW (IF: Fc=4.8MHz in default) */
+ SONY_HELENE_DTV_DVBT2_1_7,
+ /**< DVB-T2 1.7MHzBW (IF: Fc=3.5MHz in default) */
+ SONY_HELENE_DTV_DVBT2_5,
+ /**< DVB-T2 5MHzBW (IF: Fc=3.6MHz in default) */
+ SONY_HELENE_DTV_DVBT2_6,
+ /**< DVB-T2 6MHzBW (IF: Fc=3.6MHz in default) */
+ SONY_HELENE_DTV_DVBT2_7,
+ /**< DVB-T2 7MHzBW (IF: Fc=4.2MHz in default) */
+ SONY_HELENE_DTV_DVBT2_8,
+ /**< DVB-T2 8MHzBW (IF: Fc=4.8MHz in default) */
+ SONY_HELENE_DTV_DVBC_6,
+ /**< DVB-C 6MHzBW (IF: Fc=3.7MHz in default) */
+ SONY_HELENE_DTV_DVBC_8,
+ /**< DVB-C 8MHzBW (IF: Fc=4.9MHz in default) */
+ SONY_HELENE_DTV_DVBC2_6,
+ /**< DVB-C2 6MHzBW (IF: Fc=3.7MHz in default) */
+ SONY_HELENE_DTV_DVBC2_8,
+ /**< DVB-C2 8MHzBW (IF: Fc=4.9MHz in default) */
+ SONY_HELENE_DTV_DTMB,
+ /**< DTMB (IF: Fc=5.1MHz in default) */
+ /* Satellite */
+ SONY_HELENE_STV_ISDBS,
+ /**< ISDB-S */
+ SONY_HELENE_STV_DVBS,
+ /**< DVB-S */
+ SONY_HELENE_STV_DVBS2,
+ /**< DVB-S2 */
+
+ SONY_HELENE_ATV_MIN = SONY_HELENE_ATV_MN_EIAJ,
+ /**< Minimum analog terrestrial system */
+ SONY_HELENE_ATV_MAX = SONY_HELENE_ATV_L_DASH,
+ /**< Maximum analog terrestrial system */
+ SONY_HELENE_DTV_MIN = SONY_HELENE_DTV_8VSB,
+ /**< Minimum digital terrestrial system */
+ SONY_HELENE_DTV_MAX = SONY_HELENE_DTV_DTMB,
+ /**< Maximum digital terrestrial system */
+ SONY_HELENE_TERR_TV_SYSTEM_NUM,
+ /**< Number of supported terrestrial broadcasting system */
+ SONY_HELENE_STV_MIN = SONY_HELENE_STV_ISDBS,
+ /**< Minimum satellite system */
+ SONY_HELENE_STV_MAX = SONY_HELENE_STV_DVBS2
+ /**< Maximum satellite system */
+};
+
+struct helene_terr_adjust_param_t {
+ /* < Addr:0x69 Bit[6:4] : RFVGA gain.
+ * 0xFF means Auto. (RF_GAIN_SEL = 1)
+ */
+ uint8_t RF_GAIN;
+ /* < Addr:0x69 Bit[3:0] : IF_BPF gain.
+ */
+ uint8_t IF_BPF_GC;
+ /* < Addr:0x6B Bit[3:0] : RF overload
+ * RF input detect level. (FRF <= 172MHz)
+ */
+ uint8_t RFOVLD_DET_LV1_VL;
+ /* < Addr:0x6B Bit[3:0] : RF overload
+ * RF input detect level. (172MHz < FRF <= 464MHz)
+ */
+ uint8_t RFOVLD_DET_LV1_VH;
+ /* < Addr:0x6B Bit[3:0] : RF overload
+ * RF input detect level. (FRF > 464MHz)
+ */
+ uint8_t RFOVLD_DET_LV1_U;
+ /* < Addr:0x6C Bit[2:0] :
+ * Internal RFAGC detect level. (FRF <= 172MHz)
+ */
+ uint8_t IFOVLD_DET_LV_VL;
+ /* < Addr:0x6C Bit[2:0] :
+ * Internal RFAGC detect level. (172MHz < FRF <= 464MHz)
+ */
+ uint8_t IFOVLD_DET_LV_VH;
+ /* < Addr:0x6C Bit[2:0] :
+ * Internal RFAGC detect level. (FRF > 464MHz)
+ */
+ uint8_t IFOVLD_DET_LV_U;
+ /* < Addr:0x6D Bit[5:4] :
+ * IF filter center offset.
+ */
+ uint8_t IF_BPF_F0;
+ /* < Addr:0x6D Bit[1:0] :
+ * 6MHzBW(0x00) or 7MHzBW(0x01)
+ * or 8MHzBW(0x02) or 1.7MHzBW(0x03)
+ */
+ uint8_t BW;
+ /* < Addr:0x6E Bit[4:0] :
+ * 5bit signed. IF offset (kHz) = FIF_OFFSET x 50
+ */
+ uint8_t FIF_OFFSET;
+ /* < Addr:0x6F Bit[4:0] :
+ * 5bit signed. BW offset (kHz) =
+ * BW_OFFSET x 50 (BW_OFFSET x 10 in 1.7MHzBW)
+ */
+ uint8_t BW_OFFSET;
+ /* < Addr:0x9C Bit[0] :
+ * Local polarity. (0: Upper Local, 1: Lower Local)
+ */
+ uint8_t IS_LOWERLOCAL;
+};
+
+static const struct helene_terr_adjust_param_t
+terr_params[SONY_HELENE_TERR_TV_SYSTEM_NUM] = {
+ /*< SONY_HELENE_TV_SYSTEM_UNKNOWN */
+ {HELENE_AUTO, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ HELENE_BW_6, HELENE_OFFSET(0), HELENE_OFFSET(0), 0x00},
+ /* Analog */
+ /**< SONY_HELENE_ATV_MN_EIAJ (System-M (Japan)) */
+ {HELENE_AUTO, 0x05, 0x03, 0x06, 0x03, 0x01, 0x01, 0x01, 0x00,
+ HELENE_BW_6, HELENE_OFFSET(0), HELENE_OFFSET(1), 0x00},
+ /**< SONY_HELENE_ATV_MN_SAP (System-M (US)) */
+ {HELENE_AUTO, 0x05, 0x03, 0x06, 0x03, 0x01, 0x01, 0x01, 0x00,
+ HELENE_BW_6, HELENE_OFFSET(0), HELENE_OFFSET(1), 0x00},
+ {HELENE_AUTO, 0x05, 0x03, 0x06, 0x03, 0x01, 0x01, 0x01, 0x00,
+ HELENE_BW_6, HELENE_OFFSET(3), HELENE_OFFSET(1), 0x00},
+ /**< SONY_HELENE_ATV_MN_A2 (System-M (Korea)) */
+ {HELENE_AUTO, 0x05, 0x03, 0x06, 0x03, 0x01, 0x01, 0x01, 0x00,
+ HELENE_BW_7, HELENE_OFFSET(11), HELENE_OFFSET(5), 0x00},
+ /**< SONY_HELENE_ATV_BG (System-B/G) */
+ {HELENE_AUTO, 0x05, 0x03, 0x06, 0x03, 0x01, 0x01, 0x01, 0x00,
+ HELENE_BW_8, HELENE_OFFSET(2), HELENE_OFFSET(-3), 0x00},
+ /**< SONY_HELENE_ATV_I (System-I) */
+ {HELENE_AUTO, 0x05, 0x03, 0x06, 0x03, 0x01, 0x01, 0x01, 0x00,
+ HELENE_BW_8, HELENE_OFFSET(2), HELENE_OFFSET(-3), 0x00},
+ /**< SONY_HELENE_ATV_DK (System-D/K) */
+ {HELENE_AUTO, 0x03, 0x04, 0x0A, 0x04, 0x04, 0x04, 0x04, 0x00,
+ HELENE_BW_8, HELENE_OFFSET(2), HELENE_OFFSET(-3), 0x00},
+ /**< SONY_HELENE_ATV_L (System-L) */
+ {HELENE_AUTO, 0x03, 0x04, 0x0A, 0x04, 0x04, 0x04, 0x04, 0x00,
+ HELENE_BW_8, HELENE_OFFSET(-1), HELENE_OFFSET(4), 0x00},
+ /**< SONY_HELENE_ATV_L_DASH (System-L DASH) */
+ /* Digital */
+ {HELENE_AUTO, 0x09, 0x0B, 0x0B, 0x0B, 0x03, 0x03, 0x03, 0x00,
+ HELENE_BW_6, HELENE_OFFSET(-6), HELENE_OFFSET(-3), 0x00},
+ /**< SONY_HELENE_DTV_8VSB (ATSC 8VSB) */
+ {HELENE_AUTO, 0x09, 0x0B, 0x0B, 0x0B, 0x02, 0x02, 0x02, 0x00,
+ HELENE_BW_6, HELENE_OFFSET(-6), HELENE_OFFSET(-3), 0x00},
+ /**< SONY_HELENE_DTV_QAM (US QAM) */
+ {HELENE_AUTO, 0x09, 0x0B, 0x0B, 0x0B, 0x02, 0x02, 0x02, 0x00,
+ HELENE_BW_6, HELENE_OFFSET(-9), HELENE_OFFSET(-5), 0x00},
+ /**< SONY_HELENE_DTV_ISDBT_6 (ISDB-T 6MHzBW) */
+ {HELENE_AUTO, 0x09, 0x0B, 0x0B, 0x0B, 0x02, 0x02, 0x02, 0x00,
+ HELENE_BW_7, HELENE_OFFSET(-7), HELENE_OFFSET(-6), 0x00},
+ /**< SONY_HELENE_DTV_ISDBT_7 (ISDB-T 7MHzBW) */
+ {HELENE_AUTO, 0x09, 0x0B, 0x0B, 0x0B, 0x02, 0x02, 0x02, 0x00,
+ HELENE_BW_8, HELENE_OFFSET(-5), HELENE_OFFSET(-7), 0x00},
+ /**< SONY_HELENE_DTV_ISDBT_8 (ISDB-T 8MHzBW) */
+ {HELENE_AUTO, 0x09, 0x0B, 0x0B, 0x0B, 0x02, 0x02, 0x02, 0x00,
+ HELENE_BW_6, HELENE_OFFSET(-8), HELENE_OFFSET(-3), 0x00},
+ /**< SONY_HELENE_DTV_DVBT_5 (DVB-T 5MHzBW) */
+ {HELENE_AUTO, 0x09, 0x0B, 0x0B, 0x0B, 0x02, 0x02, 0x02, 0x00,
+ HELENE_BW_6, HELENE_OFFSET(-8), HELENE_OFFSET(-3), 0x00},
+ /**< SONY_HELENE_DTV_DVBT_6 (DVB-T 6MHzBW) */
+ {HELENE_AUTO, 0x09, 0x0B, 0x0B, 0x0B, 0x02, 0x02, 0x02, 0x00,
+ HELENE_BW_7, HELENE_OFFSET(-6), HELENE_OFFSET(-5), 0x00},
+ /**< SONY_HELENE_DTV_DVBT_7 (DVB-T 7MHzBW) */
+ {HELENE_AUTO, 0x09, 0x0B, 0x0B, 0x0B, 0x02, 0x02, 0x02, 0x00,
+ HELENE_BW_8, HELENE_OFFSET(-4), HELENE_OFFSET(-6), 0x00},
+ /**< SONY_HELENE_DTV_DVBT_8 (DVB-T 8MHzBW) */
+ {HELENE_AUTO, 0x09, 0x0B, 0x0B, 0x0B, 0x02, 0x02, 0x02, 0x00,
+ HELENE_BW_1_7, HELENE_OFFSET(-10), HELENE_OFFSET(-10), 0x00},
+ /**< SONY_HELENE_DTV_DVBT2_1_7 (DVB-T2 1.7MHzBW) */
+ {HELENE_AUTO, 0x09, 0x0B, 0x0B, 0x0B, 0x02, 0x02, 0x02, 0x00,
+ HELENE_BW_6, HELENE_OFFSET(-8), HELENE_OFFSET(-3), 0x00},
+ /**< SONY_HELENE_DTV_DVBT2_5 (DVB-T2 5MHzBW) */
+ {HELENE_AUTO, 0x09, 0x0B, 0x0B, 0x0B, 0x02, 0x02, 0x02, 0x00,
+ HELENE_BW_6, HELENE_OFFSET(-8), HELENE_OFFSET(-3), 0x00},
+ /**< SONY_HELENE_DTV_DVBT2_6 (DVB-T2 6MHzBW) */
+ {HELENE_AUTO, 0x09, 0x0B, 0x0B, 0x0B, 0x02, 0x02, 0x02, 0x00,
+ HELENE_BW_7, HELENE_OFFSET(-6), HELENE_OFFSET(-5), 0x00},
+ /**< SONY_HELENE_DTV_DVBT2_7 (DVB-T2 7MHzBW) */
+ {HELENE_AUTO, 0x09, 0x0B, 0x0B, 0x0B, 0x02, 0x02, 0x02, 0x00,
+ HELENE_BW_8, HELENE_OFFSET(-4), HELENE_OFFSET(-6), 0x00},
+ /**< SONY_HELENE_DTV_DVBT2_8 (DVB-T2 8MHzBW) */
+ {HELENE_AUTO, 0x05, 0x02, 0x02, 0x02, 0x01, 0x01, 0x01, 0x00,
+ HELENE_BW_6, HELENE_OFFSET(-6), HELENE_OFFSET(-4), 0x00},
+ /**< SONY_HELENE_DTV_DVBC_6 (DVB-C 6MHzBW) */
+ {HELENE_AUTO, 0x05, 0x02, 0x02, 0x02, 0x01, 0x01, 0x01, 0x00,
+ HELENE_BW_8, HELENE_OFFSET(-2), HELENE_OFFSET(-3), 0x00},
+ /**< SONY_HELENE_DTV_DVBC_8 (DVB-C 8MHzBW) */
+ {HELENE_AUTO, 0x03, 0x09, 0x09, 0x09, 0x02, 0x02, 0x02, 0x00,
+ HELENE_BW_6, HELENE_OFFSET(-6), HELENE_OFFSET(-2), 0x00},
+ /**< SONY_HELENE_DTV_DVBC2_6 (DVB-C2 6MHzBW) */
+ {HELENE_AUTO, 0x03, 0x09, 0x09, 0x09, 0x02, 0x02, 0x02, 0x00,
+ HELENE_BW_8, HELENE_OFFSET(-2), HELENE_OFFSET(0), 0x00},
+ /**< SONY_HELENE_DTV_DVBC2_8 (DVB-C2 8MHzBW) */
+ {HELENE_AUTO, 0x04, 0x0B, 0x0B, 0x0B, 0x02, 0x02, 0x02, 0x00,
+ HELENE_BW_8, HELENE_OFFSET(2), HELENE_OFFSET(1), 0x00}
+ /**< SONY_HELENE_DTV_DTMB (DTMB) */
+};
+
+static void helene_i2c_debug(struct helene_priv *priv,
+ u8 reg, u8 write, const u8 *data, u32 len)
+{
+ dev_dbg(&priv->i2c->dev, "helene: I2C %s reg 0x%02x size %d\n",
+ (write == 0 ? "read" : "write"), reg, len);
+ print_hex_dump_bytes("helene: I2C data: ",
+ DUMP_PREFIX_OFFSET, data, len);
+}
+
+static int helene_write_regs(struct helene_priv *priv,
+ u8 reg, const u8 *data, u32 len)
+{
+ int ret;
+ u8 buf[MAX_WRITE_REGSIZE + 1];
+ struct i2c_msg msg[1] = {
+ {
+ .addr = priv->i2c_address,
+ .flags = 0,
+ .len = len + 1,
+ .buf = buf,
+ }
+ };
+
+ if (len + 1 > sizeof(buf)) {
+ dev_warn(&priv->i2c->dev,
+ "wr reg=%04x: len=%d vs %Zu is too big!\n",
+ reg, len + 1, sizeof(buf));
+ return -E2BIG;
+ }
+
+ helene_i2c_debug(priv, reg, 1, data, len);
+ buf[0] = reg;
+ memcpy(&buf[1], data, len);
+ ret = i2c_transfer(priv->i2c, msg, 1);
+ if (ret >= 0 && ret != 1)
+ ret = -EREMOTEIO;
+ if (ret < 0) {
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c wr failed=%d reg=%02x len=%d\n",
+ KBUILD_MODNAME, ret, reg, len);
+ return ret;
+ }
+ return 0;
+}
+
+static int helene_write_reg(struct helene_priv *priv, u8 reg, u8 val)
+{
+ return helene_write_regs(priv, reg, &val, 1);
+}
+
+static int helene_read_regs(struct helene_priv *priv,
+ u8 reg, u8 *val, u32 len)
+{
+ int ret;
+ struct i2c_msg msg[2] = {
+ {
+ .addr = priv->i2c_address,
+ .flags = 0,
+ .len = 1,
+ .buf = &reg,
+ }, {
+ .addr = priv->i2c_address,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = val,
+ }
+ };
+
+ ret = i2c_transfer(priv->i2c, &msg[0], 1);
+ if (ret >= 0 && ret != 1)
+ ret = -EREMOTEIO;
+ if (ret < 0) {
+ dev_warn(&priv->i2c->dev,
+ "%s: I2C rw failed=%d addr=%02x reg=%02x\n",
+ KBUILD_MODNAME, ret, priv->i2c_address, reg);
+ return ret;
+ }
+ ret = i2c_transfer(priv->i2c, &msg[1], 1);
+ if (ret >= 0 && ret != 1)
+ ret = -EREMOTEIO;
+ if (ret < 0) {
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c rd failed=%d addr=%02x reg=%02x\n",
+ KBUILD_MODNAME, ret, priv->i2c_address, reg);
+ return ret;
+ }
+ helene_i2c_debug(priv, reg, 0, val, len);
+ return 0;
+}
+
+static int helene_read_reg(struct helene_priv *priv, u8 reg, u8 *val)
+{
+ return helene_read_regs(priv, reg, val, 1);
+}
+
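+/*
+ * Read-modify-write helper: only the bits set in @mask are taken from
+ * @data, the rest keep their current register value; a mask of 0xff
+ * skips the read-back entirely.
+ */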
+static int helene_set_reg_bits(struct helene_priv *priv,
+ u8 reg, u8 data, u8 mask)
+{
+ int res;
+ u8 rdata;
+
+ if (mask != 0xff) {
+ res = helene_read_reg(priv, reg, &rdata);
+ if (res != 0)
+ return res;
+ data = ((data & mask) | (rdata & (mask ^ 0xFF)));
+ }
+ return helene_write_reg(priv, reg, data);
+}
+
+static int helene_enter_power_save(struct helene_priv *priv)
+{
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ if (priv->state == STATE_SLEEP)
+ return 0;
+
+ /* Standby setting for CPU */
+ helene_write_reg(priv, 0x88, 0x0);
+
+ /* Standby setting for internal logic block */
+ helene_write_reg(priv, 0x87, 0xC0);
+
+ priv->state = STATE_SLEEP;
+ return 0;
+}
+
+static int helene_leave_power_save(struct helene_priv *priv)
+{
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ if (priv->state == STATE_ACTIVE)
+ return 0;
+
+ /* Clock enable for internal logic block (0x87) */
+ helene_write_reg(priv, 0x87, 0xC4);
+
+ /* Wake up CPU (0x88) */
+ helene_write_reg(priv, 0x88, 0x40);
+
+ priv->state = STATE_ACTIVE;
+ return 0;
+}
+
+static int helene_init(struct dvb_frontend *fe)
+{
+ struct helene_priv *priv = fe->tuner_priv;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ return helene_leave_power_save(priv);
+}
+
+static int helene_release(struct dvb_frontend *fe)
+{
+ struct helene_priv *priv = fe->tuner_priv;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ kfree(fe->tuner_priv);
+ fe->tuner_priv = NULL;
+ return 0;
+}
+
+static int helene_sleep(struct dvb_frontend *fe)
+{
+ struct helene_priv *priv = fe->tuner_priv;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+ helene_enter_power_save(priv);
+ return 0;
+}
+
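+/*
+ * Map delivery system and bandwidth from the DVB property cache onto a
+ * helene_tv_system_t value (the terr_params index); bandwidths above
+ * 8 MHz are clamped to the 8 MHz entry.
+ */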
+static enum helene_tv_system_t helene_get_tv_system(struct dvb_frontend *fe)
+{
+ enum helene_tv_system_t system = SONY_HELENE_TV_SYSTEM_UNKNOWN;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ struct helene_priv *priv = fe->tuner_priv;
+
+ if (p->delivery_system == SYS_DVBT) {
+ if (p->bandwidth_hz <= 5000000)
+ system = SONY_HELENE_DTV_DVBT_5;
+ else if (p->bandwidth_hz <= 6000000)
+ system = SONY_HELENE_DTV_DVBT_6;
+ else if (p->bandwidth_hz <= 7000000)
+ system = SONY_HELENE_DTV_DVBT_7;
+ else if (p->bandwidth_hz <= 8000000)
+ system = SONY_HELENE_DTV_DVBT_8;
+ else {
+ system = SONY_HELENE_DTV_DVBT_8;
+ p->bandwidth_hz = 8000000;
+ }
+ } else if (p->delivery_system == SYS_DVBT2) {
+ if (p->bandwidth_hz <= 5000000)
+ system = SONY_HELENE_DTV_DVBT2_5;
+ else if (p->bandwidth_hz <= 6000000)
+ system = SONY_HELENE_DTV_DVBT2_6;
+ else if (p->bandwidth_hz <= 7000000)
+ system = SONY_HELENE_DTV_DVBT2_7;
+ else if (p->bandwidth_hz <= 8000000)
+ system = SONY_HELENE_DTV_DVBT2_8;
+ else {
+ system = SONY_HELENE_DTV_DVBT2_8;
+ p->bandwidth_hz = 8000000;
+ }
+ } else if (p->delivery_system == SYS_DVBS) {
+ system = SONY_HELENE_STV_DVBS;
+ } else if (p->delivery_system == SYS_DVBS2) {
+ system = SONY_HELENE_STV_DVBS2;
+ } else if (p->delivery_system == SYS_ISDBS) {
+ system = SONY_HELENE_STV_ISDBS;
+ } else if (p->delivery_system == SYS_ISDBT) {
+ if (p->bandwidth_hz <= 6000000)
+ system = SONY_HELENE_DTV_ISDBT_6;
+ else if (p->bandwidth_hz <= 7000000)
+ system = SONY_HELENE_DTV_ISDBT_7;
+ else if (p->bandwidth_hz <= 8000000)
+ system = SONY_HELENE_DTV_ISDBT_8;
+ else {
+ system = SONY_HELENE_DTV_ISDBT_8;
+ p->bandwidth_hz = 8000000;
+ }
+ } else if (p->delivery_system == SYS_DVBC_ANNEX_A) {
+ if (p->bandwidth_hz <= 6000000)
+ system = SONY_HELENE_DTV_DVBC_6;
+ else if (p->bandwidth_hz <= 8000000)
+ system = SONY_HELENE_DTV_DVBC_8;
+ }
+ dev_dbg(&priv->i2c->dev,
+ "%s(): HELENE DTV system %d (delsys %d, bandwidth %d)\n",
+ __func__, (int)system, p->delivery_system,
+ p->bandwidth_hz);
+ return system;
+}
+
+static int helene_set_params_s(struct dvb_frontend *fe)
+{
+ u8 data[MAX_WRITE_REGSIZE];
+ u32 frequency;
+ enum helene_tv_system_t tv_system;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ struct helene_priv *priv = fe->tuner_priv;
+ int frequencykHz = p->frequency;
+ uint32_t frequency4kHz = 0;
+ u32 symbol_rate = p->symbol_rate/1000;
+
+ dev_dbg(&priv->i2c->dev, "%s(): tune frequency %dkHz sr=%uKsps\n",
+ __func__, frequencykHz, symbol_rate);
+ tv_system = helene_get_tv_system(fe);
+
+ if (tv_system == SONY_HELENE_TV_SYSTEM_UNKNOWN) {
+ dev_err(&priv->i2c->dev, "%s(): unknown DTV system\n",
+ __func__);
+ return -EINVAL;
+ }
+ /* RF switch turn to satellite */
+ if (priv->set_tuner)
+ priv->set_tuner(priv->set_tuner_data, 0);
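+ /*
+ * For satellite systems the DVB core passes p->frequency in kHz,
+ * so this stores MHz; helene_get_frequency() multiplies by 1000
+ * and reports kHz again. roundup(x, 1) is a no-op, kept for
+ * symmetry with the 25 kHz rounding in the terrestrial path.
+ */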
+ frequency = roundup(p->frequency / 1000, 1);
+
+ /* Disable IF signal output */
+ helene_write_reg(priv, 0x15, 0x02);
+
+ /* RFIN matching in power save (Sat) reset */
+ helene_write_reg(priv, 0x43, 0x06);
+
+ /* Analog block setting (0x6A, 0x6B) */
+ data[0] = 0x00;
+ data[1] = 0x00;
+ helene_write_regs(priv, 0x6A, data, 2);
+ helene_write_reg(priv, 0x75, 0x99);
+ helene_write_reg(priv, 0x9D, 0x00);
+
+ /* Tuning setting for CPU (0x61) */
+ helene_write_reg(priv, 0x61, 0x07);
+
+ /* Satellite mode select (0x01) */
+ helene_write_reg(priv, 0x01, 0x01);
+
+ /* Clock enable for internal logic block, CPU wake-up (0x04, 0x05) */
+ data[0] = 0xC4;
+ data[1] = 0x40;
+
+ switch (priv->xtal) {
+ case SONY_HELENE_XTAL_16000:
+ case SONY_HELENE_XTAL_20500:
+ data[2] = 0x02;
+ break;
+ case SONY_HELENE_XTAL_24000:
+ data[2] = 0x03;
+ break;
+ case SONY_HELENE_XTAL_41000:
+ data[2] = 0x05;
+ break;
+ default:
+ dev_err(&priv->i2c->dev, "%s(): unknown xtal %d\n",
+ __func__, priv->xtal);
+ return -EINVAL;
+ }
+
+ /* Setting for analog block (0x07). LOOPFILTER INTERNAL */
+ data[3] = 0x80;
+
+ /* Tuning setting for analog block
+ * (0x08, 0x09, 0x0A, 0x0B). LOOPFILTER INTERNAL
+ */
+ if (priv->xtal == SONY_HELENE_XTAL_20500)
+ data[4] = 0x58;
+ else
+ data[4] = 0x70;
+
+ data[5] = 0x1E;
+ data[6] = 0x02;
+ data[7] = 0x24;
+
+ /* Enable for analog block (0x0C, 0x0D, 0x0E). SAT LNA ON */
+ data[8] = 0x0F;
+ data[8] |= 0xE0; /* POWERSAVE_TERR_RF_ACTIVE */
+ data[9] = 0x02;
+ data[10] = 0x1E;
+
+ /* Setting for LPF cutoff frequency (0x0F) */
+ switch (tv_system) {
+ case SONY_HELENE_STV_ISDBS:
+ data[11] = 0x22; /* 22MHz */
+ break;
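+ /*
+ * The fixed coefficients below come from the generic cutoff formula
+ * fc_lpf = ceil(SR * (200 + rolloff) / 200000) for SR <= 10000 and
+ * fc_lpf = ceil(SR * (100 + rolloff) / 200000) + 5 above, with the
+ * roll-off pinned at 35% for DVB-S (47/40000 and 27/40000) and 20%
+ * for DVB-S2 (11/10000 and 3/5000); SR is in kSymbol/s. The same
+ * substitution appears in the horus3a change in this patch.
+ */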
+ case SONY_HELENE_STV_DVBS:
+ if (symbol_rate <= 4000)
+ data[11] = 0x05;
+ else if (symbol_rate <= 10000)
+ data[11] = (uint8_t)((symbol_rate * 47
+ + (40000-1)) / 40000);
+ else
+ data[11] = (uint8_t)((symbol_rate * 27
+ + (40000-1)) / 40000 + 5);
+
+ if (data[11] > 36)
+ data[11] = 36; /* 5 <= lpf_cutoff <= 36 is valid */
+ break;
+ case SONY_HELENE_STV_DVBS2:
+ if (symbol_rate <= 4000)
+ data[11] = 0x05;
+ else if (symbol_rate <= 10000)
+ data[11] = (uint8_t)((symbol_rate * 11
+ + (10000-1)) / 10000);
+ else
+ data[11] = (uint8_t)((symbol_rate * 3
+ + (5000-1)) / 5000 + 5);
+
+ if (data[11] > 36)
+ data[11] = 36; /* 5 <= lpf_cutoff <= 36 is valid */
+ break;
+ default:
+ dev_err(&priv->i2c->dev, "%s(): unknown standard %d\n",
+ __func__, tv_system);
+ return -EINVAL;
+ }
+
+ /* RF tuning frequency setting (0x10, 0x11, 0x12) */
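+ /* The frequency word is in 4 kHz units; (kHz + 2) / 4 rounds to the nearest step. */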
+ frequency4kHz = (frequencykHz + 2) / 4;
+ data[12] = (uint8_t)(frequency4kHz & 0xFF); /* FRF_L */
+ data[13] = (uint8_t)((frequency4kHz >> 8) & 0xFF); /* FRF_M */
+ /* FRF_H (bit[3:0]) */
+ data[14] = (uint8_t)((frequency4kHz >> 16) & 0x0F);
+
+ /* Tuning command (0x13) */
+ data[15] = 0xFF;
+
+ /* Setting for IQOUT_LIMIT (0x14) 0.75Vpp */
+ data[16] = 0x00;
+
+ /* Enable IQ output (0x15) */
+ data[17] = 0x01;
+
+ helene_write_regs(priv, 0x04, data, 18);
+
+ dev_dbg(&priv->i2c->dev, "%s(): tune done\n",
+ __func__);
+
+ priv->frequency = frequency;
+ return 0;
+}
+
+static int helene_set_params(struct dvb_frontend *fe)
+{
+ u8 data[MAX_WRITE_REGSIZE];
+ u32 frequency;
+ enum helene_tv_system_t tv_system;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ struct helene_priv *priv = fe->tuner_priv;
+ int frequencykHz = p->frequency / 1000;
+
+ dev_dbg(&priv->i2c->dev, "%s(): tune frequency %dkHz\n",
+ __func__, frequencykHz);
+ tv_system = helene_get_tv_system(fe);
+
+ if (tv_system == SONY_HELENE_TV_SYSTEM_UNKNOWN) {
+ dev_dbg(&priv->i2c->dev, "%s(): unknown DTV system\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (priv->set_tuner)
+ priv->set_tuner(priv->set_tuner_data, 1);
+ frequency = roundup(p->frequency / 1000, 25);
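+ /*
+ * Report on the 25 kHz tuning grid (frequency_step in
+ * helene_tuner_ops); the frequency registers below are programmed
+ * with the unrounded kHz value.
+ */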
+
+ /* mode select */
+ helene_write_reg(priv, 0x01, 0x00);
+
+ /* Disable IF signal output */
+ helene_write_reg(priv, 0x74, 0x02);
+
+ if (priv->state == STATE_SLEEP)
+ helene_leave_power_save(priv);
+
+ /* Initial setting for internal analog block (0x91, 0x92) */
+ if ((tv_system == SONY_HELENE_DTV_DVBC_6) ||
+ (tv_system == SONY_HELENE_DTV_DVBC_8)) {
+ data[0] = 0x16;
+ data[1] = 0x26;
+ } else {
+ data[0] = 0x10;
+ data[1] = 0x20;
+ }
+ helene_write_regs(priv, 0x91, data, 2);
+
+ /* Setting for analog block */
+ if (TERR_INTERNAL_LOOPFILTER_AVAILABLE(tv_system))
+ data[0] = 0x90;
+ else
+ data[0] = 0x00;
+
+ /* Setting for local polarity (0x9D) */
+ data[1] = (uint8_t)(terr_params[tv_system].IS_LOWERLOCAL & 0x01);
+ helene_write_regs(priv, 0x9C, data, 2);
+
+ /* Enable for analog block */
+ data[0] = 0xEE;
+ data[1] = 0x02;
+ data[2] = 0x1E;
+ data[3] = 0x67; /* Tuning setting for CPU */
+
+ /* Setting for PLL reference divider for xtal=24MHz */
+ if ((tv_system == SONY_HELENE_DTV_DVBC_6) ||
+ (tv_system == SONY_HELENE_DTV_DVBC_8))
+ data[4] = 0x18;
+ else
+ data[4] = 0x03;
+
+ /* Tuning setting for analog block */
+ if (TERR_INTERNAL_LOOPFILTER_AVAILABLE(tv_system)) {
+ data[5] = 0x38;
+ data[6] = 0x1E;
+ data[7] = 0x02;
+ data[8] = 0x24;
+ } else if ((tv_system == SONY_HELENE_DTV_DVBC_6) ||
+ (tv_system == SONY_HELENE_DTV_DVBC_8)) {
+ data[5] = 0x1C;
+ data[6] = 0x78;
+ data[7] = 0x08;
+ data[8] = 0x1C;
+ } else {
+ data[5] = 0xB4;
+ data[6] = 0x78;
+ data[7] = 0x08;
+ data[8] = 0x30;
+ }
+ helene_write_regs(priv, 0x5E, data, 9);
+
+ /* LT_AMP_EN should be 0 */
+ helene_set_reg_bits(priv, 0x67, 0x0, 0x02);
+
+ /* Setting for IFOUT_LIMIT */
+ data[0] = 0x00; /* 1.5Vpp */
+
+ /* RF_GAIN setting */
+ if (terr_params[tv_system].RF_GAIN == HELENE_AUTO)
+ data[1] = 0x80; /* RF_GAIN_SEL = 1 */
+ else
+ data[1] = (uint8_t)((terr_params[tv_system].RF_GAIN
+ << 4) & 0x70);
+
+ /* IF_BPF_GC setting */
+ data[1] |= (uint8_t)(terr_params[tv_system].IF_BPF_GC & 0x0F);
+
+ /* Setting for internal RFAGC (0x6A, 0x6B, 0x6C) */
+ data[2] = 0x00;
+ if (frequencykHz <= 172000) {
+ data[3] = (uint8_t)(terr_params[tv_system].RFOVLD_DET_LV1_VL
+ & 0x0F);
+ data[4] = (uint8_t)(terr_params[tv_system].IFOVLD_DET_LV_VL
+ & 0x07);
+ } else if (frequencykHz <= 464000) {
+ data[3] = (uint8_t)(terr_params[tv_system].RFOVLD_DET_LV1_VH
+ & 0x0F);
+ data[4] = (uint8_t)(terr_params[tv_system].IFOVLD_DET_LV_VH
+ & 0x07);
+ } else {
+ data[3] = (uint8_t)(terr_params[tv_system].RFOVLD_DET_LV1_U
+ & 0x0F);
+ data[4] = (uint8_t)(terr_params[tv_system].IFOVLD_DET_LV_U
+ & 0x07);
+ }
+ data[4] |= 0x20;
+
+ /* Setting for IF frequency and bandwidth */
+
+ /* IF filter center frequency offset (IF_BPF_F0) (0x6D) */
+ data[5] = (uint8_t)((terr_params[tv_system].IF_BPF_F0 << 4) & 0x30);
+
+ /* IF filter band width (BW) (0x6D) */
+ data[5] |= (uint8_t)(terr_params[tv_system].BW & 0x03);
+
+ /* IF frequency offset value (FIF_OFFSET) (0x6E) */
+ data[6] = (uint8_t)(terr_params[tv_system].FIF_OFFSET & 0x1F);
+
+ /* IF band width offset value (BW_OFFSET) (0x6F) */
+ data[7] = (uint8_t)(terr_params[tv_system].BW_OFFSET & 0x1F);
+
+ /* RF tuning frequency setting (0x70, 0x71, 0x72) */
+ data[8] = (uint8_t)(frequencykHz & 0xFF); /* FRF_L */
+ data[9] = (uint8_t)((frequencykHz >> 8) & 0xFF); /* FRF_M */
+ data[10] = (uint8_t)((frequencykHz >> 16)
+ & 0x0F); /* FRF_H (bit[3:0]) */
+
+ /* Tuning command */
+ data[11] = 0xFF;
+
+ /* Enable IF output, AGC and IFOUT pin selection (0x74) */
+ data[12] = 0x01;
+
+ if ((tv_system == SONY_HELENE_DTV_DVBC_6) ||
+ (tv_system == SONY_HELENE_DTV_DVBC_8)) {
+ data[13] = 0xD9;
+ data[14] = 0x0F;
+ data[15] = 0x24;
+ data[16] = 0x87;
+ } else {
+ data[13] = 0x99;
+ data[14] = 0x00;
+ data[15] = 0x24;
+ data[16] = 0x87;
+ }
+
+ helene_write_regs(priv, 0x68, data, 17);
+
+ dev_dbg(&priv->i2c->dev, "%s(): tune done\n",
+ __func__);
+
+ priv->frequency = frequency;
+ return 0;
+}
+
+static int helene_get_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ struct helene_priv *priv = fe->tuner_priv;
+
+ *frequency = priv->frequency * 1000;
+ return 0;
+}
+
+static struct dvb_tuner_ops helene_tuner_ops = {
+ .info = {
+ .name = "Sony HELENE Ter tuner",
+ .frequency_min = 1000000,
+ .frequency_max = 1200000000,
+ .frequency_step = 25000,
+ },
+ .init = helene_init,
+ .release = helene_release,
+ .sleep = helene_sleep,
+ .set_params = helene_set_params,
+ .get_frequency = helene_get_frequency,
+};
+
+static struct dvb_tuner_ops helene_tuner_ops_s = {
+ .info = {
+ .name = "Sony HELENE Sat tuner",
+ .frequency_min = 500000,
+ .frequency_max = 2500000,
+ .frequency_step = 1000,
+ },
+ .init = helene_init,
+ .release = helene_release,
+ .sleep = helene_sleep,
+ .set_params = helene_set_params_s,
+ .get_frequency = helene_get_frequency,
+};
+
+/* Power on the tuner; call once after reset. */
+static int helene_x_pon(struct helene_priv *priv)
+{
+ /* RFIN matching in power save (terrestrial) = ACTIVE */
+ /* RFIN matching in power save (satellite) = ACTIVE */
+ u8 dataT[] = { 0x06, 0x00, 0x02, 0x00 };
+ /* SAT_RF_ACTIVE = true, lnaOff = false, terrRfActive = true */
+ u8 dataS[] = { 0x05, 0x06 };
+ u8 cdata[] = {0x7A, 0x01};
+ u8 data[20];
+ u8 rdata[2];
+
+ /* mode select */
+ helene_write_reg(priv, 0x01, 0x00);
+
+ helene_write_reg(priv, 0x67, dataT[3]);
+ helene_write_reg(priv, 0x43, dataS[1]);
+ helene_write_regs(priv, 0x5E, dataT, 3);
+ helene_write_reg(priv, 0x0C, dataS[0]);
+
+ /* Initial setting for internal logic block */
+ helene_write_regs(priv, 0x99, cdata, sizeof(cdata));
+
+ /* 0x81 - 0x94 */
+ data[0] = 0x18; /* xtal 24 MHz */
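+ /*
+ * NOTE: this boot sequence assumes the 24 MHz reference; other
+ * priv->xtal rates are only consulted later, in the satellite
+ * tune path (helene_set_params_s).
+ */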
+ data[1] = (uint8_t)(0x80 | (0x04 & 0x1F)); /* 4 x 25 = 100uA */
+ data[2] = (uint8_t)(0x80 | (0x26 & 0x7F)); /* 38 x 0.25 = 9.5pF */
+ data[3] = 0x80; /* REFOUT signal output 500mVpp */
+ data[4] = 0x00; /* GPIO settings */
+ data[5] = 0x00; /* GPIO settings */
+ data[6] = 0xC4; /* Clock enable for internal logic block */
+ data[7] = 0x40; /* Start CPU boot-up */
+ data[8] = 0x10; /* For burst-write */
+
+ /* Setting for internal RFAGC */
+ data[9] = 0x00;
+ data[10] = 0x45;
+ data[11] = 0x75;
+
+ data[12] = 0x07; /* Setting for analog block */
+
+ /* Initial setting for internal analog block */
+ data[13] = 0x1C;
+ data[14] = 0x3F;
+ data[15] = 0x02;
+ data[16] = 0x10;
+ data[17] = 0x20;
+ data[18] = 0x0A;
+ data[19] = 0x00;
+
+ helene_write_regs(priv, 0x81, data, sizeof(data));
+
+ /* Setting for internal RFAGC */
+ helene_write_reg(priv, 0x9B, 0x00);
+
+ msleep(20);
+
+ /* Check CPU_STT/CPU_ERR */
+ helene_read_regs(priv, 0x1A, rdata, sizeof(rdata));
+
+ if (rdata[0] != 0x00) {
+ dev_err(&priv->i2c->dev,
+ "HELENE tuner CPU error 0x%x\n", rdata[0]);
+ return -EIO;
+ }
+
+ /* VCO current setting: trigger via regs 0x17/0x18, wait 20 ms,
+ * then latch the result (upper nibble of reg 0x19) into reg 0x95.
+ */
+ cdata[0] = 0x90;
+ cdata[1] = 0x06;
+ helene_write_regs(priv, 0x17, cdata, sizeof(cdata));
+ msleep(20);
+ helene_read_reg(priv, 0x19, data);
+ helene_write_reg(priv, 0x95, (uint8_t)((data[0] >> 4) & 0x0F));
+
+ /* Disable IF signal output */
+ helene_write_reg(priv, 0x74, 0x02);
+
+ /* Standby setting for CPU */
+ helene_write_reg(priv, 0x88, 0x00);
+
+ /* Standby setting for internal logic block */
+ helene_write_reg(priv, 0x87, 0xC0);
+
+ /* Load capacitance control setting for crystal oscillator */
+ helene_write_reg(priv, 0x80, 0x01);
+
+ /* Satellite initial setting */
+ cdata[0] = 0x07;
+ cdata[1] = 0x00;
+ helene_write_regs(priv, 0x41, cdata, sizeof(cdata));
+
+ dev_info(&priv->i2c->dev,
+ "HELENE tuner x_pon done\n");
+
+ return 0;
+}
+
+struct dvb_frontend *helene_attach_s(struct dvb_frontend *fe,
+ const struct helene_config *config,
+ struct i2c_adapter *i2c)
+{
+ struct helene_priv *priv = NULL;
+
+ priv = kzalloc(sizeof(struct helene_priv), GFP_KERNEL);
+ if (priv == NULL)
+ return NULL;
+ priv->i2c_address = (config->i2c_address >> 1);
+ priv->i2c = i2c;
+ priv->set_tuner_data = config->set_tuner_priv;
+ priv->set_tuner = config->set_tuner_callback;
+ priv->xtal = config->xtal;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ if (helene_x_pon(priv) != 0) {
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+ kfree(priv);
+ return NULL;
+ }
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+
+ memcpy(&fe->ops.tuner_ops, &helene_tuner_ops_s,
+ sizeof(struct dvb_tuner_ops));
+ fe->tuner_priv = priv;
+ dev_info(&priv->i2c->dev,
+ "Sony HELENE Sat attached on addr=%x at I2C adapter %p\n",
+ priv->i2c_address, priv->i2c);
+ return fe;
+}
+EXPORT_SYMBOL(helene_attach_s);
+
+struct dvb_frontend *helene_attach(struct dvb_frontend *fe,
+ const struct helene_config *config,
+ struct i2c_adapter *i2c)
+{
+ struct helene_priv *priv = NULL;
+
+ priv = kzalloc(sizeof(struct helene_priv), GFP_KERNEL);
+ if (priv == NULL)
+ return NULL;
+ priv->i2c_address = (config->i2c_address >> 1);
+ priv->i2c = i2c;
+ priv->set_tuner_data = config->set_tuner_priv;
+ priv->set_tuner = config->set_tuner_callback;
+ priv->xtal = config->xtal;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ if (helene_x_pon(priv) != 0) {
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+ kfree(priv);
+ return NULL;
+ }
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+
+ memcpy(&fe->ops.tuner_ops, &helene_tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+ fe->tuner_priv = priv;
+ dev_info(&priv->i2c->dev,
+ "Sony HELENE Ter attached on addr=%x at I2C adapter %p\n",
+ priv->i2c_address, priv->i2c);
+ return fe;
+}
+EXPORT_SYMBOL(helene_attach);
+
+MODULE_DESCRIPTION("Sony HELENE Sat/Ter tuner driver");
+MODULE_AUTHOR("Abylay Ospan <aospan@netup.ru>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/helene.h b/drivers/media/dvb-frontends/helene.h
new file mode 100644
index 000000000..e1b9224cf
--- /dev/null
+++ b/drivers/media/dvb-frontends/helene.h
@@ -0,0 +1,79 @@
+/*
+ * helene.h
+ *
+ * Sony HELENE DVB-S/S2/T/T2/C/C2/ISDB-T/S tuner driver (CXD2858ER)
+ *
+ * Copyright 2012 Sony Corporation
+ * Copyright (C) 2014 NetUP Inc.
+ * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DVB_HELENE_H__
+#define __DVB_HELENE_H__
+
+#include <linux/kconfig.h>
+#include <linux/dvb/frontend.h>
+#include <linux/i2c.h>
+
+enum helene_xtal {
+ SONY_HELENE_XTAL_16000, /* 16 MHz */
+ SONY_HELENE_XTAL_20500, /* 20.5 MHz */
+ SONY_HELENE_XTAL_24000, /* 24 MHz */
+ SONY_HELENE_XTAL_41000 /* 41 MHz */
+};
+
+/**
+ * struct helene_config - the configuration of 'Helene' tuner driver
+ * @i2c_address: I2C address of the tuner
+ * @xtal_freq_mhz: Oscillator frequency, MHz
+ * @set_tuner_priv: Callback function private context
+ * @set_tuner_callback: Callback function that notifies the parent driver
+ * which tuner is active now
+ * @xtal: Reference oscillator selection (enum helene_xtal)
+ */
+struct helene_config {
+ u8 i2c_address;
+ u8 xtal_freq_mhz;
+ void *set_tuner_priv;
+ int (*set_tuner_callback)(void *, int);
+ enum helene_xtal xtal;
+};
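+
+/*
+ * Typical use from a bridge driver (sketch; the values below are
+ * illustrative, not board defaults). Note that i2c_address is the
+ * 8-bit form; the driver shifts it right by one internally.
+ *
+ * static const struct helene_config helene_conf = {
+ * .i2c_address = 0xc0,
+ * .xtal = SONY_HELENE_XTAL_24000,
+ * };
+ *
+ * if (!helene_attach(fe, &helene_conf, i2c))
+ * return -ENODEV;
+ */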
+
+#if IS_REACHABLE(CONFIG_DVB_HELENE)
+extern struct dvb_frontend *helene_attach(struct dvb_frontend *fe,
+ const struct helene_config *config,
+ struct i2c_adapter *i2c);
+
+extern struct dvb_frontend *helene_attach_s(struct dvb_frontend *fe,
+ const struct helene_config *config,
+ struct i2c_adapter *i2c);
+#else
+static inline struct dvb_frontend *helene_attach(struct dvb_frontend *fe,
+ const struct helene_config *config,
+ struct i2c_adapter *i2c)
+{
+ pr_warn("%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+
+static inline struct dvb_frontend *helene_attach_s(struct dvb_frontend *fe,
+ const struct helene_config *config,
+ struct i2c_adapter *i2c)
+{
+ pr_warn("%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+#endif
diff --git a/drivers/media/dvb-frontends/horus3a.c b/drivers/media/dvb-frontends/horus3a.c
index 000606af7..a98bca527 100644
--- a/drivers/media/dvb-frontends/horus3a.c
+++ b/drivers/media/dvb-frontends/horus3a.c
@@ -66,7 +66,7 @@ static int horus3a_write_regs(struct horus3a_priv *priv,
}
};
- if (len + 1 >= sizeof(buf)) {
+ if (len + 1 > sizeof(buf)) {
dev_warn(&priv->i2c->dev,"wr reg=%04x: len=%d is too big!\n",
reg, len + 1);
return -E2BIG;
@@ -272,24 +272,6 @@ static int horus3a_set_params(struct dvb_frontend *fe)
if (fc_lpf > 36)
fc_lpf = 36;
} else if (p->delivery_system == SYS_DVBS2) {
- int rolloff;
-
- switch (p->rolloff) {
- case ROLLOFF_35:
- rolloff = 35;
- break;
- case ROLLOFF_25:
- rolloff = 25;
- break;
- case ROLLOFF_20:
- rolloff = 20;
- break;
- case ROLLOFF_AUTO:
- default:
- dev_err(&priv->i2c->dev,
- "horus3a: auto roll-off is not supported\n");
- return -EINVAL;
- }
/*
* SR <= 4.5:
* fc_lpf = 5
@@ -302,11 +284,9 @@ static int horus3a_set_params(struct dvb_frontend *fe)
if (symbol_rate <= 4500)
fc_lpf = 5;
else if (symbol_rate <= 10000)
- fc_lpf = (u8)DIV_ROUND_UP(
- symbol_rate * (200 + rolloff), 200000);
+ fc_lpf = (u8)((symbol_rate * 11 + (10000-1)) / 10000);
else
- fc_lpf = (u8)DIV_ROUND_UP(
- symbol_rate * (100 + rolloff), 200000) + 5;
+ fc_lpf = (u8)((symbol_rate * 3 + (5000-1)) / 5000 + 5);
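+ /*
+ * 11/10000 == (200+20)/200000 and 3/5000 == (100+20)/200000,
+ * i.e. the roll-off handling removed above is now fixed at 20%.
+ */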
/* 5 <= fc_lpf <= 36 is valid */
if (fc_lpf > 36)
fc_lpf = 36;
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index 1c312302c..3c1420d3f 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -306,8 +306,8 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
const struct m88ds3103_reg_val *init;
u8 u8tmp, u8tmp1 = 0, u8tmp2 = 0; /* silence compiler warning */
u8 buf[3];
- u16 u16tmp, divide_ratio = 0;
- u32 tuner_frequency, target_mclk;
+ u16 u16tmp;
+ u32 tuner_frequency_khz, target_mclk;
s32 s32tmp;
dev_dbg(&client->dev,
@@ -344,7 +344,7 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
}
if (fe->ops.tuner_ops.get_frequency) {
- ret = fe->ops.tuner_ops.get_frequency(fe, &tuner_frequency);
+ ret = fe->ops.tuner_ops.get_frequency(fe, &tuner_frequency_khz);
if (ret)
goto err;
} else {
@@ -353,20 +353,20 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
* actual frequency used. Carrier offset calculation is not
* valid.
*/
- tuner_frequency = c->frequency;
+ tuner_frequency_khz = c->frequency;
}
/* select M88RS6000 demod main mclk and ts mclk from tuner die. */
if (dev->chip_id == M88RS6000_CHIP_ID) {
if (c->symbol_rate > 45010000)
- dev->mclk_khz = 110250;
+ dev->mclk = 110250000;
else
- dev->mclk_khz = 96000;
+ dev->mclk = 96000000;
if (c->delivery_system == SYS_DVBS)
- target_mclk = 96000;
+ target_mclk = 96000000;
else
- target_mclk = 144000;
+ target_mclk = 144000000;
/* Enable demod clock path */
ret = regmap_write(dev->regmap, 0x06, 0x00);
@@ -375,7 +375,7 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
usleep_range(10000, 20000);
} else {
/* set M88DS3103 mclk and ts mclk. */
- dev->mclk_khz = 96000;
+ dev->mclk = 96000000;
switch (dev->cfg->ts_mode) {
case M88DS3103_TS_SERIAL:
@@ -385,14 +385,14 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
case M88DS3103_TS_PARALLEL:
case M88DS3103_TS_CI:
if (c->delivery_system == SYS_DVBS)
- target_mclk = 96000;
+ target_mclk = 96000000;
else {
if (c->symbol_rate < 18000000)
- target_mclk = 96000;
+ target_mclk = 96000000;
else if (c->symbol_rate < 28000000)
- target_mclk = 144000;
+ target_mclk = 144000000;
else
- target_mclk = 192000;
+ target_mclk = 192000000;
}
break;
default:
@@ -402,15 +402,15 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
}
switch (target_mclk) {
- case 96000:
+ case 96000000:
u8tmp1 = 0x02; /* 0b10 */
u8tmp2 = 0x01; /* 0b01 */
break;
- case 144000:
+ case 144000000:
u8tmp1 = 0x00; /* 0b00 */
u8tmp2 = 0x01; /* 0b01 */
break;
- case 192000:
+ case 192000000:
u8tmp1 = 0x03; /* 0b11 */
u8tmp2 = 0x00; /* 0b00 */
break;
@@ -464,8 +464,8 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
}
if (dev->chip_id == M88RS6000_CHIP_ID) {
- if ((c->delivery_system == SYS_DVBS2)
- && ((c->symbol_rate / 1000) <= 5000)) {
+ if (c->delivery_system == SYS_DVBS2 &&
+ c->symbol_rate <= 5000000) {
ret = regmap_write(dev->regmap, 0xc0, 0x04);
if (ret)
goto err;
@@ -522,37 +522,25 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
ret = m88ds3103_update_bits(dev, 0x29, 0x20, u8tmp1);
if (ret)
goto err;
- u8tmp1 = 0;
- u8tmp2 = 0;
+ u16tmp = 0;
+ u8tmp1 = 0x3f;
+ u8tmp2 = 0x3f;
break;
default:
- if (dev->cfg->ts_clk) {
- divide_ratio = DIV_ROUND_UP(target_mclk, dev->cfg->ts_clk);
- u8tmp1 = divide_ratio / 2;
- u8tmp2 = DIV_ROUND_UP(divide_ratio, 2);
- }
+ u16tmp = DIV_ROUND_UP(target_mclk, dev->cfg->ts_clk);
+ u8tmp1 = u16tmp / 2 - 1;
+ u8tmp2 = DIV_ROUND_UP(u16tmp, 2) - 1;
}
- dev_dbg(&client->dev,
- "target_mclk=%d ts_clk=%d divide_ratio=%d\n",
- target_mclk, dev->cfg->ts_clk, divide_ratio);
+ dev_dbg(&client->dev, "target_mclk=%u ts_clk=%u ts_clk_divide_ratio=%u\n",
+ target_mclk, dev->cfg->ts_clk, u16tmp);
- u8tmp1--;
- u8tmp2--;
/* u8tmp1[5:2] => fe[3:0], u8tmp1[1:0] => ea[7:6] */
- u8tmp1 &= 0x3f;
/* u8tmp2[5:0] => ea[5:0] */
- u8tmp2 &= 0x3f;
-
- ret = regmap_bulk_read(dev->regmap, 0xfe, &u8tmp, 1);
+ u8tmp = (u8tmp1 >> 2) & 0x0f;
+ ret = regmap_update_bits(dev->regmap, 0xfe, 0x0f, u8tmp);
if (ret)
goto err;
-
- u8tmp = ((u8tmp & 0xf0) << 0) | u8tmp1 >> 2;
- ret = regmap_write(dev->regmap, 0xfe, u8tmp);
- if (ret)
- goto err;
-
u8tmp = ((u8tmp1 & 0x03) << 6) | u8tmp2 >> 0;
ret = regmap_write(dev->regmap, 0xea, u8tmp);
if (ret)
@@ -581,7 +569,7 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
if (ret)
goto err;
- u16tmp = DIV_ROUND_CLOSEST((c->symbol_rate / 1000) << 15, dev->mclk_khz / 2);
+ u16tmp = DIV_ROUND_CLOSEST_ULL((u64)c->symbol_rate * 0x10000, dev->mclk);
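+ /* Symbol-rate word = symbol_rate / mclk * 2^16, rounded to nearest */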
buf[0] = (u16tmp >> 0) & 0xff;
buf[1] = (u16tmp >> 8) & 0xff;
ret = regmap_bulk_write(dev->regmap, 0x61, buf, 2);
@@ -601,13 +589,11 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
goto err;
dev_dbg(&client->dev, "carrier offset=%d\n",
- (tuner_frequency - c->frequency));
-
- s32tmp = 0x10000 * (tuner_frequency - c->frequency);
- s32tmp = DIV_ROUND_CLOSEST(s32tmp, dev->mclk_khz);
- if (s32tmp < 0)
- s32tmp += 0x10000;
+ (tuner_frequency_khz - c->frequency));
+ /* Use 32-bit calc as there is no s64 version of DIV_ROUND_CLOSEST() */
+ s32tmp = 0x10000 * (tuner_frequency_khz - c->frequency);
+ s32tmp = DIV_ROUND_CLOSEST(s32tmp, dev->mclk / 1000);
buf[0] = (s32tmp >> 0) & 0xff;
buf[1] = (s32tmp >> 8) & 0xff;
ret = regmap_bulk_write(dev->regmap, 0x5e, buf, 2);
@@ -635,10 +621,10 @@ static int m88ds3103_init(struct dvb_frontend *fe)
struct m88ds3103_dev *dev = fe->demodulator_priv;
struct i2c_client *client = dev->client;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int ret, len, remaining;
+ int ret, len, rem;
unsigned int utmp;
- const struct firmware *fw = NULL;
- u8 *fw_file;
+ const struct firmware *firmware;
+ const char *name;
dev_dbg(&client->dev, "\n");
@@ -664,7 +650,7 @@ static int m88ds3103_init(struct dvb_frontend *fe)
dev_dbg(&client->dev, "firmware=%02x\n", utmp);
if (utmp)
- goto skip_fw_download;
+ goto warm;
/* global reset, global diseqc reset, golbal fec reset */
ret = regmap_write(dev->regmap, 0x07, 0xe0);
@@ -679,52 +665,47 @@ static int m88ds3103_init(struct dvb_frontend *fe)
m88ds3103_ops.info.name);
if (dev->chip_id == M88RS6000_CHIP_ID)
- fw_file = M88RS6000_FIRMWARE;
+ name = M88RS6000_FIRMWARE;
else
- fw_file = M88DS3103_FIRMWARE;
+ name = M88DS3103_FIRMWARE;
/* request the firmware, this will block and timeout */
- ret = reject_firmware(&fw, fw_file, &client->dev);
+ ret = reject_firmware(&firmware, name, &client->dev);
if (ret) {
- dev_err(&client->dev, "firmware file '%s' not found\n", fw_file);
+ dev_err(&client->dev, "firmware file '%s' not found\n", name);
goto err;
}
- dev_info(&client->dev, "downloading firmware from file '%s'\n",
- fw_file);
+ dev_info(&client->dev, "downloading firmware from file '%s'\n", name);
ret = regmap_write(dev->regmap, 0xb2, 0x01);
if (ret)
- goto error_fw_release;
-
- for (remaining = fw->size; remaining > 0;
- remaining -= (dev->cfg->i2c_wr_max - 1)) {
- len = remaining;
- if (len > (dev->cfg->i2c_wr_max - 1))
- len = (dev->cfg->i2c_wr_max - 1);
+ goto err_release_firmware;
+ for (rem = firmware->size; rem > 0; rem -= (dev->cfg->i2c_wr_max - 1)) {
+ len = min(dev->cfg->i2c_wr_max - 1, rem);
ret = regmap_bulk_write(dev->regmap, 0xb0,
- &fw->data[fw->size - remaining], len);
+ &firmware->data[firmware->size - rem],
+ len);
if (ret) {
- dev_err(&client->dev, "firmware download failed=%d\n",
+ dev_err(&client->dev, "firmware download failed %d\n",
ret);
- goto error_fw_release;
+ goto err_release_firmware;
}
}
ret = regmap_write(dev->regmap, 0xb2, 0x00);
if (ret)
- goto error_fw_release;
+ goto err_release_firmware;
- release_firmware(fw);
- fw = NULL;
+ release_firmware(firmware);
ret = regmap_read(dev->regmap, 0xb9, &utmp);
if (ret)
goto err;
if (!utmp) {
+ ret = -EINVAL;
dev_info(&client->dev, "firmware did not run\n");
- ret = -EFAULT;
goto err;
}
@@ -733,7 +714,7 @@ static int m88ds3103_init(struct dvb_frontend *fe)
dev_info(&client->dev, "firmware version: %X.%X\n",
(utmp >> 4) & 0xf, (utmp >> 0 & 0xf));
-skip_fw_download:
+warm:
/* warm state */
dev->warm = true;
@@ -746,8 +727,8 @@ skip_fw_download:
c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
return 0;
-error_fw_release:
- release_firmware(fw);
+err_release_firmware:
+ release_firmware(firmware);
err:
dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
@@ -952,8 +933,7 @@ static int m88ds3103_get_frontend(struct dvb_frontend *fe,
if (ret)
goto err;
- c->symbol_rate = 1ull * ((buf[1] << 8) | (buf[0] << 0)) *
- dev->mclk_khz * 1000 / 0x10000;
+ c->symbol_rate = DIV_ROUND_CLOSEST_ULL((u64)(buf[1] << 8 | buf[0] << 0) * dev->mclk, 0x10000);
return 0;
err:
@@ -1119,8 +1099,9 @@ static int m88ds3103_diseqc_send_master_cmd(struct dvb_frontend *fe,
#define SEND_MASTER_CMD_TIMEOUT 120
timeout = jiffies + msecs_to_jiffies(SEND_MASTER_CMD_TIMEOUT);
- /* DiSEqC message typical period is 54 ms */
- usleep_range(50000, 54000);
+ /* DiSEqC message period is 13.5 ms per byte */
+ utmp = diseqc_cmd->msg_len * 13500;
+ usleep_range(utmp - 4000, utmp);
for (utmp = 1; !time_after(jiffies, timeout) && utmp;) {
ret = regmap_read(dev->regmap, 0xa1, &utmp);
@@ -1395,7 +1376,7 @@ static int m88ds3103_probe(struct i2c_client *client,
dev->config.clock = pdata->clk;
dev->config.i2c_wr_max = pdata->i2c_wr_max;
dev->config.ts_mode = pdata->ts_mode;
- dev->config.ts_clk = pdata->ts_clk;
+ dev->config.ts_clk = pdata->ts_clk * 1000;
dev->config.ts_clk_pol = pdata->ts_clk_pol;
dev->config.spec_inv = pdata->spec_inv;
dev->config.agc_inv = pdata->agc_inv;
@@ -1446,6 +1427,11 @@ static int m88ds3103_probe(struct i2c_client *client,
goto err_kfree;
}
+ if (!pdata->ts_clk) {
+ ret = -EINVAL;
+ goto err_kfree;
+ }
+
/* 0x29 register is defined differently for m88rs6000. */
/* set internal tuner address to 0x21 */
if (dev->chip_id == M88RS6000_CHIP_ID)
diff --git a/drivers/media/dvb-frontends/m88ds3103_priv.h b/drivers/media/dvb-frontends/m88ds3103_priv.h
index fcf3a7b95..1447cde25 100644
--- a/drivers/media/dvb-frontends/m88ds3103_priv.h
+++ b/drivers/media/dvb-frontends/m88ds3103_priv.h
@@ -27,7 +27,6 @@
#define M88DS3103_FIRMWARE "/*(DEBLOBBED)*/"
#define M88RS6000_FIRMWARE "/*(DEBLOBBED)*/"
-#define M88DS3103_MCLK_KHZ 96000
#define M88RS6000_CHIP_ID 0x74
#define M88DS3103_CHIP_ID 0x70
@@ -46,7 +45,7 @@ struct m88ds3103_dev {
/* auto detect chip id to do different config */
u8 chip_id;
/* main mclk is calculated for M88RS6000 dynamically */
- s32 mclk_khz;
+ s32 mclk;
u64 post_bit_error;
u64 post_bit_count;
};
diff --git a/drivers/media/dvb-frontends/m88rs2000.c b/drivers/media/dvb-frontends/m88rs2000.c
index a09b12313..ef79a4ec3 100644
--- a/drivers/media/dvb-frontends/m88rs2000.c
+++ b/drivers/media/dvb-frontends/m88rs2000.c
@@ -609,7 +609,7 @@ static int m88rs2000_set_frontend(struct dvb_frontend *fe)
{
struct m88rs2000_state *state = fe->demodulator_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- enum fe_status status;
+ enum fe_status status = 0;
int i, ret = 0;
u32 tuner_freq;
s16 offset = 0;
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index fb88dddaf..41325328a 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -301,10 +301,11 @@ static int mb86a20s_read_status(struct dvb_frontend *fe, enum fe_status *status)
*status = 0;
- val = mb86a20s_readreg(state, 0x0a) & 0xf;
+ val = mb86a20s_readreg(state, 0x0a);
if (val < 0)
return val;
+ val &= 0xf;
if (val >= 2)
*status |= FE_HAS_SIGNAL;
diff --git a/drivers/media/dvb-frontends/mn88472.c b/drivers/media/dvb-frontends/mn88472.c
new file mode 100644
index 000000000..150560f80
--- /dev/null
+++ b/drivers/media/dvb-frontends/mn88472.c
@@ -0,0 +1,613 @@
+/*
+ * Panasonic MN88472 DVB-T/T2/C demodulator driver
+ *
+ * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mn88472_priv.h"
+
+static int mn88472_get_tune_settings(struct dvb_frontend *fe,
+ struct dvb_frontend_tune_settings *s)
+{
+ s->min_delay_ms = 1000;
+ return 0;
+}
+
+static int mn88472_read_status(struct dvb_frontend *fe, enum fe_status *status)
+{
+ struct i2c_client *client = fe->demodulator_priv;
+ struct mn88472_dev *dev = i2c_get_clientdata(client);
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret;
+ unsigned int utmp;
+
+ if (!dev->active) {
+ ret = -EAGAIN;
+ goto err;
+ }
+
+ switch (c->delivery_system) {
+ case SYS_DVBT:
+ ret = regmap_read(dev->regmap[0], 0x7f, &utmp);
+ if (ret)
+ goto err;
+ if ((utmp & 0x0f) >= 0x09)
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
+ FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
+ else
+ *status = 0;
+ break;
+ case SYS_DVBT2:
+ ret = regmap_read(dev->regmap[2], 0x92, &utmp);
+ if (ret)
+ goto err;
+ if ((utmp & 0x0f) >= 0x0d)
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
+ FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
+ else if ((utmp & 0x0f) >= 0x0a)
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
+ FE_HAS_VITERBI;
+ else if ((utmp & 0x0f) >= 0x07)
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER;
+ else
+ *status = 0;
+ break;
+ case SYS_DVBC_ANNEX_A:
+ ret = regmap_read(dev->regmap[1], 0x84, &utmp);
+ if (ret)
+ goto err;
+ if ((utmp & 0x0f) >= 0x08)
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
+ FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
+ else
+ *status = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+
+ return 0;
+err:
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static int mn88472_set_frontend(struct dvb_frontend *fe)
+{
+ struct i2c_client *client = fe->demodulator_priv;
+ struct mn88472_dev *dev = i2c_get_clientdata(client);
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret, i;
+ unsigned int utmp;
+ u32 if_frequency;
+ u8 buf[3], delivery_system_val, bandwidth_val, *bandwidth_vals_ptr;
+ u8 reg_bank0_b4_val, reg_bank0_cd_val, reg_bank0_d4_val;
+ u8 reg_bank0_d6_val;
+
+ dev_dbg(&client->dev,
+ "delivery_system=%u modulation=%u frequency=%u bandwidth_hz=%u symbol_rate=%u inversion=%d stream_id=%d\n",
+ c->delivery_system, c->modulation, c->frequency,
+ c->bandwidth_hz, c->symbol_rate, c->inversion, c->stream_id);
+
+ if (!dev->active) {
+ ret = -EAGAIN;
+ goto err;
+ }
+
+ switch (c->delivery_system) {
+ case SYS_DVBT:
+ delivery_system_val = 0x02;
+ reg_bank0_b4_val = 0x00;
+ reg_bank0_cd_val = 0x1f;
+ reg_bank0_d4_val = 0x0a;
+ reg_bank0_d6_val = 0x48;
+ break;
+ case SYS_DVBT2:
+ delivery_system_val = 0x03;
+ reg_bank0_b4_val = 0xf6;
+ reg_bank0_cd_val = 0x01;
+ reg_bank0_d4_val = 0x09;
+ reg_bank0_d6_val = 0x46;
+ break;
+ case SYS_DVBC_ANNEX_A:
+ delivery_system_val = 0x04;
+ reg_bank0_b4_val = 0x00;
+ reg_bank0_cd_val = 0x17;
+ reg_bank0_d4_val = 0x09;
+ reg_bank0_d6_val = 0x48;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (c->delivery_system) {
+ case SYS_DVBT:
+ case SYS_DVBT2:
+ switch (c->bandwidth_hz) {
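+ /*
+ * Per-bandwidth 7-byte coefficient strings; they are written to
+ * bank-2 registers 0x13..0x19 further below. The byte values are
+ * opaque vendor-supplied filter settings.
+ */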
+ case 5000000:
+ bandwidth_vals_ptr = "\xe5\x99\x9a\x1b\xa9\x1b\xa9";
+ bandwidth_val = 0x03;
+ break;
+ case 6000000:
+ bandwidth_vals_ptr = "\xbf\x55\x55\x15\x6b\x15\x6b";
+ bandwidth_val = 0x02;
+ break;
+ case 7000000:
+ bandwidth_vals_ptr = "\xa4\x00\x00\x0f\x2c\x0f\x2c";
+ bandwidth_val = 0x01;
+ break;
+ case 8000000:
+ bandwidth_vals_ptr = "\x8f\x80\x00\x08\xee\x08\xee";
+ bandwidth_val = 0x00;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+ break;
+ case SYS_DVBC_ANNEX_A:
+ bandwidth_vals_ptr = NULL;
+ bandwidth_val = 0x00;
+ break;
+ default:
+ break;
+ }
+
+ /* Program tuner */
+ if (fe->ops.tuner_ops.set_params) {
+ ret = fe->ops.tuner_ops.set_params(fe);
+ if (ret)
+ goto err;
+ }
+
+ if (fe->ops.tuner_ops.get_if_frequency) {
+ ret = fe->ops.tuner_ops.get_if_frequency(fe, &if_frequency);
+ if (ret)
+ goto err;
+
+ dev_dbg(&client->dev, "get_if_frequency=%d\n", if_frequency);
+ } else {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = regmap_write(dev->regmap[2], 0x00, 0x66);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap[2], 0x01, 0x00);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap[2], 0x02, 0x01);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap[2], 0x03, delivery_system_val);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap[2], 0x04, bandwidth_val);
+ if (ret)
+ goto err;
+
+ /* IF: register word = if_frequency / xtal * 2^24, rounded to nearest */
+ utmp = DIV_ROUND_CLOSEST_ULL((u64)if_frequency * 0x1000000, dev->clk);
+ buf[0] = (utmp >> 16) & 0xff;
+ buf[1] = (utmp >> 8) & 0xff;
+ buf[2] = (utmp >> 0) & 0xff;
+ for (i = 0; i < 3; i++) {
+ ret = regmap_write(dev->regmap[2], 0x10 + i, buf[i]);
+ if (ret)
+ goto err;
+ }
+
+ /* Bandwidth */
+ if (bandwidth_vals_ptr) {
+ for (i = 0; i < 7; i++) {
+ ret = regmap_write(dev->regmap[2], 0x13 + i,
+ bandwidth_vals_ptr[i]);
+ if (ret)
+ goto err;
+ }
+ }
+
+ ret = regmap_write(dev->regmap[0], 0xb4, reg_bank0_b4_val);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap[0], 0xcd, reg_bank0_cd_val);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap[0], 0xd4, reg_bank0_d4_val);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap[0], 0xd6, reg_bank0_d6_val);
+ if (ret)
+ goto err;
+
+ switch (c->delivery_system) {
+ case SYS_DVBT:
+ ret = regmap_write(dev->regmap[0], 0x07, 0x26);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap[0], 0x00, 0xba);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap[0], 0x01, 0x13);
+ if (ret)
+ goto err;
+ break;
+ case SYS_DVBT2:
+ ret = regmap_write(dev->regmap[2], 0x2b, 0x13);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap[2], 0x4f, 0x05);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap[1], 0xf6, 0x05);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap[2], 0x32, c->stream_id);
+ if (ret)
+ goto err;
+ break;
+ case SYS_DVBC_ANNEX_A:
+ break;
+ default:
+ break;
+ }
+
+ /* Reset FSM */
+ ret = regmap_write(dev->regmap[2], 0xf8, 0x9f);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static int mn88472_init(struct dvb_frontend *fe)
+{
+ struct i2c_client *client = fe->demodulator_priv;
+ struct mn88472_dev *dev = i2c_get_clientdata(client);
+ int ret, len, rem;
+ unsigned int utmp;
+ const struct firmware *firmware;
+ const char *name = MN88472_FIRMWARE;
+
+ dev_dbg(&client->dev, "\n");
+
+ /* Power up */
+ ret = regmap_write(dev->regmap[2], 0x05, 0x00);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap[2], 0x0b, 0x00);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap[2], 0x0c, 0x00);
+ if (ret)
+ goto err;
+
+ /* Check if firmware is already running */
+ ret = regmap_read(dev->regmap[0], 0xf5, &utmp);
+ if (ret)
+ goto err;
+ if (!(utmp & 0x01))
+ goto warm;
+
+ ret = reject_firmware(&firmware, name, &client->dev);
+ if (ret) {
+ dev_err(&client->dev, "firmware file '%s' not found\n", name);
+ goto err;
+ }
+
+ dev_info(&client->dev, "downloading firmware from file '%s'\n", name);
+
+ ret = regmap_write(dev->regmap[0], 0xf5, 0x03);
+ if (ret)
+ goto err_release_firmware;
+
+ for (rem = firmware->size; rem > 0; rem -= (dev->i2c_write_max - 1)) {
+ len = min(dev->i2c_write_max - 1, rem);
+ ret = regmap_bulk_write(dev->regmap[0], 0xf6,
+ &firmware->data[firmware->size - rem],
+ len);
+ if (ret) {
+ dev_err(&client->dev, "firmware download failed %d\n",
+ ret);
+ goto err_release_firmware;
+ }
+ }
+
+ /* Parity check of firmware */
+ ret = regmap_read(dev->regmap[0], 0xf8, &utmp);
+ if (ret)
+ goto err_release_firmware;
+ if (utmp & 0x10) {
+ ret = -EINVAL;
+ dev_err(&client->dev, "firmware did not run\n");
+ goto err_release_firmware;
+ }
+
+ ret = regmap_write(dev->regmap[0], 0xf5, 0x00);
+ if (ret)
+ goto err_release_firmware;
+
+ release_firmware(firmware);
+warm:
+ /* TS config */
+ switch (dev->ts_mode) {
+ case SERIAL_TS_MODE:
+ utmp = 0x1d;
+ break;
+ case PARALLEL_TS_MODE:
+ utmp = 0x00;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = regmap_write(dev->regmap[2], 0x08, utmp);
+ if (ret)
+ goto err;
+
+ switch (dev->ts_clk) {
+ case VARIABLE_TS_CLOCK:
+ utmp = 0xe3;
+ break;
+ case FIXED_TS_CLOCK:
+ utmp = 0xe1;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = regmap_write(dev->regmap[0], 0xd9, utmp);
+ if (ret)
+ goto err;
+
+ dev->active = true;
+
+ return 0;
+err_release_firmware:
+ release_firmware(firmware);
+err:
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static int mn88472_sleep(struct dvb_frontend *fe)
+{
+ struct i2c_client *client = fe->demodulator_priv;
+ struct mn88472_dev *dev = i2c_get_clientdata(client);
+ int ret;
+
+ dev_dbg(&client->dev, "\n");
+
+ /* Power down */
+ ret = regmap_write(dev->regmap[2], 0x0c, 0x30);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap[2], 0x0b, 0x30);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap[2], 0x05, 0x3e);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static struct dvb_frontend_ops mn88472_ops = {
+ .delsys = {SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_A},
+ .info = {
+ .name = "Panasonic MN88472",
+ .symbol_rate_min = 1000000,
+ .symbol_rate_max = 7200000,
+ .caps = FE_CAN_FEC_1_2 |
+ FE_CAN_FEC_2_3 |
+ FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_5_6 |
+ FE_CAN_FEC_7_8 |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK |
+ FE_CAN_QAM_16 |
+ FE_CAN_QAM_32 |
+ FE_CAN_QAM_64 |
+ FE_CAN_QAM_128 |
+ FE_CAN_QAM_256 |
+ FE_CAN_QAM_AUTO |
+ FE_CAN_TRANSMISSION_MODE_AUTO |
+ FE_CAN_GUARD_INTERVAL_AUTO |
+ FE_CAN_HIERARCHY_AUTO |
+ FE_CAN_MUTE_TS |
+ FE_CAN_2G_MODULATION |
+ FE_CAN_MULTISTREAM
+ },
+
+ .get_tune_settings = mn88472_get_tune_settings,
+
+ .init = mn88472_init,
+ .sleep = mn88472_sleep,
+
+ .set_frontend = mn88472_set_frontend,
+
+ .read_status = mn88472_read_status,
+};
+
+static struct dvb_frontend *mn88472_get_dvb_frontend(struct i2c_client *client)
+{
+ struct mn88472_dev *dev = i2c_get_clientdata(client);
+
+ dev_dbg(&client->dev, "\n");
+
+ return &dev->fe;
+}
+
+static int mn88472_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct mn88472_config *pdata = client->dev.platform_data;
+ struct mn88472_dev *dev;
+ int ret;
+ unsigned int utmp;
+ static const struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ };
+
+ dev_dbg(&client->dev, "\n");
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ dev->i2c_write_max = pdata->i2c_wr_max ? pdata->i2c_wr_max : ~0;
+ dev->clk = pdata->xtal;
+ dev->ts_mode = pdata->ts_mode;
+ dev->ts_clk = pdata->ts_clock;
+ dev->client[0] = client;
+ dev->regmap[0] = regmap_init_i2c(dev->client[0], &regmap_config);
+ if (IS_ERR(dev->regmap[0])) {
+ ret = PTR_ERR(dev->regmap[0]);
+ goto err_kfree;
+ }
+
+ /* Check demod answers with correct chip id */
+ ret = regmap_read(dev->regmap[0], 0xff, &utmp);
+ if (ret)
+ goto err_regmap_0_regmap_exit;
+
+ dev_dbg(&client->dev, "chip id=%02x\n", utmp);
+
+ if (utmp != 0x02) {
+ ret = -ENODEV;
+ goto err_regmap_0_regmap_exit;
+ }
+
+ /*
+ * The chip has three I2C addresses for its register banks: 0x18,
+ * 0x1a and 0x1c. Register two dummy clients, 0x1a and 0x1c, so that
+ * each register bank gets its own I2C client.
+ *
+ * Also, register bank 2 does not support sequential I/O; only single
+ * register writes or reads are allowed to that bank.
+ */
+ dev->client[1] = i2c_new_dummy(client->adapter, 0x1a);
+ if (!dev->client[1]) {
+ ret = -ENODEV;
+ dev_err(&client->dev, "I2C registration failed\n");
+ goto err_regmap_0_regmap_exit;
+ }
+ dev->regmap[1] = regmap_init_i2c(dev->client[1], &regmap_config);
+ if (IS_ERR(dev->regmap[1])) {
+ ret = PTR_ERR(dev->regmap[1]);
+ goto err_client_1_i2c_unregister_device;
+ }
+ i2c_set_clientdata(dev->client[1], dev);
+
+ dev->client[2] = i2c_new_dummy(client->adapter, 0x1c);
+ if (!dev->client[2]) {
+ ret = -ENODEV;
+ dev_err(&client->dev, "2nd I2C registration failed\n");
+ goto err_regmap_1_regmap_exit;
+ }
+ dev->regmap[2] = regmap_init_i2c(dev->client[2], &regmap_config);
+ if (IS_ERR(dev->regmap[2])) {
+ ret = PTR_ERR(dev->regmap[2]);
+ goto err_client_2_i2c_unregister_device;
+ }
+ i2c_set_clientdata(dev->client[2], dev);
+
+ /* Sleep because chip is active by default */
+ ret = regmap_write(dev->regmap[2], 0x05, 0x3e);
+ if (ret)
+ goto err_regmap_2_regmap_exit;
+
+ /* Create dvb frontend */
+ memcpy(&dev->fe.ops, &mn88472_ops, sizeof(struct dvb_frontend_ops));
+ dev->fe.demodulator_priv = client;
+ *pdata->fe = &dev->fe;
+ i2c_set_clientdata(client, dev);
+
+ /* Setup callbacks */
+ pdata->get_dvb_frontend = mn88472_get_dvb_frontend;
+
+ dev_info(&client->dev, "Panasonic MN88472 successfully identified\n");
+
+ return 0;
+err_regmap_2_regmap_exit:
+ regmap_exit(dev->regmap[2]);
+err_client_2_i2c_unregister_device:
+ i2c_unregister_device(dev->client[2]);
+err_regmap_1_regmap_exit:
+ regmap_exit(dev->regmap[1]);
+err_client_1_i2c_unregister_device:
+ i2c_unregister_device(dev->client[1]);
+err_regmap_0_regmap_exit:
+ regmap_exit(dev->regmap[0]);
+err_kfree:
+ kfree(dev);
+err:
+ dev_dbg(&client->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static int mn88472_remove(struct i2c_client *client)
+{
+ struct mn88472_dev *dev = i2c_get_clientdata(client);
+
+ dev_dbg(&client->dev, "\n");
+
+ regmap_exit(dev->regmap[2]);
+ i2c_unregister_device(dev->client[2]);
+
+ regmap_exit(dev->regmap[1]);
+ i2c_unregister_device(dev->client[1]);
+
+ regmap_exit(dev->regmap[0]);
+
+ kfree(dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id mn88472_id_table[] = {
+ {"mn88472", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, mn88472_id_table);
+
+static struct i2c_driver mn88472_driver = {
+ .driver = {
+ .name = "mn88472",
+ .suppress_bind_attrs = true,
+ },
+ .probe = mn88472_probe,
+ .remove = mn88472_remove,
+ .id_table = mn88472_id_table,
+};
+
+module_i2c_driver(mn88472_driver);
+
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("Panasonic MN88472 DVB-T/T2/C demodulator driver");
+MODULE_LICENSE("GPL");
+/*(DEBLOBBED)*/
diff --git a/drivers/media/dvb-frontends/mn88472.h b/drivers/media/dvb-frontends/mn88472.h
index 095294d29..323632523 100644
--- a/drivers/media/dvb-frontends/mn88472.h
+++ b/drivers/media/dvb-frontends/mn88472.h
@@ -19,23 +19,33 @@
#include <linux/dvb/frontend.h>
-enum ts_clock {
- VARIABLE_TS_CLOCK,
- FIXED_TS_CLOCK,
-};
+/**
+ * struct mn88472_config - Platform data for the mn88472 driver
+ * @xtal: Clock frequency.
+ * @ts_mode: TS mode.
+ * @ts_clock: TS clock config.
+ * @i2c_wr_max: Max number of bytes driver writes to I2C at once.
+ * @get_dvb_frontend: Get DVB frontend.
+ */
-enum ts_mode {
- SERIAL_TS_MODE,
- PARALLEL_TS_MODE,
-};
+/* Define old names for backward compatibility */
+#define VARIABLE_TS_CLOCK MN88472_TS_CLK_VARIABLE
+#define FIXED_TS_CLOCK MN88472_TS_CLK_FIXED
+#define SERIAL_TS_MODE MN88472_TS_MODE_SERIAL
+#define PARALLEL_TS_MODE MN88472_TS_MODE_PARALLEL
struct mn88472_config {
- /*
- * Max num of bytes given I2C adapter could write at once.
- * Default: none
- */
- u16 i2c_wr_max;
+ unsigned int xtal;
+
+#define MN88472_TS_MODE_SERIAL 0
+#define MN88472_TS_MODE_PARALLEL 1
+ int ts_mode;
+#define MN88472_TS_CLK_FIXED 0
+#define MN88472_TS_CLK_VARIABLE 1
+ int ts_clock;
+
+ u16 i2c_wr_max;
/* Everything after that is returned by the driver. */
@@ -43,14 +53,7 @@ struct mn88472_config {
* DVB frontend.
*/
struct dvb_frontend **fe;
-
- /*
- * Xtal frequency.
- * Hz
- */
- u32 xtal;
- int ts_mode;
- int ts_clock;
+ struct dvb_frontend* (*get_dvb_frontend)(struct i2c_client *);
};
#endif
diff --git a/drivers/media/dvb-frontends/mn88472_priv.h b/drivers/media/dvb-frontends/mn88472_priv.h
new file mode 100644
index 000000000..45c5789a7
--- /dev/null
+++ b/drivers/media/dvb-frontends/mn88472_priv.h
@@ -0,0 +1,38 @@
+/*
+ * Panasonic MN88472 DVB-T/T2/C demodulator driver
+ *
+ * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MN88472_PRIV_H
+#define MN88472_PRIV_H
+
+#include "dvb_frontend.h"
+#include "mn88472.h"
+#include <linux/firmware.h>
+#include <linux/regmap.h>
+
+#define MN88472_FIRMWARE "/*(DEBLOBBED)*/"
+
+struct mn88472_dev {
+ struct i2c_client *client[3];
+ struct regmap *regmap[3];
+ struct dvb_frontend fe;
+ u16 i2c_write_max;
+ unsigned int clk;
+ unsigned int active:1;
+ unsigned int ts_mode:1;
+ unsigned int ts_clk:1;
+};
+
+#endif
diff --git a/drivers/media/dvb-frontends/mn88473.c b/drivers/media/dvb-frontends/mn88473.c
index 553944aba..89d397553 100644
--- a/drivers/media/dvb-frontends/mn88473.c
+++ b/drivers/media/dvb-frontends/mn88473.c
@@ -330,7 +330,7 @@ static int mn88473_init(struct dvb_frontend *fe)
/* Request the firmware, this will block and timeout */
ret = reject_firmware(&fw, name, &client->dev);
if (ret) {
- dev_err(&client->dev, "firmare file '%s' not found\n", name);
+ dev_err(&client->dev, "firmware file '%s' not found\n", name);
goto err;
}
@@ -536,7 +536,7 @@ static int mn88473_probe(struct i2c_client *client,
/* Sleep because chip is active by default */
ret = regmap_write(dev->regmap[2], 0x05, 0x3e);
if (ret)
- goto err_client_2_i2c_unregister_device;
+ goto err_regmap_2_regmap_exit;
/* Create dvb frontend */
memcpy(&dev->frontend.ops, &mn88473_ops, sizeof(dev->frontend.ops));
@@ -547,7 +547,8 @@ static int mn88473_probe(struct i2c_client *client,
dev_info(&client->dev, "Panasonic MN88473 successfully identified\n");
return 0;
-
+err_regmap_2_regmap_exit:
+ regmap_exit(dev->regmap[2]);
err_client_2_i2c_unregister_device:
i2c_unregister_device(dev->client[2]);
err_regmap_1_regmap_exit:
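
[Editor's note] The mn88473 fix above is the standard probe() unwind rule: on failure, jump to the label that releases everything acquired so far, newest first. The bug class it removes, in schematic form (all names hypothetical):

	a = acquire_a();
	if (IS_ERR(a))
		return PTR_ERR(a);

	b = acquire_b();
	if (IS_ERR(b)) {
		ret = PTR_ERR(b);
		goto err_release_a;
	}

	ret = use(b);
	if (ret)
		goto err_release_b;	/* jumping to err_release_a here would leak b */

	return 0;
err_release_b:
	release_b(b);
err_release_a:
	release_a(a);
	return ret;

Before the fix, the failing regmap_write() jumped past regmap_exit(dev->regmap[2]), leaking the just-created regmap.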
diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c
index d25d1e0cd..87226056f 100644
--- a/drivers/media/dvb-frontends/rtl2830.c
+++ b/drivers/media/dvb-frontends/rtl2830.c
@@ -135,8 +135,6 @@ static int rtl2830_init(struct dvb_frontend *fe)
c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
c->post_bit_count.len = 1;
c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
- /* start statistics polling */
- schedule_delayed_work(&dev->stat_work, msecs_to_jiffies(2000));
dev->sleeping = false;
@@ -152,8 +150,6 @@ static int rtl2830_sleep(struct dvb_frontend *fe)
struct rtl2830_dev *dev = i2c_get_clientdata(client);
dev->sleeping = true;
- /* stop statistics polling */
- cancel_delayed_work_sync(&dev->stat_work);
dev->fe_status = 0;
return 0;
@@ -396,8 +392,10 @@ static int rtl2830_read_status(struct dvb_frontend *fe, enum fe_status *status)
{
struct i2c_client *client = fe->demodulator_priv;
struct rtl2830_dev *dev = i2c_get_clientdata(client);
- int ret;
- u8 u8tmp;
+ struct dtv_frontend_properties *c = &dev->fe.dtv_property_cache;
+ int ret, stmp;
+ unsigned int utmp;
+ u8 u8tmp, buf[2];
*status = 0;
@@ -419,6 +417,89 @@ static int rtl2830_read_status(struct dvb_frontend *fe, enum fe_status *status)
dev->fe_status = *status;
+ /* Signal strength */
+ if (dev->fe_status & FE_HAS_SIGNAL) {
+ /* Read IF AGC */
+ ret = rtl2830_bulk_read(client, 0x359, buf, 2);
+ if (ret)
+ goto err;
+
+ stmp = buf[0] << 8 | buf[1] << 0;
+ stmp = sign_extend32(stmp, 13);
+ utmp = clamp_val(-4 * stmp + 32767, 0x0000, 0xffff);
+
+ dev_dbg(&client->dev, "IF AGC=%d\n", stmp);
+
+ c->strength.stat[0].scale = FE_SCALE_RELATIVE;
+ c->strength.stat[0].uvalue = utmp;
+ } else {
+ c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
+
+ /* CNR */
+ if (dev->fe_status & FE_HAS_VITERBI) {
+ unsigned int hierarchy, constellation;
+ #define CONSTELLATION_NUM 3
+ #define HIERARCHY_NUM 4
+ static const u32 constant[CONSTELLATION_NUM][HIERARCHY_NUM] = {
+ {70705899, 70705899, 70705899, 70705899},
+ {82433173, 82433173, 87483115, 94445660},
+ {92888734, 92888734, 95487525, 99770748},
+ };
+
+ ret = rtl2830_bulk_read(client, 0x33c, &u8tmp, 1);
+ if (ret)
+ goto err;
+
+ constellation = (u8tmp >> 2) & 0x03; /* [3:2] */
+ if (constellation > CONSTELLATION_NUM - 1)
+ goto err;
+
+ hierarchy = (u8tmp >> 4) & 0x07; /* [6:4] */
+ if (hierarchy > HIERARCHY_NUM - 1)
+ goto err;
+
+ ret = rtl2830_bulk_read(client, 0x40c, buf, 2);
+ if (ret)
+ goto err;
+
+ utmp = buf[0] << 8 | buf[1] << 0;
+ if (utmp)
+ stmp = (constant[constellation][hierarchy] -
+ intlog10(utmp)) / ((1 << 24) / 10000);
+ else
+ stmp = 0;
+
+ dev_dbg(&client->dev, "CNR raw=%u\n", utmp);
+
+ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ c->cnr.stat[0].svalue = stmp;
+ } else {
+ c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
+
+ /* BER */
+ if (dev->fe_status & FE_HAS_LOCK) {
+ ret = rtl2830_bulk_read(client, 0x34e, buf, 2);
+ if (ret)
+ goto err;
+
+ utmp = buf[0] << 8 | buf[1] << 0;
+ dev->post_bit_error += utmp;
+ dev->post_bit_count += 1000000;
+
+ dev_dbg(&client->dev, "BER errors=%u total=1000000\n", utmp);
+
+ c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_error.stat[0].uvalue = dev->post_bit_error;
+ c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_count.stat[0].uvalue = dev->post_bit_count;
+ } else {
+ c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
+
+
return ret;
err:
dev_dbg(&client->dev, "failed=%d\n", ret);
@@ -503,109 +584,6 @@ static struct dvb_frontend_ops rtl2830_ops = {
.read_signal_strength = rtl2830_read_signal_strength,
};
-static void rtl2830_stat_work(struct work_struct *work)
-{
- struct rtl2830_dev *dev = container_of(work, struct rtl2830_dev, stat_work.work);
- struct i2c_client *client = dev->client;
- struct dtv_frontend_properties *c = &dev->fe.dtv_property_cache;
- int ret, tmp;
- u8 u8tmp, buf[2];
- u16 u16tmp;
-
- dev_dbg(&client->dev, "\n");
-
- /* signal strength */
- if (dev->fe_status & FE_HAS_SIGNAL) {
- struct {signed int x:14; } s;
-
- /* read IF AGC */
- ret = rtl2830_bulk_read(client, 0x359, buf, 2);
- if (ret)
- goto err;
-
- u16tmp = buf[0] << 8 | buf[1] << 0;
- u16tmp &= 0x3fff; /* [13:0] */
- tmp = s.x = u16tmp; /* 14-bit bin to 2 complement */
- u16tmp = clamp_val(-4 * tmp + 32767, 0x0000, 0xffff);
-
- dev_dbg(&client->dev, "IF AGC=%d\n", tmp);
-
- c->strength.stat[0].scale = FE_SCALE_RELATIVE;
- c->strength.stat[0].uvalue = u16tmp;
- } else {
- c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
- }
-
- /* CNR */
- if (dev->fe_status & FE_HAS_VITERBI) {
- unsigned hierarchy, constellation;
- #define CONSTELLATION_NUM 3
- #define HIERARCHY_NUM 4
- static const u32 constant[CONSTELLATION_NUM][HIERARCHY_NUM] = {
- {70705899, 70705899, 70705899, 70705899},
- {82433173, 82433173, 87483115, 94445660},
- {92888734, 92888734, 95487525, 99770748},
- };
-
- ret = rtl2830_bulk_read(client, 0x33c, &u8tmp, 1);
- if (ret)
- goto err;
-
- constellation = (u8tmp >> 2) & 0x03; /* [3:2] */
- if (constellation > CONSTELLATION_NUM - 1)
- goto err_schedule_delayed_work;
-
- hierarchy = (u8tmp >> 4) & 0x07; /* [6:4] */
- if (hierarchy > HIERARCHY_NUM - 1)
- goto err_schedule_delayed_work;
-
- ret = rtl2830_bulk_read(client, 0x40c, buf, 2);
- if (ret)
- goto err;
-
- u16tmp = buf[0] << 8 | buf[1] << 0;
- if (u16tmp)
- tmp = (constant[constellation][hierarchy] -
- intlog10(u16tmp)) / ((1 << 24) / 10000);
- else
- tmp = 0;
-
- dev_dbg(&client->dev, "CNR raw=%u\n", u16tmp);
-
- c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
- c->cnr.stat[0].svalue = tmp;
- } else {
- c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
- }
-
- /* BER */
- if (dev->fe_status & FE_HAS_LOCK) {
- ret = rtl2830_bulk_read(client, 0x34e, buf, 2);
- if (ret)
- goto err;
-
- u16tmp = buf[0] << 8 | buf[1] << 0;
- dev->post_bit_error += u16tmp;
- dev->post_bit_count += 1000000;
-
- dev_dbg(&client->dev, "BER errors=%u total=1000000\n", u16tmp);
-
- c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
- c->post_bit_error.stat[0].uvalue = dev->post_bit_error;
- c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
- c->post_bit_count.stat[0].uvalue = dev->post_bit_count;
- } else {
- c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
- c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
- }
-
-err_schedule_delayed_work:
- schedule_delayed_work(&dev->stat_work, msecs_to_jiffies(2000));
- return;
-err:
- dev_dbg(&client->dev, "failed=%d\n", ret);
-}
-
static int rtl2830_pid_filter_ctrl(struct dvb_frontend *fe, int onoff)
{
struct i2c_client *client = fe->demodulator_priv;
@@ -851,7 +829,6 @@ static int rtl2830_probe(struct i2c_client *client,
dev->client = client;
dev->pdata = client->dev.platform_data;
dev->sleeping = true;
- INIT_DELAYED_WORK(&dev->stat_work, rtl2830_stat_work);
dev->regmap = regmap_init(&client->dev, &regmap_bus, client,
&regmap_config);
if (IS_ERR(dev->regmap)) {
@@ -904,9 +881,6 @@ static int rtl2830_remove(struct i2c_client *client)
dev_dbg(&client->dev, "\n");
- /* stop statistics polling */
- cancel_delayed_work_sync(&dev->stat_work);
-
i2c_mux_del_adapters(dev->muxc);
regmap_exit(dev->regmap);
kfree(dev);
@@ -922,7 +896,8 @@ MODULE_DEVICE_TABLE(i2c, rtl2830_id_table);
static struct i2c_driver rtl2830_driver = {
.driver = {
- .name = "rtl2830",
+ .name = "rtl2830",
+ .suppress_bind_attrs = true,
},
.probe = rtl2830_probe,
.remove = rtl2830_remove,
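
[Editor's note] The statistics that used to live in the polling worker are now gathered directly in read_status(), and the CNR arithmetic is Q24 fixed point: intlog10() from dvb_math.h returns log10(x) * 2^24, and the per-constellation constants are 2^24 * log10(K) for a modulation-dependent K, so the subtract-and-divide produces 10*log10(K/x) in the 0.001 dB units that FE_SCALE_DECIBEL expects. The same conversion as a standalone sketch (helper name is mine):

	/* Raw CNR register value to millidecibels (0.001 dB). */
	static s32 cnr_mdb(u32 constant, u16 raw)
	{
		if (!raw)
			return 0;
		/* (log10(K) - log10(raw)) * 10^4 == 10 * log10(K/raw) in mdB */
		return (constant - intlog10(raw)) / ((1 << 24) / 10000);
	}

(1 << 24) / 10000 is 1677, so a one-decade change in the raw value moves the result by roughly 10000 mdB = 10 dB, as expected.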
diff --git a/drivers/media/dvb-frontends/rtl2830_priv.h b/drivers/media/dvb-frontends/rtl2830_priv.h
index da4909543..8ec4721d7 100644
--- a/drivers/media/dvb-frontends/rtl2830_priv.h
+++ b/drivers/media/dvb-frontends/rtl2830_priv.h
@@ -24,6 +24,7 @@
#include <linux/i2c-mux.h>
#include <linux/math64.h>
#include <linux/regmap.h>
+#include <linux/bitops.h>
struct rtl2830_dev {
struct rtl2830_platform_data *pdata;
@@ -33,7 +34,6 @@ struct rtl2830_dev {
struct dvb_frontend fe;
bool sleeping;
unsigned long filters;
- struct delayed_work stat_work;
enum fe_status fe_status;
u64 post_bit_error_prev; /* for old DVBv3 read_ber() calculation */
u64 post_bit_error;
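
[Editor's note] The new <linux/bitops.h> include is for sign_extend32(), which replaces the old statistics worker's anonymous-bitfield trick for turning the 14-bit IF AGC reading into a signed value. Both forms agree on the result:

	u16 raw = (buf[0] << 8 | buf[1]) & 0x3fff;	/* bits [13:0] */

	/* old: assignment through a signed 14-bit bitfield */
	struct { signed int x:14; } s;
	int agc_old = s.x = raw;

	/* new: explicit helper; bit 13 is the sign bit */
	int agc_new = sign_extend32(raw, 13);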
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index bfb6beedd..0ced01f10 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -947,6 +947,8 @@ static int rtl2832_slave_ts_ctrl(struct i2c_client *client, bool enable)
goto err;
}
+ dev->slave_ts = enable;
+
return 0;
err:
dev_dbg(&client->dev, "failed=%d\n", ret);
@@ -960,7 +962,7 @@ static int rtl2832_pid_filter_ctrl(struct dvb_frontend *fe, int onoff)
int ret;
u8 u8tmp;
- dev_dbg(&client->dev, "onoff=%d\n", onoff);
+ dev_dbg(&client->dev, "onoff=%d, slave_ts=%d\n", onoff, dev->slave_ts);
/* enable / disable PID filter */
if (onoff)
@@ -968,7 +970,10 @@ static int rtl2832_pid_filter_ctrl(struct dvb_frontend *fe, int onoff)
else
u8tmp = 0x00;
- ret = regmap_update_bits(dev->regmap, 0x061, 0xc0, u8tmp);
+ if (dev->slave_ts)
+ ret = regmap_update_bits(dev->regmap, 0x021, 0xc0, u8tmp);
+ else
+ ret = regmap_update_bits(dev->regmap, 0x061, 0xc0, u8tmp);
if (ret)
goto err;
@@ -986,8 +991,8 @@ static int rtl2832_pid_filter(struct dvb_frontend *fe, u8 index, u16 pid,
int ret;
u8 buf[4];
- dev_dbg(&client->dev, "index=%d pid=%04x onoff=%d\n",
- index, pid, onoff);
+ dev_dbg(&client->dev, "index=%d pid=%04x onoff=%d slave_ts=%d\n",
+ index, pid, onoff, dev->slave_ts);
/* skip invalid PIDs (0x2000) */
if (pid > 0x1fff || index > 32)
@@ -1003,14 +1008,22 @@ static int rtl2832_pid_filter(struct dvb_frontend *fe, u8 index, u16 pid,
buf[1] = (dev->filters >> 8) & 0xff;
buf[2] = (dev->filters >> 16) & 0xff;
buf[3] = (dev->filters >> 24) & 0xff;
- ret = regmap_bulk_write(dev->regmap, 0x062, buf, 4);
+
+ if (dev->slave_ts)
+ ret = regmap_bulk_write(dev->regmap, 0x022, buf, 4);
+ else
+ ret = regmap_bulk_write(dev->regmap, 0x062, buf, 4);
if (ret)
goto err;
/* add PID */
buf[0] = (pid >> 8) & 0xff;
buf[1] = (pid >> 0) & 0xff;
- ret = regmap_bulk_write(dev->regmap, 0x066 + 2 * index, buf, 2);
+
+ if (dev->slave_ts)
+ ret = regmap_bulk_write(dev->regmap, 0x026 + 2 * index, buf, 2);
+ else
+ ret = regmap_bulk_write(dev->regmap, 0x066 + 2 * index, buf, 2);
if (ret)
goto err;
@@ -1135,6 +1148,7 @@ MODULE_DEVICE_TABLE(i2c, rtl2832_id_table);
static struct i2c_driver rtl2832_driver = {
.driver = {
.name = "rtl2832",
+ .suppress_bind_attrs = true,
},
.probe = rtl2832_probe,
.remove = rtl2832_remove,
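
[Editor's note] The PID-filter changes above select between two mirrored register banks: the demod's own filter at 0x061/0x062/0x066 and a second bank at 0x021/0x022/0x026 that takes effect when the chip is relaying a slave demodulator's TS. The banks differ only by a 0x40 base offset, so the repeated branches could equally be written with a base-register helper (hypothetical, not what the driver does):

	static u16 rtl2832_pid_reg(struct rtl2832_dev *dev, u16 offset)
	{
		return (dev->slave_ts ? 0x020 : 0x060) + offset;
	}

	/* filter control at offset 0x1, bitmap at 0x2, PID table at 0x6 */
	ret = regmap_update_bits(dev->regmap, rtl2832_pid_reg(dev, 0x1),
				 0xc0, u8tmp);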
diff --git a/drivers/media/dvb-frontends/rtl2832_priv.h b/drivers/media/dvb-frontends/rtl2832_priv.h
index c1a8a69e9..9a6d01a9c 100644
--- a/drivers/media/dvb-frontends/rtl2832_priv.h
+++ b/drivers/media/dvb-frontends/rtl2832_priv.h
@@ -44,6 +44,7 @@ struct rtl2832_dev {
bool sleeping;
struct delayed_work i2c_gate_work;
unsigned long filters; /* PID filter */
+ bool slave_ts;
};
struct rtl2832_reg_entry {
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c
index 47a480a7d..6e22af36b 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.c
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.c
@@ -452,7 +452,7 @@ static int rtl2832_sdr_querycap(struct file *file, void *fh,
/* Videobuf2 operations */
static int rtl2832_sdr_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers,
- unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int *nplanes, unsigned int sizes[], struct device *alloc_devs[])
{
struct rtl2832_sdr_dev *dev = vb2_get_drv_priv(vq);
struct platform_device *pdev = dev->pdev;
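
[Editor's note] This one-line signature change tracks the videobuf2 core in 4.8, which replaced the opaque per-plane allocation contexts (void *alloc_ctxs[]) with per-plane allocation devices (struct device *alloc_devs[]). A driver content with the queue's default device simply leaves the array untouched — a minimal sketch, with the buffer size made up:

	static int sdr_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
				   unsigned int *nplanes, unsigned int sizes[],
				   struct device *alloc_devs[])
	{
		*nplanes = 1;
		sizes[0] = PAGE_ALIGN(300 * 1024);	/* hypothetical size */
		return 0;
	}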
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 11489e1d5..2715d2d8d 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -357,9 +357,7 @@ static int si2168_init(struct dvb_frontend *fe)
struct si2168_dev *dev = i2c_get_clientdata(client);
int ret, len, remaining;
const struct firmware *fw;
- const char *fw_name;
struct si2168_cmd cmd;
- unsigned int chip_id;
dev_dbg(&client->dev, "\n");
@@ -371,7 +369,7 @@ static int si2168_init(struct dvb_frontend *fe)
if (ret)
goto err;
- if (dev->fw_loaded) {
+ if (dev->warm) {
/* resume */
memcpy(cmd.args, "\xc0\x06\x08\x0f\x00\x20\x21\x01", 8);
cmd.wlen = 8;
@@ -398,49 +396,14 @@ static int si2168_init(struct dvb_frontend *fe)
if (ret)
goto err;
- /* query chip revision */
- memcpy(cmd.args, "\x02", 1);
- cmd.wlen = 1;
- cmd.rlen = 13;
- ret = si2168_cmd_execute(client, &cmd);
- if (ret)
- goto err;
-
- chip_id = cmd.args[1] << 24 | cmd.args[2] << 16 | cmd.args[3] << 8 |
- cmd.args[4] << 0;
-
- #define SI2168_A20 ('A' << 24 | 68 << 16 | '2' << 8 | '0' << 0)
- #define SI2168_A30 ('A' << 24 | 68 << 16 | '3' << 8 | '0' << 0)
- #define SI2168_B40 ('B' << 24 | 68 << 16 | '4' << 8 | '0' << 0)
-
- switch (chip_id) {
- case SI2168_A20:
- fw_name = SI2168_A20_FIRMWARE;
- break;
- case SI2168_A30:
- fw_name = SI2168_A30_FIRMWARE;
- break;
- case SI2168_B40:
- fw_name = SI2168_B40_FIRMWARE;
- break;
- default:
- dev_err(&client->dev, "unknown chip version Si21%d-%c%c%c\n",
- cmd.args[2], cmd.args[1],
- cmd.args[3], cmd.args[4]);
- ret = -EINVAL;
- goto err;
- }
-
- dev_info(&client->dev, "found a 'Silicon Labs Si21%d-%c%c%c'\n",
- cmd.args[2], cmd.args[1], cmd.args[3], cmd.args[4]);
-
/* request the firmware, this will block and timeout */
- ret = reject_firmware(&fw, fw_name, &client->dev);
+ ret = reject_firmware(&fw, dev->firmware_name, &client->dev);
if (ret) {
/* fallback mechanism to handle old name for Si2168 B40 fw */
- if (chip_id == SI2168_B40) {
- fw_name = SI2168_B40_FIRMWARE_FALLBACK;
- ret = reject_firmware(&fw, fw_name, &client->dev);
+ if (dev->chip_id == SI2168_CHIP_ID_B40) {
+ dev->firmware_name = SI2168_B40_FIRMWARE_FALLBACK;
+ ret = reject_firmware(&fw, dev->firmware_name,
+ &client->dev);
}
if (ret == 0) {
@@ -450,13 +413,13 @@ static int si2168_init(struct dvb_frontend *fe)
} else {
dev_err(&client->dev,
"firmware file '%s' not found\n",
- fw_name);
+ dev->firmware_name);
goto err_release_firmware;
}
}
dev_info(&client->dev, "downloading firmware from file '%s'\n",
- fw_name);
+ dev->firmware_name);
if ((fw->size % 17 == 0) && (fw->data[0] > 5)) {
/* firmware is in the new format */
@@ -511,8 +474,11 @@ static int si2168_init(struct dvb_frontend *fe)
if (ret)
goto err;
- dev_info(&client->dev, "firmware version: %c.%c.%d\n",
- cmd.args[6], cmd.args[7], cmd.args[8]);
+ dev->version = (cmd.args[9] + '@') << 24 | (cmd.args[6] - '0') << 16 |
+ (cmd.args[7] - '0') << 8 | (cmd.args[8]) << 0;
+ dev_info(&client->dev, "firmware version: %c %d.%d.%d\n",
+ dev->version >> 24 & 0xff, dev->version >> 16 & 0xff,
+ dev->version >> 8 & 0xff, dev->version >> 0 & 0xff);
/* set ts mode */
memcpy(cmd.args, "\x14\x00\x01\x10\x10\x00", 6);
@@ -525,7 +491,7 @@ static int si2168_init(struct dvb_frontend *fe)
if (ret)
goto err;
- dev->fw_loaded = true;
+ dev->warm = true;
warm:
dev->active = true;
@@ -549,6 +515,10 @@ static int si2168_sleep(struct dvb_frontend *fe)
dev->active = false;
+ /* Firmware B 4.0-11 or later loses warm state during sleep */
+ if (dev->version > ('B' << 24 | 4 << 16 | 0 << 8 | 11 << 0))
+ dev->warm = false;
+
memcpy(cmd.args, "\x13", 1);
cmd.wlen = 1;
cmd.rlen = 0;
@@ -653,6 +623,7 @@ static int si2168_probe(struct i2c_client *client,
struct si2168_config *config = client->dev.platform_data;
struct si2168_dev *dev;
int ret;
+ struct si2168_cmd cmd;
dev_dbg(&client->dev, "\n");
@@ -663,8 +634,56 @@ static int si2168_probe(struct i2c_client *client,
goto err;
}
+ i2c_set_clientdata(client, dev);
mutex_init(&dev->i2c_mutex);
+ /* Initialize */
+ memcpy(cmd.args, "\xc0\x12\x00\x0c\x00\x0d\x16\x00\x00\x00\x00\x00\x00", 13);
+ cmd.wlen = 13;
+ cmd.rlen = 0;
+ ret = si2168_cmd_execute(client, &cmd);
+ if (ret)
+ goto err_kfree;
+
+ /* Power up */
+ memcpy(cmd.args, "\xc0\x06\x01\x0f\x00\x20\x20\x01", 8);
+ cmd.wlen = 8;
+ cmd.rlen = 1;
+ ret = si2168_cmd_execute(client, &cmd);
+ if (ret)
+ goto err_kfree;
+
+ /* Query chip revision */
+ memcpy(cmd.args, "\x02", 1);
+ cmd.wlen = 1;
+ cmd.rlen = 13;
+ ret = si2168_cmd_execute(client, &cmd);
+ if (ret)
+ goto err_kfree;
+
+ dev->chip_id = cmd.args[1] << 24 | cmd.args[2] << 16 |
+ cmd.args[3] << 8 | cmd.args[4] << 0;
+
+ switch (dev->chip_id) {
+ case SI2168_CHIP_ID_A20:
+ dev->firmware_name = SI2168_A20_FIRMWARE;
+ break;
+ case SI2168_CHIP_ID_A30:
+ dev->firmware_name = SI2168_A30_FIRMWARE;
+ break;
+ case SI2168_CHIP_ID_B40:
+ dev->firmware_name = SI2168_B40_FIRMWARE;
+ break;
+ default:
+ dev_dbg(&client->dev, "unknown chip version Si21%d-%c%c%c\n",
+ cmd.args[2], cmd.args[1], cmd.args[3], cmd.args[4]);
+ ret = -ENODEV;
+ goto err_kfree;
+ }
+
+ dev->version = (cmd.args[1]) << 24 | (cmd.args[3] - '0') << 16 |
+ (cmd.args[4] - '0') << 8 | (cmd.args[5]) << 0;
+
/* create mux i2c adapter for tuner */
dev->muxc = i2c_mux_alloc(client->adapter, &client->dev,
1, 0, I2C_MUX_LOCKED,
@@ -686,11 +705,14 @@ static int si2168_probe(struct i2c_client *client,
dev->ts_mode = config->ts_mode;
dev->ts_clock_inv = config->ts_clock_inv;
dev->ts_clock_gapped = config->ts_clock_gapped;
- dev->fw_loaded = false;
- i2c_set_clientdata(client, dev);
+ dev_info(&client->dev, "Silicon Labs Si2168-%c%d%d successfully identified\n",
+ dev->version >> 24 & 0xff, dev->version >> 16 & 0xff,
+ dev->version >> 8 & 0xff);
+ dev_info(&client->dev, "firmware version: %c %d.%d.%d\n",
+ dev->version >> 24 & 0xff, dev->version >> 16 & 0xff,
+ dev->version >> 8 & 0xff, dev->version >> 0 & 0xff);
- dev_info(&client->dev, "Silicon Labs Si2168 successfully attached\n");
return 0;
err_kfree:
kfree(dev);
@@ -723,7 +745,8 @@ MODULE_DEVICE_TABLE(i2c, si2168_id_table);
static struct i2c_driver si2168_driver = {
.driver = {
- .name = "si2168",
+ .name = "si2168",
+ .suppress_bind_attrs = true,
},
.probe = si2168_probe,
.remove = si2168_remove,
diff --git a/drivers/media/dvb-frontends/si2168_priv.h b/drivers/media/dvb-frontends/si2168_priv.h
index 5ac036229..4b34779cd 100644
--- a/drivers/media/dvb-frontends/si2168_priv.h
+++ b/drivers/media/dvb-frontends/si2168_priv.h
@@ -34,8 +34,14 @@ struct si2168_dev {
struct dvb_frontend fe;
enum fe_delivery_system delivery_system;
enum fe_status fe_status;
+ #define SI2168_CHIP_ID_A20 ('A' << 24 | 68 << 16 | '2' << 8 | '0' << 0)
+ #define SI2168_CHIP_ID_A30 ('A' << 24 | 68 << 16 | '3' << 8 | '0' << 0)
+ #define SI2168_CHIP_ID_B40 ('B' << 24 | 68 << 16 | '4' << 8 | '0' << 0)
+ unsigned int chip_id;
+ unsigned int version;
+ const char *firmware_name;
bool active;
- bool fw_loaded;
+ bool warm;
u8 ts_mode;
bool ts_clock_inv;
bool ts_clock_gapped;
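
[Editor's note] The chip_id and version words introduced here are fourcc-style packings, one byte per revision character with the most significant byte first, so revisions can be compared with plain integer operators (the > test in si2168_sleep() relies on this ordering). A sketch of the scheme (helper name is mine):

	/* si2168_pack_id('B', 68, '4', '0') == SI2168_CHIP_ID_B40 */
	static u32 si2168_pack_id(u8 family, u8 part, u8 major, u8 minor)
	{
		return family << 24 | part << 16 | major << 8 | minor;
	}

Because the top byte holds the chip family and the lower bytes hold successively finer version digits, "firmware B 4.0-11 or later" reduces to a single unsigned comparison against ('B' << 24 | 4 << 16 | 0 << 8 | 11).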
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 993dc50c1..ce9006e10 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -209,6 +209,7 @@ config VIDEO_ADV7604
depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
depends on GPIOLIB || COMPILE_TEST
select HDMI
+ select MEDIA_CEC_EDID
---help---
Support for the Analog Devices ADV7604 video decoder.
@@ -218,10 +219,18 @@ config VIDEO_ADV7604
To compile this driver as a module, choose M here: the
module will be called adv7604.
+config VIDEO_ADV7604_CEC
+ bool "Enable Analog Devices ADV7604 CEC support"
+ depends on VIDEO_ADV7604 && MEDIA_CEC
+ ---help---
+ When selected, the adv7604 will support the optional
+ HDMI CEC feature.
+
config VIDEO_ADV7842
tristate "Analog Devices ADV7842 decoder"
depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
select HDMI
+ select MEDIA_CEC_EDID
---help---
Support for the Analog Devices ADV7842 video decoder.
@@ -231,6 +240,13 @@ config VIDEO_ADV7842
To compile this driver as a module, choose M here: the
module will be called adv7842.
+config VIDEO_ADV7842_CEC
+ bool "Enable Analog Devices ADV7842 CEC support"
+ depends on VIDEO_ADV7842 && MEDIA_CEC
+ ---help---
+ When selected, the adv7842 will support the optional
+ HDMI CEC feature.
+
config VIDEO_BT819
tristate "BT819A VideoStream decoder"
depends on VIDEO_V4L2 && I2C
@@ -447,6 +463,7 @@ config VIDEO_ADV7511
tristate "Analog Devices ADV7511 encoder"
depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
select HDMI
+ select MEDIA_CEC_EDID
---help---
Support for the Analog Devices ADV7511 video encoder.
@@ -455,6 +472,13 @@ config VIDEO_ADV7511
To compile this driver as a module, choose M here: the
module will be called adv7511.
+config VIDEO_ADV7511_CEC
+ bool "Enable Analog Devices ADV7511 CEC support"
+ depends on VIDEO_ADV7511 && MEDIA_CEC
+ ---help---
+ When selected, the adv7511 will support the optional
+ HDMI CEC feature.
+
config VIDEO_AD9389B
tristate "Analog Devices AD9389B encoder"
depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
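
[Editor's note] These CEC options are bool symbols layered on top of the tristate drivers, which is why the C code later guards its CEC paths with IS_ENABLED(CONFIG_VIDEO_ADV7511_CEC) and friends rather than providing a separate module. Enabling the feature is purely a config choice, e.g. (a hypothetical fragment):

	CONFIG_MEDIA_CEC=y
	CONFIG_VIDEO_ADV7511=m
	CONFIG_VIDEO_ADV7511_CEC=y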
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index b77b0a4db..95cbc857f 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -100,7 +100,7 @@
#define ADV7180_REG_IDENT 0x0011
#define ADV7180_ID_7180 0x18
-#define ADV7180_REG_ICONF1 0x0040
+#define ADV7180_REG_ICONF1 0x2040
#define ADV7180_ICONF1_ACTIVE_LOW 0x01
#define ADV7180_ICONF1_PSYNC_ONLY 0x10
#define ADV7180_ICONF1_ACTIVE_TO_CLR 0xC0
@@ -113,15 +113,15 @@
#define ADV7180_IRQ1_LOCK 0x01
#define ADV7180_IRQ1_UNLOCK 0x02
-#define ADV7180_REG_ISR1 0x0042
-#define ADV7180_REG_ICR1 0x0043
-#define ADV7180_REG_IMR1 0x0044
-#define ADV7180_REG_IMR2 0x0048
+#define ADV7180_REG_ISR1 0x2042
+#define ADV7180_REG_ICR1 0x2043
+#define ADV7180_REG_IMR1 0x2044
+#define ADV7180_REG_IMR2 0x2048
#define ADV7180_IRQ3_AD_CHANGE 0x08
-#define ADV7180_REG_ISR3 0x004A
-#define ADV7180_REG_ICR3 0x004B
-#define ADV7180_REG_IMR3 0x004C
-#define ADV7180_REG_IMR4 0x50
+#define ADV7180_REG_ISR3 0x204A
+#define ADV7180_REG_ICR3 0x204B
+#define ADV7180_REG_IMR3 0x204C
+#define ADV7180_REG_IMR4 0x2050
#define ADV7180_REG_NTSC_V_BIT_END 0x00E6
#define ADV7180_NTSC_V_BIT_END_MANUAL_NVEND 0x4F
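
[Editor's note] The interrupt-register fix above works because the adv7180 driver encodes the target register sub-map in the high byte of each define and switches maps before issuing the raw 8-bit I2C access: 0x0040 addressed ICONF1 in the user map, while the interrupt configuration actually lives in the interrupt/VDP sub-map selected by 0x20. Schematically (a sketch; helper names illustrative, the driver's real code differs in detail):

	static int adv7180_read(struct adv7180_state *state, unsigned int reg)
	{
		adv7180_select_page(state, reg >> 8);	/* 0x20 = interrupt map */
		return i2c_smbus_read_byte_data(state->client, reg & 0xff);
	}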
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index 39271c35d..53030d631 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -33,6 +33,7 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-dv-timings.h>
#include <media/i2c/adv7511.h>
+#include <media/cec.h>
static int debug;
module_param(debug, int, 0644);
@@ -59,6 +60,8 @@ MODULE_LICENSE("GPL v2");
#define ADV7511_MIN_PIXELCLOCK 20000000
#define ADV7511_MAX_PIXELCLOCK 225000000
+#define ADV7511_MAX_ADDRS (3)
+
/*
**********************************************************************
*
@@ -90,12 +93,20 @@ struct adv7511_state {
struct v4l2_ctrl_handler hdl;
int chip_revision;
u8 i2c_edid_addr;
- u8 i2c_cec_addr;
u8 i2c_pktmem_addr;
+ u8 i2c_cec_addr;
+
+ struct i2c_client *i2c_cec;
+ struct cec_adapter *cec_adap;
+ u8 cec_addr[ADV7511_MAX_ADDRS];
+ u8 cec_valid_addrs;
+ bool cec_enabled_adap;
+
/* Is the adv7511 powered on? */
bool power_on;
/* Did we receive hotplug and rx-sense signals? */
bool have_monitor;
+ bool enabled_irq;
/* timings from s_dv_timings */
struct v4l2_dv_timings dv_timings;
u32 fmt_code;
@@ -227,7 +238,7 @@ static int adv_smbus_read_i2c_block_data(struct i2c_client *client,
return ret;
}
-static inline void adv7511_edid_rd(struct v4l2_subdev *sd, u16 len, u8 *buf)
+static void adv7511_edid_rd(struct v4l2_subdev *sd, uint16_t len, uint8_t *buf)
{
struct adv7511_state *state = get_adv7511_state(sd);
int i;
@@ -242,6 +253,34 @@ static inline void adv7511_edid_rd(struct v4l2_subdev *sd, u16 len, u8 *buf)
v4l2_err(sd, "%s: i2c read error\n", __func__);
}
+static inline int adv7511_cec_read(struct v4l2_subdev *sd, u8 reg)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+
+ return i2c_smbus_read_byte_data(state->i2c_cec, reg);
+}
+
+static int adv7511_cec_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ int ret;
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ ret = i2c_smbus_write_byte_data(state->i2c_cec, reg, val);
+ if (ret == 0)
+ return 0;
+ }
+ v4l2_err(sd, "%s: I2C Write Problem\n", __func__);
+ return ret;
+}
+
+static inline int adv7511_cec_write_and_or(struct v4l2_subdev *sd, u8 reg, u8 mask,
+ u8 val)
+{
+ return adv7511_cec_write(sd, reg, (adv7511_cec_read(sd, reg) & mask) | val);
+}
+
static int adv7511_pktmem_rd(struct v4l2_subdev *sd, u8 reg)
{
struct adv7511_state *state = get_adv7511_state(sd);
@@ -343,28 +382,20 @@ static void adv7511_csc_rgb_full2limit(struct v4l2_subdev *sd, bool enable)
}
}
-static void adv7511_set_IT_content_AVI_InfoFrame(struct v4l2_subdev *sd)
+static void adv7511_set_rgb_quantization_mode(struct v4l2_subdev *sd, struct v4l2_ctrl *ctrl)
{
struct adv7511_state *state = get_adv7511_state(sd);
- if (state->dv_timings.bt.flags & V4L2_DV_FL_IS_CE_VIDEO) {
- /* CE format, not IT */
- adv7511_wr_and_or(sd, 0x57, 0x7f, 0x00);
- } else {
- /* IT format */
- adv7511_wr_and_or(sd, 0x57, 0x7f, 0x80);
+
+ /* Only makes sense for RGB formats */
+ if (state->fmt_code != MEDIA_BUS_FMT_RGB888_1X24) {
+ /* so just keep quantization */
+ adv7511_csc_rgb_full2limit(sd, false);
+ return;
}
-}
-static int adv7511_set_rgb_quantization_mode(struct v4l2_subdev *sd, struct v4l2_ctrl *ctrl)
-{
switch (ctrl->val) {
- default:
- return -EINVAL;
- break;
- case V4L2_DV_RGB_RANGE_AUTO: {
+ case V4L2_DV_RGB_RANGE_AUTO:
/* automatic */
- struct adv7511_state *state = get_adv7511_state(sd);
-
if (state->dv_timings.bt.flags & V4L2_DV_FL_IS_CE_VIDEO) {
/* CE format, RGB limited range (16-235) */
adv7511_csc_rgb_full2limit(sd, true);
@@ -372,7 +403,6 @@ static int adv7511_set_rgb_quantization_mode(struct v4l2_subdev *sd, struct v4l2
/* not CE format, RGB full range (0-255) */
adv7511_csc_rgb_full2limit(sd, false);
}
- }
break;
case V4L2_DV_RGB_RANGE_LIMITED:
/* RGB limited range (16-235) */
@@ -383,7 +413,6 @@ static int adv7511_set_rgb_quantization_mode(struct v4l2_subdev *sd, struct v4l2
adv7511_csc_rgb_full2limit(sd, false);
break;
}
- return 0;
}
/* ------------------------------ CTRL OPS ------------------------------ */
@@ -400,8 +429,10 @@ static int adv7511_s_ctrl(struct v4l2_ctrl *ctrl)
adv7511_wr_and_or(sd, 0xaf, 0xfd, ctrl->val == V4L2_DV_TX_MODE_HDMI ? 0x02 : 0x00);
return 0;
}
- if (state->rgb_quantization_range_ctrl == ctrl)
- return adv7511_set_rgb_quantization_mode(sd, ctrl);
+ if (state->rgb_quantization_range_ctrl == ctrl) {
+ adv7511_set_rgb_quantization_mode(sd, ctrl);
+ return 0;
+ }
if (state->content_type_ctrl == ctrl) {
u8 itc, cn;
@@ -425,16 +456,28 @@ static const struct v4l2_ctrl_ops adv7511_ctrl_ops = {
#ifdef CONFIG_VIDEO_ADV_DEBUG
static void adv7511_inv_register(struct v4l2_subdev *sd)
{
+ struct adv7511_state *state = get_adv7511_state(sd);
+
v4l2_info(sd, "0x000-0x0ff: Main Map\n");
+ if (state->i2c_cec)
+ v4l2_info(sd, "0x100-0x1ff: CEC Map\n");
}
static int adv7511_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
{
+ struct adv7511_state *state = get_adv7511_state(sd);
+
reg->size = 1;
switch (reg->reg >> 8) {
case 0:
reg->val = adv7511_rd(sd, reg->reg & 0xff);
break;
+ case 1:
+ if (state->i2c_cec) {
+ reg->val = adv7511_cec_read(sd, reg->reg & 0xff);
+ break;
+ }
+ /* fall through */
default:
v4l2_info(sd, "Register %03llx not supported\n", reg->reg);
adv7511_inv_register(sd);
@@ -445,10 +488,18 @@ static int adv7511_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *
static int adv7511_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_register *reg)
{
+ struct adv7511_state *state = get_adv7511_state(sd);
+
switch (reg->reg >> 8) {
case 0:
adv7511_wr(sd, reg->reg & 0xff, reg->val & 0xff);
break;
+ case 1:
+ if (state->i2c_cec) {
+ adv7511_cec_write(sd, reg->reg & 0xff, reg->val & 0xff);
+ break;
+ }
+ /* fall through */
default:
v4l2_info(sd, "Register %03llx not supported\n", reg->reg);
adv7511_inv_register(sd);
@@ -536,6 +587,7 @@ static int adv7511_log_status(struct v4l2_subdev *sd)
{
struct adv7511_state *state = get_adv7511_state(sd);
struct adv7511_state_edid *edid = &state->edid;
+ int i;
static const char * const states[] = {
"in reset",
@@ -605,7 +657,23 @@ static int adv7511_log_status(struct v4l2_subdev *sd)
else
v4l2_info(sd, "no timings set\n");
v4l2_info(sd, "i2c edid addr: 0x%x\n", state->i2c_edid_addr);
+
+ if (state->i2c_cec == NULL)
+ return 0;
+
v4l2_info(sd, "i2c cec addr: 0x%x\n", state->i2c_cec_addr);
+
+ v4l2_info(sd, "CEC: %s\n", state->cec_enabled_adap ?
+ "enabled" : "disabled");
+ if (state->cec_enabled_adap) {
+ for (i = 0; i < ADV7511_MAX_ADDRS; i++) {
+ bool is_valid = state->cec_valid_addrs & (1 << i);
+
+ if (is_valid)
+ v4l2_info(sd, "CEC Logical Address: 0x%x\n",
+ state->cec_addr[i]);
+ }
+ }
v4l2_info(sd, "i2c pktmem addr: 0x%x\n", state->i2c_pktmem_addr);
return 0;
}
@@ -663,15 +731,197 @@ static int adv7511_s_power(struct v4l2_subdev *sd, int on)
return true;
}
+#if IS_ENABLED(CONFIG_VIDEO_ADV7511_CEC)
+static int adv7511_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct adv7511_state *state = adap->priv;
+ struct v4l2_subdev *sd = &state->sd;
+
+ if (state->i2c_cec == NULL)
+ return -EIO;
+
+ if (!state->cec_enabled_adap && enable) {
+ /* power up cec section */
+ adv7511_cec_write_and_or(sd, 0x4e, 0xfc, 0x01);
+ /* legacy mode and clear all rx buffers */
+ adv7511_cec_write(sd, 0x4a, 0x07);
+ adv7511_cec_write(sd, 0x4a, 0);
+ adv7511_cec_write_and_or(sd, 0x11, 0xfe, 0); /* initially disable tx */
+ /* enabled irqs: */
+ /* tx: ready */
+ /* tx: arbitration lost */
+ /* tx: retry timeout */
+ /* rx: ready 1 */
+ if (state->enabled_irq)
+ adv7511_wr_and_or(sd, 0x95, 0xc0, 0x39);
+ } else if (state->cec_enabled_adap && !enable) {
+ if (state->enabled_irq)
+ adv7511_wr_and_or(sd, 0x95, 0xc0, 0x00);
+ /* disable address mask 1-3 */
+ adv7511_cec_write_and_or(sd, 0x4b, 0x8f, 0x00);
+ /* power down cec section */
+ adv7511_cec_write_and_or(sd, 0x4e, 0xfc, 0x00);
+ state->cec_valid_addrs = 0;
+ }
+ state->cec_enabled_adap = enable;
+ return 0;
+}
+
+static int adv7511_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
+{
+ struct adv7511_state *state = adap->priv;
+ struct v4l2_subdev *sd = &state->sd;
+ unsigned int i, free_idx = ADV7511_MAX_ADDRS;
+
+ if (!state->cec_enabled_adap)
+ return addr == CEC_LOG_ADDR_INVALID ? 0 : -EIO;
+
+ if (addr == CEC_LOG_ADDR_INVALID) {
+ adv7511_cec_write_and_or(sd, 0x4b, 0x8f, 0);
+ state->cec_valid_addrs = 0;
+ return 0;
+ }
+
+ for (i = 0; i < ADV7511_MAX_ADDRS; i++) {
+ bool is_valid = state->cec_valid_addrs & (1 << i);
+
+ if (free_idx == ADV7511_MAX_ADDRS && !is_valid)
+ free_idx = i;
+ if (is_valid && state->cec_addr[i] == addr)
+ return 0;
+ }
+ if (i == ADV7511_MAX_ADDRS) {
+ i = free_idx;
+ if (i == ADV7511_MAX_ADDRS)
+ return -ENXIO;
+ }
+ state->cec_addr[i] = addr;
+ state->cec_valid_addrs |= 1 << i;
+
+ switch (i) {
+ case 0:
+ /* enable address mask 0 */
+ adv7511_cec_write_and_or(sd, 0x4b, 0xef, 0x10);
+ /* set address for mask 0 */
+ adv7511_cec_write_and_or(sd, 0x4c, 0xf0, addr);
+ break;
+ case 1:
+ /* enable address mask 1 */
+ adv7511_cec_write_and_or(sd, 0x4b, 0xdf, 0x20);
+ /* set address for mask 1 */
+ adv7511_cec_write_and_or(sd, 0x4c, 0x0f, addr << 4);
+ break;
+ case 2:
+ /* enable address mask 2 */
+ adv7511_cec_write_and_or(sd, 0x4b, 0xbf, 0x40);
+ /* set address for mask 2 */
+ adv7511_cec_write_and_or(sd, 0x4d, 0xf0, addr);
+ break;
+ }
+ return 0;
+}
+
+static int adv7511_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct adv7511_state *state = adap->priv;
+ struct v4l2_subdev *sd = &state->sd;
+ u8 len = msg->len;
+ unsigned int i;
+
+ v4l2_dbg(1, debug, sd, "%s: len %d\n", __func__, len);
+
+ if (len > 16) {
+ v4l2_err(sd, "%s: len exceeded 16 (%d)\n", __func__, len);
+ return -EINVAL;
+ }
+
+ /*
+ * The number of retries is the number of attempts - 1, but retry
+ * at least once. It's not clear if a value of 0 is allowed, so
+ * let's do at least one retry.
+ */
+ adv7511_cec_write_and_or(sd, 0x12, ~0x70, max(1, attempts - 1) << 4);
+
+ /* blocking, clear cec tx irq status */
+ adv7511_wr_and_or(sd, 0x97, 0xc7, 0x38);
+
+ /* write data */
+ for (i = 0; i < len; i++)
+ adv7511_cec_write(sd, i, msg->msg[i]);
+
+ /* set length (data + header) */
+ adv7511_cec_write(sd, 0x10, len);
+ /* start transmit, enable tx */
+ adv7511_cec_write(sd, 0x11, 0x01);
+ return 0;
+}
+
+static void adv_cec_tx_raw_status(struct v4l2_subdev *sd, u8 tx_raw_status)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+
+ if ((adv7511_cec_read(sd, 0x11) & 0x01) == 0) {
+ v4l2_dbg(1, debug, sd, "%s: tx raw: tx disabled\n", __func__);
+ return;
+ }
+
+ if (tx_raw_status & 0x10) {
+ v4l2_dbg(1, debug, sd,
+ "%s: tx raw: arbitration lost\n", __func__);
+ cec_transmit_done(state->cec_adap, CEC_TX_STATUS_ARB_LOST,
+ 1, 0, 0, 0);
+ return;
+ }
+ if (tx_raw_status & 0x08) {
+ u8 status;
+ u8 nack_cnt;
+ u8 low_drive_cnt;
+
+ v4l2_dbg(1, debug, sd, "%s: tx raw: retry failed\n", __func__);
+ /*
+ * We set this status bit since this hardware performs
+ * retransmissions.
+ */
+ status = CEC_TX_STATUS_MAX_RETRIES;
+ nack_cnt = adv7511_cec_read(sd, 0x14) & 0xf;
+ if (nack_cnt)
+ status |= CEC_TX_STATUS_NACK;
+ low_drive_cnt = adv7511_cec_read(sd, 0x14) >> 4;
+ if (low_drive_cnt)
+ status |= CEC_TX_STATUS_LOW_DRIVE;
+ cec_transmit_done(state->cec_adap, status,
+ 0, nack_cnt, low_drive_cnt, 0);
+ return;
+ }
+ if (tx_raw_status & 0x20) {
+ v4l2_dbg(1, debug, sd, "%s: tx raw: ready ok\n", __func__);
+ cec_transmit_done(state->cec_adap, CEC_TX_STATUS_OK, 0, 0, 0, 0);
+ return;
+ }
+}
+
+static const struct cec_adap_ops adv7511_cec_adap_ops = {
+ .adap_enable = adv7511_cec_adap_enable,
+ .adap_log_addr = adv7511_cec_adap_log_addr,
+ .adap_transmit = adv7511_cec_adap_transmit,
+};
+#endif
+
/* Enable interrupts */
static void adv7511_set_isr(struct v4l2_subdev *sd, bool enable)
{
+ struct adv7511_state *state = get_adv7511_state(sd);
u8 irqs = MASK_ADV7511_HPD_INT | MASK_ADV7511_MSEN_INT;
u8 irqs_rd;
int retries = 100;
v4l2_dbg(2, debug, sd, "%s: %s\n", __func__, enable ? "enable" : "disable");
+ if (state->enabled_irq == enable)
+ return;
+ state->enabled_irq = enable;
+
/* The datasheet says that the EDID ready interrupt should be
disabled if there is no hotplug. */
if (!enable)
@@ -679,6 +929,9 @@ static void adv7511_set_isr(struct v4l2_subdev *sd, bool enable)
else if (adv7511_have_hotplug(sd))
irqs |= MASK_ADV7511_EDID_RDY_INT;
+ adv7511_wr_and_or(sd, 0x95, 0xc0,
+ (state->cec_enabled_adap && enable) ? 0x39 : 0x00);
+
/*
* This i2c write can fail (approx. 1 in 1000 writes). But it
* is essential that this register is correct, so retry it
@@ -701,20 +954,53 @@ static void adv7511_set_isr(struct v4l2_subdev *sd, bool enable)
static int adv7511_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
{
u8 irq_status;
+ u8 cec_irq;
/* disable interrupts to prevent a race condition */
adv7511_set_isr(sd, false);
irq_status = adv7511_rd(sd, 0x96);
+ cec_irq = adv7511_rd(sd, 0x97);
/* clear detected interrupts */
adv7511_wr(sd, 0x96, irq_status);
+ adv7511_wr(sd, 0x97, cec_irq);
- v4l2_dbg(1, debug, sd, "%s: irq 0x%x\n", __func__, irq_status);
+ v4l2_dbg(1, debug, sd, "%s: irq 0x%x, cec-irq 0x%x\n", __func__,
+ irq_status, cec_irq);
if (irq_status & (MASK_ADV7511_HPD_INT | MASK_ADV7511_MSEN_INT))
adv7511_check_monitor_present_status(sd);
if (irq_status & MASK_ADV7511_EDID_RDY_INT)
adv7511_check_edid_status(sd);
+#if IS_ENABLED(CONFIG_VIDEO_ADV7511_CEC)
+ if (cec_irq & 0x38)
+ adv_cec_tx_raw_status(sd, cec_irq);
+
+ if (cec_irq & 1) {
+ struct adv7511_state *state = get_adv7511_state(sd);
+ struct cec_msg msg;
+
+ msg.len = adv7511_cec_read(sd, 0x25) & 0x1f;
+
+ v4l2_dbg(1, debug, sd, "%s: cec msg len %d\n", __func__,
+ msg.len);
+
+ if (msg.len > 16)
+ msg.len = 16;
+
+ if (msg.len) {
+ u8 i;
+
+ for (i = 0; i < msg.len; i++)
+ msg.msg[i] = adv7511_cec_read(sd, i + 0x15);
+
+ adv7511_cec_write(sd, 0x4a, 1); /* toggle to re-enable rx 1 */
+ adv7511_cec_write(sd, 0x4a, 0);
+ cec_received_msg(state->cec_adap, &msg);
+ }
+ }
+#endif
+
/* enable interrupts */
adv7511_set_isr(sd, true);
@@ -755,6 +1041,8 @@ static int adv7511_s_dv_timings(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings)
{
struct adv7511_state *state = get_adv7511_state(sd);
+ struct v4l2_bt_timings *bt = &timings->bt;
+ u32 fps;
v4l2_dbg(1, debug, sd, "%s:\n", __func__);
@@ -766,17 +1054,33 @@ static int adv7511_s_dv_timings(struct v4l2_subdev *sd,
if the format is one of the CEA or DMT timings. */
v4l2_find_dv_timings_cap(timings, &adv7511_timings_cap, 0, NULL, NULL);
- timings->bt.flags &= ~V4L2_DV_FL_REDUCED_FPS;
-
/* save timings */
state->dv_timings = *timings;
+ /* set h/vsync polarities */
+ adv7511_wr_and_or(sd, 0x17, 0x9f,
+ ((bt->polarities & V4L2_DV_VSYNC_POS_POL) ? 0 : 0x40) |
+ ((bt->polarities & V4L2_DV_HSYNC_POS_POL) ? 0 : 0x20));
+
+ fps = (u32)bt->pixelclock / (V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt));
+ switch (fps) {
+ case 24:
+ adv7511_wr_and_or(sd, 0xfb, 0xf9, 1 << 1);
+ break;
+ case 25:
+ adv7511_wr_and_or(sd, 0xfb, 0xf9, 2 << 1);
+ break;
+ case 30:
+ adv7511_wr_and_or(sd, 0xfb, 0xf9, 3 << 1);
+ break;
+ default:
+ adv7511_wr_and_or(sd, 0xfb, 0xf9, 0);
+ break;
+ }
+
/* update quantization range based on new dv_timings */
adv7511_set_rgb_quantization_mode(sd, state->rgb_quantization_range_ctrl);
- /* update AVI infoframe */
- adv7511_set_IT_content_AVI_InfoFrame(sd);
-
return 0;
}
@@ -956,8 +1260,6 @@ static int adv7511_enum_mbus_code(struct v4l2_subdev *sd,
static void adv7511_fill_format(struct adv7511_state *state,
struct v4l2_mbus_framefmt *format)
{
- memset(format, 0, sizeof(*format));
-
format->width = state->dv_timings.bt.width;
format->height = state->dv_timings.bt.height;
format->field = V4L2_FIELD_NONE;
@@ -972,6 +1274,7 @@ static int adv7511_get_fmt(struct v4l2_subdev *sd,
if (format->pad != 0)
return -EINVAL;
+ memset(&format->format, 0, sizeof(format->format));
adv7511_fill_format(state, &format->format);
if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
@@ -1132,6 +1435,7 @@ static int adv7511_set_fmt(struct v4l2_subdev *sd,
adv7511_wr_and_or(sd, 0x57, 0x83, (ec << 4) | (q << 2) | (itc << 7));
adv7511_wr_and_or(sd, 0x59, 0x0f, (yq << 6) | (cn << 4));
adv7511_wr_and_or(sd, 0x4a, 0xff, 1);
+ adv7511_set_rgb_quantization_mode(sd, state->rgb_quantization_range_ctrl);
return 0;
}
@@ -1183,6 +1487,8 @@ static void adv7511_notify_no_edid(struct v4l2_subdev *sd)
/* We failed to read the EDID, so send an event for this. */
ed.present = false;
ed.segment = adv7511_rd(sd, 0xc4);
+ ed.phys_addr = CEC_PHYS_ADDR_INVALID;
+ cec_s_phys_addr(state->cec_adap, ed.phys_addr, false);
v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x0);
}
@@ -1406,13 +1712,16 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
v4l2_dbg(1, debug, sd, "%s: edid complete with %d segment(s)\n", __func__, state->edid.segments);
state->edid.complete = true;
-
+ ed.phys_addr = cec_get_edid_phys_addr(state->edid.data,
+ state->edid.segments * 256,
+ NULL);
/* report when we have all segments
but report only for segment 0
*/
ed.present = true;
ed.segment = 0;
state->edid_detect_counter++;
+ cec_s_phys_addr(state->cec_adap, ed.phys_addr, false);
v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
return ed.present;
}
@@ -1420,17 +1729,43 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
return false;
}
+static int adv7511_registered(struct v4l2_subdev *sd)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ int err;
+
+ err = cec_register_adapter(state->cec_adap);
+ if (err)
+ cec_delete_adapter(state->cec_adap);
+ return err;
+}
+
+static void adv7511_unregistered(struct v4l2_subdev *sd)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+
+ cec_unregister_adapter(state->cec_adap);
+}
+
+static const struct v4l2_subdev_internal_ops adv7511_int_ops = {
+ .registered = adv7511_registered,
+ .unregistered = adv7511_unregistered,
+};
+
/* ----------------------------------------------------------------------- */
/* Setup ADV7511 */
static void adv7511_init_setup(struct v4l2_subdev *sd)
{
struct adv7511_state *state = get_adv7511_state(sd);
struct adv7511_state_edid *edid = &state->edid;
+ u32 cec_clk = state->pdata.cec_clk;
+ u8 ratio;
v4l2_dbg(1, debug, sd, "%s\n", __func__);
/* clear all interrupts */
adv7511_wr(sd, 0x96, 0xff);
+ adv7511_wr(sd, 0x97, 0xff);
/*
* Stop HPD from resetting a lot of registers.
* It might leave the chip in a partly un-initialized state,
@@ -1442,6 +1777,25 @@ static void adv7511_init_setup(struct v4l2_subdev *sd)
adv7511_set_isr(sd, false);
adv7511_s_stream(sd, false);
adv7511_s_audio_stream(sd, false);
+
+ if (state->i2c_cec == NULL)
+ return;
+
+ v4l2_dbg(1, debug, sd, "%s: cec_clk %d\n", __func__, cec_clk);
+
+ /* cec soft reset */
+ adv7511_cec_write(sd, 0x50, 0x01);
+ adv7511_cec_write(sd, 0x50, 0x00);
+
+ /* legacy mode */
+ adv7511_cec_write(sd, 0x4a, 0x00);
+
+ if (cec_clk % 750000 != 0)
+ v4l2_err(sd, "%s: cec_clk %d, not multiple of 750 kHz\n",
+ __func__, cec_clk);
+
+ ratio = (cec_clk / 750000) - 1;
+ adv7511_cec_write(sd, 0x4e, ratio << 2);
}
static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *id)
@@ -1476,6 +1830,7 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
client->addr << 1);
v4l2_i2c_subdev_init(sd, client, &adv7511_ops);
+ sd->internal_ops = &adv7511_int_ops;
hdl = &state->hdl;
v4l2_ctrl_handler_init(hdl, 10);
@@ -1516,26 +1871,47 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
chip_id[0] = adv7511_rd(sd, 0xf5);
chip_id[1] = adv7511_rd(sd, 0xf6);
if (chip_id[0] != 0x75 || chip_id[1] != 0x11) {
- v4l2_err(sd, "chip_id != 0x7511, read 0x%02x%02x\n", chip_id[0], chip_id[1]);
+ v4l2_err(sd, "chip_id != 0x7511, read 0x%02x%02x\n", chip_id[0],
+ chip_id[1]);
err = -EIO;
goto err_entity;
}
- state->i2c_edid = i2c_new_dummy(client->adapter, state->i2c_edid_addr >> 1);
+ state->i2c_edid = i2c_new_dummy(client->adapter,
+ state->i2c_edid_addr >> 1);
if (state->i2c_edid == NULL) {
v4l2_err(sd, "failed to register edid i2c client\n");
err = -ENOMEM;
goto err_entity;
}
+ adv7511_wr(sd, 0xe1, state->i2c_cec_addr);
+ if (state->pdata.cec_clk < 3000000 ||
+ state->pdata.cec_clk > 100000000) {
+ v4l2_err(sd, "%s: cec_clk %u outside range, disabling cec\n",
+ __func__, state->pdata.cec_clk);
+ state->pdata.cec_clk = 0;
+ }
+
+ if (state->pdata.cec_clk) {
+ state->i2c_cec = i2c_new_dummy(client->adapter,
+ state->i2c_cec_addr >> 1);
+ if (state->i2c_cec == NULL) {
+ v4l2_err(sd, "failed to register cec i2c client\n");
+ goto err_unreg_edid;
+ }
+ adv7511_wr(sd, 0xe2, 0x00); /* power up cec section */
+ } else {
+ adv7511_wr(sd, 0xe2, 0x01); /* power down cec section */
+ }
+
state->i2c_pktmem = i2c_new_dummy(client->adapter, state->i2c_pktmem_addr >> 1);
if (state->i2c_pktmem == NULL) {
v4l2_err(sd, "failed to register pktmem i2c client\n");
err = -ENOMEM;
- goto err_unreg_edid;
+ goto err_unreg_cec;
}
- adv7511_wr(sd, 0xe2, 0x01); /* power down cec section */
state->work_queue = create_singlethread_workqueue(sd->name);
if (state->work_queue == NULL) {
v4l2_err(sd, "could not create workqueue\n");
@@ -1546,6 +1922,19 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
INIT_DELAYED_WORK(&state->edid_handler, adv7511_edid_handler);
adv7511_init_setup(sd);
+
+#if IS_ENABLED(CONFIG_VIDEO_ADV7511_CEC)
+ state->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops,
+ state, dev_name(&client->dev), CEC_CAP_TRANSMIT |
+ CEC_CAP_LOG_ADDRS | CEC_CAP_PASSTHROUGH | CEC_CAP_RC,
+ ADV7511_MAX_ADDRS, &client->dev);
+ err = PTR_ERR_OR_ZERO(state->cec_adap);
+ if (err) {
+ destroy_workqueue(state->work_queue);
+ goto err_unreg_pktmem;
+ }
+#endif
+
adv7511_set_isr(sd, true);
adv7511_check_monitor_present_status(sd);
@@ -1555,6 +1944,9 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
err_unreg_pktmem:
i2c_unregister_device(state->i2c_pktmem);
+err_unreg_cec:
+ if (state->i2c_cec)
+ i2c_unregister_device(state->i2c_cec);
err_unreg_edid:
i2c_unregister_device(state->i2c_edid);
err_entity:
@@ -1576,9 +1968,12 @@ static int adv7511_remove(struct i2c_client *client)
v4l2_dbg(1, debug, sd, "%s removed @ 0x%x (%s)\n", client->name,
client->addr << 1, client->adapter->name);
+ adv7511_set_isr(sd, false);
adv7511_init_setup(sd);
cancel_delayed_work(&state->edid_handler);
i2c_unregister_device(state->i2c_edid);
+ if (state->i2c_cec)
+ i2c_unregister_device(state->i2c_cec);
i2c_unregister_device(state->i2c_pktmem);
destroy_workqueue(state->work_queue);
v4l2_device_unregister_subdev(sd);
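
[Editor's note] A quick worked example for the CEC timing code above: the adv7511 derives its CEC bit timing from an externally supplied cec_clk that must be a multiple of 750 kHz, and register 0x4e takes the divider minus one in bits [7:2]. With a hypothetical 12 MHz clock:

	u32 cec_clk = 12000000;			/* from platform data */
	u8 ratio = (cec_clk / 750000) - 1;	/* 16 - 1 = 15 */

	adv7511_cec_write(sd, 0x4e, ratio << 2);	/* writes 0x3c */

The probe-time range check (3 MHz to 100 MHz) zeroes cec_clk, and with it the whole CEC setup, when the platform hands in something unusable.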
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 3f1ab4986..4003831de 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -40,6 +40,7 @@
#include <linux/regmap.h>
#include <media/i2c/adv7604.h>
+#include <media/cec.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
@@ -80,6 +81,8 @@ MODULE_LICENSE("GPL");
#define ADV76XX_OP_SWAP_CB_CR (1 << 0)
+#define ADV76XX_MAX_ADDRS (3)
+
enum adv76xx_type {
ADV7604,
ADV7611,
@@ -164,6 +167,7 @@ struct adv76xx_state {
struct adv76xx_platform_data pdata;
struct gpio_desc *hpd_gpio[4];
+ struct gpio_desc *reset_gpio;
struct v4l2_subdev sd;
struct media_pad pads[ADV76XX_PAD_MAX];
@@ -184,10 +188,15 @@ struct adv76xx_state {
u16 spa_port_a[2];
struct v4l2_fract aspect_ratio;
u32 rgb_quantization_range;
- struct workqueue_struct *work_queues;
struct delayed_work delayed_work_enable_hotplug;
bool restart_stdi_once;
+ /* CEC */
+ struct cec_adapter *cec_adap;
+ u8 cec_addr[ADV76XX_MAX_ADDRS];
+ u8 cec_valid_addrs;
+ bool cec_enabled_adap;
+
/* i2c clients */
struct i2c_client *i2c_clients[ADV76XX_PAGE_MAX];
@@ -381,7 +390,8 @@ static inline int io_write(struct v4l2_subdev *sd, u8 reg, u8 val)
return regmap_write(state->regmap[ADV76XX_PAGE_IO], reg, val);
}
-static inline int io_write_clr_set(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
+static inline int io_write_clr_set(struct v4l2_subdev *sd, u8 reg, u8 mask,
+ u8 val)
{
return io_write(sd, reg, (io_read(sd, reg) & ~mask) | val);
}
@@ -414,6 +424,12 @@ static inline int cec_write(struct v4l2_subdev *sd, u8 reg, u8 val)
return regmap_write(state->regmap[ADV76XX_PAGE_CEC], reg, val);
}
+static inline int cec_write_clr_set(struct v4l2_subdev *sd, u8 reg, u8 mask,
+ u8 val)
+{
+ return cec_write(sd, reg, (cec_read(sd, reg) & ~mask) | val);
+}
+
static inline int infoframe_read(struct v4l2_subdev *sd, u8 reg)
{
struct adv76xx_state *state = to_state(sd);
@@ -892,9 +908,9 @@ static int adv76xx_s_detect_tx_5v_ctrl(struct v4l2_subdev *sd)
{
struct adv76xx_state *state = to_state(sd);
const struct adv76xx_chip_info *info = state->info;
+ u16 cable_det = info->read_cable_det(sd);
- return v4l2_ctrl_s_ctrl(state->detect_tx_5v_ctrl,
- info->read_cable_det(sd));
+ return v4l2_ctrl_s_ctrl(state->detect_tx_5v_ctrl, cable_det);
}
static int find_and_set_predefined_video_timings(struct v4l2_subdev *sd,
@@ -1086,6 +1102,10 @@ static void set_rgb_quantization_range(struct v4l2_subdev *sd)
struct adv76xx_state *state = to_state(sd);
bool rgb_output = io_read(sd, 0x02) & 0x02;
bool hdmi_signal = hdmi_read(sd, 0x05) & 0x80;
+ u8 y = HDMI_COLORSPACE_RGB;
+
+ if (hdmi_signal && (io_read(sd, 0x60) & 1))
+ y = infoframe_read(sd, 0x01) >> 5;
v4l2_dbg(2, debug, sd, "%s: RGB quantization range: %d, RGB out: %d, HDMI: %d\n",
__func__, state->rgb_quantization_range,
@@ -1093,6 +1113,7 @@ static void set_rgb_quantization_range(struct v4l2_subdev *sd)
adv76xx_set_gain(sd, true, 0x0, 0x0, 0x0);
adv76xx_set_offset(sd, true, 0x0, 0x0, 0x0);
+ io_write_clr_set(sd, 0x02, 0x04, rgb_output ? 0 : 4);
switch (state->rgb_quantization_range) {
case V4L2_DV_RGB_RANGE_AUTO:
@@ -1142,6 +1163,9 @@ static void set_rgb_quantization_range(struct v4l2_subdev *sd)
break;
}
+ if (y != HDMI_COLORSPACE_RGB)
+ break;
+
/* RGB limited range (16-235) */
io_write_clr_set(sd, 0x02, 0xf0, 0x00);
@@ -1153,6 +1177,9 @@ static void set_rgb_quantization_range(struct v4l2_subdev *sd)
break;
}
+ if (y != HDMI_COLORSPACE_RGB)
+ break;
+
/* RGB full range (0-255) */
io_write_clr_set(sd, 0x02, 0xf0, 0x10);
@@ -1849,6 +1876,7 @@ static void adv76xx_setup_format(struct adv76xx_state *state)
io_write_clr_set(sd, 0x04, 0xe0, adv76xx_op_ch_sel(state));
io_write_clr_set(sd, 0x05, 0x01,
state->format->swap_cb_cr ? ADV76XX_OP_SWAP_CB_CR : 0);
+ set_rgb_quantization_range(sd);
}
static int adv76xx_get_format(struct v4l2_subdev *sd,
@@ -1924,6 +1952,210 @@ static int adv76xx_set_format(struct v4l2_subdev *sd,
return 0;
}
+#if IS_ENABLED(CONFIG_VIDEO_ADV7604_CEC)
+static void adv76xx_cec_tx_raw_status(struct v4l2_subdev *sd, u8 tx_raw_status)
+{
+ struct adv76xx_state *state = to_state(sd);
+
+ if ((cec_read(sd, 0x11) & 0x01) == 0) {
+ v4l2_dbg(1, debug, sd, "%s: tx raw: tx disabled\n", __func__);
+ return;
+ }
+
+ if (tx_raw_status & 0x02) {
+ v4l2_dbg(1, debug, sd, "%s: tx raw: arbitration lost\n",
+ __func__);
+ cec_transmit_done(state->cec_adap, CEC_TX_STATUS_ARB_LOST,
+ 1, 0, 0, 0);
+ }
+ if (tx_raw_status & 0x04) {
+ u8 status;
+ u8 nack_cnt;
+ u8 low_drive_cnt;
+
+ v4l2_dbg(1, debug, sd, "%s: tx raw: retry failed\n", __func__);
+ /*
+ * We set this status bit since this hardware performs
+ * retransmissions.
+ */
+ status = CEC_TX_STATUS_MAX_RETRIES;
+ nack_cnt = cec_read(sd, 0x14) & 0xf;
+ if (nack_cnt)
+ status |= CEC_TX_STATUS_NACK;
+ low_drive_cnt = cec_read(sd, 0x14) >> 4;
+ if (low_drive_cnt)
+ status |= CEC_TX_STATUS_LOW_DRIVE;
+ cec_transmit_done(state->cec_adap, status,
+ 0, nack_cnt, low_drive_cnt, 0);
+ return;
+ }
+ if (tx_raw_status & 0x01) {
+ v4l2_dbg(1, debug, sd, "%s: tx raw: ready ok\n", __func__);
+ cec_transmit_done(state->cec_adap, CEC_TX_STATUS_OK, 0, 0, 0, 0);
+ return;
+ }
+}
+
+static void adv76xx_cec_isr(struct v4l2_subdev *sd, bool *handled)
+{
+ struct adv76xx_state *state = to_state(sd);
+ u8 cec_irq;
+
+ /* cec controller */
+ cec_irq = io_read(sd, 0x4d) & 0x0f;
+ if (!cec_irq)
+ return;
+
+ v4l2_dbg(1, debug, sd, "%s: cec: irq 0x%x\n", __func__, cec_irq);
+ adv76xx_cec_tx_raw_status(sd, cec_irq);
+ if (cec_irq & 0x08) {
+ struct cec_msg msg;
+
+ msg.len = cec_read(sd, 0x25) & 0x1f;
+ if (msg.len > 16)
+ msg.len = 16;
+
+ if (msg.len) {
+ u8 i;
+
+ for (i = 0; i < msg.len; i++)
+ msg.msg[i] = cec_read(sd, i + 0x15);
+ cec_write(sd, 0x26, 0x01); /* re-enable rx */
+ cec_received_msg(state->cec_adap, &msg);
+ }
+ }
+
+ /* note: the bit order is swapped between 0x4d and 0x4e */
+ cec_irq = ((cec_irq & 0x08) >> 3) | ((cec_irq & 0x04) >> 1) |
+ ((cec_irq & 0x02) << 1) | ((cec_irq & 0x01) << 3);
+ io_write(sd, 0x4e, cec_irq);
+
+ if (handled)
+ *handled = true;
+}
+
+static int adv76xx_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct adv76xx_state *state = adap->priv;
+ struct v4l2_subdev *sd = &state->sd;
+
+ if (!state->cec_enabled_adap && enable) {
+ cec_write_clr_set(sd, 0x2a, 0x01, 0x01); /* power up cec */
+ cec_write(sd, 0x2c, 0x01); /* cec soft reset */
+ cec_write_clr_set(sd, 0x11, 0x01, 0); /* initially disable tx */
+ /* enabled irqs: */
+ /* tx: ready */
+ /* tx: arbitration lost */
+ /* tx: retry timeout */
+ /* rx: ready */
+ io_write_clr_set(sd, 0x50, 0x0f, 0x0f);
+ cec_write(sd, 0x26, 0x01); /* enable rx */
+ } else if (state->cec_enabled_adap && !enable) {
+ /* disable cec interrupts */
+ io_write_clr_set(sd, 0x50, 0x0f, 0x00);
+ /* disable address mask 1-3 */
+ cec_write_clr_set(sd, 0x27, 0x70, 0x00);
+ /* power down cec section */
+ cec_write_clr_set(sd, 0x2a, 0x01, 0x00);
+ state->cec_valid_addrs = 0;
+ }
+ state->cec_enabled_adap = enable;
+ adv76xx_s_detect_tx_5v_ctrl(sd);
+ return 0;
+}
+
+static int adv76xx_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
+{
+ struct adv76xx_state *state = adap->priv;
+ struct v4l2_subdev *sd = &state->sd;
+ unsigned int i, free_idx = ADV76XX_MAX_ADDRS;
+
+ if (!state->cec_enabled_adap)
+ return addr == CEC_LOG_ADDR_INVALID ? 0 : -EIO;
+
+ if (addr == CEC_LOG_ADDR_INVALID) {
+ cec_write_clr_set(sd, 0x27, 0x70, 0);
+ state->cec_valid_addrs = 0;
+ return 0;
+ }
+
+ for (i = 0; i < ADV76XX_MAX_ADDRS; i++) {
+ bool is_valid = state->cec_valid_addrs & (1 << i);
+
+ if (free_idx == ADV76XX_MAX_ADDRS && !is_valid)
+ free_idx = i;
+ if (is_valid && state->cec_addr[i] == addr)
+ return 0;
+ }
+ if (i == ADV76XX_MAX_ADDRS) {
+ i = free_idx;
+ if (i == ADV76XX_MAX_ADDRS)
+ return -ENXIO;
+ }
+ state->cec_addr[i] = addr;
+ state->cec_valid_addrs |= 1 << i;
+
+ switch (i) {
+ case 0:
+ /* enable address mask 0 */
+ cec_write_clr_set(sd, 0x27, 0x10, 0x10);
+ /* set address for mask 0 */
+ cec_write_clr_set(sd, 0x28, 0x0f, addr);
+ break;
+ case 1:
+ /* enable address mask 1 */
+ cec_write_clr_set(sd, 0x27, 0x20, 0x20);
+ /* set address for mask 1 */
+ cec_write_clr_set(sd, 0x28, 0xf0, addr << 4);
+ break;
+ case 2:
+ /* enable address mask 2 */
+ cec_write_clr_set(sd, 0x27, 0x40, 0x40);
+ /* set address for mask 1 */
+ cec_write_clr_set(sd, 0x29, 0x0f, addr);
+ break;
+ }
+ return 0;
+}
+
+static int adv76xx_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct adv76xx_state *state = adap->priv;
+ struct v4l2_subdev *sd = &state->sd;
+ u8 len = msg->len;
+ unsigned int i;
+
+ /*
+ * The number of retries is the number of attempts - 1, but retry
+ * at least once. It's not clear if a value of 0 is allowed, so
+ * let's do at least one retry.
+ */
+ cec_write_clr_set(sd, 0x12, 0x70, max(1, attempts - 1) << 4);
+
+ if (len > 16) {
+ v4l2_err(sd, "%s: len exceeded 16 (%d)\n", __func__, len);
+ return -EINVAL;
+ }
+
+ /* write data */
+ for (i = 0; i < len; i++)
+ cec_write(sd, i, msg->msg[i]);
+
+ /* set length (data + header) */
+ cec_write(sd, 0x10, len);
+ /* start transmit, enable tx */
+ cec_write(sd, 0x11, 0x01);
+ return 0;
+}
+
+static const struct cec_adap_ops adv76xx_cec_adap_ops = {
+ .adap_enable = adv76xx_cec_adap_enable,
+ .adap_log_addr = adv76xx_cec_adap_log_addr,
+ .adap_transmit = adv76xx_cec_adap_transmit,
+};
+#endif
+
static int adv76xx_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
{
struct adv76xx_state *state = to_state(sd);
@@ -1969,6 +2201,11 @@ static int adv76xx_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
*handled = true;
}
+#if IS_ENABLED(CONFIG_VIDEO_ADV7604_CEC)
+ /* cec */
+ adv76xx_cec_isr(sd, handled);
+#endif
+
/* tx 5v detect */
tx_5v = irq_reg_0x70 & info->cable_det_mask;
if (tx_5v) {
@@ -2018,39 +2255,12 @@ static int adv76xx_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
return 0;
}
-static int get_edid_spa_location(const u8 *edid)
-{
- u8 d;
-
- if ((edid[0x7e] != 1) ||
- (edid[0x80] != 0x02) ||
- (edid[0x81] != 0x03)) {
- return -1;
- }
-
- /* search Vendor Specific Data Block (tag 3) */
- d = edid[0x82] & 0x7f;
- if (d > 4) {
- int i = 0x84;
- int end = 0x80 + d;
-
- do {
- u8 tag = edid[i] >> 5;
- u8 len = edid[i] & 0x1f;
-
- if ((tag == 3) && (len >= 5))
- return i + 4;
- i += len + 1;
- } while (i < end);
- }
- return -1;
-}
-
static int adv76xx_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
{
struct adv76xx_state *state = to_state(sd);
const struct adv76xx_chip_info *info = state->info;
- int spa_loc;
+ unsigned int spa_loc;
+ u16 pa;
int err;
int i;
@@ -2081,6 +2291,10 @@ static int adv76xx_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
edid->blocks = 2;
return -E2BIG;
}
+ pa = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, &spa_loc);
+ err = cec_phys_addr_validate(pa, &pa, NULL);
+ if (err)
+ return err;
v4l2_dbg(2, debug, sd, "%s: write EDID pad %d, edid.present = 0x%x\n",
__func__, edid->pad, state->edid.present);
@@ -2090,9 +2304,12 @@ static int adv76xx_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
adv76xx_set_hpd(state, 0);
rep_write_clr_set(sd, info->edid_enable_reg, 0x0f, 0x00);
- spa_loc = get_edid_spa_location(edid->edid);
- if (spa_loc < 0)
- spa_loc = 0xc0; /* Default value [REF_02, p. 116] */
+ /*
+ * Return an error if no location of the source physical address
+ * was found.
+ */
+ if (spa_loc == 0)
+ return -EINVAL;
switch (edid->pad) {
case ADV76XX_PAD_HDMI_PORT_A:
@@ -2152,10 +2369,10 @@ static int adv76xx_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
v4l2_err(sd, "error enabling edid (0x%x)\n", state->edid.present);
return -EIO;
}
+ cec_s_phys_addr(state->cec_adap, pa, false);
/* enable hotplug after 100 ms */
- queue_delayed_work(state->work_queues,
- &state->delayed_work_enable_hotplug, HZ / 10);
+ schedule_delayed_work(&state->delayed_work_enable_hotplug, HZ / 10);
return 0;
}
@@ -2276,8 +2493,19 @@ static int adv76xx_log_status(struct v4l2_subdev *sd)
((edid_enabled & 0x02) ? "Yes" : "No"),
((edid_enabled & 0x04) ? "Yes" : "No"),
((edid_enabled & 0x08) ? "Yes" : "No"));
- v4l2_info(sd, "CEC: %s\n", !!(cec_read(sd, 0x2a) & 0x01) ?
+ v4l2_info(sd, "CEC: %s\n", state->cec_enabled_adap ?
"enabled" : "disabled");
+ if (state->cec_enabled_adap) {
+ int i;
+
+ for (i = 0; i < ADV76XX_MAX_ADDRS; i++) {
+ bool is_valid = state->cec_valid_addrs & (1 << i);
+
+ if (is_valid)
+ v4l2_info(sd, "CEC Logical Address: 0x%x\n",
+ state->cec_addr[i]);
+ }
+ }
v4l2_info(sd, "-----Signal status-----\n");
cable_det = info->read_cable_det(sd);
@@ -2323,11 +2551,10 @@ static int adv76xx_log_status(struct v4l2_subdev *sd)
rgb_quantization_range_txt[state->rgb_quantization_range]);
v4l2_info(sd, "Input color space: %s\n",
input_color_space_txt[reg_io_0x02 >> 4]);
- v4l2_info(sd, "Output color space: %s %s, saturator %s, alt-gamma %s\n",
+ v4l2_info(sd, "Output color space: %s %s, alt-gamma %s\n",
(reg_io_0x02 & 0x02) ? "RGB" : "YCbCr",
- (reg_io_0x02 & 0x04) ? "(16-235)" : "(0-255)",
(((reg_io_0x02 >> 2) & 0x01) ^ (reg_io_0x02 & 0x01)) ?
- "enabled" : "disabled",
+ "(16-235)" : "(0-255)",
(reg_io_0x02 & 0x08) ? "enabled" : "disabled");
v4l2_info(sd, "Color space conversion: %s\n",
csc_coeff_sel_rb[cp_read(sd, info->cp_csc) >> 4]);
@@ -2387,6 +2614,24 @@ static int adv76xx_subscribe_event(struct v4l2_subdev *sd,
}
}
+static int adv76xx_registered(struct v4l2_subdev *sd)
+{
+ struct adv76xx_state *state = to_state(sd);
+ int err;
+
+ err = cec_register_adapter(state->cec_adap);
+ if (err)
+ cec_delete_adapter(state->cec_adap);
+ return err;
+}
+
+static void adv76xx_unregistered(struct v4l2_subdev *sd)
+{
+ struct adv76xx_state *state = to_state(sd);
+
+ cec_unregister_adapter(state->cec_adap);
+}
+
/* ----------------------------------------------------------------------- */
static const struct v4l2_ctrl_ops adv76xx_ctrl_ops = {
@@ -2430,6 +2675,11 @@ static const struct v4l2_subdev_ops adv76xx_ops = {
.pad = &adv76xx_pad_ops,
};
+static const struct v4l2_subdev_internal_ops adv76xx_int_ops = {
+ .registered = adv76xx_registered,
+ .unregistered = adv76xx_unregistered,
+};
+
/* -------------------------- custom ctrls ---------------------------------- */
static const struct v4l2_ctrl_config adv7604_ctrl_analog_sampling_phase = {
@@ -2492,10 +2742,7 @@ static int adv76xx_core_init(struct v4l2_subdev *sd)
cp_write(sd, 0xcf, 0x01); /* Power down macrovision */
/* video format */
- io_write_clr_set(sd, 0x02, 0x0f,
- pdata->alt_gamma << 3 |
- pdata->op_656_range << 2 |
- pdata->alt_data_sat << 0);
+ io_write_clr_set(sd, 0x02, 0x0f, pdata->alt_gamma << 3);
io_write_clr_set(sd, 0x05, 0x0e, pdata->blank_data << 3 |
pdata->insert_av_codes << 2 |
pdata->replicate_av_codes << 1);
@@ -2845,10 +3092,8 @@ static int adv76xx_parse_dt(struct adv76xx_state *state)
if (flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
state->pdata.inv_llc_pol = 1;
- if (bus_cfg.bus_type == V4L2_MBUS_BT656) {
+ if (bus_cfg.bus_type == V4L2_MBUS_BT656)
state->pdata.insert_av_codes = 1;
- state->pdata.op_656_range = 1;
- }
/* Disable the interrupt for now as no DT-based board uses it. */
state->pdata.int1_config = ADV76XX_INT1_CONFIG_DISABLED;
@@ -2871,7 +3116,6 @@ static int adv76xx_parse_dt(struct adv76xx_state *state)
state->pdata.disable_pwrdnb = 0;
state->pdata.disable_cable_det_rst = 0;
state->pdata.blank_data = 1;
- state->pdata.alt_data_sat = 1;
state->pdata.op_format_mode_sel = ADV7604_OP_FORMAT_MODE0;
state->pdata.bus_order = ADV7604_BUS_ORDER_RGB;
@@ -3020,6 +3264,19 @@ static int configure_regmaps(struct adv76xx_state *state)
return 0;
}
+static void adv76xx_reset(struct adv76xx_state *state)
+{
+ if (state->reset_gpio) {
+ /* ADV76XX can be reset by a low reset pulse of minimum 5 ms. */
+ gpiod_set_value_cansleep(state->reset_gpio, 0);
+ usleep_range(5000, 10000);
+ gpiod_set_value_cansleep(state->reset_gpio, 1);
+ /*
+ * It is recommended to wait 5 ms after the low pulse before
+ * an I2C write is performed to the ADV76XX.
+ */
+ usleep_range(5000, 10000);
+ }
+}
+
static int adv76xx_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -3083,6 +3340,12 @@ static int adv76xx_probe(struct i2c_client *client,
if (state->hpd_gpio[i])
v4l_info(client, "Handling HPD %u GPIO\n", i);
}
+ state->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(state->reset_gpio))
+ return PTR_ERR(state->reset_gpio);
+
+ adv76xx_reset(state);
state->timings = cea640x480;
state->format = adv76xx_format_info(state, MEDIA_BUS_FMT_YUYV8_2X8);
@@ -3093,6 +3356,7 @@ static int adv76xx_probe(struct i2c_client *client,
id->name, i2c_adapter_id(client->adapter),
client->addr);
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+ sd->internal_ops = &adv76xx_int_ops;
/* Configure IO Regmap region */
err = configure_regmap(state, ADV76XX_PAGE_IO);
@@ -3206,14 +3470,6 @@ static int adv76xx_probe(struct i2c_client *client,
}
}
- /* work queues */
- state->work_queues = create_singlethread_workqueue(client->name);
- if (!state->work_queues) {
- v4l2_err(sd, "Could not create work queue\n");
- err = -ENOMEM;
- goto err_i2c;
- }
-
INIT_DELAYED_WORK(&state->delayed_work_enable_hotplug,
adv76xx_delayed_work_enable_hotplug);
@@ -3236,6 +3492,18 @@ static int adv76xx_probe(struct i2c_client *client,
err = adv76xx_core_init(sd);
if (err)
goto err_entity;
+
+#if IS_ENABLED(CONFIG_VIDEO_ADV7604_CEC)
+ state->cec_adap = cec_allocate_adapter(&adv76xx_cec_adap_ops,
+ state, dev_name(&client->dev),
+ CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS |
+ CEC_CAP_PASSTHROUGH | CEC_CAP_RC, ADV76XX_MAX_ADDRS,
+ &client->dev);
+ err = PTR_ERR_OR_ZERO(state->cec_adap);
+ if (err)
+ goto err_entity;
+#endif
+
v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
client->addr << 1, client->adapter->name);
@@ -3249,7 +3517,6 @@ err_entity:
media_entity_cleanup(&sd->entity);
err_work_queues:
cancel_delayed_work(&state->delayed_work_enable_hotplug);
- destroy_workqueue(state->work_queues);
err_i2c:
adv76xx_unregister_clients(state);
err_hdl:
@@ -3264,8 +3531,14 @@ static int adv76xx_remove(struct i2c_client *client)
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct adv76xx_state *state = to_state(sd);
+ /* disable interrupts */
+ io_write(sd, 0x40, 0);
+ io_write(sd, 0x41, 0);
+ io_write(sd, 0x46, 0);
+ io_write(sd, 0x6e, 0);
+ io_write(sd, 0x73, 0);
+
cancel_delayed_work(&state->delayed_work_enable_hotplug);
- destroy_workqueue(state->work_queues);
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
adv76xx_unregister_clients(to_state(sd));
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index ecaacb0a6..8c2a52e28 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -39,6 +39,7 @@
#include <linux/workqueue.h>
#include <linux/v4l2-dv-timings.h>
#include <linux/hdmi.h>
+#include <media/cec.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ctrls.h>
@@ -79,6 +80,8 @@ MODULE_LICENSE("GPL");
#define ADV7842_OP_SWAP_CB_CR (1 << 0)
+#define ADV7842_MAX_ADDRS (3)
+
/*
**********************************************************************
*
@@ -118,7 +121,6 @@ struct adv7842_state {
struct v4l2_fract aspect_ratio;
u32 rgb_quantization_range;
bool is_cea_format;
- struct workqueue_struct *work_queues;
struct delayed_work delayed_work_enable_hotplug;
bool restart_stdi_once;
bool hdmi_port_a;
@@ -142,6 +144,11 @@ struct adv7842_state {
struct v4l2_ctrl *free_run_color_ctrl_manual;
struct v4l2_ctrl *free_run_color_ctrl;
struct v4l2_ctrl *rgb_quantization_range_ctrl;
+
+ struct cec_adapter *cec_adap;
+ u8 cec_addr[ADV7842_MAX_ADDRS];
+ u8 cec_valid_addrs;
+ bool cec_enabled_adap;
};
/* Unsupported timings. This device cannot support 720p30. */
@@ -418,9 +425,9 @@ static inline int cec_write(struct v4l2_subdev *sd, u8 reg, u8 val)
return adv_smbus_write_byte_data(state->i2c_cec, reg, val);
}
-static inline int cec_write_and_or(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
+static inline int cec_write_clr_set(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
{
- return cec_write(sd, reg, (cec_read(sd, reg) & mask) | val);
+ return cec_write(sd, reg, (cec_read(sd, reg) & ~mask) | val);
}
static inline int infoframe_read(struct v4l2_subdev *sd, u8 reg)
@@ -696,6 +703,18 @@ adv7842_get_dv_timings_cap(struct v4l2_subdev *sd)
/* ----------------------------------------------------------------------- */
+static u16 adv7842_read_cable_det(struct v4l2_subdev *sd)
+{
+ u8 reg = io_read(sd, 0x6f);
+ u16 val = 0;
+
+ if (reg & 0x02)
+ val |= 1; /* port A */
+ if (reg & 0x01)
+ val |= 2; /* port B */
+ return val;
+}
+
static void adv7842_delayed_work_enable_hotplug(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
@@ -756,56 +775,23 @@ static int edid_write_vga_segment(struct v4l2_subdev *sd)
}
/* enable hotplug after 200 ms */
- queue_delayed_work(state->work_queues,
- &state->delayed_work_enable_hotplug, HZ / 5);
+ schedule_delayed_work(&state->delayed_work_enable_hotplug, HZ / 5);
return 0;
}
-static int edid_spa_location(const u8 *edid)
-{
- u8 d;
-
- /*
- * TODO, improve and update for other CEA extensions
- * currently only for 1 segment (256 bytes),
- * i.e. 1 extension block and CEA revision 3.
- */
- if ((edid[0x7e] != 1) ||
- (edid[0x80] != 0x02) ||
- (edid[0x81] != 0x03)) {
- return -EINVAL;
- }
- /*
- * search Vendor Specific Data Block (tag 3)
- */
- d = edid[0x82] & 0x7f;
- if (d > 4) {
- int i = 0x84;
- int end = 0x80 + d;
- do {
- u8 tag = edid[i]>>5;
- u8 len = edid[i] & 0x1f;
-
- if ((tag == 3) && (len >= 5))
- return i + 4;
- i += len + 1;
- } while (i < end);
- }
- return -EINVAL;
-}
-
static int edid_write_hdmi_segment(struct v4l2_subdev *sd, u8 port)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct adv7842_state *state = to_state(sd);
- const u8 *val = state->hdmi_edid.edid;
- int spa_loc = edid_spa_location(val);
+ const u8 *edid = state->hdmi_edid.edid;
+ int spa_loc;
+ u16 pa;
int err = 0;
int i;
- v4l2_dbg(2, debug, sd, "%s: write EDID on port %c (spa at 0x%x)\n",
- __func__, (port == ADV7842_EDID_PORT_A) ? 'A' : 'B', spa_loc);
+ v4l2_dbg(2, debug, sd, "%s: write EDID on port %c\n",
+ __func__, (port == ADV7842_EDID_PORT_A) ? 'A' : 'B');
/* HPA disable on port A and B */
io_write_and_or(sd, 0x20, 0xcf, 0x00);
@@ -816,24 +802,33 @@ static int edid_write_hdmi_segment(struct v4l2_subdev *sd, u8 port)
if (!state->hdmi_edid.present)
return 0;
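+ /* Extract and validate the source physical address (SPA) from the EDID. */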
+ pa = cec_get_edid_phys_addr(edid, 256, &spa_loc);
+ err = cec_phys_addr_validate(pa, &pa, NULL);
+ if (err)
+ return err;
+
+ /*
+ * Return an error if the location of the source physical address
+ * (SPA) was not found in the EDID.
+ */
+ if (spa_loc == 0)
+ return -EINVAL;
+
/* edid segment pointer '0' for HDMI ports */
rep_write_and_or(sd, 0x77, 0xef, 0x00);
for (i = 0; !err && i < 256; i += I2C_SMBUS_BLOCK_MAX)
err = adv_smbus_write_i2c_block_data(state->i2c_edid, i,
- I2C_SMBUS_BLOCK_MAX, val + i);
+ I2C_SMBUS_BLOCK_MAX, edid + i);
if (err)
return err;
- if (spa_loc < 0)
- spa_loc = 0xc0; /* Default value [REF_02, p. 199] */
-
if (port == ADV7842_EDID_PORT_A) {
- rep_write(sd, 0x72, val[spa_loc]);
- rep_write(sd, 0x73, val[spa_loc + 1]);
+ rep_write(sd, 0x72, edid[spa_loc]);
+ rep_write(sd, 0x73, edid[spa_loc + 1]);
} else {
- rep_write(sd, 0x74, val[spa_loc]);
- rep_write(sd, 0x75, val[spa_loc + 1]);
+ rep_write(sd, 0x74, edid[spa_loc]);
+ rep_write(sd, 0x75, edid[spa_loc + 1]);
}
rep_write(sd, 0x76, spa_loc & 0xff);
rep_write_and_or(sd, 0x77, 0xbf, (spa_loc >> 2) & 0x40);
@@ -853,10 +848,10 @@ static int edid_write_hdmi_segment(struct v4l2_subdev *sd, u8 port)
(port == ADV7842_EDID_PORT_A) ? 'A' : 'B');
return -EIO;
}
+ cec_s_phys_addr(state->cec_adap, pa, false);
/* enable hotplug after 200 ms */
- queue_delayed_work(state->work_queues,
- &state->delayed_work_enable_hotplug, HZ / 5);
+ schedule_delayed_work(&state->delayed_work_enable_hotplug, HZ / 5);
return 0;
}
@@ -983,20 +978,11 @@ static int adv7842_s_register(struct v4l2_subdev *sd,
static int adv7842_s_detect_tx_5v_ctrl(struct v4l2_subdev *sd)
{
struct adv7842_state *state = to_state(sd);
- int prev = v4l2_ctrl_g_ctrl(state->detect_tx_5v_ctrl);
- u8 reg_io_6f = io_read(sd, 0x6f);
- int val = 0;
+ u16 cable_det = adv7842_read_cable_det(sd);
- if (reg_io_6f & 0x02)
- val |= 1; /* port A */
- if (reg_io_6f & 0x01)
- val |= 2; /* port B */
-
- v4l2_dbg(1, debug, sd, "%s: 0x%x -> 0x%x\n", __func__, prev, val);
+ v4l2_dbg(1, debug, sd, "%s: 0x%x\n", __func__, cable_det);
- if (val != prev)
- return v4l2_ctrl_s_ctrl(state->detect_tx_5v_ctrl, val);
- return 0;
+ return v4l2_ctrl_s_ctrl(state->detect_tx_5v_ctrl, cable_det);
}
static int find_and_set_predefined_video_timings(struct v4l2_subdev *sd,
@@ -1198,6 +1184,10 @@ static void set_rgb_quantization_range(struct v4l2_subdev *sd)
struct adv7842_state *state = to_state(sd);
bool rgb_output = io_read(sd, 0x02) & 0x02;
bool hdmi_signal = hdmi_read(sd, 0x05) & 0x80;
+ u8 y = HDMI_COLORSPACE_RGB;
+
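+ /* If an AVI InfoFrame was received, take the colorspace (Y) from it. */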
+ if (hdmi_signal && (io_read(sd, 0x60) & 1))
+ y = infoframe_read(sd, 0x01) >> 5;
v4l2_dbg(2, debug, sd, "%s: RGB quantization range: %d, RGB out: %d, HDMI: %d\n",
__func__, state->rgb_quantization_range,
@@ -1205,6 +1195,7 @@ static void set_rgb_quantization_range(struct v4l2_subdev *sd)
adv7842_set_gain(sd, true, 0x0, 0x0, 0x0);
adv7842_set_offset(sd, true, 0x0, 0x0, 0x0);
+ io_write_clr_set(sd, 0x02, 0x04, rgb_output ? 0 : 4);
switch (state->rgb_quantization_range) {
case V4L2_DV_RGB_RANGE_AUTO:
@@ -1254,6 +1245,9 @@ static void set_rgb_quantization_range(struct v4l2_subdev *sd)
break;
}
+ if (y != HDMI_COLORSPACE_RGB)
+ break;
+
/* RGB limited range (16-235) */
io_write_and_or(sd, 0x02, 0x0f, 0x00);
@@ -1265,6 +1259,9 @@ static void set_rgb_quantization_range(struct v4l2_subdev *sd)
break;
}
+ if (y != HDMI_COLORSPACE_RGB)
+ break;
+
/* RGB full range (0-255) */
io_write_and_or(sd, 0x02, 0x0f, 0x10);
@@ -2072,6 +2069,7 @@ static void adv7842_setup_format(struct adv7842_state *state)
io_write_clr_set(sd, 0x04, 0xe0, adv7842_op_ch_sel(state));
io_write_clr_set(sd, 0x05, 0x01,
state->format->swap_cb_cr ? ADV7842_OP_SWAP_CB_CR : 0);
+ set_rgb_quantization_range(sd);
}
static int adv7842_get_format(struct v4l2_subdev *sd,
@@ -2170,6 +2168,207 @@ static void adv7842_irq_enable(struct v4l2_subdev *sd, bool enable)
}
}
+#if IS_ENABLED(CONFIG_VIDEO_ADV7842_CEC)
+static void adv7842_cec_tx_raw_status(struct v4l2_subdev *sd, u8 tx_raw_status)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ if ((cec_read(sd, 0x11) & 0x01) == 0) {
+ v4l2_dbg(1, debug, sd, "%s: tx raw: tx disabled\n", __func__);
+ return;
+ }
+
+ if (tx_raw_status & 0x02) {
+ v4l2_dbg(1, debug, sd, "%s: tx raw: arbitration lost\n",
+ __func__);
+ cec_transmit_done(state->cec_adap, CEC_TX_STATUS_ARB_LOST,
+ 1, 0, 0, 0);
+ return;
+ }
+ if (tx_raw_status & 0x04) {
+ u8 status;
+ u8 nack_cnt;
+ u8 low_drive_cnt;
+
+ v4l2_dbg(1, debug, sd, "%s: tx raw: retry failed\n", __func__);
+ /*
+ * We set this status bit since this hardware performs
+ * retransmissions.
+ */
+ status = CEC_TX_STATUS_MAX_RETRIES;
+ nack_cnt = cec_read(sd, 0x14) & 0xf;
+ if (nack_cnt)
+ status |= CEC_TX_STATUS_NACK;
+ low_drive_cnt = cec_read(sd, 0x14) >> 4;
+ if (low_drive_cnt)
+ status |= CEC_TX_STATUS_LOW_DRIVE;
+ cec_transmit_done(state->cec_adap, status,
+ 0, nack_cnt, low_drive_cnt, 0);
+ return;
+ }
+ if (tx_raw_status & 0x01) {
+ v4l2_dbg(1, debug, sd, "%s: tx raw: ready ok\n", __func__);
+ cec_transmit_done(state->cec_adap, CEC_TX_STATUS_OK, 0, 0, 0, 0);
+ return;
+ }
+}
+
+static void adv7842_cec_isr(struct v4l2_subdev *sd, bool *handled)
+{
+ u8 cec_irq;
+
+ /* cec controller */
+ cec_irq = io_read(sd, 0x93) & 0x0f;
+ if (!cec_irq)
+ return;
+
+ v4l2_dbg(1, debug, sd, "%s: cec: irq 0x%x\n", __func__, cec_irq);
+ adv7842_cec_tx_raw_status(sd, cec_irq);
+ if (cec_irq & 0x08) {
+ struct adv7842_state *state = to_state(sd);
+ struct cec_msg msg;
+
+ msg.len = cec_read(sd, 0x25) & 0x1f;
+ if (msg.len > 16)
+ msg.len = 16;
+
+ if (msg.len) {
+ u8 i;
+
+ for (i = 0; i < msg.len; i++)
+ msg.msg[i] = cec_read(sd, i + 0x15);
+ cec_write(sd, 0x26, 0x01); /* re-enable rx */
+ cec_received_msg(state->cec_adap, &msg);
+ }
+ }
+
+ io_write(sd, 0x94, cec_irq);
+
+ if (handled)
+ *handled = true;
+}
+
+static int adv7842_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct adv7842_state *state = adap->priv;
+ struct v4l2_subdev *sd = &state->sd;
+
+ if (!state->cec_enabled_adap && enable) {
+ cec_write_clr_set(sd, 0x2a, 0x01, 0x01); /* power up cec */
+ cec_write(sd, 0x2c, 0x01); /* cec soft reset */
+ cec_write_clr_set(sd, 0x11, 0x01, 0); /* initially disable tx */
+ /* enabled irqs: */
+ /* tx: ready */
+ /* tx: arbitration lost */
+ /* tx: retry timeout */
+ /* rx: ready */
+ io_write_clr_set(sd, 0x96, 0x0f, 0x0f);
+ cec_write(sd, 0x26, 0x01); /* enable rx */
+ } else if (state->cec_enabled_adap && !enable) {
+ /* disable cec interrupts */
+ io_write_clr_set(sd, 0x96, 0x0f, 0x00);
+ /* disable address mask 1-3 */
+ cec_write_clr_set(sd, 0x27, 0x70, 0x00);
+ /* power down cec section */
+ cec_write_clr_set(sd, 0x2a, 0x01, 0x00);
+ state->cec_valid_addrs = 0;
+ }
+ state->cec_enabled_adap = enable;
+ return 0;
+}
+
+static int adv7842_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
+{
+ struct adv7842_state *state = adap->priv;
+ struct v4l2_subdev *sd = &state->sd;
+ unsigned int i, free_idx = ADV7842_MAX_ADDRS;
+
+ if (!state->cec_enabled_adap)
+ return addr == CEC_LOG_ADDR_INVALID ? 0 : -EIO;
+
+ if (addr == CEC_LOG_ADDR_INVALID) {
+ cec_write_clr_set(sd, 0x27, 0x70, 0);
+ state->cec_valid_addrs = 0;
+ return 0;
+ }
+
+ for (i = 0; i < ADV7842_MAX_ADDRS; i++) {
+ bool is_valid = state->cec_valid_addrs & (1 << i);
+
+ if (free_idx == ADV7842_MAX_ADDRS && !is_valid)
+ free_idx = i;
+ if (is_valid && state->cec_addr[i] == addr)
+ return 0;
+ }
+ if (i == ADV7842_MAX_ADDRS) {
+ i = free_idx;
+ if (i == ADV7842_MAX_ADDRS)
+ return -ENXIO;
+ }
+ state->cec_addr[i] = addr;
+ state->cec_valid_addrs |= 1 << i;
+
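+ /*
+ * Logical addresses are stored in nibble-wide fields: reg 0x28 holds
+ * addresses 0 (low nibble) and 1 (high nibble), reg 0x29 holds
+ * address 2 in its low nibble. Bits 4-6 of reg 0x27 enable the
+ * corresponding address masks.
+ */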
+ switch (i) {
+ case 0:
+ /* enable address mask 0 */
+ cec_write_clr_set(sd, 0x27, 0x10, 0x10);
+ /* set address for mask 0 */
+ cec_write_clr_set(sd, 0x28, 0x0f, addr);
+ break;
+ case 1:
+ /* enable address mask 1 */
+ cec_write_clr_set(sd, 0x27, 0x20, 0x20);
+ /* set address for mask 1 */
+ cec_write_clr_set(sd, 0x28, 0xf0, addr << 4);
+ break;
+ case 2:
+ /* enable address mask 2 */
+ cec_write_clr_set(sd, 0x27, 0x40, 0x40);
+ /* set address for mask 2 */
+ cec_write_clr_set(sd, 0x29, 0x0f, addr);
+ break;
+ }
+ return 0;
+}
+
+static int adv7842_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct adv7842_state *state = adap->priv;
+ struct v4l2_subdev *sd = &state->sd;
+ u8 len = msg->len;
+ unsigned int i;
+
+ /*
+ * The number of retries is the number of attempts - 1, but retry
+ * at least once: it's not clear whether the hardware accepts a
+ * retry count of 0, so program at least one retry.
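+ *
+ * Illustrative example: attempts = 1 gives max(1, 1 - 1) = 1, so one
+ * retry is programmed into bits [6:4] of register 0x12.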
+ */
+ cec_write_clr_set(sd, 0x12, 0x70, max(1, attempts - 1) << 4);
+
+ if (len > 16) {
+ v4l2_err(sd, "%s: len exceeded 16 (%d)\n", __func__, len);
+ return -EINVAL;
+ }
+
+ /* write data */
+ for (i = 0; i < len; i++)
+ cec_write(sd, i, msg->msg[i]);
+
+ /* set length (data + header) */
+ cec_write(sd, 0x10, len);
+ /* start transmit, enable tx */
+ cec_write(sd, 0x11, 0x01);
+ return 0;
+}
+
+static const struct cec_adap_ops adv7842_cec_adap_ops = {
+ .adap_enable = adv7842_cec_adap_enable,
+ .adap_log_addr = adv7842_cec_adap_log_addr,
+ .adap_transmit = adv7842_cec_adap_transmit,
+};
+#endif
+
static int adv7842_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
{
struct adv7842_state *state = to_state(sd);
@@ -2241,6 +2440,11 @@ static int adv7842_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
*handled = true;
}
+#if IS_ENABLED(CONFIG_VIDEO_ADV7842_CEC)
+ /* cec */
+ adv7842_cec_isr(sd, handled);
+#endif
+
/* tx 5v detect */
if (irq_status[2] & 0x3) {
v4l2_dbg(1, debug, sd, "%s: irq tx_5v\n", __func__);
@@ -2321,10 +2525,12 @@ static int adv7842_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *e)
case ADV7842_EDID_PORT_A:
case ADV7842_EDID_PORT_B:
memset(&state->hdmi_edid.edid, 0, 256);
- if (e->blocks)
+ if (e->blocks) {
state->hdmi_edid.present |= 0x04 << e->pad;
- else
+ } else {
state->hdmi_edid.present &= ~(0x04 << e->pad);
+ adv7842_s_detect_tx_5v_ctrl(sd);
+ }
memcpy(&state->hdmi_edid.edid, e->edid, 128 * e->blocks);
err = edid_write_hdmi_segment(sd, e->pad);
break;
@@ -2397,6 +2603,8 @@ static void adv7842_log_infoframes(struct v4l2_subdev *sd)
log_infoframe(sd, &cri[i]);
}
+#if 0
+/* Keep this around for now, as it could be useful for debugging */
static const char * const prim_mode_txt[] = {
"SDP",
"Component",
@@ -2415,6 +2623,7 @@ static const char * const prim_mode_txt[] = {
"Reserved",
"Reserved",
};
+#endif
static int adv7842_sdp_log_status(struct v4l2_subdev *sd)
{
@@ -2509,8 +2718,19 @@ static int adv7842_cp_log_status(struct v4l2_subdev *sd)
v4l2_info(sd, "HPD A %s, B %s\n",
reg_io_0x21 & 0x02 ? "enabled" : "disabled",
reg_io_0x21 & 0x01 ? "enabled" : "disabled");
- v4l2_info(sd, "CEC %s\n", !!(cec_read(sd, 0x2a) & 0x01) ?
+ v4l2_info(sd, "CEC: %s\n", state->cec_enabled_adap ?
"enabled" : "disabled");
+ if (state->cec_enabled_adap) {
+ int i;
+
+ for (i = 0; i < ADV7842_MAX_ADDRS; i++) {
+ bool is_valid = state->cec_valid_addrs & (1 << i);
+
+ if (is_valid)
+ v4l2_info(sd, "CEC Logical Address: 0x%x\n",
+ state->cec_addr[i]);
+ }
+ }
v4l2_info(sd, "-----Signal status-----\n");
if (state->hdmi_port_a) {
@@ -2569,11 +2789,11 @@ static int adv7842_cp_log_status(struct v4l2_subdev *sd)
rgb_quantization_range_txt[state->rgb_quantization_range]);
v4l2_info(sd, "Input color space: %s\n",
input_color_space_txt[reg_io_0x02 >> 4]);
- v4l2_info(sd, "Output color space: %s %s, saturator %s\n",
+ v4l2_info(sd, "Output color space: %s %s, alt-gamma %s\n",
(reg_io_0x02 & 0x02) ? "RGB" : "YCbCr",
- (reg_io_0x02 & 0x04) ? "(16-235)" : "(0-255)",
- ((reg_io_0x02 & 0x04) ^ (reg_io_0x02 & 0x01)) ?
- "enabled" : "disabled");
+ (((reg_io_0x02 >> 2) & 0x01) ^ (reg_io_0x02 & 0x01)) ?
+ "(16-235)" : "(0-255)",
+ (reg_io_0x02 & 0x08) ? "enabled" : "disabled");
v4l2_info(sd, "Color space conversion: %s\n",
csc_coeff_sel_rb[cp_read(sd, 0xf4) >> 4]);
@@ -2777,11 +2997,7 @@ static int adv7842_core_init(struct v4l2_subdev *sd)
io_write(sd, 0x15, 0x80); /* Power up pads */
/* video format */
- io_write(sd, 0x02,
- 0xf0 |
- pdata->alt_gamma << 3 |
- pdata->op_656_range << 2 |
- pdata->alt_data_sat << 0);
+ io_write(sd, 0x02, 0xf0 | pdata->alt_gamma << 3);
io_write_and_or(sd, 0x05, 0xf0, pdata->blank_data << 3 |
pdata->insert_av_codes << 2 |
pdata->replicate_av_codes << 1);
@@ -3031,6 +3247,24 @@ static int adv7842_subscribe_event(struct v4l2_subdev *sd,
}
}
+static int adv7842_registered(struct v4l2_subdev *sd)
+{
+ struct adv7842_state *state = to_state(sd);
+ int err;
+
+ err = cec_register_adapter(state->cec_adap);
+ if (err)
+ cec_delete_adapter(state->cec_adap);
+ return err;
+}
+
+static void adv7842_unregistered(struct v4l2_subdev *sd)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ cec_unregister_adapter(state->cec_adap);
+}
+
/* ----------------------------------------------------------------------- */
static const struct v4l2_ctrl_ops adv7842_ctrl_ops = {
@@ -3077,6 +3311,11 @@ static const struct v4l2_subdev_ops adv7842_ops = {
.pad = &adv7842_pad_ops,
};
+static const struct v4l2_subdev_internal_ops adv7842_int_ops = {
+ .registered = adv7842_registered,
+ .unregistered = adv7842_unregistered,
+};
+
/* -------------------------- custom ctrls ---------------------------------- */
static const struct v4l2_ctrl_config adv7842_ctrl_analog_sampling_phase = {
@@ -3241,6 +3480,7 @@ static int adv7842_probe(struct i2c_client *client,
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &adv7842_ops);
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+ sd->internal_ops = &adv7842_int_ops;
state->mode = pdata->mode;
state->hdmi_port_a = pdata->input == ADV7842_SELECT_HDMI_PORT_A;
@@ -3311,13 +3551,6 @@ static int adv7842_probe(struct i2c_client *client,
goto err_i2c;
}
- /* work queues */
- state->work_queues = create_singlethread_workqueue(client->name);
- if (!state->work_queues) {
- v4l2_err(sd, "Could not create work queue\n");
- err = -ENOMEM;
- goto err_i2c;
- }
INIT_DELAYED_WORK(&state->delayed_work_enable_hotplug,
adv7842_delayed_work_enable_hotplug);
@@ -3331,6 +3564,17 @@ static int adv7842_probe(struct i2c_client *client,
if (err)
goto err_entity;
+#if IS_ENABLED(CONFIG_VIDEO_ADV7842_CEC)
+ state->cec_adap = cec_allocate_adapter(&adv7842_cec_adap_ops,
+ state, dev_name(&client->dev),
+ CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS |
+ CEC_CAP_PASSTHROUGH | CEC_CAP_RC, ADV7842_MAX_ADDRS,
+ &client->dev);
+ err = PTR_ERR_OR_ZERO(state->cec_adap);
+ if (err)
+ goto err_entity;
+#endif
+
v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
client->addr << 1, client->adapter->name);
return 0;
@@ -3339,7 +3583,6 @@ err_entity:
media_entity_cleanup(&sd->entity);
err_work_queues:
cancel_delayed_work(&state->delayed_work_enable_hotplug);
- destroy_workqueue(state->work_queues);
err_i2c:
adv7842_unregister_clients(sd);
err_hdl:
@@ -3355,9 +3598,7 @@ static int adv7842_remove(struct i2c_client *client)
struct adv7842_state *state = to_state(sd);
adv7842_irq_enable(sd, false);
-
cancel_delayed_work(&state->delayed_work_enable_hotplug);
- destroy_workqueue(state->work_queues);
v4l2_device_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
adv7842_unregister_clients(sd);
diff --git a/drivers/media/i2c/cs53l32a.c b/drivers/media/i2c/cs53l32a.c
index b7e87e386..e4b3cf49d 100644
--- a/drivers/media/i2c/cs53l32a.c
+++ b/drivers/media/i2c/cs53l32a.c
@@ -121,13 +121,6 @@ static const struct v4l2_ctrl_ops cs53l32a_ctrl_ops = {
static const struct v4l2_subdev_core_ops cs53l32a_core_ops = {
.log_status = cs53l32a_log_status,
- .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
- .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
- .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
- .g_ctrl = v4l2_subdev_g_ctrl,
- .s_ctrl = v4l2_subdev_s_ctrl,
- .queryctrl = v4l2_subdev_queryctrl,
- .querymenu = v4l2_subdev_querymenu,
};
static const struct v4l2_subdev_audio_ops cs53l32a_audio_ops = {
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index 07a3e7173..142ae2880 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -5042,13 +5042,6 @@ static const struct v4l2_ctrl_ops cx25840_ctrl_ops = {
static const struct v4l2_subdev_core_ops cx25840_core_ops = {
.log_status = cx25840_log_status,
- .g_ctrl = v4l2_subdev_g_ctrl,
- .s_ctrl = v4l2_subdev_s_ctrl,
- .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
- .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
- .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
- .queryctrl = v4l2_subdev_queryctrl,
- .querymenu = v4l2_subdev_querymenu,
.reset = cx25840_reset,
.load_fw = cx25840_load_fw,
.s_io_pin_config = common_s_io_pin_config,
diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c
index e016626eb..503b7c4f0 100644
--- a/drivers/media/i2c/msp3400-driver.c
+++ b/drivers/media/i2c/msp3400-driver.c
@@ -642,13 +642,6 @@ static const struct v4l2_ctrl_ops msp_ctrl_ops = {
static const struct v4l2_subdev_core_ops msp_core_ops = {
.log_status = msp_log_status,
- .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
- .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
- .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
- .g_ctrl = v4l2_subdev_g_ctrl,
- .s_ctrl = v4l2_subdev_s_ctrl,
- .queryctrl = v4l2_subdev_queryctrl,
- .querymenu = v4l2_subdev_querymenu,
};
static const struct v4l2_subdev_video_ops msp_video_ops = {
diff --git a/drivers/media/i2c/mt9t001.c b/drivers/media/i2c/mt9t001.c
index 702d562f8..842017fa4 100644
--- a/drivers/media/i2c/mt9t001.c
+++ b/drivers/media/i2c/mt9t001.c
@@ -233,10 +233,21 @@ static int __mt9t001_set_power(struct mt9t001 *mt9t001, bool on)
ret = mt9t001_reset(mt9t001);
if (ret < 0) {
dev_err(&client->dev, "Failed to reset the camera\n");
- return ret;
+ goto e_power;
}
- return v4l2_ctrl_handler_setup(&mt9t001->ctrls);
+ ret = v4l2_ctrl_handler_setup(&mt9t001->ctrls);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to set up control handlers\n");
+ goto e_power;
+ }
+
+ return 0;
+
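+ /* Common error path: power the sensor back off on any failure. */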
+e_power:
+ mt9t001_power_off(mt9t001);
+
+ return ret;
}
/* -----------------------------------------------------------------------------
@@ -834,7 +845,7 @@ static struct v4l2_subdev_ops mt9t001_subdev_ops = {
.pad = &mt9t001_subdev_pad_ops,
};
-static struct v4l2_subdev_internal_ops mt9t001_subdev_internal_ops = {
+static const struct v4l2_subdev_internal_ops mt9t001_subdev_internal_ops = {
.registered = mt9t001_registered,
.open = mt9t001_open,
.close = mt9t001_close,
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
index 501b37039..58eb62f1b 100644
--- a/drivers/media/i2c/mt9v032.c
+++ b/drivers/media/i2c/mt9v032.c
@@ -19,7 +19,6 @@
#include <linux/log2.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
@@ -133,9 +132,16 @@
#define MT9V032_TEST_PATTERN_GRAY_DIAGONAL (3 << 11)
#define MT9V032_TEST_PATTERN_ENABLE (1 << 13)
#define MT9V032_TEST_PATTERN_FLIP (1 << 14)
+#define MT9V032_AEGC_DESIRED_BIN 0xa5
+#define MT9V032_AEC_UPDATE_FREQUENCY 0xa6
+#define MT9V032_AEC_LPF 0xa8
+#define MT9V032_AGC_UPDATE_FREQUENCY 0xa9
+#define MT9V032_AGC_LPF 0xaa
#define MT9V032_AEC_AGC_ENABLE 0xaf
#define MT9V032_AEC_ENABLE (1 << 0)
#define MT9V032_AGC_ENABLE (1 << 1)
+#define MT9V034_AEC_MAX_SHUTTER_WIDTH 0xad
+#define MT9V032_AEC_MAX_SHUTTER_WIDTH 0xbd
#define MT9V032_THERMAL_INFO 0xc1
enum mt9v032_model {
@@ -162,6 +168,8 @@ struct mt9v032_model_data {
unsigned int min_shutter;
unsigned int max_shutter;
unsigned int pclk_reg;
+ unsigned int aec_max_shutter_reg;
+ const struct v4l2_ctrl_config * const aec_max_shutter_v4l2_ctrl;
};
struct mt9v032_model_info {
@@ -175,63 +183,6 @@ static const struct mt9v032_model_version mt9v032_versions[] = {
{ MT9V034_CHIP_ID_REV1, "MT9V024/MT9V034 rev1" },
};
-static const struct mt9v032_model_data mt9v032_model_data[] = {
- {
- /* MT9V022, MT9V032 revisions 1/2/3 */
- .min_row_time = 660,
- .min_hblank = MT9V032_HORIZONTAL_BLANKING_MIN,
- .min_vblank = MT9V032_VERTICAL_BLANKING_MIN,
- .max_vblank = MT9V032_VERTICAL_BLANKING_MAX,
- .min_shutter = MT9V032_TOTAL_SHUTTER_WIDTH_MIN,
- .max_shutter = MT9V032_TOTAL_SHUTTER_WIDTH_MAX,
- .pclk_reg = MT9V032_PIXEL_CLOCK,
- }, {
- /* MT9V024, MT9V034 */
- .min_row_time = 690,
- .min_hblank = MT9V034_HORIZONTAL_BLANKING_MIN,
- .min_vblank = MT9V034_VERTICAL_BLANKING_MIN,
- .max_vblank = MT9V034_VERTICAL_BLANKING_MAX,
- .min_shutter = MT9V034_TOTAL_SHUTTER_WIDTH_MIN,
- .max_shutter = MT9V034_TOTAL_SHUTTER_WIDTH_MAX,
- .pclk_reg = MT9V034_PIXEL_CLOCK,
- },
-};
-
-static const struct mt9v032_model_info mt9v032_models[] = {
- [MT9V032_MODEL_V022_COLOR] = {
- .data = &mt9v032_model_data[0],
- .color = true,
- },
- [MT9V032_MODEL_V022_MONO] = {
- .data = &mt9v032_model_data[0],
- .color = false,
- },
- [MT9V032_MODEL_V024_COLOR] = {
- .data = &mt9v032_model_data[1],
- .color = true,
- },
- [MT9V032_MODEL_V024_MONO] = {
- .data = &mt9v032_model_data[1],
- .color = false,
- },
- [MT9V032_MODEL_V032_COLOR] = {
- .data = &mt9v032_model_data[0],
- .color = true,
- },
- [MT9V032_MODEL_V032_MONO] = {
- .data = &mt9v032_model_data[0],
- .color = false,
- },
- [MT9V032_MODEL_V034_COLOR] = {
- .data = &mt9v032_model_data[1],
- .color = true,
- },
- [MT9V032_MODEL_V034_MONO] = {
- .data = &mt9v032_model_data[1],
- .color = false,
- },
-};
-
struct mt9v032 {
struct v4l2_subdev subdev;
struct media_pad pad;
@@ -349,7 +300,8 @@ static int mt9v032_power_on(struct mt9v032 *mt9v032)
if (ret < 0)
return ret;
- return regmap_write(map, MT9V032_CHIP_CONTROL, 0);
+ return regmap_write(map, MT9V032_CHIP_CONTROL,
+ MT9V032_CHIP_CONTROL_MASTER_MODE);
}
static void mt9v032_power_off(struct mt9v032 *mt9v032)
@@ -421,8 +373,7 @@ __mt9v032_get_pad_crop(struct mt9v032 *mt9v032, struct v4l2_subdev_pad_config *c
static int mt9v032_s_stream(struct v4l2_subdev *subdev, int enable)
{
- const u16 mode = MT9V032_CHIP_CONTROL_MASTER_MODE
- | MT9V032_CHIP_CONTROL_DOUT_ENABLE
+ const u16 mode = MT9V032_CHIP_CONTROL_DOUT_ENABLE
| MT9V032_CHIP_CONTROL_SEQUENTIAL;
struct mt9v032 *mt9v032 = to_mt9v032(subdev);
struct v4l2_rect *crop = &mt9v032->crop;
@@ -647,6 +598,34 @@ static int mt9v032_set_selection(struct v4l2_subdev *subdev,
*/
#define V4L2_CID_TEST_PATTERN_COLOR (V4L2_CID_USER_BASE | 0x1001)
+/*
+ * Value between 1 and 64 that sets the desired bin, effectively a measure
+ * of how bright the image is supposed to be. Both AGC and AEC adjust their
+ * settings to try to reach this target.
+ */
+#define V4L2_CID_AEGC_DESIRED_BIN (V4L2_CID_USER_BASE | 0x1002)
+/*
+ * LPF is the low pass filter capability of the chip. Both AEC and AGC have
+ * this setting. It limits the speed at which AEC/AGC adjust their settings.
+ * Possible values are 0-2. 0 means no LPF. For 1 and 2 this equation is used:
+ *
+ * if |(calculated new exp - current exp)| > (current exp / 4)
+ * next exp = calculated new exp
+ * else
+ * next exp = current exp + ((calculated new exp - current exp) / 2^LPF)
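+ *
+ * Illustrative example: with LPF = 2, current exp = 100 and a calculated
+ * new exp of 110, |110 - 100| = 10 is not greater than 100 / 4 = 25, so
+ * next exp = 100 + (110 - 100) / 2^2 = 102 (integer arithmetic assumed).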
+ */
+#define V4L2_CID_AEC_LPF (V4L2_CID_USER_BASE | 0x1003)
+#define V4L2_CID_AGC_LPF (V4L2_CID_USER_BASE | 0x1004)
+/*
+ * Value between 0 and 15. This is the number of frames to skip before
+ * updating the auto exposure/gain.
+ */
+#define V4L2_CID_AEC_UPDATE_INTERVAL (V4L2_CID_USER_BASE | 0x1005)
+#define V4L2_CID_AGC_UPDATE_INTERVAL (V4L2_CID_USER_BASE | 0x1006)
+/*
+ * Maximum shutter width used for AEC.
+ */
+#define V4L2_CID_AEC_MAX_SHUTTER_WIDTH (V4L2_CID_USER_BASE | 0x1007)
static int mt9v032_s_ctrl(struct v4l2_ctrl *ctrl)
{
@@ -716,6 +695,28 @@ static int mt9v032_s_ctrl(struct v4l2_ctrl *ctrl)
break;
}
return regmap_write(map, MT9V032_TEST_PATTERN, data);
+
+ case V4L2_CID_AEGC_DESIRED_BIN:
+ return regmap_write(map, MT9V032_AEGC_DESIRED_BIN, ctrl->val);
+
+ case V4L2_CID_AEC_LPF:
+ return regmap_write(map, MT9V032_AEC_LPF, ctrl->val);
+
+ case V4L2_CID_AGC_LPF:
+ return regmap_write(map, MT9V032_AGC_LPF, ctrl->val);
+
+ case V4L2_CID_AEC_UPDATE_INTERVAL:
+ return regmap_write(map, MT9V032_AEC_UPDATE_FREQUENCY,
+ ctrl->val);
+
+ case V4L2_CID_AGC_UPDATE_INTERVAL:
+ return regmap_write(map, MT9V032_AGC_UPDATE_FREQUENCY,
+ ctrl->val);
+
+ case V4L2_CID_AEC_MAX_SHUTTER_WIDTH:
+ return regmap_write(map,
+ mt9v032->model->data->aec_max_shutter_reg,
+ ctrl->val);
}
return 0;
@@ -745,6 +746,84 @@ static const struct v4l2_ctrl_config mt9v032_test_pattern_color = {
.flags = 0,
};
+static const struct v4l2_ctrl_config mt9v032_aegc_controls[] = {
+ {
+ .ops = &mt9v032_ctrl_ops,
+ .id = V4L2_CID_AEGC_DESIRED_BIN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "AEC/AGC Desired Bin",
+ .min = 1,
+ .max = 64,
+ .step = 1,
+ .def = 58,
+ .flags = 0,
+ }, {
+ .ops = &mt9v032_ctrl_ops,
+ .id = V4L2_CID_AEC_LPF,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "AEC Low Pass Filter",
+ .min = 0,
+ .max = 2,
+ .step = 1,
+ .def = 0,
+ .flags = 0,
+ }, {
+ .ops = &mt9v032_ctrl_ops,
+ .id = V4L2_CID_AGC_LPF,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "AGC Low Pass Filter",
+ .min = 0,
+ .max = 2,
+ .step = 1,
+ .def = 2,
+ .flags = 0,
+ }, {
+ .ops = &mt9v032_ctrl_ops,
+ .id = V4L2_CID_AEC_UPDATE_INTERVAL,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "AEC Update Interval",
+ .min = 0,
+ .max = 16,
+ .step = 1,
+ .def = 2,
+ .flags = 0,
+ }, {
+ .ops = &mt9v032_ctrl_ops,
+ .id = V4L2_CID_AGC_UPDATE_INTERVAL,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "AGC Update Interval",
+ .min = 0,
+ .max = 16,
+ .step = 1,
+ .def = 2,
+ .flags = 0,
+ }
+};
+
+static const struct v4l2_ctrl_config mt9v032_aec_max_shutter_width = {
+ .ops = &mt9v032_ctrl_ops,
+ .id = V4L2_CID_AEC_MAX_SHUTTER_WIDTH,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "AEC Max Shutter Width",
+ .min = 1,
+ .max = 2047,
+ .step = 1,
+ .def = 480,
+ .flags = 0,
+};
+
+static const struct v4l2_ctrl_config mt9v034_aec_max_shutter_width = {
+ .ops = &mt9v032_ctrl_ops,
+ .id = V4L2_CID_AEC_MAX_SHUTTER_WIDTH,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "AEC Max Shutter Width",
+ .min = 1,
+ .max = 32765,
+ .step = 1,
+ .def = 480,
+ .flags = 0,
+};
+
/* -----------------------------------------------------------------------------
* V4L2 subdev core operations
*/
@@ -953,13 +1032,6 @@ static int mt9v032_probe(struct i2c_client *client,
unsigned int i;
int ret;
- if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_WORD_DATA)) {
- dev_warn(&client->adapter->dev,
- "I2C-Adapter doesn't support I2C_FUNC_SMBUS_WORD\n");
- return -EIO;
- }
-
mt9v032 = devm_kzalloc(&client->dev, sizeof(*mt9v032), GFP_KERNEL);
if (!mt9v032)
return -ENOMEM;
@@ -986,7 +1058,8 @@ static int mt9v032_probe(struct i2c_client *client,
mt9v032->pdata = pdata;
mt9v032->model = (const void *)did->driver_data;
- v4l2_ctrl_handler_init(&mt9v032->ctrls, 10);
+ v4l2_ctrl_handler_init(&mt9v032->ctrls, 11 +
+ ARRAY_SIZE(mt9v032_aegc_controls));
v4l2_ctrl_new_std(&mt9v032->ctrls, &mt9v032_ctrl_ops,
V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
@@ -1015,6 +1088,13 @@ static int mt9v032_probe(struct i2c_client *client,
mt9v032->test_pattern_color = v4l2_ctrl_new_custom(&mt9v032->ctrls,
&mt9v032_test_pattern_color, NULL);
+ v4l2_ctrl_new_custom(&mt9v032->ctrls,
+ mt9v032->model->data->aec_max_shutter_v4l2_ctrl,
+ NULL);
+ for (i = 0; i < ARRAY_SIZE(mt9v032_aegc_controls); ++i)
+ v4l2_ctrl_new_custom(&mt9v032->ctrls, &mt9v032_aegc_controls[i],
+ NULL);
+
v4l2_ctrl_cluster(2, &mt9v032->test_pattern);
mt9v032->pixel_rate =
@@ -1103,6 +1183,67 @@ static int mt9v032_remove(struct i2c_client *client)
return 0;
}
+static const struct mt9v032_model_data mt9v032_model_data[] = {
+ {
+ /* MT9V022, MT9V032 revisions 1/2/3 */
+ .min_row_time = 660,
+ .min_hblank = MT9V032_HORIZONTAL_BLANKING_MIN,
+ .min_vblank = MT9V032_VERTICAL_BLANKING_MIN,
+ .max_vblank = MT9V032_VERTICAL_BLANKING_MAX,
+ .min_shutter = MT9V032_TOTAL_SHUTTER_WIDTH_MIN,
+ .max_shutter = MT9V032_TOTAL_SHUTTER_WIDTH_MAX,
+ .pclk_reg = MT9V032_PIXEL_CLOCK,
+ .aec_max_shutter_reg = MT9V032_AEC_MAX_SHUTTER_WIDTH,
+ .aec_max_shutter_v4l2_ctrl = &mt9v032_aec_max_shutter_width,
+ }, {
+ /* MT9V024, MT9V034 */
+ .min_row_time = 690,
+ .min_hblank = MT9V034_HORIZONTAL_BLANKING_MIN,
+ .min_vblank = MT9V034_VERTICAL_BLANKING_MIN,
+ .max_vblank = MT9V034_VERTICAL_BLANKING_MAX,
+ .min_shutter = MT9V034_TOTAL_SHUTTER_WIDTH_MIN,
+ .max_shutter = MT9V034_TOTAL_SHUTTER_WIDTH_MAX,
+ .pclk_reg = MT9V034_PIXEL_CLOCK,
+ .aec_max_shutter_reg = MT9V034_AEC_MAX_SHUTTER_WIDTH,
+ .aec_max_shutter_v4l2_ctrl = &mt9v034_aec_max_shutter_width,
+ },
+};
+
+static const struct mt9v032_model_info mt9v032_models[] = {
+ [MT9V032_MODEL_V022_COLOR] = {
+ .data = &mt9v032_model_data[0],
+ .color = true,
+ },
+ [MT9V032_MODEL_V022_MONO] = {
+ .data = &mt9v032_model_data[0],
+ .color = false,
+ },
+ [MT9V032_MODEL_V024_COLOR] = {
+ .data = &mt9v032_model_data[1],
+ .color = true,
+ },
+ [MT9V032_MODEL_V024_MONO] = {
+ .data = &mt9v032_model_data[1],
+ .color = false,
+ },
+ [MT9V032_MODEL_V032_COLOR] = {
+ .data = &mt9v032_model_data[0],
+ .color = true,
+ },
+ [MT9V032_MODEL_V032_MONO] = {
+ .data = &mt9v032_model_data[0],
+ .color = false,
+ },
+ [MT9V032_MODEL_V034_COLOR] = {
+ .data = &mt9v032_model_data[1],
+ .color = true,
+ },
+ [MT9V032_MODEL_V034_MONO] = {
+ .data = &mt9v032_model_data[1],
+ .color = false,
+ },
+};
+
static const struct i2c_device_id mt9v032_id[] = {
{ "mt9v022", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V022_COLOR] },
{ "mt9v022m", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V022_MONO] },
diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c
index bd3526bdd..58062b41c 100644
--- a/drivers/media/i2c/saa7115.c
+++ b/drivers/media/i2c/saa7115.c
@@ -1585,13 +1585,6 @@ static const struct v4l2_ctrl_ops saa711x_ctrl_ops = {
static const struct v4l2_subdev_core_ops saa711x_core_ops = {
.log_status = saa711x_log_status,
- .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
- .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
- .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
- .g_ctrl = v4l2_subdev_g_ctrl,
- .s_ctrl = v4l2_subdev_s_ctrl,
- .queryctrl = v4l2_subdev_queryctrl,
- .querymenu = v4l2_subdev_querymenu,
.reset = saa711x_reset,
.s_gpio = saa711x_s_gpio,
#ifdef CONFIG_VIDEO_ADV_DEBUG
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 3dfe387ab..d08ab6c83 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -3044,10 +3044,8 @@ static struct smiapp_platform_data *smiapp_get_pdata(struct device *dev)
pdata->op_sys_clock = devm_kcalloc(
dev, bus_cfg->nr_of_link_frequencies + 1 /* guardian */,
sizeof(*pdata->op_sys_clock), GFP_KERNEL);
- if (!pdata->op_sys_clock) {
- rval = -ENOMEM;
+ if (!pdata->op_sys_clock)
goto out_err;
- }
for (i = 0; i < bus_cfg->nr_of_link_frequencies; i++) {
pdata->op_sys_clock[i] = bus_cfg->link_frequencies[i];
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 6cf6d0673..1e3a0dd22 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -89,8 +89,6 @@ struct tc358743_state {
struct v4l2_ctrl *audio_sampling_rate_ctrl;
struct v4l2_ctrl *audio_present_ctrl;
- /* work queues */
- struct workqueue_struct *work_queues;
struct delayed_work delayed_work_enable_hotplug;
/* edid */
@@ -425,8 +423,7 @@ static void tc358743_enable_edid(struct v4l2_subdev *sd)
/* Enable hotplug after 100 ms. DDC access to EDID is also enabled when
* hotplug is enabled. See register DDC_CTL */
- queue_delayed_work(state->work_queues,
- &state->delayed_work_enable_hotplug, HZ / 10);
+ schedule_delayed_work(&state->delayed_work_enable_hotplug, HZ / 10);
tc358743_enable_interrupts(sd, true);
tc358743_s_ctrl_detect_tx_5v(sd);
@@ -1884,14 +1881,6 @@ static int tc358743_probe(struct i2c_client *client,
goto err_hdl;
}
- /* work queues */
- state->work_queues = create_singlethread_workqueue(client->name);
- if (!state->work_queues) {
- v4l2_err(sd, "Could not create work queue\n");
- err = -ENOMEM;
- goto err_hdl;
- }
-
state->pad.flags = MEDIA_PAD_FL_SOURCE;
err = media_entity_pads_init(&sd->entity, 1, &state->pad);
if (err < 0)
@@ -1940,7 +1929,6 @@ static int tc358743_probe(struct i2c_client *client,
err_work_queues:
cancel_delayed_work(&state->delayed_work_enable_hotplug);
- destroy_workqueue(state->work_queues);
mutex_destroy(&state->confctl_mutex);
err_hdl:
media_entity_cleanup(&sd->entity);
@@ -1954,7 +1942,6 @@ static int tc358743_remove(struct i2c_client *client)
struct tc358743_state *state = to_state(sd);
cancel_delayed_work(&state->delayed_work_enable_hotplug);
- destroy_workqueue(state->work_queues);
v4l2_async_unregister_subdev(sd);
v4l2_device_unregister_subdev(sd);
mutex_destroy(&state->confctl_mutex);
diff --git a/drivers/media/i2c/tvaudio.c b/drivers/media/i2c/tvaudio.c
index fece2a433..42d1e26e5 100644
--- a/drivers/media/i2c/tvaudio.c
+++ b/drivers/media/i2c/tvaudio.c
@@ -1855,13 +1855,6 @@ static const struct v4l2_ctrl_ops tvaudio_ctrl_ops = {
static const struct v4l2_subdev_core_ops tvaudio_core_ops = {
.log_status = tvaudio_log_status,
- .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
- .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
- .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
- .g_ctrl = v4l2_subdev_g_ctrl,
- .s_ctrl = v4l2_subdev_s_ctrl,
- .queryctrl = v4l2_subdev_queryctrl,
- .querymenu = v4l2_subdev_querymenu,
};
static const struct v4l2_subdev_tuner_ops tvaudio_tuner_ops = {
diff --git a/drivers/media/i2c/wm8775.c b/drivers/media/i2c/wm8775.c
index 6e00f145b..5581f4db0 100644
--- a/drivers/media/i2c/wm8775.c
+++ b/drivers/media/i2c/wm8775.c
@@ -178,13 +178,6 @@ static const struct v4l2_ctrl_ops wm8775_ctrl_ops = {
static const struct v4l2_subdev_core_ops wm8775_core_ops = {
.log_status = wm8775_log_status,
- .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
- .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
- .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
- .g_ctrl = v4l2_subdev_g_ctrl,
- .s_ctrl = v4l2_subdev_s_ctrl,
- .queryctrl = v4l2_subdev_queryctrl,
- .querymenu = v4l2_subdev_querymenu,
};
static const struct v4l2_subdev_tuner_ops wm8775_tuner_ops = {
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index a1cd50f33..1795abeda 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -423,7 +423,7 @@ static long media_device_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
struct media_devnode *devnode = media_devnode_data(filp);
- struct media_device *dev = to_media_device(devnode);
+ struct media_device *dev = devnode->media_dev;
long ret;
mutex_lock(&dev->graph_mutex);
@@ -495,7 +495,7 @@ static long media_device_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
struct media_devnode *devnode = media_devnode_data(filp);
- struct media_device *dev = to_media_device(devnode);
+ struct media_device *dev = devnode->media_dev;
long ret;
switch (cmd) {
@@ -531,7 +531,8 @@ static const struct media_file_operations media_device_fops = {
static ssize_t show_model(struct device *cd,
struct device_attribute *attr, char *buf)
{
- struct media_device *mdev = to_media_device(to_media_devnode(cd));
+ struct media_devnode *devnode = to_media_devnode(cd);
+ struct media_device *mdev = devnode->media_dev;
return sprintf(buf, "%.*s\n", (int)sizeof(mdev->model), mdev->model);
}
@@ -704,23 +705,35 @@ EXPORT_SYMBOL_GPL(media_device_cleanup);
int __must_check __media_device_register(struct media_device *mdev,
struct module *owner)
{
+ struct media_devnode *devnode;
int ret;
+ devnode = kzalloc(sizeof(*devnode), GFP_KERNEL);
+ if (!devnode)
+ return -ENOMEM;
+
/* Register the device node. */
- mdev->devnode.fops = &media_device_fops;
- mdev->devnode.parent = mdev->dev;
- mdev->devnode.release = media_device_release;
+ mdev->devnode = devnode;
+ devnode->fops = &media_device_fops;
+ devnode->parent = mdev->dev;
+ devnode->release = media_device_release;
/* Set version 0 to indicate user-space that the graph is static */
mdev->topology_version = 0;
- ret = media_devnode_register(&mdev->devnode, owner);
- if (ret < 0)
+ ret = media_devnode_register(mdev, devnode, owner);
+ if (ret < 0) {
+ /* devnode free is handled in media_devnode_*() */
+ mdev->devnode = NULL;
return ret;
+ }
- ret = device_create_file(&mdev->devnode.dev, &dev_attr_model);
+ ret = device_create_file(&devnode->dev, &dev_attr_model);
if (ret < 0) {
- media_devnode_unregister(&mdev->devnode);
+ /* devnode free is handled in media_devnode_*() */
+ mdev->devnode = NULL;
+ media_devnode_unregister_prepare(devnode);
+ media_devnode_unregister(devnode);
return ret;
}
@@ -771,11 +784,14 @@ void media_device_unregister(struct media_device *mdev)
mutex_lock(&mdev->graph_mutex);
/* Check if mdev was ever registered at all */
- if (!media_devnode_is_registered(&mdev->devnode)) {
+ if (!media_devnode_is_registered(mdev->devnode)) {
mutex_unlock(&mdev->graph_mutex);
return;
}
+ /* Clear the devnode register bit to avoid races with media dev open */
+ media_devnode_unregister_prepare(mdev->devnode);
+
/* Remove all entities from the media device */
list_for_each_entry_safe(entity, next, &mdev->entities, graph_obj.list)
__media_device_unregister_entity(entity);
@@ -794,9 +810,12 @@ void media_device_unregister(struct media_device *mdev)
mutex_unlock(&mdev->graph_mutex);
- device_remove_file(&mdev->devnode.dev, &dev_attr_model);
- dev_dbg(mdev->dev, "Media device unregistering\n");
- media_devnode_unregister(&mdev->devnode);
+ dev_dbg(mdev->dev, "Media device unregistered\n");
+
+ device_remove_file(&mdev->devnode->dev, &dev_attr_model);
+ media_devnode_unregister(mdev->devnode);
+ /* devnode free is handled in media_devnode_*() */
+ mdev->devnode = NULL;
}
EXPORT_SYMBOL_GPL(media_device_unregister);
diff --git a/drivers/media/media-devnode.c b/drivers/media/media-devnode.c
index b66dc9d07..f2772ba6f 100644
--- a/drivers/media/media-devnode.c
+++ b/drivers/media/media-devnode.c
@@ -44,6 +44,7 @@
#include <linux/uaccess.h>
#include <media/media-devnode.h>
+#include <media/media-device.h>
#define MEDIA_NUM_DEVICES 256
#define MEDIA_NAME "media"
@@ -59,21 +60,19 @@ static DECLARE_BITMAP(media_devnode_nums, MEDIA_NUM_DEVICES);
/* Called when the last user of the media device exits. */
static void media_devnode_release(struct device *cd)
{
- struct media_devnode *mdev = to_media_devnode(cd);
+ struct media_devnode *devnode = to_media_devnode(cd);
mutex_lock(&media_devnode_lock);
-
- /* Delete the cdev on this minor as well */
- cdev_del(&mdev->cdev);
-
/* Mark device node number as free */
- clear_bit(mdev->minor, media_devnode_nums);
-
+ clear_bit(devnode->minor, media_devnode_nums);
mutex_unlock(&media_devnode_lock);
/* Release media_devnode and perform other cleanups as needed. */
- if (mdev->release)
- mdev->release(mdev);
+ if (devnode->release)
+ devnode->release(devnode);
+
+ kfree(devnode);
+ pr_debug("%s: Media Devnode Deallocated\n", __func__);
}
static struct bus_type media_bus_type = {
@@ -83,37 +82,37 @@ static struct bus_type media_bus_type = {
static ssize_t media_read(struct file *filp, char __user *buf,
size_t sz, loff_t *off)
{
- struct media_devnode *mdev = media_devnode_data(filp);
+ struct media_devnode *devnode = media_devnode_data(filp);
- if (!mdev->fops->read)
+ if (!devnode->fops->read)
return -EINVAL;
- if (!media_devnode_is_registered(mdev))
+ if (!media_devnode_is_registered(devnode))
return -EIO;
- return mdev->fops->read(filp, buf, sz, off);
+ return devnode->fops->read(filp, buf, sz, off);
}
static ssize_t media_write(struct file *filp, const char __user *buf,
size_t sz, loff_t *off)
{
- struct media_devnode *mdev = media_devnode_data(filp);
+ struct media_devnode *devnode = media_devnode_data(filp);
- if (!mdev->fops->write)
+ if (!devnode->fops->write)
return -EINVAL;
- if (!media_devnode_is_registered(mdev))
+ if (!media_devnode_is_registered(devnode))
return -EIO;
- return mdev->fops->write(filp, buf, sz, off);
+ return devnode->fops->write(filp, buf, sz, off);
}
static unsigned int media_poll(struct file *filp,
struct poll_table_struct *poll)
{
- struct media_devnode *mdev = media_devnode_data(filp);
+ struct media_devnode *devnode = media_devnode_data(filp);
- if (!media_devnode_is_registered(mdev))
+ if (!media_devnode_is_registered(devnode))
return POLLERR | POLLHUP;
- if (!mdev->fops->poll)
+ if (!devnode->fops->poll)
return DEFAULT_POLLMASK;
- return mdev->fops->poll(filp, poll);
+ return devnode->fops->poll(filp, poll);
}
static long
@@ -121,12 +120,12 @@ __media_ioctl(struct file *filp, unsigned int cmd, unsigned long arg,
long (*ioctl_func)(struct file *filp, unsigned int cmd,
unsigned long arg))
{
- struct media_devnode *mdev = media_devnode_data(filp);
+ struct media_devnode *devnode = media_devnode_data(filp);
if (!ioctl_func)
return -ENOTTY;
- if (!media_devnode_is_registered(mdev))
+ if (!media_devnode_is_registered(devnode))
return -EIO;
return ioctl_func(filp, cmd, arg);
@@ -134,9 +133,9 @@ __media_ioctl(struct file *filp, unsigned int cmd, unsigned long arg,
static long media_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- struct media_devnode *mdev = media_devnode_data(filp);
+ struct media_devnode *devnode = media_devnode_data(filp);
- return __media_ioctl(filp, cmd, arg, mdev->fops->ioctl);
+ return __media_ioctl(filp, cmd, arg, devnode->fops->ioctl);
}
#ifdef CONFIG_COMPAT
@@ -144,9 +143,9 @@ static long media_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
static long media_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
- struct media_devnode *mdev = media_devnode_data(filp);
+ struct media_devnode *devnode = media_devnode_data(filp);
- return __media_ioctl(filp, cmd, arg, mdev->fops->compat_ioctl);
+ return __media_ioctl(filp, cmd, arg, devnode->fops->compat_ioctl);
}
#endif /* CONFIG_COMPAT */
@@ -154,7 +153,7 @@ static long media_compat_ioctl(struct file *filp, unsigned int cmd,
/* Override for the open function */
static int media_open(struct inode *inode, struct file *filp)
{
- struct media_devnode *mdev;
+ struct media_devnode *devnode;
int ret;
/* Check if the media device is available. This needs to be done with
@@ -164,23 +163,23 @@ static int media_open(struct inode *inode, struct file *filp)
* a crash.
*/
mutex_lock(&media_devnode_lock);
- mdev = container_of(inode->i_cdev, struct media_devnode, cdev);
+ devnode = container_of(inode->i_cdev, struct media_devnode, cdev);
/* return ENXIO if the media device has been removed
already or if it is not registered anymore. */
- if (!media_devnode_is_registered(mdev)) {
+ if (!media_devnode_is_registered(devnode)) {
mutex_unlock(&media_devnode_lock);
return -ENXIO;
}
/* and increase the device refcount */
- get_device(&mdev->dev);
+ get_device(&devnode->dev);
mutex_unlock(&media_devnode_lock);
- filp->private_data = mdev;
+ filp->private_data = devnode;
- if (mdev->fops->open) {
- ret = mdev->fops->open(filp);
+ if (devnode->fops->open) {
+ ret = devnode->fops->open(filp);
if (ret) {
- put_device(&mdev->dev);
+ put_device(&devnode->dev);
filp->private_data = NULL;
return ret;
}
@@ -192,16 +191,18 @@ static int media_open(struct inode *inode, struct file *filp)
/* Override for the release function */
static int media_release(struct inode *inode, struct file *filp)
{
- struct media_devnode *mdev = media_devnode_data(filp);
+ struct media_devnode *devnode = media_devnode_data(filp);
- if (mdev->fops->release)
- mdev->fops->release(filp);
+ if (devnode->fops->release)
+ devnode->fops->release(filp);
filp->private_data = NULL;
/* decrease the refcount unconditionally since the release()
return value is ignored. */
- put_device(&mdev->dev);
+ put_device(&devnode->dev);
+
+ pr_debug("%s: Media Release\n", __func__);
return 0;
}
@@ -219,7 +220,8 @@ static const struct file_operations media_devnode_fops = {
.llseek = no_llseek,
};
-int __must_check media_devnode_register(struct media_devnode *mdev,
+int __must_check media_devnode_register(struct media_device *mdev,
+ struct media_devnode *devnode,
struct module *owner)
{
int minor;
@@ -231,61 +233,80 @@ int __must_check media_devnode_register(struct media_devnode *mdev,
if (minor == MEDIA_NUM_DEVICES) {
mutex_unlock(&media_devnode_lock);
pr_err("could not get a free minor\n");
+ kfree(devnode);
return -ENFILE;
}
set_bit(minor, media_devnode_nums);
mutex_unlock(&media_devnode_lock);
- mdev->minor = minor;
+ devnode->minor = minor;
+ devnode->media_dev = mdev;
+
+ /* Part 1: Initialize dev now to use dev.kobj for cdev.kobj.parent */
+ devnode->dev.bus = &media_bus_type;
+ devnode->dev.devt = MKDEV(MAJOR(media_dev_t), devnode->minor);
+ devnode->dev.release = media_devnode_release;
+ if (devnode->parent)
+ devnode->dev.parent = devnode->parent;
+ dev_set_name(&devnode->dev, "media%d", devnode->minor);
+ device_initialize(&devnode->dev);
/* Part 2: Initialize and register the character device */
- cdev_init(&mdev->cdev, &media_devnode_fops);
- mdev->cdev.owner = owner;
+ cdev_init(&devnode->cdev, &media_devnode_fops);
+ devnode->cdev.owner = owner;
+ devnode->cdev.kobj.parent = &devnode->dev.kobj;
- ret = cdev_add(&mdev->cdev, MKDEV(MAJOR(media_dev_t), mdev->minor), 1);
+ ret = cdev_add(&devnode->cdev, MKDEV(MAJOR(media_dev_t), devnode->minor), 1);
if (ret < 0) {
pr_err("%s: cdev_add failed\n", __func__);
- goto error;
+ goto cdev_add_error;
}
- /* Part 3: Register the media device */
- mdev->dev.bus = &media_bus_type;
- mdev->dev.devt = MKDEV(MAJOR(media_dev_t), mdev->minor);
- mdev->dev.release = media_devnode_release;
- if (mdev->parent)
- mdev->dev.parent = mdev->parent;
- dev_set_name(&mdev->dev, "media%d", mdev->minor);
- ret = device_register(&mdev->dev);
+ /* Part 3: Add the media device */
+ ret = device_add(&devnode->dev);
if (ret < 0) {
- pr_err("%s: device_register failed\n", __func__);
- goto error;
+ pr_err("%s: device_add failed\n", __func__);
+ goto device_add_error;
}
/* Part 4: Activate this minor. The char device can now be used. */
- set_bit(MEDIA_FLAG_REGISTERED, &mdev->flags);
+ set_bit(MEDIA_FLAG_REGISTERED, &devnode->flags);
return 0;
-error:
+device_add_error:
+ cdev_del(&devnode->cdev);
+cdev_add_error:
mutex_lock(&media_devnode_lock);
- cdev_del(&mdev->cdev);
- clear_bit(mdev->minor, media_devnode_nums);
+ clear_bit(devnode->minor, media_devnode_nums);
+ devnode->media_dev = NULL;
mutex_unlock(&media_devnode_lock);
+ put_device(&devnode->dev);
return ret;
}
-void media_devnode_unregister(struct media_devnode *mdev)
+void media_devnode_unregister_prepare(struct media_devnode *devnode)
{
- /* Check if mdev was ever registered at all */
- if (!media_devnode_is_registered(mdev))
+ /* Check if devnode was ever registered at all */
+ if (!media_devnode_is_registered(devnode))
return;
mutex_lock(&media_devnode_lock);
- clear_bit(MEDIA_FLAG_REGISTERED, &mdev->flags);
+ clear_bit(MEDIA_FLAG_REGISTERED, &devnode->flags);
+ mutex_unlock(&media_devnode_lock);
+}
+
+void media_devnode_unregister(struct media_devnode *devnode)
+{
+ mutex_lock(&media_devnode_lock);
+ /* Delete the cdev on this minor as well */
+ cdev_del(&devnode->cdev);
mutex_unlock(&media_devnode_lock);
- device_unregister(&mdev->dev);
+ device_del(&devnode->dev);
+ devnode->media_dev = NULL;
+ put_device(&devnode->dev);
}
/*
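The media-devnode hunks above split the old single-step device_register() into device_initialize() followed later by device_add(). The point of the two-phase pattern is that the embedded struct device's kobject already exists (and can serve as cdev.kobj.parent) before cdev_add() exposes the minor to userspace, and that every failure path can be unwound with a single put_device(), letting the release() callback free the structure. A minimal sketch of the same pattern, with hypothetical foo_ names rather than the media-devnode code itself:

/* Two-phase char device registration sketch (hypothetical "foo" device). */
struct foo_devnode {
	struct device dev;	/* embedded, refcounted */
	struct cdev cdev;
};

static void foo_release(struct device *dev)
{
	kfree(container_of(dev, struct foo_devnode, dev));
}

static int foo_register(struct foo_devnode *node, dev_t devt,
			const struct file_operations *fops)
{
	int ret;

	/* Phase 1: initialize only; no uevent, no sysfs entry yet. */
	device_initialize(&node->dev);
	node->dev.devt = devt;
	node->dev.release = foo_release;
	dev_set_name(&node->dev, "foo%d", MINOR(devt));

	/* Parent the cdev so it cannot outlive the device. */
	cdev_init(&node->cdev, fops);
	node->cdev.kobj.parent = &node->dev.kobj;
	ret = cdev_add(&node->cdev, devt, 1);
	if (ret)
		goto err_put;

	/* Phase 2: make it visible; from here userspace can open(). */
	ret = device_add(&node->dev);
	if (ret)
		goto err_cdev;
	return 0;

err_cdev:
	cdev_del(&node->cdev);
err_put:
	put_device(&node->dev);	/* frees node via foo_release() */
	return ret;
}

The unregister side in the patch mirrors this: media_devnode_unregister_prepare() clears the REGISTERED flag so new opens fail, and media_devnode_unregister() deletes the cdev and drops the final reference.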
diff --git a/drivers/media/pci/bt8xx/dst_ca.c b/drivers/media/pci/bt8xx/dst_ca.c
index da8b414fd..8681b9143 100644
--- a/drivers/media/pci/bt8xx/dst_ca.c
+++ b/drivers/media/pci/bt8xx/dst_ca.c
@@ -655,7 +655,6 @@ static long dst_ca_ioctl(struct file *file, unsigned int cmd, unsigned long ioct
static int dst_ca_open(struct inode *inode, struct file *file)
{
dprintk(verbose, DST_CA_DEBUG, 1, " Device opened [%p] ", file);
- try_module_get(THIS_MODULE);
return 0;
}
@@ -663,7 +662,6 @@ static int dst_ca_open(struct inode *inode, struct file *file)
static int dst_ca_release(struct inode *inode, struct file *file)
{
dprintk(verbose, DST_CA_DEBUG, 1, " Device closed.");
- module_put(THIS_MODULE);
return 0;
}
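dst_ca's open/release no longer take a module reference on themselves. try_module_get(THIS_MODULE) from inside a module's own open() is both redundant and racy: the VFS already pins fops->owner via fops_get() before calling open(), and drops the reference at final fput(). A sketch of the idiomatic form, with hypothetical foo_ names:

static int foo_open(struct inode *inode, struct file *file)
{
	/* No try_module_get(THIS_MODULE) needed: the VFS already
	 * holds a reference on .owner for this open file. */
	return 0;
}

static int foo_release(struct inode *inode, struct file *file)
{
	return 0;	/* reference dropped by the VFS on final fput() */
}

static const struct file_operations foo_fops = {
	.owner   = THIS_MODULE,	/* this is what pins the module */
	.open    = foo_open,
	.release = foo_release,
};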
diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
index 8d6f04fc8..476f7f0dc 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.c
+++ b/drivers/media/pci/cobalt/cobalt-driver.c
@@ -492,7 +492,6 @@ static int cobalt_subdevs_init(struct cobalt *cobalt)
.ain_sel = ADV7604_AIN7_8_9_NC_SYNC_3_1,
.bus_order = ADV7604_BUS_ORDER_BRG,
.blank_data = 1,
- .op_656_range = 1,
.op_format_mode_sel = ADV7604_OP_FORMAT_MODE0,
.int1_config = ADV76XX_INT1_CONFIG_ACTIVE_HIGH,
.dr_str_data = ADV76XX_DR_STR_HIGH,
@@ -571,7 +570,6 @@ static int cobalt_subdevs_hsma_init(struct cobalt *cobalt)
.bus_order = ADV7842_BUS_ORDER_RBG,
.op_format_mode_sel = ADV7842_OP_FORMAT_MODE0,
.blank_data = 1,
- .op_656_range = 1,
.dr_str_data = 3,
.dr_str_clk = 3,
.dr_str_sync = 3,
@@ -691,17 +689,10 @@ static int cobalt_probe(struct pci_dev *pci_dev,
cobalt->pci_dev = pci_dev;
cobalt->instance = i;
- cobalt->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
- if (IS_ERR(cobalt->alloc_ctx)) {
- kfree(cobalt);
- return -ENOMEM;
- }
-
retval = v4l2_device_register(&pci_dev->dev, &cobalt->v4l2_dev);
if (retval) {
pr_err("cobalt: v4l2_device_register of card %d failed\n",
cobalt->instance);
- vb2_dma_sg_cleanup_ctx(cobalt->alloc_ctx);
kfree(cobalt);
return retval;
}
@@ -782,7 +773,6 @@ err:
cobalt_err("error %d on initialization\n", retval);
v4l2_device_unregister(&cobalt->v4l2_dev);
- vb2_dma_sg_cleanup_ctx(cobalt->alloc_ctx);
kfree(cobalt);
return retval;
}
@@ -818,7 +808,6 @@ static void cobalt_remove(struct pci_dev *pci_dev)
cobalt_info("removed cobalt card\n");
v4l2_device_unregister(v4l2_dev);
- vb2_dma_sg_cleanup_ctx(cobalt->alloc_ctx);
kfree(cobalt);
}
diff --git a/drivers/media/pci/cobalt/cobalt-driver.h b/drivers/media/pci/cobalt/cobalt-driver.h
index b2f08e4a6..ed00dc9d9 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.h
+++ b/drivers/media/pci/cobalt/cobalt-driver.h
@@ -262,7 +262,6 @@ struct cobalt {
int instance;
struct pci_dev *pci_dev;
struct v4l2_device v4l2_dev;
- void *alloc_ctx;
void __iomem *bar0, *bar1;
diff --git a/drivers/media/pci/cobalt/cobalt-v4l2.c b/drivers/media/pci/cobalt/cobalt-v4l2.c
index c0ba458f6..d05672fe9 100644
--- a/drivers/media/pci/cobalt/cobalt-v4l2.c
+++ b/drivers/media/pci/cobalt/cobalt-v4l2.c
@@ -45,7 +45,7 @@ static const struct v4l2_dv_timings cea1080p60 = V4L2_DV_BT_CEA_1920X1080P60;
static int cobalt_queue_setup(struct vb2_queue *q,
unsigned int *num_buffers, unsigned int *num_planes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct cobalt_stream *s = q->drv_priv;
unsigned size = s->stride * s->height;
@@ -54,7 +54,6 @@ static int cobalt_queue_setup(struct vb2_queue *q,
*num_buffers = 3;
if (*num_buffers > NR_BUFS)
*num_buffers = NR_BUFS;
- alloc_ctxs[0] = s->cobalt->alloc_ctx;
if (*num_planes)
return sizes[0] < size ? -EINVAL : 0;
*num_planes = 1;
@@ -1224,6 +1223,7 @@ static int cobalt_node_register(struct cobalt *cobalt, int node)
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->min_buffers_needed = 2;
q->lock = &s->lock;
+ q->dev = &cobalt->pci_dev->dev;
vdev->queue = q;
video_set_drvdata(vdev, s);
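The cobalt changes show the conversion that repeats through the rest of this patch: videobuf2 now takes the DMA device directly through vb2_queue, so the per-driver vb2_dma_sg_init_ctx()/vb2_dma_sg_cleanup_ctx() allocation contexts and the alloc_ctxs[] out-parameter of queue_setup() go away in favour of a single q->dev assignment at queue-init time. A condensed sketch under those assumptions (the foo_* names are hypothetical):

/* After the conversion: no alloc context, one assignment at init. */
static int foo_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
			   unsigned int *num_planes, unsigned int sizes[],
			   struct device *alloc_devs[])
{
	struct foo_dev *dev = q->drv_priv;	/* hypothetical driver struct */

	*num_planes = 1;
	sizes[0] = dev->frame_size;
	/* alloc_devs[] may be left NULL: vb2 then allocates
	 * against the q->dev set below. */
	return 0;
}

static int foo_init_queue(struct foo_dev *dev, struct vb2_queue *q)
{
	/* type, io_modes, buf_struct_size etc. omitted for brevity */
	q->drv_priv = dev;
	q->ops = &foo_qops;			/* hypothetical ops table */
	q->mem_ops = &vb2_dma_sg_memops;
	q->dev = &dev->pci->dev;		/* replaces the old alloc ctx */
	return vb2_queue_init(q);
}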
diff --git a/drivers/media/pci/cx18/cx18-alsa-mixer.c b/drivers/media/pci/cx18/cx18-alsa-mixer.c
index 341bddc00..284275270 100644
--- a/drivers/media/pci/cx18/cx18-alsa-mixer.c
+++ b/drivers/media/pci/cx18/cx18-alsa-mixer.c
@@ -93,7 +93,7 @@ static int snd_cx18_mixer_tv_vol_get(struct snd_kcontrol *kctl,
vctrl.value = dB_to_cx18_av_vol(uctl->value.integer.value[0]);
snd_cx18_lock(cxsc);
- ret = v4l2_subdev_call(cx->sd_av, core, g_ctrl, &vctrl);
+ ret = v4l2_g_ctrl(cx->sd_av->ctrl_handler, &vctrl);
snd_cx18_unlock(cxsc);
if (!ret)
@@ -115,14 +115,14 @@ static int snd_cx18_mixer_tv_vol_put(struct snd_kcontrol *kctl,
snd_cx18_lock(cxsc);
/* Fetch current state */
- ret = v4l2_subdev_call(cx->sd_av, core, g_ctrl, &vctrl);
+ ret = v4l2_g_ctrl(cx->sd_av->ctrl_handler, &vctrl);
if (ret ||
(cx18_av_vol_to_dB(vctrl.value) != uctl->value.integer.value[0])) {
/* Set, if needed */
vctrl.value = dB_to_cx18_av_vol(uctl->value.integer.value[0]);
- ret = v4l2_subdev_call(cx->sd_av, core, s_ctrl, &vctrl);
+ ret = v4l2_s_ctrl(NULL, cx->sd_av->ctrl_handler, &vctrl);
if (!ret)
ret = 1; /* Indicate control was changed w/o error */
}
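Instead of calling the sub-device's core g_ctrl/s_ctrl ops through v4l2_subdev_call(), the mixer now goes straight to the sub-device's control handler with the control-framework helpers; note that v4l2_s_ctrl() additionally takes a struct v4l2_fh pointer (NULL here, since no file handle is involved). A minimal sketch with a hypothetical volume setter:

#include <media/v4l2-ctrls.h>
#include <media/v4l2-subdev.h>

/* Set an audio control on a sub-device's handler (sketch). */
static int foo_set_volume(struct v4l2_subdev *sd, s32 volume)
{
	struct v4l2_control ctrl = {
		.id    = V4L2_CID_AUDIO_VOLUME,
		.value = volume,
	};

	/* Goes through the control framework, so locking, range
	 * clamping and volatile-control handling all apply. */
	return v4l2_s_ctrl(NULL, sd->ctrl_handler, &ctrl);
}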
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
index 4754fd135..6eeadc48c 100644
--- a/drivers/media/pci/cx18/cx18-driver.c
+++ b/drivers/media/pci/cx18/cx18-driver.c
@@ -560,7 +560,7 @@ static void cx18_process_options(struct cx18 *cx)
cx->stream_buf_size[CX18_ENC_STREAM_TYPE_MPG] = enc_mpg_bufsize;
cx->stream_buf_size[CX18_ENC_STREAM_TYPE_IDX] = enc_idx_bufsize;
cx->stream_buf_size[CX18_ENC_STREAM_TYPE_YUV] = enc_yuv_bufsize;
- cx->stream_buf_size[CX18_ENC_STREAM_TYPE_VBI] = vbi_active_samples * 36;
+ cx->stream_buf_size[CX18_ENC_STREAM_TYPE_VBI] = VBI_ACTIVE_SAMPLES * 36;
cx->stream_buf_size[CX18_ENC_STREAM_TYPE_PCM] = enc_pcm_bufsize;
cx->stream_buf_size[CX18_ENC_STREAM_TYPE_RAD] = 0; /* control no data */
diff --git a/drivers/media/pci/cx18/cx18-driver.h b/drivers/media/pci/cx18/cx18-driver.h
index 47ce80fa7..ef308a10e 100644
--- a/drivers/media/pci/cx18/cx18-driver.h
+++ b/drivers/media/pci/cx18/cx18-driver.h
@@ -492,9 +492,9 @@ struct cx18_card;
* (1/15.625 kHz) * 2 * 13.5 MHz = 1728 samples/line =
* 4 bytes SAV + 280 bytes anc data + 4 bytes SAV + 1440 active samples
*/
-static const u32 vbi_active_samples = 1444; /* 4 byte SAV + 720 Y + 720 U/V */
-static const u32 vbi_hblank_samples_60Hz = 272; /* 4 byte EAV + 268 anc/fill */
-static const u32 vbi_hblank_samples_50Hz = 284; /* 4 byte EAV + 280 anc/fill */
+#define VBI_ACTIVE_SAMPLES 1444 /* 4 byte SAV + 720 Y + 720 U/V */
+#define VBI_HBLANK_SAMPLES_60HZ 272 /* 4 byte EAV + 268 anc/fill */
+#define VBI_HBLANK_SAMPLES_50HZ 284 /* 4 byte EAV + 280 anc/fill */
#define CX18_VBI_FRAMES 32
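Converting the header's static const u32 values to macros avoids a (possibly unused) copy of each constant in every file that includes cx18-driver.h and turns them into integer constant expressions, usable in array bounds and the like. A small illustration of the difference, not taken from the driver:

/* In a header shared by several .c files: */
static const u32 vbi_active_samples = 1444;	/* may emit one copy per
						 * including file; not an
						 * integer constant expression */

#define VBI_ACTIVE_SAMPLES 1444			/* no storage at all, and: */
static u8 line_buf[VBI_ACTIVE_SAMPLES];		/* legal as an array bound,
						 * which the const above is
						 * not at file scope */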
diff --git a/drivers/media/pci/cx18/cx18-ioctl.c b/drivers/media/pci/cx18/cx18-ioctl.c
index eeb741c7d..fecca2a63 100644
--- a/drivers/media/pci/cx18/cx18-ioctl.c
+++ b/drivers/media/pci/cx18/cx18-ioctl.c
@@ -177,7 +177,7 @@ static int cx18_g_fmt_vbi_cap(struct file *file, void *fh,
vbifmt->sampling_rate = 27000000;
vbifmt->offset = 248; /* FIXME - slightly wrong for both 50 & 60 Hz */
- vbifmt->samples_per_line = vbi_active_samples - 4;
+ vbifmt->samples_per_line = VBI_ACTIVE_SAMPLES - 4;
vbifmt->sample_format = V4L2_PIX_FMT_GREY;
vbifmt->start[0] = cx->vbi.start[0];
vbifmt->start[1] = cx->vbi.start[1];
diff --git a/drivers/media/pci/cx18/cx18-streams.c b/drivers/media/pci/cx18/cx18-streams.c
index c98608452..f3802ec1b 100644
--- a/drivers/media/pci/cx18/cx18-streams.c
+++ b/drivers/media/pci/cx18/cx18-streams.c
@@ -605,9 +605,9 @@ static void cx18_vbi_setup(struct cx18_stream *s)
/* Lines per field */
data[1] = (lines / 2) | ((lines / 2) << 16);
/* bytes per line */
- data[2] = (raw ? vbi_active_samples
- : (cx->is_60hz ? vbi_hblank_samples_60Hz
- : vbi_hblank_samples_50Hz));
+ data[2] = (raw ? VBI_ACTIVE_SAMPLES
+ : (cx->is_60hz ? VBI_HBLANK_SAMPLES_60HZ
+ : VBI_HBLANK_SAMPLES_50HZ));
/* Every X number of frames a VBI interrupt arrives
(frames as in 25 or 30 fps) */
data[3] = 1;
@@ -761,7 +761,7 @@ static void cx18_stream_configure_mdls(struct cx18_stream *s)
s->bufs_per_mdl = 1;
if (cx18_raw_vbi(s->cx)) {
s->mdl_size = (s->cx->is_60hz ? 12 : 18)
- * 2 * vbi_active_samples;
+ * 2 * VBI_ACTIVE_SAMPLES;
} else {
/*
* See comment in cx18_vbi_setup() below about the
@@ -769,8 +769,8 @@ static void cx18_stream_configure_mdls(struct cx18_stream *s)
* the lines on which EAV RP codes toggle.
*/
s->mdl_size = s->cx->is_60hz
- ? (21 - 4 + 1) * 2 * vbi_hblank_samples_60Hz
- : (23 - 2 + 1) * 2 * vbi_hblank_samples_50Hz;
+ ? (21 - 4 + 1) * 2 * VBI_HBLANK_SAMPLES_60HZ
+ : (23 - 2 + 1) * 2 * VBI_HBLANK_SAMPLES_50HZ;
}
break;
default:
diff --git a/drivers/media/pci/cx18/cx18-vbi.c b/drivers/media/pci/cx18/cx18-vbi.c
index add99642f..43360cbcf 100644
--- a/drivers/media/pci/cx18/cx18-vbi.c
+++ b/drivers/media/pci/cx18/cx18-vbi.c
@@ -108,7 +108,7 @@ static void copy_vbi_data(struct cx18 *cx, int lines, u32 pts_stamp)
/* FIXME - this function ignores the input size. */
static u32 compress_raw_buf(struct cx18 *cx, u8 *buf, u32 size, u32 hdr_size)
{
- u32 line_size = vbi_active_samples;
+ u32 line_size = VBI_ACTIVE_SAMPLES;
u32 lines = cx->vbi.count * 2;
u8 *q = buf;
u8 *p;
@@ -145,8 +145,8 @@ static u32 compress_sliced_buf(struct cx18 *cx, u8 *buf, u32 size,
struct v4l2_decode_vbi_line vbi;
int i;
u32 line = 0;
- u32 line_size = cx->is_60hz ? vbi_hblank_samples_60Hz
- : vbi_hblank_samples_50Hz;
+ u32 line_size = cx->is_60hz ? VBI_HBLANK_SAMPLES_60HZ
+ : VBI_HBLANK_SAMPLES_50HZ;
/* find the first valid line */
for (i = hdr_size, buf += hdr_size; i < size; i++, buf++) {
diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c
index 1eede74fe..97f836400 100644
--- a/drivers/media/pci/cx23885/cx23885-417.c
+++ b/drivers/media/pci/cx23885/cx23885-417.c
@@ -1140,7 +1140,7 @@ static int cx23885_initialize_codec(struct cx23885_dev *dev, int startencoder)
static int queue_setup(struct vb2_queue *q,
unsigned int *num_buffers, unsigned int *num_planes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct cx23885_dev *dev = q->drv_priv;
@@ -1148,7 +1148,6 @@ static int queue_setup(struct vb2_queue *q,
dev->ts1.ts_packet_count = mpeglines;
*num_planes = 1;
sizes[0] = mpeglinesize * mpeglines;
- alloc_ctxs[0] = dev->alloc_ctx;
*num_buffers = mpegbufs;
return 0;
}
@@ -1553,6 +1552,7 @@ int cx23885_417_register(struct cx23885_dev *dev)
q->mem_ops = &vb2_dma_sg_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &dev->lock;
+ q->dev = &dev->pci->dev;
err = vb2_queue_init(q);
if (err < 0)
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index ff0789328..55fef8210 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -765,6 +765,11 @@ struct cx23885_board cx23885_boards[] = {
.amux = CX25840_AUDIO7,
} },
},
+ [CX23885_BOARD_HAUPPAUGE_QUADHD_DVB] = {
+ .name = "Hauppauge WinTV-QuadHD-DVB",
+ .portb = CX23885_MPEG_DVB,
+ .portc = CX23885_MPEG_DVB,
+ },
};
const unsigned int cx23885_bcount = ARRAY_SIZE(cx23885_boards);
@@ -1060,6 +1065,14 @@ struct cx23885_subid cx23885_subids[] = {
.subvendor = 0x1576,
.subdevice = 0x0460,
.card = CX23885_BOARD_VIEWCAST_460E,
+ }, {
+ .subvendor = 0x0070,
+ .subdevice = 0x6a28,
+ .card = CX23885_BOARD_HAUPPAUGE_QUADHD_DVB, /* Tuner Pair 1 */
+ }, {
+ .subvendor = 0x0070,
+ .subdevice = 0x6b28,
+ .card = CX23885_BOARD_HAUPPAUGE_QUADHD_DVB, /* Tuner Pair 2 */
},
};
const unsigned int cx23885_idcount = ARRAY_SIZE(cx23885_subids);
@@ -1257,6 +1270,14 @@ static void hauppauge_eeprom(struct cx23885_dev *dev, u8 *eeprom_data)
case 150329:
/* WinTV-HVR5525 (PCIe, DVB-S/S2, DVB-T/T2/C) */
break;
+ case 166100:
+ /* WinTV-QuadHD (DVB) Tuner Pair 1 (PCIe, IR, half height,
+ DVB-T/T2/C, DVB-T/T2/C) */
+ break;
+ case 166101:
+ /* WinTV-QuadHD (DVB) Tuner Pair 2 (PCIe, IR, half height,
+ DVB-T/T2/C, DVB-T/T2/C) */
+ break;
default:
printk(KERN_WARNING "%s: warning: "
"unknown hauppauge model #%d\n",
@@ -1729,20 +1750,22 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
cx23885_gpio_set(dev, GPIO_2);
break;
case CX23885_BOARD_HAUPPAUGE_HVR5525:
+ case CX23885_BOARD_HAUPPAUGE_QUADHD_DVB:
/*
- * GPIO-00 IR_WIDE
- * GPIO-02 wake#
- * GPIO-03 VAUX Pres.
- * GPIO-07 PROG#
- * GPIO-08 SAT_RESN
- * GPIO-09 TER_RESN
- * GPIO-10 B2_SENSE
- * GPIO-11 B1_SENSE
- * GPIO-15 IR_LED_STATUS
- * GPIO-19 IR_NARROW
- * GPIO-20 Blaster1
- * ALTGPIO VAUX_SWITCH
- * AUX_PLL_CLK : Blaster2
+ * HVR5525 GPIO Details:
+ * GPIO-00 IR_WIDE
+ * GPIO-02 wake#
+ * GPIO-03 VAUX Pres.
+ * GPIO-07 PROG#
+ * GPIO-08 SAT_RESN
+ * GPIO-09 TER_RESN
+ * GPIO-10 B2_SENSE
+ * GPIO-11 B1_SENSE
+ * GPIO-15 IR_LED_STATUS
+ * GPIO-19 IR_NARROW
+ * GPIO-20 Blaster1
+ * ALTGPIO VAUX_SWITCH
+ * AUX_PLL_CLK : Blaster2
*/
/* Put the parts into reset and back */
cx23885_gpio_enable(dev, GPIO_8 | GPIO_9, 1);
@@ -1802,6 +1825,7 @@ int cx23885_ir_init(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1255:
case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
case CX23885_BOARD_HAUPPAUGE_HVR1210:
+ case CX23885_BOARD_HAUPPAUGE_QUADHD_DVB:
/* FIXME: Implement me */
break;
case CX23885_BOARD_HAUPPAUGE_HVR1270:
@@ -2000,6 +2024,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_STARBURST:
case CX23885_BOARD_HAUPPAUGE_IMPACTVCBE:
case CX23885_BOARD_HAUPPAUGE_HVR5525:
+ case CX23885_BOARD_HAUPPAUGE_QUADHD_DVB:
if (dev->i2c_bus[0].i2c_rc == 0)
hauppauge_eeprom(dev, eeprom+0xc0);
break;
@@ -2145,6 +2170,14 @@ void cx23885_card_setup(struct cx23885_dev *dev)
ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
break;
+ case CX23885_BOARD_HAUPPAUGE_QUADHD_DVB:
+ ts1->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
+ ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
+ ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
+ ts2->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
+ ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
+ ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
+ break;
case CX23885_BOARD_HAUPPAUGE_HVR1250:
case CX23885_BOARD_HAUPPAUGE_HVR1500:
case CX23885_BOARD_HAUPPAUGE_HVR1500Q:
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 813c217b5..c86b1093a 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -2005,14 +2005,9 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
err = pci_set_dma_mask(pci_dev, 0xffffffff);
if (err) {
printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
- goto fail_context;
+ goto fail_ctrl;
}
- dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
- if (IS_ERR(dev->alloc_ctx)) {
- err = PTR_ERR(dev->alloc_ctx);
- goto fail_context;
- }
err = request_irq(pci_dev->irq, cx23885_irq,
IRQF_SHARED, dev->name, dev);
if (err < 0) {
@@ -2041,8 +2036,6 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
return 0;
fail_irq:
- vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
-fail_context:
cx23885_dev_unregister(dev);
fail_ctrl:
v4l2_ctrl_handler_free(hdl);
@@ -2068,7 +2061,6 @@ static void cx23885_finidev(struct pci_dev *pci_dev)
pci_disable_device(pci_dev);
cx23885_dev_unregister(dev);
- vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
v4l2_ctrl_handler_free(&dev->ctrl_handler);
v4l2_device_unregister(v4l2_dev);
kfree(dev);
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 7e061da5d..139d853b9 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -94,7 +94,7 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
static int queue_setup(struct vb2_queue *q,
unsigned int *num_buffers, unsigned int *num_planes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct cx23885_tsport *port = q->drv_priv;
@@ -102,7 +102,6 @@ static int queue_setup(struct vb2_queue *q,
port->ts_packet_count = 32;
*num_planes = 1;
sizes[0] = port->ts_packet_size * port->ts_packet_count;
- alloc_ctxs[0] = port->dev->alloc_ctx;
*num_buffers = 32;
return 0;
}
@@ -2269,9 +2268,107 @@ static int dvb_register(struct cx23885_tsport *port)
}
break;
}
+ case CX23885_BOARD_HAUPPAUGE_QUADHD_DVB:
+ switch (port->nr) {
+ /* port b - Terrestrial/cable */
+ case 1:
+ /* attach frontend */
+ memset(&si2168_config, 0, sizeof(si2168_config));
+ si2168_config.i2c_adapter = &adapter;
+ si2168_config.fe = &fe0->dvb.frontend;
+ si2168_config.ts_mode = SI2168_TS_SERIAL;
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "si2168", I2C_NAME_SIZE);
+ info.addr = 0x64;
+ info.platform_data = &si2168_config;
+ request_module("%s", info.type);
+ client_demod = i2c_new_device(&dev->i2c_bus[0].i2c_adap, &info);
+ if (!client_demod || !client_demod->dev.driver)
+ goto frontend_detach;
+ if (!try_module_get(client_demod->dev.driver->owner)) {
+ i2c_unregister_device(client_demod);
+ goto frontend_detach;
+ }
+ port->i2c_client_demod = client_demod;
+
+ /* attach tuner */
+ memset(&si2157_config, 0, sizeof(si2157_config));
+ si2157_config.fe = fe0->dvb.frontend;
+ si2157_config.if_port = 1;
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "si2157", I2C_NAME_SIZE);
+ info.addr = 0x60;
+ info.platform_data = &si2157_config;
+ request_module("%s", info.type);
+ client_tuner = i2c_new_device(&dev->i2c_bus[1].i2c_adap, &info);
+ if (!client_tuner || !client_tuner->dev.driver) {
+ module_put(client_demod->dev.driver->owner);
+ i2c_unregister_device(client_demod);
+ port->i2c_client_demod = NULL;
+ goto frontend_detach;
+ }
+ if (!try_module_get(client_tuner->dev.driver->owner)) {
+ i2c_unregister_device(client_tuner);
+ module_put(client_demod->dev.driver->owner);
+ i2c_unregister_device(client_demod);
+ port->i2c_client_demod = NULL;
+ goto frontend_detach;
+ }
+ port->i2c_client_tuner = client_tuner;
+ break;
+
+ /* port c - terrestrial/cable */
+ case 2:
+ /* attach frontend */
+ memset(&si2168_config, 0, sizeof(si2168_config));
+ si2168_config.i2c_adapter = &adapter;
+ si2168_config.fe = &fe0->dvb.frontend;
+ si2168_config.ts_mode = SI2168_TS_SERIAL;
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "si2168", I2C_NAME_SIZE);
+ info.addr = 0x66;
+ info.platform_data = &si2168_config;
+ request_module("%s", info.type);
+ client_demod = i2c_new_device(&dev->i2c_bus[0].i2c_adap, &info);
+ if (!client_demod || !client_demod->dev.driver)
+ goto frontend_detach;
+ if (!try_module_get(client_demod->dev.driver->owner)) {
+ i2c_unregister_device(client_demod);
+ goto frontend_detach;
+ }
+ port->i2c_client_demod = client_demod;
+
+ /* attach tuner */
+ memset(&si2157_config, 0, sizeof(si2157_config));
+ si2157_config.fe = fe0->dvb.frontend;
+ si2157_config.if_port = 1;
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "si2157", I2C_NAME_SIZE);
+ info.addr = 0x62;
+ info.platform_data = &si2157_config;
+ request_module("%s", info.type);
+ client_tuner = i2c_new_device(&dev->i2c_bus[1].i2c_adap, &info);
+ if (!client_tuner || !client_tuner->dev.driver) {
+ module_put(client_demod->dev.driver->owner);
+ i2c_unregister_device(client_demod);
+ port->i2c_client_demod = NULL;
+ goto frontend_detach;
+ }
+ if (!try_module_get(client_tuner->dev.driver->owner)) {
+ i2c_unregister_device(client_tuner);
+ module_put(client_demod->dev.driver->owner);
+ i2c_unregister_device(client_demod);
+ port->i2c_client_demod = NULL;
+ goto frontend_detach;
+ }
+ port->i2c_client_tuner = client_tuner;
+ break;
+ }
+ break;
+
default:
printk(KERN_INFO "%s: The frontend of your DVB/ATSC card "
- " isn't supported yet\n",
+ " isn't supported yet\n",
dev->name);
break;
}
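Both QuadHD ports repeat the same attach choreography: load and bind the si2168 demodulator as an I2C client, pin its driver module with try_module_get(), then do the same for the si2157 tuner, unwinding in exact reverse order on any failure so that no half-bound client or module reference leaks. A compressed sketch of that discipline using a hypothetical helper (the patch open-codes it per port):

/* Attach one I2C-client-based frontend component (sketch). */
static struct i2c_client *foo_attach_client(struct i2c_adapter *adap,
					    struct i2c_board_info *info)
{
	struct i2c_client *client;

	request_module("%s", info->type);
	client = i2c_new_device(adap, info);
	if (!client || !client->dev.driver)
		return NULL;		/* no driver bound */
	if (!try_module_get(client->dev.driver->owner)) {
		i2c_unregister_device(client);
		return NULL;
	}
	return client;
}

/* Teardown is the mirror image: tuner first, then demod. */
static void foo_detach_client(struct i2c_client *client)
{
	module_put(client->dev.driver->owner);
	i2c_unregister_device(client);
}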
@@ -2397,6 +2494,7 @@ int cx23885_dvb_register(struct cx23885_tsport *port)
q->mem_ops = &vb2_dma_sg_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &dev->lock;
+ q->dev = &dev->pci->dev;
err = vb2_queue_init(q);
if (err < 0)
diff --git a/drivers/media/pci/cx23885/cx23885-vbi.c b/drivers/media/pci/cx23885/cx23885-vbi.c
index 39750ebcc..75e7fa7b1 100644
--- a/drivers/media/pci/cx23885/cx23885-vbi.c
+++ b/drivers/media/pci/cx23885/cx23885-vbi.c
@@ -122,7 +122,7 @@ static int cx23885_start_vbi_dma(struct cx23885_dev *dev,
static int queue_setup(struct vb2_queue *q,
unsigned int *num_buffers, unsigned int *num_planes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct cx23885_dev *dev = q->drv_priv;
unsigned lines = VBI_PAL_LINE_COUNT;
@@ -131,7 +131,6 @@ static int queue_setup(struct vb2_queue *q,
lines = VBI_NTSC_LINE_COUNT;
*num_planes = 1;
sizes[0] = lines * VBI_LINE_LENGTH * 2;
- alloc_ctxs[0] = dev->alloc_ctx;
return 0;
}
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index 53df50465..148f9a424 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -335,13 +335,12 @@ static int cx23885_start_video_dma(struct cx23885_dev *dev,
static int queue_setup(struct vb2_queue *q,
unsigned int *num_buffers, unsigned int *num_planes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct cx23885_dev *dev = q->drv_priv;
*num_planes = 1;
sizes[0] = (dev->fmt->depth * dev->width * dev->height) >> 3;
- alloc_ctxs[0] = dev->alloc_ctx;
return 0;
}
@@ -1268,6 +1267,7 @@ int cx23885_video_register(struct cx23885_dev *dev)
q->mem_ops = &vb2_dma_sg_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &dev->lock;
+ q->dev = &dev->pci->dev;
err = vb2_queue_init(q);
if (err < 0)
@@ -1284,6 +1284,7 @@ int cx23885_video_register(struct cx23885_dev *dev)
q->mem_ops = &vb2_dma_sg_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &dev->lock;
+ q->dev = &dev->pci->dev;
err = vb2_queue_init(q);
if (err < 0)
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index b1a540940..24a0a6c5b 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -103,6 +103,7 @@
#define CX23885_BOARD_HAUPPAUGE_STARBURST 53
#define CX23885_BOARD_VIEWCAST_260E 54
#define CX23885_BOARD_VIEWCAST_460E 55
+#define CX23885_BOARD_HAUPPAUGE_QUADHD_DVB 56
#define GPIO_0 0x00000001
#define GPIO_1 0x00000002
@@ -430,7 +431,6 @@ struct cx23885_dev {
struct vb2_queue vb2_vidq;
struct cx23885_dmaqueue vbiq;
struct vb2_queue vb2_vbiq;
- void *alloc_ctx;
spinlock_t slock;
diff --git a/drivers/media/pci/cx25821/cx25821-alsa.c b/drivers/media/pci/cx25821/cx25821-alsa.c
index b602eba2b..df189b16a 100644
--- a/drivers/media/pci/cx25821/cx25821-alsa.c
+++ b/drivers/media/pci/cx25821/cx25821-alsa.c
@@ -693,7 +693,7 @@ static int snd_cx25821_pcm(struct cx25821_audio_dev *chip, int device,
* Only boards with eeprom and byte 1 at eeprom=1 have it
*/
-static const struct pci_device_id cx25821_audio_pci_tbl[] = {
+static const struct pci_device_id __maybe_unused cx25821_audio_pci_tbl[] = {
{0x14f1, 0x0920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0,}
};
diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
index 0042803a9..9a5f912ca 100644
--- a/drivers/media/pci/cx25821/cx25821-core.c
+++ b/drivers/media/pci/cx25821/cx25821-core.c
@@ -1301,15 +1301,10 @@ static int cx25821_initdev(struct pci_dev *pci_dev,
goto fail_unregister_device;
}
- dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
- if (IS_ERR(dev->alloc_ctx)) {
- err = PTR_ERR(dev->alloc_ctx);
- goto fail_unregister_pci;
- }
err = cx25821_dev_setup(dev);
if (err)
- goto fail_free_ctx;
+ goto fail_unregister_pci;
/* print pci info */
pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &dev->pci_rev);
@@ -1340,8 +1335,6 @@ fail_irq:
pr_info("cx25821_initdev() can't get IRQ !\n");
cx25821_dev_unregister(dev);
-fail_free_ctx:
- vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
fail_unregister_pci:
pci_disable_device(pci_dev);
fail_unregister_device:
@@ -1365,7 +1358,6 @@ static void cx25821_finidev(struct pci_dev *pci_dev)
free_irq(pci_dev->irq, dev);
cx25821_dev_unregister(dev);
- vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
v4l2_device_unregister(v4l2_dev);
kfree(dev);
}
diff --git a/drivers/media/pci/cx25821/cx25821-video.c b/drivers/media/pci/cx25821/cx25821-video.c
index c48bba9da..adcd09be3 100644
--- a/drivers/media/pci/cx25821/cx25821-video.c
+++ b/drivers/media/pci/cx25821/cx25821-video.c
@@ -143,13 +143,11 @@ int cx25821_video_irq(struct cx25821_dev *dev, int chan_num, u32 status)
static int cx25821_queue_setup(struct vb2_queue *q,
unsigned int *num_buffers, unsigned int *num_planes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct cx25821_channel *chan = q->drv_priv;
unsigned size = (chan->fmt->depth * chan->width * chan->height) >> 3;
- alloc_ctxs[0] = chan->dev->alloc_ctx;
-
if (*num_planes)
return sizes[0] < size ? -EINVAL : 0;
@@ -759,6 +757,7 @@ int cx25821_video_register(struct cx25821_dev *dev)
q->mem_ops = &vb2_dma_sg_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &dev->lock;
+ q->dev = &dev->pci->dev;
if (!is_output) {
err = vb2_queue_init(q);
diff --git a/drivers/media/pci/cx25821/cx25821.h b/drivers/media/pci/cx25821/cx25821.h
index a513b68be..35c7375e4 100644
--- a/drivers/media/pci/cx25821/cx25821.h
+++ b/drivers/media/pci/cx25821/cx25821.h
@@ -249,7 +249,6 @@ struct cx25821_dev {
int hwrevision;
/* used by cx25821-alsa */
struct snd_card *card;
- void *alloc_ctx;
u32 clk_freq;
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index e158a1da1..f3f13eb0c 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -799,13 +799,9 @@ static int snd_cx88_alc_put(struct snd_kcontrol *kcontrol,
{
snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
struct cx88_core *core = chip->core;
- struct v4l2_control client_ctl;
-
- memset(&client_ctl, 0, sizeof(client_ctl));
- client_ctl.value = 0 != value->value.integer.value[0];
- client_ctl.id = V4L2_CID_AUDIO_LOUDNESS;
- call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl);
+ wm8775_s_ctrl(core, V4L2_CID_AUDIO_LOUDNESS,
+ value->value.integer.value[0] != 0);
return 0;
}
diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c
index 34e4e754b..6eeaf5373 100644
--- a/drivers/media/pci/cx88/cx88-blackbird.c
+++ b/drivers/media/pci/cx88/cx88-blackbird.c
@@ -639,7 +639,7 @@ static int blackbird_stop_codec(struct cx8802_dev *dev)
static int queue_setup(struct vb2_queue *q,
unsigned int *num_buffers, unsigned int *num_planes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct cx8802_dev *dev = q->drv_priv;
@@ -647,7 +647,6 @@ static int queue_setup(struct vb2_queue *q,
dev->ts_packet_size = 188 * 4;
dev->ts_packet_count = 32;
sizes[0] = dev->ts_packet_size * dev->ts_packet_count;
- alloc_ctxs[0] = dev->alloc_ctx;
return 0;
}
@@ -1183,6 +1182,7 @@ static int cx8802_blackbird_probe(struct cx8802_driver *drv)
q->mem_ops = &vb2_dma_sg_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &core->lock;
+ q->dev = &dev->pci->dev;
err = vb2_queue_init(q);
if (err < 0)
diff --git a/drivers/media/pci/cx88/cx88-dvb.c b/drivers/media/pci/cx88/cx88-dvb.c
index c939dd9f4..ff988c2cb 100644
--- a/drivers/media/pci/cx88/cx88-dvb.c
+++ b/drivers/media/pci/cx88/cx88-dvb.c
@@ -84,7 +84,7 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
static int queue_setup(struct vb2_queue *q,
unsigned int *num_buffers, unsigned int *num_planes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct cx8802_dev *dev = q->drv_priv;
@@ -92,7 +92,6 @@ static int queue_setup(struct vb2_queue *q,
dev->ts_packet_size = 188 * 4;
dev->ts_packet_count = dvb_buf_tscnt;
sizes[0] = dev->ts_packet_size * dev->ts_packet_count;
- alloc_ctxs[0] = dev->alloc_ctx;
*num_buffers = dvb_buf_tscnt;
return 0;
}
@@ -1793,6 +1792,7 @@ static int cx8802_dvb_probe(struct cx8802_driver *drv)
q->mem_ops = &vb2_dma_sg_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &core->lock;
+ q->dev = &dev->pci->dev;
err = vb2_queue_init(q);
if (err < 0)
diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c
index f34c229f9..245357adb 100644
--- a/drivers/media/pci/cx88/cx88-mpeg.c
+++ b/drivers/media/pci/cx88/cx88-mpeg.c
@@ -726,11 +726,6 @@ static int cx8802_probe(struct pci_dev *pci_dev,
if (NULL == dev)
goto fail_core;
dev->pci = pci_dev;
- dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
- if (IS_ERR(dev->alloc_ctx)) {
- err = PTR_ERR(dev->alloc_ctx);
- goto fail_dev;
- }
dev->core = core;
/* Maintain a reference so cx88-video can query the 8802 device. */
@@ -738,7 +733,7 @@ static int cx8802_probe(struct pci_dev *pci_dev,
err = cx8802_init_common(dev);
if (err != 0)
- goto fail_free;
+ goto fail_dev;
INIT_LIST_HEAD(&dev->drvlist);
mutex_lock(&cx8802_mutex);
@@ -749,8 +744,6 @@ static int cx8802_probe(struct pci_dev *pci_dev,
request_modules(dev);
return 0;
- fail_free:
- vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
fail_dev:
kfree(dev);
fail_core:
@@ -798,7 +791,6 @@ static void cx8802_remove(struct pci_dev *pci_dev)
/* common */
cx8802_fini_common(dev);
cx88_core_put(dev->core,dev->pci);
- vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
kfree(dev);
}
diff --git a/drivers/media/pci/cx88/cx88-vbi.c b/drivers/media/pci/cx88/cx88-vbi.c
index ccc646d81..d3237cf8f 100644
--- a/drivers/media/pci/cx88/cx88-vbi.c
+++ b/drivers/media/pci/cx88/cx88-vbi.c
@@ -109,7 +109,7 @@ int cx8800_restart_vbi_queue(struct cx8800_dev *dev,
static int queue_setup(struct vb2_queue *q,
unsigned int *num_buffers, unsigned int *num_planes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct cx8800_dev *dev = q->drv_priv;
@@ -118,7 +118,6 @@ static int queue_setup(struct vb2_queue *q,
sizes[0] = VBI_LINE_NTSC_COUNT * VBI_LINE_LENGTH * 2;
else
sizes[0] = VBI_LINE_PAL_COUNT * VBI_LINE_LENGTH * 2;
- alloc_ctxs[0] = dev->alloc_ctx;
return 0;
}
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index 5f331df65..5dc1e3f08 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -431,14 +431,13 @@ static int restart_video_queue(struct cx8800_dev *dev,
static int queue_setup(struct vb2_queue *q,
unsigned int *num_buffers, unsigned int *num_planes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct cx8800_dev *dev = q->drv_priv;
struct cx88_core *core = dev->core;
*num_planes = 1;
sizes[0] = (dev->fmt->depth * core->width * core->height) >> 3;
- alloc_ctxs[0] = dev->alloc_ctx;
return 0;
}
@@ -1319,12 +1318,6 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
printk("%s/0: Oops: no 32bit PCI DMA ???\n",core->name);
goto fail_core;
}
- dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
- if (IS_ERR(dev->alloc_ctx)) {
- err = PTR_ERR(dev->alloc_ctx);
- goto fail_core;
- }
-
/* initialize driver struct */
spin_lock_init(&dev->slock);
@@ -1445,6 +1438,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
q->mem_ops = &vb2_dma_sg_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &core->lock;
+ q->dev = &dev->pci->dev;
err = vb2_queue_init(q);
if (err < 0)
@@ -1461,6 +1455,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
q->mem_ops = &vb2_dma_sg_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &core->lock;
+ q->dev = &dev->pci->dev;
err = vb2_queue_init(q);
if (err < 0)
@@ -1530,7 +1525,6 @@ fail_unreg:
free_irq(pci_dev->irq, dev);
mutex_unlock(&core->lock);
fail_core:
- vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
core->v4ldev = NULL;
cx88_core_put(core,dev->pci);
fail_free:
@@ -1564,7 +1558,6 @@ static void cx8800_finidev(struct pci_dev *pci_dev)
/* free memory */
cx88_core_put(core,dev->pci);
- vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
kfree(dev);
}
diff --git a/drivers/media/pci/cx88/cx88.h b/drivers/media/pci/cx88/cx88.h
index 78f817ee7..ecd4b7bec 100644
--- a/drivers/media/pci/cx88/cx88.h
+++ b/drivers/media/pci/cx88/cx88.h
@@ -485,7 +485,6 @@ struct cx8800_dev {
/* pci i/o */
struct pci_dev *pci;
unsigned char pci_rev,pci_lat;
- void *alloc_ctx;
const struct cx8800_fmt *fmt;
@@ -549,7 +548,6 @@ struct cx8802_dev {
/* pci i/o */
struct pci_dev *pci;
unsigned char pci_rev,pci_lat;
- void *alloc_ctx;
/* dma queues */
struct cx88_dmaqueue mpegq;
diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
index 6e995ef8c..47def73b3 100644
--- a/drivers/media/pci/ddbridge/ddbridge-core.c
+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
@@ -1569,10 +1569,9 @@ static int ddb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (pci_enable_device(pdev) < 0)
return -ENODEV;
- dev = vmalloc(sizeof(struct ddb));
+ dev = vzalloc(sizeof(struct ddb));
if (dev == NULL)
return -ENOMEM;
- memset(dev, 0, sizeof(struct ddb));
dev->pdev = pdev;
pci_set_drvdata(pdev, dev);
diff --git a/drivers/media/pci/dt3155/dt3155.c b/drivers/media/pci/dt3155/dt3155.c
index 568c0c8fb..6a219694b 100644
--- a/drivers/media/pci/dt3155/dt3155.c
+++ b/drivers/media/pci/dt3155/dt3155.c
@@ -133,7 +133,7 @@ static int wait_i2c_reg(void __iomem *addr)
static int
dt3155_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *num_planes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct dt3155_priv *pd = vb2_get_drv_priv(vq);
@@ -141,7 +141,6 @@ dt3155_queue_setup(struct vb2_queue *vq,
if (vq->num_buffers + *nbuffers < 2)
*nbuffers = 2 - vq->num_buffers;
- alloc_ctxs[0] = pd->alloc_ctx;
if (*num_planes)
return sizes[0] < size ? -EINVAL : 0;
*num_planes = 1;
@@ -544,21 +543,16 @@ static int dt3155_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pd->vidq.min_buffers_needed = 2;
pd->vidq.gfp_flags = GFP_DMA32;
pd->vidq.lock = &pd->mux; /* for locking v4l2_file_operations */
+ pd->vidq.dev = &pdev->dev;
pd->vdev.queue = &pd->vidq;
err = vb2_queue_init(&pd->vidq);
if (err < 0)
goto err_v4l2_dev_unreg;
- pd->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
- if (IS_ERR(pd->alloc_ctx)) {
- dev_err(&pdev->dev, "Can't allocate buffer context");
- err = PTR_ERR(pd->alloc_ctx);
- goto err_v4l2_dev_unreg;
- }
spin_lock_init(&pd->lock);
pd->config = ACQ_MODE_EVEN;
err = pci_enable_device(pdev);
if (err)
- goto err_free_ctx;
+ goto err_v4l2_dev_unreg;
err = pci_request_region(pdev, 0, pci_name(pdev));
if (err)
goto err_pci_disable;
@@ -588,8 +582,6 @@ err_free_reg:
pci_release_region(pdev, 0);
err_pci_disable:
pci_disable_device(pdev);
-err_free_ctx:
- vb2_dma_contig_cleanup_ctx(pd->alloc_ctx);
err_v4l2_dev_unreg:
v4l2_device_unregister(&pd->v4l2_dev);
return err;
@@ -608,7 +600,6 @@ static void dt3155_remove(struct pci_dev *pdev)
pci_iounmap(pdev, pd->regs);
pci_release_region(pdev, 0);
pci_disable_device(pdev);
- vb2_dma_contig_cleanup_ctx(pd->alloc_ctx);
}
static const struct pci_device_id pci_ids[] = {
diff --git a/drivers/media/pci/dt3155/dt3155.h b/drivers/media/pci/dt3155/dt3155.h
index b3531e0bc..39442e589 100644
--- a/drivers/media/pci/dt3155/dt3155.h
+++ b/drivers/media/pci/dt3155/dt3155.h
@@ -161,7 +161,6 @@
* @vdev: video_device structure
* @pdev: pointer to pci_dev structure
* @vidq: vb2_queue structure
- * @alloc_ctx: dma_contig allocation context
* @curr_buf: pointer to current buffer
* @mux: mutex to protect the instance
* @dmaq: queue for dma buffers
@@ -181,7 +180,6 @@ struct dt3155_priv {
struct video_device vdev;
struct pci_dev *pdev;
struct vb2_queue vidq;
- struct vb2_alloc_ctx *alloc_ctx;
struct vb2_v4l2_buffer *curr_buf;
struct mutex mux;
struct list_head dmaq;
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-mixer.c b/drivers/media/pci/ivtv/ivtv-alsa-mixer.c
index 33ec05b09..79b24bde4 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-mixer.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-mixer.c
@@ -93,7 +93,7 @@ static int snd_ivtv_mixer_tv_vol_get(struct snd_kcontrol *kctl,
vctrl.value = dB_to_cx25840_vol(uctl->value.integer.value[0]);
snd_ivtv_lock(itvsc);
- ret = v4l2_subdev_call(itv->sd_audio, core, g_ctrl, &vctrl);
+ ret = v4l2_g_ctrl(itv->sd_audio->ctrl_handler, &vctrl);
snd_ivtv_unlock(itvsc);
if (!ret)
@@ -115,14 +115,14 @@ static int snd_ivtv_mixer_tv_vol_put(struct snd_kcontrol *kctl,
snd_ivtv_lock(itvsc);
/* Fetch current state */
- ret = v4l2_subdev_call(itv->sd_audio, core, g_ctrl, &vctrl);
+ ret = v4l2_g_ctrl(itv->sd_audio->ctrl_handler, &vctrl);
if (ret ||
(cx25840_vol_to_dB(vctrl.value) != uctl->value.integer.value[0])) {
/* Set, if needed */
vctrl.value = dB_to_cx25840_vol(uctl->value.integer.value[0]);
- ret = v4l2_subdev_call(itv->sd_audio, core, s_ctrl, &vctrl);
+ ret = v4l2_s_ctrl(NULL, itv->sd_audio->ctrl_handler, &vctrl);
if (!ret)
ret = 1; /* Indicate control was changed w/o error */
}
diff --git a/drivers/media/pci/netup_unidvb/Kconfig b/drivers/media/pci/netup_unidvb/Kconfig
index f277b0b10..0ad37714c 100644
--- a/drivers/media/pci/netup_unidvb/Kconfig
+++ b/drivers/media/pci/netup_unidvb/Kconfig
@@ -5,8 +5,13 @@ config DVB_NETUP_UNIDVB
select VIDEOBUF2_VMALLOC
select DVB_HORUS3A if MEDIA_SUBDRV_AUTOSELECT
select DVB_ASCOT2E if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_HELENE if MEDIA_SUBDRV_AUTOSELECT
select DVB_LNBH25 if MEDIA_SUBDRV_AUTOSELECT
select DVB_CXD2841ER if MEDIA_SUBDRV_AUTOSELECT
---help---
Support for NetUP PCI express Universal DVB card.
-
+ Say Y when you want to support the NetUP Dual Universal DVB card.
+ The card can receive two independent streams in the following
+ standards: DVB-S/S2, DVB-T/T2 and DVB-C/C2.
+ Two CI slots are available for CAM modules.
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb.h b/drivers/media/pci/netup_unidvb/netup_unidvb.h
index a67b28111..39b08ecda 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb.h
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb.h
@@ -50,6 +50,15 @@
#define NETUP_UNIDVB_IRQ_CAM0 (1 << 11)
#define NETUP_UNIDVB_IRQ_CAM1 (1 << 12)
+/* NetUP Universal DVB card hardware revisions and their PCI device IDs:
+ * 1.3 - CXD2841ER demod, ASCOT2E and HORUS3A tuners
+ * 1.4 - CXD2854ER demod, HELENE tuner
+ */
+enum netup_hw_rev {
+ NETUP_HW_REV_1_3 = 0x18F6,
+ NETUP_HW_REV_1_4 = 0x18F7
+};
+
struct netup_dma {
u8 num;
spinlock_t lock;
@@ -119,6 +128,7 @@ struct netup_unidvb_dev {
struct netup_dma dma[2];
struct netup_ci_state ci[2];
struct netup_spi *spi;
+ enum netup_hw_rev rev;
};
int netup_i2c_register(struct netup_unidvb_dev *ndev);
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_ci.c b/drivers/media/pci/netup_unidvb/netup_unidvb_ci.c
index f46ffac66..f535270c2 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_ci.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_ci.c
@@ -147,7 +147,7 @@ static int netup_unidvb_ci_read_attribute_mem(struct dvb_ca_en50221 *en50221,
{
struct netup_ci_state *state = en50221->data;
struct netup_unidvb_dev *dev = state->dev;
- u8 val = *((u8 __force *)state->membase8_io + addr);
+ u8 val = *((u8 __force *)state->membase8_config + addr);
dev_dbg(&dev->pci_dev->dev,
"%s(): addr=0x%x val=0x%x\n", __func__, addr, val);
@@ -162,7 +162,7 @@ static int netup_unidvb_ci_write_attribute_mem(struct dvb_ca_en50221 *en50221,
dev_dbg(&dev->pci_dev->dev,
"%s(): addr=0x%x data=0x%x\n", __func__, addr, data);
- *((u8 __force *)state->membase8_io + addr) = data;
+ *((u8 __force *)state->membase8_config + addr) = data;
return 0;
}
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
index 2b667b315..ac547cb84 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
@@ -34,6 +34,7 @@
#include "cxd2841er.h"
#include "horus3a.h"
#include "ascot2e.h"
+#include "helene.h"
#include "lnbh25.h"
static int spi_enable;
@@ -120,7 +121,8 @@ static int netup_unidvb_tuner_ctrl(void *priv, int is_dvb_tc);
static void netup_unidvb_queue_cleanup(struct netup_dma *dma);
static struct cxd2841er_config demod_config = {
- .i2c_addr = 0xc8
+ .i2c_addr = 0xc8,
+ .xtal = SONY_XTAL_24000
};
static struct horus3a_config horus3a_conf = {
@@ -134,6 +136,12 @@ static struct ascot2e_config ascot2e_conf = {
.set_tuner_callback = netup_unidvb_tuner_ctrl
};
+static struct helene_config helene_conf = {
+ .i2c_address = 0xc0,
+ .xtal = SONY_HELENE_XTAL_24000,
+ .set_tuner_callback = netup_unidvb_tuner_ctrl
+};
+
static struct lnbh25_config lnbh25_conf = {
.i2c_address = 0x10,
.data2_config = LNBH25_TEN | LNBH25_EXTM
@@ -152,6 +160,11 @@ static int netup_unidvb_tuner_ctrl(void *priv, int is_dvb_tc)
__func__, dma->num, is_dvb_tc);
reg = readb(ndev->bmmio0 + GPIO_REG_IO);
mask = (dma->num == 0) ? GPIO_RFA_CTL : GPIO_RFB_CTL;
+
+ /* inverted tuner control in hw rev. 1.4 */
+ if (ndev->rev == NETUP_HW_REV_1_4)
+ is_dvb_tc = !is_dvb_tc;
+
if (!is_dvb_tc)
reg |= mask;
else
@@ -280,7 +293,7 @@ static int netup_unidvb_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers,
unsigned int *nplanes,
unsigned int sizes[],
- void *alloc_ctxs[])
+ struct device *alloc_devs[])
{
struct netup_dma *dma = vb2_get_drv_priv(vq);
@@ -372,7 +385,15 @@ static int netup_unidvb_queue_init(struct netup_dma *dma,
static int netup_unidvb_dvb_init(struct netup_unidvb_dev *ndev,
int num)
{
- struct vb2_dvb_frontend *fe0, *fe1, *fe2;
+ int fe_count = 2;
+ int i = 0;
+ struct vb2_dvb_frontend *fes[2];
+ u8 fe_name[32];
+
+ if (ndev->rev == NETUP_HW_REV_1_3)
+ demod_config.xtal = SONY_XTAL_20500;
+ else
+ demod_config.xtal = SONY_XTAL_24000;
if (num < 0 || num > 1) {
dev_dbg(&ndev->pci_dev->dev,
@@ -381,84 +402,96 @@ static int netup_unidvb_dvb_init(struct netup_unidvb_dev *ndev,
}
mutex_init(&ndev->frontends[num].lock);
INIT_LIST_HEAD(&ndev->frontends[num].felist);
- if (vb2_dvb_alloc_frontend(&ndev->frontends[num], 1) == NULL ||
- vb2_dvb_alloc_frontend(
- &ndev->frontends[num], 2) == NULL ||
- vb2_dvb_alloc_frontend(
- &ndev->frontends[num], 3) == NULL) {
- dev_dbg(&ndev->pci_dev->dev,
- "%s(): unable to allocate vb2_dvb_frontend\n",
- __func__);
- return -ENOMEM;
+
+ for (i = 0; i < fe_count; i++) {
+ if (vb2_dvb_alloc_frontend(&ndev->frontends[num], i+1)
+ == NULL) {
+ dev_err(&ndev->pci_dev->dev,
+ "%s(): unable to allocate vb2_dvb_frontend\n",
+ __func__);
+ return -ENOMEM;
+ }
}
- fe0 = vb2_dvb_get_frontend(&ndev->frontends[num], 1);
- fe1 = vb2_dvb_get_frontend(&ndev->frontends[num], 2);
- fe2 = vb2_dvb_get_frontend(&ndev->frontends[num], 3);
- if (fe0 == NULL || fe1 == NULL || fe2 == NULL) {
- dev_dbg(&ndev->pci_dev->dev,
- "%s(): frontends has not been allocated\n", __func__);
- return -EINVAL;
+
+ for (i = 0; i < fe_count; i++) {
+ fes[i] = vb2_dvb_get_frontend(&ndev->frontends[num], i+1);
+ if (fes[i] == NULL) {
+ dev_err(&ndev->pci_dev->dev,
+ "%s(): frontends has not been allocated\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < fe_count; i++) {
+ netup_unidvb_queue_init(&ndev->dma[num], &fes[i]->dvb.dvbq);
+ snprintf(fe_name, sizeof(fe_name), "netup_fe%d", i);
+ fes[i]->dvb.name = fe_name;
}
- netup_unidvb_queue_init(&ndev->dma[num], &fe0->dvb.dvbq);
- netup_unidvb_queue_init(&ndev->dma[num], &fe1->dvb.dvbq);
- netup_unidvb_queue_init(&ndev->dma[num], &fe2->dvb.dvbq);
- fe0->dvb.name = "netup_fe0";
- fe1->dvb.name = "netup_fe1";
- fe2->dvb.name = "netup_fe2";
- fe0->dvb.frontend = dvb_attach(cxd2841er_attach_s,
+
+ fes[0]->dvb.frontend = dvb_attach(cxd2841er_attach_s,
&demod_config, &ndev->i2c[num].adap);
- if (fe0->dvb.frontend == NULL) {
+ if (fes[0]->dvb.frontend == NULL) {
dev_dbg(&ndev->pci_dev->dev,
"%s(): unable to attach DVB-S/S2 frontend\n",
__func__);
goto frontend_detach;
}
- horus3a_conf.set_tuner_priv = &ndev->dma[num];
- if (!dvb_attach(horus3a_attach, fe0->dvb.frontend,
- &horus3a_conf, &ndev->i2c[num].adap)) {
- dev_dbg(&ndev->pci_dev->dev,
- "%s(): unable to attach DVB-S/S2 tuner frontend\n",
- __func__);
- goto frontend_detach;
+
+ if (ndev->rev == NETUP_HW_REV_1_3) {
+ horus3a_conf.set_tuner_priv = &ndev->dma[num];
+ if (!dvb_attach(horus3a_attach, fes[0]->dvb.frontend,
+ &horus3a_conf, &ndev->i2c[num].adap)) {
+ dev_dbg(&ndev->pci_dev->dev,
+ "%s(): unable to attach HORUS3A DVB-S/S2 tuner frontend\n",
+ __func__);
+ goto frontend_detach;
+ }
+ } else {
+ helene_conf.set_tuner_priv = &ndev->dma[num];
+ if (!dvb_attach(helene_attach_s, fes[0]->dvb.frontend,
+ &helene_conf, &ndev->i2c[num].adap)) {
+ dev_err(&ndev->pci_dev->dev,
+ "%s(): unable to attach HELENE DVB-S/S2 tuner frontend\n",
+ __func__);
+ goto frontend_detach;
+ }
}
- if (!dvb_attach(lnbh25_attach, fe0->dvb.frontend,
+
+ if (!dvb_attach(lnbh25_attach, fes[0]->dvb.frontend,
&lnbh25_conf, &ndev->i2c[num].adap)) {
dev_dbg(&ndev->pci_dev->dev,
"%s(): unable to attach SEC frontend\n", __func__);
goto frontend_detach;
}
+
/* DVB-T/T2 frontend */
- fe1->dvb.frontend = dvb_attach(cxd2841er_attach_t,
+ fes[1]->dvb.frontend = dvb_attach(cxd2841er_attach_t_c,
&demod_config, &ndev->i2c[num].adap);
- if (fe1->dvb.frontend == NULL) {
- dev_dbg(&ndev->pci_dev->dev,
- "%s(): unable to attach DVB-T frontend\n", __func__);
- goto frontend_detach;
- }
- fe1->dvb.frontend->id = 1;
- ascot2e_conf.set_tuner_priv = &ndev->dma[num];
- if (!dvb_attach(ascot2e_attach, fe1->dvb.frontend,
- &ascot2e_conf, &ndev->i2c[num].adap)) {
- dev_dbg(&ndev->pci_dev->dev,
- "%s(): unable to attach DVB-T tuner frontend\n",
- __func__);
- goto frontend_detach;
- }
- /* DVB-C/C2 frontend */
- fe2->dvb.frontend = dvb_attach(cxd2841er_attach_c,
- &demod_config, &ndev->i2c[num].adap);
- if (fe2->dvb.frontend == NULL) {
+ if (fes[1]->dvb.frontend == NULL) {
dev_dbg(&ndev->pci_dev->dev,
- "%s(): unable to attach DVB-C frontend\n", __func__);
+ "%s(): unable to attach Ter frontend\n", __func__);
goto frontend_detach;
}
- fe2->dvb.frontend->id = 2;
- if (!dvb_attach(ascot2e_attach, fe2->dvb.frontend,
- &ascot2e_conf, &ndev->i2c[num].adap)) {
- dev_dbg(&ndev->pci_dev->dev,
- "%s(): unable to attach DVB-T/C tuner frontend\n",
- __func__);
- goto frontend_detach;
+ fes[1]->dvb.frontend->id = 1;
+ if (ndev->rev == NETUP_HW_REV_1_3) {
+ ascot2e_conf.set_tuner_priv = &ndev->dma[num];
+ if (!dvb_attach(ascot2e_attach, fes[1]->dvb.frontend,
+ &ascot2e_conf, &ndev->i2c[num].adap)) {
+ dev_dbg(&ndev->pci_dev->dev,
+ "%s(): unable to attach Ter tuner frontend\n",
+ __func__);
+ goto frontend_detach;
+ }
+ } else {
+ helene_conf.set_tuner_priv = &ndev->dma[num];
+ if (!dvb_attach(helene_attach, fes[1]->dvb.frontend,
+ &helene_conf, &ndev->i2c[num].adap)) {
+ dev_err(&ndev->pci_dev->dev,
+ "%s(): unable to attach HELENE Ter tuner frontend\n",
+ __func__);
+ goto frontend_detach;
+ }
}
if (vb2_dvb_register_bus(&ndev->frontends[num],
@@ -730,7 +763,7 @@ static int netup_unidvb_request_mmio(struct pci_dev *pci_dev)
static int netup_unidvb_request_modules(struct device *dev)
{
static const char * const modules[] = {
- "lnbh25", "ascot2e", "horus3a", "cxd2841er", NULL
+ "lnbh25", "ascot2e", "horus3a", "cxd2841er", "helene", NULL
};
const char * const *curr_mod = modules;
int err;
@@ -774,6 +807,16 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev,
if (!ndev)
goto dev_alloc_err;
+ /* detect hardware revision */
+ if (pci_dev->device == NETUP_HW_REV_1_3)
+ ndev->rev = NETUP_HW_REV_1_3;
+ else
+ ndev->rev = NETUP_HW_REV_1_4;
+
+ dev_info(&pci_dev->dev,
+ "%s(): board (0x%x) hardware revision 0x%x\n",
+ __func__, pci_dev->device, ndev->rev);
+
ndev->old_fw = old_firmware;
ndev->wq = create_singlethread_workqueue(NETUP_UNIDVB_NAME);
if (!ndev->wq) {
@@ -932,7 +975,7 @@ wq_create_err:
kfree(ndev);
dev_alloc_err:
dev_err(&pci_dev->dev,
- "%s(): failed to initizalize device\n", __func__);
+ "%s(): failed to initialize device\n", __func__);
return -EIO;
}
@@ -972,7 +1015,8 @@ static void netup_unidvb_finidev(struct pci_dev *pci_dev)
static struct pci_device_id netup_unidvb_pci_tbl[] = {
- { PCI_DEVICE(0x1b55, 0x18f6) },
+ { PCI_DEVICE(0x1b55, 0x18f6) }, /* hw rev. 1.3 */
+ { PCI_DEVICE(0x1b55, 0x18f7) }, /* hw rev. 1.4 */
{ 0, }
};
MODULE_DEVICE_TABLE(pci, netup_unidvb_pci_tbl);
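Revision detection piggybacks on the PCI device ID: the netup_hw_rev enumerators are the device IDs themselves (0x18f6 for rev 1.3, 0x18f7 for rev 1.4), so probe classifies the board with one comparison and the same values appear in the PCI match table. A reduced sketch of the idea with hypothetical foo_ names:

enum foo_hw_rev {
	FOO_HW_REV_1_3 = 0x18f6,	/* value doubles as PCI device ID */
	FOO_HW_REV_1_4 = 0x18f7,
};

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	enum foo_hw_rev rev = (pdev->device == FOO_HW_REV_1_3)
			    ? FOO_HW_REV_1_3 : FOO_HW_REV_1_4;

	/* rev now selects demod xtal, tuner driver, GPIO polarity... */
	dev_info(&pdev->dev, "hardware revision 0x%x\n", rev);
	return 0;
}

static const struct pci_device_id foo_pci_tbl[] = {
	{ PCI_DEVICE(0x1b55, FOO_HW_REV_1_3) },
	{ PCI_DEVICE(0x1b55, FOO_HW_REV_1_4) },
	{ 0, }
};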
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index c0e1780ec..ffb66a9ae 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -1164,18 +1164,13 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
saa7134_board_init1(dev);
saa7134_hwinit1(dev);
- dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
- if (IS_ERR(dev->alloc_ctx)) {
- err = PTR_ERR(dev->alloc_ctx);
- goto fail3;
- }
/* get irq */
err = request_irq(pci_dev->irq, saa7134_irq,
IRQF_SHARED, dev->name, dev);
if (err < 0) {
pr_err("%s: can't get IRQ %d\n",
dev->name,pci_dev->irq);
- goto fail4;
+ goto fail3;
}
/* wait a bit, register i2c bus */
@@ -1233,7 +1228,7 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
if (err < 0) {
pr_info("%s: can't register video device\n",
dev->name);
- goto fail5;
+ goto fail4;
}
pr_info("%s: registered device %s [v4l2]\n",
dev->name, video_device_node_name(dev->video_dev));
@@ -1246,7 +1241,7 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
err = video_register_device(dev->vbi_dev,VFL_TYPE_VBI,
vbi_nr[dev->nr]);
if (err < 0)
- goto fail5;
+ goto fail4;
pr_info("%s: registered device %s\n",
dev->name, video_device_node_name(dev->vbi_dev));
@@ -1257,7 +1252,7 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
err = video_register_device(dev->radio_dev,VFL_TYPE_RADIO,
radio_nr[dev->nr]);
if (err < 0)
- goto fail5;
+ goto fail4;
pr_info("%s: registered device %s\n",
dev->name, video_device_node_name(dev->radio_dev));
}
@@ -1268,7 +1263,7 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
err = v4l2_mc_create_media_graph(dev->media_dev);
if (err) {
pr_err("failed to create media graph\n");
- goto fail5;
+ goto fail4;
}
#endif
/* everything worked */
@@ -1287,17 +1282,15 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
#ifdef CONFIG_MEDIA_CONTROLLER
err = media_device_register(dev->media_dev);
if (err)
- goto fail5;
+ goto fail4;
#endif
return 0;
- fail5:
+ fail4:
saa7134_unregister_video(dev);
saa7134_i2c_unregister(dev);
free_irq(pci_dev->irq, dev);
- fail4:
- vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
fail3:
saa7134_hwfini(dev);
iounmap(dev->lmmio);
@@ -1367,7 +1360,6 @@ static void saa7134_finidev(struct pci_dev *pci_dev)
/* release resources */
free_irq(pci_dev->irq, dev);
- vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
iounmap(dev->lmmio);
release_mem_region(pci_resource_start(pci_dev,0),
pci_resource_len(pci_dev,0));
diff --git a/drivers/media/pci/saa7134/saa7134-dvb.c b/drivers/media/pci/saa7134/saa7134-dvb.c
index a0c5d8e1a..b4916c1af 100644
--- a/drivers/media/pci/saa7134/saa7134-dvb.c
+++ b/drivers/media/pci/saa7134/saa7134-dvb.c
@@ -1238,6 +1238,7 @@ static int dvb_init(struct saa7134_dev *dev)
q->buf_struct_size = sizeof(struct saa7134_buf);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &dev->lock;
+ q->dev = &dev->pci->dev;
ret = vb2_queue_init(q);
if (ret) {
vb2_dvb_dealloc_frontends(&dev->frontends);
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index ca417a454..791a51618 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -295,6 +295,7 @@ static int empress_init(struct saa7134_dev *dev)
q->buf_struct_size = sizeof(struct saa7134_buf);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &dev->lock;
+ q->dev = &dev->pci->dev;
err = vb2_queue_init(q);
if (err)
return err;
diff --git a/drivers/media/pci/saa7134/saa7134-ts.c b/drivers/media/pci/saa7134/saa7134-ts.c
index 0584a2adb..7eaf36a41 100644
--- a/drivers/media/pci/saa7134/saa7134-ts.c
+++ b/drivers/media/pci/saa7134/saa7134-ts.c
@@ -118,7 +118,7 @@ EXPORT_SYMBOL_GPL(saa7134_ts_buffer_prepare);
int saa7134_ts_queue_setup(struct vb2_queue *q,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct saa7134_dmaqueue *dmaq = q->drv_priv;
struct saa7134_dev *dev = dmaq->dev;
@@ -131,7 +131,6 @@ int saa7134_ts_queue_setup(struct vb2_queue *q,
*nbuffers = 3;
*nplanes = 1;
sizes[0] = size;
- alloc_ctxs[0] = dev->alloc_ctx;
return 0;
}
EXPORT_SYMBOL_GPL(saa7134_ts_queue_setup);
diff --git a/drivers/media/pci/saa7134/saa7134-vbi.c b/drivers/media/pci/saa7134/saa7134-vbi.c
index e76da37c4..cf9a31e0a 100644
--- a/drivers/media/pci/saa7134/saa7134-vbi.c
+++ b/drivers/media/pci/saa7134/saa7134-vbi.c
@@ -140,7 +140,7 @@ static int buffer_prepare(struct vb2_buffer *vb2)
static int queue_setup(struct vb2_queue *q,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct saa7134_dmaqueue *dmaq = q->drv_priv;
struct saa7134_dev *dev = dmaq->dev;
@@ -155,7 +155,6 @@ static int queue_setup(struct vb2_queue *q,
*nbuffers = saa7134_buffer_count(size, *nbuffers);
*nplanes = 1;
sizes[0] = size;
- alloc_ctxs[0] = dev->alloc_ctx;
return 0;
}
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index ffa39543e..8a6ebd087 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -963,7 +963,7 @@ static int buffer_prepare(struct vb2_buffer *vb2)
static int queue_setup(struct vb2_queue *q,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct saa7134_dmaqueue *dmaq = q->drv_priv;
struct saa7134_dev *dev = dmaq->dev;
@@ -980,7 +980,6 @@ static int queue_setup(struct vb2_queue *q,
*nbuffers = saa7134_buffer_count(size, *nbuffers);
*nplanes = 1;
sizes[0] = size;
- alloc_ctxs[0] = dev->alloc_ctx;
saa7134_enable_analog_tuner(dev);
@@ -2173,6 +2172,7 @@ int saa7134_video_init1(struct saa7134_dev *dev)
q->buf_struct_size = sizeof(struct saa7134_buf);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &dev->lock;
+ q->dev = &dev->pci->dev;
ret = vb2_queue_init(q);
if (ret)
return ret;
@@ -2191,6 +2191,7 @@ int saa7134_video_init1(struct saa7134_dev *dev)
q->buf_struct_size = sizeof(struct saa7134_buf);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &dev->lock;
+ q->dev = &dev->pci->dev;
ret = vb2_queue_init(q);
if (ret)
return ret;
diff --git a/drivers/media/pci/saa7134/saa7134.h b/drivers/media/pci/saa7134/saa7134.h
index 69a9bbf22..384908352 100644
--- a/drivers/media/pci/saa7134/saa7134.h
+++ b/drivers/media/pci/saa7134/saa7134.h
@@ -610,7 +610,6 @@ struct saa7134_dev {
/* video+ts+vbi capture */
- void *alloc_ctx;
struct saa7134_dmaqueue video_q;
struct vb2_queue video_vbq;
struct saa7134_dmaqueue vbi_q;
@@ -854,7 +853,7 @@ int saa7134_ts_buffer_init(struct vb2_buffer *vb2);
int saa7134_ts_buffer_prepare(struct vb2_buffer *vb2);
int saa7134_ts_queue_setup(struct vb2_queue *q,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[]);
+ unsigned int sizes[], struct device *alloc_devs[]);
int saa7134_ts_start_streaming(struct vb2_queue *vq, unsigned int count);
void saa7134_ts_stop_streaming(struct vb2_queue *vq);
diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
index 1b184c39b..32a353d16 100644
--- a/drivers/media/pci/saa7164/saa7164-encoder.c
+++ b/drivers/media/pci/saa7164/saa7164-encoder.c
@@ -1022,8 +1022,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
dprintk(DBGLVL_ENC, "%s()\n", __func__);
- if (port->type != SAA7164_MPEG_ENCODER)
- BUG();
+ BUG_ON(port->type != SAA7164_MPEG_ENCODER);
/* Sanity check that the PCI configuration space is active */
if (port->hwcfg.BARLocation == 0) {
@@ -1151,8 +1150,7 @@ void saa7164_encoder_unregister(struct saa7164_port *port)
dprintk(DBGLVL_ENC, "%s(port=%d)\n", __func__, port->nr);
- if (port->type != SAA7164_MPEG_ENCODER)
- BUG();
+ BUG_ON(port->type != SAA7164_MPEG_ENCODER);
if (port->v4l_device) {
if (port->v4l_device->minor != -1)
diff --git a/drivers/media/pci/saa7164/saa7164.h b/drivers/media/pci/saa7164/saa7164.h
index 8337524bf..97411b038 100644
--- a/drivers/media/pci/saa7164/saa7164.h
+++ b/drivers/media/pci/saa7164/saa7164.h
@@ -263,10 +263,6 @@ struct saa7164_i2c {
u32 i2c_rc;
};
-struct saa7164_ctrl {
- struct v4l2_queryctrl v;
-};
-
struct saa7164_tvnorm {
char *name;
v4l2_std_id id;
diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
index 67a14c41c..399164314 100644
--- a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
+++ b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
@@ -33,7 +33,7 @@
#include "solo6x10-jpeg.h"
#define MIN_VID_BUFFERS 2
-#define FRAME_BUF_SIZE (196 * 1024)
+#define FRAME_BUF_SIZE (400 * 1024)
#define MP4_QS 16
#define DMA_ALIGN 4096
@@ -664,12 +664,9 @@ static int solo_ring_thread(void *data)
static int solo_enc_queue_setup(struct vb2_queue *q,
unsigned int *num_buffers,
unsigned int *num_planes, unsigned int sizes[],
- void *alloc_ctxs[])
+ struct device *alloc_devs[])
{
- struct solo_enc_dev *solo_enc = vb2_get_drv_priv(q);
-
sizes[0] = FRAME_BUF_SIZE;
- alloc_ctxs[0] = solo_enc->alloc_ctx;
*num_planes = 1;
if (*num_buffers < MIN_VID_BUFFERS)
@@ -1239,11 +1236,6 @@ static struct solo_enc_dev *solo_enc_alloc(struct solo_dev *solo_dev,
return ERR_PTR(-ENOMEM);
hdl = &solo_enc->hdl;
- solo_enc->alloc_ctx = vb2_dma_sg_init_ctx(&solo_dev->pdev->dev);
- if (IS_ERR(solo_enc->alloc_ctx)) {
- ret = PTR_ERR(solo_enc->alloc_ctx);
- goto hdl_free;
- }
v4l2_ctrl_handler_init(hdl, 10);
v4l2_ctrl_new_std(hdl, &solo_ctrl_ops,
V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
@@ -1299,6 +1291,7 @@ static struct solo_enc_dev *solo_enc_alloc(struct solo_dev *solo_dev,
solo_enc->vidq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
solo_enc->vidq.buf_struct_size = sizeof(struct solo_vb2_buf);
solo_enc->vidq.lock = &solo_enc->lock;
+ solo_enc->vidq.dev = &solo_dev->pdev->dev;
ret = vb2_queue_init(&solo_enc->vidq);
if (ret)
goto hdl_free;
@@ -1347,7 +1340,6 @@ pci_free:
solo_enc->desc_items, solo_enc->desc_dma);
hdl_free:
v4l2_ctrl_handler_free(hdl);
- vb2_dma_sg_cleanup_ctx(solo_enc->alloc_ctx);
kfree(solo_enc);
return ERR_PTR(ret);
}
@@ -1362,7 +1354,6 @@ static void solo_enc_free(struct solo_enc_dev *solo_enc)
solo_enc->desc_items, solo_enc->desc_dma);
video_unregister_device(solo_enc->vfd);
v4l2_ctrl_handler_free(&solo_enc->hdl);
- vb2_dma_sg_cleanup_ctx(solo_enc->alloc_ctx);
kfree(solo_enc);
}
diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2.c b/drivers/media/pci/solo6x10/solo6x10-v4l2.c
index 721ff5320..b4be47969 100644
--- a/drivers/media/pci/solo6x10/solo6x10-v4l2.c
+++ b/drivers/media/pci/solo6x10/solo6x10-v4l2.c
@@ -315,12 +315,11 @@ static void solo_stop_thread(struct solo_dev *solo_dev)
static int solo_queue_setup(struct vb2_queue *q,
unsigned int *num_buffers, unsigned int *num_planes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct solo_dev *solo_dev = vb2_get_drv_priv(q);
sizes[0] = solo_image_size(solo_dev);
- alloc_ctxs[0] = solo_dev->alloc_ctx;
*num_planes = 1;
if (*num_buffers < MIN_VID_BUFFERS)
@@ -386,26 +385,24 @@ static int solo_querycap(struct file *file, void *priv,
static int solo_enum_ext_input(struct solo_dev *solo_dev,
struct v4l2_input *input)
{
- static const char * const dispnames_1[] = { "4UP" };
- static const char * const dispnames_2[] = { "4UP-1", "4UP-2" };
- static const char * const dispnames_5[] = {
- "4UP-1", "4UP-2", "4UP-3", "4UP-4", "16UP"
- };
- const char * const *dispnames;
+ int ext = input->index - solo_dev->nr_chans;
+ unsigned int nup, first;
- if (input->index >= (solo_dev->nr_chans + solo_dev->nr_ext))
+ if (ext >= solo_dev->nr_ext)
return -EINVAL;
- if (solo_dev->nr_ext == 5)
- dispnames = dispnames_5;
- else if (solo_dev->nr_ext == 2)
- dispnames = dispnames_2;
- else
- dispnames = dispnames_1;
-
- snprintf(input->name, sizeof(input->name), "Multi %s",
- dispnames[input->index - solo_dev->nr_chans]);
-
+ nup = (ext == 4) ? 16 : 4;
+ first = (ext & 3) << 2; /* first channel in the n-up */
+ snprintf(input->name, sizeof(input->name),
+ "Multi %d-up (cameras %d-%d)",
+ nup, first + 1, first + nup);
+ /* Possible outputs:
+ * Multi 4-up (cameras 1-4)
+ * Multi 4-up (cameras 5-8)
+ * Multi 4-up (cameras 9-12)
+ * Multi 4-up (cameras 13-16)
+ * Multi 16-up (cameras 1-16)
+ */
return 0;
}
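The rewritten naming logic above can be exercised in isolation. Below is a minimal userspace sketch, assuming a 16-channel card (nr_chans = 16, nr_ext = 5, as in the comment's example); it reproduces the five multi-view input names:

#include <stdio.h>

int main(void)
{
	unsigned int nr_chans = 16, nr_ext = 5, ext;

	for (ext = 0; ext < nr_ext; ext++) {
		unsigned int nup = (ext == 4) ? 16 : 4;
		unsigned int first = (ext & 3) << 2; /* first channel in the n-up */

		printf("input %u: Multi %u-up (cameras %u-%u)\n",
		       nr_chans + ext, nup, first + 1, first + nup);
	}
	return 0;
}

Running it prints exactly the five strings listed in the "Possible outputs" comment, which is a quick way to convince yourself the table-free arithmetic is equivalent to the old dispnames arrays.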
@@ -681,16 +678,11 @@ int solo_v4l2_init(struct solo_dev *solo_dev, unsigned nr)
solo_dev->vidq.gfp_flags = __GFP_DMA32 | __GFP_KSWAPD_RECLAIM;
solo_dev->vidq.buf_struct_size = sizeof(struct solo_vb2_buf);
solo_dev->vidq.lock = &solo_dev->lock;
+ solo_dev->vidq.dev = &solo_dev->pdev->dev;
ret = vb2_queue_init(&solo_dev->vidq);
if (ret < 0)
goto fail;
- solo_dev->alloc_ctx = vb2_dma_contig_init_ctx(&solo_dev->pdev->dev);
- if (IS_ERR(solo_dev->alloc_ctx)) {
- dev_err(&solo_dev->pdev->dev, "Can't allocate buffer context");
- return PTR_ERR(solo_dev->alloc_ctx);
- }
-
/* Cycle all the channels and clear */
for (i = 0; i < solo_dev->nr_chans; i++) {
solo_v4l2_set_ch(solo_dev, i);
@@ -718,7 +710,6 @@ int solo_v4l2_init(struct solo_dev *solo_dev, unsigned nr)
fail:
video_device_release(solo_dev->vfd);
- vb2_dma_contig_cleanup_ctx(solo_dev->alloc_ctx);
v4l2_ctrl_handler_free(&solo_dev->disp_hdl);
solo_dev->vfd = NULL;
return ret;
@@ -730,7 +721,6 @@ void solo_v4l2_exit(struct solo_dev *solo_dev)
return;
video_unregister_device(solo_dev->vfd);
- vb2_dma_contig_cleanup_ctx(solo_dev->alloc_ctx);
v4l2_ctrl_handler_free(&solo_dev->disp_hdl);
solo_dev->vfd = NULL;
}
diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
index 4ab6586c0..5bd498735 100644
--- a/drivers/media/pci/solo6x10/solo6x10.h
+++ b/drivers/media/pci/solo6x10/solo6x10.h
@@ -178,7 +178,6 @@ struct solo_enc_dev {
u32 sequence;
struct vb2_queue vidq;
struct list_head vidq_active;
- void *alloc_ctx;
int desc_count;
int desc_nelts;
struct solo_p2m_desc *desc_items;
@@ -269,7 +268,6 @@ struct solo_dev {
/* Buffer handling */
struct vb2_queue vidq;
- struct vb2_alloc_ctx *alloc_ctx;
u32 sequence;
struct task_struct *kthread;
struct mutex lock;
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
index 1fc195f89..aeb2b4e2d 100644
--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
+++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
@@ -111,7 +111,6 @@ static inline struct vip_buffer *to_vip_buffer(struct vb2_v4l2_buffer *vb2)
* @input: input line for video signal (0 or 1)
* @disabled: Device is in power down state
* @slock: for exclusive access to registers
- * @alloc_ctx: context for videobuf2
* @vb_vidq: queue maintained by videobuf2 layer
* @buffer_list: list of buffer in use
* @sequence: sequence number of acquired buffer
@@ -141,7 +140,6 @@ struct sta2x11_vip {
int disabled;
spinlock_t slock;
- struct vb2_alloc_ctx *alloc_ctx;
struct vb2_queue vb_vidq;
struct list_head buffer_list;
unsigned int sequence;
@@ -267,7 +265,7 @@ static void vip_active_buf_next(struct sta2x11_vip *vip)
/* Videobuf2 Operations */
static int queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct sta2x11_vip *vip = vb2_get_drv_priv(vq);
@@ -276,7 +274,6 @@ static int queue_setup(struct vb2_queue *vq,
*nplanes = 1;
sizes[0] = vip->format.sizeimage;
- alloc_ctxs[0] = vip->alloc_ctx;
vip->sequence = 0;
vip->active = NULL;
@@ -861,25 +858,15 @@ static int sta2x11_vip_init_buffer(struct sta2x11_vip *vip)
vip->vb_vidq.ops = &vip_video_qops;
vip->vb_vidq.mem_ops = &vb2_dma_contig_memops;
vip->vb_vidq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ vip->vb_vidq.dev = &vip->pdev->dev;
err = vb2_queue_init(&vip->vb_vidq);
if (err)
return err;
INIT_LIST_HEAD(&vip->buffer_list);
spin_lock_init(&vip->lock);
-
-
- vip->alloc_ctx = vb2_dma_contig_init_ctx(&vip->pdev->dev);
- if (IS_ERR(vip->alloc_ctx)) {
- v4l2_err(&vip->v4l2_dev, "Can't allocate buffer context");
- return PTR_ERR(vip->alloc_ctx);
- }
-
return 0;
}
-static void sta2x11_vip_release_buffer(struct sta2x11_vip *vip)
-{
- vb2_dma_contig_cleanup_ctx(vip->alloc_ctx);
-}
+
static int sta2x11_vip_init_controls(struct sta2x11_vip *vip)
{
/*
@@ -1120,7 +1107,6 @@ vrelease:
video_unregister_device(&vip->video_dev);
free_irq(pdev->irq, vip);
release_buf:
- sta2x11_vip_release_buffer(vip);
pci_disable_msi(pdev);
unmap:
vb2_queue_release(&vip->vb_vidq);
diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
index 4e77618fb..8474528be 100644
--- a/drivers/media/pci/tw68/tw68-core.c
+++ b/drivers/media/pci/tw68/tw68-core.c
@@ -305,19 +305,13 @@ static int tw68_initdev(struct pci_dev *pci_dev,
/* Then do any initialisation wanted before interrupts are on */
tw68_hw_init1(dev);
- dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
- if (IS_ERR(dev->alloc_ctx)) {
- err = PTR_ERR(dev->alloc_ctx);
- goto fail3;
- }
-
/* get irq */
err = devm_request_irq(&pci_dev->dev, pci_dev->irq, tw68_irq,
IRQF_SHARED, dev->name, dev);
if (err < 0) {
pr_err("%s: can't get IRQ %d\n",
dev->name, pci_dev->irq);
- goto fail4;
+ goto fail3;
}
/*
@@ -331,7 +325,7 @@ static int tw68_initdev(struct pci_dev *pci_dev,
if (err < 0) {
pr_err("%s: can't register video device\n",
dev->name);
- goto fail5;
+ goto fail4;
}
tw_setl(TW68_INTMASK, dev->pci_irqmask);
@@ -340,10 +334,8 @@ static int tw68_initdev(struct pci_dev *pci_dev,
return 0;
-fail5:
- video_unregister_device(&dev->vdev);
fail4:
- vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
+ video_unregister_device(&dev->vdev);
fail3:
iounmap(dev->lmmio);
fail2:
@@ -367,7 +359,6 @@ static void tw68_finidev(struct pci_dev *pci_dev)
/* unregister */
video_unregister_device(&dev->vdev);
v4l2_ctrl_handler_free(&dev->hdl);
- vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
/* release resources */
iounmap(dev->lmmio);
diff --git a/drivers/media/pci/tw68/tw68-video.c b/drivers/media/pci/tw68/tw68-video.c
index 07116a87a..5e8212845 100644
--- a/drivers/media/pci/tw68/tw68-video.c
+++ b/drivers/media/pci/tw68/tw68-video.c
@@ -378,7 +378,7 @@ static int tw68_buffer_count(unsigned int size, unsigned int count)
static int tw68_queue_setup(struct vb2_queue *q,
unsigned int *num_buffers, unsigned int *num_planes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct tw68_dev *dev = vb2_get_drv_priv(q);
unsigned tot_bufs = q->num_buffers + *num_buffers;
@@ -388,7 +388,6 @@ static int tw68_queue_setup(struct vb2_queue *q,
tot_bufs = 2;
tot_bufs = tw68_buffer_count(size, tot_bufs);
*num_buffers = tot_bufs - q->num_buffers;
- alloc_ctxs[0] = dev->alloc_ctx;
/*
* We allow create_bufs, but only if the sizeimage is >= as the
* current sizeimage. The tw68_buffer_count calculation becomes quite
@@ -983,6 +982,7 @@ int tw68_video_init2(struct tw68_dev *dev, int video_nr)
dev->vidq.buf_struct_size = sizeof(struct tw68_buf);
dev->vidq.lock = &dev->lock;
dev->vidq.min_buffers_needed = 2;
+ dev->vidq.dev = &dev->pci->dev;
ret = vb2_queue_init(&dev->vidq);
if (ret)
return ret;
diff --git a/drivers/media/pci/tw68/tw68.h b/drivers/media/pci/tw68/tw68.h
index 6c7dcb300..5585c7ee2 100644
--- a/drivers/media/pci/tw68/tw68.h
+++ b/drivers/media/pci/tw68/tw68.h
@@ -165,7 +165,6 @@ struct tw68_dev {
unsigned field;
struct vb2_queue vidq;
struct list_head active;
- void *alloc_ctx;
/* various v4l controls */
const struct tw68_tvnorm *tvnorm; /* video */
diff --git a/drivers/media/pci/tw686x/Kconfig b/drivers/media/pci/tw686x/Kconfig
index fb8536974..34ff37712 100644
--- a/drivers/media/pci/tw686x/Kconfig
+++ b/drivers/media/pci/tw686x/Kconfig
@@ -3,6 +3,8 @@ config VIDEO_TW686X
depends on PCI && VIDEO_DEV && VIDEO_V4L2 && SND
depends on HAS_DMA
select VIDEOBUF2_VMALLOC
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_DMA_SG
select SND_PCM
help
Support for Intersil/Techwell TW686x-based frame grabber cards.
diff --git a/drivers/media/pci/tw686x/tw686x-audio.c b/drivers/media/pci/tw686x/tw686x-audio.c
index 91459ab71..96e444c49 100644
--- a/drivers/media/pci/tw686x/tw686x-audio.c
+++ b/drivers/media/pci/tw686x/tw686x-audio.c
@@ -62,12 +62,22 @@ void tw686x_audio_irq(struct tw686x_dev *dev, unsigned long requests,
}
spin_unlock_irqrestore(&ac->lock, flags);
+ if (!done || !next)
+ continue;
+ /*
+ * Checking for a non-NULL dma_desc[pb]->virt buffer is
+ * the same as checking for memcpy DMA mode.
+ */
desc = &ac->dma_descs[pb];
- if (done && next && desc->virt) {
- memcpy(done->virt, desc->virt, desc->size);
- ac->ptr = done->dma - ac->buf[0].dma;
- snd_pcm_period_elapsed(ac->ss);
+ if (desc->virt) {
+ memcpy(done->virt, desc->virt,
+ dev->period_size);
+ } else {
+ u32 reg = pb ? ADMA_B_ADDR[ch] : ADMA_P_ADDR[ch];
+ reg_write(dev, reg, next->dma);
}
+ ac->ptr = done->dma - ac->buf[0].dma;
+ snd_pcm_period_elapsed(ac->ss);
}
}
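The interrupt handler above follows a P/B ("ping-pong") double-buffer scheme: while the hardware fills one half, the completed half is consumed, either by memcpy'ing out of a fixed DMA buffer or by reprogramming the address registers with the next queued buffer. A minimal userspace sketch of the memcpy-mode consumption, with the hardware and ALSA layers stubbed out (all names here are illustrative stand-ins, not the driver's API):

#include <stdio.h>
#include <string.h>

struct buf { char data[16]; };

int main(void)
{
	struct buf fixed[2];	/* stand-ins for ac->dma_descs[0/1] */
	struct buf period;	/* stand-in for the ALSA period buffer */
	int pb;

	strcpy(fixed[0].data, "P-half");
	strcpy(fixed[1].data, "B-half");

	/* two interrupts: the P half completes, then the B half */
	for (pb = 0; pb < 2; pb++) {
		memcpy(period.data, fixed[pb].data, sizeof(period.data));
		printf("period elapsed from %s buffer: %s\n",
		       pb ? "B" : "P", period.data);
	}
	return 0;
}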
@@ -83,10 +93,9 @@ static int tw686x_pcm_hw_free(struct snd_pcm_substream *ss)
}
/*
- * The audio device rate is global and shared among all
- * capture channels. The driver makes no effort to prevent
- * rate modifications. User is free change the rate, but it
- * means changing the rate for all capture sub-devices.
+ * Audio parameters are global and shared among all
+ * capture channels. The driver prevents changes to
+ * the parameters if any audio channel is capturing.
*/
static const struct snd_pcm_hardware tw686x_capture_hw = {
.info = (SNDRV_PCM_INFO_MMAP |
@@ -99,9 +108,9 @@ static const struct snd_pcm_hardware tw686x_capture_hw = {
.rate_max = 48000,
.channels_min = 1,
.channels_max = 1,
- .buffer_bytes_max = TW686X_AUDIO_PAGE_MAX * TW686X_AUDIO_PAGE_SZ,
- .period_bytes_min = TW686X_AUDIO_PAGE_SZ,
- .period_bytes_max = TW686X_AUDIO_PAGE_SZ,
+ .buffer_bytes_max = TW686X_AUDIO_PAGE_MAX * AUDIO_DMA_SIZE_MAX,
+ .period_bytes_min = AUDIO_DMA_SIZE_MIN,
+ .period_bytes_max = AUDIO_DMA_SIZE_MAX,
.periods_min = TW686X_AUDIO_PERIODS_MIN,
.periods_max = TW686X_AUDIO_PERIODS_MAX,
};
@@ -143,6 +152,14 @@ static int tw686x_pcm_prepare(struct snd_pcm_substream *ss)
int i;
spin_lock_irqsave(&dev->lock, flags);
+ /*
+ * Since the audio parameters are global (i.e. shared across
+ * DMA channels), we need to check that the new params are allowed.
+ */
+ if (((dev->audio_rate != rt->rate) ||
+ (dev->period_size != period_size)) && dev->audio_enabled)
+ goto err_audio_busy;
+
tw686x_disable_channel(dev, AUDIO_CHANNEL_OFFSET + ac->ch);
spin_unlock_irqrestore(&dev->lock, flags);
@@ -156,12 +173,21 @@ static int tw686x_pcm_prepare(struct snd_pcm_substream *ss)
reg_write(dev, AUDIO_CONTROL2, reg);
}
- if (period_size != TW686X_AUDIO_PAGE_SZ ||
- rt->periods < TW686X_AUDIO_PERIODS_MIN ||
- rt->periods > TW686X_AUDIO_PERIODS_MAX) {
- return -EINVAL;
+ if (dev->period_size != period_size) {
+ u32 reg;
+
+ dev->period_size = period_size;
+ reg = reg_read(dev, AUDIO_CONTROL1);
+ reg &= ~(AUDIO_DMA_SIZE_MASK << AUDIO_DMA_SIZE_SHIFT);
+ reg |= period_size << AUDIO_DMA_SIZE_SHIFT;
+
+ reg_write(dev, AUDIO_CONTROL1, reg);
}
+ if (rt->periods < TW686X_AUDIO_PERIODS_MIN ||
+ rt->periods > TW686X_AUDIO_PERIODS_MAX)
+ return -EINVAL;
+
spin_lock_irqsave(&ac->lock, flags);
INIT_LIST_HEAD(&ac->buf_list);
@@ -181,9 +207,19 @@ static int tw686x_pcm_prepare(struct snd_pcm_substream *ss)
ac->curr_bufs[0] = p_buf;
ac->curr_bufs[1] = b_buf;
ac->ptr = 0;
+
+ if (dev->dma_mode != TW686X_DMA_MODE_MEMCPY) {
+ reg_write(dev, ADMA_P_ADDR[ac->ch], p_buf->dma);
+ reg_write(dev, ADMA_B_ADDR[ac->ch], b_buf->dma);
+ }
+
spin_unlock_irqrestore(&ac->lock, flags);
return 0;
+
+err_audio_busy:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return -EBUSY;
}
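The period-size update in the hunk above is a plain read-modify-write of a register bit field. Here is a standalone sketch of that operation, using the AUDIO_DMA_SIZE_* constants added to tw686x-regs.h and a plain variable in place of reg_read()/reg_write():

#include <stdio.h>
#include <stdint.h>

#define AUDIO_DMA_SIZE_SHIFT	19
#define AUDIO_DMA_SIZE_MASK	0x1fffu		/* SZ_8K - 1 */

int main(void)
{
	uint32_t reg = 0x00000001;	/* pretend reg_read(AUDIO_CONTROL1) */
	uint32_t period_size = 4096;	/* AUDIO_DMA_SIZE_MAX */

	/* clear the DMA size field, then insert the new period size */
	reg &= ~(AUDIO_DMA_SIZE_MASK << AUDIO_DMA_SIZE_SHIFT);
	reg |= period_size << AUDIO_DMA_SIZE_SHIFT;

	printf("AUDIO_CONTROL1 = 0x%08x\n", reg);	/* prints 0x80000001 */
	return 0;
}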
static int tw686x_pcm_trigger(struct snd_pcm_substream *ss, int cmd)
@@ -197,6 +233,7 @@ static int tw686x_pcm_trigger(struct snd_pcm_substream *ss, int cmd)
case SNDRV_PCM_TRIGGER_START:
if (ac->curr_bufs[0] && ac->curr_bufs[1]) {
spin_lock_irqsave(&dev->lock, flags);
+ dev->audio_enabled = 1;
tw686x_enable_channel(dev,
AUDIO_CHANNEL_OFFSET + ac->ch);
spin_unlock_irqrestore(&dev->lock, flags);
@@ -209,6 +246,7 @@ static int tw686x_pcm_trigger(struct snd_pcm_substream *ss, int cmd)
break;
case SNDRV_PCM_TRIGGER_STOP:
spin_lock_irqsave(&dev->lock, flags);
+ dev->audio_enabled = 0;
tw686x_disable_channel(dev, AUDIO_CHANNEL_OFFSET + ac->ch);
spin_unlock_irqrestore(&dev->lock, flags);
@@ -266,8 +304,8 @@ static int tw686x_snd_pcm_init(struct tw686x_dev *dev)
return snd_pcm_lib_preallocate_pages_for_all(pcm,
SNDRV_DMA_TYPE_DEV,
snd_dma_pci_data(dev->pci_dev),
- TW686X_AUDIO_PAGE_MAX * TW686X_AUDIO_PAGE_SZ,
- TW686X_AUDIO_PAGE_MAX * TW686X_AUDIO_PAGE_SZ);
+ TW686X_AUDIO_PAGE_MAX * AUDIO_DMA_SIZE_MAX,
+ TW686X_AUDIO_PAGE_MAX * AUDIO_DMA_SIZE_MAX);
}
static void tw686x_audio_dma_free(struct tw686x_dev *dev,
@@ -290,11 +328,19 @@ static int tw686x_audio_dma_alloc(struct tw686x_dev *dev,
{
int pb;
+ /*
+ * In the memcpy DMA mode we allocate a consistent buffer
+ * and use it for the DMA capture. Otherwise, DMA
+ * acts on the ALSA buffers as received in pcm_prepare.
+ */
+ if (dev->dma_mode != TW686X_DMA_MODE_MEMCPY)
+ return 0;
+
for (pb = 0; pb < 2; pb++) {
u32 reg = pb ? ADMA_B_ADDR[ac->ch] : ADMA_P_ADDR[ac->ch];
void *virt;
- virt = pci_alloc_consistent(dev->pci_dev, TW686X_AUDIO_PAGE_SZ,
+ virt = pci_alloc_consistent(dev->pci_dev, AUDIO_DMA_SIZE_MAX,
&ac->dma_descs[pb].phys);
if (!virt) {
dev_err(&dev->pci_dev->dev,
@@ -303,7 +349,7 @@ static int tw686x_audio_dma_alloc(struct tw686x_dev *dev,
return -ENOMEM;
}
ac->dma_descs[pb].virt = virt;
- ac->dma_descs[pb].size = TW686X_AUDIO_PAGE_SZ;
+ ac->dma_descs[pb].size = AUDIO_DMA_SIZE_MAX;
reg_write(dev, reg, ac->dma_descs[pb].phys);
}
return 0;
@@ -334,12 +380,8 @@ int tw686x_audio_init(struct tw686x_dev *dev)
struct snd_card *card;
int err, ch;
- /*
- * AUDIO_CONTROL1
- * DMA byte length [31:19] = 4096 (i.e. ALSA period)
- * External audio enable [0] = enabled
- */
- reg_write(dev, AUDIO_CONTROL1, 0x80000001);
+ /* Enable external audio */
+ reg_write(dev, AUDIO_CONTROL1, BIT(0));
err = snd_card_new(&pci_dev->dev, SNDRV_DEFAULT_IDX1,
SNDRV_DEFAULT_STR1,
diff --git a/drivers/media/pci/tw686x/tw686x-core.c b/drivers/media/pci/tw686x/tw686x-core.c
index cf53b0e97..71a0453b1 100644
--- a/drivers/media/pci/tw686x/tw686x-core.c
+++ b/drivers/media/pci/tw686x/tw686x-core.c
@@ -21,12 +21,14 @@
* under stress testings it has been found that the machine can
* freeze completely if DMA registers are programmed while streaming
* is active.
- * This driver tries to access hardware registers as infrequently
- * as possible by:
- * i. allocating fixed DMA buffers and memcpy'ing into
- * vmalloc'ed buffers
- * ii. using a timer to mitigate the rate of DMA reset operations,
- * on DMA channels error.
+ *
+ * Therefore, the driver implements a dma_mode called 'memcpy', which
+ * avoids cycling the DMA buffers and instead allocates extra DMA buffers,
+ * then copies into vmalloc'ed user buffers.
+ *
+ * In addition to this, when streaming is on, the driver tries to access
+ * hardware registers as infrequently as possible. This is done by using
+ * a timer to limit the rate at which DMA is reset on DMA channel errors.
*/
#include <linux/init.h>
@@ -55,6 +57,42 @@ static u32 dma_interval = 0x00098968;
module_param(dma_interval, int, 0444);
MODULE_PARM_DESC(dma_interval, "Minimum time span for DMA interrupting host");
+static unsigned int dma_mode = TW686X_DMA_MODE_MEMCPY;
+static const char *dma_mode_name(unsigned int mode)
+{
+ switch (mode) {
+ case TW686X_DMA_MODE_MEMCPY:
+ return "memcpy";
+ case TW686X_DMA_MODE_CONTIG:
+ return "contig";
+ case TW686X_DMA_MODE_SG:
+ return "sg";
+ default:
+ return "unknown";
+ }
+}
+
+static int tw686x_dma_mode_get(char *buffer, struct kernel_param *kp)
+{
+ return sprintf(buffer, "%s", dma_mode_name(dma_mode));
+}
+
+static int tw686x_dma_mode_set(const char *val, struct kernel_param *kp)
+{
+ if (!strcasecmp(val, dma_mode_name(TW686X_DMA_MODE_MEMCPY)))
+ dma_mode = TW686X_DMA_MODE_MEMCPY;
+ else if (!strcasecmp(val, dma_mode_name(TW686X_DMA_MODE_CONTIG)))
+ dma_mode = TW686X_DMA_MODE_CONTIG;
+ else if (!strcasecmp(val, dma_mode_name(TW686X_DMA_MODE_SG)))
+ dma_mode = TW686X_DMA_MODE_SG;
+ else
+ return -EINVAL;
+ return 0;
+}
+module_param_call(dma_mode, tw686x_dma_mode_set, tw686x_dma_mode_get,
+ &dma_mode, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(dma_mode, "DMA operation mode (memcpy/contig/sg, default=memcpy)");
+
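Because the parameter is registered with S_IRUGO|S_IWUSR, it should be exposed at /sys/module/tw686x/parameters/dma_mode once the module is loaded, and can also be chosen at load time (e.g. dma_mode=sg on the module command line). A minimal userspace sketch that reads the current mode back, assuming the module name "tw686x":

#include <stdio.h>

int main(void)
{
	char mode[16];
	FILE *f = fopen("/sys/module/tw686x/parameters/dma_mode", "r");

	if (!f) {
		perror("dma_mode");
		return 1;
	}
	if (fgets(mode, sizeof(mode), f))
		printf("current DMA mode: %s\n", mode);
	fclose(f);
	return 0;
}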
void tw686x_disable_channel(struct tw686x_dev *dev, unsigned int channel)
{
u32 dma_en = reg_read(dev, DMA_CHANNEL_ENABLE);
@@ -212,6 +250,7 @@ static int tw686x_probe(struct pci_dev *pci_dev,
if (!dev)
return -ENOMEM;
dev->type = pci_id->driver_data;
+ dev->dma_mode = dma_mode;
sprintf(dev->name, "tw%04X", pci_dev->device);
dev->video_channels = kcalloc(max_channels(dev),
@@ -228,9 +267,10 @@ static int tw686x_probe(struct pci_dev *pci_dev,
goto free_video;
}
- pr_info("%s: PCI %s, IRQ %d, MMIO 0x%lx\n", dev->name,
+ pr_info("%s: PCI %s, IRQ %d, MMIO 0x%lx (%s mode)\n", dev->name,
pci_name(pci_dev), pci_dev->irq,
- (unsigned long)pci_resource_start(pci_dev, 0));
+ (unsigned long)pci_resource_start(pci_dev, 0),
+ dma_mode_name(dma_mode));
dev->pci_dev = pci_dev;
if (pci_enable_device(pci_dev)) {
diff --git a/drivers/media/pci/tw686x/tw686x-regs.h b/drivers/media/pci/tw686x/tw686x-regs.h
index fcef586a4..15a956642 100644
--- a/drivers/media/pci/tw686x/tw686x-regs.h
+++ b/drivers/media/pci/tw686x/tw686x-regs.h
@@ -105,6 +105,10 @@
0x2d0, 0x2d1, 0x2d2, 0x2d3 })
#define SYS_MODE_DMA_SHIFT 13
+#define AUDIO_DMA_SIZE_SHIFT 19
+#define AUDIO_DMA_SIZE_MIN SZ_512
+#define AUDIO_DMA_SIZE_MAX SZ_4K
+#define AUDIO_DMA_SIZE_MASK (SZ_8K - 1)
#define DMA_CMD_ENABLE BIT(31)
#define INT_STATUS_DMA_TOUT BIT(17)
@@ -119,4 +123,9 @@
#define TW686X_STD_PAL_CN 5
#define TW686X_STD_PAL_60 6
+#define TW686X_FIELD_MODE 0x3
+#define TW686X_FRAME_MODE 0x2
+/* 0x1 is reserved */
+#define TW686X_SG_MODE 0x0
+
#define TW686X_FIFO_ERROR(x) (x & ~(0xff))
diff --git a/drivers/media/pci/tw686x/tw686x-video.c b/drivers/media/pci/tw686x/tw686x-video.c
index 253e10823..cdb16de77 100644
--- a/drivers/media/pci/tw686x/tw686x-video.c
+++ b/drivers/media/pci/tw686x/tw686x-video.c
@@ -19,6 +19,8 @@
#include <linux/slab.h>
#include <media/v4l2-common.h>
#include <media/v4l2-event.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-dma-sg.h>
#include <media/videobuf2-vmalloc.h>
#include "tw686x.h"
#include "tw686x-regs.h"
@@ -26,6 +28,11 @@
#define TW686X_INPUTS_PER_CH 4
#define TW686X_VIDEO_WIDTH 720
#define TW686X_VIDEO_HEIGHT(id) ((id & V4L2_STD_525_60) ? 480 : 576)
+#define TW686X_MAX_FPS(id) ((id & V4L2_STD_525_60) ? 30 : 25)
+
+#define TW686X_MAX_SG_ENTRY_SIZE 4096
+#define TW686X_MAX_SG_DESC_COUNT 256 /* PAL 720x576 needs 203 4-KB pages */
+#define TW686X_SG_TABLE_SIZE (TW686X_MAX_SG_DESC_COUNT * sizeof(struct tw686x_sg_desc))
static const struct tw686x_format formats[] = {
{
@@ -43,53 +50,367 @@ static const struct tw686x_format formats[] = {
}
};
-static unsigned int tw686x_fields_map(v4l2_std_id std, unsigned int fps)
+static void tw686x_buf_done(struct tw686x_video_channel *vc,
+ unsigned int pb)
{
- static const unsigned int map[15] = {
- 0x00000000, 0x00000001, 0x00004001, 0x00104001, 0x00404041,
- 0x01041041, 0x01104411, 0x01111111, 0x04444445, 0x04511445,
- 0x05145145, 0x05151515, 0x05515455, 0x05551555, 0x05555555
- };
+ struct tw686x_dma_desc *desc = &vc->dma_descs[pb];
+ struct tw686x_dev *dev = vc->dev;
+ struct vb2_v4l2_buffer *vb;
+ struct vb2_buffer *vb2_buf;
- static const unsigned int std_625_50[26] = {
- 0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7,
- 8, 8, 9, 10, 10, 11, 11, 12, 13, 13, 14, 14, 0
- };
+ if (vc->curr_bufs[pb]) {
+ vb = &vc->curr_bufs[pb]->vb;
- static const unsigned int std_525_60[31] = {
- 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
- 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 0, 0
- };
+ vb->field = dev->dma_ops->field;
+ vb->sequence = vc->sequence++;
+ vb2_buf = &vb->vb2_buf;
- unsigned int i;
+ if (dev->dma_mode == TW686X_DMA_MODE_MEMCPY)
+ memcpy(vb2_plane_vaddr(vb2_buf, 0), desc->virt,
+ desc->size);
+ vb2_buf->timestamp = ktime_get_ns();
+ vb2_buffer_done(vb2_buf, VB2_BUF_STATE_DONE);
+ }
+
+ vc->pb = !pb;
+}
+
+/*
+ * We can call this even when alloc_dma failed for the given channel
+ */
+static void tw686x_memcpy_dma_free(struct tw686x_video_channel *vc,
+ unsigned int pb)
+{
+ struct tw686x_dma_desc *desc = &vc->dma_descs[pb];
+ struct tw686x_dev *dev = vc->dev;
+ struct pci_dev *pci_dev;
+ unsigned long flags;
+
+ /* Check device presence. Shouldn't really happen! */
+ spin_lock_irqsave(&dev->lock, flags);
+ pci_dev = dev->pci_dev;
+ spin_unlock_irqrestore(&dev->lock, flags);
+ if (!pci_dev) {
+ WARN(1, "trying to deallocate on missing device\n");
+ return;
+ }
+
+ if (desc->virt) {
+ pci_free_consistent(dev->pci_dev, desc->size,
+ desc->virt, desc->phys);
+ desc->virt = NULL;
+ }
+}
+
+static int tw686x_memcpy_dma_alloc(struct tw686x_video_channel *vc,
+ unsigned int pb)
+{
+ struct tw686x_dev *dev = vc->dev;
+ u32 reg = pb ? VDMA_B_ADDR[vc->ch] : VDMA_P_ADDR[vc->ch];
+ unsigned int len;
+ void *virt;
+
+ WARN(vc->dma_descs[pb].virt,
+ "Allocating buffer but previous still here\n");
+
+ len = (vc->width * vc->height * vc->format->depth) >> 3;
+ virt = pci_alloc_consistent(dev->pci_dev, len,
+ &vc->dma_descs[pb].phys);
+ if (!virt) {
+ v4l2_err(&dev->v4l2_dev,
+ "dma%d: unable to allocate %s-buffer\n",
+ vc->ch, pb ? "B" : "P");
+ return -ENOMEM;
+ }
+ vc->dma_descs[pb].size = len;
+ vc->dma_descs[pb].virt = virt;
+ reg_write(dev, reg, vc->dma_descs[pb].phys);
+
+ return 0;
+}
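As a quick sanity check of the size arithmetic above (len = width * height * depth / 8, assuming a 16-bit packed 4:2:2 format such as UYVY), the following standalone program also reproduces the 203 4-KB pages mentioned in the TW686X_MAX_SG_DESC_COUNT comment:

#include <stdio.h>

int main(void)
{
	unsigned int depth = 16;	/* bits per pixel, e.g. packed UYVY */
	unsigned int ntsc = (720 * 480 * depth) / 8;
	unsigned int pal = (720 * 576 * depth) / 8;

	printf("NTSC 720x480: %u bytes\n", ntsc);	/* 691200 */
	printf("PAL  720x576: %u bytes (%u 4-KB pages)\n",
	       pal, (pal + 4095) / 4096);		/* 829440, 203 */
	return 0;
}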
+
+static void tw686x_memcpy_buf_refill(struct tw686x_video_channel *vc,
+ unsigned int pb)
+{
+ struct tw686x_v4l2_buf *buf;
+
+ while (!list_empty(&vc->vidq_queued)) {
+
+ buf = list_first_entry(&vc->vidq_queued,
+ struct tw686x_v4l2_buf, list);
+ list_del(&buf->list);
+
+ vc->curr_bufs[pb] = buf;
+ return;
+ }
+ vc->curr_bufs[pb] = NULL;
+}
- if (std & V4L2_STD_525_60) {
- if (fps >= ARRAY_SIZE(std_525_60))
- fps = 30;
- i = std_525_60[fps];
+static const struct tw686x_dma_ops memcpy_dma_ops = {
+ .alloc = tw686x_memcpy_dma_alloc,
+ .free = tw686x_memcpy_dma_free,
+ .buf_refill = tw686x_memcpy_buf_refill,
+ .mem_ops = &vb2_vmalloc_memops,
+ .hw_dma_mode = TW686X_FRAME_MODE,
+ .field = V4L2_FIELD_INTERLACED,
+};
+
+static void tw686x_contig_buf_refill(struct tw686x_video_channel *vc,
+ unsigned int pb)
+{
+ struct tw686x_v4l2_buf *buf;
+
+ while (!list_empty(&vc->vidq_queued)) {
+ u32 reg = pb ? VDMA_B_ADDR[vc->ch] : VDMA_P_ADDR[vc->ch];
+ dma_addr_t phys;
+
+ buf = list_first_entry(&vc->vidq_queued,
+ struct tw686x_v4l2_buf, list);
+ list_del(&buf->list);
+
+ phys = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
+ reg_write(vc->dev, reg, phys);
+
+ buf->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
+ vc->curr_bufs[pb] = buf;
+ return;
+ }
+ vc->curr_bufs[pb] = NULL;
+}
+
+static const struct tw686x_dma_ops contig_dma_ops = {
+ .buf_refill = tw686x_contig_buf_refill,
+ .mem_ops = &vb2_dma_contig_memops,
+ .hw_dma_mode = TW686X_FRAME_MODE,
+ .field = V4L2_FIELD_INTERLACED,
+};
+
+static int tw686x_sg_desc_fill(struct tw686x_sg_desc *descs,
+ struct tw686x_v4l2_buf *buf,
+ unsigned int buf_len)
+{
+ struct sg_table *vbuf = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);
+ unsigned int len, entry_len;
+ struct scatterlist *sg;
+ int i, count;
+
+ /* Clear the scatter-gather table */
+ memset(descs, 0, TW686X_SG_TABLE_SIZE);
+
+ count = 0;
+ for_each_sg(vbuf->sgl, sg, vbuf->nents, i) {
+ dma_addr_t phys = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+
+ while (len && buf_len) {
+
+ if (count == TW686X_MAX_SG_DESC_COUNT)
+ return -ENOMEM;
+
+ entry_len = min_t(unsigned int, len,
+ TW686X_MAX_SG_ENTRY_SIZE);
+ entry_len = min_t(unsigned int, entry_len, buf_len);
+ descs[count].phys = cpu_to_le32(phys);
+ descs[count++].flags_length =
+ cpu_to_le32(BIT(30) | entry_len);
+ phys += entry_len;
+ len -= entry_len;
+ buf_len -= entry_len;
+ }
+
+ if (!buf_len)
+ return 0;
+ }
+
+ return -ENOMEM;
+}
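tw686x_sg_desc_fill() above splits each scatterlist segment into hardware descriptors of at most TW686X_MAX_SG_ENTRY_SIZE bytes, stopping once the frame (buf_len) is covered. A runnable userspace sketch of the same chunking loop, with made-up segment lengths standing in for sg_dma_len():

#include <stdio.h>

#define MAX_ENTRY 4096	/* TW686X_MAX_SG_ENTRY_SIZE */

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* made-up sg_dma_len() values for three segments */
	unsigned int segs[] = { 10000, 6000, 9000 };
	unsigned int buf_len = 20000;	/* made-up frame size */
	unsigned int i, count = 0;

	for (i = 0; i < 3 && buf_len; i++) {
		unsigned int len = segs[i];

		while (len && buf_len) {
			unsigned int entry = min_u(min_u(len, MAX_ENTRY),
						   buf_len);

			printf("desc %u: %u bytes\n", count++, entry);
			len -= entry;
			buf_len -= entry;
		}
	}
	return 0;
}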
+
+static void tw686x_sg_buf_refill(struct tw686x_video_channel *vc,
+ unsigned int pb)
+{
+ struct tw686x_dev *dev = vc->dev;
+ struct tw686x_v4l2_buf *buf;
+
+ while (!list_empty(&vc->vidq_queued)) {
+ unsigned int buf_len;
+
+ buf = list_first_entry(&vc->vidq_queued,
+ struct tw686x_v4l2_buf, list);
+ list_del(&buf->list);
+
+ buf_len = (vc->width * vc->height * vc->format->depth) >> 3;
+ if (tw686x_sg_desc_fill(vc->sg_descs[pb], buf, buf_len)) {
+ v4l2_err(&dev->v4l2_dev,
+ "dma%d: unable to fill %s-buffer\n",
+ vc->ch, pb ? "B" : "P");
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ continue;
+ }
+
+ buf->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
+ vc->curr_bufs[pb] = buf;
+ return;
+ }
+
+ vc->curr_bufs[pb] = NULL;
+}
+
+static void tw686x_sg_dma_free(struct tw686x_video_channel *vc,
+ unsigned int pb)
+{
+ struct tw686x_dma_desc *desc = &vc->dma_descs[pb];
+ struct tw686x_dev *dev = vc->dev;
+
+ if (desc->size) {
+ pci_free_consistent(dev->pci_dev, desc->size,
+ desc->virt, desc->phys);
+ desc->virt = NULL;
+ }
+
+ vc->sg_descs[pb] = NULL;
+}
+
+static int tw686x_sg_dma_alloc(struct tw686x_video_channel *vc,
+ unsigned int pb)
+{
+ struct tw686x_dma_desc *desc = &vc->dma_descs[pb];
+ struct tw686x_dev *dev = vc->dev;
+ u32 reg = pb ? DMA_PAGE_TABLE1_ADDR[vc->ch] :
+ DMA_PAGE_TABLE0_ADDR[vc->ch];
+ void *virt;
+
+ if (desc->size) {
+
+ virt = pci_alloc_consistent(dev->pci_dev, desc->size,
+ &desc->phys);
+ if (!virt) {
+ v4l2_err(&dev->v4l2_dev,
+ "dma%d: unable to allocate %s-buffer\n",
+ vc->ch, pb ? "B" : "P");
+ return -ENOMEM;
+ }
+ desc->virt = virt;
+ reg_write(dev, reg, desc->phys);
} else {
- if (fps >= ARRAY_SIZE(std_625_50))
- fps = 25;
- i = std_625_50[fps];
+ virt = dev->video_channels[0].dma_descs[pb].virt +
+ vc->ch * TW686X_SG_TABLE_SIZE;
}
- return map[i];
+ vc->sg_descs[pb] = virt;
+ return 0;
+}
+
+static int tw686x_sg_setup(struct tw686x_dev *dev)
+{
+ unsigned int sg_table_size, pb, ch, channels;
+
+ if (is_second_gen(dev)) {
+ /*
+ * TW6865/TW6869: each channel needs a pair of
+ * P-B descriptor tables.
+ */
+ channels = max_channels(dev);
+ sg_table_size = TW686X_SG_TABLE_SIZE;
+ } else {
+ /*
+ * TW6864/TW6868: we need to allocate a pair of
+ * P-B descriptor tables, common for all channels.
+ * Each table will be bigger than 4 KB.
+ */
+ channels = 1;
+ sg_table_size = max_channels(dev) * TW686X_SG_TABLE_SIZE;
+ }
+
+ for (ch = 0; ch < channels; ch++) {
+ struct tw686x_video_channel *vc = &dev->video_channels[ch];
+
+ for (pb = 0; pb < 2; pb++)
+ vc->dma_descs[pb].size = sg_table_size;
+ }
+
+ return 0;
+}
+
+static const struct tw686x_dma_ops sg_dma_ops = {
+ .setup = tw686x_sg_setup,
+ .alloc = tw686x_sg_dma_alloc,
+ .free = tw686x_sg_dma_free,
+ .buf_refill = tw686x_sg_buf_refill,
+ .mem_ops = &vb2_dma_sg_memops,
+ .hw_dma_mode = TW686X_SG_MODE,
+ .field = V4L2_FIELD_SEQ_TB,
+};
+
+static const unsigned int fps_map[15] = {
+ /*
+ * bit 31 enables selecting the field control register
+ * bits 0-29 are a bitmask with fields that will be output.
+ * For NTSC (and PAL-M, PAL-60), all 30 bits are used.
+ * For other PAL standards, only the first 25 bits are used.
+ */
+ 0x00000000, /* output all fields */
+ 0x80000006, /* 2 fps (60Hz), 2 fps (50Hz) */
+ 0x80018006, /* 4 fps (60Hz), 4 fps (50Hz) */
+ 0x80618006, /* 6 fps (60Hz), 6 fps (50Hz) */
+ 0x81818186, /* 8 fps (60Hz), 8 fps (50Hz) */
+ 0x86186186, /* 10 fps (60Hz), 8 fps (50Hz) */
+ 0x86619866, /* 12 fps (60Hz), 10 fps (50Hz) */
+ 0x86666666, /* 14 fps (60Hz), 12 fps (50Hz) */
+ 0x9999999e, /* 16 fps (60Hz), 14 fps (50Hz) */
+ 0x99e6799e, /* 18 fps (60Hz), 16 fps (50Hz) */
+ 0x9e79e79e, /* 20 fps (60Hz), 16 fps (50Hz) */
+ 0x9e7e7e7e, /* 22 fps (60Hz), 18 fps (50Hz) */
+ 0x9fe7f9fe, /* 24 fps (60Hz), 20 fps (50Hz) */
+ 0x9ffe7ffe, /* 26 fps (60Hz), 22 fps (50Hz) */
+ 0x9ffffffe, /* 28 fps (60Hz), 24 fps (50Hz) */
+};
+
+static unsigned int tw686x_real_fps(unsigned int index, unsigned int max_fps)
+{
+ unsigned long mask;
+
+ if (!index || index >= ARRAY_SIZE(fps_map))
+ return max_fps;
+
+ mask = GENMASK(max_fps - 1, 0);
+ return hweight_long(fps_map[index] & mask);
+}
+
+static unsigned int tw686x_fps_idx(unsigned int fps, unsigned int max_fps)
+{
+ unsigned int idx, real_fps;
+ int delta;
+
+ /* First guess */
+ idx = (12 + 15 * fps) / max_fps;
+
+ /* The minimum possible frame rate is 2 frames per second */
+ if (!idx)
+ return 1;
+
+ /* If the real rate differs from the request by more than 1 fps, adjust */
+ real_fps = tw686x_real_fps(idx, max_fps);
+ delta = real_fps - fps;
+ if (delta < -1)
+ idx++;
+ else if (delta > 1)
+ idx--;
+
+ /* Max framerate */
+ if (idx >= 15)
+ return 0;
+
+ return idx;
}
static void tw686x_set_framerate(struct tw686x_video_channel *vc,
unsigned int fps)
{
- unsigned int map;
-
- if (vc->fps == fps)
- return;
+ unsigned int i;
- map = tw686x_fields_map(vc->video_standard, fps) << 1;
- map |= map << 1;
- if (map > 0)
- map |= BIT(31);
- reg_write(vc->dev, VIDEO_FIELD_CTRL[vc->ch], map);
- vc->fps = fps;
+ i = tw686x_fps_idx(fps, TW686X_MAX_FPS(vc->video_standard));
+ reg_write(vc->dev, VIDEO_FIELD_CTRL[vc->ch], fps_map[i]);
+ vc->fps = tw686x_real_fps(i, TW686X_MAX_FPS(vc->video_standard));
}
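The decimation table can be sanity-checked by counting set bits, which is exactly what tw686x_real_fps() does with hweight_long(): the effective rate is the number of fields kept within the standard's window (30 bits for 60 Hz standards, 25 for 50 Hz). For example, a request for 10 fps on a 60 Hz standard gives idx = (12 + 15 * 10) / 30 = 5, and fps_map[5] has 10 bits set in the 30-bit window. A runnable check, using GCC/Clang's __builtin_popcount() in place of hweight_long():

#include <stdio.h>

static const unsigned int fps_map[15] = {
	0x00000000, 0x80000006, 0x80018006, 0x80618006, 0x81818186,
	0x86186186, 0x86619866, 0x86666666, 0x9999999e, 0x99e6799e,
	0x9e79e79e, 0x9e7e7e7e, 0x9fe7f9fe, 0x9ffe7ffe, 0x9ffffffe,
};

int main(void)
{
	unsigned int idx;

	for (idx = 1; idx < 15; idx++)
		printf("index %2u: %2d fps @60Hz, %2d fps @50Hz\n", idx,
		       __builtin_popcount(fps_map[idx] & 0x3fffffff),
		       __builtin_popcount(fps_map[idx] & 0x01ffffff));
	return 0;
}

The output matches the per-entry comments in the table above, line for line.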
static const struct tw686x_format *format_by_fourcc(unsigned int fourcc)
@@ -104,7 +425,7 @@ static const struct tw686x_format *format_by_fourcc(unsigned int fourcc)
static int tw686x_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct tw686x_video_channel *vc = vb2_get_drv_priv(vq);
unsigned int szimage =
@@ -152,75 +473,6 @@ static void tw686x_buf_queue(struct vb2_buffer *vb)
spin_unlock_irqrestore(&vc->qlock, flags);
}
-/*
- * We can call this even when alloc_dma failed for the given channel
- */
-static void tw686x_free_dma(struct tw686x_video_channel *vc, unsigned int pb)
-{
- struct tw686x_dma_desc *desc = &vc->dma_descs[pb];
- struct tw686x_dev *dev = vc->dev;
- struct pci_dev *pci_dev;
- unsigned long flags;
-
- /* Check device presence. Shouldn't really happen! */
- spin_lock_irqsave(&dev->lock, flags);
- pci_dev = dev->pci_dev;
- spin_unlock_irqrestore(&dev->lock, flags);
- if (!pci_dev) {
- WARN(1, "trying to deallocate on missing device\n");
- return;
- }
-
- if (desc->virt) {
- pci_free_consistent(dev->pci_dev, desc->size,
- desc->virt, desc->phys);
- desc->virt = NULL;
- }
-}
-
-static int tw686x_alloc_dma(struct tw686x_video_channel *vc, unsigned int pb)
-{
- struct tw686x_dev *dev = vc->dev;
- u32 reg = pb ? VDMA_B_ADDR[vc->ch] : VDMA_P_ADDR[vc->ch];
- unsigned int len;
- void *virt;
-
- WARN(vc->dma_descs[pb].virt,
- "Allocating buffer but previous still here\n");
-
- len = (vc->width * vc->height * vc->format->depth) >> 3;
- virt = pci_alloc_consistent(dev->pci_dev, len,
- &vc->dma_descs[pb].phys);
- if (!virt) {
- v4l2_err(&dev->v4l2_dev,
- "dma%d: unable to allocate %s-buffer\n",
- vc->ch, pb ? "B" : "P");
- return -ENOMEM;
- }
- vc->dma_descs[pb].size = len;
- vc->dma_descs[pb].virt = virt;
- reg_write(dev, reg, vc->dma_descs[pb].phys);
-
- return 0;
-}
-
-static void tw686x_buffer_refill(struct tw686x_video_channel *vc,
- unsigned int pb)
-{
- struct tw686x_v4l2_buf *buf;
-
- while (!list_empty(&vc->vidq_queued)) {
-
- buf = list_first_entry(&vc->vidq_queued,
- struct tw686x_v4l2_buf, list);
- list_del(&buf->list);
-
- vc->curr_bufs[pb] = buf;
- return;
- }
- vc->curr_bufs[pb] = NULL;
-}
-
static void tw686x_clear_queue(struct tw686x_video_channel *vc,
enum vb2_buffer_state state)
{
@@ -262,7 +514,8 @@ static int tw686x_start_streaming(struct vb2_queue *vq, unsigned int count)
spin_lock_irqsave(&vc->qlock, flags);
/* Sanity check */
- if (!vc->dma_descs[0].virt || !vc->dma_descs[1].virt) {
+ if (dev->dma_mode == TW686X_DMA_MODE_MEMCPY &&
+ (!vc->dma_descs[0].virt || !vc->dma_descs[1].virt)) {
spin_unlock_irqrestore(&vc->qlock, flags);
v4l2_err(&dev->v4l2_dev,
"video%d: refusing to start without DMA buffers\n",
@@ -272,7 +525,7 @@ static int tw686x_start_streaming(struct vb2_queue *vq, unsigned int count)
}
for (pb = 0; pb < 2; pb++)
- tw686x_buffer_refill(vc, pb);
+ dev->dma_ops->buf_refill(vc, pb);
spin_unlock_irqrestore(&vc->qlock, flags);
vc->sequence = 0;
@@ -375,10 +628,11 @@ static int tw686x_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct tw686x_video_channel *vc = video_drvdata(file);
+ struct tw686x_dev *dev = vc->dev;
f->fmt.pix.width = vc->width;
f->fmt.pix.height = vc->height;
- f->fmt.pix.field = V4L2_FIELD_INTERLACED;
+ f->fmt.pix.field = dev->dma_ops->field;
f->fmt.pix.pixelformat = vc->format->fourcc;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
f->fmt.pix.bytesperline = (f->fmt.pix.width * vc->format->depth) / 8;
@@ -390,6 +644,7 @@ static int tw686x_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct tw686x_video_channel *vc = video_drvdata(file);
+ struct tw686x_dev *dev = vc->dev;
unsigned int video_height = TW686X_VIDEO_HEIGHT(vc->video_standard);
const struct tw686x_format *format;
@@ -412,7 +667,7 @@ static int tw686x_try_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.bytesperline = (f->fmt.pix.width * format->depth) / 8;
f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
- f->fmt.pix.field = V4L2_FIELD_INTERLACED;
+ f->fmt.pix.field = dev->dma_ops->field;
return 0;
}
@@ -421,6 +676,7 @@ static int tw686x_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct tw686x_video_channel *vc = video_drvdata(file);
+ struct tw686x_dev *dev = vc->dev;
u32 val, width, line_width, height;
unsigned long bitsperframe;
int err, pb;
@@ -438,15 +694,16 @@ static int tw686x_s_fmt_vid_cap(struct file *file, void *priv,
vc->height = f->fmt.pix.height;
/* We need new DMA buffers if the framesize has changed */
- if (bitsperframe != vc->width * vc->height * vc->format->depth) {
+ if (dev->dma_ops->alloc &&
+ bitsperframe != vc->width * vc->height * vc->format->depth) {
for (pb = 0; pb < 2; pb++)
- tw686x_free_dma(vc, pb);
+ dev->dma_ops->free(vc, pb);
for (pb = 0; pb < 2; pb++) {
- err = tw686x_alloc_dma(vc, pb);
+ err = dev->dma_ops->alloc(vc, pb);
if (err) {
if (pb > 0)
- tw686x_free_dma(vc, 0);
+ dev->dma_ops->free(vc, 0);
return err;
}
}
@@ -464,6 +721,19 @@ static int tw686x_s_fmt_vid_cap(struct file *file, void *priv,
else
val &= ~BIT(24);
+ val &= ~0x7ffff;
+
+ /* Program the DMA scatter-gather */
+ if (dev->dma_mode == TW686X_DMA_MODE_SG) {
+ u32 start_idx, end_idx;
+
+ start_idx = is_second_gen(dev) ?
+ 0 : vc->ch * TW686X_MAX_SG_DESC_COUNT;
+ end_idx = start_idx + TW686X_MAX_SG_DESC_COUNT - 1;
+
+ val |= (end_idx << 10) | start_idx;
+ }
+
val &= ~(0x7 << 20);
val |= vc->format->mode << 20;
reg_write(vc->dev, VDMA_CHANNEL_CONFIG[vc->ch], val);
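For first-generation chips, where all channels share one descriptor table, each channel owns a TW686X_MAX_SG_DESC_COUNT-sized window, and the hunk above packs the window into VDMA_CHANNEL_CONFIG with the start index in the low bits and the end index shifted left by 10. A small sketch of the resulting windows, assuming four channels:

#include <stdio.h>

#define DESC_COUNT 256	/* TW686X_MAX_SG_DESC_COUNT */

int main(void)
{
	unsigned int ch;

	for (ch = 0; ch < 4; ch++) {
		unsigned int start = ch * DESC_COUNT;
		unsigned int end = start + DESC_COUNT - 1;

		printf("ch%u: descriptors %4u-%4u, config bits 0x%06x\n",
		       ch, start, end, (end << 10) | start);
	}
	return 0;
}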
@@ -540,6 +810,12 @@ static int tw686x_s_std(struct file *file, void *priv, v4l2_std_id id)
ret = tw686x_g_fmt_vid_cap(file, priv, &f);
if (!ret)
tw686x_s_fmt_vid_cap(file, priv, &f);
+
+ /*
+ * Frame decimation depends on the chosen standard,
+ * so reset it to the current value.
+ */
+ tw686x_set_framerate(vc, vc->fps);
return 0;
}
@@ -609,6 +885,40 @@ static int tw686x_g_std(struct file *file, void *priv, v4l2_std_id *id)
return 0;
}
+static int tw686x_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *sp)
+{
+ struct tw686x_video_channel *vc = video_drvdata(file);
+ struct v4l2_captureparm *cp = &sp->parm.capture;
+
+ if (sp->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ cp->readbuffers = 3;
+
+ cp->capability = V4L2_CAP_TIMEPERFRAME;
+ cp->timeperframe.numerator = 1;
+ cp->timeperframe.denominator = vc->fps;
+ return 0;
+}
+
+static int tw686x_s_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *sp)
+{
+ struct tw686x_video_channel *vc = video_drvdata(file);
+ struct v4l2_captureparm *cp = &sp->parm.capture;
+ unsigned int denominator = cp->timeperframe.denominator;
+ unsigned int numerator = cp->timeperframe.numerator;
+ unsigned int fps;
+
+ if (vb2_is_busy(&vc->vidq))
+ return -EBUSY;
+
+ fps = (!numerator || !denominator) ? 0 : denominator / numerator;
+ if (vc->fps != fps)
+ tw686x_set_framerate(vc, fps);
+ return tw686x_g_parm(file, priv, sp);
+}
+
static int tw686x_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
@@ -695,6 +1005,9 @@ static const struct v4l2_ioctl_ops tw686x_video_ioctl_ops = {
.vidioc_g_std = tw686x_g_std,
.vidioc_s_std = tw686x_s_std,
+ .vidioc_g_parm = tw686x_g_parm,
+ .vidioc_s_parm = tw686x_s_parm,
+
.vidioc_enum_input = tw686x_enum_input,
.vidioc_g_input = tw686x_g_input,
.vidioc_s_input = tw686x_s_input,
@@ -713,26 +1026,11 @@ static const struct v4l2_ioctl_ops tw686x_video_ioctl_ops = {
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
-static void tw686x_buffer_copy(struct tw686x_video_channel *vc,
- unsigned int pb, struct vb2_v4l2_buffer *vb)
-{
- struct tw686x_dma_desc *desc = &vc->dma_descs[pb];
- struct vb2_buffer *vb2_buf = &vb->vb2_buf;
-
- vb->field = V4L2_FIELD_INTERLACED;
- vb->sequence = vc->sequence++;
-
- memcpy(vb2_plane_vaddr(vb2_buf, 0), desc->virt, desc->size);
- vb2_buf->timestamp = ktime_get_ns();
- vb2_buffer_done(vb2_buf, VB2_BUF_STATE_DONE);
-}
-
void tw686x_video_irq(struct tw686x_dev *dev, unsigned long requests,
unsigned int pb_status, unsigned int fifo_status,
unsigned int *reset_ch)
{
struct tw686x_video_channel *vc;
- struct vb2_v4l2_buffer *vb;
unsigned long flags;
unsigned int ch, pb;
@@ -781,14 +1079,9 @@ void tw686x_video_irq(struct tw686x_dev *dev, unsigned long requests,
continue;
}
- /* handle video stream */
spin_lock_irqsave(&vc->qlock, flags);
- if (vc->curr_bufs[pb]) {
- vb = &vc->curr_bufs[pb]->vb;
- tw686x_buffer_copy(vc, pb, vb);
- }
- vc->pb = !pb;
- tw686x_buffer_refill(vc, pb);
+ tw686x_buf_done(vc, pb);
+ dev->dma_ops->buf_refill(vc, pb);
spin_unlock_irqrestore(&vc->qlock, flags);
}
}
@@ -803,8 +1096,9 @@ void tw686x_video_free(struct tw686x_dev *dev)
if (vc->device)
video_unregister_device(vc->device);
- for (pb = 0; pb < 2; pb++)
- tw686x_free_dma(vc, pb);
+ if (dev->dma_ops->free)
+ for (pb = 0; pb < 2; pb++)
+ dev->dma_ops->free(vc, pb);
}
}
@@ -813,10 +1107,25 @@ int tw686x_video_init(struct tw686x_dev *dev)
unsigned int ch, val, pb;
int err;
+ if (dev->dma_mode == TW686X_DMA_MODE_MEMCPY)
+ dev->dma_ops = &memcpy_dma_ops;
+ else if (dev->dma_mode == TW686X_DMA_MODE_CONTIG)
+ dev->dma_ops = &contig_dma_ops;
+ else if (dev->dma_mode == TW686X_DMA_MODE_SG)
+ dev->dma_ops = &sg_dma_ops;
+ else
+ return -EINVAL;
+
err = v4l2_device_register(&dev->pci_dev->dev, &dev->v4l2_dev);
if (err)
return err;
+ if (dev->dma_ops->setup) {
+ err = dev->dma_ops->setup(dev);
+ if (err)
+ return err;
+ }
+
for (ch = 0; ch < max_channels(dev); ch++) {
struct tw686x_video_channel *vc = &dev->video_channels[ch];
struct video_device *vdev;
@@ -842,10 +1151,12 @@ int tw686x_video_init(struct tw686x_dev *dev)
reg_write(dev, HACTIVE_LO[ch], 0xd0);
reg_write(dev, VIDEO_SIZE[ch], 0);
- for (pb = 0; pb < 2; pb++) {
- err = tw686x_alloc_dma(vc, pb);
- if (err)
- goto error;
+ if (dev->dma_ops->alloc) {
+ for (pb = 0; pb < 2; pb++) {
+ err = dev->dma_ops->alloc(vc, pb);
+ if (err)
+ goto error;
+ }
}
vc->vidq.io_modes = VB2_READ | VB2_MMAP | VB2_DMABUF;
@@ -853,11 +1164,12 @@ int tw686x_video_init(struct tw686x_dev *dev)
vc->vidq.drv_priv = vc;
vc->vidq.buf_struct_size = sizeof(struct tw686x_v4l2_buf);
vc->vidq.ops = &tw686x_video_qops;
- vc->vidq.mem_ops = &vb2_vmalloc_memops;
+ vc->vidq.mem_ops = dev->dma_ops->mem_ops;
vc->vidq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
vc->vidq.min_buffers_needed = 2;
vc->vidq.lock = &vc->vb_mutex;
vc->vidq.gfp_flags = GFP_DMA32;
+ vc->vidq.dev = &dev->pci_dev->dev;
err = vb2_queue_init(&vc->vidq);
if (err) {
@@ -915,10 +1227,9 @@ int tw686x_video_init(struct tw686x_dev *dev)
vc->num = vdev->num;
}
- /* Set DMA frame mode on all channels. Only supported mode for now. */
val = TW686X_DEF_PHASE_REF;
for (ch = 0; ch < max_channels(dev); ch++)
- val |= TW686X_FRAME_MODE << (16 + ch * 2);
+ val |= dev->dma_ops->hw_dma_mode << (16 + ch * 2);
reg_write(dev, PHASE_REF, val);
reg_write(dev, MISC2[0], 0xe7);
diff --git a/drivers/media/pci/tw686x/tw686x.h b/drivers/media/pci/tw686x/tw686x.h
index 44b5755ac..f24a2a9bc 100644
--- a/drivers/media/pci/tw686x/tw686x.h
+++ b/drivers/media/pci/tw686x/tw686x.h
@@ -27,16 +27,14 @@
#define TYPE_SECOND_GEN 0x10
#define TW686X_DEF_PHASE_REF 0x1518
-#define TW686X_FIELD_MODE 0x3
-#define TW686X_FRAME_MODE 0x2
-/* 0x1 is reserved */
-#define TW686X_SG_MODE 0x0
-
-#define TW686X_AUDIO_PAGE_SZ 4096
#define TW686X_AUDIO_PAGE_MAX 16
#define TW686X_AUDIO_PERIODS_MIN 2
#define TW686X_AUDIO_PERIODS_MAX TW686X_AUDIO_PAGE_MAX
+#define TW686X_DMA_MODE_MEMCPY 0
+#define TW686X_DMA_MODE_CONTIG 1
+#define TW686X_DMA_MODE_SG 2
+
struct tw686x_format {
char *name;
unsigned int fourcc;
@@ -50,6 +48,12 @@ struct tw686x_dma_desc {
unsigned int size;
};
+struct tw686x_sg_desc {
+ /* 3 MSBits for flags, 13 LSBits for length */
+ __le32 flags_length;
+ __le32 phys;
+};
+
struct tw686x_audio_buf {
dma_addr_t dma;
void *virt;
@@ -82,6 +86,7 @@ struct tw686x_video_channel {
struct video_device *device;
struct tw686x_v4l2_buf *curr_bufs[2];
struct tw686x_dma_desc dma_descs[2];
+ struct tw686x_sg_desc *sg_descs[2];
struct v4l2_ctrl_handler ctrl_handler;
const struct tw686x_format *format;
@@ -99,6 +104,16 @@ struct tw686x_video_channel {
bool no_signal;
};
+struct tw686x_dma_ops {
+ int (*setup)(struct tw686x_dev *dev);
+ int (*alloc)(struct tw686x_video_channel *vc, unsigned int pb);
+ void (*free)(struct tw686x_video_channel *vc, unsigned int pb);
+ void (*buf_refill)(struct tw686x_video_channel *vc, unsigned int pb);
+ const struct vb2_mem_ops *mem_ops;
+ enum v4l2_field field;
+ u32 hw_dma_mode;
+};
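This struct is a conventional const ops-table (strategy) pattern: tw686x_video_init() selects one of the three instances based on dev->dma_mode, and the rest of the driver only calls through dev->dma_ops, with optional hooks (setup/alloc/free) guarded by NULL checks. A minimal standalone sketch of the pattern, with simplified stand-in types and names:

#include <stdio.h>

struct dma_ops {
	const char *name;
	void (*buf_refill)(int channel);
};

static void memcpy_refill(int ch) { printf("ch%d: refill (memcpy)\n", ch); }
static void sg_refill(int ch)     { printf("ch%d: refill (sg)\n", ch); }

static const struct dma_ops memcpy_ops = { "memcpy", memcpy_refill };
static const struct dma_ops sg_ops     = { "sg",     sg_refill };

int main(void)
{
	/* would be chosen from the dma_mode module parameter at probe */
	const struct dma_ops *ops = &sg_ops;

	printf("mode: %s\n", ops->name);
	ops->buf_refill(0);
	return 0;
}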
+
/**
* struct tw686x_dev - global device status
* @lock: spinlock controlling access to the
@@ -112,15 +127,18 @@ struct tw686x_dev {
char name[32];
unsigned int type;
+ unsigned int dma_mode;
struct pci_dev *pci_dev;
__u32 __iomem *mmio;
- void *alloc_ctx;
-
+ const struct tw686x_dma_ops *dma_ops;
struct tw686x_video_channel *video_channels;
struct tw686x_audio_channel *audio_channels;
- int audio_rate; /* per-device value */
+ /* Per-device audio parameters */
+ int audio_rate;
+ int period_size;
+ int audio_enabled;
struct timer_list dma_delay_timer;
u32 pending_dma_en; /* must be protected by lock */
@@ -143,6 +161,12 @@ static inline unsigned int max_channels(struct tw686x_dev *dev)
return dev->type & TYPE_MAX_CHANNELS; /* 4 or 8 channels */
}
+static inline unsigned is_second_gen(struct tw686x_dev *dev)
+{
+ /* each channel has its own DMA SG table */
+ return dev->type & TYPE_SECOND_GEN;
+}
+
void tw686x_enable_channel(struct tw686x_dev *dev, unsigned int channel);
void tw686x_disable_channel(struct tw686x_dev *dev, unsigned int channel);
diff --git a/drivers/media/pci/zoran/zr36016.c b/drivers/media/pci/zoran/zr36016.c
index b87ddba86..c12ca9f96 100644
--- a/drivers/media/pci/zoran/zr36016.c
+++ b/drivers/media/pci/zoran/zr36016.c
@@ -246,10 +246,6 @@ static int zr36016_pushit (struct zr36016 *ptr,
//TODO//
========================================================================= */
-// needed offset values PAL NTSC SECAM
-static const int zr016_xoff[] = { 20, 20, 20 };
-static const int zr016_yoff[] = { 8, 9, 7 };
-
static void
zr36016_init (struct zr36016 *ptr)
{
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 84e041c0a..552b635cf 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -110,6 +110,7 @@ source "drivers/media/platform/exynos4-is/Kconfig"
source "drivers/media/platform/s5p-tv/Kconfig"
source "drivers/media/platform/am437x/Kconfig"
source "drivers/media/platform/xilinx/Kconfig"
+source "drivers/media/platform/rcar-vin/Kconfig"
config VIDEO_TI_CAL
tristate "TI CAL (Camera Adaptation Layer) driver"
@@ -152,6 +153,36 @@ config VIDEO_CODA
Coda is a range of video codec IPs that supports
H.264, MPEG-4, and other video formats.
+config VIDEO_MEDIATEK_VPU
+ tristate "Mediatek Video Processor Unit"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ ---help---
+ This driver provides support for downloading the VPU firmware and
+ communicating with the VPU. It is used by the hardware video
+ codec embedded in Mediatek's MT8173 SoCs, and is able
+ to handle video decoding/encoding in a range of formats.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mtk-vpu.
+
+config VIDEO_MEDIATEK_VCODEC
+ tristate "Mediatek Video Codec driver"
+ depends on MTK_IOMMU || COMPILE_TEST
+ depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ select VIDEO_MEDIATEK_VPU
+ default n
+ ---help---
+ The Mediatek video codec driver provides the HW capability to
+ encode and decode in a range of video formats.
+ This driver relies on the VPU driver to communicate with the VPU.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mtk-vcodec.
+
config VIDEO_MEM2MEM_DEINTERLACE
tristate "Deinterlace support"
depends on VIDEO_DEV && VIDEO_V4L2 && DMA_ENGINE
@@ -247,10 +278,24 @@ config VIDEO_RENESAS_JPU
To compile this driver as a module, choose M here: the module
will be called rcar_jpu.
+config VIDEO_RENESAS_FCP
+ tristate "Renesas Frame Compression Processor"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on OF
+ ---help---
+ This is a driver for the Renesas Frame Compression Processor (FCP).
+ The FCP is a companion module of video processing modules in the
+ Renesas R-Car Gen3 SoCs. It handles memory access for the codec,
+ VSP and FDP modules.
+
+ To compile this driver as a module, choose M here: the module
+ will be called rcar-fcp.
+
config VIDEO_RENESAS_VSP1
tristate "Renesas VSP1 Video Processing Engine"
depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAS_DMA
depends on (ARCH_RENESAS && OF) || COMPILE_TEST
+ depends on (!ARM64 && !VIDEO_RENESAS_FCP) || VIDEO_RENESAS_FCP
select VIDEOBUF2_DMA_CONTIG
---help---
This is a V4L2 driver for the Renesas VSP1 video processing engine.
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index bbb7bd1eb..21771c1a1 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_VIDEO_SH_VOU) += sh_vou.o
obj-$(CONFIG_SOC_CAMERA) += soc_camera/
+obj-$(CONFIG_VIDEO_RENESAS_FCP) += rcar-fcp.o
obj-$(CONFIG_VIDEO_RENESAS_JPU) += rcar_jpu.o
obj-$(CONFIG_VIDEO_RENESAS_VSP1) += vsp1/
@@ -55,4 +56,10 @@ obj-$(CONFIG_VIDEO_AM437X_VPFE) += am437x/
obj-$(CONFIG_VIDEO_XILINX) += xilinx/
+obj-$(CONFIG_VIDEO_RCAR_VIN) += rcar-vin/
+
ccflags-y += -I$(srctree)/drivers/media/i2c
+
+obj-$(CONFIG_VIDEO_MEDIATEK_VPU) += mtk-vpu/
+
+obj-$(CONFIG_VIDEO_MEDIATEK_VCODEC) += mtk-vcodec/
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
index e749eb7c3..b33b9e35e 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/am437x/am437x-vpfe.c
@@ -1901,21 +1901,20 @@ static void vpfe_calculate_offsets(struct vpfe_device *vpfe)
* @nbuffers: ptr to number of buffers requested by application
* @nplanes: contains number of distinct video planes needed to hold a frame
* @sizes[]: contains the size (in bytes) of each plane.
- * @alloc_ctxs: ptr to allocation context
+ * @alloc_devs: ptr to array of allocation devices
*
* This callback function is called when reqbuf() is called to adjust
* the buffer count and buffer size
*/
static int vpfe_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
unsigned size = vpfe->fmt.fmt.pix.sizeimage;
if (vq->num_buffers + *nbuffers < 3)
*nbuffers = 3 - vq->num_buffers;
- alloc_ctxs[0] = vpfe->alloc_ctx;
if (*nplanes) {
if (sizes[0] < size)
@@ -2364,13 +2363,6 @@ static int vpfe_probe_complete(struct vpfe_device *vpfe)
goto probe_out;
/* Initialize videobuf2 queue as per the buffer type */
- vpfe->alloc_ctx = vb2_dma_contig_init_ctx(vpfe->pdev);
- if (IS_ERR(vpfe->alloc_ctx)) {
- vpfe_err(vpfe, "Failed to get the context\n");
- err = PTR_ERR(vpfe->alloc_ctx);
- goto probe_out;
- }
-
q = &vpfe->buffer_queue;
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
@@ -2381,11 +2373,11 @@ static int vpfe_probe_complete(struct vpfe_device *vpfe)
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &vpfe->lock;
q->min_buffers_needed = 1;
+ q->dev = vpfe->pdev;
err = vb2_queue_init(q);
if (err) {
vpfe_err(vpfe, "vb2_queue_init() failed\n");
- vb2_dma_contig_cleanup_ctx(vpfe->alloc_ctx);
goto probe_out;
}
diff --git a/drivers/media/platform/am437x/am437x-vpfe.h b/drivers/media/platform/am437x/am437x-vpfe.h
index 777bf97fe..17d7aa426 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.h
+++ b/drivers/media/platform/am437x/am437x-vpfe.h
@@ -264,8 +264,6 @@ struct vpfe_device {
struct v4l2_rect crop;
/* Buffer queue used in video-buf */
struct vb2_queue buffer_queue;
- /* Allocator-specific contexts for each plane */
- struct vb2_alloc_ctx *alloc_ctx;
/* Queue of filled frames */
struct list_head dma_queue;
/* IRQ lock for DMA queue */
diff --git a/drivers/media/platform/blackfin/bfin_capture.c b/drivers/media/platform/blackfin/bfin_capture.c
index d0092dae7..8eb03397d 100644
--- a/drivers/media/platform/blackfin/bfin_capture.c
+++ b/drivers/media/platform/blackfin/bfin_capture.c
@@ -91,8 +91,6 @@ struct bcap_device {
struct bcap_buffer *cur_frm;
/* buffer queue used in videobuf2 */
struct vb2_queue buffer_queue;
- /* allocator-specific contexts for each plane */
- struct vb2_alloc_ctx *alloc_ctx;
/* queue of filled frames */
struct list_head dma_queue;
/* used in videobuf2 callback */
@@ -203,13 +201,12 @@ static void bcap_free_sensor_formats(struct bcap_device *bcap_dev)
static int bcap_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct bcap_device *bcap_dev = vb2_get_drv_priv(vq);
if (vq->num_buffers + *nbuffers < 2)
*nbuffers = 2;
- alloc_ctxs[0] = bcap_dev->alloc_ctx;
if (*nplanes)
return sizes[0] < bcap_dev->fmt.sizeimage ? -EINVAL : 0;
@@ -820,12 +817,6 @@ static int bcap_probe(struct platform_device *pdev)
}
bcap_dev->ppi->priv = bcap_dev;
- bcap_dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
- if (IS_ERR(bcap_dev->alloc_ctx)) {
- ret = PTR_ERR(bcap_dev->alloc_ctx);
- goto err_free_ppi;
- }
-
vfd = &bcap_dev->video_dev;
/* initialize field of video device */
vfd->release = video_device_release_empty;
@@ -839,7 +830,7 @@ static int bcap_probe(struct platform_device *pdev)
if (ret) {
v4l2_err(pdev->dev.driver,
"Unable to register v4l2 device\n");
- goto err_cleanup_ctx;
+ goto err_free_ppi;
}
v4l2_info(&bcap_dev->v4l2_dev, "v4l2 device registered\n");
@@ -863,6 +854,7 @@ static int bcap_probe(struct platform_device *pdev)
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &bcap_dev->mutex;
q->min_buffers_needed = 1;
+ q->dev = &pdev->dev;
ret = vb2_queue_init(q);
if (ret)
@@ -967,8 +959,6 @@ err_free_handler:
v4l2_ctrl_handler_free(&bcap_dev->ctrl_handler);
err_unreg_v4l2:
v4l2_device_unregister(&bcap_dev->v4l2_dev);
-err_cleanup_ctx:
- vb2_dma_contig_cleanup_ctx(bcap_dev->alloc_ctx);
err_free_ppi:
ppi_delete_instance(bcap_dev->ppi);
err_free_dev:
@@ -986,7 +976,6 @@ static int bcap_remove(struct platform_device *pdev)
video_unregister_device(&bcap_dev->video_dev);
v4l2_ctrl_handler_free(&bcap_dev->ctrl_handler);
v4l2_device_unregister(v4l2_dev);
- vb2_dma_contig_cleanup_ctx(bcap_dev->alloc_ctx);
ppi_delete_instance(bcap_dev->ppi);
kfree(bcap_dev);
return 0;
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 6709cb46c..232e5225b 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -1139,7 +1139,7 @@ static void set_default_params(struct coda_ctx *ctx)
*/
static int coda_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct coda_ctx *ctx = vb2_get_drv_priv(vq);
struct coda_q_data *q_data;
@@ -1151,9 +1151,6 @@ static int coda_queue_setup(struct vb2_queue *vq,
*nplanes = 1;
sizes[0] = size;
- /* Set to vb2-dma-contig allocator context, ignored by vb2-vmalloc */
- alloc_ctxs[0] = ctx->dev->alloc_ctx;
-
v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
"get %d buffer(s) of size %d each.\n", *nbuffers, size);
@@ -1599,6 +1596,7 @@ static int coda_queue_init(struct coda_ctx *ctx, struct vb2_queue *vq)
* that videobuf2 will keep the value of bytesused intact.
*/
vq->allow_zero_bytesused = 1;
+ vq->dev = &ctx->dev->plat_dev->dev;
return vb2_queue_init(vq);
}
@@ -2040,16 +2038,10 @@ static void coda_fw_callback(const struct firmware *fw, void *context)
if (ret < 0)
goto put_pm;
- dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
- if (IS_ERR(dev->alloc_ctx)) {
- v4l2_err(&dev->v4l2_dev, "Failed to alloc vb2 context\n");
- goto put_pm;
- }
-
dev->m2m_dev = v4l2_m2m_init(&coda_m2m_ops);
if (IS_ERR(dev->m2m_dev)) {
v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
- goto rel_ctx;
+ goto put_pm;
}
for (i = 0; i < dev->devtype->num_vdevs; i++) {
@@ -2072,8 +2064,6 @@ rel_vfd:
while (--i >= 0)
video_unregister_device(&dev->vfd[i]);
v4l2_m2m_release(dev->m2m_dev);
-rel_ctx:
- vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
put_pm:
pm_runtime_put_sync(&pdev->dev);
}
@@ -2226,7 +2216,7 @@ static int coda_probe(struct platform_device *pdev)
dev->rstc = devm_reset_control_get_optional(&pdev->dev, NULL);
if (IS_ERR(dev->rstc)) {
ret = PTR_ERR(dev->rstc);
- if (ret == -ENOENT || ret == -ENOSYS) {
+ if (ret == -ENOENT || ret == -ENOTSUPP) {
dev->rstc = NULL;
} else {
dev_err(&pdev->dev, "failed get reset control: %d\n",
@@ -2324,8 +2314,6 @@ static int coda_remove(struct platform_device *pdev)
if (dev->m2m_dev)
v4l2_m2m_release(dev->m2m_dev);
pm_runtime_disable(&pdev->dev);
- if (dev->alloc_ctx)
- vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
v4l2_device_unregister(&dev->v4l2_dev);
destroy_workqueue(dev->workqueue);
if (dev->iram.vaddr)
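
One non-mechanical change above: when reset-controller support is compiled
out, the devm_reset_control_get_optional() stubs in this kernel return
ERR_PTR(-ENOTSUPP) rather than -ENOSYS, so coda now treats -ENOTSUPP (like
-ENOENT for a missing DT property) as "no reset line". A hedged sketch of the
idiom:

/* Sketch: tolerate an absent or unsupported optional reset line. */
rstc = devm_reset_control_get_optional(&pdev->dev, NULL);
if (IS_ERR(rstc)) {
	ret = PTR_ERR(rstc);
	if (ret == -ENOENT || ret == -ENOTSUPP)
		rstc = NULL;	/* proceed without asserting a reset */
	else
		return ret;	/* real failure, e.g. -EPROBE_DEFER */
}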
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index 8f2c71e06..53f966616 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -92,7 +92,6 @@ struct coda_dev {
struct mutex coda_mutex;
struct workqueue_struct *workqueue;
struct v4l2_m2m_dev *m2m_dev;
- struct vb2_alloc_ctx *alloc_ctx;
struct list_head instances;
unsigned long instance_mask;
struct dentry *debugfs_root;
diff --git a/drivers/media/platform/davinci/ccdc_hw_device.h b/drivers/media/platform/davinci/ccdc_hw_device.h
index 86b9b3518..ae5605de7 100644
--- a/drivers/media/platform/davinci/ccdc_hw_device.h
+++ b/drivers/media/platform/davinci/ccdc_hw_device.h
@@ -80,13 +80,6 @@ struct ccdc_hw_ops {
/* Pointer to function to get line length */
unsigned int (*get_line_length) (void);
- /* Query CCDC control IDs */
- int (*queryctrl)(struct v4l2_queryctrl *qctrl);
- /* Set CCDC control */
- int (*set_control)(struct v4l2_control *ctrl);
- /* Get CCDC control */
- int (*get_control)(struct v4l2_control *ctrl);
-
/* Pointer to function to set frame buffer address */
void (*setfbaddr) (unsigned long addr);
/* Pointer to function to get field id */
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index 0abcdfe97..0b1709e96 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -230,7 +230,7 @@ static int vpbe_buffer_prepare(struct vb2_buffer *vb)
static int
vpbe_buffer_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
/* Get the file handle object and layer object */
@@ -242,7 +242,6 @@ vpbe_buffer_queue_setup(struct vb2_queue *vq,
/* Store number of buffers allocated in numbuffer member */
if (vq->num_buffers + *nbuffers < VPBE_DEFAULT_NUM_BUFS)
*nbuffers = VPBE_DEFAULT_NUM_BUFS - vq->num_buffers;
- alloc_ctxs[0] = layer->alloc_ctx;
if (*nplanes)
return sizes[0] < layer->pix_fmt.sizeimage ? -EINVAL : 0;
@@ -1451,20 +1450,13 @@ static int vpbe_display_probe(struct platform_device *pdev)
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->min_buffers_needed = 1;
q->lock = &disp_dev->dev[i]->opslock;
+ q->dev = disp_dev->vpbe_dev->pdev;
err = vb2_queue_init(q);
if (err) {
v4l2_err(v4l2_dev, "vb2_queue_init() failed\n");
goto probe_out;
}
- disp_dev->dev[i]->alloc_ctx =
- vb2_dma_contig_init_ctx(disp_dev->vpbe_dev->pdev);
- if (IS_ERR(disp_dev->dev[i]->alloc_ctx)) {
- v4l2_err(v4l2_dev, "Failed to get the context\n");
- err = PTR_ERR(disp_dev->dev[i]->alloc_ctx);
- goto probe_out;
- }
-
INIT_LIST_HEAD(&disp_dev->dev[i]->dma_queue);
if (register_device(disp_dev->dev[i], disp_dev, pdev)) {
@@ -1482,7 +1474,6 @@ probe_out:
for (k = 0; k < VPBE_DISPLAY_MAX_DEVICES; k++) {
/* Unregister video device */
if (disp_dev->dev[k] != NULL) {
- vb2_dma_contig_cleanup_ctx(disp_dev->dev[k]->alloc_ctx);
video_unregister_device(&disp_dev->dev[k]->video_dev);
kfree(disp_dev->dev[k]);
}
@@ -1510,7 +1501,6 @@ static int vpbe_display_remove(struct platform_device *pdev)
for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
/* Get the pointer to the layer object */
vpbe_display_layer = disp_dev->dev[i];
- vb2_dma_contig_cleanup_ctx(vpbe_display_layer->alloc_ctx);
/* Unregister video device */
video_unregister_device(&vpbe_display_layer->video_dev);
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index 08f7028c7..5104cc0ee 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -107,14 +107,14 @@ static int vpif_buffer_prepare(struct vb2_buffer *vb)
* @nbuffers: ptr to number of buffers requested by application
* @nplanes: contains number of distinct video planes needed to hold a frame
* @sizes[]: contains the size (in bytes) of each plane.
- * @alloc_ctxs: ptr to allocation context
+ * @alloc_devs: ptr to array of allocation devices

*
* This callback function is called when reqbuf() is called to adjust
* the buffer count and buffer size
*/
static int vpif_buffer_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct channel_obj *ch = vb2_get_drv_priv(vq);
struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
@@ -133,7 +133,6 @@ static int vpif_buffer_queue_setup(struct vb2_queue *vq,
*nplanes = 1;
sizes[0] = size;
- alloc_ctxs[0] = common->alloc_ctx;
/* Calculate the offset for Y and C data in the buffer */
vpif_calculate_offsets(ch);
@@ -1371,6 +1370,7 @@ static int vpif_probe_complete(void)
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->min_buffers_needed = 1;
q->lock = &common->lock;
+ q->dev = vpif_dev;
err = vb2_queue_init(q);
if (err) {
@@ -1378,13 +1378,6 @@ static int vpif_probe_complete(void)
goto probe_out;
}
- common->alloc_ctx = vb2_dma_contig_init_ctx(vpif_dev);
- if (IS_ERR(common->alloc_ctx)) {
- vpif_err("Failed to get the context\n");
- err = PTR_ERR(common->alloc_ctx);
- goto probe_out;
- }
-
INIT_LIST_HEAD(&common->dma_queue);
/* Initialize the video_device structure */
@@ -1412,7 +1405,6 @@ probe_out:
/* Get the pointer to the channel object */
ch = vpif_obj.dev[k];
common = &ch->common[k];
- vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
/* Unregister video device */
video_unregister_device(&ch->video_dev);
}
@@ -1546,7 +1538,6 @@ static int vpif_remove(struct platform_device *device)
/* Get the pointer to the channel object */
ch = vpif_obj.dev[i];
common = &ch->common[VPIF_VIDEO_INDEX];
- vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
/* Unregister video device */
video_unregister_device(&ch->video_dev);
kfree(vpif_obj.dev[i]);
diff --git a/drivers/media/platform/davinci/vpif_capture.h b/drivers/media/platform/davinci/vpif_capture.h
index 4a7600929..9e35b6771 100644
--- a/drivers/media/platform/davinci/vpif_capture.h
+++ b/drivers/media/platform/davinci/vpif_capture.h
@@ -65,8 +65,6 @@ struct common_obj {
struct v4l2_format fmt;
/* Buffer queue used in video-buf */
struct vb2_queue buffer_queue;
- /* allocator-specific contexts for each plane */
- struct vb2_alloc_ctx *alloc_ctx;
/* Queue of filled frames */
struct list_head dma_queue;
/* Used in video-buf */
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index f40755cf1..75b27233e 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -102,14 +102,14 @@ static int vpif_buffer_prepare(struct vb2_buffer *vb)
* @nbuffers: ptr to number of buffers requested by application
* @nplanes: contains number of distinct video planes needed to hold a frame
* @sizes[]: contains the size (in bytes) of each plane.
- * @alloc_ctxs: ptr to allocation context
+ * @alloc_devs: ptr to array of allocation devices
*
* This callback function is called when reqbuf() is called to adjust
* the buffer count and buffer size
*/
static int vpif_buffer_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct channel_obj *ch = vb2_get_drv_priv(vq);
struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
@@ -126,7 +126,6 @@ static int vpif_buffer_queue_setup(struct vb2_queue *vq,
*nplanes = 1;
sizes[0] = size;
- alloc_ctxs[0] = common->alloc_ctx;
/* Calculate the offset for Y and C data in the buffer */
vpif_calculate_offsets(ch);
@@ -1191,19 +1190,13 @@ static int vpif_probe_complete(void)
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->min_buffers_needed = 1;
q->lock = &common->lock;
+ q->dev = vpif_dev;
err = vb2_queue_init(q);
if (err) {
vpif_err("vpif_display: vb2_queue_init() failed\n");
goto probe_out;
}
- common->alloc_ctx = vb2_dma_contig_init_ctx(vpif_dev);
- if (IS_ERR(common->alloc_ctx)) {
- vpif_err("Failed to get the context\n");
- err = PTR_ERR(common->alloc_ctx);
- goto probe_out;
- }
-
INIT_LIST_HEAD(&common->dma_queue);
/* register video device */
@@ -1233,7 +1226,6 @@ probe_out:
for (k = 0; k < j; k++) {
ch = vpif_obj.dev[k];
common = &ch->common[k];
- vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
video_unregister_device(&ch->video_dev);
}
return err;
@@ -1355,7 +1347,6 @@ static int vpif_remove(struct platform_device *device)
/* Get the pointer to the channel object */
ch = vpif_obj.dev[i];
common = &ch->common[VPIF_VIDEO_INDEX];
- vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
/* Unregister video device */
video_unregister_device(&ch->video_dev);
kfree(vpif_obj.dev[i]);
diff --git a/drivers/media/platform/davinci/vpif_display.h b/drivers/media/platform/davinci/vpif_display.h
index e7a1723a1..af2765fdc 100644
--- a/drivers/media/platform/davinci/vpif_display.h
+++ b/drivers/media/platform/davinci/vpif_display.h
@@ -74,8 +74,6 @@ struct common_obj {
struct v4l2_format fmt; /* Used to store the format */
struct vb2_queue buffer_queue; /* Buffer queue used in
* video-buf */
- /* allocator-specific contexts for each plane */
- struct vb2_alloc_ctx *alloc_ctx;
struct list_head dma_queue; /* Queue of filled frames */
spinlock_t irqlock; /* Used in video-buf */
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index c04973669..787bd16c1 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -1123,19 +1123,13 @@ static int gsc_probe(struct platform_device *pdev)
if (ret < 0)
goto err_m2m;
- /* Initialize continious memory allocator */
- gsc->alloc_ctx = vb2_dma_contig_init_ctx(dev);
- if (IS_ERR(gsc->alloc_ctx)) {
- ret = PTR_ERR(gsc->alloc_ctx);
- goto err_pm;
- }
+ vb2_dma_contig_set_max_seg_size(dev, DMA_BIT_MASK(32));
dev_dbg(dev, "gsc-%d registered successfully\n", gsc->id);
pm_runtime_put(dev);
return 0;
-err_pm:
- pm_runtime_put(dev);
+
err_m2m:
gsc_unregister_m2m_device(gsc);
err_v4l2:
@@ -1152,7 +1146,7 @@ static int gsc_remove(struct platform_device *pdev)
gsc_unregister_m2m_device(gsc);
v4l2_device_unregister(&gsc->v4l2_dev);
- vb2_dma_contig_cleanup_ctx(gsc->alloc_ctx);
+ vb2_dma_contig_clear_max_seg_size(&pdev->dev);
pm_runtime_disable(&pdev->dev);
gsc_clk_put(gsc);
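
The exynos drivers take a slightly different route than q->dev alone:
vb2_dma_contig_set_max_seg_size() installs DMA segment-size parameters on the
device, which vb2-dma-contig consults when mapping buffers, and it must be
paired with vb2_dma_contig_clear_max_seg_size() on the remove path, as the
hunks above do. A minimal sketch, assuming an otherwise complete probe/remove
pair (names illustrative):

/* Sketch: pairing the max-seg-size calls across probe/remove. */
static int foo_probe(struct platform_device *pdev)
{
	/* allow vb2-dma-contig segments up to 4 GiB on this device */
	vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	vb2_dma_contig_clear_max_seg_size(&pdev->dev);
	return 0;
}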
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/exynos-gsc/gsc-core.h
index ec4000c72..7ad7b9dc2 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.h
+++ b/drivers/media/platform/exynos-gsc/gsc-core.h
@@ -327,7 +327,6 @@ struct gsc_driverdata {
* @irq_queue: interrupt handler waitqueue
* @m2m: memory-to-memory V4L2 device information
* @state: flags used to synchronize m2m and capture mode operation
- * @alloc_ctx: videobuf2 memory allocator context
* @vdev: video device for G-Scaler instance
*/
struct gsc_dev {
@@ -341,7 +340,6 @@ struct gsc_dev {
wait_queue_head_t irq_queue;
struct gsc_m2m_device m2m;
unsigned long state;
- struct vb2_alloc_ctx *alloc_ctx;
struct video_device vdev;
struct v4l2_device v4l2_dev;
};
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index a600e32e2..ec6494cbd 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -213,7 +213,7 @@ put_device:
static int gsc_m2m_queue_setup(struct vb2_queue *vq,
unsigned int *num_buffers, unsigned int *num_planes,
- unsigned int sizes[], void *allocators[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct gsc_ctx *ctx = vb2_get_drv_priv(vq);
struct gsc_frame *frame;
@@ -227,10 +227,8 @@ static int gsc_m2m_queue_setup(struct vb2_queue *vq,
return -EINVAL;
*num_planes = frame->fmt->num_planes;
- for (i = 0; i < frame->fmt->num_planes; i++) {
+ for (i = 0; i < frame->fmt->num_planes; i++)
sizes[i] = frame->payload[i];
- allocators[i] = ctx->gsc_dev->alloc_ctx;
- }
return 0;
}
@@ -591,6 +589,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->lock = &ctx->gsc_dev->lock;
+ src_vq->dev = &ctx->gsc_dev->pdev->dev;
ret = vb2_queue_init(src_vq);
if (ret)
@@ -605,6 +604,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->lock = &ctx->gsc_dev->lock;
+ dst_vq->dev = &ctx->gsc_dev->pdev->dev;
return vb2_queue_init(dst_vq);
}
diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c
index bf47d3b9c..fdec499fb 100644
--- a/drivers/media/platform/exynos4-is/fimc-capture.c
+++ b/drivers/media/platform/exynos4-is/fimc-capture.c
@@ -340,7 +340,7 @@ int fimc_capture_resume(struct fimc_dev *fimc)
static int queue_setup(struct vb2_queue *vq,
unsigned int *num_buffers, unsigned int *num_planes,
- unsigned int sizes[], void *allocators[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct fimc_ctx *ctx = vq->drv_priv;
struct fimc_frame *frame = &ctx->d_frame;
@@ -354,11 +354,9 @@ static int queue_setup(struct vb2_queue *vq,
if (*num_planes) {
if (*num_planes != fmt->memplanes)
return -EINVAL;
- for (i = 0; i < *num_planes; i++) {
+ for (i = 0; i < *num_planes; i++)
if (sizes[i] < (wh * fmt->depth[i]) / 8)
return -EINVAL;
- allocators[i] = ctx->fimc_dev->alloc_ctx;
- }
return 0;
}
@@ -371,8 +369,6 @@ static int queue_setup(struct vb2_queue *vq,
sizes[i] = frame->payload[i];
else
sizes[i] = max_t(u32, size, frame->payload[i]);
-
- allocators[i] = ctx->fimc_dev->alloc_ctx;
}
return 0;
@@ -1779,6 +1775,7 @@ static int fimc_register_capture_device(struct fimc_dev *fimc,
q->buf_struct_size = sizeof(struct fimc_vid_buffer);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &fimc->lock;
+ q->dev = &fimc->pdev->dev;
ret = vb2_queue_init(q);
if (ret)
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
index b1c1cea82..8f89ca21b 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/exynos4-is/fimc-core.c
@@ -1018,19 +1018,11 @@ static int fimc_probe(struct platform_device *pdev)
goto err_sd;
}
- /* Initialize contiguous memory allocator */
- fimc->alloc_ctx = vb2_dma_contig_init_ctx(dev);
- if (IS_ERR(fimc->alloc_ctx)) {
- ret = PTR_ERR(fimc->alloc_ctx);
- goto err_gclk;
- }
+ vb2_dma_contig_set_max_seg_size(dev, DMA_BIT_MASK(32));
dev_dbg(dev, "FIMC.%d registered successfully\n", fimc->id);
return 0;
-err_gclk:
- if (!pm_runtime_enabled(dev))
- clk_disable(fimc->clock[CLK_GATE]);
err_sd:
fimc_unregister_capture_subdev(fimc);
err_sclk:
@@ -1123,7 +1115,7 @@ static int fimc_remove(struct platform_device *pdev)
pm_runtime_set_suspended(&pdev->dev);
fimc_unregister_capture_subdev(fimc);
- vb2_dma_contig_cleanup_ctx(fimc->alloc_ctx);
+ vb2_dma_contig_clear_max_seg_size(&pdev->dev);
clk_disable(fimc->clock[CLK_BUS]);
fimc_clk_put(fimc);
diff --git a/drivers/media/platform/exynos4-is/fimc-core.h b/drivers/media/platform/exynos4-is/fimc-core.h
index 6b7435453..5615fefbf 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.h
+++ b/drivers/media/platform/exynos4-is/fimc-core.h
@@ -307,7 +307,6 @@ struct fimc_m2m_device {
*/
struct fimc_vid_cap {
struct fimc_ctx *ctx;
- struct vb2_alloc_ctx *alloc_ctx;
struct v4l2_subdev subdev;
struct exynos_video_entity ve;
struct media_pad vd_pad;
@@ -417,7 +416,6 @@ struct fimc_ctx;
* @m2m: memory-to-memory V4L2 device information
* @vid_cap: camera capture device information
* @state: flags used to synchronize m2m and capture mode operation
- * @alloc_ctx: videobuf2 memory allocator context
* @pipeline: fimc video capture pipeline data structure
*/
struct fimc_dev {
@@ -436,7 +434,6 @@ struct fimc_dev {
struct fimc_m2m_device m2m;
struct fimc_vid_cap vid_cap;
unsigned long state;
- struct vb2_alloc_ctx *alloc_ctx;
};
/**
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index 5a8d4f03a..02b5cab70 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -204,9 +204,6 @@ static int fimc_is_register_subdevs(struct fimc_is *is)
if (ret < 0)
return ret;
- /* Initialize memory allocator context for the ISP DMA. */
- is->isp.alloc_ctx = is->alloc_ctx;
-
for_each_compatible_node(i2c_bus, NULL, FIMC_IS_I2C_COMPATIBLE) {
for_each_available_child_of_node(i2c_bus, child) {
ret = fimc_is_parse_sensor_config(is, index, child);
@@ -847,18 +844,14 @@ static int fimc_is_probe(struct platform_device *pdev)
if (ret < 0)
goto err_pm;
- is->alloc_ctx = vb2_dma_contig_init_ctx(dev);
- if (IS_ERR(is->alloc_ctx)) {
- ret = PTR_ERR(is->alloc_ctx);
- goto err_pm;
- }
+ vb2_dma_contig_set_max_seg_size(dev, DMA_BIT_MASK(32));
/*
* Register FIMC-IS V4L2 subdevs to this driver. The video nodes
* will be created within the subdev's registered() callback.
*/
ret = fimc_is_register_subdevs(is);
if (ret < 0)
- goto err_vb;
+ goto err_pm;
ret = fimc_is_debugfs_create(is);
if (ret < 0)
@@ -877,8 +870,6 @@ err_dfs:
fimc_is_debugfs_remove(is);
err_sd:
fimc_is_unregister_subdevs(is);
-err_vb:
- vb2_dma_contig_cleanup_ctx(is->alloc_ctx);
err_pm:
if (!pm_runtime_enabled(dev))
fimc_is_runtime_suspend(dev);
@@ -939,7 +930,7 @@ static int fimc_is_remove(struct platform_device *pdev)
fimc_is_runtime_suspend(dev);
free_irq(is->irq, is);
fimc_is_unregister_subdevs(is);
- vb2_dma_contig_cleanup_ctx(is->alloc_ctx);
+ vb2_dma_contig_clear_max_seg_size(dev);
fimc_is_put_clocks(is);
fimc_is_debugfs_remove(is);
release_firmware(is->fw.f_w);
diff --git a/drivers/media/platform/exynos4-is/fimc-is.h b/drivers/media/platform/exynos4-is/fimc-is.h
index 02ab290fc..0c2459e60 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.h
+++ b/drivers/media/platform/exynos4-is/fimc-is.h
@@ -233,7 +233,6 @@ struct chain_config {
* @pdev: pointer to FIMC-IS platform device
* @pctrl: pointer to pinctrl structure for this device
* @v4l2_dev: pointer to top the level v4l2_device
- * @alloc_ctx: videobuf2 memory allocator context
* @lock: mutex serializing video device and the subdev operations
* @slock: spinlock protecting this data structure and the hw registers
* @clocks: FIMC-LITE gate clock
@@ -256,7 +255,6 @@ struct fimc_is {
struct fimc_is_sensor sensor[FIMC_IS_SENSORS_NUM];
struct fimc_is_setfile setfile;
- struct vb2_alloc_ctx *alloc_ctx;
struct v4l2_ctrl_handler ctrl_handler;
struct mutex lock;
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c
index c0816728c..400ce0cb0 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp-video.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c
@@ -40,7 +40,7 @@
static int isp_video_capture_queue_setup(struct vb2_queue *vq,
unsigned int *num_buffers, unsigned int *num_planes,
- unsigned int sizes[], void *allocators[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct fimc_isp *isp = vb2_get_drv_priv(vq);
struct v4l2_pix_format_mplane *vid_fmt = &isp->video_capture.pixfmt;
@@ -57,20 +57,16 @@ static int isp_video_capture_queue_setup(struct vb2_queue *vq,
if (*num_planes) {
if (*num_planes != fmt->memplanes)
return -EINVAL;
- for (i = 0; i < *num_planes; i++) {
+ for (i = 0; i < *num_planes; i++)
if (sizes[i] < (wh * fmt->depth[i]) / 8)
return -EINVAL;
- allocators[i] = isp->alloc_ctx;
- }
return 0;
}
*num_planes = fmt->memplanes;
- for (i = 0; i < fmt->memplanes; i++) {
+ for (i = 0; i < fmt->memplanes; i++)
sizes[i] = (wh * fmt->depth[i]) / 8;
- allocators[i] = isp->alloc_ctx;
- }
return 0;
}
@@ -597,6 +593,7 @@ int fimc_isp_video_device_register(struct fimc_isp *isp,
q->drv_priv = isp;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &isp->video_lock;
+ q->dev = &isp->pdev->dev;
ret = vb2_queue_init(q);
if (ret < 0)
diff --git a/drivers/media/platform/exynos4-is/fimc-isp.h b/drivers/media/platform/exynos4-is/fimc-isp.h
index e0686b5f1..3cdd52491 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp.h
+++ b/drivers/media/platform/exynos4-is/fimc-isp.h
@@ -148,7 +148,6 @@ struct fimc_is_video {
/**
* struct fimc_isp - FIMC-IS ISP data structure
* @pdev: pointer to FIMC-IS platform device
- * @alloc_ctx: videobuf2 memory allocator context
* @subdev: ISP v4l2_subdev
* @subdev_pads: the ISP subdev media pads
* @test_pattern: test pattern controls
@@ -161,7 +160,6 @@ struct fimc_is_video {
*/
struct fimc_isp {
struct platform_device *pdev;
- struct vb2_alloc_ctx *alloc_ctx;
struct v4l2_subdev subdev;
struct media_pad subdev_pads[FIMC_ISP_SD_PADS_NUM];
struct v4l2_mbus_framefmt src_fmt;
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
index dc1b929f7..a0f149fb8 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite.c
@@ -357,7 +357,7 @@ static void stop_streaming(struct vb2_queue *q)
static int queue_setup(struct vb2_queue *vq,
unsigned int *num_buffers, unsigned int *num_planes,
- unsigned int sizes[], void *allocators[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct fimc_lite *fimc = vq->drv_priv;
struct flite_frame *frame = &fimc->out_frame;
@@ -371,20 +371,16 @@ static int queue_setup(struct vb2_queue *vq,
if (*num_planes) {
if (*num_planes != fmt->memplanes)
return -EINVAL;
- for (i = 0; i < *num_planes; i++) {
+ for (i = 0; i < *num_planes; i++)
if (sizes[i] < (wh * fmt->depth[i]) / 8)
return -EINVAL;
- allocators[i] = fimc->alloc_ctx;
- }
return 0;
}
*num_planes = fmt->memplanes;
- for (i = 0; i < fmt->memplanes; i++) {
+ for (i = 0; i < fmt->memplanes; i++)
sizes[i] = (wh * fmt->depth[i]) / 8;
- allocators[i] = fimc->alloc_ctx;
- }
return 0;
}
@@ -1300,6 +1296,7 @@ static int fimc_lite_subdev_registered(struct v4l2_subdev *sd)
q->drv_priv = fimc;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &fimc->lock;
+ q->dev = &fimc->pdev->dev;
ret = vb2_queue_init(q);
if (ret < 0)
@@ -1551,11 +1548,7 @@ static int fimc_lite_probe(struct platform_device *pdev)
goto err_sd;
}
- fimc->alloc_ctx = vb2_dma_contig_init_ctx(dev);
- if (IS_ERR(fimc->alloc_ctx)) {
- ret = PTR_ERR(fimc->alloc_ctx);
- goto err_clk_dis;
- }
+ vb2_dma_contig_set_max_seg_size(dev, DMA_BIT_MASK(32));
fimc_lite_set_default_config(fimc);
@@ -1563,9 +1556,6 @@ static int fimc_lite_probe(struct platform_device *pdev)
fimc->index);
return 0;
-err_clk_dis:
- if (!pm_runtime_enabled(dev))
- clk_disable(fimc->clock);
err_sd:
fimc_lite_unregister_capture_subdev(fimc);
err_clk_put:
@@ -1651,7 +1641,7 @@ static int fimc_lite_remove(struct platform_device *pdev)
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
fimc_lite_unregister_capture_subdev(fimc);
- vb2_dma_contig_cleanup_ctx(fimc->alloc_ctx);
+ vb2_dma_contig_clear_max_seg_size(dev);
fimc_lite_clk_put(fimc);
dev_info(dev, "Driver unloaded\n");
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.h b/drivers/media/platform/exynos4-is/fimc-lite.h
index 11690d563..9ae1e96a1 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.h
+++ b/drivers/media/platform/exynos4-is/fimc-lite.h
@@ -113,7 +113,6 @@ struct flite_buffer {
* @ve: exynos video device entity structure
* @v4l2_dev: pointer to top the level v4l2_device
* @fh: v4l2 file handle
- * @alloc_ctx: videobuf2 memory allocator context
* @subdev: FIMC-LITE subdev
* @vd_pad: media (sink) pad for the capture video node
* @subdev_pads: the subdev media pads
@@ -148,7 +147,6 @@ struct fimc_lite {
struct exynos_video_entity ve;
struct v4l2_device *v4l2_dev;
struct v4l2_fh fh;
- struct vb2_alloc_ctx *alloc_ctx;
struct v4l2_subdev subdev;
struct media_pad vd_pad;
struct media_pad subdev_pads[FLITE_SD_PADS_NUM];
diff --git a/drivers/media/platform/exynos4-is/fimc-m2m.c b/drivers/media/platform/exynos4-is/fimc-m2m.c
index 55ec4c99d..b1309e114 100644
--- a/drivers/media/platform/exynos4-is/fimc-m2m.c
+++ b/drivers/media/platform/exynos4-is/fimc-m2m.c
@@ -50,30 +50,28 @@ void fimc_m2m_job_finish(struct fimc_ctx *ctx, int vb_state)
src_vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
- if (src_vb && dst_vb) {
+ if (src_vb)
v4l2_m2m_buf_done(src_vb, vb_state);
+ if (dst_vb)
v4l2_m2m_buf_done(dst_vb, vb_state);
+ if (src_vb && dst_vb)
v4l2_m2m_job_finish(ctx->fimc_dev->m2m.m2m_dev,
ctx->fh.m2m_ctx);
- }
}
/* Complete the transaction which has been scheduled for execution. */
-static int fimc_m2m_shutdown(struct fimc_ctx *ctx)
+static void fimc_m2m_shutdown(struct fimc_ctx *ctx)
{
struct fimc_dev *fimc = ctx->fimc_dev;
- int ret;
if (!fimc_m2m_pending(fimc))
- return 0;
+ return;
fimc_ctx_state_set(FIMC_CTX_SHUT, ctx);
- ret = wait_event_timeout(fimc->irq_queue,
- !fimc_ctx_state_is_set(FIMC_CTX_SHUT, ctx),
- FIMC_SHUTDOWN_TIMEOUT);
-
- return ret == 0 ? -ETIMEDOUT : ret;
+ wait_event_timeout(fimc->irq_queue,
+ !fimc_ctx_state_is_set(FIMC_CTX_SHUT, ctx),
+ FIMC_SHUTDOWN_TIMEOUT);
}
static int start_streaming(struct vb2_queue *q, unsigned int count)
@@ -88,12 +86,10 @@ static int start_streaming(struct vb2_queue *q, unsigned int count)
static void stop_streaming(struct vb2_queue *q)
{
struct fimc_ctx *ctx = q->drv_priv;
- int ret;
- ret = fimc_m2m_shutdown(ctx);
- if (ret == -ETIMEDOUT)
- fimc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
+ fimc_m2m_shutdown(ctx);
+ fimc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
pm_runtime_put(&ctx->fimc_dev->pdev->dev);
}
@@ -178,7 +174,7 @@ static void fimc_job_abort(void *priv)
static int fimc_queue_setup(struct vb2_queue *vq,
unsigned int *num_buffers, unsigned int *num_planes,
- unsigned int sizes[], void *allocators[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct fimc_ctx *ctx = vb2_get_drv_priv(vq);
struct fimc_frame *f;
@@ -195,10 +191,8 @@ static int fimc_queue_setup(struct vb2_queue *vq,
return -EINVAL;
*num_planes = f->fmt->memplanes;
- for (i = 0; i < f->fmt->memplanes; i++) {
+ for (i = 0; i < f->fmt->memplanes; i++)
sizes[i] = f->payload[i];
- allocators[i] = ctx->fimc_dev->alloc_ctx;
- }
return 0;
}
@@ -562,6 +556,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->lock = &ctx->fimc_dev->lock;
+ src_vq->dev = &ctx->fimc_dev->pdev->dev;
ret = vb2_queue_init(src_vq);
if (ret)
@@ -575,6 +570,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->lock = &ctx->fimc_dev->lock;
+ dst_vq->dev = &ctx->fimc_dev->pdev->dev;
return vb2_queue_init(dst_vq);
}
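
Besides the allocator change, fimc-m2m reworks its teardown: the
fimc_m2m_job_finish() hunk above completes the source and destination buffers
independently, and stop_streaming() always flushes with VB2_BUF_STATE_ERROR,
so a buffer queued on only one side is still returned. The same pattern in
isolation, under an illustrative wrapper name:

/* Sketch: return whichever m2m buffers exist, marking them as errored. */
static void foo_flush_m2m_bufs(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct vb2_v4l2_buffer *src = v4l2_m2m_src_buf_remove(m2m_ctx);
	struct vb2_v4l2_buffer *dst = v4l2_m2m_dst_buf_remove(m2m_ctx);

	if (src)
		v4l2_m2m_buf_done(src, VB2_BUF_STATE_ERROR);
	if (dst)
		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_ERROR);
}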
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index bf954424e..86e681daa 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -649,23 +649,6 @@ static int s5pcsis_log_status(struct v4l2_subdev *sd)
return 0;
}
-static int s5pcsis_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
-{
- struct v4l2_mbus_framefmt *format = v4l2_subdev_get_try_format(sd, fh->pad, 0);
-
- format->colorspace = V4L2_COLORSPACE_JPEG;
- format->code = s5pcsis_formats[0].code;
- format->width = S5PCSIS_DEF_PIX_WIDTH;
- format->height = S5PCSIS_DEF_PIX_HEIGHT;
- format->field = V4L2_FIELD_NONE;
-
- return 0;
-}
-
-static const struct v4l2_subdev_internal_ops s5pcsis_sd_internal_ops = {
- .open = s5pcsis_open,
-};
-
static struct v4l2_subdev_core_ops s5pcsis_core_ops = {
.s_power = s5pcsis_s_power,
.log_status = s5pcsis_log_status,
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index 7383818c2..0fcb5c780 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -136,7 +136,6 @@ struct deinterlace_dev {
struct dma_chan *dma_chan;
struct v4l2_m2m_dev *m2m_dev;
- struct vb2_alloc_ctx *alloc_ctx;
};
struct deinterlace_ctx {
@@ -799,7 +798,7 @@ struct vb2_dc_conf {
static int deinterlace_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct deinterlace_ctx *ctx = vb2_get_drv_priv(vq);
struct deinterlace_q_data *q_data;
@@ -820,8 +819,6 @@ static int deinterlace_queue_setup(struct vb2_queue *vq,
*nbuffers = count;
sizes[0] = size;
- alloc_ctxs[0] = ctx->dev->alloc_ctx;
-
dprintk(ctx->dev, "get %d buffer(s) of size %d each.\n", count, size);
return 0;
@@ -874,6 +871,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->ops = &deinterlace_qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->dev = ctx->dev->v4l2_dev.dev;
q_data[V4L2_M2M_SRC].fmt = &formats[0];
q_data[V4L2_M2M_SRC].width = 640;
q_data[V4L2_M2M_SRC].height = 480;
@@ -891,6 +889,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->ops = &deinterlace_qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->dev = ctx->dev->v4l2_dev.dev;
q_data[V4L2_M2M_DST].fmt = &formats[0];
q_data[V4L2_M2M_DST].width = 640;
q_data[V4L2_M2M_DST].height = 480;
@@ -1046,13 +1045,6 @@ static int deinterlace_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcdev);
- pcdev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
- if (IS_ERR(pcdev->alloc_ctx)) {
- v4l2_err(&pcdev->v4l2_dev, "Failed to alloc vb2 context\n");
- ret = PTR_ERR(pcdev->alloc_ctx);
- goto err_ctx;
- }
-
pcdev->m2m_dev = v4l2_m2m_init(&m2m_ops);
if (IS_ERR(pcdev->m2m_dev)) {
v4l2_err(&pcdev->v4l2_dev, "Failed to init mem2mem device\n");
@@ -1064,8 +1056,6 @@ static int deinterlace_probe(struct platform_device *pdev)
err_m2m:
video_unregister_device(&pcdev->vfd);
-err_ctx:
- vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
unreg_dev:
v4l2_device_unregister(&pcdev->v4l2_dev);
rel_dma:
@@ -1082,7 +1072,6 @@ static int deinterlace_remove(struct platform_device *pdev)
v4l2_m2m_release(pcdev->m2m_dev);
video_unregister_device(&pcdev->vfd);
v4l2_device_unregister(&pcdev->v4l2_dev);
- vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
dma_release_channel(pcdev->dma_chan);
return 0;
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index 9b878deb1..af59bf4dc 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -973,7 +973,7 @@ static int mcam_cam_set_flip(struct mcam_camera *cam)
memset(&ctrl, 0, sizeof(ctrl));
ctrl.id = V4L2_CID_VFLIP;
ctrl.value = flip;
- return sensor_call(cam, core, s_ctrl, &ctrl);
+ return v4l2_s_ctrl(NULL, cam->sensor->ctrl_handler, &ctrl);
}
@@ -1051,7 +1051,7 @@ static int mcam_read_setup(struct mcam_camera *cam)
static int mcam_vb_queue_setup(struct vb2_queue *vq,
unsigned int *nbufs,
unsigned int *num_planes, unsigned int sizes[],
- void *alloc_ctxs[])
+ struct device *alloc_devs[])
{
struct mcam_camera *cam = vb2_get_drv_priv(vq);
int minbufs = (cam->buffer_mode == B_DMA_contig) ? 3 : 2;
@@ -1059,10 +1059,6 @@ static int mcam_vb_queue_setup(struct vb2_queue *vq,
if (*nbufs < minbufs)
*nbufs = minbufs;
- if (cam->buffer_mode == B_DMA_contig)
- alloc_ctxs[0] = cam->vb_alloc_ctx;
- else if (cam->buffer_mode == B_DMA_sg)
- alloc_ctxs[0] = cam->vb_alloc_ctx_sg;
if (*num_planes)
return sizes[0] < size ? -EINVAL : 0;
@@ -1271,6 +1267,7 @@ static int mcam_setup_vb2(struct mcam_camera *cam)
vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
vq->buf_struct_size = sizeof(struct mcam_vb_buffer);
+ vq->dev = cam->dev;
INIT_LIST_HEAD(&cam->buffers);
switch (cam->buffer_mode) {
case B_DMA_contig:
@@ -1279,9 +1276,6 @@ static int mcam_setup_vb2(struct mcam_camera *cam)
vq->mem_ops = &vb2_dma_contig_memops;
cam->dma_setup = mcam_ctlr_dma_contig;
cam->frame_complete = mcam_dma_contig_done;
- cam->vb_alloc_ctx = vb2_dma_contig_init_ctx(cam->dev);
- if (IS_ERR(cam->vb_alloc_ctx))
- return PTR_ERR(cam->vb_alloc_ctx);
#endif
break;
case B_DMA_sg:
@@ -1290,9 +1284,6 @@ static int mcam_setup_vb2(struct mcam_camera *cam)
vq->mem_ops = &vb2_dma_sg_memops;
cam->dma_setup = mcam_ctlr_dma_sg;
cam->frame_complete = mcam_dma_sg_done;
- cam->vb_alloc_ctx_sg = vb2_dma_sg_init_ctx(cam->dev);
- if (IS_ERR(cam->vb_alloc_ctx_sg))
- return PTR_ERR(cam->vb_alloc_ctx_sg);
#endif
break;
case B_vmalloc:
@@ -1309,18 +1300,6 @@ static int mcam_setup_vb2(struct mcam_camera *cam)
return vb2_queue_init(vq);
}
-static void mcam_cleanup_vb2(struct mcam_camera *cam)
-{
-#ifdef MCAM_MODE_DMA_CONTIG
- if (cam->buffer_mode == B_DMA_contig)
- vb2_dma_contig_cleanup_ctx(cam->vb_alloc_ctx);
-#endif
-#ifdef MCAM_MODE_DMA_SG
- if (cam->buffer_mode == B_DMA_sg)
- vb2_dma_sg_cleanup_ctx(cam->vb_alloc_ctx_sg);
-#endif
-}
-
/* ---------------------------------------------------------------------- */
/*
@@ -1875,7 +1854,6 @@ void mccic_shutdown(struct mcam_camera *cam)
cam_warn(cam, "Removing a device with users!\n");
mcam_ctlr_power_down(cam);
}
- mcam_cleanup_vb2(cam);
if (cam->buffer_mode == B_vmalloc)
mcam_free_dma_bufs(cam);
video_unregister_device(&cam->vdev);
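
The mcam hunks also show that q->dev is allocator-agnostic: one assignment
covers both vb2_dma_contig_memops and vb2_dma_sg_memops, which is why the two
per-mode alloc contexts and mcam_cleanup_vb2() disappear entirely. In sketch
form (use_sg is illustrative, not a field from this driver):

/* Sketch: a single queue device serves either DMA mem_ops. */
vq->dev = cam->dev;
vq->mem_ops = use_sg ? &vb2_dma_sg_memops : &vb2_dma_contig_memops;
ret = vb2_queue_init(vq);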
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.h b/drivers/media/platform/marvell-ccic/mcam-core.h
index 35cd9e5ae..beb339f55 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.h
+++ b/drivers/media/platform/marvell-ccic/mcam-core.h
@@ -176,8 +176,6 @@ struct mcam_camera {
/* DMA buffers - DMA modes */
struct mcam_vb_buffer *vb_bufs[MAX_DMA_BUFS];
- struct vb2_alloc_ctx *vb_alloc_ctx;
- struct vb2_alloc_ctx *vb_alloc_ctx_sg;
/* Mode-specific ops, set at open time */
void (*dma_setup)(struct mcam_camera *cam);
diff --git a/drivers/media/platform/mtk-vcodec/Makefile b/drivers/media/platform/mtk-vcodec/Makefile
new file mode 100644
index 000000000..dc5cb006d
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/Makefile
@@ -0,0 +1,19 @@
+
+
+obj-$(CONFIG_VIDEO_MEDIATEK_VCODEC) += mtk-vcodec-enc.o mtk-vcodec-common.o
+
+
+
+mtk-vcodec-enc-y := venc/venc_vp8_if.o \
+ venc/venc_h264_if.o \
+ mtk_vcodec_enc.o \
+ mtk_vcodec_enc_drv.o \
+ mtk_vcodec_enc_pm.o \
+ venc_drv_if.o \
+ venc_vpu_if.o \
+
+
+mtk-vcodec-common-y := mtk_vcodec_intr.o \
+ mtk_vcodec_util.o\
+
+ccflags-y += -I$(srctree)/drivers/media/platform/mtk-vpu
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
new file mode 100644
index 000000000..3a8e6958a
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
@@ -0,0 +1,334 @@
+/*
+* Copyright (c) 2016 MediaTek Inc.
+* Author: PC Chen <pc.chen@mediatek.com>
+* Tiffany Lin <tiffany.lin@mediatek.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#ifndef _MTK_VCODEC_DRV_H_
+#define _MTK_VCODEC_DRV_H_
+
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+
+
+#define MTK_VCODEC_DRV_NAME "mtk_vcodec_drv"
+#define MTK_VCODEC_ENC_NAME "mtk-vcodec-enc"
+#define MTK_PLATFORM_STR "platform:mt8173"
+
+
+#define MTK_VCODEC_MAX_PLANES 3
+#define MTK_V4L2_BENCHMARK 0
+#define WAIT_INTR_TIMEOUT_MS 1000
+
+/**
+ * enum mtk_hw_reg_idx - MTK hw register base index
+ */
+enum mtk_hw_reg_idx {
+ VDEC_SYS,
+ VDEC_MISC,
+ VDEC_LD,
+ VDEC_TOP,
+ VDEC_CM,
+ VDEC_AD,
+ VDEC_AV,
+ VDEC_PP,
+ VDEC_HWD,
+ VDEC_HWQ,
+ VDEC_HWB,
+ VDEC_HWG,
+ NUM_MAX_VDEC_REG_BASE,
+ /* h264 encoder */
+ VENC_SYS = NUM_MAX_VDEC_REG_BASE,
+ /* vp8 encoder */
+ VENC_LT_SYS,
+ NUM_MAX_VCODEC_REG_BASE
+};
+
+/**
+ * enum mtk_instance_type - The type of an MTK Vcodec instance.
+ */
+enum mtk_instance_type {
+ MTK_INST_DECODER = 0,
+ MTK_INST_ENCODER = 1,
+};
+
+/**
+ * enum mtk_instance_state - The state of an MTK Vcodec instance.
+ * @MTK_STATE_FREE - default state when instance is created
+ * @MTK_STATE_INIT - vcodec instance is initialized
+ * @MTK_STATE_HEADER - vdec had sps/pps header parsed or venc
+ * had sps/pps header encoded
+ * @MTK_STATE_FLUSH - vdec is flushing. Only used by decoder
+ * @MTK_STATE_ABORT - vcodec should be aborted
+ */
+enum mtk_instance_state {
+ MTK_STATE_FREE = 0,
+ MTK_STATE_INIT = 1,
+ MTK_STATE_HEADER = 2,
+ MTK_STATE_FLUSH = 3,
+ MTK_STATE_ABORT = 4,
+};
+
+/**
+ * enum mtk_encode_param - General encoding parameters type
+ */
+enum mtk_encode_param {
+ MTK_ENCODE_PARAM_NONE = 0,
+ MTK_ENCODE_PARAM_BITRATE = (1 << 0),
+ MTK_ENCODE_PARAM_FRAMERATE = (1 << 1),
+ MTK_ENCODE_PARAM_INTRA_PERIOD = (1 << 2),
+ MTK_ENCODE_PARAM_FORCE_INTRA = (1 << 3),
+ MTK_ENCODE_PARAM_GOP_SIZE = (1 << 4),
+};
+
+enum mtk_fmt_type {
+ MTK_FMT_DEC = 0,
+ MTK_FMT_ENC = 1,
+ MTK_FMT_FRAME = 2,
+};
+
+/**
+ * struct mtk_video_fmt - Structure used to store information about pixelformats
+ */
+struct mtk_video_fmt {
+ u32 fourcc;
+ enum mtk_fmt_type type;
+ u32 num_planes;
+};
+
+/**
+ * struct mtk_codec_framesizes - Structure used to store information about
+ * framesizes
+ */
+struct mtk_codec_framesizes {
+ u32 fourcc;
+ struct v4l2_frmsize_stepwise stepwise;
+};
+
+/**
+ * enum mtk_q_type - Type of queue
+ */
+enum mtk_q_type {
+ MTK_Q_DATA_SRC = 0,
+ MTK_Q_DATA_DST = 1,
+};
+
+/**
+ * struct mtk_q_data - Structure used to store information about queue
+ */
+struct mtk_q_data {
+ unsigned int visible_width;
+ unsigned int visible_height;
+ unsigned int coded_width;
+ unsigned int coded_height;
+ enum v4l2_field field;
+ unsigned int bytesperline[MTK_VCODEC_MAX_PLANES];
+ unsigned int sizeimage[MTK_VCODEC_MAX_PLANES];
+ struct mtk_video_fmt *fmt;
+};
+
+/**
+ * struct mtk_enc_params - General encoding parameters
+ * @bitrate: target bitrate in bits per second
+ * @num_b_frame: number of b frames between p-frame
+ * @rc_frame: frame based rate control
+ * @rc_mb: macroblock based rate control
+ * @seq_hdr_mode: H.264 sequence header is encoded separately or joined
+ * with the first frame
+ * @intra_period: I frame period
+ * @gop_size: group of picture size, it's used as the intra frame period
+ * @framerate_num: frame rate numerator. ex: framerate_num=30 and
+ * framerate_denom=1 means FPS is 30
+ * @framerate_denom: frame rate denominator. ex: framerate_num=30 and
+ * framerate_denom=1 means FPS is 30
+ * @h264_max_qp: Max value for H.264 quantization parameter
+ * @h264_profile: V4L2 defined H.264 profile
+ * @h264_level: V4L2 defined H.264 level
+ * @force_intra: force/insert intra frame
+ */
+struct mtk_enc_params {
+ unsigned int bitrate;
+ unsigned int num_b_frame;
+ unsigned int rc_frame;
+ unsigned int rc_mb;
+ unsigned int seq_hdr_mode;
+ unsigned int intra_period;
+ unsigned int gop_size;
+ unsigned int framerate_num;
+ unsigned int framerate_denom;
+ unsigned int h264_max_qp;
+ unsigned int h264_profile;
+ unsigned int h264_level;
+ unsigned int force_intra;
+};
+
+/**
+ * struct mtk_vcodec_pm - Power management data structure
+ */
+struct mtk_vcodec_pm {
+ struct clk *vcodecpll;
+ struct clk *univpll_d2;
+ struct clk *clk_cci400_sel;
+ struct clk *vdecpll;
+ struct clk *vdec_sel;
+ struct clk *vencpll_d2;
+ struct clk *venc_sel;
+ struct clk *univpll1_d2;
+ struct clk *venc_lt_sel;
+ struct device *larbvdec;
+ struct device *larbvenc;
+ struct device *larbvenclt;
+ struct device *dev;
+ struct mtk_vcodec_dev *mtkdev;
+};
+
+/**
+ * struct mtk_vcodec_ctx - Context (instance) private data.
+ *
+ * @type: type of the instance - decoder or encoder
+ * @dev: pointer to the mtk_vcodec_dev of the device
+ * @list: link to ctx_list of mtk_vcodec_dev
+ * @fh: struct v4l2_fh
+ * @m2m_ctx: pointer to the v4l2_m2m_ctx of the context
+ * @q_data: store information of input and output queue
+ * of the context
+ * @id: index of the context that this structure describes
+ * @state: state of the context
+ * @param_change: bitmask of encode parameters that have changed
+ * @enc_params: encoding parameters
+ * @enc_if: hooked encoder driver interface
+ * @drv_handle: driver handle for specific decode/encode instance
+ *
+ * @int_cond: variable used by the waitqueue
+ * @int_type: type of the last interrupt
+ * @queue: waitqueue that can be used to wait for this context to
+ * finish
+ * @irq_status: irq status
+ *
+ * @ctrl_hdl: handler for v4l2 framework
+ * @encode_work: worker for the encoding
+ *
+ * @colorspace: enum v4l2_colorspace; supplemental to pixelformat
+ * @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding
+ * @quantization: enum v4l2_quantization, colorspace quantization
+ * @xfer_func: enum v4l2_xfer_func, colorspace transfer function
+ */
+struct mtk_vcodec_ctx {
+ enum mtk_instance_type type;
+ struct mtk_vcodec_dev *dev;
+ struct list_head list;
+
+ struct v4l2_fh fh;
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct mtk_q_data q_data[2];
+ int id;
+ enum mtk_instance_state state;
+ enum mtk_encode_param param_change;
+ struct mtk_enc_params enc_params;
+
+ struct venc_common_if *enc_if;
+ unsigned long drv_handle;
+
+ int int_cond;
+ int int_type;
+ wait_queue_head_t queue;
+ unsigned int irq_status;
+
+ struct v4l2_ctrl_handler ctrl_hdl;
+ struct work_struct encode_work;
+
+ enum v4l2_colorspace colorspace;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_quantization quantization;
+ enum v4l2_xfer_func xfer_func;
+};
+
+/**
+ * struct mtk_vcodec_dev - driver data
+ * @v4l2_dev: V4L2 device to register video devices for.
+ * @vfd_enc: Video device for encoder.
+ *
+ * @m2m_dev_enc: m2m device for encoder.
+ * @plat_dev: platform device
+ * @vpu_plat_dev: mtk vpu platform device
+ * @ctx_list: list of struct mtk_vcodec_ctx
+ * @irqlock: protect data access by irq handler and work thread
+ * @curr_ctx: The context that is waiting for codec hardware
+ *
+ * @reg_base: Mapped address of MTK Vcodec registers.
+ *
+ * @id_counter: used to identify current opened instance
+ * @num_instances: counter of active MTK Vcodec instances
+ *
+ * @encode_workqueue: encode work queue
+ *
+ * @int_cond: flag set when an interrupt condition has happened
+ * @int_type: identifies which kind of interrupt condition happened
+ * @dev_mutex: video_device lock
+ * @queue: waitqueue for waiting for completion of device commands
+ *
+ * @enc_irq: h264 encoder irq resource
+ * @enc_lt_irq: vp8 encoder irq resource
+ *
+ * @enc_mutex: encoder hardware lock.
+ *
+ * @pm: power management control
+ * @dec_capability: used to identify decode capability, ex: 4k
+ * @enc_capability: used to identify encode capability
+ */
+struct mtk_vcodec_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device *vfd_enc;
+
+ struct v4l2_m2m_dev *m2m_dev_enc;
+ struct platform_device *plat_dev;
+ struct platform_device *vpu_plat_dev;
+ struct list_head ctx_list;
+ spinlock_t irqlock;
+ struct mtk_vcodec_ctx *curr_ctx;
+ void __iomem *reg_base[NUM_MAX_VCODEC_REG_BASE];
+
+ unsigned long id_counter;
+ int num_instances;
+
+ struct workqueue_struct *encode_workqueue;
+
+ int int_cond;
+ int int_type;
+ struct mutex dev_mutex;
+ wait_queue_head_t queue;
+
+ int enc_irq;
+ int enc_lt_irq;
+
+ struct mutex enc_mutex;
+
+ struct mtk_vcodec_pm pm;
+ unsigned int dec_capability;
+ unsigned int enc_capability;
+};
+
+static inline struct mtk_vcodec_ctx *fh_to_ctx(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct mtk_vcodec_ctx, fh);
+}
+
+static inline struct mtk_vcodec_ctx *ctrl_to_ctx(struct v4l2_ctrl *ctrl)
+{
+ return container_of(ctrl->handler, struct mtk_vcodec_ctx, ctrl_hdl);
+}
+
+#endif /* _MTK_VCODEC_DRV_H_ */
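
reg_base[] in struct mtk_vcodec_dev above is indexed by enum mtk_hw_reg_idx,
so per-block register access is just an offset from the matching mapped base.
A hedged sketch of an accessor this header implies but does not itself define
(helper name illustrative):

/* Sketch: per-block MMIO read via the mapped bases (illustrative). */
static inline u32 mtk_vcodec_readl(struct mtk_vcodec_dev *dev,
				   enum mtk_hw_reg_idx idx, u32 offset)
{
	return readl(dev->reg_base[idx] + offset);
}

/* e.g. a VENC_SYS register: mtk_vcodec_readl(dev, VENC_SYS, 0x0) */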
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
new file mode 100644
index 000000000..2c5719ac2
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
@@ -0,0 +1,1302 @@
+/*
+* Copyright (c) 2016 MediaTek Inc.
+* Author: PC Chen <pc.chen@mediatek.com>
+* Tiffany Lin <tiffany.lin@mediatek.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+#include <soc/mediatek/smi.h>
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_enc.h"
+#include "mtk_vcodec_intr.h"
+#include "mtk_vcodec_util.h"
+#include "venc_drv_if.h"
+
+#define MTK_VENC_MIN_W 160U
+#define MTK_VENC_MIN_H 128U
+#define MTK_VENC_MAX_W 1920U
+#define MTK_VENC_MAX_H 1088U
+#define DFT_CFG_WIDTH MTK_VENC_MIN_W
+#define DFT_CFG_HEIGHT MTK_VENC_MIN_H
+#define MTK_MAX_CTRLS_HINT 20
+#define OUT_FMT_IDX 0
+#define CAP_FMT_IDX 4
+
+
+static void mtk_venc_worker(struct work_struct *work);
+
+static struct mtk_video_fmt mtk_video_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .type = MTK_FMT_FRAME,
+ .num_planes = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV21M,
+ .type = MTK_FMT_FRAME,
+ .num_planes = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420M,
+ .type = MTK_FMT_FRAME,
+ .num_planes = 3,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YVU420M,
+ .type = MTK_FMT_FRAME,
+ .num_planes = 3,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_H264,
+ .type = MTK_FMT_ENC,
+ .num_planes = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8,
+ .type = MTK_FMT_ENC,
+ .num_planes = 1,
+ },
+};
+
+#define NUM_FORMATS ARRAY_SIZE(mtk_video_formats)
+
+static const struct mtk_codec_framesizes mtk_venc_framesizes[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_H264,
+ .stepwise = { MTK_VENC_MIN_W, MTK_VENC_MAX_W, 16,
+ MTK_VENC_MIN_H, MTK_VENC_MAX_H, 16 },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8,
+ .stepwise = { MTK_VENC_MIN_W, MTK_VENC_MAX_W, 16,
+ MTK_VENC_MIN_H, MTK_VENC_MAX_H, 16 },
+ },
+};
+
+#define NUM_SUPPORTED_FRAMESIZE ARRAY_SIZE(mtk_venc_framesizes)
+
+static int vidioc_venc_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mtk_vcodec_ctx *ctx = ctrl_to_ctx(ctrl);
+ struct mtk_enc_params *p = &ctx->enc_params;
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_VIDEO_BITRATE:
+ mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_BITRATE val = %d",
+ ctrl->val);
+ p->bitrate = ctrl->val;
+ ctx->param_change |= MTK_ENCODE_PARAM_BITRATE;
+ break;
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+ mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_B_FRAMES val = %d",
+ ctrl->val);
+ p->num_b_frame = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
+ mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE val = %d",
+ ctrl->val);
+ p->rc_frame = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_MAX_QP:
+ mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_H264_MAX_QP val = %d",
+ ctrl->val);
+ p->h264_max_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
+ mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_HEADER_MODE val = %d",
+ ctrl->val);
+ p->seq_hdr_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE:
+ mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE val = %d",
+ ctrl->val);
+ p->rc_mb = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_H264_PROFILE val = %d",
+ ctrl->val);
+ p->h264_profile = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_H264_LEVEL val = %d",
+ ctrl->val);
+ p->h264_level = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD:
+ mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_H264_I_PERIOD val = %d",
+ ctrl->val);
+ p->intra_period = ctrl->val;
+ ctx->param_change |= MTK_ENCODE_PARAM_INTRA_PERIOD;
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_GOP_SIZE val = %d",
+ ctrl->val);
+ p->gop_size = ctrl->val;
+ ctx->param_change |= MTK_ENCODE_PARAM_GOP_SIZE;
+ break;
+ case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME:
+ mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME");
+ p->force_intra = 1;
+ ctx->param_change |= MTK_ENCODE_PARAM_FORCE_INTRA;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops mtk_vcodec_enc_ctrl_ops = {
+ .s_ctrl = vidioc_venc_s_ctrl,
+};
+
+static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, bool output_queue)
+{
+ struct mtk_video_fmt *fmt;
+ int i, j = 0;
+
+ for (i = 0; i < NUM_FORMATS; ++i) {
+ if (output_queue && mtk_video_formats[i].type != MTK_FMT_FRAME)
+ continue;
+ if (!output_queue && mtk_video_formats[i].type != MTK_FMT_ENC)
+ continue;
+
+ if (j == f->index) {
+ fmt = &mtk_video_formats[i];
+ f->pixelformat = fmt->fourcc;
+ memset(f->reserved, 0, sizeof(f->reserved));
+ return 0;
+ }
+ ++j;
+ }
+
+ return -EINVAL;
+}
+
+static int vidioc_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ int i = 0;
+
+ if (fsize->index != 0)
+ return -EINVAL;
+
+ for (i = 0; i < NUM_SUPPORTED_FRAMESIZE; ++i) {
+ if (fsize->pixel_format != mtk_venc_framesizes[i].fourcc)
+ continue;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise = mtk_venc_framesizes[i].stepwise;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int vidioc_enum_fmt_vid_cap_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(f, false);
+}
+
+static int vidioc_enum_fmt_vid_out_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(f, true);
+}
+
+static int vidioc_venc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strlcpy(cap->driver, MTK_VCODEC_ENC_NAME, sizeof(cap->driver));
+ strlcpy(cap->bus_info, MTK_PLATFORM_STR, sizeof(cap->bus_info));
+ strlcpy(cap->card, MTK_PLATFORM_STR, sizeof(cap->card));
+
+ return 0;
+}
+
+static int vidioc_venc_s_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *a)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return -EINVAL;
+
+ ctx->enc_params.framerate_num =
+ a->parm.output.timeperframe.denominator;
+ ctx->enc_params.framerate_denom =
+ a->parm.output.timeperframe.numerator;
+ ctx->param_change |= MTK_ENCODE_PARAM_FRAMERATE;
+
+ return 0;
+}
+
+static int vidioc_venc_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *a)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return -EINVAL;
+
+ a->parm.output.timeperframe.denominator =
+ ctx->enc_params.framerate_num;
+ a->parm.output.timeperframe.numerator =
+ ctx->enc_params.framerate_denom;
+
+ return 0;
+}
+
+static struct mtk_q_data *mtk_venc_get_q_data(struct mtk_vcodec_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return &ctx->q_data[MTK_Q_DATA_SRC];
+
+ return &ctx->q_data[MTK_Q_DATA_DST];
+}
+
+static struct mtk_video_fmt *mtk_venc_find_format(struct v4l2_format *f)
+{
+ struct mtk_video_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < NUM_FORMATS; k++) {
+ fmt = &mtk_video_formats[k];
+ if (fmt->fourcc == f->fmt.pix.pixelformat)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+/* V4L2 specification suggests the driver corrects the format struct if any of
+ * the dimensions is unsupported
+ */
+static int vidioc_try_fmt(struct v4l2_format *f, struct mtk_video_fmt *fmt)
+{
+ struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
+ int i;
+
+ pix_fmt_mp->field = V4L2_FIELD_NONE;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ pix_fmt_mp->num_planes = 1;
+ pix_fmt_mp->plane_fmt[0].bytesperline = 0;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ int tmp_w, tmp_h;
+
+ pix_fmt_mp->height = clamp(pix_fmt_mp->height,
+ MTK_VENC_MIN_H,
+ MTK_VENC_MAX_H);
+ pix_fmt_mp->width = clamp(pix_fmt_mp->width,
+ MTK_VENC_MIN_W,
+ MTK_VENC_MAX_W);
+
+ /* find next closer width align 16, height align 32, size align
+ * 64 rectangle
+ */
+ tmp_w = pix_fmt_mp->width;
+ tmp_h = pix_fmt_mp->height;
+ v4l_bound_align_image(&pix_fmt_mp->width,
+ MTK_VENC_MIN_W,
+ MTK_VENC_MAX_W, 4,
+ &pix_fmt_mp->height,
+ MTK_VENC_MIN_H,
+ MTK_VENC_MAX_H, 5, 6);
+
+ if (pix_fmt_mp->width < tmp_w &&
+ (pix_fmt_mp->width + 16) <= MTK_VENC_MAX_W)
+ pix_fmt_mp->width += 16;
+ if (pix_fmt_mp->height < tmp_h &&
+ (pix_fmt_mp->height + 32) <= MTK_VENC_MAX_H)
+ pix_fmt_mp->height += 32;
+
+ mtk_v4l2_debug(0,
+ "before resize width=%d, height=%d, after resize width=%d, height=%d, sizeimage=%d %d",
+ tmp_w, tmp_h, pix_fmt_mp->width,
+ pix_fmt_mp->height,
+ pix_fmt_mp->plane_fmt[0].sizeimage,
+ pix_fmt_mp->plane_fmt[1].sizeimage);
+
+ pix_fmt_mp->num_planes = fmt->num_planes;
+ pix_fmt_mp->plane_fmt[0].sizeimage =
+ pix_fmt_mp->width * pix_fmt_mp->height +
+ ((ALIGN(pix_fmt_mp->width, 16) * 2) * 16);
+ pix_fmt_mp->plane_fmt[0].bytesperline = pix_fmt_mp->width;
+
+ if (pix_fmt_mp->num_planes == 2) {
+ pix_fmt_mp->plane_fmt[1].sizeimage =
+ (pix_fmt_mp->width * pix_fmt_mp->height) / 2 +
+ (ALIGN(pix_fmt_mp->width, 16) * 16);
+ pix_fmt_mp->plane_fmt[2].sizeimage = 0;
+ pix_fmt_mp->plane_fmt[1].bytesperline =
+ pix_fmt_mp->width;
+ pix_fmt_mp->plane_fmt[2].bytesperline = 0;
+ } else if (pix_fmt_mp->num_planes == 3) {
+ pix_fmt_mp->plane_fmt[1].sizeimage =
+ pix_fmt_mp->plane_fmt[2].sizeimage =
+ (pix_fmt_mp->width * pix_fmt_mp->height) / 4 +
+ ((ALIGN(pix_fmt_mp->width, 16) / 2) * 16);
+ pix_fmt_mp->plane_fmt[1].bytesperline =
+ pix_fmt_mp->plane_fmt[2].bytesperline =
+ pix_fmt_mp->width / 2;
+ }
+ }
+
+ for (i = 0; i < pix_fmt_mp->num_planes; i++)
+ memset(&(pix_fmt_mp->plane_fmt[i].reserved[0]), 0x0,
+ sizeof(pix_fmt_mp->plane_fmt[0].reserved));
+
+ pix_fmt_mp->flags = 0;
+ memset(&pix_fmt_mp->reserved, 0x0,
+ sizeof(pix_fmt_mp->reserved));
+
+ return 0;
+}
+
+static void mtk_venc_set_param(struct mtk_vcodec_ctx *ctx,
+ struct venc_enc_param *param)
+{
+ struct mtk_q_data *q_data_src = &ctx->q_data[MTK_Q_DATA_SRC];
+ struct mtk_enc_params *enc_params = &ctx->enc_params;
+
+ switch (q_data_src->fmt->fourcc) {
+ case V4L2_PIX_FMT_YUV420M:
+ param->input_yuv_fmt = VENC_YUV_FORMAT_I420;
+ break;
+ case V4L2_PIX_FMT_YVU420M:
+ param->input_yuv_fmt = VENC_YUV_FORMAT_YV12;
+ break;
+ case V4L2_PIX_FMT_NV12M:
+ param->input_yuv_fmt = VENC_YUV_FORMAT_NV12;
+ break;
+ case V4L2_PIX_FMT_NV21M:
+ param->input_yuv_fmt = VENC_YUV_FORMAT_NV21;
+ break;
+ default:
+ mtk_v4l2_err("Unsupport fourcc =%d", q_data_src->fmt->fourcc);
+ break;
+ }
+ param->h264_profile = enc_params->h264_profile;
+ param->h264_level = enc_params->h264_level;
+
+ /* Config visible resolution */
+ param->width = q_data_src->visible_width;
+ param->height = q_data_src->visible_height;
+ /* Config coded resolution */
+ param->buf_width = q_data_src->coded_width;
+ param->buf_height = q_data_src->coded_height;
+ param->frm_rate = enc_params->framerate_num /
+ enc_params->framerate_denom;
+ param->intra_period = enc_params->intra_period;
+ param->gop_size = enc_params->gop_size;
+ param->bitrate = enc_params->bitrate;
+
+ mtk_v4l2_debug(0,
+ "fmt 0x%x, P/L %d/%d, w/h %d/%d, buf %d/%d, fps/bps %d/%d, gop %d, i_period %d",
+ param->input_yuv_fmt, param->h264_profile,
+ param->h264_level, param->width, param->height,
+ param->buf_width, param->buf_height,
+ param->frm_rate, param->bitrate,
+ param->gop_size, param->intra_period);
+}
+
+static int vidioc_venc_s_fmt_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct vb2_queue *vq;
+ struct mtk_q_data *q_data;
+ int i, ret;
+ struct mtk_video_fmt *fmt;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (!vq) {
+ mtk_v4l2_err("fail to get vq");
+ return -EINVAL;
+ }
+
+ if (vb2_is_busy(vq)) {
+ mtk_v4l2_err("queue busy");
+ return -EBUSY;
+ }
+
+ q_data = mtk_venc_get_q_data(ctx, f->type);
+ if (!q_data) {
+ mtk_v4l2_err("fail to get q data");
+ return -EINVAL;
+ }
+
+ fmt = mtk_venc_find_format(f);
+ if (!fmt) {
+ f->fmt.pix.pixelformat = mtk_video_formats[CAP_FMT_IDX].fourcc;
+ fmt = mtk_venc_find_format(f);
+ }
+
+ q_data->fmt = fmt;
+ ret = vidioc_try_fmt(f, q_data->fmt);
+ if (ret)
+ return ret;
+
+ q_data->coded_width = f->fmt.pix_mp.width;
+ q_data->coded_height = f->fmt.pix_mp.height;
+ q_data->field = f->fmt.pix_mp.field;
+
+ for (i = 0; i < f->fmt.pix_mp.num_planes; i++) {
+ struct v4l2_plane_pix_format *plane_fmt;
+
+ plane_fmt = &f->fmt.pix_mp.plane_fmt[i];
+ q_data->bytesperline[i] = plane_fmt->bytesperline;
+ q_data->sizeimage[i] = plane_fmt->sizeimage;
+ }
+
+ if (ctx->state == MTK_STATE_FREE) {
+ ret = venc_if_init(ctx, q_data->fmt->fourcc);
+ if (ret) {
+ mtk_v4l2_err("venc_if_init failed=%d, codec type=%x",
+ ret, q_data->fmt->fourcc);
+ return -EBUSY;
+ }
+ ctx->state = MTK_STATE_INIT;
+ }
+
+ return 0;
+}
+
+static int vidioc_venc_s_fmt_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct vb2_queue *vq;
+ struct mtk_q_data *q_data;
+ int ret, i;
+ struct mtk_video_fmt *fmt;
+ struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (!vq) {
+ mtk_v4l2_err("fail to get vq");
+ return -EINVAL;
+ }
+
+ if (vb2_is_busy(vq)) {
+ mtk_v4l2_err("queue busy");
+ return -EBUSY;
+ }
+
+ q_data = mtk_venc_get_q_data(ctx, f->type);
+ if (!q_data) {
+ mtk_v4l2_err("fail to get q data");
+ return -EINVAL;
+ }
+
+ fmt = mtk_venc_find_format(f);
+ if (!fmt) {
+ f->fmt.pix.pixelformat = mtk_video_formats[OUT_FMT_IDX].fourcc;
+ fmt = mtk_venc_find_format(f);
+ }
+
+ pix_fmt_mp->height = clamp(pix_fmt_mp->height,
+ MTK_VENC_MIN_H,
+ MTK_VENC_MAX_H);
+ pix_fmt_mp->width = clamp(pix_fmt_mp->width,
+ MTK_VENC_MIN_W,
+ MTK_VENC_MAX_W);
+
+ q_data->visible_width = f->fmt.pix_mp.width;
+ q_data->visible_height = f->fmt.pix_mp.height;
+ q_data->fmt = fmt;
+ ret = vidioc_try_fmt(f, q_data->fmt);
+ if (ret)
+ return ret;
+
+ q_data->coded_width = f->fmt.pix_mp.width;
+ q_data->coded_height = f->fmt.pix_mp.height;
+
+ q_data->field = f->fmt.pix_mp.field;
+ ctx->colorspace = f->fmt.pix_mp.colorspace;
+ ctx->ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
+ ctx->quantization = f->fmt.pix_mp.quantization;
+ ctx->xfer_func = f->fmt.pix_mp.xfer_func;
+
+ for (i = 0; i < f->fmt.pix_mp.num_planes; i++) {
+ struct v4l2_plane_pix_format *plane_fmt;
+
+ plane_fmt = &f->fmt.pix_mp.plane_fmt[i];
+ q_data->bytesperline[i] = plane_fmt->bytesperline;
+ q_data->sizeimage[i] = plane_fmt->sizeimage;
+ }
+
+ return 0;
+}
+
+static int vidioc_venc_g_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct vb2_queue *vq;
+ struct mtk_q_data *q_data;
+ int i;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = mtk_venc_get_q_data(ctx, f->type);
+
+ pix->width = q_data->coded_width;
+ pix->height = q_data->coded_height;
+ pix->pixelformat = q_data->fmt->fourcc;
+ pix->field = q_data->field;
+ pix->num_planes = q_data->fmt->num_planes;
+ for (i = 0; i < pix->num_planes; i++) {
+ pix->plane_fmt[i].bytesperline = q_data->bytesperline[i];
+ pix->plane_fmt[i].sizeimage = q_data->sizeimage[i];
+ memset(&(pix->plane_fmt[i].reserved[0]), 0x0,
+ sizeof(pix->plane_fmt[i].reserved));
+ }
+
+ pix->flags = 0;
+ pix->colorspace = ctx->colorspace;
+ pix->ycbcr_enc = ctx->ycbcr_enc;
+ pix->quantization = ctx->quantization;
+ pix->xfer_func = ctx->xfer_func;
+
+ return 0;
+}
+
+static int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct mtk_video_fmt *fmt;
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+ fmt = mtk_venc_find_format(f);
+ if (!fmt) {
+ f->fmt.pix.pixelformat = mtk_video_formats[CAP_FMT_IDX].fourcc;
+ fmt = mtk_venc_find_format(f);
+ }
+ f->fmt.pix_mp.colorspace = ctx->colorspace;
+ f->fmt.pix_mp.ycbcr_enc = ctx->ycbcr_enc;
+ f->fmt.pix_mp.quantization = ctx->quantization;
+ f->fmt.pix_mp.xfer_func = ctx->xfer_func;
+
+ return vidioc_try_fmt(f, fmt);
+}
+
+static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct mtk_video_fmt *fmt;
+
+ fmt = mtk_venc_find_format(f);
+ if (!fmt) {
+ f->fmt.pix.pixelformat = mtk_video_formats[OUT_FMT_IDX].fourcc;
+ fmt = mtk_venc_find_format(f);
+ }
+ if (!f->fmt.pix_mp.colorspace) {
+ f->fmt.pix_mp.colorspace = V4L2_COLORSPACE_REC709;
+ f->fmt.pix_mp.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ f->fmt.pix_mp.quantization = V4L2_QUANTIZATION_DEFAULT;
+ f->fmt.pix_mp.xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ }
+
+ return vidioc_try_fmt(f, fmt);
+}
+
+static int vidioc_venc_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+ if (ctx->state == MTK_STATE_ABORT) {
+ mtk_v4l2_err("[%d] Call on QBUF after unrecoverable error",
+ ctx->id);
+ return -EIO;
+ }
+
+ return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_venc_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+ if (ctx->state == MTK_STATE_ABORT) {
+		mtk_v4l2_err("[%d] Call on DQBUF after unrecoverable error",
+ ctx->id);
+ return -EIO;
+ }
+
+ return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+const struct v4l2_ioctl_ops mtk_venc_ioctl_ops = {
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = vidioc_venc_qbuf,
+ .vidioc_dqbuf = vidioc_venc_dqbuf,
+
+ .vidioc_querycap = vidioc_venc_querycap,
+ .vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_cap_mplane,
+ .vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_out_mplane,
+ .vidioc_enum_framesizes = vidioc_enum_framesizes,
+
+ .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_vid_cap_mplane,
+ .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt_vid_out_mplane,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+
+ .vidioc_s_parm = vidioc_venc_s_parm,
+ .vidioc_g_parm = vidioc_venc_g_parm,
+ .vidioc_s_fmt_vid_cap_mplane = vidioc_venc_s_fmt_cap,
+ .vidioc_s_fmt_vid_out_mplane = vidioc_venc_s_fmt_out,
+
+ .vidioc_g_fmt_vid_cap_mplane = vidioc_venc_g_fmt,
+ .vidioc_g_fmt_vid_out_mplane = vidioc_venc_g_fmt,
+
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+};
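+
+/*
+ * Typical userspace sequence against the ops above (illustrative sketch
+ * only, not part of this driver; error handling omitted):
+ *
+ *	struct v4l2_format f = { .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE };
+ *	f.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12M;
+ *	ioctl(fd, VIDIOC_S_FMT, &f);	   reaches vidioc_venc_s_fmt_out()
+ *	ioctl(fd, VIDIOC_REQBUFS, &req);   reaches v4l2_m2m_ioctl_reqbufs()
+ *	ioctl(fd, VIDIOC_STREAMON, &type); reaches v4l2_m2m_ioctl_streamon()
+ */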
+
+static int vb2ops_venc_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers,
+ unsigned int *nplanes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vq);
+ struct mtk_q_data *q_data;
+ unsigned int i;
+
+ q_data = mtk_venc_get_q_data(ctx, vq->type);
+
+ if (q_data == NULL)
+ return -EINVAL;
+
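+	/*
+	 * A non-zero *nplanes means the vb2 core is validating a
+	 * VIDIOC_CREATE_BUFS request: only check that the caller's plane
+	 * sizes are large enough. Otherwise report the plane count and
+	 * sizes of the currently set format.
+	 */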
+ if (*nplanes) {
+ for (i = 0; i < *nplanes; i++)
+ if (sizes[i] < q_data->sizeimage[i])
+ return -EINVAL;
+ } else {
+ *nplanes = q_data->fmt->num_planes;
+ for (i = 0; i < *nplanes; i++)
+ sizes[i] = q_data->sizeimage[i];
+ }
+
+ return 0;
+}
+
+static int vb2ops_venc_buf_prepare(struct vb2_buffer *vb)
+{
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct mtk_q_data *q_data;
+ int i;
+
+ q_data = mtk_venc_get_q_data(ctx, vb->vb2_queue->type);
+
+ for (i = 0; i < q_data->fmt->num_planes; i++) {
+ if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
+ mtk_v4l2_err("data will not fit into plane %d (%lu < %d)",
+ i, vb2_plane_size(vb, i),
+ q_data->sizeimage[i]);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void vb2ops_venc_buf_queue(struct vb2_buffer *vb)
+{
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vb2_v4l2 =
+ container_of(vb, struct vb2_v4l2_buffer, vb2_buf);
+
+ struct mtk_video_enc_buf *mtk_buf =
+ container_of(vb2_v4l2, struct mtk_video_enc_buf, vb);
+
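+	/*
+	 * Snapshot any pending parameter change into this buffer so that
+	 * mtk_venc_param_change() can apply it just before this frame is
+	 * encoded, then clear ctx->param_change for the next request.
+	 */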
+ if ((vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
+ (ctx->param_change != MTK_ENCODE_PARAM_NONE)) {
+ mtk_v4l2_debug(1, "[%d] Before id=%d encode parameter change %x",
+ ctx->id,
+ mtk_buf->vb.vb2_buf.index,
+ ctx->param_change);
+ mtk_buf->param_change = ctx->param_change;
+ mtk_buf->enc_params = ctx->enc_params;
+ ctx->param_change = MTK_ENCODE_PARAM_NONE;
+ }
+
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, to_vb2_v4l2_buffer(vb));
+}
+
+static int vb2ops_venc_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q);
+ struct venc_enc_param param;
+ int ret;
+ int i;
+
+	/* Once the state turns into MTK_STATE_ABORT, stop_streaming is
+	 * needed to clear it
+	 */
+ if ((ctx->state == MTK_STATE_ABORT) || (ctx->state == MTK_STATE_FREE)) {
+ ret = -EIO;
+ goto err_set_param;
+ }
+
+	/* Do the initialization when both queues' start_streaming have been called */
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ if (!vb2_start_streaming_called(&ctx->m2m_ctx->cap_q_ctx.q))
+ return 0;
+ } else {
+ if (!vb2_start_streaming_called(&ctx->m2m_ctx->out_q_ctx.q))
+ return 0;
+ }
+
+ mtk_venc_set_param(ctx, &param);
+ ret = venc_if_set_param(ctx, VENC_SET_PARAM_ENC, &param);
+ if (ret) {
+ mtk_v4l2_err("venc_if_set_param failed=%d", ret);
+ ctx->state = MTK_STATE_ABORT;
+ goto err_set_param;
+ }
+ ctx->param_change = MTK_ENCODE_PARAM_NONE;
+
+ if ((ctx->q_data[MTK_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_H264) &&
+ (ctx->enc_params.seq_hdr_mode !=
+ V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE)) {
+ ret = venc_if_set_param(ctx,
+ VENC_SET_PARAM_PREPEND_HEADER,
+ NULL);
+ if (ret) {
+ mtk_v4l2_err("venc_if_set_param failed=%d", ret);
+ ctx->state = MTK_STATE_ABORT;
+ goto err_set_param;
+ }
+ ctx->state = MTK_STATE_HEADER;
+ }
+
+ return 0;
+
+err_set_param:
+ for (i = 0; i < q->num_buffers; ++i) {
+ if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) {
+ mtk_v4l2_debug(0, "[%d] id=%d, type=%d, %d -> VB2_BUF_STATE_QUEUED",
+ ctx->id, i, q->type,
+ (int)q->bufs[i]->state);
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(q->bufs[i]),
+ VB2_BUF_STATE_QUEUED);
+ }
+ }
+
+ return ret;
+}
+
+static void vb2ops_venc_stop_streaming(struct vb2_queue *q)
+{
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_buffer *src_buf, *dst_buf;
+ int ret;
+
+ mtk_v4l2_debug(2, "[%d]-> type=%d", ctx->id, q->type);
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ while ((dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx))) {
+ dst_buf->planes[0].bytesused = 0;
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf),
+ VB2_BUF_STATE_ERROR);
+ }
+ } else {
+ while ((src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx)))
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf),
+ VB2_BUF_STATE_ERROR);
+ }
+
+ if ((q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ vb2_is_streaming(&ctx->m2m_ctx->out_q_ctx.q)) ||
+ (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
+ vb2_is_streaming(&ctx->m2m_ctx->cap_q_ctx.q))) {
+ mtk_v4l2_debug(1, "[%d]-> q type %d out=%d cap=%d",
+ ctx->id, q->type,
+ vb2_is_streaming(&ctx->m2m_ctx->out_q_ctx.q),
+ vb2_is_streaming(&ctx->m2m_ctx->cap_q_ctx.q));
+ return;
+ }
+
+ /* Release the encoder if both streams are stopped. */
+ ret = venc_if_deinit(ctx);
+ if (ret)
+ mtk_v4l2_err("venc_if_deinit failed=%d", ret);
+
+ ctx->state = MTK_STATE_FREE;
+}
+
+static struct vb2_ops mtk_venc_vb2_ops = {
+ .queue_setup = vb2ops_venc_queue_setup,
+ .buf_prepare = vb2ops_venc_buf_prepare,
+ .buf_queue = vb2ops_venc_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = vb2ops_venc_start_streaming,
+ .stop_streaming = vb2ops_venc_stop_streaming,
+};
+
+static int mtk_venc_encode_header(void *priv)
+{
+ struct mtk_vcodec_ctx *ctx = priv;
+ int ret;
+ struct vb2_buffer *src_buf, *dst_buf;
+ struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2;
+ struct mtk_vcodec_mem bs_buf;
+ struct venc_done_result enc_result;
+
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ if (!dst_buf) {
+ mtk_v4l2_debug(1, "No dst buffer");
+ return -EINVAL;
+ }
+
+ bs_buf.va = vb2_plane_vaddr(dst_buf, 0);
+ bs_buf.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ bs_buf.size = (size_t)dst_buf->planes[0].length;
+
+ mtk_v4l2_debug(1,
+ "[%d] buf id=%d va=0x%p dma_addr=0x%llx size=%zu",
+ ctx->id,
+ dst_buf->index, bs_buf.va,
+ (u64)bs_buf.dma_addr,
+ bs_buf.size);
+
+ ret = venc_if_encode(ctx,
+ VENC_START_OPT_ENCODE_SEQUENCE_HEADER,
+ NULL, &bs_buf, &enc_result);
+
+ if (ret) {
+ dst_buf->planes[0].bytesused = 0;
+ ctx->state = MTK_STATE_ABORT;
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf),
+ VB2_BUF_STATE_ERROR);
+ mtk_v4l2_err("venc_if_encode failed=%d", ret);
+ return -EINVAL;
+ }
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ if (src_buf) {
+ src_vb2_v4l2 = to_vb2_v4l2_buffer(src_buf);
+ dst_vb2_v4l2 = to_vb2_v4l2_buffer(dst_buf);
+ dst_buf->timestamp = src_buf->timestamp;
+ dst_vb2_v4l2->timecode = src_vb2_v4l2->timecode;
+ } else {
+ mtk_v4l2_err("No timestamp for the header buffer.");
+ }
+
+ ctx->state = MTK_STATE_HEADER;
+ dst_buf->planes[0].bytesused = enc_result.bs_size;
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), VB2_BUF_STATE_DONE);
+
+ return 0;
+}
+
+static int mtk_venc_param_change(struct mtk_vcodec_ctx *ctx)
+{
+ struct venc_enc_param enc_prm;
+ struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ struct vb2_v4l2_buffer *vb2_v4l2 =
+ container_of(vb, struct vb2_v4l2_buffer, vb2_buf);
+ struct mtk_video_enc_buf *mtk_buf =
+ container_of(vb2_v4l2, struct mtk_video_enc_buf, vb);
+
+ int ret = 0;
+
+ memset(&enc_prm, 0, sizeof(enc_prm));
+ if (mtk_buf->param_change == MTK_ENCODE_PARAM_NONE)
+ return 0;
+
+ if (mtk_buf->param_change & MTK_ENCODE_PARAM_BITRATE) {
+ enc_prm.bitrate = mtk_buf->enc_params.bitrate;
+ mtk_v4l2_debug(1, "[%d] id=%d, change param br=%d",
+ ctx->id,
+ mtk_buf->vb.vb2_buf.index,
+ enc_prm.bitrate);
+ ret |= venc_if_set_param(ctx,
+ VENC_SET_PARAM_ADJUST_BITRATE,
+ &enc_prm);
+ }
+ if (!ret && mtk_buf->param_change & MTK_ENCODE_PARAM_FRAMERATE) {
+ enc_prm.frm_rate = mtk_buf->enc_params.framerate_num /
+ mtk_buf->enc_params.framerate_denom;
+ mtk_v4l2_debug(1, "[%d] id=%d, change param fr=%d",
+ ctx->id,
+ mtk_buf->vb.vb2_buf.index,
+ enc_prm.frm_rate);
+ ret |= venc_if_set_param(ctx,
+ VENC_SET_PARAM_ADJUST_FRAMERATE,
+ &enc_prm);
+ }
+ if (!ret && mtk_buf->param_change & MTK_ENCODE_PARAM_GOP_SIZE) {
+ enc_prm.gop_size = mtk_buf->enc_params.gop_size;
+ mtk_v4l2_debug(1, "change param intra period=%d",
+ enc_prm.gop_size);
+ ret |= venc_if_set_param(ctx,
+ VENC_SET_PARAM_GOP_SIZE,
+ &enc_prm);
+ }
+ if (!ret && mtk_buf->param_change & MTK_ENCODE_PARAM_FORCE_INTRA) {
+ mtk_v4l2_debug(1, "[%d] id=%d, change param force I=%d",
+ ctx->id,
+ mtk_buf->vb.vb2_buf.index,
+ mtk_buf->enc_params.force_intra);
+ if (mtk_buf->enc_params.force_intra)
+ ret |= venc_if_set_param(ctx,
+ VENC_SET_PARAM_FORCE_INTRA,
+ NULL);
+ }
+
+ mtk_buf->param_change = MTK_ENCODE_PARAM_NONE;
+
+ if (ret) {
+ ctx->state = MTK_STATE_ABORT;
+ mtk_v4l2_err("venc_if_set_param %d failed=%d",
+ mtk_buf->param_change, ret);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * v4l2_m2m_streamoff() holds dev_mutex and waits for mtk_venc_worker()
+ * to call v4l2_m2m_job_finish().
+ * If mtk_venc_worker() tries to acquire dev_mutex, it will deadlock.
+ * So this function must not try to acquire dev->dev_mutex.
+ * This means v4l2 ioctls and mtk_venc_worker() can run at the same time.
+ * mtk_venc_worker() should be carefully implemented to avoid bugs.
+ */
+static void mtk_venc_worker(struct work_struct *work)
+{
+ struct mtk_vcodec_ctx *ctx = container_of(work, struct mtk_vcodec_ctx,
+ encode_work);
+ struct vb2_buffer *src_buf, *dst_buf;
+ struct venc_frm_buf frm_buf;
+ struct mtk_vcodec_mem bs_buf;
+ struct venc_done_result enc_result;
+ int ret, i;
+ struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2;
+
+	/* Check dst_buf: it may have been removed in device_run to store the
+	 * encode header, so check it here and call job_finish to prevent
+	 * recursion
+	 */
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ if (!dst_buf) {
+ v4l2_m2m_job_finish(ctx->dev->m2m_dev_enc, ctx->m2m_ctx);
+ return;
+ }
+
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ memset(&frm_buf, 0, sizeof(frm_buf));
+	for (i = 0; i < src_buf->num_planes; i++) {
+ frm_buf.fb_addr[i].va = vb2_plane_vaddr(src_buf, i);
+ frm_buf.fb_addr[i].dma_addr =
+ vb2_dma_contig_plane_dma_addr(src_buf, i);
+ frm_buf.fb_addr[i].size =
+ (size_t)src_buf->planes[i].length;
+ }
+ bs_buf.va = vb2_plane_vaddr(dst_buf, 0);
+ bs_buf.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ bs_buf.size = (size_t)dst_buf->planes[0].length;
+
+ mtk_v4l2_debug(2,
+		"Framebuf VA=%p PA=0x%llx Size=0x%zx; VA=%p PA=0x%llx Size=0x%zx; VA=%p PA=0x%llx Size=0x%zx",
+ frm_buf.fb_addr[0].va,
+ (u64)frm_buf.fb_addr[0].dma_addr,
+ frm_buf.fb_addr[0].size,
+ frm_buf.fb_addr[1].va,
+ (u64)frm_buf.fb_addr[1].dma_addr,
+ frm_buf.fb_addr[1].size,
+ frm_buf.fb_addr[2].va,
+ (u64)frm_buf.fb_addr[2].dma_addr,
+ frm_buf.fb_addr[2].size);
+
+ ret = venc_if_encode(ctx, VENC_START_OPT_ENCODE_FRAME,
+ &frm_buf, &bs_buf, &enc_result);
+
+ src_vb2_v4l2 = to_vb2_v4l2_buffer(src_buf);
+ dst_vb2_v4l2 = to_vb2_v4l2_buffer(dst_buf);
+
+ dst_buf->timestamp = src_buf->timestamp;
+ dst_vb2_v4l2->timecode = src_vb2_v4l2->timecode;
+
+ if (enc_result.is_key_frm)
+ dst_vb2_v4l2->flags |= V4L2_BUF_FLAG_KEYFRAME;
+
+ if (ret) {
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf),
+ VB2_BUF_STATE_ERROR);
+ dst_buf->planes[0].bytesused = 0;
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf),
+ VB2_BUF_STATE_ERROR);
+ mtk_v4l2_err("venc_if_encode failed=%d", ret);
+ } else {
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf),
+ VB2_BUF_STATE_DONE);
+ dst_buf->planes[0].bytesused = enc_result.bs_size;
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf),
+ VB2_BUF_STATE_DONE);
+ mtk_v4l2_debug(2, "venc_if_encode bs size=%d",
+ enc_result.bs_size);
+ }
+
+ v4l2_m2m_job_finish(ctx->dev->m2m_dev_enc, ctx->m2m_ctx);
+
+ mtk_v4l2_debug(1, "<=== src_buf[%d] dst_buf[%d] venc_if_encode ret=%d Size=%u===>",
+ src_buf->index, dst_buf->index, ret,
+ enc_result.bs_size);
+}
+
+static void m2mops_venc_device_run(void *priv)
+{
+ struct mtk_vcodec_ctx *ctx = priv;
+
+ if ((ctx->q_data[MTK_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_H264) &&
+ (ctx->state != MTK_STATE_HEADER)) {
+ /* encode h264 sps/pps header */
+ mtk_venc_encode_header(ctx);
+ queue_work(ctx->dev->encode_workqueue, &ctx->encode_work);
+ return;
+ }
+
+ mtk_venc_param_change(ctx);
+ queue_work(ctx->dev->encode_workqueue, &ctx->encode_work);
+}
+
+static int m2mops_venc_job_ready(void *m2m_priv)
+{
+ struct mtk_vcodec_ctx *ctx = m2m_priv;
+
+ if (ctx->state == MTK_STATE_ABORT || ctx->state == MTK_STATE_FREE) {
+ mtk_v4l2_debug(3, "[%d]Not ready: state=0x%x.",
+ ctx->id, ctx->state);
+ return 0;
+ }
+
+ return 1;
+}
+
+static void m2mops_venc_job_abort(void *priv)
+{
+ struct mtk_vcodec_ctx *ctx = priv;
+
+ ctx->state = MTK_STATE_ABORT;
+}
+
+static void m2mops_venc_lock(void *m2m_priv)
+{
+ struct mtk_vcodec_ctx *ctx = m2m_priv;
+
+ mutex_lock(&ctx->dev->dev_mutex);
+}
+
+static void m2mops_venc_unlock(void *m2m_priv)
+{
+ struct mtk_vcodec_ctx *ctx = m2m_priv;
+
+ mutex_unlock(&ctx->dev->dev_mutex);
+}
+
+const struct v4l2_m2m_ops mtk_venc_m2m_ops = {
+ .device_run = m2mops_venc_device_run,
+ .job_ready = m2mops_venc_job_ready,
+ .job_abort = m2mops_venc_job_abort,
+ .lock = m2mops_venc_lock,
+ .unlock = m2mops_venc_unlock,
+};
+
+void mtk_vcodec_enc_set_default_params(struct mtk_vcodec_ctx *ctx)
+{
+ struct mtk_q_data *q_data;
+
+ ctx->m2m_ctx->q_lock = &ctx->dev->dev_mutex;
+ ctx->fh.m2m_ctx = ctx->m2m_ctx;
+ ctx->fh.ctrl_handler = &ctx->ctrl_hdl;
+ INIT_WORK(&ctx->encode_work, mtk_venc_worker);
+
+ ctx->colorspace = V4L2_COLORSPACE_REC709;
+ ctx->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ ctx->quantization = V4L2_QUANTIZATION_DEFAULT;
+ ctx->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+
+ q_data = &ctx->q_data[MTK_Q_DATA_SRC];
+ memset(q_data, 0, sizeof(struct mtk_q_data));
+ q_data->visible_width = DFT_CFG_WIDTH;
+ q_data->visible_height = DFT_CFG_HEIGHT;
+ q_data->coded_width = DFT_CFG_WIDTH;
+ q_data->coded_height = DFT_CFG_HEIGHT;
+ q_data->field = V4L2_FIELD_NONE;
+
+ q_data->fmt = &mtk_video_formats[OUT_FMT_IDX];
+
+ v4l_bound_align_image(&q_data->coded_width,
+ MTK_VENC_MIN_W,
+ MTK_VENC_MAX_W, 4,
+ &q_data->coded_height,
+ MTK_VENC_MIN_H,
+ MTK_VENC_MAX_H, 5, 6);
+
+ if (q_data->coded_width < DFT_CFG_WIDTH &&
+ (q_data->coded_width + 16) <= MTK_VENC_MAX_W)
+ q_data->coded_width += 16;
+ if (q_data->coded_height < DFT_CFG_HEIGHT &&
+ (q_data->coded_height + 32) <= MTK_VENC_MAX_H)
+ q_data->coded_height += 32;
+
+ q_data->sizeimage[0] =
+		q_data->coded_width * q_data->coded_height +
+ ((ALIGN(q_data->coded_width, 16) * 2) * 16);
+ q_data->bytesperline[0] = q_data->coded_width;
+ q_data->sizeimage[1] =
+ (q_data->coded_width * q_data->coded_height) / 2 +
+ (ALIGN(q_data->coded_width, 16) * 16);
+ q_data->bytesperline[1] = q_data->coded_width;
+
+ q_data = &ctx->q_data[MTK_Q_DATA_DST];
+ memset(q_data, 0, sizeof(struct mtk_q_data));
+ q_data->coded_width = DFT_CFG_WIDTH;
+ q_data->coded_height = DFT_CFG_HEIGHT;
+ q_data->fmt = &mtk_video_formats[CAP_FMT_IDX];
+ q_data->field = V4L2_FIELD_NONE;
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[0] =
+ DFT_CFG_WIDTH * DFT_CFG_HEIGHT;
+ ctx->q_data[MTK_Q_DATA_DST].bytesperline[0] = 0;
+}
+
+int mtk_vcodec_enc_ctrls_setup(struct mtk_vcodec_ctx *ctx)
+{
+ const struct v4l2_ctrl_ops *ops = &mtk_vcodec_enc_ctrl_ops;
+ struct v4l2_ctrl_handler *handler = &ctx->ctrl_hdl;
+
+ v4l2_ctrl_handler_init(handler, MTK_MAX_CTRLS_HINT);
+
+ v4l2_ctrl_new_std(handler, ops, V4L2_CID_MPEG_VIDEO_BITRATE,
+ 1, 4000000, 1, 4000000);
+ v4l2_ctrl_new_std(handler, ops, V4L2_CID_MPEG_VIDEO_B_FRAMES,
+ 0, 2, 1, 0);
+ v4l2_ctrl_new_std(handler, ops, V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE,
+ 0, 1, 1, 1);
+ v4l2_ctrl_new_std(handler, ops, V4L2_CID_MPEG_VIDEO_H264_MAX_QP,
+ 0, 51, 1, 51);
+ v4l2_ctrl_new_std(handler, ops, V4L2_CID_MPEG_VIDEO_H264_I_PERIOD,
+ 0, 65535, 1, 0);
+ v4l2_ctrl_new_std(handler, ops, V4L2_CID_MPEG_VIDEO_GOP_SIZE,
+ 0, 65535, 1, 0);
+ v4l2_ctrl_new_std(handler, ops, V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(handler, ops, V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME,
+ 0, 0, 0, 0);
+ v4l2_ctrl_new_std_menu(handler, ops,
+ V4L2_CID_MPEG_VIDEO_HEADER_MODE,
+ V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
+ 0, V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE);
+ v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
+ 0, V4L2_MPEG_VIDEO_H264_PROFILE_HIGH);
+ v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+ V4L2_MPEG_VIDEO_H264_LEVEL_4_2,
+ 0, V4L2_MPEG_VIDEO_H264_LEVEL_4_0);
+ if (handler->error) {
+		mtk_v4l2_err("Init control handler failed %d",
+			     handler->error);
+ return handler->error;
+ }
+
+ v4l2_ctrl_handler_setup(&ctx->ctrl_hdl);
+
+ return 0;
+}
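+
+/*
+ * Illustrative only (hypothetical values): userspace adjusts one of the
+ * controls registered above with VIDIOC_S_CTRL, e.g. a runtime bitrate
+ * change:
+ *
+ *	struct v4l2_control c = {
+ *		.id = V4L2_CID_MPEG_VIDEO_BITRATE,
+ *		.value = 2000000,
+ *	};
+ *	ioctl(fd, VIDIOC_S_CTRL, &c);
+ */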
+
+int mtk_vcodec_enc_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct mtk_vcodec_ctx *ctx = priv;
+ int ret;
+
+	/* Note: VB2_USERPTR works with dma-contig because mt8173
+	 * supports IOMMU
+ * https://patchwork.kernel.org/patch/8335461/
+ * https://patchwork.kernel.org/patch/7596181/
+ */
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_DMABUF | VB2_MMAP | VB2_USERPTR;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct mtk_video_enc_buf);
+ src_vq->ops = &mtk_venc_vb2_ops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->dev->dev_mutex;
+ src_vq->dev = &ctx->dev->plat_dev->dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_DMABUF | VB2_MMAP | VB2_USERPTR;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &mtk_venc_vb2_ops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->dev->dev_mutex;
+ dst_vq->dev = &ctx->dev->plat_dev->dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+int mtk_venc_unlock(struct mtk_vcodec_ctx *ctx)
+{
+ struct mtk_vcodec_dev *dev = ctx->dev;
+
+ mutex_unlock(&dev->enc_mutex);
+ return 0;
+}
+
+int mtk_venc_lock(struct mtk_vcodec_ctx *ctx)
+{
+ struct mtk_vcodec_dev *dev = ctx->dev;
+
+ mutex_lock(&dev->enc_mutex);
+ return 0;
+}
+
+void mtk_vcodec_enc_release(struct mtk_vcodec_ctx *ctx)
+{
+ int ret = venc_if_deinit(ctx);
+
+ if (ret)
+ mtk_v4l2_err("venc_if_deinit failed=%d", ret);
+
+ ctx->state = MTK_STATE_FREE;
+}
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.h
new file mode 100644
index 000000000..d7a154a97
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.h
@@ -0,0 +1,58 @@
+/*
+* Copyright (c) 2016 MediaTek Inc.
+* Author: PC Chen <pc.chen@mediatek.com>
+* Tiffany Lin <tiffany.lin@mediatek.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#ifndef _MTK_VCODEC_ENC_H_
+#define _MTK_VCODEC_ENC_H_
+
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
+
+#define MTK_VENC_IRQ_STATUS_SPS 0x1
+#define MTK_VENC_IRQ_STATUS_PPS 0x2
+#define MTK_VENC_IRQ_STATUS_FRM 0x4
+#define MTK_VENC_IRQ_STATUS_DRAM 0x8
+#define MTK_VENC_IRQ_STATUS_PAUSE 0x10
+#define MTK_VENC_IRQ_STATUS_SWITCH 0x20
+
+#define MTK_VENC_IRQ_STATUS_OFFSET 0x05C
+#define MTK_VENC_IRQ_ACK_OFFSET 0x060
+
+/**
+ * struct mtk_video_enc_buf - Private data related to each VB2 buffer.
+ * @vb: Pointer to related VB2 buffer.
+ * @list: list the buffer is linked to
+ * @param_change: types of encode parameter change requested before encoding
+ *		  this buffer
+ * @enc_params: encode parameters changed before encoding this buffer
+ */
+struct mtk_video_enc_buf {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+ u32 param_change;
+ struct mtk_enc_params enc_params;
+};
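+
+/*
+ * The driver-private buffer is recovered from a plain struct vb2_buffer
+ * with two container_of() steps, as done in mtk_vcodec_enc.c:
+ *
+ *	struct vb2_v4l2_buffer *vb2_v4l2 =
+ *		container_of(vb, struct vb2_v4l2_buffer, vb2_buf);
+ *	struct mtk_video_enc_buf *buf =
+ *		container_of(vb2_v4l2, struct mtk_video_enc_buf, vb);
+ */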
+
+extern const struct v4l2_ioctl_ops mtk_venc_ioctl_ops;
+extern const struct v4l2_m2m_ops mtk_venc_m2m_ops;
+
+int mtk_venc_unlock(struct mtk_vcodec_ctx *ctx);
+int mtk_venc_lock(struct mtk_vcodec_ctx *ctx);
+int mtk_vcodec_enc_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq);
+void mtk_vcodec_enc_release(struct mtk_vcodec_ctx *ctx);
+int mtk_vcodec_enc_ctrls_setup(struct mtk_vcodec_ctx *ctx);
+void mtk_vcodec_enc_set_default_params(struct mtk_vcodec_ctx *ctx);
+
+#endif /* _MTK_VCODEC_ENC_H_ */
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
new file mode 100644
index 000000000..5cd215143
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
@@ -0,0 +1,439 @@
+/*
+* Copyright (c) 2016 MediaTek Inc.
+* Author: PC Chen <pc.chen@mediatek.com>
+* Tiffany Lin <tiffany.lin@mediatek.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+#include <linux/pm_runtime.h>
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_enc.h"
+#include "mtk_vcodec_enc_pm.h"
+#include "mtk_vcodec_intr.h"
+#include "mtk_vcodec_util.h"
+#include "mtk_vpu.h"
+
+module_param(mtk_v4l2_dbg_level, int, S_IRUGO | S_IWUSR);
+module_param(mtk_vcodec_dbg, bool, S_IRUGO | S_IWUSR);
+
+/* Wake up context wait_queue */
+static void wake_up_ctx(struct mtk_vcodec_ctx *ctx, unsigned int reason)
+{
+ ctx->int_cond = 1;
+ ctx->int_type = reason;
+ wake_up_interruptible(&ctx->queue);
+}
+
+static void clean_irq_status(unsigned int irq_status, void __iomem *addr)
+{
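+	/* addr points at the IRQ ACK register (see the callers below):
+	 * acknowledge each pending source by writing its status bit back.
+	 */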
+ if (irq_status & MTK_VENC_IRQ_STATUS_PAUSE)
+ writel(MTK_VENC_IRQ_STATUS_PAUSE, addr);
+
+ if (irq_status & MTK_VENC_IRQ_STATUS_SWITCH)
+ writel(MTK_VENC_IRQ_STATUS_SWITCH, addr);
+
+ if (irq_status & MTK_VENC_IRQ_STATUS_DRAM)
+ writel(MTK_VENC_IRQ_STATUS_DRAM, addr);
+
+ if (irq_status & MTK_VENC_IRQ_STATUS_SPS)
+ writel(MTK_VENC_IRQ_STATUS_SPS, addr);
+
+ if (irq_status & MTK_VENC_IRQ_STATUS_PPS)
+ writel(MTK_VENC_IRQ_STATUS_PPS, addr);
+
+ if (irq_status & MTK_VENC_IRQ_STATUS_FRM)
+ writel(MTK_VENC_IRQ_STATUS_FRM, addr);
+}
+
+static irqreturn_t mtk_vcodec_enc_irq_handler(int irq, void *priv)
+{
+ struct mtk_vcodec_dev *dev = priv;
+ struct mtk_vcodec_ctx *ctx;
+ unsigned long flags;
+ void __iomem *addr;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ ctx = dev->curr_ctx;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+
+ mtk_v4l2_debug(1, "id=%d", ctx->id);
+ addr = dev->reg_base[VENC_SYS] + MTK_VENC_IRQ_ACK_OFFSET;
+
+ ctx->irq_status = readl(dev->reg_base[VENC_SYS] +
+ (MTK_VENC_IRQ_STATUS_OFFSET));
+
+ clean_irq_status(ctx->irq_status, addr);
+
+ wake_up_ctx(ctx, MTK_INST_IRQ_RECEIVED);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mtk_vcodec_enc_lt_irq_handler(int irq, void *priv)
+{
+ struct mtk_vcodec_dev *dev = priv;
+ struct mtk_vcodec_ctx *ctx;
+ unsigned long flags;
+ void __iomem *addr;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ ctx = dev->curr_ctx;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+
+ mtk_v4l2_debug(1, "id=%d", ctx->id);
+ ctx->irq_status = readl(dev->reg_base[VENC_LT_SYS] +
+ (MTK_VENC_IRQ_STATUS_OFFSET));
+
+ addr = dev->reg_base[VENC_LT_SYS] + MTK_VENC_IRQ_ACK_OFFSET;
+
+ clean_irq_status(ctx->irq_status, addr);
+
+ wake_up_ctx(ctx, MTK_INST_IRQ_RECEIVED);
+ return IRQ_HANDLED;
+}
+
+static void mtk_vcodec_enc_reset_handler(void *priv)
+{
+ struct mtk_vcodec_dev *dev = priv;
+ struct mtk_vcodec_ctx *ctx;
+
+ mtk_v4l2_debug(0, "Watchdog timeout!!");
+
+ mutex_lock(&dev->dev_mutex);
+ list_for_each_entry(ctx, &dev->ctx_list, list) {
+ ctx->state = MTK_STATE_ABORT;
+ mtk_v4l2_debug(0, "[%d] Change to state MTK_STATE_ABORT",
+ ctx->id);
+ }
+ mutex_unlock(&dev->dev_mutex);
+}
+
+static int fops_vcodec_open(struct file *file)
+{
+ struct mtk_vcodec_dev *dev = video_drvdata(file);
+ struct mtk_vcodec_ctx *ctx = NULL;
+ int ret = 0;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ mutex_lock(&dev->dev_mutex);
+ /*
+ * Use simple counter to uniquely identify this context. Only
+ * used for logging.
+ */
+ ctx->id = dev->id_counter++;
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+ INIT_LIST_HEAD(&ctx->list);
+ ctx->dev = dev;
+ init_waitqueue_head(&ctx->queue);
+
+ ctx->type = MTK_INST_ENCODER;
+ ret = mtk_vcodec_enc_ctrls_setup(ctx);
+ if (ret) {
+ mtk_v4l2_err("Failed to setup controls() (%d)",
+ ret);
+ goto err_ctrls_setup;
+ }
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev_enc, ctx,
+ &mtk_vcodec_enc_queue_init);
+ if (IS_ERR((__force void *)ctx->m2m_ctx)) {
+ ret = PTR_ERR((__force void *)ctx->m2m_ctx);
+ mtk_v4l2_err("Failed to v4l2_m2m_ctx_init() (%d)",
+ ret);
+ goto err_m2m_ctx_init;
+ }
+ mtk_vcodec_enc_set_default_params(ctx);
+
+ if (v4l2_fh_is_singular(&ctx->fh)) {
+ /*
+ * vpu_load_firmware checks if it was loaded already and
+ * does nothing in that case
+ */
+ ret = vpu_load_firmware(dev->vpu_plat_dev);
+ if (ret < 0) {
+ /*
+			 * vpu_load_firmware() returns 0 on success; any
+			 * other value means the download failed
+ */
+ mtk_v4l2_err("vpu_load_firmware failed!");
+ goto err_load_fw;
+ }
+
+ dev->enc_capability =
+ vpu_get_venc_hw_capa(dev->vpu_plat_dev);
+ mtk_v4l2_debug(0, "encoder capability %x", dev->enc_capability);
+ }
+
+ mtk_v4l2_debug(2, "Create instance [%d]@%p m2m_ctx=%p ",
+ ctx->id, ctx, ctx->m2m_ctx);
+
+ dev->num_instances++;
+ list_add(&ctx->list, &dev->ctx_list);
+
+ mutex_unlock(&dev->dev_mutex);
+ mtk_v4l2_debug(0, "%s encoder [%d]", dev_name(&dev->plat_dev->dev),
+ ctx->id);
+ return ret;
+
+ /* Deinit when failure occurred */
+err_load_fw:
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+err_m2m_ctx_init:
+ v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+err_ctrls_setup:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ mutex_unlock(&dev->dev_mutex);
+
+ return ret;
+}
+
+static int fops_vcodec_release(struct file *file)
+{
+ struct mtk_vcodec_dev *dev = video_drvdata(file);
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(file->private_data);
+
+ mtk_v4l2_debug(1, "[%d] encoder", ctx->id);
+ mutex_lock(&dev->dev_mutex);
+
+ /*
+ * Call v4l2_m2m_ctx_release to make sure the worker thread is not
+ * running after venc_if_deinit.
+ */
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ mtk_vcodec_enc_release(ctx);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+
+ list_del_init(&ctx->list);
+ dev->num_instances--;
+ kfree(ctx);
+ mutex_unlock(&dev->dev_mutex);
+ return 0;
+}
+
+static const struct v4l2_file_operations mtk_vcodec_fops = {
+ .owner = THIS_MODULE,
+ .open = fops_vcodec_open,
+ .release = fops_vcodec_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static int mtk_vcodec_probe(struct platform_device *pdev)
+{
+ struct mtk_vcodec_dev *dev;
+ struct video_device *vfd_enc;
+ struct resource *res;
+ int i, j, ret;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&dev->ctx_list);
+ dev->plat_dev = pdev;
+
+ dev->vpu_plat_dev = vpu_get_plat_device(dev->plat_dev);
+ if (dev->vpu_plat_dev == NULL) {
+		mtk_v4l2_err("[VPU] vpu device is not ready");
+ return -EPROBE_DEFER;
+ }
+
+ vpu_wdt_reg_handler(dev->vpu_plat_dev, mtk_vcodec_enc_reset_handler,
+ dev, VPU_RST_ENC);
+
+ ret = mtk_vcodec_init_enc_pm(dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to get mt vcodec clock source!");
+ return ret;
+ }
+
+ for (i = VENC_SYS, j = 0; i < NUM_MAX_VCODEC_REG_BASE; i++, j++) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, j);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "get memory resource failed.");
+ ret = -ENXIO;
+ goto err_res;
+ }
+ dev->reg_base[i] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR((__force void *)dev->reg_base[i])) {
+ ret = PTR_ERR((__force void *)dev->reg_base[i]);
+ goto err_res;
+ }
+ mtk_v4l2_debug(2, "reg[%d] base=0x%p", i, dev->reg_base[i]);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "failed to get irq resource");
+ ret = -ENOENT;
+ goto err_res;
+ }
+
+ dev->enc_irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(&pdev->dev, dev->enc_irq,
+ mtk_vcodec_enc_irq_handler,
+ 0, pdev->name, dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to install dev->enc_irq %d (%d)",
+ dev->enc_irq,
+ ret);
+ ret = -EINVAL;
+ goto err_res;
+ }
+
+ dev->enc_lt_irq = platform_get_irq(pdev, 1);
+ ret = devm_request_irq(&pdev->dev,
+ dev->enc_lt_irq, mtk_vcodec_enc_lt_irq_handler,
+ 0, pdev->name, dev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to install dev->enc_lt_irq %d (%d)",
+ dev->enc_lt_irq, ret);
+ ret = -EINVAL;
+ goto err_res;
+ }
+
+ disable_irq(dev->enc_irq);
+ disable_irq(dev->enc_lt_irq); /* VENC_LT */
+ mutex_init(&dev->enc_mutex);
+ mutex_init(&dev->dev_mutex);
+ spin_lock_init(&dev->irqlock);
+
+ snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name), "%s",
+ "[MTK_V4L2_VENC]");
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret) {
+ mtk_v4l2_err("v4l2_device_register err=%d", ret);
+ goto err_res;
+ }
+
+ init_waitqueue_head(&dev->queue);
+
+ /* allocate video device for encoder and register it */
+ vfd_enc = video_device_alloc();
+ if (!vfd_enc) {
+ mtk_v4l2_err("Failed to allocate video device");
+ ret = -ENOMEM;
+ goto err_enc_alloc;
+ }
+ vfd_enc->fops = &mtk_vcodec_fops;
+ vfd_enc->ioctl_ops = &mtk_venc_ioctl_ops;
+ vfd_enc->release = video_device_release;
+ vfd_enc->lock = &dev->dev_mutex;
+ vfd_enc->v4l2_dev = &dev->v4l2_dev;
+ vfd_enc->vfl_dir = VFL_DIR_M2M;
+ vfd_enc->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE |
+ V4L2_CAP_STREAMING;
+
+ snprintf(vfd_enc->name, sizeof(vfd_enc->name), "%s",
+ MTK_VCODEC_ENC_NAME);
+ video_set_drvdata(vfd_enc, dev);
+ dev->vfd_enc = vfd_enc;
+ platform_set_drvdata(pdev, dev);
+
+ dev->m2m_dev_enc = v4l2_m2m_init(&mtk_venc_m2m_ops);
+ if (IS_ERR((__force void *)dev->m2m_dev_enc)) {
+ mtk_v4l2_err("Failed to init mem2mem enc device");
+ ret = PTR_ERR((__force void *)dev->m2m_dev_enc);
+ goto err_enc_mem_init;
+ }
+
+ dev->encode_workqueue =
+ alloc_ordered_workqueue(MTK_VCODEC_ENC_NAME,
+ WQ_MEM_RECLAIM |
+ WQ_FREEZABLE);
+ if (!dev->encode_workqueue) {
+ mtk_v4l2_err("Failed to create encode workqueue");
+ ret = -EINVAL;
+ goto err_event_workq;
+ }
+
+ ret = video_register_device(vfd_enc, VFL_TYPE_GRABBER, 1);
+ if (ret) {
+ mtk_v4l2_err("Failed to register video device");
+ goto err_enc_reg;
+ }
+
+ mtk_v4l2_debug(0, "encoder registered as /dev/video%d",
+ vfd_enc->num);
+
+ return 0;
+
+err_enc_reg:
+ destroy_workqueue(dev->encode_workqueue);
+err_event_workq:
+ v4l2_m2m_release(dev->m2m_dev_enc);
+err_enc_mem_init:
+ video_unregister_device(vfd_enc);
+err_enc_alloc:
+ v4l2_device_unregister(&dev->v4l2_dev);
+err_res:
+ mtk_vcodec_release_enc_pm(dev);
+ return ret;
+}
+
+static const struct of_device_id mtk_vcodec_enc_match[] = {
+ {.compatible = "mediatek,mt8173-vcodec-enc",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtk_vcodec_enc_match);
+
+static int mtk_vcodec_enc_remove(struct platform_device *pdev)
+{
+ struct mtk_vcodec_dev *dev = platform_get_drvdata(pdev);
+
+ mtk_v4l2_debug_enter();
+ flush_workqueue(dev->encode_workqueue);
+ destroy_workqueue(dev->encode_workqueue);
+ if (dev->m2m_dev_enc)
+ v4l2_m2m_release(dev->m2m_dev_enc);
+
+ if (dev->vfd_enc)
+ video_unregister_device(dev->vfd_enc);
+
+ v4l2_device_unregister(&dev->v4l2_dev);
+ mtk_vcodec_release_enc_pm(dev);
+ return 0;
+}
+
+static struct platform_driver mtk_vcodec_enc_driver = {
+ .probe = mtk_vcodec_probe,
+ .remove = mtk_vcodec_enc_remove,
+ .driver = {
+ .name = MTK_VCODEC_ENC_NAME,
+ .of_match_table = mtk_vcodec_enc_match,
+ },
+};
+
+module_platform_driver(mtk_vcodec_enc_driver);
+
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Mediatek video codec V4L2 encoder driver");
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
new file mode 100644
index 000000000..3e73e9db7
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
@@ -0,0 +1,137 @@
+/*
+* Copyright (c) 2016 MediaTek Inc.
+* Author: Tiffany Lin <tiffany.lin@mediatek.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#include <linux/clk.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <soc/mediatek/smi.h>
+
+#include "mtk_vcodec_enc_pm.h"
+#include "mtk_vcodec_util.h"
+#include "mtk_vpu.h"
+
+
+int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *mtkdev)
+{
+ struct device_node *node;
+ struct platform_device *pdev;
+ struct device *dev;
+ struct mtk_vcodec_pm *pm;
+ int ret = 0;
+
+ pdev = mtkdev->plat_dev;
+ pm = &mtkdev->pm;
+ memset(pm, 0, sizeof(struct mtk_vcodec_pm));
+ pm->mtkdev = mtkdev;
+ pm->dev = &pdev->dev;
+ dev = &pdev->dev;
+
+ node = of_parse_phandle(dev->of_node, "mediatek,larb", 0);
+ if (!node) {
+ mtk_v4l2_err("no mediatek,larb found");
+ return -1;
+ }
+ pdev = of_find_device_by_node(node);
+ if (!pdev) {
+ mtk_v4l2_err("no mediatek,larb device found");
+ return -1;
+ }
+ pm->larbvenc = &pdev->dev;
+
+ node = of_parse_phandle(dev->of_node, "mediatek,larb", 1);
+ if (!node) {
+ mtk_v4l2_err("no mediatek,larb found");
+ return -1;
+ }
+
+ pdev = of_find_device_by_node(node);
+ if (!pdev) {
+ mtk_v4l2_err("no mediatek,larb device found");
+ return -1;
+ }
+
+ pm->larbvenclt = &pdev->dev;
+ pdev = mtkdev->plat_dev;
+ pm->dev = &pdev->dev;
+
+ pm->vencpll_d2 = devm_clk_get(&pdev->dev, "venc_sel_src");
+ if (IS_ERR(pm->vencpll_d2)) {
+ mtk_v4l2_err("devm_clk_get vencpll_d2 fail");
+ ret = PTR_ERR(pm->vencpll_d2);
+ }
+
+ pm->venc_sel = devm_clk_get(&pdev->dev, "venc_sel");
+ if (IS_ERR(pm->venc_sel)) {
+ mtk_v4l2_err("devm_clk_get venc_sel fail");
+ ret = PTR_ERR(pm->venc_sel);
+ }
+
+ pm->univpll1_d2 = devm_clk_get(&pdev->dev, "venc_lt_sel_src");
+ if (IS_ERR(pm->univpll1_d2)) {
+ mtk_v4l2_err("devm_clk_get univpll1_d2 fail");
+ ret = PTR_ERR(pm->univpll1_d2);
+ }
+
+ pm->venc_lt_sel = devm_clk_get(&pdev->dev, "venc_lt_sel");
+ if (IS_ERR(pm->venc_lt_sel)) {
+ mtk_v4l2_err("devm_clk_get venc_lt_sel fail");
+ ret = PTR_ERR(pm->venc_lt_sel);
+ }
+
+ return ret;
+}
+
+void mtk_vcodec_release_enc_pm(struct mtk_vcodec_dev *mtkdev)
+{
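+	/* Nothing to release here: the clocks were acquired with
+	 * devm_clk_get() and are freed automatically with the device.
+	 */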
+}
+
+
+void mtk_vcodec_enc_clock_on(struct mtk_vcodec_pm *pm)
+{
+ int ret;
+
+ ret = clk_prepare_enable(pm->venc_sel);
+ if (ret)
+ mtk_v4l2_err("clk_prepare_enable fail %d", ret);
+
+ ret = clk_set_parent(pm->venc_sel, pm->vencpll_d2);
+ if (ret)
+ mtk_v4l2_err("clk_set_parent fail %d", ret);
+
+ ret = clk_prepare_enable(pm->venc_lt_sel);
+ if (ret)
+ mtk_v4l2_err("clk_prepare_enable fail %d", ret);
+
+ ret = clk_set_parent(pm->venc_lt_sel, pm->univpll1_d2);
+ if (ret)
+ mtk_v4l2_err("clk_set_parent fail %d", ret);
+
+ ret = mtk_smi_larb_get(pm->larbvenc);
+ if (ret)
+ mtk_v4l2_err("mtk_smi_larb_get larb3 fail %d", ret);
+
+ ret = mtk_smi_larb_get(pm->larbvenclt);
+ if (ret)
+ mtk_v4l2_err("mtk_smi_larb_get larb4 fail %d", ret);
+}
+
+void mtk_vcodec_enc_clock_off(struct mtk_vcodec_pm *pm)
+{
+ mtk_smi_larb_put(pm->larbvenc);
+ mtk_smi_larb_put(pm->larbvenclt);
+ clk_disable_unprepare(pm->venc_lt_sel);
+ clk_disable_unprepare(pm->venc_sel);
+}
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.h
new file mode 100644
index 000000000..f32167138
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.h
@@ -0,0 +1,26 @@
+/*
+* Copyright (c) 2016 MediaTek Inc.
+* Author: Tiffany Lin <tiffany.lin@mediatek.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#ifndef _MTK_VCODEC_ENC_PM_H_
+#define _MTK_VCODEC_ENC_PM_H_
+
+#include "mtk_vcodec_drv.h"
+
+int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *dev);
+void mtk_vcodec_release_enc_pm(struct mtk_vcodec_dev *dev);
+
+void mtk_vcodec_enc_clock_on(struct mtk_vcodec_pm *pm);
+void mtk_vcodec_enc_clock_off(struct mtk_vcodec_pm *pm);
+
+#endif /* _MTK_VCODEC_ENC_PM_H_ */
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c
new file mode 100644
index 000000000..52e7e5c9a
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c
@@ -0,0 +1,54 @@
+/*
+* Copyright (c) 2016 MediaTek Inc.
+* Author: Tiffany Lin <tiffany.lin@mediatek.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#include <linux/errno.h>
+#include <linux/wait.h>
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_intr.h"
+#include "mtk_vcodec_util.h"
+
+int mtk_vcodec_wait_for_done_ctx(struct mtk_vcodec_ctx *ctx, int command,
+ unsigned int timeout_ms)
+{
+ wait_queue_head_t *waitqueue;
+ long timeout_jiff, ret;
+ int status = 0;
+
+ waitqueue = (wait_queue_head_t *)&ctx->queue;
+ timeout_jiff = msecs_to_jiffies(timeout_ms);
+
+ ret = wait_event_interruptible_timeout(*waitqueue,
+ (ctx->int_cond &&
+ (ctx->int_type == command)),
+ timeout_jiff);
+
+ if (!ret) {
+ status = -1; /* timeout */
+		mtk_v4l2_err("[%d] cmd=%d, ctx->type=%d, wait_event_interruptible_timeout timed out after %ums, int_cond=%d int_type=%d",
+			     ctx->id, command, ctx->type, timeout_ms,
+			     ctx->int_cond, ctx->int_type);
+ } else if (-ERESTARTSYS == ret) {
+		mtk_v4l2_err("[%d] cmd=%d, ctx->type=%d, wait_event_interruptible_timeout interrupted by a signal, int_cond=%d int_type=%d",
+			     ctx->id, command, ctx->type, ctx->int_cond,
+			     ctx->int_type);
+ status = -1;
+ }
+
+ ctx->int_cond = 0;
+ ctx->int_type = 0;
+
+ return status;
+}
+EXPORT_SYMBOL(mtk_vcodec_wait_for_done_ctx);
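+
+/*
+ * Illustrative call site only (hypothetical 1000 ms timeout); a non-zero
+ * return means the wait timed out or was interrupted by a signal:
+ *
+ *	if (mtk_vcodec_wait_for_done_ctx(ctx, MTK_INST_IRQ_RECEIVED, 1000))
+ *		handle the timeout here
+ */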
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h
new file mode 100644
index 000000000..12131855b
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h
@@ -0,0 +1,26 @@
+/*
+* Copyright (c) 2016 MediaTek Inc.
+* Author: Tiffany Lin <tiffany.lin@mediatek.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#ifndef _MTK_VCODEC_INTR_H_
+#define _MTK_VCODEC_INTR_H_
+
+#define MTK_INST_IRQ_RECEIVED 0x1
+
+struct mtk_vcodec_ctx;
+
+/* timeout is in ms */
+int mtk_vcodec_wait_for_done_ctx(struct mtk_vcodec_ctx *data, int command,
+ unsigned int timeout_ms);
+
+#endif /* _MTK_VCODEC_INTR_H_ */
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
new file mode 100644
index 000000000..5e3651372
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
@@ -0,0 +1,94 @@
+/*
+* Copyright (c) 2016 MediaTek Inc.
+* Author: PC Chen <pc.chen@mediatek.com>
+* Tiffany Lin <tiffany.lin@mediatek.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#include <linux/module.h>
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_util.h"
+#include "mtk_vpu.h"
+
+/* For the encoder, this will enable logs in venc/ */
+bool mtk_vcodec_dbg;
+EXPORT_SYMBOL(mtk_vcodec_dbg);
+
+/* The log level of the v4l2 encoder or decoder driver.
+ * That is, files under mtk-vcodec/.
+ */
+int mtk_v4l2_dbg_level;
+EXPORT_SYMBOL(mtk_v4l2_dbg_level);
+
+void __iomem *mtk_vcodec_get_reg_addr(struct mtk_vcodec_ctx *data,
+ unsigned int reg_idx)
+{
+ struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)data;
+
+ if (!data || reg_idx >= NUM_MAX_VCODEC_REG_BASE) {
+ mtk_v4l2_err("Invalid arguments, reg_idx=%d", reg_idx);
+ return NULL;
+ }
+ return ctx->dev->reg_base[reg_idx];
+}
+EXPORT_SYMBOL(mtk_vcodec_get_reg_addr);
+
+int mtk_vcodec_mem_alloc(struct mtk_vcodec_ctx *data,
+ struct mtk_vcodec_mem *mem)
+{
+ unsigned long size = mem->size;
+ struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)data;
+ struct device *dev = &ctx->dev->plat_dev->dev;
+
+ mem->va = dma_alloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL);
+
+ if (!mem->va) {
+		mtk_v4l2_err("%s dma_alloc size=%lu failed!", dev_name(dev),
+ size);
+ return -ENOMEM;
+ }
+
+ memset(mem->va, 0, size);
+
+ mtk_v4l2_debug(3, "[%d] - va = %p", ctx->id, mem->va);
+ mtk_v4l2_debug(3, "[%d] - dma = 0x%lx", ctx->id,
+ (unsigned long)mem->dma_addr);
+ mtk_v4l2_debug(3, "[%d] size = 0x%lx", ctx->id, size);
+
+ return 0;
+}
+EXPORT_SYMBOL(mtk_vcodec_mem_alloc);
+
+void mtk_vcodec_mem_free(struct mtk_vcodec_ctx *data,
+ struct mtk_vcodec_mem *mem)
+{
+ unsigned long size = mem->size;
+ struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)data;
+ struct device *dev = &ctx->dev->plat_dev->dev;
+
+ if (!mem->va) {
+		mtk_v4l2_err("%s dma_free size=%lu failed!", dev_name(dev),
+ size);
+ return;
+ }
+
+ dma_free_coherent(dev, size, mem->va, mem->dma_addr);
+ mem->va = NULL;
+ mem->dma_addr = 0;
+ mem->size = 0;
+
+ mtk_v4l2_debug(3, "[%d] - va = %p", ctx->id, mem->va);
+ mtk_v4l2_debug(3, "[%d] - dma = 0x%lx", ctx->id,
+ (unsigned long)mem->dma_addr);
+ mtk_v4l2_debug(3, "[%d] size = 0x%lx", ctx->id, size);
+}
+EXPORT_SYMBOL(mtk_vcodec_mem_free);
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h
new file mode 100644
index 000000000..d6345fc04
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h
@@ -0,0 +1,87 @@
+/*
+* Copyright (c) 2016 MediaTek Inc.
+* Author: PC Chen <pc.chen@mediatek.com>
+* Tiffany Lin <tiffany.lin@mediatek.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#ifndef _MTK_VCODEC_UTIL_H_
+#define _MTK_VCODEC_UTIL_H_
+
+#include <linux/types.h>
+#include <linux/dma-direction.h>
+
+struct mtk_vcodec_mem {
+ size_t size;
+ void *va;
+ dma_addr_t dma_addr;
+};
+
+struct mtk_vcodec_ctx;
+
+extern int mtk_v4l2_dbg_level;
+extern bool mtk_vcodec_dbg;
+
+#define DEBUG 1
+
+#if defined(DEBUG)
+
+#define mtk_v4l2_debug(level, fmt, args...) \
+ do { \
+ if (mtk_v4l2_dbg_level >= level) \
+ pr_info("[MTK_V4L2] level=%d %s(),%d: " fmt "\n",\
+ level, __func__, __LINE__, ##args); \
+ } while (0)
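+
+/*
+ * Example: mtk_v4l2_debug(1, "id=%d", ctx->id) prints only when
+ * mtk_v4l2_dbg_level >= 1, with the level, function name and line number
+ * prepended.
+ */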
+
+#define mtk_v4l2_err(fmt, args...) \
+ pr_err("[MTK_V4L2][ERROR] %s:%d: " fmt "\n", __func__, __LINE__, \
+ ##args)
+
+
+#define mtk_v4l2_debug_enter() mtk_v4l2_debug(3, "+")
+#define mtk_v4l2_debug_leave() mtk_v4l2_debug(3, "-")
+
+#define mtk_vcodec_debug(h, fmt, args...) \
+ do { \
+ if (mtk_vcodec_dbg) \
+ pr_info("[MTK_VCODEC][%d]: %s() " fmt "\n", \
+ ((struct mtk_vcodec_ctx *)h->ctx)->id, \
+ __func__, ##args); \
+ } while (0)
+
+#define mtk_vcodec_err(h, fmt, args...) \
+ pr_err("[MTK_VCODEC][ERROR][%d]: %s() " fmt "\n", \
+ ((struct mtk_vcodec_ctx *)h->ctx)->id, __func__, ##args)
+
+#define mtk_vcodec_debug_enter(h) mtk_vcodec_debug(h, "+")
+#define mtk_vcodec_debug_leave(h) mtk_vcodec_debug(h, "-")
+
+#else
+
+#define mtk_v4l2_debug(level, fmt, args...)
+#define mtk_v4l2_err(fmt, args...)
+#define mtk_v4l2_debug_enter()
+#define mtk_v4l2_debug_leave()
+
+#define mtk_vcodec_debug(h, fmt, args...)
+#define mtk_vcodec_err(h, fmt, args...)
+#define mtk_vcodec_debug_enter(h)
+#define mtk_vcodec_debug_leave(h)
+
+#endif
+
+void __iomem *mtk_vcodec_get_reg_addr(struct mtk_vcodec_ctx *data,
+ unsigned int reg_idx);
+int mtk_vcodec_mem_alloc(struct mtk_vcodec_ctx *data,
+ struct mtk_vcodec_mem *mem);
+void mtk_vcodec_mem_free(struct mtk_vcodec_ctx *data,
+ struct mtk_vcodec_mem *mem);
+#endif /* _MTK_VCODEC_UTIL_H_ */
diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
new file mode 100644
index 000000000..63d4be4ff
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
@@ -0,0 +1,679 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Jungchang Tsao <jungchang.tsao@mediatek.com>
+ * Daniel Hsiao <daniel.hsiao@mediatek.com>
+ * PoChun Lin <pochun.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "../mtk_vcodec_drv.h"
+#include "../mtk_vcodec_util.h"
+#include "../mtk_vcodec_intr.h"
+#include "../mtk_vcodec_enc.h"
+#include "../mtk_vcodec_enc_pm.h"
+#include "../venc_drv_base.h"
+#include "../venc_ipi_msg.h"
+#include "../venc_vpu_if.h"
+#include "mtk_vpu.h"
+
+static const char h264_filler_marker[] = {0x0, 0x0, 0x0, 0x1, 0xc};
+
+#define H264_FILLER_MARKER_SIZE ARRAY_SIZE(h264_filler_marker)
+#define VENC_PIC_BITSTREAM_BYTE_CNT 0x0098
+
+/**
+ * enum venc_h264_vpu_work_buf - h264 encoder buffer index
+ */
+enum venc_h264_vpu_work_buf {
+ VENC_H264_VPU_WORK_BUF_RC_INFO,
+ VENC_H264_VPU_WORK_BUF_RC_CODE,
+ VENC_H264_VPU_WORK_BUF_REC_LUMA,
+ VENC_H264_VPU_WORK_BUF_REC_CHROMA,
+ VENC_H264_VPU_WORK_BUF_REF_LUMA,
+ VENC_H264_VPU_WORK_BUF_REF_CHROMA,
+ VENC_H264_VPU_WORK_BUF_MV_INFO_1,
+ VENC_H264_VPU_WORK_BUF_MV_INFO_2,
+ VENC_H264_VPU_WORK_BUF_SKIP_FRAME,
+ VENC_H264_VPU_WORK_BUF_MAX,
+};
+
+/**
+ * enum venc_h264_bs_mode - for bs_mode argument in h264_enc_vpu_encode
+ */
+enum venc_h264_bs_mode {
+ H264_BS_MODE_SPS,
+ H264_BS_MODE_PPS,
+ H264_BS_MODE_FRAME,
+};
+
+/*
+ * struct venc_h264_vpu_config - Structure for h264 encoder configuration
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
+ * @input_fourcc: input fourcc
+ * @bitrate: target bitrate (in bps)
+ * @pic_w: picture width. Picture size is visible stream resolution, in pixels,
+ * to be used for display purposes; must be smaller than or equal to the
+ * buffer size.
+ * @pic_h: picture height
+ * @buf_w: buffer width. Buffer size is stream resolution in pixels aligned to
+ * hardware requirements.
+ * @buf_h: buffer height
+ * @gop_size: group of picture size (idr frame)
+ * @intra_period: intra frame period
+ * @framerate: frame rate in fps
+ * @profile: as specified in standard
+ * @level: as specified in standard
+ * @wfd: WFD mode 1:on, 0:off
+ */
+struct venc_h264_vpu_config {
+ u32 input_fourcc;
+ u32 bitrate;
+ u32 pic_w;
+ u32 pic_h;
+ u32 buf_w;
+ u32 buf_h;
+ u32 gop_size;
+ u32 intra_period;
+ u32 framerate;
+ u32 profile;
+ u32 level;
+ u32 wfd;
+};
+
+/*
+ * struct venc_h264_vpu_buf - Structure for buffer information
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
+ * @iova: IO virtual address
+ * @vpua: VPU side memory addr which is used by RC_CODE
+ * @size: buffer size (in bytes)
+ */
+struct venc_h264_vpu_buf {
+ u32 iova;
+ u32 vpua;
+ u32 size;
+};
+
+/*
+ * struct venc_h264_vsi - Structure for VPU driver control and info share
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
+ * This structure is allocated on the VPU side and shared with the AP side.
+ * @config: h264 encoder configuration
+ * @work_bufs: working buffer information on the VPU side
+ *	The work_bufs here stores the 'size' info shared with the AP side.
+ *	The counterpart in struct venc_h264_inst is used for the actual
+ *	memory allocation on the AP side. The AP driver copies each 'size'
+ *	from here into the one in struct mtk_vcodec_mem, invokes
+ *	mtk_vcodec_mem_alloc to allocate the buffer, and then passes the
+ *	resulting 'dma_addr' back to the 'iova' field here for register
+ *	setting on the VPU side.
+ */
+struct venc_h264_vsi {
+ struct venc_h264_vpu_config config;
+ struct venc_h264_vpu_buf work_bufs[VENC_H264_VPU_WORK_BUF_MAX];
+};
+
+/*
+ * struct venc_h264_inst - h264 encoder AP driver instance
+ * @hw_base: h264 encoder hardware register base
+ * @work_bufs: working buffer
+ * @pps_buf: buffer to store the pps bitstream
+ * @work_buf_allocated: working buffer allocated flag
+ * @frm_cnt: encoded frame count
+ * @prepend_hdr: set when the v4l2 layer sends the
+ *	VENC_SET_PARAM_PREPEND_HEADER cmd through the h264_enc_set_param
+ *	interface; the SPS/PPS are then prepended in the h264_enc_encode
+ *	function.
+ * @vpu_inst: VPU instance to exchange information between AP and VPU
+ * @vsi: driver structure allocated by VPU side and shared to AP side for
+ * control and info share
+ * @ctx: context for v4l2 layer integration
+ */
+struct venc_h264_inst {
+ void __iomem *hw_base;
+ struct mtk_vcodec_mem work_bufs[VENC_H264_VPU_WORK_BUF_MAX];
+ struct mtk_vcodec_mem pps_buf;
+ bool work_buf_allocated;
+ unsigned int frm_cnt;
+ unsigned int prepend_hdr;
+ struct venc_vpu_inst vpu_inst;
+ struct venc_h264_vsi *vsi;
+ struct mtk_vcodec_ctx *ctx;
+};
+
+static inline u32 h264_read_reg(struct venc_h264_inst *inst, u32 addr)
+{
+ return readl(inst->hw_base + addr);
+}
+
+static unsigned int h264_get_profile(struct venc_h264_inst *inst,
+ unsigned int profile)
+{
+ switch (profile) {
+ case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
+ return 66;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
+ return 77;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
+ return 100;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE:
+ mtk_vcodec_err(inst, "unsupported CONSTRAINED_BASELINE");
+ return 0;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED:
+ mtk_vcodec_err(inst, "unsupported EXTENDED");
+ return 0;
+ default:
+ mtk_vcodec_debug(inst, "unsupported profile %d", profile);
+ return 100;
+ }
+}
+
+static unsigned int h264_get_level(struct venc_h264_inst *inst,
+ unsigned int level)
+{
+ switch (level) {
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1B:
+ mtk_vcodec_err(inst, "unsupported 1B");
+ return 0;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
+ return 10;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
+ return 11;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
+ return 12;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
+ return 13;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
+ return 20;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
+ return 21;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
+ return 22;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
+ return 30;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
+ return 31;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
+ return 32;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
+ return 40;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
+ return 41;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
+ return 42;
+ default:
+ mtk_vcodec_debug(inst, "unsupported level %d", level);
+ return 31;
+ }
+}
+
+static void h264_enc_free_work_buf(struct venc_h264_inst *inst)
+{
+ int i;
+
+ mtk_vcodec_debug_enter(inst);
+
+	/* Except for the SKIP_FRAME buffer,
+	 * all other buffers need to be freed by the AP.
+	 */
+ for (i = 0; i < VENC_H264_VPU_WORK_BUF_MAX; i++) {
+ if (i != VENC_H264_VPU_WORK_BUF_SKIP_FRAME)
+ mtk_vcodec_mem_free(inst->ctx, &inst->work_bufs[i]);
+ }
+
+ mtk_vcodec_mem_free(inst->ctx, &inst->pps_buf);
+
+ mtk_vcodec_debug_leave(inst);
+}
+
+static int h264_enc_alloc_work_buf(struct venc_h264_inst *inst)
+{
+ int i;
+ int ret = 0;
+ struct venc_h264_vpu_buf *wb = inst->vsi->work_bufs;
+
+ mtk_vcodec_debug_enter(inst);
+
+ for (i = 0; i < VENC_H264_VPU_WORK_BUF_MAX; i++) {
+		/*
+		 * This 'wb' structure is set by the VPU side and shared with
+		 * the AP for buffer allocation and IO virtual addr mapping.
+		 * For most of the buffers, the AP allocates the buffer
+		 * according to the 'size' field and stores the IO virtual
+		 * addr in the 'iova' field. There are two exceptions:
+		 * (1) The RC_CODE buffer is pre-allocated on the VPU side,
+		 * which saves its VPU addr in the 'vpua' field. The AP
+		 * translates the VPU addr to the corresponding IO virtual
+		 * addr and stores it in the 'iova' field for register setting
+		 * on the VPU side.
+		 * (2) The SKIP_FRAME buffer is pre-allocated on the VPU side,
+		 * which saves its VPU addr in the 'vpua' field. The AP
+		 * translates the VPU addr to the corresponding AP side
+		 * virtual address and uses memcpy to move the data into the
+		 * bitstream buffer assigned by the v4l2 layer.
+		 */
+ inst->work_bufs[i].size = wb[i].size;
+ if (i == VENC_H264_VPU_WORK_BUF_SKIP_FRAME) {
+ inst->work_bufs[i].va = vpu_mapping_dm_addr(
+ inst->vpu_inst.dev, wb[i].vpua);
+ inst->work_bufs[i].dma_addr = 0;
+ } else {
+ ret = mtk_vcodec_mem_alloc(inst->ctx,
+ &inst->work_bufs[i]);
+ if (ret) {
+ mtk_vcodec_err(inst,
+ "cannot allocate buf %d", i);
+ goto err_alloc;
+ }
+			/*
+			 * The RC_CODE data is pre-allocated by the VPU and
+			 * referenced by a VPU addr, so use memcpy to copy it
+			 * from the VPU addr into the buffer just allocated;
+			 * the buffer's IO virtual addr in the 'iova' field is
+			 * then used for register setting on the VPU side.
+			 */
+ if (i == VENC_H264_VPU_WORK_BUF_RC_CODE) {
+ void *tmp_va;
+
+ tmp_va = vpu_mapping_dm_addr(inst->vpu_inst.dev,
+ wb[i].vpua);
+ memcpy(inst->work_bufs[i].va, tmp_va,
+ wb[i].size);
+ }
+ }
+ wb[i].iova = inst->work_bufs[i].dma_addr;
+
+ mtk_vcodec_debug(inst,
+ "work_buf[%d] va=0x%p iova=%pad size=%zu",
+ i, inst->work_bufs[i].va,
+ &inst->work_bufs[i].dma_addr,
+ inst->work_bufs[i].size);
+ }
+
+ /* the pps_buf is used by AP side only */
+ inst->pps_buf.size = 128;
+ ret = mtk_vcodec_mem_alloc(inst->ctx, &inst->pps_buf);
+ if (ret) {
+ mtk_vcodec_err(inst, "cannot allocate pps_buf");
+ goto err_alloc;
+ }
+
+ mtk_vcodec_debug_leave(inst);
+
+ return ret;
+
+err_alloc:
+ h264_enc_free_work_buf(inst);
+
+ return ret;
+}
+
+static unsigned int h264_enc_wait_venc_done(struct venc_h264_inst *inst)
+{
+ unsigned int irq_status = 0;
+ struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)inst->ctx;
+
+ if (!mtk_vcodec_wait_for_done_ctx(ctx, MTK_INST_IRQ_RECEIVED,
+ WAIT_INTR_TIMEOUT_MS)) {
+ irq_status = ctx->irq_status;
+ mtk_vcodec_debug(inst, "irq_status %x <-", irq_status);
+ }
+ return irq_status;
+}
+
+static int h264_encode_sps(struct venc_h264_inst *inst,
+ struct mtk_vcodec_mem *bs_buf,
+ unsigned int *bs_size)
+{
+ int ret = 0;
+ unsigned int irq_status;
+
+ mtk_vcodec_debug_enter(inst);
+
+ ret = vpu_enc_encode(&inst->vpu_inst, H264_BS_MODE_SPS, NULL,
+ bs_buf, bs_size);
+ if (ret)
+ return ret;
+
+ irq_status = h264_enc_wait_venc_done(inst);
+ if (irq_status != MTK_VENC_IRQ_STATUS_SPS) {
+ mtk_vcodec_err(inst, "expect irq status %d",
+ MTK_VENC_IRQ_STATUS_SPS);
+ return -EINVAL;
+ }
+
+ *bs_size = h264_read_reg(inst, VENC_PIC_BITSTREAM_BYTE_CNT);
+ mtk_vcodec_debug(inst, "bs size %d <-", *bs_size);
+
+ return ret;
+}
+
+static int h264_encode_pps(struct venc_h264_inst *inst,
+ struct mtk_vcodec_mem *bs_buf,
+ unsigned int *bs_size)
+{
+ int ret = 0;
+ unsigned int irq_status;
+
+ mtk_vcodec_debug_enter(inst);
+
+ ret = vpu_enc_encode(&inst->vpu_inst, H264_BS_MODE_PPS, NULL,
+ bs_buf, bs_size);
+ if (ret)
+ return ret;
+
+ irq_status = h264_enc_wait_venc_done(inst);
+ if (irq_status != MTK_VENC_IRQ_STATUS_PPS) {
+ mtk_vcodec_err(inst, "expect irq status %d",
+ MTK_VENC_IRQ_STATUS_PPS);
+ return -EINVAL;
+ }
+
+ *bs_size = h264_read_reg(inst, VENC_PIC_BITSTREAM_BYTE_CNT);
+ mtk_vcodec_debug(inst, "bs size %d <-", *bs_size);
+
+ return ret;
+}
+
+static int h264_encode_header(struct venc_h264_inst *inst,
+ struct mtk_vcodec_mem *bs_buf,
+ unsigned int *bs_size)
+{
+ int ret = 0;
+ unsigned int bs_size_sps;
+ unsigned int bs_size_pps;
+
+ ret = h264_encode_sps(inst, bs_buf, &bs_size_sps);
+ if (ret)
+ return ret;
+
+ ret = h264_encode_pps(inst, &inst->pps_buf, &bs_size_pps);
+ if (ret)
+ return ret;
+
+ memcpy(bs_buf->va + bs_size_sps, inst->pps_buf.va, bs_size_pps);
+ *bs_size = bs_size_sps + bs_size_pps;
+
+ return ret;
+}
+
+static int h264_encode_frame(struct venc_h264_inst *inst,
+ struct venc_frm_buf *frm_buf,
+ struct mtk_vcodec_mem *bs_buf,
+ unsigned int *bs_size)
+{
+ int ret = 0;
+ unsigned int irq_status;
+
+ mtk_vcodec_debug_enter(inst);
+
+ ret = vpu_enc_encode(&inst->vpu_inst, H264_BS_MODE_FRAME, frm_buf,
+ bs_buf, bs_size);
+ if (ret)
+ return ret;
+
+	/*
+	 * Skip-frame case: the skip-frame buffer is composed by the VPU side
+	 * alone and does not trigger the hardware, so the wait-for-interrupt
+	 * step is skipped.
+	 */
+ if (inst->vpu_inst.state == VEN_IPI_MSG_ENC_STATE_SKIP) {
+ *bs_size = inst->vpu_inst.bs_size;
+ memcpy(bs_buf->va,
+ inst->work_bufs[VENC_H264_VPU_WORK_BUF_SKIP_FRAME].va,
+ *bs_size);
+ ++inst->frm_cnt;
+ return ret;
+ }
+
+ irq_status = h264_enc_wait_venc_done(inst);
+ if (irq_status != MTK_VENC_IRQ_STATUS_FRM) {
+ mtk_vcodec_err(inst, "irq_status=%d failed", irq_status);
+ return -EIO;
+ }
+
+ *bs_size = h264_read_reg(inst, VENC_PIC_BITSTREAM_BYTE_CNT);
+
+ ++inst->frm_cnt;
+ mtk_vcodec_debug(inst, "frm %d bs_size %d key_frm %d <-",
+ inst->frm_cnt, *bs_size, inst->vpu_inst.is_key_frm);
+
+ return ret;
+}
+
+static void h264_encode_filler(struct venc_h264_inst *inst, void *buf,
+ int size)
+{
+ unsigned char *p = buf;
+
+ if (size < H264_FILLER_MARKER_SIZE) {
+ mtk_vcodec_err(inst, "filler size too small %d", size);
+ return;
+ }
+
+	memcpy(p, h264_filler_marker, H264_FILLER_MARKER_SIZE);
+ size -= H264_FILLER_MARKER_SIZE;
+ p += H264_FILLER_MARKER_SIZE;
+ memset(p, 0xff, size);
+}
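
For reference, h264_encode_filler() emits an Annex-B start code, a NAL header with nal_unit_type 12 (filler data), then 0xff padding. A hedged user-space sketch of the same layout (illustrative only, not part of this patch):

    #include <stdio.h>
    #include <string.h>

    /* Mirrors h264_encode_filler(): the 5-byte marker matches the driver's
     * h264_filler_marker (start code 00 00 00 01 + NAL header 0x0c), and the
     * rest of the unit is 0xff filler payload. */
    static const unsigned char marker[] = {0x00, 0x00, 0x00, 0x01, 0x0c};

    int main(void)
    {
        unsigned char buf[16];
        size_t i;

        memcpy(buf, marker, sizeof(marker));
        memset(buf + sizeof(marker), 0xff, sizeof(buf) - sizeof(marker));
        for (i = 0; i < sizeof(buf); i++)
            printf("%02x ", buf[i]);    /* 00 00 00 01 0c ff ff ... */
        printf("\n");
        return 0;
    }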
+
+static int h264_enc_init(struct mtk_vcodec_ctx *ctx, unsigned long *handle)
+{
+ int ret = 0;
+ struct venc_h264_inst *inst;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ inst->ctx = ctx;
+ inst->vpu_inst.ctx = ctx;
+ inst->vpu_inst.dev = ctx->dev->vpu_plat_dev;
+ inst->vpu_inst.id = IPI_VENC_H264;
+ inst->hw_base = mtk_vcodec_get_reg_addr(inst->ctx, VENC_SYS);
+
+ mtk_vcodec_debug_enter(inst);
+
+ ret = vpu_enc_init(&inst->vpu_inst);
+
+ inst->vsi = (struct venc_h264_vsi *)inst->vpu_inst.vsi;
+
+ mtk_vcodec_debug_leave(inst);
+
+ if (ret)
+ kfree(inst);
+ else
+ (*handle) = (unsigned long)inst;
+
+ return ret;
+}
+
+static int h264_enc_encode(unsigned long handle,
+ enum venc_start_opt opt,
+ struct venc_frm_buf *frm_buf,
+ struct mtk_vcodec_mem *bs_buf,
+ struct venc_done_result *result)
+{
+ int ret = 0;
+ struct venc_h264_inst *inst = (struct venc_h264_inst *)handle;
+ struct mtk_vcodec_ctx *ctx = inst->ctx;
+
+ mtk_vcodec_debug(inst, "opt %d ->", opt);
+
+ enable_irq(ctx->dev->enc_irq);
+
+ switch (opt) {
+ case VENC_START_OPT_ENCODE_SEQUENCE_HEADER: {
+ unsigned int bs_size_hdr;
+
+ ret = h264_encode_header(inst, bs_buf, &bs_size_hdr);
+ if (ret)
+ goto encode_err;
+
+ result->bs_size = bs_size_hdr;
+ result->is_key_frm = false;
+ break;
+ }
+
+ case VENC_START_OPT_ENCODE_FRAME: {
+ int hdr_sz;
+ int hdr_sz_ext;
+ int filler_sz = 0;
+ const int bs_alignment = 128;
+ struct mtk_vcodec_mem tmp_bs_buf;
+ unsigned int bs_size_hdr;
+ unsigned int bs_size_frm;
+
+ if (!inst->prepend_hdr) {
+ ret = h264_encode_frame(inst, frm_buf, bs_buf,
+ &result->bs_size);
+ if (ret)
+ goto encode_err;
+ result->is_key_frm = inst->vpu_inst.is_key_frm;
+ break;
+ }
+
+ mtk_vcodec_debug(inst, "h264_encode_frame prepend SPS/PPS");
+
+ ret = h264_encode_header(inst, bs_buf, &bs_size_hdr);
+ if (ret)
+ goto encode_err;
+
+ hdr_sz = bs_size_hdr;
+ hdr_sz_ext = (hdr_sz & (bs_alignment - 1));
+ if (hdr_sz_ext) {
+ filler_sz = bs_alignment - hdr_sz_ext;
+ if (hdr_sz_ext + H264_FILLER_MARKER_SIZE > bs_alignment)
+ filler_sz += bs_alignment;
+ h264_encode_filler(inst, bs_buf->va + hdr_sz,
+ filler_sz);
+ }
+
+ tmp_bs_buf.va = bs_buf->va + hdr_sz + filler_sz;
+ tmp_bs_buf.dma_addr = bs_buf->dma_addr + hdr_sz + filler_sz;
+ tmp_bs_buf.size = bs_buf->size - (hdr_sz + filler_sz);
+
+ ret = h264_encode_frame(inst, frm_buf, &tmp_bs_buf,
+ &bs_size_frm);
+ if (ret)
+ goto encode_err;
+
+ result->bs_size = hdr_sz + filler_sz + bs_size_frm;
+
+ mtk_vcodec_debug(inst, "hdr %d filler %d frame %d bs %d",
+ hdr_sz, filler_sz, bs_size_frm,
+ result->bs_size);
+
+ inst->prepend_hdr = 0;
+ result->is_key_frm = inst->vpu_inst.is_key_frm;
+ break;
+ }
+
+ default:
+ mtk_vcodec_err(inst, "venc_start_opt %d not supported", opt);
+ ret = -EINVAL;
+ break;
+ }
+
+encode_err:
+
+ disable_irq(ctx->dev->enc_irq);
+ mtk_vcodec_debug(inst, "opt %d <-", opt);
+
+ return ret;
+}
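
The filler arithmetic in the VENC_START_OPT_ENCODE_FRAME case above keeps the frame bitstream 128-byte aligned after the prepended headers. A hedged user-space sketch of the same computation with two worked values (BS_ALIGNMENT and MARKER_SIZE mirror the driver's bs_alignment and H264_FILLER_MARKER_SIZE; not part of this patch):

    #include <stdio.h>

    #define BS_ALIGNMENT 128
    #define MARKER_SIZE  5

    static int filler_size(int hdr_sz)
    {
        int ext = hdr_sz & (BS_ALIGNMENT - 1);
        int filler = 0;

        if (ext) {
            filler = BS_ALIGNMENT - ext;
            /* A filler NAL needs at least MARKER_SIZE bytes; if the gap
             * is too small, pad a full extra alignment unit. */
            if (ext + MARKER_SIZE > BS_ALIGNMENT)
                filler += BS_ALIGNMENT;
        }
        return filler;
    }

    int main(void)
    {
        /* A 100-byte header leaves a 28-byte gap; a 126-byte header leaves
         * only 2 bytes, so a full extra 128 is added: prints "28 130". */
        printf("%d %d\n", filler_size(100), filler_size(126));
        return 0;
    }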
+
+static int h264_enc_set_param(unsigned long handle,
+ enum venc_set_param_type type,
+ struct venc_enc_param *enc_prm)
+{
+ int ret = 0;
+ struct venc_h264_inst *inst = (struct venc_h264_inst *)handle;
+
+ mtk_vcodec_debug(inst, "->type=%d", type);
+
+ switch (type) {
+ case VENC_SET_PARAM_ENC:
+ inst->vsi->config.input_fourcc = enc_prm->input_yuv_fmt;
+ inst->vsi->config.bitrate = enc_prm->bitrate;
+ inst->vsi->config.pic_w = enc_prm->width;
+ inst->vsi->config.pic_h = enc_prm->height;
+ inst->vsi->config.buf_w = enc_prm->buf_width;
+ inst->vsi->config.buf_h = enc_prm->buf_height;
+ inst->vsi->config.gop_size = enc_prm->gop_size;
+ inst->vsi->config.framerate = enc_prm->frm_rate;
+ inst->vsi->config.intra_period = enc_prm->intra_period;
+ inst->vsi->config.profile =
+ h264_get_profile(inst, enc_prm->h264_profile);
+ inst->vsi->config.level =
+ h264_get_level(inst, enc_prm->h264_level);
+ inst->vsi->config.wfd = 0;
+ ret = vpu_enc_set_param(&inst->vpu_inst, type, enc_prm);
+ if (ret)
+ break;
+ if (inst->work_buf_allocated) {
+ h264_enc_free_work_buf(inst);
+ inst->work_buf_allocated = false;
+ }
+ ret = h264_enc_alloc_work_buf(inst);
+ if (ret)
+ break;
+ inst->work_buf_allocated = true;
+ break;
+
+ case VENC_SET_PARAM_PREPEND_HEADER:
+ inst->prepend_hdr = 1;
+ mtk_vcodec_debug(inst, "set prepend header mode");
+ break;
+
+ default:
+ ret = vpu_enc_set_param(&inst->vpu_inst, type, enc_prm);
+ break;
+ }
+
+ mtk_vcodec_debug_leave(inst);
+
+ return ret;
+}
+
+static int h264_enc_deinit(unsigned long handle)
+{
+ int ret = 0;
+ struct venc_h264_inst *inst = (struct venc_h264_inst *)handle;
+
+ mtk_vcodec_debug_enter(inst);
+
+ ret = vpu_enc_deinit(&inst->vpu_inst);
+
+ if (inst->work_buf_allocated)
+ h264_enc_free_work_buf(inst);
+
+ mtk_vcodec_debug_leave(inst);
+ kfree(inst);
+
+ return ret;
+}
+
+static struct venc_common_if venc_h264_if = {
+	.init = h264_enc_init,
+	.encode = h264_enc_encode,
+	.set_param = h264_enc_set_param,
+	.deinit = h264_enc_deinit,
+};
+
+struct venc_common_if *get_h264_enc_comm_if(void);
+
+struct venc_common_if *get_h264_enc_comm_if(void)
+{
+ return &venc_h264_if;
+}
diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
new file mode 100644
index 000000000..6d9758479
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
@@ -0,0 +1,484 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Daniel Hsiao <daniel.hsiao@mediatek.com>
+ * PoChun Lin <pochun.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "../mtk_vcodec_drv.h"
+#include "../mtk_vcodec_util.h"
+#include "../mtk_vcodec_intr.h"
+#include "../mtk_vcodec_enc.h"
+#include "../mtk_vcodec_enc_pm.h"
+#include "../venc_drv_base.h"
+#include "../venc_ipi_msg.h"
+#include "../venc_vpu_if.h"
+#include "mtk_vpu.h"
+
+#define VENC_BITSTREAM_FRAME_SIZE 0x0098
+#define VENC_BITSTREAM_HEADER_LEN 0x00e8
+
+/* The ac_tag is the vp8 frame tag. */
+#define MAX_AC_TAG_SIZE 10
+
+/**
+ * enum venc_vp8_vpu_work_buf - vp8 encoder buffer index
+ */
+enum venc_vp8_vpu_work_buf {
+ VENC_VP8_VPU_WORK_BUF_LUMA,
+ VENC_VP8_VPU_WORK_BUF_LUMA2,
+ VENC_VP8_VPU_WORK_BUF_LUMA3,
+ VENC_VP8_VPU_WORK_BUF_CHROMA,
+ VENC_VP8_VPU_WORK_BUF_CHROMA2,
+ VENC_VP8_VPU_WORK_BUF_CHROMA3,
+ VENC_VP8_VPU_WORK_BUF_MV_INFO,
+ VENC_VP8_VPU_WORK_BUF_BS_HEADER,
+ VENC_VP8_VPU_WORK_BUF_PROB_BUF,
+ VENC_VP8_VPU_WORK_BUF_RC_INFO,
+ VENC_VP8_VPU_WORK_BUF_RC_CODE,
+ VENC_VP8_VPU_WORK_BUF_RC_CODE2,
+ VENC_VP8_VPU_WORK_BUF_RC_CODE3,
+ VENC_VP8_VPU_WORK_BUF_MAX,
+};
+
+/*
+ * struct venc_vp8_vpu_config - Structure for vp8 encoder configuration
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
+ * @input_fourcc: input fourcc
+ * @bitrate: target bitrate (in bps)
+ * @pic_w: picture width. Picture size is visible stream resolution, in pixels,
+ *	to be used for display purposes; must be smaller than or equal to
+ *	the buffer size.
+ * @pic_h: picture height
+ * @buf_w: buffer width (with 16 alignment). Buffer size is stream resolution
+ * in pixels aligned to hardware requirements.
+ * @buf_h: buffer height (with 16 alignment)
+ * @gop_size: group of pictures size (key frame period)
+ * @framerate: frame rate in fps
+ * @ts_mode: temporal scalability mode (0: disable, 1: enable)
+ *	when enabled, three temporal layers are supported -
+ *	0: 7.5fps, 1: 7.5fps, 2: 15fps.
+ */
+struct venc_vp8_vpu_config {
+ u32 input_fourcc;
+ u32 bitrate;
+ u32 pic_w;
+ u32 pic_h;
+ u32 buf_w;
+ u32 buf_h;
+ u32 gop_size;
+ u32 framerate;
+ u32 ts_mode;
+};
+
+/*
+ * struct venc_vp8_vpu_buf - Structure for buffer information
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
+ * @iova: IO virtual address
+ * @vpua: VPU side memory addr which is used by RC_CODE
+ * @size: buffer size (in bytes)
+ */
+struct venc_vp8_vpu_buf {
+ u32 iova;
+ u32 vpua;
+ u32 size;
+};
+
+/*
+ * struct venc_vp8_vsi - Structure for VPU driver control and info share
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
+ * This structure is allocated on the VPU side and shared with the AP side.
+ * @config: vp8 encoder configuration
+ * @work_bufs: working buffer information on the VPU side
+ *	The work_bufs here stores the 'size' info shared with the AP side.
+ *	The counterpart in struct venc_vp8_inst is used for the actual
+ *	memory allocation on the AP side. The AP driver copies each 'size'
+ *	from here into the one in struct mtk_vcodec_mem, invokes
+ *	mtk_vcodec_mem_alloc to allocate the buffer, and then passes the
+ *	resulting 'dma_addr' back to the 'iova' field here for register
+ *	setting on the VPU side.
+ */
+struct venc_vp8_vsi {
+ struct venc_vp8_vpu_config config;
+ struct venc_vp8_vpu_buf work_bufs[VENC_VP8_VPU_WORK_BUF_MAX];
+};
+
+/*
+ * struct venc_vp8_inst - vp8 encoder AP driver instance
+ * @hw_base: vp8 encoder hardware register base
+ * @work_bufs: working buffer
+ * @work_buf_allocated: working buffer allocated flag
+ * @frm_cnt: encoded frame count; used for I-frame placement and reset
+ *	when a force-intra cmd is received.
+ * @ts_mode: temporal scalability mode (0: disable, 1: enable)
+ *	when enabled, three temporal layers are supported -
+ *	0: 7.5fps, 1: 7.5fps, 2: 15fps.
+ * @vpu_inst: VPU instance to exchange information between AP and VPU
+ * @vsi: driver structure allocated by VPU side and shared to AP side for
+ * control and info share
+ * @ctx: context for v4l2 layer integration
+ */
+struct venc_vp8_inst {
+ void __iomem *hw_base;
+ struct mtk_vcodec_mem work_bufs[VENC_VP8_VPU_WORK_BUF_MAX];
+ bool work_buf_allocated;
+ unsigned int frm_cnt;
+ unsigned int ts_mode;
+ struct venc_vpu_inst vpu_inst;
+ struct venc_vp8_vsi *vsi;
+ struct mtk_vcodec_ctx *ctx;
+};
+
+static inline u32 vp8_enc_read_reg(struct venc_vp8_inst *inst, u32 addr)
+{
+ return readl(inst->hw_base + addr);
+}
+
+static void vp8_enc_free_work_buf(struct venc_vp8_inst *inst)
+{
+ int i;
+
+ mtk_vcodec_debug_enter(inst);
+
+ /* Buffers need to be freed by AP. */
+ for (i = 0; i < VENC_VP8_VPU_WORK_BUF_MAX; i++) {
+		if (inst->work_bufs[i].size == 0)
+ continue;
+ mtk_vcodec_mem_free(inst->ctx, &inst->work_bufs[i]);
+ }
+
+ mtk_vcodec_debug_leave(inst);
+}
+
+static int vp8_enc_alloc_work_buf(struct venc_vp8_inst *inst)
+{
+ int i;
+ int ret = 0;
+ struct venc_vp8_vpu_buf *wb = inst->vsi->work_bufs;
+
+ mtk_vcodec_debug_enter(inst);
+
+ for (i = 0; i < VENC_VP8_VPU_WORK_BUF_MAX; i++) {
+		if (wb[i].size == 0)
+ continue;
+ /*
+ * This 'wb' structure is set by VPU side and shared to AP for
+ * buffer allocation and IO virtual addr mapping. For most of
+ * the buffers, AP will allocate the buffer according to 'size'
+ * field and store the IO virtual addr in 'iova' field. For the
+ * RC_CODEx buffers, they are pre-allocated in the VPU side
+ * because they are inside VPU SRAM, and save the VPU addr in
+ * the 'vpua' field. The AP will translate the VPU addr to the
+ * corresponding IO virtual addr and store in 'iova' field.
+ */
+ inst->work_bufs[i].size = wb[i].size;
+ ret = mtk_vcodec_mem_alloc(inst->ctx, &inst->work_bufs[i]);
+ if (ret) {
+ mtk_vcodec_err(inst,
+ "cannot alloc work_bufs[%d]", i);
+ goto err_alloc;
+ }
+		/*
+		 * The RC_CODEx data is pre-allocated by the VPU and
+		 * referenced by a VPU addr, so use memcpy to copy it from the
+		 * VPU addr into the buffer just allocated; the buffer's IO
+		 * virtual addr in the 'iova' field is then used for register
+		 * setting on the VPU side.
+		 */
+ if (i == VENC_VP8_VPU_WORK_BUF_RC_CODE ||
+ i == VENC_VP8_VPU_WORK_BUF_RC_CODE2 ||
+ i == VENC_VP8_VPU_WORK_BUF_RC_CODE3) {
+ void *tmp_va;
+
+ tmp_va = vpu_mapping_dm_addr(inst->vpu_inst.dev,
+ wb[i].vpua);
+ memcpy(inst->work_bufs[i].va, tmp_va, wb[i].size);
+ }
+ wb[i].iova = inst->work_bufs[i].dma_addr;
+
+ mtk_vcodec_debug(inst,
+ "work_bufs[%d] va=0x%p,iova=%pad,size=%zu",
+ i, inst->work_bufs[i].va,
+ &inst->work_bufs[i].dma_addr,
+ inst->work_bufs[i].size);
+ }
+
+ mtk_vcodec_debug_leave(inst);
+
+ return ret;
+
+err_alloc:
+ vp8_enc_free_work_buf(inst);
+
+ return ret;
+}
+
+static unsigned int vp8_enc_wait_venc_done(struct venc_vp8_inst *inst)
+{
+ unsigned int irq_status = 0;
+ struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)inst->ctx;
+
+ if (!mtk_vcodec_wait_for_done_ctx(ctx, MTK_INST_IRQ_RECEIVED,
+ WAIT_INTR_TIMEOUT_MS)) {
+ irq_status = ctx->irq_status;
+ mtk_vcodec_debug(inst, "isr return %x", irq_status);
+ }
+ return irq_status;
+}
+
+/*
+ * Compose ac_tag, bitstream header and bitstream payload into
+ * one bitstream buffer.
+ */
+static int vp8_enc_compose_one_frame(struct venc_vp8_inst *inst,
+ struct mtk_vcodec_mem *bs_buf,
+ unsigned int *bs_size)
+{
+ unsigned int not_key;
+ u32 bs_frm_size;
+ u32 bs_hdr_len;
+ unsigned int ac_tag_size;
+ u8 ac_tag[MAX_AC_TAG_SIZE];
+ u32 tag;
+
+ bs_frm_size = vp8_enc_read_reg(inst, VENC_BITSTREAM_FRAME_SIZE);
+ bs_hdr_len = vp8_enc_read_reg(inst, VENC_BITSTREAM_HEADER_LEN);
+
+ /* if a frame is key frame, not_key is 0 */
+ not_key = !inst->vpu_inst.is_key_frm;
+ tag = (bs_hdr_len << 5) | 0x10 | not_key;
+ ac_tag[0] = tag & 0xff;
+ ac_tag[1] = (tag >> 8) & 0xff;
+ ac_tag[2] = (tag >> 16) & 0xff;
+
+ /* key frame */
+ if (not_key == 0) {
+ ac_tag_size = MAX_AC_TAG_SIZE;
+ ac_tag[3] = 0x9d;
+ ac_tag[4] = 0x01;
+ ac_tag[5] = 0x2a;
+ ac_tag[6] = inst->vsi->config.pic_w;
+ ac_tag[7] = inst->vsi->config.pic_w >> 8;
+ ac_tag[8] = inst->vsi->config.pic_h;
+ ac_tag[9] = inst->vsi->config.pic_h >> 8;
+ } else {
+ ac_tag_size = 3;
+ }
+
+ if (bs_buf->size < bs_hdr_len + bs_frm_size + ac_tag_size) {
+		mtk_vcodec_err(inst, "bitstream buf size is too small (%zu)",
+ bs_buf->size);
+ return -EINVAL;
+ }
+
+ /*
+	 * (1) The vp8 bitstream header and body are generated by the HW vp8
+	 * encoder separately but at the same time, so the bitstream header
+	 * length cannot be known in advance.
+	 * (2) Per the vp8 spec, no stuffing byte is allowed between the
+	 * ac tag, the bitstream header and the bitstream body.
+ */
+ memmove(bs_buf->va + bs_hdr_len + ac_tag_size,
+ bs_buf->va, bs_frm_size);
+ memcpy(bs_buf->va + ac_tag_size,
+ inst->work_bufs[VENC_VP8_VPU_WORK_BUF_BS_HEADER].va,
+ bs_hdr_len);
+ memcpy(bs_buf->va, ac_tag, ac_tag_size);
+ *bs_size = bs_frm_size + bs_hdr_len + ac_tag_size;
+
+ return 0;
+}
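
To make the tag packing above concrete, here is a hedged sketch decoding the 3-byte frame tag the function writes; the field layout follows the VP8 uncompressed data chunk (RFC 6386), and the example bytes are illustrative:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Example ac_tag for a key frame with a 16-byte bitstream header:
         * tag = (16 << 5) | 0x10 | 0 = 0x210, stored little-endian. */
        uint8_t ac_tag[3] = {0x10, 0x02, 0x00};
        uint32_t tag = ac_tag[0] | (ac_tag[1] << 8) |
                       ((uint32_t)ac_tag[2] << 16);

        printf("key frame:     %u\n", !(tag & 0x1));     /* bit 0: 0 = key */
        printf("show_frame:    %u\n", (tag >> 4) & 0x1); /* the 0x10 bit */
        printf("1st part size: %u\n", tag >> 5);         /* bs_hdr_len */
        return 0;
    }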
+
+static int vp8_enc_encode_frame(struct venc_vp8_inst *inst,
+ struct venc_frm_buf *frm_buf,
+ struct mtk_vcodec_mem *bs_buf,
+ unsigned int *bs_size)
+{
+ int ret = 0;
+ unsigned int irq_status;
+
+ mtk_vcodec_debug(inst, "->frm_cnt=%d", inst->frm_cnt);
+
+ ret = vpu_enc_encode(&inst->vpu_inst, 0, frm_buf, bs_buf, bs_size);
+ if (ret)
+ return ret;
+
+ irq_status = vp8_enc_wait_venc_done(inst);
+ if (irq_status != MTK_VENC_IRQ_STATUS_FRM) {
+ mtk_vcodec_err(inst, "irq_status=%d failed", irq_status);
+ return -EIO;
+ }
+
+ if (vp8_enc_compose_one_frame(inst, bs_buf, bs_size)) {
+ mtk_vcodec_err(inst, "vp8_enc_compose_one_frame failed");
+ return -EINVAL;
+ }
+
+ inst->frm_cnt++;
+ mtk_vcodec_debug(inst, "<-size=%d key_frm=%d", *bs_size,
+ inst->vpu_inst.is_key_frm);
+
+ return ret;
+}
+
+static int vp8_enc_init(struct mtk_vcodec_ctx *ctx, unsigned long *handle)
+{
+ int ret = 0;
+ struct venc_vp8_inst *inst;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ inst->ctx = ctx;
+ inst->vpu_inst.ctx = ctx;
+ inst->vpu_inst.dev = ctx->dev->vpu_plat_dev;
+ inst->vpu_inst.id = IPI_VENC_VP8;
+ inst->hw_base = mtk_vcodec_get_reg_addr(inst->ctx, VENC_LT_SYS);
+
+ mtk_vcodec_debug_enter(inst);
+
+ ret = vpu_enc_init(&inst->vpu_inst);
+
+ inst->vsi = (struct venc_vp8_vsi *)inst->vpu_inst.vsi;
+
+ mtk_vcodec_debug_leave(inst);
+
+ if (ret)
+ kfree(inst);
+ else
+ (*handle) = (unsigned long)inst;
+
+ return ret;
+}
+
+static int vp8_enc_encode(unsigned long handle,
+ enum venc_start_opt opt,
+ struct venc_frm_buf *frm_buf,
+ struct mtk_vcodec_mem *bs_buf,
+ struct venc_done_result *result)
+{
+ int ret = 0;
+ struct venc_vp8_inst *inst = (struct venc_vp8_inst *)handle;
+ struct mtk_vcodec_ctx *ctx = inst->ctx;
+
+ mtk_vcodec_debug_enter(inst);
+
+ enable_irq(ctx->dev->enc_lt_irq);
+
+ switch (opt) {
+ case VENC_START_OPT_ENCODE_FRAME:
+ ret = vp8_enc_encode_frame(inst, frm_buf, bs_buf,
+ &result->bs_size);
+ if (ret)
+ goto encode_err;
+ result->is_key_frm = inst->vpu_inst.is_key_frm;
+ break;
+
+ default:
+		mtk_vcodec_err(inst, "venc_start_opt %d not supported", opt);
+ ret = -EINVAL;
+ break;
+ }
+
+encode_err:
+
+ disable_irq(ctx->dev->enc_lt_irq);
+ mtk_vcodec_debug_leave(inst);
+
+ return ret;
+}
+
+static int vp8_enc_set_param(unsigned long handle,
+ enum venc_set_param_type type,
+ struct venc_enc_param *enc_prm)
+{
+ int ret = 0;
+ struct venc_vp8_inst *inst = (struct venc_vp8_inst *)handle;
+
+ mtk_vcodec_debug(inst, "->type=%d", type);
+
+ switch (type) {
+ case VENC_SET_PARAM_ENC:
+ inst->vsi->config.input_fourcc = enc_prm->input_yuv_fmt;
+ inst->vsi->config.bitrate = enc_prm->bitrate;
+ inst->vsi->config.pic_w = enc_prm->width;
+ inst->vsi->config.pic_h = enc_prm->height;
+ inst->vsi->config.buf_w = enc_prm->buf_width;
+ inst->vsi->config.buf_h = enc_prm->buf_height;
+ inst->vsi->config.gop_size = enc_prm->gop_size;
+ inst->vsi->config.framerate = enc_prm->frm_rate;
+ inst->vsi->config.ts_mode = inst->ts_mode;
+ ret = vpu_enc_set_param(&inst->vpu_inst, type, enc_prm);
+ if (ret)
+ break;
+ if (inst->work_buf_allocated) {
+ vp8_enc_free_work_buf(inst);
+ inst->work_buf_allocated = false;
+ }
+ ret = vp8_enc_alloc_work_buf(inst);
+ if (ret)
+ break;
+ inst->work_buf_allocated = true;
+ break;
+
+ /*
+ * VENC_SET_PARAM_TS_MODE must be called before VENC_SET_PARAM_ENC
+ */
+ case VENC_SET_PARAM_TS_MODE:
+ inst->ts_mode = 1;
+ mtk_vcodec_debug(inst, "set ts_mode");
+ break;
+
+ default:
+ ret = vpu_enc_set_param(&inst->vpu_inst, type, enc_prm);
+ break;
+ }
+
+ mtk_vcodec_debug_leave(inst);
+
+ return ret;
+}
+
+static int vp8_enc_deinit(unsigned long handle)
+{
+ int ret = 0;
+ struct venc_vp8_inst *inst = (struct venc_vp8_inst *)handle;
+
+ mtk_vcodec_debug_enter(inst);
+
+ ret = vpu_enc_deinit(&inst->vpu_inst);
+
+ if (inst->work_buf_allocated)
+ vp8_enc_free_work_buf(inst);
+
+ mtk_vcodec_debug_leave(inst);
+ kfree(inst);
+
+ return ret;
+}
+
+static struct venc_common_if venc_vp8_if = {
+	.init = vp8_enc_init,
+	.encode = vp8_enc_encode,
+	.set_param = vp8_enc_set_param,
+	.deinit = vp8_enc_deinit,
+};
+
+struct venc_common_if *get_vp8_enc_comm_if(void);
+
+struct venc_common_if *get_vp8_enc_comm_if(void)
+{
+ return &venc_vp8_if;
+}
diff --git a/drivers/media/platform/mtk-vcodec/venc_drv_base.h b/drivers/media/platform/mtk-vcodec/venc_drv_base.h
new file mode 100644
index 000000000..6308d44de
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/venc_drv_base.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Daniel Hsiao <daniel.hsiao@mediatek.com>
+ * Jungchang Tsao <jungchang.tsao@mediatek.com>
+ * Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _VENC_DRV_BASE_
+#define _VENC_DRV_BASE_
+
+#include "mtk_vcodec_drv.h"
+
+#include "venc_drv_if.h"
+
+struct venc_common_if {
+ /**
+ * (*init)() - initialize driver
+ * @ctx: [in] mtk v4l2 context
+ * @handle: [out] driver handle
+ */
+ int (*init)(struct mtk_vcodec_ctx *ctx, unsigned long *handle);
+
+ /**
+ * (*encode)() - trigger encode
+ * @handle: [in] driver handle
+ * @opt: [in] encode option
+ * @frm_buf: [in] frame buffer to store input frame
+ * @bs_buf: [in] bitstream buffer to store output bitstream
+ * @result: [out] encode result
+ */
+ int (*encode)(unsigned long handle, enum venc_start_opt opt,
+ struct venc_frm_buf *frm_buf,
+ struct mtk_vcodec_mem *bs_buf,
+ struct venc_done_result *result);
+
+ /**
+ * (*set_param)() - set driver's parameter
+ * @handle: [in] driver handle
+ * @type: [in] parameter type
+ * @in: [in] buffer to store the parameter
+ */
+ int (*set_param)(unsigned long handle, enum venc_set_param_type type,
+ struct venc_enc_param *in);
+
+ /**
+ * (*deinit)() - deinitialize driver.
+ * @handle: [in] driver handle
+ */
+ int (*deinit)(unsigned long handle);
+};
+
+#endif
diff --git a/drivers/media/platform/mtk-vcodec/venc_drv_if.c b/drivers/media/platform/mtk-vcodec/venc_drv_if.c
new file mode 100644
index 000000000..c4c83e718
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/venc_drv_if.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Daniel Hsiao <daniel.hsiao@mediatek.com>
+ * Jungchang Tsao <jungchang.tsao@mediatek.com>
+ * Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "venc_drv_base.h"
+#include "venc_drv_if.h"
+
+#include "mtk_vcodec_enc.h"
+#include "mtk_vcodec_enc_pm.h"
+#include "mtk_vpu.h"
+
+struct venc_common_if *get_h264_enc_comm_if(void);
+struct venc_common_if *get_vp8_enc_comm_if(void);
+
+int venc_if_init(struct mtk_vcodec_ctx *ctx, unsigned int fourcc)
+{
+ int ret = 0;
+
+ switch (fourcc) {
+ case V4L2_PIX_FMT_VP8:
+ ctx->enc_if = get_vp8_enc_comm_if();
+ break;
+ case V4L2_PIX_FMT_H264:
+ ctx->enc_if = get_h264_enc_comm_if();
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mtk_venc_lock(ctx);
+ mtk_vcodec_enc_clock_on(&ctx->dev->pm);
+ ret = ctx->enc_if->init(ctx, (unsigned long *)&ctx->drv_handle);
+ mtk_vcodec_enc_clock_off(&ctx->dev->pm);
+ mtk_venc_unlock(ctx);
+
+ return ret;
+}
+
+int venc_if_set_param(struct mtk_vcodec_ctx *ctx,
+ enum venc_set_param_type type, struct venc_enc_param *in)
+{
+ int ret = 0;
+
+ mtk_venc_lock(ctx);
+ mtk_vcodec_enc_clock_on(&ctx->dev->pm);
+ ret = ctx->enc_if->set_param(ctx->drv_handle, type, in);
+ mtk_vcodec_enc_clock_off(&ctx->dev->pm);
+ mtk_venc_unlock(ctx);
+
+ return ret;
+}
+
+int venc_if_encode(struct mtk_vcodec_ctx *ctx,
+ enum venc_start_opt opt, struct venc_frm_buf *frm_buf,
+ struct mtk_vcodec_mem *bs_buf,
+ struct venc_done_result *result)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ mtk_venc_lock(ctx);
+
+ spin_lock_irqsave(&ctx->dev->irqlock, flags);
+ ctx->dev->curr_ctx = ctx;
+ spin_unlock_irqrestore(&ctx->dev->irqlock, flags);
+
+ mtk_vcodec_enc_clock_on(&ctx->dev->pm);
+ ret = ctx->enc_if->encode(ctx->drv_handle, opt, frm_buf,
+ bs_buf, result);
+ mtk_vcodec_enc_clock_off(&ctx->dev->pm);
+
+ spin_lock_irqsave(&ctx->dev->irqlock, flags);
+ ctx->dev->curr_ctx = NULL;
+ spin_unlock_irqrestore(&ctx->dev->irqlock, flags);
+
+ mtk_venc_unlock(ctx);
+ return ret;
+}
+
+int venc_if_deinit(struct mtk_vcodec_ctx *ctx)
+{
+ int ret = 0;
+
+ if (ctx->drv_handle == 0)
+ return 0;
+
+ mtk_venc_lock(ctx);
+ mtk_vcodec_enc_clock_on(&ctx->dev->pm);
+ ret = ctx->enc_if->deinit(ctx->drv_handle);
+ mtk_vcodec_enc_clock_off(&ctx->dev->pm);
+ mtk_venc_unlock(ctx);
+
+ ctx->drv_handle = 0;
+
+ return ret;
+}
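
All four wrappers above follow the same lock / clock-on / op / clock-off / unlock pattern. Here is a hedged sketch of how a caller such as the v4l2 encoder state machine might drive this interface; the helper below is hypothetical and omits buffer setup and most error handling:

    /* Hypothetical caller sketch (not part of this patch). */
    static int example_encode_one(struct mtk_vcodec_ctx *ctx,
                                  struct venc_frm_buf *frm,
                                  struct mtk_vcodec_mem *bs)
    {
        struct venc_enc_param prm = {}; /* would be filled from v4l2 controls */
        struct venc_done_result result;
        int ret;

        ret = venc_if_init(ctx, V4L2_PIX_FMT_H264);
        if (ret)
            return ret;

        ret = venc_if_set_param(ctx, VENC_SET_PARAM_ENC, &prm);
        if (ret)
            goto out;

        /* For H.264, SPS/PPS are requested explicitly before frames. */
        ret = venc_if_encode(ctx, VENC_START_OPT_ENCODE_SEQUENCE_HEADER,
                             NULL, bs, &result);
        if (ret)
            goto out;

        ret = venc_if_encode(ctx, VENC_START_OPT_ENCODE_FRAME,
                             frm, bs, &result);
    out:
        venc_if_deinit(ctx);
        return ret;
    }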
diff --git a/drivers/media/platform/mtk-vcodec/venc_drv_if.h b/drivers/media/platform/mtk-vcodec/venc_drv_if.h
new file mode 100644
index 000000000..a6e7d32e5
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/venc_drv_if.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Daniel Hsiao <daniel.hsiao@mediatek.com>
+ * Jungchang Tsao <jungchang.tsao@mediatek.com>
+ * Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _VENC_DRV_IF_H_
+#define _VENC_DRV_IF_H_
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_util.h"
+
+/*
+ * enum venc_yuv_fmt - The type of input yuv format
+ * (VPU related: If you change the order, you must also update the VPU code.)
+ * @VENC_YUV_FORMAT_I420: I420 YUV format
+ * @VENC_YUV_FORMAT_YV12: YV12 YUV format
+ * @VENC_YUV_FORMAT_NV12: NV12 YUV format
+ * @VENC_YUV_FORMAT_NV21: NV21 YUV format
+ */
+enum venc_yuv_fmt {
+ VENC_YUV_FORMAT_I420 = 3,
+ VENC_YUV_FORMAT_YV12 = 5,
+ VENC_YUV_FORMAT_NV12 = 6,
+ VENC_YUV_FORMAT_NV21 = 7,
+};
+
+/*
+ * enum venc_start_opt - encode frame option used in venc_if_encode()
+ * @VENC_START_OPT_ENCODE_SEQUENCE_HEADER: encode SPS/PPS for H264
+ * @VENC_START_OPT_ENCODE_FRAME: encode normal frame
+ */
+enum venc_start_opt {
+ VENC_START_OPT_ENCODE_SEQUENCE_HEADER,
+ VENC_START_OPT_ENCODE_FRAME,
+};
+
+/*
+ * enum venc_set_param_type - The type of set parameter used in
+ * venc_if_set_param()
+ * (VPU related: If you change the order, you must also update the VPU code.)
+ * @VENC_SET_PARAM_ENC: set encoder parameters
+ * @VENC_SET_PARAM_FORCE_INTRA: force an intra frame
+ * @VENC_SET_PARAM_ADJUST_BITRATE: adjust bitrate (in bps)
+ * @VENC_SET_PARAM_ADJUST_FRAMERATE: set frame rate
+ * @VENC_SET_PARAM_GOP_SIZE: set IDR interval
+ * @VENC_SET_PARAM_INTRA_PERIOD: set I frame interval
+ * @VENC_SET_PARAM_SKIP_FRAME: set H264 skip one frame
+ * @VENC_SET_PARAM_PREPEND_HEADER: set H264 prepend SPS/PPS before IDR
+ * @VENC_SET_PARAM_TS_MODE: set VP8 temporal scalability mode
+ */
+enum venc_set_param_type {
+ VENC_SET_PARAM_ENC,
+ VENC_SET_PARAM_FORCE_INTRA,
+ VENC_SET_PARAM_ADJUST_BITRATE,
+ VENC_SET_PARAM_ADJUST_FRAMERATE,
+ VENC_SET_PARAM_GOP_SIZE,
+ VENC_SET_PARAM_INTRA_PERIOD,
+ VENC_SET_PARAM_SKIP_FRAME,
+ VENC_SET_PARAM_PREPEND_HEADER,
+ VENC_SET_PARAM_TS_MODE,
+};
+
+/*
+ * struct venc_enc_param - encoder settings for VENC_SET_PARAM_ENC used in
+ *	venc_if_set_param()
+ * @input_yuv_fmt: input yuv format
+ * @h264_profile: V4L2 defined H.264 profile
+ * @h264_level: V4L2 defined H.264 level
+ * @width: image width
+ * @height: image height
+ * @buf_width: buffer width
+ * @buf_height: buffer height
+ * @frm_rate: frame rate in fps
+ * @intra_period: intra frame period
+ * @bitrate: target bitrate in bps
+ * @gop_size: group of picture size
+ */
+struct venc_enc_param {
+ enum venc_yuv_fmt input_yuv_fmt;
+ unsigned int h264_profile;
+ unsigned int h264_level;
+ unsigned int width;
+ unsigned int height;
+ unsigned int buf_width;
+ unsigned int buf_height;
+ unsigned int frm_rate;
+ unsigned int intra_period;
+ unsigned int bitrate;
+ unsigned int gop_size;
+};
+
+/*
+ * struct venc_frm_buf - frame buffer information used in venc_if_encode()
+ * @fb_addr: plane frame buffer addresses
+ */
+struct venc_frm_buf {
+ struct mtk_vcodec_mem fb_addr[MTK_VCODEC_MAX_PLANES];
+};
+
+/*
+ * struct venc_done_result - encode result returned by venc_if_encode()
+ * @bs_size: output bitstream size
+ * @is_key_frm: whether the output frame is a key frame
+ */
+struct venc_done_result {
+ unsigned int bs_size;
+ bool is_key_frm;
+};
+
+/*
+ * venc_if_init - Create the driver handle
+ * @ctx: device context
+ * @fourcc: encoder input format
+ * Return: 0 if the handle is created successfully, otherwise an error code.
+ */
+int venc_if_init(struct mtk_vcodec_ctx *ctx, unsigned int fourcc);
+
+/*
+ * venc_if_deinit - Release the driver handle
+ * @ctx: device context
+ * Return: 0 if the handle is released successfully, otherwise an error code.
+ */
+int venc_if_deinit(struct mtk_vcodec_ctx *ctx);
+
+/*
+ * venc_if_set_param - Set parameter to driver
+ * @ctx: device context
+ * @type: parameter type
+ * @in: input parameter
+ * Return: 0 if the parameter is set successfully, otherwise an error code.
+ */
+int venc_if_set_param(struct mtk_vcodec_ctx *ctx,
+ enum venc_set_param_type type,
+ struct venc_enc_param *in);
+
+/*
+ * venc_if_encode - Encode one frame
+ * @ctx: device context
+ * @opt: encode frame option
+ * @frm_buf: input frame buffer information
+ * @bs_buf: output bitstream buffer information
+ * @result: encode result
+ * Return: 0 if the frame is encoded successfully, otherwise an error code.
+ */
+int venc_if_encode(struct mtk_vcodec_ctx *ctx,
+ enum venc_start_opt opt,
+ struct venc_frm_buf *frm_buf,
+ struct mtk_vcodec_mem *bs_buf,
+ struct venc_done_result *result);
+
+#endif /* _VENC_DRV_IF_H_ */
diff --git a/drivers/media/platform/mtk-vcodec/venc_ipi_msg.h b/drivers/media/platform/mtk-vcodec/venc_ipi_msg.h
new file mode 100644
index 000000000..4c869cb6f
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/venc_ipi_msg.h
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Jungchang Tsao <jungchang.tsao@mediatek.com>
+ * Daniel Hsiao <daniel.hsiao@mediatek.com>
+ * Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _VENC_IPI_MSG_H_
+#define _VENC_IPI_MSG_H_
+
+#define AP_IPIMSG_VENC_BASE 0xC000
+#define VPU_IPIMSG_VENC_BASE 0xD000
+
+/**
+ * enum venc_ipi_msg_id - message id between AP and VPU
+ * (ipi stands for inter-processor interrupt)
+ * @AP_IPIMSG_ENC_XXX: AP to VPU cmd message id
+ * @VPU_IPIMSG_ENC_XXX_DONE: VPU ack AP cmd message id
+ */
+enum venc_ipi_msg_id {
+ AP_IPIMSG_ENC_INIT = AP_IPIMSG_VENC_BASE,
+ AP_IPIMSG_ENC_SET_PARAM,
+ AP_IPIMSG_ENC_ENCODE,
+ AP_IPIMSG_ENC_DEINIT,
+
+ VPU_IPIMSG_ENC_INIT_DONE = VPU_IPIMSG_VENC_BASE,
+ VPU_IPIMSG_ENC_SET_PARAM_DONE,
+ VPU_IPIMSG_ENC_ENCODE_DONE,
+ VPU_IPIMSG_ENC_DEINIT_DONE,
+};
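
A hedged note on the numbering (implied by the two bases rather than stated in the patch): commands count up from AP_IPIMSG_VENC_BASE and acks from VPU_IPIMSG_VENC_BASE, so a command and its ack share the same low bits. A compile-time illustration, assuming this header is included:

    _Static_assert(AP_IPIMSG_ENC_ENCODE == 0xC002, "third cmd after 0xC000");
    _Static_assert(VPU_IPIMSG_ENC_ENCODE_DONE == 0xD002, "matching ack");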
+
+/**
+ * struct venc_ap_ipi_msg_init - AP to VPU init cmd structure
+ * @msg_id: message id (AP_IPIMSG_ENC_INIT)
+ * @reserved: reserved for future use. The VPU runs in 32-bit mode; without
+ *	this padding field, the struct size would differ between a 64-bit
+ *	kernel and the VPU.
+ * @venc_inst: AP encoder instance
+ * (struct venc_vp8_inst/venc_h264_inst *)
+ */
+struct venc_ap_ipi_msg_init {
+ uint32_t msg_id;
+ uint32_t reserved;
+ uint64_t venc_inst;
+};
+
+/**
+ * struct venc_ap_ipi_msg_set_param - AP to VPU set_param cmd structure
+ * @msg_id: message id (AP_IPIMSG_ENC_SET_PARAM)
+ * @vpu_inst_addr: VPU encoder instance addr
+ * (struct venc_vp8_vsi/venc_h264_vsi *)
+ * @param_id: parameter id (venc_set_param_type)
+ * @data_item: number of items in the data array
+ * @data[8]: data array to store the set parameters
+ */
+struct venc_ap_ipi_msg_set_param {
+ uint32_t msg_id;
+ uint32_t vpu_inst_addr;
+ uint32_t param_id;
+ uint32_t data_item;
+ uint32_t data[8];
+};
+
+/**
+ * struct venc_ap_ipi_msg_enc - AP to VPU enc cmd structure
+ * @msg_id: message id (AP_IPIMSG_ENC_ENCODE)
+ * @vpu_inst_addr: VPU encoder instance addr
+ * (struct venc_vp8_vsi/venc_h264_vsi *)
+ * @bs_mode: bitstream mode for h264
+ * (H264_BS_MODE_SPS/H264_BS_MODE_PPS/H264_BS_MODE_FRAME)
+ * @input_addr: pointer to input image buffer plane
+ * @bs_addr: pointer to output bit stream buffer
+ * @bs_size: bit stream buffer size
+ */
+struct venc_ap_ipi_msg_enc {
+ uint32_t msg_id;
+ uint32_t vpu_inst_addr;
+ uint32_t bs_mode;
+ uint32_t input_addr[3];
+ uint32_t bs_addr;
+ uint32_t bs_size;
+};
+
+/**
+ * struct venc_ap_ipi_msg_deinit - AP to VPU deinit cmd structure
+ * @msg_id: message id (AP_IPIMSG_ENC_DEINIT)
+ * @vpu_inst_addr: VPU encoder instance addr
+ * (struct venc_vp8_vsi/venc_h264_vsi *)
+ */
+struct venc_ap_ipi_msg_deinit {
+ uint32_t msg_id;
+ uint32_t vpu_inst_addr;
+};
+
+/**
+ * enum venc_ipi_msg_status - VPU ack AP cmd status
+ */
+enum venc_ipi_msg_status {
+ VENC_IPI_MSG_STATUS_OK,
+ VENC_IPI_MSG_STATUS_FAIL,
+};
+
+/**
+ * struct venc_vpu_ipi_msg_common - VPU ack AP cmd common structure
+ * @msg_id: message id (VPU_IPIMSG_ENC_XXX_DONE)
+ * @status: cmd status (venc_ipi_msg_status)
+ * @venc_inst: AP encoder instance (struct venc_vp8_inst/venc_h264_inst *)
+ */
+struct venc_vpu_ipi_msg_common {
+ uint32_t msg_id;
+ uint32_t status;
+ uint64_t venc_inst;
+};
+
+/**
+ * struct venc_vpu_ipi_msg_init - VPU ack AP init cmd structure
+ * @msg_id: message id (VPU_IPIMSG_ENC_INIT_DONE)
+ * @status: cmd status (venc_ipi_msg_status)
+ * @venc_inst: AP encoder instance (struct venc_vp8_inst/venc_h264_inst *)
+ * @vpu_inst_addr: VPU encoder instance addr
+ * (struct venc_vp8_vsi/venc_h264_vsi *)
+ * @reserved: reserved for future use. The VPU runs in 32-bit mode; without
+ *	this padding field, the struct size would differ between a 64-bit
+ *	kernel and the VPU.
+ */
+struct venc_vpu_ipi_msg_init {
+ uint32_t msg_id;
+ uint32_t status;
+ uint64_t venc_inst;
+ uint32_t vpu_inst_addr;
+ uint32_t reserved;
+};
+
+/**
+ * struct venc_vpu_ipi_msg_set_param - VPU ack AP set_param cmd structure
+ * @msg_id: message id (VPU_IPIMSG_ENC_SET_PARAM_DONE)
+ * @status: cmd status (venc_ipi_msg_status)
+ * @venc_inst: AP encoder instance (struct venc_vp8_inst/venc_h264_inst *)
+ * @param_id: parameter id (venc_set_param_type)
+ * @data_item: number of items in the data array
+ * @data[6]: data array to store the return result
+ */
+struct venc_vpu_ipi_msg_set_param {
+ uint32_t msg_id;
+ uint32_t status;
+ uint64_t venc_inst;
+ uint32_t param_id;
+ uint32_t data_item;
+ uint32_t data[6];
+};
+
+/**
+ * enum venc_ipi_msg_enc_state - Type of encode state
+ * @VEN_IPI_MSG_ENC_STATE_FRAME: one frame being encoded
+ * @VEN_IPI_MSG_ENC_STATE_PART: bit stream buffer full
+ * @VEN_IPI_MSG_ENC_STATE_SKIP: encoded skip frame
+ * @VEN_IPI_MSG_ENC_STATE_ERROR: encountered an error
+ */
+enum venc_ipi_msg_enc_state {
+ VEN_IPI_MSG_ENC_STATE_FRAME,
+ VEN_IPI_MSG_ENC_STATE_PART,
+ VEN_IPI_MSG_ENC_STATE_SKIP,
+ VEN_IPI_MSG_ENC_STATE_ERROR,
+};
+
+/**
+ * struct venc_vpu_ipi_msg_enc - VPU ack AP enc cmd structure
+ * @msg_id: message id (VPU_IPIMSG_ENC_ENCODE_DONE)
+ * @status: cmd status (venc_ipi_msg_status)
+ * @venc_inst: AP encoder instance (struct venc_vp8_inst/venc_h264_inst *)
+ * @state: encode state (venc_ipi_msg_enc_state)
+ * @is_key_frm: whether the encoded frame is key frame
+ * @bs_size: encoded bitstream size
+ * @reserved: reserved for future use. The VPU runs in 32-bit mode; without
+ *	this padding field, the struct size would differ between a 64-bit
+ *	kernel and the VPU.
+ */
+struct venc_vpu_ipi_msg_enc {
+ uint32_t msg_id;
+ uint32_t status;
+ uint64_t venc_inst;
+ uint32_t state;
+ uint32_t is_key_frm;
+ uint32_t bs_size;
+ uint32_t reserved;
+};
+
+/**
+ * struct venc_vpu_ipi_msg_deinit - VPU ack AP deinit cmd structure
+ * @msg_id: message id (VPU_IPIMSG_ENC_DEINIT_DONE)
+ * @status: cmd status (venc_ipi_msg_status)
+ * @venc_inst: AP encoder instance (struct venc_vp8_inst/venc_h264_inst *)
+ */
+struct venc_vpu_ipi_msg_deinit {
+ uint32_t msg_id;
+ uint32_t status;
+ uint64_t venc_inst;
+};
+
+#endif /* _VENC_IPI_MSG_H_ */
diff --git a/drivers/media/platform/mtk-vcodec/venc_vpu_if.c b/drivers/media/platform/mtk-vcodec/venc_vpu_if.c
new file mode 100644
index 000000000..a01c7599b
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/venc_vpu_if.c
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PoChun Lin <pochun.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mtk_vpu.h"
+#include "venc_ipi_msg.h"
+#include "venc_vpu_if.h"
+
+static void handle_enc_init_msg(struct venc_vpu_inst *vpu, void *data)
+{
+ struct venc_vpu_ipi_msg_init *msg = data;
+
+ vpu->inst_addr = msg->vpu_inst_addr;
+ vpu->vsi = vpu_mapping_dm_addr(vpu->dev, msg->vpu_inst_addr);
+}
+
+static void handle_enc_encode_msg(struct venc_vpu_inst *vpu, void *data)
+{
+ struct venc_vpu_ipi_msg_enc *msg = data;
+
+ vpu->state = msg->state;
+ vpu->bs_size = msg->bs_size;
+ vpu->is_key_frm = msg->is_key_frm;
+}
+
+static void vpu_enc_ipi_handler(void *data, unsigned int len, void *priv)
+{
+ struct venc_vpu_ipi_msg_common *msg = data;
+ struct venc_vpu_inst *vpu =
+ (struct venc_vpu_inst *)(unsigned long)msg->venc_inst;
+
+ mtk_vcodec_debug(vpu, "msg_id %x inst %p status %d",
+ msg->msg_id, vpu, msg->status);
+
+ switch (msg->msg_id) {
+ case VPU_IPIMSG_ENC_INIT_DONE:
+ handle_enc_init_msg(vpu, data);
+ break;
+ case VPU_IPIMSG_ENC_SET_PARAM_DONE:
+ break;
+ case VPU_IPIMSG_ENC_ENCODE_DONE:
+ handle_enc_encode_msg(vpu, data);
+ break;
+ case VPU_IPIMSG_ENC_DEINIT_DONE:
+ break;
+ default:
+ mtk_vcodec_err(vpu, "unknown msg id %x", msg->msg_id);
+ break;
+ }
+
+ vpu->signaled = 1;
+ vpu->failure = (msg->status != VENC_IPI_MSG_STATUS_OK);
+
+ mtk_vcodec_debug_leave(vpu);
+}
+
+static int vpu_enc_send_msg(struct venc_vpu_inst *vpu, void *msg,
+ int len)
+{
+ int status;
+
+ mtk_vcodec_debug_enter(vpu);
+
+ if (!vpu->dev) {
+ mtk_vcodec_err(vpu, "inst dev is NULL");
+ return -EINVAL;
+ }
+
+ status = vpu_ipi_send(vpu->dev, vpu->id, msg, len);
+ if (status) {
+ uint32_t msg_id = *(uint32_t *)msg;
+
+ mtk_vcodec_err(vpu, "vpu_ipi_send msg_id %x len %d fail %d",
+ msg_id, len, status);
+ return -EINVAL;
+ }
+ if (vpu->failure)
+ return -EINVAL;
+
+ mtk_vcodec_debug_leave(vpu);
+
+ return 0;
+}
+
+int vpu_enc_init(struct venc_vpu_inst *vpu)
+{
+ int status;
+ struct venc_ap_ipi_msg_init out;
+
+ mtk_vcodec_debug_enter(vpu);
+
+ init_waitqueue_head(&vpu->wq_hd);
+ vpu->signaled = 0;
+ vpu->failure = 0;
+
+ status = vpu_ipi_register(vpu->dev, vpu->id, vpu_enc_ipi_handler,
+ NULL, NULL);
+ if (status) {
+ mtk_vcodec_err(vpu, "vpu_ipi_register fail %d", status);
+ return -EINVAL;
+ }
+
+ memset(&out, 0, sizeof(out));
+ out.msg_id = AP_IPIMSG_ENC_INIT;
+ out.venc_inst = (unsigned long)vpu;
+ if (vpu_enc_send_msg(vpu, &out, sizeof(out))) {
+ mtk_vcodec_err(vpu, "AP_IPIMSG_ENC_INIT fail");
+ return -EINVAL;
+ }
+
+ mtk_vcodec_debug_leave(vpu);
+
+ return 0;
+}
+
+int vpu_enc_set_param(struct venc_vpu_inst *vpu,
+ enum venc_set_param_type id,
+ struct venc_enc_param *enc_param)
+{
+ struct venc_ap_ipi_msg_set_param out;
+
+ mtk_vcodec_debug(vpu, "id %d ->", id);
+
+ memset(&out, 0, sizeof(out));
+ out.msg_id = AP_IPIMSG_ENC_SET_PARAM;
+ out.vpu_inst_addr = vpu->inst_addr;
+ out.param_id = id;
+ switch (id) {
+ case VENC_SET_PARAM_ENC:
+ out.data_item = 0;
+ break;
+ case VENC_SET_PARAM_FORCE_INTRA:
+ out.data_item = 0;
+ break;
+ case VENC_SET_PARAM_ADJUST_BITRATE:
+ out.data_item = 1;
+ out.data[0] = enc_param->bitrate;
+ break;
+ case VENC_SET_PARAM_ADJUST_FRAMERATE:
+ out.data_item = 1;
+ out.data[0] = enc_param->frm_rate;
+ break;
+ case VENC_SET_PARAM_GOP_SIZE:
+ out.data_item = 1;
+ out.data[0] = enc_param->gop_size;
+ break;
+ case VENC_SET_PARAM_INTRA_PERIOD:
+ out.data_item = 1;
+ out.data[0] = enc_param->intra_period;
+ break;
+ case VENC_SET_PARAM_SKIP_FRAME:
+ out.data_item = 0;
+ break;
+ default:
+ mtk_vcodec_err(vpu, "id %d not supported", id);
+ return -EINVAL;
+ }
+ if (vpu_enc_send_msg(vpu, &out, sizeof(out))) {
+ mtk_vcodec_err(vpu,
+ "AP_IPIMSG_ENC_SET_PARAM %d fail", id);
+ return -EINVAL;
+ }
+
+ mtk_vcodec_debug(vpu, "id %d <-", id);
+
+ return 0;
+}
+
+int vpu_enc_encode(struct venc_vpu_inst *vpu, unsigned int bs_mode,
+ struct venc_frm_buf *frm_buf,
+ struct mtk_vcodec_mem *bs_buf,
+ unsigned int *bs_size)
+{
+ struct venc_ap_ipi_msg_enc out;
+
+ mtk_vcodec_debug(vpu, "bs_mode %d ->", bs_mode);
+
+ memset(&out, 0, sizeof(out));
+ out.msg_id = AP_IPIMSG_ENC_ENCODE;
+ out.vpu_inst_addr = vpu->inst_addr;
+ out.bs_mode = bs_mode;
+ if (frm_buf) {
+ if ((frm_buf->fb_addr[0].dma_addr % 16 == 0) &&
+ (frm_buf->fb_addr[1].dma_addr % 16 == 0) &&
+ (frm_buf->fb_addr[2].dma_addr % 16 == 0)) {
+ out.input_addr[0] = frm_buf->fb_addr[0].dma_addr;
+ out.input_addr[1] = frm_buf->fb_addr[1].dma_addr;
+ out.input_addr[2] = frm_buf->fb_addr[2].dma_addr;
+ } else {
+			mtk_vcodec_err(vpu, "dma_addr not aligned to 16");
+ return -EINVAL;
+ }
+ }
+ if (bs_buf) {
+ out.bs_addr = bs_buf->dma_addr;
+ out.bs_size = bs_buf->size;
+ }
+ if (vpu_enc_send_msg(vpu, &out, sizeof(out))) {
+ mtk_vcodec_err(vpu, "AP_IPIMSG_ENC_ENCODE %d fail",
+ bs_mode);
+ return -EINVAL;
+ }
+
+ mtk_vcodec_debug(vpu, "bs_mode %d state %d size %d key_frm %d <-",
+ bs_mode, vpu->state, vpu->bs_size, vpu->is_key_frm);
+
+ return 0;
+}
+
+int vpu_enc_deinit(struct venc_vpu_inst *vpu)
+{
+ struct venc_ap_ipi_msg_deinit out;
+
+ mtk_vcodec_debug_enter(vpu);
+
+ memset(&out, 0, sizeof(out));
+ out.msg_id = AP_IPIMSG_ENC_DEINIT;
+ out.vpu_inst_addr = vpu->inst_addr;
+ if (vpu_enc_send_msg(vpu, &out, sizeof(out))) {
+ mtk_vcodec_err(vpu, "AP_IPIMSG_ENC_DEINIT fail");
+ return -EINVAL;
+ }
+
+ mtk_vcodec_debug_leave(vpu);
+
+ return 0;
+}
diff --git a/drivers/media/platform/mtk-vcodec/venc_vpu_if.h b/drivers/media/platform/mtk-vcodec/venc_vpu_if.h
new file mode 100644
index 000000000..215d1e013
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/venc_vpu_if.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PoChun Lin <pochun.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _VENC_VPU_IF_H_
+#define _VENC_VPU_IF_H_
+
+#include "mtk_vpu.h"
+#include "venc_drv_if.h"
+
+/*
+ * struct venc_vpu_inst - encoder VPU driver instance
+ * @wq_hd: wait queue used to wait for the VPU interrupt after a VPU cmd is
+ *	triggered
+ * @signaled: flag indicating that the VPU interrupt has been received
+ * @failure: flag set when a VPU cmd fails
+ * @state: enum venc_ipi_msg_enc_state
+ * @bs_size: bitstream size for skip frame case usage
+ * @is_key_frm: key frame flag
+ * @inst_addr: VPU instance addr
+ * @vsi: driver structure allocated by VPU side and shared to AP side for
+ * control and info share
+ * @id: the id of inter-processor interrupt
+ * @ctx: context for v4l2 layer integration
+ * @dev: device for v4l2 layer integration
+ */
+struct venc_vpu_inst {
+ wait_queue_head_t wq_hd;
+ int signaled;
+ int failure;
+ int state;
+ int bs_size;
+ int is_key_frm;
+ unsigned int inst_addr;
+ void *vsi;
+ enum ipi_id id;
+ struct mtk_vcodec_ctx *ctx;
+ struct platform_device *dev;
+};
+
+int vpu_enc_init(struct venc_vpu_inst *vpu);
+int vpu_enc_set_param(struct venc_vpu_inst *vpu,
+ enum venc_set_param_type id,
+ struct venc_enc_param *param);
+int vpu_enc_encode(struct venc_vpu_inst *vpu, unsigned int bs_mode,
+ struct venc_frm_buf *frm_buf,
+ struct mtk_vcodec_mem *bs_buf,
+ unsigned int *bs_size);
+int vpu_enc_deinit(struct venc_vpu_inst *vpu);
+
+#endif
diff --git a/drivers/media/platform/mtk-vpu/Makefile b/drivers/media/platform/mtk-vpu/Makefile
new file mode 100644
index 000000000..58cc1b4bc
--- /dev/null
+++ b/drivers/media/platform/mtk-vpu/Makefile
@@ -0,0 +1,3 @@
+mtk-vpu-y += mtk_vpu.o
+
+obj-$(CONFIG_VIDEO_MEDIATEK_VPU) += mtk-vpu.o
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c
new file mode 100644
index 000000000..460241514
--- /dev/null
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c
@@ -0,0 +1,946 @@
+/*
+* Copyright (c) 2016 MediaTek Inc.
+* Author: Andrew-CT Chen <andrew-ct.chen@mediatek.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/sched.h>
+#include <linux/sizes.h>
+
+#include "mtk_vpu.h"
+
+/**
+ * VPU (video processor unit) is a tiny processor controlling video hardware
+ * related to video codecs, scaling and color format conversion.
+ * The VPU interfaces with other blocks via shared memory and interrupts.
+ */
+
+#define INIT_TIMEOUT_MS 2000U
+#define IPI_TIMEOUT_MS 2000U
+#define VPU_FW_VER_LEN 16
+
+/* maximum program/data TCM (Tightly-Coupled Memory) size */
+#define VPU_PTCM_SIZE (96 * SZ_1K)
+#define VPU_DTCM_SIZE (32 * SZ_1K)
+/* the offset to get data tcm address */
+#define VPU_DTCM_OFFSET 0x18000UL
+/* maximum size of dynamically allocated extended memory */
+#define VPU_EXT_P_SIZE SZ_1M
+#define VPU_EXT_D_SIZE SZ_4M
+/* maximum binary firmware size */
+#define VPU_P_FW_SIZE (VPU_PTCM_SIZE + VPU_EXT_P_SIZE)
+#define VPU_D_FW_SIZE (VPU_DTCM_SIZE + VPU_EXT_D_SIZE)
+/* the size of the shared buffer between the host and the VPU */
+#define SHARE_BUF_SIZE 48
+
+/* binary firmware name */
+#define VPU_P_FW "/*(DEBLOBBED)*/"
+#define VPU_D_FW "/*(DEBLOBBED)*/"
+
+#define VPU_RESET 0x0
+#define VPU_TCM_CFG 0x0008
+#define VPU_PMEM_EXT0_ADDR 0x000C
+#define VPU_PMEM_EXT1_ADDR 0x0010
+#define VPU_TO_HOST 0x001C
+#define VPU_DMEM_EXT0_ADDR 0x0014
+#define VPU_DMEM_EXT1_ADDR 0x0018
+#define HOST_TO_VPU 0x0024
+#define VPU_PC_REG 0x0060
+#define VPU_WDT_REG 0x0084
+
+/* vpu inter-processor communication interrupt */
+#define VPU_IPC_INT BIT(8)
+
+/**
+ * enum vpu_fw_type - VPU firmware type
+ *
+ * @P_FW: program firmware
+ * @D_FW: data firmware
+ *
+ */
+enum vpu_fw_type {
+ P_FW,
+ D_FW,
+};
+
+/**
+ * struct vpu_mem - VPU extended program/data memory information
+ *
+ * @va: the kernel virtual memory address of VPU extended memory
+ * @pa: the physical memory address of VPU extended memory
+ *
+ */
+struct vpu_mem {
+ void *va;
+ dma_addr_t pa;
+};
+
+/**
+ * struct vpu_regs - VPU TCM and configuration registers
+ *
+ * @tcm: the register for VPU Tightly-Coupled Memory
+ * @cfg: the register for VPU configuration
+ * @irq: the irq number for VPU interrupt
+ */
+struct vpu_regs {
+ void __iomem *tcm;
+ void __iomem *cfg;
+ int irq;
+};
+
+/**
+ * struct vpu_wdt_handler - VPU watchdog reset handler
+ *
+ * @reset_func: reset handler
+ * @priv: private data
+ */
+struct vpu_wdt_handler {
+ void (*reset_func)(void *);
+ void *priv;
+};
+
+/**
+ * struct vpu_wdt - VPU watchdog workqueue
+ *
+ * @handler: VPU watchdog reset handler
+ * @ws: workstruct for VPU watchdog
+ * @wq: workqueue for VPU watchdog
+ */
+struct vpu_wdt {
+ struct vpu_wdt_handler handler[VPU_RST_MAX];
+ struct work_struct ws;
+ struct workqueue_struct *wq;
+};
+
+/**
+ * struct vpu_run - VPU initialization status
+ *
+ * @signaled: flag indicating that VPU initialization has completed
+ * @fw_ver: VPU firmware version
+ * @enc_capability: encoder capability; currently unused and reserved
+ *                  for future use
+ * @wq: wait queue for VPU initialization status
+ */
+struct vpu_run {
+ u32 signaled;
+ char fw_ver[VPU_FW_VER_LEN];
+ unsigned int enc_capability;
+ wait_queue_head_t wq;
+};
+
+/**
+ * struct vpu_ipi_desc - VPU IPI descriptor
+ *
+ * @handler: IPI handler
+ * @name: the name of IPI handler
+ * @priv: the private data of IPI handler
+ */
+struct vpu_ipi_desc {
+ ipi_handler_t handler;
+ const char *name;
+ void *priv;
+};
+
+/**
+ * struct share_obj - DTCM (Data Tightly-Coupled Memory) buffer shared
+ *                    between the AP and the VPU
+ *
+ * @id: IPI id
+ * @len: share buffer length
+ * @share_buf: share buffer data
+ */
+struct share_obj {
+ s32 id;
+ u32 len;
+ unsigned char share_buf[SHARE_BUF_SIZE];
+};
+
+/**
+ * struct mtk_vpu - vpu driver data
+ * @extmem: VPU extended memory information
+ * @reg: VPU TCM and configuration registers
+ * @run: VPU initialization status
+ * @ipi_desc: VPU IPI descriptor
+ * @recv_buf: VPU DTCM shared buffer for receiving. The
+ *            receive buffer is only accessed in interrupt context.
+ * @send_buf: VPU DTCM shared buffer for sending
+ * @dev: VPU struct device
+ * @clk: VPU clock on/off
+ * @fw_loaded: indicate VPU firmware loaded
+ * @enable_4GB: VPU 4GB mode on/off
+ * @vpu_mutex: protect mtk_vpu (except recv_buf) and ensure only
+ * one client to use VPU service at a time. For example,
+ * suppose a client is using VPU to decode VP8.
+ * If the other client wants to encode VP8,
+ * it has to wait until VP8 decode completes.
+ * @wdt_refcnt: WDT reference count to make sure the watchdog can be
+ *              disabled if no other client is using the VPU service
+ * @ack_wq: The wait queue for each codec and mdp. When sleeping
+ * processes wake up, they will check the condition
+ * "ipi_id_ack" to run the corresponding action or
+ * go back to sleep.
+ * @ipi_id_ack: The ACKs for registered IPI function sending
+ * interrupt to VPU
+ *
+ */
+struct mtk_vpu {
+ struct vpu_mem extmem[2];
+ struct vpu_regs reg;
+ struct vpu_run run;
+ struct vpu_wdt wdt;
+ struct vpu_ipi_desc ipi_desc[IPI_MAX];
+ struct share_obj *recv_buf;
+ struct share_obj *send_buf;
+ struct device *dev;
+ struct clk *clk;
+ bool fw_loaded;
+ bool enable_4GB;
+ struct mutex vpu_mutex; /* for protecting vpu data structures */
+ u32 wdt_refcnt;
+ wait_queue_head_t ack_wq;
+ bool ipi_id_ack[IPI_MAX];
+};
+
+static inline void vpu_cfg_writel(struct mtk_vpu *vpu, u32 val, u32 offset)
+{
+ writel(val, vpu->reg.cfg + offset);
+}
+
+static inline u32 vpu_cfg_readl(struct mtk_vpu *vpu, u32 offset)
+{
+ return readl(vpu->reg.cfg + offset);
+}
+
+static inline bool vpu_running(struct mtk_vpu *vpu)
+{
+ return vpu_cfg_readl(vpu, VPU_RESET) & BIT(0);
+}
+
+static void vpu_clock_disable(struct mtk_vpu *vpu)
+{
+ /* Disable VPU watchdog */
+ mutex_lock(&vpu->vpu_mutex);
+ if (!--vpu->wdt_refcnt)
+ vpu_cfg_writel(vpu,
+ vpu_cfg_readl(vpu, VPU_WDT_REG) & ~(1L << 31),
+ VPU_WDT_REG);
+ mutex_unlock(&vpu->vpu_mutex);
+
+ clk_disable(vpu->clk);
+}
+
+static int vpu_clock_enable(struct mtk_vpu *vpu)
+{
+ int ret;
+
+ ret = clk_enable(vpu->clk);
+ if (ret)
+ return ret;
+ /* Enable VPU watchdog */
+ mutex_lock(&vpu->vpu_mutex);
+ if (!vpu->wdt_refcnt++)
+ vpu_cfg_writel(vpu,
+ vpu_cfg_readl(vpu, VPU_WDT_REG) | (1L << 31),
+ VPU_WDT_REG);
+ mutex_unlock(&vpu->vpu_mutex);
+
+ return ret;
+}
+
+int vpu_ipi_register(struct platform_device *pdev,
+ enum ipi_id id, ipi_handler_t handler,
+ const char *name, void *priv)
+{
+ struct mtk_vpu *vpu = platform_get_drvdata(pdev);
+ struct vpu_ipi_desc *ipi_desc;
+
+ if (!vpu) {
+ dev_err(&pdev->dev, "vpu device in not ready\n");
+ return -EPROBE_DEFER;
+ }
+
+ if (id >= 0 && id < IPI_MAX && handler) {
+ ipi_desc = vpu->ipi_desc;
+ ipi_desc[id].name = name;
+ ipi_desc[id].handler = handler;
+ ipi_desc[id].priv = priv;
+ return 0;
+ }
+
+ dev_err(&pdev->dev, "register vpu ipi id %d with invalid arguments\n",
+ id);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(vpu_ipi_register);
+
+int vpu_ipi_send(struct platform_device *pdev,
+ enum ipi_id id, void *buf,
+ unsigned int len)
+{
+ struct mtk_vpu *vpu = platform_get_drvdata(pdev);
+ struct share_obj *send_obj = vpu->send_buf;
+ unsigned long timeout;
+ int ret = 0;
+
+ if (id <= IPI_VPU_INIT || id >= IPI_MAX ||
+ len > sizeof(send_obj->share_buf) || !buf) {
+ dev_err(vpu->dev, "failed to send ipi message\n");
+ return -EINVAL;
+ }
+
+ ret = vpu_clock_enable(vpu);
+ if (ret) {
+ dev_err(vpu->dev, "failed to enable vpu clock\n");
+ return ret;
+ }
+ if (!vpu_running(vpu)) {
+ dev_err(vpu->dev, "vpu_ipi_send: VPU is not running\n");
+ ret = -EINVAL;
+ goto clock_disable;
+ }
+
+ mutex_lock(&vpu->vpu_mutex);
+
+ /* Wait until VPU receives the last command */
+ timeout = jiffies + msecs_to_jiffies(IPI_TIMEOUT_MS);
+ do {
+ if (time_after(jiffies, timeout)) {
+ dev_err(vpu->dev, "vpu_ipi_send: IPI timeout!\n");
+ ret = -EIO;
+ goto mut_unlock;
+ }
+ } while (vpu_cfg_readl(vpu, HOST_TO_VPU));
+
+ memcpy((void *)send_obj->share_buf, buf, len);
+ send_obj->len = len;
+ send_obj->id = id;
+
+ vpu->ipi_id_ack[id] = false;
+ /* send the command to VPU */
+ vpu_cfg_writel(vpu, 0x1, HOST_TO_VPU);
+
+ mutex_unlock(&vpu->vpu_mutex);
+
+ /* wait for VPU's ACK */
+ timeout = msecs_to_jiffies(IPI_TIMEOUT_MS);
+ ret = wait_event_timeout(vpu->ack_wq, vpu->ipi_id_ack[id], timeout);
+ vpu->ipi_id_ack[id] = false;
+ if (ret == 0) {
+ dev_err(vpu->dev, "vpu ipi %d ack time out !", id);
+ ret = -EIO;
+ goto clock_disable;
+ }
+ vpu_clock_disable(vpu);
+
+ return 0;
+
+mut_unlock:
+ mutex_unlock(&vpu->vpu_mutex);
+clock_disable:
+ vpu_clock_disable(vpu);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vpu_ipi_send);
+
+static void vpu_wdt_reset_func(struct work_struct *ws)
+{
+ struct vpu_wdt *wdt = container_of(ws, struct vpu_wdt, ws);
+ struct mtk_vpu *vpu = container_of(wdt, struct mtk_vpu, wdt);
+ struct vpu_wdt_handler *handler = wdt->handler;
+ int index, ret;
+
+ dev_info(vpu->dev, "vpu reset\n");
+ ret = vpu_clock_enable(vpu);
+ if (ret) {
+ dev_err(vpu->dev, "[VPU] wdt enables clock failed %d\n", ret);
+ return;
+ }
+ mutex_lock(&vpu->vpu_mutex);
+ vpu_cfg_writel(vpu, 0x0, VPU_RESET);
+ vpu->fw_loaded = false;
+ mutex_unlock(&vpu->vpu_mutex);
+ vpu_clock_disable(vpu);
+
+ for (index = 0; index < VPU_RST_MAX; index++) {
+ if (handler[index].reset_func) {
+ handler[index].reset_func(handler[index].priv);
+ dev_dbg(vpu->dev, "wdt handler func %d\n", index);
+ }
+ }
+}
+
+int vpu_wdt_reg_handler(struct platform_device *pdev,
+ void wdt_reset(void *),
+ void *priv, enum rst_id id)
+{
+ struct mtk_vpu *vpu = platform_get_drvdata(pdev);
+ struct vpu_wdt_handler *handler;
+
+ if (!vpu) {
+ dev_err(&pdev->dev, "vpu device in not ready\n");
+ return -EPROBE_DEFER;
+ }
+
+ handler = vpu->wdt.handler;
+
+ if (id >= 0 && id < VPU_RST_MAX && wdt_reset) {
+ dev_dbg(vpu->dev, "wdt register id %d\n", id);
+ mutex_lock(&vpu->vpu_mutex);
+ handler[id].reset_func = wdt_reset;
+ handler[id].priv = priv;
+ mutex_unlock(&vpu->vpu_mutex);
+ return 0;
+ }
+
+ dev_err(vpu->dev, "register vpu wdt handler failed\n");
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(vpu_wdt_reg_handler);
+
+unsigned int vpu_get_venc_hw_capa(struct platform_device *pdev)
+{
+ struct mtk_vpu *vpu = platform_get_drvdata(pdev);
+
+ return vpu->run.enc_capability;
+}
+EXPORT_SYMBOL_GPL(vpu_get_venc_hw_capa);
+
+void *vpu_mapping_dm_addr(struct platform_device *pdev,
+ u32 dtcm_dmem_addr)
+{
+ struct mtk_vpu *vpu = platform_get_drvdata(pdev);
+
+ if (!dtcm_dmem_addr ||
+ (dtcm_dmem_addr > (VPU_DTCM_SIZE + VPU_EXT_D_SIZE))) {
+ dev_err(vpu->dev, "invalid virtual data memory address\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (dtcm_dmem_addr < VPU_DTCM_SIZE)
+ return (__force void *)(dtcm_dmem_addr + vpu->reg.tcm +
+ VPU_DTCM_OFFSET);
+
+ return vpu->extmem[D_FW].va + (dtcm_dmem_addr - VPU_DTCM_SIZE);
+}
+EXPORT_SYMBOL_GPL(vpu_mapping_dm_addr);
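Since the two-branch address translation above is easy to misread, here is a worked example under the constants defined earlier in this file; the concrete input addresses are chosen only for illustration.

/*
 * Worked example for vpu_mapping_dm_addr(), with VPU_DTCM_SIZE = 32K
 * (0x8000) and VPU_DTCM_OFFSET = 0x18000 as defined above:
 *
 *   dtcm_dmem_addr = 0x0100 -> vpu->reg.tcm + 0x18000 + 0x0100 (in DTCM)
 *   dtcm_dmem_addr = 0x8000 -> vpu->extmem[D_FW].va + 0x0 (extended mem)
 *   dtcm_dmem_addr = 0x8004 -> vpu->extmem[D_FW].va + 0x4
 */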
+
+struct platform_device *vpu_get_plat_device(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *vpu_node;
+ struct platform_device *vpu_pdev;
+
+ vpu_node = of_parse_phandle(dev->of_node, "mediatek,vpu", 0);
+ if (!vpu_node) {
+ dev_err(dev, "can't get vpu node\n");
+ return NULL;
+ }
+
+ vpu_pdev = of_find_device_by_node(vpu_node);
+ if (WARN_ON(!vpu_pdev)) {
+ dev_err(dev, "vpu pdev failed\n");
+ of_node_put(vpu_node);
+ return NULL;
+ }
+
+ return vpu_pdev;
+}
+EXPORT_SYMBOL_GPL(vpu_get_plat_device);
+
+/* load vpu program/data memory */
+static int load_requested_vpu(struct mtk_vpu *vpu,
+ const struct firmware *vpu_fw,
+ u8 fw_type)
+{
+ size_t tcm_size = fw_type ? VPU_DTCM_SIZE : VPU_PTCM_SIZE;
+ size_t fw_size = fw_type ? VPU_D_FW_SIZE : VPU_P_FW_SIZE;
+ char *fw_name = fw_type ? VPU_D_FW : VPU_P_FW;
+ size_t dl_size = 0;
+ size_t extra_fw_size = 0;
+ void *dest;
+ int ret;
+
+ ret = reject_firmware(&vpu_fw, fw_name, vpu->dev);
+ if (ret < 0) {
+ dev_err(vpu->dev, "Failed to load %s, %d\n", fw_name, ret);
+ return ret;
+ }
+ dl_size = vpu_fw->size;
+ if (dl_size > fw_size) {
+ dev_err(vpu->dev, "fw %s size %zu is abnormal\n", fw_name,
+ dl_size);
+ release_firmware(vpu_fw);
+ return -EFBIG;
+ }
+ dev_dbg(vpu->dev, "Downloaded fw %s size: %zu.\n",
+ fw_name,
+ dl_size);
+ /* reset VPU */
+ vpu_cfg_writel(vpu, 0x0, VPU_RESET);
+
+ /* handle extended firmware size */
+ if (dl_size > tcm_size) {
+ dev_dbg(vpu->dev, "fw size %zu > limited fw size %zu\n",
+ dl_size, tcm_size);
+ extra_fw_size = dl_size - tcm_size;
+ dev_dbg(vpu->dev, "extra_fw_size %zu\n", extra_fw_size);
+ dl_size = tcm_size;
+ }
+ dest = (__force void *)vpu->reg.tcm;
+ if (fw_type == D_FW)
+ dest += VPU_DTCM_OFFSET;
+ memcpy(dest, vpu_fw->data, dl_size);
+ /* download to extended memory if needed */
+ if (extra_fw_size > 0) {
+ dest = vpu->extmem[fw_type].va;
+ dev_dbg(vpu->dev, "download extended memory type %x\n",
+ fw_type);
+ memcpy(dest, vpu_fw->data + tcm_size, extra_fw_size);
+ }
+
+ release_firmware(vpu_fw);
+
+ return 0;
+}
+
+int vpu_load_firmware(struct platform_device *pdev)
+{
+ struct mtk_vpu *vpu;
+ struct device *dev;
+ struct vpu_run *run;
+ const struct firmware *vpu_fw = NULL;
+ int ret;
+
+ if (!pdev) {
+ pr_err("VPU platform device is invalid\n");
+ return -EINVAL;
+ }
+
+ dev = &pdev->dev;
+ vpu = platform_get_drvdata(pdev);
+ run = &vpu->run;
+
+ mutex_lock(&vpu->vpu_mutex);
+ if (vpu->fw_loaded) {
+ mutex_unlock(&vpu->vpu_mutex);
+ return 0;
+ }
+ mutex_unlock(&vpu->vpu_mutex);
+
+ ret = vpu_clock_enable(vpu);
+ if (ret) {
+ dev_err(dev, "enable clock failed %d\n", ret);
+ return ret;
+ }
+
+ mutex_lock(&vpu->vpu_mutex);
+
+ run->signaled = false;
+ dev_dbg(vpu->dev, "firmware request\n");
+ /* Download the program firmware to the device */
+ ret = load_requested_vpu(vpu, vpu_fw, P_FW);
+ if (ret < 0) {
+ dev_err(dev, "Failed to request %s, %d\n", VPU_P_FW, ret);
+ goto OUT_LOAD_FW;
+ }
+
+ /* Downloading data firmware to device */
+ ret = load_requested_vpu(vpu, vpu_fw, D_FW);
+ if (ret < 0) {
+ dev_err(dev, "Failed to request %s, %d\n", VPU_D_FW, ret);
+ goto OUT_LOAD_FW;
+ }
+
+ vpu->fw_loaded = true;
+ /* boot up vpu */
+ vpu_cfg_writel(vpu, 0x1, VPU_RESET);
+
+ ret = wait_event_interruptible_timeout(run->wq,
+ run->signaled,
+ msecs_to_jiffies(INIT_TIMEOUT_MS)
+ );
+ if (ret == 0) {
+ ret = -ETIME;
+ dev_err(dev, "wait vpu initialization timout!\n");
+ goto OUT_LOAD_FW;
+ } else if (ret == -ERESTARTSYS) {
+ dev_err(dev, "wait for vpu interrupted by a signal!\n");
+ goto OUT_LOAD_FW;
+ }
+
+ ret = 0;
+ dev_info(dev, "vpu is ready. Fw version %s\n", run->fw_ver);
+
+OUT_LOAD_FW:
+ mutex_unlock(&vpu->vpu_mutex);
+ vpu_clock_disable(vpu);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vpu_load_firmware);
+
+static void vpu_init_ipi_handler(void *data, unsigned int len, void *priv)
+{
+ struct mtk_vpu *vpu = (struct mtk_vpu *)priv;
+ struct vpu_run *run = (struct vpu_run *)data;
+
+ vpu->run.signaled = run->signaled;
+ strncpy(vpu->run.fw_ver, run->fw_ver, VPU_FW_VER_LEN);
+ /* ensure NUL termination even if the firmware string fills the buffer */
+ vpu->run.fw_ver[VPU_FW_VER_LEN - 1] = '\0';
+ vpu->run.enc_capability = run->enc_capability;
+ wake_up_interruptible(&vpu->run.wq);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static ssize_t vpu_debug_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char buf[256];
+ unsigned int len;
+ unsigned int running, pc, vpu_to_host, host_to_vpu, wdt;
+ int ret;
+ struct device *dev = file->private_data;
+ struct mtk_vpu *vpu = dev_get_drvdata(dev);
+
+ ret = vpu_clock_enable(vpu);
+ if (ret) {
+ dev_err(vpu->dev, "[VPU] enable clock failed %d\n", ret);
+ return 0;
+ }
+
+ /* vpu register status */
+ running = vpu_running(vpu);
+ pc = vpu_cfg_readl(vpu, VPU_PC_REG);
+ wdt = vpu_cfg_readl(vpu, VPU_WDT_REG);
+ host_to_vpu = vpu_cfg_readl(vpu, HOST_TO_VPU);
+ vpu_to_host = vpu_cfg_readl(vpu, VPU_TO_HOST);
+ vpu_clock_disable(vpu);
+
+ if (running) {
+ len = snprintf(buf, sizeof(buf), "VPU is running\n\n"
+ "FW Version: %s\n"
+ "PC: 0x%x\n"
+ "WDT: 0x%x\n"
+ "Host to VPU: 0x%x\n"
+ "VPU to Host: 0x%x\n",
+ vpu->run.fw_ver, pc, wdt,
+ host_to_vpu, vpu_to_host);
+ } else {
+ len = snprintf(buf, sizeof(buf), "VPU not running\n");
+ }
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations vpu_debug_fops = {
+ .open = simple_open,
+ .read = vpu_debug_read,
+};
+#endif /* CONFIG_DEBUG_FS */
+
+static void vpu_free_ext_mem(struct mtk_vpu *vpu, u8 fw_type)
+{
+ struct device *dev = vpu->dev;
+ size_t fw_ext_size = fw_type ? VPU_EXT_D_SIZE : VPU_EXT_P_SIZE;
+
+ dma_free_coherent(dev, fw_ext_size, vpu->extmem[fw_type].va,
+ vpu->extmem[fw_type].pa);
+}
+
+static int vpu_alloc_ext_mem(struct mtk_vpu *vpu, u32 fw_type)
+{
+ struct device *dev = vpu->dev;
+ size_t fw_ext_size = fw_type ? VPU_EXT_D_SIZE : VPU_EXT_P_SIZE;
+ u32 vpu_ext_mem0 = fw_type ? VPU_DMEM_EXT0_ADDR : VPU_PMEM_EXT0_ADDR;
+ u32 vpu_ext_mem1 = fw_type ? VPU_DMEM_EXT1_ADDR : VPU_PMEM_EXT1_ADDR;
+ u32 offset_4gb = vpu->enable_4GB ? 0x40000000 : 0;
+
+ vpu->extmem[fw_type].va = dma_alloc_coherent(dev,
+ fw_ext_size,
+ &vpu->extmem[fw_type].pa,
+ GFP_KERNEL);
+ if (!vpu->extmem[fw_type].va) {
+ dev_err(dev, "Failed to allocate the extended memory\n");
+ /* dma_alloc_coherent() returns NULL on failure, so PTR_ERR()
+ * here would wrongly yield 0; return -ENOMEM instead.
+ */
+ return -ENOMEM;
+ }
+
+ /* Disable extend0. Enable extend1 */
+ vpu_cfg_writel(vpu, 0x1, vpu_ext_mem0);
+ vpu_cfg_writel(vpu, (vpu->extmem[fw_type].pa & 0xFFFFF000) + offset_4gb,
+ vpu_ext_mem1);
+
+ dev_info(dev, "%s extend memory phy=0x%llx virt=0x%p\n",
+ fw_type ? "Data" : "Program",
+ (unsigned long long)vpu->extmem[fw_type].pa,
+ vpu->extmem[fw_type].va);
+
+ return 0;
+}
+
+static void vpu_ipi_handler(struct mtk_vpu *vpu)
+{
+ struct share_obj *rcv_obj = vpu->recv_buf;
+ struct vpu_ipi_desc *ipi_desc = vpu->ipi_desc;
+
+ if (rcv_obj->id < IPI_MAX && ipi_desc[rcv_obj->id].handler) {
+ ipi_desc[rcv_obj->id].handler(rcv_obj->share_buf,
+ rcv_obj->len,
+ ipi_desc[rcv_obj->id].priv);
+ if (rcv_obj->id > IPI_VPU_INIT) {
+ vpu->ipi_id_ack[rcv_obj->id] = true;
+ wake_up(&vpu->ack_wq);
+ }
+ } else {
+ dev_err(vpu->dev, "No such ipi id = %d\n", rcv_obj->id);
+ }
+}
+
+static int vpu_ipi_init(struct mtk_vpu *vpu)
+{
+ /* Disable VPU to host interrupt */
+ vpu_cfg_writel(vpu, 0x0, VPU_TO_HOST);
+
+ /* shared buffer initialization */
+ vpu->recv_buf = (__force struct share_obj *)(vpu->reg.tcm +
+ VPU_DTCM_OFFSET);
+ vpu->send_buf = vpu->recv_buf + 1;
+ memset(vpu->recv_buf, 0, sizeof(struct share_obj));
+ memset(vpu->send_buf, 0, sizeof(struct share_obj));
+
+ return 0;
+}
+
+static irqreturn_t vpu_irq_handler(int irq, void *priv)
+{
+ struct mtk_vpu *vpu = priv;
+ u32 vpu_to_host;
+ int ret;
+
+ /*
+ * Clock should have been enabled already.
+ * Enable again in case vpu_ipi_send times out
+ * and has disabled the clock.
+ */
+ ret = clk_enable(vpu->clk);
+ if (ret) {
+ dev_err(vpu->dev, "[VPU] enable clock failed %d\n", ret);
+ return IRQ_NONE;
+ }
+ vpu_to_host = vpu_cfg_readl(vpu, VPU_TO_HOST);
+ if (vpu_to_host & VPU_IPC_INT) {
+ vpu_ipi_handler(vpu);
+ } else {
+ dev_err(vpu->dev, "vpu watchdog timeout! 0x%x", vpu_to_host);
+ queue_work(vpu->wdt.wq, &vpu->wdt.ws);
+ }
+
+ /* VPU won't send another interrupt until we set VPU_TO_HOST to 0. */
+ vpu_cfg_writel(vpu, 0x0, VPU_TO_HOST);
+ clk_disable(vpu->clk);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *vpu_debugfs;
+#endif
+static int mtk_vpu_probe(struct platform_device *pdev)
+{
+ struct mtk_vpu *vpu;
+ struct device *dev;
+ struct resource *res;
+ int ret = 0;
+
+ dev_dbg(&pdev->dev, "initialization\n");
+
+ dev = &pdev->dev;
+ vpu = devm_kzalloc(dev, sizeof(*vpu), GFP_KERNEL);
+ if (!vpu)
+ return -ENOMEM;
+
+ vpu->dev = &pdev->dev;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcm");
+ vpu->reg.tcm = devm_ioremap_resource(dev, res);
+ if (IS_ERR((__force void *)vpu->reg.tcm))
+ return PTR_ERR((__force void *)vpu->reg.tcm);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_reg");
+ vpu->reg.cfg = devm_ioremap_resource(dev, res);
+ if (IS_ERR((__force void *)vpu->reg.cfg))
+ return PTR_ERR((__force void *)vpu->reg.cfg);
+
+ /* Get VPU clock */
+ vpu->clk = devm_clk_get(dev, "main");
+ if (IS_ERR(vpu->clk)) {
+ dev_err(dev, "get vpu clock failed\n");
+ return PTR_ERR(vpu->clk);
+ }
+
+ platform_set_drvdata(pdev, vpu);
+
+ ret = clk_prepare(vpu->clk);
+ if (ret) {
+ dev_err(dev, "prepare vpu clock failed\n");
+ return ret;
+ }
+
+ /* VPU watchdog */
+ vpu->wdt.wq = create_singlethread_workqueue("vpu_wdt");
+ if (!vpu->wdt.wq) {
+ dev_err(dev, "initialize wdt workqueue failed\n");
+ return -ENOMEM;
+ }
+ INIT_WORK(&vpu->wdt.ws, vpu_wdt_reset_func);
+ mutex_init(&vpu->vpu_mutex);
+
+ ret = vpu_clock_enable(vpu);
+ if (ret) {
+ dev_err(dev, "enable vpu clock failed\n");
+ goto workqueue_destroy;
+ }
+
+ dev_dbg(dev, "vpu ipi init\n");
+ ret = vpu_ipi_init(vpu);
+ if (ret) {
+ dev_err(dev, "Failed to init ipi\n");
+ goto disable_vpu_clk;
+ }
+
+ /* register vpu initialization IPI */
+ ret = vpu_ipi_register(pdev, IPI_VPU_INIT, vpu_init_ipi_handler,
+ "vpu_init", vpu);
+ if (ret) {
+ dev_err(dev, "Failed to register IPI_VPU_INIT\n");
+ goto vpu_mutex_destroy;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ vpu_debugfs = debugfs_create_file("mtk_vpu", S_IRUGO, NULL, (void *)dev,
+ &vpu_debug_fops);
+ if (!vpu_debugfs) {
+ ret = -ENOMEM;
+ goto cleanup_ipi;
+ }
+#endif
+
+ /* Set PTCM to 96K and DTCM to 32K */
+ vpu_cfg_writel(vpu, 0x2, VPU_TCM_CFG);
+
+ vpu->enable_4GB = !!(totalram_pages > (SZ_2G >> PAGE_SHIFT));
+ dev_info(dev, "4GB mode %u\n", vpu->enable_4GB);
+
+ if (vpu->enable_4GB) {
+ ret = of_reserved_mem_device_init(dev);
+ if (ret)
+ dev_info(dev, "init reserved memory failed\n");
+ /* continue to use dynamic allocation if failed */
+ }
+
+ ret = vpu_alloc_ext_mem(vpu, D_FW);
+ if (ret) {
+ dev_err(dev, "Allocate DM failed\n");
+ goto remove_debugfs;
+ }
+
+ ret = vpu_alloc_ext_mem(vpu, P_FW);
+ if (ret) {
+ dev_err(dev, "Allocate PM failed\n");
+ goto free_d_mem;
+ }
+
+ init_waitqueue_head(&vpu->run.wq);
+ init_waitqueue_head(&vpu->ack_wq);
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(dev, "get IRQ resource failed.\n");
+ ret = -ENXIO;
+ goto free_p_mem;
+ }
+ vpu->reg.irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(dev, vpu->reg.irq, vpu_irq_handler, 0,
+ pdev->name, vpu);
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+ goto free_p_mem;
+ }
+
+ vpu_clock_disable(vpu);
+ dev_dbg(dev, "initialization completed\n");
+
+ return 0;
+
+free_p_mem:
+ vpu_free_ext_mem(vpu, P_FW);
+free_d_mem:
+ vpu_free_ext_mem(vpu, D_FW);
+remove_debugfs:
+ of_reserved_mem_device_release(dev);
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove(vpu_debugfs);
+cleanup_ipi:
+#endif
+ memset(vpu->ipi_desc, 0, sizeof(struct vpu_ipi_desc) * IPI_MAX);
+vpu_mutex_destroy:
+ mutex_destroy(&vpu->vpu_mutex);
+disable_vpu_clk:
+ vpu_clock_disable(vpu);
+workqueue_destroy:
+ destroy_workqueue(vpu->wdt.wq);
+
+ return ret;
+}
+
+static const struct of_device_id mtk_vpu_match[] = {
+ {
+ .compatible = "mediatek,mt8173-vpu",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtk_vpu_match);
+
+static int mtk_vpu_remove(struct platform_device *pdev)
+{
+ struct mtk_vpu *vpu = platform_get_drvdata(pdev);
+
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove(vpu_debugfs);
+#endif
+ if (vpu->wdt.wq) {
+ flush_workqueue(vpu->wdt.wq);
+ destroy_workqueue(vpu->wdt.wq);
+ }
+ vpu_free_ext_mem(vpu, P_FW);
+ vpu_free_ext_mem(vpu, D_FW);
+ mutex_destroy(&vpu->vpu_mutex);
+ clk_unprepare(vpu->clk);
+
+ return 0;
+}
+
+static struct platform_driver mtk_vpu_driver = {
+ .probe = mtk_vpu_probe,
+ .remove = mtk_vpu_remove,
+ .driver = {
+ .name = "mtk_vpu",
+ .of_match_table = mtk_vpu_match,
+ },
+};
+
+module_platform_driver(mtk_vpu_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Mediatek Video Prosessor Unit driver");
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.h b/drivers/media/platform/mtk-vpu/mtk_vpu.h
new file mode 100644
index 000000000..5ab37f04b
--- /dev/null
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.h
@@ -0,0 +1,162 @@
+/*
+* Copyright (c) 2016 MediaTek Inc.
+* Author: Andrew-CT Chen <andrew-ct.chen@mediatek.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#ifndef _MTK_VPU_H
+#define _MTK_VPU_H
+
+#include <linux/platform_device.h>
+
+/**
+ * VPU (video processor unit) is a tiny processor controlling video hardware
+ * related to video codecs, scaling and color format conversion.
+ * The VPU interfaces with other blocks via shared memory and interrupts.
+ **/
+
+typedef void (*ipi_handler_t) (void *data,
+ unsigned int len,
+ void *priv);
+
+/**
+ * enum ipi_id - the id of inter-processor interrupt
+ *
+ * @IPI_VPU_INIT: The interrupt from the VPU notifies the kernel that
+ *                VPU initialization has completed.
+ *                IPI_VPU_INIT is sent from the VPU when the firmware is
+ *                loaded. The AP doesn't need to send the IPI_VPU_INIT
+ *                command to the VPU.
+ *                For the other IPIs below, the AP should send the request
+ *                to the VPU to trigger the interrupt.
+ * @IPI_VENC_H264: The interrupt from the VPU notifies the kernel to
+ *                 handle an H264 video encoder job, and vice versa.
+ * @IPI_VENC_VP8: The interrupt from the VPU notifies the kernel to
+ *                handle a VP8 video encoder job, and vice versa.
+ * @IPI_MAX: The maximum IPI number
+ */
+enum ipi_id {
+ IPI_VPU_INIT = 0,
+ IPI_VENC_H264,
+ IPI_VENC_VP8,
+ IPI_MAX,
+};
+
+/**
+ * enum rst_id - reset id to register reset function for VPU watchdog timeout
+ *
+ * @VPU_RST_ENC: encoder reset id
+ * @VPU_RST_MAX: maximum reset id
+ */
+enum rst_id {
+ VPU_RST_ENC,
+ VPU_RST_MAX,
+};
+
+/**
+ * vpu_ipi_register - register an ipi function
+ *
+ * @pdev: VPU platform device
+ * @id: IPI ID
+ * @handler: IPI handler
+ * @name: IPI name
+ * @priv: private data for IPI handler
+ *
+ * Register an IPI function to receive IPI interrupts from the VPU.
+ *
+ * Return: 0 if the IPI function registers successfully, otherwise a
+ *         negative error code.
+ */
+int vpu_ipi_register(struct platform_device *pdev, enum ipi_id id,
+ ipi_handler_t handler, const char *name, void *priv);
+
+/**
+ * vpu_ipi_send - send data from AP to vpu.
+ *
+ * @pdev: VPU platform device
+ * @id: IPI ID
+ * @buf: the data buffer
+ * @len: the data buffer length
+ *
+ * This function is thread-safe. When this function returns,
+ * VPU has received the data and starts the processing.
+ * When the processing completes, IPI handler registered
+ * by vpu_ipi_register will be called in interrupt context.
+ *
+ * Return: 0 if the data is sent successfully, otherwise a negative error code.
+ **/
+int vpu_ipi_send(struct platform_device *pdev,
+ enum ipi_id id, void *buf,
+ unsigned int len);
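The handshake documented above (send a request, then have the registered handler run in interrupt context when the VPU answers) can be illustrated with a minimal hypothetical client. struct my_msg, the handler and the payload below are invented for this sketch and are not part of the patch; only the two API calls come from this header.

/* Hypothetical IPI client sketch; struct my_msg and the handler are
 * illustrations only, not part of this patch.
 */
struct my_msg {
	u32 msg_id;
	u32 payload;	/* must fit in the 48-byte shared buffer */
};

static void my_ipi_handler(void *data, unsigned int len, void *priv)
{
	/* Runs in interrupt context when the VPU sends a message back. */
	struct my_msg *msg = data;

	pr_debug("vpu replied to msg %u\n", msg->msg_id);
}

static int my_send(struct platform_device *vpu_pdev)
{
	struct my_msg msg = { .msg_id = 1, .payload = 0 };
	int ret;

	ret = vpu_ipi_register(vpu_pdev, IPI_VENC_H264, my_ipi_handler,
			       "my_client", NULL);
	if (ret)
		return ret;

	/* Blocks until the VPU acks or IPI_TIMEOUT_MS expires. */
	return vpu_ipi_send(vpu_pdev, IPI_VENC_H264, &msg, sizeof(msg));
}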
+
+/**
+ * vpu_get_plat_device - get VPU's platform device
+ *
+ * @pdev: the platform device of the module requesting VPU platform
+ * device for using VPU API.
+ *
+ * Return: the VPU's platform device on success,
+ *         or NULL on failure
+ **/
+struct platform_device *vpu_get_plat_device(struct platform_device *pdev);
+
+/**
+ * vpu_wdt_reg_handler - register a VPU watchdog handler
+ *
+ * @pdev: VPU platform device
+ * @vpu_wdt_reset_func: the callback reset function
+ * @priv: the private data for the reset function
+ * @id: reset id
+ *
+ * Register a handler that performs its own tasks when the VPU is reset
+ * by the watchdog.
+ *
+ * Return: 0 if the handler is added successfully,
+ *         otherwise a negative error code.
+ *
+ **/
+int vpu_wdt_reg_handler(struct platform_device *pdev,
+ void vpu_wdt_reset_func(void *),
+ void *priv, enum rst_id id);
+/**
+ * vpu_get_venc_hw_capa - get video encoder hardware capability
+ *
+ * @pdev: VPU platform device
+ *
+ * Return: video encoder hardware capability
+ **/
+unsigned int vpu_get_venc_hw_capa(struct platform_device *pdev);
+
+/**
+ * vpu_load_firmware - download VPU firmware and boot it
+ *
+ * @pdev: VPU platform device
+ *
+ * Return: 0 if the firmware is downloaded successfully,
+ *         otherwise a negative error code
+ **/
+int vpu_load_firmware(struct platform_device *pdev);
+
+/**
+ * vpu_mapping_dm_addr - Mapping DTCM/DMEM to kernel virtual address
+ *
+ * @pdev: VPU platform device
+ * @dtcm_dmem_addr: VPU's data memory address
+ *
+ * Mapping the VPU's DTCM (Data Tightly-Coupled Memory) /
+ * DMEM (Data Extended Memory) memory address to
+ * kernel virtual address.
+ *
+ * Return: the mapped kernel virtual address on success,
+ *         or ERR_PTR(-EINVAL) if the mapping failed
+ **/
+void *vpu_mapping_dm_addr(struct platform_device *pdev,
+ u32 dtcm_dmem_addr);
+#endif /* _MTK_VPU_H */
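Taken together, a typical client bring-up under the APIs declared above might look like the following sketch; the callback body and the trimmed error handling are illustrative assumptions, not code from this patch.

/*
 * Hypothetical client bring-up sketch combining the APIs declared above.
 */
static void my_vpu_reset_cb(void *priv)
{
	/* Re-initialize client state after a VPU watchdog reset. */
}

static int my_client_init(struct platform_device *pdev)
{
	struct platform_device *vpu_pdev;
	int ret;

	/* Resolves the "mediatek,vpu" phandle in this device's DT node. */
	vpu_pdev = vpu_get_plat_device(pdev);
	if (!vpu_pdev)
		return -ENODEV;

	ret = vpu_load_firmware(vpu_pdev);
	if (ret)
		return ret;

	return vpu_wdt_reg_handler(vpu_pdev, my_vpu_reset_cb, NULL,
				   VPU_RST_ENC);
}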
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index 3c4012d42..c639406fe 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -211,7 +211,6 @@ struct emmaprp_dev {
struct clk *clk_emma_ahb, *clk_emma_ipg;
struct v4l2_m2m_dev *m2m_dev;
- struct vb2_alloc_ctx *alloc_ctx;
};
struct emmaprp_ctx {
@@ -690,7 +689,7 @@ static const struct v4l2_ioctl_ops emmaprp_ioctl_ops = {
*/
static int emmaprp_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct emmaprp_ctx *ctx = vb2_get_drv_priv(vq);
struct emmaprp_q_data *q_data;
@@ -710,8 +709,6 @@ static int emmaprp_queue_setup(struct vb2_queue *vq,
*nbuffers = count;
sizes[0] = size;
- alloc_ctxs[0] = ctx->dev->alloc_ctx;
-
dprintk(ctx->dev, "get %d buffer(s) of size %d each.\n", count, size);
return 0;
@@ -765,6 +762,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->ops = &emmaprp_qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->dev = ctx->dev->v4l2_dev.dev;
ret = vb2_queue_init(src_vq);
if (ret)
@@ -777,6 +775,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->ops = &emmaprp_qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->dev = ctx->dev->v4l2_dev.dev;
return vb2_queue_init(dst_vq);
}
@@ -948,18 +947,11 @@ static int emmaprp_probe(struct platform_device *pdev)
if (ret)
goto rel_vdev;
- pcdev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
- if (IS_ERR(pcdev->alloc_ctx)) {
- v4l2_err(&pcdev->v4l2_dev, "Failed to alloc vb2 context\n");
- ret = PTR_ERR(pcdev->alloc_ctx);
- goto rel_vdev;
- }
-
pcdev->m2m_dev = v4l2_m2m_init(&m2m_ops);
if (IS_ERR(pcdev->m2m_dev)) {
v4l2_err(&pcdev->v4l2_dev, "Failed to init mem2mem device\n");
ret = PTR_ERR(pcdev->m2m_dev);
- goto rel_ctx;
+ goto rel_vdev;
}
ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
@@ -973,8 +965,6 @@ static int emmaprp_probe(struct platform_device *pdev)
rel_m2m:
v4l2_m2m_release(pcdev->m2m_dev);
-rel_ctx:
- vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
rel_vdev:
video_device_release(vfd);
unreg_dev:
@@ -993,7 +983,6 @@ static int emmaprp_remove(struct platform_device *pdev)
video_unregister_device(pcdev->vfd);
v4l2_m2m_release(pcdev->m2m_dev);
- vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
v4l2_device_unregister(&pcdev->v4l2_dev);
mutex_destroy(&pcdev->dev_mutex);
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index 70c28d19e..6b01e126f 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -45,7 +45,7 @@
#include <media/v4l2-ioctl.h>
#include <video/omapvrfb.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "omap_voutlib.h"
#include "omap_voutdef.h"
@@ -1318,71 +1318,16 @@ s_crop_err:
return ret;
}
-static int vidioc_queryctrl(struct file *file, void *fh,
- struct v4l2_queryctrl *ctrl)
+static int omap_vout_s_ctrl(struct v4l2_ctrl *ctrl)
{
+ struct omap_vout_device *vout =
+ container_of(ctrl->handler, struct omap_vout_device, ctrl_handler);
int ret = 0;
switch (ctrl->id) {
- case V4L2_CID_ROTATE:
- ret = v4l2_ctrl_query_fill(ctrl, 0, 270, 90, 0);
- break;
- case V4L2_CID_BG_COLOR:
- ret = v4l2_ctrl_query_fill(ctrl, 0, 0xFFFFFF, 1, 0);
- break;
- case V4L2_CID_VFLIP:
- ret = v4l2_ctrl_query_fill(ctrl, 0, 1, 1, 0);
- break;
- default:
- ctrl->name[0] = '\0';
- ret = -EINVAL;
- }
- return ret;
-}
-
-static int vidioc_g_ctrl(struct file *file, void *fh, struct v4l2_control *ctrl)
-{
- int ret = 0;
- struct omap_vout_device *vout = fh;
-
- switch (ctrl->id) {
- case V4L2_CID_ROTATE:
- ctrl->value = vout->control[0].value;
- break;
- case V4L2_CID_BG_COLOR:
- {
- struct omap_overlay_manager_info info;
- struct omap_overlay *ovl;
-
- ovl = vout->vid_info.overlays[0];
- if (!ovl->manager || !ovl->manager->get_manager_info) {
- ret = -EINVAL;
- break;
- }
-
- ovl->manager->get_manager_info(ovl->manager, &info);
- ctrl->value = info.default_color;
- break;
- }
- case V4L2_CID_VFLIP:
- ctrl->value = vout->control[2].value;
- break;
- default:
- ret = -EINVAL;
- }
- return ret;
-}
-
-static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *a)
-{
- int ret = 0;
- struct omap_vout_device *vout = fh;
-
- switch (a->id) {
- case V4L2_CID_ROTATE:
- {
+ case V4L2_CID_ROTATE: {
struct omapvideo_info *ovid;
- int rotation = a->value;
+ int rotation = ctrl->val;
ovid = &vout->vid_info;
@@ -1405,15 +1350,13 @@ static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *a)
ret = -EINVAL;
break;
}
-
- vout->control[0].value = rotation;
mutex_unlock(&vout->lock);
break;
}
case V4L2_CID_BG_COLOR:
{
struct omap_overlay *ovl;
- unsigned int color = a->value;
+ unsigned int color = ctrl->val;
struct omap_overlay_manager_info info;
ovl = vout->vid_info.overlays[0];
@@ -1432,15 +1375,13 @@ static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *a)
ret = -EINVAL;
break;
}
-
- vout->control[1].value = color;
mutex_unlock(&vout->lock);
break;
}
case V4L2_CID_VFLIP:
{
struct omapvideo_info *ovid;
- unsigned int mirror = a->value;
+ unsigned int mirror = ctrl->val;
ovid = &vout->vid_info;
@@ -1457,16 +1398,19 @@ static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *a)
break;
}
vout->mirror = mirror;
- vout->control[2].value = mirror;
mutex_unlock(&vout->lock);
break;
}
default:
- ret = -EINVAL;
+ return -EINVAL;
}
return ret;
}
+static const struct v4l2_ctrl_ops omap_vout_ctrl_ops = {
+ .s_ctrl = omap_vout_s_ctrl,
+};
+
static int vidioc_reqbufs(struct file *file, void *fh,
struct v4l2_requestbuffers *req)
{
@@ -1831,11 +1775,8 @@ static const struct v4l2_ioctl_ops vout_ioctl_ops = {
.vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out,
.vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out,
.vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
- .vidioc_queryctrl = vidioc_queryctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
.vidioc_s_fbuf = vidioc_s_fbuf,
.vidioc_g_fbuf = vidioc_g_fbuf,
- .vidioc_s_ctrl = vidioc_s_ctrl,
.vidioc_try_fmt_vid_out_overlay = vidioc_try_fmt_vid_overlay,
.vidioc_s_fmt_vid_out_overlay = vidioc_s_fmt_vid_overlay,
.vidioc_g_fmt_vid_out_overlay = vidioc_g_fmt_vid_overlay,
@@ -1865,9 +1806,9 @@ static int __init omap_vout_setup_video_data(struct omap_vout_device *vout)
{
struct video_device *vfd;
struct v4l2_pix_format *pix;
- struct v4l2_control *control;
struct omap_overlay *ovl = vout->vid_info.overlays[0];
struct omap_dss_device *display = ovl->get_device(ovl);
+ struct v4l2_ctrl_handler *hdl;
/* set the default pix */
pix = &vout->pix;
@@ -1896,29 +1837,32 @@ static int __init omap_vout_setup_video_data(struct omap_vout_device *vout)
omap_vout_new_format(pix, &vout->fbuf, &vout->crop, &vout->win);
- /*Initialize the control variables for
- rotation, flipping and background color. */
- control = vout->control;
- control[0].id = V4L2_CID_ROTATE;
- control[0].value = 0;
+ hdl = &vout->ctrl_handler;
+ v4l2_ctrl_handler_init(hdl, 3);
+ v4l2_ctrl_new_std(hdl, &omap_vout_ctrl_ops,
+ V4L2_CID_ROTATE, 0, 270, 90, 0);
+ v4l2_ctrl_new_std(hdl, &omap_vout_ctrl_ops,
+ V4L2_CID_BG_COLOR, 0, 0xffffff, 1, 0);
+ v4l2_ctrl_new_std(hdl, &omap_vout_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ if (hdl->error)
+ return hdl->error;
+
vout->rotation = 0;
vout->mirror = false;
- vout->control[2].id = V4L2_CID_HFLIP;
- vout->control[2].value = 0;
if (vout->vid_info.rotation_type == VOUT_ROT_VRFB)
vout->vrfb_bpp = 2;
- control[1].id = V4L2_CID_BG_COLOR;
- control[1].value = 0;
-
/* initialize the video_device struct */
vfd = vout->vfd = video_device_alloc();
if (!vfd) {
printk(KERN_ERR VOUT_NAME ": could not allocate"
" video device struct\n");
+ v4l2_ctrl_handler_free(hdl);
return -ENOMEM;
}
+ vfd->ctrl_handler = hdl;
vfd->release = video_device_release;
vfd->ioctl_ops = &vout_ioctl_ops;
@@ -2092,6 +2036,7 @@ static void omap_vout_cleanup_device(struct omap_vout_device *vout)
video_unregister_device(vfd);
}
}
+ v4l2_ctrl_handler_free(&vout->ctrl_handler);
if (ovid->rotation_type == VOUT_ROT_VRFB) {
omap_vout_release_vrfb(vout);
/* Free the VRFB buffer if allocated
diff --git a/drivers/media/platform/omap/omap_voutdef.h b/drivers/media/platform/omap/omap_voutdef.h
index 9ccfe1f47..80c79fabd 100644
--- a/drivers/media/platform/omap/omap_voutdef.h
+++ b/drivers/media/platform/omap/omap_voutdef.h
@@ -11,7 +11,8 @@
#ifndef OMAP_VOUTDEF_H
#define OMAP_VOUTDEF_H
-#include <video/omapdss.h>
+#include <media/v4l2-ctrls.h>
+#include <video/omapfb_dss.h>
#include <video/omapvrfb.h>
#define YUYV_BPP 2
@@ -116,6 +117,7 @@ struct omap_vout_device {
struct omapvideo_info vid_info;
struct video_device *vfd;
struct omap2video_device *vid_dev;
+ struct v4l2_ctrl_handler ctrl_handler;
int vid;
int opened;
@@ -149,12 +151,9 @@ struct omap_vout_device {
/* Lock to protect the shared data structures in ioctl */
struct mutex lock;
- /* V4L2 control structure for different control id */
- struct v4l2_control control[MAX_CID];
enum dss_rotation rotation;
bool mirror;
int flicker_filter;
- /* V4L2 control structure for different control id */
int bpp; /* bytes per pixel */
int vrfb_bpp; /* bytes per pixel with respect to VRFB */
diff --git a/drivers/media/platform/omap/omap_voutlib.c b/drivers/media/platform/omap/omap_voutlib.c
index 80b0d88f1..58a25fdf0 100644
--- a/drivers/media/platform/omap/omap_voutlib.c
+++ b/drivers/media/platform/omap/omap_voutlib.c
@@ -26,7 +26,7 @@
#include <linux/dma-mapping.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "omap_voutlib.h"
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index 1b1a95d54..7d9f35976 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -331,7 +331,7 @@ isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh)
static int isp_video_queue_setup(struct vb2_queue *queue,
unsigned int *count, unsigned int *num_planes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct isp_video_fh *vfh = vb2_get_drv_priv(queue);
struct isp_video *video = vfh->video;
@@ -342,8 +342,6 @@ static int isp_video_queue_setup(struct vb2_queue *queue,
if (sizes[0] == 0)
return -EINVAL;
- alloc_ctxs[0] = video->alloc_ctx;
-
*count = min(*count, video->capture_mem / PAGE_ALIGN(sizes[0]));
return 0;
@@ -1308,6 +1306,7 @@ static int isp_video_open(struct file *file)
queue->mem_ops = &vb2_dma_contig_memops;
queue->buf_struct_size = sizeof(struct isp_buffer);
queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ queue->dev = video->isp->dev;
ret = vb2_queue_init(&handle->queue);
if (ret < 0) {
@@ -1414,15 +1413,9 @@ int omap3isp_video_init(struct isp_video *video, const char *name)
return -EINVAL;
}
- video->alloc_ctx = vb2_dma_contig_init_ctx(video->isp->dev);
- if (IS_ERR(video->alloc_ctx))
- return PTR_ERR(video->alloc_ctx);
-
ret = media_entity_pads_init(&video->video.entity, 1, &video->pad);
- if (ret < 0) {
- vb2_dma_contig_cleanup_ctx(video->alloc_ctx);
+ if (ret < 0)
return ret;
- }
mutex_init(&video->mutex);
atomic_set(&video->active, 0);
@@ -1451,7 +1444,6 @@ int omap3isp_video_init(struct isp_video *video, const char *name)
void omap3isp_video_cleanup(struct isp_video *video)
{
- vb2_dma_contig_cleanup_ctx(video->alloc_ctx);
media_entity_cleanup(&video->video.entity);
mutex_destroy(&video->queue_lock);
mutex_destroy(&video->stream_lock);
diff --git a/drivers/media/platform/omap3isp/ispvideo.h b/drivers/media/platform/omap3isp/ispvideo.h
index 6a48d5879..f6a2082b4 100644
--- a/drivers/media/platform/omap3isp/ispvideo.h
+++ b/drivers/media/platform/omap3isp/ispvideo.h
@@ -171,7 +171,6 @@ struct isp_video {
bool error;
/* Video buffers queue */
- void *alloc_ctx;
struct vb2_queue *queue;
struct mutex queue_lock; /* protects the queue */
spinlock_t irqlock; /* protects dmaqueue */
diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c
new file mode 100644
index 000000000..bc50c69ee
--- /dev/null
+++ b/drivers/media/platform/rcar-fcp.c
@@ -0,0 +1,187 @@
+/*
+ * rcar-fcp.c -- R-Car Frame Compression Processor Driver
+ *
+ * Copyright (C) 2016 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include <media/rcar-fcp.h>
+
+struct rcar_fcp_device {
+ struct list_head list;
+ struct device *dev;
+};
+
+static LIST_HEAD(fcp_devices);
+static DEFINE_MUTEX(fcp_lock);
+
+/* -----------------------------------------------------------------------------
+ * Public API
+ */
+
+/**
+ * rcar_fcp_get - Find and acquire a reference to an FCP instance
+ * @np: Device node of the FCP instance
+ *
+ * Search the list of registered FCP instances for the instance corresponding to
+ * the given device node.
+ *
+ * Return a pointer to the FCP instance, or an ERR_PTR if the instance can't be
+ * found.
+ */
+struct rcar_fcp_device *rcar_fcp_get(const struct device_node *np)
+{
+ struct rcar_fcp_device *fcp;
+
+ mutex_lock(&fcp_lock);
+
+ list_for_each_entry(fcp, &fcp_devices, list) {
+ if (fcp->dev->of_node != np)
+ continue;
+
+ /*
+ * Make sure the module won't be unloaded behind our back. This
+ * is a poor man's safety net, the module should really not be
+ * unloaded while FCP users can be active.
+ */
+ if (!try_module_get(fcp->dev->driver->owner))
+ fcp = NULL;
+
+ goto done;
+ }
+
+ fcp = ERR_PTR(-EPROBE_DEFER);
+
+done:
+ mutex_unlock(&fcp_lock);
+ return fcp;
+}
+EXPORT_SYMBOL_GPL(rcar_fcp_get);
+
+/**
+ * rcar_fcp_put - Release a reference to an FCP instance
+ * @fcp: The FCP instance
+ *
+ * Release the FCP instance acquired by a call to rcar_fcp_get().
+ */
+void rcar_fcp_put(struct rcar_fcp_device *fcp)
+{
+ if (fcp)
+ module_put(fcp->dev->driver->owner);
+}
+EXPORT_SYMBOL_GPL(rcar_fcp_put);
+
+/**
+ * rcar_fcp_enable - Enable an FCP
+ * @fcp: The FCP instance
+ *
+ * Before any memory access through an FCP is performed by a module, the FCP
+ * must be enabled by a call to this function. The enable calls are reference
+ * counted, each successful call must be followed by one rcar_fcp_disable()
+ * call when no more memory transfer can occur through the FCP.
+ *
+ * Return 0 on success or a negative error code if an error occurs. The enable
+ * reference count isn't increased when this function returns an error.
+ */
+int rcar_fcp_enable(struct rcar_fcp_device *fcp)
+{
+ int error;
+
+ if (!fcp)
+ return 0;
+
+ error = pm_runtime_get_sync(fcp->dev);
+ if (error < 0)
+ return error;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rcar_fcp_enable);
+
+/**
+ * rcar_fcp_disable - Disable an FCP
+ * @fcp: The FCP instance
+ *
+ * This function is the counterpart of rcar_fcp_enable(). As enable calls are
+ * reference counted a disable call may not disable the FCP synchronously.
+ */
+void rcar_fcp_disable(struct rcar_fcp_device *fcp)
+{
+ if (fcp)
+ pm_runtime_put(fcp->dev);
+}
+EXPORT_SYMBOL_GPL(rcar_fcp_disable);
+
+/* -----------------------------------------------------------------------------
+ * Platform Driver
+ */
+
+static int rcar_fcp_probe(struct platform_device *pdev)
+{
+ struct rcar_fcp_device *fcp;
+
+ fcp = devm_kzalloc(&pdev->dev, sizeof(*fcp), GFP_KERNEL);
+ if (fcp == NULL)
+ return -ENOMEM;
+
+ fcp->dev = &pdev->dev;
+
+ pm_runtime_enable(&pdev->dev);
+
+ mutex_lock(&fcp_lock);
+ list_add_tail(&fcp->list, &fcp_devices);
+ mutex_unlock(&fcp_lock);
+
+ platform_set_drvdata(pdev, fcp);
+
+ return 0;
+}
+
+static int rcar_fcp_remove(struct platform_device *pdev)
+{
+ struct rcar_fcp_device *fcp = platform_get_drvdata(pdev);
+
+ mutex_lock(&fcp_lock);
+ list_del(&fcp->list);
+ mutex_unlock(&fcp_lock);
+
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id rcar_fcp_of_match[] = {
+ { .compatible = "renesas,fcpv" },
+ { },
+};
+
+static struct platform_driver rcar_fcp_platform_driver = {
+ .probe = rcar_fcp_probe,
+ .remove = rcar_fcp_remove,
+ .driver = {
+ .name = "rcar-fcp",
+ .of_match_table = rcar_fcp_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+module_platform_driver(rcar_fcp_platform_driver);
+
+MODULE_ALIAS("rcar-fcp");
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Renesas FCP Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/rcar-vin/Kconfig b/drivers/media/platform/rcar-vin/Kconfig
new file mode 100644
index 000000000..b2ff2d4e8
--- /dev/null
+++ b/drivers/media/platform/rcar-vin/Kconfig
@@ -0,0 +1,11 @@
+config VIDEO_RCAR_VIN
+ tristate "R-Car Video Input (VIN) Driver"
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && OF && HAS_DMA
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ ---help---
+ Support for the Renesas R-Car Video Input (VIN) driver.
+ It supports R-Car Gen2 SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rcar-vin.
diff --git a/drivers/media/platform/rcar-vin/Makefile b/drivers/media/platform/rcar-vin/Makefile
new file mode 100644
index 000000000..48c5632c2
--- /dev/null
+++ b/drivers/media/platform/rcar-vin/Makefile
@@ -0,0 +1,3 @@
+rcar-vin-objs = rcar-core.o rcar-dma.o rcar-v4l2.o
+
+obj-$(CONFIG_VIDEO_RCAR_VIN) += rcar-vin.o
diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
new file mode 100644
index 000000000..4b2007b73
--- /dev/null
+++ b/drivers/media/platform/rcar-vin/rcar-core.c
@@ -0,0 +1,334 @@
+/*
+ * Driver for Renesas R-Car VIN
+ *
+ * Copyright (C) 2016 Renesas Electronics Corp.
+ * Copyright (C) 2011-2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Cogent Embedded, Inc., <source@cogentembedded.com>
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * Based on the soc-camera rcar_vin driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-of.h>
+
+#include "rcar-vin.h"
+
+/* -----------------------------------------------------------------------------
+ * Async notifier
+ */
+
+#define notifier_to_vin(n) container_of(n, struct rvin_dev, notifier)
+
+static bool rvin_mbus_supported(struct rvin_dev *vin)
+{
+ struct v4l2_subdev *sd;
+ struct v4l2_subdev_mbus_code_enum code = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ sd = vin_to_source(vin);
+
+ code.index = 0;
+ while (!v4l2_subdev_call(sd, pad, enum_mbus_code, NULL, &code)) {
+ code.index++;
+ switch (code.code) {
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YUYV10_2X10:
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ vin->source.code = code.code;
+ vin_dbg(vin, "Found supported media bus format: %d\n",
+ vin->source.code);
+ return true;
+ default:
+ break;
+ }
+ }
+
+ return false;
+}
+
+static int rvin_graph_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct rvin_dev *vin = notifier_to_vin(notifier);
+ int ret;
+
+ ret = v4l2_device_register_subdev_nodes(&vin->v4l2_dev);
+ if (ret < 0) {
+ vin_err(vin, "Failed to register subdev nodes\n");
+ return ret;
+ }
+
+ if (!rvin_mbus_supported(vin)) {
+ vin_err(vin, "No supported mediabus format found\n");
+ return -EINVAL;
+ }
+
+ return rvin_v4l2_probe(vin);
+}
+
+static void rvin_graph_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
+{
+ struct rvin_dev *vin = notifier_to_vin(notifier);
+
+ rvin_v4l2_remove(vin);
+}
+
+static int rvin_graph_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct rvin_dev *vin = notifier_to_vin(notifier);
+
+ vin_dbg(vin, "subdev %s bound\n", subdev->name);
+
+ vin->entity.entity = &subdev->entity;
+ vin->entity.subdev = subdev;
+
+ return 0;
+}
+
+static int rvin_graph_parse(struct rvin_dev *vin,
+ struct device_node *node)
+{
+ struct device_node *remote;
+ struct device_node *ep = NULL;
+ struct device_node *next;
+ int ret = 0;
+
+ while (1) {
+ next = of_graph_get_next_endpoint(node, ep);
+ if (!next)
+ break;
+
+ of_node_put(ep);
+ ep = next;
+
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote) {
+ ret = -EINVAL;
+ break;
+ }
+
+ /* Skip entities that we have already processed. */
+ if (remote == vin->dev->of_node) {
+ of_node_put(remote);
+ continue;
+ }
+
+ /* Remote node to connect */
+ if (!vin->entity.node) {
+ vin->entity.node = remote;
+ vin->entity.asd.match_type = V4L2_ASYNC_MATCH_OF;
+ vin->entity.asd.match.of.node = remote;
+ ret++;
+ }
+ }
+
+ of_node_put(ep);
+
+ return ret;
+}
+
+static int rvin_graph_init(struct rvin_dev *vin)
+{
+ struct v4l2_async_subdev **subdevs = NULL;
+ int ret;
+
+ /* Parse the graph to extract a list of subdevice DT nodes. */
+ ret = rvin_graph_parse(vin, vin->dev->of_node);
+ if (ret < 0) {
+ vin_err(vin, "Graph parsing failed\n");
+ goto done;
+ }
+
+ if (!ret) {
+ vin_err(vin, "No subdev found in graph\n");
+ ret = -ENODEV;
+ goto done;
+ }
+
+ if (ret != 1) {
+ vin_err(vin, "More than one subdev found in graph\n");
+ ret = -ENODEV;
+ goto done;
+ }
+
+ /* Register the subdevices notifier. */
+ subdevs = devm_kzalloc(vin->dev, sizeof(*subdevs), GFP_KERNEL);
+ if (subdevs == NULL) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ subdevs[0] = &vin->entity.asd;
+
+ vin->notifier.subdevs = subdevs;
+ vin->notifier.num_subdevs = 1;
+ vin->notifier.bound = rvin_graph_notify_bound;
+ vin->notifier.unbind = rvin_graph_notify_unbind;
+ vin->notifier.complete = rvin_graph_notify_complete;
+
+ ret = v4l2_async_notifier_register(&vin->v4l2_dev, &vin->notifier);
+ if (ret < 0) {
+ vin_err(vin, "Notifier registration failed\n");
+ goto done;
+ }
+
+ ret = 0;
+
+done:
+ if (ret < 0) {
+ v4l2_async_notifier_unregister(&vin->notifier);
+ of_node_put(vin->entity.node);
+ }
+
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+
+static const struct of_device_id rvin_of_id_table[] = {
+ { .compatible = "renesas,vin-r8a7794", .data = (void *)RCAR_GEN2 },
+ { .compatible = "renesas,vin-r8a7793", .data = (void *)RCAR_GEN2 },
+ { .compatible = "renesas,vin-r8a7791", .data = (void *)RCAR_GEN2 },
+ { .compatible = "renesas,vin-r8a7790", .data = (void *)RCAR_GEN2 },
+ { .compatible = "renesas,vin-r8a7779", .data = (void *)RCAR_H1 },
+ { .compatible = "renesas,vin-r8a7778", .data = (void *)RCAR_M1 },
+ { },
+};
+MODULE_DEVICE_TABLE(of, rvin_of_id_table);
+
+static int rvin_parse_dt(struct rvin_dev *vin)
+{
+ const struct of_device_id *match;
+ struct v4l2_of_endpoint ep;
+ struct device_node *np;
+ int ret;
+
+ match = of_match_device(of_match_ptr(rvin_of_id_table), vin->dev);
+ if (!match)
+ return -ENODEV;
+
+ vin->chip = (enum chip_id)match->data;
+
+ np = of_graph_get_next_endpoint(vin->dev->of_node, NULL);
+ if (!np) {
+ vin_err(vin, "Could not find endpoint\n");
+ return -EINVAL;
+ }
+
+ ret = v4l2_of_parse_endpoint(np, &ep);
+ if (ret) {
+ vin_err(vin, "Could not parse endpoint\n");
+ of_node_put(np);
+ return ret;
+ }
+
+ of_node_put(np);
+
+ vin->mbus_cfg.type = ep.bus_type;
+
+ switch (vin->mbus_cfg.type) {
+ case V4L2_MBUS_PARALLEL:
+ vin->mbus_cfg.flags = ep.bus.parallel.flags;
+ break;
+ case V4L2_MBUS_BT656:
+ vin->mbus_cfg.flags = 0;
+ break;
+ default:
+ vin_err(vin, "Unknown media bus type\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rcar_vin_probe(struct platform_device *pdev)
+{
+ struct rvin_dev *vin;
+ struct resource *mem;
+ int irq, ret;
+
+ vin = devm_kzalloc(&pdev->dev, sizeof(*vin), GFP_KERNEL);
+ if (!vin)
+ return -ENOMEM;
+
+ vin->dev = &pdev->dev;
+
+ ret = rvin_parse_dt(vin);
+ if (ret)
+ return ret;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (mem == NULL)
+ return -EINVAL;
+
+ vin->base = devm_ioremap_resource(vin->dev, mem);
+ if (IS_ERR(vin->base))
+ return PTR_ERR(vin->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return irq ? irq : -ENXIO;
+
+ ret = rvin_dma_probe(vin, irq);
+ if (ret)
+ return ret;
+
+ ret = rvin_graph_init(vin);
+ if (ret < 0)
+ goto error;
+
+ pm_suspend_ignore_children(&pdev->dev, true);
+ pm_runtime_enable(&pdev->dev);
+
+ platform_set_drvdata(pdev, vin);
+
+ return 0;
+error:
+ rvin_dma_remove(vin);
+
+ return ret;
+}
+
+static int rcar_vin_remove(struct platform_device *pdev)
+{
+ struct rvin_dev *vin = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ v4l2_async_notifier_unregister(&vin->notifier);
+
+ rvin_dma_remove(vin);
+
+ return 0;
+}
+
+static struct platform_driver rcar_vin_driver = {
+ .driver = {
+ .name = "rcar-vin",
+ .of_match_table = rvin_of_id_table,
+ },
+ .probe = rcar_vin_probe,
+ .remove = rcar_vin_remove,
+};
+
+module_platform_driver(rcar_vin_driver);
+
+MODULE_AUTHOR("Niklas Söderlund <niklas.soderlund@ragnatech.se>");
+MODULE_DESCRIPTION("Renesas R-Car VIN camera host driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
new file mode 100644
index 000000000..496aa97b6
--- /dev/null
+++ b/drivers/media/platform/rcar-vin/rcar-dma.c
@@ -0,0 +1,1187 @@
+/*
+ * Driver for Renesas R-Car VIN
+ *
+ * Copyright (C) 2016 Renesas Electronics Corp.
+ * Copyright (C) 2011-2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Cogent Embedded, Inc., <source@cogentembedded.com>
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * Based on the soc-camera rcar_vin driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+
+#include <media/videobuf2-dma-contig.h>
+
+#include "rcar-vin.h"
+
+/* -----------------------------------------------------------------------------
+ * HW Functions
+ */
+
+/* Register offsets for R-Car VIN */
+#define VNMC_REG 0x00 /* Video n Main Control Register */
+#define VNMS_REG 0x04 /* Video n Module Status Register */
+#define VNFC_REG 0x08 /* Video n Frame Capture Register */
+#define VNSLPRC_REG 0x0C /* Video n Start Line Pre-Clip Register */
+#define VNELPRC_REG 0x10 /* Video n End Line Pre-Clip Register */
+#define VNSPPRC_REG 0x14 /* Video n Start Pixel Pre-Clip Register */
+#define VNEPPRC_REG 0x18 /* Video n End Pixel Pre-Clip Register */
+#define VNSLPOC_REG 0x1C /* Video n Start Line Post-Clip Register */
+#define VNELPOC_REG 0x20 /* Video n End Line Post-Clip Register */
+#define VNSPPOC_REG 0x24 /* Video n Start Pixel Post-Clip Register */
+#define VNEPPOC_REG 0x28 /* Video n End Pixel Post-Clip Register */
+#define VNIS_REG 0x2C /* Video n Image Stride Register */
+#define VNMB_REG(m) (0x30 + ((m) << 2)) /* Video n Memory Base m Register */
+#define VNIE_REG 0x40 /* Video n Interrupt Enable Register */
+#define VNINTS_REG 0x44 /* Video n Interrupt Status Register */
+#define VNSI_REG 0x48 /* Video n Scanline Interrupt Register */
+#define VNMTC_REG 0x4C /* Video n Memory Transfer Control Register */
+#define VNYS_REG 0x50 /* Video n Y Scale Register */
+#define VNXS_REG 0x54 /* Video n X Scale Register */
+#define VNDMR_REG 0x58 /* Video n Data Mode Register */
+#define VNDMR2_REG 0x5C /* Video n Data Mode Register 2 */
+#define VNUVAOF_REG 0x60 /* Video n UV Address Offset Register */
+#define VNC1A_REG 0x80 /* Video n Coefficient Set C1A Register */
+#define VNC1B_REG 0x84 /* Video n Coefficient Set C1B Register */
+#define VNC1C_REG 0x88 /* Video n Coefficient Set C1C Register */
+#define VNC2A_REG 0x90 /* Video n Coefficient Set C2A Register */
+#define VNC2B_REG 0x94 /* Video n Coefficient Set C2B Register */
+#define VNC2C_REG 0x98 /* Video n Coefficient Set C2C Register */
+#define VNC3A_REG 0xA0 /* Video n Coefficient Set C3A Register */
+#define VNC3B_REG 0xA4 /* Video n Coefficient Set C3B Register */
+#define VNC3C_REG 0xA8 /* Video n Coefficient Set C3C Register */
+#define VNC4A_REG 0xB0 /* Video n Coefficient Set C4A Register */
+#define VNC4B_REG 0xB4 /* Video n Coefficient Set C4B Register */
+#define VNC4C_REG 0xB8 /* Video n Coefficient Set C4C Register */
+#define VNC5A_REG 0xC0 /* Video n Coefficient Set C5A Register */
+#define VNC5B_REG 0xC4 /* Video n Coefficient Set C5B Register */
+#define VNC5C_REG 0xC8 /* Video n Coefficient Set C5C Register */
+#define VNC6A_REG 0xD0 /* Video n Coefficient Set C6A Register */
+#define VNC6B_REG 0xD4 /* Video n Coefficient Set C6B Register */
+#define VNC6C_REG 0xD8 /* Video n Coefficient Set C6C Register */
+#define VNC7A_REG 0xE0 /* Video n Coefficient Set C7A Register */
+#define VNC7B_REG 0xE4 /* Video n Coefficient Set C7B Register */
+#define VNC7C_REG 0xE8 /* Video n Coefficient Set C7C Register */
+#define VNC8A_REG 0xF0 /* Video n Coefficient Set C8A Register */
+#define VNC8B_REG 0xF4 /* Video n Coefficient Set C8B Register */
+#define VNC8C_REG 0xF8 /* Video n Coefficient Set C8C Register */
+
+/* Register bit fields for R-Car VIN */
+/* Video n Main Control Register bits */
+#define VNMC_FOC (1 << 21)
+#define VNMC_YCAL (1 << 19)
+#define VNMC_INF_YUV8_BT656 (0 << 16)
+#define VNMC_INF_YUV8_BT601 (1 << 16)
+#define VNMC_INF_YUV10_BT656 (2 << 16)
+#define VNMC_INF_YUV10_BT601 (3 << 16)
+#define VNMC_INF_YUV16 (5 << 16)
+#define VNMC_INF_RGB888 (6 << 16)
+#define VNMC_VUP (1 << 10)
+#define VNMC_IM_ODD (0 << 3)
+#define VNMC_IM_ODD_EVEN (1 << 3)
+#define VNMC_IM_EVEN (2 << 3)
+#define VNMC_IM_FULL (3 << 3)
+#define VNMC_BPS (1 << 1)
+#define VNMC_ME (1 << 0)
+
+/* Video n Module Status Register bits */
+#define VNMS_FBS_MASK (3 << 3)
+#define VNMS_FBS_SHIFT 3
+#define VNMS_AV (1 << 1)
+#define VNMS_CA (1 << 0)
+
+/* Video n Frame Capture Register bits */
+#define VNFC_C_FRAME (1 << 1)
+#define VNFC_S_FRAME (1 << 0)
+
+/* Video n Interrupt Enable Register bits */
+#define VNIE_FIE (1 << 4)
+#define VNIE_EFE (1 << 1)
+
+/* Video n Data Mode Register bits */
+#define VNDMR_EXRGB (1 << 8)
+#define VNDMR_BPSM (1 << 4)
+#define VNDMR_DTMD_YCSEP (1 << 1)
+#define VNDMR_DTMD_ARGB1555 (1 << 0)
+
+/* Video n Data Mode Register 2 bits */
+#define VNDMR2_VPS (1 << 30)
+#define VNDMR2_HPS (1 << 29)
+#define VNDMR2_FTEV (1 << 17)
+#define VNDMR2_VLV(n) ((n & 0xf) << 12)
+
+static void rvin_write(struct rvin_dev *vin, u32 value, u32 offset)
+{
+ iowrite32(value, vin->base + offset);
+}
+
+static u32 rvin_read(struct rvin_dev *vin, u32 offset)
+{
+ return ioread32(vin->base + offset);
+}
+
+static int rvin_setup(struct rvin_dev *vin)
+{
+ u32 vnmc, dmr, dmr2, interrupts;
+ bool progressive = false, output_is_yuv = false, input_is_yuv = false;
+
+ switch (vin->format.field) {
+ case V4L2_FIELD_TOP:
+ vnmc = VNMC_IM_ODD;
+ break;
+ case V4L2_FIELD_BOTTOM:
+ vnmc = VNMC_IM_EVEN;
+ break;
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_INTERLACED_TB:
+ vnmc = VNMC_IM_FULL;
+ break;
+ case V4L2_FIELD_INTERLACED_BT:
+ vnmc = VNMC_IM_FULL | VNMC_FOC;
+ break;
+ case V4L2_FIELD_NONE:
+ if (vin->continuous) {
+ vnmc = VNMC_IM_ODD_EVEN;
+ progressive = true;
+ } else {
+ vnmc = VNMC_IM_ODD;
+ }
+ break;
+ default:
+ vnmc = VNMC_IM_ODD;
+ break;
+ }
+
+ /*
+ * Input interface
+ */
+ switch (vin->source.code) {
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ /* BT.601/BT.1358 16bit YCbCr422 */
+ vnmc |= VNMC_INF_YUV16;
+ input_is_yuv = true;
+ break;
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ /* BT.656 8bit YCbCr422 or BT.601 8bit YCbCr422 */
+ vnmc |= vin->mbus_cfg.type == V4L2_MBUS_BT656 ?
+ VNMC_INF_YUV8_BT656 : VNMC_INF_YUV8_BT601;
+ input_is_yuv = true;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ vnmc |= VNMC_INF_RGB888;
+ break;
+ case MEDIA_BUS_FMT_YUYV10_2X10:
+ /* BT.656 10bit YCbCr422 or BT.601 10bit YCbCr422 */
+ vnmc |= vin->mbus_cfg.type == V4L2_MBUS_BT656 ?
+ VNMC_INF_YUV10_BT656 : VNMC_INF_YUV10_BT601;
+ input_is_yuv = true;
+ break;
+ default:
+ break;
+ }
+
+	/* Enable VSYNC Field Toggle mode after one VSYNC input */
+ dmr2 = VNDMR2_FTEV | VNDMR2_VLV(1);
+
+ /* Hsync Signal Polarity Select */
+ if (!(vin->mbus_cfg.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW))
+ dmr2 |= VNDMR2_HPS;
+
+ /* Vsync Signal Polarity Select */
+ if (!(vin->mbus_cfg.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW))
+ dmr2 |= VNDMR2_VPS;
+
+ /*
+ * Output format
+ */
+ switch (vin->format.pixelformat) {
+ case V4L2_PIX_FMT_NV16:
+ rvin_write(vin,
+ ALIGN(vin->format.width * vin->format.height, 0x80),
+ VNUVAOF_REG);
+ dmr = VNDMR_DTMD_YCSEP;
+ output_is_yuv = true;
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ dmr = VNDMR_BPSM;
+ output_is_yuv = true;
+ break;
+ case V4L2_PIX_FMT_UYVY:
+ dmr = 0;
+ output_is_yuv = true;
+ break;
+ case V4L2_PIX_FMT_XRGB555:
+ dmr = VNDMR_DTMD_ARGB1555;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ dmr = 0;
+ break;
+ case V4L2_PIX_FMT_XBGR32:
+ if (vin->chip == RCAR_GEN2 || vin->chip == RCAR_H1) {
+ dmr = VNDMR_EXRGB;
+ break;
+ }
+ /* fall through */
+ default:
+ vin_err(vin, "Invalid pixelformat (0x%x)\n",
+ vin->format.pixelformat);
+ return -EINVAL;
+ }
+
+ /* Always update on field change */
+ vnmc |= VNMC_VUP;
+
+ /* If input and output use the same colorspace, use bypass mode */
+ if (input_is_yuv == output_is_yuv)
+ vnmc |= VNMC_BPS;
+
+ /* Progressive or interlaced mode */
+ interrupts = progressive ? VNIE_FIE : VNIE_EFE;
+
+ /* Ack interrupts */
+ rvin_write(vin, interrupts, VNINTS_REG);
+ /* Enable interrupts */
+ rvin_write(vin, interrupts, VNIE_REG);
+ /* Start capturing */
+ rvin_write(vin, dmr, VNDMR_REG);
+ rvin_write(vin, dmr2, VNDMR2_REG);
+
+ /* Enable module */
+ rvin_write(vin, vnmc | VNMC_ME, VNMC_REG);
+
+ return 0;
+}
+
+static void rvin_capture_on(struct rvin_dev *vin)
+{
+ vin_dbg(vin, "Capture on in %s mode\n",
+ vin->continuous ? "continuous" : "single");
+
+ if (vin->continuous)
+ /* Continuous Frame Capture Mode */
+ rvin_write(vin, VNFC_C_FRAME, VNFC_REG);
+ else
+ /* Single Frame Capture Mode */
+ rvin_write(vin, VNFC_S_FRAME, VNFC_REG);
+}
+
+static void rvin_capture_off(struct rvin_dev *vin)
+{
+ /* Set continuous & single transfer off */
+ rvin_write(vin, 0, VNFC_REG);
+}
+
+static int rvin_capture_start(struct rvin_dev *vin)
+{
+ int ret;
+
+ rvin_crop_scale_comp(vin);
+
+ ret = rvin_setup(vin);
+ if (ret)
+ return ret;
+
+ rvin_capture_on(vin);
+
+ return 0;
+}
+
+static void rvin_capture_stop(struct rvin_dev *vin)
+{
+ rvin_capture_off(vin);
+
+ /* Disable module */
+ rvin_write(vin, rvin_read(vin, VNMC_REG) & ~VNMC_ME, VNMC_REG);
+}
+
+static void rvin_disable_interrupts(struct rvin_dev *vin)
+{
+ rvin_write(vin, 0, VNIE_REG);
+}
+
+static u32 rvin_get_interrupt_status(struct rvin_dev *vin)
+{
+ return rvin_read(vin, VNINTS_REG);
+}
+
+static void rvin_ack_interrupt(struct rvin_dev *vin)
+{
+ rvin_write(vin, rvin_read(vin, VNINTS_REG), VNINTS_REG);
+}
+
+static bool rvin_capture_active(struct rvin_dev *vin)
+{
+ return rvin_read(vin, VNMS_REG) & VNMS_CA;
+}
+
+static int rvin_get_active_slot(struct rvin_dev *vin)
+{
+ if (vin->continuous)
+ return (rvin_read(vin, VNMS_REG) & VNMS_FBS_MASK)
+ >> VNMS_FBS_SHIFT;
+
+ return 0;
+}
+
+static void rvin_set_slot_addr(struct rvin_dev *vin, int slot, dma_addr_t addr)
+{
+ const struct rvin_video_format *fmt;
+ int offsetx, offsety;
+ dma_addr_t offset;
+
+ fmt = rvin_format_from_pixel(vin->format.pixelformat);
+
+ /*
+	 * There is no HW support for composition, so do the best we can
+	 * by modifying the buffer offset.
+ */
+ offsetx = vin->compose.left * fmt->bpp;
+ offsety = vin->compose.top * vin->format.bytesperline;
+ offset = addr + offsetx + offsety;
+
+ /*
+	 * The address needs to be 128-byte aligned. The driver should never
+	 * accept settings that do not satisfy this in the first place...
+ */
+ if (WARN_ON((offsetx | offsety | offset) & HW_BUFFER_MASK))
+ return;
+
+ rvin_write(vin, offset, VNMB_REG(slot));
+}
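
The composition offset folded into the slot address above must keep every term 128-byte aligned, since HW_BUFFER_MASK is 0x7f. A minimal user-space sketch of that arithmetic, assuming a hypothetical DMA base address, a 4-byte XBGR32 pixel and a 1024-pixel line:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define HW_BUFFER_MASK 0x7f	/* 128-byte alignment, as in rcar-vin.h */

int main(void)
{
	uint64_t base = 0x40000000;		/* hypothetical DMA address */
	unsigned int bpp = 4;			/* XBGR32: 4 bytes per pixel */
	unsigned int stride = 1024 * bpp;	/* bytesperline */
	unsigned int left = 32, top = 16;	/* compose offset */

	uint64_t offsetx = (uint64_t)left * bpp;
	uint64_t offsety = (uint64_t)top * stride;
	uint64_t addr = base + offsetx + offsety;

	/* Mirrors the WARN_ON() above: every term must be aligned */
	assert(!((offsetx | offsety | addr) & HW_BUFFER_MASK));
	printf("slot address: %#llx\n", (unsigned long long)addr);
	return 0;
}
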
+
+/* -----------------------------------------------------------------------------
+ * Crop and Scaling Gen2
+ */
+
+struct vin_coeff {
+ unsigned short xs_value;
+ u32 coeff_set[24];
+};
+
+static const struct vin_coeff vin_coeff_set[] = {
+ { 0x0000, {
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000 },
+ },
+ { 0x1000, {
+ 0x000fa400, 0x000fa400, 0x09625902,
+ 0x000003f8, 0x00000403, 0x3de0d9f0,
+ 0x001fffed, 0x00000804, 0x3cc1f9c3,
+ 0x001003de, 0x00000c01, 0x3cb34d7f,
+ 0x002003d2, 0x00000c00, 0x3d24a92d,
+ 0x00200bca, 0x00000bff, 0x3df600d2,
+ 0x002013cc, 0x000007ff, 0x3ed70c7e,
+ 0x00100fde, 0x00000000, 0x3f87c036 },
+ },
+ { 0x1200, {
+ 0x002ffff1, 0x002ffff1, 0x02a0a9c8,
+ 0x002003e7, 0x001ffffa, 0x000185bc,
+ 0x002007dc, 0x000003ff, 0x3e52859c,
+ 0x00200bd4, 0x00000002, 0x3d53996b,
+ 0x00100fd0, 0x00000403, 0x3d04ad2d,
+ 0x00000bd5, 0x00000403, 0x3d35ace7,
+ 0x3ff003e4, 0x00000801, 0x3dc674a1,
+ 0x3fffe800, 0x00000800, 0x3e76f461 },
+ },
+ { 0x1400, {
+ 0x00100be3, 0x00100be3, 0x04d1359a,
+ 0x00000fdb, 0x002003ed, 0x0211fd93,
+ 0x00000fd6, 0x002003f4, 0x0002d97b,
+ 0x000007d6, 0x002ffffb, 0x3e93b956,
+ 0x3ff003da, 0x001003ff, 0x3db49926,
+ 0x3fffefe9, 0x00100001, 0x3d655cee,
+ 0x3fffd400, 0x00000003, 0x3d65f4b6,
+ 0x000fb421, 0x00000402, 0x3dc6547e },
+ },
+ { 0x1600, {
+ 0x00000bdd, 0x00000bdd, 0x06519578,
+ 0x3ff007da, 0x00000be3, 0x03c24973,
+ 0x3ff003d9, 0x00000be9, 0x01b30d5f,
+ 0x3ffff7df, 0x001003f1, 0x0003c542,
+ 0x000fdfec, 0x001003f7, 0x3ec4711d,
+ 0x000fc400, 0x002ffffd, 0x3df504f1,
+ 0x001fa81a, 0x002ffc00, 0x3d957cc2,
+ 0x002f8c3c, 0x00100000, 0x3db5c891 },
+ },
+ { 0x1800, {
+ 0x3ff003dc, 0x3ff003dc, 0x0791e558,
+ 0x000ff7dd, 0x3ff007de, 0x05328554,
+ 0x000fe7e3, 0x3ff00be2, 0x03232546,
+ 0x000fd7ee, 0x000007e9, 0x0143bd30,
+ 0x001fb800, 0x000007ee, 0x00044511,
+ 0x002fa015, 0x000007f4, 0x3ef4bcee,
+ 0x002f8832, 0x001003f9, 0x3e4514c7,
+ 0x001f7853, 0x001003fd, 0x3de54c9f },
+ },
+ { 0x1a00, {
+ 0x000fefe0, 0x000fefe0, 0x08721d3c,
+ 0x001fdbe7, 0x000ffbde, 0x0652a139,
+ 0x001fcbf0, 0x000003df, 0x0463292e,
+ 0x002fb3ff, 0x3ff007e3, 0x0293a91d,
+ 0x002f9c12, 0x3ff00be7, 0x01241905,
+ 0x001f8c29, 0x000007ed, 0x3fe470eb,
+ 0x000f7c46, 0x000007f2, 0x3f04b8ca,
+ 0x3fef7865, 0x000007f6, 0x3e74e4a8 },
+ },
+ { 0x1c00, {
+ 0x001fd3e9, 0x001fd3e9, 0x08f23d26,
+ 0x002fbff3, 0x001fe3e4, 0x0712ad23,
+ 0x002fa800, 0x000ff3e0, 0x05631d1b,
+ 0x001f9810, 0x000ffbe1, 0x03b3890d,
+ 0x000f8c23, 0x000003e3, 0x0233e8fa,
+ 0x3fef843b, 0x000003e7, 0x00f430e4,
+ 0x3fbf8456, 0x3ff00bea, 0x00046cc8,
+ 0x3f8f8c72, 0x3ff00bef, 0x3f3490ac },
+ },
+ { 0x1e00, {
+ 0x001fbbf4, 0x001fbbf4, 0x09425112,
+ 0x001fa800, 0x002fc7ed, 0x0792b110,
+ 0x000f980e, 0x001fdbe6, 0x0613110a,
+ 0x3fff8c20, 0x001fe7e3, 0x04a368fd,
+ 0x3fcf8c33, 0x000ff7e2, 0x0343b8ed,
+ 0x3f9f8c4a, 0x000fffe3, 0x0203f8da,
+ 0x3f5f9c61, 0x000003e6, 0x00e428c5,
+ 0x3f1fb07b, 0x000003eb, 0x3fe440af },
+ },
+ { 0x2000, {
+ 0x000fa400, 0x000fa400, 0x09625902,
+ 0x3fff980c, 0x001fb7f5, 0x0812b0ff,
+ 0x3fdf901c, 0x001fc7ed, 0x06b2fcfa,
+ 0x3faf902d, 0x001fd3e8, 0x055348f1,
+ 0x3f7f983f, 0x001fe3e5, 0x04038ce3,
+ 0x3f3fa454, 0x001fefe3, 0x02e3c8d1,
+ 0x3f0fb86a, 0x001ff7e4, 0x01c3e8c0,
+ 0x3ecfd880, 0x000fffe6, 0x00c404ac },
+ },
+ { 0x2200, {
+ 0x3fdf9c0b, 0x3fdf9c0b, 0x09725cf4,
+ 0x3fbf9818, 0x3fffa400, 0x0842a8f1,
+ 0x3f8f9827, 0x000fb3f7, 0x0702f0ec,
+ 0x3f5fa037, 0x000fc3ef, 0x05d330e4,
+ 0x3f2fac49, 0x001fcfea, 0x04a364d9,
+ 0x3effc05c, 0x001fdbe7, 0x038394ca,
+ 0x3ecfdc6f, 0x001fe7e6, 0x0273b0bb,
+ 0x3ea00083, 0x001fefe6, 0x0183c0a9 },
+ },
+ { 0x2400, {
+ 0x3f9fa014, 0x3f9fa014, 0x098260e6,
+ 0x3f7f9c23, 0x3fcf9c0a, 0x08629ce5,
+ 0x3f4fa431, 0x3fefa400, 0x0742d8e1,
+ 0x3f1fb440, 0x3fffb3f8, 0x062310d9,
+ 0x3eefc850, 0x000fbbf2, 0x050340d0,
+ 0x3ecfe062, 0x000fcbec, 0x041364c2,
+ 0x3ea00073, 0x001fd3ea, 0x03037cb5,
+ 0x3e902086, 0x001fdfe8, 0x022388a5 },
+ },
+ { 0x2600, {
+ 0x3f5fa81e, 0x3f5fa81e, 0x096258da,
+ 0x3f3fac2b, 0x3f8fa412, 0x088290d8,
+ 0x3f0fbc38, 0x3fafa408, 0x0772c8d5,
+ 0x3eefcc47, 0x3fcfa800, 0x0672f4ce,
+ 0x3ecfe456, 0x3fefaffa, 0x05531cc6,
+ 0x3eb00066, 0x3fffbbf3, 0x047334bb,
+ 0x3ea01c77, 0x000fc7ee, 0x039348ae,
+ 0x3ea04486, 0x000fd3eb, 0x02b350a1 },
+ },
+ { 0x2800, {
+ 0x3f2fb426, 0x3f2fb426, 0x094250ce,
+ 0x3f0fc032, 0x3f4fac1b, 0x086284cd,
+ 0x3eefd040, 0x3f7fa811, 0x0782acc9,
+ 0x3ecfe84c, 0x3f9fa807, 0x06a2d8c4,
+ 0x3eb0005b, 0x3fbfac00, 0x05b2f4bc,
+ 0x3eb0186a, 0x3fdfb3fa, 0x04c308b4,
+ 0x3eb04077, 0x3fefbbf4, 0x03f31ca8,
+ 0x3ec06884, 0x000fbff2, 0x03031c9e },
+ },
+ { 0x2a00, {
+ 0x3f0fc42d, 0x3f0fc42d, 0x090240c4,
+ 0x3eefd439, 0x3f2fb822, 0x08526cc2,
+ 0x3edfe845, 0x3f4fb018, 0x078294bf,
+ 0x3ec00051, 0x3f6fac0f, 0x06b2b4bb,
+ 0x3ec0185f, 0x3f8fac07, 0x05e2ccb4,
+ 0x3ec0386b, 0x3fafac00, 0x0502e8ac,
+ 0x3ed05c77, 0x3fcfb3fb, 0x0432f0a3,
+ 0x3ef08482, 0x3fdfbbf6, 0x0372f898 },
+ },
+ { 0x2c00, {
+ 0x3eefdc31, 0x3eefdc31, 0x08e238b8,
+ 0x3edfec3d, 0x3f0fc828, 0x082258b9,
+ 0x3ed00049, 0x3f1fc01e, 0x077278b6,
+ 0x3ed01455, 0x3f3fb815, 0x06c294b2,
+ 0x3ed03460, 0x3f5fb40d, 0x0602acac,
+ 0x3ef0506c, 0x3f7fb006, 0x0542c0a4,
+ 0x3f107476, 0x3f9fb400, 0x0472c89d,
+ 0x3f309c80, 0x3fbfb7fc, 0x03b2cc94 },
+ },
+ { 0x2e00, {
+ 0x3eefec37, 0x3eefec37, 0x088220b0,
+ 0x3ee00041, 0x3effdc2d, 0x07f244ae,
+ 0x3ee0144c, 0x3f0fd023, 0x07625cad,
+ 0x3ef02c57, 0x3f1fc81a, 0x06c274a9,
+ 0x3f004861, 0x3f3fbc13, 0x060288a6,
+ 0x3f20686b, 0x3f5fb80c, 0x05529c9e,
+ 0x3f408c74, 0x3f6fb805, 0x04b2ac96,
+ 0x3f80ac7e, 0x3f8fb800, 0x0402ac8e },
+ },
+ { 0x3000, {
+ 0x3ef0003a, 0x3ef0003a, 0x084210a6,
+ 0x3ef01045, 0x3effec32, 0x07b228a7,
+ 0x3f00284e, 0x3f0fdc29, 0x073244a4,
+ 0x3f104058, 0x3f0fd420, 0x06a258a2,
+ 0x3f305c62, 0x3f2fc818, 0x0612689d,
+ 0x3f508069, 0x3f3fc011, 0x05728496,
+ 0x3f80a072, 0x3f4fc00a, 0x04d28c90,
+ 0x3fc0c07b, 0x3f6fbc04, 0x04429088 },
+ },
+ { 0x3200, {
+ 0x3f00103e, 0x3f00103e, 0x07f1fc9e,
+ 0x3f102447, 0x3f000035, 0x0782149d,
+ 0x3f203c4f, 0x3f0ff02c, 0x07122c9c,
+ 0x3f405458, 0x3f0fe424, 0x06924099,
+ 0x3f607061, 0x3f1fd41d, 0x06024c97,
+ 0x3f909068, 0x3f2fcc16, 0x05726490,
+ 0x3fc0b070, 0x3f3fc80f, 0x04f26c8a,
+ 0x0000d077, 0x3f4fc409, 0x04627484 },
+ },
+ { 0x3400, {
+ 0x3f202040, 0x3f202040, 0x07a1e898,
+ 0x3f303449, 0x3f100c38, 0x0741fc98,
+ 0x3f504c50, 0x3f10002f, 0x06e21495,
+ 0x3f706459, 0x3f1ff028, 0x06722492,
+ 0x3fa08060, 0x3f1fe421, 0x05f2348f,
+ 0x3fd09c67, 0x3f1fdc19, 0x05824c89,
+ 0x0000bc6e, 0x3f2fd014, 0x04f25086,
+ 0x0040dc74, 0x3f3fcc0d, 0x04825c7f },
+ },
+ { 0x3600, {
+ 0x3f403042, 0x3f403042, 0x0761d890,
+ 0x3f504848, 0x3f301c3b, 0x0701f090,
+ 0x3f805c50, 0x3f200c33, 0x06a2008f,
+ 0x3fa07458, 0x3f10002b, 0x06520c8d,
+ 0x3fd0905e, 0x3f1ff424, 0x05e22089,
+ 0x0000ac65, 0x3f1fe81d, 0x05823483,
+ 0x0030cc6a, 0x3f2fdc18, 0x04f23c81,
+ 0x0080e871, 0x3f2fd412, 0x0482407c },
+ },
+ { 0x3800, {
+ 0x3f604043, 0x3f604043, 0x0721c88a,
+ 0x3f80544a, 0x3f502c3c, 0x06d1d88a,
+ 0x3fb06851, 0x3f301c35, 0x0681e889,
+ 0x3fd08456, 0x3f30082f, 0x0611fc88,
+ 0x00009c5d, 0x3f200027, 0x05d20884,
+ 0x0030b863, 0x3f2ff421, 0x05621880,
+ 0x0070d468, 0x3f2fe81b, 0x0502247c,
+ 0x00c0ec6f, 0x3f2fe015, 0x04a22877 },
+ },
+ { 0x3a00, {
+ 0x3f904c44, 0x3f904c44, 0x06e1b884,
+ 0x3fb0604a, 0x3f70383e, 0x0691c885,
+ 0x3fe07451, 0x3f502c36, 0x0661d483,
+ 0x00009055, 0x3f401831, 0x0601ec81,
+ 0x0030a85b, 0x3f300c2a, 0x05b1f480,
+ 0x0070c061, 0x3f300024, 0x0562047a,
+ 0x00b0d867, 0x3f3ff41e, 0x05020c77,
+ 0x00f0f46b, 0x3f2fec19, 0x04a21474 },
+ },
+ { 0x3c00, {
+ 0x3fb05c43, 0x3fb05c43, 0x06c1b07e,
+ 0x3fe06c4b, 0x3f902c3f, 0x0681c081,
+ 0x0000844f, 0x3f703838, 0x0631cc7d,
+ 0x00309855, 0x3f602433, 0x05d1d47e,
+ 0x0060b459, 0x3f50142e, 0x0581e47b,
+ 0x00a0c85f, 0x3f400828, 0x0531f078,
+ 0x00e0e064, 0x3f300021, 0x0501fc73,
+ 0x00b0fc6a, 0x3f3ff41d, 0x04a20873 },
+ },
+ { 0x3e00, {
+ 0x3fe06444, 0x3fe06444, 0x0681a07a,
+ 0x00007849, 0x3fc0503f, 0x0641b07a,
+ 0x0020904d, 0x3fa0403a, 0x05f1c07a,
+ 0x0060a453, 0x3f803034, 0x05c1c878,
+ 0x0090b858, 0x3f70202f, 0x0571d477,
+ 0x00d0d05d, 0x3f501829, 0x0531e073,
+ 0x0110e462, 0x3f500825, 0x04e1e471,
+ 0x01510065, 0x3f40001f, 0x04a1f06d },
+ },
+ { 0x4000, {
+ 0x00007044, 0x00007044, 0x06519476,
+ 0x00208448, 0x3fe05c3f, 0x0621a476,
+ 0x0050984d, 0x3fc04c3a, 0x05e1b075,
+ 0x0080ac52, 0x3fa03c35, 0x05a1b875,
+ 0x00c0c056, 0x3f803030, 0x0561c473,
+ 0x0100d45b, 0x3f70202b, 0x0521d46f,
+ 0x0140e860, 0x3f601427, 0x04d1d46e,
+ 0x01810064, 0x3f500822, 0x0491dc6b },
+ },
+ { 0x5000, {
+ 0x0110a442, 0x0110a442, 0x0551545e,
+ 0x0140b045, 0x00e0983f, 0x0531585f,
+ 0x0160c047, 0x00c08c3c, 0x0511645e,
+ 0x0190cc4a, 0x00908039, 0x04f1685f,
+ 0x01c0dc4c, 0x00707436, 0x04d1705e,
+ 0x0200e850, 0x00506833, 0x04b1785b,
+ 0x0230f453, 0x00305c30, 0x0491805a,
+ 0x02710056, 0x0010542d, 0x04718059 },
+ },
+ { 0x6000, {
+ 0x01c0bc40, 0x01c0bc40, 0x04c13052,
+ 0x01e0c841, 0x01a0b43d, 0x04c13851,
+ 0x0210cc44, 0x0180a83c, 0x04a13453,
+ 0x0230d845, 0x0160a03a, 0x04913c52,
+ 0x0260e047, 0x01409838, 0x04714052,
+ 0x0280ec49, 0x01208c37, 0x04514c50,
+ 0x02b0f44b, 0x01008435, 0x04414c50,
+ 0x02d1004c, 0x00e07c33, 0x0431544f },
+ },
+ { 0x7000, {
+ 0x0230c83e, 0x0230c83e, 0x04711c4c,
+ 0x0250d03f, 0x0210c43c, 0x0471204b,
+ 0x0270d840, 0x0200b83c, 0x0451244b,
+ 0x0290dc42, 0x01e0b43a, 0x0441244c,
+ 0x02b0e443, 0x01c0b038, 0x0441284b,
+ 0x02d0ec44, 0x01b0a438, 0x0421304a,
+ 0x02f0f445, 0x0190a036, 0x04213449,
+ 0x0310f847, 0x01709c34, 0x04213848 },
+ },
+ { 0x8000, {
+ 0x0280d03d, 0x0280d03d, 0x04310c48,
+ 0x02a0d43e, 0x0270c83c, 0x04311047,
+ 0x02b0dc3e, 0x0250c83a, 0x04311447,
+ 0x02d0e040, 0x0240c03a, 0x04211446,
+ 0x02e0e840, 0x0220bc39, 0x04111847,
+ 0x0300e842, 0x0210b438, 0x04012445,
+ 0x0310f043, 0x0200b037, 0x04012045,
+ 0x0330f444, 0x01e0ac36, 0x03f12445 },
+ },
+ { 0xefff, {
+ 0x0340dc3a, 0x0340dc3a, 0x03b0ec40,
+ 0x0340e03a, 0x0330e039, 0x03c0f03e,
+ 0x0350e03b, 0x0330dc39, 0x03c0ec3e,
+ 0x0350e43a, 0x0320dc38, 0x03c0f43e,
+ 0x0360e43b, 0x0320d839, 0x03b0f03e,
+ 0x0360e83b, 0x0310d838, 0x03c0fc3b,
+ 0x0370e83b, 0x0310d439, 0x03a0f83d,
+ 0x0370e83c, 0x0300d438, 0x03b0fc3c },
+ }
+};
+
+static void rvin_set_coeff(struct rvin_dev *vin, unsigned short xs)
+{
+ int i;
+ const struct vin_coeff *p_prev_set = NULL;
+ const struct vin_coeff *p_set = NULL;
+
+ /* Look for suitable coefficient values */
+ for (i = 0; i < ARRAY_SIZE(vin_coeff_set); i++) {
+ p_prev_set = p_set;
+ p_set = &vin_coeff_set[i];
+
+ if (xs < p_set->xs_value)
+ break;
+ }
+
+ /* Use previous value if its XS value is closer */
+ if (p_prev_set && p_set &&
+ xs - p_prev_set->xs_value < p_set->xs_value - xs)
+ p_set = p_prev_set;
+
+ /* Set coefficient registers */
+ rvin_write(vin, p_set->coeff_set[0], VNC1A_REG);
+ rvin_write(vin, p_set->coeff_set[1], VNC1B_REG);
+ rvin_write(vin, p_set->coeff_set[2], VNC1C_REG);
+
+ rvin_write(vin, p_set->coeff_set[3], VNC2A_REG);
+ rvin_write(vin, p_set->coeff_set[4], VNC2B_REG);
+ rvin_write(vin, p_set->coeff_set[5], VNC2C_REG);
+
+ rvin_write(vin, p_set->coeff_set[6], VNC3A_REG);
+ rvin_write(vin, p_set->coeff_set[7], VNC3B_REG);
+ rvin_write(vin, p_set->coeff_set[8], VNC3C_REG);
+
+ rvin_write(vin, p_set->coeff_set[9], VNC4A_REG);
+ rvin_write(vin, p_set->coeff_set[10], VNC4B_REG);
+ rvin_write(vin, p_set->coeff_set[11], VNC4C_REG);
+
+ rvin_write(vin, p_set->coeff_set[12], VNC5A_REG);
+ rvin_write(vin, p_set->coeff_set[13], VNC5B_REG);
+ rvin_write(vin, p_set->coeff_set[14], VNC5C_REG);
+
+ rvin_write(vin, p_set->coeff_set[15], VNC6A_REG);
+ rvin_write(vin, p_set->coeff_set[16], VNC6B_REG);
+ rvin_write(vin, p_set->coeff_set[17], VNC6C_REG);
+
+ rvin_write(vin, p_set->coeff_set[18], VNC7A_REG);
+ rvin_write(vin, p_set->coeff_set[19], VNC7B_REG);
+ rvin_write(vin, p_set->coeff_set[20], VNC7C_REG);
+
+ rvin_write(vin, p_set->coeff_set[21], VNC8A_REG);
+ rvin_write(vin, p_set->coeff_set[22], VNC8B_REG);
+ rvin_write(vin, p_set->coeff_set[23], VNC8C_REG);
+}
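
The search above stops at the first table entry whose xs_value exceeds the requested factor and then picks whichever neighbour is numerically closer. A freestanding sketch of the same selection over a reduced, hypothetical three-entry table:

#include <stdio.h>

struct entry { unsigned short xs_value; };

static const struct entry table[] = { { 0x1000 }, { 0x2000 }, { 0x4000 } };

static const struct entry *pick(unsigned short xs)
{
	const struct entry *prev = NULL, *cur = NULL;
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		prev = cur;
		cur = &table[i];
		if (xs < cur->xs_value)
			break;
	}

	/* Use the previous entry if its xs_value is closer */
	if (prev && xs - prev->xs_value < cur->xs_value - xs)
		return prev;
	return cur;
}

int main(void)
{
	printf("0x%x\n", pick(0x2800)->xs_value);	/* prints 0x2000 */
	return 0;
}
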
+
+void rvin_crop_scale_comp(struct rvin_dev *vin)
+{
+ u32 xs, ys;
+
+ /* Set Start/End Pixel/Line Pre-Clip */
+ rvin_write(vin, vin->crop.left, VNSPPRC_REG);
+ rvin_write(vin, vin->crop.left + vin->crop.width - 1, VNEPPRC_REG);
+ switch (vin->format.field) {
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ rvin_write(vin, vin->crop.top / 2, VNSLPRC_REG);
+ rvin_write(vin, (vin->crop.top + vin->crop.height) / 2 - 1,
+ VNELPRC_REG);
+ break;
+ default:
+ rvin_write(vin, vin->crop.top, VNSLPRC_REG);
+ rvin_write(vin, vin->crop.top + vin->crop.height - 1,
+ VNELPRC_REG);
+ break;
+ }
+
+ /* Set scaling coefficient */
+ ys = 0;
+ if (vin->crop.height != vin->compose.height)
+ ys = (4096 * vin->crop.height) / vin->compose.height;
+ rvin_write(vin, ys, VNYS_REG);
+
+ xs = 0;
+ if (vin->crop.width != vin->compose.width)
+ xs = (4096 * vin->crop.width) / vin->compose.width;
+
+ /* Horizontal upscaling is up to double size */
+ if (xs > 0 && xs < 2048)
+ xs = 2048;
+
+ rvin_write(vin, xs, VNXS_REG);
+
+	/* Horizontal upscaling is carried out by scaling down from double size */
+ if (xs < 4096)
+ xs *= 2;
+
+ rvin_set_coeff(vin, xs);
+
+ /* Set Start/End Pixel/Line Post-Clip */
+ rvin_write(vin, 0, VNSPPOC_REG);
+ rvin_write(vin, 0, VNSLPOC_REG);
+ rvin_write(vin, vin->format.width - 1, VNEPPOC_REG);
+ switch (vin->format.field) {
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ rvin_write(vin, vin->format.height / 2 - 1, VNELPOC_REG);
+ break;
+ default:
+ rvin_write(vin, vin->format.height - 1, VNELPOC_REG);
+ break;
+ }
+
+ if (vin->format.pixelformat == V4L2_PIX_FMT_NV16)
+ rvin_write(vin, ALIGN(vin->format.width, 0x20), VNIS_REG);
+ else
+ rvin_write(vin, ALIGN(vin->format.width, 0x10), VNIS_REG);
+
+ vin_dbg(vin,
+ "Pre-Clip: %ux%u@%u:%u YS: %d XS: %d Post-Clip: %ux%u@%u:%u\n",
+ vin->crop.width, vin->crop.height, vin->crop.left,
+ vin->crop.top, ys, xs, vin->format.width, vin->format.height,
+ 0, 0);
+}
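
The VNYS/VNXS values programmed above are 4096-based fixed-point ratios of crop size to compose size; horizontal upscaling is capped at 2x by clamping xs to 2048, and the coefficient lookup operates on the doubled value when upscaling. A worked sketch of that arithmetic:

#include <stdio.h>

/* 4096-based scale factor as written to VNXS/VNYS (0 means no scaling) */
static unsigned int scale_factor(unsigned int crop, unsigned int compose)
{
	return crop == compose ? 0 : (4096 * crop) / compose;
}

int main(void)
{
	unsigned int xs;

	/* Downscale 1280 -> 640 pixels: 8192, i.e. 2.0 in fixed point */
	printf("down: %u\n", scale_factor(1280, 640));

	/* Upscale 640 -> 1280 pixels: 2048, the 2x upscale limit */
	xs = scale_factor(640, 1280);
	if (xs > 0 && xs < 2048)
		xs = 2048;
	printf("up: %u, coefficient lookup uses %u\n",
	       xs, xs < 4096 ? xs * 2 : xs);
	return 0;
}
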
+
+void rvin_scale_try(struct rvin_dev *vin, struct v4l2_pix_format *pix,
+ u32 width, u32 height)
+{
+ /* All VIN channels on Gen2 have scalers */
+ pix->width = width;
+ pix->height = height;
+}
+
+/* -----------------------------------------------------------------------------
+ * DMA Functions
+ */
+
+#define RVIN_TIMEOUT_MS 100
+#define RVIN_RETRIES 10
+
+struct rvin_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+};
+
+#define to_buf_list(vb2_buffer) (&container_of(vb2_buffer, \
+ struct rvin_buffer, \
+ vb)->list)
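
to_buf_list() is the usual container_of() pattern: from a pointer to the embedded vb2_v4l2_buffer it recovers the enclosing rvin_buffer and yields its list head. The same idea as a freestanding sketch with hypothetical types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct vb { int index; };
struct buffer { struct vb vb; int flags; };

int main(void)
{
	struct buffer buf = { .vb = { .index = 3 } };
	struct vb *vbp = &buf.vb;

	/* Walk back from the embedded member to the wrapping structure */
	struct buffer *b = container_of(vbp, struct buffer, vb);

	printf("%d\n", b->vb.index);	/* prints 3 */
	return 0;
}
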
+
+/* Move a buffer from the queue to a HW slot */
+static bool rvin_fill_hw_slot(struct rvin_dev *vin, int slot)
+{
+ struct rvin_buffer *buf;
+ struct vb2_v4l2_buffer *vbuf;
+ dma_addr_t phys_addr_top;
+
+ if (vin->queue_buf[slot] != NULL)
+ return true;
+
+ if (list_empty(&vin->buf_list))
+ return false;
+
+ vin_dbg(vin, "Filling HW slot: %d\n", slot);
+
+ /* Keep track of buffer we give to HW */
+ buf = list_entry(vin->buf_list.next, struct rvin_buffer, list);
+ vbuf = &buf->vb;
+ list_del_init(to_buf_list(vbuf));
+ vin->queue_buf[slot] = vbuf;
+
+ /* Setup DMA */
+ phys_addr_top = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
+ rvin_set_slot_addr(vin, slot, phys_addr_top);
+
+ return true;
+}
+
+static bool rvin_fill_hw(struct rvin_dev *vin)
+{
+ int slot, limit;
+
+ limit = vin->continuous ? HW_BUFFER_NUM : 1;
+
+ for (slot = 0; slot < limit; slot++)
+ if (!rvin_fill_hw_slot(vin, slot))
+ return false;
+ return true;
+}
+
+static irqreturn_t rvin_irq(int irq, void *data)
+{
+ struct rvin_dev *vin = data;
+ u32 int_status;
+ int slot;
+ unsigned int sequence, handled = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vin->qlock, flags);
+
+ int_status = rvin_get_interrupt_status(vin);
+ if (!int_status)
+ goto done;
+
+ rvin_ack_interrupt(vin);
+ handled = 1;
+
+ /* Nothing to do if capture status is 'STOPPED' */
+ if (vin->state == STOPPED) {
+ vin_dbg(vin, "IRQ while state stopped\n");
+ goto done;
+ }
+
+ /* Nothing to do if capture status is 'STOPPING' */
+ if (vin->state == STOPPING) {
+ vin_dbg(vin, "IRQ while state stopping\n");
+ goto done;
+ }
+
+ /* Prepare for capture and update state */
+ slot = rvin_get_active_slot(vin);
+ sequence = vin->sequence++;
+
+ vin_dbg(vin, "IRQ %02d: %d\tbuf0: %c buf1: %c buf2: %c\tmore: %d\n",
+ sequence, slot,
+ slot == 0 ? 'x' : vin->queue_buf[0] != NULL ? '1' : '0',
+ slot == 1 ? 'x' : vin->queue_buf[1] != NULL ? '1' : '0',
+ slot == 2 ? 'x' : vin->queue_buf[2] != NULL ? '1' : '0',
+ !list_empty(&vin->buf_list));
+
+	/* If HW has written to a slot that is not prepared, we are in trouble */
+ if (WARN_ON((vin->queue_buf[slot] == NULL)))
+ goto done;
+
+ /* Capture frame */
+ vin->queue_buf[slot]->field = vin->format.field;
+ vin->queue_buf[slot]->sequence = sequence;
+ vin->queue_buf[slot]->vb2_buf.timestamp = ktime_get_ns();
+ vb2_buffer_done(&vin->queue_buf[slot]->vb2_buf, VB2_BUF_STATE_DONE);
+ vin->queue_buf[slot] = NULL;
+
+ /* Prepare for next frame */
+ if (!rvin_fill_hw(vin)) {
+
+ /*
+ * Can't supply HW with new buffers fast enough. Halt
+ * capture until more buffers are available.
+ */
+ vin->state = STALLED;
+
+ /*
+ * The continuous capturing requires an explicit stop
+ * operation when there is no buffer to be set into
+ * the VnMBm registers.
+ */
+ if (vin->continuous) {
+ rvin_capture_off(vin);
+ vin_dbg(vin, "IRQ %02d: hw not ready stop\n", sequence);
+ }
+ } else {
+ /*
+ * The single capturing requires an explicit capture
+ * operation to fetch the next frame.
+ */
+ if (!vin->continuous)
+ rvin_capture_on(vin);
+ }
+done:
+ spin_unlock_irqrestore(&vin->qlock, flags);
+
+ return IRQ_RETVAL(handled);
+}
+
+/* Need to hold qlock before calling */
+static void return_all_buffers(struct rvin_dev *vin,
+ enum vb2_buffer_state state)
+{
+ struct rvin_buffer *buf, *node;
+ int i;
+
+ for (i = 0; i < HW_BUFFER_NUM; i++) {
+ if (vin->queue_buf[i]) {
+ vb2_buffer_done(&vin->queue_buf[i]->vb2_buf,
+ state);
+ vin->queue_buf[i] = NULL;
+ }
+ }
+
+ list_for_each_entry_safe(buf, node, &vin->buf_list, list) {
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ list_del(&buf->list);
+ }
+}
+
+static int rvin_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct rvin_dev *vin = vb2_get_drv_priv(vq);
+
+ /* Make sure the image size is large enough. */
+ if (*nplanes)
+ return sizes[0] < vin->format.sizeimage ? -EINVAL : 0;
+
+ *nplanes = 1;
+ sizes[0] = vin->format.sizeimage;
+
+ return 0;
+}
+
+static int rvin_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct rvin_dev *vin = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long size = vin->format.sizeimage;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ vin_err(vin, "buffer too small (%lu < %lu)\n",
+ vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+
+ return 0;
+}
+
+static void rvin_buffer_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct rvin_dev *vin = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vin->qlock, flags);
+
+ list_add_tail(to_buf_list(vbuf), &vin->buf_list);
+
+ /*
+ * If capture is stalled add buffer to HW and restart
+ * capturing if HW is ready to continue.
+ */
+ if (vin->state == STALLED)
+ if (rvin_fill_hw(vin))
+ rvin_capture_on(vin);
+
+ spin_unlock_irqrestore(&vin->qlock, flags);
+}
+
+static int rvin_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct rvin_dev *vin = vb2_get_drv_priv(vq);
+ struct v4l2_subdev *sd;
+ unsigned long flags;
+ int ret;
+
+ sd = vin_to_source(vin);
+ v4l2_subdev_call(sd, video, s_stream, 1);
+
+ spin_lock_irqsave(&vin->qlock, flags);
+
+ vin->state = RUNNING;
+ vin->sequence = 0;
+
+	/* Continuous capture requires more buffers than there are HW slots */
+ vin->continuous = count > HW_BUFFER_NUM;
+
+ /*
+	 * This should never happen, but if we don't have enough
+	 * buffers for the HW, bail out.
+ */
+ if (!rvin_fill_hw(vin)) {
+ vin_err(vin, "HW not ready to start, not enough buffers available\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = rvin_capture_start(vin);
+out:
+ /* Return all buffers if something went wrong */
+ if (ret) {
+ return_all_buffers(vin, VB2_BUF_STATE_QUEUED);
+ v4l2_subdev_call(sd, video, s_stream, 0);
+ }
+
+ spin_unlock_irqrestore(&vin->qlock, flags);
+
+ return ret;
+}
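
Continuous mode is thus chosen by user space implicitly, simply by queueing more than HW_BUFFER_NUM (3) buffers before stream-on. A minimal user-space mmap streaming sketch, assuming the node is /dev/video0 and omitting error handling and the mmap() of the buffer memory:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	int fd = open("/dev/video0", O_RDWR);
	int type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	struct v4l2_requestbuffers req = {
		.count = 4,	/* > 3 HW slots: selects continuous mode */
		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
		.memory = V4L2_MEMORY_MMAP,
	};
	struct v4l2_buffer buf = {
		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
		.memory = V4L2_MEMORY_MMAP,
	};
	unsigned int i;

	ioctl(fd, VIDIOC_REQBUFS, &req);

	for (i = 0; i < req.count; i++) {
		struct v4l2_buffer b = {
			.index = i,
			.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
			.memory = V4L2_MEMORY_MMAP,
		};
		ioctl(fd, VIDIOC_QBUF, &b);
	}

	ioctl(fd, VIDIOC_STREAMON, &type);

	ioctl(fd, VIDIOC_DQBUF, &buf);	/* blocks until a frame is done */
	ioctl(fd, VIDIOC_QBUF, &buf);	/* recycle; loop here in a real app */

	ioctl(fd, VIDIOC_STREAMOFF, &type);
	return 0;
}
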
+
+static void rvin_stop_streaming(struct vb2_queue *vq)
+{
+ struct rvin_dev *vin = vb2_get_drv_priv(vq);
+ struct v4l2_subdev *sd;
+ unsigned long flags;
+ int retries = 0;
+
+ spin_lock_irqsave(&vin->qlock, flags);
+
+ vin->state = STOPPING;
+
+ /* Wait for streaming to stop */
+ while (retries++ < RVIN_RETRIES) {
+
+ rvin_capture_stop(vin);
+
+ /* Check if HW is stopped */
+ if (!rvin_capture_active(vin)) {
+ vin->state = STOPPED;
+ break;
+ }
+
+ spin_unlock_irqrestore(&vin->qlock, flags);
+ msleep(RVIN_TIMEOUT_MS);
+ spin_lock_irqsave(&vin->qlock, flags);
+ }
+
+ if (vin->state != STOPPED) {
+ /*
+		 * If this happens, something has gone horribly wrong.
+		 * Set the state to stopped to prevent the interrupt handler
+		 * from making things worse...
+ */
+		vin_err(vin, "Failed to stop HW, something is seriously broken\n");
+ vin->state = STOPPED;
+ }
+
+ /* Release all active buffers */
+ return_all_buffers(vin, VB2_BUF_STATE_ERROR);
+
+ spin_unlock_irqrestore(&vin->qlock, flags);
+
+ sd = vin_to_source(vin);
+ v4l2_subdev_call(sd, video, s_stream, 0);
+
+ /* disable interrupts */
+ rvin_disable_interrupts(vin);
+}
+
+static const struct vb2_ops rvin_qops = {
+ .queue_setup = rvin_queue_setup,
+ .buf_prepare = rvin_buffer_prepare,
+ .buf_queue = rvin_buffer_queue,
+ .start_streaming = rvin_start_streaming,
+ .stop_streaming = rvin_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+void rvin_dma_remove(struct rvin_dev *vin)
+{
+ mutex_destroy(&vin->lock);
+
+ v4l2_device_unregister(&vin->v4l2_dev);
+}
+
+int rvin_dma_probe(struct rvin_dev *vin, int irq)
+{
+ struct vb2_queue *q = &vin->queue;
+ int i, ret;
+
+ /* Initialize the top-level structure */
+ ret = v4l2_device_register(vin->dev, &vin->v4l2_dev);
+ if (ret)
+ return ret;
+
+ mutex_init(&vin->lock);
+ INIT_LIST_HEAD(&vin->buf_list);
+
+ spin_lock_init(&vin->qlock);
+
+ vin->state = STOPPED;
+
+ for (i = 0; i < HW_BUFFER_NUM; i++)
+ vin->queue_buf[i] = NULL;
+
+ /* buffer queue */
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF;
+ q->lock = &vin->lock;
+ q->drv_priv = vin;
+ q->buf_struct_size = sizeof(struct rvin_buffer);
+ q->ops = &rvin_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 2;
+ q->dev = vin->dev;
+
+ ret = vb2_queue_init(q);
+ if (ret < 0) {
+ vin_err(vin, "failed to initialize VB2 queue\n");
+ goto error;
+ }
+
+ /* irq */
+ ret = devm_request_irq(vin->dev, irq, rvin_irq, IRQF_SHARED,
+ KBUILD_MODNAME, vin);
+ if (ret) {
+ vin_err(vin, "failed to request irq\n");
+ goto error;
+ }
+
+ return 0;
+error:
+ rvin_dma_remove(vin);
+
+ return ret;
+}
diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c
new file mode 100644
index 000000000..10a5c107e
--- /dev/null
+++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c
@@ -0,0 +1,874 @@
+/*
+ * Driver for Renesas R-Car VIN
+ *
+ * Copyright (C) 2016 Renesas Electronics Corp.
+ * Copyright (C) 2011-2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Cogent Embedded, Inc., <source@cogentembedded.com>
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * Based on the soc-camera rcar_vin driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-rect.h>
+
+#include "rcar-vin.h"
+
+#define RVIN_DEFAULT_FORMAT V4L2_PIX_FMT_YUYV
+#define RVIN_MAX_WIDTH 2048
+#define RVIN_MAX_HEIGHT 2048
+
+/* -----------------------------------------------------------------------------
+ * Format Conversions
+ */
+
+static const struct rvin_video_format rvin_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .bpp = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .bpp = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .bpp = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .bpp = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_XRGB555,
+ .bpp = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_XBGR32,
+ .bpp = 4,
+ },
+};
+
+const struct rvin_video_format *rvin_format_from_pixel(u32 pixelformat)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rvin_formats); i++)
+ if (rvin_formats[i].fourcc == pixelformat)
+ return rvin_formats + i;
+
+ return NULL;
+}
+
+static u32 rvin_format_bytesperline(struct v4l2_pix_format *pix)
+{
+ const struct rvin_video_format *fmt;
+
+ fmt = rvin_format_from_pixel(pix->pixelformat);
+
+ if (WARN_ON(!fmt))
+ return -EINVAL;
+
+ return pix->width * fmt->bpp;
+}
+
+static u32 rvin_format_sizeimage(struct v4l2_pix_format *pix)
+{
+ if (pix->pixelformat == V4L2_PIX_FMT_NV16)
+ return pix->bytesperline * pix->height * 2;
+
+ return pix->bytesperline * pix->height;
+}
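
NV16 is the only semi-planar case handled here: its bpp of 1 covers just the luma line, so sizeimage is doubled for the chroma plane whose start address is programmed through VNUVAOF_REG in rvin_setup(). Worked numbers for a hypothetical 640x480 frame:

#include <stdio.h>

int main(void)
{
	unsigned int width = 640, height = 480;

	/* NV16: bpp = 1, bytesperline is the Y plane only */
	printf("NV16 sizeimage: %u\n", width * 1 * height * 2);

	/* YUYV: bpp = 2, one interleaved plane; same 4:2:2 total */
	printf("YUYV sizeimage: %u\n", width * 2 * height);
	return 0;
}
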
+
+/* -----------------------------------------------------------------------------
+ * V4L2
+ */
+
+static int __rvin_try_format_source(struct rvin_dev *vin,
+ u32 which,
+ struct v4l2_pix_format *pix,
+ struct rvin_source_fmt *source)
+{
+ struct v4l2_subdev *sd;
+ struct v4l2_subdev_pad_config *pad_cfg;
+ struct v4l2_subdev_format format = {
+ .which = which,
+ };
+ int ret;
+
+ sd = vin_to_source(vin);
+
+ v4l2_fill_mbus_format(&format.format, pix, vin->source.code);
+
+ pad_cfg = v4l2_subdev_alloc_pad_config(sd);
+ if (pad_cfg == NULL)
+ return -ENOMEM;
+
+ format.pad = vin->src_pad_idx;
+
+ ret = v4l2_device_call_until_err(sd->v4l2_dev, 0, pad, set_fmt,
+ pad_cfg, &format);
+ if (ret < 0)
+ goto cleanup;
+
+ v4l2_fill_pix_format(pix, &format.format);
+
+ source->width = pix->width;
+ source->height = pix->height;
+
+ vin_dbg(vin, "Source resolution: %ux%u\n", source->width,
+ source->height);
+
+cleanup:
+ v4l2_subdev_free_pad_config(pad_cfg);
+	return ret;
+}
+
+static int __rvin_try_format(struct rvin_dev *vin,
+ u32 which,
+ struct v4l2_pix_format *pix,
+ struct rvin_source_fmt *source)
+{
+ const struct rvin_video_format *info;
+	u32 rwidth, rheight, walign;
+	int ret;
+
+ /* Requested */
+ rwidth = pix->width;
+ rheight = pix->height;
+
+ /*
+ * Retrieve format information and select the current format if the
+ * requested format isn't supported.
+ */
+ info = rvin_format_from_pixel(pix->pixelformat);
+ if (!info) {
+ vin_dbg(vin, "Format %x not found, keeping %x\n",
+ pix->pixelformat, vin->format.pixelformat);
+ *pix = vin->format;
+ pix->width = rwidth;
+ pix->height = rheight;
+ }
+
+ /* Always recalculate */
+ pix->bytesperline = 0;
+ pix->sizeimage = 0;
+
+	/* Limit to source capabilities */
+	ret = __rvin_try_format_source(vin, which, pix, source);
+	if (ret)
+		return ret;
+
+ /* If source can't match format try if VIN can scale */
+ if (source->width != rwidth || source->height != rheight)
+ rvin_scale_try(vin, pix, rwidth, rheight);
+
+	/* HW limits width to a multiple of 32 (2^5) for NV16, else 2 (2^1) */
+	walign = pix->pixelformat == V4L2_PIX_FMT_NV16 ? 5 : 1;
+
+ /* Limit to VIN capabilities */
+ v4l_bound_align_image(&pix->width, 2, RVIN_MAX_WIDTH, walign,
+ &pix->height, 4, RVIN_MAX_HEIGHT, 2, 0);
+
+ switch (pix->field) {
+ case V4L2_FIELD_NONE:
+ case V4L2_FIELD_TOP:
+ case V4L2_FIELD_BOTTOM:
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ case V4L2_FIELD_INTERLACED:
+ break;
+ default:
+ pix->field = V4L2_FIELD_NONE;
+ break;
+ }
+
+ pix->bytesperline = max_t(u32, pix->bytesperline,
+ rvin_format_bytesperline(pix));
+ pix->sizeimage = max_t(u32, pix->sizeimage,
+ rvin_format_sizeimage(pix));
+
+ vin_dbg(vin, "Requested %ux%u Got %ux%u bpl: %d size: %d\n",
+ rwidth, rheight, pix->width, pix->height,
+ pix->bytesperline, pix->sizeimage);
+
+ return 0;
+}
+
+static int rvin_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+
+ strlcpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
+ strlcpy(cap->card, "R_Car_VIN", sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(vin->dev));
+ return 0;
+}
+
+static int rvin_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct rvin_source_fmt source;
+
+ return __rvin_try_format(vin, V4L2_SUBDEV_FORMAT_TRY, &f->fmt.pix,
+ &source);
+}
+
+static int rvin_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct rvin_source_fmt source;
+ int ret;
+
+ if (vb2_is_busy(&vin->queue))
+ return -EBUSY;
+
+ ret = __rvin_try_format(vin, V4L2_SUBDEV_FORMAT_ACTIVE, &f->fmt.pix,
+ &source);
+ if (ret)
+ return ret;
+
+ vin->source.width = source.width;
+ vin->source.height = source.height;
+
+ vin->format = f->fmt.pix;
+
+ return 0;
+}
+
+static int rvin_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+
+ f->fmt.pix = vin->format;
+
+ return 0;
+}
+
+static int rvin_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index >= ARRAY_SIZE(rvin_formats))
+ return -EINVAL;
+
+ f->pixelformat = rvin_formats[f->index].fourcc;
+
+ return 0;
+}
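
From user space this is the usual enumerate-then-set sequence; a fourcc the driver does not know is silently replaced by the active format in __rvin_try_format(). A sketch, assuming /dev/video0:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	int fd = open("/dev/video0", O_RDWR);
	struct v4l2_fmtdesc desc = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
	struct v4l2_format f = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };

	while (!ioctl(fd, VIDIOC_ENUM_FMT, &desc)) {
		printf("format %u: %.4s\n", desc.index,
		       (char *)&desc.pixelformat);
		desc.index++;
	}

	f.fmt.pix.width = 640;
	f.fmt.pix.height = 480;
	f.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
	ioctl(fd, VIDIOC_S_FMT, &f);	/* the driver may adjust the size */
	return 0;
}
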
+
+static int rvin_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ s->r.left = s->r.top = 0;
+ s->r.width = vin->source.width;
+ s->r.height = vin->source.height;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ s->r = vin->crop;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ s->r.left = s->r.top = 0;
+ s->r.width = vin->format.width;
+ s->r.height = vin->format.height;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ s->r = vin->compose;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rvin_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ const struct rvin_video_format *fmt;
+ struct v4l2_rect r = s->r;
+ struct v4l2_rect max_rect;
+ struct v4l2_rect min_rect = {
+ .width = 6,
+ .height = 2,
+ };
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ v4l2_rect_set_min_size(&r, &min_rect);
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ /* Can't crop outside of source input */
+ max_rect.top = max_rect.left = 0;
+ max_rect.width = vin->source.width;
+ max_rect.height = vin->source.height;
+ v4l2_rect_map_inside(&r, &max_rect);
+
+ v4l_bound_align_image(&r.width, 2, vin->source.width, 1,
+ &r.height, 4, vin->source.height, 2, 0);
+
+ r.top = clamp_t(s32, r.top, 0, vin->source.height - r.height);
+ r.left = clamp_t(s32, r.left, 0, vin->source.width - r.width);
+
+ vin->crop = s->r = r;
+
+ vin_dbg(vin, "Cropped %dx%d@%d:%d of %dx%d\n",
+ r.width, r.height, r.left, r.top,
+ vin->source.width, vin->source.height);
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ /* Make sure compose rect fits inside output format */
+ max_rect.top = max_rect.left = 0;
+ max_rect.width = vin->format.width;
+ max_rect.height = vin->format.height;
+ v4l2_rect_map_inside(&r, &max_rect);
+
+ /*
+		 * Composing is done by adding an offset to the buffer address;
+		 * the HW wants this address to be aligned to HW_BUFFER_MASK.
+		 * Make sure the top and left values meet this requirement.
+ */
+ while ((r.top * vin->format.bytesperline) & HW_BUFFER_MASK)
+ r.top--;
+
+ fmt = rvin_format_from_pixel(vin->format.pixelformat);
+ while ((r.left * fmt->bpp) & HW_BUFFER_MASK)
+ r.left--;
+
+ vin->compose = s->r = r;
+
+ vin_dbg(vin, "Compose %dx%d@%d:%d in %dx%d\n",
+ r.width, r.height, r.left, r.top,
+ vin->format.width, vin->format.height);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* HW supports modifying configuration while running */
+ rvin_crop_scale_comp(vin);
+
+ return 0;
+}
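
Cropping and composing are therefore driven entirely through the selection API: crop rectangles are clamped to the source window, and compose rectangles are walked down until the implied buffer offset is 128-byte aligned. A user-space sketch requesting a centered crop from a hypothetical 1280x720 source:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	int fd = open("/dev/video0", O_RDWR);
	struct v4l2_selection sel = {
		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
		.target = V4L2_SEL_TGT_CROP,
		.r = { .left = 320, .top = 180, .width = 640, .height = 360 },
	};

	/* The driver clamps and aligns the rectangle, returning it in sel.r */
	ioctl(fd, VIDIOC_S_SELECTION, &sel);
	printf("got %ux%u@%d:%d\n", sel.r.width, sel.r.height,
	       sel.r.left, sel.r.top);
	return 0;
}
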
+
+static int rvin_cropcap(struct file *file, void *priv,
+ struct v4l2_cropcap *crop)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+
+ if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ return v4l2_subdev_call(sd, video, cropcap, crop);
+}
+
+static int rvin_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+ int ret;
+
+ if (i->index != 0)
+ return -EINVAL;
+
+ ret = v4l2_subdev_call(sd, video, g_input_status, &i->status);
+ if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ return ret;
+
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+ i->std = vin->vdev.tvnorms;
+
+ if (v4l2_subdev_has_op(sd, pad, dv_timings_cap))
+ i->capabilities = V4L2_IN_CAP_DV_TIMINGS;
+
+ strlcpy(i->name, "Camera", sizeof(i->name));
+
+ return 0;
+}
+
+static int rvin_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int rvin_s_input(struct file *file, void *priv, unsigned int i)
+{
+ if (i > 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int rvin_querystd(struct file *file, void *priv, v4l2_std_id *a)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+
+ return v4l2_subdev_call(sd, video, querystd, a);
+}
+
+static int rvin_s_std(struct file *file, void *priv, v4l2_std_id a)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ struct v4l2_mbus_framefmt *mf = &fmt.format;
+ int ret = v4l2_subdev_call(sd, video, s_std, a);
+
+ if (ret < 0)
+ return ret;
+
+ /* Changing the standard will change the width/height */
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
+ if (ret) {
+ vin_err(vin, "Failed to get initial format\n");
+ return ret;
+ }
+
+ vin->format.width = mf->width;
+ vin->format.height = mf->height;
+
+ vin->crop.top = vin->crop.left = 0;
+ vin->crop.width = mf->width;
+ vin->crop.height = mf->height;
+
+ vin->compose.top = vin->compose.left = 0;
+ vin->compose.width = mf->width;
+ vin->compose.height = mf->height;
+
+ return 0;
+}
+
+static int rvin_g_std(struct file *file, void *priv, v4l2_std_id *a)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+
+ return v4l2_subdev_call(sd, video, g_std, a);
+}
+
+static int rvin_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_SOURCE_CHANGE:
+ return v4l2_event_subscribe(fh, sub, 4, NULL);
+ }
+ return v4l2_ctrl_subscribe_event(fh, sub);
+}
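
Events raised by the source subdevice (for instance V4L2_EVENT_SOURCE_CHANGE on a resolution change) reach the video node through the notify callback further down and are consumed with the standard event ioctls. A sketch, assuming /dev/video0:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	int fd = open("/dev/video0", O_RDWR);
	struct v4l2_event_subscription sub = {
		.type = V4L2_EVENT_SOURCE_CHANGE,
	};
	struct v4l2_event ev;

	ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
	ioctl(fd, VIDIOC_DQEVENT, &ev);	/* blocks unless O_NONBLOCK is set */
	return 0;
}
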
+
+static int rvin_enum_dv_timings(struct file *file, void *priv_fh,
+ struct v4l2_enum_dv_timings *timings)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+ int pad, ret;
+
+ pad = timings->pad;
+ timings->pad = vin->src_pad_idx;
+
+ ret = v4l2_subdev_call(sd, pad, enum_dv_timings, timings);
+
+ timings->pad = pad;
+
+ return ret;
+}
+
+static int rvin_s_dv_timings(struct file *file, void *priv_fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+ int err;
+
+ err = v4l2_subdev_call(sd,
+ video, s_dv_timings, timings);
+ if (!err) {
+ vin->source.width = timings->bt.width;
+ vin->source.height = timings->bt.height;
+ vin->format.width = timings->bt.width;
+ vin->format.height = timings->bt.height;
+ }
+ return err;
+}
+
+static int rvin_g_dv_timings(struct file *file, void *priv_fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+
+ return v4l2_subdev_call(sd,
+ video, g_dv_timings, timings);
+}
+
+static int rvin_query_dv_timings(struct file *file, void *priv_fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+
+ return v4l2_subdev_call(sd,
+ video, query_dv_timings, timings);
+}
+
+static int rvin_dv_timings_cap(struct file *file, void *priv_fh,
+ struct v4l2_dv_timings_cap *cap)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+ int pad, ret;
+
+ pad = cap->pad;
+ cap->pad = vin->src_pad_idx;
+
+ ret = v4l2_subdev_call(sd, pad, dv_timings_cap, cap);
+
+ cap->pad = pad;
+
+ return ret;
+}
+
+static const struct v4l2_ioctl_ops rvin_ioctl_ops = {
+ .vidioc_querycap = rvin_querycap,
+ .vidioc_try_fmt_vid_cap = rvin_try_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = rvin_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = rvin_s_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap = rvin_enum_fmt_vid_cap,
+
+ .vidioc_g_selection = rvin_g_selection,
+ .vidioc_s_selection = rvin_s_selection,
+
+ .vidioc_cropcap = rvin_cropcap,
+
+ .vidioc_enum_input = rvin_enum_input,
+ .vidioc_g_input = rvin_g_input,
+ .vidioc_s_input = rvin_s_input,
+
+ .vidioc_dv_timings_cap = rvin_dv_timings_cap,
+ .vidioc_enum_dv_timings = rvin_enum_dv_timings,
+ .vidioc_g_dv_timings = rvin_g_dv_timings,
+ .vidioc_s_dv_timings = rvin_s_dv_timings,
+ .vidioc_query_dv_timings = rvin_query_dv_timings,
+
+ .vidioc_querystd = rvin_querystd,
+ .vidioc_g_std = rvin_g_std,
+ .vidioc_s_std = rvin_s_std,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = rvin_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/* -----------------------------------------------------------------------------
+ * File Operations
+ */
+
+static int rvin_power_on(struct rvin_dev *vin)
+{
+ int ret;
+ struct v4l2_subdev *sd = vin_to_source(vin);
+
+ pm_runtime_get_sync(vin->v4l2_dev.dev);
+
+ ret = v4l2_subdev_call(sd, core, s_power, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ return ret;
+ return 0;
+}
+
+static int rvin_power_off(struct rvin_dev *vin)
+{
+ int ret;
+ struct v4l2_subdev *sd = vin_to_source(vin);
+
+ ret = v4l2_subdev_call(sd, core, s_power, 0);
+
+ pm_runtime_put(vin->v4l2_dev.dev);
+
+ if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ return ret;
+
+ return 0;
+}
+
+static int rvin_initialize_device(struct file *file)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ int ret;
+
+ struct v4l2_format f = {
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .fmt.pix = {
+ .width = vin->format.width,
+ .height = vin->format.height,
+ .field = vin->format.field,
+ .colorspace = vin->format.colorspace,
+ .pixelformat = vin->format.pixelformat,
+ },
+ };
+
+ ret = rvin_power_on(vin);
+ if (ret < 0)
+ return ret;
+
+ pm_runtime_enable(&vin->vdev.dev);
+ ret = pm_runtime_resume(&vin->vdev.dev);
+ if (ret < 0 && ret != -ENOSYS)
+ goto eresume;
+
+ /*
+	 * Try to configure with default parameters. Note: this is the
+	 * very first open, so we cannot race against other calls apart
+	 * from someone else calling open() simultaneously, and vin->lock
+	 * protects us against that.
+ */
+ ret = rvin_s_fmt_vid_cap(file, NULL, &f);
+ if (ret < 0)
+ goto esfmt;
+
+ v4l2_ctrl_handler_setup(&vin->ctrl_handler);
+
+ return 0;
+esfmt:
+ pm_runtime_disable(&vin->vdev.dev);
+eresume:
+ rvin_power_off(vin);
+
+ return ret;
+}
+
+static int rvin_open(struct file *file)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ int ret;
+
+ mutex_lock(&vin->lock);
+
+ file->private_data = vin;
+
+ ret = v4l2_fh_open(file);
+ if (ret)
+ goto unlock;
+
+ if (!v4l2_fh_is_singular_file(file))
+ goto unlock;
+
+ if (rvin_initialize_device(file)) {
+ v4l2_fh_release(file);
+ ret = -ENODEV;
+ }
+
+unlock:
+ mutex_unlock(&vin->lock);
+ return ret;
+}
+
+static int rvin_release(struct file *file)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ bool fh_singular;
+ int ret;
+
+ mutex_lock(&vin->lock);
+
+ /* Save the singular status before we call the clean-up helper */
+ fh_singular = v4l2_fh_is_singular_file(file);
+
+	/* the release helper will clean up any ongoing streaming */
+ ret = _vb2_fop_release(file, NULL);
+
+ /*
+	 * If this was the last open file,
+	 * then de-initialize the HW module.
+ */
+ if (fh_singular) {
+ pm_runtime_suspend(&vin->vdev.dev);
+ pm_runtime_disable(&vin->vdev.dev);
+ rvin_power_off(vin);
+ }
+
+ mutex_unlock(&vin->lock);
+
+ return ret;
+}
+
+static const struct v4l2_file_operations rvin_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = rvin_open,
+ .release = rvin_release,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+ .read = vb2_fop_read,
+};
+
+void rvin_v4l2_remove(struct rvin_dev *vin)
+{
+ v4l2_info(&vin->v4l2_dev, "Removing %s\n",
+ video_device_node_name(&vin->vdev));
+
+	/* Checks internally if the handler has been initialized or not */
+ v4l2_ctrl_handler_free(&vin->ctrl_handler);
+
+	/* Checks internally if vdev has been initialized or not */
+ video_unregister_device(&vin->vdev);
+}
+
+static void rvin_notify(struct v4l2_subdev *sd,
+ unsigned int notification, void *arg)
+{
+ struct rvin_dev *vin =
+ container_of(sd->v4l2_dev, struct rvin_dev, v4l2_dev);
+
+ switch (notification) {
+ case V4L2_DEVICE_NOTIFY_EVENT:
+ v4l2_event_queue(&vin->vdev, arg);
+ break;
+ default:
+ break;
+ }
+}
+
+int rvin_v4l2_probe(struct rvin_dev *vin)
+{
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ struct v4l2_mbus_framefmt *mf = &fmt.format;
+ struct video_device *vdev = &vin->vdev;
+ struct v4l2_subdev *sd = vin_to_source(vin);
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ int pad_idx;
+#endif
+ int ret;
+
+ v4l2_set_subdev_hostdata(sd, vin);
+
+ vin->v4l2_dev.notify = rvin_notify;
+
+ ret = v4l2_subdev_call(sd, video, g_tvnorms, &vin->vdev.tvnorms);
+ if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ return ret;
+
+ if (vin->vdev.tvnorms == 0) {
+ /* Disable the STD API if there are no tvnorms defined */
+ v4l2_disable_ioctl(&vin->vdev, VIDIOC_G_STD);
+ v4l2_disable_ioctl(&vin->vdev, VIDIOC_S_STD);
+ v4l2_disable_ioctl(&vin->vdev, VIDIOC_QUERYSTD);
+ v4l2_disable_ioctl(&vin->vdev, VIDIOC_ENUMSTD);
+ }
+
+ /* Add the controls */
+ /*
+ * Currently the subdev with the largest number of controls (13) is
+ * ov6550. So let's pick 16 as a hint for the control handler. Note
+ * that this is a hint only: too large and you waste some memory, too
+ * small and there is a (very) small performance hit when looking up
+ * controls in the internal hash.
+ */
+ ret = v4l2_ctrl_handler_init(&vin->ctrl_handler, 16);
+ if (ret < 0)
+ return ret;
+
+ ret = v4l2_ctrl_add_handler(&vin->ctrl_handler, sd->ctrl_handler, NULL);
+ if (ret < 0)
+ return ret;
+
+ /* video node */
+ vdev->fops = &rvin_fops;
+ vdev->v4l2_dev = &vin->v4l2_dev;
+ vdev->queue = &vin->queue;
+ strlcpy(vdev->name, KBUILD_MODNAME, sizeof(vdev->name));
+ vdev->release = video_device_release_empty;
+ vdev->ioctl_ops = &rvin_ioctl_ops;
+ vdev->lock = &vin->lock;
+ vdev->ctrl_handler = &vin->ctrl_handler;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE;
+
+ vin->src_pad_idx = 0;
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ for (pad_idx = 0; pad_idx < sd->entity.num_pads; pad_idx++)
+ if (sd->entity.pads[pad_idx].flags
+ == MEDIA_PAD_FL_SOURCE)
+ break;
+ if (pad_idx >= sd->entity.num_pads)
+ return -EINVAL;
+
+ vin->src_pad_idx = pad_idx;
+#endif
+ fmt.pad = vin->src_pad_idx;
+
+ /* Try to improve our guess of a reasonable window format */
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
+ if (ret) {
+ vin_err(vin, "Failed to get initial format\n");
+ return ret;
+ }
+
+ /* Set default format */
+ vin->format.width = mf->width;
+ vin->format.height = mf->height;
+ vin->format.colorspace = mf->colorspace;
+ vin->format.field = mf->field;
+ vin->format.pixelformat = RVIN_DEFAULT_FORMAT;
+
+ /* Set initial crop and compose */
+ vin->crop.top = vin->crop.left = 0;
+ vin->crop.width = mf->width;
+ vin->crop.height = mf->height;
+
+ vin->compose.top = vin->compose.left = 0;
+ vin->compose.width = mf->width;
+ vin->compose.height = mf->height;
+
+ ret = video_register_device(&vin->vdev, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ vin_err(vin, "Failed to register video device\n");
+ return ret;
+ }
+
+ video_set_drvdata(&vin->vdev, vin);
+
+ v4l2_info(&vin->v4l2_dev, "Device registered as %s\n",
+ video_device_node_name(&vin->vdev));
+
+ return ret;
+}
diff --git a/drivers/media/platform/rcar-vin/rcar-vin.h b/drivers/media/platform/rcar-vin/rcar-vin.h
new file mode 100644
index 000000000..31ad39a39
--- /dev/null
+++ b/drivers/media/platform/rcar-vin/rcar-vin.h
@@ -0,0 +1,163 @@
+/*
+ * Driver for Renesas R-Car VIN
+ *
+ * Copyright (C) 2016 Renesas Electronics Corp.
+ * Copyright (C) 2011-2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Cogent Embedded, Inc., <source@cogentembedded.com>
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * Based on the soc-camera rcar_vin driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __RCAR_VIN__
+#define __RCAR_VIN__
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/videobuf2-v4l2.h>
+
+/* Number of HW buffers */
+#define HW_BUFFER_NUM 3
+
+/* Address alignment mask for HW buffers */
+#define HW_BUFFER_MASK 0x7f
+
+enum chip_id {
+ RCAR_GEN2,
+ RCAR_H1,
+ RCAR_M1,
+};
+
+/**
+ * STOPPED - No operation in progress
+ * RUNNING - Operation in progress, have buffers
+ * STALLED - No operation in progress, have no buffers
+ * STOPPING - Stopping operation
+ */
+enum rvin_dma_state {
+ STOPPED = 0,
+ RUNNING,
+ STALLED,
+ STOPPING,
+};
+
+/**
+ * struct rvin_source_fmt - Source information
+ * @code: Media bus format from source
+ * @width: Width from source
+ * @height: Height from source
+ */
+struct rvin_source_fmt {
+ u32 code;
+ u32 width;
+ u32 height;
+};
+
+/**
+ * struct rvin_video_format - Data format stored in memory
+ * @fourcc: Pixelformat
+ * @bpp: Bytes per pixel
+ */
+struct rvin_video_format {
+ u32 fourcc;
+ u8 bpp;
+};
+
+struct rvin_graph_entity {
+ struct device_node *node;
+ struct media_entity *entity;
+
+ struct v4l2_async_subdev asd;
+ struct v4l2_subdev *subdev;
+};
+
+/**
+ * struct rvin_dev - Renesas VIN device structure
+ * @dev: (OF) device
+ * @base: device I/O register space remapped to virtual memory
+ * @chip: type of VIN chip
+ * @mbus_cfg:		media bus configuration
+ *
+ * @vdev: V4L2 video device associated with VIN
+ * @v4l2_dev: V4L2 device
+ * @src_pad_idx: source pad index for media controller drivers
+ * @ctrl_handler: V4L2 control handler
+ * @notifier: V4L2 asynchronous subdevs notifier
+ * @entity: entity in the DT for subdevice
+ *
+ * @lock: protects @queue
+ * @queue: vb2 buffers queue
+ *
+ * @qlock:		protects @queue_buf, @buf_list, @continuous,
+ *			@sequence and @state
+ * @queue_buf: Keeps track of buffers given to HW slot
+ * @buf_list: list of queued buffers
+ * @continuous: tracks if active operation is continuous or single mode
+ * @sequence: V4L2 buffers sequence number
+ * @state: keeps track of operation state
+ *
+ * @source: active format from the video source
+ * @format: active V4L2 pixel format
+ *
+ * @crop: active cropping
+ * @compose: active composing
+ */
+struct rvin_dev {
+ struct device *dev;
+ void __iomem *base;
+ enum chip_id chip;
+ struct v4l2_mbus_config mbus_cfg;
+
+ struct video_device vdev;
+ struct v4l2_device v4l2_dev;
+ int src_pad_idx;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_async_notifier notifier;
+ struct rvin_graph_entity entity;
+
+ struct mutex lock;
+ struct vb2_queue queue;
+
+ spinlock_t qlock;
+ struct vb2_v4l2_buffer *queue_buf[HW_BUFFER_NUM];
+ struct list_head buf_list;
+ bool continuous;
+ unsigned int sequence;
+ enum rvin_dma_state state;
+
+ struct rvin_source_fmt source;
+ struct v4l2_pix_format format;
+
+ struct v4l2_rect crop;
+ struct v4l2_rect compose;
+};
+
+#define vin_to_source(vin) vin->entity.subdev
+
+/* Debug */
+#define vin_dbg(d, fmt, arg...) dev_dbg(d->dev, fmt, ##arg)
+#define vin_info(d, fmt, arg...) dev_info(d->dev, fmt, ##arg)
+#define vin_warn(d, fmt, arg...) dev_warn(d->dev, fmt, ##arg)
+#define vin_err(d, fmt, arg...) dev_err(d->dev, fmt, ##arg)
+
+int rvin_dma_probe(struct rvin_dev *vin, int irq);
+void rvin_dma_remove(struct rvin_dev *vin);
+
+int rvin_v4l2_probe(struct rvin_dev *vin);
+void rvin_v4l2_remove(struct rvin_dev *vin);
+
+const struct rvin_video_format *rvin_format_from_pixel(u32 pixelformat);
+
+/* Cropping, composing and scaling */
+void rvin_scale_try(struct rvin_dev *vin, struct v4l2_pix_format *pix,
+ u32 width, u32 height);
+void rvin_crop_scale_comp(struct rvin_dev *vin);
+
+#endif
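
The HW_BUFFER_NUM slots, the @qlock-protected fields and enum rvin_dma_state above together describe a small capture state machine. A minimal sketch of the transitions, assuming a hypothetical helper called from the driver's frame-completion interrupt (this function is illustrative and not part of the header):

/*
 * Illustrative only, not from the patch: how a driver built on
 * struct rvin_dev might walk enum rvin_dma_state from its frame IRQ.
 * Must run under vin->qlock, which guards @state per the kerneldoc.
 */
static void rvin_update_state(struct rvin_dev *vin, bool slot_refilled)
{
	switch (vin->state) {
	case RUNNING:
		if (!slot_refilled)
			vin->state = STALLED;	/* userspace stopped queueing */
		break;
	case STALLED:
		if (slot_refilled)
			vin->state = RUNNING;	/* buffers available again */
		break;
	case STOPPING:
		vin->state = STOPPED;		/* capture drained */
		break;
	default:
		break;
	}
}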
diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c
index 552789a69..16782ceb2 100644
--- a/drivers/media/platform/rcar_jpu.c
+++ b/drivers/media/platform/rcar_jpu.c
@@ -203,7 +203,6 @@
* @irq: JPEG IP irq
* @clk: JPEG IP clock
* @dev: JPEG IP struct device
- * @alloc_ctx: videobuf2 memory allocator's context
* @ref_count: reference counter
*/
struct jpu {
@@ -220,7 +219,6 @@ struct jpu {
unsigned int irq;
struct clk *clk;
struct device *dev;
- void *alloc_ctx;
int ref_count;
};
@@ -1016,7 +1014,7 @@ error_free:
*/
static int jpu_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct jpu_ctx *ctx = vb2_get_drv_priv(vq);
struct jpu_q_data *q_data;
@@ -1033,17 +1031,14 @@ static int jpu_queue_setup(struct vb2_queue *vq,
if (sizes[i] < q_size)
return -EINVAL;
- alloc_ctxs[i] = ctx->jpu->alloc_ctx;
}
return 0;
}
*nplanes = q_data->format.num_planes;
- for (i = 0; i < *nplanes; i++) {
+ for (i = 0; i < *nplanes; i++)
sizes[i] = q_data->format.plane_fmt[i].sizeimage;
- alloc_ctxs[i] = ctx->jpu->alloc_ctx;
- }
return 0;
}
@@ -1214,6 +1209,7 @@ static int jpu_queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->lock = &ctx->jpu->mutex;
+ src_vq->dev = ctx->jpu->v4l2_dev.dev;
ret = vb2_queue_init(src_vq);
if (ret)
@@ -1228,6 +1224,7 @@ static int jpu_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->lock = &ctx->jpu->mutex;
+ dst_vq->dev = ctx->jpu->v4l2_dev.dev;
return vb2_queue_init(dst_vq);
}
@@ -1676,13 +1673,6 @@ static int jpu_probe(struct platform_device *pdev)
goto device_register_rollback;
}
- jpu->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
- if (IS_ERR(jpu->alloc_ctx)) {
- v4l2_err(&jpu->v4l2_dev, "Failed to init memory allocator\n");
- ret = PTR_ERR(jpu->alloc_ctx);
- goto m2m_init_rollback;
- }
-
	/* fill in quantization and Huffman tables for encoder */
for (i = 0; i < JPU_MAX_QUALITY; i++)
jpu_generate_hdr(i, (unsigned char *)jpeg_hdrs[i]);
@@ -1699,7 +1689,7 @@ static int jpu_probe(struct platform_device *pdev)
ret = video_register_device(&jpu->vfd_encoder, VFL_TYPE_GRABBER, -1);
if (ret) {
v4l2_err(&jpu->v4l2_dev, "Failed to register video device\n");
- goto vb2_allocator_rollback;
+ goto m2m_init_rollback;
}
video_set_drvdata(&jpu->vfd_encoder, jpu);
@@ -1732,9 +1722,6 @@ static int jpu_probe(struct platform_device *pdev)
enc_vdev_register_rollback:
video_unregister_device(&jpu->vfd_encoder);
-vb2_allocator_rollback:
- vb2_dma_contig_cleanup_ctx(jpu->alloc_ctx);
-
m2m_init_rollback:
v4l2_m2m_release(jpu->m2m_dev);
@@ -1750,7 +1737,6 @@ static int jpu_remove(struct platform_device *pdev)
video_unregister_device(&jpu->vfd_decoder);
video_unregister_device(&jpu->vfd_encoder);
- vb2_dma_contig_cleanup_ctx(jpu->alloc_ctx);
v4l2_m2m_release(jpu->m2m_dev);
v4l2_device_unregister(&jpu->v4l2_dev);
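
The rcar_jpu conversion above is the template for most of the hunks that follow: vb2 in 4.8 drops the per-driver allocator context (vb2_dma_contig_init_ctx()/vb2_dma_contig_cleanup_ctx()) in favour of a struct device * per plane, with vq->dev as the default. A condensed sketch of the two-step pattern, using generic names rather than any one driver's (EXAMPLE_FRAME_SIZE is a placeholder):

#include <media/videobuf2-core.h>
#include <linux/platform_device.h>

#define EXAMPLE_FRAME_SIZE (2 * 1024 * 1024)	/* hypothetical frame size */

/* Step 1: queue_setup() loses its alloc_ctxs[] assignments. The new
 * struct device *alloc_devs[] argument can stay untouched when every
 * plane allocates from the queue's default device. */
static int example_queue_setup(struct vb2_queue *vq,
			       unsigned int *nbuffers, unsigned int *nplanes,
			       unsigned int sizes[],
			       struct device *alloc_devs[])
{
	*nplanes = 1;
	sizes[0] = EXAMPLE_FRAME_SIZE;
	return 0;
}

/* Step 2: the device formerly wrapped in the allocator context is set
 * once, at queue-init time. */
static int example_init_queue(struct vb2_queue *vq,
			      struct platform_device *pdev)
{
	vq->dev = &pdev->dev;	/* replaces vb2_dma_contig_init_ctx(&pdev->dev) */
	return vb2_queue_init(vq);
}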
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
index bd060ef5d..0413a861a 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -437,10 +437,9 @@ static void stop_streaming(struct vb2_queue *vq)
static int queue_setup(struct vb2_queue *vq,
unsigned int *num_buffers, unsigned int *num_planes,
- unsigned int sizes[], void *allocators[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct camif_vp *vp = vb2_get_drv_priv(vq);
- struct camif_dev *camif = vp->camif;
struct camif_frame *frame = &vp->out_frame;
const struct camif_fmt *fmt = vp->out_fmt;
unsigned int size;
@@ -449,7 +448,6 @@ static int queue_setup(struct vb2_queue *vq,
return -EINVAL;
size = (frame->f_width * frame->f_height * fmt->depth) / 8;
- allocators[0] = camif->alloc_ctx;
if (*num_planes)
return sizes[0] < size ? -EINVAL : 0;
@@ -1138,6 +1136,7 @@ int s3c_camif_register_video_node(struct camif_dev *camif, int idx)
q->drv_priv = vp;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &vp->camif->lock;
+ q->dev = camif->v4l2_dev.dev;
ret = vb2_queue_init(q);
if (ret)
diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c
index af237af20..ec4001970 100644
--- a/drivers/media/platform/s3c-camif/camif-core.c
+++ b/drivers/media/platform/s3c-camif/camif-core.c
@@ -474,16 +474,9 @@ static int s3c_camif_probe(struct platform_device *pdev)
if (ret < 0)
goto err_pm;
- /* Initialize contiguous memory allocator */
- camif->alloc_ctx = vb2_dma_contig_init_ctx(dev);
- if (IS_ERR(camif->alloc_ctx)) {
- ret = PTR_ERR(camif->alloc_ctx);
- goto err_alloc;
- }
-
ret = camif_media_dev_init(camif);
if (ret < 0)
- goto err_mdev;
+ goto err_alloc;
ret = camif_register_sensor(camif);
if (ret < 0)
@@ -517,8 +510,6 @@ err_sens:
media_device_unregister(&camif->media_dev);
media_device_cleanup(&camif->media_dev);
camif_unregister_media_entities(camif);
-err_mdev:
- vb2_dma_contig_cleanup_ctx(camif->alloc_ctx);
err_alloc:
pm_runtime_put(dev);
pm_runtime_disable(dev);
diff --git a/drivers/media/platform/s3c-camif/camif-core.h b/drivers/media/platform/s3c-camif/camif-core.h
index 57cbc3d97..1f5c8c94c 100644
--- a/drivers/media/platform/s3c-camif/camif-core.h
+++ b/drivers/media/platform/s3c-camif/camif-core.h
@@ -254,7 +254,6 @@ struct camif_vp {
* @ctrl_handler: v4l2 control handler (owned by @subdev)
* @test_pattern: test pattern controls
* @vp: video path (DMA) description (codec/preview)
- * @alloc_ctx: memory buffer allocator context
* @variant: variant information for this device
* @dev: pointer to the CAMIF device struct
* @pdata: a copy of the driver's platform data
@@ -291,7 +290,6 @@ struct camif_dev {
u8 colorfx_cr;
struct camif_vp vp[CAMIF_VP_NUM];
- struct vb2_alloc_ctx *alloc_ctx;
const struct s3c_camif_variant *variant;
struct device *dev;
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 612d1ea51..391dd7a7b 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -103,7 +103,7 @@ static struct g2d_frame *get_frame(struct g2d_ctx *ctx,
static int g2d_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct g2d_ctx *ctx = vb2_get_drv_priv(vq);
struct g2d_frame *f = get_frame(ctx, vq->type);
@@ -113,7 +113,6 @@ static int g2d_queue_setup(struct vb2_queue *vq,
sizes[0] = f->size;
*nplanes = 1;
- alloc_ctxs[0] = ctx->dev->alloc_ctx;
if (*nbuffers == 0)
*nbuffers = 1;
@@ -159,6 +158,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->lock = &ctx->dev->mutex;
+ src_vq->dev = ctx->dev->v4l2_dev.dev;
ret = vb2_queue_init(src_vq);
if (ret)
@@ -172,6 +172,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->lock = &ctx->dev->mutex;
+ dst_vq->dev = ctx->dev->v4l2_dev.dev;
return vb2_queue_init(dst_vq);
}
@@ -681,15 +682,11 @@ static int g2d_probe(struct platform_device *pdev)
goto put_clk_gate;
}
- dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
- if (IS_ERR(dev->alloc_ctx)) {
- ret = PTR_ERR(dev->alloc_ctx);
- goto unprep_clk_gate;
- }
+ vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
if (ret)
- goto alloc_ctx_cleanup;
+ goto unprep_clk_gate;
vfd = video_device_alloc();
if (!vfd) {
v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
@@ -734,8 +731,6 @@ rel_vdev:
video_device_release(vfd);
unreg_v4l2_dev:
v4l2_device_unregister(&dev->v4l2_dev);
-alloc_ctx_cleanup:
- vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
unprep_clk_gate:
clk_unprepare(dev->gate);
put_clk_gate:
@@ -756,7 +751,7 @@ static int g2d_remove(struct platform_device *pdev)
v4l2_m2m_release(dev->m2m_dev);
video_unregister_device(dev->vfd);
v4l2_device_unregister(&dev->v4l2_dev);
- vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
+ vb2_dma_contig_clear_max_seg_size(&pdev->dev);
clk_unprepare(dev->gate);
clk_put(dev->gate);
clk_unprepare(dev->clk);
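
One per-device property used to ride along in the allocator context: the DMA maximum segment size. With the context gone, g2d (and s5p-jpeg below) set it explicitly in probe and clear it in remove. The pairing as a skeleton, with error paths trimmed and generic names:

#include <media/videobuf2-dma-contig.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	int ret;

	/* Allow dma-contig to merge a whole 32-bit buffer into one segment. */
	ret = vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;
	/* ... clocks, v4l2_device_register(), video nodes ... */
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	/* ... unregister in reverse order ... */
	vb2_dma_contig_clear_max_seg_size(&pdev->dev);	/* frees dev->dma_parms */
	return 0;
}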
diff --git a/drivers/media/platform/s5p-g2d/g2d.h b/drivers/media/platform/s5p-g2d/g2d.h
index e31df541a..dd812b557 100644
--- a/drivers/media/platform/s5p-g2d/g2d.h
+++ b/drivers/media/platform/s5p-g2d/g2d.h
@@ -25,7 +25,6 @@ struct g2d_dev {
struct mutex mutex;
spinlock_t ctrl_lock;
atomic_t num_inst;
- struct vb2_alloc_ctx *alloc_ctx;
void __iomem *regs;
struct clk *clk;
struct clk *gate;
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index caa19b408..785e6936c 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -2436,7 +2436,7 @@ static struct v4l2_m2m_ops exynos4_jpeg_m2m_ops = {
static int s5p_jpeg_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vq);
struct s5p_jpeg_q_data *q_data = NULL;
@@ -2457,7 +2457,6 @@ static int s5p_jpeg_queue_setup(struct vb2_queue *vq,
*nbuffers = count;
*nplanes = 1;
sizes[0] = size;
- alloc_ctxs[0] = ctx->jpeg->alloc_ctx;
return 0;
}
@@ -2563,6 +2562,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->lock = &ctx->jpeg->lock;
+ src_vq->dev = ctx->jpeg->dev;
ret = vb2_queue_init(src_vq);
if (ret)
@@ -2576,6 +2576,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->lock = &ctx->jpeg->lock;
+ dst_vq->dev = ctx->jpeg->dev;
return vb2_queue_init(dst_vq);
}
@@ -2843,19 +2844,14 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
goto device_register_rollback;
}
- jpeg->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
- if (IS_ERR(jpeg->alloc_ctx)) {
- v4l2_err(&jpeg->v4l2_dev, "Failed to init memory allocator\n");
- ret = PTR_ERR(jpeg->alloc_ctx);
- goto m2m_init_rollback;
- }
+ vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
/* JPEG encoder /dev/videoX node */
jpeg->vfd_encoder = video_device_alloc();
if (!jpeg->vfd_encoder) {
v4l2_err(&jpeg->v4l2_dev, "Failed to allocate video device\n");
ret = -ENOMEM;
- goto vb2_allocator_rollback;
+ goto m2m_init_rollback;
}
snprintf(jpeg->vfd_encoder->name, sizeof(jpeg->vfd_encoder->name),
"%s-enc", S5P_JPEG_M2M_NAME);
@@ -2871,7 +2867,7 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
if (ret) {
v4l2_err(&jpeg->v4l2_dev, "Failed to register video device\n");
video_device_release(jpeg->vfd_encoder);
- goto vb2_allocator_rollback;
+ goto m2m_init_rollback;
}
video_set_drvdata(jpeg->vfd_encoder, jpeg);
@@ -2920,9 +2916,6 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
enc_vdev_register_rollback:
video_unregister_device(jpeg->vfd_encoder);
-vb2_allocator_rollback:
- vb2_dma_contig_cleanup_ctx(jpeg->alloc_ctx);
-
m2m_init_rollback:
v4l2_m2m_release(jpeg->m2m_dev);
@@ -2941,7 +2934,7 @@ static int s5p_jpeg_remove(struct platform_device *pdev)
video_unregister_device(jpeg->vfd_decoder);
video_unregister_device(jpeg->vfd_encoder);
- vb2_dma_contig_cleanup_ctx(jpeg->alloc_ctx);
+ vb2_dma_contig_clear_max_seg_size(&pdev->dev);
v4l2_m2m_release(jpeg->m2m_dev);
v4l2_device_unregister(&jpeg->v4l2_dev);
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.h b/drivers/media/platform/s5p-jpeg/jpeg-core.h
index 9b1db0934..4492a3535 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.h
@@ -110,7 +110,6 @@ enum exynos4_jpeg_img_quality_level {
* @irq: JPEG IP irq
* @clocks: JPEG IP clock(s)
* @dev: JPEG IP struct device
- * @alloc_ctx: videobuf2 memory allocator's context
* @variant: driver variant to be used
 * @irq_status: interrupt flags set during single encode/decode operation
@@ -130,7 +129,6 @@ struct s5p_jpeg {
enum exynos4_jpeg_result irq_ret;
struct clk *clocks[JPEG_MAX_CLOCKS];
struct device *dev;
- void *alloc_ctx;
struct s5p_jpeg_variant *variant;
u32 irq_status;
};
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index eb72fbf2e..f78838926 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -22,6 +22,7 @@
#include <media/v4l2-event.h>
#include <linux/workqueue.h>
#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
#include <media/videobuf2-v4l2.h>
#include "s5p_mfc_common.h"
#include "s5p_mfc_ctrl.h"
@@ -29,11 +30,11 @@
#include "s5p_mfc_dec.h"
#include "s5p_mfc_enc.h"
#include "s5p_mfc_intr.h"
+#include "s5p_mfc_iommu.h"
#include "s5p_mfc_opr.h"
#include "s5p_mfc_cmd.h"
#include "s5p_mfc_pm.h"
-#define S5P_MFC_NAME "s5p-mfc"
#define S5P_MFC_DEC_NAME "s5p-mfc-dec"
#define S5P_MFC_ENC_NAME "s5p-mfc-enc"
@@ -1043,66 +1044,94 @@ static const struct v4l2_file_operations s5p_mfc_fops = {
.mmap = s5p_mfc_mmap,
};
-static int match_child(struct device *dev, void *data)
+/* DMA memory related helper functions */
+static void s5p_mfc_memdev_release(struct device *dev)
{
- if (!dev_name(dev))
- return 0;
- return !strcmp(dev_name(dev), (char *)data);
+ of_reserved_mem_device_release(dev);
}
-static void s5p_mfc_memdev_release(struct device *dev)
+static struct device *s5p_mfc_alloc_memdev(struct device *dev,
+ const char *name, unsigned int idx)
{
- dma_release_declared_memory(dev);
-}
+ struct device *child;
+ int ret;
-static void *mfc_get_drv_data(struct platform_device *pdev);
+ child = devm_kzalloc(dev, sizeof(struct device), GFP_KERNEL);
+ if (!child)
+ return NULL;
+
+ device_initialize(child);
+ dev_set_name(child, "%s:%s", dev_name(dev), name);
+ child->parent = dev;
+ child->bus = dev->bus;
+ child->coherent_dma_mask = dev->coherent_dma_mask;
+ child->dma_mask = dev->dma_mask;
+ child->release = s5p_mfc_memdev_release;
+
+ if (device_add(child) == 0) {
+ ret = of_reserved_mem_device_init_by_idx(child, dev->of_node,
+ idx);
+ if (ret == 0)
+ return child;
+ }
+
+ put_device(child);
+ return NULL;
+}
-static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev)
+static int s5p_mfc_configure_dma_memory(struct s5p_mfc_dev *mfc_dev)
{
- unsigned int mem_info[2] = { };
+ struct device *dev = &mfc_dev->plat_dev->dev;
- dev->mem_dev_l = devm_kzalloc(&dev->plat_dev->dev,
- sizeof(struct device), GFP_KERNEL);
- if (!dev->mem_dev_l) {
- mfc_err("Not enough memory\n");
- return -ENOMEM;
+ /*
+ * When IOMMU is available, we cannot use the default configuration,
+ * because of MFC firmware requirements: address space limited to
+ * 256M and non-zero default start address.
+	 * This is still a simplified, non-optimal configuration; for now
+	 * the IOMMU core does not allow configuring a device's IOMMU
+	 * channels separately.
+ */
+ if (exynos_is_iommu_available(dev)) {
+ int ret = exynos_configure_iommu(dev, S5P_MFC_IOMMU_DMA_BASE,
+ S5P_MFC_IOMMU_DMA_SIZE);
+ if (ret == 0)
+ mfc_dev->mem_dev_l = mfc_dev->mem_dev_r = dev;
+ return ret;
}
- dev_set_name(dev->mem_dev_l, "%s", "s5p-mfc-l");
- dev->mem_dev_l->release = s5p_mfc_memdev_release;
- device_initialize(dev->mem_dev_l);
- of_property_read_u32_array(dev->plat_dev->dev.of_node,
- "samsung,mfc-l", mem_info, 2);
- if (dma_declare_coherent_memory(dev->mem_dev_l, mem_info[0],
- mem_info[0], mem_info[1],
- DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE) == 0) {
- mfc_err("Failed to declare coherent memory for\n"
- "MFC device\n");
- return -ENOMEM;
+ /*
+ * Create and initialize virtual devices for accessing
+ * reserved memory regions.
+ */
+ mfc_dev->mem_dev_l = s5p_mfc_alloc_memdev(dev, "left",
+ MFC_BANK1_ALLOC_CTX);
+ if (!mfc_dev->mem_dev_l)
+ return -ENODEV;
+ mfc_dev->mem_dev_r = s5p_mfc_alloc_memdev(dev, "right",
+ MFC_BANK2_ALLOC_CTX);
+ if (!mfc_dev->mem_dev_r) {
+ device_unregister(mfc_dev->mem_dev_l);
+ return -ENODEV;
}
- dev->mem_dev_r = devm_kzalloc(&dev->plat_dev->dev,
- sizeof(struct device), GFP_KERNEL);
- if (!dev->mem_dev_r) {
- mfc_err("Not enough memory\n");
- return -ENOMEM;
- }
+ return 0;
+}
- dev_set_name(dev->mem_dev_r, "%s", "s5p-mfc-r");
- dev->mem_dev_r->release = s5p_mfc_memdev_release;
- device_initialize(dev->mem_dev_r);
- of_property_read_u32_array(dev->plat_dev->dev.of_node,
- "samsung,mfc-r", mem_info, 2);
- if (dma_declare_coherent_memory(dev->mem_dev_r, mem_info[0],
- mem_info[0], mem_info[1],
- DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE) == 0) {
- pr_err("Failed to declare coherent memory for\n"
- "MFC device\n");
- return -ENOMEM;
+static void s5p_mfc_unconfigure_dma_memory(struct s5p_mfc_dev *mfc_dev)
+{
+ struct device *dev = &mfc_dev->plat_dev->dev;
+
+ if (exynos_is_iommu_available(dev)) {
+ exynos_unconfigure_iommu(dev);
+ return;
}
- return 0;
+
+ device_unregister(mfc_dev->mem_dev_l);
+ device_unregister(mfc_dev->mem_dev_r);
}
+static void *mfc_get_drv_data(struct platform_device *pdev);
+
/* MFC probe function */
static int s5p_mfc_probe(struct platform_device *pdev)
{
@@ -1128,14 +1157,11 @@ static int s5p_mfc_probe(struct platform_device *pdev)
dev->variant = mfc_get_drv_data(pdev);
- ret = s5p_mfc_init_pm(dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to get mfc clock source\n");
- return ret;
- }
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
+ if (res == NULL) {
+ dev_err(&pdev->dev, "failed to get io resource\n");
+ return -ENOENT;
+ }
dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(dev->regs_base))
return PTR_ERR(dev->regs_base);
@@ -1143,54 +1169,36 @@ static int s5p_mfc_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res == NULL) {
dev_err(&pdev->dev, "failed to get irq resource\n");
- ret = -ENOENT;
- goto err_res;
+ return -ENOENT;
}
dev->irq = res->start;
ret = devm_request_irq(&pdev->dev, dev->irq, s5p_mfc_irq,
0, pdev->name, dev);
if (ret) {
dev_err(&pdev->dev, "Failed to install irq (%d)\n", ret);
- goto err_res;
+ return ret;
}
- if (pdev->dev.of_node) {
- ret = s5p_mfc_alloc_memdevs(dev);
- if (ret < 0)
- goto err_res;
- } else {
- dev->mem_dev_l = device_find_child(&dev->plat_dev->dev,
- "s5p-mfc-l", match_child);
- if (!dev->mem_dev_l) {
- mfc_err("Mem child (L) device get failed\n");
- ret = -ENODEV;
- goto err_res;
- }
- dev->mem_dev_r = device_find_child(&dev->plat_dev->dev,
- "s5p-mfc-r", match_child);
- if (!dev->mem_dev_r) {
- mfc_err("Mem child (R) device get failed\n");
- ret = -ENODEV;
- goto err_res;
- }
+ ret = s5p_mfc_configure_dma_memory(dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to configure DMA memory\n");
+ return ret;
}
- dev->alloc_ctx[0] = vb2_dma_contig_init_ctx(dev->mem_dev_l);
- if (IS_ERR(dev->alloc_ctx[0])) {
- ret = PTR_ERR(dev->alloc_ctx[0]);
- goto err_res;
- }
- dev->alloc_ctx[1] = vb2_dma_contig_init_ctx(dev->mem_dev_r);
- if (IS_ERR(dev->alloc_ctx[1])) {
- ret = PTR_ERR(dev->alloc_ctx[1]);
- goto err_mem_init_ctx_1;
+ ret = s5p_mfc_init_pm(dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get mfc clock source\n");
+ goto err_dma;
}
+ vb2_dma_contig_set_max_seg_size(dev->mem_dev_l, DMA_BIT_MASK(32));
+ vb2_dma_contig_set_max_seg_size(dev->mem_dev_r, DMA_BIT_MASK(32));
+
mutex_init(&dev->mfc_mutex);
ret = s5p_mfc_alloc_firmware(dev);
if (ret)
- goto err_alloc_fw;
+ goto err_res;
ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
if (ret)
@@ -1212,14 +1220,6 @@ static int s5p_mfc_probe(struct platform_device *pdev)
vfd->vfl_dir = VFL_DIR_M2M;
snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_DEC_NAME);
dev->vfd_dec = vfd;
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
- if (ret) {
- v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
- video_device_release(vfd);
- goto err_dec_reg;
- }
- v4l2_info(&dev->v4l2_dev,
- "decoder registered as /dev/video%d\n", vfd->num);
video_set_drvdata(vfd, dev);
/* encoder */
@@ -1237,14 +1237,6 @@ static int s5p_mfc_probe(struct platform_device *pdev)
vfd->vfl_dir = VFL_DIR_M2M;
snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_ENC_NAME);
dev->vfd_enc = vfd;
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
- if (ret) {
- v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
- video_device_release(vfd);
- goto err_enc_reg;
- }
- v4l2_info(&dev->v4l2_dev,
- "encoder registered as /dev/video%d\n", vfd->num);
video_set_drvdata(vfd, dev);
platform_set_drvdata(pdev, dev);
@@ -1261,26 +1253,41 @@ static int s5p_mfc_probe(struct platform_device *pdev)
s5p_mfc_init_hw_cmds(dev);
s5p_mfc_init_regs(dev);
+ /* Register decoder and encoder */
+ ret = video_register_device(dev->vfd_dec, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+ goto err_dec_reg;
+ }
+ v4l2_info(&dev->v4l2_dev,
+ "decoder registered as /dev/video%d\n", dev->vfd_dec->num);
+
+ ret = video_register_device(dev->vfd_enc, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+ goto err_enc_reg;
+ }
+ v4l2_info(&dev->v4l2_dev,
+ "encoder registered as /dev/video%d\n", dev->vfd_enc->num);
+
pr_debug("%s--\n", __func__);
return 0;
/* Deinit MFC if probe had failed */
err_enc_reg:
- video_device_release(dev->vfd_enc);
-err_enc_alloc:
video_unregister_device(dev->vfd_dec);
err_dec_reg:
+ video_device_release(dev->vfd_enc);
+err_enc_alloc:
video_device_release(dev->vfd_dec);
err_dec_alloc:
v4l2_device_unregister(&dev->v4l2_dev);
err_v4l2_dev_reg:
s5p_mfc_release_firmware(dev);
-err_alloc_fw:
- vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[1]);
-err_mem_init_ctx_1:
- vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[0]);
err_res:
s5p_mfc_final_pm(dev);
+err_dma:
+ s5p_mfc_unconfigure_dma_memory(dev);
pr_debug("%s-- with error\n", __func__);
return ret;
@@ -1300,14 +1307,13 @@ static int s5p_mfc_remove(struct platform_device *pdev)
video_unregister_device(dev->vfd_enc);
video_unregister_device(dev->vfd_dec);
+ video_device_release(dev->vfd_enc);
+ video_device_release(dev->vfd_dec);
v4l2_device_unregister(&dev->v4l2_dev);
s5p_mfc_release_firmware(dev);
- vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[0]);
- vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[1]);
- if (pdev->dev.of_node) {
- put_device(dev->mem_dev_l);
- put_device(dev->mem_dev_r);
- }
+ s5p_mfc_unconfigure_dma_memory(dev);
+ vb2_dma_contig_clear_max_seg_size(dev->mem_dev_l);
+ vb2_dma_contig_clear_max_seg_size(dev->mem_dev_r);
s5p_mfc_final_pm(dev);
return 0;
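
With the allocator contexts gone, the two MFC memory banks are expressed purely as devices: s5p_mfc_alloc_memdev() binds each child to a reserved-memory node, and queue_setup() routes a plane to a bank by assigning that bank's device. A condensed sketch of the consumer side, mirroring the s5p_mfc_dec.c hunk further down (IS_MFCV6_PLUS and the mem_dev_l/mem_dev_r fields belong to this driver):

/* Sketch: per-plane bank selection in a vb2 queue_setup() callback.
 * The firmware requires luma and chroma in different banks on pre-v6 MFC. */
static int mfc_capture_queue_setup(struct vb2_queue *vq,
				   unsigned int *buf_count,
				   unsigned int *plane_count,
				   unsigned int psize[],
				   struct device *alloc_devs[])
{
	struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
	struct s5p_mfc_dev *dev = ctx->dev;

	*plane_count = 2;
	psize[0] = ctx->luma_size;
	psize[1] = ctx->chroma_size;
	alloc_devs[0] = IS_MFCV6_PLUS(dev) ? dev->mem_dev_l : dev->mem_dev_r;
	alloc_devs[1] = dev->mem_dev_l;
	return 0;
}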
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
index 9eb2481ec..373e346fc 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
@@ -25,6 +25,8 @@
#include "regs-mfc.h"
#include "regs-mfc-v8.h"
+#define S5P_MFC_NAME "s5p-mfc"
+
/* Definitions related to MFC memory */
/* Offset base used to differentiate between CAPTURE and OUTPUT
@@ -285,7 +287,6 @@ struct s5p_mfc_priv_buf {
* @watchdog_cnt: counter for the watchdog
* @watchdog_workqueue: workqueue for the watchdog
* @watchdog_work: worker for the watchdog
- * @alloc_ctx: videobuf2 allocator contexts for two memory banks
* @enter_suspend: flag set when entering suspend
* @ctx_buf: common context memory (MFCv6)
* @warn_start: hardware error code from which warnings start
@@ -328,7 +329,6 @@ struct s5p_mfc_dev {
struct timer_list watchdog_timer;
struct workqueue_struct *watchdog_workqueue;
struct work_struct watchdog_work;
- void *alloc_ctx[2];
unsigned long enter_suspend;
struct s5p_mfc_priv_buf ctx_buf;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index f2d6376ce..47c997d9e 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -265,9 +265,10 @@ static int vidioc_querycap(struct file *file, void *priv,
{
struct s5p_mfc_dev *dev = video_drvdata(file);
- strncpy(cap->driver, dev->plat_dev->name, sizeof(cap->driver) - 1);
- strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1);
- cap->bus_info[0] = 0;
+ strncpy(cap->driver, S5P_MFC_NAME, sizeof(cap->driver) - 1);
+ strncpy(cap->card, dev->vfd_dec->name, sizeof(cap->card) - 1);
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(&dev->plat_dev->dev));
/*
* This is only a mem-to-mem video device. The capture and output
* device capability flags are left only for backward compatibility
@@ -423,7 +424,7 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
pix_mp = &f->fmt.pix_mp;
if (ret)
return ret;
- if (ctx->vq_src.streaming || ctx->vq_dst.streaming) {
+ if (vb2_is_streaming(&ctx->vq_src) || vb2_is_streaming(&ctx->vq_dst)) {
v4l2_err(&dev->v4l2_dev, "%s queue busy\n", __func__);
ret = -EBUSY;
goto out;
@@ -474,7 +475,6 @@ static int reqbufs_output(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx,
ret = vb2_reqbufs(&ctx->vq_src, reqbufs);
if (ret)
goto out;
- s5p_mfc_close_mfc_inst(dev, ctx);
ctx->src_bufs_cnt = 0;
ctx->output_state = QUEUE_FREE;
} else if (ctx->output_state == QUEUE_FREE) {
@@ -565,7 +565,7 @@ out:
return ret;
}
-/* Reqeust buffers */
+/* Request buffers */
static int vidioc_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *reqbufs)
{
@@ -573,7 +573,7 @@ static int vidioc_reqbufs(struct file *file, void *priv,
struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
if (reqbufs->memory != V4L2_MEMORY_MMAP) {
- mfc_err("Only V4L2_MEMORY_MAP is supported\n");
+ mfc_debug(2, "Only V4L2_MEMORY_MMAP is supported\n");
return -EINVAL;
}
@@ -821,7 +821,7 @@ static int vidioc_decoder_cmd(struct file *file, void *priv,
if (cmd->flags != 0)
return -EINVAL;
- if (!ctx->vq_src.streaming)
+ if (!vb2_is_streaming(&ctx->vq_src))
return -EINVAL;
spin_lock_irqsave(&dev->irqlock, flags);
@@ -890,7 +890,7 @@ static const struct v4l2_ioctl_ops s5p_mfc_dec_ioctl_ops = {
static int s5p_mfc_queue_setup(struct vb2_queue *vq,
unsigned int *buf_count,
unsigned int *plane_count, unsigned int psize[],
- void *allocators[])
+ struct device *alloc_devs[])
{
struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
struct s5p_mfc_dev *dev = ctx->dev;
@@ -931,16 +931,14 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
psize[1] = ctx->chroma_size;
if (IS_MFCV6_PLUS(dev))
- allocators[0] =
- ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX];
+ alloc_devs[0] = ctx->dev->mem_dev_l;
else
- allocators[0] =
- ctx->dev->alloc_ctx[MFC_BANK2_ALLOC_CTX];
- allocators[1] = ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX];
+ alloc_devs[0] = ctx->dev->mem_dev_r;
+ alloc_devs[1] = ctx->dev->mem_dev_l;
} else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
ctx->state == MFCINST_INIT) {
psize[0] = ctx->dec_src_buf_size;
- allocators[0] = ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX];
+ alloc_devs[0] = ctx->dev->mem_dev_l;
} else {
mfc_err("This video node is dedicated to decoding. Decoding not initialized\n");
return -EINVAL;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 034b5c1d3..fcc2e054c 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -943,9 +943,10 @@ static int vidioc_querycap(struct file *file, void *priv,
{
struct s5p_mfc_dev *dev = video_drvdata(file);
- strncpy(cap->driver, dev->plat_dev->name, sizeof(cap->driver) - 1);
- strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1);
- cap->bus_info[0] = 0;
+ strncpy(cap->driver, S5P_MFC_NAME, sizeof(cap->driver) - 1);
+ strncpy(cap->card, dev->vfd_enc->name, sizeof(cap->card) - 1);
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(&dev->plat_dev->dev));
/*
* This is only a mem-to-mem video device. The capture and output
* device capability flags are left only for backward compatibility
@@ -1043,10 +1044,6 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
mfc_err("failed to try output format\n");
return -EINVAL;
}
- if (pix_fmt_mp->plane_fmt[0].sizeimage == 0) {
- mfc_err("must be set encoding output size\n");
- return -EINVAL;
- }
if ((dev->variant->version_bit & fmt->versions) == 0) {
mfc_err("Unsupported format by this MFC version.\n");
return -EINVAL;
@@ -1060,11 +1057,6 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
mfc_err("failed to try output format\n");
return -EINVAL;
}
-
- if (fmt->num_planes != pix_fmt_mp->num_planes) {
- mfc_err("failed to try output format\n");
- return -EINVAL;
- }
if ((dev->variant->version_bit & fmt->versions) == 0) {
mfc_err("Unsupported format by this MFC version.\n");
return -EINVAL;
@@ -1144,7 +1136,10 @@ static int vidioc_reqbufs(struct file *file, void *priv,
return -EINVAL;
if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
if (reqbufs->count == 0) {
+ mfc_debug(2, "Freeing buffers\n");
ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
+ s5p_mfc_hw_call(dev->mfc_ops, release_codec_buffers,
+ ctx);
ctx->capture_state = QUEUE_FREE;
return ret;
}
@@ -1817,7 +1812,7 @@ static int check_vb_with_fmt(struct s5p_mfc_fmt *fmt, struct vb2_buffer *vb)
static int s5p_mfc_queue_setup(struct vb2_queue *vq,
unsigned int *buf_count, unsigned int *plane_count,
- unsigned int psize[], void *allocators[])
+ unsigned int psize[], struct device *alloc_devs[])
{
struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
struct s5p_mfc_dev *dev = ctx->dev;
@@ -1837,7 +1832,7 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
if (*buf_count > MFC_MAX_BUFFERS)
*buf_count = MFC_MAX_BUFFERS;
psize[0] = ctx->enc_dst_buf_size;
- allocators[0] = ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX];
+ alloc_devs[0] = ctx->dev->mem_dev_l;
} else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
if (ctx->src_fmt)
*plane_count = ctx->src_fmt->num_planes;
@@ -1853,15 +1848,11 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
psize[1] = ctx->chroma_size;
if (IS_MFCV6_PLUS(dev)) {
- allocators[0] =
- ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX];
- allocators[1] =
- ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX];
+ alloc_devs[0] = ctx->dev->mem_dev_l;
+ alloc_devs[1] = ctx->dev->mem_dev_l;
} else {
- allocators[0] =
- ctx->dev->alloc_ctx[MFC_BANK2_ALLOC_CTX];
- allocators[1] =
- ctx->dev->alloc_ctx[MFC_BANK2_ALLOC_CTX];
+ alloc_devs[0] = ctx->dev->mem_dev_r;
+ alloc_devs[1] = ctx->dev->mem_dev_r;
}
} else {
mfc_err("invalid queue type: %d\n", vq->type);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_iommu.h b/drivers/media/platform/s5p-mfc/s5p_mfc_iommu.h
new file mode 100644
index 000000000..6962132ae
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_iommu.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2015 Samsung Electronics Co.Ltd
+ * Authors: Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef S5P_MFC_IOMMU_H_
+#define S5P_MFC_IOMMU_H_
+
+#define S5P_MFC_IOMMU_DMA_BASE 0x20000000lu
+#define S5P_MFC_IOMMU_DMA_SIZE SZ_256M
+
+#if defined(CONFIG_EXYNOS_IOMMU) && defined(CONFIG_ARM_DMA_USE_IOMMU)
+
+#include <asm/dma-iommu.h>
+
+static inline bool exynos_is_iommu_available(struct device *dev)
+{
+ return dev->archdata.iommu != NULL;
+}
+
+static inline void exynos_unconfigure_iommu(struct device *dev)
+{
+ struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+
+ arm_iommu_detach_device(dev);
+ arm_iommu_release_mapping(mapping);
+}
+
+static inline int exynos_configure_iommu(struct device *dev,
+ unsigned int base, unsigned int size)
+{
+ struct dma_iommu_mapping *mapping = NULL;
+ int ret;
+
+ /* Disable the default mapping created by device core */
+ if (to_dma_iommu_mapping(dev))
+ exynos_unconfigure_iommu(dev);
+
+ mapping = arm_iommu_create_mapping(dev->bus, base, size);
+ if (IS_ERR(mapping)) {
+ pr_warn("Failed to create IOMMU mapping for device %s\n",
+ dev_name(dev));
+ return PTR_ERR(mapping);
+ }
+
+ ret = arm_iommu_attach_device(dev, mapping);
+ if (ret) {
+		pr_warn("Failed to attach device %s to IOMMU mapping\n",
+ dev_name(dev));
+ arm_iommu_release_mapping(mapping);
+ return ret;
+ }
+
+ return 0;
+}
+
+#else
+
+static inline bool exynos_is_iommu_available(struct device *dev)
+{
+ return false;
+}
+
+static inline int exynos_configure_iommu(struct device *dev,
+ unsigned int base, unsigned int size)
+{
+ return -ENOSYS;
+}
+
+static inline void exynos_unconfigure_iommu(struct device *dev) { }
+
+#endif
+
+#endif /* S5P_MFC_IOMMU_H_ */
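
The -ENOSYS stub above is never reached at runtime: callers gate on exynos_is_iommu_available() first, so the stub only keeps non-IOMMU builds compiling. A sketch of the intended call pattern; configure_reserved_mem_banks() here is a hypothetical placeholder for the reserved-memory fallback shown earlier in s5p_mfc.c:

static int example_configure_memory(struct device *dev)
{
	if (exynos_is_iommu_available(dev))
		return exynos_configure_iommu(dev, S5P_MFC_IOMMU_DMA_BASE,
					      S5P_MFC_IOMMU_DMA_SIZE);

	/* No IOMMU: fall back to the reserved-memory child devices. */
	return configure_reserved_mem_banks(dev);	/* hypothetical */
}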
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
index 5f97a3398..930dc2ddd 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
@@ -54,6 +54,7 @@ int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
pm->clock = clk_get(&dev->plat_dev->dev, MFC_SCLK_NAME);
if (IS_ERR(pm->clock)) {
mfc_info("Failed to get MFC special clock control\n");
+ pm->clock = NULL;
} else {
clk_set_rate(pm->clock, MFC_SCLK_RATE);
ret = clk_prepare_enable(pm->clock);
@@ -76,8 +77,10 @@ int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
err_s_clk:
clk_put(pm->clock);
+ pm->clock = NULL;
err_p_ip_clk:
clk_put(pm->clock_gate);
+ pm->clock_gate = NULL;
err_g_ip_clk:
return ret;
}
@@ -88,9 +91,11 @@ void s5p_mfc_final_pm(struct s5p_mfc_dev *dev)
pm->clock) {
clk_disable_unprepare(pm->clock);
clk_put(pm->clock);
+ pm->clock = NULL;
}
clk_unprepare(pm->clock_gate);
clk_put(pm->clock_gate);
+ pm->clock_gate = NULL;
#ifdef CONFIG_PM
pm_runtime_disable(pm->device);
#endif
@@ -98,12 +103,13 @@ void s5p_mfc_final_pm(struct s5p_mfc_dev *dev)
int s5p_mfc_clock_on(void)
{
- int ret;
+ int ret = 0;
#ifdef CLK_DEBUG
atomic_inc(&clk_ref);
mfc_debug(3, "+ %d\n", atomic_read(&clk_ref));
#endif
- ret = clk_enable(pm->clock_gate);
+ if (!IS_ERR_OR_NULL(pm->clock_gate))
+ ret = clk_enable(pm->clock_gate);
return ret;
}
@@ -113,7 +119,8 @@ void s5p_mfc_clock_off(void)
atomic_dec(&clk_ref);
mfc_debug(3, "- %d\n", atomic_read(&clk_ref));
#endif
- clk_disable(pm->clock_gate);
+ if (!IS_ERR_OR_NULL(pm->clock_gate))
+ clk_disable(pm->clock_gate);
}
int s5p_mfc_power_on(void)
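
The pm.c hunks converge on one convention: the special clock is optional, so a failed clk_get() is downgraded to NULL and every later use is guarded. (devm_clk_get_optional() would express this directly, but it postdates 4.8.) A minimal sketch of that convention, under those assumptions:

#include <linux/clk.h>
#include <linux/err.h>

static struct clk *mfc_get_optional_clk(struct device *dev, const char *name)
{
	struct clk *clk = clk_get(dev, name);

	if (IS_ERR(clk))
		return NULL;	/* a missing optional clock is not fatal */
	return clk;
}

/* Users then guard every operation, as s5p_mfc_clock_on() now does;
 * the clock was prepared once at init time, so only enable is needed. */
static void mfc_clk_enable(struct clk *clk)
{
	if (!IS_ERR_OR_NULL(clk))
		clk_enable(clk);
}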
diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
index 4dd62a918..869f0ce86 100644
--- a/drivers/media/platform/s5p-tv/mixer.h
+++ b/drivers/media/platform/s5p-tv/mixer.h
@@ -245,8 +245,6 @@ struct mxr_device {
/** V4L2 device */
struct v4l2_device v4l2_dev;
- /** context of allocator */
- void *alloc_ctx;
/** event wait queue */
wait_queue_head_t event_queue;
/** state flags */
diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
index 7ab5578a0..ee74e2b44 100644
--- a/drivers/media/platform/s5p-tv/mixer_video.c
+++ b/drivers/media/platform/s5p-tv/mixer_video.c
@@ -80,12 +80,7 @@ int mxr_acquire_video(struct mxr_device *mdev,
goto fail;
}
- mdev->alloc_ctx = vb2_dma_contig_init_ctx(mdev->dev);
- if (IS_ERR(mdev->alloc_ctx)) {
- mxr_err(mdev, "could not acquire vb2 allocator\n");
- ret = PTR_ERR(mdev->alloc_ctx);
- goto fail_v4l2_dev;
- }
+ vb2_dma_contig_set_max_seg_size(mdev->dev, DMA_BIT_MASK(32));
/* registering outputs */
mdev->output_cnt = 0;
@@ -120,7 +115,7 @@ int mxr_acquire_video(struct mxr_device *mdev,
mxr_err(mdev, "failed to register any output\n");
ret = -ENODEV;
/* skipping fail_output because there is nothing to free */
- goto fail_vb2_allocator;
+ goto fail_v4l2_dev;
}
return 0;
@@ -131,10 +126,6 @@ fail_output:
kfree(mdev->output[i]);
memset(mdev->output, 0, sizeof(mdev->output));
-fail_vb2_allocator:
- /* freeing allocator context */
- vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);
-
fail_v4l2_dev:
/* NOTE: automatically unregister all subdevs */
v4l2_device_unregister(v4l2_dev);
@@ -151,7 +142,7 @@ void mxr_release_video(struct mxr_device *mdev)
for (i = 0; i < mdev->output_cnt; ++i)
kfree(mdev->output[i]);
- vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);
+ vb2_dma_contig_clear_max_seg_size(mdev->dev);
v4l2_device_unregister(&mdev->v4l2_dev);
}
@@ -883,7 +874,7 @@ static const struct v4l2_file_operations mxr_fops = {
static int queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[],
- void *alloc_ctxs[])
+ struct device *alloc_devs[])
{
struct mxr_layer *layer = vb2_get_drv_priv(vq);
const struct mxr_format *fmt = layer->fmt;
@@ -901,7 +892,6 @@ static int queue_setup(struct vb2_queue *vq,
*nplanes = fmt->num_subframes;
for (i = 0; i < fmt->num_subframes; ++i) {
- alloc_ctxs[i] = layer->mdev->alloc_ctx;
sizes[i] = planes[i].sizeimage;
mxr_dbg(mdev, "size[%d] = %08x\n", i, sizes[i]);
}
@@ -1110,6 +1100,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
.min_buffers_needed = 1,
.mem_ops = &vb2_dma_contig_memops,
.lock = &layer->mutex,
+ .dev = mdev->dev,
};
return layer;
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
index 82b5d69b8..15a562af1 100644
--- a/drivers/media/platform/sh_veu.c
+++ b/drivers/media/platform/sh_veu.c
@@ -118,7 +118,6 @@ struct sh_veu_dev {
struct sh_veu_file *output;
struct mutex fop_lock;
void __iomem *base;
- struct vb2_alloc_ctx *alloc_ctx;
spinlock_t lock;
bool is_2h;
unsigned int xaction;
@@ -866,7 +865,7 @@ static const struct v4l2_ioctl_ops sh_veu_ioctl_ops = {
static int sh_veu_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct sh_veu_dev *veu = vb2_get_drv_priv(vq);
struct sh_veu_vfmt *vfmt = sh_veu_get_vfmt(veu, vq->type);
@@ -882,14 +881,11 @@ static int sh_veu_queue_setup(struct vb2_queue *vq,
*nbuffers = count;
}
- if (*nplanes) {
- alloc_ctxs[0] = veu->alloc_ctx;
+ if (*nplanes)
return sizes[0] < size ? -EINVAL : 0;
- }
*nplanes = 1;
sizes[0] = size;
- alloc_ctxs[0] = veu->alloc_ctx;
dev_dbg(veu->dev, "get %d buffer(s) of size %d each.\n", count, size);
@@ -948,6 +944,7 @@ static int sh_veu_queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->lock = &veu->fop_lock;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->dev = veu->v4l2_dev.dev;
ret = vb2_queue_init(src_vq);
if (ret < 0)
@@ -962,6 +959,7 @@ static int sh_veu_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->lock = &veu->fop_lock;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->dev = veu->v4l2_dev.dev;
return vb2_queue_init(dst_vq);
}
@@ -1148,12 +1146,6 @@ static int sh_veu_probe(struct platform_device *pdev)
vdev = &veu->vdev;
- veu->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
- if (IS_ERR(veu->alloc_ctx)) {
- ret = PTR_ERR(veu->alloc_ctx);
- goto einitctx;
- }
-
*vdev = sh_veu_videodev;
vdev->v4l2_dev = &veu->v4l2_dev;
spin_lock_init(&veu->lock);
@@ -1187,8 +1179,6 @@ evidreg:
pm_runtime_disable(&pdev->dev);
v4l2_m2m_release(veu->m2m_dev);
em2minit:
- vb2_dma_contig_cleanup_ctx(veu->alloc_ctx);
-einitctx:
v4l2_device_unregister(&veu->v4l2_dev);
return ret;
}
@@ -1202,7 +1192,6 @@ static int sh_veu_remove(struct platform_device *pdev)
video_unregister_device(&veu->vdev);
pm_runtime_disable(&pdev->dev);
v4l2_m2m_release(veu->m2m_dev);
- vb2_dma_contig_cleanup_ctx(veu->alloc_ctx);
v4l2_device_unregister(&veu->v4l2_dev);
return 0;
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
index 115740498..e1f39b4cf 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/sh_vou.c
@@ -86,7 +86,6 @@ struct sh_vou_device {
v4l2_std_id std;
int pix_idx;
struct vb2_queue queue;
- struct vb2_alloc_ctx *alloc_ctx;
struct sh_vou_buffer *active;
enum sh_vou_status status;
unsigned sequence;
@@ -245,7 +244,7 @@ static void sh_vou_stream_config(struct sh_vou_device *vou_dev)
/* Locking: caller holds fop_lock mutex */
static int sh_vou_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct sh_vou_device *vou_dev = vb2_get_drv_priv(vq);
struct v4l2_pix_format *pix = &vou_dev->pix;
@@ -253,7 +252,6 @@ static int sh_vou_queue_setup(struct vb2_queue *vq,
dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);
- alloc_ctxs[0] = vou_dev->alloc_ctx;
if (*nplanes)
return sizes[0] < pix->height * bytes_per_line ? -EINVAL : 0;
*nplanes = 1;
@@ -1304,16 +1302,11 @@ static int sh_vou_probe(struct platform_device *pdev)
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->min_buffers_needed = 2;
q->lock = &vou_dev->fop_lock;
+ q->dev = &pdev->dev;
ret = vb2_queue_init(q);
if (ret)
- goto einitctx;
+ goto ei2cgadap;
- vou_dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
- if (IS_ERR(vou_dev->alloc_ctx)) {
- dev_err(&pdev->dev, "Can't allocate buffer context");
- ret = PTR_ERR(vou_dev->alloc_ctx);
- goto einitctx;
- }
vdev->queue = q;
INIT_LIST_HEAD(&vou_dev->buf_list);
@@ -1348,8 +1341,6 @@ ei2cnd:
ereset:
i2c_put_adapter(i2c_adap);
ei2cgadap:
- vb2_dma_contig_cleanup_ctx(vou_dev->alloc_ctx);
-einitctx:
pm_runtime_disable(&pdev->dev);
v4l2_device_unregister(&vou_dev->v4l2_dev);
return ret;
@@ -1367,7 +1358,6 @@ static int sh_vou_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
video_unregister_device(&vou_dev->vdev);
i2c_put_adapter(client->adapter);
- vb2_dma_contig_cleanup_ctx(vou_dev->alloc_ctx);
v4l2_device_unregister(&vou_dev->v4l2_dev);
return 0;
}
diff --git a/drivers/media/platform/soc_camera/Kconfig b/drivers/media/platform/soc_camera/Kconfig
index 83029a485..39f66414f 100644
--- a/drivers/media/platform/soc_camera/Kconfig
+++ b/drivers/media/platform/soc_camera/Kconfig
@@ -25,8 +25,8 @@ config VIDEO_PXA27x
---help---
This is a v4l2 driver for the PXA27x Quick Capture Interface
-config VIDEO_RCAR_VIN
- tristate "R-Car Video Input (VIN) support"
+config VIDEO_RCAR_VIN_OLD
+ tristate "R-Car Video Input (VIN) support (DEPRECATED)"
depends on VIDEO_DEV && SOC_CAMERA
depends on ARCH_RENESAS || COMPILE_TEST
depends on HAS_DMA
diff --git a/drivers/media/platform/soc_camera/Makefile b/drivers/media/platform/soc_camera/Makefile
index 7ee71ae23..7703cb7ce 100644
--- a/drivers/media/platform/soc_camera/Makefile
+++ b/drivers/media/platform/soc_camera/Makefile
@@ -10,4 +10,4 @@ obj-$(CONFIG_VIDEO_ATMEL_ISI) += atmel-isi.o
obj-$(CONFIG_VIDEO_PXA27x) += pxa_camera.o
obj-$(CONFIG_VIDEO_SH_MOBILE_CEU) += sh_mobile_ceu_camera.o
obj-$(CONFIG_VIDEO_SH_MOBILE_CSI2) += sh_mobile_csi2.o
-obj-$(CONFIG_VIDEO_RCAR_VIN) += rcar_vin.o
+obj-$(CONFIG_VIDEO_RCAR_VIN_OLD) += rcar_vin.o
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c
index ab2d9b9b1..30211f6b4 100644
--- a/drivers/media/platform/soc_camera/atmel-isi.c
+++ b/drivers/media/platform/soc_camera/atmel-isi.c
@@ -72,8 +72,6 @@ struct atmel_isi {
int sequence;
- struct vb2_alloc_ctx *alloc_ctx;
-
/* Allocate descriptors for dma buffer use */
struct fbd *p_fb_descriptors;
dma_addr_t fb_descriptors_phys;
@@ -305,7 +303,7 @@ static int atmel_isi_wait_status(struct atmel_isi *isi, int wait_reset)
------------------------------------------------------------------*/
static int queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
@@ -322,7 +320,6 @@ static int queue_setup(struct vb2_queue *vq,
*nplanes = 1;
sizes[0] = size;
- alloc_ctxs[0] = isi->alloc_ctx;
isi->sequence = 0;
isi->active = NULL;
@@ -567,6 +564,7 @@ static int isi_camera_init_videobuf(struct vb2_queue *q,
q->mem_ops = &vb2_dma_contig_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &ici->host_lock;
+ q->dev = ici->v4l2_dev.dev;
return vb2_queue_init(q);
}
@@ -963,7 +961,6 @@ static int atmel_isi_remove(struct platform_device *pdev)
struct atmel_isi, soc_host);
soc_camera_host_unregister(soc_host);
- vb2_dma_contig_cleanup_ctx(isi->alloc_ctx);
dma_free_coherent(&pdev->dev,
sizeof(struct fbd) * MAX_BUFFER_NUM,
isi->p_fb_descriptors,
@@ -1067,12 +1064,6 @@ static int atmel_isi_probe(struct platform_device *pdev)
list_add(&isi->dma_desc[i].list, &isi->dma_desc_head);
}
- isi->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
- if (IS_ERR(isi->alloc_ctx)) {
- ret = PTR_ERR(isi->alloc_ctx);
- goto err_alloc_ctx;
- }
-
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
isi->regs = devm_ioremap_resource(&pdev->dev, regs);
if (IS_ERR(isi->regs)) {
@@ -1119,8 +1110,6 @@ err_register_soc_camera_host:
pm_runtime_disable(&pdev->dev);
err_req_irq:
err_ioremap:
- vb2_dma_contig_cleanup_ctx(isi->alloc_ctx);
-err_alloc_ctx:
dma_free_coherent(&pdev->dev,
sizeof(struct fbd) * MAX_BUFFER_NUM,
isi->p_fb_descriptors,
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c
index 3f9c1b845..9c137522c 100644
--- a/drivers/media/platform/soc_camera/rcar_vin.c
+++ b/drivers/media/platform/soc_camera/rcar_vin.c
@@ -484,7 +484,6 @@ struct rcar_vin_priv {
struct list_head capture;
#define MAX_BUFFER_NUM 3
struct vb2_v4l2_buffer *queue_buf[MAX_BUFFER_NUM];
- struct vb2_alloc_ctx *alloc_ctx;
enum v4l2_field field;
unsigned int pdata_flags;
unsigned int vb_count;
@@ -534,14 +533,12 @@ struct rcar_vin_cam {
static int rcar_vin_videobuf_setup(struct vb2_queue *vq,
unsigned int *count,
unsigned int *num_planes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct rcar_vin_priv *priv = ici->priv;
- alloc_ctxs[0] = priv->alloc_ctx;
-
if (!vq->num_buffers)
priv->sequence = 0;
@@ -1816,6 +1813,7 @@ static int rcar_vin_init_videobuf2(struct vb2_queue *vq,
vq->buf_struct_size = sizeof(struct rcar_vin_buffer);
vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
vq->lock = &ici->host_lock;
+ vq->dev = ici->v4l2_dev.dev;
return vb2_queue_init(vq);
}
@@ -1912,10 +1910,6 @@ static int rcar_vin_probe(struct platform_device *pdev)
if (ret)
return ret;
- priv->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
- if (IS_ERR(priv->alloc_ctx))
- return PTR_ERR(priv->alloc_ctx);
-
priv->ici.priv = priv;
priv->ici.v4l2_dev.dev = &pdev->dev;
priv->ici.drv_name = dev_name(&pdev->dev);
@@ -1946,7 +1940,6 @@ static int rcar_vin_probe(struct platform_device *pdev)
cleanup:
pm_runtime_disable(&pdev->dev);
- vb2_dma_contig_cleanup_ctx(priv->alloc_ctx);
return ret;
}
@@ -1954,12 +1947,9 @@ cleanup:
static int rcar_vin_remove(struct platform_device *pdev)
{
struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
- struct rcar_vin_priv *priv = container_of(soc_host,
- struct rcar_vin_priv, ici);
soc_camera_host_unregister(soc_host);
pm_runtime_disable(&pdev->dev);
- vb2_dma_contig_cleanup_ctx(priv->alloc_ctx);
return 0;
}
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
index b9f369c0f..02b519dde 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -113,7 +113,6 @@ struct sh_mobile_ceu_dev {
spinlock_t lock; /* Protects video buffer lists */
struct list_head capture;
struct vb2_v4l2_buffer *active;
- struct vb2_alloc_ctx *alloc_ctx;
struct sh_mobile_ceu_info *pdata;
struct completion complete;
@@ -211,14 +210,12 @@ static int sh_mobile_ceu_soft_reset(struct sh_mobile_ceu_dev *pcdev)
*/
static int sh_mobile_ceu_videobuf_setup(struct vb2_queue *vq,
unsigned int *count, unsigned int *num_planes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
- alloc_ctxs[0] = pcdev->alloc_ctx;
-
if (!vq->num_buffers)
pcdev->sequence = 0;
@@ -1670,6 +1667,7 @@ static int sh_mobile_ceu_init_videobuf(struct vb2_queue *q,
q->buf_struct_size = sizeof(struct sh_mobile_ceu_buffer);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &ici->host_lock;
+ q->dev = ici->v4l2_dev.dev;
return vb2_queue_init(q);
}
@@ -1822,12 +1820,6 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev)
pcdev->ici.ops = &sh_mobile_ceu_host_ops;
pcdev->ici.capabilities = SOCAM_HOST_CAP_STRIDE;
- pcdev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
- if (IS_ERR(pcdev->alloc_ctx)) {
- err = PTR_ERR(pcdev->alloc_ctx);
- goto exit_free_clk;
- }
-
if (pcdev->pdata && pcdev->pdata->asd_sizes) {
struct v4l2_async_subdev **asd;
char name[] = "sh-mobile-csi2";
@@ -1872,7 +1864,7 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev)
if (!csi2_pdev) {
err = -ENOMEM;
- goto exit_free_ctx;
+ goto exit_free_clk;
}
pcdev->csi2_pdev = csi2_pdev;
@@ -1955,8 +1947,6 @@ exit_pdev_put:
pcdev->csi2_pdev->resource = NULL;
platform_device_put(pcdev->csi2_pdev);
}
-exit_free_ctx:
- vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
exit_free_clk:
pm_runtime_disable(&pdev->dev);
exit_release_mem:
@@ -1976,7 +1966,6 @@ static int sh_mobile_ceu_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
if (platform_get_resource(pdev, IORESOURCE_MEM, 1))
dma_release_declared_memory(&pdev->dev);
- vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
if (csi2_pdev && csi2_pdev->dev.driver) {
struct module *csi2_drv = csi2_pdev->dev.driver->owner;
platform_device_del(csi2_pdev);
diff --git a/drivers/media/platform/sti/bdisp/bdisp-filter.h b/drivers/media/platform/sti/bdisp/bdisp-filter.h
index fc8c54f72..53e52fb41 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-filter.h
+++ b/drivers/media/platform/sti/bdisp/bdisp-filter.h
@@ -19,178 +19,6 @@ struct bdisp_filter_h_spec {
const u16 max;
const u8 coef[BDISP_HF_NB];
};
-
-static const struct bdisp_filter_h_spec bdisp_h_spec[] = {
- {
- .min = 0,
- .max = 921,
- .coef = {
- 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0xff, 0x07, 0x3d, 0xfc, 0x01, 0x00,
- 0x00, 0x01, 0xfd, 0x11, 0x36, 0xf9, 0x02, 0x00,
- 0x00, 0x01, 0xfb, 0x1b, 0x2e, 0xf9, 0x02, 0x00,
- 0x00, 0x01, 0xf9, 0x26, 0x26, 0xf9, 0x01, 0x00,
- 0x00, 0x02, 0xf9, 0x30, 0x19, 0xfb, 0x01, 0x00,
- 0x00, 0x02, 0xf9, 0x39, 0x0e, 0xfd, 0x01, 0x00,
- 0x00, 0x01, 0xfc, 0x3e, 0x06, 0xff, 0x00, 0x00
- }
- },
- {
- .min = 921,
- .max = 1024,
- .coef = {
- 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
- 0xff, 0x03, 0xfd, 0x08, 0x3e, 0xf9, 0x04, 0xfe,
- 0xfd, 0x06, 0xf8, 0x13, 0x3b, 0xf4, 0x07, 0xfc,
- 0xfb, 0x08, 0xf5, 0x1f, 0x34, 0xf1, 0x09, 0xfb,
- 0xfb, 0x09, 0xf2, 0x2b, 0x2a, 0xf1, 0x09, 0xfb,
- 0xfb, 0x09, 0xf2, 0x35, 0x1e, 0xf4, 0x08, 0xfb,
- 0xfc, 0x07, 0xf5, 0x3c, 0x12, 0xf7, 0x06, 0xfd,
- 0xfe, 0x04, 0xfa, 0x3f, 0x07, 0xfc, 0x03, 0xff
- }
- },
- {
- .min = 1024,
- .max = 1126,
- .coef = {
- 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
- 0xff, 0x03, 0xfd, 0x08, 0x3e, 0xf9, 0x04, 0xfe,
- 0xfd, 0x06, 0xf8, 0x13, 0x3b, 0xf4, 0x07, 0xfc,
- 0xfb, 0x08, 0xf5, 0x1f, 0x34, 0xf1, 0x09, 0xfb,
- 0xfb, 0x09, 0xf2, 0x2b, 0x2a, 0xf1, 0x09, 0xfb,
- 0xfb, 0x09, 0xf2, 0x35, 0x1e, 0xf4, 0x08, 0xfb,
- 0xfc, 0x07, 0xf5, 0x3c, 0x12, 0xf7, 0x06, 0xfd,
- 0xfe, 0x04, 0xfa, 0x3f, 0x07, 0xfc, 0x03, 0xff
- }
- },
- {
- .min = 1126,
- .max = 1228,
- .coef = {
- 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
- 0xff, 0x03, 0xfd, 0x08, 0x3e, 0xf9, 0x04, 0xfe,
- 0xfd, 0x06, 0xf8, 0x13, 0x3b, 0xf4, 0x07, 0xfc,
- 0xfb, 0x08, 0xf5, 0x1f, 0x34, 0xf1, 0x09, 0xfb,
- 0xfb, 0x09, 0xf2, 0x2b, 0x2a, 0xf1, 0x09, 0xfb,
- 0xfb, 0x09, 0xf2, 0x35, 0x1e, 0xf4, 0x08, 0xfb,
- 0xfc, 0x07, 0xf5, 0x3c, 0x12, 0xf7, 0x06, 0xfd,
- 0xfe, 0x04, 0xfa, 0x3f, 0x07, 0xfc, 0x03, 0xff
- }
- },
- {
- .min = 1228,
- .max = 1331,
- .coef = {
- 0xfd, 0x04, 0xfc, 0x05, 0x39, 0x05, 0xfc, 0x04,
- 0xfc, 0x06, 0xf9, 0x0c, 0x39, 0xfe, 0x00, 0x02,
- 0xfb, 0x08, 0xf6, 0x17, 0x35, 0xf9, 0x02, 0x00,
- 0xfc, 0x08, 0xf4, 0x20, 0x30, 0xf4, 0x05, 0xff,
- 0xfd, 0x07, 0xf4, 0x29, 0x28, 0xf3, 0x07, 0xfd,
- 0xff, 0x05, 0xf5, 0x31, 0x1f, 0xf3, 0x08, 0xfc,
- 0x00, 0x02, 0xf9, 0x38, 0x14, 0xf6, 0x08, 0xfb,
- 0x02, 0x00, 0xff, 0x3a, 0x0b, 0xf8, 0x06, 0xfc
- }
- },
- {
- .min = 1331,
- .max = 1433,
- .coef = {
- 0xfc, 0x06, 0xf9, 0x09, 0x34, 0x09, 0xf9, 0x06,
- 0xfd, 0x07, 0xf7, 0x10, 0x32, 0x02, 0xfc, 0x05,
- 0xfe, 0x07, 0xf6, 0x17, 0x2f, 0xfc, 0xff, 0x04,
- 0xff, 0x06, 0xf5, 0x20, 0x2a, 0xf9, 0x01, 0x02,
- 0x00, 0x04, 0xf6, 0x27, 0x25, 0xf6, 0x04, 0x00,
- 0x02, 0x01, 0xf9, 0x2d, 0x1d, 0xf5, 0x06, 0xff,
- 0x04, 0xff, 0xfd, 0x31, 0x15, 0xf5, 0x07, 0xfe,
- 0x05, 0xfc, 0x02, 0x35, 0x0d, 0xf7, 0x07, 0xfd
- }
- },
- {
- .min = 1433,
- .max = 1536,
- .coef = {
- 0xfe, 0x06, 0xf8, 0x0b, 0x30, 0x0b, 0xf8, 0x06,
- 0xff, 0x06, 0xf7, 0x12, 0x2d, 0x05, 0xfa, 0x06,
- 0x00, 0x04, 0xf6, 0x18, 0x2c, 0x00, 0xfc, 0x06,
- 0x01, 0x02, 0xf7, 0x1f, 0x27, 0xfd, 0xff, 0x04,
- 0x03, 0x00, 0xf9, 0x24, 0x24, 0xf9, 0x00, 0x03,
- 0x04, 0xff, 0xfd, 0x29, 0x1d, 0xf7, 0x02, 0x01,
- 0x06, 0xfc, 0x00, 0x2d, 0x17, 0xf6, 0x04, 0x00,
- 0x06, 0xfa, 0x05, 0x30, 0x0f, 0xf7, 0x06, 0xff
- }
- },
- {
- .min = 1536,
- .max = 2048,
- .coef = {
- 0x05, 0xfd, 0xfb, 0x13, 0x25, 0x13, 0xfb, 0xfd,
- 0x05, 0xfc, 0xfd, 0x17, 0x24, 0x0f, 0xf9, 0xff,
- 0x04, 0xfa, 0xff, 0x1b, 0x24, 0x0b, 0xf9, 0x00,
- 0x03, 0xf9, 0x01, 0x1f, 0x23, 0x08, 0xf8, 0x01,
- 0x02, 0xf9, 0x04, 0x22, 0x20, 0x04, 0xf9, 0x02,
- 0x01, 0xf8, 0x08, 0x25, 0x1d, 0x01, 0xf9, 0x03,
- 0x00, 0xf9, 0x0c, 0x25, 0x1a, 0xfe, 0xfa, 0x04,
- 0xff, 0xf9, 0x10, 0x26, 0x15, 0xfc, 0xfc, 0x05
- }
- },
- {
- .min = 2048,
- .max = 3072,
- .coef = {
- 0xfc, 0xfd, 0x06, 0x13, 0x18, 0x13, 0x06, 0xfd,
- 0xfc, 0xfe, 0x08, 0x15, 0x17, 0x12, 0x04, 0xfc,
- 0xfb, 0xfe, 0x0a, 0x16, 0x18, 0x10, 0x03, 0xfc,
- 0xfb, 0x00, 0x0b, 0x18, 0x17, 0x0f, 0x01, 0xfb,
- 0xfb, 0x00, 0x0d, 0x19, 0x17, 0x0d, 0x00, 0xfb,
- 0xfb, 0x01, 0x0f, 0x19, 0x16, 0x0b, 0x00, 0xfb,
- 0xfc, 0x03, 0x11, 0x19, 0x15, 0x09, 0xfe, 0xfb,
- 0xfc, 0x04, 0x12, 0x1a, 0x12, 0x08, 0xfe, 0xfc
- }
- },
- {
- .min = 3072,
- .max = 4096,
- .coef = {
- 0xfe, 0x02, 0x09, 0x0f, 0x0e, 0x0f, 0x09, 0x02,
- 0xff, 0x02, 0x09, 0x0f, 0x10, 0x0e, 0x08, 0x01,
- 0xff, 0x03, 0x0a, 0x10, 0x10, 0x0d, 0x07, 0x00,
- 0x00, 0x04, 0x0b, 0x10, 0x0f, 0x0c, 0x06, 0x00,
- 0x00, 0x05, 0x0c, 0x10, 0x0e, 0x0c, 0x05, 0x00,
- 0x00, 0x06, 0x0c, 0x11, 0x0e, 0x0b, 0x04, 0x00,
- 0x00, 0x07, 0x0d, 0x11, 0x0f, 0x0a, 0x03, 0xff,
- 0x01, 0x08, 0x0e, 0x11, 0x0e, 0x09, 0x02, 0xff
- }
- },
- {
- .min = 4096,
- .max = 5120,
- .coef = {
- 0x00, 0x04, 0x09, 0x0c, 0x0e, 0x0c, 0x09, 0x04,
- 0x01, 0x05, 0x09, 0x0c, 0x0d, 0x0c, 0x08, 0x04,
- 0x01, 0x05, 0x0a, 0x0c, 0x0e, 0x0b, 0x08, 0x03,
- 0x02, 0x06, 0x0a, 0x0d, 0x0c, 0x0b, 0x07, 0x03,
- 0x02, 0x07, 0x0a, 0x0d, 0x0d, 0x0a, 0x07, 0x02,
- 0x03, 0x07, 0x0b, 0x0d, 0x0c, 0x0a, 0x06, 0x02,
- 0x03, 0x08, 0x0b, 0x0d, 0x0d, 0x0a, 0x05, 0x01,
- 0x04, 0x08, 0x0c, 0x0d, 0x0c, 0x09, 0x05, 0x01
- }
- },
- {
- .min = 5120,
- .max = 65535,
- .coef = {
- 0x03, 0x06, 0x09, 0x0b, 0x09, 0x0b, 0x09, 0x06,
- 0x03, 0x06, 0x09, 0x0b, 0x0c, 0x0a, 0x08, 0x05,
- 0x03, 0x06, 0x09, 0x0b, 0x0c, 0x0a, 0x08, 0x05,
- 0x04, 0x07, 0x09, 0x0b, 0x0b, 0x0a, 0x08, 0x04,
- 0x04, 0x07, 0x0a, 0x0b, 0x0b, 0x0a, 0x07, 0x04,
- 0x04, 0x08, 0x0a, 0x0b, 0x0b, 0x09, 0x07, 0x04,
- 0x05, 0x08, 0x0a, 0x0b, 0x0c, 0x09, 0x06, 0x03,
- 0x05, 0x08, 0x0a, 0x0b, 0x0c, 0x09, 0x06, 0x03
- }
- }
-};
-
/**
* struct bdisp_filter_v_spec - Vertical filter specification
*
@@ -204,138 +32,6 @@ struct bdisp_filter_v_spec {
const u8 coef[BDISP_VF_NB];
};
-static const struct bdisp_filter_v_spec bdisp_v_spec[] = {
- {
- .min = 0,
- .max = 1024,
- .coef = {
- 0x00, 0x00, 0x40, 0x00, 0x00,
- 0x00, 0x06, 0x3d, 0xfd, 0x00,
- 0xfe, 0x0f, 0x38, 0xfb, 0x00,
- 0xfd, 0x19, 0x2f, 0xfb, 0x00,
- 0xfc, 0x24, 0x24, 0xfc, 0x00,
- 0xfb, 0x2f, 0x19, 0xfd, 0x00,
- 0xfb, 0x38, 0x0f, 0xfe, 0x00,
- 0xfd, 0x3d, 0x06, 0x00, 0x00
- }
- },
- {
- .min = 1024,
- .max = 1331,
- .coef = {
- 0xfc, 0x05, 0x3e, 0x05, 0xfc,
- 0xf8, 0x0e, 0x3b, 0xff, 0x00,
- 0xf5, 0x18, 0x38, 0xf9, 0x02,
- 0xf4, 0x21, 0x31, 0xf5, 0x05,
- 0xf4, 0x2a, 0x27, 0xf4, 0x07,
- 0xf6, 0x30, 0x1e, 0xf4, 0x08,
- 0xf9, 0x35, 0x15, 0xf6, 0x07,
- 0xff, 0x37, 0x0b, 0xf9, 0x06
- }
- },
- {
- .min = 1331,
- .max = 1433,
- .coef = {
- 0xf8, 0x0a, 0x3c, 0x0a, 0xf8,
- 0xf6, 0x12, 0x3b, 0x02, 0xfb,
- 0xf4, 0x1b, 0x35, 0xfd, 0xff,
- 0xf4, 0x23, 0x30, 0xf8, 0x01,
- 0xf6, 0x29, 0x27, 0xf6, 0x04,
- 0xf9, 0x2e, 0x1e, 0xf5, 0x06,
- 0xfd, 0x31, 0x16, 0xf6, 0x06,
- 0x02, 0x32, 0x0d, 0xf8, 0x07
- }
- },
- {
- .min = 1433,
- .max = 1536,
- .coef = {
- 0xf6, 0x0e, 0x38, 0x0e, 0xf6,
- 0xf5, 0x15, 0x38, 0x06, 0xf8,
- 0xf5, 0x1d, 0x33, 0x00, 0xfb,
- 0xf6, 0x23, 0x2d, 0xfc, 0xfe,
- 0xf9, 0x28, 0x26, 0xf9, 0x00,
- 0xfc, 0x2c, 0x1e, 0xf7, 0x03,
- 0x00, 0x2e, 0x18, 0xf6, 0x04,
- 0x05, 0x2e, 0x11, 0xf7, 0x05
- }
- },
- {
- .min = 1536,
- .max = 2048,
- .coef = {
- 0xfb, 0x13, 0x24, 0x13, 0xfb,
- 0xfd, 0x17, 0x23, 0x0f, 0xfa,
- 0xff, 0x1a, 0x23, 0x0b, 0xf9,
- 0x01, 0x1d, 0x22, 0x07, 0xf9,
- 0x04, 0x20, 0x1f, 0x04, 0xf9,
- 0x07, 0x22, 0x1c, 0x01, 0xfa,
- 0x0b, 0x24, 0x17, 0xff, 0xfb,
- 0x0f, 0x24, 0x14, 0xfd, 0xfc
- }
- },
- {
- .min = 2048,
- .max = 3072,
- .coef = {
- 0x05, 0x10, 0x16, 0x10, 0x05,
- 0x06, 0x11, 0x16, 0x0f, 0x04,
- 0x08, 0x13, 0x15, 0x0e, 0x02,
- 0x09, 0x14, 0x16, 0x0c, 0x01,
- 0x0b, 0x15, 0x15, 0x0b, 0x00,
- 0x0d, 0x16, 0x13, 0x0a, 0x00,
- 0x0f, 0x17, 0x13, 0x08, 0xff,
- 0x11, 0x18, 0x12, 0x07, 0xfe
- }
- },
- {
- .min = 3072,
- .max = 4096,
- .coef = {
- 0x09, 0x0f, 0x10, 0x0f, 0x09,
- 0x09, 0x0f, 0x12, 0x0e, 0x08,
- 0x0a, 0x10, 0x11, 0x0e, 0x07,
- 0x0b, 0x11, 0x11, 0x0d, 0x06,
- 0x0c, 0x11, 0x12, 0x0c, 0x05,
- 0x0d, 0x12, 0x11, 0x0c, 0x04,
- 0x0e, 0x12, 0x11, 0x0b, 0x04,
- 0x0f, 0x13, 0x11, 0x0a, 0x03
- }
- },
- {
- .min = 4096,
- .max = 5120,
- .coef = {
- 0x0a, 0x0e, 0x10, 0x0e, 0x0a,
- 0x0b, 0x0e, 0x0f, 0x0e, 0x0a,
- 0x0b, 0x0f, 0x10, 0x0d, 0x09,
- 0x0c, 0x0f, 0x10, 0x0d, 0x08,
- 0x0d, 0x0f, 0x0f, 0x0d, 0x08,
- 0x0d, 0x10, 0x10, 0x0c, 0x07,
- 0x0e, 0x10, 0x0f, 0x0c, 0x07,
- 0x0f, 0x10, 0x10, 0x0b, 0x06
- }
- },
- {
- .min = 5120,
- .max = 65535,
- .coef = {
- 0x0b, 0x0e, 0x0e, 0x0e, 0x0b,
- 0x0b, 0x0e, 0x0f, 0x0d, 0x0b,
- 0x0c, 0x0e, 0x0f, 0x0d, 0x0a,
- 0x0c, 0x0e, 0x0f, 0x0d, 0x0a,
- 0x0d, 0x0f, 0x0e, 0x0d, 0x09,
- 0x0d, 0x0f, 0x0f, 0x0c, 0x09,
- 0x0e, 0x0f, 0x0e, 0x0c, 0x09,
- 0x0e, 0x0f, 0x0f, 0x0c, 0x08
- }
- }
-};
-
-#define NB_H_FILTER ARRAY_SIZE(bdisp_h_spec)
-#define NB_V_FILTER ARRAY_SIZE(bdisp_v_spec)
-
/* RGB YUV 601 standard conversion */
static const u32 bdisp_rgb_to_yuv[] = {
0x0e1e8bee, 0x08420419, 0xfb5ed471, 0x08004080,
diff --git a/drivers/media/platform/sti/bdisp/bdisp-hw.c b/drivers/media/platform/sti/bdisp/bdisp-hw.c
index 052c932ac..b7892f3ef 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-hw.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-hw.c
@@ -47,6 +47,311 @@ struct bdisp_filter_addr {
dma_addr_t paddr; /* Physical address for filter table */
};
+static const struct bdisp_filter_h_spec bdisp_h_spec[] = {
+ {
+ .min = 0,
+ .max = 921,
+ .coef = {
+ 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0x07, 0x3d, 0xfc, 0x01, 0x00,
+ 0x00, 0x01, 0xfd, 0x11, 0x36, 0xf9, 0x02, 0x00,
+ 0x00, 0x01, 0xfb, 0x1b, 0x2e, 0xf9, 0x02, 0x00,
+ 0x00, 0x01, 0xf9, 0x26, 0x26, 0xf9, 0x01, 0x00,
+ 0x00, 0x02, 0xf9, 0x30, 0x19, 0xfb, 0x01, 0x00,
+ 0x00, 0x02, 0xf9, 0x39, 0x0e, 0xfd, 0x01, 0x00,
+ 0x00, 0x01, 0xfc, 0x3e, 0x06, 0xff, 0x00, 0x00
+ }
+ },
+ {
+ .min = 921,
+ .max = 1024,
+ .coef = {
+ 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
+ 0xff, 0x03, 0xfd, 0x08, 0x3e, 0xf9, 0x04, 0xfe,
+ 0xfd, 0x06, 0xf8, 0x13, 0x3b, 0xf4, 0x07, 0xfc,
+ 0xfb, 0x08, 0xf5, 0x1f, 0x34, 0xf1, 0x09, 0xfb,
+ 0xfb, 0x09, 0xf2, 0x2b, 0x2a, 0xf1, 0x09, 0xfb,
+ 0xfb, 0x09, 0xf2, 0x35, 0x1e, 0xf4, 0x08, 0xfb,
+ 0xfc, 0x07, 0xf5, 0x3c, 0x12, 0xf7, 0x06, 0xfd,
+ 0xfe, 0x04, 0xfa, 0x3f, 0x07, 0xfc, 0x03, 0xff
+ }
+ },
+ {
+ .min = 1024,
+ .max = 1126,
+ .coef = {
+ 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
+ 0xff, 0x03, 0xfd, 0x08, 0x3e, 0xf9, 0x04, 0xfe,
+ 0xfd, 0x06, 0xf8, 0x13, 0x3b, 0xf4, 0x07, 0xfc,
+ 0xfb, 0x08, 0xf5, 0x1f, 0x34, 0xf1, 0x09, 0xfb,
+ 0xfb, 0x09, 0xf2, 0x2b, 0x2a, 0xf1, 0x09, 0xfb,
+ 0xfb, 0x09, 0xf2, 0x35, 0x1e, 0xf4, 0x08, 0xfb,
+ 0xfc, 0x07, 0xf5, 0x3c, 0x12, 0xf7, 0x06, 0xfd,
+ 0xfe, 0x04, 0xfa, 0x3f, 0x07, 0xfc, 0x03, 0xff
+ }
+ },
+ {
+ .min = 1126,
+ .max = 1228,
+ .coef = {
+ 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
+ 0xff, 0x03, 0xfd, 0x08, 0x3e, 0xf9, 0x04, 0xfe,
+ 0xfd, 0x06, 0xf8, 0x13, 0x3b, 0xf4, 0x07, 0xfc,
+ 0xfb, 0x08, 0xf5, 0x1f, 0x34, 0xf1, 0x09, 0xfb,
+ 0xfb, 0x09, 0xf2, 0x2b, 0x2a, 0xf1, 0x09, 0xfb,
+ 0xfb, 0x09, 0xf2, 0x35, 0x1e, 0xf4, 0x08, 0xfb,
+ 0xfc, 0x07, 0xf5, 0x3c, 0x12, 0xf7, 0x06, 0xfd,
+ 0xfe, 0x04, 0xfa, 0x3f, 0x07, 0xfc, 0x03, 0xff
+ }
+ },
+ {
+ .min = 1228,
+ .max = 1331,
+ .coef = {
+ 0xfd, 0x04, 0xfc, 0x05, 0x39, 0x05, 0xfc, 0x04,
+ 0xfc, 0x06, 0xf9, 0x0c, 0x39, 0xfe, 0x00, 0x02,
+ 0xfb, 0x08, 0xf6, 0x17, 0x35, 0xf9, 0x02, 0x00,
+ 0xfc, 0x08, 0xf4, 0x20, 0x30, 0xf4, 0x05, 0xff,
+ 0xfd, 0x07, 0xf4, 0x29, 0x28, 0xf3, 0x07, 0xfd,
+ 0xff, 0x05, 0xf5, 0x31, 0x1f, 0xf3, 0x08, 0xfc,
+ 0x00, 0x02, 0xf9, 0x38, 0x14, 0xf6, 0x08, 0xfb,
+ 0x02, 0x00, 0xff, 0x3a, 0x0b, 0xf8, 0x06, 0xfc
+ }
+ },
+ {
+ .min = 1331,
+ .max = 1433,
+ .coef = {
+ 0xfc, 0x06, 0xf9, 0x09, 0x34, 0x09, 0xf9, 0x06,
+ 0xfd, 0x07, 0xf7, 0x10, 0x32, 0x02, 0xfc, 0x05,
+ 0xfe, 0x07, 0xf6, 0x17, 0x2f, 0xfc, 0xff, 0x04,
+ 0xff, 0x06, 0xf5, 0x20, 0x2a, 0xf9, 0x01, 0x02,
+ 0x00, 0x04, 0xf6, 0x27, 0x25, 0xf6, 0x04, 0x00,
+ 0x02, 0x01, 0xf9, 0x2d, 0x1d, 0xf5, 0x06, 0xff,
+ 0x04, 0xff, 0xfd, 0x31, 0x15, 0xf5, 0x07, 0xfe,
+ 0x05, 0xfc, 0x02, 0x35, 0x0d, 0xf7, 0x07, 0xfd
+ }
+ },
+ {
+ .min = 1433,
+ .max = 1536,
+ .coef = {
+ 0xfe, 0x06, 0xf8, 0x0b, 0x30, 0x0b, 0xf8, 0x06,
+ 0xff, 0x06, 0xf7, 0x12, 0x2d, 0x05, 0xfa, 0x06,
+ 0x00, 0x04, 0xf6, 0x18, 0x2c, 0x00, 0xfc, 0x06,
+ 0x01, 0x02, 0xf7, 0x1f, 0x27, 0xfd, 0xff, 0x04,
+ 0x03, 0x00, 0xf9, 0x24, 0x24, 0xf9, 0x00, 0x03,
+ 0x04, 0xff, 0xfd, 0x29, 0x1d, 0xf7, 0x02, 0x01,
+ 0x06, 0xfc, 0x00, 0x2d, 0x17, 0xf6, 0x04, 0x00,
+ 0x06, 0xfa, 0x05, 0x30, 0x0f, 0xf7, 0x06, 0xff
+ }
+ },
+ {
+ .min = 1536,
+ .max = 2048,
+ .coef = {
+ 0x05, 0xfd, 0xfb, 0x13, 0x25, 0x13, 0xfb, 0xfd,
+ 0x05, 0xfc, 0xfd, 0x17, 0x24, 0x0f, 0xf9, 0xff,
+ 0x04, 0xfa, 0xff, 0x1b, 0x24, 0x0b, 0xf9, 0x00,
+ 0x03, 0xf9, 0x01, 0x1f, 0x23, 0x08, 0xf8, 0x01,
+ 0x02, 0xf9, 0x04, 0x22, 0x20, 0x04, 0xf9, 0x02,
+ 0x01, 0xf8, 0x08, 0x25, 0x1d, 0x01, 0xf9, 0x03,
+ 0x00, 0xf9, 0x0c, 0x25, 0x1a, 0xfe, 0xfa, 0x04,
+ 0xff, 0xf9, 0x10, 0x26, 0x15, 0xfc, 0xfc, 0x05
+ }
+ },
+ {
+ .min = 2048,
+ .max = 3072,
+ .coef = {
+ 0xfc, 0xfd, 0x06, 0x13, 0x18, 0x13, 0x06, 0xfd,
+ 0xfc, 0xfe, 0x08, 0x15, 0x17, 0x12, 0x04, 0xfc,
+ 0xfb, 0xfe, 0x0a, 0x16, 0x18, 0x10, 0x03, 0xfc,
+ 0xfb, 0x00, 0x0b, 0x18, 0x17, 0x0f, 0x01, 0xfb,
+ 0xfb, 0x00, 0x0d, 0x19, 0x17, 0x0d, 0x00, 0xfb,
+ 0xfb, 0x01, 0x0f, 0x19, 0x16, 0x0b, 0x00, 0xfb,
+ 0xfc, 0x03, 0x11, 0x19, 0x15, 0x09, 0xfe, 0xfb,
+ 0xfc, 0x04, 0x12, 0x1a, 0x12, 0x08, 0xfe, 0xfc
+ }
+ },
+ {
+ .min = 3072,
+ .max = 4096,
+ .coef = {
+ 0xfe, 0x02, 0x09, 0x0f, 0x0e, 0x0f, 0x09, 0x02,
+ 0xff, 0x02, 0x09, 0x0f, 0x10, 0x0e, 0x08, 0x01,
+ 0xff, 0x03, 0x0a, 0x10, 0x10, 0x0d, 0x07, 0x00,
+ 0x00, 0x04, 0x0b, 0x10, 0x0f, 0x0c, 0x06, 0x00,
+ 0x00, 0x05, 0x0c, 0x10, 0x0e, 0x0c, 0x05, 0x00,
+ 0x00, 0x06, 0x0c, 0x11, 0x0e, 0x0b, 0x04, 0x00,
+ 0x00, 0x07, 0x0d, 0x11, 0x0f, 0x0a, 0x03, 0xff,
+ 0x01, 0x08, 0x0e, 0x11, 0x0e, 0x09, 0x02, 0xff
+ }
+ },
+ {
+ .min = 4096,
+ .max = 5120,
+ .coef = {
+ 0x00, 0x04, 0x09, 0x0c, 0x0e, 0x0c, 0x09, 0x04,
+ 0x01, 0x05, 0x09, 0x0c, 0x0d, 0x0c, 0x08, 0x04,
+ 0x01, 0x05, 0x0a, 0x0c, 0x0e, 0x0b, 0x08, 0x03,
+ 0x02, 0x06, 0x0a, 0x0d, 0x0c, 0x0b, 0x07, 0x03,
+ 0x02, 0x07, 0x0a, 0x0d, 0x0d, 0x0a, 0x07, 0x02,
+ 0x03, 0x07, 0x0b, 0x0d, 0x0c, 0x0a, 0x06, 0x02,
+ 0x03, 0x08, 0x0b, 0x0d, 0x0d, 0x0a, 0x05, 0x01,
+ 0x04, 0x08, 0x0c, 0x0d, 0x0c, 0x09, 0x05, 0x01
+ }
+ },
+ {
+ .min = 5120,
+ .max = 65535,
+ .coef = {
+ 0x03, 0x06, 0x09, 0x0b, 0x09, 0x0b, 0x09, 0x06,
+ 0x03, 0x06, 0x09, 0x0b, 0x0c, 0x0a, 0x08, 0x05,
+ 0x03, 0x06, 0x09, 0x0b, 0x0c, 0x0a, 0x08, 0x05,
+ 0x04, 0x07, 0x09, 0x0b, 0x0b, 0x0a, 0x08, 0x04,
+ 0x04, 0x07, 0x0a, 0x0b, 0x0b, 0x0a, 0x07, 0x04,
+ 0x04, 0x08, 0x0a, 0x0b, 0x0b, 0x09, 0x07, 0x04,
+ 0x05, 0x08, 0x0a, 0x0b, 0x0c, 0x09, 0x06, 0x03,
+ 0x05, 0x08, 0x0a, 0x0b, 0x0c, 0x09, 0x06, 0x03
+ }
+ }
+};
+
+#define NB_H_FILTER ARRAY_SIZE(bdisp_h_spec)
+
+
+static const struct bdisp_filter_v_spec bdisp_v_spec[] = {
+ {
+ .min = 0,
+ .max = 1024,
+ .coef = {
+ 0x00, 0x00, 0x40, 0x00, 0x00,
+ 0x00, 0x06, 0x3d, 0xfd, 0x00,
+ 0xfe, 0x0f, 0x38, 0xfb, 0x00,
+ 0xfd, 0x19, 0x2f, 0xfb, 0x00,
+ 0xfc, 0x24, 0x24, 0xfc, 0x00,
+ 0xfb, 0x2f, 0x19, 0xfd, 0x00,
+ 0xfb, 0x38, 0x0f, 0xfe, 0x00,
+ 0xfd, 0x3d, 0x06, 0x00, 0x00
+ }
+ },
+ {
+ .min = 1024,
+ .max = 1331,
+ .coef = {
+ 0xfc, 0x05, 0x3e, 0x05, 0xfc,
+ 0xf8, 0x0e, 0x3b, 0xff, 0x00,
+ 0xf5, 0x18, 0x38, 0xf9, 0x02,
+ 0xf4, 0x21, 0x31, 0xf5, 0x05,
+ 0xf4, 0x2a, 0x27, 0xf4, 0x07,
+ 0xf6, 0x30, 0x1e, 0xf4, 0x08,
+ 0xf9, 0x35, 0x15, 0xf6, 0x07,
+ 0xff, 0x37, 0x0b, 0xf9, 0x06
+ }
+ },
+ {
+ .min = 1331,
+ .max = 1433,
+ .coef = {
+ 0xf8, 0x0a, 0x3c, 0x0a, 0xf8,
+ 0xf6, 0x12, 0x3b, 0x02, 0xfb,
+ 0xf4, 0x1b, 0x35, 0xfd, 0xff,
+ 0xf4, 0x23, 0x30, 0xf8, 0x01,
+ 0xf6, 0x29, 0x27, 0xf6, 0x04,
+ 0xf9, 0x2e, 0x1e, 0xf5, 0x06,
+ 0xfd, 0x31, 0x16, 0xf6, 0x06,
+ 0x02, 0x32, 0x0d, 0xf8, 0x07
+ }
+ },
+ {
+ .min = 1433,
+ .max = 1536,
+ .coef = {
+ 0xf6, 0x0e, 0x38, 0x0e, 0xf6,
+ 0xf5, 0x15, 0x38, 0x06, 0xf8,
+ 0xf5, 0x1d, 0x33, 0x00, 0xfb,
+ 0xf6, 0x23, 0x2d, 0xfc, 0xfe,
+ 0xf9, 0x28, 0x26, 0xf9, 0x00,
+ 0xfc, 0x2c, 0x1e, 0xf7, 0x03,
+ 0x00, 0x2e, 0x18, 0xf6, 0x04,
+ 0x05, 0x2e, 0x11, 0xf7, 0x05
+ }
+ },
+ {
+ .min = 1536,
+ .max = 2048,
+ .coef = {
+ 0xfb, 0x13, 0x24, 0x13, 0xfb,
+ 0xfd, 0x17, 0x23, 0x0f, 0xfa,
+ 0xff, 0x1a, 0x23, 0x0b, 0xf9,
+ 0x01, 0x1d, 0x22, 0x07, 0xf9,
+ 0x04, 0x20, 0x1f, 0x04, 0xf9,
+ 0x07, 0x22, 0x1c, 0x01, 0xfa,
+ 0x0b, 0x24, 0x17, 0xff, 0xfb,
+ 0x0f, 0x24, 0x14, 0xfd, 0xfc
+ }
+ },
+ {
+ .min = 2048,
+ .max = 3072,
+ .coef = {
+ 0x05, 0x10, 0x16, 0x10, 0x05,
+ 0x06, 0x11, 0x16, 0x0f, 0x04,
+ 0x08, 0x13, 0x15, 0x0e, 0x02,
+ 0x09, 0x14, 0x16, 0x0c, 0x01,
+ 0x0b, 0x15, 0x15, 0x0b, 0x00,
+ 0x0d, 0x16, 0x13, 0x0a, 0x00,
+ 0x0f, 0x17, 0x13, 0x08, 0xff,
+ 0x11, 0x18, 0x12, 0x07, 0xfe
+ }
+ },
+ {
+ .min = 3072,
+ .max = 4096,
+ .coef = {
+ 0x09, 0x0f, 0x10, 0x0f, 0x09,
+ 0x09, 0x0f, 0x12, 0x0e, 0x08,
+ 0x0a, 0x10, 0x11, 0x0e, 0x07,
+ 0x0b, 0x11, 0x11, 0x0d, 0x06,
+ 0x0c, 0x11, 0x12, 0x0c, 0x05,
+ 0x0d, 0x12, 0x11, 0x0c, 0x04,
+ 0x0e, 0x12, 0x11, 0x0b, 0x04,
+ 0x0f, 0x13, 0x11, 0x0a, 0x03
+ }
+ },
+ {
+ .min = 4096,
+ .max = 5120,
+ .coef = {
+ 0x0a, 0x0e, 0x10, 0x0e, 0x0a,
+ 0x0b, 0x0e, 0x0f, 0x0e, 0x0a,
+ 0x0b, 0x0f, 0x10, 0x0d, 0x09,
+ 0x0c, 0x0f, 0x10, 0x0d, 0x08,
+ 0x0d, 0x0f, 0x0f, 0x0d, 0x08,
+ 0x0d, 0x10, 0x10, 0x0c, 0x07,
+ 0x0e, 0x10, 0x0f, 0x0c, 0x07,
+ 0x0f, 0x10, 0x10, 0x0b, 0x06
+ }
+ },
+ {
+ .min = 5120,
+ .max = 65535,
+ .coef = {
+ 0x0b, 0x0e, 0x0e, 0x0e, 0x0b,
+ 0x0b, 0x0e, 0x0f, 0x0d, 0x0b,
+ 0x0c, 0x0e, 0x0f, 0x0d, 0x0a,
+ 0x0c, 0x0e, 0x0f, 0x0d, 0x0a,
+ 0x0d, 0x0f, 0x0e, 0x0d, 0x09,
+ 0x0d, 0x0f, 0x0f, 0x0c, 0x09,
+ 0x0e, 0x0f, 0x0e, 0x0c, 0x09,
+ 0x0e, 0x0f, 0x0f, 0x0c, 0x08
+ }
+ }
+};
+
+#define NB_V_FILTER ARRAY_SIZE(bdisp_v_spec)
+
static struct bdisp_filter_addr bdisp_h_filter[NB_H_FILTER];
static struct bdisp_filter_addr bdisp_v_filter[NB_V_FILTER];
@@ -125,14 +430,11 @@ int bdisp_hw_get_and_clear_irq(struct bdisp_dev *bdisp)
*/
void bdisp_hw_free_nodes(struct bdisp_ctx *ctx)
{
- if (ctx && ctx->node[0]) {
- DEFINE_DMA_ATTRS(attrs);
-
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ if (ctx && ctx->node[0])
dma_free_attrs(ctx->bdisp_dev->dev,
sizeof(struct bdisp_node) * MAX_NB_NODE,
- ctx->node[0], ctx->node_paddr[0], &attrs);
- }
+ ctx->node[0], ctx->node_paddr[0],
+ DMA_ATTR_WRITE_COMBINE);
}
/**
@@ -150,12 +452,10 @@ int bdisp_hw_alloc_nodes(struct bdisp_ctx *ctx)
unsigned int i, node_size = sizeof(struct bdisp_node);
void *base;
dma_addr_t paddr;
- DEFINE_DMA_ATTRS(attrs);
/* Allocate all the nodes within a single memory page */
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
base = dma_alloc_attrs(dev, node_size * MAX_NB_NODE, &paddr,
- GFP_KERNEL | GFP_DMA, &attrs);
+ GFP_KERNEL | GFP_DMA, DMA_ATTR_WRITE_COMBINE);
if (!base) {
dev_err(dev, "%s no mem\n", __func__);
return -ENOMEM;
@@ -188,13 +488,9 @@ void bdisp_hw_free_filters(struct device *dev)
{
int size = (BDISP_HF_NB * NB_H_FILTER) + (BDISP_VF_NB * NB_V_FILTER);
- if (bdisp_h_filter[0].virt) {
- DEFINE_DMA_ATTRS(attrs);
-
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ if (bdisp_h_filter[0].virt)
dma_free_attrs(dev, size, bdisp_h_filter[0].virt,
- bdisp_h_filter[0].paddr, &attrs);
- }
+ bdisp_h_filter[0].paddr, DMA_ATTR_WRITE_COMBINE);
}
/**
@@ -211,12 +507,11 @@ int bdisp_hw_alloc_filters(struct device *dev)
unsigned int i, size;
void *base;
dma_addr_t paddr;
- DEFINE_DMA_ATTRS(attrs);
/* Allocate all the filters within a single memory page */
size = (BDISP_HF_NB * NB_H_FILTER) + (BDISP_VF_NB * NB_V_FILTER);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
- base = dma_alloc_attrs(dev, size, &paddr, GFP_KERNEL | GFP_DMA, &attrs);
+ base = dma_alloc_attrs(dev, size, &paddr, GFP_KERNEL | GFP_DMA,
+ DMA_ATTR_WRITE_COMBINE);
if (!base)
return -ENOMEM;
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index d12a419c0..3b1ac687d 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -439,7 +439,7 @@ static void bdisp_ctrls_delete(struct bdisp_ctx *ctx)
static int bdisp_queue_setup(struct vb2_queue *vq,
unsigned int *nb_buf, unsigned int *nb_planes,
- unsigned int sizes[], void *allocators[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct bdisp_ctx *ctx = vb2_get_drv_priv(vq);
struct bdisp_frame *frame = ctx_get_frame(ctx, vq->type);
@@ -453,7 +453,6 @@ static int bdisp_queue_setup(struct vb2_queue *vq,
dev_err(ctx->bdisp_dev->dev, "Invalid format\n");
return -EINVAL;
}
- allocators[0] = ctx->bdisp_dev->alloc_ctx;
if (*nb_planes)
return sizes[0] < frame->sizeimage ? -EINVAL : 0;
@@ -553,6 +552,7 @@ static int queue_init(void *priv,
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->lock = &ctx->bdisp_dev->lock;
+ src_vq->dev = ctx->bdisp_dev->v4l2_dev.dev;
ret = vb2_queue_init(src_vq);
if (ret)
@@ -567,6 +567,7 @@ static int queue_init(void *priv,
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->lock = &ctx->bdisp_dev->lock;
+ dst_vq->dev = ctx->bdisp_dev->v4l2_dev.dev;
return vb2_queue_init(dst_vq);
}
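[Editorial note] The queue_setup() signature change repeated throughout this commit (bdisp, cal, vpe, vim2m, vivid) is the 4.8 videobuf2 rework: the per-queue allocator context array void *alloc_ctxs[] becomes struct device *alloc_devs[], and dma-contig users drop vb2_dma_contig_init_ctx()/vb2_dma_contig_cleanup_ctx() in favour of setting the queue's device pointer once, as the src_vq->dev/dst_vq->dev lines above do. A sketch of the resulting init sequence, assuming a generic driver context named ctx:

	struct vb2_queue *q = &ctx->queue;	/* hypothetical driver field */
	int ret;

	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_DMABUF;
	q->mem_ops = &vb2_dma_contig_memops;
	q->dev = dev;		/* replaces vb2_dma_contig_init_ctx(dev) */
	ret = vb2_queue_init(q);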
@@ -1269,8 +1270,6 @@ static int bdisp_remove(struct platform_device *pdev)
bdisp_hw_free_filters(bdisp->dev);
- vb2_dma_contig_cleanup_ctx(bdisp->alloc_ctx);
-
pm_runtime_disable(&pdev->dev);
bdisp_debugfs_remove(bdisp);
@@ -1371,18 +1370,11 @@ static int bdisp_probe(struct platform_device *pdev)
goto err_dbg;
}
- /* Continuous memory allocator */
- bdisp->alloc_ctx = vb2_dma_contig_init_ctx(dev);
- if (IS_ERR(bdisp->alloc_ctx)) {
- ret = PTR_ERR(bdisp->alloc_ctx);
- goto err_pm;
- }
-
/* Filters */
if (bdisp_hw_alloc_filters(bdisp->dev)) {
dev_err(bdisp->dev, "no memory for filters\n");
ret = -ENOMEM;
- goto err_vb2_dma;
+ goto err_pm;
}
/* Register */
@@ -1401,8 +1393,6 @@ static int bdisp_probe(struct platform_device *pdev)
err_filter:
bdisp_hw_free_filters(bdisp->dev);
-err_vb2_dma:
- vb2_dma_contig_cleanup_ctx(bdisp->alloc_ctx);
err_pm:
pm_runtime_put(dev);
err_dbg:
diff --git a/drivers/media/platform/sti/bdisp/bdisp.h b/drivers/media/platform/sti/bdisp/bdisp.h
index 0cf985772..b3fbf9902 100644
--- a/drivers/media/platform/sti/bdisp/bdisp.h
+++ b/drivers/media/platform/sti/bdisp/bdisp.h
@@ -175,7 +175,6 @@ struct bdisp_dbg {
* @id: device index
* @m2m: memory-to-memory V4L2 device information
* @state: flags used to synchronize m2m and capture mode operation
- * @alloc_ctx: videobuf2 memory allocator context
* @clock: IP clock
* @regs: registers
* @irq_queue: interrupt handler waitqueue
@@ -193,7 +192,6 @@ struct bdisp_dev {
u16 id;
struct bdisp_m2m_device m2m;
unsigned long state;
- struct vb2_alloc_ctx *alloc_ctx;
struct clk *clock;
void __iomem *regs;
wait_queue_head_t irq_queue;
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
index ca0082b5e..cb0c05055 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
@@ -1161,6 +1161,7 @@ static int load_c8sectpfe_fw(struct c8sectpfei *fei)
if (err) {
dev_err(fei->dev, "c8sectpfe_elf_sanity_check failed err=(%d)\n"
, err);
+ release_firmware(fw);
return err;
}
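[Editorial note] The one-line c8sectpfe fix above plugs a firmware leak: the sanity-check error path returned without dropping the blob obtained from request_firmware(). The general pairing, sketched below (sanity_check() stands in for the driver's ELF check; the firmware name is illustrative):

	const struct firmware *fw;
	int err;

	err = request_firmware(&fw, "pti_firmware.elf", dev);
	if (err)
		return err;

	err = sanity_check(fw);		/* hypothetical helper */
	if (err) {
		release_firmware(fw);	/* the line the patch adds */
		return err;
	}

	/* ... consume fw ... */
	release_firmware(fw);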
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
index 82001e6b5..e967fcfdc 100644
--- a/drivers/media/platform/ti-vpe/cal.c
+++ b/drivers/media/platform/ti-vpe/cal.c
@@ -287,7 +287,6 @@ struct cal_ctx {
/* Several counters */
unsigned long jiffies;
- struct vb2_alloc_ctx *alloc_ctx;
struct cal_dmaqueue vidq;
/* Input Number */
@@ -1226,14 +1225,13 @@ static int cal_enum_frameintervals(struct file *file, void *priv,
*/
static int cal_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct cal_ctx *ctx = vb2_get_drv_priv(vq);
unsigned size = ctx->v_fmt.fmt.pix.sizeimage;
if (vq->num_buffers + *nbuffers < 3)
*nbuffers = 3 - vq->num_buffers;
- alloc_ctxs[0] = ctx->alloc_ctx;
if (*nplanes) {
if (sizes[0] < size)
@@ -1551,6 +1549,7 @@ static int cal_complete_ctx(struct cal_ctx *ctx)
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &ctx->mutex;
q->min_buffers_needed = 3;
+ q->dev = ctx->v4l2_dev.dev;
ret = vb2_queue_init(q);
if (ret)
@@ -1578,18 +1577,7 @@ static int cal_complete_ctx(struct cal_ctx *ctx)
v4l2_info(&ctx->v4l2_dev, "V4L2 device registered as %s\n",
video_device_node_name(vfd));
- ctx->alloc_ctx = vb2_dma_contig_init_ctx(vfd->v4l2_dev->dev);
- if (IS_ERR(ctx->alloc_ctx)) {
- ctx_err(ctx, "Failed to alloc vb2 context\n");
- ret = PTR_ERR(ctx->alloc_ctx);
- goto vdev_unreg;
- }
-
return 0;
-
-vdev_unreg:
- video_unregister_device(vfd);
- return ret;
}
static struct device_node *
@@ -1914,7 +1902,6 @@ static int cal_remove(struct platform_device *pdev)
video_device_node_name(&ctx->vdev));
camerarx_phy_disable(ctx);
v4l2_async_notifier_unregister(&ctx->notifier);
- vb2_dma_contig_cleanup_ctx(ctx->alloc_ctx);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
v4l2_device_unregister(&ctx->v4l2_dev);
video_unregister_device(&ctx->vdev);
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index 1fa00c2cf..55a1458ac 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -362,7 +362,6 @@ struct vpe_dev {
void __iomem *base;
struct resource *res;
- struct vb2_alloc_ctx *alloc_ctx;
struct vpdma_data *vpdma; /* vpdma data handle */
struct sc_data *sc; /* scaler data handle */
struct csc_data *csc; /* csc data handle */
@@ -1797,7 +1796,7 @@ static const struct v4l2_ioctl_ops vpe_ioctl_ops = {
*/
static int vpe_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
int i;
struct vpe_ctx *ctx = vb2_get_drv_priv(vq);
@@ -1807,10 +1806,8 @@ static int vpe_queue_setup(struct vb2_queue *vq,
*nplanes = q_data->fmt->coplanar ? 2 : 1;
- for (i = 0; i < *nplanes; i++) {
+ for (i = 0; i < *nplanes; i++)
sizes[i] = q_data->sizeimage[i];
- alloc_ctxs[i] = ctx->dev->alloc_ctx;
- }
vpe_dbg(ctx->dev, "get %d buffer(s) of size %d", *nbuffers,
sizes[VPE_LUMA]);
@@ -1907,6 +1904,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->lock = &dev->dev_mutex;
+ src_vq->dev = dev->v4l2_dev.dev;
ret = vb2_queue_init(src_vq);
if (ret)
@@ -1921,6 +1919,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->lock = &dev->dev_mutex;
+ dst_vq->dev = dev->v4l2_dev.dev;
return vb2_queue_init(dst_vq);
}
@@ -2161,7 +2160,6 @@ static void vpe_fw_cb(struct platform_device *pdev)
vpe_runtime_put(pdev);
pm_runtime_disable(&pdev->dev);
v4l2_m2m_release(dev->m2m_dev);
- vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
v4l2_device_unregister(&dev->v4l2_dev);
return;
@@ -2213,18 +2211,11 @@ static int vpe_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
- dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
- if (IS_ERR(dev->alloc_ctx)) {
- vpe_err(dev, "Failed to alloc vb2 context\n");
- ret = PTR_ERR(dev->alloc_ctx);
- goto v4l2_dev_unreg;
- }
-
dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
if (IS_ERR(dev->m2m_dev)) {
vpe_err(dev, "Failed to init mem2mem device\n");
ret = PTR_ERR(dev->m2m_dev);
- goto rel_ctx;
+ goto v4l2_dev_unreg;
}
pm_runtime_enable(&pdev->dev);
@@ -2269,8 +2260,6 @@ runtime_put:
rel_m2m:
pm_runtime_disable(&pdev->dev);
v4l2_m2m_release(dev->m2m_dev);
-rel_ctx:
- vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
v4l2_dev_unreg:
v4l2_device_unregister(&dev->v4l2_dev);
@@ -2286,7 +2275,6 @@ static int vpe_remove(struct platform_device *pdev)
v4l2_m2m_release(dev->m2m_dev);
video_unregister_device(&dev->vfd);
v4l2_device_unregister(&dev->v4l2_dev);
- vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
vpe_set_clock_enable(dev, 0);
vpe_runtime_put(pdev);
diff --git a/drivers/media/platform/via-camera.c b/drivers/media/platform/via-camera.c
index 1254f7e4d..7ca12deba 100644
--- a/drivers/media/platform/via-camera.c
+++ b/drivers/media/platform/via-camera.c
@@ -240,7 +240,7 @@ static int viacam_set_flip(struct via_camera *cam)
memset(&ctrl, 0, sizeof(ctrl));
ctrl.id = V4L2_CID_VFLIP;
ctrl.value = flip_image;
- return sensor_call(cam, core, s_ctrl, &ctrl);
+ return v4l2_s_ctrl(NULL, cam->sensor->ctrl_handler, &ctrl);
}
/*
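[Editorial note] The via-camera hunk above moves off the deprecated subdev core .s_ctrl op and drives the sensor's control through the control framework instead. A short sketch of the replacement call (NULL for the file handle since this is an in-kernel caller, so no priority checks apply):

	#include <media/v4l2-ctrls.h>

	struct v4l2_control ctrl = {
		.id	= V4L2_CID_VFLIP,
		.value	= 1,
	};
	int err;

	/* hdl is the sensor subdev's v4l2_ctrl_handler */
	err = v4l2_s_ctrl(NULL, cam->sensor->ctrl_handler, &ctrl);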
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index c4b5fab83..cd0ff4a66 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -171,6 +171,9 @@ struct vim2m_ctx {
int mode;
enum v4l2_colorspace colorspace;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_xfer_func xfer_func;
+ enum v4l2_quantization quant;
/* Source and destination queue data */
struct vim2m_q_data q_data[2];
@@ -493,6 +496,9 @@ static int vidioc_g_fmt(struct vim2m_ctx *ctx, struct v4l2_format *f)
f->fmt.pix.bytesperline = (q_data->width * q_data->fmt->depth) >> 3;
f->fmt.pix.sizeimage = q_data->sizeimage;
f->fmt.pix.colorspace = ctx->colorspace;
+ f->fmt.pix.xfer_func = ctx->xfer_func;
+ f->fmt.pix.ycbcr_enc = ctx->ycbcr_enc;
+ f->fmt.pix.quantization = ctx->quant;
return 0;
}
@@ -549,6 +555,9 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
return -EINVAL;
}
f->fmt.pix.colorspace = ctx->colorspace;
+ f->fmt.pix.xfer_func = ctx->xfer_func;
+ f->fmt.pix.ycbcr_enc = ctx->ycbcr_enc;
+ f->fmt.pix.quantization = ctx->quant;
return vidioc_try_fmt(f, fmt);
}
@@ -630,8 +639,12 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
return ret;
ret = vidioc_s_fmt(file2ctx(file), f);
- if (!ret)
+ if (!ret) {
ctx->colorspace = f->fmt.pix.colorspace;
+ ctx->xfer_func = f->fmt.pix.xfer_func;
+ ctx->ycbcr_enc = f->fmt.pix.ycbcr_enc;
+ ctx->quant = f->fmt.pix.quantization;
+ }
return ret;
}
@@ -711,7 +724,7 @@ static const struct v4l2_ioctl_ops vim2m_ioctl_ops = {
static int vim2m_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct vim2m_ctx *ctx = vb2_get_drv_priv(vq);
struct vim2m_q_data *q_data;
@@ -731,11 +744,6 @@ static int vim2m_queue_setup(struct vb2_queue *vq,
*nplanes = 1;
sizes[0] = size;
- /*
- * videobuf2-vmalloc allocator is context-less so no need to set
- * alloc_ctxs array.
- */
-
dprintk(ctx->dev, "get %d buffer(s) of size %d each.\n", count, size);
return 0;
diff --git a/drivers/media/platform/vivid/Kconfig b/drivers/media/platform/vivid/Kconfig
index f535f5769..8e6918c5c 100644
--- a/drivers/media/platform/vivid/Kconfig
+++ b/drivers/media/platform/vivid/Kconfig
@@ -6,6 +6,7 @@ config VIDEO_VIVID
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
+ select MEDIA_CEC_EDID
select VIDEOBUF2_VMALLOC
select VIDEO_V4L2_TPG
default n
@@ -22,6 +23,13 @@ config VIDEO_VIVID
Say Y here if you want to test video apps or debug V4L devices.
When in doubt, say N.
+config VIDEO_VIVID_CEC
+ bool "Enable CEC emulation support"
+ depends on VIDEO_VIVID && MEDIA_CEC
+ ---help---
+ When selected the vivid module will emulate the optional
+ HDMI CEC feature.
+
config VIDEO_VIVID_MAX_DEVS
int "Maximum number of devices"
depends on VIDEO_VIVID
diff --git a/drivers/media/platform/vivid/Makefile b/drivers/media/platform/vivid/Makefile
index 633c8a1b2..29738810e 100644
--- a/drivers/media/platform/vivid/Makefile
+++ b/drivers/media/platform/vivid/Makefile
@@ -3,4 +3,8 @@ vivid-objs := vivid-core.o vivid-ctrls.o vivid-vid-common.o vivid-vbi-gen.o \
vivid-radio-rx.o vivid-radio-tx.o vivid-radio-common.o \
vivid-rds-gen.o vivid-sdr-cap.o vivid-vbi-cap.o vivid-vbi-out.o \
vivid-osd.o
+ifeq ($(CONFIG_VIDEO_VIVID_CEC),y)
+ vivid-objs += vivid-cec.o
+endif
+
obj-$(CONFIG_VIDEO_VIVID) += vivid.o
diff --git a/drivers/media/platform/vivid/vivid-cec.c b/drivers/media/platform/vivid/vivid-cec.c
new file mode 100644
index 000000000..f9f878b8e
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-cec.c
@@ -0,0 +1,231 @@
+/*
+ * vivid-cec.c - A Virtual Video Test Driver, cec emulation
+ *
+ * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <media/cec.h>
+
+#include "vivid-core.h"
+#include "vivid-cec.h"
+
+void vivid_cec_bus_free_work(struct vivid_dev *dev)
+{
+ spin_lock(&dev->cec_slock);
+ while (!list_empty(&dev->cec_work_list)) {
+ struct vivid_cec_work *cw =
+ list_first_entry(&dev->cec_work_list,
+ struct vivid_cec_work, list);
+
+ spin_unlock(&dev->cec_slock);
+ cancel_delayed_work_sync(&cw->work);
+ spin_lock(&dev->cec_slock);
+ list_del(&cw->list);
+ cec_transmit_done(cw->adap, CEC_TX_STATUS_LOW_DRIVE, 0, 0, 1, 0);
+ kfree(cw);
+ }
+ spin_unlock(&dev->cec_slock);
+}
+
+static bool vivid_cec_find_dest_adap(struct vivid_dev *dev,
+ struct cec_adapter *adap, u8 dest)
+{
+ unsigned int i;
+
+ if (dest >= 0xf)
+ return false;
+
+ if (adap != dev->cec_rx_adap && dev->cec_rx_adap &&
+ dev->cec_rx_adap->is_configured &&
+ cec_has_log_addr(dev->cec_rx_adap, dest))
+ return true;
+
+ for (i = 0; i < MAX_OUTPUTS && dev->cec_tx_adap[i]; i++) {
+ if (adap == dev->cec_tx_adap[i])
+ continue;
+ if (!dev->cec_tx_adap[i]->is_configured)
+ continue;
+ if (cec_has_log_addr(dev->cec_tx_adap[i], dest))
+ return true;
+ }
+ return false;
+}
+
+static void vivid_cec_xfer_done_worker(struct work_struct *work)
+{
+ struct vivid_cec_work *cw =
+ container_of(work, struct vivid_cec_work, work.work);
+ struct vivid_dev *dev = cw->dev;
+ struct cec_adapter *adap = cw->adap;
+ u8 dest = cec_msg_destination(&cw->msg);
+ bool valid_dest;
+ unsigned int i;
+
+ valid_dest = cec_msg_is_broadcast(&cw->msg);
+ if (!valid_dest)
+ valid_dest = vivid_cec_find_dest_adap(dev, adap, dest);
+
+ cw->tx_status = valid_dest ? CEC_TX_STATUS_OK : CEC_TX_STATUS_NACK;
+ spin_lock(&dev->cec_slock);
+ dev->cec_xfer_time_jiffies = 0;
+ dev->cec_xfer_start_jiffies = 0;
+ list_del(&cw->list);
+ spin_unlock(&dev->cec_slock);
+ cec_transmit_done(cw->adap, cw->tx_status, 0, valid_dest ? 0 : 1, 0, 0);
+
+ /* Broadcast message */
+ if (adap != dev->cec_rx_adap)
+ cec_received_msg(dev->cec_rx_adap, &cw->msg);
+ for (i = 0; i < MAX_OUTPUTS && dev->cec_tx_adap[i]; i++)
+ if (adap != dev->cec_tx_adap[i])
+ cec_received_msg(dev->cec_tx_adap[i], &cw->msg);
+ kfree(cw);
+}
+
+static void vivid_cec_xfer_try_worker(struct work_struct *work)
+{
+ struct vivid_cec_work *cw =
+ container_of(work, struct vivid_cec_work, work.work);
+ struct vivid_dev *dev = cw->dev;
+
+ spin_lock(&dev->cec_slock);
+ if (dev->cec_xfer_time_jiffies) {
+ list_del(&cw->list);
+ spin_unlock(&dev->cec_slock);
+ cec_transmit_done(cw->adap, CEC_TX_STATUS_ARB_LOST, 1, 0, 0, 0);
+ kfree(cw);
+ } else {
+ INIT_DELAYED_WORK(&cw->work, vivid_cec_xfer_done_worker);
+ dev->cec_xfer_start_jiffies = jiffies;
+ dev->cec_xfer_time_jiffies = usecs_to_jiffies(cw->usecs);
+ spin_unlock(&dev->cec_slock);
+ schedule_delayed_work(&cw->work, dev->cec_xfer_time_jiffies);
+ }
+}
+
+static int vivid_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ return 0;
+}
+
+static int vivid_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
+{
+ return 0;
+}
+
+/*
+ * One data bit takes 2400 us, each byte needs 10 bits so that's 24000 us
+ * per byte.
+ */
+#define USECS_PER_BYTE 24000
+
+static int vivid_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct vivid_dev *dev = adap->priv;
+ struct vivid_cec_work *cw = kzalloc(sizeof(*cw), GFP_KERNEL);
+ long delta_jiffies = 0;
+
+ if (cw == NULL)
+ return -ENOMEM;
+ cw->dev = dev;
+ cw->adap = adap;
+ cw->usecs = CEC_FREE_TIME_TO_USEC(signal_free_time) +
+ msg->len * USECS_PER_BYTE;
+ cw->msg = *msg;
+
+ spin_lock(&dev->cec_slock);
+ list_add(&cw->list, &dev->cec_work_list);
+ if (dev->cec_xfer_time_jiffies == 0) {
+ INIT_DELAYED_WORK(&cw->work, vivid_cec_xfer_done_worker);
+ dev->cec_xfer_start_jiffies = jiffies;
+ dev->cec_xfer_time_jiffies = usecs_to_jiffies(cw->usecs);
+ delta_jiffies = dev->cec_xfer_time_jiffies;
+ } else {
+ INIT_DELAYED_WORK(&cw->work, vivid_cec_xfer_try_worker);
+ delta_jiffies = dev->cec_xfer_start_jiffies +
+ dev->cec_xfer_time_jiffies - jiffies;
+ }
+ spin_unlock(&dev->cec_slock);
+ schedule_delayed_work(&cw->work, delta_jiffies < 0 ? 0 : delta_jiffies);
+ return 0;
+}
+
+static int vivid_received(struct cec_adapter *adap, struct cec_msg *msg)
+{
+ struct vivid_dev *dev = adap->priv;
+ struct cec_msg reply;
+ u8 dest = cec_msg_destination(msg);
+ u8 disp_ctl;
+ char osd[14];
+
+ if (cec_msg_is_broadcast(msg))
+ dest = adap->log_addrs.log_addr[0];
+ cec_msg_init(&reply, dest, cec_msg_initiator(msg));
+
+ switch (cec_msg_opcode(msg)) {
+ case CEC_MSG_SET_OSD_STRING:
+ if (!cec_is_sink(adap))
+ return -ENOMSG;
+ cec_ops_set_osd_string(msg, &disp_ctl, osd);
+ switch (disp_ctl) {
+ case CEC_OP_DISP_CTL_DEFAULT:
+ strcpy(dev->osd, osd);
+ dev->osd_jiffies = jiffies;
+ break;
+ case CEC_OP_DISP_CTL_UNTIL_CLEARED:
+ strcpy(dev->osd, osd);
+ dev->osd_jiffies = 0;
+ break;
+ case CEC_OP_DISP_CTL_CLEAR:
+ dev->osd[0] = 0;
+ dev->osd_jiffies = 0;
+ break;
+ default:
+ cec_msg_feature_abort(&reply, cec_msg_opcode(msg),
+ CEC_OP_ABORT_INVALID_OP);
+ cec_transmit_msg(adap, &reply, false);
+ break;
+ }
+ break;
+ default:
+ return -ENOMSG;
+ }
+ return 0;
+}
+
+static const struct cec_adap_ops vivid_cec_adap_ops = {
+ .adap_enable = vivid_cec_adap_enable,
+ .adap_log_addr = vivid_cec_adap_log_addr,
+ .adap_transmit = vivid_cec_adap_transmit,
+ .received = vivid_received,
+};
+
+struct cec_adapter *vivid_cec_alloc_adap(struct vivid_dev *dev,
+ unsigned int idx,
+ struct device *parent,
+ bool is_source)
+{
+ char name[sizeof(dev->vid_out_dev.name) + 2];
+ u32 caps = CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS |
+ CEC_CAP_PASSTHROUGH | CEC_CAP_RC | CEC_CAP_MONITOR_ALL;
+
+ snprintf(name, sizeof(name), "%s%d",
+ is_source ? dev->vid_out_dev.name : dev->vid_cap_dev.name,
+ idx);
+ return cec_allocate_adapter(&vivid_cec_adap_ops, dev,
+ name, caps, 1, parent);
+}
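[Editorial note] A quick back-of-envelope check of the transmit delay vivid_cec_adap_transmit() emulates, assuming CEC_FREE_TIME_TO_USEC() converts signal-free bit periods at the 2400 us/bit figure stated in the comment above:

	/*
	 *	usecs = signal_free_time * 2400 + msg->len * 24000
	 *
	 * e.g. a 2-byte message with a 5-bit-period signal-free time:
	 *
	 *	5 * 2400 + 2 * 24000 = 60000 us
	 *
	 * so the done-worker queued above fires roughly 60 ms later,
	 * matching real CEC bus timing.
	 */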
diff --git a/drivers/media/platform/vivid/vivid-cec.h b/drivers/media/platform/vivid/vivid-cec.h
new file mode 100644
index 000000000..97892afa6
--- /dev/null
+++ b/drivers/media/platform/vivid/vivid-cec.h
@@ -0,0 +1,33 @@
+/*
+ * vivid-cec.h - A Virtual Video Test Driver, cec emulation
+ *
+ * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifdef CONFIG_VIDEO_VIVID_CEC
+struct cec_adapter *vivid_cec_alloc_adap(struct vivid_dev *dev,
+ unsigned int idx,
+ struct device *parent,
+ bool is_source);
+void vivid_cec_bus_free_work(struct vivid_dev *dev);
+
+#else
+
+static inline void vivid_cec_bus_free_work(struct vivid_dev *dev)
+{
+}
+
+#endif
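[Editorial note] The header above uses the usual compiled-out-feature idiom: the real prototype is paired with an empty static inline stub so that callers need no #ifdef of their own (vivid_cec_alloc_adap() gets no stub because its only call sites are themselves guarded). The pattern, sketched with a hypothetical CONFIG_FOO symbol:

	#ifdef CONFIG_FOO			/* hypothetical symbol */
	void foo_do_work(struct foo_dev *dev);
	#else
	static inline void foo_do_work(struct foo_dev *dev)
	{
	}
	#endif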
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index c14da84af..7f937136c 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -46,6 +46,7 @@
#include "vivid-vbi-cap.h"
#include "vivid-vbi-out.h"
#include "vivid-osd.h"
+#include "vivid-cec.h"
#include "vivid-ctrls.h"
#define VIVID_MODULE_NAME "vivid"
@@ -684,6 +685,11 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->input_name_counter[i] = in_type_counter[dev->input_type[i]]++;
}
dev->has_audio_inputs = in_type_counter[TV] && in_type_counter[SVID];
+ if (in_type_counter[HDMI] == 16) {
+ /* The CEC physical address only allows for max 15 inputs */
+ in_type_counter[HDMI]--;
+ dev->num_inputs--;
+ }
/* how many outputs do we have and of what type? */
dev->num_outputs = num_outputs[inst];
@@ -696,6 +702,15 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->output_name_counter[i] = out_type_counter[dev->output_type[i]]++;
}
dev->has_audio_outputs = out_type_counter[SVID];
+ if (out_type_counter[HDMI] == 16) {
+ /*
+ * The CEC physical address only allows for max 15 inputs,
+ * so outputs are also limited to 15 to allow for easy
+ * CEC output to input mapping.
+ */
+ out_type_counter[HDMI]--;
+ dev->num_outputs--;
+ }
/* do we create a video capture device? */
dev->has_vid_cap = node_type & 0x0001;
@@ -1010,6 +1025,17 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
INIT_LIST_HEAD(&dev->vbi_out_active);
INIT_LIST_HEAD(&dev->sdr_cap_active);
+ INIT_LIST_HEAD(&dev->cec_work_list);
+ spin_lock_init(&dev->cec_slock);
+ /*
+ * Same as create_singlethread_workqueue, but now I can use the
+ * string formatting of alloc_ordered_workqueue.
+ */
+ dev->cec_workqueue =
+ alloc_ordered_workqueue("vivid-%03d-cec", WQ_MEM_RECLAIM, inst);
+ if (!dev->cec_workqueue)
+ goto unreg_dev;
+
/* start creating the vb2 queues */
if (dev->has_vid_cap) {
/* initialize vid_cap queue */
@@ -1117,7 +1143,8 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
/* finally start creating the device nodes */
if (dev->has_vid_cap) {
vfd = &dev->vid_cap_dev;
- strlcpy(vfd->name, "vivid-vid-cap", sizeof(vfd->name));
+ snprintf(vfd->name, sizeof(vfd->name),
+ "vivid-%03d-vid-cap", inst);
vfd->fops = &vivid_fops;
vfd->ioctl_ops = &vivid_ioctl_ops;
vfd->device_caps = dev->vid_cap_caps;
@@ -1133,6 +1160,27 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
vfd->lock = &dev->mutex;
video_set_drvdata(vfd, dev);
+#ifdef CONFIG_VIDEO_VIVID_CEC
+ if (in_type_counter[HDMI]) {
+ struct cec_adapter *adap;
+
+ adap = vivid_cec_alloc_adap(dev, 0, &pdev->dev, false);
+ ret = PTR_ERR_OR_ZERO(adap);
+ if (ret < 0)
+ goto unreg_dev;
+ dev->cec_rx_adap = adap;
+ ret = cec_register_adapter(adap);
+ if (ret < 0) {
+ cec_delete_adapter(adap);
+ dev->cec_rx_adap = NULL;
+ goto unreg_dev;
+ }
+ cec_s_phys_addr(adap, 0, false);
+ v4l2_info(&dev->v4l2_dev, "CEC adapter %s registered for HDMI input %d\n",
+ dev_name(&adap->devnode.dev), i);
+ }
+#endif
+
ret = video_register_device(vfd, VFL_TYPE_GRABBER, vid_cap_nr[inst]);
if (ret < 0)
goto unreg_dev;
@@ -1141,8 +1189,13 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
}
if (dev->has_vid_out) {
+#ifdef CONFIG_VIDEO_VIVID_CEC
+ unsigned int bus_cnt = 0;
+#endif
+
vfd = &dev->vid_out_dev;
- strlcpy(vfd->name, "vivid-vid-out", sizeof(vfd->name));
+ snprintf(vfd->name, sizeof(vfd->name),
+ "vivid-%03d-vid-out", inst);
vfd->vfl_dir = VFL_DIR_TX;
vfd->fops = &vivid_fops;
vfd->ioctl_ops = &vivid_ioctl_ops;
@@ -1159,6 +1212,35 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
vfd->lock = &dev->mutex;
video_set_drvdata(vfd, dev);
+#ifdef CONFIG_VIDEO_VIVID_CEC
+ for (i = 0; i < dev->num_outputs; i++) {
+ struct cec_adapter *adap;
+
+ if (dev->output_type[i] != HDMI)
+ continue;
+ dev->cec_output2bus_map[i] = bus_cnt;
+ adap = vivid_cec_alloc_adap(dev, bus_cnt,
+ &pdev->dev, true);
+ ret = PTR_ERR_OR_ZERO(adap);
+ if (ret < 0)
+ goto unreg_dev;
+ dev->cec_tx_adap[bus_cnt] = adap;
+ ret = cec_register_adapter(adap);
+ if (ret < 0) {
+ cec_delete_adapter(adap);
+ dev->cec_tx_adap[bus_cnt] = NULL;
+ goto unreg_dev;
+ }
+ bus_cnt++;
+ if (bus_cnt <= out_type_counter[HDMI])
+ cec_s_phys_addr(adap, bus_cnt << 12, false);
+ else
+ cec_s_phys_addr(adap, 0x1000, false);
+ v4l2_info(&dev->v4l2_dev, "CEC adapter %s registered for HDMI output %d\n",
+ dev_name(&adap->devnode.dev), i);
+ }
+#endif
+
ret = video_register_device(vfd, VFL_TYPE_GRABBER, vid_out_nr[inst]);
if (ret < 0)
goto unreg_dev;
@@ -1168,7 +1250,8 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
if (dev->has_vbi_cap) {
vfd = &dev->vbi_cap_dev;
- strlcpy(vfd->name, "vivid-vbi-cap", sizeof(vfd->name));
+ snprintf(vfd->name, sizeof(vfd->name),
+ "vivid-%03d-vbi-cap", inst);
vfd->fops = &vivid_fops;
vfd->ioctl_ops = &vivid_ioctl_ops;
vfd->device_caps = dev->vbi_cap_caps;
@@ -1191,7 +1274,8 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
if (dev->has_vbi_out) {
vfd = &dev->vbi_out_dev;
- strlcpy(vfd->name, "vivid-vbi-out", sizeof(vfd->name));
+ snprintf(vfd->name, sizeof(vfd->name),
+ "vivid-%03d-vbi-out", inst);
vfd->vfl_dir = VFL_DIR_TX;
vfd->fops = &vivid_fops;
vfd->ioctl_ops = &vivid_ioctl_ops;
@@ -1215,7 +1299,8 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
if (dev->has_sdr_cap) {
vfd = &dev->sdr_cap_dev;
- strlcpy(vfd->name, "vivid-sdr-cap", sizeof(vfd->name));
+ snprintf(vfd->name, sizeof(vfd->name),
+ "vivid-%03d-sdr-cap", inst);
vfd->fops = &vivid_fops;
vfd->ioctl_ops = &vivid_ioctl_ops;
vfd->device_caps = dev->sdr_cap_caps;
@@ -1234,7 +1319,8 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
if (dev->has_radio_rx) {
vfd = &dev->radio_rx_dev;
- strlcpy(vfd->name, "vivid-rad-rx", sizeof(vfd->name));
+ snprintf(vfd->name, sizeof(vfd->name),
+ "vivid-%03d-rad-rx", inst);
vfd->fops = &vivid_radio_fops;
vfd->ioctl_ops = &vivid_ioctl_ops;
vfd->device_caps = dev->radio_rx_caps;
@@ -1252,7 +1338,8 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
if (dev->has_radio_tx) {
vfd = &dev->radio_tx_dev;
- strlcpy(vfd->name, "vivid-rad-tx", sizeof(vfd->name));
+ snprintf(vfd->name, sizeof(vfd->name),
+ "vivid-%03d-rad-tx", inst);
vfd->vfl_dir = VFL_DIR_TX;
vfd->fops = &vivid_radio_fops;
vfd->ioctl_ops = &vivid_ioctl_ops;
@@ -1282,6 +1369,13 @@ unreg_dev:
video_unregister_device(&dev->vbi_cap_dev);
video_unregister_device(&dev->vid_out_dev);
video_unregister_device(&dev->vid_cap_dev);
+ cec_unregister_adapter(dev->cec_rx_adap);
+ for (i = 0; i < MAX_OUTPUTS; i++)
+ cec_unregister_adapter(dev->cec_tx_adap[i]);
+ if (dev->cec_workqueue) {
+ vivid_cec_bus_free_work(dev);
+ destroy_workqueue(dev->cec_workqueue);
+ }
free_dev:
v4l2_device_put(&dev->v4l2_dev);
return ret;
@@ -1331,8 +1425,7 @@ static int vivid_probe(struct platform_device *pdev)
static int vivid_remove(struct platform_device *pdev)
{
struct vivid_dev *dev;
- unsigned i;
-
+ unsigned int i, j;
for (i = 0; i < n_devs; i++) {
dev = vivid_devs[i];
@@ -1380,6 +1473,13 @@ static int vivid_remove(struct platform_device *pdev)
unregister_framebuffer(&dev->fb_info);
vivid_fb_release_buffers(dev);
}
+ cec_unregister_adapter(dev->cec_rx_adap);
+ for (j = 0; j < MAX_OUTPUTS; j++)
+ cec_unregister_adapter(dev->cec_tx_adap[j]);
+ if (dev->cec_workqueue) {
+ vivid_cec_bus_free_work(dev);
+ destroy_workqueue(dev->cec_workqueue);
+ }
v4l2_device_put(&dev->v4l2_dev);
vivid_devs[i] = NULL;
}
diff --git a/drivers/media/platform/vivid/vivid-core.h b/drivers/media/platform/vivid/vivid-core.h
index 776783bec..a7daa40d0 100644
--- a/drivers/media/platform/vivid/vivid-core.h
+++ b/drivers/media/platform/vivid/vivid-core.h
@@ -21,6 +21,8 @@
#define _VIVID_CORE_H_
#include <linux/fb.h>
+#include <linux/workqueue.h>
+#include <media/cec.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-dev.h>
@@ -132,6 +134,17 @@ enum vivid_colorspace {
#define VIVID_INVALID_SIGNAL(mode) \
((mode) == NO_SIGNAL || (mode) == NO_LOCK || (mode) == OUT_OF_RANGE)
+struct vivid_cec_work {
+ struct list_head list;
+ struct delayed_work work;
+ struct cec_adapter *adap;
+ struct vivid_dev *dev;
+ unsigned int usecs;
+ unsigned int timeout_ms;
+ u8 tx_status;
+ struct cec_msg msg;
+};
+
struct vivid_dev {
unsigned inst;
struct v4l2_device v4l2_dev;
@@ -497,6 +510,20 @@ struct vivid_dev {
/* Shared between radio receiver and transmitter */
bool radio_rds_loop;
struct timespec radio_rds_init_ts;
+
+ /* CEC */
+ struct cec_adapter *cec_rx_adap;
+ struct cec_adapter *cec_tx_adap[MAX_OUTPUTS];
+ struct workqueue_struct *cec_workqueue;
+ spinlock_t cec_slock;
+ struct list_head cec_work_list;
+ unsigned int cec_xfer_time_jiffies;
+ unsigned long cec_xfer_start_jiffies;
+ u8 cec_output2bus_map[MAX_OUTPUTS];
+
+ /* CEC OSD String */
+ char osd[14];
+ unsigned long osd_jiffies;
};
static inline bool vivid_is_webcam(const struct vivid_dev *dev)
diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
index 3b8c10108..6ca71aabb 100644
--- a/drivers/media/platform/vivid/vivid-kthread-cap.c
+++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
@@ -552,6 +552,19 @@ static void vivid_fillbuff(struct vivid_dev *dev, struct vivid_buffer *buf)
snprintf(str, sizeof(str), " button pressed!");
tpg_gen_text(tpg, basep, line++ * line_height, 16, str);
}
+ if (dev->osd[0]) {
+ if (vivid_is_hdmi_cap(dev)) {
+ snprintf(str, sizeof(str),
+ " OSD \"%s\"", dev->osd);
+ tpg_gen_text(tpg, basep, line++ * line_height,
+ 16, str);
+ }
+ if (dev->osd_jiffies &&
+ time_is_before_jiffies(dev->osd_jiffies + 5 * HZ)) {
+ dev->osd[0] = 0;
+ dev->osd_jiffies = 0;
+ }
+ }
}
/*
diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.c b/drivers/media/platform/vivid/vivid-sdr-cap.c
index 3d1604cb9..ebd7b9c4d 100644
--- a/drivers/media/platform/vivid/vivid-sdr-cap.c
+++ b/drivers/media/platform/vivid/vivid-sdr-cap.c
@@ -51,8 +51,6 @@ static const struct vivid_format formats[] = {
},
};
-static const unsigned int NUM_FORMATS = ARRAY_SIZE(formats);
-
static const struct v4l2_frequency_band bands_adc[] = {
{
.tuner = 0,
@@ -215,7 +213,7 @@ static int vivid_thread_sdr_cap(void *data)
static int sdr_cap_queue_setup(struct vb2_queue *vq,
unsigned *nbuffers, unsigned *nplanes,
- unsigned sizes[], void *alloc_ctxs[])
+ unsigned sizes[], struct device *alloc_devs[])
{
/* 2 = max 16-bit sample returned */
sizes[0] = SDR_CAP_SAMPLES_PER_BUF * 2;
diff --git a/drivers/media/platform/vivid/vivid-vbi-cap.c b/drivers/media/platform/vivid/vivid-vbi-cap.c
index cda45a582..d66ef95dd 100644
--- a/drivers/media/platform/vivid/vivid-vbi-cap.c
+++ b/drivers/media/platform/vivid/vivid-vbi-cap.c
@@ -137,7 +137,7 @@ void vivid_sliced_vbi_cap_process(struct vivid_dev *dev,
static int vbi_cap_queue_setup(struct vb2_queue *vq,
unsigned *nbuffers, unsigned *nplanes,
- unsigned sizes[], void *alloc_ctxs[])
+ unsigned sizes[], struct device *alloc_devs[])
{
struct vivid_dev *dev = vb2_get_drv_priv(vq);
bool is_60hz = dev->std_cap & V4L2_STD_525_60;
diff --git a/drivers/media/platform/vivid/vivid-vbi-out.c b/drivers/media/platform/vivid/vivid-vbi-out.c
index 3c5a469e6..d2989195c 100644
--- a/drivers/media/platform/vivid/vivid-vbi-out.c
+++ b/drivers/media/platform/vivid/vivid-vbi-out.c
@@ -29,7 +29,7 @@
static int vbi_out_queue_setup(struct vb2_queue *vq,
unsigned *nbuffers, unsigned *nplanes,
- unsigned sizes[], void *alloc_ctxs[])
+ unsigned sizes[], struct device *alloc_devs[])
{
struct vivid_dev *dev = vb2_get_drv_priv(vq);
bool is_60hz = dev->std_out & V4L2_STD_525_60;
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index 4f730f355..d404a7ce3 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -36,8 +36,7 @@
/* timeperframe: min/max and default */
static const struct v4l2_fract
tpf_min = {.numerator = 1, .denominator = FPS_MAX},
- tpf_max = {.numerator = FPS_MAX, .denominator = 1},
- tpf_default = {.numerator = 1, .denominator = 30};
+ tpf_max = {.numerator = FPS_MAX, .denominator = 1};
static const struct vivid_fmt formats_ovl[] = {
{
@@ -98,7 +97,7 @@ static const struct v4l2_discrete_probe webcam_probe = {
static int vid_cap_queue_setup(struct vb2_queue *vq,
unsigned *nbuffers, unsigned *nplanes,
- unsigned sizes[], void *alloc_ctxs[])
+ unsigned sizes[], struct device *alloc_devs[])
{
struct vivid_dev *dev = vb2_get_drv_priv(vq);
unsigned buffers = tpg_g_buffers(&dev->tpg);
@@ -145,11 +144,6 @@ static int vid_cap_queue_setup(struct vb2_queue *vq,
*nplanes = buffers;
- /*
- * videobuf2-vmalloc allocator is context-less so no need to set
- * alloc_ctxs array.
- */
-
dprintk(dev, 1, "%s: count=%d\n", __func__, *nbuffers);
for (p = 0; p < buffers; p++)
dprintk(dev, 1, "%s: size[%u]=%u\n", __func__, p, sizes[p]);
@@ -1701,6 +1695,9 @@ int vidioc_s_edid(struct file *file, void *_fh,
struct v4l2_edid *edid)
{
struct vivid_dev *dev = video_drvdata(file);
+ u16 phys_addr;
+ unsigned int i;
+ int ret;
memset(edid->reserved, 0, sizeof(edid->reserved));
if (edid->pad >= dev->num_inputs)
@@ -1709,14 +1706,32 @@ int vidioc_s_edid(struct file *file, void *_fh,
return -EINVAL;
if (edid->blocks == 0) {
dev->edid_blocks = 0;
- return 0;
+ phys_addr = CEC_PHYS_ADDR_INVALID;
+ goto set_phys_addr;
}
if (edid->blocks > dev->edid_max_blocks) {
edid->blocks = dev->edid_max_blocks;
return -E2BIG;
}
+ phys_addr = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, NULL);
+ ret = cec_phys_addr_validate(phys_addr, &phys_addr, NULL);
+ if (ret)
+ return ret;
+
+ if (vb2_is_busy(&dev->vb_vid_cap_q))
+ return -EBUSY;
+
dev->edid_blocks = edid->blocks;
memcpy(dev->edid, edid->edid, edid->blocks * 128);
+
+set_phys_addr:
+ /* TODO: a proper hotplug detect cycle should be emulated here */
+ cec_s_phys_addr(dev->cec_rx_adap, phys_addr, false);
+
+ for (i = 0; i < MAX_OUTPUTS && dev->cec_tx_adap[i]; i++)
+ cec_s_phys_addr(dev->cec_tx_adap[i],
+ cec_phys_addr_for_input(phys_addr, i + 1),
+ false);
return 0;
}
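[Editorial note] The vidioc_s_edid() hunk above extracts the CEC physical address from the user-supplied EDID and then assigns nested addresses to the transmit adapters. CEC physical addresses are four nibbles a.b.c.d; an input i beneath address pa fills pa's first zero nibble with i, so if the capture EDID carries 0.0.0.0, output i ends up at (i+1).0.0.0. A sketch of that nibble logic (assumption: this mirrors what cec_phys_addr_for_input() computes; the real helper additionally returns CEC_PHYS_ADDR_INVALID when all four levels are used):

	static u16 phys_addr_for_input(u16 pa, u8 input)
	{
		/* 0.0.0.0 -> input.0.0.0, a.0.0.0 -> a.input.0.0, ... */
		if (!(pa & 0xf000))
			return input << 12;
		if (!(pa & 0x0f00))
			return pa | (input << 8);
		if (!(pa & 0x00f0))
			return pa | (input << 4);
		return pa | input;	/* simplified: no depth-overflow check */
	}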
@@ -1836,6 +1851,7 @@ int vivid_vid_cap_s_parm(struct file *file, void *priv,
/* resync the thread's timings */
dev->cap_seq_resync = true;
dev->timeperframe_vid_cap = tpf;
+ parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
parm->parm.capture.timeperframe = tpf;
parm->parm.capture.readbuffers = 1;
return 0;
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
index 39ea22847..fcda3ae4e 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.c
+++ b/drivers/media/platform/vivid/vivid-vid-common.c
@@ -811,6 +811,7 @@ int vidioc_g_edid(struct file *file, void *_fh,
{
struct vivid_dev *dev = video_drvdata(file);
struct video_device *vdev = video_devdata(file);
+ struct cec_adapter *adap;
memset(edid->reserved, 0, sizeof(edid->reserved));
if (vdev->vfl_dir == VFL_DIR_RX) {
@@ -818,11 +819,16 @@ int vidioc_g_edid(struct file *file, void *_fh,
return -EINVAL;
if (dev->input_type[edid->pad] != HDMI)
return -EINVAL;
+ adap = dev->cec_rx_adap;
} else {
+ unsigned int bus_idx;
+
if (edid->pad >= dev->num_outputs)
return -EINVAL;
if (dev->output_type[edid->pad] != HDMI)
return -EINVAL;
+ bus_idx = dev->cec_output2bus_map[edid->pad];
+ adap = dev->cec_tx_adap[bus_idx];
}
if (edid->start_block == 0 && edid->blocks == 0) {
edid->blocks = dev->edid_blocks;
@@ -835,5 +841,6 @@ int vidioc_g_edid(struct file *file, void *_fh,
if (edid->start_block + edid->blocks > dev->edid_blocks)
edid->blocks = dev->edid_blocks - edid->start_block;
memcpy(edid->edid, dev->edid, edid->blocks * 128);
+ cec_set_edid_phys_addr(edid->edid, edid->blocks * 128, adap->phys_addr);
return 0;
}
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
index f92f4496d..dd609eea4 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.c
+++ b/drivers/media/platform/vivid/vivid-vid-out.c
@@ -34,7 +34,7 @@
static int vid_out_queue_setup(struct vb2_queue *vq,
unsigned *nbuffers, unsigned *nplanes,
- unsigned sizes[], void *alloc_ctxs[])
+ unsigned sizes[], struct device *alloc_devs[])
{
struct vivid_dev *dev = vb2_get_drv_priv(vq);
const struct vivid_fmt *vfmt = dev->fmt_out;
@@ -87,11 +87,6 @@ static int vid_out_queue_setup(struct vb2_queue *vq,
*nplanes = planes;
- /*
- * videobuf2-vmalloc allocator is context-less so no need to set
- * alloc_ctxs array.
- */
-
dprintk(dev, 1, "%s: count=%d\n", __func__, *nbuffers);
for (p = 0; p < planes; p++)
dprintk(dev, 1, "%s: size[%u]=%u\n", __func__, p, sizes[p]);
diff --git a/drivers/media/platform/vsp1/Makefile b/drivers/media/platform/vsp1/Makefile
index 95b3ac2ea..1328e1bd2 100644
--- a/drivers/media/platform/vsp1/Makefile
+++ b/drivers/media/platform/vsp1/Makefile
@@ -1,7 +1,8 @@
vsp1-y := vsp1_drv.o vsp1_entity.o vsp1_pipe.o
vsp1-y += vsp1_dl.o vsp1_drm.o vsp1_video.o
vsp1-y += vsp1_rpf.o vsp1_rwpf.o vsp1_wpf.o
-vsp1-y += vsp1_hsit.o vsp1_lif.o vsp1_lut.o
+vsp1-y += vsp1_clu.o vsp1_hsit.o vsp1_lut.o
vsp1-y += vsp1_bru.o vsp1_sru.o vsp1_uds.o
+vsp1-y += vsp1_lif.o
obj-$(CONFIG_VIDEO_RENESAS_VSP1) += vsp1.o
diff --git a/drivers/media/platform/vsp1/vsp1.h b/drivers/media/platform/vsp1/vsp1.h
index 46738b6c5..06a2ec7e5 100644
--- a/drivers/media/platform/vsp1/vsp1.h
+++ b/drivers/media/platform/vsp1/vsp1.h
@@ -25,11 +25,13 @@
struct clk;
struct device;
+struct rcar_fcp_device;
struct vsp1_drm;
struct vsp1_entity;
struct vsp1_platform_data;
struct vsp1_bru;
+struct vsp1_clu;
struct vsp1_hsit;
struct vsp1_lif;
struct vsp1_lut;
@@ -45,6 +47,9 @@ struct vsp1_uds;
#define VSP1_HAS_LUT (1 << 1)
#define VSP1_HAS_SRU (1 << 2)
#define VSP1_HAS_BRU (1 << 3)
+#define VSP1_HAS_CLU (1 << 4)
+#define VSP1_HAS_WPF_VFLIP (1 << 5)
+#define VSP1_HAS_WPF_HFLIP (1 << 6)
struct vsp1_device_info {
u32 version;
@@ -62,12 +67,10 @@ struct vsp1_device {
const struct vsp1_device_info *info;
void __iomem *mmio;
- struct clk *clock;
-
- struct mutex lock;
- int ref_count;
+ struct rcar_fcp_device *fcp;
struct vsp1_bru *bru;
+ struct vsp1_clu *clu;
struct vsp1_hsit *hsi;
struct vsp1_hsit *hst;
struct vsp1_lif *lif;
diff --git a/drivers/media/platform/vsp1/vsp1_bru.c b/drivers/media/platform/vsp1/vsp1_bru.c
index b1068c018..8268b8772 100644
--- a/drivers/media/platform/vsp1/vsp1_bru.c
+++ b/drivers/media/platform/vsp1/vsp1_bru.c
@@ -249,7 +249,7 @@ static int bru_set_selection(struct v4l2_subdev *subdev,
return 0;
}
-static struct v4l2_subdev_pad_ops bru_pad_ops = {
+static const struct v4l2_subdev_pad_ops bru_pad_ops = {
.init_cfg = vsp1_entity_init_cfg,
.enum_mbus_code = bru_enum_mbus_code,
.enum_frame_size = bru_enum_frame_size,
@@ -259,7 +259,7 @@ static struct v4l2_subdev_pad_ops bru_pad_ops = {
.set_selection = bru_set_selection,
};
-static struct v4l2_subdev_ops bru_ops = {
+static const struct v4l2_subdev_ops bru_ops = {
.pad = &bru_pad_ops,
};
@@ -269,13 +269,16 @@ static struct v4l2_subdev_ops bru_ops = {
static void bru_configure(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
- struct vsp1_dl_list *dl)
+ struct vsp1_dl_list *dl, bool full)
{
struct vsp1_bru *bru = to_bru(&entity->subdev);
struct v4l2_mbus_framefmt *format;
unsigned int flags;
unsigned int i;
+ if (!full)
+ return;
+
format = vsp1_entity_get_pad_format(&bru->entity, bru->entity.config,
bru->entity.source_pad);
@@ -390,7 +393,8 @@ struct vsp1_bru *vsp1_bru_create(struct vsp1_device *vsp1)
bru->entity.type = VSP1_ENTITY_BRU;
ret = vsp1_entity_init(vsp1, &bru->entity, "bru",
- vsp1->info->num_bru_inputs + 1, &bru_ops);
+ vsp1->info->num_bru_inputs + 1, &bru_ops,
+ MEDIA_ENT_F_PROC_VIDEO_COMPOSER);
if (ret < 0)
return ERR_PTR(ret);
diff --git a/drivers/media/platform/vsp1/vsp1_clu.c b/drivers/media/platform/vsp1/vsp1_clu.c
new file mode 100644
index 000000000..b63d2dbe5
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_clu.c
@@ -0,0 +1,292 @@
+/*
+ * vsp1_clu.c -- R-Car VSP1 Cubic Look-Up Table
+ *
+ * Copyright (C) 2015-2016 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_clu.h"
+#include "vsp1_dl.h"
+
+#define CLU_MIN_SIZE 4U
+#define CLU_MAX_SIZE 8190U
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline void vsp1_clu_write(struct vsp1_clu *clu, struct vsp1_dl_list *dl,
+ u32 reg, u32 data)
+{
+ vsp1_dl_list_write(dl, reg, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
+#define V4L2_CID_VSP1_CLU_TABLE (V4L2_CID_USER_BASE | 0x1001)
+#define V4L2_CID_VSP1_CLU_MODE (V4L2_CID_USER_BASE | 0x1002)
+#define V4L2_CID_VSP1_CLU_MODE_2D 0
+#define V4L2_CID_VSP1_CLU_MODE_3D 1
+
+static int clu_set_table(struct vsp1_clu *clu, struct v4l2_ctrl *ctrl)
+{
+ struct vsp1_dl_body *dlb;
+ unsigned int i;
+
+ dlb = vsp1_dl_fragment_alloc(clu->entity.vsp1, 1 + 17 * 17 * 17);
+ if (!dlb)
+ return -ENOMEM;
+
+ vsp1_dl_fragment_write(dlb, VI6_CLU_ADDR, 0);
+ for (i = 0; i < 17 * 17 * 17; ++i)
+ vsp1_dl_fragment_write(dlb, VI6_CLU_DATA, ctrl->p_new.p_u32[i]);
+
+ spin_lock_irq(&clu->lock);
+ swap(clu->clu, dlb);
+ spin_unlock_irq(&clu->lock);
+
+ vsp1_dl_fragment_free(dlb);
+ return 0;
+}
+
+static int clu_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vsp1_clu *clu =
+ container_of(ctrl->handler, struct vsp1_clu, ctrls);
+
+ switch (ctrl->id) {
+ case V4L2_CID_VSP1_CLU_TABLE:
+ clu_set_table(clu, ctrl);
+ break;
+
+ case V4L2_CID_VSP1_CLU_MODE:
+ clu->mode = ctrl->val;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops clu_ctrl_ops = {
+ .s_ctrl = clu_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config clu_table_control = {
+ .ops = &clu_ctrl_ops,
+ .id = V4L2_CID_VSP1_CLU_TABLE,
+ .name = "Look-Up Table",
+ .type = V4L2_CTRL_TYPE_U32,
+ .min = 0x00000000,
+ .max = 0x00ffffff,
+ .step = 1,
+ .def = 0,
+ .dims = { 17, 17, 17 },
+};
+
+static const char * const clu_mode_menu[] = {
+ "2D",
+ "3D",
+ NULL,
+};
+
+static const struct v4l2_ctrl_config clu_mode_control = {
+ .ops = &clu_ctrl_ops,
+ .id = V4L2_CID_VSP1_CLU_MODE,
+ .name = "Mode",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .min = 0,
+ .max = 1,
+ .def = 1,
+ .qmenu = clu_mode_menu,
+};
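
Both controls are exposed on the CLU subdev node, so the 3D table is programmed from userspace through the extended control API with a U32 payload. A hedged userspace sketch (the subdev path is an assumption; the control ID matches the definition above):

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

#define VSP1_CID_CLU_TABLE (V4L2_CID_USER_BASE | 0x1001)

static int clu_upload(const char *subdev, uint32_t table[17 * 17 * 17])
{
	struct v4l2_ext_control ctrl;
	struct v4l2_ext_controls ctrls;
	int fd = open(subdev, O_RDWR);	/* e.g. a /dev/v4l-subdev* node */
	int ret;

	if (fd < 0)
		return -1;

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.id = VSP1_CID_CLU_TABLE;
	ctrl.size = 17 * 17 * 17 * sizeof(uint32_t);
	ctrl.p_u32 = table;

	memset(&ctrls, 0, sizeof(ctrls));
	ctrls.which = V4L2_CTRL_WHICH_CUR_VAL;
	ctrls.count = 1;
	ctrls.controls = &ctrl;

	ret = ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
	close(fd);
	return ret;
}
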
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static int clu_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ static const unsigned int codes[] = {
+ MEDIA_BUS_FMT_ARGB8888_1X32,
+ MEDIA_BUS_FMT_AHSV8888_1X32,
+ MEDIA_BUS_FMT_AYUV8_1X32,
+ };
+
+ return vsp1_subdev_enum_mbus_code(subdev, cfg, code, codes,
+ ARRAY_SIZE(codes));
+}
+
+static int clu_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ return vsp1_subdev_enum_frame_size(subdev, cfg, fse, CLU_MIN_SIZE,
+ CLU_MIN_SIZE, CLU_MAX_SIZE,
+ CLU_MAX_SIZE);
+}
+
+static int clu_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_clu *clu = to_clu(subdev);
+ struct v4l2_subdev_pad_config *config;
+ struct v4l2_mbus_framefmt *format;
+
+ config = vsp1_entity_get_pad_config(&clu->entity, cfg, fmt->which);
+ if (!config)
+ return -EINVAL;
+
+ /* Default to YUV if the requested format is not supported. */
+ if (fmt->format.code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
+ fmt->format.code != MEDIA_BUS_FMT_AHSV8888_1X32 &&
+ fmt->format.code != MEDIA_BUS_FMT_AYUV8_1X32)
+ fmt->format.code = MEDIA_BUS_FMT_AYUV8_1X32;
+
+ format = vsp1_entity_get_pad_format(&clu->entity, config, fmt->pad);
+
+ if (fmt->pad == CLU_PAD_SOURCE) {
+ /* The CLU output format can't be modified. */
+ fmt->format = *format;
+ return 0;
+ }
+
+ format->code = fmt->format.code;
+ format->width = clamp_t(unsigned int, fmt->format.width,
+ CLU_MIN_SIZE, CLU_MAX_SIZE);
+ format->height = clamp_t(unsigned int, fmt->format.height,
+ CLU_MIN_SIZE, CLU_MAX_SIZE);
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ fmt->format = *format;
+
+ /* Propagate the format to the source pad. */
+ format = vsp1_entity_get_pad_format(&clu->entity, config,
+ CLU_PAD_SOURCE);
+ *format = fmt->format;
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static const struct v4l2_subdev_pad_ops clu_pad_ops = {
+ .init_cfg = vsp1_entity_init_cfg,
+ .enum_mbus_code = clu_enum_mbus_code,
+ .enum_frame_size = clu_enum_frame_size,
+ .get_fmt = vsp1_subdev_get_pad_format,
+ .set_fmt = clu_set_format,
+};
+
+static const struct v4l2_subdev_ops clu_ops = {
+ .pad = &clu_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void clu_configure(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl, bool full)
+{
+ struct vsp1_clu *clu = to_clu(&entity->subdev);
+ struct vsp1_dl_body *dlb;
+ unsigned long flags;
+ u32 ctrl = VI6_CLU_CTRL_AAI | VI6_CLU_CTRL_MVS | VI6_CLU_CTRL_EN;
+
+ /* The format can't be changed during streaming; only verify it at
+ * stream start and store the information internally for future partial
+ * reconfiguration calls.
+ */
+ if (full) {
+ struct v4l2_mbus_framefmt *format;
+
+ format = vsp1_entity_get_pad_format(&clu->entity,
+ clu->entity.config,
+ CLU_PAD_SINK);
+ clu->yuv_mode = format->code == MEDIA_BUS_FMT_AYUV8_1X32;
+ return;
+ }
+
+ /* 2D mode can only be used with the YCbCr pixel encoding. */
+ if (clu->mode == V4L2_CID_VSP1_CLU_MODE_2D && clu->yuv_mode)
+ ctrl |= VI6_CLU_CTRL_AX1I_2D | VI6_CLU_CTRL_AX2I_2D
+ | VI6_CLU_CTRL_OS0_2D | VI6_CLU_CTRL_OS1_2D
+ | VI6_CLU_CTRL_OS2_2D | VI6_CLU_CTRL_M2D;
+
+ vsp1_clu_write(clu, dl, VI6_CLU_CTRL, ctrl);
+
+ spin_lock_irqsave(&clu->lock, flags);
+ dlb = clu->clu;
+ clu->clu = NULL;
+ spin_unlock_irqrestore(&clu->lock, flags);
+
+ if (dlb)
+ vsp1_dl_list_add_fragment(dl, dlb);
+}
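
The new bool argument splits entity setup into two phases: a full pass at stream start that latches format-derived state (here, yuv_mode), and a partial pass per frame that writes the runtime registers and attaches any pending table fragment. The caller-side contract, sketched with assumed member names (vsp1_du_atomic_flush() below shows the real call sites):

/* Illustrative only: one full pass when the pipeline is (re)built,
 * then a partial pass for every generated frame. */
static void pipe_configure_all(struct vsp1_pipeline *pipe,
			       struct vsp1_dl_list *dl, bool stream_start)
{
	struct vsp1_entity *entity;

	list_for_each_entry(entity, &pipe->entities, list_pipe) {
		if (!entity->ops->configure)
			continue;
		if (stream_start)
			entity->ops->configure(entity, pipe, dl, true);
		entity->ops->configure(entity, pipe, dl, false);
	}
}
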
+
+static const struct vsp1_entity_operations clu_entity_ops = {
+ .configure = clu_configure,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_clu *vsp1_clu_create(struct vsp1_device *vsp1)
+{
+ struct vsp1_clu *clu;
+ int ret;
+
+ clu = devm_kzalloc(vsp1->dev, sizeof(*clu), GFP_KERNEL);
+ if (clu == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&clu->lock);
+
+ clu->entity.ops = &clu_entity_ops;
+ clu->entity.type = VSP1_ENTITY_CLU;
+
+ ret = vsp1_entity_init(vsp1, &clu->entity, "clu", 2, &clu_ops,
+ MEDIA_ENT_F_PROC_VIDEO_LUT);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /* Initialize the control handler. */
+ v4l2_ctrl_handler_init(&clu->ctrls, 2);
+ v4l2_ctrl_new_custom(&clu->ctrls, &clu_table_control, NULL);
+ v4l2_ctrl_new_custom(&clu->ctrls, &clu_mode_control, NULL);
+
+ clu->entity.subdev.ctrl_handler = &clu->ctrls;
+
+ if (clu->ctrls.error) {
+ dev_err(vsp1->dev, "clu: failed to initialize controls\n");
+ ret = clu->ctrls.error;
+ vsp1_entity_destroy(&clu->entity);
+ return ERR_PTR(ret);
+ }
+
+ v4l2_ctrl_handler_setup(&clu->ctrls);
+
+ return clu;
+}
diff --git a/drivers/media/platform/vsp1/vsp1_clu.h b/drivers/media/platform/vsp1/vsp1_clu.h
new file mode 100644
index 000000000..036e0a2f1
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_clu.h
@@ -0,0 +1,48 @@
+/*
+ * vsp1_clu.h -- R-Car VSP1 Cubic Look-Up Table
+ *
+ * Copyright (C) 2015 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __VSP1_CLU_H__
+#define __VSP1_CLU_H__
+
+#include <linux/spinlock.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_entity.h"
+
+struct vsp1_device;
+struct vsp1_dl_body;
+
+#define CLU_PAD_SINK 0
+#define CLU_PAD_SOURCE 1
+
+struct vsp1_clu {
+ struct vsp1_entity entity;
+
+ struct v4l2_ctrl_handler ctrls;
+
+ bool yuv_mode;
+ spinlock_t lock;
+ unsigned int mode;
+ struct vsp1_dl_body *clu;
+};
+
+static inline struct vsp1_clu *to_clu(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_clu, entity.subdev);
+}
+
+struct vsp1_clu *vsp1_clu_create(struct vsp1_device *vsp1);
+
+#endif /* __VSP1_CLU_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c
index e238d9b93..37c3518aa 100644
--- a/drivers/media/platform/vsp1/vsp1_dl.c
+++ b/drivers/media/platform/vsp1/vsp1_dl.c
@@ -15,6 +15,7 @@
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/slab.h>
+#include <linux/workqueue.h>
#include "vsp1.h"
#include "vsp1_dl.h"
@@ -92,11 +93,13 @@ enum vsp1_dl_mode {
* @index: index of the related WPF
* @mode: display list operation mode (header or headerless)
* @vsp1: the VSP1 device
- * @lock: protects the active, queued and pending lists
+ * @lock: protects the free, active, queued, pending and gc_fragments lists
* @free: array of all free display lists
* @active: list currently being processed (loaded) by hardware
* @queued: list queued to the hardware (written to the DL registers)
* @pending: list waiting to be queued to the hardware
+ * @gc_work: fragments garbage collector work struct
+ * @gc_fragments: array of display list fragments waiting to be freed
*/
struct vsp1_dl_manager {
unsigned int index;
@@ -108,6 +111,9 @@ struct vsp1_dl_manager {
struct vsp1_dl_list *active;
struct vsp1_dl_list *queued;
struct vsp1_dl_list *pending;
+
+ struct work_struct gc_work;
+ struct list_head gc_fragments;
};
/* -----------------------------------------------------------------------------
@@ -262,21 +268,10 @@ static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
return dl;
}
-static void vsp1_dl_list_free_fragments(struct vsp1_dl_list *dl)
-{
- struct vsp1_dl_body *dlb, *next;
-
- list_for_each_entry_safe(dlb, next, &dl->fragments, list) {
- list_del(&dlb->list);
- vsp1_dl_body_cleanup(dlb);
- kfree(dlb);
- }
-}
-
static void vsp1_dl_list_free(struct vsp1_dl_list *dl)
{
vsp1_dl_body_cleanup(&dl->body0);
- vsp1_dl_list_free_fragments(dl);
+ list_splice_init(&dl->fragments, &dl->dlm->gc_fragments);
kfree(dl);
}
@@ -311,7 +306,16 @@ static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
if (!dl)
return;
- vsp1_dl_list_free_fragments(dl);
+ /* We can't free fragments here as DMA memory can only be freed in
+ * non-atomic context. Move all fragments to the display list
+ * manager's list of fragments to be freed; they will be
+ * garbage-collected by the work queue.
+ */
+ if (!list_empty(&dl->fragments)) {
+ list_splice_init(&dl->fragments, &dl->dlm->gc_fragments);
+ schedule_work(&dl->dlm->gc_work);
+ }
+
dl->body0.num_entries = 0;
list_add_tail(&dl->list, &dl->dlm->free);
@@ -550,6 +554,40 @@ void vsp1_dlm_reset(struct vsp1_dl_manager *dlm)
dlm->pending = NULL;
}
+/*
+ * Free all fragments waiting to be garbage-collected.
+ *
+ * This function must be called without the display list manager lock held.
+ */
+static void vsp1_dlm_fragments_free(struct vsp1_dl_manager *dlm)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dlm->lock, flags);
+
+ while (!list_empty(&dlm->gc_fragments)) {
+ struct vsp1_dl_body *dlb;
+
+ dlb = list_first_entry(&dlm->gc_fragments, struct vsp1_dl_body,
+ list);
+ list_del(&dlb->list);
+
+ spin_unlock_irqrestore(&dlm->lock, flags);
+ vsp1_dl_fragment_free(dlb);
+ spin_lock_irqsave(&dlm->lock, flags);
+ }
+
+ spin_unlock_irqrestore(&dlm->lock, flags);
+}
+
+static void vsp1_dlm_garbage_collect(struct work_struct *work)
+{
+ struct vsp1_dl_manager *dlm =
+ container_of(work, struct vsp1_dl_manager, gc_work);
+
+ vsp1_dlm_fragments_free(dlm);
+}
+
struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
unsigned int index,
unsigned int prealloc)
@@ -568,6 +606,8 @@ struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
spin_lock_init(&dlm->lock);
INIT_LIST_HEAD(&dlm->free);
+ INIT_LIST_HEAD(&dlm->gc_fragments);
+ INIT_WORK(&dlm->gc_work, vsp1_dlm_garbage_collect);
for (i = 0; i < prealloc; ++i) {
struct vsp1_dl_list *dl;
@@ -589,8 +629,12 @@ void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm)
if (!dlm)
return;
+ cancel_work_sync(&dlm->gc_work);
+
list_for_each_entry_safe(dl, next, &dlm->free, list) {
list_del(&dl->list);
vsp1_dl_list_free(dl);
}
+
+ vsp1_dlm_fragments_free(dlm);
}
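
The drain loop above pops one fragment at a time and drops the manager lock around each vsp1_dl_fragment_free() call, since freeing coherent DMA memory may sleep while the list itself is spliced to from interrupt context. The idiom in isolation, with hypothetical names:

#include <linux/list.h>
#include <linux/spinlock.h>

/* Pop-one-then-unlock drain: item_free() is allowed to sleep, so the
 * spinlock is only ever held across list manipulation. */
static void drain_and_free(struct list_head *head, spinlock_t *lock,
			   void (*item_free)(struct list_head *item))
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	while (!list_empty(head)) {
		struct list_head *item = head->next;

		list_del(item);
		spin_unlock_irqrestore(lock, flags);
		item_free(item);
		spin_lock_irqsave(lock, flags);
	}
	spin_unlock_irqrestore(lock, flags);
}
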
diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c
index fc4bbc401..fe9665e57 100644
--- a/drivers/media/platform/vsp1/vsp1_drm.c
+++ b/drivers/media/platform/vsp1/vsp1_drm.c
@@ -230,42 +230,33 @@ EXPORT_SYMBOL_GPL(vsp1_du_atomic_begin);
* vsp1_du_atomic_update - Setup one RPF input of the VSP pipeline
* @dev: the VSP device
* @rpf_index: index of the RPF to setup (0-based)
- * @pixelformat: V4L2 pixel format for the RPF memory input
- * @pitch: number of bytes per line in the image stored in memory
- * @mem: DMA addresses of the memory buffers (one per plane)
- * @src: the source crop rectangle for the RPF
- * @dst: the destination compose rectangle for the BRU input
- * @alpha: global alpha value for the input
- * @zpos: the Z-order position of the input
+ * @cfg: the RPF configuration
*
- * Configure the VSP to perform composition of the image referenced by @mem
- * through RPF @rpf_index, using the @src crop rectangle and the @dst
+ * Configure the VSP to perform image composition through RPF @rpf_index as
+ * described by the @cfg configuration. The image to compose is referenced by
+ * @cfg.mem and composed using the @cfg.src crop rectangle and the @cfg.dst
 * composition rectangle. The Z-order is configurable with higher @cfg.zpos values
* displayed on top.
*
- * Image format as stored in memory is expressed as a V4L2 @pixelformat value.
- * As a special case, setting the pixel format to 0 will disable the RPF. The
- * @pitch, @mem, @src and @dst parameters are ignored in that case. Calling the
+ * If the @cfg configuration is NULL, the RPF will be disabled. Calling the
* function on a disabled RPF is allowed.
*
- * The memory pitch is configurable to allow for padding at end of lines, or
- * simple for images that extend beyond the crop rectangle boundaries. The
- * @pitch value is expressed in bytes and applies to all planes for multiplanar
- * formats.
+ * Image format as stored in memory is expressed as a V4L2 @cfg.pixelformat
+ * value. The memory pitch is configurable to allow for padding at end of lines,
+ * or simply for images that extend beyond the crop rectangle boundaries. The
+ * @cfg.pitch value is expressed in bytes and applies to all planes for
+ * multiplanar formats.
*
* The source memory buffer is referenced by the DMA address of its planes in
- * the @mem array. Up to two planes are supported. The second plane DMA address
- * is ignored for formats using a single plane.
+ * the @cfg.mem array. Up to two planes are supported. The second plane DMA
+ * address is ignored for formats using a single plane.
*
 * This function isn't reentrant; the caller needs to serialize calls.
*
* Return 0 on success or a negative error code on failure.
*/
-int vsp1_du_atomic_update_ext(struct device *dev, unsigned int rpf_index,
- u32 pixelformat, unsigned int pitch,
- dma_addr_t mem[2], const struct v4l2_rect *src,
- const struct v4l2_rect *dst, unsigned int alpha,
- unsigned int zpos)
+int vsp1_du_atomic_update(struct device *dev, unsigned int rpf_index,
+ const struct vsp1_du_atomic_config *cfg)
{
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
const struct vsp1_format_info *fmtinfo;
@@ -276,7 +267,7 @@ int vsp1_du_atomic_update_ext(struct device *dev, unsigned int rpf_index,
rpf = vsp1->rpf[rpf_index];
- if (pixelformat == 0) {
+ if (!cfg) {
dev_dbg(vsp1->dev, "%s: RPF%u: disable requested\n", __func__,
rpf_index);
@@ -287,38 +278,39 @@ int vsp1_du_atomic_update_ext(struct device *dev, unsigned int rpf_index,
dev_dbg(vsp1->dev,
"%s: RPF%u: (%u,%u)/%ux%u -> (%u,%u)/%ux%u (%08x), pitch %u dma { %pad, %pad } zpos %u\n",
__func__, rpf_index,
- src->left, src->top, src->width, src->height,
- dst->left, dst->top, dst->width, dst->height,
- pixelformat, pitch, &mem[0], &mem[1], zpos);
+ cfg->src.left, cfg->src.top, cfg->src.width, cfg->src.height,
+ cfg->dst.left, cfg->dst.top, cfg->dst.width, cfg->dst.height,
+ cfg->pixelformat, cfg->pitch, &cfg->mem[0], &cfg->mem[1],
+ cfg->zpos);
/* Store the format, stride, memory buffer address, crop and compose
 * rectangles and Z-order position for the input.
*/
- fmtinfo = vsp1_get_format_info(pixelformat);
+ fmtinfo = vsp1_get_format_info(cfg->pixelformat);
if (!fmtinfo) {
 dev_dbg(vsp1->dev, "Unsupported pixel format %08x for RPF\n",
- pixelformat);
+ cfg->pixelformat);
return -EINVAL;
}
rpf->fmtinfo = fmtinfo;
rpf->format.num_planes = fmtinfo->planes;
- rpf->format.plane_fmt[0].bytesperline = pitch;
- rpf->format.plane_fmt[1].bytesperline = pitch;
- rpf->alpha = alpha;
+ rpf->format.plane_fmt[0].bytesperline = cfg->pitch;
+ rpf->format.plane_fmt[1].bytesperline = cfg->pitch;
+ rpf->alpha = cfg->alpha;
- rpf->mem.addr[0] = mem[0];
- rpf->mem.addr[1] = mem[1];
+ rpf->mem.addr[0] = cfg->mem[0];
+ rpf->mem.addr[1] = cfg->mem[1];
rpf->mem.addr[2] = 0;
- vsp1->drm->inputs[rpf_index].crop = *src;
- vsp1->drm->inputs[rpf_index].compose = *dst;
- vsp1->drm->inputs[rpf_index].zpos = zpos;
+ vsp1->drm->inputs[rpf_index].crop = cfg->src;
+ vsp1->drm->inputs[rpf_index].compose = cfg->dst;
+ vsp1->drm->inputs[rpf_index].zpos = cfg->zpos;
vsp1->drm->inputs[rpf_index].enabled = true;
return 0;
}
-EXPORT_SYMBOL_GPL(vsp1_du_atomic_update_ext);
+EXPORT_SYMBOL_GPL(vsp1_du_atomic_update);
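
A DU-side caller now fills one structure per plane and passes NULL to disable the RPF. A hedged sketch of the begin/update/flush sequence, assuming the vsp1_du_atomic_config layout documented above:

#include <linux/device.h>
#include <linux/videodev2.h>
#include <media/vsp1.h>

static int du_show_plane(struct device *vsp, unsigned int rpf_index,
			 dma_addr_t paddr, unsigned int width,
			 unsigned int height)
{
	struct vsp1_du_atomic_config cfg = {
		.pixelformat = V4L2_PIX_FMT_ARGB32,
		.pitch = width * 4,	/* bytes per line, all planes */
		.mem = { paddr, 0 },
		.src = { 0, 0, width, height },
		.dst = { 0, 0, width, height },
		.alpha = 255,
		.zpos = 0,
	};
	int ret;

	vsp1_du_atomic_begin(vsp);
	ret = vsp1_du_atomic_update(vsp, rpf_index, &cfg);
	vsp1_du_atomic_flush(vsp);
	return ret;
}
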
static int vsp1_du_setup_rpf_pipe(struct vsp1_device *vsp1,
struct vsp1_rwpf *rpf, unsigned int bru_input)
@@ -499,8 +491,10 @@ void vsp1_du_atomic_flush(struct device *dev)
vsp1_entity_route_setup(entity, pipe->dl);
- if (entity->ops->configure)
- entity->ops->configure(entity, pipe, pipe->dl);
+ if (entity->ops->configure) {
+ entity->ops->configure(entity, pipe, pipe->dl, true);
+ entity->ops->configure(entity, pipe, pipe->dl, false);
+ }
/* The memory buffer address must be applied after configuring
 * the RPF to make sure the crop offsets are computed.
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index e2d779fac..cc316d281 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -19,12 +19,15 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/videodev2.h>
+#include <media/rcar-fcp.h>
#include <media/v4l2-subdev.h>
#include "vsp1.h"
#include "vsp1_bru.h"
+#include "vsp1_clu.h"
#include "vsp1_dl.h"
#include "vsp1_drm.h"
#include "vsp1_hsit.h"
@@ -145,7 +148,7 @@ static int vsp1_uapi_create_links(struct vsp1_device *vsp1)
return ret;
}
- if (vsp1->info->features & VSP1_HAS_LIF) {
+ if (vsp1->lif) {
ret = media_create_pad_link(&vsp1->wpf[0]->entity.subdev.entity,
RWPF_PAD_SOURCE,
&vsp1->lif->entity.subdev.entity,
@@ -168,19 +171,15 @@ static int vsp1_uapi_create_links(struct vsp1_device *vsp1)
for (i = 0; i < vsp1->info->wpf_count; ++i) {
/* Connect the video device to the WPF. All connections are
- * immutable except for the WPF0 source link if a LIF is
- * present.
+ * immutable.
*/
struct vsp1_rwpf *wpf = vsp1->wpf[i];
- unsigned int flags = MEDIA_LNK_FL_ENABLED;
-
- if (!(vsp1->info->features & VSP1_HAS_LIF) || i != 0)
- flags |= MEDIA_LNK_FL_IMMUTABLE;
ret = media_create_pad_link(&wpf->entity.subdev.entity,
RWPF_PAD_SOURCE,
&wpf->video->video.entity, 0,
- flags);
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
if (ret < 0)
return ret;
}
@@ -204,7 +203,8 @@ static void vsp1_destroy_entities(struct vsp1_device *vsp1)
}
v4l2_device_unregister(&vsp1->v4l2_dev);
- media_device_unregister(&vsp1->media_dev);
+ if (vsp1->info->uapi)
+ media_device_unregister(&vsp1->media_dev);
media_device_cleanup(&vsp1->media_dev);
if (!vsp1->info->uapi)
@@ -252,6 +252,16 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
list_add_tail(&vsp1->bru->entity.list_dev, &vsp1->entities);
}
+ if (vsp1->info->features & VSP1_HAS_CLU) {
+ vsp1->clu = vsp1_clu_create(vsp1);
+ if (IS_ERR(vsp1->clu)) {
+ ret = PTR_ERR(vsp1->clu);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->clu->entity.list_dev, &vsp1->entities);
+ }
+
vsp1->hsi = vsp1_hsit_create(vsp1, true);
if (IS_ERR(vsp1->hsi)) {
ret = PTR_ERR(vsp1->hsi);
@@ -268,7 +278,11 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
list_add_tail(&vsp1->hst->entity.list_dev, &vsp1->entities);
- if (vsp1->info->features & VSP1_HAS_LIF) {
+ /* The LIF is only supported when used in conjunction with the DU, in
+ * which case the userspace API is disabled. If the userspace API is
+ * enabled, skip the LIF, even when present.
+ */
+ if (vsp1->info->features & VSP1_HAS_LIF && !vsp1->info->uapi) {
vsp1->lif = vsp1_lif_create(vsp1);
if (IS_ERR(vsp1->lif)) {
ret = PTR_ERR(vsp1->lif);
@@ -379,14 +393,15 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
/* Register subdev nodes if the userspace API is enabled or initialize
* the DRM pipeline otherwise.
*/
- if (vsp1->info->uapi)
+ if (vsp1->info->uapi) {
ret = v4l2_device_register_subdev_nodes(&vsp1->v4l2_dev);
- else
- ret = vsp1_drm_init(vsp1);
- if (ret < 0)
- goto done;
+ if (ret < 0)
+ goto done;
- ret = media_device_register(mdev);
+ ret = media_device_register(mdev);
+ } else {
+ ret = vsp1_drm_init(vsp1);
+ }
done:
if (ret < 0)
@@ -462,35 +477,16 @@ static int vsp1_device_init(struct vsp1_device *vsp1)
/*
* vsp1_device_get - Acquire the VSP1 device
*
- * Increment the VSP1 reference count and initialize the device if the first
- * reference is taken.
+ * Make sure the device is not suspended and initialize it if needed.
*
* Return 0 on success or a negative error code otherwise.
*/
int vsp1_device_get(struct vsp1_device *vsp1)
{
- int ret = 0;
-
- mutex_lock(&vsp1->lock);
- if (vsp1->ref_count > 0)
- goto done;
-
- ret = clk_prepare_enable(vsp1->clock);
- if (ret < 0)
- goto done;
-
- ret = vsp1_device_init(vsp1);
- if (ret < 0) {
- clk_disable_unprepare(vsp1->clock);
- goto done;
- }
-
-done:
- if (!ret)
- vsp1->ref_count++;
+ int ret;
- mutex_unlock(&vsp1->lock);
- return ret;
+ ret = pm_runtime_get_sync(vsp1->dev);
+ return ret < 0 ? ret : 0;
}
/*
@@ -501,54 +497,59 @@ done:
*/
void vsp1_device_put(struct vsp1_device *vsp1)
{
- mutex_lock(&vsp1->lock);
-
- if (--vsp1->ref_count == 0)
- clk_disable_unprepare(vsp1->clock);
-
- mutex_unlock(&vsp1->lock);
+ pm_runtime_put_sync(vsp1->dev);
}
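
With the switch to runtime PM, every successful vsp1_device_get() must be balanced by a vsp1_device_put(), and register access only happens inside such a pair. An illustrative helper (not part of the patch):

/* Bracket MMIO access with get/put so the power domain, and thus the
 * functional clock, is guaranteed to be up. */
static int vsp1_read_version(struct vsp1_device *vsp1, u32 *version)
{
	int ret = vsp1_device_get(vsp1);

	if (ret < 0)
		return ret;

	*version = vsp1_read(vsp1, VI6_IP_VERSION);
	vsp1_device_put(vsp1);
	return 0;
}
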
/* -----------------------------------------------------------------------------
* Power Management
*/
-#ifdef CONFIG_PM_SLEEP
-static int vsp1_pm_suspend(struct device *dev)
+static int __maybe_unused vsp1_pm_suspend(struct device *dev)
{
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
- WARN_ON(mutex_is_locked(&vsp1->lock));
+ vsp1_pipelines_suspend(vsp1);
+ pm_runtime_force_suspend(vsp1->dev);
- if (vsp1->ref_count == 0)
- return 0;
+ return 0;
+}
- vsp1_pipelines_suspend(vsp1);
+static int __maybe_unused vsp1_pm_resume(struct device *dev)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
- clk_disable_unprepare(vsp1->clock);
+ pm_runtime_force_resume(vsp1->dev);
+ vsp1_pipelines_resume(vsp1);
return 0;
}
-static int vsp1_pm_resume(struct device *dev)
+static int __maybe_unused vsp1_pm_runtime_suspend(struct device *dev)
{
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
- WARN_ON(mutex_is_locked(&vsp1->lock));
+ rcar_fcp_disable(vsp1->fcp);
- if (vsp1->ref_count == 0)
- return 0;
+ return 0;
+}
- clk_prepare_enable(vsp1->clock);
+static int __maybe_unused vsp1_pm_runtime_resume(struct device *dev)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+ int ret;
- vsp1_pipelines_resume(vsp1);
+ if (vsp1->info) {
+ ret = vsp1_device_init(vsp1);
+ if (ret < 0)
+ return ret;
+ }
- return 0;
+ return rcar_fcp_enable(vsp1->fcp);
}
-#endif
static const struct dev_pm_ops vsp1_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(vsp1_pm_suspend, vsp1_pm_resume)
+ SET_RUNTIME_PM_OPS(vsp1_pm_runtime_suspend, vsp1_pm_runtime_resume, NULL)
};
/* -----------------------------------------------------------------------------
@@ -559,7 +560,8 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
{
.version = VI6_IP_VERSION_MODEL_VSPS_H2,
.gen = 2,
- .features = VSP1_HAS_BRU | VSP1_HAS_LUT | VSP1_HAS_SRU,
+ .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT
+ | VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP,
.rpf_count = 5,
.uds_count = 3,
.wpf_count = 4,
@@ -568,9 +570,9 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
}, {
.version = VI6_IP_VERSION_MODEL_VSPR_H2,
.gen = 2,
- .features = VSP1_HAS_BRU | VSP1_HAS_SRU,
+ .features = VSP1_HAS_BRU | VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP,
.rpf_count = 5,
- .uds_count = 1,
+ .uds_count = 3,
.wpf_count = 4,
.num_bru_inputs = 4,
.uapi = true,
@@ -580,22 +582,24 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.features = VSP1_HAS_BRU | VSP1_HAS_LIF | VSP1_HAS_LUT,
.rpf_count = 4,
.uds_count = 1,
- .wpf_count = 4,
+ .wpf_count = 1,
.num_bru_inputs = 4,
.uapi = true,
}, {
.version = VI6_IP_VERSION_MODEL_VSPS_M2,
.gen = 2,
- .features = VSP1_HAS_BRU | VSP1_HAS_LUT | VSP1_HAS_SRU,
+ .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT
+ | VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP,
.rpf_count = 5,
- .uds_count = 3,
+ .uds_count = 1,
.wpf_count = 4,
.num_bru_inputs = 4,
.uapi = true,
}, {
.version = VI6_IP_VERSION_MODEL_VSPI_GEN3,
.gen = 3,
- .features = VSP1_HAS_LUT | VSP1_HAS_SRU,
+ .features = VSP1_HAS_CLU | VSP1_HAS_LUT | VSP1_HAS_SRU
+ | VSP1_HAS_WPF_HFLIP | VSP1_HAS_WPF_VFLIP,
.rpf_count = 1,
.uds_count = 1,
.wpf_count = 1,
@@ -603,7 +607,7 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
}, {
.version = VI6_IP_VERSION_MODEL_VSPBD_GEN3,
.gen = 3,
- .features = VSP1_HAS_BRU,
+ .features = VSP1_HAS_BRU | VSP1_HAS_WPF_VFLIP,
.rpf_count = 5,
.wpf_count = 1,
.num_bru_inputs = 5,
@@ -611,7 +615,8 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
}, {
.version = VI6_IP_VERSION_MODEL_VSPBC_GEN3,
.gen = 3,
- .features = VSP1_HAS_BRU | VSP1_HAS_LUT,
+ .features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT
+ | VSP1_HAS_WPF_VFLIP,
.rpf_count = 5,
.wpf_count = 1,
.num_bru_inputs = 5,
@@ -619,7 +624,7 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
}, {
.version = VI6_IP_VERSION_MODEL_VSPD_GEN3,
.gen = 3,
- .features = VSP1_HAS_BRU | VSP1_HAS_LIF,
+ .features = VSP1_HAS_BRU | VSP1_HAS_LIF | VSP1_HAS_WPF_VFLIP,
.rpf_count = 5,
.wpf_count = 2,
.num_bru_inputs = 5,
@@ -629,6 +634,7 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
static int vsp1_probe(struct platform_device *pdev)
{
struct vsp1_device *vsp1;
+ struct device_node *fcp_node;
struct resource *irq;
struct resource *io;
unsigned int i;
@@ -640,22 +646,17 @@ static int vsp1_probe(struct platform_device *pdev)
return -ENOMEM;
vsp1->dev = &pdev->dev;
- mutex_init(&vsp1->lock);
INIT_LIST_HEAD(&vsp1->entities);
INIT_LIST_HEAD(&vsp1->videos);
- /* I/O, IRQ and clock resources */
+ platform_set_drvdata(pdev, vsp1);
+
+ /* I/O and IRQ resources (clock managed by the clock PM domain) */
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
vsp1->mmio = devm_ioremap_resource(&pdev->dev, io);
if (IS_ERR(vsp1->mmio))
return PTR_ERR(vsp1->mmio);
- vsp1->clock = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(vsp1->clock)) {
- dev_err(&pdev->dev, "failed to get clock\n");
- return PTR_ERR(vsp1->clock);
- }
-
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!irq) {
dev_err(&pdev->dev, "missing IRQ\n");
@@ -669,13 +670,27 @@ static int vsp1_probe(struct platform_device *pdev)
return ret;
}
+ /* FCP (optional) */
+ fcp_node = of_parse_phandle(pdev->dev.of_node, "renesas,fcp", 0);
+ if (fcp_node) {
+ vsp1->fcp = rcar_fcp_get(fcp_node);
+ of_node_put(fcp_node);
+ if (IS_ERR(vsp1->fcp)) {
+ dev_dbg(&pdev->dev, "FCP not found (%ld)\n",
+ PTR_ERR(vsp1->fcp));
+ return PTR_ERR(vsp1->fcp);
+ }
+ }
+
/* Configure device parameters based on the version register. */
- ret = clk_prepare_enable(vsp1->clock);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0)
- return ret;
+ goto done;
version = vsp1_read(vsp1, VI6_IP_VERSION);
- clk_disable_unprepare(vsp1->clock);
+ pm_runtime_put_sync(&pdev->dev);
for (i = 0; i < ARRAY_SIZE(vsp1_device_infos); ++i) {
if ((version & VI6_IP_VERSION_MODEL_MASK) ==
@@ -687,7 +702,8 @@ static int vsp1_probe(struct platform_device *pdev)
if (!vsp1->info) {
dev_err(&pdev->dev, "unsupported IP version 0x%08x\n", version);
- return -ENXIO;
+ ret = -ENXIO;
+ goto done;
}
dev_dbg(&pdev->dev, "IP version 0x%08x\n", version);
@@ -696,12 +712,14 @@ static int vsp1_probe(struct platform_device *pdev)
ret = vsp1_create_entities(vsp1);
if (ret < 0) {
dev_err(&pdev->dev, "failed to create entities\n");
- return ret;
+ goto done;
}
- platform_set_drvdata(pdev, vsp1);
+done:
+ if (ret)
+ pm_runtime_disable(&pdev->dev);
- return 0;
+ return ret;
}
static int vsp1_remove(struct platform_device *pdev)
@@ -709,6 +727,9 @@ static int vsp1_remove(struct platform_device *pdev)
struct vsp1_device *vsp1 = platform_get_drvdata(pdev);
vsp1_destroy_entities(vsp1);
+ rcar_fcp_put(vsp1->fcp);
+
+ pm_runtime_disable(&pdev->dev);
return 0;
}
diff --git a/drivers/media/platform/vsp1/vsp1_entity.c b/drivers/media/platform/vsp1/vsp1_entity.c
index 3d070bcc6..4cf6cc719 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.c
+++ b/drivers/media/platform/vsp1/vsp1_entity.c
@@ -22,6 +22,12 @@
#include "vsp1_dl.h"
#include "vsp1_entity.h"
+static inline struct vsp1_entity *
+media_entity_to_vsp1_entity(struct media_entity *entity)
+{
+ return container_of(entity, struct vsp1_entity, subdev.entity);
+}
+
void vsp1_entity_route_setup(struct vsp1_entity *source,
struct vsp1_dl_list *dl)
{
@@ -30,7 +36,7 @@ void vsp1_entity_route_setup(struct vsp1_entity *source,
if (source->route->reg == 0)
return;
- sink = container_of(source->sink, struct vsp1_entity, subdev.entity);
+ sink = media_entity_to_vsp1_entity(source->sink);
vsp1_dl_list_write(dl, source->route->reg,
sink->route->inputs[source->sink_pad]);
}
@@ -81,12 +87,30 @@ vsp1_entity_get_pad_format(struct vsp1_entity *entity,
return v4l2_subdev_get_try_format(&entity->subdev, cfg, pad);
}
+/**
+ * vsp1_entity_get_pad_selection - Get a pad selection from storage for entity
+ * @entity: the entity
+ * @cfg: the configuration storage
+ * @pad: the pad number
+ * @target: the selection target
+ *
+ * Return the selection rectangle stored in the given configuration for an
+ * entity's pad. The configuration can be an ACTIVE or TRY configuration. The
+ * selection target can be COMPOSE or CROP.
+ */
struct v4l2_rect *
-vsp1_entity_get_pad_compose(struct vsp1_entity *entity,
- struct v4l2_subdev_pad_config *cfg,
- unsigned int pad)
+vsp1_entity_get_pad_selection(struct vsp1_entity *entity,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, unsigned int target)
{
- return v4l2_subdev_get_try_compose(&entity->subdev, cfg, pad);
+ switch (target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ return v4l2_subdev_get_try_compose(&entity->subdev, cfg, pad);
+ case V4L2_SEL_TGT_CROP:
+ return v4l2_subdev_get_try_crop(&entity->subdev, cfg, pad);
+ default:
+ return NULL;
+ }
}
/*
@@ -252,7 +276,7 @@ int vsp1_entity_link_setup(struct media_entity *entity,
if (!(local->flags & MEDIA_PAD_FL_SOURCE))
return 0;
- source = container_of(local->entity, struct vsp1_entity, subdev.entity);
+ source = media_entity_to_vsp1_entity(local->entity);
if (!source->route)
return 0;
@@ -274,33 +298,50 @@ int vsp1_entity_link_setup(struct media_entity *entity,
* Initialization
*/
+#define VSP1_ENTITY_ROUTE(ent) \
+ { VSP1_ENTITY_##ent, 0, VI6_DPR_##ent##_ROUTE, \
+ { VI6_DPR_NODE_##ent }, VI6_DPR_NODE_##ent }
+
+#define VSP1_ENTITY_ROUTE_RPF(idx) \
+ { VSP1_ENTITY_RPF, idx, VI6_DPR_RPF_ROUTE(idx), \
+ { 0, }, VI6_DPR_NODE_RPF(idx) }
+
+#define VSP1_ENTITY_ROUTE_UDS(idx) \
+ { VSP1_ENTITY_UDS, idx, VI6_DPR_UDS_ROUTE(idx), \
+ { VI6_DPR_NODE_UDS(idx) }, VI6_DPR_NODE_UDS(idx) }
+
+#define VSP1_ENTITY_ROUTE_WPF(idx) \
+ { VSP1_ENTITY_WPF, idx, 0, \
+ { VI6_DPR_NODE_WPF(idx) }, VI6_DPR_NODE_WPF(idx) }
+
static const struct vsp1_route vsp1_routes[] = {
{ VSP1_ENTITY_BRU, 0, VI6_DPR_BRU_ROUTE,
{ VI6_DPR_NODE_BRU_IN(0), VI6_DPR_NODE_BRU_IN(1),
VI6_DPR_NODE_BRU_IN(2), VI6_DPR_NODE_BRU_IN(3),
- VI6_DPR_NODE_BRU_IN(4) } },
- { VSP1_ENTITY_HSI, 0, VI6_DPR_HSI_ROUTE, { VI6_DPR_NODE_HSI, } },
- { VSP1_ENTITY_HST, 0, VI6_DPR_HST_ROUTE, { VI6_DPR_NODE_HST, } },
- { VSP1_ENTITY_LIF, 0, 0, { VI6_DPR_NODE_LIF, } },
- { VSP1_ENTITY_LUT, 0, VI6_DPR_LUT_ROUTE, { VI6_DPR_NODE_LUT, } },
- { VSP1_ENTITY_RPF, 0, VI6_DPR_RPF_ROUTE(0), { 0, } },
- { VSP1_ENTITY_RPF, 1, VI6_DPR_RPF_ROUTE(1), { 0, } },
- { VSP1_ENTITY_RPF, 2, VI6_DPR_RPF_ROUTE(2), { 0, } },
- { VSP1_ENTITY_RPF, 3, VI6_DPR_RPF_ROUTE(3), { 0, } },
- { VSP1_ENTITY_RPF, 4, VI6_DPR_RPF_ROUTE(4), { 0, } },
- { VSP1_ENTITY_SRU, 0, VI6_DPR_SRU_ROUTE, { VI6_DPR_NODE_SRU, } },
- { VSP1_ENTITY_UDS, 0, VI6_DPR_UDS_ROUTE(0), { VI6_DPR_NODE_UDS(0), } },
- { VSP1_ENTITY_UDS, 1, VI6_DPR_UDS_ROUTE(1), { VI6_DPR_NODE_UDS(1), } },
- { VSP1_ENTITY_UDS, 2, VI6_DPR_UDS_ROUTE(2), { VI6_DPR_NODE_UDS(2), } },
- { VSP1_ENTITY_WPF, 0, 0, { VI6_DPR_NODE_WPF(0), } },
- { VSP1_ENTITY_WPF, 1, 0, { VI6_DPR_NODE_WPF(1), } },
- { VSP1_ENTITY_WPF, 2, 0, { VI6_DPR_NODE_WPF(2), } },
- { VSP1_ENTITY_WPF, 3, 0, { VI6_DPR_NODE_WPF(3), } },
+ VI6_DPR_NODE_BRU_IN(4) }, VI6_DPR_NODE_BRU_OUT },
+ VSP1_ENTITY_ROUTE(CLU),
+ VSP1_ENTITY_ROUTE(HSI),
+ VSP1_ENTITY_ROUTE(HST),
+ { VSP1_ENTITY_LIF, 0, 0, { VI6_DPR_NODE_LIF, }, VI6_DPR_NODE_LIF },
+ VSP1_ENTITY_ROUTE(LUT),
+ VSP1_ENTITY_ROUTE_RPF(0),
+ VSP1_ENTITY_ROUTE_RPF(1),
+ VSP1_ENTITY_ROUTE_RPF(2),
+ VSP1_ENTITY_ROUTE_RPF(3),
+ VSP1_ENTITY_ROUTE_RPF(4),
+ VSP1_ENTITY_ROUTE(SRU),
+ VSP1_ENTITY_ROUTE_UDS(0),
+ VSP1_ENTITY_ROUTE_UDS(1),
+ VSP1_ENTITY_ROUTE_UDS(2),
+ VSP1_ENTITY_ROUTE_WPF(0),
+ VSP1_ENTITY_ROUTE_WPF(1),
+ VSP1_ENTITY_ROUTE_WPF(2),
+ VSP1_ENTITY_ROUTE_WPF(3),
};
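
The helper macros rely on the VI6_DPR_* register names being derivable from the entity name by token pasting, so for instance VSP1_ENTITY_ROUTE(CLU) expands to:

{ VSP1_ENTITY_CLU, 0, VI6_DPR_CLU_ROUTE,
  { VI6_DPR_NODE_CLU }, VI6_DPR_NODE_CLU }

which matches the shape of the remaining open-coded BRU and LIF entries.
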
int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity,
const char *name, unsigned int num_pads,
- const struct v4l2_subdev_ops *ops)
+ const struct v4l2_subdev_ops *ops, u32 function)
{
struct v4l2_subdev *subdev;
unsigned int i;
@@ -341,6 +382,7 @@ int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity,
subdev = &entity->subdev;
v4l2_subdev_init(subdev, ops);
+ subdev->entity.function = function;
subdev->entity.ops = &vsp1->media_ops;
subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
diff --git a/drivers/media/platform/vsp1/vsp1_entity.h b/drivers/media/platform/vsp1/vsp1_entity.h
index 69eff4e17..b43457fd2 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.h
+++ b/drivers/media/platform/vsp1/vsp1_entity.h
@@ -24,6 +24,7 @@ struct vsp1_pipeline;
enum vsp1_entity_type {
VSP1_ENTITY_BRU,
+ VSP1_ENTITY_CLU,
VSP1_ENTITY_HSI,
VSP1_ENTITY_HST,
VSP1_ENTITY_LIF,
@@ -42,17 +43,21 @@ enum vsp1_entity_type {
* @index: Entity index this routing entry is associated with
* @reg: Output routing configuration register
* @inputs: Target node value for each input
+ * @output: Target node value for entity output
*
* Each $vsp1_route entry describes routing configuration for the entity
* specified by the entry's @type and @index. @reg indicates the register that
* holds output routing configuration for the entity, and the @inputs array
- * store the target node value for each input of the entity.
+ * stores the target node value for each input of the entity. The @output field
+ * stores the target node value of the entity output when used as a source for
+ * histogram generation.
*/
struct vsp1_route {
enum vsp1_entity_type type;
unsigned int index;
unsigned int reg;
unsigned int inputs[VSP1_ENTITY_MAX_INPUTS];
+ unsigned int output;
};
/**
@@ -68,7 +73,7 @@ struct vsp1_entity_operations {
void (*destroy)(struct vsp1_entity *);
void (*set_memory)(struct vsp1_entity *, struct vsp1_dl_list *dl);
void (*configure)(struct vsp1_entity *, struct vsp1_pipeline *,
- struct vsp1_dl_list *);
+ struct vsp1_dl_list *, bool);
};
struct vsp1_entity {
@@ -100,7 +105,7 @@ static inline struct vsp1_entity *to_vsp1_entity(struct v4l2_subdev *subdev)
int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity,
const char *name, unsigned int num_pads,
- const struct v4l2_subdev_ops *ops);
+ const struct v4l2_subdev_ops *ops, u32 function);
void vsp1_entity_destroy(struct vsp1_entity *entity);
extern const struct v4l2_subdev_internal_ops vsp1_subdev_internal_ops;
@@ -118,9 +123,9 @@ vsp1_entity_get_pad_format(struct vsp1_entity *entity,
struct v4l2_subdev_pad_config *cfg,
unsigned int pad);
struct v4l2_rect *
-vsp1_entity_get_pad_compose(struct vsp1_entity *entity,
- struct v4l2_subdev_pad_config *cfg,
- unsigned int pad);
+vsp1_entity_get_pad_selection(struct vsp1_entity *entity,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, unsigned int target);
int vsp1_entity_init_cfg(struct v4l2_subdev *subdev,
struct v4l2_subdev_pad_config *cfg);
diff --git a/drivers/media/platform/vsp1/vsp1_hsit.c b/drivers/media/platform/vsp1/vsp1_hsit.c
index 68b8567b3..6e5077beb 100644
--- a/drivers/media/platform/vsp1/vsp1_hsit.c
+++ b/drivers/media/platform/vsp1/vsp1_hsit.c
@@ -107,7 +107,7 @@ static int hsit_set_format(struct v4l2_subdev *subdev,
return 0;
}
-static struct v4l2_subdev_pad_ops hsit_pad_ops = {
+static const struct v4l2_subdev_pad_ops hsit_pad_ops = {
.init_cfg = vsp1_entity_init_cfg,
.enum_mbus_code = hsit_enum_mbus_code,
.enum_frame_size = hsit_enum_frame_size,
@@ -115,7 +115,7 @@ static struct v4l2_subdev_pad_ops hsit_pad_ops = {
.set_fmt = hsit_set_format,
};
-static struct v4l2_subdev_ops hsit_ops = {
+static const struct v4l2_subdev_ops hsit_ops = {
.pad = &hsit_pad_ops,
};
@@ -125,10 +125,13 @@ static struct v4l2_subdev_ops hsit_ops = {
static void hsit_configure(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
- struct vsp1_dl_list *dl)
+ struct vsp1_dl_list *dl, bool full)
{
struct vsp1_hsit *hsit = to_hsit(&entity->subdev);
+ if (!full)
+ return;
+
if (hsit->inverse)
vsp1_hsit_write(hsit, dl, VI6_HSI_CTRL, VI6_HSI_CTRL_EN);
else
@@ -161,8 +164,9 @@ struct vsp1_hsit *vsp1_hsit_create(struct vsp1_device *vsp1, bool inverse)
else
hsit->entity.type = VSP1_ENTITY_HST;
- ret = vsp1_entity_init(vsp1, &hsit->entity, inverse ? "hsi" : "hst", 2,
- &hsit_ops);
+ ret = vsp1_entity_init(vsp1, &hsit->entity, inverse ? "hsi" : "hst",
+ 2, &hsit_ops,
+ MEDIA_ENT_F_PROC_VIDEO_PIXEL_ENC_CONV);
if (ret < 0)
return ERR_PTR(ret);
diff --git a/drivers/media/platform/vsp1/vsp1_lif.c b/drivers/media/platform/vsp1/vsp1_lif.c
index 0217393f2..a720063f3 100644
--- a/drivers/media/platform/vsp1/vsp1_lif.c
+++ b/drivers/media/platform/vsp1/vsp1_lif.c
@@ -104,7 +104,7 @@ static int lif_set_format(struct v4l2_subdev *subdev,
return 0;
}
-static struct v4l2_subdev_pad_ops lif_pad_ops = {
+static const struct v4l2_subdev_pad_ops lif_pad_ops = {
.init_cfg = vsp1_entity_init_cfg,
.enum_mbus_code = lif_enum_mbus_code,
.enum_frame_size = lif_enum_frame_size,
@@ -112,7 +112,7 @@ static struct v4l2_subdev_pad_ops lif_pad_ops = {
.set_fmt = lif_set_format,
};
-static struct v4l2_subdev_ops lif_ops = {
+static const struct v4l2_subdev_ops lif_ops = {
.pad = &lif_pad_ops,
};
@@ -122,7 +122,7 @@ static struct v4l2_subdev_ops lif_ops = {
static void lif_configure(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
- struct vsp1_dl_list *dl)
+ struct vsp1_dl_list *dl, bool full)
{
const struct v4l2_mbus_framefmt *format;
struct vsp1_lif *lif = to_lif(&entity->subdev);
@@ -130,6 +130,9 @@ static void lif_configure(struct vsp1_entity *entity,
unsigned int obth = 400;
unsigned int lbth = 200;
+ if (!full)
+ return;
+
format = vsp1_entity_get_pad_format(&lif->entity, lif->entity.config,
LIF_PAD_SOURCE);
@@ -165,7 +168,12 @@ struct vsp1_lif *vsp1_lif_create(struct vsp1_device *vsp1)
lif->entity.ops = &lif_entity_ops;
lif->entity.type = VSP1_ENTITY_LIF;
- ret = vsp1_entity_init(vsp1, &lif->entity, "lif", 2, &lif_ops);
+ /* The LIF is never exposed to userspace, but media entity registration
+ * requires a function to be set. Use PROC_VIDEO_PIXEL_FORMATTER just to
+ * avoid triggering a WARN_ON(); the value won't be seen anywhere.
+ */
+ ret = vsp1_entity_init(vsp1, &lif->entity, "lif", 2, &lif_ops,
+ MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER);
if (ret < 0)
return ERR_PTR(ret);
diff --git a/drivers/media/platform/vsp1/vsp1_lut.c b/drivers/media/platform/vsp1/vsp1_lut.c
index aa09e59f0..dc31de960 100644
--- a/drivers/media/platform/vsp1/vsp1_lut.c
+++ b/drivers/media/platform/vsp1/vsp1_lut.c
@@ -13,7 +13,6 @@
#include <linux/device.h>
#include <linux/gfp.h>
-#include <linux/vsp1.h>
#include <media/v4l2-subdev.h>
@@ -35,43 +34,62 @@ static inline void vsp1_lut_write(struct vsp1_lut *lut, struct vsp1_dl_list *dl,
}
/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Core Operations
+ * Controls
*/
-static int lut_set_table(struct vsp1_lut *lut, struct vsp1_lut_config *config)
+#define V4L2_CID_VSP1_LUT_TABLE (V4L2_CID_USER_BASE | 0x1001)
+
+static int lut_set_table(struct vsp1_lut *lut, struct v4l2_ctrl *ctrl)
{
struct vsp1_dl_body *dlb;
unsigned int i;
- dlb = vsp1_dl_fragment_alloc(lut->entity.vsp1, ARRAY_SIZE(config->lut));
+ dlb = vsp1_dl_fragment_alloc(lut->entity.vsp1, 256);
if (!dlb)
return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(config->lut); ++i)
+ for (i = 0; i < 256; ++i)
vsp1_dl_fragment_write(dlb, VI6_LUT_TABLE + 4 * i,
- config->lut[i]);
+ ctrl->p_new.p_u32[i]);
- mutex_lock(&lut->lock);
+ spin_lock_irq(&lut->lock);
swap(lut->lut, dlb);
- mutex_unlock(&lut->lock);
+ spin_unlock_irq(&lut->lock);
vsp1_dl_fragment_free(dlb);
return 0;
}
-static long lut_ioctl(struct v4l2_subdev *subdev, unsigned int cmd, void *arg)
+static int lut_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct vsp1_lut *lut = to_lut(subdev);
-
- switch (cmd) {
- case VIDIOC_VSP1_LUT_CONFIG:
- return lut_set_table(lut, arg);
+ struct vsp1_lut *lut =
+ container_of(ctrl->handler, struct vsp1_lut, ctrls);
- default:
- return -ENOIOCTLCMD;
+ switch (ctrl->id) {
+ case V4L2_CID_VSP1_LUT_TABLE:
+ lut_set_table(lut, ctrl);
+ break;
}
+
+ return 0;
}
+static const struct v4l2_ctrl_ops lut_ctrl_ops = {
+ .s_ctrl = lut_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config lut_table_control = {
+ .ops = &lut_ctrl_ops,
+ .id = V4L2_CID_VSP1_LUT_TABLE,
+ .name = "Look-Up Table",
+ .type = V4L2_CTRL_TYPE_U32,
+ .min = 0x00000000,
+ .max = 0x00ffffff,
+ .step = 1,
+ .def = 0,
+ .dims = { 256 },
+};
+
/* -----------------------------------------------------------------------------
* V4L2 Subdevice Pad Operations
*/
@@ -147,11 +165,7 @@ static int lut_set_format(struct v4l2_subdev *subdev,
* V4L2 Subdevice Operations
*/
-static struct v4l2_subdev_core_ops lut_core_ops = {
- .ioctl = lut_ioctl,
-};
-
-static struct v4l2_subdev_pad_ops lut_pad_ops = {
+static const struct v4l2_subdev_pad_ops lut_pad_ops = {
.init_cfg = vsp1_entity_init_cfg,
.enum_mbus_code = lut_enum_mbus_code,
.enum_frame_size = lut_enum_frame_size,
@@ -159,8 +173,7 @@ static struct v4l2_subdev_pad_ops lut_pad_ops = {
.set_fmt = lut_set_format,
};
-static struct v4l2_subdev_ops lut_ops = {
- .core = &lut_core_ops,
+static const struct v4l2_subdev_ops lut_ops = {
.pad = &lut_pad_ops,
};
@@ -170,18 +183,24 @@ static struct v4l2_subdev_ops lut_ops = {
static void lut_configure(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
- struct vsp1_dl_list *dl)
+ struct vsp1_dl_list *dl, bool full)
{
struct vsp1_lut *lut = to_lut(&entity->subdev);
+ struct vsp1_dl_body *dlb;
+ unsigned long flags;
- vsp1_lut_write(lut, dl, VI6_LUT_CTRL, VI6_LUT_CTRL_EN);
-
- mutex_lock(&lut->lock);
- if (lut->lut) {
- vsp1_dl_list_add_fragment(dl, lut->lut);
- lut->lut = NULL;
+ if (full) {
+ vsp1_lut_write(lut, dl, VI6_LUT_CTRL, VI6_LUT_CTRL_EN);
+ return;
}
- mutex_unlock(&lut->lock);
+
+ spin_lock_irqsave(&lut->lock, flags);
+ dlb = lut->lut;
+ lut->lut = NULL;
+ spin_unlock_irqrestore(&lut->lock, flags);
+
+ if (dlb)
+ vsp1_dl_list_add_fragment(dl, dlb);
}
static const struct vsp1_entity_operations lut_entity_ops = {
@@ -201,12 +220,30 @@ struct vsp1_lut *vsp1_lut_create(struct vsp1_device *vsp1)
if (lut == NULL)
return ERR_PTR(-ENOMEM);
+ spin_lock_init(&lut->lock);
+
lut->entity.ops = &lut_entity_ops;
lut->entity.type = VSP1_ENTITY_LUT;
- ret = vsp1_entity_init(vsp1, &lut->entity, "lut", 2, &lut_ops);
+ ret = vsp1_entity_init(vsp1, &lut->entity, "lut", 2, &lut_ops,
+ MEDIA_ENT_F_PROC_VIDEO_LUT);
if (ret < 0)
return ERR_PTR(ret);
+ /* Initialize the control handler. */
+ v4l2_ctrl_handler_init(&lut->ctrls, 1);
+ v4l2_ctrl_new_custom(&lut->ctrls, &lut_table_control, NULL);
+
+ lut->entity.subdev.ctrl_handler = &lut->ctrls;
+
+ if (lut->ctrls.error) {
+ dev_err(vsp1->dev, "lut: failed to initialize controls\n");
+ ret = lut->ctrls.error;
+ vsp1_entity_destroy(&lut->entity);
+ return ERR_PTR(ret);
+ }
+
+ v4l2_ctrl_handler_setup(&lut->ctrls);
+
return lut;
}
diff --git a/drivers/media/platform/vsp1/vsp1_lut.h b/drivers/media/platform/vsp1/vsp1_lut.h
index cef874f22..f8c4e8f0a 100644
--- a/drivers/media/platform/vsp1/vsp1_lut.h
+++ b/drivers/media/platform/vsp1/vsp1_lut.h
@@ -13,9 +13,10 @@
#ifndef __VSP1_LUT_H__
#define __VSP1_LUT_H__
-#include <linux/mutex.h>
+#include <linux/spinlock.h>
#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-subdev.h>
#include "vsp1_entity.h"
@@ -28,7 +29,9 @@ struct vsp1_device;
struct vsp1_lut {
struct vsp1_entity entity;
- struct mutex lock;
+ struct v4l2_ctrl_handler ctrls;
+
+ spinlock_t lock;
struct vsp1_dl_body *lut;
};
diff --git a/drivers/media/platform/vsp1/vsp1_pipe.c b/drivers/media/platform/vsp1/vsp1_pipe.c
index 4f3b4a1d0..3e75fb3fc 100644
--- a/drivers/media/platform/vsp1/vsp1_pipe.c
+++ b/drivers/media/platform/vsp1/vsp1_pipe.c
@@ -172,13 +172,17 @@ void vsp1_pipeline_reset(struct vsp1_pipeline *pipe)
bru->inputs[i].rpf = NULL;
}
- for (i = 0; i < pipe->num_inputs; ++i) {
- pipe->inputs[i]->pipe = NULL;
- pipe->inputs[i] = NULL;
+ for (i = 0; i < ARRAY_SIZE(pipe->inputs); ++i) {
+ if (pipe->inputs[i]) {
+ pipe->inputs[i]->pipe = NULL;
+ pipe->inputs[i] = NULL;
+ }
}
- pipe->output->pipe = NULL;
- pipe->output = NULL;
+ if (pipe->output) {
+ pipe->output->pipe = NULL;
+ pipe->output = NULL;
+ }
INIT_LIST_HEAD(&pipe->entities);
pipe->state = VSP1_PIPELINE_STOPPED;
@@ -286,6 +290,8 @@ void vsp1_pipeline_frame_end(struct vsp1_pipeline *pipe)
if (pipe->frame_end)
pipe->frame_end(pipe);
+
+ pipe->sequence++;
}
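
The new sequence counter advances once per completed frame, giving consumers a monotonic frame index. A hedged sketch of a video node stamping a returned buffer with it (vb2 types from the V4L2 core):

#include <linux/ktime.h>
#include <media/videobuf2-v4l2.h>

static void video_buffer_complete(struct vsp1_pipeline *pipe,
				  struct vb2_v4l2_buffer *buf)
{
	buf->sequence = pipe->sequence;	/* frame index for userspace */
	buf->vb2_buf.timestamp = ktime_get_ns();
	vb2_buffer_done(&buf->vb2_buf, VB2_BUF_STATE_DONE);
}
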
/*
@@ -295,42 +301,20 @@ void vsp1_pipeline_frame_end(struct vsp1_pipeline *pipe)
* to be scaled, we disable alpha scaling when the UDS input has a fixed alpha
* value. The UDS then outputs a fixed alpha value which needs to be programmed
* from the input RPF alpha.
- *
- * This function can only be called from a subdev s_stream handler as it
- * requires a valid display list context.
*/
void vsp1_pipeline_propagate_alpha(struct vsp1_pipeline *pipe,
- struct vsp1_entity *input,
- struct vsp1_dl_list *dl,
- unsigned int alpha)
+ struct vsp1_dl_list *dl, unsigned int alpha)
{
- struct vsp1_entity *entity;
- struct media_pad *pad;
-
- pad = media_entity_remote_pad(&input->pads[RWPF_PAD_SOURCE]);
-
- while (pad) {
- if (!is_media_entity_v4l2_subdev(pad->entity))
- break;
-
- entity = to_vsp1_entity(media_entity_to_v4l2_subdev(pad->entity));
-
- /* The BRU background color has a fixed alpha value set to 255,
- * the output alpha value is thus always equal to 255.
- */
- if (entity->type == VSP1_ENTITY_BRU)
- alpha = 255;
-
- if (entity->type == VSP1_ENTITY_UDS) {
- struct vsp1_uds *uds = to_uds(&entity->subdev);
+ if (!pipe->uds)
+ return;
- vsp1_uds_set_alpha(uds, dl, alpha);
- break;
- }
+ /* The BRU background color has a fixed alpha value set to 255, the
+ * output alpha value is thus always equal to 255.
+ */
+ if (pipe->uds_input->type == VSP1_ENTITY_BRU)
+ alpha = 255;
- pad = &entity->pads[entity->source_pad];
- pad = media_entity_remote_pad(pad);
- }
+ vsp1_uds_set_alpha(pipe->uds, dl, alpha);
}
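
The rewrite replaces the per-frame media graph walk with two cached pointers: pipe->uds and pipe->uds_input are expected to be recorded once, while the pipeline is built, so alpha propagation becomes a simple lookup. A sketch under that assumption:

/* At pipeline build time, remember the UDS and the entity feeding it. */
if (entity->type == VSP1_ENTITY_UDS) {
	pipe->uds = entity;
	pipe->uds_input = prev_entity;	/* direct source of the UDS */
}
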
void vsp1_pipelines_suspend(struct vsp1_device *vsp1)
@@ -383,7 +367,7 @@ void vsp1_pipelines_resume(struct vsp1_device *vsp1)
{
unsigned int i;
- /* Resume pipeline all running pipelines. */
+ /* Resume all running pipelines. */
for (i = 0; i < vsp1->info->wpf_count; ++i) {
struct vsp1_rwpf *wpf = vsp1->wpf[i];
struct vsp1_pipeline *pipe;
diff --git a/drivers/media/platform/vsp1/vsp1_pipe.h b/drivers/media/platform/vsp1/vsp1_pipe.h
index 7b5611351..d20d997b1 100644
--- a/drivers/media/platform/vsp1/vsp1_pipe.h
+++ b/drivers/media/platform/vsp1/vsp1_pipe.h
@@ -61,12 +61,13 @@ enum vsp1_pipeline_state {
* @pipe: the media pipeline
* @irqlock: protects the pipeline state
* @state: current state
- * @wq: work queue to wait for state change completion
+ * @wq: wait queue to wait for state change completion
* @frame_end: frame end interrupt handler
* @lock: protects the pipeline use count and stream count
* @kref: pipeline reference count
* @stream_count: number of streaming video nodes
* @buffers_ready: bitmask of RPFs and WPFs with at least one buffer available
+ * @sequence: frame sequence number
* @num_inputs: number of RPFs
* @inputs: array of RPFs in the pipeline (indexed by RPF index)
* @output: WPF at the output of the pipeline
@@ -90,6 +91,7 @@ struct vsp1_pipeline {
struct kref kref;
unsigned int stream_count;
unsigned int buffers_ready;
+ unsigned int sequence;
unsigned int num_inputs;
struct vsp1_rwpf *inputs[VSP1_MAX_RPF];
@@ -115,9 +117,7 @@ bool vsp1_pipeline_ready(struct vsp1_pipeline *pipe);
void vsp1_pipeline_frame_end(struct vsp1_pipeline *pipe);
void vsp1_pipeline_propagate_alpha(struct vsp1_pipeline *pipe,
- struct vsp1_entity *input,
- struct vsp1_dl_list *dl,
- unsigned int alpha);
+ struct vsp1_dl_list *dl, unsigned int alpha);
void vsp1_pipelines_suspend(struct vsp1_device *vsp1);
void vsp1_pipelines_resume(struct vsp1_device *vsp1);
diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h
index 927b5fb94..3b03007ba 100644
--- a/drivers/media/platform/vsp1/vsp1_regs.h
+++ b/drivers/media/platform/vsp1/vsp1_regs.h
@@ -154,10 +154,10 @@
#define VI6_RPF_ALPH_SEL_AEXT_EXT (1 << 18)
#define VI6_RPF_ALPH_SEL_AEXT_ONE (2 << 18)
#define VI6_RPF_ALPH_SEL_AEXT_MASK (3 << 18)
-#define VI6_RPF_ALPH_SEL_ALPHA0_MASK (0xff << 8)
-#define VI6_RPF_ALPH_SEL_ALPHA0_SHIFT 8
-#define VI6_RPF_ALPH_SEL_ALPHA1_MASK (0xff << 0)
-#define VI6_RPF_ALPH_SEL_ALPHA1_SHIFT 0
+#define VI6_RPF_ALPH_SEL_ALPHA1_MASK (0xff << 8)
+#define VI6_RPF_ALPH_SEL_ALPHA1_SHIFT 8
+#define VI6_RPF_ALPH_SEL_ALPHA0_MASK (0xff << 0)
+#define VI6_RPF_ALPH_SEL_ALPHA0_SHIFT 0
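
The swap corrects the field order: with these definitions ALPHA1 occupies bits [15:8] and ALPHA0 bits [7:0]. Packing two 8-bit alpha values accordingly (illustrative):

u32 alph_sel = (((u32)alpha1 << VI6_RPF_ALPH_SEL_ALPHA1_SHIFT) &
		VI6_RPF_ALPH_SEL_ALPHA1_MASK) |
	       (((u32)alpha0 << VI6_RPF_ALPH_SEL_ALPHA0_SHIFT) &
		VI6_RPF_ALPH_SEL_ALPHA0_MASK);
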
#define VI6_RPF_VRTCOL_SET 0x0318
#define VI6_RPF_VRTCOL_SET_LAYA_MASK (0xff << 24)
@@ -255,6 +255,8 @@
#define VI6_WPF_OUTFMT_PDV_MASK (0xff << 24)
#define VI6_WPF_OUTFMT_PDV_SHIFT 24
#define VI6_WPF_OUTFMT_PXA (1 << 23)
+#define VI6_WPF_OUTFMT_ROT (1 << 18)
+#define VI6_WPF_OUTFMT_HFLP (1 << 17)
#define VI6_WPF_OUTFMT_FLP (1 << 16)
#define VI6_WPF_OUTFMT_SPYCS (1 << 15)
#define VI6_WPF_OUTFMT_SPUVS (1 << 14)
@@ -289,6 +291,11 @@
#define VI6_WPF_RNDCTRL_CLMD_EXT (2 << 12)
#define VI6_WPF_RNDCTRL_CLMD_MASK (3 << 12)
+#define VI6_WPF_ROT_CTRL 0x1018
+#define VI6_WPF_ROT_CTRL_LN16 (1 << 17)
+#define VI6_WPF_ROT_CTRL_LMEM_WD_MASK (0x1fff << 0)
+#define VI6_WPF_ROT_CTRL_LMEM_WD_SHIFT 0
+
#define VI6_WPF_DSTM_STRIDE_Y 0x101c
#define VI6_WPF_DSTM_STRIDE_C 0x1020
#define VI6_WPF_DSTM_ADDR_Y 0x1024
@@ -444,6 +451,15 @@
*/
#define VI6_CLU_CTRL 0x2900
+#define VI6_CLU_CTRL_AAI (1 << 28)
+#define VI6_CLU_CTRL_MVS (1 << 24)
+#define VI6_CLU_CTRL_AX1I_2D (3 << 14)
+#define VI6_CLU_CTRL_AX2I_2D (1 << 12)
+#define VI6_CLU_CTRL_OS0_2D (3 << 8)
+#define VI6_CLU_CTRL_OS1_2D (1 << 6)
+#define VI6_CLU_CTRL_OS2_2D (3 << 4)
+#define VI6_CLU_CTRL_M2D (1 << 1)
+#define VI6_CLU_CTRL_EN (1 << 0)
/* -----------------------------------------------------------------------------
* HST Control Registers
diff --git a/drivers/media/platform/vsp1/vsp1_rpf.c b/drivers/media/platform/vsp1/vsp1_rpf.c
index 49168db3f..388838913 100644
--- a/drivers/media/platform/vsp1/vsp1_rpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rpf.c
@@ -38,7 +38,7 @@ static inline void vsp1_rpf_write(struct vsp1_rwpf *rpf,
* V4L2 Subdevice Operations
*/
-static struct v4l2_subdev_ops rpf_ops = {
+static const struct v4l2_subdev_ops rpf_ops = {
.pad = &vsp1_rwpf_pad_ops,
};
@@ -60,7 +60,7 @@ static void rpf_set_memory(struct vsp1_entity *entity, struct vsp1_dl_list *dl)
static void rpf_configure(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
- struct vsp1_dl_list *dl)
+ struct vsp1_dl_list *dl, bool full)
{
struct vsp1_rwpf *rpf = to_rwpf(&entity->subdev);
const struct vsp1_format_info *fmtinfo = rpf->fmtinfo;
@@ -73,6 +73,16 @@ static void rpf_configure(struct vsp1_entity *entity,
u32 pstride;
u32 infmt;
+ if (!full) {
+ vsp1_rpf_write(rpf, dl, VI6_RPF_VRTCOL_SET,
+ rpf->alpha << VI6_RPF_VRTCOL_SET_LAYA_SHIFT);
+ vsp1_rpf_write(rpf, dl, VI6_RPF_MULT_ALPHA, rpf->mult_alpha |
+ (rpf->alpha << VI6_RPF_MULT_ALPHA_RATIO_SHIFT));
+
+ vsp1_pipeline_propagate_alpha(pipe, dl, rpf->alpha);
+ return;
+ }
+
/* Source size, stride and crop offsets.
*
* The crop offsets correspond to the location of the crop rectangle top
@@ -130,9 +140,10 @@ static void rpf_configure(struct vsp1_entity *entity,
if (pipe->bru) {
const struct v4l2_rect *compose;
- compose = vsp1_entity_get_pad_compose(pipe->bru,
- pipe->bru->config,
- rpf->bru_input);
+ compose = vsp1_entity_get_pad_selection(pipe->bru,
+ pipe->bru->config,
+ rpf->bru_input,
+ V4L2_SEL_TGT_COMPOSE);
left = compose->left;
top = compose->top;
}
@@ -167,9 +178,6 @@ static void rpf_configure(struct vsp1_entity *entity,
(fmtinfo->alpha ? VI6_RPF_ALPH_SEL_ASEL_PACKED
: VI6_RPF_ALPH_SEL_ASEL_FIXED));
- vsp1_rpf_write(rpf, dl, VI6_RPF_VRTCOL_SET,
- rpf->alpha << VI6_RPF_VRTCOL_SET_LAYA_SHIFT);
-
if (entity->vsp1->info->gen == 3) {
u32 mult;
@@ -187,8 +195,7 @@ static void rpf_configure(struct vsp1_entity *entity,
mult = VI6_RPF_MULT_ALPHA_A_MMD_RATIO
| (premultiplied ?
VI6_RPF_MULT_ALPHA_P_MMD_RATIO :
- VI6_RPF_MULT_ALPHA_P_MMD_NONE)
- | (rpf->alpha << VI6_RPF_MULT_ALPHA_RATIO_SHIFT);
+ VI6_RPF_MULT_ALPHA_P_MMD_NONE);
} else {
/* When the input doesn't contain an alpha channel the
* global alpha value is applied in the unpacking unit,
@@ -199,11 +206,9 @@ static void rpf_configure(struct vsp1_entity *entity,
| VI6_RPF_MULT_ALPHA_P_MMD_NONE;
}
- vsp1_rpf_write(rpf, dl, VI6_RPF_MULT_ALPHA, mult);
+ rpf->mult_alpha = mult;
}
- vsp1_pipeline_propagate_alpha(pipe, &rpf->entity, dl, rpf->alpha);
-
vsp1_rpf_write(rpf, dl, VI6_RPF_MSK_CTRL, 0);
vsp1_rpf_write(rpf, dl, VI6_RPF_CKEY_CTRL, 0);
@@ -236,18 +241,21 @@ struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index)
rpf->entity.index = index;
sprintf(name, "rpf.%u", index);
- ret = vsp1_entity_init(vsp1, &rpf->entity, name, 2, &rpf_ops);
+ ret = vsp1_entity_init(vsp1, &rpf->entity, name, 2, &rpf_ops,
+ MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER);
if (ret < 0)
return ERR_PTR(ret);
/* Initialize the control handler. */
- ret = vsp1_rwpf_init_ctrls(rpf);
+ ret = vsp1_rwpf_init_ctrls(rpf, 0);
if (ret < 0) {
dev_err(vsp1->dev, "rpf%u: failed to initialize controls\n",
index);
goto error;
}
+ v4l2_ctrl_handler_setup(&rpf->ctrls);
+
return rpf;
error:
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.c b/drivers/media/platform/vsp1/vsp1_rwpf.c
index 3b6e032e7..8d461b375 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.c
@@ -241,11 +241,9 @@ static const struct v4l2_ctrl_ops vsp1_rwpf_ctrl_ops = {
.s_ctrl = vsp1_rwpf_s_ctrl,
};
-int vsp1_rwpf_init_ctrls(struct vsp1_rwpf *rwpf)
+int vsp1_rwpf_init_ctrls(struct vsp1_rwpf *rwpf, unsigned int ncontrols)
{
- rwpf->alpha = 255;
-
- v4l2_ctrl_handler_init(&rwpf->ctrls, 1);
+ v4l2_ctrl_handler_init(&rwpf->ctrls, ncontrols + 1);
v4l2_ctrl_new_std(&rwpf->ctrls, &vsp1_rwpf_ctrl_ops,
V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 255);
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.h b/drivers/media/platform/vsp1/vsp1_rwpf.h
index 9ff7c78f2..cb20484e8 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.h
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.h
@@ -13,6 +13,8 @@
#ifndef __VSP1_RWPF_H__
#define __VSP1_RWPF_H__
+#include <linux/spinlock.h>
+
#include <media/media-entity.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-subdev.h>
@@ -49,6 +51,16 @@ struct vsp1_rwpf {
unsigned int alpha;
+ u32 mult_alpha;
+ u32 outfmt;
+
+ struct {
+ spinlock_t lock;
+ struct v4l2_ctrl *ctrls[2];
+ unsigned int pending;
+ unsigned int active;
+ } flip;
+
unsigned int offsets[2];
struct vsp1_rwpf_memory mem;
@@ -68,7 +80,7 @@ static inline struct vsp1_rwpf *entity_to_rwpf(struct vsp1_entity *entity)
struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index);
struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index);
-int vsp1_rwpf_init_ctrls(struct vsp1_rwpf *rwpf);
+int vsp1_rwpf_init_ctrls(struct vsp1_rwpf *rwpf, unsigned int ncontrols);
extern const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops;
diff --git a/drivers/media/platform/vsp1/vsp1_sru.c b/drivers/media/platform/vsp1/vsp1_sru.c
index 97ef997ae..47f5e0cea 100644
--- a/drivers/media/platform/vsp1/vsp1_sru.c
+++ b/drivers/media/platform/vsp1/vsp1_sru.c
@@ -37,7 +37,7 @@ static inline void vsp1_sru_write(struct vsp1_sru *sru, struct vsp1_dl_list *dl,
* Controls
*/
-#define V4L2_CID_VSP1_SRU_INTENSITY (V4L2_CID_USER_BASE + 1)
+#define V4L2_CID_VSP1_SRU_INTENSITY (V4L2_CID_USER_BASE | 0x1001)
struct vsp1_sru_param {
u32 ctrl0;
@@ -239,7 +239,7 @@ static int sru_set_format(struct v4l2_subdev *subdev,
return 0;
}
-static struct v4l2_subdev_pad_ops sru_pad_ops = {
+static const struct v4l2_subdev_pad_ops sru_pad_ops = {
.init_cfg = vsp1_entity_init_cfg,
.enum_mbus_code = sru_enum_mbus_code,
.enum_frame_size = sru_enum_frame_size,
@@ -247,7 +247,7 @@ static struct v4l2_subdev_pad_ops sru_pad_ops = {
.set_fmt = sru_set_format,
};
-static struct v4l2_subdev_ops sru_ops = {
+static const struct v4l2_subdev_ops sru_ops = {
.pad = &sru_pad_ops,
};
@@ -257,7 +257,7 @@ static struct v4l2_subdev_ops sru_ops = {
static void sru_configure(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
- struct vsp1_dl_list *dl)
+ struct vsp1_dl_list *dl, bool full)
{
const struct vsp1_sru_param *param;
struct vsp1_sru *sru = to_sru(&entity->subdev);
@@ -265,6 +265,9 @@ static void sru_configure(struct vsp1_entity *entity,
struct v4l2_mbus_framefmt *output;
u32 ctrl0;
+ if (!full)
+ return;
+
input = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
SRU_PAD_SINK);
output = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
@@ -308,7 +311,8 @@ struct vsp1_sru *vsp1_sru_create(struct vsp1_device *vsp1)
sru->entity.ops = &sru_entity_ops;
sru->entity.type = VSP1_ENTITY_SRU;
- ret = vsp1_entity_init(vsp1, &sru->entity, "sru", 2, &sru_ops);
+ ret = vsp1_entity_init(vsp1, &sru->entity, "sru", 2, &sru_ops,
+ MEDIA_ENT_F_PROC_VIDEO_SCALER);
if (ret < 0)
return ERR_PTR(ret);
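[Editor's note] The new bool full argument splits entity configuration into two phases: full setup when the stream starts, and a lightweight per-frame pass issued from the frame-end path. Entities such as the SRU and UDS, whose registers never change between frames, bail out early on the partial pass. A hedged sketch of the callback contract (my_configure is illustrative):

static void my_configure(struct vsp1_entity *entity,
			 struct vsp1_pipeline *pipe,
			 struct vsp1_dl_list *dl, bool full)
{
	if (!full)
		return;		/* nothing to refresh per frame here */

	/* program the static format/routing registers through @dl */
}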
diff --git a/drivers/media/platform/vsp1/vsp1_uds.c b/drivers/media/platform/vsp1/vsp1_uds.c
index 1875e29da..652dcd895 100644
--- a/drivers/media/platform/vsp1/vsp1_uds.c
+++ b/drivers/media/platform/vsp1/vsp1_uds.c
@@ -40,9 +40,11 @@ static inline void vsp1_uds_write(struct vsp1_uds *uds, struct vsp1_dl_list *dl,
* Scaling Computation
*/
-void vsp1_uds_set_alpha(struct vsp1_uds *uds, struct vsp1_dl_list *dl,
+void vsp1_uds_set_alpha(struct vsp1_entity *entity, struct vsp1_dl_list *dl,
unsigned int alpha)
{
+ struct vsp1_uds *uds = to_uds(&entity->subdev);
+
vsp1_uds_write(uds, dl, VI6_UDS_ALPVAL,
alpha << VI6_UDS_ALPVAL_VAL0_SHIFT);
}
@@ -226,7 +228,7 @@ static int uds_set_format(struct v4l2_subdev *subdev,
* V4L2 Subdevice Operations
*/
-static struct v4l2_subdev_pad_ops uds_pad_ops = {
+static const struct v4l2_subdev_pad_ops uds_pad_ops = {
.init_cfg = vsp1_entity_init_cfg,
.enum_mbus_code = uds_enum_mbus_code,
.enum_frame_size = uds_enum_frame_size,
@@ -234,7 +236,7 @@ static struct v4l2_subdev_pad_ops uds_pad_ops = {
.set_fmt = uds_set_format,
};
-static struct v4l2_subdev_ops uds_ops = {
+static const struct v4l2_subdev_ops uds_ops = {
.pad = &uds_pad_ops,
};
@@ -244,7 +246,7 @@ static struct v4l2_subdev_ops uds_ops = {
static void uds_configure(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
- struct vsp1_dl_list *dl)
+ struct vsp1_dl_list *dl, bool full)
{
struct vsp1_uds *uds = to_uds(&entity->subdev);
const struct v4l2_mbus_framefmt *output;
@@ -253,6 +255,9 @@ static void uds_configure(struct vsp1_entity *entity,
unsigned int vscale;
bool multitap;
+ if (!full)
+ return;
+
input = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
UDS_PAD_SINK);
output = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
@@ -314,7 +319,8 @@ struct vsp1_uds *vsp1_uds_create(struct vsp1_device *vsp1, unsigned int index)
uds->entity.index = index;
sprintf(name, "uds.%u", index);
- ret = vsp1_entity_init(vsp1, &uds->entity, name, 2, &uds_ops);
+ ret = vsp1_entity_init(vsp1, &uds->entity, name, 2, &uds_ops,
+ MEDIA_ENT_F_PROC_VIDEO_SCALER);
if (ret < 0)
return ERR_PTR(ret);
diff --git a/drivers/media/platform/vsp1/vsp1_uds.h b/drivers/media/platform/vsp1/vsp1_uds.h
index 5c8cbfcad..7bf3cdcff 100644
--- a/drivers/media/platform/vsp1/vsp1_uds.h
+++ b/drivers/media/platform/vsp1/vsp1_uds.h
@@ -35,7 +35,7 @@ static inline struct vsp1_uds *to_uds(struct v4l2_subdev *subdev)
struct vsp1_uds *vsp1_uds_create(struct vsp1_device *vsp1, unsigned int index);
-void vsp1_uds_set_alpha(struct vsp1_uds *uds, struct vsp1_dl_list *dl,
+void vsp1_uds_set_alpha(struct vsp1_entity *uds, struct vsp1_dl_list *dl,
unsigned int alpha);
#endif /* __VSP1_UDS_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index a9aec5c0b..9fb4fc26a 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -219,7 +219,7 @@ vsp1_video_complete_buffer(struct vsp1_video *video)
spin_unlock_irqrestore(&video->irqlock, flags);
- done->buf.sequence = video->sequence++;
+ done->buf.sequence = pipe->sequence;
done->buf.vb2_buf.timestamp = ktime_get_ns();
for (i = 0; i < done->buf.vb2_buf.num_planes; ++i)
vb2_set_plane_payload(&done->buf.vb2_buf, i,
@@ -251,11 +251,17 @@ static void vsp1_video_frame_end(struct vsp1_pipeline *pipe,
static void vsp1_video_pipeline_run(struct vsp1_pipeline *pipe)
{
struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
+ struct vsp1_entity *entity;
unsigned int i;
if (!pipe->dl)
pipe->dl = vsp1_dl_list_get(pipe->output->dlm);
+ list_for_each_entry(entity, &pipe->entities, list_pipe) {
+ if (entity->ops->configure)
+ entity->ops->configure(entity, pipe, pipe->dl, false);
+ }
+
for (i = 0; i < vsp1->info->rpf_count; ++i) {
struct vsp1_rwpf *rwpf = pipe->inputs[i];
@@ -519,8 +525,8 @@ static void vsp1_video_pipeline_put(struct vsp1_pipeline *pipe)
static int
vsp1_video_queue_setup(struct vb2_queue *vq,
- unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct vsp1_video *video = vb2_get_drv_priv(vq);
const struct v4l2_pix_format_mplane *format = &video->rwpf->format;
@@ -530,20 +536,16 @@ vsp1_video_queue_setup(struct vb2_queue *vq,
if (*nplanes != format->num_planes)
return -EINVAL;
- for (i = 0; i < *nplanes; i++) {
+ for (i = 0; i < *nplanes; i++)
if (sizes[i] < format->plane_fmt[i].sizeimage)
return -EINVAL;
- alloc_ctxs[i] = video->alloc_ctx;
- }
return 0;
}
*nplanes = format->num_planes;
- for (i = 0; i < format->num_planes; ++i) {
+ for (i = 0; i < format->num_planes; ++i)
sizes[i] = format->plane_fmt[i].sizeimage;
- alloc_ctxs[i] = video->alloc_ctx;
- }
return 0;
}
@@ -632,7 +634,7 @@ static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe)
vsp1_entity_route_setup(entity, pipe->dl);
if (entity->ops->configure)
- entity->ops->configure(entity, pipe, pipe->dl);
+ entity->ops->configure(entity, pipe, pipe->dl, true);
}
return 0;
@@ -674,7 +676,7 @@ static void vsp1_video_stop_streaming(struct vb2_queue *vq)
int ret;
mutex_lock(&pipe->lock);
- if (--pipe->stream_count == 0) {
+ if (--pipe->stream_count == pipe->num_inputs) {
/* Stop the pipeline. */
ret = vsp1_pipeline_stop(pipe);
if (ret == -ETIMEDOUT)
@@ -696,7 +698,7 @@ static void vsp1_video_stop_streaming(struct vb2_queue *vq)
spin_unlock_irqrestore(&video->irqlock, flags);
}
-static struct vb2_ops vsp1_video_queue_qops = {
+static const struct vb2_ops vsp1_video_queue_qops = {
.queue_setup = vsp1_video_queue_setup,
.buf_prepare = vsp1_video_buffer_prepare,
.buf_queue = vsp1_video_buffer_queue,
@@ -805,8 +807,6 @@ vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
if (video->queue.owner && video->queue.owner != file->private_data)
return -EBUSY;
- video->sequence = 0;
-
/* Get a pipeline for the video node and start streaming on it. No link
* touching an entity in the pipeline can be activated or deactivated
* once streaming is started.
@@ -915,7 +915,7 @@ static int vsp1_video_release(struct file *file)
return 0;
}
-static struct v4l2_file_operations vsp1_video_fops = {
+static const struct v4l2_file_operations vsp1_video_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = video_ioctl2,
.open = vsp1_video_open,
@@ -982,13 +982,6 @@ struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1,
video_set_drvdata(&video->video, video);
- /* ... and the buffers queue... */
- video->alloc_ctx = vb2_dma_contig_init_ctx(video->vsp1->dev);
- if (IS_ERR(video->alloc_ctx)) {
- ret = PTR_ERR(video->alloc_ctx);
- goto error;
- }
-
video->queue.type = video->type;
video->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
video->queue.lock = &video->lock;
@@ -997,6 +990,7 @@ struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1,
video->queue.ops = &vsp1_video_queue_qops;
video->queue.mem_ops = &vb2_dma_contig_memops;
video->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ video->queue.dev = video->vsp1->dev;
ret = vb2_queue_init(&video->queue);
if (ret < 0) {
dev_err(video->vsp1->dev, "failed to initialize vb2 queue\n");
@@ -1014,7 +1008,6 @@ struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1,
return video;
error:
- vb2_dma_contig_cleanup_ctx(video->alloc_ctx);
vsp1_video_cleanup(video);
return ERR_PTR(ret);
}
@@ -1024,6 +1017,5 @@ void vsp1_video_cleanup(struct vsp1_video *video)
if (video_is_registered(&video->video))
video_unregister_device(&video->video);
- vb2_dma_contig_cleanup_ctx(video->alloc_ctx);
media_entity_cleanup(&video->video.entity);
}
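[Editor's note] The queue_setup and alloc_ctx changes track the vb2 core API change in 4.8: per-plane allocation contexts (vb2_dma_contig_init_ctx()/cleanup_ctx()) were replaced by a single struct device pointer stored in vb2_queue.dev, which vb2-dma-contig uses for its DMA allocations. A condensed sketch of the new-style setup under hypothetical my_* names; leaving alloc_devs[] untouched in queue_setup makes vb2 fall back to vq->dev:

static int my_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
			  unsigned int *nplanes, unsigned int sizes[],
			  struct device *alloc_devs[])
{
	if (*nplanes)
		return sizes[0] < MY_SIZEIMAGE ? -EINVAL : 0;

	*nplanes = 1;
	sizes[0] = MY_SIZEIMAGE;
	return 0;	/* alloc_devs[] left NULL: vb2 uses vq->dev */
}

/* at queue init time: */
	q->mem_ops = &vb2_dma_contig_memops;
	q->dev = my_dev;	/* replaces vb2_dma_contig_init_ctx() */
	ret = vb2_queue_init(q);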
diff --git a/drivers/media/platform/vsp1/vsp1_video.h b/drivers/media/platform/vsp1/vsp1_video.h
index 867b00807..50ea7f022 100644
--- a/drivers/media/platform/vsp1/vsp1_video.h
+++ b/drivers/media/platform/vsp1/vsp1_video.h
@@ -46,10 +46,8 @@ struct vsp1_video {
unsigned int pipe_index;
struct vb2_queue queue;
- void *alloc_ctx;
spinlock_t irqlock;
struct list_head irqqueue;
- unsigned int sequence;
};
static inline struct vsp1_video *to_vsp1_video(struct video_device *vdev)
diff --git a/drivers/media/platform/vsp1/vsp1_wpf.c b/drivers/media/platform/vsp1/vsp1_wpf.c
index 6c91eaa35..31983169c 100644
--- a/drivers/media/platform/vsp1/vsp1_wpf.c
+++ b/drivers/media/platform/vsp1/vsp1_wpf.c
@@ -37,6 +37,97 @@ static inline void vsp1_wpf_write(struct vsp1_rwpf *wpf,
}
/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
+enum wpf_flip_ctrl {
+ WPF_CTRL_VFLIP = 0,
+ WPF_CTRL_HFLIP = 1,
+ WPF_CTRL_MAX,
+};
+
+static int vsp1_wpf_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vsp1_rwpf *wpf =
+ container_of(ctrl->handler, struct vsp1_rwpf, ctrls);
+ unsigned int i;
+ u32 flip = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ case V4L2_CID_VFLIP:
+ for (i = 0; i < WPF_CTRL_MAX; ++i) {
+ if (wpf->flip.ctrls[i])
+ flip |= wpf->flip.ctrls[i]->val ? BIT(i) : 0;
+ }
+
+ spin_lock_irq(&wpf->flip.lock);
+ wpf->flip.pending = flip;
+ spin_unlock_irq(&wpf->flip.lock);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vsp1_wpf_ctrl_ops = {
+ .s_ctrl = vsp1_wpf_s_ctrl,
+};
+
+static int wpf_init_controls(struct vsp1_rwpf *wpf)
+{
+ struct vsp1_device *vsp1 = wpf->entity.vsp1;
+ unsigned int num_flip_ctrls;
+
+ spin_lock_init(&wpf->flip.lock);
+
+ if (wpf->entity.index != 0) {
+ /* Only WPF0 supports flipping. */
+ num_flip_ctrls = 0;
+ } else if (vsp1->info->features & VSP1_HAS_WPF_HFLIP) {
+ /* When horizontal flip is supported the WPF implements two
+ * controls (horizontal flip and vertical flip).
+ */
+ num_flip_ctrls = 2;
+ } else if (vsp1->info->features & VSP1_HAS_WPF_VFLIP) {
+ /* When only vertical flip is supported the WPF implements a
+ * single control (vertical flip).
+ */
+ num_flip_ctrls = 1;
+ } else {
+ /* Otherwise flipping is not supported. */
+ num_flip_ctrls = 0;
+ }
+
+ vsp1_rwpf_init_ctrls(wpf, num_flip_ctrls);
+
+ if (num_flip_ctrls >= 1) {
+ wpf->flip.ctrls[WPF_CTRL_VFLIP] =
+ v4l2_ctrl_new_std(&wpf->ctrls, &vsp1_wpf_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ }
+
+ if (num_flip_ctrls == 2) {
+ wpf->flip.ctrls[WPF_CTRL_HFLIP] =
+ v4l2_ctrl_new_std(&wpf->ctrls, &vsp1_wpf_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+
+ v4l2_ctrl_cluster(2, wpf->flip.ctrls);
+ }
+
+ if (wpf->ctrls.error) {
+ dev_err(vsp1->dev, "wpf%u: failed to initialize controls\n",
+ wpf->entity.index);
+ return wpf->ctrls.error;
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
* V4L2 Subdevice Core Operations
*/
@@ -62,11 +153,11 @@ static int wpf_s_stream(struct v4l2_subdev *subdev, int enable)
* V4L2 Subdevice Operations
*/
-static struct v4l2_subdev_video_ops wpf_video_ops = {
+static const struct v4l2_subdev_video_ops wpf_video_ops = {
.s_stream = wpf_s_stream,
};
-static struct v4l2_subdev_ops wpf_ops = {
+static const struct v4l2_subdev_ops wpf_ops = {
.video = &wpf_video_ops,
.pad = &vsp1_rwpf_pad_ops,
};
@@ -85,15 +176,37 @@ static void vsp1_wpf_destroy(struct vsp1_entity *entity)
static void wpf_set_memory(struct vsp1_entity *entity, struct vsp1_dl_list *dl)
{
struct vsp1_rwpf *wpf = entity_to_rwpf(entity);
+ const struct v4l2_pix_format_mplane *format = &wpf->format;
+ struct vsp1_rwpf_memory mem = wpf->mem;
+ unsigned int flip = wpf->flip.active;
+ unsigned int offset;
+
+ /* Update the memory offsets based on flipping configuration. The
+ * destination addresses point to the locations where the VSP starts
+ * writing to memory, which can be different corners of the image
+ * depending on vertical flipping. Horizontal flipping is handled
+ * through a line buffer and doesn't modify the start address.
+ */
+ if (flip & BIT(WPF_CTRL_VFLIP)) {
+ mem.addr[0] += (format->height - 1)
+ * format->plane_fmt[0].bytesperline;
+
+ if (format->num_planes > 1) {
+ offset = (format->height / wpf->fmtinfo->vsub - 1)
+ * format->plane_fmt[1].bytesperline;
+ mem.addr[1] += offset;
+ mem.addr[2] += offset;
+ }
+ }
- vsp1_wpf_write(wpf, dl, VI6_WPF_DSTM_ADDR_Y, wpf->mem.addr[0]);
- vsp1_wpf_write(wpf, dl, VI6_WPF_DSTM_ADDR_C0, wpf->mem.addr[1]);
- vsp1_wpf_write(wpf, dl, VI6_WPF_DSTM_ADDR_C1, wpf->mem.addr[2]);
+ vsp1_wpf_write(wpf, dl, VI6_WPF_DSTM_ADDR_Y, mem.addr[0]);
+ vsp1_wpf_write(wpf, dl, VI6_WPF_DSTM_ADDR_C0, mem.addr[1]);
+ vsp1_wpf_write(wpf, dl, VI6_WPF_DSTM_ADDR_C1, mem.addr[2]);
}
static void wpf_configure(struct vsp1_entity *entity,
struct vsp1_pipeline *pipe,
- struct vsp1_dl_list *dl)
+ struct vsp1_dl_list *dl, bool full)
{
struct vsp1_rwpf *wpf = to_rwpf(&entity->subdev);
struct vsp1_device *vsp1 = wpf->entity.vsp1;
@@ -104,6 +217,26 @@ static void wpf_configure(struct vsp1_entity *entity,
u32 outfmt = 0;
u32 srcrpf = 0;
+ if (!full) {
+ const unsigned int mask = BIT(WPF_CTRL_VFLIP)
+ | BIT(WPF_CTRL_HFLIP);
+
+ spin_lock(&wpf->flip.lock);
+ wpf->flip.active = (wpf->flip.active & ~mask)
+ | (wpf->flip.pending & mask);
+ spin_unlock(&wpf->flip.lock);
+
+ outfmt = (wpf->alpha << VI6_WPF_OUTFMT_PDV_SHIFT) | wpf->outfmt;
+
+ if (wpf->flip.active & BIT(WPF_CTRL_VFLIP))
+ outfmt |= VI6_WPF_OUTFMT_FLP;
+ if (wpf->flip.active & BIT(WPF_CTRL_HFLIP))
+ outfmt |= VI6_WPF_OUTFMT_HFLP;
+
+ vsp1_wpf_write(wpf, dl, VI6_WPF_OUTFMT, outfmt);
+ return;
+ }
+
/* Cropping */
crop = vsp1_rwpf_get_crop(wpf, wpf->entity.config);
@@ -143,13 +276,18 @@ static void wpf_configure(struct vsp1_entity *entity,
format->plane_fmt[1].bytesperline);
vsp1_wpf_write(wpf, dl, VI6_WPF_DSWAP, fmtinfo->swap);
+
+ if (vsp1->info->features & VSP1_HAS_WPF_HFLIP &&
+ wpf->entity.index == 0)
+ vsp1_wpf_write(wpf, dl, VI6_WPF_ROT_CTRL,
+ VI6_WPF_ROT_CTRL_LN16 |
+ (256 << VI6_WPF_ROT_CTRL_LMEM_WD_SHIFT));
}
if (sink_format->code != source_format->code)
outfmt |= VI6_WPF_OUTFMT_CSC;
- outfmt |= wpf->alpha << VI6_WPF_OUTFMT_PDV_SHIFT;
- vsp1_wpf_write(wpf, dl, VI6_WPF_OUTFMT, outfmt);
+ wpf->outfmt = outfmt;
vsp1_dl_list_write(dl, VI6_DPR_WPF_FPORCH(wpf->entity.index),
VI6_DPR_WPF_FPORCH_FP_WPFN);
@@ -216,7 +354,8 @@ struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index)
wpf->entity.index = index;
sprintf(name, "wpf.%u", index);
- ret = vsp1_entity_init(vsp1, &wpf->entity, name, 2, &wpf_ops);
+ ret = vsp1_entity_init(vsp1, &wpf->entity, name, 2, &wpf_ops,
+ MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER);
if (ret < 0)
return ERR_PTR(ret);
@@ -228,13 +367,15 @@ struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index)
}
/* Initialize the control handler. */
- ret = vsp1_rwpf_init_ctrls(wpf);
+ ret = wpf_init_controls(wpf);
if (ret < 0) {
dev_err(vsp1->dev, "wpf%u: failed to initialize controls\n",
index);
goto error;
}
+ v4l2_ctrl_handler_setup(&wpf->ctrls);
+
return wpf;
error:
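[Editor's note] Vertical flip is implemented by pointing the destination addresses at the last line of each plane and setting the FLP bit so the WPF writes bottom-up, while horizontal flip (HFLP) goes through the rotation line buffer (VI6_WPF_ROT_CTRL) and leaves the addresses untouched. A worked instance of the wpf_set_memory() offsets, assuming an NV12 1920x1080 buffer with bytesperline = 1920 and vsub = 2 (illustrative numbers only):

/* Hypothetical NV12 1920x1080 example of the offsets computed above. */
mem.addr[0] += (1080 - 1) * 1920;	/* luma: +2071680 bytes */
offset = (1080 / 2 - 1) * 1920;		/* chroma: +1034880 bytes */
mem.addr[1] += offset;
mem.addr[2] += offset;			/* third plane unused for NV12 */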
diff --git a/drivers/media/platform/xilinx/xilinx-dma.c b/drivers/media/platform/xilinx/xilinx-dma.c
index 7f6898b13..7ae1a134b 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.c
+++ b/drivers/media/platform/xilinx/xilinx-dma.c
@@ -318,11 +318,10 @@ static void xvip_dma_complete(void *param)
static int
xvip_dma_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct xvip_dma *dma = vb2_get_drv_priv(vq);
- alloc_ctxs[0] = dma->alloc_ctx;
/* Make sure the image size is large enough. */
if (*nplanes)
return sizes[0] < dma->format.sizeimage ? -EINVAL : 0;
@@ -706,12 +705,6 @@ int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
video_set_drvdata(&dma->video, dma);
/* ... and the buffers queue... */
- dma->alloc_ctx = vb2_dma_contig_init_ctx(dma->xdev->dev);
- if (IS_ERR(dma->alloc_ctx)) {
- ret = PTR_ERR(dma->alloc_ctx);
- goto error;
- }
-
/* Don't enable VB2_READ and VB2_WRITE, as using the read() and write()
* V4L2 APIs would be inefficient. Testing on the command line with a
* 'cat /dev/video?' thus won't be possible, but given that the driver
@@ -728,6 +721,7 @@ int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
dma->queue.mem_ops = &vb2_dma_contig_memops;
dma->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
| V4L2_BUF_FLAG_TSTAMP_SRC_EOF;
+ dma->queue.dev = dma->xdev->dev;
ret = vb2_queue_init(&dma->queue);
if (ret < 0) {
dev_err(dma->xdev->dev, "failed to initialize VB2 queue\n");
@@ -766,9 +760,6 @@ void xvip_dma_cleanup(struct xvip_dma *dma)
if (dma->dma)
dma_release_channel(dma->dma);
- if (!IS_ERR_OR_NULL(dma->alloc_ctx))
- vb2_dma_contig_cleanup_ctx(dma->alloc_ctx);
-
media_entity_cleanup(&dma->video.entity);
mutex_destroy(&dma->lock);
diff --git a/drivers/media/platform/xilinx/xilinx-dma.h b/drivers/media/platform/xilinx/xilinx-dma.h
index 7a1621a2e..e95d136c1 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.h
+++ b/drivers/media/platform/xilinx/xilinx-dma.h
@@ -65,7 +65,6 @@ static inline struct xvip_pipeline *to_xvip_pipeline(struct media_entity *e)
* @format: active V4L2 pixel format
* @fmtinfo: format information corresponding to the active @format
* @queue: vb2 buffers queue
- * @alloc_ctx: allocation context for the vb2 @queue
* @sequence: V4L2 buffers sequence number
* @queued_bufs: list of queued buffers
* @queued_lock: protects the buf_queued list
@@ -88,7 +87,6 @@ struct xvip_dma {
const struct xvip_video_format *fmtinfo;
struct vb2_queue queue;
- void *alloc_ctx;
unsigned int sequence;
struct list_head queued_bufs;
diff --git a/drivers/media/radio/radio-aztech.c b/drivers/media/radio/radio-aztech.c
index 705dd6f91..f445327f2 100644
--- a/drivers/media/radio/radio-aztech.c
+++ b/drivers/media/radio/radio-aztech.c
@@ -43,7 +43,6 @@ MODULE_VERSION("1.0.0");
static int io[AZTECH_MAX] = { [0] = CONFIG_RADIO_AZTECH_PORT,
[1 ... (AZTECH_MAX - 1)] = -1 };
static int radio_nr[AZTECH_MAX] = { [0 ... (AZTECH_MAX - 1)] = -1 };
-static const int radio_wait_time = 1000;
module_param_array(io, int, NULL, 0444);
MODULE_PARM_DESC(io, "I/O addresses of the Aztech card (0x350 or 0x358)");
diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
index 70fd8e801..8253f79d5 100644
--- a/drivers/media/radio/radio-maxiradio.c
+++ b/drivers/media/radio/radio-maxiradio.c
@@ -183,6 +183,7 @@ static void maxiradio_remove(struct pci_dev *pdev)
outb(0, dev->io);
v4l2_device_unregister(v4l2_dev);
release_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
+ kfree(dev);
}
static struct pci_device_id maxiradio_pci_tbl[] = {
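[Editor's note] The kfree(dev) added to maxiradio_remove() releases the per-device state allocated at probe time, closing a leak on unbind. A hedged alternative sketch — a device-managed allocation would tie the lifetime to the PCI device and make the explicit free unnecessary:

/* Sketch: devm-based allocation in probe (context is illustrative). */
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
	return -ENOMEM;
/* no kfree() in .remove: freed automatically on unbind */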
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index aeeeb72d7..749f07910 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -1472,7 +1472,7 @@ static long fm_st_receive(void *arg, struct sk_buff *skb)
* Called by ST layer to indicate protocol registration completion
* status.
*/
-static void fm_st_reg_comp_cb(void *arg, char data)
+static void fm_st_reg_comp_cb(void *arg, int data)
{
struct fmdev *fmdev;
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index bd4d68500..370e16e07 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -336,7 +336,7 @@ config IR_TTUSBIR
config IR_RX51
tristate "Nokia N900 IR transmitter diode"
- depends on OMAP_DM_TIMER && ARCH_OMAP2PLUS && LIRC && !ARCH_MULTIPLATFORM
+ depends on OMAP_DM_TIMER && PWM_OMAP_DMTIMER && ARCH_OMAP2PLUS && LIRC
---help---
Say Y or M here if you want to enable support for the IR
transmitter diode built in the Nokia N900 (RX51) device.
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index 8d77e1c4a..d1c61cd03 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -904,7 +904,7 @@ static int ene_set_tx_carrier(struct rc_dev *rdev, u32 carrier)
dbg("TX: out of range %d-%d kHz carrier",
2000 / ENE_CIRMOD_PRD_MIN, 2000 / ENE_CIRMOD_PRD_MAX);
- return -1;
+ return -EINVAL;
}
dev->tx_period = period;
diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
index ee60e17fb..5f634545d 100644
--- a/drivers/media/rc/iguanair.c
+++ b/drivers/media/rc/iguanair.c
@@ -330,7 +330,7 @@ static int iguanair_set_tx_carrier(struct rc_dev *dev, uint32_t carrier)
mutex_unlock(&ir->lock);
- return carrier;
+ return 0;
}
static int iguanair_set_tx_mask(struct rc_dev *dev, uint32_t mask)
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index 5effc65d2..c3277308a 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -292,7 +292,10 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
tmp > dev->max_timeout)
return -EINVAL;
- dev->timeout = tmp;
+ if (dev->s_timeout)
+ ret = dev->s_timeout(dev, tmp);
+ if (!ret)
+ dev->timeout = tmp;
break;
case LIRC_SET_REC_TIMEOUT_REPORTS:
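[Editor's note] With this change LIRC_SET_REC_TIMEOUT is first offered to the driver through the s_timeout callback, so hardware receive timeouts can actually be reprogrammed, and dev->timeout is only updated when the driver accepts the value. A hedged sketch of a driver-side handler (my_hw_set_timeout is hypothetical):

static int my_s_timeout(struct rc_dev *dev, unsigned int timeout)
{
	if (timeout < dev->min_timeout || timeout > dev->max_timeout)
		return -EINVAL;

	/* program the hardware; unit convention follows dev->timeout */
	my_hw_set_timeout(dev->priv, timeout);
	return 0;
}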
diff --git a/drivers/media/rc/ir-rx51.c b/drivers/media/rc/ir-rx51.c
index 4e1711a40..82fb6f2ca 100644
--- a/drivers/media/rc/ir-rx51.c
+++ b/drivers/media/rc/ir-rx51.c
@@ -12,22 +12,17 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
-
+#include <linux/clk.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/wait.h>
-
-#include <plat/dmtimer.h>
-#include <plat/clock.h>
+#include <linux/pwm.h>
+#include <linux/of.h>
+#include <linux/hrtimer.h>
#include <media/lirc.h>
#include <media/lirc_dev.h>
@@ -41,100 +36,51 @@
#define WBUF_LEN 256
-#define TIMER_MAX_VALUE 0xffffffff
-
struct lirc_rx51 {
- struct omap_dm_timer *pwm_timer;
- struct omap_dm_timer *pulse_timer;
+ struct pwm_device *pwm;
+ struct hrtimer timer;
struct device *dev;
struct lirc_rx51_platform_data *pdata;
wait_queue_head_t wqueue;
- unsigned long fclk_khz;
unsigned int freq; /* carrier frequency */
unsigned int duty_cycle; /* carrier duty cycle */
- unsigned int irq_num;
- unsigned int match;
int wbuf[WBUF_LEN];
int wbuf_index;
unsigned long device_is_open;
- int pwm_timer_num;
};
-static void lirc_rx51_on(struct lirc_rx51 *lirc_rx51)
+static inline void lirc_rx51_on(struct lirc_rx51 *lirc_rx51)
{
- omap_dm_timer_set_pwm(lirc_rx51->pwm_timer, 0, 1,
- OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE);
+ pwm_enable(lirc_rx51->pwm);
}
-static void lirc_rx51_off(struct lirc_rx51 *lirc_rx51)
+static inline void lirc_rx51_off(struct lirc_rx51 *lirc_rx51)
{
- omap_dm_timer_set_pwm(lirc_rx51->pwm_timer, 0, 1,
- OMAP_TIMER_TRIGGER_NONE);
+ pwm_disable(lirc_rx51->pwm);
}
static int init_timing_params(struct lirc_rx51 *lirc_rx51)
{
- u32 load, match;
+ struct pwm_device *pwm = lirc_rx51->pwm;
+ int duty, period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, lirc_rx51->freq);
- load = -(lirc_rx51->fclk_khz * 1000 / lirc_rx51->freq);
- match = -(lirc_rx51->duty_cycle * -load / 100);
- omap_dm_timer_set_load(lirc_rx51->pwm_timer, 1, load);
- omap_dm_timer_set_match(lirc_rx51->pwm_timer, 1, match);
- omap_dm_timer_write_counter(lirc_rx51->pwm_timer, TIMER_MAX_VALUE - 2);
- omap_dm_timer_start(lirc_rx51->pwm_timer);
- omap_dm_timer_set_int_enable(lirc_rx51->pulse_timer, 0);
- omap_dm_timer_start(lirc_rx51->pulse_timer);
+ duty = DIV_ROUND_CLOSEST(lirc_rx51->duty_cycle * period, 100);
- lirc_rx51->match = 0;
+ pwm_config(pwm, duty, period);
return 0;
}
-#define tics_after(a, b) ((long)(b) - (long)(a) < 0)
-
-static int pulse_timer_set_timeout(struct lirc_rx51 *lirc_rx51, int usec)
+static enum hrtimer_restart lirc_rx51_timer_cb(struct hrtimer *timer)
{
- int counter;
-
- BUG_ON(usec < 0);
-
- if (lirc_rx51->match == 0)
- counter = omap_dm_timer_read_counter(lirc_rx51->pulse_timer);
- else
- counter = lirc_rx51->match;
-
- counter += (u32)(lirc_rx51->fclk_khz * usec / (1000));
- omap_dm_timer_set_match(lirc_rx51->pulse_timer, 1, counter);
- omap_dm_timer_set_int_enable(lirc_rx51->pulse_timer,
- OMAP_TIMER_INT_MATCH);
- if (tics_after(omap_dm_timer_read_counter(lirc_rx51->pulse_timer),
- counter)) {
- return 1;
- }
- return 0;
-}
-
-static irqreturn_t lirc_rx51_interrupt_handler(int irq, void *ptr)
-{
- unsigned int retval;
- struct lirc_rx51 *lirc_rx51 = ptr;
-
- retval = omap_dm_timer_read_status(lirc_rx51->pulse_timer);
- if (!retval)
- return IRQ_NONE;
+ struct lirc_rx51 *lirc_rx51 =
+ container_of(timer, struct lirc_rx51, timer);
+ ktime_t now;
- if (retval & ~OMAP_TIMER_INT_MATCH)
- dev_err_ratelimited(lirc_rx51->dev,
- ": Unexpected interrupt source: %x\n", retval);
-
- omap_dm_timer_write_status(lirc_rx51->pulse_timer,
- OMAP_TIMER_INT_MATCH |
- OMAP_TIMER_INT_OVERFLOW |
- OMAP_TIMER_INT_CAPTURE);
if (lirc_rx51->wbuf_index < 0) {
dev_err_ratelimited(lirc_rx51->dev,
- ": BUG wbuf_index has value of %i\n",
+ "BUG wbuf_index has value of %i\n",
lirc_rx51->wbuf_index);
goto end;
}
@@ -144,6 +90,8 @@ static irqreturn_t lirc_rx51_interrupt_handler(int irq, void *ptr)
* pulses until we catch up.
*/
do {
+ u64 ns;
+
if (lirc_rx51->wbuf_index >= WBUF_LEN)
goto end;
if (lirc_rx51->wbuf[lirc_rx51->wbuf_index] == -1)
@@ -154,84 +102,24 @@ static irqreturn_t lirc_rx51_interrupt_handler(int irq, void *ptr)
else
lirc_rx51_on(lirc_rx51);
- retval = pulse_timer_set_timeout(lirc_rx51,
- lirc_rx51->wbuf[lirc_rx51->wbuf_index]);
+ ns = 1000 * lirc_rx51->wbuf[lirc_rx51->wbuf_index];
+ hrtimer_add_expires_ns(timer, ns);
+
lirc_rx51->wbuf_index++;
- } while (retval);
+ now = timer->base->get_time();
+
+ } while (hrtimer_get_expires_tv64(timer) < now.tv64);
- return IRQ_HANDLED;
+ return HRTIMER_RESTART;
end:
/* Stop TX here */
lirc_rx51_off(lirc_rx51);
lirc_rx51->wbuf_index = -1;
- omap_dm_timer_stop(lirc_rx51->pwm_timer);
- omap_dm_timer_stop(lirc_rx51->pulse_timer);
- omap_dm_timer_set_int_enable(lirc_rx51->pulse_timer, 0);
- wake_up_interruptible(&lirc_rx51->wqueue);
-
- return IRQ_HANDLED;
-}
-
-static int lirc_rx51_init_port(struct lirc_rx51 *lirc_rx51)
-{
- struct clk *clk_fclk;
- int retval, pwm_timer = lirc_rx51->pwm_timer_num;
-
- lirc_rx51->pwm_timer = omap_dm_timer_request_specific(pwm_timer);
- if (lirc_rx51->pwm_timer == NULL) {
- dev_err(lirc_rx51->dev, ": Error requesting GPT%d timer\n",
- pwm_timer);
- return -EBUSY;
- }
-
- lirc_rx51->pulse_timer = omap_dm_timer_request();
- if (lirc_rx51->pulse_timer == NULL) {
- dev_err(lirc_rx51->dev, ": Error requesting pulse timer\n");
- retval = -EBUSY;
- goto err1;
- }
-
- omap_dm_timer_set_source(lirc_rx51->pwm_timer, OMAP_TIMER_SRC_SYS_CLK);
- omap_dm_timer_set_source(lirc_rx51->pulse_timer,
- OMAP_TIMER_SRC_SYS_CLK);
-
- omap_dm_timer_enable(lirc_rx51->pwm_timer);
- omap_dm_timer_enable(lirc_rx51->pulse_timer);
-
- lirc_rx51->irq_num = omap_dm_timer_get_irq(lirc_rx51->pulse_timer);
- retval = request_irq(lirc_rx51->irq_num, lirc_rx51_interrupt_handler,
- IRQF_SHARED, "lirc_pulse_timer", lirc_rx51);
- if (retval) {
- dev_err(lirc_rx51->dev, ": Failed to request interrupt line\n");
- goto err2;
- }
-
- clk_fclk = omap_dm_timer_get_fclk(lirc_rx51->pwm_timer);
- lirc_rx51->fclk_khz = clk_fclk->rate / 1000;
-
- return 0;
-err2:
- omap_dm_timer_free(lirc_rx51->pulse_timer);
-err1:
- omap_dm_timer_free(lirc_rx51->pwm_timer);
-
- return retval;
-}
-
-static int lirc_rx51_free_port(struct lirc_rx51 *lirc_rx51)
-{
- omap_dm_timer_set_int_enable(lirc_rx51->pulse_timer, 0);
- free_irq(lirc_rx51->irq_num, lirc_rx51);
- lirc_rx51_off(lirc_rx51);
- omap_dm_timer_disable(lirc_rx51->pwm_timer);
- omap_dm_timer_disable(lirc_rx51->pulse_timer);
- omap_dm_timer_free(lirc_rx51->pwm_timer);
- omap_dm_timer_free(lirc_rx51->pulse_timer);
- lirc_rx51->wbuf_index = -1;
+ wake_up_interruptible(&lirc_rx51->wqueue);
- return 0;
+ return HRTIMER_NORESTART;
}
static ssize_t lirc_rx51_write(struct file *file, const char *buf,
@@ -270,8 +158,9 @@ static ssize_t lirc_rx51_write(struct file *file, const char *buf,
lirc_rx51_on(lirc_rx51);
lirc_rx51->wbuf_index = 1;
- pulse_timer_set_timeout(lirc_rx51, lirc_rx51->wbuf[0]);
-
+ hrtimer_start(&lirc_rx51->timer,
+ ns_to_ktime(1000 * lirc_rx51->wbuf[0]),
+ HRTIMER_MODE_REL);
/*
* Don't return back to the userspace until the transfer has
* finished
@@ -371,14 +260,24 @@ static int lirc_rx51_open(struct inode *inode, struct file *file)
if (test_and_set_bit(1, &lirc_rx51->device_is_open))
return -EBUSY;
- return lirc_rx51_init_port(lirc_rx51);
+ lirc_rx51->pwm = pwm_get(lirc_rx51->dev, NULL);
+ if (IS_ERR(lirc_rx51->pwm)) {
+ int res = PTR_ERR(lirc_rx51->pwm);
+
+ dev_err(lirc_rx51->dev, "pwm_get failed: %d\n", res);
+ return res;
+ }
+
+ return 0;
}
static int lirc_rx51_release(struct inode *inode, struct file *file)
{
struct lirc_rx51 *lirc_rx51 = file->private_data;
- lirc_rx51_free_port(lirc_rx51);
+ hrtimer_cancel(&lirc_rx51->timer);
+ lirc_rx51_off(lirc_rx51);
+ pwm_put(lirc_rx51->pwm);
clear_bit(1, &lirc_rx51->device_is_open);
@@ -386,7 +285,6 @@ static int lirc_rx51_release(struct inode *inode, struct file *file)
}
static struct lirc_rx51 lirc_rx51 = {
- .freq = 38000,
.duty_cycle = 50,
.wbuf_index = -1,
};
@@ -444,9 +342,32 @@ static int lirc_rx51_resume(struct platform_device *dev)
static int lirc_rx51_probe(struct platform_device *dev)
{
+ struct pwm_device *pwm;
+
lirc_rx51_driver.features = LIRC_RX51_DRIVER_FEATURES;
lirc_rx51.pdata = dev->dev.platform_data;
- lirc_rx51.pwm_timer_num = lirc_rx51.pdata->pwm_timer;
+
+ if (!lirc_rx51.pdata) {
+ dev_err(&dev->dev, "Platform Data is missing\n");
+ return -ENXIO;
+ }
+
+ pwm = pwm_get(&dev->dev, NULL);
+ if (IS_ERR(pwm)) {
+ int err = PTR_ERR(pwm);
+
+ if (err != -EPROBE_DEFER)
+ dev_err(&dev->dev, "pwm_get failed: %d\n", err);
+ return err;
+ }
+
+ /* Use default, in case userspace does not set the carrier */
+ lirc_rx51.freq = DIV_ROUND_CLOSEST(NSEC_PER_SEC, pwm_get_period(pwm));
+ pwm_put(pwm);
+
+ hrtimer_init(&lirc_rx51.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ lirc_rx51.timer.function = lirc_rx51_timer_cb;
+
lirc_rx51.dev = &dev->dev;
lirc_rx51_driver.dev = &dev->dev;
lirc_rx51_driver.minor = lirc_register_driver(&lirc_rx51_driver);
@@ -457,8 +378,6 @@ static int lirc_rx51_probe(struct platform_device *dev)
lirc_rx51_driver.minor);
return lirc_rx51_driver.minor;
}
- dev_info(lirc_rx51.dev, "registration ok, minor: %d, pwm: %d\n",
- lirc_rx51_driver.minor, lirc_rx51.pwm_timer_num);
return 0;
}
@@ -468,6 +387,14 @@ static int lirc_rx51_remove(struct platform_device *dev)
return lirc_unregister_driver(lirc_rx51_driver.minor);
}
+static const struct of_device_id lirc_rx51_match[] = {
+ {
+ .compatible = "nokia,n900-ir",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, lirc_rx51_match);
+
struct platform_driver lirc_rx51_platform_driver = {
.probe = lirc_rx51_probe,
.remove = lirc_rx51_remove,
@@ -475,7 +402,7 @@ struct platform_driver lirc_rx51_platform_driver = {
.resume = lirc_rx51_resume,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(lirc_rx51_match),
},
};
module_platform_driver(lirc_rx51_platform_driver);
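[Editor's note] The rewrite drops the two OMAP dmtimers (one generating the carrier, one timing pulse/space durations) in favour of a generic PWM device plus an hrtimer. init_timing_params() computes period = 10^9 / freq and duty as duty_cycle percent of that; for example, 38 kHz gives period ≈ 26316 ns and, at 50%, duty ≈ 13158 ns. The callback advances its own expiry with hrtimer_add_expires_ns() and keeps consuming buffer entries until the next expiry lies in the future, so a late callback catches up instead of stretching the IR waveform. A condensed sketch of that catch-up pattern (my_* names are illustrative, the hrtimer calls are real):

static enum hrtimer_restart my_tx_cb(struct hrtimer *t)
{
	struct my_tx *tx = container_of(t, struct my_tx, timer);

	do {
		if (tx->idx >= tx->len)
			return HRTIMER_NORESTART;	/* buffer drained */

		my_toggle_carrier(tx);			/* pulse <-> space */
		hrtimer_add_expires_ns(t, 1000ULL * tx->buf[tx->idx++]);
	} while (hrtimer_get_expires_tv64(t) < t->base->get_time().tv64);

	return HRTIMER_RESTART;
}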
diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile
index fbbd3bbcd..d7b13fae1 100644
--- a/drivers/media/rc/keymaps/Makefile
+++ b/drivers/media/rc/keymaps/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-behold.o \
rc-behold-columbus.o \
rc-budget-ci-old.o \
+ rc-cec.o \
rc-cinergy-1400.o \
rc-cinergy.o \
rc-delock-61959.o \
@@ -28,6 +29,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-dm1105-nec.o \
rc-dntv-live-dvb-t.o \
rc-dntv-live-dvbt-pro.o \
+ rc-dtt200u.o \
rc-dvbsky.o \
rc-em-terratec.o \
rc-encore-enltv2.o \
diff --git a/drivers/media/rc/keymaps/rc-cec.c b/drivers/media/rc/keymaps/rc-cec.c
new file mode 100644
index 000000000..354c8e724
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-cec.c
@@ -0,0 +1,182 @@
+/* Keytable for the CEC remote control
+ *
+ * Copyright (c) 2015 by Kamil Debski
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+/*
+ * CEC Spec "High-Definition Multimedia Interface Specification" can be obtained
+ * here: http://xtreamerdev.googlecode.com/files/CEC_Specs.pdf
+ * The list of control codes is listed in Table 27: User Control Codes p. 95
+ */
+
+static struct rc_map_table cec[] = {
+ { 0x00, KEY_OK },
+ { 0x01, KEY_UP },
+ { 0x02, KEY_DOWN },
+ { 0x03, KEY_LEFT },
+ { 0x04, KEY_RIGHT },
+ { 0x05, KEY_RIGHT_UP },
+ { 0x06, KEY_RIGHT_DOWN },
+ { 0x07, KEY_LEFT_UP },
+ { 0x08, KEY_LEFT_DOWN },
+ { 0x09, KEY_ROOT_MENU }, /* CEC Spec: Device Root Menu - see Note 2 */
+ /*
+ * Note 2: This is the initial display that a device shows. It is
+ * device-dependent and can be, for example, a contents menu, setup
+ * menu, favorite menu or other menu. The actual menu displayed
+ * may also depend on the device's current state.
+ */
+ { 0x0a, KEY_SETUP },
+ { 0x0b, KEY_MENU }, /* CEC Spec: Contents Menu */
+ { 0x0c, KEY_FAVORITES }, /* CEC Spec: Favorite Menu */
+ { 0x0d, KEY_EXIT },
+ /* 0x0e-0x0f: Reserved */
+ { 0x10, KEY_MEDIA_TOP_MENU },
+ { 0x11, KEY_CONTEXT_MENU },
+ /* 0x12-0x1c: Reserved */
+ { 0x1d, KEY_DIGITS }, /* CEC Spec: select/toggle a Number Entry Mode */
+ { 0x1e, KEY_NUMERIC_11 },
+ { 0x1f, KEY_NUMERIC_12 },
+ /* 0x20-0x29: Keys 0 to 9 */
+ { 0x20, KEY_NUMERIC_0 },
+ { 0x21, KEY_NUMERIC_1 },
+ { 0x22, KEY_NUMERIC_2 },
+ { 0x23, KEY_NUMERIC_3 },
+ { 0x24, KEY_NUMERIC_4 },
+ { 0x25, KEY_NUMERIC_5 },
+ { 0x26, KEY_NUMERIC_6 },
+ { 0x27, KEY_NUMERIC_7 },
+ { 0x28, KEY_NUMERIC_8 },
+ { 0x29, KEY_NUMERIC_9 },
+ { 0x2a, KEY_DOT },
+ { 0x2b, KEY_ENTER },
+ { 0x2c, KEY_CLEAR },
+ /* 0x2d-0x2e: Reserved */
+ { 0x2f, KEY_NEXT_FAVORITE }, /* CEC Spec: Next Favorite */
+ { 0x30, KEY_CHANNELUP },
+ { 0x31, KEY_CHANNELDOWN },
+ { 0x32, KEY_PREVIOUS }, /* CEC Spec: Previous Channel */
+ { 0x33, KEY_SOUND }, /* CEC Spec: Sound Select */
+ { 0x34, KEY_VIDEO }, /* 0x34: CEC Spec: Input Select */
+ { 0x35, KEY_INFO }, /* CEC Spec: Display Information */
+ { 0x36, KEY_HELP },
+ { 0x37, KEY_PAGEUP },
+ { 0x38, KEY_PAGEDOWN },
+ /* 0x39-0x3f: Reserved */
+ { 0x40, KEY_POWER },
+ { 0x41, KEY_VOLUMEUP },
+ { 0x42, KEY_VOLUMEDOWN },
+ { 0x43, KEY_MUTE },
+ { 0x44, KEY_PLAYCD },
+ { 0x45, KEY_STOPCD },
+ { 0x46, KEY_PAUSECD },
+ { 0x47, KEY_RECORD },
+ { 0x48, KEY_REWIND },
+ { 0x49, KEY_FASTFORWARD },
+ { 0x4a, KEY_EJECTCD }, /* CEC Spec: Eject */
+ { 0x4b, KEY_FORWARD },
+ { 0x4c, KEY_BACK },
+ { 0x4d, KEY_STOP_RECORD }, /* CEC Spec: Stop-Record */
+ { 0x4e, KEY_PAUSE_RECORD }, /* CEC Spec: Pause-Record */
+ /* 0x4f: Reserved */
+ { 0x50, KEY_ANGLE },
+ { 0x51, KEY_TV2 },
+ { 0x52, KEY_VOD }, /* CEC Spec: Video on Demand */
+ { 0x53, KEY_EPG },
+ { 0x54, KEY_TIME }, /* CEC Spec: Timer */
+ { 0x55, KEY_CONFIG },
+ /*
+ * The following codes are hard to implement at this moment, as they
+ * carry an additional argument. Most likely changes to the RC
+ * framework are necessary.
+ * For now they are interpreted by the CEC framework as non-keycodes
+ * and are passed as messages, enabling the user application to parse them.
+ */
+ /* 0x56: CEC Spec: Select Broadcast Type */
+ /* 0x57: CEC Spec: Select Sound presentation */
+ { 0x58, KEY_AUDIO_DESC }, /* CEC 2.0 and up */
+ { 0x59, KEY_WWW }, /* CEC 2.0 and up */
+ { 0x5a, KEY_3D_MODE }, /* CEC 2.0 and up */
+ /* 0x5b-0x5f: Reserved */
+ { 0x60, KEY_PLAYCD }, /* CEC Spec: Play Function */
+ { 0x6005, KEY_FASTFORWARD },
+ { 0x6006, KEY_FASTFORWARD },
+ { 0x6007, KEY_FASTFORWARD },
+ { 0x6015, KEY_SLOW },
+ { 0x6016, KEY_SLOW },
+ { 0x6017, KEY_SLOW },
+ { 0x6009, KEY_FASTREVERSE },
+ { 0x600a, KEY_FASTREVERSE },
+ { 0x600b, KEY_FASTREVERSE },
+ { 0x6019, KEY_SLOWREVERSE },
+ { 0x601a, KEY_SLOWREVERSE },
+ { 0x601b, KEY_SLOWREVERSE },
+ { 0x6020, KEY_REWIND },
+ { 0x6024, KEY_PLAYCD },
+ { 0x6025, KEY_PAUSECD },
+ { 0x61, KEY_PLAYPAUSE }, /* CEC Spec: Pause-Play Function */
+ { 0x62, KEY_RECORD }, /* Spec: Record Function */
+ { 0x63, KEY_PAUSE_RECORD }, /* CEC Spec: Pause-Record Function */
+ { 0x64, KEY_STOPCD }, /* CEC Spec: Stop Function */
+ { 0x65, KEY_MUTE }, /* CEC Spec: Mute Function */
+ { 0x66, KEY_UNMUTE }, /* CEC Spec: Restore the volume */
+ /*
+ * The following codes are hard to implement at this moment, as they
+ * carry an additional argument. Most likely changes to the RC
+ * framework are necessary.
+ * For now they are interpreted by the CEC framework as non-keycodes
+ * and are passed as messages, enabling the user application to parse them.
+ */
+ /* 0x67: CEC Spec: Tune Function */
+ /* 0x68: CEC Spec: Select Media Function */
+ /* 0x69: CEC Spec: Select A/V Input Function */
+ /* 0x6a: CEC Spec: Select Audio Input Function */
+ { 0x6b, KEY_POWER }, /* CEC Spec: Power Toggle Function */
+ { 0x6c, KEY_SLEEP }, /* CEC Spec: Power Off Function */
+ { 0x6d, KEY_WAKEUP }, /* CEC Spec: Power On Function */
+ /* 0x6e-0x70: Reserved */
+ { 0x71, KEY_BLUE }, /* CEC Spec: F1 (Blue) */
+ { 0x72, KEY_RED }, /* CEC Spec: F2 (Red) */
+ { 0x73, KEY_GREEN }, /* CEC Spec: F3 (Green) */
+ { 0x74, KEY_YELLOW }, /* CEC Spec: F4 (Yellow) */
+ { 0x75, KEY_F5 },
+ { 0x76, KEY_DATA }, /* CEC Spec: Data - see Note 3 */
+ /*
+ * Note 3: This is used, for example, to enter or leave a digital TV
+ * data broadcast application.
+ */
+ /* 0x77-0xff: Reserved */
+};
+
+static struct rc_map_list cec_map = {
+ .map = {
+ .scan = cec,
+ .size = ARRAY_SIZE(cec),
+ .rc_type = RC_TYPE_CEC,
+ .name = RC_MAP_CEC,
+ }
+};
+
+static int __init init_rc_map_cec(void)
+{
+ return rc_map_register(&cec_map);
+}
+
+static void __exit exit_rc_map_cec(void)
+{
+ rc_map_unregister(&cec_map);
+}
+
+module_init(init_rc_map_cec);
+module_exit(exit_rc_map_cec);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kamil Debski");
diff --git a/drivers/media/rc/keymaps/rc-dtt200u.c b/drivers/media/rc/keymaps/rc-dtt200u.c
new file mode 100644
index 000000000..25650e9e4
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-dtt200u.c
@@ -0,0 +1,59 @@
+/* Keytable for Wideview WT-220U.
+ *
+ * Copyright (c) 2016 Jonathan McDowell <noodles@earth.li>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+/* key list for the tiny remote control (Yakumo, don't know about the others) */
+static struct rc_map_table dtt200u_table[] = {
+ { 0x8001, KEY_MUTE },
+ { 0x8002, KEY_CHANNELDOWN },
+ { 0x8003, KEY_VOLUMEDOWN },
+ { 0x8004, KEY_1 },
+ { 0x8005, KEY_2 },
+ { 0x8006, KEY_3 },
+ { 0x8007, KEY_4 },
+ { 0x8008, KEY_5 },
+ { 0x8009, KEY_6 },
+ { 0x800a, KEY_7 },
+ { 0x800c, KEY_ZOOM },
+ { 0x800d, KEY_0 },
+ { 0x800e, KEY_SELECT },
+ { 0x8012, KEY_POWER },
+ { 0x801a, KEY_CHANNELUP },
+ { 0x801b, KEY_8 },
+ { 0x801e, KEY_VOLUMEUP },
+ { 0x801f, KEY_9 },
+};
+
+static struct rc_map_list dtt200u_map = {
+ .map = {
+ .scan = dtt200u_table,
+ .size = ARRAY_SIZE(dtt200u_table),
+ .rc_type = RC_TYPE_NEC,
+ .name = RC_MAP_DTT200U,
+ }
+};
+
+static int __init init_rc_map_dtt200u(void)
+{
+ return rc_map_register(&dtt200u_map);
+}
+
+static void __exit exit_rc_map_dtt200u(void)
+{
+ rc_map_unregister(&dtt200u_map);
+}
+
+module_init(init_rc_map_dtt200u)
+module_exit(exit_rc_map_dtt200u)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 92ae1903c..91f9bb87c 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -19,6 +19,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -80,8 +82,6 @@ static void lirc_irctl_init(struct irctl *ir)
static void lirc_irctl_cleanup(struct irctl *ir)
{
- dev_dbg(ir->d.dev, LOGHEAD "cleaning up\n", ir->d.name, ir->d.minor);
-
device_destroy(lirc_class, MKDEV(MAJOR(lirc_base_dev), ir->d.minor));
if (ir->buf != ir->d.rbuf) {
@@ -97,28 +97,25 @@ static void lirc_irctl_cleanup(struct irctl *ir)
*/
static int lirc_add_to_buf(struct irctl *ir)
{
- if (ir->d.add_to_buf) {
- int res = -ENODATA;
- int got_data = 0;
+ int res;
+ int got_data = -1;
- /*
- * service the device as long as it is returning
- * data and we have space
- */
-get_data:
- res = ir->d.add_to_buf(ir->d.data, ir->buf);
- if (res == 0) {
- got_data++;
- goto get_data;
- }
+ if (!ir->d.add_to_buf)
+ return 0;
- if (res == -ENODEV)
- kthread_stop(ir->task);
+ /*
+ * service the device as long as it is returning
+ * data and we have space
+ */
+ do {
+ got_data++;
+ res = ir->d.add_to_buf(ir->d.data, ir->buf);
+ } while (!res);
- return got_data ? 0 : res;
- }
+ if (res == -ENODEV)
+ kthread_stop(ir->task);
- return 0;
+ return got_data ? 0 : res;
}
/* main function of the polling thread
@@ -127,9 +124,6 @@ static int lirc_thread(void *irctl)
{
struct irctl *ir = irctl;
- dev_dbg(ir->d.dev, LOGHEAD "poll thread started\n",
- ir->d.name, ir->d.minor);
-
do {
if (ir->open) {
if (ir->jiffies_to_wait) {
@@ -146,9 +140,6 @@ static int lirc_thread(void *irctl)
}
} while (!kthread_should_stop());
- dev_dbg(ir->d.dev, LOGHEAD "poll thread ended\n",
- ir->d.name, ir->d.minor);
-
return 0;
}
@@ -203,74 +194,86 @@ err_out:
return retval;
}
-int lirc_register_driver(struct lirc_driver *d)
+static int lirc_allocate_buffer(struct irctl *ir)
{
- struct irctl *ir;
- int minor;
+ int err = 0;
int bytes_in_key;
unsigned int chunk_size;
unsigned int buffer_size;
+ struct lirc_driver *d = &ir->d;
+
+ mutex_lock(&lirc_dev_lock);
+
+ bytes_in_key = BITS_TO_LONGS(d->code_length) +
+ (d->code_length % 8 ? 1 : 0);
+ buffer_size = d->buffer_size ? d->buffer_size : BUFLEN / bytes_in_key;
+ chunk_size = d->chunk_size ? d->chunk_size : bytes_in_key;
+
+ if (d->rbuf) {
+ ir->buf = d->rbuf;
+ } else {
+ ir->buf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL);
+ if (!ir->buf) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = lirc_buffer_init(ir->buf, chunk_size, buffer_size);
+ if (err) {
+ kfree(ir->buf);
+ goto out;
+ }
+ }
+ ir->chunk_size = ir->buf->chunk_size;
+
+out:
+ mutex_unlock(&lirc_dev_lock);
+
+ return err;
+}
+
+static int lirc_allocate_driver(struct lirc_driver *d)
+{
+ struct irctl *ir;
+ int minor;
int err;
if (!d) {
- printk(KERN_ERR "lirc_dev: lirc_register_driver: "
- "driver pointer must be not NULL!\n");
- err = -EBADRQC;
- goto out;
+ pr_err("driver pointer must be not NULL!\n");
+ return -EBADRQC;
}
if (!d->dev) {
- printk(KERN_ERR "%s: dev pointer not filled in!\n", __func__);
- err = -EINVAL;
- goto out;
+ pr_err("dev pointer not filled in!\n");
+ return -EINVAL;
}
- if (MAX_IRCTL_DEVICES <= d->minor) {
- dev_err(d->dev, "lirc_dev: lirc_register_driver: "
- "\"minor\" must be between 0 and %d (%d)!\n",
- MAX_IRCTL_DEVICES - 1, d->minor);
- err = -EBADRQC;
- goto out;
+ if (d->minor >= MAX_IRCTL_DEVICES) {
+ dev_err(d->dev, "minor must be between 0 and %d!\n",
+ MAX_IRCTL_DEVICES - 1);
+ return -EBADRQC;
}
- if (1 > d->code_length || (BUFLEN * 8) < d->code_length) {
- dev_err(d->dev, "lirc_dev: lirc_register_driver: "
- "code length in bits for minor (%d) "
- "must be less than %d!\n",
- d->minor, BUFLEN * 8);
- err = -EBADRQC;
- goto out;
+ if (d->code_length < 1 || d->code_length > (BUFLEN * 8)) {
+ dev_err(d->dev, "code length must be less than %d bits\n",
+ BUFLEN * 8);
+ return -EBADRQC;
}
- dev_dbg(d->dev, "lirc_dev: lirc_register_driver: sample_rate: %d\n",
- d->sample_rate);
if (d->sample_rate) {
if (2 > d->sample_rate || HZ < d->sample_rate) {
- dev_err(d->dev, "lirc_dev: lirc_register_driver: "
- "sample_rate must be between 2 and %d!\n", HZ);
- err = -EBADRQC;
- goto out;
+ dev_err(d->dev, "invalid %d sample rate\n",
+ d->sample_rate);
+ return -EBADRQC;
}
if (!d->add_to_buf) {
- dev_err(d->dev, "lirc_dev: lirc_register_driver: "
- "add_to_buf cannot be NULL when "
- "sample_rate is set\n");
- err = -EBADRQC;
- goto out;
- }
- } else if (!(d->fops && d->fops->read) && !d->rbuf) {
- dev_err(d->dev, "lirc_dev: lirc_register_driver: "
- "fops->read and rbuf cannot all be NULL!\n");
- err = -EBADRQC;
- goto out;
- } else if (!d->rbuf) {
- if (!(d->fops && d->fops->read && d->fops->poll &&
- d->fops->unlocked_ioctl)) {
- dev_err(d->dev, "lirc_dev: lirc_register_driver: "
- "neither read, poll nor unlocked_ioctl can be NULL!\n");
- err = -EBADRQC;
- goto out;
+ dev_err(d->dev, "add_to_buf not set\n");
+ return -EBADRQC;
}
+ } else if (!d->rbuf && !(d->fops && d->fops->read &&
+ d->fops->poll && d->fops->unlocked_ioctl)) {
+ dev_err(d->dev, "undefined read, poll, ioctl\n");
+ return -EBADRQC;
}
mutex_lock(&lirc_dev_lock);
@@ -282,15 +285,13 @@ int lirc_register_driver(struct lirc_driver *d)
for (minor = 0; minor < MAX_IRCTL_DEVICES; minor++)
if (!irctls[minor])
break;
- if (MAX_IRCTL_DEVICES == minor) {
- dev_err(d->dev, "lirc_dev: lirc_register_driver: "
- "no free slots for drivers!\n");
+ if (minor == MAX_IRCTL_DEVICES) {
+ dev_err(d->dev, "no free slots for drivers!\n");
err = -ENOMEM;
goto out_lock;
}
} else if (irctls[minor]) {
- dev_err(d->dev, "lirc_dev: lirc_register_driver: "
- "minor (%d) just registered!\n", minor);
+ dev_err(d->dev, "minor (%d) just registered!\n", minor);
err = -EBUSY;
goto out_lock;
}
@@ -304,37 +305,9 @@ int lirc_register_driver(struct lirc_driver *d)
irctls[minor] = ir;
d->minor = minor;
- if (d->sample_rate) {
- ir->jiffies_to_wait = HZ / d->sample_rate;
- } else {
- /* it means - wait for external event in task queue */
- ir->jiffies_to_wait = 0;
- }
-
/* some safety check 8-) */
d->name[sizeof(d->name)-1] = '\0';
- bytes_in_key = BITS_TO_LONGS(d->code_length) +
- (d->code_length % 8 ? 1 : 0);
- buffer_size = d->buffer_size ? d->buffer_size : BUFLEN / bytes_in_key;
- chunk_size = d->chunk_size ? d->chunk_size : bytes_in_key;
-
- if (d->rbuf) {
- ir->buf = d->rbuf;
- } else {
- ir->buf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL);
- if (!ir->buf) {
- err = -ENOMEM;
- goto out_lock;
- }
- err = lirc_buffer_init(ir->buf, chunk_size, buffer_size);
- if (err) {
- kfree(ir->buf);
- goto out_lock;
- }
- }
- ir->chunk_size = ir->buf->chunk_size;
-
if (d->features == 0)
d->features = LIRC_CAN_REC_LIRCCODE;
@@ -345,15 +318,19 @@ int lirc_register_driver(struct lirc_driver *d)
"lirc%u", ir->d.minor);
if (d->sample_rate) {
+ ir->jiffies_to_wait = HZ / d->sample_rate;
+
/* try to fire up polling thread */
ir->task = kthread_run(lirc_thread, (void *)ir, "lirc_dev");
if (IS_ERR(ir->task)) {
- dev_err(d->dev, "lirc_dev: lirc_register_driver: "
- "cannot run poll thread for minor = %d\n",
- d->minor);
+ dev_err(d->dev, "cannot run thread for minor = %d\n",
+ d->minor);
err = -ECHILD;
goto out_sysfs;
}
+ } else {
+ /* it means - wait for external event in task queue */
+ ir->jiffies_to_wait = 0;
}
err = lirc_cdev_add(ir);
@@ -371,9 +348,26 @@ out_sysfs:
device_destroy(lirc_class, MKDEV(MAJOR(lirc_base_dev), ir->d.minor));
out_lock:
mutex_unlock(&lirc_dev_lock);
-out:
+
return err;
}
+
+int lirc_register_driver(struct lirc_driver *d)
+{
+ int minor, err = 0;
+
+ minor = lirc_allocate_driver(d);
+ if (minor < 0)
+ return minor;
+
+ if (LIRC_CAN_REC(d->features)) {
+ err = lirc_allocate_buffer(irctls[minor]);
+ if (err)
+ lirc_unregister_driver(minor);
+ }
+
+ return err ? err : minor;
+}
EXPORT_SYMBOL(lirc_register_driver);
int lirc_unregister_driver(int minor)
@@ -382,15 +376,14 @@ int lirc_unregister_driver(int minor)
struct cdev *cdev;
if (minor < 0 || minor >= MAX_IRCTL_DEVICES) {
- printk(KERN_ERR "lirc_dev: %s: minor (%d) must be between "
- "0 and %d!\n", __func__, minor, MAX_IRCTL_DEVICES - 1);
+ pr_err("minor (%d) must be between 0 and %d!\n",
+ minor, MAX_IRCTL_DEVICES - 1);
return -EBADRQC;
}
ir = irctls[minor];
if (!ir) {
- printk(KERN_ERR "lirc_dev: %s: failed to get irctl struct "
- "for minor %d!\n", __func__, minor);
+ pr_err("failed to get irctl\n");
return -ENOENT;
}
@@ -399,8 +392,8 @@ int lirc_unregister_driver(int minor)
mutex_lock(&lirc_dev_lock);
if (ir->d.minor != minor) {
- printk(KERN_ERR "lirc_dev: %s: minor (%d) device not "
- "registered!\n", __func__, minor);
+ dev_err(ir->d.dev, "lirc_dev: minor %d device not registered\n",
+ minor);
mutex_unlock(&lirc_dev_lock);
return -ENOENT;
}
@@ -418,7 +411,10 @@ int lirc_unregister_driver(int minor)
ir->d.name, ir->d.minor);
wake_up_interruptible(&ir->buf->wait_poll);
mutex_lock(&ir->irctl_lock);
- ir->d.set_use_dec(ir->d.data);
+
+ if (ir->d.set_use_dec)
+ ir->d.set_use_dec(ir->d.data);
+
module_put(cdev->owner);
mutex_unlock(&ir->irctl_lock);
} else {
@@ -442,8 +438,7 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
int retval = 0;
if (iminor(inode) >= MAX_IRCTL_DEVICES) {
- printk(KERN_WARNING "lirc_dev [%d]: open result = -ENODEV\n",
- iminor(inode));
+ pr_err("open result for %d is -ENODEV\n", iminor(inode));
return -ENODEV;
}
@@ -477,7 +472,8 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
cdev = ir->cdev;
if (try_module_get(cdev->owner)) {
ir->open++;
- retval = ir->d.set_use_inc(ir->d.data);
+ if (ir->d.set_use_inc)
+ retval = ir->d.set_use_inc(ir->d.data);
if (retval) {
module_put(cdev->owner);
@@ -490,10 +486,6 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
}
error:
- if (ir)
- dev_dbg(ir->d.dev, LOGHEAD "open result = %d\n",
- ir->d.name, ir->d.minor, retval);
-
mutex_unlock(&lirc_dev_lock);
nonseekable_open(inode, file);
@@ -509,14 +501,12 @@ int lirc_dev_fop_close(struct inode *inode, struct file *file)
int ret;
if (!ir) {
- printk(KERN_ERR "%s: called with invalid irctl\n", __func__);
+ pr_err("called with invalid irctl\n");
return -EINVAL;
}
cdev = ir->cdev;
- dev_dbg(ir->d.dev, LOGHEAD "close called\n", ir->d.name, ir->d.minor);
-
ret = mutex_lock_killable(&lirc_dev_lock);
WARN_ON(ret);
@@ -524,7 +514,8 @@ int lirc_dev_fop_close(struct inode *inode, struct file *file)
ir->open--;
if (ir->attached) {
- ir->d.set_use_dec(ir->d.data);
+ if (ir->d.set_use_dec)
+ ir->d.set_use_dec(ir->d.data);
module_put(cdev->owner);
} else {
lirc_irctl_cleanup(ir);
@@ -547,12 +538,10 @@ unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait)
unsigned int ret;
if (!ir) {
- printk(KERN_ERR "%s: called with invalid irctl\n", __func__);
+ pr_err("called with invalid irctl\n");
return POLLERR;
}
- dev_dbg(ir->d.dev, LOGHEAD "poll called\n", ir->d.name, ir->d.minor);
-
if (!ir->attached)
return POLLERR;
@@ -580,7 +569,7 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct irctl *ir = irctls[iminor(file_inode(file))];
if (!ir) {
- printk(KERN_ERR "lirc_dev: %s: no irctl found!\n", __func__);
+ pr_err("no irctl found!\n");
return -ENODEV;
}
@@ -588,7 +577,7 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
ir->d.name, ir->d.minor, cmd);
if (ir->d.minor == NOPLUG || !ir->attached) {
- dev_dbg(ir->d.dev, LOGHEAD "ioctl result = -ENODEV\n",
+ dev_err(ir->d.dev, LOGHEAD "ioctl result = -ENODEV\n",
ir->d.name, ir->d.minor);
return -ENODEV;
}
@@ -600,8 +589,8 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
result = put_user(ir->d.features, (__u32 __user *)arg);
break;
case LIRC_GET_REC_MODE:
- if (!(ir->d.features & LIRC_CAN_REC_MASK)) {
- result = -ENOSYS;
+ if (!LIRC_CAN_REC(ir->d.features)) {
+ result = -ENOTTY;
break;
}
@@ -610,8 +599,8 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
(__u32 __user *)arg);
break;
case LIRC_SET_REC_MODE:
- if (!(ir->d.features & LIRC_CAN_REC_MASK)) {
- result = -ENOSYS;
+ if (!LIRC_CAN_REC(ir->d.features)) {
+ result = -ENOTTY;
break;
}
@@ -629,7 +618,7 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case LIRC_GET_MIN_TIMEOUT:
if (!(ir->d.features & LIRC_CAN_SET_REC_TIMEOUT) ||
ir->d.min_timeout == 0) {
- result = -ENOSYS;
+ result = -ENOTTY;
break;
}
@@ -638,7 +627,7 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case LIRC_GET_MAX_TIMEOUT:
if (!(ir->d.features & LIRC_CAN_SET_REC_TIMEOUT) ||
ir->d.max_timeout == 0) {
- result = -ENOSYS;
+ result = -ENOTTY;
break;
}
@@ -648,9 +637,6 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
result = -EINVAL;
}
- dev_dbg(ir->d.dev, LOGHEAD "ioctl result = %d\n",
- ir->d.name, ir->d.minor, result);
-
mutex_unlock(&ir->irctl_lock);
return result;
@@ -668,7 +654,7 @@ ssize_t lirc_dev_fop_read(struct file *file,
DECLARE_WAITQUEUE(wait, current);
if (!ir) {
- printk(KERN_ERR "%s: called with invalid irctl\n", __func__);
+ pr_err("called with invalid irctl\n");
return -ENODEV;
}
@@ -709,7 +695,8 @@ ssize_t lirc_dev_fop_read(struct file *file,
/* According to the read(2) man page, 'written' can be
* returned as less than 'length', instead of blocking
* again, returning -EWOULDBLOCK, or returning
- * -ERESTARTSYS */
+ * -ERESTARTSYS
+ */
if (written)
break;
if (file->f_flags & O_NONBLOCK) {
@@ -755,8 +742,6 @@ out_locked:
out_unlocked:
kfree(buf);
- dev_dbg(ir->d.dev, LOGHEAD "read result = %s (%d)\n",
- ir->d.name, ir->d.minor, ret ? "<fail>" : "<ok>", ret);
return ret ? ret : written;
}
@@ -775,12 +760,10 @@ ssize_t lirc_dev_fop_write(struct file *file, const char __user *buffer,
struct irctl *ir = irctls[iminor(file_inode(file))];
if (!ir) {
- printk(KERN_ERR "%s: called with invalid irctl\n", __func__);
+ pr_err("called with invalid irctl\n");
return -ENODEV;
}
- dev_dbg(ir->d.dev, LOGHEAD "write called\n", ir->d.name, ir->d.minor);
-
if (!ir->attached)
return -ENODEV;
@@ -795,25 +778,23 @@ static int __init lirc_dev_init(void)
lirc_class = class_create(THIS_MODULE, "lirc");
if (IS_ERR(lirc_class)) {
- retval = PTR_ERR(lirc_class);
- printk(KERN_ERR "lirc_dev: class_create failed\n");
- goto error;
+ pr_err("class_create failed\n");
+ return PTR_ERR(lirc_class);
}
retval = alloc_chrdev_region(&lirc_base_dev, 0, MAX_IRCTL_DEVICES,
IRCTL_DEV_NAME);
if (retval) {
class_destroy(lirc_class);
- printk(KERN_ERR "lirc_dev: alloc_chrdev_region failed\n");
- goto error;
+ pr_err("alloc_chrdev_region failed\n");
+ return retval;
}
- printk(KERN_INFO "lirc_dev: IR Remote Control driver registered, "
- "major %d \n", MAJOR(lirc_base_dev));
+ pr_info("IR Remote Control driver registered, major %d\n",
+ MAJOR(lirc_base_dev));
-error:
- return retval;
+ return 0;
}
@@ -822,7 +803,7 @@ static void __exit lirc_dev_exit(void)
{
class_destroy(lirc_class);
unregister_chrdev_region(lirc_base_dev, MAX_IRCTL_DEVICES);
- printk(KERN_INFO "lirc_dev: module unloaded\n");
+ pr_info("module unloaded\n");
}
module_init(lirc_dev_init);
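[Editor's note] lirc_register_driver() is now a thin wrapper: lirc_allocate_driver() validates the struct and claims a minor, and the receive buffer is allocated only for drivers that advertise a receive capability, with a failed allocation rolling the registration back. The rewritten lirc_add_to_buf() loop is equivalent to the old goto form; a sketch of its counting scheme, with a hypothetical read helper:

/* got_data starts at -1 and is bumped before each attempt, so on exit
 * (res != 0) it equals the number of successful reads. */
int res, got_data = -1;

do {
	got_data++;
	res = my_read_sample(dev);	/* hypothetical: 0 == data buffered */
} while (!res);

return got_data ? 0 : res;	/* something was read, or propagate error */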
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 5cf2e749b..4f8c7effd 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -887,6 +887,12 @@ static int mceusb_set_tx_mask(struct rc_dev *dev, u32 mask)
{
struct mceusb_dev *ir = dev->priv;
+ /* number of transmitters; returned as-is when the mask is invalid */
+ int emitters = ir->num_txports ? ir->num_txports : 2;
+
+ if (mask >= (1 << emitters))
+ return emitters;
+
if (ir->flags.tx_mask_normal)
ir->tx_mask = mask;
else
@@ -936,7 +942,7 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
}
- return carrier;
+ return 0;
}
/*
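[Editor's note] The guard added to mceusb_set_tx_mask() implements the LIRC convention for LIRC_SET_TRANSMITTER_MASK: when the mask names transmitters the hardware doesn't have, the callback returns the number of available emitters (a positive count) rather than an error code, letting userspace probe the topology. A condensed sketch of that contract (my_* names are illustrative):

static int my_set_tx_mask(struct rc_dev *dev, u32 mask)
{
	int emitters = my_emitter_count(dev);	/* hypothetical query */

	if (mask >= (1U << emitters))
		return emitters;	/* invalid: report emitter count */

	my_program_tx_mask(dev, mask);
	return 0;
}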
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index e8ceb0e2f..00215f343 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -139,11 +139,7 @@ static inline void nvt_cir_reg_write(struct nvt_dev *nvt, u8 val, u8 offset)
/* read val from cir config register */
static u8 nvt_cir_reg_read(struct nvt_dev *nvt, u8 offset)
{
- u8 val;
-
- val = inb(nvt->cir_addr + offset);
-
- return val;
+ return inb(nvt->cir_addr + offset);
}
/* write val to cir wake register */
@@ -156,11 +152,7 @@ static inline void nvt_cir_wake_reg_write(struct nvt_dev *nvt,
/* read val from cir wake config register */
static u8 nvt_cir_wake_reg_read(struct nvt_dev *nvt, u8 offset)
{
- u8 val;
-
- val = inb(nvt->cir_wake_addr + offset);
-
- return val;
+ return inb(nvt->cir_wake_addr + offset);
}
/* don't override io address if one is set already */
@@ -481,18 +473,14 @@ static void nvt_cir_wake_ldev_init(struct nvt_dev *nvt)
nvt_set_ioaddr(nvt, &nvt->cir_wake_addr);
- nvt_cr_write(nvt, nvt->cir_wake_irq, CR_CIR_IRQ_RSRC);
-
- nvt_dbg("CIR Wake initialized, base io port address: 0x%lx, irq: %d",
- nvt->cir_wake_addr, nvt->cir_wake_irq);
+ nvt_dbg("CIR Wake initialized, base io port address: 0x%lx",
+ nvt->cir_wake_addr);
}
/* clear out the hardware's cir rx fifo */
static void nvt_clear_cir_fifo(struct nvt_dev *nvt)
{
- u8 val;
-
- val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
+ u8 val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
nvt_cir_reg_write(nvt, val | CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON);
}
@@ -528,7 +516,7 @@ static void nvt_set_cir_iren(struct nvt_dev *nvt)
{
u8 iren;
- iren = CIR_IREN_RTR | CIR_IREN_PE;
+ iren = CIR_IREN_RTR | CIR_IREN_PE | CIR_IREN_RFO;
nvt_cir_reg_write(nvt, iren, CIR_IREN);
}
@@ -567,34 +555,15 @@ static void nvt_cir_regs_init(struct nvt_dev *nvt)
static void nvt_cir_wake_regs_init(struct nvt_dev *nvt)
{
- /* set number of bytes needed for wake from s3 (default 65) */
- nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFO_CMP_BYTES,
- CIR_WAKE_FIFO_CMP_DEEP);
-
- /* set tolerance/variance allowed per byte during wake compare */
- nvt_cir_wake_reg_write(nvt, CIR_WAKE_CMP_TOLERANCE,
- CIR_WAKE_FIFO_CMP_TOL);
-
- /* set sample limit count (PE interrupt raised when reached) */
- nvt_cir_wake_reg_write(nvt, CIR_RX_LIMIT_COUNT >> 8, CIR_WAKE_SLCH);
- nvt_cir_wake_reg_write(nvt, CIR_RX_LIMIT_COUNT & 0xff, CIR_WAKE_SLCL);
-
- /* set cir wake fifo rx trigger level (currently 67) */
- nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFOCON_RX_TRIGGER_LEV,
- CIR_WAKE_FIFOCON);
-
/*
- * Enable TX and RX, specific carrier on = low, off = high, and set
- * sample period (currently 50us)
+ * Disable RX, set specific carrier on = low, off = high,
+ * and sample period (currently 50us)
*/
- nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN |
+ nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 |
CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
CIR_WAKE_IRCON);
- /* clear cir wake rx fifo */
- nvt_clear_cir_wake_fifo(nvt);
-
/* clear any and all stray interrupts */
nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
@@ -789,8 +758,6 @@ static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
nvt_dbg_verbose("Processing buffer of len %d", nvt->pkts);
- init_ir_raw_event(&rawir);
-
for (i = 0; i < nvt->pkts; i++) {
sample = nvt->buf[i];
@@ -836,19 +803,10 @@ static void nvt_get_rx_ir_data(struct nvt_dev *nvt)
{
u8 fifocount, val;
unsigned int b_idx;
- bool overrun = false;
int i;
/* Get count of how many bytes to read from RX FIFO */
fifocount = nvt_cir_reg_read(nvt, CIR_RXFCONT);
- /* if we get 0xff, probably means the logical dev is disabled */
- if (fifocount == 0xff)
- return;
- /* watch out for a fifo overrun condition */
- else if (fifocount > RX_BUF_LEN) {
- overrun = true;
- fifocount = RX_BUF_LEN;
- }
nvt_dbg("attempting to fetch %u bytes from hw rx fifo", fifocount);
@@ -870,9 +828,6 @@ static void nvt_get_rx_ir_data(struct nvt_dev *nvt)
nvt_dbg("%s: pkts now %d", __func__, nvt->pkts);
nvt_process_rx_ir_data(nvt);
-
- if (overrun)
- nvt_handle_rx_fifo_overrun(nvt);
}
static void nvt_cir_log_irqs(u8 status, u8 iren)
@@ -908,7 +863,7 @@ static bool nvt_cir_tx_inactive(struct nvt_dev *nvt)
static irqreturn_t nvt_cir_isr(int irq, void *data)
{
struct nvt_dev *nvt = data;
- u8 status, iren, cur_state;
+ u8 status, iren;
unsigned long flags;
nvt_dbg_verbose("%s firing", __func__);
@@ -946,23 +901,15 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
nvt_cir_log_irqs(status, iren);
- if (status & CIR_IRSTS_RTR) {
- /* FIXME: add code for study/learn mode */
+ if (status & CIR_IRSTS_RFO)
+ nvt_handle_rx_fifo_overrun(nvt);
+
+ else if (status & (CIR_IRSTS_RTR | CIR_IRSTS_PE)) {
/* We only do rx if not tx'ing */
if (nvt_cir_tx_inactive(nvt))
nvt_get_rx_ir_data(nvt);
}
- if (status & CIR_IRSTS_PE) {
- if (nvt_cir_tx_inactive(nvt))
- nvt_get_rx_ir_data(nvt);
-
- cur_state = nvt->study_state;
-
- if (cur_state == ST_STUDY_NONE)
- nvt_clear_cir_fifo(nvt);
- }
-
spin_unlock_irqrestore(&nvt->nvt_lock, flags);
if (status & CIR_IRSTS_TE)
@@ -1004,51 +951,6 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
return IRQ_HANDLED;
}
-/* Interrupt service routine for CIR Wake */
-static irqreturn_t nvt_cir_wake_isr(int irq, void *data)
-{
- u8 status, iren, val;
- struct nvt_dev *nvt = data;
- unsigned long flags;
-
- nvt_dbg_wake("%s firing", __func__);
-
- spin_lock_irqsave(&nvt->nvt_lock, flags);
-
- status = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS);
- iren = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN);
-
- /* IRQ may be shared with CIR, therefore check for each
- * status bit whether the related interrupt source is enabled
- */
- if (!(status & iren)) {
- spin_unlock_irqrestore(&nvt->nvt_lock, flags);
- return IRQ_NONE;
- }
-
- if (status & CIR_WAKE_IRSTS_IR_PENDING)
- nvt_clear_cir_wake_fifo(nvt);
-
- nvt_cir_wake_reg_write(nvt, status, CIR_WAKE_IRSTS);
- nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IRSTS);
-
- if ((status & CIR_WAKE_IRSTS_PE) &&
- (nvt->wake_state == ST_WAKE_START)) {
- while (nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX)) {
- val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY);
- nvt_dbg("setting wake up key: 0x%x", val);
- }
-
- nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);
- nvt->wake_state = ST_WAKE_FINISH;
- }
-
- spin_unlock_irqrestore(&nvt->nvt_lock, flags);
-
- nvt_dbg_wake("%s done", __func__);
- return IRQ_HANDLED;
-}
-
static void nvt_disable_cir(struct nvt_dev *nvt)
{
unsigned long flags;
@@ -1152,8 +1054,6 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
nvt->cir_irq = pnp_irq(pdev, 0);
nvt->cir_wake_addr = pnp_port_start(pdev, 1);
- /* irq is always shared between cir and cir wake */
- nvt->cir_wake_irq = nvt->cir_irq;
nvt->cr_efir = CR_EFIR;
nvt->cr_efdr = CR_EFDR;
@@ -1229,11 +1129,6 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
CIR_IOREG_LENGTH, NVT_DRIVER_NAME "-wake"))
goto exit_unregister_device;
- if (devm_request_irq(&pdev->dev, nvt->cir_wake_irq,
- nvt_cir_wake_isr, IRQF_SHARED,
- NVT_DRIVER_NAME "-wake", (void *)nvt))
- goto exit_unregister_device;
-
ret = device_create_file(&rdev->dev, &dev_attr_wakeup_data);
if (ret)
goto exit_unregister_device;
@@ -1284,10 +1179,6 @@ static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state)
spin_lock_irqsave(&nvt->nvt_lock, flags);
- /* zero out misc state tracking */
- nvt->study_state = ST_STUDY_NONE;
- nvt->wake_state = ST_WAKE_NONE;
-
/* disable all CIR interrupts */
nvt_cir_reg_write(nvt, 0, CIR_IREN);
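Taken together, the nuvoton-cir.c hunks above move overrun handling from a heuristic on the FIFO byte count to the dedicated RFO interrupt: CIR_IREN_RFO is enabled, and the ISR dispatches on the status bits. A condensed view of the resulting flow, using the driver's own helpers (locking and the TX path omitted):

    /* condensed sketch of the new ISR dispatch; not the full handler */
    static void example_dispatch(struct nvt_dev *nvt, u8 status)
    {
    	if (status & CIR_IRSTS_RFO)			/* hard overrun */
    		nvt_handle_rx_fifo_overrun(nvt);
    	else if (status & (CIR_IRSTS_RTR | CIR_IRSTS_PE))
    		if (nvt_cir_tx_inactive(nvt))		/* rx only when idle */
    			nvt_get_rx_ir_data(nvt);
    }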
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index c9c98ebb1..acf735fc7 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -104,7 +104,6 @@ struct nvt_dev {
unsigned long cir_addr;
unsigned long cir_wake_addr;
int cir_irq;
- int cir_wake_irq;
enum nvt_chip_ver chip_ver;
/* hardware id */
@@ -112,36 +111,12 @@ struct nvt_dev {
u8 chip_minor;
/* hardware features */
- bool hw_learning_capable;
bool hw_tx_capable;
- /* rx settings */
- bool learning_enabled;
-
- /* track cir wake state */
- u8 wake_state;
- /* for study */
- u8 study_state;
/* carrier period = 1 / frequency */
u32 carrier;
};
-/* study states */
-#define ST_STUDY_NONE 0x0
-#define ST_STUDY_START 0x1
-#define ST_STUDY_CARRIER 0x2
-#define ST_STUDY_ALL_RECV 0x4
-
-/* wake states */
-#define ST_WAKE_NONE 0x0
-#define ST_WAKE_START 0x1
-#define ST_WAKE_FINISH 0x2
-
-/* receive states */
-#define ST_RX_WAIT_7F 0x1
-#define ST_RX_WAIT_HEAD 0x2
-#define ST_RX_WAIT_SILENT_END 0x4
-
/* send states */
#define ST_TX_NONE 0x0
#define ST_TX_REQUEST 0x2
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 7dfc7c218..8e7f2929f 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -130,13 +130,18 @@ static struct rc_map_list empty_map = {
static int ir_create_table(struct rc_map *rc_map,
const char *name, u64 rc_type, size_t size)
{
- rc_map->name = name;
+ rc_map->name = kstrdup(name, GFP_KERNEL);
+ if (!rc_map->name)
+ return -ENOMEM;
rc_map->rc_type = rc_type;
rc_map->alloc = roundup_pow_of_two(size * sizeof(struct rc_map_table));
rc_map->size = rc_map->alloc / sizeof(struct rc_map_table);
rc_map->scan = kmalloc(rc_map->alloc, GFP_KERNEL);
- if (!rc_map->scan)
+ if (!rc_map->scan) {
+ kfree(rc_map->name);
+ rc_map->name = NULL;
return -ENOMEM;
+ }
IR_dprintk(1, "Allocated space for %u keycode entries (%u bytes)\n",
rc_map->size, rc_map->alloc);
@@ -153,6 +158,7 @@ static int ir_create_table(struct rc_map *rc_map,
static void ir_free_table(struct rc_map *rc_map)
{
rc_map->size = 0;
+ kfree(rc_map->name);
kfree(rc_map->scan);
rc_map->scan = NULL;
}
@@ -804,6 +810,7 @@ static const struct {
{ RC_BIT_SHARP, "sharp", "ir-sharp-decoder" },
{ RC_BIT_MCE_KBD, "mce_kbd", "ir-mce_kbd-decoder" },
{ RC_BIT_XMP, "xmp", "ir-xmp-decoder" },
+ { RC_BIT_CEC, "cec", NULL },
};
/**
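The ir_create_table()/ir_free_table() hunks make the keymap own a private copy of its name: kstrdup() on creation, kfree() both on the allocation-failure path and on teardown, so a caller-owned string may disappear without leaving a dangling pointer behind. The pattern, reduced to its essentials (function names illustrative):

    #include <linux/slab.h>
    #include <linux/string.h>

    static int example_create(struct rc_map *map, const char *name)
    {
    	map->name = kstrdup(name, GFP_KERNEL);	/* private copy */
    	if (!map->name)
    		return -ENOMEM;

    	map->scan = kmalloc(map->alloc, GFP_KERNEL);
    	if (!map->scan) {
    		kfree(map->name);		/* unwind on failure */
    		map->name = NULL;
    		return -ENOMEM;
    	}
    	return 0;
    }

    static void example_free(struct rc_map *map)
    {
    	kfree(map->name);			/* matches the kstrdup() */
    	kfree(map->scan);
    	map->scan = NULL;
    }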
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index ec74244a3..399f44d89 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -188,8 +188,7 @@ struct redrat3_dev {
/* usb dma */
dma_addr_t dma_in;
- /* rx signal timeout timer */
- struct timer_list rx_timeout;
+ /* rx signal timeout */
u32 hw_timeout;
/* Is the device currently transmitting?*/
@@ -330,22 +329,11 @@ static u32 redrat3_us_to_len(u32 microsec)
return result ? result : 1;
}
-/* timer callback to send reset event */
-static void redrat3_rx_timeout(unsigned long data)
-{
- struct redrat3_dev *rr3 = (struct redrat3_dev *)data;
-
- dev_dbg(rr3->dev, "calling ir_raw_event_reset\n");
- ir_raw_event_reset(rr3->rc);
-}
-
static void redrat3_process_ir_data(struct redrat3_dev *rr3)
{
DEFINE_IR_RAW_EVENT(rawir);
struct device *dev;
- unsigned i, trailer = 0;
- unsigned sig_size, single_len, offset, val;
- unsigned long delay;
+ unsigned int i, sig_size, single_len, offset, val;
u32 mod_freq;
if (!rr3) {
@@ -355,10 +343,6 @@ static void redrat3_process_ir_data(struct redrat3_dev *rr3)
dev = rr3->dev;
- /* Make sure we reset the IR kfifo after a bit of inactivity */
- delay = usecs_to_jiffies(rr3->hw_timeout);
- mod_timer(&rr3->rx_timeout, jiffies + delay);
-
mod_freq = redrat3_val_to_mod_freq(&rr3->irdata);
dev_dbg(dev, "Got mod_freq of %u\n", mod_freq);
@@ -376,9 +360,6 @@ static void redrat3_process_ir_data(struct redrat3_dev *rr3)
rawir.pulse = true;
rawir.duration = US_TO_NS(single_len);
- /* Save initial pulse length to fudge trailer */
- if (i == 0)
- trailer = rawir.duration;
/* cap the value to IR_MAX_DURATION */
rawir.duration = (rawir.duration > IR_MAX_DURATION) ?
IR_MAX_DURATION : rawir.duration;
@@ -388,18 +369,13 @@ static void redrat3_process_ir_data(struct redrat3_dev *rr3)
ir_raw_event_store_with_filter(rr3->rc, &rawir);
}
- /* add a trailing space, if need be */
- if (i % 2) {
- rawir.pulse = false;
- /* this duration is made up, and may not be ideal... */
- if (trailer < US_TO_NS(1000))
- rawir.duration = US_TO_NS(2800);
- else
- rawir.duration = trailer;
- dev_dbg(dev, "storing trailing space with duration %d\n",
- rawir.duration);
- ir_raw_event_store_with_filter(rr3->rc, &rawir);
- }
+ /* add a trailing space */
+ rawir.pulse = false;
+ rawir.timeout = true;
+ rawir.duration = US_TO_NS(rr3->hw_timeout);
+ dev_dbg(dev, "storing trailing timeout with duration %d\n",
+ rawir.duration);
+ ir_raw_event_store_with_filter(rr3->rc, &rawir);
dev_dbg(dev, "calling ir_raw_event_handle\n");
ir_raw_event_handle(rr3->rc);
@@ -499,6 +475,37 @@ static u32 redrat3_get_timeout(struct redrat3_dev *rr3)
return timeout;
}
+static int redrat3_set_timeout(struct rc_dev *rc_dev, unsigned int timeoutns)
+{
+ struct redrat3_dev *rr3 = rc_dev->priv;
+ struct usb_device *udev = rr3->udev;
+ struct device *dev = rr3->dev;
+ u32 *timeout;
+ int ret;
+
+ timeout = kmalloc(sizeof(*timeout), GFP_KERNEL);
+ if (!timeout)
+ return -ENOMEM;
+
+ *timeout = cpu_to_be32(redrat3_us_to_len(timeoutns / 1000));
+ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), RR3_SET_IR_PARAM,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+ RR3_IR_IO_SIG_TIMEOUT, 0, timeout, sizeof(*timeout),
+ HZ * 25);
+ dev_dbg(dev, "set ir parm timeout %d ret 0x%02x\n",
+ be32_to_cpu(*timeout), ret);
+
+ if (ret == sizeof(*timeout)) {
+ rr3->hw_timeout = timeoutns / 1000;
+ ret = 0;
+ } else if (ret >= 0)
+ ret = -EIO;
+
+ kfree(timeout);
+
+ return ret;
+}
+
static void redrat3_reset(struct redrat3_dev *rr3)
{
struct usb_device *udev = rr3->udev;
@@ -708,7 +715,7 @@ static int redrat3_set_tx_carrier(struct rc_dev *rcdev, u32 carrier)
rr3->carrier = carrier;
- return carrier;
+ return 0;
}
static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf,
@@ -880,7 +887,10 @@ static struct rc_dev *redrat3_init_rc_dev(struct redrat3_dev *rr3)
rc->priv = rr3;
rc->driver_type = RC_DRIVER_IR_RAW;
rc->allowed_protocols = RC_BIT_ALL;
- rc->timeout = US_TO_NS(2750);
+ rc->min_timeout = MS_TO_NS(RR3_RX_MIN_TIMEOUT);
+ rc->max_timeout = MS_TO_NS(RR3_RX_MAX_TIMEOUT);
+ rc->timeout = US_TO_NS(rr3->hw_timeout);
+ rc->s_timeout = redrat3_set_timeout;
rc->tx_ir = redrat3_transmit_ir;
rc->s_tx_carrier = redrat3_set_tx_carrier;
rc->driver_name = DRIVER_NAME;
@@ -990,7 +1000,7 @@ static int redrat3_dev_probe(struct usb_interface *intf,
if (retval < 0)
goto error;
- /* store current hardware timeout, in us, will use for kfifo resets */
+ /* store current hardware timeout, in µs */
rr3->hw_timeout = redrat3_get_timeout(rr3);
/* default.. will get overridden by any sends with a freq defined */
@@ -1026,7 +1036,6 @@ static int redrat3_dev_probe(struct usb_interface *intf,
retval = -ENOMEM;
goto led_free_error;
}
- setup_timer(&rr3->rx_timeout, redrat3_rx_timeout, (unsigned long)rr3);
/* we can register the device now, as it is ready */
usb_set_intfdata(intf, rr3);
@@ -1055,7 +1064,6 @@ static void redrat3_dev_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
rc_unregister_device(rr3->rc);
led_classdev_unregister(&rr3->led);
- del_timer_sync(&rr3->rx_timeout);
redrat3_delete(rr3, udev);
}
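redrat3_set_timeout() above allocates its 4-byte transfer buffer with kmalloc() rather than using the stack: buffers handed to usb_control_msg() must be DMA-capable, which stack storage is not guaranteed to be. A sketch of the pattern under those assumptions (the request constant and timeout value are illustrative):

    #include <linux/slab.h>
    #include <linux/usb.h>

    static int example_send_u32(struct usb_device *udev, u8 request, u32 value)
    {
    	__be32 *buf;
    	int ret;

    	buf = kmalloc(sizeof(*buf), GFP_KERNEL);	/* heap: DMA-safe */
    	if (!buf)
    		return -ENOMEM;

    	*buf = cpu_to_be32(value);		/* device expects big-endian */
    	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request,
    			      USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
    			      0, 0, buf, sizeof(*buf), 5000 /* ms */);
    	kfree(buf);

    	if (ret < 0)
    		return ret;
    	return ret == sizeof(*buf) ? 0 : -EIO;
    }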
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index d839f73f6..95ae60e65 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -615,6 +615,10 @@ wbcir_txmask(struct rc_dev *dev, u32 mask)
unsigned long flags;
u8 val;
+ /* return the number of transmitters */
+ if (mask > 15)
+ return 4;
+
/* Four outputs, only one output can be enabled at a time */
switch (mask) {
case 0x1:
diff --git a/drivers/media/tuners/it913x.c b/drivers/media/tuners/it913x.c
index 5c96da693..6c3ef2181 100644
--- a/drivers/media/tuners/it913x.c
+++ b/drivers/media/tuners/it913x.c
@@ -464,6 +464,7 @@ MODULE_DEVICE_TABLE(i2c, it913x_id_table);
static struct i2c_driver it913x_driver = {
.driver = {
.name = "it913x",
+ .suppress_bind_attrs = true,
},
.probe = it913x_probe,
.remove = it913x_remove,
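Setting .suppress_bind_attrs above removes the driver's bind/unbind files from sysfs, so the tuner cannot be manually unbound while the DVB core still holds references into it. The same knob on a hypothetical i2c driver (probe body and name assumed):

    #include <linux/i2c.h>
    #include <linux/module.h>

    static int example_probe(struct i2c_client *client,
    			 const struct i2c_device_id *id)
    {
    	return 0;	/* nothing to set up in this sketch */
    }

    static struct i2c_driver example_driver = {
    	.driver = {
    		.name = "example",
    		/* no /sys/bus/i2c/drivers/example/{bind,unbind} */
    		.suppress_bind_attrs = true,
    	},
    	.probe = example_probe,
    };
    module_i2c_driver(example_driver);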
diff --git a/drivers/media/tuners/mt2063.c b/drivers/media/tuners/mt2063.c
index 6457ac91e..7f0b9d594 100644
--- a/drivers/media/tuners/mt2063.c
+++ b/drivers/media/tuners/mt2063.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/string.h>
#include <linux/videodev2.h>
+#include <linux/gcd.h>
#include "mt2063.h"
@@ -665,27 +666,6 @@ static u32 MT2063_ChooseFirstIF(struct MT2063_AvoidSpursData_t *pAS_Info)
}
/**
- * gcd() - Uses Euclid's algorithm
- *
- * @u, @v: Unsigned values whose GCD is desired.
- *
- * Returns THE greatest common divisor of u and v, if either value is 0,
- * the other value is returned as the result.
- */
-static u32 MT2063_gcd(u32 u, u32 v)
-{
- u32 r;
-
- while (v != 0) {
- r = u % v;
- u = v;
- v = r;
- }
-
- return u;
-}
-
-/**
* IsSpurInBand() - Checks to see if a spur will be present within the IF's
* bandwidth. (fIFOut +/- fIFBW, -fIFOut +/- fIFBW)
*
@@ -731,12 +711,12 @@ static u32 IsSpurInBand(struct MT2063_AvoidSpursData_t *pAS_Info,
** of f_LO1, f_LO2 and the edge value. Use the larger of this
** gcd-based scale factor or f_Scale.
*/
- lo_gcd = MT2063_gcd(f_LO1, f_LO2);
- gd_Scale = max((u32) MT2063_gcd(lo_gcd, d), f_Scale);
+ lo_gcd = gcd(f_LO1, f_LO2);
+ gd_Scale = max((u32) gcd(lo_gcd, d), f_Scale);
hgds = gd_Scale / 2;
- gc_Scale = max((u32) MT2063_gcd(lo_gcd, c), f_Scale);
+ gc_Scale = max((u32) gcd(lo_gcd, c), f_Scale);
hgcs = gc_Scale / 2;
- gf_Scale = max((u32) MT2063_gcd(lo_gcd, f), f_Scale);
+ gf_Scale = max((u32) gcd(lo_gcd, f), f_Scale);
hgfs = gf_Scale / 2;
n0 = DIV_ROUND_UP(f_LO2 - d, f_LO1 - f_LO2);
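The mt2063 hunks above drop the driver's private Euclid loop in favour of the kernel-wide gcd() helper from <linux/gcd.h>, which has the same either-argument-zero behaviour the removed comment described. A usage sketch mirroring the IsSpurInBand() computation (names illustrative):

    #include <linux/gcd.h>
    #include <linux/kernel.h>

    static u32 example_scale(u32 f_lo1, u32 f_lo2, u32 edge, u32 floor)
    {
    	u32 lo_gcd = gcd(f_lo1, f_lo2);

    	/* gcd-based scale factor, but never below the caller's floor */
    	return max((u32)gcd(lo_gcd, edge), floor);
    }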
diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c
index 6ab35e315..08dca4035 100644
--- a/drivers/media/tuners/r820t.c
+++ b/drivers/media/tuners/r820t.c
@@ -337,20 +337,6 @@ static int r820t_xtal_capacitor[][2] = {
};
/*
- * measured with a Racal 6103E GSM test set at 928 MHz with -60 dBm
- * input power, for raw results see:
- * http://steve-m.de/projects/rtl-sdr/gain_measurement/r820t/
- */
-
-static const int r820t_lna_gain_steps[] = {
- 0, 9, 13, 40, 38, 13, 31, 22, 26, 31, 26, 14, 19, 5, 35, 13
-};
-
-static const int r820t_mixer_gain_steps[] = {
- 0, 5, 10, 10, 19, 9, 10, 25, 17, 10, 8, 16, 13, 6, 3, -8
-};
-
-/*
* I2C read/write code and shadow registers logic
*/
static void shadow_store(struct r820t_priv *priv, u8 reg, const u8 *val,
@@ -1216,6 +1202,21 @@ static int r820t_read_gain(struct r820t_priv *priv)
#if 0
/* FIXME: This routine requires more testing */
+
+/*
+ * measured with a Racal 6103E GSM test set at 928 MHz with -60 dBm
+ * input power, for raw results see:
+ * http://steve-m.de/projects/rtl-sdr/gain_measurement/r820t/
+ */
+
+static const int r820t_lna_gain_steps[] = {
+ 0, 9, 13, 40, 38, 13, 31, 22, 26, 31, 26, 14, 19, 5, 35, 13
+};
+
+static const int r820t_mixer_gain_steps[] = {
+ 0, 5, 10, 10, 19, 9, 10, 25, 17, 10, 8, 16, 13, 6, 3, -8
+};
+
static int r820t_set_gain_mode(struct r820t_priv *priv,
bool set_manual_gain,
int gain)
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index abe362cd6..d2323b330 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -514,7 +514,8 @@ MODULE_DEVICE_TABLE(i2c, si2157_id_table);
static struct i2c_driver si2157_driver = {
.driver = {
- .name = "si2157",
+ .name = "si2157",
+ .suppress_bind_attrs = true,
},
.probe = si2157_probe,
.remove = si2157_remove,
diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
index 92d9d4214..fe031b069 100644
--- a/drivers/media/usb/airspy/airspy.c
+++ b/drivers/media/usb/airspy/airspy.c
@@ -488,7 +488,7 @@ static void airspy_disconnect(struct usb_interface *intf)
/* Videobuf2 operations */
static int airspy_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers,
- unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int *nplanes, unsigned int sizes[], struct device *alloc_devs[])
{
struct airspy *s = vb2_get_drv_priv(vq);
diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
index 321ea5cf1..bf53553d2 100644
--- a/drivers/media/usb/au0828/au0828-core.c
+++ b/drivers/media/usb/au0828/au0828-core.c
@@ -142,7 +142,7 @@ static void au0828_unregister_media_device(struct au0828_dev *dev)
struct media_device *mdev = dev->media_dev;
struct media_entity_notify *notify, *nextp;
- if (!mdev || !media_devnode_is_registered(&mdev->devnode))
+ if (!mdev || !media_devnode_is_registered(mdev->devnode))
return;
/* Remove au0828 entity_notify callbacks */
@@ -482,7 +482,7 @@ static int au0828_media_device_register(struct au0828_dev *dev,
if (!dev->media_dev)
return 0;
- if (!media_devnode_is_registered(&dev->media_dev->devnode)) {
+ if (!media_devnode_is_registered(dev->media_dev->devnode)) {
/* register media device */
ret = media_device_register(dev->media_dev);
diff --git a/drivers/media/usb/au0828/au0828-vbi.c b/drivers/media/usb/au0828/au0828-vbi.c
index b4efc103a..e0930ce59 100644
--- a/drivers/media/usb/au0828/au0828-vbi.c
+++ b/drivers/media/usb/au0828/au0828-vbi.c
@@ -32,7 +32,7 @@
static int vbi_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct au0828_dev *dev = vb2_get_drv_priv(vq);
unsigned long size = dev->vbi_width * dev->vbi_height * 2;
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 7d0ec4cb2..82b026985 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -698,7 +698,7 @@ int au0828_v4l2_device_register(struct usb_interface *interface,
static int queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct au0828_dev *dev = vb2_get_drv_priv(vq);
unsigned long size = dev->height * dev->bytesperline;
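The alloc_ctxs[] → alloc_devs[] changes in the queue_setup() implementations above track the 4.8 videobuf2 API, which replaced opaque allocator contexts with per-plane struct device pointers; drivers with no special DMA device simply leave the array untouched. A sketch of the new signature (frame size assumed for illustration):

    #include <media/videobuf2-v4l2.h>

    static int example_queue_setup(struct vb2_queue *vq,
    			       unsigned int *nbuffers, unsigned int *nplanes,
    			       unsigned int sizes[],
    			       struct device *alloc_devs[])
    {
    	unsigned long size = 720 * 576 * 2;	/* assumed frame size */

    	if (*nplanes)			/* VIDIOC_CREATE_BUFS path */
    		return sizes[0] < size ? -EINVAL : 0;

    	*nplanes = 1;
    	sizes[0] = size;
    	/* alloc_devs[] left as-is: default to the queue's own device */
    	return 0;
    }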
diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
index fd8b39534..6365897d2 100644
--- a/drivers/media/usb/cx231xx/cx231xx-417.c
+++ b/drivers/media/usb/cx231xx/cx231xx-417.c
@@ -1570,10 +1570,12 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
{
struct cx231xx_fh *fh = file->private_data;
struct cx231xx *dev = fh->dev;
+ struct v4l2_subdev *sd;
dprintk(3, "enter vidioc_s_ctrl()\n");
/* Update the A/V core */
- call_all(dev, core, s_ctrl, ctl);
+ v4l2_device_for_each_subdev(sd, &dev->v4l2_dev)
+ v4l2_s_ctrl(NULL, sd->ctrl_handler, ctl);
dprintk(3, "exit vidioc_s_ctrl()\n");
return 0;
}
diff --git a/drivers/media/usb/dvb-usb-v2/Kconfig b/drivers/media/usb/dvb-usb-v2/Kconfig
index 3dc8ef004..524533d3e 100644
--- a/drivers/media/usb/dvb-usb-v2/Kconfig
+++ b/drivers/media/usb/dvb-usb-v2/Kconfig
@@ -127,17 +127,22 @@ config DVB_USB_MXL111SF
config DVB_USB_RTL28XXU
tristate "Realtek RTL28xxU DVB USB support"
depends on DVB_USB_V2 && I2C_MUX
+ select DVB_MN88472 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_MN88473 if MEDIA_SUBDRV_AUTOSELECT
select DVB_RTL2830
select DVB_RTL2832
select DVB_RTL2832_SDR if (MEDIA_SUBDRV_AUTOSELECT && MEDIA_SDR_SUPPORT)
- select MEDIA_TUNER_QT1010 if MEDIA_SUBDRV_AUTOSELECT
- select MEDIA_TUNER_MT2060 if MEDIA_SUBDRV_AUTOSELECT
- select MEDIA_TUNER_MXL5005S if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_SI2168 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_E4000 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_FC0012 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_FC0013 if MEDIA_SUBDRV_AUTOSELECT
- select MEDIA_TUNER_E4000 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_FC2580 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_MT2060 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_MXL5005S if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_QT1010 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_R820T if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_SI2157 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_TUA9001 if MEDIA_SUBDRV_AUTOSELECT
help
Say Y here to support the Realtek RTL28xxU DVB USB receiver.
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 3b303d3e4..69b77ed8d 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -49,6 +49,7 @@ static int af9035_ctrl_msg(struct dvb_usb_device *d, struct usb_req *req)
#define CHECKSUM_LEN 2
#define USB_TIMEOUT 2000
struct state *state = d_to_priv(d);
+ struct usb_interface *intf = d->intf;
int ret, wlen, rlen;
u16 checksum, tmp_checksum;
@@ -57,8 +58,8 @@ static int af9035_ctrl_msg(struct dvb_usb_device *d, struct usb_req *req)
/* buffer overflow check */
if (req->wlen > (BUF_LEN - REQ_HDR_LEN - CHECKSUM_LEN) ||
req->rlen > (BUF_LEN - ACK_HDR_LEN - CHECKSUM_LEN)) {
- dev_err(&d->udev->dev, "%s: too much data wlen=%d rlen=%d\n",
- KBUILD_MODNAME, req->wlen, req->rlen);
+ dev_err(&intf->dev, "too much data wlen=%d rlen=%d\n",
+ req->wlen, req->rlen);
ret = -EINVAL;
goto exit;
}
@@ -94,10 +95,8 @@ static int af9035_ctrl_msg(struct dvb_usb_device *d, struct usb_req *req)
checksum = af9035_checksum(state->buf, rlen - 2);
tmp_checksum = (state->buf[rlen - 2] << 8) | state->buf[rlen - 1];
if (tmp_checksum != checksum) {
- dev_err(&d->udev->dev,
- "%s: command=%02x checksum mismatch (%04x != %04x)\n",
- KBUILD_MODNAME, req->cmd, tmp_checksum,
- checksum);
+ dev_err(&intf->dev, "command=%02x checksum mismatch (%04x != %04x)\n",
+ req->cmd, tmp_checksum, checksum);
ret = -EIO;
goto exit;
}
@@ -110,8 +109,8 @@ static int af9035_ctrl_msg(struct dvb_usb_device *d, struct usb_req *req)
goto exit;
}
- dev_dbg(&d->udev->dev, "%s: command=%02x failed fw error=%d\n",
- __func__, req->cmd, state->buf[2]);
+ dev_dbg(&intf->dev, "command=%02x failed fw error=%d\n",
+ req->cmd, state->buf[2]);
ret = -EIO;
goto exit;
}
@@ -122,20 +121,20 @@ static int af9035_ctrl_msg(struct dvb_usb_device *d, struct usb_req *req)
exit:
mutex_unlock(&d->usb_mutex);
if (ret < 0)
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&intf->dev, "failed=%d\n", ret);
return ret;
}
/* write multiple registers */
static int af9035_wr_regs(struct dvb_usb_device *d, u32 reg, u8 *val, int len)
{
+ struct usb_interface *intf = d->intf;
u8 wbuf[MAX_XFER_SIZE];
u8 mbox = (reg >> 16) & 0xff;
struct usb_req req = { CMD_MEM_WR, mbox, 6 + len, wbuf, 0, NULL };
if (6 + len > sizeof(wbuf)) {
- dev_warn(&d->udev->dev, "%s: i2c wr: len=%d is too big!\n",
- KBUILD_MODNAME, len);
+ dev_warn(&intf->dev, "i2c wr: len=%d is too big!\n", len);
return -EOPNOTSUPP;
}
@@ -198,6 +197,7 @@ static int af9035_add_i2c_dev(struct dvb_usb_device *d, const char *type,
{
int ret, num;
struct state *state = d_to_priv(d);
+ struct usb_interface *intf = d->intf;
struct i2c_client *client;
struct i2c_board_info board_info = {
.addr = addr,
@@ -212,11 +212,10 @@ static int af9035_add_i2c_dev(struct dvb_usb_device *d, const char *type,
break;
}
- dev_dbg(&d->udev->dev, "%s: num=%d\n", __func__, num);
+ dev_dbg(&intf->dev, "num=%d\n", num);
if (num == AF9035_I2C_CLIENT_MAX) {
- dev_err(&d->udev->dev, "%s: I2C client out of index\n",
- KBUILD_MODNAME);
+ dev_err(&intf->dev, "I2C client out of index\n");
ret = -ENODEV;
goto err;
}
@@ -240,7 +239,7 @@ static int af9035_add_i2c_dev(struct dvb_usb_device *d, const char *type,
state->i2c_client[num] = client;
return 0;
err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&intf->dev, "failed=%d\n", ret);
return ret;
}
@@ -248,6 +247,7 @@ static void af9035_del_i2c_dev(struct dvb_usb_device *d)
{
int num;
struct state *state = d_to_priv(d);
+ struct usb_interface *intf = d->intf;
struct i2c_client *client;
/* find last used client */
@@ -257,11 +257,10 @@ static void af9035_del_i2c_dev(struct dvb_usb_device *d)
break;
}
- dev_dbg(&d->udev->dev, "%s: num=%d\n", __func__, num);
+ dev_dbg(&intf->dev, "num=%d\n", num);
if (num == -1) {
- dev_err(&d->udev->dev, "%s: I2C client out of index\n",
- KBUILD_MODNAME);
+ dev_err(&intf->dev, "I2C client out of index\n");
goto err;
}
@@ -276,7 +275,7 @@ static void af9035_del_i2c_dev(struct dvb_usb_device *d)
state->i2c_client[num] = NULL;
return;
err:
- dev_dbg(&d->udev->dev, "%s: failed\n", __func__);
+ dev_dbg(&intf->dev, "failed\n");
}
static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
@@ -348,6 +347,9 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
ret = af9035_rd_regs(d, reg, &msg[1].buf[0],
msg[1].len);
+ } else if (state->no_read) {
+ memset(msg[1].buf, 0, msg[1].len);
+ ret = 0;
} else {
/* I2C write + read */
u8 buf[MAX_XFER_SIZE];
@@ -367,10 +369,25 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
memcpy(&buf[3], msg[0].buf, msg[0].len);
} else {
buf[1] = msg[0].addr << 1;
- buf[2] = 0x00; /* reg addr len */
buf[3] = 0x00; /* reg addr MSB */
buf[4] = 0x00; /* reg addr LSB */
- memcpy(&buf[5], msg[0].buf, msg[0].len);
+
+ /* Keep prev behavior for write req len > 2 */
+ if (msg[0].len > 2) {
+ buf[2] = 0x00; /* reg addr len */
+ memcpy(&buf[5], msg[0].buf, msg[0].len);
+
+ /* Use reg addr fields if write req len <= 2 */
+ } else {
+ req.wlen = 5;
+ buf[2] = msg[0].len;
+ if (msg[0].len == 2) {
+ buf[3] = msg[0].buf[0];
+ buf[4] = msg[0].buf[1];
+ } else if (msg[0].len == 1) {
+ buf[4] = msg[0].buf[0];
+ }
+ }
}
ret = af9035_ctrl_msg(d, &req);
}
@@ -421,6 +438,9 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
if (msg[0].len > 40) {
/* TODO: correct limits > 40 */
ret = -EOPNOTSUPP;
+ } else if (state->no_read) {
+ memset(msg[0].buf, 0, msg[0].len);
+ ret = 0;
} else {
/* I2C read */
u8 buf[5];
@@ -475,7 +495,9 @@ static struct i2c_algorithm af9035_i2c_algo = {
static int af9035_identify_state(struct dvb_usb_device *d, const char **name)
{
struct state *state = d_to_priv(d);
- int ret;
+ struct usb_interface *intf = d->intf;
+ int ret, ts_mode_invalid;
+ u8 tmp;
u8 wbuf[1] = { 1 };
u8 rbuf[4];
struct usb_req req = { CMD_FW_QUERYINFO, 0, sizeof(wbuf), wbuf,
@@ -492,10 +514,8 @@ static int af9035_identify_state(struct dvb_usb_device *d, const char **name)
if (ret < 0)
goto err;
- dev_info(&d->udev->dev,
- "%s: prechip_version=%02x chip_version=%02x chip_type=%04x\n",
- KBUILD_MODNAME, state->prechip_version,
- state->chip_version, state->chip_type);
+ dev_info(&intf->dev, "prechip_version=%02x chip_version=%02x chip_type=%04x\n",
+ state->prechip_version, state->chip_version, state->chip_type);
if (state->chip_type == 0x9135) {
if (state->chip_version == 0x02)
@@ -511,11 +531,41 @@ static int af9035_identify_state(struct dvb_usb_device *d, const char **name)
state->eeprom_addr = EEPROM_BASE_AF9035;
}
+
+ /* check for dual tuner mode */
+ ret = af9035_rd_reg(d, state->eeprom_addr + EEPROM_TS_MODE, &tmp);
+ if (ret < 0)
+ goto err;
+
+ ts_mode_invalid = 0;
+ switch (tmp) {
+ case 0:
+ break;
+ case 1:
+ case 3:
+ state->dual_mode = true;
+ break;
+ case 5:
+ if (state->chip_type != 0x9135 && state->chip_type != 0x9306)
+ state->dual_mode = true; /* AF9035 */
+ else
+ ts_mode_invalid = 1;
+ break;
+ default:
+ ts_mode_invalid = 1;
+ }
+
+ dev_dbg(&intf->dev, "ts mode=%d dual mode=%d\n", tmp, state->dual_mode);
+
+ if (ts_mode_invalid)
+ dev_info(&intf->dev, "ts mode=%d not supported, defaulting to single tuner mode!\n", tmp);
+
ret = af9035_ctrl_msg(d, &req);
if (ret < 0)
goto err;
- dev_dbg(&d->udev->dev, "%s: reply=%*ph\n", __func__, 4, rbuf);
+ dev_dbg(&intf->dev, "reply=%*ph\n", 4, rbuf);
if (rbuf[0] || rbuf[1] || rbuf[2] || rbuf[3])
ret = WARM;
else
@@ -524,7 +574,7 @@ static int af9035_identify_state(struct dvb_usb_device *d, const char **name)
return ret;
err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&intf->dev, "failed=%d\n", ret);
return ret;
}
@@ -532,6 +582,7 @@ err:
static int af9035_download_firmware_old(struct dvb_usb_device *d,
const struct firmware *fw)
{
+ struct usb_interface *intf = d->intf;
int ret, i, j, len;
u8 wbuf[1];
struct usb_req req = { 0, 0, 0, NULL, 0, NULL };
@@ -562,14 +613,12 @@ static int af9035_download_firmware_old(struct dvb_usb_device *d,
hdr_checksum = fw->data[fw->size - i + 5] << 8;
hdr_checksum |= fw->data[fw->size - i + 6] << 0;
- dev_dbg(&d->udev->dev,
- "%s: core=%d addr=%04x data_len=%d checksum=%04x\n",
- __func__, hdr_core, hdr_addr, hdr_data_len,
- hdr_checksum);
+ dev_dbg(&intf->dev, "core=%d addr=%04x data_len=%d checksum=%04x\n",
+ hdr_core, hdr_addr, hdr_data_len, hdr_checksum);
if (((hdr_core != 1) && (hdr_core != 2)) ||
(hdr_data_len > i)) {
- dev_dbg(&d->udev->dev, "%s: bad firmware\n", __func__);
+ dev_dbg(&intf->dev, "bad firmware\n");
break;
}
@@ -600,18 +649,17 @@ static int af9035_download_firmware_old(struct dvb_usb_device *d,
i -= hdr_data_len + HDR_SIZE;
- dev_dbg(&d->udev->dev, "%s: data uploaded=%zu\n",
- __func__, fw->size - i);
+ dev_dbg(&intf->dev, "data uploaded=%zu\n", fw->size - i);
}
/* print warn if firmware is bad, continue and see what happens */
if (i)
- dev_warn(&d->udev->dev, "%s: bad firmware\n", KBUILD_MODNAME);
+ dev_warn(&intf->dev, "bad firmware\n");
return 0;
err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&intf->dev, "failed=%d\n", ret);
return ret;
}
@@ -619,6 +667,7 @@ err:
static int af9035_download_firmware_new(struct dvb_usb_device *d,
const struct firmware *fw)
{
+ struct usb_interface *intf = d->intf;
int ret, i, i_prev;
struct usb_req req_fw_dl = { CMD_FW_SCATTER_WR, 0, 0, NULL, 0, NULL };
#define HDR_SIZE 7
@@ -648,15 +697,14 @@ static int af9035_download_firmware_new(struct dvb_usb_device *d,
if (ret < 0)
goto err;
- dev_dbg(&d->udev->dev, "%s: data uploaded=%d\n",
- __func__, i);
+ dev_dbg(&intf->dev, "data uploaded=%d\n", i);
}
}
return 0;
err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&intf->dev, "failed=%d\n", ret);
return ret;
}
@@ -664,6 +712,7 @@ err:
static int af9035_download_firmware(struct dvb_usb_device *d,
const struct firmware *fw)
{
+ struct usb_interface *intf = d->intf;
struct state *state = d_to_priv(d);
int ret;
u8 wbuf[1];
@@ -672,7 +721,7 @@ static int af9035_download_firmware(struct dvb_usb_device *d,
struct usb_req req = { 0, 0, 0, NULL, 0, NULL };
struct usb_req req_fw_ver = { CMD_FW_QUERYINFO, 0, 1, wbuf, 4, rbuf };
- dev_dbg(&d->udev->dev, "%s:\n", __func__);
+ dev_dbg(&intf->dev, "\n");
/*
* In case of dual tuner configuration we need to do some extra
@@ -680,11 +729,7 @@ static int af9035_download_firmware(struct dvb_usb_device *d,
* which is done by master demod.
* Master feeds also clock and controls power via GPIO.
*/
- ret = af9035_rd_reg(d, state->eeprom_addr + EEPROM_TS_MODE, &tmp);
- if (ret < 0)
- goto err;
-
- if (tmp == 1 || tmp == 3 || tmp == 5) {
+ if (state->dual_mode) {
/* configure gpioh1, reset & power slave demod */
ret = af9035_wr_reg_mask(d, 0x00d8b0, 0x01, 0x01);
if (ret < 0)
@@ -752,25 +797,25 @@ static int af9035_download_firmware(struct dvb_usb_device *d,
goto err;
if (!(rbuf[0] || rbuf[1] || rbuf[2] || rbuf[3])) {
- dev_err(&d->udev->dev, "%s: firmware did not run\n",
- KBUILD_MODNAME);
+ dev_err(&intf->dev, "firmware did not run\n");
ret = -ENODEV;
goto err;
}
- dev_info(&d->udev->dev, "%s: firmware version=%d.%d.%d.%d",
- KBUILD_MODNAME, rbuf[0], rbuf[1], rbuf[2], rbuf[3]);
+ dev_info(&intf->dev, "firmware version=%d.%d.%d.%d",
+ rbuf[0], rbuf[1], rbuf[2], rbuf[3]);
return 0;
err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&intf->dev, "failed=%d\n", ret);
return ret;
}
static int af9035_read_config(struct dvb_usb_device *d)
{
+ struct usb_interface *intf = d->intf;
struct state *state = d_to_priv(d);
int ret, i;
u8 tmp;
@@ -805,7 +850,7 @@ static int af9035_read_config(struct dvb_usb_device *d)
goto err;
if (tmp == 0x00) {
- dev_dbg(&d->udev->dev, "%s: no eeprom\n", __func__);
+ dev_dbg(&intf->dev, "no eeprom\n");
goto skip_eeprom;
}
} else if (state->chip_type == 0x9306) {
@@ -817,18 +862,6 @@ static int af9035_read_config(struct dvb_usb_device *d)
}
-
- /* check if there is dual tuners */
- ret = af9035_rd_reg(d, state->eeprom_addr + EEPROM_TS_MODE, &tmp);
- if (ret < 0)
- goto err;
-
- if (tmp == 1 || tmp == 3 || tmp == 5)
- state->dual_mode = true;
-
- dev_dbg(&d->udev->dev, "%s: ts mode=%d dual mode=%d\n", __func__,
- tmp, state->dual_mode);
-
if (state->dual_mode) {
/* read 2nd demodulator I2C address */
ret = af9035_rd_reg(d,
@@ -840,8 +873,7 @@ static int af9035_read_config(struct dvb_usb_device *d)
if (tmp)
state->af9033_i2c_addr[1] = tmp;
- dev_dbg(&d->udev->dev, "%s: 2nd demod I2C addr=%02x\n",
- __func__, tmp);
+ dev_dbg(&intf->dev, "2nd demod I2C addr=%02x\n", tmp);
}
addr = state->eeprom_addr;
@@ -852,8 +884,7 @@ static int af9035_read_config(struct dvb_usb_device *d)
if (ret < 0)
goto err;
- dev_dbg(&d->udev->dev, "%s: [%d]tuner=%02x\n",
- __func__, i, tmp);
+ dev_dbg(&intf->dev, "[%d]tuner=%02x\n", i, tmp);
/* tuner sanity check */
if (state->chip_type == 0x9135) {
@@ -882,10 +913,8 @@ static int af9035_read_config(struct dvb_usb_device *d)
}
if (state->af9033_config[i].tuner != tmp) {
- dev_info(&d->udev->dev,
- "%s: [%d] overriding tuner from %02x to %02x\n",
- KBUILD_MODNAME, i, tmp,
- state->af9033_config[i].tuner);
+ dev_info(&intf->dev, "[%d] overriding tuner from %02x to %02x\n",
+ i, tmp, state->af9033_config[i].tuner);
}
switch (state->af9033_config[i].tuner) {
@@ -905,9 +934,8 @@ static int af9035_read_config(struct dvb_usb_device *d)
case AF9033_TUNER_IT9135_62:
break;
default:
- dev_warn(&d->udev->dev,
- "%s: tuner id=%02x not supported, please report!",
- KBUILD_MODNAME, tmp);
+ dev_warn(&intf->dev, "tuner id=%02x not supported, please report!",
+ tmp);
}
/* disable dual mode if driver does not support it */
@@ -924,9 +952,7 @@ static int af9035_read_config(struct dvb_usb_device *d)
break;
default:
state->dual_mode = false;
- dev_info(&d->udev->dev,
- "%s: driver does not support 2nd tuner and will disable it",
- KBUILD_MODNAME);
+ dev_info(&intf->dev, "driver does not support 2nd tuner and will disable it");
}
/* tuner IF frequency */
@@ -942,7 +968,7 @@ static int af9035_read_config(struct dvb_usb_device *d)
tmp16 |= tmp << 8;
- dev_dbg(&d->udev->dev, "%s: [%d]IF=%d\n", __func__, i, tmp16);
+ dev_dbg(&intf->dev, "[%d]IF=%d\n", i, tmp16);
addr += 0x10; /* shift for the 2nd tuner params */
}
@@ -962,10 +988,24 @@ skip_eeprom:
state->af9033_config[i].clock = clock_lut_af9035[tmp];
}
+ state->no_read = false;
+ /* Some MXL5007T devices cannot properly handle tuner I2C read ops. */
+ if (state->af9033_config[0].tuner == AF9033_TUNER_MXL5007T &&
+ le16_to_cpu(d->udev->descriptor.idVendor) == USB_VID_AVERMEDIA)
+ switch (le16_to_cpu(d->udev->descriptor.idProduct)) {
+ case USB_PID_AVERMEDIA_A867:
+ case USB_PID_AVERMEDIA_TWINSTAR:
+ dev_info(&intf->dev,
+ "Device may have issues with I2C read operations. Enabling fix.\n");
+ state->no_read = true;
+ break;
+ }
+
return 0;
err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&intf->dev, "failed=%d\n", ret);
return ret;
}
@@ -973,10 +1013,11 @@ err:
static int af9035_tua9001_tuner_callback(struct dvb_usb_device *d,
int cmd, int arg)
{
+ struct usb_interface *intf = d->intf;
int ret;
u8 val;
- dev_dbg(&d->udev->dev, "%s: cmd=%d arg=%d\n", __func__, cmd, arg);
+ dev_dbg(&intf->dev, "cmd=%d arg=%d\n", cmd, arg);
/*
* CEN always enabled by hardware wiring
@@ -1010,7 +1051,7 @@ static int af9035_tua9001_tuner_callback(struct dvb_usb_device *d,
return 0;
err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&intf->dev, "failed=%d\n", ret);
return ret;
}
@@ -1019,6 +1060,7 @@ err:
static int af9035_fc0011_tuner_callback(struct dvb_usb_device *d,
int cmd, int arg)
{
+ struct usb_interface *intf = d->intf;
int ret;
switch (cmd) {
@@ -1076,7 +1118,7 @@ static int af9035_fc0011_tuner_callback(struct dvb_usb_device *d,
return 0;
err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&intf->dev, "failed=%d\n", ret);
return ret;
}
@@ -1102,9 +1144,10 @@ static int af9035_frontend_callback(void *adapter_priv, int component,
{
struct i2c_adapter *adap = adapter_priv;
struct dvb_usb_device *d = i2c_get_adapdata(adap);
+ struct usb_interface *intf = d->intf;
- dev_dbg(&d->udev->dev, "%s: component=%d cmd=%d arg=%d\n",
- __func__, component, cmd, arg);
+ dev_dbg(&intf->dev, "component=%d cmd=%d arg=%d\n",
+ component, cmd, arg);
switch (component) {
case DVB_FRONTEND_COMPONENT_TUNER:
@@ -1127,9 +1170,10 @@ static int af9035_frontend_attach(struct dvb_usb_adapter *adap)
{
struct state *state = adap_to_priv(adap);
struct dvb_usb_device *d = adap_to_d(adap);
+ struct usb_interface *intf = d->intf;
int ret;
- dev_dbg(&d->udev->dev, "%s: adap->id=%d\n", __func__, adap->id);
+ dev_dbg(&intf->dev, "adap->id=%d\n", adap->id);
if (!state->af9033_config[adap->id].tuner) {
/* unsupported tuner */
@@ -1156,7 +1200,7 @@ static int af9035_frontend_attach(struct dvb_usb_adapter *adap)
return 0;
err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&intf->dev, "failed=%d\n", ret);
return ret;
}
@@ -1165,11 +1209,12 @@ static int it930x_frontend_attach(struct dvb_usb_adapter *adap)
{
struct state *state = adap_to_priv(adap);
struct dvb_usb_device *d = adap_to_d(adap);
+ struct usb_interface *intf = d->intf;
int ret;
struct si2168_config si2168_config;
struct i2c_adapter *adapter;
- dev_dbg(&d->udev->dev, "adap->id=%d\n", adap->id);
+ dev_dbg(&intf->dev, "adap->id=%d\n", adap->id);
memset(&si2168_config, 0, sizeof(si2168_config));
si2168_config.i2c_adapter = &adapter;
@@ -1192,7 +1237,7 @@ static int it930x_frontend_attach(struct dvb_usb_adapter *adap)
return 0;
err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&intf->dev, "failed=%d\n", ret);
return ret;
}
@@ -1201,9 +1246,10 @@ static int af9035_frontend_detach(struct dvb_usb_adapter *adap)
{
struct state *state = adap_to_priv(adap);
struct dvb_usb_device *d = adap_to_d(adap);
+ struct usb_interface *intf = d->intf;
int demod2;
- dev_dbg(&d->udev->dev, "%s: adap->id=%d\n", __func__, adap->id);
+ dev_dbg(&intf->dev, "adap->id=%d\n", adap->id);
/*
* For dual tuner devices we have to resolve 2nd demod client, as there
@@ -1279,12 +1325,13 @@ static int af9035_tuner_attach(struct dvb_usb_adapter *adap)
{
struct state *state = adap_to_priv(adap);
struct dvb_usb_device *d = adap_to_d(adap);
+ struct usb_interface *intf = d->intf;
int ret;
struct dvb_frontend *fe;
struct i2c_msg msg[1];
u8 tuner_addr;
- dev_dbg(&d->udev->dev, "%s: adap->id=%d\n", __func__, adap->id);
+ dev_dbg(&intf->dev, "adap->id=%d\n", adap->id);
/*
* XXX: Hack used in that function: we abuse unused I2C address bit [7]
@@ -1522,7 +1569,7 @@ static int af9035_tuner_attach(struct dvb_usb_adapter *adap)
return 0;
err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&intf->dev, "failed=%d\n", ret);
return ret;
}
@@ -1531,10 +1578,11 @@ static int it930x_tuner_attach(struct dvb_usb_adapter *adap)
{
struct state *state = adap_to_priv(adap);
struct dvb_usb_device *d = adap_to_d(adap);
+ struct usb_interface *intf = d->intf;
int ret;
struct si2157_config si2157_config;
- dev_dbg(&d->udev->dev, "%s: adap->id=%d\n", __func__, adap->id);
+ dev_dbg(&intf->dev, "adap->id=%d\n", adap->id);
/* I2C master bus 2 clock speed 300k */
ret = af9035_wr_reg(d, 0x00f6a7, 0x07);
@@ -1590,7 +1638,7 @@ static int it930x_tuner_attach(struct dvb_usb_adapter *adap)
return 0;
err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&intf->dev, "failed=%d\n", ret);
return ret;
}
@@ -1600,8 +1648,9 @@ static int it930x_tuner_detach(struct dvb_usb_adapter *adap)
{
struct state *state = adap_to_priv(adap);
struct dvb_usb_device *d = adap_to_d(adap);
+ struct usb_interface *intf = d->intf;
- dev_dbg(&d->udev->dev, "adap->id=%d\n", adap->id);
+ dev_dbg(&intf->dev, "adap->id=%d\n", adap->id);
if (adap->id == 1) {
if (state->i2c_client[3])
@@ -1619,8 +1668,9 @@ static int af9035_tuner_detach(struct dvb_usb_adapter *adap)
{
struct state *state = adap_to_priv(adap);
struct dvb_usb_device *d = adap_to_d(adap);
+ struct usb_interface *intf = d->intf;
- dev_dbg(&d->udev->dev, "%s: adap->id=%d\n", __func__, adap->id);
+ dev_dbg(&intf->dev, "adap->id=%d\n", adap->id);
switch (state->af9033_config[adap->id].tuner) {
case AF9033_TUNER_TUA9001:
@@ -1646,6 +1696,7 @@ static int af9035_tuner_detach(struct dvb_usb_adapter *adap)
static int af9035_init(struct dvb_usb_device *d)
{
struct state *state = d_to_priv(d);
+ struct usb_interface *intf = d->intf;
int ret, i;
u16 frame_size = (d->udev->speed == USB_SPEED_FULL ? 5 : 87) * 188 / 4;
u8 packet_size = (d->udev->speed == USB_SPEED_FULL ? 64 : 512) / 4;
@@ -1670,9 +1721,8 @@ static int af9035_init(struct dvb_usb_device *d)
{ 0x80f9a4, 0x00, 0x01 },
};
- dev_dbg(&d->udev->dev,
- "%s: USB speed=%d frame_size=%04x packet_size=%02x\n",
- __func__, d->udev->speed, frame_size, packet_size);
+ dev_dbg(&intf->dev, "USB speed=%d frame_size=%04x packet_size=%02x\n",
+ d->udev->speed, frame_size, packet_size);
/* init endpoints */
for (i = 0; i < ARRAY_SIZE(tab); i++) {
@@ -1685,7 +1735,7 @@ static int af9035_init(struct dvb_usb_device *d)
return 0;
err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&intf->dev, "failed=%d\n", ret);
return ret;
}
@@ -1693,6 +1743,7 @@ err:
static int it930x_init(struct dvb_usb_device *d)
{
struct state *state = d_to_priv(d);
+ struct usb_interface *intf = d->intf;
int ret, i;
u16 frame_size = (d->udev->speed == USB_SPEED_FULL ? 5 : 816) * 188 / 4;
u8 packet_size = (d->udev->speed == USB_SPEED_FULL ? 64 : 512) / 4;
@@ -1752,9 +1803,8 @@ static int it930x_init(struct dvb_usb_device *d)
{ 0x00da5a, 0x1f, 0xff }, /* ts_fail_ignore */
};
- dev_dbg(&d->udev->dev,
- "%s: USB speed=%d frame_size=%04x packet_size=%02x\n",
- __func__, d->udev->speed, frame_size, packet_size);
+ dev_dbg(&intf->dev, "USB speed=%d frame_size=%04x packet_size=%02x\n",
+ d->udev->speed, frame_size, packet_size);
/* init endpoints */
for (i = 0; i < ARRAY_SIZE(tab); i++) {
@@ -1767,7 +1817,7 @@ static int it930x_init(struct dvb_usb_device *d)
return 0;
err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&intf->dev, "failed=%d\n", ret);
return ret;
}
@@ -1776,6 +1826,7 @@ err:
#if IS_ENABLED(CONFIG_RC_CORE)
static int af9035_rc_query(struct dvb_usb_device *d)
{
+ struct usb_interface *intf = d->intf;
int ret;
u32 key;
u8 buf[4];
@@ -1801,14 +1852,14 @@ static int af9035_rc_query(struct dvb_usb_device *d)
buf[2] << 8 | buf[3]);
}
- dev_dbg(&d->udev->dev, "%s: %*ph\n", __func__, 4, buf);
+ dev_dbg(&intf->dev, "%*ph\n", 4, buf);
rc_keydown(d->rc_dev, RC_TYPE_NEC, key, 0);
return 0;
err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&intf->dev, "failed=%d\n", ret);
return ret;
}
@@ -1816,6 +1867,7 @@ err:
static int af9035_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
{
struct state *state = d_to_priv(d);
+ struct usb_interface *intf = d->intf;
int ret;
u8 tmp;
@@ -1823,7 +1875,7 @@ static int af9035_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
if (ret < 0)
goto err;
- dev_dbg(&d->udev->dev, "%s: ir_mode=%02x\n", __func__, tmp);
+ dev_dbg(&intf->dev, "ir_mode=%02x\n", tmp);
/* don't activate rc if in HID mode or if not available */
if (tmp == 5) {
@@ -1832,7 +1884,7 @@ static int af9035_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
if (ret < 0)
goto err;
- dev_dbg(&d->udev->dev, "%s: ir_type=%02x\n", __func__, tmp);
+ dev_dbg(&intf->dev, "ir_type=%02x\n", tmp);
switch (tmp) {
case 0: /* NEC */
@@ -1855,7 +1907,7 @@ static int af9035_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc)
return 0;
err:
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&intf->dev, "failed=%d\n", ret);
return ret;
}
@@ -1867,8 +1919,9 @@ static int af9035_get_stream_config(struct dvb_frontend *fe, u8 *ts_type,
struct usb_data_stream_properties *stream)
{
struct dvb_usb_device *d = fe_to_d(fe);
+ struct usb_interface *intf = d->intf;
- dev_dbg(&d->udev->dev, "%s: adap=%d\n", __func__, fe_to_adap(fe)->id);
+ dev_dbg(&intf->dev, "adap=%d\n", fe_to_adap(fe)->id);
if (d->udev->speed == USB_SPEED_FULL)
stream->u.bulk.buffersize = 5 * 188;
@@ -1920,7 +1973,7 @@ static int af9035_probe(struct usb_interface *intf,
if ((le16_to_cpu(udev->descriptor.idVendor) == USB_VID_TERRATEC) &&
(le16_to_cpu(udev->descriptor.idProduct) == 0x0099)) {
if (!strcmp("Afatech", manufacturer)) {
- dev_dbg(&udev->dev, "%s: rejecting device\n", __func__);
+ dev_dbg(&udev->dev, "rejecting device\n");
return -ENODEV;
}
}
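A pattern running through the af9035.c hunks above: the "%s: ..." / __func__ boilerplate disappears from the dev_dbg() calls because dynamic debug can prepend the function name at runtime instead of it being baked into every format string. A sketch, with the control write shown as a comment (module name and flags as commonly used; treat them as an assumption):

    #include <linux/device.h>
    #include <linux/usb.h>

    /*
     * echo 'module dvb_usb_af9035 +pf' \
     *     > /sys/kernel/debug/dynamic_debug/control
     * (+p enables the callsite, +f prefixes the function name)
     */
    static void example_dbg(struct usb_interface *intf, int ret)
    {
    	dev_dbg(&intf->dev, "failed=%d\n", ret);	/* no __func__ */
    }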
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.h b/drivers/media/usb/dvb-usb-v2/af9035.h
index dc5849c6b..d1e5c4a34 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.h
+++ b/drivers/media/usb/dvb-usb-v2/af9035.h
@@ -62,6 +62,7 @@ struct state {
u8 chip_version;
u16 chip_type;
u8 dual_mode:1;
+ u8 no_read:1;
u16 eeprom_addr;
u8 af9033_i2c_addr[2];
struct af9033_config af9033_config[2];
@@ -112,7 +113,7 @@ static const u32 clock_lut_it9135[] = {
* 0 TS
* 1 DCA + PIP
* 3 PIP
- * 5 DCA + PIP
+ * 5 DCA + PIP (AF9035 only)
* n DCA
*
* Values 0, 3 and 5 are seen to this day. 0 for single TS and 3/5 for dual TS.
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index eb7af8cb8..6643762a9 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -624,7 +624,7 @@ static int rtl28xxu_identify_state(struct dvb_usb_device *d, const char **name)
dev_dbg(&d->intf->dev, "chip_id=%u\n", dev->chip_id);
/* Retry failed I2C messages */
- d->i2c_adap.retries = 1;
+ d->i2c_adap.retries = 3;
d->i2c_adap.timeout = msecs_to_jiffies(10);
return WARM;
diff --git a/drivers/media/usb/dvb-usb/dtt200u.c b/drivers/media/usb/dvb-usb/dtt200u.c
index 68056ca9a..0bb294812 100644
--- a/drivers/media/usb/dvb-usb/dtt200u.c
+++ b/drivers/media/usb/dvb-usb/dtt200u.c
@@ -55,36 +55,36 @@ static int dtt200u_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid,
return dvb_usb_generic_write(adap->dev, b_pid, 4);
}
-/* remote control */
-/* key list for the tiny remote control (Yakumo, don't know about the others) */
-static struct rc_map_table rc_map_dtt200u_table[] = {
- { 0x8001, KEY_MUTE },
- { 0x8002, KEY_CHANNELDOWN },
- { 0x8003, KEY_VOLUMEDOWN },
- { 0x8004, KEY_1 },
- { 0x8005, KEY_2 },
- { 0x8006, KEY_3 },
- { 0x8007, KEY_4 },
- { 0x8008, KEY_5 },
- { 0x8009, KEY_6 },
- { 0x800a, KEY_7 },
- { 0x800c, KEY_ZOOM },
- { 0x800d, KEY_0 },
- { 0x800e, KEY_SELECT },
- { 0x8012, KEY_POWER },
- { 0x801a, KEY_CHANNELUP },
- { 0x801b, KEY_8 },
- { 0x801e, KEY_VOLUMEUP },
- { 0x801f, KEY_9 },
-};
-
-static int dtt200u_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
+static int dtt200u_rc_query(struct dvb_usb_device *d)
{
u8 key[5],cmd = GET_RC_CODE;
+ u32 scancode;
+
dvb_usb_generic_rw(d,&cmd,1,key,5,0);
- dvb_usb_nec_rc_key_to_event(d,key,event,state);
+ if (key[0] == 1) {
+ scancode = key[1];
+ if ((u8) ~key[1] != key[2]) {
+ /* Extended NEC */
+ scancode = scancode << 8;
+ scancode |= key[2];
+ }
+ scancode = scancode << 8;
+ scancode |= key[3];
+
+ /* Check command checksum is ok */
+ if ((u8) ~key[3] == key[4])
+ rc_keydown(d->rc_dev, RC_TYPE_NEC, scancode, 0);
+ else
+ rc_keyup(d->rc_dev);
+ } else if (key[0] == 2) {
+ rc_repeat(d->rc_dev);
+ } else {
+ rc_keyup(d->rc_dev);
+ }
+
if (key[0] != 0)
deb_info("key: %*ph\n", 5, key);
+
return 0;
}
@@ -164,11 +164,11 @@ static struct dvb_usb_device_properties dtt200u_properties = {
},
.power_ctrl = dtt200u_power_ctrl,
- .rc.legacy = {
+ .rc.core = {
.rc_interval = 300,
- .rc_map_table = rc_map_dtt200u_table,
- .rc_map_size = ARRAY_SIZE(rc_map_dtt200u_table),
+ .rc_codes = RC_MAP_DTT200U,
.rc_query = dtt200u_rc_query,
+ .allowed_protos = RC_BIT_NEC,
},
.generic_bulk_ctrl_endpoint = 0x01,
@@ -214,11 +214,11 @@ static struct dvb_usb_device_properties wt220u_properties = {
},
.power_ctrl = dtt200u_power_ctrl,
- .rc.legacy = {
+ .rc.core = {
.rc_interval = 300,
- .rc_map_table = rc_map_dtt200u_table,
- .rc_map_size = ARRAY_SIZE(rc_map_dtt200u_table),
+ .rc_codes = RC_MAP_DTT200U,
.rc_query = dtt200u_rc_query,
+ .allowed_protos = RC_BIT_NEC,
},
.generic_bulk_ctrl_endpoint = 0x01,
@@ -264,11 +264,11 @@ static struct dvb_usb_device_properties wt220u_fc_properties = {
},
.power_ctrl = dtt200u_power_ctrl,
- .rc.legacy = {
+ .rc.core = {
.rc_interval = 300,
- .rc_map_table = rc_map_dtt200u_table,
- .rc_map_size = ARRAY_SIZE(rc_map_dtt200u_table),
+ .rc_codes = RC_MAP_DTT200U,
.rc_query = dtt200u_rc_query,
+ .allowed_protos = RC_BIT_NEC,
},
.generic_bulk_ctrl_endpoint = 0x01,
@@ -314,11 +314,11 @@ static struct dvb_usb_device_properties wt220u_zl0353_properties = {
},
.power_ctrl = dtt200u_power_ctrl,
- .rc.legacy = {
+ .rc.core = {
.rc_interval = 300,
- .rc_map_table = rc_map_dtt200u_table,
- .rc_map_size = ARRAY_SIZE(rc_map_dtt200u_table),
+ .rc_codes = RC_MAP_DTT200U,
.rc_query = dtt200u_rc_query,
+ .allowed_protos = RC_BIT_NEC,
},
.generic_bulk_ctrl_endpoint = 0x01,
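The new dtt200u_rc_query() above assembles an NEC scancode from the five bytes the firmware returns: key[1]/key[2] hold the address and its complement (if they are not complements, the remote speaks extended NEC and both bytes enter the scancode), while key[3]/key[4] hold the command and its checksum. A condensed sketch of just that decode step (helper name illustrative):

    #include <linux/types.h>

    static u32 example_nec_scancode(const u8 key[5], bool *valid)
    {
    	u32 scancode = key[1];

    	if ((u8)~key[1] != key[2])	/* extended NEC: keep both bytes */
    		scancode = scancode << 8 | key[2];
    	scancode = scancode << 8 | key[3];

    	*valid = (u8)~key[3] == key[4];	/* command checksum */
    	return scancode;
    }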
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-dvb.c b/drivers/media/usb/dvb-usb/dvb-usb-dvb.c
index 6477b04e9..a04c0a250 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-dvb.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-dvb.c
@@ -320,8 +320,6 @@ int dvb_usb_adapter_frontend_init(struct dvb_usb_adapter *adap)
adap->num_frontends_initialized++;
}
- if (ret)
- return ret;
ret = dvb_create_media_graph(&adap->dvb_adap, true);
if (ret)
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index ec5d4caf3..88d608d00 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -847,7 +847,7 @@ static int su3000_power_ctrl(struct dvb_usb_device *d, int i)
struct dw2102_state *state = (struct dw2102_state *)d->priv;
u8 obuf[] = {0xde, 0};
- info("%s: %d, initialized %d\n", __func__, i, state->initialized);
+ info("%s: %d, initialized %d", __func__, i, state->initialized);
if (i && !state->initialized) {
state->initialized = 1;
@@ -894,7 +894,7 @@ static int su3000_identify_state(struct usb_device *udev,
struct dvb_usb_device_description **desc,
int *cold)
{
- info("%s\n", __func__);
+ info("%s", __func__);
*cold = 0;
return 0;
@@ -1132,7 +1132,7 @@ static int dw2104_frontend_attach(struct dvb_usb_adapter *d)
tuner_ops->set_bandwidth = stb6100_set_bandw;
tuner_ops->get_bandwidth = stb6100_get_bandw;
d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
- info("Attached STV0900+STB6100!\n");
+ info("Attached STV0900+STB6100!");
return 0;
}
}
@@ -1146,7 +1146,7 @@ static int dw2104_frontend_attach(struct dvb_usb_adapter *d)
&dw2104_stv6110_config,
&d->dev->i2c_adap)) {
d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
- info("Attached STV0900+STV6110A!\n");
+ info("Attached STV0900+STV6110A!");
return 0;
}
}
@@ -1157,7 +1157,7 @@ static int dw2104_frontend_attach(struct dvb_usb_adapter *d)
&d->dev->i2c_adap);
if (d->fe_adap[0].fe != NULL) {
d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
- info("Attached cx24116!\n");
+ info("Attached cx24116!");
return 0;
}
}
@@ -1168,7 +1168,7 @@ static int dw2104_frontend_attach(struct dvb_usb_adapter *d)
dvb_attach(ts2020_attach, d->fe_adap[0].fe,
&dw2104_ts2020_config, &d->dev->i2c_adap);
d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
- info("Attached DS3000!\n");
+ info("Attached DS3000!");
return 0;
}
@@ -1187,7 +1187,7 @@ static int dw2102_frontend_attach(struct dvb_usb_adapter *d)
&d->dev->i2c_adap);
if (d->fe_adap[0].fe != NULL) {
d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
- info("Attached si21xx!\n");
+ info("Attached si21xx!");
return 0;
}
}
@@ -1199,7 +1199,7 @@ static int dw2102_frontend_attach(struct dvb_usb_adapter *d)
if (dvb_attach(stb6000_attach, d->fe_adap[0].fe, 0x61,
&d->dev->i2c_adap)) {
d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
- info("Attached stv0288!\n");
+ info("Attached stv0288!");
return 0;
}
}
@@ -1211,7 +1211,7 @@ static int dw2102_frontend_attach(struct dvb_usb_adapter *d)
&d->dev->i2c_adap);
if (d->fe_adap[0].fe != NULL) {
d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
- info("Attached stv0299!\n");
+ info("Attached stv0299!");
return 0;
}
}
@@ -1223,7 +1223,7 @@ static int dw3101_frontend_attach(struct dvb_usb_adapter *d)
d->fe_adap[0].fe = dvb_attach(tda10023_attach, &dw3101_tda10023_config,
&d->dev->i2c_adap, 0x48);
if (d->fe_adap[0].fe != NULL) {
- info("Attached tda10023!\n");
+ info("Attached tda10023!");
return 0;
}
return -EIO;
@@ -1237,7 +1237,7 @@ static int zl100313_frontend_attach(struct dvb_usb_adapter *d)
if (dvb_attach(zl10039_attach, d->fe_adap[0].fe, 0x60,
&d->dev->i2c_adap)) {
d->fe_adap[0].fe->ops.set_voltage = dw210x_set_voltage;
- info("Attached zl100313+zl10039!\n");
+ info("Attached zl100313+zl10039!");
return 0;
}
}
@@ -1262,7 +1262,7 @@ static int stv0288_frontend_attach(struct dvb_usb_adapter *d)
dw210x_op_rw(d->dev->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG);
- info("Attached stv0288+stb6000!\n");
+ info("Attached stv0288+stb6000!");
return 0;
@@ -1287,7 +1287,7 @@ static int ds3000_frontend_attach(struct dvb_usb_adapter *d)
dw210x_op_rw(d->dev->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG);
- info("Attached ds3000+ts2020!\n");
+ info("Attached ds3000+ts2020!");
return 0;
}
@@ -1305,7 +1305,7 @@ static int prof_7500_frontend_attach(struct dvb_usb_adapter *d)
dw210x_op_rw(d->dev->udev, 0x8a, 0, 0, obuf, 2, DW210X_WRITE_MSG);
- info("Attached STV0900+STB6100A!\n");
+ info("Attached STV0900+STB6100A!");
return 0;
}
@@ -1353,11 +1353,11 @@ static int su3000_frontend_attach(struct dvb_usb_adapter *d)
if (dvb_attach(ts2020_attach, d->fe_adap[0].fe,
&dw2104_ts2020_config,
&d->dev->i2c_adap)) {
- info("Attached DS3000/TS2020!\n");
+ info("Attached DS3000/TS2020!");
return 0;
}
- info("Failed to attach DS3000/TS2020!\n");
+ info("Failed to attach DS3000/TS2020!");
return -EIO;
}
@@ -1402,12 +1402,12 @@ static int t220_frontend_attach(struct dvb_usb_adapter *d)
if (d->fe_adap[0].fe != NULL) {
if (dvb_attach(tda18271_attach, d->fe_adap[0].fe, 0x60,
&d->dev->i2c_adap, &tda18271_config)) {
- info("Attached TDA18271HD/CXD2820R!\n");
+ info("Attached TDA18271HD/CXD2820R!");
return 0;
}
}
- info("Failed to attach TDA18271HD/CXD2820R!\n");
+ info("Failed to attach TDA18271HD/CXD2820R!");
return -EIO;
}
@@ -1428,11 +1428,11 @@ static int m88rs2000_frontend_attach(struct dvb_usb_adapter *d)
if (dvb_attach(ts2020_attach, d->fe_adap[0].fe,
&dw2104_ts2020_config,
&d->dev->i2c_adap)) {
- info("Attached RS2000/TS2020!\n");
+ info("Attached RS2000/TS2020!");
return 0;
}
- info("Failed to attach RS2000/TS2020!\n");
+ info("Failed to attach RS2000/TS2020!");
return -EIO;
}
@@ -1641,6 +1641,7 @@ enum dw2102_table_entry {
TEVII_S421,
TEVII_S632,
TERRATEC_CINERGY_S2_R2,
+ TERRATEC_CINERGY_S2_R3,
GOTVIEW_SAT_HD,
GENIATECH_T220,
TECHNOTREND_S2_4600,
@@ -1669,6 +1670,7 @@ static struct usb_device_id dw2102_table[] = {
[TEVII_S421] = {USB_DEVICE(0x9022, USB_PID_TEVII_S421)},
[TEVII_S632] = {USB_DEVICE(0x9022, USB_PID_TEVII_S632)},
[TERRATEC_CINERGY_S2_R2] = {USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_S2_R2)},
+ [TERRATEC_CINERGY_S2_R3] = {USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_S2_R3)},
[GOTVIEW_SAT_HD] = {USB_DEVICE(0x1FE1, USB_PID_GOTVIEW_SAT_HD)},
[GENIATECH_T220] = {USB_DEVICE(0x1f4d, 0xD220)},
[TECHNOTREND_S2_4600] = {USB_DEVICE(USB_VID_TECHNOTREND,
@@ -2083,7 +2085,7 @@ static struct dvb_usb_device_properties su3000_properties = {
}},
}
},
- .num_device_descs = 5,
+ .num_device_descs = 6,
.devices = {
{ "SU3000HD DVB-S USB2.0",
{ &dw2102_table[GENIATECH_SU3000], NULL },
@@ -2101,6 +2103,10 @@ static struct dvb_usb_device_properties su3000_properties = {
{ &dw2102_table[TERRATEC_CINERGY_S2_R2], NULL },
{ NULL },
},
+ { "Terratec Cinergy S2 USB HD Rev.3",
+ { &dw2102_table[TERRATEC_CINERGY_S2_R3], NULL },
+ { NULL },
+ },
{ "GOTVIEW Satellite HD",
{ &dw2102_table[GOTVIEW_SAT_HD], NULL },
{ NULL },
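Registering the new TerraTec revision touches three coordinated places: the dw2102_table_entry enum, the usb_device_id table, and the .devices[] list together with its num_device_descs count. A minimal sketch of the same pattern for a hypothetical board (vendor/product IDs invented for illustration):

enum dw2102_table_entry { /* ... */ NEW_BOARD };

static struct usb_device_id dw2102_table[] = {
	/* ... */
	[NEW_BOARD] = {USB_DEVICE(0x1234, 0x5678)},
};

/* and in the matching dvb_usb_device_properties: */
	.num_device_descs = 7,	/* must track the entries below */
	.devices = {
		/* ... */
		{ "Hypothetical board",
			{ &dw2102_table[NEW_BOARD], NULL },
			{ NULL },
		},
	},

Forgetting to bump num_device_descs silently hides the new entry, which is why the hunk above changes 5 to 6.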
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index 7ec9ce2cd..4d1c7b132 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -904,17 +904,6 @@ static struct tda18271_config c3tech_duo_tda18271_config = {
.small_i2c = TDA18271_03_BYTE_CHUNK_INIT,
};
-static const struct m88ds3103_config pctv_461e_m88ds3103_config = {
- .i2c_addr = 0x68,
- .clock = 27000000,
- .i2c_wr_max = 33,
- .clock_out = 0,
- .ts_mode = M88DS3103_TS_PARALLEL,
- .ts_clk = 16000,
- .ts_clk_pol = 1,
- .agc = 0x99,
-};
-
static struct tda18271_std_map drx_j_std_map = {
.atsc_6 = { .if_freq = 5000, .agc_mode = 3, .std = 0, .if_lvl = 1,
.rfagc_top = 0x37, },
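Removing pctv_461e_m88ds3103_config leaves no reader of the struct behind, so it was dead weight (and a -Wunused-const-variable warning with newer compilers); presumably the PCTV 461e path now binds the demodulator as a regular I2C client instead of passing an attach-time config. A sketch of that binding style, with illustrative values only:

	struct m88ds3103_platform_data pdata = {
		.clk = 27000000,
		.ts_mode = M88DS3103_TS_PARALLEL,
	};
	struct i2c_board_info info = {
		.type = "m88ds3103",
		.addr = 0x68,
		.platform_data = &pdata,
	};
	struct i2c_client *client =
		i2c_new_device(&dev->i2c_adap[dev->def_i2c_bus], &info);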
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index a19b5c8b5..1a9e1e556 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -507,9 +507,8 @@ static int em28xx_i2c_xfer(struct i2c_adapter *i2c_adap,
if (dev->disconnected)
return -ENODEV;
- rc = rt_mutex_trylock(&dev->i2c_bus_lock);
- if (rc < 0)
- return rc;
+ if (!rt_mutex_trylock(&dev->i2c_bus_lock))
+ return -EAGAIN;
/* Switch I2C bus if needed */
if (bus != dev->cur_i2c_bus &&
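rt_mutex_trylock() returns 1 on success and 0 on contention, never a negative errno, so the old "rc < 0" check could not fire and a contended transfer went ahead without holding the bus lock. The fix tests the boolean result directly; a minimal sketch of the corrected pattern:

	if (!rt_mutex_trylock(&dev->i2c_bus_lock))
		return -EAGAIN;	/* bus busy; the I2C core may retry */

	/* ... bus switched and transfer performed under the lock ... */
	rt_mutex_unlock(&dev->i2c_bus_lock);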
diff --git a/drivers/media/usb/em28xx/em28xx-vbi.c b/drivers/media/usb/em28xx/em28xx-vbi.c
index fe94c9225..836c6b53b 100644
--- a/drivers/media/usb/em28xx/em28xx-vbi.c
+++ b/drivers/media/usb/em28xx/em28xx-vbi.c
@@ -33,7 +33,7 @@
static int vbi_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct em28xx *dev = vb2_get_drv_priv(vq);
struct em28xx_v4l2 *v4l2 = dev->v4l2;
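This is the 4.8 videobuf2 interface change: queue_setup() now receives struct device *alloc_devs[] instead of the opaque void *alloc_ctxs[], so a driver names the device that DMA memory is allocated against rather than a per-plane context. Drivers like these, which are happy with the queue's default device, simply leave the array alone; a sketch of the new callback shape (the buffer-size constant is illustrative):

static int queue_setup(struct vb2_queue *vq,
		       unsigned int *nbuffers, unsigned int *nplanes,
		       unsigned int sizes[], struct device *alloc_devs[])
{
	*nplanes = 1;
	sizes[0] = EXAMPLE_BUF_SIZE;	/* illustrative constant */
	return 0;			/* alloc_devs[] left at the default */
}

The identical signature change recurs in em28xx-video.c and go7007-v4l2.c below.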
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index 44834b2ef..796869521 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -1013,7 +1013,7 @@ static void em28xx_v4l2_create_entities(struct em28xx *dev)
static int queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct em28xx *dev = vb2_get_drv_priv(vq);
struct em28xx_v4l2 *v4l2 = dev->v4l2;
diff --git a/drivers/media/usb/go7007/go7007-v4l2.c b/drivers/media/usb/go7007/go7007-v4l2.c
index ea01ee5df..af8458996 100644
--- a/drivers/media/usb/go7007/go7007-v4l2.c
+++ b/drivers/media/usb/go7007/go7007-v4l2.c
@@ -370,7 +370,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
static int go7007_queue_setup(struct vb2_queue *q,
unsigned int *num_buffers, unsigned int *num_planes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
sizes[0] = GO7007_BUF_SIZE;
*num_planes = 1;
diff --git a/drivers/media/usb/gspca/cpia1.c b/drivers/media/usb/gspca/cpia1.c
index f23df4a9d..52b88e9e6 100644
--- a/drivers/media/usb/gspca/cpia1.c
+++ b/drivers/media/usb/gspca/cpia1.c
@@ -1624,7 +1624,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
static void sd_stopN(struct gspca_dev *gspca_dev)
{
- struct sd *sd = (struct sd *) gspca_dev;
+ struct sd *sd __maybe_unused = (struct sd *) gspca_dev;
command_pause(gspca_dev);
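When CONFIG_INPUT is disabled, the #if IS_ENABLED(CONFIG_INPUT) block that dereferences sd compiles out and gcc warns about the now-unused local. The __maybe_unused annotation silences that warning without #ifdef-ing the declaration itself; a sketch of the resulting shape:

static void sd_stopN(struct gspca_dev *gspca_dev)
{
	struct sd *sd __maybe_unused = (struct sd *) gspca_dev;

	command_pause(gspca_dev);
#if IS_ENABLED(CONFIG_INPUT)
	/* input-event teardown that actually uses sd */
#endif
}

konica.c below gets the identical treatment.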
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index af5cd8213..b17bd7ebc 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -522,7 +522,7 @@ static int frame_alloc(struct gspca_dev *gspca_dev, struct file *file,
frame = &gspca_dev->frame[i];
frame->v4l2_buf.index = i;
frame->v4l2_buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- frame->v4l2_buf.flags = 0;
+ frame->v4l2_buf.flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
frame->v4l2_buf.field = V4L2_FIELD_NONE;
frame->v4l2_buf.length = frsz;
frame->v4l2_buf.memory = memory;
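gspca stamps its buffers from the monotonic clock, so initializing the flags with V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC, instead of 0 (which means "unknown clock"), tells userspace which clock the timestamps come from. A userspace-side sketch of how the flag is meant to be consumed:

	/* after VIDIOC_DQBUF on struct v4l2_buffer buf: */
	if ((buf.flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
	    V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC)
		/* buf.timestamp is comparable with
		 * clock_gettime(CLOCK_MONOTONIC, ...) */;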
@@ -705,7 +705,7 @@ static int build_isoc_ep_tb(struct gspca_dev *gspca_dev,
psize = (psize & 0x07ff) * (1 + ((psize >> 11) & 3));
bandwidth = psize * 1000;
if (gspca_dev->dev->speed == USB_SPEED_HIGH
- || gspca_dev->dev->speed == USB_SPEED_SUPER)
+ || gspca_dev->dev->speed >= USB_SPEED_SUPER)
bandwidth *= 8;
bandwidth /= 1 << (ep->desc.bInterval - 1);
if (bandwidth <= last_bw)
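Changing "==" to ">=" extends the 8x bandwidth multiplier to SuperSpeed Plus (USB 3.1) devices, relying on enum usb_device_speed being ordered from slowest to fastest. From include/linux/usb/ch9.h, abridged:

enum usb_device_speed {
	USB_SPEED_UNKNOWN = 0,
	USB_SPEED_LOW, USB_SPEED_FULL,	/* usb 1.1 */
	USB_SPEED_HIGH,			/* usb 2.0 */
	USB_SPEED_WIRELESS,		/* wireless (usb 2.5) */
	USB_SPEED_SUPER,		/* usb 3.0 */
	USB_SPEED_SUPER_PLUS,		/* usb 3.1 */
};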
@@ -996,6 +996,19 @@ static int wxh_to_mode(struct gspca_dev *gspca_dev,
{
int i;
+ for (i = 0; i < gspca_dev->cam.nmodes; i++) {
+ if (width == gspca_dev->cam.cam_mode[i].width
+ && height == gspca_dev->cam.cam_mode[i].height)
+ return i;
+ }
+ return -EINVAL;
+}
+
+static int wxh_to_nearest_mode(struct gspca_dev *gspca_dev,
+ int width, int height)
+{
+ int i;
+
for (i = gspca_dev->cam.nmodes; --i > 0; ) {
if (width >= gspca_dev->cam.cam_mode[i].width
&& height >= gspca_dev->cam.cam_mode[i].height)
@@ -1125,8 +1138,8 @@ static int try_fmt_vid_cap(struct gspca_dev *gspca_dev,
PDEBUG_MODE(gspca_dev, D_CONF, "try fmt cap",
fmt->fmt.pix.pixelformat, w, h);
- /* search the closest mode for width and height */
- mode = wxh_to_mode(gspca_dev, w, h);
+ /* search the nearest mode for width and height */
+ mode = wxh_to_nearest_mode(gspca_dev, w, h);
/* OK if right palette */
if (gspca_dev->cam.cam_mode[mode].pixelformat
@@ -1233,9 +1246,13 @@ static int vidioc_enum_frameintervals(struct file *filp, void *priv,
struct v4l2_frmivalenum *fival)
{
struct gspca_dev *gspca_dev = video_drvdata(filp);
- int mode = wxh_to_mode(gspca_dev, fival->width, fival->height);
+ int mode;
__u32 i;
+ mode = wxh_to_mode(gspca_dev, fival->width, fival->height);
+ if (mode < 0)
+ return -EINVAL;
+
if (gspca_dev->cam.mode_framerates == NULL ||
gspca_dev->cam.mode_framerates[mode].nrates == 0)
return -EINVAL;
@@ -1246,7 +1263,7 @@ static int vidioc_enum_frameintervals(struct file *filp, void *priv,
for (i = 0; i < gspca_dev->cam.mode_framerates[mode].nrates; i++) {
if (fival->index == i) {
- fival->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
fival->discrete.numerator = 1;
fival->discrete.denominator =
gspca_dev->cam.mode_framerates[mode].rates[i];
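Two related fixes close out the gspca core changes. wxh_to_mode() is split in two: the exact-match variant now returns -EINVAL for an unsupported width/height, which VIDIOC_ENUM_FRAMEINTERVALS checks before indexing the framerate tables, while wxh_to_nearest_mode() keeps the old best-effort behavior for try_fmt. And the frame-interval enumeration now reports V4L2_FRMIVAL_TYPE_DISCRETE; the old V4L2_FRMSIZE_TYPE_DISCRETE happens to share the numeric value 1, so userspace saw the right number through the wrong enum. A sketch of the corrected caller:

	mode = wxh_to_mode(gspca_dev, fival->width, fival->height);
	if (mode < 0)
		return -EINVAL;	/* size not offered by this sensor */

	fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;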
diff --git a/drivers/media/usb/gspca/konica.c b/drivers/media/usb/gspca/konica.c
index 39c96bb4c..0712b1bc9 100644
--- a/drivers/media/usb/gspca/konica.c
+++ b/drivers/media/usb/gspca/konica.c
@@ -243,7 +243,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
static void sd_stopN(struct gspca_dev *gspca_dev)
{
- struct sd *sd = (struct sd *) gspca_dev;
+ struct sd *sd __maybe_unused = (struct sd *) gspca_dev;
konica_stream_off(gspca_dev);
#if IS_ENABLED(CONFIG_INPUT)
diff --git a/drivers/media/usb/gspca/m5602/m5602_bridge.h b/drivers/media/usb/gspca/m5602/m5602_bridge.h
index 19eb1a64f..43ebc03d8 100644
--- a/drivers/media/usb/gspca/m5602/m5602_bridge.h
+++ b/drivers/media/usb/gspca/m5602/m5602_bridge.h
@@ -115,21 +115,6 @@
/*****************************************************************************/
-/* A skeleton used for sending messages to the m5602 bridge */
-static const unsigned char bridge_urb_skeleton[] = {
- 0x13, 0x00, 0x81, 0x00
-};
-
-/* A skeleton used for sending messages to the sensor */
-static const unsigned char sensor_urb_skeleton[] = {
- 0x23, M5602_XB_GPIO_EN_H, 0x81, 0x06,
- 0x23, M5602_XB_MISC_CTRL, 0x81, 0x80,
- 0x13, M5602_XB_I2C_DEV_ADDR, 0x81, 0x00,
- 0x13, M5602_XB_I2C_REG_ADDR, 0x81, 0x00,
- 0x13, M5602_XB_I2C_DATA, 0x81, 0x00,
- 0x13, M5602_XB_I2C_CTRL, 0x81, 0x11
-};
-
struct sd {
struct gspca_dev gspca_dev;
diff --git a/drivers/media/usb/gspca/m5602/m5602_core.c b/drivers/media/usb/gspca/m5602/m5602_core.c
index d926e62cb..e4a0658e3 100644
--- a/drivers/media/usb/gspca/m5602/m5602_core.c
+++ b/drivers/media/usb/gspca/m5602/m5602_core.c
@@ -37,6 +37,21 @@ static const struct usb_device_id m5602_table[] = {
MODULE_DEVICE_TABLE(usb, m5602_table);
+/* A skeleton used for sending messages to the sensor */
+static const unsigned char sensor_urb_skeleton[] = {
+ 0x23, M5602_XB_GPIO_EN_H, 0x81, 0x06,
+ 0x23, M5602_XB_MISC_CTRL, 0x81, 0x80,
+ 0x13, M5602_XB_I2C_DEV_ADDR, 0x81, 0x00,
+ 0x13, M5602_XB_I2C_REG_ADDR, 0x81, 0x00,
+ 0x13, M5602_XB_I2C_DATA, 0x81, 0x00,
+ 0x13, M5602_XB_I2C_CTRL, 0x81, 0x11
+};
+
+/* A skeleton used for sending messages to the m5602 bridge */
+static const unsigned char bridge_urb_skeleton[] = {
+ 0x13, 0x00, 0x81, 0x00
+};
+
/* Reads a byte from the m5602 */
int m5602_read_bridge(struct sd *sd, const u8 address, u8 *i2c_data)
{
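The long run of hunks that follows is a mechanical relocation: the static const register tables (URB skeletons and the preinit/init/start sequences for each sensor) move out of the m5602 headers and into the lone .c file that uses them. Defining arrays in a header hands a private copy to every translation unit that includes it and trips -Wunused-const-variable in the ones that never touch them; defining them next to their only user fixes both. Were a table ever genuinely shared, the usual split would be (a sketch, not part of this patch):

/* m5602_bridge.h */
extern const unsigned char bridge_urb_skeleton[4];

/* m5602_core.c */
const unsigned char bridge_urb_skeleton[4] = {
	0x13, 0x00, 0x81, 0x00
};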
diff --git a/drivers/media/usb/gspca/m5602/m5602_mt9m111.c b/drivers/media/usb/gspca/m5602/m5602_mt9m111.c
index 27fcef11a..7d01ddd7e 100644
--- a/drivers/media/usb/gspca/m5602/m5602_mt9m111.c
+++ b/drivers/media/usb/gspca/m5602/m5602_mt9m111.c
@@ -23,6 +23,150 @@
static int mt9m111_s_ctrl(struct v4l2_ctrl *ctrl);
static void mt9m111_dump_registers(struct sd *sd);
+static const unsigned char preinit_mt9m111[][4] = {
+ {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00},
+ {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0d, 0x00},
+ {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00, 0x00},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
+
+ {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
+ {SENSOR, MT9M111_SC_RESET,
+ MT9M111_RESET |
+ MT9M111_RESTART |
+ MT9M111_ANALOG_STANDBY |
+ MT9M111_CHIP_DISABLE,
+ MT9M111_SHOW_BAD_FRAMES |
+ MT9M111_RESTART_BAD_FRAMES |
+ MT9M111_SYNCHRONIZE_CHANGES},
+
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x05, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x04, 0x00},
+ {BRIDGE, M5602_XB_GPIO_EN_H, 0x3e, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DIR_H, 0x3e, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT_H, 0x02, 0x00},
+ {BRIDGE, M5602_XB_GPIO_EN_L, 0xff, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DIR_L, 0xff, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT_L, 0x00, 0x00},
+
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x07, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x0b, 0x00},
+ {BRIDGE, M5602_XB_GPIO_EN_H, 0x06, 0x00},
+ {BRIDGE, M5602_XB_GPIO_EN_L, 0x00, 0x00},
+
+ {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x0a, 0x00}
+};
+
+static const unsigned char init_mt9m111[][4] = {
+ {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00},
+ {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
+
+ {BRIDGE, M5602_XB_GPIO_EN_H, 0x06, 0x00},
+ {BRIDGE, M5602_XB_GPIO_EN_L, 0x00, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x04, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DIR_H, 0x3e, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DIR_L, 0xff, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT_H, 0x02, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT_L, 0x00, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x07, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x0b, 0x00},
+ {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x0a, 0x00},
+
+ {SENSOR, MT9M111_SC_RESET, 0x00, 0x29},
+ {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
+ {SENSOR, MT9M111_SC_RESET, 0x00, 0x08},
+ {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x01},
+ {SENSOR, MT9M111_CP_OPERATING_MODE_CTL, 0x00,
+ MT9M111_CP_OPERATING_MODE_CTL},
+ {SENSOR, MT9M111_CP_LENS_CORRECTION_1, 0x04, 0x2a},
+ {SENSOR, MT9M111_CP_DEFECT_CORR_CONTEXT_A, 0x00,
+ MT9M111_2D_DEFECT_CORRECTION_ENABLE},
+ {SENSOR, MT9M111_CP_DEFECT_CORR_CONTEXT_B, 0x00,
+ MT9M111_2D_DEFECT_CORRECTION_ENABLE},
+ {SENSOR, MT9M111_CP_LUMA_OFFSET, 0x00, 0x00},
+ {SENSOR, MT9M111_CP_LUMA_CLIP, 0xff, 0x00},
+ {SENSOR, MT9M111_CP_OUTPUT_FORMAT_CTL2_CONTEXT_A, 0x14, 0x00},
+ {SENSOR, MT9M111_CP_OUTPUT_FORMAT_CTL2_CONTEXT_B, 0x14, 0x00},
+ {SENSOR, 0xcd, 0x00, 0x0e},
+ {SENSOR, 0xd0, 0x00, 0x40},
+
+ {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x02},
+ {SENSOR, MT9M111_CC_AUTO_EXPOSURE_PARAMETER_18, 0x00, 0x00},
+ {SENSOR, MT9M111_CC_AWB_PARAMETER_7, 0xef, 0x03},
+
+ {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
+ {SENSOR, 0x33, 0x03, 0x49},
+ {SENSOR, 0x34, 0xc0, 0x19},
+ {SENSOR, 0x3f, 0x20, 0x20},
+ {SENSOR, 0x40, 0x20, 0x20},
+ {SENSOR, 0x5a, 0xc0, 0x0a},
+ {SENSOR, 0x70, 0x7b, 0x0a},
+ {SENSOR, 0x71, 0xff, 0x00},
+ {SENSOR, 0x72, 0x19, 0x0e},
+ {SENSOR, 0x73, 0x18, 0x0f},
+ {SENSOR, 0x74, 0x57, 0x32},
+ {SENSOR, 0x75, 0x56, 0x34},
+ {SENSOR, 0x76, 0x73, 0x35},
+ {SENSOR, 0x77, 0x30, 0x12},
+ {SENSOR, 0x78, 0x79, 0x02},
+ {SENSOR, 0x79, 0x75, 0x06},
+ {SENSOR, 0x7a, 0x77, 0x0a},
+ {SENSOR, 0x7b, 0x78, 0x09},
+ {SENSOR, 0x7c, 0x7d, 0x06},
+ {SENSOR, 0x7d, 0x31, 0x10},
+ {SENSOR, 0x7e, 0x00, 0x7e},
+ {SENSOR, 0x80, 0x59, 0x04},
+ {SENSOR, 0x81, 0x59, 0x04},
+ {SENSOR, 0x82, 0x57, 0x0a},
+ {SENSOR, 0x83, 0x58, 0x0b},
+ {SENSOR, 0x84, 0x47, 0x0c},
+ {SENSOR, 0x85, 0x48, 0x0e},
+ {SENSOR, 0x86, 0x5b, 0x02},
+ {SENSOR, 0x87, 0x00, 0x5c},
+ {SENSOR, MT9M111_CONTEXT_CONTROL, 0x00, MT9M111_SEL_CONTEXT_B},
+ {SENSOR, 0x60, 0x00, 0x80},
+ {SENSOR, 0x61, 0x00, 0x00},
+ {SENSOR, 0x62, 0x00, 0x00},
+ {SENSOR, 0x63, 0x00, 0x00},
+ {SENSOR, 0x64, 0x00, 0x00},
+
+ {SENSOR, MT9M111_SC_ROWSTART, 0x00, 0x0d}, /* 13 */
+ {SENSOR, MT9M111_SC_COLSTART, 0x00, 0x12}, /* 18 */
+ {SENSOR, MT9M111_SC_WINDOW_HEIGHT, 0x04, 0x00}, /* 1024 */
+ {SENSOR, MT9M111_SC_WINDOW_WIDTH, 0x05, 0x10}, /* 1296 */
+ {SENSOR, MT9M111_SC_HBLANK_CONTEXT_B, 0x01, 0x60}, /* 352 */
+ {SENSOR, MT9M111_SC_VBLANK_CONTEXT_B, 0x00, 0x11}, /* 17 */
+ {SENSOR, MT9M111_SC_HBLANK_CONTEXT_A, 0x01, 0x60}, /* 352 */
+ {SENSOR, MT9M111_SC_VBLANK_CONTEXT_A, 0x00, 0x11}, /* 17 */
+ {SENSOR, MT9M111_SC_R_MODE_CONTEXT_A, 0x01, 0x0f}, /* 271 */
+ {SENSOR, 0x30, 0x04, 0x00},
+ /* Set number of blank rows chosen to 400 */
+ {SENSOR, MT9M111_SC_SHUTTER_WIDTH, 0x01, 0x90},
+};
+
+static const unsigned char start_mt9m111[][4] = {
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
+ {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81, 0x00},
+ {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82, 0x00},
+ {BRIDGE, M5602_XB_SIG_INI, 0x01, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
+};
+
static struct v4l2_pix_format mt9m111_modes[] = {
{
640,
diff --git a/drivers/media/usb/gspca/m5602/m5602_mt9m111.h b/drivers/media/usb/gspca/m5602/m5602_mt9m111.h
index 07448d35e..781a16311 100644
--- a/drivers/media/usb/gspca/m5602/m5602_mt9m111.h
+++ b/drivers/media/usb/gspca/m5602/m5602_mt9m111.h
@@ -126,148 +126,4 @@ static const struct m5602_sensor mt9m111 = {
.disconnect = mt9m111_disconnect,
.start = mt9m111_start,
};
-
-static const unsigned char preinit_mt9m111[][4] = {
- {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00},
- {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0d, 0x00},
- {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00, 0x00},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
-
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
- {SENSOR, MT9M111_SC_RESET,
- MT9M111_RESET |
- MT9M111_RESTART |
- MT9M111_ANALOG_STANDBY |
- MT9M111_CHIP_DISABLE,
- MT9M111_SHOW_BAD_FRAMES |
- MT9M111_RESTART_BAD_FRAMES |
- MT9M111_SYNCHRONIZE_CHANGES},
-
- {BRIDGE, M5602_XB_GPIO_DIR, 0x05, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x04, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x3e, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR_H, 0x3e, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT_H, 0x02, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_L, 0xff, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR_L, 0xff, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT_L, 0x00, 0x00},
-
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x07, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x0b, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x06, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_L, 0x00, 0x00},
-
- {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x0a, 0x00}
-};
-
-static const unsigned char init_mt9m111[][4] = {
- {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00},
- {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
-
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x06, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_L, 0x00, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x04, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR_H, 0x3e, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR_L, 0xff, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT_H, 0x02, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT_L, 0x00, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x07, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x0b, 0x00},
- {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x0a, 0x00},
-
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x29},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
- {SENSOR, MT9M111_SC_RESET, 0x00, 0x08},
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x01},
- {SENSOR, MT9M111_CP_OPERATING_MODE_CTL, 0x00,
- MT9M111_CP_OPERATING_MODE_CTL},
- {SENSOR, MT9M111_CP_LENS_CORRECTION_1, 0x04, 0x2a},
- {SENSOR, MT9M111_CP_DEFECT_CORR_CONTEXT_A, 0x00,
- MT9M111_2D_DEFECT_CORRECTION_ENABLE},
- {SENSOR, MT9M111_CP_DEFECT_CORR_CONTEXT_B, 0x00,
- MT9M111_2D_DEFECT_CORRECTION_ENABLE},
- {SENSOR, MT9M111_CP_LUMA_OFFSET, 0x00, 0x00},
- {SENSOR, MT9M111_CP_LUMA_CLIP, 0xff, 0x00},
- {SENSOR, MT9M111_CP_OUTPUT_FORMAT_CTL2_CONTEXT_A, 0x14, 0x00},
- {SENSOR, MT9M111_CP_OUTPUT_FORMAT_CTL2_CONTEXT_B, 0x14, 0x00},
- {SENSOR, 0xcd, 0x00, 0x0e},
- {SENSOR, 0xd0, 0x00, 0x40},
-
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x02},
- {SENSOR, MT9M111_CC_AUTO_EXPOSURE_PARAMETER_18, 0x00, 0x00},
- {SENSOR, MT9M111_CC_AWB_PARAMETER_7, 0xef, 0x03},
-
- {SENSOR, MT9M111_PAGE_MAP, 0x00, 0x00},
- {SENSOR, 0x33, 0x03, 0x49},
- {SENSOR, 0x34, 0xc0, 0x19},
- {SENSOR, 0x3f, 0x20, 0x20},
- {SENSOR, 0x40, 0x20, 0x20},
- {SENSOR, 0x5a, 0xc0, 0x0a},
- {SENSOR, 0x70, 0x7b, 0x0a},
- {SENSOR, 0x71, 0xff, 0x00},
- {SENSOR, 0x72, 0x19, 0x0e},
- {SENSOR, 0x73, 0x18, 0x0f},
- {SENSOR, 0x74, 0x57, 0x32},
- {SENSOR, 0x75, 0x56, 0x34},
- {SENSOR, 0x76, 0x73, 0x35},
- {SENSOR, 0x77, 0x30, 0x12},
- {SENSOR, 0x78, 0x79, 0x02},
- {SENSOR, 0x79, 0x75, 0x06},
- {SENSOR, 0x7a, 0x77, 0x0a},
- {SENSOR, 0x7b, 0x78, 0x09},
- {SENSOR, 0x7c, 0x7d, 0x06},
- {SENSOR, 0x7d, 0x31, 0x10},
- {SENSOR, 0x7e, 0x00, 0x7e},
- {SENSOR, 0x80, 0x59, 0x04},
- {SENSOR, 0x81, 0x59, 0x04},
- {SENSOR, 0x82, 0x57, 0x0a},
- {SENSOR, 0x83, 0x58, 0x0b},
- {SENSOR, 0x84, 0x47, 0x0c},
- {SENSOR, 0x85, 0x48, 0x0e},
- {SENSOR, 0x86, 0x5b, 0x02},
- {SENSOR, 0x87, 0x00, 0x5c},
- {SENSOR, MT9M111_CONTEXT_CONTROL, 0x00, MT9M111_SEL_CONTEXT_B},
- {SENSOR, 0x60, 0x00, 0x80},
- {SENSOR, 0x61, 0x00, 0x00},
- {SENSOR, 0x62, 0x00, 0x00},
- {SENSOR, 0x63, 0x00, 0x00},
- {SENSOR, 0x64, 0x00, 0x00},
-
- {SENSOR, MT9M111_SC_ROWSTART, 0x00, 0x0d}, /* 13 */
- {SENSOR, MT9M111_SC_COLSTART, 0x00, 0x12}, /* 18 */
- {SENSOR, MT9M111_SC_WINDOW_HEIGHT, 0x04, 0x00}, /* 1024 */
- {SENSOR, MT9M111_SC_WINDOW_WIDTH, 0x05, 0x10}, /* 1296 */
- {SENSOR, MT9M111_SC_HBLANK_CONTEXT_B, 0x01, 0x60}, /* 352 */
- {SENSOR, MT9M111_SC_VBLANK_CONTEXT_B, 0x00, 0x11}, /* 17 */
- {SENSOR, MT9M111_SC_HBLANK_CONTEXT_A, 0x01, 0x60}, /* 352 */
- {SENSOR, MT9M111_SC_VBLANK_CONTEXT_A, 0x00, 0x11}, /* 17 */
- {SENSOR, MT9M111_SC_R_MODE_CONTEXT_A, 0x01, 0x0f}, /* 271 */
- {SENSOR, 0x30, 0x04, 0x00},
- /* Set number of blank rows chosen to 400 */
- {SENSOR, MT9M111_SC_SHUTTER_WIDTH, 0x01, 0x90},
-};
-
-static const unsigned char start_mt9m111[][4] = {
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
- {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81, 0x00},
- {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x01, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
-};
#endif
diff --git a/drivers/media/usb/gspca/m5602/m5602_ov7660.c b/drivers/media/usb/gspca/m5602/m5602_ov7660.c
index 64b3b03a9..672b7a520 100644
--- a/drivers/media/usb/gspca/m5602/m5602_ov7660.c
+++ b/drivers/media/usb/gspca/m5602/m5602_ov7660.c
@@ -23,6 +23,159 @@
static int ov7660_s_ctrl(struct v4l2_ctrl *ctrl);
static void ov7660_dump_registers(struct sd *sd);
+static const unsigned char preinit_ov7660[][4] = {
+ {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02},
+ {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0d},
+ {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x03},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x03},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
+
+ {SENSOR, OV7660_OFON, 0x0c},
+ {SENSOR, OV7660_COM2, 0x11},
+ {SENSOR, OV7660_COM7, 0x05},
+
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x01},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x04},
+ {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
+ {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06},
+ {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x08},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x00},
+ {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
+ {BRIDGE, M5602_XB_GPIO_EN_L, 0x00}
+};
+
+static const unsigned char init_ov7660[][4] = {
+ {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02},
+ {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0d},
+ {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x01},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x01},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x00},
+ {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
+ {BRIDGE, M5602_XB_GPIO_EN_L, 0x00},
+ {SENSOR, OV7660_COM7, 0x80},
+ {SENSOR, OV7660_CLKRC, 0x80},
+ {SENSOR, OV7660_COM9, 0x4c},
+ {SENSOR, OV7660_OFON, 0x43},
+ {SENSOR, OV7660_COM12, 0x28},
+ {SENSOR, OV7660_COM8, 0x00},
+ {SENSOR, OV7660_COM10, 0x40},
+ {SENSOR, OV7660_HSTART, 0x0c},
+ {SENSOR, OV7660_HSTOP, 0x61},
+ {SENSOR, OV7660_HREF, 0xa4},
+ {SENSOR, OV7660_PSHFT, 0x0b},
+ {SENSOR, OV7660_VSTART, 0x01},
+ {SENSOR, OV7660_VSTOP, 0x7a},
+ {SENSOR, OV7660_VSTOP, 0x00},
+ {SENSOR, OV7660_COM7, 0x05},
+ {SENSOR, OV7660_COM6, 0x42},
+ {SENSOR, OV7660_BBIAS, 0x94},
+ {SENSOR, OV7660_GbBIAS, 0x94},
+ {SENSOR, OV7660_RSVD29, 0x94},
+ {SENSOR, OV7660_RBIAS, 0x94},
+ {SENSOR, OV7660_COM1, 0x00},
+ {SENSOR, OV7660_AECH, 0x00},
+ {SENSOR, OV7660_AECHH, 0x00},
+ {SENSOR, OV7660_ADC, 0x05},
+ {SENSOR, OV7660_COM13, 0x00},
+ {SENSOR, OV7660_RSVDA1, 0x23},
+ {SENSOR, OV7660_TSLB, 0x0d},
+ {SENSOR, OV7660_HV, 0x80},
+ {SENSOR, OV7660_LCC1, 0x00},
+ {SENSOR, OV7660_LCC2, 0x00},
+ {SENSOR, OV7660_LCC3, 0x10},
+ {SENSOR, OV7660_LCC4, 0x40},
+ {SENSOR, OV7660_LCC5, 0x01},
+
+ {SENSOR, OV7660_AECH, 0x20},
+ {SENSOR, OV7660_COM1, 0x00},
+ {SENSOR, OV7660_OFON, 0x0c},
+ {SENSOR, OV7660_COM2, 0x11},
+ {SENSOR, OV7660_COM7, 0x05},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x01},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x04},
+ {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
+ {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06},
+ {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x08},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x00},
+ {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
+ {BRIDGE, M5602_XB_GPIO_EN_L, 0x00},
+ {SENSOR, OV7660_AECH, 0x5f},
+ {SENSOR, OV7660_COM1, 0x03},
+ {SENSOR, OV7660_OFON, 0x0c},
+ {SENSOR, OV7660_COM2, 0x11},
+ {SENSOR, OV7660_COM7, 0x05},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x01},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x04},
+ {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
+ {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06},
+ {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x08},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x00},
+ {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
+ {BRIDGE, M5602_XB_GPIO_EN_L, 0x00},
+
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
+ {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81},
+ {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82},
+ {BRIDGE, M5602_XB_SIG_INI, 0x01},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x08},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x01},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0xec},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
+ {BRIDGE, M5602_XB_SIG_INI, 0x00},
+ {BRIDGE, M5602_XB_SIG_INI, 0x02},
+ {BRIDGE, M5602_XB_HSYNC_PARA, 0x00},
+ {BRIDGE, M5602_XB_HSYNC_PARA, 0x27},
+ {BRIDGE, M5602_XB_HSYNC_PARA, 0x02},
+ {BRIDGE, M5602_XB_HSYNC_PARA, 0xa7},
+ {BRIDGE, M5602_XB_SIG_INI, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
+};
+
static struct v4l2_pix_format ov7660_modes[] = {
{
640,
diff --git a/drivers/media/usb/gspca/m5602/m5602_ov7660.h b/drivers/media/usb/gspca/m5602/m5602_ov7660.h
index 6fece1ce1..72445d5df 100644
--- a/drivers/media/usb/gspca/m5602/m5602_ov7660.h
+++ b/drivers/media/usb/gspca/m5602/m5602_ov7660.h
@@ -107,157 +107,4 @@ static const struct m5602_sensor ov7660 = {
.stop = ov7660_stop,
.disconnect = ov7660_disconnect,
};
-
-static const unsigned char preinit_ov7660[][4] = {
- {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02},
- {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0d},
- {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x03},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x03},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
-
- {SENSOR, OV7660_OFON, 0x0c},
- {SENSOR, OV7660_COM2, 0x11},
- {SENSOR, OV7660_COM7, 0x05},
-
- {BRIDGE, M5602_XB_GPIO_DIR, 0x01},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x04},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
- {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06},
- {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x08},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
- {BRIDGE, M5602_XB_GPIO_EN_L, 0x00}
-};
-
-static const unsigned char init_ov7660[][4] = {
- {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02},
- {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0d},
- {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x01},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x01},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
- {BRIDGE, M5602_XB_GPIO_EN_L, 0x00},
- {SENSOR, OV7660_COM7, 0x80},
- {SENSOR, OV7660_CLKRC, 0x80},
- {SENSOR, OV7660_COM9, 0x4c},
- {SENSOR, OV7660_OFON, 0x43},
- {SENSOR, OV7660_COM12, 0x28},
- {SENSOR, OV7660_COM8, 0x00},
- {SENSOR, OV7660_COM10, 0x40},
- {SENSOR, OV7660_HSTART, 0x0c},
- {SENSOR, OV7660_HSTOP, 0x61},
- {SENSOR, OV7660_HREF, 0xa4},
- {SENSOR, OV7660_PSHFT, 0x0b},
- {SENSOR, OV7660_VSTART, 0x01},
- {SENSOR, OV7660_VSTOP, 0x7a},
- {SENSOR, OV7660_VSTOP, 0x00},
- {SENSOR, OV7660_COM7, 0x05},
- {SENSOR, OV7660_COM6, 0x42},
- {SENSOR, OV7660_BBIAS, 0x94},
- {SENSOR, OV7660_GbBIAS, 0x94},
- {SENSOR, OV7660_RSVD29, 0x94},
- {SENSOR, OV7660_RBIAS, 0x94},
- {SENSOR, OV7660_COM1, 0x00},
- {SENSOR, OV7660_AECH, 0x00},
- {SENSOR, OV7660_AECHH, 0x00},
- {SENSOR, OV7660_ADC, 0x05},
- {SENSOR, OV7660_COM13, 0x00},
- {SENSOR, OV7660_RSVDA1, 0x23},
- {SENSOR, OV7660_TSLB, 0x0d},
- {SENSOR, OV7660_HV, 0x80},
- {SENSOR, OV7660_LCC1, 0x00},
- {SENSOR, OV7660_LCC2, 0x00},
- {SENSOR, OV7660_LCC3, 0x10},
- {SENSOR, OV7660_LCC4, 0x40},
- {SENSOR, OV7660_LCC5, 0x01},
-
- {SENSOR, OV7660_AECH, 0x20},
- {SENSOR, OV7660_COM1, 0x00},
- {SENSOR, OV7660_OFON, 0x0c},
- {SENSOR, OV7660_COM2, 0x11},
- {SENSOR, OV7660_COM7, 0x05},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x01},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x04},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
- {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06},
- {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x08},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
- {BRIDGE, M5602_XB_GPIO_EN_L, 0x00},
- {SENSOR, OV7660_AECH, 0x5f},
- {SENSOR, OV7660_COM1, 0x03},
- {SENSOR, OV7660_OFON, 0x0c},
- {SENSOR, OV7660_COM2, 0x11},
- {SENSOR, OV7660_COM7, 0x05},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x01},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x04},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
- {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06},
- {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x08},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
- {BRIDGE, M5602_XB_GPIO_EN_L, 0x00},
-
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
- {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81},
- {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82},
- {BRIDGE, M5602_XB_SIG_INI, 0x01},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x08},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x01},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0xec},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x02},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x27},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x02},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0xa7},
- {BRIDGE, M5602_XB_SIG_INI, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
-};
#endif
diff --git a/drivers/media/usb/gspca/m5602/m5602_ov9650.c b/drivers/media/usb/gspca/m5602/m5602_ov9650.c
index 59bc62bfa..4544d3a1a 100644
--- a/drivers/media/usb/gspca/m5602/m5602_ov9650.c
+++ b/drivers/media/usb/gspca/m5602/m5602_ov9650.c
@@ -1,3 +1,4 @@
+
/*
* Driver for the ov9650 sensor
*
@@ -23,6 +24,157 @@
static int ov9650_s_ctrl(struct v4l2_ctrl *ctrl);
static void ov9650_dump_registers(struct sd *sd);
+static const unsigned char preinit_ov9650[][3] = {
+ /* [INITCAM] */
+ {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02},
+ {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
+ {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00},
+
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x08},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x04},
+ {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
+ {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06},
+ {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x00},
+ {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x0a},
+ /* Reset chip */
+ {SENSOR, OV9650_COM7, OV9650_REGISTER_RESET},
+ /* Enable double clock */
+ {SENSOR, OV9650_CLKRC, 0x80},
+ /* Do something out of spec with the power */
+ {SENSOR, OV9650_OFON, 0x40}
+};
+
+static const unsigned char init_ov9650[][3] = {
+ /* [INITCAM] */
+ {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02},
+ {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
+ {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00},
+
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x08},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x04},
+ {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
+ {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06},
+ {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x00},
+ {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x0a},
+
+ /* Reset chip */
+ {SENSOR, OV9650_COM7, OV9650_REGISTER_RESET},
+ /* One extra reset is needed in order to make the sensor behave
+ properly when resuming from RAM; this could be a timing issue */
+ {SENSOR, OV9650_COM7, OV9650_REGISTER_RESET},
+
+ /* Enable double clock */
+ {SENSOR, OV9650_CLKRC, 0x80},
+ /* Do something out of spec with the power */
+ {SENSOR, OV9650_OFON, 0x40},
+
+ /* Set fast AGC/AEC algorithm with unlimited step size */
+ {SENSOR, OV9650_COM8, OV9650_FAST_AGC_AEC |
+ OV9650_AEC_UNLIM_STEP_SIZE},
+
+ {SENSOR, OV9650_CHLF, 0x10},
+ {SENSOR, OV9650_ARBLM, 0xbf},
+ {SENSOR, OV9650_ACOM38, 0x81},
+ /* Turn off color matrix coefficient double option */
+ {SENSOR, OV9650_COM16, 0x00},
+ /* Enable color matrix for RGB/YUV, Delay Y channel,
+ set output Y/UV delay to 1 */
+ {SENSOR, OV9650_COM13, 0x19},
+ /* Enable digital BLC, Set output mode to U Y V Y */
+ {SENSOR, OV9650_TSLB, 0x0c},
+ /* Limit the AGC/AEC stable upper region */
+ {SENSOR, OV9650_COM24, 0x00},
+ /* Enable HREF and some out of spec things */
+ {SENSOR, OV9650_COM12, 0x73},
+ /* Set all DBLC offset signs to positive and
+ do some out of spec stuff */
+ {SENSOR, OV9650_DBLC1, 0xdf},
+ {SENSOR, OV9650_COM21, 0x06},
+ {SENSOR, OV9650_RSVD35, 0x91},
+ /* Necessary, no camera stream without it */
+ {SENSOR, OV9650_RSVD16, 0x06},
+ {SENSOR, OV9650_RSVD94, 0x99},
+ {SENSOR, OV9650_RSVD95, 0x99},
+ {SENSOR, OV9650_RSVD96, 0x04},
+ /* Enable full range output */
+ {SENSOR, OV9650_COM15, 0x0},
+ /* Enable HREF at optical black, enable ADBLC bias,
+ enable ADBLC, reset timings at format change */
+ {SENSOR, OV9650_COM6, 0x4b},
+ /* Subtract 32 from the B channel bias */
+ {SENSOR, OV9650_BBIAS, 0xa0},
+ /* Subtract 32 from the Gb channel bias */
+ {SENSOR, OV9650_GbBIAS, 0xa0},
+ /* Do not bypass the analog BLC and do some out of spec stuff */
+ {SENSOR, OV9650_Gr_COM, 0x00},
+ /* Subtract 32 from the R channel bias */
+ {SENSOR, OV9650_RBIAS, 0xa0},
+ /* Subtract 32 from the R channel bias */
+ {SENSOR, OV9650_RBIAS, 0x0},
+ {SENSOR, OV9650_COM26, 0x80},
+ {SENSOR, OV9650_ACOMA9, 0x98},
+ /* Set the AGC/AEC stable region upper limit */
+ {SENSOR, OV9650_AEW, 0x68},
+ /* Set the AGC/AEC stable region lower limit */
+ {SENSOR, OV9650_AEB, 0x5c},
+ /* Set the high and low limit nibbles to 3 */
+ {SENSOR, OV9650_VPT, 0xc3},
+ /* Set the Automatic Gain Ceiling (AGC) to 128x,
+ drop VSYNC at frame drop,
+ limit exposure timing,
+ drop frame when the AEC step is larger than the exposure gap */
+ {SENSOR, OV9650_COM9, 0x6e},
+ /* Set VSYNC negative, Set RESET to SLHS (slave mode horizontal sync)
+ and set PWDN to SLVS (slave mode vertical sync) */
+ {SENSOR, OV9650_COM10, 0x42},
+ /* Set horizontal column start high to default value */
+ {SENSOR, OV9650_HSTART, 0x1a}, /* 210 */
+ /* Set horizontal column end */
+ {SENSOR, OV9650_HSTOP, 0xbf}, /* 1534 */
+ /* Complementing register to the two writes above */
+ {SENSOR, OV9650_HREF, 0xb2},
+ /* Set vertical row start high bits */
+ {SENSOR, OV9650_VSTRT, 0x02},
+ /* Set vertical row end low bits */
+ {SENSOR, OV9650_VSTOP, 0x7e},
+ /* Set complementing vertical frame control */
+ {SENSOR, OV9650_VREF, 0x10},
+ {SENSOR, OV9650_ADC, 0x04},
+ {SENSOR, OV9650_HV, 0x40},
+
+ /* Enable denoise, and white-pixel erase */
+ {SENSOR, OV9650_COM22, OV9650_DENOISE_ENABLE |
+ OV9650_WHITE_PIXEL_ENABLE |
+ OV9650_WHITE_PIXEL_OPTION},
+
+ /* Enable VARIOPIXEL */
+ {SENSOR, OV9650_COM3, OV9650_VARIOPIXEL},
+ {SENSOR, OV9650_COM4, OV9650_QVGA_VARIOPIXEL},
+
+ /* Put the sensor in soft sleep mode */
+ {SENSOR, OV9650_COM2, OV9650_SOFT_SLEEP | OV9650_OUTPUT_DRIVE_2X},
+};
+
+static const unsigned char res_init_ov9650[][3] = {
+ {SENSOR, OV9650_COM2, OV9650_OUTPUT_DRIVE_2X},
+
+ {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x82},
+ {BRIDGE, M5602_XB_LINE_OF_FRAME_L, 0x00},
+ {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82},
+ {BRIDGE, M5602_XB_PIX_OF_LINE_L, 0x00},
+ {BRIDGE, M5602_XB_SIG_INI, 0x01}
+};
+
/* Vertically and horizontally flips the image if matched, needed for machines
where the sensor is mounted upside down */
static
diff --git a/drivers/media/usb/gspca/m5602/m5602_ov9650.h b/drivers/media/usb/gspca/m5602/m5602_ov9650.h
index f9f5870da..ce3db062c 100644
--- a/drivers/media/usb/gspca/m5602/m5602_ov9650.h
+++ b/drivers/media/usb/gspca/m5602/m5602_ov9650.h
@@ -156,154 +156,4 @@ static const struct m5602_sensor ov9650 = {
.disconnect = ov9650_disconnect,
};
-static const unsigned char preinit_ov9650[][3] = {
- /* [INITCAM] */
- {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02},
- {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
- {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00},
-
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x08},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x04},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
- {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06},
- {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x00},
- {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x0a},
- /* Reset chip */
- {SENSOR, OV9650_COM7, OV9650_REGISTER_RESET},
- /* Enable double clock */
- {SENSOR, OV9650_CLKRC, 0x80},
- /* Do something out of spec with the power */
- {SENSOR, OV9650_OFON, 0x40}
-};
-
-static const unsigned char init_ov9650[][3] = {
- /* [INITCAM] */
- {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02},
- {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
- {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00},
-
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x08},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x04},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
- {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06},
- {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x00},
- {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x0a},
-
- /* Reset chip */
- {SENSOR, OV9650_COM7, OV9650_REGISTER_RESET},
- /* One extra reset is needed in order to make the sensor behave
- properly when resuming from ram, could be a timing issue */
- {SENSOR, OV9650_COM7, OV9650_REGISTER_RESET},
-
- /* Enable double clock */
- {SENSOR, OV9650_CLKRC, 0x80},
- /* Do something out of spec with the power */
- {SENSOR, OV9650_OFON, 0x40},
-
- /* Set fast AGC/AEC algorithm with unlimited step size */
- {SENSOR, OV9650_COM8, OV9650_FAST_AGC_AEC |
- OV9650_AEC_UNLIM_STEP_SIZE},
-
- {SENSOR, OV9650_CHLF, 0x10},
- {SENSOR, OV9650_ARBLM, 0xbf},
- {SENSOR, OV9650_ACOM38, 0x81},
- /* Turn off color matrix coefficient double option */
- {SENSOR, OV9650_COM16, 0x00},
- /* Enable color matrix for RGB/YUV, Delay Y channel,
- set output Y/UV delay to 1 */
- {SENSOR, OV9650_COM13, 0x19},
- /* Enable digital BLC, Set output mode to U Y V Y */
- {SENSOR, OV9650_TSLB, 0x0c},
- /* Limit the AGC/AEC stable upper region */
- {SENSOR, OV9650_COM24, 0x00},
- /* Enable HREF and some out of spec things */
- {SENSOR, OV9650_COM12, 0x73},
- /* Set all DBLC offset signs to positive and
- do some out of spec stuff */
- {SENSOR, OV9650_DBLC1, 0xdf},
- {SENSOR, OV9650_COM21, 0x06},
- {SENSOR, OV9650_RSVD35, 0x91},
- /* Necessary, no camera stream without it */
- {SENSOR, OV9650_RSVD16, 0x06},
- {SENSOR, OV9650_RSVD94, 0x99},
- {SENSOR, OV9650_RSVD95, 0x99},
- {SENSOR, OV9650_RSVD96, 0x04},
- /* Enable full range output */
- {SENSOR, OV9650_COM15, 0x0},
- /* Enable HREF at optical black, enable ADBLC bias,
- enable ADBLC, reset timings at format change */
- {SENSOR, OV9650_COM6, 0x4b},
- /* Subtract 32 from the B channel bias */
- {SENSOR, OV9650_BBIAS, 0xa0},
- /* Subtract 32 from the Gb channel bias */
- {SENSOR, OV9650_GbBIAS, 0xa0},
- /* Do not bypass the analog BLC and to some out of spec stuff */
- {SENSOR, OV9650_Gr_COM, 0x00},
- /* Subtract 32 from the R channel bias */
- {SENSOR, OV9650_RBIAS, 0xa0},
- /* Subtract 32 from the R channel bias */
- {SENSOR, OV9650_RBIAS, 0x0},
- {SENSOR, OV9650_COM26, 0x80},
- {SENSOR, OV9650_ACOMA9, 0x98},
- /* Set the AGC/AEC stable region upper limit */
- {SENSOR, OV9650_AEW, 0x68},
- /* Set the AGC/AEC stable region lower limit */
- {SENSOR, OV9650_AEB, 0x5c},
- /* Set the high and low limit nibbles to 3 */
- {SENSOR, OV9650_VPT, 0xc3},
- /* Set the Automatic Gain Ceiling (AGC) to 128x,
- drop VSYNC at frame drop,
- limit exposure timing,
- drop frame when the AEC step is larger than the exposure gap */
- {SENSOR, OV9650_COM9, 0x6e},
- /* Set VSYNC negative, Set RESET to SLHS (slave mode horizontal sync)
- and set PWDN to SLVS (slave mode vertical sync) */
- {SENSOR, OV9650_COM10, 0x42},
- /* Set horizontal column start high to default value */
- {SENSOR, OV9650_HSTART, 0x1a}, /* 210 */
- /* Set horizontal column end */
- {SENSOR, OV9650_HSTOP, 0xbf}, /* 1534 */
- /* Complementing register to the two writes above */
- {SENSOR, OV9650_HREF, 0xb2},
- /* Set vertical row start high bits */
- {SENSOR, OV9650_VSTRT, 0x02},
- /* Set vertical row end low bits */
- {SENSOR, OV9650_VSTOP, 0x7e},
- /* Set complementing vertical frame control */
- {SENSOR, OV9650_VREF, 0x10},
- {SENSOR, OV9650_ADC, 0x04},
- {SENSOR, OV9650_HV, 0x40},
-
- /* Enable denoise, and white-pixel erase */
- {SENSOR, OV9650_COM22, OV9650_DENOISE_ENABLE |
- OV9650_WHITE_PIXEL_ENABLE |
- OV9650_WHITE_PIXEL_OPTION},
-
- /* Enable VARIOPIXEL */
- {SENSOR, OV9650_COM3, OV9650_VARIOPIXEL},
- {SENSOR, OV9650_COM4, OV9650_QVGA_VARIOPIXEL},
-
- /* Put the sensor in soft sleep mode */
- {SENSOR, OV9650_COM2, OV9650_SOFT_SLEEP | OV9650_OUTPUT_DRIVE_2X},
-};
-
-static const unsigned char res_init_ov9650[][3] = {
- {SENSOR, OV9650_COM2, OV9650_OUTPUT_DRIVE_2X},
-
- {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x82},
- {BRIDGE, M5602_XB_LINE_OF_FRAME_L, 0x00},
- {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82},
- {BRIDGE, M5602_XB_PIX_OF_LINE_L, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x01}
-};
#endif
diff --git a/drivers/media/usb/gspca/m5602/m5602_po1030.c b/drivers/media/usb/gspca/m5602/m5602_po1030.c
index 4bf5c4342..a0a90dd34 100644
--- a/drivers/media/usb/gspca/m5602/m5602_po1030.c
+++ b/drivers/media/usb/gspca/m5602/m5602_po1030.c
@@ -23,6 +23,110 @@
static int po1030_s_ctrl(struct v4l2_ctrl *ctrl);
static void po1030_dump_registers(struct sd *sd);
+static const unsigned char preinit_po1030[][3] = {
+ {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02},
+ {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
+ {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x04},
+ {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
+ {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06},
+ {BRIDGE, M5602_XB_GPIO_DAT_H, 0x02},
+
+ {SENSOR, PO1030_AUTOCTRL2, PO1030_SENSOR_RESET | (1 << 2)},
+
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x04},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x00}
+};
+
+static const unsigned char init_po1030[][3] = {
+ {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02},
+ {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
+ {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
+
+ {SENSOR, PO1030_AUTOCTRL2, PO1030_SENSOR_RESET | (1 << 2)},
+
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x04},
+ {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
+ {BRIDGE, M5602_XB_GPIO_EN_L, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06},
+ {BRIDGE, M5602_XB_GPIO_DAT_H, 0x02},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x04},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x00},
+
+ {SENSOR, PO1030_AUTOCTRL2, 0x04},
+
+ {SENSOR, PO1030_OUTFORMCTRL2, PO1030_RAW_RGB_BAYER},
+ {SENSOR, PO1030_AUTOCTRL1, PO1030_WEIGHT_WIN_2X},
+
+ {SENSOR, PO1030_CONTROL2, 0x03},
+ {SENSOR, 0x21, 0x90},
+ {SENSOR, PO1030_YTARGET, 0x60},
+ {SENSOR, 0x59, 0x13},
+ {SENSOR, PO1030_OUTFORMCTRL1, PO1030_HREF_ENABLE},
+ {SENSOR, PO1030_EDGE_ENH_OFF, 0x00},
+ {SENSOR, PO1030_EGA, 0x80},
+ {SENSOR, 0x78, 0x14},
+ {SENSOR, 0x6f, 0x01},
+ {SENSOR, PO1030_GLOBALGAINMAX, 0x14},
+ {SENSOR, PO1030_Cb_U_GAIN, 0x38},
+ {SENSOR, PO1030_Cr_V_GAIN, 0x38},
+ {SENSOR, PO1030_CONTROL1, PO1030_SHUTTER_MODE |
+ PO1030_AUTO_SUBSAMPLING |
+ PO1030_FRAME_EQUAL},
+ {SENSOR, PO1030_GC0, 0x10},
+ {SENSOR, PO1030_GC1, 0x20},
+ {SENSOR, PO1030_GC2, 0x40},
+ {SENSOR, PO1030_GC3, 0x60},
+ {SENSOR, PO1030_GC4, 0x80},
+ {SENSOR, PO1030_GC5, 0xa0},
+ {SENSOR, PO1030_GC6, 0xc0},
+ {SENSOR, PO1030_GC7, 0xff},
+
+ /* Set the width to 751 */
+ {SENSOR, PO1030_FRAMEWIDTH_H, 0x02},
+ {SENSOR, PO1030_FRAMEWIDTH_L, 0xef},
+
+ /* Set the height to 540 */
+ {SENSOR, PO1030_FRAMEHEIGHT_H, 0x02},
+ {SENSOR, PO1030_FRAMEHEIGHT_L, 0x1c},
+
+ /* Set the x window to 1 */
+ {SENSOR, PO1030_WINDOWX_H, 0x00},
+ {SENSOR, PO1030_WINDOWX_L, 0x01},
+
+ /* Set the y window to 1 */
+ {SENSOR, PO1030_WINDOWY_H, 0x00},
+ {SENSOR, PO1030_WINDOWY_L, 0x01},
+
+ /* in a very low-light environment, increase the exposure but
+ * decrease the FPS (frames per second) */
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
+
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x00},
+ {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
+ {BRIDGE, M5602_XB_GPIO_EN_L, 0x00},
+};
+
static struct v4l2_pix_format po1030_modes[] = {
{
640,
diff --git a/drivers/media/usb/gspca/m5602/m5602_po1030.h b/drivers/media/usb/gspca/m5602/m5602_po1030.h
index a6ab76149..981a91aa7 100644
--- a/drivers/media/usb/gspca/m5602/m5602_po1030.h
+++ b/drivers/media/usb/gspca/m5602/m5602_po1030.h
@@ -167,108 +167,4 @@ static const struct m5602_sensor po1030 = {
.start = po1030_start,
.disconnect = po1030_disconnect,
};
-
-static const unsigned char preinit_po1030[][3] = {
- {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02},
- {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
- {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x04},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
- {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06},
- {BRIDGE, M5602_XB_GPIO_DAT_H, 0x02},
-
- {SENSOR, PO1030_AUTOCTRL2, PO1030_SENSOR_RESET | (1 << 2)},
-
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x04},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x00}
-};
-
-static const unsigned char init_po1030[][3] = {
- {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02},
- {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
- {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0c},
-
- {SENSOR, PO1030_AUTOCTRL2, PO1030_SENSOR_RESET | (1 << 2)},
-
- {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x04},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
- {BRIDGE, M5602_XB_GPIO_EN_L, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06},
- {BRIDGE, M5602_XB_GPIO_DAT_H, 0x02},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x04},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x00},
-
- {SENSOR, PO1030_AUTOCTRL2, 0x04},
-
- {SENSOR, PO1030_OUTFORMCTRL2, PO1030_RAW_RGB_BAYER},
- {SENSOR, PO1030_AUTOCTRL1, PO1030_WEIGHT_WIN_2X},
-
- {SENSOR, PO1030_CONTROL2, 0x03},
- {SENSOR, 0x21, 0x90},
- {SENSOR, PO1030_YTARGET, 0x60},
- {SENSOR, 0x59, 0x13},
- {SENSOR, PO1030_OUTFORMCTRL1, PO1030_HREF_ENABLE},
- {SENSOR, PO1030_EDGE_ENH_OFF, 0x00},
- {SENSOR, PO1030_EGA, 0x80},
- {SENSOR, 0x78, 0x14},
- {SENSOR, 0x6f, 0x01},
- {SENSOR, PO1030_GLOBALGAINMAX, 0x14},
- {SENSOR, PO1030_Cb_U_GAIN, 0x38},
- {SENSOR, PO1030_Cr_V_GAIN, 0x38},
- {SENSOR, PO1030_CONTROL1, PO1030_SHUTTER_MODE |
- PO1030_AUTO_SUBSAMPLING |
- PO1030_FRAME_EQUAL},
- {SENSOR, PO1030_GC0, 0x10},
- {SENSOR, PO1030_GC1, 0x20},
- {SENSOR, PO1030_GC2, 0x40},
- {SENSOR, PO1030_GC3, 0x60},
- {SENSOR, PO1030_GC4, 0x80},
- {SENSOR, PO1030_GC5, 0xa0},
- {SENSOR, PO1030_GC6, 0xc0},
- {SENSOR, PO1030_GC7, 0xff},
-
- /* Set the width to 751 */
- {SENSOR, PO1030_FRAMEWIDTH_H, 0x02},
- {SENSOR, PO1030_FRAMEWIDTH_L, 0xef},
-
- /* Set the height to 540 */
- {SENSOR, PO1030_FRAMEHEIGHT_H, 0x02},
- {SENSOR, PO1030_FRAMEHEIGHT_L, 0x1c},
-
- /* Set the x window to 1 */
- {SENSOR, PO1030_WINDOWX_H, 0x00},
- {SENSOR, PO1030_WINDOWX_L, 0x01},
-
- /* Set the y window to 1 */
- {SENSOR, PO1030_WINDOWY_H, 0x00},
- {SENSOR, PO1030_WINDOWY_L, 0x01},
-
- /* with a very low lighted environment increase the exposure but
- * decrease the FPS (Frame Per Second) */
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
-
- {BRIDGE, M5602_XB_GPIO_DIR, 0x05},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
- {BRIDGE, M5602_XB_GPIO_EN_L, 0x00},
-};
#endif
diff --git a/drivers/media/usb/gspca/m5602/m5602_s5k4aa.c b/drivers/media/usb/gspca/m5602/m5602_s5k4aa.c
index 7d1259945..8447b9c5f 100644
--- a/drivers/media/usb/gspca/m5602/m5602_s5k4aa.c
+++ b/drivers/media/usb/gspca/m5602/m5602_s5k4aa.c
@@ -20,6 +20,205 @@
#include "m5602_s5k4aa.h"
+static const unsigned char preinit_s5k4aa[][4] = {
+ {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00},
+ {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0d, 0x00},
+ {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00, 0x00},
+
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x08, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0xb0, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0x80, 0x00},
+ {BRIDGE, M5602_XB_GPIO_EN_H, 0x3f, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DIR_H, 0x3f, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x00, 0x00},
+ {BRIDGE, M5602_XB_GPIO_EN_L, 0xff, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DIR_L, 0xff, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT_L, 0x00, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x08, 0x00},
+
+ {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00},
+ {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x14, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xf0, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x1c, 0x00},
+ {BRIDGE, M5602_XB_GPIO_EN_H, 0x06, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00},
+ {BRIDGE, M5602_XB_GPIO_EN_L, 0x00, 0x00},
+ {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x20, 0x00},
+
+ {SENSOR, S5K4AA_PAGE_MAP, 0x00, 0x00}
+};
+
+static const unsigned char init_s5k4aa[][4] = {
+ {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00},
+ {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0d, 0x00},
+ {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00, 0x00},
+
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x08, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0xb0, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0x80, 0x00},
+ {BRIDGE, M5602_XB_GPIO_EN_H, 0x3f, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DIR_H, 0x3f, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x00, 0x00},
+ {BRIDGE, M5602_XB_GPIO_EN_L, 0xff, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DIR_L, 0xff, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT_L, 0x00, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x08, 0x00},
+
+ {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00},
+ {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x14, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xf0, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x1c, 0x00},
+ {BRIDGE, M5602_XB_GPIO_EN_H, 0x06, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00},
+ {BRIDGE, M5602_XB_GPIO_EN_L, 0x00, 0x00},
+ {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x20, 0x00},
+
+ {SENSOR, S5K4AA_PAGE_MAP, 0x07, 0x00},
+ {SENSOR, 0x36, 0x01, 0x00},
+ {SENSOR, S5K4AA_PAGE_MAP, 0x00, 0x00},
+ {SENSOR, 0x7b, 0xff, 0x00},
+ {SENSOR, S5K4AA_PAGE_MAP, 0x02, 0x00},
+ {SENSOR, 0x0c, 0x05, 0x00},
+ {SENSOR, 0x02, 0x0e, 0x00},
+ {SENSOR, S5K4AA_READ_MODE, 0xa0, 0x00},
+ {SENSOR, 0x37, 0x00, 0x00},
+};
+
+static const unsigned char VGA_s5k4aa[][4] = {
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x08, 0x00},
+ {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81, 0x00},
+ {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82, 0x00},
+ {BRIDGE, M5602_XB_SIG_INI, 0x01, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
+ /* VSYNC_PARA, VSYNC_PARA : img height 480 = 0x01e0 */
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x01, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0xe0, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
+ {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
+ {BRIDGE, M5602_XB_SIG_INI, 0x02, 0x00},
+ {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
+ {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
+ /* HSYNC_PARA, HSYNC_PARA : img width 640 = 0x0280 */
+ {BRIDGE, M5602_XB_HSYNC_PARA, 0x02, 0x00},
+ {BRIDGE, M5602_XB_HSYNC_PARA, 0x80, 0x00},
+ {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xa0, 0x00}, /* 48 MHz */
+
+ {SENSOR, S5K4AA_PAGE_MAP, 0x02, 0x00},
+ {SENSOR, S5K4AA_READ_MODE, S5K4AA_RM_H_FLIP | S5K4AA_RM_ROW_SKIP_2X
+ | S5K4AA_RM_COL_SKIP_2X, 0x00},
+	/* 0x37 : Fixes image stability when the light is too bright and
+	 * improves image quality in 640x480, but worsens it in 1280x1024 */
+ {SENSOR, 0x37, 0x01, 0x00},
+ /* ROWSTART_HI, ROWSTART_LO : 10 + (1024-960)/2 = 42 = 0x002a */
+ {SENSOR, S5K4AA_ROWSTART_HI, 0x00, 0x00},
+ {SENSOR, S5K4AA_ROWSTART_LO, 0x29, 0x00},
+ {SENSOR, S5K4AA_COLSTART_HI, 0x00, 0x00},
+ {SENSOR, S5K4AA_COLSTART_LO, 0x0c, 0x00},
+ /* window_height_hi, window_height_lo : 960 = 0x03c0 */
+ {SENSOR, S5K4AA_WINDOW_HEIGHT_HI, 0x03, 0x00},
+ {SENSOR, S5K4AA_WINDOW_HEIGHT_LO, 0xc0, 0x00},
+ /* window_width_hi, window_width_lo : 1280 = 0x0500 */
+ {SENSOR, S5K4AA_WINDOW_WIDTH_HI, 0x05, 0x00},
+ {SENSOR, S5K4AA_WINDOW_WIDTH_LO, 0x00, 0x00},
+ {SENSOR, S5K4AA_H_BLANK_HI__, 0x00, 0x00},
+ {SENSOR, S5K4AA_H_BLANK_LO__, 0xa8, 0x00}, /* helps to sync... */
+ {SENSOR, S5K4AA_EXPOSURE_HI, 0x01, 0x00},
+ {SENSOR, S5K4AA_EXPOSURE_LO, 0x00, 0x00},
+ {SENSOR, 0x11, 0x04, 0x00},
+ {SENSOR, 0x12, 0xc3, 0x00},
+ {SENSOR, S5K4AA_PAGE_MAP, 0x02, 0x00},
+ {SENSOR, 0x02, 0x0e, 0x00},
+};
+
+static const unsigned char SXGA_s5k4aa[][4] = {
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x08, 0x00},
+ {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81, 0x00},
+ {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82, 0x00},
+ {BRIDGE, M5602_XB_SIG_INI, 0x01, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
+ /* VSYNC_PARA, VSYNC_PARA : img height 1024 = 0x0400 */
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x04, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
+ {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
+ {BRIDGE, M5602_XB_SIG_INI, 0x02, 0x00},
+ {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
+ {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
+ /* HSYNC_PARA, HSYNC_PARA : img width 1280 = 0x0500 */
+ {BRIDGE, M5602_XB_HSYNC_PARA, 0x05, 0x00},
+ {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
+ {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xa0, 0x00}, /* 48 MHz */
+
+ {SENSOR, S5K4AA_PAGE_MAP, 0x02, 0x00},
+ {SENSOR, S5K4AA_READ_MODE, S5K4AA_RM_H_FLIP, 0x00},
+ {SENSOR, 0x37, 0x01, 0x00},
+ {SENSOR, S5K4AA_ROWSTART_HI, 0x00, 0x00},
+ {SENSOR, S5K4AA_ROWSTART_LO, 0x09, 0x00},
+ {SENSOR, S5K4AA_COLSTART_HI, 0x00, 0x00},
+ {SENSOR, S5K4AA_COLSTART_LO, 0x0a, 0x00},
+ {SENSOR, S5K4AA_WINDOW_HEIGHT_HI, 0x04, 0x00},
+ {SENSOR, S5K4AA_WINDOW_HEIGHT_LO, 0x00, 0x00},
+ {SENSOR, S5K4AA_WINDOW_WIDTH_HI, 0x05, 0x00},
+ {SENSOR, S5K4AA_WINDOW_WIDTH_LO, 0x00, 0x00},
+ {SENSOR, S5K4AA_H_BLANK_HI__, 0x01, 0x00},
+ {SENSOR, S5K4AA_H_BLANK_LO__, 0xa8, 0x00},
+ {SENSOR, S5K4AA_EXPOSURE_HI, 0x01, 0x00},
+ {SENSOR, S5K4AA_EXPOSURE_LO, 0x00, 0x00},
+ {SENSOR, 0x11, 0x04, 0x00},
+ {SENSOR, 0x12, 0xc3, 0x00},
+ {SENSOR, S5K4AA_PAGE_MAP, 0x02, 0x00},
+ {SENSOR, 0x02, 0x0e, 0x00},
+};
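+
+/*
+ * A minimal illustrative helper (hypothetical, not referenced by the
+ * driver): the mode tables above program 16-bit geometry values as
+ * HI/LO register pairs, e.g. image height 480 = 0x01e0 is written as
+ * HI 0x01 followed by LO 0xe0.
+ */
+static inline void s5k4aa_split_u16(u16 val, u8 *hi, u8 *lo)
+{
+	*hi = val >> 8;
+	*lo = val & 0xff;
+}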
+
static int s5k4aa_s_ctrl(struct v4l2_ctrl *ctrl);
static void s5k4aa_dump_registers(struct sd *sd);
diff --git a/drivers/media/usb/gspca/m5602/m5602_s5k4aa.h b/drivers/media/usb/gspca/m5602/m5602_s5k4aa.h
index 9953e9766..8407682d6 100644
--- a/drivers/media/usb/gspca/m5602/m5602_s5k4aa.h
+++ b/drivers/media/usb/gspca/m5602/m5602_s5k4aa.h
@@ -85,201 +85,4 @@ static const struct m5602_sensor s5k4aa = {
.disconnect = s5k4aa_disconnect,
};
-static const unsigned char preinit_s5k4aa[][4] = {
- {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00},
- {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0d, 0x00},
- {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00, 0x00},
-
- {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x08, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0xb0, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0x80, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x3f, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR_H, 0x3f, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x00, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_L, 0xff, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR_L, 0xff, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT_L, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x08, 0x00},
-
- {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00},
- {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x14, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xf0, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x1c, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x06, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_L, 0x00, 0x00},
- {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x20, 0x00},
-
- {SENSOR, S5K4AA_PAGE_MAP, 0x00, 0x00}
-};
-
-static const unsigned char init_s5k4aa[][4] = {
- {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00},
- {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0d, 0x00},
- {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00, 0x00},
-
- {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x08, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0xb0, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0x80, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x3f, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR_H, 0x3f, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x00, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_L, 0xff, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR_L, 0xff, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT_L, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x08, 0x00},
-
- {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00},
- {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x14, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xf0, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x1c, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x06, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_L, 0x00, 0x00},
- {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x20, 0x00},
-
- {SENSOR, S5K4AA_PAGE_MAP, 0x07, 0x00},
- {SENSOR, 0x36, 0x01, 0x00},
- {SENSOR, S5K4AA_PAGE_MAP, 0x00, 0x00},
- {SENSOR, 0x7b, 0xff, 0x00},
- {SENSOR, S5K4AA_PAGE_MAP, 0x02, 0x00},
- {SENSOR, 0x0c, 0x05, 0x00},
- {SENSOR, 0x02, 0x0e, 0x00},
- {SENSOR, S5K4AA_READ_MODE, 0xa0, 0x00},
- {SENSOR, 0x37, 0x00, 0x00},
-};
-
-static const unsigned char VGA_s5k4aa[][4] = {
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x08, 0x00},
- {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81, 0x00},
- {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x01, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- /* VSYNC_PARA, VSYNC_PARA : img height 480 = 0x01e0 */
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x01, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0xe0, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x02, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
- /* HSYNC_PARA, HSYNC_PARA : img width 640 = 0x0280 */
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x02, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x80, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xa0, 0x00}, /* 48 MHz */
-
- {SENSOR, S5K4AA_PAGE_MAP, 0x02, 0x00},
- {SENSOR, S5K4AA_READ_MODE, S5K4AA_RM_H_FLIP | S5K4AA_RM_ROW_SKIP_2X
- | S5K4AA_RM_COL_SKIP_2X, 0x00},
- /* 0x37 : Fix image stability when light is too bright and improves
- * image quality in 640x480, but worsens it in 1280x1024 */
- {SENSOR, 0x37, 0x01, 0x00},
- /* ROWSTART_HI, ROWSTART_LO : 10 + (1024-960)/2 = 42 = 0x002a */
- {SENSOR, S5K4AA_ROWSTART_HI, 0x00, 0x00},
- {SENSOR, S5K4AA_ROWSTART_LO, 0x29, 0x00},
- {SENSOR, S5K4AA_COLSTART_HI, 0x00, 0x00},
- {SENSOR, S5K4AA_COLSTART_LO, 0x0c, 0x00},
- /* window_height_hi, window_height_lo : 960 = 0x03c0 */
- {SENSOR, S5K4AA_WINDOW_HEIGHT_HI, 0x03, 0x00},
- {SENSOR, S5K4AA_WINDOW_HEIGHT_LO, 0xc0, 0x00},
- /* window_width_hi, window_width_lo : 1280 = 0x0500 */
- {SENSOR, S5K4AA_WINDOW_WIDTH_HI, 0x05, 0x00},
- {SENSOR, S5K4AA_WINDOW_WIDTH_LO, 0x00, 0x00},
- {SENSOR, S5K4AA_H_BLANK_HI__, 0x00, 0x00},
- {SENSOR, S5K4AA_H_BLANK_LO__, 0xa8, 0x00}, /* helps to sync... */
- {SENSOR, S5K4AA_EXPOSURE_HI, 0x01, 0x00},
- {SENSOR, S5K4AA_EXPOSURE_LO, 0x00, 0x00},
- {SENSOR, 0x11, 0x04, 0x00},
- {SENSOR, 0x12, 0xc3, 0x00},
- {SENSOR, S5K4AA_PAGE_MAP, 0x02, 0x00},
- {SENSOR, 0x02, 0x0e, 0x00},
-};
-
-static const unsigned char SXGA_s5k4aa[][4] = {
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x08, 0x00},
- {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81, 0x00},
- {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x01, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- /* VSYNC_PARA, VSYNC_PARA : img height 1024 = 0x0400 */
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x04, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x02, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
- /* HSYNC_PARA, HSYNC_PARA : img width 1280 = 0x0500 */
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x05, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xa0, 0x00}, /* 48 MHz */
-
- {SENSOR, S5K4AA_PAGE_MAP, 0x02, 0x00},
- {SENSOR, S5K4AA_READ_MODE, S5K4AA_RM_H_FLIP, 0x00},
- {SENSOR, 0x37, 0x01, 0x00},
- {SENSOR, S5K4AA_ROWSTART_HI, 0x00, 0x00},
- {SENSOR, S5K4AA_ROWSTART_LO, 0x09, 0x00},
- {SENSOR, S5K4AA_COLSTART_HI, 0x00, 0x00},
- {SENSOR, S5K4AA_COLSTART_LO, 0x0a, 0x00},
- {SENSOR, S5K4AA_WINDOW_HEIGHT_HI, 0x04, 0x00},
- {SENSOR, S5K4AA_WINDOW_HEIGHT_LO, 0x00, 0x00},
- {SENSOR, S5K4AA_WINDOW_WIDTH_HI, 0x05, 0x00},
- {SENSOR, S5K4AA_WINDOW_WIDTH_LO, 0x00, 0x00},
- {SENSOR, S5K4AA_H_BLANK_HI__, 0x01, 0x00},
- {SENSOR, S5K4AA_H_BLANK_LO__, 0xa8, 0x00},
- {SENSOR, S5K4AA_EXPOSURE_HI, 0x01, 0x00},
- {SENSOR, S5K4AA_EXPOSURE_LO, 0x00, 0x00},
- {SENSOR, 0x11, 0x04, 0x00},
- {SENSOR, 0x12, 0xc3, 0x00},
- {SENSOR, S5K4AA_PAGE_MAP, 0x02, 0x00},
- {SENSOR, 0x02, 0x0e, 0x00},
-};
#endif
diff --git a/drivers/media/usb/gspca/m5602/m5602_s5k83a.c b/drivers/media/usb/gspca/m5602/m5602_s5k83a.c
index bf6b21543..be5e25d1a 100644
--- a/drivers/media/usb/gspca/m5602/m5602_s5k83a.c
+++ b/drivers/media/usb/gspca/m5602/m5602_s5k83a.c
@@ -41,6 +41,130 @@ static struct v4l2_pix_format s5k83a_modes[] = {
}
};
+static const unsigned char preinit_s5k83a[][4] = {
+ {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00},
+ {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0d, 0x00},
+ {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00, 0x00},
+
+ {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x08, 0x00},
+ {BRIDGE, M5602_XB_GPIO_EN_H, 0x3f, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DIR_H, 0x3f, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00},
+ {BRIDGE, M5602_XB_GPIO_EN_L, 0xff, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DIR_L, 0xff, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT_L, 0x00, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0xb0, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0x80, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
+ {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00},
+ {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xf0, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x1c, 0x00},
+ {BRIDGE, M5602_XB_GPIO_EN_H, 0x06, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00},
+ {BRIDGE, M5602_XB_GPIO_EN_L, 0x00, 0x00},
+ {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x20, 0x00},
+};
+
+/* This could probably be considerably shortened.
+   I don't have the hardware to experiment with it; patches are welcome.
+*/
+static const unsigned char init_s5k83a[][4] = {
+ /* The following sequence is useless after a clean boot
+ but is necessary after resume from suspend */
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x08, 0x00},
+ {BRIDGE, M5602_XB_GPIO_EN_H, 0x3f, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DIR_H, 0x3f, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00},
+ {BRIDGE, M5602_XB_GPIO_EN_L, 0xff, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DIR_L, 0xff, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT_L, 0x00, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0xb0, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0x80, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
+ {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00},
+ {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xf0, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT, 0x08, 0x00},
+ {BRIDGE, M5602_XB_GPIO_EN_H, 0x06, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06, 0x00},
+ {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00},
+ {BRIDGE, M5602_XB_GPIO_EN_L, 0x00, 0x00},
+ {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x20, 0x00},
+
+ {SENSOR, S5K83A_PAGE_MAP, 0x04, 0x00},
+ {SENSOR, 0xaf, 0x01, 0x00},
+ {SENSOR, S5K83A_PAGE_MAP, 0x00, 0x00},
+ {SENSOR, 0x7b, 0xff, 0x00},
+ {SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00},
+ {SENSOR, 0x01, 0x50, 0x00},
+ {SENSOR, 0x12, 0x20, 0x00},
+ {SENSOR, 0x17, 0x40, 0x00},
+ {SENSOR, 0x1c, 0x00, 0x00},
+ {SENSOR, 0x02, 0x70, 0x00},
+ {SENSOR, 0x03, 0x0b, 0x00},
+ {SENSOR, 0x04, 0xf0, 0x00},
+ {SENSOR, 0x05, 0x0b, 0x00},
+ {SENSOR, 0x06, 0x71, 0x00},
+ {SENSOR, 0x07, 0xe8, 0x00}, /* 488 */
+ {SENSOR, 0x08, 0x02, 0x00},
+ {SENSOR, 0x09, 0x88, 0x00}, /* 648 */
+ {SENSOR, 0x14, 0x00, 0x00},
+ {SENSOR, 0x15, 0x20, 0x00}, /* 32 */
+ {SENSOR, 0x19, 0x00, 0x00},
+ {SENSOR, 0x1a, 0x98, 0x00}, /* 152 */
+ {SENSOR, 0x0f, 0x02, 0x00},
+ {SENSOR, 0x10, 0xe5, 0x00}, /* 741 */
+	/* normal colors
+	(this is the value after boot, but it can differ after later tries) */
+ {SENSOR, 0x00, 0x06, 0x00},
+};
+
+static const unsigned char start_s5k83a[][4] = {
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
+ {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
+ {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
+ {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81, 0x00},
+ {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82, 0x00},
+ {BRIDGE, M5602_XB_SIG_INI, 0x01, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x01, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0xe4, 0x00}, /* 484 */
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
+ {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
+ {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
+ {BRIDGE, M5602_XB_SIG_INI, 0x02, 0x00},
+ {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
+ {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
+ {BRIDGE, M5602_XB_HSYNC_PARA, 0x02, 0x00},
+ {BRIDGE, M5602_XB_HSYNC_PARA, 0x7f, 0x00}, /* 639 */
+ {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
+ {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
+};
+
static void s5k83a_dump_registers(struct sd *sd);
static int s5k83a_get_rotation(struct sd *sd, u8 *reg_data);
static int s5k83a_set_led_indication(struct sd *sd, u8 val);
diff --git a/drivers/media/usb/gspca/m5602/m5602_s5k83a.h b/drivers/media/usb/gspca/m5602/m5602_s5k83a.h
index d61b91822..3212bfe53 100644
--- a/drivers/media/usb/gspca/m5602/m5602_s5k83a.h
+++ b/drivers/media/usb/gspca/m5602/m5602_s5k83a.h
@@ -61,128 +61,4 @@ static const struct m5602_sensor s5k83a = {
.i2c_slave_id = 0x5a,
.i2c_regW = 2,
};
-
-static const unsigned char preinit_s5k83a[][4] = {
- {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00},
- {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x0d, 0x00},
- {BRIDGE, M5602_XB_SENSOR_CTRL, 0x00, 0x00},
-
- {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x08, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x3f, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR_H, 0x3f, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_L, 0xff, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR_L, 0xff, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT_L, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0xb0, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0x80, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
- {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00},
- {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xf0, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x1c, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x06, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_L, 0x00, 0x00},
- {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x20, 0x00},
-};
-
-/* This could probably be considerably shortened.
- I don't have the hardware to experiment with it, patches welcome
-*/
-static const unsigned char init_s5k83a[][4] = {
- /* The following sequence is useless after a clean boot
- but is necessary after resume from suspend */
- {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x08, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x3f, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR_H, 0x3f, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_L, 0xff, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR_L, 0xff, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT_L, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0xb0, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0x80, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
- {BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02, 0x00},
- {BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xf0, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR, 0x1d, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT, 0x08, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_H, 0x06, 0x00},
- {BRIDGE, M5602_XB_GPIO_DIR_H, 0x06, 0x00},
- {BRIDGE, M5602_XB_GPIO_DAT_H, 0x00, 0x00},
- {BRIDGE, M5602_XB_GPIO_EN_L, 0x00, 0x00},
- {BRIDGE, M5602_XB_I2C_CLK_DIV, 0x20, 0x00},
-
- {SENSOR, S5K83A_PAGE_MAP, 0x04, 0x00},
- {SENSOR, 0xaf, 0x01, 0x00},
- {SENSOR, S5K83A_PAGE_MAP, 0x00, 0x00},
- {SENSOR, 0x7b, 0xff, 0x00},
- {SENSOR, S5K83A_PAGE_MAP, 0x05, 0x00},
- {SENSOR, 0x01, 0x50, 0x00},
- {SENSOR, 0x12, 0x20, 0x00},
- {SENSOR, 0x17, 0x40, 0x00},
- {SENSOR, 0x1c, 0x00, 0x00},
- {SENSOR, 0x02, 0x70, 0x00},
- {SENSOR, 0x03, 0x0b, 0x00},
- {SENSOR, 0x04, 0xf0, 0x00},
- {SENSOR, 0x05, 0x0b, 0x00},
- {SENSOR, 0x06, 0x71, 0x00},
- {SENSOR, 0x07, 0xe8, 0x00}, /* 488 */
- {SENSOR, 0x08, 0x02, 0x00},
- {SENSOR, 0x09, 0x88, 0x00}, /* 648 */
- {SENSOR, 0x14, 0x00, 0x00},
- {SENSOR, 0x15, 0x20, 0x00}, /* 32 */
- {SENSOR, 0x19, 0x00, 0x00},
- {SENSOR, 0x1a, 0x98, 0x00}, /* 152 */
- {SENSOR, 0x0f, 0x02, 0x00},
- {SENSOR, 0x10, 0xe5, 0x00}, /* 741 */
- /* normal colors
- (this is value after boot, but after tries can be different) */
- {SENSOR, 0x00, 0x06, 0x00},
-};
-
-static const unsigned char start_s5k83a[][4] = {
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x06, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
- {BRIDGE, M5602_XB_ADC_CTRL, 0xc0, 0x00},
- {BRIDGE, M5602_XB_SENSOR_TYPE, 0x09, 0x00},
- {BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x81, 0x00},
- {BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x01, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x01, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0xe4, 0x00}, /* 484 */
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_VSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
- {BRIDGE, M5602_XB_SIG_INI, 0x02, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x00, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x02, 0x00},
- {BRIDGE, M5602_XB_HSYNC_PARA, 0x7f, 0x00}, /* 639 */
- {BRIDGE, M5602_XB_SIG_INI, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00, 0x00},
- {BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0, 0x00},
-};
#endif
diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c
index bfff1d1c7..9266a5c9a 100644
--- a/drivers/media/usb/gspca/ov534.c
+++ b/drivers/media/usb/gspca/ov534.c
@@ -51,6 +51,7 @@
#define OV534_OP_READ_2 0xf9
#define CTRL_TIMEOUT 500
+#define DEFAULT_FRAME_RATE 30
MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
MODULE_DESCRIPTION("GSPCA/OV534 USB Camera Driver");
@@ -1061,7 +1062,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->cam_mode = ov772x_mode;
cam->nmodes = ARRAY_SIZE(ov772x_mode);
- sd->frame_rate = 30;
+ sd->frame_rate = DEFAULT_FRAME_RATE;
return 0;
}
@@ -1492,10 +1493,8 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
struct sd *sd = (struct sd *) gspca_dev;
if (tpf->numerator == 0 || tpf->denominator == 0)
- /* Set default framerate */
- sd->frame_rate = 30;
+ sd->frame_rate = DEFAULT_FRAME_RATE;
else
- /* Set requested framerate */
sd->frame_rate = tpf->denominator / tpf->numerator;
if (gspca_dev->streaming)
diff --git a/drivers/media/usb/gspca/sn9c20x.c b/drivers/media/usb/gspca/sn9c20x.c
index d0ee89958..10269dad9 100644
--- a/drivers/media/usb/gspca/sn9c20x.c
+++ b/drivers/media/usb/gspca/sn9c20x.c
@@ -92,7 +92,6 @@ struct sd {
struct v4l2_ctrl *jpegqual;
struct work_struct work;
- struct workqueue_struct *work_thread;
u32 pktsz; /* (used by pkt_scan) */
u16 npkt;
@@ -2051,8 +2050,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
if (mode & MODE_JPEG) {
sd->pktsz = sd->npkt = 0;
sd->nchg = 0;
- sd->work_thread =
- create_singlethread_workqueue(KBUILD_MODNAME);
}
return gspca_dev->usb_err;
@@ -2070,12 +2067,9 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- if (sd->work_thread != NULL) {
- mutex_unlock(&gspca_dev->usb_lock);
- destroy_workqueue(sd->work_thread);
- mutex_lock(&gspca_dev->usb_lock);
- sd->work_thread = NULL;
- }
+ mutex_unlock(&gspca_dev->usb_lock);
+ flush_work(&sd->work);
+ mutex_lock(&gspca_dev->usb_lock);
}
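+
+/*
+ * The conversion pattern applied here and in the drivers below, as a
+ * sketch with a hypothetical "foo" driver (not from this patch): the
+ * work item goes onto the system workqueue via schedule_work(), so
+ * stopping only needs flush_work() on that one item instead of tearing
+ * down a dedicated single-threaded workqueue:
+ *
+ *	static void foo_work_fn(struct work_struct *w) { ... }
+ *	static DECLARE_WORK(foo_work, foo_work_fn);
+ *
+ *	start: schedule_work(&foo_work);
+ *	stop:  flush_work(&foo_work);
+ */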
static void do_autoexposure(struct gspca_dev *gspca_dev, u16 avg_lum)
@@ -2228,7 +2222,7 @@ static void transfer_check(struct gspca_dev *gspca_dev,
new_qual = sd->jpegqual->maximum;
if (new_qual != curqual) {
sd->jpegqual->cur.val = new_qual;
- queue_work(sd->work_thread, &sd->work);
+ schedule_work(&sd->work);
}
}
} else {
diff --git a/drivers/media/usb/gspca/t613.c b/drivers/media/usb/gspca/t613.c
index e2cc4e5a0..bb52fc1fe 100644
--- a/drivers/media/usb/gspca/t613.c
+++ b/drivers/media/usb/gspca/t613.c
@@ -837,7 +837,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* isoc packet */
int len) /* iso packet length */
{
- struct sd *sd = (struct sd *) gspca_dev;
+ struct sd *sd __maybe_unused = (struct sd *) gspca_dev;
int pkt_type;
if (data[0] == 0x5a) {
diff --git a/drivers/media/usb/gspca/topro.c b/drivers/media/usb/gspca/topro.c
index c028a5c24..15eb069ab 100644
--- a/drivers/media/usb/gspca/topro.c
+++ b/drivers/media/usb/gspca/topro.c
@@ -175,6 +175,8 @@ static const u8 jpeg_q[17] = {
#error "USB buffer too small"
#endif
+#define DEFAULT_FRAME_RATE 30
+
static const u8 rates[] = {30, 20, 15, 10, 7, 5};
static const struct framerates framerates[] = {
{
@@ -4020,7 +4022,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
gspca_dev->cam.mode_framerates = sd->bridge == BRIDGE_TP6800 ?
framerates : framerates_6810;
- sd->framerate = 30; /* default: 30 fps */
+ sd->framerate = DEFAULT_FRAME_RATE;
return 0;
}
@@ -4803,7 +4805,7 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
int fr, i;
if (tpf->numerator == 0 || tpf->denominator == 0)
- sd->framerate = 30;
+ sd->framerate = DEFAULT_FRAME_RATE;
else
sd->framerate = tpf->denominator / tpf->numerator;
diff --git a/drivers/media/usb/gspca/zc3xx.c b/drivers/media/usb/gspca/zc3xx.c
index c5d8ee6fa..5f7254d2b 100644
--- a/drivers/media/usb/gspca/zc3xx.c
+++ b/drivers/media/usb/gspca/zc3xx.c
@@ -53,7 +53,6 @@ struct sd {
struct v4l2_ctrl *jpegqual;
struct work_struct work;
- struct workqueue_struct *work_thread;
u8 reg08; /* webcam compression quality */
@@ -6826,8 +6825,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
return gspca_dev->usb_err;
/* Start the transfer parameters update thread */
- sd->work_thread = create_singlethread_workqueue(KBUILD_MODNAME);
- queue_work(sd->work_thread, &sd->work);
+ schedule_work(&sd->work);
return 0;
}
@@ -6838,12 +6836,9 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- if (sd->work_thread != NULL) {
- mutex_unlock(&gspca_dev->usb_lock);
- destroy_workqueue(sd->work_thread);
- mutex_lock(&gspca_dev->usb_lock);
- sd->work_thread = NULL;
- }
+ mutex_unlock(&gspca_dev->usb_lock);
+ flush_work(&sd->work);
+ mutex_lock(&gspca_dev->usb_lock);
if (!gspca_dev->present)
return;
send_unknown(gspca_dev, sd->sensor);
diff --git a/drivers/media/usb/hackrf/hackrf.c b/drivers/media/usb/hackrf/hackrf.c
index 9e700caf0..b1e229a44 100644
--- a/drivers/media/usb/hackrf/hackrf.c
+++ b/drivers/media/usb/hackrf/hackrf.c
@@ -760,7 +760,7 @@ static void hackrf_return_all_buffers(struct vb2_queue *vq,
static int hackrf_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers,
- unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int *nplanes, unsigned int sizes[], struct device *alloc_devs[])
{
struct hackrf_dev *dev = vb2_get_drv_priv(vq);
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index 08f0ca7aa..a61d8fd63 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -310,10 +310,6 @@ static int hdpvr_probe(struct usb_interface *interface,
init_waitqueue_head(&dev->wait_buffer);
init_waitqueue_head(&dev->wait_data);
- dev->workqueue = create_singlethread_workqueue("hdpvr_buffer");
- if (!dev->workqueue)
- goto error;
-
dev->options = hdpvr_default_options;
if (default_video_input < HDPVR_VIDEO_INPUTS)
@@ -404,9 +400,7 @@ reg_fail:
#endif
error:
if (dev) {
- /* Destroy single thread */
- if (dev->workqueue)
- destroy_workqueue(dev->workqueue);
+ flush_work(&dev->worker);
/* this frees allocated memory */
hdpvr_delete(dev);
}
@@ -427,7 +421,7 @@ static void hdpvr_disconnect(struct usb_interface *interface)
mutex_unlock(&dev->io_mutex);
v4l2_device_disconnect(&dev->v4l2_dev);
msleep(100);
- flush_workqueue(dev->workqueue);
+ flush_work(&dev->worker);
mutex_lock(&dev->io_mutex);
hdpvr_cancel_queue(dev);
mutex_unlock(&dev->io_mutex);
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index ba7f02270..2a3a8b470 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -316,7 +316,7 @@ static int hdpvr_start_streaming(struct hdpvr_device *dev)
dev->status = STATUS_STREAMING;
INIT_WORK(&dev->worker, hdpvr_transmit_buffers);
- queue_work(dev->workqueue, &dev->worker);
+ schedule_work(&dev->worker);
v4l2_dbg(MSG_BUFFER, hdpvr_debug, &dev->v4l2_dev,
"streaming started\n");
@@ -350,7 +350,7 @@ static int hdpvr_stop_streaming(struct hdpvr_device *dev)
wake_up_interruptible(&dev->wait_buffer);
msleep(50);
- flush_workqueue(dev->workqueue);
+ flush_work(&dev->worker);
mutex_lock(&dev->io_mutex);
/* kill the still outstanding urbs */
@@ -1123,7 +1123,7 @@ static void hdpvr_device_release(struct video_device *vdev)
hdpvr_delete(dev);
mutex_lock(&dev->io_mutex);
- destroy_workqueue(dev->workqueue);
+ flush_work(&dev->worker);
mutex_unlock(&dev->io_mutex);
v4l2_device_unregister(&dev->v4l2_dev);
diff --git a/drivers/media/usb/hdpvr/hdpvr.h b/drivers/media/usb/hdpvr/hdpvr.h
index 78e815441..a12e0af1d 100644
--- a/drivers/media/usb/hdpvr/hdpvr.h
+++ b/drivers/media/usb/hdpvr/hdpvr.h
@@ -107,8 +107,6 @@ struct hdpvr_device {
/* waitqueue for data */
wait_queue_head_t wait_data;
/**/
- struct workqueue_struct *workqueue;
- /**/
struct work_struct worker;
/* current stream owner */
struct v4l2_fh *owner;
diff --git a/drivers/media/usb/msi2500/msi2500.c b/drivers/media/usb/msi2500/msi2500.c
index 2d3303368..e7f167d44 100644
--- a/drivers/media/usb/msi2500/msi2500.c
+++ b/drivers/media/usb/msi2500/msi2500.c
@@ -618,7 +618,7 @@ static int msi2500_querycap(struct file *file, void *fh,
static int msi2500_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers,
unsigned int *nplanes, unsigned int sizes[],
- void *alloc_ctxs[])
+ struct device *alloc_devs[])
{
struct msi2500_dev *dev = vb2_get_drv_priv(vq);
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index 203418f87..c9c5f88de 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -2856,11 +2856,15 @@ static void pvr2_subdev_set_control(struct pvr2_hdw *hdw, int id,
const char *name, int val)
{
struct v4l2_control ctrl;
+ struct v4l2_subdev *sd;
+
pvr2_trace(PVR2_TRACE_CHIPS, "subdev v4l2 %s=%d", name, val);
memset(&ctrl, 0, sizeof(ctrl));
ctrl.id = id;
ctrl.value = val;
- v4l2_device_call_all(&hdw->v4l2_dev, 0, core, s_ctrl, &ctrl);
+
+ v4l2_device_for_each_subdev(sd, &hdw->v4l2_dev)
+ v4l2_s_ctrl(NULL, sd->ctrl_handler, &ctrl);
}
#define PVR2_SUBDEV_SET_CONTROL(hdw, id, lab) \
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index 18aed5dd3..b51b27a3f 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -573,7 +573,7 @@ static void pwc_video_release(struct v4l2_device *v)
static int queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct pwc_device *pdev = vb2_get_drv_priv(vq);
int size;
@@ -1118,8 +1118,10 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
return 0;
+#ifdef CONFIG_USB_PWC_INPUT_EVDEV
err_video_unreg:
video_unregister_device(&pdev->vdev);
+#endif
err_unregister_v4l2_dev:
v4l2_device_unregister(&pdev->v4l2_dev);
err_free_controls:
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index 66b97da7d..e3882eadb 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -662,7 +662,7 @@ static void s2255_fillbuff(struct s2255_vc *vc,
static int queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct s2255_vc *vc = vb2_get_drv_priv(vq);
if (*nbuffers < S2255_MIN_BUFS)
diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
index 77131fd61..5fab3bee8 100644
--- a/drivers/media/usb/stk1160/stk1160-v4l.c
+++ b/drivers/media/usb/stk1160/stk1160-v4l.c
@@ -666,7 +666,7 @@ static const struct v4l2_ioctl_ops stk1160_ioctl_ops = {
*/
static int queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct stk1160 *dev = vb2_get_drv_priv(vq);
unsigned long size;
@@ -680,6 +680,9 @@ static int queue_setup(struct vb2_queue *vq,
*nbuffers = clamp_t(unsigned int, *nbuffers,
STK1160_MIN_VIDEO_BUFFERS, STK1160_MAX_VIDEO_BUFFERS);
+ if (*nplanes)
+ return sizes[0] < size ? -EINVAL : 0;
+
/* This means a packed colorformat */
*nplanes = 1;
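+
+/*
+ * For reference, the vb2 contract behind the check added above (a
+ * sketch, not driver code): with the 4.8 prototype the final parameter
+ * is struct device *alloc_devs[] instead of void *alloc_ctxs[], and a
+ * nonzero *nplanes means VIDIOC_CREATE_BUFS passed caller-chosen
+ * sizes, which queue_setup must only validate:
+ *
+ *	if (*nplanes)
+ *		return sizes[0] < size ? -EINVAL : 0;
+ *	*nplanes = 1;
+ *	sizes[0] = size;
+ *	return 0;
+ */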
diff --git a/drivers/media/usb/usbtv/usbtv-audio.c b/drivers/media/usb/usbtv/usbtv-audio.c
index 5dab02432..1965ff1b1 100644
--- a/drivers/media/usb/usbtv/usbtv-audio.c
+++ b/drivers/media/usb/usbtv/usbtv-audio.c
@@ -1,13 +1,6 @@
/*
- * Fushicai USBTV007 Audio-Video Grabber Driver
- *
- * Product web site:
- * http://www.fushicai.com/products_detail/&productId=d05449ee-b690-42f9-a661-aa7353894bed.html
- *
* Copyright (c) 2013 Federico Simoncelli
* All rights reserved.
- * No physical hardware was harmed running Windows during the
- * reverse-engineering activity
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -20,6 +13,27 @@
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL").
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * Fushicai USBTV007 Audio-Video Grabber Driver
+ *
+ * Product web site:
+ * http://www.fushicai.com/products_detail/&productId=d05449ee-b690-42f9-a661-aa7353894bed.html
+ *
+ * No physical hardware was harmed running Windows during the
+ * reverse-engineering activity
*/
#include <sound/core.h>
diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c
index 29428bef2..dc76fd41e 100644
--- a/drivers/media/usb/usbtv/usbtv-core.c
+++ b/drivers/media/usb/usbtv/usbtv-core.c
@@ -1,19 +1,6 @@
/*
- * Fushicai USBTV007 Audio-Video Grabber Driver
- *
- * Product web site:
- * http://www.fushicai.com/products_detail/&productId=d05449ee-b690-42f9-a661-aa7353894bed.html
- *
- * Following LWN articles were very useful in construction of this driver:
- * Video4Linux2 API series: http://lwn.net/Articles/203924/
- * videobuf2 API explanation: http://lwn.net/Articles/447435/
- * Thanks go to Jonathan Corbet for providing this quality documentation.
- * He is awesome.
- *
* Copyright (c) 2013 Lubomir Rintel
* All rights reserved.
- * No physical hardware was harmed running Windows during the
- * reverse-engineering activity
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,6 +13,33 @@
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL").
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * Fushicai USBTV007 Audio-Video Grabber Driver
+ *
+ * Product web site:
+ * http://www.fushicai.com/products_detail/&productId=d05449ee-b690-42f9-a661-aa7353894bed.html
+ *
+ * Following LWN articles were very useful in construction of this driver:
+ * Video4Linux2 API series: http://lwn.net/Articles/203924/
+ * videobuf2 API explanation: http://lwn.net/Articles/447435/
+ * Thanks go to Jonathan Corbet for providing this quality documentation.
+ * He is awesome.
+ *
+ * No physical hardware was harmed running Windows during the
+ * reverse-engineering activity
*/
#include "usbtv.h"
diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c
index f6cfad465..2a089756c 100644
--- a/drivers/media/usb/usbtv/usbtv-video.c
+++ b/drivers/media/usb/usbtv/usbtv-video.c
@@ -1,19 +1,6 @@
/*
- * Fushicai USBTV007 Audio-Video Grabber Driver
- *
- * Product web site:
- * http://www.fushicai.com/products_detail/&productId=d05449ee-b690-42f9-a661-aa7353894bed.html
- *
- * Following LWN articles were very useful in construction of this driver:
- * Video4Linux2 API series: http://lwn.net/Articles/203924/
- * videobuf2 API explanation: http://lwn.net/Articles/447435/
- * Thanks go to Jonathan Corbet for providing this quality documentation.
- * He is awesome.
- *
* Copyright (c) 2013 Lubomir Rintel
* All rights reserved.
- * No physical hardware was harmed running Windows during the
- * reverse-engineering activity
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,6 +13,33 @@
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL").
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * Fushicai USBTV007 Audio-Video Grabber Driver
+ *
+ * Product web site:
+ * http://www.fushicai.com/products_detail/&productId=d05449ee-b690-42f9-a661-aa7353894bed.html
+ *
+ * Following LWN articles were very useful in construction of this driver:
+ * Video4Linux2 API series: http://lwn.net/Articles/203924/
+ * videobuf2 API explanation: http://lwn.net/Articles/447435/
+ * Thanks go to Jonathan Corbet for providing this quality documentation.
+ * He is awesome.
+ *
+ * No physical hardware was harmed running Windows during the
+ * reverse-engineering activity
*/
#include <media/v4l2-ioctl.h>
@@ -251,8 +265,23 @@ static int usbtv_setup_capture(struct usbtv *usbtv)
/* Copy data from chunk into a frame buffer, deinterlacing the data
* into every second line. Unfortunately, they don't align nicely into
* 720 pixel lines, as the chunk is 240 words long, which is 480 pixels.
- * Therefore, we break down the chunk into two halves before copyting,
- * so that we can interleave a line if needed. */
+ * Therefore, we break down the chunk into two halves before copying,
+ * so that we can interleave a line if needed.
+ *
+ * Each "chunk" is 240 words; a word in this context equals 4 bytes.
+ * The image format is YUYV/YUV 4:2:2, consisting of Y Cr Y Cb and
+ * defining two pixels: Cr and Cb are shared between the two pixels,
+ * but each has a separate Y value. Thus, the 240 words equal 480
+ * pixels. It therefore takes 1.5 chunks to make a 720-pixel-wide line
+ * for the frame. The image is interlaced, so there is a "scan" of the
+ * odd lines followed by a "scan" of the even-numbered lines.
+ *
+ * The following code writes the chunks in the correct sequence,
+ * skipping rows based on the "odd" value:
+ * line 1: chunk[0][ 0..479] chunk[0][480..959] chunk[1][ 0..479]
+ * line 3: chunk[1][480..959] chunk[2][ 0..479] chunk[2][480..959]
+ * ...etc.
+ */
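+
+/*
+ * A sketch of that mapping (hypothetical helper, not called by the
+ * driver): each half-chunk is 240 pixels and three of them make one
+ * 720-pixel line, so half-chunk "part" lands on field line part / 3 at
+ * pixel offset (part % 3) * 240; the field ("odd") then selects every
+ * second frame line.
+ */
+static inline int usbtv_part_to_frame_line(int chunk_no, int half, int odd)
+{
+	int part = chunk_no * 2 + half;
+
+	return (part / 3) * 2 + !odd;
+}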
static void usbtv_chunk_to_vbuf(u32 *frame, __be32 *src, int chunk_no, int odd)
{
int half;
@@ -608,7 +637,7 @@ static struct v4l2_file_operations usbtv_fops = {
static int usbtv_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers,
- unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int *nplanes, unsigned int sizes[], struct device *alloc_devs[])
{
struct usbtv *usbtv = vb2_get_drv_priv(vq);
unsigned size = USBTV_CHUNK * usbtv->n_chunks * 2 * sizeof(u32);
diff --git a/drivers/media/usb/usbtv/usbtv.h b/drivers/media/usb/usbtv/usbtv.h
index 161b38d5c..011f9fdc7 100644
--- a/drivers/media/usb/usbtv/usbtv.h
+++ b/drivers/media/usb/usbtv/usbtv.h
@@ -1,10 +1,6 @@
/*
- * Fushicai USBTV007 Audio-Video Grabber Driver
- *
* Copyright (c) 2013 Lubomir Rintel
* All rights reserved.
- * No physical hardware was harmed running Windows during the
- * reverse-engineering activity
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -17,6 +13,24 @@
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL").
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * Fushicai USBTV007 Audio-Video Grabber Driver
+ *
+ * No physical hardware was harmed running Windows during the
+ * reverse-engineering activity
*/
#include <linux/module.h>
diff --git a/drivers/media/usb/usbvision/usbvision-core.c b/drivers/media/usb/usbvision/usbvision-core.c
index 1ea04e75f..52ac43915 100644
--- a/drivers/media/usb/usbvision/usbvision-core.c
+++ b/drivers/media/usb/usbvision/usbvision-core.c
@@ -88,11 +88,6 @@ MODULE_PARM_DESC(adjust_y_offset, "adjust Y offset display [core]");
#define DBG_SCRATCH (1 << 4)
#define DBG_FUNC (1 << 5)
-static const int max_imgwidth = MAX_FRAME_WIDTH;
-static const int max_imgheight = MAX_FRAME_HEIGHT;
-static const int min_imgwidth = MIN_FRAME_WIDTH;
-static const int min_imgheight = MIN_FRAME_HEIGHT;
-
/* The value of 'scratch_buf_size' affects quality of the picture
* in many ways. Shorter buffers may cause loss of data when client
* is too slow. Larger buffers are memory-consuming and take longer
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index ad2f3d27b..c8b4eb2ee 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -188,12 +188,10 @@ static ssize_t show_hue(struct device *cd,
{
struct video_device *vdev = to_video_device(cd);
struct usb_usbvision *usbvision = video_get_drvdata(vdev);
- struct v4l2_control ctrl;
- ctrl.id = V4L2_CID_HUE;
- ctrl.value = 0;
- if (usbvision->user)
- call_all(usbvision, core, g_ctrl, &ctrl);
- return sprintf(buf, "%d\n", ctrl.value);
+ s32 val = v4l2_ctrl_g_ctrl(v4l2_ctrl_find(&usbvision->hdl,
+ V4L2_CID_HUE));
+
+ return sprintf(buf, "%d\n", val);
}
static DEVICE_ATTR(hue, S_IRUGO, show_hue, NULL);
@@ -202,12 +200,10 @@ static ssize_t show_contrast(struct device *cd,
{
struct video_device *vdev = to_video_device(cd);
struct usb_usbvision *usbvision = video_get_drvdata(vdev);
- struct v4l2_control ctrl;
- ctrl.id = V4L2_CID_CONTRAST;
- ctrl.value = 0;
- if (usbvision->user)
- call_all(usbvision, core, g_ctrl, &ctrl);
- return sprintf(buf, "%d\n", ctrl.value);
+ s32 val = v4l2_ctrl_g_ctrl(v4l2_ctrl_find(&usbvision->hdl,
+ V4L2_CID_CONTRAST));
+
+ return sprintf(buf, "%d\n", val);
}
static DEVICE_ATTR(contrast, S_IRUGO, show_contrast, NULL);
@@ -216,12 +212,10 @@ static ssize_t show_brightness(struct device *cd,
{
struct video_device *vdev = to_video_device(cd);
struct usb_usbvision *usbvision = video_get_drvdata(vdev);
- struct v4l2_control ctrl;
- ctrl.id = V4L2_CID_BRIGHTNESS;
- ctrl.value = 0;
- if (usbvision->user)
- call_all(usbvision, core, g_ctrl, &ctrl);
- return sprintf(buf, "%d\n", ctrl.value);
+ s32 val = v4l2_ctrl_g_ctrl(v4l2_ctrl_find(&usbvision->hdl,
+ V4L2_CID_BRIGHTNESS));
+
+ return sprintf(buf, "%d\n", val);
}
static DEVICE_ATTR(brightness, S_IRUGO, show_brightness, NULL);
@@ -230,12 +224,10 @@ static ssize_t show_saturation(struct device *cd,
{
struct video_device *vdev = to_video_device(cd);
struct usb_usbvision *usbvision = video_get_drvdata(vdev);
- struct v4l2_control ctrl;
- ctrl.id = V4L2_CID_SATURATION;
- ctrl.value = 0;
- if (usbvision->user)
- call_all(usbvision, core, g_ctrl, &ctrl);
- return sprintf(buf, "%d\n", ctrl.value);
+ s32 val = v4l2_ctrl_g_ctrl(v4l2_ctrl_find(&usbvision->hdl,
+ V4L2_CID_SATURATION));
+
+ return sprintf(buf, "%d\n", val);
}
static DEVICE_ATTR(saturation, S_IRUGO, show_saturation, NULL);
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 451e84e96..302e284a9 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -1674,7 +1674,7 @@ static void uvc_delete(struct uvc_device *dev)
if (dev->vdev.dev)
v4l2_device_unregister(&dev->vdev);
#ifdef CONFIG_MEDIA_CONTROLLER
- if (media_devnode_is_registered(&dev->mdev.devnode))
+ if (media_devnode_is_registered(dev->mdev.devnode))
media_device_unregister(&dev->mdev);
media_device_cleanup(&dev->mdev);
#endif
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index 543947227..773fefb52 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -71,7 +71,7 @@ static void uvc_queue_return_buffers(struct uvc_video_queue *queue,
static int uvc_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
struct uvc_streaming *stream = uvc_queue_to_stream(queue);
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index c04bc6afb..05eed4be2 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -142,6 +142,21 @@ static __u32 uvc_try_frame_interval(struct uvc_frame *frame, __u32 interval)
return interval;
}
+static __u32 uvc_v4l2_get_bytesperline(const struct uvc_format *format,
+ const struct uvc_frame *frame)
+{
+ switch (format->fcc) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_YVU420:
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_M420:
+ return frame->wWidth;
+
+ default:
+ return format->bpp * frame->wWidth / 8;
+ }
+}
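+
+/*
+ * Worked example (values assumed for illustration): for a 640-pixel
+ * wide frame, packed YUYV with bpp = 16 gives 640 * 16 / 8 = 1280
+ * bytes per line, while the (semi)planar NV12/YVU420/YUV420/M420
+ * cases above report only the 640-byte Y-plane stride.
+ */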
+
static int uvc_v4l2_try_format(struct uvc_streaming *stream,
struct v4l2_format *fmt, struct uvc_streaming_control *probe,
struct uvc_format **uvc_format, struct uvc_frame **uvc_frame)
@@ -245,7 +260,7 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
fmt->fmt.pix.width = frame->wWidth;
fmt->fmt.pix.height = frame->wHeight;
fmt->fmt.pix.field = V4L2_FIELD_NONE;
- fmt->fmt.pix.bytesperline = format->bpp * frame->wWidth / 8;
+ fmt->fmt.pix.bytesperline = uvc_v4l2_get_bytesperline(format, frame);
fmt->fmt.pix.sizeimage = probe->dwMaxVideoFrameSize;
fmt->fmt.pix.colorspace = format->colorspace;
fmt->fmt.pix.priv = 0;
@@ -282,7 +297,7 @@ static int uvc_v4l2_get_format(struct uvc_streaming *stream,
fmt->fmt.pix.width = frame->wWidth;
fmt->fmt.pix.height = frame->wHeight;
fmt->fmt.pix.field = V4L2_FIELD_NONE;
- fmt->fmt.pix.bytesperline = format->bpp * frame->wWidth / 8;
+ fmt->fmt.pix.bytesperline = uvc_v4l2_get_bytesperline(format, frame);
fmt->fmt.pix.sizeimage = stream->ctrl.dwMaxVideoFrameSize;
fmt->fmt.pix.colorspace = format->colorspace;
fmt->fmt.pix.priv = 0;
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 075a0fe77..b5589d5f5 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -1470,6 +1470,7 @@ static unsigned int uvc_endpoint_max_bpi(struct usb_device *dev,
switch (dev->speed) {
case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
case USB_SPEED_HIGH:
psize = usb_endpoint_maxp(&ep->desc);
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 8b321e0aa..f7abfad9a 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -2606,14 +2606,6 @@ int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc)
}
EXPORT_SYMBOL(v4l2_queryctrl);
-int v4l2_subdev_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
-{
- if (qc->id & (V4L2_CTRL_FLAG_NEXT_CTRL | V4L2_CTRL_FLAG_NEXT_COMPOUND))
- return -EINVAL;
- return v4l2_queryctrl(sd->ctrl_handler, qc);
-}
-EXPORT_SYMBOL(v4l2_subdev_queryctrl);
-
/* Implement VIDIOC_QUERYMENU */
int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm)
{
@@ -2657,13 +2649,6 @@ int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm)
}
EXPORT_SYMBOL(v4l2_querymenu);
-int v4l2_subdev_querymenu(struct v4l2_subdev *sd, struct v4l2_querymenu *qm)
-{
- return v4l2_querymenu(sd->ctrl_handler, qm);
-}
-EXPORT_SYMBOL(v4l2_subdev_querymenu);
-
-
/* Some general notes on the atomic requirements of VIDIOC_G/TRY/S_EXT_CTRLS:
@@ -2890,12 +2875,6 @@ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs
}
EXPORT_SYMBOL(v4l2_g_ext_ctrls);
-int v4l2_subdev_g_ext_ctrls(struct v4l2_subdev *sd, struct v4l2_ext_controls *cs)
-{
- return v4l2_g_ext_ctrls(sd->ctrl_handler, cs);
-}
-EXPORT_SYMBOL(v4l2_subdev_g_ext_ctrls);
-
/* Helper function to get a single control */
static int get_ctrl(struct v4l2_ctrl *ctrl, struct v4l2_ext_control *c)
{
@@ -2941,12 +2920,6 @@ int v4l2_g_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_control *control)
}
EXPORT_SYMBOL(v4l2_g_ctrl);
-int v4l2_subdev_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *control)
-{
- return v4l2_g_ctrl(sd->ctrl_handler, control);
-}
-EXPORT_SYMBOL(v4l2_subdev_g_ctrl);
-
s32 v4l2_ctrl_g_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_ext_control c;
@@ -3194,18 +3167,6 @@ int v4l2_s_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
}
EXPORT_SYMBOL(v4l2_s_ext_ctrls);
-int v4l2_subdev_try_ext_ctrls(struct v4l2_subdev *sd, struct v4l2_ext_controls *cs)
-{
- return try_set_ext_ctrls(NULL, sd->ctrl_handler, cs, false);
-}
-EXPORT_SYMBOL(v4l2_subdev_try_ext_ctrls);
-
-int v4l2_subdev_s_ext_ctrls(struct v4l2_subdev *sd, struct v4l2_ext_controls *cs)
-{
- return try_set_ext_ctrls(NULL, sd->ctrl_handler, cs, true);
-}
-EXPORT_SYMBOL(v4l2_subdev_s_ext_ctrls);
-
/* Helper function for VIDIOC_S_CTRL compatibility */
static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags)
{
@@ -3268,12 +3229,6 @@ int v4l2_s_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
}
EXPORT_SYMBOL(v4l2_s_ctrl);
-int v4l2_subdev_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *control)
-{
- return v4l2_s_ctrl(NULL, sd->ctrl_handler, control);
-}
-EXPORT_SYMBOL(v4l2_subdev_s_ctrl);
-
int __v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val)
{
lockdep_assert_held(ctrl->handler->lock);
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index 70b559d7c..e6da353b3 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -812,40 +812,6 @@ static int video_register_media_controller(struct video_device *vdev, int type)
return 0;
}
-/**
- * __video_register_device - register video4linux devices
- * @vdev: video device structure we want to register
- * @type: type of device to register
- * @nr: which device node number (0 == /dev/video0, 1 == /dev/video1, ...
- * -1 == first free)
- * @warn_if_nr_in_use: warn if the desired device node number
- * was already in use and another number was chosen instead.
- * @owner: module that owns the video device node
- *
- * The registration code assigns minor numbers and device node numbers
- * based on the requested type and registers the new device node with
- * the kernel.
- *
- * This function assumes that struct video_device was zeroed when it
- * was allocated and does not contain any stale date.
- *
- * An error is returned if no free minor or device node number could be
- * found, or if the registration of the device node failed.
- *
- * Zero is returned on success.
- *
- * Valid types are
- *
- * %VFL_TYPE_GRABBER - A frame grabber
- *
- * %VFL_TYPE_VBI - Vertical blank data (undecoded)
- *
- * %VFL_TYPE_RADIO - A radio card
- *
- * %VFL_TYPE_SUBDEV - A subdevice
- *
- * %VFL_TYPE_SDR - Software Defined Radio
- */
int __video_register_device(struct video_device *vdev, int type, int nr,
int warn_if_nr_in_use, struct module *owner)
{
diff --git a/drivers/media/v4l2-core/v4l2-flash-led-class.c b/drivers/media/v4l2-core/v4l2-flash-led-class.c
index fc5ff8b21..ae7544d54 100644
--- a/drivers/media/v4l2-core/v4l2-flash-led-class.c
+++ b/drivers/media/v4l2-core/v4l2-flash-led-class.c
@@ -609,14 +609,7 @@ static const struct v4l2_subdev_internal_ops v4l2_flash_subdev_internal_ops = {
.close = v4l2_flash_close,
};
-static const struct v4l2_subdev_core_ops v4l2_flash_core_ops = {
- .queryctrl = v4l2_subdev_queryctrl,
- .querymenu = v4l2_subdev_querymenu,
-};
-
-static const struct v4l2_subdev_ops v4l2_flash_subdev_ops = {
- .core = &v4l2_flash_core_ops,
-};
+static const struct v4l2_subdev_ops v4l2_flash_subdev_ops;
struct v4l2_flash *v4l2_flash_init(
struct device *dev, struct device_node *of_node,
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 528390f33..51a0fa144 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -2541,14 +2541,14 @@ static struct v4l2_ioctl_info v4l2_ioctls[] = {
IOCTL_INFO_FNC(VIDIOC_DBG_S_REGISTER, v4l_dbg_s_register, v4l_print_dbg_register, 0),
IOCTL_INFO_FNC(VIDIOC_DBG_G_REGISTER, v4l_dbg_g_register, v4l_print_dbg_register, 0),
IOCTL_INFO_FNC(VIDIOC_S_HW_FREQ_SEEK, v4l_s_hw_freq_seek, v4l_print_hw_freq_seek, INFO_FL_PRIO),
- IOCTL_INFO_STD(VIDIOC_S_DV_TIMINGS, vidioc_s_dv_timings, v4l_print_dv_timings, INFO_FL_PRIO),
+ IOCTL_INFO_STD(VIDIOC_S_DV_TIMINGS, vidioc_s_dv_timings, v4l_print_dv_timings, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_dv_timings, bt.flags)),
IOCTL_INFO_STD(VIDIOC_G_DV_TIMINGS, vidioc_g_dv_timings, v4l_print_dv_timings, 0),
IOCTL_INFO_FNC(VIDIOC_DQEVENT, v4l_dqevent, v4l_print_event, 0),
IOCTL_INFO_FNC(VIDIOC_SUBSCRIBE_EVENT, v4l_subscribe_event, v4l_print_event_subscription, 0),
IOCTL_INFO_FNC(VIDIOC_UNSUBSCRIBE_EVENT, v4l_unsubscribe_event, v4l_print_event_subscription, 0),
IOCTL_INFO_FNC(VIDIOC_CREATE_BUFS, v4l_create_bufs, v4l_print_create_buffers, INFO_FL_PRIO | INFO_FL_QUEUE),
IOCTL_INFO_FNC(VIDIOC_PREPARE_BUF, v4l_prepare_buf, v4l_print_buffer, INFO_FL_QUEUE),
- IOCTL_INFO_STD(VIDIOC_ENUM_DV_TIMINGS, vidioc_enum_dv_timings, v4l_print_enum_dv_timings, 0),
+ IOCTL_INFO_STD(VIDIOC_ENUM_DV_TIMINGS, vidioc_enum_dv_timings, v4l_print_enum_dv_timings, INFO_FL_CLEAR(v4l2_enum_dv_timings, pad)),
IOCTL_INFO_STD(VIDIOC_QUERY_DV_TIMINGS, vidioc_query_dv_timings, v4l_print_dv_timings, 0),
IOCTL_INFO_STD(VIDIOC_DV_TIMINGS_CAP, vidioc_dv_timings_cap, v4l_print_dv_timings_cap, INFO_FL_CLEAR(v4l2_dv_timings_cap, type)),
IOCTL_INFO_FNC(VIDIOC_ENUM_FREQ_BANDS, v4l_enum_freq_bands, v4l_print_freq_band, 0),
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 953eab08e..34a1e7c8b 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -621,16 +621,6 @@ void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops)
}
EXPORT_SYMBOL(v4l2_subdev_init);
-/**
- * v4l2_subdev_notify_event() - Delivers event notification for subdevice
- * @sd: The subdev for which to deliver the event
- * @ev: The event to deliver
- *
- * Will deliver the specified event to all userspace event listeners which are
- * subscribed to the v4l2 subdev event queue as well as to the bridge driver
- * using the notify callback. The notification type for the notify callback
- * will be V4L2_DEVICE_NOTIFY_EVENT.
- */
void v4l2_subdev_notify_event(struct v4l2_subdev *sd,
const struct v4l2_event *ev)
{
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 633fc1ab1..ca8ffeb56 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -206,8 +206,9 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
for (plane = 0; plane < vb->num_planes; ++plane) {
unsigned long size = PAGE_ALIGN(vb->planes[plane].length);
- mem_priv = call_ptr_memop(vb, alloc, q->alloc_ctx[plane],
- size, dma_dir, q->gfp_flags);
+ mem_priv = call_ptr_memop(vb, alloc,
+ q->alloc_devs[plane] ? : q->dev,
+ q->dma_attrs, size, dma_dir, q->gfp_flags);
if (IS_ERR_OR_NULL(mem_priv))
goto free;
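
The `?:` with an empty middle operand used above is the GNU C "Elvis" extension: `a ?: b` means `a ? a : b` with `a` evaluated only once, so each plane uses its own allocation device when the driver set one and falls back to the queue's device otherwise. A minimal sketch of the same selection, factored into a hypothetical helper:

/* Hypothetical helper illustrating the alloc_devs fallback above. */
static struct device *vb2_plane_alloc_dev(struct vb2_queue *q,
					  unsigned int plane)
{
	/* GNU extension: a ?: b == a ? a : b, with a evaluated once. */
	return q->alloc_devs[plane] ?: q->dev;
}
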
@@ -737,7 +738,7 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
*/
num_buffers = min_t(unsigned int, *count, VB2_MAX_FRAME);
num_buffers = max_t(unsigned int, num_buffers, q->min_buffers_needed);
- memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
+ memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
q->memory = memory;
/*
@@ -745,7 +746,7 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
* Driver also sets the size and allocator context for each plane.
*/
ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes,
- plane_sizes, q->alloc_ctx);
+ plane_sizes, q->alloc_devs);
if (ret)
return ret;
@@ -778,7 +779,7 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
num_planes = 0;
ret = call_qop(q, queue_setup, q, &num_buffers,
- &num_planes, plane_sizes, q->alloc_ctx);
+ &num_planes, plane_sizes, q->alloc_devs);
if (!ret && allocated_buffers < num_buffers)
ret = -ENOMEM;
@@ -844,7 +845,7 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
}
if (!q->num_buffers) {
- memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
+ memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
q->memory = memory;
q->waiting_for_buffers = !q->is_output;
}
@@ -861,7 +862,7 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
* buffer and their sizes are acceptable
*/
ret = call_qop(q, queue_setup, q, &num_buffers,
- &num_planes, plane_sizes, q->alloc_ctx);
+ &num_planes, plane_sizes, q->alloc_devs);
if (ret)
return ret;
@@ -884,7 +885,7 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
* queue driver has set up
*/
ret = call_qop(q, queue_setup, q, &num_buffers,
- &num_planes, plane_sizes, q->alloc_ctx);
+ &num_planes, plane_sizes, q->alloc_devs);
if (!ret && allocated_buffers < num_buffers)
ret = -ENOMEM;
@@ -1131,9 +1132,10 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const void *pb)
vb->planes[plane].data_offset = 0;
/* Acquire each plane's memory */
- mem_priv = call_ptr_memop(vb, get_userptr, q->alloc_ctx[plane],
- planes[plane].m.userptr,
- planes[plane].length, dma_dir);
+ mem_priv = call_ptr_memop(vb, get_userptr,
+ q->alloc_devs[plane] ? : q->dev,
+ planes[plane].m.userptr,
+ planes[plane].length, dma_dir);
if (IS_ERR_OR_NULL(mem_priv)) {
dprintk(1, "failed acquiring userspace "
"memory for plane %d\n", plane);
@@ -1256,8 +1258,8 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const void *pb)
/* Acquire each plane's memory */
mem_priv = call_ptr_memop(vb, attach_dmabuf,
- q->alloc_ctx[plane], dbuf, planes[plane].length,
- dma_dir);
+ q->alloc_devs[plane] ? : q->dev,
+ dbuf, planes[plane].length, dma_dir);
if (IS_ERR(mem_priv)) {
dprintk(1, "failed to attach dmabuf\n");
ret = PTR_ERR(mem_priv);
@@ -1845,7 +1847,7 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
* Make sure to call buf_finish for any queued buffers. Normally
* that's done in dqbuf, but that's not going to happen when we
* cancel the whole queue. Note: this code belongs here, not in
- * __vb2_dqbuf() since in vb2_internal_dqbuf() there is a critical
+ * __vb2_dqbuf() since in vb2_core_dqbuf() there is a critical
* call to __fill_user_buffer() after buf_finish(). That order can't
* be changed, so we can't move the buf_finish() to __vb2_dqbuf().
*/
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 5361197f3..59fa204b1 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -21,18 +21,13 @@
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>
-struct vb2_dc_conf {
- struct device *dev;
- struct dma_attrs attrs;
-};
-
struct vb2_dc_buf {
struct device *dev;
void *vaddr;
unsigned long size;
void *cookie;
dma_addr_t dma_addr;
- struct dma_attrs attrs;
+ unsigned long attrs;
enum dma_data_direction dma_dir;
struct sg_table *dma_sgt;
struct frame_vector *vec;
@@ -135,32 +130,32 @@ static void vb2_dc_put(void *buf_priv)
kfree(buf->sgt_base);
}
dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
- &buf->attrs);
+ buf->attrs);
put_device(buf->dev);
kfree(buf);
}
-static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size,
- enum dma_data_direction dma_dir, gfp_t gfp_flags)
+static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
+ unsigned long size, enum dma_data_direction dma_dir,
+ gfp_t gfp_flags)
{
- struct vb2_dc_conf *conf = alloc_ctx;
- struct device *dev = conf->dev;
struct vb2_dc_buf *buf;
buf = kzalloc(sizeof *buf, GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
- buf->attrs = conf->attrs;
+ if (attrs)
+ buf->attrs = attrs;
buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
- GFP_KERNEL | gfp_flags, &buf->attrs);
+ GFP_KERNEL | gfp_flags, buf->attrs);
if (!buf->cookie) {
dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
kfree(buf);
return ERR_PTR(-ENOMEM);
}
- if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->attrs))
+ if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
buf->vaddr = buf->cookie;
/* Prevent the device from being released while the buffer is used */
@@ -194,7 +189,7 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
vma->vm_pgoff = 0;
ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
- buf->dma_addr, buf->size, &buf->attrs);
+ buf->dma_addr, buf->size, buf->attrs);
if (ret) {
pr_err("Remapping memory failed, error: %d\n", ret);
@@ -377,7 +372,7 @@ static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
}
ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
- buf->size, &buf->attrs);
+ buf->size, buf->attrs);
if (ret < 0) {
dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
kfree(sgt);
@@ -426,15 +421,12 @@ static void vb2_dc_put_userptr(void *buf_priv)
struct page **pages;
if (sgt) {
- DEFINE_DMA_ATTRS(attrs);
-
- dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
/*
* No need to sync to CPU, it's already synced to the CPU
* since the finish() memop will have been called before this.
*/
dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, &attrs);
+ buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
pages = frame_vector_pages(buf->vec);
/* sgt should exist only if vector contains pages... */
BUG_ON(IS_ERR(pages));
@@ -478,10 +470,9 @@ static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn
}
#endif
-static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
+static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
unsigned long size, enum dma_data_direction dma_dir)
{
- struct vb2_dc_conf *conf = alloc_ctx;
struct vb2_dc_buf *buf;
struct frame_vector *vec;
unsigned long offset;
@@ -490,9 +481,6 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
struct sg_table *sgt;
unsigned long contig_size;
unsigned long dma_align = dma_get_cache_alignment();
- DEFINE_DMA_ATTRS(attrs);
-
- dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
/* Only cache aligned DMA transfers are reliable */
if (!IS_ALIGNED(vaddr | size, dma_align)) {
@@ -509,7 +497,7 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
if (!buf)
return ERR_PTR(-ENOMEM);
- buf->dev = conf->dev;
+ buf->dev = dev;
buf->dma_dir = dma_dir;
offset = vaddr & ~PAGE_MASK;
@@ -554,7 +542,7 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
* prepare() memop is called.
*/
sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, &attrs);
+ buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
if (sgt->nents <= 0) {
pr_err("failed to map scatterlist\n");
ret = -EIO;
@@ -578,7 +566,7 @@ out:
fail_map_sg:
dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, &attrs);
+ buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
fail_sgt_init:
sg_free_table(sgt);
@@ -676,10 +664,9 @@ static void vb2_dc_detach_dmabuf(void *mem_priv)
kfree(buf);
}
-static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
+static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
unsigned long size, enum dma_data_direction dma_dir)
{
- struct vb2_dc_conf *conf = alloc_ctx;
struct vb2_dc_buf *buf;
struct dma_buf_attachment *dba;
@@ -690,7 +677,7 @@ static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
if (!buf)
return ERR_PTR(-ENOMEM);
- buf->dev = conf->dev;
+ buf->dev = dev;
/* create attachment for the dmabuf with the user device */
dba = dma_buf_attach(dbuf, buf->dev);
if (IS_ERR(dba)) {
@@ -729,29 +716,58 @@ const struct vb2_mem_ops vb2_dma_contig_memops = {
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
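
The changes in this file follow the 4.8 DMA-mapping API conversion from struct dma_attrs to a plain unsigned long bitmask; a minimal before/after sketch of the pattern, using only calls that appear in this diff:

/* Old style: attributes carried in a struct and set via helpers. */
DEFINE_DMA_ATTRS(attrs);
dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, &attrs);

/* New style: attributes are a bitmask passed by value. */
dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir,
		   DMA_ATTR_SKIP_CPU_SYNC);
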
-void *vb2_dma_contig_init_ctx_attrs(struct device *dev,
- struct dma_attrs *attrs)
+/**
+ * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
+ * @dev: device for configuring DMA parameters
+ * @size: size of DMA max segment size to set
+ *
+ * To allow mapping the scatter-list into a single chunk in the DMA
+ * address space, the device is required to have the DMA max segment
+ * size parameter set to a value larger than the buffer size. Otherwise,
+ * the DMA-mapping subsystem will split the mapping into max segment
+ * size chunks. This function sets the DMA max segment size
+ * parameter to let DMA-mapping map a buffer as a single chunk in DMA
+ * address space.
+ * This code assumes that the DMA-mapping subsystem will merge all
+ * scatterlist segments if this is really possible (for example when
+ * an IOMMU is available and enabled).
+ * Ideally, this parameter should be set by the generic bus code, but it
+ * is left with the default 64KiB value due to historical limitations in
+ * other subsystems (like limited USB host drivers) and there is no good
+ * place to set it to the proper value.
+ * This function should be called from drivers known to operate on
+ * platforms with an IOMMU and to provide access to shared buffers
+ * (either USERPTR or DMABUF). This should be done before initializing
+ * the videobuf2 queue.
+ */
+int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
- struct vb2_dc_conf *conf;
-
- conf = kzalloc(sizeof *conf, GFP_KERNEL);
- if (!conf)
- return ERR_PTR(-ENOMEM);
-
- conf->dev = dev;
- if (attrs)
- conf->attrs = *attrs;
+ if (!dev->dma_parms) {
+ dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
+ if (!dev->dma_parms)
+ return -ENOMEM;
+ }
+ if (dma_get_max_seg_size(dev) < size)
+ return dma_set_max_seg_size(dev, size);
- return conf;
+ return 0;
}
-EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx_attrs);
+EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
-void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
+/*
+ * vb2_dma_contig_clear_max_seg_size() - release resources for DMA parameters
+ * @dev: device for configuring DMA parameters
+ *
+ * This function releases resources allocated to configure DMA parameters
+ * (see vb2_dma_contig_set_max_seg_size() function). It should be called from
+ * device drivers on driver remove.
+ */
+void vb2_dma_contig_clear_max_seg_size(struct device *dev)
{
- if (!IS_ERR_OR_NULL(alloc_ctx))
- kfree(alloc_ctx);
+ kfree(dev->dma_parms);
+ dev->dma_parms = NULL;
}
-EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);
+EXPORT_SYMBOL_GPL(vb2_dma_contig_clear_max_seg_size);
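
A minimal sketch of how a bridge driver might pair these two helpers (hypothetical driver names; a large size such as DMA_BIT_MASK(32) is a common choice, but the exact value is an assumption):

/* Hypothetical probe/remove pairing for the helpers above. */
static int foo_probe(struct platform_device *pdev)
{
	int ret;

	/* Let USERPTR/DMABUF buffers map as a single contiguous chunk. */
	ret = vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/* ... set up and initialize the vb2 queue afterwards ... */
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	vb2_dma_contig_clear_max_seg_size(&pdev->dev);
	return 0;
}
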
MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index 9985c89f0..bd82d709e 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -30,10 +30,6 @@ module_param(debug, int, 0644);
printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg); \
} while (0)
-struct vb2_dma_sg_conf {
- struct device *dev;
-};
-
struct vb2_dma_sg_buf {
struct device *dev;
void *vaddr;
@@ -99,19 +95,16 @@ static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
return 0;
}
-static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
- enum dma_data_direction dma_dir, gfp_t gfp_flags)
+static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
+ unsigned long size, enum dma_data_direction dma_dir,
+ gfp_t gfp_flags)
{
- struct vb2_dma_sg_conf *conf = alloc_ctx;
struct vb2_dma_sg_buf *buf;
struct sg_table *sgt;
int ret;
int num_pages;
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
-
- if (WARN_ON(alloc_ctx == NULL))
+ if (WARN_ON(dev == NULL))
return NULL;
buf = kzalloc(sizeof *buf, GFP_KERNEL);
if (!buf)
@@ -140,7 +133,7 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
goto fail_table_alloc;
/* Prevent the device from being released while the buffer is used */
- buf->dev = get_device(conf->dev);
+ buf->dev = get_device(dev);
sgt = &buf->sg_table;
/*
@@ -148,7 +141,7 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
* prepare() memop is called.
*/
sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, &attrs);
+ buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
if (!sgt->nents)
goto fail_map;
@@ -183,13 +176,10 @@ static void vb2_dma_sg_put(void *buf_priv)
int i = buf->num_pages;
if (atomic_dec_and_test(&buf->refcount)) {
- DEFINE_DMA_ATTRS(attrs);
-
- dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
buf->num_pages);
dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, &attrs);
+ buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
if (buf->vaddr)
vm_unmap_ram(buf->vaddr, buf->num_pages);
sg_free_table(buf->dma_sgt);
@@ -226,23 +216,20 @@ static void vb2_dma_sg_finish(void *buf_priv)
dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}
-static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
+static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
unsigned long size,
enum dma_data_direction dma_dir)
{
- struct vb2_dma_sg_conf *conf = alloc_ctx;
struct vb2_dma_sg_buf *buf;
struct sg_table *sgt;
- DEFINE_DMA_ATTRS(attrs);
struct frame_vector *vec;
- dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
buf = kzalloc(sizeof *buf, GFP_KERNEL);
if (!buf)
return NULL;
buf->vaddr = NULL;
- buf->dev = conf->dev;
+ buf->dev = dev;
buf->dma_dir = dma_dir;
buf->offset = vaddr & ~PAGE_MASK;
buf->size = size;
@@ -267,7 +254,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
* prepare() memop is called.
*/
sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, &attrs);
+ buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
if (!sgt->nents)
goto userptr_fail_map;
@@ -291,14 +278,11 @@ static void vb2_dma_sg_put_userptr(void *buf_priv)
struct vb2_dma_sg_buf *buf = buf_priv;
struct sg_table *sgt = &buf->sg_table;
int i = buf->num_pages;
- DEFINE_DMA_ATTRS(attrs);
-
- dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
__func__, buf->num_pages);
dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
- &attrs);
+ DMA_ATTR_SKIP_CPU_SYNC);
if (buf->vaddr)
vm_unmap_ram(buf->vaddr, buf->num_pages);
sg_free_table(buf->dma_sgt);
@@ -616,10 +600,9 @@ static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
kfree(buf);
}
-static void *vb2_dma_sg_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
+static void *vb2_dma_sg_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
unsigned long size, enum dma_data_direction dma_dir)
{
- struct vb2_dma_sg_conf *conf = alloc_ctx;
struct vb2_dma_sg_buf *buf;
struct dma_buf_attachment *dba;
@@ -630,7 +613,7 @@ static void *vb2_dma_sg_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
if (!buf)
return ERR_PTR(-ENOMEM);
- buf->dev = conf->dev;
+ buf->dev = dev;
/* create attachment for the dmabuf with the user device */
dba = dma_buf_attach(dbuf, buf->dev);
if (IS_ERR(dba)) {
@@ -672,27 +655,6 @@ const struct vb2_mem_ops vb2_dma_sg_memops = {
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);
-void *vb2_dma_sg_init_ctx(struct device *dev)
-{
- struct vb2_dma_sg_conf *conf;
-
- conf = kzalloc(sizeof(*conf), GFP_KERNEL);
- if (!conf)
- return ERR_PTR(-ENOMEM);
-
- conf->dev = dev;
-
- return conf;
-}
-EXPORT_SYMBOL_GPL(vb2_dma_sg_init_ctx);
-
-void vb2_dma_sg_cleanup_ctx(void *alloc_ctx)
-{
- if (!IS_ERR_OR_NULL(alloc_ctx))
- kfree(alloc_ctx);
-}
-EXPORT_SYMBOL_GPL(vb2_dma_sg_cleanup_ctx);
-
MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
index 7f366f1b0..9cfbb6e4b 100644
--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -427,7 +427,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
if (V4L2_TYPE_IS_OUTPUT(b->type)) {
/*
* For output buffers mask out the timecode flag:
- * this will be handled later in vb2_internal_qbuf().
+ * this will be handled later in vb2_qbuf().
 * The 'field' is valid metadata for this output buffer
 * and so it needs to be copied here.
*/
@@ -586,13 +586,6 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
}
EXPORT_SYMBOL_GPL(vb2_create_bufs);
-static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
-{
- int ret = vb2_queue_or_prepare_buf(q, b, "qbuf");
-
- return ret ? ret : vb2_core_qbuf(q, b->index, b);
-}
-
/**
* vb2_qbuf() - Queue a buffer from userspace
* @q: videobuf2 queue
@@ -612,30 +605,18 @@ static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
*/
int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
{
+ int ret;
+
if (vb2_fileio_is_active(q)) {
dprintk(1, "file io in progress\n");
return -EBUSY;
}
- return vb2_internal_qbuf(q, b);
+ ret = vb2_queue_or_prepare_buf(q, b, "qbuf");
+ return ret ? ret : vb2_core_qbuf(q, b->index, b);
}
EXPORT_SYMBOL_GPL(vb2_qbuf);
-static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b,
- bool nonblocking)
-{
- int ret;
-
- if (b->type != q->type) {
- dprintk(1, "invalid buffer type\n");
- return -EINVAL;
- }
-
- ret = vb2_core_dqbuf(q, NULL, b, nonblocking);
-
- return ret;
-}
-
/**
* vb2_dqbuf() - Dequeue a buffer to the userspace
* @q: videobuf2 queue
@@ -659,11 +640,27 @@ static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b,
*/
int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
{
+ int ret;
+
if (vb2_fileio_is_active(q)) {
dprintk(1, "file io in progress\n");
return -EBUSY;
}
- return vb2_internal_dqbuf(q, b, nonblocking);
+
+ if (b->type != q->type) {
+ dprintk(1, "invalid buffer type\n");
+ return -EINVAL;
+ }
+
+ ret = vb2_core_dqbuf(q, NULL, b, nonblocking);
+
+ /*
+ * After calling VIDIOC_DQBUF, the V4L2_BUF_FLAG_DONE flag must be
+ * cleared.
+ */
+ b->flags &= ~V4L2_BUF_FLAG_DONE;
+
+ return ret;
}
EXPORT_SYMBOL_GPL(vb2_dqbuf);
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index 1c302743a..c2820a6e1 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -33,8 +33,9 @@ struct vb2_vmalloc_buf {
static void vb2_vmalloc_put(void *buf_priv);
-static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size,
- enum dma_data_direction dma_dir, gfp_t gfp_flags)
+static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
+ unsigned long size, enum dma_data_direction dma_dir,
+ gfp_t gfp_flags)
{
struct vb2_vmalloc_buf *buf;
@@ -69,7 +70,7 @@ static void vb2_vmalloc_put(void *buf_priv)
}
}
-static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
+static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
unsigned long size,
enum dma_data_direction dma_dir)
{
@@ -403,7 +404,7 @@ static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
kfree(buf);
}
-static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
+static void *vb2_vmalloc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
unsigned long size, enum dma_data_direction dma_dir)
{
struct vb2_vmalloc_buf *buf;
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 81ddb1757..4b4c0c3c3 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -25,6 +25,17 @@ config ATMEL_SDRAMC
Starting with the at91sam9g45, this controller supports SDR, DDR and
LP-DDR memories.
+config ATMEL_EBI
+ bool "Atmel EBI driver"
+ default y
+ depends on ARCH_AT91 && OF
+ select MFD_SYSCON
+ help
+ Driver for the Atmel EBI controller.
+ Used to configure the EBI (External Bus Interface) when a device
+ tree is used. This bus supports NAND flash, external Ethernet
+ controllers, SRAMs, ATA devices, etc.
+
config TI_AEMIF
tristate "Texas Instruments AEMIF driver"
depends on (ARCH_DAVINCI || ARCH_KEYSTONE) && OF
@@ -104,7 +115,7 @@ config FSL_CORENET_CF
config FSL_IFC
bool
- depends on FSL_SOC
+ depends on FSL_SOC || ARCH_LAYERSCAPE
config JZ4780_NEMC
bool "Ingenic JZ4780 SoC NEMC driver"
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index cb0b7a1df..b20ae38b5 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_OF) += of_memory.o
endif
obj-$(CONFIG_ARM_PL172_MPMC) += pl172.o
obj-$(CONFIG_ATMEL_SDRAMC) += atmel-sdramc.o
+obj-$(CONFIG_ATMEL_EBI) += atmel-ebi.o
obj-$(CONFIG_TI_AEMIF) += ti-aemif.o
obj-$(CONFIG_TI_EMIF) += emif.o
obj-$(CONFIG_OMAP_GPMC) += omap-gpmc.o
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
new file mode 100644
index 000000000..f87ad6f5d
--- /dev/null
+++ b/drivers/memory/atmel-ebi.c
@@ -0,0 +1,766 @@
+/*
+ * EBI driver for Atmel chips
+ * inspired by the fsl weim bus driver
+ *
+ * Copyright (C) 2013 Jean-Jacques Hiblot <jjhiblot@traphandler.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/atmel-matrix.h>
+#include <linux/mfd/syscon/atmel-smc.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+struct at91sam9_smc_timings {
+ u32 ncs_rd_setup_ns;
+ u32 nrd_setup_ns;
+ u32 ncs_wr_setup_ns;
+ u32 nwe_setup_ns;
+ u32 ncs_rd_pulse_ns;
+ u32 nrd_pulse_ns;
+ u32 ncs_wr_pulse_ns;
+ u32 nwe_pulse_ns;
+ u32 nrd_cycle_ns;
+ u32 nwe_cycle_ns;
+ u32 tdf_ns;
+};
+
+struct at91sam9_smc_generic_fields {
+ struct regmap_field *setup;
+ struct regmap_field *pulse;
+ struct regmap_field *cycle;
+ struct regmap_field *mode;
+};
+
+struct at91sam9_ebi_dev_config {
+ struct at91sam9_smc_timings timings;
+ u32 mode;
+};
+
+struct at91_ebi_dev_config {
+ int cs;
+ union {
+ struct at91sam9_ebi_dev_config sam9;
+ };
+};
+
+struct at91_ebi;
+
+struct at91_ebi_dev {
+ struct list_head node;
+ struct at91_ebi *ebi;
+ u32 mode;
+ int numcs;
+ struct at91_ebi_dev_config configs[];
+};
+
+struct at91_ebi_caps {
+ unsigned int available_cs;
+ const struct reg_field *ebi_csa;
+ void (*get_config)(struct at91_ebi_dev *ebid,
+ struct at91_ebi_dev_config *conf);
+ int (*xlate_config)(struct at91_ebi_dev *ebid,
+ struct device_node *configs_np,
+ struct at91_ebi_dev_config *conf);
+ int (*apply_config)(struct at91_ebi_dev *ebid,
+ struct at91_ebi_dev_config *conf);
+ int (*init)(struct at91_ebi *ebi);
+};
+
+struct at91_ebi {
+ struct clk *clk;
+ struct regmap *smc;
+ struct regmap *matrix;
+
+ struct regmap_field *ebi_csa;
+
+ struct device *dev;
+ const struct at91_ebi_caps *caps;
+ struct list_head devs;
+ union {
+ struct at91sam9_smc_generic_fields sam9;
+ };
+};
+
+static void at91sam9_ebi_get_config(struct at91_ebi_dev *ebid,
+ struct at91_ebi_dev_config *conf)
+{
+ struct at91sam9_smc_generic_fields *fields = &ebid->ebi->sam9;
+ unsigned int clk_rate = clk_get_rate(ebid->ebi->clk);
+ struct at91sam9_ebi_dev_config *config = &conf->sam9;
+ struct at91sam9_smc_timings *timings = &config->timings;
+ unsigned int val;
+
+ regmap_fields_read(fields->mode, conf->cs, &val);
+ config->mode = val & ~AT91_SMC_TDF;
+
+ val = (val & AT91_SMC_TDF) >> 16;
+ timings->tdf_ns = clk_rate * val;
+
+ regmap_fields_read(fields->setup, conf->cs, &val);
+ timings->ncs_rd_setup_ns = (val >> 24) & 0x1f;
+ timings->ncs_rd_setup_ns += ((val >> 29) & 0x1) * 128;
+ timings->ncs_rd_setup_ns *= clk_rate;
+ timings->nrd_setup_ns = (val >> 16) & 0x1f;
+ timings->nrd_setup_ns += ((val >> 21) & 0x1) * 128;
+ timings->nrd_setup_ns *= clk_rate;
+ timings->ncs_wr_setup_ns = (val >> 8) & 0x1f;
+ timings->ncs_wr_setup_ns += ((val >> 13) & 0x1) * 128;
+ timings->ncs_wr_setup_ns *= clk_rate;
+ timings->nwe_setup_ns = val & 0x1f;
+ timings->nwe_setup_ns += ((val >> 5) & 0x1) * 128;
+ timings->nwe_setup_ns *= clk_rate;
+
+ regmap_fields_read(fields->pulse, conf->cs, &val);
+ timings->ncs_rd_pulse_ns = (val >> 24) & 0x3f;
+ timings->ncs_rd_pulse_ns += ((val >> 30) & 0x1) * 256;
+ timings->ncs_rd_pulse_ns *= clk_rate;
+ timings->nrd_pulse_ns = (val >> 16) & 0x3f;
+ timings->nrd_pulse_ns += ((val >> 22) & 0x1) * 256;
+ timings->nrd_pulse_ns *= clk_rate;
+ timings->ncs_wr_pulse_ns = (val >> 8) & 0x3f;
+ timings->ncs_wr_pulse_ns += ((val >> 14) & 0x1) * 256;
+ timings->ncs_wr_pulse_ns *= clk_rate;
+ timings->nwe_pulse_ns = val & 0x3f;
+ timings->nwe_pulse_ns += ((val >> 6) & 0x1) * 256;
+ timings->nwe_pulse_ns *= clk_rate;
+
+ regmap_fields_read(fields->cycle, conf->cs, &val);
+ timings->nrd_cycle_ns = (val >> 16) & 0x7f;
+ timings->nrd_cycle_ns += ((val >> 23) & 0x3) * 256;
+ timings->nrd_cycle_ns *= clk_rate;
+ timings->nwe_cycle_ns = val & 0x7f;
+ timings->nwe_cycle_ns += ((val >> 7) & 0x3) * 256;
+ timings->nwe_cycle_ns *= clk_rate;
+}
+
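Each setup sub-field read above packs 5 low bits plus one extra bit worth 128 cycles; a worked decode under that assumption (hypothetical helper and value):

/* Worked example: raw sub-field 0x25 = 0b100101 decodes to
 * (0x25 & 0x1f) + ((0x25 >> 5) & 0x1) * 128 = 5 + 128 = 133 cycles. */
static unsigned int smc_decode_setup(u32 val)
{
	return (val & 0x1f) + ((val >> 5) & 0x1) * 128;
}
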
+static int at91_xlate_timing(struct device_node *np, const char *prop,
+ u32 *val, bool *required)
+{
+ if (!of_property_read_u32(np, prop, val)) {
+ *required = true;
+ return 0;
+ }
+
+ if (*required)
+ return -EINVAL;
+
+ return 0;
+}
+
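The helper above implements an all-or-nothing contract: once any timing property has been seen, *required latches to true, and every later lookup sharing that flag must succeed. A short sketch of how the callers below rely on it (property names taken from this file; the wrapper is hypothetical):

/* Sketch: the second lookup becomes mandatory once the first hit. */
static int demo_xlate(struct device_node *np, u32 *setup, u32 *pulse)
{
	bool required = false;
	int ret;

	ret = at91_xlate_timing(np, "atmel,smc-nrd-setup-ns",
				setup, &required);
	if (ret)
		return ret;

	/* Fails with -EINVAL if setup was present but pulse is missing. */
	return at91_xlate_timing(np, "atmel,smc-nrd-pulse-ns",
				 pulse, &required);
}
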
+static int at91sam9_smc_xslate_timings(struct at91_ebi_dev *ebid,
+ struct device_node *np,
+ struct at91sam9_smc_timings *timings,
+ bool *required)
+{
+ int ret;
+
+ ret = at91_xlate_timing(np, "atmel,smc-ncs-rd-setup-ns",
+ &timings->ncs_rd_setup_ns, required);
+ if (ret)
+ goto out;
+
+ ret = at91_xlate_timing(np, "atmel,smc-nrd-setup-ns",
+ &timings->nrd_setup_ns, required);
+ if (ret)
+ goto out;
+
+ ret = at91_xlate_timing(np, "atmel,smc-ncs-wr-setup-ns",
+ &timings->ncs_wr_setup_ns, required);
+ if (ret)
+ goto out;
+
+ ret = at91_xlate_timing(np, "atmel,smc-nwe-setup-ns",
+ &timings->nwe_setup_ns, required);
+ if (ret)
+ goto out;
+
+ ret = at91_xlate_timing(np, "atmel,smc-ncs-rd-pulse-ns",
+ &timings->ncs_rd_pulse_ns, required);
+ if (ret)
+ goto out;
+
+ ret = at91_xlate_timing(np, "atmel,smc-nrd-pulse-ns",
+ &timings->nrd_pulse_ns, required);
+ if (ret)
+ goto out;
+
+ ret = at91_xlate_timing(np, "atmel,smc-ncs-wr-pulse-ns",
+ &timings->ncs_wr_pulse_ns, required);
+ if (ret)
+ goto out;
+
+ ret = at91_xlate_timing(np, "atmel,smc-nwe-pulse-ns",
+ &timings->nwe_pulse_ns, required);
+ if (ret)
+ goto out;
+
+ ret = at91_xlate_timing(np, "atmel,smc-nwe-cycle-ns",
+ &timings->nwe_cycle_ns, required);
+ if (ret)
+ goto out;
+
+ ret = at91_xlate_timing(np, "atmel,smc-nrd-cycle-ns",
+ &timings->nrd_cycle_ns, required);
+ if (ret)
+ goto out;
+
+ ret = at91_xlate_timing(np, "atmel,smc-tdf-ns",
+ &timings->tdf_ns, required);
+
+out:
+ if (ret)
+ dev_err(ebid->ebi->dev,
+ "missing or invalid timings definition in %s",
+ np->full_name);
+
+ return ret;
+}
+
+static int at91sam9_ebi_xslate_config(struct at91_ebi_dev *ebid,
+ struct device_node *np,
+ struct at91_ebi_dev_config *conf)
+{
+ struct at91sam9_ebi_dev_config *config = &conf->sam9;
+ bool required = false;
+ const char *tmp_str;
+ u32 tmp;
+ int ret;
+
+ ret = of_property_read_u32(np, "atmel,smc-bus-width", &tmp);
+ if (!ret) {
+ switch (tmp) {
+ case 8:
+ config->mode |= AT91_SMC_DBW_8;
+ break;
+
+ case 16:
+ config->mode |= AT91_SMC_DBW_16;
+ break;
+
+ case 32:
+ config->mode |= AT91_SMC_DBW_32;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ required = true;
+ }
+
+ if (of_property_read_bool(np, "atmel,smc-tdf-optimized")) {
+ config->mode |= AT91_SMC_TDFMODE_OPTIMIZED;
+ required = true;
+ }
+
+ tmp_str = NULL;
+ of_property_read_string(np, "atmel,smc-byte-access-type", &tmp_str);
+ if (tmp_str && !strcmp(tmp_str, "write")) {
+ config->mode |= AT91_SMC_BAT_WRITE;
+ required = true;
+ }
+
+ tmp_str = NULL;
+ of_property_read_string(np, "atmel,smc-read-mode", &tmp_str);
+ if (tmp_str && !strcmp(tmp_str, "nrd")) {
+ config->mode |= AT91_SMC_READMODE_NRD;
+ required = true;
+ }
+
+ tmp_str = NULL;
+ of_property_read_string(np, "atmel,smc-write-mode", &tmp_str);
+ if (tmp_str && !strcmp(tmp_str, "nwe")) {
+ config->mode |= AT91_SMC_WRITEMODE_NWE;
+ required = true;
+ }
+
+ tmp_str = NULL;
+ of_property_read_string(np, "atmel,smc-exnw-mode", &tmp_str);
+ if (tmp_str) {
+ if (!strcmp(tmp_str, "frozen"))
+ config->mode |= AT91_SMC_EXNWMODE_FROZEN;
+ else if (!strcmp(tmp_str, "ready"))
+ config->mode |= AT91_SMC_EXNWMODE_READY;
+ else if (strcmp(tmp_str, "disabled"))
+ return -EINVAL;
+
+ required = true;
+ }
+
+ ret = of_property_read_u32(np, "atmel,smc-page-mode", &tmp);
+ if (!ret) {
+ switch (tmp) {
+ case 4:
+ config->mode |= AT91_SMC_PS_4;
+ break;
+
+ case 8:
+ config->mode |= AT91_SMC_PS_8;
+ break;
+
+ case 16:
+ config->mode |= AT91_SMC_PS_16;
+ break;
+
+ case 32:
+ config->mode |= AT91_SMC_PS_32;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ config->mode |= AT91_SMC_PMEN;
+ required = true;
+ }
+
+ ret = at91sam9_smc_xslate_timings(ebid, np, &config->timings,
+ &required);
+ if (ret)
+ return ret;
+
+ return required;
+}
+
+static int at91sam9_ebi_apply_config(struct at91_ebi_dev *ebid,
+ struct at91_ebi_dev_config *conf)
+{
+ unsigned int clk_rate = clk_get_rate(ebid->ebi->clk);
+ struct at91sam9_ebi_dev_config *config = &conf->sam9;
+ struct at91sam9_smc_timings *timings = &config->timings;
+ struct at91sam9_smc_generic_fields *fields = &ebid->ebi->sam9;
+ u32 coded_val;
+ u32 val;
+
+ coded_val = at91sam9_smc_setup_ns_to_cycles(clk_rate,
+ timings->ncs_rd_setup_ns);
+ val = AT91SAM9_SMC_NCS_NRDSETUP(coded_val);
+ coded_val = at91sam9_smc_setup_ns_to_cycles(clk_rate,
+ timings->nrd_setup_ns);
+ val |= AT91SAM9_SMC_NRDSETUP(coded_val);
+ coded_val = at91sam9_smc_setup_ns_to_cycles(clk_rate,
+ timings->ncs_wr_setup_ns);
+ val |= AT91SAM9_SMC_NCS_WRSETUP(coded_val);
+ coded_val = at91sam9_smc_setup_ns_to_cycles(clk_rate,
+ timings->nwe_setup_ns);
+ val |= AT91SAM9_SMC_NWESETUP(coded_val);
+ regmap_fields_write(fields->setup, conf->cs, val);
+
+ coded_val = at91sam9_smc_pulse_ns_to_cycles(clk_rate,
+ timings->ncs_rd_pulse_ns);
+ val = AT91SAM9_SMC_NCS_NRDPULSE(coded_val);
+ coded_val = at91sam9_smc_pulse_ns_to_cycles(clk_rate,
+ timings->nrd_pulse_ns);
+ val |= AT91SAM9_SMC_NRDPULSE(coded_val);
+ coded_val = at91sam9_smc_pulse_ns_to_cycles(clk_rate,
+ timings->ncs_wr_pulse_ns);
+ val |= AT91SAM9_SMC_NCS_WRPULSE(coded_val);
+ coded_val = at91sam9_smc_pulse_ns_to_cycles(clk_rate,
+ timings->nwe_pulse_ns);
+ val |= AT91SAM9_SMC_NWEPULSE(coded_val);
+ regmap_fields_write(fields->pulse, conf->cs, val);
+
+ coded_val = at91sam9_smc_cycle_ns_to_cycles(clk_rate,
+ timings->nrd_cycle_ns);
+ val = AT91SAM9_SMC_NRDCYCLE(coded_val);
+ coded_val = at91sam9_smc_cycle_ns_to_cycles(clk_rate,
+ timings->nwe_cycle_ns);
+ val |= AT91SAM9_SMC_NWECYCLE(coded_val);
+ regmap_fields_write(fields->cycle, conf->cs, val);
+
+ val = DIV_ROUND_UP(timings->tdf_ns, clk_rate);
+ if (val > AT91_SMC_TDF_MAX)
+ val = AT91_SMC_TDF_MAX;
+ regmap_fields_write(fields->mode, conf->cs,
+ config->mode | AT91_SMC_TDF_(val));
+
+ return 0;
+}
+
+static int at91sam9_ebi_init(struct at91_ebi *ebi)
+{
+ struct at91sam9_smc_generic_fields *fields = &ebi->sam9;
+ struct reg_field field = REG_FIELD(0, 0, 31);
+
+ field.id_size = fls(ebi->caps->available_cs);
+ field.id_offset = AT91SAM9_SMC_GENERIC_BLK_SZ;
+
+ field.reg = AT91SAM9_SMC_SETUP(AT91SAM9_SMC_GENERIC);
+ fields->setup = devm_regmap_field_alloc(ebi->dev, ebi->smc, field);
+ if (IS_ERR(fields->setup))
+ return PTR_ERR(fields->setup);
+
+ field.reg = AT91SAM9_SMC_PULSE(AT91SAM9_SMC_GENERIC);
+ fields->pulse = devm_regmap_field_alloc(ebi->dev, ebi->smc, field);
+ if (IS_ERR(fields->pulse))
+ return PTR_ERR(fields->pulse);
+
+ field.reg = AT91SAM9_SMC_CYCLE(AT91SAM9_SMC_GENERIC);
+ fields->cycle = devm_regmap_field_alloc(ebi->dev, ebi->smc, field);
+ if (IS_ERR(fields->cycle))
+ return PTR_ERR(fields->cycle);
+
+ field.reg = AT91SAM9_SMC_MODE(AT91SAM9_SMC_GENERIC);
+ fields->mode = devm_regmap_field_alloc(ebi->dev, ebi->smc, field);
+ if (IS_ERR(fields->mode))
+ return PTR_ERR(fields->mode);
+
+ return 0;
+}
+
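The REG_FIELD initialisation above relies on regmap's indexed fields: id_offset is the per-index register stride, so each write addresses one SMC register block per chip select. In effect (a sketch of the addressing, assuming standard regmap semantics):

/* regmap_fields_write(fields->setup, cs, val) ends up writing to:
 *   AT91SAM9_SMC_SETUP(AT91SAM9_SMC_GENERIC)
 *     + cs * AT91SAM9_SMC_GENERIC_BLK_SZ
 * i.e. the SETUP register of chip select 'cs'. */
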
+static int sama5d3_ebi_init(struct at91_ebi *ebi)
+{
+ struct at91sam9_smc_generic_fields *fields = &ebi->sam9;
+ struct reg_field field = REG_FIELD(0, 0, 31);
+
+ field.id_size = fls(ebi->caps->available_cs);
+ field.id_offset = SAMA5_SMC_GENERIC_BLK_SZ;
+
+ field.reg = AT91SAM9_SMC_SETUP(SAMA5_SMC_GENERIC);
+ fields->setup = devm_regmap_field_alloc(ebi->dev, ebi->smc, field);
+ if (IS_ERR(fields->setup))
+ return PTR_ERR(fields->setup);
+
+ field.reg = AT91SAM9_SMC_PULSE(SAMA5_SMC_GENERIC);
+ fields->pulse = devm_regmap_field_alloc(ebi->dev, ebi->smc, field);
+ if (IS_ERR(fields->pulse))
+ return PTR_ERR(fields->pulse);
+
+ field.reg = AT91SAM9_SMC_CYCLE(SAMA5_SMC_GENERIC);
+ fields->cycle = devm_regmap_field_alloc(ebi->dev, ebi->smc, field);
+ if (IS_ERR(fields->cycle))
+ return PTR_ERR(fields->cycle);
+
+ field.reg = SAMA5_SMC_MODE(SAMA5_SMC_GENERIC);
+ fields->mode = devm_regmap_field_alloc(ebi->dev, ebi->smc, field);
+ if (IS_ERR(fields->mode))
+ return PTR_ERR(fields->mode);
+
+ return 0;
+}
+
+static int at91_ebi_dev_setup(struct at91_ebi *ebi, struct device_node *np,
+ int reg_cells)
+{
+ const struct at91_ebi_caps *caps = ebi->caps;
+ struct at91_ebi_dev_config conf = { };
+ struct device *dev = ebi->dev;
+ struct at91_ebi_dev *ebid;
+ int ret, numcs = 0, i;
+ bool apply = false;
+
+ numcs = of_property_count_elems_of_size(np, "reg",
+ reg_cells * sizeof(u32));
+ if (numcs <= 0) {
+ dev_err(dev, "invalid reg property in %s\n", np->full_name);
+ return -EINVAL;
+ }
+
+ ebid = devm_kzalloc(ebi->dev,
+ sizeof(*ebid) + (numcs * sizeof(*ebid->configs)),
+ GFP_KERNEL);
+ if (!ebid)
+ return -ENOMEM;
+
+ ebid->ebi = ebi;
+
+ ret = caps->xlate_config(ebid, np, &conf);
+ if (ret < 0)
+ return ret;
+ else if (ret)
+ apply = true;
+
+ for (i = 0; i < numcs; i++) {
+ u32 cs;
+
+ ret = of_property_read_u32_index(np, "reg", i * reg_cells,
+ &cs);
+ if (ret)
+ return ret;
+
+ if (cs > AT91_MATRIX_EBI_NUM_CS ||
+ !(ebi->caps->available_cs & BIT(cs))) {
+ dev_err(dev, "invalid reg property in %s\n",
+ np->full_name);
+ return -EINVAL;
+ }
+
+ ebid->configs[i].cs = cs;
+
+ if (apply) {
+ conf.cs = cs;
+ ret = caps->apply_config(ebid, &conf);
+ if (ret)
+ return ret;
+ }
+
+ caps->get_config(ebid, &ebid->configs[i]);
+
+ /*
+ * Attach the EBI device to the generic SMC logic if at least
+ * one "atmel,smc-" property is present.
+ */
+ if (ebi->ebi_csa && apply)
+ regmap_field_update_bits(ebi->ebi_csa,
+ BIT(cs), 0);
+ }
+
+ list_add_tail(&ebid->node, &ebi->devs);
+
+ return 0;
+}
+
+static const struct reg_field at91sam9260_ebi_csa =
+ REG_FIELD(AT91SAM9260_MATRIX_EBICSA, 0,
+ AT91_MATRIX_EBI_NUM_CS - 1);
+
+static const struct at91_ebi_caps at91sam9260_ebi_caps = {
+ .available_cs = 0xff,
+ .ebi_csa = &at91sam9260_ebi_csa,
+ .get_config = at91sam9_ebi_get_config,
+ .xlate_config = at91sam9_ebi_xslate_config,
+ .apply_config = at91sam9_ebi_apply_config,
+ .init = at91sam9_ebi_init,
+};
+
+static const struct reg_field at91sam9261_ebi_csa =
+ REG_FIELD(AT91SAM9261_MATRIX_EBICSA, 0,
+ AT91_MATRIX_EBI_NUM_CS - 1);
+
+static const struct at91_ebi_caps at91sam9261_ebi_caps = {
+ .available_cs = 0xff,
+ .ebi_csa = &at91sam9261_ebi_csa,
+ .get_config = at91sam9_ebi_get_config,
+ .xlate_config = at91sam9_ebi_xslate_config,
+ .apply_config = at91sam9_ebi_apply_config,
+ .init = at91sam9_ebi_init,
+};
+
+static const struct reg_field at91sam9263_ebi0_csa =
+ REG_FIELD(AT91SAM9263_MATRIX_EBI0CSA, 0,
+ AT91_MATRIX_EBI_NUM_CS - 1);
+
+static const struct at91_ebi_caps at91sam9263_ebi0_caps = {
+ .available_cs = 0x3f,
+ .ebi_csa = &at91sam9263_ebi0_csa,
+ .get_config = at91sam9_ebi_get_config,
+ .xlate_config = at91sam9_ebi_xslate_config,
+ .apply_config = at91sam9_ebi_apply_config,
+ .init = at91sam9_ebi_init,
+};
+
+static const struct reg_field at91sam9263_ebi1_csa =
+ REG_FIELD(AT91SAM9263_MATRIX_EBI1CSA, 0,
+ AT91_MATRIX_EBI_NUM_CS - 1);
+
+static const struct at91_ebi_caps at91sam9263_ebi1_caps = {
+ .available_cs = 0x7,
+ .ebi_csa = &at91sam9263_ebi1_csa,
+ .get_config = at91sam9_ebi_get_config,
+ .xlate_config = at91sam9_ebi_xslate_config,
+ .apply_config = at91sam9_ebi_apply_config,
+ .init = at91sam9_ebi_init,
+};
+
+static const struct reg_field at91sam9rl_ebi_csa =
+ REG_FIELD(AT91SAM9RL_MATRIX_EBICSA, 0,
+ AT91_MATRIX_EBI_NUM_CS - 1);
+
+static const struct at91_ebi_caps at91sam9rl_ebi_caps = {
+ .available_cs = 0x3f,
+ .ebi_csa = &at91sam9rl_ebi_csa,
+ .get_config = at91sam9_ebi_get_config,
+ .xlate_config = at91sam9_ebi_xslate_config,
+ .apply_config = at91sam9_ebi_apply_config,
+ .init = at91sam9_ebi_init,
+};
+
+static const struct reg_field at91sam9g45_ebi_csa =
+ REG_FIELD(AT91SAM9G45_MATRIX_EBICSA, 0,
+ AT91_MATRIX_EBI_NUM_CS - 1);
+
+static const struct at91_ebi_caps at91sam9g45_ebi_caps = {
+ .available_cs = 0x3f,
+ .ebi_csa = &at91sam9g45_ebi_csa,
+ .get_config = at91sam9_ebi_get_config,
+ .xlate_config = at91sam9_ebi_xslate_config,
+ .apply_config = at91sam9_ebi_apply_config,
+ .init = at91sam9_ebi_init,
+};
+
+static const struct at91_ebi_caps at91sam9x5_ebi_caps = {
+ .available_cs = 0x3f,
+ .ebi_csa = &at91sam9263_ebi0_csa,
+ .get_config = at91sam9_ebi_get_config,
+ .xlate_config = at91sam9_ebi_xslate_config,
+ .apply_config = at91sam9_ebi_apply_config,
+ .init = at91sam9_ebi_init,
+};
+
+static const struct at91_ebi_caps sama5d3_ebi_caps = {
+ .available_cs = 0xf,
+ .get_config = at91sam9_ebi_get_config,
+ .xlate_config = at91sam9_ebi_xslate_config,
+ .apply_config = at91sam9_ebi_apply_config,
+ .init = sama5d3_ebi_init,
+};
+
+static const struct of_device_id at91_ebi_id_table[] = {
+ {
+ .compatible = "atmel,at91sam9260-ebi",
+ .data = &at91sam9260_ebi_caps,
+ },
+ {
+ .compatible = "atmel,at91sam9261-ebi",
+ .data = &at91sam9261_ebi_caps,
+ },
+ {
+ .compatible = "atmel,at91sam9263-ebi0",
+ .data = &at91sam9263_ebi0_caps,
+ },
+ {
+ .compatible = "atmel,at91sam9263-ebi1",
+ .data = &at91sam9263_ebi1_caps,
+ },
+ {
+ .compatible = "atmel,at91sam9rl-ebi",
+ .data = &at91sam9rl_ebi_caps,
+ },
+ {
+ .compatible = "atmel,at91sam9g45-ebi",
+ .data = &at91sam9g45_ebi_caps,
+ },
+ {
+ .compatible = "atmel,at91sam9x5-ebi",
+ .data = &at91sam9x5_ebi_caps,
+ },
+ {
+ .compatible = "atmel,sama5d3-ebi",
+ .data = &sama5d3_ebi_caps,
+ },
+ { /* sentinel */ }
+};
+
+static int at91_ebi_dev_disable(struct at91_ebi *ebi, struct device_node *np)
+{
+ struct device *dev = ebi->dev;
+ struct property *newprop;
+
+ newprop = devm_kzalloc(dev, sizeof(*newprop), GFP_KERNEL);
+ if (!newprop)
+ return -ENOMEM;
+
+ newprop->name = devm_kstrdup(dev, "status", GFP_KERNEL);
+ if (!newprop->name)
+ return -ENOMEM;
+
+ newprop->value = devm_kstrdup(dev, "disabled", GFP_KERNEL);
+ if (!newprop->value)
+ return -ENOMEM;
+
+ newprop->length = sizeof("disabled");
+
+ return of_update_property(np, newprop);
+}
+
+static int at91_ebi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *child, *np = dev->of_node;
+ const struct of_device_id *match;
+ struct at91_ebi *ebi;
+ int ret, reg_cells;
+ struct clk *clk;
+ u32 val;
+
+ match = of_match_device(at91_ebi_id_table, dev);
+ if (!match || !match->data)
+ return -EINVAL;
+
+ ebi = devm_kzalloc(dev, sizeof(*ebi), GFP_KERNEL);
+ if (!ebi)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ebi->devs);
+ ebi->caps = match->data;
+ ebi->dev = dev;
+
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ebi->clk = clk;
+
+ ebi->smc = syscon_regmap_lookup_by_phandle(np, "atmel,smc");
+ if (IS_ERR(ebi->smc))
+ return PTR_ERR(ebi->smc);
+
+ /*
+ * The sama5d3 does not provide an EBICSA register and thus does not
+ * need to access the matrix registers.
+ */
+ if (ebi->caps->ebi_csa) {
+ ebi->matrix =
+ syscon_regmap_lookup_by_phandle(np, "atmel,matrix");
+ if (IS_ERR(ebi->matrix))
+ return PTR_ERR(ebi->matrix);
+
+ ebi->ebi_csa = regmap_field_alloc(ebi->matrix,
+ *ebi->caps->ebi_csa);
+ if (IS_ERR(ebi->ebi_csa))
+ return PTR_ERR(ebi->ebi_csa);
+ }
+
+ ret = ebi->caps->init(ebi);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(np, "#address-cells", &val);
+ if (ret) {
+ dev_err(dev, "missing #address-cells property\n");
+ return ret;
+ }
+
+ reg_cells = val;
+
+ ret = of_property_read_u32(np, "#size-cells", &val);
+ if (ret) {
+ dev_err(dev, "missing #address-cells property\n");
+ return ret;
+ }
+
+ reg_cells += val;
+
+ for_each_available_child_of_node(np, child) {
+ if (!of_find_property(child, "reg", NULL))
+ continue;
+
+ ret = at91_ebi_dev_setup(ebi, child, reg_cells);
+ if (ret) {
+ dev_err(dev, "failed to configure EBI bus for %s, disabling the device",
+ child->full_name);
+
+ ret = at91_ebi_dev_disable(ebi, child);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return of_platform_populate(np, NULL, NULL, dev);
+}
+
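A worked example of the reg_cells arithmetic above (hypothetical node values): with #address-cells = 2 and #size-cells = 1, reg_cells is 3, so a "reg" property of six u32 cells describes two chip selects, and at91_ebi_dev_setup() reads the cs id of entry i at cell index i * reg_cells:

/* Hypothetical numbers: #address-cells = 2, #size-cells = 1. */
int reg_cells = 2 + 1;			/* = 3 */
int numcs = 6 / reg_cells;		/* 6 reg cells -> 2 chip selects */
/* cs id of entry i is read at cell index i * reg_cells (0 and 3). */
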
+static struct platform_driver at91_ebi_driver = {
+ .driver = {
+ .name = "atmel-ebi",
+ .of_match_table = at91_ebi_id_table,
+ },
+};
+builtin_platform_driver_probe(at91_ebi_driver, at91_ebi_probe);
diff --git a/drivers/memory/atmel-sdramc.c b/drivers/memory/atmel-sdramc.c
index a3ebc8a87..53a341f3b 100644
--- a/drivers/memory/atmel-sdramc.c
+++ b/drivers/memory/atmel-sdramc.c
@@ -1,6 +1,8 @@
/*
* Atmel (Multi-port DDR-)SDRAM Controller driver
*
+ * Author: Alexandre Belloni <alexandre.belloni@free-electrons.com>
+ *
* Copyright (C) 2014 Atmel
*
* This program is free software: you can redistribute it and/or modify
@@ -20,7 +22,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
@@ -48,7 +50,6 @@ static const struct of_device_id atmel_ramc_of_match[] = {
{ .compatible = "atmel,sama5d3-ddramc", .data = &sama5d3_caps, },
{},
};
-MODULE_DEVICE_TABLE(of, atmel_ramc_of_match);
static int atmel_ramc_probe(struct platform_device *pdev)
{
@@ -90,8 +91,4 @@ static int __init atmel_ramc_init(void)
{
return platform_driver_register(&atmel_ramc_driver);
}
-module_init(atmel_ramc_init);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Alexandre Belloni <alexandre.belloni@free-electrons.com>");
-MODULE_DESCRIPTION("Atmel (Multi-port DDR-)SDRAM Controller");
+device_initcall(atmel_ramc_init);
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
index 904b4af5f..1b182b117 100644
--- a/drivers/memory/fsl_ifc.c
+++ b/drivers/memory/fsl_ifc.c
@@ -31,7 +31,9 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/fsl_ifc.h>
-#include <asm/prom.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev;
EXPORT_SYMBOL(fsl_ifc_ctrl_dev);
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index f6b575791..4afbc412f 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -21,19 +21,50 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <soc/mediatek/smi.h>
+#include <dt-bindings/memory/mt2701-larb-port.h>
#define SMI_LARB_MMU_EN 0xf00
+#define REG_SMI_SECUR_CON_BASE 0x5c0
+
+/* every register controls 8 ports; consecutive registers are 0x4 apart */
+#define REG_SMI_SECUR_CON_OFFSET(id) (((id) >> 3) << 2)
+#define REG_SMI_SECUR_CON_ADDR(id) \
+ (REG_SMI_SECUR_CON_BASE + REG_SMI_SECUR_CON_OFFSET(id))
+
+/*
+ * every port has 4 control bits: bit[port + 3] controls virtual or physical,
+ * bits[port + 2 : port + 1] control the domain, and bit[port] controls
+ * secure or non-secure.
+ */
+#define SMI_SECUR_CON_VAL_MSK(id) (~(0xf << (((id) & 0x7) << 2)))
+#define SMI_SECUR_CON_VAL_VIRT(id) BIT((((id) & 0x7) << 2) + 3)
+/* mt2701 domain should be set to 3 */
+#define SMI_SECUR_CON_VAL_DOMAIN(id) (0x3 << ((((id) & 0x7) << 2) + 1))
+
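A worked expansion of the macros above for a hypothetical m4u port id of 10:

/* REG_SMI_SECUR_CON_ADDR(10) = 0x5c0 + ((10 >> 3) << 2) = 0x5c4
 * nibble shift = (10 & 0x7) << 2 = 8, so bits [11:8] describe the port
 * SMI_SECUR_CON_VAL_MSK(10)    = ~(0xf << 8)    (clear bits [11:8])
 * SMI_SECUR_CON_VAL_VIRT(10)   = BIT(8 + 3)     (bit 11: virtual)
 * SMI_SECUR_CON_VAL_DOMAIN(10) = 0x3 << (8 + 1) (bits [10:9] = 3) */
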
+struct mtk_smi_larb_gen {
+ int port_in_larb[MTK_LARB_NR_MAX + 1];
+ void (*config_port)(struct device *);
+};
struct mtk_smi {
- struct device *dev;
- struct clk *clk_apb, *clk_smi;
+ struct device *dev;
+ struct clk *clk_apb, *clk_smi;
+ struct clk *clk_async; /* only needed by mt2701 */
+ void __iomem *smi_ao_base;
};
struct mtk_smi_larb { /* larb: local arbiter */
- struct mtk_smi smi;
- void __iomem *base;
- struct device *smi_common_dev;
- u32 *mmu;
+ struct mtk_smi smi;
+ void __iomem *base;
+ struct device *smi_common_dev;
+ const struct mtk_smi_larb_gen *larb_gen;
+ int larbid;
+ u32 *mmu;
+};
+
+enum mtk_smi_gen {
+ MTK_SMI_GEN1,
+ MTK_SMI_GEN2
};
static int mtk_smi_enable(const struct mtk_smi *smi)
@@ -71,6 +102,7 @@ static void mtk_smi_disable(const struct mtk_smi *smi)
int mtk_smi_larb_get(struct device *larbdev)
{
struct mtk_smi_larb *larb = dev_get_drvdata(larbdev);
+ const struct mtk_smi_larb_gen *larb_gen = larb->larb_gen;
struct mtk_smi *common = dev_get_drvdata(larb->smi_common_dev);
int ret;
@@ -87,7 +119,7 @@ int mtk_smi_larb_get(struct device *larbdev)
}
/* Configure the iommu info for this larb */
- writel(*larb->mmu, larb->base + SMI_LARB_MMU_EN);
+ larb_gen->config_port(larbdev);
return 0;
}
@@ -126,6 +158,45 @@ mtk_smi_larb_bind(struct device *dev, struct device *master, void *data)
return -ENODEV;
}
+static void mtk_smi_larb_config_port(struct device *dev)
+{
+ struct mtk_smi_larb *larb = dev_get_drvdata(dev);
+
+ writel(*larb->mmu, larb->base + SMI_LARB_MMU_EN);
+}
+
+
+static void mtk_smi_larb_config_port_gen1(struct device *dev)
+{
+ struct mtk_smi_larb *larb = dev_get_drvdata(dev);
+ const struct mtk_smi_larb_gen *larb_gen = larb->larb_gen;
+ struct mtk_smi *common = dev_get_drvdata(larb->smi_common_dev);
+ int i, m4u_port_id, larb_port_num;
+ u32 sec_con_val, reg_val;
+
+ m4u_port_id = larb_gen->port_in_larb[larb->larbid];
+ larb_port_num = larb_gen->port_in_larb[larb->larbid + 1]
+ - larb_gen->port_in_larb[larb->larbid];
+
+ for (i = 0; i < larb_port_num; i++, m4u_port_id++) {
+ if (*larb->mmu & BIT(i)) {
+ /* bit[port + 3] controls the virtual or physical */
+ sec_con_val = SMI_SECUR_CON_VAL_VIRT(m4u_port_id);
+ } else {
+ /* no need to enable the m4u for this port */
+ continue;
+ }
+ reg_val = readl(common->smi_ao_base
+ + REG_SMI_SECUR_CON_ADDR(m4u_port_id));
+ reg_val &= SMI_SECUR_CON_VAL_MSK(m4u_port_id);
+ reg_val |= sec_con_val;
+ reg_val |= SMI_SECUR_CON_VAL_DOMAIN(m4u_port_id);
+ writel(reg_val,
+ common->smi_ao_base
+ + REG_SMI_SECUR_CON_ADDR(m4u_port_id));
+ }
+}
+
static void
mtk_smi_larb_unbind(struct device *dev, struct device *master, void *data)
{
@@ -137,6 +208,31 @@ static const struct component_ops mtk_smi_larb_component_ops = {
.unbind = mtk_smi_larb_unbind,
};
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt8173 = {
+ /* mt8173 does not need the port_in_larb table */
+ .config_port = mtk_smi_larb_config_port,
+};
+
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt2701 = {
+ .port_in_larb = {
+ LARB0_PORT_OFFSET, LARB1_PORT_OFFSET,
+ LARB2_PORT_OFFSET, LARB3_PORT_OFFSET
+ },
+ .config_port = mtk_smi_larb_config_port_gen1,
+};
+
+static const struct of_device_id mtk_smi_larb_of_ids[] = {
+ {
+ .compatible = "mediatek,mt8173-smi-larb",
+ .data = &mtk_smi_larb_mt8173
+ },
+ {
+ .compatible = "mediatek,mt2701-smi-larb",
+ .data = &mtk_smi_larb_mt2701
+ },
+ {}
+};
+
static int mtk_smi_larb_probe(struct platform_device *pdev)
{
struct mtk_smi_larb *larb;
@@ -144,14 +240,20 @@ static int mtk_smi_larb_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *smi_node;
struct platform_device *smi_pdev;
+ const struct of_device_id *of_id;
if (!dev->pm_domain)
return -EPROBE_DEFER;
+ of_id = of_match_node(mtk_smi_larb_of_ids, pdev->dev.of_node);
+ if (!of_id)
+ return -EINVAL;
+
larb = devm_kzalloc(dev, sizeof(*larb), GFP_KERNEL);
if (!larb)
return -ENOMEM;
+ larb->larb_gen = of_id->data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
larb->base = devm_ioremap_resource(dev, res);
if (IS_ERR(larb->base))
@@ -191,24 +293,34 @@ static int mtk_smi_larb_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id mtk_smi_larb_of_ids[] = {
- { .compatible = "mediatek,mt8173-smi-larb",},
- {}
-};
-
static struct platform_driver mtk_smi_larb_driver = {
.probe = mtk_smi_larb_probe,
- .remove = mtk_smi_larb_remove,
+ .remove = mtk_smi_larb_remove,
.driver = {
.name = "mtk-smi-larb",
.of_match_table = mtk_smi_larb_of_ids,
}
};
+static const struct of_device_id mtk_smi_common_of_ids[] = {
+ {
+ .compatible = "mediatek,mt8173-smi-common",
+ .data = (void *)MTK_SMI_GEN2
+ },
+ {
+ .compatible = "mediatek,mt2701-smi-common",
+ .data = (void *)MTK_SMI_GEN1
+ },
+ {}
+};
+
static int mtk_smi_common_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_smi *common;
+ struct resource *res;
+ const struct of_device_id *of_id;
+ enum mtk_smi_gen smi_gen;
if (!dev->pm_domain)
return -EPROBE_DEFER;
@@ -226,6 +338,29 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
if (IS_ERR(common->clk_smi))
return PTR_ERR(common->clk_smi);
+ of_id = of_match_node(mtk_smi_common_of_ids, pdev->dev.of_node);
+ if (!of_id)
+ return -EINVAL;
+
+ /*
+ * For MTK SMI gen 1 we need the AO (always-on) base to configure the
+ * m4u ports, and we need to enable the async clock to bring the SMI
+ * clock into the EMI clock domain. For MTK SMI gen 2 there is no SMI
+ * AO base.
+ */
+ smi_gen = (enum mtk_smi_gen)of_id->data;
+ if (smi_gen == MTK_SMI_GEN1) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ common->smi_ao_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(common->smi_ao_base))
+ return PTR_ERR(common->smi_ao_base);
+
+ common->clk_async = devm_clk_get(dev, "async");
+ if (IS_ERR(common->clk_async))
+ return PTR_ERR(common->clk_async);
+
+ clk_prepare_enable(common->clk_async);
+ }
pm_runtime_enable(dev);
platform_set_drvdata(pdev, common);
return 0;
@@ -237,11 +372,6 @@ static int mtk_smi_common_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id mtk_smi_common_of_ids[] = {
- { .compatible = "mediatek,mt8173-smi-common", },
- {}
-};
-
static struct platform_driver mtk_smi_common_driver = {
.probe = mtk_smi_common_probe,
.remove = mtk_smi_common_remove,
@@ -272,4 +402,5 @@ err_unreg_smi:
platform_driver_unregister(&mtk_smi_common_driver);
return ret;
}
+
subsys_initcall(mtk_smi_init);
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 73ec32001..f00f3e742 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -20,7 +20,6 @@
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
@@ -1807,7 +1806,6 @@ static const struct of_device_id gpmc_dt_ids[] = {
{ .compatible = "ti,am3352-gpmc" }, /* am335x devices */
{ }
};
-MODULE_DEVICE_TABLE(of, gpmc_dt_ids);
/**
* gpmc_read_settings_dt - read gpmc settings from device-tree
@@ -2134,8 +2132,7 @@ no_timings:
/* is child a common bus? */
if (of_match_node(of_default_bus_match_table, child))
/* create children and other common bus children */
- if (of_platform_populate(child, of_default_bus_match_table,
- NULL, &pdev->dev))
+ if (of_platform_default_populate(child, NULL, &pdev->dev))
goto err_child_fail;
return 0;
@@ -2155,68 +2152,6 @@ err:
return ret;
}
-static int gpmc_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
-{
- return 1; /* we're input only */
-}
-
-static int gpmc_gpio_direction_input(struct gpio_chip *chip,
- unsigned int offset)
-{
- return 0; /* we're input only */
-}
-
-static int gpmc_gpio_direction_output(struct gpio_chip *chip,
- unsigned int offset, int value)
-{
- return -EINVAL; /* we're input only */
-}
-
-static void gpmc_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
-{
-}
-
-static int gpmc_gpio_get(struct gpio_chip *chip, unsigned int offset)
-{
- u32 reg;
-
- offset += 8;
-
- reg = gpmc_read_reg(GPMC_STATUS) & BIT(offset);
-
- return !!reg;
-}
-
-static int gpmc_gpio_init(struct gpmc_device *gpmc)
-{
- int ret;
-
- gpmc->gpio_chip.parent = gpmc->dev;
- gpmc->gpio_chip.owner = THIS_MODULE;
- gpmc->gpio_chip.label = DEVICE_NAME;
- gpmc->gpio_chip.ngpio = gpmc_nr_waitpins;
- gpmc->gpio_chip.get_direction = gpmc_gpio_get_direction;
- gpmc->gpio_chip.direction_input = gpmc_gpio_direction_input;
- gpmc->gpio_chip.direction_output = gpmc_gpio_direction_output;
- gpmc->gpio_chip.set = gpmc_gpio_set;
- gpmc->gpio_chip.get = gpmc_gpio_get;
- gpmc->gpio_chip.base = -1;
-
- ret = gpiochip_add(&gpmc->gpio_chip);
- if (ret < 0) {
- dev_err(gpmc->dev, "could not register gpio chip: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static void gpmc_gpio_exit(struct gpmc_device *gpmc)
-{
- gpiochip_remove(&gpmc->gpio_chip);
-}
-
static int gpmc_probe_dt(struct platform_device *pdev)
{
int ret;
@@ -2280,7 +2215,69 @@ static int gpmc_probe_dt(struct platform_device *pdev)
static void gpmc_probe_dt_children(struct platform_device *pdev)
{
}
-#endif
+#endif /* CONFIG_OF */
+
+static int gpmc_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ return 1; /* we're input only */
+}
+
+static int gpmc_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ return 0; /* we're input only */
+}
+
+static int gpmc_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ return -EINVAL; /* we're input only */
+}
+
+static void gpmc_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+}
+
+static int gpmc_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ u32 reg;
+
+ offset += 8;
+
+ reg = gpmc_read_reg(GPMC_STATUS) & BIT(offset);
+
+ return !!reg;
+}
+
+static int gpmc_gpio_init(struct gpmc_device *gpmc)
+{
+ int ret;
+
+ gpmc->gpio_chip.parent = gpmc->dev;
+ gpmc->gpio_chip.owner = THIS_MODULE;
+ gpmc->gpio_chip.label = DEVICE_NAME;
+ gpmc->gpio_chip.ngpio = gpmc_nr_waitpins;
+ gpmc->gpio_chip.get_direction = gpmc_gpio_get_direction;
+ gpmc->gpio_chip.direction_input = gpmc_gpio_direction_input;
+ gpmc->gpio_chip.direction_output = gpmc_gpio_direction_output;
+ gpmc->gpio_chip.set = gpmc_gpio_set;
+ gpmc->gpio_chip.get = gpmc_gpio_get;
+ gpmc->gpio_chip.base = -1;
+
+ ret = gpiochip_add(&gpmc->gpio_chip);
+ if (ret < 0) {
+ dev_err(gpmc->dev, "could not register gpio chip: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void gpmc_gpio_exit(struct gpmc_device *gpmc)
+{
+ gpiochip_remove(&gpmc->gpio_chip);
+}
static int gpmc_probe(struct platform_device *pdev)
{
@@ -2430,15 +2427,7 @@ static __init int gpmc_init(void)
{
return platform_driver_register(&gpmc_driver);
}
-
-static __exit void gpmc_exit(void)
-{
- platform_driver_unregister(&gpmc_driver);
-
-}
-
postcore_initcall(gpmc_init);
-module_exit(gpmc_exit);
static struct omap3_gpmc_regs gpmc_context;
diff --git a/drivers/memory/samsung/exynos-srom.c b/drivers/memory/samsung/exynos-srom.c
index 96756fb4d..bf827a666 100644
--- a/drivers/memory/samsung/exynos-srom.c
+++ b/drivers/memory/samsung/exynos-srom.c
@@ -11,7 +11,7 @@
*/
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
@@ -91,17 +91,17 @@ static int exynos_srom_configure_bank(struct exynos_srom *srom,
if (width == 2)
cs |= 1 << EXYNOS_SROM_BW__DATAWIDTH__SHIFT;
- bw = __raw_readl(srom->reg_base + EXYNOS_SROM_BW);
+ bw = readl_relaxed(srom->reg_base + EXYNOS_SROM_BW);
bw = (bw & ~(EXYNOS_SROM_BW__CS_MASK << bank)) | (cs << bank);
- __raw_writel(bw, srom->reg_base + EXYNOS_SROM_BW);
+ writel_relaxed(bw, srom->reg_base + EXYNOS_SROM_BW);
- __raw_writel(pmc | (timing[0] << EXYNOS_SROM_BCX__TACP__SHIFT) |
- (timing[1] << EXYNOS_SROM_BCX__TCAH__SHIFT) |
- (timing[2] << EXYNOS_SROM_BCX__TCOH__SHIFT) |
- (timing[3] << EXYNOS_SROM_BCX__TACC__SHIFT) |
- (timing[4] << EXYNOS_SROM_BCX__TCOS__SHIFT) |
- (timing[5] << EXYNOS_SROM_BCX__TACS__SHIFT),
- srom->reg_base + EXYNOS_SROM_BC0 + bank);
+ writel_relaxed(pmc | (timing[0] << EXYNOS_SROM_BCX__TACP__SHIFT) |
+ (timing[1] << EXYNOS_SROM_BCX__TCAH__SHIFT) |
+ (timing[2] << EXYNOS_SROM_BCX__TCOH__SHIFT) |
+ (timing[3] << EXYNOS_SROM_BCX__TACC__SHIFT) |
+ (timing[4] << EXYNOS_SROM_BCX__TCOS__SHIFT) |
+ (timing[5] << EXYNOS_SROM_BCX__TACS__SHIFT),
+ srom->reg_base + EXYNOS_SROM_BC0 + bank);
return 0;
}
@@ -134,7 +134,7 @@ static int exynos_srom_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, srom);
srom->reg_offset = exynos_srom_alloc_reg_dump(exynos_srom_offsets,
- sizeof(exynos_srom_offsets));
+ ARRAY_SIZE(exynos_srom_offsets));
if (!srom->reg_offset) {
iounmap(srom->reg_base);
return -ENOMEM;
@@ -159,16 +159,6 @@ static int exynos_srom_probe(struct platform_device *pdev)
return of_platform_populate(np, NULL, NULL, dev);
}
-static int exynos_srom_remove(struct platform_device *pdev)
-{
- struct exynos_srom *srom = platform_get_drvdata(pdev);
-
- kfree(srom->reg_offset);
- iounmap(srom->reg_base);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static void exynos_srom_save(void __iomem *base,
struct exynos_srom_reg_dump *rd,
@@ -211,21 +201,16 @@ static const struct of_device_id of_exynos_srom_ids[] = {
},
{},
};
-MODULE_DEVICE_TABLE(of, of_exynos_srom_ids);
static SIMPLE_DEV_PM_OPS(exynos_srom_pm_ops, exynos_srom_suspend, exynos_srom_resume);
static struct platform_driver exynos_srom_driver = {
.probe = exynos_srom_probe,
- .remove = exynos_srom_remove,
.driver = {
.name = "exynos-srom",
.of_match_table = of_exynos_srom_ids,
.pm = &exynos_srom_pm_ops,
+ .suppress_bind_attrs = true,
},
};
-module_platform_driver(exynos_srom_driver);
-
-MODULE_AUTHOR("Pankaj Dubey <pankaj.dubey@samsung.com>");
-MODULE_DESCRIPTION("Exynos SROM Controller Driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(exynos_srom_driver);
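The __raw_readl()/__raw_writel() calls replaced above bypass both byte swapping and ordering barriers; readl_relaxed()/writel_relaxed() keep the endianness conversion and drop only the barrier, which is the safe middle ground for a plain register bank like this one. A minimal sketch of the relaxed read-modify-write pattern, against a hypothetical register block:

#include <linux/bitops.h>
#include <linux/io.h>

#define DEMO_CTRL_REG	0x0		/* hypothetical register offset */
#define DEMO_CTRL_EN	BIT(0)		/* hypothetical enable bit */

static void demo_enable(void __iomem *base)
{
	u32 val;

	/* Endianness-correct on LE and BE kernels, no per-access barrier. */
	val = readl_relaxed(base + DEMO_CTRL_REG);
	val |= DEMO_CTRL_EN;
	writel_relaxed(val, base + DEMO_CTRL_REG);
}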
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index a1ae0cc2b..a4803ac19 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -186,8 +186,10 @@ static int load_timings(struct tegra_mc *mc, struct device_node *node)
timing = &mc->timings[i++];
err = load_one_timing(mc, timing, child);
- if (err)
+ if (err) {
+ of_node_put(child);
return err;
+ }
}
return 0;
@@ -206,15 +208,13 @@ static int tegra_mc_setup_timings(struct tegra_mc *mc)
for_each_child_of_node(mc->dev->of_node, node) {
err = of_property_read_u32(node, "nvidia,ram-code",
&node_ram_code);
- if (err || (node_ram_code != ram_code)) {
- of_node_put(node);
+ if (err || (node_ram_code != ram_code))
continue;
- }
err = load_timings(mc, node);
+ of_node_put(node);
if (err)
return err;
- of_node_put(node);
break;
}
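Both this hunk and the tegra124-emc one below fix the same reference-counting contract: for_each_child_of_node() takes a reference on each child and drops it when advancing to the next iteration, so any early exit (break, return, or keeping the node) must account for that reference itself. A short sketch of the contract, with a hypothetical property name:

#include <linux/of.h>

/*
 * Find the first child carrying a given boolean property. The caller
 * must of_node_put() the returned node when done with it.
 */
static struct device_node *find_flagged_child(struct device_node *parent)
{
	struct device_node *child;

	for_each_child_of_node(parent, child) {
		if (of_property_read_bool(child, "vendor,flagged"))
			return child;	/* keep the reference the loop holds */
	}

	return NULL;	/* the loop dropped every reference itself */
}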
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
index 3dac7be39..06cc781eb 100644
--- a/drivers/memory/tegra/tegra124-emc.c
+++ b/drivers/memory/tegra/tegra124-emc.c
@@ -970,8 +970,10 @@ static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc,
timing = &emc->timings[i++];
err = load_one_timing_from_dt(emc, timing, child);
- if (err)
+ if (err) {
+ of_node_put(child);
return err;
+ }
}
sort(emc->timings, emc->num_timings, sizeof(*timing), cmp_timings,
@@ -995,10 +997,8 @@ tegra_emc_find_node_by_ram_code(struct device_node *node, u32 ram_code)
u32 value;
err = of_property_read_u32(np, "nvidia,ram-code", &value);
- if (err || (value != ram_code)) {
- of_node_put(np);
+ if (err || (value != ram_code))
continue;
- }
return np;
}
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 3cd68152d..aacf584f2 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -2002,8 +2002,7 @@ static int msb_bd_getgeo(struct block_device *bdev,
static int msb_prepare_req(struct request_queue *q, struct request *req)
{
- if (req->cmd_type != REQ_TYPE_FS &&
- req->cmd_type != REQ_TYPE_BLOCK_PC) {
+ if (req->cmd_type != REQ_TYPE_FS) {
blk_dump_rq_flags(req, "MS unsupported request");
return BLKPREP_KILL;
}
@@ -2146,7 +2145,6 @@ static int msb_init_disk(struct memstick_dev *card)
msb->disk->fops = &msb_bdops;
msb->disk->private_data = msb;
msb->disk->queue = msb->queue;
- msb->disk->driverfs_dev = &card->dev;
msb->disk->flags |= GENHD_FL_EXT_DEVT;
capacity = msb->pages_in_block * msb->logical_block_count;
@@ -2163,7 +2161,7 @@ static int msb_init_disk(struct memstick_dev *card)
set_disk_ro(msb->disk, 1);
msb_start(card);
- add_disk(msb->disk);
+ device_add_disk(&card->dev, msb->disk);
dbg("Disk added");
return 0;
@@ -2340,23 +2338,11 @@ static struct memstick_driver msb_driver = {
.resume = msb_resume
};
-static int major;
-
static int __init msb_init(void)
{
- int rc = register_blkdev(0, DRIVER_NAME);
-
- if (rc < 0) {
- pr_err("failed to register major (error %d)\n", rc);
- return rc;
- }
-
- major = rc;
- rc = memstick_register_driver(&msb_driver);
- if (rc) {
- unregister_blkdev(major, DRIVER_NAME);
+ int rc = memstick_register_driver(&msb_driver);
+ if (rc)
pr_err("failed to register memstick driver (error %d)\n", rc);
- }
return rc;
}
@@ -2364,7 +2350,6 @@ static int __init msb_init(void)
static void __exit msb_exit(void)
{
memstick_unregister_driver(&msb_driver);
- unregister_blkdev(major, DRIVER_NAME);
idr_destroy(&msb_disk_idr);
}
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 0fb27d338..c1472275f 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -829,8 +829,7 @@ static void mspro_block_start(struct memstick_dev *card)
static int mspro_block_prepare_req(struct request_queue *q, struct request *req)
{
- if (req->cmd_type != REQ_TYPE_FS &&
- req->cmd_type != REQ_TYPE_BLOCK_PC) {
+ if (req->cmd_type != REQ_TYPE_FS) {
blk_dump_rq_flags(req, "MSPro unsupported request");
return BLKPREP_KILL;
}
@@ -1243,7 +1242,6 @@ static int mspro_block_init_disk(struct memstick_dev *card)
msb->usage_count = 1;
msb->disk->private_data = msb;
msb->disk->queue = msb->queue;
- msb->disk->driverfs_dev = &card->dev;
sprintf(msb->disk->disk_name, "mspblk%d", disk_id);
@@ -1255,7 +1253,7 @@ static int mspro_block_init_disk(struct memstick_dev *card)
set_capacity(msb->disk, capacity);
dev_dbg(&card->dev, "capacity set %ld\n", capacity);
- add_disk(msb->disk);
+ device_add_disk(&card->dev, msb->disk);
msb->active = 1;
return 0;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 1bcf601de..580f4f280 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -18,6 +18,17 @@ config MFD_CS5535
This is the core driver for CS5535/CS5536 MFD functions. This is
necessary for using the board's GPIO and MFGPT functionality.
+config MFD_ALTERA_A10SR
+ bool "Altera Arria10 DevKit System Resource chip"
+ depends on ARCH_SOCFPGA && SPI_MASTER=y && OF
+ select REGMAP_SPI
+ select MFD_CORE
+ help
+ Support for the Altera Arria10 DevKit MAX5 System Resource chip
+ using the SPI interface. This driver provides common support for
+ accessing the external gpio extender (LEDs & buttons) and
+ power supply alarms (hwmon).
+
config MFD_ACT8945A
tristate "Active-semi ACT8945A"
select MFD_CORE
@@ -480,6 +491,8 @@ config MFD_KEMPLD
* COMe-cDC2 (microETXexpress-DC)
* COMe-cHL6
* COMe-cPC2 (microETXexpress-PC)
+ * COMe-cSL6
+ * COMe-mAL10
* COMe-mBT10
* COMe-mCT10
* COMe-mTT10 (nanoETXexpress-TT)
@@ -524,8 +537,8 @@ config MFD_88PM860X
battery-charger under the corresponding menus.
config MFD_MAX14577
- bool "Maxim Semiconductor MAX14577/77836 MUIC + Charger Support"
- depends on I2C=y
+ tristate "Maxim Semiconductor MAX14577/77836 MUIC + Charger Support"
+ depends on I2C
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
@@ -852,13 +865,14 @@ config MFD_RK808
including interrupts, RTC, LDO & DCDC regulators, and onkey.
config MFD_RN5T618
- tristate "Ricoh RN5T5618 PMIC"
+ tristate "Ricoh RN5T567/618 PMIC"
depends on I2C
+ depends on OF
select MFD_CORE
select REGMAP_I2C
help
- Say yes here to add support for the Ricoh RN5T618 PMIC. This
- driver provides common support for accessing the device,
+ Say yes here to add support for the Ricoh RN5T567 or RN5T618 PMIC.
+ This driver provides common support for accessing the device,
additional drivers must be enabled in order to use the
functionality of the device.
@@ -1535,6 +1549,7 @@ config MFD_WM8350
config MFD_WM8350_I2C
bool "Wolfson Microelectronics WM8350 with I2C"
select MFD_WM8350
+ select REGMAP_I2C
depends on I2C=y
help
The WM8350 is an integrated audio and power management
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 42a66e19e..2ba3ba35f 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -205,3 +205,5 @@ intel-soc-pmic-objs := intel_soc_pmic_core.o intel_soc_pmic_crc.o
intel-soc-pmic-$(CONFIG_INTEL_PMC_IPC) += intel_soc_pmic_bxtwc.o
obj-$(CONFIG_INTEL_SOC_PMIC) += intel-soc-pmic.o
obj-$(CONFIG_MFD_MT6397) += mt6397-core.o
+
+obj-$(CONFIG_MFD_ALTERA_A10SR) += altera-a10sr.o
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index f3d689176..589eebfc1 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -1087,7 +1087,6 @@ static int ab8500_probe(struct platform_device *pdev)
"Vbus Detect (USB)",
"USB ID Detect",
"UART Factory Mode Detect"};
- struct ab8500_platform_data *plat = dev_get_platdata(&pdev->dev);
const struct platform_device_id *platid = platform_get_device_id(pdev);
enum ab8500_version version = AB8500_VERSION_UNDEFINED;
struct device_node *np = pdev->dev.of_node;
@@ -1219,9 +1218,6 @@ static int ab8500_probe(struct platform_device *pdev)
pr_cont("None\n");
}
- if (plat && plat->init)
- plat->init(ab8500);
-
if (is_ab9540(ab8500)) {
ret = get_register_interruptible(ab8500, AB8500_CHARGER,
AB8500_CH_USBCH_STAT1_REG, &value);
diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
index b9f001030..207cc4979 100644
--- a/drivers/mfd/ab8500-sysctrl.c
+++ b/drivers/mfd/ab8500-sysctrl.c
@@ -127,45 +127,11 @@ EXPORT_SYMBOL(ab8500_sysctrl_write);
static int ab8500_sysctrl_probe(struct platform_device *pdev)
{
- struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
- struct ab8500_platform_data *plat;
- struct ab8500_sysctrl_platform_data *pdata;
-
- plat = dev_get_platdata(pdev->dev.parent);
-
- if (!plat)
- return -EINVAL;
-
sysctrl_dev = &pdev->dev;
if (!pm_power_off)
pm_power_off = ab8500_power_off;
- pdata = plat->sysctrl;
- if (pdata) {
- int last, ret, i, j;
-
- if (is_ab8505(ab8500))
- last = AB8500_SYSCLKREQ4RFCLKBUF;
- else
- last = AB8500_SYSCLKREQ8RFCLKBUF;
-
- for (i = AB8500_SYSCLKREQ1RFCLKBUF; i <= last; i++) {
- j = i - AB8500_SYSCLKREQ1RFCLKBUF;
- ret = ab8500_sysctrl_write(i, 0xff,
- pdata->initial_req_buf_config[j]);
- dev_dbg(&pdev->dev,
- "Setting SysClkReq%dRfClkBuf 0x%X\n",
- j + 1,
- pdata->initial_req_buf_config[j]);
- if (ret < 0) {
- dev_err(&pdev->dev,
- "Can't set sysClkReq%dRfClkBuf: %d\n",
- j + 1, ret);
- }
- }
- }
-
return 0;
}
diff --git a/drivers/mfd/altera-a10sr.c b/drivers/mfd/altera-a10sr.c
new file mode 100644
index 000000000..c05aa4ff5
--- /dev/null
+++ b/drivers/mfd/altera-a10sr.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright Intel Corporation (C) 2014-2016. All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * SPI access for Altera Arria10 MAX5 System Resource Chip
+ *
+ * Adapted from DA9052
+ */
+
+#include <linux/mfd/altera-a10sr.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/spi/spi.h>
+
+static const struct mfd_cell altr_a10sr_subdev_info[] = {
+ {
+ .name = "altr_a10sr_gpio",
+ .of_compatible = "altr,a10sr-gpio",
+ },
+};
+
+static bool altr_a10sr_reg_readable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ALTR_A10SR_VERSION_READ:
+ case ALTR_A10SR_LED_REG:
+ case ALTR_A10SR_PBDSW_REG:
+ case ALTR_A10SR_PBDSW_IRQ_REG:
+ case ALTR_A10SR_PWR_GOOD1_REG:
+ case ALTR_A10SR_PWR_GOOD2_REG:
+ case ALTR_A10SR_PWR_GOOD3_REG:
+ case ALTR_A10SR_FMCAB_REG:
+ case ALTR_A10SR_HPS_RST_REG:
+ case ALTR_A10SR_USB_QSPI_REG:
+ case ALTR_A10SR_SFPA_REG:
+ case ALTR_A10SR_SFPB_REG:
+ case ALTR_A10SR_I2C_M_REG:
+ case ALTR_A10SR_WARM_RST_REG:
+ case ALTR_A10SR_WR_KEY_REG:
+ case ALTR_A10SR_PMBUS_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool altr_a10sr_reg_writeable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ALTR_A10SR_LED_REG:
+ case ALTR_A10SR_PBDSW_IRQ_REG:
+ case ALTR_A10SR_FMCAB_REG:
+ case ALTR_A10SR_HPS_RST_REG:
+ case ALTR_A10SR_USB_QSPI_REG:
+ case ALTR_A10SR_SFPA_REG:
+ case ALTR_A10SR_SFPB_REG:
+ case ALTR_A10SR_WARM_RST_REG:
+ case ALTR_A10SR_WR_KEY_REG:
+ case ALTR_A10SR_PMBUS_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool altr_a10sr_reg_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ALTR_A10SR_PBDSW_REG:
+ case ALTR_A10SR_PBDSW_IRQ_REG:
+ case ALTR_A10SR_PWR_GOOD1_REG:
+ case ALTR_A10SR_PWR_GOOD2_REG:
+ case ALTR_A10SR_PWR_GOOD3_REG:
+ case ALTR_A10SR_HPS_RST_REG:
+ case ALTR_A10SR_I2C_M_REG:
+ case ALTR_A10SR_WARM_RST_REG:
+ case ALTR_A10SR_WR_KEY_REG:
+ case ALTR_A10SR_PMBUS_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+const struct regmap_config altr_a10sr_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .cache_type = REGCACHE_NONE,
+
+ .use_single_rw = true,
+ .read_flag_mask = 1,
+ .write_flag_mask = 0,
+
+ .max_register = ALTR_A10SR_WR_KEY_REG,
+ .readable_reg = altr_a10sr_reg_readable,
+ .writeable_reg = altr_a10sr_reg_writeable,
+ .volatile_reg = altr_a10sr_reg_volatile,
+
+};
+
+static int altr_a10sr_spi_probe(struct spi_device *spi)
+{
+ int ret;
+ struct altr_a10sr *a10sr;
+
+ a10sr = devm_kzalloc(&spi->dev, sizeof(*a10sr),
+ GFP_KERNEL);
+ if (!a10sr)
+ return -ENOMEM;
+
+ spi->mode = SPI_MODE_3;
+ spi->bits_per_word = 8;
+ spi_setup(spi);
+
+ a10sr->dev = &spi->dev;
+
+ spi_set_drvdata(spi, a10sr);
+
+ a10sr->regmap = devm_regmap_init_spi(spi, &altr_a10sr_regmap_config);
+ if (IS_ERR(a10sr->regmap)) {
+ ret = PTR_ERR(a10sr->regmap);
+ dev_err(&spi->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = devm_mfd_add_devices(a10sr->dev, PLATFORM_DEVID_AUTO,
+ altr_a10sr_subdev_info,
+ ARRAY_SIZE(altr_a10sr_subdev_info),
+ NULL, 0, NULL);
+ if (ret)
+ dev_err(a10sr->dev, "Failed to register sub-devices: %d\n",
+ ret);
+
+ return ret;
+}
+
+static const struct of_device_id altr_a10sr_spi_of_match[] = {
+ { .compatible = "altr,a10sr" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, altr_a10sr_spi_of_match);
+
+static struct spi_driver altr_a10sr_spi_driver = {
+ .probe = altr_a10sr_spi_probe,
+ .driver = {
+ .name = "altr_a10sr",
+ .of_match_table = of_match_ptr(altr_a10sr_spi_of_match),
+ },
+};
+
+module_spi_driver(altr_a10sr_spi_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Thor Thayer <tthayer@opensource.altera.com>");
+MODULE_DESCRIPTION("Altera Arria10 DevKit System Resource MFD Driver");
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index bf2717967..e4f97b3c8 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -1462,7 +1462,7 @@ int arizona_dev_init(struct arizona *arizona)
/* Set up for interrupts */
ret = arizona_irq_init(arizona);
if (ret != 0)
- goto err_reset;
+ goto err_pm;
pm_runtime_set_autosuspend_delay(arizona->dev, 100);
pm_runtime_use_autosuspend(arizona->dev);
@@ -1486,6 +1486,8 @@ int arizona_dev_init(struct arizona *arizona)
err_irq:
arizona_irq_exit(arizona);
+err_pm:
+ pm_runtime_disable(arizona->dev);
err_reset:
arizona_enable_reset(arizona);
regulator_disable(arizona->dcvdd);
diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
index edeb49513..5e18d3c77 100644
--- a/drivers/mfd/arizona-irq.c
+++ b/drivers/mfd/arizona-irq.c
@@ -109,8 +109,20 @@ static irqreturn_t arizona_irq_thread(int irq, void *data)
do {
poll = false;
- if (arizona->aod_irq_chip)
- handle_nested_irq(irq_find_mapping(arizona->virq, 0));
+ if (arizona->aod_irq_chip) {
+ /*
+ * Check the AOD status register to determine whether
+ * the nested IRQ handler should be called.
+ */
+ ret = regmap_read(arizona->regmap,
+ ARIZONA_AOD_IRQ1, &val);
+ if (ret)
+ dev_warn(arizona->dev,
+ "Failed to read AOD IRQ1 %d\n", ret);
+ else if (val)
+ handle_nested_irq(
+ irq_find_mapping(arizona->virq, 0));
+ }
/*
* Check if one of the main interrupts is asserted and only
diff --git a/drivers/mfd/atmel-hlcdc.c b/drivers/mfd/atmel-hlcdc.c
index eca7ea69b..4b15b0840 100644
--- a/drivers/mfd/atmel-hlcdc.c
+++ b/drivers/mfd/atmel-hlcdc.c
@@ -50,8 +50,9 @@ static int regmap_atmel_hlcdc_reg_write(void *context, unsigned int reg,
if (reg <= ATMEL_HLCDC_DIS) {
u32 status;
- readl_poll_timeout(hregmap->regs + ATMEL_HLCDC_SR, status,
- !(status & ATMEL_HLCDC_SIP), 1, 100);
+ readl_poll_timeout_atomic(hregmap->regs + ATMEL_HLCDC_SR,
+ status, !(status & ATMEL_HLCDC_SIP),
+ 1, 100);
}
writel(val, hregmap->regs + reg);
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index e4e32978c..fd80b0981 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -17,6 +17,7 @@
*/
#include <linux/err.h>
+#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -93,7 +94,10 @@ static const struct regmap_range axp22x_writeable_ranges[] = {
};
static const struct regmap_range axp22x_volatile_ranges[] = {
+ regmap_reg_range(AXP20X_PWR_INPUT_STATUS, AXP20X_PWR_OP_MODE),
regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IRQ5_STATE),
+ regmap_reg_range(AXP22X_GPIO_STATE, AXP22X_GPIO_STATE),
+ regmap_reg_range(AXP20X_FG_RES, AXP20X_FG_RES),
};
static const struct regmap_access_table axp22x_writeable_table = {
@@ -157,6 +161,11 @@ static struct resource axp20x_usb_power_supply_resources[] = {
DEFINE_RES_IRQ_NAMED(AXP20X_IRQ_VBUS_NOT_VALID, "VBUS_NOT_VALID"),
};
+static struct resource axp22x_usb_power_supply_resources[] = {
+ DEFINE_RES_IRQ_NAMED(AXP22X_IRQ_VBUS_PLUGIN, "VBUS_PLUGIN"),
+ DEFINE_RES_IRQ_NAMED(AXP22X_IRQ_VBUS_REMOVAL, "VBUS_REMOVAL"),
+};
+
static struct resource axp22x_pek_resources[] = {
{
.name = "PEK_DBR",
@@ -524,6 +533,11 @@ static struct mfd_cell axp22x_cells[] = {
.resources = axp22x_pek_resources,
}, {
.name = "axp20x-regulator",
+ }, {
+ .name = "axp20x-usb-power-supply",
+ .of_compatible = "x-powers,axp221-usb-power-supply",
+ .num_resources = ARRAY_SIZE(axp22x_usb_power_supply_resources),
+ .resources = axp22x_usb_power_supply_resources,
},
};
@@ -664,6 +678,9 @@ static void axp20x_power_off(void)
regmap_write(axp20x_pm_power_off->regmap, AXP20X_OFF_CTRL,
AXP20X_OFF);
+
+ /* Give capacitors etc. time to drain to avoid kernel panic msg. */
+ msleep(500);
}
int axp20x_match_device(struct axp20x_dev *axp20x)
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index c0a86aeb1..388e268b9 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -3094,8 +3094,7 @@ static void db8500_prcmu_update_cpufreq(void)
}
}
-static int db8500_prcmu_register_ab8500(struct device *parent,
- struct ab8500_platform_data *pdata)
+static int db8500_prcmu_register_ab8500(struct device *parent)
{
struct device_node *np;
struct resource ab8500_resource;
@@ -3103,8 +3102,6 @@ static int db8500_prcmu_register_ab8500(struct device *parent,
.name = "ab8500-core",
.of_compatible = "stericsson,ab8500",
.id = AB8500_VERSION_AB8500,
- .platform_data = pdata,
- .pdata_size = sizeof(struct ab8500_platform_data),
.resources = &ab8500_resource,
.num_resources = 1,
};
@@ -3133,7 +3130,6 @@ static int db8500_prcmu_register_ab8500(struct device *parent,
static int db8500_prcmu_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct prcmu_pdata *pdata = dev_get_platdata(&pdev->dev);
int irq = 0, err = 0;
struct resource *res;
@@ -3149,7 +3145,7 @@ static int db8500_prcmu_probe(struct platform_device *pdev)
return -ENOMEM;
}
init_prcm_registers();
- dbx500_fw_version_init(pdev, pdata->version_offset);
+ dbx500_fw_version_init(pdev, DB8500_PRCMU_FW_VERSION_OFFSET);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "prcmu-tcdm");
if (!res) {
dev_err(&pdev->dev, "no prcmu tcdm region provided\n");
@@ -3204,7 +3200,7 @@ static int db8500_prcmu_probe(struct platform_device *pdev)
}
}
- err = db8500_prcmu_register_ab8500(&pdev->dev, pdata->ab_platdata);
+ err = db8500_prcmu_register_ab8500(&pdev->dev);
if (err) {
mfd_remove_devices(&pdev->dev);
pr_err("prcmu: Failed to add ab8500 subdevice\n");
diff --git a/drivers/mfd/dm355evm_msp.c b/drivers/mfd/dm355evm_msp.c
index 14661ec5e..270e19c0b 100644
--- a/drivers/mfd/dm355evm_msp.c
+++ b/drivers/mfd/dm355evm_msp.c
@@ -199,11 +199,8 @@ static struct device *add_child(struct i2c_client *client, const char *name,
int status;
pdev = platform_device_alloc(name, -1);
- if (!pdev) {
- dev_dbg(&client->dev, "can't alloc dev\n");
- status = -ENOMEM;
- goto err;
- }
+ if (!pdev)
+ return ERR_PTR(-ENOMEM);
device_init_wakeup(&pdev->dev, can_wakeup);
pdev->dev.parent = &client->dev;
diff --git a/drivers/mfd/hi655x-pmic.c b/drivers/mfd/hi655x-pmic.c
index 05ddc7882..0fc629956 100644
--- a/drivers/mfd/hi655x-pmic.c
+++ b/drivers/mfd/hi655x-pmic.c
@@ -24,19 +24,15 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
-static const struct mfd_cell hi655x_pmic_devs[] = {
- { .name = "hi655x-regulator", },
-};
-
static const struct regmap_irq hi655x_irqs[] = {
- { .reg_offset = 0, .mask = OTMP_D1R_INT },
- { .reg_offset = 0, .mask = VSYS_2P5_R_INT },
- { .reg_offset = 0, .mask = VSYS_UV_D3R_INT },
- { .reg_offset = 0, .mask = VSYS_6P0_D200UR_INT },
- { .reg_offset = 0, .mask = PWRON_D4SR_INT },
- { .reg_offset = 0, .mask = PWRON_D20F_INT },
- { .reg_offset = 0, .mask = PWRON_D20R_INT },
- { .reg_offset = 0, .mask = RESERVE_INT },
+ { .reg_offset = 0, .mask = OTMP_D1R_INT_MASK },
+ { .reg_offset = 0, .mask = VSYS_2P5_R_INT_MASK },
+ { .reg_offset = 0, .mask = VSYS_UV_D3R_INT_MASK },
+ { .reg_offset = 0, .mask = VSYS_6P0_D200UR_INT_MASK },
+ { .reg_offset = 0, .mask = PWRON_D4SR_INT_MASK },
+ { .reg_offset = 0, .mask = PWRON_D20F_INT_MASK },
+ { .reg_offset = 0, .mask = PWRON_D20R_INT_MASK },
+ { .reg_offset = 0, .mask = RESERVE_INT_MASK },
};
static const struct regmap_irq_chip hi655x_irq_chip = {
@@ -45,6 +41,7 @@ static const struct regmap_irq_chip hi655x_irq_chip = {
.num_regs = 1,
.num_irqs = ARRAY_SIZE(hi655x_irqs),
.status_base = HI655X_IRQ_STAT_BASE,
+ .ack_base = HI655X_IRQ_STAT_BASE,
.mask_base = HI655X_IRQ_MASK_BASE,
};
@@ -55,6 +52,34 @@ static struct regmap_config hi655x_regmap_config = {
.max_register = HI655X_BUS_ADDR(0xFFF),
};
+static struct resource pwrkey_resources[] = {
+ {
+ .name = "down",
+ .start = PWRON_D20R_INT,
+ .end = PWRON_D20R_INT,
+ .flags = IORESOURCE_IRQ,
+ }, {
+ .name = "up",
+ .start = PWRON_D20F_INT,
+ .end = PWRON_D20F_INT,
+ .flags = IORESOURCE_IRQ,
+ }, {
+ .name = "hold 4s",
+ .start = PWRON_D4SR_INT,
+ .end = PWRON_D4SR_INT,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static const struct mfd_cell hi655x_pmic_devs[] = {
+ {
+ .name = "hi65xx-powerkey",
+ .num_resources = ARRAY_SIZE(pwrkey_resources),
+ .resources = &pwrkey_resources[0],
+ },
+ { .name = "hi655x-regulator", },
+};
+
static void hi655x_local_irq_clear(struct regmap *map)
{
int i;
@@ -80,12 +105,9 @@ static int hi655x_pmic_probe(struct platform_device *pdev)
pmic->dev = dev;
pmic->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!pmic->res)
- return -ENOENT;
-
base = devm_ioremap_resource(dev, pmic->res);
- if (!base)
- return -ENOMEM;
+ if (IS_ERR(base))
+ return PTR_ERR(base);
pmic->regmap = devm_regmap_init_mmio_clk(dev, NULL, base,
&hi655x_regmap_config);
@@ -123,7 +145,8 @@ static int hi655x_pmic_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pmic);
ret = mfd_add_devices(dev, PLATFORM_DEVID_AUTO, hi655x_pmic_devs,
- ARRAY_SIZE(hi655x_pmic_devs), NULL, 0, NULL);
+ ARRAY_SIZE(hi655x_pmic_devs), NULL, 0,
+ regmap_irq_get_domain(pmic->irq_data));
if (ret) {
dev_err(dev, "Failed to register device %d\n", ret);
regmap_del_irq_chip(gpio_to_irq(pmic->gpio), pmic->irq_data);
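Passing regmap_irq_get_domain() as the last argument of mfd_add_devices() means the IORESOURCE_IRQ entries in pwrkey_resources are remapped through the PMIC's IRQ domain, so the child device sees proper virqs rather than raw hardware indices. A sketch of how a child would consume them, assuming a hypothetical powerkey platform driver:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t demo_powerkey_handler(int irq, void *data)
{
	/* report the key event here */
	return IRQ_HANDLED;
}

static int demo_powerkey_probe(struct platform_device *pdev)
{
	int irq;

	/* "down" matches the named IRQ resource in the parent's mfd_cell. */
	irq = platform_get_irq_byname(pdev, "down");
	if (irq < 0)
		return irq;

	return devm_request_threaded_irq(&pdev->dev, irq, NULL,
					 demo_powerkey_handler, IRQF_ONESHOT,
					 "demo-powerkey", pdev);
}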
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index 05b924542..da5722d7c 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -624,6 +624,14 @@ static struct dmi_system_id kempld_dmi_table[] __initdata = {
.driver_data = (void *)&kempld_platform_data_generic,
.callback = kempld_create_platform_device,
}, {
+ .ident = "CSL6",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
+ DMI_MATCH(DMI_BOARD_NAME, "COMe-cSL6"),
+ },
+ .driver_data = (void *)&kempld_platform_data_generic,
+ .callback = kempld_create_platform_device,
+ }, {
.ident = "CVV6",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
@@ -647,6 +655,14 @@ static struct dmi_system_id kempld_dmi_table[] __initdata = {
.driver_data = (void *)&kempld_platform_data_generic,
.callback = kempld_create_platform_device,
}, {
+ .ident = "MAL1",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
+ DMI_MATCH(DMI_BOARD_NAME, "COMe-mAL10"),
+ },
+ .driver_data = (void *)&kempld_platform_data_generic,
+ .callback = kempld_create_platform_device,
+ }, {
.ident = "MBR1",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
index 2280b3fdc..6c245128a 100644
--- a/drivers/mfd/max14577.c
+++ b/drivers/mfd/max14577.c
@@ -561,7 +561,7 @@ static int __init max14577_i2c_init(void)
return i2c_add_driver(&max14577_i2c_driver);
}
-subsys_initcall(max14577_i2c_init);
+module_init(max14577_i2c_init);
static void __exit max14577_i2c_exit(void)
{
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index f32fbb8e8..258757e21 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -31,25 +31,25 @@
#include <linux/interrupt.h>
#include <linux/mfd/core.h>
#include <linux/mfd/max77620.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
-static struct resource gpio_resources[] = {
+static const struct resource gpio_resources[] = {
DEFINE_RES_IRQ(MAX77620_IRQ_TOP_GPIO),
};
-static struct resource power_resources[] = {
+static const struct resource power_resources[] = {
DEFINE_RES_IRQ(MAX77620_IRQ_LBT_MBATLOW),
};
-static struct resource rtc_resources[] = {
+static const struct resource rtc_resources[] = {
DEFINE_RES_IRQ(MAX77620_IRQ_TOP_RTC),
};
-static struct resource thermal_resources[] = {
+static const struct resource thermal_resources[] = {
DEFINE_RES_IRQ(MAX77620_IRQ_LBT_TJALRM1),
DEFINE_RES_IRQ(MAX77620_IRQ_LBT_TJALRM2),
};
@@ -111,15 +111,6 @@ static const struct mfd_cell max20024_children[] = {
},
};
-static struct regmap_irq_chip max77620_top_irq_chip = {
- .name = "max77620-top",
- .irqs = max77620_top_irqs,
- .num_irqs = ARRAY_SIZE(max77620_top_irqs),
- .num_regs = 2,
- .status_base = MAX77620_REG_IRQTOP,
- .mask_base = MAX77620_REG_IRQTOPM,
-};
-
static const struct regmap_range max77620_readable_ranges[] = {
regmap_reg_range(MAX77620_REG_CNFGGLBL1, MAX77620_REG_DVSSD4),
};
@@ -180,6 +171,51 @@ static const struct regmap_config max20024_regmap_config = {
.volatile_table = &max77620_volatile_table,
};
+/*
+ * MAX77620 and MAX20024 have the following steps of interrupt handling
+ * for TOP interrupts:
+ * 1. When an interrupt occurs from the PMIC, mask the PMIC interrupt by
+ *    setting GLBLM.
+ * 2. Read IRQTOP and service the interrupt.
+ * 3. Once all interrupts have been checked and serviced, the interrupt
+ *    service routine un-masks the hardware interrupt line by clearing GLBLM.
+ */
+static int max77620_irq_global_mask(void *irq_drv_data)
+{
+ struct max77620_chip *chip = irq_drv_data;
+ int ret;
+
+ ret = regmap_update_bits(chip->rmap, MAX77620_REG_INTENLBT,
+ MAX77620_GLBLM_MASK, MAX77620_GLBLM_MASK);
+ if (ret < 0)
+ dev_err(chip->dev, "Failed to set GLBLM: %d\n", ret);
+
+ return ret;
+}
+
+static int max77620_irq_global_unmask(void *irq_drv_data)
+{
+ struct max77620_chip *chip = irq_drv_data;
+ int ret;
+
+ ret = regmap_update_bits(chip->rmap, MAX77620_REG_INTENLBT,
+ MAX77620_GLBLM_MASK, 0);
+ if (ret < 0)
+ dev_err(chip->dev, "Failed to reset GLBLM: %d\n", ret);
+
+ return ret;
+}
+
+static struct regmap_irq_chip max77620_top_irq_chip = {
+ .name = "max77620-top",
+ .irqs = max77620_top_irqs,
+ .num_irqs = ARRAY_SIZE(max77620_top_irqs),
+ .num_regs = 2,
+ .status_base = MAX77620_REG_IRQTOP,
+ .mask_base = MAX77620_REG_IRQTOPM,
+ .handle_pre_irq = max77620_irq_global_mask,
+ .handle_post_irq = max77620_irq_global_unmask,
+};
+
/* max77620_get_fps_period_reg_value: Get FPS bit field value from
* requested periods.
* MAX77620 supports the FPS period of 40, 80, 160, 320, 540, 1280, 2560
@@ -433,6 +469,7 @@ static int max77620_probe(struct i2c_client *client,
if (ret < 0)
return ret;
+ max77620_top_irq_chip.irq_drv_data = chip;
ret = devm_regmap_add_irq_chip(chip->dev, chip->rmap, client->irq,
IRQF_ONESHOT | IRQF_SHARED,
chip->irq_base, &max77620_top_irq_chip,
@@ -568,7 +605,6 @@ static const struct i2c_device_id max77620_id[] = {
{"max20024", MAX20024},
{},
};
-MODULE_DEVICE_TABLE(i2c, max77620_id);
static const struct dev_pm_ops max77620_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(max77620_i2c_suspend, max77620_i2c_resume)
@@ -582,11 +618,4 @@ static struct i2c_driver max77620_driver = {
.probe = max77620_probe,
.id_table = max77620_id,
};
-
-module_i2c_driver(max77620_driver);
-
-MODULE_DESCRIPTION("MAX77620/MAX20024 Multi Function Device Core Driver");
-MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
-MODULE_AUTHOR("Chaitanya Bandi <bandik@nvidia.com>");
-MODULE_AUTHOR("Mallikarjun Kasoju <mkasoju@nvidia.com>");
-MODULE_LICENSE("GPL v2");
+builtin_i2c_driver(max77620_driver);
diff --git a/drivers/mfd/max77843.c b/drivers/mfd/max77843.c
index 7cfc95b49..dc5caeaaa 100644
--- a/drivers/mfd/max77843.c
+++ b/drivers/mfd/max77843.c
@@ -15,7 +15,7 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/mfd/core.h>
#include <linux/mfd/max77693-common.h>
#include <linux/mfd/max77843-private.h>
@@ -171,19 +171,6 @@ err_pmic_id:
return ret;
}
-static int max77843_remove(struct i2c_client *i2c)
-{
- struct max77693_dev *max77843 = i2c_get_clientdata(i2c);
-
- mfd_remove_devices(max77843->dev);
-
- regmap_del_irq_chip(max77843->irq, max77843->irq_data_topsys);
-
- i2c_unregister_device(max77843->i2c_chg);
-
- return 0;
-}
-
static const struct of_device_id max77843_dt_match[] = {
{ .compatible = "maxim,max77843", },
{ },
@@ -193,7 +180,6 @@ static const struct i2c_device_id max77843_id[] = {
{ "max77843", TYPE_MAX77843, },
{ },
};
-MODULE_DEVICE_TABLE(i2c, max77843_id);
static int __maybe_unused max77843_suspend(struct device *dev)
{
@@ -226,9 +212,9 @@ static struct i2c_driver max77843_i2c_driver = {
.name = "max77843",
.pm = &max77843_pm,
.of_match_table = max77843_dt_match,
+ .suppress_bind_attrs = true,
},
.probe = max77843_probe,
- .remove = max77843_remove,
.id_table = max77843_id,
};
@@ -237,9 +223,3 @@ static int __init max77843_i2c_init(void)
return i2c_add_driver(&max77843_i2c_driver);
}
subsys_initcall(max77843_i2c_init);
-
-static void __exit max77843_i2c_exit(void)
-{
- i2c_del_driver(&max77843_i2c_driver);
-}
-module_exit(max77843_i2c_exit);
diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
index 70443b161..5c80aea32 100644
--- a/drivers/mfd/max8925-i2c.c
+++ b/drivers/mfd/max8925-i2c.c
@@ -9,7 +9,7 @@
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/mfd/max8925.h>
@@ -133,7 +133,6 @@ static const struct i2c_device_id max8925_id_table[] = {
{ "max8925", 0 },
{ },
};
-MODULE_DEVICE_TABLE(i2c, max8925_id_table);
static int max8925_dt_init(struct device_node *np, struct device *dev,
struct max8925_platform_data *pdata)
@@ -240,7 +239,6 @@ static const struct of_device_id max8925_dt_ids[] = {
{ .compatible = "maxim,max8925", },
{},
};
-MODULE_DEVICE_TABLE(of, max8925_dt_ids);
static struct i2c_driver max8925_driver = {
.driver = {
@@ -264,13 +262,3 @@ static int __init max8925_i2c_init(void)
return ret;
}
subsys_initcall(max8925_i2c_init);
-
-static void __exit max8925_i2c_exit(void)
-{
- i2c_del_driver(&max8925_driver);
-}
-module_exit(max8925_i2c_exit);
-
-MODULE_DESCRIPTION("I2C Driver for Maxim 8925");
-MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index f316348e3..2d6e2c392 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -2,7 +2,7 @@
* max8997.c - mfd core driver for the Maxim 8966 and 8997
*
* Copyright (C) 2011 Samsung Electronics
- * MyungJoo Ham <myungjoo.ham@smasung.com>
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -28,7 +28,7 @@
#include <linux/of_irq.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/mfd/core.h>
#include <linux/mfd/max8997.h>
@@ -55,7 +55,6 @@ static const struct of_device_id max8997_pmic_dt_match[] = {
{ .compatible = "maxim,max8997-pmic", .data = (void *)TYPE_MAX8997 },
{},
};
-MODULE_DEVICE_TABLE(of, max8997_pmic_dt_match);
#endif
int max8997_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest)
@@ -263,24 +262,11 @@ err_i2c_haptic:
return ret;
}
-static int max8997_i2c_remove(struct i2c_client *i2c)
-{
- struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
-
- mfd_remove_devices(max8997->dev);
- i2c_unregister_device(max8997->muic);
- i2c_unregister_device(max8997->haptic);
- i2c_unregister_device(max8997->rtc);
-
- return 0;
-}
-
static const struct i2c_device_id max8997_i2c_id[] = {
{ "max8997", TYPE_MAX8997 },
{ "max8966", TYPE_MAX8966 },
{ }
};
-MODULE_DEVICE_TABLE(i2c, max8998_i2c_id);
static u8 max8997_dumpaddr_pmic[] = {
MAX8997_REG_INT1MSK,
@@ -510,10 +496,10 @@ static struct i2c_driver max8997_i2c_driver = {
.driver = {
.name = "max8997",
.pm = &max8997_pm,
+ .suppress_bind_attrs = true,
.of_match_table = of_match_ptr(max8997_pmic_dt_match),
},
.probe = max8997_i2c_probe,
- .remove = max8997_i2c_remove,
.id_table = max8997_i2c_id,
};
@@ -523,13 +509,3 @@ static int __init max8997_i2c_init(void)
}
/* init early so consumer devices can complete system boot */
subsys_initcall(max8997_i2c_init);
-
-static void __exit max8997_i2c_exit(void)
-{
- i2c_del_driver(&max8997_i2c_driver);
-}
-module_exit(max8997_i2c_exit);
-
-MODULE_DESCRIPTION("MAXIM 8997 multi-function core driver");
-MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index ab28b2940..4c33b8063 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -21,8 +21,6 @@
*/
#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
@@ -138,7 +136,6 @@ static const struct of_device_id max8998_dt_match[] = {
{ .compatible = "ti,lp3974", .data = (void *)TYPE_LP3974 },
{},
};
-MODULE_DEVICE_TABLE(of, max8998_dt_match);
#endif
/*
@@ -254,23 +251,11 @@ err:
return ret;
}
-static int max8998_i2c_remove(struct i2c_client *i2c)
-{
- struct max8998_dev *max8998 = i2c_get_clientdata(i2c);
-
- mfd_remove_devices(max8998->dev);
- max8998_irq_exit(max8998);
- i2c_unregister_device(max8998->rtc);
-
- return 0;
-}
-
static const struct i2c_device_id max8998_i2c_id[] = {
{ "max8998", TYPE_MAX8998 },
{ "lp3974", TYPE_LP3974},
{ }
};
-MODULE_DEVICE_TABLE(i2c, max8998_i2c_id);
static int max8998_suspend(struct device *dev)
{
@@ -378,10 +363,10 @@ static struct i2c_driver max8998_i2c_driver = {
.driver = {
.name = "max8998",
.pm = &max8998_pm,
+ .suppress_bind_attrs = true,
.of_match_table = of_match_ptr(max8998_dt_match),
},
.probe = max8998_i2c_probe,
- .remove = max8998_i2c_remove,
.id_table = max8998_i2c_id,
};
@@ -391,13 +376,3 @@ static int __init max8998_i2c_init(void)
}
/* init early so consumer devices can complete system boot */
subsys_initcall(max8998_i2c_init);
-
-static void __exit max8998_i2c_exit(void)
-{
- i2c_del_driver(&max8998_i2c_driver);
-}
-module_exit(max8998_i2c_exit);
-
-MODULE_DESCRIPTION("MAXIM 8998 multi-function core driver");
-MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
index c30290f33..1aa74c4c3 100644
--- a/drivers/mfd/omap-usb-tll.c
+++ b/drivers/mfd/omap-usb-tll.c
@@ -30,6 +30,8 @@
#include <linux/platform_data/usb-omap.h>
#include <linux/of.h>
+#include "omap-usb.h"
+
#define USBTLL_DRIVER_NAME "usbhs_tll"
/* TLL Register Set */
diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
index 880d4699b..2e4432345 100644
--- a/drivers/mfd/qcom_rpm.c
+++ b/drivers/mfd/qcom_rpm.c
@@ -72,8 +72,6 @@ struct qcom_rpm {
#define RPM_NOTIFICATION BIT(30)
#define RPM_REJECTED BIT(31)
-#define RPM_SIGNAL BIT(2)
-
static const struct qcom_rpm_resource apq8064_rpm_resource_table[] = {
[QCOM_RPM_CXO_CLK] = { 25, 9, 5, 1 },
[QCOM_RPM_PXO_CLK] = { 26, 10, 6, 1 },
diff --git a/drivers/mfd/rn5t618.c b/drivers/mfd/rn5t618.c
index 0ad51d792..ee94080e1 100644
--- a/drivers/mfd/rn5t618.c
+++ b/drivers/mfd/rn5t618.c
@@ -2,6 +2,7 @@
* MFD core driver for Ricoh RN5T618 PMIC
*
* Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
+ * Copyright (C) 2016 Toradex AG
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -11,10 +12,13 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/mfd/core.h>
#include <linux/mfd/rn5t618.h>
#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/reboot.h>
#include <linux/regmap.h>
static const struct mfd_cell rn5t618_cells[] = {
@@ -48,28 +52,64 @@ static const struct regmap_config rn5t618_regmap_config = {
};
static struct rn5t618 *rn5t618_pm_power_off;
+static struct notifier_block rn5t618_restart_handler;
-static void rn5t618_power_off(void)
+static void rn5t618_trigger_poweroff_sequence(bool repower)
{
/* disable automatic repower-on */
regmap_update_bits(rn5t618_pm_power_off->regmap, RN5T618_REPCNT,
- RN5T618_REPCNT_REPWRON, 0);
+ RN5T618_REPCNT_REPWRON,
+ repower ? RN5T618_REPCNT_REPWRON : 0);
/* start power-off sequence */
regmap_update_bits(rn5t618_pm_power_off->regmap, RN5T618_SLPCNT,
RN5T618_SLPCNT_SWPWROFF, RN5T618_SLPCNT_SWPWROFF);
}
+static void rn5t618_power_off(void)
+{
+ rn5t618_trigger_poweroff_sequence(false);
+}
+
+static int rn5t618_restart(struct notifier_block *this,
+ unsigned long mode, void *cmd)
+{
+ rn5t618_trigger_poweroff_sequence(true);
+
+ /*
+ * Re-power factor detection on the PMIC side is not instant; 1 ms
+ * proved to be enough time until the reset takes effect.
+ */
+ mdelay(1);
+
+ return NOTIFY_DONE;
+}
+
+static const struct of_device_id rn5t618_of_match[] = {
+ { .compatible = "ricoh,rn5t567", .data = (void *)RN5T567 },
+ { .compatible = "ricoh,rn5t618", .data = (void *)RN5T618 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rn5t618_of_match);
+
static int rn5t618_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
+ const struct of_device_id *of_id;
struct rn5t618 *priv;
int ret;
+ of_id = of_match_device(rn5t618_of_match, &i2c->dev);
+ if (!of_id) {
+ dev_err(&i2c->dev, "Failed to find matching DT ID\n");
+ return -EINVAL;
+ }
+
priv = devm_kzalloc(&i2c->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
i2c_set_clientdata(i2c, priv);
+ priv->variant = (long)of_id->data;
priv->regmap = devm_regmap_init_i2c(i2c, &rn5t618_regmap_config);
if (IS_ERR(priv->regmap)) {
@@ -85,9 +125,21 @@ static int rn5t618_i2c_probe(struct i2c_client *i2c,
return ret;
}
- if (!pm_power_off) {
- rn5t618_pm_power_off = priv;
- pm_power_off = rn5t618_power_off;
+ rn5t618_pm_power_off = priv;
+ if (of_device_is_system_power_controller(i2c->dev.of_node)) {
+ if (!pm_power_off)
+ pm_power_off = rn5t618_power_off;
+ else
+ dev_warn(&i2c->dev, "Poweroff callback already assigned\n");
+ }
+
+ rn5t618_restart_handler.notifier_call = rn5t618_restart;
+ rn5t618_restart_handler.priority = 192;
+
+ ret = register_restart_handler(&rn5t618_restart_handler);
+ if (ret) {
+ dev_err(&i2c->dev, "cannot register restart handler, %d\n", ret);
+ return ret;
}
return 0;
@@ -105,12 +157,6 @@ static int rn5t618_i2c_remove(struct i2c_client *i2c)
return 0;
}
-static const struct of_device_id rn5t618_of_match[] = {
- { .compatible = "ricoh,rn5t618" },
- { }
-};
-MODULE_DEVICE_TABLE(of, rn5t618_of_match);
-
static const struct i2c_device_id rn5t618_i2c_id[] = {
{ }
};
@@ -129,5 +175,5 @@ static struct i2c_driver rn5t618_i2c_driver = {
module_i2c_driver(rn5t618_i2c_driver);
MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
-MODULE_DESCRIPTION("Ricoh RN5T618 MFD driver");
+MODULE_DESCRIPTION("Ricoh RN5T567/618 MFD driver");
MODULE_LICENSE("GPL v2");
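register_restart_handler() chains the PMIC into the kernel's restart notifier list; the priority of 192 chosen above ranks it ahead of the default of 128, so a PMIC-driven power cycle wins over weaker fallback handlers. A minimal sketch of the registration, with hypothetical names:

#include <linux/notifier.h>
#include <linux/reboot.h>

static int demo_restart(struct notifier_block *nb, unsigned long mode,
			void *cmd)
{
	/* trigger the (hypothetical) reset hardware here */
	return NOTIFY_DONE;
}

static struct notifier_block demo_restart_nb = {
	.notifier_call	= demo_restart,
	.priority	= 192,	/* above the 128 default */
};

static int demo_register_restart(void)
{
	return register_restart_handler(&demo_restart_nb);
}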
diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c
index dbd907d71..691dab791 100644
--- a/drivers/mfd/rtsx_usb.c
+++ b/drivers/mfd/rtsx_usb.c
@@ -46,9 +46,6 @@ static void rtsx_usb_sg_timed_out(unsigned long data)
dev_dbg(&ucr->pusb_intf->dev, "%s: sg transfer timed out", __func__);
usb_sg_cancel(&ucr->current_sg);
-
- /* we know the cancellation is caused by time-out */
- ucr->current_sg.status = -ETIMEDOUT;
}
static int rtsx_usb_bulk_transfer_sglist(struct rtsx_ucr *ucr,
@@ -67,12 +64,15 @@ static int rtsx_usb_bulk_transfer_sglist(struct rtsx_ucr *ucr,
ucr->sg_timer.expires = jiffies + msecs_to_jiffies(timeout);
add_timer(&ucr->sg_timer);
usb_sg_wait(&ucr->current_sg);
- del_timer_sync(&ucr->sg_timer);
+ if (!del_timer_sync(&ucr->sg_timer))
+ ret = -ETIMEDOUT;
+ else
+ ret = ucr->current_sg.status;
if (act_len)
*act_len = ucr->current_sg.bytes;
- return ucr->current_sg.status;
+ return ret;
}
int rtsx_usb_transfer_data(struct rtsx_ucr *ucr, unsigned int pipe,
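The fix works because del_timer_sync() returns 0 when the timer was no longer pending, i.e. the timeout callback already ran and cancelled the transfer, so the SG status can no longer be trusted. A compact sketch of that watchdog idiom, with hypothetical names and the blocking wait elided:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/timer.h>

/*
 * Returns -ETIMEDOUT if the watchdog fired before completion,
 * otherwise the status the operation itself reported.
 */
static int demo_wait_with_watchdog(struct timer_list *watchdog,
				   unsigned long timeout_ms, int op_status)
{
	watchdog->expires = jiffies + msecs_to_jiffies(timeout_ms);
	add_timer(watchdog);

	/* ... block until the operation completes or the timer cancels it ... */

	if (!del_timer_sync(watchdog))
		return -ETIMEDOUT;	/* callback already ran: timed out */

	return op_status;
}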
diff --git a/drivers/mfd/si476x-i2c.c b/drivers/mfd/si476x-i2c.c
index fb4ce6d04..c180b7533 100644
--- a/drivers/mfd/si476x-i2c.c
+++ b/drivers/mfd/si476x-i2c.c
@@ -600,7 +600,7 @@ static int si476x_core_fwver_to_revision(struct si476x_core *core,
unknown_revision:
dev_err(&core->client->dev,
"Unsupported version of the firmware: %d.%d.%d, "
- "reverting to A10 comptible functions\n",
+ "reverting to A10 compatible functions\n",
major, minor1, minor2);
return SI476X_REVISION_A10;
diff --git a/drivers/mfd/smsc-ece1099.c b/drivers/mfd/smsc-ece1099.c
index 7f89e89b8..cd18c0982 100644
--- a/drivers/mfd/smsc-ece1099.c
+++ b/drivers/mfd/smsc-ece1099.c
@@ -36,7 +36,7 @@ static int smsc_i2c_probe(struct i2c_client *i2c,
{
struct smsc *smsc;
int devid, rev, venid_l, venid_h;
- int ret = 0;
+ int ret;
smsc = devm_kzalloc(&i2c->dev, sizeof(struct smsc),
GFP_KERNEL);
@@ -46,10 +46,8 @@ static int smsc_i2c_probe(struct i2c_client *i2c,
}
smsc->regmap = devm_regmap_init_i2c(i2c, &smsc_regmap_config);
- if (IS_ERR(smsc->regmap)) {
- ret = PTR_ERR(smsc->regmap);
- goto err;
- }
+ if (IS_ERR(smsc->regmap))
+ return PTR_ERR(smsc->regmap);
i2c_set_clientdata(i2c, smsc);
smsc->dev = &i2c->dev;
@@ -68,7 +66,7 @@ static int smsc_i2c_probe(struct i2c_client *i2c,
ret = regmap_write(smsc->regmap, SMSC_CLK_CTRL, smsc->clk);
if (ret)
- goto err;
+ return ret;
#ifdef CONFIG_OF
if (i2c->dev.of_node)
@@ -76,7 +74,6 @@ static int smsc_i2c_probe(struct i2c_client *i2c,
NULL, NULL, &i2c->dev);
#endif
-err:
return ret;
}
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index fb8f9e8b7..94c7cc02f 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -23,6 +23,27 @@
#include <linux/regulator/consumer.h>
#include "stmpe.h"
+/**
+ * struct stmpe_platform_data - STMPE platform data
+ * @id: device id to distinguish between multiple STMPEs on the same board
+ * @blocks: bitmask of blocks to enable (use STMPE_BLOCK_*)
+ * @irq_trigger: IRQ trigger to use for the interrupt to the host
+ * @autosleep: bool to enable/disable stmpe autosleep
+ * @autosleep_timeout: inactivity timeout in milliseconds for autosleep
+ * @irq_over_gpio: true if gpio is used to get irq
+ * @irq_gpio: gpio number over which irq will be requested (significant only if
+ * irq_over_gpio is true)
+ */
+struct stmpe_platform_data {
+ int id;
+ unsigned int blocks;
+ unsigned int irq_trigger;
+ bool autosleep;
+ bool irq_over_gpio;
+ int irq_gpio;
+ int autosleep_timeout;
+};
+
static int __stmpe_enable(struct stmpe *stmpe, unsigned int blocks)
{
return stmpe->variant->enable(stmpe, blocks, true);
@@ -1187,24 +1208,19 @@ static void stmpe_of_probe(struct stmpe_platform_data *pdata,
/* Called from client specific probe routines */
int stmpe_probe(struct stmpe_client_info *ci, enum stmpe_partnum partnum)
{
- struct stmpe_platform_data *pdata = dev_get_platdata(ci->dev);
+ struct stmpe_platform_data *pdata;
struct device_node *np = ci->dev->of_node;
struct stmpe *stmpe;
int ret;
- if (!pdata) {
- if (!np)
- return -EINVAL;
-
- pdata = devm_kzalloc(ci->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
+ pdata = devm_kzalloc(ci->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
- stmpe_of_probe(pdata, np);
+ stmpe_of_probe(pdata, np);
- if (of_find_property(np, "interrupts", NULL) == NULL)
- ci->irq = -1;
- }
+ if (of_find_property(np, "interrupts", NULL) == NULL)
+ ci->irq = -1;
stmpe = devm_kzalloc(ci->dev, sizeof(struct stmpe), GFP_KERNEL);
if (!stmpe)
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index e4e4b22ee..c8f027b4e 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -27,20 +27,6 @@
#include <linux/mfd/ti_am335x_tscadc.h>
-static unsigned int tscadc_readl(struct ti_tscadc_dev *tsadc, unsigned int reg)
-{
- unsigned int val;
-
- regmap_read(tsadc->regmap_tscadc, reg, &val);
- return val;
-}
-
-static void tscadc_writel(struct ti_tscadc_dev *tsadc, unsigned int reg,
- unsigned int val)
-{
- regmap_write(tsadc->regmap_tscadc, reg, val);
-}
-
static const struct regmap_config tscadc_regmap_config = {
.name = "ti_tscadc",
.reg_bits = 32,
@@ -48,89 +34,89 @@ static const struct regmap_config tscadc_regmap_config = {
.val_bits = 32,
};
-void am335x_tsc_se_set_cache(struct ti_tscadc_dev *tsadc, u32 val)
+void am335x_tsc_se_set_cache(struct ti_tscadc_dev *tscadc, u32 val)
{
unsigned long flags;
- spin_lock_irqsave(&tsadc->reg_lock, flags);
- tsadc->reg_se_cache |= val;
- if (tsadc->adc_waiting)
- wake_up(&tsadc->reg_se_wait);
- else if (!tsadc->adc_in_use)
- tscadc_writel(tsadc, REG_SE, tsadc->reg_se_cache);
+ spin_lock_irqsave(&tscadc->reg_lock, flags);
+ tscadc->reg_se_cache |= val;
+ if (tscadc->adc_waiting)
+ wake_up(&tscadc->reg_se_wait);
+ else if (!tscadc->adc_in_use)
+ regmap_write(tscadc->regmap, REG_SE, tscadc->reg_se_cache);
- spin_unlock_irqrestore(&tsadc->reg_lock, flags);
+ spin_unlock_irqrestore(&tscadc->reg_lock, flags);
}
EXPORT_SYMBOL_GPL(am335x_tsc_se_set_cache);
-static void am335x_tscadc_need_adc(struct ti_tscadc_dev *tsadc)
+static void am335x_tscadc_need_adc(struct ti_tscadc_dev *tscadc)
{
DEFINE_WAIT(wait);
u32 reg;
- reg = tscadc_readl(tsadc, REG_ADCFSM);
+ regmap_read(tscadc->regmap, REG_ADCFSM, &reg);
if (reg & SEQ_STATUS) {
- tsadc->adc_waiting = true;
- prepare_to_wait(&tsadc->reg_se_wait, &wait,
+ tscadc->adc_waiting = true;
+ prepare_to_wait(&tscadc->reg_se_wait, &wait,
TASK_UNINTERRUPTIBLE);
- spin_unlock_irq(&tsadc->reg_lock);
+ spin_unlock_irq(&tscadc->reg_lock);
schedule();
- spin_lock_irq(&tsadc->reg_lock);
- finish_wait(&tsadc->reg_se_wait, &wait);
+ spin_lock_irq(&tscadc->reg_lock);
+ finish_wait(&tscadc->reg_se_wait, &wait);
/*
* Sequencer should either be idle or
* busy applying the charge step.
*/
- reg = tscadc_readl(tsadc, REG_ADCFSM);
+ regmap_read(tscadc->regmap, REG_ADCFSM, &reg);
WARN_ON((reg & SEQ_STATUS) && !(reg & CHARGE_STEP));
- tsadc->adc_waiting = false;
+ tscadc->adc_waiting = false;
}
- tsadc->adc_in_use = true;
+ tscadc->adc_in_use = true;
}
-void am335x_tsc_se_set_once(struct ti_tscadc_dev *tsadc, u32 val)
+void am335x_tsc_se_set_once(struct ti_tscadc_dev *tscadc, u32 val)
{
- spin_lock_irq(&tsadc->reg_lock);
- am335x_tscadc_need_adc(tsadc);
+ spin_lock_irq(&tscadc->reg_lock);
+ am335x_tscadc_need_adc(tscadc);
- tscadc_writel(tsadc, REG_SE, val);
- spin_unlock_irq(&tsadc->reg_lock);
+ regmap_write(tscadc->regmap, REG_SE, val);
+ spin_unlock_irq(&tscadc->reg_lock);
}
EXPORT_SYMBOL_GPL(am335x_tsc_se_set_once);
-void am335x_tsc_se_adc_done(struct ti_tscadc_dev *tsadc)
+void am335x_tsc_se_adc_done(struct ti_tscadc_dev *tscadc)
{
unsigned long flags;
- spin_lock_irqsave(&tsadc->reg_lock, flags);
- tsadc->adc_in_use = false;
- tscadc_writel(tsadc, REG_SE, tsadc->reg_se_cache);
- spin_unlock_irqrestore(&tsadc->reg_lock, flags);
+ spin_lock_irqsave(&tscadc->reg_lock, flags);
+ tscadc->adc_in_use = false;
+ regmap_write(tscadc->regmap, REG_SE, tscadc->reg_se_cache);
+ spin_unlock_irqrestore(&tscadc->reg_lock, flags);
}
EXPORT_SYMBOL_GPL(am335x_tsc_se_adc_done);
-void am335x_tsc_se_clr(struct ti_tscadc_dev *tsadc, u32 val)
+void am335x_tsc_se_clr(struct ti_tscadc_dev *tscadc, u32 val)
{
unsigned long flags;
- spin_lock_irqsave(&tsadc->reg_lock, flags);
- tsadc->reg_se_cache &= ~val;
- tscadc_writel(tsadc, REG_SE, tsadc->reg_se_cache);
- spin_unlock_irqrestore(&tsadc->reg_lock, flags);
+ spin_lock_irqsave(&tscadc->reg_lock, flags);
+ tscadc->reg_se_cache &= ~val;
+ regmap_write(tscadc->regmap, REG_SE, tscadc->reg_se_cache);
+ spin_unlock_irqrestore(&tscadc->reg_lock, flags);
}
EXPORT_SYMBOL_GPL(am335x_tsc_se_clr);
-static void tscadc_idle_config(struct ti_tscadc_dev *config)
+static void tscadc_idle_config(struct ti_tscadc_dev *tscadc)
{
unsigned int idleconfig;
idleconfig = STEPCONFIG_YNN | STEPCONFIG_INM_ADCREFM |
STEPCONFIG_INP_ADCREFM | STEPCONFIG_YPN;
- tscadc_writel(config, REG_IDLECONFIG, idleconfig);
+ regmap_write(tscadc->regmap, REG_IDLECONFIG, idleconfig);
}
static int ti_tscadc_probe(struct platform_device *pdev)
@@ -182,8 +168,7 @@ static int ti_tscadc_probe(struct platform_device *pdev)
}
/* Allocate memory for device */
- tscadc = devm_kzalloc(&pdev->dev,
- sizeof(struct ti_tscadc_dev), GFP_KERNEL);
+ tscadc = devm_kzalloc(&pdev->dev, sizeof(*tscadc), GFP_KERNEL);
if (!tscadc) {
dev_err(&pdev->dev, "failed to allocate memory.\n");
return -ENOMEM;
@@ -202,11 +187,11 @@ static int ti_tscadc_probe(struct platform_device *pdev)
if (IS_ERR(tscadc->tscadc_base))
return PTR_ERR(tscadc->tscadc_base);
- tscadc->regmap_tscadc = devm_regmap_init_mmio(&pdev->dev,
+ tscadc->regmap = devm_regmap_init_mmio(&pdev->dev,
tscadc->tscadc_base, &tscadc_regmap_config);
- if (IS_ERR(tscadc->regmap_tscadc)) {
+ if (IS_ERR(tscadc->regmap)) {
dev_err(&pdev->dev, "regmap init failed\n");
- err = PTR_ERR(tscadc->regmap_tscadc);
+ err = PTR_ERR(tscadc->regmap);
goto ret;
}
@@ -236,11 +221,11 @@ static int ti_tscadc_probe(struct platform_device *pdev)
/* TSCADC_CLKDIV needs to be configured to the value minus 1 */
tscadc->clk_div--;
- tscadc_writel(tscadc, REG_CLKDIV, tscadc->clk_div);
+ regmap_write(tscadc->regmap, REG_CLKDIV, tscadc->clk_div);
/* Set the control register bits */
ctrl = CNTRLREG_STEPCONFIGWRT | CNTRLREG_STEPID;
- tscadc_writel(tscadc, REG_CTRL, ctrl);
+ regmap_write(tscadc->regmap, REG_CTRL, ctrl);
/* Set register bits for Idle Config Mode */
if (tsc_wires > 0) {
@@ -254,7 +239,7 @@ static int ti_tscadc_probe(struct platform_device *pdev)
/* Enable the TSC module enable bit */
ctrl |= CNTRLREG_TSCSSENB;
- tscadc_writel(tscadc, REG_CTRL, ctrl);
+ regmap_write(tscadc->regmap, REG_CTRL, ctrl);
tscadc->used_cells = 0;
tscadc->tsc_cell = -1;
@@ -300,7 +285,7 @@ static int ti_tscadc_remove(struct platform_device *pdev)
{
struct ti_tscadc_dev *tscadc = platform_get_drvdata(pdev);
- tscadc_writel(tscadc, REG_SE, 0x00);
+ regmap_write(tscadc->regmap, REG_SE, 0x00);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -310,51 +295,43 @@ static int ti_tscadc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int tscadc_suspend(struct device *dev)
+static int __maybe_unused tscadc_suspend(struct device *dev)
{
- struct ti_tscadc_dev *tscadc_dev = dev_get_drvdata(dev);
+ struct ti_tscadc_dev *tscadc = dev_get_drvdata(dev);
- tscadc_writel(tscadc_dev, REG_SE, 0x00);
+ regmap_write(tscadc->regmap, REG_SE, 0x00);
pm_runtime_put_sync(dev);
return 0;
}
-static int tscadc_resume(struct device *dev)
+static int __maybe_unused tscadc_resume(struct device *dev)
{
- struct ti_tscadc_dev *tscadc_dev = dev_get_drvdata(dev);
+ struct ti_tscadc_dev *tscadc = dev_get_drvdata(dev);
u32 ctrl;
pm_runtime_get_sync(dev);
/* context restore */
ctrl = CNTRLREG_STEPCONFIGWRT | CNTRLREG_STEPID;
- tscadc_writel(tscadc_dev, REG_CTRL, ctrl);
+ regmap_write(tscadc->regmap, REG_CTRL, ctrl);
- if (tscadc_dev->tsc_cell != -1) {
- if (tscadc_dev->tsc_wires == 5)
+ if (tscadc->tsc_cell != -1) {
+ if (tscadc->tsc_wires == 5)
ctrl |= CNTRLREG_5WIRE | CNTRLREG_TSCENB;
else
ctrl |= CNTRLREG_4WIRE | CNTRLREG_TSCENB;
- tscadc_idle_config(tscadc_dev);
+ tscadc_idle_config(tscadc);
}
ctrl |= CNTRLREG_TSCSSENB;
- tscadc_writel(tscadc_dev, REG_CTRL, ctrl);
+ regmap_write(tscadc->regmap, REG_CTRL, ctrl);
- tscadc_writel(tscadc_dev, REG_CLKDIV, tscadc_dev->clk_div);
+ regmap_write(tscadc->regmap, REG_CLKDIV, tscadc->clk_div);
return 0;
}
-static const struct dev_pm_ops tscadc_pm_ops = {
- .suspend = tscadc_suspend,
- .resume = tscadc_resume,
-};
-#define TSCADC_PM_OPS (&tscadc_pm_ops)
-#else
-#define TSCADC_PM_OPS NULL
-#endif
+static SIMPLE_DEV_PM_OPS(tscadc_pm_ops, tscadc_suspend, tscadc_resume);
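/*
 * Editor's sketch (hypothetical "foo" driver, not from this patch): the
 * pattern adopted above. SIMPLE_DEV_PM_OPS from <linux/pm.h> defines the
 * dev_pm_ops structure unconditionally, and __maybe_unused keeps the
 * callbacks from triggering unused-function warnings when CONFIG_PM_SLEEP=n,
 * so the #ifdef CONFIG_PM scaffolding and the TSCADC_PM_OPS indirection
 * can be dropped.
 */
#include <linux/pm.h>

static int __maybe_unused foo_suspend(struct device *dev)
{
	/* quiesce hardware, drop runtime PM reference, etc. */
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	/* re-acquire runtime PM reference and restore context */
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
/* referenced as .driver.pm = &foo_pm_ops in the platform_driver */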
static const struct of_device_id ti_tscadc_dt_ids[] = {
{ .compatible = "ti,am3359-tscadc", },
@@ -365,7 +342,7 @@ MODULE_DEVICE_TABLE(of, ti_tscadc_dt_ids);
static struct platform_driver ti_tscadc_driver = {
.driver = {
.name = "ti_am3359-tscadc",
- .pm = TSCADC_PM_OPS,
+ .pm = &tscadc_pm_ops,
.of_match_table = ti_tscadc_dt_ids,
},
.probe = ti_tscadc_probe,
diff --git a/drivers/mfd/tps6507x.c b/drivers/mfd/tps6507x.c
index 40beb2f43..1f308c4e3 100644
--- a/drivers/mfd/tps6507x.c
+++ b/drivers/mfd/tps6507x.c
@@ -105,8 +105,8 @@ static int tps6507x_i2c_probe(struct i2c_client *i2c,
}
static const struct i2c_device_id tps6507x_i2c_id[] = {
- { "tps6507x", 0 },
- { }
+ { "tps6507x", 0 },
+ { }
};
MODULE_DEVICE_TABLE(i2c, tps6507x_i2c_id);
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 831696ee2..a49d3db6d 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -622,11 +622,8 @@ add_numbered_child(unsigned mod_no, const char *name, int num,
twl = &twl_priv->twl_modules[sid];
pdev = platform_device_alloc(name, num);
- if (!pdev) {
- dev_dbg(&twl->client->dev, "can't alloc dev\n");
- status = -ENOMEM;
- goto err;
- }
+ if (!pdev)
+ return ERR_PTR(-ENOMEM);
pdev->dev.parent = &twl->client->dev;
@@ -634,7 +631,7 @@ add_numbered_child(unsigned mod_no, const char *name, int num,
status = platform_device_add_data(pdev, pdata, pdata_len);
if (status < 0) {
dev_dbg(&pdev->dev, "can't add platform_data\n");
- goto err;
+ goto put_device;
}
}
@@ -647,21 +644,22 @@ add_numbered_child(unsigned mod_no, const char *name, int num,
status = platform_device_add_resources(pdev, r, irq1 ? 2 : 1);
if (status < 0) {
dev_dbg(&pdev->dev, "can't add irqs\n");
- goto err;
+ goto put_device;
}
}
status = platform_device_add(pdev);
- if (status == 0)
- device_init_wakeup(&pdev->dev, can_wakeup);
+ if (status)
+ goto put_device;
+
+ device_init_wakeup(&pdev->dev, can_wakeup);
-err:
- if (status < 0) {
- platform_device_put(pdev);
- dev_err(&twl->client->dev, "can't add %s dev\n", name);
- return ERR_PTR(status);
- }
return &pdev->dev;
+
+put_device:
+ platform_device_put(pdev);
+ dev_err(&twl->client->dev, "failed to add device %s\n", name);
+ return ERR_PTR(status);
}
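/*
 * Editor's sketch (hypothetical helper, not from this patch): the unwind
 * shape the hunk above converges on. Until platform_device_add() succeeds,
 * the only valid way to dispose of a device obtained from
 * platform_device_alloc() is platform_device_put(), so every failure
 * funnels through one label.
 */
#include <linux/platform_device.h>
#include <linux/err.h>

static struct device *example_add_child(struct device *parent,
					const char *name, int id)
{
	struct platform_device *pdev;
	int status;

	pdev = platform_device_alloc(name, id);
	if (!pdev)
		return ERR_PTR(-ENOMEM);

	pdev->dev.parent = parent;

	status = platform_device_add(pdev);
	if (status)
		goto put_device;

	return &pdev->dev;

put_device:
	platform_device_put(pdev);
	dev_err(parent, "failed to add device %s\n", name);
	return ERR_PTR(status);
}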
static inline struct device *add_child(unsigned mod_no, const char *name,
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index 852d5874a..ab328ec49 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -323,8 +323,7 @@ int twl6040_power(struct twl6040 *twl6040, int on)
/* Default PLL configuration after power up */
twl6040->pll = TWL6040_SYSCLK_SEL_LPPLL;
- twl6040->sysclk = 19200000;
- twl6040->mclk = 32768;
+ twl6040->sysclk_rate = 19200000;
} else {
/* already powered-down */
if (!twl6040->power_count) {
@@ -352,8 +351,12 @@ int twl6040_power(struct twl6040 *twl6040, int on)
regcache_cache_only(twl6040->regmap, true);
regcache_mark_dirty(twl6040->regmap);
- twl6040->sysclk = 0;
- twl6040->mclk = 0;
+ twl6040->sysclk_rate = 0;
+
+ if (twl6040->pll == TWL6040_SYSCLK_SEL_HPPLL) {
+ clk_disable_unprepare(twl6040->mclk);
+ twl6040->mclk_rate = 0;
+ }
clk_disable_unprepare(twl6040->clk32k);
}
@@ -377,15 +380,15 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id,
/* Force full reconfiguration when switching between PLL */
if (pll_id != twl6040->pll) {
- twl6040->sysclk = 0;
- twl6040->mclk = 0;
+ twl6040->sysclk_rate = 0;
+ twl6040->mclk_rate = 0;
}
switch (pll_id) {
case TWL6040_SYSCLK_SEL_LPPLL:
/* low-power PLL divider */
/* Change the sysclk configuration only if it has been changed */
- if (twl6040->sysclk != freq_out) {
+ if (twl6040->sysclk_rate != freq_out) {
switch (freq_out) {
case 17640000:
lppllctl |= TWL6040_LPLLFIN;
@@ -427,6 +430,8 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id,
ret = -EINVAL;
goto pll_out;
}
+
+ clk_disable_unprepare(twl6040->mclk);
break;
case TWL6040_SYSCLK_SEL_HPPLL:
/* high-performance PLL can provide only 19.2 MHz */
@@ -437,7 +442,7 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id,
goto pll_out;
}
- if (twl6040->mclk != freq_in) {
+ if (twl6040->mclk_rate != freq_in) {
hppllctl &= ~TWL6040_MCLK_MSK;
switch (freq_in) {
@@ -468,6 +473,9 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id,
goto pll_out;
}
+ /* When switching to HPPLL, enable the mclk first */
+ if (pll_id != twl6040->pll)
+ clk_prepare_enable(twl6040->mclk);
/*
* enable clock slicer to ensure input waveform is
* square
@@ -483,6 +491,8 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id,
lppllctl &= ~TWL6040_LPLLENA;
twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL,
lppllctl);
+
+ twl6040->mclk_rate = freq_in;
}
break;
default:
@@ -491,8 +501,7 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id,
goto pll_out;
}
- twl6040->sysclk = freq_out;
- twl6040->mclk = freq_in;
+ twl6040->sysclk_rate = freq_out;
twl6040->pll = pll_id;
pll_out:
@@ -512,7 +521,7 @@ EXPORT_SYMBOL(twl6040_get_pll);
unsigned int twl6040_get_sysclk(struct twl6040 *twl6040)
{
- return twl6040->sysclk;
+ return twl6040->sysclk_rate;
}
EXPORT_SYMBOL(twl6040_get_sysclk);
@@ -655,10 +664,18 @@ static int twl6040_probe(struct i2c_client *client,
if (IS_ERR(twl6040->clk32k)) {
if (PTR_ERR(twl6040->clk32k) == -EPROBE_DEFER)
return -EPROBE_DEFER;
- dev_info(&client->dev, "clk32k is not handled\n");
+ dev_dbg(&client->dev, "clk32k is not handled\n");
twl6040->clk32k = NULL;
}
+ twl6040->mclk = devm_clk_get(&client->dev, "mclk");
+ if (IS_ERR(twl6040->mclk)) {
+ if (PTR_ERR(twl6040->mclk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_dbg(&client->dev, "mclk is not handled\n");
+ twl6040->mclk = NULL;
+ }
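/*
 * Editor's sketch (hypothetical helper, not from this patch): the
 * optional-clock idiom used twice above. Defer probing if the provider is
 * not ready yet; otherwise treat a missing clock as "not handled" and
 * continue with NULL, which clk_prepare_enable() and
 * clk_disable_unprepare() accept as a no-op.
 */
#include <linux/clk.h>

static struct clk *example_optional_clk(struct device *dev, const char *id)
{
	struct clk *clk = devm_clk_get(dev, id);

	if (IS_ERR(clk)) {
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return ERR_PTR(-EPROBE_DEFER);
		dev_dbg(dev, "%s is not handled\n", id);
		clk = NULL;
	}
	return clk;
}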
+
twl6040->supplies[0].supply = "vio";
twl6040->supplies[1].supply = "v2v1";
ret = devm_regulator_bulk_get(&client->dev, TWL6040_NUM_SUPPLIES,
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index a216b4667..d00252828 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -345,16 +345,6 @@ config SENSORS_TSL2550
This driver can also be built as a module. If so, the module
will be called tsl2550.
-config SENSORS_BH1780
- tristate "ROHM BH1780GLI ambient light sensor"
- depends on I2C && SYSFS
- help
- If you say yes here you get support for the ROHM BH1780GLI
- ambient light sensor.
-
- This driver can also be built as a module. If so, the module
- will be called bh1780gli.
-
config SENSORS_BH1770
tristate "BH1770GLC / SFH7770 combined ALS - Proximity sensor"
depends on I2C
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index b2fb6dbff..fb32516dd 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_TIFM_CORE) += tifm_core.o
obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
obj-$(CONFIG_PHANTOM) += phantom.o
obj-$(CONFIG_QCOM_COINCELL) += qcom-coincell.o
-obj-$(CONFIG_SENSORS_BH1780) += bh1780gli.o
obj-$(CONFIG_SENSORS_BH1770) += bh1770glc.o
obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o
obj-$(CONFIG_SGI_IOC4) += ioc4.o
@@ -57,3 +56,18 @@ obj-$(CONFIG_ECHO) += echo/
obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o
obj-$(CONFIG_CXL_BASE) += cxl/
obj-$(CONFIG_PANEL) += panel.o
+
+lkdtm-$(CONFIG_LKDTM) += lkdtm_core.o
+lkdtm-$(CONFIG_LKDTM) += lkdtm_bugs.o
+lkdtm-$(CONFIG_LKDTM) += lkdtm_heap.o
+lkdtm-$(CONFIG_LKDTM) += lkdtm_perms.o
+lkdtm-$(CONFIG_LKDTM) += lkdtm_rodata_objcopy.o
+lkdtm-$(CONFIG_LKDTM) += lkdtm_usercopy.o
+
+OBJCOPYFLAGS :=
+OBJCOPYFLAGS_lkdtm_rodata_objcopy.o := \
+ --set-section-flags .text=alloc,readonly \
+ --rename-section .text=.rodata
+targets += lkdtm_rodata.o lkdtm_rodata_objcopy.o
+$(obj)/lkdtm_rodata_objcopy.o: $(obj)/lkdtm_rodata.o FORCE
+ $(call if_changed,objcopy)
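/*
 * Editor's note: the objcopy rule above compiles lkdtm_rodata.o normally,
 * then renames its .text section to .rodata (flagged alloc,readonly), so
 * whatever function it contains is linked into a read-only, non-executable
 * mapping. A minimal sketch of such a function (the real one in
 * lkdtm_rodata.c is essentially this): calling it should fault on kernels
 * that enforce strict rodata, which is what the LKDTM test exercises.
 */
void notrace lkdtm_rodata_do_nothing(void)
{
	/* empty on purpose; only its section placement matters */
}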
diff --git a/drivers/misc/cxl/Kconfig b/drivers/misc/cxl/Kconfig
index 8756d06e2..b75cf830d 100644
--- a/drivers/misc/cxl/Kconfig
+++ b/drivers/misc/cxl/Kconfig
@@ -7,11 +7,7 @@ config CXL_BASE
default n
select PPC_COPRO_BASE
-config CXL_KERNEL_API
- bool
- default n
-
-config CXL_EEH
+config CXL_AFU_DRIVER_OPS
bool
default n
@@ -19,8 +15,7 @@ config CXL
tristate "Support for IBM Coherent Accelerators (CXL)"
depends on PPC_POWERNV && PCI_MSI && EEH
select CXL_BASE
- select CXL_KERNEL_API
- select CXL_EEH
+ select CXL_AFU_DRIVER_OPS
default m
help
Select this option to enable driver support for IBM Coherent
@@ -33,3 +28,11 @@ config CXL
CAPI adapters are found in POWER8 based systems.
If unsure, say N.
+
+config CXL_BIMODAL
+ bool "Support for bi-modal CAPI cards"
+ depends on HOTPLUG_PCI_POWERNV = y && CXL || HOTPLUG_PCI_POWERNV = m && CXL = m
+ default y
+ help
+ Select this option to enable support for bi-modal CAPI cards, such as
+ the Mellanox CX4.
diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile
index 8a55c1aa1..56e9a4732 100644
--- a/drivers/misc/cxl/Makefile
+++ b/drivers/misc/cxl/Makefile
@@ -3,7 +3,7 @@ ccflags-$(CONFIG_PPC_WERROR) += -Werror
cxl-y += main.o file.o irq.o fault.o native.o
cxl-y += context.o sysfs.o debugfs.o pci.o trace.o
-cxl-y += vphb.o api.o
+cxl-y += vphb.o phb.o api.o
cxl-$(CONFIG_PPC_PSERIES) += flash.o guest.o of.o hcalls.o
obj-$(CONFIG_CXL) += cxl.o
obj-$(CONFIG_CXL_BASE) += base.o
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 6d228ccd8..f3d34b941 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -13,6 +13,8 @@
#include <linux/file.h>
#include <misc/cxl.h>
#include <linux/fs.h>
+#include <asm/pnv-pci.h>
+#include <linux/msi.h>
#include "cxl.h"
@@ -24,6 +26,8 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
int rc;
afu = cxl_pci_to_afu(dev);
+ if (IS_ERR(afu))
+ return ERR_CAST(afu);
ctx = cxl_context_alloc();
if (IS_ERR(ctx)) {
@@ -94,6 +98,42 @@ static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
return 0;
}
+int _cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq)
+{
+ if (*ctx == NULL || *afu_irq == 0) {
+ *afu_irq = 1;
+ *ctx = cxl_get_context(pdev);
+ } else {
+ (*afu_irq)++;
+ if (*afu_irq > cxl_get_max_irqs_per_process(pdev)) {
+ *ctx = list_next_entry(*ctx, extra_irq_contexts);
+ *afu_irq = 1;
+ }
+ }
+ return cxl_find_afu_irq(*ctx, *afu_irq);
+}
+/* Exported via cxl_base */
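/*
 * Editor's sketch (hypothetical caller, not from this patch): the PHB MSI
 * setup path is expected to treat (*ctx, *afu_irq) as a cursor, calling
 * the cxl_base wrapper (defined in base.c below) once per vector and
 * receiving the backing hwirq each time:
 */
static int example_map_msi_hwirqs(struct pci_dev *pdev, int nvec)
{
	struct cxl_context *ctx = NULL;
	int afu_irq = 0, hwirq, i;

	for (i = 0; i < nvec; i++) {
		hwirq = cxl_next_msi_hwirq(pdev, &ctx, &afu_irq);
		if (hwirq <= 0)
			return hwirq ? hwirq : -ENOSPC;
		/* bind hwirq to the i-th MSI entry here */
	}
	return 0;
}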
+
+int cxl_set_priv(struct cxl_context *ctx, void *priv)
+{
+ if (!ctx)
+ return -EINVAL;
+
+ ctx->priv = priv;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cxl_set_priv);
+
+void *cxl_get_priv(struct cxl_context *ctx)
+{
+ if (!ctx)
+ return ERR_PTR(-EINVAL);
+
+ return ctx->priv;
+}
+EXPORT_SYMBOL_GPL(cxl_get_priv);
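/*
 * Editor's sketch (hypothetical AFU driver, not from this patch): the new
 * accessors let a driver hang its own per-context state off the
 * cxl_context and retrieve it later with cxl_get_priv():
 */
#include <misc/cxl.h>
#include <linux/slab.h>

struct example_afu_state {
	int pending;
};

static int example_afu_setup(struct pci_dev *dev)
{
	struct cxl_context *ctx = cxl_dev_context_init(dev);
	struct example_afu_state *state;

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	return cxl_set_priv(ctx, state);
}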
+
int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
{
int res;
@@ -102,7 +142,10 @@ int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
if (num == 0)
num = ctx->afu->pp_irqs;
res = afu_allocate_irqs(ctx, num);
- if (!res && !cpu_has_feature(CPU_FTR_HVMODE)) {
+ if (res)
+ return res;
+
+ if (!cpu_has_feature(CPU_FTR_HVMODE)) {
/* In a guest, the PSL interrupt is not multiplexed. It was
* allocated above, and we need to set its handler
*/
@@ -110,6 +153,13 @@ int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
if (hwirq)
cxl_map_irq(ctx->afu->adapter, hwirq, cxl_ops->psl_interrupt, ctx, "psl");
}
+
+ if (ctx->status == STARTED) {
+ if (cxl_ops->update_ivtes)
+ cxl_ops->update_ivtes(ctx);
+ else
+ WARN(1, "BUG: cxl_allocate_afu_irqs must be called prior to starting the context on this platform\n");
+ }
+
return res;
}
EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs);
@@ -323,6 +373,23 @@ struct cxl_context *cxl_fops_get_context(struct file *file)
}
EXPORT_SYMBOL_GPL(cxl_fops_get_context);
+void cxl_set_driver_ops(struct cxl_context *ctx,
+ struct cxl_afu_driver_ops *ops)
+{
+ WARN_ON(!ops->fetch_event || !ops->event_delivered);
+ atomic_set(&ctx->afu_driver_events, 0);
+ ctx->afu_driver_ops = ops;
+}
+EXPORT_SYMBOL_GPL(cxl_set_driver_ops);
+
+void cxl_context_events_pending(struct cxl_context *ctx,
+ unsigned int new_events)
+{
+ atomic_add(new_events, &ctx->afu_driver_events);
+ wake_up_all(&ctx->wq);
+}
+EXPORT_SYMBOL_GPL(cxl_context_events_pending);
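/*
 * Editor's sketch (hypothetical ops, not from this patch): an AFU driver
 * wires up the interface above by supplying the two mandatory callbacks
 * that afu_read() uses to fetch and acknowledge driver-specific events:
 */
static struct cxl_event_afu_driver_reserved *
example_fetch_event(struct cxl_context *ctx)
{
	/* return the next queued payload for this context */
	return NULL;	/* placeholder */
}

static void example_event_delivered(struct cxl_context *ctx,
				    struct cxl_event_afu_driver_reserved *pl,
				    int rc)
{
	/* free or requeue the payload depending on rc */
}

static struct cxl_afu_driver_ops example_afu_ops = {
	.fetch_event	 = example_fetch_event,
	.event_delivered = example_event_delivered,
};

/*
 * After cxl_set_driver_ops(ctx, &example_afu_ops), each call to
 * cxl_context_events_pending(ctx, n) wakes readers polling the context.
 */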
+
int cxl_start_work(struct cxl_context *ctx,
struct cxl_ioctl_start_work *work)
{
@@ -390,7 +457,106 @@ EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image);
ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count)
{
struct cxl_afu *afu = cxl_pci_to_afu(dev);
+ if (IS_ERR(afu))
+ return -ENODEV;
return cxl_ops->read_adapter_vpd(afu->adapter, buf, count);
}
EXPORT_SYMBOL_GPL(cxl_read_adapter_vpd);
+
+int cxl_set_max_irqs_per_process(struct pci_dev *dev, int irqs)
+{
+ struct cxl_afu *afu = cxl_pci_to_afu(dev);
+ if (IS_ERR(afu))
+ return -ENODEV;
+
+ if (irqs > afu->adapter->user_irqs)
+ return -EINVAL;
+
+ /* Limit user_irqs to prevent the user increasing this via sysfs */
+ afu->adapter->user_irqs = irqs;
+ afu->irqs_max = irqs;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cxl_set_max_irqs_per_process);
+
+int cxl_get_max_irqs_per_process(struct pci_dev *dev)
+{
+ struct cxl_afu *afu = cxl_pci_to_afu(dev);
+ if (IS_ERR(afu))
+ return -ENODEV;
+
+ return afu->irqs_max;
+}
+EXPORT_SYMBOL_GPL(cxl_get_max_irqs_per_process);
+
+/*
+ * This is a special interrupt allocation routine called from the PHB's MSI
+ * setup function. When capi interrupts are allocated in this manner they must
+ * still be associated with a running context, but since the MSI APIs have no
+ * way to specify this we use the default context associated with the device.
+ *
+ * The Mellanox CX4 has a hardware limitation that restricts the maximum AFU
+ * interrupt number, so in order to overcome this their driver informs us of
+ * the restriction by setting the maximum interrupts per context, and we
+ * allocate additional contexts as necessary so that we can keep the AFU
+ * interrupt number within the supported range.
+ */
+int _cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+{
+ struct cxl_context *ctx, *new_ctx, *default_ctx;
+ int remaining;
+ int rc;
+
+ ctx = default_ctx = cxl_get_context(pdev);
+ if (WARN_ON(!default_ctx))
+ return -ENODEV;
+
+ remaining = nvec;
+ while (remaining > 0) {
+ rc = cxl_allocate_afu_irqs(ctx, min(remaining, ctx->afu->irqs_max));
+ if (rc) {
+ pr_warn("%s: Failed to find enough free MSIs\n", pci_name(pdev));
+ return rc;
+ }
+ remaining -= ctx->afu->irqs_max;
+
+ if (ctx != default_ctx && default_ctx->status == STARTED) {
+ WARN_ON(cxl_start_context(ctx,
+ be64_to_cpu(default_ctx->elem->common.wed),
+ NULL));
+ }
+
+ if (remaining > 0) {
+ new_ctx = cxl_dev_context_init(pdev);
+ if (IS_ERR(new_ctx)) {
+ pr_warn("%s: Failed to allocate enough contexts for MSIs\n", pci_name(pdev));
+ return -ENOSPC;
+ }
+ list_add(&new_ctx->extra_irq_contexts, &ctx->extra_irq_contexts);
+ ctx = new_ctx;
+ }
+ }
+
+ return 0;
+}
+/* Exported via cxl_base */
+
+void _cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev)
+{
+ struct cxl_context *ctx, *pos, *tmp;
+
+ ctx = cxl_get_context(pdev);
+ if (WARN_ON(!ctx))
+ return;
+
+ cxl_free_afu_irqs(ctx);
+ list_for_each_entry_safe(pos, tmp, &ctx->extra_irq_contexts, extra_irq_contexts) {
+ cxl_stop_context(pos);
+ cxl_free_afu_irqs(pos);
+ list_del(&pos->extra_irq_contexts);
+ cxl_release_context(pos);
+ }
+}
+/* Exported via cxl_base */
diff --git a/drivers/misc/cxl/base.c b/drivers/misc/cxl/base.c
index 9b90ec6c0..cd54ce6f6 100644
--- a/drivers/misc/cxl/base.c
+++ b/drivers/misc/cxl/base.c
@@ -54,6 +54,19 @@ static inline void cxl_calls_put(struct cxl_calls *calls) { }
#endif /* CONFIG_CXL_MODULE */
+/* AFU refcount management */
+struct cxl_afu *cxl_afu_get(struct cxl_afu *afu)
+{
+ return (get_device(&afu->dev) == NULL) ? NULL : afu;
+}
+EXPORT_SYMBOL_GPL(cxl_afu_get);
+
+void cxl_afu_put(struct cxl_afu *afu)
+{
+ put_device(&afu->dev);
+}
+EXPORT_SYMBOL_GPL(cxl_afu_put);
+
void cxl_slbia(struct mm_struct *mm)
{
struct cxl_calls *calls;
@@ -93,9 +106,92 @@ int cxl_update_properties(struct device_node *dn,
}
EXPORT_SYMBOL_GPL(cxl_update_properties);
+/*
+ * API calls into the driver that may be called from the PHB code and must be
+ * built in.
+ */
+bool cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu)
+{
+ bool ret;
+ struct cxl_calls *calls;
+
+ calls = cxl_calls_get();
+ if (!calls)
+ return false;
+
+ ret = calls->cxl_pci_associate_default_context(dev, afu);
+
+ cxl_calls_put(calls);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cxl_pci_associate_default_context);
+
+void cxl_pci_disable_device(struct pci_dev *dev)
+{
+ struct cxl_calls *calls;
+
+ calls = cxl_calls_get();
+ if (!calls)
+ return;
+
+ calls->cxl_pci_disable_device(dev);
+
+ cxl_calls_put(calls);
+}
+EXPORT_SYMBOL_GPL(cxl_pci_disable_device);
+
+int cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq)
+{
+ int ret;
+ struct cxl_calls *calls;
+
+ calls = cxl_calls_get();
+ if (!calls)
+ return -EBUSY;
+
+ ret = calls->cxl_next_msi_hwirq(pdev, ctx, afu_irq);
+
+ cxl_calls_put(calls);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cxl_next_msi_hwirq);
+
+int cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+{
+ int ret;
+ struct cxl_calls *calls;
+
+ calls = cxl_calls_get();
+ if (!calls)
+ return -EBUSY;
+
+ ret = calls->cxl_cx4_setup_msi_irqs(pdev, nvec, type);
+
+ cxl_calls_put(calls);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cxl_cx4_setup_msi_irqs);
+
+void cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev)
+{
+ struct cxl_calls *calls;
+
+ calls = cxl_calls_get();
+ if (!calls)
+ return;
+
+ calls->cxl_cx4_teardown_msi_irqs(pdev);
+
+ cxl_calls_put(calls);
+}
+EXPORT_SYMBOL_GPL(cxl_cx4_teardown_msi_irqs);
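/*
 * Editor's sketch (hypothetical cxl_calls member, not from this patch):
 * every wrapper above has this shape. base.c is built in even when the
 * main cxl driver is a module; cxl_calls_get() pins the module, the call
 * is forwarded through the registered function pointer, and the module
 * reference is dropped again.
 */
int cxl_example_wrapper(struct pci_dev *pdev)
{
	struct cxl_calls *calls;
	int ret;

	calls = cxl_calls_get();
	if (!calls)
		return -EBUSY;		/* cxl module not loaded */

	ret = calls->cxl_example(pdev);	/* hypothetical member */

	cxl_calls_put(calls);
	return ret;
}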
+
static int __init cxl_base_init(void)
{
- struct device_node *np = NULL;
+ struct device_node *np;
struct platform_device *dev;
int count = 0;
@@ -105,8 +201,7 @@ static int __init cxl_base_init(void)
if (cpu_has_feature(CPU_FTR_HVMODE))
return 0;
- while ((np = of_find_compatible_node(np, NULL,
- "ibm,coherent-platform-facility"))) {
+ for_each_compatible_node(np, NULL, "ibm,coherent-platform-facility") {
dev = of_platform_device_create(np, NULL, NULL);
if (dev)
count++;
@@ -114,5 +209,4 @@ static int __init cxl_base_init(void)
pr_devel("Found %d cxl device(s)\n", count);
return 0;
}
-
-module_init(cxl_base_init);
+device_initcall(cxl_base_init);
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 26d206b1d..c466ee2b0 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -67,6 +67,9 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
ctx->pending_fault = false;
ctx->pending_afu_err = false;
+ INIT_LIST_HEAD(&ctx->irq_names);
+ INIT_LIST_HEAD(&ctx->extra_irq_contexts);
+
/*
* When we have to destroy all contexts in cxl_context_detach_all() we
* end up with afu_release_irqs() called from inside a
@@ -87,7 +90,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
*/
mutex_lock(&afu->contexts_lock);
idr_preload(GFP_KERNEL);
- i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0,
+ i = idr_alloc(&ctx->afu->contexts_idr, ctx, ctx->afu->adapter->min_pe,
ctx->afu->num_procs, GFP_NOWAIT);
idr_preload_end();
mutex_unlock(&afu->contexts_lock);
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 4fe50788f..344a0ff8f 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -24,6 +24,7 @@
#include <asm/reg.h>
#include <misc/cxl-base.h>
+#include <misc/cxl.h>
#include <uapi/misc/cxl.h>
extern uint cxl_verbose;
@@ -34,7 +35,7 @@ extern uint cxl_verbose;
* Bump version each time a user API change is made, whether it is
* backwards compatible or not.
*/
-#define CXL_API_VERSION 2
+#define CXL_API_VERSION 3
#define CXL_API_VERSION_COMPATIBLE 1
/*
@@ -81,6 +82,7 @@ static const cxl_p1_reg_t CXL_PSL_TLBIA = {0x00A8};
static const cxl_p1_reg_t CXL_PSL_AFUSEL = {0x00B0};
/* 0x00C0:7EFF Implementation dependent area */
+/* PSL registers */
static const cxl_p1_reg_t CXL_PSL_FIR1 = {0x0100};
static const cxl_p1_reg_t CXL_PSL_FIR2 = {0x0108};
static const cxl_p1_reg_t CXL_PSL_Timebase = {0x0110};
@@ -91,6 +93,11 @@ static const cxl_p1_reg_t CXL_PSL_FIR_CNTL = {0x0148};
static const cxl_p1_reg_t CXL_PSL_DSNDCTL = {0x0150};
static const cxl_p1_reg_t CXL_PSL_SNWRALLOC = {0x0158};
static const cxl_p1_reg_t CXL_PSL_TRACE = {0x0170};
+/* XSL registers (Mellanox CX4) */
+static const cxl_p1_reg_t CXL_XSL_Timebase = {0x0100};
+static const cxl_p1_reg_t CXL_XSL_TB_CTLSTAT = {0x0108};
+static const cxl_p1_reg_t CXL_XSL_FEC = {0x0158};
+static const cxl_p1_reg_t CXL_XSL_DSNCTL = {0x0168};
/* 0x7F00:7FFF Reserved PCIe MSI-X Pending Bit Array area */
/* 0x8000:FFFF Reserved PCIe MSI-X Table Area */
@@ -182,6 +189,18 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
#define CXL_PSL_ID_An_F (1ull << (63-31))
#define CXL_PSL_ID_An_L (1ull << (63-30))
+/****** CXL_PSL_SERR_An ****************************************************/
+#define CXL_PSL_SERR_An_afuto (1ull << (63-0))
+#define CXL_PSL_SERR_An_afudis (1ull << (63-1))
+#define CXL_PSL_SERR_An_afuov (1ull << (63-2))
+#define CXL_PSL_SERR_An_badsrc (1ull << (63-3))
+#define CXL_PSL_SERR_An_badctx (1ull << (63-4))
+#define CXL_PSL_SERR_An_llcmdis (1ull << (63-5))
+#define CXL_PSL_SERR_An_llcmdto (1ull << (63-6))
+#define CXL_PSL_SERR_An_afupar (1ull << (63-7))
+#define CXL_PSL_SERR_An_afudup (1ull << (63-8))
+#define CXL_PSL_SERR_An_AE (1ull << (63-30))
+
/****** CXL_PSL_SCNTL_An ****************************************************/
#define CXL_PSL_SCNTL_An_CR (0x1ull << (63-15))
/* Programming Modes: */
@@ -421,18 +440,6 @@ struct cxl_afu {
bool enabled;
};
-/* AFU refcount management */
-static inline struct cxl_afu *cxl_afu_get(struct cxl_afu *afu)
-{
-
- return (get_device(&afu->dev) == NULL) ? NULL : afu;
-}
-
-static inline void cxl_afu_put(struct cxl_afu *afu)
-{
- put_device(&afu->dev);
-}
-
struct cxl_irq_name {
struct list_head list;
@@ -477,6 +484,9 @@ struct cxl_context {
/* Only used in PR mode */
u64 process_token;
+ /* driver private data */
+ void *priv;
+
unsigned long *irq_bitmap; /* Accessed from IRQ context */
struct cxl_irq_ranges irqs;
struct list_head irq_names;
@@ -522,7 +532,35 @@ struct cxl_context {
bool pending_fault;
bool pending_afu_err;
+ /* Used by AFU drivers for driver specific event delivery */
+ struct cxl_afu_driver_ops *afu_driver_ops;
+ atomic_t afu_driver_events;
+
struct rcu_head rcu;
+
+ /*
+ * Only used when more interrupts are allocated via
+ * pci_enable_msix_range than are supported in the default context, to
+ * use additional contexts to overcome the limitation. i.e. Mellanox
+ * CX4 only:
+ */
+ struct list_head extra_irq_contexts;
+};
+
+struct cxl_service_layer_ops {
+ int (*adapter_regs_init)(struct cxl *adapter, struct pci_dev *dev);
+ int (*afu_regs_init)(struct cxl_afu *afu);
+ int (*register_serr_irq)(struct cxl_afu *afu);
+ void (*release_serr_irq)(struct cxl_afu *afu);
+ void (*debugfs_add_adapter_sl_regs)(struct cxl *adapter, struct dentry *dir);
+ void (*debugfs_add_afu_sl_regs)(struct cxl_afu *afu, struct dentry *dir);
+ void (*psl_irq_dump_registers)(struct cxl_context *ctx);
+ void (*err_irq_dump_registers)(struct cxl *adapter);
+ void (*debugfs_stop_trace)(struct cxl *adapter);
+ void (*write_timebase_ctrl)(struct cxl *adapter);
+ u64 (*timebase_read)(struct cxl *adapter);
+ int capi_mode;
+ bool needs_reset_before_disable;
};
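/*
 * Editor's sketch (abridged, assembled from functions introduced later in
 * this patch, not the literal instance): a PSL card would populate the
 * ops table roughly like this, leaving any hook the service layer lacks
 * as NULL — callers check for NULL before invoking.
 */
static const struct cxl_service_layer_ops example_psl_ops = {
	.adapter_regs_init = init_implementation_adapter_psl_regs,
	.afu_regs_init = init_implementation_afu_psl_regs,
	.debugfs_add_adapter_sl_regs = cxl_debugfs_add_adapter_psl_regs,
	.debugfs_add_afu_sl_regs = cxl_debugfs_add_afu_psl_regs,
	.psl_irq_dump_registers = cxl_native_psl_irq_dump_regs,
	.err_irq_dump_registers = cxl_native_err_irq_dump_regs,
	.debugfs_stop_trace = cxl_stop_trace,
	.write_timebase_ctrl = write_timebase_ctrl_psl,
	.timebase_read = timebase_read_psl,
	.needs_reset_before_disable = true,
};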
struct cxl_native {
@@ -533,6 +571,7 @@ struct cxl_native {
irq_hw_number_t err_hwirq;
unsigned int err_virq;
u64 ps_off;
+ const struct cxl_service_layer_ops *sl_ops;
};
struct cxl_guest {
@@ -563,6 +602,7 @@ struct cxl {
struct bin_attribute cxl_attr;
int adapter_num;
int user_irqs;
+ int min_pe;
u64 ps_size;
u16 psl_rev;
u16 base_image;
@@ -688,9 +728,21 @@ static inline u64 cxl_p2n_read(struct cxl_afu *afu, cxl_p2n_reg_t reg)
ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
loff_t off, size_t count);
+/* Internal functions wrapped in cxl_base to allow PHB to call them */
+bool _cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu);
+void _cxl_pci_disable_device(struct pci_dev *dev);
+int _cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq);
+int _cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
+void _cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev);
struct cxl_calls {
void (*cxl_slbia)(struct mm_struct *mm);
+ bool (*cxl_pci_associate_default_context)(struct pci_dev *dev, struct cxl_afu *afu);
+ void (*cxl_pci_disable_device)(struct pci_dev *dev);
+ int (*cxl_next_msi_hwirq)(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq);
+ int (*cxl_cx4_setup_msi_irqs)(struct pci_dev *pdev, int nvec, int type);
+ void (*cxl_cx4_teardown_msi_irqs)(struct pci_dev *pdev);
+
struct module *owner;
};
int register_cxl_calls(struct cxl_calls *calls);
@@ -805,6 +857,11 @@ int cxl_tlb_slb_invalidate(struct cxl *adapter);
int cxl_afu_disable(struct cxl_afu *afu);
int cxl_psl_purge(struct cxl_afu *afu);
+void cxl_debugfs_add_adapter_psl_regs(struct cxl *adapter, struct dentry *dir);
+void cxl_debugfs_add_adapter_xsl_regs(struct cxl *adapter, struct dentry *dir);
+void cxl_debugfs_add_afu_psl_regs(struct cxl_afu *afu, struct dentry *dir);
+void cxl_native_psl_irq_dump_regs(struct cxl_context *ctx);
+void cxl_native_err_irq_dump_regs(struct cxl *adapter);
void cxl_stop_trace(struct cxl *cxl);
int cxl_pci_vphb_add(struct cxl_afu *afu);
void cxl_pci_vphb_remove(struct cxl_afu *afu);
@@ -855,6 +912,7 @@ struct cxl_backend_ops {
int (*attach_process)(struct cxl_context *ctx, bool kernel,
u64 wed, u64 amr);
int (*detach_process)(struct cxl_context *ctx);
+ void (*update_ivtes)(struct cxl_context *ctx);
bool (*support_attributes)(const char *attr_name, enum cxl_attrs type);
bool (*link_ok)(struct cxl *cxl, struct cxl_afu *afu);
void (*release_afu)(struct device *dev);
@@ -879,4 +937,7 @@ extern const struct cxl_backend_ops *cxl_ops;
/* check if the given pci_dev is on the cxl vphb bus */
bool cxl_pci_is_vphb_device(struct pci_dev *dev);
+
+/* decode AFU error bits in the PSL register PSL_SERR_An */
+void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr);
#endif
diff --git a/drivers/misc/cxl/debugfs.c b/drivers/misc/cxl/debugfs.c
index 5751899e0..ec7b8a017 100644
--- a/drivers/misc/cxl/debugfs.c
+++ b/drivers/misc/cxl/debugfs.c
@@ -51,6 +51,19 @@ static struct dentry *debugfs_create_io_x64(const char *name, umode_t mode,
return debugfs_create_file(name, mode, parent, (void __force *)value, &fops_io_x64);
}
+void cxl_debugfs_add_adapter_psl_regs(struct cxl *adapter, struct dentry *dir)
+{
+ debugfs_create_io_x64("fir1", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR1));
+ debugfs_create_io_x64("fir2", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR2));
+ debugfs_create_io_x64("fir_cntl", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR_CNTL));
+ debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_TRACE));
+}
+
+void cxl_debugfs_add_adapter_xsl_regs(struct cxl *adapter, struct dentry *dir)
+{
+ debugfs_create_io_x64("fec", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_XSL_FEC));
+}
+
int cxl_debugfs_adapter_add(struct cxl *adapter)
{
struct dentry *dir;
@@ -65,13 +78,10 @@ int cxl_debugfs_adapter_add(struct cxl *adapter)
return PTR_ERR(dir);
adapter->debugfs = dir;
- debugfs_create_io_x64("fir1", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR1));
- debugfs_create_io_x64("fir2", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR2));
- debugfs_create_io_x64("fir_cntl", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR_CNTL));
debugfs_create_io_x64("err_ivte", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_ErrIVTE));
- debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_TRACE));
-
+ if (adapter->native->sl_ops->debugfs_add_adapter_sl_regs)
+ adapter->native->sl_ops->debugfs_add_adapter_sl_regs(adapter, dir);
return 0;
}
@@ -80,6 +90,14 @@ void cxl_debugfs_adapter_remove(struct cxl *adapter)
debugfs_remove_recursive(adapter->debugfs);
}
+void cxl_debugfs_add_afu_psl_regs(struct cxl_afu *afu, struct dentry *dir)
+{
+ debugfs_create_io_x64("fir", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_FIR_SLICE_An));
+ debugfs_create_io_x64("serr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SERR_An));
+ debugfs_create_io_x64("afu_debug", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_AFU_DEBUG_An));
+ debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SLICE_TRACE));
+}
+
int cxl_debugfs_afu_add(struct cxl_afu *afu)
{
struct dentry *dir;
@@ -94,18 +112,15 @@ int cxl_debugfs_afu_add(struct cxl_afu *afu)
return PTR_ERR(dir);
afu->debugfs = dir;
- debugfs_create_io_x64("fir", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_FIR_SLICE_An));
- debugfs_create_io_x64("serr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SERR_An));
- debugfs_create_io_x64("afu_debug", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_AFU_DEBUG_An));
debugfs_create_io_x64("sr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SR_An));
-
debugfs_create_io_x64("dsisr", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_DSISR_An));
debugfs_create_io_x64("dar", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_DAR_An));
debugfs_create_io_x64("sstp0", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP0_An));
debugfs_create_io_x64("sstp1", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP1_An));
debugfs_create_io_x64("err_status", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_ErrStat_An));
- debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SLICE_TRACE));
+ if (afu->adapter->native->sl_ops->debugfs_add_afu_sl_regs)
+ afu->adapter->native->sl_ops->debugfs_add_afu_sl_regs(afu, dir);
return 0;
}
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index eec468f16..5fb9894b1 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -293,6 +293,17 @@ int afu_mmap(struct file *file, struct vm_area_struct *vm)
return cxl_context_iomap(ctx, vm);
}
+static inline bool ctx_event_pending(struct cxl_context *ctx)
+{
+ if (ctx->pending_irq || ctx->pending_fault || ctx->pending_afu_err)
+ return true;
+
+ if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events))
+ return true;
+
+ return false;
+}
+
unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
{
struct cxl_context *ctx = file->private_data;
@@ -305,8 +316,7 @@ unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
pr_devel("afu_poll wait done pe: %i\n", ctx->pe);
spin_lock_irqsave(&ctx->lock, flags);
- if (ctx->pending_irq || ctx->pending_fault ||
- ctx->pending_afu_err)
+ if (ctx_event_pending(ctx))
mask |= POLLIN | POLLRDNORM;
else if (ctx->status == CLOSED)
/* Only error on closed when there are no further events pending
@@ -319,16 +329,46 @@ unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
return mask;
}
-static inline int ctx_event_pending(struct cxl_context *ctx)
+static ssize_t afu_driver_event_copy(struct cxl_context *ctx,
+ char __user *buf,
+ struct cxl_event *event,
+ struct cxl_event_afu_driver_reserved *pl)
{
- return (ctx->pending_irq || ctx->pending_fault ||
- ctx->pending_afu_err || (ctx->status == CLOSED));
+ /* Check event */
+ if (!pl) {
+ ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL);
+ return -EFAULT;
+ }
+
+ /* Check event size */
+ event->header.size += pl->data_size;
+ if (event->header.size > CXL_READ_MIN_SIZE) {
+ ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL);
+ return -EFAULT;
+ }
+
+ /* Copy event header */
+ if (copy_to_user(buf, event, sizeof(struct cxl_event_header))) {
+ ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT);
+ return -EFAULT;
+ }
+
+ /* Copy event data */
+ buf += sizeof(struct cxl_event_header);
+ if (copy_to_user(buf, &pl->data, pl->data_size)) {
+ ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT);
+ return -EFAULT;
+ }
+
+ ctx->afu_driver_ops->event_delivered(ctx, pl, 0); /* Success */
+ return event->header.size;
}
ssize_t afu_read(struct file *file, char __user *buf, size_t count,
loff_t *off)
{
struct cxl_context *ctx = file->private_data;
+ struct cxl_event_afu_driver_reserved *pl = NULL;
struct cxl_event event;
unsigned long flags;
int rc;
@@ -344,7 +384,7 @@ ssize_t afu_read(struct file *file, char __user *buf, size_t count,
for (;;) {
prepare_to_wait(&ctx->wq, &wait, TASK_INTERRUPTIBLE);
- if (ctx_event_pending(ctx))
+ if (ctx_event_pending(ctx) || (ctx->status == CLOSED))
break;
if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
@@ -374,7 +414,12 @@ ssize_t afu_read(struct file *file, char __user *buf, size_t count,
memset(&event, 0, sizeof(event));
event.header.process_element = ctx->pe;
event.header.size = sizeof(struct cxl_event_header);
- if (ctx->pending_irq) {
+ if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events)) {
+ pr_devel("afu_read delivering AFU driver specific event\n");
+ pl = ctx->afu_driver_ops->fetch_event(ctx);
+ atomic_dec(&ctx->afu_driver_events);
+ event.header.type = CXL_EVENT_AFU_DRIVER;
+ } else if (ctx->pending_irq) {
pr_devel("afu_read delivering AFU interrupt\n");
event.header.size += sizeof(struct cxl_event_afu_interrupt);
event.header.type = CXL_EVENT_AFU_INTERRUPT;
@@ -404,6 +449,9 @@ ssize_t afu_read(struct file *file, char __user *buf, size_t count,
spin_unlock_irqrestore(&ctx->lock, flags);
+ if (event.header.type == CXL_EVENT_AFU_DRIVER)
+ return afu_driver_event_copy(ctx, buf, &event, pl);
+
if (copy_to_user(buf, &event, event.header.size))
return -EFAULT;
return event.header.size;
@@ -558,7 +606,7 @@ int __init cxl_file_init(void)
* If these change we really need to update API. Either change some
* flags or update API version number CXL_API_VERSION.
*/
- BUILD_BUG_ON(CXL_API_VERSION != 2);
+ BUILD_BUG_ON(CXL_API_VERSION != 3);
BUILD_BUG_ON(sizeof(struct cxl_ioctl_start_work) != 64);
BUILD_BUG_ON(sizeof(struct cxl_event_header) != 8);
BUILD_BUG_ON(sizeof(struct cxl_event_afu_interrupt) != 8);
diff --git a/drivers/misc/cxl/flash.c b/drivers/misc/cxl/flash.c
index 68dd0b7da..c63d61e17 100644
--- a/drivers/misc/cxl/flash.c
+++ b/drivers/misc/cxl/flash.c
@@ -24,8 +24,8 @@ struct ai_header {
};
static struct semaphore sem;
-unsigned long *buffer[CXL_AI_MAX_ENTRIES];
-struct sg_list *le;
+static unsigned long *buffer[CXL_AI_MAX_ENTRIES];
+static struct sg_list *le;
static u64 continue_token;
static unsigned int transfer;
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
index bc8d0b987..9aa58a77a 100644
--- a/drivers/misc/cxl/guest.c
+++ b/drivers/misc/cxl/guest.c
@@ -196,15 +196,18 @@ static irqreturn_t guest_slice_irq_err(int irq, void *data)
{
struct cxl_afu *afu = data;
int rc;
- u64 serr;
+ u64 serr, afu_error, dsisr;
- WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq);
rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr);
if (rc) {
dev_crit(&afu->dev, "Couldn't read PSL_SERR_An: %d\n", rc);
return IRQ_HANDLED;
}
- dev_crit(&afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr);
+ afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
+ dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
+ cxl_afu_decode_psl_serr(afu, serr);
+ dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
+ dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);
rc = cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr);
if (rc)
@@ -1052,16 +1055,18 @@ static void free_adapter(struct cxl *adapter)
struct irq_avail *cur;
int i;
- if (adapter->guest->irq_avail) {
- for (i = 0; i < adapter->guest->irq_nranges; i++) {
- cur = &adapter->guest->irq_avail[i];
- kfree(cur->bitmap);
+ if (adapter->guest) {
+ if (adapter->guest->irq_avail) {
+ for (i = 0; i < adapter->guest->irq_nranges; i++) {
+ cur = &adapter->guest->irq_avail[i];
+ kfree(cur->bitmap);
+ }
+ kfree(adapter->guest->irq_avail);
}
- kfree(adapter->guest->irq_avail);
+ kfree(adapter->guest->status);
+ kfree(adapter->guest);
}
- kfree(adapter->guest->status);
cxl_remove_adapter_nr(adapter);
- kfree(adapter->guest);
kfree(adapter);
}
@@ -1182,6 +1187,7 @@ const struct cxl_backend_ops cxl_guest_ops = {
.ack_irq = guest_ack_irq,
.attach_process = guest_attach_process,
.detach_process = guest_detach_process,
+ .update_ivtes = NULL,
.support_attributes = guest_support_attributes,
.link_ok = guest_link_ok,
.release_afu = guest_release_afu,
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index 8def4553a..dec60f58a 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -260,9 +260,6 @@ int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
else
alloc_count = count + 1;
- /* Initialize the list head to hold irq names */
- INIT_LIST_HEAD(&ctx->irq_names);
-
if ((rc = cxl_ops->alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter,
alloc_count)))
return rc;
@@ -374,3 +371,32 @@ void afu_release_irqs(struct cxl_context *ctx, void *cookie)
ctx->irq_count = 0;
}
+
+void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr)
+{
+ dev_crit(&afu->dev,
+ "PSL Slice error received. Check AFU for root cause.\n");
+ dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
+ if (serr & CXL_PSL_SERR_An_afuto)
+ dev_crit(&afu->dev, "AFU MMIO Timeout\n");
+ if (serr & CXL_PSL_SERR_An_afudis)
+ dev_crit(&afu->dev,
+ "MMIO targeted Accelerator that was not enabled\n");
+ if (serr & CXL_PSL_SERR_An_afuov)
+ dev_crit(&afu->dev, "AFU CTAG Overflow\n");
+ if (serr & CXL_PSL_SERR_An_badsrc)
+ dev_crit(&afu->dev, "Bad Interrupt Source\n");
+ if (serr & CXL_PSL_SERR_An_badctx)
+ dev_crit(&afu->dev, "Bad Context Handle\n");
+ if (serr & CXL_PSL_SERR_An_llcmdis)
+ dev_crit(&afu->dev, "LLCMD to Disabled AFU\n");
+ if (serr & CXL_PSL_SERR_An_llcmdto)
+ dev_crit(&afu->dev, "LLCMD Timeout to AFU\n");
+ if (serr & CXL_PSL_SERR_An_afupar)
+ dev_crit(&afu->dev, "AFU MMIO Parity Error\n");
+ if (serr & CXL_PSL_SERR_An_afudup)
+ dev_crit(&afu->dev, "AFU MMIO Duplicate CTAG Error\n");
+ if (serr & CXL_PSL_SERR_An_AE)
+ dev_crit(&afu->dev,
+ "AFU asserted JDONE with JERROR in AFU Directed Mode\n");
+}
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
index ae68c3201..d9be23b24 100644
--- a/drivers/misc/cxl/main.c
+++ b/drivers/misc/cxl/main.c
@@ -110,6 +110,11 @@ static inline void cxl_slbia_core(struct mm_struct *mm)
static struct cxl_calls cxl_calls = {
.cxl_slbia = cxl_slbia_core,
+ .cxl_pci_associate_default_context = _cxl_pci_associate_default_context,
+ .cxl_pci_disable_device = _cxl_pci_disable_device,
+ .cxl_next_msi_hwirq = _cxl_next_msi_hwirq,
+ .cxl_cx4_setup_msi_irqs = _cxl_cx4_setup_msi_irqs,
+ .cxl_cx4_teardown_msi_irqs = _cxl_cx4_teardown_msi_irqs,
.owner = THIS_MODULE,
};
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 55d8a1459..e606fdc4b 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -21,10 +21,10 @@
#include "cxl.h"
#include "trace.h"
-static int afu_control(struct cxl_afu *afu, u64 command,
+static int afu_control(struct cxl_afu *afu, u64 command, u64 clear,
u64 result, u64 mask, bool enabled)
{
- u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
+ u64 AFU_Cntl;
unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
int rc = 0;
@@ -33,7 +33,8 @@ static int afu_control(struct cxl_afu *afu, u64 command,
trace_cxl_afu_ctrl(afu, command);
- cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl | command);
+ AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
+ cxl_p2n_write(afu, CXL_AFU_Cntl_An, (AFU_Cntl & ~clear) | command);
AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
while ((AFU_Cntl & mask) != result) {
@@ -54,6 +55,16 @@ static int afu_control(struct cxl_afu *afu, u64 command,
cpu_relax();
AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
};
+
+ if (AFU_Cntl & CXL_AFU_Cntl_An_RA) {
+ /*
+ * Workaround for a bug in the XSL used in the Mellanox CX4
+ * that fails to clear the RA bit after an AFU reset,
+ * preventing subsequent AFU resets from working.
+ */
+ cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl & ~CXL_AFU_Cntl_An_RA);
+ }
+
pr_devel("AFU command complete: %llx\n", command);
afu->enabled = enabled;
out:
@@ -67,7 +78,7 @@ static int afu_enable(struct cxl_afu *afu)
{
pr_devel("AFU enable request\n");
- return afu_control(afu, CXL_AFU_Cntl_An_E,
+ return afu_control(afu, CXL_AFU_Cntl_An_E, 0,
CXL_AFU_Cntl_An_ES_Enabled,
CXL_AFU_Cntl_An_ES_MASK, true);
}
@@ -76,7 +87,8 @@ int cxl_afu_disable(struct cxl_afu *afu)
{
pr_devel("AFU disable request\n");
- return afu_control(afu, 0, CXL_AFU_Cntl_An_ES_Disabled,
+ return afu_control(afu, 0, CXL_AFU_Cntl_An_E,
+ CXL_AFU_Cntl_An_ES_Disabled,
CXL_AFU_Cntl_An_ES_MASK, false);
}
@@ -85,7 +97,7 @@ static int native_afu_reset(struct cxl_afu *afu)
{
pr_devel("AFU reset request\n");
- return afu_control(afu, CXL_AFU_Cntl_An_RA,
+ return afu_control(afu, CXL_AFU_Cntl_An_RA, 0,
CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled,
CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
false);
@@ -189,7 +201,7 @@ int cxl_alloc_spa(struct cxl_afu *afu)
unsigned spa_size;
/* Work out how many pages to allocate */
- afu->native->spa_order = 0;
+ afu->native->spa_order = -1;
do {
afu->native->spa_order++;
spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;
@@ -430,7 +442,6 @@ static int remove_process_element(struct cxl_context *ctx)
return rc;
}
-
void cxl_assign_psn_space(struct cxl_context *ctx)
{
if (!ctx->afu->pp_size || ctx->master) {
@@ -507,10 +518,39 @@ static u64 calculate_sr(struct cxl_context *ctx)
return sr;
}
+static void update_ivtes_directed(struct cxl_context *ctx)
+{
+ bool need_update = (ctx->status == STARTED);
+ int r;
+
+ if (need_update) {
+ WARN_ON(terminate_process_element(ctx));
+ WARN_ON(remove_process_element(ctx));
+ }
+
+ for (r = 0; r < CXL_IRQ_RANGES; r++) {
+ ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
+ ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
+ }
+
+ /*
+ * Theoretically we could use the update llcmd, instead of a
+ * terminate/remove/add (or if an atomic update was required we could
+ * do a suspend/update/resume), however it seems there might be issues
+ * with the update llcmd on some cards (including those using an XSL on
+ * an ASIC) so for now it's safest to go with the commands that are
+ * known to work. In the future if we come across a situation where the
+ * card may be performing transactions using the same PE while we are
+ * doing this update we might need to revisit this.
+ */
+ if (need_update)
+ WARN_ON(add_process_element(ctx));
+}
+
static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
{
u32 pid;
- int r, result;
+ int result;
cxl_assign_psn_space(ctx);
@@ -545,10 +585,7 @@ static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
ctx->irqs.range[0] = 1;
}
- for (r = 0; r < CXL_IRQ_RANGES; r++) {
- ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
- ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
- }
+ update_ivtes_directed(ctx);
ctx->elem->common.amr = cpu_to_be64(amr);
ctx->elem->common.wed = cpu_to_be64(wed);
@@ -570,7 +607,33 @@ static int deactivate_afu_directed(struct cxl_afu *afu)
cxl_sysfs_afu_m_remove(afu);
cxl_chardev_afu_remove(afu);
- cxl_ops->afu_reset(afu);
+ /*
+ * The CAIA section 2.2.1 indicates that the procedure for starting and
+ * stopping an AFU in AFU directed mode is AFU specific, which is not
+ * ideal since this code is generic and with one exception has no
+ * knowledge of the AFU. This is in contrast to the procedure for
+ * disabling a dedicated process AFU, which is documented to just
+ * require a reset. The architecture does indicate that both an AFU
+ * reset and an AFU disable should result in the AFU being disabled and
+ * we do both followed by a PSL purge for safety.
+ *
+ * Notably we used to have some issues with the disable sequence on PSL
+ * cards, which is why we ended up using this heavy weight procedure in
+ * the first place, however a bug was discovered that had rendered the
+ * disable operation ineffective, so it is conceivable that was the
+ * sole explanation for those difficulties. Careful regression testing
+ * is recommended if anyone attempts to remove or reorder these
+ * operations.
+ *
+ * The XSL on the Mellanox CX4 behaves a little differently from the
+ * PSL based cards and will time out an AFU reset if the AFU is still
+ * enabled. That card is special in that we do have a means to identify
+ * it from this code, so in that case we skip the reset and just use a
+ * disable/purge to avoid the timeout and corresponding noise in the
+ * kernel log.
+ */
+ if (afu->adapter->native->sl_ops->needs_reset_before_disable)
+ cxl_ops->afu_reset(afu);
cxl_afu_disable(afu);
cxl_psl_purge(afu);
@@ -600,6 +663,22 @@ static int activate_dedicated_process(struct cxl_afu *afu)
return cxl_chardev_d_afu_add(afu);
}
+static void update_ivtes_dedicated(struct cxl_context *ctx)
+{
+ struct cxl_afu *afu = ctx->afu;
+
+ cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
+ (((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
+ (((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
+ (((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
+ ((u64)ctx->irqs.offset[3] & 0xffff));
+ cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
+ (((u64)ctx->irqs.range[0] & 0xffff) << 48) |
+ (((u64)ctx->irqs.range[1] & 0xffff) << 32) |
+ (((u64)ctx->irqs.range[2] & 0xffff) << 16) |
+ ((u64)ctx->irqs.range[3] & 0xffff));
+}
+
static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr)
{
struct cxl_afu *afu = ctx->afu;
@@ -618,16 +697,7 @@ static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr)
cxl_prefault(ctx, wed);
- cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
- (((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
- (((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
- (((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
- ((u64)ctx->irqs.offset[3] & 0xffff));
- cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
- (((u64)ctx->irqs.range[0] & 0xffff) << 48) |
- (((u64)ctx->irqs.range[1] & 0xffff) << 32) |
- (((u64)ctx->irqs.range[2] & 0xffff) << 16) |
- ((u64)ctx->irqs.range[3] & 0xffff));
+ update_ivtes_dedicated(ctx);
cxl_p2n_write(afu, CXL_PSL_AMR_An, amr);
@@ -703,12 +773,37 @@ static int native_attach_process(struct cxl_context *ctx, bool kernel,
static inline int detach_process_native_dedicated(struct cxl_context *ctx)
{
+ /*
+ * The CAIA section 2.1.1 indicates that we need to do an AFU reset to
+ * stop the AFU in dedicated mode (we therefore do not make that
+ * optional like we do in the afu directed path). It does not indicate
+ * that we need to do an explicit disable (which should occur
+ * implicitly as part of the reset) or purge, but we do these as well
+ * to be on the safe side.
+ *
+ * Notably we used to have some issues with the disable sequence
+ * (before the sequence was spelled out in the architecture) which is
+ * why we were so heavy weight in the first place, however a bug was
+ * discovered that had rendered the disable operation ineffective, so
+ * it is conceivable that was the sole explanation for those
+ * difficulties. Point is, we should be careful and do some regression
+ * testing if we ever attempt to remove any part of this procedure.
+ */
cxl_ops->afu_reset(ctx->afu);
cxl_afu_disable(ctx->afu);
cxl_psl_purge(ctx->afu);
return 0;
}
+static void native_update_ivtes(struct cxl_context *ctx)
+{
+ if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
+ return update_ivtes_directed(ctx);
+ if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
+ return update_ivtes_dedicated(ctx);
+ WARN(1, "native_update_ivtes: Bad mode\n");
+}
+
static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
{
if (!ctx->pe_inserted)
@@ -754,26 +849,38 @@ static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info)
return 0;
}
-static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
- u64 dsisr, u64 errstat)
+void cxl_native_psl_irq_dump_regs(struct cxl_context *ctx)
{
u64 fir1, fir2, fir_slice, serr, afu_debug;
fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
- serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);
- dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);
dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
- dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
+ if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
+ serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
+ cxl_afu_decode_psl_serr(ctx->afu, serr);
+ }
dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
+}
+
+static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
+ u64 dsisr, u64 errstat)
+{
+ dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);
- dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
- cxl_stop_trace(ctx->afu->adapter);
+ if (ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers)
+ ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers(ctx);
+
+ if (ctx->afu->adapter->native->sl_ops->debugfs_stop_trace) {
+ dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
+ ctx->afu->adapter->native->sl_ops->debugfs_stop_trace(ctx->afu->adapter);
+ }
return cxl_ops->ack_irq(ctx, 0, errstat);
}
@@ -817,7 +924,7 @@ static irqreturn_t native_irq_multiplexed(int irq, void *data)
return fail_psl_irq(afu, &irq_info);
}
-void native_irq_wait(struct cxl_context *ctx)
+static void native_irq_wait(struct cxl_context *ctx)
{
u64 dsisr;
int timeout = 1000;
@@ -849,41 +956,56 @@ void native_irq_wait(struct cxl_context *ctx)
static irqreturn_t native_slice_irq_err(int irq, void *data)
{
struct cxl_afu *afu = data;
- u64 fir_slice, errstat, serr, afu_debug;
-
- WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq);
+ u64 fir_slice, errstat, serr, afu_debug, afu_error, dsisr;
+ /*
+ * The slice error interrupt is only used with a full PSL (no XSL).
+ */
serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
- dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
+ afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
+ dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
+ cxl_afu_decode_psl_serr(afu, serr);
dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
+ dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
+ dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);
cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
return IRQ_HANDLED;
}
+void cxl_native_err_irq_dump_regs(struct cxl *adapter)
+{
+ u64 fir1, fir2;
+
+ fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
+ fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);
+
+ dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2);
+}
+
static irqreturn_t native_irq_err(int irq, void *data)
{
struct cxl *adapter = data;
- u64 fir1, fir2, err_ivte;
+ u64 err_ivte;
WARN(1, "CXL ERROR interrupt %i\n", irq);
err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);
- dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
- cxl_stop_trace(adapter);
-
- fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
- fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);
+ if (adapter->native->sl_ops->debugfs_stop_trace) {
+ dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
+ adapter->native->sl_ops->debugfs_stop_trace(adapter);
+ }
- dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2);
+ if (adapter->native->sl_ops->err_irq_dump_registers)
+ adapter->native->sl_ops->err_irq_dump_registers(adapter);
return IRQ_HANDLED;
}
@@ -1128,6 +1250,7 @@ const struct cxl_backend_ops cxl_native_ops = {
.irq_wait = native_irq_wait,
.attach_process = native_attach_process,
.detach_process = native_detach_process,
+ .update_ivtes = native_update_ivtes,
.support_attributes = native_support_attributes,
.link_ok = cxl_adapter_link_ok,
.release_afu = cxl_pci_release_afu,
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index a08fcc888..6f0c4ac4b 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -55,6 +55,8 @@
pci_read_config_byte(dev, vsec + 0xa, dest)
#define CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val) \
pci_write_config_byte(dev, vsec + 0xa, val)
+#define CXL_WRITE_VSEC_MODE_CONTROL_BUS(bus, devfn, vsec, val) \
+ pci_bus_write_config_byte(bus, devfn, vsec + 0xa, val)
#define CXL_VSEC_PROTOCOL_MASK 0xe0
#define CXL_VSEC_PROTOCOL_1024TB 0x80
#define CXL_VSEC_PROTOCOL_512TB 0x40
@@ -352,13 +354,10 @@ static u64 get_capp_unit_id(struct device_node *np)
return 0;
}
-static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
+static int calc_capp_routing(struct pci_dev *dev, u64 *chipid, u64 *capp_unit_id)
{
struct device_node *np;
const __be32 *prop;
- u64 psl_dsnctl;
- u64 chipid;
- u64 capp_unit_id;
if (!(np = pnv_pci_get_phb_node(dev)))
return -ENODEV;
@@ -367,14 +366,28 @@ static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev
np = of_get_next_parent(np);
if (!np)
return -ENODEV;
- chipid = be32_to_cpup(prop);
- capp_unit_id = get_capp_unit_id(np);
+ *chipid = be32_to_cpup(prop);
+ *capp_unit_id = get_capp_unit_id(np);
of_node_put(np);
- if (!capp_unit_id) {
+ if (!*capp_unit_id) {
pr_err("cxl: invalid capp unit id\n");
return -ENODEV;
}
+ return 0;
+}
+
+static int init_implementation_adapter_psl_regs(struct cxl *adapter, struct pci_dev *dev)
+{
+ u64 psl_dsnctl, psl_fircntl;
+ u64 chipid;
+ u64 capp_unit_id;
+ int rc;
+
+ rc = calc_capp_routing(dev, &chipid, &capp_unit_id);
+ if (rc)
+ return rc;
+
psl_dsnctl = 0x0000900000000000ULL; /* pteupd ttype, scdone */
psl_dsnctl |= (0x2ULL << (63-38)); /* MMIO hang pulse: 256 us */
/* Tell PSL where to route data to */
@@ -385,16 +398,72 @@ static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev
cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL);
/* snoop write mask */
cxl_p1_write(adapter, CXL_PSL_SNWRALLOC, 0x00000000FFFFFFFFULL);
- /* set fir_accum */
- cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, 0x0800000000000000ULL);
+ /* set fir_cntl to recommended value for production env */
+ psl_fircntl = (0x2ULL << (63-3)); /* ce_report */
+ psl_fircntl |= (0x1ULL << (63-6)); /* FIR_report */
+ psl_fircntl |= 0x1ULL; /* ce_thresh */
+ cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, psl_fircntl);
/* for debugging with trace arrays */
cxl_p1_write(adapter, CXL_PSL_TRACE, 0x0000FF7C00000000ULL);
return 0;
}
+static int init_implementation_adapter_xsl_regs(struct cxl *adapter, struct pci_dev *dev)
+{
+ u64 xsl_dsnctl;
+ u64 chipid;
+ u64 capp_unit_id;
+ int rc;
+
+ rc = calc_capp_routing(dev, &chipid, &capp_unit_id);
+ if (rc)
+ return rc;
+
+ /* Tell XSL where to route data to */
+ xsl_dsnctl = 0x0000600000000000ULL | (chipid << (63-5));
+ xsl_dsnctl |= (capp_unit_id << (63-13));
+ cxl_p1_write(adapter, CXL_XSL_DSNCTL, xsl_dsnctl);
+
+ return 0;
+}
+
+/* PSL & XSL */
+#define TBSYNC_CAL(n) (((u64)n & 0x7) << (63-3))
#define TBSYNC_CNT(n) (((u64)n & 0x7) << (63-6))
-#define _2048_250MHZ_CYCLES 1
+/* For the PSL, n selects a multiple of 2048 cycles of the 250 MHz clock, for 0 < n <= 7: */
+#define PSL_2048_250MHZ_CYCLES 1
+
+static void write_timebase_ctrl_psl(struct cxl *adapter)
+{
+ cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT,
+ TBSYNC_CNT(2 * PSL_2048_250MHZ_CYCLES));
+}
+
+/* XSL */
+#define TBSYNC_ENA (1ULL << 63)
+/* For the XSL this is 2**n * 2000 clocks for 0 < n <= 6: */
+#define XSL_2000_CLOCKS 1
+#define XSL_4000_CLOCKS 2
+#define XSL_8000_CLOCKS 3
+
+static void write_timebase_ctrl_xsl(struct cxl *adapter)
+{
+ cxl_p1_write(adapter, CXL_XSL_TB_CTLSTAT,
+ TBSYNC_ENA |
+ TBSYNC_CAL(3) |
+ TBSYNC_CNT(XSL_4000_CLOCKS));
+}
+
+static u64 timebase_read_psl(struct cxl *adapter)
+{
+ return cxl_p1_read(adapter, CXL_PSL_Timebase);
+}
+
+static u64 timebase_read_xsl(struct cxl *adapter)
+{
+ return cxl_p1_read(adapter, CXL_XSL_Timebase);
+}
static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
{
@@ -421,8 +490,7 @@ static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
* Setup PSL Timebase Control and Status register
* with the recommended Timebase Sync Count value
*/
- cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT,
- TBSYNC_CNT(2 * _2048_250MHZ_CYCLES));
+ adapter->native->sl_ops->write_timebase_ctrl(adapter);
/* Enable PSL Timebase */
cxl_p1_write(adapter, CXL_PSL_Control, 0x0000000000000000);
@@ -435,7 +503,7 @@ static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
dev_info(&dev->dev, "PSL timebase can't synchronize\n");
return;
}
- psl_tb = cxl_p1_read(adapter, CXL_PSL_Timebase);
+ psl_tb = adapter->native->sl_ops->timebase_read(adapter);
delta = mftb() - psl_tb;
if (delta < 0)
delta = -delta;
@@ -445,7 +513,7 @@ static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
return;
}
-static int init_implementation_afu_regs(struct cxl_afu *afu)
+static int init_implementation_afu_psl_regs(struct cxl_afu *afu)
{
/* read/write masks for this slice */
cxl_p1n_write(afu, CXL_PSL_APCALLOC_A, 0xFFFFFFFEFEFEFEFEULL);
@@ -551,36 +619,234 @@ static int setup_cxl_bars(struct pci_dev *dev)
return 0;
}
-/* pciex node: ibm,opal-m64-window = <0x3d058 0x0 0x3d058 0x0 0x8 0x0>; */
-static int switch_card_to_cxl(struct pci_dev *dev)
-{
+#ifdef CONFIG_CXL_BIMODAL
+
+struct cxl_switch_work {
+ struct pci_dev *dev;
+ struct work_struct work;
int vsec;
+ int mode;
+};
+
+static void switch_card_to_cxl(struct work_struct *work)
+{
+ struct cxl_switch_work *switch_work =
+ container_of(work, struct cxl_switch_work, work);
+ struct pci_dev *dev = switch_work->dev;
+ struct pci_bus *bus = dev->bus;
+ struct pci_controller *hose = pci_bus_to_host(bus);
+ struct pci_dev *bridge;
+ struct pnv_php_slot *php_slot;
+ unsigned int devfn;
u8 val;
int rc;
- dev_info(&dev->dev, "switch card to CXL\n");
+ dev_info(&bus->dev, "cxl: Preparing for mode switch...\n");
+ bridge = list_first_entry_or_null(&hose->bus->devices, struct pci_dev,
+ bus_list);
+ if (!bridge) {
+ dev_WARN(&bus->dev, "cxl: Couldn't find root port!\n");
+ goto err_dev_put;
+ }
- if (!(vsec = find_cxl_vsec(dev))) {
- dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
+ php_slot = pnv_php_find_slot(pci_device_to_OF_node(bridge));
+ if (!php_slot) {
+ dev_err(&bus->dev, "cxl: Failed to find slot hotplug "
+ "information. You may need to upgrade "
+ "skiboot. Aborting.\n");
+ goto err_dev_put;
+ }
+
+ rc = CXL_READ_VSEC_MODE_CONTROL(dev, switch_work->vsec, &val);
+ if (rc) {
+ dev_err(&bus->dev, "cxl: Failed to read CAPI mode control: %i\n", rc);
+ goto err_dev_put;
+ }
+ devfn = dev->devfn;
+
+ /* Release the reference obtained in cxl_check_and_switch_mode() */
+ pci_dev_put(dev);
+
+ dev_dbg(&bus->dev, "cxl: Removing PCI devices from kernel\n");
+ pci_lock_rescan_remove();
+ pci_hp_remove_devices(bridge->subordinate);
+ pci_unlock_rescan_remove();
+
+ /* Switch the CXL protocol on the card */
+ if (switch_work->mode == CXL_BIMODE_CXL) {
+ dev_info(&bus->dev, "cxl: Switching card to CXL mode\n");
+ val &= ~CXL_VSEC_PROTOCOL_MASK;
+ val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE;
+ rc = pnv_cxl_enable_phb_kernel_api(hose, true);
+ if (rc) {
+ dev_err(&bus->dev, "cxl: Failed to enable kernel API"
+ " on real PHB, aborting\n");
+ goto err_free_work;
+ }
+ } else {
+ dev_WARN(&bus->dev, "cxl: Switching card to PCI mode not supported!\n");
+ goto err_free_work;
+ }
+
+ rc = CXL_WRITE_VSEC_MODE_CONTROL_BUS(bus, devfn, switch_work->vsec, val);
+ if (rc) {
+ dev_err(&bus->dev, "cxl: Failed to configure CXL protocol: %i\n", rc);
+ goto err_free_work;
+ }
+
+ /*
+ * The CAIA spec (v1.1, Section 10.6 Bi-modal Device Support) states
+ * we must wait 100ms after this mode switch before touching PCIe config
+ * space.
+ */
+ msleep(100);
+
+ /*
+ * Hot reset to cause the card to come back in cxl mode. An
+ * OPAL_RESET_PCI_LINK would be sufficient, but currently lacks support
+ * in skiboot, so we use a hot reset instead.
+ *
+ * We call pci_set_pcie_reset_state() on the bridge, as a CAPI card is
+ * guaranteed to sit directly under the root port, and setting the reset
+ * state on a device directly under the root port is equivalent to doing
+ * it on the root port itself.
+ */
+ dev_info(&bus->dev, "cxl: Configuration write complete, resetting card\n");
+ pci_set_pcie_reset_state(bridge, pcie_hot_reset);
+ pci_set_pcie_reset_state(bridge, pcie_deassert_reset);
+
+ dev_dbg(&bus->dev, "cxl: Offlining slot\n");
+ rc = pnv_php_set_slot_power_state(&php_slot->slot, OPAL_PCI_SLOT_OFFLINE);
+ if (rc) {
+ dev_err(&bus->dev, "cxl: OPAL offlining call failed: %i\n", rc);
+ goto err_free_work;
+ }
+
+ dev_dbg(&bus->dev, "cxl: Onlining and probing slot\n");
+ rc = pnv_php_set_slot_power_state(&php_slot->slot, OPAL_PCI_SLOT_ONLINE);
+ if (rc) {
+ dev_err(&bus->dev, "cxl: OPAL onlining call failed: %i\n", rc);
+ goto err_free_work;
+ }
+
+ pci_lock_rescan_remove();
+ pci_hp_add_devices(bridge->subordinate);
+ pci_unlock_rescan_remove();
+
+ dev_info(&bus->dev, "cxl: CAPI mode switch completed\n");
+ kfree(switch_work);
+ return;
+
+err_dev_put:
+ /* Release the reference obtained in cxl_check_and_switch_mode() */
+ pci_dev_put(dev);
+err_free_work:
+ kfree(switch_work);
+}
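
switch_card_to_cxl() recovers its argument with container_of(), the standard idiom for going from an embedded struct work_struct back to the allocation that contains it. A userspace rendering of the pattern (the kernel's container_of() additionally type-checks via typeof; this minimal version does only the pointer arithmetic):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };	/* stand-in for the kernel type */

struct cxl_switch_work {
	int vsec;
	int mode;
	struct work_struct work;	/* embedded, as in the driver */
};

static void worker(struct work_struct *work)
{
	struct cxl_switch_work *sw =
		container_of(work, struct cxl_switch_work, work);

	printf("vsec=%#x mode=%d\n", sw->vsec, sw->mode);
}

int main(void)
{
	struct cxl_switch_work w = { .vsec = 0x100, .mode = 1 };

	worker(&w.work);	/* schedule_work() replaced by a direct call */
	return 0;
}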
+
+int cxl_check_and_switch_mode(struct pci_dev *dev, int mode, int vsec)
+{
+ struct cxl_switch_work *work;
+ u8 val;
+ int rc;
+
+ if (!cpu_has_feature(CPU_FTR_HVMODE))
return -ENODEV;
+
+ if (!vsec) {
+ vsec = find_cxl_vsec(dev);
+ if (!vsec) {
+ dev_info(&dev->dev, "CXL VSEC not found\n");
+ return -ENODEV;
+ }
}
- if ((rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val))) {
- dev_err(&dev->dev, "failed to read current mode control: %i", rc);
+ rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val);
+ if (rc) {
+ dev_err(&dev->dev, "Failed to read current mode control: %i", rc);
return rc;
}
- val &= ~CXL_VSEC_PROTOCOL_MASK;
- val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE;
- if ((rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val))) {
- dev_err(&dev->dev, "failed to enable CXL protocol: %i", rc);
- return rc;
+
+ if (mode == CXL_BIMODE_PCI) {
+ if (!(val & CXL_VSEC_PROTOCOL_ENABLE)) {
+ dev_info(&dev->dev, "Card is already in PCI mode\n");
+ return 0;
+ }
+ /*
+ * TODO: Before it's safe to switch the card back to PCI mode
+ * we need to disable the CAPP and make sure any cachelines the
+ * card holds have been flushed out. Needs skiboot support.
+ */
+ dev_WARN(&dev->dev, "CXL mode switch to PCI unsupported!\n");
+ return -EIO;
}
+
+ if (val & CXL_VSEC_PROTOCOL_ENABLE) {
+ dev_info(&dev->dev, "Card is already in CXL mode\n");
+ return 0;
+ }
+
+ dev_info(&dev->dev, "Card is in PCI mode, scheduling kernel thread "
+ "to switch to CXL mode\n");
+
+ work = kmalloc(sizeof(struct cxl_switch_work), GFP_KERNEL);
+ if (!work)
+ return -ENOMEM;
+
+ pci_dev_get(dev);
+ work->dev = dev;
+ work->vsec = vsec;
+ work->mode = mode;
+ INIT_WORK(&work->work, switch_card_to_cxl);
+
+ schedule_work(&work->work);
+
/*
- * The CAIA spec (v0.12 11.6 Bi-modal Device Support) states
- * we must wait 100ms after this mode switch before touching
- * PCIe config space.
+ * We return a failure now to abort the driver init. Once the
+ * link has been cycled and the card is in cxl mode we will
+ * come back (possibly using the generic cxl driver), but
+ * return success as the card should then be in cxl mode.
+ *
+ * TODO: What if the card comes back in PCI mode even after
+ * the switch? Don't want to spin endlessly.
*/
- msleep(100);
+ return -EBUSY;
+}
+EXPORT_SYMBOL_GPL(cxl_check_and_switch_mode);
+
+#endif /* CONFIG_CXL_BIMODAL */
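
The contract for callers: -EBUSY means the switch was scheduled and the current probe should bail out; the driver gets probed again once the hot reset brings the card back in CXL mode. A hedged sketch of how a hypothetical bi-modal device driver might use the API (foo_probe/foo_init are illustrative names, not from this patch):

/* Hypothetical caller; aborts probe while the worker cycles the link. */
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc;

	/* Passing vsec == 0 asks cxl_check_and_switch_mode() to find it */
	rc = cxl_check_and_switch_mode(pdev, CXL_BIMODE_CXL, 0);
	if (rc == -EBUSY)
		return rc;	/* switch scheduled; probe will run again */
	if (rc)
		return rc;	/* no VSEC or a config access failed */

	return foo_init(pdev);	/* card is already in CXL mode */
}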
+
+static int setup_cxl_protocol_area(struct pci_dev *dev)
+{
+ u8 val;
+ int rc;
+ int vsec = find_cxl_vsec(dev);
+
+ if (!vsec) {
+ dev_info(&dev->dev, "CXL VSEC not found\n");
+ return -ENODEV;
+ }
+
+ rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val);
+ if (rc) {
+ dev_err(&dev->dev, "Failed to read current mode control: %i\n", rc);
+ return rc;
+ }
+
+ if (!(val & CXL_VSEC_PROTOCOL_ENABLE)) {
+ dev_err(&dev->dev, "Card not in CAPI mode!\n");
+ return -EIO;
+ }
+
+ if ((val & CXL_VSEC_PROTOCOL_MASK) != CXL_VSEC_PROTOCOL_256TB) {
+ val &= ~CXL_VSEC_PROTOCOL_MASK;
+ val |= CXL_VSEC_PROTOCOL_256TB;
+ rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val);
+ if (rc) {
+ dev_err(&dev->dev, "Failed to set CXL protocol area: %i\n", rc);
+ return rc;
+ }
+ }
return 0;
}
@@ -712,6 +978,21 @@ static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu)
}
}
+ if ((afu->modes_supported & ~CXL_MODE_DEDICATED) && afu->max_procs_virtualised == 0) {
+ /*
+ * We could also check this for the dedicated process model
+ * since the architecture indicates it should be set to 1, but
+ * in that case we ignore the value and I'd rather not risk
+ * breaking any existing dedicated process AFUs that left it as
+ * 0 (not that I'm aware of any). It is clearly an error for an
+ * AFU directed AFU to set this to 0, and would have previously
+ * triggered a bug resulting in the maximum not being enforced
+ * at all since idr_alloc treats 0 as no maximum.
+ */
+ dev_err(&afu->dev, "AFU does not support any processes\n");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -753,11 +1034,13 @@ static int sanitise_afu_regs(struct cxl_afu *afu)
else
cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
}
- reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
- if (reg) {
- if (reg & ~0xffff)
- dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg);
- cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
+ if (afu->adapter->native->sl_ops->register_serr_irq) {
+ reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
+ if (reg) {
+ if (reg & ~0xffff)
+ dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg);
+ cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
+ }
}
reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
if (reg) {
@@ -835,11 +1118,13 @@ static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pc
if ((rc = cxl_afu_descriptor_looks_ok(afu)))
goto err1;
- if ((rc = init_implementation_afu_regs(afu)))
- goto err1;
+ if (adapter->native->sl_ops->afu_regs_init)
+ if ((rc = adapter->native->sl_ops->afu_regs_init(afu)))
+ goto err1;
- if ((rc = cxl_native_register_serr_irq(afu)))
- goto err1;
+ if (adapter->native->sl_ops->register_serr_irq)
+ if ((rc = adapter->native->sl_ops->register_serr_irq(afu)))
+ goto err1;
if ((rc = cxl_native_register_psl_irq(afu)))
goto err2;
@@ -847,7 +1132,8 @@ static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pc
return 0;
err2:
- cxl_native_release_serr_irq(afu);
+ if (adapter->native->sl_ops->release_serr_irq)
+ adapter->native->sl_ops->release_serr_irq(afu);
err1:
pci_unmap_slice_regs(afu);
return rc;
@@ -856,7 +1142,8 @@ err1:
static void pci_deconfigure_afu(struct cxl_afu *afu)
{
cxl_native_release_psl_irq(afu);
- cxl_native_release_serr_irq(afu);
+ if (afu->adapter->native->sl_ops->release_serr_irq)
+ afu->adapter->native->sl_ops->release_serr_irq(afu);
pci_unmap_slice_regs(afu);
}
@@ -1165,7 +1452,7 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
if ((rc = setup_cxl_bars(dev)))
return rc;
- if ((rc = switch_card_to_cxl(dev)))
+ if ((rc = setup_cxl_protocol_area(dev)))
return rc;
if ((rc = cxl_update_image_control(adapter)))
@@ -1177,10 +1464,13 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
if ((rc = sanitise_adapter_regs(adapter)))
goto err;
- if ((rc = init_implementation_adapter_regs(adapter, dev)))
+ if ((rc = adapter->native->sl_ops->adapter_regs_init(adapter, dev)))
goto err;
- if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_CAPI)))
+ /* Required for devices using CAPP DMA mode, harmless for others */
+ pci_set_master(dev);
+
+ if ((rc = pnv_phb_to_cxl_mode(dev, adapter->native->sl_ops->capi_mode)))
goto err;
/* If recovery happened, the last step is to turn on snooping.
@@ -1212,6 +1502,44 @@ static void cxl_deconfigure_adapter(struct cxl *adapter)
pci_disable_device(pdev);
}
+static const struct cxl_service_layer_ops psl_ops = {
+ .adapter_regs_init = init_implementation_adapter_psl_regs,
+ .afu_regs_init = init_implementation_afu_psl_regs,
+ .register_serr_irq = cxl_native_register_serr_irq,
+ .release_serr_irq = cxl_native_release_serr_irq,
+ .debugfs_add_adapter_sl_regs = cxl_debugfs_add_adapter_psl_regs,
+ .debugfs_add_afu_sl_regs = cxl_debugfs_add_afu_psl_regs,
+ .psl_irq_dump_registers = cxl_native_psl_irq_dump_regs,
+ .err_irq_dump_registers = cxl_native_err_irq_dump_regs,
+ .debugfs_stop_trace = cxl_stop_trace,
+ .write_timebase_ctrl = write_timebase_ctrl_psl,
+ .timebase_read = timebase_read_psl,
+ .capi_mode = OPAL_PHB_CAPI_MODE_CAPI,
+ .needs_reset_before_disable = true,
+};
+
+static const struct cxl_service_layer_ops xsl_ops = {
+ .adapter_regs_init = init_implementation_adapter_xsl_regs,
+ .debugfs_add_adapter_sl_regs = cxl_debugfs_add_adapter_xsl_regs,
+ .write_timebase_ctrl = write_timebase_ctrl_xsl,
+ .timebase_read = timebase_read_xsl,
+ .capi_mode = OPAL_PHB_CAPI_MODE_DMA,
+};
+
+static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev)
+{
+ if (dev->vendor == PCI_VENDOR_ID_MELLANOX && dev->device == 0x1013) {
+ /* Mellanox CX-4 */
+ dev_info(&adapter->dev, "Device uses an XSL\n");
+ adapter->native->sl_ops = &xsl_ops;
+ adapter->min_pe = 1; /* Workaround for CX-4 hardware bug */
+ } else {
+ dev_info(&adapter->dev, "Device uses a PSL\n");
+ adapter->native->sl_ops = &psl_ops;
+ }
+}
+
+
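Together with the NULL-guarded calls added above (afu_regs_init, register_serr_irq, release_serr_irq), these two tables form a classic optional-hook vtable: the XSL leaves hooks it lacks as NULL and common code skips them. A standalone model of that dispatch (mini types only; the real struct cxl_service_layer_ops lives in cxl.h):

#include <stdio.h>

struct adapter;			/* opaque for this sketch */

struct sl_ops {
	int  (*adapter_regs_init)(struct adapter *a);	/* mandatory */
	void (*afu_regs_init)(struct adapter *a);	/* optional */
};

static int psl_regs_init(struct adapter *a) { puts("PSL adapter regs"); return 0; }
static void psl_afu_init(struct adapter *a) { puts("PSL AFU regs"); }
static int xsl_regs_init(struct adapter *a) { puts("XSL adapter regs"); return 0; }

static const struct sl_ops psl = {
	.adapter_regs_init = psl_regs_init,
	.afu_regs_init     = psl_afu_init,
};
static const struct sl_ops xsl = {
	.adapter_regs_init = xsl_regs_init,	/* .afu_regs_init stays NULL */
};

static void configure(const struct sl_ops *ops, struct adapter *a)
{
	ops->adapter_regs_init(a);
	if (ops->afu_regs_init)		/* skip hooks this layer lacks */
		ops->afu_regs_init(a);
}

int main(void)
{
	configure(&psl, NULL);
	configure(&xsl, NULL);
	return 0;
}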
static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev)
{
struct cxl *adapter;
@@ -1227,6 +1555,8 @@ static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev)
goto err_release;
}
+ set_sl_ops(adapter, dev);
+
/* Set defaults for parameters which need to persist over
* configure/reconfigure
*/
@@ -1280,6 +1610,67 @@ static void cxl_pci_remove_adapter(struct cxl *adapter)
device_unregister(&adapter->dev);
}
+#define CXL_MAX_PCIEX_PARENT 2
+
+static int cxl_slot_is_switched(struct pci_dev *dev)
+{
+ struct device_node *np;
+ int depth = 0;
+ const __be32 *prop;
+
+ if (!(np = pci_device_to_OF_node(dev))) {
+ pr_err("cxl: np = NULL\n");
+ return -ENODEV;
+ }
+ of_node_get(np);
+ while (np) {
+ np = of_get_next_parent(np);
+ prop = of_get_property(np, "device_type", NULL);
+ if (!prop || strcmp((char *)prop, "pciex"))
+ break;
+ depth++;
+ }
+ of_node_put(np);
+ return (depth > CXL_MAX_PCIEX_PARENT);
+}
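
cxl_slot_is_switched() counts consecutive "pciex" ancestors in the device tree; more than CXL_MAX_PCIEX_PARENT (the root port plus the PHB) implies a PCIe switch sits between card and PHB. A userspace model on a hypothetical tree (a real switch contributes upstream and downstream port nodes; one extra level suffices for illustration):

#include <stdio.h>
#include <string.h>

struct node {
	const char *device_type;
	struct node *parent;
};

#define CXL_MAX_PCIEX_PARENT 2	/* root port + PHB, as in the driver */

static int slot_is_switched(const struct node *np)
{
	int depth = 0;

	for (np = np->parent; np; np = np->parent) {
		if (!np->device_type || strcmp(np->device_type, "pciex"))
			break;
		depth++;
	}
	return depth > CXL_MAX_PCIEX_PARENT;
}

int main(void)
{
	struct node phb  = { "pciex", NULL };
	struct node port = { "pciex", &phb };
	struct node sw   = { "pciex", &port };		/* switch port */
	struct node direct_card   = { NULL, &port };
	struct node switched_card = { NULL, &sw };

	printf("direct: %d, switched: %d\n",
	       slot_is_switched(&direct_card),		/* 0 */
	       slot_is_switched(&switched_card));	/* 1 */
	return 0;
}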
+
+bool cxl_slot_is_supported(struct pci_dev *dev, int flags)
+{
+ if (!cpu_has_feature(CPU_FTR_HVMODE))
+ return false;
+
+ if ((flags & CXL_SLOT_FLAG_DMA) && (!pvr_version_is(PVR_POWER8NVL))) {
+ /*
+ * CAPP DMA mode is technically supported on regular P8, but
+ * will EEH if the card attempts to access memory < 4GB, which
+ * we cannot realistically avoid. We might be able to work
+ * around the issue, but until then return unsupported:
+ */
+ return false;
+ }
+
+ if (cxl_slot_is_switched(dev))
+ return false;
+
+ /*
+ * XXX: This gets a little tricky on regular P8 (not POWER8NVL) since
+ * the CAPP can be connected to PHB 0, 1 or 2 on a first come first
+ * served basis, which is racy to check from here. If we need to
+ * support this in future we might need to consider having this
+ * function effectively reserve it ahead of time.
+ *
+ * Currently, the only user of this API is the Mellanox CX4, which is
+ * only supported on P8NVL due to the above mentioned limitation of
+ * CAPP DMA mode and therefore does not need to worry about this. If the
+ * issue with CAPP DMA mode is later worked around on P8 we might need
+ * to revisit this.
+ */
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(cxl_slot_is_supported);
+
+
static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct cxl *adapter;
@@ -1291,6 +1682,11 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
return -ENODEV;
}
+ if (cxl_slot_is_switched(dev)) {
+ dev_info(&dev->dev, "Ignoring card on incompatible PCI slot\n");
+ return -ENODEV;
+ }
+
if (cxl_verbose)
dump_cxl_config_space(dev);
@@ -1311,6 +1707,9 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
dev_err(&dev->dev, "AFU %i failed to start: %i\n", slice, rc);
}
+ if (pnv_pci_on_cxl_phb(dev) && adapter->slices >= 1)
+ pnv_cxl_phb_set_peer_afu(dev, adapter->afu[0]);
+
return 0;
}
@@ -1381,6 +1780,9 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
*/
for (i = 0; i < adapter->slices; i++) {
afu = adapter->afu[i];
+ /* Only participate in EEH if we are on a virtual PHB */
+ if (afu->phb == NULL)
+ return PCI_ERS_RESULT_NONE;
cxl_vphb_error_detected(afu, state);
}
return PCI_ERS_RESULT_DISCONNECT;
diff --git a/drivers/misc/cxl/phb.c b/drivers/misc/cxl/phb.c
new file mode 100644
index 000000000..0935d44c1
--- /dev/null
+++ b/drivers/misc/cxl/phb.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2014-2016 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/pci.h>
+#include "cxl.h"
+
+bool _cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu)
+{
+ struct cxl_context *ctx;
+
+ /*
+ * Allocate a context to do cxl things to. This is used for interrupts
+ * in the peer model using a real phb, and if we eventually do DMA ops
+ * in the virtual phb, we'll need a default context to attach them to.
+ */
+ ctx = cxl_dev_context_init(dev);
+ if (!ctx)
+ return false;
+ dev->dev.archdata.cxl_ctx = ctx;
+
+ return (cxl_ops->afu_check_and_enable(afu) == 0);
+}
+/* exported via cxl_base */
+
+void _cxl_pci_disable_device(struct pci_dev *dev)
+{
+ struct cxl_context *ctx = cxl_get_context(dev);
+
+ if (ctx) {
+ if (ctx->status == STARTED) {
+ dev_err(&dev->dev, "Default context started\n");
+ return;
+ }
+ dev->dev.archdata.cxl_ctx = NULL;
+ cxl_release_context(ctx);
+ }
+}
+/* exported via cxl_base */
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
index cdc7723b8..3519acebf 100644
--- a/drivers/misc/cxl/vphb.c
+++ b/drivers/misc/cxl/vphb.c
@@ -9,6 +9,7 @@
#include <linux/pci.h>
#include <misc/cxl.h>
+#include <asm/pnv-pci.h>
#include "cxl.h"
static int cxl_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
@@ -44,7 +45,6 @@ static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
{
struct pci_controller *phb;
struct cxl_afu *afu;
- struct cxl_context *ctx;
phb = pci_bus_to_host(dev->bus);
afu = (struct cxl_afu *)phb->private_data;
@@ -57,30 +57,7 @@ static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
set_dma_ops(&dev->dev, &dma_direct_ops);
set_dma_offset(&dev->dev, PAGE_OFFSET);
- /*
- * Allocate a context to do cxl things too. If we eventually do real
- * DMA ops, we'll need a default context to attach them to
- */
- ctx = cxl_dev_context_init(dev);
- if (!ctx)
- return false;
- dev->dev.archdata.cxl_ctx = ctx;
-
- return (cxl_ops->afu_check_and_enable(afu) == 0);
-}
-
-static void cxl_pci_disable_device(struct pci_dev *dev)
-{
- struct cxl_context *ctx = cxl_get_context(dev);
-
- if (ctx) {
- if (ctx->status == STARTED) {
- dev_err(&dev->dev, "Default context started\n");
- return;
- }
- dev->dev.archdata.cxl_ctx = NULL;
- cxl_release_context(ctx);
- }
+ return _cxl_pci_associate_default_context(dev, afu);
}
static resource_size_t cxl_pci_window_alignment(struct pci_bus *bus,
@@ -197,8 +174,8 @@ static struct pci_controller_ops cxl_pci_controller_ops =
{
.probe_mode = cxl_pci_probe_mode,
.enable_device_hook = cxl_pci_enable_device_hook,
- .disable_device = cxl_pci_disable_device,
- .release_device = cxl_pci_disable_device,
+ .disable_device = _cxl_pci_disable_device,
+ .release_device = _cxl_pci_disable_device,
.window_alignment = cxl_pci_window_alignment,
.reset_secondary_bus = cxl_pci_reset_secondary_bus,
.setup_msi_irqs = cxl_setup_msi_irqs,
@@ -208,20 +185,30 @@ static struct pci_controller_ops cxl_pci_controller_ops =
int cxl_pci_vphb_add(struct cxl_afu *afu)
{
- struct pci_dev *phys_dev;
- struct pci_controller *phb, *phys_phb;
+ struct pci_controller *phb;
struct device_node *vphb_dn;
struct device *parent;
- if (cpu_has_feature(CPU_FTR_HVMODE)) {
- phys_dev = to_pci_dev(afu->adapter->dev.parent);
- phys_phb = pci_bus_to_host(phys_dev->bus);
- vphb_dn = phys_phb->dn;
- parent = &phys_dev->dev;
- } else {
- vphb_dn = afu->adapter->dev.parent->of_node;
- parent = afu->adapter->dev.parent;
- }
+ /*
+ * If there are no AFU configuration records we won't have anything to
+ * expose under the vPHB, so skip creating one, returning success since
+ * this is still a valid case. This will also opt us out of EEH
+ * handling since we won't have anything special to do if there are no
+ * kernel drivers attached to the vPHB, and EEH handling is not yet
+ * supported in the peer model.
+ */
+ if (!afu->crs_num)
+ return 0;
+
+ /* The parent device is the adapter. Reuse the device node of
+ * the adapter.
+ * We don't seem to care what device node is used for the vPHB,
+ * but tools such as lsvpd walk up the device parents looking
+ * for a valid location code, so we might as well show devices
+ * attached to the adapter as being located on that adapter.
+ */
+ parent = afu->adapter->dev.parent;
+ vphb_dn = parent->of_node;
/* Alloc and setup PHB data structure */
phb = pcibios_alloc_controller(vphb_dn);
@@ -234,7 +221,7 @@ int cxl_pci_vphb_add(struct cxl_afu *afu)
/* Setup the PHB using arch provided callback */
phb->ops = &cxl_pcie_pci_ops;
phb->cfg_addr = NULL;
- phb->cfg_data = 0;
+ phb->cfg_data = NULL;
phb->private_data = afu;
phb->controller_ops = cxl_pci_controller_ops;
@@ -243,6 +230,11 @@ int cxl_pci_vphb_add(struct cxl_afu *afu)
if (phb->bus == NULL)
return -ENXIO;
+ /* Set release hook on root bus */
+ pci_set_host_bridge_release(to_pci_host_bridge(phb->bus->bridge),
+ pcibios_free_controller_deferred,
+ (void *) phb);
+
/* Claim resources. This might need some rework as well depending
* whether we are doing probe-only or not, like assigning unassigned
* resources etc...
@@ -269,7 +261,15 @@ void cxl_pci_vphb_remove(struct cxl_afu *afu)
afu->phb = NULL;
pci_remove_root_bus(phb->bus);
- pcibios_free_controller(phb);
+ /*
+ * We don't free phb here - that's handled by
+ * pcibios_free_controller_deferred()
+ */
+}
+
+static bool _cxl_pci_is_vphb_device(struct pci_controller *phb)
+{
+ return (phb->ops == &cxl_pcie_pci_ops);
}
bool cxl_pci_is_vphb_device(struct pci_dev *dev)
@@ -278,7 +278,7 @@ bool cxl_pci_is_vphb_device(struct pci_dev *dev)
phb = pci_bus_to_host(dev->bus);
- return (phb->ops == &cxl_pcie_pci_ops);
+ return _cxl_pci_is_vphb_device(phb);
}
struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev)
@@ -287,7 +287,13 @@ struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev)
phb = pci_bus_to_host(dev->bus);
- return (struct cxl_afu *)phb->private_data;
+ if (_cxl_pci_is_vphb_device(phb))
+ return (struct cxl_afu *)phb->private_data;
+
+ if (pnv_pci_on_cxl_phb(dev))
+ return pnv_cxl_phb_to_afu(phb);
+
+ return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(cxl_pci_to_afu);
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 9ceb63b62..3cdf8e1ca 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -58,6 +58,10 @@ struct at24_data {
int use_smbus;
int use_smbus_write;
+ ssize_t (*read_func)(struct at24_data *, char *, unsigned int, size_t);
+ ssize_t (*write_func)(struct at24_data *,
+ const char *, unsigned int, size_t);
+
/*
* Lock protects against activities from other Linux tasks,
* but not from changes by other I2C masters.
@@ -109,25 +113,63 @@ MODULE_PARM_DESC(write_timeout, "Time (in ms) to try writes (default 25)");
((1 << AT24_SIZE_FLAGS | (_flags)) \
<< AT24_SIZE_BYTELEN | ilog2(_len))
+/*
+ * Both reads and writes fail if the previous write didn't complete yet. This
+ * macro loops a few times waiting at least long enough for one entire page
+ * write to work while making sure that at least one iteration is run before
+ * checking the break condition.
+ *
+ * It takes two parameters: a variable in which the future timeout in jiffies
+ * will be stored and a temporary variable holding the time of the last
+ * iteration of processing the request. Both should be unsigned integers
+ * holding at least 32 bits.
+ */
+#define loop_until_timeout(tout, op_time) \
+ for (tout = jiffies + msecs_to_jiffies(write_timeout), op_time = 0; \
+ op_time ? time_before(op_time, tout) : true; \
+ usleep_range(1000, 1500), op_time = jiffies)
+
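Expanded, the macro is a plain for loop: the init arm sets the deadline, the condition skips the time check on the very first pass (op_time == 0, so the body always runs at least once), and the increment timestamps each retry. A userspace rendering with jiffies swapped for time(2) and the 1-1.5 ms sleep dropped (same control flow, coarser clock):

#include <stdio.h>
#include <time.h>

#define write_timeout 2	/* seconds here; milliseconds in the driver */
#define loop_until_timeout(tout, op_time) \
	for (tout = time(NULL) + write_timeout, op_time = 0; \
	     op_time ? op_time < tout : 1; \
	     op_time = time(NULL))

int main(void)
{
	unsigned long tout, op_time;
	int tries = 0;

	loop_until_timeout(tout, op_time) {
		if (++tries == 3)	/* pretend the chip acked on try 3 */
			break;
	}
	printf("done after %d tries\n", tries);
	return 0;
}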
static const struct i2c_device_id at24_ids[] = {
/* needs 8 addresses as A0-A2 are ignored */
- { "24c00", AT24_DEVICE_MAGIC(128 / 8, AT24_FLAG_TAKE8ADDR) },
+ { "24c00", AT24_DEVICE_MAGIC(128 / 8, AT24_FLAG_TAKE8ADDR) },
/* old variants can't be handled with this generic entry! */
- { "24c01", AT24_DEVICE_MAGIC(1024 / 8, 0) },
- { "24c02", AT24_DEVICE_MAGIC(2048 / 8, 0) },
+ { "24c01", AT24_DEVICE_MAGIC(1024 / 8, 0) },
+ { "24cs01", AT24_DEVICE_MAGIC(16,
+ AT24_FLAG_SERIAL | AT24_FLAG_READONLY) },
+ { "24c02", AT24_DEVICE_MAGIC(2048 / 8, 0) },
+ { "24cs02", AT24_DEVICE_MAGIC(16,
+ AT24_FLAG_SERIAL | AT24_FLAG_READONLY) },
+ { "24mac402", AT24_DEVICE_MAGIC(48 / 8,
+ AT24_FLAG_MAC | AT24_FLAG_READONLY) },
+ { "24mac602", AT24_DEVICE_MAGIC(64 / 8,
+ AT24_FLAG_MAC | AT24_FLAG_READONLY) },
/* spd is a 24c02 in memory DIMMs */
- { "spd", AT24_DEVICE_MAGIC(2048 / 8,
- AT24_FLAG_READONLY | AT24_FLAG_IRUGO) },
- { "24c04", AT24_DEVICE_MAGIC(4096 / 8, 0) },
+ { "spd", AT24_DEVICE_MAGIC(2048 / 8,
+ AT24_FLAG_READONLY | AT24_FLAG_IRUGO) },
+ { "24c04", AT24_DEVICE_MAGIC(4096 / 8, 0) },
+ { "24cs04", AT24_DEVICE_MAGIC(16,
+ AT24_FLAG_SERIAL | AT24_FLAG_READONLY) },
/* 24rf08 quirk is handled at i2c-core */
- { "24c08", AT24_DEVICE_MAGIC(8192 / 8, 0) },
- { "24c16", AT24_DEVICE_MAGIC(16384 / 8, 0) },
- { "24c32", AT24_DEVICE_MAGIC(32768 / 8, AT24_FLAG_ADDR16) },
- { "24c64", AT24_DEVICE_MAGIC(65536 / 8, AT24_FLAG_ADDR16) },
- { "24c128", AT24_DEVICE_MAGIC(131072 / 8, AT24_FLAG_ADDR16) },
- { "24c256", AT24_DEVICE_MAGIC(262144 / 8, AT24_FLAG_ADDR16) },
- { "24c512", AT24_DEVICE_MAGIC(524288 / 8, AT24_FLAG_ADDR16) },
- { "24c1024", AT24_DEVICE_MAGIC(1048576 / 8, AT24_FLAG_ADDR16) },
+ { "24c08", AT24_DEVICE_MAGIC(8192 / 8, 0) },
+ { "24cs08", AT24_DEVICE_MAGIC(16,
+ AT24_FLAG_SERIAL | AT24_FLAG_READONLY) },
+ { "24c16", AT24_DEVICE_MAGIC(16384 / 8, 0) },
+ { "24cs16", AT24_DEVICE_MAGIC(16,
+ AT24_FLAG_SERIAL | AT24_FLAG_READONLY) },
+ { "24c32", AT24_DEVICE_MAGIC(32768 / 8, AT24_FLAG_ADDR16) },
+ { "24cs32", AT24_DEVICE_MAGIC(16,
+ AT24_FLAG_ADDR16 |
+ AT24_FLAG_SERIAL |
+ AT24_FLAG_READONLY) },
+ { "24c64", AT24_DEVICE_MAGIC(65536 / 8, AT24_FLAG_ADDR16) },
+ { "24cs64", AT24_DEVICE_MAGIC(16,
+ AT24_FLAG_ADDR16 |
+ AT24_FLAG_SERIAL |
+ AT24_FLAG_READONLY) },
+ { "24c128", AT24_DEVICE_MAGIC(131072 / 8, AT24_FLAG_ADDR16) },
+ { "24c256", AT24_DEVICE_MAGIC(262144 / 8, AT24_FLAG_ADDR16) },
+ { "24c512", AT24_DEVICE_MAGIC(524288 / 8, AT24_FLAG_ADDR16) },
+ { "24c1024", AT24_DEVICE_MAGIC(1048576 / 8, AT24_FLAG_ADDR16) },
{ "at24", 0 },
{ /* END OF LIST */ }
};
@@ -145,9 +187,22 @@ MODULE_DEVICE_TABLE(acpi, at24_acpi_ids);
* This routine supports chips which consume multiple I2C addresses. It
* computes the addressing information to be used for a given r/w request.
 * Assumes that sanity checks for offset happened at the sysfs layer.
+ *
+ * Slave address and byte offset derive from the offset. Always
+ * set the byte address; on a multi-master board, another master
+ * may have changed the chip's "current" address pointer.
+ *
+ * REVISIT some multi-address chips don't rollover page reads to
+ * the next slave address, so we may need to truncate the count.
+ * Those chips might need another quirk flag.
+ *
+ * If the real hardware used four adjacent 24c02 chips and that
+ * were misconfigured as one 24c08, that would be a similar effect:
+ * one "eeprom" file not four, but larger reads would fail when
+ * they crossed certain pages.
*/
static struct i2c_client *at24_translate_offset(struct at24_data *at24,
- unsigned *offset)
+ unsigned int *offset)
{
unsigned i;
@@ -162,123 +217,168 @@ static struct i2c_client *at24_translate_offset(struct at24_data *at24,
return at24->client[i];
}
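
For a chip spanning several I2C addresses, the translation is integer division: the quotient picks the i2c_client, the remainder is the offset behind that address. A worked example with illustrative numbers (a hypothetical 24c04-style part, 512 bytes across two addresses of 256 bytes each; the driver derives the real divisor from the chip flags):

#include <stdio.h>

int main(void)
{
	unsigned offset = 0x17F;	/* byte 383 of the whole chip */
	unsigned per_client = 0x100;	/* 256 bytes behind each address */
	unsigned i = offset / per_client;

	offset %= per_client;
	printf("use client[%u] at chip offset 0x%02X\n", i, offset);
	return 0;
}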
-static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf,
- unsigned offset, size_t count)
+static ssize_t at24_eeprom_read_smbus(struct at24_data *at24, char *buf,
+ unsigned int offset, size_t count)
{
- struct i2c_msg msg[2];
- u8 msgbuf[2];
+ unsigned long timeout, read_time;
struct i2c_client *client;
+ int status;
+
+ client = at24_translate_offset(at24, &offset);
+
+ if (count > io_limit)
+ count = io_limit;
+
+ /* Smaller eeproms can work given some SMBus extension calls */
+ if (count > I2C_SMBUS_BLOCK_MAX)
+ count = I2C_SMBUS_BLOCK_MAX;
+
+ loop_until_timeout(timeout, read_time) {
+ status = i2c_smbus_read_i2c_block_data_or_emulated(client,
+ offset,
+ count, buf);
+
+ dev_dbg(&client->dev, "read %zu@%d --> %d (%ld)\n",
+ count, offset, status, jiffies);
+
+ if (status == count)
+ return count;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static ssize_t at24_eeprom_read_i2c(struct at24_data *at24, char *buf,
+ unsigned int offset, size_t count)
+{
unsigned long timeout, read_time;
+ struct i2c_client *client;
+ struct i2c_msg msg[2];
int status, i;
+ u8 msgbuf[2];
memset(msg, 0, sizeof(msg));
-
- /*
- * REVISIT some multi-address chips don't rollover page reads to
- * the next slave address, so we may need to truncate the count.
- * Those chips might need another quirk flag.
- *
- * If the real hardware used four adjacent 24c02 chips and that
- * were misconfigured as one 24c08, that would be a similar effect:
- * one "eeprom" file not four, but larger reads would fail when
- * they crossed certain pages.
- */
-
- /*
- * Slave address and byte offset derive from the offset. Always
- * set the byte address; on a multi-master board, another master
- * may have changed the chip's "current" address pointer.
- */
client = at24_translate_offset(at24, &offset);
if (count > io_limit)
count = io_limit;
- if (at24->use_smbus) {
- /* Smaller eeproms can work given some SMBus extension calls */
- if (count > I2C_SMBUS_BLOCK_MAX)
- count = I2C_SMBUS_BLOCK_MAX;
- } else {
- /*
- * When we have a better choice than SMBus calls, use a
- * combined I2C message. Write address; then read up to
- * io_limit data bytes. Note that read page rollover helps us
- * here (unlike writes). msgbuf is u8 and will cast to our
- * needs.
- */
- i = 0;
- if (at24->chip.flags & AT24_FLAG_ADDR16)
- msgbuf[i++] = offset >> 8;
- msgbuf[i++] = offset;
-
- msg[0].addr = client->addr;
- msg[0].buf = msgbuf;
- msg[0].len = i;
-
- msg[1].addr = client->addr;
- msg[1].flags = I2C_M_RD;
- msg[1].buf = buf;
- msg[1].len = count;
- }
-
/*
- * Reads fail if the previous write didn't complete yet. We may
- * loop a few times until this one succeeds, waiting at least
- * long enough for one entire page write to work.
+ * When we have a better choice than SMBus calls, use a combined I2C
+ * message. Write address; then read up to io_limit data bytes. Note
+ * that read page rollover helps us here (unlike writes). msgbuf is
+ * u8 and will cast to our needs.
*/
- timeout = jiffies + msecs_to_jiffies(write_timeout);
- do {
- read_time = jiffies;
- if (at24->use_smbus) {
- status = i2c_smbus_read_i2c_block_data_or_emulated(client, offset,
- count, buf);
- } else {
- status = i2c_transfer(client->adapter, msg, 2);
- if (status == 2)
- status = count;
- }
+ i = 0;
+ if (at24->chip.flags & AT24_FLAG_ADDR16)
+ msgbuf[i++] = offset >> 8;
+ msgbuf[i++] = offset;
+
+ msg[0].addr = client->addr;
+ msg[0].buf = msgbuf;
+ msg[0].len = i;
+
+ msg[1].addr = client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].buf = buf;
+ msg[1].len = count;
+
+ loop_until_timeout(timeout, read_time) {
+ status = i2c_transfer(client->adapter, msg, 2);
+ if (status == 2)
+ status = count;
+
dev_dbg(&client->dev, "read %zu@%d --> %d (%ld)\n",
count, offset, status, jiffies);
if (status == count)
return count;
-
- usleep_range(1000, 1500);
- } while (time_before(read_time, timeout));
+ }
return -ETIMEDOUT;
}
-static int at24_read(void *priv, unsigned int off, void *val, size_t count)
+static ssize_t at24_eeprom_read_serial(struct at24_data *at24, char *buf,
+ unsigned int offset, size_t count)
{
- struct at24_data *at24 = priv;
- char *buf = val;
+ unsigned long timeout, read_time;
+ struct i2c_client *client;
+ struct i2c_msg msg[2];
+ u8 addrbuf[2];
+ int status;
- if (unlikely(!count))
- return count;
+ client = at24_translate_offset(at24, &offset);
+
+ memset(msg, 0, sizeof(msg));
+ msg[0].addr = client->addr;
+ msg[0].buf = addrbuf;
/*
- * Read data from chip, protecting against concurrent updates
- * from this host, but not from other I2C masters.
+ * The address pointer of the device is shared between the regular
+ * EEPROM array and the serial number block. The dummy write (part of
+ * the sequential read protocol) ensures the address pointer is reset
+ * to the desired position.
*/
- mutex_lock(&at24->lock);
+ if (at24->chip.flags & AT24_FLAG_ADDR16) {
+ /*
+ * For 16 bit address pointers, the word address must contain
+ * a '10' sequence in bits 11 and 10 regardless of the
+ * intended position of the address pointer.
+ */
+ addrbuf[0] = 0x08;
+ addrbuf[1] = offset;
+ msg[0].len = 2;
+ } else {
+ /*
+ * Otherwise the word address must begin with a '10' sequence,
+ * regardless of the intended address.
+ */
+ addrbuf[0] = 0x80 + offset;
+ msg[0].len = 1;
+ }
- while (count) {
- int status;
+ msg[1].addr = client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].buf = buf;
+ msg[1].len = count;
- status = at24_eeprom_read(at24, buf, off, count);
- if (status < 0) {
- mutex_unlock(&at24->lock);
- return status;
- }
- buf += status;
- off += status;
- count -= status;
+ loop_until_timeout(timeout, read_time) {
+ status = i2c_transfer(client->adapter, msg, 2);
+ if (status == 2)
+ return count;
}
- mutex_unlock(&at24->lock);
+ return -ETIMEDOUT;
+}
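
A quick check of the two dummy-write encodings above (the offset is illustrative; the point is where the '10' marker lands in each word-address width):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned offset = 0x04;

	/* 16-bit pointer: '10' in bits 11..10 -> 0x08 in the high byte */
	uint16_t addr16 = (0x08 << 8) | offset;
	/* 8-bit pointer: '10' in bits 7..6 -> 0x80 + offset */
	uint8_t addr8 = 0x80 + offset;

	printf("16-bit dummy write: 0x%04X\n", addr16);	/* 0x0804 */
	printf(" 8-bit dummy write: 0x%02X\n", addr8);	/* 0x84 */
	return 0;
}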
- return 0;
+static ssize_t at24_eeprom_read_mac(struct at24_data *at24, char *buf,
+ unsigned int offset, size_t count)
+{
+ unsigned long timeout, read_time;
+ struct i2c_client *client;
+ struct i2c_msg msg[2];
+ u8 addrbuf[2];
+ int status;
+
+ client = at24_translate_offset(at24, &offset);
+
+ memset(msg, 0, sizeof(msg));
+ msg[0].addr = client->addr;
+ msg[0].buf = addrbuf;
+ addrbuf[0] = 0x90 + offset;
+ msg[0].len = 1;
+ msg[1].addr = client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].buf = buf;
+ msg[1].len = count;
+
+ loop_until_timeout(timeout, read_time) {
+ status = i2c_transfer(client->adapter, msg, 2);
+ if (status == 2)
+ return count;
+ }
+
+ return -ETIMEDOUT;
}
/*
@@ -286,21 +386,15 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
* chip is normally write protected. But there are plenty of product
* variants here, including OTP fuses and partial chip protect.
*
- * We only use page mode writes; the alternative is sloooow. This routine
- * writes at most one page.
+ * We only use page mode writes; the alternative is sloooow. These routines
+ * write at most one page.
*/
-static ssize_t at24_eeprom_write(struct at24_data *at24, const char *buf,
- unsigned offset, size_t count)
+
+static size_t at24_adjust_write_count(struct at24_data *at24,
+ unsigned int offset, size_t count)
{
- struct i2c_client *client;
- struct i2c_msg msg;
- ssize_t status = 0;
- unsigned long timeout, write_time;
unsigned next_page;
- /* Get corresponding I2C address and adjust offset */
- client = at24_translate_offset(at24, &offset);
-
/* write_max is at most a page */
if (count > at24->write_max)
count = at24->write_max;
@@ -310,62 +404,132 @@ static ssize_t at24_eeprom_write(struct at24_data *at24, const char *buf,
if (offset + count > next_page)
count = next_page - offset;
- /* If we'll use I2C calls for I/O, set up the message */
- if (!at24->use_smbus) {
- int i = 0;
+ return count;
+}
- msg.addr = client->addr;
- msg.flags = 0;
+static ssize_t at24_eeprom_write_smbus_block(struct at24_data *at24,
+ const char *buf,
+ unsigned int offset, size_t count)
+{
+ unsigned long timeout, write_time;
+ struct i2c_client *client;
+ ssize_t status = 0;
- /* msg.buf is u8 and casts will mask the values */
- msg.buf = at24->writebuf;
- if (at24->chip.flags & AT24_FLAG_ADDR16)
- msg.buf[i++] = offset >> 8;
+ client = at24_translate_offset(at24, &offset);
+ count = at24_adjust_write_count(at24, offset, count);
+
+ loop_until_timeout(timeout, write_time) {
+ status = i2c_smbus_write_i2c_block_data(client,
+ offset, count, buf);
+ if (status == 0)
+ status = count;
+
+ dev_dbg(&client->dev, "write %zu@%d --> %zd (%ld)\n",
+ count, offset, status, jiffies);
- msg.buf[i++] = offset;
- memcpy(&msg.buf[i], buf, count);
- msg.len = i + count;
+ if (status == count)
+ return count;
}
- /*
- * Writes fail if the previous one didn't complete yet. We may
- * loop a few times until this one succeeds, waiting at least
- * long enough for one entire page write to work.
- */
- timeout = jiffies + msecs_to_jiffies(write_timeout);
- do {
- write_time = jiffies;
- if (at24->use_smbus_write) {
- switch (at24->use_smbus_write) {
- case I2C_SMBUS_I2C_BLOCK_DATA:
- status = i2c_smbus_write_i2c_block_data(client,
- offset, count, buf);
- break;
- case I2C_SMBUS_BYTE_DATA:
- status = i2c_smbus_write_byte_data(client,
- offset, buf[0]);
- break;
- }
-
- if (status == 0)
- status = count;
- } else {
- status = i2c_transfer(client->adapter, &msg, 1);
- if (status == 1)
- status = count;
- }
+ return -ETIMEDOUT;
+}
+
+static ssize_t at24_eeprom_write_smbus_byte(struct at24_data *at24,
+ const char *buf,
+ unsigned int offset, size_t count)
+{
+ unsigned long timeout, write_time;
+ struct i2c_client *client;
+ ssize_t status = 0;
+
+ client = at24_translate_offset(at24, &offset);
+
+ loop_until_timeout(timeout, write_time) {
+ status = i2c_smbus_write_byte_data(client, offset, buf[0]);
+ if (status == 0)
+ status = count;
+
dev_dbg(&client->dev, "write %zu@%d --> %zd (%ld)\n",
count, offset, status, jiffies);
if (status == count)
return count;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static ssize_t at24_eeprom_write_i2c(struct at24_data *at24, const char *buf,
+ unsigned int offset, size_t count)
+{
+ unsigned long timeout, write_time;
+ struct i2c_client *client;
+ struct i2c_msg msg;
+ ssize_t status = 0;
+ int i = 0;
+
+ client = at24_translate_offset(at24, &offset);
+ count = at24_adjust_write_count(at24, offset, count);
+
+ msg.addr = client->addr;
+ msg.flags = 0;
+
+ /* msg.buf is u8 and casts will mask the values */
+ msg.buf = at24->writebuf;
+ if (at24->chip.flags & AT24_FLAG_ADDR16)
+ msg.buf[i++] = offset >> 8;
- usleep_range(1000, 1500);
- } while (time_before(write_time, timeout));
+ msg.buf[i++] = offset;
+ memcpy(&msg.buf[i], buf, count);
+ msg.len = i + count;
+
+ loop_until_timeout(timeout, write_time) {
+ status = i2c_transfer(client->adapter, &msg, 1);
+ if (status == 1)
+ status = count;
+
+ dev_dbg(&client->dev, "write %zu@%d --> %zd (%ld)\n",
+ count, offset, status, jiffies);
+
+ if (status == count)
+ return count;
+ }
return -ETIMEDOUT;
}
+static int at24_read(void *priv, unsigned int off, void *val, size_t count)
+{
+ struct at24_data *at24 = priv;
+ char *buf = val;
+
+ if (unlikely(!count))
+ return count;
+
+ /*
+ * Read data from chip, protecting against concurrent updates
+ * from this host, but not from other I2C masters.
+ */
+ mutex_lock(&at24->lock);
+
+ while (count) {
+ int status;
+
+ status = at24->read_func(at24, buf, off, count);
+ if (status < 0) {
+ mutex_unlock(&at24->lock);
+ return status;
+ }
+ buf += status;
+ off += status;
+ count -= status;
+ }
+
+ mutex_unlock(&at24->lock);
+
+ return 0;
+}
+
static int at24_write(void *priv, unsigned int off, void *val, size_t count)
{
struct at24_data *at24 = priv;
@@ -383,7 +547,7 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
while (count) {
int status;
- status = at24_eeprom_write(at24, buf, off, count);
+ status = at24->write_func(at24, buf, off, count);
if (status < 0) {
mutex_unlock(&at24->lock);
return status;
@@ -400,7 +564,7 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
#ifdef CONFIG_OF
static void at24_get_ofdata(struct i2c_client *client,
- struct at24_platform_data *chip)
+ struct at24_platform_data *chip)
{
const __be32 *val;
struct device_node *node = client->dev.of_node;
@@ -415,7 +579,7 @@ static void at24_get_ofdata(struct i2c_client *client,
}
#else
static void at24_get_ofdata(struct i2c_client *client,
- struct at24_platform_data *chip)
+ struct at24_platform_data *chip)
{ }
#endif /* CONFIG_OF */
@@ -518,6 +682,30 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
at24->chip = chip;
at24->num_addresses = num_addresses;
+ if ((chip.flags & AT24_FLAG_SERIAL) && (chip.flags & AT24_FLAG_MAC)) {
+ dev_err(&client->dev,
+ "invalid device data - cannot have both AT24_FLAG_SERIAL & AT24_FLAG_MAC.");
+ return -EINVAL;
+ }
+
+ if (chip.flags & AT24_FLAG_SERIAL) {
+ at24->read_func = at24_eeprom_read_serial;
+ } else if (chip.flags & AT24_FLAG_MAC) {
+ at24->read_func = at24_eeprom_read_mac;
+ } else {
+ at24->read_func = at24->use_smbus ? at24_eeprom_read_smbus
+ : at24_eeprom_read_i2c;
+ }
+
+ if (at24->use_smbus) {
+ if (at24->use_smbus_write == I2C_SMBUS_I2C_BLOCK_DATA)
+ at24->write_func = at24_eeprom_write_smbus_block;
+ else
+ at24->write_func = at24_eeprom_write_smbus_byte;
+ } else {
+ at24->write_func = at24_eeprom_write_i2c;
+ }
+
writable = !(chip.flags & AT24_FLAG_READONLY);
if (writable) {
if (!use_smbus || use_smbus_write) {
diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c
index 4cf8f82cf..a70b853fa 100644
--- a/drivers/misc/genwqe/card_base.c
+++ b/drivers/misc/genwqe/card_base.c
@@ -182,7 +182,7 @@ static void genwqe_dev_free(struct genwqe_dev *cd)
*/
static int genwqe_bus_reset(struct genwqe_dev *cd)
{
- int bars, rc = 0;
+ int rc = 0;
struct pci_dev *pci_dev = cd->pci_dev;
void __iomem *mmio;
@@ -193,8 +193,7 @@ static int genwqe_bus_reset(struct genwqe_dev *cd)
cd->mmio = NULL;
pci_iounmap(pci_dev, mmio);
- bars = pci_select_bars(pci_dev, IORESOURCE_MEM);
- pci_release_selected_regions(pci_dev, bars);
+ pci_release_mem_regions(pci_dev);
/*
* Firmware/BIOS might change memory mapping during bus reset.
@@ -218,7 +217,7 @@ static int genwqe_bus_reset(struct genwqe_dev *cd)
GENWQE_INJECT_GFIR_FATAL |
GENWQE_INJECT_GFIR_INFO);
- rc = pci_request_selected_regions(pci_dev, bars, genwqe_driver_name);
+ rc = pci_request_mem_regions(pci_dev, genwqe_driver_name);
if (rc) {
dev_err(&pci_dev->dev,
"[%s] err: request bars failed (%d)\n", __func__, rc);
@@ -1068,10 +1067,9 @@ static int genwqe_health_check_stop(struct genwqe_dev *cd)
*/
static int genwqe_pci_setup(struct genwqe_dev *cd)
{
- int err, bars;
+ int err;
struct pci_dev *pci_dev = cd->pci_dev;
- bars = pci_select_bars(pci_dev, IORESOURCE_MEM);
err = pci_enable_device_mem(pci_dev);
if (err) {
dev_err(&pci_dev->dev,
@@ -1080,7 +1078,7 @@ static int genwqe_pci_setup(struct genwqe_dev *cd)
}
/* Reserve PCI I/O and memory resources */
- err = pci_request_selected_regions(pci_dev, bars, genwqe_driver_name);
+ err = pci_request_mem_regions(pci_dev, genwqe_driver_name);
if (err) {
dev_err(&pci_dev->dev,
"[%s] err: request bars failed (%d)\n", __func__, err);
@@ -1142,7 +1140,7 @@ static int genwqe_pci_setup(struct genwqe_dev *cd)
out_iounmap:
pci_iounmap(pci_dev, cd->mmio);
out_release_resources:
- pci_release_selected_regions(pci_dev, bars);
+ pci_release_mem_regions(pci_dev);
err_disable_device:
pci_disable_device(pci_dev);
err_out:
@@ -1154,14 +1152,12 @@ static int genwqe_pci_setup(struct genwqe_dev *cd)
*/
static void genwqe_pci_remove(struct genwqe_dev *cd)
{
- int bars;
struct pci_dev *pci_dev = cd->pci_dev;
if (cd->mmio)
pci_iounmap(pci_dev, cd->mmio);
- bars = pci_select_bars(pci_dev, IORESOURCE_MEM);
- pci_release_selected_regions(pci_dev, bars);
+ pci_release_mem_regions(pci_dev);
pci_disable_device(pci_dev);
}
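
The repeated pci_select_bars() + pci_request_selected_regions() pairs collapse into the pci_request_mem_regions()/pci_release_mem_regions() helpers new in 4.8. To my reading they are thin static-inline wrappers equivalent to the sketch below; check include/linux/pci.h for the authoritative definitions:

/* Sketch of the equivalence this conversion relies on: */
static inline int request_mem_regions_sketch(struct pci_dev *pdev,
					     const char *name)
{
	return pci_request_selected_regions(pdev,
			pci_select_bars(pdev, IORESOURCE_MEM), name);
}

static inline void release_mem_regions_sketch(struct pci_dev *pdev)
{
	pci_release_selected_regions(pdev,
			pci_select_bars(pdev, IORESOURCE_MEM));
}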
diff --git a/drivers/misc/lkdtm.h b/drivers/misc/lkdtm.h
new file mode 100644
index 000000000..fdf954c21
--- /dev/null
+++ b/drivers/misc/lkdtm.h
@@ -0,0 +1,60 @@
+#ifndef __LKDTM_H
+#define __LKDTM_H
+
+#define pr_fmt(fmt) "lkdtm: " fmt
+
+#include <linux/kernel.h>
+
+/* lkdtm_bugs.c */
+void __init lkdtm_bugs_init(int *recur_param);
+void lkdtm_PANIC(void);
+void lkdtm_BUG(void);
+void lkdtm_WARNING(void);
+void lkdtm_EXCEPTION(void);
+void lkdtm_LOOP(void);
+void lkdtm_OVERFLOW(void);
+void lkdtm_CORRUPT_STACK(void);
+void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void);
+void lkdtm_SOFTLOCKUP(void);
+void lkdtm_HARDLOCKUP(void);
+void lkdtm_SPINLOCKUP(void);
+void lkdtm_HUNG_TASK(void);
+void lkdtm_ATOMIC_UNDERFLOW(void);
+void lkdtm_ATOMIC_OVERFLOW(void);
+
+/* lkdtm_heap.c */
+void lkdtm_OVERWRITE_ALLOCATION(void);
+void lkdtm_WRITE_AFTER_FREE(void);
+void lkdtm_READ_AFTER_FREE(void);
+void lkdtm_WRITE_BUDDY_AFTER_FREE(void);
+void lkdtm_READ_BUDDY_AFTER_FREE(void);
+
+/* lkdtm_perms.c */
+void __init lkdtm_perms_init(void);
+void lkdtm_WRITE_RO(void);
+void lkdtm_WRITE_RO_AFTER_INIT(void);
+void lkdtm_WRITE_KERN(void);
+void lkdtm_EXEC_DATA(void);
+void lkdtm_EXEC_STACK(void);
+void lkdtm_EXEC_KMALLOC(void);
+void lkdtm_EXEC_VMALLOC(void);
+void lkdtm_EXEC_RODATA(void);
+void lkdtm_EXEC_USERSPACE(void);
+void lkdtm_ACCESS_USERSPACE(void);
+
+/* lkdtm_rodata.c */
+void lkdtm_rodata_do_nothing(void);
+
+/* lkdtm_usercopy.c */
+void __init lkdtm_usercopy_init(void);
+void __exit lkdtm_usercopy_exit(void);
+void lkdtm_USERCOPY_HEAP_SIZE_TO(void);
+void lkdtm_USERCOPY_HEAP_SIZE_FROM(void);
+void lkdtm_USERCOPY_HEAP_FLAG_TO(void);
+void lkdtm_USERCOPY_HEAP_FLAG_FROM(void);
+void lkdtm_USERCOPY_STACK_FRAME_TO(void);
+void lkdtm_USERCOPY_STACK_FRAME_FROM(void);
+void lkdtm_USERCOPY_STACK_BEYOND(void);
+void lkdtm_USERCOPY_KERNEL(void);
+
+#endif
diff --git a/drivers/misc/lkdtm_bugs.c b/drivers/misc/lkdtm_bugs.c
new file mode 100644
index 000000000..182ae1894
--- /dev/null
+++ b/drivers/misc/lkdtm_bugs.c
@@ -0,0 +1,148 @@
+/*
+ * This is for all the tests related to logic bugs (e.g. bad dereferences,
+ * bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
+ * lockups) along with other things that don't fit well into existing LKDTM
+ * test source files.
+ */
+#include "lkdtm.h"
+#include <linux/sched.h>
+
+/*
+ * Make sure our attempts to overrun the kernel stack don't trigger
+ * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
+ * recurse past the end of THREAD_SIZE by default.
+ */
+#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
+#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2)
+#else
+#define REC_STACK_SIZE (THREAD_SIZE / 8)
+#endif
+#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
+
+static int recur_count = REC_NUM_DEFAULT;
+
+static DEFINE_SPINLOCK(lock_me_up);
+
+static int recursive_loop(int remaining)
+{
+ char buf[REC_STACK_SIZE];
+
+ /* Make sure compiler does not optimize this away. */
+ memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE);
+ if (!remaining)
+ return 0;
+ else
+ return recursive_loop(remaining - 1);
+}
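
The arithmetic behind the defaults: each recursion pins REC_STACK_SIZE bytes (sized to stay under the frame-size warning), and REC_NUM_DEFAULT is twice the number of frames that fit in THREAD_SIZE, guaranteeing an overflow. With hypothetical values THREAD_SIZE = 16 KiB and CONFIG_FRAME_WARN = 2048:

#include <stdio.h>

int main(void)
{
	unsigned thread_size = 16 * 1024;	/* hypothetical THREAD_SIZE */
	unsigned frame_warn = 2048;		/* hypothetical CONFIG_FRAME_WARN */
	unsigned rec_stack_size = frame_warn / 2;
	unsigned rec_num_default = (thread_size / rec_stack_size) * 2;

	/* Prints REC_STACK_SIZE=1024 REC_NUM_DEFAULT=32 */
	printf("REC_STACK_SIZE=%u REC_NUM_DEFAULT=%u\n",
	       rec_stack_size, rec_num_default);
	return 0;
}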
+
+/* If the depth is negative, use the default, otherwise keep parameter. */
+void __init lkdtm_bugs_init(int *recur_param)
+{
+ if (*recur_param < 0)
+ *recur_param = recur_count;
+ else
+ recur_count = *recur_param;
+}
+
+void lkdtm_PANIC(void)
+{
+ panic("dumptest");
+}
+
+void lkdtm_BUG(void)
+{
+ BUG();
+}
+
+void lkdtm_WARNING(void)
+{
+ WARN_ON(1);
+}
+
+void lkdtm_EXCEPTION(void)
+{
+ *((int *) 0) = 0;
+}
+
+void lkdtm_LOOP(void)
+{
+ for (;;)
+ ;
+}
+
+void lkdtm_OVERFLOW(void)
+{
+ (void) recursive_loop(recur_count);
+}
+
+noinline void lkdtm_CORRUPT_STACK(void)
+{
+ /* Use default char array length that triggers stack protection. */
+ char data[8];
+
+ memset((void *)data, 0, 64);
+}
+
+void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
+{
+ static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
+ u32 *p;
+ u32 val = 0x12345678;
+
+ p = (u32 *)(data + 1);
+ if (*p == 0)
+ val = 0x87654321;
+ *p = val;
+}
+
+void lkdtm_SOFTLOCKUP(void)
+{
+ preempt_disable();
+ for (;;)
+ cpu_relax();
+}
+
+void lkdtm_HARDLOCKUP(void)
+{
+ local_irq_disable();
+ for (;;)
+ cpu_relax();
+}
+
+void lkdtm_SPINLOCKUP(void)
+{
+ /* Must be called twice to trigger. */
+ spin_lock(&lock_me_up);
+ /* Let sparse know we intended to exit holding the lock. */
+ __release(&lock_me_up);
+}
+
+void lkdtm_HUNG_TASK(void)
+{
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule();
+}
+
+void lkdtm_ATOMIC_UNDERFLOW(void)
+{
+ atomic_t under = ATOMIC_INIT(INT_MIN);
+
+ pr_info("attempting good atomic increment\n");
+ atomic_inc(&under);
+ atomic_dec(&under);
+
+ pr_info("attempting bad atomic underflow\n");
+ atomic_dec(&under);
+}
+
+void lkdtm_ATOMIC_OVERFLOW(void)
+{
+ atomic_t over = ATOMIC_INIT(INT_MAX);
+
+ pr_info("attempting good atomic decrement\n");
+ atomic_dec(&over);
+ atomic_inc(&over);
+
+ pr_info("attempting bad atomic overflow\n");
+ atomic_inc(&over);
+}
diff --git a/drivers/misc/lkdtm_core.c b/drivers/misc/lkdtm_core.c
new file mode 100644
index 000000000..f9154b8d6
--- /dev/null
+++ b/drivers/misc/lkdtm_core.c
@@ -0,0 +1,544 @@
+/*
+ * Linux Kernel Dump Test Module for testing kernel crash conditions:
+ * induces system failures at predefined crashpoints and under predefined
+ * operational conditions in order to evaluate the reliability of kernel
+ * sanity checking and crash dumps obtained using different dumping
+ * solutions.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2006
+ *
+ * Author: Ankita Garg <ankita@in.ibm.com>
+ *
+ * It is adapted from the Linux Kernel Dump Test Tool by
+ * Fernando Luis Vazquez Cao <http://lkdtt.sourceforge.net>
+ *
+ * Debugfs support added by Simon Kagstrom <simon.kagstrom@netinsight.net>
+ *
+ * See Documentation/fault-injection/provoke-crashes.txt for instructions
+ */
+#include "lkdtm.h"
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/buffer_head.h>
+#include <linux/kprobes.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/hrtimer.h>
+#include <linux/slab.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/debugfs.h>
+
+#ifdef CONFIG_IDE
+#include <linux/ide.h>
+#endif
+
+#define DEFAULT_COUNT 10
+
+static int lkdtm_debugfs_open(struct inode *inode, struct file *file);
+static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
+ size_t count, loff_t *off);
+static ssize_t direct_entry(struct file *f, const char __user *user_buf,
+ size_t count, loff_t *off);
+
+#ifdef CONFIG_KPROBES
+static void lkdtm_handler(void);
+static ssize_t lkdtm_debugfs_entry(struct file *f,
+ const char __user *user_buf,
+ size_t count, loff_t *off);
+
+
+/* jprobe entry point handlers. */
+static unsigned int jp_do_irq(unsigned int irq)
+{
+ lkdtm_handler();
+ jprobe_return();
+ return 0;
+}
+
+static irqreturn_t jp_handle_irq_event(unsigned int irq,
+ struct irqaction *action)
+{
+ lkdtm_handler();
+ jprobe_return();
+ return 0;
+}
+
+static void jp_tasklet_action(struct softirq_action *a)
+{
+ lkdtm_handler();
+ jprobe_return();
+}
+
+static void jp_ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
+{
+ lkdtm_handler();
+ jprobe_return();
+}
+
+struct scan_control;
+
+static unsigned long jp_shrink_inactive_list(unsigned long max_scan,
+ struct zone *zone,
+ struct scan_control *sc)
+{
+ lkdtm_handler();
+ jprobe_return();
+ return 0;
+}
+
+static int jp_hrtimer_start(struct hrtimer *timer, ktime_t tim,
+ const enum hrtimer_mode mode)
+{
+ lkdtm_handler();
+ jprobe_return();
+ return 0;
+}
+
+static int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd)
+{
+ lkdtm_handler();
+ jprobe_return();
+ return 0;
+}
+
+# ifdef CONFIG_IDE
+static int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file,
+ struct block_device *bdev, unsigned int cmd,
+ unsigned long arg)
+{
+ lkdtm_handler();
+ jprobe_return();
+ return 0;
+}
+# endif
+#endif
+
+/* Crash points */
+struct crashpoint {
+ const char *name;
+ const struct file_operations fops;
+ struct jprobe jprobe;
+};
+
+#define CRASHPOINT(_name, _write, _symbol, _entry) \
+ { \
+ .name = _name, \
+ .fops = { \
+ .read = lkdtm_debugfs_read, \
+ .llseek = generic_file_llseek, \
+ .open = lkdtm_debugfs_open, \
+ .write = _write, \
+ }, \
+ .jprobe = { \
+ .kp.symbol_name = _symbol, \
+ .entry = (kprobe_opcode_t *)_entry, \
+ }, \
+ }
+
+/* Define the possible places where we can trigger a crash point. */
+struct crashpoint crashpoints[] = {
+ CRASHPOINT("DIRECT", direct_entry,
+ NULL, NULL),
+#ifdef CONFIG_KPROBES
+ CRASHPOINT("INT_HARDWARE_ENTRY", lkdtm_debugfs_entry,
+ "do_IRQ", jp_do_irq),
+ CRASHPOINT("INT_HW_IRQ_EN", lkdtm_debugfs_entry,
+ "handle_IRQ_event", jp_handle_irq_event),
+ CRASHPOINT("INT_TASKLET_ENTRY", lkdtm_debugfs_entry,
+ "tasklet_action", jp_tasklet_action),
+ CRASHPOINT("FS_DEVRW", lkdtm_debugfs_entry,
+ "ll_rw_block", jp_ll_rw_block),
+ CRASHPOINT("MEM_SWAPOUT", lkdtm_debugfs_entry,
+ "shrink_inactive_list", jp_shrink_inactive_list),
+ CRASHPOINT("TIMERADD", lkdtm_debugfs_entry,
+ "hrtimer_start", jp_hrtimer_start),
+ CRASHPOINT("SCSI_DISPATCH_CMD", lkdtm_debugfs_entry,
+ "scsi_dispatch_cmd", jp_scsi_dispatch_cmd),
+# ifdef CONFIG_IDE
+ CRASHPOINT("IDE_CORE_CP", lkdtm_debugfs_entry,
+ "generic_ide_ioctl", jp_generic_ide_ioctl),
+# endif
+#endif
+};
+
+
+/* Crash types. */
+struct crashtype {
+ const char *name;
+ void (*func)(void);
+};
+
+#define CRASHTYPE(_name) \
+ { \
+ .name = __stringify(_name), \
+ .func = lkdtm_ ## _name, \
+ }
+
+/* Define the possible types of crashes that can be triggered. */
+struct crashtype crashtypes[] = {
+ CRASHTYPE(PANIC),
+ CRASHTYPE(BUG),
+ CRASHTYPE(WARNING),
+ CRASHTYPE(EXCEPTION),
+ CRASHTYPE(LOOP),
+ CRASHTYPE(OVERFLOW),
+ CRASHTYPE(CORRUPT_STACK),
+ CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
+ CRASHTYPE(OVERWRITE_ALLOCATION),
+ CRASHTYPE(WRITE_AFTER_FREE),
+ CRASHTYPE(READ_AFTER_FREE),
+ CRASHTYPE(WRITE_BUDDY_AFTER_FREE),
+ CRASHTYPE(READ_BUDDY_AFTER_FREE),
+ CRASHTYPE(SOFTLOCKUP),
+ CRASHTYPE(HARDLOCKUP),
+ CRASHTYPE(SPINLOCKUP),
+ CRASHTYPE(HUNG_TASK),
+ CRASHTYPE(EXEC_DATA),
+ CRASHTYPE(EXEC_STACK),
+ CRASHTYPE(EXEC_KMALLOC),
+ CRASHTYPE(EXEC_VMALLOC),
+ CRASHTYPE(EXEC_RODATA),
+ CRASHTYPE(EXEC_USERSPACE),
+ CRASHTYPE(ACCESS_USERSPACE),
+ CRASHTYPE(WRITE_RO),
+ CRASHTYPE(WRITE_RO_AFTER_INIT),
+ CRASHTYPE(WRITE_KERN),
+ CRASHTYPE(ATOMIC_UNDERFLOW),
+ CRASHTYPE(ATOMIC_OVERFLOW),
+ CRASHTYPE(USERCOPY_HEAP_SIZE_TO),
+ CRASHTYPE(USERCOPY_HEAP_SIZE_FROM),
+ CRASHTYPE(USERCOPY_HEAP_FLAG_TO),
+ CRASHTYPE(USERCOPY_HEAP_FLAG_FROM),
+ CRASHTYPE(USERCOPY_STACK_FRAME_TO),
+ CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
+ CRASHTYPE(USERCOPY_STACK_BEYOND),
+ CRASHTYPE(USERCOPY_KERNEL),
+};
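
CRASHTYPE() keeps the table self-maintaining: __stringify() turns the bare token into the user-visible name while token pasting binds it to the lkdtm_-prefixed handler, so the two can never drift apart. A minimal standalone rendering of the expansion:

#include <stdio.h>

#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

struct crashtype {
	const char *name;
	void (*func)(void);
};

#define CRASHTYPE(_name) \
	{ .name = __stringify(_name), .func = lkdtm_ ## _name }

static void lkdtm_PANIC(void) { puts("would panic here"); }

static struct crashtype crashtypes[] = {
	CRASHTYPE(PANIC),	/* expands to { "PANIC", lkdtm_PANIC } */
};

int main(void)
{
	printf("%s -> ", crashtypes[0].name);
	crashtypes[0].func();
	return 0;
}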
+
+
+/* Global jprobe entry and crashtype. */
+static struct jprobe *lkdtm_jprobe;
+struct crashpoint *lkdtm_crashpoint;
+struct crashtype *lkdtm_crashtype;
+
+/* Module parameters */
+static int recur_count = -1;
+module_param(recur_count, int, 0644);
+MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test");
+
+static char* cpoint_name;
+module_param(cpoint_name, charp, 0444);
+MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed");
+
+static char* cpoint_type;
+module_param(cpoint_type, charp, 0444);
+MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "\
+ "hitting the crash point");
+
+static int cpoint_count = DEFAULT_COUNT;
+module_param(cpoint_count, int, 0644);
+MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
+ "crash point is to be hit to trigger action");
+
+
+/* Return the matching crashtype or NULL if the name is invalid */
+static struct crashtype *find_crashtype(const char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(crashtypes); i++) {
+ if (!strcmp(name, crashtypes[i].name))
+ return &crashtypes[i];
+ }
+
+ return NULL;
+}
+
+/*
+ * This is forced noinline just so it distinctly shows up in the stackdump
+ * which makes validation of expected lkdtm crashes easier.
+ */
+static noinline void lkdtm_do_action(struct crashtype *crashtype)
+{
+ BUG_ON(!crashtype || !crashtype->func);
+ crashtype->func();
+}
+
+static int lkdtm_register_cpoint(struct crashpoint *crashpoint,
+ struct crashtype *crashtype)
+{
+ int ret;
+
+ /* If this doesn't have a symbol, just call immediately. */
+ if (!crashpoint->jprobe.kp.symbol_name) {
+ lkdtm_do_action(crashtype);
+ return 0;
+ }
+
+ if (lkdtm_jprobe != NULL)
+ unregister_jprobe(lkdtm_jprobe);
+
+ lkdtm_crashpoint = crashpoint;
+ lkdtm_crashtype = crashtype;
+ lkdtm_jprobe = &crashpoint->jprobe;
+ ret = register_jprobe(lkdtm_jprobe);
+ if (ret < 0) {
+ pr_info("Couldn't register jprobe %s\n",
+ crashpoint->jprobe.kp.symbol_name);
+ lkdtm_jprobe = NULL;
+ lkdtm_crashpoint = NULL;
+ lkdtm_crashtype = NULL;
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_KPROBES
+/* Global crash counter and spinlock. */
+static int crash_count = DEFAULT_COUNT;
+static DEFINE_SPINLOCK(crash_count_lock);
+
+/* Called by jprobe entry points. */
+static void lkdtm_handler(void)
+{
+ unsigned long flags;
+ bool do_it = false;
+
+ BUG_ON(!lkdtm_crashpoint || !lkdtm_crashtype);
+
+ spin_lock_irqsave(&crash_count_lock, flags);
+ crash_count--;
+ pr_info("Crash point %s of type %s hit, trigger in %d rounds\n",
+ lkdtm_crashpoint->name, lkdtm_crashtype->name, crash_count);
+
+ if (crash_count == 0) {
+ do_it = true;
+ crash_count = cpoint_count;
+ }
+ spin_unlock_irqrestore(&crash_count_lock, flags);
+
+ if (do_it)
+ lkdtm_do_action(lkdtm_crashtype);
+}
+
+static ssize_t lkdtm_debugfs_entry(struct file *f,
+ const char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ struct crashpoint *crashpoint = file_inode(f)->i_private;
+ struct crashtype *crashtype = NULL;
+ char *buf;
+ int err;
+
+ if (count >= PAGE_SIZE)
+ return -EINVAL;
+
+ buf = (char *)__get_free_page(GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ if (copy_from_user(buf, user_buf, count)) {
+ free_page((unsigned long) buf);
+ return -EFAULT;
+ }
+	/* NUL-terminate and strip any trailing newline */
+ buf[count] = '\0';
+ strim(buf);
+
+ crashtype = find_crashtype(buf);
+ free_page((unsigned long)buf);
+
+ if (!crashtype)
+ return -EINVAL;
+
+ err = lkdtm_register_cpoint(crashpoint, crashtype);
+ if (err < 0)
+ return err;
+
+ *off += count;
+
+ return count;
+}
+#endif
+
+/* Generic read callback that just prints out the available crash types */
+static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ char *buf;
+ int i, n, out;
+
+ buf = (char *)__get_free_page(GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+	/* scnprintf() bounds n to the page, so the NUL store below is safe. */
+	n = scnprintf(buf, PAGE_SIZE, "Available crash types:\n");
+	for (i = 0; i < ARRAY_SIZE(crashtypes); i++) {
+		n += scnprintf(buf + n, PAGE_SIZE - n, "%s\n",
+			       crashtypes[i].name);
+	}
+ buf[n] = '\0';
+
+ out = simple_read_from_buffer(user_buf, count, off,
+ buf, n);
+ free_page((unsigned long) buf);
+
+ return out;
+}
+
+static int lkdtm_debugfs_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+/* Special entry to just crash directly. Available without KPROBEs */
+static ssize_t direct_entry(struct file *f, const char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ struct crashtype *crashtype;
+ char *buf;
+
+ if (count >= PAGE_SIZE)
+ return -EINVAL;
+ if (count < 1)
+ return -EINVAL;
+
+ buf = (char *)__get_free_page(GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ if (copy_from_user(buf, user_buf, count)) {
+ free_page((unsigned long) buf);
+ return -EFAULT;
+ }
+	/* NUL-terminate and strip any trailing newline */
+ buf[count] = '\0';
+ strim(buf);
+
+ crashtype = find_crashtype(buf);
+ free_page((unsigned long) buf);
+ if (!crashtype)
+ return -EINVAL;
+
+ pr_info("Performing direct entry %s\n", crashtype->name);
+ lkdtm_do_action(crashtype);
+ *off += count;
+
+ return count;
+}
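+
+/*
+ * Userspace sketch for the DIRECT entry (assumes debugfs is mounted at
+ * /sys/kernel/debug and this module is loaded):
+ *
+ *	int fd = open("/sys/kernel/debug/provoke-crash/DIRECT", O_WRONLY);
+ *
+ *	if (fd >= 0) {
+ *		write(fd, "WARNING", 7);
+ *		close(fd);
+ *	}
+ *
+ * Any name listed by lkdtm_debugfs_read() fires immediately.
+ */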
+
+static struct dentry *lkdtm_debugfs_root;
+
+static int __init lkdtm_module_init(void)
+{
+ struct crashpoint *crashpoint = NULL;
+ struct crashtype *crashtype = NULL;
+ int ret = -EINVAL;
+ int i;
+
+ /* Neither or both of these need to be set */
+ if ((cpoint_type || cpoint_name) && !(cpoint_type && cpoint_name)) {
+ pr_err("Need both cpoint_type and cpoint_name or neither\n");
+ return -EINVAL;
+ }
+
+ if (cpoint_type) {
+ crashtype = find_crashtype(cpoint_type);
+ if (!crashtype) {
+ pr_err("Unknown crashtype '%s'\n", cpoint_type);
+ return -EINVAL;
+ }
+ }
+
+ if (cpoint_name) {
+ for (i = 0; i < ARRAY_SIZE(crashpoints); i++) {
+ if (!strcmp(cpoint_name, crashpoints[i].name))
+ crashpoint = &crashpoints[i];
+ }
+
+ /* Refuse unknown crashpoints. */
+ if (!crashpoint) {
+ pr_err("Invalid crashpoint %s\n", cpoint_name);
+ return -EINVAL;
+ }
+ }
+
+#ifdef CONFIG_KPROBES
+ /* Set crash count. */
+ crash_count = cpoint_count;
+#endif
+
+ /* Handle test-specific initialization. */
+ lkdtm_bugs_init(&recur_count);
+ lkdtm_perms_init();
+ lkdtm_usercopy_init();
+
+ /* Register debugfs interface */
+ lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL);
+ if (!lkdtm_debugfs_root) {
+ pr_err("creating root dir failed\n");
+ return -ENODEV;
+ }
+
+ /* Install debugfs trigger files. */
+ for (i = 0; i < ARRAY_SIZE(crashpoints); i++) {
+ struct crashpoint *cur = &crashpoints[i];
+ struct dentry *de;
+
+ de = debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root,
+ cur, &cur->fops);
+ if (de == NULL) {
+ pr_err("could not create crashpoint %s\n", cur->name);
+ goto out_err;
+ }
+ }
+
+ /* Install crashpoint if one was selected. */
+ if (crashpoint) {
+ ret = lkdtm_register_cpoint(crashpoint, crashtype);
+ if (ret < 0) {
+			pr_info("Couldn't register crashpoint %s\n",
+				crashpoint->name);
+ goto out_err;
+ }
+ pr_info("Crash point %s of type %s registered\n",
+ crashpoint->name, cpoint_type);
+ } else {
+ pr_info("No crash points registered, enable through debugfs\n");
+ }
+
+ return 0;
+
+out_err:
+ debugfs_remove_recursive(lkdtm_debugfs_root);
+ return ret;
+}
+
+static void __exit lkdtm_module_exit(void)
+{
+ debugfs_remove_recursive(lkdtm_debugfs_root);
+
+ /* Handle test-specific clean-up. */
+ lkdtm_usercopy_exit();
+
+ unregister_jprobe(lkdtm_jprobe);
+ pr_info("Crash point unregistered\n");
+}
+
+module_init(lkdtm_module_init);
+module_exit(lkdtm_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Kernel crash testing module");
diff --git a/drivers/misc/lkdtm_heap.c b/drivers/misc/lkdtm_heap.c
new file mode 100644
index 000000000..0f1581664
--- /dev/null
+++ b/drivers/misc/lkdtm_heap.c
@@ -0,0 +1,142 @@
+/*
+ * This is for all the tests relating directly to heap memory, including
+ * page allocation and slab allocations.
+ */
+#include "lkdtm.h"
+#include <linux/slab.h>
+
+/*
+ * This tries to stay within the next largest power-of-2 kmalloc cache
+ * to avoid actually overwriting anything important if it's not detected
+ * correctly.
+ */
+void lkdtm_OVERWRITE_ALLOCATION(void)
+{
+ size_t len = 1020;
+	u32 *data = kmalloc(len, GFP_KERNEL);
+
+	if (!data)
+		return;
+
+	data[1024 / sizeof(u32)] = 0x12345678;
+ kfree(data);
+}
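+
+/*
+ * Worked numbers for the write above: kmalloc(1020) is served from the
+ * 1024-byte cache, and 1024 / sizeof(u32) == 256, so the store lands at
+ * byte offset 1024, just past both the 1020-byte request and the
+ * object's nominal end, where redzone checking can notice it.
+ */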
+
+void lkdtm_WRITE_AFTER_FREE(void)
+{
+ int *base, *again;
+ size_t len = 1024;
+ /*
+ * The slub allocator uses the first word to store the free
+ * pointer in some configurations. Use the middle of the
+ * allocation to avoid running into the freelist
+	 * allocation to avoid running into the freelist.
+ size_t offset = (len / sizeof(*base)) / 2;
+
+	base = kmalloc(len, GFP_KERNEL);
+	if (!base)
+		return;
+	pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
+ pr_info("Attempting bad write to freed memory at %p\n",
+ &base[offset]);
+ kfree(base);
+ base[offset] = 0x0abcdef0;
+ /* Attempt to notice the overwrite. */
+ again = kmalloc(len, GFP_KERNEL);
+ kfree(again);
+ if (again != base)
+ pr_info("Hmm, didn't get the same memory range.\n");
+}
+
+void lkdtm_READ_AFTER_FREE(void)
+{
+ int *base, *val, saw;
+ size_t len = 1024;
+ /*
+ * The slub allocator uses the first word to store the free
+ * pointer in some configurations. Use the middle of the
+	 * allocation to avoid running into the freelist.
+ */
+ size_t offset = (len / sizeof(*base)) / 2;
+
+ base = kmalloc(len, GFP_KERNEL);
+ if (!base) {
+ pr_info("Unable to allocate base memory.\n");
+ return;
+ }
+
+ val = kmalloc(len, GFP_KERNEL);
+ if (!val) {
+ pr_info("Unable to allocate val memory.\n");
+ kfree(base);
+ return;
+ }
+
+ *val = 0x12345678;
+ base[offset] = *val;
+ pr_info("Value in memory before free: %x\n", base[offset]);
+
+ kfree(base);
+
+ pr_info("Attempting bad read from freed memory\n");
+ saw = base[offset];
+ if (saw != *val) {
+ /* Good! Poisoning happened, so declare a win. */
+ pr_info("Memory correctly poisoned (%x)\n", saw);
+ BUG();
+ }
+ pr_info("Memory was not poisoned\n");
+
+ kfree(val);
+}
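+
+/*
+ * Note: with slab poisoning enabled (for instance SLUB debug poisoning),
+ * freed objects are filled with POISON_FREE (0x6b), so the read above
+ * would typically see 0x6b6b6b6b instead of 0x12345678.
+ */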
+
+void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
+{
+ unsigned long p = __get_free_page(GFP_KERNEL);
+ if (!p) {
+ pr_info("Unable to allocate free page\n");
+ return;
+ }
+
+ pr_info("Writing to the buddy page before free\n");
+ memset((void *)p, 0x3, PAGE_SIZE);
+ free_page(p);
+ schedule();
+ pr_info("Attempting bad write to the buddy page after free\n");
+ memset((void *)p, 0x78, PAGE_SIZE);
+ /* Attempt to notice the overwrite. */
+ p = __get_free_page(GFP_KERNEL);
+ free_page(p);
+ schedule();
+}
+
+void lkdtm_READ_BUDDY_AFTER_FREE(void)
+{
+ unsigned long p = __get_free_page(GFP_KERNEL);
+ int saw, *val;
+ int *base;
+
+ if (!p) {
+ pr_info("Unable to allocate free page\n");
+ return;
+ }
+
+ val = kmalloc(1024, GFP_KERNEL);
+ if (!val) {
+ pr_info("Unable to allocate val memory.\n");
+ free_page(p);
+ return;
+ }
+
+ base = (int *)p;
+
+ *val = 0x12345678;
+ base[0] = *val;
+ pr_info("Value in memory before free: %x\n", base[0]);
+ free_page(p);
+ pr_info("Attempting to read from freed memory\n");
+ saw = base[0];
+ if (saw != *val) {
+ /* Good! Poisoning happened, so declare a win. */
+ pr_info("Memory correctly poisoned (%x)\n", saw);
+ BUG();
+ }
+ pr_info("Buddy page was not poisoned\n");
+
+ kfree(val);
+}
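+
+/*
+ * As with the slab tests, this read only differs from the stored value
+ * when something scribbles on freed pages, e.g. page poisoning
+ * (CONFIG_PAGE_POISONING) or reuse by another allocation.
+ */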
diff --git a/drivers/misc/lkdtm_perms.c b/drivers/misc/lkdtm_perms.c
new file mode 100644
index 000000000..45f1c0f96
--- /dev/null
+++ b/drivers/misc/lkdtm_perms.c
@@ -0,0 +1,199 @@
+/*
+ * This is for all the tests related to validating kernel memory
+ * permissions: non-executable regions, non-writable regions, and
+ * even non-readable regions.
+ */
+#include "lkdtm.h"
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mman.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+
+/* Whether or not to fill the target memory area with do_nothing(). */
+#define CODE_WRITE true
+#define CODE_AS_IS false
+
+/* How many bytes to copy to be sure we've copied enough of do_nothing(). */
+#define EXEC_SIZE 64
+
+/* This is non-const, so it will end up in the .data section. */
+static u8 data_area[EXEC_SIZE];
+
+/* This is const, so it will end up in the .rodata section. */
+static const unsigned long rodata = 0xAA55AA55;
+
+/* This is marked __ro_after_init, so it should ultimately be .rodata. */
+static unsigned long ro_after_init __ro_after_init = 0x55AA5500;
+
+/*
+ * This just returns to the caller. It is designed to be copied into
+ * non-executable memory regions.
+ */
+static void do_nothing(void)
+{
+ return;
+}
+
+/* Must immediately follow do_nothing for size calculations to work out. */
+static void do_overwritten(void)
+{
+ pr_info("do_overwritten wasn't overwritten!\n");
+ return;
+}
+
+static noinline void execute_location(void *dst, bool write)
+{
+ void (*func)(void) = dst;
+
+ pr_info("attempting ok execution at %p\n", do_nothing);
+ do_nothing();
+
+ if (write == CODE_WRITE) {
+ memcpy(dst, do_nothing, EXEC_SIZE);
+ flush_icache_range((unsigned long)dst,
+ (unsigned long)dst + EXEC_SIZE);
+ }
+ pr_info("attempting bad execution at %p\n", func);
+ func();
+}
+
+static void execute_user_location(void *dst)
+{
+ /* Intentionally crossing kernel/user memory boundary. */
+ void (*func)(void) = dst;
+
+ pr_info("attempting ok execution at %p\n", do_nothing);
+ do_nothing();
+
+ if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE))
+ return;
+ flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE);
+ pr_info("attempting bad execution at %p\n", func);
+ func();
+}
+
+void lkdtm_WRITE_RO(void)
+{
+ /* Explicitly cast away "const" for the test. */
+ unsigned long *ptr = (unsigned long *)&rodata;
+
+ pr_info("attempting bad rodata write at %p\n", ptr);
+ *ptr ^= 0xabcd1234;
+}
+
+void lkdtm_WRITE_RO_AFTER_INIT(void)
+{
+ unsigned long *ptr = &ro_after_init;
+
+ /*
+	 * Verify this was written to during init. Since an Oops
+	 * is considered a "success", failure here just means skipping
+	 * the real test.
+ */
+ if ((*ptr & 0xAA) != 0xAA) {
+ pr_info("%p was NOT written during init!?\n", ptr);
+ return;
+ }
+
+ pr_info("attempting bad ro_after_init write at %p\n", ptr);
+ *ptr ^= 0xabcd1234;
+}
+
+void lkdtm_WRITE_KERN(void)
+{
+ size_t size;
+ unsigned char *ptr;
+
+ size = (unsigned long)do_overwritten - (unsigned long)do_nothing;
+ ptr = (unsigned char *)do_overwritten;
+
+ pr_info("attempting bad %zu byte write at %p\n", size, ptr);
+ memcpy(ptr, (unsigned char *)do_nothing, size);
+ flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size));
+
+ do_overwritten();
+}
+
+void lkdtm_EXEC_DATA(void)
+{
+ execute_location(data_area, CODE_WRITE);
+}
+
+void lkdtm_EXEC_STACK(void)
+{
+ u8 stack_area[EXEC_SIZE];
+ execute_location(stack_area, CODE_WRITE);
+}
+
+void lkdtm_EXEC_KMALLOC(void)
+{
+ u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);
+ execute_location(kmalloc_area, CODE_WRITE);
+ kfree(kmalloc_area);
+}
+
+void lkdtm_EXEC_VMALLOC(void)
+{
+ u32 *vmalloc_area = vmalloc(EXEC_SIZE);
+ execute_location(vmalloc_area, CODE_WRITE);
+ vfree(vmalloc_area);
+}
+
+void lkdtm_EXEC_RODATA(void)
+{
+ execute_location(lkdtm_rodata_do_nothing, CODE_AS_IS);
+}
+
+void lkdtm_EXEC_USERSPACE(void)
+{
+ unsigned long user_addr;
+
+ user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_addr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ return;
+ }
+ execute_user_location((void *)user_addr);
+ vm_munmap(user_addr, PAGE_SIZE);
+}
+
+void lkdtm_ACCESS_USERSPACE(void)
+{
+ unsigned long user_addr, tmp = 0;
+ unsigned long *ptr;
+
+ user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_addr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ return;
+ }
+
+ if (copy_to_user((void __user *)user_addr, &tmp, sizeof(tmp))) {
+ pr_warn("copy_to_user failed\n");
+ vm_munmap(user_addr, PAGE_SIZE);
+ return;
+ }
+
+ ptr = (unsigned long *)user_addr;
+
+ pr_info("attempting bad read at %p\n", ptr);
+ tmp = *ptr;
+ tmp += 0xc0dec0de;
+
+ pr_info("attempting bad write at %p\n", ptr);
+ *ptr = tmp;
+
+ vm_munmap(user_addr, PAGE_SIZE);
+}
+
+void __init lkdtm_perms_init(void)
+{
+ /* Make sure we can write to __ro_after_init values during __init */
+ ro_after_init |= 0xAA;
+}
diff --git a/drivers/misc/lkdtm_rodata.c b/drivers/misc/lkdtm_rodata.c
new file mode 100644
index 000000000..3564477b8
--- /dev/null
+++ b/drivers/misc/lkdtm_rodata.c
@@ -0,0 +1,10 @@
+/*
+ * This includes functions that are meant to live entirely in .rodata
+ * (via objcopy tricks), to validate the non-executability of .rodata.
+ */
+#include "lkdtm.h"
+
+void notrace lkdtm_rodata_do_nothing(void)
+{
+	/* Does nothing. We just want an architecture-agnostic "return". */
+}
diff --git a/drivers/misc/lkdtm_usercopy.c b/drivers/misc/lkdtm_usercopy.c
new file mode 100644
index 000000000..1dd611423
--- /dev/null
+++ b/drivers/misc/lkdtm_usercopy.c
@@ -0,0 +1,322 @@
+/*
+ * This is for all the tests related to copy_to_user() and copy_from_user()
+ * hardening.
+ */
+#include "lkdtm.h"
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mman.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Many of the tests here end up using const sizes, but those would
+ * normally be ignored by hardened usercopy, so force the compiler
+ * into choosing the non-const path to make sure we trigger the
+ * hardened usercopy checks by adding "unconst" to all the const copies,
+ * and making sure "cache_size" isn't optimized into a const.
+ */
+static volatile size_t unconst = 0;
+static volatile size_t cache_size = 1024;
+static struct kmem_cache *bad_cache;
+
+static const unsigned char test_text[] = "This is a test.\n";
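+
+/*
+ * For example (a sketch of the intent, not a call made by this file):
+ * a constant length like copy_to_user(ptr, buf, 32) may be checked or
+ * folded at compile time, while copy_to_user(ptr, buf, unconst + 32)
+ * always goes through the runtime hardened usercopy path.
+ */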
+
+/*
+ * Instead of adding -Wno-return-local-addr, just pass the stack address
+ * through a function to obfuscate it from the compiler.
+ */
+static noinline unsigned char *trick_compiler(unsigned char *stack)
+{
+ return stack + 0;
+}
+
+static noinline unsigned char *do_usercopy_stack_callee(int value)
+{
+ unsigned char buf[32];
+ int i;
+
+ /* Exercise stack to avoid everything living in registers. */
+ for (i = 0; i < sizeof(buf); i++) {
+ buf[i] = value & 0xff;
+ }
+
+ return trick_compiler(buf);
+}
+
+static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
+{
+ unsigned long user_addr;
+ unsigned char good_stack[32];
+ unsigned char *bad_stack;
+ int i;
+
+ /* Exercise stack to avoid everything living in registers. */
+ for (i = 0; i < sizeof(good_stack); i++)
+ good_stack[i] = test_text[i % sizeof(test_text)];
+
+ /* This is a pointer to outside our current stack frame. */
+ if (bad_frame) {
+ bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
+ } else {
+ /* Put start address just inside stack. */
+ bad_stack = task_stack_page(current) + THREAD_SIZE;
+ bad_stack -= sizeof(unsigned long);
+ }
+
+ user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_addr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ return;
+ }
+
+ if (to_user) {
+ pr_info("attempting good copy_to_user of local stack\n");
+ if (copy_to_user((void __user *)user_addr, good_stack,
+ unconst + sizeof(good_stack))) {
+ pr_warn("copy_to_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_to_user of distant stack\n");
+ if (copy_to_user((void __user *)user_addr, bad_stack,
+ unconst + sizeof(good_stack))) {
+ pr_warn("copy_to_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+ } else {
+ /*
+ * There isn't a safe way to not be protected by usercopy
+ * if we're going to write to another thread's stack.
+ */
+ if (!bad_frame)
+ goto free_user;
+
+ pr_info("attempting good copy_from_user of local stack\n");
+ if (copy_from_user(good_stack, (void __user *)user_addr,
+ unconst + sizeof(good_stack))) {
+ pr_warn("copy_from_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_from_user of distant stack\n");
+ if (copy_from_user(bad_stack, (void __user *)user_addr,
+ unconst + sizeof(good_stack))) {
+ pr_warn("copy_from_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+ }
+
+free_user:
+ vm_munmap(user_addr, PAGE_SIZE);
+}
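+
+/*
+ * Note on the !bad_frame case above: task_stack_page() returns the base
+ * of the stack allocation, so base + THREAD_SIZE is one past its end;
+ * backing off one long gives an in-stack address outside any frame.
+ */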
+
+static void do_usercopy_heap_size(bool to_user)
+{
+ unsigned long user_addr;
+ unsigned char *one, *two;
+ size_t size = unconst + 1024;
+
+ one = kmalloc(size, GFP_KERNEL);
+ two = kmalloc(size, GFP_KERNEL);
+ if (!one || !two) {
+ pr_warn("Failed to allocate kernel memory\n");
+ goto free_kernel;
+ }
+
+ user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_addr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ goto free_kernel;
+ }
+
+ memset(one, 'A', size);
+ memset(two, 'B', size);
+
+ if (to_user) {
+ pr_info("attempting good copy_to_user of correct size\n");
+ if (copy_to_user((void __user *)user_addr, one, size)) {
+ pr_warn("copy_to_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_to_user of too large size\n");
+ if (copy_to_user((void __user *)user_addr, one, 2 * size)) {
+ pr_warn("copy_to_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+ } else {
+ pr_info("attempting good copy_from_user of correct size\n");
+ if (copy_from_user(one, (void __user *)user_addr, size)) {
+ pr_warn("copy_from_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_from_user of too large size\n");
+ if (copy_from_user(one, (void __user *)user_addr, 2 * size)) {
+ pr_warn("copy_from_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+ }
+
+free_user:
+ vm_munmap(user_addr, PAGE_SIZE);
+free_kernel:
+ kfree(one);
+ kfree(two);
+}
+
+static void do_usercopy_heap_flag(bool to_user)
+{
+ unsigned long user_addr;
+ unsigned char *good_buf = NULL;
+ unsigned char *bad_buf = NULL;
+
+ /* Make sure cache was prepared. */
+ if (!bad_cache) {
+ pr_warn("Failed to allocate kernel cache\n");
+ return;
+ }
+
+ /*
+ * Allocate one buffer from each cache (kmalloc will have the
+ * SLAB_USERCOPY flag already, but "bad_cache" won't).
+ */
+ good_buf = kmalloc(cache_size, GFP_KERNEL);
+ bad_buf = kmem_cache_alloc(bad_cache, GFP_KERNEL);
+ if (!good_buf || !bad_buf) {
+ pr_warn("Failed to allocate buffers from caches\n");
+ goto free_alloc;
+ }
+
+ /* Allocate user memory we'll poke at. */
+ user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_addr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ goto free_alloc;
+ }
+
+ memset(good_buf, 'A', cache_size);
+ memset(bad_buf, 'B', cache_size);
+
+ if (to_user) {
+ pr_info("attempting good copy_to_user with SLAB_USERCOPY\n");
+ if (copy_to_user((void __user *)user_addr, good_buf,
+ cache_size)) {
+ pr_warn("copy_to_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_to_user w/o SLAB_USERCOPY\n");
+ if (copy_to_user((void __user *)user_addr, bad_buf,
+ cache_size)) {
+ pr_warn("copy_to_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+ } else {
+ pr_info("attempting good copy_from_user with SLAB_USERCOPY\n");
+ if (copy_from_user(good_buf, (void __user *)user_addr,
+ cache_size)) {
+ pr_warn("copy_from_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_from_user w/o SLAB_USERCOPY\n");
+ if (copy_from_user(bad_buf, (void __user *)user_addr,
+ cache_size)) {
+ pr_warn("copy_from_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+ }
+
+free_user:
+ vm_munmap(user_addr, PAGE_SIZE);
+free_alloc:
+ if (bad_buf)
+ kmem_cache_free(bad_cache, bad_buf);
+ kfree(good_buf);
+}
+
+/* Callable tests. */
+void lkdtm_USERCOPY_HEAP_SIZE_TO(void)
+{
+ do_usercopy_heap_size(true);
+}
+
+void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
+{
+ do_usercopy_heap_size(false);
+}
+
+void lkdtm_USERCOPY_HEAP_FLAG_TO(void)
+{
+ do_usercopy_heap_flag(true);
+}
+
+void lkdtm_USERCOPY_HEAP_FLAG_FROM(void)
+{
+ do_usercopy_heap_flag(false);
+}
+
+void lkdtm_USERCOPY_STACK_FRAME_TO(void)
+{
+ do_usercopy_stack(true, true);
+}
+
+void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
+{
+ do_usercopy_stack(false, true);
+}
+
+void lkdtm_USERCOPY_STACK_BEYOND(void)
+{
+ do_usercopy_stack(true, false);
+}
+
+void lkdtm_USERCOPY_KERNEL(void)
+{
+ unsigned long user_addr;
+
+ user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_addr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ return;
+ }
+
+ pr_info("attempting good copy_to_user from kernel rodata\n");
+ if (copy_to_user((void __user *)user_addr, test_text,
+ unconst + sizeof(test_text))) {
+ pr_warn("copy_to_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_to_user from kernel text\n");
+ if (copy_to_user((void __user *)user_addr, vm_mmap,
+ unconst + PAGE_SIZE)) {
+ pr_warn("copy_to_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+
+free_user:
+ vm_munmap(user_addr, PAGE_SIZE);
+}
+
+void __init lkdtm_usercopy_init(void)
+{
+ /* Prepare cache that lacks SLAB_USERCOPY flag. */
+ bad_cache = kmem_cache_create("lkdtm-no-usercopy", cache_size, 0,
+ 0, NULL);
+}
+
+void __exit lkdtm_usercopy_exit(void)
+{
+ kmem_cache_destroy(bad_cache);
+}
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 5aa606c8a..085f3aafe 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -132,6 +132,7 @@ static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length)
hdr->length = length;
hdr->msg_complete = 1;
hdr->reserved = 0;
+ hdr->internal = 0;
}
/**
@@ -165,15 +166,15 @@ void mei_hbm_cl_hdr(struct mei_cl *cl, u8 hbm_cmd, void *buf, size_t len)
* Return: 0 on success, <0 on failure.
*/
static inline
-int mei_hbm_cl_write(struct mei_device *dev,
- struct mei_cl *cl, u8 hbm_cmd, size_t len)
+int mei_hbm_cl_write(struct mei_device *dev, struct mei_cl *cl,
+ u8 hbm_cmd, u8 *buf, size_t len)
{
- struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+ struct mei_msg_hdr mei_hdr;
- mei_hbm_hdr(mei_hdr, len);
- mei_hbm_cl_hdr(cl, hbm_cmd, dev->wr_msg.data, len);
+ mei_hbm_hdr(&mei_hdr, len);
+ mei_hbm_cl_hdr(cl, hbm_cmd, buf, len);
- return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+ return mei_write_message(dev, &mei_hdr, buf);
}
/**
@@ -250,24 +251,23 @@ int mei_hbm_start_wait(struct mei_device *dev)
*/
int mei_hbm_start_req(struct mei_device *dev)
{
- struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
- struct hbm_host_version_request *start_req;
+ struct mei_msg_hdr mei_hdr;
+ struct hbm_host_version_request start_req;
const size_t len = sizeof(struct hbm_host_version_request);
int ret;
mei_hbm_reset(dev);
- mei_hbm_hdr(mei_hdr, len);
+ mei_hbm_hdr(&mei_hdr, len);
/* host start message */
- start_req = (struct hbm_host_version_request *)dev->wr_msg.data;
- memset(start_req, 0, len);
- start_req->hbm_cmd = HOST_START_REQ_CMD;
- start_req->host_version.major_version = HBM_MAJOR_VERSION;
- start_req->host_version.minor_version = HBM_MINOR_VERSION;
+ memset(&start_req, 0, len);
+ start_req.hbm_cmd = HOST_START_REQ_CMD;
+ start_req.host_version.major_version = HBM_MAJOR_VERSION;
+ start_req.host_version.minor_version = HBM_MINOR_VERSION;
dev->hbm_state = MEI_HBM_IDLE;
- ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+ ret = mei_write_message(dev, &mei_hdr, &start_req);
if (ret) {
dev_err(dev->dev, "version message write failed: ret = %d\n",
ret);
@@ -288,23 +288,22 @@ int mei_hbm_start_req(struct mei_device *dev)
*/
static int mei_hbm_enum_clients_req(struct mei_device *dev)
{
- struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
- struct hbm_host_enum_request *enum_req;
+ struct mei_msg_hdr mei_hdr;
+ struct hbm_host_enum_request enum_req;
const size_t len = sizeof(struct hbm_host_enum_request);
int ret;
/* enumerate clients */
- mei_hbm_hdr(mei_hdr, len);
+ mei_hbm_hdr(&mei_hdr, len);
- enum_req = (struct hbm_host_enum_request *)dev->wr_msg.data;
- memset(enum_req, 0, len);
- enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
- enum_req->flags |= dev->hbm_f_dc_supported ?
- MEI_HBM_ENUM_F_ALLOW_ADD : 0;
- enum_req->flags |= dev->hbm_f_ie_supported ?
- MEI_HBM_ENUM_F_IMMEDIATE_ENUM : 0;
+ memset(&enum_req, 0, len);
+ enum_req.hbm_cmd = HOST_ENUM_REQ_CMD;
+ enum_req.flags |= dev->hbm_f_dc_supported ?
+ MEI_HBM_ENUM_F_ALLOW_ADD : 0;
+ enum_req.flags |= dev->hbm_f_ie_supported ?
+ MEI_HBM_ENUM_F_IMMEDIATE_ENUM : 0;
- ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+ ret = mei_write_message(dev, &mei_hdr, &enum_req);
if (ret) {
dev_err(dev->dev, "enumeration request write failed: ret = %d.\n",
ret);
@@ -358,23 +357,21 @@ static int mei_hbm_me_cl_add(struct mei_device *dev,
*/
static int mei_hbm_add_cl_resp(struct mei_device *dev, u8 addr, u8 status)
{
- struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
- struct hbm_add_client_response *resp;
+ struct mei_msg_hdr mei_hdr;
+ struct hbm_add_client_response resp;
const size_t len = sizeof(struct hbm_add_client_response);
int ret;
dev_dbg(dev->dev, "adding client response\n");
- resp = (struct hbm_add_client_response *)dev->wr_msg.data;
+ mei_hbm_hdr(&mei_hdr, len);
- mei_hbm_hdr(mei_hdr, len);
- memset(resp, 0, sizeof(struct hbm_add_client_response));
+ memset(&resp, 0, sizeof(struct hbm_add_client_response));
+ resp.hbm_cmd = MEI_HBM_ADD_CLIENT_RES_CMD;
+ resp.me_addr = addr;
+ resp.status = status;
- resp->hbm_cmd = MEI_HBM_ADD_CLIENT_RES_CMD;
- resp->me_addr = addr;
- resp->status = status;
-
- ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+ ret = mei_write_message(dev, &mei_hdr, &resp);
if (ret)
dev_err(dev->dev, "add client response write failed: ret = %d\n",
ret);
@@ -421,18 +418,17 @@ int mei_hbm_cl_notify_req(struct mei_device *dev,
struct mei_cl *cl, u8 start)
{
- struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
- struct hbm_notification_request *req;
+ struct mei_msg_hdr mei_hdr;
+ struct hbm_notification_request req;
const size_t len = sizeof(struct hbm_notification_request);
int ret;
- mei_hbm_hdr(mei_hdr, len);
- mei_hbm_cl_hdr(cl, MEI_HBM_NOTIFY_REQ_CMD, dev->wr_msg.data, len);
+ mei_hbm_hdr(&mei_hdr, len);
+ mei_hbm_cl_hdr(cl, MEI_HBM_NOTIFY_REQ_CMD, &req, len);
- req = (struct hbm_notification_request *)dev->wr_msg.data;
- req->start = start;
+ req.start = start;
- ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+ ret = mei_write_message(dev, &mei_hdr, &req);
if (ret)
dev_err(dev->dev, "notify request failed: ret = %d\n", ret);
@@ -534,8 +530,8 @@ static void mei_hbm_cl_notify(struct mei_device *dev,
*/
static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx)
{
- struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
- struct hbm_props_request *prop_req;
+ struct mei_msg_hdr mei_hdr;
+ struct hbm_props_request prop_req;
const size_t len = sizeof(struct hbm_props_request);
unsigned long addr;
int ret;
@@ -550,15 +546,14 @@ static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx)
return 0;
}
- mei_hbm_hdr(mei_hdr, len);
- prop_req = (struct hbm_props_request *)dev->wr_msg.data;
+ mei_hbm_hdr(&mei_hdr, len);
- memset(prop_req, 0, sizeof(struct hbm_props_request));
+ memset(&prop_req, 0, sizeof(struct hbm_props_request));
- prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
- prop_req->me_addr = addr;
+ prop_req.hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
+ prop_req.me_addr = addr;
- ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+ ret = mei_write_message(dev, &mei_hdr, &prop_req);
if (ret) {
dev_err(dev->dev, "properties request write failed: ret = %d\n",
ret);
@@ -581,21 +576,20 @@ static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx)
*/
int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd)
{
- struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
- struct hbm_power_gate *req;
+ struct mei_msg_hdr mei_hdr;
+ struct hbm_power_gate req;
const size_t len = sizeof(struct hbm_power_gate);
int ret;
if (!dev->hbm_f_pg_supported)
return -EOPNOTSUPP;
- mei_hbm_hdr(mei_hdr, len);
+ mei_hbm_hdr(&mei_hdr, len);
- req = (struct hbm_power_gate *)dev->wr_msg.data;
- memset(req, 0, len);
- req->hbm_cmd = pg_cmd;
+ memset(&req, 0, len);
+ req.hbm_cmd = pg_cmd;
- ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+ ret = mei_write_message(dev, &mei_hdr, &req);
if (ret)
dev_err(dev->dev, "power gate command write failed.\n");
return ret;
@@ -611,18 +605,17 @@ EXPORT_SYMBOL_GPL(mei_hbm_pg);
*/
static int mei_hbm_stop_req(struct mei_device *dev)
{
- struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
- struct hbm_host_stop_request *req =
- (struct hbm_host_stop_request *)dev->wr_msg.data;
+ struct mei_msg_hdr mei_hdr;
+ struct hbm_host_stop_request req;
const size_t len = sizeof(struct hbm_host_stop_request);
- mei_hbm_hdr(mei_hdr, len);
+ mei_hbm_hdr(&mei_hdr, len);
- memset(req, 0, len);
- req->hbm_cmd = HOST_STOP_REQ_CMD;
- req->reason = DRIVER_STOP_REQUEST;
+ memset(&req, 0, len);
+ req.hbm_cmd = HOST_STOP_REQ_CMD;
+ req.reason = DRIVER_STOP_REQUEST;
- return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+ return mei_write_message(dev, &mei_hdr, &req);
}
/**
@@ -636,9 +629,10 @@ static int mei_hbm_stop_req(struct mei_device *dev)
int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl)
{
const size_t len = sizeof(struct hbm_flow_control);
+ u8 buf[len];
cl_dbg(dev, cl, "sending flow control\n");
- return mei_hbm_cl_write(dev, cl, MEI_FLOW_CONTROL_CMD, len);
+ return mei_hbm_cl_write(dev, cl, MEI_FLOW_CONTROL_CMD, buf, len);
}
/**
@@ -714,8 +708,9 @@ static void mei_hbm_cl_flow_control_res(struct mei_device *dev,
int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl)
{
const size_t len = sizeof(struct hbm_client_connect_request);
+ u8 buf[len];
- return mei_hbm_cl_write(dev, cl, CLIENT_DISCONNECT_REQ_CMD, len);
+ return mei_hbm_cl_write(dev, cl, CLIENT_DISCONNECT_REQ_CMD, buf, len);
}
/**
@@ -729,8 +724,9 @@ int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl)
int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl)
{
const size_t len = sizeof(struct hbm_client_connect_response);
+ u8 buf[len];
- return mei_hbm_cl_write(dev, cl, CLIENT_DISCONNECT_RES_CMD, len);
+ return mei_hbm_cl_write(dev, cl, CLIENT_DISCONNECT_RES_CMD, buf, len);
}
/**
@@ -765,8 +761,9 @@ static void mei_hbm_cl_disconnect_res(struct mei_device *dev, struct mei_cl *cl,
int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl)
{
const size_t len = sizeof(struct hbm_client_connect_request);
+ u8 buf[len];
- return mei_hbm_cl_write(dev, cl, CLIENT_CONNECT_REQ_CMD, len);
+ return mei_hbm_cl_write(dev, cl, CLIENT_CONNECT_REQ_CMD, buf, len);
}
/**
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index c9e01021e..e5e32503d 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -382,7 +382,6 @@ const char *mei_pg_state_str(enum mei_pg_state state);
*
* @hbuf_depth : depth of hardware host/write buffer is slots
* @hbuf_is_ready : query if the host host/write buffer is ready
- * @wr_msg : the buffer for hbm control messages
*
* @version : HBM protocol version in use
* @hbm_f_pg_supported : hbm feature pgi protocol
@@ -467,12 +466,6 @@ struct mei_device {
u8 hbuf_depth;
bool hbuf_is_ready;
- /* used for control messages */
- struct {
- struct mei_msg_hdr hdr;
- unsigned char data[128];
- } wr_msg;
-
struct hbm_version version;
unsigned int hbm_f_pg_supported:1;
unsigned int hbm_f_dc_supported:1;
@@ -670,8 +663,7 @@ static inline size_t mei_hbuf_max_len(const struct mei_device *dev)
}
static inline int mei_write_message(struct mei_device *dev,
- struct mei_msg_hdr *hdr,
- unsigned char *buf)
+ struct mei_msg_hdr *hdr, void *buf)
{
return dev->ops->write(dev, hdr, buf);
}
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
index 89e5917e1..6fd9d367d 100644
--- a/drivers/misc/mic/Kconfig
+++ b/drivers/misc/mic/Kconfig
@@ -146,3 +146,7 @@ config VOP
More information about the Intel MIC family as well as the Linux
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
+
+if VOP
+source "drivers/vhost/Kconfig.vringh"
+endif
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c
index e047efd83..9599d732a 100644
--- a/drivers/misc/mic/host/mic_boot.c
+++ b/drivers/misc/mic/host/mic_boot.c
@@ -38,7 +38,7 @@ static inline struct mic_device *vpdev_to_mdev(struct device *dev)
static dma_addr_t
_mic_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
void *va = phys_to_virt(page_to_phys(page)) + offset;
struct mic_device *mdev = vpdev_to_mdev(dev);
@@ -48,7 +48,7 @@ _mic_dma_map_page(struct device *dev, struct page *page,
static void _mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct mic_device *mdev = vpdev_to_mdev(dev);
@@ -144,7 +144,7 @@ static inline struct mic_device *scdev_to_mdev(struct scif_hw_dev *scdev)
static void *__mic_dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scif_hw_dev *scdev = dev_get_drvdata(dev);
struct mic_device *mdev = scdev_to_mdev(scdev);
@@ -164,7 +164,7 @@ static void *__mic_dma_alloc(struct device *dev, size_t size,
}
static void __mic_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
struct scif_hw_dev *scdev = dev_get_drvdata(dev);
struct mic_device *mdev = scdev_to_mdev(scdev);
@@ -176,7 +176,7 @@ static void __mic_dma_free(struct device *dev, size_t size, void *vaddr,
static dma_addr_t
__mic_dma_map_page(struct device *dev, struct page *page, unsigned long offset,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *va = phys_to_virt(page_to_phys(page)) + offset;
struct scif_hw_dev *scdev = dev_get_drvdata(dev);
@@ -188,7 +188,7 @@ __mic_dma_map_page(struct device *dev, struct page *page, unsigned long offset,
static void
__mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scif_hw_dev *scdev = dev_get_drvdata(dev);
struct mic_device *mdev = scdev_to_mdev(scdev);
@@ -198,7 +198,7 @@ __mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
static int __mic_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scif_hw_dev *scdev = dev_get_drvdata(dev);
struct mic_device *mdev = scdev_to_mdev(scdev);
@@ -229,7 +229,7 @@ err:
static void __mic_dma_unmap_sg(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scif_hw_dev *scdev = dev_get_drvdata(dev);
struct mic_device *mdev = scdev_to_mdev(scdev);
@@ -327,7 +327,7 @@ static inline struct mic_device *mbdev_to_mdev(struct mbus_device *mbdev)
static dma_addr_t
mic_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *va = phys_to_virt(page_to_phys(page)) + offset;
struct mic_device *mdev = dev_get_drvdata(dev->parent);
@@ -338,7 +338,7 @@ mic_dma_map_page(struct device *dev, struct page *page,
static void
mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct mic_device *mdev = dev_get_drvdata(dev->parent);
mic_unmap_single(mdev, dma_addr, size);
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index dcdbd5867..00051590e 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -141,7 +141,7 @@ static void st_send_frame(unsigned char chnl_id, struct st_data_s *st_gdata)
* This function is being called with spin lock held, protocol drivers are
* only expected to complete their waits and do nothing more than that.
*/
-static void st_reg_complete(struct st_data_s *st_gdata, char err)
+static void st_reg_complete(struct st_data_s *st_gdata, int err)
{
unsigned char i = 0;
pr_info(" %s ", __func__);
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index c5472e3c9..2206d4477 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -93,6 +93,7 @@ static DEFINE_SPINLOCK(mmc_blk_lock);
*/
struct mmc_blk_data {
spinlock_t lock;
+ struct device *parent;
struct gendisk *disk;
struct mmc_queue queue;
struct list_head part;
@@ -1724,8 +1725,9 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
!IS_ALIGNED(blk_rq_sectors(next), 8))
break;
- if (next->cmd_flags & REQ_DISCARD ||
- next->cmd_flags & REQ_FLUSH)
+ if (req_op(next) == REQ_OP_DISCARD ||
+ req_op(next) == REQ_OP_SECURE_ERASE ||
+ req_op(next) == REQ_OP_FLUSH)
break;
if (rq_data_dir(cur) != rq_data_dir(next))
@@ -1800,8 +1802,7 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
do_data_tag = (card->ext_csd.data_tag_unit_size) &&
(prq->cmd_flags & REQ_META) &&
(rq_data_dir(prq) == WRITE) &&
- ((brq->data.blocks * brq->data.blksz) >=
- card->ext_csd.data_tag_unit_size);
+ blk_rq_bytes(prq) >= card->ext_csd.data_tag_unit_size;
/* Argument of CMD23 */
packed_cmd_hdr[(i * 2)] = cpu_to_le32(
(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
@@ -1976,8 +1977,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
* When 4KB native sector is enabled, only 8 blocks
* multiple read or write is allowed
*/
- if ((brq->data.blocks & 0x07) &&
- (card->ext_csd.data_sector_size == 4096)) {
+ if (mmc_large_sector(card) &&
+ !IS_ALIGNED(blk_rq_sectors(rqc), 8)) {
pr_err("%s: Transfer size is not 4KB sector size aligned\n",
req->rq_disk->disk_name);
mq_rq = mq->mqrq_cur;
@@ -2150,7 +2151,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
struct mmc_card *card = md->queue.card;
struct mmc_host *host = card->host;
unsigned long flags;
- unsigned int cmd_flags = req ? req->cmd_flags : 0;
+ bool req_is_special = mmc_req_is_special(req);
if (req && !mq->mqrq_prev->req)
/* claim host only for the first request */
@@ -2166,15 +2167,17 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
}
mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
- if (cmd_flags & REQ_DISCARD) {
+ if (req && req_op(req) == REQ_OP_DISCARD) {
/* complete ongoing async transfer before issuing discard */
if (card->host->areq)
mmc_blk_issue_rw_rq(mq, NULL);
- if (req->cmd_flags & REQ_SECURE)
- ret = mmc_blk_issue_secdiscard_rq(mq, req);
- else
- ret = mmc_blk_issue_discard_rq(mq, req);
- } else if (cmd_flags & REQ_FLUSH) {
+ ret = mmc_blk_issue_discard_rq(mq, req);
+ } else if (req && req_op(req) == REQ_OP_SECURE_ERASE) {
+		/* complete ongoing async transfer before issuing secure erase */
+ if (card->host->areq)
+ mmc_blk_issue_rw_rq(mq, NULL);
+ ret = mmc_blk_issue_secdiscard_rq(mq, req);
+ } else if (req && req_op(req) == REQ_OP_FLUSH) {
/* complete ongoing async transfer before issuing flush */
if (card->host->areq)
mmc_blk_issue_rw_rq(mq, NULL);
@@ -2189,8 +2192,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
}
out:
- if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
- (cmd_flags & MMC_REQ_SPECIAL_MASK))
+ if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || req_is_special)
/*
* Release host when there are no more requests
* and after special request(discard, flush) is done.
@@ -2271,7 +2273,7 @@ again:
md->disk->fops = &mmc_bdops;
md->disk->private_data = md;
md->disk->queue = md->queue.queue;
- md->disk->driverfs_dev = parent;
+ md->parent = parent;
set_disk_ro(md->disk, md->read_only || default_ro);
md->disk->flags = GENHD_FL_EXT_DEVT;
if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
@@ -2459,7 +2461,7 @@ static int mmc_add_disk(struct mmc_blk_data *md)
int ret;
struct mmc_card *card = md->queue.card;
- add_disk(md->disk);
+ device_add_disk(md->parent, md->disk);
md->force_ro.show = force_ro_show;
md->force_ro.store = force_ro_store;
sysfs_attr_init(&md->force_ro.attr);
@@ -2499,12 +2501,6 @@ force_ro_fail:
return ret;
}
-#define CID_MANFID_SANDISK 0x2
-#define CID_MANFID_TOSHIBA 0x11
-#define CID_MANFID_MICRON 0x13
-#define CID_MANFID_SAMSUNG 0x15
-#define CID_MANFID_KINGSTON 0x70
-
static const struct mmc_fixup blk_fixups[] =
{
MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 6f4323c6d..708057261 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -33,7 +33,8 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
/*
* We only like normal block requests and discards.
*/
- if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
+ if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD &&
+ req_op(req) != REQ_OP_SECURE_ERASE) {
blk_dump_rq_flags(req, "MMC bad request");
return BLKPREP_KILL;
}
@@ -56,7 +57,6 @@ static int mmc_queue_thread(void *d)
down(&mq->thread_sem);
do {
struct request *req = NULL;
- unsigned int cmd_flags = 0;
spin_lock_irq(q->queue_lock);
set_current_state(TASK_INTERRUPTIBLE);
@@ -65,8 +65,9 @@ static int mmc_queue_thread(void *d)
spin_unlock_irq(q->queue_lock);
if (req || mq->mqrq_prev->req) {
+ bool req_is_special = mmc_req_is_special(req);
+
set_current_state(TASK_RUNNING);
- cmd_flags = req ? req->cmd_flags : 0;
mq->issue_fn(mq, req);
cond_resched();
if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
@@ -81,7 +82,7 @@ static int mmc_queue_thread(void *d)
* has been finished. Do not assign it to previous
* request.
*/
- if (cmd_flags & MMC_REQ_SPECIAL_MASK)
+ if (req_is_special)
mq->mqrq_cur->req = NULL;
mq->mqrq_prev->brq.mrq.data = NULL;
@@ -173,7 +174,7 @@ static void mmc_queue_setup_discard(struct request_queue *q,
if (card->pref_erase > max_discard)
q->limits.discard_granularity = 0;
if (mmc_can_secure_erase_trim(card))
- queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
+ queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}
/**
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 36cddab57..fee5e1271 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -1,7 +1,13 @@
#ifndef MMC_QUEUE_H
#define MMC_QUEUE_H
-#define MMC_REQ_SPECIAL_MASK (REQ_DISCARD | REQ_FLUSH)
+static inline bool mmc_req_is_special(struct request *req)
+{
+ return req &&
+ (req_op(req) == REQ_OP_FLUSH ||
+ req_op(req) == REQ_OP_DISCARD ||
+ req_op(req) == REQ_OP_SECURE_ERASE);
+}
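+
+/*
+ * Callers (the queue thread and mmc_blk_issue_rq()) use this to decide
+ * whether the host must be released once a special request (discard,
+ * secure erase, flush) has finished.
+ */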
struct request;
struct task_struct;
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 4bc48f104..c64266f5a 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -332,12 +332,13 @@ int mmc_add_card(struct mmc_card *card)
mmc_card_ddr52(card) ? "DDR " : "",
type);
} else {
- pr_info("%s: new %s%s%s%s%s card at address %04x\n",
+ pr_info("%s: new %s%s%s%s%s%s card at address %04x\n",
mmc_hostname(card->host),
mmc_card_uhs(card) ? "ultra high speed " :
(mmc_card_hs(card) ? "high speed " : ""),
mmc_card_hs400(card) ? "HS400 " :
(mmc_card_hs200(card) ? "HS200 " : ""),
+ mmc_card_hs400es(card) ? "Enhanced strobe " : "",
mmc_card_ddr52(card) ? "DDR " : "",
uhs_bus_speed_mode, type, card->rca);
}
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 8b4dfd454..e55cde6d4 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1127,6 +1127,15 @@ void mmc_set_initial_state(struct mmc_host *host)
host->ios.bus_width = MMC_BUS_WIDTH_1;
host->ios.timing = MMC_TIMING_LEGACY;
host->ios.drv_type = 0;
+ host->ios.enhanced_strobe = false;
+
+ /*
+ * Make sure we are in non-enhanced strobe mode before we
+ * actually enable it in ext_csd.
+ */
+ if ((host->caps2 & MMC_CAP2_HS400_ES) &&
+ host->ops->hs400_enhanced_strobe)
+ host->ops->hs400_enhanced_strobe(host, &host->ios);
mmc_set_ios(host);
}
@@ -1925,17 +1934,15 @@ void mmc_init_erase(struct mmc_card *card)
* to that size and alignment.
*
* For SD cards that define Allocation Unit size, limit erases to one
- * Allocation Unit at a time. For MMC cards that define High Capacity
- * Erase Size, whether it is switched on or not, limit to that size.
- * Otherwise just have a stab at a good value. For modern cards it
- * will end up being 4MiB. Note that if the value is too small, it
- * can end up taking longer to erase.
+ * Allocation Unit at a time.
+	 * For MMC, have a stab at a good value and for modern cards it will
+ * end up being 4MiB. Note that if the value is too small, it can end
+ * up taking longer to erase. Also note, erase_size is already set to
+ * High Capacity Erase Size if available when this function is called.
*/
if (mmc_card_sd(card) && card->ssr.au) {
card->pref_erase = card->ssr.au;
card->erase_shift = ffs(card->ssr.au) - 1;
- } else if (card->ext_csd.hc_erase_size) {
- card->pref_erase = card->ext_csd.hc_erase_size;
} else if (card->erase_size) {
sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
if (sz < 128)
@@ -2060,7 +2067,8 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
unsigned int to, unsigned int arg)
{
struct mmc_command cmd = {0};
- unsigned int qty = 0;
+ unsigned int qty = 0, busy_timeout = 0;
+ bool use_r1b_resp = false;
unsigned long timeout;
int err;
@@ -2128,8 +2136,22 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
memset(&cmd, 0, sizeof(struct mmc_command));
cmd.opcode = MMC_ERASE;
cmd.arg = arg;
- cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
- cmd.busy_timeout = mmc_erase_timeout(card, arg, qty);
+ busy_timeout = mmc_erase_timeout(card, arg, qty);
+ /*
+ * If the host controller supports busy signalling and the timeout for
+ * the erase operation does not exceed the max_busy_timeout, we should
+	 * use an R1B response. Otherwise we need to prevent the host from
+	 * doing hw busy detection, which is done by converting to an R1
+	 * response instead.
+ */
+ if (card->host->max_busy_timeout &&
+ busy_timeout > card->host->max_busy_timeout) {
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+ } else {
+ cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+ cmd.busy_timeout = busy_timeout;
+ use_r1b_resp = true;
+ }
+
err = mmc_wait_for_cmd(card->host, &cmd, 0);
if (err) {
pr_err("mmc_erase: erase error %d, status %#x\n",
@@ -2141,7 +2163,14 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
if (mmc_host_is_spi(card->host))
goto out;
- timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS);
+ /*
+	 * When an R1B response and MMC_CAP_WAIT_WHILE_BUSY are used, the
+	 * polling shall be avoided.
+ */
+ if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
+ goto out;
+
+ timeout = jiffies + msecs_to_jiffies(busy_timeout);
do {
memset(&cmd, 0, sizeof(struct mmc_command));
cmd.opcode = MMC_SEND_STATUS;
@@ -2321,23 +2350,41 @@ static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
unsigned int arg)
{
struct mmc_host *host = card->host;
- unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
+ unsigned int max_discard, x, y, qty = 0, max_qty, min_qty, timeout;
unsigned int last_timeout = 0;
- if (card->erase_shift)
+ if (card->erase_shift) {
max_qty = UINT_MAX >> card->erase_shift;
- else if (mmc_card_sd(card))
+ min_qty = card->pref_erase >> card->erase_shift;
+ } else if (mmc_card_sd(card)) {
max_qty = UINT_MAX;
- else
+ min_qty = card->pref_erase;
+ } else {
max_qty = UINT_MAX / card->erase_size;
+ min_qty = card->pref_erase / card->erase_size;
+ }
- /* Find the largest qty with an OK timeout */
+ /*
+	 * We should not use 'host->max_busy_timeout' as the only limit when
+	 * deciding the max discard sectors. We should pick a balanced value
+	 * that improves erase speed without letting the timeout grow too
+	 * long.
+	 *
+	 * Here we treat 'card->pref_erase' as the minimum number of discard
+	 * sectors no matter what 'host->max_busy_timeout' is, but if
+	 * 'host->max_busy_timeout' is large enough for more discard sectors,
+	 * we keep increasing the max discard sectors until we reach a
+	 * balanced value.
+ */
do {
y = 0;
for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
timeout = mmc_erase_timeout(card, arg, qty + x);
- if (timeout > host->max_busy_timeout)
+
+ if (qty + x > min_qty &&
+ timeout > host->max_busy_timeout)
break;
+
if (timeout < last_timeout)
break;
last_timeout = timeout;
@@ -2491,17 +2538,21 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
mmc_go_idle(host);
- mmc_send_if_cond(host, host->ocr_avail);
+ if (!(host->caps2 & MMC_CAP2_NO_SD))
+ mmc_send_if_cond(host, host->ocr_avail);
/* Order's important: probe SDIO, then SD, then MMC */
if (!(host->caps2 & MMC_CAP2_NO_SDIO))
if (!mmc_attach_sdio(host))
return 0;
- if (!mmc_attach_sd(host))
- return 0;
- if (!mmc_attach_mmc(host))
- return 0;
+ if (!(host->caps2 & MMC_CAP2_NO_SD))
+ if (!mmc_attach_sd(host))
+ return 0;
+
+ if (!(host->caps2 & MMC_CAP2_NO_MMC))
+ if (!mmc_attach_mmc(host))
+ return 0;
mmc_power_off(host);
return -EIO;
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 9382a57a5..c8451ce55 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -148,7 +148,8 @@ static int mmc_ios_show(struct seq_file *s, void *data)
str = "mmc HS200";
break;
case MMC_TIMING_MMC_HS400:
- str = "mmc HS400";
+ str = mmc_card_hs400es(host->card) ?
+ "mmc HS400 enhanced strobe" : "mmc HS400";
break;
default:
str = "invalid";
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 1be42fab1..98f25ffb4 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -313,6 +313,14 @@ int mmc_of_parse(struct mmc_host *host)
host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR;
if (of_property_read_bool(np, "mmc-hs400-1_2v"))
host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR;
+ if (of_property_read_bool(np, "mmc-hs400-enhanced-strobe"))
+ host->caps2 |= MMC_CAP2_HS400_ES;
+ if (of_property_read_bool(np, "no-sdio"))
+ host->caps2 |= MMC_CAP2_NO_SDIO;
+ if (of_property_read_bool(np, "no-sd"))
+ host->caps2 |= MMC_CAP2_NO_SD;
+ if (of_property_read_bool(np, "no-mmc"))
+ host->caps2 |= MMC_CAP2_NO_MMC;
host->dsr_req = !of_property_read_u32(np, "dsr", &host->dsr);
if (host->dsr_req && (host->dsr & ~0xffff)) {
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 5d438ad3e..f2d185cf8 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -45,6 +45,17 @@ static const unsigned int tacc_mant[] = {
35, 40, 45, 50, 55, 60, 70, 80,
};
+static const struct mmc_fixup mmc_ext_csd_fixups[] = {
+ /*
+ * Certain Hynix eMMC 4.41 cards might get broken when HPI feature
+ * is used so disable the HPI feature for such buggy cards.
+ */
+ MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_HYNIX,
+ 0x014a, add_quirk, MMC_QUIRK_BROKEN_HPI, 5),
+
+ END_FIXUP
+};
+
#define UNSTUFF_BITS(resp,start,size) \
({ \
const int __size = size; \
@@ -235,6 +246,11 @@ static void mmc_select_card_type(struct mmc_card *card)
avail_type |= EXT_CSD_CARD_TYPE_HS400_1_2V;
}
+ if ((caps2 & MMC_CAP2_HS400_ES) &&
+ card->ext_csd.strobe_support &&
+ (avail_type & EXT_CSD_CARD_TYPE_HS400))
+ avail_type |= EXT_CSD_CARD_TYPE_HS400ES;
+
card->ext_csd.hs_max_dtr = hs_max_dtr;
card->ext_csd.hs200_max_dtr = hs200_max_dtr;
card->mmc_avail_type = avail_type;
@@ -370,6 +386,9 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
*/
card->ext_csd.rev = ext_csd[EXT_CSD_REV];
+ /* fixup device after ext_csd revision field is updated */
+ mmc_fixup_device(card, mmc_ext_csd_fixups);
+
card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
@@ -386,6 +405,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
mmc_card_set_blockaddr(card);
}
+ card->ext_csd.strobe_support = ext_csd[EXT_CSD_STROBE_SUPPORT];
card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
mmc_select_card_type(card);
@@ -500,7 +520,8 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
card->cid.year += 16;
/* check whether the eMMC card supports BKOPS */
- if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
+ if (!mmc_card_broken_hpi(card) &&
+ ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
card->ext_csd.bkops = 1;
card->ext_csd.man_bkops_en =
(ext_csd[EXT_CSD_BKOPS_EN] &
@@ -513,7 +534,8 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
}
/* check whether the eMMC card supports HPI */
- if (!broken_hpi && (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1)) {
+ if (!mmc_card_broken_hpi(card) &&
+ !broken_hpi && (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1)) {
card->ext_csd.hpi = 1;
if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION;
@@ -727,6 +749,7 @@ MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
+MMC_DEV_ATTR(ocr, "%08x\n", card->ocr);
static ssize_t mmc_fwrev_show(struct device *dev,
struct device_attribute *attr,
@@ -744,6 +767,22 @@ static ssize_t mmc_fwrev_show(struct device *dev,
static DEVICE_ATTR(fwrev, S_IRUGO, mmc_fwrev_show, NULL);
+static ssize_t mmc_dsr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mmc_card *card = mmc_dev_to_card(dev);
+ struct mmc_host *host = card->host;
+
+ if (card->csd.dsr_imp && host->dsr_req)
+ return sprintf(buf, "0x%x\n", host->dsr);
+ else
+ /* return default DSR value */
+ return sprintf(buf, "0x%x\n", 0x404);
+}
+
+static DEVICE_ATTR(dsr, S_IRUGO, mmc_dsr_show, NULL);
+
static struct attribute *mmc_std_attrs[] = {
&dev_attr_cid.attr,
&dev_attr_csd.attr,
@@ -762,6 +801,8 @@ static struct attribute *mmc_std_attrs[] = {
&dev_attr_enhanced_area_size.attr,
&dev_attr_raw_rpmb_size_mult.attr,
&dev_attr_rel_sectors.attr,
+ &dev_attr_ocr.attr,
+ &dev_attr_dsr.attr,
NULL,
};
ATTRIBUTE_GROUPS(mmc_std);
@@ -959,6 +1000,19 @@ static int mmc_select_bus_width(struct mmc_card *card)
return err;
}
+/* Caller must hold re-tuning */
+static int mmc_switch_status(struct mmc_card *card)
+{
+ u32 status;
+ int err;
+
+ err = mmc_send_status(card, &status);
+ if (err)
+ return err;
+
+ return mmc_switch_status_error(card->host, status);
+}
+
/*
* Switch to the high-speed mode
*/
@@ -969,9 +1023,11 @@ static int mmc_select_hs(struct mmc_card *card)
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
card->ext_csd.generic_cmd6_time,
- true, true, true);
- if (!err)
+ true, false, true);
+ if (!err) {
mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
+ err = mmc_switch_status(card);
+ }
return err;
}
@@ -1047,23 +1103,9 @@ static int mmc_select_hs_ddr(struct mmc_card *card)
return err;
}
-/* Caller must hold re-tuning */
-static int mmc_switch_status(struct mmc_card *card)
-{
- u32 status;
- int err;
-
- err = mmc_send_status(card, &status);
- if (err)
- return err;
-
- return mmc_switch_status_error(card->host, status);
-}
-
static int mmc_select_hs400(struct mmc_card *card)
{
struct mmc_host *host = card->host;
- bool send_status = true;
unsigned int max_dtr;
int err = 0;
u8 val;
@@ -1075,19 +1117,12 @@ static int mmc_select_hs400(struct mmc_card *card)
host->ios.bus_width == MMC_BUS_WIDTH_8))
return 0;
- if (host->caps & MMC_CAP_WAIT_WHILE_BUSY)
- send_status = false;
-
- /* Reduce frequency to HS frequency */
- max_dtr = card->ext_csd.hs_max_dtr;
- mmc_set_clock(host, max_dtr);
-
/* Switch card to HS mode */
val = EXT_CSD_TIMING_HS;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, val,
card->ext_csd.generic_cmd6_time,
- true, send_status, true);
+ true, false, true);
if (err) {
pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
mmc_hostname(host), err);
@@ -1097,11 +1132,13 @@ static int mmc_select_hs400(struct mmc_card *card)
/* Set host controller to HS timing */
mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
- if (!send_status) {
- err = mmc_switch_status(card);
- if (err)
- goto out_err;
- }
+ /* Reduce frequency to HS frequency */
+ max_dtr = card->ext_csd.hs_max_dtr;
+ mmc_set_clock(host, max_dtr);
+
+ err = mmc_switch_status(card);
+ if (err)
+ goto out_err;
/* Switch card to DDR */
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -1120,7 +1157,7 @@ static int mmc_select_hs400(struct mmc_card *card)
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, val,
card->ext_csd.generic_cmd6_time,
- true, send_status, true);
+ true, false, true);
if (err) {
pr_err("%s: switch to hs400 failed, err:%d\n",
mmc_hostname(host), err);
@@ -1131,11 +1168,9 @@ static int mmc_select_hs400(struct mmc_card *card)
mmc_set_timing(host, MMC_TIMING_MMC_HS400);
mmc_set_bus_speed(card);
- if (!send_status) {
- err = mmc_switch_status(card);
- if (err)
- goto out_err;
- }
+ err = mmc_switch_status(card);
+ if (err)
+ goto out_err;
return 0;
@@ -1153,14 +1188,10 @@ int mmc_hs200_to_hs400(struct mmc_card *card)
int mmc_hs400_to_hs200(struct mmc_card *card)
{
struct mmc_host *host = card->host;
- bool send_status = true;
unsigned int max_dtr;
int err;
u8 val;
- if (host->caps & MMC_CAP_WAIT_WHILE_BUSY)
- send_status = false;
-
/* Reduce frequency to HS */
max_dtr = card->ext_csd.hs_max_dtr;
mmc_set_clock(host, max_dtr);
@@ -1169,49 +1200,43 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
val = EXT_CSD_TIMING_HS;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
val, card->ext_csd.generic_cmd6_time,
- true, send_status, true);
+ true, false, true);
if (err)
goto out_err;
mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
- if (!send_status) {
- err = mmc_switch_status(card);
- if (err)
- goto out_err;
- }
+ err = mmc_switch_status(card);
+ if (err)
+ goto out_err;
/* Switch HS DDR to HS */
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
EXT_CSD_BUS_WIDTH_8, card->ext_csd.generic_cmd6_time,
- true, send_status, true);
+ true, false, true);
if (err)
goto out_err;
mmc_set_timing(host, MMC_TIMING_MMC_HS);
- if (!send_status) {
- err = mmc_switch_status(card);
- if (err)
- goto out_err;
- }
+ err = mmc_switch_status(card);
+ if (err)
+ goto out_err;
/* Switch HS to HS200 */
val = EXT_CSD_TIMING_HS200 |
card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
- val, card->ext_csd.generic_cmd6_time, true,
- send_status, true);
+ val, card->ext_csd.generic_cmd6_time,
+ true, false, true);
if (err)
goto out_err;
mmc_set_timing(host, MMC_TIMING_MMC_HS200);
- if (!send_status) {
- err = mmc_switch_status(card);
- if (err)
- goto out_err;
- }
+ err = mmc_switch_status(card);
+ if (err)
+ goto out_err;
mmc_set_bus_speed(card);
@@ -1223,6 +1248,78 @@ out_err:
return err;
}
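
Every transition above now follows the same three-step shape: CMD6 with send_status=false, retime the host, then confirm the switch with mmc_switch_status(). A condensed sketch of that pattern (the helper itself is illustrative, not something this patch adds):

/* Illustrative condensation of the pattern used above. */
static int mmc_switch_and_retime(struct mmc_card *card, u8 index, u8 value,
				 unsigned int timing)
{
	int err;

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, index, value,
			   card->ext_csd.generic_cmd6_time,
			   true, false, true);	/* no CMD13 polling inside */
	if (err)
		return err;

	mmc_set_timing(card->host, timing);	/* retime the host first... */
	return mmc_switch_status(card);		/* ...then verify with CMD13 */
}
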
+static int mmc_select_hs400es(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ int err = 0;
+ u8 val;
+
+ if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
+ err = -ENOTSUPP;
+ goto out_err;
+ }
+
+ err = mmc_select_bus_width(card);
+ if (err < 0)
+ goto out_err;
+
+ /* Switch card to HS mode */
+ err = mmc_select_hs(card);
+ if (err) {
+ pr_err("%s: switch to high-speed failed, err:%d\n",
+ mmc_hostname(host), err);
+ goto out_err;
+ }
+
+ err = mmc_switch_status(card);
+ if (err)
+ goto out_err;
+
+ /* Switch card to DDR with strobe bit */
+ val = EXT_CSD_DDR_BUS_WIDTH_8 | EXT_CSD_BUS_WIDTH_STROBE;
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BUS_WIDTH,
+ val,
+ card->ext_csd.generic_cmd6_time);
+ if (err) {
+ pr_err("%s: switch to bus width for hs400es failed, err:%d\n",
+ mmc_hostname(host), err);
+ goto out_err;
+ }
+
+ /* Switch card to HS400 */
+ val = EXT_CSD_TIMING_HS400 |
+ card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HS_TIMING, val,
+ card->ext_csd.generic_cmd6_time,
+ true, false, true);
+ if (err) {
+ pr_err("%s: switch to hs400es failed, err:%d\n",
+ mmc_hostname(host), err);
+ goto out_err;
+ }
+
+ /* Set host controller to HS400 timing and frequency */
+ mmc_set_timing(host, MMC_TIMING_MMC_HS400);
+
+ /* Have the controller enable the enhanced strobe function */
+ host->ios.enhanced_strobe = true;
+ if (host->ops->hs400_enhanced_strobe)
+ host->ops->hs400_enhanced_strobe(host, &host->ios);
+
+ err = mmc_switch_status(card);
+ if (err)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
+ __func__, err);
+ return err;
+}
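
Hosts opt in to this path with MMC_CAP2_HS400_ES plus the new ->hs400_enhanced_strobe() callback, which is invoked with ios->enhanced_strobe already set. A hypothetical host-side implementation (FOO_STROBE_CTRL and FOO_STROBE_EN are invented names):

/* Hypothetical host callback; register names are made up. */
static void foo_hs400_enhanced_strobe(struct mmc_host *mmc,
				      struct mmc_ios *ios)
{
	struct foo_host *host = mmc_priv(mmc);
	u32 reg = readl(host->base + FOO_STROBE_CTRL);

	if (ios->enhanced_strobe)
		reg |= FOO_STROBE_EN;
	else
		reg &= ~FOO_STROBE_EN;
	writel(reg, host->base + FOO_STROBE_CTRL);
}
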
+
static void mmc_select_driver_type(struct mmc_card *card)
{
int card_drv_type, drive_strength, drv_type;
@@ -1250,7 +1347,6 @@ static void mmc_select_driver_type(struct mmc_card *card)
static int mmc_select_hs200(struct mmc_card *card)
{
struct mmc_host *host = card->host;
- bool send_status = true;
unsigned int old_timing, old_signal_voltage;
int err = -EINVAL;
u8 val;
@@ -1268,34 +1364,30 @@ static int mmc_select_hs200(struct mmc_card *card)
mmc_select_driver_type(card);
- if (host->caps & MMC_CAP_WAIT_WHILE_BUSY)
- send_status = false;
-
/*
* Set the bus width(4 or 8) with host's support and
* switch to HS200 mode if bus width is set successfully.
*/
err = mmc_select_bus_width(card);
- if (err >= 0) {
+ if (err > 0) {
val = EXT_CSD_TIMING_HS200 |
card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, val,
card->ext_csd.generic_cmd6_time,
- true, send_status, true);
+ true, false, true);
if (err)
goto err;
old_timing = host->ios.timing;
mmc_set_timing(host, MMC_TIMING_MMC_HS200);
- if (!send_status) {
- err = mmc_switch_status(card);
- /*
- * mmc_select_timing() assumes timing has not changed if
- * it is a switch error.
- */
- if (err == -EBADMSG)
- mmc_set_timing(host, old_timing);
- }
+
+ err = mmc_switch_status(card);
+ /*
+ * mmc_select_timing() assumes timing has not changed if
+ * it is a switch error.
+ */
+ if (err == -EBADMSG)
+ mmc_set_timing(host, old_timing);
}
err:
if (err) {
@@ -1310,7 +1402,7 @@ err:
}
/*
- * Activate High Speed or HS200 mode if supported.
+ * Activate High Speed, HS200 or HS400ES mode if supported.
*/
static int mmc_select_timing(struct mmc_card *card)
{
@@ -1319,7 +1411,9 @@ static int mmc_select_timing(struct mmc_card *card)
if (!mmc_can_ext_csd(card))
goto bus_speed;
- if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400ES)
+ err = mmc_select_hs400es(card);
+ else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)
err = mmc_select_hs200(card);
else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS)
err = mmc_select_hs(card);
@@ -1583,7 +1677,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
} else if (mmc_card_hs(card)) {
/* Select the desired bus width optionally */
err = mmc_select_bus_width(card);
- if (err >= 0) {
+ if (err > 0) {
err = mmc_select_hs_ddr(card);
if (err)
goto free_card;
@@ -1616,7 +1710,8 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
* If cache size is higher than 0, this indicates
* the existence of cache and it can be turned on.
*/
- if (card->ext_csd.cache_size > 0) {
+ if (!mmc_card_broken_hpi(card) &&
+ card->ext_csd.cache_size > 0) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_CACHE_CTRL, 1,
card->ext_csd.generic_cmd6_time);
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 62355bda6..ad6e9798e 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -480,6 +480,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
u32 status = 0;
bool use_r1b_resp = use_busy_signal;
bool expired = false;
+ bool busy = false;
mmc_retune_hold(host);
@@ -533,21 +534,26 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
timeout_ms = MMC_OPS_TIMEOUT_MS;
/* Must check status to be sure of no errors. */
- timeout = jiffies + msecs_to_jiffies(timeout_ms);
+ timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
do {
+ /*
+ * Due to the possibility of being preempted after
+ * sending the status command, check the expiration
+ * time first.
+ */
+ expired = time_after(jiffies, timeout);
if (send_status) {
- /*
- * Due to the possibility of being preempted after
- * sending the status command, check the expiration
- * time first.
- */
- expired = time_after(jiffies, timeout);
err = __mmc_send_status(card, &status, ignore_crc);
if (err)
goto out;
}
if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
break;
+ if (host->ops->card_busy) {
+ if (!host->ops->card_busy(host))
+ break;
+ busy = true;
+ }
if (mmc_host_is_spi(host))
break;
@@ -556,19 +562,20 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
 * doesn't support MMC_CAP_WAIT_WHILE_BUSY, then we can only
* rely on waiting for the stated timeout to be sufficient.
*/
- if (!send_status) {
+ if (!send_status && !host->ops->card_busy) {
mmc_delay(timeout_ms);
goto out;
}
/* Timeout if the device never leaves the program state. */
- if (expired && R1_CURRENT_STATE(status) == R1_STATE_PRG) {
+ if (expired &&
+ (R1_CURRENT_STATE(status) == R1_STATE_PRG || busy)) {
pr_err("%s: Card stuck in programming state! %s\n",
mmc_hostname(host), __func__);
err = -ETIMEDOUT;
goto out;
}
- } while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
+ } while (R1_CURRENT_STATE(status) == R1_STATE_PRG || busy);
err = mmc_switch_status_error(host, status);
out:
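
The loop above now prefers the host's ->card_busy() signal over CMD13 polling when both are available. Reduced to its essentials, the busy wait looks like the following standalone sketch (same assumptions as the code above, not a replacement for it):

/* Illustrative helper: wait for DAT0 release via ->card_busy(). */
static int foo_wait_card_not_busy(struct mmc_host *host,
				  unsigned long timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;

	if (!host->ops->card_busy)
		return -EOPNOTSUPP;

	while (host->ops->card_busy(host)) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		usleep_range(100, 200);
	}
	return 0;
}
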
diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c
index fad660b95..ca9cade31 100644
--- a/drivers/mmc/core/quirks.c
+++ b/drivers/mmc/core/quirks.c
@@ -72,6 +72,8 @@ void mmc_fixup_device(struct mmc_card *card, const struct mmc_fixup *table)
f->cis_vendor == (u16) SDIO_ANY_ID) &&
(f->cis_device == card->cis.device ||
f->cis_device == (u16) SDIO_ANY_ID) &&
+ (f->ext_csd_rev == EXT_CSD_REV_ANY ||
+ f->ext_csd_rev == card->ext_csd.rev) &&
rev >= f->rev_start && rev <= f->rev_end) {
dev_dbg(&card->dev, "calling %pf\n", f->vendor_fixup);
f->vendor_fixup(card, f->data);
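
In words: a fixup now applies only if its ext_csd_rev is the EXT_CSD_REV_ANY wildcard or matches the card's revision exactly. An illustrative restatement of the condition added above:

static bool fixup_matches_ext_csd_rev(const struct mmc_fixup *f,
				      const struct mmc_card *card)
{
	return f->ext_csd_rev == EXT_CSD_REV_ANY ||
	       f->ext_csd_rev == card->ext_csd.rev;
}
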
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index b95bd24d9..012393624 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -675,8 +675,25 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
+MMC_DEV_ATTR(ocr, "%08x\n", card->ocr);
+static ssize_t mmc_dsr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mmc_card *card = mmc_dev_to_card(dev);
+ struct mmc_host *host = card->host;
+
+ if (card->csd.dsr_imp && host->dsr_req)
+ return sprintf(buf, "0x%x\n", host->dsr);
+ else
+ /* return default DSR value */
+ return sprintf(buf, "0x%x\n", 0x404);
+}
+
+static DEVICE_ATTR(dsr, S_IRUGO, mmc_dsr_show, NULL);
+
static struct attribute *sd_std_attrs[] = {
&dev_attr_cid.attr,
&dev_attr_csd.attr,
@@ -690,6 +707,8 @@ static struct attribute *sd_std_attrs[] = {
&dev_attr_name.attr,
&dev_attr_oemid.attr,
&dev_attr_serial.attr,
+ &dev_attr_ocr.attr,
+ &dev_attr_dsr.attr,
NULL,
};
ATTRIBUTE_GROUPS(sd_std);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index d5ae22fbb..2c1af1056 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -122,6 +122,7 @@ config MMC_SDHCI_OF_ARASAN
tristate "SDHCI OF support for the Arasan SDHCI controllers"
depends on MMC_SDHCI_PLTFM
depends on OF
+ depends on COMMON_CLK
help
This selects the Arasan Secure Digital Host Controller Interface
(SDHCI). This hardware is found e.g. in Xilinx' Zynq SoC.
@@ -296,17 +297,6 @@ config MMC_SDHCI_BCM_KONA
If you have a controller with this interface, say Y or M here.
-config MMC_SDHCI_BCM2835
- tristate "SDHCI platform support for the BCM2835 SD/MMC Controller"
- depends on ARCH_BCM2835
- depends on MMC_SDHCI_PLTFM
- select MMC_SDHCI_IO_ACCESSORS
- help
- This selects the BCM2835 SD/MMC controller. If you have a BCM2835
- platform with SD or MMC devices, say Y or M here.
-
- If unsure, say N.
-
config MMC_SDHCI_F_SDH30
tristate "SDHCI support for Fujitsu Semiconductor F_SDH30"
depends on MMC_SDHCI_PLTFM
@@ -792,3 +782,13 @@ config MMC_SDHCI_MICROCHIP_PIC32
If you have a controller with this interface, say Y or M here.
If unsure, say N.
+config MMC_SDHCI_BRCMSTB
+ tristate "Broadcom SDIO/SD/MMC support"
+ depends on ARCH_BRCMSTB || BMIPS_GENERIC
+ depends on MMC_SDHCI_PLTFM
+ default y
+ help
+ This selects support for the SDIO/SD/MMC Host Controller on
+ Broadcom STB SoCs.
+
+ If unsure, say Y.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index af918d261..e2bdaaf43 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -71,11 +71,11 @@ obj-$(CONFIG_MMC_SDHCI_OF_AT91) += sdhci-of-at91.o
obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o
obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o
obj-$(CONFIG_MMC_SDHCI_BCM_KONA) += sdhci-bcm-kona.o
-obj-$(CONFIG_MMC_SDHCI_BCM2835) += sdhci-bcm2835.o
obj-$(CONFIG_MMC_SDHCI_IPROC) += sdhci-iproc.o
obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o
obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o
obj-$(CONFIG_MMC_SDHCI_MICROCHIP_PIC32) += sdhci-pic32.o
+obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o
ifeq ($(CONFIG_CB710_DEBUG),y)
CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 7e3a3247b..da0ef1765 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -157,7 +157,7 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
* HOLD register should be bypassed in case there is no phase shift
* applied on CMD/DATA that is sent to the card.
*/
- if (!SDMMC_CLKSEL_GET_DRV_WD3(clksel))
+ if (!SDMMC_CLKSEL_GET_DRV_WD3(clksel) && host->cur_slot)
set_bit(DW_MMC_CARD_NO_USE_HOLD, &host->cur_slot->flags);
}
diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c
index 63c2e2ed1..8e9d886bf 100644
--- a/drivers/mmc/host/dw_mmc-k3.c
+++ b/drivers/mmc/host/dw_mmc-k3.c
@@ -32,6 +32,12 @@ struct k3_priv {
struct regmap *reg;
};
+static unsigned long dw_mci_hi6220_caps[] = {
+ MMC_CAP_CMD23,
+ MMC_CAP_CMD23,
+ 0
+};
+
static void dw_mci_k3_set_ios(struct dw_mci *host, struct mmc_ios *ios)
{
int ret;
@@ -126,6 +132,7 @@ static void dw_mci_hi6220_set_ios(struct dw_mci *host, struct mmc_ios *ios)
}
static const struct dw_mci_drv_data hi6220_data = {
+ .caps = dw_mci_hi6220_caps,
.switch_voltage = dw_mci_hi6220_switch_voltage,
.set_ios = dw_mci_hi6220_set_ios,
.parse_dt = dw_mci_hi6220_parse_dt,
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index 358b0dc85..25eae359a 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -285,9 +285,6 @@ static int dw_mci_rockchip_init(struct dw_mci *host)
/* It is slot 8 on Rockchip SoCs */
host->sdio_id0 = 8;
- /* It needs this quirk on all Rockchip SoCs */
- host->pdata->quirks |= DW_MCI_QUIRK_BROKEN_DTO;
-
if (of_device_is_compatible(host->dev->of_node,
"rockchip,rk3288-dw-mshc"))
host->bus_hz /= RK3288_CLKGEN_DIV;
@@ -297,10 +294,10 @@ static int dw_mci_rockchip_init(struct dw_mci *host)
/* Common capabilities of RK3288 SoC */
static unsigned long dw_mci_rk3288_dwmmc_caps[4] = {
- MMC_CAP_ERASE | MMC_CAP_CMD23,
- MMC_CAP_ERASE | MMC_CAP_CMD23,
- MMC_CAP_ERASE | MMC_CAP_CMD23,
- MMC_CAP_ERASE | MMC_CAP_CMD23,
+ MMC_CAP_CMD23,
+ MMC_CAP_CMD23,
+ MMC_CAP_CMD23,
+ MMC_CAP_CMD23,
};
static const struct dw_mci_drv_data rk2928_drv_data = {
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 2cc6123b1..767af2026 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -44,11 +44,11 @@
/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
SDMMC_INT_HTO | SDMMC_INT_SBE | \
- SDMMC_INT_EBE)
+ SDMMC_INT_EBE | SDMMC_INT_HLE)
#define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
- SDMMC_INT_RESP_ERR)
+ SDMMC_INT_RESP_ERR | SDMMC_INT_HLE)
#define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
- DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
+ DW_MCI_CMD_ERROR_FLAGS)
#define DW_MCI_SEND_STATUS 1
#define DW_MCI_RECV_STATUS 2
#define DW_MCI_DMA_THRESHOLD 16
@@ -92,7 +92,7 @@ struct idmac_desc {
__le32 des1; /* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
- ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
+ ((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))
__le32 des2; /* buffer 1 physical address */
@@ -105,6 +105,7 @@ struct idmac_desc {
static bool dw_mci_reset(struct dw_mci *host);
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
static int dw_mci_card_busy(struct mmc_host *mmc);
+static int dw_mci_get_cd(struct mmc_host *mmc);
#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
@@ -898,23 +899,35 @@ done:
mci_writel(host, FIFOTH, fifoth_val);
}
-static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
+static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
{
unsigned int blksz = data->blksz;
u32 blksz_depth, fifo_depth;
u16 thld_size;
-
- WARN_ON(!(data->flags & MMC_DATA_READ));
+ u8 enable;
/*
* CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
* in the FIFO region, so we really shouldn't access it).
*/
- if (host->verid < DW_MMC_240A)
+ if (host->verid < DW_MMC_240A ||
+ (host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE))
+ return;
+
+ /*
+ * The card write threshold is available since controller
+ * version 2.80a and is used when HS400 mode is enabled.
+ */
+ if (data->flags & MMC_DATA_WRITE &&
+ host->timing == MMC_TIMING_MMC_HS400)
return;
+ if (data->flags & MMC_DATA_WRITE)
+ enable = SDMMC_CARD_WR_THR_EN;
+ else
+ enable = SDMMC_CARD_RD_THR_EN;
+
if (host->timing != MMC_TIMING_MMC_HS200 &&
- host->timing != MMC_TIMING_MMC_HS400 &&
host->timing != MMC_TIMING_UHS_SDR104)
goto disable;
@@ -930,11 +943,11 @@ static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
* Currently just choose blksz.
*/
thld_size = blksz;
- mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
+ mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable));
return;
disable:
- mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
+ mci_writel(host, CDTHRCTL, 0);
}
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
@@ -1005,12 +1018,12 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
host->sg = NULL;
host->data = data;
- if (data->flags & MMC_DATA_READ) {
+ if (data->flags & MMC_DATA_READ)
host->dir_status = DW_MCI_RECV_STATUS;
- dw_mci_ctrl_rd_thld(host, data);
- } else {
+ else
host->dir_status = DW_MCI_SEND_STATUS;
- }
+
+ dw_mci_ctrl_thld(host, data);
if (dw_mci_submit_data_dma(host, data)) {
if (host->data->flags & MMC_DATA_READ)
@@ -1099,7 +1112,7 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
- if ((clock << div) != slot->__clk_old || force_clkinit)
+ if (clock != slot->__clk_old || force_clkinit)
dev_info(&slot->mmc->class_dev,
"Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
slot->id, host->bus_hz, clock,
@@ -1128,8 +1141,8 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
/* inform CIU */
mci_send_cmd(slot, sdmmc_cmd_bits, 0);
- /* keep the clock with reflecting clock dividor */
- slot->__clk_old = clock << div;
+ /* keep the last clock value that was requested from core */
+ slot->__clk_old = clock;
}
host->current_speed = clock;
@@ -1253,15 +1266,15 @@ static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
* atomic, otherwise the card could be removed in between and the
* request wouldn't fail until another card was inserted.
*/
- spin_lock_bh(&host->lock);
- if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
- spin_unlock_bh(&host->lock);
+ if (!dw_mci_get_cd(mmc)) {
mrq->cmd->error = -ENOMEDIUM;
mmc_request_done(mmc, mrq);
return;
}
+ spin_lock_bh(&host->lock);
+
dw_mci_queue_request(host, slot, mrq);
spin_unlock_bh(&host->lock);
@@ -1451,8 +1464,7 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
int gpio_cd = mmc_gpio_get_cd(mmc);
/* Use platform get_cd function, else try onboard card detect */
- if ((mmc->caps & MMC_CAP_NEEDS_POLL) ||
- (mmc->caps & MMC_CAP_NONREMOVABLE))
+ if ((mmc->caps & MMC_CAP_NEEDS_POLL) || !mmc_card_is_removable(mmc))
present = 1;
else if (gpio_cd >= 0)
present = gpio_cd;
@@ -1761,6 +1773,33 @@ static void dw_mci_tasklet_func(unsigned long priv)
}
if (cmd->data && err) {
+ /*
+ * During UHS tuning sequence, sending the stop
+ * command after the response CRC error would
+ * throw the system into a confused state
+ * causing all future tuning phases to report
+ * failure.
+ *
+ * In such a case the controller will move into a data
+ * transfer state after a response error or
+ * response CRC error. Let's let that finish
+ * before trying to send a stop, so we'll go to
+ * STATE_SENDING_DATA.
+ *
+ * Although letting the data transfer take place
+ * will waste a bit of time (we already know
+ * the command was bad), it can't cause any
+ * errors since it's possible it would have
+ * taken place anyway if this tasklet got
+ * delayed. Allowing the transfer to take place
+ * avoids races and keeps things simple.
+ */
+ if ((err != -ETIMEDOUT) &&
+ (cmd->opcode == MMC_SEND_TUNING_BLOCK)) {
+ state = STATE_SENDING_DATA;
+ continue;
+ }
+
dw_mci_stop_dma(host);
send_stop_abort(host, data);
state = STATE_SENDING_STOP;
@@ -1801,8 +1840,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
* If all data-related interrupts don't come
* within the given time in reading data state.
*/
- if ((host->quirks & DW_MCI_QUIRK_BROKEN_DTO) &&
- (host->dir_status == DW_MCI_RECV_STATUS))
+ if (host->dir_status == DW_MCI_RECV_STATUS)
dw_mci_set_drto(host);
break;
}
@@ -1844,8 +1882,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
* interrupt doesn't come within the given time.
* in reading data state.
*/
- if ((host->quirks & DW_MCI_QUIRK_BROKEN_DTO) &&
- (host->dir_status == DW_MCI_RECV_STATUS))
+ if (host->dir_status == DW_MCI_RECV_STATUS)
dw_mci_set_drto(host);
break;
}
@@ -2411,8 +2448,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
}
if (pending & SDMMC_INT_DATA_OVER) {
- if (host->quirks & DW_MCI_QUIRK_BROKEN_DTO)
- del_timer(&host->dto_timer);
+ del_timer(&host->dto_timer);
mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
if (!host->data_status)
@@ -2474,7 +2510,8 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
SDMMC_IDMAC_INT_RI);
mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
- host->dma_ops->complete((void *)host);
+ if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
+ host->dma_ops->complete((void *)host);
}
} else {
pending = mci_readl(host, IDSTS);
@@ -2482,7 +2519,8 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
SDMMC_IDMAC_INT_RI);
mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
- host->dma_ops->complete((void *)host);
+ if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
+ host->dma_ops->complete((void *)host);
}
}
@@ -2570,6 +2608,12 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
if (host->pdata->caps)
mmc->caps = host->pdata->caps;
+ /*
+ * Support MMC_CAP_ERASE by default.
+ * It is needed for the trim/discard/erase commands.
+ */
+ mmc->caps |= MMC_CAP_ERASE;
+
if (host->pdata->pm_caps)
mmc->pm_caps = host->pdata->pm_caps;
@@ -2616,10 +2660,7 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
mmc->max_seg_size = mmc->max_req_size;
}
- if (dw_mci_get_cd(mmc))
- set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
- else
- clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+ dw_mci_get_cd(mmc);
ret = mmc_add_host(mmc);
if (ret)
@@ -3006,11 +3047,8 @@ int dw_mci_probe(struct dw_mci *host)
setup_timer(&host->cmd11_timer,
dw_mci_cmd11_timer, (unsigned long)host);
- host->quirks = host->pdata->quirks;
-
- if (host->quirks & DW_MCI_QUIRK_BROKEN_DTO)
- setup_timer(&host->dto_timer,
- dw_mci_dto_timer, (unsigned long)host);
+ setup_timer(&host->dto_timer,
+ dw_mci_dto_timer, (unsigned long)host);
spin_lock_init(&host->lock);
spin_lock_init(&host->irq_lock);
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 1e8d8380f..e8cd2dec3 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -15,6 +15,7 @@
#define _DW_MMC_H_
#define DW_MMC_240A 0x240a
+#define DW_MMC_280A 0x280a
#define SDMMC_CTRL 0x000
#define SDMMC_PWREN 0x004
@@ -175,7 +176,10 @@
/* Version ID register define */
#define SDMMC_GET_VERID(x) ((x) & 0xFFFF)
/* Card read threshold */
-#define SDMMC_SET_RD_THLD(v, x) (((v) & 0xFFF) << 16 | (x))
+#define SDMMC_SET_THLD(v, x) (((v) & 0xFFF) << 16 | (x))
+#define SDMMC_CARD_WR_THR_EN BIT(2)
+#define SDMMC_CARD_RD_THR_EN BIT(0)
+/* UHS-1 register defines */
#define SDMMC_UHS_18V BIT(0)
/* All ctrl reset bits */
#define SDMMC_CTRL_ALL_RESET_FLAGS \
@@ -245,9 +249,8 @@ extern int dw_mci_resume(struct dw_mci *host);
* @queue_node: List node for placing this node in the @queue list of
* &struct dw_mci.
* @clock: Clock rate configured by set_ios(). Protected by host->lock.
- * @__clk_old: The last updated clock with reflecting clock divider.
- * Keeping track of this helps us to avoid spamming the console
- * with CONFIG_MMC_CLKGATE.
+ * @__clk_old: The last clock value that was requested from core.
+ * Keeping track of this helps us to avoid spamming the console.
* @flags: Random state bits associated with the slot.
* @id: Number of this slot.
* @sdio_id: Number of this slot in the SDIO interrupt registers.
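
SDMMC_SET_THLD() packs the threshold size into bits [27:16] and the enable bit into the low word, so the read and write enables share one accessor. For example:

/* Illustrative expansion for a 512-byte write threshold:
 *   SDMMC_SET_THLD(512, SDMMC_CARD_WR_THR_EN)
 *     = ((512 & 0xFFF) << 16) | BIT(2)
 *     = 0x02000000 | 0x00000004
 *     = 0x02000004
 * This is the value dw_mci_ctrl_thld() would write to CDTHRCTL for
 * a write threshold with blksz == 512.
 */
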
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 03ddf0ecf..684087db1 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -1068,8 +1068,6 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
jz4740_mmc_clock_disable(host);
setup_timer(&host->timeout_timer, jz4740_mmc_timeout,
(unsigned long)host);
- /* It is not important when it times out, it just needs to timeout. */
- set_timer_slack(&host->timeout_timer, HZ);
host->use_dma = true;
if (host->use_dma && jz4740_mmc_acquire_dma_channels(host) != 0)
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 5642f71f8..84e9afcb5 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -287,6 +287,11 @@ struct msdc_save_para {
u32 emmc50_cfg0;
};
+struct msdc_tune_para {
+ u32 iocon;
+ u32 pad_tune;
+};
+
struct msdc_delay_phase {
u8 maxlen;
u8 start;
@@ -326,7 +331,10 @@ struct msdc_host {
unsigned char timing;
bool vqmmc_enabled;
u32 hs400_ds_delay;
+ bool hs400_mode; /* current eMMC will run at hs400 mode */
struct msdc_save_para save_para; /* used when gate HCLK */
+ struct msdc_tune_para def_tune_para; /* default tune setting */
+ struct msdc_tune_para saved_tune_para; /* tune result of CMD21/CMD19 */
};
static void sdr_set_bits(void __iomem *reg, u32 bs)
@@ -582,6 +590,18 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
msdc_set_timeout(host, host->timeout_ns, host->timeout_clks);
sdr_set_bits(host->base + MSDC_INTEN, flags);
+ /*
+ * mmc_select_hs400() will drop to 50MHz in high-speed mode;
+ * the tuning result from HS200/200MHz is not suitable at 50MHz.
+ */
+ if (host->sclk <= 52000000) {
+ writel(host->def_tune_para.iocon, host->base + MSDC_IOCON);
+ writel(host->def_tune_para.pad_tune, host->base + MSDC_PAD_TUNE);
+ } else {
+ writel(host->saved_tune_para.iocon, host->base + MSDC_IOCON);
+ writel(host->saved_tune_para.pad_tune, host->base + MSDC_PAD_TUNE);
+ }
+
dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->sclk, timing);
}
@@ -781,7 +801,13 @@ static bool msdc_cmd_done(struct msdc_host *host, int events,
}
if (!sbc_error && !(events & MSDC_INT_CMDRDY)) {
- msdc_reset_hw(host);
+ if (cmd->opcode != MMC_SEND_TUNING_BLOCK &&
+ cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
+ /*
+ * Do not clear the FIFO/interrupt state, as the tuning
+ * data may have already arrived.
+ */
+ msdc_reset_hw(host);
if (events & MSDC_INT_RSPCRCERR) {
cmd->error = -EILSEQ;
host->error |= REQ_CMD_EIO;
@@ -865,7 +891,11 @@ static void msdc_start_command(struct msdc_host *host,
static void msdc_cmd_next(struct msdc_host *host,
struct mmc_request *mrq, struct mmc_command *cmd)
{
- if (cmd->error || (mrq->sbc && mrq->sbc->error))
+ if ((cmd->error &&
+ !(cmd->error == -EILSEQ &&
+ (cmd->opcode == MMC_SEND_TUNING_BLOCK ||
+ cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200))) ||
+ (mrq->sbc && mrq->sbc->error))
msdc_request_done(host, mrq);
else if (cmd == mrq->sbc)
msdc_start_command(host, mrq, mrq->cmd);
@@ -1158,6 +1188,8 @@ static void msdc_init_hw(struct msdc_host *host)
/* Configure to default data timeout */
sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, 3);
+ host->def_tune_para.iocon = readl(host->base + MSDC_IOCON);
+ host->def_tune_para.pad_tune = readl(host->base + MSDC_PAD_TUNE);
dev_dbg(host->dev, "init hardware done!");
}
@@ -1296,7 +1328,7 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
{
struct msdc_host *host = mmc_priv(mmc);
u32 rise_delay = 0, fall_delay = 0;
- struct msdc_delay_phase final_rise_delay, final_fall_delay;
+ struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
u8 final_delay, final_maxlen;
int cmd_err;
int i;
@@ -1309,6 +1341,11 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
if (!cmd_err)
rise_delay |= (1 << i);
}
+ final_rise_delay = get_best_delay(host, rise_delay);
+ /* if rising edge has enough margin, then do not scan falling edge */
+ if (final_rise_delay.maxlen >= 10 ||
+ (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4))
+ goto skip_fall;
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
for (i = 0; i < PAD_DELAY_MAX; i++) {
@@ -1318,10 +1355,9 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
if (!cmd_err)
fall_delay |= (1 << i);
}
-
- final_rise_delay = get_best_delay(host, rise_delay);
final_fall_delay = get_best_delay(host, fall_delay);
+skip_fall:
final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen);
if (final_maxlen == final_rise_delay.maxlen) {
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
@@ -1342,7 +1378,7 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
{
struct msdc_host *host = mmc_priv(mmc);
u32 rise_delay = 0, fall_delay = 0;
- struct msdc_delay_phase final_rise_delay, final_fall_delay;
+ struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
u8 final_delay, final_maxlen;
int i, ret;
@@ -1355,6 +1391,11 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
if (!ret)
rise_delay |= (1 << i);
}
+ final_rise_delay = get_best_delay(host, rise_delay);
+ /* if rising edge has enough margin, then do not scan falling edge */
+ if (final_rise_delay.maxlen >= 10 ||
+ (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4))
+ goto skip_fall;
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
@@ -1365,14 +1406,10 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
if (!ret)
fall_delay |= (1 << i);
}
-
- final_rise_delay = get_best_delay(host, rise_delay);
final_fall_delay = get_best_delay(host, fall_delay);
+skip_fall:
final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen);
- /* Rising edge is more stable, prefer to use it */
- if (final_rise_delay.maxlen >= 10)
- final_maxlen = final_rise_delay.maxlen;
if (final_maxlen == final_rise_delay.maxlen) {
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
@@ -1402,16 +1439,21 @@ static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
dev_err(host->dev, "Tune response fail!\n");
return ret;
}
- ret = msdc_tune_data(mmc, opcode);
- if (ret == -EIO)
- dev_err(host->dev, "Tune data fail!\n");
+ if (host->hs400_mode == false) {
+ ret = msdc_tune_data(mmc, opcode);
+ if (ret == -EIO)
+ dev_err(host->dev, "Tune data fail!\n");
+ }
+ host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON);
+ host->saved_tune_para.pad_tune = readl(host->base + MSDC_PAD_TUNE);
return ret;
}
static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct msdc_host *host = mmc_priv(mmc);
+ host->hs400_mode = true;
writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
return 0;
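
Taken together with msdc_execute_tuning() and msdc_set_mclk() above, the HS200-to-HS400 bring-up proceeds roughly as follows (a flow summary, not new code):

/* 1. msdc_prepare_hs400_tuning(): set hs400_mode, program the DS delay;
 * 2. msdc_execute_tuning(): tune the response path only -- the data
 *    scan is skipped because hs400_mode is set (HS400 samples data
 *    via the DS line instead);
 * 3. the result is captured in saved_tune_para and restored by
 *    msdc_set_mclk() whenever sclk > 52MHz; def_tune_para is used
 *    at or below 52MHz.
 */
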
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 3d1ea5e0e..fb3ca8296 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -1065,7 +1065,7 @@ static int mxcmci_probe(struct platform_device *pdev)
if (pdata)
dat3_card_detect = pdata->dat3_card_detect;
- else if (!(mmc->caps & MMC_CAP_NONREMOVABLE)
+ else if (mmc_card_is_removable(mmc)
&& !of_property_read_bool(pdev->dev.of_node, "cd-gpios"))
dat3_card_detect = true;
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index f23d65eb0..be3c49fa7 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1016,14 +1016,16 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
/* Only reconfigure if we have a different burst size */
if (*bp != burst) {
- struct dma_slave_config cfg;
-
- cfg.src_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
- cfg.dst_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
- cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
- cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
- cfg.src_maxburst = burst;
- cfg.dst_maxburst = burst;
+ struct dma_slave_config cfg = {
+ .src_addr = host->phys_base +
+ OMAP_MMC_REG(host, DATA),
+ .dst_addr = host->phys_base +
+ OMAP_MMC_REG(host, DATA),
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
+ .src_maxburst = burst,
+ .dst_maxburst = burst,
+ };
if (dmaengine_slave_config(c, &cfg))
goto use_pio;
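
The designated initializer is more than style: every dma_slave_config member that is not named (direction, device_fc, slave_id, ...) is guaranteed to be zero, whereas the old pattern left them holding stack garbage. In miniature:

/* Illustrative demonstration of the difference. */
static void foo_demo(void)
{
	struct dma_slave_config a;	/* automatic: members indeterminate */
	struct dma_slave_config b = {	/* unnamed members zero-initialized */
		.src_maxburst = 16,
		.dst_maxburst = 16,
	};

	/* b.direction == 0 and b.device_fc == false are guaranteed;
	 * a's members are indeterminate until assigned. */
	(void)a;
	(void)b;
}
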
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 24ebc9a8d..5f2f24a73 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1409,11 +1409,18 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host,
struct mmc_request *req)
{
- struct dma_slave_config cfg;
struct dma_async_tx_descriptor *tx;
int ret = 0, i;
struct mmc_data *data = req->data;
struct dma_chan *chan;
+ struct dma_slave_config cfg = {
+ .src_addr = host->mapbase + OMAP_HSMMC_DATA,
+ .dst_addr = host->mapbase + OMAP_HSMMC_DATA,
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .src_maxburst = data->blksz / 4,
+ .dst_maxburst = data->blksz / 4,
+ };
/* Sanity check: all the SG entries must be aligned by block size. */
for (i = 0; i < data->sg_len; i++) {
@@ -1433,13 +1440,6 @@ static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host,
chan = omap_hsmmc_get_dma_chan(host, data);
- cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA;
- cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA;
- cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- cfg.src_maxburst = data->blksz / 4;
- cfg.dst_maxburst = data->blksz / 4;
-
ret = dmaengine_slave_config(chan, &cfg);
if (ret)
return ret;
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 93137483e..396c9b7e4 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -38,7 +38,6 @@ struct realtek_pci_sdmmc {
struct rtsx_pcr *pcr;
struct mmc_host *mmc;
struct mmc_request *mrq;
- struct workqueue_struct *workq;
#define SDMMC_WORKQ_NAME "rtsx_pci_sdmmc_workq"
struct work_struct work;
@@ -244,7 +243,7 @@ static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
stat_idx = sd_status_index(rsp_type);
if (rsp_type == SD_RSP_TYPE_R1b)
- timeout = 3000;
+ timeout = cmd->busy_timeout ? cmd->busy_timeout : 3000;
if (cmd->opcode == SD_SWITCH_VOLTAGE) {
err = rtsx_pci_write_register(pcr, SD_BUS_STAT,
@@ -885,7 +884,7 @@ static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
if (sd_rw_cmd(mrq->cmd) || sdio_extblock_cmd(mrq->cmd, data))
host->using_cookie = sd_pre_dma_transfer(host, data, false);
- queue_work(host->workq, &host->work);
+ schedule_work(&host->work);
}
static int sd_set_bus_width(struct realtek_pci_sdmmc *host,
@@ -1360,7 +1359,7 @@ static void realtek_init_host(struct realtek_pci_sdmmc *host)
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;
mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED |
MMC_CAP_MMC_HIGHSPEED | MMC_CAP_BUS_WIDTH_TEST |
- MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
+ MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_ERASE;
mmc->caps2 = MMC_CAP2_NO_PRESCAN_POWERUP | MMC_CAP2_FULL_PWR_CYCLE;
mmc->max_current_330 = 400;
mmc->max_current_180 = 800;
@@ -1404,11 +1403,6 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
return -ENOMEM;
host = mmc_priv(mmc);
- host->workq = create_singlethread_workqueue(SDMMC_WORKQ_NAME);
- if (!host->workq) {
- mmc_free_host(mmc);
- return -ENOMEM;
- }
host->pcr = pcr;
host->mmc = mmc;
host->pdev = pdev;
@@ -1462,9 +1456,7 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev)
mmc_remove_host(mmc);
host->eject = true;
- flush_workqueue(host->workq);
- destroy_workqueue(host->workq);
- host->workq = NULL;
+ flush_work(&host->work);
mmc_free_host(mmc);
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 39814f3dc..c531deef3 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1365,7 +1365,7 @@ static struct s3c24xx_mci_pdata s3cmci_def_pdata = {
.no_detect = 1,
};
-#ifdef CONFIG_CPU_FREQ
+#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
static int s3cmci_cpufreq_transition(struct notifier_block *nb,
unsigned long val, void *data)
diff --git a/drivers/mmc/host/s3cmci.h b/drivers/mmc/host/s3cmci.h
index cc2e46cb5..30c2c0dd1 100644
--- a/drivers/mmc/host/s3cmci.h
+++ b/drivers/mmc/host/s3cmci.h
@@ -74,7 +74,7 @@ struct s3cmci_host {
struct dentry *debug_regs;
#endif
-#ifdef CONFIG_CPU_FREQ
+#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
struct notifier_block freq_transition;
#endif
};
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 458ffb763..8fe0756c8 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -43,6 +43,7 @@
#ifdef CONFIG_X86
#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
#include <asm/iosf_mbi.h>
#endif
@@ -126,7 +127,7 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
static bool sdhci_acpi_byt(void)
{
static const struct x86_cpu_id byt[] = {
- { X86_VENDOR_INTEL, 6, 0x37 },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
{}
};
@@ -531,11 +532,6 @@ static int sdhci_acpi_resume(struct device *dev)
return sdhci_resume_host(c->host);
}
-#else
-
-#define sdhci_acpi_suspend NULL
-#define sdhci_acpi_resume NULL
-
#endif
#ifdef CONFIG_PM
@@ -559,8 +555,7 @@ static int sdhci_acpi_runtime_resume(struct device *dev)
#endif
static const struct dev_pm_ops sdhci_acpi_pm_ops = {
- .suspend = sdhci_acpi_suspend,
- .resume = sdhci_acpi_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(sdhci_acpi_suspend, sdhci_acpi_resume)
SET_RUNTIME_PM_OPS(sdhci_acpi_runtime_suspend,
sdhci_acpi_runtime_resume, NULL)
};
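
This works because SET_SYSTEM_SLEEP_PM_OPS() expands to nothing unless CONFIG_PM_SLEEP is set, which is what makes the removed #else stubs redundant. Roughly, the macro has this shape (simplified from include/linux/pm.h):

#ifdef CONFIG_PM_SLEEP
#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
	.suspend = suspend_fn, \
	.resume = resume_fn, \
	.freeze = suspend_fn, \
	.thaw = resume_fn, \
	.poweroff = suspend_fn, \
	.restore = resume_fn,
#else
#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif
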
diff --git a/drivers/mmc/host/sdhci-bcm-kona.c b/drivers/mmc/host/sdhci-bcm-kona.c
index 00a8a40a3..e5c634bdf 100644
--- a/drivers/mmc/host/sdhci-bcm-kona.c
+++ b/drivers/mmc/host/sdhci-bcm-kona.c
@@ -264,12 +264,12 @@ static int sdhci_bcm_kona_probe(struct platform_device *pdev)
}
dev_dbg(dev, "non-removable=%c\n",
- (host->mmc->caps & MMC_CAP_NONREMOVABLE) ? 'Y' : 'N');
+ mmc_card_is_removable(host->mmc) ? 'N' : 'Y');
dev_dbg(dev, "cd_gpio %c, wp_gpio %c\n",
(mmc_gpio_get_cd(host->mmc) != -ENOSYS) ? 'Y' : 'N',
(mmc_gpio_get_ro(host->mmc) != -ENOSYS) ? 'Y' : 'N');
- if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
+ if (!mmc_card_is_removable(host->mmc))
host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
dev_dbg(dev, "is_8bit=%c\n",
@@ -288,7 +288,7 @@ static int sdhci_bcm_kona_probe(struct platform_device *pdev)
}
/* if device is eMMC, emulate card insert right here */
- if (host->mmc->caps & MMC_CAP_NONREMOVABLE) {
+ if (!mmc_card_is_removable(host->mmc)) {
ret = sdhci_bcm_kona_sd_card_emulate(host, 1);
if (ret) {
dev_err(dev,
@@ -326,7 +326,7 @@ err_pltfm_free:
static struct platform_driver sdhci_bcm_kona_driver = {
.driver = {
.name = "sdhci-kona",
- .pm = SDHCI_PLTFM_PMOPS,
+ .pm = &sdhci_pltfm_pmops,
.of_match_table = sdhci_bcm_kona_of_match,
},
.probe = sdhci_bcm_kona_probe,
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
new file mode 100644
index 000000000..cce10fe3e
--- /dev/null
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -0,0 +1,143 @@
+/*
+ * sdhci-brcmstb.c Support for SDHCI on Broadcom BRCMSTB SoCs
+ *
+ * Copyright (C) 2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/mmc/host.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include "sdhci-pltfm.h"
+
+#ifdef CONFIG_PM_SLEEP
+
+static int sdhci_brcmstb_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ int res;
+
+ res = sdhci_suspend_host(host);
+ if (res)
+ return res;
+ clk_disable_unprepare(pltfm_host->clk);
+ return res;
+}
+
+static int sdhci_brcmstb_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ int err;
+
+ err = clk_prepare_enable(pltfm_host->clk);
+ if (err)
+ return err;
+ return sdhci_resume_host(host);
+}
+
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(sdhci_brcmstb_pmops, sdhci_brcmstb_suspend,
+ sdhci_brcmstb_resume);
+
+static const struct sdhci_ops sdhci_brcmstb_ops = {
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static struct sdhci_pltfm_data sdhci_brcmstb_pdata = {
+ .ops = &sdhci_brcmstb_ops,
+};
+
+static int sdhci_brcmstb_probe(struct platform_device *pdev)
+{
+ struct sdhci_host *host;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct clk *clk;
+ int res;
+
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "Clock not found in Device Tree\n");
+ clk = NULL;
+ }
+ res = clk_prepare_enable(clk);
+ if (res)
+ return res;
+
+ host = sdhci_pltfm_init(pdev, &sdhci_brcmstb_pdata, 0);
+ if (IS_ERR(host)) {
+ res = PTR_ERR(host);
+ goto err_clk;
+ }
+
+ /* Enable MMC_CAP2_HC_ERASE_SZ for better max discard calculations */
+ host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ;
+
+ sdhci_get_of_property(pdev);
+ mmc_of_parse(host->mmc);
+
+ /*
+ * Supply the existing CAPS, but clear the UHS modes. This
+ * will allow these modes to be specified by device tree
+ * properties through mmc_of_parse().
+ */
+ host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
+ host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
+ host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
+ SDHCI_SUPPORT_DDR50);
+ host->quirks |= SDHCI_QUIRK_MISSING_CAPS |
+ SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+
+ res = sdhci_add_host(host);
+ if (res)
+ goto err;
+
+ pltfm_host = sdhci_priv(host);
+ pltfm_host->clk = clk;
+ return res;
+
+err:
+ sdhci_pltfm_free(pdev);
+err_clk:
+ clk_disable_unprepare(clk);
+ return res;
+}
+
+static const struct of_device_id sdhci_brcm_of_match[] = {
+ { .compatible = "brcm,bcm7425-sdhci" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match);
+
+static struct platform_driver sdhci_brcmstb_driver = {
+ .driver = {
+ .name = "sdhci-brcmstb",
+ .owner = THIS_MODULE,
+ .pm = &sdhci_brcmstb_pmops,
+ .of_match_table = of_match_ptr(sdhci_brcm_of_match),
+ },
+ .probe = sdhci_brcmstb_probe,
+ .remove = sdhci_pltfm_unregister,
+};
+
+module_platform_driver(sdhci_brcmstb_driver);
+
+MODULE_DESCRIPTION("SDHCI driver for Broadcom BRCMSTB SoCs");
+MODULE_AUTHOR("Broadcom");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-cns3xxx.c b/drivers/mmc/host/sdhci-cns3xxx.c
index 59f2923f8..bd286db7f 100644
--- a/drivers/mmc/host/sdhci-cns3xxx.c
+++ b/drivers/mmc/host/sdhci-cns3xxx.c
@@ -101,7 +101,7 @@ static int sdhci_cns3xxx_probe(struct platform_device *pdev)
static struct platform_driver sdhci_cns3xxx_driver = {
.driver = {
.name = "sdhci-cns3xxx",
- .pm = SDHCI_PLTFM_PMOPS,
+ .pm = &sdhci_pltfm_pmops,
},
.probe = sdhci_cns3xxx_probe,
.remove = sdhci_pltfm_unregister,
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
index 407c21f15..de9f9603d 100644
--- a/drivers/mmc/host/sdhci-dove.c
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -117,7 +117,7 @@ MODULE_DEVICE_TABLE(of, sdhci_dove_of_match_table);
static struct platform_driver sdhci_dove_driver = {
.driver = {
.name = "sdhci-dove",
- .pm = SDHCI_PLTFM_PMOPS,
+ .pm = &sdhci_pltfm_pmops,
.of_match_table = sdhci_dove_of_match_table,
},
.probe = sdhci_dove_probe,
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 9d3ae1f4b..99e0b334f 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -39,11 +39,13 @@
#define ESDHC_VENDOR_SPEC_VSELECT (1 << 1)
#define ESDHC_VENDOR_SPEC_FRC_SDCLK_ON (1 << 8)
#define ESDHC_WTMK_LVL 0x44
+#define ESDHC_WTMK_DEFAULT_VAL 0x10401040
#define ESDHC_MIX_CTRL 0x48
#define ESDHC_MIX_CTRL_DDREN (1 << 3)
#define ESDHC_MIX_CTRL_AC23EN (1 << 7)
#define ESDHC_MIX_CTRL_EXE_TUNE (1 << 22)
#define ESDHC_MIX_CTRL_SMPCLK_SEL (1 << 23)
+#define ESDHC_MIX_CTRL_AUTO_TUNE_EN (1 << 24)
#define ESDHC_MIX_CTRL_FBCLK_SEL (1 << 25)
#define ESDHC_MIX_CTRL_HS400_EN (1 << 26)
/* Bits 3 and 6 are not SDHCI standard definitions */
@@ -75,7 +77,8 @@
#define ESDHC_TUNING_CTRL 0xcc
#define ESDHC_STD_TUNING_EN (1 << 24)
/* NOTE: the minimum valid tuning start tap for mx6sl is 1 */
-#define ESDHC_TUNING_START_TAP 0x1
+#define ESDHC_TUNING_START_TAP_DEFAULT 0x1
+#define ESDHC_TUNING_START_TAP_MASK 0xff
#define ESDHC_TUNING_STEP_MASK 0x00070000
#define ESDHC_TUNING_STEP_SHIFT 16
@@ -299,7 +302,8 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
/* imx6q/dl does not have cap_1 register, fake one */
val = SDHCI_SUPPORT_DDR50 | SDHCI_SUPPORT_SDR104
| SDHCI_SUPPORT_SDR50
- | SDHCI_USE_SDR50_TUNING;
+ | SDHCI_USE_SDR50_TUNING
+ | (SDHCI_TUNING_MODE_3 << SDHCI_RETUNING_MODE_SHIFT);
if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
val |= SDHCI_SUPPORT_HS400;
@@ -469,32 +473,29 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC);
if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
new_val = readl(host->ioaddr + ESDHC_MIX_CTRL);
- if (val & SDHCI_CTRL_TUNED_CLK)
+ if (val & SDHCI_CTRL_TUNED_CLK) {
new_val |= ESDHC_MIX_CTRL_SMPCLK_SEL;
- else
+ new_val |= ESDHC_MIX_CTRL_AUTO_TUNE_EN;
+ } else {
new_val &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
+ new_val &= ~ESDHC_MIX_CTRL_AUTO_TUNE_EN;
+ }
writel(new_val , host->ioaddr + ESDHC_MIX_CTRL);
} else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
u32 v = readl(host->ioaddr + SDHCI_ACMD12_ERR);
u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
- u32 tuning_ctrl;
if (val & SDHCI_CTRL_TUNED_CLK) {
v |= ESDHC_MIX_CTRL_SMPCLK_SEL;
} else {
v &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
m &= ~ESDHC_MIX_CTRL_FBCLK_SEL;
+ m &= ~ESDHC_MIX_CTRL_AUTO_TUNE_EN;
}
if (val & SDHCI_CTRL_EXEC_TUNING) {
v |= ESDHC_MIX_CTRL_EXE_TUNE;
m |= ESDHC_MIX_CTRL_FBCLK_SEL;
- tuning_ctrl = readl(host->ioaddr + ESDHC_TUNING_CTRL);
- tuning_ctrl |= ESDHC_STD_TUNING_EN | ESDHC_TUNING_START_TAP;
- if (imx_data->boarddata.tuning_step) {
- tuning_ctrl &= ~ESDHC_TUNING_STEP_MASK;
- tuning_ctrl |= imx_data->boarddata.tuning_step << ESDHC_TUNING_STEP_SHIFT;
- }
- writel(tuning_ctrl, host->ioaddr + ESDHC_TUNING_CTRL);
+ m |= ESDHC_MIX_CTRL_AUTO_TUNE_EN;
} else {
v &= ~ESDHC_MIX_CTRL_EXE_TUNE;
}
@@ -751,6 +752,7 @@ static void esdhc_post_tuning(struct sdhci_host *host)
reg = readl(host->ioaddr + ESDHC_MIX_CTRL);
reg &= ~ESDHC_MIX_CTRL_EXE_TUNE;
+ reg |= ESDHC_MIX_CTRL_AUTO_TUNE_EN;
writel(reg, host->ioaddr + ESDHC_MIX_CTRL);
}
@@ -838,6 +840,11 @@ static void esdhc_set_strobe_dll(struct sdhci_host *host)
u32 v;
if (host->mmc->actual_clock > ESDHC_STROBE_DLL_CLK_FREQ) {
+ /* disable clock before enabling strobe dll */
+ writel(readl(host->ioaddr + ESDHC_VENDOR_SPEC) &
+ ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
+ host->ioaddr + ESDHC_VENDOR_SPEC);
+
/* force a reset on strobe dll */
writel(ESDHC_STROBE_DLL_CTRL_RESET,
host->ioaddr + ESDHC_STROBE_DLL_CTRL);
@@ -899,6 +906,8 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
m |= ESDHC_MIX_CTRL_DDREN | ESDHC_MIX_CTRL_HS400_EN;
writel(m, host->ioaddr + ESDHC_MIX_CTRL);
imx_data->is_ddr = 1;
+ /* update the clock after enabling DDR so the strobe DLL can lock */
+ host->ops->set_clock(host, host->clock);
esdhc_set_strobe_dll(host);
break;
}
@@ -957,6 +966,62 @@ static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
.ops = &sdhci_esdhc_ops,
};
+static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
+ int tmp;
+
+ if (esdhc_is_usdhc(imx_data)) {
+ /*
+ * The imx6q ROM code will change the default watermark
+ * level setting to something insane. Change it back here.
+ */
+ writel(ESDHC_WTMK_DEFAULT_VAL, host->ioaddr + ESDHC_WTMK_LVL);
+
+ /*
+ * The ROM code clears the burst_length_enable bit if this uSDHC
+ * was chosen to boot the system. Change it back here, otherwise
+ * it hurts performance a lot. This bit enables/disables the
+ * burst length for the external AHB2AXI bridge; it is useful
+ * especially for INCR transfers because, without a burst length
+ * indicator, the AHB2AXI bridge does not know the burst length
+ * in advance, so AHB INCR transfers can only be converted to
+ * singles on the AXI side.
+ */
+ writel(readl(host->ioaddr + SDHCI_HOST_CONTROL)
+ | ESDHC_BURST_LEN_EN_INCR,
+ host->ioaddr + SDHCI_HOST_CONTROL);
+ /*
+ * erratum ERR004536 (ESDHC_FLAG_ERR004536) fix for MX6Q TO1.2
+ * and MX6DL TO1.1; it is harmless for MX6SL
+ */
+ writel(readl(host->ioaddr + 0x6c) | BIT(7),
+ host->ioaddr + 0x6c);
+
+ /* disable DLL_CTRL delay line settings */
+ writel(0x0, host->ioaddr + ESDHC_DLL_CTRL);
+
+ if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
+ tmp = readl(host->ioaddr + ESDHC_TUNING_CTRL);
+ tmp |= ESDHC_STD_TUNING_EN |
+ ESDHC_TUNING_START_TAP_DEFAULT;
+ if (imx_data->boarddata.tuning_start_tap) {
+ tmp &= ~ESDHC_TUNING_START_TAP_MASK;
+ tmp |= imx_data->boarddata.tuning_start_tap;
+ }
+
+ if (imx_data->boarddata.tuning_step) {
+ tmp &= ~ESDHC_TUNING_STEP_MASK;
+ tmp |= imx_data->boarddata.tuning_step
+ << ESDHC_TUNING_STEP_SHIFT;
+ }
+ writel(tmp, host->ioaddr + ESDHC_TUNING_CTRL);
+ }
+ }
+}
+
#ifdef CONFIG_OF
static int
sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
@@ -975,6 +1040,8 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
boarddata->wp_type = ESDHC_WP_GPIO;
of_property_read_u32(np, "fsl,tuning-step", &boarddata->tuning_step);
+ of_property_read_u32(np, "fsl,tuning-start-tap",
+ &boarddata->tuning_start_tap);
if (of_find_property(np, "no-1-8-v", NULL))
boarddata->support_vsel = false;
@@ -1147,58 +1214,27 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
if (IS_ERR(imx_data->pins_default))
dev_warn(mmc_dev(host->mmc), "could not get default state\n");
- host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
-
if (imx_data->socdata->flags & ESDHC_FLAG_ENGCM07207)
/* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */
host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK
| SDHCI_QUIRK_BROKEN_ADMA;
- /*
- * The imx6q ROM code will change the default watermark level setting
- * to something insane. Change it back here.
- */
if (esdhc_is_usdhc(imx_data)) {
- writel(0x10401040, host->ioaddr + ESDHC_WTMK_LVL);
-
host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
host->mmc->caps |= MMC_CAP_1_8V_DDR;
-
- /*
- * ROM code will change the bit burst_length_enable setting
- * to zero if this usdhc is choosed to boot system. Change
- * it back here, otherwise it will impact the performance a
- * lot. This bit is used to enable/disable the burst length
- * for the external AHB2AXI bridge, it's usefully especially
- * for INCR transfer because without burst length indicator,
- * the AHB2AXI bridge does not know the burst length in
- * advance. And without burst length indicator, AHB INCR
- * transfer can only be converted to singles on the AXI side.
- */
- writel(readl(host->ioaddr + SDHCI_HOST_CONTROL)
- | ESDHC_BURST_LEN_EN_INCR,
- host->ioaddr + SDHCI_HOST_CONTROL);
-
if (!(imx_data->socdata->flags & ESDHC_FLAG_HS200))
host->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200;
- /*
- * errata ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
- * TO1.1, it's harmless for MX6SL
- */
- writel(readl(host->ioaddr + 0x6c) | BIT(7),
- host->ioaddr + 0x6c);
+ /* clear tuning bits in case the ROM has already set them */
+ writel(0x0, host->ioaddr + ESDHC_MIX_CTRL);
+ writel(0x0, host->ioaddr + SDHCI_ACMD12_ERR);
+ writel(0x0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
}
if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
sdhci_esdhc_ops.platform_execute_tuning =
esdhc_executing_tuning;
- if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
- writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
- ESDHC_STD_TUNING_EN | ESDHC_TUNING_START_TAP,
- host->ioaddr + ESDHC_TUNING_CTRL);
-
if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536)
host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
@@ -1212,6 +1248,8 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
if (err)
goto disable_clk;
+ sdhci_esdhc_imx_hwinit(host);
+
err = sdhci_add_host(host);
if (err)
goto disable_clk;
@@ -1255,6 +1293,25 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int sdhci_esdhc_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+
+ return sdhci_suspend_host(host);
+}
+
+static int sdhci_esdhc_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+
+ /* re-initialize hw state in case it's lost in low power mode */
+ sdhci_esdhc_imx_hwinit(host);
+
+ return sdhci_resume_host(host);
+}
+#endif
+
#ifdef CONFIG_PM
static int sdhci_esdhc_runtime_suspend(struct device *dev)
{
@@ -1291,7 +1348,7 @@ static int sdhci_esdhc_runtime_resume(struct device *dev)
#endif
static const struct dev_pm_ops sdhci_esdhc_pmops = {
- SET_SYSTEM_SLEEP_PM_OPS(sdhci_pltfm_suspend, sdhci_pltfm_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(sdhci_esdhc_suspend, sdhci_esdhc_resume)
SET_RUNTIME_PM_OPS(sdhci_esdhc_runtime_suspend,
sdhci_esdhc_runtime_resume, NULL)
};
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index 1110f73b0..726246665 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -164,8 +164,17 @@ static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = {
static const struct sdhci_iproc_data iproc_data = {
.pdata = &sdhci_iproc_pltfm_data,
- .caps = 0x05E90000,
- .caps1 = 0x00000064,
+ .caps = ((0x1 << SDHCI_MAX_BLOCK_SHIFT)
+ & SDHCI_MAX_BLOCK_MASK) |
+ SDHCI_CAN_VDD_330 |
+ SDHCI_CAN_VDD_180 |
+ SDHCI_CAN_DO_SUSPEND |
+ SDHCI_CAN_DO_HISPD |
+ SDHCI_CAN_DO_ADMA2 |
+ SDHCI_CAN_DO_SDMA,
+ .caps1 = SDHCI_DRIVER_TYPE_C |
+ SDHCI_DRIVER_TYPE_D |
+ SDHCI_SUPPORT_DDR50,
.mmc_caps = MMC_CAP_1_8V_DDR,
};
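
Decoding the magic numbers shows this conversion is value-preserving: with the SDHCI_* capability bits defined in sdhci.h in this tree, the named form ORs back to the old constants. An illustrative self-check, not part of the driver:

static void __maybe_unused sdhci_iproc_caps_check(void)
{
	/* Named capability bits should reproduce the old magic values */
	u32 caps = ((0x1 << SDHCI_MAX_BLOCK_SHIFT) & SDHCI_MAX_BLOCK_MASK) |
		   SDHCI_CAN_VDD_330 | SDHCI_CAN_VDD_180 |
		   SDHCI_CAN_DO_SUSPEND | SDHCI_CAN_DO_HISPD |
		   SDHCI_CAN_DO_ADMA2 | SDHCI_CAN_DO_SDMA;
	u32 caps1 = SDHCI_DRIVER_TYPE_C | SDHCI_DRIVER_TYPE_D |
		    SDHCI_SUPPORT_DDR50;

	BUG_ON(caps != 0x05E90000);	/* old .caps */
	BUG_ON(caps1 != 0x00000064);	/* old .caps1 */
}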
@@ -251,7 +260,7 @@ static struct platform_driver sdhci_iproc_driver = {
.driver = {
.name = "sdhci-iproc",
.of_match_table = sdhci_iproc_of_match,
- .pm = SDHCI_PLTFM_PMOPS,
+ .pm = &sdhci_pltfm_pmops,
},
.probe = sdhci_iproc_probe,
.remove = sdhci_pltfm_unregister,
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 0653fe730..8ef44a2a2 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -32,6 +32,21 @@
#define CORE_POWER 0x0
#define CORE_SW_RST BIT(7)
+#define CORE_PWRCTL_STATUS 0xdc
+#define CORE_PWRCTL_MASK 0xe0
+#define CORE_PWRCTL_CLEAR 0xe4
+#define CORE_PWRCTL_CTL 0xe8
+#define CORE_PWRCTL_BUS_OFF BIT(0)
+#define CORE_PWRCTL_BUS_ON BIT(1)
+#define CORE_PWRCTL_IO_LOW BIT(2)
+#define CORE_PWRCTL_IO_HIGH BIT(3)
+#define CORE_PWRCTL_BUS_SUCCESS BIT(0)
+#define CORE_PWRCTL_IO_SUCCESS BIT(2)
+#define REQ_BUS_OFF BIT(0)
+#define REQ_BUS_ON BIT(1)
+#define REQ_IO_LOW BIT(2)
+#define REQ_IO_HIGH BIT(3)
+#define INT_MASK 0xf
#define MAX_PHASES 16
#define CORE_DLL_LOCK BIT(7)
#define CORE_DLL_EN BIT(16)
@@ -56,6 +71,7 @@
struct sdhci_msm_host {
struct platform_device *pdev;
void __iomem *core_mem; /* MSM SDCC mapped address */
+ int pwr_irq; /* power irq */
struct clk *clk; /* main SD/MMC bus clock */
struct clk *pclk; /* SDHC peripheral bus clock */
struct clk *bus_clk; /* SDHC bus voter clock */
@@ -410,6 +426,85 @@ retry:
return rc;
}
+static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
+ unsigned int uhs)
+{
+ struct mmc_host *mmc = host->mmc;
+ u16 ctrl_2;
+
+ ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ /* Select Bus Speed Mode for host */
+ ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+ switch (uhs) {
+ case MMC_TIMING_UHS_SDR12:
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
+ break;
+ case MMC_TIMING_UHS_SDR25:
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
+ break;
+ case MMC_TIMING_UHS_SDR50:
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
+ break;
+ case MMC_TIMING_MMC_HS200:
+ case MMC_TIMING_UHS_SDR104:
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
+ break;
+ case MMC_TIMING_UHS_DDR50:
+ case MMC_TIMING_MMC_DDR52:
+ ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
+ break;
+ }
+
+ /*
+ * When clock frequency is less than 100MHz, the feedback clock must be
+ * provided and DLL must not be used so that tuning can be skipped. To
+ * provide feedback clock, the mode selection can be any value less
+ * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
+ */
+ if (host->clock <= 100000000 &&
+ (uhs == MMC_TIMING_MMC_HS400 ||
+ uhs == MMC_TIMING_MMC_HS200 ||
+ uhs == MMC_TIMING_UHS_SDR104))
+ ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+
+ dev_dbg(mmc_dev(mmc), "%s: clock=%u uhs=%u ctrl_2=0x%x\n",
+ mmc_hostname(host->mmc), host->clock, uhs, ctrl_2);
+ sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+}
+
+static void sdhci_msm_voltage_switch(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ u32 irq_status, irq_ack = 0;
+
+ irq_status = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
+ irq_status &= INT_MASK;
+
+ writel_relaxed(irq_status, msm_host->core_mem + CORE_PWRCTL_CLEAR);
+
+ if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
+ irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
+ if (irq_status & (CORE_PWRCTL_IO_LOW | CORE_PWRCTL_IO_HIGH))
+ irq_ack |= CORE_PWRCTL_IO_SUCCESS;
+
+ /*
+ * The driver has to acknowledge the interrupt, switch voltages and
+ * report back to this register whether it succeeded. The voltage
+ * switches are handled by the sdhci core, so just report success.
+ */
+ writel_relaxed(irq_ack, msm_host->core_mem + CORE_PWRCTL_CTL);
+}
+
+static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
+{
+ struct sdhci_host *host = (struct sdhci_host *)data;
+
+ sdhci_msm_voltage_switch(host);
+
+ return IRQ_HANDLED;
+}
+
static const struct of_device_id sdhci_msm_dt_match[] = {
{ .compatible = "qcom,sdhci-msm-v4" },
{},
@@ -422,11 +517,13 @@ static const struct sdhci_ops sdhci_msm_ops = {
.reset = sdhci_reset,
.set_clock = sdhci_set_clock,
.set_bus_width = sdhci_set_bus_width,
- .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .set_uhs_signaling = sdhci_msm_set_uhs_signaling,
+ .voltage_switch = sdhci_msm_voltage_switch,
};
static const struct sdhci_pltfm_data sdhci_msm_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+ SDHCI_QUIRK_NO_CARD_NO_RESET |
SDHCI_QUIRK_SINGLE_POWER_WRITE,
.ops = &sdhci_msm_ops,
};
@@ -473,7 +570,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
msm_host->pclk = devm_clk_get(&pdev->dev, "iface");
if (IS_ERR(msm_host->pclk)) {
ret = PTR_ERR(msm_host->pclk);
- dev_err(&pdev->dev, "Perpheral clk setup failed (%d)\n", ret);
+ dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret);
goto bus_clk_disable;
}
@@ -545,6 +642,22 @@ static int sdhci_msm_probe(struct platform_device *pdev)
CORE_VENDOR_SPEC_CAPABILITIES0);
}
+ /* Setup IRQ for handling power/voltage tasks with PMIC */
+ msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
+ if (msm_host->pwr_irq < 0) {
+ dev_err(&pdev->dev, "Get pwr_irq failed (%d)\n",
+ msm_host->pwr_irq);
+ ret = msm_host->pwr_irq;
+ goto clk_disable;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
+ sdhci_msm_pwr_irq, IRQF_ONESHOT,
+ dev_name(&pdev->dev), host);
+ if (ret) {
+ dev_err(&pdev->dev, "Request IRQ failed (%d)\n", ret);
+ goto clk_disable;
+ }
+
ret = sdhci_add_host(host);
if (ret)
goto clk_disable;
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index b6f4c1d41..e0f193f7e 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -19,27 +19,136 @@
* your option) any later version.
*/
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/phy/phy.h>
+#include <linux/regmap.h>
#include "sdhci-pltfm.h"
#define SDHCI_ARASAN_CLK_CTRL_OFFSET 0x2c
+#define SDHCI_ARASAN_VENDOR_REGISTER 0x78
+#define VENDOR_ENHANCED_STROBE BIT(0)
#define CLK_CTRL_TIMEOUT_SHIFT 16
#define CLK_CTRL_TIMEOUT_MASK (0xf << CLK_CTRL_TIMEOUT_SHIFT)
#define CLK_CTRL_TIMEOUT_MIN_EXP 13
+/*
+ * On some SoCs the syscon area has a feature where the upper 16-bits of
+ * each 32-bit register act as a write mask for the lower 16-bits. This allows
+ * atomic updates of the register without locking. This macro is used on SoCs
+ * that have that feature.
+ */
+#define HIWORD_UPDATE(val, mask, shift) \
+ ((val) << (shift) | (mask) << ((shift) + 16))
+
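
A worked expansion makes the write-mask trick concrete (values are illustrative): updating the byte at bits 15:8 to 0xc8 needs no read-modify-write because the high halfword tells the hardware which low bits to latch.

/*
 * HIWORD_UPDATE(0xc8, 0xff, 8)
 *   == (0xc8 << 8) | (0xff << (8 + 16))
 *   == 0x0000c800 | 0xff000000
 *   == 0xff00c800
 * Only bits 15:8 of the register change; all other bits are masked off.
 */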
+/**
+ * struct sdhci_arasan_soc_ctl_field - Field used in sdhci_arasan_soc_ctl_map
+ *
+ * @reg: Offset within the syscon of the register containing this field
+ * @width: Number of bits for this field
+ * @shift: Bit offset within @reg of this field (or -1 if not avail)
+ */
+struct sdhci_arasan_soc_ctl_field {
+ u32 reg;
+ u16 width;
+ s16 shift;
+};
+
+/**
+ * struct sdhci_arasan_soc_ctl_map - Map in syscon to corecfg registers
+ *
+ * It's up to the licensee of the Arasan IP block to make these available
+ * somewhere if needed. Presumably these will be scattered somewhere that's
+ * accessible via the syscon API.
+ *
+ * @baseclkfreq: Where to find corecfg_baseclkfreq
+ * @hiword_update: If true, use HIWORD_UPDATE to access the syscon
+ */
+struct sdhci_arasan_soc_ctl_map {
+ struct sdhci_arasan_soc_ctl_field baseclkfreq;
+ bool hiword_update;
+};
+
/**
* struct sdhci_arasan_data
- * @clk_ahb: Pointer to the AHB clock
- * @phy: Pointer to the generic phy
+ * @host: Pointer to the main SDHCI host structure.
+ * @clk_ahb: Pointer to the AHB clock
+ * @phy: Pointer to the generic phy
+ * @sdcardclk_hw: Struct for the clock we might provide to a PHY.
+ * @sdcardclk: Pointer to normal 'struct clock' for sdcardclk_hw.
+ * @soc_ctl_base: Pointer to regmap for syscon for soc_ctl registers.
+ * @soc_ctl_map: Map to get offsets into soc_ctl registers.
*/
struct sdhci_arasan_data {
+ struct sdhci_host *host;
struct clk *clk_ahb;
struct phy *phy;
+
+ struct clk_hw sdcardclk_hw;
+ struct clk *sdcardclk;
+
+ struct regmap *soc_ctl_base;
+ const struct sdhci_arasan_soc_ctl_map *soc_ctl_map;
+};
+
+static const struct sdhci_arasan_soc_ctl_map rk3399_soc_ctl_map = {
+ .baseclkfreq = { .reg = 0xf000, .width = 8, .shift = 8 },
+ .hiword_update = true,
};
+/**
+ * sdhci_arasan_syscon_write - Write to a field in soc_ctl registers
+ *
+ * This function allows writing to fields in sdhci_arasan_soc_ctl_map.
+ * Note that if a field is specified as not available (shift < 0) then
+ * this function will silently return an error code. It will be noisy
+ * and print an error for any other (unexpected) failure.
+ *
+ * @host: The sdhci_host
+ * @fld: The field to write to
+ * @val: The value to write
+ */
+static int sdhci_arasan_syscon_write(struct sdhci_host *host,
+ const struct sdhci_arasan_soc_ctl_field *fld,
+ u32 val)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ struct regmap *soc_ctl_base = sdhci_arasan->soc_ctl_base;
+ u32 reg = fld->reg;
+ u16 width = fld->width;
+ s16 shift = fld->shift;
+ int ret;
+
+ /*
+ * Silently return errors for shift < 0 so caller doesn't have
+ * to check for fields which are optional. For required
+ * fields the caller needs to do something special anyway.
+ */
+ if (shift < 0)
+ return -EINVAL;
+
+ if (sdhci_arasan->soc_ctl_map->hiword_update)
+ ret = regmap_write(soc_ctl_base, reg,
+ HIWORD_UPDATE(val, GENMASK(width, 0),
+ shift));
+ else
+ ret = regmap_update_bits(soc_ctl_base, reg,
+ GENMASK(shift + width, shift),
+ val << shift);
+
+ /* Yell about (unexpected) regmap errors */
+ if (ret)
+ pr_warn("%s: Regmap write fail: %d\n",
+ mmc_hostname(host->mmc), ret);
+
+ return ret;
+}
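
Tracing the function for the rk3399 map above (reg 0xf000, width 8, shift 8, hiword_update set), a baseclkfreq write of, say, 200 MHz reduces to a single masked regmap write; a sketch of what the call expands to:

/* sdhci_arasan_syscon_write(host, &soc_ctl_map->baseclkfreq, 200)
 * takes the hiword_update branch and is equivalent to: */
regmap_write(soc_ctl_base, 0xf000,
	     HIWORD_UPDATE(200, GENMASK(8, 0), 8));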
+
static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host)
{
u32 div;
@@ -79,6 +188,21 @@ static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
}
}
+static void sdhci_arasan_hs400_enhanced_strobe(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ u32 vendor;
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ vendor = readl(host->ioaddr + SDHCI_ARASAN_VENDOR_REGISTER);
+ if (ios->enhanced_strobe)
+ vendor |= VENDOR_ENHANCED_STROBE;
+ else
+ vendor &= ~VENDOR_ENHANCED_STROBE;
+
+ writel(vendor, host->ioaddr + SDHCI_ARASAN_VENDOR_REGISTER);
+}
+
static struct sdhci_ops sdhci_arasan_ops = {
.set_clock = sdhci_arasan_set_clock,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
@@ -172,9 +296,168 @@ static int sdhci_arasan_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(sdhci_arasan_dev_pm_ops, sdhci_arasan_suspend,
sdhci_arasan_resume);
+static const struct of_device_id sdhci_arasan_of_match[] = {
+ /* SoC-specific compatible strings w/ soc_ctl_map */
+ {
+ .compatible = "rockchip,rk3399-sdhci-5.1",
+ .data = &rk3399_soc_ctl_map,
+ },
+
+ /* Generic compatible below here */
+ { .compatible = "arasan,sdhci-8.9a" },
+ { .compatible = "arasan,sdhci-5.1" },
+ { .compatible = "arasan,sdhci-4.9a" },
+
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sdhci_arasan_of_match);
+
+/**
+ * sdhci_arasan_sdcardclk_recalc_rate - Return the card clock rate
+ *
+ * Return the current actual rate of the SD card clock. This can be used
+ * to communicate with out PHY.
+ *
+ * @hw: Pointer to the hardware clock structure.
+ * @parent_rate: The parent rate (should be rate of clk_xin).
+ * Returns the card clock rate.
+ */
+static unsigned long sdhci_arasan_sdcardclk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct sdhci_arasan_data *sdhci_arasan =
+ container_of(hw, struct sdhci_arasan_data, sdcardclk_hw);
+ struct sdhci_host *host = sdhci_arasan->host;
+
+ return host->mmc->actual_clock;
+}
+
+static const struct clk_ops arasan_sdcardclk_ops = {
+ .recalc_rate = sdhci_arasan_sdcardclk_recalc_rate,
+};
+
+/**
+ * sdhci_arasan_update_baseclkfreq - Set corecfg_baseclkfreq
+ *
+ * The corecfg_baseclkfreq is supposed to contain the MHz of clk_xin. This
+ * function can be used to make that happen.
+ *
+ * NOTES:
+ * - Many existing devices don't seem to do this and work fine. To keep
+ * compatibility for old hardware where the device tree doesn't provide a
+ * register map, this function is a noop if a soc_ctl_map hasn't been provided
+ * for this platform.
+ * - It's assumed that clk_xin is not dynamic and that we use the SDHCI divider
+ * to achieve lower clock rates. That means that this function is called once
+ * at probe time and never called again.
+ *
+ * @host: The sdhci_host
+ */
+static void sdhci_arasan_update_baseclkfreq(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ const struct sdhci_arasan_soc_ctl_map *soc_ctl_map =
+ sdhci_arasan->soc_ctl_map;
+ u32 mhz = DIV_ROUND_CLOSEST(clk_get_rate(pltfm_host->clk), 1000000);
+
+ /* Having a map is optional */
+ if (!soc_ctl_map)
+ return;
+
+ /* If we have a map, we expect to have a syscon */
+ if (!sdhci_arasan->soc_ctl_base) {
+ pr_warn("%s: Have regmap, but no soc-ctl-syscon\n",
+ mmc_hostname(host->mmc));
+ return;
+ }
+
+ sdhci_arasan_syscon_write(host, &soc_ctl_map->baseclkfreq, mhz);
+}
+
+/**
+ * sdhci_arasan_register_sdclk - Register the sdclk for a PHY to use
+ *
+ * Some PHY devices need to know what the actual card clock is. In order for
+ * them to find out, we'll provide a clock through the common clock framework
+ * for them to query.
+ *
+ * Note: without seriously re-architecting SDHCI's clock code and testing on
+ * all platforms, there's no way to create a totally beautiful clock here
+ * with all clock ops implemented. Instead, we'll just create a clock that can
+ * be queried and set the CLK_GET_RATE_NOCACHE attribute to tell common clock
+ * framework that we're doing things behind its back. This should be sufficient
+ * to create nice clean device tree bindings and later (if needed) we can try
+ * re-architecting SDHCI if we see some benefit to it.
+ *
+ * @sdhci_arasan: Our private data structure.
+ * @clk_xin: Pointer to the functional clock
+ * @dev: Pointer to our struct device.
+ * Returns 0 on success and error value on error
+ */
+static int sdhci_arasan_register_sdclk(struct sdhci_arasan_data *sdhci_arasan,
+ struct clk *clk_xin,
+ struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct clk_init_data sdcardclk_init;
+ const char *parent_clk_name;
+ int ret;
+
+ /* Providing a clock to the PHY is optional; no error if missing */
+ if (!of_find_property(np, "#clock-cells", NULL))
+ return 0;
+
+ ret = of_property_read_string_index(np, "clock-output-names", 0,
+ &sdcardclk_init.name);
+ if (ret) {
+ dev_err(dev, "DT has #clock-cells but no clock-output-names\n");
+ return ret;
+ }
+
+ parent_clk_name = __clk_get_name(clk_xin);
+ sdcardclk_init.parent_names = &parent_clk_name;
+ sdcardclk_init.num_parents = 1;
+ sdcardclk_init.flags = CLK_GET_RATE_NOCACHE;
+ sdcardclk_init.ops = &arasan_sdcardclk_ops;
+
+ sdhci_arasan->sdcardclk_hw.init = &sdcardclk_init;
+ sdhci_arasan->sdcardclk =
+ devm_clk_register(dev, &sdhci_arasan->sdcardclk_hw);
+ sdhci_arasan->sdcardclk_hw.init = NULL;
+
+ ret = of_clk_add_provider(np, of_clk_src_simple_get,
+ sdhci_arasan->sdcardclk);
+ if (ret)
+ dev_err(dev, "Failed to add clock provider\n");
+
+ return ret;
+}
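
On the consumer side, a PHY driver picks this clock up through the ordinary clk API; CLK_GET_RATE_NOCACHE makes every query re-read the live rate. A hypothetical consumer, with an assumed DT clock name:

static int foo_phy_probe(struct device *dev)
{
	/* Hypothetical consumer; the "emmcclk" name is illustrative */
	struct clk *cardclk = devm_clk_get(dev, "emmcclk");

	if (!IS_ERR(cardclk)) {
		/* NOCACHE: reflects host->mmc->actual_clock at call time */
		dev_info(dev, "card clock: %lu Hz\n", clk_get_rate(cardclk));
	}

	return 0;
}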
+
+/**
+ * sdhci_arasan_unregister_sdclk - Undoes sdhci_arasan_register_sdclk()
+ *
+ * Should be called any time we're exiting and sdhci_arasan_register_sdclk()
+ * returned success.
+ *
+ * @dev: Pointer to our struct device.
+ */
+static void sdhci_arasan_unregister_sdclk(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+
+ if (!of_find_property(np, "#clock-cells", NULL))
+ return;
+
+ of_clk_del_provider(dev->of_node);
+}
+
static int sdhci_arasan_probe(struct platform_device *pdev)
{
int ret;
+ const struct of_device_id *match;
+ struct device_node *node;
struct clk *clk_xin;
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
@@ -187,6 +470,24 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
pltfm_host = sdhci_priv(host);
sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ sdhci_arasan->host = host;
+
+ match = of_match_node(sdhci_arasan_of_match, pdev->dev.of_node);
+ sdhci_arasan->soc_ctl_map = match->data;
+
+ node = of_parse_phandle(pdev->dev.of_node, "arasan,soc-ctl-syscon", 0);
+ if (node) {
+ sdhci_arasan->soc_ctl_base = syscon_node_to_regmap(node);
+ of_node_put(node);
+
+ if (IS_ERR(sdhci_arasan->soc_ctl_base)) {
+ ret = PTR_ERR(sdhci_arasan->soc_ctl_base);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Can't get syscon: %d\n",
+ ret);
+ goto err_pltfm_free;
+ }
+ }
sdhci_arasan->clk_ahb = devm_clk_get(&pdev->dev, "clk_ahb");
if (IS_ERR(sdhci_arasan->clk_ahb)) {
@@ -217,10 +518,16 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
sdhci_get_of_property(pdev);
pltfm_host->clk = clk_xin;
+ sdhci_arasan_update_baseclkfreq(host);
+
+ ret = sdhci_arasan_register_sdclk(sdhci_arasan, clk_xin, &pdev->dev);
+ if (ret)
+ goto clk_disable_all;
+
ret = mmc_of_parse(host->mmc);
if (ret) {
dev_err(&pdev->dev, "parsing dt failed (%u)\n", ret);
- goto clk_disable_all;
+ goto unreg_clk;
}
sdhci_arasan->phy = ERR_PTR(-ENODEV);
@@ -231,13 +538,13 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
if (IS_ERR(sdhci_arasan->phy)) {
ret = PTR_ERR(sdhci_arasan->phy);
dev_err(&pdev->dev, "No phy for arasan,sdhci-5.1.\n");
- goto clk_disable_all;
+ goto unreg_clk;
}
ret = phy_init(sdhci_arasan->phy);
if (ret < 0) {
dev_err(&pdev->dev, "phy_init err.\n");
- goto clk_disable_all;
+ goto unreg_clk;
}
ret = phy_power_on(sdhci_arasan->phy);
@@ -245,6 +552,9 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "phy_power_on err.\n");
goto err_phy_power;
}
+
+ host->mmc_host_ops.hs400_enhanced_strobe =
+ sdhci_arasan_hs400_enhanced_strobe;
}
ret = sdhci_add_host(host);
@@ -259,6 +569,8 @@ err_add_host:
err_phy_power:
if (!IS_ERR(sdhci_arasan->phy))
phy_exit(sdhci_arasan->phy);
+unreg_clk:
+ sdhci_arasan_unregister_sdclk(&pdev->dev);
clk_disable_all:
clk_disable_unprepare(clk_xin);
clk_dis_ahb:
@@ -281,6 +593,8 @@ static int sdhci_arasan_remove(struct platform_device *pdev)
phy_exit(sdhci_arasan->phy);
}
+ sdhci_arasan_unregister_sdclk(&pdev->dev);
+
ret = sdhci_pltfm_unregister(pdev);
clk_disable_unprepare(clk_ahb);
@@ -288,14 +602,6 @@ static int sdhci_arasan_remove(struct platform_device *pdev)
return ret;
}
-static const struct of_device_id sdhci_arasan_of_match[] = {
- { .compatible = "arasan,sdhci-8.9a" },
- { .compatible = "arasan,sdhci-5.1" },
- { .compatible = "arasan,sdhci-4.9a" },
- { }
-};
-MODULE_DEVICE_TABLE(of, sdhci_arasan_of_match);
-
static struct platform_driver sdhci_arasan_driver = {
.driver = {
.name = "sdhci-arasan",
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index d4cef713d..a9b7fc06c 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -288,7 +288,7 @@ static int sdhci_at91_probe(struct platform_device *pdev)
* Disable SDHCI_QUIRK_BROKEN_CARD_DETECTION to be sure nobody tries
* to enable polling via device tree with broken-cd property.
*/
- if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) &&
+ if (mmc_card_is_removable(host->mmc) &&
mmc_gpio_get_cd(host->mmc) < 0) {
host->mmc->caps |= MMC_CAP_NEEDS_POLL;
host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 3f34d354f..239be2fde 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -481,7 +481,7 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask)
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static u32 esdhc_proctl;
static int esdhc_of_suspend(struct device *dev)
{
@@ -504,16 +504,12 @@ static int esdhc_of_resume(struct device *dev)
}
return ret;
}
-
-static const struct dev_pm_ops esdhc_pmops = {
- .suspend = esdhc_of_suspend,
- .resume = esdhc_of_resume,
-};
-#define ESDHC_PMOPS (&esdhc_pmops)
-#else
-#define ESDHC_PMOPS NULL
#endif
+static SIMPLE_DEV_PM_OPS(esdhc_of_dev_pm_ops,
+ esdhc_of_suspend,
+ esdhc_of_resume);
+
static const struct sdhci_ops sdhci_esdhc_be_ops = {
.read_l = esdhc_be_readl,
.read_w = esdhc_be_readw,
@@ -657,7 +653,7 @@ static struct platform_driver sdhci_esdhc_driver = {
.driver = {
.name = "sdhci-esdhc",
.of_match_table = sdhci_esdhc_of_match,
- .pm = ESDHC_PMOPS,
+ .pm = &esdhc_of_dev_pm_ops,
},
.probe = sdhci_esdhc_probe,
.remove = sdhci_pltfm_unregister,
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
index 4079a96ad..ac00c5efb 100644
--- a/drivers/mmc/host/sdhci-of-hlwd.c
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -85,7 +85,7 @@ static struct platform_driver sdhci_hlwd_driver = {
.driver = {
.name = "sdhci-hlwd",
.of_match_table = sdhci_hlwd_of_match,
- .pm = SDHCI_PLTFM_PMOPS,
+ .pm = &sdhci_pltfm_pmops,
},
.probe = sdhci_hlwd_probe,
.remove = sdhci_pltfm_unregister,
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index a4dbf7421..897cfd24c 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -419,13 +419,13 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
};
/* Define Host controllers for Intel Merrifield platform */
-#define INTEL_MRFL_EMMC_0 0
-#define INTEL_MRFL_EMMC_1 1
+#define INTEL_MRFLD_EMMC_0 0
+#define INTEL_MRFLD_EMMC_1 1
-static int intel_mrfl_mmc_probe_slot(struct sdhci_pci_slot *slot)
+static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot)
{
- if ((PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFL_EMMC_0) &&
- (PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFL_EMMC_1))
+ if ((PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFLD_EMMC_0) &&
+ (PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFLD_EMMC_1))
/* SD support is not ready yet */
return -ENODEV;
@@ -435,12 +435,12 @@ static int intel_mrfl_mmc_probe_slot(struct sdhci_pci_slot *slot)
return 0;
}
-static const struct sdhci_pci_fixes sdhci_intel_mrfl_mmc = {
+static const struct sdhci_pci_fixes sdhci_intel_mrfld_mmc = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.quirks2 = SDHCI_QUIRK2_BROKEN_HS200 |
SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
.allow_runtime_pm = true,
- .probe_slot = intel_mrfl_mmc_probe_slot,
+ .probe_slot = intel_mrfld_mmc_probe_slot,
};
/* O2Micro extra registers */
@@ -1104,10 +1104,10 @@ static const struct pci_device_id pci_ids[] = {
{
.vendor = PCI_VENDOR_ID_INTEL,
- .device = PCI_DEVICE_ID_INTEL_MRFL_MMC,
+ .device = PCI_DEVICE_ID_INTEL_MRFLD_MMC,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
- .driver_data = (kernel_ulong_t)&sdhci_intel_mrfl_mmc,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mrfld_mmc,
},
{
@@ -1413,8 +1413,7 @@ static const struct sdhci_ops sdhci_pci_ops = {
* *
\*****************************************************************************/
-#ifdef CONFIG_PM
-
+#ifdef CONFIG_PM_SLEEP
static int sdhci_pci_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -1496,7 +1495,9 @@ static int sdhci_pci_resume(struct device *dev)
return 0;
}
+#endif
+#ifdef CONFIG_PM
static int sdhci_pci_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -1562,17 +1563,10 @@ static int sdhci_pci_runtime_resume(struct device *dev)
return 0;
}
-
-#else /* CONFIG_PM */
-
-#define sdhci_pci_suspend NULL
-#define sdhci_pci_resume NULL
-
-#endif /* CONFIG_PM */
+#endif
static const struct dev_pm_ops sdhci_pci_pm_ops = {
- .suspend = sdhci_pci_suspend,
- .resume = sdhci_pci_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(sdhci_pci_suspend, sdhci_pci_resume)
SET_RUNTIME_PM_OPS(sdhci_pci_runtime_suspend,
sdhci_pci_runtime_resume, NULL)
};
@@ -1760,11 +1754,12 @@ static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)
static void sdhci_pci_runtime_pm_allow(struct device *dev)
{
- pm_runtime_put_noidle(dev);
- pm_runtime_allow(dev);
+ pm_suspend_ignore_children(dev, 1);
pm_runtime_set_autosuspend_delay(dev, 50);
pm_runtime_use_autosuspend(dev);
- pm_suspend_ignore_children(dev, 1);
+ pm_runtime_allow(dev);
+ /* Stay active until mmc core scans for a card */
+ pm_runtime_put_noidle(dev);
}
static void sdhci_pci_runtime_pm_forbid(struct device *dev)
@@ -1810,15 +1805,13 @@ static int sdhci_pci_probe(struct pci_dev *pdev,
return -ENODEV;
}
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret)
return ret;
- chip = kzalloc(sizeof(struct sdhci_pci_chip), GFP_KERNEL);
- if (!chip) {
- ret = -ENOMEM;
- goto err;
- }
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
chip->pdev = pdev;
chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data;
@@ -1834,7 +1827,7 @@ static int sdhci_pci_probe(struct pci_dev *pdev,
if (chip->fixes && chip->fixes->probe) {
ret = chip->fixes->probe(chip);
if (ret)
- goto free;
+ return ret;
}
slots = chip->num_slots; /* Quirk may have changed this */
@@ -1844,8 +1837,7 @@ static int sdhci_pci_probe(struct pci_dev *pdev,
if (IS_ERR(slot)) {
for (i--; i >= 0; i--)
sdhci_pci_remove_slot(chip->slots[i]);
- ret = PTR_ERR(slot);
- goto free;
+ return PTR_ERR(slot);
}
chip->slots[i] = slot;
@@ -1855,35 +1847,18 @@ static int sdhci_pci_probe(struct pci_dev *pdev,
sdhci_pci_runtime_pm_allow(&pdev->dev);
return 0;
-
-free:
- pci_set_drvdata(pdev, NULL);
- kfree(chip);
-
-err:
- pci_disable_device(pdev);
- return ret;
}
static void sdhci_pci_remove(struct pci_dev *pdev)
{
int i;
- struct sdhci_pci_chip *chip;
+ struct sdhci_pci_chip *chip = pci_get_drvdata(pdev);
- chip = pci_get_drvdata(pdev);
-
- if (chip) {
- if (chip->allow_runtime_pm)
- sdhci_pci_runtime_pm_forbid(&pdev->dev);
-
- for (i = 0; i < chip->num_slots; i++)
- sdhci_pci_remove_slot(chip->slots[i]);
-
- pci_set_drvdata(pdev, NULL);
- kfree(chip);
- }
+ if (chip->allow_runtime_pm)
+ sdhci_pci_runtime_pm_forbid(&pdev->dev);
- pci_disable_device(pdev);
+ for (i = 0; i < chip->num_slots; i++)
+ sdhci_pci_remove_slot(chip->slots[i]);
}
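
The probe/remove rewrite above leans on managed resources: pcim_enable_device() and devm_kzalloc() bind the enable and the allocation to the device lifetime, which is why the free:/err: unwind labels disappear and remove() may assume drvdata is valid. The general shape of the pattern, as a sketch with placeholder names:

struct foo_priv { int dummy; };

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* Sketch: devm_* resources are released automatically, in
	 * reverse order, on probe failure or device unbind. */
	struct foo_priv *priv;
	int ret;

	ret = pcim_enable_device(pdev);	/* auto-disabled on detach */
	if (ret)
		return ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;		/* nothing to unwind by hand */

	pci_set_drvdata(pdev, priv);
	return 0;
}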
static struct pci_driver sdhci_driver = {
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 89e715168..7e0788712 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -14,7 +14,7 @@
#define PCI_DEVICE_ID_INTEL_BSW_EMMC 0x2294
#define PCI_DEVICE_ID_INTEL_BSW_SDIO 0x2295
#define PCI_DEVICE_ID_INTEL_BSW_SD 0x2296
-#define PCI_DEVICE_ID_INTEL_MRFL_MMC 0x1190
+#define PCI_DEVICE_ID_INTEL_MRFLD_MMC 0x1190
#define PCI_DEVICE_ID_INTEL_CLV_SDIO0 0x08f9
#define PCI_DEVICE_ID_INTEL_CLV_SDIO1 0x08fa
#define PCI_DEVICE_ID_INTEL_CLV_SDIO2 0x08fb
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 64f287a03..1d17dcfc3 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -215,29 +215,26 @@ int sdhci_pltfm_unregister(struct platform_device *pdev)
}
EXPORT_SYMBOL_GPL(sdhci_pltfm_unregister);
-#ifdef CONFIG_PM
-int sdhci_pltfm_suspend(struct device *dev)
+#ifdef CONFIG_PM_SLEEP
+static int sdhci_pltfm_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
return sdhci_suspend_host(host);
}
-EXPORT_SYMBOL_GPL(sdhci_pltfm_suspend);
-int sdhci_pltfm_resume(struct device *dev)
+static int sdhci_pltfm_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
return sdhci_resume_host(host);
}
-EXPORT_SYMBOL_GPL(sdhci_pltfm_resume);
+#endif
const struct dev_pm_ops sdhci_pltfm_pmops = {
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(sdhci_pltfm_suspend, sdhci_pltfm_resume)
};
EXPORT_SYMBOL_GPL(sdhci_pltfm_pmops);
-#endif /* CONFIG_PM */
static int __init sdhci_pltfm_drv_init(void)
{
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index d38053bf9..3280f2077 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -109,13 +109,6 @@ static inline void *sdhci_pltfm_priv(struct sdhci_pltfm_host *host)
return (void *)host->private;
}
-#ifdef CONFIG_PM
-extern int sdhci_pltfm_suspend(struct device *dev);
-extern int sdhci_pltfm_resume(struct device *dev);
extern const struct dev_pm_ops sdhci_pltfm_pmops;
-#define SDHCI_PLTFM_PMOPS (&sdhci_pltfm_pmops)
-#else
-#define SDHCI_PLTFM_PMOPS NULL
-#endif
#endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
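
Several hunks in this series apply the same conversion: the per-driver "#define FOO_PMOPS NULL" scaffolding goes away because SET_SYSTEM_SLEEP_PM_OPS()/SIMPLE_DEV_PM_OPS() expand to empty initializers when CONFIG_PM_SLEEP is off, while the dev_pm_ops struct itself always exists and can be referenced unconditionally. The resulting idiom, sketched with placeholder names:

#ifdef CONFIG_PM_SLEEP
/* Guarded so the callbacks don't trigger unused-function warnings when
 * CONFIG_PM_SLEEP is off; the macro won't reference them in that case. */
static int foo_suspend(struct device *dev)
{
	return sdhci_suspend_host(dev_get_drvdata(dev));
}

static int foo_resume(struct device *dev)
{
	return sdhci_resume_host(dev_get_drvdata(dev));
}
#endif

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

/* Use site needs no #ifdef: .driver.pm = &foo_pm_ops */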
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
index 1d8dd3540..347eae2d7 100644
--- a/drivers/mmc/host/sdhci-pxav2.c
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -252,7 +252,7 @@ static struct platform_driver sdhci_pxav2_driver = {
.driver = {
.name = "sdhci-pxav2",
.of_match_table = of_match_ptr(sdhci_pxav2_of_match),
- .pm = SDHCI_PLTFM_PMOPS,
+ .pm = &sdhci_pltfm_pmops,
},
.probe = sdhci_pxav2_probe,
.remove = sdhci_pxav2_remove,
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 30132500a..dd1938d34 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -583,24 +583,17 @@ static int sdhci_pxav3_runtime_resume(struct device *dev)
}
#endif
-#ifdef CONFIG_PM
static const struct dev_pm_ops sdhci_pxav3_pmops = {
SET_SYSTEM_SLEEP_PM_OPS(sdhci_pxav3_suspend, sdhci_pxav3_resume)
SET_RUNTIME_PM_OPS(sdhci_pxav3_runtime_suspend,
sdhci_pxav3_runtime_resume, NULL)
};
-#define SDHCI_PXAV3_PMOPS (&sdhci_pxav3_pmops)
-
-#else
-#define SDHCI_PXAV3_PMOPS NULL
-#endif
-
static struct platform_driver sdhci_pxav3_driver = {
.driver = {
.name = "sdhci-pxav3",
.of_match_table = of_match_ptr(sdhci_pxav3_of_match),
- .pm = SDHCI_PXAV3_PMOPS,
+ .pm = &sdhci_pxav3_pmops,
},
.probe = sdhci_pxav3_probe,
.remove = sdhci_pxav3_remove,
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 70c724bc6..784c5a848 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -714,19 +714,12 @@ static int sdhci_s3c_runtime_resume(struct device *dev)
}
#endif
-#ifdef CONFIG_PM
static const struct dev_pm_ops sdhci_s3c_pmops = {
SET_SYSTEM_SLEEP_PM_OPS(sdhci_s3c_suspend, sdhci_s3c_resume)
SET_RUNTIME_PM_OPS(sdhci_s3c_runtime_suspend, sdhci_s3c_runtime_resume,
NULL)
};
-#define SDHCI_S3C_PMOPS (&sdhci_s3c_pmops)
-
-#else
-#define SDHCI_S3C_PMOPS NULL
-#endif
-
#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS4212)
static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = {
.no_divider = true,
@@ -765,7 +758,7 @@ static struct platform_driver sdhci_s3c_driver = {
.driver = {
.name = "s3c-sdhci",
.of_match_table = of_match_ptr(sdhci_s3c_dt_match),
- .pm = SDHCI_S3C_PMOPS,
+ .pm = &sdhci_s3c_pmops,
},
};
diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c
index 34866f668..5d068639d 100644
--- a/drivers/mmc/host/sdhci-sirf.c
+++ b/drivers/mmc/host/sdhci-sirf.c
@@ -260,9 +260,9 @@ static int sdhci_sirf_resume(struct device *dev)
return sdhci_resume_host(host);
}
+#endif
static SIMPLE_DEV_PM_OPS(sdhci_sirf_pm_ops, sdhci_sirf_suspend, sdhci_sirf_resume);
-#endif
static const struct of_device_id sdhci_sirf_of_match[] = {
{ .compatible = "sirf,prima2-sdhc" },
@@ -274,9 +274,7 @@ static struct platform_driver sdhci_sirf_driver = {
.driver = {
.name = "sdhci-sirf",
.of_match_table = sdhci_sirf_of_match,
-#ifdef CONFIG_PM_SLEEP
.pm = &sdhci_sirf_pm_ops,
-#endif
},
.probe = sdhci_sirf_probe,
.remove = sdhci_pltfm_unregister,
diff --git a/drivers/mmc/host/sdhci-st.c b/drivers/mmc/host/sdhci-st.c
index b7eaecfdd..ed92ce729 100644
--- a/drivers/mmc/host/sdhci-st.c
+++ b/drivers/mmc/host/sdhci-st.c
@@ -184,7 +184,7 @@ static void st_mmcss_cconfig(struct device_node *np, struct sdhci_host *host)
writel_relaxed(cconf2, host->ioaddr + ST_MMC_CCONFIG_REG_2);
- if (mhost->caps & MMC_CAP_NONREMOVABLE)
+ if (!mmc_card_is_removable(mhost))
cconf3 |= ST_MMC_CCONFIG_EMMC_SLOT_TYPE;
else
/* CARD_DET_CTRL */
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index bcc0de47f..1e93dc4e3 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -148,28 +148,37 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
return;
misc_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
- /* Erratum: Enable SDHCI spec v3.00 support */
- if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300)
- misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
- /* Advertise UHS modes as supported by host */
- if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
- misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
- else
- misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR50;
- if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
- misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
- else
- misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_DDR50;
- if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
- misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
- else
- misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR104;
- sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
-
clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
+
+ misc_ctrl &= ~(SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 |
+ SDHCI_MISC_CTRL_ENABLE_SDR50 |
+ SDHCI_MISC_CTRL_ENABLE_DDR50 |
+ SDHCI_MISC_CTRL_ENABLE_SDR104);
+
clk_ctrl &= ~SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE;
- if (soc_data->nvquirks & SDHCI_MISC_CTRL_ENABLE_SDR50)
- clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
+
+ /*
+ * If the board does not define a regulator for the SDHCI
+ * IO voltage, then don't advertise support for UHS modes
+ * even if the device supports it because the IO voltage
+ * cannot be configured.
+ */
+ if (!IS_ERR(host->mmc->supply.vqmmc)) {
+ /* Erratum: Enable SDHCI spec v3.00 support */
+ if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300)
+ misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
+ /* Advertise UHS modes as supported by host */
+ if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
+ misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
+ if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
+ misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
+ if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
+ misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
+ if (soc_data->nvquirks & SDHCI_MISC_CTRL_ENABLE_SDR50)
+ clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
+ }
+
+ sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
sdhci_writel(host, clk_ctrl, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
@@ -474,7 +483,7 @@ static struct platform_driver sdhci_tegra_driver = {
.driver = {
.name = "sdhci-tegra",
.of_match_table = sdhci_tegra_dt_match,
- .pm = SDHCI_PLTFM_PMOPS,
+ .pm = &sdhci_pltfm_pmops,
},
.probe = sdhci_tegra_probe,
.remove = sdhci_pltfm_unregister,
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 0e3d7c056..cd65d474a 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -45,65 +45,62 @@ static unsigned int debug_quirks2;
static void sdhci_finish_data(struct sdhci_host *);
-static void sdhci_finish_command(struct sdhci_host *);
-static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
-static int sdhci_get_cd(struct mmc_host *mmc);
static void sdhci_dumpregs(struct sdhci_host *host)
{
- pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
- mmc_hostname(host->mmc));
-
- pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
- sdhci_readl(host, SDHCI_DMA_ADDRESS),
- sdhci_readw(host, SDHCI_HOST_VERSION));
- pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
- sdhci_readw(host, SDHCI_BLOCK_SIZE),
- sdhci_readw(host, SDHCI_BLOCK_COUNT));
- pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
- sdhci_readl(host, SDHCI_ARGUMENT),
- sdhci_readw(host, SDHCI_TRANSFER_MODE));
- pr_debug(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
- sdhci_readl(host, SDHCI_PRESENT_STATE),
- sdhci_readb(host, SDHCI_HOST_CONTROL));
- pr_debug(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
- sdhci_readb(host, SDHCI_POWER_CONTROL),
- sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
- pr_debug(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
- sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
- sdhci_readw(host, SDHCI_CLOCK_CONTROL));
- pr_debug(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
- sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
- sdhci_readl(host, SDHCI_INT_STATUS));
- pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
- sdhci_readl(host, SDHCI_INT_ENABLE),
- sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
- pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
- sdhci_readw(host, SDHCI_ACMD12_ERR),
- sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
- pr_debug(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
- sdhci_readl(host, SDHCI_CAPABILITIES),
- sdhci_readl(host, SDHCI_CAPABILITIES_1));
- pr_debug(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
- sdhci_readw(host, SDHCI_COMMAND),
- sdhci_readl(host, SDHCI_MAX_CURRENT));
- pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n",
- sdhci_readw(host, SDHCI_HOST_CONTROL2));
+ pr_err(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
+ mmc_hostname(host->mmc));
+
+ pr_err(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
+ sdhci_readl(host, SDHCI_DMA_ADDRESS),
+ sdhci_readw(host, SDHCI_HOST_VERSION));
+ pr_err(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
+ sdhci_readw(host, SDHCI_BLOCK_SIZE),
+ sdhci_readw(host, SDHCI_BLOCK_COUNT));
+ pr_err(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
+ sdhci_readl(host, SDHCI_ARGUMENT),
+ sdhci_readw(host, SDHCI_TRANSFER_MODE));
+ pr_err(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
+ sdhci_readl(host, SDHCI_PRESENT_STATE),
+ sdhci_readb(host, SDHCI_HOST_CONTROL));
+ pr_err(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
+ sdhci_readb(host, SDHCI_POWER_CONTROL),
+ sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
+ pr_err(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
+ sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
+ sdhci_readw(host, SDHCI_CLOCK_CONTROL));
+ pr_err(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
+ sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
+ sdhci_readl(host, SDHCI_INT_STATUS));
+ pr_err(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
+ sdhci_readl(host, SDHCI_INT_ENABLE),
+ sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
+ pr_err(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
+ sdhci_readw(host, SDHCI_ACMD12_ERR),
+ sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
+ pr_err(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
+ sdhci_readl(host, SDHCI_CAPABILITIES),
+ sdhci_readl(host, SDHCI_CAPABILITIES_1));
+ pr_err(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
+ sdhci_readw(host, SDHCI_COMMAND),
+ sdhci_readl(host, SDHCI_MAX_CURRENT));
+ pr_err(DRIVER_NAME ": Host ctl2: 0x%08x\n",
+ sdhci_readw(host, SDHCI_HOST_CONTROL2));
if (host->flags & SDHCI_USE_ADMA) {
if (host->flags & SDHCI_USE_64_BIT_DMA)
- pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
- readl(host->ioaddr + SDHCI_ADMA_ERROR),
- readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI),
- readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
+ pr_err(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
+ readl(host->ioaddr + SDHCI_ADMA_ERROR),
+ readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI),
+ readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
else
- pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
- readl(host->ioaddr + SDHCI_ADMA_ERROR),
- readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
+ pr_err(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
+ readl(host->ioaddr + SDHCI_ADMA_ERROR),
+ readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
}
- pr_debug(DRIVER_NAME ": ===========================================\n");
+ pr_err(DRIVER_NAME ": ===========================================\n");
}
/*****************************************************************************\
@@ -112,12 +109,17 @@ static void sdhci_dumpregs(struct sdhci_host *host)
* *
\*****************************************************************************/
+static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
+{
+ return cmd->data || cmd->flags & MMC_RSP_BUSY;
+}
+
static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
u32 present;
if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
- (host->mmc->caps & MMC_CAP_NONREMOVABLE))
+ !mmc_card_is_removable(host->mmc))
return;
if (enable) {
@@ -193,7 +195,9 @@ EXPORT_SYMBOL_GPL(sdhci_reset);
static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
- if (!sdhci_get_cd(host->mmc))
+ struct mmc_host *mmc = host->mmc;
+
+ if (!mmc->ops->get_cd(mmc))
return;
}
@@ -210,10 +214,10 @@ static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
}
}
-static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
-
static void sdhci_init(struct sdhci_host *host, int soft)
{
+ struct mmc_host *mmc = host->mmc;
+
if (soft)
sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
else
@@ -225,13 +229,17 @@ static void sdhci_init(struct sdhci_host *host, int soft)
SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
SDHCI_INT_RESPONSE;
+ if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
+ host->tuning_mode == SDHCI_TUNING_MODE_3)
+ host->ier |= SDHCI_INT_RETUNE;
+
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
if (soft) {
/* force clock reconfiguration */
host->clock = 0;
- sdhci_set_ios(host->mmc, &host->mmc->ios);
+ mmc->ops->set_ios(mmc, &mmc->ios);
}
}
@@ -429,8 +437,6 @@ static void sdhci_transfer_pio(struct sdhci_host *host)
{
u32 mask;
- BUG_ON(!host->data);
-
if (host->blocks == 0)
return;
@@ -747,14 +753,14 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
u8 ctrl;
struct mmc_data *data = cmd->data;
- WARN_ON(host->data);
-
- if (data || (cmd->flags & MMC_RSP_BUSY))
+ if (sdhci_data_line_cmd(cmd))
sdhci_set_timeout(host, cmd);
if (!data)
return;
+ WARN_ON(host->data);
+
/* Sanity checks */
BUG_ON(data->blksz * data->blocks > 524288);
BUG_ON(data->blksz > host->mmc->max_blk_size);
@@ -879,6 +885,12 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}
+static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
+ struct mmc_request *mrq)
+{
+ return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12);
+}
+
static void sdhci_set_transfer_mode(struct sdhci_host *host,
struct mmc_command *cmd)
{
@@ -909,12 +921,12 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host,
* If we are sending CMD23, CMD12 never gets sent
* on successful completion (so no Auto-CMD12).
*/
- if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
+ if (sdhci_auto_cmd12(host, cmd->mrq) &&
(cmd->opcode != SD_IO_RW_EXTENDED))
mode |= SDHCI_TRNS_AUTO_CMD12;
- else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
+ else if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
mode |= SDHCI_TRNS_AUTO_CMD23;
- sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2);
+ sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
}
}
@@ -926,14 +938,63 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host,
sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}
-static void sdhci_finish_data(struct sdhci_host *host)
+static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
+{
+ return (!(host->flags & SDHCI_DEVICE_DEAD) &&
+ ((mrq->cmd && mrq->cmd->error) ||
+ (mrq->sbc && mrq->sbc->error) ||
+ (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
+ (mrq->data->stop && mrq->data->stop->error))) ||
+ (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
+}
+
+static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
+{
+ int i;
+
+ for (i = 0; i < SDHCI_MAX_MRQS; i++) {
+ if (host->mrqs_done[i] == mrq) {
+ WARN_ON(1);
+ return;
+ }
+ }
+
+ for (i = 0; i < SDHCI_MAX_MRQS; i++) {
+ if (!host->mrqs_done[i]) {
+ host->mrqs_done[i] = mrq;
+ break;
+ }
+ }
+
+ WARN_ON(i >= SDHCI_MAX_MRQS);
+
+ tasklet_schedule(&host->finish_tasklet);
+}
+
+static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
- struct mmc_data *data;
+ if (host->cmd && host->cmd->mrq == mrq)
+ host->cmd = NULL;
+
+ if (host->data_cmd && host->data_cmd->mrq == mrq)
+ host->data_cmd = NULL;
+
+ if (host->data && host->data->mrq == mrq)
+ host->data = NULL;
- BUG_ON(!host->data);
+ if (sdhci_needs_reset(host, mrq))
+ host->pending_reset = true;
+
+ __sdhci_finish_mrq(host, mrq);
+}
+
+static void sdhci_finish_data(struct sdhci_host *host)
+{
+ struct mmc_command *data_cmd = host->data_cmd;
+ struct mmc_data *data = host->data;
- data = host->data;
host->data = NULL;
+ host->data_cmd = NULL;
if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
(SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
@@ -958,20 +1019,41 @@ static void sdhci_finish_data(struct sdhci_host *host)
*/
if (data->stop &&
(data->error ||
- !host->mrq->sbc)) {
+ !data->mrq->sbc)) {
/*
* The controller needs a reset of internal state machines
* upon error conditions.
*/
if (data->error) {
- sdhci_do_reset(host, SDHCI_RESET_CMD);
+ if (!host->cmd || host->cmd == data_cmd)
+ sdhci_do_reset(host, SDHCI_RESET_CMD);
sdhci_do_reset(host, SDHCI_RESET_DATA);
}
+ /* Avoid triggering warning in sdhci_send_command() */
+ host->cmd = NULL;
sdhci_send_command(host, data->stop);
- } else
- tasklet_schedule(&host->finish_tasklet);
+ } else {
+ sdhci_finish_mrq(host, data->mrq);
+ }
+}
+
+static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
+ unsigned long timeout)
+{
+ if (sdhci_data_line_cmd(mrq->cmd))
+ mod_timer(&host->data_timer, timeout);
+ else
+ mod_timer(&host->timer, timeout);
+}
+
+static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
+{
+ if (sdhci_data_line_cmd(mrq->cmd))
+ del_timer(&host->data_timer);
+ else
+ del_timer(&host->timer);
}
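
Commands are now tracked per line: host->cmd owns the command line while host->data_cmd owns the data line (data transfers and busy signalling both use DAT0), so timeouts are split the same way. The host setup presumably arms both timers; a sketch assumed to mirror the split above:

/* Sketch (assumed init code, one timer per request line): */
setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
setup_timer(&host->data_timer, sdhci_timeout_data_timer,
	    (unsigned long)host);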
void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
@@ -989,12 +1071,12 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
timeout = 10;
mask = SDHCI_CMD_INHIBIT;
- if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
+ if (sdhci_data_line_cmd(cmd))
mask |= SDHCI_DATA_INHIBIT;
/* We shouldn't wait for data inhibit for stop commands, even
though they might use busy signaling */
- if (host->mrq->data && (cmd == host->mrq->data->stop))
+ if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
mask &= ~SDHCI_DATA_INHIBIT;
while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
@@ -1003,7 +1085,7 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
mmc_hostname(host->mmc));
sdhci_dumpregs(host);
cmd->error = -EIO;
- tasklet_schedule(&host->finish_tasklet);
+ sdhci_finish_mrq(host, cmd->mrq);
return;
}
timeout--;
@@ -1015,10 +1097,13 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
else
timeout += 10 * HZ;
- mod_timer(&host->timer, timeout);
+ sdhci_mod_timer(host, cmd->mrq, timeout);
host->cmd = cmd;
- host->busy_handle = 0;
+ if (sdhci_data_line_cmd(cmd)) {
+ WARN_ON(host->data_cmd);
+ host->data_cmd = cmd;
+ }
sdhci_prepare_data(host, cmd);
@@ -1030,7 +1115,7 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
pr_err("%s: Unsupported response type!\n",
mmc_hostname(host->mmc));
cmd->error = -EINVAL;
- tasklet_schedule(&host->finish_tasklet);
+ sdhci_finish_mrq(host, cmd->mrq);
return;
}
@@ -1059,40 +1144,58 @@ EXPORT_SYMBOL_GPL(sdhci_send_command);
static void sdhci_finish_command(struct sdhci_host *host)
{
+ struct mmc_command *cmd = host->cmd;
int i;
- BUG_ON(host->cmd == NULL);
+ host->cmd = NULL;
- if (host->cmd->flags & MMC_RSP_PRESENT) {
- if (host->cmd->flags & MMC_RSP_136) {
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ if (cmd->flags & MMC_RSP_136) {
/* CRC is stripped so we need to do some shifting. */
for (i = 0;i < 4;i++) {
- host->cmd->resp[i] = sdhci_readl(host,
+ cmd->resp[i] = sdhci_readl(host,
SDHCI_RESPONSE + (3-i)*4) << 8;
if (i != 3)
- host->cmd->resp[i] |=
+ cmd->resp[i] |=
sdhci_readb(host,
SDHCI_RESPONSE + (3-i)*4-1);
}
} else {
- host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
+ cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
+ }
+ }
+
+ /*
+ * The host can send an interrupt when the busy state has
+ * ended, allowing us to wait without wasting CPU cycles.
+ * The busy signal uses DAT0 so this is similar to waiting
+ * for data to complete.
+ *
+ * Note: The 1.0 specification is a bit ambiguous about this
+ * feature so there might be some problems with older
+ * controllers.
+ */
+ if (cmd->flags & MMC_RSP_BUSY) {
+ if (cmd->data) {
+ DBG("Cannot wait for busy signal when also doing a data transfer");
+ } else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
+ cmd == host->data_cmd) {
+ /* Command complete before busy is ended */
+ return;
}
}
/* Finished CMD23, now send actual command. */
- if (host->cmd == host->mrq->sbc) {
- host->cmd = NULL;
- sdhci_send_command(host, host->mrq->cmd);
+ if (cmd == cmd->mrq->sbc) {
+ sdhci_send_command(host, cmd->mrq->cmd);
} else {
/* Processed actual command. */
if (host->data && host->data_early)
sdhci_finish_data(host);
- if (!host->cmd->data)
- tasklet_schedule(&host->finish_tasklet);
-
- host->cmd = NULL;
+ if (!cmd->data)
+ sdhci_finish_mrq(host, cmd->mrq);
}
}
@@ -1373,26 +1476,22 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
spin_lock_irqsave(&host->lock, flags);
- WARN_ON(host->mrq != NULL);
-
sdhci_led_activate(host);
/*
* Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
* requests if Auto-CMD12 is enabled.
*/
- if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
+ if (sdhci_auto_cmd12(host, mrq)) {
if (mrq->stop) {
mrq->data->stop = NULL;
mrq->stop = NULL;
}
}
- host->mrq = mrq;
-
if (!present || host->flags & SDHCI_DEVICE_DEAD) {
- host->mrq->cmd->error = -ENOMEDIUM;
- tasklet_schedule(&host->finish_tasklet);
+ mrq->cmd->error = -ENOMEDIUM;
+ sdhci_finish_mrq(host, mrq);
} else {
if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
sdhci_send_command(host, mrq->sbc);
@@ -1617,7 +1716,7 @@ static int sdhci_get_cd(struct mmc_host *mmc)
return 0;
/* If nonremovable, assume that the card is always present. */
- if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
+ if (!mmc_card_is_removable(host->mmc))
return 1;
/*
@@ -1733,13 +1832,14 @@ static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
switch (ios->signal_voltage) {
case MMC_SIGNAL_VOLTAGE_330:
+ if (!(host->flags & SDHCI_SIGNALING_330))
+ return -EINVAL;
/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
ctrl &= ~SDHCI_CTRL_VDD_180;
sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
if (!IS_ERR(mmc->supply.vqmmc)) {
- ret = regulator_set_voltage(mmc->supply.vqmmc, 2700000,
- 3600000);
+ ret = mmc_regulator_set_vqmmc(mmc, ios);
if (ret) {
pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
mmc_hostname(mmc));
@@ -1759,9 +1859,10 @@ static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
return -EAGAIN;
case MMC_SIGNAL_VOLTAGE_180:
+ if (!(host->flags & SDHCI_SIGNALING_180))
+ return -EINVAL;
if (!IS_ERR(mmc->supply.vqmmc)) {
- ret = regulator_set_voltage(mmc->supply.vqmmc,
- 1700000, 1950000);
+ ret = mmc_regulator_set_vqmmc(mmc, ios);
if (ret) {
pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
mmc_hostname(mmc));
@@ -1790,9 +1891,10 @@ static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
return -EAGAIN;
case MMC_SIGNAL_VOLTAGE_120:
+ if (!(host->flags & SDHCI_SIGNALING_120))
+ return -EINVAL;
if (!IS_ERR(mmc->supply.vqmmc)) {
- ret = regulator_set_voltage(mmc->supply.vqmmc, 1100000,
- 1300000);
+ ret = mmc_regulator_set_vqmmc(mmc, ios);
if (ret) {
pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
mmc_hostname(mmc));
@@ -1811,10 +1913,10 @@ static int sdhci_card_busy(struct mmc_host *mmc)
struct sdhci_host *host = mmc_priv(mmc);
u32 present_state;
- /* Check whether DAT[3:0] is 0000 */
+ /* Check whether DAT[0] is 0 */
present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
- return !(present_state & SDHCI_DATA_LVL_MASK);
+ return !(present_state & SDHCI_DATA_0_LVL_MASK);
}
static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -1909,7 +2011,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
/*
* Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number
- * of loops reaches 40 times or a timeout of 150ms occurs.
+ * of loops reaches 40 times.
*/
do {
struct mmc_command cmd = {0};
@@ -1920,13 +2022,13 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
cmd.retries = 0;
cmd.data = NULL;
+ cmd.mrq = &mrq;
cmd.error = 0;
if (tuning_loop_counter-- == 0)
break;
mrq.cmd = &cmd;
- host->mrq = &mrq;
/*
* In response to CMD19, the card sends 64 bytes of tuning
@@ -1956,7 +2058,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
sdhci_send_command(host, &cmd);
host->cmd = NULL;
- host->mrq = NULL;
+ sdhci_del_timer(host, &mrq);
spin_unlock_irqrestore(&host->lock, flags);
/* Wait for Buffer Read Ready interrupt */
@@ -2086,6 +2188,24 @@ static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
}
+static inline bool sdhci_has_requests(struct sdhci_host *host)
+{
+ return host->cmd || host->data_cmd;
+}
+
+static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
+{
+ if (host->data_cmd) {
+ host->data_cmd->error = err;
+ sdhci_finish_mrq(host, host->data_cmd->mrq);
+ }
+
+ if (host->cmd) {
+ host->cmd->error = err;
+ sdhci_finish_mrq(host, host->cmd->mrq);
+ }
+}
+
static void sdhci_card_event(struct mmc_host *mmc)
{
struct sdhci_host *host = mmc_priv(mmc);
@@ -2096,12 +2216,12 @@ static void sdhci_card_event(struct mmc_host *mmc)
if (host->ops->card_event)
host->ops->card_event(host);
- present = sdhci_get_cd(host->mmc);
+ present = mmc->ops->get_cd(mmc);
spin_lock_irqsave(&host->lock, flags);
- /* Check host->mrq first in case we are runtime suspended */
- if (host->mrq && !present) {
+ /* Check sdhci_has_requests() first in case we are runtime suspended */
+ if (sdhci_has_requests(host) && !present) {
pr_err("%s: Card removed during transfer!\n",
mmc_hostname(host->mmc));
pr_err("%s: Resetting controller.\n",
@@ -2110,8 +2230,7 @@ static void sdhci_card_event(struct mmc_host *mmc)
sdhci_do_reset(host, SDHCI_RESET_CMD);
sdhci_do_reset(host, SDHCI_RESET_DATA);
- host->mrq->cmd->error = -ENOMEDIUM;
- tasklet_schedule(&host->finish_tasklet);
+ sdhci_error_out_mrqs(host, -ENOMEDIUM);
}
spin_unlock_irqrestore(&host->lock, flags);
@@ -2140,28 +2259,28 @@ static const struct mmc_host_ops sdhci_ops = {
* *
\*****************************************************************************/
-static void sdhci_tasklet_finish(unsigned long param)
+static bool sdhci_request_done(struct sdhci_host *host)
{
- struct sdhci_host *host;
unsigned long flags;
struct mmc_request *mrq;
-
- host = (struct sdhci_host*)param;
+ int i;
spin_lock_irqsave(&host->lock, flags);
- /*
- * If this tasklet gets rescheduled while running, it will
- * be run again afterwards but without any active request.
- */
- if (!host->mrq) {
- spin_unlock_irqrestore(&host->lock, flags);
- return;
+ for (i = 0; i < SDHCI_MAX_MRQS; i++) {
+ mrq = host->mrqs_done[i];
+ if (mrq) {
+ host->mrqs_done[i] = NULL;
+ break;
+ }
}
- del_timer(&host->timer);
+ if (!mrq) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ return true;
+ }
- mrq = host->mrq;
+ sdhci_del_timer(host, mrq);
/*
* Always unmap the data buffers if they were mapped by
@@ -2183,13 +2302,7 @@ static void sdhci_tasklet_finish(unsigned long param)
* The controller needs a reset of internal state machines
* upon error conditions.
*/
- if (!(host->flags & SDHCI_DEVICE_DEAD) &&
- ((mrq->cmd && mrq->cmd->error) ||
- (mrq->sbc && mrq->sbc->error) ||
- (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
- (mrq->data->stop && mrq->data->stop->error))) ||
- (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
-
+ if (sdhci_needs_reset(host, mrq)) {
/* Some controllers need this kick or reset won't work here */
if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
/* This is to force an update */
@@ -2197,20 +2310,31 @@ static void sdhci_tasklet_finish(unsigned long param)
/* Spec says we should do both at the same time, but Ricoh
controllers do not like that. */
- sdhci_do_reset(host, SDHCI_RESET_CMD);
- sdhci_do_reset(host, SDHCI_RESET_DATA);
- }
+ if (!host->cmd)
+ sdhci_do_reset(host, SDHCI_RESET_CMD);
+ if (!host->data_cmd)
+ sdhci_do_reset(host, SDHCI_RESET_DATA);
- host->mrq = NULL;
- host->cmd = NULL;
- host->data = NULL;
+ host->pending_reset = false;
+ }
- sdhci_led_deactivate(host);
+ if (!sdhci_has_requests(host))
+ sdhci_led_deactivate(host);
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
mmc_request_done(host->mmc, mrq);
+
+ return false;
+}
+
+static void sdhci_tasklet_finish(unsigned long param)
+{
+ struct sdhci_host *host = (struct sdhci_host *)param;
+
+ while (!sdhci_request_done(host))
+ ;
}
static void sdhci_timeout_timer(unsigned long data)
@@ -2222,7 +2346,30 @@ static void sdhci_timeout_timer(unsigned long data)
spin_lock_irqsave(&host->lock, flags);
- if (host->mrq) {
+ if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
+ pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
+ mmc_hostname(host->mmc));
+ sdhci_dumpregs(host);
+
+ host->cmd->error = -ETIMEDOUT;
+ sdhci_finish_mrq(host, host->cmd->mrq);
+ }
+
+ mmiowb();
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void sdhci_timeout_data_timer(unsigned long data)
+{
+ struct sdhci_host *host;
+ unsigned long flags;
+
+ host = (struct sdhci_host *)data;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (host->data || host->data_cmd ||
+ (host->cmd && sdhci_data_line_cmd(host->cmd))) {
pr_err("%s: Timeout waiting for hardware interrupt.\n",
mmc_hostname(host->mmc));
sdhci_dumpregs(host);
@@ -2230,13 +2377,12 @@ static void sdhci_timeout_timer(unsigned long data)
if (host->data) {
host->data->error = -ETIMEDOUT;
sdhci_finish_data(host);
+ } else if (host->data_cmd) {
+ host->data_cmd->error = -ETIMEDOUT;
+ sdhci_finish_mrq(host, host->data_cmd->mrq);
} else {
- if (host->cmd)
- host->cmd->error = -ETIMEDOUT;
- else
- host->mrq->cmd->error = -ETIMEDOUT;
-
- tasklet_schedule(&host->finish_tasklet);
+ host->cmd->error = -ETIMEDOUT;
+ sdhci_finish_mrq(host, host->cmd->mrq);
}
}
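With the timers split, commands that occupy the data line are tracked by host->data_timer while plain commands keep host->timer. A hedged sketch of the arming side (assumed; it mirrors the sdhci_data_line_cmd() test used above and is not part of this excerpt):

	/* illustrative only: arm the timer that matches the command type */
	if (sdhci_data_line_cmd(cmd))
		mod_timer(&host->data_timer, jiffies + 10 * HZ);
	else
		mod_timer(&host->timer, jiffies + 10 * HZ);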
@@ -2252,9 +2398,14 @@ static void sdhci_timeout_timer(unsigned long data)
static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
{
- BUG_ON(intmask == 0);
-
if (!host->cmd) {
+ /*
+ * SDHCI recovers from errors by resetting the cmd and data
+ * circuits. Until that is done, there very well might be more
+ * interrupts, so ignore them in that case.
+ */
+ if (host->pending_reset)
+ return;
pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
mmc_hostname(host->mmc), (unsigned)intmask);
sdhci_dumpregs(host);
@@ -2285,37 +2436,14 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
return;
}
- tasklet_schedule(&host->finish_tasklet);
+ sdhci_finish_mrq(host, host->cmd->mrq);
return;
}
- /*
- * The host can send an interrupt when the busy state has
- * ended, allowing us to wait without wasting CPU cycles.
- * Unfortunately this is overloaded on the "data complete"
- * interrupt, so we need to take some care when handling
- * it.
- *
- * Note: The 1.0 specification is a bit ambiguous about this
- * feature so there might be some problems with older
- * controllers.
- */
- if (host->cmd->flags & MMC_RSP_BUSY) {
- if (host->cmd->data)
- DBG("Cannot wait for busy signal when also doing a data transfer");
- else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ)
- && !host->busy_handle) {
- /* Mark that command complete before busy is ended */
- host->busy_handle = 1;
- return;
- }
-
- /* The controller does not support the end-of-busy IRQ,
- * fall through and take the SDHCI_INT_RESPONSE */
- } else if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
- host->cmd->opcode == MMC_STOP_TRANSMISSION && !host->data) {
+ if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
+ !(host->cmd->flags & MMC_RSP_BUSY) && !host->data &&
+ host->cmd->opcode == MMC_STOP_TRANSMISSION)
*mask &= ~SDHCI_INT_DATA_END;
- }
if (intmask & SDHCI_INT_RESPONSE)
sdhci_finish_command(host);
@@ -2357,7 +2485,6 @@ static void sdhci_adma_show_error(struct sdhci_host *host) { }
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
u32 command;
- BUG_ON(intmask == 0);
/* CMD19 generates _only_ Buffer Read Ready interrupt */
if (intmask & SDHCI_INT_DATA_AVAIL) {
@@ -2371,15 +2498,20 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
}
if (!host->data) {
+ struct mmc_command *data_cmd = host->data_cmd;
+
+ if (data_cmd)
+ host->data_cmd = NULL;
+
/*
* The "data complete" interrupt is also used to
* indicate that a busy state has ended. See comment
* above in sdhci_cmd_irq().
*/
- if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
+ if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
if (intmask & SDHCI_INT_DATA_TIMEOUT) {
- host->cmd->error = -ETIMEDOUT;
- tasklet_schedule(&host->finish_tasklet);
+ data_cmd->error = -ETIMEDOUT;
+ sdhci_finish_mrq(host, data_cmd->mrq);
return;
}
if (intmask & SDHCI_INT_DATA_END) {
@@ -2388,14 +2520,22 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
* before the command completed, so make
* sure we do things in the proper order.
*/
- if (host->busy_handle)
- sdhci_finish_command(host);
- else
- host->busy_handle = 1;
+ if (host->cmd == data_cmd)
+ return;
+
+ sdhci_finish_mrq(host, data_cmd->mrq);
return;
}
}
+ /*
+ * SDHCI recovers from errors by resetting the cmd and data
+ * circuits. Until that is done, there very well might be more
+ * interrupts, so ignore them in that case.
+ */
+ if (host->pending_reset)
+ return;
+
pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
mmc_hostname(host->mmc), (unsigned)intmask);
sdhci_dumpregs(host);
@@ -2453,7 +2593,7 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
}
if (intmask & SDHCI_INT_DATA_END) {
- if (host->cmd) {
+ if (host->cmd == host->data_cmd) {
/*
* Data managed to finish before the
* command completed. Make sure we do
@@ -2537,6 +2677,9 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
pr_err("%s: Card is consuming too much power!\n",
mmc_hostname(host->mmc));
+ if (intmask & SDHCI_INT_RETUNE)
+ mmc_retune_needed(host->mmc);
+
if (intmask & SDHCI_INT_CARD_INT) {
sdhci_enable_sdio_irq_nolock(host, false);
host->thread_isr |= SDHCI_INT_CARD_INT;
@@ -2546,7 +2689,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
- SDHCI_INT_CARD_INT);
+ SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
if (intmask) {
unexpected |= intmask;
@@ -2582,8 +2725,10 @@ static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
spin_unlock_irqrestore(&host->lock, flags);
if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
- sdhci_card_event(host->mmc);
- mmc_detect_change(host->mmc, msecs_to_jiffies(200));
+ struct mmc_host *mmc = host->mmc;
+
+ mmc->ops->card_event(mmc);
+ mmc_detect_change(mmc, msecs_to_jiffies(200));
}
if (isr & SDHCI_INT_CARD_INT) {
@@ -2605,18 +2750,31 @@ static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
\*****************************************************************************/
#ifdef CONFIG_PM
+/*
+ * To enable wakeup events, the corresponding events have to be enabled in
+ * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
+ * Table' in the SD Host Controller Standard Specification.
+ * It is useless to restore SDHCI_INT_ENABLE state in
+ * sdhci_disable_irq_wakeups() since it will be set by
+ * sdhci_enable_card_detection() or sdhci_init().
+ */
void sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
u8 val;
u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
| SDHCI_WAKE_ON_INT;
+ u32 irq_val = SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
+ SDHCI_INT_CARD_INT;
val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
val |= mask;
/* Avoid fake wake up */
- if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+ if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) {
val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
+ irq_val &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
+ }
sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
+ sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
}
EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
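A hedged usage sketch (illustrative, not part of this patch): a suspend path would pair this helper with enable_irq_wake() only when the device is allowed to wake the system:

	static int example_suspend(struct sdhci_host *host)
	{
		if (device_may_wakeup(mmc_dev(host->mmc))) {
			sdhci_enable_irq_wakeups(host);
			enable_irq_wake(host->irq);
		}
		return 0;
	}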
@@ -2636,7 +2794,8 @@ int sdhci_suspend_host(struct sdhci_host *host)
sdhci_disable_card_detection(host);
mmc_retune_timer_stop(host->mmc);
- mmc_retune_needed(host->mmc);
+ if (host->tuning_mode != SDHCI_TUNING_MODE_3)
+ mmc_retune_needed(host->mmc);
if (!device_may_wakeup(mmc_dev(host->mmc))) {
host->ier = 0;
@@ -2654,6 +2813,7 @@ EXPORT_SYMBOL_GPL(sdhci_suspend_host);
int sdhci_resume_host(struct sdhci_host *host)
{
+ struct mmc_host *mmc = host->mmc;
int ret = 0;
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
@@ -2667,7 +2827,7 @@ int sdhci_resume_host(struct sdhci_host *host)
sdhci_init(host, 0);
host->pwr = 0;
host->clock = 0;
- sdhci_set_ios(host->mmc, &host->mmc->ios);
+ mmc->ops->set_ios(mmc, &mmc->ios);
} else {
sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
mmiowb();
@@ -2696,7 +2856,8 @@ int sdhci_runtime_suspend_host(struct sdhci_host *host)
unsigned long flags;
mmc_retune_timer_stop(host->mmc);
- mmc_retune_needed(host->mmc);
+ if (host->tuning_mode != SDHCI_TUNING_MODE_3)
+ mmc_retune_needed(host->mmc);
spin_lock_irqsave(&host->lock, flags);
host->ier &= SDHCI_INT_CARD_INT;
@@ -2716,6 +2877,7 @@ EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
int sdhci_runtime_resume_host(struct sdhci_host *host)
{
+ struct mmc_host *mmc = host->mmc;
unsigned long flags;
int host_flags = host->flags;
@@ -2729,8 +2891,8 @@ int sdhci_runtime_resume_host(struct sdhci_host *host)
/* Force clock and power re-program */
host->pwr = 0;
host->clock = 0;
- sdhci_start_signal_voltage_switch(host->mmc, &host->mmc->ios);
- sdhci_set_ios(host->mmc, &host->mmc->ios);
+ mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
+ mmc->ops->set_ios(mmc, &mmc->ios);
if ((host_flags & SDHCI_PV_ENABLED) &&
!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
@@ -2781,6 +2943,8 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
host->mmc_host_ops = sdhci_ops;
mmc->ops = &host->mmc_host_ops;
+ host->flags = SDHCI_SIGNALING_330;
+
return host;
}
@@ -2816,10 +2980,41 @@ static int sdhci_set_dma_mask(struct sdhci_host *host)
return ret;
}
-int sdhci_add_host(struct sdhci_host *host)
+void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
+{
+ u16 v;
+
+ if (host->read_caps)
+ return;
+
+ host->read_caps = true;
+
+ if (debug_quirks)
+ host->quirks = debug_quirks;
+
+ if (debug_quirks2)
+ host->quirks2 = debug_quirks2;
+
+ sdhci_do_reset(host, SDHCI_RESET_ALL);
+
+ v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
+ host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
+
+ if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
+ return;
+
+ host->caps = caps ? *caps : sdhci_readl(host, SDHCI_CAPABILITIES);
+
+ if (host->version < SDHCI_SPEC_300)
+ return;
+
+ host->caps1 = caps1 ? *caps1 : sdhci_readl(host, SDHCI_CAPABILITIES_1);
+}
+EXPORT_SYMBOL_GPL(__sdhci_read_caps);
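A hedged usage sketch (the capability values are invented): a glue driver whose controller reports broken capability registers can pass substitutes to __sdhci_read_caps(); a NULL argument makes the core read the corresponding register itself, which is all the sdhci_read_caps() wrapper does:

	static void example_override_caps(struct sdhci_host *host)
	{
		u32 caps = SDHCI_CAN_DO_SDMA | SDHCI_CAN_VDD_330; /* invented */
		u32 caps1 = 0;

		__sdhci_read_caps(host, NULL, &caps, &caps1);
	}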
+
+int sdhci_setup_host(struct sdhci_host *host)
{
struct mmc_host *mmc;
- u32 caps[2] = {0, 0};
u32 max_current_caps;
unsigned int ocr_avail;
unsigned int override_timeout_clk;
@@ -2832,34 +3027,28 @@ int sdhci_add_host(struct sdhci_host *host)
mmc = host->mmc;
- if (debug_quirks)
- host->quirks = debug_quirks;
- if (debug_quirks2)
- host->quirks2 = debug_quirks2;
+ /*
+ * If there are external regulators, get them. Note this must be done
+ * early before resetting the host and reading the capabilities so that
+ * the host can take the appropriate action if regulators are not
+ * available.
+ */
+ ret = mmc_regulator_get_supply(mmc);
+ if (ret == -EPROBE_DEFER)
+ return ret;
- override_timeout_clk = host->timeout_clk;
+ sdhci_read_caps(host);
- sdhci_do_reset(host, SDHCI_RESET_ALL);
+ override_timeout_clk = host->timeout_clk;
- host->version = sdhci_readw(host, SDHCI_HOST_VERSION);
- host->version = (host->version & SDHCI_SPEC_VER_MASK)
- >> SDHCI_SPEC_VER_SHIFT;
if (host->version > SDHCI_SPEC_300) {
pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
mmc_hostname(mmc), host->version);
}
- caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
- sdhci_readl(host, SDHCI_CAPABILITIES);
-
- if (host->version >= SDHCI_SPEC_300)
- caps[1] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ?
- host->caps1 :
- sdhci_readl(host, SDHCI_CAPABILITIES_1);
-
if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
host->flags |= SDHCI_USE_SDMA;
- else if (!(caps[0] & SDHCI_CAN_DO_SDMA))
+ else if (!(host->caps & SDHCI_CAN_DO_SDMA))
DBG("Controller doesn't have SDMA capability\n");
else
host->flags |= SDHCI_USE_SDMA;
@@ -2871,7 +3060,7 @@ int sdhci_add_host(struct sdhci_host *host)
}
if ((host->version >= SDHCI_SPEC_200) &&
- (caps[0] & SDHCI_CAN_DO_ADMA2))
+ (host->caps & SDHCI_CAN_DO_ADMA2))
host->flags |= SDHCI_USE_ADMA;
if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
@@ -2887,7 +3076,7 @@ int sdhci_add_host(struct sdhci_host *host)
* SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
* implement.
*/
- if (caps[0] & SDHCI_CAN_64BIT)
+ if (host->caps & SDHCI_CAN_64BIT)
host->flags |= SDHCI_USE_64_BIT_DMA;
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
@@ -2963,10 +3152,10 @@ int sdhci_add_host(struct sdhci_host *host)
}
if (host->version >= SDHCI_SPEC_300)
- host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK)
+ host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
>> SDHCI_CLOCK_BASE_SHIFT;
else
- host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK)
+ host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
>> SDHCI_CLOCK_BASE_SHIFT;
host->max_clk *= 1000000;
@@ -2985,7 +3174,7 @@ int sdhci_add_host(struct sdhci_host *host)
* In case of Host Controller v3.00, find out whether clock
* multiplier is supported.
*/
- host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >>
+ host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
SDHCI_CLOCK_MUL_SHIFT;
/*
@@ -3017,7 +3206,7 @@ int sdhci_add_host(struct sdhci_host *host)
mmc->f_max = max_clk;
if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
- host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >>
+ host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
SDHCI_TIMEOUT_CLK_SHIFT;
if (host->timeout_clk == 0) {
if (host->ops->get_timeout_clock) {
@@ -3031,7 +3220,7 @@ int sdhci_add_host(struct sdhci_host *host)
}
}
- if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
+ if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
host->timeout_clk *= 1000;
if (override_timeout_clk)
@@ -3072,27 +3261,22 @@ int sdhci_add_host(struct sdhci_host *host)
if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
mmc->caps &= ~MMC_CAP_CMD23;
- if (caps[0] & SDHCI_CAN_DO_HISPD)
+ if (host->caps & SDHCI_CAN_DO_HISPD)
mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
- !(mmc->caps & MMC_CAP_NONREMOVABLE) &&
+ mmc_card_is_removable(mmc) &&
mmc_gpio_get_cd(host->mmc) < 0)
mmc->caps |= MMC_CAP_NEEDS_POLL;
- /* If there are external regulators, get them */
- ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
- goto undma;
-
/* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
if (!IS_ERR(mmc->supply.vqmmc)) {
ret = regulator_enable(mmc->supply.vqmmc);
if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
1950000))
- caps[1] &= ~(SDHCI_SUPPORT_SDR104 |
- SDHCI_SUPPORT_SDR50 |
- SDHCI_SUPPORT_DDR50);
+ host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
+ SDHCI_SUPPORT_SDR50 |
+ SDHCI_SUPPORT_DDR50);
if (ret) {
pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
mmc_hostname(mmc), ret);
@@ -3100,28 +3284,30 @@ int sdhci_add_host(struct sdhci_host *host)
}
}
- if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V)
- caps[1] &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
- SDHCI_SUPPORT_DDR50);
+ if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
+ host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
+ SDHCI_SUPPORT_DDR50);
+ }
/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
- if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
- SDHCI_SUPPORT_DDR50))
+ if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
+ SDHCI_SUPPORT_DDR50))
mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
/* SDR104 support also implies SDR50 support */
- if (caps[1] & SDHCI_SUPPORT_SDR104) {
+ if (host->caps1 & SDHCI_SUPPORT_SDR104) {
mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
/* SD3.0: SDR104 is supported so (for eMMC) the caps2
* field can be promoted to support HS200.
*/
if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
mmc->caps2 |= MMC_CAP2_HS200;
- } else if (caps[1] & SDHCI_SUPPORT_SDR50)
+ } else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
mmc->caps |= MMC_CAP_UHS_SDR50;
+ }
if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
- (caps[1] & SDHCI_SUPPORT_HS400))
+ (host->caps1 & SDHCI_SUPPORT_HS400))
mmc->caps2 |= MMC_CAP2_HS400;
if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
@@ -3130,25 +3316,25 @@ int sdhci_add_host(struct sdhci_host *host)
1300000)))
mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
- if ((caps[1] & SDHCI_SUPPORT_DDR50) &&
- !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
+ if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
+ !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
mmc->caps |= MMC_CAP_UHS_DDR50;
/* Does the host need tuning for SDR50? */
- if (caps[1] & SDHCI_USE_SDR50_TUNING)
+ if (host->caps1 & SDHCI_USE_SDR50_TUNING)
host->flags |= SDHCI_SDR50_NEEDS_TUNING;
/* Driver Type(s) (A, C, D) supported by the host */
- if (caps[1] & SDHCI_DRIVER_TYPE_A)
+ if (host->caps1 & SDHCI_DRIVER_TYPE_A)
mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
- if (caps[1] & SDHCI_DRIVER_TYPE_C)
+ if (host->caps1 & SDHCI_DRIVER_TYPE_C)
mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
- if (caps[1] & SDHCI_DRIVER_TYPE_D)
+ if (host->caps1 & SDHCI_DRIVER_TYPE_D)
mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
/* Initial value for re-tuning timer count */
- host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
- SDHCI_RETUNING_TIMER_COUNT_SHIFT;
+ host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
+ SDHCI_RETUNING_TIMER_COUNT_SHIFT;
/*
* In case Re-tuning Timer is not disabled, the actual value of
@@ -3158,7 +3344,7 @@ int sdhci_add_host(struct sdhci_host *host)
host->tuning_count = 1 << (host->tuning_count - 1);
/* Re-tuning mode supported by the Host Controller */
- host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >>
+ host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
SDHCI_RETUNING_MODE_SHIFT;
ocr_avail = 0;
@@ -3187,7 +3373,7 @@ int sdhci_add_host(struct sdhci_host *host)
}
}
- if (caps[0] & SDHCI_CAN_VDD_330) {
+ if (host->caps & SDHCI_CAN_VDD_330) {
ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
mmc->max_current_330 = ((max_current_caps &
@@ -3195,7 +3381,7 @@ int sdhci_add_host(struct sdhci_host *host)
SDHCI_MAX_CURRENT_330_SHIFT) *
SDHCI_MAX_CURRENT_MULTIPLIER;
}
- if (caps[0] & SDHCI_CAN_VDD_300) {
+ if (host->caps & SDHCI_CAN_VDD_300) {
ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
mmc->max_current_300 = ((max_current_caps &
@@ -3203,7 +3389,7 @@ int sdhci_add_host(struct sdhci_host *host)
SDHCI_MAX_CURRENT_300_SHIFT) *
SDHCI_MAX_CURRENT_MULTIPLIER;
}
- if (caps[0] & SDHCI_CAN_VDD_180) {
+ if (host->caps & SDHCI_CAN_VDD_180) {
ocr_avail |= MMC_VDD_165_195;
mmc->max_current_180 = ((max_current_caps &
@@ -3240,6 +3426,15 @@ int sdhci_add_host(struct sdhci_host *host)
goto unreg;
}
+ if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
+ (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
+ host->flags |= SDHCI_SIGNALING_180;
+
+ if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
+ host->flags |= SDHCI_SIGNALING_120;
+
spin_lock_init(&host->lock);
/*
@@ -3281,7 +3476,7 @@ int sdhci_add_host(struct sdhci_host *host)
if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
mmc->max_blk_size = 2;
} else {
- mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>
+ mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
SDHCI_MAX_BLOCK_SHIFT;
if (mmc->max_blk_size >= 3) {
pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
@@ -3297,6 +3492,28 @@ int sdhci_add_host(struct sdhci_host *host)
*/
mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
+ return 0;
+
+unreg:
+ if (!IS_ERR(mmc->supply.vqmmc))
+ regulator_disable(mmc->supply.vqmmc);
+undma:
+ if (host->align_buffer)
+ dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
+ host->adma_table_sz, host->align_buffer,
+ host->align_addr);
+ host->adma_table = NULL;
+ host->align_buffer = NULL;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sdhci_setup_host);
+
+int __sdhci_add_host(struct sdhci_host *host)
+{
+ struct mmc_host *mmc = host->mmc;
+ int ret;
+
/*
* Init tasklets.
*/
@@ -3304,6 +3521,8 @@ int sdhci_add_host(struct sdhci_host *host)
sdhci_tasklet_finish, (unsigned long)host);
setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
+ setup_timer(&host->data_timer, sdhci_timeout_data_timer,
+ (unsigned long)host);
init_waitqueue_head(&host->buf_ready_int);
@@ -3353,10 +3572,10 @@ unirq:
free_irq(host->irq, host);
untasklet:
tasklet_kill(&host->finish_tasklet);
-unreg:
+
if (!IS_ERR(mmc->supply.vqmmc))
regulator_disable(mmc->supply.vqmmc);
-undma:
+
if (host->align_buffer)
dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
host->adma_table_sz, host->align_buffer,
@@ -3366,7 +3585,18 @@ undma:
return ret;
}
+EXPORT_SYMBOL_GPL(__sdhci_add_host);
+
+int sdhci_add_host(struct sdhci_host *host)
+{
+ int ret;
+ ret = sdhci_setup_host(host);
+ if (ret)
+ return ret;
+
+ return __sdhci_add_host(host);
+}
EXPORT_SYMBOL_GPL(sdhci_add_host);
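The setup/add split lets a glue driver run between capability parsing and host registration. A hedged probe sketch (the caps tweak is invented):

	static int example_probe_tail(struct sdhci_host *host)
	{
		int ret;

		ret = sdhci_setup_host(host);	/* caps, DMA, clocks, OCR */
		if (ret)
			return ret;

		/* invented: adjust what setup derived before registering */
		host->mmc->caps |= MMC_CAP_NONREMOVABLE;

		return __sdhci_add_host(host);	/* tasklets, timers, IRQ */
	}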
void sdhci_remove_host(struct sdhci_host *host, int dead)
@@ -3379,12 +3609,10 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
host->flags |= SDHCI_DEVICE_DEAD;
- if (host->mrq) {
+ if (sdhci_has_requests(host)) {
pr_err("%s: Controller removed during transfer!\n",
mmc_hostname(mmc));
-
- host->mrq->cmd->error = -ENOMEDIUM;
- tasklet_schedule(&host->finish_tasklet);
+ sdhci_error_out_mrqs(host, -ENOMEDIUM);
}
spin_unlock_irqrestore(&host->lock, flags);
@@ -3404,6 +3632,7 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
free_irq(host->irq, host);
del_timer_sync(&host->timer);
+ del_timer_sync(&host->data_timer);
tasklet_kill(&host->finish_tasklet);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 609f87ca5..0411c9f36 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -128,6 +128,7 @@
#define SDHCI_INT_CARD_INSERT 0x00000040
#define SDHCI_INT_CARD_REMOVE 0x00000080
#define SDHCI_INT_CARD_INT 0x00000100
+#define SDHCI_INT_RETUNE 0x00001000
#define SDHCI_INT_ERROR 0x00008000
#define SDHCI_INT_TIMEOUT 0x00010000
#define SDHCI_INT_CRC 0x00020000
@@ -186,6 +187,7 @@
#define SDHCI_CAN_DO_ADMA1 0x00100000
#define SDHCI_CAN_DO_HISPD 0x00200000
#define SDHCI_CAN_DO_SDMA 0x00400000
+#define SDHCI_CAN_DO_SUSPEND 0x00800000
#define SDHCI_CAN_VDD_330 0x01000000
#define SDHCI_CAN_VDD_300 0x02000000
#define SDHCI_CAN_VDD_180 0x04000000
@@ -314,6 +316,9 @@ struct sdhci_adma2_64_desc {
*/
#define SDHCI_MAX_SEGS 128
+/* Allow for a command request and a data request at the same time */
+#define SDHCI_MAX_MRQS 2
+
enum sdhci_cookie {
COOKIE_UNMAPPED,
COOKIE_PRE_MAPPED, /* mapped by sdhci_pre_req() */
@@ -447,6 +452,9 @@ struct sdhci_host {
#define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */
#define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */
#define SDHCI_HS400_TUNING (1<<13) /* Tuning for HS400 */
+#define SDHCI_SIGNALING_330 (1<<14) /* Host is capable of 3.3V signaling */
+#define SDHCI_SIGNALING_180 (1<<15) /* Host is capable of 1.8V signaling */
+#define SDHCI_SIGNALING_120 (1<<16) /* Host is capable of 1.2V signaling */
unsigned int version; /* SDHCI spec. version */
@@ -460,12 +468,13 @@ struct sdhci_host {
bool runtime_suspended; /* Host is runtime suspended */
bool bus_on; /* Bus power prevents runtime suspend */
bool preset_enabled; /* Preset is enabled */
+ bool pending_reset; /* Cmd/data reset is pending */
- struct mmc_request *mrq; /* Current request */
+ struct mmc_request *mrqs_done[SDHCI_MAX_MRQS]; /* Requests done */
struct mmc_command *cmd; /* Current command */
+ struct mmc_command *data_cmd; /* Current data command */
struct mmc_data *data; /* Current data request */
unsigned int data_early:1; /* Data finished before cmd */
- unsigned int busy_handle:1; /* Handling the order of Busy-end */
struct sg_mapping_iter sg_miter; /* SG state for PIO */
unsigned int blocks; /* remaining PIO blocks */
@@ -486,9 +495,11 @@ struct sdhci_host {
struct tasklet_struct finish_tasklet; /* Tasklet structures */
struct timer_list timer; /* Timer for timeouts */
+ struct timer_list data_timer; /* Timer for data timeouts */
- u32 caps; /* Alternative CAPABILITY_0 */
- u32 caps1; /* Alternative CAPABILITY_1 */
+ u32 caps; /* CAPABILITY_0 */
+ u32 caps1; /* CAPABILITY_1 */
+ bool read_caps; /* Capability flags have been read */
unsigned int ocr_avail_sdio; /* OCR bit masks */
unsigned int ocr_avail_sd;
@@ -508,6 +519,8 @@ struct sdhci_host {
unsigned int tuning_count; /* Timer count for re-tuning */
unsigned int tuning_mode; /* Re-tuning mode supported by host */
#define SDHCI_TUNING_MODE_1 0
+#define SDHCI_TUNING_MODE_2 1
+#define SDHCI_TUNING_MODE_3 2
unsigned long private[0] ____cacheline_aligned;
};
@@ -645,11 +658,20 @@ static inline void *sdhci_priv(struct sdhci_host *host)
}
extern void sdhci_card_detect(struct sdhci_host *host);
+extern void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps,
+ u32 *caps1);
+extern int sdhci_setup_host(struct sdhci_host *host);
+extern int __sdhci_add_host(struct sdhci_host *host);
extern int sdhci_add_host(struct sdhci_host *host);
extern void sdhci_remove_host(struct sdhci_host *host, int dead);
extern void sdhci_send_command(struct sdhci_host *host,
struct mmc_command *cmd);
+static inline void sdhci_read_caps(struct sdhci_host *host)
+{
+ __sdhci_read_caps(host, NULL, NULL, NULL);
+}
+
static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host)
{
return !!(host->flags & SDHCI_SDIO_IRQ_ENABLED);
diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c
index 983b8b32e..111b66f54 100644
--- a/drivers/mmc/host/sdhci_f_sdh30.c
+++ b/drivers/mmc/host/sdhci_f_sdh30.c
@@ -222,7 +222,7 @@ static struct platform_driver sdhci_f_sdh30_driver = {
.driver = {
.name = "f_sdh30",
.of_match_table = f_sdh30_dt_ids,
- .pm = SDHCI_PLTFM_PMOPS,
+ .pm = &sdhci_pltfm_pmops,
},
.probe = sdhci_f_sdh30_probe,
.remove = sdhci_f_sdh30_remove,
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index dd64b8663..900778421 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -574,7 +574,7 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
if (state1 & STS1_CMDSEQ) {
sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
- for (timeout = 10000000; timeout; timeout--) {
+ for (timeout = 10000; timeout; timeout--) {
if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
& STS1_CMDSEQ))
break;
@@ -819,10 +819,12 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
tmp |= CMD_SET_RTYP_NO;
break;
case MMC_RSP_R1:
- case MMC_RSP_R1B:
case MMC_RSP_R3:
tmp |= CMD_SET_RTYP_6B;
break;
+ case MMC_RSP_R1B:
+ tmp |= CMD_SET_RBSY | CMD_SET_RTYP_6B;
+ break;
case MMC_RSP_R2:
tmp |= CMD_SET_RTYP_17B;
break;
@@ -830,17 +832,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
dev_err(dev, "Unsupported response type.\n");
break;
}
- switch (opc) {
- /* RBSY */
- case MMC_SLEEP_AWAKE:
- case MMC_SWITCH:
- case MMC_STOP_TRANSMISSION:
- case MMC_SET_WRITE_PROT:
- case MMC_CLR_WRITE_PROT:
- case MMC_ERASE:
- tmp |= CMD_SET_RBSY;
- break;
- }
+
/* WDAT / DATW */
if (data) {
tmp |= CMD_SET_WDAT;
@@ -925,23 +917,13 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
{
struct mmc_command *cmd = mrq->cmd;
u32 opc = cmd->opcode;
- u32 mask;
+ u32 mask = 0;
unsigned long flags;
- switch (opc) {
- /* response busy check */
- case MMC_SLEEP_AWAKE:
- case MMC_SWITCH:
- case MMC_STOP_TRANSMISSION:
- case MMC_SET_WRITE_PROT:
- case MMC_CLR_WRITE_PROT:
- case MMC_ERASE:
+ if (cmd->flags & MMC_RSP_BUSY)
mask = MASK_START_CMD | MASK_MRBSYE;
- break;
- default:
+ else
mask = MASK_START_CMD | MASK_MCRSPE;
- break;
- }
if (host->ccs_enable)
mask |= MASK_MCCSTO;
@@ -1009,22 +991,6 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
host->state = STATE_REQUEST;
spin_unlock_irqrestore(&host->lock, flags);
- switch (mrq->cmd->opcode) {
- /* MMCIF does not support SD/SDIO command */
- case MMC_SLEEP_AWAKE: /* = SD_IO_SEND_OP_COND (5) */
- case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
- if ((mrq->cmd->flags & MMC_CMD_MASK) != MMC_CMD_BCR)
- break;
- case MMC_APP_CMD:
- case SD_IO_RW_DIRECT:
- host->state = STATE_IDLE;
- mrq->cmd->error = -ETIMEDOUT;
- mmc_request_done(mmc, mrq);
- return;
- default:
- break;
- }
-
host->mrq = mrq;
sh_mmcif_start_cmd(host, mrq);
@@ -1488,6 +1454,9 @@ static int sh_mmcif_probe(struct platform_device *pdev)
sh_mmcif_init_ocr(host);
mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY;
+ mmc->caps2 |= MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO;
+ mmc->max_busy_timeout = 10000;
+
if (pd && pd->caps)
mmc->caps |= pd->caps;
mmc->max_segs = 32;
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index f750f9494..c3b651bf8 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -39,6 +39,12 @@
#define EXT_ACC 0xe4
+#define SDHI_VER_GEN2_SDR50 0x490c
+/* very old datasheets said 0x490c for SDR104, too. They are wrong! */
+#define SDHI_VER_GEN2_SDR104 0xcb0d
+#define SDHI_VER_GEN3_SD 0xcc10
+#define SDHI_VER_GEN3_SDMMC 0xcd10
+
#define host_to_priv(host) container_of((host)->pdata, struct sh_mobile_sdhi, mmc_data)
struct sh_mobile_sdhi_of_data {
@@ -109,14 +115,14 @@ static void sh_mobile_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
* sh_mobile_sdhi_of_data :: dma_buswidth
*/
switch (sd_ctrl_read16(host, CTL_VERSION)) {
- case 0x490C:
+ case SDHI_VER_GEN2_SDR50:
val = (width == 32) ? 0x0001 : 0x0000;
break;
- case 0xCB0D:
+ case SDHI_VER_GEN2_SDR104:
val = (width == 32) ? 0x0000 : 0x0001;
break;
- case 0xCC10: /* Gen3, SD only */
- case 0xCD10: /* Gen3, SD + MMC */
+ case SDHI_VER_GEN3_SD:
+ case SDHI_VER_GEN3_SDMMC:
if (width == 64)
val = 0x0000;
else if (width == 32)
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 1aac2ad8e..7f63ec05b 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -259,7 +259,7 @@ static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host, int addr, u32 val)
{
- writew(val, host->ctl + (addr << host->bus_shift));
+ writew(val & 0xffff, host->ctl + (addr << host->bus_shift));
writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index f44e2ab7a..92467efc4 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -1086,7 +1086,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
_host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
mmc->caps & MMC_CAP_NEEDS_POLL ||
- mmc->caps & MMC_CAP_NONREMOVABLE ||
+ !mmc_card_is_removable(mmc) ||
mmc->slot.cd_irq >= 0);
if (tmio_mmc_clk_enable(_host) < 0) {
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index 845dd27d9..377947580 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -122,7 +122,7 @@ static int bcm47xxpart_parse(struct mtd_info *master,
for (offset = 0; offset <= master->size - blocksize;
offset += blocksize) {
/* Nothing more in higher memory on BCM47XX (MIPS) */
- if (config_enabled(CONFIG_BCM47XX) && offset >= 0x2000000)
+ if (IS_ENABLED(CONFIG_BCM47XX) && offset >= 0x2000000)
break;
if (curr_part >= BCM47XXPART_MAX_PARTS) {
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 9a1a6ffd1..94d3eb42c 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -416,7 +416,7 @@ static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t
return ret;
}
-static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
+static int do_write_buffer(struct map_info *map, struct flchip *chip,
unsigned long adr, const u_char *buf, int len)
{
struct cfi_private *cfi = map->fldrv_priv;
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index f73c41697..58329d2da 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -113,12 +113,12 @@ config MTD_SST25L
if you want to specify device partitioning.
config MTD_BCM47XXSFLASH
- tristate "R/O support for serial flash on BCMA bus"
- depends on BCMA_SFLASH
+ tristate "Support for serial flash on BCMA bus"
+ depends on BCMA_SFLASH && (MIPS || ARM)
help
BCMA bus can have various flash memories attached; they are
registered by bcma as platform devices. This enables the driver for
- serial flash memories (only read-only mode is implemented).
+ serial flash memories.
config MTD_SLRAM
tristate "Uncached system RAM"
@@ -171,18 +171,6 @@ config MTDRAM_ERASE_SIZE
as a module, it is also possible to specify this as a parameter when
loading the module.
-#If not a module (I don't want to test it as a module)
-config MTDRAM_ABS_POS
- hex "SRAM Hexadecimal Absolute position or 0"
- depends on MTD_MTDRAM=y
- default "0"
- help
- If you have system RAM accessible by the CPU but not used by Linux
- in normal operation, you can give the physical address at which the
- available RAM starts, and the MTDRAM driver will use it instead of
- allocating space from Linux's available memory. Otherwise, leave
- this set to zero. Most people will want to leave this as zero.
-
config MTD_BLOCK2MTD
tristate "MTD using block device"
depends on BLOCK
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 9d6854467..9cf7fcd28 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -73,14 +73,15 @@ static int m25p80_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
return spi_write(spi, flash->command, len + 1);
}
-static void m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
+static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
+ const u_char *buf)
{
struct m25p *flash = nor->priv;
struct spi_device *spi = flash->spi;
struct spi_transfer t[2] = {};
struct spi_message m;
int cmd_sz = m25p_cmdsz(nor);
+ ssize_t ret;
spi_message_init(&m);
@@ -98,9 +99,14 @@ static void m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
t[1].len = len;
spi_message_add_tail(&t[1], &m);
- spi_sync(spi, &m);
+ ret = spi_sync(spi, &m);
+ if (ret)
+ return ret;
- *retlen += m.actual_length - cmd_sz;
+ ret = m.actual_length - cmd_sz;
+ if (ret < 0)
+ return -EIO;
+ return ret;
}
static inline unsigned int m25p80_rx_nbits(struct spi_nor *nor)
@@ -119,21 +125,21 @@ static inline unsigned int m25p80_rx_nbits(struct spi_nor *nor)
* Read an address range from the nor chip. The address range
* may be any size provided it is within the physical boundaries.
*/
-static int m25p80_read(struct spi_nor *nor, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
+static ssize_t m25p80_read(struct spi_nor *nor, loff_t from, size_t len,
+ u_char *buf)
{
struct m25p *flash = nor->priv;
struct spi_device *spi = flash->spi;
struct spi_transfer t[2];
struct spi_message m;
unsigned int dummy = nor->read_dummy;
+ ssize_t ret;
/* convert the dummy cycles to the number of bytes */
dummy /= 8;
if (spi_flash_read_supported(spi)) {
struct spi_flash_read_message msg;
- int ret;
memset(&msg, 0, sizeof(msg));
@@ -149,8 +155,9 @@ static int m25p80_read(struct spi_nor *nor, loff_t from, size_t len,
msg.data_nbits = m25p80_rx_nbits(nor);
ret = spi_flash_read(spi, &msg);
- *retlen = msg.retlen;
- return ret;
+ if (ret < 0)
+ return ret;
+ return msg.retlen;
}
spi_message_init(&m);
@@ -165,13 +172,17 @@ static int m25p80_read(struct spi_nor *nor, loff_t from, size_t len,
t[1].rx_buf = buf;
t[1].rx_nbits = m25p80_rx_nbits(nor);
- t[1].len = len;
+ t[1].len = min(len, spi_max_transfer_size(spi));
spi_message_add_tail(&t[1], &m);
- spi_sync(spi, &m);
+ ret = spi_sync(spi, &m);
+ if (ret)
+ return ret;
- *retlen = m.actual_length - m25p_cmdsz(nor) - dummy;
- return 0;
+ ret = m.actual_length - m25p_cmdsz(nor) - dummy;
+ if (ret < 0)
+ return -EIO;
+ return ret;
}
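Under the new ssize_t contract, ->read() returns the number of bytes transferred or a negative errno, and a transfer may now be shorter than requested (capped by spi_max_transfer_size()). A hedged caller sketch (illustrative; not part of this patch):

	while (len) {
		ssize_t n = nor->read(nor, from, len, buf);

		if (n < 0)
			return n;
		from += n;
		buf += n;
		len -= n;
	}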
/*
diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c
index d5b870b3f..f5396f26d 100644
--- a/drivers/mtd/devices/powernv_flash.c
+++ b/drivers/mtd/devices/powernv_flash.c
@@ -95,7 +95,7 @@ static int powernv_flash_async_op(struct mtd_info *mtd, enum flash_op op,
return -EIO;
}
- rc = be64_to_cpu(msg.params[1]);
+ rc = opal_get_async_rc(msg);
if (rc == OPAL_SUCCESS) {
rc = 0;
if (retlen)
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 22f3858c0..3fad35942 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -186,7 +186,7 @@ static int of_flash_probe(struct platform_device *dev)
* consists internally of 2 non-identical NOR chips on one die.
*/
p = of_get_property(dp, "reg", &count);
- if (count % reg_tuple_size != 0) {
+ if (!p || count % reg_tuple_size != 0) {
dev_err(&dev->dev, "Malformed reg property on %s\n",
dev->dev.of_node->full_name);
err = -EINVAL;
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 74ae24364..8d58acf33 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -87,14 +87,14 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
if (req->cmd_type != REQ_TYPE_FS)
return -EIO;
- if (req->cmd_flags & REQ_FLUSH)
+ if (req_op(req) == REQ_OP_FLUSH)
return tr->flush(dev);
if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
get_capacity(req->rq_disk))
return -EIO;
- if (req->cmd_flags & REQ_DISCARD)
+ if (req_op(req) == REQ_OP_DISCARD)
return tr->discard(dev, block, nsect);
if (rq_data_dir(req) == READ) {
@@ -431,12 +431,10 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
goto error4;
INIT_WORK(&new->work, mtd_blktrans_work);
- gd->driverfs_dev = &new->mtd->dev;
-
if (new->readonly)
set_disk_ro(gd, 1);
- add_disk(gd);
+ device_add_disk(&new->mtd->dev, gd);
if (new->disk_attributes) {
ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index f05e0e9eb..21ff58099 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -438,7 +438,7 @@ config MTD_NAND_FSL_ELBC
config MTD_NAND_FSL_IFC
tristate "NAND support for Freescale IFC controller"
- depends on MTD_NAND && FSL_SOC
+ depends on MTD_NAND && (FSL_SOC || ARCH_LAYERSCAPE)
select FSL_IFC
select MEMORY
help
@@ -539,7 +539,6 @@ config MTD_NAND_FSMC
config MTD_NAND_XWAY
tristate "Support for NAND on Lantiq XWAY SoC"
depends on LANTIQ && SOC_TYPE_XWAY
- select MTD_NAND_PLATFORM
help
Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
to the External Bus Unit (EBU).
@@ -563,4 +562,11 @@ config MTD_NAND_QCOM
Enables support for NAND flash chips on SoCs containing the EBI2 NAND
controller. This controller is found on IPQ806x SoC.
+config MTD_NAND_MTK
+ tristate "Support for NAND controller on MTK SoCs"
+ depends on HAS_DMA
+ help
+ Enables support for the NAND controller on MTK SoCs.
+ This controller is found on mt27xx, mt81xx, mt65xx SoCs.
+
endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index f55335373..cafde6f3d 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -57,5 +57,6 @@ obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o
obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o
obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o
+obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o
nand-objs := nand_base.o nand_bbt.o nand_timings.o
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
index b76ad7c01..8eb2c64df 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/brcmnand/brcmnand.c
@@ -340,6 +340,36 @@ static const u16 brcmnand_regs_v71[] = {
[BRCMNAND_FC_BASE] = 0x400,
};
+/* BRCMNAND v7.2 */
+static const u16 brcmnand_regs_v72[] = {
+ [BRCMNAND_CMD_START] = 0x04,
+ [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
+ [BRCMNAND_CMD_ADDRESS] = 0x0c,
+ [BRCMNAND_INTFC_STATUS] = 0x14,
+ [BRCMNAND_CS_SELECT] = 0x18,
+ [BRCMNAND_CS_XOR] = 0x1c,
+ [BRCMNAND_LL_OP] = 0x20,
+ [BRCMNAND_CS0_BASE] = 0x50,
+ [BRCMNAND_CS1_BASE] = 0,
+ [BRCMNAND_CORR_THRESHOLD] = 0xdc,
+ [BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0,
+ [BRCMNAND_UNCORR_COUNT] = 0xfc,
+ [BRCMNAND_CORR_COUNT] = 0x100,
+ [BRCMNAND_CORR_EXT_ADDR] = 0x10c,
+ [BRCMNAND_CORR_ADDR] = 0x110,
+ [BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
+ [BRCMNAND_UNCORR_ADDR] = 0x118,
+ [BRCMNAND_SEMAPHORE] = 0x150,
+ [BRCMNAND_ID] = 0x194,
+ [BRCMNAND_ID_EXT] = 0x198,
+ [BRCMNAND_LL_RDATA] = 0x19c,
+ [BRCMNAND_OOB_READ_BASE] = 0x200,
+ [BRCMNAND_OOB_READ_10_BASE] = 0,
+ [BRCMNAND_OOB_WRITE_BASE] = 0x400,
+ [BRCMNAND_OOB_WRITE_10_BASE] = 0,
+ [BRCMNAND_FC_BASE] = 0x600,
+};
+
enum brcmnand_cs_reg {
BRCMNAND_CS_CFG_EXT = 0,
BRCMNAND_CS_CFG,
@@ -435,7 +465,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
}
/* Register offsets */
- if (ctrl->nand_version >= 0x0701)
+ if (ctrl->nand_version >= 0x0702)
+ ctrl->reg_offsets = brcmnand_regs_v72;
+ else if (ctrl->nand_version >= 0x0701)
ctrl->reg_offsets = brcmnand_regs_v71;
else if (ctrl->nand_version >= 0x0600)
ctrl->reg_offsets = brcmnand_regs_v60;
@@ -480,7 +512,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
}
/* Maximum spare area sector size (per 512B) */
- if (ctrl->nand_version >= 0x0600)
+ if (ctrl->nand_version >= 0x0702)
+ ctrl->max_oob = 128;
+ else if (ctrl->nand_version >= 0x0600)
ctrl->max_oob = 64;
else if (ctrl->nand_version >= 0x0500)
ctrl->max_oob = 32;
@@ -583,14 +617,20 @@ static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val)
enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD;
int cs = host->cs;
- if (ctrl->nand_version >= 0x0600)
+ if (ctrl->nand_version >= 0x0702)
+ bits = 7;
+ else if (ctrl->nand_version >= 0x0600)
bits = 6;
else if (ctrl->nand_version >= 0x0500)
bits = 5;
else
bits = 4;
- if (ctrl->nand_version >= 0x0600) {
+ if (ctrl->nand_version >= 0x0702) {
+ if (cs >= 4)
+ reg = BRCMNAND_CORR_THRESHOLD_EXT;
+ shift = (cs % 4) * bits;
+ } else if (ctrl->nand_version >= 0x0600) {
if (cs >= 5)
reg = BRCMNAND_CORR_THRESHOLD_EXT;
shift = (cs % 5) * bits;
@@ -631,19 +671,28 @@ enum {
static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
{
- if (ctrl->nand_version >= 0x0600)
+ if (ctrl->nand_version >= 0x0702)
+ return GENMASK(7, 0);
+ else if (ctrl->nand_version >= 0x0600)
return GENMASK(6, 0);
else
return GENMASK(5, 0);
}
#define NAND_ACC_CONTROL_ECC_SHIFT 16
+#define NAND_ACC_CONTROL_ECC_EXT_SHIFT 13
static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl)
{
u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f;
- return mask << NAND_ACC_CONTROL_ECC_SHIFT;
+ mask <<= NAND_ACC_CONTROL_ECC_SHIFT;
+
+ /* v7.2 includes additional ECC levels */
+ if (ctrl->nand_version >= 0x0702)
+ mask |= 0x7 << NAND_ACC_CONTROL_ECC_EXT_SHIFT;
+
+ return mask;
}
static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en)
@@ -667,7 +716,9 @@ static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en)
static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl)
{
- if (ctrl->nand_version >= 0x0600)
+ if (ctrl->nand_version >= 0x0702)
+ return 9;
+ else if (ctrl->nand_version >= 0x0600)
return 7;
else if (ctrl->nand_version >= 0x0500)
return 6;
@@ -773,10 +824,16 @@ enum brcmnand_llop_type {
* Internal support functions
***********************************************************************/
-static inline bool is_hamming_ecc(struct brcmnand_cfg *cfg)
+static inline bool is_hamming_ecc(struct brcmnand_controller *ctrl,
+ struct brcmnand_cfg *cfg)
{
- return cfg->sector_size_1k == 0 && cfg->spare_area_size == 16 &&
- cfg->ecc_level == 15;
+ if (ctrl->nand_version <= 0x0701)
+ return cfg->sector_size_1k == 0 && cfg->spare_area_size == 16 &&
+ cfg->ecc_level == 15;
+ else
+ return cfg->sector_size_1k == 0 && ((cfg->spare_area_size == 16 &&
+ cfg->ecc_level == 15) ||
+ (cfg->spare_area_size == 28 && cfg->ecc_level == 16));
}
/*
@@ -931,7 +988,7 @@ static int brcmstb_choose_ecc_layout(struct brcmnand_host *host)
if (p->sector_size_1k)
ecc_level <<= 1;
- if (is_hamming_ecc(p)) {
+ if (is_hamming_ecc(host->ctrl, p)) {
ecc->bytes = 3 * sectors;
mtd_set_ooblayout(mtd, &brcmnand_hamming_ooblayout_ops);
return 0;
@@ -1108,7 +1165,7 @@ static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
ctrl->cmd_pending = cmd;
intfc = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
- BUG_ON(!(intfc & INTFC_CTLR_READY));
+ WARN_ON(!(intfc & INTFC_CTLR_READY));
mb(); /* flush previous writes */
brcmnand_write_reg(ctrl, BRCMNAND_CMD_START,
@@ -1545,6 +1602,56 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
return ret;
}
+/*
+ * Check a page to see if it is erased (w/ bitflips) after an uncorrectable ECC
+ * error
+ *
+ * Because the HW ECC signals an ECC error if an erased page has even a single
+ * bitflip, we must check each ECC error to see if it is actually an erased
+ * page with bitflips, not a truly corrupted page.
+ *
+ * On a real error, return a negative error code (-EBADMSG for an ECC error);
+ * buf will contain the raw data.
+ * Otherwise, fill buf with 0xffs and return the maximum number of
+ * bitflips per ECC sector to the caller.
+ *
+ */
+static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd,
+ struct nand_chip *chip, void *buf, u64 addr)
+{
+ int i, sas;
+ void *oob = chip->oob_poi;
+ int bitflips = 0;
+ int page = addr >> chip->page_shift;
+ int ret;
+
+ if (!buf) {
+ buf = chip->buffers->databuf;
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+ }
+
+ sas = mtd->oobsize / chip->ecc.steps;
+
+ /* read without ecc for verification */
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
+ ret = chip->ecc.read_page_raw(mtd, chip, buf, true, page);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < chip->ecc.steps; i++, oob += sas) {
+ ret = nand_check_erased_ecc_chunk(buf, chip->ecc.size,
+ oob, sas, NULL, 0,
+ chip->ecc.strength);
+ if (ret < 0)
+ return ret;
+
+ bitflips = max(bitflips, ret);
+ }
+
+ return bitflips;
+}
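A hedged example of the per-chunk semantics relied on above: with chip->ecc.strength == 4, a 512-byte chunk whose data plus OOB contain three zero bits makes nand_check_erased_ecc_chunk() return 3 (an erased page with three bitflips), while five zero bits make it return -EBADMSG.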
+
static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
u64 addr, unsigned int trans, u32 *buf, u8 *oob)
{
@@ -1552,9 +1659,11 @@ static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
struct brcmnand_controller *ctrl = host->ctrl;
u64 err_addr = 0;
int err;
+ bool retry = true;
dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf);
+try_dmaread:
brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_COUNT, 0);
if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
@@ -1575,6 +1684,34 @@ static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
}
if (mtd_is_eccerr(err)) {
+ /*
+ * On controller versions 7.0 and 7.1, a DMA read issued after a
+ * prior PIO read that reported an uncorrectable error can latch
+ * that stale error; it is cleared only by a subsequent DMA read.
+ * Retry once, to clear a possibly false error reported for the
+ * current DMA read.
+ */
+ if ((ctrl->nand_version == 0x0700) ||
+ (ctrl->nand_version == 0x0701)) {
+ if (retry) {
+ retry = false;
+ goto try_dmaread;
+ }
+ }
+
+ /*
+ * Controller version 7.2 has a hw mechanism to detect erased-page
+ * bitflips; apply the sw verification only for older controllers.
+ */
+ if (ctrl->nand_version < 0x0702) {
+ err = brcmstb_nand_verify_erased_page(mtd, chip, buf,
+ addr);
+ /* erased page bitflips corrected */
+ if (err > 0)
+ return err;
+ }
+
dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n",
(unsigned long long)err_addr);
mtd->ecc_stats.failed++;
@@ -1857,7 +1994,8 @@ static int brcmnand_set_cfg(struct brcmnand_host *host,
return 0;
}
-static void brcmnand_print_cfg(char *buf, struct brcmnand_cfg *cfg)
+static void brcmnand_print_cfg(struct brcmnand_host *host,
+ char *buf, struct brcmnand_cfg *cfg)
{
buf += sprintf(buf,
"%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit",
@@ -1868,7 +2006,7 @@ static void brcmnand_print_cfg(char *buf, struct brcmnand_cfg *cfg)
cfg->spare_area_size, cfg->device_width);
/* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */
- if (is_hamming_ecc(cfg))
+ if (is_hamming_ecc(host->ctrl, cfg))
sprintf(buf, ", Hamming ECC");
else if (cfg->sector_size_1k)
sprintf(buf, ", BCH-%u (1KiB sector)", cfg->ecc_level << 1);
@@ -1987,7 +2125,7 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
brcmnand_set_ecc_enabled(host, 1);
- brcmnand_print_cfg(msg, cfg);
+ brcmnand_print_cfg(host, msg, cfg);
dev_info(ctrl->dev, "detected %s\n", msg);
/* Configure ACC_CONTROL */
@@ -1995,6 +2133,10 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
tmp = nand_readreg(ctrl, offs);
tmp &= ~ACC_CONTROL_PARTIAL_PAGE;
tmp &= ~ACC_CONTROL_RD_ERASED;
+
+ /* We need to turn on reads from erased pages protected by ECC */
+ if (ctrl->nand_version >= 0x0702)
+ tmp |= ACC_CONTROL_RD_ERASED;
tmp &= ~ACC_CONTROL_FAST_PGM_RDIN;
if (ctrl->features & BRCMNAND_HAS_PREFETCH) {
/*
@@ -2195,6 +2337,7 @@ static const struct of_device_id brcmnand_of_match[] = {
{ .compatible = "brcm,brcmnand-v6.2" },
{ .compatible = "brcm,brcmnand-v7.0" },
{ .compatible = "brcm,brcmnand-v7.1" },
+ { .compatible = "brcm,brcmnand-v7.2" },
{},
};
MODULE_DEVICE_TABLE(of, brcmnand_of_match);
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index cc07ba0f0..27fa8b87c 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -240,6 +240,9 @@ static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
unsigned long flags;
u32 val;
+ /* Reset ECC hardware */
+ davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
+
spin_lock_irqsave(&davinci_nand_lock, flags);
/* Start 4-bit ECC calculation for read/write */
diff --git a/drivers/mtd/nand/jz4780_bch.c b/drivers/mtd/nand/jz4780_bch.c
index d74f4ba4a..731c6051d 100644
--- a/drivers/mtd/nand/jz4780_bch.c
+++ b/drivers/mtd/nand/jz4780_bch.c
@@ -375,6 +375,6 @@ static struct platform_driver jz4780_bch_driver = {
module_platform_driver(jz4780_bch_driver);
MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
-MODULE_AUTHOR("Harvey Hunt <harvey.hunt@imgtec.com>");
+MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>");
MODULE_DESCRIPTION("Ingenic JZ4780 BCH error correction driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/jz4780_nand.c b/drivers/mtd/nand/jz4780_nand.c
index daf3c4217..175f67da2 100644
--- a/drivers/mtd/nand/jz4780_nand.c
+++ b/drivers/mtd/nand/jz4780_nand.c
@@ -412,6 +412,6 @@ static struct platform_driver jz4780_nand_driver = {
module_platform_driver(jz4780_nand_driver);
MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
-MODULE_AUTHOR("Harvey Hunt <harvey.hunt@imgtec.com>");
+MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>");
MODULE_DESCRIPTION("Ingenic JZ4780 NAND driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
new file mode 100644
index 000000000..d54f66641
--- /dev/null
+++ b/drivers/mtd/nand/mtk_ecc.c
@@ -0,0 +1,534 @@
+/*
+ * MTK ECC controller driver.
+ * Copyright (C) 2016 MediaTek Inc.
+ * Authors: Xiaolei Li <xiaolei.li@mediatek.com>
+ * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/mutex.h>
+
+#include "mtk_ecc.h"
+
+#define ECC_IDLE_MASK BIT(0)
+#define ECC_IRQ_EN BIT(0)
+#define ECC_OP_ENABLE (1)
+#define ECC_OP_DISABLE (0)
+
+#define ECC_ENCCON (0x00)
+#define ECC_ENCCNFG (0x04)
+#define ECC_CNFG_4BIT (0)
+#define ECC_CNFG_6BIT (1)
+#define ECC_CNFG_8BIT (2)
+#define ECC_CNFG_10BIT (3)
+#define ECC_CNFG_12BIT (4)
+#define ECC_CNFG_14BIT (5)
+#define ECC_CNFG_16BIT (6)
+#define ECC_CNFG_18BIT (7)
+#define ECC_CNFG_20BIT (8)
+#define ECC_CNFG_22BIT (9)
+#define ECC_CNFG_24BIT (0xa)
+#define ECC_CNFG_28BIT (0xb)
+#define ECC_CNFG_32BIT (0xc)
+#define ECC_CNFG_36BIT (0xd)
+#define ECC_CNFG_40BIT (0xe)
+#define ECC_CNFG_44BIT (0xf)
+#define ECC_CNFG_48BIT (0x10)
+#define ECC_CNFG_52BIT (0x11)
+#define ECC_CNFG_56BIT (0x12)
+#define ECC_CNFG_60BIT (0x13)
+#define ECC_MODE_SHIFT (5)
+#define ECC_MS_SHIFT (16)
+#define ECC_ENCDIADDR (0x08)
+#define ECC_ENCIDLE (0x0C)
+#define ECC_ENCPAR(x) (0x10 + (x) * sizeof(u32))
+#define ECC_ENCIRQ_EN (0x80)
+#define ECC_ENCIRQ_STA (0x84)
+#define ECC_DECCON (0x100)
+#define ECC_DECCNFG (0x104)
+#define DEC_EMPTY_EN BIT(31)
+#define DEC_CNFG_CORRECT (0x3 << 12)
+#define ECC_DECIDLE (0x10C)
+#define ECC_DECENUM0 (0x114)
+#define ERR_MASK (0x3f)
+#define ECC_DECDONE (0x124)
+#define ECC_DECIRQ_EN (0x200)
+#define ECC_DECIRQ_STA (0x204)
+
+#define ECC_TIMEOUT (500000)
+
+#define ECC_IDLE_REG(op) ((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE)
+#define ECC_CTL_REG(op) ((op) == ECC_ENCODE ? ECC_ENCCON : ECC_DECCON)
+#define ECC_IRQ_REG(op) ((op) == ECC_ENCODE ? \
+ ECC_ENCIRQ_EN : ECC_DECIRQ_EN)
+
+struct mtk_ecc {
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *clk;
+
+ struct completion done;
+ struct mutex lock;
+ u32 sectors;
+};
+
+static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
+ enum mtk_ecc_operation op)
+{
+ struct device *dev = ecc->dev;
+ u32 val;
+ int ret;
+
+ ret = readl_poll_timeout_atomic(ecc->regs + ECC_IDLE_REG(op), val,
+ val & ECC_IDLE_MASK,
+ 10, ECC_TIMEOUT);
+ if (ret)
+ dev_warn(dev, "%s NOT idle\n",
+ op == ECC_ENCODE ? "encoder" : "decoder");
+}
+
+static irqreturn_t mtk_ecc_irq(int irq, void *id)
+{
+ struct mtk_ecc *ecc = id;
+ enum mtk_ecc_operation op;
+ u32 dec, enc;
+
+ dec = readw(ecc->regs + ECC_DECIRQ_STA) & ECC_IRQ_EN;
+ if (dec) {
+ op = ECC_DECODE;
+ dec = readw(ecc->regs + ECC_DECDONE);
+ if (dec & ecc->sectors) {
+ ecc->sectors = 0;
+ complete(&ecc->done);
+ } else {
+ return IRQ_HANDLED;
+ }
+ } else {
+ enc = readl(ecc->regs + ECC_ENCIRQ_STA) & ECC_IRQ_EN;
+ if (enc) {
+ op = ECC_ENCODE;
+ complete(&ecc->done);
+ } else {
+ return IRQ_NONE;
+ }
+ }
+
+ writel(0, ecc->regs + ECC_IRQ_REG(op));
+
+ return IRQ_HANDLED;
+}
+
+static void mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
+{
+ u32 ecc_bit = ECC_CNFG_4BIT, dec_sz, enc_sz;
+ u32 reg;
+
+ switch (config->strength) {
+ case 4:
+ ecc_bit = ECC_CNFG_4BIT;
+ break;
+ case 6:
+ ecc_bit = ECC_CNFG_6BIT;
+ break;
+ case 8:
+ ecc_bit = ECC_CNFG_8BIT;
+ break;
+ case 10:
+ ecc_bit = ECC_CNFG_10BIT;
+ break;
+ case 12:
+ ecc_bit = ECC_CNFG_12BIT;
+ break;
+ case 14:
+ ecc_bit = ECC_CNFG_14BIT;
+ break;
+ case 16:
+ ecc_bit = ECC_CNFG_16BIT;
+ break;
+ case 18:
+ ecc_bit = ECC_CNFG_18BIT;
+ break;
+ case 20:
+ ecc_bit = ECC_CNFG_20BIT;
+ break;
+ case 22:
+ ecc_bit = ECC_CNFG_22BIT;
+ break;
+ case 24:
+ ecc_bit = ECC_CNFG_24BIT;
+ break;
+ case 28:
+ ecc_bit = ECC_CNFG_28BIT;
+ break;
+ case 32:
+ ecc_bit = ECC_CNFG_32BIT;
+ break;
+ case 36:
+ ecc_bit = ECC_CNFG_36BIT;
+ break;
+ case 40:
+ ecc_bit = ECC_CNFG_40BIT;
+ break;
+ case 44:
+ ecc_bit = ECC_CNFG_44BIT;
+ break;
+ case 48:
+ ecc_bit = ECC_CNFG_48BIT;
+ break;
+ case 52:
+ ecc_bit = ECC_CNFG_52BIT;
+ break;
+ case 56:
+ ecc_bit = ECC_CNFG_56BIT;
+ break;
+ case 60:
+ ecc_bit = ECC_CNFG_60BIT;
+ break;
+ default:
+ dev_err(ecc->dev, "invalid strength %d, default to 4 bits\n",
+ config->strength);
+ }
+
+ if (config->op == ECC_ENCODE) {
+ /* configure ECC encoder (in bits) */
+ enc_sz = config->len << 3;
+
+ reg = ecc_bit | (config->mode << ECC_MODE_SHIFT);
+ reg |= (enc_sz << ECC_MS_SHIFT);
+ writel(reg, ecc->regs + ECC_ENCCNFG);
+
+ if (config->mode != ECC_NFI_MODE)
+ writel(lower_32_bits(config->addr),
+ ecc->regs + ECC_ENCDIADDR);
+
+ } else {
+ /* configure ECC decoder (in bits) */
+ dec_sz = (config->len << 3) +
+ config->strength * ECC_PARITY_BITS;
+
+ reg = ecc_bit | (config->mode << ECC_MODE_SHIFT);
+ reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT;
+ reg |= DEC_EMPTY_EN;
+ writel(reg, ecc->regs + ECC_DECCNFG);
+
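+ /* remember the DECDONE bit of the last sector; the irq handler waits for it */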
+ if (config->sectors)
+ ecc->sectors = 1 << (config->sectors - 1);
+ }
+}
+
+void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats,
+ int sectors)
+{
+ u32 offset, i, err;
+ u32 bitflips = 0;
+
+ stats->corrected = 0;
+ stats->failed = 0;
+
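+ /*
+ * each ECC_DECENUMx register packs the error counts of four sectors,
+ * 8 bits per sector; a count equal to ERR_MASK (0x3f) flags the
+ * sector as uncorrectable
+ */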
+ for (i = 0; i < sectors; i++) {
+ offset = (i >> 2) << 2;
+ err = readl(ecc->regs + ECC_DECENUM0 + offset);
+ err = err >> ((i % 4) * 8);
+ err &= ERR_MASK;
+ if (err == ERR_MASK) {
+ /* uncorrectable errors */
+ stats->failed++;
+ continue;
+ }
+
+ stats->corrected += err;
+ bitflips = max_t(u32, bitflips, err);
+ }
+
+ stats->bitflips = bitflips;
+}
+EXPORT_SYMBOL(mtk_ecc_get_stats);
+
+void mtk_ecc_release(struct mtk_ecc *ecc)
+{
+ clk_disable_unprepare(ecc->clk);
+ put_device(ecc->dev);
+}
+EXPORT_SYMBOL(mtk_ecc_release);
+
+static void mtk_ecc_hw_init(struct mtk_ecc *ecc)
+{
+ mtk_ecc_wait_idle(ecc, ECC_ENCODE);
+ writew(ECC_OP_DISABLE, ecc->regs + ECC_ENCCON);
+
+ mtk_ecc_wait_idle(ecc, ECC_DECODE);
+ writel(ECC_OP_DISABLE, ecc->regs + ECC_DECCON);
+}
+
+static struct mtk_ecc *mtk_ecc_get(struct device_node *np)
+{
+ struct platform_device *pdev;
+ struct mtk_ecc *ecc;
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev || !platform_get_drvdata(pdev))
+ return ERR_PTR(-EPROBE_DEFER);
+
+ get_device(&pdev->dev);
+ ecc = platform_get_drvdata(pdev);
+ clk_prepare_enable(ecc->clk);
+ mtk_ecc_hw_init(ecc);
+
+ return ecc;
+}
+
+struct mtk_ecc *of_mtk_ecc_get(struct device_node *of_node)
+{
+ struct mtk_ecc *ecc = NULL;
+ struct device_node *np;
+
+ np = of_parse_phandle(of_node, "ecc-engine", 0);
+ if (np) {
+ ecc = mtk_ecc_get(np);
+ of_node_put(np);
+ }
+
+ return ecc;
+}
+EXPORT_SYMBOL(of_mtk_ecc_get);
+
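+/*
+ * acquire the ECC engine for one operation: the mutex taken here is
+ * released by mtk_ecc_disable() once the operation has finished
+ */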
+int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
+{
+ enum mtk_ecc_operation op = config->op;
+ int ret;
+
+ ret = mutex_lock_interruptible(&ecc->lock);
+ if (ret) {
+ dev_err(ecc->dev, "interrupted when attempting to lock\n");
+ return ret;
+ }
+
+ mtk_ecc_wait_idle(ecc, op);
+ mtk_ecc_config(ecc, config);
+ writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op));
+
+ init_completion(&ecc->done);
+ writew(ECC_IRQ_EN, ecc->regs + ECC_IRQ_REG(op));
+
+ return 0;
+}
+EXPORT_SYMBOL(mtk_ecc_enable);
+
+void mtk_ecc_disable(struct mtk_ecc *ecc)
+{
+ enum mtk_ecc_operation op = ECC_ENCODE;
+
+ /* find out the running operation */
+ if (readw(ecc->regs + ECC_CTL_REG(op)) != ECC_OP_ENABLE)
+ op = ECC_DECODE;
+
+ /* disable it */
+ mtk_ecc_wait_idle(ecc, op);
+ writew(0, ecc->regs + ECC_IRQ_REG(op));
+ writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));
+
+ mutex_unlock(&ecc->lock);
+}
+EXPORT_SYMBOL(mtk_ecc_disable);
+
+int mtk_ecc_wait_done(struct mtk_ecc *ecc, enum mtk_ecc_operation op)
+{
+ int ret;
+
+ ret = wait_for_completion_timeout(&ecc->done, msecs_to_jiffies(500));
+ if (!ret) {
+ dev_err(ecc->dev, "%s timeout - interrupt did not arrive)\n",
+ (op == ECC_ENCODE) ? "encoder" : "decoder");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mtk_ecc_wait_done);
+
+int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
+ u8 *data, u32 bytes)
+{
+ dma_addr_t addr;
+ u8 *p;
+ u32 len, i, val;
+ int ret = 0;
+
+ addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE);
+ ret = dma_mapping_error(ecc->dev, addr);
+ if (ret) {
+ dev_err(ecc->dev, "dma mapping error\n");
+ return -EINVAL;
+ }
+
+ config->op = ECC_ENCODE;
+ config->addr = addr;
+ ret = mtk_ecc_enable(ecc, config);
+ if (ret) {
+ dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
+ return ret;
+ }
+
+ ret = mtk_ecc_wait_done(ecc, ECC_ENCODE);
+ if (ret)
+ goto timeout;
+
+ mtk_ecc_wait_idle(ecc, ECC_ENCODE);
+
+ /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
+ len = (config->strength * ECC_PARITY_BITS + 7) >> 3;
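+ /* e.g. strength 4 => 4 * 14 = 56 parity bits => 7 bytes */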
+ p = data + bytes;
+
+ /* write the parity bytes generated by the ECC back to the OOB region */
+ for (i = 0; i < len; i++) {
+ if ((i % 4) == 0)
+ val = readl(ecc->regs + ECC_ENCPAR(i / 4));
+ p[i] = (val >> ((i % 4) * 8)) & 0xff;
+ }
+timeout:
+
+ dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
+ mtk_ecc_disable(ecc);
+
+ return ret;
+}
+EXPORT_SYMBOL(mtk_ecc_encode);
+
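+/*
+ * round the requested strength down to the closest value the engine
+ * supports, or up to the 4-bit minimum if it is below that
+ */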
+void mtk_ecc_adjust_strength(u32 *p)
+{
+ u32 ecc[] = {4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36,
+ 40, 44, 48, 52, 56, 60};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ecc); i++) {
+ if (*p <= ecc[i]) {
+ if (!i)
+ *p = ecc[i];
+ else if (*p != ecc[i])
+ *p = ecc[i - 1];
+ return;
+ }
+ }
+
+ *p = ecc[ARRAY_SIZE(ecc) - 1];
+}
+EXPORT_SYMBOL(mtk_ecc_adjust_strength);
+
+static int mtk_ecc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_ecc *ecc;
+ struct resource *res;
+ int irq, ret;
+
+ ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
+ if (!ecc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ecc->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ecc->regs)) {
+ dev_err(dev, "failed to map regs: %ld\n", PTR_ERR(ecc->regs));
+ return PTR_ERR(ecc->regs);
+ }
+
+ ecc->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ecc->clk)) {
+ dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(ecc->clk));
+ return PTR_ERR(ecc->clk);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "failed to get irq\n");
+ return -EINVAL;
+ }
+
+ ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "failed to set DMA mask\n");
+ return ret;
+ }
+
+ ret = devm_request_irq(dev, irq, mtk_ecc_irq, 0x0, "mtk-ecc", ecc);
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+ return -EINVAL;
+ }
+
+ ecc->dev = dev;
+ mutex_init(&ecc->lock);
+ platform_set_drvdata(pdev, ecc);
+ dev_info(dev, "probed\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_ecc_suspend(struct device *dev)
+{
+ struct mtk_ecc *ecc = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(ecc->clk);
+
+ return 0;
+}
+
+static int mtk_ecc_resume(struct device *dev)
+{
+ struct mtk_ecc *ecc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(ecc->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clk\n");
+ return ret;
+ }
+
+ mtk_ecc_hw_init(ecc);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(mtk_ecc_pm_ops, mtk_ecc_suspend, mtk_ecc_resume);
+#endif
+
+static const struct of_device_id mtk_ecc_dt_match[] = {
+ { .compatible = "mediatek,mt2701-ecc" },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mtk_ecc_dt_match);
+
+static struct platform_driver mtk_ecc_driver = {
+ .probe = mtk_ecc_probe,
+ .driver = {
+ .name = "mtk-ecc",
+ .of_match_table = of_match_ptr(mtk_ecc_dt_match),
+#ifdef CONFIG_PM_SLEEP
+ .pm = &mtk_ecc_pm_ops,
+#endif
+ },
+};
+
+module_platform_driver(mtk_ecc_driver);
+
+MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
+MODULE_DESCRIPTION("MTK Nand ECC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/mtk_ecc.h b/drivers/mtd/nand/mtk_ecc.h
new file mode 100644
index 000000000..cbeba5cd1
--- /dev/null
+++ b/drivers/mtd/nand/mtk_ecc.h
@@ -0,0 +1,50 @@
+/*
+ * MTK SDG1 ECC controller
+ *
+ * Copyright (c) 2016 Mediatek
+ * Authors: Xiaolei Li <xiaolei.li@mediatek.com>
+ * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef __DRIVERS_MTD_NAND_MTK_ECC_H__
+#define __DRIVERS_MTD_NAND_MTK_ECC_H__
+
+#include <linux/types.h>
+
+#define ECC_PARITY_BITS (14)
+
+enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1};
+enum mtk_ecc_operation {ECC_ENCODE, ECC_DECODE};
+
+struct device_node;
+struct mtk_ecc;
+
+struct mtk_ecc_stats {
+ u32 corrected;
+ u32 bitflips;
+ u32 failed;
+};
+
+struct mtk_ecc_config {
+ enum mtk_ecc_operation op;
+ enum mtk_ecc_mode mode;
+ dma_addr_t addr;
+ u32 strength;
+ u32 sectors;
+ u32 len;
+};
+
+int mtk_ecc_encode(struct mtk_ecc *, struct mtk_ecc_config *, u8 *, u32);
+void mtk_ecc_get_stats(struct mtk_ecc *, struct mtk_ecc_stats *, int);
+int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation);
+int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *);
+void mtk_ecc_disable(struct mtk_ecc *);
+void mtk_ecc_adjust_strength(u32 *);
+
+struct mtk_ecc *of_mtk_ecc_get(struct device_node *);
+void mtk_ecc_release(struct mtk_ecc *);
+
+#endif
diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c
new file mode 100644
index 000000000..5223a2182
--- /dev/null
+++ b/drivers/mtd/nand/mtk_nand.c
@@ -0,0 +1,1529 @@
+/*
+ * MTK NAND Flash controller driver.
+ * Copyright (C) 2016 MediaTek Inc.
+ * Authors: Xiaolei Li <xiaolei.li@mediatek.com>
+ * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/mtd.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include "mtk_ecc.h"
+
+/* NAND controller register definition */
+#define NFI_CNFG (0x00)
+#define CNFG_AHB BIT(0)
+#define CNFG_READ_EN BIT(1)
+#define CNFG_DMA_BURST_EN BIT(2)
+#define CNFG_BYTE_RW BIT(6)
+#define CNFG_HW_ECC_EN BIT(8)
+#define CNFG_AUTO_FMT_EN BIT(9)
+#define CNFG_OP_CUST (6 << 12)
+#define NFI_PAGEFMT (0x04)
+#define PAGEFMT_FDM_ECC_SHIFT (12)
+#define PAGEFMT_FDM_SHIFT (8)
+#define PAGEFMT_SPARE_16 (0)
+#define PAGEFMT_SPARE_26 (1)
+#define PAGEFMT_SPARE_27 (2)
+#define PAGEFMT_SPARE_28 (3)
+#define PAGEFMT_SPARE_32 (4)
+#define PAGEFMT_SPARE_36 (5)
+#define PAGEFMT_SPARE_40 (6)
+#define PAGEFMT_SPARE_44 (7)
+#define PAGEFMT_SPARE_48 (8)
+#define PAGEFMT_SPARE_49 (9)
+#define PAGEFMT_SPARE_50 (0xa)
+#define PAGEFMT_SPARE_51 (0xb)
+#define PAGEFMT_SPARE_52 (0xc)
+#define PAGEFMT_SPARE_62 (0xd)
+#define PAGEFMT_SPARE_63 (0xe)
+#define PAGEFMT_SPARE_64 (0xf)
+#define PAGEFMT_SPARE_SHIFT (4)
+#define PAGEFMT_SEC_SEL_512 BIT(2)
+#define PAGEFMT_512_2K (0)
+#define PAGEFMT_2K_4K (1)
+#define PAGEFMT_4K_8K (2)
+#define PAGEFMT_8K_16K (3)
+/* NFI control */
+#define NFI_CON (0x08)
+#define CON_FIFO_FLUSH BIT(0)
+#define CON_NFI_RST BIT(1)
+#define CON_BRD BIT(8) /* burst read */
+#define CON_BWR BIT(9) /* burst write */
+#define CON_SEC_SHIFT (12)
+/* Timing control register */
+#define NFI_ACCCON (0x0C)
+#define NFI_INTR_EN (0x10)
+#define INTR_AHB_DONE_EN BIT(6)
+#define NFI_INTR_STA (0x14)
+#define NFI_CMD (0x20)
+#define NFI_ADDRNOB (0x30)
+#define NFI_COLADDR (0x34)
+#define NFI_ROWADDR (0x38)
+#define NFI_STRDATA (0x40)
+#define STAR_EN (1)
+#define STAR_DE (0)
+#define NFI_CNRNB (0x44)
+#define NFI_DATAW (0x50)
+#define NFI_DATAR (0x54)
+#define NFI_PIO_DIRDY (0x58)
+#define PIO_DI_RDY (0x01)
+#define NFI_STA (0x60)
+#define STA_CMD BIT(0)
+#define STA_ADDR BIT(1)
+#define STA_BUSY BIT(8)
+#define STA_EMP_PAGE BIT(12)
+#define NFI_FSM_CUSTDATA (0xe << 16)
+#define NFI_FSM_MASK (0xf << 16)
+#define NFI_ADDRCNTR (0x70)
+#define CNTR_MASK GENMASK(16, 12)
+#define ADDRCNTR_SEC_SHIFT (12)
+#define ADDRCNTR_SEC(val) \
+ (((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)
+#define NFI_STRADDR (0x80)
+#define NFI_BYTELEN (0x84)
+#define NFI_CSEL (0x90)
+#define NFI_FDML(x) (0xA0 + (x) * sizeof(u32) * 2)
+#define NFI_FDMM(x) (0xA4 + (x) * sizeof(u32) * 2)
+#define NFI_FDM_MAX_SIZE (8)
+#define NFI_FDM_MIN_SIZE (1)
+#define NFI_MASTER_STA (0x224)
+#define MASTER_STA_MASK (0x0FFF)
+#define NFI_EMPTY_THRESH (0x23C)
+
+#define MTK_NAME "mtk-nand"
+#define KB(x) ((x) * 1024UL)
+#define MB(x) (KB(x) * 1024UL)
+
+#define MTK_TIMEOUT (500000)
+#define MTK_RESET_TIMEOUT (1000000)
+#define MTK_MAX_SECTOR (16)
+#define MTK_NAND_MAX_NSELS (2)
+
+struct mtk_nfc_bad_mark_ctl {
+ void (*bm_swap)(struct mtd_info *, u8 *buf, int raw);
+ u32 sec;
+ u32 pos;
+};
+
+/*
+ * FDM: region used to store free OOB data
+ */
+struct mtk_nfc_fdm {
+ u32 reg_size;
+ u32 ecc_size;
+};
+
+struct mtk_nfc_nand_chip {
+ struct list_head node;
+ struct nand_chip nand;
+
+ struct mtk_nfc_bad_mark_ctl bad_mark;
+ struct mtk_nfc_fdm fdm;
+ u32 spare_per_sector;
+
+ int nsels;
+ u8 sels[0];
+ /* nothing after this field */
+};
+
+struct mtk_nfc_clk {
+ struct clk *nfi_clk;
+ struct clk *pad_clk;
+};
+
+struct mtk_nfc {
+ struct nand_hw_control controller;
+ struct mtk_ecc_config ecc_cfg;
+ struct mtk_nfc_clk clk;
+ struct mtk_ecc *ecc;
+
+ struct device *dev;
+ void __iomem *regs;
+
+ struct completion done;
+ struct list_head chips;
+
+ u8 *buffer;
+};
+
+static inline struct mtk_nfc_nand_chip *to_mtk_nand(struct nand_chip *nand)
+{
+ return container_of(nand, struct mtk_nfc_nand_chip, nand);
+}
+
+static inline u8 *data_ptr(struct nand_chip *chip, const u8 *p, int i)
+{
+ return (u8 *)p + i * chip->ecc.size;
+}
+
+static inline u8 *oob_ptr(struct nand_chip *chip, int i)
+{
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ u8 *poi;
+
+ /*
+ * map the sector's FDM data to the free oob area: the FDM data of the
+ * bad mark sector is stored at the beginning of the oob, so the sectors
+ * in front of it are shifted up by one FDM slot
+ */
+
+ if (i < mtk_nand->bad_mark.sec)
+ poi = chip->oob_poi + (i + 1) * mtk_nand->fdm.reg_size;
+ else if (i == mtk_nand->bad_mark.sec)
+ poi = chip->oob_poi;
+ else
+ poi = chip->oob_poi + i * mtk_nand->fdm.reg_size;
+
+ return poi;
+}
+
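+/*
+ * in raw mode the bounce buffer holds the page in controller layout:
+ * per sector [ data (ecc.size) | FDM + ECC parity (spare) ], giving a
+ * sector stride of ecc.size + spare_per_sector bytes
+ */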
+static inline int mtk_data_len(struct nand_chip *chip)
+{
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+
+ return chip->ecc.size + mtk_nand->spare_per_sector;
+}
+
+static inline u8 *mtk_data_ptr(struct nand_chip *chip, int i)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+
+ return nfc->buffer + i * mtk_data_len(chip);
+}
+
+static inline u8 *mtk_oob_ptr(struct nand_chip *chip, int i)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+
+ return nfc->buffer + i * mtk_data_len(chip) + chip->ecc.size;
+}
+
+static inline void nfi_writel(struct mtk_nfc *nfc, u32 val, u32 reg)
+{
+ writel(val, nfc->regs + reg);
+}
+
+static inline void nfi_writew(struct mtk_nfc *nfc, u16 val, u32 reg)
+{
+ writew(val, nfc->regs + reg);
+}
+
+static inline void nfi_writeb(struct mtk_nfc *nfc, u8 val, u32 reg)
+{
+ writeb(val, nfc->regs + reg);
+}
+
+static inline u32 nfi_readl(struct mtk_nfc *nfc, u32 reg)
+{
+ return readl_relaxed(nfc->regs + reg);
+}
+
+static inline u16 nfi_readw(struct mtk_nfc *nfc, u32 reg)
+{
+ return readw_relaxed(nfc->regs + reg);
+}
+
+static inline u8 nfi_readb(struct mtk_nfc *nfc, u32 reg)
+{
+ return readb_relaxed(nfc->regs + reg);
+}
+
+static void mtk_nfc_hw_reset(struct mtk_nfc *nfc)
+{
+ struct device *dev = nfc->dev;
+ u32 val;
+ int ret;
+
+ /* reset all registers and force the NFI master to terminate */
+ nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);
+
+ /* wait for the master to finish the last transaction */
+ ret = readl_poll_timeout(nfc->regs + NFI_MASTER_STA, val,
+ !(val & MASTER_STA_MASK), 50,
+ MTK_RESET_TIMEOUT);
+ if (ret)
+ dev_warn(dev, "master active in reset [0x%x] = 0x%x\n",
+ NFI_MASTER_STA, val);
+
+ /* ensure any status register affected by the NFI master is reset */
+ nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);
+ nfi_writew(nfc, STAR_DE, NFI_STRDATA);
+}
+
+static int mtk_nfc_send_command(struct mtk_nfc *nfc, u8 command)
+{
+ struct device *dev = nfc->dev;
+ u32 val;
+ int ret;
+
+ nfi_writel(nfc, command, NFI_CMD);
+
+ ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
+ !(val & STA_CMD), 10, MTK_TIMEOUT);
+ if (ret) {
+ dev_warn(dev, "nfi core timed out entering command mode\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mtk_nfc_send_address(struct mtk_nfc *nfc, int addr)
+{
+ struct device *dev = nfc->dev;
+ u32 val;
+ int ret;
+
+ nfi_writel(nfc, addr, NFI_COLADDR);
+ nfi_writel(nfc, 0, NFI_ROWADDR);
+ nfi_writew(nfc, 1, NFI_ADDRNOB);
+
+ ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
+ !(val & STA_ADDR), 10, MTK_TIMEOUT);
+ if (ret) {
+ dev_warn(dev, "nfi core timed out entering address mode\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mtk_nfc_hw_runtime_config(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ u32 fmt, spare;
+
+ if (!mtd->writesize)
+ return 0;
+
+ spare = mtk_nand->spare_per_sector;
+
+ switch (mtd->writesize) {
+ case 512:
+ fmt = PAGEFMT_512_2K | PAGEFMT_SEC_SEL_512;
+ break;
+ case KB(2):
+ if (chip->ecc.size == 512)
+ fmt = PAGEFMT_2K_4K | PAGEFMT_SEC_SEL_512;
+ else
+ fmt = PAGEFMT_512_2K;
+ break;
+ case KB(4):
+ if (chip->ecc.size == 512)
+ fmt = PAGEFMT_4K_8K | PAGEFMT_SEC_SEL_512;
+ else
+ fmt = PAGEFMT_2K_4K;
+ break;
+ case KB(8):
+ if (chip->ecc.size == 512)
+ fmt = PAGEFMT_8K_16K | PAGEFMT_SEC_SEL_512;
+ else
+ fmt = PAGEFMT_4K_8K;
+ break;
+ case KB(16):
+ fmt = PAGEFMT_8K_16K;
+ break;
+ default:
+ dev_err(nfc->dev, "invalid page len: %d\n", mtd->writesize);
+ return -EINVAL;
+ }
+
+ /*
+ * with a 1024-byte eccsize the hardware doubles the spare value it is
+ * given, so halve it before programming
+ */
+ if (chip->ecc.size == 1024)
+ spare >>= 1;
+
+ switch (spare) {
+ case 16:
+ fmt |= (PAGEFMT_SPARE_16 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 26:
+ fmt |= (PAGEFMT_SPARE_26 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 27:
+ fmt |= (PAGEFMT_SPARE_27 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 28:
+ fmt |= (PAGEFMT_SPARE_28 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 32:
+ fmt |= (PAGEFMT_SPARE_32 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 36:
+ fmt |= (PAGEFMT_SPARE_36 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 40:
+ fmt |= (PAGEFMT_SPARE_40 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 44:
+ fmt |= (PAGEFMT_SPARE_44 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 48:
+ fmt |= (PAGEFMT_SPARE_48 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 49:
+ fmt |= (PAGEFMT_SPARE_49 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 50:
+ fmt |= (PAGEFMT_SPARE_50 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 51:
+ fmt |= (PAGEFMT_SPARE_51 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 52:
+ fmt |= (PAGEFMT_SPARE_52 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 62:
+ fmt |= (PAGEFMT_SPARE_62 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 63:
+ fmt |= (PAGEFMT_SPARE_63 << PAGEFMT_SPARE_SHIFT);
+ break;
+ case 64:
+ fmt |= (PAGEFMT_SPARE_64 << PAGEFMT_SPARE_SHIFT);
+ break;
+ default:
+ dev_err(nfc->dev, "invalid spare per sector %d\n", spare);
+ return -EINVAL;
+ }
+
+ fmt |= mtk_nand->fdm.reg_size << PAGEFMT_FDM_SHIFT;
+ fmt |= mtk_nand->fdm.ecc_size << PAGEFMT_FDM_ECC_SHIFT;
+ nfi_writew(nfc, fmt, NFI_PAGEFMT);
+
+ nfc->ecc_cfg.strength = chip->ecc.strength;
+ nfc->ecc_cfg.len = chip->ecc.size + mtk_nand->fdm.ecc_size;
+
+ return 0;
+}
+
+static void mtk_nfc_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct mtk_nfc *nfc = nand_get_controller_data(nand);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(nand);
+
+ if (chip < 0)
+ return;
+
+ mtk_nfc_hw_runtime_config(mtd);
+
+ nfi_writel(nfc, mtk_nand->sels[chip], NFI_CSEL);
+}
+
+static int mtk_nfc_dev_ready(struct mtd_info *mtd)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
+
+ if (nfi_readl(nfc, NFI_STA) & STA_BUSY)
+ return 0;
+
+ return 1;
+}
+
+static void mtk_nfc_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
+
+ if (ctrl & NAND_ALE) {
+ mtk_nfc_send_address(nfc, dat);
+ } else if (ctrl & NAND_CLE) {
+ mtk_nfc_hw_reset(nfc);
+
+ nfi_writew(nfc, CNFG_OP_CUST, NFI_CNFG);
+ mtk_nfc_send_command(nfc, dat);
+ }
+}
+
+static inline void mtk_nfc_wait_ioready(struct mtk_nfc *nfc)
+{
+ int rc;
+ u8 val;
+
+ rc = readb_poll_timeout_atomic(nfc->regs + NFI_PIO_DIRDY, val,
+ val & PIO_DI_RDY, 10, MTK_TIMEOUT);
+ if (rc < 0)
+ dev_err(nfc->dev, "data not ready\n");
+}
+
+static inline u8 mtk_nfc_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ u32 reg;
+
+ /* after each byte read, the NFI_STA reg is reset by the hardware */
+ reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;
+ if (reg != NFI_FSM_CUSTDATA) {
+ reg = nfi_readw(nfc, NFI_CNFG);
+ reg |= CNFG_BYTE_RW | CNFG_READ_EN;
+ nfi_writew(nfc, reg, NFI_CNFG);
+
+ /*
+ * set to max sector to allow the HW to continue reading over
+ * unaligned accesses
+ */
+ reg = (MTK_MAX_SECTOR << CON_SEC_SHIFT) | CON_BRD;
+ nfi_writel(nfc, reg, NFI_CON);
+
+ /* trigger to fetch data */
+ nfi_writew(nfc, STAR_EN, NFI_STRDATA);
+ }
+
+ mtk_nfc_wait_ioready(nfc);
+
+ return nfi_readb(nfc, NFI_DATAR);
+}
+
+static void mtk_nfc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ buf[i] = mtk_nfc_read_byte(mtd);
+}
+
+static void mtk_nfc_write_byte(struct mtd_info *mtd, u8 byte)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
+ u32 reg;
+
+ reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;
+
+ if (reg != NFI_FSM_CUSTDATA) {
+ reg = nfi_readw(nfc, NFI_CNFG) | CNFG_BYTE_RW;
+ nfi_writew(nfc, reg, NFI_CNFG);
+
+ reg = MTK_MAX_SECTOR << CON_SEC_SHIFT | CON_BWR;
+ nfi_writel(nfc, reg, NFI_CON);
+
+ nfi_writew(nfc, STAR_EN, NFI_STRDATA);
+ }
+
+ mtk_nfc_wait_ioready(nfc);
+ nfi_writeb(nfc, byte, NFI_DATAW);
+}
+
+static void mtk_nfc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ mtk_nfc_write_byte(mtd, buf[i]);
+}
+
+static int mtk_nfc_sector_encode(struct nand_chip *chip, u8 *data)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ int size = chip->ecc.size + mtk_nand->fdm.reg_size;
+
+ nfc->ecc_cfg.mode = ECC_DMA_MODE;
+ nfc->ecc_cfg.op = ECC_ENCODE;
+
+ return mtk_ecc_encode(nfc->ecc, &nfc->ecc_cfg, data, size);
+}
+
+static void mtk_nfc_no_bad_mark_swap(struct mtd_info *a, u8 *b, int c)
+{
+ /* nop */
+}
+
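+/*
+ * the controller's sector layout places the factory bad block marker
+ * inside a sector's data area, so swap it with the first OOB byte to
+ * keep the marker visible at its standard position
+ */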
+static void mtk_nfc_bad_mark_swap(struct mtd_info *mtd, u8 *buf, int raw)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtk_nfc_nand_chip *nand = to_mtk_nand(chip);
+ u32 bad_pos = nand->bad_mark.pos;
+
+ if (raw)
+ bad_pos += nand->bad_mark.sec * mtk_data_len(chip);
+ else
+ bad_pos += nand->bad_mark.sec * chip->ecc.size;
+
+ swap(chip->oob_poi[0], buf[bad_pos]);
+}
+
+static int mtk_nfc_format_subpage(struct mtd_info *mtd, u32 offset,
+ u32 len, const u8 *buf)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ u32 start, end;
+ int i, ret;
+
+ start = offset / chip->ecc.size;
+ end = DIV_ROUND_UP(offset + len, chip->ecc.size);
+
+ memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
+ for (i = 0; i < chip->ecc.steps; i++) {
+ memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
+ chip->ecc.size);
+
+ if (start > i || i >= end)
+ continue;
+
+ if (i == mtk_nand->bad_mark.sec)
+ mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
+
+ memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);
+
+ /* compute the ECC parity for this sector and store it in its OOB area */
+ ret = mtk_nfc_sector_encode(chip, mtk_data_ptr(chip, i));
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mtk_nfc_format_page(struct mtd_info *mtd, const u8 *buf)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ u32 i;
+
+ memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
+ for (i = 0; i < chip->ecc.steps; i++) {
+ if (buf)
+ memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
+ chip->ecc.size);
+
+ if (i == mtk_nand->bad_mark.sec)
+ mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
+
+ memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);
+ }
+}
+
+static inline void mtk_nfc_read_fdm(struct nand_chip *chip, u32 start,
+ u32 sectors)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ u32 vall, valm;
+ u8 *oobptr;
+ int i, j;
+
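+ /*
+ * each sector provides up to 8 bytes of FDM data split across two
+ * registers: NFI_FDML holds bytes 0-3, NFI_FDMM bytes 4-7
+ */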
+ for (i = 0; i < sectors; i++) {
+ oobptr = oob_ptr(chip, start + i);
+ vall = nfi_readl(nfc, NFI_FDML(i));
+ valm = nfi_readl(nfc, NFI_FDMM(i));
+
+ for (j = 0; j < fdm->reg_size; j++)
+ oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);
+ }
+}
+
+static inline void mtk_nfc_write_fdm(struct nand_chip *chip)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ u32 vall, valm;
+ u8 *oobptr;
+ int i, j;
+
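+ /* pack the FDM bytes into FDML/FDMM, padding unused bytes with 0xff */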
+ for (i = 0; i < chip->ecc.steps; i++) {
+ oobptr = oob_ptr(chip, i);
+ vall = 0;
+ valm = 0;
+ for (j = 0; j < 8; j++) {
+ if (j < 4)
+ vall |= (j < fdm->reg_size ? oobptr[j] : 0xff)
+ << (j * 8);
+ else
+ valm |= (j < fdm->reg_size ? oobptr[j] : 0xff)
+ << ((j - 4) * 8);
+ }
+ nfi_writel(nfc, vall, NFI_FDML(i));
+ nfi_writel(nfc, valm, NFI_FDMM(i));
+ }
+}
+
+static int mtk_nfc_do_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const u8 *buf, int page, int len)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct device *dev = nfc->dev;
+ dma_addr_t addr;
+ u32 reg;
+ int ret;
+
+ addr = dma_map_single(dev, (void *)buf, len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(nfc->dev, addr);
+ if (ret) {
+ dev_err(nfc->dev, "dma mapping error\n");
+ return -EINVAL;
+ }
+
+ reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AHB | CNFG_DMA_BURST_EN;
+ nfi_writew(nfc, reg, NFI_CNFG);
+
+ nfi_writel(nfc, chip->ecc.steps << CON_SEC_SHIFT, NFI_CON);
+ nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);
+ nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);
+
+ init_completion(&nfc->done);
+
+ reg = nfi_readl(nfc, NFI_CON) | CON_BWR;
+ nfi_writel(nfc, reg, NFI_CON);
+ nfi_writew(nfc, STAR_EN, NFI_STRDATA);
+
+ ret = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
+ if (!ret) {
+ dev_err(dev, "program ahb done timeout\n");
+ nfi_writew(nfc, 0, NFI_INTR_EN);
+ ret = -ETIMEDOUT;
+ goto timeout;
+ }
+
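+ /* wait until the sector counter confirms all sectors were transferred */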
+ ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg,
+ ADDRCNTR_SEC(reg) >= chip->ecc.steps,
+ 10, MTK_TIMEOUT);
+ if (ret)
+ dev_err(dev, "hwecc write timeout\n");
+
+timeout:
+
+ dma_unmap_single(nfc->dev, addr, len, DMA_TO_DEVICE);
+ nfi_writel(nfc, 0, NFI_CON);
+
+ return ret;
+}
+
+static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const u8 *buf, int page, int raw)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ size_t len;
+ const u8 *bufpoi;
+ u32 reg;
+ int ret;
+
+ if (!raw) {
+ /* OOB => FDM: from register, ECC: from HW */
+ reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AUTO_FMT_EN;
+ nfi_writew(nfc, reg | CNFG_HW_ECC_EN, NFI_CNFG);
+
+ nfc->ecc_cfg.op = ECC_ENCODE;
+ nfc->ecc_cfg.mode = ECC_NFI_MODE;
+ ret = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
+ if (ret) {
+ /* clear NFI config */
+ reg = nfi_readw(nfc, NFI_CNFG);
+ reg &= ~(CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
+ nfi_writew(nfc, reg, NFI_CNFG);
+
+ return ret;
+ }
+
+ memcpy(nfc->buffer, buf, mtd->writesize);
+ mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, raw);
+ bufpoi = nfc->buffer;
+
+ /* write OOB into the FDM registers (OOB area in MTK NAND) */
+ mtk_nfc_write_fdm(chip);
+ } else {
+ bufpoi = buf;
+ }
+
+ len = mtd->writesize + (raw ? mtd->oobsize : 0);
+ ret = mtk_nfc_do_write_page(mtd, chip, bufpoi, page, len);
+
+ if (!raw)
+ mtk_ecc_disable(nfc->ecc);
+
+ return ret;
+}
+
+static int mtk_nfc_write_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, const u8 *buf,
+ int oob_on, int page)
+{
+ return mtk_nfc_write_page(mtd, chip, buf, page, 0);
+}
+
+static int mtk_nfc_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ const u8 *buf, int oob_on, int pg)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+
+ mtk_nfc_format_page(mtd, buf);
+ return mtk_nfc_write_page(mtd, chip, nfc->buffer, pg, 1);
+}
+
+static int mtk_nfc_write_subpage_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, u32 offset,
+ u32 data_len, const u8 *buf,
+ int oob_on, int page)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ int ret;
+
+ ret = mtk_nfc_format_subpage(mtd, offset, data_len, buf);
+ if (ret < 0)
+ return ret;
+
+ /* use the data in the private buffer (now with FDM and ECC parity) */
+ return mtk_nfc_write_page(mtd, chip, nfc->buffer, page, 1);
+}
+
+static int mtk_nfc_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ int ret;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
+
+ ret = mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page);
+ if (ret < 0)
+ return -EIO;
+
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ ret = chip->waitfunc(mtd, chip);
+
+ return ret & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_ecc_stats stats;
+ int rc, i;
+
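+ /* the NFI flags a blank (fully erased) page; fake an all-0xff read */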
+ rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE;
+ if (rc) {
+ memset(buf, 0xff, sectors * chip->ecc.size);
+ for (i = 0; i < sectors; i++)
+ memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size);
+ return 0;
+ }
+
+ mtk_ecc_get_stats(nfc->ecc, &stats, sectors);
+ mtd->ecc_stats.corrected += stats.corrected;
+ mtd->ecc_stats.failed += stats.failed;
+
+ return stats.bitflips;
+}
+
+static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
+ u32 data_offs, u32 readlen,
+ u8 *bufpoi, int page, int raw)
+{
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ u32 spare = mtk_nand->spare_per_sector;
+ u32 column, sectors, start, end, reg;
+ dma_addr_t addr;
+ int bitflips;
+ size_t len;
+ u8 *buf;
+ int rc;
+
+ start = data_offs / chip->ecc.size;
+ end = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);
+
+ sectors = end - start;
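+ /* sectors on flash are laid out as data + spare; compute the raw column */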
+ column = start * (chip->ecc.size + spare);
+
+ len = sectors * chip->ecc.size + (raw ? sectors * spare : 0);
+ buf = bufpoi + start * chip->ecc.size;
+
+ if (column != 0)
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, column, -1);
+
+ addr = dma_map_single(nfc->dev, buf, len, DMA_FROM_DEVICE);
+ rc = dma_mapping_error(nfc->dev, addr);
+ if (rc) {
+ dev_err(nfc->dev, "dma mapping error\n");
+
+ return -EINVAL;
+ }
+
+ reg = nfi_readw(nfc, NFI_CNFG);
+ reg |= CNFG_READ_EN | CNFG_DMA_BURST_EN | CNFG_AHB;
+ if (!raw) {
+ reg |= CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN;
+ nfi_writew(nfc, reg, NFI_CNFG);
+
+ nfc->ecc_cfg.mode = ECC_NFI_MODE;
+ nfc->ecc_cfg.sectors = sectors;
+ nfc->ecc_cfg.op = ECC_DECODE;
+ rc = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
+ if (rc) {
+ dev_err(nfc->dev, "ecc enable\n");
+ /* clear NFI_CNFG */
+ reg &= ~(CNFG_DMA_BURST_EN | CNFG_AHB | CNFG_READ_EN |
+ CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
+ nfi_writew(nfc, reg, NFI_CNFG);
+ dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
+
+ return rc;
+ }
+ } else {
+ nfi_writew(nfc, reg, NFI_CNFG);
+ }
+
+ nfi_writel(nfc, sectors << CON_SEC_SHIFT, NFI_CON);
+ nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);
+ nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);
+
+ init_completion(&nfc->done);
+ reg = nfi_readl(nfc, NFI_CON) | CON_BRD;
+ nfi_writel(nfc, reg, NFI_CON);
+ nfi_writew(nfc, STAR_EN, NFI_STRDATA);
+
+ rc = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
+ if (!rc)
+ dev_warn(nfc->dev, "read ahb/dma done timeout\n");
+
+ rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg,
+ ADDRCNTR_SEC(reg) >= sectors, 10,
+ MTK_TIMEOUT);
+ if (rc < 0) {
+ dev_err(nfc->dev, "subpage done timeout\n");
+ bitflips = -EIO;
+ } else {
+ bitflips = 0;
+ if (!raw) {
+ rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
+ bitflips = rc < 0 ? -ETIMEDOUT :
+ mtk_nfc_update_ecc_stats(mtd, buf, sectors);
+ mtk_nfc_read_fdm(chip, start, sectors);
+ }
+ }
+
+ dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
+
+ if (raw)
+ goto done;
+
+ mtk_ecc_disable(nfc->ecc);
+
+ if (clamp(mtk_nand->bad_mark.sec, start, end) == mtk_nand->bad_mark.sec)
+ mtk_nand->bad_mark.bm_swap(mtd, bufpoi, raw);
+done:
+ nfi_writel(nfc, 0, NFI_CON);
+
+ return bitflips;
+}
+
+static int mtk_nfc_read_subpage_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, u32 off,
+ u32 len, u8 *p, int pg)
+{
+ return mtk_nfc_read_subpage(mtd, chip, off, len, p, pg, 0);
+}
+
+static int mtk_nfc_read_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, u8 *p,
+ int oob_on, int pg)
+{
+ return mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, p, pg, 0);
+}
+
+static int mtk_nfc_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ u8 *buf, int oob_on, int page)
+{
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ int i, ret;
+
+ memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
+ ret = mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, nfc->buffer,
+ page, 1);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < chip->ecc.steps; i++) {
+ memcpy(oob_ptr(chip, i), mtk_oob_ptr(chip, i), fdm->reg_size);
+
+ if (i == mtk_nand->bad_mark.sec)
+ mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
+
+ if (buf)
+ memcpy(data_ptr(chip, buf, i), mtk_data_ptr(chip, i),
+ chip->ecc.size);
+ }
+
+ return ret;
+}
+
+static int mtk_nfc_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+ return mtk_nfc_read_page_raw(mtd, chip, NULL, 1, page);
+}
+
+static inline void mtk_nfc_hw_init(struct mtk_nfc *nfc)
+{
+ /*
+ * ACCON: access timing control register
+ * -------------------------------------
+ * 31:28: minimum required time for CS post pulling down after accessing
+ * the device
+ * 27:22: minimum required time for CS pre pulling down before accessing
+ * the device
+ * 21:16: minimum required time from NCEB low to NREB low
+ * 15:12: minimum required time from NWEB high to NREB low.
+ * 11:08: write enable hold time
+ * 07:04: write wait states
+ * 03:00: read wait states
+ */
+ nfi_writel(nfc, 0x10804211, NFI_ACCCON);
+
+ /*
+ * CNRNB: nand ready/busy register
+ * -------------------------------
+ * 7:4: timeout register for polling the NAND busy/ready signal
+ * 0 : poll the status of the busy/ready signal after [7:4]*16 cycles.
+ */
+ nfi_writew(nfc, 0xf1, NFI_CNRNB);
+ nfi_writew(nfc, PAGEFMT_8K_16K, NFI_PAGEFMT);
+
+ mtk_nfc_hw_reset(nfc);
+
+ nfi_readl(nfc, NFI_INTR_STA);
+ nfi_writel(nfc, 0, NFI_INTR_EN);
+}
+
+static irqreturn_t mtk_nfc_irq(int irq, void *id)
+{
+ struct mtk_nfc *nfc = id;
+ u16 sta, ien;
+
+ sta = nfi_readw(nfc, NFI_INTR_STA);
+ ien = nfi_readw(nfc, NFI_INTR_EN);
+
+ if (!(sta & ien))
+ return IRQ_NONE;
+
+ nfi_writew(nfc, ~sta & ien, NFI_INTR_EN);
+ complete(&nfc->done);
+
+ return IRQ_HANDLED;
+}
+
+static int mtk_nfc_enable_clk(struct device *dev, struct mtk_nfc_clk *clk)
+{
+ int ret;
+
+ ret = clk_prepare_enable(clk->nfi_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable nfi clk\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(clk->pad_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable pad clk\n");
+ clk_disable_unprepare(clk->nfi_clk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mtk_nfc_disable_clk(struct mtk_nfc_clk *clk)
+{
+ clk_disable_unprepare(clk->nfi_clk);
+ clk_disable_unprepare(clk->pad_clk);
+}
+
+static int mtk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oob_region)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+ u32 eccsteps;
+
+ eccsteps = mtd->writesize / chip->ecc.size;
+
+ if (section >= eccsteps)
+ return -ERANGE;
+
+ oob_region->length = fdm->reg_size - fdm->ecc_size;
+ oob_region->offset = section * fdm->reg_size + fdm->ecc_size;
+
+ return 0;
+}
+
+static int mtk_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oob_region)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ u32 eccsteps;
+
+ if (section)
+ return -ERANGE;
+
+ eccsteps = mtd->writesize / chip->ecc.size;
+ oob_region->offset = mtk_nand->fdm.reg_size * eccsteps;
+ oob_region->length = mtd->oobsize - oob_region->offset;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops mtk_nfc_ooblayout_ops = {
+ .free = mtk_nfc_ooblayout_free,
+ .ecc = mtk_nfc_ooblayout_ecc,
+};
+
+static void mtk_nfc_set_fdm(struct mtk_nfc_fdm *fdm, struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct mtk_nfc_nand_chip *chip = to_mtk_nand(nand);
+ u32 ecc_bytes;
+
+ ecc_bytes = DIV_ROUND_UP(nand->ecc.strength * ECC_PARITY_BITS, 8);
+
+ fdm->reg_size = chip->spare_per_sector - ecc_bytes;
+ if (fdm->reg_size > NFI_FDM_MAX_SIZE)
+ fdm->reg_size = NFI_FDM_MAX_SIZE;
+
+ /* bad block mark storage */
+ fdm->ecc_size = 1;
+}
+
+static void mtk_nfc_set_bad_mark_ctl(struct mtk_nfc_bad_mark_ctl *bm_ctl,
+ struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+
+ if (mtd->writesize == 512) {
+ bm_ctl->bm_swap = mtk_nfc_no_bad_mark_swap;
+ } else {
+ bm_ctl->bm_swap = mtk_nfc_bad_mark_swap;
+ bm_ctl->sec = mtd->writesize / mtk_data_len(nand);
+ bm_ctl->pos = mtd->writesize % mtk_data_len(nand);
+ }
+}
+
+static void mtk_nfc_set_spare_per_sector(u32 *sps, struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ u32 spare[] = {16, 26, 27, 28, 32, 36, 40, 44,
+ 48, 49, 50, 51, 52, 62, 63, 64};
+ u32 eccsteps, i;
+
+ eccsteps = mtd->writesize / nand->ecc.size;
+ *sps = mtd->oobsize / eccsteps;
+
+ if (nand->ecc.size == 1024)
+ *sps >>= 1;
+
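+ /* round down to the closest spare size supported by the controller */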
+ for (i = 0; i < ARRAY_SIZE(spare); i++) {
+ if (*sps <= spare[i]) {
+ if (!i)
+ *sps = spare[i];
+ else if (*sps != spare[i])
+ *sps = spare[i - 1];
+ break;
+ }
+ }
+
+ if (i >= ARRAY_SIZE(spare))
+ *sps = spare[ARRAY_SIZE(spare) - 1];
+
+ if (nand->ecc.size == 1024)
+ *sps <<= 1;
+}
+
+static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ u32 spare;
+ int free;
+
+ /* support only ecc hw mode */
+ if (nand->ecc.mode != NAND_ECC_HW) {
+ dev_err(dev, "ecc.mode not supported\n");
+ return -EINVAL;
+ }
+
+ /* if optional dt settings not present */
+ if (!nand->ecc.size || !nand->ecc.strength) {
+ /* use datasheet requirements */
+ nand->ecc.strength = nand->ecc_strength_ds;
+ nand->ecc.size = nand->ecc_step_ds;
+
+ /*
+ * align eccstrength and eccsize
+ * this controller only supports 512 and 1024 sizes
+ */
+ if (nand->ecc.size < 1024) {
+ if (mtd->writesize > 512) {
+ nand->ecc.size = 1024;
+ nand->ecc.strength <<= 1;
+ } else {
+ nand->ecc.size = 512;
+ }
+ } else {
+ nand->ecc.size = 1024;
+ }
+
+ mtk_nfc_set_spare_per_sector(&spare, mtd);
+
+ /* calculate oob bytes except ecc parity data */
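+ /* e.g. strength 24 => ceil(24 * 14 / 8) = 42 parity bytes */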
+ free = ((nand->ecc.strength * ECC_PARITY_BITS) + 7) >> 3;
+ free = spare - free;
+
+ /*
+ * enhance ecc strength if oob left is bigger than max FDM size
+ * or reduce ecc strength if oob size is not enough for ecc
+ * parity data.
+ */
+ if (free > NFI_FDM_MAX_SIZE) {
+ spare -= NFI_FDM_MAX_SIZE;
+ nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS;
+ } else if (free < 0) {
+ spare -= NFI_FDM_MIN_SIZE;
+ nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS;
+ }
+ }
+
+ mtk_ecc_adjust_strength(&nand->ecc.strength);
+
+ dev_info(dev, "eccsize %d eccstrength %d\n",
+ nand->ecc.size, nand->ecc.strength);
+
+ return 0;
+}
+
+static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
+ struct device_node *np)
+{
+ struct mtk_nfc_nand_chip *chip;
+ struct nand_chip *nand;
+ struct mtd_info *mtd;
+ int nsels, len;
+ u32 tmp;
+ int ret;
+ int i;
+
+ if (!of_get_property(np, "reg", &nsels))
+ return -ENODEV;
+
+ nsels /= sizeof(u32);
+ if (!nsels || nsels > MTK_NAND_MAX_NSELS) {
+ dev_err(dev, "invalid reg property size %d\n", nsels);
+ return -EINVAL;
+ }
+
+ chip = devm_kzalloc(dev, sizeof(*chip) + nsels * sizeof(u8),
+ GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->nsels = nsels;
+ for (i = 0; i < nsels; i++) {
+ ret = of_property_read_u32_index(np, "reg", i, &tmp);
+ if (ret) {
+ dev_err(dev, "reg property failure : %d\n", ret);
+ return ret;
+ }
+ chip->sels[i] = tmp;
+ }
+
+ nand = &chip->nand;
+ nand->controller = &nfc->controller;
+
+ nand_set_flash_node(nand, np);
+ nand_set_controller_data(nand, nfc);
+
+ nand->options |= NAND_USE_BOUNCE_BUFFER | NAND_SUBPAGE_READ;
+ nand->dev_ready = mtk_nfc_dev_ready;
+ nand->select_chip = mtk_nfc_select_chip;
+ nand->write_byte = mtk_nfc_write_byte;
+ nand->write_buf = mtk_nfc_write_buf;
+ nand->read_byte = mtk_nfc_read_byte;
+ nand->read_buf = mtk_nfc_read_buf;
+ nand->cmd_ctrl = mtk_nfc_cmd_ctrl;
+
+ /* set default mode in case dt entry is missing */
+ nand->ecc.mode = NAND_ECC_HW;
+
+ nand->ecc.write_subpage = mtk_nfc_write_subpage_hwecc;
+ nand->ecc.write_page_raw = mtk_nfc_write_page_raw;
+ nand->ecc.write_page = mtk_nfc_write_page_hwecc;
+ nand->ecc.write_oob_raw = mtk_nfc_write_oob_std;
+ nand->ecc.write_oob = mtk_nfc_write_oob_std;
+
+ nand->ecc.read_subpage = mtk_nfc_read_subpage_hwecc;
+ nand->ecc.read_page_raw = mtk_nfc_read_page_raw;
+ nand->ecc.read_page = mtk_nfc_read_page_hwecc;
+ nand->ecc.read_oob_raw = mtk_nfc_read_oob_std;
+ nand->ecc.read_oob = mtk_nfc_read_oob_std;
+
+ mtd = nand_to_mtd(nand);
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = dev;
+ mtd->name = MTK_NAME;
+ mtd_set_ooblayout(mtd, &mtk_nfc_ooblayout_ops);
+
+ mtk_nfc_hw_init(nfc);
+
+ ret = nand_scan_ident(mtd, nsels, NULL);
+ if (ret)
+ return -ENODEV;
+
+ /* store bbt magic in the page, because the OOB is not ECC protected */
+ if (nand->bbt_options & NAND_BBT_USE_FLASH)
+ nand->bbt_options |= NAND_BBT_NO_OOB;
+
+ ret = mtk_nfc_ecc_init(dev, mtd);
+ if (ret)
+ return -EINVAL;
+
+ if (nand->options & NAND_BUSWIDTH_16) {
+ dev_err(dev, "16bits buswidth not supported");
+ return -EINVAL;
+ }
+
+ mtk_nfc_set_spare_per_sector(&chip->spare_per_sector, mtd);
+ mtk_nfc_set_fdm(&chip->fdm, mtd);
+ mtk_nfc_set_bad_mark_ctl(&chip->bad_mark, mtd);
+
+ len = mtd->writesize + mtd->oobsize;
+ nfc->buffer = devm_kzalloc(dev, len, GFP_KERNEL);
+ if (!nfc->buffer)
+ return -ENOMEM;
+
+ ret = nand_scan_tail(mtd);
+ if (ret)
+ return -ENODEV;
+
+ ret = mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
+ if (ret) {
+ dev_err(dev, "mtd parse partition error\n");
+ nand_release(mtd);
+ return ret;
+ }
+
+ list_add_tail(&chip->node, &nfc->chips);
+
+ return 0;
+}
+
+static int mtk_nfc_nand_chips_init(struct device *dev, struct mtk_nfc *nfc)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *nand_np;
+ int ret;
+
+ for_each_child_of_node(np, nand_np) {
+ ret = mtk_nfc_nand_chip_init(dev, nfc, nand_np);
+ if (ret) {
+ of_node_put(nand_np);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int mtk_nfc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct mtk_nfc *nfc;
+ struct resource *res;
+ int ret, irq;
+
+ nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ spin_lock_init(&nfc->controller.lock);
+ init_waitqueue_head(&nfc->controller.wq);
+ INIT_LIST_HEAD(&nfc->chips);
+
+ /* defer probe if the ECC engine is not ready yet */
+ nfc->ecc = of_mtk_ecc_get(np);
+ if (IS_ERR(nfc->ecc))
+ return PTR_ERR(nfc->ecc);
+ else if (!nfc->ecc)
+ return -ENODEV;
+
+ nfc->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nfc->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(nfc->regs)) {
+ ret = PTR_ERR(nfc->regs);
+ dev_err(dev, "no nfi base\n");
+ goto release_ecc;
+ }
+
+ nfc->clk.nfi_clk = devm_clk_get(dev, "nfi_clk");
+ if (IS_ERR(nfc->clk.nfi_clk)) {
+ dev_err(dev, "no clk\n");
+ ret = PTR_ERR(nfc->clk.nfi_clk);
+ goto release_ecc;
+ }
+
+ nfc->clk.pad_clk = devm_clk_get(dev, "pad_clk");
+ if (IS_ERR(nfc->clk.pad_clk)) {
+ dev_err(dev, "no pad clk\n");
+ ret = PTR_ERR(nfc->clk.pad_clk);
+ goto release_ecc;
+ }
+
+ ret = mtk_nfc_enable_clk(dev, &nfc->clk);
+ if (ret)
+ goto release_ecc;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "no nfi irq resource\n");
+ ret = -EINVAL;
+ goto clk_disable;
+ }
+
+ ret = devm_request_irq(dev, irq, mtk_nfc_irq, 0x0, "mtk-nand", nfc);
+ if (ret) {
+ dev_err(dev, "failed to request nfi irq\n");
+ goto clk_disable;
+ }
+
+ ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "failed to set dma mask\n");
+ goto clk_disable;
+ }
+
+ platform_set_drvdata(pdev, nfc);
+
+ ret = mtk_nfc_nand_chips_init(dev, nfc);
+ if (ret) {
+ dev_err(dev, "failed to init nand chips\n");
+ goto clk_disable;
+ }
+
+ return 0;
+
+clk_disable:
+ mtk_nfc_disable_clk(&nfc->clk);
+
+release_ecc:
+ mtk_ecc_release(nfc->ecc);
+
+ return ret;
+}
+
+static int mtk_nfc_remove(struct platform_device *pdev)
+{
+ struct mtk_nfc *nfc = platform_get_drvdata(pdev);
+ struct mtk_nfc_nand_chip *chip;
+
+ while (!list_empty(&nfc->chips)) {
+ chip = list_first_entry(&nfc->chips, struct mtk_nfc_nand_chip,
+ node);
+ nand_release(nand_to_mtd(&chip->nand));
+ list_del(&chip->node);
+ }
+
+ mtk_ecc_release(nfc->ecc);
+ mtk_nfc_disable_clk(&nfc->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_nfc_suspend(struct device *dev)
+{
+ struct mtk_nfc *nfc = dev_get_drvdata(dev);
+
+ mtk_nfc_disable_clk(&nfc->clk);
+
+ return 0;
+}
+
+static int mtk_nfc_resume(struct device *dev)
+{
+ struct mtk_nfc *nfc = dev_get_drvdata(dev);
+ struct mtk_nfc_nand_chip *chip;
+ struct nand_chip *nand;
+ struct mtd_info *mtd;
+ int ret;
+ u32 i;
+
+ udelay(200);
+
+ ret = mtk_nfc_enable_clk(dev, &nfc->clk);
+ if (ret)
+ return ret;
+
+ mtk_nfc_hw_init(nfc);
+
+ /* reset NAND chip if VCC was powered off */
+ list_for_each_entry(chip, &nfc->chips, node) {
+ nand = &chip->nand;
+ mtd = nand_to_mtd(nand);
+ for (i = 0; i < chip->nsels; i++) {
+ nand->select_chip(mtd, i);
+ nand->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ }
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(mtk_nfc_pm_ops, mtk_nfc_suspend, mtk_nfc_resume);
+#endif
+
+static const struct of_device_id mtk_nfc_id_table[] = {
+ { .compatible = "mediatek,mt2701-nfc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mtk_nfc_id_table);
+
+static struct platform_driver mtk_nfc_driver = {
+ .probe = mtk_nfc_probe,
+ .remove = mtk_nfc_remove,
+ .driver = {
+ .name = MTK_NAME,
+ .of_match_table = mtk_nfc_id_table,
+#ifdef CONFIG_PM_SLEEP
+ .pm = &mtk_nfc_pm_ops,
+#endif
+ },
+};
+
+module_platform_driver(mtk_nfc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
+MODULE_DESCRIPTION("MTK Nand Flash Controller Driver");
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index ccc05f5b2..2af9869a1 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -168,6 +168,7 @@ struct nand_flash_dev nand_flash_ids[] = {
/* Manufacturer IDs */
struct nand_manufacturers nand_manuf_ids[] = {
{NAND_MFR_TOSHIBA, "Toshiba"},
+ {NAND_MFR_ESMT, "ESMT"},
{NAND_MFR_SAMSUNG, "Samsung"},
{NAND_MFR_FUJITSU, "Fujitsu"},
{NAND_MFR_NATIONAL, "National"},
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index a136da8df..5513bfd9c 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -118,8 +118,6 @@
#define PREFETCH_STATUS_FIFO_CNT(val) ((val >> 24) & 0x7F)
#define STATUS_BUFF_EMPTY 0x00000001
-#define OMAP24XX_DMA_GPMC 4
-
#define SECTOR_BYTES 512
/* 4 bit padding to make byte aligned, 56 = 52 + 4 */
#define BCH4_BIT_PAD 4
@@ -1811,7 +1809,6 @@ static int omap_nand_probe(struct platform_device *pdev)
struct nand_chip *nand_chip;
int err;
dma_cap_mask_t mask;
- unsigned sig;
struct resource *res;
struct device *dev = &pdev->dev;
int min_oobbytes = BADBLOCK_MARKER_LENGTH;
@@ -1924,11 +1921,11 @@ static int omap_nand_probe(struct platform_device *pdev)
case NAND_OMAP_PREFETCH_DMA:
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- sig = OMAP24XX_DMA_GPMC;
- info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig);
- if (!info->dma) {
+ info->dma = dma_request_chan(pdev->dev.parent, "rxtx");
+
+ if (IS_ERR(info->dma)) {
dev_err(&pdev->dev, "DMA engine request failed\n");
- err = -ENXIO;
+ err = PTR_ERR(info->dma);
goto return_error;
} else {
struct dma_slave_config cfg;
@@ -2172,7 +2169,7 @@ scan_tail:
return 0;
return_error:
- if (info->dma)
+ if (!IS_ERR_OR_NULL(info->dma))
dma_release_channel(info->dma);
if (nand_chip->ecc.priv) {
nand_bch_free(nand_chip->ecc.priv);
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index a83a69068..e414b31b7 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -39,6 +39,7 @@
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
+#include <linux/reset.h>
#define NFC_REG_CTL 0x0000
#define NFC_REG_ST 0x0004
@@ -153,6 +154,7 @@
/* define bit use in NFC_ECC_ST */
#define NFC_ECC_ERR(x) BIT(x)
+#define NFC_ECC_ERR_MSK GENMASK(15, 0)
#define NFC_ECC_PAT_FOUND(x) BIT(x + 16)
#define NFC_ECC_ERR_CNT(b, x) (((x) >> (((b) % 4) * 8)) & 0xff)
@@ -269,10 +271,12 @@ struct sunxi_nfc {
void __iomem *regs;
struct clk *ahb_clk;
struct clk *mod_clk;
+ struct reset_control *reset;
unsigned long assigned_cs;
unsigned long clk_rate;
struct list_head chips;
struct completion complete;
+ struct dma_chan *dmac;
};
static inline struct sunxi_nfc *to_sunxi_nfc(struct nand_hw_control *ctrl)
@@ -365,6 +369,67 @@ static int sunxi_nfc_rst(struct sunxi_nfc *nfc)
return ret;
}
+static int sunxi_nfc_dma_op_prepare(struct mtd_info *mtd, const void *buf,
+ int chunksize, int nchunks,
+ enum dma_data_direction ddir,
+ struct scatterlist *sg)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ struct dma_async_tx_descriptor *dmad;
+ enum dma_transfer_direction tdir;
+ dma_cookie_t dmat;
+ int ret;
+
+ if (ddir == DMA_FROM_DEVICE)
+ tdir = DMA_DEV_TO_MEM;
+ else
+ tdir = DMA_MEM_TO_DEV;
+
+ sg_init_one(sg, buf, nchunks * chunksize);
+ ret = dma_map_sg(nfc->dev, sg, 1, ddir);
+ if (!ret)
+ return -ENOMEM;
+
+ dmad = dmaengine_prep_slave_sg(nfc->dmac, sg, 1, tdir, DMA_CTRL_ACK);
+ if (!dmad) {
+ ret = -EINVAL;
+ goto err_unmap_buf;
+ }
+
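+ /* NFC_RAM_METHOD stays set while the transfer goes through DMA */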
+ writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD,
+ nfc->regs + NFC_REG_CTL);
+ writel(nchunks, nfc->regs + NFC_REG_SECTOR_NUM);
+ writel(chunksize, nfc->regs + NFC_REG_CNT);
+ dmat = dmaengine_submit(dmad);
+
+ ret = dma_submit_error(dmat);
+ if (ret)
+ goto err_clr_dma_flag;
+
+ return 0;
+
+err_clr_dma_flag:
+ writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
+ nfc->regs + NFC_REG_CTL);
+
+err_unmap_buf:
+ dma_unmap_sg(nfc->dev, sg, 1, ddir);
+ return ret;
+}
+
+static void sunxi_nfc_dma_op_cleanup(struct mtd_info *mtd,
+ enum dma_data_direction ddir,
+ struct scatterlist *sg)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+
+ dma_unmap_sg(nfc->dev, sg, 1, ddir);
+ writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
+ nfc->regs + NFC_REG_CTL);
+}
+
static int sunxi_nfc_dev_ready(struct mtd_info *mtd)
{
struct nand_chip *nand = mtd_to_nand(mtd);
@@ -822,17 +887,15 @@ static void sunxi_nfc_hw_ecc_update_stats(struct mtd_info *mtd,
}
static int sunxi_nfc_hw_ecc_correct(struct mtd_info *mtd, u8 *data, u8 *oob,
- int step, bool *erased)
+ int step, u32 status, bool *erased)
{
struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
struct nand_ecc_ctrl *ecc = &nand->ecc;
- u32 status, tmp;
+ u32 tmp;
*erased = false;
- status = readl(nfc->regs + NFC_REG_ECC_ST);
-
if (status & NFC_ECC_ERR(step))
return -EBADMSG;
@@ -898,6 +961,7 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
*cur_off = oob_off + ecc->bytes + 4;
ret = sunxi_nfc_hw_ecc_correct(mtd, data, oob_required ? oob : NULL, 0,
+ readl(nfc->regs + NFC_REG_ECC_ST),
&erased);
if (erased)
return 1;
@@ -967,6 +1031,130 @@ static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
*cur_off = mtd->oobsize + mtd->writesize;
}
+static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
+ int oob_required, int page,
+ int nchunks)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ bool randomized = nand->options & NAND_NEED_SCRAMBLING;
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ unsigned int max_bitflips = 0;
+ int ret, i, raw_mode = 0;
+ struct scatterlist sg;
+ u32 status;
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return ret;
+
+ ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, nchunks,
+ DMA_FROM_DEVICE, &sg);
+ if (ret)
+ return ret;
+
+ sunxi_nfc_hw_ecc_enable(mtd);
+ sunxi_nfc_randomizer_config(mtd, page, false);
+ sunxi_nfc_randomizer_enable(mtd);
+
+ writel((NAND_CMD_RNDOUTSTART << 16) | (NAND_CMD_RNDOUT << 8) |
+ NAND_CMD_READSTART, nfc->regs + NFC_REG_RCMD_SET);
+
+ dma_async_issue_pending(nfc->dmac);
+
+ writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD | NFC_DATA_TRANS,
+ nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
+ if (ret)
+ dmaengine_terminate_all(nfc->dmac);
+
+ sunxi_nfc_randomizer_disable(mtd);
+ sunxi_nfc_hw_ecc_disable(mtd);
+
+ sunxi_nfc_dma_op_cleanup(mtd, DMA_FROM_DEVICE, &sg);
+
+ if (ret)
+ return ret;
+
+ status = readl(nfc->regs + NFC_REG_ECC_ST);
+
+ for (i = 0; i < nchunks; i++) {
+ int data_off = i * ecc->size;
+ int oob_off = i * (ecc->bytes + 4);
+ u8 *data = buf + data_off;
+ u8 *oob = nand->oob_poi + oob_off;
+ bool erased;
+
+ ret = sunxi_nfc_hw_ecc_correct(mtd, randomized ? data : NULL,
+ oob_required ? oob : NULL,
+ i, status, &erased);
+
+ /* ECC errors are handled in the second loop. */
+ if (ret < 0)
+ continue;
+
+ if (oob_required && !erased) {
+ /* TODO: use DMA to retrieve OOB */
+ nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
+ mtd->writesize + oob_off, -1);
+ nand->read_buf(mtd, oob, ecc->bytes + 4);
+
+ sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, i,
+ !i, page);
+ }
+
+ if (erased)
+ raw_mode = 1;
+
+ sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret);
+ }
+
+ if (status & NFC_ECC_ERR_MSK) {
+ for (i = 0; i < nchunks; i++) {
+ int data_off = i * ecc->size;
+ int oob_off = i * (ecc->bytes + 4);
+ u8 *data = buf + data_off;
+ u8 *oob = nand->oob_poi + oob_off;
+
+ if (!(status & NFC_ECC_ERR(i)))
+ continue;
+
+ /*
+ * Re-read the data with the randomizer disabled to
+ * identify bitflips in erased pages.
+ */
+ if (randomized) {
+ /* TODO: use DMA to read page in raw mode */
+ nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
+ data_off, -1);
+ nand->read_buf(mtd, data, ecc->size);
+ }
+
+ /* TODO: use DMA to retrieve OOB */
+ nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
+ mtd->writesize + oob_off, -1);
+ nand->read_buf(mtd, oob, ecc->bytes + 4);
+
+ ret = nand_check_erased_ecc_chunk(data, ecc->size,
+ oob, ecc->bytes + 4,
+ NULL, 0,
+ ecc->strength);
+ if (ret >= 0)
+ raw_mode = 1;
+
+ sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret);
+ }
+ }
+
+ if (oob_required)
+ sunxi_nfc_hw_ecc_read_extra_oob(mtd, nand->oob_poi,
+ NULL, !raw_mode,
+ page);
+
+ return max_bitflips;
+}
+
static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
const u8 *data, int data_off,
const u8 *oob, int oob_off,
@@ -1065,6 +1253,23 @@ static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
return max_bitflips;
}
+static int sunxi_nfc_hw_ecc_read_page_dma(struct mtd_info *mtd,
+ struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ int ret;
+
+ ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, oob_required, page,
+ chip->ecc.steps);
+ if (ret >= 0)
+ return ret;
+
+ /* Fallback to PIO mode */
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1);
+
+ return sunxi_nfc_hw_ecc_read_page(mtd, chip, buf, oob_required, page);
+}
+
static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd,
struct nand_chip *chip,
u32 data_offs, u32 readlen,
@@ -1098,6 +1303,25 @@ static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd,
return max_bitflips;
}
+static int sunxi_nfc_hw_ecc_read_subpage_dma(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ u32 data_offs, u32 readlen,
+ u8 *buf, int page)
+{
+ int nchunks = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);
+ int ret;
+
+ ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, false, page, nchunks);
+ if (ret >= 0)
+ return ret;
+
+ /* Fallback to PIO mode */
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1);
+
+ return sunxi_nfc_hw_ecc_read_subpage(mtd, chip, data_offs, readlen,
+ buf, page);
+}
+
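Note the rounding in the nchunks computation above: chunks are transferred from the start of the page, so the count must cover data_offs + readlen bytes, not just readlen. A worked example with hypothetical values:

/* With chip->ecc.size == 1024:
 *   data_offs = 0,   readlen = 1000 -> DIV_ROUND_UP(1000, 1024) = 1 chunk
 *   data_offs = 512, readlen = 1000 -> DIV_ROUND_UP(1512, 1024) = 2 chunks
 */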
static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
struct nand_chip *chip,
const uint8_t *buf, int oob_required,
@@ -1130,6 +1354,99 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
return 0;
}
+static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ u32 data_offs, u32 data_len,
+ const u8 *buf, int oob_required,
+ int page)
+{
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int ret, i, cur_off = 0;
+
+ sunxi_nfc_hw_ecc_enable(mtd);
+
+ for (i = data_offs / ecc->size;
+ i < DIV_ROUND_UP(data_offs + data_len, ecc->size); i++) {
+ int data_off = i * ecc->size;
+ int oob_off = i * (ecc->bytes + 4);
+ const u8 *data = buf + data_off;
+ const u8 *oob = chip->oob_poi + oob_off;
+
+ ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob,
+ oob_off + mtd->writesize,
+ &cur_off, !i, page);
+ if (ret)
+ return ret;
+ }
+
+ sunxi_nfc_hw_ecc_disable(mtd);
+
+ return 0;
+}
+
+static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required,
+ int page)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ struct scatterlist sg;
+ int ret, i;
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return ret;
+
+ ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, ecc->steps,
+ DMA_TO_DEVICE, &sg);
+ if (ret)
+ goto pio_fallback;
+
+ for (i = 0; i < ecc->steps; i++) {
+ const u8 *oob = nand->oob_poi + (i * (ecc->bytes + 4));
+
+ sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, i, !i, page);
+ }
+
+ sunxi_nfc_hw_ecc_enable(mtd);
+ sunxi_nfc_randomizer_config(mtd, page, false);
+ sunxi_nfc_randomizer_enable(mtd);
+
+ writel((NAND_CMD_RNDIN << 8) | NAND_CMD_PAGEPROG,
+ nfc->regs + NFC_REG_RCMD_SET);
+
+ dma_async_issue_pending(nfc->dmac);
+
+ writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD |
+ NFC_DATA_TRANS | NFC_ACCESS_DIR,
+ nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
+ if (ret)
+ dmaengine_terminate_all(nfc->dmac);
+
+ sunxi_nfc_randomizer_disable(mtd);
+ sunxi_nfc_hw_ecc_disable(mtd);
+
+ sunxi_nfc_dma_op_cleanup(mtd, DMA_TO_DEVICE, &sg);
+
+ if (ret)
+ return ret;
+
+ if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
+ /* TODO: use DMA to transfer extra OOB bytes? */
+ sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
+ NULL, page);
+
+ return 0;
+
+pio_fallback:
+ return sunxi_nfc_hw_ecc_write_page(mtd, chip, buf, oob_required, page);
+}
+
static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd,
struct nand_chip *chip,
uint8_t *buf, int oob_required,
@@ -1497,10 +1814,19 @@ static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd,
int ret;
int i;
+ if (ecc->size != 512 && ecc->size != 1024)
+ return -EINVAL;
+
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
+ /* Prefer 1k ECC chunks over 512 byte ones */
+ if (ecc->size == 512 && mtd->writesize > 512) {
+ ecc->size = 1024;
+ ecc->strength *= 2;
+ }
+
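A worked example of this promotion, with hypothetical chip requirements:

/* Illustrative values only: a NAND requiring 40-bit correction per
 * 512-byte sector becomes 80-bit correction per 1024-byte chunk --
 * the same correction density, but fewer protected-OOB regions per
 * page. The doubled strength is then matched against the supported
 * strengths[] table below.
 */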
/* Add ECC info retrieval from DT */
for (i = 0; i < ARRAY_SIZE(strengths); i++) {
if (ecc->strength <= strengths[i])
@@ -1550,14 +1876,28 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct mtd_info *mtd,
struct nand_ecc_ctrl *ecc,
struct device_node *np)
{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
int ret;
ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np);
if (ret)
return ret;
- ecc->read_page = sunxi_nfc_hw_ecc_read_page;
- ecc->write_page = sunxi_nfc_hw_ecc_write_page;
+ if (nfc->dmac) {
+ ecc->read_page = sunxi_nfc_hw_ecc_read_page_dma;
+ ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage_dma;
+ ecc->write_page = sunxi_nfc_hw_ecc_write_page_dma;
+ nand->options |= NAND_USE_BOUNCE_BUFFER;
+ } else {
+ ecc->read_page = sunxi_nfc_hw_ecc_read_page;
+ ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage;
+ ecc->write_page = sunxi_nfc_hw_ecc_write_page;
+ }
+
+ /* TODO: support DMA for raw accesses and subpage write */
+ ecc->write_subpage = sunxi_nfc_hw_ecc_write_subpage;
ecc->read_oob_raw = nand_read_oob_std;
ecc->write_oob_raw = nand_write_oob_std;
- ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage;
@@ -1871,26 +2211,59 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
if (ret)
goto out_ahb_clk_unprepare;
+ nfc->reset = devm_reset_control_get_optional(dev, "ahb");
+ if (!IS_ERR(nfc->reset)) {
+ ret = reset_control_deassert(nfc->reset);
+ if (ret) {
+ dev_err(dev, "reset err %d\n", ret);
+ goto out_mod_clk_unprepare;
+ }
+ } else if (PTR_ERR(nfc->reset) != -ENOENT) {
+ ret = PTR_ERR(nfc->reset);
+ goto out_mod_clk_unprepare;
+ }
+
ret = sunxi_nfc_rst(nfc);
if (ret)
- goto out_mod_clk_unprepare;
+ goto out_ahb_reset_reassert;
writel(0, nfc->regs + NFC_REG_INT);
ret = devm_request_irq(dev, irq, sunxi_nfc_interrupt,
0, "sunxi-nand", nfc);
if (ret)
- goto out_mod_clk_unprepare;
+ goto out_ahb_reset_reassert;
+
+ nfc->dmac = dma_request_slave_channel(dev, "rxtx");
+ if (nfc->dmac) {
+ struct dma_slave_config dmac_cfg = { };
+
+ dmac_cfg.src_addr = r->start + NFC_REG_IO_DATA;
+ dmac_cfg.dst_addr = dmac_cfg.src_addr;
+ dmac_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dmac_cfg.dst_addr_width = dmac_cfg.src_addr_width;
+ dmac_cfg.src_maxburst = 4;
+ dmac_cfg.dst_maxburst = 4;
+ dmaengine_slave_config(nfc->dmac, &dmac_cfg);
+ } else {
+ dev_warn(dev, "failed to request rxtx DMA channel\n");
+ }
platform_set_drvdata(pdev, nfc);
ret = sunxi_nand_chips_init(dev, nfc);
if (ret) {
dev_err(dev, "failed to init nand chips\n");
- goto out_mod_clk_unprepare;
+ goto out_release_dmac;
}
return 0;
+out_release_dmac:
+ if (nfc->dmac)
+ dma_release_channel(nfc->dmac);
+out_ahb_reset_reassert:
+ if (!IS_ERR(nfc->reset))
+ reset_control_assert(nfc->reset);
out_mod_clk_unprepare:
clk_disable_unprepare(nfc->mod_clk);
out_ahb_clk_unprepare:
@@ -1904,6 +2277,12 @@ static int sunxi_nfc_remove(struct platform_device *pdev)
struct sunxi_nfc *nfc = platform_get_drvdata(pdev);
sunxi_nand_chips_cleanup(nfc);
+
+ if (!IS_ERR(nfc->reset))
+ reset_control_assert(nfc->reset);
+
+ if (nfc->dmac)
+ dma_release_channel(nfc->dmac);
clk_disable_unprepare(nfc->mod_clk);
clk_disable_unprepare(nfc->ahb_clk);
diff --git a/drivers/mtd/nand/xway_nand.c b/drivers/mtd/nand/xway_nand.c
index 0cf0ac07a..1f2948c0c 100644
--- a/drivers/mtd/nand/xway_nand.c
+++ b/drivers/mtd/nand/xway_nand.c
@@ -4,6 +4,7 @@
* by the Free Software Foundation.
*
* Copyright © 2012 John Crispin <blogic@openwrt.org>
+ * Copyright © 2016 Hauke Mehrtens <hauke@hauke-m.de>
*/
#include <linux/mtd/nand.h>
@@ -16,20 +17,28 @@
#define EBU_ADDSEL1 0x24
#define EBU_NAND_CON 0xB0
#define EBU_NAND_WAIT 0xB4
+#define NAND_WAIT_RD BIT(0) /* NAND flash status output */
+#define NAND_WAIT_WR_C BIT(3) /* NAND Write/Read complete */
#define EBU_NAND_ECC0 0xB8
#define EBU_NAND_ECC_AC 0xBC
-/* nand commands */
-#define NAND_CMD_ALE (1 << 2)
-#define NAND_CMD_CLE (1 << 3)
-#define NAND_CMD_CS (1 << 4)
-#define NAND_WRITE_CMD_RESET 0xff
+/*
+ * nand commands
+ * The pins of the NAND chip are selected based on the address bits of the
+ * "register" reads and writes. There are no dedicated registers; instead,
+ * an address range is mapped and the lower address bits of each access
+ * activate the matching control line. For example, when bit (1 << 2) is
+ * set in the address, the ALE pin is activated.
+ */
+#define NAND_CMD_ALE BIT(2) /* address latch enable */
+#define NAND_CMD_CLE BIT(3) /* command latch enable */
+#define NAND_CMD_CS BIT(4) /* chip select */
+#define NAND_CMD_SE BIT(5) /* spare area access latch */
+#define NAND_CMD_WP BIT(6) /* write protect */
#define NAND_WRITE_CMD (NAND_CMD_CS | NAND_CMD_CLE)
#define NAND_WRITE_ADDR (NAND_CMD_CS | NAND_CMD_ALE)
#define NAND_WRITE_DATA (NAND_CMD_CS)
#define NAND_READ_DATA (NAND_CMD_CS)
-#define NAND_WAIT_WR_C (1 << 3)
-#define NAND_WAIT_RD (0x1)
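A hedged illustration of the address-latch scheme described in the comment above (not driver code): the low address bits of each access, not a dedicated register, decide which control pin is asserted.

static void xway_latch_example(void __iomem *nandaddr)
{
	writeb(NAND_CMD_RESET, nandaddr + NAND_WRITE_CMD);	/* CS + CLE */
	writeb(0x00, nandaddr + NAND_WRITE_ADDR);		/* CS + ALE */
	(void)readb(nandaddr + NAND_READ_DATA);			/* CS only */
}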
/* we need to tell the EBU which address we mapped the NAND to */
#define ADDSEL1_MASK(x) (x << 4)
@@ -54,31 +63,41 @@
#define NAND_CON_CSMUX (1 << 1)
#define NAND_CON_NANDM 1
-static void xway_reset_chip(struct nand_chip *chip)
+struct xway_nand_data {
+ struct nand_chip chip;
+ unsigned long csflags;
+ void __iomem *nandaddr;
+};
+
+static u8 xway_readb(struct mtd_info *mtd, int op)
{
- unsigned long nandaddr = (unsigned long) chip->IO_ADDR_W;
- unsigned long flags;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct xway_nand_data *data = nand_get_controller_data(chip);
- nandaddr &= ~NAND_WRITE_ADDR;
- nandaddr |= NAND_WRITE_CMD;
+ return readb(data->nandaddr + op);
+}
- /* finish with a reset */
- spin_lock_irqsave(&ebu_lock, flags);
- writeb(NAND_WRITE_CMD_RESET, (void __iomem *) nandaddr);
- while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
- ;
- spin_unlock_irqrestore(&ebu_lock, flags);
+static void xway_writeb(struct mtd_info *mtd, int op, u8 value)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct xway_nand_data *data = nand_get_controller_data(chip);
+
+ writeb(value, data->nandaddr + op);
}
-static void xway_select_chip(struct mtd_info *mtd, int chip)
+static void xway_select_chip(struct mtd_info *mtd, int select)
{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct xway_nand_data *data = nand_get_controller_data(chip);
- switch (chip) {
+ switch (select) {
case -1:
ltq_ebu_w32_mask(NAND_CON_CE, 0, EBU_NAND_CON);
ltq_ebu_w32_mask(NAND_CON_NANDM, 0, EBU_NAND_CON);
+ spin_unlock_irqrestore(&ebu_lock, data->csflags);
break;
case 0:
+ spin_lock_irqsave(&ebu_lock, data->csflags);
ltq_ebu_w32_mask(0, NAND_CON_NANDM, EBU_NAND_CON);
ltq_ebu_w32_mask(0, NAND_CON_CE, EBU_NAND_CON);
break;
@@ -89,26 +108,16 @@ static void xway_select_chip(struct mtd_info *mtd, int chip)
static void xway_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
- struct nand_chip *this = mtd_to_nand(mtd);
- unsigned long nandaddr = (unsigned long) this->IO_ADDR_W;
- unsigned long flags;
-
- if (ctrl & NAND_CTRL_CHANGE) {
- nandaddr &= ~(NAND_WRITE_CMD | NAND_WRITE_ADDR);
- if (ctrl & NAND_CLE)
- nandaddr |= NAND_WRITE_CMD;
- else
- nandaddr |= NAND_WRITE_ADDR;
- this->IO_ADDR_W = (void __iomem *) nandaddr;
- }
+ if (cmd == NAND_CMD_NONE)
+ return;
- if (cmd != NAND_CMD_NONE) {
- spin_lock_irqsave(&ebu_lock, flags);
- writeb(cmd, this->IO_ADDR_W);
- while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
- ;
- spin_unlock_irqrestore(&ebu_lock, flags);
- }
+ if (ctrl & NAND_CLE)
+ xway_writeb(mtd, NAND_WRITE_CMD, cmd);
+ else if (ctrl & NAND_ALE)
+ xway_writeb(mtd, NAND_WRITE_ADDR, cmd);
+
+ while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
+ ;
}
static int xway_dev_ready(struct mtd_info *mtd)
@@ -118,80 +127,122 @@ static int xway_dev_ready(struct mtd_info *mtd)
static unsigned char xway_read_byte(struct mtd_info *mtd)
{
- struct nand_chip *this = mtd_to_nand(mtd);
- unsigned long nandaddr = (unsigned long) this->IO_ADDR_R;
- unsigned long flags;
- int ret;
+ return xway_readb(mtd, NAND_READ_DATA);
+}
+
+static void xway_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ int i;
- spin_lock_irqsave(&ebu_lock, flags);
- ret = ltq_r8((void __iomem *)(nandaddr + NAND_READ_DATA));
- spin_unlock_irqrestore(&ebu_lock, flags);
+ for (i = 0; i < len; i++)
+ buf[i] = xway_readb(mtd, NAND_WRITE_DATA);
+}
- return ret;
+static void xway_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ xway_writeb(mtd, NAND_WRITE_DATA, buf[i]);
}
+/*
+ * Probe for the NAND device.
+ */
static int xway_nand_probe(struct platform_device *pdev)
{
- struct nand_chip *this = platform_get_drvdata(pdev);
- unsigned long nandaddr = (unsigned long) this->IO_ADDR_W;
- const __be32 *cs = of_get_property(pdev->dev.of_node,
- "lantiq,cs", NULL);
+ struct xway_nand_data *data;
+ struct mtd_info *mtd;
+ struct resource *res;
+ int err;
+ u32 cs;
u32 cs_flag = 0;
+ /* Allocate memory for the device structure (and zero it) */
+ data = devm_kzalloc(&pdev->dev, sizeof(struct xway_nand_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->nandaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->nandaddr))
+ return PTR_ERR(data->nandaddr);
+
+ nand_set_flash_node(&data->chip, pdev->dev.of_node);
+ mtd = nand_to_mtd(&data->chip);
+ mtd->dev.parent = &pdev->dev;
+
+ data->chip.cmd_ctrl = xway_cmd_ctrl;
+ data->chip.dev_ready = xway_dev_ready;
+ data->chip.select_chip = xway_select_chip;
+ data->chip.write_buf = xway_write_buf;
+ data->chip.read_buf = xway_read_buf;
+ data->chip.read_byte = xway_read_byte;
+ data->chip.chip_delay = 30;
+
+ data->chip.ecc.mode = NAND_ECC_SOFT;
+ data->chip.ecc.algo = NAND_ECC_HAMMING;
+
+ platform_set_drvdata(pdev, data);
+ nand_set_controller_data(&data->chip, data);
+
/* load our CS from the DT. Either we find a valid 1 or default to 0 */
- if (cs && (*cs == 1))
+ err = of_property_read_u32(pdev->dev.of_node, "lantiq,cs", &cs);
+ if (!err && cs == 1)
cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1;
/* setup the EBU to run in NAND mode on our base addr */
- ltq_ebu_w32(CPHYSADDR(nandaddr)
- | ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1);
+ ltq_ebu_w32(CPHYSADDR(data->nandaddr)
+ | ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1);
ltq_ebu_w32(BUSCON1_SETUP | BUSCON1_BCGEN_RES | BUSCON1_WAITWRC2
- | BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1
- | BUSCON1_CMULT4, LTQ_EBU_BUSCON1);
+ | BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1
+ | BUSCON1_CMULT4, LTQ_EBU_BUSCON1);
ltq_ebu_w32(NAND_CON_NANDM | NAND_CON_CSMUX | NAND_CON_CS_P
- | NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P
- | cs_flag, EBU_NAND_CON);
+ | NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P
+ | cs_flag, EBU_NAND_CON);
- /* finish with a reset */
- xway_reset_chip(this);
+ /* Scan to find existence of the device */
+ err = nand_scan(mtd, 1);
+ if (err)
+ return err;
- return 0;
-}
+ err = mtd_device_register(mtd, NULL, 0);
+ if (err)
+ nand_release(mtd);
-static struct platform_nand_data xway_nand_data = {
- .chip = {
- .nr_chips = 1,
- .chip_delay = 30,
- },
- .ctrl = {
- .probe = xway_nand_probe,
- .cmd_ctrl = xway_cmd_ctrl,
- .dev_ready = xway_dev_ready,
- .select_chip = xway_select_chip,
- .read_byte = xway_read_byte,
- }
-};
+ return err;
+}
/*
- * Try to find the node inside the DT. If it is available attach out
- * platform_nand_data
+ * Remove a NAND device.
*/
-static int __init xway_register_nand(void)
+static int xway_nand_remove(struct platform_device *pdev)
{
- struct device_node *node;
- struct platform_device *pdev;
-
- node = of_find_compatible_node(NULL, NULL, "lantiq,nand-xway");
- if (!node)
- return -ENOENT;
- pdev = of_find_device_by_node(node);
- if (!pdev)
- return -EINVAL;
- pdev->dev.platform_data = &xway_nand_data;
- of_node_put(node);
+ struct xway_nand_data *data = platform_get_drvdata(pdev);
+
+ nand_release(nand_to_mtd(&data->chip));
+
return 0;
}
-subsys_initcall(xway_register_nand);
+static const struct of_device_id xway_nand_match[] = {
+ { .compatible = "lantiq,nand-xway" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xway_nand_match);
+
+static struct platform_driver xway_nand_driver = {
+ .probe = xway_nand_probe,
+ .remove = xway_nand_remove,
+ .driver = {
+ .name = "lantiq,nand-xway",
+ .of_match_table = xway_nand_match,
+ },
+};
+
+module_platform_driver(xway_nand_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index a4b029a41..1a6d0e367 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -3188,13 +3188,13 @@ static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
size_t tmp_retlen;
ret = action(mtd, from, len, &tmp_retlen, buf);
+ if (ret)
+ break;
buf += tmp_retlen;
len -= tmp_retlen;
*retlen += tmp_retlen;
- if (ret)
- break;
}
otp_pages--;
}
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index d42c98e1f..4a682ee0f 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -29,6 +29,26 @@ config MTD_SPI_NOR_USE_4K_SECTORS
Please note that some tools/drivers/filesystems may not work with
4096 B erase size (e.g. UBIFS requires 15 KiB as a minimum).
+config SPI_ATMEL_QUADSPI
+ tristate "Atmel Quad SPI Controller"
+ depends on ARCH_AT91 || (ARM && COMPILE_TEST)
+ depends on OF && HAS_IOMEM
+ help
+ This enables support for the Quad SPI controller in master mode.
+ This driver does not support generic SPI. The implementation only
+ supports SPI NOR.
+
+config SPI_CADENCE_QUADSPI
+ tristate "Cadence Quad SPI controller"
+ depends on OF && ARM
+ help
+ Enable support for the Cadence Quad SPI Flash controller.
+
+ Cadence QSPI is a specialized controller for connecting an SPI
+ flash over a 1-, 2- or 4-bit wide bus. Enable this option if you have a
+ device with a Cadence QSPI controller and want to access the
+ Flash as an MTD device.
+
config SPI_FSL_QUADSPI
tristate "Freescale Quad SPI controller"
depends on ARCH_MXC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
@@ -38,6 +58,13 @@ config SPI_FSL_QUADSPI
This controller does not support generic SPI. It only supports
SPI NOR.
+config SPI_HISI_SFC
+ tristate "Hisilicon SPI-NOR Flash Controller(SFC)"
+ depends on ARCH_HISI || COMPILE_TEST
+ depends on HAS_IOMEM && HAS_DMA
+ help
+ This enables support for the HiSilicon SPI-NOR flash controller.
+
config SPI_NXP_SPIFI
tristate "NXP SPI Flash Interface (SPIFI)"
depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile
index 0bf3a7f81..121695e83 100644
--- a/drivers/mtd/spi-nor/Makefile
+++ b/drivers/mtd/spi-nor/Makefile
@@ -1,4 +1,7 @@
obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o
+obj-$(CONFIG_SPI_ATMEL_QUADSPI) += atmel-quadspi.o
+obj-$(CONFIG_SPI_CADENCE_QUADSPI) += cadence-quadspi.o
obj-$(CONFIG_SPI_FSL_QUADSPI) += fsl-quadspi.o
+obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o
obj-$(CONFIG_MTD_MT81xx_NOR) += mtk-quadspi.o
obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o
diff --git a/drivers/mtd/spi-nor/atmel-quadspi.c b/drivers/mtd/spi-nor/atmel-quadspi.c
new file mode 100644
index 000000000..47937d9be
--- /dev/null
+++ b/drivers/mtd/spi-nor/atmel-quadspi.c
@@ -0,0 +1,732 @@
+/*
+ * Driver for Atmel QSPI Controller
+ *
+ * Copyright (C) 2015 Atmel Corporation
+ *
+ * Author: Cyrille Pitchen <cyrille.pitchen@atmel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale.
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/platform_data/atmel.h>
+#include <linux/of.h>
+
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/pinctrl/consumer.h>
+
+/* QSPI register offsets */
+#define QSPI_CR 0x0000 /* Control Register */
+#define QSPI_MR 0x0004 /* Mode Register */
+#define QSPI_RD 0x0008 /* Receive Data Register */
+#define QSPI_TD 0x000c /* Transmit Data Register */
+#define QSPI_SR 0x0010 /* Status Register */
+#define QSPI_IER 0x0014 /* Interrupt Enable Register */
+#define QSPI_IDR 0x0018 /* Interrupt Disable Register */
+#define QSPI_IMR 0x001c /* Interrupt Mask Register */
+#define QSPI_SCR 0x0020 /* Serial Clock Register */
+
+#define QSPI_IAR 0x0030 /* Instruction Address Register */
+#define QSPI_ICR 0x0034 /* Instruction Code Register */
+#define QSPI_IFR 0x0038 /* Instruction Frame Register */
+
+#define QSPI_SMR 0x0040 /* Scrambling Mode Register */
+#define QSPI_SKR 0x0044 /* Scrambling Key Register */
+
+#define QSPI_WPMR 0x00E4 /* Write Protection Mode Register */
+#define QSPI_WPSR 0x00E8 /* Write Protection Status Register */
+
+#define QSPI_VERSION 0x00FC /* Version Register */
+
+
+/* Bitfields in QSPI_CR (Control Register) */
+#define QSPI_CR_QSPIEN BIT(0)
+#define QSPI_CR_QSPIDIS BIT(1)
+#define QSPI_CR_SWRST BIT(7)
+#define QSPI_CR_LASTXFER BIT(24)
+
+/* Bitfields in QSPI_MR (Mode Register) */
+#define QSPI_MR_SSM BIT(0)
+#define QSPI_MR_LLB BIT(1)
+#define QSPI_MR_WDRBT BIT(2)
+#define QSPI_MR_SMRM BIT(3)
+#define QSPI_MR_CSMODE_MASK GENMASK(5, 4)
+#define QSPI_MR_CSMODE_NOT_RELOADED (0 << 4)
+#define QSPI_MR_CSMODE_LASTXFER (1 << 4)
+#define QSPI_MR_CSMODE_SYSTEMATICALLY (2 << 4)
+#define QSPI_MR_NBBITS_MASK GENMASK(11, 8)
+#define QSPI_MR_NBBITS(n) ((((n) - 8) << 8) & QSPI_MR_NBBITS_MASK)
+#define QSPI_MR_DLYBCT_MASK GENMASK(23, 16)
+#define QSPI_MR_DLYBCT(n) (((n) << 16) & QSPI_MR_DLYBCT_MASK)
+#define QSPI_MR_DLYCS_MASK GENMASK(31, 24)
+#define QSPI_MR_DLYCS(n) (((n) << 24) & QSPI_MR_DLYCS_MASK)
+
+/* Bitfields in QSPI_SR/QSPI_IER/QSPI_IDR/QSPI_IMR */
+#define QSPI_SR_RDRF BIT(0)
+#define QSPI_SR_TDRE BIT(1)
+#define QSPI_SR_TXEMPTY BIT(2)
+#define QSPI_SR_OVRES BIT(3)
+#define QSPI_SR_CSR BIT(8)
+#define QSPI_SR_CSS BIT(9)
+#define QSPI_SR_INSTRE BIT(10)
+#define QSPI_SR_QSPIENS BIT(24)
+
+#define QSPI_SR_CMD_COMPLETED (QSPI_SR_INSTRE | QSPI_SR_CSR)
+
+/* Bitfields in QSPI_SCR (Serial Clock Register) */
+#define QSPI_SCR_CPOL BIT(0)
+#define QSPI_SCR_CPHA BIT(1)
+#define QSPI_SCR_SCBR_MASK GENMASK(15, 8)
+#define QSPI_SCR_SCBR(n) (((n) << 8) & QSPI_SCR_SCBR_MASK)
+#define QSPI_SCR_DLYBS_MASK GENMASK(23, 16)
+#define QSPI_SCR_DLYBS(n) (((n) << 16) & QSPI_SCR_DLYBS_MASK)
+
+/* Bitfields in QSPI_ICR (Instruction Code Register) */
+#define QSPI_ICR_INST_MASK GENMASK(7, 0)
+#define QSPI_ICR_INST(inst) (((inst) << 0) & QSPI_ICR_INST_MASK)
+#define QSPI_ICR_OPT_MASK GENMASK(23, 16)
+#define QSPI_ICR_OPT(opt) (((opt) << 16) & QSPI_ICR_OPT_MASK)
+
+/* Bitfields in QSPI_IFR (Instruction Frame Register) */
+#define QSPI_IFR_WIDTH_MASK GENMASK(2, 0)
+#define QSPI_IFR_WIDTH_SINGLE_BIT_SPI (0 << 0)
+#define QSPI_IFR_WIDTH_DUAL_OUTPUT (1 << 0)
+#define QSPI_IFR_WIDTH_QUAD_OUTPUT (2 << 0)
+#define QSPI_IFR_WIDTH_DUAL_IO (3 << 0)
+#define QSPI_IFR_WIDTH_QUAD_IO (4 << 0)
+#define QSPI_IFR_WIDTH_DUAL_CMD (5 << 0)
+#define QSPI_IFR_WIDTH_QUAD_CMD (6 << 0)
+#define QSPI_IFR_INSTEN BIT(4)
+#define QSPI_IFR_ADDREN BIT(5)
+#define QSPI_IFR_OPTEN BIT(6)
+#define QSPI_IFR_DATAEN BIT(7)
+#define QSPI_IFR_OPTL_MASK GENMASK(9, 8)
+#define QSPI_IFR_OPTL_1BIT (0 << 8)
+#define QSPI_IFR_OPTL_2BIT (1 << 8)
+#define QSPI_IFR_OPTL_4BIT (2 << 8)
+#define QSPI_IFR_OPTL_8BIT (3 << 8)
+#define QSPI_IFR_ADDRL BIT(10)
+#define QSPI_IFR_TFRTYP_MASK GENMASK(13, 12)
+#define QSPI_IFR_TFRTYP_TRSFR_READ (0 << 12)
+#define QSPI_IFR_TFRTYP_TRSFR_READ_MEM (1 << 12)
+#define QSPI_IFR_TFRTYP_TRSFR_WRITE (2 << 12)
+#define QSPI_IFR_TFRTYP_TRSFR_WRITE_MEM (3 << 12)
+#define QSPI_IFR_CRM BIT(14)
+#define QSPI_IFR_NBDUM_MASK GENMASK(20, 16)
+#define QSPI_IFR_NBDUM(n) (((n) << 16) & QSPI_IFR_NBDUM_MASK)
+
+/* Bitfields in QSPI_SMR (Scrambling Mode Register) */
+#define QSPI_SMR_SCREN BIT(0)
+#define QSPI_SMR_RVDIS BIT(1)
+
+/* Bitfields in QSPI_WPMR (Write Protection Mode Register) */
+#define QSPI_WPMR_WPEN BIT(0)
+#define QSPI_WPMR_WPKEY_MASK GENMASK(31, 8)
+#define QSPI_WPMR_WPKEY(wpkey) (((wpkey) << 8) & QSPI_WPMR_WPKEY_MASK)
+
+/* Bitfields in QSPI_WPSR (Write Protection Status Register) */
+#define QSPI_WPSR_WPVS BIT(0)
+#define QSPI_WPSR_WPVSRC_MASK GENMASK(15, 8)
+#define QSPI_WPSR_WPVSRC(src) (((src) << 8) & QSPI_WPSR_WPVSRC_MASK)
+
+
+struct atmel_qspi {
+ void __iomem *regs;
+ void __iomem *mem;
+ struct clk *clk;
+ struct platform_device *pdev;
+ u32 pending;
+
+ struct spi_nor nor;
+ u32 clk_rate;
+ struct completion cmd_completion;
+};
+
+struct atmel_qspi_command {
+ union {
+ struct {
+ u32 instruction:1;
+ u32 address:3;
+ u32 mode:1;
+ u32 dummy:1;
+ u32 data:1;
+ u32 reserved:25;
+ } bits;
+ u32 word;
+ } enable;
+ u8 instruction;
+ u8 mode;
+ u8 num_mode_cycles;
+ u8 num_dummy_cycles;
+ u32 address;
+
+ size_t buf_len;
+ const void *tx_buf;
+ void *rx_buf;
+};
+
+/* Register access functions */
+static inline u32 qspi_readl(struct atmel_qspi *aq, u32 reg)
+{
+ return readl_relaxed(aq->regs + reg);
+}
+
+static inline void qspi_writel(struct atmel_qspi *aq, u32 reg, u32 value)
+{
+ writel_relaxed(value, aq->regs + reg);
+}
+
+static int atmel_qspi_run_transfer(struct atmel_qspi *aq,
+ const struct atmel_qspi_command *cmd)
+{
+ void __iomem *ahb_mem;
+
+ /* Fall back to a PIO transfer (memcpy() DOES NOT work!) */
+ ahb_mem = aq->mem;
+ if (cmd->enable.bits.address)
+ ahb_mem += cmd->address;
+ if (cmd->tx_buf)
+ _memcpy_toio(ahb_mem, cmd->tx_buf, cmd->buf_len);
+ else
+ _memcpy_fromio(cmd->rx_buf, ahb_mem, cmd->buf_len);
+
+ return 0;
+}
+
+#ifdef DEBUG
+static void atmel_qspi_debug_command(struct atmel_qspi *aq,
+ const struct atmel_qspi_command *cmd,
+ u32 ifr)
+{
+ u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE];
+ size_t len = 0;
+ int i;
+
+ if (cmd->enable.bits.instruction)
+ cmd_buf[len++] = cmd->instruction;
+
+ for (i = cmd->enable.bits.address - 1; i >= 0; --i)
+ cmd_buf[len++] = (cmd->address >> (i << 3)) & 0xff;
+
+ if (cmd->enable.bits.mode)
+ cmd_buf[len++] = cmd->mode;
+
+ if (cmd->enable.bits.dummy) {
+ int num = cmd->num_dummy_cycles;
+
+ switch (ifr & QSPI_IFR_WIDTH_MASK) {
+ case QSPI_IFR_WIDTH_SINGLE_BIT_SPI:
+ case QSPI_IFR_WIDTH_DUAL_OUTPUT:
+ case QSPI_IFR_WIDTH_QUAD_OUTPUT:
+ num >>= 3;
+ break;
+ case QSPI_IFR_WIDTH_DUAL_IO:
+ case QSPI_IFR_WIDTH_DUAL_CMD:
+ num >>= 2;
+ break;
+ case QSPI_IFR_WIDTH_QUAD_IO:
+ case QSPI_IFR_WIDTH_QUAD_CMD:
+ num >>= 1;
+ break;
+ default:
+ return;
+ }
+
+ for (i = 0; i < num; ++i)
+ cmd_buf[len++] = 0;
+ }
+
+ /* Dump the SPI command */
+ print_hex_dump(KERN_DEBUG, "qspi cmd: ", DUMP_PREFIX_NONE,
+ 32, 1, cmd_buf, len, false);
+
+#ifdef VERBOSE_DEBUG
+ /* If verbose debug is enabled, also dump the TX data */
+ if (cmd->enable.bits.data && cmd->tx_buf)
+ print_hex_dump(KERN_DEBUG, "qspi tx : ", DUMP_PREFIX_NONE,
+ 32, 1, cmd->tx_buf, cmd->buf_len, false);
+#endif
+}
+#else
+#define atmel_qspi_debug_command(aq, cmd, ifr)
+#endif
+
+static int atmel_qspi_run_command(struct atmel_qspi *aq,
+ const struct atmel_qspi_command *cmd,
+ u32 ifr_tfrtyp, u32 ifr_width)
+{
+ u32 iar, icr, ifr, sr;
+ int err = 0;
+
+ iar = 0;
+ icr = 0;
+ ifr = ifr_tfrtyp | ifr_width;
+
+ /* Compute instruction parameters */
+ if (cmd->enable.bits.instruction) {
+ icr |= QSPI_ICR_INST(cmd->instruction);
+ ifr |= QSPI_IFR_INSTEN;
+ }
+
+ /* Compute address parameters */
+ switch (cmd->enable.bits.address) {
+ case 4:
+ ifr |= QSPI_IFR_ADDRL;
+ /* fall through to the 24-bit (3-byte) address case. */
+ case 3:
+ iar = (cmd->enable.bits.data) ? 0 : cmd->address;
+ ifr |= QSPI_IFR_ADDREN;
+ break;
+ case 0:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Compute option parameters */
+ if (cmd->enable.bits.mode && cmd->num_mode_cycles) {
+ u32 mode_cycle_bits, mode_bits;
+
+ icr |= QSPI_ICR_OPT(cmd->mode);
+ ifr |= QSPI_IFR_OPTEN;
+
+ switch (ifr & QSPI_IFR_WIDTH_MASK) {
+ case QSPI_IFR_WIDTH_SINGLE_BIT_SPI:
+ case QSPI_IFR_WIDTH_DUAL_OUTPUT:
+ case QSPI_IFR_WIDTH_QUAD_OUTPUT:
+ mode_cycle_bits = 1;
+ break;
+ case QSPI_IFR_WIDTH_DUAL_IO:
+ case QSPI_IFR_WIDTH_DUAL_CMD:
+ mode_cycle_bits = 2;
+ break;
+ case QSPI_IFR_WIDTH_QUAD_IO:
+ case QSPI_IFR_WIDTH_QUAD_CMD:
+ mode_cycle_bits = 4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mode_bits = cmd->num_mode_cycles * mode_cycle_bits;
+ switch (mode_bits) {
+ case 1:
+ ifr |= QSPI_IFR_OPTL_1BIT;
+ break;
+
+ case 2:
+ ifr |= QSPI_IFR_OPTL_2BIT;
+ break;
+
+ case 4:
+ ifr |= QSPI_IFR_OPTL_4BIT;
+ break;
+
+ case 8:
+ ifr |= QSPI_IFR_OPTL_8BIT;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* Set number of dummy cycles */
+ if (cmd->enable.bits.dummy)
+ ifr |= QSPI_IFR_NBDUM(cmd->num_dummy_cycles);
+
+ /* Set data enable */
+ if (cmd->enable.bits.data) {
+ ifr |= QSPI_IFR_DATAEN;
+
+ /* Special case for Continuous Read Mode */
+ if (!cmd->tx_buf && !cmd->rx_buf)
+ ifr |= QSPI_IFR_CRM;
+ }
+
+ /* Clear pending interrupts */
+ (void)qspi_readl(aq, QSPI_SR);
+
+ /* Set QSPI Instruction Frame registers */
+ atmel_qspi_debug_command(aq, cmd, ifr);
+ qspi_writel(aq, QSPI_IAR, iar);
+ qspi_writel(aq, QSPI_ICR, icr);
+ qspi_writel(aq, QSPI_IFR, ifr);
+
+ /* Skip to the final steps if there is no data */
+ if (!cmd->enable.bits.data)
+ goto no_data;
+
+ /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
+ (void)qspi_readl(aq, QSPI_IFR);
+
+ /* Stop here for continuous read */
+ if (!cmd->tx_buf && !cmd->rx_buf)
+ return 0;
+ /* Send/Receive data */
+ err = atmel_qspi_run_transfer(aq, cmd);
+
+ /* Release the chip-select */
+ qspi_writel(aq, QSPI_CR, QSPI_CR_LASTXFER);
+
+ if (err)
+ return err;
+
+#if defined(DEBUG) && defined(VERBOSE_DEBUG)
+ /*
+ * If verbose debug is enabled, also dump the RX data in addition to
+ * the SPI command previously dumped by atmel_qspi_debug_command()
+ */
+ if (cmd->rx_buf)
+ print_hex_dump(KERN_DEBUG, "qspi rx : ", DUMP_PREFIX_NONE,
+ 32, 1, cmd->rx_buf, cmd->buf_len, false);
+#endif
+no_data:
+ /* Poll INSTRuction End status */
+ sr = qspi_readl(aq, QSPI_SR);
+ if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
+ return err;
+
+ /* Wait for INSTRuction End interrupt */
+ reinit_completion(&aq->cmd_completion);
+ aq->pending = sr & QSPI_SR_CMD_COMPLETED;
+ qspi_writel(aq, QSPI_IER, QSPI_SR_CMD_COMPLETED);
+ if (!wait_for_completion_timeout(&aq->cmd_completion,
+ msecs_to_jiffies(1000)))
+ err = -ETIMEDOUT;
+ qspi_writel(aq, QSPI_IDR, QSPI_SR_CMD_COMPLETED);
+
+ return err;
+}
+
+static int atmel_qspi_read_reg(struct spi_nor *nor, u8 opcode,
+ u8 *buf, int len)
+{
+ struct atmel_qspi *aq = nor->priv;
+ struct atmel_qspi_command cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.enable.bits.instruction = 1;
+ cmd.enable.bits.data = 1;
+ cmd.instruction = opcode;
+ cmd.rx_buf = buf;
+ cmd.buf_len = len;
+ return atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_READ,
+ QSPI_IFR_WIDTH_SINGLE_BIT_SPI);
+}
+
+static int atmel_qspi_write_reg(struct spi_nor *nor, u8 opcode,
+ u8 *buf, int len)
+{
+ struct atmel_qspi *aq = nor->priv;
+ struct atmel_qspi_command cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.enable.bits.instruction = 1;
+ cmd.enable.bits.data = (buf != NULL && len > 0);
+ cmd.instruction = opcode;
+ cmd.tx_buf = buf;
+ cmd.buf_len = len;
+ return atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_WRITE,
+ QSPI_IFR_WIDTH_SINGLE_BIT_SPI);
+}
+
+static ssize_t atmel_qspi_write(struct spi_nor *nor, loff_t to, size_t len,
+ const u_char *write_buf)
+{
+ struct atmel_qspi *aq = nor->priv;
+ struct atmel_qspi_command cmd;
+ ssize_t ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.enable.bits.instruction = 1;
+ cmd.enable.bits.address = nor->addr_width;
+ cmd.enable.bits.data = 1;
+ cmd.instruction = nor->program_opcode;
+ cmd.address = (u32)to;
+ cmd.tx_buf = write_buf;
+ cmd.buf_len = len;
+ ret = atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_WRITE_MEM,
+ QSPI_IFR_WIDTH_SINGLE_BIT_SPI);
+ return (ret < 0) ? ret : len;
+}
+
+static int atmel_qspi_erase(struct spi_nor *nor, loff_t offs)
+{
+ struct atmel_qspi *aq = nor->priv;
+ struct atmel_qspi_command cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.enable.bits.instruction = 1;
+ cmd.enable.bits.address = nor->addr_width;
+ cmd.instruction = nor->erase_opcode;
+ cmd.address = (u32)offs;
+ return atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_WRITE,
+ QSPI_IFR_WIDTH_SINGLE_BIT_SPI);
+}
+
+static ssize_t atmel_qspi_read(struct spi_nor *nor, loff_t from, size_t len,
+ u_char *read_buf)
+{
+ struct atmel_qspi *aq = nor->priv;
+ struct atmel_qspi_command cmd;
+ u8 num_mode_cycles, num_dummy_cycles;
+ u32 ifr_width;
+ ssize_t ret;
+
+ switch (nor->flash_read) {
+ case SPI_NOR_NORMAL:
+ case SPI_NOR_FAST:
+ ifr_width = QSPI_IFR_WIDTH_SINGLE_BIT_SPI;
+ break;
+
+ case SPI_NOR_DUAL:
+ ifr_width = QSPI_IFR_WIDTH_DUAL_OUTPUT;
+ break;
+
+ case SPI_NOR_QUAD:
+ ifr_width = QSPI_IFR_WIDTH_QUAD_OUTPUT;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (nor->read_dummy >= 2) {
+ num_mode_cycles = 2;
+ num_dummy_cycles = nor->read_dummy - 2;
+ } else {
+ num_mode_cycles = nor->read_dummy;
+ num_dummy_cycles = 0;
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.enable.bits.instruction = 1;
+ cmd.enable.bits.address = nor->addr_width;
+ cmd.enable.bits.mode = (num_mode_cycles > 0);
+ cmd.enable.bits.dummy = (num_dummy_cycles > 0);
+ cmd.enable.bits.data = 1;
+ cmd.instruction = nor->read_opcode;
+ cmd.address = (u32)from;
+ cmd.mode = 0xff; /* This value prevents the memory from entering the 0-4-4 mode */
+ cmd.num_mode_cycles = num_mode_cycles;
+ cmd.num_dummy_cycles = num_dummy_cycles;
+ cmd.rx_buf = read_buf;
+ cmd.buf_len = len;
+ ret = atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_READ_MEM,
+ ifr_width);
+ return (ret < 0) ? ret : len;
+}
+
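How this mode/dummy split combines with the option-length selection in atmel_qspi_run_command() is easiest to see with numbers; a hedged example, assuming a quad-output fast read with nor->read_dummy == 8:

/* num_mode_cycles = 2, num_dummy_cycles = 8 - 2 = 6. Quad-output reads
 * clock option bits out one per cycle, so mode_bits = 2 * 1 selects
 * QSPI_IFR_OPTL_2BIT, and the frame is programmed with QSPI_IFR_NBDUM(6).
 */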
+static int atmel_qspi_init(struct atmel_qspi *aq)
+{
+ unsigned long src_rate;
+ u32 mr, scr, scbr;
+
+ /* Reset the QSPI controller */
+ qspi_writel(aq, QSPI_CR, QSPI_CR_SWRST);
+
+ /* Set the QSPI controller in Serial Memory Mode */
+ mr = QSPI_MR_NBBITS(8) | QSPI_MR_SSM;
+ qspi_writel(aq, QSPI_MR, mr);
+
+ src_rate = clk_get_rate(aq->clk);
+ if (!src_rate)
+ return -EINVAL;
+
+ /* Compute the QSPI baudrate */
+ scbr = DIV_ROUND_UP(src_rate, aq->clk_rate);
+ if (scbr > 0)
+ scbr--;
+ scr = QSPI_SCR_SCBR(scbr);
+ qspi_writel(aq, QSPI_SCR, scr);
+
+ /* Enable the QSPI controller */
+ qspi_writel(aq, QSPI_CR, QSPI_CR_QSPIEN);
+
+ return 0;
+}
+
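The divider is rounded up so that the generated serial clock never exceeds the rate requested in the DT. A worked computation with hypothetical clocks:

/* SCK = fperiph / (SCBR + 1). With fperiph = 133 MHz and
 * spi-max-frequency = 66 MHz:
 *   scbr = DIV_ROUND_UP(133000000, 66000000) - 1 = 2
 *   SCK  = 133 MHz / 3 ~= 44.3 MHz  (never above the requested 66 MHz)
 */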
+static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
+{
+ struct atmel_qspi *aq = (struct atmel_qspi *)dev_id;
+ u32 status, mask, pending;
+
+ status = qspi_readl(aq, QSPI_SR);
+ mask = qspi_readl(aq, QSPI_IMR);
+ pending = status & mask;
+
+ if (!pending)
+ return IRQ_NONE;
+
+ aq->pending |= pending;
+ if ((aq->pending & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
+ complete(&aq->cmd_completion);
+
+ return IRQ_HANDLED;
+}
+
+static int atmel_qspi_probe(struct platform_device *pdev)
+{
+ struct device_node *child, *np = pdev->dev.of_node;
+ struct atmel_qspi *aq;
+ struct resource *res;
+ struct spi_nor *nor;
+ struct mtd_info *mtd;
+ int irq, err = 0;
+
+ if (of_get_child_count(np) != 1)
+ return -ENODEV;
+ child = of_get_next_child(np, NULL);
+
+ aq = devm_kzalloc(&pdev->dev, sizeof(*aq), GFP_KERNEL);
+ if (!aq) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ platform_set_drvdata(pdev, aq);
+ init_completion(&aq->cmd_completion);
+ aq->pdev = pdev;
+
+ /* Map the registers */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_base");
+ aq->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(aq->regs)) {
+ dev_err(&pdev->dev, "missing registers\n");
+ err = PTR_ERR(aq->regs);
+ goto exit;
+ }
+
+ /* Map the AHB memory */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mmap");
+ aq->mem = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(aq->mem)) {
+ dev_err(&pdev->dev, "missing AHB memory\n");
+ err = PTR_ERR(aq->mem);
+ goto exit;
+ }
+
+ /* Get the peripheral clock */
+ aq->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(aq->clk)) {
+ dev_err(&pdev->dev, "missing peripheral clock\n");
+ err = PTR_ERR(aq->clk);
+ goto exit;
+ }
+
+ /* Enable the peripheral clock */
+ err = clk_prepare_enable(aq->clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable the peripheral clock\n");
+ goto exit;
+ }
+
+ /* Request the IRQ */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "missing IRQ\n");
+ err = irq;
+ goto disable_clk;
+ }
+ err = devm_request_irq(&pdev->dev, irq, atmel_qspi_interrupt,
+ 0, dev_name(&pdev->dev), aq);
+ if (err)
+ goto disable_clk;
+
+ /* Setup the spi-nor */
+ nor = &aq->nor;
+ mtd = &nor->mtd;
+
+ nor->dev = &pdev->dev;
+ spi_nor_set_flash_node(nor, child);
+ nor->priv = aq;
+ mtd->priv = nor;
+
+ nor->read_reg = atmel_qspi_read_reg;
+ nor->write_reg = atmel_qspi_write_reg;
+ nor->read = atmel_qspi_read;
+ nor->write = atmel_qspi_write;
+ nor->erase = atmel_qspi_erase;
+
+ err = of_property_read_u32(child, "spi-max-frequency", &aq->clk_rate);
+ if (err < 0)
+ goto disable_clk;
+
+ err = atmel_qspi_init(aq);
+ if (err)
+ goto disable_clk;
+
+ err = spi_nor_scan(nor, NULL, SPI_NOR_QUAD);
+ if (err)
+ goto disable_clk;
+
+ err = mtd_device_register(mtd, NULL, 0);
+ if (err)
+ goto disable_clk;
+
+ of_node_put(child);
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(aq->clk);
+exit:
+ of_node_put(child);
+
+ return err;
+}
+
+static int atmel_qspi_remove(struct platform_device *pdev)
+{
+ struct atmel_qspi *aq = platform_get_drvdata(pdev);
+
+ mtd_device_unregister(&aq->nor.mtd);
+ qspi_writel(aq, QSPI_CR, QSPI_CR_QSPIDIS);
+ clk_disable_unprepare(aq->clk);
+ return 0;
+}
+
+
+static const struct of_device_id atmel_qspi_dt_ids[] = {
+ { .compatible = "atmel,sama5d2-qspi" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_qspi_dt_ids);
+
+static struct platform_driver atmel_qspi_driver = {
+ .driver = {
+ .name = "atmel_qspi",
+ .of_match_table = atmel_qspi_dt_ids,
+ },
+ .probe = atmel_qspi_probe,
+ .remove = atmel_qspi_remove,
+};
+module_platform_driver(atmel_qspi_driver);
+
+MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@atmel.com>");
+MODULE_DESCRIPTION("Atmel QSPI Controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
new file mode 100644
index 000000000..d403ba7b8
--- /dev/null
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -0,0 +1,1299 @@
+/*
+ * Driver for Cadence QSPI Controller
+ *
+ * Copyright Altera Corporation (C) 2012-2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/spi/spi.h>
+#include <linux/timer.h>
+
+#define CQSPI_NAME "cadence-qspi"
+#define CQSPI_MAX_CHIPSELECT 16
+
+struct cqspi_st;
+
+struct cqspi_flash_pdata {
+ struct spi_nor nor;
+ struct cqspi_st *cqspi;
+ u32 clk_rate;
+ u32 read_delay;
+ u32 tshsl_ns;
+ u32 tsd2d_ns;
+ u32 tchsh_ns;
+ u32 tslch_ns;
+ u8 inst_width;
+ u8 addr_width;
+ u8 data_width;
+ u8 cs;
+ bool registered;
+};
+
+struct cqspi_st {
+ struct platform_device *pdev;
+
+ struct clk *clk;
+ unsigned int sclk;
+
+ void __iomem *iobase;
+ void __iomem *ahb_base;
+ struct completion transfer_complete;
+ struct mutex bus_mutex;
+
+ int current_cs;
+ int current_page_size;
+ int current_erase_size;
+ int current_addr_width;
+ unsigned long master_ref_clk_hz;
+ bool is_decoded_cs;
+ u32 fifo_depth;
+ u32 fifo_width;
+ u32 trigger_address;
+ struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
+};
+
+/* Operation timeout value */
+#define CQSPI_TIMEOUT_MS 500
+#define CQSPI_READ_TIMEOUT_MS 10
+
+/* Instruction type */
+#define CQSPI_INST_TYPE_SINGLE 0
+#define CQSPI_INST_TYPE_DUAL 1
+#define CQSPI_INST_TYPE_QUAD 2
+
+#define CQSPI_DUMMY_CLKS_PER_BYTE 8
+#define CQSPI_DUMMY_BYTES_MAX 4
+#define CQSPI_DUMMY_CLKS_MAX 31
+
+#define CQSPI_STIG_DATA_LEN_MAX 8
+
+/* Register map */
+#define CQSPI_REG_CONFIG 0x00
+#define CQSPI_REG_CONFIG_ENABLE_MASK BIT(0)
+#define CQSPI_REG_CONFIG_DECODE_MASK BIT(9)
+#define CQSPI_REG_CONFIG_CHIPSELECT_LSB 10
+#define CQSPI_REG_CONFIG_DMA_MASK BIT(15)
+#define CQSPI_REG_CONFIG_BAUD_LSB 19
+#define CQSPI_REG_CONFIG_IDLE_LSB 31
+#define CQSPI_REG_CONFIG_CHIPSELECT_MASK 0xF
+#define CQSPI_REG_CONFIG_BAUD_MASK 0xF
+
+#define CQSPI_REG_RD_INSTR 0x04
+#define CQSPI_REG_RD_INSTR_OPCODE_LSB 0
+#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB 8
+#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB 12
+#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB 16
+#define CQSPI_REG_RD_INSTR_MODE_EN_LSB 20
+#define CQSPI_REG_RD_INSTR_DUMMY_LSB 24
+#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK 0x3
+#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK 0x3
+#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK 0x3
+#define CQSPI_REG_RD_INSTR_DUMMY_MASK 0x1F
+
+#define CQSPI_REG_WR_INSTR 0x08
+#define CQSPI_REG_WR_INSTR_OPCODE_LSB 0
+#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB 12
+#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB 16
+
+#define CQSPI_REG_DELAY 0x0C
+#define CQSPI_REG_DELAY_TSLCH_LSB 0
+#define CQSPI_REG_DELAY_TCHSH_LSB 8
+#define CQSPI_REG_DELAY_TSD2D_LSB 16
+#define CQSPI_REG_DELAY_TSHSL_LSB 24
+#define CQSPI_REG_DELAY_TSLCH_MASK 0xFF
+#define CQSPI_REG_DELAY_TCHSH_MASK 0xFF
+#define CQSPI_REG_DELAY_TSD2D_MASK 0xFF
+#define CQSPI_REG_DELAY_TSHSL_MASK 0xFF
+
+#define CQSPI_REG_READCAPTURE 0x10
+#define CQSPI_REG_READCAPTURE_BYPASS_LSB 0
+#define CQSPI_REG_READCAPTURE_DELAY_LSB 1
+#define CQSPI_REG_READCAPTURE_DELAY_MASK 0xF
+
+#define CQSPI_REG_SIZE 0x14
+#define CQSPI_REG_SIZE_ADDRESS_LSB 0
+#define CQSPI_REG_SIZE_PAGE_LSB 4
+#define CQSPI_REG_SIZE_BLOCK_LSB 16
+#define CQSPI_REG_SIZE_ADDRESS_MASK 0xF
+#define CQSPI_REG_SIZE_PAGE_MASK 0xFFF
+#define CQSPI_REG_SIZE_BLOCK_MASK 0x3F
+
+#define CQSPI_REG_SRAMPARTITION 0x18
+#define CQSPI_REG_INDIRECTTRIGGER 0x1C
+
+#define CQSPI_REG_DMA 0x20
+#define CQSPI_REG_DMA_SINGLE_LSB 0
+#define CQSPI_REG_DMA_BURST_LSB 8
+#define CQSPI_REG_DMA_SINGLE_MASK 0xFF
+#define CQSPI_REG_DMA_BURST_MASK 0xFF
+
+#define CQSPI_REG_REMAP 0x24
+#define CQSPI_REG_MODE_BIT 0x28
+
+#define CQSPI_REG_SDRAMLEVEL 0x2C
+#define CQSPI_REG_SDRAMLEVEL_RD_LSB 0
+#define CQSPI_REG_SDRAMLEVEL_WR_LSB 16
+#define CQSPI_REG_SDRAMLEVEL_RD_MASK 0xFFFF
+#define CQSPI_REG_SDRAMLEVEL_WR_MASK 0xFFFF
+
+#define CQSPI_REG_IRQSTATUS 0x40
+#define CQSPI_REG_IRQMASK 0x44
+
+#define CQSPI_REG_INDIRECTRD 0x60
+#define CQSPI_REG_INDIRECTRD_START_MASK BIT(0)
+#define CQSPI_REG_INDIRECTRD_CANCEL_MASK BIT(1)
+#define CQSPI_REG_INDIRECTRD_DONE_MASK BIT(5)
+
+#define CQSPI_REG_INDIRECTRDWATERMARK 0x64
+#define CQSPI_REG_INDIRECTRDSTARTADDR 0x68
+#define CQSPI_REG_INDIRECTRDBYTES 0x6C
+
+#define CQSPI_REG_CMDCTRL 0x90
+#define CQSPI_REG_CMDCTRL_EXECUTE_MASK BIT(0)
+#define CQSPI_REG_CMDCTRL_INPROGRESS_MASK BIT(1)
+#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB 12
+#define CQSPI_REG_CMDCTRL_WR_EN_LSB 15
+#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB 16
+#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB 19
+#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB 20
+#define CQSPI_REG_CMDCTRL_RD_EN_LSB 23
+#define CQSPI_REG_CMDCTRL_OPCODE_LSB 24
+#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK 0x7
+#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK 0x3
+#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK 0x7
+
+#define CQSPI_REG_INDIRECTWR 0x70
+#define CQSPI_REG_INDIRECTWR_START_MASK BIT(0)
+#define CQSPI_REG_INDIRECTWR_CANCEL_MASK BIT(1)
+#define CQSPI_REG_INDIRECTWR_DONE_MASK BIT(5)
+
+#define CQSPI_REG_INDIRECTWRWATERMARK 0x74
+#define CQSPI_REG_INDIRECTWRSTARTADDR 0x78
+#define CQSPI_REG_INDIRECTWRBYTES 0x7C
+
+#define CQSPI_REG_CMDADDRESS 0x94
+#define CQSPI_REG_CMDREADDATALOWER 0xA0
+#define CQSPI_REG_CMDREADDATAUPPER 0xA4
+#define CQSPI_REG_CMDWRITEDATALOWER 0xA8
+#define CQSPI_REG_CMDWRITEDATAUPPER 0xAC
+
+/* Interrupt status bits */
+#define CQSPI_REG_IRQ_MODE_ERR BIT(0)
+#define CQSPI_REG_IRQ_UNDERFLOW BIT(1)
+#define CQSPI_REG_IRQ_IND_COMP BIT(2)
+#define CQSPI_REG_IRQ_IND_RD_REJECT BIT(3)
+#define CQSPI_REG_IRQ_WR_PROTECTED_ERR BIT(4)
+#define CQSPI_REG_IRQ_ILLEGAL_AHB_ERR BIT(5)
+#define CQSPI_REG_IRQ_WATERMARK BIT(6)
+#define CQSPI_REG_IRQ_IND_SRAM_FULL BIT(12)
+
+#define CQSPI_IRQ_MASK_RD (CQSPI_REG_IRQ_WATERMARK | \
+ CQSPI_REG_IRQ_IND_SRAM_FULL | \
+ CQSPI_REG_IRQ_IND_COMP)
+
+#define CQSPI_IRQ_MASK_WR (CQSPI_REG_IRQ_IND_COMP | \
+ CQSPI_REG_IRQ_WATERMARK | \
+ CQSPI_REG_IRQ_UNDERFLOW)
+
+#define CQSPI_IRQ_STATUS_MASK 0x1FFFF
+
+static int cqspi_wait_for_bit(void __iomem *reg, const u32 mask, bool clear)
+{
+ unsigned long end = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS);
+ u32 val;
+
+ while (1) {
+ val = readl(reg);
+ if (clear)
+ val = ~val;
+ val &= mask;
+
+ if (val == mask)
+ return 0;
+
+ if (time_after(jiffies, end))
+ return -ETIMEDOUT;
+ }
+}
+
+static bool cqspi_is_idle(struct cqspi_st *cqspi)
+{
+ u32 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+
+ return reg & (1 << CQSPI_REG_CONFIG_IDLE_LSB);
+}
+
+static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi)
+{
+ u32 reg = readl(cqspi->iobase + CQSPI_REG_SDRAMLEVEL);
+
+ reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB;
+ return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
+}
+
+static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
+{
+ struct cqspi_st *cqspi = dev;
+ unsigned int irq_status;
+
+ /* Read interrupt status */
+ irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS);
+
+ /* Clear interrupt */
+ writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS);
+
+ irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
+
+ if (irq_status)
+ complete(&cqspi->transfer_complete);
+
+ return IRQ_HANDLED;
+}
+
+static unsigned int cqspi_calc_rdreg(struct spi_nor *nor, const u8 opcode)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ u32 rdreg = 0;
+
+ rdreg |= f_pdata->inst_width << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
+ rdreg |= f_pdata->addr_width << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
+ rdreg |= f_pdata->data_width << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;
+
+ return rdreg;
+}
+
+static int cqspi_wait_idle(struct cqspi_st *cqspi)
+{
+ const unsigned int poll_idle_retry = 3;
+ unsigned int count = 0;
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS);
+ while (1) {
+ /*
+ * Read a few times in succession to ensure that the controller
+ * is indeed idle, that is, the bit does not transition
+ * low again.
+ */
+ if (cqspi_is_idle(cqspi))
+ count++;
+ else
+ count = 0;
+
+ if (count >= poll_idle_retry)
+ return 0;
+
+ if (time_after(jiffies, timeout)) {
+ /* Timeout, in busy mode. */
+ dev_err(&cqspi->pdev->dev,
+ "QSPI is still busy after %dms timeout.\n",
+ CQSPI_TIMEOUT_MS);
+ return -ETIMEDOUT;
+ }
+
+ cpu_relax();
+ }
+}
+
+static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg)
+{
+ void __iomem *reg_base = cqspi->iobase;
+ int ret;
+
+ /* Write the CMDCTRL without start execution. */
+ writel(reg, reg_base + CQSPI_REG_CMDCTRL);
+ /* Start execute */
+ reg |= CQSPI_REG_CMDCTRL_EXECUTE_MASK;
+ writel(reg, reg_base + CQSPI_REG_CMDCTRL);
+
+ /* Polling for completion. */
+ ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_CMDCTRL,
+ CQSPI_REG_CMDCTRL_INPROGRESS_MASK, 1);
+ if (ret) {
+ dev_err(&cqspi->pdev->dev,
+ "Flash command execution timed out.\n");
+ return ret;
+ }
+
+ /* Polling QSPI idle status. */
+ return cqspi_wait_idle(cqspi);
+}
+
+static int cqspi_command_read(struct spi_nor *nor,
+ const u8 *txbuf, const unsigned n_tx,
+ u8 *rxbuf, const unsigned n_rx)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int rdreg;
+ unsigned int reg;
+ unsigned int read_len;
+ int status;
+
+ if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
+ dev_err(nor->dev, "Invalid input argument, len %d rxbuf 0x%p\n",
+ n_rx, rxbuf);
+ return -EINVAL;
+ }
+
+ reg = txbuf[0] << CQSPI_REG_CMDCTRL_OPCODE_LSB;
+
+ rdreg = cqspi_calc_rdreg(nor, txbuf[0]);
+ writel(rdreg, reg_base + CQSPI_REG_RD_INSTR);
+
+ reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);
+
+ /* 0 means 1 byte. */
+ reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
+ << CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
+ status = cqspi_exec_flash_cmd(cqspi, reg);
+ if (status)
+ return status;
+
+ reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);
+
+ /* Put the read value into rx_buf */
+ read_len = (n_rx > 4) ? 4 : n_rx;
+ memcpy(rxbuf, &reg, read_len);
+ rxbuf += read_len;
+
+ if (n_rx > 4) {
+ reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);
+
+ read_len = n_rx - read_len;
+ memcpy(rxbuf, &reg, read_len);
+ }
+
+ return 0;
+}
+
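The LOWER/UPPER register split is worth a worked example; assuming a hypothetical 6-byte STIG read (e.g. a JEDEC ID):

/* First pass: read_len = min(6, 4) = 4, bytes 0-3 copied from
 * CQSPI_REG_CMDREADDATALOWER. Second pass: read_len = 6 - 4 = 2,
 * bytes 4-5 copied from CQSPI_REG_CMDREADDATAUPPER.
 * CQSPI_STIG_DATA_LEN_MAX == 8 bounds n_rx to these two registers.
 */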
+static int cqspi_command_write(struct spi_nor *nor, const u8 opcode,
+ const u8 *txbuf, const unsigned n_tx)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int reg;
+ unsigned int data;
+ int ret;
+
+ if (n_tx > 4 || (n_tx && !txbuf)) {
+ dev_err(nor->dev,
+ "Invalid input argument, cmdlen %d txbuf 0x%p\n",
+ n_tx, txbuf);
+ return -EINVAL;
+ }
+
+ reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
+ if (n_tx) {
+ reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
+ reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
+ << CQSPI_REG_CMDCTRL_WR_BYTES_LSB;
+ data = 0;
+ memcpy(&data, txbuf, n_tx);
+ writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER);
+ }
+
+ ret = cqspi_exec_flash_cmd(cqspi, reg);
+ return ret;
+}
+
+static int cqspi_command_write_addr(struct spi_nor *nor,
+ const u8 opcode, const unsigned int addr)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int reg;
+
+ reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
+ reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
+ reg |= ((nor->addr_width - 1) & CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
+ << CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;
+
+ writel(addr, reg_base + CQSPI_REG_CMDADDRESS);
+
+ return cqspi_exec_flash_cmd(cqspi, reg);
+}
+
+static int cqspi_indirect_read_setup(struct spi_nor *nor,
+ const unsigned int from_addr)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int dummy_clk = 0;
+ unsigned int reg;
+
+ writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
+
+ reg = nor->read_opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
+ reg |= cqspi_calc_rdreg(nor, nor->read_opcode);
+
+ /* Setup dummy clock cycles */
+ dummy_clk = nor->read_dummy;
+ if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
+ dummy_clk = CQSPI_DUMMY_CLKS_MAX;
+
+ if (dummy_clk / 8) {
+ reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
+ /* Set mode bits high to ensure chip doesn't enter XIP */
+ writel(0xFF, reg_base + CQSPI_REG_MODE_BIT);
+
+ /* Need to subtract the mode byte (8 clocks). */
+ if (f_pdata->inst_width != CQSPI_INST_TYPE_QUAD)
+ dummy_clk -= 8;
+
+ if (dummy_clk)
+ reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
+ << CQSPI_REG_RD_INSTR_DUMMY_LSB;
+ }
+
+ writel(reg, reg_base + CQSPI_REG_RD_INSTR);
+
+ /* Set address width */
+ reg = readl(reg_base + CQSPI_REG_SIZE);
+ reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
+ reg |= (nor->addr_width - 1);
+ writel(reg, reg_base + CQSPI_REG_SIZE);
+ return 0;
+}
+
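Tracing the dummy-cycle bookkeeping above with hypothetical numbers, for nor->read_dummy == 10 on a non-quad instruction width:

/* dummy_clk = 10, so dummy_clk / 8 != 0: mode bits are enabled and
 * 0xFF is written to CQSPI_REG_MODE_BIT. The mode byte costs one byte
 * time (8 clocks), so dummy_clk -= 8 leaves 2, and the DUMMY field of
 * CQSPI_REG_RD_INSTR is programmed with the 2 remaining cycles.
 */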
+static int cqspi_indirect_read_execute(struct spi_nor *nor,
+ u8 *rxbuf, const unsigned n_rx)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ void __iomem *ahb_base = cqspi->ahb_base;
+ unsigned int remaining = n_rx;
+ unsigned int bytes_to_read = 0;
+ int ret = 0;
+
+ writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES);
+
+ /* Clear all interrupts. */
+ writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
+
+ writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);
+
+ reinit_completion(&cqspi->transfer_complete);
+ writel(CQSPI_REG_INDIRECTRD_START_MASK,
+ reg_base + CQSPI_REG_INDIRECTRD);
+
+ while (remaining > 0) {
+ ret = wait_for_completion_timeout(&cqspi->transfer_complete,
+ msecs_to_jiffies
+ (CQSPI_READ_TIMEOUT_MS));
+
+ bytes_to_read = cqspi_get_rd_sram_level(cqspi);
+
+ if (!ret && bytes_to_read == 0) {
+ dev_err(nor->dev, "Indirect read timeout, no bytes\n");
+ ret = -ETIMEDOUT;
+ goto failrd;
+ }
+
+ while (bytes_to_read != 0) {
+ bytes_to_read *= cqspi->fifo_width;
+ bytes_to_read = bytes_to_read > remaining ?
+ remaining : bytes_to_read;
+ readsl(ahb_base, rxbuf, DIV_ROUND_UP(bytes_to_read, 4));
+ rxbuf += bytes_to_read;
+ remaining -= bytes_to_read;
+ bytes_to_read = cqspi_get_rd_sram_level(cqspi);
+ }
+
+ if (remaining > 0)
+ reinit_completion(&cqspi->transfer_complete);
+ }
+
+ /* Check indirect done status */
+ ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTRD,
+ CQSPI_REG_INDIRECTRD_DONE_MASK, 0);
+ if (ret) {
+ dev_err(nor->dev,
+ "Indirect read completion error (%i)\n", ret);
+ goto failrd;
+ }
+
+ /* Disable interrupt */
+ writel(0, reg_base + CQSPI_REG_IRQMASK);
+
+ /* Clear indirect completion status */
+ writel(CQSPI_REG_INDIRECTRD_DONE_MASK, reg_base + CQSPI_REG_INDIRECTRD);
+
+ return 0;
+
+failrd:
+ /* Disable interrupt */
+ writel(0, reg_base + CQSPI_REG_IRQMASK);
+
+ /* Cancel the indirect read */
+ writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
+ reg_base + CQSPI_REG_INDIRECTRD);
+ return ret;
+}
+
+static int cqspi_indirect_write_setup(struct spi_nor *nor,
+ const unsigned int to_addr)
+{
+ unsigned int reg;
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+
+ /* Set opcode. */
+ reg = nor->program_opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
+ writel(reg, reg_base + CQSPI_REG_WR_INSTR);
+ reg = cqspi_calc_rdreg(nor, nor->program_opcode);
+ writel(reg, reg_base + CQSPI_REG_RD_INSTR);
+
+ writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
+
+ reg = readl(reg_base + CQSPI_REG_SIZE);
+ reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
+ reg |= (nor->addr_width - 1);
+ writel(reg, reg_base + CQSPI_REG_SIZE);
+ return 0;
+}
+
+static int cqspi_indirect_write_execute(struct spi_nor *nor,
+ const u8 *txbuf, const unsigned n_tx)
+{
+ const unsigned int page_size = nor->page_size;
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int remaining = n_tx;
+ unsigned int write_bytes;
+ int ret;
+
+ writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES);
+
+ /* Clear all interrupts. */
+ writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
+
+ writel(CQSPI_IRQ_MASK_WR, reg_base + CQSPI_REG_IRQMASK);
+
+ reinit_completion(&cqspi->transfer_complete);
+ writel(CQSPI_REG_INDIRECTWR_START_MASK,
+ reg_base + CQSPI_REG_INDIRECTWR);
+
+ while (remaining > 0) {
+ write_bytes = remaining > page_size ? page_size : remaining;
+ writesl(cqspi->ahb_base, txbuf, DIV_ROUND_UP(write_bytes, 4));
+
+ ret = wait_for_completion_timeout(&cqspi->transfer_complete,
+ msecs_to_jiffies
+ (CQSPI_TIMEOUT_MS));
+ if (!ret) {
+ dev_err(nor->dev, "Indirect write timeout\n");
+ ret = -ETIMEDOUT;
+ goto failwr;
+ }
+
+ txbuf += write_bytes;
+ remaining -= write_bytes;
+
+ if (remaining > 0)
+ reinit_completion(&cqspi->transfer_complete);
+ }
+
+ /* Check indirect done status */
+ ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTWR,
+ CQSPI_REG_INDIRECTWR_DONE_MASK, 0);
+ if (ret) {
+ dev_err(nor->dev,
+ "Indirect write completion error (%i)\n", ret);
+ goto failwr;
+ }
+
+ /* Disable interrupt. */
+ writel(0, reg_base + CQSPI_REG_IRQMASK);
+
+ /* Clear indirect completion status */
+ writel(CQSPI_REG_INDIRECTWR_DONE_MASK, reg_base + CQSPI_REG_INDIRECTWR);
+
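+ /* Wait for the controller to become idle before returning. */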
+ cqspi_wait_idle(cqspi);
+
+ return 0;
+
+failwr:
+ /* Disable interrupt. */
+ writel(0, reg_base + CQSPI_REG_IRQMASK);
+
+ /* Cancel the indirect write */
+ writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
+ reg_base + CQSPI_REG_INDIRECTWR);
+ return ret;
+}
+
+static void cqspi_chipselect(struct spi_nor *nor)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int chip_select = f_pdata->cs;
+ unsigned int reg;
+
+ reg = readl(reg_base + CQSPI_REG_CONFIG);
+ if (cqspi->is_decoded_cs) {
+ reg |= CQSPI_REG_CONFIG_DECODE_MASK;
+ } else {
+ reg &= ~CQSPI_REG_CONFIG_DECODE_MASK;
+
+ /* Convert CS when no decoder is used:
+ * CS0 to 4b'1110
+ * CS1 to 4b'1101
+ * CS2 to 4b'1011
+ * CS3 to 4b'0111
+ */
+ chip_select = 0xF & ~(1 << chip_select);
+ }
+
+ reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
+ << CQSPI_REG_CONFIG_CHIPSELECT_LSB);
+ reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
+ << CQSPI_REG_CONFIG_CHIPSELECT_LSB;
+ writel(reg, reg_base + CQSPI_REG_CONFIG);
+}
+
+static void cqspi_configure_cs_and_sizes(struct spi_nor *nor)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *iobase = cqspi->iobase;
+ unsigned int reg;
+
+ /* configure page size and block size. */
+ reg = readl(iobase + CQSPI_REG_SIZE);
+ reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB);
+ reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB);
+ reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
+ reg |= (nor->page_size << CQSPI_REG_SIZE_PAGE_LSB);
+ reg |= (ilog2(nor->mtd.erasesize) << CQSPI_REG_SIZE_BLOCK_LSB);
+ reg |= (nor->addr_width - 1);
+ writel(reg, iobase + CQSPI_REG_SIZE);
+
+ /* configure the chip select */
+ cqspi_chipselect(nor);
+
+ /* Store the new configuration of the controller */
+ cqspi->current_page_size = nor->page_size;
+ cqspi->current_erase_size = nor->mtd.erasesize;
+ cqspi->current_addr_width = nor->addr_width;
+}
+
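+/*
+ * Convert a delay in nanoseconds into reference clock ticks, rounding up.
+ * For example, a 400 MHz reference clock (400000 kHz) and a 50 ns delay
+ * give DIV_ROUND_UP(400000 * 50, 1000000) = 20 ticks.
+ */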
+static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz,
+ const unsigned int ns_val)
+{
+ unsigned int ticks;
+
+ ticks = ref_clk_hz / 1000; /* kHz */
+ ticks = DIV_ROUND_UP(ticks * ns_val, 1000000);
+
+ return ticks;
+}
+
+static void cqspi_delay(struct spi_nor *nor)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ void __iomem *iobase = cqspi->iobase;
+ const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
+ unsigned int tshsl, tchsh, tslch, tsd2d;
+ unsigned int reg;
+ unsigned int tsclk;
+
+ /* calculate the number of ref ticks for one sclk tick */
+ tsclk = DIV_ROUND_UP(ref_clk_hz, cqspi->sclk);
+
+ tshsl = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tshsl_ns);
+ /* this particular value must be at least one sclk */
+ if (tshsl < tsclk)
+ tshsl = tsclk;
+
+ tchsh = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tchsh_ns);
+ tslch = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tslch_ns);
+ tsd2d = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tsd2d_ns);
+
+ reg = (tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
+ << CQSPI_REG_DELAY_TSHSL_LSB;
+ reg |= (tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
+ << CQSPI_REG_DELAY_TCHSH_LSB;
+ reg |= (tslch & CQSPI_REG_DELAY_TSLCH_MASK)
+ << CQSPI_REG_DELAY_TSLCH_LSB;
+ reg |= (tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
+ << CQSPI_REG_DELAY_TSD2D_LSB;
+ writel(reg, iobase + CQSPI_REG_DELAY);
+}
+
+static void cqspi_config_baudrate_div(struct cqspi_st *cqspi)
+{
+ const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
+ void __iomem *reg_base = cqspi->iobase;
+ u32 reg, div;
+
+ /* Recalculate the baudrate divisor based on QSPI specification. */
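+ /* e.g. a 400 MHz ref clock and a 50 MHz sclk yield div = 3 (divide by 8). */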
+ div = DIV_ROUND_UP(ref_clk_hz, 2 * cqspi->sclk) - 1;
+
+ reg = readl(reg_base + CQSPI_REG_CONFIG);
+ reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);
+ reg |= (div & CQSPI_REG_CONFIG_BAUD_MASK) << CQSPI_REG_CONFIG_BAUD_LSB;
+ writel(reg, reg_base + CQSPI_REG_CONFIG);
+}
+
+static void cqspi_readdata_capture(struct cqspi_st *cqspi,
+ const unsigned int bypass,
+ const unsigned int delay)
+{
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int reg;
+
+ reg = readl(reg_base + CQSPI_REG_READCAPTURE);
+
+ if (bypass)
+ reg |= (1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
+ else
+ reg &= ~(1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
+
+ reg &= ~(CQSPI_REG_READCAPTURE_DELAY_MASK
+ << CQSPI_REG_READCAPTURE_DELAY_LSB);
+
+ reg |= (delay & CQSPI_REG_READCAPTURE_DELAY_MASK)
+ << CQSPI_REG_READCAPTURE_DELAY_LSB;
+
+ writel(reg, reg_base + CQSPI_REG_READCAPTURE);
+}
+
+static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable)
+{
+ void __iomem *reg_base = cqspi->iobase;
+ unsigned int reg;
+
+ reg = readl(reg_base + CQSPI_REG_CONFIG);
+
+ if (enable)
+ reg |= CQSPI_REG_CONFIG_ENABLE_MASK;
+ else
+ reg &= ~CQSPI_REG_CONFIG_ENABLE_MASK;
+
+ writel(reg, reg_base + CQSPI_REG_CONFIG);
+}
+
+static void cqspi_configure(struct spi_nor *nor)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ const unsigned int sclk = f_pdata->clk_rate;
+ int switch_cs = (cqspi->current_cs != f_pdata->cs);
+ int switch_ck = (cqspi->sclk != sclk);
+
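+ /*
+ * A change in page size, erase size or address width also requires
+ * reprogramming the size registers, so treat it as a CS switch.
+ */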
+ if ((cqspi->current_page_size != nor->page_size) ||
+ (cqspi->current_erase_size != nor->mtd.erasesize) ||
+ (cqspi->current_addr_width != nor->addr_width))
+ switch_cs = 1;
+
+ if (switch_cs || switch_ck)
+ cqspi_controller_enable(cqspi, 0);
+
+ /* Switch chip select. */
+ if (switch_cs) {
+ cqspi->current_cs = f_pdata->cs;
+ cqspi_configure_cs_and_sizes(nor);
+ }
+
+ /* Setup baudrate divisor and delays */
+ if (switch_ck) {
+ cqspi->sclk = sclk;
+ cqspi_config_baudrate_div(cqspi);
+ cqspi_delay(nor);
+ cqspi_readdata_capture(cqspi, 1, f_pdata->read_delay);
+ }
+
+ if (switch_cs || switch_ck)
+ cqspi_controller_enable(cqspi, 1);
+}
+
+static int cqspi_set_protocol(struct spi_nor *nor, const int read)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+
+ f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE;
+ f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE;
+ f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
+
+ if (read) {
+ switch (nor->flash_read) {
+ case SPI_NOR_NORMAL:
+ case SPI_NOR_FAST:
+ f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
+ break;
+ case SPI_NOR_DUAL:
+ f_pdata->data_width = CQSPI_INST_TYPE_DUAL;
+ break;
+ case SPI_NOR_QUAD:
+ f_pdata->data_width = CQSPI_INST_TYPE_QUAD;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ cqspi_configure(nor);
+
+ return 0;
+}
+
+static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
+ size_t len, const u_char *buf)
+{
+ int ret;
+
+ ret = cqspi_set_protocol(nor, 0);
+ if (ret)
+ return ret;
+
+ ret = cqspi_indirect_write_setup(nor, to);
+ if (ret)
+ return ret;
+
+ ret = cqspi_indirect_write_execute(nor, buf, len);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
+ size_t len, u_char *buf)
+{
+ int ret;
+
+ ret = cqspi_set_protocol(nor, 1);
+ if (ret)
+ return ret;
+
+ ret = cqspi_indirect_read_setup(nor, from);
+ if (ret)
+ return ret;
+
+ ret = cqspi_indirect_read_execute(nor, buf, len);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static int cqspi_erase(struct spi_nor *nor, loff_t offs)
+{
+ int ret;
+
+ ret = cqspi_set_protocol(nor, 0);
+ if (ret)
+ return ret;
+
+ /* Send write enable, then erase commands. */
+ ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
+ if (ret)
+ return ret;
+
+ /* Set up command buffer. */
+ ret = cqspi_command_write_addr(nor, nor->erase_opcode, offs);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int cqspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+
+ mutex_lock(&cqspi->bus_mutex);
+
+ return 0;
+}
+
+static void cqspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+
+ mutex_unlock(&cqspi->bus_mutex);
+}
+
+static int cqspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+ int ret;
+
+ ret = cqspi_set_protocol(nor, 0);
+ if (!ret)
+ ret = cqspi_command_read(nor, &opcode, 1, buf, len);
+
+ return ret;
+}
+
+static int cqspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+ int ret;
+
+ ret = cqspi_set_protocol(nor, 0);
+ if (!ret)
+ ret = cqspi_command_write(nor, opcode, buf, len);
+
+ return ret;
+}
+
+static int cqspi_of_get_flash_pdata(struct platform_device *pdev,
+ struct cqspi_flash_pdata *f_pdata,
+ struct device_node *np)
+{
+ if (of_property_read_u32(np, "cdns,read-delay", &f_pdata->read_delay)) {
+ dev_err(&pdev->dev, "couldn't determine read-delay\n");
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "cdns,tshsl-ns", &f_pdata->tshsl_ns)) {
+ dev_err(&pdev->dev, "couldn't determine tshsl-ns\n");
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "cdns,tsd2d-ns", &f_pdata->tsd2d_ns)) {
+ dev_err(&pdev->dev, "couldn't determine tsd2d-ns\n");
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "cdns,tchsh-ns", &f_pdata->tchsh_ns)) {
+ dev_err(&pdev->dev, "couldn't determine tchsh-ns\n");
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "cdns,tslch-ns", &f_pdata->tslch_ns)) {
+ dev_err(&pdev->dev, "couldn't determine tslch-ns\n");
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "spi-max-frequency", &f_pdata->clk_rate)) {
+ dev_err(&pdev->dev, "couldn't determine spi-max-frequency\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int cqspi_of_get_pdata(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct cqspi_st *cqspi = platform_get_drvdata(pdev);
+
+ cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs");
+
+ if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) {
+ dev_err(&pdev->dev, "couldn't determine fifo-depth\n");
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) {
+ dev_err(&pdev->dev, "couldn't determine fifo-width\n");
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "cdns,trigger-address",
+ &cqspi->trigger_address)) {
+ dev_err(&pdev->dev, "couldn't determine trigger-address\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static void cqspi_controller_init(struct cqspi_st *cqspi)
+{
+ cqspi_controller_enable(cqspi, 0);
+
+ /* Configure the remap address register, no remap */
+ writel(0, cqspi->iobase + CQSPI_REG_REMAP);
+
+ /* Disable all interrupts. */
+ writel(0, cqspi->iobase + CQSPI_REG_IRQMASK);
+
+ /* Configure the SRAM split to 1:1. */
+ writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION);
+
+ /* Load indirect trigger address. */
+ writel(cqspi->trigger_address,
+ cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER);
+
+ /* Program read watermark -- 1/2 of the FIFO. */
+ writel(cqspi->fifo_depth * cqspi->fifo_width / 2,
+ cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK);
+ /* Program write watermark -- 1/8 of the FIFO. */
+ writel(cqspi->fifo_depth * cqspi->fifo_width / 8,
+ cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK);
+
+ cqspi_controller_enable(cqspi, 1);
+}
+
+static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
+{
+ struct platform_device *pdev = cqspi->pdev;
+ struct device *dev = &pdev->dev;
+ struct cqspi_flash_pdata *f_pdata;
+ struct spi_nor *nor;
+ struct mtd_info *mtd;
+ unsigned int cs;
+ int i, ret;
+
+ /* Get flash device data */
+ for_each_available_child_of_node(dev->of_node, np) {
+ if (of_property_read_u32(np, "reg", &cs)) {
+ dev_err(dev, "Couldn't determine chip select.\n");
+ goto err;
+ }
+
+ if (cs > CQSPI_MAX_CHIPSELECT) {
+ dev_err(dev, "Chip select %d out of range.\n", cs);
+ goto err;
+ }
+
+ f_pdata = &cqspi->f_pdata[cs];
+ f_pdata->cqspi = cqspi;
+ f_pdata->cs = cs;
+
+ ret = cqspi_of_get_flash_pdata(pdev, f_pdata, np);
+ if (ret)
+ goto err;
+
+ nor = &f_pdata->nor;
+ mtd = &nor->mtd;
+
+ mtd->priv = nor;
+
+ nor->dev = dev;
+ spi_nor_set_flash_node(nor, np);
+ nor->priv = f_pdata;
+
+ nor->read_reg = cqspi_read_reg;
+ nor->write_reg = cqspi_write_reg;
+ nor->read = cqspi_read;
+ nor->write = cqspi_write;
+ nor->erase = cqspi_erase;
+ nor->prepare = cqspi_prep;
+ nor->unprepare = cqspi_unprep;
+
+ mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%s.%d",
+ dev_name(dev), cs);
+ if (!mtd->name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = spi_nor_scan(nor, NULL, SPI_NOR_QUAD);
+ if (ret)
+ goto err;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ goto err;
+
+ f_pdata->registered = true;
+ }
+
+ return 0;
+
+err:
+ for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++)
+ if (cqspi->f_pdata[i].registered)
+ mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd);
+ return ret;
+}
+
+static int cqspi_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct cqspi_st *cqspi;
+ struct resource *res;
+ struct resource *res_ahb;
+ int ret;
+ int irq;
+
+ cqspi = devm_kzalloc(dev, sizeof(*cqspi), GFP_KERNEL);
+ if (!cqspi)
+ return -ENOMEM;
+
+ mutex_init(&cqspi->bus_mutex);
+ cqspi->pdev = pdev;
+ platform_set_drvdata(pdev, cqspi);
+
+ /* Obtain configuration from OF. */
+ ret = cqspi_of_get_pdata(pdev);
+ if (ret) {
+ dev_err(dev, "Cannot get mandatory OF data.\n");
+ return -ENODEV;
+ }
+
+ /* Obtain QSPI clock. */
+ cqspi->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(cqspi->clk)) {
+ dev_err(dev, "Cannot claim QSPI clock.\n");
+ return PTR_ERR(cqspi->clk);
+ }
+
+ /* Obtain and remap controller address. */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cqspi->iobase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cqspi->iobase)) {
+ dev_err(dev, "Cannot remap controller address.\n");
+ return PTR_ERR(cqspi->iobase);
+ }
+
+ /* Obtain and remap AHB address. */
+ res_ahb = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ cqspi->ahb_base = devm_ioremap_resource(dev, res_ahb);
+ if (IS_ERR(cqspi->ahb_base)) {
+ dev_err(dev, "Cannot remap AHB address.\n");
+ return PTR_ERR(cqspi->ahb_base);
+ }
+
+ init_completion(&cqspi->transfer_complete);
+
+ /* Obtain IRQ line. */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "Cannot obtain IRQ.\n");
+ return -ENXIO;
+ }
+
+ ret = clk_prepare_enable(cqspi->clk);
+ if (ret) {
+ dev_err(dev, "Cannot enable QSPI clock.\n");
+ return ret;
+ }
+
+ cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
+
+ ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
+ pdev->name, cqspi);
+ if (ret) {
+ dev_err(dev, "Cannot request IRQ.\n");
+ goto probe_irq_failed;
+ }
+
+ cqspi_wait_idle(cqspi);
+ cqspi_controller_init(cqspi);
+ cqspi->current_cs = -1;
+ cqspi->sclk = 0;
+
+ ret = cqspi_setup_flash(cqspi, np);
+ if (ret) {
+ dev_err(dev, "Cadence QSPI NOR probe failed %d\n", ret);
+ goto probe_setup_failed;
+ }
+
+ return ret;
+probe_setup_failed:
+ cqspi_controller_enable(cqspi, 0);
+probe_irq_failed:
+ clk_disable_unprepare(cqspi->clk);
+ return ret;
+}
+
+static int cqspi_remove(struct platform_device *pdev)
+{
+ struct cqspi_st *cqspi = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++)
+ if (cqspi->f_pdata[i].registered)
+ mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd);
+
+ cqspi_controller_enable(cqspi, 0);
+
+ clk_disable_unprepare(cqspi->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cqspi_suspend(struct device *dev)
+{
+ struct cqspi_st *cqspi = dev_get_drvdata(dev);
+
+ cqspi_controller_enable(cqspi, 0);
+ return 0;
+}
+
+static int cqspi_resume(struct device *dev)
+{
+ struct cqspi_st *cqspi = dev_get_drvdata(dev);
+
+ cqspi_controller_enable(cqspi, 1);
+ return 0;
+}
+
+static const struct dev_pm_ops cqspi_dev_pm_ops = {
+ .suspend = cqspi_suspend,
+ .resume = cqspi_resume,
+};
+
+#define CQSPI_DEV_PM_OPS (&cqspi_dev_pm_ops)
+#else
+#define CQSPI_DEV_PM_OPS NULL
+#endif
+
+static const struct of_device_id cqspi_dt_ids[] = {
+ {.compatible = "cdns,qspi-nor",},
+ { /* end of table */ }
+};
+
+MODULE_DEVICE_TABLE(of, cqspi_dt_ids);
+
+static struct platform_driver cqspi_platform_driver = {
+ .probe = cqspi_probe,
+ .remove = cqspi_remove,
+ .driver = {
+ .name = CQSPI_NAME,
+ .pm = CQSPI_DEV_PM_OPS,
+ .of_match_table = cqspi_dt_ids,
+ },
+};
+
+module_platform_driver(cqspi_platform_driver);
+
+MODULE_DESCRIPTION("Cadence QSPI Controller Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" CQSPI_NAME);
+MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
+MODULE_AUTHOR("Graham Moore <grmoore@opensource.altera.com>");
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
index 9ab2b51d5..5c82e4ef1 100644
--- a/drivers/mtd/spi-nor/fsl-quadspi.c
+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -618,9 +618,9 @@ static inline void fsl_qspi_invalid(struct fsl_qspi *q)
qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
}
-static int fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor,
+static ssize_t fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor,
u8 opcode, unsigned int to, u32 *txbuf,
- unsigned count, size_t *retlen)
+ unsigned count)
{
int ret, i, j;
u32 tmp;
@@ -647,8 +647,8 @@ static int fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor,
/* Trigger it */
ret = fsl_qspi_runcmd(q, opcode, to, count);
- if (ret == 0 && retlen)
- *retlen += count;
+ if (ret == 0)
+ return count;
return ret;
}
@@ -859,7 +859,9 @@ static int fsl_qspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
} else if (len > 0) {
ret = fsl_qspi_nor_write(q, nor, opcode, 0,
- (u32 *)buf, len, NULL);
+ (u32 *)buf, len);
+ if (ret > 0)
+ return 0;
} else {
dev_err(q->dev, "invalid cmd %d\n", opcode);
ret = -EINVAL;
@@ -868,20 +870,20 @@ static int fsl_qspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
return ret;
}
-static void fsl_qspi_write(struct spi_nor *nor, loff_t to,
- size_t len, size_t *retlen, const u_char *buf)
+static ssize_t fsl_qspi_write(struct spi_nor *nor, loff_t to,
+ size_t len, const u_char *buf)
{
struct fsl_qspi *q = nor->priv;
-
- fsl_qspi_nor_write(q, nor, nor->program_opcode, to,
- (u32 *)buf, len, retlen);
+ ssize_t ret = fsl_qspi_nor_write(q, nor, nor->program_opcode, to,
+ (u32 *)buf, len);
/* invalid the data in the AHB buffer. */
fsl_qspi_invalid(q);
+ return ret;
}
-static int fsl_qspi_read(struct spi_nor *nor, loff_t from,
- size_t len, size_t *retlen, u_char *buf)
+static ssize_t fsl_qspi_read(struct spi_nor *nor, loff_t from,
+ size_t len, u_char *buf)
{
struct fsl_qspi *q = nor->priv;
u8 cmd = nor->read_opcode;
@@ -923,8 +925,7 @@ static int fsl_qspi_read(struct spi_nor *nor, loff_t from,
memcpy(buf, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs,
len);
- *retlen += len;
- return 0;
+ return len;
}
static int fsl_qspi_erase(struct spi_nor *nor, loff_t offs)
diff --git a/drivers/mtd/spi-nor/hisi-sfc.c b/drivers/mtd/spi-nor/hisi-sfc.c
new file mode 100644
index 000000000..20378b0d5
--- /dev/null
+++ b/drivers/mtd/spi-nor/hisi-sfc.c
@@ -0,0 +1,489 @@
+/*
+ * HiSilicon SPI Nor Flash Controller Driver
+ *
+ * Copyright (c) 2015-2016 HiSilicon Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* Hardware register offsets and field definitions */
+#define FMC_CFG 0x00
+#define FMC_CFG_OP_MODE_MASK BIT_MASK(0)
+#define FMC_CFG_OP_MODE_BOOT 0
+#define FMC_CFG_OP_MODE_NORMAL 1
+#define FMC_CFG_FLASH_SEL(type) (((type) & 0x3) << 1)
+#define FMC_CFG_FLASH_SEL_MASK 0x6
+#define FMC_ECC_TYPE(type) (((type) & 0x7) << 5)
+#define FMC_ECC_TYPE_MASK GENMASK(7, 5)
+#define SPI_NOR_ADDR_MODE_MASK BIT_MASK(10)
+#define SPI_NOR_ADDR_MODE_3BYTES (0x0 << 10)
+#define SPI_NOR_ADDR_MODE_4BYTES (0x1 << 10)
+#define FMC_GLOBAL_CFG 0x04
+#define FMC_GLOBAL_CFG_WP_ENABLE BIT(6)
+#define FMC_SPI_TIMING_CFG 0x08
+#define TIMING_CFG_TCSH(nr) (((nr) & 0xf) << 8)
+#define TIMING_CFG_TCSS(nr) (((nr) & 0xf) << 4)
+#define TIMING_CFG_TSHSL(nr) ((nr) & 0xf)
+#define CS_HOLD_TIME 0x6
+#define CS_SETUP_TIME 0x6
+#define CS_DESELECT_TIME 0xf
+#define FMC_INT 0x18
+#define FMC_INT_OP_DONE BIT(0)
+#define FMC_INT_CLR 0x20
+#define FMC_CMD 0x24
+#define FMC_CMD_CMD1(cmd) ((cmd) & 0xff)
+#define FMC_ADDRL 0x2c
+#define FMC_OP_CFG 0x30
+#define OP_CFG_FM_CS(cs) ((cs) << 11)
+#define OP_CFG_MEM_IF_TYPE(type) (((type) & 0x7) << 7)
+#define OP_CFG_ADDR_NUM(addr) (((addr) & 0x7) << 4)
+#define OP_CFG_DUMMY_NUM(dummy) ((dummy) & 0xf)
+#define FMC_DATA_NUM 0x38
+#define FMC_DATA_NUM_CNT(cnt) ((cnt) & GENMASK(13, 0))
+#define FMC_OP 0x3c
+#define FMC_OP_DUMMY_EN BIT(8)
+#define FMC_OP_CMD1_EN BIT(7)
+#define FMC_OP_ADDR_EN BIT(6)
+#define FMC_OP_WRITE_DATA_EN BIT(5)
+#define FMC_OP_READ_DATA_EN BIT(2)
+#define FMC_OP_READ_STATUS_EN BIT(1)
+#define FMC_OP_REG_OP_START BIT(0)
+#define FMC_DMA_LEN 0x40
+#define FMC_DMA_LEN_SET(len) ((len) & GENMASK(27, 0))
+#define FMC_DMA_SADDR_D0 0x4c
+#define HIFMC_DMA_MAX_LEN (4096)
+#define HIFMC_DMA_MASK (HIFMC_DMA_MAX_LEN - 1)
+#define FMC_OP_DMA 0x68
+#define OP_CTRL_RD_OPCODE(code) (((code) & 0xff) << 16)
+#define OP_CTRL_WR_OPCODE(code) (((code) & 0xff) << 8)
+#define OP_CTRL_RW_OP(op) ((op) << 1)
+#define OP_CTRL_DMA_OP_READY BIT(0)
+#define FMC_OP_READ 0x0
+#define FMC_OP_WRITE 0x1
+#define FMC_WAIT_TIMEOUT 1000000
+
+enum hifmc_iftype {
+ IF_TYPE_STD,
+ IF_TYPE_DUAL,
+ IF_TYPE_DIO,
+ IF_TYPE_QUAD,
+ IF_TYPE_QIO,
+};
+
+struct hifmc_priv {
+ u32 chipselect;
+ u32 clkrate;
+ struct hifmc_host *host;
+};
+
+#define HIFMC_MAX_CHIP_NUM 2
+struct hifmc_host {
+ struct device *dev;
+ struct mutex lock;
+
+ void __iomem *regbase;
+ void __iomem *iobase;
+ struct clk *clk;
+ void *buffer;
+ dma_addr_t dma_buffer;
+
+ struct spi_nor *nor[HIFMC_MAX_CHIP_NUM];
+ u32 num_chip;
+};
+
+static inline int wait_op_finish(struct hifmc_host *host)
+{
+ u32 reg;
+
+ return readl_poll_timeout(host->regbase + FMC_INT, reg,
+ (reg & FMC_INT_OP_DONE), 0, FMC_WAIT_TIMEOUT);
+}
+
+static int get_if_type(enum read_mode flash_read)
+{
+ enum hifmc_iftype if_type;
+
+ switch (flash_read) {
+ case SPI_NOR_DUAL:
+ if_type = IF_TYPE_DUAL;
+ break;
+ case SPI_NOR_QUAD:
+ if_type = IF_TYPE_QUAD;
+ break;
+ case SPI_NOR_NORMAL:
+ case SPI_NOR_FAST:
+ default:
+ if_type = IF_TYPE_STD;
+ break;
+ }
+
+ return if_type;
+}
+
+static void hisi_spi_nor_init(struct hifmc_host *host)
+{
+ u32 reg;
+
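+ /* Program chip select hold, setup and deselect times. */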
+ reg = TIMING_CFG_TCSH(CS_HOLD_TIME)
+ | TIMING_CFG_TCSS(CS_SETUP_TIME)
+ | TIMING_CFG_TSHSL(CS_DESELECT_TIME);
+ writel(reg, host->regbase + FMC_SPI_TIMING_CFG);
+}
+
+static int hisi_spi_nor_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ struct hifmc_priv *priv = nor->priv;
+ struct hifmc_host *host = priv->host;
+ int ret;
+
+ mutex_lock(&host->lock);
+
+ ret = clk_set_rate(host->clk, priv->clkrate);
+ if (ret)
+ goto out;
+
+ ret = clk_prepare_enable(host->clk);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ mutex_unlock(&host->lock);
+ return ret;
+}
+
+static void hisi_spi_nor_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+ struct hifmc_priv *priv = nor->priv;
+ struct hifmc_host *host = priv->host;
+
+ clk_disable_unprepare(host->clk);
+ mutex_unlock(&host->lock);
+}
+
+static int hisi_spi_nor_op_reg(struct spi_nor *nor,
+ u8 opcode, int len, u8 optype)
+{
+ struct hifmc_priv *priv = nor->priv;
+ struct hifmc_host *host = priv->host;
+ u32 reg;
+
+ reg = FMC_CMD_CMD1(opcode);
+ writel(reg, host->regbase + FMC_CMD);
+
+ reg = FMC_DATA_NUM_CNT(len);
+ writel(reg, host->regbase + FMC_DATA_NUM);
+
+ reg = OP_CFG_FM_CS(priv->chipselect);
+ writel(reg, host->regbase + FMC_OP_CFG);
+
+ writel(0xff, host->regbase + FMC_INT_CLR);
+ reg = FMC_OP_CMD1_EN | FMC_OP_REG_OP_START | optype;
+ writel(reg, host->regbase + FMC_OP);
+
+ return wait_op_finish(host);
+}
+
+static int hisi_spi_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
+ int len)
+{
+ struct hifmc_priv *priv = nor->priv;
+ struct hifmc_host *host = priv->host;
+ int ret;
+
+ ret = hisi_spi_nor_op_reg(nor, opcode, len, FMC_OP_READ_DATA_EN);
+ if (ret)
+ return ret;
+
+ memcpy_fromio(buf, host->iobase, len);
+ return 0;
+}
+
+static int hisi_spi_nor_write_reg(struct spi_nor *nor, u8 opcode,
+ u8 *buf, int len)
+{
+ struct hifmc_priv *priv = nor->priv;
+ struct hifmc_host *host = priv->host;
+
+ if (len)
+ memcpy_toio(host->iobase, buf, len);
+
+ return hisi_spi_nor_op_reg(nor, opcode, len, FMC_OP_WRITE_DATA_EN);
+}
+
+static int hisi_spi_nor_dma_transfer(struct spi_nor *nor, loff_t start_off,
+ dma_addr_t dma_buf, size_t len, u8 op_type)
+{
+ struct hifmc_priv *priv = nor->priv;
+ struct hifmc_host *host = priv->host;
+ u8 if_type = 0;
+ u32 reg;
+
+ reg = readl(host->regbase + FMC_CFG);
+ reg &= ~(FMC_CFG_OP_MODE_MASK | SPI_NOR_ADDR_MODE_MASK);
+ reg |= FMC_CFG_OP_MODE_NORMAL;
+ reg |= (nor->addr_width == 4) ? SPI_NOR_ADDR_MODE_4BYTES
+ : SPI_NOR_ADDR_MODE_3BYTES;
+ writel(reg, host->regbase + FMC_CFG);
+
+ writel(start_off, host->regbase + FMC_ADDRL);
+ writel(dma_buf, host->regbase + FMC_DMA_SADDR_D0);
+ writel(FMC_DMA_LEN_SET(len), host->regbase + FMC_DMA_LEN);
+
+ reg = OP_CFG_FM_CS(priv->chipselect);
+ if_type = get_if_type(nor->flash_read);
+ reg |= OP_CFG_MEM_IF_TYPE(if_type);
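+ /* nor->read_dummy is in clock cycles; the controller expects dummy bytes. */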
+ if (op_type == FMC_OP_READ)
+ reg |= OP_CFG_DUMMY_NUM(nor->read_dummy >> 3);
+ writel(reg, host->regbase + FMC_OP_CFG);
+
+ writel(0xff, host->regbase + FMC_INT_CLR);
+ reg = OP_CTRL_RW_OP(op_type) | OP_CTRL_DMA_OP_READY;
+ reg |= (op_type == FMC_OP_READ)
+ ? OP_CTRL_RD_OPCODE(nor->read_opcode)
+ : OP_CTRL_WR_OPCODE(nor->program_opcode);
+ writel(reg, host->regbase + FMC_OP_DMA);
+
+ return wait_op_finish(host);
+}
+
+static ssize_t hisi_spi_nor_read(struct spi_nor *nor, loff_t from, size_t len,
+ u_char *read_buf)
+{
+ struct hifmc_priv *priv = nor->priv;
+ struct hifmc_host *host = priv->host;
+ size_t offset;
+ int ret;
+
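+ /* The DMA bounce buffer is HIFMC_DMA_MAX_LEN bytes; split larger reads. */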
+ for (offset = 0; offset < len; offset += HIFMC_DMA_MAX_LEN) {
+ size_t trans = min_t(size_t, HIFMC_DMA_MAX_LEN, len - offset);
+
+ ret = hisi_spi_nor_dma_transfer(nor,
+ from + offset, host->dma_buffer, trans, FMC_OP_READ);
+ if (ret) {
+ dev_warn(nor->dev, "DMA read timeout\n");
+ return ret;
+ }
+ memcpy(read_buf + offset, host->buffer, trans);
+ }
+
+ return len;
+}
+
+static ssize_t hisi_spi_nor_write(struct spi_nor *nor, loff_t to,
+ size_t len, const u_char *write_buf)
+{
+ struct hifmc_priv *priv = nor->priv;
+ struct hifmc_host *host = priv->host;
+ size_t offset;
+ int ret;
+
+ for (offset = 0; offset < len; offset += HIFMC_DMA_MAX_LEN) {
+ size_t trans = min_t(size_t, HIFMC_DMA_MAX_LEN, len - offset);
+
+ memcpy(host->buffer, write_buf + offset, trans);
+ ret = hisi_spi_nor_dma_transfer(nor,
+ to + offset, host->dma_buffer, trans, FMC_OP_WRITE);
+ if (ret) {
+ dev_warn(nor->dev, "DMA write timeout\n");
+ return ret;
+ }
+ }
+
+ return len;
+}
+
+/**
+ * hisi_spi_nor_register - get SPI flash device information and register
+ * it as an MTD device.
+ * @np: device node of the flash chip
+ * @host: HiSilicon SPI NOR flash controller host
+ */
+static int hisi_spi_nor_register(struct device_node *np,
+ struct hifmc_host *host)
+{
+ struct device *dev = host->dev;
+ struct spi_nor *nor;
+ struct hifmc_priv *priv;
+ struct mtd_info *mtd;
+ int ret;
+
+ nor = devm_kzalloc(dev, sizeof(*nor), GFP_KERNEL);
+ if (!nor)
+ return -ENOMEM;
+
+ nor->dev = dev;
+ spi_nor_set_flash_node(nor, np);
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ret = of_property_read_u32(np, "reg", &priv->chipselect);
+ if (ret) {
+ dev_err(dev, "There's no reg property for %s\n",
+ np->full_name);
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "spi-max-frequency",
+ &priv->clkrate);
+ if (ret) {
+ dev_err(dev, "There's no spi-max-frequency property for %s\n",
+ np->full_name);
+ return ret;
+ }
+ priv->host = host;
+ nor->priv = priv;
+
+ nor->prepare = hisi_spi_nor_prep;
+ nor->unprepare = hisi_spi_nor_unprep;
+ nor->read_reg = hisi_spi_nor_read_reg;
+ nor->write_reg = hisi_spi_nor_write_reg;
+ nor->read = hisi_spi_nor_read;
+ nor->write = hisi_spi_nor_write;
+ nor->erase = NULL;
+ ret = spi_nor_scan(nor, NULL, SPI_NOR_QUAD);
+ if (ret)
+ return ret;
+
+ mtd = &nor->mtd;
+ mtd->name = np->name;
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ return ret;
+
+ host->nor[host->num_chip] = nor;
+ host->num_chip++;
+ return 0;
+}
+
+static void hisi_spi_nor_unregister_all(struct hifmc_host *host)
+{
+ int i;
+
+ for (i = 0; i < host->num_chip; i++)
+ mtd_device_unregister(&host->nor[i]->mtd);
+}
+
+static int hisi_spi_nor_register_all(struct hifmc_host *host)
+{
+ struct device *dev = host->dev;
+ struct device_node *np;
+ int ret;
+
+ for_each_available_child_of_node(dev->of_node, np) {
+ ret = hisi_spi_nor_register(np, host);
+ if (ret)
+ goto fail;
+
+ if (host->num_chip == HIFMC_MAX_CHIP_NUM) {
+ dev_warn(dev, "Flash device number exceeds the maximum chipselect number\n");
+ break;
+ }
+ }
+
+ return 0;
+
+fail:
+ hisi_spi_nor_unregister_all(host);
+ return ret;
+}
+
+static int hisi_spi_nor_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct hifmc_host *host;
+ int ret;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, host);
+ host->dev = dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control");
+ host->regbase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(host->regbase))
+ return PTR_ERR(host->regbase);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "memory");
+ host->iobase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(host->iobase))
+ return PTR_ERR(host->iobase);
+
+ host->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(host->clk))
+ return PTR_ERR(host->clk);
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_warn(dev, "Unable to set dma mask\n");
+ return ret;
+ }
+
+ host->buffer = dmam_alloc_coherent(dev, HIFMC_DMA_MAX_LEN,
+ &host->dma_buffer, GFP_KERNEL);
+ if (!host->buffer)
+ return -ENOMEM;
+
+ mutex_init(&host->lock);
+ ret = clk_prepare_enable(host->clk);
+ if (ret)
+ return ret;
+ hisi_spi_nor_init(host);
+ ret = hisi_spi_nor_register_all(host);
+ if (ret)
+ mutex_destroy(&host->lock);
+
+ clk_disable_unprepare(host->clk);
+ return ret;
+}
+
+static int hisi_spi_nor_remove(struct platform_device *pdev)
+{
+ struct hifmc_host *host = platform_get_drvdata(pdev);
+
+ hisi_spi_nor_unregister_all(host);
+ mutex_destroy(&host->lock);
+ clk_disable_unprepare(host->clk);
+ return 0;
+}
+
+static const struct of_device_id hisi_spi_nor_dt_ids[] = {
+ { .compatible = "hisilicon,fmc-spi-nor"},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, hisi_spi_nor_dt_ids);
+
+static struct platform_driver hisi_spi_nor_driver = {
+ .driver = {
+ .name = "hisi-sfc",
+ .of_match_table = hisi_spi_nor_dt_ids,
+ },
+ .probe = hisi_spi_nor_probe,
+ .remove = hisi_spi_nor_remove,
+};
+module_platform_driver(hisi_spi_nor_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("HiSilicon SPI Nor Flash Controller Driver");
diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c
index 8bed1a4cb..e661877c2 100644
--- a/drivers/mtd/spi-nor/mtk-quadspi.c
+++ b/drivers/mtd/spi-nor/mtk-quadspi.c
@@ -21,7 +21,6 @@
#include <linux/ioport.h>
#include <linux/math64.h>
#include <linux/module.h>
-#include <linux/mtd/mtd.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -243,8 +242,8 @@ static void mt8173_nor_set_addr(struct mt8173_nor *mt8173_nor, u32 addr)
writeb(addr & 0xff, mt8173_nor->base + MTK_NOR_RADR3_REG);
}
-static int mt8173_nor_read(struct spi_nor *nor, loff_t from, size_t length,
- size_t *retlen, u_char *buffer)
+static ssize_t mt8173_nor_read(struct spi_nor *nor, loff_t from, size_t length,
+ u_char *buffer)
{
int i, ret;
int addr = (int)from;
@@ -255,13 +254,13 @@ static int mt8173_nor_read(struct spi_nor *nor, loff_t from, size_t length,
mt8173_nor_set_read_mode(mt8173_nor);
mt8173_nor_set_addr(mt8173_nor, addr);
- for (i = 0; i < length; i++, (*retlen)++) {
+ for (i = 0; i < length; i++) {
ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PIO_READ_CMD);
if (ret < 0)
return ret;
buf[i] = readb(mt8173_nor->base + MTK_NOR_RDATA_REG);
}
- return 0;
+ return length;
}
static int mt8173_nor_write_single_byte(struct mt8173_nor *mt8173_nor,
@@ -297,36 +296,44 @@ static int mt8173_nor_write_buffer(struct mt8173_nor *mt8173_nor, int addr,
return mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_WR_CMD);
}
-static void mt8173_nor_write(struct spi_nor *nor, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
+static ssize_t mt8173_nor_write(struct spi_nor *nor, loff_t to, size_t len,
+ const u_char *buf)
{
int ret;
struct mt8173_nor *mt8173_nor = nor->priv;
+ size_t i;
ret = mt8173_nor_write_buffer_enable(mt8173_nor);
- if (ret < 0)
+ if (ret < 0) {
dev_warn(mt8173_nor->dev, "write buffer enable failed!\n");
+ return ret;
+ }
- while (len >= SFLASH_WRBUF_SIZE) {
+ for (i = 0; i + SFLASH_WRBUF_SIZE <= len; i += SFLASH_WRBUF_SIZE) {
ret = mt8173_nor_write_buffer(mt8173_nor, to, buf);
- if (ret < 0)
+ if (ret < 0) {
dev_err(mt8173_nor->dev, "write buffer failed!\n");
- len -= SFLASH_WRBUF_SIZE;
+ return ret;
+ }
to += SFLASH_WRBUF_SIZE;
buf += SFLASH_WRBUF_SIZE;
- (*retlen) += SFLASH_WRBUF_SIZE;
}
ret = mt8173_nor_write_buffer_disable(mt8173_nor);
- if (ret < 0)
+ if (ret < 0) {
dev_warn(mt8173_nor->dev, "write buffer disable failed!\n");
+ return ret;
+ }
- if (len) {
- ret = mt8173_nor_write_single_byte(mt8173_nor, to, (int)len,
- (u8 *)buf);
- if (ret < 0)
+ if (i < len) {
+ ret = mt8173_nor_write_single_byte(mt8173_nor, to,
+ (int)(len - i), (u8 *)buf);
+ if (ret < 0) {
dev_err(mt8173_nor->dev, "write single byte failed!\n");
- (*retlen) += len;
+ return ret;
+ }
}
+
+ return len;
}
static int mt8173_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
diff --git a/drivers/mtd/spi-nor/nxp-spifi.c b/drivers/mtd/spi-nor/nxp-spifi.c
index ae428cb0e..73a14f409 100644
--- a/drivers/mtd/spi-nor/nxp-spifi.c
+++ b/drivers/mtd/spi-nor/nxp-spifi.c
@@ -172,8 +172,8 @@ static int nxp_spifi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
return nxp_spifi_wait_for_cmd(spifi);
}
-static int nxp_spifi_read(struct spi_nor *nor, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
+static ssize_t nxp_spifi_read(struct spi_nor *nor, loff_t from, size_t len,
+ u_char *buf)
{
struct nxp_spifi *spifi = nor->priv;
int ret;
@@ -183,24 +183,23 @@ static int nxp_spifi_read(struct spi_nor *nor, loff_t from, size_t len,
return ret;
memcpy_fromio(buf, spifi->flash_base + from, len);
- *retlen += len;
- return 0;
+ return len;
}
-static void nxp_spifi_write(struct spi_nor *nor, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
+static ssize_t nxp_spifi_write(struct spi_nor *nor, loff_t to, size_t len,
+ const u_char *buf)
{
struct nxp_spifi *spifi = nor->priv;
u32 cmd;
int ret;
+ size_t i;
ret = nxp_spifi_set_memory_mode_off(spifi);
if (ret)
- return;
+ return ret;
writel(to, spifi->io_base + SPIFI_ADDR);
- *retlen += len;
cmd = SPIFI_CMD_DOUT |
SPIFI_CMD_DATALEN(len) |
@@ -209,10 +208,14 @@ static void nxp_spifi_write(struct spi_nor *nor, loff_t to, size_t len,
SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
writel(cmd, spifi->io_base + SPIFI_CMD);
- while (len--)
- writeb(*buf++, spifi->io_base + SPIFI_DATA);
+ for (i = 0; i < len; i++)
+ writeb(buf[i], spifi->io_base + SPIFI_DATA);
+
+ ret = nxp_spifi_wait_for_cmd(spifi);
+ if (ret)
+ return ret;
- nxp_spifi_wait_for_cmd(spifi);
+ return len;
}
static int nxp_spifi_erase(struct spi_nor *nor, loff_t offs)
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index a6adb2785..d0fc165d7 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -830,10 +830,26 @@ static const struct flash_info spi_nor_ids[] = {
{ "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },
/* GigaDevice */
- { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) },
- { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) },
- { "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256, SECT_4K) },
+ {
+ "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
+ "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
+ "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
+ "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
/* Intel/Numonyx -- xxxs33b */
{ "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
@@ -871,6 +887,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
{ "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
{ "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+ { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
/* PMC */
{ "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
@@ -1031,8 +1048,25 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
if (ret)
return ret;
- ret = nor->read(nor, from, len, retlen, buf);
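+ /* nor->read() may return fewer bytes than requested; loop until done. */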
+ while (len) {
+ ret = nor->read(nor, from, len, buf);
+ if (ret == 0) {
+ /* We shouldn't see 0-length reads */
+ ret = -EIO;
+ goto read_err;
+ }
+ if (ret < 0)
+ goto read_err;
+
+ WARN_ON(ret > len);
+ *retlen += ret;
+ buf += ret;
+ from += ret;
+ len -= ret;
+ }
+ ret = 0;
+read_err:
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ);
return ret;
}
@@ -1060,10 +1094,14 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
nor->program_opcode = SPINOR_OP_BP;
/* write one byte. */
- nor->write(nor, to, 1, retlen, buf);
+ ret = nor->write(nor, to, 1, buf);
+ if (ret < 0)
+ goto sst_write_err;
+ WARN(ret != 1, "While writing 1 byte written %i bytes\n",
+ (int)ret);
ret = spi_nor_wait_till_ready(nor);
if (ret)
- goto time_out;
+ goto sst_write_err;
}
to += actual;
@@ -1072,10 +1110,14 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
nor->program_opcode = SPINOR_OP_AAI_WP;
/* write two bytes. */
- nor->write(nor, to, 2, retlen, buf + actual);
+ ret = nor->write(nor, to, 2, buf + actual);
+ if (ret < 0)
+ goto sst_write_err;
+ WARN(ret != 2, "While writing 2 bytes written %i bytes\n",
+ (int)ret);
ret = spi_nor_wait_till_ready(nor);
if (ret)
- goto time_out;
+ goto sst_write_err;
to += 2;
nor->sst_write_second = true;
}
@@ -1084,21 +1126,26 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
write_disable(nor);
ret = spi_nor_wait_till_ready(nor);
if (ret)
- goto time_out;
+ goto sst_write_err;
/* Write out trailing byte if it exists. */
if (actual != len) {
write_enable(nor);
nor->program_opcode = SPINOR_OP_BP;
- nor->write(nor, to, 1, retlen, buf + actual);
-
+ ret = nor->write(nor, to, 1, buf + actual);
+ if (ret < 0)
+ goto sst_write_err;
+ WARN(ret != 1, "While writing 1 byte written %i bytes\n",
+ (int)ret);
ret = spi_nor_wait_till_ready(nor);
if (ret)
- goto time_out;
+ goto sst_write_err;
write_disable(nor);
+ actual += 1;
}
-time_out:
+sst_write_err:
+ *retlen += actual;
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
return ret;
}
@@ -1112,8 +1159,8 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct spi_nor *nor = mtd_to_spi_nor(mtd);
- u32 page_offset, page_size, i;
- int ret;
+ size_t page_offset, page_remain, i;
+ ssize_t ret;
dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
@@ -1121,35 +1168,37 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
if (ret)
return ret;
- write_enable(nor);
-
- page_offset = to & (nor->page_size - 1);
+ for (i = 0; i < len; ) {
+ ssize_t written;
- /* do all the bytes fit onto one page? */
- if (page_offset + len <= nor->page_size) {
- nor->write(nor, to, len, retlen, buf);
- } else {
+ page_offset = (to + i) & (nor->page_size - 1);
+ WARN_ONCE(page_offset,
+ "Writing at offset %zu into a NOR page. Writing partial pages may decrease reliability and increase wear of NOR flash.",
+ page_offset);
/* the size of data remaining on the first page */
- page_size = nor->page_size - page_offset;
- nor->write(nor, to, page_size, retlen, buf);
-
- /* write everything in nor->page_size chunks */
- for (i = page_size; i < len; i += page_size) {
- page_size = len - i;
- if (page_size > nor->page_size)
- page_size = nor->page_size;
+ page_remain = min_t(size_t,
+ nor->page_size - page_offset, len - i);
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- goto write_err;
-
- write_enable(nor);
+ write_enable(nor);
+ ret = nor->write(nor, to + i, page_remain, buf + i);
+ if (ret < 0)
+ goto write_err;
+ written = ret;
- nor->write(nor, to + i, page_size, retlen, buf + i);
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto write_err;
+ *retlen += written;
+ i += written;
+ if (written != page_remain) {
+ dev_err(nor->dev,
+ "While writing %zu bytes written %zd bytes\n",
+ page_remain, written);
+ ret = -EIO;
+ goto write_err;
}
}
- ret = spi_nor_wait_till_ready(nor);
write_err:
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
return ret;
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index daf82ba7a..41b13d1cd 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -380,8 +380,7 @@ static int ssfdcr_readsect(struct mtd_blktrans_dev *dev,
" block_addr=%d\n", logic_sect_no, sectors_per_block, offset,
block_address);
- if (block_address >= ssfdc->map_len)
- BUG();
+ BUG_ON(block_address >= ssfdc->map_len);
block_address = ssfdc->logic_block_map[block_address];
diff --git a/drivers/mtd/tests/nandbiterrs.c b/drivers/mtd/tests/nandbiterrs.c
index 09a4ccac5..f26dec896 100644
--- a/drivers/mtd/tests/nandbiterrs.c
+++ b/drivers/mtd/tests/nandbiterrs.c
@@ -290,7 +290,7 @@ static int overwrite_test(void)
while (opno < max_overwrite) {
- err = rewrite_page(0);
+ err = write_page(0);
if (err)
break;
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index c1aaf0336..903becd31 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -175,6 +175,40 @@ static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec)
}
/**
+ * add_fastmap - add a Fastmap related physical eraseblock.
+ * @ai: attaching information
+ * @pnum: physical eraseblock number the VID header came from
+ * @vid_hdr: the volume identifier header
+ * @ec: erase counter of the physical eraseblock
+ *
+ * This function allocates a 'struct ubi_ainf_peb' object for a Fastmap
+ * physical eraseblock @pnum and adds it to the 'fastmap' list.
+ * Such blocks can be Fastmap super and data blocks, either from the most
+ * recent Fastmap we're attaching from or from old Fastmaps which will
+ * be erased.
+ */
+static int add_fastmap(struct ubi_attach_info *ai, int pnum,
+ struct ubi_vid_hdr *vid_hdr, int ec)
+{
+ struct ubi_ainf_peb *aeb;
+
+ aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+ if (!aeb)
+ return -ENOMEM;
+
+ aeb->pnum = pnum;
+ aeb->vol_id = be32_to_cpu(vid_hdr->vol_id);
+ aeb->sqnum = be64_to_cpu(vid_hdr->sqnum);
+ aeb->ec = ec;
+ list_add(&aeb->u.list, &ai->fastmap);
+
+ dbg_bld("add to fastmap list: PEB %d, vol_id %d, sqnum: %llu", pnum,
+ aeb->vol_id, aeb->sqnum);
+
+ return 0;
+}
+
+/**
* validate_vid_hdr - check volume identifier header.
* @ubi: UBI device description object
* @vid_hdr: the volume identifier header to check
@@ -803,13 +837,26 @@ out_unlock:
return err;
}
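+/**
+ * vol_ignored - check whether a volume is ignored by the attach code.
+ * @vol_id: volume ID to check
+ *
+ * The layout volume is always ignored. When Fastmap support is compiled
+ * in, Fastmap volumes are ignored as well, since they are handled by the
+ * Fastmap attach path instead.
+ */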
+static bool vol_ignored(int vol_id)
+{
+ switch (vol_id) {
+ case UBI_LAYOUT_VOLUME_ID:
+ return true;
+ }
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ return ubi_is_fm_vol(vol_id);
+#else
+ return false;
+#endif
+}
+
/**
* scan_peb - scan and process UBI headers of a PEB.
* @ubi: UBI device description object
* @ai: attaching information
* @pnum: the physical eraseblock number
- * @vid: The volume ID of the found volume will be stored in this pointer
- * @sqnum: The sqnum of the found volume will be stored in this pointer
+ * @fast: true if we're scanning for a Fastmap
*
* This function reads UBI headers of PEB @pnum, checks them, and adds
* information about this PEB to the corresponding list or RB-tree in the
@@ -817,9 +864,9 @@ out_unlock:
* successfully handled and a negative error code in case of failure.
*/
static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
- int pnum, int *vid, unsigned long long *sqnum)
+ int pnum, bool fast)
{
- long long uninitialized_var(ec);
+ long long ec;
int err, bitflips = 0, vol_id = -1, ec_err = 0;
dbg_bld("scan PEB %d", pnum);
@@ -935,6 +982,20 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
*/
ai->maybe_bad_peb_count += 1;
case UBI_IO_BAD_HDR:
+ /*
+ * If we're facing a bad VID header we have to drop *all*
+ * Fastmap data structures we find. The most recent Fastmap
+ * could be bad and therefore there is a chance that we attach
+ * from an old one. On a sane MTD stack a PEB should never go
+ * bad all of a sudden, but reality is different.
+ * So, let's be paranoid and help find the root cause by
+ * falling back to scanning mode instead of attaching with a
+ * bad EBA table and causing data corruption which is hard to
+ * analyze.
+ */
+ if (fast)
+ ai->force_full_scan = 1;
+
if (ec_err)
/*
* Both headers are corrupted. There is a possibility
@@ -991,21 +1052,15 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
}
vol_id = be32_to_cpu(vidh->vol_id);
- if (vid)
- *vid = vol_id;
- if (sqnum)
- *sqnum = be64_to_cpu(vidh->sqnum);
- if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
+ if (vol_id > UBI_MAX_VOLUMES && !vol_ignored(vol_id)) {
int lnum = be32_to_cpu(vidh->lnum);
/* Unsupported internal volume */
switch (vidh->compat) {
case UBI_COMPAT_DELETE:
- if (vol_id != UBI_FM_SB_VOLUME_ID
- && vol_id != UBI_FM_DATA_VOLUME_ID) {
- ubi_msg(ubi, "\"delete\" compatible internal volume %d:%d found, will remove it",
- vol_id, lnum);
- }
+ ubi_msg(ubi, "\"delete\" compatible internal volume %d:%d found, will remove it",
+ vol_id, lnum);
+
err = add_to_list(ai, pnum, vol_id, lnum,
ec, 1, &ai->erase);
if (err)
@@ -1037,7 +1092,12 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
if (ec_err)
ubi_warn(ubi, "valid VID header but corrupted EC header at PEB %d",
pnum);
- err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
+
+ if (ubi_is_fm_vol(vol_id))
+ err = add_fastmap(ai, pnum, vidh, ec);
+ else
+ err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
+
if (err)
return err;
@@ -1186,6 +1246,10 @@ static void destroy_ai(struct ubi_attach_info *ai)
list_del(&aeb->u.list);
kmem_cache_free(ai->aeb_slab_cache, aeb);
}
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->fastmap, u.list) {
+ list_del(&aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ }
/* Destroy the volume RB-tree */
rb = ai->volumes.rb_node;
@@ -1245,7 +1309,7 @@ static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
cond_resched();
dbg_gen("process PEB %d", pnum);
- err = scan_peb(ubi, ai, pnum, NULL, NULL);
+ err = scan_peb(ubi, ai, pnum, false);
if (err < 0)
goto out_vidh;
}
@@ -1311,6 +1375,7 @@ static struct ubi_attach_info *alloc_ai(void)
INIT_LIST_HEAD(&ai->free);
INIT_LIST_HEAD(&ai->erase);
INIT_LIST_HEAD(&ai->alien);
+ INIT_LIST_HEAD(&ai->fastmap);
ai->volumes = RB_ROOT;
ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache",
sizeof(struct ubi_ainf_peb),
@@ -1326,7 +1391,7 @@ static struct ubi_attach_info *alloc_ai(void)
#ifdef CONFIG_MTD_UBI_FASTMAP
/**
- * scan_fastmap - try to find a fastmap and attach from it.
+ * scan_fast - try to find a fastmap and attach from it.
* @ubi: UBI device description object
* @ai: attach info object
*
@@ -1337,52 +1402,58 @@ static struct ubi_attach_info *alloc_ai(void)
*/
static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai)
{
- int err, pnum, fm_anchor = -1;
- unsigned long long max_sqnum = 0;
+ int err, pnum;
+ struct ubi_attach_info *scan_ai;
err = -ENOMEM;
+ scan_ai = alloc_ai();
+ if (!scan_ai)
+ goto out;
+
ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!ech)
- goto out;
+ goto out_ai;
vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
if (!vidh)
goto out_ech;
for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
- int vol_id = -1;
- unsigned long long sqnum = -1;
cond_resched();
dbg_gen("process PEB %d", pnum);
- err = scan_peb(ubi, *ai, pnum, &vol_id, &sqnum);
+ err = scan_peb(ubi, scan_ai, pnum, true);
if (err < 0)
goto out_vidh;
-
- if (vol_id == UBI_FM_SB_VOLUME_ID && sqnum > max_sqnum) {
- max_sqnum = sqnum;
- fm_anchor = pnum;
- }
}
ubi_free_vid_hdr(ubi, vidh);
kfree(ech);
- if (fm_anchor < 0)
- return UBI_NO_FASTMAP;
+ if (scan_ai->force_full_scan)
+ err = UBI_NO_FASTMAP;
+ else
+ err = ubi_scan_fastmap(ubi, *ai, scan_ai);
- destroy_ai(*ai);
- *ai = alloc_ai();
- if (!*ai)
- return -ENOMEM;
+ if (err) {
+ /*
+ * Didn't attach via fastmap, do a full scan but reuse what
+ * we've already scanned.
+ */
+ destroy_ai(*ai);
+ *ai = scan_ai;
+ } else
+ destroy_ai(scan_ai);
- return ubi_scan_fastmap(ubi, *ai, fm_anchor);
+ return err;
out_vidh:
ubi_free_vid_hdr(ubi, vidh);
out_ech:
kfree(ech);
+out_ai:
+ destroy_ai(scan_ai);
out:
return err;
}
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 990898b9d..48eb55f34 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -15,20 +15,22 @@
*/
#include <linux/crc32.h>
+#include <linux/bitmap.h>
#include "ubi.h"
/**
- * init_seen - allocate memory for used for debugging.
+ * init_seen - allocate a bitmap used for debugging.
* @ubi: UBI device description object
*/
-static inline int *init_seen(struct ubi_device *ubi)
+static inline unsigned long *init_seen(struct ubi_device *ubi)
{
- int *ret;
+ unsigned long *ret;
if (!ubi_dbg_chk_fastmap(ubi))
return NULL;
- ret = kcalloc(ubi->peb_count, sizeof(int), GFP_KERNEL);
+ ret = kcalloc(BITS_TO_LONGS(ubi->peb_count), sizeof(unsigned long),
+ GFP_KERNEL);
if (!ret)
return ERR_PTR(-ENOMEM);
@@ -39,7 +41,7 @@ static inline int *init_seen(struct ubi_device *ubi)
- * free_seen - free the seen logic integer array.
- * @seen: integer array of @ubi->peb_count size
+ * free_seen - free the seen bitmap.
+ * @seen: bitmap of @ubi->peb_count bits
*/
-static inline void free_seen(int *seen)
+static inline void free_seen(unsigned long *seen)
{
kfree(seen);
}
@@ -50,12 +52,12 @@ static inline void free_seen(int *seen)
- * @pnum: The PEB to be makred as seen
- * @seen: integer array of @ubi->peb_count size
+ * @pnum: The PEB to be marked as seen
+ * @seen: bitmap of @ubi->peb_count bits
*/
-static inline void set_seen(struct ubi_device *ubi, int pnum, int *seen)
+static inline void set_seen(struct ubi_device *ubi, int pnum, unsigned long *seen)
{
if (!ubi_dbg_chk_fastmap(ubi) || !seen)
return;
- seen[pnum] = 1;
+ set_bit(pnum, seen);
}
/**
@@ -63,7 +65,7 @@ static inline void set_seen(struct ubi_device *ubi, int pnum, int *seen)
* @ubi: UBI device description object
- * @seen: integer array of @ubi->peb_count size
+ * @seen: bitmap of @ubi->peb_count bits
*/
-static int self_check_seen(struct ubi_device *ubi, int *seen)
+static int self_check_seen(struct ubi_device *ubi, unsigned long *seen)
{
int pnum, ret = 0;
@@ -71,7 +73,7 @@ static int self_check_seen(struct ubi_device *ubi, int *seen)
return 0;
for (pnum = 0; pnum < ubi->peb_count; pnum++) {
- if (!seen[pnum] && ubi->lookuptbl[pnum]) {
+ if (!test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
ret = -EINVAL;
}
@@ -578,7 +580,7 @@ static int count_fastmap_pebs(struct ubi_attach_info *ai)
list_for_each_entry(aeb, &ai->free, u.list)
n++;
- ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
n++;
@@ -850,27 +852,57 @@ fail:
}
/**
+ * find_fm_anchor - find the most recent Fastmap superblock (anchor)
+ * @ai: UBI attach info to be filled
+ */
+static int find_fm_anchor(struct ubi_attach_info *ai)
+{
+ int ret = -1;
+ struct ubi_ainf_peb *aeb;
+ unsigned long long max_sqnum = 0;
+
+ list_for_each_entry(aeb, &ai->fastmap, u.list) {
+ if (aeb->vol_id == UBI_FM_SB_VOLUME_ID && aeb->sqnum > max_sqnum) {
+ max_sqnum = aeb->sqnum;
+ ret = aeb->pnum;
+ }
+ }
+
+ return ret;
+}
+
+/**
* ubi_scan_fastmap - scan the fastmap.
* @ubi: UBI device object
* @ai: UBI attach info to be filled
- * @fm_anchor: The fastmap starts at this PEB
+ * @scan_ai: UBI attach info from the first 64 PEBs,
+ * used to find the most recent Fastmap data structure
*
* Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
* UBI_BAD_FASTMAP if one was found but is not usable.
* < 0 indicates an internal error.
*/
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
- int fm_anchor)
+ struct ubi_attach_info *scan_ai)
{
struct ubi_fm_sb *fmsb, *fmsb2;
struct ubi_vid_hdr *vh;
struct ubi_ec_hdr *ech;
struct ubi_fastmap_layout *fm;
- int i, used_blocks, pnum, ret = 0;
+ struct ubi_ainf_peb *tmp_aeb, *aeb;
+ int i, used_blocks, pnum, fm_anchor, ret = 0;
size_t fm_size;
__be32 crc, tmp_crc;
unsigned long long sqnum = 0;
+ fm_anchor = find_fm_anchor(scan_ai);
+ if (fm_anchor < 0)
+ return UBI_NO_FASTMAP;
+
+ /* Move all (possible) fastmap blocks into our new attach structure. */
+ list_for_each_entry_safe(aeb, tmp_aeb, &scan_ai->fastmap, u.list)
+ list_move_tail(&aeb->u.list, &ai->fastmap);
+
down_write(&ubi->fm_protect);
memset(ubi->fm_buf, 0, ubi->fm_size);
@@ -945,6 +977,13 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
goto free_hdr;
}
+ if (i == 0 && pnum != fm_anchor) {
+ ubi_err(ubi, "Fastmap anchor PEB mismatch: PEB: %i vs. %i",
+ pnum, fm_anchor);
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
if (ret && ret != UBI_IO_BITFLIPS) {
ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
@@ -1102,7 +1141,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
struct rb_node *tmp_rb;
int ret, i, j, free_peb_count, used_peb_count, vol_count;
int scrub_peb_count, erase_peb_count;
- int *seen_pebs = NULL;
+ unsigned long *seen_pebs = NULL;
fm_raw = ubi->fm_buf;
memset(ubi->fm_buf, 0, ubi->fm_size);
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index cb7c075f2..1cb287ec3 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -99,9 +99,6 @@ static int gluebi_get_device(struct mtd_info *mtd)
struct gluebi_device *gluebi;
int ubi_mode = UBI_READONLY;
- if (!try_module_get(THIS_MODULE))
- return -ENODEV;
-
if (mtd->flags & MTD_WRITEABLE)
ubi_mode = UBI_READWRITE;
@@ -129,7 +126,6 @@ static int gluebi_get_device(struct mtd_info *mtd)
ubi_mode);
if (IS_ERR(gluebi->desc)) {
mutex_unlock(&devices_mutex);
- module_put(THIS_MODULE);
return PTR_ERR(gluebi->desc);
}
gluebi->refcnt += 1;
@@ -153,7 +149,6 @@ static void gluebi_put_device(struct mtd_info *mtd)
gluebi->refcnt -= 1;
if (gluebi->refcnt == 0)
ubi_close_volume(gluebi->desc);
- module_put(THIS_MODULE);
mutex_unlock(&devices_mutex);
}
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 10cf3b549..ff8cafe1e 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -1019,7 +1019,7 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
p = (char *)vid_hdr - ubi->vid_hdr_shift;
read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
- ubi->vid_hdr_alsize);
+ ubi->vid_hdr_shift + UBI_VID_HDR_SIZE);
if (read_err && read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
return read_err;
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 61d4e9975..b616a115c 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -703,6 +703,8 @@ struct ubi_ainf_volume {
* @erase: list of physical eraseblocks which have to be erased
* @alien: list of physical eraseblocks which should not be used by UBI (e.g.,
* those belonging to "preserve"-compatible internal volumes)
+ * @fastmap: list of physical eraseblocks which relate to fastmap (e.g.,
+ * eraseblocks of the current and not yet erased old fastmap blocks)
* @corr_peb_count: count of PEBs in the @corr list
* @empty_peb_count: count of PEBs which are presumably empty (contain only
* 0xFF bytes)
@@ -713,6 +715,8 @@ struct ubi_ainf_volume {
* @vols_found: number of volumes found
* @highest_vol_id: highest volume ID
* @is_empty: flag indicating whether the MTD device is empty or not
+ * @force_full_scan: flag indicating whether we need to do a full scan and drop
+ *	all existing Fastmap data structures
* @min_ec: lowest erase counter value
* @max_ec: highest erase counter value
* @max_sqnum: highest sequence number value
@@ -731,6 +735,7 @@ struct ubi_attach_info {
struct list_head free;
struct list_head erase;
struct list_head alien;
+ struct list_head fastmap;
int corr_peb_count;
int empty_peb_count;
int alien_peb_count;
@@ -739,6 +744,7 @@ struct ubi_attach_info {
int vols_found;
int highest_vol_id;
int is_empty;
+ int force_full_scan;
int min_ec;
int max_ec;
unsigned long long max_sqnum;
@@ -911,7 +917,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
size_t ubi_calc_fm_size(struct ubi_device *ubi);
int ubi_update_fastmap(struct ubi_device *ubi);
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
- int fm_anchor);
+ struct ubi_attach_info *scan_ai);
#else
static inline int ubi_update_fastmap(struct ubi_device *ubi) { return 0; }
#endif
@@ -1105,4 +1111,42 @@ static inline int idx2vol_id(const struct ubi_device *ubi, int idx)
return idx;
}
+/**
+ * ubi_is_fm_vol - check whether a volume ID is a Fastmap volume.
+ * @vol_id: volume ID
+ */
+static inline bool ubi_is_fm_vol(int vol_id)
+{
+ switch (vol_id) {
+ case UBI_FM_SB_VOLUME_ID:
+ case UBI_FM_DATA_VOLUME_ID:
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ubi_find_fm_block - check whether a PEB is part of the current Fastmap.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock to look for
+ *
+ * This function returns a wear leveling object if @pnum relates to the current
+ * fastmap, %NULL otherwise.
+ */
+static inline struct ubi_wl_entry *ubi_find_fm_block(const struct ubi_device *ubi,
+ int pnum)
+{
+ int i;
+
+ if (ubi->fm) {
+ for (i = 0; i < ubi->fm->used_blocks; i++) {
+ if (ubi->fm->e[i]->pnum == pnum)
+ return ubi->fm->e[i];
+ }
+ }
+
+ return NULL;
+}
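For orientation: ubi_is_fm_vol() lets the scan code park fastmap PEBs on the new ai->fastmap list, and ubi_find_fm_block() lets ubi_wl_init() (see the wl.c hunk below) tell current-fastmap PEBs from stale ones. A hedged fragment of the first use (the vidh/aeb names are illustrative, not taken from this diff):

	/* Illustrative fragment from a hypothetical PEB scan loop. */
	if (ubi_is_fm_vol(be32_to_cpu(vidh->vol_id)))
		list_add_tail(&aeb->u.list, &ai->fastmap);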
+
#endif /* !__UBI_UBI_H__ */
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 959c7b12e..f4533266d 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1598,19 +1598,44 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
}
}
- dbg_wl("found %i PEBs", found_pebs);
+ list_for_each_entry(aeb, &ai->fastmap, u.list) {
+ cond_resched();
+
+ e = ubi_find_fm_block(ubi, aeb->pnum);
- if (ubi->fm) {
- ubi_assert(ubi->good_peb_count ==
- found_pebs + ubi->fm->used_blocks);
+ if (e) {
+ ubi_assert(!ubi->lookuptbl[e->pnum]);
+ ubi->lookuptbl[e->pnum] = e;
+ } else {
+ /*
+ * Usually old Fastmap PEBs are scheduled for erasure
+			 * and we don't have to care about them, but if we face
+			 * a power cut before scheduling them we need to
+ * take care of them here.
+ */
+ if (ubi->lookuptbl[aeb->pnum])
+ continue;
+
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ if (!e)
+ goto out_free;
- for (i = 0; i < ubi->fm->used_blocks; i++) {
- e = ubi->fm->e[i];
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
+ ubi_assert(!ubi->lookuptbl[e->pnum]);
ubi->lookuptbl[e->pnum] = e;
+ if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
+ wl_entry_destroy(ubi, e);
+ goto out_free;
+ }
}
+
+ found_pebs++;
}
- else
- ubi_assert(ubi->good_peb_count == found_pebs);
+
+ dbg_wl("found %i PEBs", found_pebs);
+
+ ubi_assert(ubi->good_peb_count == found_pebs);
reserved_pebs = WL_RESERVED_PEBS;
ubi_fastmap_init(ubi, &reserved_pebs);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 70dac7302..9599ed6f1 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -152,7 +152,7 @@ module_param(lacp_rate, charp, 0);
MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
"0 for slow, 1 for fast");
module_param(ad_select, charp, 0);
-MODULE_PARM_DESC(ad_select, "803.ad aggregation selection logic; "
+MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; "
"0 for stable (default), 1 for bandwidth, "
"2 for count");
module_param(min_links, int, 0);
@@ -4148,6 +4148,8 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_add_slave = bond_enslave,
.ndo_del_slave = bond_release,
.ndo_fix_features = bond_fix_features,
+ .ndo_neigh_construct = netdev_default_l2upper_neigh_construct,
+ .ndo_neigh_destroy = netdev_default_l2upper_neigh_destroy,
.ndo_bridge_setlink = switchdev_port_bridge_setlink,
.ndo_bridge_getlink = switchdev_port_bridge_getlink,
.ndo_bridge_dellink = switchdev_port_bridge_dellink,
@@ -4618,26 +4620,6 @@ static int bond_check_params(struct bond_params *params)
return 0;
}
-static struct lock_class_key bonding_netdev_xmit_lock_key;
-static struct lock_class_key bonding_netdev_addr_lock_key;
-static struct lock_class_key bonding_tx_busylock_key;
-
-static void bond_set_lockdep_class_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *_unused)
-{
- lockdep_set_class(&txq->_xmit_lock,
- &bonding_netdev_xmit_lock_key);
-}
-
-static void bond_set_lockdep_class(struct net_device *dev)
-{
- lockdep_set_class(&dev->addr_list_lock,
- &bonding_netdev_addr_lock_key);
- netdev_for_each_tx_queue(dev, bond_set_lockdep_class_one, NULL);
- dev->qdisc_tx_busylock = &bonding_tx_busylock_key;
-}
-
/* Called from registration process */
static int bond_init(struct net_device *bond_dev)
{
@@ -4650,7 +4632,7 @@ static int bond_init(struct net_device *bond_dev)
if (!bond->wq)
return -ENOMEM;
- bond_set_lockdep_class(bond_dev);
+ netdev_lockdep_set_classes(bond_dev);
list_add_tail(&bond->bond_list, &bn->dev_list);
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 547098086..f81df91a9 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -52,5 +52,5 @@ config CAIF_VIRTIO
The caif driver for CAIF over Virtio.
if CAIF_VIRTIO
-source "drivers/vhost/Kconfig"
+source "drivers/vhost/Kconfig.vringh"
endif
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 615c65da3..ddabce759 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -1201,7 +1201,7 @@ static int cfhsi_open(struct net_device *ndev)
clear_bit(CFHSI_AWAKE, &cfhsi->bits);
/* Create work thread. */
- cfhsi->wq = create_singlethread_workqueue(cfhsi->ndev->name);
+ cfhsi->wq = alloc_ordered_workqueue(cfhsi->ndev->name, WQ_MEM_RECLAIM);
if (!cfhsi->wq) {
netdev_err(cfhsi->ndev, "%s: Failed to create work queue.\n",
__func__);
@@ -1267,9 +1267,6 @@ static int cfhsi_close(struct net_device *ndev)
/* going to shutdown driver */
set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
- /* Flush workqueue */
- flush_workqueue(cfhsi->wq);
-
/* Delete timers if pending */
del_timer_sync(&cfhsi->inactivity_timer);
del_timer_sync(&cfhsi->rx_slowpath_timer);
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 4721948a9..3a529fbe5 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -185,8 +185,8 @@ static ssize_t print_frame(char *buf, size_t size, char *frm,
/* Fast forward. */
i = count - cut;
len += snprintf((buf + len), (size - len),
- "--- %u bytes skipped ---\n",
- (int)(count - (cut * 2)));
+ "--- %zu bytes skipped ---\n",
+ count - (cut * 2));
}
if ((!(i % 10)) && i) {
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 0d40aef92..22570ea3a 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -104,16 +104,6 @@ config CAN_JANZ_ICAN3
This driver can also be built as a module. If so, the module will be
called janz-ican3.ko.
-config CAN_RCAR
- tristate "Renesas R-Car CAN controller"
- depends on ARCH_RENESAS || ARM
- ---help---
- Say Y here if you want to use CAN controller found on Renesas R-Car
- SoCs.
-
- To compile this driver as a module, choose M here: the module will
- be called rcar_can.
-
config CAN_SUN4I
tristate "Allwinner A10 CAN controller"
depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST
@@ -152,6 +142,7 @@ source "drivers/net/can/cc770/Kconfig"
source "drivers/net/can/ifi_canfd/Kconfig"
source "drivers/net/can/m_can/Kconfig"
source "drivers/net/can/mscan/Kconfig"
+source "drivers/net/can/rcar/Kconfig"
source "drivers/net/can/sja1000/Kconfig"
source "drivers/net/can/softing/Kconfig"
source "drivers/net/can/spi/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index e3db0c807..26ba4b794 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -10,6 +10,7 @@ can-dev-y := dev.o
can-dev-$(CONFIG_CAN_LEDS) += led.o
+obj-y += rcar/
obj-y += spi/
obj-y += usb/
obj-y += softing/
@@ -24,7 +25,6 @@ obj-$(CONFIG_CAN_IFI_CANFD) += ifi_canfd/
obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
obj-$(CONFIG_CAN_MSCAN) += mscan/
obj-$(CONFIG_CAN_M_CAN) += m_can/
-obj-$(CONFIG_CAN_RCAR) += rcar_can.o
obj-$(CONFIG_CAN_SJA1000) += sja1000/
obj-$(CONFIG_CAN_SUN4I) += sun4i_can.o
obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index ad535a854..8d6208c0b 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -21,6 +21,7 @@
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
+#include <linux/workqueue.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/skb.h>
@@ -69,6 +70,7 @@ EXPORT_SYMBOL_GPL(can_len2dlc);
#ifdef CONFIG_CAN_CALC_BITTIMING
#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
+#define CAN_CALC_SYNC_SEG 1
/*
* Bit-timing calculation derived from:
@@ -83,98 +85,126 @@ EXPORT_SYMBOL_GPL(can_len2dlc);
* registers of the CAN controller. You can find more information
* in the header file linux/can/netlink.h.
*/
-static int can_update_spt(const struct can_bittiming_const *btc,
- int sampl_pt, int tseg, int *tseg1, int *tseg2)
+static int can_update_sample_point(const struct can_bittiming_const *btc,
+ unsigned int sample_point_nominal, unsigned int tseg,
+ unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
+ unsigned int *sample_point_error_ptr)
{
- *tseg2 = tseg + 1 - (sampl_pt * (tseg + 1)) / 1000;
- if (*tseg2 < btc->tseg2_min)
- *tseg2 = btc->tseg2_min;
- if (*tseg2 > btc->tseg2_max)
- *tseg2 = btc->tseg2_max;
- *tseg1 = tseg - *tseg2;
- if (*tseg1 > btc->tseg1_max) {
- *tseg1 = btc->tseg1_max;
- *tseg2 = tseg - *tseg1;
+ unsigned int sample_point_error, best_sample_point_error = UINT_MAX;
+ unsigned int sample_point, best_sample_point = 0;
+ unsigned int tseg1, tseg2;
+ int i;
+
+ for (i = 0; i <= 1; i++) {
+ tseg2 = tseg + CAN_CALC_SYNC_SEG - (sample_point_nominal * (tseg + CAN_CALC_SYNC_SEG)) / 1000 - i;
+ tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
+ tseg1 = tseg - tseg2;
+ if (tseg1 > btc->tseg1_max) {
+ tseg1 = btc->tseg1_max;
+ tseg2 = tseg - tseg1;
+ }
+
+ sample_point = 1000 * (tseg + CAN_CALC_SYNC_SEG - tseg2) / (tseg + CAN_CALC_SYNC_SEG);
+ sample_point_error = abs(sample_point_nominal - sample_point);
+
+ if ((sample_point <= sample_point_nominal) && (sample_point_error < best_sample_point_error)) {
+ best_sample_point = sample_point;
+ best_sample_point_error = sample_point_error;
+ *tseg1_ptr = tseg1;
+ *tseg2_ptr = tseg2;
+ }
}
- return 1000 * (tseg + 1 - *tseg2) / (tseg + 1);
+
+ if (sample_point_error_ptr)
+ *sample_point_error_ptr = best_sample_point_error;
+
+ return best_sample_point;
}
static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
const struct can_bittiming_const *btc)
{
struct can_priv *priv = netdev_priv(dev);
- long best_error = 1000000000, error = 0;
- int best_tseg = 0, best_brp = 0, brp = 0;
- int tsegall, tseg = 0, tseg1 = 0, tseg2 = 0;
- int spt_error = 1000, spt = 0, sampl_pt;
- long rate;
+ unsigned int bitrate; /* current bitrate */
+ unsigned int bitrate_error; /* difference between current and nominal value */
+ unsigned int best_bitrate_error = UINT_MAX;
+ unsigned int sample_point_error; /* difference between current and nominal value */
+ unsigned int best_sample_point_error = UINT_MAX;
+ unsigned int sample_point_nominal; /* nominal sample point */
+ unsigned int best_tseg = 0; /* current best value for tseg */
+ unsigned int best_brp = 0; /* current best value for brp */
+ unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
u64 v64;
/* Use CiA recommended sample points */
if (bt->sample_point) {
- sampl_pt = bt->sample_point;
+ sample_point_nominal = bt->sample_point;
} else {
if (bt->bitrate > 800000)
- sampl_pt = 750;
+ sample_point_nominal = 750;
else if (bt->bitrate > 500000)
- sampl_pt = 800;
+ sample_point_nominal = 800;
else
- sampl_pt = 875;
+ sample_point_nominal = 875;
}
/* tseg even = round down, odd = round up */
for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
- tsegall = 1 + tseg / 2;
+ tsegall = CAN_CALC_SYNC_SEG + tseg / 2;
+
/* Compute all possible tseg choices (tseg=tseg1+tseg2) */
brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
- /* chose brp step which is possible in system */
+
+ /* choose brp step which is possible in system */
brp = (brp / btc->brp_inc) * btc->brp_inc;
if ((brp < btc->brp_min) || (brp > btc->brp_max))
continue;
- rate = priv->clock.freq / (brp * tsegall);
- error = bt->bitrate - rate;
+
+ bitrate = priv->clock.freq / (brp * tsegall);
+ bitrate_error = abs(bt->bitrate - bitrate);
+
/* tseg brp biterror */
- if (error < 0)
- error = -error;
- if (error > best_error)
+ if (bitrate_error > best_bitrate_error)
continue;
- best_error = error;
- if (error == 0) {
- spt = can_update_spt(btc, sampl_pt, tseg / 2,
- &tseg1, &tseg2);
- error = sampl_pt - spt;
- if (error < 0)
- error = -error;
- if (error > spt_error)
- continue;
- spt_error = error;
- }
+
+ /* reset sample point error if we have a better bitrate */
+ if (bitrate_error < best_bitrate_error)
+ best_sample_point_error = UINT_MAX;
+
+ can_update_sample_point(btc, sample_point_nominal, tseg / 2, &tseg1, &tseg2, &sample_point_error);
+ if (sample_point_error > best_sample_point_error)
+ continue;
+
+ best_sample_point_error = sample_point_error;
+ best_bitrate_error = bitrate_error;
best_tseg = tseg / 2;
best_brp = brp;
- if (error == 0)
+
+ if (bitrate_error == 0 && sample_point_error == 0)
break;
}
- if (best_error) {
+ if (best_bitrate_error) {
/* Error in one-tenth of a percent */
- error = (best_error * 1000) / bt->bitrate;
- if (error > CAN_CALC_MAX_ERROR) {
+ v64 = (u64)best_bitrate_error * 1000;
+ do_div(v64, bt->bitrate);
+ bitrate_error = (u32)v64;
+ if (bitrate_error > CAN_CALC_MAX_ERROR) {
netdev_err(dev,
- "bitrate error %ld.%ld%% too high\n",
- error / 10, error % 10);
+ "bitrate error %d.%d%% too high\n",
+ bitrate_error / 10, bitrate_error % 10);
return -EDOM;
- } else {
- netdev_warn(dev, "bitrate error %ld.%ld%%\n",
- error / 10, error % 10);
}
+ netdev_warn(dev, "bitrate error %d.%d%%\n",
+ bitrate_error / 10, bitrate_error % 10);
}
/* real sample point */
- bt->sample_point = can_update_spt(btc, sampl_pt, best_tseg,
- &tseg1, &tseg2);
+ bt->sample_point = can_update_sample_point(btc, sample_point_nominal, best_tseg,
+ &tseg1, &tseg2, NULL);
- v64 = (u64)best_brp * 1000000000UL;
+ v64 = (u64)best_brp * 1000 * 1000 * 1000;
do_div(v64, priv->clock.freq);
bt->tq = (u32)v64;
bt->prop_seg = tseg1 / 2;
@@ -182,9 +212,9 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
bt->phase_seg2 = tseg2;
/* check for sjw user settings */
- if (!bt->sjw || !btc->sjw_max)
+ if (!bt->sjw || !btc->sjw_max) {
bt->sjw = 1;
- else {
+ } else {
/* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */
if (bt->sjw > btc->sjw_max)
bt->sjw = btc->sjw_max;
@@ -194,8 +224,9 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
}
bt->brp = best_brp;
- /* real bit-rate */
- bt->bitrate = priv->clock.freq / (bt->brp * (tseg1 + tseg2 + 1));
+
+ /* real bitrate */
+ bt->bitrate = priv->clock.freq / (bt->brp * (CAN_CALC_SYNC_SEG + tseg1 + tseg2));
return 0;
}
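A worked instance of the rewritten loop, assuming an 8 MHz CAN clock and a requested 250 kbit/s with no user-supplied sample point (numbers are illustrative only):

	nominal sample point = 875 (one-tenth of a percent; bitrate <= 500 kbit/s)
	at tseg = 30 (i.e. tseg1 + tseg2 = 15): tsegall = CAN_CALC_SYNC_SEG + 15 = 16
	brp = 8000000 / (16 * 250000) = 2, so bitrate = 8000000 / (2 * 16) = 250000 (error 0)
	tseg2 = 16 - (875 * 16) / 1000 = 2, tseg1 = 13
	sample point = 1000 * (16 - 2) / 16 = 875 (error 0), so the loop terminates early

The resulting timing is brp = 2, tq = 250 ns, prop_seg = 6, phase_seg1 = 7, phase_seg2 = 2.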
@@ -471,9 +502,8 @@ EXPORT_SYMBOL_GPL(can_free_echo_skb);
/*
* CAN device restart for bus-off recovery
*/
-static void can_restart(unsigned long data)
+static void can_restart(struct net_device *dev)
{
- struct net_device *dev = (struct net_device *)data;
struct can_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
struct sk_buff *skb;
@@ -513,6 +543,14 @@ restart:
netdev_err(dev, "Error %d during restart", err);
}
+static void can_restart_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct can_priv *priv = container_of(dwork, struct can_priv, restart_work);
+
+ can_restart(priv->dev);
+}
+
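Condensed view of the timer-to-workqueue conversion carried out by the surrounding hunks; each delayed-work call replaces a timer call one-for-one (all three calls appear in this diff, the pairing comments are editorial):

	INIT_DELAYED_WORK(&priv->restart_work, can_restart_work);	/* was init_timer()/setup_timer() */
	schedule_delayed_work(&priv->restart_work,
			      msecs_to_jiffies(priv->restart_ms));	/* was mod_timer() */
	cancel_delayed_work_sync(&priv->restart_work);			/* was del_timer_sync() */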
int can_restart_now(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
@@ -526,8 +564,8 @@ int can_restart_now(struct net_device *dev)
if (priv->state != CAN_STATE_BUS_OFF)
return -EBUSY;
- /* Runs as soon as possible in the timer context */
- mod_timer(&priv->restart_timer, jiffies);
+ cancel_delayed_work_sync(&priv->restart_work);
+ can_restart(dev);
return 0;
}
@@ -548,8 +586,8 @@ void can_bus_off(struct net_device *dev)
netif_carrier_off(dev);
if (priv->restart_ms)
- mod_timer(&priv->restart_timer,
- jiffies + (priv->restart_ms * HZ) / 1000);
+ schedule_delayed_work(&priv->restart_work,
+ msecs_to_jiffies(priv->restart_ms));
}
EXPORT_SYMBOL_GPL(can_bus_off);
@@ -658,6 +696,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
return NULL;
priv = netdev_priv(dev);
+ priv->dev = dev;
if (echo_skb_max) {
priv->echo_skb_max = echo_skb_max;
@@ -667,7 +706,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
priv->state = CAN_STATE_STOPPED;
- init_timer(&priv->restart_timer);
+ INIT_DELAYED_WORK(&priv->restart_work, can_restart_work);
return dev;
}
@@ -748,8 +787,6 @@ int open_candev(struct net_device *dev)
if (!netif_carrier_ok(dev))
netif_carrier_on(dev);
- setup_timer(&priv->restart_timer, can_restart, (unsigned long)dev);
-
return 0;
}
EXPORT_SYMBOL_GPL(open_candev);
@@ -764,7 +801,7 @@ void close_candev(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
- del_timer_sync(&priv->restart_timer);
+ cancel_delayed_work_sync(&priv->restart_work);
can_flush_echo_skb(dev);
}
EXPORT_SYMBOL_GPL(close_candev);
diff --git a/drivers/net/can/rcar/Kconfig b/drivers/net/can/rcar/Kconfig
new file mode 100644
index 000000000..7b03a3a37
--- /dev/null
+++ b/drivers/net/can/rcar/Kconfig
@@ -0,0 +1,21 @@
+config CAN_RCAR
+ tristate "Renesas R-Car CAN controller"
+ depends on ARCH_RENESAS || ARM
+ ---help---
+	  Say Y here if you want to use the CAN controller found on Renesas
+	  R-Car SoCs.
+
+ To compile this driver as a module, choose M here: the module will
+ be called rcar_can.
+
+config CAN_RCAR_CANFD
+ tristate "Renesas R-Car CAN FD controller"
+ depends on ARCH_RENESAS || ARM
+ ---help---
+	  Say Y here if you want to use the CAN FD controller found on
+	  Renesas R-Car SoCs. The driver puts the controller in CAN FD only
+	  mode, which can interoperate with CAN 2.0 nodes but does not support
+ dedicated CAN 2.0 mode.
+
+ To compile this driver as a module, choose M here: the module will
+ be called rcar_canfd.
diff --git a/drivers/net/can/rcar/Makefile b/drivers/net/can/rcar/Makefile
new file mode 100644
index 000000000..08de36a4c
--- /dev/null
+++ b/drivers/net/can/rcar/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the Renesas R-Car CAN & CAN FD controller drivers
+#
+
+obj-$(CONFIG_CAN_RCAR) += rcar_can.o
+obj-$(CONFIG_CAN_RCAR_CANFD) += rcar_canfd.o
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
new file mode 100644
index 000000000..788459f6b
--- /dev/null
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -0,0 +1,929 @@
+/* Renesas R-Car CAN device driver
+ *
+ * Copyright (C) 2013 Cogent Embedded, Inc. <source@cogentembedded.com>
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/can/led.h>
+#include <linux/can/dev.h>
+#include <linux/clk.h>
+#include <linux/can/platform/rcar_can.h>
+#include <linux/of.h>
+
+#define RCAR_CAN_DRV_NAME "rcar_can"
+
+/* Mailbox configuration:
+ * mailbox 60 - 63 - Rx FIFO mailboxes
+ * mailbox 56 - 59 - Tx FIFO mailboxes
+ * non-FIFO mailboxes are not used
+ */
+#define RCAR_CAN_N_MBX 64 /* Number of mailboxes in non-FIFO mode */
+#define RCAR_CAN_RX_FIFO_MBX 60 /* Mailbox - window to Rx FIFO */
+#define RCAR_CAN_TX_FIFO_MBX 56 /* Mailbox - window to Tx FIFO */
+#define RCAR_CAN_FIFO_DEPTH 4
+
+/* Mailbox registers structure */
+struct rcar_can_mbox_regs {
+ u32 id; /* IDE and RTR bits, SID and EID */
+ u8 stub; /* Not used */
+ u8 dlc; /* Data Length Code - bits [0..3] */
+ u8 data[8]; /* Data Bytes */
+ u8 tsh; /* Time Stamp Higher Byte */
+ u8 tsl; /* Time Stamp Lower Byte */
+};
+
+struct rcar_can_regs {
+ struct rcar_can_mbox_regs mb[RCAR_CAN_N_MBX]; /* Mailbox registers */
+ u32 mkr_2_9[8]; /* Mask Registers 2-9 */
+ u32 fidcr[2]; /* FIFO Received ID Compare Register */
+ u32 mkivlr1; /* Mask Invalid Register 1 */
+ u32 mier1; /* Mailbox Interrupt Enable Register 1 */
+ u32 mkr_0_1[2]; /* Mask Registers 0-1 */
+	u32 mkivlr0;	/* Mask Invalid Register 0 */
+ u32 mier0; /* Mailbox Interrupt Enable Register 0 */
+ u8 pad_440[0x3c0];
+ u8 mctl[64]; /* Message Control Registers */
+ u16 ctlr; /* Control Register */
+ u16 str; /* Status register */
+ u8 bcr[3]; /* Bit Configuration Register */
+ u8 clkr; /* Clock Select Register */
+ u8 rfcr; /* Receive FIFO Control Register */
+ u8 rfpcr; /* Receive FIFO Pointer Control Register */
+ u8 tfcr; /* Transmit FIFO Control Register */
+ u8 tfpcr; /* Transmit FIFO Pointer Control Register */
+ u8 eier; /* Error Interrupt Enable Register */
+ u8 eifr; /* Error Interrupt Factor Judge Register */
+ u8 recr; /* Receive Error Count Register */
+ u8 tecr; /* Transmit Error Count Register */
+ u8 ecsr; /* Error Code Store Register */
+ u8 cssr; /* Channel Search Support Register */
+ u8 mssr; /* Mailbox Search Status Register */
+ u8 msmr; /* Mailbox Search Mode Register */
+ u16 tsr; /* Time Stamp Register */
+ u8 afsr; /* Acceptance Filter Support Register */
+ u8 pad_857;
+ u8 tcr; /* Test Control Register */
+ u8 pad_859[7];
+ u8 ier; /* Interrupt Enable Register */
+ u8 isr; /* Interrupt Status Register */
+ u8 pad_862;
+ u8 mbsmr; /* Mailbox Search Mask Register */
+};
+
+struct rcar_can_priv {
+ struct can_priv can; /* Must be the first member! */
+ struct net_device *ndev;
+ struct napi_struct napi;
+ struct rcar_can_regs __iomem *regs;
+ struct clk *clk;
+ struct clk *can_clk;
+ u8 tx_dlc[RCAR_CAN_FIFO_DEPTH];
+ u32 tx_head;
+ u32 tx_tail;
+ u8 clock_select;
+ u8 ier;
+};
+
+static const struct can_bittiming_const rcar_can_bittiming_const = {
+ .name = RCAR_CAN_DRV_NAME,
+ .tseg1_min = 4,
+ .tseg1_max = 16,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
+/* Control Register bits */
+#define RCAR_CAN_CTLR_BOM (3 << 11) /* Bus-Off Recovery Mode Bits */
+#define RCAR_CAN_CTLR_BOM_ENT (1 << 11) /* Entry to halt mode */
+ /* at bus-off entry */
+#define RCAR_CAN_CTLR_SLPM (1 << 10)
+#define RCAR_CAN_CTLR_CANM (3 << 8) /* Operating Mode Select Bit */
+#define RCAR_CAN_CTLR_CANM_HALT (1 << 9)
+#define RCAR_CAN_CTLR_CANM_RESET (1 << 8)
+#define RCAR_CAN_CTLR_CANM_FORCE_RESET (3 << 8)
+#define RCAR_CAN_CTLR_MLM (1 << 3) /* Message Lost Mode Select */
+#define RCAR_CAN_CTLR_IDFM (3 << 1) /* ID Format Mode Select Bits */
+#define RCAR_CAN_CTLR_IDFM_MIXED (1 << 2) /* Mixed ID mode */
+#define RCAR_CAN_CTLR_MBM (1 << 0) /* Mailbox Mode select */
+
+/* Status Register bits */
+#define RCAR_CAN_STR_RSTST (1 << 8) /* Reset Status Bit */
+
+/* FIFO Received ID Compare Registers 0 and 1 bits */
+#define RCAR_CAN_FIDCR_IDE (1 << 31) /* ID Extension Bit */
+#define RCAR_CAN_FIDCR_RTR (1 << 30) /* Remote Transmission Request Bit */
+
+/* Receive FIFO Control Register bits */
+#define RCAR_CAN_RFCR_RFEST (1 << 7) /* Receive FIFO Empty Status Flag */
+#define RCAR_CAN_RFCR_RFE (1 << 0) /* Receive FIFO Enable */
+
+/* Transmit FIFO Control Register bits */
+#define RCAR_CAN_TFCR_TFUST (7 << 1) /* Transmit FIFO Unsent Message */
+ /* Number Status Bits */
+#define RCAR_CAN_TFCR_TFUST_SHIFT 1 /* Offset of Transmit FIFO Unsent */
+ /* Message Number Status Bits */
+#define RCAR_CAN_TFCR_TFE (1 << 0) /* Transmit FIFO Enable */
+
+#define RCAR_CAN_N_RX_MKREGS1 2 /* Number of mask registers */
+ /* for Rx mailboxes 0-31 */
+#define RCAR_CAN_N_RX_MKREGS2 8
+
+/* Bit Configuration Register settings */
+#define RCAR_CAN_BCR_TSEG1(x) (((x) & 0x0f) << 20)
+#define RCAR_CAN_BCR_BPR(x) (((x) & 0x3ff) << 8)
+#define RCAR_CAN_BCR_SJW(x) (((x) & 0x3) << 4)
+#define RCAR_CAN_BCR_TSEG2(x) ((x) & 0x07)
+
+/* Mailbox and Mask Registers bits */
+#define RCAR_CAN_IDE (1 << 31)
+#define RCAR_CAN_RTR (1 << 30)
+#define RCAR_CAN_SID_SHIFT 18
+
+/* Mailbox Interrupt Enable Register 1 bits */
+#define RCAR_CAN_MIER1_RXFIE (1 << 28) /* Receive FIFO Interrupt Enable */
+#define RCAR_CAN_MIER1_TXFIE (1 << 24) /* Transmit FIFO Interrupt Enable */
+
+/* Interrupt Enable Register bits */
+#define RCAR_CAN_IER_ERSIE (1 << 5) /* Error (ERS) Interrupt Enable Bit */
+#define RCAR_CAN_IER_RXFIE (1 << 4) /* Reception FIFO Interrupt */
+ /* Enable Bit */
+#define RCAR_CAN_IER_TXFIE (1 << 3) /* Transmission FIFO Interrupt */
+ /* Enable Bit */
+/* Interrupt Status Register bits */
+#define RCAR_CAN_ISR_ERSF (1 << 5) /* Error (ERS) Interrupt Status Bit */
+#define RCAR_CAN_ISR_RXFF (1 << 4) /* Reception FIFO Interrupt */
+ /* Status Bit */
+#define RCAR_CAN_ISR_TXFF (1 << 3) /* Transmission FIFO Interrupt */
+ /* Status Bit */
+
+/* Error Interrupt Enable Register bits */
+#define RCAR_CAN_EIER_BLIE (1 << 7) /* Bus Lock Interrupt Enable */
+#define RCAR_CAN_EIER_OLIE (1 << 6) /* Overload Frame Transmit */
+ /* Interrupt Enable */
+#define RCAR_CAN_EIER_ORIE (1 << 5) /* Receive Overrun Interrupt Enable */
+#define RCAR_CAN_EIER_BORIE (1 << 4) /* Bus-Off Recovery Interrupt Enable */
+#define RCAR_CAN_EIER_BOEIE (1 << 3) /* Bus-Off Entry Interrupt Enable */
+#define RCAR_CAN_EIER_EPIE (1 << 2) /* Error Passive Interrupt Enable */
+#define RCAR_CAN_EIER_EWIE (1 << 1) /* Error Warning Interrupt Enable */
+#define RCAR_CAN_EIER_BEIE (1 << 0) /* Bus Error Interrupt Enable */
+
+/* Error Interrupt Factor Judge Register bits */
+#define RCAR_CAN_EIFR_BLIF (1 << 7) /* Bus Lock Detect Flag */
+#define RCAR_CAN_EIFR_OLIF (1 << 6) /* Overload Frame Transmission */
+ /* Detect Flag */
+#define RCAR_CAN_EIFR_ORIF (1 << 5) /* Receive Overrun Detect Flag */
+#define RCAR_CAN_EIFR_BORIF (1 << 4) /* Bus-Off Recovery Detect Flag */
+#define RCAR_CAN_EIFR_BOEIF (1 << 3) /* Bus-Off Entry Detect Flag */
+#define RCAR_CAN_EIFR_EPIF (1 << 2) /* Error Passive Detect Flag */
+#define RCAR_CAN_EIFR_EWIF (1 << 1) /* Error Warning Detect Flag */
+#define RCAR_CAN_EIFR_BEIF (1 << 0) /* Bus Error Detect Flag */
+
+/* Error Code Store Register bits */
+#define RCAR_CAN_ECSR_EDPM (1 << 7) /* Error Display Mode Select Bit */
+#define RCAR_CAN_ECSR_ADEF (1 << 6) /* ACK Delimiter Error Flag */
+#define RCAR_CAN_ECSR_BE0F (1 << 5) /* Bit Error (dominant) Flag */
+#define RCAR_CAN_ECSR_BE1F (1 << 4) /* Bit Error (recessive) Flag */
+#define RCAR_CAN_ECSR_CEF (1 << 3) /* CRC Error Flag */
+#define RCAR_CAN_ECSR_AEF (1 << 2) /* ACK Error Flag */
+#define RCAR_CAN_ECSR_FEF (1 << 1) /* Form Error Flag */
+#define RCAR_CAN_ECSR_SEF (1 << 0) /* Stuff Error Flag */
+
+#define RCAR_CAN_NAPI_WEIGHT 4
+#define MAX_STR_READS 0x100
+
+static void tx_failure_cleanup(struct net_device *ndev)
+{
+ int i;
+
+ for (i = 0; i < RCAR_CAN_FIFO_DEPTH; i++)
+ can_free_echo_skb(ndev, i);
+}
+
+static void rcar_can_error(struct net_device *ndev)
+{
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ u8 eifr, txerr = 0, rxerr = 0;
+
+ /* Propagate the error condition to the CAN stack */
+ skb = alloc_can_err_skb(ndev, &cf);
+
+ eifr = readb(&priv->regs->eifr);
+ if (eifr & (RCAR_CAN_EIFR_EWIF | RCAR_CAN_EIFR_EPIF)) {
+ txerr = readb(&priv->regs->tecr);
+ rxerr = readb(&priv->regs->recr);
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
+ }
+ if (eifr & RCAR_CAN_EIFR_BEIF) {
+ int rx_errors = 0, tx_errors = 0;
+ u8 ecsr;
+
+ netdev_dbg(priv->ndev, "Bus error interrupt:\n");
+ if (skb)
+ cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
+
+ ecsr = readb(&priv->regs->ecsr);
+ if (ecsr & RCAR_CAN_ECSR_ADEF) {
+ netdev_dbg(priv->ndev, "ACK Delimiter Error\n");
+ tx_errors++;
+ writeb(~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr);
+ if (skb)
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL;
+ }
+ if (ecsr & RCAR_CAN_ECSR_BE0F) {
+ netdev_dbg(priv->ndev, "Bit Error (dominant)\n");
+ tx_errors++;
+ writeb(~RCAR_CAN_ECSR_BE0F, &priv->regs->ecsr);
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+ }
+ if (ecsr & RCAR_CAN_ECSR_BE1F) {
+ netdev_dbg(priv->ndev, "Bit Error (recessive)\n");
+ tx_errors++;
+ writeb(~RCAR_CAN_ECSR_BE1F, &priv->regs->ecsr);
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+ }
+ if (ecsr & RCAR_CAN_ECSR_CEF) {
+ netdev_dbg(priv->ndev, "CRC Error\n");
+ rx_errors++;
+ writeb(~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr);
+ if (skb)
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ }
+ if (ecsr & RCAR_CAN_ECSR_AEF) {
+ netdev_dbg(priv->ndev, "ACK Error\n");
+ tx_errors++;
+ writeb(~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr);
+ if (skb) {
+ cf->can_id |= CAN_ERR_ACK;
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ }
+ }
+ if (ecsr & RCAR_CAN_ECSR_FEF) {
+ netdev_dbg(priv->ndev, "Form Error\n");
+ rx_errors++;
+ writeb(~RCAR_CAN_ECSR_FEF, &priv->regs->ecsr);
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ }
+ if (ecsr & RCAR_CAN_ECSR_SEF) {
+ netdev_dbg(priv->ndev, "Stuff Error\n");
+ rx_errors++;
+ writeb(~RCAR_CAN_ECSR_SEF, &priv->regs->ecsr);
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ }
+
+ priv->can.can_stats.bus_error++;
+ ndev->stats.rx_errors += rx_errors;
+ ndev->stats.tx_errors += tx_errors;
+ writeb(~RCAR_CAN_EIFR_BEIF, &priv->regs->eifr);
+ }
+ if (eifr & RCAR_CAN_EIFR_EWIF) {
+ netdev_dbg(priv->ndev, "Error warning interrupt\n");
+ priv->can.state = CAN_STATE_ERROR_WARNING;
+ priv->can.can_stats.error_warning++;
+ /* Clear interrupt condition */
+ writeb(~RCAR_CAN_EIFR_EWIF, &priv->regs->eifr);
+ if (skb)
+ cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ }
+ if (eifr & RCAR_CAN_EIFR_EPIF) {
+ netdev_dbg(priv->ndev, "Error passive interrupt\n");
+ priv->can.state = CAN_STATE_ERROR_PASSIVE;
+ priv->can.can_stats.error_passive++;
+ /* Clear interrupt condition */
+ writeb(~RCAR_CAN_EIFR_EPIF, &priv->regs->eifr);
+ if (skb)
+ cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_PASSIVE :
+ CAN_ERR_CRTL_RX_PASSIVE;
+ }
+ if (eifr & RCAR_CAN_EIFR_BOEIF) {
+ netdev_dbg(priv->ndev, "Bus-off entry interrupt\n");
+ tx_failure_cleanup(ndev);
+ priv->ier = RCAR_CAN_IER_ERSIE;
+ writeb(priv->ier, &priv->regs->ier);
+ priv->can.state = CAN_STATE_BUS_OFF;
+ /* Clear interrupt condition */
+ writeb(~RCAR_CAN_EIFR_BOEIF, &priv->regs->eifr);
+ priv->can.can_stats.bus_off++;
+ can_bus_off(ndev);
+ if (skb)
+ cf->can_id |= CAN_ERR_BUSOFF;
+ }
+ if (eifr & RCAR_CAN_EIFR_ORIF) {
+ netdev_dbg(priv->ndev, "Receive overrun error interrupt\n");
+ ndev->stats.rx_over_errors++;
+ ndev->stats.rx_errors++;
+ writeb(~RCAR_CAN_EIFR_ORIF, &priv->regs->eifr);
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ }
+ }
+ if (eifr & RCAR_CAN_EIFR_OLIF) {
+ netdev_dbg(priv->ndev,
+ "Overload Frame Transmission error interrupt\n");
+ ndev->stats.rx_over_errors++;
+ ndev->stats.rx_errors++;
+ writeb(~RCAR_CAN_EIFR_OLIF, &priv->regs->eifr);
+ if (skb) {
+ cf->can_id |= CAN_ERR_PROT;
+ cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
+ }
+ }
+
+ if (skb) {
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ }
+}
+
+static void rcar_can_tx_done(struct net_device *ndev)
+{
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ u8 isr;
+
+ while (1) {
+ u8 unsent = readb(&priv->regs->tfcr);
+
+ unsent = (unsent & RCAR_CAN_TFCR_TFUST) >>
+ RCAR_CAN_TFCR_TFUST_SHIFT;
+ if (priv->tx_head - priv->tx_tail <= unsent)
+ break;
+ stats->tx_packets++;
+ stats->tx_bytes += priv->tx_dlc[priv->tx_tail %
+ RCAR_CAN_FIFO_DEPTH];
+ priv->tx_dlc[priv->tx_tail % RCAR_CAN_FIFO_DEPTH] = 0;
+ can_get_echo_skb(ndev, priv->tx_tail % RCAR_CAN_FIFO_DEPTH);
+ priv->tx_tail++;
+ netif_wake_queue(ndev);
+ }
+ /* Clear interrupt */
+ isr = readb(&priv->regs->isr);
+ writeb(isr & ~RCAR_CAN_ISR_TXFF, &priv->regs->isr);
+ can_led_event(ndev, CAN_LED_EVENT_TX);
+}
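tx_head and tx_tail above are free-running counters: their difference is the number of frames in flight, and the modulo selects one of the RCAR_CAN_FIFO_DEPTH echo-skb slots. A hedged helper spelling out the invariant that rcar_can_start_xmit() (further below) uses to stop the queue (illustrative, not part of the driver):

	/* Illustrative only: true once every Tx FIFO entry is occupied. */
	static inline bool rcar_can_tx_fifo_full(const struct rcar_can_priv *priv)
	{
		return priv->tx_head - priv->tx_tail >= RCAR_CAN_FIFO_DEPTH;
	}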
+
+static irqreturn_t rcar_can_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ u8 isr;
+
+ isr = readb(&priv->regs->isr);
+ if (!(isr & priv->ier))
+ return IRQ_NONE;
+
+ if (isr & RCAR_CAN_ISR_ERSF)
+ rcar_can_error(ndev);
+
+ if (isr & RCAR_CAN_ISR_TXFF)
+ rcar_can_tx_done(ndev);
+
+ if (isr & RCAR_CAN_ISR_RXFF) {
+ if (napi_schedule_prep(&priv->napi)) {
+ /* Disable Rx FIFO interrupts */
+ priv->ier &= ~RCAR_CAN_IER_RXFIE;
+ writeb(priv->ier, &priv->regs->ier);
+ __napi_schedule(&priv->napi);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void rcar_can_set_bittiming(struct net_device *dev)
+{
+ struct rcar_can_priv *priv = netdev_priv(dev);
+ struct can_bittiming *bt = &priv->can.bittiming;
+ u32 bcr;
+
+ bcr = RCAR_CAN_BCR_TSEG1(bt->phase_seg1 + bt->prop_seg - 1) |
+ RCAR_CAN_BCR_BPR(bt->brp - 1) | RCAR_CAN_BCR_SJW(bt->sjw - 1) |
+ RCAR_CAN_BCR_TSEG2(bt->phase_seg2 - 1);
+ /* Don't overwrite CLKR with 32-bit BCR access; CLKR has 8-bit access.
+ * All the registers are big-endian but they get byte-swapped on 32-bit
+ * read/write (but not on 8-bit, contrary to the manuals)...
+ */
+ writel((bcr << 8) | priv->clock_select, &priv->regs->bcr);
+}
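The single writel() above works only because clkr immediately follows the three bcr bytes in struct rcar_can_regs (see the register map earlier in this file). A hedged compile-time check of that layout assumption (illustrative, not part of the driver; BUILD_BUG_ON() and offsetof() are standard kernel facilities):

	BUILD_BUG_ON(offsetof(struct rcar_can_regs, clkr) !=
		     offsetof(struct rcar_can_regs, bcr) + 3);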
+
+static void rcar_can_start(struct net_device *ndev)
+{
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ u16 ctlr;
+ int i;
+
+ /* Set controller to known mode:
+ * - FIFO mailbox mode
+ * - accept all messages
+ * - overrun mode
+ * CAN is in sleep mode after MCU hardware or software reset.
+ */
+ ctlr = readw(&priv->regs->ctlr);
+ ctlr &= ~RCAR_CAN_CTLR_SLPM;
+ writew(ctlr, &priv->regs->ctlr);
+ /* Go to reset mode */
+ ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET;
+ writew(ctlr, &priv->regs->ctlr);
+ for (i = 0; i < MAX_STR_READS; i++) {
+ if (readw(&priv->regs->str) & RCAR_CAN_STR_RSTST)
+ break;
+ }
+ rcar_can_set_bittiming(ndev);
+ ctlr |= RCAR_CAN_CTLR_IDFM_MIXED; /* Select mixed ID mode */
+ ctlr |= RCAR_CAN_CTLR_BOM_ENT; /* Entry to halt mode automatically */
+ /* at bus-off */
+ ctlr |= RCAR_CAN_CTLR_MBM; /* Select FIFO mailbox mode */
+ ctlr |= RCAR_CAN_CTLR_MLM; /* Overrun mode */
+ writew(ctlr, &priv->regs->ctlr);
+
+ /* Accept all SID and EID */
+ writel(0, &priv->regs->mkr_2_9[6]);
+ writel(0, &priv->regs->mkr_2_9[7]);
+ /* In FIFO mailbox mode, write "0" to bits 24 to 31 */
+ writel(0, &priv->regs->mkivlr1);
+ /* Accept all frames */
+ writel(0, &priv->regs->fidcr[0]);
+ writel(RCAR_CAN_FIDCR_IDE | RCAR_CAN_FIDCR_RTR, &priv->regs->fidcr[1]);
+ /* Enable and configure FIFO mailbox interrupts */
+ writel(RCAR_CAN_MIER1_RXFIE | RCAR_CAN_MIER1_TXFIE, &priv->regs->mier1);
+
+ priv->ier = RCAR_CAN_IER_ERSIE | RCAR_CAN_IER_RXFIE |
+ RCAR_CAN_IER_TXFIE;
+ writeb(priv->ier, &priv->regs->ier);
+
+ /* Accumulate error codes */
+ writeb(RCAR_CAN_ECSR_EDPM, &priv->regs->ecsr);
+ /* Enable error interrupts */
+ writeb(RCAR_CAN_EIER_EWIE | RCAR_CAN_EIER_EPIE | RCAR_CAN_EIER_BOEIE |
+ (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING ?
+ RCAR_CAN_EIER_BEIE : 0) | RCAR_CAN_EIER_ORIE |
+ RCAR_CAN_EIER_OLIE, &priv->regs->eier);
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ /* Go to operation mode */
+ writew(ctlr & ~RCAR_CAN_CTLR_CANM, &priv->regs->ctlr);
+ for (i = 0; i < MAX_STR_READS; i++) {
+ if (!(readw(&priv->regs->str) & RCAR_CAN_STR_RSTST))
+ break;
+ }
+ /* Enable Rx and Tx FIFO */
+ writeb(RCAR_CAN_RFCR_RFE, &priv->regs->rfcr);
+ writeb(RCAR_CAN_TFCR_TFE, &priv->regs->tfcr);
+}
+
+static int rcar_can_open(struct net_device *ndev)
+{
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ int err;
+
+ err = clk_prepare_enable(priv->clk);
+ if (err) {
+ netdev_err(ndev,
+ "failed to enable peripheral clock, error %d\n",
+ err);
+ goto out;
+ }
+ err = clk_prepare_enable(priv->can_clk);
+ if (err) {
+ netdev_err(ndev, "failed to enable CAN clock, error %d\n",
+ err);
+ goto out_clock;
+ }
+ err = open_candev(ndev);
+ if (err) {
+ netdev_err(ndev, "open_candev() failed, error %d\n", err);
+ goto out_can_clock;
+ }
+ napi_enable(&priv->napi);
+ err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev);
+ if (err) {
+ netdev_err(ndev, "request_irq(%d) failed, error %d\n",
+ ndev->irq, err);
+ goto out_close;
+ }
+ can_led_event(ndev, CAN_LED_EVENT_OPEN);
+ rcar_can_start(ndev);
+ netif_start_queue(ndev);
+ return 0;
+out_close:
+ napi_disable(&priv->napi);
+ close_candev(ndev);
+out_can_clock:
+ clk_disable_unprepare(priv->can_clk);
+out_clock:
+ clk_disable_unprepare(priv->clk);
+out:
+ return err;
+}
+
+static void rcar_can_stop(struct net_device *ndev)
+{
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ u16 ctlr;
+ int i;
+
+ /* Go to (force) reset mode */
+ ctlr = readw(&priv->regs->ctlr);
+ ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET;
+ writew(ctlr, &priv->regs->ctlr);
+ for (i = 0; i < MAX_STR_READS; i++) {
+ if (readw(&priv->regs->str) & RCAR_CAN_STR_RSTST)
+ break;
+ }
+ writel(0, &priv->regs->mier0);
+ writel(0, &priv->regs->mier1);
+ writeb(0, &priv->regs->ier);
+ writeb(0, &priv->regs->eier);
+ /* Go to sleep mode */
+ ctlr |= RCAR_CAN_CTLR_SLPM;
+ writew(ctlr, &priv->regs->ctlr);
+ priv->can.state = CAN_STATE_STOPPED;
+}
+
+static int rcar_can_close(struct net_device *ndev)
+{
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ rcar_can_stop(ndev);
+ free_irq(ndev->irq, ndev);
+ napi_disable(&priv->napi);
+ clk_disable_unprepare(priv->can_clk);
+ clk_disable_unprepare(priv->clk);
+ close_candev(ndev);
+ can_led_event(ndev, CAN_LED_EVENT_STOP);
+ return 0;
+}
+
+static netdev_tx_t rcar_can_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ u32 data, i;
+
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */
+ data = (cf->can_id & CAN_EFF_MASK) | RCAR_CAN_IDE;
+ else /* Standard frame format */
+ data = (cf->can_id & CAN_SFF_MASK) << RCAR_CAN_SID_SHIFT;
+
+ if (cf->can_id & CAN_RTR_FLAG) { /* Remote transmission request */
+ data |= RCAR_CAN_RTR;
+ } else {
+ for (i = 0; i < cf->can_dlc; i++)
+ writeb(cf->data[i],
+ &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].data[i]);
+ }
+
+ writel(data, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].id);
+
+ writeb(cf->can_dlc, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].dlc);
+
+ priv->tx_dlc[priv->tx_head % RCAR_CAN_FIFO_DEPTH] = cf->can_dlc;
+ can_put_echo_skb(skb, ndev, priv->tx_head % RCAR_CAN_FIFO_DEPTH);
+ priv->tx_head++;
+ /* Start Tx: write 0xff to the TFPCR register to increment
+ * the CPU-side pointer for the transmit FIFO to the next
+ * mailbox location
+ */
+ writeb(0xff, &priv->regs->tfpcr);
+ /* Stop the queue if we've filled all FIFO entries */
+ if (priv->tx_head - priv->tx_tail >= RCAR_CAN_FIFO_DEPTH)
+ netif_stop_queue(ndev);
+
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops rcar_can_netdev_ops = {
+ .ndo_open = rcar_can_open,
+ .ndo_stop = rcar_can_close,
+ .ndo_start_xmit = rcar_can_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+static void rcar_can_rx_pkt(struct rcar_can_priv *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ u32 data;
+ u8 dlc;
+
+ skb = alloc_can_skb(priv->ndev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ data = readl(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].id);
+ if (data & RCAR_CAN_IDE)
+ cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ else
+ cf->can_id = (data >> RCAR_CAN_SID_SHIFT) & CAN_SFF_MASK;
+
+ dlc = readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].dlc);
+ cf->can_dlc = get_can_dlc(dlc);
+ if (data & RCAR_CAN_RTR) {
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ for (dlc = 0; dlc < cf->can_dlc; dlc++)
+ cf->data[dlc] =
+ readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].data[dlc]);
+ }
+
+ can_led_event(priv->ndev, CAN_LED_EVENT_RX);
+
+ stats->rx_bytes += cf->can_dlc;
+ stats->rx_packets++;
+ netif_receive_skb(skb);
+}
+
+static int rcar_can_rx_poll(struct napi_struct *napi, int quota)
+{
+ struct rcar_can_priv *priv = container_of(napi,
+ struct rcar_can_priv, napi);
+ int num_pkts;
+
+ for (num_pkts = 0; num_pkts < quota; num_pkts++) {
+ u8 rfcr, isr;
+
+ isr = readb(&priv->regs->isr);
+ /* Clear interrupt bit */
+ if (isr & RCAR_CAN_ISR_RXFF)
+ writeb(isr & ~RCAR_CAN_ISR_RXFF, &priv->regs->isr);
+ rfcr = readb(&priv->regs->rfcr);
+ if (rfcr & RCAR_CAN_RFCR_RFEST)
+ break;
+ rcar_can_rx_pkt(priv);
+ /* Write 0xff to the RFPCR register to increment
+ * the CPU-side pointer for the receive FIFO
+ * to the next mailbox location
+ */
+ writeb(0xff, &priv->regs->rfpcr);
+ }
+ /* All packets processed */
+ if (num_pkts < quota) {
+ napi_complete(napi);
+ priv->ier |= RCAR_CAN_IER_RXFIE;
+ writeb(priv->ier, &priv->regs->ier);
+ }
+ return num_pkts;
+}
+
+static int rcar_can_do_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ switch (mode) {
+ case CAN_MODE_START:
+ rcar_can_start(ndev);
+ netif_wake_queue(ndev);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int rcar_can_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
+{
+ struct rcar_can_priv *priv = netdev_priv(dev);
+ int err;
+
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ return err;
+ bec->txerr = readb(&priv->regs->tecr);
+ bec->rxerr = readb(&priv->regs->recr);
+ clk_disable_unprepare(priv->clk);
+ return 0;
+}
+
+static const char * const clock_names[] = {
+ [CLKR_CLKP1] = "clkp1",
+ [CLKR_CLKP2] = "clkp2",
+ [CLKR_CLKEXT] = "can_clk",
+};
+
+static int rcar_can_probe(struct platform_device *pdev)
+{
+ struct rcar_can_platform_data *pdata;
+ struct rcar_can_priv *priv;
+ struct net_device *ndev;
+ struct resource *mem;
+ void __iomem *addr;
+ u32 clock_select = CLKR_CLKP1;
+ int err = -ENODEV;
+ int irq;
+
+ if (pdev->dev.of_node) {
+ of_property_read_u32(pdev->dev.of_node,
+ "renesas,can-clock-select", &clock_select);
+ } else {
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data provided!\n");
+ goto fail;
+ }
+ clock_select = pdata->clock_select;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "No IRQ resource\n");
+ err = irq;
+ goto fail;
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ addr = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(addr)) {
+ err = PTR_ERR(addr);
+ goto fail;
+ }
+
+ ndev = alloc_candev(sizeof(struct rcar_can_priv), RCAR_CAN_FIFO_DEPTH);
+ if (!ndev) {
+ dev_err(&pdev->dev, "alloc_candev() failed\n");
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ priv = netdev_priv(ndev);
+
+ priv->clk = devm_clk_get(&pdev->dev, "clkp1");
+ if (IS_ERR(priv->clk)) {
+ err = PTR_ERR(priv->clk);
+ dev_err(&pdev->dev, "cannot get peripheral clock, error %d\n",
+ err);
+ goto fail_clk;
+ }
+
+ if (clock_select >= ARRAY_SIZE(clock_names)) {
+ err = -EINVAL;
+ dev_err(&pdev->dev, "invalid CAN clock selected\n");
+ goto fail_clk;
+ }
+ priv->can_clk = devm_clk_get(&pdev->dev, clock_names[clock_select]);
+ if (IS_ERR(priv->can_clk)) {
+ err = PTR_ERR(priv->can_clk);
+ dev_err(&pdev->dev, "cannot get CAN clock, error %d\n", err);
+ goto fail_clk;
+ }
+
+ ndev->netdev_ops = &rcar_can_netdev_ops;
+ ndev->irq = irq;
+ ndev->flags |= IFF_ECHO;
+ priv->ndev = ndev;
+ priv->regs = addr;
+ priv->clock_select = clock_select;
+ priv->can.clock.freq = clk_get_rate(priv->can_clk);
+ priv->can.bittiming_const = &rcar_can_bittiming_const;
+ priv->can.do_set_mode = rcar_can_do_set_mode;
+ priv->can.do_get_berr_counter = rcar_can_get_berr_counter;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
+ platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ netif_napi_add(ndev, &priv->napi, rcar_can_rx_poll,
+ RCAR_CAN_NAPI_WEIGHT);
+ err = register_candev(ndev);
+ if (err) {
+ dev_err(&pdev->dev, "register_candev() failed, error %d\n",
+ err);
+ goto fail_candev;
+ }
+
+ devm_can_led_init(ndev);
+
+ dev_info(&pdev->dev, "device registered (regs @ %p, IRQ%d)\n",
+ priv->regs, ndev->irq);
+
+ return 0;
+fail_candev:
+ netif_napi_del(&priv->napi);
+fail_clk:
+ free_candev(ndev);
+fail:
+ return err;
+}
+
+static int rcar_can_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+
+ unregister_candev(ndev);
+ netif_napi_del(&priv->napi);
+ free_candev(ndev);
+ return 0;
+}
+
+static int __maybe_unused rcar_can_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ u16 ctlr;
+
+ if (netif_running(ndev)) {
+ netif_stop_queue(ndev);
+ netif_device_detach(ndev);
+ }
+ ctlr = readw(&priv->regs->ctlr);
+ ctlr |= RCAR_CAN_CTLR_CANM_HALT;
+ writew(ctlr, &priv->regs->ctlr);
+ ctlr |= RCAR_CAN_CTLR_SLPM;
+ writew(ctlr, &priv->regs->ctlr);
+ priv->can.state = CAN_STATE_SLEEPING;
+
+ clk_disable(priv->clk);
+ return 0;
+}
+
+static int __maybe_unused rcar_can_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ u16 ctlr;
+ int err;
+
+ err = clk_enable(priv->clk);
+ if (err) {
+ netdev_err(ndev, "clk_enable() failed, error %d\n", err);
+ return err;
+ }
+
+ ctlr = readw(&priv->regs->ctlr);
+ ctlr &= ~RCAR_CAN_CTLR_SLPM;
+ writew(ctlr, &priv->regs->ctlr);
+ ctlr &= ~RCAR_CAN_CTLR_CANM;
+ writew(ctlr, &priv->regs->ctlr);
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ if (netif_running(ndev)) {
+ netif_device_attach(ndev);
+ netif_start_queue(ndev);
+ }
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(rcar_can_pm_ops, rcar_can_suspend, rcar_can_resume);
+
+static const struct of_device_id rcar_can_of_table[] __maybe_unused = {
+ { .compatible = "renesas,can-r8a7778" },
+ { .compatible = "renesas,can-r8a7779" },
+ { .compatible = "renesas,can-r8a7790" },
+ { .compatible = "renesas,can-r8a7791" },
+ { .compatible = "renesas,rcar-gen1-can" },
+ { .compatible = "renesas,rcar-gen2-can" },
+ { .compatible = "renesas,rcar-gen3-can" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rcar_can_of_table);
+
+static struct platform_driver rcar_can_driver = {
+ .driver = {
+ .name = RCAR_CAN_DRV_NAME,
+ .of_match_table = of_match_ptr(rcar_can_of_table),
+ .pm = &rcar_can_pm_ops,
+ },
+ .probe = rcar_can_probe,
+ .remove = rcar_can_remove,
+};
+
+module_platform_driver(rcar_can_driver);
+
+MODULE_AUTHOR("Cogent Embedded, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CAN driver for Renesas R-Car SoC");
+MODULE_ALIAS("platform:" RCAR_CAN_DRV_NAME);
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
new file mode 100644
index 000000000..43cdd5544
--- /dev/null
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -0,0 +1,1858 @@
+/* Renesas R-Car CAN FD device driver
+ *
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/* The R-Car CAN FD controller can operate in either of the two modes below:
+ * - CAN FD only mode
+ * - Classical CAN (CAN 2.0) only mode
+ *
+ * This driver puts the controller in CAN FD only mode by default. In this
+ * mode, the controller acts as a CAN FD node that can also interoperate with
+ * CAN 2.0 nodes.
+ *
+ * To switch the controller to Classical CAN (CAN 2.0) only mode, add the
+ * optional "renesas,no-can-fd" property to the device tree node. A h/w reset
+ * is also required to switch modes.
+ *
+ * Note: The h/w manual's register naming convention is clumsy and is not used
+ * as-is in the driver. However, the original names are kept as comments
+ * wherever a register is given a more readable name.
+ */
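A hedged sketch of how probe code might honor that property; of_property_read_bool() is a standard OF helper, while the gpriv/fdmode names are assumptions inferred from the RCANFD_GERFL_ERR() macro below:

	/* Assumption: the global priv structure carries an fdmode flag. */
	gpriv->fdmode = !of_property_read_bool(pdev->dev.of_node,
					       "renesas,no-can-fd");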
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/can/led.h>
+#include <linux/can/dev.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/iopoll.h>
+
+#define RCANFD_DRV_NAME "rcar_canfd"
+
+/* Global register bits */
+
+/* RSCFDnCFDGRMCFG */
+#define RCANFD_GRMCFG_RCMC BIT(0)
+
+/* RSCFDnCFDGCFG / RSCFDnGCFG */
+#define RCANFD_GCFG_EEFE BIT(6)
+#define RCANFD_GCFG_CMPOC BIT(5) /* CAN FD only */
+#define RCANFD_GCFG_DCS BIT(4)
+#define RCANFD_GCFG_DCE BIT(1)
+#define RCANFD_GCFG_TPRI BIT(0)
+
+/* RSCFDnCFDGCTR / RSCFDnGCTR */
+#define RCANFD_GCTR_TSRST BIT(16)
+#define RCANFD_GCTR_CFMPOFIE BIT(11) /* CAN FD only */
+#define RCANFD_GCTR_THLEIE BIT(10)
+#define RCANFD_GCTR_MEIE BIT(9)
+#define RCANFD_GCTR_DEIE BIT(8)
+#define RCANFD_GCTR_GSLPR BIT(2)
+#define RCANFD_GCTR_GMDC_MASK (0x3)
+#define RCANFD_GCTR_GMDC_GOPM (0x0)
+#define RCANFD_GCTR_GMDC_GRESET (0x1)
+#define RCANFD_GCTR_GMDC_GTEST (0x2)
+
+/* RSCFDnCFDGSTS / RSCFDnGSTS */
+#define RCANFD_GSTS_GRAMINIT BIT(3)
+#define RCANFD_GSTS_GSLPSTS BIT(2)
+#define RCANFD_GSTS_GHLTSTS BIT(1)
+#define RCANFD_GSTS_GRSTSTS BIT(0)
+/* Non-operational status */
+#define RCANFD_GSTS_GNOPM (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+/* RSCFDnCFDGERFL / RSCFDnGERFL */
+#define RCANFD_GERFL_EEF1 BIT(17)
+#define RCANFD_GERFL_EEF0 BIT(16)
+#define RCANFD_GERFL_CMPOF BIT(3) /* CAN FD only */
+#define RCANFD_GERFL_THLES BIT(2)
+#define RCANFD_GERFL_MES BIT(1)
+#define RCANFD_GERFL_DEF BIT(0)
+
+#define RCANFD_GERFL_ERR(gpriv, x) ((x) & (RCANFD_GERFL_EEF1 |\
+ RCANFD_GERFL_EEF0 | RCANFD_GERFL_MES |\
+ (gpriv->fdmode ?\
+ RCANFD_GERFL_CMPOF : 0)))
+
+/* AFL Rx rules registers */
+
+/* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */
+#define RCANFD_GAFLCFG_SETRNC(n, x)	(((x) & 0xff) << (24 - (n) * 8))
+#define RCANFD_GAFLCFG_GETRNC(n, x)	(((x) >> (24 - (n) * 8)) & 0xff)
+
+/* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */
+#define RCANFD_GAFLECTR_AFLDAE BIT(8)
+#define RCANFD_GAFLECTR_AFLPN(x) ((x) & 0x1f)
+
+/* RSCFDnCFDGAFLIDj / RSCFDnGAFLIDj */
+#define RCANFD_GAFLID_GAFLLB BIT(29)
+
+/* RSCFDnCFDGAFLP1_j / RSCFDnGAFLP1_j */
+#define RCANFD_GAFLP1_GAFLFDP(x) (1 << (x))
+
+/* Channel register bits */
+
+/* RSCFDnCmCFG - Classical CAN only */
+#define RCANFD_CFG_SJW(x) (((x) & 0x3) << 24)
+#define RCANFD_CFG_TSEG2(x) (((x) & 0x7) << 20)
+#define RCANFD_CFG_TSEG1(x) (((x) & 0xf) << 16)
+#define RCANFD_CFG_BRP(x) (((x) & 0x3ff) << 0)
+
+/* RSCFDnCFDCmNCFG - CAN FD only */
+#define RCANFD_NCFG_NTSEG2(x) (((x) & 0x1f) << 24)
+#define RCANFD_NCFG_NTSEG1(x) (((x) & 0x7f) << 16)
+#define RCANFD_NCFG_NSJW(x) (((x) & 0x1f) << 11)
+#define RCANFD_NCFG_NBRP(x) (((x) & 0x3ff) << 0)
+
+/* RSCFDnCFDCmCTR / RSCFDnCmCTR */
+#define RCANFD_CCTR_CTME BIT(24)
+#define RCANFD_CCTR_ERRD BIT(23)
+#define RCANFD_CCTR_BOM_MASK (0x3 << 21)
+#define RCANFD_CCTR_BOM_ISO (0x0 << 21)
+#define RCANFD_CCTR_BOM_BENTRY (0x1 << 21)
+#define RCANFD_CCTR_BOM_BEND (0x2 << 21)
+#define RCANFD_CCTR_TDCVFIE BIT(19)
+#define RCANFD_CCTR_SOCOIE BIT(18)
+#define RCANFD_CCTR_EOCOIE BIT(17)
+#define RCANFD_CCTR_TAIE BIT(16)
+#define RCANFD_CCTR_ALIE BIT(15)
+#define RCANFD_CCTR_BLIE BIT(14)
+#define RCANFD_CCTR_OLIE BIT(13)
+#define RCANFD_CCTR_BORIE BIT(12)
+#define RCANFD_CCTR_BOEIE BIT(11)
+#define RCANFD_CCTR_EPIE BIT(10)
+#define RCANFD_CCTR_EWIE BIT(9)
+#define RCANFD_CCTR_BEIE BIT(8)
+#define RCANFD_CCTR_CSLPR BIT(2)
+#define RCANFD_CCTR_CHMDC_MASK (0x3)
+#define RCANFD_CCTR_CHDMC_COPM (0x0)
+#define RCANFD_CCTR_CHDMC_CRESET (0x1)
+#define RCANFD_CCTR_CHDMC_CHLT (0x2)
+
+/* RSCFDnCFDCmSTS / RSCFDnCmSTS */
+#define RCANFD_CSTS_COMSTS BIT(7)
+#define RCANFD_CSTS_RECSTS BIT(6)
+#define RCANFD_CSTS_TRMSTS BIT(5)
+#define RCANFD_CSTS_BOSTS BIT(4)
+#define RCANFD_CSTS_EPSTS BIT(3)
+#define RCANFD_CSTS_SLPSTS BIT(2)
+#define RCANFD_CSTS_HLTSTS BIT(1)
+#define RCANFD_CSTS_CRSTSTS BIT(0)
+
+#define RCANFD_CSTS_TECCNT(x) (((x) >> 24) & 0xff)
+#define RCANFD_CSTS_RECCNT(x) (((x) >> 16) & 0xff)
+
+/* RSCFDnCFDCmERFL / RSCFDnCmERFL */
+#define RCANFD_CERFL_ADERR BIT(14)
+#define RCANFD_CERFL_B0ERR BIT(13)
+#define RCANFD_CERFL_B1ERR BIT(12)
+#define RCANFD_CERFL_CERR BIT(11)
+#define RCANFD_CERFL_AERR BIT(10)
+#define RCANFD_CERFL_FERR BIT(9)
+#define RCANFD_CERFL_SERR BIT(8)
+#define RCANFD_CERFL_ALF BIT(7)
+#define RCANFD_CERFL_BLF BIT(6)
+#define RCANFD_CERFL_OVLF BIT(5)
+#define RCANFD_CERFL_BORF BIT(4)
+#define RCANFD_CERFL_BOEF BIT(3)
+#define RCANFD_CERFL_EPF BIT(2)
+#define RCANFD_CERFL_EWF BIT(1)
+#define RCANFD_CERFL_BEF BIT(0)
+
+#define RCANFD_CERFL_ERR(x) ((x) & (0x7fff)) /* above bits 14:0 */
+
+/* RSCFDnCFDCmDCFG */
+#define RCANFD_DCFG_DSJW(x) (((x) & 0x7) << 24)
+#define RCANFD_DCFG_DTSEG2(x) (((x) & 0x7) << 20)
+#define RCANFD_DCFG_DTSEG1(x) (((x) & 0xf) << 16)
+#define RCANFD_DCFG_DBRP(x) (((x) & 0xff) << 0)
+
+/* RSCFDnCFDCmFDCFG */
+#define RCANFD_FDCFG_TDCE BIT(9)
+#define RCANFD_FDCFG_TDCOC BIT(8)
+#define RCANFD_FDCFG_TDCO(x)		(((x) & 0x7f) << 16)
+
+/* RSCFDnCFDRFCCx */
+#define RCANFD_RFCC_RFIM BIT(12)
+#define RCANFD_RFCC_RFDC(x) (((x) & 0x7) << 8)
+#define RCANFD_RFCC_RFPLS(x) (((x) & 0x7) << 4)
+#define RCANFD_RFCC_RFIE BIT(1)
+#define RCANFD_RFCC_RFE BIT(0)
+
+/* RSCFDnCFDRFSTSx */
+#define RCANFD_RFSTS_RFIF BIT(3)
+#define RCANFD_RFSTS_RFMLT BIT(2)
+#define RCANFD_RFSTS_RFFLL BIT(1)
+#define RCANFD_RFSTS_RFEMP BIT(0)
+
+/* RSCFDnCFDRFIDx */
+#define RCANFD_RFID_RFIDE BIT(31)
+#define RCANFD_RFID_RFRTR BIT(30)
+
+/* RSCFDnCFDRFPTRx */
+#define RCANFD_RFPTR_RFDLC(x) (((x) >> 28) & 0xf)
+#define RCANFD_RFPTR_RFPTR(x) (((x) >> 16) & 0xfff)
+#define RCANFD_RFPTR_RFTS(x) (((x) >> 0) & 0xffff)
+
+/* RSCFDnCFDRFFDSTSx */
+#define RCANFD_RFFDSTS_RFFDF BIT(2)
+#define RCANFD_RFFDSTS_RFBRS BIT(1)
+#define RCANFD_RFFDSTS_RFESI BIT(0)
+
+/* Common FIFO bits */
+
+/* RSCFDnCFDCFCCk */
+#define RCANFD_CFCC_CFTML(x) (((x) & 0xf) << 20)
+#define RCANFD_CFCC_CFM(x) (((x) & 0x3) << 16)
+#define RCANFD_CFCC_CFIM BIT(12)
+#define RCANFD_CFCC_CFDC(x) (((x) & 0x7) << 8)
+#define RCANFD_CFCC_CFPLS(x) (((x) & 0x7) << 4)
+#define RCANFD_CFCC_CFTXIE BIT(2)
+#define RCANFD_CFCC_CFE BIT(0)
+
+/* RSCFDnCFDCFSTSk */
+#define RCANFD_CFSTS_CFMC(x) (((x) >> 8) & 0xff)
+#define RCANFD_CFSTS_CFTXIF BIT(4)
+#define RCANFD_CFSTS_CFMLT BIT(2)
+#define RCANFD_CFSTS_CFFLL BIT(1)
+#define RCANFD_CFSTS_CFEMP BIT(0)
+
+/* RSCFDnCFDCFIDk */
+#define RCANFD_CFID_CFIDE BIT(31)
+#define RCANFD_CFID_CFRTR BIT(30)
+#define RCANFD_CFID_CFID_MASK(x) ((x) & 0x1fffffff)
+
+/* RSCFDnCFDCFPTRk */
+#define RCANFD_CFPTR_CFDLC(x) (((x) & 0xf) << 28)
+#define RCANFD_CFPTR_CFPTR(x) (((x) & 0xfff) << 16)
+#define RCANFD_CFPTR_CFTS(x) (((x) & 0xff) << 0)
+
+/* RSCFDnCFDCFFDCSTSk */
+#define RCANFD_CFFDCSTS_CFFDF BIT(2)
+#define RCANFD_CFFDCSTS_CFBRS BIT(1)
+#define RCANFD_CFFDCSTS_CFESI BIT(0)
+
+/* This controller supports either Classical CAN only mode or CAN FD only mode.
+ * The two modes use two separate sets of register maps and names. However,
+ * some of the register offsets are common to both modes; those offsets are
+ * listed below as Common registers.
+ *
+ * The CAN FD only mode specific registers and the Classical CAN only mode
+ * specific registers are listed separately. Their register names start with
+ * RCANFD_F_xxx and RCANFD_C_xxx respectively.
+ */
+
+/* Common registers */
+
+/* RSCFDnCFDCmNCFG / RSCFDnCmCFG */
+#define RCANFD_CCFG(m) (0x0000 + (0x10 * (m)))
+/* RSCFDnCFDCmCTR / RSCFDnCmCTR */
+#define RCANFD_CCTR(m) (0x0004 + (0x10 * (m)))
+/* RSCFDnCFDCmSTS / RSCFDnCmSTS */
+#define RCANFD_CSTS(m) (0x0008 + (0x10 * (m)))
+/* RSCFDnCFDCmERFL / RSCFDnCmERFL */
+#define RCANFD_CERFL(m) (0x000C + (0x10 * (m)))
+
+/* RSCFDnCFDGCFG / RSCFDnGCFG */
+#define RCANFD_GCFG (0x0084)
+/* RSCFDnCFDGCTR / RSCFDnGCTR */
+#define RCANFD_GCTR (0x0088)
+/* RSCFDnCFDGCTS / RSCFDnGCTS */
+#define RCANFD_GSTS (0x008c)
+/* RSCFDnCFDGERFL / RSCFDnGERFL */
+#define RCANFD_GERFL (0x0090)
+/* RSCFDnCFDGTSC / RSCFDnGTSC */
+#define RCANFD_GTSC (0x0094)
+/* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */
+#define RCANFD_GAFLECTR (0x0098)
+/* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */
+#define RCANFD_GAFLCFG0 (0x009c)
+/* RSCFDnCFDGAFLCFG1 / RSCFDnGAFLCFG1 */
+#define RCANFD_GAFLCFG1 (0x00a0)
+/* RSCFDnCFDRMNB / RSCFDnRMNB */
+#define RCANFD_RMNB (0x00a4)
+/* RSCFDnCFDRMND / RSCFDnRMND */
+#define RCANFD_RMND(y) (0x00a8 + (0x04 * (y)))
+
+/* RSCFDnCFDRFCCx / RSCFDnRFCCx */
+#define RCANFD_RFCC(x) (0x00b8 + (0x04 * (x)))
+/* RSCFDnCFDRFSTSx / RSCFDnRFSTSx */
+#define RCANFD_RFSTS(x) (0x00d8 + (0x04 * (x)))
+/* RSCFDnCFDRFPCTRx / RSCFDnRFPCTRx */
+#define RCANFD_RFPCTR(x) (0x00f8 + (0x04 * (x)))
+
+/* Common FIFO Control registers */
+
+/* RSCFDnCFDCFCCx / RSCFDnCFCCx */
+#define RCANFD_CFCC(ch, idx) (0x0118 + (0x0c * (ch)) + \
+ (0x04 * (idx)))
+/* RSCFDnCFDCFSTSx / RSCFDnCFSTSx */
+#define RCANFD_CFSTS(ch, idx) (0x0178 + (0x0c * (ch)) + \
+ (0x04 * (idx)))
+/* RSCFDnCFDCFPCTRx / RSCFDnCFPCTRx */
+#define RCANFD_CFPCTR(ch, idx) (0x01d8 + (0x0c * (ch)) + \
+ (0x04 * (idx)))
+
+/* RSCFDnCFDFESTS / RSCFDnFESTS */
+#define RCANFD_FESTS (0x0238)
+/* RSCFDnCFDFFSTS / RSCFDnFFSTS */
+#define RCANFD_FFSTS (0x023c)
+/* RSCFDnCFDFMSTS / RSCFDnFMSTS */
+#define RCANFD_FMSTS (0x0240)
+/* RSCFDnCFDRFISTS / RSCFDnRFISTS */
+#define RCANFD_RFISTS (0x0244)
+/* RSCFDnCFDCFRISTS / RSCFDnCFRISTS */
+#define RCANFD_CFRISTS (0x0248)
+/* RSCFDnCFDCFTISTS / RSCFDnCFTISTS */
+#define RCANFD_CFTISTS (0x024c)
+
+/* RSCFDnCFDTMCp / RSCFDnTMCp */
+#define RCANFD_TMC(p) (0x0250 + (0x01 * (p)))
+/* RSCFDnCFDTMSTSp / RSCFDnTMSTSp */
+#define RCANFD_TMSTS(p) (0x02d0 + (0x01 * (p)))
+
+/* RSCFDnCFDTMTRSTSp / RSCFDnTMTRSTSp */
+#define RCANFD_TMTRSTS(y) (0x0350 + (0x04 * (y)))
+/* RSCFDnCFDTMTARSTSp / RSCFDnTMTARSTSp */
+#define RCANFD_TMTARSTS(y) (0x0360 + (0x04 * (y)))
+/* RSCFDnCFDTMTCSTSp / RSCFDnTMTCSTSp */
+#define RCANFD_TMTCSTS(y) (0x0370 + (0x04 * (y)))
+/* RSCFDnCFDTMTASTSp / RSCFDnTMTASTSp */
+#define RCANFD_TMTASTS(y) (0x0380 + (0x04 * (y)))
+/* RSCFDnCFDTMIECy / RSCFDnTMIECy */
+#define RCANFD_TMIEC(y) (0x0390 + (0x04 * (y)))
+
+/* RSCFDnCFDTXQCCm / RSCFDnTXQCCm */
+#define RCANFD_TXQCC(m) (0x03a0 + (0x04 * (m)))
+/* RSCFDnCFDTXQSTSm / RSCFDnTXQSTSm */
+#define RCANFD_TXQSTS(m) (0x03c0 + (0x04 * (m)))
+/* RSCFDnCFDTXQPCTRm / RSCFDnTXQPCTRm */
+#define RCANFD_TXQPCTR(m) (0x03e0 + (0x04 * (m)))
+
+/* RSCFDnCFDTHLCCm / RSCFDnTHLCCm */
+#define RCANFD_THLCC(m) (0x0400 + (0x04 * (m)))
+/* RSCFDnCFDTHLSTSm / RSCFDnTHLSTSm */
+#define RCANFD_THLSTS(m) (0x0420 + (0x04 * (m)))
+/* RSCFDnCFDTHLPCTRm / RSCFDnTHLPCTRm */
+#define RCANFD_THLPCTR(m) (0x0440 + (0x04 * (m)))
+
+/* RSCFDnCFDGTINTSTS0 / RSCFDnGTINTSTS0 */
+#define RCANFD_GTINTSTS0 (0x0460)
+/* RSCFDnCFDGTINTSTS1 / RSCFDnGTINTSTS1 */
+#define RCANFD_GTINTSTS1 (0x0464)
+/* RSCFDnCFDGTSTCFG / RSCFDnGTSTCFG */
+#define RCANFD_GTSTCFG (0x0468)
+/* RSCFDnCFDGTSTCTR / RSCFDnGTSTCTR */
+#define RCANFD_GTSTCTR (0x046c)
+/* RSCFDnCFDGLOCKK / RSCFDnGLOCKK */
+#define RCANFD_GLOCKK (0x047c)
+/* RSCFDnCFDGRMCFG */
+#define RCANFD_GRMCFG (0x04fc)
+
+/* RSCFDnCFDGAFLIDj / RSCFDnGAFLIDj */
+#define RCANFD_GAFLID(offset, j) ((offset) + (0x10 * (j)))
+/* RSCFDnCFDGAFLMj / RSCFDnGAFLMj */
+#define RCANFD_GAFLM(offset, j) ((offset) + 0x04 + (0x10 * (j)))
+/* RSCFDnCFDGAFLP0j / RSCFDnGAFLP0j */
+#define RCANFD_GAFLP0(offset, j) ((offset) + 0x08 + (0x10 * (j)))
+/* RSCFDnCFDGAFLP1j / RSCFDnGAFLP1j */
+#define RCANFD_GAFLP1(offset, j) ((offset) + 0x0c + (0x10 * (j)))
+
+/* Classical CAN only mode register map */
+
+/* RSCFDnGAFLXXXj offset */
+#define RCANFD_C_GAFL_OFFSET (0x0500)
+
+/* RSCFDnRMXXXq -> RCANFD_C_RMXXX(q) */
+#define RCANFD_C_RMID(q) (0x0600 + (0x10 * (q)))
+#define RCANFD_C_RMPTR(q) (0x0604 + (0x10 * (q)))
+#define RCANFD_C_RMDF0(q) (0x0608 + (0x10 * (q)))
+#define RCANFD_C_RMDF1(q) (0x060c + (0x10 * (q)))
+
+/* RSCFDnRFXXx -> RCANFD_C_RFXX(x) */
+#define RCANFD_C_RFOFFSET (0x0e00)
+#define RCANFD_C_RFID(x) (RCANFD_C_RFOFFSET + (0x10 * (x)))
+#define RCANFD_C_RFPTR(x) (RCANFD_C_RFOFFSET + 0x04 + \
+ (0x10 * (x)))
+#define RCANFD_C_RFDF(x, df) (RCANFD_C_RFOFFSET + 0x08 + \
+ (0x10 * (x)) + (0x04 * (df)))
+
+/* RSCFDnCFXXk -> RCANFD_C_CFXX(ch, k) */
+#define RCANFD_C_CFOFFSET (0x0e80)
+#define RCANFD_C_CFID(ch, idx) (RCANFD_C_CFOFFSET + (0x30 * (ch)) + \
+ (0x10 * (idx)))
+#define RCANFD_C_CFPTR(ch, idx) (RCANFD_C_CFOFFSET + 0x04 + \
+ (0x30 * (ch)) + (0x10 * (idx)))
+#define RCANFD_C_CFDF(ch, idx, df) (RCANFD_C_CFOFFSET + 0x08 + \
+ (0x30 * (ch)) + (0x10 * (idx)) + \
+ (0x04 * (df)))
+
+/* RSCFDnTMXXp -> RCANFD_C_TMXX(p) */
+#define RCANFD_C_TMID(p) (0x1000 + (0x10 * (p)))
+#define RCANFD_C_TMPTR(p) (0x1004 + (0x10 * (p)))
+#define RCANFD_C_TMDF0(p) (0x1008 + (0x10 * (p)))
+#define RCANFD_C_TMDF1(p) (0x100c + (0x10 * (p)))
+
+/* RSCFDnTHLACCm */
+#define RCANFD_C_THLACC(m) (0x1800 + (0x04 * (m)))
+/* RSCFDnRPGACCr */
+#define RCANFD_C_RPGACC(r) (0x1900 + (0x04 * (r)))
+
+/* CAN FD mode specific register map */
+
+/* RSCFDnCFDCmXXX -> RCANFD_F_XXX(m) */
+#define RCANFD_F_DCFG(m) (0x0500 + (0x20 * (m)))
+#define RCANFD_F_CFDCFG(m) (0x0504 + (0x20 * (m)))
+#define RCANFD_F_CFDCTR(m) (0x0508 + (0x20 * (m)))
+#define RCANFD_F_CFDSTS(m) (0x050c + (0x20 * (m)))
+#define RCANFD_F_CFDCRC(m) (0x0510 + (0x20 * (m)))
+
+/* RSCFDnCFDGAFLXXXj offset */
+#define RCANFD_F_GAFL_OFFSET (0x1000)
+
+/* RSCFDnCFDRMXXXq -> RCANFD_F_RMXXX(q) */
+#define RCANFD_F_RMID(q) (0x2000 + (0x20 * (q)))
+#define RCANFD_F_RMPTR(q) (0x2004 + (0x20 * (q)))
+#define RCANFD_F_RMFDSTS(q) (0x2008 + (0x20 * (q)))
+#define RCANFD_F_RMDF(q, b) (0x200c + (0x04 * (b)) + (0x20 * (q)))
+
+/* RSCFDnCFDRFXXx -> RCANFD_F_RFXX(x) */
+#define RCANFD_F_RFOFFSET (0x3000)
+#define RCANFD_F_RFID(x) (RCANFD_F_RFOFFSET + (0x80 * (x)))
+#define RCANFD_F_RFPTR(x) (RCANFD_F_RFOFFSET + 0x04 + \
+ (0x80 * (x)))
+#define RCANFD_F_RFFDSTS(x) (RCANFD_F_RFOFFSET + 0x08 + \
+ (0x80 * (x)))
+#define RCANFD_F_RFDF(x, df) (RCANFD_F_RFOFFSET + 0x0c + \
+ (0x80 * (x)) + (0x04 * (df)))
+
+/* RSCFDnCFDCFXXk -> RCANFD_F_CFXX(ch, k) */
+#define RCANFD_F_CFOFFSET (0x3400)
+#define RCANFD_F_CFID(ch, idx) (RCANFD_F_CFOFFSET + (0x180 * (ch)) + \
+ (0x80 * (idx)))
+#define RCANFD_F_CFPTR(ch, idx) (RCANFD_F_CFOFFSET + 0x04 + \
+ (0x180 * (ch)) + (0x80 * (idx)))
+#define RCANFD_F_CFFDCSTS(ch, idx) (RCANFD_F_CFOFFSET + 0x08 + \
+ (0x180 * (ch)) + (0x80 * (idx)))
+#define RCANFD_F_CFDF(ch, idx, df) (RCANFD_F_CFOFFSET + 0x0c + \
+ (0x180 * (ch)) + (0x80 * (idx)) + \
+ (0x04 * (df)))
+
+/* RSCFDnCFDTMXXp -> RCANFD_F_TMXX(p) */
+#define RCANFD_F_TMID(p) (0x4000 + (0x20 * (p)))
+#define RCANFD_F_TMPTR(p) (0x4004 + (0x20 * (p)))
+#define RCANFD_F_TMFDCTR(p) (0x4008 + (0x20 * (p)))
+#define RCANFD_F_TMDF(p, b) (0x400c + (0x20 * (p)) + (0x04 * (b)))
+
+/* RSCFDnCFDTHLACCm */
+#define RCANFD_F_THLACC(m) (0x6000 + (0x04 * (m)))
+/* RSCFDnCFDRPGACCr */
+#define RCANFD_F_RPGACC(r) (0x6400 + (0x04 * (r)))
+
+/* Constants */
+#define RCANFD_FIFO_DEPTH 8 /* Tx FIFO depth */
+#define RCANFD_NAPI_WEIGHT 8 /* Rx poll quota */
+
+#define RCANFD_NUM_CHANNELS 2 /* Two channels max */
+#define RCANFD_CHANNELS_MASK BIT((RCANFD_NUM_CHANNELS) - 1)
+
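+/* AFL entries are handled in pages of 16 rules each (entry / 16 = page) */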
+#define RCANFD_GAFL_PAGENUM(entry) ((entry) / 16)
+#define RCANFD_CHANNEL_NUMRULES 1 /* only one rule per channel */
+
+/* Rx FIFO is a global resource of the controller. There are 8 such FIFOs
+ * available. Each channel gets a dedicated Rx FIFO, i.e. the channel
+ * number is added to the RFFIFO index.
+ */
+#define RCANFD_RFFIFO_IDX 0
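+/* For example, with RCANFD_RFFIFO_IDX 0, channel 1 uses Rx FIFO 1, whose
+ * control register RCANFD_RFCC(1) is at offset 0x00bc (0x00b8 + 0x04).
+ */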
+
+/* Tx/Rx or Common FIFO is a per channel resource. Each channel has 3 Common
+ * FIFOs dedicated to it. Use the first (index 0) FIFO out of the 3 for Tx.
+ */
+#define RCANFD_CFFIFO_IDX 0
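+/* For example, channel 1's Tx Common FIFO control register
+ * RCANFD_CFCC(1, 0) resolves to offset 0x0124 (0x0118 + 0x0c * 1).
+ */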
+
+/* fCAN clock select register settings */
+enum rcar_canfd_fcanclk {
+ RCANFD_CANFDCLK = 0, /* CANFD clock */
+ RCANFD_EXTCLK, /* Externally input clock */
+};
+
+struct rcar_canfd_global;
+
+/* Channel priv data */
+struct rcar_canfd_channel {
+ struct can_priv can; /* Must be the first member */
+ struct net_device *ndev;
+ struct rcar_canfd_global *gpriv; /* Controller reference */
+ void __iomem *base; /* Register base address */
+ struct napi_struct napi;
+ u8 tx_len[RCANFD_FIFO_DEPTH]; /* For net stats */
+ u32 tx_head; /* Incremented on xmit */
+ u32 tx_tail; /* Incremented on xmit done */
+ u32 channel; /* Channel number */
+ spinlock_t tx_lock; /* To protect tx path */
+};
+
+/* Global priv data */
+struct rcar_canfd_global {
+ struct rcar_canfd_channel *ch[RCANFD_NUM_CHANNELS];
+ void __iomem *base; /* Register base address */
+ struct platform_device *pdev; /* Respective platform device */
+ struct clk *clkp; /* Peripheral clock */
+ struct clk *can_clk; /* fCAN clock */
+ enum rcar_canfd_fcanclk fcan; /* CANFD or Ext clock */
+ unsigned long channels_mask; /* Enabled channels mask */
+ bool fdmode; /* CAN FD or Classical CAN only mode */
+};
+
+/* CAN FD mode nominal rate constants */
+static const struct can_bittiming_const rcar_canfd_nom_bittiming_const = {
+ .name = RCANFD_DRV_NAME,
+ .tseg1_min = 2,
+ .tseg1_max = 128,
+ .tseg2_min = 2,
+ .tseg2_max = 32,
+ .sjw_max = 32,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
+/* CAN FD mode data rate constants */
+static const struct can_bittiming_const rcar_canfd_data_bittiming_const = {
+ .name = RCANFD_DRV_NAME,
+ .tseg1_min = 2,
+ .tseg1_max = 16,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 8,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
+/* Classical CAN mode bitrate constants */
+static const struct can_bittiming_const rcar_canfd_bittiming_const = {
+ .name = RCANFD_DRV_NAME,
+ .tseg1_min = 4,
+ .tseg1_max = 16,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
+/* Helper functions */
+static inline void rcar_canfd_update(u32 mask, u32 val, u32 __iomem *reg)
+{
+ u32 data = readl(reg);
+
+ data &= ~mask;
+ data |= (val & mask);
+ writel(data, reg);
+}
+
+static inline u32 rcar_canfd_read(void __iomem *base, u32 offset)
+{
+ return readl(base + (offset));
+}
+
+static inline void rcar_canfd_write(void __iomem *base, u32 offset, u32 val)
+{
+ writel(val, base + (offset));
+}
+
+static void rcar_canfd_set_bit(void __iomem *base, u32 reg, u32 val)
+{
+ rcar_canfd_update(val, val, base + (reg));
+}
+
+static void rcar_canfd_clear_bit(void __iomem *base, u32 reg, u32 val)
+{
+ rcar_canfd_update(val, 0, base + (reg));
+}
+
+static void rcar_canfd_update_bit(void __iomem *base, u32 reg,
+ u32 mask, u32 val)
+{
+ rcar_canfd_update(mask, val, base + (reg));
+}
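+
+/* Example: rcar_canfd_update_bit(base, RCANFD_GCTR, RCANFD_GCTR_GMDC_MASK,
+ * RCANFD_GCTR_GMDC_GRESET) rewrites only the GMDC mode field and leaves
+ * the other GCTR bits untouched.
+ */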
+
+static void rcar_canfd_get_data(struct rcar_canfd_channel *priv,
+ struct canfd_frame *cf, u32 off)
+{
+ u32 i, lwords;
+
+ lwords = DIV_ROUND_UP(cf->len, sizeof(u32));
+ for (i = 0; i < lwords; i++)
+ *((u32 *)cf->data + i) =
+ rcar_canfd_read(priv->base, off + (i * sizeof(u32)));
+}
+
+static void rcar_canfd_put_data(struct rcar_canfd_channel *priv,
+ struct canfd_frame *cf, u32 off)
+{
+ u32 i, lwords;
+
+ lwords = DIV_ROUND_UP(cf->len, sizeof(u32));
+ for (i = 0; i < lwords; i++)
+ rcar_canfd_write(priv->base, off + (i * sizeof(u32)),
+ *((u32 *)cf->data + i));
+}
+
+static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev)
+{
+ u32 i;
+
+ for (i = 0; i < RCANFD_FIFO_DEPTH; i++)
+ can_free_echo_skb(ndev, i);
+}
+
+static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
+{
+ u32 sts, ch;
+ int err;
+
+ /* Check RAMINIT flag as CAN RAM initialization takes place
+ * after the MCU reset
+ */
+ err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
+ !(sts & RCANFD_GSTS_GRAMINIT), 2, 500000);
+ if (err) {
+ dev_dbg(&gpriv->pdev->dev, "global raminit failed\n");
+ return err;
+ }
+
+ /* Transition to Global Reset mode */
+ rcar_canfd_clear_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GSLPR);
+ rcar_canfd_update_bit(gpriv->base, RCANFD_GCTR,
+ RCANFD_GCTR_GMDC_MASK, RCANFD_GCTR_GMDC_GRESET);
+
+ /* Ensure Global reset mode */
+ err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
+ (sts & RCANFD_GSTS_GRSTSTS), 2, 500000);
+ if (err) {
+ dev_dbg(&gpriv->pdev->dev, "global reset failed\n");
+ return err;
+ }
+
+ /* Reset Global error flags */
+ rcar_canfd_write(gpriv->base, RCANFD_GERFL, 0x0);
+
+ /* Set the controller into appropriate mode */
+ if (gpriv->fdmode)
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG,
+ RCANFD_GRMCFG_RCMC);
+ else
+ rcar_canfd_clear_bit(gpriv->base, RCANFD_GRMCFG,
+ RCANFD_GRMCFG_RCMC);
+
+ /* Transition all Channels to reset mode */
+ for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ rcar_canfd_clear_bit(gpriv->base,
+ RCANFD_CCTR(ch), RCANFD_CCTR_CSLPR);
+
+ rcar_canfd_update_bit(gpriv->base, RCANFD_CCTR(ch),
+ RCANFD_CCTR_CHMDC_MASK,
+ RCANFD_CCTR_CHDMC_CRESET);
+
+ /* Ensure Channel reset mode */
+ err = readl_poll_timeout((gpriv->base + RCANFD_CSTS(ch)), sts,
+ (sts & RCANFD_CSTS_CRSTSTS),
+ 2, 500000);
+ if (err) {
+ dev_dbg(&gpriv->pdev->dev,
+ "channel %u reset failed\n", ch);
+ return err;
+ }
+ }
+ return 0;
+}
+
+static void rcar_canfd_configure_controller(struct rcar_canfd_global *gpriv)
+{
+ u32 cfg, ch;
+
+ /* Global configuration settings */
+
+ /* ECC Error flag Enable */
+ cfg = RCANFD_GCFG_EEFE;
+
+ if (gpriv->fdmode)
+ /* Truncate payload to configured message size RFPLS */
+ cfg |= RCANFD_GCFG_CMPOC;
+
+ /* Set External Clock if selected */
+ if (gpriv->fcan != RCANFD_CANFDCLK)
+ cfg |= RCANFD_GCFG_DCS;
+
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GCFG, cfg);
+
+ /* Channel configuration settings */
+ for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ rcar_canfd_set_bit(gpriv->base, RCANFD_CCTR(ch),
+ RCANFD_CCTR_ERRD);
+ rcar_canfd_update_bit(gpriv->base, RCANFD_CCTR(ch),
+ RCANFD_CCTR_BOM_MASK,
+ RCANFD_CCTR_BOM_BENTRY);
+ }
+}
+
+static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
+ u32 ch)
+{
+ u32 cfg;
+ int offset, start, page, num_rules = RCANFD_CHANNEL_NUMRULES;
+ u32 ridx = ch + RCANFD_RFFIFO_IDX;
+
+ if (ch == 0) {
+ start = 0; /* Channel 0 always starts from 0th rule */
+ } else {
+ /* Get number of Channel 0 rules and adjust */
+ cfg = rcar_canfd_read(gpriv->base, RCANFD_GAFLCFG0);
+ start = RCANFD_GAFLCFG_GETRNC(0, cfg);
+ }
+
+ /* Enable write access to entry */
+ page = RCANFD_GAFL_PAGENUM(start);
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLECTR,
+ (RCANFD_GAFLECTR_AFLPN(page) |
+ RCANFD_GAFLECTR_AFLDAE));
+
+ /* Write number of rules for channel */
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG0,
+ RCANFD_GAFLCFG_SETRNC(ch, num_rules));
+ if (gpriv->fdmode)
+ offset = RCANFD_F_GAFL_OFFSET;
+ else
+ offset = RCANFD_C_GAFL_OFFSET;
+
+ /* Accept all IDs */
+ rcar_canfd_write(gpriv->base, RCANFD_GAFLID(offset, start), 0);
+ /* IDE or RTR is not considered for matching */
+ rcar_canfd_write(gpriv->base, RCANFD_GAFLM(offset, start), 0);
+ /* Any data length accepted */
+ rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, start), 0);
+ /* Place the msg in corresponding Rx FIFO entry */
+ rcar_canfd_write(gpriv->base, RCANFD_GAFLP1(offset, start),
+ RCANFD_GAFLP1_GAFLFDP(ridx));
+
+ /* Disable write access to page */
+ rcar_canfd_clear_bit(gpriv->base,
+ RCANFD_GAFLECTR, RCANFD_GAFLECTR_AFLDAE);
+}
+
+static void rcar_canfd_configure_rx(struct rcar_canfd_global *gpriv, u32 ch)
+{
+ /* Rx FIFO is used for reception */
+ u32 cfg;
+ u16 rfdc, rfpls;
+
+ /* Select Rx FIFO based on channel */
+ u32 ridx = ch + RCANFD_RFFIFO_IDX;
+
+ rfdc = 2; /* b010 - 8 messages Rx FIFO depth */
+ if (gpriv->fdmode)
+ rfpls = 7; /* b111 - Max 64 bytes payload */
+ else
+ rfpls = 0; /* b000 - Max 8 bytes payload */
+
+ cfg = (RCANFD_RFCC_RFIM | RCANFD_RFCC_RFDC(rfdc) |
+ RCANFD_RFCC_RFPLS(rfpls) | RCANFD_RFCC_RFIE);
+ rcar_canfd_write(gpriv->base, RCANFD_RFCC(ridx), cfg);
+}
+
+static void rcar_canfd_configure_tx(struct rcar_canfd_global *gpriv, u32 ch)
+{
+ /* A Tx/Rx (Common) FIFO configured in Tx mode is
+ * used for transmission.
+ *
+ * Each channel has 3 Common FIFOs dedicated to it.
+ * Use the 1st (index 0) out of the 3.
+ */
+ u32 cfg;
+ u16 cftml, cfm, cfdc, cfpls;
+
+ cftml = 0; /* 0th buffer */
+ cfm = 1; /* b01 - Transmit mode */
+ cfdc = 2; /* b010 - 8 messages Tx FIFO depth */
+ if (gpriv->fdmode)
+ cfpls = 7; /* b111 - Max 64 bytes payload */
+ else
+ cfpls = 0; /* b000 - Max 8 bytes payload */
+
+ cfg = (RCANFD_CFCC_CFTML(cftml) | RCANFD_CFCC_CFM(cfm) |
+ RCANFD_CFCC_CFIM | RCANFD_CFCC_CFDC(cfdc) |
+ RCANFD_CFCC_CFPLS(cfpls) | RCANFD_CFCC_CFTXIE);
+ rcar_canfd_write(gpriv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX), cfg);
+
+ if (gpriv->fdmode)
+ /* Clear FD mode specific control/status register */
+ rcar_canfd_write(gpriv->base,
+ RCANFD_F_CFFDCSTS(ch, RCANFD_CFFIFO_IDX), 0);
+}
+
+static void rcar_canfd_enable_global_interrupts(struct rcar_canfd_global *gpriv)
+{
+ u32 ctr;
+
+ /* Clear any stray error interrupt flags */
+ rcar_canfd_write(gpriv->base, RCANFD_GERFL, 0);
+
+ /* Global interrupts setup */
+ ctr = RCANFD_GCTR_MEIE;
+ if (gpriv->fdmode)
+ ctr |= RCANFD_GCTR_CFMPOFIE;
+
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GCTR, ctr);
+}
+
+static void rcar_canfd_disable_global_interrupts(struct rcar_canfd_global
+ *gpriv)
+{
+ /* Disable all interrupts */
+ rcar_canfd_write(gpriv->base, RCANFD_GCTR, 0);
+
+ /* Clear any stray error interrupt flags */
+ rcar_canfd_write(gpriv->base, RCANFD_GERFL, 0);
+}
+
+static void rcar_canfd_enable_channel_interrupts(struct rcar_canfd_channel
+ *priv)
+{
+ u32 ctr, ch = priv->channel;
+
+ /* Clear any stray error flags */
+ rcar_canfd_write(priv->base, RCANFD_CERFL(ch), 0);
+
+ /* Channel interrupts setup */
+ ctr = (RCANFD_CCTR_TAIE |
+ RCANFD_CCTR_ALIE | RCANFD_CCTR_BLIE |
+ RCANFD_CCTR_OLIE | RCANFD_CCTR_BORIE |
+ RCANFD_CCTR_BOEIE | RCANFD_CCTR_EPIE |
+ RCANFD_CCTR_EWIE | RCANFD_CCTR_BEIE);
+ rcar_canfd_set_bit(priv->base, RCANFD_CCTR(ch), ctr);
+}
+
+static void rcar_canfd_disable_channel_interrupts(struct rcar_canfd_channel
+ *priv)
+{
+ u32 ctr, ch = priv->channel;
+
+ ctr = (RCANFD_CCTR_TAIE |
+ RCANFD_CCTR_ALIE | RCANFD_CCTR_BLIE |
+ RCANFD_CCTR_OLIE | RCANFD_CCTR_BORIE |
+ RCANFD_CCTR_BOEIE | RCANFD_CCTR_EPIE |
+ RCANFD_CCTR_EWIE | RCANFD_CCTR_BEIE);
+ rcar_canfd_clear_bit(priv->base, RCANFD_CCTR(ch), ctr);
+
+ /* Clear any stray error flags */
+ rcar_canfd_write(priv->base, RCANFD_CERFL(ch), 0);
+}
+
+static void rcar_canfd_global_error(struct net_device *ndev)
+{
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
+ struct net_device_stats *stats = &ndev->stats;
+ u32 ch = priv->channel;
+ u32 gerfl, sts;
+ u32 ridx = ch + RCANFD_RFFIFO_IDX;
+
+ gerfl = rcar_canfd_read(priv->base, RCANFD_GERFL);
+ if ((gerfl & RCANFD_GERFL_EEF0) && (ch == 0)) {
+ netdev_dbg(ndev, "Ch0: ECC Error flag\n");
+ stats->tx_dropped++;
+ }
+ if ((gerfl & RCANFD_GERFL_EEF1) && (ch == 1)) {
+ netdev_dbg(ndev, "Ch1: ECC Error flag\n");
+ stats->tx_dropped++;
+ }
+ if (gerfl & RCANFD_GERFL_MES) {
+ sts = rcar_canfd_read(priv->base,
+ RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX));
+ if (sts & RCANFD_CFSTS_CFMLT) {
+ netdev_dbg(ndev, "Tx Message Lost flag\n");
+ stats->tx_dropped++;
+ rcar_canfd_write(priv->base,
+ RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX),
+ sts & ~RCANFD_CFSTS_CFMLT);
+ }
+
+ sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx));
+ if (sts & RCANFD_RFSTS_RFMLT) {
+ netdev_dbg(ndev, "Rx Message Lost flag\n");
+ stats->rx_dropped++;
+ rcar_canfd_write(priv->base, RCANFD_RFSTS(ridx),
+ sts & ~RCANFD_RFSTS_RFMLT);
+ }
+ }
+ if (gpriv->fdmode && gerfl & RCANFD_GERFL_CMPOF) {
+ /* The Message Lost flag is set for the affected channel
+ * when this condition occurs; the counters and flags have
+ * already been updated by then.
+ */
+ netdev_dbg(ndev, "global payload overflow interrupt\n");
+ }
+
+ /* Clear all global error interrupts. Only the affected
+ * channel's bits get cleared.
+ */
+ rcar_canfd_write(priv->base, RCANFD_GERFL, 0);
+}
+
+static void rcar_canfd_error(struct net_device *ndev, u32 cerfl,
+ u16 txerr, u16 rxerr)
+{
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ u32 ch = priv->channel;
+
+ netdev_dbg(ndev, "ch erfl %x txerr %u rxerr %u\n", cerfl, txerr, rxerr);
+
+ /* Propagate the error condition to the CAN stack */
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ /* Channel error interrupts */
+ if (cerfl & RCANFD_CERFL_BEF) {
+ netdev_dbg(ndev, "Bus error\n");
+ cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
+ cf->data[2] = CAN_ERR_PROT_UNSPEC;
+ priv->can.can_stats.bus_error++;
+ }
+ if (cerfl & RCANFD_CERFL_ADERR) {
+ netdev_dbg(ndev, "ACK Delimiter Error\n");
+ stats->tx_errors++;
+ cf->data[3] |= CAN_ERR_PROT_LOC_ACK_DEL;
+ }
+ if (cerfl & RCANFD_CERFL_B0ERR) {
+ netdev_dbg(ndev, "Bit Error (dominant)\n");
+ stats->tx_errors++;
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+ }
+ if (cerfl & RCANFD_CERFL_B1ERR) {
+ netdev_dbg(ndev, "Bit Error (recessive)\n");
+ stats->tx_errors++;
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+ }
+ if (cerfl & RCANFD_CERFL_CERR) {
+ netdev_dbg(ndev, "CRC Error\n");
+ stats->rx_errors++;
+ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+ }
+ if (cerfl & RCANFD_CERFL_AERR) {
+ netdev_dbg(ndev, "ACK Error\n");
+ stats->tx_errors++;
+ cf->can_id |= CAN_ERR_ACK;
+ cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
+ }
+ if (cerfl & RCANFD_CERFL_FERR) {
+ netdev_dbg(ndev, "Form Error\n");
+ stats->rx_errors++;
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ }
+ if (cerfl & RCANFD_CERFL_SERR) {
+ netdev_dbg(ndev, "Stuff Error\n");
+ stats->rx_errors++;
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ }
+ if (cerfl & RCANFD_CERFL_ALF) {
+ netdev_dbg(ndev, "Arbitration lost Error\n");
+ priv->can.can_stats.arbitration_lost++;
+ cf->can_id |= CAN_ERR_LOSTARB;
+ cf->data[0] |= CAN_ERR_LOSTARB_UNSPEC;
+ }
+ if (cerfl & RCANFD_CERFL_BLF) {
+ netdev_dbg(ndev, "Bus Lock Error\n");
+ stats->rx_errors++;
+ cf->can_id |= CAN_ERR_BUSERROR;
+ }
+ if (cerfl & RCANFD_CERFL_EWF) {
+ netdev_dbg(ndev, "Error warning interrupt\n");
+ priv->can.state = CAN_STATE_ERROR_WARNING;
+ priv->can.can_stats.error_warning++;
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
+ if (cerfl & RCANFD_CERFL_EPF) {
+ netdev_dbg(ndev, "Error passive interrupt\n");
+ priv->can.state = CAN_STATE_ERROR_PASSIVE;
+ priv->can.can_stats.error_passive++;
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_PASSIVE :
+ CAN_ERR_CRTL_RX_PASSIVE;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
+ if (cerfl & RCANFD_CERFL_BOEF) {
+ netdev_dbg(ndev, "Bus-off entry interrupt\n");
+ rcar_canfd_tx_failure_cleanup(ndev);
+ priv->can.state = CAN_STATE_BUS_OFF;
+ priv->can.can_stats.bus_off++;
+ can_bus_off(ndev);
+ cf->can_id |= CAN_ERR_BUSOFF;
+ }
+ if (cerfl & RCANFD_CERFL_OVLF) {
+ netdev_dbg(ndev,
+ "Overload Frame Transmission error interrupt\n");
+ stats->tx_errors++;
+ cf->can_id |= CAN_ERR_PROT;
+ cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
+ }
+
+ /* Clear channel error interrupts that are handled */
+ rcar_canfd_write(priv->base, RCANFD_CERFL(ch),
+ RCANFD_CERFL_ERR(~cerfl));
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+}
+
+static void rcar_canfd_tx_done(struct net_device *ndev)
+{
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ u32 sts;
+ unsigned long flags;
+ u32 ch = priv->channel;
+
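+ /* Drain completed frames: tx_tail chases tx_head, and the loop
+ * exits once the frames still pending in the FIFO (CFMC) account
+ * for the whole ring occupancy (tx_head - tx_tail).
+ */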
+ do {
+ u8 unsent, sent;
+
+ sent = priv->tx_tail % RCANFD_FIFO_DEPTH;
+ stats->tx_packets++;
+ stats->tx_bytes += priv->tx_len[sent];
+ priv->tx_len[sent] = 0;
+ can_get_echo_skb(ndev, sent);
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+ priv->tx_tail++;
+ sts = rcar_canfd_read(priv->base,
+ RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX));
+ unsent = RCANFD_CFSTS_CFMC(sts);
+
+ /* Wake producer only when there is room */
+ if (unsent != RCANFD_FIFO_DEPTH)
+ netif_wake_queue(ndev);
+
+ if (priv->tx_head - priv->tx_tail <= unsent) {
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+ break;
+ }
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ } while (1);
+
+ /* Clear interrupt */
+ rcar_canfd_write(priv->base, RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX),
+ sts & ~RCANFD_CFSTS_CFTXIF);
+ can_led_event(ndev, CAN_LED_EVENT_TX);
+}
+
+static irqreturn_t rcar_canfd_global_interrupt(int irq, void *dev_id)
+{
+ struct rcar_canfd_global *gpriv = dev_id;
+ struct net_device *ndev;
+ struct rcar_canfd_channel *priv;
+ u32 sts, gerfl;
+ u32 ch, ridx;
+
+ /* Global error interrupts still indicate a condition specific
+ * to a channel. RxFIFO interrupt is a global interrupt.
+ */
+ for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ priv = gpriv->ch[ch];
+ ndev = priv->ndev;
+ ridx = ch + RCANFD_RFFIFO_IDX;
+
+ /* Global error interrupts */
+ gerfl = rcar_canfd_read(priv->base, RCANFD_GERFL);
+ if (unlikely(RCANFD_GERFL_ERR(gpriv, gerfl)))
+ rcar_canfd_global_error(ndev);
+
+ /* Handle Rx interrupts */
+ sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx));
+ if (likely(sts & RCANFD_RFSTS_RFIF)) {
+ if (napi_schedule_prep(&priv->napi)) {
+ /* Disable Rx FIFO interrupts */
+ rcar_canfd_clear_bit(priv->base,
+ RCANFD_RFCC(ridx),
+ RCANFD_RFCC_RFIE);
+ __napi_schedule(&priv->napi);
+ }
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static void rcar_canfd_state_change(struct net_device *ndev,
+ u16 txerr, u16 rxerr)
+{
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ enum can_state rx_state, tx_state, state = priv->can.state;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ /* Handle transition from error to normal states */
+ if (txerr < 96 && rxerr < 96)
+ state = CAN_STATE_ERROR_ACTIVE;
+ else if (txerr < 128 && rxerr < 128)
+ state = CAN_STATE_ERROR_WARNING;
+
+ if (state != priv->can.state) {
+ netdev_dbg(ndev, "state: new %d, old %d: txerr %u, rxerr %u\n",
+ state, priv->can.state, txerr, rxerr);
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+ tx_state = txerr >= rxerr ? state : 0;
+ rx_state = txerr <= rxerr ? state : 0;
+
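+ /* The new state is reported on the side (Tx or Rx) whose error
+ * counter drove the transition; the other side is passed 0, i.e.
+ * effectively left error-active.
+ */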
+ can_change_state(ndev, cf, tx_state, rx_state);
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ }
+}
+
+static irqreturn_t rcar_canfd_channel_interrupt(int irq, void *dev_id)
+{
+ struct rcar_canfd_global *gpriv = dev_id;
+ struct net_device *ndev;
+ struct rcar_canfd_channel *priv;
+ u32 sts, ch, cerfl;
+ u16 txerr, rxerr;
+
+ /* Common FIFO is a per channel resource */
+ for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ priv = gpriv->ch[ch];
+ ndev = priv->ndev;
+
+ /* Channel error interrupts */
+ cerfl = rcar_canfd_read(priv->base, RCANFD_CERFL(ch));
+ sts = rcar_canfd_read(priv->base, RCANFD_CSTS(ch));
+ txerr = RCANFD_CSTS_TECCNT(sts);
+ rxerr = RCANFD_CSTS_RECCNT(sts);
+ if (unlikely(RCANFD_CERFL_ERR(cerfl)))
+ rcar_canfd_error(ndev, cerfl, txerr, rxerr);
+
+ /* Handle state change to lower states */
+ if (unlikely((priv->can.state != CAN_STATE_ERROR_ACTIVE) &&
+ (priv->can.state != CAN_STATE_BUS_OFF)))
+ rcar_canfd_state_change(ndev, txerr, rxerr);
+
+ /* Handle Tx interrupts */
+ sts = rcar_canfd_read(priv->base,
+ RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX));
+ if (likely(sts & RCANFD_CFSTS_CFTXIF))
+ rcar_canfd_tx_done(ndev);
+ }
+ return IRQ_HANDLED;
+}
+
+static void rcar_canfd_set_bittiming(struct net_device *dev)
+{
+ struct rcar_canfd_channel *priv = netdev_priv(dev);
+ const struct can_bittiming *bt = &priv->can.bittiming;
+ const struct can_bittiming *dbt = &priv->can.data_bittiming;
+ u16 brp, sjw, tseg1, tseg2;
+ u32 cfg;
+ u32 ch = priv->channel;
+
+ /* Nominal bit timing settings */
+ brp = bt->brp - 1;
+ sjw = bt->sjw - 1;
+ tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
+ tseg2 = bt->phase_seg2 - 1;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ /* CAN FD only mode */
+ cfg = (RCANFD_NCFG_NTSEG1(tseg1) | RCANFD_NCFG_NBRP(brp) |
+ RCANFD_NCFG_NSJW(sjw) | RCANFD_NCFG_NTSEG2(tseg2));
+
+ rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
+ netdev_dbg(priv->ndev, "nrate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
+ brp, sjw, tseg1, tseg2);
+
+ /* Data bit timing settings */
+ brp = dbt->brp - 1;
+ sjw = dbt->sjw - 1;
+ tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
+ tseg2 = dbt->phase_seg2 - 1;
+
+ cfg = (RCANFD_DCFG_DTSEG1(tseg1) | RCANFD_DCFG_DBRP(brp) |
+ RCANFD_DCFG_DSJW(sjw) | RCANFD_DCFG_DTSEG2(tseg2));
+
+ rcar_canfd_write(priv->base, RCANFD_F_DCFG(ch), cfg);
+ netdev_dbg(priv->ndev, "drate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
+ brp, sjw, tseg1, tseg2);
+ } else {
+ /* Classical CAN only mode */
+ cfg = (RCANFD_CFG_TSEG1(tseg1) | RCANFD_CFG_BRP(brp) |
+ RCANFD_CFG_SJW(sjw) | RCANFD_CFG_TSEG2(tseg2));
+
+ rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
+ netdev_dbg(priv->ndev,
+ "rate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
+ brp, sjw, tseg1, tseg2);
+ }
+}
+
+static int rcar_canfd_start(struct net_device *ndev)
+{
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ int err;
+ u32 sts, ch = priv->channel;
+ u32 ridx = ch + RCANFD_RFFIFO_IDX;
+
+ rcar_canfd_set_bittiming(ndev);
+
+ rcar_canfd_enable_channel_interrupts(priv);
+
+ /* Set channel to Operational mode */
+ rcar_canfd_update_bit(priv->base, RCANFD_CCTR(ch),
+ RCANFD_CCTR_CHMDC_MASK, RCANFD_CCTR_CHDMC_COPM);
+
+ /* Verify channel mode change */
+ err = readl_poll_timeout((priv->base + RCANFD_CSTS(ch)), sts,
+ (sts & RCANFD_CSTS_COMSTS), 2, 500000);
+ if (err) {
+ netdev_err(ndev, "channel %u communication state failed\n", ch);
+ goto fail_mode_change;
+ }
+
+ /* Enable Common & Rx FIFO */
+ rcar_canfd_set_bit(priv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX),
+ RCANFD_CFCC_CFE);
+ rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx), RCANFD_RFCC_RFE);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ return 0;
+
+fail_mode_change:
+ rcar_canfd_disable_channel_interrupts(priv);
+ return err;
+}
+
+static int rcar_canfd_open(struct net_device *ndev)
+{
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
+ int err;
+
+ /* Peripheral clock is already enabled in probe */
+ err = clk_prepare_enable(gpriv->can_clk);
+ if (err) {
+ netdev_err(ndev, "failed to enable CAN clock, error %d\n", err);
+ goto out_clock;
+ }
+
+ err = open_candev(ndev);
+ if (err) {
+ netdev_err(ndev, "open_candev() failed, error %d\n", err);
+ goto out_can_clock;
+ }
+
+ napi_enable(&priv->napi);
+ err = rcar_canfd_start(ndev);
+ if (err)
+ goto out_close;
+ netif_start_queue(ndev);
+ can_led_event(ndev, CAN_LED_EVENT_OPEN);
+ return 0;
+out_close:
+ napi_disable(&priv->napi);
+ close_candev(ndev);
+out_can_clock:
+ clk_disable_unprepare(gpriv->can_clk);
+out_clock:
+ return err;
+}
+
+static void rcar_canfd_stop(struct net_device *ndev)
+{
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ int err;
+ u32 sts, ch = priv->channel;
+ u32 ridx = ch + RCANFD_RFFIFO_IDX;
+
+ /* Transition to channel reset mode */
+ rcar_canfd_update_bit(priv->base, RCANFD_CCTR(ch),
+ RCANFD_CCTR_CHMDC_MASK, RCANFD_CCTR_CHDMC_CRESET);
+
+ /* Check Channel reset mode */
+ err = readl_poll_timeout((priv->base + RCANFD_CSTS(ch)), sts,
+ (sts & RCANFD_CSTS_CRSTSTS), 2, 500000);
+ if (err)
+ netdev_err(ndev, "channel %u reset failed\n", ch);
+
+ rcar_canfd_disable_channel_interrupts(priv);
+
+ /* Disable Common & Rx FIFO */
+ rcar_canfd_clear_bit(priv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX),
+ RCANFD_CFCC_CFE);
+ rcar_canfd_clear_bit(priv->base, RCANFD_RFCC(ridx), RCANFD_RFCC_RFE);
+
+ /* Set the state as STOPPED */
+ priv->can.state = CAN_STATE_STOPPED;
+}
+
+static int rcar_canfd_close(struct net_device *ndev)
+{
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
+
+ netif_stop_queue(ndev);
+ rcar_canfd_stop(ndev);
+ napi_disable(&priv->napi);
+ clk_disable_unprepare(gpriv->can_clk);
+ close_candev(ndev);
+ can_led_event(ndev, CAN_LED_EVENT_STOP);
+ return 0;
+}
+
+static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+ u32 sts = 0, id, dlc;
+ unsigned long flags;
+ u32 ch = priv->channel;
+
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ if (cf->can_id & CAN_EFF_FLAG) {
+ id = cf->can_id & CAN_EFF_MASK;
+ id |= RCANFD_CFID_CFIDE;
+ } else {
+ id = cf->can_id & CAN_SFF_MASK;
+ }
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ id |= RCANFD_CFID_CFRTR;
+
+ dlc = RCANFD_CFPTR_CFDLC(can_len2dlc(cf->len));
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ rcar_canfd_write(priv->base,
+ RCANFD_F_CFID(ch, RCANFD_CFFIFO_IDX), id);
+ rcar_canfd_write(priv->base,
+ RCANFD_F_CFPTR(ch, RCANFD_CFFIFO_IDX), dlc);
+
+ if (can_is_canfd_skb(skb)) {
+ /* CAN FD frame format */
+ sts |= RCANFD_CFFDCSTS_CFFDF;
+ if (cf->flags & CANFD_BRS)
+ sts |= RCANFD_CFFDCSTS_CFBRS;
+
+ if (priv->can.state == CAN_STATE_ERROR_PASSIVE)
+ sts |= RCANFD_CFFDCSTS_CFESI;
+ }
+
+ rcar_canfd_write(priv->base,
+ RCANFD_F_CFFDCSTS(ch, RCANFD_CFFIFO_IDX), sts);
+
+ rcar_canfd_put_data(priv, cf,
+ RCANFD_F_CFDF(ch, RCANFD_CFFIFO_IDX, 0));
+ } else {
+ rcar_canfd_write(priv->base,
+ RCANFD_C_CFID(ch, RCANFD_CFFIFO_IDX), id);
+ rcar_canfd_write(priv->base,
+ RCANFD_C_CFPTR(ch, RCANFD_CFFIFO_IDX), dlc);
+ rcar_canfd_put_data(priv, cf,
+ RCANFD_C_CFDF(ch, RCANFD_CFFIFO_IDX, 0));
+ }
+
+ priv->tx_len[priv->tx_head % RCANFD_FIFO_DEPTH] = cf->len;
+ can_put_echo_skb(skb, ndev, priv->tx_head % RCANFD_FIFO_DEPTH);
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+ priv->tx_head++;
+
+ /* Stop the queue if we've filled all FIFO entries */
+ if (priv->tx_head - priv->tx_tail >= RCANFD_FIFO_DEPTH)
+ netif_stop_queue(ndev);
+
+ /* Start Tx: Write 0xff to CFPC to increment the CPU-side
+ * pointer for the Common FIFO
+ */
+ rcar_canfd_write(priv->base,
+ RCANFD_CFPCTR(ch, RCANFD_CFFIFO_IDX), 0xff);
+
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+ return NETDEV_TX_OK;
+}
+
+static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct canfd_frame *cf;
+ struct sk_buff *skb;
+ u32 sts = 0, id, dlc;
+ u32 ch = priv->channel;
+ u32 ridx = ch + RCANFD_RFFIFO_IDX;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ id = rcar_canfd_read(priv->base, RCANFD_F_RFID(ridx));
+ dlc = rcar_canfd_read(priv->base, RCANFD_F_RFPTR(ridx));
+
+ sts = rcar_canfd_read(priv->base, RCANFD_F_RFFDSTS(ridx));
+ if (sts & RCANFD_RFFDSTS_RFFDF)
+ skb = alloc_canfd_skb(priv->ndev, &cf);
+ else
+ skb = alloc_can_skb(priv->ndev,
+ (struct can_frame **)&cf);
+ } else {
+ id = rcar_canfd_read(priv->base, RCANFD_C_RFID(ridx));
+ dlc = rcar_canfd_read(priv->base, RCANFD_C_RFPTR(ridx));
+ skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cf);
+ }
+
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ if (id & RCANFD_RFID_RFIDE)
+ cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ else
+ cf->can_id = id & CAN_SFF_MASK;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ if (sts & RCANFD_RFFDSTS_RFFDF)
+ cf->len = can_dlc2len(RCANFD_RFPTR_RFDLC(dlc));
+ else
+ cf->len = get_can_dlc(RCANFD_RFPTR_RFDLC(dlc));
+
+ if (sts & RCANFD_RFFDSTS_RFESI) {
+ cf->flags |= CANFD_ESI;
+ netdev_dbg(priv->ndev, "ESI Error\n");
+ }
+
+ if (!(sts & RCANFD_RFFDSTS_RFFDF) && (id & RCANFD_RFID_RFRTR)) {
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ if (sts & RCANFD_RFFDSTS_RFBRS)
+ cf->flags |= CANFD_BRS;
+
+ rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(ridx, 0));
+ }
+ } else {
+ cf->len = get_can_dlc(RCANFD_RFPTR_RFDLC(dlc));
+ if (id & RCANFD_RFID_RFRTR)
+ cf->can_id |= CAN_RTR_FLAG;
+ else
+ rcar_canfd_get_data(priv, cf, RCANFD_C_RFDF(ridx, 0));
+ }
+
+ /* Write 0xff to RFPC to increment the CPU-side
+ * pointer of the Rx FIFO
+ */
+ rcar_canfd_write(priv->base, RCANFD_RFPCTR(ridx), 0xff);
+
+ can_led_event(priv->ndev, CAN_LED_EVENT_RX);
+
+ stats->rx_bytes += cf->len;
+ stats->rx_packets++;
+ netif_receive_skb(skb);
+}
+
+static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
+{
+ struct rcar_canfd_channel *priv =
+ container_of(napi, struct rcar_canfd_channel, napi);
+ int num_pkts;
+ u32 sts;
+ u32 ch = priv->channel;
+ u32 ridx = ch + RCANFD_RFFIFO_IDX;
+
+ for (num_pkts = 0; num_pkts < quota; num_pkts++) {
+ sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx));
+ /* Check FIFO empty condition */
+ if (sts & RCANFD_RFSTS_RFEMP)
+ break;
+
+ rcar_canfd_rx_pkt(priv);
+
+ /* Clear interrupt bit */
+ if (sts & RCANFD_RFSTS_RFIF)
+ rcar_canfd_write(priv->base, RCANFD_RFSTS(ridx),
+ sts & ~RCANFD_RFSTS_RFIF);
+ }
+
+ /* All packets processed */
+ if (num_pkts < quota) {
+ napi_complete(napi);
+ /* Enable Rx FIFO interrupts */
+ rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
+ RCANFD_RFCC_RFIE);
+ }
+ return num_pkts;
+}
+
+static int rcar_canfd_do_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ int err;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ err = rcar_canfd_start(ndev);
+ if (err)
+ return err;
+ netif_wake_queue(ndev);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int rcar_canfd_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
+{
+ struct rcar_canfd_channel *priv = netdev_priv(dev);
+ u32 val, ch = priv->channel;
+
+ /* Peripheral clock is already enabled in probe */
+ val = rcar_canfd_read(priv->base, RCANFD_CSTS(ch));
+ bec->txerr = RCANFD_CSTS_TECCNT(val);
+ bec->rxerr = RCANFD_CSTS_RECCNT(val);
+ return 0;
+}
+
+static const struct net_device_ops rcar_canfd_netdev_ops = {
+ .ndo_open = rcar_canfd_open,
+ .ndo_stop = rcar_canfd_close,
+ .ndo_start_xmit = rcar_canfd_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
+ u32 fcan_freq)
+{
+ struct platform_device *pdev = gpriv->pdev;
+ struct rcar_canfd_channel *priv;
+ struct net_device *ndev;
+ int err = -ENODEV;
+
+ ndev = alloc_candev(sizeof(*priv), RCANFD_FIFO_DEPTH);
+ if (!ndev) {
+ dev_err(&pdev->dev, "alloc_candev() failed\n");
+ err = -ENOMEM;
+ goto fail;
+ }
+ priv = netdev_priv(ndev);
+
+ ndev->netdev_ops = &rcar_canfd_netdev_ops;
+ ndev->flags |= IFF_ECHO;
+ priv->ndev = ndev;
+ priv->base = gpriv->base;
+ priv->channel = ch;
+ priv->can.clock.freq = fcan_freq;
+ dev_info(&pdev->dev, "can_clk rate is %u\n", priv->can.clock.freq);
+
+ if (gpriv->fdmode) {
+ priv->can.bittiming_const = &rcar_canfd_nom_bittiming_const;
+ priv->can.data_bittiming_const =
+ &rcar_canfd_data_bittiming_const;
+
+ /* Controller starts in CAN FD only mode */
+ can_set_static_ctrlmode(ndev, CAN_CTRLMODE_FD);
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
+ } else {
+ /* Controller starts in Classical CAN only mode */
+ priv->can.bittiming_const = &rcar_canfd_bittiming_const;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
+ }
+
+ priv->can.do_set_mode = rcar_canfd_do_set_mode;
+ priv->can.do_get_berr_counter = rcar_canfd_get_berr_counter;
+ priv->gpriv = gpriv;
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ netif_napi_add(ndev, &priv->napi, rcar_canfd_rx_poll,
+ RCANFD_NAPI_WEIGHT);
+ err = register_candev(ndev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "register_candev() failed, error %d\n", err);
+ goto fail_candev;
+ }
+ spin_lock_init(&priv->tx_lock);
+ devm_can_led_init(ndev);
+ gpriv->ch[priv->channel] = priv;
+ dev_info(&pdev->dev, "device registered (channel %u)\n", priv->channel);
+ return 0;
+
+fail_candev:
+ netif_napi_del(&priv->napi);
+ free_candev(ndev);
+fail:
+ return err;
+}
+
+static void rcar_canfd_channel_remove(struct rcar_canfd_global *gpriv, u32 ch)
+{
+ struct rcar_canfd_channel *priv = gpriv->ch[ch];
+
+ if (priv) {
+ unregister_candev(priv->ndev);
+ netif_napi_del(&priv->napi);
+ free_candev(priv->ndev);
+ }
+}
+
+static int rcar_canfd_probe(struct platform_device *pdev)
+{
+ struct resource *mem;
+ void __iomem *addr;
+ u32 sts, ch, fcan_freq;
+ struct rcar_canfd_global *gpriv;
+ struct device_node *of_child;
+ unsigned long channels_mask = 0;
+ int err, ch_irq, g_irq;
+ bool fdmode = true; /* CAN FD only mode - default */
+
+ if (of_property_read_bool(pdev->dev.of_node, "renesas,no-can-fd"))
+ fdmode = false; /* Classical CAN only mode */
+
+ of_child = of_get_child_by_name(pdev->dev.of_node, "channel0");
+ if (of_child && of_device_is_available(of_child))
+ channels_mask |= BIT(0); /* Channel 0 */
+
+ of_child = of_get_child_by_name(pdev->dev.of_node, "channel1");
+ if (of_child && of_device_is_available(of_child))
+ channels_mask |= BIT(1); /* Channel 1 */
+
+ ch_irq = platform_get_irq(pdev, 0);
+ if (ch_irq < 0) {
+ dev_err(&pdev->dev, "no Channel IRQ resource\n");
+ err = ch_irq;
+ goto fail_dev;
+ }
+
+ g_irq = platform_get_irq(pdev, 1);
+ if (g_irq < 0) {
+ dev_err(&pdev->dev, "no Global IRQ resource\n");
+ err = g_irq;
+ goto fail_dev;
+ }
+
+ /* Global controller context */
+ gpriv = devm_kzalloc(&pdev->dev, sizeof(*gpriv), GFP_KERNEL);
+ if (!gpriv) {
+ err = -ENOMEM;
+ goto fail_dev;
+ }
+ gpriv->pdev = pdev;
+ gpriv->channels_mask = channels_mask;
+ gpriv->fdmode = fdmode;
+
+ /* Peripheral clock */
+ gpriv->clkp = devm_clk_get(&pdev->dev, "fck");
+ if (IS_ERR(gpriv->clkp)) {
+ err = PTR_ERR(gpriv->clkp);
+ dev_err(&pdev->dev, "cannot get peripheral clock, error %d\n",
+ err);
+ goto fail_dev;
+ }
+
+ /* fCAN clock: pick the External clock. If it is not available,
+ * fall back to the CANFD clock.
+ */
+ gpriv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
+ if (IS_ERR(gpriv->can_clk) || (clk_get_rate(gpriv->can_clk) == 0)) {
+ gpriv->can_clk = devm_clk_get(&pdev->dev, "canfd");
+ if (IS_ERR(gpriv->can_clk)) {
+ err = PTR_ERR(gpriv->can_clk);
+ dev_err(&pdev->dev,
+ "cannot get canfd clock, error %d\n", err);
+ goto fail_dev;
+ }
+ gpriv->fcan = RCANFD_CANFDCLK;
+
+ } else {
+ gpriv->fcan = RCANFD_EXTCLK;
+ }
+ fcan_freq = clk_get_rate(gpriv->can_clk);
+
+ if (gpriv->fcan == RCANFD_CANFDCLK)
+ /* CANFD clock is further divided by 2 within the IP */
+ fcan_freq /= 2;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ addr = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(addr)) {
+ err = PTR_ERR(addr);
+ goto fail_dev;
+ }
+ gpriv->base = addr;
+
+ /* Request IRQ that's common for both channels */
+ err = devm_request_irq(&pdev->dev, ch_irq,
+ rcar_canfd_channel_interrupt, 0,
+ "canfd.chn", gpriv);
+ if (err) {
+ dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n",
+ ch_irq, err);
+ goto fail_dev;
+ }
+ err = devm_request_irq(&pdev->dev, g_irq,
+ rcar_canfd_global_interrupt, 0,
+ "canfd.gbl", gpriv);
+ if (err) {
+ dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n",
+ g_irq, err);
+ goto fail_dev;
+ }
+
+ /* Enable peripheral clock for register access */
+ err = clk_prepare_enable(gpriv->clkp);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed to enable peripheral clock, error %d\n", err);
+ goto fail_dev;
+ }
+
+ err = rcar_canfd_reset_controller(gpriv);
+ if (err) {
+ dev_err(&pdev->dev, "reset controller failed\n");
+ goto fail_clk;
+ }
+
+ /* Controller in Global reset & Channel reset mode */
+ rcar_canfd_configure_controller(gpriv);
+
+ /* Configure per channel attributes */
+ for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ /* Configure Channel's Rx fifo */
+ rcar_canfd_configure_rx(gpriv, ch);
+
+ /* Configure Channel's Tx (Common) fifo */
+ rcar_canfd_configure_tx(gpriv, ch);
+
+ /* Configure receive rules */
+ rcar_canfd_configure_afl_rules(gpriv, ch);
+ }
+
+ /* Configure common interrupts */
+ rcar_canfd_enable_global_interrupts(gpriv);
+
+ /* Start Global operation mode */
+ rcar_canfd_update_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GMDC_MASK,
+ RCANFD_GCTR_GMDC_GOPM);
+
+ /* Verify mode change */
+ err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
+ !(sts & RCANFD_GSTS_GNOPM), 2, 500000);
+ if (err) {
+ dev_err(&pdev->dev, "global operational mode failed\n");
+ goto fail_mode;
+ }
+
+ for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ err = rcar_canfd_channel_probe(gpriv, ch, fcan_freq);
+ if (err)
+ goto fail_channel;
+ }
+
+ platform_set_drvdata(pdev, gpriv);
+ dev_info(&pdev->dev, "global operational state (clk %d, fdmode %d)\n",
+ gpriv->fcan, gpriv->fdmode);
+ return 0;
+
+fail_channel:
+ for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
+ rcar_canfd_channel_remove(gpriv, ch);
+fail_mode:
+ rcar_canfd_disable_global_interrupts(gpriv);
+fail_clk:
+ clk_disable_unprepare(gpriv->clkp);
+fail_dev:
+ return err;
+}
+
+static int rcar_canfd_remove(struct platform_device *pdev)
+{
+ struct rcar_canfd_global *gpriv = platform_get_drvdata(pdev);
+ u32 ch;
+
+ rcar_canfd_reset_controller(gpriv);
+ rcar_canfd_disable_global_interrupts(gpriv);
+
+ for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ rcar_canfd_disable_channel_interrupts(gpriv->ch[ch]);
+ rcar_canfd_channel_remove(gpriv, ch);
+ }
+
+ /* Enter global sleep mode */
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GSLPR);
+ clk_disable_unprepare(gpriv->clkp);
+ return 0;
+}
+
+static int __maybe_unused rcar_canfd_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int __maybe_unused rcar_canfd_resume(struct device *dev)
+{
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(rcar_canfd_pm_ops, rcar_canfd_suspend,
+ rcar_canfd_resume);
+
+static const struct of_device_id rcar_canfd_of_table[] = {
+ { .compatible = "renesas,rcar-gen3-canfd" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, rcar_canfd_of_table);
+
+static struct platform_driver rcar_canfd_driver = {
+ .driver = {
+ .name = RCANFD_DRV_NAME,
+ .of_match_table = of_match_ptr(rcar_canfd_of_table),
+ .pm = &rcar_canfd_pm_ops,
+ },
+ .probe = rcar_canfd_probe,
+ .remove = rcar_canfd_remove,
+};
+
+module_platform_driver(rcar_canfd_driver);
+
+MODULE_AUTHOR("Ramesh Shanmugasundaram <ramesh.shanmugasundaram@bp.renesas.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CAN FD driver for Renesas R-Car SoC");
+MODULE_ALIAS("platform:" RCANFD_DRV_NAME);
diff --git a/drivers/net/can/sja1000/tscan1.c b/drivers/net/can/sja1000/tscan1.c
index 76513dd78..79572457a 100644
--- a/drivers/net/can/sja1000/tscan1.c
+++ b/drivers/net/can/sja1000/tscan1.c
@@ -203,14 +203,4 @@ static struct isa_driver tscan1_isa_driver = {
},
};
-static int __init tscan1_init(void)
-{
- return isa_register_driver(&tscan1_isa_driver, TSCAN1_MAXDEV);
-}
-module_init(tscan1_init);
-
-static void __exit tscan1_exit(void)
-{
- isa_unregister_driver(&tscan1_isa_driver);
-}
-module_exit(tscan1_exit);
+module_isa_driver(tscan1_isa_driver, TSCAN1_MAXDEV);
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 9a3f15cb7..eb7173713 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -354,7 +354,7 @@ static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct slcan *sl = netdev_priv(dev);
- if (skb->len != sizeof(struct can_frame))
+ if (skb->len != CAN_MTU)
goto out;
spin_lock(&sl->lock);
@@ -442,7 +442,7 @@ static void slc_setup(struct net_device *dev)
dev->addr_len = 0;
dev->tx_queue_len = 10;
- dev->mtu = sizeof(struct can_frame);
+ dev->mtu = CAN_MTU;
dev->type = ARPHRD_CAN;
/* New-style flags. */
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index cf36d26ef..f3f05fea8 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -1145,8 +1145,11 @@ static int mcp251x_can_probe(struct spi_device *spi)
/* Here is OK to not lock the MCP, no one knows about it yet */
ret = mcp251x_hw_probe(spi);
- if (ret)
+ if (ret) {
+ if (ret == -ENODEV)
+ dev_err(&spi->dev, "Cannot initialize MCP%x. Wrong wiring?\n", priv->model);
goto error_probe;
+ }
mcp251x_hw_sleep(spi);
@@ -1156,6 +1159,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
devm_can_led_init(net);
+ netdev_info(net, "MCP%x successfully initialized.\n", priv->model);
return 0;
error_probe:
@@ -1168,6 +1172,7 @@ out_clk:
out_free:
free_candev(net);
+ dev_err(&spi->dev, "Probe failed, err=%d\n", -ret);
return ret;
}
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index acb0c8490..6f0cbc387 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -44,7 +44,9 @@ enum gs_usb_breq {
GS_USB_BREQ_MODE,
GS_USB_BREQ_BERR,
GS_USB_BREQ_BT_CONST,
- GS_USB_BREQ_DEVICE_CONFIG
+ GS_USB_BREQ_DEVICE_CONFIG,
+ GS_USB_BREQ_TIMESTAMP,
+ GS_USB_BREQ_IDENTIFY,
};
enum gs_can_mode {
@@ -63,6 +65,11 @@ enum gs_can_state {
GS_CAN_STATE_SLEEPING
};
+enum gs_can_identify_mode {
+ GS_CAN_IDENTIFY_OFF = 0,
+ GS_CAN_IDENTIFY_ON
+};
+
/* data types passed between host and device */
struct gs_host_config {
u32 byte_order;
@@ -82,10 +89,10 @@ struct gs_device_config {
} __packed;
#define GS_CAN_MODE_NORMAL 0
-#define GS_CAN_MODE_LISTEN_ONLY (1<<0)
-#define GS_CAN_MODE_LOOP_BACK (1<<1)
-#define GS_CAN_MODE_TRIPLE_SAMPLE (1<<2)
-#define GS_CAN_MODE_ONE_SHOT (1<<3)
+#define GS_CAN_MODE_LISTEN_ONLY BIT(0)
+#define GS_CAN_MODE_LOOP_BACK BIT(1)
+#define GS_CAN_MODE_TRIPLE_SAMPLE BIT(2)
+#define GS_CAN_MODE_ONE_SHOT BIT(3)
struct gs_device_mode {
u32 mode;
@@ -106,10 +113,16 @@ struct gs_device_bittiming {
u32 brp;
} __packed;
-#define GS_CAN_FEATURE_LISTEN_ONLY (1<<0)
-#define GS_CAN_FEATURE_LOOP_BACK (1<<1)
-#define GS_CAN_FEATURE_TRIPLE_SAMPLE (1<<2)
-#define GS_CAN_FEATURE_ONE_SHOT (1<<3)
+struct gs_identify_mode {
+ u32 mode;
+} __packed;
+
+#define GS_CAN_FEATURE_LISTEN_ONLY BIT(0)
+#define GS_CAN_FEATURE_LOOP_BACK BIT(1)
+#define GS_CAN_FEATURE_TRIPLE_SAMPLE BIT(2)
+#define GS_CAN_FEATURE_ONE_SHOT BIT(3)
+#define GS_CAN_FEATURE_HW_TIMESTAMP BIT(4)
+#define GS_CAN_FEATURE_IDENTIFY BIT(5)
struct gs_device_bt_const {
u32 feature;
@@ -214,7 +227,8 @@ static void gs_free_tx_context(struct gs_tx_context *txc)
/* Get a tx context by id.
*/
-static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev, unsigned int id)
+static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev,
+ unsigned int id)
{
unsigned long flags;
@@ -457,7 +471,8 @@ static void gs_usb_xmit_callback(struct urb *urb)
netif_wake_queue(netdev);
}
-static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
{
struct gs_can *dev = netdev_priv(netdev);
struct net_device_stats *stats = &dev->netdev->stats;
@@ -663,7 +678,8 @@ static int gs_can_open(struct net_device *netdev)
rc = usb_control_msg(interface_to_usbdev(dev->iface),
usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
GS_USB_BREQ_MODE,
- USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_INTERFACE,
dev->channel,
0,
dm,
@@ -726,7 +742,59 @@ static const struct net_device_ops gs_usb_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
-static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface *intf)
+static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+ struct gs_identify_mode imode;
+ int rc;
+
+ if (do_identify)
+ imode.mode = GS_CAN_IDENTIFY_ON;
+ else
+ imode.mode = GS_CAN_IDENTIFY_OFF;
+
+ rc = usb_control_msg(interface_to_usbdev(dev->iface),
+ usb_sndctrlpipe(interface_to_usbdev(dev->iface),
+ 0),
+ GS_USB_BREQ_IDENTIFY,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_INTERFACE,
+ dev->channel,
+ 0,
+ &imode,
+ sizeof(imode),
+ 100);
+
+ return (rc > 0) ? 0 : rc;
+}
+
+/* Blink the LEDs to identify this interface */
+static int gs_usb_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ int rc = 0;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_ON);
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_OFF);
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+static const struct ethtool_ops gs_usb_ethtool_ops = {
+ .set_phys_id = gs_usb_set_phys_id,
+};
+
+static struct gs_can *gs_make_candev(unsigned int channel,
+ struct usb_interface *intf,
+ struct gs_device_config *dconf)
{
struct gs_can *dev;
struct net_device *netdev;
@@ -814,10 +882,14 @@ static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface
if (bt_const->feature & GS_CAN_FEATURE_ONE_SHOT)
dev->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
- kfree(bt_const);
-
SET_NETDEV_DEV(netdev, &intf->dev);
+ if (dconf->sw_version > 1)
+ if (bt_const->feature & GS_CAN_FEATURE_IDENTIFY)
+ netdev->ethtool_ops = &gs_usb_ethtool_ops;
+
+ kfree(bt_const);
+
rc = register_candev(dev->netdev);
if (rc) {
free_candev(dev->netdev);
@@ -835,19 +907,16 @@ static void gs_destroy_candev(struct gs_can *dev)
free_candev(dev->netdev);
}
-static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
+static int gs_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
{
struct gs_usb *dev;
int rc = -ENOMEM;
unsigned int icount, i;
- struct gs_host_config *hconf;
- struct gs_device_config *dconf;
-
- hconf = kmalloc(sizeof(*hconf), GFP_KERNEL);
- if (!hconf)
- return -ENOMEM;
-
- hconf->byte_order = 0x0000beef;
+ struct gs_host_config hconf = {
+ .byte_order = 0x0000beef,
+ };
+ struct gs_device_config dconf;
/* send host config */
rc = usb_control_msg(interface_to_usbdev(intf),
@@ -856,22 +925,16 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *
USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
1,
intf->altsetting[0].desc.bInterfaceNumber,
- hconf,
- sizeof(*hconf),
+ &hconf,
+ sizeof(hconf),
1000);
- kfree(hconf);
-
if (rc < 0) {
dev_err(&intf->dev, "Couldn't send data format (err=%d)\n",
rc);
return rc;
}
- dconf = kmalloc(sizeof(*dconf), GFP_KERNEL);
- if (!dconf)
- return -ENOMEM;
-
/* read device config */
rc = usb_control_msg(interface_to_usbdev(intf),
usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
@@ -879,22 +942,16 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *
USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
1,
intf->altsetting[0].desc.bInterfaceNumber,
- dconf,
- sizeof(*dconf),
+ &dconf,
+ sizeof(dconf),
1000);
if (rc < 0) {
dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n",
rc);
-
- kfree(dconf);
-
return rc;
}
- icount = dconf->icount+1;
-
- kfree(dconf);
-
+ icount = dconf.icount + 1;
dev_info(&intf->dev, "Configuring for %d interfaces\n", icount);
if (icount > GS_MAX_INTF) {
@@ -915,7 +972,7 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *
dev->udev = interface_to_usbdev(intf);
for (i = 0; i < icount; i++) {
- dev->canch[i] = gs_make_candev(i, intf);
+ dev->canch[i] = gs_make_candev(i, intf, &dconf);
if (IS_ERR_OR_NULL(dev->canch[i])) {
/* save error code to return later */
rc = PTR_ERR(dev->canch[i]);
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 200663c43..8f4544394 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -9,14 +9,6 @@ config NET_DSA_MV88E6060
This enables support for the Marvell 88E6060 ethernet switch
chip.
-config NET_DSA_MV88E6XXX
- tristate "Marvell 88E6xxx Ethernet switch chip support"
- depends on NET_DSA
- select NET_DSA_TAG_EDSA
- ---help---
- This enables support for most of the Marvell 88E6xxx models of
- Ethernet switch chips, except 88E6060.
-
config NET_DSA_BCM_SF2
tristate "Broadcom Starfighter 2 Ethernet switch support"
depends on HAS_IOMEM && NET_DSA
@@ -28,4 +20,8 @@ config NET_DSA_BCM_SF2
This enables support for the Broadcom Starfighter 2 Ethernet
switch chips.
+source "drivers/net/dsa/b53/Kconfig"
+
+source "drivers/net/dsa/mv88e6xxx/Kconfig"
+
endmenu
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index 76b751dd9..ca1e71b85 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -1,3 +1,5 @@
obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
-obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o
obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm_sf2.o
+
+obj-y += b53/
+obj-y += mv88e6xxx/
diff --git a/drivers/net/dsa/b53/Kconfig b/drivers/net/dsa/b53/Kconfig
new file mode 100644
index 000000000..27f32a50d
--- /dev/null
+++ b/drivers/net/dsa/b53/Kconfig
@@ -0,0 +1,33 @@
+menuconfig B53
+ tristate "Broadcom BCM53xx managed switch support"
+ depends on NET_DSA
+ help
+ This driver adds support for Broadcom managed switch chips. It supports
+ BCM5325E, BCM5365, BCM539x, BCM53115 and BCM53125, as well as BCM63XX
+ integrated switches.
+
+config B53_SPI_DRIVER
+ tristate "B53 SPI connected switch driver"
+ depends on B53 && SPI
+ help
+ Select to enable support for registering switches configured through SPI.
+
+config B53_MDIO_DRIVER
+ tristate "B53 MDIO connected switch driver"
+ depends on B53
+ help
+ Select to enable support for registering switches configured through MDIO.
+
+config B53_MMAP_DRIVER
+ tristate "B53 MMAP connected switch driver"
+ depends on B53 && HAS_IOMEM
+ help
+ Select to enable support for memory-mapped switches like the BCM63XX
+ integrated switches.
+
+config B53_SRAB_DRIVER
+ tristate "B53 SRAB connected switch driver"
+ depends on B53 && HAS_IOMEM
+ help
+ Select to enable support for memory-mapped Switch Register Access
+ Bridge (SRAB) registers, as found on the BCM53010.
diff --git a/drivers/net/dsa/b53/Makefile b/drivers/net/dsa/b53/Makefile
new file mode 100644
index 000000000..7e6f9a8bf
--- /dev/null
+++ b/drivers/net/dsa/b53/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_B53) += b53_common.o
+
+obj-$(CONFIG_B53_SPI_DRIVER) += b53_spi.o
+obj-$(CONFIG_B53_MDIO_DRIVER) += b53_mdio.o
+obj-$(CONFIG_B53_MMAP_DRIVER) += b53_mmap.o
+obj-$(CONFIG_B53_SRAB_DRIVER) += b53_srab.o
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
new file mode 100644
index 000000000..bda37d336
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -0,0 +1,1799 @@
+/*
+ * B53 switch driver main logic
+ *
+ * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
+ * Copyright (C) 2016 Florian Fainelli <f.fainelli@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_data/b53.h>
+#include <linux/phy.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <net/dsa.h>
+#include <net/switchdev.h>
+
+#include "b53_regs.h"
+#include "b53_priv.h"
+
+struct b53_mib_desc {
+ u8 size;
+ u8 offset;
+ const char *name;
+};
+
+/* BCM5365 MIB counters */
+static const struct b53_mib_desc b53_mibs_65[] = {
+ { 8, 0x00, "TxOctets" },
+ { 4, 0x08, "TxDropPkts" },
+ { 4, 0x10, "TxBroadcastPkts" },
+ { 4, 0x14, "TxMulticastPkts" },
+ { 4, 0x18, "TxUnicastPkts" },
+ { 4, 0x1c, "TxCollisions" },
+ { 4, 0x20, "TxSingleCollision" },
+ { 4, 0x24, "TxMultipleCollision" },
+ { 4, 0x28, "TxDeferredTransmit" },
+ { 4, 0x2c, "TxLateCollision" },
+ { 4, 0x30, "TxExcessiveCollision" },
+ { 4, 0x38, "TxPausePkts" },
+ { 8, 0x44, "RxOctets" },
+ { 4, 0x4c, "RxUndersizePkts" },
+ { 4, 0x50, "RxPausePkts" },
+ { 4, 0x54, "Pkts64Octets" },
+ { 4, 0x58, "Pkts65to127Octets" },
+ { 4, 0x5c, "Pkts128to255Octets" },
+ { 4, 0x60, "Pkts256to511Octets" },
+ { 4, 0x64, "Pkts512to1023Octets" },
+ { 4, 0x68, "Pkts1024to1522Octets" },
+ { 4, 0x6c, "RxOversizePkts" },
+ { 4, 0x70, "RxJabbers" },
+ { 4, 0x74, "RxAlignmentErrors" },
+ { 4, 0x78, "RxFCSErrors" },
+ { 8, 0x7c, "RxGoodOctets" },
+ { 4, 0x84, "RxDropPkts" },
+ { 4, 0x88, "RxUnicastPkts" },
+ { 4, 0x8c, "RxMulticastPkts" },
+ { 4, 0x90, "RxBroadcastPkts" },
+ { 4, 0x94, "RxSAChanges" },
+ { 4, 0x98, "RxFragments" },
+};
+
+#define B53_MIBS_65_SIZE ARRAY_SIZE(b53_mibs_65)
+
+/* BCM63xx MIB counters */
+static const struct b53_mib_desc b53_mibs_63xx[] = {
+ { 8, 0x00, "TxOctets" },
+ { 4, 0x08, "TxDropPkts" },
+ { 4, 0x0c, "TxQoSPkts" },
+ { 4, 0x10, "TxBroadcastPkts" },
+ { 4, 0x14, "TxMulticastPkts" },
+ { 4, 0x18, "TxUnicastPkts" },
+ { 4, 0x1c, "TxCollisions" },
+ { 4, 0x20, "TxSingleCollision" },
+ { 4, 0x24, "TxMultipleCollision" },
+ { 4, 0x28, "TxDeferredTransmit" },
+ { 4, 0x2c, "TxLateCollision" },
+ { 4, 0x30, "TxExcessiveCollision" },
+ { 4, 0x38, "TxPausePkts" },
+ { 8, 0x3c, "TxQoSOctets" },
+ { 8, 0x44, "RxOctets" },
+ { 4, 0x4c, "RxUndersizePkts" },
+ { 4, 0x50, "RxPausePkts" },
+ { 4, 0x54, "Pkts64Octets" },
+ { 4, 0x58, "Pkts65to127Octets" },
+ { 4, 0x5c, "Pkts128to255Octets" },
+ { 4, 0x60, "Pkts256to511Octets" },
+ { 4, 0x64, "Pkts512to1023Octets" },
+ { 4, 0x68, "Pkts1024to1522Octets" },
+ { 4, 0x6c, "RxOversizePkts" },
+ { 4, 0x70, "RxJabbers" },
+ { 4, 0x74, "RxAlignmentErrors" },
+ { 4, 0x78, "RxFCSErrors" },
+ { 8, 0x7c, "RxGoodOctets" },
+ { 4, 0x84, "RxDropPkts" },
+ { 4, 0x88, "RxUnicastPkts" },
+ { 4, 0x8c, "RxMulticastPkts" },
+ { 4, 0x90, "RxBroadcastPkts" },
+ { 4, 0x94, "RxSAChanges" },
+ { 4, 0x98, "RxFragments" },
+ { 4, 0xa0, "RxSymbolErrors" },
+ { 4, 0xa4, "RxQoSPkts" },
+ { 8, 0xa8, "RxQoSOctets" },
+ { 4, 0xb0, "Pkts1523to2047Octets" },
+ { 4, 0xb4, "Pkts2048to4095Octets" },
+ { 4, 0xb8, "Pkts4096to8191Octets" },
+ { 4, 0xbc, "Pkts8192to9728Octets" },
+ { 4, 0xc0, "RxDiscarded" },
+};
+
+#define B53_MIBS_63XX_SIZE ARRAY_SIZE(b53_mibs_63xx)
+
+/* MIB counters */
+static const struct b53_mib_desc b53_mibs[] = {
+ { 8, 0x00, "TxOctets" },
+ { 4, 0x08, "TxDropPkts" },
+ { 4, 0x10, "TxBroadcastPkts" },
+ { 4, 0x14, "TxMulticastPkts" },
+ { 4, 0x18, "TxUnicastPkts" },
+ { 4, 0x1c, "TxCollisions" },
+ { 4, 0x20, "TxSingleCollision" },
+ { 4, 0x24, "TxMultipleCollision" },
+ { 4, 0x28, "TxDeferredTransmit" },
+ { 4, 0x2c, "TxLateCollision" },
+ { 4, 0x30, "TxExcessiveCollision" },
+ { 4, 0x38, "TxPausePkts" },
+ { 8, 0x50, "RxOctets" },
+ { 4, 0x58, "RxUndersizePkts" },
+ { 4, 0x5c, "RxPausePkts" },
+ { 4, 0x60, "Pkts64Octets" },
+ { 4, 0x64, "Pkts65to127Octets" },
+ { 4, 0x68, "Pkts128to255Octets" },
+ { 4, 0x6c, "Pkts256to511Octets" },
+ { 4, 0x70, "Pkts512to1023Octets" },
+ { 4, 0x74, "Pkts1024to1522Octets" },
+ { 4, 0x78, "RxOversizePkts" },
+ { 4, 0x7c, "RxJabbers" },
+ { 4, 0x80, "RxAlignmentErrors" },
+ { 4, 0x84, "RxFCSErrors" },
+ { 8, 0x88, "RxGoodOctets" },
+ { 4, 0x90, "RxDropPkts" },
+ { 4, 0x94, "RxUnicastPkts" },
+ { 4, 0x98, "RxMulticastPkts" },
+ { 4, 0x9c, "RxBroadcastPkts" },
+ { 4, 0xa0, "RxSAChanges" },
+ { 4, 0xa4, "RxFragments" },
+ { 4, 0xa8, "RxJumboPkts" },
+ { 4, 0xac, "RxSymbolErrors" },
+ { 4, 0xc0, "RxDiscarded" },
+};
+
+#define B53_MIBS_SIZE ARRAY_SIZE(b53_mibs)
+
+static int b53_do_vlan_op(struct b53_device *dev, u8 op)
+{
+ unsigned int i;
+
+ b53_write8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], VTA_START_CMD | op);
+
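+ /* poll until the hardware clears VTA_START_CMD to signal completion */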
+ for (i = 0; i < 10; i++) {
+ u8 vta;
+
+ b53_read8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], &vta);
+ if (!(vta & VTA_START_CMD))
+ return 0;
+
+ usleep_range(100, 200);
+ }
+
+ return -EIO;
+}
+
+static void b53_set_vlan_entry(struct b53_device *dev, u16 vid,
+ struct b53_vlan *vlan)
+{
+ if (is5325(dev)) {
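+ /* 5325: a single 32-bit entry; the untag mask sits above the member mask */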
+ u32 entry = 0;
+
+ if (vlan->members) {
+ entry = ((vlan->untag & VA_UNTAG_MASK_25) <<
+ VA_UNTAG_S_25) | vlan->members;
+ if (dev->core_rev >= 3)
+ entry |= VA_VALID_25_R4 | vid << VA_VID_HIGH_S;
+ else
+ entry |= VA_VALID_25;
+ }
+
+ b53_write32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, entry);
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid |
+ VTA_RW_STATE_WR | VTA_RW_OP_EN);
+ } else if (is5365(dev)) {
+ u16 entry = 0;
+
+ if (vlan->members)
+ entry = ((vlan->untag & VA_UNTAG_MASK_65) <<
+ VA_UNTAG_S_65) | vlan->members | VA_VALID_65;
+
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, entry);
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid |
+ VTA_RW_STATE_WR | VTA_RW_OP_EN);
+ } else {
+ b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
+ b53_write32(dev, B53_ARLIO_PAGE, dev->vta_regs[2],
+ (vlan->untag << VTE_UNTAG_S) | vlan->members);
+
+ b53_do_vlan_op(dev, VTA_CMD_WRITE);
+ }
+
+ dev_dbg(dev->ds->dev, "VID: %d, members: 0x%04x, untag: 0x%04x\n",
+ vid, vlan->members, vlan->untag);
+}
+
+static void b53_get_vlan_entry(struct b53_device *dev, u16 vid,
+ struct b53_vlan *vlan)
+{
+ if (is5325(dev)) {
+ u32 entry = 0;
+
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid |
+ VTA_RW_STATE_RD | VTA_RW_OP_EN);
+ b53_read32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, &entry);
+
+ if (dev->core_rev >= 3)
+ vlan->valid = !!(entry & VA_VALID_25_R4);
+ else
+ vlan->valid = !!(entry & VA_VALID_25);
+ vlan->members = entry & VA_MEMBER_MASK;
+ vlan->untag = (entry >> VA_UNTAG_S_25) & VA_UNTAG_MASK_25;
+
+ } else if (is5365(dev)) {
+ u16 entry = 0;
+
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid |
+ VTA_RW_STATE_WR | VTA_RW_OP_EN);
+ b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, &entry);
+
+ vlan->valid = !!(entry & VA_VALID_65);
+ vlan->members = entry & VA_MEMBER_MASK;
+ vlan->untag = (entry >> VA_UNTAG_S_65) & VA_UNTAG_MASK_65;
+ } else {
+ u32 entry = 0;
+
+ b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid);
+ b53_do_vlan_op(dev, VTA_CMD_READ);
+ b53_read32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], &entry);
+ vlan->members = entry & VTE_MEMBERS;
+ vlan->untag = (entry >> VTE_UNTAG_S) & VTE_MEMBERS;
+ vlan->valid = true;
+ }
+}
+
+static void b53_set_forwarding(struct b53_device *dev, int enable)
+{
+ u8 mgmt;
+
+ b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
+
+ if (enable)
+ mgmt |= SM_SW_FWD_EN;
+ else
+ mgmt &= ~SM_SW_FWD_EN;
+
+ b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
+}
+
+static void b53_enable_vlan(struct b53_device *dev, bool enable)
+{
+ u8 mgmt, vc0, vc1, vc4 = 0, vc5;
+
+ b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, &vc0);
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, &vc1);
+
+ if (is5325(dev) || is5365(dev)) {
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, &vc5);
+ } else if (is63xx(dev)) {
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, &vc4);
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, &vc5);
+ } else {
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, &vc4);
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5);
+ }
+
+ mgmt &= ~SM_SW_FWD_MODE;
+
+ if (enable) {
+ vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
+ vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
+ vc4 &= ~VC4_ING_VID_CHECK_MASK;
+ vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
+ vc5 |= VC5_DROP_VTABLE_MISS;
+
+ if (is5325(dev))
+ vc0 &= ~VC0_RESERVED_1;
+
+ if (is5325(dev) || is5365(dev))
+ vc1 |= VC1_RX_MCST_TAG_EN;
+
+ } else {
+ vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID);
+ vc1 &= ~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN);
+ vc4 &= ~VC4_ING_VID_CHECK_MASK;
+ vc5 &= ~VC5_DROP_VTABLE_MISS;
+
+ if (is5325(dev) || is5365(dev))
+ vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
+ else
+ vc4 |= VC4_ING_VID_VIO_TO_IMP << VC4_ING_VID_CHECK_S;
+
+ if (is5325(dev) || is5365(dev))
+ vc1 &= ~VC1_RX_MCST_TAG_EN;
+ }
+
+ if (!is5325(dev) && !is5365(dev))
+ vc5 &= ~VC5_VID_FFF_EN;
+
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, vc0);
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, vc1);
+
+ if (is5325(dev) || is5365(dev)) {
+ /* enable the high 8 bit vid check on 5325 */
+ if (is5325(dev) && enable)
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3,
+ VC3_HIGH_8BIT_EN);
+ else
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0);
+
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, vc4);
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, vc5);
+ } else if (is63xx(dev)) {
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3_63XX, 0);
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, vc4);
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, vc5);
+ } else {
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0);
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, vc4);
+ b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, vc5);
+ }
+
+ b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
+}
+
+static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100)
+{
+ u32 port_mask = 0;
+ u16 max_size = JMS_MIN_SIZE;
+
+ if (is5325(dev) || is5365(dev))
+ return -EINVAL;
+
+ if (enable) {
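+ /* enable jumbo frames on all active ports; the extra JPM bit also
+ * permits them on 10/100 links */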
+ port_mask = dev->enabled_ports;
+ max_size = JMS_MAX_SIZE;
+ if (allow_10_100)
+ port_mask |= JPM_10_100_JUMBO_EN;
+ }
+
+ b53_write32(dev, B53_JUMBO_PAGE, dev->jumbo_pm_reg, port_mask);
+ return b53_write16(dev, B53_JUMBO_PAGE, dev->jumbo_size_reg, max_size);
+}
+
+static int b53_flush_arl(struct b53_device *dev, u8 mask)
+{
+ unsigned int i;
+
+ b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
+ FAST_AGE_DONE | FAST_AGE_DYNAMIC | mask);
+
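+ /* the DONE bit self-clears once the flush has finished */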
+ for (i = 0; i < 10; i++) {
+ u8 fast_age_ctrl;
+
+ b53_read8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL,
+ &fast_age_ctrl);
+
+ if (!(fast_age_ctrl & FAST_AGE_DONE))
+ goto out;
+
+ msleep(1);
+ }
+
+ return -ETIMEDOUT;
+out:
+ /* Only age dynamic entries (default behavior) */
+ b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, FAST_AGE_DYNAMIC);
+ return 0;
+}
+
+static int b53_fast_age_port(struct b53_device *dev, int port)
+{
+ b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_PORT_CTRL, port);
+
+ return b53_flush_arl(dev, FAST_AGE_PORT);
+}
+
+static int b53_fast_age_vlan(struct b53_device *dev, u16 vid)
+{
+ b53_write16(dev, B53_CTRL_PAGE, B53_FAST_AGE_VID_CTRL, vid);
+
+ return b53_flush_arl(dev, FAST_AGE_VLAN);
+}
+
+static void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ unsigned int i;
+ u16 pvlan;
+
+ /* Enable the IMP port to be in the same VLAN as the other ports
+ * on a per-port basis such that we only have Port i and IMP in
+ * the same VLAN.
+ */
+ b53_for_each_port(dev, i) {
+ b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &pvlan);
+ pvlan |= BIT(cpu_port);
+ b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), pvlan);
+ }
+}
+
+static int b53_enable_port(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ unsigned int cpu_port = dev->cpu_port;
+ u16 pvlan;
+
+ /* Clear the Rx and Tx disable bits and set to no spanning tree */
+ b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0);
+
+ /* Set this port, and only this one, to be in the default VLAN;
+ * if it is a member of a bridge, restore the membership it had
+ * prior to the port being brought down.
+ */
+ b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
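+ /* clear all 9 possible port bits before rebuilding the membership map */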
+ pvlan &= ~0x1ff;
+ pvlan |= BIT(port);
+ pvlan |= dev->ports[port].vlan_ctl_mask;
+ b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
+
+ b53_imp_vlan_setup(ds, cpu_port);
+
+ return 0;
+}
+
+static void b53_disable_port(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ u8 reg;
+
+ /* Disable Tx/Rx for the port */
+ b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
+ reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE;
+ b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
+}
+
+static void b53_enable_cpu_port(struct b53_device *dev)
+{
+ unsigned int cpu_port = dev->cpu_port;
+ u8 port_ctrl;
+
+ /* BCM5325 CPU port is at 8 */
+ if ((is5325(dev) || is5365(dev)) && cpu_port == B53_CPU_PORT_25)
+ cpu_port = B53_CPU_PORT;
+
+ port_ctrl = PORT_CTRL_RX_BCST_EN |
+ PORT_CTRL_RX_MCST_EN |
+ PORT_CTRL_RX_UCST_EN;
+ b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(cpu_port), port_ctrl);
+}
+
+static void b53_enable_mib(struct b53_device *dev)
+{
+ u8 gc;
+
+ b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
+ gc &= ~(GC_RESET_MIB | GC_MIB_AC_EN);
+ b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
+}
+
+static int b53_configure_vlan(struct b53_device *dev)
+{
+ struct b53_vlan vl = { 0 };
+ int i;
+
+ /* clear all vlan entries */
+ if (is5325(dev) || is5365(dev)) {
+ for (i = 1; i < dev->num_vlans; i++)
+ b53_set_vlan_entry(dev, i, &vl);
+ } else {
+ b53_do_vlan_op(dev, VTA_CMD_CLEAR);
+ }
+
+ b53_enable_vlan(dev, false);
+
+ b53_for_each_port(dev, i)
+ b53_write16(dev, B53_VLAN_PAGE,
+ B53_VLAN_PORT_DEF_TAG(i), 1);
+
+ if (!is5325(dev) && !is5365(dev))
+ b53_set_jumbo(dev, dev->enable_jumbo, false);
+
+ return 0;
+}
+
+static void b53_switch_reset_gpio(struct b53_device *dev)
+{
+ int gpio = dev->reset_gpio;
+
+ if (gpio < 0)
+ return;
+
+ /* Reset sequence: RESET low (50 ms) -> high (20 ms) */
+ gpio_set_value(gpio, 0);
+ mdelay(50);
+
+ gpio_set_value(gpio, 1);
+ mdelay(20);
+
+ dev->current_page = 0xff;
+}
+
+static int b53_switch_reset(struct b53_device *dev)
+{
+ u8 mgmt;
+
+ b53_switch_reset_gpio(dev);
+
+ if (is539x(dev)) {
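+ /* assert, then release, the 539x-only software reset */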
+ b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x83);
+ b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00);
+ }
+
+ b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
+
+ if (!(mgmt & SM_SW_FWD_EN)) {
+ mgmt &= ~SM_SW_FWD_MODE;
+ mgmt |= SM_SW_FWD_EN;
+
+ b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
+ b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
+
+ if (!(mgmt & SM_SW_FWD_EN)) {
+ dev_err(dev->dev, "Failed to enable switch!\n");
+ return -EINVAL;
+ }
+ }
+
+ b53_enable_mib(dev);
+
+ return b53_flush_arl(dev, FAST_AGE_STATIC);
+}
+
+static int b53_phy_read16(struct dsa_switch *ds, int addr, int reg)
+{
+ struct b53_device *priv = ds_to_priv(ds);
+ u16 value = 0;
+ int ret;
+
+ if (priv->ops->phy_read16)
+ ret = priv->ops->phy_read16(priv, addr, reg, &value);
+ else
+ ret = b53_read16(priv, B53_PORT_MII_PAGE(addr),
+ reg * 2, &value);
+
+ return ret ? ret : value;
+}
+
+static int b53_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
+{
+ struct b53_device *priv = ds_to_priv(ds);
+
+ if (priv->ops->phy_write16)
+ return priv->ops->phy_write16(priv, addr, reg, val);
+
+ return b53_write16(priv, B53_PORT_MII_PAGE(addr), reg * 2, val);
+}
+
+static int b53_reset_switch(struct b53_device *priv)
+{
+ /* reset vlans */
+ priv->enable_jumbo = false;
+
+ memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans);
+ memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports);
+
+ return b53_switch_reset(priv);
+}
+
+static int b53_apply_config(struct b53_device *priv)
+{
+ /* disable switching */
+ b53_set_forwarding(priv, 0);
+
+ b53_configure_vlan(priv);
+
+ /* enable switching */
+ b53_set_forwarding(priv, 1);
+
+ return 0;
+}
+
+static void b53_reset_mib(struct b53_device *priv)
+{
+ u8 gc;
+
+ b53_read8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc);
+
+ b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc | GC_RESET_MIB);
+ msleep(1);
+ b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc & ~GC_RESET_MIB);
+ msleep(1);
+}
+
+static const struct b53_mib_desc *b53_get_mib(struct b53_device *dev)
+{
+ if (is5365(dev))
+ return b53_mibs_65;
+ else if (is63xx(dev))
+ return b53_mibs_63xx;
+ else
+ return b53_mibs;
+}
+
+static unsigned int b53_get_mib_size(struct b53_device *dev)
+{
+ if (is5365(dev))
+ return B53_MIBS_65_SIZE;
+ else if (is63xx(dev))
+ return B53_MIBS_63XX_SIZE;
+ else
+ return B53_MIBS_SIZE;
+}
+
+static void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ const struct b53_mib_desc *mibs = b53_get_mib(dev);
+ unsigned int mib_size = b53_get_mib_size(dev);
+ unsigned int i;
+
+ for (i = 0; i < mib_size; i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ mibs[i].name, ETH_GSTRING_LEN);
+}
+
+static void b53_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ const struct b53_mib_desc *mibs = b53_get_mib(dev);
+ unsigned int mib_size = b53_get_mib_size(dev);
+ const struct b53_mib_desc *s;
+ unsigned int i;
+ u64 val = 0;
+
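+ /* the BCM5365 exposes its port 5 counters on the MIB page of port 8 */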
+ if (is5365(dev) && port == 5)
+ port = 8;
+
+ mutex_lock(&dev->stats_mutex);
+
+ for (i = 0; i < mib_size; i++) {
+ s = &mibs[i];
+
+ if (s->size == 8) {
+ b53_read64(dev, B53_MIB_PAGE(port), s->offset, &val);
+ } else {
+ u32 val32;
+
+ b53_read32(dev, B53_MIB_PAGE(port), s->offset,
+ &val32);
+ val = val32;
+ }
+ data[i] = (u64)val;
+ }
+
+ mutex_unlock(&dev->stats_mutex);
+}
+
+static int b53_get_sset_count(struct dsa_switch *ds)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+
+ return b53_get_mib_size(dev);
+}
+
+static int b53_set_addr(struct dsa_switch *ds, u8 *addr)
+{
+ return 0;
+}
+
+static int b53_setup(struct dsa_switch *ds)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ unsigned int port;
+ int ret;
+
+ ret = b53_reset_switch(dev);
+ if (ret) {
+ dev_err(ds->dev, "failed to reset switch\n");
+ return ret;
+ }
+
+ b53_reset_mib(dev);
+
+ ret = b53_apply_config(dev);
+ if (ret)
+ dev_err(ds->dev, "failed to apply configuration\n");
+
+ for (port = 0; port < dev->num_ports; port++) {
+ if (BIT(port) & ds->enabled_port_mask)
+ b53_enable_port(ds, port, NULL);
+ else if (dsa_is_cpu_port(ds, port))
+ b53_enable_cpu_port(dev);
+ else
+ b53_disable_port(ds, port, NULL);
+ }
+
+ return ret;
+}
+
+static void b53_adjust_link(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ u8 rgmii_ctrl = 0, reg = 0, off;
+
+ if (!phy_is_pseudo_fixed_link(phydev))
+ return;
+
+ /* Override the port settings */
+ if (port == dev->cpu_port) {
+ off = B53_PORT_OVERRIDE_CTRL;
+ reg = PORT_OVERRIDE_EN;
+ } else {
+ off = B53_GMII_PORT_OVERRIDE_CTRL(port);
+ reg = GMII_PO_EN;
+ }
+
+ /* Set the link UP */
+ if (phydev->link)
+ reg |= PORT_OVERRIDE_LINK;
+
+ if (phydev->duplex == DUPLEX_FULL)
+ reg |= PORT_OVERRIDE_FULL_DUPLEX;
+
+ switch (phydev->speed) {
+ case 2000:
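+ /* no SPEED_2000 constant is defined, hence the raw value */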
+ reg |= PORT_OVERRIDE_SPEED_2000M;
+ /* fallthrough */
+ case SPEED_1000:
+ reg |= PORT_OVERRIDE_SPEED_1000M;
+ break;
+ case SPEED_100:
+ reg |= PORT_OVERRIDE_SPEED_100M;
+ break;
+ case SPEED_10:
+ reg |= PORT_OVERRIDE_SPEED_10M;
+ break;
+ default:
+ dev_err(ds->dev, "unknown speed: %d\n", phydev->speed);
+ return;
+ }
+
+ /* Enable flow control on BCM5301x's CPU port */
+ if (is5301x(dev) && port == dev->cpu_port)
+ reg |= PORT_OVERRIDE_RX_FLOW | PORT_OVERRIDE_TX_FLOW;
+
+ if (phydev->pause) {
+ if (phydev->asym_pause)
+ reg |= PORT_OVERRIDE_TX_FLOW;
+ reg |= PORT_OVERRIDE_RX_FLOW;
+ }
+
+ b53_write8(dev, B53_CTRL_PAGE, off, reg);
+
+ if (is531x5(dev) && phy_interface_is_rgmii(phydev)) {
+ if (port == 8)
+ off = B53_RGMII_CTRL_IMP;
+ else
+ off = B53_RGMII_CTRL_P(port);
+
+ /* Configure the port RGMII clock delay with the DLL disabled
+ * and tx_clk aligned timing (restoring the reset defaults)
+ */
+ b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
+ rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC |
+ RGMII_CTRL_TIMING_SEL);
+
+ /* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make
+ * sure that we enable the port TX clock internal delay to
+ * account for this internal delay that is inserted, otherwise
+ * the switch won't be able to receive correctly.
+ *
+ * PHY_INTERFACE_MODE_RGMII means that we are not introducing
+ * any delay on either transmission or reception, so the
+ * BCM53125 must also be configured accordingly to account for
+ * the lack of delay and introduce the delays itself.
+ *
+ * The BCM53125 switch has its RX clock and TX clock control
+ * swapped, hence the reason why we modify the TX clock path in
+ * the "RGMII" case.
+ */
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+ rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC;
+ rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;
+ b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
+
+ dev_info(ds->dev, "Configured port %d for %s\n", port,
+ phy_modes(phydev->interface));
+ }
+
+ /* configure MII port if necessary */
+ if (is5325(dev)) {
+ b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
+ &reg);
+
+ /* reverse mii needs to be enabled */
+ if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
+ b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
+ reg | PORT_OVERRIDE_RV_MII_25);
+ b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
+ &reg);
+
+ if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
+ dev_err(ds->dev,
+ "Failed to enable reverse MII mode\n");
+ return;
+ }
+ }
+ } else if (is5301x(dev)) {
+ if (port != dev->cpu_port) {
+ u8 po_reg = B53_GMII_PORT_OVERRIDE_CTRL(dev->cpu_port);
+ u8 gmii_po;
+
+ b53_read8(dev, B53_CTRL_PAGE, po_reg, &gmii_po);
+ gmii_po |= GMII_PO_LINK |
+ GMII_PO_RX_FLOW |
+ GMII_PO_TX_FLOW |
+ GMII_PO_EN |
+ GMII_PO_SPEED_2000M;
+ b53_write8(dev, B53_CTRL_PAGE, po_reg, gmii_po);
+ }
+ }
+}
+
+static int b53_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering)
+{
+ return 0;
+}
+
+static int b53_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+
+ if ((is5325(dev) || is5365(dev)) && vlan->vid_begin == 0)
+ return -EOPNOTSUPP;
+
+ if (vlan->vid_end > dev->num_vlans)
+ return -ERANGE;
+
+ b53_enable_vlan(dev, true);
+
+ return 0;
+}
+
+static void b53_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ unsigned int cpu_port = dev->cpu_port;
+ struct b53_vlan *vl;
+ u16 vid;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ vl = &dev->vlans[vid];
+
+ b53_get_vlan_entry(dev, vid, vl);
+
+ vl->members |= BIT(port) | BIT(cpu_port);
+ if (untagged)
+ vl->untag |= BIT(port) | BIT(cpu_port);
+ else
+ vl->untag &= ~(BIT(port) | BIT(cpu_port));
+
+ b53_set_vlan_entry(dev, vid, vl);
+ b53_fast_age_vlan(dev, vid);
+ }
+
+ if (pvid) {
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
+ vlan->vid_end);
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port),
+ vlan->vid_end);
+ /* age the pvid VLAN; vid has run past vid_end after the loop */
+ b53_fast_age_vlan(dev, vlan->vid_end);
+ }
+}
+
+static int b53_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ unsigned int cpu_port = dev->cpu_port;
+ struct b53_vlan *vl;
+ u16 vid;
+ u16 pvid;
+
+ b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ vl = &dev->vlans[vid];
+
+ b53_get_vlan_entry(dev, vid, vl);
+
+ vl->members &= ~BIT(port);
+ if ((vl->members & BIT(cpu_port)) == BIT(cpu_port))
+ vl->members = 0;
+
+ if (pvid == vid) {
+ if (is5325(dev) || is5365(dev))
+ pvid = 1;
+ else
+ pvid = 0;
+ }
+
+ if (untagged) {
+ vl->untag &= ~(BIT(port));
+ if ((vl->untag & BIT(cpu_port)) == BIT(cpu_port))
+ vl->untag = 0;
+ }
+
+ b53_set_vlan_entry(dev, vid, vl);
+ b53_fast_age_vlan(dev, vid);
+ }
+
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid);
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port), pvid);
+ b53_fast_age_vlan(dev, pvid);
+
+ return 0;
+}
+
+static int b53_vlan_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_vlan *vlan,
+ int (*cb)(struct switchdev_obj *obj))
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ u16 vid, vid_start = 0, pvid;
+ struct b53_vlan *vl;
+ int err = 0;
+
+ if (is5325(dev) || is5365(dev))
+ vid_start = 1;
+
+ b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
+
+ /* Use our software cache for dumps, since we do not have any HW
+ * operation returning only the used/valid VLANs
+ */
+ for (vid = vid_start; vid < dev->num_vlans; vid++) {
+ vl = &dev->vlans[vid];
+
+ if (!vl->valid)
+ continue;
+
+ if (!(vl->members & BIT(port)))
+ continue;
+
+ vlan->vid_begin = vlan->vid_end = vid;
+ vlan->flags = 0;
+
+ if (vl->untag & BIT(port))
+ vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
+ if (pvid == vid)
+ vlan->flags |= BRIDGE_VLAN_INFO_PVID;
+
+ err = cb(&vlan->obj);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+/* Address Resolution Logic routines */
+static int b53_arl_op_wait(struct b53_device *dev)
+{
+ unsigned int timeout = 10;
+ u8 reg;
+
+ do {
+ b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, &reg);
+ if (!(reg & ARLTBL_START_DONE))
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (timeout--);
+
+ dev_warn(dev->dev, "timeout waiting for ARL to finish: 0x%02x\n", reg);
+
+ return -ETIMEDOUT;
+}
+
+static int b53_arl_rw_op(struct b53_device *dev, unsigned int op)
+{
+ u8 reg;
+
+ if (op > ARLTBL_RW)
+ return -EINVAL;
+
+ b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, &reg);
+ reg |= ARLTBL_START_DONE;
+ if (op)
+ reg |= ARLTBL_RW;
+ else
+ reg &= ~ARLTBL_RW;
+ b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg);
+
+ return b53_arl_op_wait(dev);
+}
+
+static int b53_arl_read(struct b53_device *dev, u64 mac,
+ u16 vid, struct b53_arl_entry *ent, u8 *idx,
+ bool is_valid)
+{
+ unsigned int i;
+ int ret;
+
+ ret = b53_arl_op_wait(dev);
+ if (ret)
+ return ret;
+
+ /* Read the bins */
+ for (i = 0; i < dev->num_arl_entries; i++) {
+ u64 mac_vid;
+ u32 fwd_entry;
+
+ b53_read64(dev, B53_ARLIO_PAGE,
+ B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid);
+ b53_read32(dev, B53_ARLIO_PAGE,
+ B53_ARLTBL_DATA_ENTRY(i), &fwd_entry);
+ b53_arl_to_entry(ent, mac_vid, fwd_entry);
+
+ if (!(fwd_entry & ARLTBL_VALID))
+ continue;
+ if ((mac_vid & ARLTBL_MAC_MASK) != mac)
+ continue;
+ *idx = i;
+ /* report the matching entry instead of falling through */
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int b53_arl_op(struct b53_device *dev, int op, int port,
+ const unsigned char *addr, u16 vid, bool is_valid)
+{
+ struct b53_arl_entry ent;
+ u32 fwd_entry;
+ u64 mac, mac_vid = 0;
+ u8 idx = 0;
+ int ret;
+
+ /* Convert the array into a 64-bit MAC */
+ mac = b53_mac_to_u64(addr);
+
+ /* Perform a read for the given MAC and VID */
+ b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac);
+ b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid);
+
+ /* Issue a read operation for this MAC */
+ ret = b53_arl_rw_op(dev, 1);
+ if (ret)
+ return ret;
+
+ ret = b53_arl_read(dev, mac, vid, &ent, &idx, is_valid);
+ /* If this is a read, just finish now */
+ if (op)
+ return ret;
+
+ /* We could not find a matching MAC, so reset to a new entry */
+ if (ret) {
+ fwd_entry = 0;
+ idx = 1;
+ }
+
+ memset(&ent, 0, sizeof(ent));
+ ent.port = port;
+ ent.is_valid = is_valid;
+ ent.vid = vid;
+ ent.is_static = true;
+ memcpy(ent.mac, addr, ETH_ALEN);
+ b53_arl_from_entry(&mac_vid, &fwd_entry, &ent);
+
+ b53_write64(dev, B53_ARLIO_PAGE,
+ B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid);
+ b53_write32(dev, B53_ARLIO_PAGE,
+ B53_ARLTBL_DATA_ENTRY(idx), fwd_entry);
+
+ return b53_arl_rw_op(dev, 0);
+}
+
+static int b53_fdb_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
+{
+ struct b53_device *priv = ds_to_priv(ds);
+
+ /* 5325 and 5365 require some more massaging, but could
+ * be supported eventually
+ */
+ if (is5325(priv) || is5365(priv))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static void b53_fdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
+{
+ struct b53_device *priv = ds_to_priv(ds);
+
+ if (b53_arl_op(priv, 0, port, fdb->addr, fdb->vid, true))
+ pr_err("%s: failed to add MAC address\n", __func__);
+}
+
+static int b53_fdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb)
+{
+ struct b53_device *priv = ds_to_priv(ds);
+
+ return b53_arl_op(priv, 0, port, fdb->addr, fdb->vid, false);
+}
+
+static int b53_arl_search_wait(struct b53_device *dev)
+{
+ unsigned int timeout = 1000;
+ u8 reg;
+
+ do {
+ b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, &reg);
+ if (!(reg & ARL_SRCH_STDN))
+ return 0;
+
+ if (reg & ARL_SRCH_VLID)
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (timeout--);
+
+ return -ETIMEDOUT;
+}
+
+static void b53_arl_search_rd(struct b53_device *dev, u8 idx,
+ struct b53_arl_entry *ent)
+{
+ u64 mac_vid;
+ u32 fwd_entry;
+
+ b53_read64(dev, B53_ARLIO_PAGE,
+ B53_ARL_SRCH_RSTL_MACVID(idx), &mac_vid);
+ b53_read32(dev, B53_ARLIO_PAGE,
+ B53_ARL_SRCH_RSTL(idx), &fwd_entry);
+ b53_arl_to_entry(ent, mac_vid, fwd_entry);
+}
+
+static int b53_fdb_copy(struct net_device *dev, int port,
+ const struct b53_arl_entry *ent,
+ struct switchdev_obj_port_fdb *fdb,
+ int (*cb)(struct switchdev_obj *obj))
+{
+ if (!ent->is_valid)
+ return 0;
+
+ if (port != ent->port)
+ return 0;
+
+ ether_addr_copy(fdb->addr, ent->mac);
+ fdb->vid = ent->vid;
+ fdb->ndm_state = ent->is_static ? NUD_NOARP : NUD_REACHABLE;
+
+ return cb(&fdb->obj);
+}
+
+static int b53_fdb_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_fdb *fdb,
+ int (*cb)(struct switchdev_obj *obj))
+{
+ struct b53_device *priv = ds_to_priv(ds);
+ struct net_device *dev = ds->ports[port].netdev;
+ struct b53_arl_entry results[2];
+ unsigned int count = 0;
+ int ret;
+ u8 reg;
+
+ /* Start search operation */
+ reg = ARL_SRCH_STDN;
+ b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg);
+
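+ /* each wait/read cycle yields one result (two on devices with more
+ * than two ARL bins) */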
+ do {
+ ret = b53_arl_search_wait(priv);
+ if (ret)
+ return ret;
+
+ b53_arl_search_rd(priv, 0, &results[0]);
+ ret = b53_fdb_copy(dev, port, &results[0], fdb, cb);
+ if (ret)
+ return ret;
+
+ if (priv->num_arl_entries > 2) {
+ b53_arl_search_rd(priv, 1, &results[1]);
+ ret = b53_fdb_copy(dev, port, &results[1], fdb, cb);
+ if (ret)
+ return ret;
+
+ if (!results[0].is_valid && !results[1].is_valid)
+ break;
+ }
+
+ } while (count++ < 1024);
+
+ return 0;
+}
+
+static int b53_br_join(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ u16 pvlan, reg;
+ unsigned int i;
+
+ dev->ports[port].bridge_dev = bridge;
+ b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
+
+ b53_for_each_port(dev, i) {
+ if (dev->ports[i].bridge_dev != bridge)
+ continue;
+
+ /* Add this local port to the remote port VLAN control
+ * membership and update the remote port bitmask
+ */
+ b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
+ reg |= BIT(port);
+ b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg);
+ dev->ports[i].vlan_ctl_mask = reg;
+
+ pvlan |= BIT(i);
+ }
+
+ /* Configure the local port VLAN control membership to include
+ * remote ports and update the local port bitmask
+ */
+ b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
+ dev->ports[port].vlan_ctl_mask = pvlan;
+
+ return 0;
+}
+
+static void b53_br_leave(struct dsa_switch *ds, int port)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ struct net_device *bridge = dev->ports[port].bridge_dev;
+ struct b53_vlan *vl = &dev->vlans[0];
+ unsigned int i;
+ u16 pvlan, reg, pvid;
+
+ b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
+
+ b53_for_each_port(dev, i) {
+ /* Don't touch the remaining ports */
+ if (dev->ports[i].bridge_dev != bridge)
+ continue;
+
+ b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
+ reg &= ~BIT(port);
+ b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg);
+ dev->ports[port].vlan_ctl_mask = reg;
+
+ /* Prevent self removal to preserve isolation */
+ if (port != i)
+ pvlan &= ~BIT(i);
+ }
+
+ b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
+ dev->ports[port].vlan_ctl_mask = pvlan;
+ dev->ports[port].bridge_dev = NULL;
+
+ if (is5325(dev) || is5365(dev))
+ pvid = 1;
+ else
+ pvid = 0;
+
+ b53_get_vlan_entry(dev, pvid, vl);
+ vl->members |= BIT(port) | BIT(dev->cpu_port);
+ vl->untag |= BIT(port) | BIT(dev->cpu_port);
+ b53_set_vlan_entry(dev, pvid, vl);
+}
+
+static void b53_br_set_stp_state(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct b53_device *dev = ds_to_priv(ds);
+ u8 hw_state, cur_hw_state;
+ u8 reg;
+
+ b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
+ cur_hw_state = reg & PORT_CTRL_STP_STATE_MASK;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ hw_state = PORT_CTRL_DIS_STATE;
+ break;
+ case BR_STATE_LISTENING:
+ hw_state = PORT_CTRL_LISTEN_STATE;
+ break;
+ case BR_STATE_LEARNING:
+ hw_state = PORT_CTRL_LEARN_STATE;
+ break;
+ case BR_STATE_FORWARDING:
+ hw_state = PORT_CTRL_FWD_STATE;
+ break;
+ case BR_STATE_BLOCKING:
+ hw_state = PORT_CTRL_BLOCK_STATE;
+ break;
+ default:
+ dev_err(ds->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ /* Fast-age ARL entries if we are moving a port from the Learning or
+ * Forwarding state (cur_hw_state) to the Disabled, Blocking or
+ * Listening state (hw_state).
+ */
+ if (cur_hw_state != hw_state) {
+ if (cur_hw_state >= PORT_CTRL_LEARN_STATE &&
+ hw_state <= PORT_CTRL_LISTEN_STATE) {
+ if (b53_fast_age_port(dev, port)) {
+ dev_err(ds->dev, "fast ageing failed\n");
+ return;
+ }
+ }
+ }
+
+ b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
+ reg &= ~PORT_CTRL_STP_STATE_MASK;
+ reg |= hw_state;
+ b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
+}
+
+static struct dsa_switch_driver b53_switch_ops = {
+ .tag_protocol = DSA_TAG_PROTO_NONE,
+ .setup = b53_setup,
+ .set_addr = b53_set_addr,
+ .get_strings = b53_get_strings,
+ .get_ethtool_stats = b53_get_ethtool_stats,
+ .get_sset_count = b53_get_sset_count,
+ .phy_read = b53_phy_read16,
+ .phy_write = b53_phy_write16,
+ .adjust_link = b53_adjust_link,
+ .port_enable = b53_enable_port,
+ .port_disable = b53_disable_port,
+ .port_bridge_join = b53_br_join,
+ .port_bridge_leave = b53_br_leave,
+ .port_stp_state_set = b53_br_set_stp_state,
+ .port_vlan_filtering = b53_vlan_filtering,
+ .port_vlan_prepare = b53_vlan_prepare,
+ .port_vlan_add = b53_vlan_add,
+ .port_vlan_del = b53_vlan_del,
+ .port_vlan_dump = b53_vlan_dump,
+ .port_fdb_prepare = b53_fdb_prepare,
+ .port_fdb_dump = b53_fdb_dump,
+ .port_fdb_add = b53_fdb_add,
+ .port_fdb_del = b53_fdb_del,
+};
+
+struct b53_chip_data {
+ u32 chip_id;
+ const char *dev_name;
+ u16 vlans;
+ u16 enabled_ports;
+ u8 cpu_port;
+ u8 vta_regs[3];
+ u8 arl_entries;
+ u8 duplex_reg;
+ u8 jumbo_pm_reg;
+ u8 jumbo_size_reg;
+};
+
+#define B53_VTA_REGS \
+ { B53_VT_ACCESS, B53_VT_INDEX, B53_VT_ENTRY }
+#define B53_VTA_REGS_9798 \
+ { B53_VT_ACCESS_9798, B53_VT_INDEX_9798, B53_VT_ENTRY_9798 }
+#define B53_VTA_REGS_63XX \
+ { B53_VT_ACCESS_63XX, B53_VT_INDEX_63XX, B53_VT_ENTRY_63XX }
+
+static const struct b53_chip_data b53_switch_chips[] = {
+ {
+ .chip_id = BCM5325_DEVICE_ID,
+ .dev_name = "BCM5325",
+ .vlans = 16,
+ .enabled_ports = 0x1f,
+ .arl_entries = 2,
+ .cpu_port = B53_CPU_PORT_25,
+ .duplex_reg = B53_DUPLEX_STAT_FE,
+ },
+ {
+ .chip_id = BCM5365_DEVICE_ID,
+ .dev_name = "BCM5365",
+ .vlans = 256,
+ .enabled_ports = 0x1f,
+ .arl_entries = 2,
+ .cpu_port = B53_CPU_PORT_25,
+ .duplex_reg = B53_DUPLEX_STAT_FE,
+ },
+ {
+ .chip_id = BCM5395_DEVICE_ID,
+ .dev_name = "BCM5395",
+ .vlans = 4096,
+ .enabled_ports = 0x1f,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT,
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM5397_DEVICE_ID,
+ .dev_name = "BCM5397",
+ .vlans = 4096,
+ .enabled_ports = 0x1f,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT,
+ .vta_regs = B53_VTA_REGS_9798,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM5398_DEVICE_ID,
+ .dev_name = "BCM5398",
+ .vlans = 4096,
+ .enabled_ports = 0x7f,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT,
+ .vta_regs = B53_VTA_REGS_9798,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM53115_DEVICE_ID,
+ .dev_name = "BCM53115",
+ .vlans = 4096,
+ .enabled_ports = 0x1f,
+ .arl_entries = 4,
+ .vta_regs = B53_VTA_REGS,
+ .cpu_port = B53_CPU_PORT,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM53125_DEVICE_ID,
+ .dev_name = "BCM53125",
+ .vlans = 4096,
+ .enabled_ports = 0xff,
+ .cpu_port = B53_CPU_PORT,
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM53128_DEVICE_ID,
+ .dev_name = "BCM53128",
+ .vlans = 4096,
+ .enabled_ports = 0x1ff,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT,
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM63XX_DEVICE_ID,
+ .dev_name = "BCM63xx",
+ .vlans = 4096,
+ .enabled_ports = 0, /* pdata must provide them */
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT,
+ .vta_regs = B53_VTA_REGS_63XX,
+ .duplex_reg = B53_DUPLEX_STAT_63XX,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX,
+ },
+ {
+ .chip_id = BCM53010_DEVICE_ID,
+ .dev_name = "BCM53010",
+ .vlans = 4096,
+ .enabled_ports = 0x1f,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM53011_DEVICE_ID,
+ .dev_name = "BCM53011",
+ .vlans = 4096,
+ .enabled_ports = 0x1bf,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM53012_DEVICE_ID,
+ .dev_name = "BCM53012",
+ .vlans = 4096,
+ .enabled_ports = 0x1bf,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM53018_DEVICE_ID,
+ .dev_name = "BCM53018",
+ .vlans = 4096,
+ .enabled_ports = 0x1f,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM53019_DEVICE_ID,
+ .dev_name = "BCM53019",
+ .vlans = 4096,
+ .enabled_ports = 0x1f,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
+ .chip_id = BCM58XX_DEVICE_ID,
+ .dev_name = "BCM585xx/586xx/88312",
+ .vlans = 4096,
+ .enabled_ports = 0x1ff,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT_25,
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+};
+
+static int b53_switch_init(struct b53_device *dev)
+{
+ struct dsa_switch *ds = dev->ds;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(b53_switch_chips); i++) {
+ const struct b53_chip_data *chip = &b53_switch_chips[i];
+
+ if (chip->chip_id == dev->chip_id) {
+ if (!dev->enabled_ports)
+ dev->enabled_ports = chip->enabled_ports;
+ dev->name = chip->dev_name;
+ dev->duplex_reg = chip->duplex_reg;
+ dev->vta_regs[0] = chip->vta_regs[0];
+ dev->vta_regs[1] = chip->vta_regs[1];
+ dev->vta_regs[2] = chip->vta_regs[2];
+ dev->jumbo_pm_reg = chip->jumbo_pm_reg;
+ ds->drv = &b53_switch_ops;
+ dev->cpu_port = chip->cpu_port;
+ dev->num_vlans = chip->vlans;
+ dev->num_arl_entries = chip->arl_entries;
+ break;
+ }
+ }
+
+ /* check which BCM5325x version we have */
+ if (is5325(dev)) {
+ u8 vc4;
+
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4);
+
+ /* check reserved bits */
+ switch (vc4 & 3) {
+ case 1:
+ /* BCM5325E */
+ break;
+ case 3:
+ /* BCM5325F - do not use port 4 */
+ dev->enabled_ports &= ~BIT(4);
+ break;
+ default:
+/* On the BCM47XX SoCs this is the supported internal switch. */
+#ifndef CONFIG_BCM47XX
+ /* BCM5325M */
+ return -EINVAL;
+#else
+ break;
+#endif
+ }
+ } else if (dev->chip_id == BCM53115_DEVICE_ID) {
+ u64 strap_value;
+
+ b53_read48(dev, B53_STAT_PAGE, B53_STRAP_VALUE, &strap_value);
+ /* use second IMP port if GMII is enabled */
+ if (strap_value & SV_GMII_CTRL_115)
+ dev->cpu_port = 5;
+ }
+
+ /* cpu port is always last */
+ dev->num_ports = dev->cpu_port + 1;
+ dev->enabled_ports |= BIT(dev->cpu_port);
+
+ dev->ports = devm_kzalloc(dev->dev,
+ sizeof(struct b53_port) * dev->num_ports,
+ GFP_KERNEL);
+ if (!dev->ports)
+ return -ENOMEM;
+
+ dev->vlans = devm_kzalloc(dev->dev,
+ sizeof(struct b53_vlan) * dev->num_vlans,
+ GFP_KERNEL);
+ if (!dev->vlans)
+ return -ENOMEM;
+
+ dev->reset_gpio = b53_switch_get_reset_gpio(dev);
+ if (dev->reset_gpio >= 0) {
+ ret = devm_gpio_request_one(dev->dev, dev->reset_gpio,
+ GPIOF_OUT_INIT_HIGH, "robo_reset");
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+struct b53_device *b53_switch_alloc(struct device *base, struct b53_io_ops *ops,
+ void *priv)
+{
+ struct dsa_switch *ds;
+ struct b53_device *dev;
+
+ ds = devm_kzalloc(base, sizeof(*ds) + sizeof(*dev), GFP_KERNEL);
+ if (!ds)
+ return NULL;
+
+ dev = (struct b53_device *)(ds + 1);
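+ /* the b53_device lives in the same allocation, right after dsa_switch */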
+
+ ds->priv = dev;
+ ds->dev = base;
+ dev->dev = base;
+
+ dev->ds = ds;
+ dev->priv = priv;
+ dev->ops = ops;
+ mutex_init(&dev->reg_mutex);
+ mutex_init(&dev->stats_mutex);
+
+ return dev;
+}
+EXPORT_SYMBOL(b53_switch_alloc);
+
+int b53_switch_detect(struct b53_device *dev)
+{
+ u32 id32;
+ u16 tmp;
+ u8 id8;
+ int ret;
+
+ ret = b53_read8(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id8);
+ if (ret)
+ return ret;
+
+ switch (id8) {
+ case 0:
+ /* BCM5325 and BCM5365 do not have this register so reads
+ * return 0. But the read operation did succeed, so assume this
+ * is one of them.
+ *
+ * Next check if we can write to the 5325's VTA register; for
+ * 5365 it is read only.
+ */
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, 0xf);
+ b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, &tmp);
+
+ if (tmp == 0xf)
+ dev->chip_id = BCM5325_DEVICE_ID;
+ else
+ dev->chip_id = BCM5365_DEVICE_ID;
+ break;
+ case BCM5395_DEVICE_ID:
+ case BCM5397_DEVICE_ID:
+ case BCM5398_DEVICE_ID:
+ dev->chip_id = id8;
+ break;
+ default:
+ ret = b53_read32(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id32);
+ if (ret)
+ return ret;
+
+ switch (id32) {
+ case BCM53115_DEVICE_ID:
+ case BCM53125_DEVICE_ID:
+ case BCM53128_DEVICE_ID:
+ case BCM53010_DEVICE_ID:
+ case BCM53011_DEVICE_ID:
+ case BCM53012_DEVICE_ID:
+ case BCM53018_DEVICE_ID:
+ case BCM53019_DEVICE_ID:
+ dev->chip_id = id32;
+ break;
+ default:
+ pr_err("unsupported switch detected (BCM53%02x/BCM%x)\n",
+ id8, id32);
+ return -ENODEV;
+ }
+ }
+
+ if (dev->chip_id == BCM5325_DEVICE_ID)
+ return b53_read8(dev, B53_STAT_PAGE, B53_REV_ID_25,
+ &dev->core_rev);
+ else
+ return b53_read8(dev, B53_MGMT_PAGE, B53_REV_ID,
+ &dev->core_rev);
+}
+EXPORT_SYMBOL(b53_switch_detect);
+
+int b53_switch_register(struct b53_device *dev)
+{
+ int ret;
+
+ if (dev->pdata) {
+ dev->chip_id = dev->pdata->chip_id;
+ dev->enabled_ports = dev->pdata->enabled_ports;
+ }
+
+ if (!dev->chip_id && b53_switch_detect(dev))
+ return -EINVAL;
+
+ ret = b53_switch_init(dev);
+ if (ret)
+ return ret;
+
+ pr_info("found switch: %s, rev %i\n", dev->name, dev->core_rev);
+
+ return dsa_register_switch(dev->ds, dev->ds->dev->of_node);
+}
+EXPORT_SYMBOL(b53_switch_register);
+
+MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
+MODULE_DESCRIPTION("B53 switch library");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c
new file mode 100644
index 000000000..aa87c3fff
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_mdio.c
@@ -0,0 +1,392 @@
+/*
+ * B53 register access through MII registers
+ *
+ * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/phy.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/brcmphy.h>
+#include <linux/rtnetlink.h>
+#include <net/dsa.h>
+
+#include "b53_priv.h"
+
+/* MII registers */
+#define REG_MII_PAGE 0x10 /* MII Page register */
+#define REG_MII_ADDR 0x11 /* MII Address register */
+#define REG_MII_DATA0 0x18 /* MII Data register 0 */
+#define REG_MII_DATA1 0x19 /* MII Data register 1 */
+#define REG_MII_DATA2 0x1a /* MII Data register 2 */
+#define REG_MII_DATA3 0x1b /* MII Data register 3 */
+
+#define REG_MII_PAGE_ENABLE BIT(0)
+#define REG_MII_ADDR_WRITE BIT(0)
+#define REG_MII_ADDR_READ BIT(1)
+
+static int b53_mdio_op(struct b53_device *dev, u8 page, u8 reg, u16 op)
+{
+ int i;
+ u16 v;
+ int ret;
+ struct mii_bus *bus = dev->priv;
+
+ if (dev->current_page != page) {
+ /* set page number */
+ v = (page << 8) | REG_MII_PAGE_ENABLE;
+ ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_PAGE, v);
+ if (ret)
+ return ret;
+ dev->current_page = page;
+ }
+
+ /* set register address */
+ v = (reg << 8) | op;
+ ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR, REG_MII_ADDR, v);
+ if (ret)
+ return ret;
+
+ /* check if operation completed */
+ for (i = 0; i < 5; ++i) {
+ v = mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_ADDR);
+ if (!(v & (REG_MII_ADDR_WRITE | REG_MII_ADDR_READ)))
+ break;
+ usleep_range(10, 100);
+ }
+
+ if (WARN_ON(i == 5))
+ return -EIO;
+
+ return 0;
+}
+
+static int b53_mdio_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
+{
+ struct mii_bus *bus = dev->priv;
+ int ret;
+
+ ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
+ if (ret)
+ return ret;
+
+ *val = mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA0) & 0xff;
+
+ return 0;
+}
+
+static int b53_mdio_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
+{
+ struct mii_bus *bus = dev->priv;
+ int ret;
+
+ ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
+ if (ret)
+ return ret;
+
+ *val = mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR, REG_MII_DATA0);
+
+ return 0;
+}
+
+static int b53_mdio_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
+{
+ struct mii_bus *bus = dev->priv;
+ int ret;
+
+ ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
+ if (ret)
+ return ret;
+
+ *val = mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR, REG_MII_DATA0);
+ *val |= mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA1) << 16;
+
+ return 0;
+}
+
+static int b53_mdio_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ struct mii_bus *bus = dev->priv;
+ u64 temp = 0;
+ int i;
+ int ret;
+
+ ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
+ if (ret)
+ return ret;
+
+ for (i = 2; i >= 0; i--) {
+ temp <<= 16;
+ temp |= mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA0 + i);
+ }
+
+ *val = temp;
+
+ return 0;
+}
+
+static int b53_mdio_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ struct mii_bus *bus = dev->priv;
+ u64 temp = 0;
+ int i;
+ int ret;
+
+ ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ);
+ if (ret)
+ return ret;
+
+ for (i = 3; i >= 0; i--) {
+ temp <<= 16;
+ temp |= mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA0 + i);
+ }
+
+ *val = temp;
+
+ return 0;
+}
+
+static int b53_mdio_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
+{
+ struct mii_bus *bus = dev->priv;
+ int ret;
+
+ ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA0, value);
+ if (ret)
+ return ret;
+
+ return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
+}
+
+static int b53_mdio_write16(struct b53_device *dev, u8 page, u8 reg,
+ u16 value)
+{
+ struct mii_bus *bus = dev->priv;
+ int ret;
+
+ ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA0, value);
+ if (ret)
+ return ret;
+
+ return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
+}
+
+static int b53_mdio_write32(struct b53_device *dev, u8 page, u8 reg,
+ u32 value)
+{
+ struct mii_bus *bus = dev->priv;
+ unsigned int i;
+ u32 temp = value;
+
+ for (i = 0; i < 2; i++) {
+ int ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA0 + i,
+ temp & 0xffff);
+ if (ret)
+ return ret;
+ temp >>= 16;
+ }
+
+ return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
+}
+
+static int b53_mdio_write48(struct b53_device *dev, u8 page, u8 reg,
+ u64 value)
+{
+ struct mii_bus *bus = dev->priv;
+ unsigned int i;
+ u64 temp = value;
+
+ for (i = 0; i < 3; i++) {
+ int ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA0 + i,
+ temp & 0xffff);
+ if (ret)
+ return ret;
+ temp >>= 16;
+ }
+
+ return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
+}
+
+static int b53_mdio_write64(struct b53_device *dev, u8 page, u8 reg,
+ u64 value)
+{
+ struct mii_bus *bus = dev->priv;
+ unsigned int i;
+ u64 temp = value;
+
+ for (i = 0; i < 4; i++) {
+ int ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR,
+ REG_MII_DATA0 + i,
+ temp & 0xffff);
+ if (ret)
+ return ret;
+ temp >>= 16;
+ }
+
+ return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE);
+}
+
+static int b53_mdio_phy_read16(struct b53_device *dev, int addr, int reg,
+ u16 *value)
+{
+ struct mii_bus *bus = dev->priv;
+
+ *value = mdiobus_read_nested(bus, addr, reg);
+
+ return 0;
+}
+
+static int b53_mdio_phy_write16(struct b53_device *dev, int addr, int reg,
+ u16 value)
+{
+ struct mii_bus *bus = dev->bus;
+
+ return mdiobus_write_nested(bus, addr, reg, value);
+}
+
+static struct b53_io_ops b53_mdio_ops = {
+ .read8 = b53_mdio_read8,
+ .read16 = b53_mdio_read16,
+ .read32 = b53_mdio_read32,
+ .read48 = b53_mdio_read48,
+ .read64 = b53_mdio_read64,
+ .write8 = b53_mdio_write8,
+ .write16 = b53_mdio_write16,
+ .write32 = b53_mdio_write32,
+ .write48 = b53_mdio_write48,
+ .write64 = b53_mdio_write64,
+ .phy_read16 = b53_mdio_phy_read16,
+ .phy_write16 = b53_mdio_phy_write16,
+};
+
+#define B53_BRCM_OUI_1 0x0143bc00
+#define B53_BRCM_OUI_2 0x03625c00
+#define B53_BRCM_OUI_3 0x00406000
+
+static int b53_mdio_probe(struct mdio_device *mdiodev)
+{
+ struct b53_device *dev;
+ u32 phy_id;
+ int ret;
+
+ /* allow the generic PHY driver to take over the non-management MDIO
+ * addresses
+ */
+ if (mdiodev->addr != BRCM_PSEUDO_PHY_ADDR && mdiodev->addr != 0) {
+ dev_err(&mdiodev->dev, "leaving address %d to PHY\n",
+ mdiodev->addr);
+ return -ENODEV;
+ }
+
+ /* read the first port's id */
+ phy_id = mdiobus_read(mdiodev->bus, 0, 2) << 16;
+ phy_id |= mdiobus_read(mdiodev->bus, 0, 3);
+
+ /* BCM5325, BCM539x (OUI_1)
+ * BCM53125, BCM53128 (OUI_2)
+ * BCM5365 (OUI_3)
+ */
+ if ((phy_id & 0xfffffc00) != B53_BRCM_OUI_1 &&
+ (phy_id & 0xfffffc00) != B53_BRCM_OUI_2 &&
+ (phy_id & 0xfffffc00) != B53_BRCM_OUI_3) {
+ dev_err(&mdiodev->dev, "Unsupported device: 0x%08x\n", phy_id);
+ return -ENODEV;
+ }
+
+ /* The first probe will come from the SWITCH_MDIO controller on the
+ * 7445D0 switch, which will conflict with the 7445 integrated switch
+ * pseudo-PHY (we would end up programming both). In that case, return
+ * -EPROBE_DEFER the first time we get here, and wait until we come
+ * back with the slave MDIO bus, which has the correct indirection
+ * layer setup.
+ */
+ if (of_machine_is_compatible("brcm,bcm7445d0") &&
+ strcmp(mdiodev->bus->name, "sf2 slave mii"))
+ return -EPROBE_DEFER;
+
+ dev = b53_switch_alloc(&mdiodev->dev, &b53_mdio_ops, mdiodev->bus);
+ if (!dev)
+ return -ENOMEM;
+
+ /* we don't use page 0xff, so force a page set */
+ dev->current_page = 0xff;
+ dev->bus = mdiodev->bus;
+
+ dev_set_drvdata(&mdiodev->dev, dev);
+
+ ret = b53_switch_register(dev);
+ if (ret) {
+ dev_err(&mdiodev->dev, "failed to register switch: %i\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void b53_mdio_remove(struct mdio_device *mdiodev)
+{
+ struct b53_device *dev = dev_get_drvdata(&mdiodev->dev);
+ struct dsa_switch *ds = dev->ds;
+
+ dsa_unregister_switch(ds);
+}
+
+static const struct of_device_id b53_of_match[] = {
+ { .compatible = "brcm,bcm5325" },
+ { .compatible = "brcm,bcm53115" },
+ { .compatible = "brcm,bcm53125" },
+ { .compatible = "brcm,bcm53128" },
+ { .compatible = "brcm,bcm5365" },
+ { .compatible = "brcm,bcm5395" },
+ { .compatible = "brcm,bcm5397" },
+ { .compatible = "brcm,bcm5398" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, b53_of_match);
+
+static struct mdio_driver b53_mdio_driver = {
+ .probe = b53_mdio_probe,
+ .remove = b53_mdio_remove,
+ .mdiodrv.driver = {
+ .name = "bcm53xx",
+ .of_match_table = b53_of_match,
+ },
+};
+
+static int __init b53_mdio_driver_register(void)
+{
+ return mdio_driver_register(&b53_mdio_driver);
+}
+module_init(b53_mdio_driver_register);
+
+static void __exit b53_mdio_driver_unregister(void)
+{
+ mdio_driver_unregister(&b53_mdio_driver);
+}
+module_exit(b53_mdio_driver_unregister);
+
+MODULE_DESCRIPTION("B53 MDIO access driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
new file mode 100644
index 000000000..77ffc4312
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -0,0 +1,273 @@
+/*
+ * B53 register access through memory mapped registers
+ *
+ * Copyright (C) 2012-2013 Jonas Gorski <jogo@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kconfig.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/b53.h>
+
+#include "b53_priv.h"
+
+struct b53_mmap_priv {
+ void __iomem *regs;
+};
+
+static int b53_mmap_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
+{
+ u8 __iomem *regs = dev->priv;
+
+ *val = readb(regs + (page << 8) + reg);
+
+ return 0;
+}
+
+static int b53_mmap_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
+{
+ u8 __iomem *regs = dev->priv;
+
+ if (WARN_ON(reg % 2))
+ return -EINVAL;
+
+ if (dev->pdata && dev->pdata->big_endian)
+ *val = ioread16be(regs + (page << 8) + reg);
+ else
+ *val = readw(regs + (page << 8) + reg);
+
+ return 0;
+}
+
+static int b53_mmap_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
+{
+ u8 __iomem *regs = dev->priv;
+
+ if (WARN_ON(reg % 4))
+ return -EINVAL;
+
+ if (dev->pdata && dev->pdata->big_endian)
+ *val = ioread32be(regs + (page << 8) + reg);
+ else
+ *val = readl(regs + (page << 8) + reg);
+
+ return 0;
+}
+
+static int b53_mmap_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ u8 __iomem *regs = dev->priv;
+
+ if (WARN_ON(reg % 2))
+ return -EINVAL;
+
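+	/* A 48-bit register is only guaranteed 16-bit alignment; assemble
+	 * it from a 16-bit and a 32-bit access, ordered by the offset.
+	 */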
+ if (reg % 4) {
+ u16 lo;
+ u32 hi;
+
+ if (dev->pdata && dev->pdata->big_endian) {
+ lo = ioread16be(regs + (page << 8) + reg);
+ hi = ioread32be(regs + (page << 8) + reg + 2);
+ } else {
+ lo = readw(regs + (page << 8) + reg);
+ hi = readl(regs + (page << 8) + reg + 2);
+ }
+
+ *val = ((u64)hi << 16) | lo;
+ } else {
+ u32 lo;
+ u16 hi;
+
+ if (dev->pdata && dev->pdata->big_endian) {
+ lo = ioread32be(regs + (page << 8) + reg);
+ hi = ioread16be(regs + (page << 8) + reg + 4);
+ } else {
+ lo = readl(regs + (page << 8) + reg);
+ hi = readw(regs + (page << 8) + reg + 4);
+ }
+
+ *val = ((u64)hi << 32) | lo;
+ }
+
+ return 0;
+}
+
+static int b53_mmap_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ u8 __iomem *regs = dev->priv;
+ u32 hi, lo;
+
+ if (WARN_ON(reg % 4))
+ return -EINVAL;
+
+ if (dev->pdata && dev->pdata->big_endian) {
+ lo = ioread32be(regs + (page << 8) + reg);
+ hi = ioread32be(regs + (page << 8) + reg + 4);
+ } else {
+ lo = readl(regs + (page << 8) + reg);
+ hi = readl(regs + (page << 8) + reg + 4);
+ }
+
+ *val = ((u64)hi << 32) | lo;
+
+ return 0;
+}
+
+static int b53_mmap_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
+{
+ u8 __iomem *regs = dev->priv;
+
+ writeb(value, regs + (page << 8) + reg);
+
+ return 0;
+}
+
+static int b53_mmap_write16(struct b53_device *dev, u8 page, u8 reg,
+ u16 value)
+{
+ u8 __iomem *regs = dev->priv;
+
+ if (WARN_ON(reg % 2))
+ return -EINVAL;
+
+ if (dev->pdata && dev->pdata->big_endian)
+ iowrite16be(value, regs + (page << 8) + reg);
+ else
+ writew(value, regs + (page << 8) + reg);
+
+ return 0;
+}
+
+static int b53_mmap_write32(struct b53_device *dev, u8 page, u8 reg,
+ u32 value)
+{
+ u8 __iomem *regs = dev->priv;
+
+ if (WARN_ON(reg % 4))
+ return -EINVAL;
+
+ if (dev->pdata && dev->pdata->big_endian)
+ iowrite32be(value, regs + (page << 8) + reg);
+ else
+ writel(value, regs + (page << 8) + reg);
+
+ return 0;
+}
+
+static int b53_mmap_write48(struct b53_device *dev, u8 page, u8 reg,
+ u64 value)
+{
+ if (WARN_ON(reg % 2))
+ return -EINVAL;
+
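+	/* Mirror b53_mmap_read48(): emit the 16-bit half first when the
+	 * offset is only 16-bit aligned, the 32-bit half first otherwise.
+	 */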
+ if (reg % 4) {
+ u32 hi = (u32)(value >> 16);
+ u16 lo = (u16)value;
+
+ b53_mmap_write16(dev, page, reg, lo);
+ b53_mmap_write32(dev, page, reg + 2, hi);
+ } else {
+ u16 hi = (u16)(value >> 32);
+ u32 lo = (u32)value;
+
+ b53_mmap_write32(dev, page, reg, lo);
+ b53_mmap_write16(dev, page, reg + 4, hi);
+ }
+
+ return 0;
+}
+
+static int b53_mmap_write64(struct b53_device *dev, u8 page, u8 reg,
+ u64 value)
+{
+ u32 hi, lo;
+
+ hi = upper_32_bits(value);
+ lo = lower_32_bits(value);
+
+ if (WARN_ON(reg % 4))
+ return -EINVAL;
+
+ b53_mmap_write32(dev, page, reg, lo);
+ b53_mmap_write32(dev, page, reg + 4, hi);
+
+ return 0;
+}
+
+static struct b53_io_ops b53_mmap_ops = {
+ .read8 = b53_mmap_read8,
+ .read16 = b53_mmap_read16,
+ .read32 = b53_mmap_read32,
+ .read48 = b53_mmap_read48,
+ .read64 = b53_mmap_read64,
+ .write8 = b53_mmap_write8,
+ .write16 = b53_mmap_write16,
+ .write32 = b53_mmap_write32,
+ .write48 = b53_mmap_write48,
+ .write64 = b53_mmap_write64,
+};
+
+static int b53_mmap_probe(struct platform_device *pdev)
+{
+ struct b53_platform_data *pdata = pdev->dev.platform_data;
+ struct b53_device *dev;
+
+ if (!pdata)
+ return -EINVAL;
+
+ dev = b53_switch_alloc(&pdev->dev, &b53_mmap_ops, pdata->regs);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->pdata = pdata;
+
+ platform_set_drvdata(pdev, dev);
+
+ return b53_switch_register(dev);
+}
+
+static int b53_mmap_remove(struct platform_device *pdev)
+{
+ struct b53_device *dev = platform_get_drvdata(pdev);
+
+ if (dev)
+ b53_switch_remove(dev);
+
+ return 0;
+}
+
+static const struct of_device_id b53_mmap_of_table[] = {
+ { .compatible = "brcm,bcm3384-switch" },
+ { .compatible = "brcm,bcm6328-switch" },
+ { .compatible = "brcm,bcm6368-switch" },
+ { .compatible = "brcm,bcm63xx-switch" },
+ { /* sentinel */ },
+};
+
+static struct platform_driver b53_mmap_driver = {
+ .probe = b53_mmap_probe,
+ .remove = b53_mmap_remove,
+ .driver = {
+ .name = "b53-switch",
+ .of_match_table = b53_mmap_of_table,
+ },
+};
+
+module_platform_driver(b53_mmap_driver);
+MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
+MODULE_DESCRIPTION("B53 MMAP access driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
new file mode 100644
index 000000000..835a744f2
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -0,0 +1,388 @@
+/*
+ * B53 common definitions
+ *
+ * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __B53_PRIV_H
+#define __B53_PRIV_H
+
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/phy.h>
+#include <net/dsa.h>
+
+#include "b53_regs.h"
+
+struct b53_device;
+struct net_device;
+
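+/* Register accessors implemented by each bus backend (MDIO, MMIO, SPI,
+ * SRAB); the common code only ever goes through these operations.
+ */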
+struct b53_io_ops {
+ int (*read8)(struct b53_device *dev, u8 page, u8 reg, u8 *value);
+ int (*read16)(struct b53_device *dev, u8 page, u8 reg, u16 *value);
+ int (*read32)(struct b53_device *dev, u8 page, u8 reg, u32 *value);
+ int (*read48)(struct b53_device *dev, u8 page, u8 reg, u64 *value);
+ int (*read64)(struct b53_device *dev, u8 page, u8 reg, u64 *value);
+ int (*write8)(struct b53_device *dev, u8 page, u8 reg, u8 value);
+ int (*write16)(struct b53_device *dev, u8 page, u8 reg, u16 value);
+ int (*write32)(struct b53_device *dev, u8 page, u8 reg, u32 value);
+ int (*write48)(struct b53_device *dev, u8 page, u8 reg, u64 value);
+ int (*write64)(struct b53_device *dev, u8 page, u8 reg, u64 value);
+ int (*phy_read16)(struct b53_device *dev, int addr, int reg, u16 *value);
+ int (*phy_write16)(struct b53_device *dev, int addr, int reg, u16 value);
+};
+
+enum {
+ BCM5325_DEVICE_ID = 0x25,
+ BCM5365_DEVICE_ID = 0x65,
+ BCM5395_DEVICE_ID = 0x95,
+ BCM5397_DEVICE_ID = 0x97,
+ BCM5398_DEVICE_ID = 0x98,
+ BCM53115_DEVICE_ID = 0x53115,
+ BCM53125_DEVICE_ID = 0x53125,
+ BCM53128_DEVICE_ID = 0x53128,
+ BCM63XX_DEVICE_ID = 0x6300,
+ BCM53010_DEVICE_ID = 0x53010,
+ BCM53011_DEVICE_ID = 0x53011,
+ BCM53012_DEVICE_ID = 0x53012,
+ BCM53018_DEVICE_ID = 0x53018,
+ BCM53019_DEVICE_ID = 0x53019,
+ BCM58XX_DEVICE_ID = 0x5800,
+};
+
+#define B53_N_PORTS 9
+#define B53_N_PORTS_25 6
+
+struct b53_port {
+ u16 vlan_ctl_mask;
+ struct net_device *bridge_dev;
+};
+
+struct b53_vlan {
+ u16 members;
+ u16 untag;
+ bool valid;
+};
+
+struct b53_device {
+ struct dsa_switch *ds;
+ struct b53_platform_data *pdata;
+ const char *name;
+
+ struct mutex reg_mutex;
+ struct mutex stats_mutex;
+ const struct b53_io_ops *ops;
+
+ /* chip specific data */
+ u32 chip_id;
+ u8 core_rev;
+ u8 vta_regs[3];
+ u8 duplex_reg;
+ u8 jumbo_pm_reg;
+ u8 jumbo_size_reg;
+ int reset_gpio;
+ u8 num_arl_entries;
+
+ /* used ports mask */
+ u16 enabled_ports;
+ unsigned int cpu_port;
+
+ /* connect specific data */
+ u8 current_page;
+ struct device *dev;
+
+ /* Master MDIO bus we got probed from */
+ struct mii_bus *bus;
+
+ void *priv;
+
+ /* run time configuration */
+ bool enable_jumbo;
+
+ unsigned int num_vlans;
+ struct b53_vlan *vlans;
+ unsigned int num_ports;
+ struct b53_port *ports;
+};
+
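+/* Iterate over all possible port numbers, skipping ports that are not
+ * set in the enabled_ports mask.
+ */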
+#define b53_for_each_port(dev, i) \
+ for (i = 0; i < B53_N_PORTS; i++) \
+ if (dev->enabled_ports & BIT(i))
+
+static inline int is5325(struct b53_device *dev)
+{
+ return dev->chip_id == BCM5325_DEVICE_ID;
+}
+
+static inline int is5365(struct b53_device *dev)
+{
+#ifdef CONFIG_BCM47XX
+ return dev->chip_id == BCM5365_DEVICE_ID;
+#else
+ return 0;
+#endif
+}
+
+static inline int is5397_98(struct b53_device *dev)
+{
+ return dev->chip_id == BCM5397_DEVICE_ID ||
+ dev->chip_id == BCM5398_DEVICE_ID;
+}
+
+static inline int is539x(struct b53_device *dev)
+{
+ return dev->chip_id == BCM5395_DEVICE_ID ||
+ dev->chip_id == BCM5397_DEVICE_ID ||
+ dev->chip_id == BCM5398_DEVICE_ID;
+}
+
+static inline int is531x5(struct b53_device *dev)
+{
+ return dev->chip_id == BCM53115_DEVICE_ID ||
+ dev->chip_id == BCM53125_DEVICE_ID ||
+ dev->chip_id == BCM53128_DEVICE_ID;
+}
+
+static inline int is63xx(struct b53_device *dev)
+{
+#ifdef CONFIG_BCM63XX
+ return dev->chip_id == BCM63XX_DEVICE_ID;
+#else
+ return 0;
+#endif
+}
+
+static inline int is5301x(struct b53_device *dev)
+{
+ return dev->chip_id == BCM53010_DEVICE_ID ||
+ dev->chip_id == BCM53011_DEVICE_ID ||
+ dev->chip_id == BCM53012_DEVICE_ID ||
+ dev->chip_id == BCM53018_DEVICE_ID ||
+ dev->chip_id == BCM53019_DEVICE_ID;
+}
+
+#define B53_CPU_PORT_25 5
+#define B53_CPU_PORT 8
+
+static inline int is_cpu_port(struct b53_device *dev, int port)
+{
+	return dev->cpu_port == port;
+}
+
+struct b53_device *b53_switch_alloc(struct device *base, struct b53_io_ops *ops,
+ void *priv);
+
+int b53_switch_detect(struct b53_device *dev);
+
+int b53_switch_register(struct b53_device *dev);
+
+static inline void b53_switch_remove(struct b53_device *dev)
+{
+ dsa_unregister_switch(dev->ds);
+}
+
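+/* All register accessors below serialize on reg_mutex, so concurrent
+ * callers never interleave multi-step bus transactions.
+ */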
+static inline int b53_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->read8(dev, page, reg, val);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int b53_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->read16(dev, page, reg, val);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int b53_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->read32(dev, page, reg, val);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int b53_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->read48(dev, page, reg, val);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int b53_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->read64(dev, page, reg, val);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int b53_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->write8(dev, page, reg, value);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int b53_write16(struct b53_device *dev, u8 page, u8 reg,
+ u16 value)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->write16(dev, page, reg, value);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int b53_write32(struct b53_device *dev, u8 page, u8 reg,
+ u32 value)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->write32(dev, page, reg, value);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int b53_write48(struct b53_device *dev, u8 page, u8 reg,
+ u64 value)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->write48(dev, page, reg, value);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int b53_write64(struct b53_device *dev, u8 page, u8 reg,
+ u64 value)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->write64(dev, page, reg, value);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+struct b53_arl_entry {
+ u8 port;
+ u8 mac[ETH_ALEN];
+ u16 vid;
+ u8 is_valid:1;
+ u8 is_age:1;
+ u8 is_static:1;
+};
+
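+/* MAC addresses are kept in network byte order within the u64:
+ * dst[0] carries the most significant byte of the 48-bit address.
+ */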
+static inline void b53_mac_from_u64(u64 src, u8 *dst)
+{
+ unsigned int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ dst[ETH_ALEN - 1 - i] = (src >> (8 * i)) & 0xff;
+}
+
+static inline u64 b53_mac_to_u64(const u8 *src)
+{
+ unsigned int i;
+ u64 dst = 0;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ dst |= (u64)src[ETH_ALEN - 1 - i] << (8 * i);
+
+ return dst;
+}
+
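+/* Convert between the packed 64-bit MAC/VID and 32-bit forward-entry
+ * register formats and the unpacked b53_arl_entry representation.
+ */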
+static inline void b53_arl_to_entry(struct b53_arl_entry *ent,
+ u64 mac_vid, u32 fwd_entry)
+{
+ memset(ent, 0, sizeof(*ent));
+ ent->port = fwd_entry & ARLTBL_DATA_PORT_ID_MASK;
+ ent->is_valid = !!(fwd_entry & ARLTBL_VALID);
+ ent->is_age = !!(fwd_entry & ARLTBL_AGE);
+ ent->is_static = !!(fwd_entry & ARLTBL_STATIC);
+ b53_mac_from_u64(mac_vid, ent->mac);
+ ent->vid = mac_vid >> ARLTBL_VID_S;
+}
+
+static inline void b53_arl_from_entry(u64 *mac_vid, u32 *fwd_entry,
+ const struct b53_arl_entry *ent)
+{
+ *mac_vid = b53_mac_to_u64(ent->mac);
+ *mac_vid |= (u64)(ent->vid & ARLTBL_VID_MASK) << ARLTBL_VID_S;
+ *fwd_entry = ent->port & ARLTBL_DATA_PORT_ID_MASK;
+ if (ent->is_valid)
+ *fwd_entry |= ARLTBL_VALID;
+ if (ent->is_static)
+ *fwd_entry |= ARLTBL_STATIC;
+ if (ent->is_age)
+ *fwd_entry |= ARLTBL_AGE;
+}
+
+#ifdef CONFIG_BCM47XX
+
+#include <linux/version.h>
+#include <linux/bcm47xx_nvram.h>
+#include <bcm47xx_board.h>
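+
+/* Look up the switch reset GPIO: a few boards hardwire it, everything
+ * else is resolved through the "robo_reset" NVRAM entry.
+ */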
+static inline int b53_switch_get_reset_gpio(struct b53_device *dev)
+{
+ enum bcm47xx_board board = bcm47xx_board_get();
+
+ switch (board) {
+ case BCM47XX_BOARD_LINKSYS_WRT300NV11:
+ case BCM47XX_BOARD_LINKSYS_WRT310NV1:
+ return 8;
+ default:
+ return bcm47xx_nvram_gpio_pin("robo_reset");
+ }
+}
+#else
+static inline int b53_switch_get_reset_gpio(struct b53_device *dev)
+{
+ return -ENOENT;
+}
+#endif
+#endif
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
new file mode 100644
index 000000000..a0b453ea3
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_regs.h
@@ -0,0 +1,434 @@
+/*
+ * B53 register definitions
+ *
+ * Copyright (C) 2004 Broadcom Corporation
+ * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __B53_REGS_H
+#define __B53_REGS_H
+
+/* Management Port (SMP) Page offsets */
+#define B53_CTRL_PAGE 0x00 /* Control */
+#define B53_STAT_PAGE 0x01 /* Status */
+#define B53_MGMT_PAGE 0x02 /* Management Mode */
+#define B53_MIB_AC_PAGE 0x03 /* MIB Autocast */
+#define B53_ARLCTRL_PAGE 0x04 /* ARL Control */
+#define B53_ARLIO_PAGE 0x05 /* ARL Access */
+#define B53_FRAMEBUF_PAGE 0x06 /* Management frame access */
+#define B53_MEM_ACCESS_PAGE 0x08 /* Memory access */
+
+/* PHY Registers */
+#define B53_PORT_MII_PAGE(i) (0x10 + (i)) /* Port i MII Registers */
+#define B53_IM_PORT_PAGE 0x18 /* Inverse MII Port (to EMAC) */
+#define B53_ALL_PORT_PAGE 0x19 /* All ports MII (broadcast) */
+
+/* MIB registers */
+#define B53_MIB_PAGE(i) (0x20 + (i))
+
+/* Quality of Service (QoS) Registers */
+#define B53_QOS_PAGE 0x30
+
+/* Port VLAN Page */
+#define B53_PVLAN_PAGE 0x31
+
+/* VLAN Registers */
+#define B53_VLAN_PAGE 0x34
+
+/* Jumbo Frame Registers */
+#define B53_JUMBO_PAGE 0x40
+
+/* CFP Configuration Registers Page */
+#define B53_CFP_PAGE 0xa1
+
+/*************************************************************************
+ * Control Page registers
+ *************************************************************************/
+
+/* Port Control Register (8 bit) */
+#define B53_PORT_CTRL(i) (0x00 + (i))
+#define PORT_CTRL_RX_DISABLE BIT(0)
+#define PORT_CTRL_TX_DISABLE BIT(1)
+#define PORT_CTRL_RX_BCST_EN BIT(2) /* Broadcast RX (P8 only) */
+#define PORT_CTRL_RX_MCST_EN BIT(3) /* Multicast RX (P8 only) */
+#define PORT_CTRL_RX_UCST_EN BIT(4) /* Unicast RX (P8 only) */
+#define PORT_CTRL_STP_STATE_S 5
+#define PORT_CTRL_NO_STP (0 << PORT_CTRL_STP_STATE_S)
+#define PORT_CTRL_DIS_STATE (1 << PORT_CTRL_STP_STATE_S)
+#define PORT_CTRL_BLOCK_STATE (2 << PORT_CTRL_STP_STATE_S)
+#define PORT_CTRL_LISTEN_STATE (3 << PORT_CTRL_STP_STATE_S)
+#define PORT_CTRL_LEARN_STATE (4 << PORT_CTRL_STP_STATE_S)
+#define PORT_CTRL_FWD_STATE (5 << PORT_CTRL_STP_STATE_S)
+#define PORT_CTRL_STP_STATE_MASK (0x7 << PORT_CTRL_STP_STATE_S)
+
+/* SMP Control Register (8 bit) */
+#define B53_SMP_CTRL 0x0a
+
+/* Switch Mode Control Register (8 bit) */
+#define B53_SWITCH_MODE 0x0b
+#define SM_SW_FWD_MODE BIT(0) /* 1 = Managed Mode */
+#define SM_SW_FWD_EN BIT(1) /* Forwarding Enable */
+
+/* IMP Port state override register (8 bit) */
+#define B53_PORT_OVERRIDE_CTRL 0x0e
+#define PORT_OVERRIDE_LINK BIT(0)
+#define PORT_OVERRIDE_FULL_DUPLEX BIT(1) /* 0 = Half Duplex */
+#define PORT_OVERRIDE_SPEED_S 2
+#define PORT_OVERRIDE_SPEED_10M (0 << PORT_OVERRIDE_SPEED_S)
+#define PORT_OVERRIDE_SPEED_100M (1 << PORT_OVERRIDE_SPEED_S)
+#define PORT_OVERRIDE_SPEED_1000M (2 << PORT_OVERRIDE_SPEED_S)
+#define PORT_OVERRIDE_RV_MII_25 BIT(4) /* BCM5325 only */
+#define PORT_OVERRIDE_RX_FLOW BIT(4)
+#define PORT_OVERRIDE_TX_FLOW BIT(5)
+#define PORT_OVERRIDE_SPEED_2000M BIT(6) /* BCM5301X only, requires setting 1000M */
+#define PORT_OVERRIDE_EN BIT(7) /* Use the register contents */
+
+/* Power-down mode control */
+#define B53_PD_MODE_CTRL_25 0x0f
+
+/* IP Multicast control (8 bit) */
+#define B53_IP_MULTICAST_CTRL 0x21
+#define B53_IPMC_FWD_EN BIT(1)
+#define B53_UC_FWD_EN BIT(6)
+#define B53_MC_FWD_EN BIT(7)
+
+/* Flood mask registers for unknown unicast/multicast/IPMC (16 bit) */
+#define B53_UC_FLOOD_MASK 0x32
+#define B53_MC_FLOOD_MASK 0x34
+#define B53_IPMC_FLOOD_MASK 0x36
+
+/*
+ * Override Ports 0-7 State on devices with xMII interfaces (8 bit)
+ *
+ * For port 8, keep using B53_PORT_OVERRIDE_CTRL.
+ * Note that not every port is available on all hardware; e.g. BCM5301X
+ * does not support overriding port 6, and BCM63xx has some limitations
+ * as well.
+ */
+#define B53_GMII_PORT_OVERRIDE_CTRL(i) (0x58 + (i))
+#define GMII_PO_LINK BIT(0)
+#define GMII_PO_FULL_DUPLEX BIT(1) /* 0 = Half Duplex */
+#define GMII_PO_SPEED_S 2
+#define GMII_PO_SPEED_10M (0 << GMII_PO_SPEED_S)
+#define GMII_PO_SPEED_100M (1 << GMII_PO_SPEED_S)
+#define GMII_PO_SPEED_1000M (2 << GMII_PO_SPEED_S)
+#define GMII_PO_RX_FLOW BIT(4)
+#define GMII_PO_TX_FLOW BIT(5)
+#define GMII_PO_EN BIT(6) /* Use the register contents */
+#define GMII_PO_SPEED_2000M BIT(7) /* BCM5301X only, requires setting 1000M */
+
+#define B53_RGMII_CTRL_IMP 0x60
+#define RGMII_CTRL_ENABLE_GMII BIT(7)
+#define RGMII_CTRL_TIMING_SEL BIT(2)
+#define RGMII_CTRL_DLL_RXC BIT(1)
+#define RGMII_CTRL_DLL_TXC BIT(0)
+
+#define B53_RGMII_CTRL_P(i) (B53_RGMII_CTRL_IMP + (i))
+
+/* Software reset register (8 bit) */
+#define B53_SOFTRESET 0x79
+#define SW_RST BIT(7)
+#define EN_SW_RST BIT(4)
+
+/* Fast Aging Control register (8 bit) */
+#define B53_FAST_AGE_CTRL 0x88
+#define FAST_AGE_STATIC BIT(0)
+#define FAST_AGE_DYNAMIC BIT(1)
+#define FAST_AGE_PORT BIT(2)
+#define FAST_AGE_VLAN BIT(3)
+#define FAST_AGE_STP BIT(4)
+#define FAST_AGE_MC BIT(5)
+#define FAST_AGE_DONE BIT(7)
+
+/* Fast Aging Port Control register (8 bit) */
+#define B53_FAST_AGE_PORT_CTRL 0x89
+
+/* Fast Aging VID Control register (16 bit) */
+#define B53_FAST_AGE_VID_CTRL 0x8a
+
+/*************************************************************************
+ * Status Page registers
+ *************************************************************************/
+
+/* Link Status Summary Register (16bit) */
+#define B53_LINK_STAT 0x00
+
+/* Link Status Change Register (16 bit) */
+#define B53_LINK_STAT_CHANGE 0x02
+
+/* Port Speed Summary Register (16 bit for FE, 32 bit for GE) */
+#define B53_SPEED_STAT 0x04
+#define SPEED_PORT_FE(reg, port) (((reg) >> (port)) & 1)
+#define SPEED_PORT_GE(reg, port) (((reg) >> (2 * (port))) & 3)
+#define SPEED_STAT_10M 0
+#define SPEED_STAT_100M 1
+#define SPEED_STAT_1000M 2
+
+/* Duplex Status Summary (16 bit) */
+#define B53_DUPLEX_STAT_FE 0x06
+#define B53_DUPLEX_STAT_GE 0x08
+#define B53_DUPLEX_STAT_63XX 0x0c
+
+/* Revision ID register for BCM5325 */
+#define B53_REV_ID_25 0x50
+
+/* Strap Value (48 bit) */
+#define B53_STRAP_VALUE 0x70
+#define SV_GMII_CTRL_115 BIT(27)
+
+/*************************************************************************
+ * Management Mode Page Registers
+ *************************************************************************/
+
+/* Global Management Config Register (8 bit) */
+#define B53_GLOBAL_CONFIG 0x00
+#define GC_RESET_MIB 0x01
+#define GC_RX_BPDU_EN 0x02
+#define GC_MIB_AC_HDR_EN 0x10
+#define GC_MIB_AC_EN 0x20
+#define GC_FRM_MGMT_PORT_M 0xC0
+#define GC_FRM_MGMT_PORT_04 0x00
+#define GC_FRM_MGMT_PORT_MII 0x80
+
+/* Broadcom Header control register (8 bit) */
+#define B53_BRCM_HDR 0x03
+#define BRCM_HDR_P8_EN BIT(0) /* Enable tagging on port 8 */
+#define BRCM_HDR_P5_EN BIT(1) /* Enable tagging on port 5 */
+
+/* Device ID register (8 or 32 bit) */
+#define B53_DEVICE_ID 0x30
+
+/* Revision ID register (8 bit) */
+#define B53_REV_ID 0x40
+
+/*************************************************************************
+ * ARL Access Page Registers
+ *************************************************************************/
+
+/* VLAN Table Access Register (8 bit) */
+#define B53_VT_ACCESS 0x80
+#define B53_VT_ACCESS_9798 0x60 /* for BCM5397/BCM5398 */
+#define B53_VT_ACCESS_63XX 0x60 /* for BCM6328/62/68 */
+#define VTA_CMD_WRITE 0
+#define VTA_CMD_READ 1
+#define VTA_CMD_CLEAR 2
+#define VTA_START_CMD BIT(7)
+
+/* VLAN Table Index Register (16 bit) */
+#define B53_VT_INDEX 0x81
+#define B53_VT_INDEX_9798 0x61
+#define B53_VT_INDEX_63XX 0x62
+
+/* VLAN Table Entry Register (32 bit) */
+#define B53_VT_ENTRY 0x83
+#define B53_VT_ENTRY_9798 0x63
+#define B53_VT_ENTRY_63XX 0x64
+#define VTE_MEMBERS 0x1ff
+#define VTE_UNTAG_S 9
+#define VTE_UNTAG (0x1ff << 9)
+
+/*************************************************************************
+ * ARL I/O Registers
+ *************************************************************************/
+
+/* ARL Table Read/Write Register (8 bit) */
+#define B53_ARLTBL_RW_CTRL 0x00
+#define ARLTBL_RW BIT(0)
+#define ARLTBL_START_DONE BIT(7)
+
+/* MAC Address Index Register (48 bit) */
+#define B53_MAC_ADDR_IDX 0x02
+
+/* VLAN ID Index Register (16 bit) */
+#define B53_VLAN_ID_IDX 0x08
+
+/* ARL Table MAC/VID Entry N Registers (64 bit)
+ *
+ * BCM5325 and BCM5365 share most definitions below
+ */
+#define B53_ARLTBL_MAC_VID_ENTRY(n) (0x10 * (n))
+#define ARLTBL_MAC_MASK 0xffffffffffffULL
+#define ARLTBL_VID_S 48
+#define ARLTBL_VID_MASK_25 0xff
+#define ARLTBL_VID_MASK 0xfff
+#define ARLTBL_DATA_PORT_ID_S_25 48
+#define ARLTBL_DATA_PORT_ID_MASK_25 0xf
+#define ARLTBL_AGE_25 BIT(61)
+#define ARLTBL_STATIC_25 BIT(62)
+#define ARLTBL_VALID_25 BIT(63)
+
+/* ARL Table Data Entry N Registers (32 bit) */
+#define B53_ARLTBL_DATA_ENTRY(n) ((0x10 * (n)) + 0x08)
+#define ARLTBL_DATA_PORT_ID_MASK 0x1ff
+#define ARLTBL_TC(tc) ((3 & (tc)) << 11)
+#define ARLTBL_AGE BIT(14)
+#define ARLTBL_STATIC BIT(15)
+#define ARLTBL_VALID BIT(16)
+
+/* ARL Search Control Register (8 bit) */
+#define B53_ARL_SRCH_CTL 0x50
+#define B53_ARL_SRCH_CTL_25 0x20
+#define ARL_SRCH_VLID BIT(0)
+#define ARL_SRCH_STDN BIT(7)
+
+/* ARL Search Address Register (16 bit) */
+#define B53_ARL_SRCH_ADDR 0x51
+#define B53_ARL_SRCH_ADDR_25 0x22
+#define B53_ARL_SRCH_ADDR_65 0x24
+#define ARL_ADDR_MASK GENMASK(14, 0)
+
+/* ARL Search MAC/VID Result (64 bit) */
+#define B53_ARL_SRCH_RSTL_0_MACVID 0x60
+
+/* Single register search result on 5325 */
+#define B53_ARL_SRCH_RSTL_0_MACVID_25 0x24
+/* Single register search result on 5365 */
+#define B53_ARL_SRCH_RSTL_0_MACVID_65 0x30
+
+/* ARL Search Data Result (32 bit) */
+#define B53_ARL_SRCH_RSTL_0 0x68
+
+#define B53_ARL_SRCH_RSTL_MACVID(x) (B53_ARL_SRCH_RSTL_0_MACVID + ((x) * 0x10))
+#define B53_ARL_SRCH_RSTL(x) (B53_ARL_SRCH_RSTL_0 + ((x) * 0x10))
+
+/*************************************************************************
+ * Port VLAN Registers
+ *************************************************************************/
+
+/* Port VLAN mask (16 bit); the IMP port is always 8, also on 5325 & co. */
+#define B53_PVLAN_PORT_MASK(i) ((i) * 2)
+
+/*************************************************************************
+ * 802.1Q Page Registers
+ *************************************************************************/
+
+/* Global QoS Control (8 bit) */
+#define B53_QOS_GLOBAL_CTL 0x00
+
+/* Enable 802.1Q for individual Ports (16 bit) */
+#define B53_802_1P_EN 0x04
+
+/*************************************************************************
+ * VLAN Page Registers
+ *************************************************************************/
+
+/* VLAN Control 0 (8 bit) */
+#define B53_VLAN_CTRL0 0x00
+#define VC0_8021PF_CTRL_MASK 0x3
+#define VC0_8021PF_CTRL_NONE 0x0
+#define VC0_8021PF_CTRL_CHANGE_PRI 0x1
+#define VC0_8021PF_CTRL_CHANGE_VID 0x2
+#define VC0_8021PF_CTRL_CHANGE_BOTH 0x3
+#define VC0_8021QF_CTRL_MASK 0xc
+#define VC0_8021QF_CTRL_CHANGE_PRI 0x1
+#define VC0_8021QF_CTRL_CHANGE_VID 0x2
+#define VC0_8021QF_CTRL_CHANGE_BOTH 0x3
+#define VC0_RESERVED_1 BIT(1)
+#define VC0_DROP_VID_MISS BIT(4)
+#define VC0_VID_HASH_VID BIT(5)
+#define VC0_VID_CHK_EN BIT(6) /* Use VID,DA or VID,SA */
+#define VC0_VLAN_EN BIT(7) /* 802.1Q VLAN Enabled */
+
+/* VLAN Control 1 (8 bit) */
+#define B53_VLAN_CTRL1 0x01
+#define VC1_RX_MCST_TAG_EN BIT(1)
+#define VC1_RX_MCST_FWD_EN BIT(2)
+#define VC1_RX_MCST_UNTAG_EN BIT(3)
+
+/* VLAN Control 2 (8 bit) */
+#define B53_VLAN_CTRL2 0x02
+
+/* VLAN Control 3 (8 bit when BCM5325, 16 bit else) */
+#define B53_VLAN_CTRL3 0x03
+#define B53_VLAN_CTRL3_63XX 0x04
+#define VC3_MAXSIZE_1532 BIT(6) /* 5325 only */
+#define VC3_HIGH_8BIT_EN BIT(7) /* 5325 only */
+
+/* VLAN Control 4 (8 bit) */
+#define B53_VLAN_CTRL4 0x05
+#define B53_VLAN_CTRL4_25 0x04
+#define B53_VLAN_CTRL4_63XX 0x06
+#define VC4_ING_VID_CHECK_S 6
+#define VC4_ING_VID_CHECK_MASK (0x3 << VC4_ING_VID_CHECK_S)
+#define VC4_ING_VID_VIO_FWD 0 /* forward, but do not learn */
+#define VC4_ING_VID_VIO_DROP 1 /* drop VID violations */
+#define VC4_NO_ING_VID_CHK 2 /* do not check */
+#define VC4_ING_VID_VIO_TO_IMP 3 /* redirect to MII port */
+
+/* VLAN Control 5 (8 bit) */
+#define B53_VLAN_CTRL5 0x06
+#define B53_VLAN_CTRL5_25 0x05
+#define B53_VLAN_CTRL5_63XX 0x07
+#define VC5_VID_FFF_EN BIT(2)
+#define VC5_DROP_VTABLE_MISS BIT(3)
+
+/* VLAN Control 6 (8 bit) */
+#define B53_VLAN_CTRL6 0x07
+#define B53_VLAN_CTRL6_63XX 0x08
+
+/* VLAN Table Access Register (16 bit) */
+#define B53_VLAN_TABLE_ACCESS_25 0x06 /* BCM5325E/5350 */
+#define B53_VLAN_TABLE_ACCESS_65 0x08 /* BCM5365 */
+#define VTA_VID_LOW_MASK_25 0xf
+#define VTA_VID_LOW_MASK_65 0xff
+#define VTA_VID_HIGH_S_25 4
+#define VTA_VID_HIGH_S_65 8
+#define VTA_VID_HIGH_MASK_25 (0xff << VTA_VID_HIGH_S_25)
+#define VTA_VID_HIGH_MASK_65 (0xf << VTA_VID_HIGH_S_65)
+#define VTA_RW_STATE BIT(12)
+#define VTA_RW_STATE_RD 0
+#define VTA_RW_STATE_WR BIT(12)
+#define VTA_RW_OP_EN BIT(13)
+
+/* VLAN Read/Write Registers (16/32 bit) */
+#define B53_VLAN_WRITE_25 0x08
+#define B53_VLAN_WRITE_65 0x0a
+#define B53_VLAN_READ 0x0c
+#define VA_MEMBER_MASK 0x3f
+#define VA_UNTAG_S_25 6
+#define VA_UNTAG_MASK_25 0x3f
+#define VA_UNTAG_S_65 7
+#define VA_UNTAG_MASK_65 0x1f
+#define VA_VID_HIGH_S 12
+#define VA_VID_HIGH_MASK (0xffff << VA_VID_HIGH_S)
+#define VA_VALID_25 BIT(20)
+#define VA_VALID_25_R4 BIT(24)
+#define VA_VALID_65 BIT(14)
+
+/* VLAN Port Default Tag (16 bit) */
+#define B53_VLAN_PORT_DEF_TAG(i) (0x10 + 2 * (i))
+
+/*************************************************************************
+ * Jumbo Frame Page Registers
+ *************************************************************************/
+
+/* Jumbo Enable Port Mask (bit i == port i enabled) (32 bit) */
+#define B53_JUMBO_PORT_MASK 0x01
+#define B53_JUMBO_PORT_MASK_63XX 0x04
+#define JPM_10_100_JUMBO_EN BIT(24) /* GigE always enabled */
+
+/* Good Frame Max Size without 802.1Q TAG (16 bit) */
+#define B53_JUMBO_MAX_SIZE 0x05
+#define B53_JUMBO_MAX_SIZE_63XX 0x08
+#define JMS_MIN_SIZE 1518
+#define JMS_MAX_SIZE 9724
+
+/*************************************************************************
+ * CFP Configuration Page Registers
+ *************************************************************************/
+
+/* CFP Control Register with ports map (8 bit) */
+#define B53_CFP_CTRL 0x00
+
+#endif /* !__B53_REGS_H */
diff --git a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c
new file mode 100644
index 000000000..2bda0b5f1
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_spi.c
@@ -0,0 +1,331 @@
+/*
+ * B53 register access through SPI
+ *
+ * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <asm/unaligned.h>
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/platform_data/b53.h>
+
+#include "b53_priv.h"
+
+#define B53_SPI_DATA 0xf0
+
+#define B53_SPI_STATUS 0xfe
+#define B53_SPI_CMD_SPIF BIT(7)
+#define B53_SPI_CMD_RACK BIT(5)
+
+#define B53_SPI_CMD_READ 0x00
+#define B53_SPI_CMD_WRITE 0x01
+#define B53_SPI_CMD_NORMAL 0x60
+#define B53_SPI_CMD_FAST 0x10
+
+#define B53_SPI_PAGE_SELECT 0xff
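+
+/* Normal-mode access: every transaction starts with a command byte and
+ * a register offset; reads complete by polling B53_SPI_STATUS for RACK
+ * and then fetching the result through the B53_SPI_DATA window.
+ */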
+
+static inline int b53_spi_read_reg(struct spi_device *spi, u8 reg, u8 *val,
+ unsigned int len)
+{
+ u8 txbuf[2];
+
+ txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_READ;
+ txbuf[1] = reg;
+
+ return spi_write_then_read(spi, txbuf, 2, val, len);
+}
+
+static inline int b53_spi_clear_status(struct spi_device *spi)
+{
+ unsigned int i;
+ u8 rxbuf;
+ int ret;
+
+ for (i = 0; i < 10; i++) {
+ ret = b53_spi_read_reg(spi, B53_SPI_STATUS, &rxbuf, 1);
+ if (ret)
+ return ret;
+
+ if (!(rxbuf & B53_SPI_CMD_SPIF))
+ break;
+
+ mdelay(1);
+ }
+
+ if (i == 10)
+ return -EIO;
+
+ return 0;
+}
+
+static inline int b53_spi_set_page(struct spi_device *spi, u8 page)
+{
+ u8 txbuf[3];
+
+ txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
+ txbuf[1] = B53_SPI_PAGE_SELECT;
+ txbuf[2] = page;
+
+ return spi_write(spi, txbuf, sizeof(txbuf));
+}
+
+static inline int b53_prepare_reg_access(struct spi_device *spi, u8 page)
+{
+ int ret = b53_spi_clear_status(spi);
+
+ if (ret)
+ return ret;
+
+ return b53_spi_set_page(spi, page);
+}
+
+static int b53_spi_prepare_reg_read(struct spi_device *spi, u8 reg)
+{
+ u8 rxbuf;
+ int retry_count;
+ int ret;
+
+ ret = b53_spi_read_reg(spi, reg, &rxbuf, 1);
+ if (ret)
+ return ret;
+
+ for (retry_count = 0; retry_count < 10; retry_count++) {
+ ret = b53_spi_read_reg(spi, B53_SPI_STATUS, &rxbuf, 1);
+ if (ret)
+ return ret;
+
+ if (rxbuf & B53_SPI_CMD_RACK)
+ break;
+
+ mdelay(1);
+ }
+
+ if (retry_count == 10)
+ return -EIO;
+
+ return 0;
+}
+
+static int b53_spi_read(struct b53_device *dev, u8 page, u8 reg, u8 *data,
+ unsigned int len)
+{
+ struct spi_device *spi = dev->priv;
+ int ret;
+
+ ret = b53_prepare_reg_access(spi, page);
+ if (ret)
+ return ret;
+
+ ret = b53_spi_prepare_reg_read(spi, reg);
+ if (ret)
+ return ret;
+
+ return b53_spi_read_reg(spi, B53_SPI_DATA, data, len);
+}
+
+static int b53_spi_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
+{
+ return b53_spi_read(dev, page, reg, val, 1);
+}
+
+static int b53_spi_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
+{
+ int ret = b53_spi_read(dev, page, reg, (u8 *)val, 2);
+
+ if (!ret)
+ *val = le16_to_cpu(*val);
+
+ return ret;
+}
+
+static int b53_spi_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
+{
+ int ret = b53_spi_read(dev, page, reg, (u8 *)val, 4);
+
+ if (!ret)
+ *val = le32_to_cpu(*val);
+
+ return ret;
+}
+
+static int b53_spi_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ int ret;
+
+ *val = 0;
+ ret = b53_spi_read(dev, page, reg, (u8 *)val, 6);
+ if (!ret)
+ *val = le64_to_cpu(*val);
+
+ return ret;
+}
+
+static int b53_spi_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ int ret = b53_spi_read(dev, page, reg, (u8 *)val, 8);
+
+ if (!ret)
+ *val = le64_to_cpu(*val);
+
+ return ret;
+}
+
+static int b53_spi_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
+{
+ struct spi_device *spi = dev->priv;
+ int ret;
+ u8 txbuf[3];
+
+ ret = b53_prepare_reg_access(spi, page);
+ if (ret)
+ return ret;
+
+ txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
+ txbuf[1] = reg;
+ txbuf[2] = value;
+
+ return spi_write(spi, txbuf, sizeof(txbuf));
+}
+
+static int b53_spi_write16(struct b53_device *dev, u8 page, u8 reg, u16 value)
+{
+ struct spi_device *spi = dev->priv;
+ int ret;
+ u8 txbuf[4];
+
+ ret = b53_prepare_reg_access(spi, page);
+ if (ret)
+ return ret;
+
+ txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
+ txbuf[1] = reg;
+ put_unaligned_le16(value, &txbuf[2]);
+
+ return spi_write(spi, txbuf, sizeof(txbuf));
+}
+
+static int b53_spi_write32(struct b53_device *dev, u8 page, u8 reg, u32 value)
+{
+ struct spi_device *spi = dev->priv;
+ int ret;
+ u8 txbuf[6];
+
+ ret = b53_prepare_reg_access(spi, page);
+ if (ret)
+ return ret;
+
+ txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
+ txbuf[1] = reg;
+ put_unaligned_le32(value, &txbuf[2]);
+
+ return spi_write(spi, txbuf, sizeof(txbuf));
+}
+
+static int b53_spi_write48(struct b53_device *dev, u8 page, u8 reg, u64 value)
+{
+ struct spi_device *spi = dev->priv;
+ int ret;
+ u8 txbuf[10];
+
+ ret = b53_prepare_reg_access(spi, page);
+ if (ret)
+ return ret;
+
+ txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
+ txbuf[1] = reg;
+ put_unaligned_le64(value, &txbuf[2]);
+
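+	/* Send only the command byte, the offset and the six low-order
+	 * data bytes of the 48-bit value.
+	 */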
+ return spi_write(spi, txbuf, sizeof(txbuf) - 2);
+}
+
+static int b53_spi_write64(struct b53_device *dev, u8 page, u8 reg, u64 value)
+{
+ struct spi_device *spi = dev->priv;
+ int ret;
+ u8 txbuf[10];
+
+ ret = b53_prepare_reg_access(spi, page);
+ if (ret)
+ return ret;
+
+ txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE;
+ txbuf[1] = reg;
+ put_unaligned_le64(value, &txbuf[2]);
+
+ return spi_write(spi, txbuf, sizeof(txbuf));
+}
+
+static struct b53_io_ops b53_spi_ops = {
+ .read8 = b53_spi_read8,
+ .read16 = b53_spi_read16,
+ .read32 = b53_spi_read32,
+ .read48 = b53_spi_read48,
+ .read64 = b53_spi_read64,
+ .write8 = b53_spi_write8,
+ .write16 = b53_spi_write16,
+ .write32 = b53_spi_write32,
+ .write48 = b53_spi_write48,
+ .write64 = b53_spi_write64,
+};
+
+static int b53_spi_probe(struct spi_device *spi)
+{
+ struct b53_device *dev;
+ int ret;
+
+ dev = b53_switch_alloc(&spi->dev, &b53_spi_ops, spi);
+ if (!dev)
+ return -ENOMEM;
+
+ if (spi->dev.platform_data)
+ dev->pdata = spi->dev.platform_data;
+
+ ret = b53_switch_register(dev);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, dev);
+
+ return 0;
+}
+
+static int b53_spi_remove(struct spi_device *spi)
+{
+ struct b53_device *dev = spi_get_drvdata(spi);
+
+ if (dev)
+ b53_switch_remove(dev);
+
+ return 0;
+}
+
+static struct spi_driver b53_spi_driver = {
+ .driver = {
+ .name = "b53-switch",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = b53_spi_probe,
+ .remove = b53_spi_remove,
+};
+
+module_spi_driver(b53_spi_driver);
+
+MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
+MODULE_DESCRIPTION("B53 SPI access driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
new file mode 100644
index 000000000..3e2d4a5fc
--- /dev/null
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -0,0 +1,442 @@
+/*
+ * B53 register access through Switch Register Access Bridge Registers
+ *
+ * Copyright (C) 2013 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/b53.h>
+#include <linux/of.h>
+
+#include "b53_priv.h"
+
+/* command and status register of the SRAB */
+#define B53_SRAB_CMDSTAT 0x2c
+#define B53_SRAB_CMDSTAT_RST BIT(2)
+#define B53_SRAB_CMDSTAT_WRITE BIT(1)
+#define B53_SRAB_CMDSTAT_GORDYN BIT(0)
+#define B53_SRAB_CMDSTAT_PAGE 24
+#define B53_SRAB_CMDSTAT_REG 16
+
+/* high order word of write data to switch register */
+#define B53_SRAB_WD_H 0x30
+
+/* low order word of write data to switch register */
+#define B53_SRAB_WD_L 0x34
+
+/* high order word of read data from switch register */
+#define B53_SRAB_RD_H 0x38
+
+/* low order word of read data from switch register */
+#define B53_SRAB_RD_L 0x3c
+
+/* control and status register of the SRAB */
+#define B53_SRAB_CTRLS 0x40
+#define B53_SRAB_CTRLS_RCAREQ BIT(3)
+#define B53_SRAB_CTRLS_RCAGNT BIT(4)
+#define B53_SRAB_CTRLS_SW_INIT_DONE BIT(6)
+
+/* This register captures interrupt pulses from the switch */
+#define B53_SRAB_INTR 0x44
+#define B53_SRAB_INTR_P(x) BIT(x)
+#define B53_SRAB_SWITCH_PHY BIT(8)
+#define B53_SRAB_1588_SYNC BIT(9)
+#define B53_SRAB_IMP1_SLEEP_TIMER BIT(10)
+#define B53_SRAB_P7_SLEEP_TIMER BIT(11)
+#define B53_SRAB_IMP0_SLEEP_TIMER BIT(12)
+
+struct b53_srab_priv {
+ void __iomem *regs;
+};
+
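+/* Request exclusive register access (RCAREQ) and poll for the grant
+ * (RCAGNT) before driving CMDSTAT; the grant is released when done.
+ */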
+static int b53_srab_request_grant(struct b53_device *dev)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ u32 ctrls;
+ int i;
+
+ ctrls = readl(regs + B53_SRAB_CTRLS);
+ ctrls |= B53_SRAB_CTRLS_RCAREQ;
+ writel(ctrls, regs + B53_SRAB_CTRLS);
+
+ for (i = 0; i < 20; i++) {
+ ctrls = readl(regs + B53_SRAB_CTRLS);
+ if (ctrls & B53_SRAB_CTRLS_RCAGNT)
+ break;
+ usleep_range(10, 100);
+ }
+	if (WARN_ON(i == 20))
+ return -EIO;
+
+ return 0;
+}
+
+static void b53_srab_release_grant(struct b53_device *dev)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ u32 ctrls;
+
+ ctrls = readl(regs + B53_SRAB_CTRLS);
+ ctrls &= ~B53_SRAB_CTRLS_RCAREQ;
+ writel(ctrls, regs + B53_SRAB_CTRLS);
+}
+
+static int b53_srab_op(struct b53_device *dev, u8 page, u8 reg, u32 op)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int i;
+ u32 cmdstat;
+
+ /* set register address */
+ cmdstat = (page << B53_SRAB_CMDSTAT_PAGE) |
+ (reg << B53_SRAB_CMDSTAT_REG) |
+ B53_SRAB_CMDSTAT_GORDYN |
+ op;
+ writel(cmdstat, regs + B53_SRAB_CMDSTAT);
+
+ /* check if operation completed */
+ for (i = 0; i < 5; ++i) {
+ cmdstat = readl(regs + B53_SRAB_CMDSTAT);
+ if (!(cmdstat & B53_SRAB_CMDSTAT_GORDYN))
+ break;
+ usleep_range(10, 100);
+ }
+
+ if (WARN_ON(i == 5))
+ return -EIO;
+
+ return 0;
+}
+
+static int b53_srab_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ ret = b53_srab_op(dev, page, reg, 0);
+ if (ret)
+ goto err;
+
+ *val = readl(regs + B53_SRAB_RD_L) & 0xff;
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ ret = b53_srab_op(dev, page, reg, 0);
+ if (ret)
+ goto err;
+
+ *val = readl(regs + B53_SRAB_RD_L) & 0xffff;
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ ret = b53_srab_op(dev, page, reg, 0);
+ if (ret)
+ goto err;
+
+ *val = readl(regs + B53_SRAB_RD_L);
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ ret = b53_srab_op(dev, page, reg, 0);
+ if (ret)
+ goto err;
+
+ *val = readl(regs + B53_SRAB_RD_L);
+ *val += ((u64)readl(regs + B53_SRAB_RD_H) & 0xffff) << 32;
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ ret = b53_srab_op(dev, page, reg, 0);
+ if (ret)
+ goto err;
+
+ *val = readl(regs + B53_SRAB_RD_L);
+ *val += (u64)readl(regs + B53_SRAB_RD_H) << 32;
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ writel(value, regs + B53_SRAB_WD_L);
+
+ ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_write16(struct b53_device *dev, u8 page, u8 reg,
+ u16 value)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ writel(value, regs + B53_SRAB_WD_L);
+
+ ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_write32(struct b53_device *dev, u8 page, u8 reg,
+ u32 value)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ writel(value, regs + B53_SRAB_WD_L);
+
+ ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_write48(struct b53_device *dev, u8 page, u8 reg,
+ u64 value)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ writel((u32)value, regs + B53_SRAB_WD_L);
+ writel((u16)(value >> 32), regs + B53_SRAB_WD_H);
+
+ ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static int b53_srab_write64(struct b53_device *dev, u8 page, u8 reg,
+ u64 value)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ u8 __iomem *regs = priv->regs;
+ int ret = 0;
+
+ ret = b53_srab_request_grant(dev);
+ if (ret)
+ goto err;
+
+ writel((u32)value, regs + B53_SRAB_WD_L);
+ writel((u32)(value >> 32), regs + B53_SRAB_WD_H);
+
+ ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE);
+
+err:
+ b53_srab_release_grant(dev);
+
+ return ret;
+}
+
+static struct b53_io_ops b53_srab_ops = {
+ .read8 = b53_srab_read8,
+ .read16 = b53_srab_read16,
+ .read32 = b53_srab_read32,
+ .read48 = b53_srab_read48,
+ .read64 = b53_srab_read64,
+ .write8 = b53_srab_write8,
+ .write16 = b53_srab_write16,
+ .write32 = b53_srab_write32,
+ .write48 = b53_srab_write48,
+ .write64 = b53_srab_write64,
+};
+
+static const struct of_device_id b53_srab_of_match[] = {
+ { .compatible = "brcm,bcm53010-srab" },
+ { .compatible = "brcm,bcm53011-srab" },
+ { .compatible = "brcm,bcm53012-srab" },
+ { .compatible = "brcm,bcm53018-srab" },
+ { .compatible = "brcm,bcm53019-srab" },
+ { .compatible = "brcm,bcm5301x-srab" },
+ { .compatible = "brcm,bcm58522-srab", .data = (void *)BCM58XX_DEVICE_ID },
+ { .compatible = "brcm,bcm58525-srab", .data = (void *)BCM58XX_DEVICE_ID },
+ { .compatible = "brcm,bcm58535-srab", .data = (void *)BCM58XX_DEVICE_ID },
+ { .compatible = "brcm,bcm58622-srab", .data = (void *)BCM58XX_DEVICE_ID },
+ { .compatible = "brcm,bcm58623-srab", .data = (void *)BCM58XX_DEVICE_ID },
+ { .compatible = "brcm,bcm58625-srab", .data = (void *)BCM58XX_DEVICE_ID },
+ { .compatible = "brcm,bcm88312-srab", .data = (void *)BCM58XX_DEVICE_ID },
+ { .compatible = "brcm,nsp-srab", .data = (void *)BCM58XX_DEVICE_ID },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, b53_srab_of_match);
+
+static int b53_srab_probe(struct platform_device *pdev)
+{
+ struct b53_platform_data *pdata = pdev->dev.platform_data;
+ struct device_node *dn = pdev->dev.of_node;
+ const struct of_device_id *of_id = NULL;
+ struct b53_srab_priv *priv;
+ struct b53_device *dev;
+ struct resource *r;
+
+ if (dn)
+ of_id = of_match_node(b53_srab_of_match, dn);
+
+ if (of_id) {
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdata->chip_id = (u32)(unsigned long)of_id->data;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->regs = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(priv->regs))
+		return PTR_ERR(priv->regs);
+
+ dev = b53_switch_alloc(&pdev->dev, &b53_srab_ops, priv);
+ if (!dev)
+ return -ENOMEM;
+
+ if (pdata)
+ dev->pdata = pdata;
+
+ platform_set_drvdata(pdev, dev);
+
+ return b53_switch_register(dev);
+}
+
+static int b53_srab_remove(struct platform_device *pdev)
+{
+ struct b53_device *dev = platform_get_drvdata(pdev);
+
+ if (dev)
+ b53_switch_remove(dev);
+
+ return 0;
+}
+
+static struct platform_driver b53_srab_driver = {
+ .probe = b53_srab_probe,
+ .remove = b53_srab_remove,
+ .driver = {
+ .name = "b53-srab-switch",
+ .of_match_table = b53_srab_of_match,
+ },
+};
+
+module_platform_driver(b53_srab_driver);
+MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
+MODULE_DESCRIPTION("B53 Switch Register Access Bridge Registers (SRAB) access driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 10ddd5a5d..b2b838724 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -22,6 +22,7 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_net.h>
+#include <linux/of_mdio.h>
#include <net/dsa.h>
#include <linux/ethtool.h>
#include <linux/if_bridge.h>
@@ -460,19 +461,13 @@ static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port,
return 0;
}
-/* Fast-ageing of ARL entries for a given port, equivalent to an ARL
- * flush for that port.
- */
-static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
+static int bcm_sf2_fast_age_op(struct bcm_sf2_priv *priv)
{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
unsigned int timeout = 1000;
u32 reg;
- core_writel(priv, port, CORE_FAST_AGE_PORT);
-
reg = core_readl(priv, CORE_FAST_AGE_CTRL);
- reg |= EN_AGE_PORT | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE;
+ reg |= EN_AGE_PORT | EN_AGE_VLAN | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE;
core_writel(priv, reg, CORE_FAST_AGE_CTRL);
do {
@@ -491,13 +486,98 @@ static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
return 0;
}
+/* Fast-ageing of ARL entries for a given port, equivalent to an ARL
+ * flush for that port.
+ */
+static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
+{
+ struct bcm_sf2_priv *priv = ds_to_priv(ds);
+
+ core_writel(priv, port, CORE_FAST_AGE_PORT);
+
+ return bcm_sf2_fast_age_op(priv);
+}
+
+static int bcm_sf2_sw_fast_age_vlan(struct bcm_sf2_priv *priv, u16 vid)
+{
+ core_writel(priv, vid, CORE_FAST_AGE_VID);
+
+ return bcm_sf2_fast_age_op(priv);
+}
+
+static int bcm_sf2_vlan_op_wait(struct bcm_sf2_priv *priv)
+{
+ unsigned int timeout = 10;
+ u32 reg;
+
+ do {
+ reg = core_readl(priv, CORE_ARLA_VTBL_RWCTRL);
+ if (!(reg & ARLA_VTBL_STDN))
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (timeout--);
+
+ return -ETIMEDOUT;
+}
+
+static int bcm_sf2_vlan_op(struct bcm_sf2_priv *priv, u8 op)
+{
+ core_writel(priv, ARLA_VTBL_STDN | op, CORE_ARLA_VTBL_RWCTRL);
+
+ return bcm_sf2_vlan_op_wait(priv);
+}
+
+static void bcm_sf2_set_vlan_entry(struct bcm_sf2_priv *priv, u16 vid,
+ struct bcm_sf2_vlan *vlan)
+{
+ int ret;
+
+ core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR);
+ core_writel(priv, vlan->untag << UNTAG_MAP_SHIFT | vlan->members,
+ CORE_ARLA_VTBL_ENTRY);
+
+ ret = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_WRITE);
+ if (ret)
+ pr_err("failed to write VLAN entry\n");
+}
+
+static int bcm_sf2_get_vlan_entry(struct bcm_sf2_priv *priv, u16 vid,
+ struct bcm_sf2_vlan *vlan)
+{
+ u32 entry;
+ int ret;
+
+ core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR);
+
+ ret = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_READ);
+ if (ret)
+ return ret;
+
+ entry = core_readl(priv, CORE_ARLA_VTBL_ENTRY);
+ vlan->members = entry & FWD_MAP_MASK;
+ vlan->untag = (entry >> UNTAG_MAP_SHIFT) & UNTAG_MAP_MASK;
+
+ return 0;
+}
+
static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port,
struct net_device *bridge)
{
struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ s8 cpu_port = ds->dst->cpu_port;
unsigned int i;
u32 reg, p_ctl;
+	/* Take this port out of the all-VLANs join, since we will have
+	 * proper VLAN entries from now on
+	 */
+ reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN);
+ reg &= ~BIT(port);
+ if ((reg & BIT(cpu_port)) == BIT(cpu_port))
+ reg &= ~BIT(cpu_port);
+ core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN);
+
priv->port_sts[port].bridge_dev = bridge;
p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
@@ -529,6 +609,7 @@ static void bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port)
{
struct bcm_sf2_priv *priv = ds_to_priv(ds);
struct net_device *bridge = priv->port_sts[port].bridge_dev;
+ s8 cpu_port = ds->dst->cpu_port;
unsigned int i;
u32 reg, p_ctl;
@@ -552,6 +633,13 @@ static void bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port)
core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
priv->port_sts[port].vlan_ctl_mask = p_ctl;
priv->port_sts[port].bridge_dev = NULL;
+
+ /* Make this port join all VLANs without VLAN entries */
+ reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN);
+ reg |= BIT(port);
+ if (!(reg & BIT(cpu_port)))
+ reg |= BIT(cpu_port);
+ core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN);
}
static void bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
@@ -804,7 +892,7 @@ static int bcm_sf2_sw_fdb_dump(struct dsa_switch *ds, int port,
int (*cb)(struct switchdev_obj *obj))
{
struct bcm_sf2_priv *priv = ds_to_priv(ds);
- struct net_device *dev = ds->ports[port];
+ struct net_device *dev = ds->ports[port].netdev;
struct bcm_sf2_arl_entry results[2];
unsigned int count = 0;
int ret;
@@ -836,6 +924,66 @@ static int bcm_sf2_sw_fdb_dump(struct dsa_switch *ds, int port,
return 0;
}
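+/* Indirectly read or write an integrated pseudo-PHY register through
+ * the switch register space, temporarily selecting MDIO master mode.
+ */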
+static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr,
+ int regnum, u16 val)
+{
+ int ret = 0;
+ u32 reg;
+
+ reg = reg_readl(priv, REG_SWITCH_CNTRL);
+ reg |= MDIO_MASTER_SEL;
+ reg_writel(priv, reg, REG_SWITCH_CNTRL);
+
+ /* Page << 8 | offset */
+ reg = 0x70;
+ reg <<= 2;
+ core_writel(priv, addr, reg);
+
+ /* Page << 8 | offset */
+ reg = 0x80 << 8 | regnum << 1;
+ reg <<= 2;
+
+ if (op)
+ ret = core_readl(priv, reg);
+ else
+ core_writel(priv, val, reg);
+
+ reg = reg_readl(priv, REG_SWITCH_CNTRL);
+ reg &= ~MDIO_MASTER_SEL;
+ reg_writel(priv, reg, REG_SWITCH_CNTRL);
+
+ return ret & 0xffff;
+}
+
+static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct bcm_sf2_priv *priv = bus->priv;
+
+	/* Intercept reads from the Broadcom pseudo-PHY address; otherwise,
+	 * forward them to our master MDIO bus controller
+	 */
+ if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
+ return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
+ else
+ return mdiobus_read(priv->master_mii_bus, addr, regnum);
+}
+
+static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct bcm_sf2_priv *priv = bus->priv;
+
+	/* Intercept writes to the Broadcom pseudo-PHY address; otherwise,
+	 * forward them to our master MDIO bus controller
+	 */
+ if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
+ bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
+ else
+ mdiobus_write(priv->master_mii_bus, addr, regnum, val);
+
+ return 0;
+}
+
static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
{
struct bcm_sf2_priv *priv = dev_id;
@@ -932,133 +1080,70 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
}
}
-static int bcm_sf2_sw_setup(struct dsa_switch *ds)
+static int bcm_sf2_mdio_register(struct dsa_switch *ds)
{
- const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
struct bcm_sf2_priv *priv = ds_to_priv(ds);
struct device_node *dn;
- void __iomem **base;
- unsigned int port;
- unsigned int i;
- u32 reg, rev;
- int ret;
-
- spin_lock_init(&priv->indir_lock);
- mutex_init(&priv->stats_mutex);
-
- /* All the interesting properties are at the parent device_node
- * level
- */
- dn = ds->cd->of_node->parent;
- bcm_sf2_identify_ports(priv, ds->cd->of_node);
-
- priv->irq0 = irq_of_parse_and_map(dn, 0);
- priv->irq1 = irq_of_parse_and_map(dn, 1);
-
- base = &priv->core;
- for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
- *base = of_iomap(dn, i);
- if (*base == NULL) {
- pr_err("unable to find register: %s\n", reg_names[i]);
- ret = -ENOMEM;
- goto out_unmap;
- }
- base++;
- }
-
- ret = bcm_sf2_sw_rst(priv);
- if (ret) {
- pr_err("unable to software reset switch: %d\n", ret);
- goto out_unmap;
- }
-
- /* Disable all interrupts and request them */
- bcm_sf2_intr_disable(priv);
-
- ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0,
- "switch_0", priv);
- if (ret < 0) {
- pr_err("failed to request switch_0 IRQ\n");
- goto out_unmap;
- }
-
- ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0,
- "switch_1", priv);
- if (ret < 0) {
- pr_err("failed to request switch_1 IRQ\n");
- goto out_free_irq0;
- }
-
- /* Reset the MIB counters */
- reg = core_readl(priv, CORE_GMNCFGCFG);
- reg |= RST_MIB_CNT;
- core_writel(priv, reg, CORE_GMNCFGCFG);
- reg &= ~RST_MIB_CNT;
- core_writel(priv, reg, CORE_GMNCFGCFG);
-
- /* Get the maximum number of ports for this switch */
- priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
- if (priv->hw_params.num_ports > DSA_MAX_PORTS)
- priv->hw_params.num_ports = DSA_MAX_PORTS;
-
- /* Assume a single GPHY setup if we can't read that property */
- if (of_property_read_u32(dn, "brcm,num-gphy",
- &priv->hw_params.num_gphy))
- priv->hw_params.num_gphy = 1;
-
- /* Enable all valid ports and disable those unused */
- for (port = 0; port < priv->hw_params.num_ports; port++) {
- /* IMP port receives special treatment */
- if ((1 << port) & ds->enabled_port_mask)
- bcm_sf2_port_setup(ds, port, NULL);
- else if (dsa_is_cpu_port(ds, port))
- bcm_sf2_imp_setup(ds, port);
- else
- bcm_sf2_port_disable(ds, port, NULL);
- }
-
- /* Include the pseudo-PHY address and the broadcast PHY address to
- * divert reads towards our workaround. This is only required for
- * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such
- * that we can use the regular SWITCH_MDIO master controller instead.
+ static int index;
+ int err;
+
+ /* Find our integrated MDIO bus node */
+ dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
+ priv->master_mii_bus = of_mdio_find_bus(dn);
+ if (!priv->master_mii_bus)
+ return -EPROBE_DEFER;
+
+ get_device(&priv->master_mii_bus->dev);
+ priv->master_mii_dn = dn;
+
+ priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
+ if (!priv->slave_mii_bus)
+ return -ENOMEM;
+
+ priv->slave_mii_bus->priv = priv;
+ priv->slave_mii_bus->name = "sf2 slave mii";
+ priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read;
+ priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write;
+ snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
+ index++);
+ priv->slave_mii_bus->dev.of_node = dn;
+
+ /* Include the pseudo-PHY address to divert reads towards our
+ * workaround. This is only required for 7445D0, since 7445E0
+ * disconnects the internal switch pseudo-PHY such that we can use the
+ * regular SWITCH_MDIO master controller instead.
*
- * By default, DSA initializes ds->phys_mii_mask to
- * ds->enabled_port_mask to have a 1:1 mapping between Port address
- * and PHY address in order to utilize the slave_mii_bus instance to
- * read from Port PHYs. This is not what we want here, so we
- * initialize phys_mii_mask 0 to always utilize the "master" MDIO
- * bus backed by the "mdio-unimac" driver.
+ * Here we flag the pseudo-PHY as needing special treatment; all other
+ * PHY reads/writes go to the master MDIO bus controller that comes
+ * with this switch, backed by the "mdio-unimac" driver.
*/
if (of_machine_is_compatible("brcm,bcm7445d0"))
- ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0));
+ priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR);
else
- ds->phys_mii_mask = 0;
+ priv->indir_phy_mask = 0;
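+
+ /* With indir_phy_mask set (e.g. on 7445D0), MDIO accesses to
+ * BRCM_PSEUDO_PHY_ADDR are serviced by bcm_sf2_sw_indir_rw() through
+ * the switch core registers, while every other address falls through
+ * to the master MDIO bus (illustrative summary of the read/write
+ * handlers above).
+ */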
- rev = reg_readl(priv, REG_SWITCH_REVISION);
- priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
- SWITCH_TOP_REV_MASK;
- priv->hw_params.core_rev = (rev & SF2_REV_MASK);
+ ds->phys_mii_mask = priv->indir_phy_mask;
+ ds->slave_mii_bus = priv->slave_mii_bus;
+ priv->slave_mii_bus->parent = ds->dev->parent;
+ priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;
- rev = reg_readl(priv, REG_PHY_REVISION);
- priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;
+ if (dn)
+ err = of_mdiobus_register(priv->slave_mii_bus, dn);
+ else
+ err = mdiobus_register(priv->slave_mii_bus);
- pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
- priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
- priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
- priv->core, priv->irq0, priv->irq1);
+ if (err)
+ of_node_put(dn);
- return 0;
+ return err;
+}
-out_free_irq0:
- free_irq(priv->irq0, priv);
-out_unmap:
- base = &priv->core;
- for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
- if (*base)
- iounmap(*base);
- base++;
- }
- return ret;
+static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
+{
+ mdiobus_unregister(priv->slave_mii_bus);
+ if (priv->master_mii_dn)
+ of_node_put(priv->master_mii_dn);
}
static int bcm_sf2_sw_set_addr(struct dsa_switch *ds, u8 *addr)
@@ -1078,68 +1163,6 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
return priv->hw_params.gphy_rev;
}
-static int bcm_sf2_sw_indir_rw(struct dsa_switch *ds, int op, int addr,
- int regnum, u16 val)
-{
- struct bcm_sf2_priv *priv = ds_to_priv(ds);
- int ret = 0;
- u32 reg;
-
- reg = reg_readl(priv, REG_SWITCH_CNTRL);
- reg |= MDIO_MASTER_SEL;
- reg_writel(priv, reg, REG_SWITCH_CNTRL);
-
- /* Page << 8 | offset */
- reg = 0x70;
- reg <<= 2;
- core_writel(priv, addr, reg);
-
- /* Page << 8 | offset */
- reg = 0x80 << 8 | regnum << 1;
- reg <<= 2;
-
- if (op)
- ret = core_readl(priv, reg);
- else
- core_writel(priv, val, reg);
-
- reg = reg_readl(priv, REG_SWITCH_CNTRL);
- reg &= ~MDIO_MASTER_SEL;
- reg_writel(priv, reg, REG_SWITCH_CNTRL);
-
- return ret & 0xffff;
-}
-
-static int bcm_sf2_sw_phy_read(struct dsa_switch *ds, int addr, int regnum)
-{
- /* Intercept reads from the MDIO broadcast address or Broadcom
- * pseudo-PHY address
- */
- switch (addr) {
- case 0:
- case BRCM_PSEUDO_PHY_ADDR:
- return bcm_sf2_sw_indir_rw(ds, 1, addr, regnum, 0);
- default:
- return 0xffff;
- }
-}
-
-static int bcm_sf2_sw_phy_write(struct dsa_switch *ds, int addr, int regnum,
- u16 val)
-{
- /* Intercept writes to the MDIO broadcast address or Broadcom
- * pseudo-PHY address
- */
- switch (addr) {
- case 0:
- case BRCM_PSEUDO_PHY_ADDR:
- bcm_sf2_sw_indir_rw(ds, 0, addr, regnum, val);
- break;
- }
-
- return 0;
-}
-
static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
struct phy_device *phydev)
{
@@ -1248,7 +1271,7 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
* state machine and make it go in PHY_FORCING state instead.
*/
if (!status->link)
- netif_carrier_off(ds->ports[port]);
+ netif_carrier_off(ds->ports[port].netdev);
status->duplex = 1;
} else {
status->link = 1;
@@ -1370,14 +1393,310 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
return p->ethtool_ops->set_wol(p, wol);
}
+static void bcm_sf2_enable_vlan(struct bcm_sf2_priv *priv, bool enable)
+{
+ u32 mgmt, vc0, vc1, vc4, vc5;
+
+ mgmt = core_readl(priv, CORE_SWMODE);
+ vc0 = core_readl(priv, CORE_VLAN_CTRL0);
+ vc1 = core_readl(priv, CORE_VLAN_CTRL1);
+ vc4 = core_readl(priv, CORE_VLAN_CTRL4);
+ vc5 = core_readl(priv, CORE_VLAN_CTRL5);
+
+ mgmt &= ~SW_FWDG_MODE;
+
+ if (enable) {
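+ /* IVL = independent VLAN learning: address entries are keyed
+ * per-VLAN rather than shared across all VLANs (SVL)
+ */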
+ vc0 |= VLAN_EN | VLAN_LEARN_MODE_IVL;
+ vc1 |= EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP;
+ vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT);
+ vc4 |= INGR_VID_CHK_DROP;
+ vc5 |= DROP_VTABLE_MISS | EN_VID_FFF_FWD;
+ } else {
+ vc0 &= ~(VLAN_EN | VLAN_LEARN_MODE_IVL);
+ vc1 &= ~(EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP);
+ vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT);
+ vc5 &= ~(DROP_VTABLE_MISS | EN_VID_FFF_FWD);
+ vc4 |= INGR_VID_CHK_VID_VIOL_IMP;
+ }
+
+ core_writel(priv, vc0, CORE_VLAN_CTRL0);
+ core_writel(priv, vc1, CORE_VLAN_CTRL1);
+ core_writel(priv, 0, CORE_VLAN_CTRL3);
+ core_writel(priv, vc4, CORE_VLAN_CTRL4);
+ core_writel(priv, vc5, CORE_VLAN_CTRL5);
+ core_writel(priv, mgmt, CORE_SWMODE);
+}
+
+static void bcm_sf2_sw_configure_vlan(struct dsa_switch *ds)
+{
+ struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ unsigned int port;
+
+ /* Clear all VLANs */
+ bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_CLEAR);
+
+ for (port = 0; port < priv->hw_params.num_ports; port++) {
+ if (!((1 << port) & ds->enabled_port_mask))
+ continue;
+
+ core_writel(priv, 1, CORE_DEFAULT_1Q_TAG_P(port));
+ }
+}
+
+static int bcm_sf2_sw_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering)
+{
+ return 0;
+}
+
+static int bcm_sf2_sw_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ struct bcm_sf2_priv *priv = ds_to_priv(ds);
+
+ bcm_sf2_enable_vlan(priv, true);
+
+ return 0;
+}
+
+static void bcm_sf2_sw_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ s8 cpu_port = ds->dst->cpu_port;
+ struct bcm_sf2_vlan *vl;
+ u16 vid;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ vl = &priv->vlans[vid];
+
+ bcm_sf2_get_vlan_entry(priv, vid, vl);
+
+ vl->members |= BIT(port) | BIT(cpu_port);
+ if (untagged)
+ vl->untag |= BIT(port) | BIT(cpu_port);
+ else
+ vl->untag &= ~(BIT(port) | BIT(cpu_port));
+
+ bcm_sf2_set_vlan_entry(priv, vid, vl);
+ bcm_sf2_sw_fast_age_vlan(priv, vid);
+ }
+
+ if (pvid) {
+ core_writel(priv, vlan->vid_end, CORE_DEFAULT_1Q_TAG_P(port));
+ core_writel(priv, vlan->vid_end,
+ CORE_DEFAULT_1Q_TAG_P(cpu_port));
+ bcm_sf2_sw_fast_age_vlan(priv, vid);
+ }
+}
+
+static int bcm_sf2_sw_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ s8 cpu_port = ds->dst->cpu_port;
+ struct bcm_sf2_vlan *vl;
+ u16 vid, pvid;
+ int ret;
+
+ pvid = core_readl(priv, CORE_DEFAULT_1Q_TAG_P(port));
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ vl = &priv->vlans[vid];
+
+ ret = bcm_sf2_get_vlan_entry(priv, vid, vl);
+ if (ret)
+ return ret;
+
+ vl->members &= ~BIT(port);
+ if ((vl->members & BIT(cpu_port)) == BIT(cpu_port))
+ vl->members = 0;
+ if (pvid == vid)
+ pvid = 0;
+ if (untagged) {
+ vl->untag &= ~BIT(port);
+ if ((vl->untag & BIT(port)) == BIT(cpu_port))
+ vl->untag = 0;
+ }
+
+ bcm_sf2_set_vlan_entry(priv, vid, vl);
+ bcm_sf2_sw_fast_age_vlan(priv, vid);
+ }
+
+ core_writel(priv, pvid, CORE_DEFAULT_1Q_TAG_P(port));
+ core_writel(priv, pvid, CORE_DEFAULT_1Q_TAG_P(cpu_port));
+ bcm_sf2_sw_fast_age_vlan(priv, vid);
+
+ return 0;
+}
+
+static int bcm_sf2_sw_vlan_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_vlan *vlan,
+ int (*cb)(struct switchdev_obj *obj))
+{
+ struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ struct bcm_sf2_port_status *p = &priv->port_sts[port];
+ struct bcm_sf2_vlan *vl;
+ u16 vid, pvid;
+ int err = 0;
+
+ pvid = core_readl(priv, CORE_DEFAULT_1Q_TAG_P(port));
+
+ for (vid = 0; vid < VLAN_N_VID; vid++) {
+ vl = &priv->vlans[vid];
+
+ if (!(vl->members & BIT(port)))
+ continue;
+
+ vlan->vid_begin = vlan->vid_end = vid;
+ vlan->flags = 0;
+
+ if (vl->untag & BIT(port))
+ vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
+ if (p->pvid == vid)
+ vlan->flags |= BRIDGE_VLAN_INFO_PVID;
+
+ err = cb(&vlan->obj);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int bcm_sf2_sw_setup(struct dsa_switch *ds)
+{
+ const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
+ struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ struct device_node *dn;
+ void __iomem **base;
+ unsigned int port;
+ unsigned int i;
+ u32 reg, rev;
+ int ret;
+
+ spin_lock_init(&priv->indir_lock);
+ mutex_init(&priv->stats_mutex);
+
+ /* All the interesting properties are at the parent device_node
+ * level
+ */
+ dn = ds->cd->of_node->parent;
+ bcm_sf2_identify_ports(priv, ds->cd->of_node);
+
+ priv->irq0 = irq_of_parse_and_map(dn, 0);
+ priv->irq1 = irq_of_parse_and_map(dn, 1);
+
+ base = &priv->core;
+ for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
+ *base = of_iomap(dn, i);
+ if (*base == NULL) {
+ pr_err("unable to find register: %s\n", reg_names[i]);
+ ret = -ENOMEM;
+ goto out_unmap;
+ }
+ base++;
+ }
+
+ ret = bcm_sf2_sw_rst(priv);
+ if (ret) {
+ pr_err("unable to software reset switch: %d\n", ret);
+ goto out_unmap;
+ }
+
+ ret = bcm_sf2_mdio_register(ds);
+ if (ret) {
+ pr_err("failed to register MDIO bus\n");
+ goto out_unmap;
+ }
+
+ /* Disable all interrupts and request them */
+ bcm_sf2_intr_disable(priv);
+
+ ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0,
+ "switch_0", priv);
+ if (ret < 0) {
+ pr_err("failed to request switch_0 IRQ\n");
+ goto out_mdio;
+ }
+
+ ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0,
+ "switch_1", priv);
+ if (ret < 0) {
+ pr_err("failed to request switch_1 IRQ\n");
+ goto out_free_irq0;
+ }
+
+ /* Reset the MIB counters */
+ reg = core_readl(priv, CORE_GMNCFGCFG);
+ reg |= RST_MIB_CNT;
+ core_writel(priv, reg, CORE_GMNCFGCFG);
+ reg &= ~RST_MIB_CNT;
+ core_writel(priv, reg, CORE_GMNCFGCFG);
+
+ /* Get the maximum number of ports for this switch */
+ priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
+ if (priv->hw_params.num_ports > DSA_MAX_PORTS)
+ priv->hw_params.num_ports = DSA_MAX_PORTS;
+
+ /* Assume a single GPHY setup if we can't read that property */
+ if (of_property_read_u32(dn, "brcm,num-gphy",
+ &priv->hw_params.num_gphy))
+ priv->hw_params.num_gphy = 1;
+
+ /* Enable all valid ports and disable those unused */
+ for (port = 0; port < priv->hw_params.num_ports; port++) {
+ /* IMP port receives special treatment */
+ if ((1 << port) & ds->enabled_port_mask)
+ bcm_sf2_port_setup(ds, port, NULL);
+ else if (dsa_is_cpu_port(ds, port))
+ bcm_sf2_imp_setup(ds, port);
+ else
+ bcm_sf2_port_disable(ds, port, NULL);
+ }
+
+ bcm_sf2_sw_configure_vlan(ds);
+
+ rev = reg_readl(priv, REG_SWITCH_REVISION);
+ priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
+ SWITCH_TOP_REV_MASK;
+ priv->hw_params.core_rev = (rev & SF2_REV_MASK);
+
+ rev = reg_readl(priv, REG_PHY_REVISION);
+ priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;
+
+ pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
+ priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
+ priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
+ priv->core, priv->irq0, priv->irq1);
+
+ return 0;
+
+out_free_irq0:
+ free_irq(priv->irq0, priv);
+out_mdio:
+ bcm_sf2_mdio_unregister(priv);
+out_unmap:
+ base = &priv->core;
+ for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
+ if (*base)
+ iounmap(*base);
+ base++;
+ }
+ return ret;
+}
+
static struct dsa_switch_driver bcm_sf2_switch_driver = {
.tag_protocol = DSA_TAG_PROTO_BRCM,
.probe = bcm_sf2_sw_drv_probe,
.setup = bcm_sf2_sw_setup,
.set_addr = bcm_sf2_sw_set_addr,
.get_phy_flags = bcm_sf2_sw_get_phy_flags,
- .phy_read = bcm_sf2_sw_phy_read,
- .phy_write = bcm_sf2_sw_phy_write,
.get_strings = bcm_sf2_sw_get_strings,
.get_ethtool_stats = bcm_sf2_sw_get_ethtool_stats,
.get_sset_count = bcm_sf2_sw_get_sset_count,
@@ -1398,6 +1717,11 @@ static struct dsa_switch_driver bcm_sf2_switch_driver = {
.port_fdb_add = bcm_sf2_sw_fdb_add,
.port_fdb_del = bcm_sf2_sw_fdb_del,
.port_fdb_dump = bcm_sf2_sw_fdb_dump,
+ .port_vlan_filtering = bcm_sf2_sw_vlan_filtering,
+ .port_vlan_prepare = bcm_sf2_sw_vlan_prepare,
+ .port_vlan_add = bcm_sf2_sw_vlan_add,
+ .port_vlan_del = bcm_sf2_sw_vlan_del,
+ .port_vlan_dump = bcm_sf2_sw_vlan_dump,
};
static int __init bcm_sf2_init(void)
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index 71b1e5298..dd446e466 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -21,6 +21,7 @@
#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/bitops.h>
+#include <linux/if_vlan.h>
#include <net/dsa.h>
@@ -50,6 +51,7 @@ struct bcm_sf2_port_status {
struct ethtool_eee eee;
u32 vlan_ctl_mask;
+ u16 pvid;
struct net_device *bridge_dev;
};
@@ -63,6 +65,11 @@ struct bcm_sf2_arl_entry {
u8 is_static:1;
};
+struct bcm_sf2_vlan {
+ u16 members;
+ u16 untag;
+};
+
static inline void bcm_sf2_mac_from_u64(u64 src, u8 *dst)
{
unsigned int i;
@@ -142,6 +149,15 @@ struct bcm_sf2_priv {
/* Bitmask of ports having an integrated PHY */
unsigned int int_phy_mask;
+
+ /* Master and slave MDIO bus controller */
+ unsigned int indir_phy_mask;
+ struct device_node *master_mii_dn;
+ struct mii_bus *slave_mii_bus;
+ struct mii_bus *master_mii_bus;
+
+ /* Cache of programmed VLANs */
+ struct bcm_sf2_vlan vlans[VLAN_N_VID];
};
struct bcm_sf2_hw_stats {
diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
index 97780d43b..9f2a9cb42 100644
--- a/drivers/net/dsa/bcm_sf2_regs.h
+++ b/drivers/net/dsa/bcm_sf2_regs.h
@@ -274,6 +274,23 @@
#define CORE_ARLA_SRCH_RSLT_MACVID(x) (CORE_ARLA_SRCH_RSLT_0_MACVID + ((x) * 0x40))
#define CORE_ARLA_SRCH_RSLT(x) (CORE_ARLA_SRCH_RSLT_0 + ((x) * 0x40))
+#define CORE_ARLA_VTBL_RWCTRL 0x1600
+#define ARLA_VTBL_CMD_WRITE 0
+#define ARLA_VTBL_CMD_READ 1
+#define ARLA_VTBL_CMD_CLEAR 2
+#define ARLA_VTBL_STDN (1 << 7)
+
+#define CORE_ARLA_VTBL_ADDR 0x1604
+#define VTBL_ADDR_INDEX_MASK 0xfff
+
+#define CORE_ARLA_VTBL_ENTRY 0x160c
+#define FWD_MAP_MASK 0x1ff
+#define UNTAG_MAP_MASK 0x1ff
+#define UNTAG_MAP_SHIFT 9
+#define MSTP_INDEX_MASK 0x7
+#define MSTP_INDEX_SHIFT 18
+#define FWD_MODE (1 << 21)
+
#define CORE_MEM_PSM_VDD_CTRL 0x2380
#define P_TXQ_PSM_VDD_SHIFT 2
#define P_TXQ_PSM_VDD_MASK 0x3
@@ -287,6 +304,59 @@
#define CORE_PORT_VLAN_CTL_PORT(x) (0xc400 + ((x) * 0x8))
#define PORT_VLAN_CTRL_MASK 0x1ff
+#define CORE_VLAN_CTRL0 0xd000
+#define CHANGE_1P_VID_INNER (1 << 0)
+#define CHANGE_1P_VID_OUTER (1 << 1)
+#define CHANGE_1Q_VID (1 << 3)
+#define VLAN_LEARN_MODE_SVL (0 << 5)
+#define VLAN_LEARN_MODE_IVL (3 << 5)
+#define VLAN_EN (1 << 7)
+
+#define CORE_VLAN_CTRL1 0xd004
+#define EN_RSV_MCAST_FWDMAP (1 << 2)
+#define EN_RSV_MCAST_UNTAG (1 << 3)
+#define EN_IPMC_BYPASS_FWDMAP (1 << 5)
+#define EN_IPMC_BYPASS_UNTAG (1 << 6)
+
+#define CORE_VLAN_CTRL2 0xd008
+#define EN_MIIM_BYPASS_V_FWDMAP (1 << 2)
+#define EN_GMRP_GVRP_V_FWDMAP (1 << 5)
+#define EN_GMRP_GVRP_UNTAG_MAP (1 << 6)
+
+#define CORE_VLAN_CTRL3 0xd00c
+#define EN_DROP_NON1Q_MASK 0x1ff
+
+#define CORE_VLAN_CTRL4 0xd014
+#define RESV_MCAST_FLOOD (1 << 1)
+#define EN_DOUBLE_TAG_MASK 0x3
+#define EN_DOUBLE_TAG_SHIFT 2
+#define EN_MGE_REV_GMRP (1 << 4)
+#define EN_MGE_REV_GVRP (1 << 5)
+#define INGR_VID_CHK_SHIFT 6
+#define INGR_VID_CHK_MASK 0x3
+#define INGR_VID_CHK_FWD (0 << INGR_VID_CHK_SHIFT)
+#define INGR_VID_CHK_DROP (1 << INGR_VID_CHK_SHIFT)
+#define INGR_VID_CHK_NO_CHK (2 << INGR_VID_CHK_SHIFT)
+#define INGR_VID_CHK_VID_VIOL_IMP (3 << INGR_VID_CHK_SHIFT)
+
+#define CORE_VLAN_CTRL5 0xd018
+#define EN_CPU_RX_BYP_INNER_CRCCHCK (1 << 0)
+#define EN_VID_FFF_FWD (1 << 2)
+#define DROP_VTABLE_MISS (1 << 3)
+#define EGRESS_DIR_FRM_BYP_TRUNK_EN (1 << 4)
+#define PRESV_NON1Q (1 << 6)
+
+#define CORE_VLAN_CTRL6 0xd01c
+#define STRICT_SFD_DETECT (1 << 0)
+#define DIS_ARL_BUST_LMIT (1 << 4)
+
+#define CORE_DEFAULT_1Q_TAG_P(x) (0xd040 + ((x) * 8))
+#define CFI_SHIFT 12
+#define PRI_SHIFT 13
+#define PRI_MASK 0x7
+
+#define CORE_JOIN_ALL_VLAN_EN 0xd140
+
#define CORE_EEE_EN_CTRL 0x24800
#define CORE_EEE_LPI_INDICATE 0x24810
diff --git a/drivers/net/dsa/mv88e6xxx/Kconfig b/drivers/net/dsa/mv88e6xxx/Kconfig
new file mode 100644
index 000000000..490bc06f9
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/Kconfig
@@ -0,0 +1,7 @@
+config NET_DSA_MV88E6XXX
+ tristate "Marvell 88E6xxx Ethernet switch fabric support"
+ depends on NET_DSA
+ select NET_DSA_TAG_EDSA
+ help
+ This driver adds support for most of the Marvell 88E6xxx models of
+ Ethernet switch chips, except 88E6060.
diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile
new file mode 100644
index 000000000..6e29a75ee
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_NET_DSA_MV88E6XXX) += chip.o
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
new file mode 100644
index 000000000..710679067
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -0,0 +1,4093 @@
+/*
+ * Marvell 88e6xxx Ethernet switch single-chip support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2015 CMC Electronics, Inc.
+ * Added support for VLAN Table Unit operations
+ *
+ * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_bridge.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/netdevice.h>
+#include <linux/gpio/consumer.h>
+#include <linux/phy.h>
+#include <net/dsa.h>
+#include <net/switchdev.h>
+#include "mv88e6xxx.h"
+
+static void assert_reg_lock(struct mv88e6xxx_chip *chip)
+{
+ if (unlikely(!mutex_is_locked(&chip->reg_lock))) {
+ dev_err(chip->dev, "Switch registers lock not held!\n");
+ dump_stack();
+ }
+}
+
+/* The switch ADDR[4:1] configuration pins define the chip SMI device address
+ * (ADDR[0] is always zero, thus only even SMI addresses can be strapped).
+ *
+ * When ADDR is all zero, the chip uses Single-chip Addressing Mode, assuming it
+ * is the only device connected to the SMI master. In this mode it responds to
+ * all 32 possible SMI addresses, and thus maps the internal devices directly.
+ *
+ * When ADDR is non-zero, the chip uses Multi-chip Addressing Mode, allowing
+ * multiple devices to share the SMI interface. In this mode it responds to only
+ * 2 registers, used to indirectly access the internal SMI devices.
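+ *
+ * Illustrative multi-chip access (matching the helpers below): to read
+ * internal device 0x10, register 0x03, write SMI_CMD with
+ * SMI_CMD_OP_22_READ | (0x10 << 5) | 0x03, poll until SMI_CMD_BUSY
+ * clears, then read the 16-bit result from SMI_DATA.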
+ */
+
+static int mv88e6xxx_smi_read(struct mv88e6xxx_chip *chip,
+ int addr, int reg, u16 *val)
+{
+ if (!chip->smi_ops)
+ return -EOPNOTSUPP;
+
+ return chip->smi_ops->read(chip, addr, reg, val);
+}
+
+static int mv88e6xxx_smi_write(struct mv88e6xxx_chip *chip,
+ int addr, int reg, u16 val)
+{
+ if (!chip->smi_ops)
+ return -EOPNOTSUPP;
+
+ return chip->smi_ops->write(chip, addr, reg, val);
+}
+
+static int mv88e6xxx_smi_single_chip_read(struct mv88e6xxx_chip *chip,
+ int addr, int reg, u16 *val)
+{
+ int ret;
+
+ ret = mdiobus_read_nested(chip->bus, addr, reg);
+ if (ret < 0)
+ return ret;
+
+ *val = ret & 0xffff;
+
+ return 0;
+}
+
+static int mv88e6xxx_smi_single_chip_write(struct mv88e6xxx_chip *chip,
+ int addr, int reg, u16 val)
+{
+ int ret;
+
+ ret = mdiobus_write_nested(chip->bus, addr, reg, val);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static const struct mv88e6xxx_ops mv88e6xxx_smi_single_chip_ops = {
+ .read = mv88e6xxx_smi_single_chip_read,
+ .write = mv88e6xxx_smi_single_chip_write,
+};
+
+static int mv88e6xxx_smi_multi_chip_wait(struct mv88e6xxx_chip *chip)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ ret = mdiobus_read_nested(chip->bus, chip->sw_addr, SMI_CMD);
+ if (ret < 0)
+ return ret;
+
+ if ((ret & SMI_CMD_BUSY) == 0)
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int mv88e6xxx_smi_multi_chip_read(struct mv88e6xxx_chip *chip,
+ int addr, int reg, u16 *val)
+{
+ int ret;
+
+ /* Wait for the bus to become free. */
+ ret = mv88e6xxx_smi_multi_chip_wait(chip);
+ if (ret < 0)
+ return ret;
+
+ /* Transmit the read command. */
+ ret = mdiobus_write_nested(chip->bus, chip->sw_addr, SMI_CMD,
+ SMI_CMD_OP_22_READ | (addr << 5) | reg);
+ if (ret < 0)
+ return ret;
+
+ /* Wait for the read command to complete. */
+ ret = mv88e6xxx_smi_multi_chip_wait(chip);
+ if (ret < 0)
+ return ret;
+
+ /* Read the data. */
+ ret = mdiobus_read_nested(chip->bus, chip->sw_addr, SMI_DATA);
+ if (ret < 0)
+ return ret;
+
+ *val = ret & 0xffff;
+
+ return 0;
+}
+
+static int mv88e6xxx_smi_multi_chip_write(struct mv88e6xxx_chip *chip,
+ int addr, int reg, u16 val)
+{
+ int ret;
+
+ /* Wait for the bus to become free. */
+ ret = mv88e6xxx_smi_multi_chip_wait(chip);
+ if (ret < 0)
+ return ret;
+
+ /* Transmit the data to write. */
+ ret = mdiobus_write_nested(chip->bus, chip->sw_addr, SMI_DATA, val);
+ if (ret < 0)
+ return ret;
+
+ /* Transmit the write command. */
+ ret = mdiobus_write_nested(chip->bus, chip->sw_addr, SMI_CMD,
+ SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
+ if (ret < 0)
+ return ret;
+
+ /* Wait for the write command to complete. */
+ ret = mv88e6xxx_smi_multi_chip_wait(chip);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static const struct mv88e6xxx_ops mv88e6xxx_smi_multi_chip_ops = {
+ .read = mv88e6xxx_smi_multi_chip_read,
+ .write = mv88e6xxx_smi_multi_chip_write,
+};
+
+static int mv88e6xxx_read(struct mv88e6xxx_chip *chip,
+ int addr, int reg, u16 *val)
+{
+ int err;
+
+ assert_reg_lock(chip);
+
+ err = mv88e6xxx_smi_read(chip, addr, reg, val);
+ if (err)
+ return err;
+
+ dev_dbg(chip->dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
+ addr, reg, *val);
+
+ return 0;
+}
+
+static int mv88e6xxx_write(struct mv88e6xxx_chip *chip,
+ int addr, int reg, u16 val)
+{
+ int err;
+
+ assert_reg_lock(chip);
+
+ err = mv88e6xxx_smi_write(chip, addr, reg, val);
+ if (err)
+ return err;
+
+ dev_dbg(chip->dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
+ addr, reg, val);
+
+ return 0;
+}
+
+/* Indirect write to single pointer-data register with an Update bit */
+static int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg,
+ u16 update)
+{
+ u16 val;
+ int i, err;
+
+ /* Wait until the previous operation is completed */
+ for (i = 0; i < 16; ++i) {
+ err = mv88e6xxx_read(chip, addr, reg, &val);
+ if (err)
+ return err;
+
+ if (!(val & BIT(15)))
+ break;
+ }
+
+ if (i == 16)
+ return -ETIMEDOUT;
+
+ /* Set the Update bit to trigger a write operation */
+ val = BIT(15) | update;
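+ /* (callers encode the register's pointer field in the upper bits of
+ * 'update' and the data in the lower bits; the exact layout depends
+ * on the register being accessed)
+ */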
+
+ return mv88e6xxx_write(chip, addr, reg, val);
+}
+
+static int _mv88e6xxx_reg_read(struct mv88e6xxx_chip *chip, int addr, int reg)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_read(chip, addr, reg, &val);
+ if (err)
+ return err;
+
+ return val;
+}
+
+static int _mv88e6xxx_reg_write(struct mv88e6xxx_chip *chip, int addr,
+ int reg, u16 val)
+{
+ return mv88e6xxx_write(chip, addr, reg, val);
+}
+
+static int mv88e6xxx_mdio_read_direct(struct mv88e6xxx_chip *chip,
+ int addr, int regnum)
+{
+ if (addr >= 0)
+ return _mv88e6xxx_reg_read(chip, addr, regnum);
+ return 0xffff;
+}
+
+static int mv88e6xxx_mdio_write_direct(struct mv88e6xxx_chip *chip,
+ int addr, int regnum, u16 val)
+{
+ if (addr >= 0)
+ return _mv88e6xxx_reg_write(chip, addr, regnum, val);
+ return 0;
+}
+
+static int mv88e6xxx_ppu_disable(struct mv88e6xxx_chip *chip)
+{
+ int ret;
+ unsigned long timeout;
+
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_CONTROL);
+ if (ret < 0)
+ return ret;
+
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL,
+ ret & ~GLOBAL_CONTROL_PPU_ENABLE);
+ if (ret)
+ return ret;
+
+ timeout = jiffies + 1 * HZ;
+ while (time_before(jiffies, timeout)) {
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATUS);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(1000, 2000);
+ if ((ret & GLOBAL_STATUS_PPU_MASK) !=
+ GLOBAL_STATUS_PPU_POLLING)
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int mv88e6xxx_ppu_enable(struct mv88e6xxx_chip *chip)
+{
+ int ret, err;
+ unsigned long timeout;
+
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_CONTROL);
+ if (ret < 0)
+ return ret;
+
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL,
+ ret | GLOBAL_CONTROL_PPU_ENABLE);
+ if (err)
+ return err;
+
+ timeout = jiffies + 1 * HZ;
+ while (time_before(jiffies, timeout)) {
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATUS);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(1000, 2000);
+ if ((ret & GLOBAL_STATUS_PPU_MASK) ==
+ GLOBAL_STATUS_PPU_POLLING)
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
+{
+ struct mv88e6xxx_chip *chip;
+
+ chip = container_of(ugly, struct mv88e6xxx_chip, ppu_work);
+
+ mutex_lock(&chip->reg_lock);
+
+ if (mutex_trylock(&chip->ppu_mutex)) {
+ if (mv88e6xxx_ppu_enable(chip) == 0)
+ chip->ppu_disabled = 0;
+ mutex_unlock(&chip->ppu_mutex);
+ }
+
+ mutex_unlock(&chip->reg_lock);
+}
+
+static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
+{
+ struct mv88e6xxx_chip *chip = (void *)_ps;
+
+ schedule_work(&chip->ppu_work);
+}
+
+static int mv88e6xxx_ppu_access_get(struct mv88e6xxx_chip *chip)
+{
+ int ret;
+
+ mutex_lock(&chip->ppu_mutex);
+
+ /* If the PHY polling unit is enabled, disable it so that
+ * we can access the PHY registers. If it was already
+ * disabled, cancel the timer that is going to re-enable
+ * it.
+ */
+ if (!chip->ppu_disabled) {
+ ret = mv88e6xxx_ppu_disable(chip);
+ if (ret < 0) {
+ mutex_unlock(&chip->ppu_mutex);
+ return ret;
+ }
+ chip->ppu_disabled = 1;
+ } else {
+ del_timer(&chip->ppu_timer);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static void mv88e6xxx_ppu_access_put(struct mv88e6xxx_chip *chip)
+{
+ /* Schedule a timer to re-enable the PHY polling unit. */
+ mod_timer(&chip->ppu_timer, jiffies + msecs_to_jiffies(10));
+ mutex_unlock(&chip->ppu_mutex);
+}
+
+static void mv88e6xxx_ppu_state_init(struct mv88e6xxx_chip *chip)
+{
+ mutex_init(&chip->ppu_mutex);
+ INIT_WORK(&chip->ppu_work, mv88e6xxx_ppu_reenable_work);
+ init_timer(&chip->ppu_timer);
+ chip->ppu_timer.data = (unsigned long)chip;
+ chip->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
+}
+
+static int mv88e6xxx_mdio_read_ppu(struct mv88e6xxx_chip *chip, int addr,
+ int regnum)
+{
+ int ret;
+
+ ret = mv88e6xxx_ppu_access_get(chip);
+ if (ret >= 0) {
+ ret = _mv88e6xxx_reg_read(chip, addr, regnum);
+ mv88e6xxx_ppu_access_put(chip);
+ }
+
+ return ret;
+}
+
+static int mv88e6xxx_mdio_write_ppu(struct mv88e6xxx_chip *chip, int addr,
+ int regnum, u16 val)
+{
+ int ret;
+
+ ret = mv88e6xxx_ppu_access_get(chip);
+ if (ret >= 0) {
+ ret = _mv88e6xxx_reg_write(chip, addr, regnum, val);
+ mv88e6xxx_ppu_access_put(chip);
+ }
+
+ return ret;
+}
+
+static bool mv88e6xxx_6065_family(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->family == MV88E6XXX_FAMILY_6065;
+}
+
+static bool mv88e6xxx_6095_family(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->family == MV88E6XXX_FAMILY_6095;
+}
+
+static bool mv88e6xxx_6097_family(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->family == MV88E6XXX_FAMILY_6097;
+}
+
+static bool mv88e6xxx_6165_family(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->family == MV88E6XXX_FAMILY_6165;
+}
+
+static bool mv88e6xxx_6185_family(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->family == MV88E6XXX_FAMILY_6185;
+}
+
+static bool mv88e6xxx_6320_family(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->family == MV88E6XXX_FAMILY_6320;
+}
+
+static bool mv88e6xxx_6351_family(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->family == MV88E6XXX_FAMILY_6351;
+}
+
+static bool mv88e6xxx_6352_family(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->family == MV88E6XXX_FAMILY_6352;
+}
+
+static unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->num_databases;
+}
+
+static bool mv88e6xxx_has_fid_reg(struct mv88e6xxx_chip *chip)
+{
+ /* Does the device have dedicated FID registers for ATU and VTU ops? */
+ if (mv88e6xxx_6097_family(chip) || mv88e6xxx_6165_family(chip) ||
+ mv88e6xxx_6351_family(chip) || mv88e6xxx_6352_family(chip))
+ return true;
+
+ return false;
+}
+
+/* We expect the switch to perform auto-negotiation if there is a real
+ * PHY. However, in the case of a fixed-link PHY, we force the port
+ * settings from the fixed-link settings.
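+ *
+ * For reference (illustrative), such a fixed link is typically
+ * described in the device tree as:
+ *
+ *	fixed-link {
+ *		speed = <1000>;
+ *		full-duplex;
+ *	};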
+ */
+static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ u32 reg;
+ int ret;
+
+ if (!phy_is_pseudo_fixed_link(phydev))
+ return;
+
+ mutex_lock(&chip->reg_lock);
+
+ ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_PCS_CTRL);
+ if (ret < 0)
+ goto out;
+
+ reg = ret & ~(PORT_PCS_CTRL_LINK_UP |
+ PORT_PCS_CTRL_FORCE_LINK |
+ PORT_PCS_CTRL_DUPLEX_FULL |
+ PORT_PCS_CTRL_FORCE_DUPLEX |
+ PORT_PCS_CTRL_UNFORCED);
+
+ reg |= PORT_PCS_CTRL_FORCE_LINK;
+ if (phydev->link)
+ reg |= PORT_PCS_CTRL_LINK_UP;
+
+ if (mv88e6xxx_6065_family(chip) && phydev->speed > SPEED_100)
+ goto out;
+
+ switch (phydev->speed) {
+ case SPEED_1000:
+ reg |= PORT_PCS_CTRL_1000;
+ break;
+ case SPEED_100:
+ reg |= PORT_PCS_CTRL_100;
+ break;
+ case SPEED_10:
+ reg |= PORT_PCS_CTRL_10;
+ break;
+ default:
+ pr_info("Unknown speed\n");
+ goto out;
+ }
+
+ reg |= PORT_PCS_CTRL_FORCE_DUPLEX;
+ if (phydev->duplex == DUPLEX_FULL)
+ reg |= PORT_PCS_CTRL_DUPLEX_FULL;
+
+ if ((mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip)) &&
+ (port >= chip->info->num_ports - 2)) {
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+ reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ reg |= PORT_PCS_CTRL_RGMII_DELAY_TXCLK;
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+ reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
+ PORT_PCS_CTRL_RGMII_DELAY_TXCLK);
+ }
+ _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_PCS_CTRL, reg);
+
+out:
+ mutex_unlock(&chip->reg_lock);
+}
+
+static int _mv88e6xxx_stats_wait(struct mv88e6xxx_chip *chip)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATS_OP);
+ if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int _mv88e6xxx_stats_snapshot(struct mv88e6xxx_chip *chip, int port)
+{
+ int ret;
+
+ if (mv88e6xxx_6320_family(chip) || mv88e6xxx_6352_family(chip))
+ port = (port + 1) << 5;
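+ /* (these families encode port + 1 in bits 9:5 of the Stats
+ * Operation register, so port 0 becomes 0x20 here; illustrative)
+ */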
+
+ /* Snapshot the hardware statistics counters for this port. */
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_STATS_OP,
+ GLOBAL_STATS_OP_CAPTURE_PORT |
+ GLOBAL_STATS_OP_HIST_RX_TX | port);
+ if (ret < 0)
+ return ret;
+
+ /* Wait for the snapshotting to complete. */
+ ret = _mv88e6xxx_stats_wait(chip);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void _mv88e6xxx_stats_read(struct mv88e6xxx_chip *chip,
+ int stat, u32 *val)
+{
+ u32 _val;
+ int ret;
+
+ *val = 0;
+
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_STATS_OP,
+ GLOBAL_STATS_OP_READ_CAPTURED |
+ GLOBAL_STATS_OP_HIST_RX_TX | stat);
+ if (ret < 0)
+ return;
+
+ ret = _mv88e6xxx_stats_wait(chip);
+ if (ret < 0)
+ return;
+
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
+ if (ret < 0)
+ return;
+
+ _val = ret << 16;
+
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
+ if (ret < 0)
+ return;
+
+ *val = _val | ret;
+}
+
+static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
+ { "in_good_octets", 8, 0x00, BANK0, },
+ { "in_bad_octets", 4, 0x02, BANK0, },
+ { "in_unicast", 4, 0x04, BANK0, },
+ { "in_broadcasts", 4, 0x06, BANK0, },
+ { "in_multicasts", 4, 0x07, BANK0, },
+ { "in_pause", 4, 0x16, BANK0, },
+ { "in_undersize", 4, 0x18, BANK0, },
+ { "in_fragments", 4, 0x19, BANK0, },
+ { "in_oversize", 4, 0x1a, BANK0, },
+ { "in_jabber", 4, 0x1b, BANK0, },
+ { "in_rx_error", 4, 0x1c, BANK0, },
+ { "in_fcs_error", 4, 0x1d, BANK0, },
+ { "out_octets", 8, 0x0e, BANK0, },
+ { "out_unicast", 4, 0x10, BANK0, },
+ { "out_broadcasts", 4, 0x13, BANK0, },
+ { "out_multicasts", 4, 0x12, BANK0, },
+ { "out_pause", 4, 0x15, BANK0, },
+ { "excessive", 4, 0x11, BANK0, },
+ { "collisions", 4, 0x1e, BANK0, },
+ { "deferred", 4, 0x05, BANK0, },
+ { "single", 4, 0x14, BANK0, },
+ { "multiple", 4, 0x17, BANK0, },
+ { "out_fcs_error", 4, 0x03, BANK0, },
+ { "late", 4, 0x1f, BANK0, },
+ { "hist_64bytes", 4, 0x08, BANK0, },
+ { "hist_65_127bytes", 4, 0x09, BANK0, },
+ { "hist_128_255bytes", 4, 0x0a, BANK0, },
+ { "hist_256_511bytes", 4, 0x0b, BANK0, },
+ { "hist_512_1023bytes", 4, 0x0c, BANK0, },
+ { "hist_1024_max_bytes", 4, 0x0d, BANK0, },
+ { "sw_in_discards", 4, 0x10, PORT, },
+ { "sw_in_filtered", 2, 0x12, PORT, },
+ { "sw_out_filtered", 2, 0x13, PORT, },
+ { "in_discards", 4, 0x00 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "in_filtered", 4, 0x01 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "in_accepted", 4, 0x02 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "in_bad_accepted", 4, 0x03 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "in_good_avb_class_a", 4, 0x04 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "in_good_avb_class_b", 4, 0x05 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "in_bad_avb_class_a", 4, 0x06 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "in_bad_avb_class_b", 4, 0x07 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "tcam_counter_0", 4, 0x08 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "tcam_counter_1", 4, 0x09 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "tcam_counter_2", 4, 0x0a | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "tcam_counter_3", 4, 0x0b | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "in_da_unknown", 4, 0x0e | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "in_management", 4, 0x0f | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "out_queue_0", 4, 0x10 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "out_queue_1", 4, 0x11 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "out_queue_2", 4, 0x12 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "out_queue_3", 4, 0x13 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "out_queue_4", 4, 0x14 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "out_queue_5", 4, 0x15 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "out_queue_6", 4, 0x16 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "out_queue_7", 4, 0x17 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "out_cut_through", 4, 0x18 | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "out_octets_a", 4, 0x1a | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "out_octets_b", 4, 0x1b | GLOBAL_STATS_OP_BANK_1, BANK1, },
+ { "out_management", 4, 0x1f | GLOBAL_STATS_OP_BANK_1, BANK1, },
+};
+
+static bool mv88e6xxx_has_stat(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_hw_stat *stat)
+{
+ switch (stat->type) {
+ case BANK0:
+ return true;
+ case BANK1:
+ return mv88e6xxx_6320_family(chip);
+ case PORT:
+ return mv88e6xxx_6095_family(chip) ||
+ mv88e6xxx_6185_family(chip) ||
+ mv88e6xxx_6097_family(chip) ||
+ mv88e6xxx_6165_family(chip) ||
+ mv88e6xxx_6351_family(chip) ||
+ mv88e6xxx_6352_family(chip);
+ }
+ return false;
+}
+
+static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_hw_stat *s,
+ int port)
+{
+ u32 low;
+ u32 high = 0;
+ int ret;
+ u64 value;
+
+ switch (s->type) {
+ case PORT:
+ ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), s->reg);
+ if (ret < 0)
+ return UINT64_MAX;
+
+ low = ret;
+ if (s->sizeof_stat == 4) {
+ ret = _mv88e6xxx_reg_read(chip, REG_PORT(port),
+ s->reg + 1);
+ if (ret < 0)
+ return UINT64_MAX;
+ high = ret;
+ }
+ break;
+ case BANK0:
+ case BANK1:
+ _mv88e6xxx_stats_read(chip, s->reg, &low);
+ if (s->sizeof_stat == 8)
+ _mv88e6xxx_stats_read(chip, s->reg + 1, &high);
+ }
+ value = (((u64)high) << 16) | low;
+ return value;
+}
+
+static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port,
+ uint8_t *data)
+{
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ struct mv88e6xxx_hw_stat *stat;
+ int i, j;
+
+ for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
+ stat = &mv88e6xxx_hw_stats[i];
+ if (mv88e6xxx_has_stat(chip, stat)) {
+ memcpy(data + j * ETH_GSTRING_LEN, stat->string,
+ ETH_GSTRING_LEN);
+ j++;
+ }
+ }
+}
+
+static int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ struct mv88e6xxx_hw_stat *stat;
+ int i, j;
+
+ for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
+ stat = &mv88e6xxx_hw_stats[i];
+ if (mv88e6xxx_has_stat(chip, stat))
+ j++;
+ }
+ return j;
+}
+
+static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ struct mv88e6xxx_hw_stat *stat;
+ int ret;
+ int i, j;
+
+ mutex_lock(&chip->reg_lock);
+
+ ret = _mv88e6xxx_stats_snapshot(chip, port);
+ if (ret < 0) {
+ mutex_unlock(&chip->reg_lock);
+ return;
+ }
+ for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
+ stat = &mv88e6xxx_hw_stats[i];
+ if (mv88e6xxx_has_stat(chip, stat)) {
+ data[j] = _mv88e6xxx_get_ethtool_stat(chip, stat, port);
+ j++;
+ }
+ }
+
+ mutex_unlock(&chip->reg_lock);
+}
+
+static int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
+{
+ return 32 * sizeof(u16);
+}
+
+static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
+ struct ethtool_regs *regs, void *_p)
+{
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ u16 *p = _p;
+ int i;
+
+ regs->version = 0;
+
+ memset(p, 0xff, 32 * sizeof(u16));
+
+ mutex_lock(&chip->reg_lock);
+
+ for (i = 0; i < 32; i++) {
+ int ret;
+
+ ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), i);
+ if (ret >= 0)
+ p[i] = ret;
+ }
+
+ mutex_unlock(&chip->reg_lock);
+}
+
+static int _mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int reg, int offset,
+ u16 mask)
+{
+ unsigned long timeout = jiffies + HZ / 10;
+
+ while (time_before(jiffies, timeout)) {
+ int ret;
+
+ ret = _mv88e6xxx_reg_read(chip, reg, offset);
+ if (ret < 0)
+ return ret;
+ if (!(ret & mask))
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+ return -ETIMEDOUT;
+}
+
+static int mv88e6xxx_mdio_wait(struct mv88e6xxx_chip *chip)
+{
+ return _mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_SMI_OP,
+ GLOBAL2_SMI_OP_BUSY);
+}
+
+static int _mv88e6xxx_atu_wait(struct mv88e6xxx_chip *chip)
+{
+ return _mv88e6xxx_wait(chip, REG_GLOBAL, GLOBAL_ATU_OP,
+ GLOBAL_ATU_OP_BUSY);
+}
+
+static int mv88e6xxx_mdio_read_indirect(struct mv88e6xxx_chip *chip,
+ int addr, int regnum)
+{
+ int ret;
+
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SMI_OP,
+ GLOBAL2_SMI_OP_22_READ | (addr << 5) |
+ regnum);
+ if (ret < 0)
+ return ret;
+
+ ret = mv88e6xxx_mdio_wait(chip);
+ if (ret < 0)
+ return ret;
+
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL2, GLOBAL2_SMI_DATA);
+
+ return ret;
+}
+
+static int mv88e6xxx_mdio_write_indirect(struct mv88e6xxx_chip *chip,
+ int addr, int regnum, u16 val)
+{
+ int ret;
+
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
+ if (ret < 0)
+ return ret;
+
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SMI_OP,
+ GLOBAL2_SMI_OP_22_WRITE | (addr << 5) |
+ regnum);
+
+ return mv88e6xxx_mdio_wait(chip);
+}
+
+static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e)
+{
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ int reg;
+
+ if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEE))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&chip->reg_lock);
+
+ reg = mv88e6xxx_mdio_read_indirect(chip, port, 16);
+ if (reg < 0)
+ goto out;
+
+ e->eee_enabled = !!(reg & 0x0200);
+ e->tx_lpi_enabled = !!(reg & 0x0100);
+
+ reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_STATUS);
+ if (reg < 0)
+ goto out;
+
+ e->eee_active = !!(reg & PORT_STATUS_EEE);
+ reg = 0;
+
+out:
+ mutex_unlock(&chip->reg_lock);
+ return reg;
+}
+
+static int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
+ struct phy_device *phydev, struct ethtool_eee *e)
+{
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ int reg;
+ int ret;
+
+ if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEE))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&chip->reg_lock);
+
+ ret = mv88e6xxx_mdio_read_indirect(chip, port, 16);
+ if (ret < 0)
+ goto out;
+
+ reg = ret & ~0x0300;
+ if (e->eee_enabled)
+ reg |= 0x0200;
+ if (e->tx_lpi_enabled)
+ reg |= 0x0100;
+
+ ret = mv88e6xxx_mdio_write_indirect(chip, port, 16, reg);
+out:
+ mutex_unlock(&chip->reg_lock);
+
+ return ret;
+}
+
+static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_chip *chip, u16 fid, u16 cmd)
+{
+ int ret;
+
+ if (mv88e6xxx_has_fid_reg(chip)) {
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_FID,
+ fid);
+ if (ret < 0)
+ return ret;
+ } else if (mv88e6xxx_num_databases(chip) == 256) {
+ /* ATU DBNum[7:4] are located in ATU Control 15:12 */
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL);
+ if (ret < 0)
+ return ret;
+
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL,
+ (ret & 0xfff) |
+ ((fid << 8) & 0xf000));
+ if (ret < 0)
+ return ret;
+
+ /* ATU DBNum[3:0] are located in ATU Operation 3:0 */
+ cmd |= fid & 0xf;
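+
+ /* worked example (illustrative): fid 0x5c puts 0x5 into ATU
+ * Control bits 15:12 above and 0xc into ATU Operation bits 3:0
+ */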
+ }
+
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
+ if (ret < 0)
+ return ret;
+
+ return _mv88e6xxx_atu_wait(chip);
+}
+
+static int _mv88e6xxx_atu_data_write(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_atu_entry *entry)
+{
+ u16 data = entry->state & GLOBAL_ATU_DATA_STATE_MASK;
+
+ if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
+ unsigned int mask, shift;
+
+ if (entry->trunk) {
+ data |= GLOBAL_ATU_DATA_TRUNK;
+ mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
+ shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
+ } else {
+ mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
+ shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
+ }
+
+ data |= (entry->portv_trunkid << shift) & mask;
+ }
+
+ return _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_DATA, data);
+}
+
+static int _mv88e6xxx_atu_flush_move(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_atu_entry *entry,
+ bool static_too)
+{
+ int op;
+ int err;
+
+ err = _mv88e6xxx_atu_wait(chip);
+ if (err)
+ return err;
+
+ err = _mv88e6xxx_atu_data_write(chip, entry);
+ if (err)
+ return err;
+
+ if (entry->fid) {
+ op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB :
+ GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB;
+ } else {
+ op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL :
+ GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC;
+ }
+
+ return _mv88e6xxx_atu_cmd(chip, entry->fid, op);
+}
+
+static int _mv88e6xxx_atu_flush(struct mv88e6xxx_chip *chip,
+ u16 fid, bool static_too)
+{
+ struct mv88e6xxx_atu_entry entry = {
+ .fid = fid,
+ .state = 0, /* EntryState bits must be 0 */
+ };
+
+ return _mv88e6xxx_atu_flush_move(chip, &entry, static_too);
+}
+
+static int _mv88e6xxx_atu_move(struct mv88e6xxx_chip *chip, u16 fid,
+ int from_port, int to_port, bool static_too)
+{
+ struct mv88e6xxx_atu_entry entry = {
+ .trunk = false,
+ .fid = fid,
+ };
+
+ /* EntryState bits must be 0xF */
+ entry.state = GLOBAL_ATU_DATA_STATE_MASK;
+
+ /* ToPort and FromPort are respectively in PortVec bits 7:4 and 3:0 */
+ entry.portv_trunkid = (to_port & 0x0f) << 4;
+ entry.portv_trunkid |= from_port & 0x0f;
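+ /* e.g. (illustrative) moving entries from port 2 to port 5 encodes
+ * portv_trunkid = (5 << 4) | 2 = 0x52
+ */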
+
+ return _mv88e6xxx_atu_flush_move(chip, &entry, static_too);
+}
+
+static int _mv88e6xxx_atu_remove(struct mv88e6xxx_chip *chip, u16 fid,
+ int port, bool static_too)
+{
+ /* Destination port 0xF means remove the entries */
+ return _mv88e6xxx_atu_move(chip, fid, port, 0x0f, static_too);
+}
+
+static const char * const mv88e6xxx_port_state_names[] = {
+ [PORT_CONTROL_STATE_DISABLED] = "Disabled",
+ [PORT_CONTROL_STATE_BLOCKING] = "Blocking/Listening",
+ [PORT_CONTROL_STATE_LEARNING] = "Learning",
+ [PORT_CONTROL_STATE_FORWARDING] = "Forwarding",
+};
+
+static int _mv88e6xxx_port_state(struct mv88e6xxx_chip *chip, int port,
+ u8 state)
+{
+ struct dsa_switch *ds = chip->ds;
+ int reg, ret = 0;
+ u8 oldstate;
+
+ reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_CONTROL);
+ if (reg < 0)
+ return reg;
+
+ oldstate = reg & PORT_CONTROL_STATE_MASK;
+
+ if (oldstate != state) {
+ /* Flush forwarding database if we're moving a port
+ * from Learning or Forwarding state to Disabled or
+ * Blocking or Listening state.
+ */
+ if ((oldstate == PORT_CONTROL_STATE_LEARNING ||
+ oldstate == PORT_CONTROL_STATE_FORWARDING) &&
+ (state == PORT_CONTROL_STATE_DISABLED ||
+ state == PORT_CONTROL_STATE_BLOCKING)) {
+ ret = _mv88e6xxx_atu_remove(chip, 0, port, false);
+ if (ret)
+ return ret;
+ }
+
+ reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL,
+ reg);
+ if (ret)
+ return ret;
+
+ netdev_dbg(ds->ports[port].netdev, "PortState %s (was %s)\n",
+ mv88e6xxx_port_state_names[state],
+ mv88e6xxx_port_state_names[oldstate]);
+ }
+
+ return ret;
+}
+
+static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port)
+{
+ struct net_device *bridge = chip->ports[port].bridge_dev;
+ const u16 mask = (1 << chip->info->num_ports) - 1;
+ struct dsa_switch *ds = chip->ds;
+ u16 output_ports = 0;
+ int reg;
+ int i;
+
+ /* allow CPU port or DSA link(s) to send frames to every port */
+ if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
+ output_ports = mask;
+ } else {
+ for (i = 0; i < chip->info->num_ports; ++i) {
+ /* allow sending frames to every group member */
+ if (bridge && chip->ports[i].bridge_dev == bridge)
+ output_ports |= BIT(i);
+
+ /* allow sending frames to CPU port and DSA link(s) */
+ if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
+ output_ports |= BIT(i);
+ }
+ }
+
+ /* prevent frames from going back out of the port they came in on */
+ output_ports &= ~BIT(port);
+
+ reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_BASE_VLAN);
+ if (reg < 0)
+ return reg;
+
+ reg &= ~mask;
+ reg |= output_ports & mask;
+
+ return _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_BASE_VLAN, reg);
+}
+
+static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ int stp_state;
+ int err;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ stp_state = PORT_CONTROL_STATE_DISABLED;
+ break;
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ stp_state = PORT_CONTROL_STATE_BLOCKING;
+ break;
+ case BR_STATE_LEARNING:
+ stp_state = PORT_CONTROL_STATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ default:
+ stp_state = PORT_CONTROL_STATE_FORWARDING;
+ break;
+ }
+
+ mutex_lock(&chip->reg_lock);
+ err = _mv88e6xxx_port_state(chip, port, stp_state);
+ mutex_unlock(&chip->reg_lock);
+
+ if (err)
+ netdev_err(ds->ports[port].netdev,
+ "failed to update state to %s\n",
+ mv88e6xxx_port_state_names[stp_state]);
+}
+
+static int _mv88e6xxx_port_pvid(struct mv88e6xxx_chip *chip, int port,
+ u16 *new, u16 *old)
+{
+ struct dsa_switch *ds = chip->ds;
+ u16 pvid;
+ int ret;
+
+ ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_DEFAULT_VLAN);
+ if (ret < 0)
+ return ret;
+
+ pvid = ret & PORT_DEFAULT_VLAN_MASK;
+
+ if (new) {
+ ret &= ~PORT_DEFAULT_VLAN_MASK;
+ ret |= *new & PORT_DEFAULT_VLAN_MASK;
+
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
+ PORT_DEFAULT_VLAN, ret);
+ if (ret < 0)
+ return ret;
+
+ netdev_dbg(ds->ports[port].netdev,
+ "DefaultVID %d (was %d)\n", *new, pvid);
+ }
+
+ if (old)
+ *old = pvid;
+
+ return 0;
+}
+
+static int _mv88e6xxx_port_pvid_get(struct mv88e6xxx_chip *chip,
+ int port, u16 *pvid)
+{
+ return _mv88e6xxx_port_pvid(chip, port, NULL, pvid);
+}
+
+static int _mv88e6xxx_port_pvid_set(struct mv88e6xxx_chip *chip,
+ int port, u16 pvid)
+{
+ return _mv88e6xxx_port_pvid(chip, port, &pvid, NULL);
+}
+
+static int _mv88e6xxx_vtu_wait(struct mv88e6xxx_chip *chip)
+{
+ return _mv88e6xxx_wait(chip, REG_GLOBAL, GLOBAL_VTU_OP,
+ GLOBAL_VTU_OP_BUSY);
+}
+
+static int _mv88e6xxx_vtu_cmd(struct mv88e6xxx_chip *chip, u16 op)
+{
+ int ret;
+
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_OP, op);
+ if (ret < 0)
+ return ret;
+
+ return _mv88e6xxx_vtu_wait(chip);
+}
+
+static int _mv88e6xxx_vtu_stu_flush(struct mv88e6xxx_chip *chip)
+{
+ int ret;
+
+ ret = _mv88e6xxx_vtu_wait(chip);
+ if (ret < 0)
+ return ret;
+
+ return _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_FLUSH_ALL);
+}
+
+static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_stu_entry *entry,
+ unsigned int nibble_offset)
+{
+ u16 regs[3];
+ int i;
+ int ret;
+
+ for (i = 0; i < 3; ++i) {
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL,
+ GLOBAL_VTU_DATA_0_3 + i);
+ if (ret < 0)
+ return ret;
+
+ regs[i] = ret;
+ }
+
+ for (i = 0; i < chip->info->num_ports; ++i) {
+ unsigned int shift = (i % 4) * 4 + nibble_offset;
+ u16 reg = regs[i / 4];
+
+ entry->data[i] = (reg >> shift) & GLOBAL_VTU_STU_DATA_MASK;
+ }
+
+ return 0;
+}
+
+static int mv88e6xxx_vtu_data_read(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_stu_entry *entry)
+{
+ return _mv88e6xxx_vtu_stu_data_read(chip, entry, 0);
+}
+
+static int mv88e6xxx_stu_data_read(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_stu_entry *entry)
+{
+ return _mv88e6xxx_vtu_stu_data_read(chip, entry, 2);
+}
+
+static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_stu_entry *entry,
+ unsigned int nibble_offset)
+{
+ u16 regs[3] = { 0 };
+ int i;
+ int ret;
+
+ for (i = 0; i < chip->info->num_ports; ++i) {
+ unsigned int shift = (i % 4) * 4 + nibble_offset;
+ u8 data = entry->data[i];
+
+ regs[i / 4] |= (data & GLOBAL_VTU_STU_DATA_MASK) << shift;
+ }
+
+ for (i = 0; i < 3; ++i) {
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL,
+ GLOBAL_VTU_DATA_0_3 + i, regs[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mv88e6xxx_vtu_data_write(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_stu_entry *entry)
+{
+ return _mv88e6xxx_vtu_stu_data_write(chip, entry, 0);
+}
+
+static int mv88e6xxx_stu_data_write(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_stu_entry *entry)
+{
+ return _mv88e6xxx_vtu_stu_data_write(chip, entry, 2);
+}
+
+static int _mv88e6xxx_vtu_vid_write(struct mv88e6xxx_chip *chip, u16 vid)
+{
+ return _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_VID,
+ vid & GLOBAL_VTU_VID_MASK);
+}
+
+static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_stu_entry *entry)
+{
+ struct mv88e6xxx_vtu_stu_entry next = { 0 };
+ int ret;
+
+ ret = _mv88e6xxx_vtu_wait(chip);
+ if (ret < 0)
+ return ret;
+
+ ret = _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_VTU_GET_NEXT);
+ if (ret < 0)
+ return ret;
+
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_VTU_VID);
+ if (ret < 0)
+ return ret;
+
+ next.vid = ret & GLOBAL_VTU_VID_MASK;
+ next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
+
+ if (next.valid) {
+ ret = mv88e6xxx_vtu_data_read(chip, &next);
+ if (ret < 0)
+ return ret;
+
+ if (mv88e6xxx_has_fid_reg(chip)) {
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL,
+ GLOBAL_VTU_FID);
+ if (ret < 0)
+ return ret;
+
+ next.fid = ret & GLOBAL_VTU_FID_MASK;
+ } else if (mv88e6xxx_num_databases(chip) == 256) {
+ /* VTU DBNum[7:4] are located in VTU Operation 11:8, and
+ * VTU DBNum[3:0] are located in VTU Operation 3:0
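+ * (illustrative: bits 11:8 = 0x5 and bits 3:0 = 0xc decode to
+ * fid 0x5c below)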
+ */
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL,
+ GLOBAL_VTU_OP);
+ if (ret < 0)
+ return ret;
+
+ next.fid = (ret & 0xf00) >> 4;
+ next.fid |= ret & 0xf;
+ }
+
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_STU)) {
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL,
+ GLOBAL_VTU_SID);
+ if (ret < 0)
+ return ret;
+
+ next.sid = ret & GLOBAL_VTU_SID_MASK;
+ }
+ }
+
+ *entry = next;
+ return 0;
+}
+
+static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_vlan *vlan,
+ int (*cb)(struct switchdev_obj *obj))
+{
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ struct mv88e6xxx_vtu_stu_entry next;
+ u16 pvid;
+ int err;
+
+ if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&chip->reg_lock);
+
+ err = _mv88e6xxx_port_pvid_get(chip, port, &pvid);
+ if (err)
+ goto unlock;
+
+ err = _mv88e6xxx_vtu_vid_write(chip, GLOBAL_VTU_VID_MASK);
+ if (err)
+ goto unlock;
+
+ do {
+ err = _mv88e6xxx_vtu_getnext(chip, &next);
+ if (err)
+ break;
+
+ if (!next.valid)
+ break;
+
+ if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
+ continue;
+
+ /* reinit and dump this VLAN obj */
+ vlan->vid_begin = next.vid;
+ vlan->vid_end = next.vid;
+ vlan->flags = 0;
+
+ if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
+ vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
+
+ if (next.vid == pvid)
+ vlan->flags |= BRIDGE_VLAN_INFO_PVID;
+
+ err = cb(&vlan->obj);
+ if (err)
+ break;
+ } while (next.vid < GLOBAL_VTU_VID_MASK);
+
+unlock:
+ mutex_unlock(&chip->reg_lock);
+
+ return err;
+}
+
+static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_stu_entry *entry)
+{
+ u16 op = GLOBAL_VTU_OP_VTU_LOAD_PURGE;
+ u16 reg = 0;
+ int ret;
+
+ ret = _mv88e6xxx_vtu_wait(chip);
+ if (ret < 0)
+ return ret;
+
+ if (!entry->valid)
+ goto loadpurge;
+
+ /* Write port member tags */
+ ret = mv88e6xxx_vtu_data_write(chip, entry);
+ if (ret < 0)
+ return ret;
+
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_STU)) {
+ reg = entry->sid & GLOBAL_VTU_SID_MASK;
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_SID,
+ reg);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (mv88e6xxx_has_fid_reg(chip)) {
+ reg = entry->fid & GLOBAL_VTU_FID_MASK;
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_FID,
+ reg);
+ if (ret < 0)
+ return ret;
+ } else if (mv88e6xxx_num_databases(chip) == 256) {
+ /* VTU DBNum[7:4] are located in VTU Operation 11:8, and
+ * VTU DBNum[3:0] are located in VTU Operation 3:0
+ */
+		op |= (entry->fid & 0xf0) << 4;
+ op |= entry->fid & 0xf;
+ }
+
+ reg = GLOBAL_VTU_VID_VALID;
+loadpurge:
+ reg |= entry->vid & GLOBAL_VTU_VID_MASK;
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_VID, reg);
+ if (ret < 0)
+ return ret;
+
+ return _mv88e6xxx_vtu_cmd(chip, op);
+}
+
+static int _mv88e6xxx_stu_getnext(struct mv88e6xxx_chip *chip, u8 sid,
+ struct mv88e6xxx_vtu_stu_entry *entry)
+{
+ struct mv88e6xxx_vtu_stu_entry next = { 0 };
+ int ret;
+
+ ret = _mv88e6xxx_vtu_wait(chip);
+ if (ret < 0)
+ return ret;
+
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_SID,
+ sid & GLOBAL_VTU_SID_MASK);
+ if (ret < 0)
+ return ret;
+
+ ret = _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_STU_GET_NEXT);
+ if (ret < 0)
+ return ret;
+
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_VTU_SID);
+ if (ret < 0)
+ return ret;
+
+ next.sid = ret & GLOBAL_VTU_SID_MASK;
+
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_VTU_VID);
+ if (ret < 0)
+ return ret;
+
+ next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
+
+ if (next.valid) {
+ ret = mv88e6xxx_stu_data_read(chip, &next);
+ if (ret < 0)
+ return ret;
+ }
+
+ *entry = next;
+ return 0;
+}
+
+static int _mv88e6xxx_stu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_stu_entry *entry)
+{
+ u16 reg = 0;
+ int ret;
+
+ ret = _mv88e6xxx_vtu_wait(chip);
+ if (ret < 0)
+ return ret;
+
+ if (!entry->valid)
+ goto loadpurge;
+
+ /* Write port states */
+ ret = mv88e6xxx_stu_data_write(chip, entry);
+ if (ret < 0)
+ return ret;
+
+ reg = GLOBAL_VTU_VID_VALID;
+loadpurge:
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_VID, reg);
+ if (ret < 0)
+ return ret;
+
+ reg = entry->sid & GLOBAL_VTU_SID_MASK;
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_SID, reg);
+ if (ret < 0)
+ return ret;
+
+ return _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_STU_LOAD_PURGE);
+}
+
+static int _mv88e6xxx_port_fid(struct mv88e6xxx_chip *chip, int port,
+ u16 *new, u16 *old)
+{
+ struct dsa_switch *ds = chip->ds;
+ u16 upper_mask;
+ u16 fid;
+ int ret;
+
+ if (mv88e6xxx_num_databases(chip) == 4096)
+ upper_mask = 0xff;
+ else if (mv88e6xxx_num_databases(chip) == 256)
+ upper_mask = 0xf;
+ else
+ return -EOPNOTSUPP;
+
+ /* Port's default FID bits 3:0 are located in reg 0x06, offset 12 */
+ ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_BASE_VLAN);
+ if (ret < 0)
+ return ret;
+
+ fid = (ret & PORT_BASE_VLAN_FID_3_0_MASK) >> 12;
+
+ if (new) {
+ ret &= ~PORT_BASE_VLAN_FID_3_0_MASK;
+ ret |= (*new << 12) & PORT_BASE_VLAN_FID_3_0_MASK;
+
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_BASE_VLAN,
+ ret);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Port's default FID bits 11:4 are located in reg 0x05, offset 0 */
+ ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_CONTROL_1);
+ if (ret < 0)
+ return ret;
+
+ fid |= (ret & upper_mask) << 4;
+
+ if (new) {
+ ret &= ~upper_mask;
+ ret |= (*new >> 4) & upper_mask;
+
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL_1,
+ ret);
+ if (ret < 0)
+ return ret;
+
+ netdev_dbg(ds->ports[port].netdev,
+ "FID %d (was %d)\n", *new, fid);
+ }
+
+ if (old)
+ *old = fid;
+
+ return 0;
+}
+
+static int _mv88e6xxx_port_fid_get(struct mv88e6xxx_chip *chip,
+ int port, u16 *fid)
+{
+ return _mv88e6xxx_port_fid(chip, port, NULL, fid);
+}
+
+static int _mv88e6xxx_port_fid_set(struct mv88e6xxx_chip *chip,
+ int port, u16 fid)
+{
+ return _mv88e6xxx_port_fid(chip, port, &fid, NULL);
+}
+
+static int _mv88e6xxx_fid_new(struct mv88e6xxx_chip *chip, u16 *fid)
+{
+ DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
+ struct mv88e6xxx_vtu_stu_entry vlan;
+ int i, err;
+
+ bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);
+
+ /* Set every FID bit used by the (un)bridged ports */
+ for (i = 0; i < chip->info->num_ports; ++i) {
+ err = _mv88e6xxx_port_fid_get(chip, i, fid);
+ if (err)
+ return err;
+
+ set_bit(*fid, fid_bitmap);
+ }
+
+ /* Set every FID bit used by the VLAN entries */
+ err = _mv88e6xxx_vtu_vid_write(chip, GLOBAL_VTU_VID_MASK);
+ if (err)
+ return err;
+
+ do {
+ err = _mv88e6xxx_vtu_getnext(chip, &vlan);
+ if (err)
+ return err;
+
+ if (!vlan.valid)
+ break;
+
+ set_bit(vlan.fid, fid_bitmap);
+ } while (vlan.vid < GLOBAL_VTU_VID_MASK);
+
+	/* FID 0 is the reset value, used to indicate that multiple address
+	 * databases are not needed. Return the next available FID, starting
+	 * from 1.
+	 */
+ *fid = find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1);
+ if (unlikely(*fid >= mv88e6xxx_num_databases(chip)))
+ return -ENOSPC;
+
+ /* Clear the database */
+ return _mv88e6xxx_atu_flush(chip, *fid, true);
+}
+
+static int _mv88e6xxx_vtu_new(struct mv88e6xxx_chip *chip, u16 vid,
+ struct mv88e6xxx_vtu_stu_entry *entry)
+{
+ struct dsa_switch *ds = chip->ds;
+ struct mv88e6xxx_vtu_stu_entry vlan = {
+ .valid = true,
+ .vid = vid,
+ };
+ int i, err;
+
+ err = _mv88e6xxx_fid_new(chip, &vlan.fid);
+ if (err)
+ return err;
+
+ /* exclude all ports except the CPU and DSA ports */
+ for (i = 0; i < chip->info->num_ports; ++i)
+ vlan.data[i] = dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)
+ ? GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED
+ : GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
+
+ if (mv88e6xxx_6097_family(chip) || mv88e6xxx_6165_family(chip) ||
+ mv88e6xxx_6351_family(chip) || mv88e6xxx_6352_family(chip)) {
+ struct mv88e6xxx_vtu_stu_entry vstp;
+
+		/* Adding a VTU entry requires a valid STU entry. As VSTP is not
+		 * implemented, only one STU entry is needed to cover all VTU
+		 * entries. Thus, validate SID 0: seed GetNext with the highest
+		 * SID so that it wraps around to entry 0.
+		 */
+ vlan.sid = 0;
+ err = _mv88e6xxx_stu_getnext(chip, GLOBAL_VTU_SID_MASK, &vstp);
+ if (err)
+ return err;
+
+ if (vstp.sid != vlan.sid || !vstp.valid) {
+ memset(&vstp, 0, sizeof(vstp));
+ vstp.valid = true;
+ vstp.sid = vlan.sid;
+
+ err = _mv88e6xxx_stu_loadpurge(chip, &vstp);
+ if (err)
+ return err;
+ }
+ }
+
+ *entry = vlan;
+ return 0;
+}
+
+static int _mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid,
+ struct mv88e6xxx_vtu_stu_entry *entry, bool creat)
+{
+ int err;
+
+ if (!vid)
+ return -EINVAL;
+
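+	/* GetNext returns the first entry above the given VID, so start the
+	 * walk one below the target to land exactly on it if it exists.
+	 */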
+ err = _mv88e6xxx_vtu_vid_write(chip, vid - 1);
+ if (err)
+ return err;
+
+ err = _mv88e6xxx_vtu_getnext(chip, entry);
+ if (err)
+ return err;
+
+ if (entry->vid != vid || !entry->valid) {
+ if (!creat)
+ return -EOPNOTSUPP;
+		/* -ENOENT would've been more appropriate, but switchdev expects
+		 * -EOPNOTSUPP so that the bridge can fall back to a software
+		 * VLAN.
+		 */
+
+ err = _mv88e6xxx_vtu_new(chip, vid, entry);
+ }
+
+ return err;
+}
+
+static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
+ u16 vid_begin, u16 vid_end)
+{
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ struct mv88e6xxx_vtu_stu_entry vlan;
+ int i, err;
+
+ if (!vid_begin)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&chip->reg_lock);
+
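+	/* Seed GetNext just below the first VID of the range */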
+ err = _mv88e6xxx_vtu_vid_write(chip, vid_begin - 1);
+ if (err)
+ goto unlock;
+
+ do {
+ err = _mv88e6xxx_vtu_getnext(chip, &vlan);
+ if (err)
+ goto unlock;
+
+ if (!vlan.valid)
+ break;
+
+ if (vlan.vid > vid_end)
+ break;
+
+ for (i = 0; i < chip->info->num_ports; ++i) {
+ if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
+ continue;
+
+ if (vlan.data[i] ==
+ GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
+ continue;
+
+ if (chip->ports[i].bridge_dev ==
+ chip->ports[port].bridge_dev)
+ break; /* same bridge, check next VLAN */
+
+ netdev_warn(ds->ports[port].netdev,
+ "hardware VLAN %d already used by %s\n",
+ vlan.vid,
+ netdev_name(chip->ports[i].bridge_dev));
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+ } while (vlan.vid < vid_end);
+
+unlock:
+ mutex_unlock(&chip->reg_lock);
+
+ return err;
+}
+
+static const char * const mv88e6xxx_port_8021q_mode_names[] = {
+ [PORT_CONTROL_2_8021Q_DISABLED] = "Disabled",
+ [PORT_CONTROL_2_8021Q_FALLBACK] = "Fallback",
+ [PORT_CONTROL_2_8021Q_CHECK] = "Check",
+ [PORT_CONTROL_2_8021Q_SECURE] = "Secure",
+};
+
+static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering)
+{
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ u16 old, new = vlan_filtering ? PORT_CONTROL_2_8021Q_SECURE :
+ PORT_CONTROL_2_8021Q_DISABLED;
+ int ret;
+
+ if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&chip->reg_lock);
+
+ ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_CONTROL_2);
+ if (ret < 0)
+ goto unlock;
+
+ old = ret & PORT_CONTROL_2_8021Q_MASK;
+
+ if (new != old) {
+ ret &= ~PORT_CONTROL_2_8021Q_MASK;
+ ret |= new & PORT_CONTROL_2_8021Q_MASK;
+
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL_2,
+ ret);
+ if (ret < 0)
+ goto unlock;
+
+ netdev_dbg(ds->ports[port].netdev, "802.1Q Mode %s (was %s)\n",
+ mv88e6xxx_port_8021q_mode_names[new],
+ mv88e6xxx_port_8021q_mode_names[old]);
+ }
+
+ ret = 0;
+unlock:
+ mutex_unlock(&chip->reg_lock);
+
+ return ret;
+}
+
+static int
+mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ int err;
+
+ if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU))
+ return -EOPNOTSUPP;
+
+	/* If the requested port doesn't belong to the same bridge as the VLAN
+	 * members, do not support it (yet) and fall back to a software VLAN.
+	 */
+ err = mv88e6xxx_port_check_hw_vlan(ds, port, vlan->vid_begin,
+ vlan->vid_end);
+ if (err)
+ return err;
+
+ /* We don't need any dynamic resource from the kernel (yet),
+ * so skip the prepare phase.
+ */
+ return 0;
+}
+
+static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_chip *chip, int port,
+ u16 vid, bool untagged)
+{
+ struct mv88e6xxx_vtu_stu_entry vlan;
+ int err;
+
+ err = _mv88e6xxx_vtu_get(chip, vid, &vlan, true);
+ if (err)
+ return err;
+
+ vlan.data[port] = untagged ?
+ GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
+ GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;
+
+ return _mv88e6xxx_vtu_loadpurge(chip, &vlan);
+}
+
+static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ u16 vid;
+
+ if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU))
+ return;
+
+ mutex_lock(&chip->reg_lock);
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
+ if (_mv88e6xxx_port_vlan_add(chip, port, vid, untagged))
+ netdev_err(ds->ports[port].netdev,
+ "failed to add VLAN %d%c\n",
+ vid, untagged ? 'u' : 't');
+
+ if (pvid && _mv88e6xxx_port_pvid_set(chip, port, vlan->vid_end))
+ netdev_err(ds->ports[port].netdev, "failed to set PVID %d\n",
+ vlan->vid_end);
+
+ mutex_unlock(&chip->reg_lock);
+}
+
+static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_chip *chip,
+ int port, u16 vid)
+{
+ struct dsa_switch *ds = chip->ds;
+ struct mv88e6xxx_vtu_stu_entry vlan;
+ int i, err;
+
+ err = _mv88e6xxx_vtu_get(chip, vid, &vlan, false);
+ if (err)
+ return err;
+
+ /* Tell switchdev if this VLAN is handled in software */
+ if (vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
+ return -EOPNOTSUPP;
+
+ vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
+
+ /* keep the VLAN unless all ports are excluded */
+ vlan.valid = false;
+ for (i = 0; i < chip->info->num_ports; ++i) {
+ if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
+ continue;
+
+ if (vlan.data[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
+ vlan.valid = true;
+ break;
+ }
+ }
+
+ err = _mv88e6xxx_vtu_loadpurge(chip, &vlan);
+ if (err)
+ return err;
+
+ return _mv88e6xxx_atu_remove(chip, vlan.fid, port, false);
+}
+
+static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ u16 pvid, vid;
+ int err = 0;
+
+ if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&chip->reg_lock);
+
+ err = _mv88e6xxx_port_pvid_get(chip, port, &pvid);
+ if (err)
+ goto unlock;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ err = _mv88e6xxx_port_vlan_del(chip, port, vid);
+ if (err)
+ goto unlock;
+
+ if (vid == pvid) {
+ err = _mv88e6xxx_port_pvid_set(chip, port, 0);
+ if (err)
+ goto unlock;
+ }
+ }
+
+unlock:
+ mutex_unlock(&chip->reg_lock);
+
+ return err;
+}
+
+static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_chip *chip,
+ const unsigned char *addr)
+{
+ int i, ret;
+
+ for (i = 0; i < 3; i++) {
+ ret = _mv88e6xxx_reg_write(
+ chip, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
+ (addr[i * 2] << 8) | addr[i * 2 + 1]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int _mv88e6xxx_atu_mac_read(struct mv88e6xxx_chip *chip,
+ unsigned char *addr)
+{
+ int i, ret;
+
+ for (i = 0; i < 3; i++) {
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL,
+ GLOBAL_ATU_MAC_01 + i);
+ if (ret < 0)
+ return ret;
+ addr[i * 2] = ret >> 8;
+ addr[i * 2 + 1] = ret & 0xff;
+ }
+
+ return 0;
+}
+
+static int _mv88e6xxx_atu_load(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_atu_entry *entry)
+{
+ int ret;
+
+ ret = _mv88e6xxx_atu_wait(chip);
+ if (ret < 0)
+ return ret;
+
+ ret = _mv88e6xxx_atu_mac_write(chip, entry->mac);
+ if (ret < 0)
+ return ret;
+
+ ret = _mv88e6xxx_atu_data_write(chip, entry);
+ if (ret < 0)
+ return ret;
+
+ return _mv88e6xxx_atu_cmd(chip, entry->fid, GLOBAL_ATU_OP_LOAD_DB);
+}
+
+static int _mv88e6xxx_port_fdb_load(struct mv88e6xxx_chip *chip, int port,
+ const unsigned char *addr, u16 vid,
+ u8 state)
+{
+ struct mv88e6xxx_atu_entry entry = { 0 };
+ struct mv88e6xxx_vtu_stu_entry vlan;
+ int err;
+
+	/* A null VLAN ID corresponds to the port's private database */
+ if (vid == 0)
+ err = _mv88e6xxx_port_fid_get(chip, port, &vlan.fid);
+ else
+ err = _mv88e6xxx_vtu_get(chip, vid, &vlan, false);
+ if (err)
+ return err;
+
+ entry.fid = vlan.fid;
+ entry.state = state;
+ ether_addr_copy(entry.mac, addr);
+ if (state != GLOBAL_ATU_DATA_STATE_UNUSED) {
+ entry.trunk = false;
+ entry.portv_trunkid = BIT(port);
+ }
+
+ return _mv88e6xxx_atu_load(chip, &entry);
+}
+
+static int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
+{
+ /* We don't need any dynamic resource from the kernel (yet),
+ * so skip the prepare phase.
+ */
+ return 0;
+}
+
+static void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
+{
+ int state = is_multicast_ether_addr(fdb->addr) ?
+ GLOBAL_ATU_DATA_STATE_MC_STATIC :
+ GLOBAL_ATU_DATA_STATE_UC_STATIC;
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+
+ mutex_lock(&chip->reg_lock);
+ if (_mv88e6xxx_port_fdb_load(chip, port, fdb->addr, fdb->vid, state))
+ netdev_err(ds->ports[port].netdev,
+ "failed to load MAC address\n");
+ mutex_unlock(&chip->reg_lock);
+}
+
+static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb)
+{
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ int ret;
+
+ mutex_lock(&chip->reg_lock);
+ ret = _mv88e6xxx_port_fdb_load(chip, port, fdb->addr, fdb->vid,
+ GLOBAL_ATU_DATA_STATE_UNUSED);
+ mutex_unlock(&chip->reg_lock);
+
+ return ret;
+}
+
+static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid,
+ struct mv88e6xxx_atu_entry *entry)
+{
+ struct mv88e6xxx_atu_entry next = { 0 };
+ int ret;
+
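+	/* GetNext returns the entry following the MAC currently latched in
+	 * the ATU MAC registers, so the first iteration needs the caller to
+	 * seed them (see _mv88e6xxx_atu_mac_write).
+	 */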
+ next.fid = fid;
+
+ ret = _mv88e6xxx_atu_wait(chip);
+ if (ret < 0)
+ return ret;
+
+ ret = _mv88e6xxx_atu_cmd(chip, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
+ if (ret < 0)
+ return ret;
+
+ ret = _mv88e6xxx_atu_mac_read(chip, next.mac);
+ if (ret < 0)
+ return ret;
+
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_ATU_DATA);
+ if (ret < 0)
+ return ret;
+
+ next.state = ret & GLOBAL_ATU_DATA_STATE_MASK;
+ if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) {
+ unsigned int mask, shift;
+
+ if (ret & GLOBAL_ATU_DATA_TRUNK) {
+ next.trunk = true;
+ mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
+ shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
+ } else {
+ next.trunk = false;
+ mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
+ shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
+ }
+
+ next.portv_trunkid = (ret & mask) >> shift;
+ }
+
+ *entry = next;
+ return 0;
+}
+
+static int _mv88e6xxx_port_fdb_dump_one(struct mv88e6xxx_chip *chip,
+ u16 fid, u16 vid, int port,
+ struct switchdev_obj_port_fdb *fdb,
+ int (*cb)(struct switchdev_obj *obj))
+{
+ struct mv88e6xxx_atu_entry addr = {
+ .mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ };
+ int err;
+
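+	/* The broadcast address is the highest possible MAC: seeding the ATU
+	 * with it makes the first GetNext wrap around to the lowest entry of
+	 * this FID.
+	 */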
+ err = _mv88e6xxx_atu_mac_write(chip, addr.mac);
+ if (err)
+ return err;
+
+ do {
+ err = _mv88e6xxx_atu_getnext(chip, fid, &addr);
+ if (err)
+ break;
+
+ if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED)
+ break;
+
+ if (!addr.trunk && addr.portv_trunkid & BIT(port)) {
+ bool is_static = addr.state ==
+ (is_multicast_ether_addr(addr.mac) ?
+ GLOBAL_ATU_DATA_STATE_MC_STATIC :
+ GLOBAL_ATU_DATA_STATE_UC_STATIC);
+
+ fdb->vid = vid;
+ ether_addr_copy(fdb->addr, addr.mac);
+ fdb->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
+
+ err = cb(&fdb->obj);
+ if (err)
+ break;
+ }
+ } while (!is_broadcast_ether_addr(addr.mac));
+
+ return err;
+}
+
+static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_fdb *fdb,
+ int (*cb)(struct switchdev_obj *obj))
+{
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ struct mv88e6xxx_vtu_stu_entry vlan = {
+ .vid = GLOBAL_VTU_VID_MASK, /* all ones */
+ };
+ u16 fid;
+ int err;
+
+ mutex_lock(&chip->reg_lock);
+
+ /* Dump port's default Filtering Information Database (VLAN ID 0) */
+ err = _mv88e6xxx_port_fid_get(chip, port, &fid);
+ if (err)
+ goto unlock;
+
+ err = _mv88e6xxx_port_fdb_dump_one(chip, fid, 0, port, fdb, cb);
+ if (err)
+ goto unlock;
+
+ /* Dump VLANs' Filtering Information Databases */
+ err = _mv88e6xxx_vtu_vid_write(chip, vlan.vid);
+ if (err)
+ goto unlock;
+
+ do {
+ err = _mv88e6xxx_vtu_getnext(chip, &vlan);
+ if (err)
+ break;
+
+ if (!vlan.valid)
+ break;
+
+ err = _mv88e6xxx_port_fdb_dump_one(chip, vlan.fid, vlan.vid,
+ port, fdb, cb);
+ if (err)
+ break;
+ } while (vlan.vid < GLOBAL_VTU_VID_MASK);
+
+unlock:
+ mutex_unlock(&chip->reg_lock);
+
+ return err;
+}
+
+static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
+{
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ int i, err = 0;
+
+ mutex_lock(&chip->reg_lock);
+
+ /* Assign the bridge and remap each port's VLANTable */
+ chip->ports[port].bridge_dev = bridge;
+
+ for (i = 0; i < chip->info->num_ports; ++i) {
+ if (chip->ports[i].bridge_dev == bridge) {
+ err = _mv88e6xxx_port_based_vlan_map(chip, i);
+ if (err)
+ break;
+ }
+ }
+
+ mutex_unlock(&chip->reg_lock);
+
+ return err;
+}
+
+static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
+{
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ struct net_device *bridge = chip->ports[port].bridge_dev;
+ int i;
+
+ mutex_lock(&chip->reg_lock);
+
+ /* Unassign the bridge and remap each port's VLANTable */
+ chip->ports[port].bridge_dev = NULL;
+
+ for (i = 0; i < chip->info->num_ports; ++i)
+ if (i == port || chip->ports[i].bridge_dev == bridge)
+ if (_mv88e6xxx_port_based_vlan_map(chip, i))
+ netdev_warn(ds->ports[i].netdev,
+ "failed to remap\n");
+
+ mutex_unlock(&chip->reg_lock);
+}
+
+static int _mv88e6xxx_mdio_page_write(struct mv88e6xxx_chip *chip,
+ int port, int page, int reg, int val)
+{
+ int ret;
+
+ ret = mv88e6xxx_mdio_write_indirect(chip, port, 0x16, page);
+ if (ret < 0)
+ goto restore_page_0;
+
+ ret = mv88e6xxx_mdio_write_indirect(chip, port, reg, val);
+restore_page_0:
+ mv88e6xxx_mdio_write_indirect(chip, port, 0x16, 0x0);
+
+ return ret;
+}
+
+static int _mv88e6xxx_mdio_page_read(struct mv88e6xxx_chip *chip,
+ int port, int page, int reg)
+{
+ int ret;
+
+ ret = mv88e6xxx_mdio_write_indirect(chip, port, 0x16, page);
+ if (ret < 0)
+ goto restore_page_0;
+
+ ret = mv88e6xxx_mdio_read_indirect(chip, port, reg);
+restore_page_0:
+ mv88e6xxx_mdio_write_indirect(chip, port, 0x16, 0x0);
+
+ return ret;
+}
+
+static int mv88e6xxx_switch_reset(struct mv88e6xxx_chip *chip)
+{
+ bool ppu_active = mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU_ACTIVE);
+ u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
+ struct gpio_desc *gpiod = chip->reset;
+ unsigned long timeout;
+ int ret;
+ int i;
+
+	/* Set all ports to the Disabled state by clearing the two
+	 * PortState bits.
+	 */
+ for (i = 0; i < chip->info->num_ports; i++) {
+ ret = _mv88e6xxx_reg_read(chip, REG_PORT(i), PORT_CONTROL);
+ if (ret < 0)
+ return ret;
+
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(i), PORT_CONTROL,
+ ret & 0xfffc);
+ if (ret)
+ return ret;
+ }
+
+ /* Wait for transmit queues to drain. */
+ usleep_range(2000, 4000);
+
+ /* If there is a gpio connected to the reset pin, toggle it */
+ if (gpiod) {
+ gpiod_set_value_cansleep(gpiod, 1);
+ usleep_range(10000, 20000);
+ gpiod_set_value_cansleep(gpiod, 0);
+ usleep_range(10000, 20000);
+ }
+
+ /* Reset the switch. Keep the PPU active if requested. The PPU
+ * needs to be active to support indirect phy register access
+ * through global registers 0x18 and 0x19.
+ */
+ if (ppu_active)
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, 0x04, 0xc000);
+ else
+ ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, 0x04, 0xc400);
+ if (ret)
+ return ret;
+
+ /* Wait up to one second for reset to complete. */
+ timeout = jiffies + 1 * HZ;
+ while (time_before(jiffies, timeout)) {
+ ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, 0x00);
+ if (ret < 0)
+ return ret;
+
+ if ((ret & is_reset) == is_reset)
+ break;
+ usleep_range(1000, 2000);
+ }
+ if (time_after(jiffies, timeout))
+ ret = -ETIMEDOUT;
+ else
+ ret = 0;
+
+ return ret;
+}
+
+static int mv88e6xxx_power_on_serdes(struct mv88e6xxx_chip *chip)
+{
+ int ret;
+
+ ret = _mv88e6xxx_mdio_page_read(chip, REG_FIBER_SERDES,
+ PAGE_FIBER_SERDES, MII_BMCR);
+ if (ret < 0)
+ return ret;
+
+ if (ret & BMCR_PDOWN) {
+ ret &= ~BMCR_PDOWN;
+ ret = _mv88e6xxx_mdio_page_write(chip, REG_FIBER_SERDES,
+ PAGE_FIBER_SERDES, MII_BMCR,
+ ret);
+ }
+
+ return ret;
+}
+
+static int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port,
+ int reg, u16 *val)
+{
+ int addr = chip->info->port_base_addr + port;
+
+ if (port >= chip->info->num_ports)
+ return -EINVAL;
+
+ return mv88e6xxx_read(chip, addr, reg, val);
+}
+
+static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
+{
+ struct dsa_switch *ds = chip->ds;
+ int ret;
+ u16 reg;
+
+ if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
+ mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
+ mv88e6xxx_6185_family(chip) || mv88e6xxx_6095_family(chip) ||
+ mv88e6xxx_6065_family(chip) || mv88e6xxx_6320_family(chip)) {
+ /* MAC Forcing register: don't force link, speed,
+ * duplex or flow control state to any particular
+ * values on physical ports, but force the CPU port
+ * and all DSA ports to their maximum bandwidth and
+ * full duplex.
+ */
+		ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_PCS_CTRL);
+		if (ret < 0)
+			return ret;
+
+		reg = ret;
+ if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
+ reg &= ~PORT_PCS_CTRL_UNFORCED;
+ reg |= PORT_PCS_CTRL_FORCE_LINK |
+ PORT_PCS_CTRL_LINK_UP |
+ PORT_PCS_CTRL_DUPLEX_FULL |
+ PORT_PCS_CTRL_FORCE_DUPLEX;
+ if (mv88e6xxx_6065_family(chip))
+ reg |= PORT_PCS_CTRL_100;
+ else
+ reg |= PORT_PCS_CTRL_1000;
+ } else {
+ reg |= PORT_PCS_CTRL_UNFORCED;
+ }
+
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
+ PORT_PCS_CTRL, reg);
+ if (ret)
+ return ret;
+ }
+
+ /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
+ * disable Header mode, enable IGMP/MLD snooping, disable VLAN
+ * tunneling, determine priority by looking at 802.1p and IP
+ * priority fields (IP prio has precedence), and set STP state
+ * to Forwarding.
+ *
+ * If this is the CPU link, use DSA or EDSA tagging depending
+ * on which tagging mode was configured.
+ *
+ * If this is a link to another switch, use DSA tagging mode.
+ *
+ * If this is the upstream port for this switch, enable
+ * forwarding of unknown unicasts and multicasts.
+ */
+ reg = 0;
+ if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
+ mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
+ mv88e6xxx_6095_family(chip) || mv88e6xxx_6065_family(chip) ||
+ mv88e6xxx_6185_family(chip) || mv88e6xxx_6320_family(chip))
+ reg = PORT_CONTROL_IGMP_MLD_SNOOP |
+ PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
+ PORT_CONTROL_STATE_FORWARDING;
+ if (dsa_is_cpu_port(ds, port)) {
+ if (mv88e6xxx_6095_family(chip) || mv88e6xxx_6185_family(chip))
+ reg |= PORT_CONTROL_DSA_TAG;
+ if (mv88e6xxx_6352_family(chip) ||
+ mv88e6xxx_6351_family(chip) ||
+ mv88e6xxx_6165_family(chip) ||
+ mv88e6xxx_6097_family(chip) ||
+ mv88e6xxx_6320_family(chip)) {
+ reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA |
+ PORT_CONTROL_FORWARD_UNKNOWN |
+ PORT_CONTROL_FORWARD_UNKNOWN_MC;
+ }
+
+ if (mv88e6xxx_6352_family(chip) ||
+ mv88e6xxx_6351_family(chip) ||
+ mv88e6xxx_6165_family(chip) ||
+ mv88e6xxx_6097_family(chip) ||
+ mv88e6xxx_6095_family(chip) ||
+ mv88e6xxx_6065_family(chip) ||
+ mv88e6xxx_6185_family(chip) ||
+ mv88e6xxx_6320_family(chip)) {
+ reg |= PORT_CONTROL_EGRESS_ADD_TAG;
+ }
+ }
+ if (dsa_is_dsa_port(ds, port)) {
+ if (mv88e6xxx_6095_family(chip) ||
+ mv88e6xxx_6185_family(chip))
+ reg |= PORT_CONTROL_DSA_TAG;
+ if (mv88e6xxx_6352_family(chip) ||
+ mv88e6xxx_6351_family(chip) ||
+ mv88e6xxx_6165_family(chip) ||
+ mv88e6xxx_6097_family(chip) ||
+ mv88e6xxx_6320_family(chip)) {
+ reg |= PORT_CONTROL_FRAME_MODE_DSA;
+ }
+
+ if (port == dsa_upstream_port(ds))
+ reg |= PORT_CONTROL_FORWARD_UNKNOWN |
+ PORT_CONTROL_FORWARD_UNKNOWN_MC;
+ }
+ if (reg) {
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
+ PORT_CONTROL, reg);
+ if (ret)
+ return ret;
+ }
+
+ /* If this port is connected to a SerDes, make sure the SerDes is not
+ * powered down.
+ */
+ if (mv88e6xxx_6352_family(chip)) {
+ ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_STATUS);
+ if (ret < 0)
+ return ret;
+ ret &= PORT_STATUS_CMODE_MASK;
+ if ((ret == PORT_STATUS_CMODE_100BASE_X) ||
+ (ret == PORT_STATUS_CMODE_1000BASE_X) ||
+ (ret == PORT_STATUS_CMODE_SGMII)) {
+ ret = mv88e6xxx_power_on_serdes(chip);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ /* Port Control 2: don't force a good FCS, set the maximum frame size to
+ * 10240 bytes, disable 802.1q tags checking, don't discard tagged or
+ * untagged frames on this port, do a destination address lookup on all
+ * received packets as usual, disable ARP mirroring and don't send a
+ * copy of all transmitted/received frames on this port to the CPU.
+ */
+ reg = 0;
+ if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
+ mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
+ mv88e6xxx_6095_family(chip) || mv88e6xxx_6320_family(chip) ||
+ mv88e6xxx_6185_family(chip))
+ reg = PORT_CONTROL_2_MAP_DA;
+
+ if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
+ mv88e6xxx_6165_family(chip) || mv88e6xxx_6320_family(chip))
+ reg |= PORT_CONTROL_2_JUMBO_10240;
+
+ if (mv88e6xxx_6095_family(chip) || mv88e6xxx_6185_family(chip)) {
+ /* Set the upstream port this port should use */
+ reg |= dsa_upstream_port(ds);
+ /* enable forwarding of unknown multicast addresses to
+ * the upstream port
+ */
+ if (port == dsa_upstream_port(ds))
+ reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
+ }
+
+ reg |= PORT_CONTROL_2_8021Q_DISABLED;
+
+ if (reg) {
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
+ PORT_CONTROL_2, reg);
+ if (ret)
+ return ret;
+ }
+
+ /* Port Association Vector: when learning source addresses
+ * of packets, add the address to the address database using
+ * a port bitmap that has only the bit for this port set and
+ * the other bits clear.
+ */
+ reg = 1 << port;
+ /* Disable learning for CPU port */
+ if (dsa_is_cpu_port(ds, port))
+ reg = 0;
+
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_ASSOC_VECTOR,
+ reg);
+ if (ret)
+ return ret;
+
+ /* Egress rate control 2: disable egress rate control. */
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_RATE_CONTROL_2,
+ 0x0000);
+ if (ret)
+ return ret;
+
+ if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
+ mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
+ mv88e6xxx_6320_family(chip)) {
+ /* Do not limit the period of time that this port can
+ * be paused for by the remote end or the period of
+ * time that this port can pause the remote end.
+ */
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
+ PORT_PAUSE_CTRL, 0x0000);
+ if (ret)
+ return ret;
+
+ /* Port ATU control: disable limiting the number of
+ * address database entries that this port is allowed
+ * to use.
+ */
+		ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
+					   PORT_ATU_CONTROL, 0x0000);
+		if (ret)
+			return ret;
+
+ /* Priority Override: disable DA, SA and VTU priority
+ * override.
+ */
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
+ PORT_PRI_OVERRIDE, 0x0000);
+ if (ret)
+ return ret;
+
+ /* Port Ethertype: use the Ethertype DSA Ethertype
+ * value.
+ */
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
+ PORT_ETH_TYPE, ETH_P_EDSA);
+ if (ret)
+ return ret;
+ /* Tag Remap: use an identity 802.1p prio -> switch
+ * prio mapping.
+ */
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
+ PORT_TAG_REGMAP_0123, 0x3210);
+ if (ret)
+ return ret;
+
+ /* Tag Remap 2: use an identity 802.1p prio -> switch
+ * prio mapping.
+ */
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
+ PORT_TAG_REGMAP_4567, 0x7654);
+ if (ret)
+ return ret;
+ }
+
+ /* Rate Control: disable ingress rate limiting. */
+ if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
+ mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
+ mv88e6xxx_6320_family(chip)) {
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
+ PORT_RATE_CONTROL, 0x0001);
+ if (ret)
+ return ret;
+ } else if (mv88e6xxx_6185_family(chip) || mv88e6xxx_6095_family(chip)) {
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
+ PORT_RATE_CONTROL, 0x0000);
+ if (ret)
+ return ret;
+ }
+
+ /* Port Control 1: disable trunking, disable sending
+ * learning messages to this port.
+ */
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL_1,
+ 0x0000);
+ if (ret)
+ return ret;
+
+ /* Port based VLAN map: give each port the same default address
+ * database, and allow bidirectional communication between the
+ * CPU and DSA port(s), and the other ports.
+ */
+ ret = _mv88e6xxx_port_fid_set(chip, port, 0);
+ if (ret)
+ return ret;
+
+ ret = _mv88e6xxx_port_based_vlan_map(chip, port);
+ if (ret)
+ return ret;
+
+ /* Default VLAN ID and priority: don't set a default VLAN
+ * ID, and set the default packet priority to zero.
+ */
+ ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_DEFAULT_VLAN,
+ 0x0000);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
+{
+ int err;
+
+ err = mv88e6xxx_write(chip, REG_GLOBAL, GLOBAL_MAC_01,
+ (addr[0] << 8) | addr[1]);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_write(chip, REG_GLOBAL, GLOBAL_MAC_23,
+ (addr[2] << 8) | addr[3]);
+ if (err)
+ return err;
+
+ return mv88e6xxx_write(chip, REG_GLOBAL, GLOBAL_MAC_45,
+ (addr[4] << 8) | addr[5]);
+}
+
+static int mv88e6xxx_g1_set_age_time(struct mv88e6xxx_chip *chip,
+ unsigned int msecs)
+{
+ const unsigned int coeff = chip->info->age_time_coeff;
+ const unsigned int min = 0x01 * coeff;
+ const unsigned int max = 0xff * coeff;
+ u8 age_time;
+ u16 val;
+ int err;
+
+ if (msecs < min || msecs > max)
+ return -ERANGE;
+
+ /* Round to nearest multiple of coeff */
+ age_time = (msecs + coeff / 2) / coeff;
+
+ err = mv88e6xxx_read(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL, &val);
+ if (err)
+ return err;
+
+	/* AgeTime occupies bits 11:4 */
+ val &= ~0xff0;
+ val |= age_time << 4;
+
+ return mv88e6xxx_write(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL, val);
+}
+
+static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds,
+ unsigned int ageing_time)
+{
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ int err;
+
+ mutex_lock(&chip->reg_lock);
+ err = mv88e6xxx_g1_set_age_time(chip, ageing_time);
+ mutex_unlock(&chip->reg_lock);
+
+ return err;
+}
+
+static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip)
+{
+ struct dsa_switch *ds = chip->ds;
+ u32 upstream_port = dsa_upstream_port(ds);
+ u16 reg;
+ int err;
+
+ /* Enable the PHY Polling Unit if present, don't discard any packets,
+ * and mask all interrupt sources.
+ */
+ reg = 0;
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU) ||
+ mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU_ACTIVE))
+ reg |= GLOBAL_CONTROL_PPU_ENABLE;
+
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL, reg);
+ if (err)
+ return err;
+
+ /* Configure the upstream port, and configure it as the port to which
+ * ingress and egress and ARP monitor frames are to be sent.
+ */
+ reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
+ upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
+ upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT;
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_MONITOR_CONTROL,
+ reg);
+ if (err)
+ return err;
+
+ /* Disable remote management, and set the switch's DSA device number. */
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL_2,
+ GLOBAL_CONTROL_2_MULTIPLE_CASCADE |
+ (ds->index & 0x1f));
+ if (err)
+ return err;
+
+ /* Clear all the VTU and STU entries */
+ err = _mv88e6xxx_vtu_stu_flush(chip);
+ if (err < 0)
+ return err;
+
+ /* Set the default address aging time to 5 minutes, and
+ * enable address learn messages to be sent to all message
+ * ports.
+ */
+ err = mv88e6xxx_write(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL,
+ GLOBAL_ATU_CONTROL_LEARN2ALL);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_set_age_time(chip, 300000);
+ if (err)
+ return err;
+
+ /* Clear all ATU entries */
+ err = _mv88e6xxx_atu_flush(chip, 0, true);
+ if (err)
+ return err;
+
+ /* Configure the IP ToS mapping registers. */
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
+ if (err)
+ return err;
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
+ if (err)
+ return err;
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
+ if (err)
+ return err;
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
+ if (err)
+ return err;
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
+ if (err)
+ return err;
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
+ if (err)
+ return err;
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
+ if (err)
+ return err;
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
+ if (err)
+ return err;
+
+ /* Configure the IEEE 802.1p priority mapping register. */
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
+ if (err)
+ return err;
+
+ /* Clear the statistics counters for all ports */
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_STATS_OP,
+ GLOBAL_STATS_OP_FLUSH_ALL);
+ if (err)
+ return err;
+
+ /* Wait for the flush to complete. */
+ err = _mv88e6xxx_stats_wait(chip);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip,
+ int target, int port)
+{
+ u16 val = (target << 8) | (port & 0xf);
+
+ return mv88e6xxx_update(chip, REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING, val);
+}
+
+static int mv88e6xxx_g2_set_device_mapping(struct mv88e6xxx_chip *chip)
+{
+ int target, port;
+ int err;
+
+ /* Initialize the routing port to the 32 possible target devices */
+ for (target = 0; target < 32; ++target) {
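+		/* Port 0xf is out of range for every chip in this family and
+		 * effectively leaves the target device unrouted.
+		 */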
+ port = 0xf;
+
+ if (target < DSA_MAX_SWITCHES) {
+ port = chip->ds->rtable[target];
+ if (port == DSA_RTABLE_NONE)
+ port = 0xf;
+ }
+
+ err = mv88e6xxx_g2_device_mapping_write(chip, target, port);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
+					 bool hash, u16 mask)
+{
+	const u16 port_mask = BIT(chip->info->num_ports) - 1;
+	u16 val = (num << 12) | (mask & port_mask);
+
+	if (hash)
+		val |= GLOBAL2_TRUNK_MASK_HASK;
+
+ return mv88e6xxx_update(chip, REG_GLOBAL2, GLOBAL2_TRUNK_MASK, val);
+}
+
+static int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id,
+ u16 map)
+{
+ const u16 port_mask = BIT(chip->info->num_ports) - 1;
+ u16 val = (id << 11) | (map & port_mask);
+
+ return mv88e6xxx_update(chip, REG_GLOBAL2, GLOBAL2_TRUNK_MAPPING, val);
+}
+
+static int mv88e6xxx_g2_clear_trunk(struct mv88e6xxx_chip *chip)
+{
+ const u16 port_mask = BIT(chip->info->num_ports) - 1;
+ int i, err;
+
+ /* Clear all eight possible Trunk Mask vectors */
+ for (i = 0; i < 8; ++i) {
+ err = mv88e6xxx_g2_trunk_mask_write(chip, i, false, port_mask);
+ if (err)
+ return err;
+ }
+
+ /* Clear all sixteen possible Trunk ID routing vectors */
+ for (i = 0; i < 16; ++i) {
+ err = mv88e6xxx_g2_trunk_mapping_write(chip, i, 0);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mv88e6xxx_g2_clear_irl(struct mv88e6xxx_chip *chip)
+{
+ int port, err;
+
+ /* Init all Ingress Rate Limit resources of all ports */
+ for (port = 0; port < chip->info->num_ports; ++port) {
+ /* XXX newer chips (like 88E6390) have different 2-bit ops */
+ err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_IRL_CMD,
+ GLOBAL2_IRL_CMD_OP_INIT_ALL |
+ (port << 8));
+ if (err)
+ break;
+
+ /* Wait for the operation to complete */
+ err = _mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_IRL_CMD,
+ GLOBAL2_IRL_CMD_BUSY);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+/* Indirect write to the Switch MAC/WoL/WoF register */
+static int mv88e6xxx_g2_switch_mac_write(struct mv88e6xxx_chip *chip,
+ unsigned int pointer, u8 data)
+{
+ u16 val = (pointer << 8) | data;
+
+ return mv88e6xxx_update(chip, REG_GLOBAL2, GLOBAL2_SWITCH_MAC, val);
+}
+
+static int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
+{
+ int i, err;
+
+ for (i = 0; i < 6; i++) {
+ err = mv88e6xxx_g2_switch_mac_write(chip, i, addr[i]);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int mv88e6xxx_g2_pot_write(struct mv88e6xxx_chip *chip, int pointer,
+ u8 data)
+{
+ u16 val = (pointer << 8) | (data & 0x7);
+
+ return mv88e6xxx_update(chip, REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE, val);
+}
+
+static int mv88e6xxx_g2_clear_pot(struct mv88e6xxx_chip *chip)
+{
+ int i, err;
+
+ /* Clear all sixteen possible Priority Override entries */
+ for (i = 0; i < 16; i++) {
+ err = mv88e6xxx_g2_pot_write(chip, i, 0);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
+{
+ return _mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_EEPROM_CMD,
+ GLOBAL2_EEPROM_CMD_BUSY |
+ GLOBAL2_EEPROM_CMD_RUNNING);
+}
+
+static int mv88e6xxx_g2_eeprom_cmd(struct mv88e6xxx_chip *chip, u16 cmd)
+{
+ int err;
+
+ err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_EEPROM_CMD, cmd);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_eeprom_wait(chip);
+}
+
+static int mv88e6xxx_g2_eeprom_read16(struct mv88e6xxx_chip *chip,
+ u8 addr, u16 *data)
+{
+ u16 cmd = GLOBAL2_EEPROM_CMD_OP_READ | addr;
+ int err;
+
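+	/* Wait for any in-flight EEPROM operation to complete before issuing
+	 * the read; the result is then latched in the EEPROM data register.
+	 */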
+ err = mv88e6xxx_g2_eeprom_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_eeprom_cmd(chip, cmd);
+ if (err)
+ return err;
+
+ return mv88e6xxx_read(chip, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data);
+}
+
+static int mv88e6xxx_g2_eeprom_write16(struct mv88e6xxx_chip *chip,
+ u8 addr, u16 data)
+{
+ u16 cmd = GLOBAL2_EEPROM_CMD_OP_WRITE | addr;
+ int err;
+
+ err = mv88e6xxx_g2_eeprom_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_eeprom_cmd(chip, cmd);
+}
+
+static int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip)
+{
+ u16 reg;
+ int err;
+
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_2X)) {
+ /* Consider the frames with reserved multicast destination
+ * addresses matching 01:80:c2:00:00:2x as MGMT.
+ */
+ err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_MGMT_EN_2X,
+ 0xffff);
+ if (err)
+ return err;
+ }
+
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_0X)) {
+ /* Consider the frames with reserved multicast destination
+ * addresses matching 01:80:c2:00:00:0x as MGMT.
+ */
+ err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_MGMT_EN_0X,
+ 0xffff);
+ if (err)
+ return err;
+ }
+
+ /* Ignore removed tag data on doubly tagged packets, disable
+ * flow control messages, force flow control priority to the
+ * highest, and send all special multicast frames to the CPU
+ * port at the highest priority.
+ */
+ reg = GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI | (0x7 << 4);
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_0X) ||
+ mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_2X))
+ reg |= GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x7;
+ err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_SWITCH_MGMT, reg);
+ if (err)
+ return err;
+
+ /* Program the DSA routing table. */
+ err = mv88e6xxx_g2_set_device_mapping(chip);
+ if (err)
+ return err;
+
+ /* Clear all trunk masks and mapping. */
+ err = mv88e6xxx_g2_clear_trunk(chip);
+ if (err)
+ return err;
+
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_IRL)) {
+ /* Disable ingress rate limiting by resetting all per port
+ * ingress rate limit resources to their initial state.
+ */
+ err = mv88e6xxx_g2_clear_irl(chip);
+ if (err)
+ return err;
+ }
+
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_PVT)) {
+ /* Initialize Cross-chip Port VLAN Table to reset defaults */
+ err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_PVT_ADDR,
+ GLOBAL2_PVT_ADDR_OP_INIT_ONES);
+ if (err)
+ return err;
+ }
+
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_POT)) {
+ /* Clear the priority override table. */
+ err = mv88e6xxx_g2_clear_pot(chip);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mv88e6xxx_setup(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ int err;
+ int i;
+
+ chip->ds = ds;
+ ds->slave_mii_bus = chip->mdio_bus;
+
+ mutex_lock(&chip->reg_lock);
+
+ err = mv88e6xxx_switch_reset(chip);
+ if (err)
+ goto unlock;
+
+ /* Setup Switch Port Registers */
+ for (i = 0; i < chip->info->num_ports; i++) {
+ err = mv88e6xxx_setup_port(chip, i);
+ if (err)
+ goto unlock;
+ }
+
+ /* Setup Switch Global 1 Registers */
+ err = mv88e6xxx_g1_setup(chip);
+ if (err)
+ goto unlock;
+
+ /* Setup Switch Global 2 Registers */
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_GLOBAL2)) {
+ err = mv88e6xxx_g2_setup(chip);
+ if (err)
+ goto unlock;
+ }
+
+unlock:
+ mutex_unlock(&chip->reg_lock);
+
+ return err;
+}
+
+static int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr)
+{
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ int err;
+
+ mutex_lock(&chip->reg_lock);
+
+ /* Has an indirect Switch MAC/WoL/WoF register in Global 2? */
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_SWITCH_MAC))
+ err = mv88e6xxx_g2_set_switch_mac(chip, addr);
+ else
+ err = mv88e6xxx_g1_set_switch_mac(chip, addr);
+
+ mutex_unlock(&chip->reg_lock);
+
+ return err;
+}
+
+#ifdef CONFIG_NET_DSA_HWMON
+static int mv88e6xxx_mdio_page_read(struct dsa_switch *ds, int port, int page,
+ int reg)
+{
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ int ret;
+
+ mutex_lock(&chip->reg_lock);
+ ret = _mv88e6xxx_mdio_page_read(chip, port, page, reg);
+ mutex_unlock(&chip->reg_lock);
+
+ return ret;
+}
+
+static int mv88e6xxx_mdio_page_write(struct dsa_switch *ds, int port, int page,
+ int reg, int val)
+{
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ int ret;
+
+ mutex_lock(&chip->reg_lock);
+ ret = _mv88e6xxx_mdio_page_write(chip, port, page, reg, val);
+ mutex_unlock(&chip->reg_lock);
+
+ return ret;
+}
+#endif
+
+static int mv88e6xxx_port_to_mdio_addr(struct mv88e6xxx_chip *chip, int port)
+{
+ if (port >= 0 && port < chip->info->num_ports)
+ return port;
+ return -EINVAL;
+}
+
+static int mv88e6xxx_mdio_read(struct mii_bus *bus, int port, int regnum)
+{
+ struct mv88e6xxx_chip *chip = bus->priv;
+ int addr = mv88e6xxx_port_to_mdio_addr(chip, port);
+ int ret;
+
+ if (addr < 0)
+ return 0xffff;
+
+ mutex_lock(&chip->reg_lock);
+
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU))
+ ret = mv88e6xxx_mdio_read_ppu(chip, addr, regnum);
+ else if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_SMI_PHY))
+ ret = mv88e6xxx_mdio_read_indirect(chip, addr, regnum);
+ else
+ ret = mv88e6xxx_mdio_read_direct(chip, addr, regnum);
+
+ mutex_unlock(&chip->reg_lock);
+ return ret;
+}
+
+static int mv88e6xxx_mdio_write(struct mii_bus *bus, int port, int regnum,
+ u16 val)
+{
+ struct mv88e6xxx_chip *chip = bus->priv;
+ int addr = mv88e6xxx_port_to_mdio_addr(chip, port);
+ int ret;
+
+ if (addr < 0)
+		return addr;
+
+ mutex_lock(&chip->reg_lock);
+
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU))
+ ret = mv88e6xxx_mdio_write_ppu(chip, addr, regnum, val);
+ else if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_SMI_PHY))
+ ret = mv88e6xxx_mdio_write_indirect(chip, addr, regnum, val);
+ else
+ ret = mv88e6xxx_mdio_write_direct(chip, addr, regnum, val);
+
+ mutex_unlock(&chip->reg_lock);
+ return ret;
+}
+
+static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
+ struct device_node *np)
+{
+ static int index;
+ struct mii_bus *bus;
+ int err;
+
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU))
+ mv88e6xxx_ppu_state_init(chip);
+
+ if (np)
+ chip->mdio_np = of_get_child_by_name(np, "mdio");
+
+ bus = devm_mdiobus_alloc(chip->dev);
+ if (!bus)
+ return -ENOMEM;
+
+ bus->priv = (void *)chip;
+ if (np) {
+ bus->name = np->full_name;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s", np->full_name);
+ } else {
+ bus->name = "mv88e6xxx SMI";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "mv88e6xxx-%d", index++);
+ }
+
+ bus->read = mv88e6xxx_mdio_read;
+ bus->write = mv88e6xxx_mdio_write;
+ bus->parent = chip->dev;
+
+ if (chip->mdio_np)
+ err = of_mdiobus_register(bus, chip->mdio_np);
+ else
+ err = mdiobus_register(bus);
+ if (err) {
+ dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err);
+ goto out;
+ }
+ chip->mdio_bus = bus;
+
+ return 0;
+
+out:
+ if (chip->mdio_np)
+ of_node_put(chip->mdio_np);
+
+ return err;
+}
+
+static void mv88e6xxx_mdio_unregister(struct mv88e6xxx_chip *chip)
+{
+ struct mii_bus *bus = chip->mdio_bus;
+
+ mdiobus_unregister(bus);
+
+ if (chip->mdio_np)
+ of_node_put(chip->mdio_np);
+}
+
+#ifdef CONFIG_NET_DSA_HWMON
+
+static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
+{
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ int ret;
+ int val;
+
+ *temp = 0;
+
+ mutex_lock(&chip->reg_lock);
+
+ ret = mv88e6xxx_mdio_write_direct(chip, 0x0, 0x16, 0x6);
+ if (ret < 0)
+ goto error;
+
+ /* Enable temperature sensor */
+ ret = mv88e6xxx_mdio_read_direct(chip, 0x0, 0x1a);
+ if (ret < 0)
+ goto error;
+
+ ret = mv88e6xxx_mdio_write_direct(chip, 0x0, 0x1a, ret | (1 << 5));
+ if (ret < 0)
+ goto error;
+
+ /* Wait for temperature to stabilize */
+ usleep_range(10000, 12000);
+
+ val = mv88e6xxx_mdio_read_direct(chip, 0x0, 0x1a);
+ if (val < 0) {
+ ret = val;
+ goto error;
+ }
+
+ /* Disable temperature sensor */
+ ret = mv88e6xxx_mdio_write_direct(chip, 0x0, 0x1a, ret & ~(1 << 5));
+ if (ret < 0)
+ goto error;
+
+ *temp = ((val & 0x1f) - 5) * 5;
+
+error:
+ mv88e6xxx_mdio_write_direct(chip, 0x0, 0x16, 0x0);
+ mutex_unlock(&chip->reg_lock);
+ return ret;
+}
+
+static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
+{
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ int phy = mv88e6xxx_6320_family(chip) ? 3 : 0;
+ int ret;
+
+ *temp = 0;
+
+ ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 27);
+ if (ret < 0)
+ return ret;
+
+ *temp = (ret & 0xff) - 25;
+
+ return 0;
+}
+
+static int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
+{
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+
+ if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP))
+ return -EOPNOTSUPP;
+
+ if (mv88e6xxx_6320_family(chip) || mv88e6xxx_6352_family(chip))
+ return mv88e63xx_get_temp(ds, temp);
+
+ return mv88e61xx_get_temp(ds, temp);
+}
+
+static int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
+{
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ int phy = mv88e6xxx_6320_family(chip) ? 3 : 0;
+ int ret;
+
+ if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT))
+ return -EOPNOTSUPP;
+
+ *temp = 0;
+
+ ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 26);
+ if (ret < 0)
+ return ret;
+
+ *temp = (((ret >> 8) & 0x1f) * 5) - 25;
+
+ return 0;
+}
+
+static int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
+{
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ int phy = mv88e6xxx_6320_family(chip) ? 3 : 0;
+ int ret;
+
+ if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT))
+ return -EOPNOTSUPP;
+
+ ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 26);
+ if (ret < 0)
+ return ret;
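+	/* The limit is a 5-bit field in steps of 5 degrees C with a -25
+	 * degree offset, hence val = temp / 5 + 5 (see the matching decode
+	 * in mv88e6xxx_get_temp_limit).
+	 */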
+ temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
+ return mv88e6xxx_mdio_page_write(ds, phy, 6, 26,
+ (ret & 0xe0ff) | (temp << 8));
+}
+
+static int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
+{
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ int phy = mv88e6xxx_6320_family(chip) ? 3 : 0;
+ int ret;
+
+ if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT))
+ return -EOPNOTSUPP;
+
+ *alarm = false;
+
+ ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 26);
+ if (ret < 0)
+ return ret;
+
+ *alarm = !!(ret & 0x40);
+
+ return 0;
+}
+#endif /* CONFIG_NET_DSA_HWMON */
+
+static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+
+ return chip->eeprom_len;
+}
+
+static int mv88e6xxx_get_eeprom16(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ unsigned int offset = eeprom->offset;
+ unsigned int len = eeprom->len;
+ u16 val;
+ int err;
+
+ eeprom->len = 0;
+
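+	/* The EEPROM is word-addressed: handle a leading odd byte, then whole
+	 * 16-bit words, then a trailing odd byte.
+	 */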
+ if (offset & 1) {
+ err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val);
+ if (err)
+ return err;
+
+ *data++ = (val >> 8) & 0xff;
+
+ offset++;
+ len--;
+ eeprom->len++;
+ }
+
+ while (len >= 2) {
+ err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val);
+ if (err)
+ return err;
+
+ *data++ = val & 0xff;
+ *data++ = (val >> 8) & 0xff;
+
+ offset += 2;
+ len -= 2;
+ eeprom->len += 2;
+ }
+
+ if (len) {
+ err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val);
+ if (err)
+ return err;
+
+ *data++ = val & 0xff;
+
+ offset++;
+ len--;
+ eeprom->len++;
+ }
+
+ return 0;
+}
+
+static int mv88e6xxx_get_eeprom(struct dsa_switch *ds,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ int err;
+
+ mutex_lock(&chip->reg_lock);
+
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_EEPROM16))
+ err = mv88e6xxx_get_eeprom16(chip, eeprom, data);
+ else
+ err = -EOPNOTSUPP;
+
+ mutex_unlock(&chip->reg_lock);
+
+ if (err)
+ return err;
+
+ eeprom->magic = 0xc3ec4951;
+
+ return 0;
+}
+
+static int mv88e6xxx_set_eeprom16(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ unsigned int offset = eeprom->offset;
+ unsigned int len = eeprom->len;
+ u16 val;
+ int err;
+
+ /* Ensure the RO WriteEn bit is set */
+ err = mv88e6xxx_read(chip, REG_GLOBAL2, GLOBAL2_EEPROM_CMD, &val);
+ if (err)
+ return err;
+
+ if (!(val & GLOBAL2_EEPROM_CMD_WRITE_EN))
+ return -EROFS;
+
+ eeprom->len = 0;
+
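+	/* Same word-addressed layout as the read path: read-modify-write the
+	 * words containing an unaligned head or tail byte.
+	 */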
+ if (offset & 1) {
+ err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val);
+ if (err)
+ return err;
+
+ val = (*data++ << 8) | (val & 0xff);
+
+ err = mv88e6xxx_g2_eeprom_write16(chip, offset >> 1, val);
+ if (err)
+ return err;
+
+ offset++;
+ len--;
+ eeprom->len++;
+ }
+
+ while (len >= 2) {
+ val = *data++;
+ val |= *data++ << 8;
+
+ err = mv88e6xxx_g2_eeprom_write16(chip, offset >> 1, val);
+ if (err)
+ return err;
+
+ offset += 2;
+ len -= 2;
+ eeprom->len += 2;
+ }
+
+ if (len) {
+ err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val);
+ if (err)
+ return err;
+
+ val = (val & 0xff00) | *data++;
+
+ err = mv88e6xxx_g2_eeprom_write16(chip, offset >> 1, val);
+ if (err)
+ return err;
+
+ offset++;
+ len--;
+ eeprom->len++;
+ }
+
+ return 0;
+}
+
+static int mv88e6xxx_set_eeprom(struct dsa_switch *ds,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+ int err;
+
+ if (eeprom->magic != 0xc3ec4951)
+ return -EINVAL;
+
+ mutex_lock(&chip->reg_lock);
+
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_EEPROM16))
+ err = mv88e6xxx_set_eeprom16(chip, eeprom, data);
+ else
+ err = -EOPNOTSUPP;
+
+ mutex_unlock(&chip->reg_lock);
+
+ return err;
+}
+
+static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+ [MV88E6085] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6085,
+ .family = MV88E6XXX_FAMILY_6097,
+ .name = "Marvell 88E6085",
+ .num_databases = 4096,
+ .num_ports = 10,
+ .port_base_addr = 0x10,
+ .age_time_coeff = 15000,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6097,
+ },
+
+ [MV88E6095] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6095,
+ .family = MV88E6XXX_FAMILY_6095,
+ .name = "Marvell 88E6095/88E6095F",
+ .num_databases = 256,
+ .num_ports = 11,
+ .port_base_addr = 0x10,
+ .age_time_coeff = 15000,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6095,
+ },
+
+ [MV88E6123] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6123,
+ .family = MV88E6XXX_FAMILY_6165,
+ .name = "Marvell 88E6123",
+ .num_databases = 4096,
+ .num_ports = 3,
+ .port_base_addr = 0x10,
+ .age_time_coeff = 15000,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6165,
+ },
+
+ [MV88E6131] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6131,
+ .family = MV88E6XXX_FAMILY_6185,
+ .name = "Marvell 88E6131",
+ .num_databases = 256,
+ .num_ports = 8,
+ .port_base_addr = 0x10,
+ .age_time_coeff = 15000,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6185,
+ },
+
+ [MV88E6161] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6161,
+ .family = MV88E6XXX_FAMILY_6165,
+ .name = "Marvell 88E6161",
+ .num_databases = 4096,
+ .num_ports = 6,
+ .port_base_addr = 0x10,
+ .age_time_coeff = 15000,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6165,
+ },
+
+ [MV88E6165] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6165,
+ .family = MV88E6XXX_FAMILY_6165,
+ .name = "Marvell 88E6165",
+ .num_databases = 4096,
+ .num_ports = 6,
+ .port_base_addr = 0x10,
+ .age_time_coeff = 15000,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6165,
+ },
+
+ [MV88E6171] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6171,
+ .family = MV88E6XXX_FAMILY_6351,
+ .name = "Marvell 88E6171",
+ .num_databases = 4096,
+ .num_ports = 7,
+ .port_base_addr = 0x10,
+ .age_time_coeff = 15000,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6351,
+ },
+
+ [MV88E6172] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6172,
+ .family = MV88E6XXX_FAMILY_6352,
+ .name = "Marvell 88E6172",
+ .num_databases = 4096,
+ .num_ports = 7,
+ .port_base_addr = 0x10,
+ .age_time_coeff = 15000,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6352,
+ },
+
+ [MV88E6175] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6175,
+ .family = MV88E6XXX_FAMILY_6351,
+ .name = "Marvell 88E6175",
+ .num_databases = 4096,
+ .num_ports = 7,
+ .port_base_addr = 0x10,
+ .age_time_coeff = 15000,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6351,
+ },
+
+ [MV88E6176] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6176,
+ .family = MV88E6XXX_FAMILY_6352,
+ .name = "Marvell 88E6176",
+ .num_databases = 4096,
+ .num_ports = 7,
+ .port_base_addr = 0x10,
+ .age_time_coeff = 15000,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6352,
+ },
+
+ [MV88E6185] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6185,
+ .family = MV88E6XXX_FAMILY_6185,
+ .name = "Marvell 88E6185",
+ .num_databases = 256,
+ .num_ports = 10,
+ .port_base_addr = 0x10,
+ .age_time_coeff = 15000,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6185,
+ },
+
+ [MV88E6240] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6240,
+ .family = MV88E6XXX_FAMILY_6352,
+ .name = "Marvell 88E6240",
+ .num_databases = 4096,
+ .num_ports = 7,
+ .port_base_addr = 0x10,
+ .age_time_coeff = 15000,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6352,
+ },
+
+ [MV88E6320] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6320,
+ .family = MV88E6XXX_FAMILY_6320,
+ .name = "Marvell 88E6320",
+ .num_databases = 4096,
+ .num_ports = 7,
+ .port_base_addr = 0x10,
+ .age_time_coeff = 15000,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6320,
+ },
+
+ [MV88E6321] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6321,
+ .family = MV88E6XXX_FAMILY_6320,
+ .name = "Marvell 88E6321",
+ .num_databases = 4096,
+ .num_ports = 7,
+ .port_base_addr = 0x10,
+ .age_time_coeff = 15000,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6320,
+ },
+
+ [MV88E6350] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6350,
+ .family = MV88E6XXX_FAMILY_6351,
+ .name = "Marvell 88E6350",
+ .num_databases = 4096,
+ .num_ports = 7,
+ .port_base_addr = 0x10,
+ .age_time_coeff = 15000,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6351,
+ },
+
+ [MV88E6351] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6351,
+ .family = MV88E6XXX_FAMILY_6351,
+ .name = "Marvell 88E6351",
+ .num_databases = 4096,
+ .num_ports = 7,
+ .port_base_addr = 0x10,
+ .age_time_coeff = 15000,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6351,
+ },
+
+ [MV88E6352] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6352,
+ .family = MV88E6XXX_FAMILY_6352,
+ .name = "Marvell 88E6352",
+ .num_databases = 4096,
+ .num_ports = 7,
+ .port_base_addr = 0x10,
+ .age_time_coeff = 15000,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6352,
+ },
+};
+
+static const struct mv88e6xxx_info *mv88e6xxx_lookup_info(unsigned int prod_num)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6xxx_table); ++i)
+ if (mv88e6xxx_table[i].prod_num == prod_num)
+ return &mv88e6xxx_table[i];
+
+ return NULL;
+}
+
+static int mv88e6xxx_detect(struct mv88e6xxx_chip *chip)
+{
+ const struct mv88e6xxx_info *info;
+ unsigned int prod_num, rev;
+ u16 id;
+ int err;
+
+ mutex_lock(&chip->reg_lock);
+ err = mv88e6xxx_port_read(chip, 0, PORT_SWITCH_ID, &id);
+ mutex_unlock(&chip->reg_lock);
+ if (err)
+ return err;
+
+ prod_num = (id & 0xfff0) >> 4;
+ rev = id & 0x000f;
+
+ info = mv88e6xxx_lookup_info(prod_num);
+ if (!info)
+ return -ENODEV;
+
+ /* Update the compatible info with the probed one */
+ chip->info = info;
+
+ dev_info(chip->dev, "switch 0x%x detected: %s, revision %u\n",
+ chip->info->prod_num, chip->info->name, rev);
+
+ return 0;
+}
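
/* A worked decode of the PORT_SWITCH_ID split above, using a
 * hypothetical raw register value (0x3521 is illustrative, not a
 * hardware read):
 *
 *	u16 id = 0x3521;
 *	prod_num = (id & 0xfff0) >> 4;	-> 0x352, i.e. 88E6352
 *	rev      = id & 0x000f;		-> revision 1
 */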
+
+static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev)
+{
+ struct mv88e6xxx_chip *chip;
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return NULL;
+
+ chip->dev = dev;
+
+ mutex_init(&chip->reg_lock);
+
+ return chip;
+}
+
+static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus, int sw_addr)
+{
+ /* ADDR[0] pin is unavailable externally and considered zero */
+ if (sw_addr & 0x1)
+ return -EINVAL;
+
+ if (sw_addr == 0)
+ chip->smi_ops = &mv88e6xxx_smi_single_chip_ops;
+ else if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_MULTI_CHIP))
+ chip->smi_ops = &mv88e6xxx_smi_multi_chip_ops;
+ else
+ return -EINVAL;
+
+ chip->bus = bus;
+ chip->sw_addr = sw_addr;
+
+ return 0;
+}
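
/* For reference, the address rules encoded above (sample values are
 * illustrative):
 *
 *	sw_addr == 0 -> single-chip mode: switch registers are mapped
 *	                directly onto the SMI bus;
 *	sw_addr == 2 -> multi-chip mode: registers are reached
 *	                indirectly via SMI_CMD/SMI_DATA, but only if the
 *	                chip advertises MV88E6XXX_FLAG_MULTI_CHIP;
 *	sw_addr == 3 -> -EINVAL: the ADDR[0] strap reads as zero, so an
 *	                odd address can never match the hardware.
 */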
+
+static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
+ struct device *host_dev, int sw_addr,
+ void **priv)
+{
+ struct mv88e6xxx_chip *chip;
+ struct mii_bus *bus;
+ int err;
+
+ bus = dsa_host_dev_to_mii_bus(host_dev);
+ if (!bus)
+ return NULL;
+
+ chip = mv88e6xxx_alloc_chip(dsa_dev);
+ if (!chip)
+ return NULL;
+
+ /* Legacy SMI probing will only support chips similar to 88E6085 */
+ chip->info = &mv88e6xxx_table[MV88E6085];
+
+ err = mv88e6xxx_smi_init(chip, bus, sw_addr);
+ if (err)
+ goto free;
+
+ err = mv88e6xxx_detect(chip);
+ if (err)
+ goto free;
+
+ err = mv88e6xxx_mdio_register(chip, NULL);
+ if (err)
+ goto free;
+
+ *priv = chip;
+
+ return chip->info->name;
+free:
+ devm_kfree(dsa_dev, chip);
+
+ return NULL;
+}
+
+static struct dsa_switch_driver mv88e6xxx_switch_driver = {
+ .tag_protocol = DSA_TAG_PROTO_EDSA,
+ .probe = mv88e6xxx_drv_probe,
+ .setup = mv88e6xxx_setup,
+ .set_addr = mv88e6xxx_set_addr,
+ .adjust_link = mv88e6xxx_adjust_link,
+ .get_strings = mv88e6xxx_get_strings,
+ .get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
+ .get_sset_count = mv88e6xxx_get_sset_count,
+ .set_eee = mv88e6xxx_set_eee,
+ .get_eee = mv88e6xxx_get_eee,
+#ifdef CONFIG_NET_DSA_HWMON
+ .get_temp = mv88e6xxx_get_temp,
+ .get_temp_limit = mv88e6xxx_get_temp_limit,
+ .set_temp_limit = mv88e6xxx_set_temp_limit,
+ .get_temp_alarm = mv88e6xxx_get_temp_alarm,
+#endif
+ .get_eeprom_len = mv88e6xxx_get_eeprom_len,
+ .get_eeprom = mv88e6xxx_get_eeprom,
+ .set_eeprom = mv88e6xxx_set_eeprom,
+ .get_regs_len = mv88e6xxx_get_regs_len,
+ .get_regs = mv88e6xxx_get_regs,
+ .set_ageing_time = mv88e6xxx_set_ageing_time,
+ .port_bridge_join = mv88e6xxx_port_bridge_join,
+ .port_bridge_leave = mv88e6xxx_port_bridge_leave,
+ .port_stp_state_set = mv88e6xxx_port_stp_state_set,
+ .port_vlan_filtering = mv88e6xxx_port_vlan_filtering,
+ .port_vlan_prepare = mv88e6xxx_port_vlan_prepare,
+ .port_vlan_add = mv88e6xxx_port_vlan_add,
+ .port_vlan_del = mv88e6xxx_port_vlan_del,
+ .port_vlan_dump = mv88e6xxx_port_vlan_dump,
+ .port_fdb_prepare = mv88e6xxx_port_fdb_prepare,
+ .port_fdb_add = mv88e6xxx_port_fdb_add,
+ .port_fdb_del = mv88e6xxx_port_fdb_del,
+ .port_fdb_dump = mv88e6xxx_port_fdb_dump,
+};
+
+static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip,
+ struct device_node *np)
+{
+ struct device *dev = chip->dev;
+ struct dsa_switch *ds;
+
+ ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
+ if (!ds)
+ return -ENOMEM;
+
+ ds->dev = dev;
+ ds->priv = chip;
+ ds->drv = &mv88e6xxx_switch_driver;
+
+ dev_set_drvdata(dev, ds);
+
+ return dsa_register_switch(ds, np);
+}
+
+static void mv88e6xxx_unregister_switch(struct mv88e6xxx_chip *chip)
+{
+ dsa_unregister_switch(chip->ds);
+}
+
+static int mv88e6xxx_probe(struct mdio_device *mdiodev)
+{
+ struct device *dev = &mdiodev->dev;
+ struct device_node *np = dev->of_node;
+ const struct mv88e6xxx_info *compat_info;
+ struct mv88e6xxx_chip *chip;
+ u32 eeprom_len;
+ int err;
+
+ compat_info = of_device_get_match_data(dev);
+ if (!compat_info)
+ return -EINVAL;
+
+ chip = mv88e6xxx_alloc_chip(dev);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->info = compat_info;
+
+ err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_detect(chip);
+ if (err)
+ return err;
+
+ chip->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS);
+ if (IS_ERR(chip->reset))
+ return PTR_ERR(chip->reset);
+
+ if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_EEPROM16) &&
+ !of_property_read_u32(np, "eeprom-length", &eeprom_len))
+ chip->eeprom_len = eeprom_len;
+
+ err = mv88e6xxx_mdio_register(chip, np);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_register_switch(chip, np);
+ if (err) {
+ mv88e6xxx_mdio_unregister(chip);
+ return err;
+ }
+
+ return 0;
+}
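
/* A minimal device tree fragment for this probe path; the node name,
 * GPIO phandle and values are hypothetical, only the compatible string
 * and property names come from the code above:
 *
 *	switch@0 {
 *		compatible = "marvell,mv88e6085";
 *		reg = <0>;
 *		reset-gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;  (-> chip->reset)
 *		eeprom-length = <512>;  (-> chip->eeprom_len, EEPROM16 chips only)
 *	};
 */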
+
+static void mv88e6xxx_remove(struct mdio_device *mdiodev)
+{
+ struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+ struct mv88e6xxx_chip *chip = ds_to_priv(ds);
+
+ mv88e6xxx_unregister_switch(chip);
+ mv88e6xxx_mdio_unregister(chip);
+}
+
+static const struct of_device_id mv88e6xxx_of_match[] = {
+ {
+ .compatible = "marvell,mv88e6085",
+ .data = &mv88e6xxx_table[MV88E6085],
+ },
+ { /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, mv88e6xxx_of_match);
+
+static struct mdio_driver mv88e6xxx_driver = {
+ .probe = mv88e6xxx_probe,
+ .remove = mv88e6xxx_remove,
+ .mdiodrv.driver = {
+ .name = "mv88e6085",
+ .of_match_table = mv88e6xxx_of_match,
+ },
+};
+
+static int __init mv88e6xxx_init(void)
+{
+ register_switch_driver(&mv88e6xxx_switch_driver);
+ return mdio_driver_register(&mv88e6xxx_driver);
+}
+module_init(mv88e6xxx_init);
+
+static void __exit mv88e6xxx_cleanup(void)
+{
+ mdio_driver_unregister(&mv88e6xxx_driver);
+ unregister_switch_driver(&mv88e6xxx_switch_driver);
+}
+module_exit(mv88e6xxx_cleanup);
+
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
+MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
new file mode 100644
index 000000000..48d6ea77f
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
@@ -0,0 +1,678 @@
+/*
+ * Marvell 88e6xxx common definitions
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __MV88E6XXX_H
+#define __MV88E6XXX_H
+
+#include <linux/if_vlan.h>
+#include <linux/gpio/consumer.h>
+
+#ifndef UINT64_MAX
+#define UINT64_MAX (u64)(~((u64)0))
+#endif
+
+#define SMI_CMD 0x00
+#define SMI_CMD_BUSY BIT(15)
+#define SMI_CMD_CLAUSE_22 BIT(12)
+#define SMI_CMD_OP_22_WRITE ((1 << 10) | SMI_CMD_BUSY | SMI_CMD_CLAUSE_22)
+#define SMI_CMD_OP_22_READ ((2 << 10) | SMI_CMD_BUSY | SMI_CMD_CLAUSE_22)
+#define SMI_CMD_OP_45_WRITE_ADDR ((0 << 10) | SMI_CMD_BUSY)
+#define SMI_CMD_OP_45_WRITE_DATA ((1 << 10) | SMI_CMD_BUSY)
+#define SMI_CMD_OP_45_READ_DATA ((2 << 10) | SMI_CMD_BUSY)
+#define SMI_CMD_OP_45_READ_DATA_INC ((3 << 10) | SMI_CMD_BUSY)
+#define SMI_DATA 0x01
+
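
/* A hedged sketch of the indirect (multi-chip) access cycle these two
 * registers implement; smi_wait() is a hypothetical helper that polls
 * SMI_CMD until SMI_CMD_BUSY clears, while mdiobus_read()/mdiobus_write()
 * are the standard kernel MDIO accessors.
 */
static int smi_indirect_read(struct mii_bus *bus, int sw_addr,
			     int addr, int reg, u16 *val)
{
	int err;

	err = smi_wait(bus, sw_addr);
	if (err)
		return err;

	/* Device address sits in bits 9:5, register in bits 4:0 */
	err = mdiobus_write(bus, sw_addr, SMI_CMD,
			    SMI_CMD_OP_22_READ | (addr << 5) | reg);
	if (err)
		return err;

	err = smi_wait(bus, sw_addr);
	if (err)
		return err;

	err = mdiobus_read(bus, sw_addr, SMI_DATA);
	if (err < 0)
		return err;

	*val = err;
	return 0;
}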
+/* Fiber/SERDES Registers are located at SMI address F, page 1 */
+#define REG_FIBER_SERDES 0x0f
+#define PAGE_FIBER_SERDES 0x01
+
+#define REG_PORT(p) (0x10 + (p))
+#define PORT_STATUS 0x00
+#define PORT_STATUS_PAUSE_EN BIT(15)
+#define PORT_STATUS_MY_PAUSE BIT(14)
+#define PORT_STATUS_HD_FLOW BIT(13)
+#define PORT_STATUS_PHY_DETECT BIT(12)
+#define PORT_STATUS_LINK BIT(11)
+#define PORT_STATUS_DUPLEX BIT(10)
+#define PORT_STATUS_SPEED_MASK 0x0300
+#define PORT_STATUS_SPEED_10 0x0000
+#define PORT_STATUS_SPEED_100 0x0100
+#define PORT_STATUS_SPEED_1000 0x0200
+#define PORT_STATUS_EEE BIT(6) /* 6352 */
+#define PORT_STATUS_AM_DIS BIT(6) /* 6165 */
+#define PORT_STATUS_MGMII BIT(6) /* 6185 */
+#define PORT_STATUS_TX_PAUSED BIT(5)
+#define PORT_STATUS_FLOW_CTRL BIT(4)
+#define PORT_STATUS_CMODE_MASK 0x0f
+#define PORT_STATUS_CMODE_100BASE_X 0x8
+#define PORT_STATUS_CMODE_1000BASE_X 0x9
+#define PORT_STATUS_CMODE_SGMII 0xa
+#define PORT_PCS_CTRL 0x01
+#define PORT_PCS_CTRL_RGMII_DELAY_RXCLK BIT(15)
+#define PORT_PCS_CTRL_RGMII_DELAY_TXCLK BIT(14)
+#define PORT_PCS_CTRL_FC BIT(7)
+#define PORT_PCS_CTRL_FORCE_FC BIT(6)
+#define PORT_PCS_CTRL_LINK_UP BIT(5)
+#define PORT_PCS_CTRL_FORCE_LINK BIT(4)
+#define PORT_PCS_CTRL_DUPLEX_FULL BIT(3)
+#define PORT_PCS_CTRL_FORCE_DUPLEX BIT(2)
+#define PORT_PCS_CTRL_10 0x00
+#define PORT_PCS_CTRL_100 0x01
+#define PORT_PCS_CTRL_1000 0x02
+#define PORT_PCS_CTRL_UNFORCED 0x03
+#define PORT_PAUSE_CTRL 0x02
+#define PORT_SWITCH_ID 0x03
+#define PORT_SWITCH_ID_PROD_NUM_6085 0x04a
+#define PORT_SWITCH_ID_PROD_NUM_6095 0x095
+#define PORT_SWITCH_ID_PROD_NUM_6131 0x106
+#define PORT_SWITCH_ID_PROD_NUM_6320 0x115
+#define PORT_SWITCH_ID_PROD_NUM_6123 0x121
+#define PORT_SWITCH_ID_PROD_NUM_6161 0x161
+#define PORT_SWITCH_ID_PROD_NUM_6165 0x165
+#define PORT_SWITCH_ID_PROD_NUM_6171 0x171
+#define PORT_SWITCH_ID_PROD_NUM_6172 0x172
+#define PORT_SWITCH_ID_PROD_NUM_6175 0x175
+#define PORT_SWITCH_ID_PROD_NUM_6176 0x176
+#define PORT_SWITCH_ID_PROD_NUM_6185 0x1a7
+#define PORT_SWITCH_ID_PROD_NUM_6240 0x240
+#define PORT_SWITCH_ID_PROD_NUM_6321 0x310
+#define PORT_SWITCH_ID_PROD_NUM_6352 0x352
+#define PORT_SWITCH_ID_PROD_NUM_6350 0x371
+#define PORT_SWITCH_ID_PROD_NUM_6351 0x375
+#define PORT_CONTROL 0x04
+#define PORT_CONTROL_USE_CORE_TAG BIT(15)
+#define PORT_CONTROL_DROP_ON_LOCK BIT(14)
+#define PORT_CONTROL_EGRESS_UNMODIFIED (0x0 << 12)
+#define PORT_CONTROL_EGRESS_UNTAGGED (0x1 << 12)
+#define PORT_CONTROL_EGRESS_TAGGED (0x2 << 12)
+#define PORT_CONTROL_EGRESS_ADD_TAG (0x3 << 12)
+#define PORT_CONTROL_HEADER BIT(11)
+#define PORT_CONTROL_IGMP_MLD_SNOOP BIT(10)
+#define PORT_CONTROL_DOUBLE_TAG BIT(9)
+#define PORT_CONTROL_FRAME_MODE_NORMAL (0x0 << 8)
+#define PORT_CONTROL_FRAME_MODE_DSA (0x1 << 8)
+#define PORT_CONTROL_FRAME_MODE_PROVIDER (0x2 << 8)
+#define PORT_CONTROL_FRAME_ETHER_TYPE_DSA (0x3 << 8)
+#define PORT_CONTROL_DSA_TAG BIT(8)
+#define PORT_CONTROL_VLAN_TUNNEL BIT(7)
+#define PORT_CONTROL_TAG_IF_BOTH BIT(6)
+#define PORT_CONTROL_USE_IP BIT(5)
+#define PORT_CONTROL_USE_TAG BIT(4)
+#define PORT_CONTROL_FORWARD_UNKNOWN_MC BIT(3)
+#define PORT_CONTROL_FORWARD_UNKNOWN BIT(2)
+#define PORT_CONTROL_STATE_MASK 0x03
+#define PORT_CONTROL_STATE_DISABLED 0x00
+#define PORT_CONTROL_STATE_BLOCKING 0x01
+#define PORT_CONTROL_STATE_LEARNING 0x02
+#define PORT_CONTROL_STATE_FORWARDING 0x03
+#define PORT_CONTROL_1 0x05
+#define PORT_CONTROL_1_FID_11_4_MASK (0xff << 0)
+#define PORT_BASE_VLAN 0x06
+#define PORT_BASE_VLAN_FID_3_0_MASK (0xf << 12)
+#define PORT_DEFAULT_VLAN 0x07
+#define PORT_DEFAULT_VLAN_MASK 0xfff
+#define PORT_CONTROL_2 0x08
+#define PORT_CONTROL_2_IGNORE_FCS BIT(15)
+#define PORT_CONTROL_2_VTU_PRI_OVERRIDE BIT(14)
+#define PORT_CONTROL_2_SA_PRIO_OVERRIDE BIT(13)
+#define PORT_CONTROL_2_DA_PRIO_OVERRIDE BIT(12)
+#define PORT_CONTROL_2_JUMBO_1522 (0x00 << 12)
+#define PORT_CONTROL_2_JUMBO_2048 (0x01 << 12)
+#define PORT_CONTROL_2_JUMBO_10240 (0x02 << 12)
+#define PORT_CONTROL_2_8021Q_MASK (0x03 << 10)
+#define PORT_CONTROL_2_8021Q_DISABLED (0x00 << 10)
+#define PORT_CONTROL_2_8021Q_FALLBACK (0x01 << 10)
+#define PORT_CONTROL_2_8021Q_CHECK (0x02 << 10)
+#define PORT_CONTROL_2_8021Q_SECURE (0x03 << 10)
+#define PORT_CONTROL_2_DISCARD_TAGGED BIT(9)
+#define PORT_CONTROL_2_DISCARD_UNTAGGED BIT(8)
+#define PORT_CONTROL_2_MAP_DA BIT(7)
+#define PORT_CONTROL_2_DEFAULT_FORWARD BIT(6)
+#define PORT_CONTROL_2_FORWARD_UNKNOWN BIT(6)
+#define PORT_CONTROL_2_EGRESS_MONITOR BIT(5)
+#define PORT_CONTROL_2_INGRESS_MONITOR BIT(4)
+#define PORT_RATE_CONTROL 0x09
+#define PORT_RATE_CONTROL_2 0x0a
+#define PORT_ASSOC_VECTOR 0x0b
+#define PORT_ASSOC_VECTOR_HOLD_AT_1 BIT(15)
+#define PORT_ASSOC_VECTOR_INT_AGE_OUT BIT(14)
+#define PORT_ASSOC_VECTOR_LOCKED_PORT BIT(13)
+#define PORT_ASSOC_VECTOR_IGNORE_WRONG BIT(12)
+#define PORT_ASSOC_VECTOR_REFRESH_LOCKED BIT(11)
+#define PORT_ATU_CONTROL 0x0c
+#define PORT_PRI_OVERRIDE 0x0d
+#define PORT_ETH_TYPE 0x0f
+#define PORT_IN_DISCARD_LO 0x10
+#define PORT_IN_DISCARD_HI 0x11
+#define PORT_IN_FILTERED 0x12
+#define PORT_OUT_FILTERED 0x13
+#define PORT_TAG_REGMAP_0123 0x18
+#define PORT_TAG_REGMAP_4567 0x19
+
+#define REG_GLOBAL 0x1b
+#define GLOBAL_STATUS 0x00
+#define GLOBAL_STATUS_PPU_STATE BIT(15) /* 6351 and 6171 */
+/* Two bits for 6165, 6185 etc */
+#define GLOBAL_STATUS_PPU_MASK (0x3 << 14)
+#define GLOBAL_STATUS_PPU_DISABLED_RST (0x0 << 14)
+#define GLOBAL_STATUS_PPU_INITIALIZING (0x1 << 14)
+#define GLOBAL_STATUS_PPU_DISABLED (0x2 << 14)
+#define GLOBAL_STATUS_PPU_POLLING (0x3 << 14)
+#define GLOBAL_MAC_01 0x01
+#define GLOBAL_MAC_23 0x02
+#define GLOBAL_MAC_45 0x03
+#define GLOBAL_ATU_FID 0x01 /* 6097 6165 6351 6352 */
+#define GLOBAL_VTU_FID 0x02 /* 6097 6165 6351 6352 */
+#define GLOBAL_VTU_FID_MASK 0xfff
+#define GLOBAL_VTU_SID 0x03 /* 6097 6165 6351 6352 */
+#define GLOBAL_VTU_SID_MASK 0x3f
+#define GLOBAL_CONTROL 0x04
+#define GLOBAL_CONTROL_SW_RESET BIT(15)
+#define GLOBAL_CONTROL_PPU_ENABLE BIT(14)
+#define GLOBAL_CONTROL_DISCARD_EXCESS BIT(13) /* 6352 */
+#define GLOBAL_CONTROL_SCHED_PRIO BIT(11) /* 6152 */
+#define GLOBAL_CONTROL_MAX_FRAME_1632 BIT(10) /* 6152 */
+#define GLOBAL_CONTROL_RELOAD_EEPROM BIT(9) /* 6152 */
+#define GLOBAL_CONTROL_DEVICE_EN BIT(7)
+#define GLOBAL_CONTROL_STATS_DONE_EN BIT(6)
+#define GLOBAL_CONTROL_VTU_PROBLEM_EN BIT(5)
+#define GLOBAL_CONTROL_VTU_DONE_EN BIT(4)
+#define GLOBAL_CONTROL_ATU_PROBLEM_EN BIT(3)
+#define GLOBAL_CONTROL_ATU_DONE_EN BIT(2)
+#define GLOBAL_CONTROL_TCAM_EN BIT(1)
+#define GLOBAL_CONTROL_EEPROM_DONE_EN BIT(0)
+#define GLOBAL_VTU_OP 0x05
+#define GLOBAL_VTU_OP_BUSY BIT(15)
+#define GLOBAL_VTU_OP_FLUSH_ALL ((0x01 << 12) | GLOBAL_VTU_OP_BUSY)
+#define GLOBAL_VTU_OP_VTU_LOAD_PURGE ((0x03 << 12) | GLOBAL_VTU_OP_BUSY)
+#define GLOBAL_VTU_OP_VTU_GET_NEXT ((0x04 << 12) | GLOBAL_VTU_OP_BUSY)
+#define GLOBAL_VTU_OP_STU_LOAD_PURGE ((0x05 << 12) | GLOBAL_VTU_OP_BUSY)
+#define GLOBAL_VTU_OP_STU_GET_NEXT ((0x06 << 12) | GLOBAL_VTU_OP_BUSY)
+#define GLOBAL_VTU_VID 0x06
+#define GLOBAL_VTU_VID_MASK 0xfff
+#define GLOBAL_VTU_VID_VALID BIT(12)
+#define GLOBAL_VTU_DATA_0_3 0x07
+#define GLOBAL_VTU_DATA_4_7 0x08
+#define GLOBAL_VTU_DATA_8_11 0x09
+#define GLOBAL_VTU_STU_DATA_MASK 0x03
+#define GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED 0x00
+#define GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED 0x01
+#define GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED 0x02
+#define GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER 0x03
+#define GLOBAL_STU_DATA_PORT_STATE_DISABLED 0x00
+#define GLOBAL_STU_DATA_PORT_STATE_BLOCKING 0x01
+#define GLOBAL_STU_DATA_PORT_STATE_LEARNING 0x02
+#define GLOBAL_STU_DATA_PORT_STATE_FORWARDING 0x03
+#define GLOBAL_ATU_CONTROL 0x0a
+#define GLOBAL_ATU_CONTROL_LEARN2ALL BIT(3)
+#define GLOBAL_ATU_OP 0x0b
+#define GLOBAL_ATU_OP_BUSY BIT(15)
+#define GLOBAL_ATU_OP_NOP (0 << 12)
+#define GLOBAL_ATU_OP_FLUSH_MOVE_ALL ((1 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC ((2 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_LOAD_DB ((3 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_GET_NEXT_DB ((4 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB ((5 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB ((6 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_GET_CLR_VIOLATION ((7 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_DATA 0x0c
+#define GLOBAL_ATU_DATA_TRUNK BIT(15)
+#define GLOBAL_ATU_DATA_TRUNK_ID_MASK 0x00f0
+#define GLOBAL_ATU_DATA_TRUNK_ID_SHIFT 4
+#define GLOBAL_ATU_DATA_PORT_VECTOR_MASK 0x3ff0
+#define GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT 4
+#define GLOBAL_ATU_DATA_STATE_MASK 0x0f
+#define GLOBAL_ATU_DATA_STATE_UNUSED 0x00
+#define GLOBAL_ATU_DATA_STATE_UC_MGMT 0x0d
+#define GLOBAL_ATU_DATA_STATE_UC_STATIC 0x0e
+#define GLOBAL_ATU_DATA_STATE_UC_PRIO_OVER 0x0f
+#define GLOBAL_ATU_DATA_STATE_MC_NONE_RATE 0x05
+#define GLOBAL_ATU_DATA_STATE_MC_STATIC 0x07
+#define GLOBAL_ATU_DATA_STATE_MC_MGMT 0x0e
+#define GLOBAL_ATU_DATA_STATE_MC_PRIO_OVER 0x0f
+#define GLOBAL_ATU_MAC_01 0x0d
+#define GLOBAL_ATU_MAC_23 0x0e
+#define GLOBAL_ATU_MAC_45 0x0f
+#define GLOBAL_IP_PRI_0 0x10
+#define GLOBAL_IP_PRI_1 0x11
+#define GLOBAL_IP_PRI_2 0x12
+#define GLOBAL_IP_PRI_3 0x13
+#define GLOBAL_IP_PRI_4 0x14
+#define GLOBAL_IP_PRI_5 0x15
+#define GLOBAL_IP_PRI_6 0x16
+#define GLOBAL_IP_PRI_7 0x17
+#define GLOBAL_IEEE_PRI 0x18
+#define GLOBAL_CORE_TAG_TYPE 0x19
+#define GLOBAL_MONITOR_CONTROL 0x1a
+#define GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT 12
+#define GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT 8
+#define GLOBAL_MONITOR_CONTROL_ARP_SHIFT 4
+#define GLOBAL_MONITOR_CONTROL_MIRROR_SHIFT 0
+#define GLOBAL_MONITOR_CONTROL_ARP_DISABLED (0xf0)
+#define GLOBAL_CONTROL_2 0x1c
+#define GLOBAL_CONTROL_2_NO_CASCADE 0xe000
+#define GLOBAL_CONTROL_2_MULTIPLE_CASCADE 0xf000
+
+#define GLOBAL_STATS_OP 0x1d
+#define GLOBAL_STATS_OP_BUSY BIT(15)
+#define GLOBAL_STATS_OP_NOP (0 << 12)
+#define GLOBAL_STATS_OP_FLUSH_ALL ((1 << 12) | GLOBAL_STATS_OP_BUSY)
+#define GLOBAL_STATS_OP_FLUSH_PORT ((2 << 12) | GLOBAL_STATS_OP_BUSY)
+#define GLOBAL_STATS_OP_READ_CAPTURED ((4 << 12) | GLOBAL_STATS_OP_BUSY)
+#define GLOBAL_STATS_OP_CAPTURE_PORT ((5 << 12) | GLOBAL_STATS_OP_BUSY)
+#define GLOBAL_STATS_OP_HIST_RX ((1 << 10) | GLOBAL_STATS_OP_BUSY)
+#define GLOBAL_STATS_OP_HIST_TX ((2 << 10) | GLOBAL_STATS_OP_BUSY)
+#define GLOBAL_STATS_OP_HIST_RX_TX ((3 << 10) | GLOBAL_STATS_OP_BUSY)
+#define GLOBAL_STATS_OP_BANK_1 BIT(9)
+#define GLOBAL_STATS_COUNTER_32 0x1e
+#define GLOBAL_STATS_COUNTER_01 0x1f
+
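
/* A hedged sketch of the snapshot + read cycle behind the stats
 * registers above; g1_read()/g1_write() (Switch Global accessors) and
 * stats_wait() (polls GLOBAL_STATS_OP until GLOBAL_STATS_OP_BUSY
 * clears) are hypothetical helpers, and the driver serialises the
 * whole sequence with a mutex.
 */
static int stats_read_counter(struct mv88e6xxx_chip *chip, int port,
			      int stat, u32 *val)
{
	u16 hi, lo;
	int err;

	/* Latch this port's counters into the capture registers */
	err = g1_write(chip, GLOBAL_STATS_OP,
		       GLOBAL_STATS_OP_CAPTURE_PORT |
		       GLOBAL_STATS_OP_HIST_RX_TX | port);
	if (err)
		return err;

	err = stats_wait(chip);
	if (err)
		return err;

	/* Pull one captured 32-bit counter back out */
	err = g1_write(chip, GLOBAL_STATS_OP,
		       GLOBAL_STATS_OP_READ_CAPTURED |
		       GLOBAL_STATS_OP_HIST_RX_TX | stat);
	if (err)
		return err;

	err = stats_wait(chip);
	if (err)
		return err;

	err = g1_read(chip, GLOBAL_STATS_COUNTER_32, &hi);
	if (err)
		return err;

	err = g1_read(chip, GLOBAL_STATS_COUNTER_01, &lo);
	if (err)
		return err;

	*val = ((u32)hi << 16) | lo;
	return 0;
}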
+#define REG_GLOBAL2 0x1c
+#define GLOBAL2_INT_SOURCE 0x00
+#define GLOBAL2_INT_MASK 0x01
+#define GLOBAL2_MGMT_EN_2X 0x02
+#define GLOBAL2_MGMT_EN_0X 0x03
+#define GLOBAL2_FLOW_CONTROL 0x04
+#define GLOBAL2_SWITCH_MGMT 0x05
+#define GLOBAL2_SWITCH_MGMT_USE_DOUBLE_TAG_DATA BIT(15)
+#define GLOBAL2_SWITCH_MGMT_PREVENT_LOOPS BIT(14)
+#define GLOBAL2_SWITCH_MGMT_FLOW_CONTROL_MSG BIT(13)
+#define GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI BIT(7)
+#define GLOBAL2_SWITCH_MGMT_RSVD2CPU BIT(3)
+#define GLOBAL2_DEVICE_MAPPING 0x06
+#define GLOBAL2_DEVICE_MAPPING_UPDATE BIT(15)
+#define GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT 8
+#define GLOBAL2_DEVICE_MAPPING_PORT_MASK 0x0f
+#define GLOBAL2_TRUNK_MASK 0x07
+#define GLOBAL2_TRUNK_MASK_UPDATE BIT(15)
+#define GLOBAL2_TRUNK_MASK_NUM_SHIFT 12
+#define GLOBAL2_TRUNK_MASK_HASK BIT(11)
+#define GLOBAL2_TRUNK_MAPPING 0x08
+#define GLOBAL2_TRUNK_MAPPING_UPDATE BIT(15)
+#define GLOBAL2_TRUNK_MAPPING_ID_SHIFT 11
+#define GLOBAL2_IRL_CMD 0x09
+#define GLOBAL2_IRL_CMD_BUSY BIT(15)
+#define GLOBAL2_IRL_CMD_OP_INIT_ALL ((0x001 << 12) | GLOBAL2_IRL_CMD_BUSY)
+#define GLOBAL2_IRL_CMD_OP_INIT_SEL ((0x010 << 12) | GLOBAL2_IRL_CMD_BUSY)
+#define GLOBAL2_IRL_CMD_OP_WRITE_SEL ((0x011 << 12) | GLOBAL2_IRL_CMD_BUSY)
+#define GLOBAL2_IRL_CMD_OP_READ_SEL ((0x100 << 12) | GLOBAL2_IRL_CMD_BUSY)
+#define GLOBAL2_IRL_DATA 0x0a
+#define GLOBAL2_PVT_ADDR 0x0b
+#define GLOBAL2_PVT_ADDR_BUSY BIT(15)
+#define GLOBAL2_PVT_ADDR_OP_INIT_ONES ((0x01 << 12) | GLOBAL2_PVT_ADDR_BUSY)
+#define GLOBAL2_PVT_ADDR_OP_WRITE_PVLAN ((0x03 << 12) | GLOBAL2_PVT_ADDR_BUSY)
+#define GLOBAL2_PVT_ADDR_OP_READ ((0x04 << 12) | GLOBAL2_PVT_ADDR_BUSY)
+#define GLOBAL2_PVT_DATA 0x0c
+#define GLOBAL2_SWITCH_MAC 0x0d
+#define GLOBAL2_ATU_STATS 0x0e
+#define GLOBAL2_PRIO_OVERRIDE 0x0f
+#define GLOBAL2_PRIO_OVERRIDE_FORCE_SNOOP BIT(7)
+#define GLOBAL2_PRIO_OVERRIDE_SNOOP_SHIFT 4
+#define GLOBAL2_PRIO_OVERRIDE_FORCE_ARP BIT(3)
+#define GLOBAL2_PRIO_OVERRIDE_ARP_SHIFT 0
+#define GLOBAL2_EEPROM_CMD 0x14
+#define GLOBAL2_EEPROM_CMD_BUSY BIT(15)
+#define GLOBAL2_EEPROM_CMD_OP_WRITE ((0x3 << 12) | GLOBAL2_EEPROM_CMD_BUSY)
+#define GLOBAL2_EEPROM_CMD_OP_READ ((0x4 << 12) | GLOBAL2_EEPROM_CMD_BUSY)
+#define GLOBAL2_EEPROM_CMD_OP_LOAD ((0x6 << 12) | GLOBAL2_EEPROM_CMD_BUSY)
+#define GLOBAL2_EEPROM_CMD_RUNNING BIT(11)
+#define GLOBAL2_EEPROM_CMD_WRITE_EN BIT(10)
+#define GLOBAL2_EEPROM_CMD_ADDR_MASK 0xff
+#define GLOBAL2_EEPROM_DATA 0x15
+#define GLOBAL2_PTP_AVB_OP 0x16
+#define GLOBAL2_PTP_AVB_DATA 0x17
+#define GLOBAL2_SMI_OP 0x18
+#define GLOBAL2_SMI_OP_BUSY BIT(15)
+#define GLOBAL2_SMI_OP_CLAUSE_22 BIT(12)
+#define GLOBAL2_SMI_OP_22_WRITE ((1 << 10) | GLOBAL2_SMI_OP_BUSY | \
+ GLOBAL2_SMI_OP_CLAUSE_22)
+#define GLOBAL2_SMI_OP_22_READ ((2 << 10) | GLOBAL2_SMI_OP_BUSY | \
+ GLOBAL2_SMI_OP_CLAUSE_22)
+#define GLOBAL2_SMI_OP_45_WRITE_ADDR ((0 << 10) | GLOBAL2_SMI_OP_BUSY)
+#define GLOBAL2_SMI_OP_45_WRITE_DATA ((1 << 10) | GLOBAL2_SMI_OP_BUSY)
+#define GLOBAL2_SMI_OP_45_READ_DATA ((2 << 10) | GLOBAL2_SMI_OP_BUSY)
+#define GLOBAL2_SMI_DATA 0x19
+#define GLOBAL2_SCRATCH_MISC 0x1a
+#define GLOBAL2_SCRATCH_BUSY BIT(15)
+#define GLOBAL2_SCRATCH_REGISTER_SHIFT 8
+#define GLOBAL2_SCRATCH_VALUE_MASK 0xff
+#define GLOBAL2_WDOG_CONTROL 0x1b
+#define GLOBAL2_QOS_WEIGHT 0x1c
+#define GLOBAL2_MISC 0x1d
+
+#define MV88E6XXX_N_FID 4096
+
+/* List of supported models */
+enum mv88e6xxx_model {
+ MV88E6085,
+ MV88E6095,
+ MV88E6123,
+ MV88E6131,
+ MV88E6161,
+ MV88E6165,
+ MV88E6171,
+ MV88E6172,
+ MV88E6175,
+ MV88E6176,
+ MV88E6185,
+ MV88E6240,
+ MV88E6320,
+ MV88E6321,
+ MV88E6350,
+ MV88E6351,
+ MV88E6352,
+};
+
+enum mv88e6xxx_family {
+ MV88E6XXX_FAMILY_NONE,
+ MV88E6XXX_FAMILY_6065, /* 6031 6035 6061 6065 */
+ MV88E6XXX_FAMILY_6095, /* 6092 6095 */
+ MV88E6XXX_FAMILY_6097, /* 6046 6085 6096 6097 */
+ MV88E6XXX_FAMILY_6165, /* 6123 6161 6165 */
+ MV88E6XXX_FAMILY_6185, /* 6108 6121 6122 6131 6152 6155 6182 6185 */
+ MV88E6XXX_FAMILY_6320, /* 6320 6321 */
+ MV88E6XXX_FAMILY_6351, /* 6171 6175 6350 6351 */
+ MV88E6XXX_FAMILY_6352, /* 6172 6176 6240 6352 */
+};
+
+enum mv88e6xxx_cap {
+ /* Energy Efficient Ethernet.
+ */
+ MV88E6XXX_CAP_EEE,
+
+ /* Switch Global 2 Registers.
+ * The device contains a second set of global 16-bit registers.
+ */
+ MV88E6XXX_CAP_GLOBAL2,
+ MV88E6XXX_CAP_G2_MGMT_EN_2X, /* (0x02) MGMT Enable Register 2x */
+ MV88E6XXX_CAP_G2_MGMT_EN_0X, /* (0x03) MGMT Enable Register 0x */
+ MV88E6XXX_CAP_G2_IRL_CMD, /* (0x09) Ingress Rate Command */
+ MV88E6XXX_CAP_G2_IRL_DATA, /* (0x0a) Ingress Rate Data */
+ MV88E6XXX_CAP_G2_PVT_ADDR, /* (0x0b) Cross Chip Port VLAN Addr */
+ MV88E6XXX_CAP_G2_PVT_DATA, /* (0x0c) Cross Chip Port VLAN Data */
+ MV88E6XXX_CAP_G2_SWITCH_MAC, /* (0x0d) Switch MAC/WoL/WoF */
+ MV88E6XXX_CAP_G2_POT, /* (0x0f) Priority Override Table */
+ MV88E6XXX_CAP_G2_EEPROM_CMD, /* (0x14) EEPROM Command */
+ MV88E6XXX_CAP_G2_EEPROM_DATA, /* (0x15) EEPROM Data */
+
+ /* Multi-chip Addressing Mode.
+ * Some chips require an indirect SMI access when their SMI device
+ * address is not zero. See SMI_CMD and SMI_DATA.
+ */
+ MV88E6XXX_CAP_MULTI_CHIP,
+
+ /* PHY Polling Unit.
+ * See GLOBAL_CONTROL_PPU_ENABLE and GLOBAL_STATUS_PPU_POLLING.
+ */
+ MV88E6XXX_CAP_PPU,
+ MV88E6XXX_CAP_PPU_ACTIVE,
+
+ /* SMI PHY Command and Data registers.
+ * When present, PHY registers are accessed indirectly through
+ * GLOBAL2_SMI_OP; otherwise they are accessed directly.
+ */
+ MV88E6XXX_CAP_SMI_PHY,
+
+ /* Per VLAN Spanning Tree Unit (STU).
+ * The Port State database, if present, is accessed through VTU
+ * operations and dedicated SID registers. See GLOBAL_VTU_SID.
+ */
+ MV88E6XXX_CAP_STU,
+
+ /* Internal temperature sensor.
+ * Available from any enabled port's PHY register 26, page 6.
+ */
+ MV88E6XXX_CAP_TEMP,
+ MV88E6XXX_CAP_TEMP_LIMIT,
+
+ /* VLAN Table Unit.
+ * The VTU is used to program 802.1Q VLANs. See GLOBAL_VTU_OP.
+ */
+ MV88E6XXX_CAP_VTU,
+};
+
+/* Bitmask of capabilities */
+#define MV88E6XXX_FLAG_EEE BIT(MV88E6XXX_CAP_EEE)
+#define MV88E6XXX_FLAG_GLOBAL2 BIT(MV88E6XXX_CAP_GLOBAL2)
+#define MV88E6XXX_FLAG_G2_MGMT_EN_2X BIT(MV88E6XXX_CAP_G2_MGMT_EN_2X)
+#define MV88E6XXX_FLAG_G2_MGMT_EN_0X BIT(MV88E6XXX_CAP_G2_MGMT_EN_0X)
+#define MV88E6XXX_FLAG_G2_IRL_CMD BIT(MV88E6XXX_CAP_G2_IRL_CMD)
+#define MV88E6XXX_FLAG_G2_IRL_DATA BIT(MV88E6XXX_CAP_G2_IRL_DATA)
+#define MV88E6XXX_FLAG_G2_PVT_ADDR BIT(MV88E6XXX_CAP_G2_PVT_ADDR)
+#define MV88E6XXX_FLAG_G2_PVT_DATA BIT(MV88E6XXX_CAP_G2_PVT_DATA)
+#define MV88E6XXX_FLAG_G2_SWITCH_MAC BIT(MV88E6XXX_CAP_G2_SWITCH_MAC)
+#define MV88E6XXX_FLAG_G2_POT BIT(MV88E6XXX_CAP_G2_POT)
+#define MV88E6XXX_FLAG_G2_EEPROM_CMD BIT(MV88E6XXX_CAP_G2_EEPROM_CMD)
+#define MV88E6XXX_FLAG_G2_EEPROM_DATA BIT(MV88E6XXX_CAP_G2_EEPROM_DATA)
+#define MV88E6XXX_FLAG_MULTI_CHIP BIT(MV88E6XXX_CAP_MULTI_CHIP)
+#define MV88E6XXX_FLAG_PPU BIT(MV88E6XXX_CAP_PPU)
+#define MV88E6XXX_FLAG_PPU_ACTIVE BIT(MV88E6XXX_CAP_PPU_ACTIVE)
+#define MV88E6XXX_FLAG_SMI_PHY BIT(MV88E6XXX_CAP_SMI_PHY)
+#define MV88E6XXX_FLAG_STU BIT(MV88E6XXX_CAP_STU)
+#define MV88E6XXX_FLAG_TEMP BIT(MV88E6XXX_CAP_TEMP)
+#define MV88E6XXX_FLAG_TEMP_LIMIT BIT(MV88E6XXX_CAP_TEMP_LIMIT)
+#define MV88E6XXX_FLAG_VTU BIT(MV88E6XXX_CAP_VTU)
+
+/* EEPROM Programming via Global2 with 16-bit data */
+#define MV88E6XXX_FLAGS_EEPROM16 \
+ (MV88E6XXX_FLAG_G2_EEPROM_CMD | \
+ MV88E6XXX_FLAG_G2_EEPROM_DATA)
+
+/* Ingress Rate Limit unit */
+#define MV88E6XXX_FLAGS_IRL \
+ (MV88E6XXX_FLAG_G2_IRL_CMD | \
+ MV88E6XXX_FLAG_G2_IRL_DATA)
+
+/* Cross-chip Port VLAN Table */
+#define MV88E6XXX_FLAGS_PVT \
+ (MV88E6XXX_FLAG_G2_PVT_ADDR | \
+ MV88E6XXX_FLAG_G2_PVT_DATA)
+
+#define MV88E6XXX_FLAGS_FAMILY_6095 \
+ (MV88E6XXX_FLAG_GLOBAL2 | \
+ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
+ MV88E6XXX_FLAG_MULTI_CHIP | \
+ MV88E6XXX_FLAG_PPU | \
+ MV88E6XXX_FLAG_VTU)
+
+#define MV88E6XXX_FLAGS_FAMILY_6097 \
+ (MV88E6XXX_FLAG_GLOBAL2 | \
+ MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
+ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
+ MV88E6XXX_FLAG_G2_POT | \
+ MV88E6XXX_FLAG_MULTI_CHIP | \
+ MV88E6XXX_FLAG_PPU | \
+ MV88E6XXX_FLAG_STU | \
+ MV88E6XXX_FLAG_VTU | \
+ MV88E6XXX_FLAGS_IRL | \
+ MV88E6XXX_FLAGS_PVT)
+
+#define MV88E6XXX_FLAGS_FAMILY_6165 \
+ (MV88E6XXX_FLAG_GLOBAL2 | \
+ MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
+ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
+ MV88E6XXX_FLAG_G2_SWITCH_MAC | \
+ MV88E6XXX_FLAG_G2_POT | \
+ MV88E6XXX_FLAG_MULTI_CHIP | \
+ MV88E6XXX_FLAG_STU | \
+ MV88E6XXX_FLAG_TEMP | \
+ MV88E6XXX_FLAG_VTU | \
+ MV88E6XXX_FLAGS_IRL | \
+ MV88E6XXX_FLAGS_PVT)
+
+#define MV88E6XXX_FLAGS_FAMILY_6185 \
+ (MV88E6XXX_FLAG_GLOBAL2 | \
+ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
+ MV88E6XXX_FLAG_MULTI_CHIP | \
+ MV88E6XXX_FLAG_PPU | \
+ MV88E6XXX_FLAG_VTU)
+
+#define MV88E6XXX_FLAGS_FAMILY_6320 \
+ (MV88E6XXX_FLAG_EEE | \
+ MV88E6XXX_FLAG_GLOBAL2 | \
+ MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
+ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
+ MV88E6XXX_FLAG_G2_SWITCH_MAC | \
+ MV88E6XXX_FLAG_G2_POT | \
+ MV88E6XXX_FLAG_MULTI_CHIP | \
+ MV88E6XXX_FLAG_PPU_ACTIVE | \
+ MV88E6XXX_FLAG_SMI_PHY | \
+ MV88E6XXX_FLAG_TEMP | \
+ MV88E6XXX_FLAG_TEMP_LIMIT | \
+ MV88E6XXX_FLAG_VTU | \
+ MV88E6XXX_FLAGS_EEPROM16 | \
+ MV88E6XXX_FLAGS_IRL | \
+ MV88E6XXX_FLAGS_PVT)
+
+#define MV88E6XXX_FLAGS_FAMILY_6351 \
+ (MV88E6XXX_FLAG_GLOBAL2 | \
+ MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
+ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
+ MV88E6XXX_FLAG_G2_SWITCH_MAC | \
+ MV88E6XXX_FLAG_G2_POT | \
+ MV88E6XXX_FLAG_MULTI_CHIP | \
+ MV88E6XXX_FLAG_PPU_ACTIVE | \
+ MV88E6XXX_FLAG_SMI_PHY | \
+ MV88E6XXX_FLAG_STU | \
+ MV88E6XXX_FLAG_TEMP | \
+ MV88E6XXX_FLAG_VTU | \
+ MV88E6XXX_FLAGS_IRL | \
+ MV88E6XXX_FLAGS_PVT)
+
+#define MV88E6XXX_FLAGS_FAMILY_6352 \
+ (MV88E6XXX_FLAG_EEE | \
+ MV88E6XXX_FLAG_GLOBAL2 | \
+ MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
+ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
+ MV88E6XXX_FLAG_G2_SWITCH_MAC | \
+ MV88E6XXX_FLAG_G2_POT | \
+ MV88E6XXX_FLAG_MULTI_CHIP | \
+ MV88E6XXX_FLAG_PPU_ACTIVE | \
+ MV88E6XXX_FLAG_SMI_PHY | \
+ MV88E6XXX_FLAG_STU | \
+ MV88E6XXX_FLAG_TEMP | \
+ MV88E6XXX_FLAG_TEMP_LIMIT | \
+ MV88E6XXX_FLAG_VTU | \
+ MV88E6XXX_FLAGS_EEPROM16 | \
+ MV88E6XXX_FLAGS_IRL | \
+ MV88E6XXX_FLAGS_PVT)
+
+struct mv88e6xxx_info {
+ enum mv88e6xxx_family family;
+ u16 prod_num;
+ const char *name;
+ unsigned int num_databases;
+ unsigned int num_ports;
+ unsigned int port_base_addr;
+ unsigned int age_time_coeff;
+ unsigned long flags;
+};
+
+struct mv88e6xxx_atu_entry {
+ u16 fid;
+ u8 state;
+ bool trunk;
+ u16 portv_trunkid;
+ u8 mac[ETH_ALEN];
+};
+
+struct mv88e6xxx_vtu_stu_entry {
+ /* VTU only */
+ u16 vid;
+ u16 fid;
+
+ /* VTU and STU */
+ u8 sid;
+ bool valid;
+ u8 data[DSA_MAX_PORTS];
+};
+
+struct mv88e6xxx_ops;
+
+struct mv88e6xxx_priv_port {
+ struct net_device *bridge_dev;
+};
+
+struct mv88e6xxx_chip {
+ const struct mv88e6xxx_info *info;
+
+ /* The dsa_switch this private structure is related to */
+ struct dsa_switch *ds;
+
+ /* The device this structure is associated with */
+ struct device *dev;
+
+ /* This mutex protects access to the switch registers */
+ struct mutex reg_lock;
+
+ /* The MII bus and the address on the bus that is used to
+ * communicate with the switch
+ */
+ const struct mv88e6xxx_ops *smi_ops;
+ struct mii_bus *bus;
+ int sw_addr;
+
+ /* Handles automatic disabling and re-enabling of the PHY
+ * polling unit.
+ */
+ struct mutex ppu_mutex;
+ int ppu_disabled;
+ struct work_struct ppu_work;
+ struct timer_list ppu_timer;
+
+ /* This mutex serialises access to the statistics unit.
+ * Hold this mutex over snapshot + dump sequences.
+ */
+ struct mutex stats_mutex;
+
+ struct mv88e6xxx_priv_port ports[DSA_MAX_PORTS];
+
+ /* A switch may have a GPIO line tied to its reset pin. Parse
+ * this from the device tree, and use it before performing
+ * switch soft reset.
+ */
+ struct gpio_desc *reset;
+
+ /* set to size of eeprom if supported by the switch */
+ int eeprom_len;
+
+ /* Device node for the MDIO bus */
+ struct device_node *mdio_np;
+
+ /* And the MDIO bus itself */
+ struct mii_bus *mdio_bus;
+};
+
+struct mv88e6xxx_ops {
+ int (*read)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val);
+ int (*write)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
+};
+
+enum stat_type {
+ BANK0,
+ BANK1,
+ PORT,
+};
+
+struct mv88e6xxx_hw_stat {
+ char string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int reg;
+ enum stat_type type;
+};
+
+static inline bool mv88e6xxx_has(struct mv88e6xxx_chip *chip,
+ unsigned long flags)
+{
+ return (chip->info->flags & flags) == flags;
+}
+
+#endif
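
/* mv88e6xxx_has() tests a whole mask, so the composite MV88E6XXX_FLAGS_*
 * groups above can be checked in one call; every bit of a multi-bit
 * group must be set. A minimal usage sketch, assuming an already-probed
 * chip:
 *
 *	if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_EEPROM16)) {
 *		// GLOBAL2_EEPROM_CMD and GLOBAL2_EEPROM_DATA are usable
 *	}
 */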
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index c89b9aeec..39ca9350d 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -84,7 +84,6 @@ static u32 ax_msg_enable;
struct ax_device {
struct mii_bus *mii_bus;
struct mdiobb_ctrl bb_ctrl;
- struct phy_device *phy_dev;
void __iomem *addr_memr;
u8 reg_memr;
int link;
@@ -320,7 +319,7 @@ static void ax_block_output(struct net_device *dev, int count,
static void ax_handle_link_change(struct net_device *dev)
{
struct ax_device *ax = to_ax_dev(dev);
- struct phy_device *phy_dev = ax->phy_dev;
+ struct phy_device *phy_dev = dev->phydev;
int status_change = 0;
if (phy_dev->link && ((ax->speed != phy_dev->speed) ||
@@ -369,8 +368,6 @@ static int ax_mii_probe(struct net_device *dev)
phy_dev->supported &= PHY_BASIC_FEATURES;
phy_dev->advertising = phy_dev->supported;
- ax->phy_dev = phy_dev;
-
netdev_info(dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
phy_dev->drv->name, phydev_name(phy_dev), phy_dev->irq);
@@ -410,7 +407,7 @@ static int ax_open(struct net_device *dev)
ret = ax_mii_probe(dev);
if (ret)
goto failed_mii_probe;
- phy_start(ax->phy_dev);
+ phy_start(dev->phydev);
ret = ax_ei_open(dev);
if (ret)
@@ -421,7 +418,7 @@ static int ax_open(struct net_device *dev)
return 0;
failed_ax_ei_open:
- phy_disconnect(ax->phy_dev);
+ phy_disconnect(dev->phydev);
failed_mii_probe:
ax_phy_switch(dev, 0);
free_irq(dev->irq, dev);
@@ -442,7 +439,7 @@ static int ax_close(struct net_device *dev)
/* turn the phy off */
ax_phy_switch(dev, 0);
- phy_disconnect(ax->phy_dev);
+ phy_disconnect(dev->phydev);
free_irq(dev->irq, dev);
return 0;
@@ -450,8 +447,7 @@ static int ax_close(struct net_device *dev)
static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
- struct ax_device *ax = to_ax_dev(dev);
- struct phy_device *phy_dev = ax->phy_dev;
+ struct phy_device *phy_dev = dev->phydev;
if (!netif_running(dev))
return -EINVAL;
@@ -474,28 +470,6 @@ static void ax_get_drvinfo(struct net_device *dev,
strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
}
-static int ax_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct ax_device *ax = to_ax_dev(dev);
- struct phy_device *phy_dev = ax->phy_dev;
-
- if (!phy_dev)
- return -ENODEV;
-
- return phy_ethtool_gset(phy_dev, cmd);
-}
-
-static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct ax_device *ax = to_ax_dev(dev);
- struct phy_device *phy_dev = ax->phy_dev;
-
- if (!phy_dev)
- return -ENODEV;
-
- return phy_ethtool_sset(phy_dev, cmd);
-}
-
static u32 ax_get_msglevel(struct net_device *dev)
{
struct ei_device *ei_local = netdev_priv(dev);
@@ -512,12 +486,12 @@ static void ax_set_msglevel(struct net_device *dev, u32 v)
static const struct ethtool_ops ax_ethtool_ops = {
.get_drvinfo = ax_get_drvinfo,
- .get_settings = ax_get_settings,
- .set_settings = ax_set_settings,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
.get_msglevel = ax_get_msglevel,
.set_msglevel = ax_set_msglevel,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
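
/* The same conversion recurs across the drivers below: the private
 * phy_device pointer goes away because phy_connect()/of_phy_connect()
 * already publish the attached PHY in net_device.phydev, letting an
 * ethtool_ops delegate to the generic ksettings helpers. In minimal
 * form (foo_* names are placeholders):
 *
 *	static const struct ethtool_ops foo_ethtool_ops = {
 *		.get_drvinfo		= foo_get_drvinfo,
 *		.get_link		= ethtool_op_get_link,
 *		.get_link_ksettings	= phy_ethtool_get_link_ksettings,
 *		.set_link_ksettings	= phy_ethtool_set_link_ksettings,
 *	};
 */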
#ifdef CONFIG_AX88796_93CX6
@@ -936,7 +910,8 @@ static int ax_probe(struct platform_device *pdev)
iounmap(ax->map2);
exit_mem2:
- release_mem_region(mem2->start, mem2_size);
+ if (mem2)
+ release_mem_region(mem2->start, mem2_size);
exit_mem1:
iounmap(ei_local->mem);
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index 3d2245fdc..38eaea18d 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -310,7 +310,7 @@ static int bfin_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
static void bfin_mac_adjust_link(struct net_device *dev)
{
struct bfin_mac_local *lp = netdev_priv(dev);
- struct phy_device *phydev = lp->phydev;
+ struct phy_device *phydev = dev->phydev;
unsigned long flags;
int new_state = 0;
@@ -430,7 +430,6 @@ static int mii_probe(struct net_device *dev, int phy_mode)
lp->old_link = 0;
lp->old_speed = 0;
lp->old_duplex = -1;
- lp->phydev = phydev;
phy_attached_print(phydev, "mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n",
MDC_CLK, mdc_div, sclk / 1000000);
@@ -450,31 +449,6 @@ static irqreturn_t bfin_mac_wake_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int
-bfin_mac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct bfin_mac_local *lp = netdev_priv(dev);
-
- if (lp->phydev)
- return phy_ethtool_gset(lp->phydev, cmd);
-
- return -EINVAL;
-}
-
-static int
-bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct bfin_mac_local *lp = netdev_priv(dev);
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (lp->phydev)
- return phy_ethtool_sset(lp->phydev, cmd);
-
- return -EINVAL;
-}
-
static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -552,8 +526,6 @@ static int bfin_mac_ethtool_get_ts_info(struct net_device *dev,
#endif
static const struct ethtool_ops bfin_mac_ethtool_ops = {
- .get_settings = bfin_mac_ethtool_getsettings,
- .set_settings = bfin_mac_ethtool_setsettings,
.get_link = ethtool_op_get_link,
.get_drvinfo = bfin_mac_ethtool_getdrvinfo,
.get_wol = bfin_mac_ethtool_getwol,
@@ -561,6 +533,8 @@ static const struct ethtool_ops bfin_mac_ethtool_ops = {
#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
.get_ts_info = bfin_mac_ethtool_get_ts_info,
#endif
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
/**************************************************************************/
@@ -1427,7 +1401,7 @@ static void bfin_mac_timeout(struct net_device *dev)
if (netif_queue_stopped(dev))
netif_wake_queue(dev);
- bfin_mac_enable(lp->phydev);
+ bfin_mac_enable(dev->phydev);
/* We can accept TX packets again */
netif_trans_update(dev); /* prevent tx timeout */
@@ -1491,8 +1465,6 @@ static void bfin_mac_set_multicast_list(struct net_device *dev)
static int bfin_mac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
- struct bfin_mac_local *lp = netdev_priv(netdev);
-
if (!netif_running(netdev))
return -EINVAL;
@@ -1502,8 +1474,8 @@ static int bfin_mac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
case SIOCGHWTSTAMP:
return bfin_mac_hwtstamp_get(netdev, ifr);
default:
- if (lp->phydev)
- return phy_mii_ioctl(lp->phydev, ifr, cmd);
+ if (netdev->phydev)
+ return phy_mii_ioctl(netdev->phydev, ifr, cmd);
else
return -EOPNOTSUPP;
}
@@ -1547,12 +1519,12 @@ static int bfin_mac_open(struct net_device *dev)
if (ret)
return ret;
- phy_start(lp->phydev);
+ phy_start(dev->phydev);
setup_system_regs(dev);
setup_mac_addr(dev->dev_addr);
bfin_mac_disable();
- ret = bfin_mac_enable(lp->phydev);
+ ret = bfin_mac_enable(dev->phydev);
if (ret)
return ret;
pr_debug("hardware init finished\n");
@@ -1578,8 +1550,8 @@ static int bfin_mac_close(struct net_device *dev)
napi_disable(&lp->napi);
netif_carrier_off(dev);
- phy_stop(lp->phydev);
- phy_write(lp->phydev, MII_BMCR, BMCR_PDOWN);
+ phy_stop(dev->phydev);
+ phy_write(dev->phydev, MII_BMCR, BMCR_PDOWN);
/* clear everything */
bfin_mac_shutdown(dev);
diff --git a/drivers/net/ethernet/adi/bfin_mac.h b/drivers/net/ethernet/adi/bfin_mac.h
index d1217db70..8c3b56198 100644
--- a/drivers/net/ethernet/adi/bfin_mac.h
+++ b/drivers/net/ethernet/adi/bfin_mac.h
@@ -92,7 +92,6 @@ struct bfin_mac_local {
int old_speed;
int old_duplex;
- struct phy_device *phydev;
struct mii_bus *mii_bus;
#if defined(CONFIG_BFIN_MAC_USE_HWSTAMP)
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 821d86c38..c83ebae73 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -440,7 +440,6 @@ struct et131x_adapter {
struct net_device *netdev;
struct pci_dev *pdev;
struct mii_bus *mii_bus;
- struct phy_device *phydev;
struct napi_struct napi;
/* Flags that indicate current state of the adapter */
@@ -864,7 +863,7 @@ static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
{
int32_t delay = 0;
struct mac_regs __iomem *mac = &adapter->regs->mac;
- struct phy_device *phydev = adapter->phydev;
+ struct phy_device *phydev = adapter->netdev->phydev;
u32 cfg1;
u32 cfg2;
u32 ifctrl;
@@ -1035,7 +1034,7 @@ static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
{
struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
- struct phy_device *phydev = adapter->phydev;
+ struct phy_device *phydev = adapter->netdev->phydev;
u32 sa_lo;
u32 sa_hi = 0;
u32 pf_ctrl = 0;
@@ -1230,7 +1229,7 @@ out:
static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
{
- struct phy_device *phydev = adapter->phydev;
+ struct phy_device *phydev = adapter->netdev->phydev;
if (!phydev)
return -EIO;
@@ -1311,7 +1310,7 @@ static void et1310_phy_read_mii_bit(struct et131x_adapter *adapter,
static void et1310_config_flow_control(struct et131x_adapter *adapter)
{
- struct phy_device *phydev = adapter->phydev;
+ struct phy_device *phydev = adapter->netdev->phydev;
if (phydev->duplex == DUPLEX_HALF) {
adapter->flow = FLOW_NONE;
@@ -1456,7 +1455,7 @@ static int et131x_mdio_write(struct mii_bus *bus, int phy_addr,
static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down)
{
u16 data;
- struct phy_device *phydev = adapter->phydev;
+ struct phy_device *phydev = adapter->netdev->phydev;
et131x_mii_read(adapter, MII_BMCR, &data);
data &= ~BMCR_PDOWN;
@@ -1469,7 +1468,7 @@ static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down)
static void et131x_xcvr_init(struct et131x_adapter *adapter)
{
u16 lcr2;
- struct phy_device *phydev = adapter->phydev;
+ struct phy_device *phydev = adapter->netdev->phydev;
/* Set the LED behavior such that LED 1 indicates speed (off =
* 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
@@ -2111,7 +2110,7 @@ static int et131x_init_recv(struct et131x_adapter *adapter)
/* et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate */
static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
{
- struct phy_device *phydev = adapter->phydev;
+ struct phy_device *phydev = adapter->netdev->phydev;
/* For version B silicon, we do not use the RxDMA timer for 10 and 100
* Mbits/s line rates. We do not enable any RxDMA interrupt coalescing.
@@ -2426,7 +2425,7 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
struct sk_buff *skb = tcb->skb;
u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
- struct phy_device *phydev = adapter->phydev;
+ struct phy_device *phydev = adapter->netdev->phydev;
dma_addr_t dma_addr;
struct tx_ring *tx_ring = &adapter->tx_ring;
@@ -2791,22 +2790,6 @@ static void et131x_handle_send_pkts(struct et131x_adapter *adapter)
spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
}
-static int et131x_get_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
-{
- struct et131x_adapter *adapter = netdev_priv(netdev);
-
- return phy_ethtool_gset(adapter->phydev, cmd);
-}
-
-static int et131x_set_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
-{
- struct et131x_adapter *adapter = netdev_priv(netdev);
-
- return phy_ethtool_sset(adapter->phydev, cmd);
-}
-
static int et131x_get_regs_len(struct net_device *netdev)
{
#define ET131X_REGS_LEN 256
@@ -2979,12 +2962,12 @@ static void et131x_get_drvinfo(struct net_device *netdev,
}
static struct ethtool_ops et131x_ethtool_ops = {
- .get_settings = et131x_get_settings,
- .set_settings = et131x_set_settings,
.get_drvinfo = et131x_get_drvinfo,
.get_regs_len = et131x_get_regs_len,
.get_regs = et131x_get_regs,
.get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
/* et131x_hwaddr_init - set up the MAC Address */
@@ -3098,7 +3081,7 @@ err_out:
static void et131x_error_timer_handler(unsigned long data)
{
struct et131x_adapter *adapter = (struct et131x_adapter *)data;
- struct phy_device *phydev = adapter->phydev;
+ struct phy_device *phydev = adapter->netdev->phydev;
if (et1310_in_phy_coma(adapter)) {
/* Bring the device immediately out of coma, to
@@ -3168,7 +3151,7 @@ static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
static void et131x_adjust_link(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
- struct phy_device *phydev = adapter->phydev;
+ struct phy_device *phydev = netdev->phydev;
if (!phydev)
return;
@@ -3287,7 +3270,6 @@ static int et131x_mii_probe(struct net_device *netdev)
phydev->advertising = phydev->supported;
phydev->autoneg = AUTONEG_ENABLE;
- adapter->phydev = phydev;
phy_attached_info(phydev);
@@ -3323,7 +3305,7 @@ static void et131x_pci_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
netif_napi_del(&adapter->napi);
- phy_disconnect(adapter->phydev);
+ phy_disconnect(netdev->phydev);
mdiobus_unregister(adapter->mii_bus);
mdiobus_free(adapter->mii_bus);
@@ -3338,20 +3320,16 @@ static void et131x_pci_remove(struct pci_dev *pdev)
static void et131x_up(struct net_device *netdev)
{
- struct et131x_adapter *adapter = netdev_priv(netdev);
-
et131x_enable_txrx(netdev);
- phy_start(adapter->phydev);
+ phy_start(netdev->phydev);
}
static void et131x_down(struct net_device *netdev)
{
- struct et131x_adapter *adapter = netdev_priv(netdev);
-
/* Save the timestamp for the TX watchdog, prevent a timeout */
netif_trans_update(netdev);
- phy_stop(adapter->phydev);
+ phy_stop(netdev->phydev);
et131x_disable_txrx(netdev);
}
@@ -3684,12 +3662,10 @@ static int et131x_close(struct net_device *netdev)
static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
int cmd)
{
- struct et131x_adapter *adapter = netdev_priv(netdev);
-
- if (!adapter->phydev)
+ if (!netdev->phydev)
return -EINVAL;
- return phy_mii_ioctl(adapter->phydev, reqbuf, cmd);
+ return phy_mii_ioctl(netdev->phydev, reqbuf, cmd);
}
/* et131x_set_packet_filter - Configures the Rx Packet filtering */
@@ -4073,7 +4049,7 @@ out:
return rc;
err_phy_disconnect:
- phy_disconnect(adapter->phydev);
+ phy_disconnect(netdev->phydev);
err_mdio_unregister:
mdiobus_unregister(adapter->mii_bus);
err_mdio_free:
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index de2c4bf5f..6ffdff68b 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -77,7 +77,6 @@ struct emac_board_info {
int emacrx_completed_flag;
- struct phy_device *phy_dev;
struct device_node *phy_node;
unsigned int link;
unsigned int speed;
@@ -115,7 +114,7 @@ static void emac_update_duplex(struct net_device *dev)
static void emac_handle_link_change(struct net_device *dev)
{
struct emac_board_info *db = netdev_priv(dev);
- struct phy_device *phydev = db->phy_dev;
+ struct phy_device *phydev = dev->phydev;
unsigned long flags;
int status_change = 0;
@@ -154,21 +153,22 @@ static void emac_handle_link_change(struct net_device *dev)
static int emac_mdio_probe(struct net_device *dev)
{
struct emac_board_info *db = netdev_priv(dev);
+ struct phy_device *phydev;
/* to-do: PHY interrupts are currently not supported */
/* attach the mac to the phy */
- db->phy_dev = of_phy_connect(db->ndev, db->phy_node,
- &emac_handle_link_change, 0,
- db->phy_interface);
- if (!db->phy_dev) {
+ phydev = of_phy_connect(db->ndev, db->phy_node,
+ &emac_handle_link_change, 0,
+ db->phy_interface);
+ if (!phydev) {
netdev_err(db->ndev, "could not find the PHY\n");
return -ENODEV;
}
/* mask with MAC supported features */
- db->phy_dev->supported &= PHY_BASIC_FEATURES;
- db->phy_dev->advertising = db->phy_dev->supported;
+ phydev->supported &= PHY_BASIC_FEATURES;
+ phydev->advertising = phydev->supported;
db->link = 0;
db->speed = 0;
@@ -179,10 +179,7 @@ static int emac_mdio_probe(struct net_device *dev)
static void emac_mdio_remove(struct net_device *dev)
{
- struct emac_board_info *db = netdev_priv(dev);
-
- phy_disconnect(db->phy_dev);
- db->phy_dev = NULL;
+ phy_disconnect(dev->phydev);
}
static void emac_reset(struct emac_board_info *db)
@@ -208,8 +205,7 @@ static void emac_inblk_32bit(void __iomem *reg, void *data, int count)
static int emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct emac_board_info *dm = netdev_priv(dev);
- struct phy_device *phydev = dm->phy_dev;
+ struct phy_device *phydev = dev->phydev;
if (!netif_running(dev))
return -EINVAL;
@@ -229,33 +225,11 @@ static void emac_get_drvinfo(struct net_device *dev,
strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
}
-static int emac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct emac_board_info *dm = netdev_priv(dev);
- struct phy_device *phydev = dm->phy_dev;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_gset(phydev, cmd);
-}
-
-static int emac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct emac_board_info *dm = netdev_priv(dev);
- struct phy_device *phydev = dm->phy_dev;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_sset(phydev, cmd);
-}
-
static const struct ethtool_ops emac_ethtool_ops = {
.get_drvinfo = emac_get_drvinfo,
- .get_settings = emac_get_settings,
- .set_settings = emac_set_settings,
.get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static unsigned int emac_setup(struct net_device *ndev)
@@ -744,7 +718,7 @@ static int emac_open(struct net_device *dev)
return ret;
}
- phy_start(db->phy_dev);
+ phy_start(dev->phydev);
netif_start_queue(dev);
return 0;
@@ -781,7 +755,7 @@ static int emac_stop(struct net_device *ndev)
netif_stop_queue(ndev);
netif_carrier_off(ndev);
- phy_stop(db->phy_dev);
+ phy_stop(ndev->phydev);
emac_mdio_remove(ndev);
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
index 103c30ddd..e0052003d 100644
--- a/drivers/net/ethernet/altera/altera_tse.h
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -473,7 +473,6 @@ struct altera_tse_private {
int phy_addr; /* PHY's MDIO address, -1 for autodetection */
phy_interface_t phy_iface;
struct mii_bus *mdio;
- struct phy_device *phydev;
int oldspeed;
int oldduplex;
int oldlink;
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
index be72e1e64..7c367713c 100644
--- a/drivers/net/ethernet/altera/altera_tse_ethtool.c
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -233,40 +233,18 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
buf[i] = csrrd32(priv->mac_dev, i * 4);
}
-static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct altera_tse_private *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
-
- if (phydev == NULL)
- return -ENODEV;
-
- return phy_ethtool_gset(phydev, cmd);
-}
-
-static int tse_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct altera_tse_private *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
-
- if (phydev == NULL)
- return -ENODEV;
-
- return phy_ethtool_sset(phydev, cmd);
-}
-
static const struct ethtool_ops tse_ethtool_ops = {
.get_drvinfo = tse_get_drvinfo,
.get_regs_len = tse_reglen,
.get_regs = tse_get_regs,
.get_link = ethtool_op_get_link,
- .get_settings = tse_get_settings,
- .set_settings = tse_set_settings,
.get_strings = tse_gstrings,
.get_sset_count = tse_sset_count,
.get_ethtool_stats = tse_fill_stats,
.get_msglevel = tse_get_msglevel,
.set_msglevel = tse_set_msglevel,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
void altera_tse_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index f749e4d38..bda31f308 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -625,7 +625,7 @@ out:
static void altera_tse_adjust_link(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = dev->phydev;
int new_state = 0;
/* only change config if there is a link */
@@ -815,6 +815,7 @@ static int init_phy(struct net_device *dev)
phydev = of_phy_connect(dev, phynode,
&altera_tse_adjust_link, 0, priv->phy_iface);
}
+ of_node_put(phynode);
if (!phydev) {
netdev_err(dev, "Could not find the PHY\n");
@@ -845,7 +846,6 @@ static int init_phy(struct net_device *dev)
netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n",
phydev->mdio.addr, phydev->phy_id, phydev->link);
- priv->phydev = phydev;
return 0;
}
@@ -1172,8 +1172,8 @@ static int tse_open(struct net_device *dev)
spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
- if (priv->phydev)
- phy_start(priv->phydev);
+ if (dev->phydev)
+ phy_start(dev->phydev);
napi_enable(&priv->napi);
netif_start_queue(dev);
@@ -1205,8 +1205,8 @@ static int tse_shutdown(struct net_device *dev)
unsigned long int flags;
/* Stop the PHY */
- if (priv->phydev)
- phy_stop(priv->phydev);
+ if (dev->phydev)
+ phy_stop(dev->phydev);
netif_stop_queue(dev);
napi_disable(&priv->napi);
@@ -1545,10 +1545,9 @@ err_free_netdev:
static int altera_tse_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
- struct altera_tse_private *priv = netdev_priv(ndev);
- if (priv->phydev)
- phy_disconnect(priv->phydev);
+ if (ndev->phydev)
+ phy_disconnect(ndev->phydev);
platform_set_drvdata(pdev, NULL);
altera_tse_mdio_destroy(ndev);
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 20760e102..df664187c 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -412,13 +412,13 @@ static void
au1000_adjust_link(struct net_device *dev)
{
struct au1000_private *aup = netdev_priv(dev);
- struct phy_device *phydev = aup->phy_dev;
+ struct phy_device *phydev = dev->phydev;
unsigned long flags;
u32 reg;
int status_change = 0;
- BUG_ON(!aup->phy_dev);
+ BUG_ON(!phydev);
spin_lock_irqsave(&aup->lock, flags);
@@ -579,7 +579,6 @@ static int au1000_mii_probe(struct net_device *dev)
aup->old_link = 0;
aup->old_speed = 0;
aup->old_duplex = -1;
- aup->phy_dev = phydev;
phy_attached_info(phydev);
@@ -678,29 +677,6 @@ au1000_setup_hw_rings(struct au1000_private *aup, void __iomem *tx_base)
* ethtool operations
*/
-static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct au1000_private *aup = netdev_priv(dev);
-
- if (aup->phy_dev)
- return phy_ethtool_gset(aup->phy_dev, cmd);
-
- return -EINVAL;
-}
-
-static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct au1000_private *aup = netdev_priv(dev);
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (aup->phy_dev)
- return phy_ethtool_sset(aup->phy_dev, cmd);
-
- return -EINVAL;
-}
-
static void
au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
@@ -725,12 +701,12 @@ static u32 au1000_get_msglevel(struct net_device *dev)
}
static const struct ethtool_ops au1000_ethtool_ops = {
- .get_settings = au1000_get_settings,
- .set_settings = au1000_set_settings,
.get_drvinfo = au1000_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_msglevel = au1000_get_msglevel,
.set_msglevel = au1000_set_msglevel,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
@@ -778,8 +754,8 @@ static int au1000_init(struct net_device *dev)
#ifndef CONFIG_CPU_LITTLE_ENDIAN
control |= MAC_BIG_ENDIAN;
#endif
- if (aup->phy_dev) {
- if (aup->phy_dev->link && (DUPLEX_FULL == aup->phy_dev->duplex))
+ if (dev->phydev) {
+ if (dev->phydev->link && (DUPLEX_FULL == dev->phydev->duplex))
control |= MAC_FULL_DUPLEX;
else
control |= MAC_DISABLE_RX_OWN;
@@ -891,11 +867,10 @@ static int au1000_rx(struct net_device *dev)
static void au1000_update_tx_stats(struct net_device *dev, u32 status)
{
- struct au1000_private *aup = netdev_priv(dev);
struct net_device_stats *ps = &dev->stats;
if (status & TX_FRAME_ABORTED) {
- if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) {
+ if (!dev->phydev || (DUPLEX_FULL == dev->phydev->duplex)) {
if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
/* any other tx errors are only valid
* in half duplex mode
@@ -975,10 +950,10 @@ static int au1000_open(struct net_device *dev)
return retval;
}
- if (aup->phy_dev) {
+ if (dev->phydev) {
/* cause the PHY state machine to schedule a link state check */
- aup->phy_dev->state = PHY_CHANGELINK;
- phy_start(aup->phy_dev);
+ dev->phydev->state = PHY_CHANGELINK;
+ phy_start(dev->phydev);
}
netif_start_queue(dev);
@@ -995,8 +970,8 @@ static int au1000_close(struct net_device *dev)
netif_dbg(aup, drv, dev, "close: dev=%p\n", dev);
- if (aup->phy_dev)
- phy_stop(aup->phy_dev);
+ if (dev->phydev)
+ phy_stop(dev->phydev);
spin_lock_irqsave(&aup->lock, flags);
@@ -1110,15 +1085,13 @@ static void au1000_multicast_list(struct net_device *dev)
static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct au1000_private *aup = netdev_priv(dev);
-
if (!netif_running(dev))
return -EINVAL;
- if (!aup->phy_dev)
+ if (!dev->phydev)
return -EINVAL; /* PHY not controllable */
- return phy_mii_ioctl(aup->phy_dev, rq, cmd);
+ return phy_mii_ioctl(dev->phydev, rq, cmd);
}
static const struct net_device_ops au1000_netdev_ops = {
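The au1000 hunks above follow the tree-wide phylib conversion: once of_phy_connect() has attached the PHY (phylib stores it in net_device::phydev), the driver needs neither a private phy_dev pointer nor hand-rolled get/set_settings, and the generic link_ksettings helpers can be wired up directly. A minimal sketch of the resulting pattern, with illustrative foo_* names that are not part of this patch:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

static const struct ethtool_ops foo_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	/* Generic helpers operate on ndev->phydev directly. */
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

static int foo_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	if (!netif_running(ndev) || !ndev->phydev)
		return -EINVAL;

	return phy_mii_ioctl(ndev->phydev, rq, cmd);
}

The explicit CAP_NET_ADMIN test in the removed au1000_set_settings() is not reproduced because the ethtool core performs that check before calling into the driver.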
diff --git a/drivers/net/ethernet/amd/au1000_eth.h b/drivers/net/ethernet/amd/au1000_eth.h
index ca53024f0..4c47c2377 100644
--- a/drivers/net/ethernet/amd/au1000_eth.h
+++ b/drivers/net/ethernet/amd/au1000_eth.h
@@ -106,7 +106,6 @@ struct au1000_private {
int old_speed;
int old_duplex;
- struct phy_device *phy_dev;
struct mii_bus *mii_bus;
/* PHY configuration */
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index ebf9224b2..a9b270956 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -154,7 +154,7 @@ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
goto err_rx_ring;
for (i = 0, channel = channel_mem; i < count; i++, channel++) {
- snprintf(channel->name, sizeof(channel->name), "channel-%d", i);
+ snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
channel->pdata = pdata;
channel->queue_index = i;
channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
diff --git a/drivers/net/ethernet/apm/xgene/Kconfig b/drivers/net/ethernet/apm/xgene/Kconfig
index 19e38afbc..300e3b5c5 100644
--- a/drivers/net/ethernet/apm/xgene/Kconfig
+++ b/drivers/net/ethernet/apm/xgene/Kconfig
@@ -3,6 +3,7 @@ config NET_XGENE
depends on HAS_DMA
depends on ARCH_XGENE || COMPILE_TEST
select PHYLIB
+ select MDIO_XGENE
help
This is the Ethernet driver for the on-chip ethernet interface on the
APM X-Gene SoC.
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
index 416d6ebfc..22a7b26ca 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
@@ -65,8 +65,15 @@ static int xgene_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
return phy_ethtool_gset(phydev, cmd);
} else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
- cmd->supported = SUPPORTED_1000baseT_Full |
- SUPPORTED_Autoneg | SUPPORTED_MII;
+ if (pdata->mdio_driver) {
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_gset(phydev, cmd);
+ }
+
+ cmd->supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
+ SUPPORTED_MII;
cmd->advertising = cmd->supported;
ethtool_cmd_speed_set(cmd, SPEED_1000);
cmd->duplex = DUPLEX_FULL;
@@ -92,12 +99,21 @@ static int xgene_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
struct phy_device *phydev = pdata->phy_dev;
if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
- if (phydev == NULL)
+ if (!phydev)
return -ENODEV;
return phy_ethtool_sset(phydev, cmd);
}
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
+ if (pdata->mdio_driver) {
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(phydev, cmd);
+ }
+ }
+
return -EINVAL;
}
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 2f5638f7f..18bb9556d 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -381,59 +381,6 @@ static void xgene_enet_rd_mcx_mac(struct xgene_enet_pdata *pdata,
rd_addr);
}
-static int xgene_mii_phy_write(struct xgene_enet_pdata *pdata, int phy_id,
- u32 reg, u16 data)
-{
- u32 addr = 0, wr_data = 0;
- u32 done;
- u8 wait = 10;
-
- PHY_ADDR_SET(&addr, phy_id);
- REG_ADDR_SET(&addr, reg);
- xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr);
-
- PHY_CONTROL_SET(&wr_data, data);
- xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONTROL_ADDR, wr_data);
- do {
- usleep_range(5, 10);
- xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done);
- } while ((done & BUSY_MASK) && wait--);
-
- if (done & BUSY_MASK) {
- netdev_err(pdata->ndev, "MII_MGMT write failed\n");
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int xgene_mii_phy_read(struct xgene_enet_pdata *pdata,
- u8 phy_id, u32 reg)
-{
- u32 addr = 0;
- u32 data, done;
- u8 wait = 10;
-
- PHY_ADDR_SET(&addr, phy_id);
- REG_ADDR_SET(&addr, reg);
- xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr);
- xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK);
- do {
- usleep_range(5, 10);
- xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done);
- } while ((done & BUSY_MASK) && wait--);
-
- if (done & BUSY_MASK) {
- netdev_err(pdata->ndev, "MII_MGMT read failed\n");
- return -EBUSY;
- }
-
- xgene_enet_rd_mcx_mac(pdata, MII_MGMT_STATUS_ADDR, &data);
- xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, 0);
-
- return data;
-}
-
static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
u32 addr0, addr1;
@@ -512,14 +459,11 @@ static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata)
#endif
}
-static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
+static void xgene_gmac_set_speed(struct xgene_enet_pdata *pdata)
{
struct device *dev = &pdata->pdev->dev;
- u32 value, mc2;
- u32 intf_ctl, rgmii;
- u32 icm0, icm2;
-
- xgene_gmac_reset(pdata);
+ u32 icm0, icm2, mc2;
+ u32 intf_ctl, rgmii, value;
xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, &icm0);
xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, &icm2);
@@ -564,7 +508,21 @@ static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
mc2 |= FULL_DUPLEX2 | PAD_CRC;
xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);
+ xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);
+ xgene_enet_configure_clock(pdata);
+
+ xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0);
+ xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2);
+}
+
+static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
+{
+ u32 value;
+ if (!pdata->mdio_driver)
+ xgene_gmac_reset(pdata);
+
+ xgene_gmac_set_speed(pdata);
xgene_gmac_set_mac_addr(pdata);
/* Adjust MDC clock frequency */
@@ -579,15 +537,10 @@ static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
/* Rtype should be copied from FP */
xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0);
- xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);
- xgene_enet_configure_clock(pdata);
/* Rx-Tx traffic resume */
xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);
- xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0);
- xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2);
-
xgene_enet_rd_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, &value);
value &= ~TX_DV_GATE_EN0;
value &= ~RX_DV_GATE_EN0;
@@ -671,93 +624,155 @@ bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
- u32 val;
+ struct device *dev = &pdata->pdev->dev;
if (!xgene_ring_mgr_init(pdata))
return -ENODEV;
- if (!IS_ERR(pdata->clk)) {
+ if (pdata->mdio_driver) {
+ xgene_enet_config_ring_if_assoc(pdata);
+ return 0;
+ }
+
+ if (dev->of_node) {
clk_prepare_enable(pdata->clk);
+ udelay(5);
clk_disable_unprepare(pdata->clk);
+ udelay(5);
clk_prepare_enable(pdata->clk);
- xgene_enet_ecc_init(pdata);
+ udelay(5);
+ } else {
+#ifdef CONFIG_ACPI
+ if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), "_RST")) {
+ acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
+ "_RST", NULL, NULL);
+ } else if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev),
+ "_INI")) {
+ acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
+ "_INI", NULL, NULL);
+ }
+#endif
}
- xgene_enet_config_ring_if_assoc(pdata);
- /* Enable auto-incr for scanning */
- xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &val);
- val |= SCAN_AUTO_INCR;
- MGMT_CLOCK_SEL_SET(&val, 1);
- xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val);
+ xgene_enet_ecc_init(pdata);
+ xgene_enet_config_ring_if_assoc(pdata);
return 0;
}
-static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
+static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
+ struct xgene_enet_desc_ring *ring)
{
- if (!IS_ERR(pdata->clk))
- clk_disable_unprepare(pdata->clk);
-}
+ u32 addr, val, data;
-static int xgene_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
-{
- struct xgene_enet_pdata *pdata = bus->priv;
- u32 val;
+ val = xgene_enet_ring_bufnum(ring->id);
- val = xgene_mii_phy_read(pdata, mii_id, regnum);
- netdev_dbg(pdata->ndev, "mdio_rd: bus=%d reg=%d val=%x\n",
- mii_id, regnum, val);
+ if (xgene_enet_is_bufpool(ring->id)) {
+ addr = ENET_CFGSSQMIFPRESET_ADDR;
+ data = BIT(val - 0x20);
+ } else {
+ addr = ENET_CFGSSQMIWQRESET_ADDR;
+ data = BIT(val);
+ }
- return val;
+ xgene_enet_wr_ring_if(pdata, addr, data);
}
-static int xgene_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
- u16 val)
+static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
{
- struct xgene_enet_pdata *pdata = bus->priv;
+ struct device *dev = &pdata->pdev->dev;
+ struct xgene_enet_desc_ring *ring;
+ u32 pb, val;
+ int i;
+
+ pb = 0;
+ for (i = 0; i < pdata->rxq_cnt; i++) {
+ ring = pdata->rx_ring[i]->buf_pool;
+
+ val = xgene_enet_ring_bufnum(ring->id);
+ pb |= BIT(val - 0x20);
+ }
+ xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPRESET_ADDR, pb);
+
+ pb = 0;
+ for (i = 0; i < pdata->txq_cnt; i++) {
+ ring = pdata->tx_ring[i];
+
+ val = xgene_enet_ring_bufnum(ring->id);
+ pb |= BIT(val);
+ }
+ xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQRESET_ADDR, pb);
- netdev_dbg(pdata->ndev, "mdio_wr: bus=%d reg=%d val=%x\n",
- mii_id, regnum, val);
- return xgene_mii_phy_write(pdata, mii_id, regnum, val);
+ if (dev->of_node) {
+ if (!IS_ERR(pdata->clk))
+ clk_disable_unprepare(pdata->clk);
+ }
}
static void xgene_enet_adjust_link(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+ const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
struct phy_device *phydev = pdata->phy_dev;
if (phydev->link) {
if (pdata->phy_speed != phydev->speed) {
pdata->phy_speed = phydev->speed;
- xgene_gmac_init(pdata);
- xgene_gmac_rx_enable(pdata);
- xgene_gmac_tx_enable(pdata);
+ mac_ops->set_speed(pdata);
+ mac_ops->rx_enable(pdata);
+ mac_ops->tx_enable(pdata);
phy_print_status(phydev);
}
} else {
- xgene_gmac_rx_disable(pdata);
- xgene_gmac_tx_disable(pdata);
+ mac_ops->rx_disable(pdata);
+ mac_ops->tx_disable(pdata);
pdata->phy_speed = SPEED_UNKNOWN;
phy_print_status(phydev);
}
}
-static int xgene_enet_phy_connect(struct net_device *ndev)
+#ifdef CONFIG_ACPI
+static struct acpi_device *acpi_phy_find_device(struct device *dev)
+{
+ struct acpi_reference_args args;
+ struct fwnode_handle *fw_node;
+ int status;
+
+ fw_node = acpi_fwnode_handle(ACPI_COMPANION(dev));
+ status = acpi_node_get_property_reference(fw_node, "phy-handle", 0,
+ &args);
+ if (ACPI_FAILURE(status)) {
+ dev_dbg(dev, "No matching phy in ACPI table\n");
+ return NULL;
+ }
+
+ return args.adev;
+}
+#endif
+
+int xgene_enet_phy_connect(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
- struct device_node *phy_np;
+ struct device_node *np;
struct phy_device *phy_dev;
struct device *dev = &pdata->pdev->dev;
+ int i;
if (dev->of_node) {
- phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0);
- if (!phy_np) {
+ for (i = 0 ; i < 2; i++) {
+ np = of_parse_phandle(dev->of_node, "phy-handle", i);
+ if (np)
+ break;
+ }
+
+ if (!np) {
netdev_dbg(ndev, "No phy-handle found in DT\n");
return -ENODEV;
}
- phy_dev = of_phy_connect(ndev, phy_np, &xgene_enet_adjust_link,
+ phy_dev = of_phy_connect(ndev, np, &xgene_enet_adjust_link,
0, pdata->phy_mode);
+ of_node_put(np);
if (!phy_dev) {
netdev_err(ndev, "Could not connect to PHY\n");
return -ENODEV;
@@ -765,6 +780,11 @@ static int xgene_enet_phy_connect(struct net_device *ndev)
pdata->phy_dev = phy_dev;
} else {
+#ifdef CONFIG_ACPI
+ struct acpi_device *adev = acpi_phy_find_device(dev);
+ if (adev)
+ pdata->phy_dev = adev->driver_data;
+
phy_dev = pdata->phy_dev;
if (!phy_dev ||
@@ -773,6 +793,9 @@ static int xgene_enet_phy_connect(struct net_device *ndev)
netdev_err(ndev, "Could not connect to PHY\n");
return -ENODEV;
}
+#else
+ return -ENODEV;
+#endif
}
pdata->phy_speed = SPEED_UNKNOWN;
@@ -792,8 +815,8 @@ static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
struct phy_device *phy;
struct device_node *child_np;
struct device_node *mdio_np = NULL;
+ u32 phy_addr;
int ret;
- u32 phy_id;
if (dev->of_node) {
for_each_child_of_node(dev->of_node, child_np) {
@@ -820,21 +843,17 @@ static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
if (ret)
return ret;
- ret = device_property_read_u32(dev, "phy-channel", &phy_id);
+ ret = device_property_read_u32(dev, "phy-channel", &phy_addr);
if (ret)
- ret = device_property_read_u32(dev, "phy-addr", &phy_id);
+ ret = device_property_read_u32(dev, "phy-addr", &phy_addr);
if (ret)
return -EINVAL;
- phy = get_phy_device(mdio, phy_id, false);
- if (IS_ERR(phy))
+ phy = xgene_enet_phy_register(mdio, phy_addr);
+ if (!phy)
return -EIO;
- ret = phy_device_register(phy);
- if (ret)
- phy_device_free(phy);
- else
- pdata->phy_dev = phy;
+ pdata->phy_dev = phy;
return ret;
}
@@ -850,13 +869,13 @@ int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
return -ENOMEM;
mdio_bus->name = "APM X-Gene MDIO bus";
- mdio_bus->read = xgene_enet_mdio_read;
- mdio_bus->write = xgene_enet_mdio_write;
+ mdio_bus->read = xgene_mdio_rgmii_read;
+ mdio_bus->write = xgene_mdio_rgmii_write;
snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "xgene-mii",
ndev->name);
- mdio_bus->priv = pdata;
- mdio_bus->parent = &ndev->dev;
+ mdio_bus->priv = (void __force *)pdata->mcx_mac_addr;
+ mdio_bus->parent = &pdata->pdev->dev;
ret = xgene_mdiobus_register(pdata, mdio_bus);
if (ret) {
@@ -873,6 +892,12 @@ int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
return ret;
}
+void xgene_enet_phy_disconnect(struct xgene_enet_pdata *pdata)
+{
+ if (pdata->phy_dev)
+ phy_disconnect(pdata->phy_dev);
+}
+
void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
{
if (pdata->phy_dev)
@@ -890,11 +915,13 @@ const struct xgene_mac_ops xgene_gmac_ops = {
.tx_enable = xgene_gmac_tx_enable,
.rx_disable = xgene_gmac_rx_disable,
.tx_disable = xgene_gmac_tx_disable,
+ .set_speed = xgene_gmac_set_speed,
.set_mac_addr = xgene_gmac_set_mac_addr,
};
const struct xgene_port_ops xgene_gport_ops = {
.reset = xgene_enet_reset,
+ .clear = xgene_enet_clear,
.cle_bypass = xgene_enet_cle_bypass,
.shutdown = xgene_gport_shutdown,
};
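The rewritten xgene_enet_reset() above must work on both DT and ACPI systems; under ACPI there is no clock object to toggle, so the reset is delegated to the firmware's _RST method, with _INI as a fallback. A condensed sketch of the dual path, assuming only a device with either an of_node or an ACPI companion; foo_hw_reset() is an illustrative name:

#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/delay.h>

static void foo_hw_reset(struct device *dev, struct clk *clk)
{
	if (dev->of_node) {
		clk_prepare_enable(clk);
		udelay(5);
		clk_disable_unprepare(clk);	/* assert reset */
		udelay(5);
		clk_prepare_enable(clk);	/* deassert reset */
		udelay(5);
	} else {
#ifdef CONFIG_ACPI
		acpi_handle handle = ACPI_HANDLE(dev);

		if (acpi_has_method(handle, "_RST"))
			acpi_evaluate_object(handle, "_RST", NULL, NULL);
		else if (acpi_has_method(handle, "_INI"))
			acpi_evaluate_object(handle, "_INI", NULL, NULL);
#endif
	}
}

The same shape recurs in the sgmac and xgmac reset paths later in this patch.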
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index 45220be31..179a44dce 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -104,6 +104,8 @@ enum xgene_enet_rm {
#define RECOMBBUF BIT(27)
#define MAC_OFFSET 0x30
+#define OFFSET_4 0x04
+#define OFFSET_8 0x08
#define BLOCK_ETH_CSR_OFFSET 0x2000
#define BLOCK_ETH_CLE_CSR_OFFSET 0x6000
@@ -165,6 +167,8 @@ enum xgene_enet_rm {
#define TX_DV_GATE_EN0 BIT(2)
#define RX_DV_GATE_EN0 BIT(1)
#define RESUME_RX0 BIT(0)
+#define ENET_CFGSSQMIFPRESET_ADDR 0x14
+#define ENET_CFGSSQMIWQRESET_ADDR 0x1c
#define ENET_CFGSSQMIWQASSOC_ADDR 0xe0
#define ENET_CFGSSQMIFPQASSOC_ADDR 0xdc
#define ENET_CFGSSQMIQMLITEFPQASSOC_ADDR 0xf0
@@ -297,11 +301,6 @@ enum xgene_enet_ring_bufnum {
RING_BUFNUM_INVALID
};
-enum xgene_enet_cmd {
- XGENE_ENET_WR_CMD = BIT(31),
- XGENE_ENET_RD_CMD = BIT(30)
-};
-
enum xgene_enet_err_code {
HBF_READ_DATA = 3,
HBF_LL_READ = 4,
@@ -347,6 +346,8 @@ void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata);
void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata);
bool xgene_ring_mgr_init(struct xgene_enet_pdata *p);
+int xgene_enet_phy_connect(struct net_device *ndev);
+void xgene_enet_phy_disconnect(struct xgene_enet_pdata *pdata);
extern const struct xgene_mac_ops xgene_gmac_ops;
extern const struct xgene_port_ops xgene_gport_ops;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index d208b172f..d1d6b5eeb 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -102,25 +102,13 @@ static u8 xgene_enet_hdr_len(const void *data)
static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
- struct xgene_enet_pdata *pdata = netdev_priv(buf_pool->ndev);
- struct xgene_enet_raw_desc16 *raw_desc;
- u32 slots = buf_pool->slots - 1;
- u32 tail = buf_pool->tail;
- u32 userinfo;
- int i, len;
-
- len = pdata->ring_ops->len(buf_pool);
- for (i = 0; i < len; i++) {
- tail = (tail - 1) & slots;
- raw_desc = &buf_pool->raw_desc16[tail];
+ int i;
- /* Hardware stores descriptor in little endian format */
- userinfo = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
- dev_kfree_skb_any(buf_pool->rx_skb[userinfo]);
+ /* Free up the buffers held by hardware */
+ for (i = 0; i < buf_pool->slots; i++) {
+ if (buf_pool->rx_skb[i])
+ dev_kfree_skb_any(buf_pool->rx_skb[i]);
}
-
- pdata->ring_ops->wr_cmd(buf_pool, -len);
- buf_pool->tail = tail;
}
static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
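The simplified xgene_enet_delete_bufpool() above switches from walking hardware descriptors to an ownership convention: an rx_skb[] slot owns its buffer until the RX path consumes it, and the hunk just below clears the slot at consumption time, so teardown frees exactly the buffers the hardware still holds. Sketched with an illustrative type:

struct foo_bufpool {
	struct sk_buff **rx_skb;
	u32 slots;
};

static void foo_bufpool_free_held(struct foo_bufpool *pool)
{
	u32 i;

	/* Slots still non-NULL were never handed to the stack. */
	for (i = 0; i < pool->slots; i++)
		if (pool->rx_skb[i])
			dev_kfree_skb_any(pool->rx_skb[i]);
}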
@@ -481,6 +469,7 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
skb = buf_pool->rx_skb[skb_index];
+ buf_pool->rx_skb[skb_index] = NULL;
/* checking for error */
status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) ||
@@ -619,6 +608,30 @@ static void xgene_enet_timeout(struct net_device *ndev)
}
}
+static void xgene_enet_set_irq_name(struct net_device *ndev)
+{
+ struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+ struct xgene_enet_desc_ring *ring;
+ int i;
+
+ for (i = 0; i < pdata->rxq_cnt; i++) {
+ ring = pdata->rx_ring[i];
+ if (!pdata->cq_cnt) {
+ snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
+ ndev->name);
+ } else {
+ snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-%d",
+ ndev->name, i);
+ }
+ }
+
+ for (i = 0; i < pdata->cq_cnt; i++) {
+ ring = pdata->tx_ring[i]->cp_ring;
+ snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-txc-%d",
+ ndev->name, i);
+ }
+}
+
static int xgene_enet_register_irq(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
@@ -626,6 +639,7 @@ static int xgene_enet_register_irq(struct net_device *ndev)
struct xgene_enet_desc_ring *ring;
int ret = 0, i;
+ xgene_enet_set_irq_name(ndev);
for (i = 0; i < pdata->rxq_cnt; i++) {
ring = pdata->rx_ring[i];
irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
@@ -720,20 +734,21 @@ static int xgene_enet_open(struct net_device *ndev)
if (ret)
return ret;
- mac_ops->tx_enable(pdata);
- mac_ops->rx_enable(pdata);
-
xgene_enet_napi_enable(pdata);
ret = xgene_enet_register_irq(ndev);
if (ret)
return ret;
- if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ if (pdata->phy_dev) {
phy_start(pdata->phy_dev);
- else
+ } else {
schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
+ netif_carrier_off(ndev);
+ }
- netif_start_queue(ndev);
+ mac_ops->tx_enable(pdata);
+ mac_ops->rx_enable(pdata);
+ netif_tx_start_all_queues(ndev);
return ret;
}
@@ -744,16 +759,15 @@ static int xgene_enet_close(struct net_device *ndev)
const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
int i;
- netif_stop_queue(ndev);
+ netif_tx_stop_all_queues(ndev);
+ mac_ops->tx_disable(pdata);
+ mac_ops->rx_disable(pdata);
- if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ if (pdata->phy_dev)
phy_stop(pdata->phy_dev);
else
cancel_delayed_work_sync(&pdata->link_work);
- mac_ops->tx_disable(pdata);
- mac_ops->rx_disable(pdata);
-
xgene_enet_free_irq(ndev);
xgene_enet_napi_disable(pdata);
for (i = 0; i < pdata->rxq_cnt; i++)
@@ -761,7 +775,6 @@ static int xgene_enet_close(struct net_device *ndev)
return 0;
}
-
static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
{
struct xgene_enet_pdata *pdata;
@@ -771,7 +784,7 @@ static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
dev = ndev_to_dev(ring->ndev);
pdata->ring_ops->clear(ring);
- dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
+ dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}
static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
@@ -784,6 +797,9 @@ static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
ring = pdata->tx_ring[i];
if (ring) {
xgene_enet_delete_ring(ring);
+ pdata->port_ops->clear(pdata, ring);
+ if (pdata->cq_cnt)
+ xgene_enet_delete_ring(ring->cp_ring);
pdata->tx_ring[i] = NULL;
}
}
@@ -794,6 +810,7 @@ static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
buf_pool = ring->buf_pool;
xgene_enet_delete_bufpool(buf_pool);
xgene_enet_delete_ring(buf_pool);
+ pdata->port_ops->clear(pdata, buf_pool);
xgene_enet_delete_ring(ring);
pdata->rx_ring[i] = NULL;
}
@@ -842,7 +859,7 @@ static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
if (ring->desc_addr) {
pdata->ring_ops->clear(ring);
- dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
+ dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}
devm_kfree(dev, ring);
}
@@ -900,9 +917,10 @@ static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
struct net_device *ndev, u32 ring_num,
enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
{
- struct xgene_enet_desc_ring *ring;
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct device *dev = ndev_to_dev(ndev);
+ struct xgene_enet_desc_ring *ring;
+ void *irq_mbox_addr;
int size;
size = xgene_enet_get_ring_size(dev, cfgsize);
@@ -919,8 +937,8 @@ static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
ring->cfgsize = cfgsize;
ring->id = ring_id;
- ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma,
- GFP_KERNEL);
+ ring->desc_addr = dmam_alloc_coherent(dev, size, &ring->dma,
+ GFP_KERNEL | __GFP_ZERO);
if (!ring->desc_addr) {
devm_kfree(dev, ring);
return NULL;
@@ -928,14 +946,16 @@ static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
ring->size = size;
if (is_irq_mbox_required(pdata, ring)) {
- ring->irq_mbox_addr = dma_zalloc_coherent(dev, INTR_MBOX_SIZE,
- &ring->irq_mbox_dma, GFP_KERNEL);
- if (!ring->irq_mbox_addr) {
- dma_free_coherent(dev, size, ring->desc_addr,
- ring->dma);
+ irq_mbox_addr = dmam_alloc_coherent(dev, INTR_MBOX_SIZE,
+ &ring->irq_mbox_dma,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!irq_mbox_addr) {
+ dmam_free_coherent(dev, size, ring->desc_addr,
+ ring->dma);
devm_kfree(dev, ring);
return NULL;
}
+ ring->irq_mbox_addr = irq_mbox_addr;
}
ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
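The allocations above move ring memory from dma_zalloc_coherent() to the devres-managed dmam_alloc_coherent(), so the memory is released automatically when the device is unbound; GFP_KERNEL | __GFP_ZERO preserves the zeroing the old helper provided. A minimal sketch of the managed pattern (foo_alloc_ring is illustrative):

#include <linux/dma-mapping.h>

static void *foo_alloc_ring(struct device *dev, size_t size, dma_addr_t *dma)
{
	/* Freed automatically on driver detach; an error path may still
	 * release it early with dmam_free_coherent(). */
	return dmam_alloc_coherent(dev, size, dma, GFP_KERNEL | __GFP_ZERO);
}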
@@ -996,6 +1016,7 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
u8 eth_bufnum = pdata->eth_bufnum;
u8 bp_bufnum = pdata->bp_bufnum;
u16 ring_num = pdata->ring_num;
+ __le64 *exp_bufs;
u16 ring_id;
int i, ret, size;
@@ -1027,13 +1048,6 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
rx_ring->nbufpool = NUM_BUFPOOL;
rx_ring->buf_pool = buf_pool;
rx_ring->irq = pdata->irqs[i];
- if (!pdata->cq_cnt) {
- snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
- ndev->name);
- } else {
- snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx%d",
- ndev->name, i);
- }
buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
sizeof(struct sk_buff *),
GFP_KERNEL);
@@ -1060,13 +1074,13 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
}
size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
- tx_ring->exp_bufs = dma_zalloc_coherent(dev, size,
- &dma_exp_bufs,
- GFP_KERNEL);
- if (!tx_ring->exp_bufs) {
+ exp_bufs = dmam_alloc_coherent(dev, size, &dma_exp_bufs,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!exp_bufs) {
ret = -ENOMEM;
goto err;
}
+ tx_ring->exp_bufs = exp_bufs;
pdata->tx_ring[i] = tx_ring;
@@ -1086,8 +1100,6 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i];
cp_ring->index = i;
- snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc%d",
- ndev->name, i);
}
cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
@@ -1283,6 +1295,23 @@ static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
return 0;
}
+static int xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
+{
+ int ret;
+
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII)
+ return 0;
+
+ if (!IS_ENABLED(CONFIG_MDIO_XGENE))
+ return 0;
+
+ ret = xgene_enet_phy_connect(pdata->ndev);
+ if (!ret)
+ pdata->mdio_driver = true;
+
+ return 0;
+}
+
static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
struct platform_device *pdev;
@@ -1368,6 +1397,10 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
if (ret)
return ret;
+ ret = xgene_enet_check_phy_handle(pdata);
+ if (ret)
+ return ret;
+
pdata->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pdata->clk)) {
/* Firmware may have set up the clock already. */
@@ -1447,6 +1480,7 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
}
+ pdata->phy_speed = SPEED_UNKNOWN;
pdata->mac_ops->init(pdata);
return ret;
@@ -1556,28 +1590,12 @@ static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
}
}
-static void xgene_enet_napi_del(struct xgene_enet_pdata *pdata)
-{
- struct napi_struct *napi;
- int i;
-
- for (i = 0; i < pdata->rxq_cnt; i++) {
- napi = &pdata->rx_ring[i]->napi;
- netif_napi_del(napi);
- }
-
- for (i = 0; i < pdata->cq_cnt; i++) {
- napi = &pdata->tx_ring[i]->cp_ring->napi;
- netif_napi_del(napi);
- }
-}
-
static int xgene_enet_probe(struct platform_device *pdev)
{
struct net_device *ndev;
struct xgene_enet_pdata *pdata;
struct device *dev = &pdev->dev;
- const struct xgene_mac_ops *mac_ops;
+ void (*link_state)(struct work_struct *);
const struct of_device_id *of_id;
int ret;
@@ -1635,27 +1653,31 @@ static int xgene_enet_probe(struct platform_device *pdev)
goto err;
}
- ret = register_netdev(ndev);
- if (ret) {
- netdev_err(ndev, "Failed to register netdev\n");
- goto err;
- }
-
ret = xgene_enet_init_hw(pdata);
if (ret)
goto err_netdev;
- mac_ops = pdata->mac_ops;
- if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
- ret = xgene_enet_mdio_config(pdata);
- if (ret)
- goto err_netdev;
- } else {
- INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);
+ link_state = pdata->mac_ops->link_state;
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
+ INIT_DELAYED_WORK(&pdata->link_work, link_state);
+ } else if (!pdata->mdio_driver) {
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ ret = xgene_enet_mdio_config(pdata);
+ else
+ INIT_DELAYED_WORK(&pdata->link_work, link_state);
}
+ if (ret)
+ goto err;
xgene_enet_napi_add(pdata);
+ ret = register_netdev(ndev);
+ if (ret) {
+ netdev_err(ndev, "Failed to register netdev\n");
+ goto err;
+ }
+
return 0;
+
err_netdev:
unregister_netdev(ndev);
err:
@@ -1673,20 +1695,38 @@ static int xgene_enet_remove(struct platform_device *pdev)
mac_ops = pdata->mac_ops;
ndev = pdata->ndev;
- mac_ops->rx_disable(pdata);
- mac_ops->tx_disable(pdata);
+ rtnl_lock();
+ if (netif_running(ndev))
+ dev_close(ndev);
+ rtnl_unlock();
- xgene_enet_napi_del(pdata);
- if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ if (pdata->mdio_driver)
+ xgene_enet_phy_disconnect(pdata);
+ else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
xgene_enet_mdio_remove(pdata);
+
unregister_netdev(ndev);
- xgene_enet_delete_desc_rings(pdata);
pdata->port_ops->shutdown(pdata);
+ xgene_enet_delete_desc_rings(pdata);
free_netdev(ndev);
return 0;
}
+static void xgene_enet_shutdown(struct platform_device *pdev)
+{
+ struct xgene_enet_pdata *pdata;
+
+ pdata = platform_get_drvdata(pdev);
+ if (!pdata)
+ return;
+
+ if (!pdata->ndev)
+ return;
+
+ xgene_enet_remove(pdev);
+}
+
#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_enet_acpi_match[] = {
{ "APMC0D05", XGENE_ENET1},
@@ -1721,6 +1761,7 @@ static struct platform_driver xgene_enet_driver = {
},
.probe = xgene_enet_probe,
.remove = xgene_enet_remove,
+ .shutdown = xgene_enet_shutdown,
};
module_platform_driver(xgene_enet_driver);
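Note the probe reordering above: register_netdev() now runs last, after hardware init, MDIO/link-state setup and NAPI registration, because the stack may invoke ndo_open() the moment the netdev is registered. A sketch of the resulting skeleton; the foo_* names stand in for the driver's own types and helpers:

static int foo_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	int ret;

	ndev = alloc_etherdev(sizeof(struct foo_priv));
	if (!ndev)
		return -ENOMEM;

	ret = foo_init_hw(ndev);	/* rings, MAC, IRQs, ... */
	if (ret)
		goto err_free;

	foo_napi_add(ndev);

	/* Only now: ndo_open() may be called right after this returns. */
	ret = register_netdev(ndev);
	if (ret)
		goto err_free;

	return 0;

err_free:
	free_netdev(ndev);
	return ret;
}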
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 092fbecca..217546e57 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -38,6 +38,7 @@
#include "xgene_enet_hw.h"
#include "xgene_enet_cle.h"
#include "xgene_enet_ring2.h"
+#include "../../../phy/mdio-xgene.h"
#define XGENE_DRV_VERSION "v1.0"
#define XGENE_ENET_MAX_MTU 1536
@@ -140,6 +141,7 @@ struct xgene_mac_ops {
void (*rx_enable)(struct xgene_enet_pdata *pdata);
void (*tx_disable)(struct xgene_enet_pdata *pdata);
void (*rx_disable)(struct xgene_enet_pdata *pdata);
+ void (*set_speed)(struct xgene_enet_pdata *pdata);
void (*set_mac_addr)(struct xgene_enet_pdata *pdata);
void (*set_mss)(struct xgene_enet_pdata *pdata);
void (*link_state)(struct work_struct *work);
@@ -147,6 +149,8 @@ struct xgene_mac_ops {
struct xgene_port_ops {
int (*reset)(struct xgene_enet_pdata *pdata);
+ void (*clear)(struct xgene_enet_pdata *pdata,
+ struct xgene_enet_desc_ring *ring);
void (*cle_bypass)(struct xgene_enet_pdata *pdata,
u32 dst_ring_num, u16 bufpool_id);
void (*shutdown)(struct xgene_enet_pdata *pdata);
@@ -211,6 +215,7 @@ struct xgene_enet_pdata {
u32 mss;
u8 tx_delay;
u8 rx_delay;
+ bool mdio_driver;
};
struct xgene_indirect_ctl {
@@ -220,34 +225,6 @@ struct xgene_indirect_ctl {
void __iomem *cmd_done;
};
-/* Set the specified value into a bit-field defined by its starting position
- * and length within a single u64.
- */
-static inline u64 xgene_enet_set_field_value(int pos, int len, u64 val)
-{
- return (val & ((1ULL << len) - 1)) << pos;
-}
-
-#define SET_VAL(field, val) \
- xgene_enet_set_field_value(field ## _POS, field ## _LEN, val)
-
-#define SET_BIT(field) \
- xgene_enet_set_field_value(field ## _POS, 1, 1)
-
-/* Get the value from a bit-field defined by its starting position
- * and length within the specified u64.
- */
-static inline u64 xgene_enet_get_field_value(int pos, int len, u64 src)
-{
- return (src >> pos) & ((1ULL << len) - 1);
-}
-
-#define GET_VAL(field, src) \
- xgene_enet_get_field_value(field ## _POS, field ## _LEN, src)
-
-#define GET_BIT(field, src) \
- xgene_enet_get_field_value(field ## _POS, 1, src)
-
static inline struct device *ndev_to_dev(struct net_device *ndev)
{
return ndev->dev.parent;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
index 78475512b..d12e9cbae 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -28,6 +28,12 @@ static void xgene_enet_wr_csr(struct xgene_enet_pdata *p, u32 offset, u32 val)
iowrite32(val, p->eth_csr_addr + offset);
}
+static void xgene_enet_wr_clkrst_csr(struct xgene_enet_pdata *p, u32 offset,
+ u32 val)
+{
+ iowrite32(val, p->base_addr + offset);
+}
+
static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *p,
u32 offset, u32 val)
{
@@ -93,6 +99,11 @@ static u32 xgene_enet_rd_diag_csr(struct xgene_enet_pdata *p, u32 offset)
return ioread32(p->eth_diag_csr_addr + offset);
}
+static u32 xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *p, u32 offset)
+{
+ return ioread32(p->mcx_mac_csr_addr + offset);
+}
+
static u32 xgene_enet_rd_indirect(struct xgene_indirect_ctl *ctl, u32 rd_addr)
{
u32 rd_data;
@@ -132,9 +143,17 @@ static u32 xgene_enet_rd_mac(struct xgene_enet_pdata *p, u32 rd_addr)
static int xgene_enet_ecc_init(struct xgene_enet_pdata *p)
{
struct net_device *ndev = p->ndev;
- u32 data;
+ u32 data, shutdown;
int i = 0;
+ shutdown = xgene_enet_rd_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR);
+ data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR);
+
+ if (!shutdown && data == ~0U) {
+ netdev_dbg(ndev, "+ ecc_init done, skipping\n");
+ return 0;
+ }
+
xgene_enet_wr_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0);
do {
usleep_range(100, 110);
@@ -230,21 +249,105 @@ static u32 xgene_enet_link_status(struct xgene_enet_pdata *p)
data = xgene_mii_phy_read(p, INT_PHY_ADDR,
SGMII_BASE_PAGE_ABILITY_ADDR >> 2);
+ if (LINK_SPEED(data) == PHY_SPEED_1000)
+ p->phy_speed = SPEED_1000;
+ else if (LINK_SPEED(data) == PHY_SPEED_100)
+ p->phy_speed = SPEED_100;
+ else
+ p->phy_speed = SPEED_10;
+
return data & LINK_UP;
}
-static void xgene_sgmac_init(struct xgene_enet_pdata *p)
+static void xgene_sgmii_configure(struct xgene_enet_pdata *p)
{
- u32 data, loop = 10;
- u32 offset = p->port_id * 4;
- u32 enet_spare_cfg_reg, rsif_config_reg;
- u32 cfg_bypass_reg, rx_dv_gate_reg;
-
- xgene_sgmac_reset(p);
+ xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2,
+ 0x8000);
+ xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_CONTROL_ADDR >> 2, 0x9000);
+ xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0);
+}
- /* Enable auto-negotiation */
- xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_CONTROL_ADDR >> 2, 0x1000);
+static void xgene_sgmii_tbi_control_reset(struct xgene_enet_pdata *p)
+{
+ xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2,
+ 0x8000);
xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0);
+}
+
+static void xgene_sgmii_reset(struct xgene_enet_pdata *p)
+{
+ u32 value;
+
+ if (p->phy_speed == SPEED_UNKNOWN)
+ return;
+
+ value = xgene_mii_phy_read(p, INT_PHY_ADDR,
+ SGMII_BASE_PAGE_ABILITY_ADDR >> 2);
+ if (!(value & LINK_UP))
+ xgene_sgmii_tbi_control_reset(p);
+}
+
+static void xgene_sgmac_set_speed(struct xgene_enet_pdata *p)
+{
+ u32 icm0_addr, icm2_addr, debug_addr;
+ u32 icm0, icm2, intf_ctl;
+ u32 mc2, value;
+
+ xgene_sgmii_reset(p);
+
+ if (p->enet_id == XGENE_ENET1) {
+ icm0_addr = ICM_CONFIG0_REG_0_ADDR + p->port_id * OFFSET_8;
+ icm2_addr = ICM_CONFIG2_REG_0_ADDR + p->port_id * OFFSET_4;
+ debug_addr = DEBUG_REG_ADDR;
+ } else {
+ icm0_addr = XG_MCX_ICM_CONFIG0_REG_0_ADDR;
+ icm2_addr = XG_MCX_ICM_CONFIG2_REG_0_ADDR;
+ debug_addr = XG_DEBUG_REG_ADDR;
+ }
+
+ icm0 = xgene_enet_rd_mcx_csr(p, icm0_addr);
+ icm2 = xgene_enet_rd_mcx_csr(p, icm2_addr);
+ mc2 = xgene_enet_rd_mac(p, MAC_CONFIG_2_ADDR);
+ intf_ctl = xgene_enet_rd_mac(p, INTERFACE_CONTROL_ADDR);
+
+ switch (p->phy_speed) {
+ case SPEED_10:
+ ENET_INTERFACE_MODE2_SET(&mc2, 1);
+ intf_ctl &= ~(ENET_LHD_MODE | ENET_GHD_MODE);
+ CFG_MACMODE_SET(&icm0, 0);
+ CFG_WAITASYNCRD_SET(&icm2, 500);
+ break;
+ case SPEED_100:
+ ENET_INTERFACE_MODE2_SET(&mc2, 1);
+ intf_ctl &= ~ENET_GHD_MODE;
+ intf_ctl |= ENET_LHD_MODE;
+ CFG_MACMODE_SET(&icm0, 1);
+ CFG_WAITASYNCRD_SET(&icm2, 80);
+ break;
+ default:
+ ENET_INTERFACE_MODE2_SET(&mc2, 2);
+ intf_ctl &= ~ENET_LHD_MODE;
+ intf_ctl |= ENET_GHD_MODE;
+ CFG_MACMODE_SET(&icm0, 2);
+ CFG_WAITASYNCRD_SET(&icm2, 16);
+ value = xgene_enet_rd_csr(p, debug_addr);
+ value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
+ xgene_enet_wr_csr(p, debug_addr, value);
+ break;
+ }
+
+ mc2 |= FULL_DUPLEX2 | PAD_CRC;
+ xgene_enet_wr_mac(p, MAC_CONFIG_2_ADDR, mc2);
+ xgene_enet_wr_mac(p, INTERFACE_CONTROL_ADDR, intf_ctl);
+ xgene_enet_wr_mcx_csr(p, icm0_addr, icm0);
+ xgene_enet_wr_mcx_csr(p, icm2_addr, icm2);
+}
+
+static void xgene_sgmii_enable_autoneg(struct xgene_enet_pdata *p)
+{
+ u32 data, loop = 10;
+
+ xgene_sgmii_configure(p);
while (loop--) {
data = xgene_mii_phy_read(p, INT_PHY_ADDR,
@@ -255,17 +358,27 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p)
}
if (!(data & AUTO_NEG_COMPLETE) || !(data & LINK_STATUS))
netdev_err(p->ndev, "Auto-negotiation failed\n");
+}
+
+static void xgene_sgmac_init(struct xgene_enet_pdata *p)
+{
+ u32 enet_spare_cfg_reg, rsif_config_reg;
+ u32 cfg_bypass_reg, rx_dv_gate_reg;
+ u32 data, offset;
- data = xgene_enet_rd_mac(p, MAC_CONFIG_2_ADDR);
- ENET_INTERFACE_MODE2_SET(&data, 2);
- xgene_enet_wr_mac(p, MAC_CONFIG_2_ADDR, data | FULL_DUPLEX2);
- xgene_enet_wr_mac(p, INTERFACE_CONTROL_ADDR, ENET_GHD_MODE);
+ if (!(p->enet_id == XGENE_ENET2 && p->mdio_driver))
+ xgene_sgmac_reset(p);
+
+ xgene_sgmii_enable_autoneg(p);
+ xgene_sgmac_set_speed(p);
+ xgene_sgmac_set_mac_addr(p);
if (p->enet_id == XGENE_ENET1) {
enet_spare_cfg_reg = ENET_SPARE_CFG_REG_ADDR;
rsif_config_reg = RSIF_CONFIG_REG_ADDR;
cfg_bypass_reg = CFG_BYPASS_ADDR;
- rx_dv_gate_reg = SG_RX_DV_GATE_REG_0_ADDR;
+ offset = p->port_id * OFFSET_4;
+ rx_dv_gate_reg = SG_RX_DV_GATE_REG_0_ADDR + offset;
} else {
enet_spare_cfg_reg = XG_ENET_SPARE_CFG_REG_ADDR;
rsif_config_reg = XG_RSIF_CONFIG_REG_ADDR;
@@ -277,8 +390,6 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p)
data |= MPA_IDLE_WITH_QMI_EMPTY;
xgene_enet_wr_csr(p, enet_spare_cfg_reg, data);
- xgene_sgmac_set_mac_addr(p);
-
/* Adjust MDC clock frequency */
data = xgene_enet_rd_mac(p, MII_MGMT_CONFIG_ADDR);
MGMT_CLOCK_SEL_SET(&data, 7);
@@ -292,7 +403,7 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p)
/* Bypass traffic gating */
xgene_enet_wr_csr(p, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x84);
xgene_enet_wr_csr(p, cfg_bypass_reg, RESUME_TX);
- xgene_enet_wr_mcx_csr(p, rx_dv_gate_reg + offset, RESUME_RX0);
+ xgene_enet_wr_mcx_csr(p, rx_dv_gate_reg, RESUME_RX0);
}
static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set)
@@ -331,17 +442,43 @@ static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p)
static int xgene_enet_reset(struct xgene_enet_pdata *p)
{
+ struct device *dev = &p->pdev->dev;
+
if (!xgene_ring_mgr_init(p))
return -ENODEV;
- if (!IS_ERR(p->clk)) {
- clk_prepare_enable(p->clk);
- clk_disable_unprepare(p->clk);
- clk_prepare_enable(p->clk);
+ if (p->mdio_driver && p->enet_id == XGENE_ENET2) {
+ xgene_enet_config_ring_if_assoc(p);
+ return 0;
}
- xgene_enet_ecc_init(p);
- xgene_enet_config_ring_if_assoc(p);
+ if (p->enet_id == XGENE_ENET2)
+ xgene_enet_wr_clkrst_csr(p, XGENET_CONFIG_REG_ADDR, SGMII_EN);
+
+ if (dev->of_node) {
+ if (!IS_ERR(p->clk)) {
+ clk_prepare_enable(p->clk);
+ udelay(5);
+ clk_disable_unprepare(p->clk);
+ udelay(5);
+ clk_prepare_enable(p->clk);
+ udelay(5);
+ }
+ } else {
+#ifdef CONFIG_ACPI
+ if (acpi_has_method(ACPI_HANDLE(&p->pdev->dev), "_RST"))
+ acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev),
+ "_RST", NULL, NULL);
+ else if (acpi_has_method(ACPI_HANDLE(&p->pdev->dev), "_INI"))
+ acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev),
+ "_INI", NULL, NULL);
+#endif
+ }
+
+ if (!p->port_id) {
+ xgene_enet_ecc_init(p);
+ xgene_enet_config_ring_if_assoc(p);
+ }
return 0;
}
@@ -369,10 +506,53 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
xgene_enet_wr_csr(p, cle_bypass_reg1 + offset, data);
}
+static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
+ struct xgene_enet_desc_ring *ring)
+{
+ u32 addr, val, data;
+
+ val = xgene_enet_ring_bufnum(ring->id);
+
+ if (xgene_enet_is_bufpool(ring->id)) {
+ addr = ENET_CFGSSQMIFPRESET_ADDR;
+ data = BIT(val - 0x20);
+ } else {
+ addr = ENET_CFGSSQMIWQRESET_ADDR;
+ data = BIT(val);
+ }
+
+ xgene_enet_wr_ring_if(pdata, addr, data);
+}
+
static void xgene_enet_shutdown(struct xgene_enet_pdata *p)
{
- if (!IS_ERR(p->clk))
- clk_disable_unprepare(p->clk);
+ struct device *dev = &p->pdev->dev;
+ struct xgene_enet_desc_ring *ring;
+ u32 pb, val;
+ int i;
+
+ pb = 0;
+ for (i = 0; i < p->rxq_cnt; i++) {
+ ring = p->rx_ring[i]->buf_pool;
+
+ val = xgene_enet_ring_bufnum(ring->id);
+ pb |= BIT(val - 0x20);
+ }
+ xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPRESET_ADDR, pb);
+
+ pb = 0;
+ for (i = 0; i < p->txq_cnt; i++) {
+ ring = p->tx_ring[i];
+
+ val = xgene_enet_ring_bufnum(ring->id);
+ pb |= BIT(val);
+ }
+ xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQRESET_ADDR, pb);
+
+ if (dev->of_node) {
+ if (!IS_ERR(p->clk))
+ clk_disable_unprepare(p->clk);
+ }
}
static void xgene_enet_link_state(struct work_struct *work)
@@ -386,10 +566,11 @@ static void xgene_enet_link_state(struct work_struct *work)
if (link) {
if (!netif_carrier_ok(ndev)) {
netif_carrier_on(ndev);
- xgene_sgmac_init(p);
+ xgene_sgmac_set_speed(p);
xgene_sgmac_rx_enable(p);
xgene_sgmac_tx_enable(p);
- netdev_info(ndev, "Link is Up - 1Gbps\n");
+ netdev_info(ndev, "Link is Up - %dMbps\n",
+ p->phy_speed);
}
poll_interval = PHY_POLL_LINK_ON;
} else {
@@ -412,12 +593,14 @@ const struct xgene_mac_ops xgene_sgmac_ops = {
.tx_enable = xgene_sgmac_tx_enable,
.rx_disable = xgene_sgmac_rx_disable,
.tx_disable = xgene_sgmac_tx_disable,
+ .set_speed = xgene_sgmac_set_speed,
.set_mac_addr = xgene_sgmac_set_mac_addr,
.link_state = xgene_enet_link_state
};
const struct xgene_port_ops xgene_sgport_ops = {
.reset = xgene_enet_reset,
+ .clear = xgene_enet_clear,
.cle_bypass = xgene_enet_cle_bypass,
.shutdown = xgene_enet_shutdown
};
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h
index 002df5a67..3d0ba3744 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h
@@ -24,6 +24,7 @@
#define PHY_ADDR(src) (((src)<<8) & GENMASK(12, 8))
#define REG_ADDR(src) ((src) & GENMASK(4, 0))
#define PHY_CONTROL(src) ((src) & GENMASK(15, 0))
+#define LINK_SPEED(src) (((src) & GENMASK(11, 10)) >> 10)
#define INT_PHY_ADDR 0x1e
#define SGMII_TBI_CONTROL_ADDR 0x44
#define SGMII_CONTROL_ADDR 0x00
@@ -34,6 +35,13 @@
#define LINK_UP BIT(15)
#define MPA_IDLE_WITH_QMI_EMPTY BIT(12)
#define SG_RX_DV_GATE_REG_0_ADDR 0x05fc
+#define SGMII_EN 0x1
+
+enum xgene_phy_speed {
+ PHY_SPEED_10,
+ PHY_SPEED_100,
+ PHY_SPEED_1000
+};
extern const struct xgene_mac_ops xgene_sgmac_ops;
extern const struct xgene_port_ops xgene_sgport_ops;
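LINK_SPEED() above extracts bits 11:10 of the SGMII base-page ability word read from the internal PHY, and the link-state code maps the result onto the generic SPEED_* constants. In isolation (sketch; the helper name is illustrative):

#include <linux/ethtool.h>	/* SPEED_10 / SPEED_100 / SPEED_1000 */

static int foo_decode_sgmii_speed(u32 ability)
{
	switch (LINK_SPEED(ability)) {
	case PHY_SPEED_1000:
		return SPEED_1000;
	case PHY_SPEED_100:
		return SPEED_100;
	default:
		return SPEED_10;
	}
}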
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index ba030dc19..9c6ad0dce 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -258,13 +258,29 @@ static void xgene_xgmac_tx_disable(struct xgene_enet_pdata *pdata)
static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
+ struct device *dev = &pdata->pdev->dev;
+
if (!xgene_ring_mgr_init(pdata))
return -ENODEV;
- if (!IS_ERR(pdata->clk)) {
+ if (dev->of_node) {
clk_prepare_enable(pdata->clk);
+ udelay(5);
clk_disable_unprepare(pdata->clk);
+ udelay(5);
clk_prepare_enable(pdata->clk);
+ udelay(5);
+ } else {
+#ifdef CONFIG_ACPI
+ if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), "_RST")) {
+ acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
+ "_RST", NULL, NULL);
+ } else if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev),
+ "_INI")) {
+ acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
+ "_INI", NULL, NULL);
+ }
+#endif
}
xgene_enet_ecc_init(pdata);
@@ -292,8 +308,51 @@ static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata,
static void xgene_enet_shutdown(struct xgene_enet_pdata *pdata)
{
- if (!IS_ERR(pdata->clk))
- clk_disable_unprepare(pdata->clk);
+ struct device *dev = &pdata->pdev->dev;
+ struct xgene_enet_desc_ring *ring;
+ u32 pb, val;
+ int i;
+
+ pb = 0;
+ for (i = 0; i < pdata->rxq_cnt; i++) {
+ ring = pdata->rx_ring[i]->buf_pool;
+
+ val = xgene_enet_ring_bufnum(ring->id);
+ pb |= BIT(val - 0x20);
+ }
+ xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPRESET_ADDR, pb);
+
+ pb = 0;
+ for (i = 0; i < pdata->txq_cnt; i++) {
+ ring = pdata->tx_ring[i];
+
+ val = xgene_enet_ring_bufnum(ring->id);
+ pb |= BIT(val);
+ }
+ xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQRESET_ADDR, pb);
+
+ if (dev->of_node) {
+ if (!IS_ERR(pdata->clk))
+ clk_disable_unprepare(pdata->clk);
+ }
+}
+
+static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
+ struct xgene_enet_desc_ring *ring)
+{
+ u32 addr, val, data;
+
+ val = xgene_enet_ring_bufnum(ring->id);
+
+ if (xgene_enet_is_bufpool(ring->id)) {
+ addr = ENET_CFGSSQMIFPRESET_ADDR;
+ data = BIT(val - 0x20);
+ } else {
+ addr = ENET_CFGSSQMIWQRESET_ADDR;
+ data = BIT(val);
+ }
+
+ xgene_enet_wr_ring_if(pdata, addr, data);
}
static void xgene_enet_link_state(struct work_struct *work)
@@ -340,6 +399,7 @@ const struct xgene_mac_ops xgene_xgmac_ops = {
const struct xgene_port_ops xgene_xgport_ops = {
.reset = xgene_enet_reset,
+ .clear = xgene_enet_clear,
.cle_bypass = xgene_enet_xgcle_bypass,
.shutdown = xgene_enet_shutdown,
};
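xgene_enet_clear() and the reworked shutdown paths (here and in the hw.c and sgmac.c copies earlier in this patch) share one convention: free-pool rings and work-queue rings are reset through separate registers, and a ring's buffer number selects its bit, with free pools offset by 0x20. Folded into one helper for clarity (sketch; foo_ring_reset and the single-ring use of xgene_enet_wr_ring_if are assumptions, not part of the patch):

static void foo_ring_reset(struct xgene_enet_pdata *pdata, u16 ring_id)
{
	u32 bufnum = xgene_enet_ring_bufnum(ring_id);
	u32 addr, data;

	if (xgene_enet_is_bufpool(ring_id)) {
		addr = ENET_CFGSSQMIFPRESET_ADDR;	/* free pools */
		data = BIT(bufnum - 0x20);
	} else {
		addr = ENET_CFGSSQMIWQRESET_ADDR;	/* work queues */
		data = BIT(bufnum);
	}

	xgene_enet_wr_ring_if(pdata, addr, data);
}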
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
index 0a2dca8a1..f1ea485f9 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
@@ -65,9 +65,12 @@
#define XG_CFG_LINK_AGGR_RESUME_0_ADDR 0x0214
#define XG_LINK_STATUS_ADDR 0x0228
#define XG_TSIF_MSS_REG0_ADDR 0x02a4
+#define XG_DEBUG_REG_ADDR 0x0400
#define XG_ENET_SPARE_CFG_REG_ADDR 0x040c
#define XG_ENET_SPARE_CFG_REG_1_ADDR 0x0410
#define XGENET_RX_DV_GATE_REG_0_ADDR 0x0804
+#define XG_MCX_ICM_CONFIG0_REG_0_ADDR 0x00e0
+#define XG_MCX_ICM_CONFIG2_REG_0_ADDR 0x00e8
extern const struct xgene_mac_ops xgene_xgmac_ops;
extern const struct xgene_port_ops xgene_xgport_ops;
diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h
index ca562bc03..e4feb712d 100644
--- a/drivers/net/ethernet/arc/emac.h
+++ b/drivers/net/ethernet/arc/emac.h
@@ -134,7 +134,6 @@ struct arc_emac_priv {
/* Devices */
struct device *dev;
- struct phy_device *phy_dev;
struct mii_bus *bus;
struct arc_emac_mdio_bus_data bus_data;
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index a3a9392a4..b0da9693f 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -47,7 +47,7 @@ static inline int arc_emac_tx_avail(struct arc_emac_priv *priv)
static void arc_emac_adjust_link(struct net_device *ndev)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
- struct phy_device *phy_dev = priv->phy_dev;
+ struct phy_device *phy_dev = ndev->phydev;
unsigned int reg, state_changed = 0;
if (priv->link != phy_dev->link) {
@@ -80,46 +80,6 @@ static void arc_emac_adjust_link(struct net_device *ndev)
}
/**
- * arc_emac_get_settings - Get PHY settings.
- * @ndev: Pointer to net_device structure.
- * @cmd: Pointer to ethtool_cmd structure.
- *
- * This implements ethtool command for getting PHY settings. If PHY could
- * not be found, the function returns -ENODEV. This function calls the
- * relevant PHY ethtool API to get the PHY settings.
- * Issue "ethtool ethX" under linux prompt to execute this function.
- */
-static int arc_emac_get_settings(struct net_device *ndev,
- struct ethtool_cmd *cmd)
-{
- struct arc_emac_priv *priv = netdev_priv(ndev);
-
- return phy_ethtool_gset(priv->phy_dev, cmd);
-}
-
-/**
- * arc_emac_set_settings - Set PHY settings as passed in the argument.
- * @ndev: Pointer to net_device structure.
- * @cmd: Pointer to ethtool_cmd structure.
- *
- * This implements ethtool command for setting various PHY settings. If PHY
- * could not be found, the function returns -ENODEV. This function calls the
- * relevant PHY ethtool API to set the PHY.
- * Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this
- * function.
- */
-static int arc_emac_set_settings(struct net_device *ndev,
- struct ethtool_cmd *cmd)
-{
- struct arc_emac_priv *priv = netdev_priv(ndev);
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- return phy_ethtool_sset(priv->phy_dev, cmd);
-}
-
-/**
* arc_emac_get_drvinfo - Get EMAC driver information.
* @ndev: Pointer to net_device structure.
* @info: Pointer to ethtool_drvinfo structure.
@@ -137,10 +97,10 @@ static void arc_emac_get_drvinfo(struct net_device *ndev,
}
static const struct ethtool_ops arc_emac_ethtool_ops = {
- .get_settings = arc_emac_get_settings,
- .set_settings = arc_emac_set_settings,
.get_drvinfo = arc_emac_get_drvinfo,
.get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
#define FIRST_OR_LAST_MASK (FIRST_MASK | LAST_MASK)
@@ -403,7 +363,7 @@ static void arc_emac_poll_controller(struct net_device *dev)
static int arc_emac_open(struct net_device *ndev)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
- struct phy_device *phy_dev = priv->phy_dev;
+ struct phy_device *phy_dev = ndev->phydev;
int i;
phy_dev->autoneg = AUTONEG_ENABLE;
@@ -474,7 +434,7 @@ static int arc_emac_open(struct net_device *ndev)
/* Enable EMAC */
arc_reg_or(priv, R_CTRL, EN_MASK);
- phy_start_aneg(priv->phy_dev);
+ phy_start_aneg(ndev->phydev);
netif_start_queue(ndev);
@@ -772,6 +732,7 @@ int arc_emac_probe(struct net_device *ndev, int interface)
struct device *dev = ndev->dev.parent;
struct resource res_regs;
struct device_node *phy_node;
+ struct phy_device *phydev = NULL;
struct arc_emac_priv *priv;
const char *mac_addr;
unsigned int id, clock_frequency, irq;
@@ -788,14 +749,16 @@ int arc_emac_probe(struct net_device *ndev, int interface)
err = of_address_to_resource(dev->of_node, 0, &res_regs);
if (err) {
dev_err(dev, "failed to retrieve registers base from device tree\n");
- return -ENODEV;
+ err = -ENODEV;
+ goto out_put_node;
}
/* Get IRQ from device tree */
irq = irq_of_parse_and_map(dev->of_node, 0);
if (!irq) {
dev_err(dev, "failed to retrieve <irq> value from device tree\n");
- return -ENODEV;
+ err = -ENODEV;
+ goto out_put_node;
}
ndev->netdev_ops = &arc_emac_netdev_ops;
@@ -808,8 +771,10 @@ int arc_emac_probe(struct net_device *ndev, int interface)
priv->dev = dev;
priv->regs = devm_ioremap_resource(dev, &res_regs);
- if (IS_ERR(priv->regs))
- return PTR_ERR(priv->regs);
+ if (IS_ERR(priv->regs)) {
+ err = PTR_ERR(priv->regs);
+ goto out_put_node;
+ }
dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs);
@@ -817,7 +782,7 @@ int arc_emac_probe(struct net_device *ndev, int interface)
err = clk_prepare_enable(priv->clk);
if (err) {
dev_err(dev, "failed to enable clock\n");
- return err;
+ goto out_put_node;
}
clock_frequency = clk_get_rate(priv->clk);
@@ -826,7 +791,8 @@ int arc_emac_probe(struct net_device *ndev, int interface)
if (of_property_read_u32(dev->of_node, "clock-frequency",
&clock_frequency)) {
dev_err(dev, "failed to retrieve <clock-frequency> from device tree\n");
- return -EINVAL;
+ err = -EINVAL;
+ goto out_put_node;
}
}
@@ -887,16 +853,16 @@ int arc_emac_probe(struct net_device *ndev, int interface)
goto out_clken;
}
- priv->phy_dev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0,
- interface);
- if (!priv->phy_dev) {
+ phydev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0,
+ interface);
+ if (!phydev) {
dev_err(dev, "of_phy_connect() failed\n");
err = -ENODEV;
goto out_mdio;
}
dev_info(dev, "connected to %s phy with id 0x%x\n",
- priv->phy_dev->drv->name, priv->phy_dev->phy_id);
+ phydev->drv->name, phydev->phy_id);
netif_napi_add(ndev, &priv->napi, arc_emac_poll, ARC_EMAC_NAPI_WEIGHT);
@@ -906,17 +872,20 @@ int arc_emac_probe(struct net_device *ndev, int interface)
goto out_netif_api;
}
+ of_node_put(phy_node);
return 0;
out_netif_api:
netif_napi_del(&priv->napi);
- phy_disconnect(priv->phy_dev);
- priv->phy_dev = NULL;
+ phy_disconnect(phydev);
out_mdio:
arc_mdio_remove(priv);
out_clken:
if (priv->clk)
clk_disable_unprepare(priv->clk);
+out_put_node:
+ of_node_put(phy_node);
+
return err;
}
EXPORT_SYMBOL_GPL(arc_emac_probe);
@@ -925,8 +894,7 @@ int arc_emac_remove(struct net_device *ndev)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
- phy_disconnect(priv->phy_dev);
- priv->phy_dev = NULL;
+ phy_disconnect(ndev->phydev);
arc_mdio_remove(priv);
unregister_netdev(ndev);
netif_napi_del(&priv->napi);
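The arc_emac_probe() rework above is a reference-count fix as much as a phydev conversion: of_parse_phandle() returns the phy node with its refcount elevated, so every exit path, success included, must drop it with of_node_put(). The canonical shape (sketch; illustrative names):

#include <linux/of.h>

static int foo_connect_phy(struct device *dev)
{
	struct device_node *phy_node;

	phy_node = of_parse_phandle(dev->of_node, "phy", 0);
	if (!phy_node)
		return -ENODEV;

	/* ... of_phy_connect() and further setup using phy_node ... */

	of_node_put(phy_node);	/* drop the of_parse_phandle() reference */
	return 0;
}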
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index e708e360a..4eb17daef 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1251,7 +1251,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct alx_priv *alx;
struct alx_hw *hw;
bool phy_configured;
- int bars, err;
+ int err;
err = pci_enable_device_mem(pdev);
if (err)
@@ -1271,11 +1271,10 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
- bars = pci_select_bars(pdev, IORESOURCE_MEM);
- err = pci_request_selected_regions(pdev, bars, alx_drv_name);
+ err = pci_request_mem_regions(pdev, alx_drv_name);
if (err) {
dev_err(&pdev->dev,
- "pci_request_selected_regions failed(bars:%d)\n", bars);
+ "pci_request_mem_regions failed\n");
goto out_pci_disable;
}
@@ -1401,7 +1400,7 @@ out_unmap:
out_free_netdev:
free_netdev(netdev);
out_pci_release:
- pci_release_selected_regions(pdev, bars);
+ pci_release_mem_regions(pdev);
out_pci_disable:
pci_disable_device(pdev);
return err;
@@ -1420,8 +1419,7 @@ static void alx_remove(struct pci_dev *pdev)
unregister_netdev(alx->dev);
iounmap(hw->hw_addr);
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
@@ -1547,6 +1545,8 @@ static const struct pci_device_id alx_pci_tbl[] = {
.driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400),
.driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
+ { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2500),
+ .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
.driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
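The alx conversion above uses pci_request_mem_regions()/pci_release_mem_regions(), helpers that wrap pci_select_bars(IORESOURCE_MEM) plus pci_request_selected_regions(), so the driver no longer has to carry the BAR mask around just to release the regions later. Sketch (illustrative names):

#include <linux/pci.h>

static int foo_claim_bars(struct pci_dev *pdev, const char *name)
{
	int err;

	/* All memory BARs in one call; no BAR mask to remember. */
	err = pci_request_mem_regions(pdev, name);
	if (err)
		return err;

	/* ... ioremap the BARs ... */
	return 0;
}

static void foo_release_bars(struct pci_dev *pdev)
{
	pci_release_mem_regions(pdev);
}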
diff --git a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h
index 0959e6824..1fc2d8522 100644
--- a/drivers/net/ethernet/atheros/alx/reg.h
+++ b/drivers/net/ethernet/atheros/alx/reg.h
@@ -38,6 +38,7 @@
#define ALX_DEV_ID_AR8161 0x1091
#define ALX_DEV_ID_E2200 0xe091
#define ALX_DEV_ID_E2400 0xe0a1
+#define ALX_DEV_ID_E2500 0xe0b1
#define ALX_DEV_ID_AR8162 0x1090
#define ALX_DEV_ID_AR8171 0x10A1
#define ALX_DEV_ID_AR8172 0x10A0
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index 1a3555d03..b047fd607 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -632,7 +632,7 @@ static void nb8800_mac_config(struct net_device *dev)
static void nb8800_pause_config(struct net_device *dev)
{
struct nb8800_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = dev->phydev;
u32 rxcr;
if (priv->pause_aneg) {
@@ -665,7 +665,7 @@ static void nb8800_pause_config(struct net_device *dev)
static void nb8800_link_reconfigure(struct net_device *dev)
{
struct nb8800_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = dev->phydev;
int change = 0;
if (phydev->link) {
@@ -691,7 +691,7 @@ static void nb8800_link_reconfigure(struct net_device *dev)
}
if (change)
- phy_print_status(priv->phydev);
+ phy_print_status(phydev);
}
static void nb8800_update_mac_addr(struct net_device *dev)
@@ -936,9 +936,10 @@ static int nb8800_dma_stop(struct net_device *dev)
static void nb8800_pause_adv(struct net_device *dev)
{
struct nb8800_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
u32 adv = 0;
- if (!priv->phydev)
+ if (!phydev)
return;
if (priv->pause_rx)
@@ -946,13 +947,14 @@ static void nb8800_pause_adv(struct net_device *dev)
if (priv->pause_tx)
adv ^= ADVERTISED_Asym_Pause;
- priv->phydev->supported |= adv;
- priv->phydev->advertising |= adv;
+ phydev->supported |= adv;
+ phydev->advertising |= adv;
}
static int nb8800_open(struct net_device *dev)
{
struct nb8800_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev;
int err;
/* clear any pending interrupts */
@@ -970,10 +972,10 @@ static int nb8800_open(struct net_device *dev)
nb8800_mac_rx(dev, true);
nb8800_mac_tx(dev, true);
- priv->phydev = of_phy_connect(dev, priv->phy_node,
- nb8800_link_reconfigure, 0,
- priv->phy_mode);
- if (!priv->phydev)
+ phydev = of_phy_connect(dev, priv->phy_node,
+ nb8800_link_reconfigure, 0,
+ priv->phy_mode);
+ if (!phydev)
goto err_free_irq;
nb8800_pause_adv(dev);
@@ -983,7 +985,7 @@ static int nb8800_open(struct net_device *dev)
netif_start_queue(dev);
nb8800_start_rx(dev);
- phy_start(priv->phydev);
+ phy_start(phydev);
return 0;
@@ -998,8 +1000,9 @@ err_free_dma:
static int nb8800_stop(struct net_device *dev)
{
struct nb8800_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
- phy_stop(priv->phydev);
+ phy_stop(phydev);
netif_stop_queue(dev);
napi_disable(&priv->napi);
@@ -1008,8 +1011,7 @@ static int nb8800_stop(struct net_device *dev)
nb8800_mac_rx(dev, false);
nb8800_mac_tx(dev, false);
- phy_disconnect(priv->phydev);
- priv->phydev = NULL;
+ phy_disconnect(phydev);
free_irq(dev->irq, dev);
@@ -1020,9 +1022,7 @@ static int nb8800_stop(struct net_device *dev)
static int nb8800_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct nb8800_priv *priv = netdev_priv(dev);
-
- return phy_mii_ioctl(priv->phydev, rq, cmd);
+ return phy_mii_ioctl(dev->phydev, rq, cmd);
}
static const struct net_device_ops nb8800_netdev_ops = {
@@ -1036,34 +1036,14 @@ static const struct net_device_ops nb8800_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int nb8800_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
-
- if (!priv->phydev)
- return -ENODEV;
-
- return phy_ethtool_gset(priv->phydev, cmd);
-}
-
-static int nb8800_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
-
- if (!priv->phydev)
- return -ENODEV;
-
- return phy_ethtool_sset(priv->phydev, cmd);
-}
-
static int nb8800_nway_reset(struct net_device *dev)
{
- struct nb8800_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
- if (!priv->phydev)
+ if (!phydev)
return -ENODEV;
- return genphy_restart_aneg(priv->phydev);
+ return genphy_restart_aneg(phydev);
}
static void nb8800_get_pauseparam(struct net_device *dev,
@@ -1080,6 +1060,7 @@ static int nb8800_set_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *pp)
{
struct nb8800_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
priv->pause_aneg = pp->autoneg;
priv->pause_rx = pp->rx_pause;
@@ -1089,8 +1070,8 @@ static int nb8800_set_pauseparam(struct net_device *dev,
if (!priv->pause_aneg)
nb8800_pause_config(dev);
- else if (priv->phydev)
- phy_start_aneg(priv->phydev);
+ else if (phydev)
+ phy_start_aneg(phydev);
return 0;
}
@@ -1183,8 +1164,6 @@ static void nb8800_get_ethtool_stats(struct net_device *dev,
}
static const struct ethtool_ops nb8800_ethtool_ops = {
- .get_settings = nb8800_get_settings,
- .set_settings = nb8800_set_settings,
.nway_reset = nb8800_nway_reset,
.get_link = ethtool_op_get_link,
.get_pauseparam = nb8800_get_pauseparam,
@@ -1192,6 +1171,8 @@ static const struct ethtool_ops nb8800_ethtool_ops = {
.get_sset_count = nb8800_get_sset_count,
.get_strings = nb8800_get_strings,
.get_ethtool_stats = nb8800_get_ethtool_stats,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static int nb8800_hw_init(struct net_device *dev)
@@ -1438,7 +1419,7 @@ static int nb8800_probe(struct platform_device *pdev)
if (ops && ops->reset) {
ret = ops->reset(dev);
if (ret)
- goto err_free_dev;
+ goto err_disable_clk;
}
bus = devm_mdiobus_alloc(&pdev->dev);
@@ -1523,6 +1504,7 @@ static int nb8800_probe(struct platform_device *pdev)
err_free_dma:
nb8800_dma_free(dev);
err_free_bus:
+ of_node_put(priv->phy_node);
mdiobus_unregister(bus);
err_disable_clk:
clk_disable_unprepare(priv->clk);
@@ -1538,6 +1520,7 @@ static int nb8800_remove(struct platform_device *pdev)
struct nb8800_priv *priv = netdev_priv(ndev);
unregister_netdev(ndev);
+ of_node_put(priv->phy_node);
mdiobus_unregister(priv->mii_bus);
diff --git a/drivers/net/ethernet/aurora/nb8800.h b/drivers/net/ethernet/aurora/nb8800.h
index e5adbc2aa..6ec4a956e 100644
--- a/drivers/net/ethernet/aurora/nb8800.h
+++ b/drivers/net/ethernet/aurora/nb8800.h
@@ -284,7 +284,6 @@ struct nb8800_priv {
struct mii_bus *mii_bus;
struct device_node *phy_node;
- struct phy_device *phydev;
/* PHY connection type from DT */
int phy_mode;
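
These nb8800 hunks follow the tree-wide conversion away from caching the PHY pointer in driver private data: of_phy_connect() already stores it in net_device->phydev, so the private field can go and ethtool can use the generic phylib link_ksettings helpers. A minimal sketch of the resulting shape, with hypothetical example_* names standing in for the driver's own (the phylib calls themselves are real):

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

struct example_priv {
	struct device_node *phy_node;
	phy_interface_t phy_mode;
};

/* phylib stores the attached PHY in dev->phydev, so neither the link
 * handler nor the ethtool helpers need a driver-private copy.
 */
static void example_link_reconfigure(struct net_device *dev)
{
	phy_print_status(dev->phydev);
}

static int example_open(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;

	phydev = of_phy_connect(dev, priv->phy_node,
				example_link_reconfigure, 0,
				priv->phy_mode);
	if (!phydev)
		return -ENODEV;

	phy_start(dev->phydev);	/* same pointer as phydev */
	return 0;
}

static const struct ethtool_ops example_ethtool_ops = {
	/* generic phylib helpers replace per-driver {get,set}_settings */
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};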
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 18042c246..bd8c80c0b 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -139,31 +139,19 @@ config BNX2X_SRIOV
Virtualization support in the 578xx and 57712 products. This
allows for virtual function acceleration in virtual environments.
-config BNX2X_VXLAN
- bool "Virtual eXtensible Local Area Network support"
- default n
- depends on BNX2X && VXLAN && !(BNX2X=y && VXLAN=m)
- ---help---
- This enables hardware offload support for VXLAN protocol over the
- NetXtremeII series adapters.
- Say Y here if you want to enable hardware offload support for
- Virtual eXtensible Local Area Network (VXLAN) in the driver.
-
-config BNX2X_GENEVE
- bool "Generic Network Virtualization Encapsulation (GENEVE) support"
- depends on BNX2X && GENEVE && !(BNX2X=y && GENEVE=m)
- ---help---
- This allows one to create GENEVE virtual interfaces that provide
- Layer 2 Networks over Layer 3 Networks. GENEVE is often used
- to tunnel virtual network infrastructure in virtualized environments.
- Say Y here if you want to enable hardware offload support for
- Generic Network Virtualization Encapsulation (GENEVE) in the driver.
-
config BGMAC
- tristate "BCMA bus GBit core support"
+ tristate
+ help
+ This enables support for the integrated Ethernet controller found
+ in many Broadcom (mostly iProc) SoCs. An appropriate bus interface
+ driver needs to be enabled to select this.
+
+config BGMAC_BCMA
+ tristate "Broadcom iProc GBit BCMA support"
depends on BCMA && BCMA_HOST_SOC
depends on HAS_DMA
depends on BCM47XX || ARCH_BCM_5301X || COMPILE_TEST
+ select BGMAC
select PHYLIB
select FIXED_PHY
---help---
@@ -172,6 +160,19 @@ config BGMAC
Using this driver on the BCM4706 also requires enabling
BCMA_DRIVER_GMAC_CMN to make it work.
+config BGMAC_PLATFORM
+ tristate "Broadcom iProc GBit platform support"
+ depends on HAS_DMA
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
+ depends on OF
+ select BGMAC
+ select PHYLIB
+ select FIXED_PHY
+ default ARCH_BCM_IPROC
+ ---help---
+ Say Y here if you want to use the Broadcom iProc Gigabit Ethernet
+ controller through the generic platform interface.
+
config SYSTEMPORT
tristate "Broadcom SYSTEMPORT internal MAC support"
depends on OF
@@ -186,7 +187,6 @@ config SYSTEMPORT
config BNXT
tristate "Broadcom NetXtreme-C/E support"
depends on PCI
- depends on VXLAN || VXLAN=n
select FW_LOADER
select LIBCRC32C
---help---
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index 00584d78b..79f2372c6 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -11,5 +11,7 @@ obj-$(CONFIG_BNX2X) += bnx2x/
obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
obj-$(CONFIG_TIGON3) += tg3.o
obj-$(CONFIG_BGMAC) += bgmac.o
+obj-$(CONFIG_BGMAC_BCMA) += bgmac-bcma.o bgmac-bcma-mdio.o
+obj-$(CONFIG_BGMAC_PLATFORM) += bgmac-platform.o
obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
obj-$(CONFIG_BNXT) += bnxt/
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 87c6b5bdd..6c8bc5fad 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1859,7 +1859,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
} else {
/* run platform code to initialize PHY device */
- if (pd->mii_config &&
+ if (pd && pd->mii_config &&
pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
bcm_enet_mdio_write_mii)) {
dev_err(&pdev->dev, "unable to configure mdio bus\n");
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index bfa26a259..b2d30863c 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -96,28 +96,6 @@ static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
}
/* Ethtool operations */
-static int bcm_sysport_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- struct bcm_sysport_priv *priv = netdev_priv(dev);
-
- if (!netif_running(dev))
- return -EINVAL;
-
- return phy_ethtool_sset(priv->phydev, cmd);
-}
-
-static int bcm_sysport_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- struct bcm_sysport_priv *priv = netdev_priv(dev);
-
- if (!netif_running(dev))
- return -EINVAL;
-
- return phy_ethtool_gset(priv->phydev, cmd);
-}
-
static int bcm_sysport_set_rx_csum(struct net_device *dev,
netdev_features_t wanted)
{
@@ -1127,7 +1105,7 @@ static void bcm_sysport_tx_timeout(struct net_device *dev)
static void bcm_sysport_adj_link(struct net_device *dev)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = dev->phydev;
unsigned int changed = 0;
u32 cmd_bits = 0, reg;
@@ -1182,7 +1160,7 @@ static void bcm_sysport_adj_link(struct net_device *dev)
umac_writel(priv, reg, UMAC_CMD);
}
- phy_print_status(priv->phydev);
+ phy_print_status(phydev);
}
static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
@@ -1525,7 +1503,7 @@ static void bcm_sysport_netif_start(struct net_device *dev)
/* Enable RX interrupt and TX ring full interrupt */
intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
- phy_start(priv->phydev);
+ phy_start(dev->phydev);
/* Enable TX interrupts for the 32 TXQs */
intrl2_1_mask_clear(priv, 0xffffffff);
@@ -1546,6 +1524,7 @@ static void rbuf_init(struct bcm_sysport_priv *priv)
static int bcm_sysport_open(struct net_device *dev)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev;
unsigned int i;
int ret;
@@ -1570,9 +1549,9 @@ static int bcm_sysport_open(struct net_device *dev)
/* Read CRC forward */
priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
- priv->phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
- 0, priv->phy_interface);
- if (!priv->phydev) {
+ phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
+ 0, priv->phy_interface);
+ if (!phydev) {
netdev_err(dev, "could not attach to PHY\n");
return -ENODEV;
}
@@ -1650,7 +1629,7 @@ out_free_tx_ring:
out_free_irq0:
free_irq(priv->irq0, dev);
out_phy_disconnect:
- phy_disconnect(priv->phydev);
+ phy_disconnect(phydev);
return ret;
}
@@ -1661,7 +1640,7 @@ static void bcm_sysport_netif_stop(struct net_device *dev)
/* stop all software from updating hardware */
netif_tx_stop_all_queues(dev);
napi_disable(&priv->napi);
- phy_stop(priv->phydev);
+ phy_stop(dev->phydev);
/* mask all interrupts */
intrl2_0_mask_set(priv, 0xffffffff);
@@ -1708,14 +1687,12 @@ static int bcm_sysport_stop(struct net_device *dev)
free_irq(priv->irq1, dev);
/* Disconnect from PHY */
- phy_disconnect(priv->phydev);
+ phy_disconnect(dev->phydev);
return 0;
}
static struct ethtool_ops bcm_sysport_ethtool_ops = {
- .get_settings = bcm_sysport_get_settings,
- .set_settings = bcm_sysport_set_settings,
.get_drvinfo = bcm_sysport_get_drvinfo,
.get_msglevel = bcm_sysport_get_msglvl,
.set_msglevel = bcm_sysport_set_msglvl,
@@ -1727,6 +1704,8 @@ static struct ethtool_ops bcm_sysport_ethtool_ops = {
.set_wol = bcm_sysport_set_wol,
.get_coalesce = bcm_sysport_get_coalesce,
.set_coalesce = bcm_sysport_set_coalesce,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static const struct net_device_ops bcm_sysport_netdev_ops = {
@@ -1929,7 +1908,7 @@ static int bcm_sysport_suspend(struct device *d)
bcm_sysport_netif_stop(dev);
- phy_suspend(priv->phydev);
+ phy_suspend(dev->phydev);
netif_device_detach(dev);
@@ -2055,7 +2034,7 @@ static int bcm_sysport_resume(struct device *d)
goto out_free_rx_ring;
}
- phy_resume(priv->phydev);
+ phy_resume(dev->phydev);
bcm_sysport_netif_start(dev);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index f28bf545d..1c82e3da6 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -670,7 +670,6 @@ struct bcm_sysport_priv {
/* PHY device */
struct device_node *phy_dn;
- struct phy_device *phydev;
phy_interface_t phy_interface;
int old_pause;
int old_link;
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
new file mode 100644
index 000000000..7c19c8e2b
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
@@ -0,0 +1,266 @@
+/*
+ * Driver for (BCM4706)? GBit MAC core on BCMA bus.
+ *
+ * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bcma/bcma.h>
+#include <linux/brcmphy.h>
+#include "bgmac.h"
+
+struct bcma_mdio {
+ struct bcma_device *core;
+ u8 phyaddr;
+};
+
+static bool bcma_mdio_wait_value(struct bcma_device *core, u16 reg, u32 mask,
+ u32 value, int timeout)
+{
+ u32 val;
+ int i;
+
+ for (i = 0; i < timeout / 10; i++) {
+ val = bcma_read32(core, reg);
+ if ((val & mask) == value)
+ return true;
+ udelay(10);
+ }
+ dev_err(&core->dev, "Timeout waiting for reg 0x%X\n", reg);
+ return false;
+}
+
+/**************************************************
+ * PHY ops
+ **************************************************/
+
+static u16 bcma_mdio_phy_read(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg)
+{
+ struct bcma_device *core;
+ u16 phy_access_addr;
+ u16 phy_ctl_addr;
+ u32 tmp;
+
+ BUILD_BUG_ON(BGMAC_PA_DATA_MASK != BCMA_GMAC_CMN_PA_DATA_MASK);
+ BUILD_BUG_ON(BGMAC_PA_ADDR_MASK != BCMA_GMAC_CMN_PA_ADDR_MASK);
+ BUILD_BUG_ON(BGMAC_PA_ADDR_SHIFT != BCMA_GMAC_CMN_PA_ADDR_SHIFT);
+ BUILD_BUG_ON(BGMAC_PA_REG_MASK != BCMA_GMAC_CMN_PA_REG_MASK);
+ BUILD_BUG_ON(BGMAC_PA_REG_SHIFT != BCMA_GMAC_CMN_PA_REG_SHIFT);
+ BUILD_BUG_ON(BGMAC_PA_WRITE != BCMA_GMAC_CMN_PA_WRITE);
+ BUILD_BUG_ON(BGMAC_PA_START != BCMA_GMAC_CMN_PA_START);
+ BUILD_BUG_ON(BGMAC_PC_EPA_MASK != BCMA_GMAC_CMN_PC_EPA_MASK);
+ BUILD_BUG_ON(BGMAC_PC_MCT_MASK != BCMA_GMAC_CMN_PC_MCT_MASK);
+ BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT);
+ BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE);
+
+ if (bcma_mdio->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
+ core = bcma_mdio->core->bus->drv_gmac_cmn.core;
+ phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
+ phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
+ } else {
+ core = bcma_mdio->core;
+ phy_access_addr = BGMAC_PHY_ACCESS;
+ phy_ctl_addr = BGMAC_PHY_CNTL;
+ }
+
+ tmp = bcma_read32(core, phy_ctl_addr);
+ tmp &= ~BGMAC_PC_EPA_MASK;
+ tmp |= phyaddr;
+ bcma_write32(core, phy_ctl_addr, tmp);
+
+ tmp = BGMAC_PA_START;
+ tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
+ tmp |= reg << BGMAC_PA_REG_SHIFT;
+ bcma_write32(core, phy_access_addr, tmp);
+
+ if (!bcma_mdio_wait_value(core, phy_access_addr, BGMAC_PA_START, 0,
+ 1000)) {
+ dev_err(&core->dev, "Reading PHY %d register 0x%X failed\n",
+ phyaddr, reg);
+ return 0xffff;
+ }
+
+ return bcma_read32(core, phy_access_addr) & BGMAC_PA_DATA_MASK;
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */
+static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg,
+ u16 value)
+{
+ struct bcma_device *core;
+ u16 phy_access_addr;
+ u16 phy_ctl_addr;
+ u32 tmp;
+
+ if (bcma_mdio->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
+ core = bcma_mdio->core->bus->drv_gmac_cmn.core;
+ phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
+ phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
+ } else {
+ core = bcma_mdio->core;
+ phy_access_addr = BGMAC_PHY_ACCESS;
+ phy_ctl_addr = BGMAC_PHY_CNTL;
+ }
+
+ tmp = bcma_read32(core, phy_ctl_addr);
+ tmp &= ~BGMAC_PC_EPA_MASK;
+ tmp |= phyaddr;
+ bcma_write32(core, phy_ctl_addr, tmp);
+
+ bcma_write32(bcma_mdio->core, BGMAC_INT_STATUS, BGMAC_IS_MDIO);
+ if (bcma_read32(bcma_mdio->core, BGMAC_INT_STATUS) & BGMAC_IS_MDIO)
+ dev_warn(&core->dev, "Error setting MDIO int\n");
+
+ tmp = BGMAC_PA_START;
+ tmp |= BGMAC_PA_WRITE;
+ tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
+ tmp |= reg << BGMAC_PA_REG_SHIFT;
+ tmp |= value;
+ bcma_write32(core, phy_access_addr, tmp);
+
+ if (!bcma_mdio_wait_value(core, phy_access_addr, BGMAC_PA_START, 0,
+ 1000)) {
+ dev_err(&core->dev, "Writing to PHY %d register 0x%X failed\n",
+ phyaddr, reg);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
+static void bcma_mdio_phy_init(struct bcma_mdio *bcma_mdio)
+{
+ struct bcma_chipinfo *ci = &bcma_mdio->core->bus->chipinfo;
+ u8 i;
+
+ if (ci->id == BCMA_CHIP_ID_BCM5356) {
+ for (i = 0; i < 5; i++) {
+ bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x008b);
+ bcma_mdio_phy_write(bcma_mdio, i, 0x15, 0x0100);
+ bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f);
+ bcma_mdio_phy_write(bcma_mdio, i, 0x12, 0x2aaa);
+ bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b);
+ }
+ }
+ if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) ||
+ (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) ||
+ (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) {
+ struct bcma_drv_cc *cc = &bcma_mdio->core->bus->drv_cc;
+
+ bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0);
+ bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0);
+ for (i = 0; i < 5; i++) {
+ bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f);
+ bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x5284);
+ bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b);
+ bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x0010);
+ bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f);
+ bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x5296);
+ bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x1073);
+ bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x9073);
+ bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x52b6);
+ bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x9273);
+ bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b);
+ }
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */
+static int bcma_mdio_phy_reset(struct mii_bus *bus)
+{
+ struct bcma_mdio *bcma_mdio = bus->priv;
+ u8 phyaddr = bcma_mdio->phyaddr;
+
+ if (bcma_mdio->phyaddr == BGMAC_PHY_NOREGS)
+ return 0;
+
+ bcma_mdio_phy_write(bcma_mdio, phyaddr, MII_BMCR, BMCR_RESET);
+ udelay(100);
+ if (bcma_mdio_phy_read(bcma_mdio, phyaddr, MII_BMCR) & BMCR_RESET)
+ dev_err(&bcma_mdio->core->dev, "PHY reset failed\n");
+ bcma_mdio_phy_init(bcma_mdio);
+
+ return 0;
+}
+
+/**************************************************
+ * MII
+ **************************************************/
+
+static int bcma_mdio_mii_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+ return bcma_mdio_phy_read(bus->priv, mii_id, regnum);
+}
+
+static int bcma_mdio_mii_write(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
+{
+ return bcma_mdio_phy_write(bus->priv, mii_id, regnum, value);
+}
+
+struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr)
+{
+ struct bcma_mdio *bcma_mdio;
+ struct mii_bus *mii_bus;
+ int err;
+
+ bcma_mdio = kzalloc(sizeof(*bcma_mdio), GFP_KERNEL);
+ if (!bcma_mdio)
+ return ERR_PTR(-ENOMEM);
+
+ mii_bus = mdiobus_alloc();
+ if (!mii_bus) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ mii_bus->name = "bcma_mdio mii bus";
+ sprintf(mii_bus->id, "%s-%d-%d", "bcma_mdio", core->bus->num,
+ core->core_unit);
+ mii_bus->priv = bcma_mdio;
+ mii_bus->read = bcma_mdio_mii_read;
+ mii_bus->write = bcma_mdio_mii_write;
+ mii_bus->reset = bcma_mdio_phy_reset;
+ mii_bus->parent = &core->dev;
+ mii_bus->phy_mask = ~(1 << phyaddr);
+
+ bcma_mdio->core = core;
+ bcma_mdio->phyaddr = phyaddr;
+
+ err = mdiobus_register(mii_bus);
+ if (err) {
+ dev_err(&core->dev, "Registration of mii bus failed\n");
+ goto err_free_bus;
+ }
+
+ return mii_bus;
+
+err_free_bus:
+ mdiobus_free(mii_bus);
+err:
+ kfree(bcma_mdio);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(bcma_mdio_mii_register);
+
+void bcma_mdio_mii_unregister(struct mii_bus *mii_bus)
+{
+ struct bcma_mdio *bcma_mdio;
+
+ if (!mii_bus)
+ return;
+
+ bcma_mdio = mii_bus->priv;
+
+ mdiobus_unregister(mii_bus);
+ mdiobus_free(mii_bus);
+ kfree(bcma_mdio);
+}
+EXPORT_SYMBOL_GPL(bcma_mdio_mii_unregister);
+
+MODULE_AUTHOR("Rafał Miłecki");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
new file mode 100644
index 000000000..625235db6
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -0,0 +1,315 @@
+/*
+ * Driver for (BCM4706)? GBit MAC core on BCMA bus.
+ *
+ * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bcma/bcma.h>
+#include <linux/brcmphy.h>
+#include <linux/etherdevice.h>
+#include "bgmac.h"
+
+static inline bool bgmac_is_bcm4707_family(struct bcma_device *core)
+{
+ switch (core->bus->chipinfo.id) {
+ case BCMA_CHIP_ID_BCM4707:
+ case BCMA_CHIP_ID_BCM47094:
+ case BCMA_CHIP_ID_BCM53018:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**************************************************
+ * BCMA bus ops
+ **************************************************/
+
+static u32 bcma_bgmac_read(struct bgmac *bgmac, u16 offset)
+{
+ return bcma_read32(bgmac->bcma.core, offset);
+}
+
+static void bcma_bgmac_write(struct bgmac *bgmac, u16 offset, u32 value)
+{
+ bcma_write32(bgmac->bcma.core, offset, value);
+}
+
+static u32 bcma_bgmac_idm_read(struct bgmac *bgmac, u16 offset)
+{
+ return bcma_aread32(bgmac->bcma.core, offset);
+}
+
+static void bcma_bgmac_idm_write(struct bgmac *bgmac, u16 offset, u32 value)
+{
+ return bcma_awrite32(bgmac->bcma.core, offset, value);
+}
+
+static bool bcma_bgmac_clk_enabled(struct bgmac *bgmac)
+{
+ return bcma_core_is_enabled(bgmac->bcma.core);
+}
+
+static void bcma_bgmac_clk_enable(struct bgmac *bgmac, u32 flags)
+{
+ bcma_core_enable(bgmac->bcma.core, flags);
+}
+
+static void bcma_bgmac_cco_ctl_maskset(struct bgmac *bgmac, u32 offset,
+ u32 mask, u32 set)
+{
+ struct bcma_drv_cc *cc = &bgmac->bcma.core->bus->drv_cc;
+
+ bcma_chipco_chipctl_maskset(cc, offset, mask, set);
+}
+
+static u32 bcma_bgmac_get_bus_clock(struct bgmac *bgmac)
+{
+ struct bcma_drv_cc *cc = &bgmac->bcma.core->bus->drv_cc;
+
+ return bcma_pmu_get_bus_clock(cc);
+}
+
+static void bcma_bgmac_cmn_maskset32(struct bgmac *bgmac, u16 offset, u32 mask,
+ u32 set)
+{
+ bcma_maskset32(bgmac->bcma.cmn, offset, mask, set);
+}
+
+static const struct bcma_device_id bgmac_bcma_tbl[] = {
+ BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT,
+ BCMA_ANY_REV, BCMA_ANY_CLASS),
+ BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV,
+ BCMA_ANY_CLASS),
+ {},
+};
+MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl);
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */
+static int bgmac_probe(struct bcma_device *core)
+{
+ struct ssb_sprom *sprom = &core->bus->sprom;
+ struct mii_bus *mii_bus;
+ struct bgmac *bgmac;
+ u8 *mac;
+ int err;
+
+ bgmac = kzalloc(sizeof(*bgmac), GFP_KERNEL);
+ if (!bgmac)
+ return -ENOMEM;
+
+ bgmac->bcma.core = core;
+ bgmac->dev = &core->dev;
+ bgmac->dma_dev = core->dma_dev;
+ bgmac->irq = core->irq;
+
+ bcma_set_drvdata(core, bgmac);
+
+ switch (core->core_unit) {
+ case 0:
+ mac = sprom->et0mac;
+ break;
+ case 1:
+ mac = sprom->et1mac;
+ break;
+ case 2:
+ mac = sprom->et2mac;
+ break;
+ default:
+ dev_err(bgmac->dev, "Unsupported core_unit %d\n",
+ core->core_unit);
+ err = -ENOTSUPP;
+ goto err;
+ }
+
+ ether_addr_copy(bgmac->mac_addr, mac);
+
+ /* On BCM4706 we need common core to access PHY */
+ if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
+ !core->bus->drv_gmac_cmn.core) {
+ dev_err(bgmac->dev, "GMAC CMN core not found (required for BCM4706)\n");
+ err = -ENODEV;
+ goto err;
+ }
+ bgmac->bcma.cmn = core->bus->drv_gmac_cmn.core;
+
+ switch (core->core_unit) {
+ case 0:
+ bgmac->phyaddr = sprom->et0phyaddr;
+ break;
+ case 1:
+ bgmac->phyaddr = sprom->et1phyaddr;
+ break;
+ case 2:
+ bgmac->phyaddr = sprom->et2phyaddr;
+ break;
+ }
+ bgmac->phyaddr &= BGMAC_PHY_MASK;
+ if (bgmac->phyaddr == BGMAC_PHY_MASK) {
+ dev_err(bgmac->dev, "No PHY found\n");
+ err = -ENODEV;
+ goto err;
+ }
+ dev_info(bgmac->dev, "Found PHY addr: %d%s\n", bgmac->phyaddr,
+ bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : "");
+
+ if (!bgmac_is_bcm4707_family(core)) {
+ mii_bus = bcma_mdio_mii_register(core, bgmac->phyaddr);
+ if (IS_ERR(mii_bus)) {
+ err = PTR_ERR(mii_bus);
+ goto err;
+ }
+
+ bgmac->mii_bus = mii_bus;
+ }
+
+ if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) {
+ dev_err(bgmac->dev, "PCI setup not implemented\n");
+ err = -ENOTSUPP;
+ goto err1;
+ }
+
+ bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo &
+ BGMAC_BFL_ENETROBO);
+ if (bgmac->has_robosw)
+ dev_warn(bgmac->dev, "Support for Roboswitch not implemented\n");
+
+ if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
+ dev_warn(bgmac->dev, "Support for ADMtek ethernet switch not implemented\n");
+
+ /* Feature Flags */
+ switch (core->bus->chipinfo.id) {
+ case BCMA_CHIP_ID_BCM5357:
+ bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
+ bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
+ bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
+ bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
+ if (core->bus->chipinfo.pkg == BCMA_PKG_ID_BCM47186) {
+ bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
+ bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
+ }
+ if (core->bus->chipinfo.pkg == BCMA_PKG_ID_BCM5358)
+ bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_EPHYRMII;
+ break;
+ case BCMA_CHIP_ID_BCM53572:
+ bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
+ bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
+ bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
+ bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
+ if (core->bus->chipinfo.pkg == BCMA_PKG_ID_BCM47188) {
+ bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
+ bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
+ }
+ break;
+ case BCMA_CHIP_ID_BCM4749:
+ bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
+ bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
+ bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
+ bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
+ if (core->bus->chipinfo.pkg == 10) {
+ bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
+ bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
+ }
+ break;
+ case BCMA_CHIP_ID_BCM4716:
+ bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
+ /* fallthrough */
+ case BCMA_CHIP_ID_BCM47162:
+ bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL2;
+ bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
+ break;
+ /* bcm4707_family */
+ case BCMA_CHIP_ID_BCM4707:
+ case BCMA_CHIP_ID_BCM47094:
+ case BCMA_CHIP_ID_BCM53018:
+ bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
+ bgmac->feature_flags |= BGMAC_FEAT_NO_RESET;
+ bgmac->feature_flags |= BGMAC_FEAT_FORCE_SPEED_2500;
+ break;
+ default:
+ bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
+ bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
+ }
+
+ if (!bgmac_is_bcm4707_family(core) && core->id.rev > 2)
+ bgmac->feature_flags |= BGMAC_FEAT_MISC_PLL_REQ;
+
+ if (core->id.id == BCMA_CORE_4706_MAC_GBIT) {
+ bgmac->feature_flags |= BGMAC_FEAT_CMN_PHY_CTL;
+ bgmac->feature_flags |= BGMAC_FEAT_NO_CLR_MIB;
+ }
+
+ if (core->id.rev >= 4) {
+ bgmac->feature_flags |= BGMAC_FEAT_CMDCFG_SR_REV4;
+ bgmac->feature_flags |= BGMAC_FEAT_TX_MASK_SETUP;
+ bgmac->feature_flags |= BGMAC_FEAT_RX_MASK_SETUP;
+ }
+
+ bgmac->read = bcma_bgmac_read;
+ bgmac->write = bcma_bgmac_write;
+ bgmac->idm_read = bcma_bgmac_idm_read;
+ bgmac->idm_write = bcma_bgmac_idm_write;
+ bgmac->clk_enabled = bcma_bgmac_clk_enabled;
+ bgmac->clk_enable = bcma_bgmac_clk_enable;
+ bgmac->cco_ctl_maskset = bcma_bgmac_cco_ctl_maskset;
+ bgmac->get_bus_clock = bcma_bgmac_get_bus_clock;
+ bgmac->cmn_maskset32 = bcma_bgmac_cmn_maskset32;
+
+ err = bgmac_enet_probe(bgmac);
+ if (err)
+ goto err1;
+
+ return 0;
+
+err1:
+ bcma_mdio_mii_unregister(bgmac->mii_bus);
+err:
+ kfree(bgmac);
+ bcma_set_drvdata(core, NULL);
+
+ return err;
+}
+
+static void bgmac_remove(struct bcma_device *core)
+{
+ struct bgmac *bgmac = bcma_get_drvdata(core);
+
+ bcma_mdio_mii_unregister(bgmac->mii_bus);
+ bgmac_enet_remove(bgmac);
+ bcma_set_drvdata(core, NULL);
+ kfree(bgmac);
+}
+
+static struct bcma_driver bgmac_bcma_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = bgmac_bcma_tbl,
+ .probe = bgmac_probe,
+ .remove = bgmac_remove,
+};
+
+static int __init bgmac_init(void)
+{
+ int err;
+
+ err = bcma_driver_register(&bgmac_bcma_driver);
+ if (err)
+ return err;
+ pr_info("Broadcom 47xx GBit MAC driver loaded\n");
+
+ return 0;
+}
+
+static void __exit bgmac_exit(void)
+{
+ bcma_driver_unregister(&bgmac_bcma_driver);
+}
+
+module_init(bgmac_init)
+module_exit(bgmac_exit)
+
+MODULE_AUTHOR("Rafał Miłecki");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
new file mode 100644
index 000000000..be52f270c
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bcma/bcma.h>
+#include <linux/etherdevice.h>
+#include <linux/of_address.h>
+#include <linux/of_net.h>
+#include "bgmac.h"
+
+static u32 platform_bgmac_read(struct bgmac *bgmac, u16 offset)
+{
+ return readl(bgmac->plat.base + offset);
+}
+
+static void platform_bgmac_write(struct bgmac *bgmac, u16 offset, u32 value)
+{
+ writel(value, bgmac->plat.base + offset);
+}
+
+static u32 platform_bgmac_idm_read(struct bgmac *bgmac, u16 offset)
+{
+ return readl(bgmac->plat.idm_base + offset);
+}
+
+static void platform_bgmac_idm_write(struct bgmac *bgmac, u16 offset, u32 value)
+{
+ return writel(value, bgmac->plat.idm_base + offset);
+}
+
+static bool platform_bgmac_clk_enabled(struct bgmac *bgmac)
+{
+ if ((bgmac_idm_read(bgmac, BCMA_IOCTL) &
+ (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC)) != BCMA_IOCTL_CLK)
+ return false;
+ if (bgmac_idm_read(bgmac, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET)
+ return false;
+ return true;
+}
+
+static void platform_bgmac_clk_enable(struct bgmac *bgmac, u32 flags)
+{
+ bgmac_idm_write(bgmac, BCMA_IOCTL,
+ (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC | flags));
+ bgmac_idm_read(bgmac, BCMA_IOCTL);
+
+ bgmac_idm_write(bgmac, BCMA_RESET_CTL, 0);
+ bgmac_idm_read(bgmac, BCMA_RESET_CTL);
+ udelay(1);
+
+ bgmac_idm_write(bgmac, BCMA_IOCTL, (BCMA_IOCTL_CLK | flags));
+ bgmac_idm_read(bgmac, BCMA_IOCTL);
+ udelay(1);
+}
+
+static void platform_bgmac_cco_ctl_maskset(struct bgmac *bgmac, u32 offset,
+ u32 mask, u32 set)
+{
+ /* This shouldn't be encountered */
+ WARN_ON(1);
+}
+
+static u32 platform_bgmac_get_bus_clock(struct bgmac *bgmac)
+{
+ /* This shouldn't be encountered */
+ WARN_ON(1);
+
+ return 0;
+}
+
+static void platform_bgmac_cmn_maskset32(struct bgmac *bgmac, u16 offset,
+ u32 mask, u32 set)
+{
+ /* This shouldn't be encountered */
+ WARN_ON(1);
+}
+
+static int bgmac_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct bgmac *bgmac;
+ struct resource *regs;
+ const u8 *mac_addr;
+
+ bgmac = devm_kzalloc(&pdev->dev, sizeof(*bgmac), GFP_KERNEL);
+ if (!bgmac)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, bgmac);
+
+ /* Set the features of the 4707 family */
+ bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
+ bgmac->feature_flags |= BGMAC_FEAT_NO_RESET;
+ bgmac->feature_flags |= BGMAC_FEAT_FORCE_SPEED_2500;
+ bgmac->feature_flags |= BGMAC_FEAT_CMDCFG_SR_REV4;
+ bgmac->feature_flags |= BGMAC_FEAT_TX_MASK_SETUP;
+ bgmac->feature_flags |= BGMAC_FEAT_RX_MASK_SETUP;
+
+ bgmac->dev = &pdev->dev;
+ bgmac->dma_dev = &pdev->dev;
+
+ mac_addr = of_get_mac_address(np);
+ if (mac_addr)
+ ether_addr_copy(bgmac->mac_addr, mac_addr);
+ else
+ dev_warn(&pdev->dev, "MAC address not present in device tree\n");
+
+ bgmac->irq = platform_get_irq(pdev, 0);
+ if (bgmac->irq < 0) {
+ dev_err(&pdev->dev, "Unable to obtain IRQ\n");
+ return bgmac->irq;
+ }
+
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "amac_base");
+ if (!regs) {
+ dev_err(&pdev->dev, "Unable to obtain base resource\n");
+ return -EINVAL;
+ }
+
+ bgmac->plat.base = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(bgmac->plat.base))
+ return PTR_ERR(bgmac->plat.base);
+
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base");
+ if (!regs) {
+ dev_err(&pdev->dev, "Unable to obtain idm resource\n");
+ return -EINVAL;
+ }
+
+ bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(bgmac->plat.idm_base))
+ return PTR_ERR(bgmac->plat.idm_base);
+
+ bgmac->read = platform_bgmac_read;
+ bgmac->write = platform_bgmac_write;
+ bgmac->idm_read = platform_bgmac_idm_read;
+ bgmac->idm_write = platform_bgmac_idm_write;
+ bgmac->clk_enabled = platform_bgmac_clk_enabled;
+ bgmac->clk_enable = platform_bgmac_clk_enable;
+ bgmac->cco_ctl_maskset = platform_bgmac_cco_ctl_maskset;
+ bgmac->get_bus_clock = platform_bgmac_get_bus_clock;
+ bgmac->cmn_maskset32 = platform_bgmac_cmn_maskset32;
+
+ return bgmac_enet_probe(bgmac);
+}
+
+static int bgmac_remove(struct platform_device *pdev)
+{
+ struct bgmac *bgmac = platform_get_drvdata(pdev);
+
+ bgmac_enet_remove(bgmac);
+
+ return 0;
+}
+
+static const struct of_device_id bgmac_of_enet_match[] = {
+ {.compatible = "brcm,amac",},
+ {.compatible = "brcm,nsp-amac",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, bgmac_of_enet_match);
+
+static struct platform_driver bgmac_enet_driver = {
+ .driver = {
+ .name = "bgmac-enet",
+ .of_match_table = bgmac_of_enet_match,
+ },
+ .probe = bgmac_probe,
+ .remove = bgmac_remove,
+};
+
+module_platform_driver(bgmac_enet_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 25bbae592..c4751ece7 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -6,51 +6,27 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
-#include "bgmac.h"
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/delay.h>
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bcma/bcma.h>
#include <linux/etherdevice.h>
-#include <linux/mii.h>
-#include <linux/phy.h>
-#include <linux/phy_fixed.h>
-#include <linux/interrupt.h>
-#include <linux/dma-mapping.h>
#include <linux/bcm47xx_nvram.h>
+#include "bgmac.h"
-static const struct bcma_device_id bgmac_bcma_tbl[] = {
- BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
- BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
- {},
-};
-MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl);
-
-static inline bool bgmac_is_bcm4707_family(struct bgmac *bgmac)
-{
- switch (bgmac->core->bus->chipinfo.id) {
- case BCMA_CHIP_ID_BCM4707:
- case BCMA_CHIP_ID_BCM47094:
- case BCMA_CHIP_ID_BCM53018:
- return true;
- default:
- return false;
- }
-}
-
-static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask,
+static bool bgmac_wait_value(struct bgmac *bgmac, u16 reg, u32 mask,
u32 value, int timeout)
{
u32 val;
int i;
for (i = 0; i < timeout / 10; i++) {
- val = bcma_read32(core, reg);
+ val = bgmac_read(bgmac, reg);
if ((val & mask) == value)
return true;
udelay(10);
}
- pr_err("Timeout waiting for reg 0x%X\n", reg);
+ dev_err(bgmac->dev, "Timeout waiting for reg 0x%X\n", reg);
return false;
}
@@ -84,22 +60,22 @@ static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
udelay(10);
}
if (i)
- bgmac_err(bgmac, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
- ring->mmio_base, val);
+ dev_err(bgmac->dev, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
+ ring->mmio_base, val);
/* Remove SUSPEND bit */
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
- if (!bgmac_wait_value(bgmac->core,
+ if (!bgmac_wait_value(bgmac,
ring->mmio_base + BGMAC_DMA_TX_STATUS,
BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED,
10000)) {
- bgmac_warn(bgmac, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
- ring->mmio_base);
+ dev_warn(bgmac->dev, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
+ ring->mmio_base);
udelay(300);
val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED)
- bgmac_err(bgmac, "Reset of DMA TX ring 0x%X failed\n",
- ring->mmio_base);
+ dev_err(bgmac->dev, "Reset of DMA TX ring 0x%X failed\n",
+ ring->mmio_base);
}
}
@@ -109,7 +85,7 @@ static void bgmac_dma_tx_enable(struct bgmac *bgmac,
u32 ctl;
ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
- if (bgmac->core->id.rev >= 4) {
+ if (bgmac->feature_flags & BGMAC_FEAT_TX_MASK_SETUP) {
ctl &= ~BGMAC_DMA_TX_BL_MASK;
ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT;
@@ -152,7 +128,7 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
struct bgmac_dma_ring *ring,
struct sk_buff *skb)
{
- struct device *dma_dev = bgmac->core->dma_dev;
+ struct device *dma_dev = bgmac->dma_dev;
struct net_device *net_dev = bgmac->net_dev;
int index = ring->end % BGMAC_TX_RING_SLOTS;
struct bgmac_slot_info *slot = &ring->slots[index];
@@ -161,7 +137,7 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
int i;
if (skb->len > BGMAC_DESC_CTL1_LEN) {
- bgmac_err(bgmac, "Too long skb (%d)\n", skb->len);
+ netdev_err(bgmac->net_dev, "Too long skb (%d)\n", skb->len);
goto err_drop;
}
@@ -174,7 +150,7 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
* even when ring->end overflows
*/
if (ring->end - ring->start + nr_frags + 1 >= BGMAC_TX_RING_SLOTS) {
- bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n");
+ netdev_err(bgmac->net_dev, "TX ring is full, queue should be stopped!\n");
netif_stop_queue(net_dev);
return NETDEV_TX_BUSY;
}
@@ -241,18 +217,20 @@ err_dma:
}
err_dma_head:
- bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
- ring->mmio_base);
+ netdev_err(bgmac->net_dev, "Mapping error of skb on ring 0x%X\n",
+ ring->mmio_base);
err_drop:
dev_kfree_skb(skb);
+ net_dev->stats.tx_dropped++;
+ net_dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
/* Free transmitted packets */
static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
- struct device *dma_dev = bgmac->core->dma_dev;
+ struct device *dma_dev = bgmac->dma_dev;
int empty_slot;
bool freed = false;
unsigned bytes_compl = 0, pkts_compl = 0;
@@ -285,6 +263,8 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
DMA_TO_DEVICE);
if (slot->skb) {
+ bgmac->net_dev->stats.tx_bytes += slot->skb->len;
+ bgmac->net_dev->stats.tx_packets++;
bytes_compl += slot->skb->len;
pkts_compl++;
@@ -313,12 +293,12 @@ static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
return;
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
- if (!bgmac_wait_value(bgmac->core,
+ if (!bgmac_wait_value(bgmac,
ring->mmio_base + BGMAC_DMA_RX_STATUS,
BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED,
10000))
- bgmac_err(bgmac, "Reset of ring 0x%X RX failed\n",
- ring->mmio_base);
+ dev_err(bgmac->dev, "Reset of ring 0x%X RX failed\n",
+ ring->mmio_base);
}
static void bgmac_dma_rx_enable(struct bgmac *bgmac,
@@ -327,7 +307,7 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac,
u32 ctl;
ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
- if (bgmac->core->id.rev >= 4) {
+ if (bgmac->feature_flags & BGMAC_FEAT_RX_MASK_SETUP) {
ctl &= ~BGMAC_DMA_RX_BL_MASK;
ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;
@@ -348,7 +328,7 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac,
static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
struct bgmac_slot_info *slot)
{
- struct device *dma_dev = bgmac->core->dma_dev;
+ struct device *dma_dev = bgmac->dma_dev;
dma_addr_t dma_addr;
struct bgmac_rx_header *rx;
void *buf;
@@ -367,7 +347,7 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
dma_addr = dma_map_single(dma_dev, buf + BGMAC_RX_BUF_OFFSET,
BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(dma_dev, dma_addr)) {
- bgmac_err(bgmac, "DMA mapping error\n");
+ netdev_err(bgmac->net_dev, "DMA mapping error\n");
put_page(virt_to_head_page(buf));
return -ENOMEM;
}
@@ -437,7 +417,7 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
end_slot /= sizeof(struct bgmac_dma_desc);
while (ring->start != end_slot) {
- struct device *dma_dev = bgmac->core->dma_dev;
+ struct device *dma_dev = bgmac->dma_dev;
struct bgmac_slot_info *slot = &ring->slots[ring->start];
struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;
struct sk_buff *skb;
@@ -462,16 +442,19 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
/* Check for poison and drop or pass the packet */
if (len == 0xdead && flags == 0xbeef) {
- bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
- ring->start);
+ netdev_err(bgmac->net_dev, "Found poisoned packet at slot %d, DMA issue!\n",
+ ring->start);
put_page(virt_to_head_page(buf));
+ bgmac->net_dev->stats.rx_errors++;
break;
}
if (len > BGMAC_RX_ALLOC_SIZE) {
- bgmac_err(bgmac, "Found oversized packet at slot %d, DMA issue!\n",
- ring->start);
+ netdev_err(bgmac->net_dev, "Found oversized packet at slot %d, DMA issue!\n",
+ ring->start);
put_page(virt_to_head_page(buf));
+ bgmac->net_dev->stats.rx_length_errors++;
+ bgmac->net_dev->stats.rx_errors++;
break;
}
@@ -480,8 +463,9 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
if (unlikely(!skb)) {
- bgmac_err(bgmac, "build_skb failed\n");
+ netdev_err(bgmac->net_dev, "build_skb failed\n");
put_page(virt_to_head_page(buf));
+ bgmac->net_dev->stats.rx_errors++;
break;
}
skb_put(skb, BGMAC_RX_FRAME_OFFSET +
@@ -491,6 +475,8 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, bgmac->net_dev);
+ bgmac->net_dev->stats.rx_bytes += len;
+ bgmac->net_dev->stats.rx_packets++;
napi_gro_receive(&bgmac->napi, skb);
handled++;
} while (0);
@@ -534,7 +520,7 @@ static bool bgmac_dma_unaligned(struct bgmac *bgmac,
static void bgmac_dma_tx_ring_free(struct bgmac *bgmac,
struct bgmac_dma_ring *ring)
{
- struct device *dma_dev = bgmac->core->dma_dev;
+ struct device *dma_dev = bgmac->dma_dev;
struct bgmac_dma_desc *dma_desc = ring->cpu_base;
struct bgmac_slot_info *slot;
int i;
@@ -560,7 +546,7 @@ static void bgmac_dma_tx_ring_free(struct bgmac *bgmac,
static void bgmac_dma_rx_ring_free(struct bgmac *bgmac,
struct bgmac_dma_ring *ring)
{
- struct device *dma_dev = bgmac->core->dma_dev;
+ struct device *dma_dev = bgmac->dma_dev;
struct bgmac_slot_info *slot;
int i;
@@ -581,7 +567,7 @@ static void bgmac_dma_ring_desc_free(struct bgmac *bgmac,
struct bgmac_dma_ring *ring,
int num_slots)
{
- struct device *dma_dev = bgmac->core->dma_dev;
+ struct device *dma_dev = bgmac->dma_dev;
int size;
if (!ring->cpu_base)
@@ -619,7 +605,7 @@ static void bgmac_dma_free(struct bgmac *bgmac)
static int bgmac_dma_alloc(struct bgmac *bgmac)
{
- struct device *dma_dev = bgmac->core->dma_dev;
+ struct device *dma_dev = bgmac->dma_dev;
struct bgmac_dma_ring *ring;
static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
@@ -630,8 +616,8 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));
- if (!(bcma_aread32(bgmac->core, BCMA_IOST) & BCMA_IOST_DMA64)) {
- bgmac_err(bgmac, "Core does not report 64-bit DMA\n");
+ if (!(bgmac_idm_read(bgmac, BCMA_IOST) & BCMA_IOST_DMA64)) {
+ dev_err(bgmac->dev, "Core does not report 64-bit DMA\n");
return -ENOTSUPP;
}
@@ -645,8 +631,8 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
&ring->dma_base,
GFP_KERNEL);
if (!ring->cpu_base) {
- bgmac_err(bgmac, "Allocation of TX ring 0x%X failed\n",
- ring->mmio_base);
+ dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n",
+ ring->mmio_base);
goto err_dma_free;
}
@@ -670,8 +656,8 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
&ring->dma_base,
GFP_KERNEL);
if (!ring->cpu_base) {
- bgmac_err(bgmac, "Allocation of RX ring 0x%X failed\n",
- ring->mmio_base);
+ dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n",
+ ring->mmio_base);
err = -ENOMEM;
goto err_dma_free;
}
@@ -746,150 +732,6 @@ error:
return err;
}
-/**************************************************
- * PHY ops
- **************************************************/
-
-static u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg)
-{
- struct bcma_device *core;
- u16 phy_access_addr;
- u16 phy_ctl_addr;
- u32 tmp;
-
- BUILD_BUG_ON(BGMAC_PA_DATA_MASK != BCMA_GMAC_CMN_PA_DATA_MASK);
- BUILD_BUG_ON(BGMAC_PA_ADDR_MASK != BCMA_GMAC_CMN_PA_ADDR_MASK);
- BUILD_BUG_ON(BGMAC_PA_ADDR_SHIFT != BCMA_GMAC_CMN_PA_ADDR_SHIFT);
- BUILD_BUG_ON(BGMAC_PA_REG_MASK != BCMA_GMAC_CMN_PA_REG_MASK);
- BUILD_BUG_ON(BGMAC_PA_REG_SHIFT != BCMA_GMAC_CMN_PA_REG_SHIFT);
- BUILD_BUG_ON(BGMAC_PA_WRITE != BCMA_GMAC_CMN_PA_WRITE);
- BUILD_BUG_ON(BGMAC_PA_START != BCMA_GMAC_CMN_PA_START);
- BUILD_BUG_ON(BGMAC_PC_EPA_MASK != BCMA_GMAC_CMN_PC_EPA_MASK);
- BUILD_BUG_ON(BGMAC_PC_MCT_MASK != BCMA_GMAC_CMN_PC_MCT_MASK);
- BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT);
- BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE);
-
- if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
- core = bgmac->core->bus->drv_gmac_cmn.core;
- phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
- phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
- } else {
- core = bgmac->core;
- phy_access_addr = BGMAC_PHY_ACCESS;
- phy_ctl_addr = BGMAC_PHY_CNTL;
- }
-
- tmp = bcma_read32(core, phy_ctl_addr);
- tmp &= ~BGMAC_PC_EPA_MASK;
- tmp |= phyaddr;
- bcma_write32(core, phy_ctl_addr, tmp);
-
- tmp = BGMAC_PA_START;
- tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
- tmp |= reg << BGMAC_PA_REG_SHIFT;
- bcma_write32(core, phy_access_addr, tmp);
-
- if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
- bgmac_err(bgmac, "Reading PHY %d register 0x%X failed\n",
- phyaddr, reg);
- return 0xffff;
- }
-
- return bcma_read32(core, phy_access_addr) & BGMAC_PA_DATA_MASK;
-}
-
-/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */
-static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)
-{
- struct bcma_device *core;
- u16 phy_access_addr;
- u16 phy_ctl_addr;
- u32 tmp;
-
- if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
- core = bgmac->core->bus->drv_gmac_cmn.core;
- phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
- phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
- } else {
- core = bgmac->core;
- phy_access_addr = BGMAC_PHY_ACCESS;
- phy_ctl_addr = BGMAC_PHY_CNTL;
- }
-
- tmp = bcma_read32(core, phy_ctl_addr);
- tmp &= ~BGMAC_PC_EPA_MASK;
- tmp |= phyaddr;
- bcma_write32(core, phy_ctl_addr, tmp);
-
- bgmac_write(bgmac, BGMAC_INT_STATUS, BGMAC_IS_MDIO);
- if (bgmac_read(bgmac, BGMAC_INT_STATUS) & BGMAC_IS_MDIO)
- bgmac_warn(bgmac, "Error setting MDIO int\n");
-
- tmp = BGMAC_PA_START;
- tmp |= BGMAC_PA_WRITE;
- tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
- tmp |= reg << BGMAC_PA_REG_SHIFT;
- tmp |= value;
- bcma_write32(core, phy_access_addr, tmp);
-
- if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
- bgmac_err(bgmac, "Writing to PHY %d register 0x%X failed\n",
- phyaddr, reg);
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
-static void bgmac_phy_init(struct bgmac *bgmac)
-{
- struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
- struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
- u8 i;
-
- if (ci->id == BCMA_CHIP_ID_BCM5356) {
- for (i = 0; i < 5; i++) {
- bgmac_phy_write(bgmac, i, 0x1f, 0x008b);
- bgmac_phy_write(bgmac, i, 0x15, 0x0100);
- bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
- bgmac_phy_write(bgmac, i, 0x12, 0x2aaa);
- bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
- }
- }
- if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) ||
- (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) ||
- (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) {
- bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0);
- bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0);
- for (i = 0; i < 5; i++) {
- bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
- bgmac_phy_write(bgmac, i, 0x16, 0x5284);
- bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
- bgmac_phy_write(bgmac, i, 0x17, 0x0010);
- bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
- bgmac_phy_write(bgmac, i, 0x16, 0x5296);
- bgmac_phy_write(bgmac, i, 0x17, 0x1073);
- bgmac_phy_write(bgmac, i, 0x17, 0x9073);
- bgmac_phy_write(bgmac, i, 0x16, 0x52b6);
- bgmac_phy_write(bgmac, i, 0x17, 0x9273);
- bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
- }
- }
-}
-
-/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */
-static void bgmac_phy_reset(struct bgmac *bgmac)
-{
- if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
- return;
-
- bgmac_phy_write(bgmac, bgmac->phyaddr, MII_BMCR, BMCR_RESET);
- udelay(100);
- if (bgmac_phy_read(bgmac, bgmac->phyaddr, MII_BMCR) & BMCR_RESET)
- bgmac_err(bgmac, "PHY reset failed\n");
- bgmac_phy_init(bgmac);
-}
/**************************************************
* Chip ops
@@ -903,14 +745,20 @@ static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
{
u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
u32 new_val = (cmdcfg & mask) | set;
+ u32 cmdcfg_sr;
+
+ if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
+ cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
+ else
+ cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;
- bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR(bgmac->core->id.rev));
+ bgmac_set(bgmac, BGMAC_CMDCFG, cmdcfg_sr);
udelay(2);
if (new_val != cmdcfg || force)
bgmac_write(bgmac, BGMAC_CMDCFG, new_val);
- bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR(bgmac->core->id.rev));
+ bgmac_mask(bgmac, BGMAC_CMDCFG, ~cmdcfg_sr);
udelay(2);
}
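
The feature-flag selection of the CMDCFG software-reset bit above is repeated verbatim in bgmac_chip_reset() and bgmac_enable() further down in this file; since it no longer depends on bgmac->core->id.rev, the three copies could be folded into one helper. A hypothetical sketch, not part of this patch:

#include "bgmac.h"

/* Hypothetical helper, not in this patch: the CMDCFG "software reset"
 * bit moved in core rev 4, so pick it from the feature flags.
 */
static u32 bgmac_cmdcfg_sr(struct bgmac *bgmac)
{
	if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
		return BGMAC_CMDCFG_SR_REV4;
	return BGMAC_CMDCFG_SR_REV0;
}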
@@ -939,7 +787,7 @@ static void bgmac_chip_stats_update(struct bgmac *bgmac)
{
int i;
- if (bgmac->core->id.id != BCMA_CORE_4706_MAC_GBIT) {
+ if (!(bgmac->feature_flags & BGMAC_FEAT_NO_CLR_MIB)) {
for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
bgmac->mib_tx_regs[i] =
bgmac_read(bgmac,
@@ -958,7 +806,7 @@ static void bgmac_clear_mib(struct bgmac *bgmac)
{
int i;
- if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT)
+ if (bgmac->feature_flags & BGMAC_FEAT_NO_CLR_MIB)
return;
bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR);
@@ -988,7 +836,8 @@ static void bgmac_mac_speed(struct bgmac *bgmac)
set |= BGMAC_CMDCFG_ES_2500;
break;
default:
- bgmac_err(bgmac, "Unsupported speed: %d\n", bgmac->mac_speed);
+ dev_err(bgmac->dev, "Unsupported speed: %d\n",
+ bgmac->mac_speed);
}
if (bgmac->mac_duplex == DUPLEX_HALF)
@@ -999,17 +848,16 @@ static void bgmac_mac_speed(struct bgmac *bgmac)
static void bgmac_miiconfig(struct bgmac *bgmac)
{
- struct bcma_device *core = bgmac->core;
- u8 imode;
-
- if (bgmac_is_bcm4707_family(bgmac)) {
- bcma_awrite32(core, BCMA_IOCTL,
- bcma_aread32(core, BCMA_IOCTL) | 0x40 |
- BGMAC_BCMA_IOCTL_SW_CLKEN);
+ if (bgmac->feature_flags & BGMAC_FEAT_FORCE_SPEED_2500) {
+ bgmac_idm_write(bgmac, BCMA_IOCTL,
+ bgmac_idm_read(bgmac, BCMA_IOCTL) | 0x40 |
+ BGMAC_BCMA_IOCTL_SW_CLKEN);
bgmac->mac_speed = SPEED_2500;
bgmac->mac_duplex = DUPLEX_FULL;
bgmac_mac_speed(bgmac);
} else {
+ u8 imode;
+
imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) &
BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT;
if (imode == 0 || imode == 1) {
@@ -1023,14 +871,11 @@ static void bgmac_miiconfig(struct bgmac *bgmac)
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
static void bgmac_chip_reset(struct bgmac *bgmac)
{
- struct bcma_device *core = bgmac->core;
- struct bcma_bus *bus = core->bus;
- struct bcma_chipinfo *ci = &bus->chipinfo;
- u32 flags;
+ u32 cmdcfg_sr;
u32 iost;
int i;
- if (bcma_core_is_enabled(core)) {
+ if (bgmac_clk_enabled(bgmac)) {
if (!bgmac->stats_grabbed) {
/* bgmac_chip_stats_update(bgmac); */
bgmac->stats_grabbed = true;
@@ -1048,38 +893,32 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
/* TODO: Clear software multicast filter list */
}
- iost = bcma_aread32(core, BCMA_IOST);
- if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
- (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
- (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188))
+ iost = bgmac_idm_read(bgmac, BCMA_IOST);
+ if (bgmac->feature_flags & BGMAC_FEAT_IOST_ATTACHED)
iost &= ~BGMAC_BCMA_IOST_ATTACHED;
/* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */
- if (ci->id != BCMA_CHIP_ID_BCM4707 &&
- ci->id != BCMA_CHIP_ID_BCM47094) {
- flags = 0;
+ if (!(bgmac->feature_flags & BGMAC_FEAT_NO_RESET)) {
+ u32 flags = 0;
if (iost & BGMAC_BCMA_IOST_ATTACHED) {
flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
if (!bgmac->has_robosw)
flags |= BGMAC_BCMA_IOCTL_SW_RESET;
}
- bcma_core_enable(core, flags);
+ bgmac_clk_enable(bgmac, flags);
}
/* Request Misc PLL for corerev > 2 */
- if (core->id.rev > 2 && !bgmac_is_bcm4707_family(bgmac)) {
+ if (bgmac->feature_flags & BGMAC_FEAT_MISC_PLL_REQ) {
bgmac_set(bgmac, BCMA_CLKCTLST,
BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ);
- bgmac_wait_value(bgmac->core, BCMA_CLKCTLST,
+ bgmac_wait_value(bgmac, BCMA_CLKCTLST,
BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
1000);
}
- if (ci->id == BCMA_CHIP_ID_BCM5357 ||
- ci->id == BCMA_CHIP_ID_BCM4749 ||
- ci->id == BCMA_CHIP_ID_BCM53572) {
- struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
+ if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_PHY) {
u8 et_swtype = 0;
u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
BGMAC_CHIPCTL_1_IF_TYPE_MII;
@@ -1087,35 +926,37 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
if (kstrtou8(buf, 0, &et_swtype))
- bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
- buf);
+ dev_err(bgmac->dev, "Failed to parse et_swtype (%s)\n",
+ buf);
et_swtype &= 0x0f;
et_swtype <<= 4;
sw_type = et_swtype;
- } else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358) {
+ } else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_EPHYRMII) {
sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
- } else if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
- (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
- (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) {
+ } else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_RGMII) {
sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
}
- bcma_chipco_chipctl_maskset(cc, 1,
- ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
- BGMAC_CHIPCTL_1_SW_TYPE_MASK),
- sw_type);
+ bgmac_cco_ctl_maskset(bgmac, 1, ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
+ BGMAC_CHIPCTL_1_SW_TYPE_MASK),
+ sw_type);
}
if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
- bcma_awrite32(core, BCMA_IOCTL,
- bcma_aread32(core, BCMA_IOCTL) &
- ~BGMAC_BCMA_IOCTL_SW_RESET);
+ bgmac_idm_write(bgmac, BCMA_IOCTL,
+ bgmac_idm_read(bgmac, BCMA_IOCTL) &
+ ~BGMAC_BCMA_IOCTL_SW_RESET);
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
* The specs don't mention using BGMAC_CMDCFG_SR, but in this routine
* BGMAC_CMDCFG is read _after_ putting the chip in a reset. So it has
* to be kept until taking the MAC out of the reset.
*/
+ if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
+ cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
+ else
+ cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;
+
bgmac_cmdcfg_maskset(bgmac,
~(BGMAC_CMDCFG_TE |
BGMAC_CMDCFG_RE |
@@ -1133,19 +974,20 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
BGMAC_CMDCFG_PROM |
BGMAC_CMDCFG_NLC |
BGMAC_CMDCFG_CFE |
- BGMAC_CMDCFG_SR(core->id.rev),
+ cmdcfg_sr,
false);
bgmac->mac_speed = SPEED_UNKNOWN;
bgmac->mac_duplex = DUPLEX_UNKNOWN;
bgmac_clear_mib(bgmac);
- if (core->id.id == BCMA_CORE_4706_MAC_GBIT)
- bcma_maskset32(bgmac->cmn, BCMA_GMAC_CMN_PHY_CTL, ~0,
- BCMA_GMAC_CMN_PC_MTE);
+ if (bgmac->feature_flags & BGMAC_FEAT_CMN_PHY_CTL)
+ bgmac_cmn_maskset32(bgmac, BCMA_GMAC_CMN_PHY_CTL, ~0,
+ BCMA_GMAC_CMN_PC_MTE);
else
bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
bgmac_miiconfig(bgmac);
- bgmac_phy_init(bgmac);
+ if (bgmac->mii_bus)
+ bgmac->mii_bus->reset(bgmac->mii_bus);
netdev_reset_queue(bgmac->net_dev);
}
@@ -1164,50 +1006,51 @@ static void bgmac_chip_intrs_off(struct bgmac *bgmac)
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
static void bgmac_enable(struct bgmac *bgmac)
{
- struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
+ u32 cmdcfg_sr;
u32 cmdcfg;
u32 mode;
- u32 rxq_ctl;
- u32 fl_ctl;
- u16 bp_clk;
- u8 mdp;
+
+ if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
+ cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
+ else
+ cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;
cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
- BGMAC_CMDCFG_SR(bgmac->core->id.rev), true);
+ cmdcfg_sr, true);
udelay(2);
cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);
mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
BGMAC_DS_MM_SHIFT;
- if (ci->id != BCMA_CHIP_ID_BCM47162 || mode != 0)
+ if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST || mode != 0)
bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
- if (ci->id == BCMA_CHIP_ID_BCM47162 && mode == 2)
- bcma_chipco_chipctl_maskset(&bgmac->core->bus->drv_cc, 1, ~0,
- BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);
-
- switch (ci->id) {
- case BCMA_CHIP_ID_BCM5357:
- case BCMA_CHIP_ID_BCM4749:
- case BCMA_CHIP_ID_BCM53572:
- case BCMA_CHIP_ID_BCM4716:
- case BCMA_CHIP_ID_BCM47162:
- fl_ctl = 0x03cb04cb;
- if (ci->id == BCMA_CHIP_ID_BCM5357 ||
- ci->id == BCMA_CHIP_ID_BCM4749 ||
- ci->id == BCMA_CHIP_ID_BCM53572)
+ if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST && mode == 2)
+ bgmac_cco_ctl_maskset(bgmac, 1, ~0,
+ BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);
+
+ if (bgmac->feature_flags & (BGMAC_FEAT_FLW_CTRL1 |
+ BGMAC_FEAT_FLW_CTRL2)) {
+ u32 fl_ctl;
+
+ if (bgmac->feature_flags & BGMAC_FEAT_FLW_CTRL1)
fl_ctl = 0x2300e1;
+ else
+ fl_ctl = 0x03cb04cb;
+
bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
- break;
}
- if (!bgmac_is_bcm4707_family(bgmac)) {
+ if (bgmac->feature_flags & BGMAC_FEAT_SET_RXQ_CLK) {
+ u32 rxq_ctl;
+ u16 bp_clk;
+ u8 mdp;
+
rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
- bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) /
- 1000000;
+ bp_clk = bgmac_get_bus_clock(bgmac) / 1000000;
mdp = (bp_clk * 128 / 1000) - 3;
rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
@@ -1251,7 +1094,7 @@ static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
int_status &= ~(BGMAC_IS_TX0 | BGMAC_IS_RX);
if (int_status)
- bgmac_err(bgmac, "Unknown IRQs: 0x%08X\n", int_status);
+ dev_err(bgmac->dev, "Unknown IRQs: 0x%08X\n", int_status);
/* Disable new interrupts until handling existing ones */
bgmac_chip_intrs_off(bgmac);
@@ -1302,16 +1145,16 @@ static int bgmac_open(struct net_device *net_dev)
/* Specs mention reclaiming rings here, but we do that in DMA init */
bgmac_chip_init(bgmac);
- err = request_irq(bgmac->core->irq, bgmac_interrupt, IRQF_SHARED,
+ err = request_irq(bgmac->irq, bgmac_interrupt, IRQF_SHARED,
KBUILD_MODNAME, net_dev);
if (err < 0) {
- bgmac_err(bgmac, "IRQ request error: %d!\n", err);
+ dev_err(bgmac->dev, "IRQ request error: %d!\n", err);
bgmac_dma_cleanup(bgmac);
return err;
}
napi_enable(&bgmac->napi);
- phy_start(bgmac->phy_dev);
+ phy_start(net_dev->phydev);
netif_start_queue(net_dev);
@@ -1324,11 +1167,11 @@ static int bgmac_stop(struct net_device *net_dev)
netif_carrier_off(net_dev);
- phy_stop(bgmac->phy_dev);
+ phy_stop(net_dev->phydev);
napi_disable(&bgmac->napi);
bgmac_chip_intrs_off(bgmac);
- free_irq(bgmac->core->irq, net_dev);
+ free_irq(bgmac->irq, net_dev);
bgmac_chip_reset(bgmac);
bgmac_dma_cleanup(bgmac);
@@ -1362,12 +1205,10 @@ static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
- struct bgmac *bgmac = netdev_priv(net_dev);
-
if (!netif_running(net_dev))
return -EINVAL;
- return phy_mii_ioctl(bgmac->phy_dev, ifr, cmd);
+ return phy_mii_ioctl(net_dev->phydev, ifr, cmd);
}
static const struct net_device_ops bgmac_netdev_ops = {
@@ -1384,54 +1225,151 @@ static const struct net_device_ops bgmac_netdev_ops = {
* ethtool_ops
**************************************************/
-static int bgmac_get_settings(struct net_device *net_dev,
- struct ethtool_cmd *cmd)
+struct bgmac_stat {
+ u8 size;
+ u32 offset;
+ const char *name;
+};
+
+static struct bgmac_stat bgmac_get_strings_stats[] = {
+ { 8, BGMAC_TX_GOOD_OCTETS, "tx_good_octets" },
+ { 4, BGMAC_TX_GOOD_PKTS, "tx_good" },
+ { 8, BGMAC_TX_OCTETS, "tx_octets" },
+ { 4, BGMAC_TX_PKTS, "tx_pkts" },
+ { 4, BGMAC_TX_BROADCAST_PKTS, "tx_broadcast" },
+ { 4, BGMAC_TX_MULTICAST_PKTS, "tx_multicast" },
+ { 4, BGMAC_TX_LEN_64, "tx_64" },
+ { 4, BGMAC_TX_LEN_65_TO_127, "tx_65_127" },
+ { 4, BGMAC_TX_LEN_128_TO_255, "tx_128_255" },
+ { 4, BGMAC_TX_LEN_256_TO_511, "tx_256_511" },
+ { 4, BGMAC_TX_LEN_512_TO_1023, "tx_512_1023" },
+ { 4, BGMAC_TX_LEN_1024_TO_1522, "tx_1024_1522" },
+ { 4, BGMAC_TX_LEN_1523_TO_2047, "tx_1523_2047" },
+ { 4, BGMAC_TX_LEN_2048_TO_4095, "tx_2048_4095" },
+ { 4, BGMAC_TX_LEN_4096_TO_8191, "tx_4096_8191" },
+ { 4, BGMAC_TX_LEN_8192_TO_MAX, "tx_8192_max" },
+ { 4, BGMAC_TX_JABBER_PKTS, "tx_jabber" },
+ { 4, BGMAC_TX_OVERSIZE_PKTS, "tx_oversize" },
+ { 4, BGMAC_TX_FRAGMENT_PKTS, "tx_fragment" },
+ { 4, BGMAC_TX_UNDERRUNS, "tx_underruns" },
+ { 4, BGMAC_TX_TOTAL_COLS, "tx_total_cols" },
+ { 4, BGMAC_TX_SINGLE_COLS, "tx_single_cols" },
+ { 4, BGMAC_TX_MULTIPLE_COLS, "tx_multiple_cols" },
+ { 4, BGMAC_TX_EXCESSIVE_COLS, "tx_excessive_cols" },
+ { 4, BGMAC_TX_LATE_COLS, "tx_late_cols" },
+ { 4, BGMAC_TX_DEFERED, "tx_defered" },
+ { 4, BGMAC_TX_CARRIER_LOST, "tx_carrier_lost" },
+ { 4, BGMAC_TX_PAUSE_PKTS, "tx_pause" },
+ { 4, BGMAC_TX_UNI_PKTS, "tx_unicast" },
+ { 4, BGMAC_TX_Q0_PKTS, "tx_q0" },
+ { 8, BGMAC_TX_Q0_OCTETS, "tx_q0_octets" },
+ { 4, BGMAC_TX_Q1_PKTS, "tx_q1" },
+ { 8, BGMAC_TX_Q1_OCTETS, "tx_q1_octets" },
+ { 4, BGMAC_TX_Q2_PKTS, "tx_q2" },
+ { 8, BGMAC_TX_Q2_OCTETS, "tx_q2_octets" },
+ { 4, BGMAC_TX_Q3_PKTS, "tx_q3" },
+ { 8, BGMAC_TX_Q3_OCTETS, "tx_q3_octets" },
+ { 8, BGMAC_RX_GOOD_OCTETS, "rx_good_octets" },
+ { 4, BGMAC_RX_GOOD_PKTS, "rx_good" },
+ { 8, BGMAC_RX_OCTETS, "rx_octets" },
+ { 4, BGMAC_RX_PKTS, "rx_pkts" },
+ { 4, BGMAC_RX_BROADCAST_PKTS, "rx_broadcast" },
+ { 4, BGMAC_RX_MULTICAST_PKTS, "rx_multicast" },
+ { 4, BGMAC_RX_LEN_64, "rx_64" },
+ { 4, BGMAC_RX_LEN_65_TO_127, "rx_65_127" },
+ { 4, BGMAC_RX_LEN_128_TO_255, "rx_128_255" },
+ { 4, BGMAC_RX_LEN_256_TO_511, "rx_256_511" },
+ { 4, BGMAC_RX_LEN_512_TO_1023, "rx_512_1023" },
+ { 4, BGMAC_RX_LEN_1024_TO_1522, "rx_1024_1522" },
+ { 4, BGMAC_RX_LEN_1523_TO_2047, "rx_1523_2047" },
+ { 4, BGMAC_RX_LEN_2048_TO_4095, "rx_2048_4095" },
+ { 4, BGMAC_RX_LEN_4096_TO_8191, "rx_4096_8191" },
+ { 4, BGMAC_RX_LEN_8192_TO_MAX, "rx_8192_max" },
+ { 4, BGMAC_RX_JABBER_PKTS, "rx_jabber" },
+ { 4, BGMAC_RX_OVERSIZE_PKTS, "rx_oversize" },
+ { 4, BGMAC_RX_FRAGMENT_PKTS, "rx_fragment" },
+ { 4, BGMAC_RX_MISSED_PKTS, "rx_missed" },
+ { 4, BGMAC_RX_CRC_ALIGN_ERRS, "rx_crc_align" },
+ { 4, BGMAC_RX_UNDERSIZE, "rx_undersize" },
+ { 4, BGMAC_RX_CRC_ERRS, "rx_crc" },
+ { 4, BGMAC_RX_ALIGN_ERRS, "rx_align" },
+ { 4, BGMAC_RX_SYMBOL_ERRS, "rx_symbol" },
+ { 4, BGMAC_RX_PAUSE_PKTS, "rx_pause" },
+ { 4, BGMAC_RX_NONPAUSE_PKTS, "rx_nonpause" },
+ { 4, BGMAC_RX_SACHANGES, "rx_sa_changes" },
+ { 4, BGMAC_RX_UNI_PKTS, "rx_unicast" },
+};
+
+#define BGMAC_STATS_LEN ARRAY_SIZE(bgmac_get_strings_stats)
+
+static int bgmac_get_sset_count(struct net_device *dev, int string_set)
{
- struct bgmac *bgmac = netdev_priv(net_dev);
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return BGMAC_STATS_LEN;
+ }
- return phy_ethtool_gset(bgmac->phy_dev, cmd);
+ return -EOPNOTSUPP;
}
-static int bgmac_set_settings(struct net_device *net_dev,
- struct ethtool_cmd *cmd)
+static void bgmac_get_strings(struct net_device *dev, u32 stringset,
+ u8 *data)
{
- struct bgmac *bgmac = netdev_priv(net_dev);
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < BGMAC_STATS_LEN; i++)
+ strlcpy(data + i * ETH_GSTRING_LEN,
+ bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN);
+}
+
+static void bgmac_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *ss, uint64_t *data)
+{
+ struct bgmac *bgmac = netdev_priv(dev);
+ const struct bgmac_stat *s;
+ unsigned int i;
+ u64 val;
+
+ if (!netif_running(dev))
+ return;
- return phy_ethtool_sset(bgmac->phy_dev, cmd);
+ for (i = 0; i < BGMAC_STATS_LEN; i++) {
+ s = &bgmac_get_strings_stats[i];
+ val = 0;
+ if (s->size == 8)
+ val = (u64)bgmac_read(bgmac, s->offset + 4) << 32;
+ val |= bgmac_read(bgmac, s->offset);
+ data[i] = val;
+ }
}
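
Each 64-bit MIB counter above is exposed as two 32-bit registers, with the high word at offset + 4. A minimal sketch of that access pattern, assuming only the bgmac_read() accessor from this driver (the helper name is hypothetical):

static u64 bgmac_read_stat64(struct bgmac *bgmac, u32 offset)
{
	/* high word first, then OR in the low word, exactly as the
	 * stats loop above does */
	u64 val = (u64)bgmac_read(bgmac, offset + 4) << 32;

	return val | bgmac_read(bgmac, offset);
}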
static void bgmac_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->bus_info, "BCMA", sizeof(info->bus_info));
+ strlcpy(info->bus_info, "AXI", sizeof(info->bus_info));
}
static const struct ethtool_ops bgmac_ethtool_ops = {
- .get_settings = bgmac_get_settings,
- .set_settings = bgmac_set_settings,
+ .get_strings = bgmac_get_strings,
+ .get_sset_count = bgmac_get_sset_count,
+ .get_ethtool_stats = bgmac_get_ethtool_stats,
.get_drvinfo = bgmac_get_drvinfo,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
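
The generic link_ksettings helpers work here because the PHY is now attached to the net_device itself (net_dev->phydev) instead of a driver-private pointer. A rough sketch of what such a helper does, under that assumption:

static int example_get_link_ksettings(struct net_device *dev,
				      struct ethtool_link_ksettings *cmd)
{
	if (!dev->phydev)
		return -ENODEV;

	return phy_ethtool_ksettings_get(dev->phydev, cmd);
}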
/**************************************************
* MII
**************************************************/
-static int bgmac_mii_read(struct mii_bus *bus, int mii_id, int regnum)
-{
- return bgmac_phy_read(bus->priv, mii_id, regnum);
-}
-
-static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum,
- u16 value)
-{
- return bgmac_phy_write(bus->priv, mii_id, regnum, value);
-}
-
static void bgmac_adjust_link(struct net_device *net_dev)
{
struct bgmac *bgmac = netdev_priv(net_dev);
- struct phy_device *phy_dev = bgmac->phy_dev;
+ struct phy_device *phy_dev = net_dev->phydev;
bool update = false;
if (phy_dev->link) {
@@ -1452,7 +1390,7 @@ static void bgmac_adjust_link(struct net_device *net_dev)
}
}
-static int bgmac_fixed_phy_register(struct bgmac *bgmac)
+static int bgmac_phy_connect_direct(struct bgmac *bgmac)
{
struct fixed_phy_status fphy_status = {
.link = 1,
@@ -1464,196 +1402,76 @@ static int bgmac_fixed_phy_register(struct bgmac *bgmac)
phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL);
if (!phy_dev || IS_ERR(phy_dev)) {
- bgmac_err(bgmac, "Failed to register fixed PHY device\n");
+ dev_err(bgmac->dev, "Failed to register fixed PHY device\n");
return -ENODEV;
}
err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link,
PHY_INTERFACE_MODE_MII);
if (err) {
- bgmac_err(bgmac, "Connecting PHY failed\n");
+ dev_err(bgmac->dev, "Connecting PHY failed\n");
return err;
}
- bgmac->phy_dev = phy_dev;
-
return err;
}
-static int bgmac_mii_register(struct bgmac *bgmac)
+static int bgmac_phy_connect(struct bgmac *bgmac)
{
- struct mii_bus *mii_bus;
struct phy_device *phy_dev;
char bus_id[MII_BUS_ID_SIZE + 3];
- int err = 0;
-
- if (bgmac_is_bcm4707_family(bgmac))
- return bgmac_fixed_phy_register(bgmac);
-
- mii_bus = mdiobus_alloc();
- if (!mii_bus)
- return -ENOMEM;
-
- mii_bus->name = "bgmac mii bus";
- sprintf(mii_bus->id, "%s-%d-%d", "bgmac", bgmac->core->bus->num,
- bgmac->core->core_unit);
- mii_bus->priv = bgmac;
- mii_bus->read = bgmac_mii_read;
- mii_bus->write = bgmac_mii_write;
- mii_bus->parent = &bgmac->core->dev;
- mii_bus->phy_mask = ~(1 << bgmac->phyaddr);
-
- err = mdiobus_register(mii_bus);
- if (err) {
- bgmac_err(bgmac, "Registration of mii bus failed\n");
- goto err_free_bus;
- }
-
- bgmac->mii_bus = mii_bus;
/* Connect to the PHY */
- snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
+ snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, bgmac->mii_bus->id,
bgmac->phyaddr);
phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(phy_dev)) {
- bgmac_err(bgmac, "PHY connection failed\n");
- err = PTR_ERR(phy_dev);
- goto err_unregister_bus;
+ dev_err(bgmac->dev, "PHY connection failed\n");
+ return PTR_ERR(phy_dev);
}
- bgmac->phy_dev = phy_dev;
-
- return err;
-err_unregister_bus:
- mdiobus_unregister(mii_bus);
-err_free_bus:
- mdiobus_free(mii_bus);
- return err;
-}
-
-static void bgmac_mii_unregister(struct bgmac *bgmac)
-{
- struct mii_bus *mii_bus = bgmac->mii_bus;
-
- mdiobus_unregister(mii_bus);
- mdiobus_free(mii_bus);
+ return 0;
}
-/**************************************************
- * BCMA bus ops
- **************************************************/
-
-/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */
-static int bgmac_probe(struct bcma_device *core)
+int bgmac_enet_probe(struct bgmac *info)
{
struct net_device *net_dev;
struct bgmac *bgmac;
- struct ssb_sprom *sprom = &core->bus->sprom;
- u8 *mac;
int err;
- switch (core->core_unit) {
- case 0:
- mac = sprom->et0mac;
- break;
- case 1:
- mac = sprom->et1mac;
- break;
- case 2:
- mac = sprom->et2mac;
- break;
- default:
- pr_err("Unsupported core_unit %d\n", core->core_unit);
- return -ENOTSUPP;
- }
-
- if (!is_valid_ether_addr(mac)) {
- dev_err(&core->dev, "Invalid MAC addr: %pM\n", mac);
- eth_random_addr(mac);
- dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
- }
-
- /* This (reset &) enable is not present in specs or reference driver but
- * Broadcom does it in arch PCI code when enabling fake PCI device.
- */
- bcma_core_enable(core, 0);
-
/* Allocation and references */
net_dev = alloc_etherdev(sizeof(*bgmac));
if (!net_dev)
return -ENOMEM;
+
net_dev->netdev_ops = &bgmac_netdev_ops;
- net_dev->irq = core->irq;
net_dev->ethtool_ops = &bgmac_ethtool_ops;
bgmac = netdev_priv(net_dev);
+ memcpy(bgmac, info, sizeof(*bgmac));
bgmac->net_dev = net_dev;
- bgmac->core = core;
- bcma_set_drvdata(core, bgmac);
-
- /* Defaults */
- memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN);
-
- /* On BCM4706 we need common core to access PHY */
- if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
- !core->bus->drv_gmac_cmn.core) {
- bgmac_err(bgmac, "GMAC CMN core not found (required for BCM4706)\n");
- err = -ENODEV;
- goto err_netdev_free;
- }
- bgmac->cmn = core->bus->drv_gmac_cmn.core;
-
- switch (core->core_unit) {
- case 0:
- bgmac->phyaddr = sprom->et0phyaddr;
- break;
- case 1:
- bgmac->phyaddr = sprom->et1phyaddr;
- break;
- case 2:
- bgmac->phyaddr = sprom->et2phyaddr;
- break;
- }
- bgmac->phyaddr &= BGMAC_PHY_MASK;
- if (bgmac->phyaddr == BGMAC_PHY_MASK) {
- bgmac_err(bgmac, "No PHY found\n");
- err = -ENODEV;
- goto err_netdev_free;
+ net_dev->irq = bgmac->irq;
+ SET_NETDEV_DEV(net_dev, bgmac->dev);
+
+ if (!is_valid_ether_addr(bgmac->mac_addr)) {
+ dev_err(bgmac->dev, "Invalid MAC addr: %pM\n",
+ bgmac->mac_addr);
+ eth_random_addr(bgmac->mac_addr);
+ dev_warn(bgmac->dev, "Using random MAC: %pM\n",
+ bgmac->mac_addr);
}
- bgmac_info(bgmac, "Found PHY addr: %d%s\n", bgmac->phyaddr,
- bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : "");
+ ether_addr_copy(net_dev->dev_addr, bgmac->mac_addr);
- if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) {
- bgmac_err(bgmac, "PCI setup not implemented\n");
- err = -ENOTSUPP;
- goto err_netdev_free;
- }
+ /* This (reset &) enable is not present in specs or reference driver but
+ * Broadcom does it in arch PCI code when enabling fake PCI device.
+ */
+ bgmac_clk_enable(bgmac, 0);
bgmac_chip_reset(bgmac);
- /* For Northstar, we have to take all GMAC core out of reset */
- if (bgmac_is_bcm4707_family(bgmac)) {
- struct bcma_device *ns_core;
- int ns_gmac;
-
- /* Northstar has 4 GMAC cores */
- for (ns_gmac = 0; ns_gmac < 4; ns_gmac++) {
- /* As Northstar requirement, we have to reset all GMACs
- * before accessing one. bgmac_chip_reset() call
- * bcma_core_enable() for this core. Then the other
- * three GMACs didn't reset. We do it here.
- */
- ns_core = bcma_find_core_unit(core->bus,
- BCMA_CORE_MAC_GBIT,
- ns_gmac);
- if (ns_core && !bcma_core_is_enabled(ns_core))
- bcma_core_enable(ns_core, 0);
- }
- }
-
err = bgmac_dma_alloc(bgmac);
if (err) {
- bgmac_err(bgmac, "Unable to alloc memory for DMA\n");
+ dev_err(bgmac->dev, "Unable to alloc memory for DMA\n");
goto err_netdev_free;
}
@@ -1661,22 +1479,14 @@ static int bgmac_probe(struct bcma_device *core)
if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
bgmac->int_mask &= ~BGMAC_IS_TX_MASK;
- /* TODO: reset the external phy. Specs are needed */
- bgmac_phy_reset(bgmac);
-
- bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo &
- BGMAC_BFL_ENETROBO);
- if (bgmac->has_robosw)
- bgmac_warn(bgmac, "Support for Roboswitch not implemented\n");
-
- if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
- bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");
-
netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);
- err = bgmac_mii_register(bgmac);
+ if (!bgmac->mii_bus)
+ err = bgmac_phy_connect_direct(bgmac);
+ else
+ err = bgmac_phy_connect(bgmac);
if (err) {
- bgmac_err(bgmac, "Cannot register MDIO\n");
+ dev_err(bgmac->dev, "Cannot connect to PHY\n");
goto err_dma_free;
}
@@ -1686,64 +1496,34 @@ static int bgmac_probe(struct bcma_device *core)
err = register_netdev(bgmac->net_dev);
if (err) {
- bgmac_err(bgmac, "Cannot register net device\n");
- goto err_mii_unregister;
+ dev_err(bgmac->dev, "Cannot register net device\n");
+ goto err_phy_disconnect;
}
netif_carrier_off(net_dev);
return 0;
-err_mii_unregister:
- bgmac_mii_unregister(bgmac);
+err_phy_disconnect:
+ phy_disconnect(net_dev->phydev);
err_dma_free:
bgmac_dma_free(bgmac);
-
err_netdev_free:
- bcma_set_drvdata(core, NULL);
free_netdev(net_dev);
return err;
}
+EXPORT_SYMBOL_GPL(bgmac_enet_probe);
-static void bgmac_remove(struct bcma_device *core)
+void bgmac_enet_remove(struct bgmac *bgmac)
{
- struct bgmac *bgmac = bcma_get_drvdata(core);
-
unregister_netdev(bgmac->net_dev);
- bgmac_mii_unregister(bgmac);
+ phy_disconnect(bgmac->net_dev->phydev);
netif_napi_del(&bgmac->napi);
bgmac_dma_free(bgmac);
- bcma_set_drvdata(core, NULL);
free_netdev(bgmac->net_dev);
}
-
-static struct bcma_driver bgmac_bcma_driver = {
- .name = KBUILD_MODNAME,
- .id_table = bgmac_bcma_tbl,
- .probe = bgmac_probe,
- .remove = bgmac_remove,
-};
-
-static int __init bgmac_init(void)
-{
- int err;
-
- err = bcma_driver_register(&bgmac_bcma_driver);
- if (err)
- return err;
- pr_info("Broadcom 47xx GBit MAC driver loaded\n");
-
- return 0;
-}
-
-static void __exit bgmac_exit(void)
-{
- bcma_driver_unregister(&bgmac_bcma_driver);
-}
-
-module_init(bgmac_init)
-module_exit(bgmac_exit)
+EXPORT_SYMBOL_GPL(bgmac_enet_remove);
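
With probe/remove exported, a bus glue driver only has to fill in struct bgmac before handing it over. A hedged sketch of a minimal platform glue (all example_* names are hypothetical; only a subset of the required fields is shown):

static u32 example_read(struct bgmac *bgmac, u16 offset)
{
	return readl(bgmac->plat.base + offset);
}

static void example_write(struct bgmac *bgmac, u16 offset, u32 value)
{
	writel(value, bgmac->plat.base + offset);
}

static int example_glue_probe(struct platform_device *pdev)
{
	struct bgmac *bgmac;

	bgmac = devm_kzalloc(&pdev->dev, sizeof(*bgmac), GFP_KERNEL);
	if (!bgmac)
		return -ENOMEM;

	bgmac->dev = &pdev->dev;
	bgmac->dma_dev = &pdev->dev;
	bgmac->irq = platform_get_irq(pdev, 0);
	if (bgmac->irq < 0)
		return bgmac->irq;

	/* accessors and feature flags replace the old direct bcma_*()
	 * calls and chip-ID checks in the core */
	bgmac->read = example_read;
	bgmac->write = example_write;
	bgmac->feature_flags = BGMAC_FEAT_CMDCFG_SR_REV4;

	/* plat.base, the idm/clock ops and mac_addr would be filled in
	 * here as well before the core takes over */

	return bgmac_enet_probe(bgmac);
}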
MODULE_AUTHOR("Rafał Miłecki");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 9a03c142b..24a250267 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -1,19 +1,6 @@
#ifndef _BGMAC_H
#define _BGMAC_H
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#define bgmac_err(bgmac, fmt, ...) \
- dev_err(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)
-#define bgmac_warn(bgmac, fmt, ...) \
- dev_warn(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)
-#define bgmac_info(bgmac, fmt, ...) \
- dev_info(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)
-#define bgmac_dbg(bgmac, fmt, ...) \
- dev_dbg(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)
-
-#include <linux/bcma/bcma.h>
-#include <linux/brcmphy.h>
#include <linux/netdevice.h>
#define BGMAC_DEV_CTL 0x000
@@ -123,7 +110,7 @@
#define BGMAC_TX_LEN_1024_TO_1522 0x334
#define BGMAC_TX_LEN_1523_TO_2047 0x338
#define BGMAC_TX_LEN_2048_TO_4095 0x33c
-#define BGMAC_TX_LEN_4095_TO_8191 0x340
+#define BGMAC_TX_LEN_4096_TO_8191 0x340
#define BGMAC_TX_LEN_8192_TO_MAX 0x344
#define BGMAC_TX_JABBER_PKTS 0x348 /* Error */
#define BGMAC_TX_OVERSIZE_PKTS 0x34c /* Error */
@@ -166,7 +153,7 @@
#define BGMAC_RX_LEN_1024_TO_1522 0x3e4
#define BGMAC_RX_LEN_1523_TO_2047 0x3e8
#define BGMAC_RX_LEN_2048_TO_4095 0x3ec
-#define BGMAC_RX_LEN_4095_TO_8191 0x3f0
+#define BGMAC_RX_LEN_4096_TO_8191 0x3f0
#define BGMAC_RX_LEN_8192_TO_MAX 0x3f4
#define BGMAC_RX_JABBER_PKTS 0x3f8 /* Error */
#define BGMAC_RX_OVERSIZE_PKTS 0x3fc /* Error */
@@ -201,7 +188,6 @@
#define BGMAC_CMDCFG_HD_SHIFT 10
#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for core rev 0-3 */
#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, for core rev >= 4 */
-#define BGMAC_CMDCFG_SR(rev) ((rev >= 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
#define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */
#define BGMAC_CMDCFG_AE 0x00400000
#define BGMAC_CMDCFG_CFE 0x00800000
@@ -387,6 +373,24 @@
#define ETHER_MAX_LEN 1518
+/* Feature Flags */
+#define BGMAC_FEAT_TX_MASK_SETUP BIT(0)
+#define BGMAC_FEAT_RX_MASK_SETUP BIT(1)
+#define BGMAC_FEAT_IOST_ATTACHED BIT(2)
+#define BGMAC_FEAT_NO_RESET BIT(3)
+#define BGMAC_FEAT_MISC_PLL_REQ BIT(4)
+#define BGMAC_FEAT_SW_TYPE_PHY BIT(5)
+#define BGMAC_FEAT_SW_TYPE_EPHYRMII BIT(6)
+#define BGMAC_FEAT_SW_TYPE_RGMII BIT(7)
+#define BGMAC_FEAT_CMN_PHY_CTL BIT(8)
+#define BGMAC_FEAT_FLW_CTRL1 BIT(9)
+#define BGMAC_FEAT_FLW_CTRL2 BIT(10)
+#define BGMAC_FEAT_SET_RXQ_CLK BIT(11)
+#define BGMAC_FEAT_CLKCTLST BIT(12)
+#define BGMAC_FEAT_NO_CLR_MIB BIT(13)
+#define BGMAC_FEAT_FORCE_SPEED_2500 BIT(14)
+#define BGMAC_FEAT_CMDCFG_SR_REV4 BIT(15)
+
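
The flags let the shared core test capabilities rather than chip IDs. A sketch of how a bus driver might derive them, reusing the chip/package IDs removed from the core earlier in this patch (the function name is hypothetical):

static void example_set_feature_flags(struct bgmac *bgmac,
				      struct bcma_chipinfo *ci)
{
	/* one capability bit per former per-chip quirk */
	if (ci->id == BCMA_CHIP_ID_BCM53572 &&
	    ci->pkg == BCMA_PKG_ID_BCM47188)
		bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
	if (ci->id == BCMA_CHIP_ID_BCM47162)
		bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL2;
}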
struct bgmac_slot_info {
union {
struct sk_buff *skb;
@@ -436,12 +440,26 @@ struct bgmac_rx_header {
};
struct bgmac {
- struct bcma_device *core;
- struct bcma_device *cmn; /* Reference to CMN core for BCM4706 */
+ union {
+ struct {
+ void *base;
+ void *idm_base;
+ } plat;
+ struct {
+ struct bcma_device *core;
+ /* Reference to CMN core for BCM4706 */
+ struct bcma_device *cmn;
+ } bcma;
+ };
+
+ struct device *dev;
+ struct device *dma_dev;
+ unsigned char mac_addr[ETH_ALEN];
+ u32 feature_flags;
+
struct net_device *net_dev;
struct napi_struct napi;
struct mii_bus *mii_bus;
- struct phy_device *phy_dev;
/* DMA */
struct bgmac_dma_ring tx_ring[BGMAC_MAX_TX_RINGS];
@@ -453,6 +471,7 @@ struct bgmac {
u32 mib_rx_regs[BGMAC_NUM_MIB_RX_REGS];
/* Int */
+ int irq;
u32 int_mask;
/* Current MAC state */
@@ -463,16 +482,71 @@ struct bgmac {
bool has_robosw;
bool loopback;
+
+ u32 (*read)(struct bgmac *bgmac, u16 offset);
+ void (*write)(struct bgmac *bgmac, u16 offset, u32 value);
+ u32 (*idm_read)(struct bgmac *bgmac, u16 offset);
+ void (*idm_write)(struct bgmac *bgmac, u16 offset, u32 value);
+ bool (*clk_enabled)(struct bgmac *bgmac);
+ void (*clk_enable)(struct bgmac *bgmac, u32 flags);
+ void (*cco_ctl_maskset)(struct bgmac *bgmac, u32 offset, u32 mask,
+ u32 set);
+ u32 (*get_bus_clock)(struct bgmac *bgmac);
+ void (*cmn_maskset32)(struct bgmac *bgmac, u16 offset, u32 mask,
+ u32 set);
};
+int bgmac_enet_probe(struct bgmac *info);
+void bgmac_enet_remove(struct bgmac *bgmac);
+
+struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr);
+void bcma_mdio_mii_unregister(struct mii_bus *mii_bus);
+
static inline u32 bgmac_read(struct bgmac *bgmac, u16 offset)
{
- return bcma_read32(bgmac->core, offset);
+ return bgmac->read(bgmac, offset);
}
static inline void bgmac_write(struct bgmac *bgmac, u16 offset, u32 value)
{
- bcma_write32(bgmac->core, offset, value);
+ bgmac->write(bgmac, offset, value);
+}
+
+static inline u32 bgmac_idm_read(struct bgmac *bgmac, u16 offset)
+{
+ return bgmac->idm_read(bgmac, offset);
+}
+
+static inline void bgmac_idm_write(struct bgmac *bgmac, u16 offset, u32 value)
+{
+ bgmac->idm_write(bgmac, offset, value);
+}
+
+static inline bool bgmac_clk_enabled(struct bgmac *bgmac)
+{
+ return bgmac->clk_enabled(bgmac);
+}
+
+static inline void bgmac_clk_enable(struct bgmac *bgmac, u32 flags)
+{
+ bgmac->clk_enable(bgmac, flags);
+}
+
+static inline void bgmac_cco_ctl_maskset(struct bgmac *bgmac, u32 offset,
+ u32 mask, u32 set)
+{
+ bgmac->cco_ctl_maskset(bgmac, offset, mask, set);
+}
+
+static inline u32 bgmac_get_bus_clock(struct bgmac *bgmac)
+{
+ return bgmac->get_bus_clock(bgmac);
+}
+
+static inline void bgmac_cmn_maskset32(struct bgmac *bgmac, u16 offset,
+ u32 mask, u32 set)
+{
+ bgmac->cmn_maskset32(bgmac, offset, mask, set);
}
static inline void bgmac_maskset(struct bgmac *bgmac, u16 offset, u32 mask,
@@ -490,5 +564,4 @@ static inline void bgmac_set(struct bgmac *bgmac, u16 offset, u32 set)
{
bgmac_maskset(bgmac, offset, ~0, set);
}
-
#endif /* _BGMAC_H */
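
For reference, the maskset family wrapped above is a plain read-modify-write; bgmac_set() passing ~0 as the mask (so nothing is cleared) implies the following equivalent sequence:

/* bgmac_maskset(bgmac, offset, mask, set) behaves like: */
u32 val = bgmac_read(bgmac, offset);

val &= mask;
val |= set;
bgmac_write(bgmac, offset, val);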
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 8e1231521..8c2220b59 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -6352,10 +6352,6 @@ bnx2_open(struct net_device *dev)
struct bnx2 *bp = netdev_priv(dev);
int rc;
- rc = bnx2_request_firmware(bp);
- if (rc < 0)
- goto out;
-
netif_carrier_off(dev);
bnx2_disable_int(bp);
@@ -6424,7 +6420,6 @@ open_err:
bnx2_free_irq(bp);
bnx2_free_mem(bp);
bnx2_del_napi(bp);
- bnx2_release_firmware(bp);
goto out;
}
@@ -8571,6 +8566,12 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
+ rc = bnx2_request_firmware(bp);
+ if (rc < 0)
+ goto error;
+
+ bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
@@ -8603,6 +8604,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
error:
+ bnx2_release_firmware(bp);
pci_iounmap(pdev, bp->regview);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 9089404bb..e447c8ef4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -59,9 +59,6 @@
#include <linux/semaphore.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>
-#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
-#include <net/geneve.h>
-#endif
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
@@ -769,6 +766,11 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
(bp->common.bc_ver & 0xff00) >> 8,
(bp->common.bc_ver & 0xff));
+ if (pci_channel_offline(bp->pdev)) {
+ BNX2X_ERR("Cannot dump MCP info while in PCI error\n");
+ return;
+ }
+
val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);
@@ -9412,10 +9414,16 @@ unload_error:
/* Release IRQs */
bnx2x_free_irq(bp);
- /* Reset the chip */
- rc = bnx2x_reset_hw(bp, reset_code);
- if (rc)
- BNX2X_ERR("HW_RESET failed\n");
+ /* Reset the chip, unless the PCI function is offline. If we reach
+ * this point following PCI error handling, the device is really in
+ * a bad state and we're about to remove it, so resetting the chip
+ * is not a good idea.
+ */
+ if (!pci_channel_offline(bp->pdev)) {
+ rc = bnx2x_reset_hw(bp, reset_code);
+ if (rc)
+ BNX2X_ERR("HW_RESET failed\n");
+ }
/* Report UNLOAD_DONE to MCP */
bnx2x_send_unload_done(bp, keep_link);
@@ -10070,7 +10078,6 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
}
}
-#if defined(CONFIG_BNX2X_VXLAN) || IS_ENABLED(CONFIG_BNX2X_GENEVE)
static int bnx2x_udp_port_update(struct bnx2x *bp)
{
struct bnx2x_func_switch_update_params *switch_update_params;
@@ -10171,47 +10178,42 @@ static void __bnx2x_del_udp_port(struct bnx2x *bp, u16 port,
DP(BNX2X_MSG_SP, "Deleted UDP tunnel [%d] port %d\n",
type, port);
}
-#endif
-
-#ifdef CONFIG_BNX2X_VXLAN
-static void bnx2x_add_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
-{
- struct bnx2x *bp = netdev_priv(netdev);
- u16 t_port = ntohs(port);
-
- __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
-}
-
-static void bnx2x_del_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
-{
- struct bnx2x *bp = netdev_priv(netdev);
- u16 t_port = ntohs(port);
-
- __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
-}
-#endif
-#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
-static void bnx2x_add_geneve_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+static void bnx2x_udp_tunnel_add(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
{
struct bnx2x *bp = netdev_priv(netdev);
- u16 t_port = ntohs(port);
+ u16 t_port = ntohs(ti->port);
- __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
+ break;
+ default:
+ break;
+ }
}
-static void bnx2x_del_geneve_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+static void bnx2x_udp_tunnel_del(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
{
struct bnx2x *bp = netdev_priv(netdev);
- u16 t_port = ntohs(port);
+ u16 t_port = ntohs(ti->port);
- __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
+ break;
+ default:
+ break;
+ }
}
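
Both callbacks now receive a struct udp_tunnel_info rather than a bare port and address family. A sketch of what the core hands the driver when a VXLAN socket on the IANA default port shows up (values illustrative):

struct udp_tunnel_info ti = {
	.type      = UDP_TUNNEL_TYPE_VXLAN,
	.sa_family = AF_INET,
	.port      = htons(4789),	/* IANA-assigned VXLAN port */
};

netdev->netdev_ops->ndo_udp_tunnel_add(netdev, &ti);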
-#endif
static int bnx2x_close(struct net_device *dev);
@@ -10319,7 +10321,6 @@ sp_rtnl_not_reset:
&bp->sp_rtnl_state))
bnx2x_update_mng_version(bp);
-#if defined(CONFIG_BNX2X_VXLAN) || IS_ENABLED(CONFIG_BNX2X_GENEVE)
if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT,
&bp->sp_rtnl_state)) {
if (bnx2x_udp_port_update(bp)) {
@@ -10329,20 +10330,14 @@ sp_rtnl_not_reset:
BNX2X_UDP_PORT_MAX);
} else {
/* Since we don't store additional port information,
- * if no port is configured for any feature ask for
+ * if no ports are configured for any feature ask for
* information about currently configured ports.
*/
-#ifdef CONFIG_BNX2X_VXLAN
- if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count)
- vxlan_get_rx_port(bp->dev);
-#endif
-#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
- if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count)
- geneve_get_rx_port(bp->dev);
-#endif
+ if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count &&
+ !bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count)
+ udp_tunnel_get_rx_info(bp->dev);
}
}
-#endif
/* work which needs rtnl lock not-taken (as it takes the lock itself and
* can be called from other contexts as well)
@@ -12545,14 +12540,8 @@ static int bnx2x_open(struct net_device *dev)
if (rc)
return rc;
-#ifdef CONFIG_BNX2X_VXLAN
if (IS_PF(bp))
- vxlan_get_rx_port(dev);
-#endif
-#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
- if (IS_PF(bp))
- geneve_get_rx_port(dev);
-#endif
+ udp_tunnel_get_rx_info(dev);
return 0;
}
@@ -13039,14 +13028,8 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_get_phys_port_id = bnx2x_get_phys_port_id,
.ndo_set_vf_link_state = bnx2x_set_vf_link_state,
.ndo_features_check = bnx2x_features_check,
-#ifdef CONFIG_BNX2X_VXLAN
- .ndo_add_vxlan_port = bnx2x_add_vxlan_port,
- .ndo_del_vxlan_port = bnx2x_del_vxlan_port,
-#endif
-#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
- .ndo_add_geneve_port = bnx2x_add_geneve_port,
- .ndo_del_geneve_port = bnx2x_del_geneve_port,
-#endif
+ .ndo_udp_tunnel_add = bnx2x_udp_tunnel_add,
+ .ndo_udp_tunnel_del = bnx2x_udp_tunnel_del,
};
static int bnx2x_set_coherency_mask(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index e655b76e8..228c964e7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -37,9 +37,7 @@
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
-#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
-#include <net/vxlan.h>
-#endif
+#include <net/udp_tunnel.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif
@@ -75,12 +73,32 @@ enum board_idx {
BCM57301,
BCM57302,
BCM57304,
+ BCM57417_NPAR,
+ BCM58700,
+ BCM57311,
+ BCM57312,
BCM57402,
BCM57404,
BCM57406,
+ BCM57402_NPAR,
+ BCM57407,
+ BCM57412,
+ BCM57414,
+ BCM57416,
+ BCM57417,
+ BCM57412_NPAR,
BCM57314,
+ BCM57417_SFP,
+ BCM57416_SFP,
+ BCM57404_NPAR,
+ BCM57406_NPAR,
+ BCM57407_SFP,
+ BCM57414_NPAR,
+ BCM57416_NPAR,
BCM57304_VF,
BCM57404_VF,
+ BCM57414_VF,
+ BCM57314_VF,
};
/* indexed by enum above */
@@ -90,25 +108,65 @@ static const struct {
{ "Broadcom BCM57301 NetXtreme-C Single-port 10Gb Ethernet" },
{ "Broadcom BCM57302 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" },
{ "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
+ { "Broadcom BCM58700 Nitro 4-port 1Gb/2.5Gb/10Gb Ethernet" },
+ { "Broadcom BCM57311 NetXtreme-C Single-port 10Gb Ethernet" },
+ { "Broadcom BCM57312 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" },
{ "Broadcom BCM57402 NetXtreme-E Dual-port 10Gb Ethernet" },
{ "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
{ "Broadcom BCM57406 NetXtreme-E Dual-port 10GBase-T Ethernet" },
+ { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
+ { "Broadcom BCM57407 NetXtreme-E Dual-port 10GBase-T Ethernet" },
+ { "Broadcom BCM57412 NetXtreme-E Dual-port 10Gb Ethernet" },
+ { "Broadcom BCM57414 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
+ { "Broadcom BCM57416 NetXtreme-E Dual-port 10GBase-T Ethernet" },
+ { "Broadcom BCM57417 NetXtreme-E Dual-port 10GBase-T Ethernet" },
+ { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57314 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ { "Broadcom BCM57417 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
+ { "Broadcom BCM57416 NetXtreme-E Dual-port 10Gb Ethernet" },
+ { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
+ { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
+ { "Broadcom BCM57407 NetXtreme-E Dual-port 25Gb Ethernet" },
+ { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
+ { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" },
{ "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" },
+ { "Broadcom BCM57414 NetXtreme-E Ethernet Virtual Function" },
+ { "Broadcom BCM57314 NetXtreme-E Ethernet Virtual Function" },
};
static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
+ { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
+ { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
+ { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
+ { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
+ { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
+ { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
+ { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
+ { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
+ { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
+ { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
+ { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
+ { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
+ { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
#ifdef CONFIG_BNXT_SRIOV
{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF },
{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = BCM57414_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = BCM57314_VF },
#endif
{ 0 }
};
@@ -125,12 +183,14 @@ static const u16 bnxt_async_events_arr[] = {
HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
};
static bool bnxt_vf_pciid(enum board_idx idx)
{
- return (idx == BCM57304_VF || idx == BCM57404_VF);
+ return (idx == BCM57304_VF || idx == BCM57404_VF ||
+ idx == BCM57314_VF || idx == BCM57414_VF);
}
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
@@ -920,6 +980,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
}
tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
+ tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
rxr->rx_prod = NEXT_RX(prod);
cons = NEXT_RX(cons);
@@ -938,32 +999,102 @@ static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
}
+static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
+ int payload_off, int tcp_ts,
+ struct sk_buff *skb)
+{
+#ifdef CONFIG_INET
+ struct tcphdr *th;
+ int len, nw_off;
+ u16 outer_ip_off, inner_ip_off, inner_mac_off;
+ u32 hdr_info = tpa_info->hdr_info;
+ bool loopback = false;
+
+ inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
+ inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
+ outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
+
+ /* If the packet is an internal loopback packet, the offsets will
+ * have an extra 4 bytes.
+ */
+ if (inner_mac_off == 4) {
+ loopback = true;
+ } else if (inner_mac_off > 4) {
+ __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
+ ETH_HLEN - 2));
+
+ * We only support inner IPv4/IPv6. If we don't see the
+ * correct protocol ID, it must be a loopback packet where
+ * the offsets are off by 4.
+ */
+ if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
+ loopback = true;
+ }
+ if (loopback) {
+ /* internal loopback packet, subtract 4 from all offsets */
+ inner_ip_off -= 4;
+ inner_mac_off -= 4;
+ outer_ip_off -= 4;
+ }
+
+ nw_off = inner_ip_off - ETH_HLEN;
+ skb_set_network_header(skb, nw_off);
+ if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+
+ skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
+ len = skb->len - skb_transport_offset(skb);
+ th = tcp_hdr(skb);
+ th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
+ } else {
+ struct iphdr *iph = ip_hdr(skb);
+
+ skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
+ len = skb->len - skb_transport_offset(skb);
+ th = tcp_hdr(skb);
+ th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
+ }
+
+ if (inner_mac_off) { /* tunnel */
+ struct udphdr *uh = NULL;
+ __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
+ ETH_HLEN - 2));
+
+ if (proto == htons(ETH_P_IP)) {
+ struct iphdr *iph = (struct iphdr *)skb->data;
+
+ if (iph->protocol == IPPROTO_UDP)
+ uh = (struct udphdr *)(iph + 1);
+ } else {
+ struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+
+ if (iph->nexthdr == IPPROTO_UDP)
+ uh = (struct udphdr *)(iph + 1);
+ }
+ if (uh) {
+ if (uh->check)
+ skb_shinfo(skb)->gso_type |=
+ SKB_GSO_UDP_TUNNEL_CSUM;
+ else
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+ }
+ }
+#endif
+ return skb;
+}
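
The three offsets decoded above describe one nesting of headers; the layout below is an assumption inferred from the checks in this function:

/*
 * Assumed layout the offsets describe:
 *
 *   [outer MAC][outer IP][UDP][inner MAC][inner IP][TCP][payload]
 *
 * outer_ip_off, inner_mac_off and inner_ip_off locate the respective
 * headers; an internal loopback header in front shifts all three by 4,
 * which the inner_mac_off checks above detect and undo.
 */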
+
#define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
-static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info,
- struct rx_tpa_end_cmp *tpa_end,
- struct rx_tpa_end_cmp_ext *tpa_end1,
+static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
+ int payload_off, int tcp_ts,
struct sk_buff *skb)
{
#ifdef CONFIG_INET
struct tcphdr *th;
- int payload_off, tcp_opt_len = 0;
- int len, nw_off;
- u16 segs;
-
- segs = TPA_END_TPA_SEGS(tpa_end);
- if (segs == 1)
- return skb;
+ int len, nw_off, tcp_opt_len;
- NAPI_GRO_CB(skb)->count = segs;
- skb_shinfo(skb)->gso_size =
- le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
- skb_shinfo(skb)->gso_type = tpa_info->gso_type;
- payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
- RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
- RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
- if (TPA_END_GRO_TS(tpa_end))
+ if (tcp_ts)
tcp_opt_len = 12;
if (tpa_info->gso_type == SKB_GSO_TCPV4) {
@@ -1020,6 +1151,32 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info,
return skb;
}
+static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
+ struct bnxt_tpa_info *tpa_info,
+ struct rx_tpa_end_cmp *tpa_end,
+ struct rx_tpa_end_cmp_ext *tpa_end1,
+ struct sk_buff *skb)
+{
+#ifdef CONFIG_INET
+ int payload_off;
+ u16 segs;
+
+ segs = TPA_END_TPA_SEGS(tpa_end);
+ if (segs == 1)
+ return skb;
+
+ NAPI_GRO_CB(skb)->count = segs;
+ skb_shinfo(skb)->gso_size =
+ le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
+ skb_shinfo(skb)->gso_type = tpa_info->gso_type;
+ payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
+ RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
+ RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
+ skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
+#endif
+ return skb;
+}
+
static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
struct bnxt_napi *bnapi,
u32 *raw_cons,
@@ -1130,7 +1287,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
}
if (TPA_END_GRO(tpa_end))
- skb = bnxt_gro_skb(tpa_info, tpa_end, tpa_end1, skb);
+ skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
return skb;
}
@@ -1358,6 +1515,11 @@ static int bnxt_async_event_process(struct bnxt *bp,
set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
break;
}
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
+ if (BNXT_PF(bp))
+ goto async_event_process_exit;
+ set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
+ break;
default:
netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
event_id);
@@ -1536,6 +1698,76 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
return rx_pkts;
}
+static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
+{
+ struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
+ struct bnxt *bp = bnapi->bp;
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+ struct tx_cmp *txcmp;
+ struct rx_cmp_ext *rxcmp1;
+ u32 cp_cons, tmp_raw_cons;
+ u32 raw_cons = cpr->cp_raw_cons;
+ u32 rx_pkts = 0;
+ bool agg_event = false;
+
+ while (1) {
+ int rc;
+
+ cp_cons = RING_CMP(raw_cons);
+ txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+ if (!TX_CMP_VALID(txcmp, raw_cons))
+ break;
+
+ if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
+ tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
+ cp_cons = RING_CMP(tmp_raw_cons);
+ rxcmp1 = (struct rx_cmp_ext *)
+ &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+ if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
+ break;
+
+ /* force an error to recycle the buffer */
+ rxcmp1->rx_cmp_cfa_code_errors_v2 |=
+ cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
+
+ rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
+ if (likely(rc == -EIO))
+ rx_pkts++;
+ else if (rc == -EBUSY) /* partial completion */
+ break;
+ } else if (unlikely(TX_CMP_TYPE(txcmp) ==
+ CMPL_BASE_TYPE_HWRM_DONE)) {
+ bnxt_hwrm_handler(bp, txcmp);
+ } else {
+ netdev_err(bp->dev,
+ "Invalid completion received on special ring\n");
+ }
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+
+ if (rx_pkts == budget)
+ break;
+ }
+
+ cpr->cp_raw_cons = raw_cons;
+ BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+ writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+ writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+
+ if (agg_event) {
+ writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
+ writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
+ }
+
+ if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
+ napi_complete(napi);
+ BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+ }
+ return rx_pkts;
+}
+
static int bnxt_poll(struct napi_struct *napi, int budget)
{
struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
@@ -2208,6 +2440,9 @@ static int bnxt_alloc_vnics(struct bnxt *bp)
num_vnics += bp->rx_nr_rings;
#endif
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+ num_vnics++;
+
bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
GFP_KERNEL);
if (!bp->vnic_info)
@@ -2225,7 +2460,8 @@ static void bnxt_init_vnics(struct bnxt *bp)
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
vnic->fw_vnic_id = INVALID_HW_RING_ID;
- vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+ vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
+ vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
if (bp->vnic_info[i].rss_hash_key) {
@@ -2262,7 +2498,7 @@ static void bnxt_set_tpa_flags(struct bnxt *bp)
bp->flags &= ~BNXT_FLAG_TPA;
if (bp->dev->features & NETIF_F_LRO)
bp->flags |= BNXT_FLAG_LRO;
- if ((bp->dev->features & NETIF_F_GRO) && (bp->pdev->revision > 0))
+ if (bp->dev->features & NETIF_F_GRO)
bp->flags |= BNXT_FLAG_GRO;
}
@@ -2529,7 +2765,7 @@ static int bnxt_alloc_stats(struct bnxt *bp)
cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
}
- if (BNXT_PF(bp)) {
+ if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
sizeof(struct tx_port_stats) + 1024;
@@ -3031,7 +3267,7 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
- req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[0];
+ req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
@@ -3068,8 +3304,10 @@ static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
- req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX |
- CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
+ req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
+ if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
+ req.flags |=
+ cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
req.enables =
cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
@@ -3176,7 +3414,7 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_rss_cfg_input req = {0};
- if (vnic->fw_rss_cos_lb_ctx == INVALID_HW_RING_ID)
+ if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
@@ -3188,10 +3426,14 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
req.hash_type = cpu_to_le32(vnic->hash_type);
- if (vnic->flags & BNXT_VNIC_RSS_FLAG)
- max_rings = bp->rx_nr_rings;
- else
+ if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+ max_rings = bp->rx_nr_rings - 1;
+ else
+ max_rings = bp->rx_nr_rings;
+ } else {
max_rings = 1;
+ }
/* Fill the RSS indirection table with ring group ids */
for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
@@ -3204,7 +3446,7 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
req.hash_key_tbl_addr =
cpu_to_le64(vnic->rss_hash_key_dma_addr);
}
- req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
+ req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
@@ -3227,32 +3469,35 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
-static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id)
+static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
+ u16 ctx_idx)
{
struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
req.rss_cos_lb_ctx_id =
- cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx);
+ cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+ bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
}
static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
{
- int i;
+ int i, j;
for (i = 0; i < bp->nr_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
- if (vnic->fw_rss_cos_lb_ctx != INVALID_HW_RING_ID)
- bnxt_hwrm_vnic_ctx_free_one(bp, i);
+ for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
+ if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
+ bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
+ }
}
bp->rsscos_nr_ctxs = 0;
}
-static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id)
+static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
{
int rc;
struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
@@ -3265,7 +3510,7 @@ static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id)
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc)
- bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx =
+ bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
le16_to_cpu(resp->rss_cos_lb_ctx_id);
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -3277,17 +3522,34 @@ static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
unsigned int ring = 0, grp_idx;
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_cfg_input req = {0};
+ u16 def_vlan = 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
+
+ req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
/* Only RSS support for now TBD: COS & LB */
- req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP |
- VNIC_CFG_REQ_ENABLES_RSS_RULE);
- req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
- req.cos_rule = cpu_to_le16(0xffff);
+ if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
+ req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
+ req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
+ VNIC_CFG_REQ_ENABLES_MRU);
+ } else {
+ req.rss_rule = cpu_to_le16(0xffff);
+ }
+
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
+ (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
+ req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
+ req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
+ } else {
+ req.cos_rule = cpu_to_le16(0xffff);
+ }
+
if (vnic->flags & BNXT_VNIC_RSS_FLAG)
ring = 0;
else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
ring = vnic_id - 1;
+ else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
+ ring = bp->rx_nr_rings - 1;
grp_idx = bp->rx_ring[ring].bnapi->index;
req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
@@ -3297,7 +3559,11 @@ static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
VLAN_HLEN);
- if (bp->flags & BNXT_FLAG_STRIP_VLAN)
+#ifdef CONFIG_BNXT_SRIOV
+ if (BNXT_VF(bp))
+ def_vlan = bp->vf.vlan;
+#endif
+ if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -3351,7 +3617,8 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
bp->grp_info[grp_idx].fw_grp_id;
}
- bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+ bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
+ bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
if (vnic_id == 0)
req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
@@ -3784,6 +4051,9 @@ static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
if (!bp->bnapi)
return 0;
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+ return 0;
+
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
mutex_lock(&bp->hwrm_cmd_lock);
@@ -3812,9 +4082,12 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
struct hwrm_stat_ctx_alloc_input req = {0};
struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+ return 0;
+
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
- req.update_period_ms = cpu_to_le32(1000);
+ req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
mutex_lock(&bp->hwrm_cmd_lock);
for (i = 0; i < bp->cp_nr_rings; i++) {
@@ -3836,6 +4109,39 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
return 0;
}
+static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
+{
+ struct hwrm_func_qcfg_input req = {0};
+ struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto func_qcfg_exit;
+
+#ifdef CONFIG_BNXT_SRIOV
+ if (BNXT_VF(bp)) {
+ struct bnxt_vf_info *vf = &bp->vf;
+
+ vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
+ }
+#endif
+ switch (resp->port_partition_type) {
+ case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
+ case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
+ case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
+ bp->port_partition_type = resp->port_partition_type;
+ break;
+ }
+
+func_qcfg_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
int rc = 0;
@@ -3855,6 +4161,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
pf->fw_fid = le16_to_cpu(resp->fid);
pf->port_id = le16_to_cpu(resp->port_id);
+ bp->dev->dev_port = pf->port_id;
memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
@@ -3990,6 +4297,11 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
if (resp->hwrm_intf_maj >= 1)
bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
+ bp->chip_num = le16_to_cpu(resp->chip_num);
+ if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
+ !resp->chip_metal)
+ bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
+
hwrm_ver_get_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -4078,7 +4390,7 @@ static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
int rc;
/* allocate context for vnic */
- rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id);
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
vnic_id, rc);
@@ -4086,6 +4398,16 @@ static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
}
bp->rsscos_nr_ctxs++;
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
+ vnic_id, rc);
+ goto vnic_setup_err;
+ }
+ bp->rsscos_nr_ctxs++;
+ }
+
/* configure default vnic, ring grp */
rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
if (rc) {
@@ -4143,6 +4465,36 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
#endif
}
+/* Allow PF and VF with default VLAN to be in promiscuous mode */
+static bool bnxt_promisc_ok(struct bnxt *bp)
+{
+#ifdef CONFIG_BNXT_SRIOV
+ if (BNXT_VF(bp) && !bp->vf.vlan)
+ return false;
+#endif
+ return true;
+}
+
+static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
+{
+ unsigned int rc = 0;
+
+ rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
+ if (rc) {
+ netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
+ rc);
+ return rc;
+ }
+
+ rc = bnxt_hwrm_vnic_cfg(bp, 1);
+ if (rc) {
+ netdev_err(bp->dev, "Cannot configure special vnic for NS2 A0: %x\n",
+ rc);
+ return rc;
+ }
+ return rc;
+}
+
static int bnxt_cfg_rx_mode(struct bnxt *);
static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
@@ -4150,6 +4502,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
int rc = 0;
+ unsigned int rx_nr_rings = bp->rx_nr_rings;
if (irq_re_init) {
rc = bnxt_hwrm_stat_ctx_alloc(bp);
@@ -4172,8 +4525,11 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
goto err_out;
}
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+ rx_nr_rings--;
+
/* default vnic 0 */
- rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, bp->rx_nr_rings);
+ rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
if (rc) {
netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
goto err_out;
@@ -4208,7 +4564,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
- if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp))
+ if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
if (bp->dev->flags & IFF_ALLMULTI) {
@@ -4228,7 +4584,19 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
rc = bnxt_hwrm_set_coal(bp);
if (rc)
netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
- rc);
+ rc);
+
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
+ rc = bnxt_setup_nitroa0_vnic(bp);
+ if (rc)
+ netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
+ rc);
+ }
+
+ if (BNXT_VF(bp)) {
+ bnxt_hwrm_func_qcfg(bp);
+ netdev_update_features(bp->dev);
+ }
return 0;
@@ -4532,14 +4900,23 @@ static void bnxt_del_napi(struct bnxt *bp)
static void bnxt_init_napi(struct bnxt *bp)
{
int i;
+ unsigned int cp_nr_rings = bp->cp_nr_rings;
struct bnxt_napi *bnapi;
if (bp->flags & BNXT_FLAG_USING_MSIX) {
- for (i = 0; i < bp->cp_nr_rings; i++) {
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+ cp_nr_rings--;
+ for (i = 0; i < cp_nr_rings; i++) {
bnapi = bp->bnapi[i];
netif_napi_add(bp->dev, &bnapi->napi,
bnxt_poll, 64);
}
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
+ bnapi = bp->bnapi[cp_nr_rings];
+ netif_napi_add(bp->dev, &bnapi->napi,
+ bnxt_poll_nitroa0, 64);
+ napi_hash_add(&bnapi->napi);
+ }
} else {
bnapi = bp->bnapi[0];
netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
@@ -4580,9 +4957,7 @@ static void bnxt_tx_disable(struct bnxt *bp)
for (i = 0; i < bp->tx_nr_rings; i++) {
txr = &bp->tx_ring[i];
txq = netdev_get_tx_queue(bp->dev, i);
- __netif_tx_lock(txq, smp_processor_id());
txr->dev_state = BNXT_DEV_STATE_CLOSING;
- __netif_tx_unlock(txq);
}
}
/* Stop all TX queues */
@@ -4644,6 +5019,7 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
int rc = 0;
struct hwrm_port_phy_qcaps_input req = {0};
struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_link_info *link_info = &bp->link_info;
if (bp->hwrm_spec_code < 0x10201)
return 0;
@@ -4666,6 +5042,8 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
}
+ link_info->support_auto_speeds =
+ le16_to_cpu(resp->supported_speeds_auto_mode);
hwrm_phy_qcaps_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -4923,7 +5301,7 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
{
struct hwrm_port_phy_cfg_input req = {0};
- if (BNXT_VF(bp))
+ if (!BNXT_SINGLE_PF(bp))
return 0;
if (pci_num_vf(bp->pdev))
@@ -5073,15 +5451,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
netdev_warn(bp->dev, "failed to update phy settings\n");
}
- if (irq_re_init) {
-#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
- vxlan_get_rx_port(bp->dev);
-#endif
- if (!bnxt_hwrm_tunnel_dst_port_alloc(
- bp, htons(0x17c1),
- TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE))
- bp->nge_port_cnt = 1;
- }
+ if (irq_re_init)
+ udp_tunnel_get_rx_info(bp->dev);
set_bit(BNXT_STATE_OPEN, &bp->state);
bnxt_enable_int(bp);
@@ -5122,12 +5493,19 @@ static int bnxt_open(struct net_device *dev)
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
- rc = bnxt_hwrm_func_reset(bp);
- if (rc) {
- netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n",
- rc);
- rc = -1;
- return rc;
+ if (!test_bit(BNXT_STATE_FN_RST_DONE, &bp->state)) {
+ rc = bnxt_hwrm_func_reset(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n",
+ rc);
+ rc = -EBUSY;
+ return rc;
+ }
+ /* Do func_reset during the 1st PF open only to prevent killing
+ * the VFs when the PF is brought down and up.
+ */
+ if (BNXT_PF(bp))
+ set_bit(BNXT_STATE_FN_RST_DONE, &bp->state);
}
return __bnxt_open_nic(bp, true, true);
}
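
The reworked bnxt_open() above turns the chip-level function reset into a one-shot step guarded by the BNXT_STATE_FN_RST_DONE bit, so cycling a PF down and up no longer re-resets the function and kills its VFs. A minimal userspace sketch of the same guard pattern, with hypothetical names (dev_state, DEV_RST_DONE and hw_reset() are stand-ins, not driver symbols):

/* One-shot expensive step guarded by a state flag; illustrative only. */
#include <stdio.h>

#define DEV_RST_DONE	0x1u

static unsigned int dev_state;

static int hw_reset(void)
{
	puts("function reset");
	return 0;
}

static int dev_open(void)
{
	if (!(dev_state & DEV_RST_DONE)) {
		int rc = hw_reset();

		if (rc)
			return rc;
		dev_state |= DEV_RST_DONE;	/* skip the reset next time */
	}
	puts("open");
	return 0;
}

int main(void)
{
	dev_open();	/* resets, then opens */
	dev_open();	/* opens only */
	return 0;
}

The error-detected handler later in this patch clears the bit again, so a subsequent slot reset does perform the function reset.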
@@ -5347,8 +5725,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
- /* Only allow PF to be in promiscuous mode */
- if ((dev->flags & IFF_PROMISC) && BNXT_PF(bp))
+ if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
uc_update = bnxt_uc_list_updated(bp);
@@ -5440,8 +5817,12 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
return false;
vnics = 1 + bp->rx_nr_rings;
- if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics)
+ if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics) {
+ netdev_warn(bp->dev,
+ "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
+ min(pf->max_rsscos_ctxs - 1, pf->max_vnics - 1));
return false;
+ }
return true;
#else
@@ -5454,7 +5835,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
{
struct bnxt *bp = netdev_priv(dev);
- if (!bnxt_rfs_capable(bp))
+ if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
features &= ~NETIF_F_NTUPLE;
	/* Both CTAG and STAG VLAN acceleration on the RX side have to be
@@ -5469,7 +5850,14 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
features |= NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_STAG_RX;
}
-
+#ifdef CONFIG_BNXT_SRIOV
+ if (BNXT_VF(bp)) {
+ if (bp->vf.vlan) {
+ features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX);
+ }
+ }
+#endif
return features;
}
@@ -5483,7 +5871,7 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
bool update_tpa = false;
flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
- if ((features & NETIF_F_GRO) && (bp->pdev->revision > 0))
+ if ((features & NETIF_F_GRO) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
flags |= BNXT_FLAG_GRO;
if (features & NETIF_F_LRO)
flags |= BNXT_FLAG_LRO;
@@ -5585,9 +5973,10 @@ static void bnxt_dbg_dump_states(struct bnxt *bp)
}
}
-static void bnxt_reset_task(struct bnxt *bp)
+static void bnxt_reset_task(struct bnxt *bp, bool silent)
{
- bnxt_dbg_dump_states(bp);
+ if (!silent)
+ bnxt_dbg_dump_states(bp);
if (netif_running(bp->dev)) {
bnxt_close_nic(bp, false, false);
bnxt_open_nic(bp, false, false);
@@ -5638,6 +6027,23 @@ bnxt_restart_timer:
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
+/* Only called from bnxt_sp_task() */
+static void bnxt_reset(struct bnxt *bp, bool silent)
+{
+ /* bnxt_reset_task() calls bnxt_close_nic() which waits
+ * for BNXT_STATE_IN_SP_TASK to clear.
+ * If there is a parallel dev_close(), bnxt_close() may be holding
+ * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
+ * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
+ */
+ clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+ rtnl_lock();
+ if (test_bit(BNXT_STATE_OPEN, &bp->state))
+ bnxt_reset_task(bp, silent);
+ set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+ rtnl_unlock();
+}
+
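
The ordering that the comment in bnxt_reset() describes reduces to two primitives: a closer that holds the big lock while waiting for an in-progress flag, and a worker that must therefore drop the flag before taking the big lock. A loose pthreads rendering of that shape (big_lock standing in for rtnl, in_task for BNXT_STATE_IN_SP_TASK; all names hypothetical):

#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool in_task = 1;	/* set for the duration of the work item */

static void worker_reset(void)
{
	/* A closer may hold big_lock while spinning on in_task, so clear
	 * the flag first or the lock below could never be acquired. */
	atomic_store(&in_task, 0);
	pthread_mutex_lock(&big_lock);
	/* ... reset the device under big_lock ... */
	atomic_store(&in_task, 1);	/* still inside the work item */
	pthread_mutex_unlock(&big_lock);
}

int main(void)
{
	worker_reset();
	return 0;
}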
static void bnxt_cfg_ntp_filters(struct bnxt *);
static void bnxt_sp_task(struct work_struct *work)
@@ -5674,16 +6080,20 @@ static void bnxt_sp_task(struct work_struct *work)
bnxt_hwrm_tunnel_dst_port_free(
bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
}
- if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) {
- /* bnxt_reset_task() calls bnxt_close_nic() which waits
- * for BNXT_STATE_IN_SP_TASK to clear.
- */
- clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
- rtnl_lock();
- bnxt_reset_task(bp);
- set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
- rtnl_unlock();
+ if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
+ bnxt_hwrm_tunnel_dst_port_alloc(
+ bp, bp->nge_port,
+ TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
+ }
+ if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
+ bnxt_hwrm_tunnel_dst_port_free(
+ bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
}
+ if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
+ bnxt_reset(bp, false);
+
+ if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
+ bnxt_reset(bp, true);
if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event))
bnxt_get_port_module_status(bp);
@@ -5774,6 +6184,8 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->tx_coal_ticks_irq = 2;
bp->tx_coal_bufs_irq = 2;
+ bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
+
init_timer(&bp->timer);
bp->timer.data = (unsigned long)bp;
bp->timer.function = bnxt_timer;
@@ -5839,7 +6251,7 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
{
struct bnxt *bp = netdev_priv(dev);
- if (new_mtu < 60 || new_mtu > 9000)
+ if (new_mtu < 60 || new_mtu > 9500)
return -EINVAL;
if (netif_running(dev))
@@ -5918,7 +6330,8 @@ static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
keys1->ports.ports == keys2->ports.ports &&
keys1->basic.ip_proto == keys2->basic.ip_proto &&
keys1->basic.n_proto == keys2->basic.n_proto &&
- ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr))
+ ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
+ ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
return true;
return false;
@@ -5931,12 +6344,28 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
struct bnxt_ntuple_filter *fltr, *new_fltr;
struct flow_keys *fkeys;
struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
- int rc = 0, idx, bit_id;
+ int rc = 0, idx, bit_id, l2_idx = 0;
struct hlist_head *head;
if (skb->encapsulation)
return -EPROTONOSUPPORT;
+ if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ int off = 0, j;
+
+ netif_addr_lock_bh(dev);
+ for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
+ if (ether_addr_equal(eth->h_dest,
+ vnic->uc_list + off)) {
+ l2_idx = j + 1;
+ break;
+ }
+ }
+ netif_addr_unlock_bh(dev);
+ if (!l2_idx)
+ return -EINVAL;
+ }
new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
if (!new_fltr)
return -ENOMEM;
@@ -5954,6 +6383,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
goto err_free;
}
+ memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
@@ -5979,6 +6409,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
new_fltr->sw_id = (u16)bit_id;
new_fltr->flow_id = flow_id;
+ new_fltr->l2_fltr_idx = l2_idx;
new_fltr->rxq = rxq_index;
hlist_add_head_rcu(&new_fltr->hash, head);
bp->ntp_fltr_count++;
@@ -6048,47 +6479,83 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
#endif /* CONFIG_RFS_ACCEL */
-static void bnxt_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
- __be16 port)
+static void bnxt_udp_tunnel_add(struct net_device *dev,
+ struct udp_tunnel_info *ti)
{
struct bnxt *bp = netdev_priv(dev);
- if (!netif_running(dev))
+ if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
return;
- if (sa_family != AF_INET6 && sa_family != AF_INET)
+ if (!netif_running(dev))
return;
- if (bp->vxlan_port_cnt && bp->vxlan_port != port)
- return;
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
+ return;
- bp->vxlan_port_cnt++;
- if (bp->vxlan_port_cnt == 1) {
- bp->vxlan_port = port;
- set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
- schedule_work(&bp->sp_task);
+ bp->vxlan_port_cnt++;
+ if (bp->vxlan_port_cnt == 1) {
+ bp->vxlan_port = ti->port;
+ set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
+ schedule_work(&bp->sp_task);
+ }
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (bp->nge_port_cnt && bp->nge_port != ti->port)
+ return;
+
+ bp->nge_port_cnt++;
+ if (bp->nge_port_cnt == 1) {
+ bp->nge_port = ti->port;
+ set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
+ }
+ break;
+ default:
+ return;
}
+
+ schedule_work(&bp->sp_task);
}
-static void bnxt_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
- __be16 port)
+static void bnxt_udp_tunnel_del(struct net_device *dev,
+ struct udp_tunnel_info *ti)
{
struct bnxt *bp = netdev_priv(dev);
- if (!netif_running(dev))
+ if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
return;
- if (sa_family != AF_INET6 && sa_family != AF_INET)
+ if (!netif_running(dev))
return;
- if (bp->vxlan_port_cnt && bp->vxlan_port == port) {
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
+ return;
bp->vxlan_port_cnt--;
- if (bp->vxlan_port_cnt == 0) {
- set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
- schedule_work(&bp->sp_task);
- }
+ if (bp->vxlan_port_cnt != 0)
+ return;
+
+ set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (!bp->nge_port_cnt || bp->nge_port != ti->port)
+ return;
+ bp->nge_port_cnt--;
+
+ if (bp->nge_port_cnt != 0)
+ return;
+
+ set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
+ break;
+ default:
+ return;
}
+
+ schedule_work(&bp->sp_task);
}
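
Both handlers above implement the same single-slot bookkeeping: the NIC offloads one VXLAN and one GENEVE destination port at a time, so the driver counts users of the currently programmed port and only touches firmware on the 0-to-1 and 1-to-0 transitions. The pattern in isolation, as a hypothetical userspace reduction of one slot:

#include <stdint.h>
#include <stdio.h>

static uint16_t slot_port;	/* currently programmed port */
static unsigned int slot_cnt;	/* users of that port */

static void tunnel_add(uint16_t port)
{
	if (slot_cnt && slot_port != port)
		return;			/* slot taken by another port */
	if (++slot_cnt == 1) {
		slot_port = port;
		printf("program fw port %u\n", port);	/* 0 -> 1 */
	}
}

static void tunnel_del(uint16_t port)
{
	if (!slot_cnt || slot_port != port)
		return;
	if (--slot_cnt == 0)
		printf("free fw port %u\n", port);	/* 1 -> 0 */
}

int main(void)
{
	tunnel_add(4789);
	tunnel_add(4789);	/* second user: no firmware call */
	tunnel_del(4789);
	tunnel_del(4789);	/* last user: firmware slot freed */
	return 0;
}

Note that the driver itself never issues the firmware calls from the callback; it sets an sp_event bit and defers the work to bnxt_sp_task() via schedule_work().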
static const struct net_device_ops bnxt_netdev_ops = {
@@ -6119,8 +6586,8 @@ static const struct net_device_ops bnxt_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = bnxt_rx_flow_steer,
#endif
- .ndo_add_vxlan_port = bnxt_add_vxlan_port,
- .ndo_del_vxlan_port = bnxt_del_vxlan_port,
+ .ndo_udp_tunnel_add = bnxt_udp_tunnel_add,
+ .ndo_udp_tunnel_del = bnxt_udp_tunnel_del,
#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = bnxt_busy_poll,
#endif
@@ -6169,6 +6636,12 @@ static int bnxt_probe_phy(struct bnxt *bp)
return rc;
}
+ /* Older firmware does not have supported_auto_speeds, so assume
+ * that all supported speeds can be autonegotiated.
+ */
+ if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
+ link_info->support_auto_speeds = link_info->support_speeds;
+
	/* initialize the ethtool settings copy with NVM settings */
if (BNXT_AUTO_MODE(link_info->auto_mode)) {
link_info->autoneg = BNXT_AUTONEG_SPEED;
@@ -6224,7 +6697,10 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
*max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs);
max_ring_grps = bp->pf.max_hw_ring_grps;
}
-
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
+ *max_cp -= 1;
+ *max_rx -= 2;
+ }
if (bp->flags & BNXT_FLAG_AGG_RINGS)
*max_rx >>= 1;
*max_rx = min_t(int, *max_rx, max_ring_grps);
@@ -6260,6 +6736,10 @@ static int bnxt_set_dflt_rings(struct bnxt *bp)
bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
bp->tx_nr_rings + bp->rx_nr_rings;
bp->num_stat_ctxs = bp->cp_nr_rings;
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
+ bp->rx_nr_rings++;
+ bp->cp_nr_rings++;
+ }
return rc;
}
@@ -6286,6 +6766,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct bnxt *bp;
int rc, max_irqs;
+ if (pdev->device == 0x16cd && pci_is_bridge(pdev))
+ return -ENODEV;
+
if (version_printed++ == 0)
pr_info("%s", version);
@@ -6312,13 +6795,25 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
+ rc = bnxt_alloc_hwrm_resources(bp);
+ if (rc)
+ goto init_err;
+
+ mutex_init(&bp->hwrm_cmd_lock);
+ rc = bnxt_hwrm_ver_get(bp);
+ if (rc)
+ goto init_err;
+
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
NETIF_F_GSO_IPXIP4 |
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
- NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO;
+ NETIF_F_RXCSUM | NETIF_F_GRO;
+
+ if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
+ dev->hw_features |= NETIF_F_LRO;
dev->hw_enc_features =
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
@@ -6337,12 +6832,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef CONFIG_BNXT_SRIOV
init_waitqueue_head(&bp->sriov_cfg_wait);
#endif
- rc = bnxt_alloc_hwrm_resources(bp);
- if (rc)
- goto init_err;
-
- mutex_init(&bp->hwrm_cmd_lock);
- bnxt_hwrm_ver_get(bp);
+ bp->gro_func = bnxt_gro_func_5730x;
+ if (BNXT_CHIP_NUM_57X1X(bp->chip_num))
+ bp->gro_func = bnxt_gro_func_5731x;
rc = bnxt_hwrm_func_drv_rgtr(bp);
if (rc)
@@ -6365,6 +6857,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto init_err;
}
+ bnxt_hwrm_func_qcfg(bp);
+
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
if (BNXT_PF(bp))
@@ -6375,7 +6869,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
#endif
bnxt_set_dflt_rings(bp);
- if (BNXT_PF(bp)) {
+ if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) {
dev->hw_features |= NETIF_F_NTUPLE;
if (bnxt_rfs_capable(bp)) {
bp->flags |= BNXT_FLAG_RFS;
@@ -6424,6 +6918,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
+ struct bnxt *bp = netdev_priv(netdev);
netdev_info(netdev, "PCI I/O error detected\n");
@@ -6438,6 +6933,8 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
if (netif_running(netdev))
bnxt_close(netdev);
+ /* So that func_reset will be done during slot_reset */
+ clear_bit(BNXT_STATE_FN_RST_DONE, &bp->state);
pci_disable_device(pdev);
rtnl_unlock();
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 2824d65b2..23e04a614 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -11,10 +11,10 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.2.0"
+#define DRV_MODULE_VERSION "1.3.0"
#define DRV_VER_MAJ 1
-#define DRV_VER_MIN 0
+#define DRV_VER_MIN 3
#define DRV_VER_UPD 0
struct tx_bd {
@@ -298,13 +298,14 @@ struct rx_tpa_start_cmp_ext {
#define RX_TPA_START_CMP_FLAGS2_L4_CS_CALC (0x1 << 1)
#define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2)
#define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3)
+ #define RX_TPA_START_CMP_FLAGS2_IP_TYPE (0x1 << 8)
__le32 rx_tpa_start_cmp_metadata;
__le32 rx_tpa_start_cmp_cfa_code_v2;
#define RX_TPA_START_CMP_V2 (0x1 << 0)
#define RX_TPA_START_CMP_CFA_CODE (0xffff << 16)
#define RX_TPA_START_CMPL_CFA_CODE_SHIFT 16
- __le32 rx_tpa_start_cmp_unused5;
+ __le32 rx_tpa_start_cmp_hdr_info;
};
struct rx_tpa_end_cmp {
@@ -358,7 +359,8 @@ struct rx_tpa_end_cmp {
RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO)
#define TPA_END_GRO_TS(rx_tpa_end) \
- ((rx_tpa_end)->rx_tpa_end_cmp_tsdelta & cpu_to_le32(RX_TPA_END_GRO_TS))
+ (!!((rx_tpa_end)->rx_tpa_end_cmp_tsdelta & \
+ cpu_to_le32(RX_TPA_END_GRO_TS)))
struct rx_tpa_end_cmp_ext {
__le32 rx_tpa_end_cmp_dup_acks;
@@ -584,6 +586,19 @@ struct bnxt_tpa_info {
u32 metadata;
enum pkt_hash_types hash_type;
u32 rss_hash;
+ u32 hdr_info;
+
+#define BNXT_TPA_L4_SIZE(hdr_info) \
+ (((hdr_info) & 0xf8000000) ? ((hdr_info) >> 27) : 32)
+
+#define BNXT_TPA_INNER_L3_OFF(hdr_info) \
+ (((hdr_info) >> 18) & 0x1ff)
+
+#define BNXT_TPA_INNER_L2_OFF(hdr_info) \
+ (((hdr_info) >> 9) & 0x1ff)
+
+#define BNXT_TPA_OUTER_L3_OFF(hdr_info) \
+ ((hdr_info) & 0x1ff)
};
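
As the macros spell out, hdr_info packs four fields into one 32-bit word: bits 31:27 carry the L4 header size (with 0 decoding to 32), bits 26:18 the inner L3 offset, bits 17:9 the inner L2 offset, and bits 8:0 the outer L3 offset. A small self-check of the extraction logic (the packed test value is made up):

#include <assert.h>
#include <stdint.h>

#define BNXT_TPA_L4_SIZE(h)		(((h) & 0xf8000000) ? ((h) >> 27) : 32)
#define BNXT_TPA_INNER_L3_OFF(h)	(((h) >> 18) & 0x1ff)
#define BNXT_TPA_INNER_L2_OFF(h)	(((h) >> 9) & 0x1ff)
#define BNXT_TPA_OUTER_L3_OFF(h)	((h) & 0x1ff)

int main(void)
{
	/* Arbitrary values: L4 size 20, inner L3 at 50, inner L2 at 36,
	 * outer L3 at 14. */
	uint32_t h = (20u << 27) | (50u << 18) | (36u << 9) | 14u;

	assert(BNXT_TPA_L4_SIZE(h) == 20);
	assert(BNXT_TPA_INNER_L3_OFF(h) == 50);
	assert(BNXT_TPA_INNER_L2_OFF(h) == 36);
	assert(BNXT_TPA_OUTER_L3_OFF(h) == 14);
	return 0;
}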
struct bnxt_rx_ring_info {
@@ -680,7 +695,8 @@ struct bnxt_ring_grp_info {
struct bnxt_vnic_info {
u16 fw_vnic_id; /* returned by Chimp during alloc */
- u16 fw_rss_cos_lb_ctx;
+#define BNXT_MAX_CTX_PER_VNIC 2
+ u16 fw_rss_cos_lb_ctx[BNXT_MAX_CTX_PER_VNIC];
u16 fw_l2_ctx_id;
#define BNXT_MAX_UC_ADDRS 4
__le64 fw_l2_filter_id[BNXT_MAX_UC_ADDRS];
@@ -739,8 +755,8 @@ struct bnxt_vf_info {
struct bnxt_pf_info {
#define BNXT_FIRST_PF_FID 1
#define BNXT_FIRST_VF_FID 128
- u32 fw_fid;
- u8 port_id;
+ u16 fw_fid;
+ u16 port_id;
u8 mac_addr[ETH_ALEN];
u16 max_rsscos_ctxs;
u16 max_cp_rings;
@@ -769,10 +785,12 @@ struct bnxt_pf_info {
struct bnxt_ntuple_filter {
struct hlist_node hash;
+ u8 dst_mac_addr[ETH_ALEN];
u8 src_mac_addr[ETH_ALEN];
struct flow_keys fkeys;
__le64 filter_id;
u16 sw_id;
+ u8 l2_fltr_idx;
u16 rxq;
u32 flow_id;
unsigned long state;
@@ -835,6 +853,7 @@ struct bnxt_link_info {
#define BNXT_LINK_SPEED_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB
#define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB
#define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB
+ u16 support_auto_speeds;
u16 lp_auto_link_speeds;
u16 force_link_speed;
u32 preemphasis;
@@ -873,6 +892,45 @@ struct bnxt {
void __iomem *bar2;
u32 reg_base;
+ u16 chip_num;
+#define CHIP_NUM_57301 0x16c8
+#define CHIP_NUM_57302 0x16c9
+#define CHIP_NUM_57304 0x16ca
+#define CHIP_NUM_58700 0x16cd
+#define CHIP_NUM_57402 0x16d0
+#define CHIP_NUM_57404 0x16d1
+#define CHIP_NUM_57406 0x16d2
+
+#define CHIP_NUM_57311 0x16ce
+#define CHIP_NUM_57312 0x16cf
+#define CHIP_NUM_57314 0x16df
+#define CHIP_NUM_57412 0x16d6
+#define CHIP_NUM_57414 0x16d7
+#define CHIP_NUM_57416 0x16d8
+#define CHIP_NUM_57417 0x16d9
+
+#define BNXT_CHIP_NUM_5730X(chip_num) \
+ ((chip_num) >= CHIP_NUM_57301 && \
+ (chip_num) <= CHIP_NUM_57304)
+
+#define BNXT_CHIP_NUM_5740X(chip_num) \
+ ((chip_num) >= CHIP_NUM_57402 && \
+ (chip_num) <= CHIP_NUM_57406)
+
+#define BNXT_CHIP_NUM_5731X(chip_num) \
+ ((chip_num) == CHIP_NUM_57311 || \
+ (chip_num) == CHIP_NUM_57312 || \
+ (chip_num) == CHIP_NUM_57314)
+
+#define BNXT_CHIP_NUM_5741X(chip_num) \
+ ((chip_num) >= CHIP_NUM_57412 && \
+ (chip_num) <= CHIP_NUM_57417)
+
+#define BNXT_CHIP_NUM_57X0X(chip_num) \
+ (BNXT_CHIP_NUM_5730X(chip_num) || BNXT_CHIP_NUM_5740X(chip_num))
+
+#define BNXT_CHIP_NUM_57X1X(chip_num) \
+ (BNXT_CHIP_NUM_5731X(chip_num) || BNXT_CHIP_NUM_5741X(chip_num))
struct net_device *dev;
struct pci_dev *pdev;
@@ -900,6 +958,7 @@ struct bnxt {
#define BNXT_FLAG_SHARED_RINGS 0x200
#define BNXT_FLAG_PORT_STATS 0x400
#define BNXT_FLAG_EEE_CAP 0x1000
+ #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
BNXT_FLAG_RFS | \
@@ -907,12 +966,18 @@ struct bnxt {
#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
+#define BNXT_NPAR(bp) ((bp)->port_partition_type)
+#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp))
+#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
struct bnxt_napi **bnapi;
struct bnxt_rx_ring_info *rx_ring;
struct bnxt_tx_ring_info *tx_ring;
+ struct sk_buff * (*gro_func)(struct bnxt_tpa_info *, int, int,
+ struct sk_buff *);
+
u32 rx_buf_size;
u32 rx_buf_use_size; /* useable size */
u32 rx_ring_size;
@@ -959,6 +1024,7 @@ struct bnxt {
unsigned long state;
#define BNXT_STATE_OPEN 0
#define BNXT_STATE_IN_SP_TASK 1
+#define BNXT_STATE_FN_RST_DONE 2
struct bnxt_irq *irq_tbl;
u8 mac_addr[ETH_ALEN];
@@ -991,8 +1057,10 @@ struct bnxt {
__be16 vxlan_port;
u8 vxlan_port_cnt;
__le16 vxlan_fw_dst_port_id;
+ __be16 nge_port;
u8 nge_port_cnt;
__le16 nge_fw_dst_port_id;
+ u8 port_partition_type;
u16 rx_coal_ticks;
u16 rx_coal_ticks_irq;
@@ -1005,6 +1073,11 @@ struct bnxt {
#define BNXT_USEC_TO_COAL_TIMER(x) ((x) * 25 / 2)
+ u32 stats_coal_ticks;
+#define BNXT_DEF_STATS_COAL_TICKS 1000000
+#define BNXT_MIN_STATS_COAL_TICKS 250000
+#define BNXT_MAX_STATS_COAL_TICKS 1000000
+
struct work_struct sp_task;
unsigned long sp_event;
#define BNXT_RX_MASK_SP_EVENT 0
@@ -1018,6 +1091,9 @@ struct bnxt {
#define BNXT_HWRM_PF_UNLOAD_SP_EVENT 8
#define BNXT_PERIODIC_STATS_SP_EVENT 9
#define BNXT_HWRM_PORT_MODULE_SP_EVENT 10
+#define BNXT_RESET_TASK_SILENT_SP_EVENT 11
+#define BNXT_GENEVE_ADD_PORT_SP_EVENT 12
+#define BNXT_GENEVE_DEL_PORT_SP_EVENT 13
struct bnxt_pf_info pf;
#ifdef CONFIG_BNXT_SRIOV
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 1b0ae4a72..b83e17403 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -56,6 +56,8 @@ static int bnxt_get_coalesce(struct net_device *dev,
coal->tx_coalesce_usecs_irq = bp->tx_coal_ticks_irq;
coal->tx_max_coalesced_frames_irq = bp->tx_coal_bufs_irq;
+ coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;
+
return 0;
}
@@ -63,6 +65,7 @@ static int bnxt_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *coal)
{
struct bnxt *bp = netdev_priv(dev);
+ bool update_stats = false;
int rc = 0;
bp->rx_coal_ticks = coal->rx_coalesce_usecs;
@@ -76,8 +79,26 @@ static int bnxt_set_coalesce(struct net_device *dev,
bp->tx_coal_ticks_irq = coal->tx_coalesce_usecs_irq;
bp->tx_coal_bufs_irq = coal->tx_max_coalesced_frames_irq;
- if (netif_running(dev))
- rc = bnxt_hwrm_set_coal(bp);
+ if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
+ u32 stats_ticks = coal->stats_block_coalesce_usecs;
+
+ stats_ticks = clamp_t(u32, stats_ticks,
+ BNXT_MIN_STATS_COAL_TICKS,
+ BNXT_MAX_STATS_COAL_TICKS);
+ stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
+ bp->stats_coal_ticks = stats_ticks;
+ update_stats = true;
+ }
+
+ if (netif_running(dev)) {
+ if (update_stats) {
+ rc = bnxt_close_nic(bp, true, false);
+ if (!rc)
+ rc = bnxt_open_nic(bp, true, false);
+ } else {
+ rc = bnxt_hwrm_set_coal(bp);
+ }
+ }
return rc;
}
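
The stats-block handling above clamps the requested interval into [BNXT_MIN_STATS_COAL_TICKS, BNXT_MAX_STATS_COAL_TICKS] and rounds it down to a multiple of the minimum, and only when the value actually changes does it pay the close/open cycle needed to reprogram the stats contexts. The same arithmetic with clamp_t()/rounddown() written out in plain C:

#include <stdint.h>
#include <stdio.h>

#define MIN_TICKS	250000u		/* mirrors BNXT_MIN_STATS_COAL_TICKS */
#define MAX_TICKS	1000000u	/* mirrors BNXT_MAX_STATS_COAL_TICKS */

static uint32_t fixup_stats_ticks(uint32_t req)
{
	uint32_t t = req;

	if (t < MIN_TICKS)		/* clamp_t(u32, t, MIN, MAX) */
		t = MIN_TICKS;
	if (t > MAX_TICKS)
		t = MAX_TICKS;
	return t - (t % MIN_TICKS);	/* rounddown(t, MIN_TICKS) */
}

int main(void)
{
	printf("%u\n", fixup_stats_ticks(600000));	/* -> 500000 */
	printf("%u\n", fixup_stats_ticks(100));		/* -> 250000 */
	return 0;
}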
@@ -341,9 +362,13 @@ static void bnxt_get_channels(struct net_device *dev,
channel->max_other = 0;
if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
channel->combined_count = bp->rx_nr_rings;
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+ channel->combined_count--;
} else {
- channel->rx_count = bp->rx_nr_rings;
- channel->tx_count = bp->tx_nr_rings_per_tc;
+ if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) {
+ channel->rx_count = bp->rx_nr_rings;
+ channel->tx_count = bp->tx_nr_rings_per_tc;
+ }
}
}
@@ -366,6 +391,10 @@ static int bnxt_set_channels(struct net_device *dev,
(channel->rx_count || channel->tx_count))
return -EINVAL;
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count ||
+ channel->tx_count))
+ return -EINVAL;
+
if (channel->combined_count)
sh = true;
@@ -628,7 +657,66 @@ u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
return speed_mask;
}
-static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info)
+#define BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, name)\
+{ \
+ if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100MB) \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ 100baseT_Full); \
+ if ((fw_speeds) & BNXT_LINK_SPEED_MSK_1GB) \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ 1000baseT_Full); \
+ if ((fw_speeds) & BNXT_LINK_SPEED_MSK_10GB) \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ 10000baseT_Full); \
+ if ((fw_speeds) & BNXT_LINK_SPEED_MSK_25GB) \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ 25000baseCR_Full); \
+ if ((fw_speeds) & BNXT_LINK_SPEED_MSK_40GB) \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ 40000baseCR4_Full);\
+ if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB) \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ 50000baseCR2_Full);\
+ if ((fw_pause) & BNXT_LINK_PAUSE_RX) { \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ Pause); \
+ if (!((fw_pause) & BNXT_LINK_PAUSE_TX)) \
+ ethtool_link_ksettings_add_link_mode( \
+ lk_ksettings, name, Asym_Pause);\
+ } else if ((fw_pause) & BNXT_LINK_PAUSE_TX) { \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ Asym_Pause); \
+ } \
+}
+
+#define BNXT_ETHTOOL_TO_FW_SPDS(fw_speeds, lk_ksettings, name) \
+{ \
+ if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 100baseT_Full) || \
+ ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 100baseT_Half)) \
+ (fw_speeds) |= BNXT_LINK_SPEED_MSK_100MB; \
+ if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 1000baseT_Full) || \
+ ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 1000baseT_Half)) \
+ (fw_speeds) |= BNXT_LINK_SPEED_MSK_1GB; \
+ if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 10000baseT_Full)) \
+ (fw_speeds) |= BNXT_LINK_SPEED_MSK_10GB; \
+ if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 25000baseCR_Full)) \
+ (fw_speeds) |= BNXT_LINK_SPEED_MSK_25GB; \
+ if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 40000baseCR4_Full)) \
+ (fw_speeds) |= BNXT_LINK_SPEED_MSK_40GB; \
+ if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 50000baseCR2_Full)) \
+ (fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB; \
+}
+
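
The two macros above translate between the firmware speed bitmask and ethtool link modes one speed per arm; macros are used because ethtool_link_ksettings_add_link_mode() pastes the mode name into an ETHTOOL_LINK_MODE_*_BIT token, letting the same body target the supported, advertising and lp_advertising masks. A hypothetical table-driven reduction of the forward mapping (bit positions illustrative, not the real BNXT_LINK_SPEED_MSK_* values):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct speed_map {
	uint16_t fw_bit;	/* firmware speed-mask bit */
	const char *mode;	/* ethtool link mode name */
};

static const struct speed_map speed_maps[] = {
	{ 1u << 0, "100baseT_Full" },
	{ 1u << 1, "1000baseT_Full" },
	{ 1u << 2, "10000baseT_Full" },
	{ 1u << 3, "25000baseCR_Full" },
	{ 1u << 4, "40000baseCR4_Full" },
	{ 1u << 5, "50000baseCR2_Full" },
};

static void fw_to_modes(uint16_t fw_speeds)
{
	for (size_t i = 0; i < sizeof(speed_maps) / sizeof(speed_maps[0]); i++)
		if (fw_speeds & speed_maps[i].fw_bit)
			printf("advertise %s\n", speed_maps[i].mode);
}

int main(void)
{
	fw_to_modes((1u << 2) | (1u << 4));	/* 10G + 40G */
	return 0;
}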
+static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
+ struct ethtool_link_ksettings *lk_ksettings)
{
u16 fw_speeds = link_info->auto_link_speeds;
u8 fw_pause = 0;
@@ -636,10 +724,11 @@ static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info)
if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
fw_pause = link_info->auto_pause_setting;
- return _bnxt_fw_to_ethtool_adv_spds(fw_speeds, fw_pause);
+ BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising);
}
-static u32 bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info)
+static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info,
+ struct ethtool_link_ksettings *lk_ksettings)
{
u16 fw_speeds = link_info->lp_auto_link_speeds;
u8 fw_pause = 0;
@@ -647,16 +736,24 @@ static u32 bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info)
if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
fw_pause = link_info->lp_pause;
- return _bnxt_fw_to_ethtool_adv_spds(fw_speeds, fw_pause);
+ BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings,
+ lp_advertising);
}
-static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info)
+static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info,
+ struct ethtool_link_ksettings *lk_ksettings)
{
u16 fw_speeds = link_info->support_speeds;
- u32 supported;
- supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
- return supported | SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported);
+
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause);
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
+ Asym_Pause);
+
+ if (link_info->support_auto_speeds)
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
+ Autoneg);
}
u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
@@ -683,65 +780,62 @@ u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
}
}
-static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int bnxt_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *lk_ksettings)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
- u16 ethtool_speed;
+ struct ethtool_link_settings *base = &lk_ksettings->base;
+ u32 ethtool_speed;
- cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info);
-
- if (link_info->auto_link_speeds)
- cmd->supported |= SUPPORTED_Autoneg;
+ ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
+ bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);
+ ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
if (link_info->autoneg) {
- cmd->advertising =
- bnxt_fw_to_ethtool_advertised_spds(link_info);
- cmd->advertising |= ADVERTISED_Autoneg;
- cmd->autoneg = AUTONEG_ENABLE;
+ bnxt_fw_to_ethtool_advertised_spds(link_info, lk_ksettings);
+ ethtool_link_ksettings_add_link_mode(lk_ksettings,
+ advertising, Autoneg);
+ base->autoneg = AUTONEG_ENABLE;
if (link_info->phy_link_status == BNXT_LINK_LINK)
- cmd->lp_advertising =
- bnxt_fw_to_ethtool_lp_adv(link_info);
+ bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings);
ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
if (!netif_carrier_ok(dev))
- cmd->duplex = DUPLEX_UNKNOWN;
+ base->duplex = DUPLEX_UNKNOWN;
else if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
- cmd->duplex = DUPLEX_FULL;
+ base->duplex = DUPLEX_FULL;
else
- cmd->duplex = DUPLEX_HALF;
+ base->duplex = DUPLEX_HALF;
} else {
- cmd->autoneg = AUTONEG_DISABLE;
- cmd->advertising = 0;
+ base->autoneg = AUTONEG_DISABLE;
ethtool_speed =
bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
- cmd->duplex = DUPLEX_HALF;
+ base->duplex = DUPLEX_HALF;
if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
- cmd->duplex = DUPLEX_FULL;
+ base->duplex = DUPLEX_FULL;
}
- ethtool_cmd_speed_set(cmd, ethtool_speed);
+ base->speed = ethtool_speed;
- cmd->port = PORT_NONE;
+ base->port = PORT_NONE;
if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
- cmd->port = PORT_TP;
- cmd->supported |= SUPPORTED_TP;
- cmd->advertising |= ADVERTISED_TP;
+ base->port = PORT_TP;
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
+ TP);
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
+ TP);
} else {
- cmd->supported |= SUPPORTED_FIBRE;
- cmd->advertising |= ADVERTISED_FIBRE;
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
+ FIBRE);
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
+ FIBRE);
if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
- cmd->port = PORT_DA;
+ base->port = PORT_DA;
else if (link_info->media_type ==
PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE)
- cmd->port = PORT_FIBRE;
+ base->port = PORT_FIBRE;
}
-
- if (link_info->transceiver ==
- PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL)
- cmd->transceiver = XCVR_INTERNAL;
- else
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->phy_address = link_info->phy_addr;
+ base->phy_address = link_info->phy_addr;
return 0;
}
@@ -815,37 +909,25 @@ u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
return fw_speed_mask;
}
-static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int bnxt_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *lk_ksettings)
{
- int rc = 0;
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
+ const struct ethtool_link_settings *base = &lk_ksettings->base;
u32 speed, fw_advertising = 0;
bool set_pause = false;
+ int rc = 0;
- if (BNXT_VF(bp))
- return rc;
-
- if (cmd->autoneg == AUTONEG_ENABLE) {
- u32 supported_spds = bnxt_fw_to_ethtool_support_spds(link_info);
+ if (!BNXT_SINGLE_PF(bp))
+ return -EOPNOTSUPP;
- if (cmd->advertising & ~(supported_spds | ADVERTISED_Autoneg |
- ADVERTISED_TP | ADVERTISED_FIBRE)) {
- netdev_err(dev, "Unsupported advertising mask (adv: 0x%x)\n",
- cmd->advertising);
- rc = -EINVAL;
- goto set_setting_exit;
- }
- fw_advertising = bnxt_get_fw_auto_link_speeds(cmd->advertising);
- if (fw_advertising & ~link_info->support_speeds) {
- netdev_err(dev, "Advertising parameters are not supported! (adv: 0x%x)\n",
- cmd->advertising);
- rc = -EINVAL;
- goto set_setting_exit;
- }
+ if (base->autoneg == AUTONEG_ENABLE) {
+ BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings,
+ advertising);
link_info->autoneg |= BNXT_AUTONEG_SPEED;
if (!fw_advertising)
- link_info->advertising = link_info->support_speeds;
+ link_info->advertising = link_info->support_auto_speeds;
else
link_info->advertising = fw_advertising;
/* any change to autoneg will cause link change, therefore the
@@ -863,16 +945,12 @@ static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
rc = -EINVAL;
goto set_setting_exit;
}
- /* TODO: currently don't support half duplex */
- if (cmd->duplex == DUPLEX_HALF) {
+ if (base->duplex == DUPLEX_HALF) {
netdev_err(dev, "HALF DUPLEX is not supported!\n");
rc = -EINVAL;
goto set_setting_exit;
}
- /* If received a request for an unknown duplex, assume full*/
- if (cmd->duplex == DUPLEX_UNKNOWN)
- cmd->duplex = DUPLEX_FULL;
- speed = ethtool_cmd_speed(cmd);
+ speed = base->speed;
fw_speed = bnxt_get_fw_speed(dev, speed);
if (!fw_speed) {
rc = -EINVAL;
@@ -911,8 +989,8 @@ static int bnxt_set_pauseparam(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
- if (BNXT_VF(bp))
- return rc;
+ if (!BNXT_SINGLE_PF(bp))
+ return -EOPNOTSUPP;
if (epause->autoneg) {
if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
@@ -1010,6 +1088,8 @@ static int bnxt_firmware_reset(struct net_device *dev,
case BNX_DIR_TYPE_APE_FW:
case BNX_DIR_TYPE_APE_PATCH:
req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
+ /* Self-reset APE upon next PCIe reset: */
+ req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
break;
case BNX_DIR_TYPE_KONG_FW:
case BNX_DIR_TYPE_KONG_PATCH:
@@ -1043,9 +1123,27 @@ static int bnxt_flash_firmware(struct net_device *dev,
case BNX_DIR_TYPE_BOOTCODE_2:
code_type = CODE_BOOT;
break;
+ case BNX_DIR_TYPE_CHIMP_PATCH:
+ code_type = CODE_CHIMP_PATCH;
+ break;
case BNX_DIR_TYPE_APE_FW:
code_type = CODE_MCTP_PASSTHRU;
break;
+ case BNX_DIR_TYPE_APE_PATCH:
+ code_type = CODE_APE_PATCH;
+ break;
+ case BNX_DIR_TYPE_KONG_FW:
+ code_type = CODE_KONG_FW;
+ break;
+ case BNX_DIR_TYPE_KONG_PATCH:
+ code_type = CODE_KONG_PATCH;
+ break;
+ case BNX_DIR_TYPE_BONO_FW:
+ code_type = CODE_BONO_FW;
+ break;
+ case BNX_DIR_TYPE_BONO_PATCH:
+ code_type = CODE_BONO_PATCH;
+ break;
default:
netdev_err(dev, "Unsupported directory entry type: %u\n",
dir_type);
@@ -1100,6 +1198,8 @@ static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
case BNX_DIR_TYPE_APE_PATCH:
case BNX_DIR_TYPE_KONG_FW:
case BNX_DIR_TYPE_KONG_PATCH:
+ case BNX_DIR_TYPE_BONO_FW:
+ case BNX_DIR_TYPE_BONO_PATCH:
return true;
}
@@ -1137,7 +1237,8 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
const struct firmware *fw;
int rc;
- if (bnxt_dir_type_is_executable(dir_type) == false)
+ if (dir_type != BNX_DIR_TYPE_UPDATE &&
+ bnxt_dir_type_is_executable(dir_type) == false)
return -EINVAL;
rc = request_firmware(&fw, filename, &dev->dev);
@@ -1433,8 +1534,8 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
_bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
int rc = 0;
- if (BNXT_VF(bp))
- return 0;
+ if (!BNXT_SINGLE_PF(bp))
+ return -EOPNOTSUPP;
if (!(bp->flags & BNXT_FLAG_EEE_CAP))
return -EOPNOTSUPP;
@@ -1618,8 +1719,8 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
}
const struct ethtool_ops bnxt_ethtool_ops = {
- .get_settings = bnxt_get_settings,
- .set_settings = bnxt_set_settings,
+ .get_link_ksettings = bnxt_get_link_ksettings,
+ .set_link_ksettings = bnxt_set_link_ksettings,
.get_pauseparam = bnxt_get_pauseparam,
.set_pauseparam = bnxt_set_pauseparam,
.get_drvinfo = bnxt_get_drvinfo,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
index 461675caa..82bf44ab8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
@@ -70,6 +70,7 @@ enum SUPPORTED_CODE {
CODE_KONG_PATCH, /* 18 - KONG Patch firmware */
CODE_BONO_FW, /* 19 - BONO firmware */
CODE_BONO_PATCH, /* 20 - BONO Patch firmware */
+ CODE_CHIMP_PATCH, /* 21 - ChiMP Patch firmware */
MAX_CODE_TYPE,
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 05e3c49a7..517567f6d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -105,6 +105,7 @@ struct hwrm_async_event_cmpl {
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE (0x6UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE (0x7UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0)
@@ -484,12 +485,12 @@ struct hwrm_async_event_cmpl_hwrm_error {
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
};
-/* HW Resource Manager Specification 1.2.2 */
+/* HW Resource Manager Specification 1.3.0 */
#define HWRM_VERSION_MAJOR 1
-#define HWRM_VERSION_MINOR 2
-#define HWRM_VERSION_UPDATE 2
+#define HWRM_VERSION_MINOR 3
+#define HWRM_VERSION_UPDATE 0
-#define HWRM_VERSION_STR "1.2.2"
+#define HWRM_VERSION_STR "1.3.0"
/*
* Following is the signature for HWRM message field that indicates not
 * applicable (All F's). Need to cast it to the size of the field if needed.
@@ -611,6 +612,9 @@ struct cmd_nums {
#define HWRM_FWD_RESP (0xd2UL)
#define HWRM_FWD_ASYNC_EVENT_CMPL (0xd3UL)
#define HWRM_TEMP_MONITOR_QUERY (0xe0UL)
+ #define HWRM_WOL_FILTER_ALLOC (0xf0UL)
+ #define HWRM_WOL_FILTER_FREE (0xf1UL)
+ #define HWRM_WOL_FILTER_QCFG (0xf2UL)
#define HWRM_DBG_READ_DIRECT (0xff10UL)
#define HWRM_DBG_READ_INDIRECT (0xff11UL)
#define HWRM_DBG_WRITE_DIRECT (0xff12UL)
@@ -1020,6 +1024,10 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
#define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
#define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
@@ -1066,8 +1074,9 @@ struct hwrm_func_qcfg_output {
__le16 fid;
__le16 port_id;
__le16 vlan;
- u8 unused_0;
- u8 unused_1;
+ __le16 flags;
+ #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1086,23 +1095,23 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 (0x3UL << 0)
#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 (0x4UL << 0)
#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN (0xffUL << 0)
- u8 unused_2;
+ u8 unused_0;
__le16 dflt_vnic_id;
- u8 unused_3;
- u8 unused_4;
+ u8 unused_1;
+ u8 unused_2;
__le32 min_bw;
__le32 max_bw;
u8 evb_mode;
#define FUNC_QCFG_RESP_EVB_MODE_NO_EVB (0x0UL << 0)
#define FUNC_QCFG_RESP_EVB_MODE_VEB (0x1UL << 0)
#define FUNC_QCFG_RESP_EVB_MODE_VEPA (0x2UL << 0)
- u8 unused_5;
- __le16 unused_6;
+ u8 unused_3;
+ __le16 unused_4;
__le32 alloc_mcast_filters;
__le32 alloc_hw_ring_grps;
+ u8 unused_5;
+ u8 unused_6;
u8 unused_7;
- u8 unused_8;
- u8 unused_9;
u8 valid;
};
@@ -1410,8 +1419,8 @@ struct hwrm_func_buf_rgtr_input {
#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K (0xcUL << 0)
#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K (0xdUL << 0)
#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K (0x10UL << 0)
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M (0x16UL << 0)
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M (0x17UL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M (0x15UL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M (0x16UL << 0)
#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G (0x1eUL << 0)
__le16 req_buf_len;
__le16 resp_buf_len;
@@ -1499,6 +1508,12 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
#define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
#define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
__le32 enables;
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
@@ -1815,13 +1830,22 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
- __le32 unused_1;
+ __le16 fec_cfg;
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
+ u8 unused_1;
+ u8 unused_2;
char phy_vendor_name[16];
char phy_vendor_partnumber[16];
- __le32 unused_2;
- u8 unused_3;
+ __le32 unused_3;
u8 unused_4;
u8 unused_5;
+ u8 unused_6;
u8 valid;
};
@@ -1842,6 +1866,8 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL
#define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL
#define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
+ #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
+ #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
__le32 enables;
#define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
#define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
@@ -2127,6 +2153,7 @@ struct hwrm_port_phy_i2c_read_output {
u8 valid;
};
+/* hwrm_queue_qportcfg */
/* Input (24 bytes) */
struct hwrm_queue_qportcfg_input {
__le16 req_type;
@@ -2382,7 +2409,7 @@ struct hwrm_queue_cos2bw_cfg_input {
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP (0x0UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS (0x1UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
u8 queue_id0_pri_lvl;
u8 queue_id0_bw_weight;
u8 queue_id1;
@@ -2392,7 +2419,7 @@ struct hwrm_queue_cos2bw_cfg_input {
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP (0x0UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS (0x1UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
u8 queue_id1_pri_lvl;
u8 queue_id1_bw_weight;
u8 queue_id2;
@@ -2402,7 +2429,7 @@ struct hwrm_queue_cos2bw_cfg_input {
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP (0x0UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS (0x1UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
u8 queue_id2_pri_lvl;
u8 queue_id2_bw_weight;
u8 queue_id3;
@@ -2412,7 +2439,7 @@ struct hwrm_queue_cos2bw_cfg_input {
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP (0x0UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS (0x1UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
u8 queue_id3_pri_lvl;
u8 queue_id3_bw_weight;
u8 queue_id4;
@@ -2422,7 +2449,7 @@ struct hwrm_queue_cos2bw_cfg_input {
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP (0x0UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS (0x1UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
u8 queue_id4_pri_lvl;
u8 queue_id4_bw_weight;
u8 queue_id5;
@@ -2432,7 +2459,7 @@ struct hwrm_queue_cos2bw_cfg_input {
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP (0x0UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS (0x1UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
u8 queue_id5_pri_lvl;
u8 queue_id5_bw_weight;
u8 queue_id6;
@@ -2442,7 +2469,7 @@ struct hwrm_queue_cos2bw_cfg_input {
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP (0x0UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS (0x1UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
u8 queue_id6_pri_lvl;
u8 queue_id6_bw_weight;
u8 queue_id7;
@@ -2452,7 +2479,7 @@ struct hwrm_queue_cos2bw_cfg_input {
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP (0x0UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS (0x1UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
u8 queue_id7_pri_lvl;
u8 queue_id7_bw_weight;
u8 unused_1[5];
@@ -3150,7 +3177,7 @@ struct hwrm_cfa_l2_filter_cfg_output {
};
/* hwrm_cfa_l2_set_rx_mask */
-/* Input (40 bytes) */
+/* Input (56 bytes) */
struct hwrm_cfa_l2_set_rx_mask_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -3165,9 +3192,15 @@ struct hwrm_cfa_l2_set_rx_mask_input {
#define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL
#define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL
#define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY 0x40UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN 0x80UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN 0x100UL
__le64 mc_tbl_addr;
__le32 num_mc_entries;
__le32 unused_0;
+ __le64 vlan_tag_tbl_addr;
+ __le32 num_vlan_tags;
+ __le32 unused_1;
};
/* Output (16 bytes) */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
index 40a7b0e09..73f224955 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
@@ -13,6 +13,7 @@
enum bnxt_nvm_directory_type {
BNX_DIR_TYPE_UNUSED = 0,
BNX_DIR_TYPE_PKG_LOG = 1,
+ BNX_DIR_TYPE_UPDATE = 2,
BNX_DIR_TYPE_CHIMP_PATCH = 3,
BNX_DIR_TYPE_BOOTCODE = 4,
BNX_DIR_TYPE_VPD = 5,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 363884dd9..50d2007a2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -143,6 +143,9 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos)
u16 vlan_tag;
int rc;
+ if (bp->hwrm_spec_code < 0x10201)
+ return -ENOTSUPP;
+
rc = bnxt_vf_ndo_prep(bp, vf_id);
if (rc)
return rc;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 6ad2b4a51..6c4652b52 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12543,10 +12543,6 @@ static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
info->data = TG3_RSS_MAX_NUM_QS;
}
- /* The first interrupt vector only
- * handles link interrupts.
- */
- info->data -= 1;
return 0;
default:
@@ -14005,7 +14001,9 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
}
if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
+ (!ec->rx_coalesce_usecs) ||
(ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
+ (!ec->tx_coalesce_usecs) ||
(ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
(ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
(ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
@@ -14016,16 +14014,6 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
(ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
return -EINVAL;
- /* No rx interrupts will be generated if both are zero */
- if ((ec->rx_coalesce_usecs == 0) &&
- (ec->rx_max_coalesced_frames == 0))
- return -EINVAL;
-
- /* No tx interrupts will be generated if both are zero */
- if ((ec->tx_coalesce_usecs == 0) &&
- (ec->tx_max_coalesced_frames == 0))
- return -EINVAL;
-
/* Only copy relevant parameters, ignore all others. */
tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
@@ -18125,14 +18113,14 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
rtnl_lock();
- /* We needn't recover from permanent error */
- if (state == pci_channel_io_frozen)
- tp->pcierr_recovery = true;
-
/* We probably don't have netdev yet */
if (!netdev || !netif_running(netdev))
goto done;
+ /* We needn't recover from permanent error */
+ if (state == pci_channel_io_frozen)
+ tp->pcierr_recovery = true;
+
tg3_phy_stop(tp);
tg3_netif_stop(tp);
@@ -18229,7 +18217,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
rtnl_lock();
- if (!netif_running(netdev))
+ if (!netdev || !netif_running(netdev))
goto done;
tg3_full_lock(tp, 0);
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 8fc246ea1..05c1c1dd7 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -312,7 +312,8 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
struct bnad_debug_info *regrd_debug = file->private_data;
struct bnad *bnad = (struct bnad *)regrd_debug->i_private;
struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc;
- int addr, len, rc, i;
+ int rc, i;
+ u32 addr, len;
u32 *regbuf;
void __iomem *rb, *reg_addr;
unsigned long flags;
@@ -372,7 +373,8 @@ bnad_debugfs_write_regwr(struct file *file, const char __user *buf,
struct bnad_debug_info *debug = file->private_data;
struct bnad *bnad = (struct bnad *)debug->i_private;
struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc;
- int addr, val, rc;
+ int rc;
+ u32 addr, val;
void __iomem *reg_addr;
unsigned long flags;
void *kern_buf;
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index d95098223..47b1bbb95 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -31,15 +31,10 @@
#define BNAD_NUM_TXF_COUNTERS 12
#define BNAD_NUM_RXF_COUNTERS 10
#define BNAD_NUM_CQ_COUNTERS (3 + 5)
-#define BNAD_NUM_RXQ_COUNTERS 6
+#define BNAD_NUM_RXQ_COUNTERS 7
#define BNAD_NUM_TXQ_COUNTERS 5
-#define BNAD_ETHTOOL_STATS_NUM \
- (sizeof(struct rtnl_link_stats64) / sizeof(u64) + \
- sizeof(struct bnad_drv_stats) / sizeof(u64) + \
- offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64))
-
-static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
+static const char *bnad_net_stats_strings[] = {
"rx_packets",
"tx_packets",
"rx_bytes",
@@ -50,22 +45,10 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
"tx_dropped",
"multicast",
"collisions",
-
"rx_length_errors",
- "rx_over_errors",
"rx_crc_errors",
"rx_frame_errors",
- "rx_fifo_errors",
- "rx_missed_errors",
-
- "tx_aborted_errors",
- "tx_carrier_errors",
"tx_fifo_errors",
- "tx_heartbeat_errors",
- "tx_window_errors",
-
- "rx_compressed",
- "tx_compressed",
"netif_queue_stop",
"netif_queue_wakeup",
@@ -254,6 +237,8 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
"fc_tx_fid_parity_errors",
};
+#define BNAD_ETHTOOL_STATS_NUM ARRAY_SIZE(bnad_net_stats_strings)
+
static int
bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
@@ -658,6 +643,8 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
string += ETH_GSTRING_LEN;
sprintf(string, "rxq%d_allocbuf_failed", q_num);
string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_mapbuf_failed", q_num);
+ string += ETH_GSTRING_LEN;
sprintf(string, "rxq%d_producer_index", q_num);
string += ETH_GSTRING_LEN;
sprintf(string, "rxq%d_consumer_index", q_num);
@@ -678,6 +665,9 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
sprintf(string, "rxq%d_allocbuf_failed",
q_num);
string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_mapbuf_failed",
+ q_num);
+ string += ETH_GSTRING_LEN;
sprintf(string, "rxq%d_producer_index",
q_num);
string += ETH_GSTRING_LEN;
@@ -854,9 +844,9 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
u64 *buf)
{
struct bnad *bnad = netdev_priv(netdev);
- int i, j, bi;
+ int i, j, bi = 0;
unsigned long flags;
- struct rtnl_link_stats64 *net_stats64;
+ struct rtnl_link_stats64 net_stats64;
u64 *stats64;
u32 bmap;
@@ -871,14 +861,25 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
* under the same lock
*/
spin_lock_irqsave(&bnad->bna_lock, flags);
- bi = 0;
- memset(buf, 0, stats->n_stats * sizeof(u64));
-
- net_stats64 = (struct rtnl_link_stats64 *)buf;
- bnad_netdev_qstats_fill(bnad, net_stats64);
- bnad_netdev_hwstats_fill(bnad, net_stats64);
- bi = sizeof(*net_stats64) / sizeof(u64);
+ memset(&net_stats64, 0, sizeof(net_stats64));
+ bnad_netdev_qstats_fill(bnad, &net_stats64);
+ bnad_netdev_hwstats_fill(bnad, &net_stats64);
+
+ buf[bi++] = net_stats64.rx_packets;
+ buf[bi++] = net_stats64.tx_packets;
+ buf[bi++] = net_stats64.rx_bytes;
+ buf[bi++] = net_stats64.tx_bytes;
+ buf[bi++] = net_stats64.rx_errors;
+ buf[bi++] = net_stats64.tx_errors;
+ buf[bi++] = net_stats64.rx_dropped;
+ buf[bi++] = net_stats64.tx_dropped;
+ buf[bi++] = net_stats64.multicast;
+ buf[bi++] = net_stats64.collisions;
+ buf[bi++] = net_stats64.rx_length_errors;
+ buf[bi++] = net_stats64.rx_crc_errors;
+ buf[bi++] = net_stats64.rx_frame_errors;
+ buf[bi++] = net_stats64.tx_fifo_errors;
/* Get netif_queue_stopped from stack */
bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev);
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index cb07d95e3..d954a97b0 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -304,7 +304,7 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
static void macb_handle_link_change(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
- struct phy_device *phydev = bp->phy_dev;
+ struct phy_device *phydev = dev->phydev;
unsigned long flags;
int status_change = 0;
@@ -414,7 +414,6 @@ static int macb_mii_probe(struct net_device *dev)
bp->link = 0;
bp->speed = 0;
bp->duplex = -1;
- bp->phy_dev = phydev;
return 0;
}
@@ -1324,6 +1323,24 @@ dma_error:
return 0;
}
+static inline int macb_clear_csum(struct sk_buff *skb)
+{
+ /* no change for packets without checksum offloading */
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ /* make sure we can modify the header */
+ if (unlikely(skb_cow_head(skb, 0)))
+ return -1;
+
+ /* initialize checksum field
+ * This is required - at least for Zynq, which otherwise calculates
+ * wrong UDP header checksums for UDP packets with UDP data len <=2
+ */
+ *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
+ return 0;
+}
+
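
macb_clear_csum() zeroes the 16-bit checksum field that the stack leaves unfinished in CHECKSUM_PARTIAL frames, working around (per the comment) GEM hardware on Zynq that otherwise miscomputes UDP checksums when the UDP payload is two bytes or less. The pointer arithmetic alone, illustrated in userspace with a fake skb layout (offsets chosen for an IPv4/UDP frame; names hypothetical):

#include <stdint.h>
#include <string.h>

struct fake_skb {
	uint8_t data[64];
	unsigned int csum_start;	/* offset of transport header */
	unsigned int csum_offset;	/* checksum field within it */
};

static void clear_csum(struct fake_skb *skb)
{
	uint16_t zero = 0;

	/* analogous to:
	 * *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0; */
	memcpy(skb->data + skb->csum_start + skb->csum_offset,
	       &zero, sizeof(zero));
}

int main(void)
{
	/* 14-byte Ethernet + 20-byte IPv4 header -> UDP at offset 34;
	 * the UDP checksum sits 6 bytes into the UDP header. */
	struct fake_skb skb = { .csum_start = 34, .csum_offset = 6 };

	clear_csum(&skb);
	return 0;
}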
static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
u16 queue_index = skb_get_queue_mapping(skb);
@@ -1363,6 +1380,11 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
+ if (macb_clear_csum(skb)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
/* Map socket buffer for DMA transfer */
if (!macb_tx_map(bp, queue, skb)) {
dev_kfree_skb_any(skb);
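[For readers unfamiliar with CHECKSUM_PARTIAL: the stack hands the driver the offsets of the checksum field rather than a finished checksum, and macb_clear_csum() zeroes that field so the controller starts from a known value. A minimal userspace model of locating and clearing the field; the offsets below are illustrative for UDP over IPv4, not taken from the driver:]

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Zero the 16-bit checksum field at csum_start + csum_offset, as
     * macb_clear_csum() does via skb_checksum_start() + skb->csum_offset.
     */
    static void clear_csum(uint8_t *frame, size_t csum_start,
                           size_t csum_offset)
    {
            memset(frame + csum_start + csum_offset, 0, 2);
    }

    int main(void)
    {
            uint8_t frame[64];

            memset(frame, 0xff, sizeof(frame));
            /* Illustrative offsets: checksumming starts after 14 bytes of
             * Ethernet + 20 bytes of IP; the UDP checksum field sits
             * 6 bytes into the UDP header.
             */
            clear_csum(frame, 14 + 20, 6);
            printf("checksum bytes now: %02x %02x\n", frame[40], frame[41]);
            return 0;
    }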
@@ -1886,7 +1908,7 @@ static int macb_open(struct net_device *dev)
netif_carrier_off(dev);
/* if the phy is not yet registered, retry later */

- if (!bp->phy_dev)
+ if (!dev->phydev)
return -EAGAIN;
/* RX buffers initialization */
@@ -1905,7 +1927,7 @@ static int macb_open(struct net_device *dev)
macb_init_hw(bp);
/* schedule a link state check */
- phy_start(bp->phy_dev);
+ phy_start(dev->phydev);
netif_tx_start_all_queues(dev);
@@ -1920,8 +1942,8 @@ static int macb_close(struct net_device *dev)
netif_tx_stop_all_queues(dev);
napi_disable(&bp->napi);
- if (bp->phy_dev)
- phy_stop(bp->phy_dev);
+ if (dev->phydev)
+ phy_stop(dev->phydev);
spin_lock_irqsave(&bp->lock, flags);
macb_reset_hw(bp);
@@ -2092,28 +2114,6 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
return nstat;
}
-static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct macb *bp = netdev_priv(dev);
- struct phy_device *phydev = bp->phy_dev;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_gset(phydev, cmd);
-}
-
-static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct macb *bp = netdev_priv(dev);
- struct phy_device *phydev = bp->phy_dev;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_sset(phydev, cmd);
-}
-
static int macb_get_regs_len(struct net_device *netdev)
{
return MACB_GREGS_NBR * sizeof(u32);
@@ -2186,19 +2186,17 @@ static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
}
static const struct ethtool_ops macb_ethtool_ops = {
- .get_settings = macb_get_settings,
- .set_settings = macb_set_settings,
.get_regs_len = macb_get_regs_len,
.get_regs = macb_get_regs,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
.get_wol = macb_get_wol,
.set_wol = macb_set_wol,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static const struct ethtool_ops gem_ethtool_ops = {
- .get_settings = macb_get_settings,
- .set_settings = macb_set_settings,
.get_regs_len = macb_get_regs_len,
.get_regs = macb_get_regs,
.get_link = ethtool_op_get_link,
@@ -2206,12 +2204,13 @@ static const struct ethtool_ops gem_ethtool_ops = {
.get_ethtool_stats = gem_get_ethtool_stats,
.get_strings = gem_get_ethtool_strings,
.get_sset_count = gem_get_sset_count,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
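[The two driver-private wrappers removed above did nothing beyond a NULL check and a call into phylib, which is exactly what the generic helpers do once the PHY lives in net_device::phydev. A simplified sketch of that shape, condensed from the phylib pattern of this era and not the verbatim kernel source:]

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>
    #include <linux/phy.h>

    static int get_link_ksettings_sketch(struct net_device *ndev,
                                         struct ethtool_link_ksettings *cmd)
    {
            struct phy_device *phydev = ndev->phydev; /* set at MII probe */

            if (!phydev)
                    return -ENODEV;

            return phy_ethtool_ksettings_get(phydev, cmd);
    }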
static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct macb *bp = netdev_priv(dev);
- struct phy_device *phydev = bp->phy_dev;
+ struct phy_device *phydev = dev->phydev;
if (!netif_running(dev))
return -EINVAL;
@@ -2570,7 +2569,7 @@ static int at91ether_open(struct net_device *dev)
MACB_BIT(HRESP));
/* schedule a link state check */
- phy_start(lp->phy_dev);
+ phy_start(dev->phydev);
netif_start_queue(dev);
@@ -3010,7 +3009,7 @@ static int macb_probe(struct platform_device *pdev)
if (err)
goto err_out_free_netdev;
- phydev = bp->phy_dev;
+ phydev = dev->phydev;
netif_carrier_off(dev);
@@ -3029,7 +3028,7 @@ static int macb_probe(struct platform_device *pdev)
return 0;
err_out_unregister_mdio:
- phy_disconnect(bp->phy_dev);
+ phy_disconnect(dev->phydev);
mdiobus_unregister(bp->mii_bus);
mdiobus_free(bp->mii_bus);
@@ -3057,8 +3056,8 @@ static int macb_remove(struct platform_device *pdev)
if (dev) {
bp = netdev_priv(dev);
- if (bp->phy_dev)
- phy_disconnect(bp->phy_dev);
+ if (dev->phydev)
+ phy_disconnect(dev->phydev);
mdiobus_unregister(bp->mii_bus);
mdiobus_free(bp->mii_bus);
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 644743c9c..b6fcf1062 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -823,7 +823,6 @@ struct macb {
struct macb_or_gem_ops macbgem_ops;
struct mii_bus *mii_bus;
- struct phy_device *phy_dev;
int link;
int speed;
int duplex;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
index 8ad7425f8..c03d37016 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
@@ -19,26 +19,16 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/version.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <linux/kthread.h>
#include <linux/netdevice.h>
-#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
-#include "octeon_nic.h"
#include "octeon_main.h"
-#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
-#include "liquidio_image.h"
-#include "octeon_mem_ops.h"
int lio_cn6xxx_soft_reset(struct octeon_device *oct)
{
@@ -74,9 +64,9 @@ void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct)
u32 val;
pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
- if (val & 0x000f0000) {
+ if (val & 0x000c0000) {
dev_err(&oct->pci_dev->dev, "PCI-E Link error detected: 0x%08x\n",
- val & 0x000f0000);
+ val & 0x000c0000);
}
val |= 0xf; /* Enable Link error reporting */
@@ -229,7 +219,7 @@ void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct)
/* / Select Packet count instead of bytes for SLI_PKTi_CNTS[CNT] */
octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_BMODE, 0);
- /* / Select ES,RO,NS setting from register for Output Queue Packet
+ /* Select ES, RO, NS setting from register for Output Queue Packet
* Address
*/
octeon_write_csr(oct, CN6XXX_SLI_PKT_DPADDR, 0xFFFFFFFF);
@@ -367,7 +357,8 @@ void lio_cn6xxx_enable_io_queues(struct octeon_device *oct)
void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
{
- u32 mask, i, loop = HZ;
+ int i;
+ u32 mask, loop = HZ;
u32 d32;
/* Reset the Enable bits for Input Queues. */
@@ -376,7 +367,7 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);
/* Wait until hardware indicates that the queues are out of reset. */
- mask = oct->io_qmask.iq;
+ mask = (u32)oct->io_qmask.iq;
d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
while (((d32 & mask) != mask) && loop--) {
d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
@@ -384,8 +375,8 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
}
/* Reset the doorbell register for each Input queue. */
- for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
- if (!(oct->io_qmask.iq & (1UL << i)))
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.iq & (1ULL << i)))
continue;
octeon_write_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i), 0xFFFFFFFF);
d32 = octeon_read_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i));
@@ -398,7 +389,7 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
/* Wait until hardware indicates that the queues are out of reset. */
loop = HZ;
- mask = oct->io_qmask.oq;
+ mask = (u32)oct->io_qmask.oq;
d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
while (((d32 & mask) != mask) && loop--) {
d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
@@ -408,8 +399,8 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
/* Reset the doorbell register for each Output queue. */
/* for (i = 0; i < oct->num_oqs; i++) { */
- for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
- if (!(oct->io_qmask.oq & (1UL << i)))
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.oq & (1ULL << i)))
continue;
octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i), 0xFFFFFFFF);
d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i));
@@ -429,16 +420,16 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
void lio_cn6xxx_reinit_regs(struct octeon_device *oct)
{
- u32 i;
+ int i;
- for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
- if (!(oct->io_qmask.iq & (1UL << i)))
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.iq & (1ULL << i)))
continue;
oct->fn_list.setup_iq_regs(oct, i);
}
- for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
- if (!(oct->io_qmask.oq & (1UL << i)))
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.oq & (1ULL << i)))
continue;
oct->fn_list.setup_oq_regs(oct, i);
}
@@ -450,8 +441,8 @@ void lio_cn6xxx_reinit_regs(struct octeon_device *oct)
oct->fn_list.enable_io_queues(oct);
/* for (i = 0; i < oct->num_oqs; i++) { */
- for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
- if (!(oct->io_qmask.oq & (1UL << i)))
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.oq & (1ULL << i)))
continue;
writel(oct->droq[i]->max_count, oct->droq[i]->pkts_credit_reg);
}
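[The recurring 1UL -> 1ULL change in these loops matters because io_qmask.iq/oq are 64-bit while unsigned long is only 32 bits on 32-bit kernels, so `1UL << i` is undefined for i >= 32 and the upper queues would never match. A small standalone demonstration of the truncation hazard, with illustrative values:]

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t qmask = (1ULL << 40) | 1ULL; /* queues 0 and 40 enabled */

            /* 1ULL keeps the per-queue test valid for queue numbers >= 32. */
            printf("queue 40 set: %d\n", (qmask & (1ULL << 40)) != 0);
            /* A 32-bit intermediate silently loses the high queues, which
             * is why the explicit (u32) casts in this hunk are reserved
             * for the 32-bit reset-status registers, not the queue loops.
             */
            printf("low 32 bits only: 0x%08" PRIx32 "\n", (uint32_t)qmask);
            return 0;
    }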
@@ -495,8 +486,7 @@ u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx)
}
u32
-lio_cn6xxx_update_read_index(struct octeon_device *oct __attribute__((unused)),
- struct octeon_instr_queue *iq)
+lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq)
{
u32 new_idx = readl(iq->inst_cnt_reg);
@@ -547,17 +537,18 @@ static void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct)
dev_dbg(&oct->pci_dev->dev, "Using PCIE Port %d\n", oct->pcie_port);
}
-void
+static void
lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64)
{
dev_err(&oct->pci_dev->dev, "Error Intr: 0x%016llx\n",
CVM_CAST64(intr64));
}
-int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
+static int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
{
struct octeon_droq *droq;
- u32 oq_no, pkt_count, droq_time_mask, droq_mask, droq_int_enb;
+ int oq_no;
+ u32 pkt_count, droq_time_mask, droq_mask, droq_int_enb;
u32 droq_cnt_enb, droq_cnt_mask;
droq_cnt_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
@@ -573,12 +564,12 @@ int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
oct->droq_intr = 0;
/* for (oq_no = 0; oq_no < oct->num_oqs; oq_no++) { */
- for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES; oq_no++) {
- if (!(droq_mask & (1 << oq_no)))
+ for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct); oq_no++) {
+ if (!(droq_mask & (1ULL << oq_no)))
continue;
droq = oct->droq[oq_no];
- pkt_count = octeon_droq_check_hw_for_pkts(oct, droq);
+ pkt_count = octeon_droq_check_hw_for_pkts(droq);
if (pkt_count) {
oct->droq_intr |= (1ULL << oq_no);
if (droq->ops.poll_mode) {
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
index f77918779..28c472242 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
@@ -82,8 +82,6 @@ void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no);
void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no);
void lio_cn6xxx_enable_io_queues(struct octeon_device *oct);
void lio_cn6xxx_disable_io_queues(struct octeon_device *oct);
-void lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64);
-int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct);
irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev);
void lio_cn6xxx_reinit_regs(struct octeon_device *oct);
void lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr,
@@ -91,8 +89,7 @@ void lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr,
void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct, u32 idx, u32 mask);
u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx);
u32
-lio_cn6xxx_update_read_index(struct octeon_device *oct __attribute__((unused)),
- struct octeon_instr_queue *iq);
+lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq);
void lio_cn6xxx_enable_interrupt(void *chip);
void lio_cn6xxx_disable_interrupt(void *chip);
void cn6xxx_get_pcie_qlmport(struct octeon_device *oct);
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
index 8e830d0c0..29755bc68 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
@@ -19,28 +19,17 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/version.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <linux/kthread.h>
#include <linux/netdevice.h>
-#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
-#include "octeon_nic.h"
#include "octeon_main.h"
-#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
-#include "cn68xx_device.h"
-#include "liquidio_image.h"
-#include "octeon_mem_ops.h"
static void lio_cn68xx_set_dpi_regs(struct octeon_device *oct)
{
@@ -129,7 +118,7 @@ static inline void lio_cn68xx_vendor_message_fix(struct octeon_device *oct)
pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_FLTMSK, val);
}
-int lio_is_210nv(struct octeon_device *oct)
+static int lio_is_210nv(struct octeon_device *oct)
{
u64 mio_qlm4_cfg = lio_pci_readq(oct, CN6XXX_MIO_QLM4_CFG);
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h
index d4e1c9fb0..ea7bdcce6 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h
@@ -28,6 +28,5 @@
#define __CN68XX_DEVICE_H__
int lio_setup_cn68xx_octeon_device(struct octeon_device *oct);
-int lio_is_210nv(struct octeon_device *oct);
#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h
index 38cddbd10..d45a0f4aa 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h
@@ -29,7 +29,6 @@
#ifndef __CN68XX_REGS_H__
#define __CN68XX_REGS_H__
-#include "cn66xx_regs.h"
/*###################### REQUEST QUEUE #########################*/
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 245c063ed..289eb8907 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -19,13 +19,9 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/version.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
-#include <linux/ethtool.h>
-#include <linux/dma-mapping.h>
#include <linux/pci.h>
-#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
@@ -36,9 +32,8 @@
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
-#include "cn68xx_regs.h"
-#include "cn68xx_device.h"
-#include "liquidio_image.h"
+
+static int octnet_get_link_stats(struct net_device *netdev);
struct oct_mdio_cmd_context {
int octeon_id;
@@ -71,34 +66,126 @@ enum {
INTERFACE_MODE_RXAUI,
INTERFACE_MODE_QSGMII,
INTERFACE_MODE_AGL,
+ INTERFACE_MODE_XLAUI,
+ INTERFACE_MODE_XFI,
+ INTERFACE_MODE_10G_KR,
+ INTERFACE_MODE_40G_KR4,
+ INTERFACE_MODE_MIXED,
};
#define ARRAY_LENGTH(a) (sizeof(a) / sizeof((a)[0]))
#define OCT_ETHTOOL_REGDUMP_LEN 4096
#define OCT_ETHTOOL_REGSVER 1
+/* statistics of PF */
+static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
+ "rx_packets",
+ "tx_packets",
+ "rx_bytes",
+ "tx_bytes",
+ "rx_errors", /*jabber_err+l2_err+frame_err */
+ "tx_errors", /*fw_err_pko+fw_err_link+fw_err_drop */
+ "rx_dropped", /*st->fromwire.total_rcvd - st->fromwire.fw_total_rcvd
+ *+st->fromwire.dmac_drop + st->fromwire.fw_err_drop
+ */
+ "tx_dropped",
+
+ "tx_total_sent",
+ "tx_total_fwd",
+ "tx_err_pko",
+ "tx_err_link",
+ "tx_err_drop",
+
+ "tx_tso",
+ "tx_tso_packets",
+ "tx_tso_err",
+ "tx_vxlan",
+
+ "mac_tx_total_pkts",
+ "mac_tx_total_bytes",
+ "mac_tx_mcast_pkts",
+ "mac_tx_bcast_pkts",
+ "mac_tx_ctl_packets", /*oct->link_stats.fromhost.ctl_sent */
+ "mac_tx_total_collisions",
+ "mac_tx_one_collision",
+ "mac_tx_multi_collison",
+ "mac_tx_max_collision_fail",
+ "mac_tx_max_deferal_fail",
+ "mac_tx_fifo_err",
+ "mac_tx_runts",
+
+ "rx_total_rcvd",
+ "rx_total_fwd",
+ "rx_jabber_err",
+ "rx_l2_err",
+ "rx_frame_err",
+ "rx_err_pko",
+ "rx_err_link",
+ "rx_err_drop",
+
+ "rx_vxlan",
+ "rx_vxlan_err",
+
+ "rx_lro_pkts",
+ "rx_lro_bytes",
+ "rx_total_lro",
+
+ "rx_lro_aborts",
+ "rx_lro_aborts_port",
+ "rx_lro_aborts_seq",
+ "rx_lro_aborts_tsval",
+ "rx_lro_aborts_timer",
+ "rx_fwd_rate",
+
+ "mac_rx_total_rcvd",
+ "mac_rx_bytes",
+ "mac_rx_total_bcst",
+ "mac_rx_total_mcst",
+ "mac_rx_runts",
+ "mac_rx_ctl_packets",
+ "mac_rx_fifo_err",
+ "mac_rx_dma_drop",
+ "mac_rx_fcs_err",
+
+ "link_state_changes",
+};
+
+/* statistics of host tx queue */
static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
- "Instr posted",
- "Instr processed",
- "Instr dropped",
- "Bytes Sent",
- "Sgentry_sent",
- "Inst cntreg",
- "Tx done",
- "Tx Iq busy",
- "Tx dropped",
- "Tx bytes",
+ "packets", /*oct->instr_queue[iq_no]->stats.tx_done*/
+ "bytes", /*oct->instr_queue[iq_no]->stats.tx_tot_bytes*/
+ "dropped",
+ "iq_busy",
+ "sgentry_sent",
+
+ "fw_instr_posted",
+ "fw_instr_processed",
+ "fw_instr_dropped",
+ "fw_bytes_sent",
+
+ "tso",
+ "vxlan",
+ "txq_restart",
};
+/* statistics of host rx queue */
static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
- "OQ Pkts Received",
- "OQ Bytes Received",
- "Dropped no dispatch",
- "Dropped nomem",
- "Dropped toomany",
- "Stack RX cnt",
- "Stack RX Bytes",
- "RX dropped",
+ "packets", /*oct->droq[oq_no]->stats.rx_pkts_received */
+ "bytes", /*oct->droq[oq_no]->stats.rx_bytes_received */
+ "dropped", /*oct->droq[oq_no]->stats.rx_dropped+
+ *oct->droq[oq_no]->stats.dropped_nodispatch+
+ *oct->droq[oq_no]->stats.dropped_toomany+
+ *oct->droq[oq_no]->stats.dropped_nomem
+ */
+ "dropped_nomem",
+ "dropped_toomany",
+ "fw_dropped",
+ "fw_pkts_received",
+ "fw_bytes_received",
+ "fw_dropped_nodispatch",
+
+ "vxlan",
+ "buffer_alloc_failure",
};
#define OCTNIC_NCMD_AUTONEG_ON 0x1
@@ -112,8 +199,9 @@ static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
linfo = &lio->linfo;
- if (linfo->link.s.interface == INTERFACE_MODE_XAUI ||
- linfo->link.s.interface == INTERFACE_MODE_RXAUI) {
+ if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
+ linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
+ linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
ecmd->port = PORT_FIBRE;
ecmd->supported =
(SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE |
@@ -124,10 +212,11 @@ static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->autoneg = AUTONEG_DISABLE;
} else {
- dev_err(&oct->pci_dev->dev, "Unknown link interface reported\n");
+ dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n",
+ linfo->link.s.if_mode);
}
- if (linfo->link.s.status) {
+ if (linfo->link.s.link_up) {
ethtool_cmd_speed_set(ecmd, linfo->link.s.speed);
ecmd->duplex = linfo->link.s.duplex;
} else {
@@ -222,23 +311,20 @@ static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
struct octnic_ctrl_pkt nctrl;
- struct octnic_ctrl_params nparams;
int ret = 0;
memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
- nctrl.ncmd.s.param1 = lio->linfo.ifidx;
- nctrl.ncmd.s.param2 = addr;
- nctrl.ncmd.s.param3 = val;
+ nctrl.ncmd.s.param1 = addr;
+ nctrl.ncmd.s.param2 = val;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.wait_time = 100;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
- nparams.resp_order = OCTEON_RESP_ORDERED;
-
- ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
return -EINVAL;
@@ -253,20 +339,18 @@ static void octnet_mdio_resp_callback(struct octeon_device *oct,
u32 status,
void *buf)
{
- struct oct_mdio_cmd_resp *mdio_cmd_rsp;
struct oct_mdio_cmd_context *mdio_cmd_ctx;
struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
- mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
oct = lio_get_device(mdio_cmd_ctx->octeon_id);
if (status) {
dev_err(&oct->pci_dev->dev, "MDIO instruction failed. Status: %llx\n",
CVM_CAST64(status));
- ACCESS_ONCE(mdio_cmd_ctx->cond) = -1;
+ WRITE_ONCE(mdio_cmd_ctx->cond, -1);
} else {
- ACCESS_ONCE(mdio_cmd_ctx->cond) = 1;
+ WRITE_ONCE(mdio_cmd_ctx->cond, 1);
}
wake_up_interruptible(&mdio_cmd_ctx->wc);
}
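[The ACCESS_ONCE() -> WRITE_ONCE()/READ_ONCE() conversion in this file is mechanical, but the accessors are what keep the completion flag honest between the IRQ-time callback and the sleeping waiter: every access becomes one real load or store the compiler cannot tear or elide. A userspace model with C11 atomics standing in for the kernel macros:]

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int cond;             /* stands in for mdio_cmd_ctx->cond */

    static void completion_callback(int ok)
    {
            /* WRITE_ONCE(): exactly one visible store */
            atomic_store_explicit(&cond, ok ? 1 : -1, memory_order_relaxed);
    }

    int main(void)
    {
            completion_callback(1);
            /* READ_ONCE(): a real load, not a stale register value */
            if (atomic_load_explicit(&cond, memory_order_relaxed) == 1)
                    puts("mdio response valid");
            return 0;
    }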
@@ -297,15 +381,16 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;
- ACCESS_ONCE(mdio_cmd_ctx->cond) = 0;
+ WRITE_ONCE(mdio_cmd_ctx->cond, 0);
mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev);
mdio_cmd->op = op;
mdio_cmd->mdio_addr = loc;
if (op)
mdio_cmd->value1 = *value;
- mdio_cmd->value2 = lio->linfo.ifidx;
octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
0, 0, 0);
@@ -317,7 +402,7 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
retval = octeon_send_soft_command(oct_dev, sc);
- if (retval) {
+ if (retval == IQ_SEND_FAILED) {
dev_err(&oct_dev->pci_dev->dev,
"octnet_mdio45_access instruction failed status: %x\n",
retval);
@@ -335,7 +420,7 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
sizeof(struct oct_mdio_cmd) / 8);
- if (ACCESS_ONCE(mdio_cmd_ctx->cond) == 1) {
+ if (READ_ONCE(mdio_cmd_ctx->cond) == 1) {
if (!op)
*value = mdio_cmd_rsp->resp.value1;
} else {
@@ -379,18 +464,16 @@ static int lio_set_phys_id(struct net_device *netdev,
/* Configure Beacon values */
value = LIO68XX_LED_BEACON_CFGON;
- ret =
- octnet_mdio45_access(lio, 1,
- LIO68XX_LED_BEACON_ADDR,
- &value);
+ ret = octnet_mdio45_access(lio, 1,
+ LIO68XX_LED_BEACON_ADDR,
+ &value);
if (ret)
return ret;
value = LIO68XX_LED_CTRL_CFGON;
- ret =
- octnet_mdio45_access(lio, 1,
- LIO68XX_LED_CTRL_ADDR,
- &value);
+ ret = octnet_mdio45_access(lio, 1,
+ LIO68XX_LED_CTRL_ADDR,
+ &value);
if (ret)
return ret;
} else {
@@ -469,7 +552,7 @@ lio_ethtool_get_ringparam(struct net_device *netdev,
tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
}
- if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE) {
+ if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE - OCTNET_FRM_HEADER_SIZE) {
ering->rx_pending = 0;
ering->rx_max_pending = 0;
ering->rx_mini_pending = 0;
@@ -503,10 +586,10 @@ static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
if (msglvl & NETIF_MSG_HW)
liquidio_set_feature(netdev,
- OCTNET_CMD_VERBOSE_ENABLE);
+ OCTNET_CMD_VERBOSE_ENABLE, 0);
else
liquidio_set_feature(netdev,
- OCTNET_CMD_VERBOSE_DISABLE);
+ OCTNET_CMD_VERBOSE_DISABLE, 0);
}
lio->msg_enable = msglvl;
@@ -518,61 +601,279 @@ lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
/* Notes: Not supporting any auto negotiation in these
* drivers. Just report pause frame support.
*/
- pause->tx_pause = 1;
- pause->rx_pause = 1; /* TODO: Need to support RX pause frame!!. */
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ pause->autoneg = 0;
+
+ pause->tx_pause = oct->tx_pause;
+ pause->rx_pause = oct->rx_pause;
}
static void
lio_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, u64 *data)
+ struct ethtool_stats *stats __attribute__((unused)),
+ u64 *data)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct_dev = lio->oct_dev;
+ struct net_device_stats *netstats = &netdev->stats;
int i = 0, j;
- for (j = 0; j < MAX_OCTEON_INSTR_QUEUES; j++) {
- if (!(oct_dev->io_qmask.iq & (1UL << j)))
+ netdev->netdev_ops->ndo_get_stats(netdev);
+ octnet_get_link_stats(netdev);
+
+ /*sum of oct->droq[oq_no]->stats->rx_pkts_received */
+ data[i++] = CVM_CAST64(netstats->rx_packets);
+ /*sum of oct->instr_queue[iq_no]->stats.tx_done */
+ data[i++] = CVM_CAST64(netstats->tx_packets);
+ /*sum of oct->droq[oq_no]->stats->rx_bytes_received */
+ data[i++] = CVM_CAST64(netstats->rx_bytes);
+ /*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
+ data[i++] = CVM_CAST64(netstats->tx_bytes);
+ data[i++] = CVM_CAST64(netstats->rx_errors);
+ data[i++] = CVM_CAST64(netstats->tx_errors);
+ /*sum of oct->droq[oq_no]->stats->rx_dropped +
+ *oct->droq[oq_no]->stats->dropped_nodispatch +
+ *oct->droq[oq_no]->stats->dropped_toomany +
+ *oct->droq[oq_no]->stats->dropped_nomem
+ */
+ data[i++] = CVM_CAST64(netstats->rx_dropped);
+ /*sum of oct->instr_queue[iq_no]->stats.tx_dropped */
+ data[i++] = CVM_CAST64(netstats->tx_dropped);
+
+ /*data[i++] = CVM_CAST64(stats->multicast); */
+ /*data[i++] = CVM_CAST64(stats->collisions); */
+
+ /* firmware tx stats */
+ /*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx].
+ *fromhost.fw_total_sent
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent);
+ /*per_core_stats[i].link_stats[port].fromhost.fw_total_fwd */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd);
+ /*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko);
+ /*per_core_stats[j].link_stats[i].fromhost.fw_err_link */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
+ *fw_err_drop
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop);
+
+ /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
+ *fw_tso_fwd
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
+ *fw_err_tso
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
+ *fw_tx_vxlan
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan);
+
+ /* mac tx statistics */
+ /*CVMX_BGXX_CMRX_TX_STAT5 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent);
+ /*CVMX_BGXX_CMRX_TX_STAT4 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent);
+ /*CVMX_BGXX_CMRX_TX_STAT15 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent);
+ /*CVMX_BGXX_CMRX_TX_STAT14 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent);
+ /*CVMX_BGXX_CMRX_TX_STAT17 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent);
+ /*CVMX_BGXX_CMRX_TX_STAT0 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions);
+ /*CVMX_BGXX_CMRX_TX_STAT3 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent);
+ /*CVMX_BGXX_CMRX_TX_STAT2 */
+ data[i++] =
+ CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent);
+ /*CVMX_BGXX_CMRX_TX_STAT0 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail);
+ /*CVMX_BGXX_CMRX_TX_STAT1 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail);
+ /*CVMX_BGXX_CMRX_TX_STAT16 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err);
+ /*CVMX_BGXX_CMRX_TX_STAT6 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts);
+
+ /* RX firmware stats */
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_total_rcvd
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_total_fwd
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd);
+ /*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err);
+ /*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err);
+ /*per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_err_pko
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko);
+ /*per_core_stats[j].link_stats[i].fromwire.fw_err_link */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
+ *fromwire.fw_err_drop
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop);
+
+ /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
+ *fromwire.fw_rx_vxlan
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
+ *fromwire.fw_rx_vxlan_err
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err);
+
+ /* LRO */
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_lro_pkts
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_lro_octs
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs);
+ /*per_core_stats[j].link_stats[i].fromwire.fw_total_lro */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro);
+ /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_lro_aborts_port
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_lro_aborts_seq
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_lro_aborts_tsval
+ */
+ data[i++] =
+ CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_lro_aborts_timer
+ */
+ data[i++] =
+ CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer);
+ /* intrmod: packet forward rate */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate);
+
+ /* mac: link-level stats */
+ /*CVMX_BGXX_CMRX_RX_STAT0 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd);
+ /*CVMX_BGXX_CMRX_RX_STAT1 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd);
+ /*CVMX_PKI_STATX_STAT5 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst);
+ /*CVMX_PKI_STATX_STAT5 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst);
+ /*wqe->word2.err_code or wqe->word2.err_level */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts);
+ /*CVMX_BGXX_CMRX_RX_STAT2 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd);
+ /*CVMX_BGXX_CMRX_RX_STAT6 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err);
+ /*CVMX_BGXX_CMRX_RX_STAT4 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop);
+ /*wqe->word2.err_code or wqe->word2.err_level */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err);
+ /*lio->link_changes*/
+ data[i++] = CVM_CAST64(lio->link_changes);
+
+ /* TX -- lio_update_stats(lio); */
+ for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) {
+ if (!(oct_dev->io_qmask.iq & (1ULL << j)))
continue;
+ /*packets to network port*/
+ /*# of packets tx to network */
+ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
+ /*# of bytes tx to network */
data[i++] =
- CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
- data[i++] =
- CVM_CAST64(
- oct_dev->instr_queue[j]->stats.instr_processed);
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
+ /*# of packets dropped */
data[i++] =
- CVM_CAST64(
- oct_dev->instr_queue[j]->stats.instr_dropped);
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
+ /*# of tx fails due to queue full */
data[i++] =
- CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
+ /*XXX gather entries sent */
data[i++] =
CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);
+
+ /*instruction to firmware: data and control */
+ /*# of instructions to the queue */
data[i++] =
- readl(oct_dev->instr_queue[j]->inst_cnt_reg);
- data[i++] =
- CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
- data[i++] =
- CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
+ /*# of instructions processed */
+ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->
+ stats.instr_processed);
+ /*# of instructions could not be processed */
+ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->
+ stats.instr_dropped);
+ /*bytes sent through the queue */
data[i++] =
- CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);
+
+ /*tso request*/
+ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
+ /*vxlan request*/
+ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
+ /*txq restart*/
data[i++] =
- CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart);
}
- /* for (j = 0; j < oct_dev->num_oqs; j++){ */
- for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES; j++) {
- if (!(oct_dev->io_qmask.oq & (1UL << j)))
+ /* RX */
+ /* for (j = 0; j < oct_dev->num_oqs; j++) { */
+ for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) {
+ if (!(oct_dev->io_qmask.oq & (1ULL << j)))
continue;
- data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
- data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
- data[i++] =
- CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
- data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
- data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
+
+ /*packets send to TCP/IP network stack */
+ /*# of packets to network stack */
data[i++] =
CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
+ /*# of bytes to network stack */
data[i++] =
CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
+ /*# of packets dropped */
+ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
+ oct_dev->droq[j]->stats.dropped_toomany +
+ oct_dev->droq[j]->stats.rx_dropped);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
data[i++] =
CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
+
+ /*control and data path*/
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
+
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
}
}
@@ -581,26 +882,43 @@ static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct_dev = lio->oct_dev;
int num_iq_stats, num_oq_stats, i, j;
+ int num_stats;
- num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
- for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
- if (!(oct_dev->io_qmask.iq & (1UL << i)))
- continue;
- for (j = 0; j < num_iq_stats; j++) {
- sprintf(data, "IQ%d %s", i, oct_iq_stats_strings[j]);
+ switch (stringset) {
+ case ETH_SS_STATS:
+ num_stats = ARRAY_SIZE(oct_stats_strings);
+ for (j = 0; j < num_stats; j++) {
+ sprintf(data, "%s", oct_stats_strings[j]);
data += ETH_GSTRING_LEN;
}
- }
- num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
- /* for (i = 0; i < oct_dev->num_oqs; i++) { */
- for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
- if (!(oct_dev->io_qmask.oq & (1UL << i)))
- continue;
- for (j = 0; j < num_oq_stats; j++) {
- sprintf(data, "OQ%d %s", i, oct_droq_stats_strings[j]);
- data += ETH_GSTRING_LEN;
+ num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
+ if (!(oct_dev->io_qmask.iq & (1ULL << i)))
+ continue;
+ for (j = 0; j < num_iq_stats; j++) {
+ sprintf(data, "tx-%d-%s", i,
+ oct_iq_stats_strings[j]);
+ data += ETH_GSTRING_LEN;
+ }
}
+
+ num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
+ /* for (i = 0; i < oct_dev->num_oqs; i++) { */
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
+ if (!(oct_dev->io_qmask.oq & (1ULL << i)))
+ continue;
+ for (j = 0; j < num_oq_stats; j++) {
+ sprintf(data, "rx-%d-%s", i,
+ oct_droq_stats_strings[j]);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+ break;
+
+ default:
+ netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
+ break;
}
}
@@ -609,8 +927,14 @@ static int lio_get_sset_count(struct net_device *netdev, int sset)
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct_dev = lio->oct_dev;
- return (ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs) +
- (ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
+ switch (sset) {
+ case ETH_SS_STATS:
+ return (ARRAY_SIZE(oct_stats_strings) +
+ ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
+ ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
+ default:
+ return -EOPNOTSUPP;
+ }
}
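[lio_get_sset_count(), lio_get_strings() and lio_get_ethtool_stats() form a contract: the count, the emitted names and the filled u64 slots must match exactly, which is why this patch extends all three in lockstep. A toy model of the invariant, with made-up stat counts:]

    #include <assert.h>

    #define NUM_PORT_STATS  3   /* made-up counts; the driver uses the */
    #define NUM_IQ_STATS    2   /* ARRAY_SIZE() of its string tables   */
    #define NUM_OQ_STATS    2

    static int sset_count(int num_iqs, int num_oqs)
    {
            return NUM_PORT_STATS + NUM_IQ_STATS * num_iqs +
                   NUM_OQ_STATS * num_oqs;
    }

    int main(void)
    {
            int num_iqs = 4, num_oqs = 4, filled = 0;

            filled += NUM_PORT_STATS;           /* port/firmware stats */
            filled += NUM_IQ_STATS * num_iqs;   /* per tx queue        */
            filled += NUM_OQ_STATS * num_oqs;   /* per rx queue        */
            assert(filled == sset_count(num_iqs, num_oqs));
            return 0;
    }

[Worth noting: get_strings() and get_ethtool_stats() iterate the qmask bits while get_sset_count() multiplies by num_iqs/num_oqs, so the contract holds only when every set bit corresponds to an allocated queue.]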
static int lio_get_intr_coalesce(struct net_device *netdev,
@@ -618,50 +942,49 @@ static int lio_get_intr_coalesce(struct net_device *netdev,
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
- struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
struct octeon_instr_queue *iq;
struct oct_intrmod_cfg *intrmod_cfg;
intrmod_cfg = &oct->intrmod;
switch (oct->chip_id) {
- /* case OCTEON_CN73XX: Todo */
- /* break; */
case OCTEON_CN68XX:
- case OCTEON_CN66XX:
- if (!intrmod_cfg->intrmod_enable) {
+ case OCTEON_CN66XX: {
+ struct octeon_cn6xxx *cn6xxx =
+ (struct octeon_cn6xxx *)oct->chip;
+
+ if (!intrmod_cfg->rx_enable) {
intr_coal->rx_coalesce_usecs =
CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
intr_coal->rx_max_coalesced_frames =
CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
- } else {
- intr_coal->use_adaptive_rx_coalesce =
- intrmod_cfg->intrmod_enable;
- intr_coal->rate_sample_interval =
- intrmod_cfg->intrmod_check_intrvl;
- intr_coal->pkt_rate_high =
- intrmod_cfg->intrmod_maxpkt_ratethr;
- intr_coal->pkt_rate_low =
- intrmod_cfg->intrmod_minpkt_ratethr;
- intr_coal->rx_max_coalesced_frames_high =
- intrmod_cfg->intrmod_maxcnt_trigger;
- intr_coal->rx_coalesce_usecs_high =
- intrmod_cfg->intrmod_maxtmr_trigger;
- intr_coal->rx_coalesce_usecs_low =
- intrmod_cfg->intrmod_mintmr_trigger;
- intr_coal->rx_max_coalesced_frames_low =
- intrmod_cfg->intrmod_mincnt_trigger;
}
-
- iq = oct->instr_queue[lio->linfo.txpciq[0]];
+ iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
break;
-
+ }
default:
netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
return -EINVAL;
}
-
+ if (intrmod_cfg->rx_enable) {
+ intr_coal->use_adaptive_rx_coalesce =
+ intrmod_cfg->rx_enable;
+ intr_coal->rate_sample_interval =
+ intrmod_cfg->check_intrvl;
+ intr_coal->pkt_rate_high =
+ intrmod_cfg->maxpkt_ratethr;
+ intr_coal->pkt_rate_low =
+ intrmod_cfg->minpkt_ratethr;
+ intr_coal->rx_max_coalesced_frames_high =
+ intrmod_cfg->rx_maxcnt_trigger;
+ intr_coal->rx_coalesce_usecs_high =
+ intrmod_cfg->rx_maxtmr_trigger;
+ intr_coal->rx_coalesce_usecs_low =
+ intrmod_cfg->rx_mintmr_trigger;
+ intr_coal->rx_max_coalesced_frames_low =
+ intrmod_cfg->rx_mincnt_trigger;
+ }
return 0;
}
@@ -681,19 +1004,20 @@ static void octnet_intrmod_callback(struct octeon_device *oct_dev,
else
dev_info(&oct_dev->pci_dev->dev,
"Rx-Adaptive Interrupt moderation enabled:%llx\n",
- oct_dev->intrmod.intrmod_enable);
+ oct_dev->intrmod.rx_enable);
octeon_free_soft_command(oct_dev, sc);
}
/* Configure interrupt moderation parameters */
-static int octnet_set_intrmod_cfg(void *oct, struct oct_intrmod_cfg *intr_cfg)
+static int octnet_set_intrmod_cfg(struct lio *lio,
+ struct oct_intrmod_cfg *intr_cfg)
{
struct octeon_soft_command *sc;
struct oct_intrmod_cmd *cmd;
struct oct_intrmod_cfg *cfg;
int retval;
- struct octeon_device *oct_dev = (struct octeon_device *)oct;
+ struct octeon_device *oct_dev = lio->oct_dev;
/* Alloc soft command */
sc = (struct octeon_soft_command *)
@@ -714,6 +1038,8 @@ static int octnet_set_intrmod_cfg(void *oct, struct oct_intrmod_cfg *intr_cfg)
cmd->cfg = cfg;
cmd->oct_dev = oct_dev;
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);
@@ -722,17 +1048,171 @@ static int octnet_set_intrmod_cfg(void *oct, struct oct_intrmod_cfg *intr_cfg)
sc->wait_time = 1000;
retval = octeon_send_soft_command(oct_dev, sc);
- if (retval) {
+ if (retval == IQ_SEND_FAILED) {
+ octeon_free_soft_command(oct_dev, sc);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+octnet_nic_stats_callback(struct octeon_device *oct_dev,
+ u32 status, void *ptr)
+{
+ struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
+ struct oct_nic_stats_resp *resp = (struct oct_nic_stats_resp *)
+ sc->virtrptr;
+ struct oct_nic_stats_ctrl *ctrl = (struct oct_nic_stats_ctrl *)
+ sc->ctxptr;
+ struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
+ struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
+
+ struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
+ struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;
+
+ if ((status != OCTEON_REQUEST_TIMEOUT) && !resp->status) {
+ octeon_swap_8B_data((u64 *)&resp->stats,
+ (sizeof(struct oct_link_stats)) >> 3);
+
+ /* RX link-level stats */
+ rstats->total_rcvd = rsp_rstats->total_rcvd;
+ rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
+ rstats->total_bcst = rsp_rstats->total_bcst;
+ rstats->total_mcst = rsp_rstats->total_mcst;
+ rstats->runts = rsp_rstats->runts;
+ rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
+ /* Accounts for over/under-run of buffers */
+ rstats->fifo_err = rsp_rstats->fifo_err;
+ rstats->dmac_drop = rsp_rstats->dmac_drop;
+ rstats->fcs_err = rsp_rstats->fcs_err;
+ rstats->jabber_err = rsp_rstats->jabber_err;
+ rstats->l2_err = rsp_rstats->l2_err;
+ rstats->frame_err = rsp_rstats->frame_err;
+
+ /* RX firmware stats */
+ rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
+ rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
+ rstats->fw_err_pko = rsp_rstats->fw_err_pko;
+ rstats->fw_err_link = rsp_rstats->fw_err_link;
+ rstats->fw_err_drop = rsp_rstats->fw_err_drop;
+ rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
+ rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;
+
+ /* Number of packets that are LROed */
+ rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
+ /* Number of octets that are LROed */
+ rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
+ /* Number of LRO packets formed */
+ rstats->fw_total_lro = rsp_rstats->fw_total_lro;
+ /* Number of times LRO of a packet was aborted */
+ rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
+ rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
+ rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
+ rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
+ rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
+ /* intrmod: packet forward rate */
+ rstats->fwd_rate = rsp_rstats->fwd_rate;
+
+ /* TX link-level stats */
+ tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
+ tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
+ tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
+ tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
+ tstats->ctl_sent = rsp_tstats->ctl_sent;
+ /* Packets sent after one collision*/
+ tstats->one_collision_sent = rsp_tstats->one_collision_sent;
+ /* Packets sent after multiple collision*/
+ tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
+ /* Packets not sent due to max collisions */
+ tstats->max_collision_fail = rsp_tstats->max_collision_fail;
+ /* Packets not sent due to max deferrals */
+ tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
+ /* Accounts for over/under-run of buffers */
+ tstats->fifo_err = rsp_tstats->fifo_err;
+ tstats->runts = rsp_tstats->runts;
+ /* Total number of collisions detected */
+ tstats->total_collisions = rsp_tstats->total_collisions;
+
+ /* firmware stats */
+ tstats->fw_total_sent = rsp_tstats->fw_total_sent;
+ tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
+ tstats->fw_err_pko = rsp_tstats->fw_err_pko;
+ tstats->fw_err_link = rsp_tstats->fw_err_link;
+ tstats->fw_err_drop = rsp_tstats->fw_err_drop;
+ tstats->fw_tso = rsp_tstats->fw_tso;
+ tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
+ tstats->fw_err_tso = rsp_tstats->fw_err_tso;
+ tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;
+
+ resp->status = 1;
+ } else {
+ resp->status = -1;
+ }
+ complete(&ctrl->complete);
+}
+
+/* Fetch per-port link-level statistics from the firmware */
+static int octnet_get_link_stats(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct_dev = lio->oct_dev;
+
+ struct octeon_soft_command *sc;
+ struct oct_nic_stats_ctrl *ctrl;
+ struct oct_nic_stats_resp *resp;
+
+ int retval;
+
+ /* Alloc soft command */
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct_dev,
+ 0,
+ sizeof(struct oct_nic_stats_resp),
+ sizeof(struct octnic_ctrl_pkt));
+
+ if (!sc)
+ return -ENOMEM;
+
+ resp = (struct oct_nic_stats_resp *)sc->virtrptr;
+ memset(resp, 0, sizeof(struct oct_nic_stats_resp));
+
+ ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
+ memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
+ ctrl->netdev = netdev;
+ init_completion(&ctrl->complete);
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
+ OPCODE_NIC_PORT_STATS, 0, 0, 0);
+
+ sc->callback = octnet_nic_stats_callback;
+ sc->callback_arg = sc;
+ sc->wait_time = 500; /* in milliseconds */
+
+ retval = octeon_send_soft_command(oct_dev, sc);
+ if (retval == IQ_SEND_FAILED) {
+ octeon_free_soft_command(oct_dev, sc);
+ return -EINVAL;
+ }
+
+ wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));
+
+ if (resp->status != 1) {
octeon_free_soft_command(oct_dev, sc);
+
return -EINVAL;
}
+ octeon_free_soft_command(oct_dev, sc);
+
return 0;
}
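[octnet_get_link_stats() is a synchronous wrapper around an asynchronous soft command: arm a completion, send, sleep with a timeout, and let the response callback copy the stats and signal. A userspace model of that handshake, with pthreads standing in for the kernel completion API (the timeout is omitted to keep it short):]

    #include <pthread.h>
    #include <stdio.h>

    struct stats_ctx {
            pthread_mutex_t lock;
            pthread_cond_t cond;
            int done;                   /* the "completion" */
            int status;                 /* resp->status     */
    };

    /* Plays the role of octnet_nic_stats_callback(). */
    static void *stats_callback(void *arg)
    {
            struct stats_ctx *c = arg;

            pthread_mutex_lock(&c->lock);
            c->status = 1;              /* stats copied OK  */
            c->done = 1;                /* complete()       */
            pthread_cond_signal(&c->cond);
            pthread_mutex_unlock(&c->lock);
            return NULL;
    }

    int main(void)
    {
            struct stats_ctx c = { PTHREAD_MUTEX_INITIALIZER,
                                   PTHREAD_COND_INITIALIZER, 0, 0 };
            pthread_t t;

            pthread_create(&t, NULL, stats_callback, &c); /* "send" */
            pthread_mutex_lock(&c.lock);
            while (!c.done)             /* wait_for_completion_timeout() */
                    pthread_cond_wait(&c.cond, &c.lock);
            pthread_mutex_unlock(&c.lock);
            pthread_join(t, NULL);
            printf("resp.status = %d\n", c.status);
            return 0;
    }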
/* Enable/Disable auto interrupt moderation */
static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
- *intr_coal, int adaptive)
+ *intr_coal)
{
int ret = 0;
struct octeon_device *oct = lio->oct_dev;
@@ -740,59 +1220,73 @@ static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
intrmod_cfg = &oct->intrmod;
- if (adaptive) {
+ if (oct->intrmod.rx_enable || oct->intrmod.tx_enable) {
if (intr_coal->rate_sample_interval)
- intrmod_cfg->intrmod_check_intrvl =
+ intrmod_cfg->check_intrvl =
intr_coal->rate_sample_interval;
else
- intrmod_cfg->intrmod_check_intrvl =
+ intrmod_cfg->check_intrvl =
LIO_INTRMOD_CHECK_INTERVAL;
if (intr_coal->pkt_rate_high)
- intrmod_cfg->intrmod_maxpkt_ratethr =
+ intrmod_cfg->maxpkt_ratethr =
intr_coal->pkt_rate_high;
else
- intrmod_cfg->intrmod_maxpkt_ratethr =
+ intrmod_cfg->maxpkt_ratethr =
LIO_INTRMOD_MAXPKT_RATETHR;
if (intr_coal->pkt_rate_low)
- intrmod_cfg->intrmod_minpkt_ratethr =
+ intrmod_cfg->minpkt_ratethr =
intr_coal->pkt_rate_low;
else
- intrmod_cfg->intrmod_minpkt_ratethr =
+ intrmod_cfg->minpkt_ratethr =
LIO_INTRMOD_MINPKT_RATETHR;
-
+ }
+ if (oct->intrmod.rx_enable) {
if (intr_coal->rx_max_coalesced_frames_high)
- intrmod_cfg->intrmod_maxcnt_trigger =
+ intrmod_cfg->rx_maxcnt_trigger =
intr_coal->rx_max_coalesced_frames_high;
else
- intrmod_cfg->intrmod_maxcnt_trigger =
- LIO_INTRMOD_MAXCNT_TRIGGER;
+ intrmod_cfg->rx_maxcnt_trigger =
+ LIO_INTRMOD_RXMAXCNT_TRIGGER;
if (intr_coal->rx_coalesce_usecs_high)
- intrmod_cfg->intrmod_maxtmr_trigger =
+ intrmod_cfg->rx_maxtmr_trigger =
intr_coal->rx_coalesce_usecs_high;
else
- intrmod_cfg->intrmod_maxtmr_trigger =
- LIO_INTRMOD_MAXTMR_TRIGGER;
+ intrmod_cfg->rx_maxtmr_trigger =
+ LIO_INTRMOD_RXMAXTMR_TRIGGER;
if (intr_coal->rx_coalesce_usecs_low)
- intrmod_cfg->intrmod_mintmr_trigger =
+ intrmod_cfg->rx_mintmr_trigger =
intr_coal->rx_coalesce_usecs_low;
else
- intrmod_cfg->intrmod_mintmr_trigger =
- LIO_INTRMOD_MINTMR_TRIGGER;
+ intrmod_cfg->rx_mintmr_trigger =
+ LIO_INTRMOD_RXMINTMR_TRIGGER;
if (intr_coal->rx_max_coalesced_frames_low)
- intrmod_cfg->intrmod_mincnt_trigger =
+ intrmod_cfg->rx_mincnt_trigger =
intr_coal->rx_max_coalesced_frames_low;
else
- intrmod_cfg->intrmod_mincnt_trigger =
- LIO_INTRMOD_MINCNT_TRIGGER;
+ intrmod_cfg->rx_mincnt_trigger =
+ LIO_INTRMOD_RXMINCNT_TRIGGER;
+ }
+ if (oct->intrmod.tx_enable) {
+ if (intr_coal->tx_max_coalesced_frames_high)
+ intrmod_cfg->tx_maxcnt_trigger =
+ intr_coal->tx_max_coalesced_frames_high;
+ else
+ intrmod_cfg->tx_maxcnt_trigger =
+ LIO_INTRMOD_TXMAXCNT_TRIGGER;
+ if (intr_coal->tx_max_coalesced_frames_low)
+ intrmod_cfg->tx_mincnt_trigger =
+ intr_coal->tx_max_coalesced_frames_low;
+ else
+ intrmod_cfg->tx_mincnt_trigger =
+ LIO_INTRMOD_TXMINCNT_TRIGGER;
}
- intrmod_cfg->intrmod_enable = adaptive;
- ret = octnet_set_intrmod_cfg(oct, intrmod_cfg);
+ ret = octnet_set_intrmod_cfg(lio, intrmod_cfg);
return ret;
}
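[Every branch in oct_cfg_adaptive_intr() follows the same "take the user's nonzero value, otherwise fall back to the driver default" shape. A compact illustration; the helper and the numbers are made up, the real fallbacks are the LIO_INTRMOD_* constants:]

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t val_or_default(uint32_t user, uint32_t def)
    {
            return user ? user : def;
    }

    int main(void)
    {
            /* 0 from ethtool means "unset", so the default wins ... */
            uint32_t check_intrvl = val_or_default(0, 100);
            /* ... and a nonzero user value always takes precedence. */
            uint32_t rate_high = val_or_default(500000, 8000);

            printf("check interval %u, high packet-rate threshold %u\n",
                   check_intrvl, rate_high);
            return 0;
    }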
@@ -800,54 +1294,82 @@ static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
static int
oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
{
- int ret;
struct octeon_device *oct = lio->oct_dev;
- struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
u32 rx_max_coalesced_frames;
- if (!intr_coal->rx_max_coalesced_frames)
- rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
- else
- rx_max_coalesced_frames = intr_coal->rx_max_coalesced_frames;
-
- /* Disable adaptive interrupt modulation */
- ret = oct_cfg_adaptive_intr(lio, intr_coal, 0);
- if (ret)
- return ret;
-
/* Config Cnt based interrupt values */
- octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
- rx_max_coalesced_frames);
- CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
+ switch (oct->chip_id) {
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX: {
+ struct octeon_cn6xxx *cn6xxx =
+ (struct octeon_cn6xxx *)oct->chip;
+
+ if (!intr_coal->rx_max_coalesced_frames)
+ rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
+ else
+ rx_max_coalesced_frames =
+ intr_coal->rx_max_coalesced_frames;
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
+ rx_max_coalesced_frames);
+ CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
return 0;
}
static int oct_cfg_rx_intrtime(struct lio *lio, struct ethtool_coalesce
*intr_coal)
{
- int ret;
struct octeon_device *oct = lio->oct_dev;
- struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
u32 time_threshold, rx_coalesce_usecs;
- if (!intr_coal->rx_coalesce_usecs)
- rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
- else
- rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
+ /* Config Time based interrupt values */
+ switch (oct->chip_id) {
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX: {
+ struct octeon_cn6xxx *cn6xxx =
+ (struct octeon_cn6xxx *)oct->chip;
+ if (!intr_coal->rx_coalesce_usecs)
+ rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
+ else
+ rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
- /* Disable adaptive interrupt modulation */
- ret = oct_cfg_adaptive_intr(lio, intr_coal, 0);
- if (ret)
- return ret;
+ time_threshold = lio_cn6xxx_get_oq_ticks(oct,
+ rx_coalesce_usecs);
+ octeon_write_csr(oct,
+ CN6XXX_SLI_OQ_INT_LEVEL_TIME,
+ time_threshold);
- /* Config Time based interrupt values */
- time_threshold = lio_cn6xxx_get_oq_ticks(oct, rx_coalesce_usecs);
- octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_threshold);
- CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
+ CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
return 0;
}
+static int
+oct_cfg_tx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal
+ __attribute__((unused)))
+{
+ struct octeon_device *oct = lio->oct_dev;
+
+ /* Config Cnt based interrupt values */
+ switch (oct->chip_id) {
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX:
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int lio_set_intr_coalesce(struct net_device *netdev,
struct ethtool_coalesce *intr_coal)
{
@@ -855,59 +1377,48 @@ static int lio_set_intr_coalesce(struct net_device *netdev,
int ret;
struct octeon_device *oct = lio->oct_dev;
u32 j, q_no;
+ int db_max, db_min;
- if ((intr_coal->tx_max_coalesced_frames >= CN6XXX_DB_MIN) &&
- (intr_coal->tx_max_coalesced_frames <= CN6XXX_DB_MAX)) {
- for (j = 0; j < lio->linfo.num_txpciq; j++) {
- q_no = lio->linfo.txpciq[j];
- oct->instr_queue[q_no]->fill_threshold =
- intr_coal->tx_max_coalesced_frames;
+ switch (oct->chip_id) {
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX:
+ db_min = CN6XXX_DB_MIN;
+ db_max = CN6XXX_DB_MAX;
+ if ((intr_coal->tx_max_coalesced_frames >= db_min) &&
+ (intr_coal->tx_max_coalesced_frames <= db_max)) {
+ for (j = 0; j < lio->linfo.num_txpciq; j++) {
+ q_no = lio->linfo.txpciq[j].s.q_no;
+ oct->instr_queue[q_no]->fill_threshold =
+ intr_coal->tx_max_coalesced_frames;
+ }
+ } else {
+ dev_err(&oct->pci_dev->dev,
+ "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
+ intr_coal->tx_max_coalesced_frames, db_min,
+ db_max);
+ return -EINVAL;
}
- } else {
- dev_err(&oct->pci_dev->dev,
- "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
- intr_coal->tx_max_coalesced_frames, CN6XXX_DB_MIN,
- CN6XXX_DB_MAX);
+ break;
+ default:
return -EINVAL;
}
- /* User requested adaptive-rx on */
- if (intr_coal->use_adaptive_rx_coalesce) {
- ret = oct_cfg_adaptive_intr(lio, intr_coal, 1);
- if (ret)
- goto ret_intrmod;
- }
+ oct->intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
+ oct->intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
- /* User requested adaptive-rx off and rx coalesce */
- if ((intr_coal->rx_coalesce_usecs) &&
- (!intr_coal->use_adaptive_rx_coalesce)) {
+ ret = oct_cfg_adaptive_intr(lio, intr_coal);
+
+ if (!intr_coal->use_adaptive_rx_coalesce) {
ret = oct_cfg_rx_intrtime(lio, intr_coal);
if (ret)
goto ret_intrmod;
- }
- /* User requested adaptive-rx off and rx coalesce */
- if ((intr_coal->rx_max_coalesced_frames) &&
- (!intr_coal->use_adaptive_rx_coalesce)) {
ret = oct_cfg_rx_intrcnt(lio, intr_coal);
if (ret)
goto ret_intrmod;
}
-
- /* User requested adaptive-rx off, so use default coalesce params */
- if ((!intr_coal->rx_max_coalesced_frames) &&
- (!intr_coal->use_adaptive_rx_coalesce) &&
- (!intr_coal->rx_coalesce_usecs)) {
- dev_info(&oct->pci_dev->dev,
- "Turning off adaptive-rx interrupt moderation\n");
- dev_info(&oct->pci_dev->dev,
- "Using RX Coalesce Default values rx_coalesce_usecs:%d rx_max_coalesced_frames:%d\n",
- CN6XXX_OQ_INTR_TIME, CN6XXX_OQ_INTR_PKT);
- ret = oct_cfg_rx_intrtime(lio, intr_coal);
- if (ret)
- goto ret_intrmod;
-
- ret = oct_cfg_rx_intrcnt(lio, intr_coal);
+ if (!intr_coal->use_adaptive_tx_coalesce) {
+ ret = oct_cfg_tx_intrcnt(lio, intr_coal);
if (ret)
goto ret_intrmod;
}
@@ -923,23 +1434,28 @@ static int lio_get_ts_info(struct net_device *netdev,
struct lio *lio = GET_LIO(netdev);
info->so_timestamping =
+#ifdef PTP_HARDWARE_TIMESTAMPING
SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+#endif
SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
+ SOF_TIMESTAMPING_SOFTWARE;
if (lio->ptp_clock)
info->phc_index = ptp_clock_index(lio->ptp_clock);
else
info->phc_index = -1;
+#ifdef PTP_HARDWARE_TIMESTAMPING
info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
+#endif
return 0;
}
@@ -950,7 +1466,6 @@ static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
struct octeon_device *oct = lio->oct_dev;
struct oct_link_info *linfo;
struct octnic_ctrl_pkt nctrl;
- struct octnic_ctrl_params nparams;
int ret = 0;
/* get the link info */
@@ -965,12 +1480,14 @@ static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->duplex != DUPLEX_FULL)))
return -EINVAL;
- /* Ethtool Support is not provided for XAUI and RXAUI Interfaces
+ /* Ethtool Support is not provided for XAUI, RXAUI, and XFI Interfaces
* as they operate at fixed Speed and Duplex settings
*/
- if (linfo->link.s.interface == INTERFACE_MODE_XAUI ||
- linfo->link.s.interface == INTERFACE_MODE_RXAUI) {
- dev_info(&oct->pci_dev->dev, "XAUI IFs settings cannot be modified.\n");
+ if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
+ linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
+ linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
+ dev_info(&oct->pci_dev->dev,
+ "Autonegotiation, duplex and speed settings cannot be modified.\n");
return -EINVAL;
}
@@ -978,9 +1495,9 @@ static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = OCTNET_CMD_SET_SETTINGS;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.wait_time = 1000;
nctrl.netpndev = (u64)netdev;
- nctrl.ncmd.s.param1 = lio->linfo.ifidx;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
/* Passing the parameters sent by ethtool like Speed, Autoneg & Duplex
@@ -990,19 +1507,17 @@ static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
/* Autoneg ON */
nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON |
OCTNIC_NCMD_AUTONEG_ON;
- nctrl.ncmd.s.param2 = ecmd->advertising;
+ nctrl.ncmd.s.param1 = ecmd->advertising;
} else {
/* Autoneg OFF */
nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON;
- nctrl.ncmd.s.param3 = ecmd->duplex;
+ nctrl.ncmd.s.param2 = ecmd->duplex;
- nctrl.ncmd.s.param2 = ecmd->speed;
+ nctrl.ncmd.s.param1 = ecmd->speed;
}
- nparams.resp_order = OCTEON_RESP_ORDERED;
-
- ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
dev_err(&oct->pci_dev->dev, "Failed to set settings\n");
return -1;
@@ -1026,7 +1541,7 @@ static int lio_nway_reset(struct net_device *netdev)
}
/* Return register dump len. */
-static int lio_get_regs_len(struct net_device *dev)
+static int lio_get_regs_len(struct net_device *dev __attribute__((unused)))
{
return OCT_ETHTOOL_REGDUMP_LEN;
}
@@ -1170,13 +1685,12 @@ static void lio_get_regs(struct net_device *dev,
int len = 0;
struct octeon_device *oct = lio->oct_dev;
- memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
regs->version = OCT_ETHTOOL_REGSVER;
switch (oct->chip_id) {
- /* case OCTEON_CN73XX: Todo */
case OCTEON_CN68XX:
case OCTEON_CN66XX:
+ memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
len += cn6xxx_read_csr_reg(regbuf + len, oct);
len += cn6xxx_read_config_reg(regbuf + len, oct);
break;
@@ -1186,6 +1700,23 @@ static void lio_get_regs(struct net_device *dev,
}
}
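+/* ethtool private flags live in oct_dev->priv_flags.  The only flag
+ * handled here is OCT_PRIV_FLAG_TX_BYTES which, per its name, drives
+ * Tx interrupt coalescing by byte counts.
+ */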
+static u32 lio_get_priv_flags(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ return lio->oct_dev->priv_flags;
+}
+
+static int lio_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+ struct lio *lio = GET_LIO(netdev);
+ bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES));
+
+ lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES,
+ intr_by_tx_bytes);
+ return 0;
+}
+
static const struct ethtool_ops lio_ethtool_ops = {
.get_settings = lio_get_settings,
.get_link = ethtool_op_get_link,
@@ -1207,6 +1738,8 @@ static const struct ethtool_ops lio_ethtool_ops = {
.set_settings = lio_set_settings,
.get_coalesce = lio_get_intr_coalesce,
.set_coalesce = lio_set_intr_coalesce,
+ .get_priv_flags = lio_get_priv_flags,
+ .set_priv_flags = lio_set_priv_flags,
.get_ts_info = lio_get_ts_info,
};
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 7708d6b14..f00b44bf9 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -20,24 +20,12 @@
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/version.h>
-#include <linux/module.h>
-#include <linux/crc32.h>
-#include <linux/dma-mapping.h>
#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/ip.h>
-#include <net/ip.h>
-#include <linux/ipv6.h>
#include <linux/net_tstamp.h>
#include <linux/if_vlan.h>
#include <linux/firmware.h>
-#include <linux/ethtool.h>
#include <linux/ptp_clock_kernel.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/workqueue.h>
-#include <linux/interrupt.h>
-#include "octeon_config.h"
+#include <net/vxlan.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
@@ -48,7 +36,6 @@
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
-#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"
@@ -70,6 +57,9 @@ MODULE_PARM_DESC(console_bitmask,
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
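+/* Bump a named stats counter on instruction queue iq_no by count. */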
+#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \
+ (octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)
+
static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
@@ -82,6 +72,8 @@ static int conf_type;
module_param(conf_type, int, 0);
MODULE_PARM_DESC(conf_type, "select octeon configuration 0 default 1 ovs");
+static int ptp_enable = 1;
+
/* Bit mask values for lio->ifstate */
#define LIO_IFSTATE_DROQ_OPS 0x01
#define LIO_IFSTATE_REGISTERED 0x02
@@ -164,6 +156,8 @@ struct octnic_gather {
* received from the IP layer.
*/
struct octeon_sg_entry *sg;
+
+ u64 sg_dma_ptr;
};
/** This structure is used by NIC driver to store information required
@@ -218,8 +212,8 @@ static void octeon_droq_bh(unsigned long pdev)
(struct octeon_device_priv *)oct->priv;
/* for (q_no = 0; q_no < oct->num_oqs; q_no++) { */
- for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES; q_no++) {
- if (!(oct->io_qmask.oq & (1UL << q_no)))
+ for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
+ if (!(oct->io_qmask.oq & (1ULL << q_no)))
continue;
reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
MAX_PACKET_BUDGET);
@@ -239,11 +233,10 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct)
do {
pending_pkts = 0;
- for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
- if (!(oct->io_qmask.oq & (1UL << i)))
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.oq & (1ULL << i)))
continue;
- pkt_cnt += octeon_droq_check_hw_for_pkts(oct,
- oct->droq[i]);
+ pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
}
if (pkt_cnt > 0) {
pending_pkts += pkt_cnt;
@@ -359,7 +352,7 @@ static int wait_for_pending_requests(struct octeon_device *oct)
[OCTEON_ORDERED_SC_LIST].pending_req_count);
if (pcount)
schedule_timeout_uninterruptible(HZ / 10);
- else
+ else
break;
}
@@ -390,10 +383,10 @@ static inline void pcierror_quiesce_device(struct octeon_device *oct)
dev_err(&oct->pci_dev->dev, "There were pending requests\n");
/* Force all requests waiting to be fetched by OCTEON to complete. */
- for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
struct octeon_instr_queue *iq;
- if (!(oct->io_qmask.iq & (1UL << i)))
+ if (!(oct->io_qmask.iq & (1ULL << i)))
continue;
iq = oct->instr_queue[i];
@@ -403,7 +396,7 @@ static inline void pcierror_quiesce_device(struct octeon_device *oct)
iq->octeon_read_index = iq->host_write_index;
iq->stats.instr_processed +=
atomic_read(&iq->instr_pending);
- lio_process_iq_request_list(oct, iq);
+ lio_process_iq_request_list(oct, iq, 0);
spin_unlock_bh(&iq->lock);
}
}
@@ -498,7 +491,8 @@ static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
* \brief mmio handler
* @param pdev Pointer to PCI device
*/
-static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev *pdev)
+static pci_ers_result_t liquidio_pcie_mmio_enabled(
+ struct pci_dev *pdev __attribute__((unused)))
{
/* We should never hit this since we never ask for a reset for a Fatal
* Error. We always return DISCONNECT in io_error above.
@@ -514,7 +508,8 @@ static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev *pdev)
* Restart the card from scratch, as if from a cold-boot. Implementation
* resembles the first-half of the octeon_resume routine.
*/
-static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev *pdev)
+static pci_ers_result_t liquidio_pcie_slot_reset(
+ struct pci_dev *pdev __attribute__((unused)))
{
/* We should never hit this since we never ask for a reset for a Fatal
* Error. We always return DISCONNECT in io_error above.
@@ -531,7 +526,7 @@ static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev *pdev)
* its OK to resume normal operation. Implementation resembles the
* second-half of the octeon_resume routine.
*/
-static void liquidio_pcie_resume(struct pci_dev *pdev)
+static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
{
/* Nothing to be done here. */
}
@@ -542,7 +537,8 @@ static void liquidio_pcie_resume(struct pci_dev *pdev)
* @param pdev Pointer to PCI device
* @param state state to suspend to
*/
-static int liquidio_suspend(struct pci_dev *pdev, pm_message_t state)
+static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
+ pm_message_t state __attribute__((unused)))
{
return 0;
}
@@ -551,7 +547,7 @@ static int liquidio_suspend(struct pci_dev *pdev, pm_message_t state)
* \brief called when resuming
* @param pdev Pointer to PCI device
*/
-static int liquidio_resume(struct pci_dev *pdev)
+static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
{
return 0;
}
@@ -676,12 +672,24 @@ static inline void txqs_start(struct net_device *netdev)
*/
static inline void txqs_wake(struct net_device *netdev)
{
+ struct lio *lio = GET_LIO(netdev);
+
if (netif_is_multiqueue(netdev)) {
int i;
- for (i = 0; i < netdev->num_tx_queues; i++)
- netif_wake_subqueue(netdev, i);
+ for (i = 0; i < netdev->num_tx_queues; i++) {
+ int qno = lio->linfo.txpciq[i %
+ (lio->linfo.num_txpciq)].s.q_no;
+
+ if (__netif_subqueue_stopped(netdev, i)) {
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
+ tx_restart, 1);
+ netif_wake_subqueue(netdev, i);
+ }
+ }
} else {
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
+ tx_restart, 1);
netif_wake_queue(netdev);
}
}
@@ -703,7 +711,7 @@ static void start_txq(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
- if (lio->linfo.link.s.status) {
+ if (lio->linfo.link.s.link_up) {
txqs_start(netdev);
return;
}
@@ -750,16 +758,23 @@ static inline int check_txq_status(struct lio *lio)
/* check each sub-queue state */
for (q = 0; q < numqs; q++) {
- iq = lio->linfo.txpciq[q & (lio->linfo.num_txpciq - 1)];
+ iq = lio->linfo.txpciq[q %
+ (lio->linfo.num_txpciq)].s.q_no;
if (octnet_iq_is_full(lio->oct_dev, iq))
continue;
- wake_q(lio->netdev, q);
- ret_val++;
+ if (__netif_subqueue_stopped(lio->netdev, q)) {
+ wake_q(lio->netdev, q);
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
+ tx_restart, 1);
+ ret_val++;
+ }
}
} else {
if (octnet_iq_is_full(lio->oct_dev, lio->txq))
return 0;
wake_q(lio->netdev, lio->txq);
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
+ tx_restart, 1);
ret_val = 1;
}
return ret_val;
@@ -785,64 +800,116 @@ static inline struct list_head *list_delete_head(struct list_head *root)
}
/**
- * \brief Delete gather list
+ * \brief Delete gather lists
* @param lio per-network private data
*/
-static void delete_glist(struct lio *lio)
+static void delete_glists(struct lio *lio)
{
struct octnic_gather *g;
+ int i;
- do {
- g = (struct octnic_gather *)
- list_delete_head(&lio->glist);
- if (g) {
- if (g->sg)
- kfree((void *)((unsigned long)g->sg -
- g->adjust));
- kfree(g);
- }
- } while (g);
+ if (!lio->glist)
+ return;
+
+ for (i = 0; i < lio->linfo.num_txpciq; i++) {
+ do {
+ g = (struct octnic_gather *)
+ list_delete_head(&lio->glist[i]);
+ if (g) {
+ if (g->sg) {
+ dma_unmap_single(&lio->oct_dev->
+ pci_dev->dev,
+ g->sg_dma_ptr,
+ g->sg_size,
+ DMA_TO_DEVICE);
+ kfree((void *)((unsigned long)g->sg -
+ g->adjust));
+ }
+ kfree(g);
+ }
+ } while (g);
+ }
+
+ kfree((void *)lio->glist);
}
/**
- * \brief Setup gather list
+ * \brief Setup gather lists
* @param lio per-network private data
*/
-static int setup_glist(struct lio *lio)
+static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
{
- int i;
+ int i, j;
struct octnic_gather *g;
- INIT_LIST_HEAD(&lio->glist);
+ lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
+ GFP_KERNEL);
+ if (!lio->glist_lock)
+ return 1;
- for (i = 0; i < lio->tx_qsize; i++) {
- g = kzalloc(sizeof(*g), GFP_KERNEL);
- if (!g)
- break;
+ lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
+ GFP_KERNEL);
+ if (!lio->glist) {
+ kfree((void *)lio->glist_lock);
+ return 1;
+ }
- g->sg_size =
- ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
+ for (i = 0; i < num_iqs; i++) {
+ int numa_node = cpu_to_node(i % num_online_cpus());
- g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
- if (!g->sg) {
- kfree(g);
- break;
+ spin_lock_init(&lio->glist_lock[i]);
+
+ INIT_LIST_HEAD(&lio->glist[i]);
+
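+		/* Prefer NUMA-local allocations for the gather entries;
+		 * fall back to a plain allocation when the node-local
+		 * attempt fails.
+		 */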
+ for (j = 0; j < lio->tx_qsize; j++) {
+ g = kzalloc_node(sizeof(*g), GFP_KERNEL,
+ numa_node);
+ if (!g)
+ g = kzalloc(sizeof(*g), GFP_KERNEL);
+ if (!g)
+ break;
+
+ g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) *
+ OCT_SG_ENTRY_SIZE);
+
+ g->sg = kmalloc_node(g->sg_size + 8,
+ GFP_KERNEL, numa_node);
+ if (!g->sg)
+ g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
+ if (!g->sg) {
+ kfree(g);
+ break;
+ }
+
+			/* The gather component should be aligned on a
+			 * 64-bit boundary.
+			 */
+ if (((unsigned long)g->sg) & 7) {
+ g->adjust = 8 - (((unsigned long)g->sg) & 7);
+ g->sg = (struct octeon_sg_entry *)
+ ((unsigned long)g->sg + g->adjust);
+ }
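+			/* Map the gather list once at setup time; the Tx
+			 * and free paths only dma_sync it, avoiding a
+			 * map/unmap per packet.
+			 */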
+ g->sg_dma_ptr = dma_map_single(&oct->pci_dev->dev,
+ g->sg, g->sg_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&oct->pci_dev->dev,
+ g->sg_dma_ptr)) {
+ kfree((void *)((unsigned long)g->sg -
+ g->adjust));
+ kfree(g);
+ break;
+ }
+
+ list_add_tail(&g->list, &lio->glist[i]);
}
- /* The gather component should be aligned on 64-bit boundary */
- if (((unsigned long)g->sg) & 7) {
- g->adjust = 8 - (((unsigned long)g->sg) & 7);
- g->sg = (struct octeon_sg_entry *)
- ((unsigned long)g->sg + g->adjust);
+ if (j != lio->tx_qsize) {
+ delete_glists(lio);
+ return 1;
}
- list_add_tail(&g->list, &lio->glist);
}
- if (i == lio->tx_qsize)
- return 0;
-
- delete_glist(lio);
- return 1;
+ return 0;
}
/**
@@ -856,7 +923,7 @@ static void print_link_info(struct net_device *netdev)
if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) {
struct oct_link_info *linfo = &lio->linfo;
- if (linfo->link.s.status) {
+ if (linfo->link.s.link_up) {
netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
linfo->link.s.speed,
(linfo->link.s.duplex) ? "Full" : "Half");
@@ -878,13 +945,15 @@ static inline void update_link_status(struct net_device *netdev,
union oct_link_status *ls)
{
struct lio *lio = GET_LIO(netdev);
+ int changed = (lio->linfo.link.u64 != ls->u64);
- if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) {
- lio->linfo.link.u64 = ls->u64;
+ lio->linfo.link.u64 = ls->u64;
+ if ((lio->intf_open) && (changed)) {
print_link_info(netdev);
+ lio->link_changes++;
- if (lio->linfo.link.s.status) {
+ if (lio->linfo.link.s.link_up) {
netif_carrier_on(netdev);
/* start_txq(netdev); */
txqs_wake(netdev);
@@ -895,6 +964,42 @@ static inline void update_link_status(struct net_device *netdev,
}
}
+/* Runs in interrupt context. */
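+/* Wake a stopped Tx queue once its instruction queue has room again. */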
+static void update_txq_status(struct octeon_device *oct, int iq_num)
+{
+ struct net_device *netdev;
+ struct lio *lio;
+ struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
+
+ /*octeon_update_iq_read_idx(oct, iq);*/
+
+ netdev = oct->props[iq->ifidx].netdev;
+
+ /* This is needed because the first IQ does not have
+ * a netdev associated with it.
+ */
+ if (!netdev)
+ return;
+
+ lio = GET_LIO(netdev);
+ if (netif_is_multiqueue(netdev)) {
+ if (__netif_subqueue_stopped(netdev, iq->q_index) &&
+ lio->linfo.link.s.link_up &&
+ (!octnet_iq_is_full(oct, iq_num))) {
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
+ tx_restart, 1);
+ netif_wake_subqueue(netdev, iq->q_index);
+ } else {
+ if (!octnet_iq_is_full(oct, lio->txq)) {
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
+ lio->txq,
+ tx_restart, 1);
+ wake_q(netdev, lio->txq);
+ }
+ }
+ }
+}
+
/**
 * \brief Droq packet processor scheduler
* @param oct octeon device
@@ -908,8 +1013,9 @@ void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
struct octeon_droq *droq;
if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
- for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES; oq_no++) {
- if (!(oct->droq_intr & (1 << oq_no)))
+ for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
+ oq_no++) {
+ if (!(oct->droq_intr & (1ULL << oq_no)))
continue;
droq = oct->droq[oq_no];
@@ -985,7 +1091,9 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
* @param pdev PCI device structure
* @param ent unused
*/
-static int liquidio_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int
+liquidio_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent __attribute__((unused)))
{
struct octeon_device *oct_dev = NULL;
struct handshake *hs;
@@ -1020,6 +1128,9 @@ static int liquidio_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
}
+ oct_dev->rx_pause = 1;
+ oct_dev->tx_pause = 1;
+
dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
return 0;
@@ -1085,19 +1196,13 @@ static void octeon_destroy_resources(struct octeon_device *oct)
if (oct->flags & LIO_FLAG_MSI_ENABLED)
pci_disable_msi(oct->pci_dev);
- /* Soft reset the octeon device before exiting */
- oct->fn_list.soft_reset(oct);
-
- /* Disable the device, releasing the PCI INT */
- pci_disable_device(oct->pci_dev);
-
/* fallthrough */
case OCT_DEV_IN_RESET:
case OCT_DEV_DROQ_INIT_DONE:
/*atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);*/
mdelay(100);
- for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
- if (!(oct->io_qmask.oq & (1UL << i)))
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.oq & (1ULL << i)))
continue;
octeon_delete_droq(oct, i);
}
@@ -1124,8 +1229,8 @@ static void octeon_destroy_resources(struct octeon_device *oct)
/* fallthrough */
case OCT_DEV_INSTR_QUEUE_INIT_DONE:
- for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
- if (!(oct->io_qmask.iq & (1UL << i)))
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.iq & (1ULL << i)))
continue;
octeon_delete_instr_queue(oct, i);
}
@@ -1137,14 +1242,21 @@ static void octeon_destroy_resources(struct octeon_device *oct)
/* fallthrough */
case OCT_DEV_PCI_MAP_DONE:
+
+ /* Soft reset the octeon device before exiting */
+ oct->fn_list.soft_reset(oct);
+
octeon_unmap_pci_barx(oct, 0);
octeon_unmap_pci_barx(oct, 1);
/* fallthrough */
case OCT_DEV_BEGIN_STATE:
+ /* Disable the device, releasing the PCI INT */
+ pci_disable_device(oct->pci_dev);
+
/* Nothing to be done here either */
break;
- } /* end switch(oct->status) */
+ } /* end switch (oct->status) */
tasklet_kill(&oct_priv->droq_tasklet);
}
@@ -1157,18 +1269,15 @@ static void octeon_destroy_resources(struct octeon_device *oct)
static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
struct octnic_ctrl_pkt nctrl;
- struct octnic_ctrl_params nparams;
memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
nctrl.ncmd.s.cmd = OCTNET_CMD_RX_CTL;
- nctrl.ncmd.s.param1 = lio->linfo.ifidx;
- nctrl.ncmd.s.param2 = start_stop;
+ nctrl.ncmd.s.param1 = start_stop;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.netpndev = (u64)lio->netdev;
- nparams.resp_order = OCTEON_RESP_NORESPONSE;
-
- if (octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams) < 0)
+ if (octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl) < 0)
netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
}
@@ -1184,6 +1293,7 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
struct net_device *netdev = oct->props[ifidx].netdev;
struct lio *lio;
+ struct napi_struct *napi, *n;
if (!netdev) {
dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
@@ -1200,13 +1310,22 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
txqs_stop(netdev);
+ if (oct->props[lio->ifidx].napi_enabled == 1) {
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ napi_disable(napi);
+
+ oct->props[lio->ifidx].napi_enabled = 0;
+ }
+
if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
unregister_netdev(netdev);
- delete_glist(lio);
+ delete_glists(lio);
free_netdev(netdev);
+ oct->props[ifidx].gmxport = -1;
+
oct->props[ifidx].netdev = NULL;
}
@@ -1225,10 +1344,15 @@ static int liquidio_stop_nic_module(struct octeon_device *oct)
return 1;
}
+ spin_lock_bh(&oct->cmd_resp_wqlock);
+ oct->cmd_resp_state = OCT_DRV_OFFLINE;
+ spin_unlock_bh(&oct->cmd_resp_wqlock);
+
for (i = 0; i < oct->ifcount; i++) {
lio = GET_LIO(oct->props[i].netdev);
for (j = 0; j < lio->linfo.num_rxpciq; j++)
- octeon_unregister_droq_ops(oct, lio->linfo.rxpciq[j]);
+ octeon_unregister_droq_ops(oct,
+ lio->linfo.rxpciq[j].s.q_no);
}
for (i = 0; i < oct->ifcount; i++)
@@ -1272,6 +1396,7 @@ static int octeon_chip_specific_setup(struct octeon_device *oct)
{
u32 dev_id, rev_id;
int ret = 1;
+ char *s;
pci_read_config_dword(oct->pci_dev, 0, &dev_id);
pci_read_config_dword(oct->pci_dev, 8, &rev_id);
@@ -1281,22 +1406,27 @@ static int octeon_chip_specific_setup(struct octeon_device *oct)
case OCTEON_CN68XX_PCIID:
oct->chip_id = OCTEON_CN68XX;
ret = lio_setup_cn68xx_octeon_device(oct);
+ s = "CN68XX";
break;
case OCTEON_CN66XX_PCIID:
oct->chip_id = OCTEON_CN66XX;
ret = lio_setup_cn66xx_octeon_device(oct);
+ s = "CN66XX";
break;
+
default:
+ s = "?";
dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
dev_id);
}
if (!ret)
- dev_info(&oct->pci_dev->dev, "CN68XX PASS%d.%d %s\n",
+ dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
OCTEON_MAJOR_REV(oct),
OCTEON_MINOR_REV(oct),
- octeon_get_conf(oct)->card_name);
+ octeon_get_conf(oct)->card_name,
+ LIQUIDIO_VERSION);
return ret;
}
@@ -1324,6 +1454,16 @@ static int octeon_pci_os_setup(struct octeon_device *oct)
return 0;
}
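+/* Map an skb to one of this interface's Tx PCI queues; single-queue
+ * netdevs always use queue 0.
+ */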
+static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
+{
+ int q = 0;
+
+ if (netif_is_multiqueue(lio->netdev))
+ q = skb->queue_mapping % lio->linfo.num_txpciq;
+
+ return q;
+}
+
/**
* \brief Check Tx queue state for a given network buffer
* @param lio per-network private data
@@ -1335,14 +1475,19 @@ static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
if (netif_is_multiqueue(lio->netdev)) {
q = skb->queue_mapping;
- iq = lio->linfo.txpciq[(q & (lio->linfo.num_txpciq - 1))];
+ iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no;
} else {
iq = lio->txq;
+ q = iq;
}
if (octnet_iq_is_full(lio->oct_dev, iq))
return 0;
- wake_q(lio->netdev, q);
+
+ if (__netif_subqueue_stopped(lio->netdev, q)) {
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
+ wake_q(lio->netdev, q);
+ }
return 1;
}
@@ -1365,7 +1510,7 @@ static void free_netbuf(void *buf)
check_txq_state(lio, skb);
- recv_buffer_free((struct sk_buff *)skb);
+ tx_buffer_free(skb);
}
/**
@@ -1378,7 +1523,7 @@ static void free_netsgbuf(void *buf)
struct sk_buff *skb;
struct lio *lio;
struct octnic_gather *g;
- int i, frags;
+ int i, frags, iq;
finfo = (struct octnet_buf_free_info *)buf;
skb = finfo->skb;
@@ -1400,17 +1545,17 @@ static void free_netsgbuf(void *buf)
i++;
}
- dma_unmap_single(&lio->oct_dev->pci_dev->dev,
- finfo->dptr, g->sg_size,
- DMA_TO_DEVICE);
+ dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
+ g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
- spin_lock(&lio->lock);
- list_add_tail(&g->list, &lio->glist);
- spin_unlock(&lio->lock);
+ iq = skb_iq(lio, skb);
+ spin_lock(&lio->glist_lock[iq]);
+ list_add_tail(&g->list, &lio->glist[iq]);
+ spin_unlock(&lio->glist_lock[iq]);
check_txq_state(lio, skb); /* mq support: sub-queue state check */
- recv_buffer_free((struct sk_buff *)skb);
+ tx_buffer_free(skb);
}
/**
@@ -1424,7 +1569,7 @@ static void free_netsgbuf_with_resp(void *buf)
struct sk_buff *skb;
struct lio *lio;
struct octnic_gather *g;
- int i, frags;
+ int i, frags, iq;
sc = (struct octeon_soft_command *)buf;
skb = (struct sk_buff *)sc->callback_arg;
@@ -1448,13 +1593,14 @@ static void free_netsgbuf_with_resp(void *buf)
i++;
}
- dma_unmap_single(&lio->oct_dev->pci_dev->dev,
- finfo->dptr, g->sg_size,
- DMA_TO_DEVICE);
+ dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
+ g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
+
+ iq = skb_iq(lio, skb);
- spin_lock(&lio->lock);
- list_add_tail(&g->list, &lio->glist);
- spin_unlock(&lio->lock);
+ spin_lock(&lio->glist_lock[iq]);
+ list_add_tail(&g->list, &lio->glist[iq]);
+ spin_unlock(&lio->glist_lock[iq]);
/* Don't free the skb yet */
@@ -1567,8 +1713,10 @@ static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
* @param rq request
* @param on is it on
*/
-static int liquidio_ptp_enable(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq, int on)
+static int
+liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
+ struct ptp_clock_request *rq __attribute__((unused)),
+ int on __attribute__((unused)))
{
return -EOPNOTSUPP;
}
@@ -1655,6 +1803,7 @@ static int load_firmware(struct octeon_device *oct)
if (ret) {
dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n.",
fw_name);
+ release_firmware(fw);
return ret;
}
@@ -1708,7 +1857,7 @@ static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
* @param buf pointer to resp structure
*/
static void if_cfg_callback(struct octeon_device *oct,
- u32 status,
+ u32 status __attribute__((unused)),
void *buf)
{
struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
@@ -1722,7 +1871,10 @@ static void if_cfg_callback(struct octeon_device *oct,
if (resp->status)
dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
CVM_CAST64(resp->status));
- ACCESS_ONCE(ctx->cond) = 1;
+ WRITE_ONCE(ctx->cond, 1);
+
+ snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
+ resp->cfg_info.liquidio_firmware_version);
/* This barrier is required to be sure that the response has been
* written fully before waking up the handler
@@ -1739,16 +1891,16 @@ static void if_cfg_callback(struct octeon_device *oct,
* @returns selected queue number
*/
static u16 select_q(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ void *accel_priv __attribute__((unused)),
+ select_queue_fallback_t fallback __attribute__((unused)))
{
- int qindex;
+ u32 qindex = 0;
struct lio *lio;
lio = GET_LIO(dev);
- /* select queue on chosen queue_mapping or core */
- qindex = skb_rx_queue_recorded(skb) ?
- skb_get_rx_queue(skb) : smp_processor_id();
- return (u16)(qindex & (lio->linfo.num_txpciq - 1));
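+	/* Use the stack's flow hash so a flow sticks to one queue, then
+	 * fold the result into the available Tx queues.
+	 */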
+ qindex = skb_tx_hash(dev, skb);
+
+ return (u16)(qindex % (lio->linfo.num_txpciq));
}
 /** Routine to push packets arriving on the Octeon interface up to the network layer.
@@ -1757,26 +1909,28 @@ static u16 select_q(struct net_device *dev, struct sk_buff *skb,
* @param len - size of total data received.
* @param rh - Control header associated with the packet
* @param param - additional control data with the packet
+ * @param arg - farg registered in droq_ops
*/
static void
-liquidio_push_packet(u32 octeon_id,
+liquidio_push_packet(u32 octeon_id __attribute__((unused)),
void *skbuff,
u32 len,
union octeon_rh *rh,
- void *param)
+ void *param,
+ void *arg)
{
struct napi_struct *napi = param;
- struct octeon_device *oct = lio_get_device(octeon_id);
struct sk_buff *skb = (struct sk_buff *)skbuff;
struct skb_shared_hwtstamps *shhwtstamps;
u64 ns;
- struct net_device *netdev =
- (struct net_device *)oct->props[rh->r_dh.link].netdev;
+ u16 vtag = 0;
+ struct net_device *netdev = (struct net_device *)arg;
struct octeon_droq *droq = container_of(param, struct octeon_droq,
napi);
if (netdev) {
int packet_was_received;
struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
/* Do not proceed if the interface is not in RUNNING state. */
if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
@@ -1787,32 +1941,86 @@ liquidio_push_packet(u32 octeon_id,
skb->dev = netdev;
- if (rh->r_dh.has_hwtstamp) {
- /* timestamp is included from the hardware at the
- * beginning of the packet.
- */
- if (ifstate_check(lio,
- LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
- /* Nanoseconds are in the first 64-bits
- * of the packet.
+ skb_record_rx_queue(skb, droq->q_no);
+ if (likely(len > MIN_SKB_SIZE)) {
+ struct octeon_skb_page_info *pg_info;
+ unsigned char *va;
+
+ pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+ if (pg_info->page) {
+ /* For Paged allocation use the frags */
+ va = page_address(pg_info->page) +
+ pg_info->page_offset;
+ memcpy(skb->data, va, MIN_SKB_SIZE);
+ skb_put(skb, MIN_SKB_SIZE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ pg_info->page,
+ pg_info->page_offset +
+ MIN_SKB_SIZE,
+ len - MIN_SKB_SIZE,
+ LIO_RXBUFFER_SZ);
+ }
+ } else {
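+			/* Small packet: copy the whole payload into the
+			 * linear area and release the receive page.
+			 */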
+ struct octeon_skb_page_info *pg_info =
+ ((struct octeon_skb_page_info *)(skb->cb));
+ skb_copy_to_linear_data(skb, page_address(pg_info->page)
+ + pg_info->page_offset, len);
+ skb_put(skb, len);
+ put_page(pg_info->page);
+ }
+
+ if (((oct->chip_id == OCTEON_CN66XX) ||
+ (oct->chip_id == OCTEON_CN68XX)) &&
+ ptp_enable) {
+ if (rh->r_dh.has_hwtstamp) {
+ /* timestamp is included from the hardware at
+ * the beginning of the packet.
*/
- memcpy(&ns, (skb->data), sizeof(ns));
- shhwtstamps = skb_hwtstamps(skb);
- shhwtstamps->hwtstamp =
- ns_to_ktime(ns + lio->ptp_adjust);
+ if (ifstate_check
+ (lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
+ /* Nanoseconds are in the first 64-bits
+ * of the packet.
+ */
+ memcpy(&ns, (skb->data), sizeof(ns));
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp =
+ ns_to_ktime(ns +
+ lio->ptp_adjust);
+ }
+ skb_pull(skb, sizeof(ns));
}
- skb_pull(skb, sizeof(ns));
}
skb->protocol = eth_type_trans(skb, skb->dev);
-
if ((netdev->features & NETIF_F_RXCSUM) &&
- (rh->r_dh.csum_verified == CNNIC_CSUM_VERIFIED))
+ (((rh->r_dh.encap_on) &&
+ (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
+ (!(rh->r_dh.encap_on) &&
+ (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
/* checksum has already been verified */
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb->ip_summed = CHECKSUM_NONE;
+		/* Set the encapsulation fields based on the status received
+		 * from the firmware
+ */
+ if (rh->r_dh.encap_on) {
+ skb->encapsulation = 1;
+ skb->csum_level = 1;
+ droq->stats.rx_vxlan++;
+ }
+
+ /* inbound VLAN tag */
+ if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (rh->r_dh.vlan != 0)) {
+ u16 vid = rh->r_dh.vlan;
+ u16 priority = rh->r_dh.priority;
+
+ vtag = priority << 13 | vid;
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
+ }
+
packet_was_received = napi_gro_receive(napi, skb) != GRO_DROP;
if (packet_was_received) {
@@ -1867,39 +2075,6 @@ static void liquidio_napi_drv_callback(void *arg)
}
/**
- * \brief Main NAPI poll function
- * @param droq octeon output queue
- * @param budget maximum number of items to process
- */
-static int liquidio_napi_do_rx(struct octeon_droq *droq, int budget)
-{
- int work_done;
- struct lio *lio = GET_LIO(droq->napi.dev);
- struct octeon_device *oct = lio->oct_dev;
-
- work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
- POLL_EVENT_PROCESS_PKTS,
- budget);
- if (work_done < 0) {
- netif_info(lio, rx_err, lio->netdev,
- "Receive work_done < 0, rxq:%d\n", droq->q_no);
- goto octnet_napi_finish;
- }
-
- if (work_done > budget)
- dev_err(&oct->pci_dev->dev, ">>>> %s work_done: %d budget: %d\n",
- __func__, work_done, budget);
-
- return work_done;
-
-octnet_napi_finish:
- napi_complete(&droq->napi);
- octeon_process_droq_poll_cmd(oct, droq->q_no, POLL_EVENT_ENABLE_INTR,
- 0);
- return 0;
-}
-
-/**
* \brief Entry point for NAPI polling
* @param napi NAPI structure
* @param budget maximum number of items to process
@@ -1908,35 +2083,57 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
{
struct octeon_droq *droq;
int work_done;
+ int tx_done = 0, iq_no;
+ struct octeon_instr_queue *iq;
+ struct octeon_device *oct;
droq = container_of(napi, struct octeon_droq, napi);
+ oct = droq->oct_dev;
+ iq_no = droq->q_no;
+ /* Handle Droq descriptors */
+ work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
+ POLL_EVENT_PROCESS_PKTS,
+ budget);
- work_done = liquidio_napi_do_rx(droq, budget);
+ /* Flush the instruction queue */
+ iq = oct->instr_queue[iq_no];
+ if (iq) {
+		/* Process iq buffers within the budget limits */
+ tx_done = octeon_flush_iq(oct, iq, 1, budget);
+		/* Update the iq read index rather than waiting for the next
+		 * interrupt.
+		 */
+ update_txq_status(oct, iq_no);
+ /*tx_done = (iq->flush_index == iq->octeon_read_index);*/
+ } else {
+ dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
+ __func__, iq_no);
+ }
- if (work_done < budget) {
+ if ((work_done < budget) && (tx_done)) {
napi_complete(napi);
octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
POLL_EVENT_ENABLE_INTR, 0);
return 0;
}
- return work_done;
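+	/* Returning the full budget when tx work remains keeps NAPI
+	 * polling instead of re-enabling interrupts.
+	 */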
+ return (!tx_done) ? (budget) : (work_done);
}
/**
* \brief Setup input and output queues
* @param octeon_dev octeon device
- * @param net_device Net device
+ * @param ifidx Interface Index
*
* Note: Queues are with respect to the octeon device. Thus
* an input queue is for egress packets, and output queues
* are for ingress packets.
*/
static inline int setup_io_queues(struct octeon_device *octeon_dev,
- struct net_device *net_device)
+ int ifidx)
{
- static int first_time = 1;
- static struct octeon_droq_ops droq_ops;
+ struct octeon_droq_ops droq_ops;
+ struct net_device *netdev;
static int cpu_id;
static int cpu_id_modulus;
struct octeon_droq *droq;
@@ -1945,23 +2142,26 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev,
struct lio *lio;
int num_tx_descs;
- lio = GET_LIO(net_device);
- if (first_time) {
- first_time = 0;
- memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
+ netdev = octeon_dev->props[ifidx].netdev;
- droq_ops.fptr = liquidio_push_packet;
+ lio = GET_LIO(netdev);
- droq_ops.poll_mode = 1;
- droq_ops.napi_fn = liquidio_napi_drv_callback;
- cpu_id = 0;
- cpu_id_modulus = num_present_cpus();
- }
+ memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
+
+ droq_ops.fptr = liquidio_push_packet;
+ droq_ops.farg = (void *)netdev;
+
+ droq_ops.poll_mode = 1;
+ droq_ops.napi_fn = liquidio_napi_drv_callback;
+ cpu_id = 0;
+ cpu_id_modulus = num_present_cpus();
/* set up DROQs. */
for (q = 0; q < lio->linfo.num_rxpciq; q++) {
- q_no = lio->linfo.rxpciq[q];
-
+ q_no = lio->linfo.rxpciq[q].s.q_no;
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "setup_io_queues index:%d linfo.rxpciq.s.q_no:%d\n",
+ q, q_no);
retval = octeon_setup_droq(octeon_dev, q_no,
CFG_GET_NUM_RX_DESCS_NIC_IF
(octeon_get_conf(octeon_dev),
@@ -1978,7 +2178,11 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev,
droq = octeon_dev->droq[q_no];
napi = &droq->napi;
- netif_napi_add(net_device, napi, liquidio_napi_poll, 64);
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "netif_napi_add netdev:%llx oct:%llx\n",
+ (u64)netdev,
+ (u64)octeon_dev);
+ netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
/* designate a CPU for this droq */
droq->cpu_id = cpu_id;
@@ -1994,9 +2198,9 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev,
num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf
(octeon_dev),
lio->ifidx);
- retval = octeon_setup_iq(octeon_dev, lio->linfo.txpciq[q],
- num_tx_descs,
- netdev_get_tx_queue(net_device, q));
+ retval = octeon_setup_iq(octeon_dev, ifidx, q,
+ lio->linfo.txpciq[q], num_tx_descs,
+ netdev_get_tx_queue(netdev, q));
if (retval) {
dev_err(&octeon_dev->pci_dev->dev,
" %s : Runtime IQ(TxQ) creation failed.\n",
@@ -2034,7 +2238,8 @@ static inline void setup_tx_poll_fn(struct net_device *netdev)
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
- lio->txq_status_wq.wq = create_workqueue("txq-status");
+ lio->txq_status_wq.wq = alloc_workqueue("txq-status",
+ WQ_MEM_RECLAIM, 0);
if (!lio->txq_status_wq.wq) {
dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
return;
@@ -2046,6 +2251,14 @@ static inline void setup_tx_poll_fn(struct net_device *netdev)
&lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
}
+static inline void cleanup_tx_poll_fn(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
+ destroy_workqueue(lio->txq_status_wq.wq);
+}
+
/**
* \brief Net device open for LiquidIO
* @param netdev network device
@@ -2056,17 +2269,22 @@ static int liquidio_open(struct net_device *netdev)
struct octeon_device *oct = lio->oct_dev;
struct napi_struct *napi, *n;
- list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
- napi_enable(napi);
+ if (oct->props[lio->ifidx].napi_enabled == 0) {
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ napi_enable(napi);
+
+ oct->props[lio->ifidx].napi_enabled = 1;
+ }
oct_ptp_open(netdev);
ifstate_set(lio, LIO_IFSTATE_RUNNING);
+
setup_tx_poll_fn(netdev);
+
start_txq(netdev);
netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
- try_module_get(THIS_MODULE);
/* tell Octeon to start forwarding packets to host */
send_rx_ctrl_cmd(lio, 1);
@@ -2086,41 +2304,36 @@ static int liquidio_open(struct net_device *netdev)
*/
static int liquidio_stop(struct net_device *netdev)
{
- struct napi_struct *napi, *n;
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
- netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
+ ifstate_reset(lio, LIO_IFSTATE_RUNNING);
+
+ netif_tx_disable(netdev);
+
/* Inform that netif carrier is down */
+ netif_carrier_off(netdev);
lio->intf_open = 0;
- lio->linfo.link.s.status = 0;
+ lio->linfo.link.s.link_up = 0;
+ lio->link_changes++;
- netif_carrier_off(netdev);
+	/* Pause for a moment and wait for Octeon to flush out (to the wire)
+	 * any egress packets that are in flight.
+	 */
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(100));
- /* tell Octeon to stop forwarding packets to host */
+ /* Now it should be safe to tell Octeon that nic interface is down. */
send_rx_ctrl_cmd(lio, 0);
- cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
- flush_workqueue(lio->txq_status_wq.wq);
- destroy_workqueue(lio->txq_status_wq.wq);
+ cleanup_tx_poll_fn(netdev);
if (lio->ptp_clock) {
ptp_clock_unregister(lio->ptp_clock);
lio->ptp_clock = NULL;
}
- ifstate_reset(lio, LIO_IFSTATE_RUNNING);
-
- /* This is a hack that allows DHCP to continue working. */
- set_bit(__LINK_STATE_START, &lio->netdev->state);
-
- list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
- napi_disable(napi);
-
- txqs_stop(netdev);
-
dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
- module_put(THIS_MODULE);
return 0;
}
@@ -2131,6 +2344,7 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
struct net_device *netdev = (struct net_device *)nctrl->netpndev;
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
+ u8 *mac;
switch (nctrl->ncmd.s.cmd) {
case OCTNET_CMD_CHANGE_DEVFLAGS:
@@ -2138,22 +2352,24 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
break;
case OCTNET_CMD_CHANGE_MACADDR:
- /* If command is successful, change the MACADDR. */
- netif_info(lio, probe, lio->netdev, " MACAddr changed to 0x%llx\n",
- CVM_CAST64(nctrl->udd[0]));
- dev_info(&oct->pci_dev->dev, "%s MACAddr changed to 0x%llx\n",
- netdev->name, CVM_CAST64(nctrl->udd[0]));
- memcpy(netdev->dev_addr, ((u8 *)&nctrl->udd[0]) + 2, ETH_ALEN);
+ mac = ((u8 *)&nctrl->udd[0]) + 2;
+ netif_info(lio, probe, lio->netdev,
+ "%s %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
+ "MACAddr changed to", mac[0], mac[1],
+ mac[2], mac[3], mac[4], mac[5]);
break;
case OCTNET_CMD_CHANGE_MTU:
/* If command is successful, change the MTU. */
netif_info(lio, probe, lio->netdev, " MTU Changed from %d to %d\n",
- netdev->mtu, nctrl->ncmd.s.param2);
+ netdev->mtu, nctrl->ncmd.s.param1);
dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n",
netdev->name, netdev->mtu,
- nctrl->ncmd.s.param2);
- netdev->mtu = nctrl->ncmd.s.param2;
+ nctrl->ncmd.s.param1);
+ rtnl_lock();
+ netdev->mtu = nctrl->ncmd.s.param1;
+ call_netdevice_notifiers(NETDEV_CHANGEMTU, netdev);
+ rtnl_unlock();
break;
case OCTNET_CMD_GPIO_ACCESS:
@@ -2179,11 +2395,79 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
netdev->name);
break;
+ case OCTNET_CMD_ENABLE_VLAN_FILTER:
+ dev_info(&oct->pci_dev->dev, "%s VLAN filter enabled\n",
+ netdev->name);
+ break;
+
+ case OCTNET_CMD_ADD_VLAN_FILTER:
+ dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
+ netdev->name, nctrl->ncmd.s.param1);
+ break;
+
+ case OCTNET_CMD_DEL_VLAN_FILTER:
+ dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
+ netdev->name, nctrl->ncmd.s.param1);
+ break;
+
case OCTNET_CMD_SET_SETTINGS:
dev_info(&oct->pci_dev->dev, "%s settings changed\n",
netdev->name);
break;
+ /* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL"
+ * Command passed by NIC driver
+ */
+ case OCTNET_CMD_TNL_RX_CSUM_CTL:
+ if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
+ netif_info(lio, probe, lio->netdev,
+ "%s RX Checksum Offload Enabled\n",
+ netdev->name);
+ } else if (nctrl->ncmd.s.param1 ==
+ OCTNET_CMD_RXCSUM_DISABLE) {
+ netif_info(lio, probe, lio->netdev,
+ "%s RX Checksum Offload Disabled\n",
+ netdev->name);
+ }
+ break;
+
+ /* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL"
+ * Command passed by NIC driver
+ */
+ case OCTNET_CMD_TNL_TX_CSUM_CTL:
+ if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
+ netif_info(lio, probe, lio->netdev,
+ "%s TX Checksum Offload Enabled\n",
+ netdev->name);
+ } else if (nctrl->ncmd.s.param1 ==
+ OCTNET_CMD_TXCSUM_DISABLE) {
+ netif_info(lio, probe, lio->netdev,
+ "%s TX Checksum Offload Disabled\n",
+ netdev->name);
+ }
+ break;
+
+ /* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG"
+ * Command passed by NIC driver
+ */
+ case OCTNET_CMD_VXLAN_PORT_CONFIG:
+ if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
+ netif_info(lio, probe, lio->netdev,
+ "%s VxLAN Destination UDP PORT:%d ADDED\n",
+ netdev->name,
+ nctrl->ncmd.s.param1);
+ } else if (nctrl->ncmd.s.more ==
+ OCTNET_CMD_VXLAN_PORT_DEL) {
+ netif_info(lio, probe, lio->netdev,
+ "%s VxLAN Destination UDP PORT:%d DELETED\n",
+ netdev->name,
+ nctrl->ncmd.s.param1);
+ }
+ break;
+
+ case OCTNET_CMD_SET_FLOW_CTL:
+ netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
+ break;
default:
dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
@@ -2233,10 +2517,9 @@ static void liquidio_set_mcast_list(struct net_device *netdev)
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
struct octnic_ctrl_pkt nctrl;
- struct octnic_ctrl_params nparams;
struct netdev_hw_addr *ha;
u64 *mc;
- int ret, i;
+ int ret;
int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
@@ -2244,15 +2527,14 @@ static void liquidio_set_mcast_list(struct net_device *netdev)
/* Create a ctrl pkt command to be sent to core app. */
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
- nctrl.ncmd.s.param1 = lio->linfo.ifidx;
- nctrl.ncmd.s.param2 = get_new_flags(netdev);
- nctrl.ncmd.s.param3 = mc_count;
+ nctrl.ncmd.s.param1 = get_new_flags(netdev);
+ nctrl.ncmd.s.param2 = mc_count;
nctrl.ncmd.s.more = mc_count;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
/* copy all the addresses into the udd */
- i = 0;
mc = &nctrl.udd[0];
netdev_for_each_mc_addr(ha, netdev) {
*mc = 0;
@@ -2268,9 +2550,7 @@ static void liquidio_set_mcast_list(struct net_device *netdev)
*/
nctrl.wait_time = 0;
- nparams.resp_order = OCTEON_RESP_NORESPONSE;
-
- ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
ret);
@@ -2288,19 +2568,17 @@ static int liquidio_set_mac(struct net_device *netdev, void *p)
struct octeon_device *oct = lio->oct_dev;
struct sockaddr *addr = (struct sockaddr *)p;
struct octnic_ctrl_pkt nctrl;
- struct octnic_ctrl_params nparams;
- if ((!is_valid_ether_addr(addr->sa_data)) ||
- (ifstate_check(lio, LIO_IFSTATE_RUNNING)))
+ if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
- nctrl.ncmd.s.param1 = lio->linfo.ifidx;
- nctrl.ncmd.s.param2 = 0;
+ nctrl.ncmd.s.param1 = 0;
nctrl.ncmd.s.more = 1;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
nctrl.wait_time = 100;
@@ -2309,9 +2587,7 @@ static int liquidio_set_mac(struct net_device *netdev, void *p)
/* The MAC Address is presented in network byte order. */
memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
- nparams.resp_order = OCTEON_RESP_ORDERED;
-
- ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
return -ENOMEM;
@@ -2339,7 +2615,7 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
oct = lio->oct_dev;
for (i = 0; i < lio->linfo.num_txpciq; i++) {
- iq_no = lio->linfo.txpciq[i];
+ iq_no = lio->linfo.txpciq[i].s.q_no;
iq_stats = &oct->instr_queue[iq_no]->stats;
pkts += iq_stats->tx_done;
drop += iq_stats->tx_dropped;
@@ -2355,7 +2631,7 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
bytes = 0;
for (i = 0; i < lio->linfo.num_rxpciq; i++) {
- oq_no = lio->linfo.rxpciq[i];
+ oq_no = lio->linfo.rxpciq[i].s.q_no;
oq_stats = &oct->droq[oq_no]->stats;
pkts += oq_stats->rx_pkts_received;
drop += (oq_stats->rx_dropped +
@@ -2381,19 +2657,16 @@ static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
struct octnic_ctrl_pkt nctrl;
- struct octnic_ctrl_params nparams;
- int max_frm_size = new_mtu + OCTNET_FRM_HEADER_SIZE;
int ret = 0;
- /* Limit the MTU to make sure the ethernet packets are between 64 bytes
- * and 65535 bytes
+ /* Limit the MTU to make sure the ethernet packets are between 68 bytes
+ * and 16000 bytes
*/
- if ((max_frm_size < OCTNET_MIN_FRM_SIZE) ||
- (max_frm_size > OCTNET_MAX_FRM_SIZE)) {
+ if ((new_mtu < LIO_MIN_MTU_SIZE) ||
+ (new_mtu > LIO_MAX_MTU_SIZE)) {
dev_err(&oct->pci_dev->dev, "Invalid MTU: %d\n", new_mtu);
dev_err(&oct->pci_dev->dev, "Valid range %d and %d\n",
- (OCTNET_MIN_FRM_SIZE - OCTNET_FRM_HEADER_SIZE),
- (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE));
+ LIO_MIN_MTU_SIZE, LIO_MAX_MTU_SIZE);
return -EINVAL;
}
@@ -2401,15 +2674,13 @@ static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU;
- nctrl.ncmd.s.param1 = lio->linfo.ifidx;
- nctrl.ncmd.s.param2 = new_mtu;
+ nctrl.ncmd.s.param1 = new_mtu;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.wait_time = 100;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
- nparams.resp_order = OCTEON_RESP_ORDERED;
-
- ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
dev_err(&oct->pci_dev->dev, "Failed to set MTU\n");
return -1;
@@ -2426,7 +2697,7 @@ static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
* @param ifr interface request
* @param cmd command
*/
-static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
{
struct hwtstamp_config conf;
struct lio *lio = GET_LIO(netdev);
@@ -2487,7 +2758,7 @@ static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
case SIOCSHWTSTAMP:
- return hwtstamp_ioctl(netdev, ifr, cmd);
+ return hwtstamp_ioctl(netdev, ifr);
default:
return -EOPNOTSUPP;
}
@@ -2534,7 +2805,7 @@ static void handle_timestamp(struct octeon_device *oct,
}
octeon_free_soft_command(oct, sc);
- recv_buffer_free(skb);
+ tx_buffer_free(skb);
}
/* \brief Send a data packet that will be timestamped
@@ -2549,10 +2820,9 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
{
int retval;
struct octeon_soft_command *sc;
- struct octeon_instr_ih *ih;
- struct octeon_instr_rdp *rdp;
struct lio *lio;
int ring_doorbell;
+ u32 len;
lio = finfo->lio;
@@ -2574,14 +2844,13 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
sc->callback_arg = finfo->skb;
sc->iq_no = ndata->q_no;
- ih = (struct octeon_instr_ih *)&sc->cmd.ih;
- rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+ len = (u32)((struct octeon_instr_ih2 *)(&sc->cmd.cmd2.ih2))->dlengsz;
ring_doorbell = !xmit_more;
retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
- sc, ih->dlengsz, ndata->reqtype);
+ sc, len, ndata->reqtype);
- if (retval) {
+ if (retval == IQ_SEND_FAILED) {
dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
retval);
octeon_free_soft_command(oct, sc);
@@ -2592,68 +2861,6 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
return retval;
}
-static inline int is_ipv4(struct sk_buff *skb)
-{
- return (skb->protocol == htons(ETH_P_IP)) &&
- (ip_hdr(skb)->version == 4);
-}
-
-static inline int is_vlan(struct sk_buff *skb)
-{
- return skb->protocol == htons(ETH_P_8021Q);
-}
-
-static inline int is_ip_fragmented(struct sk_buff *skb)
-{
- /* The Don't fragment and Reserved flag fields are ignored.
- * IP is fragmented if
- * - the More fragments bit is set (indicating this IP is a fragment
- * with more to follow; the current offset could be 0 ).
- * - ths offset field is non-zero.
- */
- return (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) ? 1 : 0;
-}
-
-static inline int is_ipv6(struct sk_buff *skb)
-{
- return (skb->protocol == htons(ETH_P_IPV6)) &&
- (ipv6_hdr(skb)->version == 6);
-}
-
-static inline int is_with_extn_hdr(struct sk_buff *skb)
-{
- return (ipv6_hdr(skb)->nexthdr != IPPROTO_TCP) &&
- (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP);
-}
-
-static inline int is_tcpudp(struct sk_buff *skb)
-{
- return (ip_hdr(skb)->protocol == IPPROTO_TCP) ||
- (ip_hdr(skb)->protocol == IPPROTO_UDP);
-}
-
-static inline u32 get_ipv4_5tuple_tag(struct sk_buff *skb)
-{
- u32 tag;
- struct iphdr *iphdr = ip_hdr(skb);
-
- tag = crc32(0, &iphdr->protocol, 1);
- tag = crc32(tag, (u8 *)&iphdr->saddr, 8);
- tag = crc32(tag, skb_transport_header(skb), 4);
- return tag;
-}
-
-static inline u32 get_ipv6_5tuple_tag(struct sk_buff *skb)
-{
- u32 tag;
- struct ipv6hdr *ipv6hdr = ipv6_hdr(skb);
-
- tag = crc32(0, &ipv6hdr->nexthdr, 1);
- tag = crc32(tag, (u8 *)&ipv6hdr->saddr, 32);
- tag = crc32(tag, skb_transport_header(skb), 4);
- return tag;
-}
-
/** \brief Transmit networks packets to the Octeon interface
* @param skbuff skbuff struct to be passed to network layer.
* @param netdev pointer to network device
@@ -2668,18 +2875,22 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
struct octnic_data_pkt ndata;
struct octeon_device *oct;
struct oct_iq_stats *stats;
- int cpu = 0, status = 0;
+ struct octeon_instr_irh *irh;
+ union tx_info *tx_info;
+ int status = 0;
int q_idx = 0, iq_no = 0;
- int xmit_more;
+ int xmit_more, j;
+ u64 dptr = 0;
u32 tag = 0;
lio = GET_LIO(netdev);
oct = lio->oct_dev;
if (netif_is_multiqueue(netdev)) {
- cpu = skb->queue_mapping;
- q_idx = (cpu & (lio->linfo.num_txpciq - 1));
- iq_no = lio->linfo.txpciq[q_idx];
+ q_idx = skb->queue_mapping;
+ q_idx = (q_idx % (lio->linfo.num_txpciq));
+ tag = q_idx;
+ iq_no = lio->linfo.txpciq[q_idx].s.q_no;
} else {
iq_no = lio->txq;
}
@@ -2690,11 +2901,11 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
* transmitted.
*/
if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
- (!lio->linfo.link.s.status) ||
+ (!lio->linfo.link.s.link_up) ||
(skb->len <= 0)) {
netif_info(lio, tx_err, lio->netdev,
"Transmit failed link_status : %d\n",
- lio->linfo.link.s.status);
+ lio->linfo.link.s.link_up);
goto lio_xmit_failed;
}
@@ -2726,62 +2937,25 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
/* defer sending if queue is full */
stats->tx_iq_busy++;
netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
- ndata.q_no);
+ lio->txq);
return NETDEV_TX_BUSY;
}
}
/* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
- * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no );
+ * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
*/
ndata.datasize = skb->len;
cmdsetup.u64 = 0;
- cmdsetup.s.ifidx = lio->linfo.ifidx;
+ cmdsetup.s.iq_no = iq_no;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- if (is_ipv4(skb) && !is_ip_fragmented(skb) && is_tcpudp(skb)) {
- tag = get_ipv4_5tuple_tag(skb);
-
- cmdsetup.s.cksum_offset = sizeof(struct ethhdr) + 1;
-
- if (ip_hdr(skb)->ihl > 5)
- cmdsetup.s.ipv4opts_ipv6exthdr =
- OCT_PKT_PARAM_IPV4OPTS;
-
- } else if (is_ipv6(skb)) {
- tag = get_ipv6_5tuple_tag(skb);
-
- cmdsetup.s.cksum_offset = sizeof(struct ethhdr) + 1;
-
- if (is_with_extn_hdr(skb))
- cmdsetup.s.ipv4opts_ipv6exthdr =
- OCT_PKT_PARAM_IPV6EXTHDR;
-
- } else if (is_vlan(skb)) {
- if (vlan_eth_hdr(skb)->h_vlan_encapsulated_proto
- == htons(ETH_P_IP) &&
- !is_ip_fragmented(skb) && is_tcpudp(skb)) {
- tag = get_ipv4_5tuple_tag(skb);
-
- cmdsetup.s.cksum_offset =
- sizeof(struct vlan_ethhdr) + 1;
-
- if (ip_hdr(skb)->ihl > 5)
- cmdsetup.s.ipv4opts_ipv6exthdr =
- OCT_PKT_PARAM_IPV4OPTS;
-
- } else if (vlan_eth_hdr(skb)->h_vlan_encapsulated_proto
- == htons(ETH_P_IPV6)) {
- tag = get_ipv6_5tuple_tag(skb);
-
- cmdsetup.s.cksum_offset =
- sizeof(struct vlan_ethhdr) + 1;
-
- if (is_with_extn_hdr(skb))
- cmdsetup.s.ipv4opts_ipv6exthdr =
- OCT_PKT_PARAM_IPV6EXTHDR;
- }
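+		/* Firmware computes the checksum; tunnelled (e.g. VXLAN)
+		 * packets are flagged for tunnel checksum offload instead
+		 * of the plain transport checksum.
+		 */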
+ if (skb->encapsulation) {
+ cmdsetup.s.tnl_csum = 1;
+ stats->tx_vxlan++;
+ } else {
+ cmdsetup.s.transport_csum = 1;
}
}
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
@@ -2791,20 +2965,21 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
if (skb_shinfo(skb)->nr_frags == 0) {
cmdsetup.s.u.datasize = skb->len;
- octnet_prepare_pci_cmd(&ndata.cmd, &cmdsetup, tag);
+ octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
+
/* Offload checksum calculation for TCP/UDP packets */
- ndata.cmd.dptr = dma_map_single(&oct->pci_dev->dev,
- skb->data,
- skb->len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&oct->pci_dev->dev, ndata.cmd.dptr)) {
+ dptr = dma_map_single(&oct->pci_dev->dev,
+ skb->data,
+ skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
__func__);
return NETDEV_TX_BUSY;
}
- finfo->dptr = ndata.cmd.dptr;
-
+ ndata.cmd.cmd2.dptr = dptr;
+ finfo->dptr = dptr;
ndata.reqtype = REQTYPE_NORESP_NET;
} else {
@@ -2812,19 +2987,20 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
struct skb_frag_struct *frag;
struct octnic_gather *g;
- spin_lock(&lio->lock);
- g = (struct octnic_gather *)list_delete_head(&lio->glist);
- spin_unlock(&lio->lock);
+ spin_lock(&lio->glist_lock[q_idx]);
+ g = (struct octnic_gather *)
+ list_delete_head(&lio->glist[q_idx]);
+ spin_unlock(&lio->glist_lock[q_idx]);
if (!g) {
netif_info(lio, tx_err, lio->netdev,
"Transmit scatter gather: glist null!\n");
- goto lio_xmit_dma_failed;
+ goto lio_xmit_failed;
}
cmdsetup.s.gather = 1;
cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
- octnet_prepare_pci_cmd(&ndata.cmd, &cmdsetup, tag);
+ octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
memset(g->sg, 0, g->sg_size);
@@ -2851,36 +3027,52 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
frag->size,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&oct->pci_dev->dev,
+ g->sg[i >> 2].ptr[i & 3])) {
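+				/* Unwind: unmap the linear area and every
+				 * frag mapped so far before giving up.
+				 */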
+ dma_unmap_single(&oct->pci_dev->dev,
+ g->sg[0].ptr[0],
+ skb->len - skb->data_len,
+ DMA_TO_DEVICE);
+ for (j = 1; j < i; j++) {
+ frag = &skb_shinfo(skb)->frags[j - 1];
+ dma_unmap_page(&oct->pci_dev->dev,
+ g->sg[j >> 2].ptr[j & 3],
+ frag->size,
+ DMA_TO_DEVICE);
+ }
+ dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
+ __func__);
+ return NETDEV_TX_BUSY;
+ }
+
add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
i++;
}
- ndata.cmd.dptr = dma_map_single(&oct->pci_dev->dev,
- g->sg, g->sg_size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&oct->pci_dev->dev, ndata.cmd.dptr)) {
- dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
- __func__);
- dma_unmap_single(&oct->pci_dev->dev, g->sg[0].ptr[0],
- skb->len - skb->data_len,
- DMA_TO_DEVICE);
- return NETDEV_TX_BUSY;
- }
+ dma_sync_single_for_device(&oct->pci_dev->dev, g->sg_dma_ptr,
+ g->sg_size, DMA_TO_DEVICE);
+ dptr = g->sg_dma_ptr;
- finfo->dptr = ndata.cmd.dptr;
+ ndata.cmd.cmd2.dptr = dptr;
+ finfo->dptr = dptr;
finfo->g = g;
ndata.reqtype = REQTYPE_NORESP_NET_SG;
}
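On a mapping failure partway through the fragment loop, every mapping taken so far has to be released before the function bails out with NETDEV_TX_BUSY; entry 0 of the gather list is the linear part of the skb, entries 1..i-1 are page fragments. The backout pattern in isolation (sg_ptr() and frag_size() are hypothetical shorthands for the g->sg[j >> 2].ptr[j & 3] and frags[j - 1].size indexing above):

	/* Sketch: undo mappings 0..i-1 after fragment i fails to map. */
	dma_unmap_single(&oct->pci_dev->dev, sg_ptr(0),
			 skb->len - skb->data_len, DMA_TO_DEVICE);
	for (j = 1; j < i; j++)
		dma_unmap_page(&oct->pci_dev->dev, sg_ptr(j),
			       frag_size(j), DMA_TO_DEVICE);
	return NETDEV_TX_BUSY;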
- if (skb_shinfo(skb)->gso_size) {
- struct octeon_instr_irh *irh =
- (struct octeon_instr_irh *)&ndata.cmd.irh;
- union tx_info *tx_info = (union tx_info *)&ndata.cmd.ossp[0];
+ irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
+ tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
- irh->len = 1; /* to indicate that ossp[0] contains tx_info */
+ if (skb_shinfo(skb)->gso_size) {
tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
+ stats->tx_gso++;
+ }
+
+ /* Have the HW insert the VLAN tag */
+ if (skb_vlan_tag_present(skb)) {
+ irh->priority = skb_vlan_tag_get(skb) >> 13;
+ irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
}
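The two assignments above split the 16-bit VLAN TCI that skb_vlan_tag_get() returns: the top 3 bits are the 802.1p priority (PCP), the low 12 bits the VLAN id, with the DEI bit in between discarded. A worked example:

	u16 tci = 0x6064;		/* example TCI               */
	u8  pcp = tci >> 13;		/* 0x6064 >> 13  == 3        */
	u16 vid = tci & 0xfff;		/* 0x6064 & 0xfff == 100     */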
xmit_more = skb->xmit_more;
@@ -2890,7 +3082,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
else
status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
if (status == IQ_SEND_FAILED)
- goto lio_xmit_dma_failed;
+ goto lio_xmit_failed;
netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
@@ -2899,19 +3091,22 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
netif_trans_update(netdev);
- stats->tx_done++;
+ if (skb_shinfo(skb)->gso_size)
+ stats->tx_done += skb_shinfo(skb)->gso_segs;
+ else
+ stats->tx_done++;
stats->tx_tot_bytes += skb->len;
return NETDEV_TX_OK;
-lio_xmit_dma_failed:
- dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
- ndata.datasize, DMA_TO_DEVICE);
lio_xmit_failed:
stats->tx_dropped++;
netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
iq_no, stats->tx_dropped);
- recv_buffer_free(skb);
+ if (dptr)
+ dma_unmap_single(&oct->pci_dev->dev, dptr,
+ ndata.datasize, DMA_TO_DEVICE);
+ tx_buffer_free(skb);
return NETDEV_TX_OK;
}
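The reworked error handling follows the standard .ndo_start_xmit contract: NETDEV_TX_BUSY tells the stack to requeue the same skb (used here for transient DMA-mapping failures), while a packet the driver drops must be freed and still reported as NETDEV_TX_OK, as lio_xmit_failed does. A minimal skeleton of that contract (all helper names hypothetical):

	static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		if (hw_resources_exhausted(dev))
			return NETDEV_TX_BUSY;		/* stack retries this skb */

		if (!try_queue_to_hw(dev, skb)) {
			dev_kfree_skb_any(skb);		/* drop: free, report OK  */
			dev->stats.tx_dropped++;
		}
		return NETDEV_TX_OK;
	}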
@@ -2931,27 +3126,145 @@ static void liquidio_tx_timeout(struct net_device *netdev)
txqs_wake(netdev);
}
-int liquidio_set_feature(struct net_device *netdev, int cmd)
+static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
+ __be16 proto __attribute__((unused)),
+ u16 vid)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
struct octnic_ctrl_pkt nctrl;
- struct octnic_ctrl_params nparams;
int ret = 0;
memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
nctrl.ncmd.u64 = 0;
- nctrl.ncmd.s.cmd = cmd;
- nctrl.ncmd.s.param1 = lio->linfo.ifidx;
- nctrl.ncmd.s.param2 = OCTNIC_LROIPV4 | OCTNIC_LROIPV6;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
+ nctrl.ncmd.s.param1 = vid;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
+ ret);
+ }
+
+ return ret;
+}
+
+static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
+ __be16 proto __attribute__((unused)),
+ u16 vid)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
+ nctrl.ncmd.s.param1 = vid;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.wait_time = 100;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
- nparams.resp_order = OCTEON_RESP_NORESPONSE;
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
+ ret);
+ }
+ return ret;
+}
+
+/** Send a command to enable/disable RX checksum offload
+ * @param netdev pointer to network device
+ * @param command OCTNET_CMD_TNL_RX_CSUM_CTL
+ * @param rx_cmd OCTNET_CMD_RXCSUM_ENABLE/
+ * OCTNET_CMD_RXCSUM_DISABLE
+ * @returns SUCCESS or FAILURE
+ */
+int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
+ u8 rx_cmd)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
- ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = command;
+ nctrl.ncmd.s.param1 = rx_cmd;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev,
+ "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
+ ret);
+ }
+ return ret;
+}
+
+/** Send a command to the firmware to add or delete a VxLAN UDP port
+ * @param netdev pointer to network device
+ * @param command OCTNET_CMD_VXLAN_PORT_CONFIG
+ * @param vxlan_port VxLAN port to be added or deleted
+ * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD,
+ * OCTNET_CMD_VXLAN_PORT_DEL
+ * @returns SUCCESS or FAILURE
+ */
+static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
+ u16 vxlan_port, u8 vxlan_cmd_bit)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = command;
+ nctrl.ncmd.s.more = vxlan_cmd_bit;
+ nctrl.ncmd.s.param1 = vxlan_port;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev,
+ "VxLAN port add/delete failed in core (ret:0x%x)\n",
+ ret);
+ }
+ return ret;
+}
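The add/kill-VID, RX-checksum and VxLAN helpers above all build the same control message: fill an octnic_ctrl_pkt, aim it at the first Tx queue, and send it asynchronously with liquidio_link_ctrl_cmd_completion as the callback. The shared core, as a sketch (note that, unlike the VLAN helpers, the RX-checksum and VxLAN functions above do not memset nctrl first):

	memset(&nctrl, 0, sizeof(nctrl));
	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = command;			/* OCTNET_CMD_*   */
	nctrl.ncmd.s.param1 = param;			/* command detail */
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;	/* control queue  */
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);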
+
+int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = cmd;
+ nctrl.ncmd.s.param1 = param1;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
ret);
@@ -3007,14 +3320,55 @@ static int liquidio_set_features(struct net_device *netdev,
return 0;
if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
- liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE);
+ liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
+ OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
else if (!(features & NETIF_F_LRO) &&
(lio->dev_capability & NETIF_F_LRO))
- liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE);
+ liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
+ OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
+
+ /* Send a command to the firmware to enable/disable RX checksum
+ * offload when the setting is changed via ethtool
+ */
+ if (!(netdev->features & NETIF_F_RXCSUM) &&
+ (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
+ (features & NETIF_F_RXCSUM))
+ liquidio_set_rxcsum_command(netdev,
+ OCTNET_CMD_TNL_RX_CSUM_CTL,
+ OCTNET_CMD_RXCSUM_ENABLE);
+ else if ((netdev->features & NETIF_F_RXCSUM) &&
+ (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
+ !(features & NETIF_F_RXCSUM))
+ liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
+ OCTNET_CMD_RXCSUM_DISABLE);
return 0;
}
+static void liquidio_add_vxlan_port(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
+{
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
+ liquidio_vxlan_port_command(netdev,
+ OCTNET_CMD_VXLAN_PORT_CONFIG,
+ htons(ti->port),
+ OCTNET_CMD_VXLAN_PORT_ADD);
+}
+
+static void liquidio_del_vxlan_port(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
+{
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
+ liquidio_vxlan_port_command(netdev,
+ OCTNET_CMD_VXLAN_PORT_CONFIG,
+ htons(ti->port),
+ OCTNET_CMD_VXLAN_PORT_DEL);
+}
+
static struct net_device_ops lionetdevops = {
.ndo_open = liquidio_open,
.ndo_stop = liquidio_stop,
@@ -3023,10 +3377,15 @@ static struct net_device_ops lionetdevops = {
.ndo_set_mac_address = liquidio_set_mac,
.ndo_set_rx_mode = liquidio_set_mcast_list,
.ndo_tx_timeout = liquidio_tx_timeout,
+
+ .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
.ndo_change_mtu = liquidio_change_mtu,
.ndo_do_ioctl = liquidio_ioctl,
.ndo_fix_features = liquidio_fix_features,
.ndo_set_features = liquidio_set_features,
+ .ndo_udp_tunnel_add = liquidio_add_vxlan_port,
+ .ndo_udp_tunnel_del = liquidio_del_vxlan_port,
};
/** \brief Entry point for the liquidio module
@@ -3081,24 +3440,27 @@ static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
{
struct octeon_device *oct = (struct octeon_device *)buf;
struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
- int ifidx = 0;
+ int gmxport = 0;
union oct_link_status *ls;
int i;
- if ((recv_pkt->buffer_size[0] != sizeof(*ls)) ||
- (recv_pkt->rh.r_nic_info.ifidx > oct->ifcount)) {
+ if (recv_pkt->buffer_size[0] != sizeof(*ls)) {
dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
recv_pkt->buffer_size[0],
- recv_pkt->rh.r_nic_info.ifidx);
+ recv_pkt->rh.r_nic_info.gmxport);
goto nic_info_err;
}
- ifidx = recv_pkt->rh.r_nic_info.ifidx;
+ gmxport = recv_pkt->rh.r_nic_info.gmxport;
ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]);
octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
-
- update_link_status(oct->props[ifidx].netdev, ls);
+ for (i = 0; i < oct->ifcount; i++) {
+ if (oct->props[i].gmxport == gmxport) {
+ update_link_status(oct->props[i].netdev, ls);
+ break;
+ }
+ }
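Link-status messages are now keyed by the physical gmxport instead of an interface index, so the handler walks the props table to find the owning netdev, the loop just above. Factored out, the lookup would look like this (gmxport_to_netdev is a hypothetical helper, not in the patch):

	static struct net_device *gmxport_to_netdev(struct octeon_device *oct,
						    int gmxport)
	{
		int i;

		for (i = 0; i < oct->ifcount; i++)
			if (oct->props[i].gmxport == gmxport)
				return oct->props[i].netdev;
		return NULL;	/* props[i].gmxport is preset to -1, so no false hit */
	}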
nic_info_err:
for (i = 0; i < recv_pkt->buffer_count; i++)
@@ -3124,13 +3486,12 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
struct liquidio_if_cfg_context *ctx;
struct liquidio_if_cfg_resp *resp;
struct octdev_props *props;
- int retval, num_iqueues, num_oqueues, q_no;
- u64 q_mask;
- int num_cpus = num_online_cpus();
+ int retval, num_iqueues, num_oqueues;
union oct_nic_if_cfg if_cfg;
unsigned int base_queue;
unsigned int gmx_port_id;
u32 resp_size, ctx_size;
+ u32 ifidx_or_pfnum;
/* This is to handle link status changes */
octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
@@ -3166,14 +3527,12 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
CFG_GET_BASE_QUE_NIC_IF(octeon_get_conf(octeon_dev), i);
gmx_port_id =
CFG_GET_GMXID_NIC_IF(octeon_get_conf(octeon_dev), i);
- if (num_iqueues > num_cpus)
- num_iqueues = num_cpus;
- if (num_oqueues > num_cpus)
- num_oqueues = num_cpus;
+ ifidx_or_pfnum = i;
+
dev_dbg(&octeon_dev->pci_dev->dev,
"requesting config for interface %d, iqs %d, oqs %d\n",
- i, num_iqueues, num_oqueues);
- ACCESS_ONCE(ctx->cond) = 0;
+ ifidx_or_pfnum, num_iqueues, num_oqueues);
+ WRITE_ONCE(ctx->cond, 0);
ctx->octeon_id = lio_get_device_id(octeon_dev);
init_waitqueue_head(&ctx->wc);
@@ -3182,16 +3541,19 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
if_cfg.s.num_oqueues = num_oqueues;
if_cfg.s.base_queue = base_queue;
if_cfg.s.gmx_port_id = gmx_port_id;
+
+ sc->iq_no = 0;
+
octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
- OPCODE_NIC_IF_CFG, i,
+ OPCODE_NIC_IF_CFG, 0,
if_cfg.u64, 0);
sc->callback = if_cfg_callback;
sc->callback_arg = sc;
- sc->wait_time = 1000;
+ sc->wait_time = 3000;
retval = octeon_send_soft_command(octeon_dev, sc);
- if (retval) {
+ if (retval == IQ_SEND_FAILED) {
dev_err(&octeon_dev->pci_dev->dev,
"iq/oq config failed status: %x\n",
retval);
@@ -3233,8 +3595,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
goto setup_nic_dev_fail;
}
- props = &octeon_dev->props[i];
- props->netdev = netdev;
+ SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
if (num_iqueues > 1)
lionetdevops.ndo_select_queue = select_q;
@@ -3248,23 +3609,21 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
memset(lio, 0, sizeof(struct lio));
- lio->linfo.ifidx = resp->cfg_info.ifidx;
- lio->ifidx = resp->cfg_info.ifidx;
+ lio->ifidx = ifidx_or_pfnum;
+
+ props = &octeon_dev->props[i];
+ props->gmxport = resp->cfg_info.linfo.gmxport;
+ props->netdev = netdev;
lio->linfo.num_rxpciq = num_oqueues;
lio->linfo.num_txpciq = num_iqueues;
- q_mask = resp->cfg_info.oqmask;
- /* q_mask is 0-based and already verified mask is nonzero */
for (j = 0; j < num_oqueues; j++) {
- q_no = __ffs64(q_mask);
- q_mask &= (~(1UL << q_no));
- lio->linfo.rxpciq[j] = q_no;
+ lio->linfo.rxpciq[j].u64 =
+ resp->cfg_info.linfo.rxpciq[j].u64;
}
- q_mask = resp->cfg_info.iqmask;
for (j = 0; j < num_iqueues; j++) {
- q_no = __ffs64(q_mask);
- q_mask &= (~(1UL << q_no));
- lio->linfo.txpciq[j] = q_no;
+ lio->linfo.txpciq[j].u64 =
+ resp->cfg_info.linfo.txpciq[j].u64;
}
lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
@@ -3273,16 +3632,41 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
lio->dev_capability = NETIF_F_HIGHDMA
- | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
- | NETIF_F_SG | NETIF_F_RXCSUM
- | NETIF_F_TSO | NETIF_F_TSO6
- | NETIF_F_LRO;
+ | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+ | NETIF_F_SG | NETIF_F_RXCSUM
+ | NETIF_F_GRO
+ | NETIF_F_TSO | NETIF_F_TSO6
+ | NETIF_F_LRO;
netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
- netdev->features = lio->dev_capability;
+ /* Transmit encapsulation capabilities for this device:
+ * TSO, TSO6, checksums
+ */
+ lio->enc_dev_capability = NETIF_F_IP_CSUM
+ | NETIF_F_IPV6_CSUM
+ | NETIF_F_GSO_UDP_TUNNEL
+ | NETIF_F_HW_CSUM | NETIF_F_SG
+ | NETIF_F_RXCSUM
+ | NETIF_F_TSO | NETIF_F_TSO6
+ | NETIF_F_LRO;
+
+ netdev->hw_enc_features = (lio->enc_dev_capability &
+ ~NETIF_F_LRO);
+
+ lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
+
netdev->vlan_features = lio->dev_capability;
+ /* Add any unchangeable hw features */
+ lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
+
+ netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
netdev->hw_features = lio->dev_capability;
+ /* HW_VLAN_RX and HW_VLAN_FILTER are always on */
+ netdev->hw_features = netdev->hw_features &
+ ~NETIF_F_HW_VLAN_CTAG_RX;
/* Point to the properties for octeon device to which this
* interface belongs.
@@ -3290,7 +3674,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
lio->oct_dev = octeon_dev;
lio->octprops = props;
lio->netdev = netdev;
- spin_lock_init(&lio->lock);
dev_dbg(&octeon_dev->pci_dev->dev,
"if%d gmx: %d hw_addr: 0x%llx\n", i,
@@ -3305,23 +3688,22 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
ether_addr_copy(netdev->dev_addr, mac);
- if (setup_io_queues(octeon_dev, netdev)) {
+ /* By default all interfaces on a single Octeon use the same
+ * tx and rx queues
+ */
+ lio->txq = lio->linfo.txpciq[0].s.q_no;
+ lio->rxq = lio->linfo.rxpciq[0].s.q_no;
+ if (setup_io_queues(octeon_dev, i)) {
dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
goto setup_nic_dev_fail;
}
ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
- /* By default all interfaces on a single Octeon uses the same
- * tx and rx queues
- */
- lio->txq = lio->linfo.txpciq[0];
- lio->rxq = lio->linfo.rxpciq[0];
-
lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
- if (setup_glist(lio)) {
+ if (setup_glists(octeon_dev, lio, num_iqueues)) {
dev_err(&octeon_dev->pci_dev->dev,
"Gather list allocation failed\n");
goto setup_nic_dev_fail;
@@ -3329,11 +3711,17 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
/* Register ethtool support */
liquidio_set_ethtool_ops(netdev);
+ octeon_dev->priv_flags = 0x0;
- liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE);
+ if (netdev->features & NETIF_F_LRO)
+ liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
+ OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
+
+ liquidio_set_feature(netdev, OCTNET_CMD_ENABLE_VLAN_FILTER, 0);
if ((debug != -1) && (debug & NETIF_MSG_HW))
- liquidio_set_feature(netdev, OCTNET_CMD_VERBOSE_ENABLE);
+ liquidio_set_feature(netdev,
+ OCTNET_CMD_VERBOSE_ENABLE, 0);
/* Register the network device with the OS */
if (register_netdev(netdev)) {
@@ -3345,16 +3733,19 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
"Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
netif_carrier_off(netdev);
-
- if (lio->linfo.link.s.status) {
- netif_carrier_on(netdev);
- start_txq(netdev);
- } else {
- netif_carrier_off(netdev);
- }
+ lio->link_changes++;
ifstate_set(lio, LIO_IFSTATE_REGISTERED);
+ /* Send a command to the firmware to enable Rx checksum offload
+ * by default when the LiquidIO driver sets up this device
+ */
+ liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
+ OCTNET_CMD_RXCSUM_ENABLE);
+ liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
+ OCTNET_CMD_TXCSUM_ENABLE);
+
dev_dbg(&octeon_dev->pci_dev->dev,
"NIC ifidx:%d Setup successful\n", i);
@@ -3385,7 +3776,7 @@ setup_nic_dev_fail:
static int liquidio_init_nic_module(struct octeon_device *oct)
{
struct oct_intrmod_cfg *intrmod_cfg;
- int retval = 0;
+ int i, retval = 0;
int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
@@ -3399,6 +3790,9 @@ static int liquidio_init_nic_module(struct octeon_device *oct)
memset(oct->props, 0,
sizeof(struct octdev_props) * num_nic_ports);
+ for (i = 0; i < MAX_OCTEON_LINKS; i++)
+ oct->props[i].gmxport = -1;
+
retval = setup_nic_devices(oct);
if (retval) {
dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
@@ -3409,15 +3803,19 @@ static int liquidio_init_nic_module(struct octeon_device *oct)
/* Initialize interrupt moderation params */
intrmod_cfg = &((struct octeon_device *)oct)->intrmod;
- intrmod_cfg->intrmod_enable = 1;
- intrmod_cfg->intrmod_check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
- intrmod_cfg->intrmod_maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
- intrmod_cfg->intrmod_minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
- intrmod_cfg->intrmod_maxcnt_trigger = LIO_INTRMOD_MAXCNT_TRIGGER;
- intrmod_cfg->intrmod_maxtmr_trigger = LIO_INTRMOD_MAXTMR_TRIGGER;
- intrmod_cfg->intrmod_mintmr_trigger = LIO_INTRMOD_MINTMR_TRIGGER;
- intrmod_cfg->intrmod_mincnt_trigger = LIO_INTRMOD_MINCNT_TRIGGER;
-
+ intrmod_cfg->rx_enable = 1;
+ intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
+ intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
+ intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
+ intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER;
+ intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER;
+ intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER;
+ intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER;
+ intrmod_cfg->tx_enable = 1;
+ intrmod_cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER;
+ intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER;
+ intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
+ intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
return retval;
@@ -3480,6 +3878,7 @@ static void nic_starter(struct work_struct *work)
static int octeon_device_init(struct octeon_device *octeon_dev)
{
int j, ret;
+ char bootcmd[] = "\n";
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)octeon_dev->priv;
atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
@@ -3557,6 +3956,7 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
/* Release any previously allocated queues */
for (j = 0; j < octeon_dev->num_oqs; j++)
octeon_delete_droq(octeon_dev, j);
+ return 1;
}
atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
@@ -3579,7 +3979,8 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
/* Setup the interrupt handler and record the INT SUM register address
*/
- octeon_setup_interrupt(octeon_dev);
+ if (octeon_setup_interrupt(octeon_dev))
+ return 1;
/* Enable Octeon device interrupts */
octeon_dev->fn_list.enable_interrupt(octeon_dev->chip);
@@ -3591,14 +3992,19 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
- if (ddr_timeout == 0) {
- dev_info(&octeon_dev->pci_dev->dev,
- "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
- }
+ if (ddr_timeout == 0)
+ dev_info(&octeon_dev->pci_dev->dev, "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
/* Wait for the octeon to initialize DDR after the soft-reset. */
+ while (ddr_timeout == 0) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (schedule_timeout(HZ / 10)) {
+ /* user probably pressed Control-C */
+ return 1;
+ }
+ }
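The added loop makes the ddr_timeout==0 wait abortable: with TASK_INTERRUPTIBLE, schedule_timeout() returns the remaining jiffies when a signal ends the sleep early, and 0 when the timer simply expired. In isolation:

	long remaining;

	set_current_state(TASK_INTERRUPTIBLE);
	remaining = schedule_timeout(HZ / 10);	/* sleep up to 100 ms */
	if (remaining)
		return 1;	/* woken by a signal (e.g. Ctrl-C), not the timer */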
ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
if (ret) {
dev_err(&octeon_dev->pci_dev->dev,
@@ -3612,6 +4018,9 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
return 1;
}
+ /* Divert U-Boot to take commands from the host instead. */
+ ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
+
dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
ret = octeon_init_consoles(octeon_dev);
if (ret) {
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
index 0ac347ccc..199a8b9c7 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -30,10 +30,10 @@
#include "octeon_config.h"
-#define LIQUIDIO_VERSION "1.1.9"
-#define LIQUIDIO_MAJOR_VERSION 1
-#define LIQUIDIO_MINOR_VERSION 1
-#define LIQUIDIO_MICRO_VERSION 9
+#define LIQUIDIO_BASE_VERSION "1.4"
+#define LIQUIDIO_MICRO_VERSION ".1"
+#define LIQUIDIO_PACKAGE ""
+#define LIQUIDIO_VERSION "1.4.1"
#define CONTROL_IQ 0
/** Tag types used by Octeon cores in its work. */
@@ -174,9 +174,11 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
/*------------------------- End Scatter/Gather ---------------------------*/
#define OCTNET_FRM_PTP_HEADER_SIZE 8
-#define OCTNET_FRM_HEADER_SIZE 30 /* PTP timestamp + VLAN + Ethernet */
-#define OCTNET_MIN_FRM_SIZE (64 + OCTNET_FRM_PTP_HEADER_SIZE)
+#define OCTNET_FRM_HEADER_SIZE 22 /* VLAN + Ethernet */
+
+#define OCTNET_MIN_FRM_SIZE 64
+
#define OCTNET_MAX_FRM_SIZE (16000 + OCTNET_FRM_HEADER_SIZE)
#define OCTNET_DEFAULT_FRM_SIZE (1500 + OCTNET_FRM_HEADER_SIZE)
@@ -212,6 +214,17 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
#define OCTNET_CMD_VERBOSE_ENABLE 0x14
#define OCTNET_CMD_VERBOSE_DISABLE 0x15
+#define OCTNET_CMD_ENABLE_VLAN_FILTER 0x16
+#define OCTNET_CMD_ADD_VLAN_FILTER 0x17
+#define OCTNET_CMD_DEL_VLAN_FILTER 0x18
+#define OCTNET_CMD_VXLAN_PORT_CONFIG 0x19
+#define OCTNET_CMD_VXLAN_PORT_ADD 0x0
+#define OCTNET_CMD_VXLAN_PORT_DEL 0x1
+#define OCTNET_CMD_RXCSUM_ENABLE 0x0
+#define OCTNET_CMD_RXCSUM_DISABLE 0x1
+#define OCTNET_CMD_TXCSUM_ENABLE 0x0
+#define OCTNET_CMD_TXCSUM_DISABLE 0x1
+
/* RX(packets coming from wire) Checksum verification flags */
/* TCP/UDP csum */
#define CNNIC_L4SUM_VERIFIED 0x1
@@ -258,19 +271,19 @@ union octnet_cmd {
u64 more:6; /* How many udd words follow the command */
- u64 param1:29;
+ u64 reserved:29;
- u64 param2:16;
+ u64 param1:16;
- u64 param3:8;
+ u64 param2:8;
#else
- u64 param3:8;
+ u64 param2:8;
- u64 param2:16;
+ u64 param1:16;
- u64 param1:29;
+ u64 reserved:29;
u64 more:6;
@@ -283,8 +296,140 @@ union octnet_cmd {
#define OCTNET_CMD_SIZE (sizeof(union octnet_cmd))
+/* Instruction Header (DPI) - for OCTEON-III models */
+struct octeon_instr_ih3 {
+#ifdef __BIG_ENDIAN_BITFIELD
+
+ /** Reserved3 */
+ u64 reserved3:1;
+
+ /** Gather indicator, 1 = gather */
+ u64 gather:1;
+
+ /** Data length OR no. of entries in gather list */
+ u64 dlengsz:14;
+
+ /** Front Data size */
+ u64 fsz:6;
+
+ /** Reserved2 */
+ u64 reserved2:4;
+
+ /** PKI port kind - PKIND */
+ u64 pkind:6;
+
+ /** Reserved1 */
+ u64 reserved1:32;
+
+#else
+ /** Reserved1 */
+ u64 reserved1:32;
+
+ /** PKI port kind - PKIND */
+ u64 pkind:6;
+
+ /** Reserved2 */
+ u64 reserved2:4;
+
+ /** Front Data size */
+ u64 fsz:6;
+
+ /** Data length OR no. of entries in gather list */
+ u64 dlengsz:14;
+
+ /** Gather indicator, 1 = gather */
+ u64 gather:1;
+
+ /** Reserved3 */
+ u64 reserved3:1;
+
+#endif
+};
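The two #ifdef arms of these headers must list the same fields in mirrored order and sum to exactly 64 bits (1+1+14+6+4+6+32 for ih3). A compile-time size check is a cheap guard against a miscounted width; something like this (a sketch, not part of the patch):

	static inline void octeon_ih3_size_check(void)
	{
		BUILD_BUG_ON(sizeof(struct octeon_instr_ih3) != 8);
	}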
+
+/* Optional PKI Instruction Header (PKI IH) - for OCTEON-III models */
+/** BIG ENDIAN format. */
+struct octeon_instr_pki_ih3 {
+#ifdef __BIG_ENDIAN_BITFIELD
+
+ /** Wider bit */
+ u64 w:1;
+
+ /** Raw mode indicator 1 = RAW */
+ u64 raw:1;
+
+ /** Use Tag */
+ u64 utag:1;
+
+ /** Use QPG */
+ u64 uqpg:1;
+
+ /** Reserved2 */
+ u64 reserved2:1;
+
+ /** Parse Mode */
+ u64 pm:3;
+
+ /** Skip Length */
+ u64 sl:8;
+
+ /** Use Tag Type */
+ u64 utt:1;
+
+ /** Tag type */
+ u64 tagtype:2;
+
+ /** Reserved1 */
+ u64 reserved1:2;
+
+ /** QPG Value */
+ u64 qpg:11;
+
+ /** Tag Value */
+ u64 tag:32;
+
+#else
+
+ /** Tag Value */
+ u64 tag:32;
+
+ /** QPG Value */
+ u64 qpg:11;
+
+ /** Reserved1 */
+ u64 reserved1:2;
+
+ /** Tag type */
+ u64 tagtype:2;
+
+ /** Use Tag Type */
+ u64 utt:1;
+
+ /** Skip Length */
+ u64 sl:8;
+
+ /** Parse Mode */
+ u64 pm:3;
+
+ /** Reserved2 */
+ u64 reserved2:1;
+
+ /** Use QPG */
+ u64 uqpg:1;
+
+ /** Use Tag */
+ u64 utag:1;
+
+ /** Raw mode indicator 1 = RAW */
+ u64 raw:1;
+
+ /** Wider bit */
+ u64 w:1;
+#endif
+
+};
+
/** Instruction Header */
-struct octeon_instr_ih {
+struct octeon_instr_ih2 {
#ifdef __BIG_ENDIAN_BITFIELD
/** Raw mode indicator 1 = RAW */
u64 raw:1;
@@ -348,15 +493,15 @@ struct octeon_instr_irh {
u64 opcode:4;
u64 rflag:1;
u64 subcode:7;
- u64 len:3;
- u64 rid:13;
- u64 reserved:4;
+ u64 vlan:12;
+ u64 priority:3;
+ u64 reserved:5;
u64 ossp:32; /* opcode/subcode specific parameters */
#else
u64 ossp:32; /* opcode/subcode specific parameters */
- u64 reserved:4;
- u64 rid:13;
- u64 len:3;
+ u64 reserved:5;
+ u64 priority:3;
+ u64 vlan:12;
u64 subcode:7;
u64 rflag:1;
u64 opcode:4;
@@ -383,75 +528,77 @@ union octeon_rh {
struct {
u64 opcode:4;
u64 subcode:8;
- u64 len:3; /** additional 64-bit words */
- u64 rid:13; /** request id in response to pkt sent by host */
- u64 reserved:4;
- u64 ossp:32; /** opcode/subcode specific parameters */
+ u64 len:3; /** additional 64-bit words */
+ u64 reserved:17;
+ u64 ossp:32; /** opcode/subcode specific parameters */
} r;
struct {
u64 opcode:4;
u64 subcode:8;
- u64 len:3; /** additional 64-bit words */
- u64 rid:13; /** request id in response to pkt sent by host */
- u64 extra:24;
- u64 link:8;
+ u64 len:3; /** additional 64-bit words */
+ u64 extra:28;
+ u64 vlan:12;
+ u64 priority:3;
u64 csum_verified:3; /** checksum verified. */
u64 has_hwtstamp:1; /** Has hardware timestamp. 1 = yes. */
+ u64 encap_on:1;
+ u64 has_hash:1; /** Has hash (rth or rss). 1 = yes. */
} r_dh;
struct {
u64 opcode:4;
u64 subcode:8;
- u64 len:3; /** additional 64-bit words */
- u64 rid:13; /** request id in response to pkt sent by host */
+ u64 len:3; /** additional 64-bit words */
+ u64 reserved:11;
u64 num_gmx_ports:8;
- u64 max_nic_ports:8;
+ u64 max_nic_ports:10;
u64 app_cap_flags:4;
- u64 app_mode:16;
+ u64 app_mode:8;
+ u64 pkind:8;
} r_core_drv_init;
struct {
u64 opcode:4;
u64 subcode:8;
u64 len:3; /** additional 64-bit words */
- u64 rid:13;
- u64 reserved:4;
+ u64 reserved:8;
u64 extra:25;
- u64 ifidx:7;
+ u64 gmxport:16;
} r_nic_info;
#else
u64 u64;
struct {
u64 ossp:32; /** opcode/subcode specific parameters */
- u64 reserved:4;
- u64 rid:13; /** req id in response to pkt sent by host */
+ u64 reserved:17;
u64 len:3; /** additional 64-bit words */
u64 subcode:8;
u64 opcode:4;
} r;
struct {
+ u64 has_hash:1; /** Has hash (rth or rss). 1 = yes. */
+ u64 encap_on:1;
u64 has_hwtstamp:1; /** 1 = has hwtstamp */
u64 csum_verified:3; /** checksum verified. */
- u64 link:8;
- u64 extra:24;
- u64 rid:13; /** req id in response to pkt sent by host */
+ u64 priority:3;
+ u64 vlan:12;
+ u64 extra:28;
u64 len:3; /** additional 64-bit words */
u64 subcode:8;
u64 opcode:4;
} r_dh;
struct {
- u64 app_mode:16;
+ u64 pkind:8;
+ u64 app_mode:8;
u64 app_cap_flags:4;
- u64 max_nic_ports:8;
+ u64 max_nic_ports:10;
u64 num_gmx_ports:8;
- u64 rid:13;
+ u64 reserved:11;
u64 len:3; /** additional 64-bit words */
u64 subcode:8;
u64 opcode:4;
} r_core_drv_init;
struct {
- u64 ifidx:7;
+ u64 gmxport:16;
u64 extra:25;
- u64 reserved:4;
- u64 rid:13;
+ u64 reserved:8;
u64 len:3; /** additional 64-bit words */
u64 subcode:8;
u64 opcode:4;
@@ -461,30 +608,25 @@ union octeon_rh {
#define OCT_RH_SIZE (sizeof(union octeon_rh))
-#define OCT_PKT_PARAM_IPV4OPTS 1
-#define OCT_PKT_PARAM_IPV6EXTHDR 2
-
union octnic_packet_params {
u32 u32;
struct {
#ifdef __BIG_ENDIAN_BITFIELD
- u32 reserved:6;
+ u32 reserved:24;
+ u32 ip_csum:1; /* Perform IP header checksum(s) */
+ /* Perform Outer transport header checksum */
+ u32 transport_csum:1;
+ /* Find tunnel, and perform transport csum. */
u32 tnl_csum:1;
- u32 ip_csum:1;
- u32 ipv4opts_ipv6exthdr:2;
- u32 ipsec_ops:4;
- u32 tsflag:1;
- u32 csoffset:9;
- u32 ifidx:8;
+ u32 tsflag:1; /* Timestamp this packet */
+ u32 ipsec_ops:4; /* IPsec operation */
#else
- u32 ifidx:8;
- u32 csoffset:9;
- u32 tsflag:1;
u32 ipsec_ops:4;
- u32 ipv4opts_ipv6exthdr:2;
- u32 ip_csum:1;
+ u32 tsflag:1;
u32 tnl_csum:1;
- u32 reserved:6;
+ u32 transport_csum:1;
+ u32 ip_csum:1;
+ u32 reserved:24;
#endif
} s;
};
@@ -496,56 +638,96 @@ union oct_link_status {
struct {
#ifdef __BIG_ENDIAN_BITFIELD
u64 duplex:8;
- u64 status:8;
u64 mtu:16;
u64 speed:16;
+ u64 link_up:1;
u64 autoneg:1;
- u64 interface:4;
+ u64 if_mode:5;
u64 pause:1;
- u64 reserved:10;
+ u64 flashing:1;
+ u64 reserved:15;
#else
- u64 reserved:10;
+ u64 reserved:15;
+ u64 flashing:1;
u64 pause:1;
- u64 interface:4;
+ u64 if_mode:5;
u64 autoneg:1;
+ u64 link_up:1;
u64 speed:16;
u64 mtu:16;
- u64 status:8;
u64 duplex:8;
#endif
} s;
};
+/** The txpciq info passed to the host from the firmware */
+
+union oct_txpciq {
+ u64 u64;
+
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 q_no:8;
+ u64 port:8;
+ u64 pkind:6;
+ u64 use_qpg:1;
+ u64 qpg:11;
+ u64 reserved:30;
+#else
+ u64 reserved:30;
+ u64 qpg:11;
+ u64 use_qpg:1;
+ u64 pkind:6;
+ u64 port:8;
+ u64 q_no:8;
+#endif
+ } s;
+};
+
+/** The rxpciq info passed to the host from the firmware */
+
+union oct_rxpciq {
+ u64 u64;
+
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 q_no:8;
+ u64 reserved:56;
+#else
+ u64 reserved:56;
+ u64 q_no:8;
+#endif
+ } s;
+};
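Wrapping the bitfields in a union with a raw u64 lets the host copy a whole firmware-provided queue descriptor in one assignment and decode fields afterwards, which is exactly how setup_nic_devices() consumes the if_cfg response later in this patch:

	union oct_txpciq txpciq;

	txpciq.u64 = resp->cfg_info.linfo.txpciq[0].u64;	/* raw copy    */
	lio->txq = txpciq.s.q_no;				/* decoded use */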
+
/** Information for an OCTEON ethernet interface shared between core & host. */
struct oct_link_info {
union oct_link_status link;
u64 hw_addr;
#ifdef __BIG_ENDIAN_BITFIELD
- u16 gmxport;
- u8 rsvd[3];
- u8 num_txpciq;
- u8 num_rxpciq;
- u8 ifidx;
+ u64 gmxport:16;
+ u64 rsvd:32;
+ u64 num_txpciq:8;
+ u64 num_rxpciq:8;
#else
- u8 ifidx;
- u8 num_rxpciq;
- u8 num_txpciq;
- u8 rsvd[3];
- u16 gmxport;
+ u64 num_rxpciq:8;
+ u64 num_txpciq:8;
+ u64 rsvd:32;
+ u64 gmxport:16;
#endif
- u8 txpciq[MAX_IOQS_PER_NICIF];
- u8 rxpciq[MAX_IOQS_PER_NICIF];
+ union oct_txpciq txpciq[MAX_IOQS_PER_NICIF];
+ union oct_rxpciq rxpciq[MAX_IOQS_PER_NICIF];
};
#define OCT_LINK_INFO_SIZE (sizeof(struct oct_link_info))
struct liquidio_if_cfg_info {
- u64 ifidx;
u64 iqmask; /** mask for IQs enabled for the port */
u64 oqmask; /** mask for OQs enabled for the port */
struct oct_link_info linfo; /** initial link information */
+ char liquidio_firmware_version[32];
};
/** Stats for each NIC port in RX direction. */
@@ -570,10 +752,18 @@ struct nic_rx_stats {
u64 fw_err_pko;
u64 fw_err_link;
u64 fw_err_drop;
+ u64 fw_rx_vxlan;
+ u64 fw_rx_vxlan_err;
+
+ /* LRO */
u64 fw_lro_pkts; /* Number of packets that are LROed */
u64 fw_lro_octs; /* Number of octets that are LROed */
u64 fw_total_lro; /* Number of LRO packets formed */
u64 fw_lro_aborts; /* Number of times LRO of a packet was aborted */
+ u64 fw_lro_aborts_port;
+ u64 fw_lro_aborts_seq;
+ u64 fw_lro_aborts_tsval;
+ u64 fw_lro_aborts_timer;
/* intrmod: packet forward rate */
u64 fwd_rate;
};
@@ -597,9 +787,14 @@ struct nic_tx_stats {
/* firmware stats */
u64 fw_total_sent;
u64 fw_total_fwd;
+ u64 fw_total_fwd_bytes;
u64 fw_err_pko;
u64 fw_err_link;
u64 fw_err_drop;
+ u64 fw_err_tso;
+ u64 fw_tso; /* number of tso requests */
+ u64 fw_tso_fwd; /* number of packets segmented in tso */
+ u64 fw_tx_vxlan;
};
struct oct_link_stats {
@@ -630,23 +825,44 @@ struct oct_mdio_cmd {
#define OCT_LINK_STATS_SIZE (sizeof(struct oct_link_stats))
+/* intrmod: max. packet rate threshold */
+#define LIO_INTRMOD_MAXPKT_RATETHR 196608
+/* intrmod: min. packet rate threshold */
+#define LIO_INTRMOD_MINPKT_RATETHR 9216
+/* intrmod: max. packets to trigger interrupt */
+#define LIO_INTRMOD_RXMAXCNT_TRIGGER 384
+/* intrmod: min. packets to trigger interrupt */
+#define LIO_INTRMOD_RXMINCNT_TRIGGER 1
+/* intrmod: max. time to trigger interrupt */
+#define LIO_INTRMOD_RXMAXTMR_TRIGGER 128
+/* 66xx:intrmod: min. time to trigger interrupt
+ * (value of 1 is optimum for TCP_RR)
+ */
+#define LIO_INTRMOD_RXMINTMR_TRIGGER 1
+
+/* intrmod: max. packets to trigger interrupt */
+#define LIO_INTRMOD_TXMAXCNT_TRIGGER 64
+/* intrmod: min. packets to trigger interrupt */
+#define LIO_INTRMOD_TXMINCNT_TRIGGER 0
+
+/* intrmod: poll interval in seconds */
#define LIO_INTRMOD_CHECK_INTERVAL 1
-#define LIO_INTRMOD_MAXPKT_RATETHR 196608 /* max pkt rate threshold */
-#define LIO_INTRMOD_MINPKT_RATETHR 9216 /* min pkt rate threshold */
-#define LIO_INTRMOD_MAXCNT_TRIGGER 384 /* max pkts to trigger interrupt */
-#define LIO_INTRMOD_MINCNT_TRIGGER 1 /* min pkts to trigger interrupt */
-#define LIO_INTRMOD_MAXTMR_TRIGGER 128 /* max time to trigger interrupt */
-#define LIO_INTRMOD_MINTMR_TRIGGER 32 /* min time to trigger interrupt */
struct oct_intrmod_cfg {
- u64 intrmod_enable;
- u64 intrmod_check_intrvl;
- u64 intrmod_maxpkt_ratethr;
- u64 intrmod_minpkt_ratethr;
- u64 intrmod_maxcnt_trigger;
- u64 intrmod_maxtmr_trigger;
- u64 intrmod_mincnt_trigger;
- u64 intrmod_mintmr_trigger;
+ u64 rx_enable;
+ u64 tx_enable;
+ u64 check_intrvl;
+ u64 maxpkt_ratethr;
+ u64 minpkt_ratethr;
+ u64 rx_maxcnt_trigger;
+ u64 rx_mincnt_trigger;
+ u64 rx_maxtmr_trigger;
+ u64 rx_mintmr_trigger;
+ u64 tx_mincnt_trigger;
+ u64 tx_maxcnt_trigger;
+ u64 rx_frames;
+ u64 tx_frames;
+ u64 rx_usecs;
};
#define BASE_QUEUE_NOT_REQUESTED 65535
@@ -659,9 +875,9 @@ union oct_nic_if_cfg {
u64 num_iqueues:16;
u64 num_oqueues:16;
u64 gmx_port_id:8;
- u64 reserved:8;
+ u64 vf_id:8;
#else
- u64 reserved:8;
+ u64 vf_id:8;
u64 gmx_port_id:8;
u64 num_oqueues:16;
u64 num_iqueues:16;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
index 62a8dd5cd..b3396e3a8 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
@@ -37,7 +37,7 @@
/* Maximum octeon devices defined as MAX_OCTEON_NICIF to support
* multiple(<= MAX_OCTEON_NICIF) Miniports
*/
-#define MAX_OCTEON_NICIF 32
+#define MAX_OCTEON_NICIF 128
#define MAX_OCTEON_DEVICES MAX_OCTEON_NICIF
#define MAX_OCTEON_LINKS MAX_OCTEON_NICIF
#define MAX_OCTEON_MULTICAST_ADDR 32
@@ -135,7 +135,7 @@
#define CFG_GET_IS_SLI_BP_ON(cfg) ((cfg)->misc.enable_sli_oq_bp)
/* Max IOQs per OCTEON Link */
-#define MAX_IOQS_PER_NICIF 32
+#define MAX_IOQS_PER_NICIF 64
enum lio_card_type {
LIO_210SV = 0, /* Two port, 66xx */
@@ -226,7 +226,7 @@ struct octeon_oq_config {
*/
u64 refill_threshold:16;
- /** If set, the Output queue uses info-pointer mode. (Default: 1 ) */
+ /** If set, the Output queue uses info-pointer mode. (Default: 1) */
u64 info_ptr:32;
/* Max number of OQs available */
@@ -236,7 +236,7 @@ struct octeon_oq_config {
/* Max number of OQs available */
u64 max_oqs:8;
- /** If set, the Output queue uses info-pointer mode. (Default: 1 ) */
+ /** If set, the Output queue uses info-pointer mode. (Default: 1) */
u64 info_ptr:32;
/** The number of buffers that were consumed during packet processing by
@@ -416,9 +416,11 @@ struct octeon_config {
#define DISPATCH_LIST_SIZE BIT(OPCODE_MASK_BITS)
/* Maximum number of Octeon Instruction (command) queues */
-#define MAX_OCTEON_INSTR_QUEUES CN6XXX_MAX_INPUT_QUEUES
+#define MAX_OCTEON_INSTR_QUEUES(oct) CN6XXX_MAX_INPUT_QUEUES
+/* Maximum number of Octeon Output queues */
+#define MAX_OCTEON_OUTPUT_QUEUES(oct) CN6XXX_MAX_OUTPUT_QUEUES
-/* Maximum number of Octeon Instruction (command) queues */
-#define MAX_OCTEON_OUTPUT_QUEUES CN6XXX_MAX_OUTPUT_QUEUES
+#define MAX_POSSIBLE_OCTEON_INSTR_QUEUES CN6XXX_MAX_INPUT_QUEUES
+#define MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES CN6XXX_MAX_OUTPUT_QUEUES
#endif /* __OCTEON_CONFIG_H__ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
index 466147e40..bbb50ea66 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
@@ -23,27 +23,14 @@
/**
* @file octeon_console.c
*/
-#include <linux/version.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <linux/kthread.h>
#include <linux/netdevice.h>
-#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
-#include "octeon_nic.h"
#include "octeon_main.h"
-#include "octeon_network.h"
-#include "cn66xx_regs.h"
-#include "cn66xx_device.h"
-#include "cn68xx_regs.h"
-#include "cn68xx_device.h"
-#include "liquidio_image.h"
#include "octeon_mem_ops.h"
static void octeon_remote_lock(void);
@@ -51,6 +38,8 @@ static void octeon_remote_unlock(void);
static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
const char *name,
u32 flags);
+static int octeon_console_read(struct octeon_device *oct, u32 console_num,
+ char *buffer, u32 buf_size);
#define MIN(a, b) min((a), (b))
#define CAST_ULL(v) ((u64)(v))
@@ -170,8 +159,8 @@ struct octeon_pci_console_desc {
offsetof(struct cvmx_bootmem_desc, field), \
SIZEOF_FIELD(struct cvmx_bootmem_desc, field))
-#define __cvmx_bootmem_lock(flags)
-#define __cvmx_bootmem_unlock(flags)
+#define __cvmx_bootmem_lock(flags) (flags = flags)
+#define __cvmx_bootmem_unlock(flags) (flags = flags)
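The (flags = flags) self-assignment keeps the otherwise-unused flags argument referenced so the stubbed lock macros compile warning-free. A void cast is the more conventional spelling of the same stub (a suggestion, not what the patch uses):

	#define __cvmx_bootmem_lock(flags)	((void)(flags))
	#define __cvmx_bootmem_unlock(flags)	((void)(flags))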
/**
* This macro returns a member of the
@@ -234,7 +223,7 @@ static void CVMX_BOOTMEM_NAMED_GET_NAME(struct octeon_device *oct,
u32 len)
{
addr += offsetof(struct cvmx_bootmem_named_block_desc, name);
- octeon_pci_read_core_mem(oct, addr, str, len);
+ octeon_pci_read_core_mem(oct, addr, (u8 *)str, len);
str[len] = 0;
}
@@ -323,6 +312,9 @@ static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
if (name && named_size) {
char *name_tmp =
kmalloc(name_length + 1, GFP_KERNEL);
+ if (!name_tmp)
+ break;
+
CVMX_BOOTMEM_NAMED_GET_NAME(oct, named_addr,
name_tmp,
name_length);
@@ -383,7 +375,7 @@ static void octeon_remote_unlock(void)
int octeon_console_send_cmd(struct octeon_device *oct, char *cmd_str,
u32 wait_hundredths)
{
- u32 len = strlen(cmd_str);
+ u32 len = (u32)strlen(cmd_str);
dev_dbg(&oct->pci_dev->dev, "sending \"%s\" to bootloader\n", cmd_str);
@@ -440,8 +432,7 @@ int octeon_wait_for_bootloader(struct octeon_device *oct,
}
static void octeon_console_handle_result(struct octeon_device *oct,
- size_t console_num,
- char *buffer, s32 bytes_read)
+ size_t console_num)
{
struct octeon_console *console;
@@ -492,7 +483,7 @@ static void check_console(struct work_struct *work)
struct octeon_console *console;
struct cavium_wk *wk = (struct cavium_wk *)work;
struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
- size_t console_num = wk->ctxul;
+ u32 console_num = (u32)wk->ctxul;
u32 delay;
console = &oct->console[console_num];
@@ -505,20 +496,17 @@ static void check_console(struct work_struct *work)
*/
bytes_read =
octeon_console_read(oct, console_num, console_buffer,
- sizeof(console_buffer) - 1, 0);
+ sizeof(console_buffer) - 1);
if (bytes_read > 0) {
total_read += bytes_read;
- if (console->waiting) {
- octeon_console_handle_result(oct, console_num,
- console_buffer,
- bytes_read);
- }
+ if (console->waiting)
+ octeon_console_handle_result(oct, console_num);
if (octeon_console_debug_enabled(console_num)) {
output_console_line(oct, console, console_num,
console_buffer, bytes_read);
}
} else if (bytes_read < 0) {
- dev_err(&oct->pci_dev->dev, "Error reading console %lu, ret=%d\n",
+ dev_err(&oct->pci_dev->dev, "Error reading console %u, ret=%d\n",
console_num, bytes_read);
}
@@ -530,7 +518,7 @@ static void check_console(struct work_struct *work)
*/
if (octeon_console_debug_enabled(console_num) &&
(total_read == 0) && (console->leftover[0])) {
- dev_info(&oct->pci_dev->dev, "%lu: %s\n",
+ dev_info(&oct->pci_dev->dev, "%u: %s\n",
console_num, console->leftover);
console->leftover[0] = '\0';
}
@@ -675,8 +663,8 @@ static inline int octeon_console_avail_bytes(u32 buffer_size,
octeon_console_free_bytes(buffer_size, wr_idx, rd_idx);
}
-int octeon_console_read(struct octeon_device *oct, u32 console_num,
- char *buffer, u32 buf_size, u32 flags)
+static int octeon_console_read(struct octeon_device *oct, u32 console_num,
+ char *buffer, u32 buf_size)
{
int bytes_to_read;
u32 rd_idx, wr_idx;
@@ -712,7 +700,7 @@ int octeon_console_read(struct octeon_device *oct, u32 console_num,
bytes_to_read = console->buffer_size - rd_idx;
octeon_pci_read_core_mem(oct, console->output_base_addr + rd_idx,
- buffer, bytes_to_read);
+ (u8 *)buffer, bytes_to_read);
octeon_write_device_mem32(oct, console->addr +
offsetof(struct octeon_pci_console,
output_read_index),
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 8e23e3fad..0eb504a43 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -19,28 +19,19 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/version.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/crc32.h>
-#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
-#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
-#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
-#include "cn68xx_regs.h"
-#include "cn68xx_device.h"
#include "liquidio_image.h"
#include "octeon_mem_ops.h"
@@ -449,10 +440,10 @@ static struct octeon_config_ptr {
};
static char oct_dev_state_str[OCT_DEV_STATES + 1][32] = {
- "BEGIN", "PCI-MAP-DONE", "DISPATCH-INIT-DONE",
+ "BEGIN", "PCI-MAP-DONE", "DISPATCH-INIT-DONE",
"IQ-INIT-DONE", "SCBUFF-POOL-INIT-DONE", "RESPLIST-INIT-DONE",
"DROQ-INIT-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE",
- "HOST-READY", "CORE-READY", "RUNNING", "IN-RESET",
+ "HOST-READY", "CORE-READY", "RUNNING", "IN-RESET",
"INVALID"
};
@@ -550,17 +541,19 @@ static char *get_oct_app_string(u32 app_mode)
return oct_dev_app_str[CVM_DRV_INVALID_APP - CVM_DRV_APP_START];
}
+u8 fbuf[4 * 1024 * 1024];
+
int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
size_t size)
{
int ret = 0;
- u8 *p;
- u8 *buffer;
+ u8 *p = fbuf;
u32 crc32_result;
u64 load_addr;
u32 image_len;
struct octeon_firmware_file_header *h;
- u32 i;
+ u32 i, rem, base_len = strlen(LIQUIDIO_BASE_VERSION);
+ char *base;
if (size < sizeof(struct octeon_firmware_file_header)) {
dev_err(&oct->pci_dev->dev, "Firmware file too small (%d < %d).\n",
@@ -576,19 +569,26 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
return -EINVAL;
}
- crc32_result =
- crc32(~0, data,
- sizeof(struct octeon_firmware_file_header) -
- sizeof(u32)) ^ ~0U;
+ crc32_result = crc32((unsigned int)~0, data,
+ sizeof(struct octeon_firmware_file_header) -
+ sizeof(u32)) ^ ~0U;
if (crc32_result != be32_to_cpu(h->crc32)) {
dev_err(&oct->pci_dev->dev, "Firmware CRC mismatch (0x%08x != 0x%08x).\n",
crc32_result, be32_to_cpu(h->crc32));
return -EINVAL;
}
- if (memcmp(LIQUIDIO_VERSION, h->version, strlen(LIQUIDIO_VERSION))) {
- dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s, got %s.\n",
- LIQUIDIO_VERSION, h->version);
+ if (strncmp(LIQUIDIO_PACKAGE, h->version, strlen(LIQUIDIO_PACKAGE))) {
+ dev_err(&oct->pci_dev->dev, "Unmatched firmware package type. Expected %s, got %s.\n",
+ LIQUIDIO_PACKAGE, h->version);
+ return -EINVAL;
+ }
+
+ base = h->version + strlen(LIQUIDIO_PACKAGE);
+ ret = memcmp(LIQUIDIO_BASE_VERSION, base, base_len);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s.x, got %s.\n",
+ LIQUIDIO_BASE_VERSION, base);
return -EINVAL;
}
@@ -602,58 +602,58 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
snprintf(oct->fw_info.liquidio_firmware_version, 32, "LIQUIDIO: %s",
h->version);
- buffer = kmemdup(data, size, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
- p = buffer + sizeof(struct octeon_firmware_file_header);
+ data += sizeof(struct octeon_firmware_file_header);
+ dev_info(&oct->pci_dev->dev, "%s: Loading %d images\n", __func__,
+ be32_to_cpu(h->num_images));
/* load all images */
for (i = 0; i < be32_to_cpu(h->num_images); i++) {
load_addr = be64_to_cpu(h->desc[i].addr);
image_len = be32_to_cpu(h->desc[i].len);
- /* validate the image */
- crc32_result = crc32(~0, p, image_len) ^ ~0U;
- if (crc32_result != be32_to_cpu(h->desc[i].crc32)) {
- dev_err(&oct->pci_dev->dev,
- "Firmware CRC mismatch in image %d (0x%08x != 0x%08x).\n",
- i, crc32_result,
- be32_to_cpu(h->desc[i].crc32));
- ret = -EINVAL;
- goto done_downloading;
- }
+ dev_info(&oct->pci_dev->dev, "Loading firmware %d at %llx\n",
+ image_len, load_addr);
- /* download the image */
- octeon_pci_write_core_mem(oct, load_addr, p, image_len);
+ /* Write in 4 MB chunks */
+ rem = image_len;
- p += image_len;
- dev_dbg(&oct->pci_dev->dev,
- "Downloaded image %d (%d bytes) to address 0x%016llx\n",
- i, image_len, load_addr);
+ while (rem) {
+ if (rem < (4 * 1024 * 1024))
+ size = rem;
+ else
+ size = 4 * 1024 * 1024;
+
+ memcpy(p, data, size);
+
+ /* download the image */
+ octeon_pci_write_core_mem(oct, load_addr, p, (u32)size);
+
+ data += size;
+ rem -= (u32)size;
+ load_addr += size;
+ }
}
+ dev_info(&oct->pci_dev->dev, "Writing boot command: %s\n",
+ h->bootcmd);
/* Invoke the bootcmd */
ret = octeon_console_send_cmd(oct, h->bootcmd, 50);
-done_downloading:
- kfree(buffer);
-
- return ret;
+ return 0;
}
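Instead of kmemdup()ing the whole firmware image, the download now streams each section through the 4 MB bounce buffer fbuf; note it also returns 0 unconditionally, discarding the status of the final bootcmd. The slicing logic on its own:

	while (rem) {
		u32 chunk = min_t(u32, rem, 4 * 1024 * 1024);

		memcpy(fbuf, data, chunk);
		octeon_pci_write_core_mem(oct, load_addr, fbuf, chunk);
		data += chunk;
		rem -= chunk;
		load_addr += chunk;
	}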
void octeon_free_device_mem(struct octeon_device *oct)
{
- u32 i;
+ int i;
- for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
- /* could check mask as well */
- vfree(oct->droq[i]);
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+ if (oct->io_qmask.oq & (1ULL << i))
+ vfree(oct->droq[i]);
}
- for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
- /* could check mask as well */
- vfree(oct->instr_queue[i]);
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+ if (oct->io_qmask.iq & (1ULL << i))
+ vfree(oct->instr_queue[i]);
}
i = oct->octeon_id;
@@ -735,55 +735,61 @@ struct octeon_device *octeon_allocate_device(u32 pci_id,
octeon_device[oct_idx] = oct;
oct->octeon_id = oct_idx;
- snprintf((oct->device_name), sizeof(oct->device_name),
+ snprintf(oct->device_name, sizeof(oct->device_name),
"LiquidIO%d", (oct->octeon_id));
return oct;
}
+/* this function is only for setting up the first queue */
int octeon_setup_instr_queues(struct octeon_device *oct)
{
- u32 i, num_iqs = 0;
u32 num_descs = 0;
+ u32 iq_no = 0;
+ union oct_txpciq txpciq;
+ int numa_node = cpu_to_node(iq_no % num_online_cpus());
/* this causes queue 0 to be default queue */
- if (OCTEON_CN6XXX(oct)) {
- num_iqs = 1;
+ if (OCTEON_CN6XXX(oct))
num_descs =
CFG_GET_NUM_DEF_TX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
- }
oct->num_iqs = 0;
- for (i = 0; i < num_iqs; i++) {
- oct->instr_queue[i] =
+ oct->instr_queue[0] = vmalloc_node(sizeof(*oct->instr_queue[0]),
+ numa_node);
+ if (!oct->instr_queue[0])
+ oct->instr_queue[0] =
vmalloc(sizeof(struct octeon_instr_queue));
- if (!oct->instr_queue[i])
- return 1;
-
- memset(oct->instr_queue[i], 0,
- sizeof(struct octeon_instr_queue));
-
- oct->instr_queue[i]->app_ctx = (void *)(size_t)i;
- if (octeon_init_instr_queue(oct, i, num_descs))
- return 1;
-
- oct->num_iqs++;
+ if (!oct->instr_queue[0])
+ return 1;
+ memset(oct->instr_queue[0], 0, sizeof(struct octeon_instr_queue));
+ oct->instr_queue[0]->q_index = 0;
+ oct->instr_queue[0]->app_ctx = (void *)(size_t)0;
+ oct->instr_queue[0]->ifidx = 0;
+ txpciq.u64 = 0;
+ txpciq.s.q_no = iq_no;
+ txpciq.s.use_qpg = 0;
+ txpciq.s.qpg = 0;
+ if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
+ /* prevent memory leak */
+ vfree(oct->instr_queue[0]);
+ return 1;
}
+ oct->num_iqs++;
return 0;
}
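Queue structures are now allocated NUMA-aware with a plain vmalloc() fallback; the pattern generalizes to any per-queue allocation:

	q = vmalloc_node(sizeof(*q), numa_node);
	if (!q)
		q = vmalloc(sizeof(*q));	/* any node beats failing */
	if (!q)
		return 1;
	memset(q, 0, sizeof(*q));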
int octeon_setup_output_queues(struct octeon_device *oct)
{
- u32 i, num_oqs = 0;
u32 num_descs = 0;
u32 desc_size = 0;
+ u32 oq_no = 0;
+ int numa_node = cpu_to_node(oq_no % num_online_cpus());
/* this causes queue 0 to be default queue */
if (OCTEON_CN6XXX(oct)) {
- /* CFG_GET_OQ_MAX_BASE_Q(CHIP_FIELD(oct, cn6xxx, conf)); */
- num_oqs = 1;
num_descs =
CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
desc_size =
@@ -791,19 +797,15 @@ int octeon_setup_output_queues(struct octeon_device *oct)
}
oct->num_oqs = 0;
+ oct->droq[0] = vmalloc_node(sizeof(*oct->droq[0]), numa_node);
+ if (!oct->droq[0])
+ oct->droq[0] = vmalloc(sizeof(*oct->droq[0]));
+ if (!oct->droq[0])
+ return 1;
- for (i = 0; i < num_oqs; i++) {
- oct->droq[i] = vmalloc(sizeof(*oct->droq[i]));
- if (!oct->droq[i])
- return 1;
-
- memset(oct->droq[i], 0, sizeof(struct octeon_droq));
-
- if (octeon_init_droq(oct, i, num_descs, desc_size, NULL))
- return 1;
-
- oct->num_oqs++;
- }
+ if (octeon_init_droq(oct, oq_no, num_descs, desc_size, NULL))
+ return 1;
+ oct->num_oqs++;
return 0;
}
@@ -1005,79 +1007,6 @@ octeon_register_dispatch_fn(struct octeon_device *oct,
return 0;
}
-/* octeon_unregister_dispatch_fn
- * Parameters:
- * oct - octeon device
- * opcode - driver should unregister the function for this opcode
- * subcode - driver should unregister the function for this subcode
- * Description:
- * Unregister the function set for this opcode+subcode.
- * Returns:
- * Success: 0
- * Failure: 1
- * Locks:
- * No locks are held.
- */
-int
-octeon_unregister_dispatch_fn(struct octeon_device *oct, u16 opcode,
- u16 subcode)
-{
- int retval = 0;
- u32 idx;
- struct list_head *dispatch, *dfree = NULL, *tmp2;
- u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
-
- idx = combined_opcode & OCTEON_OPCODE_MASK;
-
- spin_lock_bh(&oct->dispatch.lock);
-
- if (oct->dispatch.count == 0) {
- spin_unlock_bh(&oct->dispatch.lock);
- dev_err(&oct->pci_dev->dev,
- "No dispatch functions registered for this device\n");
- return 1;
- }
-
- if (oct->dispatch.dlist[idx].opcode == combined_opcode) {
- dispatch = &oct->dispatch.dlist[idx].list;
- if (dispatch->next != dispatch) {
- dispatch = dispatch->next;
- oct->dispatch.dlist[idx].opcode =
- ((struct octeon_dispatch *)dispatch)->opcode;
- oct->dispatch.dlist[idx].dispatch_fn =
- ((struct octeon_dispatch *)
- dispatch)->dispatch_fn;
- oct->dispatch.dlist[idx].arg =
- ((struct octeon_dispatch *)dispatch)->arg;
- list_del(dispatch);
- dfree = dispatch;
- } else {
- oct->dispatch.dlist[idx].opcode = 0;
- oct->dispatch.dlist[idx].dispatch_fn = NULL;
- oct->dispatch.dlist[idx].arg = NULL;
- }
- } else {
- retval = 1;
- list_for_each_safe(dispatch, tmp2,
- &(oct->dispatch.dlist[idx].
- list)) {
- if (((struct octeon_dispatch *)dispatch)->opcode ==
- combined_opcode) {
- list_del(dispatch);
- dfree = dispatch;
- retval = 0;
- }
- }
- }
-
- if (!retval)
- oct->dispatch.count--;
-
- spin_unlock_bh(&oct->dispatch.lock);
- vfree(dfree);
- return retval;
-}
-
int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
{
u32 i;
@@ -1152,8 +1081,8 @@ core_drv_init_err:
int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)
{
- if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES) &&
- (oct->io_qmask.iq & (1UL << q_no)))
+ if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES(oct)) &&
+ (oct->io_qmask.iq & (1ULL << q_no)))
return oct->instr_queue[q_no]->max_count;
return -1;
@@ -1161,8 +1090,8 @@ int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)
int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no)
{
- if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES) &&
- (oct->io_qmask.oq & (1UL << q_no)))
+ if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES(oct)) &&
+ (oct->io_qmask.oq & (1ULL << q_no)))
return oct->droq[q_no]->max_count;
return -1;
}
@@ -1253,10 +1182,10 @@ void lio_pci_writeq(struct octeon_device *oct,
int octeon_mem_access_ok(struct octeon_device *oct)
{
u64 access_okay = 0;
+ u64 lmc0_reset_ctl;
/* Check to make sure a DDR interface is enabled */
- u64 lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
-
+ lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
access_okay = (lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK);
return access_okay ? 0 : 1;
@@ -1270,9 +1199,6 @@ int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout)
if (!timeout)
return ret;
- while (*timeout == 0)
- schedule_timeout_uninterruptible(HZ / 10);
-
for (ms = 0; (ret != 0) && ((*timeout == 0) || (ms <= *timeout));
ms += HZ / 10) {
ret = octeon_mem_access_ok(oct);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index 36e1f85df..01edfb404 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -152,9 +152,9 @@ struct octeon_mmio {
#define MAX_OCTEON_MAPS 32
struct octeon_io_enable {
- u32 iq;
- u32 oq;
- u32 iq64B;
+ u64 iq;
+ u64 oq;
+ u64 iq64B;
};
struct octeon_reg_list {
@@ -204,8 +204,7 @@ struct octeon_fn_list {
void (*bar1_idx_setup)(struct octeon_device *, u64, u32, int);
void (*bar1_idx_write)(struct octeon_device *, u32, u32);
u32 (*bar1_idx_read)(struct octeon_device *, u32);
- u32 (*update_iq_read_idx)(struct octeon_device *,
- struct octeon_instr_queue *);
+ u32 (*update_iq_read_idx)(struct octeon_instr_queue *);
void (*enable_oq_pkt_time_intr)(struct octeon_device *, u32);
void (*disable_oq_pkt_time_intr)(struct octeon_device *, u32);
@@ -222,7 +221,7 @@ struct octeon_fn_list {
/* Structure for named memory blocks
* Number of descriptors
- * available can be changed without affecting compatiblity,
+ * available can be changed without affecting compatibility,
* but name length changes require a bump in the bootmem
* descriptor version
* Note: This structure must be naturally 64 bit aligned, as a single
@@ -255,7 +254,7 @@ struct oct_fw_info {
struct cavium_wk {
struct delayed_work work;
void *ctxptr;
- size_t ctxul;
+ u64 ctxul;
};
struct cavium_wq {
@@ -267,6 +266,8 @@ struct octdev_props {
/* Each interface in the Octeon device has a network
* device pointer (used for OS specific calls).
*/
+ int napi_enabled;
+ int gmxport;
struct net_device *netdev;
};
@@ -324,7 +325,8 @@ struct octeon_device {
struct octeon_sc_buffer_pool sc_buf_pool;
/** The input instruction queues */
- struct octeon_instr_queue *instr_queue[MAX_OCTEON_INSTR_QUEUES];
+ struct octeon_instr_queue *instr_queue
+ [MAX_POSSIBLE_OCTEON_INSTR_QUEUES];
/** The doubly-linked list of instruction response */
struct octeon_response_list response_list[MAX_RESPONSE_LISTS];
@@ -332,7 +334,7 @@ struct octeon_device {
u32 num_oqs;
/** The DROQ output queues */
- struct octeon_droq *droq[MAX_OCTEON_OUTPUT_QUEUES];
+ struct octeon_droq *droq[MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES];
struct octeon_io_enable io_qmask;
@@ -381,15 +383,29 @@ struct octeon_device {
struct cavium_wq dma_comp_wq;
- struct cavium_wq check_db_wq[MAX_OCTEON_INSTR_QUEUES];
+ /** Lock for dma response list */
+ spinlock_t cmd_resp_wqlock;
+ u32 cmd_resp_state;
+
+ struct cavium_wq check_db_wq[MAX_POSSIBLE_OCTEON_INSTR_QUEUES];
struct cavium_wk nic_poll_work;
struct cavium_wk console_poll_work[MAX_OCTEON_MAPS];
void *priv;
+
+ int rx_pause;
+ int tx_pause;
+
+ struct oct_link_stats link_stats; /* statistics from firmware */
+
+ /* private flags to control driver-specific features through ethtool */
+ u32 priv_flags;
};
+#define OCT_DRV_ONLINE 1
+#define OCT_DRV_OFFLINE 2
#define OCTEON_CN6XXX(oct) ((oct->chip_id == OCTEON_CN66XX) || \
(oct->chip_id == OCTEON_CN68XX))
#define CHIP_FIELD(oct, TYPE, field) \
@@ -569,8 +585,7 @@ int octeon_add_console(struct octeon_device *oct, u32 console_num);
int octeon_console_write(struct octeon_device *oct, u32 console_num,
char *buffer, u32 write_request_size, u32 flags);
int octeon_console_write_avail(struct octeon_device *oct, u32 console_num);
-int octeon_console_read(struct octeon_device *oct, u32 console_num,
- char *buffer, u32 buf_size, u32 flags);
+
int octeon_console_read_avail(struct octeon_device *oct, u32 console_num);
/** Removes all attached consoles. */
@@ -646,4 +661,17 @@ void *oct_get_config_info(struct octeon_device *oct, u16 card_type);
*/
struct octeon_config *octeon_get_conf(struct octeon_device *oct);
+/* LiquidIO driver private flags */
+enum {
+ OCT_PRIV_FLAG_TX_BYTES = 0, /* Tx interrupts by pending byte count */
+};
+
+static inline void lio_set_priv_flag(struct octeon_device *octdev, u32 flag,
+ u32 val)
+{
+ if (val)
+ octdev->priv_flags |= (0x1 << flag);
+ else
+ octdev->priv_flags &= ~(0x1 << flag);
+}
#endif
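The lio_set_priv_flag() helper added above is a plain set/clear on a 32-bit flag word. A minimal standalone sketch of that bit pattern, assuming a hypothetical get_priv_flag() counterpart purely for illustration:

#include <stdint.h>
#include <stdio.h>

enum { OCT_PRIV_FLAG_TX_BYTES = 0 };	/* Tx interrupts by pending byte count */

static void set_priv_flag(uint32_t *priv_flags, uint32_t flag, uint32_t val)
{
	if (val)
		*priv_flags |= (0x1u << flag);	/* set the flag bit */
	else
		*priv_flags &= ~(0x1u << flag);	/* clear the flag bit */
}

/* Hypothetical counterpart, not in the driver: test a flag bit. */
static int get_priv_flag(uint32_t priv_flags, uint32_t flag)
{
	return (priv_flags >> flag) & 0x1u;
}

int main(void)
{
	uint32_t priv_flags = 0;

	set_priv_flag(&priv_flags, OCT_PRIV_FLAG_TX_BYTES, 1);
	printf("tx-bytes flag: %d\n", get_priv_flag(priv_flags, OCT_PRIV_FLAG_TX_BYTES));
	return 0;
}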
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index 174072b37..e0afe4c1f 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -19,30 +19,18 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/version.h>
-#include <linux/types.h>
-#include <linux/list.h>
#include <linux/pci.h>
-#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
-#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
-#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
-#include "cn68xx_regs.h"
-#include "cn68xx_device.h"
-#include "liquidio_image.h"
-#include "octeon_mem_ops.h"
-
-/* #define CAVIUM_ONLY_PERF_MODE */
#define CVM_MIN(d1, d2) (((d1) < (d2)) ? (d1) : (d2))
#define CVM_MAX(d1, d2) (((d1) > (d2)) ? (d1) : (d2))
@@ -104,8 +92,12 @@ static inline void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev,
return fn_arg;
}
-u32 octeon_droq_check_hw_for_pkts(struct octeon_device *oct,
- struct octeon_droq *droq)
+/** Check for packets on Droq. This function should be called with
+ * lock held.
+ * @param droq - Droq on which count is checked.
+ * @return Returns packet count.
+ */
+u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq)
{
u32 pkt_count = 0;
@@ -151,22 +143,26 @@ octeon_droq_destroy_ring_buffers(struct octeon_device *oct,
struct octeon_droq *droq)
{
u32 i;
+ struct octeon_skb_page_info *pg_info;
for (i = 0; i < droq->max_count; i++) {
- if (droq->recv_buf_list[i].buffer) {
- if (droq->desc_ring) {
- lio_unmap_ring_info(oct->pci_dev,
- (u64)droq->
- desc_ring[i].info_ptr,
- OCT_DROQ_INFO_SIZE);
- lio_unmap_ring(oct->pci_dev,
- (u64)droq->desc_ring[i].
- buffer_ptr,
- droq->buffer_size);
- }
- recv_buffer_free(droq->recv_buf_list[i].buffer);
- droq->recv_buf_list[i].buffer = NULL;
- }
+ pg_info = &droq->recv_buf_list[i].pg_info;
+
+ if (pg_info->dma)
+ lio_unmap_ring(oct->pci_dev,
+ (u64)pg_info->dma);
+ pg_info->dma = 0;
+
+ if (pg_info->page)
+ recv_buffer_destroy(droq->recv_buf_list[i].buffer,
+ pg_info);
+
+ if (droq->desc_ring && droq->desc_ring[i].info_ptr)
+ lio_unmap_ring_info(oct->pci_dev,
+ (u64)droq->
+ desc_ring[i].info_ptr,
+ OCT_DROQ_INFO_SIZE);
+ droq->recv_buf_list[i].buffer = NULL;
}
octeon_droq_reset_indices(droq);
@@ -181,25 +177,23 @@ octeon_droq_setup_ring_buffers(struct octeon_device *oct,
struct octeon_droq_desc *desc_ring = droq->desc_ring;
for (i = 0; i < droq->max_count; i++) {
- buf = recv_buffer_alloc(oct, droq->q_no, droq->buffer_size);
+ buf = recv_buffer_alloc(oct, &droq->recv_buf_list[i].pg_info);
if (!buf) {
dev_err(&oct->pci_dev->dev, "%s buffer alloc failed\n",
__func__);
+ droq->stats.rx_alloc_failure++;
return -ENOMEM;
}
droq->recv_buf_list[i].buffer = buf;
droq->recv_buf_list[i].data = get_rbd(buf);
-
droq->info_list[i].length = 0;
/* map ring buffers into memory */
desc_ring[i].info_ptr = lio_map_ring_info(droq, i);
desc_ring[i].buffer_ptr =
- lio_map_ring(oct->pci_dev,
- droq->recv_buf_list[i].buffer,
- droq->buffer_size);
+ lio_map_ring(droq->recv_buf_list[i].buffer);
}
octeon_droq_reset_indices(droq);
@@ -242,6 +236,8 @@ int octeon_init_droq(struct octeon_device *oct,
struct octeon_droq *droq;
u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0;
u32 c_pkts_per_intr = 0, c_refill_threshold = 0;
+ int orig_node = dev_to_node(&oct->pci_dev->dev);
+ int numa_node = cpu_to_node(q_no % num_online_cpus());
dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);
@@ -261,15 +257,23 @@ int octeon_init_droq(struct octeon_device *oct,
struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);
c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x);
- c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
+ c_refill_threshold =
+ (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
+ } else {
+ return 1;
}
droq->max_count = c_num_descs;
droq->buffer_size = c_buf_size;
desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE;
+ set_dev_node(&oct->pci_dev->dev, numa_node);
droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
(dma_addr_t *)&droq->desc_ring_dma);
+ set_dev_node(&oct->pci_dev->dev, orig_node);
+ if (!droq->desc_ring)
+ droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
+ (dma_addr_t *)&droq->desc_ring_dma);
if (!droq->desc_ring) {
dev_err(&oct->pci_dev->dev,
@@ -283,12 +287,11 @@ int octeon_init_droq(struct octeon_device *oct,
droq->max_count);
droq->info_list =
- cnnic_alloc_aligned_dma(oct->pci_dev,
- (droq->max_count * OCT_DROQ_INFO_SIZE),
- &droq->info_alloc_size,
- &droq->info_base_addr,
- &droq->info_list_dma);
-
+ cnnic_numa_alloc_aligned_dma((droq->max_count *
+ OCT_DROQ_INFO_SIZE),
+ &droq->info_alloc_size,
+ &droq->info_base_addr,
+ numa_node);
if (!droq->info_list) {
dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n");
lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
@@ -297,7 +300,12 @@ int octeon_init_droq(struct octeon_device *oct,
}
droq->recv_buf_list = (struct octeon_recv_buffer *)
- vmalloc(droq->max_count *
+ vmalloc_node(droq->max_count *
+ OCT_DROQ_RECVBUF_SIZE,
+ numa_node);
+ if (!droq->recv_buf_list)
+ droq->recv_buf_list = (struct octeon_recv_buffer *)
+ vmalloc(droq->max_count *
OCT_DROQ_RECVBUF_SIZE);
if (!droq->recv_buf_list) {
dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n");
@@ -320,7 +328,7 @@ int octeon_init_droq(struct octeon_device *oct,
/* For 56xx Pass1, this function won't be called, so no checks. */
oct->fn_list.setup_oq_regs(oct, q_no);
- oct->io_qmask.oq |= (1 << q_no);
+ oct->io_qmask.oq |= (1ULL << q_no);
return 0;
@@ -358,6 +366,7 @@ static inline struct octeon_recv_info *octeon_create_recv_info(
struct octeon_recv_pkt *recv_pkt;
struct octeon_recv_info *recv_info;
u32 i, bytes_left;
+ struct octeon_skb_page_info *pg_info;
info = &droq->info_list[idx];
@@ -375,9 +384,14 @@ static inline struct octeon_recv_info *octeon_create_recv_info(
bytes_left = (u32)info->length;
while (buf_cnt) {
- lio_unmap_ring(octeon_dev->pci_dev,
- (u64)droq->desc_ring[idx].buffer_ptr,
- droq->buffer_size);
+ {
+ pg_info = &droq->recv_buf_list[idx].pg_info;
+
+ lio_unmap_ring(octeon_dev->pci_dev,
+ (u64)pg_info->dma);
+ pg_info->page = NULL;
+ pg_info->dma = 0;
+ }
recv_pkt->buffer_size[i] =
(bytes_left >=
@@ -449,6 +463,7 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
void *buf = NULL;
u8 *data;
u32 desc_refilled = 0;
+ struct octeon_skb_page_info *pg_info;
desc_ring = droq->desc_ring;
@@ -458,13 +473,22 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
* the buffer, else allocate.
*/
if (!droq->recv_buf_list[droq->refill_idx].buffer) {
- buf = recv_buffer_alloc(octeon_dev, droq->q_no,
- droq->buffer_size);
+ pg_info =
+ &droq->recv_buf_list[droq->refill_idx].pg_info;
+ /* Either recycle the existing pages or go for
+ * new page alloc
+ */
+ if (pg_info->page)
+ buf = recv_buffer_reuse(octeon_dev, pg_info);
+ else
+ buf = recv_buffer_alloc(octeon_dev, pg_info);
/* If a buffer could not be allocated, no point in
* continuing
*/
- if (!buf)
+ if (!buf) {
+ droq->stats.rx_alloc_failure++;
break;
+ }
droq->recv_buf_list[droq->refill_idx].buffer =
buf;
data = get_rbd(buf);
@@ -476,11 +500,8 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
droq->recv_buf_list[droq->refill_idx].data = data;
desc_ring[droq->refill_idx].buffer_ptr =
- lio_map_ring(octeon_dev->pci_dev,
- droq->recv_buf_list[droq->
- refill_idx].buffer,
- droq->buffer_size);
-
+ lio_map_ring(droq->recv_buf_list[droq->
+ refill_idx].buffer);
/* Reset any previous values in the length field. */
droq->info_list[droq->refill_idx].length = 0;
@@ -539,7 +560,9 @@ octeon_droq_dispatch_pkt(struct octeon_device *oct,
droq->stats.dropped_nomem++;
}
} else {
- dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function\n");
+ dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function (opcode %u/%u)\n",
+ (unsigned int)rh->r.opcode,
+ (unsigned int)rh->r.subcode);
droq->stats.dropped_nodispatch++;
} /* else (dispatch_fn ... */
@@ -586,6 +609,8 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
for (pkt = 0; pkt < pkt_count; pkt++) {
u32 pkt_len = 0;
struct sk_buff *nicbuf = NULL;
+ struct octeon_skb_page_info *pg_info;
+ void *buf;
info = &droq->info_list[droq->read_idx];
octeon_swap_8B_data((u64 *)info, 2);
@@ -605,7 +630,6 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
rh = &info->rh;
total_len += (u32)info->length;
-
if (OPCODE_SLOW_PATH(rh)) {
u32 buf_cnt;
@@ -614,50 +638,45 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
droq->refill_count += buf_cnt;
} else {
if (info->length <= droq->buffer_size) {
- lio_unmap_ring(oct->pci_dev,
- (u64)droq->desc_ring[
- droq->read_idx].buffer_ptr,
- droq->buffer_size);
pkt_len = (u32)info->length;
nicbuf = droq->recv_buf_list[
droq->read_idx].buffer;
+ pg_info = &droq->recv_buf_list[
+ droq->read_idx].pg_info;
+ if (recv_buffer_recycle(oct, pg_info))
+ pg_info->page = NULL;
droq->recv_buf_list[droq->read_idx].buffer =
NULL;
+
INCR_INDEX_BY1(droq->read_idx, droq->max_count);
- skb_put(nicbuf, pkt_len);
droq->refill_count++;
} else {
- nicbuf = octeon_fast_packet_alloc(oct, droq,
- droq->q_no,
- (u32)
+ nicbuf = octeon_fast_packet_alloc((u32)
info->length);
pkt_len = 0;
/* nicbuf allocation can fail. We'll handle it
* inside the loop.
*/
while (pkt_len < info->length) {
- int cpy_len;
+ int cpy_len, idx = droq->read_idx;
- cpy_len = ((pkt_len +
- droq->buffer_size) >
- info->length) ?
+ cpy_len = ((pkt_len + droq->buffer_size)
+ > info->length) ?
((u32)info->length - pkt_len) :
droq->buffer_size;
if (nicbuf) {
- lio_unmap_ring(oct->pci_dev,
- (u64)
- droq->desc_ring
- [droq->read_idx].
- buffer_ptr,
- droq->
- buffer_size);
octeon_fast_packet_next(droq,
nicbuf,
cpy_len,
- droq->
- read_idx
- );
+ idx);
+ buf = droq->recv_buf_list[idx].
+ buffer;
+ recv_buffer_fast_free(buf);
+ droq->recv_buf_list[idx].buffer
+ = NULL;
+ } else {
+ droq->stats.rx_alloc_failure++;
}
pkt_len += cpy_len;
@@ -668,12 +687,14 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
}
if (nicbuf) {
- if (droq->ops.fptr)
+ if (droq->ops.fptr) {
droq->ops.fptr(oct->octeon_id,
- nicbuf, pkt_len,
- rh, &droq->napi);
- else
+ nicbuf, pkt_len,
+ rh, &droq->napi,
+ droq->ops.farg);
+ } else {
recv_buffer_free(nicbuf);
+ }
}
}
@@ -681,16 +702,16 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
int desc_refilled = octeon_droq_refill(oct, droq);
/* Flush the droq descriptor data to memory to be sure
- * that when we update the credits the data in memory
- * is accurate.
- */
+ * that when we update the credits the data in memory
+ * is accurate.
+ */
wmb();
writel((desc_refilled), droq->pkts_credit_reg);
/* make sure mmio write completes */
mmiowb();
}
- } /* for ( each packet )... */
+ } /* for (each packet)... */
/* Increment refill_count by the number of buffers processed. */
droq->stats.pkts_received += pkt;
@@ -721,7 +742,7 @@ octeon_droq_process_packets(struct octeon_device *oct,
if (pkt_count > budget)
pkt_count = budget;
- /* Grab the lock */
+ /* Grab the droq lock */
spin_lock(&droq->lock);
pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count);
@@ -783,7 +804,7 @@ octeon_droq_process_poll_pkts(struct octeon_device *oct,
total_pkts_processed += pkts_processed;
- octeon_droq_check_hw_for_pkts(oct, droq);
+ octeon_droq_check_hw_for_pkts(droq);
}
spin_unlock(&droq->lock);
@@ -807,18 +828,6 @@ octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no, int cmd,
u32 arg)
{
struct octeon_droq *droq;
- struct octeon_config *oct_cfg = NULL;
-
- oct_cfg = octeon_get_conf(oct);
-
- if (!oct_cfg)
- return -EINVAL;
-
- if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
- dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
- __func__, q_no, (oct->num_oqs - 1));
- return -EINVAL;
- }
droq = oct->droq[q_no];
@@ -937,6 +946,7 @@ int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no)
spin_lock_irqsave(&droq->lock, flags);
droq->ops.fptr = NULL;
+ droq->ops.farg = NULL;
droq->ops.drop_on_max = 0;
spin_unlock_irqrestore(&droq->lock, flags);
@@ -949,6 +959,7 @@ int octeon_create_droq(struct octeon_device *oct,
u32 desc_size, void *app_ctx)
{
struct octeon_droq *droq;
+ int numa_node = cpu_to_node(q_no % num_online_cpus());
if (oct->droq[q_no]) {
dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n",
@@ -957,7 +968,9 @@ int octeon_create_droq(struct octeon_device *oct,
}
/* Allocate the DS for the new droq. */
- droq = vmalloc(sizeof(*droq));
+ droq = vmalloc_node(sizeof(*droq), numa_node);
+ if (!droq)
+ droq = vmalloc(sizeof(*droq));
if (!droq)
goto create_droq_fail;
memset(droq, 0, sizeof(struct octeon_droq));
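Several hunks above repeat one allocation pattern: try a NUMA-node-aware allocator first (vmalloc_node(), alloc_pages_node(), or lio_dma_alloc() bracketed by set_dev_node()), then fall back to the node-agnostic variant if the local node has no memory. A minimal userspace sketch of that fallback, where numa_alloc_node() is an assumed stand-in that always fails so the fallback path runs:

#include <stdio.h>
#include <stdlib.h>

/* Assumed stand-in for vmalloc_node()/alloc_pages_node(); it fails
 * unconditionally here so the fallback below is exercised. */
static void *numa_alloc_node(size_t size, int node)
{
	(void)size;
	(void)node;
	return NULL;
}

static void *alloc_pref_node(size_t size, int node)
{
	void *p = numa_alloc_node(size, node);	/* prefer the local node */

	if (!p)
		p = malloc(size);		/* fall back to any node */
	return p;
}

int main(void)
{
	void *buf = alloc_pref_node(4096, 0);

	printf("allocated: %s\n", buf ? "yes" : "no");
	free(buf);
	return 0;
}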
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
index 7940ccee1..5a6fb9113 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -65,6 +65,17 @@ struct octeon_droq_info {
#define OCT_DROQ_INFO_SIZE (sizeof(struct octeon_droq_info))
+struct octeon_skb_page_info {
+ /* DMA address for the page */
+ dma_addr_t dma;
+
+ /* Page for the rx dma */
+ struct page *page;
+
+ /** Offset into the page */
+ unsigned int page_offset;
+};
+
/** Pointer to data buffer.
* Driver keeps a pointer to the data buffer that it made available to
* the Octeon device. Since the descriptor ring keeps physical (bus)
@@ -77,6 +88,9 @@ struct octeon_recv_buffer {
/** Data in the packet buffer. */
u8 *data;
+
+ /** pg_info */
+ struct octeon_skb_page_info pg_info;
};
#define OCT_DROQ_RECVBUF_SIZE (sizeof(struct octeon_recv_buffer))
@@ -106,6 +120,13 @@ struct oct_droq_stats {
/** Num of Packets dropped due to receive path failures. */
u64 rx_dropped;
+
+ /** Num of vxlan packets received. */
+ u64 rx_vxlan;
+
+ /** Num of failures of recv_buffer_alloc() */
+ u64 rx_alloc_failure;
+
};
#define POLL_EVENT_INTR_ARRIVED 1
@@ -213,7 +234,8 @@ struct octeon_droq_ops {
* data in the buffer. The receive header gives the port
* number to the caller. Function pointer is set by caller.
*/
- void (*fptr)(u32, void *, u32, union octeon_rh *, void *);
+ void (*fptr)(u32, void *, u32, union octeon_rh *, void *, void *);
+ void *farg;
/* This function will be called by the driver for all NAPI related
* events. The first param is the octeon id. The second param is the
@@ -394,24 +416,9 @@ int octeon_register_dispatch_fn(struct octeon_device *oct,
u16 subcode,
octeon_dispatch_fn_t fn, void *fn_arg);
-/** Remove registration for an opcode/subcode. This will delete the mapping for
- * an opcode/subcode. The dispatch function will be unregistered and will no
- * longer be called if a packet with the opcode/subcode arrives in the driver
- * output queues.
- * @param oct - the octeon device to unregister from.
- * @param opcode - the opcode to be unregistered.
- * @param subcode - the subcode to be unregistered.
- *
- * @return Success: 0; Failure: 1
- */
-int octeon_unregister_dispatch_fn(struct octeon_device *oct,
- u16 opcode,
- u16 subcode);
-
void octeon_droq_print_stats(void);
-u32 octeon_droq_check_hw_for_pkts(struct octeon_device *oct,
- struct octeon_droq *droq);
+u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq);
int octeon_create_droq(struct octeon_device *oct, u32 q_no,
u32 num_descs, u32 desc_size, void *app_ctx);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index 592fe49b5..ff4b1d6f0 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -65,6 +65,11 @@ struct oct_iq_stats {
u64 tx_iq_busy;/**< Num of times this iq was found to be full. */
u64 tx_dropped;/**< Num of pkts dropped due to xmit path errors. */
u64 tx_tot_bytes;/**< Total count of bytes sent to network. */
+ u64 tx_gso; /* count of tso */
+ u64 tx_vxlan; /* tunnel */
+ u64 tx_dmamap_fail;
+ u64 tx_restart;
+ /*u64 tx_timeout_count;*/
};
#define OCT_IQ_STATS_SIZE (sizeof(struct oct_iq_stats))
@@ -75,18 +80,26 @@ struct oct_iq_stats {
* a Octeon device has one such structure to represent it.
*/
struct octeon_instr_queue {
+ struct octeon_device *oct_dev;
+
/** A spinlock to protect access to the input ring. */
spinlock_t lock;
+ /** A spinlock to protect while posting on the ring. */
+ spinlock_t post_lock;
+
+ /** A spinlock to protect access to the input ring.*/
+ spinlock_t iq_flush_running_lock;
+
/** Flag that indicates if the queue uses 64 byte commands. */
u32 iqcmd_64B:1;
- /** Queue Number. */
- u32 iq_no:5;
+ /** Queue info. */
+ union oct_txpciq txpciq;
u32 rsvd:17;
- /* Controls the periodic flushing of iq */
+ /* Controls whether extra flushing of IQ is done on Tx */
u32 do_auto_flush:1;
u32 status:8;
@@ -147,6 +160,13 @@ struct octeon_instr_queue {
/** Application context */
void *app_ctx;
+
+ /* network stack queue index */
+ int q_index;
+
+ /*os ifidx associated with this queue */
+ int ifidx;
+
};
/*---------------------- INSTRUCTION FORMAT ----------------------------*/
@@ -176,12 +196,12 @@ struct octeon_instr_32B {
/** 64-byte instruction format.
* Format of instruction for a 64-byte mode input queue.
*/
-struct octeon_instr_64B {
+struct octeon_instr2_64B {
/** Pointer where the input data is available. */
u64 dptr;
/** Instruction Header. */
- u64 ih;
+ u64 ih2;
/** Input Request Header. */
u64 irh;
@@ -198,14 +218,44 @@ struct octeon_instr_64B {
u64 rptr;
u64 reserved;
+};
+
+struct octeon_instr3_64B {
+ /** Pointer where the input data is available. */
+ u64 dptr;
+
+ /** Instruction Header. */
+ u64 ih3;
+
+ /** Instruction Header. */
+ u64 pki_ih3;
+
+ /** Input Request Header. */
+ u64 irh;
+ /** opcode/subcode specific parameters */
+ u64 ossp[2];
+
+ /** Return Data Parameters */
+ u64 rdp;
+
+ /** Pointer where the response for a RAW mode packet will be written
+ * by Octeon.
+ */
+ u64 rptr;
+
+};
+
+union octeon_instr_64B {
+ struct octeon_instr2_64B cmd2;
+ struct octeon_instr3_64B cmd3;
};
-#define OCT_64B_INSTR_SIZE (sizeof(struct octeon_instr_64B))
+#define OCT_64B_INSTR_SIZE (sizeof(union octeon_instr_64B))
/** The size of each buffer in soft command buffer pool
*/
-#define SOFT_COMMAND_BUFFER_SIZE 1024
+#define SOFT_COMMAND_BUFFER_SIZE 1536
struct octeon_soft_command {
/** Soft command buffer info. */
@@ -214,7 +264,8 @@ struct octeon_soft_command {
u32 size;
/** Command and return status */
- struct octeon_instr_64B cmd;
+ union octeon_instr_64B cmd;
+
#define COMPLETION_WORD_INIT 0xffffffffffffffffULL
u64 *status_word;
@@ -242,7 +293,7 @@ struct octeon_soft_command {
/** Maximum number of buffers to allocate into soft command buffer pool
*/
-#define MAX_SOFT_COMMAND_BUFFERS 16
+#define MAX_SOFT_COMMAND_BUFFERS 256
/** Head of a soft command buffer pool.
*/
@@ -268,14 +319,15 @@ void octeon_free_soft_command(struct octeon_device *oct,
/**
* octeon_init_instr_queue()
* @param octeon_dev - pointer to the octeon device structure.
- * @param iq_no - queue to be initialized (0 <= q_no <= 3).
+ * @param txpciq - queue to be initialized (0 <= q_no <= 3).
*
* Called at driver init time for each input queue. iq_conf has the
* configuration parameters for the queue.
*
* @return Success: 0 Failure: 1
*/
-int octeon_init_instr_queue(struct octeon_device *octeon_dev, u32 iq_no,
+int octeon_init_instr_queue(struct octeon_device *octeon_dev,
+ union oct_txpciq txpciq,
u32 num_descs);
/**
@@ -298,7 +350,7 @@ octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
int
lio_process_iq_request_list(struct octeon_device *oct,
- struct octeon_instr_queue *iq);
+ struct octeon_instr_queue *iq, u32 napi_budget);
int octeon_send_command(struct octeon_device *oct, u32 iq_no,
u32 force_db, void *cmd, void *buf,
@@ -313,7 +365,10 @@ void octeon_prepare_soft_command(struct octeon_device *oct,
int octeon_send_soft_command(struct octeon_device *oct,
struct octeon_soft_command *sc);
-int octeon_setup_iq(struct octeon_device *oct, u32 iq_no,
- u32 num_descs, void *app_ctx);
-
+int octeon_setup_iq(struct octeon_device *oct, int ifidx,
+ int q_index, union oct_txpciq iq_no, u32 num_descs,
+ void *app_ctx);
+int
+octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
+ u32 pending_thresh, u32 napi_budget);
#endif /* __OCTEON_IQ_H__ */
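The octeon_instr_64B union introduced above lets one storage type carry either the CN6xxx cmd2 layout or the newer cmd3 layout, so OCT_64B_INSTR_SIZE remains a single constant. A sketch of the idea, assuming illustrative field layouts (eight u64 words each) rather than the driver's exact bit definitions:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct cmd2_64B {	/* CN66xx/CN68xx-style command */
	uint64_t dptr, ih2, irh, ossp[2], rdp, rptr, reserved;
};

struct cmd3_64B {	/* newer layout carrying a PKI instruction header */
	uint64_t dptr, ih3, pki_ih3, irh, ossp[2], rdp, rptr;
};

union instr_64B {
	struct cmd2_64B cmd2;
	struct cmd3_64B cmd3;
};

int main(void)
{
	/* Both layouts occupy the same 64 bytes of storage. */
	static_assert(sizeof(union instr_64B) == 64, "64B command");
	printf("union size: %zu bytes\n", sizeof(union instr_64B));
	return 0;
}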
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
index cbd081981..bc14e4c27 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -126,22 +126,27 @@ static inline int octeon_map_pci_barx(struct octeon_device *oct,
}
static inline void *
-cnnic_alloc_aligned_dma(struct pci_dev *pci_dev,
- u32 size,
- u32 *alloc_size,
- size_t *orig_ptr,
- size_t *dma_addr __attribute__((unused)))
+cnnic_numa_alloc_aligned_dma(u32 size,
+ u32 *alloc_size,
+ size_t *orig_ptr,
+ int numa_node)
{
int retries = 0;
void *ptr = NULL;
#define OCTEON_MAX_ALLOC_RETRIES 1
do {
- ptr =
- (void *)__get_free_pages(GFP_KERNEL,
- get_order(size));
+ struct page *page = NULL;
+
+ page = alloc_pages_node(numa_node,
+ GFP_KERNEL,
+ get_order(size));
+ if (!page)
+ page = alloc_pages(GFP_KERNEL,
+ get_order(size));
+ ptr = (void *)page_address(page);
if ((unsigned long)ptr & 0x07) {
- free_pages((unsigned long)ptr, get_order(size));
+ __free_pages(page, get_order(size));
ptr = NULL;
/* Increment the size required if the first
* attempt failed.
@@ -169,7 +174,7 @@ sleep_cond(wait_queue_head_t *wait_queue, int *condition)
init_waitqueue_entry(&we, current);
add_wait_queue(wait_queue, &we);
- while (!(ACCESS_ONCE(*condition))) {
+ while (!(READ_ONCE(*condition))) {
set_current_state(TASK_INTERRUPTIBLE);
if (signal_pending(current))
goto out;
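cnnic_numa_alloc_aligned_dma() above keeps the old retry loop: allocate, check that the pointer is 8-byte aligned, and on a misaligned result free it, grow the request, and try once more. A minimal sketch of that loop with malloc() standing in for the page allocators:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ALLOC_RETRIES 1

static void *alloc_aligned_8b(size_t size)
{
	int retries = 0;
	void *ptr = NULL;

	do {
		ptr = malloc(size);
		if (ptr && ((uintptr_t)ptr & 0x07)) {
			free(ptr);	/* misaligned: drop the block */
			ptr = NULL;
			size += 8;	/* grow the request and retry */
		}
	} while (!ptr && (retries++ < MAX_ALLOC_RETRIES));

	return ptr;
}

int main(void)
{
	void *p = alloc_aligned_8b(4096);

	printf("got %p (8-byte aligned: %d)\n", p, !((uintptr_t)p & 0x07));
	free(p);
	return 0;
}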
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
index 5aecef870..95a4bbedf 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
@@ -19,43 +19,29 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/version.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <linux/kthread.h>
#include <linux/netdevice.h>
-#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
-#include "octeon_nic.h"
-#include "octeon_main.h"
-#include "octeon_network.h"
-#include "cn66xx_regs.h"
-#include "cn66xx_device.h"
-#include "cn68xx_regs.h"
-#include "cn68xx_device.h"
-#include "liquidio_image.h"
-#include "octeon_mem_ops.h"
#define MEMOPS_IDX MAX_BAR1_MAP_INDEX
+#ifdef __BIG_ENDIAN_BITFIELD
static inline void
-octeon_toggle_bar1_swapmode(struct octeon_device *oct __attribute__((unused)),
- u32 idx __attribute__((unused)))
+octeon_toggle_bar1_swapmode(struct octeon_device *oct, u32 idx)
{
-#ifdef __BIG_ENDIAN_BITFIELD
u32 mask;
mask = oct->fn_list.bar1_idx_read(oct, idx);
mask = (mask & 0x2) ? (mask & ~2) : (mask | 2);
oct->fn_list.bar1_idx_write(oct, idx, mask);
-#endif
}
+#else
+#define octeon_toggle_bar1_swapmode(oct, idx) (oct = oct)
+#endif
static void
octeon_pci_fastwrite(struct octeon_device *oct, u8 __iomem *mapped_addr,
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
index b3abe5818..fb820dc7f 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -30,6 +30,20 @@
#include <linux/dma-mapping.h>
#include <linux/ptp_clock_kernel.h>
+#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)
+#define LIO_MIN_MTU_SIZE 68
+
+struct oct_nic_stats_resp {
+ u64 rh;
+ struct oct_link_stats stats;
+ u64 status;
+};
+
+struct oct_nic_stats_ctrl {
+ struct completion complete;
+ struct net_device *netdev;
+};
+
/** LiquidIO per-interface network private data */
struct lio {
/** State of the interface. Rx/Tx happens only in the RUNNING state. */
@@ -48,11 +62,11 @@ struct lio {
*/
int rxq;
- /** Guards the glist */
- spinlock_t lock;
+ /** Guards each glist */
+ spinlock_t *glist_lock;
- /** Linked list of gather components */
- struct list_head glist;
+ /** Array of gather component linked lists */
+ struct list_head *glist;
/** Pointer to the NIC properties for the Octeon device this network
* interface is associated with.
@@ -67,6 +81,9 @@ struct lio {
/** Link information sent by the core application for this interface. */
struct oct_link_info linfo;
+ /** counter of link changes */
+ u64 link_changes;
+
/** Size of Tx queue for this octeon device. */
u32 tx_qsize;
@@ -82,6 +99,12 @@ struct lio {
/** Copy of Interface capabilities: TSO, TSO6, LRO, Checksums. */
u64 dev_capability;
+ /* Copy of transmit encapsulation capabilities:
+ * TSO, TSO6, Checksums for this device for Kernel
+ * 3.10.0 onwards
+ */
+ u64 enc_dev_capability;
+
/** Copy of beacon reg in phy */
u32 phy_beacon_val;
@@ -101,7 +124,6 @@ struct lio {
/* work queue for txq status */
struct cavium_wq txq_status_wq;
-
};
#define LIO_SIZE (sizeof(struct lio))
@@ -111,8 +133,9 @@ struct lio {
* \brief Enable or disable feature
* @param netdev pointer to network device
* @param cmd Command that just requires acknowledgment
+ * @param param1 Parameter to command
*/
-int liquidio_set_feature(struct net_device *netdev, int cmd);
+int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1);
/**
* \brief Link control command completion callback
@@ -131,14 +154,30 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr);
*/
void liquidio_set_ethtool_ops(struct net_device *netdev);
-static inline void
-*recv_buffer_alloc(struct octeon_device *oct __attribute__((unused)),
- u32 q_no __attribute__((unused)), u32 size)
-{
#define SKB_ADJ_MASK 0x3F
#define SKB_ADJ (SKB_ADJ_MASK + 1)
- struct sk_buff *skb = dev_alloc_skb(size + SKB_ADJ);
+#define MIN_SKB_SIZE 256 /* 8 bytes and more - 8 bytes for PTP */
+#define LIO_RXBUFFER_SZ 2048
+
+static inline void
+*recv_buffer_alloc(struct octeon_device *oct,
+ struct octeon_skb_page_info *pg_info)
+{
+ struct page *page;
+ struct sk_buff *skb;
+ struct octeon_skb_page_info *skb_pg_info;
+
+ page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+ if (unlikely(!page))
+ return NULL;
+
+ skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
+ if (unlikely(!skb)) {
+ __free_page(page);
+ pg_info->page = NULL;
+ return NULL;
+ }
if ((unsigned long)skb->data & SKB_ADJ_MASK) {
u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);
@@ -146,11 +185,151 @@ static inline void
skb_reserve(skb, r);
}
+ skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+ /* Get DMA info */
+ pg_info->dma = dma_map_page(&oct->pci_dev->dev, page, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+
+ /* Mapping failed!! */
+ if (dma_mapping_error(&oct->pci_dev->dev, pg_info->dma)) {
+ __free_page(page);
+ dev_kfree_skb_any((struct sk_buff *)skb);
+ pg_info->page = NULL;
+ return NULL;
+ }
+
+ pg_info->page = page;
+ pg_info->page_offset = 0;
+ skb_pg_info->page = page;
+ skb_pg_info->page_offset = 0;
+ skb_pg_info->dma = pg_info->dma;
+
return (void *)skb;
}
+static inline void
+*recv_buffer_fast_alloc(u32 size)
+{
+ struct sk_buff *skb;
+ struct octeon_skb_page_info *skb_pg_info;
+
+ skb = dev_alloc_skb(size + SKB_ADJ);
+ if (unlikely(!skb))
+ return NULL;
+
+ if ((unsigned long)skb->data & SKB_ADJ_MASK) {
+ u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);
+
+ skb_reserve(skb, r);
+ }
+
+ skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+ skb_pg_info->page = NULL;
+ skb_pg_info->page_offset = 0;
+ skb_pg_info->dma = 0;
+
+ return skb;
+}
+
+static inline int
+recv_buffer_recycle(struct octeon_device *oct, void *buf)
+{
+ struct octeon_skb_page_info *pg_info = buf;
+
+ if (!pg_info->page) {
+ dev_err(&oct->pci_dev->dev, "%s: pg_info->page NULL\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ if (unlikely(page_count(pg_info->page) != 1) ||
+ unlikely(page_to_nid(pg_info->page) != numa_node_id())) {
+ dma_unmap_page(&oct->pci_dev->dev,
+ pg_info->dma, (PAGE_SIZE << 0),
+ DMA_FROM_DEVICE);
+ pg_info->dma = 0;
+ pg_info->page = NULL;
+ pg_info->page_offset = 0;
+ return -ENOMEM;
+ }
+
+ /* Flip to other half of the buffer */
+ if (pg_info->page_offset == 0)
+ pg_info->page_offset = LIO_RXBUFFER_SZ;
+ else
+ pg_info->page_offset = 0;
+ page_ref_inc(pg_info->page);
+
+ return 0;
+}
+
+static inline void
+*recv_buffer_reuse(struct octeon_device *oct, void *buf)
+{
+ struct octeon_skb_page_info *pg_info = buf, *skb_pg_info;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
+ if (unlikely(!skb)) {
+ dma_unmap_page(&oct->pci_dev->dev,
+ pg_info->dma, (PAGE_SIZE << 0),
+ DMA_FROM_DEVICE);
+ return NULL;
+ }
+
+ if ((unsigned long)skb->data & SKB_ADJ_MASK) {
+ u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);
+
+ skb_reserve(skb, r);
+ }
+
+ skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+ skb_pg_info->page = pg_info->page;
+ skb_pg_info->page_offset = pg_info->page_offset;
+ skb_pg_info->dma = pg_info->dma;
+
+ return skb;
+}
+
+static inline void
+recv_buffer_destroy(void *buffer, struct octeon_skb_page_info *pg_info)
+{
+ struct sk_buff *skb = (struct sk_buff *)buffer;
+
+ put_page(pg_info->page);
+ pg_info->dma = 0;
+ pg_info->page = NULL;
+ pg_info->page_offset = 0;
+
+ if (skb)
+ dev_kfree_skb_any(skb);
+}
+
static inline void recv_buffer_free(void *buffer)
{
+ struct sk_buff *skb = (struct sk_buff *)buffer;
+ struct octeon_skb_page_info *pg_info;
+
+ pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+
+ if (pg_info->page) {
+ put_page(pg_info->page);
+ pg_info->dma = 0;
+ pg_info->page = NULL;
+ pg_info->page_offset = 0;
+ }
+
+ dev_kfree_skb_any((struct sk_buff *)buffer);
+}
+
+static inline void
+recv_buffer_fast_free(void *buffer)
+{
+ dev_kfree_skb_any((struct sk_buff *)buffer);
+}
+
+static inline void tx_buffer_free(void *buffer)
+{
dev_kfree_skb_any((struct sk_buff *)buffer);
}
@@ -159,7 +338,17 @@ static inline void recv_buffer_free(void *buffer)
#define lio_dma_free(oct, size, virt_addr, dma_addr) \
dma_free_coherent(&oct->pci_dev->dev, size, virt_addr, dma_addr)
-#define get_rbd(ptr) (((struct sk_buff *)(ptr))->data)
+static inline
+void *get_rbd(struct sk_buff *skb)
+{
+ struct octeon_skb_page_info *pg_info;
+ unsigned char *va;
+
+ pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+ va = page_address(pg_info->page) + pg_info->page_offset;
+
+ return va;
+}
static inline u64
lio_map_ring_info(struct octeon_droq *droq, u32 i)
@@ -170,7 +359,7 @@ lio_map_ring_info(struct octeon_droq *droq, u32 i)
dma_addr = dma_map_single(&oct->pci_dev->dev, &droq->info_list[i],
OCT_DROQ_INFO_SIZE, DMA_FROM_DEVICE);
- BUG_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr));
+ WARN_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr));
return (u64)dma_addr;
}
@@ -183,33 +372,44 @@ lio_unmap_ring_info(struct pci_dev *pci_dev,
}
static inline u64
-lio_map_ring(struct pci_dev *pci_dev,
- void *buf, u32 size)
+lio_map_ring(void *buf)
{
dma_addr_t dma_addr;
- dma_addr = dma_map_single(&pci_dev->dev, get_rbd(buf), size,
- DMA_FROM_DEVICE);
+ struct sk_buff *skb = (struct sk_buff *)buf;
+ struct octeon_skb_page_info *pg_info;
- BUG_ON(dma_mapping_error(&pci_dev->dev, dma_addr));
+ pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+ if (!pg_info->page) {
+ pr_err("%s: pg_info->page NULL\n", __func__);
+ WARN_ON(1);
+ }
+
+ /* Get DMA info */
+ dma_addr = pg_info->dma;
+ if (!pg_info->dma) {
+ pr_err("%s: ERROR it should be already available\n",
+ __func__);
+ WARN_ON(1);
+ }
+ dma_addr += pg_info->page_offset;
return (u64)dma_addr;
}
static inline void
lio_unmap_ring(struct pci_dev *pci_dev,
- u64 buf_ptr, u32 size)
+ u64 buf_ptr)
+
{
- dma_unmap_single(&pci_dev->dev,
- buf_ptr, size,
- DMA_FROM_DEVICE);
+ dma_unmap_page(&pci_dev->dev,
+ buf_ptr, (PAGE_SIZE << 0),
+ DMA_FROM_DEVICE);
}
-static inline void *octeon_fast_packet_alloc(struct octeon_device *oct,
- struct octeon_droq *droq,
- u32 q_no, u32 size)
+static inline void *octeon_fast_packet_alloc(u32 size)
{
- return recv_buffer_alloc(oct, q_no, size);
+ return recv_buffer_fast_alloc(size);
}
static inline void octeon_fast_packet_next(struct octeon_droq *droq,
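recv_buffer_recycle() above treats each page as two LIO_RXBUFFER_SZ halves and flips page_offset between them whenever the driver still holds the only reference; otherwise the page is unmapped and replaced. A standalone sketch of the flip, with a plain struct and counter standing in for struct page and page_count()/page_ref_inc():

#include <stdio.h>

#define LIO_RXBUFFER_SZ 2048

struct page_info {
	unsigned int page_offset;
	int refcount;		/* stands in for page_count() */
};

/* Returns 0 if the half-page could be recycled, -1 if it must be replaced. */
static int recycle(struct page_info *pg)
{
	if (pg->refcount != 1)
		return -1;	/* something else still references the page */

	/* Flip to the other half of the page. */
	pg->page_offset = pg->page_offset ? 0 : LIO_RXBUFFER_SZ;
	pg->refcount++;		/* stands in for page_ref_inc() */
	return 0;
}

int main(void)
{
	struct page_info pg = { .page_offset = 0, .refcount = 1 };

	if (recycle(&pg) == 0)
		printf("recycled, new offset %u\n", pg.page_offset);
	return 0;
}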
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
index 1a0191549..166727be9 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
@@ -19,14 +19,9 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/version.h>
-#include <linux/types.h>
-#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <linux/kthread.h>
#include <linux/netdevice.h>
-#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
@@ -34,21 +29,14 @@
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
-#include "octeon_network.h"
-#include "cn66xx_regs.h"
-#include "cn66xx_device.h"
-#include "cn68xx_regs.h"
-#include "cn68xx_device.h"
-#include "liquidio_image.h"
-#include "octeon_mem_ops.h"
void *
octeon_alloc_soft_command_resp(struct octeon_device *oct,
- struct octeon_instr_64B *cmd,
- size_t rdatasize)
+ union octeon_instr_64B *cmd,
+ u32 rdatasize)
{
struct octeon_soft_command *sc;
- struct octeon_instr_ih *ih;
+ struct octeon_instr_ih2 *ih2;
struct octeon_instr_irh *irh;
struct octeon_instr_rdp *rdp;
@@ -59,24 +47,25 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
return NULL;
/* Copy existing command structure into the soft command */
- memcpy(&sc->cmd, cmd, sizeof(struct octeon_instr_64B));
+ memcpy(&sc->cmd, cmd, sizeof(union octeon_instr_64B));
/* Add in the response related fields. Opcode and Param are already
* there.
*/
- ih = (struct octeon_instr_ih *)&sc->cmd.ih;
- ih->fsz = 40; /* irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
+ ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
+ ih2->fsz = 40; /* irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
- irh = (struct octeon_instr_irh *)&sc->cmd.irh;
irh->rflag = 1; /* a response is required */
- irh->len = 4; /* means four 64-bit words immediately follow irh */
- rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
rdp->pcie_port = oct->pcie_port;
rdp->rlen = rdatasize;
*sc->status_word = COMPLETION_WORD_INIT;
+ sc->cmd.cmd2.rptr = sc->dmarptr;
+
sc->wait_time = 1000;
sc->timeout = jiffies + sc->wait_time;
@@ -119,12 +108,11 @@ static void octnet_link_ctrl_callback(struct octeon_device *oct,
static inline struct octeon_soft_command
*octnic_alloc_ctrl_pkt_sc(struct octeon_device *oct,
- struct octnic_ctrl_pkt *nctrl,
- struct octnic_ctrl_params nparams)
+ struct octnic_ctrl_pkt *nctrl)
{
struct octeon_soft_command *sc = NULL;
u8 *data;
- size_t rdatasize;
+ u32 rdatasize;
u32 uddsize = 0, datasize = 0;
uddsize = (u32)(nctrl->ncmd.s.more * 8);
@@ -143,7 +131,7 @@ static inline struct octeon_soft_command
data = (u8 *)sc->virtdptr;
- memcpy(data, &nctrl->ncmd, OCTNET_CMD_SIZE);
+ memcpy(data, &nctrl->ncmd, OCTNET_CMD_SIZE);
octeon_swap_8B_data((u64 *)data, (OCTNET_CMD_SIZE >> 3));
@@ -152,6 +140,8 @@ static inline struct octeon_soft_command
memcpy(data + OCTNET_CMD_SIZE, nctrl->udd, uddsize);
}
+ sc->iq_no = (u32)nctrl->iq_no;
+
octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD,
0, 0, 0);
@@ -164,26 +154,41 @@ static inline struct octeon_soft_command
int
octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
- struct octnic_ctrl_pkt *nctrl,
- struct octnic_ctrl_params nparams)
+ struct octnic_ctrl_pkt *nctrl)
{
int retval;
struct octeon_soft_command *sc = NULL;
- sc = octnic_alloc_ctrl_pkt_sc(oct, nctrl, nparams);
+ spin_lock_bh(&oct->cmd_resp_wqlock);
+ /* Allow only rx ctrl command to stop traffic on the chip
+ * during offline operations
+ */
+ if ((oct->cmd_resp_state == OCT_DRV_OFFLINE) &&
+ (nctrl->ncmd.s.cmd != OCTNET_CMD_RX_CTL)) {
+ spin_unlock_bh(&oct->cmd_resp_wqlock);
+ dev_err(&oct->pci_dev->dev,
+ "%s cmd:%d not processed since driver offline\n",
+ __func__, nctrl->ncmd.s.cmd);
+ return -1;
+ }
+
+ sc = octnic_alloc_ctrl_pkt_sc(oct, nctrl);
if (!sc) {
dev_err(&oct->pci_dev->dev, "%s soft command alloc failed\n",
__func__);
+ spin_unlock_bh(&oct->cmd_resp_wqlock);
return -1;
}
retval = octeon_send_soft_command(oct, sc);
- if (retval) {
+ if (retval == IQ_SEND_FAILED) {
octeon_free_soft_command(oct, sc);
- dev_err(&oct->pci_dev->dev, "%s soft command send failed status: %x\n",
- __func__, retval);
+ dev_err(&oct->pci_dev->dev, "%s soft command:%d send failed status: %x\n",
+ __func__, nctrl->ncmd.s.cmd, retval);
+ spin_unlock_bh(&oct->cmd_resp_wqlock);
return -1;
}
+ spin_unlock_bh(&oct->cmd_resp_wqlock);
return retval;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
index 0238857c8..b71a2bbe4 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
@@ -52,6 +52,9 @@ struct octnic_ctrl_pkt {
/** Additional data that may be needed by some commands. */
u64 udd[MAX_NCTRL_UDD];
+ /** Input queue to use to send this command. */
+ u64 iq_no;
+
/** Time to wait for Octeon software to respond to this control command.
* If wait_time is 0, OSI assumes no response is expected.
*/
@@ -82,7 +85,7 @@ struct octnic_data_pkt {
u32 datasize;
/** Command to be passed to the Octeon device software. */
- struct octeon_instr_64B cmd;
+ union octeon_instr_64B cmd;
/** Input queue to use to send this command. */
u32 q_no;
@@ -94,15 +97,14 @@ struct octnic_data_pkt {
*/
union octnic_cmd_setup {
struct {
- u32 ifidx:8;
- u32 cksum_offset:7;
+ u32 iq_no:8;
u32 gather:1;
u32 timestamp:1;
- u32 ipv4opts_ipv6exthdr:2;
u32 ip_csum:1;
+ u32 transport_csum:1;
u32 tnl_csum:1;
+ u32 rsvd:19;
- u32 rsvd:11;
union {
u32 datasize;
u32 gatherptrs;
@@ -113,79 +115,146 @@ union octnic_cmd_setup {
};
-struct octnic_ctrl_params {
- u32 resp_order;
-};
-
static inline int octnet_iq_is_full(struct octeon_device *oct, u32 q_no)
{
return ((u32)atomic_read(&oct->instr_queue[q_no]->instr_pending)
>= (oct->instr_queue[q_no]->max_count - 2));
}
-/** Utility function to prepare a 64B NIC instruction based on a setup command
- * @param cmd - pointer to instruction to be filled in.
- * @param setup - pointer to the setup structure
- * @param q_no - which queue for back pressure
- *
- * Assumes the cmd instruction is pre-allocated, but no fields are filled in.
- */
static inline void
-octnet_prepare_pci_cmd(struct octeon_instr_64B *cmd,
- union octnic_cmd_setup *setup, u32 tag)
+octnet_prepare_pci_cmd_o2(struct octeon_device *oct,
+ union octeon_instr_64B *cmd,
+ union octnic_cmd_setup *setup, u32 tag)
{
- struct octeon_instr_ih *ih;
+ struct octeon_instr_ih2 *ih2;
struct octeon_instr_irh *irh;
union octnic_packet_params packet_params;
+ int port;
- memset(cmd, 0, sizeof(struct octeon_instr_64B));
+ memset(cmd, 0, sizeof(union octeon_instr_64B));
- ih = (struct octeon_instr_ih *)&cmd->ih;
+ ih2 = (struct octeon_instr_ih2 *)&cmd->cmd2.ih2;
/* assume that rflag is cleared so therefore front data will only have
- * irh and ossp[1] and ossp[2] for a total of 24 bytes
+ * irh and ossp[0], ossp[1] for a total of 32 bytes
*/
- ih->fsz = 24;
+ ih2->fsz = 24;
+
+ ih2->tagtype = ORDERED_TAG;
+ ih2->grp = DEFAULT_POW_GRP;
- ih->tagtype = ORDERED_TAG;
- ih->grp = DEFAULT_POW_GRP;
+ port = (int)oct->instr_queue[setup->s.iq_no]->txpciq.s.port;
if (tag)
- ih->tag = tag;
+ ih2->tag = tag;
else
- ih->tag = LIO_DATA(setup->s.ifidx);
+ ih2->tag = LIO_DATA(port);
- ih->raw = 1;
- ih->qos = (setup->s.ifidx & 3) + 4; /* map qos based on interface */
+ ih2->raw = 1;
+ ih2->qos = (port & 3) + 4; /* map qos based on interface */
if (!setup->s.gather) {
- ih->dlengsz = setup->s.u.datasize;
+ ih2->dlengsz = setup->s.u.datasize;
} else {
- ih->gather = 1;
- ih->dlengsz = setup->s.u.gatherptrs;
+ ih2->gather = 1;
+ ih2->dlengsz = setup->s.u.gatherptrs;
}
- irh = (struct octeon_instr_irh *)&cmd->irh;
+ irh = (struct octeon_instr_irh *)&cmd->cmd2.irh;
irh->opcode = OPCODE_NIC;
irh->subcode = OPCODE_NIC_NW_DATA;
packet_params.u32 = 0;
- if (setup->s.cksum_offset) {
- packet_params.s.csoffset = setup->s.cksum_offset;
- packet_params.s.ipv4opts_ipv6exthdr =
- setup->s.ipv4opts_ipv6exthdr;
+ packet_params.s.ip_csum = setup->s.ip_csum;
+ packet_params.s.transport_csum = setup->s.transport_csum;
+ packet_params.s.tnl_csum = setup->s.tnl_csum;
+ packet_params.s.tsflag = setup->s.timestamp;
+
+ irh->ossp = packet_params.u32;
+}
+
+static inline void
+octnet_prepare_pci_cmd_o3(struct octeon_device *oct,
+ union octeon_instr_64B *cmd,
+ union octnic_cmd_setup *setup, u32 tag)
+{
+ struct octeon_instr_irh *irh;
+ struct octeon_instr_ih3 *ih3;
+ struct octeon_instr_pki_ih3 *pki_ih3;
+ union octnic_packet_params packet_params;
+ int port;
+
+ memset(cmd, 0, sizeof(union octeon_instr_64B));
+
+ ih3 = (struct octeon_instr_ih3 *)&cmd->cmd3.ih3;
+ pki_ih3 = (struct octeon_instr_pki_ih3 *)&cmd->cmd3.pki_ih3;
+
+ /* assume that rflag is cleared so therefore front data will only have
+ * irh and ossp[1] and ossp[2] for a total of 24 bytes
+ */
+ ih3->pkind = oct->instr_queue[setup->s.iq_no]->txpciq.s.pkind;
+ /*PKI IH*/
+ ih3->fsz = 24 + 8;
+
+ if (!setup->s.gather) {
+ ih3->dlengsz = setup->s.u.datasize;
+ } else {
+ ih3->gather = 1;
+ ih3->dlengsz = setup->s.u.gatherptrs;
}
+ pki_ih3->w = 1;
+ pki_ih3->raw = 1;
+ pki_ih3->utag = 1;
+ pki_ih3->utt = 1;
+ pki_ih3->uqpg = oct->instr_queue[setup->s.iq_no]->txpciq.s.use_qpg;
+
+ port = (int)oct->instr_queue[setup->s.iq_no]->txpciq.s.port;
+
+ if (tag)
+ pki_ih3->tag = tag;
+ else
+ pki_ih3->tag = LIO_DATA(port);
+
+ pki_ih3->tagtype = ORDERED_TAG;
+ pki_ih3->qpg = oct->instr_queue[setup->s.iq_no]->txpciq.s.qpg;
+ pki_ih3->pm = 0x7; /*0x7 - meant for Parse nothing, uninterpreted*/
+ pki_ih3->sl = 8; /* sl will be sizeof(pki_ih3)*/
+
+ irh = (struct octeon_instr_irh *)&cmd->cmd3.irh;
+
+ irh->opcode = OPCODE_NIC;
+ irh->subcode = OPCODE_NIC_NW_DATA;
+
+ packet_params.u32 = 0;
+
packet_params.s.ip_csum = setup->s.ip_csum;
+ packet_params.s.transport_csum = setup->s.transport_csum;
packet_params.s.tnl_csum = setup->s.tnl_csum;
- packet_params.s.ifidx = setup->s.ifidx;
packet_params.s.tsflag = setup->s.timestamp;
irh->ossp = packet_params.u32;
}
+/** Utility function to prepare a 64B NIC instruction based on a setup command
+ * @param cmd - pointer to instruction to be filled in.
+ * @param setup - pointer to the setup structure
+ * @param q_no - which queue for back pressure
+ *
+ * Assumes the cmd instruction is pre-allocated, but no fields are filled in.
+ */
+static inline void
+octnet_prepare_pci_cmd(struct octeon_device *oct, union octeon_instr_64B *cmd,
+ union octnic_cmd_setup *setup, u32 tag)
+{
+ if (OCTEON_CN6XXX(oct))
+ octnet_prepare_pci_cmd_o2(oct, cmd, setup, tag);
+ else
+ octnet_prepare_pci_cmd_o3(oct, cmd, setup, tag);
+}
+
/** Allocate a soft command with space for a response immediately following
 * the command.
* @param oct - octeon device pointer
@@ -198,8 +267,8 @@ octnet_prepare_pci_cmd(struct octeon_instr_64B *cmd,
*/
void *
octeon_alloc_soft_command_resp(struct octeon_device *oct,
- struct octeon_instr_64B *cmd,
- size_t rdatasize);
+ union octeon_instr_64B *cmd,
+ u32 rdatasize);
/** Send a NIC data packet to the device
* @param oct - octeon device pointer
@@ -214,14 +283,11 @@ int octnet_send_nic_data_pkt(struct octeon_device *oct,
/** Send a NIC control packet to the device
* @param oct - octeon device pointer
* @param nctrl - control structure with command, timeout, and callback info
- * @param nparams - response control structure
- *
* @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if it the
* queue should be stopped, and IQ_SEND_OK if it sent okay.
*/
int
octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
- struct octnic_ctrl_pkt *nctrl,
- struct octnic_ctrl_params nparams);
+ struct octnic_ctrl_pkt *nctrl);
#endif
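octnet_prepare_pci_cmd() above is now a thin dispatcher: CN6xxx parts take the cmd2 encoder, everything newer takes the cmd3/PKI encoder. A minimal sketch of that shape, with an is_cn6xxx flag assumed in place of the OCTEON_CN6XXX() chip-id test:

#include <stdio.h>

struct cmd { const char *format; };

static void prepare_cmd_o2(struct cmd *c) { c->format = "ih2/irh (cmd2)"; }
static void prepare_cmd_o3(struct cmd *c) { c->format = "ih3/pki_ih3 (cmd3)"; }

static void prepare_cmd(struct cmd *c, int is_cn6xxx)
{
	if (is_cn6xxx)
		prepare_cmd_o2(c);	/* legacy 64B layout */
	else
		prepare_cmd_o3(c);	/* PKI-based 64B layout */
}

int main(void)
{
	struct cmd c;

	prepare_cmd(&c, 1);
	printf("encoded with %s\n", c.format);
	return 0;
}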
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index a2a24652c..d32492f18 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -19,28 +19,17 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/version.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
-#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
-#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
-#include "cn66xx_regs.h"
#include "cn66xx_device.h"
-#include "cn68xx_regs.h"
-#include "cn68xx_device.h"
-#include "liquidio_image.h"
#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \
(octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)
@@ -51,7 +40,7 @@ struct iq_post_status {
};
static void check_db_timeout(struct work_struct *work);
-static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no);
+static void __check_db_timeout(struct octeon_device *oct, u64 iq_no);
static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);
@@ -69,12 +58,16 @@ static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
/* Return 0 on success, 1 on failure */
int octeon_init_instr_queue(struct octeon_device *oct,
- u32 iq_no, u32 num_descs)
+ union oct_txpciq txpciq,
+ u32 num_descs)
{
struct octeon_instr_queue *iq;
struct octeon_iq_config *conf = NULL;
+ u32 iq_no = (u32)txpciq.s.q_no;
u32 q_size;
struct cavium_wq *db_wq;
+ int orig_node = dev_to_node(&oct->pci_dev->dev);
+ int numa_node = cpu_to_node(iq_no % num_online_cpus());
if (OCTEON_CN6XXX(oct))
conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn6xxx, conf)));
@@ -95,9 +88,15 @@ int octeon_init_instr_queue(struct octeon_device *oct,
q_size = (u32)conf->instr_type * num_descs;
iq = oct->instr_queue[iq_no];
+ iq->oct_dev = oct;
+ set_dev_node(&oct->pci_dev->dev, numa_node);
iq->base_addr = lio_dma_alloc(oct, q_size,
(dma_addr_t *)&iq->base_addr_dma);
+ set_dev_node(&oct->pci_dev->dev, orig_node);
+ if (!iq->base_addr)
+ iq->base_addr = lio_dma_alloc(oct, q_size,
+ (dma_addr_t *)&iq->base_addr_dma);
if (!iq->base_addr) {
dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
iq_no);
@@ -109,7 +108,11 @@ int octeon_init_instr_queue(struct octeon_device *oct,
/* Initialize a list to holds requests that have been posted to Octeon
* but has yet to be fetched by octeon
*/
- iq->request_list = vmalloc(sizeof(*iq->request_list) * num_descs);
+ iq->request_list = vmalloc_node((sizeof(*iq->request_list) * num_descs),
+ numa_node);
+ if (!iq->request_list)
+ iq->request_list = vmalloc(sizeof(*iq->request_list) *
+ num_descs);
if (!iq->request_list) {
lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
@@ -122,7 +125,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);
- iq->iq_no = iq_no;
+ iq->txpciq.u64 = txpciq.u64;
iq->fill_threshold = (u32)conf->db_min;
iq->fill_cnt = 0;
iq->host_write_index = 0;
@@ -135,8 +138,11 @@ int octeon_init_instr_queue(struct octeon_device *oct,
/* Initialize the spinlock for this instruction queue */
spin_lock_init(&iq->lock);
+ spin_lock_init(&iq->post_lock);
- oct->io_qmask.iq |= (1 << iq_no);
+ spin_lock_init(&iq->iq_flush_running_lock);
+
+ oct->io_qmask.iq |= (1ULL << iq_no);
/* Set the 32B/64B mode for each input queue */
oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
@@ -144,7 +150,9 @@ int octeon_init_instr_queue(struct octeon_device *oct,
oct->fn_list.setup_iq_regs(oct, iq_no);
- oct->check_db_wq[iq_no].wq = create_workqueue("check_iq_db");
+ oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db",
+ WQ_MEM_RECLAIM,
+ 0);
if (!oct->check_db_wq[iq_no].wq) {
lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
@@ -168,7 +176,6 @@ int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
- flush_workqueue(oct->check_db_wq[iq_no].wq);
destroy_workqueue(oct->check_db_wq[iq_no].wq);
if (OCTEON_CN6XXX(oct))
@@ -188,26 +195,38 @@ int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
/* Return 0 on success, 1 on failure */
int octeon_setup_iq(struct octeon_device *oct,
- u32 iq_no,
+ int ifidx,
+ int q_index,
+ union oct_txpciq txpciq,
u32 num_descs,
void *app_ctx)
{
+ u32 iq_no = (u32)txpciq.s.q_no;
+ int numa_node = cpu_to_node(iq_no % num_online_cpus());
+
if (oct->instr_queue[iq_no]) {
dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n",
iq_no);
+ oct->instr_queue[iq_no]->txpciq.u64 = txpciq.u64;
oct->instr_queue[iq_no]->app_ctx = app_ctx;
return 0;
}
oct->instr_queue[iq_no] =
- vmalloc(sizeof(struct octeon_instr_queue));
+ vmalloc_node(sizeof(struct octeon_instr_queue), numa_node);
+ if (!oct->instr_queue[iq_no])
+ oct->instr_queue[iq_no] =
+ vmalloc(sizeof(struct octeon_instr_queue));
if (!oct->instr_queue[iq_no])
return 1;
memset(oct->instr_queue[iq_no], 0,
sizeof(struct octeon_instr_queue));
+ oct->instr_queue[iq_no]->q_index = q_index;
oct->instr_queue[iq_no]->app_ctx = app_ctx;
- if (octeon_init_instr_queue(oct, iq_no, num_descs)) {
+ oct->instr_queue[iq_no]->ifidx = ifidx;
+
+ if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
vfree(oct->instr_queue[iq_no]);
oct->instr_queue[iq_no] = NULL;
return 1;
@@ -226,8 +245,8 @@ int lio_wait_for_instr_fetch(struct octeon_device *oct)
instr_cnt = 0;
/*for (i = 0; i < oct->num_iqs; i++) {*/
- for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
- if (!(oct->io_qmask.iq & (1UL << i)))
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.iq & (1ULL << i)))
continue;
pending =
atomic_read(&oct->
@@ -271,40 +290,8 @@ static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
memcpy(iqptr, cmd, cmdsize);
}
-static inline int
-__post_command(struct octeon_device *octeon_dev __attribute__((unused)),
- struct octeon_instr_queue *iq,
- u32 force_db __attribute__((unused)), u8 *cmd)
-{
- u32 index = -1;
-
- /* This ensures that the read index does not wrap around to the same
- * position if queue gets full before Octeon could fetch any instr.
- */
- if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1))
- return -1;
-
- __copy_cmd_into_iq(iq, cmd);
-
- /* "index" is returned, host_write_index is modified. */
- index = iq->host_write_index;
- INCR_INDEX_BY1(iq->host_write_index, iq->max_count);
- iq->fill_cnt++;
-
- /* Flush the command into memory. We need to be sure the data is in
- * memory before indicating that the instruction is pending.
- */
- wmb();
-
- atomic_inc(&iq->instr_pending);
-
- return index;
-}
-
static inline struct iq_post_status
-__post_command2(struct octeon_device *octeon_dev __attribute__((unused)),
- struct octeon_instr_queue *iq,
- u32 force_db __attribute__((unused)), u8 *cmd)
+__post_command2(struct octeon_instr_queue *iq, u8 *cmd)
{
struct iq_post_status st;
@@ -362,17 +349,19 @@ __add_to_request_list(struct octeon_instr_queue *iq,
iq->request_list[idx].reqtype = reqtype;
}
+/* Can only run in process context */
int
lio_process_iq_request_list(struct octeon_device *oct,
- struct octeon_instr_queue *iq)
+ struct octeon_instr_queue *iq, u32 napi_budget)
{
int reqtype;
void *buf;
u32 old = iq->flush_index;
u32 inst_count = 0;
- unsigned pkts_compl = 0, bytes_compl = 0;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
struct octeon_soft_command *sc;
struct octeon_instr_irh *irh;
+ unsigned long flags;
while (old != iq->octeon_read_index) {
reqtype = iq->request_list[old].reqtype;
@@ -394,7 +383,7 @@ lio_process_iq_request_list(struct octeon_device *oct,
case REQTYPE_SOFT_COMMAND:
sc = buf;
- irh = (struct octeon_instr_irh *)&sc->cmd.irh;
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
if (irh->rflag) {
/* We're expecting a response from Octeon.
* It's up to lio_process_ordered_list() to
@@ -402,17 +391,22 @@ lio_process_iq_request_list(struct octeon_device *oct,
* command response list because we expect
* a response from Octeon.
*/
- spin_lock_bh(&oct->response_list
- [OCTEON_ORDERED_SC_LIST].lock);
+ spin_lock_irqsave
+ (&oct->response_list
+ [OCTEON_ORDERED_SC_LIST].lock,
+ flags);
atomic_inc(&oct->response_list
[OCTEON_ORDERED_SC_LIST].
pending_req_count);
list_add_tail(&sc->node, &oct->response_list
[OCTEON_ORDERED_SC_LIST].head);
- spin_unlock_bh(&oct->response_list
- [OCTEON_ORDERED_SC_LIST].lock);
+ spin_unlock_irqrestore
+ (&oct->response_list
+ [OCTEON_ORDERED_SC_LIST].lock,
+ flags);
} else {
if (sc->callback) {
+ /* This callback must not sleep */
sc->callback(oct, OCTEON_REQUEST_DONE,
sc->callback_arg);
}
@@ -430,6 +424,9 @@ lio_process_iq_request_list(struct octeon_device *oct,
skip_this:
inst_count++;
INCR_INDEX_BY1(old, iq->max_count);
+
+ if ((napi_budget) && (inst_count >= napi_budget))
+ break;
}
if (bytes_compl)
octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl,
@@ -439,38 +436,63 @@ lio_process_iq_request_list(struct octeon_device *oct,
return inst_count;
}
-static inline void
-update_iq_indices(struct octeon_device *oct, struct octeon_instr_queue *iq)
+/* Can only be called from process context */
+int
+octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
+ u32 pending_thresh, u32 napi_budget)
{
u32 inst_processed = 0;
+ u32 tot_inst_processed = 0;
+ int tx_done = 1;
- /* Calculate how many commands Octeon has read and move the read index
- * accordingly.
- */
- iq->octeon_read_index = oct->fn_list.update_iq_read_idx(oct, iq);
+ if (!spin_trylock(&iq->iq_flush_running_lock))
+ return tx_done;
- /* Move the NORESPONSE requests to the per-device completion list. */
- if (iq->flush_index != iq->octeon_read_index)
- inst_processed = lio_process_iq_request_list(oct, iq);
+ spin_lock_bh(&iq->lock);
- if (inst_processed) {
- atomic_sub(inst_processed, &iq->instr_pending);
- iq->stats.instr_processed += inst_processed;
- }
-}
+ iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);
-static void
-octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
- u32 pending_thresh)
-{
if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) {
- spin_lock_bh(&iq->lock);
- update_iq_indices(oct, iq);
- spin_unlock_bh(&iq->lock);
+ do {
+ /* Process any outstanding IQ packets. */
+ if (iq->flush_index == iq->octeon_read_index)
+ break;
+
+ if (napi_budget)
+ inst_processed = lio_process_iq_request_list
+ (oct, iq,
+ napi_budget - tot_inst_processed);
+ else
+ inst_processed =
+ lio_process_iq_request_list(oct, iq, 0);
+
+ if (inst_processed) {
+ atomic_sub(inst_processed, &iq->instr_pending);
+ iq->stats.instr_processed += inst_processed;
+ }
+
+ tot_inst_processed += inst_processed;
+ inst_processed = 0;
+
+ } while (tot_inst_processed < napi_budget);
+
+ if (napi_budget && (tot_inst_processed >= napi_budget))
+ tx_done = 0;
}
+
+ iq->last_db_time = jiffies;
+
+ spin_unlock_bh(&iq->lock);
+
+ spin_unlock(&iq->iq_flush_running_lock);
+
+ return tx_done;
}
-static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no)
+/* Process instruction queue after timeout.
+ * This routine gets called from a workqueue or when removing the module.
+ */
+static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
{
struct octeon_instr_queue *iq;
u64 next_time;
@@ -481,24 +503,17 @@ static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no)
if (!iq)
return;
+	/* Return immediately if there is no work pending */
+ if (!atomic_read(&iq->instr_pending))
+ return;
/* If jiffies - last_db_time < db_timeout do nothing */
next_time = iq->last_db_time + iq->db_timeout;
if (!time_after(jiffies, (unsigned long)next_time))
return;
iq->last_db_time = jiffies;
- /* Get the lock and prevent tasklets. This routine gets called from
- * the poll thread. Instructions can now be posted in tasklet context
- */
- spin_lock_bh(&iq->lock);
- if (iq->fill_cnt != 0)
- ring_doorbell(oct, iq);
-
- spin_unlock_bh(&iq->lock);
-
/* Flush the instruction queue */
- if (iq->do_auto_flush)
- octeon_flush_iq(oct, iq, 1);
+ octeon_flush_iq(oct, iq, 1, 0);
}
/* Called by the Poll thread at regular intervals to check the instruction
@@ -508,11 +523,12 @@ static void check_db_timeout(struct work_struct *work)
{
struct cavium_wk *wk = (struct cavium_wk *)work;
struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
- unsigned long iq_no = wk->ctxul;
+ u64 iq_no = wk->ctxul;
struct cavium_wq *db_wq = &oct->check_db_wq[iq_no];
+ u32 delay = 10;
__check_db_timeout(oct, iq_no);
- queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));
+ queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(delay));
}
int
@@ -523,9 +539,12 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
struct iq_post_status st;
struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
- spin_lock_bh(&iq->lock);
+	/* Take the lock to prevent other tasks and the tx interrupt
+	 * handler from running.
+ */
+ spin_lock_bh(&iq->post_lock);
- st = __post_command2(oct, iq, force_db, cmd);
+ st = __post_command2(iq, cmd);
if (st.status != IQ_SEND_FAILED) {
octeon_report_sent_bytes_to_bql(buf, reqtype);
@@ -533,16 +552,19 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);
- if (iq->fill_cnt >= iq->fill_threshold || force_db)
+ if (force_db)
ring_doorbell(oct, iq);
} else {
INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
}
- spin_unlock_bh(&iq->lock);
+ spin_unlock_bh(&iq->post_lock);
- if (iq->do_auto_flush)
- octeon_flush_iq(oct, iq, 2);
+	/* The auto-flush below is left disabled; it was only done here
+	 * to expedite packet flushing for cases where there are no IQ
+	 * completion interrupts.
+	 */
+	/* if (iq->do_auto_flush) */
+	/*	octeon_flush_iq(oct, iq, 2, 0); */
return st.status;
}
@@ -557,82 +579,75 @@ octeon_prepare_soft_command(struct octeon_device *oct,
u64 ossp1)
{
struct octeon_config *oct_cfg;
- struct octeon_instr_ih *ih;
+ struct octeon_instr_ih2 *ih2;
struct octeon_instr_irh *irh;
struct octeon_instr_rdp *rdp;
- BUG_ON(opcode > 15);
- BUG_ON(subcode > 127);
+ WARN_ON(opcode > 15);
+ WARN_ON(subcode > 127);
oct_cfg = octeon_get_conf(oct);
- ih = (struct octeon_instr_ih *)&sc->cmd.ih;
- ih->tagtype = ATOMIC_TAG;
- ih->tag = LIO_CONTROL;
- ih->raw = 1;
- ih->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);
+ ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
+ ih2->tagtype = ATOMIC_TAG;
+ ih2->tag = LIO_CONTROL;
+ ih2->raw = 1;
+ ih2->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);
if (sc->datasize) {
- ih->dlengsz = sc->datasize;
- ih->rs = 1;
+ ih2->dlengsz = sc->datasize;
+ ih2->rs = 1;
}
- irh = (struct octeon_instr_irh *)&sc->cmd.irh;
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
irh->opcode = opcode;
irh->subcode = subcode;
/* opcode/subcode specific parameters (ossp) */
irh->ossp = irh_ossp;
- sc->cmd.ossp[0] = ossp0;
- sc->cmd.ossp[1] = ossp1;
+ sc->cmd.cmd2.ossp[0] = ossp0;
+ sc->cmd.cmd2.ossp[1] = ossp1;
if (sc->rdatasize) {
- rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
rdp->pcie_port = oct->pcie_port;
rdp->rlen = sc->rdatasize;
irh->rflag = 1;
- irh->len = 4;
- ih->fsz = 40; /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
+ ih2->fsz = 40; /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
} else {
irh->rflag = 0;
- irh->len = 2;
- ih->fsz = 24; /* irh + ossp[0] + ossp[1] = 24 bytes */
+ ih2->fsz = 24; /* irh + ossp[0] + ossp[1] = 24 bytes */
}
-
- while (!(oct->io_qmask.iq & (1 << sc->iq_no)))
- sc->iq_no++;
}
int octeon_send_soft_command(struct octeon_device *oct,
struct octeon_soft_command *sc)
{
- struct octeon_instr_ih *ih;
+ struct octeon_instr_ih2 *ih2;
struct octeon_instr_irh *irh;
- struct octeon_instr_rdp *rdp;
+ u32 len;
- ih = (struct octeon_instr_ih *)&sc->cmd.ih;
- if (ih->dlengsz) {
- BUG_ON(!sc->dmadptr);
- sc->cmd.dptr = sc->dmadptr;
+ ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
+ if (ih2->dlengsz) {
+ WARN_ON(!sc->dmadptr);
+ sc->cmd.cmd2.dptr = sc->dmadptr;
}
-
- irh = (struct octeon_instr_irh *)&sc->cmd.irh;
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
if (irh->rflag) {
- BUG_ON(!sc->dmarptr);
- BUG_ON(!sc->status_word);
+ WARN_ON(!sc->dmarptr);
+ WARN_ON(!sc->status_word);
*sc->status_word = COMPLETION_WORD_INIT;
- rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
-
- sc->cmd.rptr = sc->dmarptr;
+ sc->cmd.cmd2.rptr = sc->dmarptr;
}
+ len = (u32)ih2->dlengsz;
if (sc->wait_time)
sc->timeout = jiffies + sc->wait_time;
- return octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
- (u32)ih->dlengsz, REQTYPE_SOFT_COMMAND);
+ return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
+ len, REQTYPE_SOFT_COMMAND));
}
int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
@@ -667,7 +682,7 @@ int octeon_free_sc_buffer_pool(struct octeon_device *oct)
struct list_head *tmp, *tmp2;
struct octeon_soft_command *sc;
- spin_lock(&oct->sc_buf_pool.lock);
+ spin_lock_bh(&oct->sc_buf_pool.lock);
list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
list_del(tmp);
@@ -679,7 +694,7 @@ int octeon_free_sc_buffer_pool(struct octeon_device *oct)
INIT_LIST_HEAD(&oct->sc_buf_pool.head);
- spin_unlock(&oct->sc_buf_pool.lock);
+ spin_unlock_bh(&oct->sc_buf_pool.lock);
return 0;
}
@@ -695,13 +710,13 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
struct octeon_soft_command *sc = NULL;
struct list_head *tmp;
- BUG_ON((offset + datasize + rdatasize + ctxsize) >
+ WARN_ON((offset + datasize + rdatasize + ctxsize) >
SOFT_COMMAND_BUFFER_SIZE);
- spin_lock(&oct->sc_buf_pool.lock);
+ spin_lock_bh(&oct->sc_buf_pool.lock);
if (list_empty(&oct->sc_buf_pool.head)) {
- spin_unlock(&oct->sc_buf_pool.lock);
+ spin_unlock_bh(&oct->sc_buf_pool.lock);
return NULL;
}
@@ -712,7 +727,7 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
atomic_inc(&oct->sc_buf_pool.alloc_buf_count);
- spin_unlock(&oct->sc_buf_pool.lock);
+ spin_unlock_bh(&oct->sc_buf_pool.lock);
sc = (struct octeon_soft_command *)tmp;
@@ -742,7 +757,7 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
offset = (offset + datasize + 127) & 0xffffff80;
if (rdatasize) {
- BUG_ON(rdatasize < 16);
+ WARN_ON(rdatasize < 16);
sc->virtrptr = (u8 *)sc + offset;
sc->dmarptr = dma_addr + offset;
sc->rdatasize = rdatasize;
@@ -755,11 +770,11 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
void octeon_free_soft_command(struct octeon_device *oct,
struct octeon_soft_command *sc)
{
- spin_lock(&oct->sc_buf_pool.lock);
+ spin_lock_bh(&oct->sc_buf_pool.lock);
list_add_tail(&sc->node, &oct->sc_buf_pool.head);
atomic_dec(&oct->sc_buf_pool.alloc_buf_count);
- spin_unlock(&oct->sc_buf_pool.lock);
+ spin_unlock_bh(&oct->sc_buf_pool.lock);
}
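
The flush path above is worth restating: octeon_flush_iq() drains the ring from flush_index toward octeon_read_index, but lio_process_iq_request_list() stops early once the NAPI budget is spent, and tx_done = 0 tells the poller to reschedule. A minimal, self-contained sketch of that budget accounting (ring size and indices are illustrative, not driver values):

#include <stdio.h>

#define MAX_COUNT 8 /* illustrative ring size */

static unsigned int drain(unsigned int *flush_index, unsigned int read_index,
			  unsigned int budget)
{
	unsigned int done = 0;

	while (*flush_index != read_index) {
		/* ... complete the request at *flush_index here ... */
		*flush_index = (*flush_index + 1) % MAX_COUNT; /* INCR_INDEX_BY1 */
		done++;
		if (budget && done >= budget)
			break; /* budget exhausted; work remains */
	}
	return done;
}

int main(void)
{
	unsigned int flush = 0, read = 6, budget = 4;
	unsigned int n = drain(&flush, read, budget);
	int tx_done = !(budget && n >= budget);

	printf("processed %u, tx_done=%d, flush=%u\n", n, tx_done, flush);
	return 0;
}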
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c
index 091f537a9..709049e36 100644
--- a/drivers/net/ethernet/cavium/liquidio/response_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c
@@ -19,28 +19,14 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/version.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
-#include <linux/dma-mapping.h>
#include <linux/pci.h>
-#include <linux/kthread.h>
#include <linux/netdevice.h>
-#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
-#include "octeon_nic.h"
#include "octeon_main.h"
-#include "octeon_network.h"
-#include "cn66xx_regs.h"
-#include "cn66xx_device.h"
-#include "cn68xx_regs.h"
-#include "cn68xx_device.h"
-#include "liquidio_image.h"
static void oct_poll_req_completion(struct work_struct *work);
@@ -54,8 +40,9 @@ int octeon_setup_response_list(struct octeon_device *oct)
spin_lock_init(&oct->response_list[i].lock);
atomic_set(&oct->response_list[i].pending_req_count, 0);
}
+ spin_lock_init(&oct->cmd_resp_wqlock);
- oct->dma_comp_wq.wq = create_workqueue("dma-comp");
+ oct->dma_comp_wq.wq = alloc_workqueue("dma-comp", WQ_MEM_RECLAIM, 0);
if (!oct->dma_comp_wq.wq) {
dev_err(&oct->pci_dev->dev, "failed to create wq thread\n");
return -ENOMEM;
@@ -64,7 +51,8 @@ int octeon_setup_response_list(struct octeon_device *oct)
cwq = &oct->dma_comp_wq;
INIT_DELAYED_WORK(&cwq->wk.work, oct_poll_req_completion);
cwq->wk.ctxptr = oct;
- queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100));
+ oct->cmd_resp_state = OCT_DRV_ONLINE;
+ queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(50));
return ret;
}
@@ -72,7 +60,6 @@ int octeon_setup_response_list(struct octeon_device *oct)
void octeon_delete_response_list(struct octeon_device *oct)
{
cancel_delayed_work_sync(&oct->dma_comp_wq.wk.work);
- flush_workqueue(oct->dma_comp_wq.wq);
destroy_workqueue(oct->dma_comp_wq.wq);
}
@@ -86,6 +73,7 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
u32 status;
u64 status64;
struct octeon_instr_rdp *rdp;
+ u64 rptr;
ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST];
@@ -103,7 +91,8 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
sc = (struct octeon_soft_command *)ordered_sc_list->
head.next;
- rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
+ rptr = sc->cmd.cmd2.rptr;
status = OCTEON_REQUEST_PENDING;
@@ -111,7 +100,7 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
* to where rptr is pointing to
*/
dma_sync_single_for_cpu(&octeon_dev->pci_dev->dev,
- sc->cmd.rptr, rdp->rlen,
+ rptr, rdp->rlen,
DMA_FROM_DEVICE);
status64 = *sc->status_word;
@@ -173,6 +162,5 @@ static void oct_poll_req_completion(struct work_struct *work)
struct cavium_wq *cwq = &oct->dma_comp_wq;
lio_process_ordered_list(oct, 0);
-
- queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100));
+ queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(50));
}
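
The ordered soft-command list above hinges on a completion word: the requester seeds *sc->status_word with COMPLETION_WORD_INIT, and the poll worker treats any other value as the device's write-back. A standalone userspace model of that handshake (the sentinel value and timings are assumptions for illustration, not driver constants):

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define COMPLETION_WORD_INIT 0xFFFFFFFFFFFFFFFFull /* assumed sentinel */

static _Atomic uint64_t status_word = COMPLETION_WORD_INIT;

static void *device(void *arg)
{
	(void)arg;
	usleep(10000);   /* simulated device latency */
	status_word = 0; /* device writes back a success status */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, device, NULL);
	while (status_word == COMPLETION_WORD_INIT)
		usleep(1000); /* poll, like oct_poll_req_completion() */
	printf("request done, status=%llu\n", (unsigned long long)status_word);
	pthread_join(&t, NULL);
	return 0;
}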
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 388cd799d..4ab404f45 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -146,7 +146,6 @@ struct octeon_mgmt {
struct device *dev;
struct napi_struct napi;
struct tasklet_struct tx_clean_tasklet;
- struct phy_device *phydev;
struct device_node *phy_np;
resource_size_t mix_phys;
resource_size_t mix_size;
@@ -787,14 +786,12 @@ static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
static int octeon_mgmt_ioctl(struct net_device *netdev,
struct ifreq *rq, int cmd)
{
- struct octeon_mgmt *p = netdev_priv(netdev);
-
switch (cmd) {
case SIOCSHWTSTAMP:
return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
default:
- if (p->phydev)
- return phy_mii_ioctl(p->phydev, rq, cmd);
+ if (netdev->phydev)
+ return phy_mii_ioctl(netdev->phydev, rq, cmd);
return -EINVAL;
}
}
@@ -836,16 +833,18 @@ static void octeon_mgmt_enable_link(struct octeon_mgmt *p)
static void octeon_mgmt_update_link(struct octeon_mgmt *p)
{
+ struct net_device *ndev = p->netdev;
+ struct phy_device *phydev = ndev->phydev;
union cvmx_agl_gmx_prtx_cfg prtx_cfg;
prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
- if (!p->phydev->link)
+ if (!phydev->link)
prtx_cfg.s.duplex = 1;
else
- prtx_cfg.s.duplex = p->phydev->duplex;
+ prtx_cfg.s.duplex = phydev->duplex;
- switch (p->phydev->speed) {
+ switch (phydev->speed) {
case 10:
prtx_cfg.s.speed = 0;
prtx_cfg.s.slottime = 0;
@@ -871,7 +870,7 @@ static void octeon_mgmt_update_link(struct octeon_mgmt *p)
prtx_cfg.s.speed_msb = 0;
/* Only matters for half-duplex */
prtx_cfg.s.slottime = 1;
- prtx_cfg.s.burst = p->phydev->duplex;
+ prtx_cfg.s.burst = phydev->duplex;
}
break;
case 0: /* No link */
@@ -894,9 +893,9 @@ static void octeon_mgmt_update_link(struct octeon_mgmt *p)
/* MII (both speeds) and RGMII 1000 speed. */
agl_clk.s.clk_cnt = 1;
if (prtx_ctl.s.mode == 0) { /* RGMII mode */
- if (p->phydev->speed == 10)
+ if (phydev->speed == 10)
agl_clk.s.clk_cnt = 50;
- else if (p->phydev->speed == 100)
+ else if (phydev->speed == 100)
agl_clk.s.clk_cnt = 5;
}
cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64);
@@ -906,39 +905,40 @@ static void octeon_mgmt_update_link(struct octeon_mgmt *p)
static void octeon_mgmt_adjust_link(struct net_device *netdev)
{
struct octeon_mgmt *p = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
unsigned long flags;
int link_changed = 0;
- if (!p->phydev)
+ if (!phydev)
return;
spin_lock_irqsave(&p->lock, flags);
- if (!p->phydev->link && p->last_link)
+ if (!phydev->link && p->last_link)
link_changed = -1;
- if (p->phydev->link
- && (p->last_duplex != p->phydev->duplex
- || p->last_link != p->phydev->link
- || p->last_speed != p->phydev->speed)) {
+ if (phydev->link &&
+ (p->last_duplex != phydev->duplex ||
+ p->last_link != phydev->link ||
+ p->last_speed != phydev->speed)) {
octeon_mgmt_disable_link(p);
link_changed = 1;
octeon_mgmt_update_link(p);
octeon_mgmt_enable_link(p);
}
- p->last_link = p->phydev->link;
- p->last_speed = p->phydev->speed;
- p->last_duplex = p->phydev->duplex;
+ p->last_link = phydev->link;
+ p->last_speed = phydev->speed;
+ p->last_duplex = phydev->duplex;
spin_unlock_irqrestore(&p->lock, flags);
if (link_changed != 0) {
if (link_changed > 0) {
pr_info("%s: Link is up - %d/%s\n", netdev->name,
- p->phydev->speed,
- DUPLEX_FULL == p->phydev->duplex ?
+ phydev->speed,
+ phydev->duplex == DUPLEX_FULL ?
"Full" : "Half");
} else {
pr_info("%s: Link is down\n", netdev->name);
@@ -949,6 +949,7 @@ static void octeon_mgmt_adjust_link(struct net_device *netdev)
static int octeon_mgmt_init_phy(struct net_device *netdev)
{
struct octeon_mgmt *p = netdev_priv(netdev);
+ struct phy_device *phydev = NULL;
if (octeon_is_simulation() || p->phy_np == NULL) {
/* No PHYs in the simulator. */
@@ -956,11 +957,11 @@ static int octeon_mgmt_init_phy(struct net_device *netdev)
return 0;
}
- p->phydev = of_phy_connect(netdev, p->phy_np,
- octeon_mgmt_adjust_link, 0,
- PHY_INTERFACE_MODE_MII);
+ phydev = of_phy_connect(netdev, p->phy_np,
+ octeon_mgmt_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
- if (!p->phydev)
+ if (!phydev)
return -ENODEV;
return 0;
@@ -1080,9 +1081,9 @@ static int octeon_mgmt_open(struct net_device *netdev)
}
/* Set the mode of the interface, RGMII/MII. */
- if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && p->phydev) {
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && netdev->phydev) {
union cvmx_agl_prtx_ctl agl_prtx_ctl;
- int rgmii_mode = (p->phydev->supported &
+ int rgmii_mode = (netdev->phydev->supported &
(SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0;
agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
@@ -1205,7 +1206,7 @@ static int octeon_mgmt_open(struct net_device *netdev)
/* Configure the port duplex, speed and enables */
octeon_mgmt_disable_link(p);
- if (p->phydev)
+ if (netdev->phydev)
octeon_mgmt_update_link(p);
octeon_mgmt_enable_link(p);
@@ -1214,9 +1215,9 @@ static int octeon_mgmt_open(struct net_device *netdev)
/* PHY is not present in simulator. The carrier is enabled
* while initializing the phy for simulator, leave it enabled.
*/
- if (p->phydev) {
+ if (netdev->phydev) {
netif_carrier_off(netdev);
- phy_start_aneg(p->phydev);
+ phy_start_aneg(netdev->phydev);
}
netif_wake_queue(netdev);
@@ -1244,9 +1245,8 @@ static int octeon_mgmt_stop(struct net_device *netdev)
napi_disable(&p->napi);
netif_stop_queue(netdev);
- if (p->phydev)
- phy_disconnect(p->phydev);
- p->phydev = NULL;
+ if (netdev->phydev)
+ phy_disconnect(netdev->phydev);
netif_carrier_off(netdev);
@@ -1346,50 +1346,23 @@ static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}
-static int octeon_mgmt_get_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
-{
- struct octeon_mgmt *p = netdev_priv(netdev);
-
- if (p->phydev)
- return phy_ethtool_gset(p->phydev, cmd);
-
- return -EOPNOTSUPP;
-}
-
-static int octeon_mgmt_set_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
-{
- struct octeon_mgmt *p = netdev_priv(netdev);
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (p->phydev)
- return phy_ethtool_sset(p->phydev, cmd);
-
- return -EOPNOTSUPP;
-}
-
static int octeon_mgmt_nway_reset(struct net_device *dev)
{
- struct octeon_mgmt *p = netdev_priv(dev);
-
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- if (p->phydev)
- return phy_start_aneg(p->phydev);
+ if (dev->phydev)
+ return phy_start_aneg(dev->phydev);
return -EOPNOTSUPP;
}
static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
.get_drvinfo = octeon_mgmt_get_drvinfo,
- .get_settings = octeon_mgmt_get_settings,
- .set_settings = octeon_mgmt_set_settings,
.nway_reset = octeon_mgmt_nway_reset,
.get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static const struct net_device_ops octeon_mgmt_ops = {
@@ -1540,6 +1513,7 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
return 0;
err:
+ of_node_put(p->phy_np);
free_netdev(netdev);
return result;
}
@@ -1547,8 +1521,10 @@ err:
static int octeon_mgmt_remove(struct platform_device *pdev)
{
struct net_device *netdev = platform_get_drvdata(pdev);
+ struct octeon_mgmt *p = netdev_priv(netdev);
unregister_netdev(netdev);
+ of_node_put(p->phy_np);
free_netdev(netdev);
return 0;
}
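
octeon_mgmt_adjust_link() above only reprograms the MAC when the PHY state actually changed, by comparing link/speed/duplex against the last-seen values. A self-contained sketch of that change test (types simplified for illustration):

#include <stdbool.h>
#include <stdio.h>

struct phy_state { bool link; int speed; int duplex; };

static int link_changed(const struct phy_state *now, struct phy_state *last)
{
	int changed = 0;

	if (!now->link && last->link)
		changed = -1; /* link went down */
	else if (now->link && (now->speed != last->speed ||
			       now->duplex != last->duplex ||
			       now->link != last->link))
		changed = 1;  /* link came up or renegotiated */
	*last = *now;         /* remember for the next poll */
	return changed;
}

int main(void)
{
	struct phy_state last = { false, 0, 0 };
	struct phy_state now = { true, 1000, 1 };

	printf("changed=%d\n", link_changed(&now, &last)); /* 1 */
	printf("changed=%d\n", link_changed(&now, &last)); /* 0 */
	return 0;
}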
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 83025bb47..e29815d9e 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -279,6 +279,7 @@ struct nicvf {
u8 sqs_id;
bool sqs_mode;
bool hw_tso;
+ bool t88;
/* Receive buffer alloc */
u32 rb_page_offset;
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 16ed20357..85cc782b9 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -251,9 +251,14 @@ static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
int lmac;
u64 lmac_cfg;
- /* Max value that can be set is 60 */
- if (size > 60)
- size = 60;
+	/* There is an issue in HW wherein, while sending GSO-sized
+	 * pkts as part of TSO, if the pkt len falls below this size the
+	 * NIC will zero-pad the packet and also update the IP total length.
+	 * Hence set this value to less than the min pkt size of MAC+IP+TCP
+	 * headers; BGX will do the padding to transmit a 64 byte pkt.
+	 */
+ if (size > 52)
+ size = 52;
for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) {
lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index a19e73f11..324034961 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -513,6 +513,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
struct nicvf *nic = netdev_priv(netdev);
struct snd_queue *sq;
struct sq_hdr_subdesc *hdr;
+ struct sq_hdr_subdesc *tso_sqe;
sq = &nic->qs->sq[cqe_tx->sq_idx];
@@ -527,17 +528,21 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
- /* For TSO offloaded packets only one SQE will have a valid SKB */
if (skb) {
+ /* Check for dummy descriptor used for HW TSO offload on 88xx */
+ if (hdr->dont_send) {
+ /* Get actual TSO descriptors and free them */
+ tso_sqe =
+ (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
+ nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1);
+ }
nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
prefetch(skb);
dev_consume_skb_any(skb);
sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
} else {
- /* In case of HW TSO, HW sends a CQE for each segment of a TSO
- * packet instead of a single CQE for the whole TSO packet
- * transmitted. Each of this CQE points to the same SQE, so
- * avoid freeing same SQE multiple times.
+		/* In case of SW TSO on 88xx, only the last segment will
+		 * have an SKB attached, so just free the SQEs here.
*/
if (!nic->hw_tso)
nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
@@ -1502,6 +1507,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *netdev;
struct nicvf *nic;
int err, qcount;
+ u16 sdevid;
err = pci_enable_device(pdev);
if (err) {
@@ -1575,6 +1581,10 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!pass1_silicon(nic->pdev))
nic->hw_tso = true;
+ pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
+ if (sdevid == 0xA134)
+ nic->t88 = true;
+
/* Check if this VF is in QS only mode */
if (nic->sqs_mode)
return 0;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 0ff8e60de..dda3ea3f3 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -938,6 +938,8 @@ static int nicvf_tso_count_subdescs(struct sk_buff *skb)
return num_edescs + sh->gso_segs;
}
+#define POST_CQE_DESC_COUNT 2
+
/* Get the number of SQ descriptors needed to xmit this skb */
static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
{
@@ -948,6 +950,10 @@ static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
return subdesc_cnt;
}
+ /* Dummy descriptors to get TSO pkt completion notification */
+ if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size)
+ subdesc_cnt += POST_CQE_DESC_COUNT;
+
if (skb_shinfo(skb)->nr_frags)
subdesc_cnt += skb_shinfo(skb)->nr_frags;
@@ -965,14 +971,21 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
struct sq_hdr_subdesc *hdr;
hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
- sq->skbuff[qentry] = (u64)skb;
-
memset(hdr, 0, SND_QUEUE_DESC_SIZE);
hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
- /* Enable notification via CQE after processing SQE */
- hdr->post_cqe = 1;
- /* No of subdescriptors following this */
- hdr->subdesc_cnt = subdesc_cnt;
+
+ if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) {
+		/* Leave post_cqe = 0 to avoid the HW posting a CQE for
+		 * every TSO segment transmitted on 88xx.
+ */
+ hdr->subdesc_cnt = subdesc_cnt - POST_CQE_DESC_COUNT;
+ } else {
+ sq->skbuff[qentry] = (u64)skb;
+ /* Enable notification via CQE after processing SQE */
+ hdr->post_cqe = 1;
+ /* No of subdescriptors following this */
+ hdr->subdesc_cnt = subdesc_cnt;
+ }
hdr->tot_len = len;
/* Offload checksum calculation to HW */
@@ -1023,6 +1036,37 @@ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
gather->addr = data;
}
+/* Add HDR + IMMEDIATE subdescriptors right after descriptors of a TSO
+ * packet so that a CQE is posted as a notification for transmission
+ * of the TSO packet.
+ */
+static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry,
+ int tso_sqe, struct sk_buff *skb)
+{
+ struct sq_imm_subdesc *imm;
+ struct sq_hdr_subdesc *hdr;
+
+ sq->skbuff[qentry] = (u64)skb;
+
+ hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
+ memset(hdr, 0, SND_QUEUE_DESC_SIZE);
+ hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
+ /* Enable notification via CQE after processing SQE */
+ hdr->post_cqe = 1;
+ /* There is no packet to transmit here */
+ hdr->dont_send = 1;
+ hdr->subdesc_cnt = POST_CQE_DESC_COUNT - 1;
+ hdr->tot_len = 1;
+ /* Actual TSO header SQE index, needed for cleanup */
+ hdr->rsvd2 = tso_sqe;
+
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ imm = (struct sq_imm_subdesc *)GET_SQ_DESC(sq, qentry);
+ memset(imm, 0, SND_QUEUE_DESC_SIZE);
+ imm->subdesc_type = SQ_DESC_TYPE_IMMEDIATE;
+ imm->len = 1;
+}
+
/* Segment a TSO packet into 'gso_size' segments and append
* them to SQ for transfer
*/
@@ -1096,7 +1140,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
{
int i, size;
- int subdesc_cnt;
+ int subdesc_cnt, tso_sqe = 0;
int sq_num, qentry;
struct queue_set *qs;
struct snd_queue *sq;
@@ -1131,6 +1175,7 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
/* Add SQ header subdesc */
nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
skb, skb->len);
+ tso_sqe = qentry;
/* Add SQ gather subdescs */
qentry = nicvf_get_nxt_sqentry(sq, qentry);
@@ -1154,6 +1199,11 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
}
doorbell:
+ if (nic->t88 && skb_shinfo(skb)->gso_size) {
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb);
+ }
+
/* make sure all memory stores are done before ringing doorbell */
smp_wmb();
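
The accounting above reserves POST_CQE_DESC_COUNT (2) extra subdescriptors on 88xx HW TSO for the dummy HDR+IMMEDIATE pair that raises the single completion CQE. A standalone sketch of the descriptor math (the base per-packet count of 2 is an assumption for illustration; the driver computes its own minimum):

#include <stdbool.h>
#include <stdio.h>

#define POST_CQE_DESC_COUNT 2
#define MIN_DESC_PER_PKT    2 /* assumed: header + first gather subdesc */

static int sq_subdescs_required(bool t88, bool hw_tso, int gso_size,
				int nr_frags)
{
	int cnt = MIN_DESC_PER_PKT;

	if (t88 && hw_tso && gso_size)
		cnt += POST_CQE_DESC_COUNT; /* dummy completion pair */
	return cnt + nr_frags;              /* one gather subdesc per frag */
}

int main(void)
{
	printf("non-TSO, 3 frags:     %d\n",
	       sq_subdescs_required(false, false, 0, 3));
	printf("88xx HW TSO, 3 frags: %d\n",
	       sq_subdescs_required(true, true, 1448, 3));
	return 0;
}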
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index 4686a85a8..5713e83be 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -96,17 +96,6 @@ config CHELSIO_T4_DCB
If unsure, say N.
-config CHELSIO_T4_UWIRE
- bool "Unified Wire Support for Chelsio T5 cards"
- default n
- depends on CHELSIO_T4
- ---help---
- Enable unified-wire offload features.
- Say Y here if you want to enable unified-wire over Ethernet
- in the driver.
-
- If unsure, say N.
-
config CHELSIO_T4_FCOE
bool "Fibre Channel over Ethernet (FCoE) Support for Chelsio T5 cards"
default n
@@ -137,4 +126,9 @@ config CHELSIO_T4VF
To compile this driver as a module choose M here; the module
will be called cxgb4vf.
+config CHELSIO_LIB
+ tristate
+ ---help---
+ Common library for Chelsio drivers.
+
endif # NET_VENDOR_CHELSIO
diff --git a/drivers/net/ethernet/chelsio/Makefile b/drivers/net/ethernet/chelsio/Makefile
index 390510b5e..b6a5eec6e 100644
--- a/drivers/net/ethernet/chelsio/Makefile
+++ b/drivers/net/ethernet/chelsio/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_CHELSIO_T1) += cxgb/
obj-$(CONFIG_CHELSIO_T3) += cxgb3/
obj-$(CONFIG_CHELSIO_T4) += cxgb4/
obj-$(CONFIG_CHELSIO_T4VF) += cxgb4vf/
+obj-$(CONFIG_CHELSIO_LIB) += libcxgb/
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index 85c92821b..ace0ab98d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -7,5 +7,4 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
-cxgb4-$(CONFIG_CHELSIO_T4_UWIRE) += cxgb4_ppm.o
cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index b4fceb924..edd23386b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -418,8 +418,9 @@ struct trace_params {
struct link_config {
unsigned short supported; /* link capabilities */
unsigned short advertising; /* advertised capabilities */
- unsigned short requested_speed; /* speed user has requested */
- unsigned short speed; /* actual link speed */
+ unsigned short lp_advertising; /* peer advertised capabilities */
+ unsigned int requested_speed; /* speed user has requested */
+ unsigned int speed; /* actual link speed */
unsigned char requested_fc; /* flow control user has requested */
unsigned char fc; /* actual link flow control */
unsigned char autoneg; /* autonegotiating? */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 7a0b92b2f..02f80febe 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -480,178 +480,293 @@ static int identify_port(struct net_device *dev,
return t4_identify_port(adap, adap->pf, netdev2pinfo(dev)->viid, val);
}
-static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps)
+/**
+ * from_fw_port_mod_type - translate Firmware Port/Module type to Ethtool Port Type
+ * @port_type: Firmware Port Type
+ * @mod_type: Firmware Module Type
+ *
+ * Translate Firmware Port/Module type to Ethtool Port Type.
+ */
+static int from_fw_port_mod_type(enum fw_port_type port_type,
+ enum fw_port_module_type mod_type)
{
- unsigned int v = 0;
-
- if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
- type == FW_PORT_TYPE_BT_XAUI) {
- v |= SUPPORTED_TP;
- if (caps & FW_PORT_CAP_SPEED_100M)
- v |= SUPPORTED_100baseT_Full;
- if (caps & FW_PORT_CAP_SPEED_1G)
- v |= SUPPORTED_1000baseT_Full;
- if (caps & FW_PORT_CAP_SPEED_10G)
- v |= SUPPORTED_10000baseT_Full;
- } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
- v |= SUPPORTED_Backplane;
- if (caps & FW_PORT_CAP_SPEED_1G)
- v |= SUPPORTED_1000baseKX_Full;
- if (caps & FW_PORT_CAP_SPEED_10G)
- v |= SUPPORTED_10000baseKX4_Full;
- } else if (type == FW_PORT_TYPE_KR) {
- v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
- } else if (type == FW_PORT_TYPE_BP_AP) {
- v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
- SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
- } else if (type == FW_PORT_TYPE_BP4_AP) {
- v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
- SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
- SUPPORTED_10000baseKX4_Full;
- } else if (type == FW_PORT_TYPE_FIBER_XFI ||
- type == FW_PORT_TYPE_FIBER_XAUI ||
- type == FW_PORT_TYPE_SFP ||
- type == FW_PORT_TYPE_QSFP_10G ||
- type == FW_PORT_TYPE_QSA) {
- v |= SUPPORTED_FIBRE;
- if (caps & FW_PORT_CAP_SPEED_1G)
- v |= SUPPORTED_1000baseT_Full;
- if (caps & FW_PORT_CAP_SPEED_10G)
- v |= SUPPORTED_10000baseT_Full;
- } else if (type == FW_PORT_TYPE_BP40_BA ||
- type == FW_PORT_TYPE_QSFP) {
- v |= SUPPORTED_40000baseSR4_Full;
- v |= SUPPORTED_FIBRE;
+ if (port_type == FW_PORT_TYPE_BT_SGMII ||
+ port_type == FW_PORT_TYPE_BT_XFI ||
+ port_type == FW_PORT_TYPE_BT_XAUI) {
+ return PORT_TP;
+ } else if (port_type == FW_PORT_TYPE_FIBER_XFI ||
+ port_type == FW_PORT_TYPE_FIBER_XAUI) {
+ return PORT_FIBRE;
+ } else if (port_type == FW_PORT_TYPE_SFP ||
+ port_type == FW_PORT_TYPE_QSFP_10G ||
+ port_type == FW_PORT_TYPE_QSA ||
+ port_type == FW_PORT_TYPE_QSFP) {
+ if (mod_type == FW_PORT_MOD_TYPE_LR ||
+ mod_type == FW_PORT_MOD_TYPE_SR ||
+ mod_type == FW_PORT_MOD_TYPE_ER ||
+ mod_type == FW_PORT_MOD_TYPE_LRM)
+ return PORT_FIBRE;
+ else if (mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
+ mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
+ return PORT_DA;
+ else
+ return PORT_OTHER;
}
- if (caps & FW_PORT_CAP_ANEG)
- v |= SUPPORTED_Autoneg;
- return v;
+ return PORT_OTHER;
}
-static unsigned int to_fw_linkcaps(unsigned int caps)
+/**
+ * speed_to_fw_caps - translate Port Speed to Firmware Port Capabilities
+ * @speed: speed in Mb/s
+ *
+ * Translates a specific Port Speed into a Firmware Port Capabilities
+ * value.
+ */
+static unsigned int speed_to_fw_caps(int speed)
{
- unsigned int v = 0;
-
- if (caps & ADVERTISED_100baseT_Full)
- v |= FW_PORT_CAP_SPEED_100M;
- if (caps & ADVERTISED_1000baseT_Full)
- v |= FW_PORT_CAP_SPEED_1G;
- if (caps & ADVERTISED_10000baseT_Full)
- v |= FW_PORT_CAP_SPEED_10G;
- if (caps & ADVERTISED_40000baseSR4_Full)
- v |= FW_PORT_CAP_SPEED_40G;
- return v;
+ if (speed == 100)
+ return FW_PORT_CAP_SPEED_100M;
+ if (speed == 1000)
+ return FW_PORT_CAP_SPEED_1G;
+ if (speed == 10000)
+ return FW_PORT_CAP_SPEED_10G;
+ if (speed == 25000)
+ return FW_PORT_CAP_SPEED_25G;
+ if (speed == 40000)
+ return FW_PORT_CAP_SPEED_40G;
+ if (speed == 100000)
+ return FW_PORT_CAP_SPEED_100G;
+ return 0;
}
-static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+/**
+ * fw_caps_to_lmm - translate Firmware to ethtool Link Mode Mask
+ * @port_type: Firmware Port Type
+ * @fw_caps: Firmware Port Capabilities
+ * @link_mode_mask: ethtool Link Mode Mask
+ *
+ * Translate a Firmware Port Capabilities specification to an ethtool
+ * Link Mode Mask.
+ */
+static void fw_caps_to_lmm(enum fw_port_type port_type,
+ unsigned int fw_caps,
+ unsigned long *link_mode_mask)
{
- const struct port_info *p = netdev_priv(dev);
-
- if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
- p->port_type == FW_PORT_TYPE_BT_XFI ||
- p->port_type == FW_PORT_TYPE_BT_XAUI) {
- cmd->port = PORT_TP;
- } else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
- p->port_type == FW_PORT_TYPE_FIBER_XAUI) {
- cmd->port = PORT_FIBRE;
- } else if (p->port_type == FW_PORT_TYPE_SFP ||
- p->port_type == FW_PORT_TYPE_QSFP_10G ||
- p->port_type == FW_PORT_TYPE_QSA ||
- p->port_type == FW_PORT_TYPE_QSFP) {
- if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
- p->mod_type == FW_PORT_MOD_TYPE_SR ||
- p->mod_type == FW_PORT_MOD_TYPE_ER ||
- p->mod_type == FW_PORT_MOD_TYPE_LRM)
- cmd->port = PORT_FIBRE;
- else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
- p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
- cmd->port = PORT_DA;
- else
- cmd->port = PORT_OTHER;
+ #define SET_LMM(__lmm_name) __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name \
+ ## _BIT, link_mode_mask)
+
+ #define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \
+ do { \
+ if (fw_caps & FW_PORT_CAP_ ## __fw_name) \
+ SET_LMM(__lmm_name); \
+ } while (0)
+
+ switch (port_type) {
+ case FW_PORT_TYPE_BT_SGMII:
+ case FW_PORT_TYPE_BT_XFI:
+ case FW_PORT_TYPE_BT_XAUI:
+ SET_LMM(TP);
+ FW_CAPS_TO_LMM(SPEED_100M, 100baseT_Full);
+ FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
+ FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
+ break;
+
+ case FW_PORT_TYPE_KX4:
+ case FW_PORT_TYPE_KX:
+ SET_LMM(Backplane);
+ FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
+ FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
+ break;
+
+ case FW_PORT_TYPE_KR:
+ SET_LMM(Backplane);
+ SET_LMM(10000baseKR_Full);
+ break;
+
+ case FW_PORT_TYPE_BP_AP:
+ SET_LMM(Backplane);
+ SET_LMM(10000baseR_FEC);
+ SET_LMM(10000baseKR_Full);
+ SET_LMM(1000baseKX_Full);
+ break;
+
+ case FW_PORT_TYPE_BP4_AP:
+ SET_LMM(Backplane);
+ SET_LMM(10000baseR_FEC);
+ SET_LMM(10000baseKR_Full);
+ SET_LMM(1000baseKX_Full);
+ SET_LMM(10000baseKX4_Full);
+ break;
+
+ case FW_PORT_TYPE_FIBER_XFI:
+ case FW_PORT_TYPE_FIBER_XAUI:
+ case FW_PORT_TYPE_SFP:
+ case FW_PORT_TYPE_QSFP_10G:
+ case FW_PORT_TYPE_QSA:
+ SET_LMM(FIBRE);
+ FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
+ FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
+ break;
+
+ case FW_PORT_TYPE_BP40_BA:
+ case FW_PORT_TYPE_QSFP:
+ SET_LMM(FIBRE);
+ SET_LMM(40000baseSR4_Full);
+ break;
+
+ case FW_PORT_TYPE_CR_QSFP:
+ case FW_PORT_TYPE_SFP28:
+ SET_LMM(FIBRE);
+ SET_LMM(25000baseCR_Full);
+ break;
+
+ case FW_PORT_TYPE_KR4_100G:
+ case FW_PORT_TYPE_CR4_QSFP:
+ SET_LMM(FIBRE);
+ SET_LMM(100000baseCR4_Full);
+ break;
+
+ default:
+ break;
+ }
+
+ FW_CAPS_TO_LMM(ANEG, Autoneg);
+ FW_CAPS_TO_LMM(802_3_PAUSE, Pause);
+ FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause);
+
+ #undef FW_CAPS_TO_LMM
+ #undef SET_LMM
+}
+
+/**
+ * lmm_to_fw_caps - translate ethtool Link Mode Mask to Firmware
+ * capabilities
+ *
+ * @link_mode_mask: ethtool Link Mode Mask
+ *
+ * Translate ethtool Link Mode Mask into a Firmware Port capabilities
+ * value.
+ */
+static unsigned int lmm_to_fw_caps(const unsigned long *link_mode_mask)
+{
+ unsigned int fw_caps = 0;
+
+ #define LMM_TO_FW_CAPS(__lmm_name, __fw_name) \
+ do { \
+ if (test_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
+ link_mode_mask)) \
+ fw_caps |= FW_PORT_CAP_ ## __fw_name; \
+ } while (0)
+
+ LMM_TO_FW_CAPS(100baseT_Full, SPEED_100M);
+ LMM_TO_FW_CAPS(1000baseT_Full, SPEED_1G);
+ LMM_TO_FW_CAPS(10000baseT_Full, SPEED_10G);
+ LMM_TO_FW_CAPS(40000baseSR4_Full, SPEED_40G);
+ LMM_TO_FW_CAPS(25000baseCR_Full, SPEED_25G);
+ LMM_TO_FW_CAPS(100000baseCR4_Full, SPEED_100G);
+
+ #undef LMM_TO_FW_CAPS
+
+ return fw_caps;
+}
+
+static int get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *link_ksettings)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ struct ethtool_link_settings *base = &link_ksettings->base;
+
+ ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
+ ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
+ ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);
+
+ base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type);
+
+ if (pi->mdio_addr >= 0) {
+ base->phy_address = pi->mdio_addr;
+ base->mdio_support = (pi->port_type == FW_PORT_TYPE_BT_SGMII
+ ? ETH_MDIO_SUPPORTS_C22
+ : ETH_MDIO_SUPPORTS_C45);
} else {
- cmd->port = PORT_OTHER;
+ base->phy_address = 255;
+ base->mdio_support = 0;
}
- if (p->mdio_addr >= 0) {
- cmd->phy_address = p->mdio_addr;
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
- MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
+ fw_caps_to_lmm(pi->port_type, pi->link_cfg.supported,
+ link_ksettings->link_modes.supported);
+ fw_caps_to_lmm(pi->port_type, pi->link_cfg.advertising,
+ link_ksettings->link_modes.advertising);
+ fw_caps_to_lmm(pi->port_type, pi->link_cfg.lp_advertising,
+ link_ksettings->link_modes.lp_advertising);
+
+ if (netif_carrier_ok(dev)) {
+ base->speed = pi->link_cfg.speed;
+ base->duplex = DUPLEX_FULL;
} else {
- cmd->phy_address = 0; /* not really, but no better option */
- cmd->transceiver = XCVR_INTERNAL;
- cmd->mdio_support = 0;
+ base->speed = SPEED_UNKNOWN;
+ base->duplex = DUPLEX_UNKNOWN;
}
- cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
- cmd->advertising = from_fw_linkcaps(p->port_type,
- p->link_cfg.advertising);
- ethtool_cmd_speed_set(cmd,
- netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
- cmd->duplex = DUPLEX_FULL;
- cmd->autoneg = p->link_cfg.autoneg;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
- return 0;
-}
+ base->autoneg = pi->link_cfg.autoneg;
+ if (pi->link_cfg.supported & FW_PORT_CAP_ANEG)
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ supported, Autoneg);
+ if (pi->link_cfg.autoneg)
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising, Autoneg);
-static unsigned int speed_to_caps(int speed)
-{
- if (speed == 100)
- return FW_PORT_CAP_SPEED_100M;
- if (speed == 1000)
- return FW_PORT_CAP_SPEED_1G;
- if (speed == 10000)
- return FW_PORT_CAP_SPEED_10G;
- if (speed == 40000)
- return FW_PORT_CAP_SPEED_40G;
return 0;
}
-static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings
+ *link_ksettings)
{
- unsigned int cap;
- struct port_info *p = netdev_priv(dev);
- struct link_config *lc = &p->link_cfg;
- u32 speed = ethtool_cmd_speed(cmd);
+ struct port_info *pi = netdev_priv(dev);
+ struct link_config *lc = &pi->link_cfg;
+ const struct ethtool_link_settings *base = &link_ksettings->base;
struct link_config old_lc;
- int ret;
+ unsigned int fw_caps;
+ int ret = 0;
- if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
+ /* only full-duplex supported */
+ if (base->duplex != DUPLEX_FULL)
return -EINVAL;
if (!(lc->supported & FW_PORT_CAP_ANEG)) {
/* PHY offers a single speed. See if that's what's
* being requested.
*/
- if (cmd->autoneg == AUTONEG_DISABLE &&
- (lc->supported & speed_to_caps(speed)))
+ if (base->autoneg == AUTONEG_DISABLE &&
+ (lc->supported & speed_to_fw_caps(base->speed)))
return 0;
return -EINVAL;
}
old_lc = *lc;
- if (cmd->autoneg == AUTONEG_DISABLE) {
- cap = speed_to_caps(speed);
+ if (base->autoneg == AUTONEG_DISABLE) {
+ fw_caps = speed_to_fw_caps(base->speed);
- if (!(lc->supported & cap))
+ if (!(lc->supported & fw_caps))
return -EINVAL;
- lc->requested_speed = cap;
+ lc->requested_speed = fw_caps;
lc->advertising = 0;
} else {
- cap = to_fw_linkcaps(cmd->advertising);
- if (!(lc->supported & cap))
+ fw_caps =
+ lmm_to_fw_caps(link_ksettings->link_modes.advertising);
+
+ if (!(lc->supported & fw_caps))
return -EINVAL;
lc->requested_speed = 0;
- lc->advertising = cap | FW_PORT_CAP_ANEG;
+ lc->advertising = fw_caps | FW_PORT_CAP_ANEG;
}
- lc->autoneg = cmd->autoneg;
+ lc->autoneg = base->autoneg;
/* If the firmware rejects the Link Configuration request, back out
* the changes and report the error.
*/
- ret = t4_link_l1cfg(p->adapter, p->adapter->mbox, p->tx_chan, lc);
+ ret = t4_link_l1cfg(pi->adapter, pi->adapter->mbox, pi->tx_chan, lc);
if (ret)
*lc = old_lc;
@@ -1093,8 +1208,8 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
}
static const struct ethtool_ops cxgb_ethtool_ops = {
- .get_settings = get_settings,
- .set_settings = set_settings,
+ .get_link_ksettings = get_link_ksettings,
+ .set_link_ksettings = set_link_ksettings,
.get_drvinfo = get_drvinfo,
.get_msglevel = get_msglevel,
.set_msglevel = set_msglevel,
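
SET_LMM()/FW_CAPS_TO_LMM() above use token pasting to map firmware capability bits onto the ethtool link-mode bitmap. A self-contained demo of the same macro pattern (the link-mode bit positions below are invented for illustration; the capability values match the fw_port_cap enum in this patch):

#include <stdio.h>

#define FW_PORT_CAP_SPEED_1G  0x0002
#define FW_PORT_CAP_SPEED_10G 0x0008

/* illustrative bit positions, not the real ethtool values */
#define LINK_MODE_1000baseT_Full_BIT  5
#define LINK_MODE_10000baseT_Full_BIT 12

#define SET_LMM(name) (*lmm |= 1ul << LINK_MODE_ ## name ## _BIT)
#define FW_CAPS_TO_LMM(fw, name)			\
	do {						\
		if (fw_caps & FW_PORT_CAP_ ## fw)	\
			SET_LMM(name);			\
	} while (0)

static void caps_to_lmm(unsigned int fw_caps, unsigned long *lmm)
{
	FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
	FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
}

int main(void)
{
	unsigned long lmm = 0;

	caps_to_lmm(FW_PORT_CAP_SPEED_1G | FW_PORT_CAP_SPEED_10G, &lmm);
	printf("link mode mask: 0x%lx\n", lmm); /* bits 5 and 12 -> 0x1020 */
	return 0;
}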
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 6eea06a31..6694233e2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -64,6 +64,7 @@
#include <net/bonding.h>
#include <net/addrconf.h>
#include <asm/uaccess.h>
+#include <linux/crash_dump.h>
#include "cxgb4.h"
#include "t4_regs.h"
@@ -204,7 +205,7 @@ static int rx_dma_offset = 2;
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];
module_param_array(num_vf, uint, NULL, 0644);
-MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
+MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3, deprecated parameter - please use the pci sysfs interface.");
#endif
/* TX Queue select used to determine what algorithm to use for selecting TX
@@ -458,11 +459,8 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
- if (!(dev->flags & IFF_PROMISC)) {
- __dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
- if (!(dev->flags & IFF_ALLMULTI))
- __dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
- }
+ __dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
+ __dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
(dev->flags & IFF_PROMISC) ? 1 : 0,
@@ -3733,7 +3731,8 @@ static int adap_init0(struct adapter *adap)
return ret;
/* Contact FW, advertising Master capability */
- ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
+ ret = t4_fw_hello(adap, adap->mbox, adap->mbox,
+ is_kdump_kernel() ? MASTER_MUST : MASTER_MAY, &state);
if (ret < 0) {
dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
ret);
@@ -4304,10 +4303,17 @@ static const struct pci_error_handlers cxgb4_eeh = {
.resume = eeh_resume,
};
+/* Return true if the Link Configuration supports "High Speeds" (those greater
+ * than 1Gb/s).
+ */
static inline bool is_x_10g_port(const struct link_config *lc)
{
- return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
- (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
+ unsigned int speeds, high_speeds;
+
+ speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported));
+ high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);
+
+ return high_speeds != 0;
}
static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
@@ -4334,6 +4340,11 @@ static void cfg_queues(struct adapter *adap)
#endif
int ciq_size;
+	/* Reduce memory usage in the kdump environment by disabling all
+	 * offload.
+	 */
+ if (is_kdump_kernel())
+ adap->params.offload = 0;
+
for_each_port(adap, i)
n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
#ifdef CONFIG_CHELSIO_T4_DCB
@@ -4750,8 +4761,12 @@ static void print_port_info(const struct net_device *dev)
bufp += sprintf(bufp, "1000/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
bufp += sprintf(bufp, "10G/");
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G)
+ bufp += sprintf(bufp, "25G/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
bufp += sprintf(bufp, "40G/");
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G)
+ bufp += sprintf(bufp, "100G/");
if (bufp != buf)
--bufp;
sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
@@ -4827,6 +4842,60 @@ static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
return -EINVAL;
}
+#ifdef CONFIG_PCI_IOV
+static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ int err = 0;
+ int current_vfs = pci_num_vf(pdev);
+ u32 pcie_fw;
+ void __iomem *regs;
+
+ regs = pci_ioremap_bar(pdev, 0);
+ if (!regs) {
+ dev_err(&pdev->dev, "cannot map device registers\n");
+ return -ENOMEM;
+ }
+
+ pcie_fw = readl(regs + PCIE_FW_A);
+ iounmap(regs);
+ /* Check if cxgb4 is the MASTER and fw is initialized */
+ if (!(pcie_fw & PCIE_FW_INIT_F) ||
+ !(pcie_fw & PCIE_FW_MASTER_VLD_F) ||
+ PCIE_FW_MASTER_G(pcie_fw) != 4) {
+ dev_warn(&pdev->dev,
+ "cxgb4 driver needs to be MASTER to support SRIOV\n");
+ return -EOPNOTSUPP;
+ }
+
+	/* If any of the VFs is already assigned to a guest OS, then
+	 * SR-IOV for this device cannot be modified.
+ */
+ if (current_vfs && pci_vfs_assigned(pdev)) {
+ dev_err(&pdev->dev,
+ "Cannot modify SR-IOV while VFs are assigned\n");
+ num_vfs = current_vfs;
+ return num_vfs;
+ }
+
+	/* Disable SR-IOV when zero is passed.
+	 * SR-IOV must be disabled before it can be modified; otherwise
+	 * the kernel warns:
+ * " 'n' VFs already enabled. Disable before enabling 'm' VFs."
+ */
+ if (!num_vfs) {
+ pci_disable_sriov(pdev);
+ return num_vfs;
+ }
+
+ if (num_vfs != current_vfs) {
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err)
+ return err;
+ }
+ return num_vfs;
+}
+#endif
+
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int func, i, err, s_qpp, qpp, num_seg;
@@ -5160,11 +5229,16 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
sriov:
#ifdef CONFIG_PCI_IOV
- if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
+ if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0) {
+ dev_warn(&pdev->dev,
+ "Enabling SR-IOV VFs using the num_vf module "
+ "parameter is deprecated - please use the pci sysfs "
+ "interface instead.\n");
if (pci_enable_sriov(pdev, num_vf[func]) == 0)
dev_info(&pdev->dev,
"instantiated %u virtual functions\n",
num_vf[func]);
+ }
#endif
return 0;
@@ -5257,6 +5331,9 @@ static struct pci_driver cxgb4_driver = {
.probe = init_one,
.remove = remove_one,
.shutdown = remove_one,
+#ifdef CONFIG_PCI_IOV
+ .sriov_configure = cxgb4_iov_configure,
+#endif
.err_handler = &cxgb4_eeh,
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index bad253beb..ad3552df0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1192,7 +1192,7 @@ out_free: dev_kfree_skb_any(skb);
/* Discard the packet if the length is greater than mtu */
max_pkt_len = ETH_HLEN + dev->mtu;
- if (skb_vlan_tag_present(skb))
+ if (skb_vlan_tagged(skb))
max_pkt_len += VLAN_HLEN;
if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
goto out_free;
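
The check above sizes the drop threshold as ETH_HLEN + MTU, and the switch to skb_vlan_tagged() adds VLAN_HLEN whenever the skb carries a VLAN tag, including one already present in the payload (which skb_vlan_tag_present() would miss). The arithmetic, as a quick standalone sketch:

#include <stdbool.h>
#include <stdio.h>

#define ETH_HLEN  14
#define VLAN_HLEN 4

static unsigned int max_pkt_len(unsigned int mtu, bool vlan_tagged)
{
	return ETH_HLEN + mtu + (vlan_tagged ? VLAN_HLEN : 0);
}

int main(void)
{
	printf("mtu 1500, untagged: %u\n", max_pkt_len(1500, false)); /* 1514 */
	printf("mtu 1500, tagged:   %u\n", max_pkt_len(1500, true));  /* 1518 */
	return 0;
}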
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index a63addb4e..660204bff 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3627,7 +3627,8 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
}
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
- FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
+ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \
+ FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \
FW_PORT_CAP_ANEG)
/**
@@ -7196,8 +7197,12 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
speed = 1000;
else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
speed = 10000;
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
+ speed = 25000;
else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
speed = 40000;
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
+ speed = 100000;
lc = &pi->link_cfg;
@@ -7219,6 +7224,7 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
lc->speed = speed;
lc->fc = fc;
lc->supported = be16_to_cpu(p->u.info.pcap);
+ lc->lp_advertising = be16_to_cpu(p->u.info.lpacap);
t4_os_link_changed(adap, pi->port_id, link_ok);
}
}
@@ -7284,6 +7290,7 @@ static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
static void init_link_config(struct link_config *lc, unsigned int caps)
{
lc->supported = caps;
+ lc->lp_advertising = 0;
lc->requested_speed = 0;
lc->speed = 0;
lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 4705e2dea..e0ebe1378 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -104,6 +104,8 @@ enum {
enum CPL_error {
CPL_ERR_NONE = 0,
+ CPL_ERR_TCAM_PARITY = 1,
+ CPL_ERR_TCAM_MISS = 2,
CPL_ERR_TCAM_FULL = 3,
CPL_ERR_BAD_LENGTH = 15,
CPL_ERR_BAD_ROUTE = 18,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 392d6644f..30507d444 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -2249,22 +2249,28 @@ struct fw_acl_vlan_cmd {
enum fw_port_cap {
FW_PORT_CAP_SPEED_100M = 0x0001,
FW_PORT_CAP_SPEED_1G = 0x0002,
- FW_PORT_CAP_SPEED_2_5G = 0x0004,
+ FW_PORT_CAP_SPEED_25G = 0x0004,
FW_PORT_CAP_SPEED_10G = 0x0008,
FW_PORT_CAP_SPEED_40G = 0x0010,
FW_PORT_CAP_SPEED_100G = 0x0020,
FW_PORT_CAP_FC_RX = 0x0040,
FW_PORT_CAP_FC_TX = 0x0080,
FW_PORT_CAP_ANEG = 0x0100,
- FW_PORT_CAP_MDI_0 = 0x0200,
- FW_PORT_CAP_MDI_1 = 0x0400,
- FW_PORT_CAP_BEAN = 0x0800,
- FW_PORT_CAP_PMA_LPBK = 0x1000,
- FW_PORT_CAP_PCS_LPBK = 0x2000,
- FW_PORT_CAP_PHYXS_LPBK = 0x4000,
- FW_PORT_CAP_FAR_END_LPBK = 0x8000,
+ FW_PORT_CAP_MDIX = 0x0200,
+ FW_PORT_CAP_MDIAUTO = 0x0400,
+ FW_PORT_CAP_FEC = 0x0800,
+ FW_PORT_CAP_TECHKR = 0x1000,
+ FW_PORT_CAP_TECHKX4 = 0x2000,
+ FW_PORT_CAP_802_3_PAUSE = 0x4000,
+ FW_PORT_CAP_802_3_ASM_DIR = 0x8000,
};
+#define FW_PORT_CAP_SPEED_S 0
+#define FW_PORT_CAP_SPEED_M 0x3f
+#define FW_PORT_CAP_SPEED_V(x) ((x) << FW_PORT_CAP_SPEED_S)
+#define FW_PORT_CAP_SPEED_G(x) \
+ (((x) >> FW_PORT_CAP_SPEED_S) & FW_PORT_CAP_SPEED_M)
+
enum fw_port_mdi {
FW_PORT_CAP_MDI_UNCHANGED,
FW_PORT_CAP_MDI_AUTO,
@@ -2376,7 +2382,8 @@ struct fw_port_cmd {
__u8 cbllen;
__u8 auxlinfo;
__u8 dcbxdis_pkd;
- __u8 r8_lo[3];
+ __u8 r8_lo;
+ __be16 lpacap;
__be64 r9;
} info;
struct fw_port_diags {
@@ -2555,6 +2562,11 @@ enum fw_port_type {
FW_PORT_TYPE_QSA,
FW_PORT_TYPE_QSFP,
FW_PORT_TYPE_BP40_BA,
+ FW_PORT_TYPE_KR4_100G,
+ FW_PORT_TYPE_CR4_QSFP,
+ FW_PORT_TYPE_CR_QSFP,
+ FW_PORT_TYPE_CR2_QSFP,
+ FW_PORT_TYPE_SFP28,
FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_M
};
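
The new FW_PORT_CAP_SPEED_S/M/V/G macros above isolate the six speed bits of a capability word; is_x_10g_port() in cxgb4_main.c then masks off 100M/1G to test for "high speeds". A standalone check using the values from this patch:

#include <stdio.h>

#define FW_PORT_CAP_SPEED_100M 0x0001
#define FW_PORT_CAP_SPEED_1G   0x0002
#define FW_PORT_CAP_SPEED_25G  0x0004
#define FW_PORT_CAP_SPEED_10G  0x0008
#define FW_PORT_CAP_ANEG       0x0100

#define FW_PORT_CAP_SPEED_S 0
#define FW_PORT_CAP_SPEED_M 0x3f
#define FW_PORT_CAP_SPEED_V(x) ((x) << FW_PORT_CAP_SPEED_S)
#define FW_PORT_CAP_SPEED_G(x) \
	(((x) >> FW_PORT_CAP_SPEED_S) & FW_PORT_CAP_SPEED_M)

static int is_high_speed(unsigned int supported)
{
	unsigned int speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(supported));
	unsigned int high = speeds &
			    ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);

	return high != 0;
}

int main(void)
{
	unsigned int caps = FW_PORT_CAP_SPEED_1G | FW_PORT_CAP_SPEED_25G |
			    FW_PORT_CAP_ANEG;

	printf("speed field: 0x%x\n", FW_PORT_CAP_SPEED_G(caps)); /* 0x06 */
	printf("high speed:  %d\n", is_high_speed(caps));         /* 1 */
	return 0;
}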
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 734dd776c..109bc6304 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -353,6 +353,10 @@ struct hash_mac_addr {
u8 addr[ETH_ALEN];
};
+struct mbox_list {
+ struct list_head list;
+};
+
/*
* Per-"adapter" (Virtual Function) information.
*/
@@ -387,6 +391,10 @@ struct adapter {
/* various locks */
spinlock_t stats_lock;
+ /* lock for mailbox cmd list */
+ spinlock_t mbox_lock;
+ struct mbox_list mlist;
+
/* support for mailbox command/reply logging */
#define T4VF_OS_LOG_MBOX_CMDS 256
struct mbox_cmd_log *mbox_log;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 04fc6f6d1..e116bb8d1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -937,12 +937,8 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
struct port_info *pi = netdev_priv(dev);
- if (!(dev->flags & IFF_PROMISC)) {
- __dev_uc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
- if (!(dev->flags & IFF_ALLMULTI))
- __dev_mc_sync(dev, cxgb4vf_mac_sync,
- cxgb4vf_mac_unsync);
- }
+ __dev_uc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
+ __dev_mc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
return t4vf_set_rxmode(pi->adapter, pi->viid, -1,
(dev->flags & IFF_PROMISC) != 0,
(dev->flags & IFF_ALLMULTI) != 0,
@@ -1205,105 +1201,187 @@ static void cxgb4vf_poll_controller(struct net_device *dev)
* state of the port to which we're linked.
*/
-static unsigned int t4vf_from_fw_linkcaps(enum fw_port_type type,
- unsigned int caps)
-{
- unsigned int v = 0;
-
- if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
- type == FW_PORT_TYPE_BT_XAUI) {
- v |= SUPPORTED_TP;
- if (caps & FW_PORT_CAP_SPEED_100M)
- v |= SUPPORTED_100baseT_Full;
- if (caps & FW_PORT_CAP_SPEED_1G)
- v |= SUPPORTED_1000baseT_Full;
- if (caps & FW_PORT_CAP_SPEED_10G)
- v |= SUPPORTED_10000baseT_Full;
- } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
- v |= SUPPORTED_Backplane;
- if (caps & FW_PORT_CAP_SPEED_1G)
- v |= SUPPORTED_1000baseKX_Full;
- if (caps & FW_PORT_CAP_SPEED_10G)
- v |= SUPPORTED_10000baseKX4_Full;
- } else if (type == FW_PORT_TYPE_KR)
- v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
- else if (type == FW_PORT_TYPE_BP_AP)
- v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
- SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
- else if (type == FW_PORT_TYPE_BP4_AP)
- v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
- SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
- SUPPORTED_10000baseKX4_Full;
- else if (type == FW_PORT_TYPE_FIBER_XFI ||
- type == FW_PORT_TYPE_FIBER_XAUI ||
- type == FW_PORT_TYPE_SFP ||
- type == FW_PORT_TYPE_QSFP_10G ||
- type == FW_PORT_TYPE_QSA) {
- v |= SUPPORTED_FIBRE;
- if (caps & FW_PORT_CAP_SPEED_1G)
- v |= SUPPORTED_1000baseT_Full;
- if (caps & FW_PORT_CAP_SPEED_10G)
- v |= SUPPORTED_10000baseT_Full;
- } else if (type == FW_PORT_TYPE_BP40_BA ||
- type == FW_PORT_TYPE_QSFP) {
- v |= SUPPORTED_40000baseSR4_Full;
- v |= SUPPORTED_FIBRE;
- }
-
- if (caps & FW_PORT_CAP_ANEG)
- v |= SUPPORTED_Autoneg;
- return v;
-}
-
-static int cxgb4vf_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- const struct port_info *p = netdev_priv(dev);
-
- if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
- p->port_type == FW_PORT_TYPE_BT_XFI ||
- p->port_type == FW_PORT_TYPE_BT_XAUI)
- cmd->port = PORT_TP;
- else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
- p->port_type == FW_PORT_TYPE_FIBER_XAUI)
- cmd->port = PORT_FIBRE;
- else if (p->port_type == FW_PORT_TYPE_SFP ||
- p->port_type == FW_PORT_TYPE_QSFP_10G ||
- p->port_type == FW_PORT_TYPE_QSA ||
- p->port_type == FW_PORT_TYPE_QSFP) {
- if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
- p->mod_type == FW_PORT_MOD_TYPE_SR ||
- p->mod_type == FW_PORT_MOD_TYPE_ER ||
- p->mod_type == FW_PORT_MOD_TYPE_LRM)
- cmd->port = PORT_FIBRE;
- else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
- p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
- cmd->port = PORT_DA;
+/**
+ * from_fw_port_mod_type - translate Firmware Port/Module type to Ethtool
+ * @port_type: Firmware Port Type
+ * @mod_type: Firmware Module Type
+ *
+ * Translate Firmware Port/Module type to Ethtool Port Type.
+ */
+static int from_fw_port_mod_type(enum fw_port_type port_type,
+ enum fw_port_module_type mod_type)
+{
+ if (port_type == FW_PORT_TYPE_BT_SGMII ||
+ port_type == FW_PORT_TYPE_BT_XFI ||
+ port_type == FW_PORT_TYPE_BT_XAUI) {
+ return PORT_TP;
+ } else if (port_type == FW_PORT_TYPE_FIBER_XFI ||
+ port_type == FW_PORT_TYPE_FIBER_XAUI) {
+ return PORT_FIBRE;
+ } else if (port_type == FW_PORT_TYPE_SFP ||
+ port_type == FW_PORT_TYPE_QSFP_10G ||
+ port_type == FW_PORT_TYPE_QSA ||
+ port_type == FW_PORT_TYPE_QSFP) {
+ if (mod_type == FW_PORT_MOD_TYPE_LR ||
+ mod_type == FW_PORT_MOD_TYPE_SR ||
+ mod_type == FW_PORT_MOD_TYPE_ER ||
+ mod_type == FW_PORT_MOD_TYPE_LRM)
+ return PORT_FIBRE;
+ else if (mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
+ mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
+ return PORT_DA;
else
- cmd->port = PORT_OTHER;
- } else
- cmd->port = PORT_OTHER;
+ return PORT_OTHER;
+ }
+
+ return PORT_OTHER;
+}
+
+/**
+ * fw_caps_to_lmm - translate Firmware to ethtool Link Mode Mask
+ * @port_type: Firmware Port Type
+ * @fw_caps: Firmware Port Capabilities
+ * @link_mode_mask: ethtool Link Mode Mask
+ *
+ * Translate a Firmware Port Capabilities specification to an ethtool
+ * Link Mode Mask.
+ */
+static void fw_caps_to_lmm(enum fw_port_type port_type,
+ unsigned int fw_caps,
+ unsigned long *link_mode_mask)
+{
+ #define SET_LMM(__lmm_name) __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name\
+ ## _BIT, link_mode_mask)
+
+ #define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \
+ do { \
+ if (fw_caps & FW_PORT_CAP_ ## __fw_name) \
+ SET_LMM(__lmm_name); \
+ } while (0)
+
+ switch (port_type) {
+ case FW_PORT_TYPE_BT_SGMII:
+ case FW_PORT_TYPE_BT_XFI:
+ case FW_PORT_TYPE_BT_XAUI:
+ SET_LMM(TP);
+ FW_CAPS_TO_LMM(SPEED_100M, 100baseT_Full);
+ FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
+ FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
+ break;
+
+ case FW_PORT_TYPE_KX4:
+ case FW_PORT_TYPE_KX:
+ SET_LMM(Backplane);
+ FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
+ FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
+ break;
+
+ case FW_PORT_TYPE_KR:
+ SET_LMM(Backplane);
+ SET_LMM(10000baseKR_Full);
+ break;
+
+ case FW_PORT_TYPE_BP_AP:
+ SET_LMM(Backplane);
+ SET_LMM(10000baseR_FEC);
+ SET_LMM(10000baseKR_Full);
+ SET_LMM(1000baseKX_Full);
+ break;
+
+ case FW_PORT_TYPE_BP4_AP:
+ SET_LMM(Backplane);
+ SET_LMM(10000baseR_FEC);
+ SET_LMM(10000baseKR_Full);
+ SET_LMM(1000baseKX_Full);
+ SET_LMM(10000baseKX4_Full);
+ break;
+
+ case FW_PORT_TYPE_FIBER_XFI:
+ case FW_PORT_TYPE_FIBER_XAUI:
+ case FW_PORT_TYPE_SFP:
+ case FW_PORT_TYPE_QSFP_10G:
+ case FW_PORT_TYPE_QSA:
+ SET_LMM(FIBRE);
+ FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
+ FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
+ break;
+
+ case FW_PORT_TYPE_BP40_BA:
+ case FW_PORT_TYPE_QSFP:
+ SET_LMM(FIBRE);
+ SET_LMM(40000baseSR4_Full);
+ break;
+
+ case FW_PORT_TYPE_CR_QSFP:
+ case FW_PORT_TYPE_SFP28:
+ SET_LMM(FIBRE);
+ SET_LMM(25000baseCR_Full);
+ break;
+
+ case FW_PORT_TYPE_KR4_100G:
+ case FW_PORT_TYPE_CR4_QSFP:
+ SET_LMM(FIBRE);
+ SET_LMM(100000baseCR4_Full);
+ break;
+
+ default:
+ break;
+ }
+
+ FW_CAPS_TO_LMM(ANEG, Autoneg);
+ FW_CAPS_TO_LMM(802_3_PAUSE, Pause);
+ FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause);
+
+ #undef FW_CAPS_TO_LMM
+ #undef SET_LMM
+}
+
+static int cxgb4vf_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings
+ *link_ksettings)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ struct ethtool_link_settings *base = &link_ksettings->base;
- if (p->mdio_addr >= 0) {
- cmd->phy_address = p->mdio_addr;
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
- MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
+ ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
+ ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
+ ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);
+
+ base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type);
+
+ if (pi->mdio_addr >= 0) {
+ base->phy_address = pi->mdio_addr;
+ base->mdio_support = (pi->port_type == FW_PORT_TYPE_BT_SGMII
+ ? ETH_MDIO_SUPPORTS_C22
+ : ETH_MDIO_SUPPORTS_C45);
} else {
- cmd->phy_address = 0; /* not really, but no better option */
- cmd->transceiver = XCVR_INTERNAL;
- cmd->mdio_support = 0;
- }
-
- cmd->supported = t4vf_from_fw_linkcaps(p->port_type,
- p->link_cfg.supported);
- cmd->advertising = t4vf_from_fw_linkcaps(p->port_type,
- p->link_cfg.advertising);
- ethtool_cmd_speed_set(cmd,
- netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
- cmd->duplex = DUPLEX_FULL;
- cmd->autoneg = p->link_cfg.autoneg;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
+ base->phy_address = 255;
+ base->mdio_support = 0;
+ }
+
+ fw_caps_to_lmm(pi->port_type, pi->link_cfg.supported,
+ link_ksettings->link_modes.supported);
+ fw_caps_to_lmm(pi->port_type, pi->link_cfg.advertising,
+ link_ksettings->link_modes.advertising);
+ fw_caps_to_lmm(pi->port_type, pi->link_cfg.lp_advertising,
+ link_ksettings->link_modes.lp_advertising);
+
+ if (netif_carrier_ok(dev)) {
+ base->speed = pi->link_cfg.speed;
+ base->duplex = DUPLEX_FULL;
+ } else {
+ base->speed = SPEED_UNKNOWN;
+ base->duplex = DUPLEX_UNKNOWN;
+ }
+
+ base->autoneg = pi->link_cfg.autoneg;
+ if (pi->link_cfg.supported & FW_PORT_CAP_ANEG)
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ supported, Autoneg);
+ if (pi->link_cfg.autoneg)
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising, Autoneg);
+
return 0;
}
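The two macros above are pure token pasting: SET_LMM(name) sets ETHTOOL_LINK_MODE_<name>_BIT in the caller-supplied mask, and FW_CAPS_TO_LMM() guards that with a firmware-capability test. One expanded instance, as a sketch (same fw_caps and link_mode_mask arguments as in fw_caps_to_lmm() above):

	/* FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full) expands to: */
	do {
		if (fw_caps & FW_PORT_CAP_SPEED_10G)
			__set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
				  link_mode_mask);
	} while (0);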
@@ -1679,7 +1757,7 @@ static void cxgb4vf_get_wol(struct net_device *dev,
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
static const struct ethtool_ops cxgb4vf_ethtool_ops = {
- .get_settings = cxgb4vf_get_settings,
+ .get_link_ksettings = cxgb4vf_get_link_ksettings,
.get_drvinfo = cxgb4vf_get_drvinfo,
.get_msglevel = cxgb4vf_get_msglevel,
.set_msglevel = cxgb4vf_set_msglevel,
@@ -2778,6 +2856,8 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
* Initialize SMP data synchronization resources.
*/
spin_lock_init(&adapter->stats_lock);
+ spin_lock_init(&adapter->mbox_lock);
+ INIT_LIST_HEAD(&adapter->mlist.list);
/*
* Map our I/O registers in BAR0.
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 1bb57d3fb..c8fd4f8fe 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1188,7 +1188,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
/* Discard the packet if the length is greater than mtu */
max_pkt_len = ETH_HLEN + dev->mtu;
- if (skb_vlan_tag_present(skb))
+ if (skb_vlan_tagged(skb))
max_pkt_len += VLAN_HLEN;
if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
goto out_free;
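The switch from skb_vlan_tag_present() to skb_vlan_tagged() widens the check: the former only sees an offloaded tag carried in skb metadata, while the latter also catches an 802.1Q/802.1ad header still inline in the frame, so the extra VLAN_HLEN is now accounted for in both encapsulations. Paraphrased semantics (a sketch, not the exact <linux/if_vlan.h> text):

	/* skb_vlan_tagged(skb) is roughly: */
	bool tagged = skb_vlan_tag_present(skb) ||
		      skb->protocol == htons(ETH_P_8021Q) ||
		      skb->protocol == htons(ETH_P_8021AD);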
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 438374a05..17a2bbcf9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -107,8 +107,9 @@ struct t4vf_port_stats {
struct link_config {
unsigned int supported; /* link capabilities */
unsigned int advertising; /* advertised capabilities */
- unsigned short requested_speed; /* speed user has requested */
- unsigned short speed; /* actual link speed */
+ unsigned short lp_advertising; /* peer advertised capabilities */
+ unsigned int requested_speed; /* speed user has requested */
+ unsigned int speed; /* actual link speed */
unsigned char requested_fc; /* flow control user has requested */
unsigned char fc; /* actual link flow control */
unsigned char autoneg; /* autonegotiating? */
@@ -270,10 +271,17 @@ static inline bool is_10g_port(const struct link_config *lc)
return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
}
+/* Return true if the Link Configuration supports "High Speeds" (those greater
+ * than 1Gb/s).
+ */
static inline bool is_x_10g_port(const struct link_config *lc)
{
- return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
- (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
+ unsigned int speeds, high_speeds;
+
+ speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported));
+ high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);
+
+ return high_speeds != 0;
}
static inline unsigned int core_ticks_per_usec(const struct adapter *adapter)
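The reworked is_x_10g_port() stops enumerating individual fast speeds: it extracts the speed field and masks off the 100M and 1G bits, so every remaining speed bit (10G, 25G, 40G, 100G, and any added later) counts as "high speed". With the FW_PORT_CAP_SPEED_* bits used elsewhere in this patch, today's open-coded equivalent would be roughly:

	/* hedged sketch: the same test without the field-extraction macros */
	static inline bool is_high_speed(unsigned int supported)
	{
		return supported & (FW_PORT_CAP_SPEED_10G |
				    FW_PORT_CAP_SPEED_25G |
				    FW_PORT_CAP_SPEED_40G |
				    FW_PORT_CAP_SPEED_100G);
	}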
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 955ff7c61..b5622b168 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -139,6 +139,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL;
u32 cmd_op = FW_CMD_OP_G(be32_to_cpu(((struct fw_cmd_hdr *)cmd)->hi));
__be64 cmd_rpl[MBOX_LEN / 8];
+ struct mbox_list entry;
/* In T6, mailbox size is changed to 128 bytes to avoid
* invalidating the entire prefetch buffer.
@@ -156,6 +157,51 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
return -EINVAL;
+ /* Queue ourselves onto the mailbox access list. When our entry is at
+ * the front of the list, we have rights to access the mailbox. So we
+ * wait [for a while] till we're at the front [or bail out with an
+ * EBUSY] ...
+ */
+ spin_lock(&adapter->mbox_lock);
+ list_add_tail(&entry.list, &adapter->mlist.list);
+ spin_unlock(&adapter->mbox_lock);
+
+ delay_idx = 0;
+ ms = delay[0];
+
+ for (i = 0; ; i += ms) {
+ /* If we've waited too long, return a busy indication. This
+ * really ought to be based on our initial position in the
+ * mailbox access list but this is a start. We very rarely
+ * contend on access to the mailbox ...
+ */
+ if (i > FW_CMD_MAX_TIMEOUT) {
+ spin_lock(&adapter->mbox_lock);
+ list_del(&entry.list);
+ spin_unlock(&adapter->mbox_lock);
+ ret = -EBUSY;
+ t4vf_record_mbox(adapter, cmd, size, access, ret);
+ return ret;
+ }
+
+ /* If we're at the head, break out and start the mailbox
+ * protocol.
+ */
+ if (list_first_entry(&adapter->mlist.list, struct mbox_list,
+ list) == &entry)
+ break;
+
+ /* Delay for a bit before checking again ... */
+ if (sleep_ok) {
+ ms = delay[delay_idx]; /* last element may repeat */
+ if (delay_idx < ARRAY_SIZE(delay) - 1)
+ delay_idx++;
+ msleep(ms);
+ } else {
+ mdelay(ms);
+ }
+ }
+
/*
* Loop trying to get ownership of the mailbox. Return an error
* if we can't gain ownership.
@@ -164,6 +210,9 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
if (v != MBOX_OWNER_DRV) {
+ spin_lock(&adapter->mbox_lock);
+ list_del(&entry.list);
+ spin_unlock(&adapter->mbox_lock);
ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
t4vf_record_mbox(adapter, cmd, size, access, ret);
return ret;
@@ -248,6 +297,9 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
if (cmd_op != FW_VI_STATS_CMD)
t4vf_record_mbox(adapter, cmd_rpl, size, access,
execute);
+ spin_lock(&adapter->mbox_lock);
+ list_del(&entry.list);
+ spin_unlock(&adapter->mbox_lock);
return -FW_CMD_RETVAL_G(v);
}
}
@@ -255,12 +307,16 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
/* We timed out. Return the error ... */
ret = -ETIMEDOUT;
t4vf_record_mbox(adapter, cmd, size, access, ret);
+ spin_lock(&adapter->mbox_lock);
+ list_del(&entry.list);
+ spin_unlock(&adapter->mbox_lock);
return ret;
}
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
- FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
- FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
+ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \
+ FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \
+ FW_PORT_CAP_ANEG)
/**
* init_link_config - initialize a link's SW state
@@ -273,6 +329,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
static void init_link_config(struct link_config *lc, unsigned int caps)
{
lc->supported = caps;
+ lc->lp_advertising = 0;
lc->requested_speed = 0;
lc->speed = 0;
lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
@@ -1656,8 +1713,12 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
speed = 1000;
else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
speed = 10000;
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
+ speed = 25000;
else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
speed = 40000;
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
+ speed = 100000;
/*
* Scan all of our "ports" (Virtual Interfaces) looking for
@@ -1688,6 +1749,8 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
lc->fc = fc;
lc->supported =
be16_to_cpu(port_cmd->u.info.pcap);
+ lc->lp_advertising =
+ be16_to_cpu(port_cmd->u.info.lpacap);
t4vf_os_link_changed(adapter, pidx, link_ok);
}
}
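The mailbox serialization added above is a ticket-style FIFO built from an on-stack list entry: queue yourself under a spinlock, wait until your entry reaches the head, and unlink on every exit path (timeout, ownership failure, normal completion). Reduced to a skeleton with hypothetical names (kernel context assumed: <linux/list.h>, <linux/spinlock.h>, <linux/delay.h>):

	struct waiter { struct list_head list; };

	/* return 0 once 'me' is at the head of 'queue', -EBUSY on timeout */
	static int fifo_wait_turn(spinlock_t *lock, struct list_head *queue,
				  struct waiter *me, unsigned int timeout_ms)
	{
		unsigned int waited;

		spin_lock(lock);
		list_add_tail(&me->list, queue);
		spin_unlock(lock);

		for (waited = 0; waited < timeout_ms; waited++) {
			if (list_first_entry(queue, struct waiter, list) == me)
				return 0;
			msleep(1);
		}

		spin_lock(lock);
		list_del(&me->list);	/* must unlink on failure, too */
		spin_unlock(lock);
		return -EBUSY;
	}

The winner removes its entry under the same lock once the transaction finishes, which is why every return path in t4vf_wr_mbox_core() above pairs list_del() with mbox_lock.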
diff --git a/drivers/net/ethernet/chelsio/libcxgb/Makefile b/drivers/net/ethernet/chelsio/libcxgb/Makefile
new file mode 100644
index 000000000..2362230ef
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/libcxgb/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_CHELSIO_LIB) += libcxgb.o
+
+libcxgb-y := libcxgb_ppm.o
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
new file mode 100644
index 000000000..0ed161642
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
@@ -0,0 +1,498 @@
+/*
+ * libcxgb_ppm.c: Chelsio common library for T3/T4/T5 iSCSI PagePod Manager
+ *
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Written by: Karen Xie (kxie@chelsio.com)
+ */
+
+#define DRV_NAME "libcxgb"
+#define DRV_VERSION "1.0.0-ko"
+#define pr_fmt(fmt) DRV_NAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/pci.h>
+#include <linux/scatterlist.h>
+
+#include "libcxgb_ppm.h"
+
+/* Direct Data Placement -
+ * Directly place the iSCSI Data-In or Data-Out PDU's payload into
+ * pre-posted final destination host-memory buffers based on the
+ * Initiator Task Tag (ITT) in Data-In or Target Task Tag (TTT)
+ * in Data-Out PDUs. The host memory address is programmed into
+ * h/w in the format of pagepod entries. The location of the
+ * pagepod entry is encoded into ddp tag which is used as the base
+ * for ITT/TTT.
+ */
+
+/* Direct-Data Placement page size adjustment
+ */
+int cxgbi_ppm_find_page_index(struct cxgbi_ppm *ppm, unsigned long pgsz)
+{
+ struct cxgbi_tag_format *tformat = &ppm->tformat;
+ int i;
+
+ for (i = 0; i < DDP_PGIDX_MAX; i++) {
+ if (pgsz == 1UL << (DDP_PGSZ_BASE_SHIFT +
+ tformat->pgsz_order[i])) {
+ pr_debug("%s: %s ppm, pgsz %lu -> idx %d.\n",
+ __func__, ppm->ndev->name, pgsz, i);
+ return i;
+ }
+ }
+ pr_info("ippm: ddp page size %lu not supported.\n", pgsz);
+ return DDP_PGIDX_MAX;
+}
+
+/* DDP setup & teardown
+ */
+static int ppm_find_unused_entries(unsigned long *bmap,
+ unsigned int max_ppods,
+ unsigned int start,
+ unsigned int nr,
+ unsigned int align_mask)
+{
+ unsigned long i;
+
+ i = bitmap_find_next_zero_area(bmap, max_ppods, start, nr, align_mask);
+
+ if (unlikely(i >= max_ppods) && (start > nr))
+ i = bitmap_find_next_zero_area(bmap, max_ppods, 0, start - 1,
+ align_mask);
+ if (unlikely(i >= max_ppods))
+ return -ENOSPC;
+
+ bitmap_set(bmap, i, nr);
+ return (int)i;
+}
+
+static void ppm_mark_entries(struct cxgbi_ppm *ppm, int i, int count,
+ unsigned long caller_data)
+{
+ struct cxgbi_ppod_data *pdata = ppm->ppod_data + i;
+
+ pdata->caller_data = caller_data;
+ pdata->npods = count;
+
+ if (pdata->color == ((1 << PPOD_IDX_SHIFT) - 1))
+ pdata->color = 0;
+ else
+ pdata->color++;
+}
+
+static int ppm_get_cpu_entries(struct cxgbi_ppm *ppm, unsigned int count,
+ unsigned long caller_data)
+{
+ struct cxgbi_ppm_pool *pool;
+ unsigned int cpu;
+ int i;
+
+ cpu = get_cpu();
+ pool = per_cpu_ptr(ppm->pool, cpu);
+ spin_lock_bh(&pool->lock);
+ put_cpu();
+
+ i = ppm_find_unused_entries(pool->bmap, ppm->pool_index_max,
+ pool->next, count, 0);
+ if (i < 0) {
+ pool->next = 0;
+ spin_unlock_bh(&pool->lock);
+ return -ENOSPC;
+ }
+
+ pool->next = i + count;
+ if (pool->next >= ppm->pool_index_max)
+ pool->next = 0;
+
+ spin_unlock_bh(&pool->lock);
+
+ pr_debug("%s: cpu %u, idx %d + %d (%d), next %u.\n",
+ __func__, cpu, i, count, i + cpu * ppm->pool_index_max,
+ pool->next);
+
+ i += cpu * ppm->pool_index_max;
+ ppm_mark_entries(ppm, i, count, caller_data);
+
+ return i;
+}
+
+static int ppm_get_entries(struct cxgbi_ppm *ppm, unsigned int count,
+ unsigned long caller_data)
+{
+ int i;
+
+ spin_lock_bh(&ppm->map_lock);
+ i = ppm_find_unused_entries(ppm->ppod_bmap, ppm->bmap_index_max,
+ ppm->next, count, 0);
+ if (i < 0) {
+ ppm->next = 0;
+ spin_unlock_bh(&ppm->map_lock);
+ pr_debug("ippm: NO suitable entries %u available.\n",
+ count);
+ return -ENOSPC;
+ }
+
+ ppm->next = i + count;
+ if (ppm->next >= ppm->bmap_index_max)
+ ppm->next = 0;
+
+ spin_unlock_bh(&ppm->map_lock);
+
+ pr_debug("%s: idx %d + %d (%d), next %u, caller_data 0x%lx.\n",
+ __func__, i, count, i + ppm->pool_rsvd, ppm->next,
+ caller_data);
+
+ i += ppm->pool_rsvd;
+ ppm_mark_entries(ppm, i, count, caller_data);
+
+ return i;
+}
+
+static void ppm_unmark_entries(struct cxgbi_ppm *ppm, int i, int count)
+{
+ pr_debug("%s: idx %d + %d.\n", __func__, i, count);
+
+ if (i < ppm->pool_rsvd) {
+ unsigned int cpu;
+ struct cxgbi_ppm_pool *pool;
+
+ cpu = i / ppm->pool_index_max;
+ i %= ppm->pool_index_max;
+
+ pool = per_cpu_ptr(ppm->pool, cpu);
+ spin_lock_bh(&pool->lock);
+ bitmap_clear(pool->bmap, i, count);
+
+ if (i < pool->next)
+ pool->next = i;
+ spin_unlock_bh(&pool->lock);
+
+ pr_debug("%s: cpu %u, idx %d, next %u.\n",
+ __func__, cpu, i, pool->next);
+ } else {
+ spin_lock_bh(&ppm->map_lock);
+
+ i -= ppm->pool_rsvd;
+ bitmap_clear(ppm->ppod_bmap, i, count);
+
+ if (i < ppm->next)
+ ppm->next = i;
+ spin_unlock_bh(&ppm->map_lock);
+
+ pr_debug("%s: idx %d, next %u.\n", __func__, i, ppm->next);
+ }
+}
+
+void cxgbi_ppm_ppod_release(struct cxgbi_ppm *ppm, u32 idx)
+{
+ struct cxgbi_ppod_data *pdata;
+
+ if (idx >= ppm->ppmax) {
+ pr_warn("ippm: idx too big %u > %u.\n", idx, ppm->ppmax);
+ return;
+ }
+
+ pdata = ppm->ppod_data + idx;
+ if (!pdata->npods) {
+ pr_warn("ippm: idx %u, npods 0.\n", idx);
+ return;
+ }
+
+ pr_debug("release idx %u, npods %u.\n", idx, pdata->npods);
+ ppm_unmark_entries(ppm, idx, pdata->npods);
+}
+EXPORT_SYMBOL(cxgbi_ppm_ppod_release);
+
+int cxgbi_ppm_ppods_reserve(struct cxgbi_ppm *ppm, unsigned short nr_pages,
+ u32 per_tag_pg_idx, u32 *ppod_idx,
+ u32 *ddp_tag, unsigned long caller_data)
+{
+ struct cxgbi_ppod_data *pdata;
+ unsigned int npods;
+ int idx = -1;
+ unsigned int hwidx;
+ u32 tag;
+
+ npods = (nr_pages + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
+ if (!npods) {
+ pr_warn("%s: pages %u -> npods %u, full.\n",
+ __func__, nr_pages, npods);
+ return -EINVAL;
+ }
+
+ /* grab from cpu pool first */
+ idx = ppm_get_cpu_entries(ppm, npods, caller_data);
+ /* try the general pool */
+ if (idx < 0)
+ idx = ppm_get_entries(ppm, npods, caller_data);
+ if (idx < 0) {
+ pr_debug("ippm: pages %u, nospc %u, nxt %u, 0x%lx.\n",
+ nr_pages, npods, ppm->next, caller_data);
+ return idx;
+ }
+
+ pdata = ppm->ppod_data + idx;
+ hwidx = ppm->base_idx + idx;
+
+ tag = cxgbi_ppm_make_ddp_tag(hwidx, pdata->color);
+
+ if (per_tag_pg_idx)
+ tag |= (per_tag_pg_idx << 30) & 0xC0000000;
+
+ *ppod_idx = idx;
+ *ddp_tag = tag;
+
+ pr_debug("ippm: sg %u, tag 0x%x(%u,%u), data 0x%lx.\n",
+ nr_pages, tag, idx, npods, caller_data);
+
+ return npods;
+}
+EXPORT_SYMBOL(cxgbi_ppm_ppods_reserve);
+
+void cxgbi_ppm_make_ppod_hdr(struct cxgbi_ppm *ppm, u32 tag,
+ unsigned int tid, unsigned int offset,
+ unsigned int length,
+ struct cxgbi_pagepod_hdr *hdr)
+{
+ /* The ddp tag in pagepod should be with bit 31:30 set to 0.
+ * The ddp Tag on the wire should be with non-zero 31:30 to the peer
+ */
+ tag &= 0x3FFFFFFF;
+
+ hdr->vld_tid = htonl(PPOD_VALID_FLAG | PPOD_TID(tid));
+
+ hdr->rsvd = 0;
+ hdr->pgsz_tag_clr = htonl(tag & ppm->tformat.idx_clr_mask);
+ hdr->max_offset = htonl(length);
+ hdr->page_offset = htonl(offset);
+
+ pr_debug("ippm: tag 0x%x, tid 0x%x, xfer %u, off %u.\n",
+ tag, tid, length, offset);
+}
+EXPORT_SYMBOL(cxgbi_ppm_make_ppod_hdr);
+
+static void ppm_free(struct cxgbi_ppm *ppm)
+{
+ vfree(ppm);
+}
+
+static void ppm_destroy(struct kref *kref)
+{
+ struct cxgbi_ppm *ppm = container_of(kref,
+ struct cxgbi_ppm,
+ refcnt);
+ pr_info("ippm: kref 0, destroy %s ppm 0x%p.\n",
+ ppm->ndev->name, ppm);
+
+ *ppm->ppm_pp = NULL;
+
+ free_percpu(ppm->pool);
+ ppm_free(ppm);
+}
+
+int cxgbi_ppm_release(struct cxgbi_ppm *ppm)
+{
+ if (ppm) {
+ int rv;
+
+ rv = kref_put(&ppm->refcnt, ppm_destroy);
+ return rv;
+ }
+ return 1;
+}
+EXPORT_SYMBOL(cxgbi_ppm_release);
+
+static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total,
+ unsigned int *pcpu_ppmax)
+{
+ struct cxgbi_ppm_pool *pools;
+ unsigned int ppmax = (*total) / num_possible_cpus();
+ unsigned int max = (PCPU_MIN_UNIT_SIZE - sizeof(*pools)) << 3;
+ unsigned int bmap;
+ unsigned int alloc_sz;
+ unsigned int count = 0;
+ unsigned int cpu;
+
+ /* make sure per cpu pool fits into PCPU_MIN_UNIT_SIZE */
+ if (ppmax > max)
+ ppmax = max;
+
+ /* pool size must be multiple of unsigned long */
+ bmap = BITS_TO_LONGS(ppmax);
+ ppmax = (bmap * sizeof(unsigned long)) << 3;
+
+ alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;
+ pools = __alloc_percpu(alloc_sz, __alignof__(struct cxgbi_ppm_pool));
+
+ if (!pools)
+ return NULL;
+
+ for_each_possible_cpu(cpu) {
+ struct cxgbi_ppm_pool *ppool = per_cpu_ptr(pools, cpu);
+
+ memset(ppool, 0, alloc_sz);
+ spin_lock_init(&ppool->lock);
+ count += ppmax;
+ }
+
+ *total = count;
+ *pcpu_ppmax = ppmax;
+
+ return pools;
+}
+
+int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
+ struct pci_dev *pdev, void *lldev,
+ struct cxgbi_tag_format *tformat,
+ unsigned int ppmax,
+ unsigned int llimit,
+ unsigned int start,
+ unsigned int reserve_factor)
+{
+ struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp);
+ struct cxgbi_ppm_pool *pool = NULL;
+ unsigned int ppmax_pool = 0;
+ unsigned int pool_index_max = 0;
+ unsigned int alloc_sz;
+ unsigned int ppod_bmap_size;
+
+ if (ppm) {
+ pr_info("ippm: %s, ppm 0x%p,0x%p already initialized, %u/%u.\n",
+ ndev->name, ppm_pp, ppm, ppm->ppmax, ppmax);
+ kref_get(&ppm->refcnt);
+ return 1;
+ }
+
+ if (reserve_factor) {
+ ppmax_pool = ppmax / reserve_factor;
+ pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max);
+
+ pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n",
+ ndev->name, ppmax, ppmax_pool, pool_index_max);
+ }
+
+ ppod_bmap_size = BITS_TO_LONGS(ppmax - ppmax_pool);
+ alloc_sz = sizeof(struct cxgbi_ppm) +
+ ppmax * (sizeof(struct cxgbi_ppod_data)) +
+ ppod_bmap_size * sizeof(unsigned long);
+
+ ppm = vmalloc(alloc_sz);
+ if (!ppm)
+ goto release_ppm_pool;
+
+ memset(ppm, 0, alloc_sz);
+
+ ppm->ppod_bmap = (unsigned long *)(&ppm->ppod_data[ppmax]);
+
+ if ((ppod_bmap_size >> 3) > (ppmax - ppmax_pool)) {
+ unsigned int start = ppmax - ppmax_pool;
+ unsigned int end = ppod_bmap_size >> 3;
+
+ bitmap_set(ppm->ppod_bmap, ppmax, end - start);
+ pr_info("%s: %u - %u < %u * 8, mask extra bits %u, %u.\n",
+ __func__, ppmax, ppmax_pool, ppod_bmap_size, start,
+ end);
+ }
+
+ spin_lock_init(&ppm->map_lock);
+ kref_init(&ppm->refcnt);
+
+ memcpy(&ppm->tformat, tformat, sizeof(struct cxgbi_tag_format));
+
+ ppm->ppm_pp = ppm_pp;
+ ppm->ndev = ndev;
+ ppm->pdev = pdev;
+ ppm->lldev = lldev;
+ ppm->ppmax = ppmax;
+ ppm->next = 0;
+ ppm->llimit = llimit;
+ ppm->base_idx = start > llimit ?
+ (start - llimit + 1) >> PPOD_SIZE_SHIFT : 0;
+ ppm->bmap_index_max = ppmax - ppmax_pool;
+
+ ppm->pool = pool;
+ ppm->pool_rsvd = ppmax_pool;
+ ppm->pool_index_max = pool_index_max;
+
+ /* check one more time */
+ if (*ppm_pp) {
+ ppm_free(ppm);
+ ppm = (struct cxgbi_ppm *)(*ppm_pp);
+
+ pr_info("ippm: %s, ppm 0x%p,0x%p already initialized, %u/%u.\n",
+ ndev->name, ppm_pp, *ppm_pp, ppm->ppmax, ppmax);
+
+ kref_get(&ppm->refcnt);
+ return 1;
+ }
+ *ppm_pp = ppm;
+
+ ppm->tformat.pgsz_idx_dflt = cxgbi_ppm_find_page_index(ppm, PAGE_SIZE);
+
+ pr_info("ippm %s: ppm 0x%p, 0x%p, base %u/%u, pg %lu,%u, rsvd %u,%u.\n",
+ ndev->name, ppm_pp, ppm, ppm->base_idx, ppm->ppmax, PAGE_SIZE,
+ ppm->tformat.pgsz_idx_dflt, ppm->pool_rsvd,
+ ppm->pool_index_max);
+
+ return 0;
+
+release_ppm_pool:
+ free_percpu(pool);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(cxgbi_ppm_init);
+
+unsigned int cxgbi_tagmask_set(unsigned int ppmax)
+{
+ unsigned int bits = fls(ppmax);
+
+ if (bits > PPOD_IDX_MAX_SIZE)
+ bits = PPOD_IDX_MAX_SIZE;
+
+ pr_info("ippm: ppmax %u/0x%x -> bits %u, tagmask 0x%x.\n",
+ ppmax, ppmax, bits, 1 << (bits + PPOD_IDX_SHIFT));
+
+ return 1 << (bits + PPOD_IDX_SHIFT);
+}
+EXPORT_SYMBOL(cxgbi_tagmask_set);
+
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_DESCRIPTION("Chelsio common library");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("Dual BSD/GPL");
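For a ULP driver, the allocator above boils down to a reserve/program/release cycle: reserve pagepods for an I/O, carry the returned ddp_tag on the wire as the ITT/TTT base, and release by index when the I/O completes. A hedged usage sketch (ppm setup, pagepod programming and error handling elided; io_cookie and nr_pages are hypothetical):

	u32 ppod_idx, ddp_tag;
	int npods;

	npods = cxgbi_ppm_ppods_reserve(ppm, nr_pages, 0, &ppod_idx, &ddp_tag,
					(unsigned long)io_cookie);
	if (npods < 0)
		return npods;			/* fall back to a non-ddp tag */

	/* ... program 'npods' pagepods starting at 'ppod_idx',
	 *     use 'ddp_tag' as the wire tag ...
	 */

	cxgbi_ppm_ppod_release(ppm, ppod_idx);	/* when the I/O completes */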
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h
new file mode 100644
index 000000000..e995a1a38
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h
@@ -0,0 +1,334 @@
+/*
+ * libcxgb_ppm.h: Chelsio common library for T3/T4/T5 iSCSI ddp operation
+ *
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Written by: Karen Xie (kxie@chelsio.com)
+ */
+
+#ifndef __LIBCXGB_PPM_H__
+#define __LIBCXGB_PPM_H__
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/scatterlist.h>
+#include <linux/skbuff.h>
+#include <linux/vmalloc.h>
+#include <linux/bitmap.h>
+
+struct cxgbi_pagepod_hdr {
+ u32 vld_tid;
+ u32 pgsz_tag_clr;
+ u32 max_offset;
+ u32 page_offset;
+ u64 rsvd;
+};
+
+#define PPOD_PAGES_MAX 4
+struct cxgbi_pagepod {
+ struct cxgbi_pagepod_hdr hdr;
+ u64 addr[PPOD_PAGES_MAX + 1];
+};
+
+/* ddp tag format
+ * for a 32-bit tag:
+ * bit #
+ * 31 ..... ..... 0
+ * X Y...Y Z...Z, where
+ * ^ ^^^^^ ^^^^
+ * | | |____ when ddp bit = 0: color bits
+ * | |
+ * | |____ when ddp bit = 0: idx into the ddp memory region
+ * |
+ * |____ ddp bit: 0 - ddp tag, 1 - non-ddp tag
+ *
+ * [page selector:2] [sw/free bits] [0] [idx] [color:6]
+ */
+
+#define DDP_PGIDX_MAX 4
+#define DDP_PGSZ_BASE_SHIFT 12 /* base page 4K */
+
+struct cxgbi_task_tag_info {
+ unsigned char flags;
+#define CXGBI_PPOD_INFO_FLAG_VALID 0x1
+#define CXGBI_PPOD_INFO_FLAG_MAPPED 0x2
+ unsigned char cid;
+ unsigned short pg_shift;
+ unsigned int npods;
+ unsigned int idx;
+ unsigned int tag;
+ struct cxgbi_pagepod_hdr hdr;
+ int nents;
+ int nr_pages;
+ struct scatterlist *sgl;
+};
+
+struct cxgbi_tag_format {
+ unsigned char pgsz_order[DDP_PGIDX_MAX];
+ unsigned char pgsz_idx_dflt;
+ unsigned char free_bits:4;
+ unsigned char color_bits:4;
+ unsigned char idx_bits;
+ unsigned char rsvd_bits;
+ unsigned int no_ddp_mask;
+ unsigned int idx_mask;
+ unsigned int color_mask;
+ unsigned int idx_clr_mask;
+ unsigned int rsvd_mask;
+};
+
+struct cxgbi_ppod_data {
+ unsigned char pg_idx:2;
+ unsigned char color:6;
+ unsigned char chan_id;
+ unsigned short npods;
+ unsigned long caller_data;
+};
+
+/* per cpu ppm pool */
+struct cxgbi_ppm_pool {
+ unsigned int base; /* base index */
+ unsigned int next; /* next possible free index */
+ spinlock_t lock; /* ppm pool lock */
+ unsigned long bmap[0];
+} ____cacheline_aligned_in_smp;
+
+struct cxgbi_ppm {
+ struct kref refcnt;
+ struct net_device *ndev; /* net_device, 1st port */
+ struct pci_dev *pdev;
+ void *lldev;
+ void **ppm_pp;
+ struct cxgbi_tag_format tformat;
+ unsigned int ppmax;
+ unsigned int llimit;
+ unsigned int base_idx;
+
+ unsigned int pool_rsvd;
+ unsigned int pool_index_max;
+ struct cxgbi_ppm_pool __percpu *pool;
+ /* map lock */
+ spinlock_t map_lock; /* ppm map lock */
+ unsigned int bmap_index_max;
+ unsigned int next;
+ unsigned long *ppod_bmap;
+ struct cxgbi_ppod_data ppod_data[0];
+};
+
+#define DDP_THRESHOLD 512
+
+#define PPOD_PAGES_SHIFT 2 /* 4 pages per pod */
+
+#define IPPOD_SIZE sizeof(struct cxgbi_pagepod) /* 64 */
+#define PPOD_SIZE_SHIFT 6
+
+/* page pods are allocated in groups of this size (must be power of 2) */
+#define PPOD_CLUSTER_SIZE 16U
+
+#define ULPMEM_DSGL_MAX_NPPODS 16 /* 1024/PPOD_SIZE */
+#define ULPMEM_IDATA_MAX_NPPODS 3 /* (PPOD_SIZE * 3 + ulptx hdr) < 256B */
+#define PCIE_MEMWIN_MAX_NPPODS 16 /* 1024/PPOD_SIZE */
+
+#define PPOD_COLOR_SHIFT 0
+#define PPOD_COLOR(x) ((x) << PPOD_COLOR_SHIFT)
+
+#define PPOD_IDX_SHIFT 6
+#define PPOD_IDX_MAX_SIZE 24
+
+#define PPOD_TID_SHIFT 0
+#define PPOD_TID(x) ((x) << PPOD_TID_SHIFT)
+
+#define PPOD_TAG_SHIFT 6
+#define PPOD_TAG(x) ((x) << PPOD_TAG_SHIFT)
+
+#define PPOD_VALID_SHIFT 24
+#define PPOD_VALID(x) ((x) << PPOD_VALID_SHIFT)
+#define PPOD_VALID_FLAG PPOD_VALID(1U)
+
+#define PPOD_PI_EXTRACT_CTL_SHIFT 31
+#define PPOD_PI_EXTRACT_CTL(x) ((x) << PPOD_PI_EXTRACT_CTL_SHIFT)
+#define PPOD_PI_EXTRACT_CTL_FLAG PPOD_PI_EXTRACT_CTL(1U)
+
+#define PPOD_PI_TYPE_SHIFT 29
+#define PPOD_PI_TYPE_MASK 0x3
+#define PPOD_PI_TYPE(x) ((x) << PPOD_PI_TYPE_SHIFT)
+
+#define PPOD_PI_CHECK_CTL_SHIFT 27
+#define PPOD_PI_CHECK_CTL_MASK 0x3
+#define PPOD_PI_CHECK_CTL(x) ((x) << PPOD_PI_CHECK_CTL_SHIFT)
+
+#define PPOD_PI_REPORT_CTL_SHIFT 25
+#define PPOD_PI_REPORT_CTL_MASK 0x3
+#define PPOD_PI_REPORT_CTL(x) ((x) << PPOD_PI_REPORT_CTL_SHIFT)
+
+static inline int cxgbi_ppm_is_ddp_tag(struct cxgbi_ppm *ppm, u32 tag)
+{
+ return !(tag & ppm->tformat.no_ddp_mask);
+}
+
+static inline int cxgbi_ppm_sw_tag_is_usable(struct cxgbi_ppm *ppm,
+ u32 tag)
+{
+ /* the sw tag must be using <= 31 bits */
+ return !(tag & 0x80000000U);
+}
+
+static inline int cxgbi_ppm_make_non_ddp_tag(struct cxgbi_ppm *ppm,
+ u32 sw_tag,
+ u32 *final_tag)
+{
+ struct cxgbi_tag_format *tformat = &ppm->tformat;
+
+ if (!cxgbi_ppm_sw_tag_is_usable(ppm, sw_tag)) {
+ pr_info("sw_tag 0x%x NOT usable.\n", sw_tag);
+ return -EINVAL;
+ }
+
+ if (!sw_tag) {
+ *final_tag = tformat->no_ddp_mask;
+ } else {
+ unsigned int shift = tformat->idx_bits + tformat->color_bits;
+ u32 lower = sw_tag & tformat->idx_clr_mask;
+ u32 upper = (sw_tag >> shift) << (shift + 1);
+
+ *final_tag = upper | tformat->no_ddp_mask | lower;
+ }
+ return 0;
+}
+
+static inline u32 cxgbi_ppm_decode_non_ddp_tag(struct cxgbi_ppm *ppm,
+ u32 tag)
+{
+ struct cxgbi_tag_format *tformat = &ppm->tformat;
+ unsigned int shift = tformat->idx_bits + tformat->color_bits;
+ u32 lower = tag & tformat->idx_clr_mask;
+ u32 upper = (tag >> tformat->rsvd_bits) << shift;
+
+ return upper | lower;
+}
+
+static inline u32 cxgbi_ppm_ddp_tag_get_idx(struct cxgbi_ppm *ppm,
+ u32 ddp_tag)
+{
+ u32 hw_idx = (ddp_tag >> PPOD_IDX_SHIFT) &
+ ppm->tformat.idx_mask;
+
+ return hw_idx - ppm->base_idx;
+}
+
+static inline u32 cxgbi_ppm_make_ddp_tag(unsigned int hw_idx,
+ unsigned char color)
+{
+ return (hw_idx << PPOD_IDX_SHIFT) | ((u32)color);
+}
+
+static inline unsigned long
+cxgbi_ppm_get_tag_caller_data(struct cxgbi_ppm *ppm,
+ u32 ddp_tag)
+{
+ u32 idx = cxgbi_ppm_ddp_tag_get_idx(ppm, ddp_tag);
+
+ return ppm->ppod_data[idx].caller_data;
+}
+
+/* sw bits are the free bits */
+static inline int cxgbi_ppm_ddp_tag_update_sw_bits(struct cxgbi_ppm *ppm,
+ u32 val, u32 orig_tag,
+ u32 *final_tag)
+{
+ struct cxgbi_tag_format *tformat = &ppm->tformat;
+ u32 v = val >> tformat->free_bits;
+
+ if (v) {
+ pr_info("sw_bits 0x%x too large, avail bits %u.\n",
+ val, tformat->free_bits);
+ return -EINVAL;
+ }
+ if (!cxgbi_ppm_is_ddp_tag(ppm, orig_tag))
+ return -EINVAL;
+
+ *final_tag = (val << tformat->rsvd_bits) |
+ (orig_tag & ppm->tformat.rsvd_mask);
+ return 0;
+}
+
+static inline void cxgbi_ppm_ppod_clear(struct cxgbi_pagepod *ppod)
+{
+ ppod->hdr.vld_tid = 0U;
+}
+
+static inline void cxgbi_tagmask_check(unsigned int tagmask,
+ struct cxgbi_tag_format *tformat)
+{
+ unsigned int bits = fls(tagmask);
+
+ /* reserve top most 2 bits for page selector */
+ tformat->free_bits = 32 - 2 - bits;
+ tformat->rsvd_bits = bits;
+ tformat->color_bits = PPOD_IDX_SHIFT;
+ tformat->idx_bits = bits - 1 - PPOD_IDX_SHIFT;
+ tformat->no_ddp_mask = 1 << (bits - 1);
+ tformat->idx_mask = (1 << tformat->idx_bits) - 1;
+ tformat->color_mask = (1 << PPOD_IDX_SHIFT) - 1;
+ tformat->idx_clr_mask = (1 << (bits - 1)) - 1;
+ tformat->rsvd_mask = (1 << bits) - 1;
+
+ pr_info("ippm: tagmask 0x%x, rsvd %u=%u+%u+1, mask 0x%x,0x%x, "
+ "pg %u,%u,%u,%u.\n",
+ tagmask, tformat->rsvd_bits, tformat->idx_bits,
+ tformat->color_bits, tformat->no_ddp_mask, tformat->rsvd_mask,
+ tformat->pgsz_order[0], tformat->pgsz_order[1],
+ tformat->pgsz_order[2], tformat->pgsz_order[3]);
+}
+
+int cxgbi_ppm_find_page_index(struct cxgbi_ppm *ppm, unsigned long pgsz);
+void cxgbi_ppm_make_ppod_hdr(struct cxgbi_ppm *ppm, u32 tag,
+ unsigned int tid, unsigned int offset,
+ unsigned int length,
+ struct cxgbi_pagepod_hdr *hdr);
+void cxgbi_ppm_ppod_release(struct cxgbi_ppm *, u32 idx);
+int cxgbi_ppm_ppods_reserve(struct cxgbi_ppm *, unsigned short nr_pages,
+ u32 per_tag_pg_idx, u32 *ppod_idx, u32 *ddp_tag,
+ unsigned long caller_data);
+int cxgbi_ppm_init(void **ppm_pp, struct net_device *, struct pci_dev *,
+ void *lldev, struct cxgbi_tag_format *,
+ unsigned int ppmax, unsigned int llimit,
+ unsigned int start,
+ unsigned int reserve_factor);
+int cxgbi_ppm_release(struct cxgbi_ppm *ppm);
+void cxgbi_tagmask_check(unsigned int tagmask, struct cxgbi_tag_format *);
+unsigned int cxgbi_tagmask_set(unsigned int ppmax);
+
+#endif /*__LIBCXGB_PPM_H__*/
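To make the bit layout concrete, a worked example with an arbitrarily chosen ppmax (the arithmetic follows from cxgbi_tagmask_set() and cxgbi_tagmask_check() above; the numbers are illustrative, not from the source):

	/* ppmax = 16384:
	 *   cxgbi_tagmask_set():  fls(16384) = 15 -> tagmask = 1 << (15 + 6)
	 *   cxgbi_tagmask_check(1 << 21):
	 *     rsvd_bits   = fls(1 << 21) = 22
	 *     free_bits   = 32 - 2 - 22  = 8
	 *     color_bits  = 6, idx_bits  = 22 - 1 - 6 = 15
	 *     no_ddp_mask = 1 << 21
	 *
	 * resulting tag: [pgsz:2][free:8][ddp:1][idx:15][color:6] = 32 bits
	 */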
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 60383040d..c363b5855 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -53,6 +53,8 @@
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -1895,9 +1897,17 @@ static int cs89x0_platform_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id __maybe_unused cs89x0_match[] = {
+ { .compatible = "cirrus,cs8900", },
+ { .compatible = "cirrus,cs8920", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, cs89x0_match);
+
static struct platform_driver cs89x0_driver = {
.driver = {
- .name = DRV_NAME,
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(cs89x0_match),
},
.remove = cs89x0_platform_remove,
};
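The __maybe_unused annotation pairs with of_match_ptr(): on kernels built without CONFIG_OF the macro evaluates to NULL, so the table is never referenced and the attribute silences the resulting defined-but-unused warning. The idiom, roughly as <linux/of.h> defines it:

	#ifdef CONFIG_OF
	#define of_match_ptr(ptr)	(ptr)
	#else
	#define of_match_ptr(ptr)	NULL	/* table unused -> __maybe_unused */
	#endif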
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index f44a39c40..fd3980cc1 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -103,25 +103,29 @@ static void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
}
}
-static int enic_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int enic_get_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *ecmd)
{
struct enic *enic = netdev_priv(netdev);
+ struct ethtool_link_settings *base = &ecmd->base;
- ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
- ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
- ecmd->port = PORT_FIBRE;
- ecmd->transceiver = XCVR_EXTERNAL;
+ ethtool_link_ksettings_add_link_mode(ecmd, supported,
+ 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(ecmd, advertising,
+ 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ecmd, advertising, FIBRE);
+ base->port = PORT_FIBRE;
if (netif_carrier_ok(netdev)) {
- ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
- ecmd->duplex = DUPLEX_FULL;
+ base->speed = vnic_dev_port_speed(enic->vdev);
+ base->duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
+ base->speed = SPEED_UNKNOWN;
+ base->duplex = DUPLEX_UNKNOWN;
}
- ecmd->autoneg = AUTONEG_DISABLE;
+ base->autoneg = AUTONEG_DISABLE;
return 0;
}
@@ -500,7 +504,6 @@ static int enic_set_rxfh(struct net_device *netdev, const u32 *indir,
}
static const struct ethtool_ops enic_ethtool_ops = {
- .get_settings = enic_get_settings,
.get_drvinfo = enic_get_drvinfo,
.get_msglevel = enic_get_msglevel,
.set_msglevel = enic_set_msglevel,
@@ -516,6 +519,7 @@ static const struct ethtool_ops enic_ethtool_ops = {
.get_rxfh_key_size = enic_get_rxfh_key_size,
.get_rxfh = enic_get_rxfh,
.set_rxfh = enic_set_rxfh,
+ .get_link_ksettings = enic_get_ksettings,
};
void enic_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index f15560a06..48f82ab6c 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1566,7 +1566,7 @@ static int enic_request_intr(struct enic *enic)
intr = enic_msix_rq_intr(enic, i);
snprintf(enic->msix[intr].devname,
sizeof(enic->msix[intr].devname),
- "%.11s-rx-%d", netdev->name, i);
+ "%.11s-rx-%u", netdev->name, i);
enic->msix[intr].isr = enic_isr_msix;
enic->msix[intr].devid = &enic->napi[i];
}
@@ -1577,7 +1577,7 @@ static int enic_request_intr(struct enic *enic)
intr = enic_msix_wq_intr(enic, i);
snprintf(enic->msix[intr].devname,
sizeof(enic->msix[intr].devname),
- "%.11s-tx-%d", netdev->name, i);
+ "%.11s-tx-%u", netdev->name, i);
enic->msix[intr].isr = enic_isr_msix;
enic->msix[intr].devid = &enic->napi[wq];
}
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 1471e16ba..f45385f5c 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1299,6 +1299,7 @@ static int
dm9000_open(struct net_device *dev)
{
struct board_info *db = netdev_priv(dev);
+ unsigned int irq_flags = irq_get_trigger_type(dev->irq);
if (netif_msg_ifup(db))
dev_dbg(db->dev, "enabling %s\n", dev->name);
@@ -1306,9 +1307,11 @@ dm9000_open(struct net_device *dev)
/* If there is no IRQ type specified, tell the user that this is a
* problem
*/
- if (irq_get_trigger_type(dev->irq) == IRQF_TRIGGER_NONE)
+ if (irq_flags == IRQF_TRIGGER_NONE)
dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
+ irq_flags |= IRQF_SHARED;
+
/* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
mdelay(1); /* delay needs by DM9000B */
@@ -1316,8 +1319,7 @@ dm9000_open(struct net_device *dev)
/* Initialize DM9000 board */
dm9000_init_dm9000(dev);
- if (request_irq(dev->irq, dm9000_interrupt, IRQF_SHARED,
- dev->name, dev))
+ if (request_irq(dev->irq, dm9000_interrupt, irq_flags, dev->name, dev))
return -EAGAIN;
/* Now that we have an interrupt handler hooked up we can unmask
* our interrupts
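The dm9000 change follows a reusable pattern: read back the trigger type the platform or device tree already programmed into the irqchip and hand those bits to request_irq(), instead of hard-coding them. A minimal sketch (my_handler, priv and dev are hypothetical):

	unsigned int flags = irq_get_trigger_type(irq);	/* IRQF_TRIGGER_* bits */

	if (flags == IRQF_TRIGGER_NONE)
		dev_warn(dev, "no IRQ trigger type configured\n");
	flags |= IRQF_SHARED;

	ret = request_irq(irq, my_handler, flags, "my-dev", priv);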
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index cbe84972f..f0e9e2ef6 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -1319,7 +1319,7 @@ de4x5_open(struct net_device *dev)
if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
lp->adapter_name, dev)) {
- printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq);
+ printk("de4x5_open(): Requested IRQ%d is busy - attempting FAST/SHARE...", dev->irq);
if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
lp->adapter_name, dev)) {
printk("\n Cannot get IRQ- reconfigure your hardware.\n");
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index b69a9eacc..c3b64cdd0 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -173,7 +173,7 @@ static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
static void dnet_handle_link_change(struct net_device *dev)
{
struct dnet *bp = netdev_priv(dev);
- struct phy_device *phydev = bp->phy_dev;
+ struct phy_device *phydev = dev->phydev;
unsigned long flags;
u32 mode_reg, ctl_reg;
@@ -295,7 +295,6 @@ static int dnet_mii_probe(struct net_device *dev)
bp->link = 0;
bp->speed = 0;
bp->duplex = -1;
- bp->phy_dev = phydev;
return 0;
}
@@ -629,16 +628,16 @@ static int dnet_open(struct net_device *dev)
struct dnet *bp = netdev_priv(dev);
/* if the phy is not yet registered, retry later */
- if (!bp->phy_dev)
+ if (!dev->phydev)
return -EAGAIN;
napi_enable(&bp->napi);
dnet_init_hw(bp);
- phy_start_aneg(bp->phy_dev);
+ phy_start_aneg(dev->phydev);
/* schedule a link state check */
- phy_start(bp->phy_dev);
+ phy_start(dev->phydev);
netif_start_queue(dev);
@@ -652,8 +651,8 @@ static int dnet_close(struct net_device *dev)
netif_stop_queue(dev);
napi_disable(&bp->napi);
- if (bp->phy_dev)
- phy_stop(bp->phy_dev);
+ if (dev->phydev)
+ phy_stop(dev->phydev);
dnet_reset_hw(bp);
netif_carrier_off(dev);
@@ -731,32 +730,9 @@ static struct net_device_stats *dnet_get_stats(struct net_device *dev)
return nstat;
}
-static int dnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct dnet *bp = netdev_priv(dev);
- struct phy_device *phydev = bp->phy_dev;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_gset(phydev, cmd);
-}
-
-static int dnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct dnet *bp = netdev_priv(dev);
- struct phy_device *phydev = bp->phy_dev;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_sset(phydev, cmd);
-}
-
static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct dnet *bp = netdev_priv(dev);
- struct phy_device *phydev = bp->phy_dev;
+ struct phy_device *phydev = dev->phydev;
if (!netif_running(dev))
return -EINVAL;
@@ -776,11 +752,11 @@ static void dnet_get_drvinfo(struct net_device *dev,
}
static const struct ethtool_ops dnet_ethtool_ops = {
- .get_settings = dnet_get_settings,
- .set_settings = dnet_set_settings,
.get_drvinfo = dnet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static const struct net_device_ops dnet_netdev_ops = {
@@ -875,7 +851,7 @@ static int dnet_probe(struct platform_device *pdev)
(bp->capabilities & DNET_HAS_IRQ) ? "" : "no ",
(bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ",
(bp->capabilities & DNET_HAS_DMA) ? "" : "no ");
- phydev = bp->phy_dev;
+ phydev = dev->phydev;
phy_attached_info(phydev);
return 0;
@@ -899,8 +875,8 @@ static int dnet_remove(struct platform_device *pdev)
if (dev) {
bp = netdev_priv(dev);
- if (bp->phy_dev)
- phy_disconnect(bp->phy_dev);
+ if (dev->phydev)
+ phy_disconnect(dev->phydev);
mdiobus_unregister(bp->mii_bus);
mdiobus_free(bp->mii_bus);
unregister_netdev(dev);
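The dnet conversion is mechanical once the PHY pointer lives in struct net_device: phylib stores it in dev->phydev at connect time, and the generic phy_ethtool_{get,set}_link_ksettings() helpers operate on exactly that pointer, so the driver's hand-rolled get/set_settings can simply be dropped. The resulting pattern, usable by any phylib driver (sketch):

	static const struct ethtool_ops example_ethtool_ops = {
		.get_link		= ethtool_op_get_link,
		.get_link_ksettings	= phy_ethtool_get_link_ksettings,
		.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	};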
diff --git a/drivers/net/ethernet/dnet.h b/drivers/net/ethernet/dnet.h
index 37f5b30fa..d985080bb 100644
--- a/drivers/net/ethernet/dnet.h
+++ b/drivers/net/ethernet/dnet.h
@@ -216,7 +216,6 @@ struct dnet {
/* PHY stuff */
struct mii_bus *mii_bus;
- struct phy_device *phy_dev;
unsigned int link;
unsigned int speed;
unsigned int duplex;
diff --git a/drivers/net/ethernet/emulex/benet/Kconfig b/drivers/net/ethernet/emulex/benet/Kconfig
index 710856326..b4853ec9d 100644
--- a/drivers/net/ethernet/emulex/benet/Kconfig
+++ b/drivers/net/ethernet/emulex/benet/Kconfig
@@ -13,11 +13,3 @@ config BE2NET_HWMON
---help---
Say Y here if you want to expose thermal sensor data on
be2net network adapter.
-
-config BE2NET_VXLAN
- bool "VXLAN offload support on be2net driver"
- default y
- depends on BE2NET && VXLAN && !(BE2NET=y && VXLAN=m)
- ---help---
- Say Y here if you want to enable VXLAN offload support on
- be2net driver.
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index fe3763df3..4555e041e 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -97,7 +97,8 @@
* SURF/DPDK
*/
-#define MAX_RSS_IFACES 15
+#define MAX_PORT_RSS_TABLES 15
+#define MAX_NIC_FUNCS 16
#define MAX_RX_QS 32
#define MAX_EVT_QS 32
#define MAX_TX_QS 32
@@ -442,8 +443,20 @@ struct be_resources {
u16 max_iface_count;
u16 max_mcc_count;
u16 max_evt_qs;
+ u16 max_nic_evt_qs; /* NIC's share of evt qs */
u32 if_cap_flags;
u32 vf_if_cap_flags; /* VF if capability flags */
+ u32 flags;
+ /* Calculated PF Pool's share of RSS Tables. This is not enforced by
+ * the FW, but is a self-imposed driver limitation.
+ */
+ u16 max_rss_tables;
+};
+
+/* These are port-wide values */
+struct be_port_resources {
+ u16 max_vfs;
+ u16 nic_pfs;
};
#define be_is_os2bmc_enabled(adapter) (adapter->flags & BE_FLAGS_OS2BMC)
@@ -513,7 +526,8 @@ struct be_adapter {
spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
spinlock_t mcc_cq_lock;
- u16 cfg_num_qs; /* configured via set-channels */
+ u16 cfg_num_rx_irqs; /* configured via set-channels */
+ u16 cfg_num_tx_irqs; /* configured via set-channels */
u16 num_evt_qs;
u16 num_msix_vec;
struct be_eq_obj eq_obj[MAX_EVT_QS];
@@ -632,16 +646,42 @@ struct be_adapter {
#define be_max_txqs(adapter) (adapter->res.max_tx_qs)
#define be_max_prio_txqs(adapter) (adapter->res.max_prio_tx_qs)
#define be_max_rxqs(adapter) (adapter->res.max_rx_qs)
-#define be_max_eqs(adapter) (adapter->res.max_evt_qs)
+/* Max number of EQs available for the function (NIC + RoCE (if enabled)) */
+#define be_max_func_eqs(adapter) (adapter->res.max_evt_qs)
+/* Max number of EQs available only for NIC */
+#define be_max_nic_eqs(adapter) (adapter->res.max_nic_evt_qs)
#define be_if_cap_flags(adapter) (adapter->res.if_cap_flags)
-
-static inline u16 be_max_qs(struct be_adapter *adapter)
+#define be_max_pf_pool_rss_tables(adapter) \
+ (adapter->pool_res.max_rss_tables)
+/* Max irqs available for NIC */
+#define be_max_irqs(adapter) \
+ (min_t(u16, be_max_nic_eqs(adapter), num_online_cpus()))
+
+/* Max irqs *needed* for RX queues */
+static inline u16 be_max_rx_irqs(struct be_adapter *adapter)
{
- /* If no RSS, need atleast the one def RXQ */
+ /* If no RSS, need at least one irq for def-RXQ */
u16 num = max_t(u16, be_max_rss(adapter), 1);
- num = min(num, be_max_eqs(adapter));
- return min_t(u16, num, num_online_cpus());
+ return min_t(u16, num, be_max_irqs(adapter));
+}
+
+/* Max irqs *needed* for TX queues */
+static inline u16 be_max_tx_irqs(struct be_adapter *adapter)
+{
+ return min_t(u16, be_max_txqs(adapter), be_max_irqs(adapter));
+}
+
+/* Max irqs *needed* for combined queues */
+static inline u16 be_max_qp_irqs(struct be_adapter *adapter)
+{
+ return min(be_max_tx_irqs(adapter), be_max_rx_irqs(adapter));
+}
+
+/* Max irqs *needed* for RX and TX queues together */
+static inline u16 be_max_any_irqs(struct be_adapter *adapter)
+{
+ return max(be_max_tx_irqs(adapter), be_max_rx_irqs(adapter));
}
/* Is BE in pvid_tagging mode */
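The helper lattice above is easiest to read with numbers plugged in. With a hypothetical configuration (values invented for illustration) of 8 online CPUs, max_nic_evt_qs = 5, be_max_rss() = 6 and be_max_txqs() = 3:

	/* be_max_irqs()     = min(5, 8)         = 5
	 * be_max_rx_irqs()  = min(max(6, 1), 5) = 5
	 * be_max_tx_irqs()  = min(3, 5)         = 3
	 * be_max_qp_irqs()  = min(3, 5)         = 3  (combined rx/tx channels)
	 * be_max_any_irqs() = max(3, 5)         = 5  (independent rx and tx)
	 */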
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 22402db27..2cc117568 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -87,6 +87,11 @@ static struct be_cmd_priv_map cmd_priv_map[] = {
CMD_SUBSYSTEM_LOWLEVEL,
BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
},
+ {
+ OPCODE_COMMON_SET_HSW_CONFIG,
+ CMD_SUBSYSTEM_COMMON,
+ BE_PRIV_DEVCFG | BE_PRIV_VHADM
+ },
};
static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
@@ -3850,6 +3855,10 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
void *ctxt;
int status;
+ if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_HSW_CONFIG,
+ CMD_SUBSYSTEM_COMMON))
+ return -EPERM;
+
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
@@ -3871,7 +3880,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
}
- if (!BEx_chip(adapter) && hsw_mode) {
+ if (hsw_mode) {
AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
ctxt, adapter->hba_port_num);
AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
@@ -4023,7 +4032,10 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va;
adapter->wol_cap = resp->wol_settings;
- if (adapter->wol_cap & BE_WOL_CAP)
+
+ /* Non-zero macaddr indicates WOL is enabled */
+ if (adapter->wol_cap & BE_WOL_CAP &&
+ !is_zero_ether_addr(resp->magic_mac))
adapter->wol_en = true;
}
err:
@@ -4360,9 +4372,35 @@ err:
return status;
}
+/* This routine returns a list of all the NIC PF_nums in the adapter */
+u16 be_get_nic_pf_num_list(u8 *buf, u32 desc_count, u16 *nic_pf_nums)
+{
+ struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
+ struct be_pcie_res_desc *pcie = NULL;
+ int i;
+ u16 nic_pf_count = 0;
+
+ for (i = 0; i < desc_count; i++) {
+ if (hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
+ hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1) {
+ pcie = (struct be_pcie_res_desc *)hdr;
+ if (pcie->pf_state && (pcie->pf_type == MISSION_NIC ||
+ pcie->pf_type == MISSION_RDMA)) {
+ nic_pf_nums[nic_pf_count++] = pcie->pf_num;
+ }
+ }
+
+ hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
+ hdr = (void *)hdr + hdr->desc_len;
+ }
+ return nic_pf_count;
+}
+
/* Will use MBOX only if MCCQ has not been created */
int be_cmd_get_profile_config(struct be_adapter *adapter,
- struct be_resources *res, u8 query, u8 domain)
+ struct be_resources *res,
+ struct be_port_resources *port_res,
+ u8 profile_type, u8 query, u8 domain)
{
struct be_cmd_resp_get_profile_config *resp;
struct be_cmd_req_get_profile_config *req;
@@ -4389,7 +4427,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
if (!lancer_chip(adapter))
req->hdr.version = 1;
- req->type = ACTIVE_PROFILE_TYPE;
+ req->type = profile_type;
req->hdr.domain = domain;
/* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the
@@ -4406,6 +4444,28 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
resp = cmd.va;
desc_count = le16_to_cpu(resp->desc_count);
+ if (port_res) {
+ u16 nic_pf_cnt = 0, i;
+ u16 nic_pf_num_list[MAX_NIC_FUNCS];
+
+ nic_pf_cnt = be_get_nic_pf_num_list(resp->func_param,
+ desc_count,
+ nic_pf_num_list);
+
+ for (i = 0; i < nic_pf_cnt; i++) {
+ nic = be_get_func_nic_desc(resp->func_param, desc_count,
+ nic_pf_num_list[i]);
+ if (nic->link_param == adapter->port_num) {
+ port_res->nic_pfs++;
+ pcie = be_get_pcie_desc(resp->func_param,
+ desc_count,
+ nic_pf_num_list[i]);
+ port_res->max_vfs += le16_to_cpu(pcie->num_vfs);
+ }
+ }
+ return status;
+ }
+
pcie = be_get_pcie_desc(resp->func_param, desc_count,
adapter->pf_num);
if (pcie)
@@ -4465,7 +4525,7 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
}
/* Mark all fields invalid */
-static void be_reset_nic_desc(struct be_nic_res_desc *nic)
+void be_reset_nic_desc(struct be_nic_res_desc *nic)
{
memset(nic, 0, sizeof(*nic));
nic->unicast_mac_count = 0xFFFF;
@@ -4534,73 +4594,9 @@ int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
1, version, domain);
}
-static void be_fill_vf_res_template(struct be_adapter *adapter,
- struct be_resources pool_res,
- u16 num_vfs, u16 num_vf_qs,
- struct be_nic_res_desc *nic_vft)
-{
- u32 vf_if_cap_flags = pool_res.vf_if_cap_flags;
- struct be_resources res_mod = {0};
-
- /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
- * which are modifiable using SET_PROFILE_CONFIG cmd.
- */
- be_cmd_get_profile_config(adapter, &res_mod, RESOURCE_MODIFIABLE, 0);
-
- /* If RSS IFACE capability flags are modifiable for a VF, set the
- * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
- * more than 1 RSSQ is available for a VF.
- * Otherwise, provision only 1 queue pair for VF.
- */
- if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
- nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
- if (num_vf_qs > 1) {
- vf_if_cap_flags |= BE_IF_FLAGS_RSS;
- if (pool_res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
- vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
- } else {
- vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
- BE_IF_FLAGS_DEFQ_RSS);
- }
- } else {
- num_vf_qs = 1;
- }
-
- if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
- nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
- vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
- }
-
- nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
- nic_vft->rq_count = cpu_to_le16(num_vf_qs);
- nic_vft->txq_count = cpu_to_le16(num_vf_qs);
- nic_vft->rssq_count = cpu_to_le16(num_vf_qs);
- nic_vft->cq_count = cpu_to_le16(pool_res.max_cq_count /
- (num_vfs + 1));
-
- /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
- * among the PF and it's VFs, if the fields are changeable
- */
- if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
- nic_vft->unicast_mac_count = cpu_to_le16(pool_res.max_uc_mac /
- (num_vfs + 1));
-
- if (res_mod.max_vlans == FIELD_MODIFIABLE)
- nic_vft->vlan_count = cpu_to_le16(pool_res.max_vlans /
- (num_vfs + 1));
-
- if (res_mod.max_iface_count == FIELD_MODIFIABLE)
- nic_vft->iface_count = cpu_to_le16(pool_res.max_iface_count /
- (num_vfs + 1));
-
- if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
- nic_vft->mcc_count = cpu_to_le16(pool_res.max_mcc_count /
- (num_vfs + 1));
-}
-
int be_cmd_set_sriov_config(struct be_adapter *adapter,
struct be_resources pool_res, u16 num_vfs,
- u16 num_vf_qs)
+ struct be_resources *vft_res)
{
struct {
struct be_pcie_res_desc pcie;
@@ -4620,12 +4616,26 @@ int be_cmd_set_sriov_config(struct be_adapter *adapter,
be_reset_nic_desc(&desc.nic_vft);
desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
- desc.nic_vft.flags = BIT(VFT_SHIFT) | BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
+ desc.nic_vft.flags = vft_res->flags | BIT(VFT_SHIFT) |
+ BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
desc.nic_vft.pf_num = adapter->pdev->devfn;
desc.nic_vft.vf_num = 0;
-
- be_fill_vf_res_template(adapter, pool_res, num_vfs, num_vf_qs,
- &desc.nic_vft);
+ desc.nic_vft.cap_flags = cpu_to_le32(vft_res->vf_if_cap_flags);
+ desc.nic_vft.rq_count = cpu_to_le16(vft_res->max_rx_qs);
+ desc.nic_vft.txq_count = cpu_to_le16(vft_res->max_tx_qs);
+ desc.nic_vft.rssq_count = cpu_to_le16(vft_res->max_rss_qs);
+ desc.nic_vft.cq_count = cpu_to_le16(vft_res->max_cq_count);
+
+ if (vft_res->max_uc_mac)
+ desc.nic_vft.unicast_mac_count =
+ cpu_to_le16(vft_res->max_uc_mac);
+ if (vft_res->max_vlans)
+ desc.nic_vft.vlan_count = cpu_to_le16(vft_res->max_vlans);
+ if (vft_res->max_iface_count)
+ desc.nic_vft.iface_count =
+ cpu_to_le16(vft_res->max_iface_count);
+ if (vft_res->max_mcc_count)
+ desc.nic_vft.mcc_count = cpu_to_le16(vft_res->max_mcc_count);
return be_cmd_set_profile_config(adapter, &desc,
2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index d8540ae95..0d6be224a 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -1556,7 +1556,9 @@ struct be_cmd_resp_acpi_wol_magic_config_v1 {
u8 rsvd0[2];
u8 wol_settings;
u8 rsvd1[5];
- u32 rsvd2[295];
+ u32 rsvd2[288];
+ u8 magic_mac[6];
+ u8 rsvd3[22];
} __packed;
#define BE_GET_WOL_CAP 2
@@ -2128,6 +2130,9 @@ struct be_cmd_req_set_ext_fat_caps {
#define IMM_SHIFT 6 /* Immediate */
#define NOSV_SHIFT 7 /* No save */
+#define MISSION_NIC 1
+#define MISSION_RDMA 8
+
struct be_res_desc_hdr {
u8 desc_type;
u8 desc_len;
@@ -2244,6 +2249,7 @@ struct be_cmd_req_get_profile_config {
struct be_cmd_req_hdr hdr;
u8 rsvd;
#define ACTIVE_PROFILE_TYPE 0x2
+#define SAVED_PROFILE_TYPE 0x0
#define QUERY_MODIFIABLE_FIELDS_TYPE BIT(3)
u8 type;
u16 rsvd1;
@@ -2449,7 +2455,9 @@ int be_cmd_query_port_name(struct be_adapter *adapter);
int be_cmd_get_func_config(struct be_adapter *adapter,
struct be_resources *res);
int be_cmd_get_profile_config(struct be_adapter *adapter,
- struct be_resources *res, u8 query, u8 domain);
+ struct be_resources *res,
+ struct be_port_resources *port_res,
+ u8 profile_type, u8 query, u8 domain);
int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile);
int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
int vf_num);
@@ -2461,4 +2469,4 @@ int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port);
int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op);
int be_cmd_set_sriov_config(struct be_adapter *adapter,
struct be_resources res, u16 num_vfs,
- u16 num_vf_qs);
+ struct be_resources *vft_res);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 2ff691636..50e7be5da 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -793,6 +793,11 @@ static void be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ struct device *dev = &adapter->pdev->dev;
+ struct be_dma_mem cmd;
+ u8 mac[ETH_ALEN];
+ bool enable;
+ int status;
if (wol->wolopts & ~WAKE_MAGIC)
return -EOPNOTSUPP;
@@ -802,12 +807,32 @@ static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return -EOPNOTSUPP;
}
- if (wol->wolopts & WAKE_MAGIC)
- adapter->wol_en = true;
- else
- adapter->wol_en = false;
+ cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
+ cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
+ if (!cmd.va)
+ return -ENOMEM;
- return 0;
+ eth_zero_addr(mac);
+
+ enable = wol->wolopts & WAKE_MAGIC;
+ if (enable)
+ ether_addr_copy(mac, adapter->netdev->dev_addr);
+
+ status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
+ if (status) {
+ dev_err(dev, "Could not set Wake-on-lan mac address\n");
+ status = be_cmd_status(status);
+ goto err;
+ }
+
+ pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
+ pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
+
+ adapter->wol_en = enable;
+
+err:
+ dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
+ return status;
}
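
The reworked handler programs the magic-packet filter through the firmware command and arms the PCI wake states immediately, instead of deferring that work to suspend time as the removed be_setup_wol() helper did. A zeroed MAC disarms the filter; the interface's own address arms it:

/* Sketch of the decision above:
 *   WAKE_MAGIC set   -> mac = netdev dev_addr, filter armed, wol_en = true
 *   WAKE_MAGIC clear -> mac = 00:00:00:00:00:00, filter disarmed
 * Driven from userspace by e.g. "ethtool -s eth0 wol g" (enable) and
 * "ethtool -s eth0 wol d" (disable); interface name illustrative.
 */
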
static int be_test_ddr_dma(struct be_adapter *adapter)
@@ -1171,9 +1196,17 @@ static void be_get_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ u16 num_rx_irqs = max_t(u16, adapter->num_rss_qs, 1);
- ch->combined_count = adapter->num_evt_qs;
- ch->max_combined = be_max_qs(adapter);
+ /* num_tx_qs is always the same as the number of IRQs used for TX */
+ ch->combined_count = min(adapter->num_tx_qs, num_rx_irqs);
+ ch->rx_count = num_rx_irqs - ch->combined_count;
+ ch->tx_count = adapter->num_tx_qs - ch->combined_count;
+
+ ch->max_combined = be_max_qp_irqs(adapter);
+ /* The user must create at least one combined channel */
+ ch->max_rx = be_max_rx_irqs(adapter) - 1;
+ ch->max_tx = be_max_tx_irqs(adapter) - 1;
}
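
A worked example of the reporting arithmetic above, using hypothetical queue counts:

/* num_tx_qs = 8, num_rss_qs = 16:
 *   num_rx_irqs    = max(16, 1) = 16
 *   combined_count = min(8, 16) = 8
 *   rx_count       = 16 - 8     = 8
 *   tx_count       = 8 - 8      = 0
 * i.e. "ethtool -l" reports 8 combined channels plus 8 RX-only channels.
 */
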
static int be_set_channels(struct net_device *netdev,
@@ -1182,11 +1215,22 @@ static int be_set_channels(struct net_device *netdev,
struct be_adapter *adapter = netdev_priv(netdev);
int status;
- if (ch->rx_count || ch->tx_count || ch->other_count ||
- !ch->combined_count || ch->combined_count > be_max_qs(adapter))
+ /* we support either only combined channels or a combination of
+ * combined and either RX-only or TX-only channels.
+ */
+ if (ch->other_count || !ch->combined_count ||
+ (ch->rx_count && ch->tx_count))
+ return -EINVAL;
+
+ if (ch->combined_count > be_max_qp_irqs(adapter) ||
+ (ch->rx_count &&
+ (ch->rx_count + ch->combined_count) > be_max_rx_irqs(adapter)) ||
+ (ch->tx_count &&
+ (ch->tx_count + ch->combined_count) > be_max_tx_irqs(adapter)))
return -EINVAL;
- adapter->cfg_num_qs = ch->combined_count;
+ adapter->cfg_num_rx_irqs = ch->combined_count + ch->rx_count;
+ adapter->cfg_num_tx_irqs = ch->combined_count + ch->tx_count;
status = be_update_queues(adapter);
return be_cmd_status(status);
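
Under this validation a request must contain at least one combined channel and may add RX-only or TX-only channels, but never both. Illustrative invocations (interface name assumed):

/* Accepted:
 *   ethtool -L eth0 combined 4
 *   ethtool -L eth0 combined 4 rx 2
 *   ethtool -L eth0 combined 4 tx 2
 * Rejected:
 *   ethtool -L eth0 rx 4                  (no combined channel)
 *   ethtool -L eth0 combined 4 rx 2 tx 2  (both RX-only and TX-only)
 */
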
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index ed98ef1ec..874c7539a 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -2620,8 +2620,10 @@ static int be_evt_queues_create(struct be_adapter *adapter)
struct be_aic_obj *aic;
int i, rc;
+ /* need enough EQs to service both RX and TX queues */
adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
- adapter->cfg_num_qs);
+ max(adapter->cfg_num_rx_irqs,
+ adapter->cfg_num_tx_irqs));
for_all_evt_queues(adapter, eqo, i) {
int numa_node = dev_to_node(&adapter->pdev->dev);
@@ -2726,7 +2728,7 @@ static int be_tx_qs_create(struct be_adapter *adapter)
struct be_eq_obj *eqo;
int status, i;
- adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
+ adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs);
for_all_tx_queues(adapter, txo, i) {
cq = &txo->cq;
@@ -2784,11 +2786,11 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
struct be_rx_obj *rxo;
int rc, i;
- /* We can create as many RSS rings as there are EQs. */
- adapter->num_rss_qs = adapter->num_evt_qs;
+ adapter->num_rss_qs =
+ min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);
/* We'll use RSS only if at least 2 RSS rings are supported. */
- if (adapter->num_rss_qs <= 1)
+ if (adapter->num_rss_qs < 2)
adapter->num_rss_qs = 0;
adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
@@ -3249,18 +3251,23 @@ static void be_msix_disable(struct be_adapter *adapter)
static int be_msix_enable(struct be_adapter *adapter)
{
- int i, num_vec;
+ unsigned int i, max_roce_eqs;
struct device *dev = &adapter->pdev->dev;
+ int num_vec;
- /* If RoCE is supported, program the max number of NIC vectors that
- * may be configured via set-channels, along with vectors needed for
- * RoCe. Else, just program the number we'll use initially.
+ /* If RoCE is supported, program the max number of vectors that
+ * could be used for NIC and RoCE; otherwise, just program the number
+ * we'll use initially.
*/
- if (be_roce_supported(adapter))
- num_vec = min_t(int, 2 * be_max_eqs(adapter),
- 2 * num_online_cpus());
- else
- num_vec = adapter->cfg_num_qs;
+ if (be_roce_supported(adapter)) {
+ max_roce_eqs =
+ be_max_func_eqs(adapter) - be_max_nic_eqs(adapter);
+ max_roce_eqs = min(max_roce_eqs, num_online_cpus());
+ num_vec = be_max_any_irqs(adapter) + max_roce_eqs;
+ } else {
+ num_vec = max(adapter->cfg_num_rx_irqs,
+ adapter->cfg_num_tx_irqs);
+ }
for (i = 0; i < num_vec; i++)
adapter->msix_entries[i].entry = i;
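
A worked example of the new vector sizing, with hypothetical limits:

/* RoCE-capable adapter, be_max_func_eqs = 32, be_max_nic_eqs = 16,
 * 8 online CPUs:
 *   max_roce_eqs = min(32 - 16, 8) = 8
 *   num_vec      = be_max_any_irqs(adapter) + 8
 * Without RoCE, num_vec is simply max(cfg_num_rx_irqs, cfg_num_tx_irqs).
 */
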
@@ -3625,10 +3632,8 @@ static int be_open(struct net_device *netdev)
be_link_status_update(adapter, link_status);
netif_tx_start_all_queues(netdev);
-#ifdef CONFIG_BE2NET_VXLAN
if (skyhawk_chip(adapter))
- vxlan_get_rx_port(netdev);
-#endif
+ udp_tunnel_get_rx_info(netdev);
return 0;
err:
@@ -3636,40 +3641,6 @@ err:
return -EIO;
}
-static int be_setup_wol(struct be_adapter *adapter, bool enable)
-{
- struct device *dev = &adapter->pdev->dev;
- struct be_dma_mem cmd;
- u8 mac[ETH_ALEN];
- int status;
-
- eth_zero_addr(mac);
-
- cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
- cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
- if (!cmd.va)
- return -ENOMEM;
-
- if (enable) {
- status = pci_write_config_dword(adapter->pdev,
- PCICFG_PM_CONTROL_OFFSET,
- PCICFG_PM_CONTROL_MASK);
- if (status) {
- dev_err(dev, "Could not enable Wake-on-lan\n");
- goto err;
- }
- } else {
- ether_addr_copy(mac, adapter->netdev->dev_addr);
- }
-
- status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
- pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
- pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
-err:
- dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
- return status;
-}
-
static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
{
u32 addr;
@@ -3759,6 +3730,11 @@ static void be_vf_clear(struct be_adapter *adapter)
be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
}
+
+ if (BE3_chip(adapter))
+ be_cmd_set_hsw_config(adapter, 0, 0,
+ adapter->if_handle,
+ PORT_FWD_TYPE_PASSTHRU, 0);
done:
kfree(adapter->vf_cfg);
adapter->num_vfs = 0;
@@ -3789,7 +3765,6 @@ static void be_cancel_err_detection(struct be_adapter *adapter)
}
}
-#ifdef CONFIG_BE2NET_VXLAN
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -3808,37 +3783,87 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter)
netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
-#endif
-static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
+static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
+ struct be_resources *vft_res)
{
struct be_resources res = adapter->pool_res;
+ u32 vf_if_cap_flags = res.vf_if_cap_flags;
+ struct be_resources res_mod = {0};
u16 num_vf_qs = 1;
- /* Distribute the queue resources among the PF and it's VFs
- * Do not distribute queue resources in multi-channel configuration.
- */
- if (num_vfs && !be_is_mc(adapter)) {
- /* Divide the qpairs evenly among the VFs and the PF, capped
- * at VF-EQ-count. Any remainder qpairs belong to the PF.
- */
+ /* Distribute the queue resources among the PF and its VFs */
+ if (num_vfs) {
+ /* Divide the rx queues evenly among the VFs and the PF, capped
+ * at VF-EQ-count. Any remainder queues belong to the PF.
+ */
num_vf_qs = min(SH_VF_MAX_NIC_EQS,
res.max_rss_qs / (num_vfs + 1));
- /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
- * interfaces per port. Provide RSS on VFs, only if number
- * of VFs requested is less than MAX_RSS_IFACES limit.
+ /* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES
+ * RSS Tables per port. Provide RSS on VFs only if the number of
+ * VFs requested is less than its PF pool's RSS Tables limit.
*/
- if (num_vfs >= MAX_RSS_IFACES)
+ if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
num_vf_qs = 1;
}
- return num_vf_qs;
+
+ /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
+ * which are modifiable using SET_PROFILE_CONFIG cmd.
+ */
+ be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE,
+ RESOURCE_MODIFIABLE, 0);
+
+ /* If RSS IFACE capability flags are modifiable for a VF, set the
+ * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
+ * more than 1 RSSQ is available for a VF.
+ * Otherwise, provision only 1 queue pair for VF.
+ */
+ if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
+ vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
+ if (num_vf_qs > 1) {
+ vf_if_cap_flags |= BE_IF_FLAGS_RSS;
+ if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
+ vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
+ } else {
+ vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
+ BE_IF_FLAGS_DEFQ_RSS);
+ }
+ } else {
+ num_vf_qs = 1;
+ }
+
+ if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
+ vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
+ vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
+ }
+
+ vft_res->vf_if_cap_flags = vf_if_cap_flags;
+ vft_res->max_rx_qs = num_vf_qs;
+ vft_res->max_rss_qs = num_vf_qs;
+ vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
+ vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);
+
+ /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
+ * among the PF and its VFs, if the fields are changeable
+ */
+ if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
+ vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);
+
+ if (res_mod.max_vlans == FIELD_MODIFIABLE)
+ vft_res->max_vlans = res.max_vlans / (num_vfs + 1);
+
+ if (res_mod.max_iface_count == FIELD_MODIFIABLE)
+ vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);
+
+ if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
+ vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
}
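
A worked example of the split performed above, with hypothetical pool limits and assuming the SH_VF_MAX_NIC_EQS cap does not bind:

/* pool: max_rss_qs = 64, max_tx_qs = 32, max_cq_count = 128; num_vfs = 7:
 *   num_vf_qs             = 64 / (7 + 1)  = 8  -> max_rx_qs = max_rss_qs = 8
 *   vft_res->max_tx_qs    = 32 / (7 + 1)  = 4
 *   vft_res->max_cq_count = 128 / (7 + 1) = 16
 * MAC, VLAN, iface and MCCQ counts are divided the same way, but only for
 * fields the firmware reports as FIELD_MODIFIABLE.
 */
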
static int be_clear(struct be_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
- u16 num_vf_qs;
+ struct be_resources vft_res = {0};
be_cancel_worker(adapter);
@@ -3850,16 +3875,15 @@ static int be_clear(struct be_adapter *adapter)
*/
if (skyhawk_chip(adapter) && be_physfn(adapter) &&
!pci_vfs_assigned(pdev)) {
- num_vf_qs = be_calculate_vf_qs(adapter,
- pci_sriov_get_totalvfs(pdev));
+ be_calculate_vf_res(adapter,
+ pci_sriov_get_totalvfs(pdev),
+ &vft_res);
be_cmd_set_sriov_config(adapter, adapter->pool_res,
pci_sriov_get_totalvfs(pdev),
- num_vf_qs);
+ &vft_res);
}
-#ifdef CONFIG_BE2NET_VXLAN
be_disable_vxlan_offloads(adapter);
-#endif
kfree(adapter->pmac_id);
adapter->pmac_id = NULL;
@@ -3884,7 +3908,8 @@ static int be_vfs_if_create(struct be_adapter *adapter)
for_all_vfs(adapter, vf_cfg, vf) {
if (!BE3_chip(adapter)) {
- status = be_cmd_get_profile_config(adapter, &res,
+ status = be_cmd_get_profile_config(adapter, &res, NULL,
+ ACTIVE_PROFILE_TYPE,
RESOURCE_LIMITS,
vf + 1);
if (!status) {
@@ -4000,6 +4025,15 @@ static int be_vf_setup(struct be_adapter *adapter)
}
}
+ if (BE3_chip(adapter)) {
+ /* On BE3, enable VEB only when SRIOV is enabled */
+ status = be_cmd_set_hsw_config(adapter, 0, 0,
+ adapter->if_handle,
+ PORT_FWD_TYPE_VEB, 0);
+ if (status)
+ goto err;
+ }
+
adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
return 0;
err:
@@ -4069,8 +4103,9 @@ static void BEx_get_resources(struct be_adapter *adapter,
/* On a SuperNIC profile, the driver needs to use the
* GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
*/
- be_cmd_get_profile_config(adapter, &super_nic_res,
- RESOURCE_LIMITS, 0);
+ be_cmd_get_profile_config(adapter, &super_nic_res, NULL,
+ ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS,
+ 0);
/* Some old versions of BE3 FW don't report max_tx_qs value */
res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
} else {
@@ -4109,12 +4144,38 @@ static void be_setup_init(struct be_adapter *adapter)
adapter->cmd_privileges = MIN_PRIVILEGES;
}
+/* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
+ * However, this HW limitation is not exposed to the host via any SLI cmd.
+ * As a result, in the case of SRIOV, and in particular in multi-partition
+ * configs, the driver needs to calculate a proportional share of RSS Tables
+ * per PF-pool for distribution among the VFs. This self-imposed limit
+ * determines the number of VFs for which RSS can be enabled.
+ */
+void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
+{
+ struct be_port_resources port_res = {0};
+ u8 rss_tables_on_port;
+ u16 max_vfs = be_max_vfs(adapter);
+
+ be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE,
+ RESOURCE_LIMITS, 0);
+
+ rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs;
+
+ /* Each PF Pool's RSS Tables limit =
+ * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port
+ */
+ adapter->pool_res.max_rss_tables =
+ max_vfs * rss_tables_on_port / port_res.max_vfs;
+}
+
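
A worked example of the formula, assuming MAX_PORT_RSS_TABLES is 15 (its definition is not part of this hunk):

/* port_res.nic_pfs = 3 -> rss_tables_on_port = 15 - 3 = 12
 * This PF owns 32 of the port's 64 VFs (port_res.max_vfs = 64):
 *   max_rss_tables = 32 * 12 / 64 = 6
 * so RSS can be provisioned for at most 6 of this PF's VFs.
 */
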
static int be_get_sriov_config(struct be_adapter *adapter)
{
struct be_resources res = {0};
int max_vfs, old_vfs;
- be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);
+ be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE,
+ RESOURCE_LIMITS, 0);
/* Some old versions of BE3 FW don't report max_vfs value */
if (BE3_chip(adapter) && !res.max_vfs) {
@@ -4138,13 +4199,19 @@ static int be_get_sriov_config(struct be_adapter *adapter)
adapter->num_vfs = old_vfs;
}
+ if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
+ be_calculate_pf_pool_rss_tables(adapter);
+ dev_info(&adapter->pdev->dev,
+ "RSS can be enabled for all VFs if num_vfs <= %d\n",
+ be_max_pf_pool_rss_tables(adapter));
+ }
return 0;
}
static void be_alloc_sriov_res(struct be_adapter *adapter)
{
int old_vfs = pci_num_vf(adapter->pdev);
- u16 num_vf_qs;
+ struct be_resources vft_res = {0};
int status;
be_get_sriov_config(adapter);
@@ -4158,9 +4225,9 @@ static void be_alloc_sriov_res(struct be_adapter *adapter)
* Also, this is done by FW in Lancer chip.
*/
if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
- num_vf_qs = be_calculate_vf_qs(adapter, 0);
+ be_calculate_vf_res(adapter, 0, &vft_res);
status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
- num_vf_qs);
+ &vft_res);
if (status)
dev_err(&adapter->pdev->dev,
"Failed to optimize SRIOV resources\n");
@@ -4173,16 +4240,13 @@ static int be_get_resources(struct be_adapter *adapter)
struct be_resources res = {0};
int status;
- if (BEx_chip(adapter)) {
- BEx_get_resources(adapter, &res);
- adapter->res = res;
- }
-
/* For Lancer, SH etc read per-function resource limits from FW.
* GET_FUNC_CONFIG returns per function guaranteed limits.
* GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
*/
- if (!BEx_chip(adapter)) {
+ if (BEx_chip(adapter)) {
+ BEx_get_resources(adapter, &res);
+ } else {
status = be_cmd_get_func_config(adapter, &res);
if (status)
return status;
@@ -4191,13 +4255,13 @@ static int be_get_resources(struct be_adapter *adapter)
if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
!(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
res.max_rss_qs -= 1;
-
- /* If RoCE may be enabled stash away half the EQs for RoCE */
- if (be_roce_supported(adapter))
- res.max_evt_qs /= 2;
- adapter->res = res;
}
+ /* If RoCE is supported, stash away half the EQs for RoCE */
+ res.max_nic_evt_qs = be_roce_supported(adapter) ?
+ res.max_evt_qs / 2 : res.max_evt_qs;
+ adapter->res = res;
+
/* If FW supports RSS default queue, then skip creating non-RSS
* queue for non-IP traffic.
*/
@@ -4206,15 +4270,17 @@ static int be_get_resources(struct be_adapter *adapter)
dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
be_max_txqs(adapter), be_max_rxqs(adapter),
- be_max_rss(adapter), be_max_eqs(adapter),
+ be_max_rss(adapter), be_max_nic_eqs(adapter),
be_max_vfs(adapter));
dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
be_max_uc(adapter), be_max_mc(adapter),
be_max_vlans(adapter));
- /* Sanitize cfg_num_qs based on HW and platform limits */
- adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
- be_max_qs(adapter));
+ /* Ensure RX and TX queues are created in pairs at init time */
+ adapter->cfg_num_rx_irqs =
+ min_t(u16, netif_get_num_default_rss_queues(),
+ be_max_qp_irqs(adapter));
+ adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs;
return 0;
}
@@ -4241,6 +4307,8 @@ static int be_get_config(struct be_adapter *adapter)
}
be_cmd_get_acpi_wol_cap(adapter);
+ pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en);
+ pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en);
be_cmd_query_port_name(adapter);
@@ -4251,15 +4319,6 @@ static int be_get_config(struct be_adapter *adapter)
"Using profile 0x%x\n", profile_id);
}
- status = be_get_resources(adapter);
- if (status)
- return status;
-
- adapter->pmac_id = kcalloc(be_max_uc(adapter),
- sizeof(*adapter->pmac_id), GFP_KERNEL);
- if (!adapter->pmac_id)
- return -ENOMEM;
-
return 0;
}
@@ -4334,7 +4393,7 @@ static int be_if_create(struct be_adapter *adapter)
u32 cap_flags = be_if_cap_flags(adapter);
int status;
- if (adapter->cfg_num_qs == 1)
+ if (adapter->cfg_num_rx_irqs == 1)
cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
en_flags &= cap_flags;
@@ -4460,13 +4519,22 @@ static int be_setup(struct be_adapter *adapter)
return status;
}
+ status = be_get_config(adapter);
+ if (status)
+ goto err;
+
if (!BE2_chip(adapter) && be_physfn(adapter))
be_alloc_sriov_res(adapter);
- status = be_get_config(adapter);
+ status = be_get_resources(adapter);
if (status)
goto err;
+ adapter->pmac_id = kcalloc(be_max_uc(adapter),
+ sizeof(*adapter->pmac_id), GFP_KERNEL);
+ if (!adapter->pmac_id)
+ return -ENOMEM;
+
status = be_msix_enable(adapter);
if (status)
goto err;
@@ -4511,6 +4579,15 @@ static int be_setup(struct be_adapter *adapter)
be_cmd_set_logical_link_config(adapter,
IFLA_VF_LINK_STATE_AUTO, 0);
+ /* BE3 EVB echoes broadcast/multicast packets back to the PF's vport,
+ * confusing any Linux bridge or OVS it may be connected to.
+ * Set the EVB to PASSTHRU mode, which effectively disables the EVB
+ * when SRIOV is not enabled.
+ */
+ if (BE3_chip(adapter))
+ be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle,
+ PORT_FWD_TYPE_PASSTHRU, 0);
+
if (adapter->num_vfs)
be_vf_setup(adapter);
@@ -4651,7 +4728,6 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
0, 0, nlflags, filter_mask, NULL);
}
-#ifdef CONFIG_BE2NET_VXLAN
/* VxLAN offload Notes:
*
* The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
@@ -4666,13 +4742,17 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
* adds more than one port, disable offloads and don't re-enable them again
* until after all the tunnels are removed.
*/
-static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
- __be16 port)
+static void be_add_vxlan_port(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct device *dev = &adapter->pdev->dev;
+ __be16 port = ti->port;
int status;
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
return;
@@ -4720,10 +4800,14 @@ err:
be_disable_vxlan_offloads(adapter);
}
-static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
- __be16 port)
+static void be_del_vxlan_port(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ __be16 port = ti->port;
+
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
return;
@@ -4785,7 +4869,6 @@ static netdev_features_t be_features_check(struct sk_buff *skb,
return features;
}
-#endif
static int be_get_phys_port_id(struct net_device *dev,
struct netdev_phys_item_id *ppid)
@@ -4833,11 +4916,9 @@ static const struct net_device_ops be_netdev_ops = {
#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = be_busy_poll,
#endif
-#ifdef CONFIG_BE2NET_VXLAN
- .ndo_add_vxlan_port = be_add_vxlan_port,
- .ndo_del_vxlan_port = be_del_vxlan_port,
+ .ndo_udp_tunnel_add = be_add_vxlan_port,
+ .ndo_udp_tunnel_del = be_del_vxlan_port,
.ndo_features_check = be_features_check,
-#endif
.ndo_get_phys_port_id = be_get_phys_port_id,
};
@@ -4996,6 +5077,10 @@ static void be_worker(struct work_struct *work)
struct be_rx_obj *rxo;
int i;
+ if (be_physfn(adapter) &&
+ MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
+ be_cmd_get_die_temperature(adapter);
+
/* when interrupts are not yet enabled, just reap any pending
* mcc completions
*/
@@ -5014,10 +5099,6 @@ static void be_worker(struct work_struct *work)
be_cmd_get_stats(adapter, &adapter->stats_cmd);
}
- if (be_physfn(adapter) &&
- MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
- be_cmd_get_die_temperature(adapter);
-
for_all_rx_queues(adapter, rxo, i) {
/* Replenish RX-queues starved due to memory
* allocation failures.
@@ -5410,9 +5491,6 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct be_adapter *adapter = pci_get_drvdata(pdev);
- if (adapter->wol_en)
- be_setup_wol(adapter, true);
-
be_intr_set(adapter, false);
be_cancel_err_detection(adapter);
@@ -5441,9 +5519,6 @@ static int be_pci_resume(struct pci_dev *pdev)
be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
- if (adapter->wol_en)
- be_setup_wol(adapter, false);
-
return 0;
}
@@ -5552,7 +5627,7 @@ err:
static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
struct be_adapter *adapter = pci_get_drvdata(pdev);
- u16 num_vf_qs;
+ struct be_resources vft_res = {0};
int status;
if (!num_vfs)
@@ -5575,9 +5650,10 @@ static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
* Also, this is done by FW in Lancer chip.
*/
if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
- num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
+ be_calculate_vf_res(adapter, adapter->num_vfs,
+ &vft_res);
status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
- adapter->num_vfs, num_vf_qs);
+ adapter->num_vfs, &vft_res);
if (status)
dev_err(&pdev->dev,
"Failed to optimize SR-IOV resources\n");
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index 4089156a7..2b62841c4 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h
index fde609789..e51719a73 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.h
+++ b/drivers/net/ethernet/emulex/benet/be_roce.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 4466a1187..c044667a0 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -192,7 +192,6 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
* @napi: NAPI structure
* @msg_enable: device state flags
* @lock: device lock
- * @phy: attached PHY
* @mdio: MDIO bus for PHY access
* @phy_id: address of attached PHY
*/
@@ -219,7 +218,6 @@ struct ethoc {
spinlock_t lock;
- struct phy_device *phy;
struct mii_bus *mdio;
struct clk *clk;
s8 phy_id;
@@ -694,7 +692,6 @@ static int ethoc_mdio_probe(struct net_device *dev)
return err;
}
- priv->phy = phy;
phy->advertising &= ~(ADVERTISED_1000baseT_Full |
ADVERTISED_1000baseT_Half);
phy->supported &= ~(SUPPORTED_1000baseT_Full |
@@ -724,7 +721,7 @@ static int ethoc_open(struct net_device *dev)
netif_start_queue(dev);
}
- phy_start(priv->phy);
+ phy_start(dev->phydev);
napi_enable(&priv->napi);
if (netif_msg_ifup(priv)) {
@@ -741,8 +738,8 @@ static int ethoc_stop(struct net_device *dev)
napi_disable(&priv->napi);
- if (priv->phy)
- phy_stop(priv->phy);
+ if (dev->phydev)
+ phy_stop(dev->phydev);
ethoc_disable_rx_and_tx(priv);
free_irq(dev->irq, dev);
@@ -770,7 +767,7 @@ static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (!phy)
return -ENODEV;
} else {
- phy = priv->phy;
+ phy = dev->phydev;
}
return phy_mii_ioctl(phy, ifr, cmd);
@@ -903,28 +900,6 @@ out_no_free:
return NETDEV_TX_OK;
}
-static int ethoc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct ethoc *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phy;
-
- if (!phydev)
- return -EOPNOTSUPP;
-
- return phy_ethtool_gset(phydev, cmd);
-}
-
-static int ethoc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct ethoc *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phy;
-
- if (!phydev)
- return -EOPNOTSUPP;
-
- return phy_ethtool_sset(phydev, cmd);
-}
-
static int ethoc_get_regs_len(struct net_device *netdev)
{
return ETH_END;
@@ -989,14 +964,14 @@ static int ethoc_set_ringparam(struct net_device *dev,
}
const struct ethtool_ops ethoc_ethtool_ops = {
- .get_settings = ethoc_get_settings,
- .set_settings = ethoc_set_settings,
.get_regs_len = ethoc_get_regs_len,
.get_regs = ethoc_get_regs,
.get_link = ethtool_op_get_link,
.get_ringparam = ethoc_get_ringparam,
.set_ringparam = ethoc_set_ringparam,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static const struct net_device_ops ethoc_netdev_ops = {
@@ -1267,8 +1242,7 @@ static int ethoc_remove(struct platform_device *pdev)
if (netdev) {
netif_napi_del(&priv->napi);
- phy_disconnect(priv->phy);
- priv->phy = NULL;
+ phy_disconnect(netdev->phydev);
if (priv->mdio) {
mdiobus_unregister(priv->mdio);
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 9b7a3f5a2..f928e6f79 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -24,6 +24,14 @@
#define DRV_NAME "nps_mgt_enet"
+static inline bool nps_enet_is_tx_pending(struct nps_enet_priv *priv)
+{
+ u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
+ u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;
+
+ return (!tx_ctrl_ct && priv->tx_skb);
+}
+
static void nps_enet_clean_rx_fifo(struct net_device *ndev, u32 frame_len)
{
struct nps_enet_priv *priv = netdev_priv(ndev);
@@ -46,16 +54,17 @@ static void nps_enet_read_rx_fifo(struct net_device *ndev,
if (dst_is_aligned) {
ioread32_rep(priv->regs_base + NPS_ENET_REG_RX_BUF, reg, len);
reg += len;
- }
- else { /* !dst_is_aligned */
+ } else { /* !dst_is_aligned */
for (i = 0; i < len; i++, reg++) {
u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
+
put_unaligned_be32(buf, reg);
}
}
/* copy last bytes (if any) */
if (last) {
u32 buf;
+
ioread32_rep(priv->regs_base + NPS_ENET_REG_RX_BUF, &buf, 1);
memcpy((u8 *)reg, &buf, last);
}
@@ -140,12 +149,11 @@ static void nps_enet_tx_handler(struct net_device *ndev)
{
struct nps_enet_priv *priv = netdev_priv(ndev);
u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
- u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;
u32 tx_ctrl_et = (tx_ctrl_value & TX_CTL_ET_MASK) >> TX_CTL_ET_SHIFT;
u32 tx_ctrl_nt = (tx_ctrl_value & TX_CTL_NT_MASK) >> TX_CTL_NT_SHIFT;
/* Check if we got TX */
- if (!priv->tx_skb || tx_ctrl_ct)
+ if (!nps_enet_is_tx_pending(priv))
return;
/* Ack Tx ctrl register */
@@ -183,9 +191,6 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
work_done = nps_enet_rx_handler(ndev);
if (work_done < budget) {
u32 buf_int_enable_value = 0;
- u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
- u32 tx_ctrl_ct =
- (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;
napi_complete(napi);
@@ -204,8 +209,7 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
* the two code lines below will solve this situation by
* re-adding ourselves to the poll list.
*/
-
- if (priv->tx_skb && !tx_ctrl_ct) {
+ if (nps_enet_is_tx_pending(priv)) {
nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
napi_reschedule(napi);
}
@@ -230,11 +234,9 @@ static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance)
struct net_device *ndev = dev_instance;
struct nps_enet_priv *priv = netdev_priv(ndev);
u32 rx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
- u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
- u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;
u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT;
- if ((!tx_ctrl_ct && priv->tx_skb) || rx_ctrl_cr)
+ if (nps_enet_is_tx_pending(priv) || rx_ctrl_cr)
if (likely(napi_schedule_prep(&priv->napi))) {
nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
__napi_schedule(&priv->napi);
@@ -460,7 +462,6 @@ static void nps_enet_set_rx_mode(struct net_device *ndev)
| NPS_ENET_ENABLE << CFG_2_DISK_DA_SHIFT;
ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_MC_MASK)
| NPS_ENET_ENABLE << CFG_2_DISK_MC_SHIFT;
-
}
nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2, ge_mac_cfg_2_value);
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index e7cf313e3..36361f8bf 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -31,6 +31,7 @@
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <net/ip.h>
+#include <net/ncsi.h>
#include "ftgmac100.h"
@@ -68,10 +69,14 @@ struct ftgmac100 {
struct net_device *netdev;
struct device *dev;
+ struct ncsi_dev *ndev;
struct napi_struct napi;
struct mii_bus *mii_bus;
int old_speed;
+ int int_mask_all;
+ bool use_ncsi;
+ bool enabled;
};
static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv,
@@ -80,14 +85,6 @@ static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv,
/******************************************************************************
* internal functions (hardware register access)
*****************************************************************************/
-#define INT_MASK_ALL_ENABLED (FTGMAC100_INT_RPKT_LOST | \
- FTGMAC100_INT_XPKT_ETH | \
- FTGMAC100_INT_XPKT_LOST | \
- FTGMAC100_INT_AHB_ERR | \
- FTGMAC100_INT_PHYSTS_CHG | \
- FTGMAC100_INT_RPKT_BUF | \
- FTGMAC100_INT_NO_RXBUF)
-
static void ftgmac100_set_rx_ring_base(struct ftgmac100 *priv, dma_addr_t addr)
{
iowrite32(addr, priv->base + FTGMAC100_OFFSET_RXR_BADR);
@@ -141,6 +138,55 @@ static void ftgmac100_set_mac(struct ftgmac100 *priv, const unsigned char *mac)
iowrite32(laddr, priv->base + FTGMAC100_OFFSET_MAC_LADR);
}
+static void ftgmac100_setup_mac(struct ftgmac100 *priv)
+{
+ u8 mac[ETH_ALEN];
+ unsigned int m;
+ unsigned int l;
+ void *addr;
+
+ addr = device_get_mac_address(priv->dev, mac, ETH_ALEN);
+ if (addr) {
+ ether_addr_copy(priv->netdev->dev_addr, mac);
+ dev_info(priv->dev, "Read MAC address %pM from device tree\n",
+ mac);
+ return;
+ }
+
+ m = ioread32(priv->base + FTGMAC100_OFFSET_MAC_MADR);
+ l = ioread32(priv->base + FTGMAC100_OFFSET_MAC_LADR);
+
+ mac[0] = (m >> 8) & 0xff;
+ mac[1] = m & 0xff;
+ mac[2] = (l >> 24) & 0xff;
+ mac[3] = (l >> 16) & 0xff;
+ mac[4] = (l >> 8) & 0xff;
+ mac[5] = l & 0xff;
+
+ if (is_valid_ether_addr(mac)) {
+ ether_addr_copy(priv->netdev->dev_addr, mac);
+ dev_info(priv->dev, "Read MAC address %pM from chip\n", mac);
+ } else {
+ eth_hw_addr_random(priv->netdev);
+ dev_info(priv->dev, "Generated random MAC address %pM\n",
+ priv->netdev->dev_addr);
+ }
+}
+
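
An example of the register unpacking above, with hypothetical register contents:

/* MADR = 0x0000aabb, LADR = 0xccddeeff -> MAC aa:bb:cc:dd:ee:ff
 * (MADR carries the two most-significant octets, LADR the other four).
 */
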
+static int ftgmac100_set_mac_addr(struct net_device *dev, void *p)
+{
+ int ret;
+
+ ret = eth_prepare_mac_addr_change(dev, p);
+ if (ret < 0)
+ return ret;
+
+ eth_commit_mac_addr_change(dev, p);
+ ftgmac100_set_mac(netdev_priv(dev), dev->dev_addr);
+
+ return 0;
+}
+
static void ftgmac100_init_hw(struct ftgmac100 *priv)
{
/* setup ring buffer base registers */
@@ -952,7 +998,10 @@ static irqreturn_t ftgmac100_interrupt(int irq, void *dev_id)
struct net_device *netdev = dev_id;
struct ftgmac100 *priv = netdev_priv(netdev);
- if (likely(netif_running(netdev))) {
+ /* When running in NCSI mode, the interface should be ready for
+ * receiving or transmitting NCSI packets before it's opened.
+ */
+ if (likely(priv->use_ncsi || netif_running(netdev))) {
/* Disable interrupts for polling */
iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
napi_schedule(&priv->napi);
@@ -1005,8 +1054,9 @@ static int ftgmac100_poll(struct napi_struct *napi, int budget)
ftgmac100_tx_complete(priv);
}
- if (status & (FTGMAC100_INT_NO_RXBUF | FTGMAC100_INT_RPKT_LOST |
- FTGMAC100_INT_AHB_ERR | FTGMAC100_INT_PHYSTS_CHG)) {
+ if (status & priv->int_mask_all & (FTGMAC100_INT_NO_RXBUF |
+ FTGMAC100_INT_RPKT_LOST | FTGMAC100_INT_AHB_ERR |
+ FTGMAC100_INT_PHYSTS_CHG)) {
if (net_ratelimit())
netdev_info(netdev, "[ISR] = 0x%x: %s%s%s%s\n", status,
status & FTGMAC100_INT_NO_RXBUF ? "NO_RXBUF " : "",
@@ -1029,7 +1079,8 @@ static int ftgmac100_poll(struct napi_struct *napi, int budget)
napi_complete(napi);
/* enable all interrupts */
- iowrite32(INT_MASK_ALL_ENABLED, priv->base + FTGMAC100_OFFSET_IER);
+ iowrite32(priv->int_mask_all,
+ priv->base + FTGMAC100_OFFSET_IER);
}
return rx;
@@ -1065,17 +1116,33 @@ static int ftgmac100_open(struct net_device *netdev)
goto err_hw;
ftgmac100_init_hw(priv);
- ftgmac100_start_hw(priv, 10);
-
- phy_start(netdev->phydev);
+ ftgmac100_start_hw(priv, priv->use_ncsi ? 100 : 10);
+ if (netdev->phydev)
+ phy_start(netdev->phydev);
+ else if (priv->use_ncsi)
+ netif_carrier_on(netdev);
napi_enable(&priv->napi);
netif_start_queue(netdev);
/* enable all interrupts */
- iowrite32(INT_MASK_ALL_ENABLED, priv->base + FTGMAC100_OFFSET_IER);
+ iowrite32(priv->int_mask_all, priv->base + FTGMAC100_OFFSET_IER);
+
+ /* Start the NCSI device */
+ if (priv->use_ncsi) {
+ err = ncsi_start_dev(priv->ndev);
+ if (err)
+ goto err_ncsi;
+ }
+
+ priv->enabled = true;
+
return 0;
+err_ncsi:
+ napi_disable(&priv->napi);
+ netif_stop_queue(netdev);
+ iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
err_hw:
free_irq(priv->irq, netdev);
err_irq:
@@ -1088,12 +1155,17 @@ static int ftgmac100_stop(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
+ if (!priv->enabled)
+ return 0;
+
/* disable all interrupts */
+ priv->enabled = false;
iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
netif_stop_queue(netdev);
napi_disable(&priv->napi);
- phy_stop(netdev->phydev);
+ if (netdev->phydev)
+ phy_stop(netdev->phydev);
ftgmac100_stop_hw(priv);
free_irq(priv->irq, netdev);
@@ -1134,6 +1206,9 @@ static int ftgmac100_hard_start_xmit(struct sk_buff *skb,
/* optional */
static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
+ if (!netdev->phydev)
+ return -ENXIO;
+
return phy_mii_ioctl(netdev->phydev, ifr, cmd);
}
@@ -1141,11 +1216,74 @@ static const struct net_device_ops ftgmac100_netdev_ops = {
.ndo_open = ftgmac100_open,
.ndo_stop = ftgmac100_stop,
.ndo_start_xmit = ftgmac100_hard_start_xmit,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = ftgmac100_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = ftgmac100_do_ioctl,
};
+static int ftgmac100_setup_mdio(struct net_device *netdev)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ struct platform_device *pdev = to_platform_device(priv->dev);
+ int i, err = 0;
+
+ /* initialize mdio bus */
+ priv->mii_bus = mdiobus_alloc();
+ if (!priv->mii_bus)
+ return -EIO;
+
+ priv->mii_bus->name = "ftgmac100_mdio";
+ snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
+ pdev->name, pdev->id);
+ priv->mii_bus->priv = priv->netdev;
+ priv->mii_bus->read = ftgmac100_mdiobus_read;
+ priv->mii_bus->write = ftgmac100_mdiobus_write;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ priv->mii_bus->irq[i] = PHY_POLL;
+
+ err = mdiobus_register(priv->mii_bus);
+ if (err) {
+ dev_err(priv->dev, "Cannot register MDIO bus!\n");
+ goto err_register_mdiobus;
+ }
+
+ err = ftgmac100_mii_probe(priv);
+ if (err) {
+ dev_err(priv->dev, "MII Probe failed!\n");
+ goto err_mii_probe;
+ }
+
+ return 0;
+
+err_mii_probe:
+ mdiobus_unregister(priv->mii_bus);
+err_register_mdiobus:
+ mdiobus_free(priv->mii_bus);
+ return err;
+}
+
+static void ftgmac100_destroy_mdio(struct net_device *netdev)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+
+ if (!netdev->phydev)
+ return;
+
+ phy_disconnect(netdev->phydev);
+ mdiobus_unregister(priv->mii_bus);
+ mdiobus_free(priv->mii_bus);
+}
+
+static void ftgmac100_ncsi_handler(struct ncsi_dev *nd)
+{
+ if (unlikely(nd->state != ncsi_dev_state_functional))
+ return;
+
+ netdev_info(nd->dev, "NCSI interface %s\n",
+ nd->link_up ? "up" : "down");
+}
+
/******************************************************************************
* struct platform_driver functions
*****************************************************************************/
@@ -1155,7 +1293,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
int irq;
struct net_device *netdev;
struct ftgmac100 *priv;
- int err;
+ int err = 0;
if (!pdev)
return -ENODEV;
@@ -1179,7 +1317,6 @@ static int ftgmac100_probe(struct platform_device *pdev)
netdev->ethtool_ops = &ftgmac100_ethtool_ops;
netdev->netdev_ops = &ftgmac100_netdev_ops;
- netdev->features = NETIF_F_IP_CSUM | NETIF_F_GRO;
platform_set_drvdata(pdev, netdev);
@@ -1211,31 +1348,45 @@ static int ftgmac100_probe(struct platform_device *pdev)
priv->irq = irq;
- /* initialize mdio bus */
- priv->mii_bus = mdiobus_alloc();
- if (!priv->mii_bus) {
- err = -EIO;
- goto err_alloc_mdiobus;
- }
-
- priv->mii_bus->name = "ftgmac100_mdio";
- snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "ftgmac100_mii");
-
- priv->mii_bus->priv = netdev;
- priv->mii_bus->read = ftgmac100_mdiobus_read;
- priv->mii_bus->write = ftgmac100_mdiobus_write;
+ /* MAC address from chip or random one */
+ ftgmac100_setup_mac(priv);
+
+ priv->int_mask_all = (FTGMAC100_INT_RPKT_LOST |
+ FTGMAC100_INT_XPKT_ETH |
+ FTGMAC100_INT_XPKT_LOST |
+ FTGMAC100_INT_AHB_ERR |
+ FTGMAC100_INT_PHYSTS_CHG |
+ FTGMAC100_INT_RPKT_BUF |
+ FTGMAC100_INT_NO_RXBUF);
+ if (pdev->dev.of_node &&
+ of_get_property(pdev->dev.of_node, "use-ncsi", NULL)) {
+ if (!IS_ENABLED(CONFIG_NET_NCSI)) {
+ dev_err(&pdev->dev, "NCSI stack not enabled\n");
+ goto err_ncsi_dev;
+ }
- err = mdiobus_register(priv->mii_bus);
- if (err) {
- dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
- goto err_register_mdiobus;
+ dev_info(&pdev->dev, "Using NCSI interface\n");
+ priv->use_ncsi = true;
+ priv->int_mask_all &= ~FTGMAC100_INT_PHYSTS_CHG;
+ priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler);
+ if (!priv->ndev)
+ goto err_ncsi_dev;
+ } else {
+ priv->use_ncsi = false;
+ err = ftgmac100_setup_mdio(netdev);
+ if (err)
+ goto err_setup_mdio;
}
- err = ftgmac100_mii_probe(priv);
- if (err) {
- dev_err(&pdev->dev, "MII Probe failed!\n");
- goto err_mii_probe;
- }
+ /* We have to disable on-chip IP checksum functionality
+ * when NCSI is enabled on the interface. It doesn't work
+ * in that case.
+ */
+ netdev->features = NETIF_F_IP_CSUM | NETIF_F_GRO;
+ if (priv->use_ncsi &&
+ of_get_property(pdev->dev.of_node, "no-hw-checksum", NULL))
+ netdev->features &= ~NETIF_F_IP_CSUM;
+
/* register network device */
err = register_netdev(netdev);
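
NCSI mode is selected purely from the device tree. An illustrative fragment (node name and unit address are made up; the two properties are the ones read by this patch):

/* ethernet@1e660000 {
 *         compatible = "faraday,ftgmac100";
 *         use-ncsi;
 *         no-hw-checksum;
 * };
 *
 * "use-ncsi" routes link management through the NCSI stack and masks the
 * PHY status-change interrupt; "no-hw-checksum" additionally drops
 * NETIF_F_IP_CSUM, since on-chip checksumming misbehaves in NCSI mode.
 */
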
@@ -1246,21 +1397,12 @@ static int ftgmac100_probe(struct platform_device *pdev)
netdev_info(netdev, "irq %d, mapped at %p\n", priv->irq, priv->base);
- if (!is_valid_ether_addr(netdev->dev_addr)) {
- eth_hw_addr_random(netdev);
- netdev_info(netdev, "generated random MAC address %pM\n",
- netdev->dev_addr);
- }
-
return 0;
+err_ncsi_dev:
err_register_netdev:
- phy_disconnect(netdev->phydev);
-err_mii_probe:
- mdiobus_unregister(priv->mii_bus);
-err_register_mdiobus:
- mdiobus_free(priv->mii_bus);
-err_alloc_mdiobus:
+ ftgmac100_destroy_mdio(netdev);
+err_setup_mdio:
iounmap(priv->base);
err_ioremap:
release_resource(priv->res);
@@ -1280,10 +1422,7 @@ static int __exit ftgmac100_remove(struct platform_device *pdev)
priv = netdev_priv(netdev);
unregister_netdev(netdev);
-
- phy_disconnect(netdev->phydev);
- mdiobus_unregister(priv->mii_bus);
- mdiobus_free(priv->mii_bus);
+ ftgmac100_destroy_mdio(netdev);
iounmap(priv->base);
release_resource(priv->res);
@@ -1293,14 +1432,20 @@ static int __exit ftgmac100_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ftgmac100_of_match[] = {
+ { .compatible = "faraday,ftgmac100" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ftgmac100_of_match);
+
static struct platform_driver ftgmac100_driver = {
- .probe = ftgmac100_probe,
- .remove = __exit_p(ftgmac100_remove),
- .driver = {
- .name = DRV_NAME,
+ .probe = ftgmac100_probe,
+ .remove = __exit_p(ftgmac100_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = ftgmac100_of_match,
},
};
-
module_platform_driver(ftgmac100_driver);
MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index f58f9ea51..c865135f3 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -442,6 +442,10 @@ struct bufdesc_ex {
#define FEC_QUIRK_SINGLE_MDIO (1 << 11)
/* Controller supports RACC register */
#define FEC_QUIRK_HAS_RACC (1 << 12)
+/* Controller supports interrupt coalescing */
+#define FEC_QUIRK_HAS_COALESCE (1 << 13)
+/* Interrupt doesn't wake CPU from deep idle */
+#define FEC_QUIRK_ERR006687 (1 << 14)
struct bufdesc_prop {
int qid;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index fea0f330d..692ee248e 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -60,6 +60,7 @@
#include <linux/if_vlan.h>
#include <linux/pinctrl/consumer.h>
#include <linux/prefetch.h>
+#include <soc/imx/cpuidle.h>
#include <asm/cacheflush.h>
@@ -88,10 +89,10 @@ static struct platform_device_id fec_devtype[] = {
.driver_data = 0,
}, {
.name = "imx25-fec",
- .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_HAS_RACC,
+ .driver_data = FEC_QUIRK_USE_GASKET,
}, {
.name = "imx27-fec",
- .driver_data = FEC_QUIRK_HAS_RACC,
+ .driver_data = 0,
}, {
.name = "imx28-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
@@ -111,7 +112,13 @@ static struct platform_device_id fec_devtype[] = {
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
- FEC_QUIRK_HAS_RACC,
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
+ }, {
+ .name = "imx6ul-fec",
+ .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_BUG_CAPTURE |
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
}, {
/* sentinel */
}
@@ -125,6 +132,7 @@ enum imx_fec_type {
IMX6Q_FEC,
MVF600_FEC,
IMX6SX_FEC,
+ IMX6UL_FEC,
};
static const struct of_device_id fec_dt_ids[] = {
@@ -134,6 +142,7 @@ static const struct of_device_id fec_dt_ids[] = {
{ .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
{ .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
{ .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
+ { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);
@@ -171,6 +180,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
/* FEC receive acceleration */
#define FEC_RACC_IPDIS (1 << 1)
#define FEC_RACC_PRODIS (1 << 2)
+#define FEC_RACC_SHIFT16 BIT(7)
#define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS)
/*
@@ -936,9 +946,11 @@ fec_restart(struct net_device *ndev)
#if !defined(CONFIG_M5272)
if (fep->quirks & FEC_QUIRK_HAS_RACC) {
- /* set RX checksum */
val = readl(fep->hwp + FEC_RACC);
+ /* align IP header */
+ val |= FEC_RACC_SHIFT16;
if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
+ /* set RX checksum */
val |= FEC_RACC_OPTIONS;
else
val &= ~FEC_RACC_OPTIONS;
@@ -1419,6 +1431,12 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
prefetch(skb->data - NET_IP_ALIGN);
skb_put(skb, pkt_len - 4);
data = skb->data;
+
+#if !defined(CONFIG_M5272)
+ if (fep->quirks & FEC_QUIRK_HAS_RACC)
+ data = skb_pull_inline(skb, 2);
+#endif
+
if (!is_copybreak && need_swap)
swap_buffer(data, pkt_len);
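
With FEC_RACC_SHIFT16 set in fec_restart() above, the controller prepends two padding bytes to each received frame so the IP header lands on a 4-byte boundary; the skb_pull_inline(skb, 2) above drops them again. The assumed buffer layout:

/* [2 pad bytes][14-byte Ethernet header][IP header ...]
 *  ^-- discarded by skb_pull_inline(skb, 2)
 */
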
@@ -2358,9 +2376,6 @@ static void fec_enet_itr_coal_set(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
int rx_itr, tx_itr;
- if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
- return;
-
/* Must be greater than zero to avoid unpredictable behavior */
if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
!fep->tx_time_itr || !fep->tx_pkts_itr)
@@ -2383,10 +2398,12 @@ static void fec_enet_itr_coal_set(struct net_device *ndev)
writel(tx_itr, fep->hwp + FEC_TXIC0);
writel(rx_itr, fep->hwp + FEC_RXIC0);
- writel(tx_itr, fep->hwp + FEC_TXIC1);
- writel(rx_itr, fep->hwp + FEC_RXIC1);
- writel(tx_itr, fep->hwp + FEC_TXIC2);
- writel(rx_itr, fep->hwp + FEC_RXIC2);
+ if (fep->quirks & FEC_QUIRK_HAS_AVB) {
+ writel(tx_itr, fep->hwp + FEC_TXIC1);
+ writel(rx_itr, fep->hwp + FEC_RXIC1);
+ writel(tx_itr, fep->hwp + FEC_TXIC2);
+ writel(rx_itr, fep->hwp + FEC_RXIC2);
+ }
}
static int
@@ -2394,7 +2411,7 @@ fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
+ if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
return -EOPNOTSUPP;
ec->rx_coalesce_usecs = fep->rx_time_itr;
@@ -2412,7 +2429,7 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
struct fec_enet_private *fep = netdev_priv(ndev);
unsigned int cycle;
- if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
+ if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
return -EOPNOTSUPP;
if (ec->rx_max_coalesced_frames > 255) {
@@ -2818,6 +2835,9 @@ fec_enet_open(struct net_device *ndev)
if (ret)
goto err_enet_mii_probe;
+ if (fep->quirks & FEC_QUIRK_ERR006687)
+ imx6q_cpuidle_fec_irqs_used();
+
napi_enable(&fep->napi);
phy_start(ndev->phydev);
netif_tx_start_all_queues(ndev);
@@ -2853,6 +2873,9 @@ fec_enet_close(struct net_device *ndev)
phy_disconnect(ndev->phydev);
+ if (fep->quirks & FEC_QUIRK_ERR006687)
+ imx6q_cpuidle_fec_irqs_unused();
+
fec_enet_clk_enable(ndev, false);
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
pm_runtime_mark_last_busy(&fep->pdev->dev);
@@ -3191,7 +3214,12 @@ static void fec_reset_phy(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
return;
}
- msleep(msec);
+
+ if (msec > 20)
+ msleep(msec);
+ else
+ usleep_range(msec * 1000, msec * 1000 + 1000);
+
gpio_set_value_cansleep(phy_reset, !active_high);
}
#else /* CONFIG_OF */
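
This follows the kernel's timer guidance (Documentation/timers/timers-howto.txt): msleep() is jiffies-based and may sleep around 20 ms for any requested value in the 1-20 ms range, so short PHY reset pulses use usleep_range() instead:

/* e.g. a 10 ms reset pulse on an HZ=100 kernel:
 *   msleep(10)                  -> may actually sleep ~20 ms
 *   usleep_range(10000, 11000)  -> hrtimer-backed, stays near 10 ms
 */
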
@@ -3292,6 +3320,11 @@ fec_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
+ if ((of_machine_is_compatible("fsl,imx6q") ||
+ of_machine_is_compatible("fsl,imx6dl")) &&
+ !of_property_read_bool(np, "fsl,err006687-workaround-present"))
+ fep->quirks |= FEC_QUIRK_ERR006687;
+
if (of_get_property(np, "fsl,magic-packet", NULL))
fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 2e6785b6e..4b4f5bc0e 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2275,7 +2275,7 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
fcb->flags = flags;
}
-void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
+static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
fcb->flags |= TXFCB_VLN;
fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
@@ -2922,17 +2922,25 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
{
unsigned int size = lstatus & BD_LENGTH_MASK;
struct page *page = rxb->page;
+ bool last = !!(lstatus & BD_LFLAG(RXBD_LAST));
/* Remove the FCS from the packet length */
- if (likely(lstatus & BD_LFLAG(RXBD_LAST)))
+ if (last)
size -= ETH_FCS_LEN;
- if (likely(first))
+ if (likely(first)) {
skb_put(skb, size);
- else
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- rxb->page_offset + RXBUF_ALIGNMENT,
- size, GFAR_RXB_TRUESIZE);
+ } else {
+ /* the last fragment's length field contains the full frame length */
+ if (last)
+ size -= skb->len;
+
+ /* in case the last fragment consisted only of the FCS */
+ if (size > 0)
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rxb->page_offset + RXBUF_ALIGNMENT,
+ size, GFAR_RXB_TRUESIZE);
+ }
/* try reuse page */
if (unlikely(page_count(page) != 1))
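
A worked example of the fragment accounting above, with hypothetical buffer fills:

/* 3000-byte frame + 4-byte FCS arriving in 1600-byte buffers:
 *   first BD: size = 1600            -> skb_put(skb, 1600)
 *   last BD:  size = 3004 - 4 = 3000 -> frag of 3000 - 1600 = 1400 bytes
 * If only the FCS spills into the last buffer, size - skb->len comes to 0
 * and the empty fragment is skipped.
 */
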
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 373fd094f..6e8a9c846 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -100,7 +100,8 @@ extern const char gfar_driver_version[];
#define DEFAULT_RX_LFC_THR 16
#define DEFAULT_LFC_PTVVAL 4
-#define GFAR_RXB_SIZE 1536
+/* prevent fragmentation by HW in DSA environments */
+#define GFAR_RXB_SIZE roundup(1536 + 8, 64)
#define GFAR_SKBFRAG_SIZE (RXBUF_ALIGNMENT + GFAR_RXB_SIZE \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define GFAR_RXB_TRUESIZE 2048
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 4ccc03263..d11287e11 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_HISILICON
bool "Hisilicon devices"
default y
- depends on OF && HAS_DMA
+ depends on (OF || ACPI) && HAS_DMA
depends on ARM || ARM64 || COMPILE_TEST
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -23,6 +23,18 @@ config HIX5HD2_GMAC
help
This selects the hix5hd2 mac family network device.
+config HISI_FEMAC
+ tristate "Hisilicon Fast Ethernet MAC device support"
+ depends on HAS_IOMEM
+ select PHYLIB
+ select RESET_CONTROLLER
+ help
+ This selects the Hisilicon Fast Ethernet MAC device (FEMAC).
+ The FEMAC receives and transmits data over Ethernet
+ ports at 10/100 Mbps in full-duplex or half-duplex mode.
+ The FEMAC exchanges data with the CPU and supports
+ energy-efficient Ethernet (EEE).
+
config HIP04_ETH
tristate "HISILICON P04 Ethernet support"
depends on HAS_IOMEM # For MFD_SYSCON
diff --git a/drivers/net/ethernet/hisilicon/Makefile b/drivers/net/ethernet/hisilicon/Makefile
index 390b71fb3..866169502 100644
--- a/drivers/net/ethernet/hisilicon/Makefile
+++ b/drivers/net/ethernet/hisilicon/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_HIX5HD2_GMAC) += hix5hd2_gmac.o
obj-$(CONFIG_HIP04_ETH) += hip04_eth.o
obj-$(CONFIG_HNS_MDIO) += hns_mdio.o
obj-$(CONFIG_HNS) += hns/
+obj-$(CONFIG_HISI_FEMAC) += hisi_femac.o
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
new file mode 100644
index 000000000..b5d7ad025
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -0,0 +1,1007 @@
+/*
+ * Hisilicon Fast Ethernet MAC Driver
+ *
+ * Copyright (c) 2016 HiSilicon Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/circ_buf.h>
+#include <linux/clk.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+/* MAC control register list */
+#define MAC_PORTSEL 0x0200
+#define MAC_PORTSEL_STAT_CPU BIT(0)
+#define MAC_PORTSEL_RMII BIT(1)
+#define MAC_PORTSET 0x0208
+#define MAC_PORTSET_DUPLEX_FULL BIT(0)
+#define MAC_PORTSET_LINKED BIT(1)
+#define MAC_PORTSET_SPEED_100M BIT(2)
+#define MAC_SET 0x0210
+#define MAX_FRAME_SIZE 1600
+#define MAX_FRAME_SIZE_MASK GENMASK(10, 0)
+#define BIT_PAUSE_EN BIT(18)
+#define RX_COALESCE_SET 0x0340
+#define RX_COALESCED_FRAME_OFFSET 24
+#define RX_COALESCED_FRAMES 8
+#define RX_COALESCED_TIMER 0x74
+#define QLEN_SET 0x0344
+#define RX_DEPTH_OFFSET 8
+#define MAX_HW_FIFO_DEPTH 64
+#define HW_TX_FIFO_DEPTH 12
+#define HW_RX_FIFO_DEPTH (MAX_HW_FIFO_DEPTH - HW_TX_FIFO_DEPTH)
+#define IQFRM_DES 0x0354
+#define RX_FRAME_LEN_MASK GENMASK(11, 0)
+#define IQ_ADDR 0x0358
+#define EQ_ADDR 0x0360
+#define EQFRM_LEN 0x0364
+#define ADDRQ_STAT 0x036C
+#define TX_CNT_INUSE_MASK GENMASK(5, 0)
+#define BIT_TX_READY BIT(24)
+#define BIT_RX_READY BIT(25)
+/* global control register list */
+#define GLB_HOSTMAC_L32 0x0000
+#define GLB_HOSTMAC_H16 0x0004
+#define GLB_SOFT_RESET 0x0008
+#define SOFT_RESET_ALL BIT(0)
+#define GLB_FWCTRL 0x0010
+#define FWCTRL_VLAN_ENABLE BIT(0)
+#define FWCTRL_FW2CPU_ENA BIT(5)
+#define FWCTRL_FWALL2CPU BIT(7)
+#define GLB_MACTCTRL 0x0014
+#define MACTCTRL_UNI2CPU BIT(1)
+#define MACTCTRL_MULTI2CPU BIT(3)
+#define MACTCTRL_BROAD2CPU BIT(5)
+#define MACTCTRL_MACT_ENA BIT(7)
+#define GLB_IRQ_STAT 0x0030
+#define GLB_IRQ_ENA 0x0034
+#define IRQ_ENA_PORT0_MASK GENMASK(7, 0)
+#define IRQ_ENA_PORT0 BIT(18)
+#define IRQ_ENA_ALL BIT(19)
+#define GLB_IRQ_RAW 0x0038
+#define IRQ_INT_RX_RDY BIT(0)
+#define IRQ_INT_TX_PER_PACKET BIT(1)
+#define IRQ_INT_TX_FIFO_EMPTY BIT(6)
+#define IRQ_INT_MULTI_RXRDY BIT(7)
+#define DEF_INT_MASK (IRQ_INT_MULTI_RXRDY | \
+ IRQ_INT_TX_PER_PACKET | \
+ IRQ_INT_TX_FIFO_EMPTY)
+#define GLB_MAC_L32_BASE 0x0100
+#define GLB_MAC_H16_BASE 0x0104
+#define MACFLT_HI16_MASK GENMASK(15, 0)
+#define BIT_MACFLT_ENA BIT(17)
+#define BIT_MACFLT_FW2CPU BIT(21)
+#define GLB_MAC_H16(reg) (GLB_MAC_H16_BASE + ((reg) * 0x8))
+#define GLB_MAC_L32(reg) (GLB_MAC_L32_BASE + ((reg) * 0x8))
+#define MAX_MAC_FILTER_NUM 8
+#define MAX_UNICAST_ADDRESSES 2
+#define MAX_MULTICAST_ADDRESSES (MAX_MAC_FILTER_NUM - \
+ MAX_UNICAST_ADDRESSES)
+/* software TX and RX queue sizes; must be powers of 2 */
+#define TXQ_NUM 64
+#define RXQ_NUM 128
+#define FEMAC_POLL_WEIGHT 16
+
+#define PHY_RESET_DELAYS_PROPERTY "hisilicon,phy-reset-delays-us"
+
+enum phy_reset_delays {
+ PRE_DELAY,
+ PULSE,
+ POST_DELAY,
+ DELAYS_NUM,
+};
+
+struct hisi_femac_queue {
+ struct sk_buff **skb;
+ dma_addr_t *dma_phys;
+ int num;
+ unsigned int head;
+ unsigned int tail;
+};
+
+struct hisi_femac_priv {
+ void __iomem *port_base;
+ void __iomem *glb_base;
+ struct clk *clk;
+ struct reset_control *mac_rst;
+ struct reset_control *phy_rst;
+ u32 phy_reset_delays[DELAYS_NUM];
+ u32 link_status;
+
+ struct device *dev;
+ struct net_device *ndev;
+
+ struct hisi_femac_queue txq;
+ struct hisi_femac_queue rxq;
+ u32 tx_fifo_used_cnt;
+ struct napi_struct napi;
+};
+
+static void hisi_femac_irq_enable(struct hisi_femac_priv *priv, int irqs)
+{
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_IRQ_ENA);
+ writel(val | irqs, priv->glb_base + GLB_IRQ_ENA);
+}
+
+static void hisi_femac_irq_disable(struct hisi_femac_priv *priv, int irqs)
+{
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_IRQ_ENA);
+ writel(val & (~irqs), priv->glb_base + GLB_IRQ_ENA);
+}
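Both helpers above are the same read-modify-write on GLB_IRQ_ENA; note there is no locking, so callers must already be serialized (here by the interrupt handler and NAPI discipline). A folded sketch, with the enable direction as an assumed parameter:

static void femac_irq_rmw(void __iomem *glb_base, u32 irqs, bool enable)
{
	u32 val = readl(glb_base + GLB_IRQ_ENA);

	if (enable)
		val |= irqs;
	else
		val &= ~irqs;
	writel(val, glb_base + GLB_IRQ_ENA);
}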
+
+static void hisi_femac_tx_dma_unmap(struct hisi_femac_priv *priv,
+ struct sk_buff *skb, unsigned int pos)
+{
+ dma_addr_t dma_addr;
+
+ dma_addr = priv->txq.dma_phys[pos];
+ dma_unmap_single(priv->dev, dma_addr, skb->len, DMA_TO_DEVICE);
+}
+
+static void hisi_femac_xmit_reclaim(struct net_device *dev)
+{
+ struct sk_buff *skb;
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+ struct hisi_femac_queue *txq = &priv->txq;
+ unsigned int bytes_compl = 0, pkts_compl = 0;
+ u32 val;
+
+ netif_tx_lock(dev);
+
+ val = readl(priv->port_base + ADDRQ_STAT) & TX_CNT_INUSE_MASK;
+ while (val < priv->tx_fifo_used_cnt) {
+ skb = txq->skb[txq->tail];
+ if (unlikely(!skb)) {
+ netdev_err(dev, "xmitq_cnt_inuse=%d, tx_fifo_used=%d\n",
+ val, priv->tx_fifo_used_cnt);
+ break;
+ }
+ hisi_femac_tx_dma_unmap(priv, skb, txq->tail);
+ pkts_compl++;
+ bytes_compl += skb->len;
+ dev_kfree_skb_any(skb);
+
+ priv->tx_fifo_used_cnt--;
+
+ val = readl(priv->port_base + ADDRQ_STAT) & TX_CNT_INUSE_MASK;
+ txq->skb[txq->tail] = NULL;
+ txq->tail = (txq->tail + 1) % txq->num;
+ }
+
+ netdev_completed_queue(dev, pkts_compl, bytes_compl);
+
+ if (unlikely(netif_queue_stopped(dev)) && pkts_compl)
+ netif_wake_queue(dev);
+
+ netif_tx_unlock(dev);
+}
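The reclaim loop trusts the hardware's in-use counter rather than per-descriptor done bits, and its netdev_completed_queue() call pairs with netdev_sent_queue() in the xmit path to drive byte queue limits (BQL). The per-slot teardown reduces to a hedged sketch like this:

/* sketch: free one completed TX slot and advance the ring tail */
static void femac_reclaim_one(struct hisi_femac_queue *q, struct device *dev)
{
	struct sk_buff *skb = q->skb[q->tail];

	dma_unmap_single(dev, q->dma_phys[q->tail], skb->len, DMA_TO_DEVICE);
	dev_kfree_skb_any(skb);
	q->skb[q->tail] = NULL;
	q->tail = (q->tail + 1) % q->num;
}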
+
+static void hisi_femac_adjust_link(struct net_device *dev)
+{
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+ struct phy_device *phy = dev->phydev;
+ u32 status = 0;
+
+ if (phy->link)
+ status |= MAC_PORTSET_LINKED;
+ if (phy->duplex == DUPLEX_FULL)
+ status |= MAC_PORTSET_DUPLEX_FULL;
+ if (phy->speed == SPEED_100)
+ status |= MAC_PORTSET_SPEED_100M;
+
+ if ((status != priv->link_status) &&
+ ((status | priv->link_status) & MAC_PORTSET_LINKED)) {
+ writel(status, priv->port_base + MAC_PORTSET);
+ priv->link_status = status;
+ phy_print_status(phy);
+ }
+}
+
+static void hisi_femac_rx_refill(struct hisi_femac_priv *priv)
+{
+ struct hisi_femac_queue *rxq = &priv->rxq;
+ struct sk_buff *skb;
+ u32 pos;
+ u32 len = MAX_FRAME_SIZE;
+ dma_addr_t addr;
+
+ pos = rxq->head;
+ while (readl(priv->port_base + ADDRQ_STAT) & BIT_RX_READY) {
+ if (!CIRC_SPACE(pos, rxq->tail, rxq->num))
+ break;
+ if (unlikely(rxq->skb[pos])) {
+ netdev_err(priv->ndev, "err skb[%d]=%p\n",
+ pos, rxq->skb[pos]);
+ break;
+ }
+ skb = netdev_alloc_skb_ip_align(priv->ndev, len);
+ if (unlikely(!skb))
+ break;
+
+ addr = dma_map_single(priv->dev, skb->data, len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(priv->dev, addr)) {
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ rxq->dma_phys[pos] = addr;
+ rxq->skb[pos] = skb;
+ writel(addr, priv->port_base + IQ_ADDR);
+ pos = (pos + 1) % rxq->num;
+ }
+ rxq->head = pos;
+}
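The refill loop stops either when the hardware input queue is full (BIT_RX_READY clears) or when the software ring is full; CIRC_SPACE() from <linux/circ_buf.h> provides the latter test and requires a power-of-two ring size, which is why TXQ_NUM and RXQ_NUM are declared that way above. In isolation:

#include <linux/circ_buf.h>

/* true while the producer may claim another slot; head == tail is
 * reserved to mean "empty", so one slot is always left unused
 */
static bool femac_ring_has_room(unsigned int head, unsigned int tail,
				unsigned int size)
{
	return CIRC_SPACE(head, tail, size) > 0;
}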
+
+static int hisi_femac_rx(struct net_device *dev, int limit)
+{
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+ struct hisi_femac_queue *rxq = &priv->rxq;
+ struct sk_buff *skb;
+ dma_addr_t addr;
+ u32 rx_pkt_info, pos, len, rx_pkts_num = 0;
+
+ pos = rxq->tail;
+ while (readl(priv->glb_base + GLB_IRQ_RAW) & IRQ_INT_RX_RDY) {
+ rx_pkt_info = readl(priv->port_base + IQFRM_DES);
+ len = rx_pkt_info & RX_FRAME_LEN_MASK;
+ len -= ETH_FCS_LEN;
+
+ /* tell hardware we will deal with this packet */
+ writel(IRQ_INT_RX_RDY, priv->glb_base + GLB_IRQ_RAW);
+
+ rx_pkts_num++;
+
+ skb = rxq->skb[pos];
+ if (unlikely(!skb)) {
+ netdev_err(dev, "rx skb NULL. pos=%d\n", pos);
+ break;
+ }
+ rxq->skb[pos] = NULL;
+
+ addr = rxq->dma_phys[pos];
+ dma_unmap_single(priv->dev, addr, MAX_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+ skb_put(skb, len);
+ if (unlikely(skb->len > MAX_FRAME_SIZE)) {
+ netdev_err(dev, "rcv len err, len = %d\n", skb->len);
+ dev->stats.rx_errors++;
+ dev->stats.rx_length_errors++;
+ dev_kfree_skb_any(skb);
+ goto next;
+ }
+
+ skb->protocol = eth_type_trans(skb, dev);
+ napi_gro_receive(&priv->napi, skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
+next:
+ pos = (pos + 1) % rxq->num;
+ if (rx_pkts_num >= limit)
+ break;
+ }
+ rxq->tail = pos;
+
+ hisi_femac_rx_refill(priv);
+
+ return rx_pkts_num;
+}
+
+static int hisi_femac_poll(struct napi_struct *napi, int budget)
+{
+ struct hisi_femac_priv *priv = container_of(napi,
+ struct hisi_femac_priv, napi);
+ struct net_device *dev = priv->ndev;
+ int work_done = 0, task = budget;
+ int ints, num;
+
+ do {
+ hisi_femac_xmit_reclaim(dev);
+ num = hisi_femac_rx(dev, task);
+ work_done += num;
+ task -= num;
+ if (work_done >= budget)
+ break;
+
+ ints = readl(priv->glb_base + GLB_IRQ_RAW);
+ writel(ints & DEF_INT_MASK,
+ priv->glb_base + GLB_IRQ_RAW);
+ } while (ints & DEF_INT_MASK);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ hisi_femac_irq_enable(priv, DEF_INT_MASK &
+ (~IRQ_INT_TX_PER_PACKET));
+ }
+
+ return work_done;
+}
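hisi_femac_poll() follows the canonical interrupt-mitigation shape: the IRQ handler masks the sources and schedules NAPI; poll drains work until either the budget is spent (stay scheduled, IRQs stay masked) or the raw status goes quiet, and only then calls napi_complete() and unmasks. A generic skeleton of that contract; the three helpers are hypothetical placeholders, not driver functions:

static int poll_skeleton(struct napi_struct *napi, int budget)
{
	int done = 0;

	do {
		done += drain_rx_tx(napi, budget - done); /* hypothetical */
		if (done >= budget)
			return budget;	/* keep polling; IRQs stay masked */
	} while (events_pending(napi));			  /* hypothetical */

	napi_complete(napi);
	unmask_device_irqs(napi);			  /* hypothetical */
	return done;
}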
+
+static irqreturn_t hisi_femac_interrupt(int irq, void *dev_id)
+{
+ int ints;
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+
+ ints = readl(priv->glb_base + GLB_IRQ_RAW);
+
+ if (likely(ints & DEF_INT_MASK)) {
+ writel(ints & DEF_INT_MASK,
+ priv->glb_base + GLB_IRQ_RAW);
+ hisi_femac_irq_disable(priv, DEF_INT_MASK);
+ napi_schedule(&priv->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int hisi_femac_init_queue(struct device *dev,
+ struct hisi_femac_queue *queue,
+ unsigned int num)
+{
+ queue->skb = devm_kcalloc(dev, num, sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!queue->skb)
+ return -ENOMEM;
+
+ queue->dma_phys = devm_kcalloc(dev, num, sizeof(dma_addr_t),
+ GFP_KERNEL);
+ if (!queue->dma_phys)
+ return -ENOMEM;
+
+ queue->num = num;
+ queue->head = 0;
+ queue->tail = 0;
+
+ return 0;
+}
+
+static int hisi_femac_init_tx_and_rx_queues(struct hisi_femac_priv *priv)
+{
+ int ret;
+
+ ret = hisi_femac_init_queue(priv->dev, &priv->txq, TXQ_NUM);
+ if (ret)
+ return ret;
+
+ ret = hisi_femac_init_queue(priv->dev, &priv->rxq, RXQ_NUM);
+ if (ret)
+ return ret;
+
+ priv->tx_fifo_used_cnt = 0;
+
+ return 0;
+}
+
+static void hisi_femac_free_skb_rings(struct hisi_femac_priv *priv)
+{
+ struct hisi_femac_queue *txq = &priv->txq;
+ struct hisi_femac_queue *rxq = &priv->rxq;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ u32 pos;
+
+ pos = rxq->tail;
+ while (pos != rxq->head) {
+ skb = rxq->skb[pos];
+ if (unlikely(!skb)) {
+ netdev_err(priv->ndev, "NULL rx skb. pos=%d, head=%d\n",
+ pos, rxq->head);
+ /* advance past the bad slot; continuing without
+ * moving pos would spin forever
+ */
+ pos = (pos + 1) % rxq->num;
+ continue;
+ }
+
+ dma_addr = rxq->dma_phys[pos];
+ dma_unmap_single(priv->dev, dma_addr, MAX_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+
+ dev_kfree_skb_any(skb);
+ rxq->skb[pos] = NULL;
+ pos = (pos + 1) % rxq->num;
+ }
+ rxq->tail = pos;
+
+ pos = txq->tail;
+ while (pos != txq->head) {
+ skb = txq->skb[pos];
+ if (unlikely(!skb)) {
+ netdev_err(priv->ndev, "NULL tx skb. pos=%d, head=%d\n",
+ pos, txq->head);
+ /* advance past the bad slot; continuing without
+ * moving pos would spin forever
+ */
+ pos = (pos + 1) % txq->num;
+ continue;
+ }
+ hisi_femac_tx_dma_unmap(priv, skb, pos);
+ dev_kfree_skb_any(skb);
+ txq->skb[pos] = NULL;
+ pos = (pos + 1) % txq->num;
+ }
+ txq->tail = pos;
+ priv->tx_fifo_used_cnt = 0;
+}
+
+static int hisi_femac_set_hw_mac_addr(struct hisi_femac_priv *priv,
+ unsigned char *mac)
+{
+ u32 reg;
+
+ reg = mac[1] | (mac[0] << 8);
+ writel(reg, priv->glb_base + GLB_HOSTMAC_H16);
+
+ reg = mac[5] | (mac[4] << 8) | (mac[3] << 16) | (mac[2] << 24);
+ writel(reg, priv->glb_base + GLB_HOSTMAC_L32);
+
+ return 0;
+}
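The host MAC address is split big-endian across two registers: the first two octets go into GLB_HOSTMAC_H16 and the remaining four into GLB_HOSTMAC_L32. Worked example: 00:11:22:33:44:55 yields H16 = 0x0011 and L32 = 0x22334455. The packing in isolation:

static void femac_pack_mac(const u8 *mac, u32 *h16, u32 *l32)
{
	*h16 = (mac[0] << 8) | mac[1];
	*l32 = (mac[2] << 24) | (mac[3] << 16) |
	       (mac[4] << 8) | mac[5];
}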
+
+static int hisi_femac_port_reset(struct hisi_femac_priv *priv)
+{
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_SOFT_RESET);
+ val |= SOFT_RESET_ALL;
+ writel(val, priv->glb_base + GLB_SOFT_RESET);
+
+ usleep_range(500, 800);
+
+ val &= ~SOFT_RESET_ALL;
+ writel(val, priv->glb_base + GLB_SOFT_RESET);
+
+ return 0;
+}
+
+static int hisi_femac_net_open(struct net_device *dev)
+{
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+
+ hisi_femac_port_reset(priv);
+ hisi_femac_set_hw_mac_addr(priv, dev->dev_addr);
+ hisi_femac_rx_refill(priv);
+
+ netif_carrier_off(dev);
+ netdev_reset_queue(dev);
+ netif_start_queue(dev);
+ napi_enable(&priv->napi);
+
+ priv->link_status = 0;
+ if (dev->phydev)
+ phy_start(dev->phydev);
+
+ writel(IRQ_ENA_PORT0_MASK, priv->glb_base + GLB_IRQ_RAW);
+ hisi_femac_irq_enable(priv, IRQ_ENA_ALL | IRQ_ENA_PORT0 | DEF_INT_MASK);
+
+ return 0;
+}
+
+static int hisi_femac_net_close(struct net_device *dev)
+{
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+
+ hisi_femac_irq_disable(priv, IRQ_ENA_PORT0);
+
+ if (dev->phydev)
+ phy_stop(dev->phydev);
+
+ netif_stop_queue(dev);
+ napi_disable(&priv->napi);
+
+ hisi_femac_free_skb_rings(priv);
+
+ return 0;
+}
+
+static netdev_tx_t hisi_femac_net_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+ struct hisi_femac_queue *txq = &priv->txq;
+ dma_addr_t addr;
+ u32 val;
+
+ val = readl(priv->port_base + ADDRQ_STAT);
+ val &= BIT_TX_READY;
+ if (!val) {
+ hisi_femac_irq_enable(priv, IRQ_INT_TX_PER_PACKET);
+ dev->stats.tx_dropped++;
+ dev->stats.tx_fifo_errors++;
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (unlikely(!CIRC_SPACE(txq->head, txq->tail,
+ txq->num))) {
+ hisi_femac_irq_enable(priv, IRQ_INT_TX_PER_PACKET);
+ dev->stats.tx_dropped++;
+ dev->stats.tx_fifo_errors++;
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ addr = dma_map_single(priv->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, addr))) {
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+ txq->dma_phys[txq->head] = addr;
+
+ txq->skb[txq->head] = skb;
+ txq->head = (txq->head + 1) % txq->num;
+
+ writel(addr, priv->port_base + EQ_ADDR);
+ writel(skb->len + ETH_FCS_LEN, priv->port_base + EQFRM_LEN);
+
+ priv->tx_fifo_used_cnt++;
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ netdev_sent_queue(dev, skb->len);
+
+ return NETDEV_TX_OK;
+}
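Both full-FIFO branches in the xmit path stop the queue, arm the per-packet TX interrupt, and return NETDEV_TX_BUSY, so the core requeues the skb; the reclaim path's netif_wake_queue() restarts transmission once slots free up. (Bumping tx_dropped for a packet that will be requeued is arguably misleading, but that is what the driver does.) The handshake in miniature:

/* sketch of the backpressure contract between xmit and reclaim */
static netdev_tx_t xmit_when_full(struct net_device *dev)
{
	netif_stop_queue(dev);		/* reclaim will wake us */
	return NETDEV_TX_BUSY;		/* core requeues the skb */
}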
+
+static int hisi_femac_set_mac_address(struct net_device *dev, void *p)
+{
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+ struct sockaddr *skaddr = p;
+
+ if (!is_valid_ether_addr(skaddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, skaddr->sa_data, dev->addr_len);
+ dev->addr_assign_type &= ~NET_ADDR_RANDOM;
+
+ hisi_femac_set_hw_mac_addr(priv, dev->dev_addr);
+
+ return 0;
+}
+
+static void hisi_femac_enable_hw_addr_filter(struct hisi_femac_priv *priv,
+ unsigned int reg_n, bool enable)
+{
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_MAC_H16(reg_n));
+ if (enable)
+ val |= BIT_MACFLT_ENA;
+ else
+ val &= ~BIT_MACFLT_ENA;
+ writel(val, priv->glb_base + GLB_MAC_H16(reg_n));
+}
+
+static void hisi_femac_set_hw_addr_filter(struct hisi_femac_priv *priv,
+ unsigned char *addr,
+ unsigned int reg_n)
+{
+ unsigned int high, low;
+ u32 val;
+
+ high = GLB_MAC_H16(reg_n);
+ low = GLB_MAC_L32(reg_n);
+
+ val = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
+ writel(val, priv->glb_base + low);
+
+ val = readl(priv->glb_base + high);
+ val &= ~MACFLT_HI16_MASK;
+ val |= ((addr[0] << 8) | addr[1]);
+ val |= (BIT_MACFLT_ENA | BIT_MACFLT_FW2CPU);
+ writel(val, priv->glb_base + high);
+}
+
+static void hisi_femac_set_promisc_mode(struct hisi_femac_priv *priv,
+ bool promisc_mode)
+{
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_FWCTRL);
+ if (promisc_mode)
+ val |= FWCTRL_FWALL2CPU;
+ else
+ val &= ~FWCTRL_FWALL2CPU;
+ writel(val, priv->glb_base + GLB_FWCTRL);
+}
+
+/* Handle multiple multicast addresses (perfect filtering) */
+static void hisi_femac_set_mc_addr_filter(struct hisi_femac_priv *priv)
+{
+ struct net_device *dev = priv->ndev;
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_MACTCTRL);
+ if ((netdev_mc_count(dev) > MAX_MULTICAST_ADDRESSES) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ val |= MACTCTRL_MULTI2CPU;
+ } else {
+ int reg = MAX_UNICAST_ADDRESSES;
+ int i;
+ struct netdev_hw_addr *ha;
+
+ for (i = reg; i < MAX_MAC_FILTER_NUM; i++)
+ hisi_femac_enable_hw_addr_filter(priv, i, false);
+
+ netdev_for_each_mc_addr(ha, dev) {
+ hisi_femac_set_hw_addr_filter(priv, ha->addr, reg);
+ reg++;
+ }
+ val &= ~MACTCTRL_MULTI2CPU;
+ }
+ writel(val, priv->glb_base + GLB_MACTCTRL);
+}
+
+/* Handle multiple unicast addresses (perfect filtering) */
+static void hisi_femac_set_uc_addr_filter(struct hisi_femac_priv *priv)
+{
+ struct net_device *dev = priv->ndev;
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_MACTCTRL);
+ if (netdev_uc_count(dev) > MAX_UNICAST_ADDRESSES) {
+ val |= MACTCTRL_UNI2CPU;
+ } else {
+ int reg = 0;
+ int i;
+ struct netdev_hw_addr *ha;
+
+ for (i = reg; i < MAX_UNICAST_ADDRESSES; i++)
+ hisi_femac_enable_hw_addr_filter(priv, i, false);
+
+ netdev_for_each_uc_addr(ha, dev) {
+ hisi_femac_set_hw_addr_filter(priv, ha->addr, reg);
+ reg++;
+ }
+ val &= ~MACTCTRL_UNI2CPU;
+ }
+ writel(val, priv->glb_base + GLB_MACTCTRL);
+}
+
+static void hisi_femac_net_set_rx_mode(struct net_device *dev)
+{
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+
+ if (dev->flags & IFF_PROMISC) {
+ hisi_femac_set_promisc_mode(priv, true);
+ } else {
+ hisi_femac_set_promisc_mode(priv, false);
+ hisi_femac_set_mc_addr_filter(priv);
+ hisi_femac_set_uc_addr_filter(priv);
+ }
+}
+
+static int hisi_femac_net_ioctl(struct net_device *dev,
+ struct ifreq *ifreq, int cmd)
+{
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (!dev->phydev)
+ return -EINVAL;
+
+ return phy_mii_ioctl(dev->phydev, ifreq, cmd);
+}
+
+static struct ethtool_ops hisi_femac_ethtools_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+static const struct net_device_ops hisi_femac_netdev_ops = {
+ .ndo_open = hisi_femac_net_open,
+ .ndo_stop = hisi_femac_net_close,
+ .ndo_start_xmit = hisi_femac_net_xmit,
+ .ndo_do_ioctl = hisi_femac_net_ioctl,
+ .ndo_set_mac_address = hisi_femac_set_mac_address,
+ .ndo_set_rx_mode = hisi_femac_net_set_rx_mode,
+ .ndo_change_mtu = eth_change_mtu,
+};
+
+static void hisi_femac_core_reset(struct hisi_femac_priv *priv)
+{
+ reset_control_assert(priv->mac_rst);
+ reset_control_deassert(priv->mac_rst);
+}
+
+static void hisi_femac_sleep_us(u32 time_us)
+{
+ u32 time_ms;
+
+ if (!time_us)
+ return;
+
+ time_ms = DIV_ROUND_UP(time_us, 1000);
+ if (time_ms < 20)
+ usleep_range(time_us, time_us + 500);
+ else
+ msleep(time_ms);
+}
+
+static void hisi_femac_phy_reset(struct hisi_femac_priv *priv)
+{
+ /* To make sure the PHY hardware reset succeeds, the PHY must
+ * first be held in the deasserted state before the full reset
+ * sequence is applied
+ */
+ reset_control_deassert(priv->phy_rst);
+ hisi_femac_sleep_us(priv->phy_reset_delays[PRE_DELAY]);
+
+ reset_control_assert(priv->phy_rst);
+ /* hold the reset pulse long enough for the PHY to latch it;
+ * the required width is PHY-specific
+ */
+ hisi_femac_sleep_us(priv->phy_reset_delays[PULSE]);
+ reset_control_deassert(priv->phy_rst);
+ /* wait before the first MDIO access so the PHY is ready */
+ hisi_femac_sleep_us(priv->phy_reset_delays[POST_DELAY]);
+}
+
+static void hisi_femac_port_init(struct hisi_femac_priv *priv)
+{
+ u32 val;
+
+ /* the MAC takes link status and PHY mode from software configuration */
+ val = MAC_PORTSEL_STAT_CPU;
+ if (priv->ndev->phydev->interface == PHY_INTERFACE_MODE_RMII)
+ val |= MAC_PORTSEL_RMII;
+ writel(val, priv->port_base + MAC_PORTSEL);
+
+ /* clear all interrupt status */
+ writel(IRQ_ENA_PORT0_MASK, priv->glb_base + GLB_IRQ_RAW);
+ hisi_femac_irq_disable(priv, IRQ_ENA_PORT0_MASK | IRQ_ENA_PORT0);
+
+ val = readl(priv->glb_base + GLB_FWCTRL);
+ val &= ~(FWCTRL_VLAN_ENABLE | FWCTRL_FWALL2CPU);
+ val |= FWCTRL_FW2CPU_ENA;
+ writel(val, priv->glb_base + GLB_FWCTRL);
+
+ val = readl(priv->glb_base + GLB_MACTCTRL);
+ val |= (MACTCTRL_BROAD2CPU | MACTCTRL_MACT_ENA);
+ writel(val, priv->glb_base + GLB_MACTCTRL);
+
+ val = readl(priv->port_base + MAC_SET);
+ val &= ~MAX_FRAME_SIZE_MASK;
+ val |= MAX_FRAME_SIZE;
+ writel(val, priv->port_base + MAC_SET);
+
+ val = RX_COALESCED_TIMER |
+ (RX_COALESCED_FRAMES << RX_COALESCED_FRAME_OFFSET);
+ writel(val, priv->port_base + RX_COALESCE_SET);
+
+ val = (HW_RX_FIFO_DEPTH << RX_DEPTH_OFFSET) | HW_TX_FIFO_DEPTH;
+ writel(val, priv->port_base + QLEN_SET);
+}
+
+static int hisi_femac_drv_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ struct net_device *ndev;
+ struct hisi_femac_priv *priv;
+ struct phy_device *phy;
+ const char *mac_addr;
+ int ret;
+
+ ndev = alloc_etherdev(sizeof(*priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ndev);
+
+ priv = netdev_priv(ndev);
+ priv->dev = dev;
+ priv->ndev = ndev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->port_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->port_base)) {
+ ret = PTR_ERR(priv->port_base);
+ goto out_free_netdev;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->glb_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->glb_base)) {
+ ret = PTR_ERR(priv->glb_base);
+ goto out_free_netdev;
+ }
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get clk\n");
+ ret = -ENODEV;
+ goto out_free_netdev;
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clk %d\n", ret);
+ goto out_free_netdev;
+ }
+
+ priv->mac_rst = devm_reset_control_get(dev, "mac");
+ if (IS_ERR(priv->mac_rst)) {
+ ret = PTR_ERR(priv->mac_rst);
+ goto out_disable_clk;
+ }
+ hisi_femac_core_reset(priv);
+
+ priv->phy_rst = devm_reset_control_get(dev, "phy");
+ if (IS_ERR(priv->phy_rst)) {
+ priv->phy_rst = NULL;
+ } else {
+ ret = of_property_read_u32_array(node,
+ PHY_RESET_DELAYS_PROPERTY,
+ priv->phy_reset_delays,
+ DELAYS_NUM);
+ if (ret)
+ goto out_disable_clk;
+ hisi_femac_phy_reset(priv);
+ }
+
+ phy = of_phy_get_and_connect(ndev, node, hisi_femac_adjust_link);
+ if (!phy) {
+ dev_err(dev, "connect to PHY failed!\n");
+ ret = -ENODEV;
+ goto out_disable_clk;
+ }
+
+ phy_attached_print(phy, "phy_id=0x%.8lx, phy_mode=%s\n",
+ (unsigned long)phy->phy_id,
+ phy_modes(phy->interface));
+
+ mac_addr = of_get_mac_address(node);
+ if (mac_addr)
+ ether_addr_copy(ndev->dev_addr, mac_addr);
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ eth_hw_addr_random(ndev);
+ dev_warn(dev, "using random MAC address %pM\n",
+ ndev->dev_addr);
+ }
+
+ ndev->watchdog_timeo = 6 * HZ;
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+ ndev->netdev_ops = &hisi_femac_netdev_ops;
+ ndev->ethtool_ops = &hisi_femac_ethtools_ops;
+ netif_napi_add(ndev, &priv->napi, hisi_femac_poll, FEMAC_POLL_WEIGHT);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ hisi_femac_port_init(priv);
+
+ ret = hisi_femac_init_tx_and_rx_queues(priv);
+ if (ret)
+ goto out_disconnect_phy;
+
+ ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq <= 0) {
+ dev_err(dev, "No irq resource\n");
+ ret = -ENODEV;
+ goto out_disconnect_phy;
+ }
+
+ ret = devm_request_irq(dev, ndev->irq, hisi_femac_interrupt,
+ IRQF_SHARED, pdev->name, ndev);
+ if (ret) {
+ dev_err(dev, "devm_request_irq %d failed!\n", ndev->irq);
+ goto out_disconnect_phy;
+ }
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(dev, "register_netdev failed!\n");
+ goto out_disconnect_phy;
+ }
+
+ return ret;
+
+out_disconnect_phy:
+ netif_napi_del(&priv->napi);
+ phy_disconnect(phy);
+out_disable_clk:
+ clk_disable_unprepare(priv->clk);
+out_free_netdev:
+ free_netdev(ndev);
+
+ return ret;
+}
+
+static int hisi_femac_drv_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct hisi_femac_priv *priv = netdev_priv(ndev);
+
+ netif_napi_del(&priv->napi);
+ unregister_netdev(ndev);
+
+ phy_disconnect(ndev->phydev);
+ clk_disable_unprepare(priv->clk);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+int hisi_femac_drv_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct hisi_femac_priv *priv = netdev_priv(ndev);
+
+ disable_irq(ndev->irq);
+ if (netif_running(ndev)) {
+ hisi_femac_net_close(ndev);
+ netif_device_detach(ndev);
+ }
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+int hisi_femac_drv_resume(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct hisi_femac_priv *priv = netdev_priv(ndev);
+
+ clk_prepare_enable(priv->clk);
+ if (priv->phy_rst)
+ hisi_femac_phy_reset(priv);
+
+ if (netif_running(ndev)) {
+ hisi_femac_port_init(priv);
+ hisi_femac_net_open(ndev);
+ netif_device_attach(ndev);
+ }
+ enable_irq(ndev->irq);
+
+ return 0;
+}
+#endif
+
+static const struct of_device_id hisi_femac_match[] = {
+ {.compatible = "hisilicon,hisi-femac-v1",},
+ {.compatible = "hisilicon,hisi-femac-v2",},
+ {.compatible = "hisilicon,hi3516cv300-femac",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, hisi_femac_match);
+
+static struct platform_driver hisi_femac_driver = {
+ .driver = {
+ .name = "hisi-femac",
+ .of_match_table = hisi_femac_match,
+ },
+ .probe = hisi_femac_drv_probe,
+ .remove = hisi_femac_drv_remove,
+#ifdef CONFIG_PM
+ .suspend = hisi_femac_drv_suspend,
+ .resume = hisi_femac_drv_resume,
+#endif
+};
+
+module_platform_driver(hisi_femac_driver);
+
+MODULE_DESCRIPTION("Hisilicon Fast Ethernet MAC driver");
+MODULE_AUTHOR("Dongpo Li <lidongpo@hisilicon.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:hisi-femac");
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index b9f2ea593..275618bb4 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -218,7 +218,6 @@ struct hix5hd2_priv {
struct device *dev;
struct net_device *netdev;
- struct phy_device *phy;
struct device_node *phy_node;
phy_interface_t phy_mode;
@@ -402,7 +401,7 @@ static int hix5hd2_net_set_mac_address(struct net_device *dev, void *p)
static void hix5hd2_adjust_link(struct net_device *dev)
{
struct hix5hd2_priv *priv = netdev_priv(dev);
- struct phy_device *phy = priv->phy;
+ struct phy_device *phy = dev->phydev;
if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
hix5hd2_config_port(dev, phy->speed, phy->duplex);
@@ -679,6 +678,7 @@ static void hix5hd2_free_dma_desc_rings(struct hix5hd2_priv *priv)
static int hix5hd2_net_open(struct net_device *dev)
{
struct hix5hd2_priv *priv = netdev_priv(dev);
+ struct phy_device *phy;
int ret;
ret = clk_prepare_enable(priv->clk);
@@ -687,12 +687,12 @@ static int hix5hd2_net_open(struct net_device *dev)
return ret;
}
- priv->phy = of_phy_connect(dev, priv->phy_node,
- &hix5hd2_adjust_link, 0, priv->phy_mode);
- if (!priv->phy)
+ phy = of_phy_connect(dev, priv->phy_node,
+ &hix5hd2_adjust_link, 0, priv->phy_mode);
+ if (!phy)
return -ENODEV;
- phy_start(priv->phy);
+ phy_start(phy);
hix5hd2_hw_init(priv);
hix5hd2_rx_refill(priv);
@@ -716,9 +716,9 @@ static int hix5hd2_net_close(struct net_device *dev)
netif_stop_queue(dev);
hix5hd2_free_dma_desc_rings(priv);
- if (priv->phy) {
- phy_stop(priv->phy);
- phy_disconnect(priv->phy);
+ if (dev->phydev) {
+ phy_stop(dev->phydev);
+ phy_disconnect(dev->phydev);
}
clk_disable_unprepare(priv->clk);
@@ -750,32 +750,10 @@ static const struct net_device_ops hix5hd2_netdev_ops = {
.ndo_set_mac_address = hix5hd2_net_set_mac_address,
};
-static int hix5hd2_get_settings(struct net_device *net_dev,
- struct ethtool_cmd *cmd)
-{
- struct hix5hd2_priv *priv = netdev_priv(net_dev);
-
- if (!priv->phy)
- return -ENODEV;
-
- return phy_ethtool_gset(priv->phy, cmd);
-}
-
-static int hix5hd2_set_settings(struct net_device *net_dev,
- struct ethtool_cmd *cmd)
-{
- struct hix5hd2_priv *priv = netdev_priv(net_dev);
-
- if (!priv->phy)
- return -ENODEV;
-
- return phy_ethtool_sset(priv->phy, cmd);
-}
-
static struct ethtool_ops hix5hd2_ethtools_ops = {
.get_link = ethtool_op_get_link,
- .get_settings = hix5hd2_get_settings,
- .set_settings = hix5hd2_set_settings,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
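With dev->phydev kept up to date by of_phy_connect(), the open-coded get/set_settings wrappers become dead weight: the generic phy_ethtool_{get,set}_link_ksettings() helpers read dev->phydev themselves and return -ENODEV when it is absent. The whole ethtool wiring shrinks to a sketch like:

static const struct ethtool_ops sketch_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};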
static int hix5hd2_mdio_wait_ready(struct mii_bus *bus)
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 3bfe36f94..c54c6fac0 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -96,16 +96,22 @@ static int __ae_match(struct device *dev, const void *data)
{
struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);
- return hdev->dev->of_node == data;
+ if (dev_of_node(hdev->dev))
+ return (data == &hdev->dev->of_node->fwnode);
+ else if (is_acpi_node(hdev->dev->fwnode))
+ return (data == hdev->dev->fwnode);
+
+ dev_err(dev, "__ae_match cannot read cfg data from OF or acpi\n");
+ return 0;
}
-static struct hnae_ae_dev *find_ae(const struct device_node *ae_node)
+static struct hnae_ae_dev *find_ae(const struct fwnode_handle *fwnode)
{
struct device *dev;
- WARN_ON(!ae_node);
+ WARN_ON(!fwnode);
- dev = class_find_device(hnae_class, NULL, ae_node, __ae_match);
+ dev = class_find_device(hnae_class, NULL, fwnode, __ae_match);
return dev ? cls_to_ae_dev(dev) : NULL;
}
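__ae_match() makes the AE lookup firmware-agnostic by comparing fwnode_handle pointers: an OF device is identified by &of_node->fwnode, an ACPI one by dev->fwnode directly. The comparison collapses to a sketch like:

static int fwnode_dev_match(struct device *dev, const void *fwnode)
{
	if (dev_of_node(dev))
		return fwnode == &dev->of_node->fwnode;
	return fwnode == dev->fwnode;
}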
@@ -312,7 +318,7 @@ EXPORT_SYMBOL(hnae_reinit_handle);
* return handle ptr or ERR_PTR
*/
struct hnae_handle *hnae_get_handle(struct device *owner_dev,
- const struct device_node *ae_node,
+ const struct fwnode_handle *fwnode,
u32 port_id,
struct hnae_buf_ops *bops)
{
@@ -321,7 +327,7 @@ struct hnae_handle *hnae_get_handle(struct device *owner_dev,
int i, j;
int ret;
- dev = find_ae(ae_node);
+ dev = find_ae(fwnode);
if (!dev)
return ERR_PTR(-ENODEV);
@@ -394,7 +400,6 @@ int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
if (!hdev->ops || !hdev->ops->get_handle ||
!hdev->ops->toggle_ring_irq ||
- !hdev->ops->toggle_queue_status ||
!hdev->ops->get_status || !hdev->ops->adjust_link)
return -EINVAL;
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index e8d36aaea..e093cbf26 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -27,6 +27,7 @@
* "cb" means control block
*/
+#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/module.h>
@@ -362,6 +363,14 @@ enum hnae_port_type {
HNAE_PORT_DEBUG
};
+/* mac media type */
+enum hnae_media_type {
+ HNAE_MEDIA_TYPE_UNKNOWN = 0,
+ HNAE_MEDIA_TYPE_FIBER,
+ HNAE_MEDIA_TYPE_COPPER,
+ HNAE_MEDIA_TYPE_BACKPLANE,
+};
+
/* This struct defines the operation on the handle.
*
* get_handle(): (mandatory)
@@ -453,7 +462,6 @@ struct hnae_ae_ops {
int (*get_info)(struct hnae_handle *handle,
u8 *auto_neg, u16 *speed, u8 *duplex);
void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val);
- void (*toggle_queue_status)(struct hnae_queue *queue, u32 val);
void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex);
int (*set_loopback)(struct hnae_handle *handle,
enum hnae_loop loop_mode, int en);
@@ -472,6 +480,11 @@ struct hnae_ae_ops {
int (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout);
int (*set_coalesce_frames)(struct hnae_handle *handle,
u32 coalesce_frames);
+ void (*get_coalesce_range)(struct hnae_handle *handle,
+ u32 *tx_frames_low, u32 *rx_frames_low,
+ u32 *tx_frames_high, u32 *rx_frames_high,
+ u32 *tx_usecs_low, u32 *rx_usecs_low,
+ u32 *tx_usecs_high, u32 *rx_usecs_high);
void (*set_promisc_mode)(struct hnae_handle *handle, u32 en);
int (*get_mac_addr)(struct hnae_handle *handle, void **p);
int (*set_mac_addr)(struct hnae_handle *handle, void *p);
@@ -512,7 +525,7 @@ struct hnae_ae_dev {
struct hnae_handle {
struct device *owner_dev; /* the device which make use of this handle */
struct hnae_ae_dev *dev; /* the device who provides this handle */
- struct device_node *phy_node;
+ struct phy_device *phy_dev;
phy_interface_t phy_if;
u32 if_support;
int q_num;
@@ -520,6 +533,7 @@ struct hnae_handle {
u32 eport_id;
u32 dport_id; /* v2 tx bd should fill the dport_id */
enum hnae_port_type port_type;
+ enum hnae_media_type media_type;
struct list_head node; /* list to hnae_ae_dev->handle_list */
struct hnae_buf_ops *bops; /* operation for the buffer */
struct hnae_queue **qs; /* array base of all queues */
@@ -528,7 +542,7 @@ struct hnae_handle {
#define ring_to_dev(ring) ((ring)->q->dev->dev)
struct hnae_handle *hnae_get_handle(struct device *owner_dev,
- const struct device_node *ae_node,
+ const struct fwnode_handle *fwnode,
u32 port_id,
struct hnae_buf_ops *bops);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index 7a757e88c..e28d96099 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -131,9 +131,10 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
vf_cb->mac_cb = dsaf_dev->mac_cb[port_id];
ae_handle->phy_if = vf_cb->mac_cb->phy_if;
- ae_handle->phy_node = vf_cb->mac_cb->phy_node;
+ ae_handle->phy_dev = vf_cb->mac_cb->phy_dev;
ae_handle->if_support = vf_cb->mac_cb->if_support;
ae_handle->port_type = vf_cb->mac_cb->mac_type;
+ ae_handle->media_type = vf_cb->mac_cb->media_type;
ae_handle->dport_id = port_id;
return ae_handle;
@@ -247,12 +248,21 @@ static void hns_ae_set_tso_stats(struct hnae_handle *handle, int enable)
static int hns_ae_start(struct hnae_handle *handle)
{
int ret;
+ int k;
struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
ret = hns_mac_vm_config_bc_en(mac_cb, 0, true);
if (ret)
return ret;
+ for (k = 0; k < handle->q_num; k++) {
+ if (AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver))
+ hns_rcb_int_clr_hw(handle->qs[k],
+ RCB_INT_FLAG_TX | RCB_INT_FLAG_RX);
+ else
+ hns_rcbv2_int_clr_hw(handle->qs[k],
+ RCB_INT_FLAG_TX | RCB_INT_FLAG_RX);
+ }
hns_ae_ring_enable_all(handle, 1);
msleep(100);
@@ -313,18 +323,6 @@ static void hns_aev2_toggle_ring_irq(struct hnae_ring *ring, u32 mask)
hns_rcbv2_int_ctrl_hw(ring->q, flag, mask);
}
-static void hns_ae_toggle_queue_status(struct hnae_queue *queue, u32 val)
-{
- struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(queue->dev);
-
- if (AE_IS_VER1(dsaf_dev->dsaf_ver))
- hns_rcb_int_clr_hw(queue, RCB_INT_FLAG_TX | RCB_INT_FLAG_RX);
- else
- hns_rcbv2_int_clr_hw(queue, RCB_INT_FLAG_TX | RCB_INT_FLAG_RX);
-
- hns_rcb_start(queue, val);
-}
-
static int hns_ae_get_link_status(struct hnae_handle *handle)
{
u32 link_status;
@@ -465,6 +463,30 @@ static int hns_ae_set_coalesce_frames(struct hnae_handle *handle,
ring_pair->port_id_in_comm, coalesce_frames);
}
+static void hns_ae_get_coalesce_range(struct hnae_handle *handle,
+ u32 *tx_frames_low, u32 *rx_frames_low,
+ u32 *tx_frames_high, u32 *rx_frames_high,
+ u32 *tx_usecs_low, u32 *rx_usecs_low,
+ u32 *tx_usecs_high, u32 *rx_usecs_high)
+{
+ struct dsaf_device *dsaf_dev;
+
+ dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
+
+ *tx_frames_low = HNS_RCB_MIN_COALESCED_FRAMES;
+ *rx_frames_low = HNS_RCB_MIN_COALESCED_FRAMES;
+ *tx_frames_high =
+ (dsaf_dev->desc_num - 1 > HNS_RCB_MAX_COALESCED_FRAMES) ?
+ HNS_RCB_MAX_COALESCED_FRAMES : dsaf_dev->desc_num - 1;
+ *rx_frames_high =
+ (dsaf_dev->desc_num - 1 > HNS_RCB_MAX_COALESCED_FRAMES) ?
+ HNS_RCB_MAX_COALESCED_FRAMES : dsaf_dev->desc_num - 1;
+ *tx_usecs_low = 0;
+ *rx_usecs_low = 0;
+ *tx_usecs_high = HNS_RCB_MAX_COALESCED_USECS;
+ *rx_usecs_high = HNS_RCB_MAX_COALESCED_USECS;
+}
+
void hns_ae_update_stats(struct hnae_handle *handle,
struct net_device_stats *net_stats)
{
@@ -587,6 +609,7 @@ void hns_ae_get_strings(struct hnae_handle *handle,
int idx;
struct hns_mac_cb *mac_cb;
struct hns_ppe_cb *ppe_cb;
+ struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
u8 *p = data;
struct hnae_vf_cb *vf_cb;
@@ -609,13 +632,14 @@ void hns_ae_get_strings(struct hnae_handle *handle,
p += ETH_GSTRING_LEN * hns_mac_get_sset_count(mac_cb, stringset);
if (mac_cb->mac_type == HNAE_PORT_SERVICE)
- hns_dsaf_get_strings(stringset, p, port);
+ hns_dsaf_get_strings(stringset, p, port, dsaf_dev);
}
int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset)
{
u32 sset_count = 0;
struct hns_mac_cb *mac_cb;
+ struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
assert(handle);
@@ -626,7 +650,7 @@ int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset)
sset_count += hns_mac_get_sset_count(mac_cb, stringset);
if (mac_cb->mac_type == HNAE_PORT_SERVICE)
- sset_count += hns_dsaf_get_sset_count(stringset);
+ sset_count += hns_dsaf_get_sset_count(dsaf_dev, stringset);
return sset_count;
}
@@ -637,13 +661,15 @@ static int hns_ae_config_loopback(struct hnae_handle *handle,
int ret;
struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
switch (loop) {
case MAC_INTERNALLOOP_PHY:
ret = 0;
break;
case MAC_INTERNALLOOP_SERDES:
- ret = hns_mac_config_sds_loopback(vf_cb->mac_cb, en);
+ ret = dsaf_dev->misc_op->cfg_serdes_loopback(vf_cb->mac_cb,
+ !!en);
break;
case MAC_INTERNALLOOP_MAC:
ret = hns_mac_config_mac_loopback(vf_cb->mac_cb, loop, en);
@@ -780,7 +806,6 @@ static struct hnae_ae_ops hns_dsaf_ops = {
.stop = hns_ae_stop,
.reset = hns_ae_reset,
.toggle_ring_irq = hns_ae_toggle_ring_irq,
- .toggle_queue_status = hns_ae_toggle_queue_status,
.get_status = hns_ae_get_link_status,
.get_info = hns_ae_get_mac_info,
.adjust_link = hns_ae_adjust_link,
@@ -794,6 +819,7 @@ static struct hnae_ae_ops hns_dsaf_ops = {
.get_rx_max_coalesced_frames = hns_ae_get_rx_max_coalesced_frames,
.set_coalesce_usecs = hns_ae_set_coalesce_usecs,
.set_coalesce_frames = hns_ae_set_coalesce_frames,
+ .get_coalesce_range = hns_ae_get_coalesce_range,
.set_promisc_mode = hns_ae_set_promisc_mode,
.set_mac_addr = hns_ae_set_mac_address,
.set_mc_addr = hns_ae_set_multicast_one,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 44abb08de..1e1eb9299 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -17,7 +17,7 @@ static const struct mac_stats_string g_gmac_stats_string[] = {
{"gmac_rx_octets_total_ok", MAC_STATS_FIELD_OFF(rx_good_bytes)},
{"gmac_rx_octets_bad", MAC_STATS_FIELD_OFF(rx_bad_bytes)},
{"gmac_rx_uc_pkts", MAC_STATS_FIELD_OFF(rx_uc_pkts)},
- {"gamc_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)},
+ {"gmac_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)},
{"gmac_rx_bc_pkts", MAC_STATS_FIELD_OFF(rx_bc_pkts)},
{"gmac_rx_pkts_64octets", MAC_STATS_FIELD_OFF(rx_64bytes)},
{"gmac_rx_pkts_65to127", MAC_STATS_FIELD_OFF(rx_65to127)},
@@ -110,7 +110,7 @@ static void hns_gmac_free(void *mac_drv)
u32 mac_id = drv->mac_id;
- hns_dsaf_ge_srst_by_port(dsaf_dev, mac_id, 0);
+ dsaf_dev->misc_op->ge_srst(dsaf_dev, mac_id, 0);
}
static void hns_gmac_set_tx_auto_pause_frames(void *mac_drv, u16 newval)
@@ -317,9 +317,9 @@ static void hns_gmac_init(void *mac_drv)
port = drv->mac_id;
- hns_dsaf_ge_srst_by_port(dsaf_dev, port, 0);
+ dsaf_dev->misc_op->ge_srst(dsaf_dev, port, 0);
mdelay(10);
- hns_dsaf_ge_srst_by_port(dsaf_dev, port, 1);
+ dsaf_dev->misc_op->ge_srst(dsaf_dev, port, 1);
mdelay(10);
hns_gmac_disable(mac_drv, MAC_COMM_MODE_RX_AND_TX);
hns_gmac_tx_loop_pkt_dis(mac_drv);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 611581fcc..5c8afe1a5 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -7,6 +7,7 @@
* (at your option) any later version.
*/
+#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -15,7 +16,8 @@
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/phy_fixed.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
#include <linux/platform_device.h>
#include "hns_dsaf_main.h"
@@ -54,20 +56,6 @@ static const enum mac_mode g_mac_mode_1000[] = {
[PHY_INTERFACE_MODE_RTBI] = MAC_MODE_RTBI_1000
};
-static enum mac_mode hns_mac_dev_to_enet_if(const struct hns_mac_cb *mac_cb)
-{
- switch (mac_cb->max_speed) {
- case MAC_SPEED_100:
- return g_mac_mode_100[mac_cb->phy_if];
- case MAC_SPEED_1000:
- return g_mac_mode_1000[mac_cb->phy_if];
- case MAC_SPEED_10000:
- return MAC_MODE_XGMII_10000;
- default:
- return MAC_MODE_MII_100;
- }
-}
-
static enum mac_mode hns_get_enet_interface(const struct hns_mac_cb *mac_cb)
{
switch (mac_cb->max_speed) {
@@ -94,7 +82,7 @@ void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
else
*link_status = 0;
- ret = hns_mac_get_sfp_prsnt(mac_cb, &sfp_prsnt);
+ ret = mac_cb->dsaf_dev->misc_op->get_sfp_prsnt(mac_cb, &sfp_prsnt);
if (!ret)
*link_status = *link_status && sfp_prsnt;
@@ -132,7 +120,6 @@ void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
mac_cb->speed = speed;
mac_cb->half_duplex = !duplex;
- mac_ctrl_drv->mac_mode = hns_mac_dev_to_enet_if(mac_cb);
if (mac_ctrl_drv->adjust_link) {
ret = mac_ctrl_drv->adjust_link(mac_ctrl_drv,
@@ -511,7 +498,7 @@ void hns_mac_stop(struct hns_mac_cb *mac_cb)
mac_ctrl_drv->mac_en_flg = 0;
mac_cb->link = 0;
- cpld_led_reset(mac_cb);
+ mac_cb->dsaf_dev->misc_op->cpld_reset_led(mac_cb);
}
/**
@@ -637,6 +624,127 @@ free_mac_drv:
return ret;
}
+static int
+hns_mac_phy_parse_addr(struct device *dev, struct fwnode_handle *fwnode)
+{
+ u32 addr;
+ int ret;
+
+ ret = fwnode_property_read_u32(fwnode, "phy-addr", &addr);
+ if (ret) {
+ dev_err(dev, "has invalid PHY address ret:%d\n", ret);
+ return ret;
+ }
+
+ if (addr >= PHY_MAX_ADDR) {
+ dev_err(dev, "PHY address %i is too large\n", addr);
+ return -EINVAL;
+ }
+
+ return addr;
+}
+
+static int hns_mac_phydev_match(struct device *dev, void *fwnode)
+{
+ return dev->fwnode == fwnode;
+}
+
+static struct
+platform_device *hns_mac_find_platform_device(struct fwnode_handle *fwnode)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&platform_bus_type, NULL,
+ fwnode, hns_mac_phydev_match);
+ return dev ? to_platform_device(dev) : NULL;
+}
+
+static int
+hns_mac_register_phydev(struct mii_bus *mdio, struct hns_mac_cb *mac_cb,
+ u32 addr)
+{
+ struct phy_device *phy;
+ const char *phy_type;
+ bool is_c45;
+ int rc;
+
+ rc = fwnode_property_read_string(mac_cb->fw_port,
+ "phy-mode", &phy_type);
+ if (rc < 0)
+ return rc;
+
+ if (!strcmp(phy_type, phy_modes(PHY_INTERFACE_MODE_XGMII)))
+ is_c45 = 1;
+ else if (!strcmp(phy_type, phy_modes(PHY_INTERFACE_MODE_SGMII)))
+ is_c45 = 0;
+ else
+ return -ENODATA;
+
+ phy = get_phy_device(mdio, addr, is_c45);
+ if (!phy || IS_ERR(phy))
+ return -EIO;
+
+ if (mdio->irq)
+ phy->irq = mdio->irq[addr];
+
+ /* All data is now stored in the phy struct;
+ * register it
+ */
+ rc = phy_device_register(phy);
+ if (rc) {
+ phy_device_free(phy);
+ return -ENODEV;
+ }
+
+ mac_cb->phy_dev = phy;
+
+ dev_dbg(&mdio->dev, "registered phy at address %i\n", addr);
+
+ return 0;
+}
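For ACPI-described ports there is no of_mdiobus scan, so the driver probes the bus at a fixed address and registers the phy_device by hand. The core of that path, folded into one hedged helper:

static struct phy_device *register_phy_at(struct mii_bus *bus,
					  u32 addr, bool is_c45)
{
	struct phy_device *phy = get_phy_device(bus, addr, is_c45);

	if (IS_ERR_OR_NULL(phy))
		return NULL;
	if (phy_device_register(phy)) {
		phy_device_free(phy);	/* undo get_phy_device() */
		return NULL;
	}
	return phy;
}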
+
+static void hns_mac_register_phy(struct hns_mac_cb *mac_cb)
+{
+ struct acpi_reference_args args;
+ struct platform_device *pdev;
+ struct mii_bus *mii_bus;
+ int rc;
+ int addr;
+
+ /* only ACPI-described ports register their PHY manually here */
+ if (!to_acpi_device_node(mac_cb->fw_port))
+ return;
+
+ rc = acpi_node_get_property_reference(
+ mac_cb->fw_port, "mdio-node", 0, &args);
+ if (rc)
+ return;
+
+ addr = hns_mac_phy_parse_addr(mac_cb->dev, mac_cb->fw_port);
+ if (addr < 0)
+ return;
+
+ /* resolve the MDIO controller's platform device from the ACPI reference */
+ pdev = hns_mac_find_platform_device(acpi_fwnode_handle(args.adev));
+ mii_bus = platform_get_drvdata(pdev);
+ rc = hns_mac_register_phydev(mii_bus, mac_cb, addr);
+ if (!rc)
+ dev_dbg(mac_cb->dev, "mac%d register phy addr:%d\n",
+ mac_cb->mac_id, addr);
+}
+
+#define MAC_MEDIA_TYPE_MAX_LEN 16
+
+static const struct {
+ enum hnae_media_type value;
+ const char *name;
+} media_type_defs[] = {
+ {HNAE_MEDIA_TYPE_UNKNOWN, "unknown" },
+ {HNAE_MEDIA_TYPE_FIBER, "fiber" },
+ {HNAE_MEDIA_TYPE_COPPER, "copper" },
+ {HNAE_MEDIA_TYPE_BACKPLANE, "backplane" },
+};
+
/**
*hns_mac_get_info - get mac information from device node
*@mac_cb: mac device
@@ -645,13 +753,16 @@ free_mac_drv:
*/
static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
{
- struct device_node *np = mac_cb->dev->of_node;
+ struct device_node *np;
struct regmap *syscon;
struct of_phandle_args cpld_args;
+ const char *media_type;
+ u32 i;
u32 ret;
mac_cb->link = false;
mac_cb->half_duplex = false;
+ mac_cb->media_type = HNAE_MEDIA_TYPE_UNKNOWN;
mac_cb->speed = mac_phy_to_speed[mac_cb->phy_if];
mac_cb->max_speed = mac_cb->speed;
@@ -672,62 +783,98 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
* from dsaf node
*/
if (!mac_cb->fw_port) {
- mac_cb->phy_node = of_parse_phandle(np, "phy-handle",
- mac_cb->mac_id);
- if (mac_cb->phy_node)
+ np = of_parse_phandle(mac_cb->dev->of_node, "phy-handle",
+ mac_cb->mac_id);
+ mac_cb->phy_dev = of_phy_find_device(np);
+ if (mac_cb->phy_dev) {
+ /* refcount is held by of_phy_find_device()
+ * if the phy_dev is found
+ */
+ put_device(&mac_cb->phy_dev->mdio.dev);
+
dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n",
- mac_cb->mac_id, mac_cb->phy_node->name);
- return 0;
- }
- if (!is_of_node(mac_cb->fw_port))
- return -EINVAL;
- /* parse property from port subnode in dsaf */
- mac_cb->phy_node = of_parse_phandle(to_of_node(mac_cb->fw_port),
- "phy-handle", 0);
- if (mac_cb->phy_node)
- dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n",
- mac_cb->mac_id, mac_cb->phy_node->name);
- syscon = syscon_node_to_regmap(
- of_parse_phandle(to_of_node(mac_cb->fw_port),
- "serdes-syscon", 0));
- if (IS_ERR_OR_NULL(syscon)) {
- dev_err(mac_cb->dev, "serdes-syscon is needed!\n");
- return -EINVAL;
- }
- mac_cb->serdes_ctrl = syscon;
+ mac_cb->mac_id, np->name);
+ }
+ of_node_put(np);
- ret = fwnode_property_read_u32(mac_cb->fw_port,
- "port-rst-offset",
- &mac_cb->port_rst_off);
- if (ret) {
- dev_dbg(mac_cb->dev,
- "mac%d port-rst-offset not found, use default value.\n",
- mac_cb->mac_id);
+ return 0;
}
- ret = fwnode_property_read_u32(mac_cb->fw_port,
- "port-mode-offset",
- &mac_cb->port_mode_off);
- if (ret) {
- dev_dbg(mac_cb->dev,
- "mac%d port-mode-offset not found, use default value.\n",
- mac_cb->mac_id);
- }
+ if (is_of_node(mac_cb->fw_port)) {
+ /* parse property from port subnode in dsaf */
+ np = of_parse_phandle(to_of_node(mac_cb->fw_port),
+ "phy-handle", 0);
+ mac_cb->phy_dev = of_phy_find_device(np);
+ if (mac_cb->phy_dev) {
+ /* refcount is held by of_phy_find_device()
+ * if the phy_dev is found
+ */
+ put_device(&mac_cb->phy_dev->mdio.dev);
+ dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n",
+ mac_cb->mac_id, np->name);
+ }
+ of_node_put(np);
- ret = of_parse_phandle_with_fixed_args(to_of_node(mac_cb->fw_port),
- "cpld-syscon", 1, 0, &cpld_args);
- if (ret) {
- dev_dbg(mac_cb->dev, "mac%d no cpld-syscon found.\n",
- mac_cb->mac_id);
- mac_cb->cpld_ctrl = NULL;
- } else {
- syscon = syscon_node_to_regmap(cpld_args.np);
+ np = of_parse_phandle(to_of_node(mac_cb->fw_port),
+ "serdes-syscon", 0);
+ syscon = syscon_node_to_regmap(np);
+ of_node_put(np);
if (IS_ERR_OR_NULL(syscon)) {
- dev_dbg(mac_cb->dev, "no cpld-syscon found!\n");
+ dev_err(mac_cb->dev, "serdes-syscon is needed!\n");
+ return -EINVAL;
+ }
+ mac_cb->serdes_ctrl = syscon;
+
+ ret = fwnode_property_read_u32(mac_cb->fw_port,
+ "port-rst-offset",
+ &mac_cb->port_rst_off);
+ if (ret) {
+ dev_dbg(mac_cb->dev,
+ "mac%d port-rst-offset not found, use default value.\n",
+ mac_cb->mac_id);
+ }
+
+ ret = fwnode_property_read_u32(mac_cb->fw_port,
+ "port-mode-offset",
+ &mac_cb->port_mode_off);
+ if (ret) {
+ dev_dbg(mac_cb->dev,
+ "mac%d port-mode-offset not found, use default value.\n",
+ mac_cb->mac_id);
+ }
+
+ ret = of_parse_phandle_with_fixed_args(
+ to_of_node(mac_cb->fw_port), "cpld-syscon", 1, 0,
+ &cpld_args);
+ if (ret) {
+ dev_dbg(mac_cb->dev, "mac%d no cpld-syscon found.\n",
+ mac_cb->mac_id);
mac_cb->cpld_ctrl = NULL;
} else {
- mac_cb->cpld_ctrl = syscon;
- mac_cb->cpld_ctrl_reg = cpld_args.args[0];
+ syscon = syscon_node_to_regmap(cpld_args.np);
+ if (IS_ERR_OR_NULL(syscon)) {
+ dev_dbg(mac_cb->dev, "no cpld-syscon found!\n");
+ mac_cb->cpld_ctrl = NULL;
+ } else {
+ mac_cb->cpld_ctrl = syscon;
+ mac_cb->cpld_ctrl_reg = cpld_args.args[0];
+ }
+ }
+ } else if (is_acpi_node(mac_cb->fw_port)) {
+ hns_mac_register_phy(mac_cb);
+ } else {
+ dev_err(mac_cb->dev, "mac%d cannot find phy node\n",
+ mac_cb->mac_id);
+ }
+
+ if (!fwnode_property_read_string(mac_cb->fw_port, "media-type",
+ &media_type)) {
+ for (i = 0; i < ARRAY_SIZE(media_type_defs); i++) {
+ if (!strncmp(media_type_defs[i].name, media_type,
+ MAC_MEDIA_TYPE_MAX_LEN)) {
+ mac_cb->media_type = media_type_defs[i].value;
+ break;
+ }
}
}
@@ -790,7 +937,7 @@ int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb)
else
mac_cb->mac_type = HNAE_PORT_DEBUG;
- mac_cb->phy_if = hns_mac_get_phy_if(mac_cb);
+ mac_cb->phy_if = dsaf_dev->misc_op->get_phy_if(mac_cb);
ret = hns_mac_get_mode(mac_cb->phy_if);
if (ret < 0) {
@@ -805,7 +952,7 @@ int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb)
if (ret)
return ret;
- cpld_led_reset(mac_cb);
+ mac_cb->dsaf_dev->misc_op->cpld_reset_led(mac_cb);
mac_cb->vaddr = hns_mac_get_vaddr(dsaf_dev, mac_cb, mac_mode_idx);
return 0;
@@ -892,7 +1039,7 @@ void hns_mac_uninit(struct dsaf_device *dsaf_dev)
int max_port_num = hns_mac_get_max_port_num(dsaf_dev);
for (i = 0; i < max_port_num; i++) {
- cpld_led_reset(dsaf_dev->mac_cb[i]);
+ dsaf_dev->misc_op->cpld_reset_led(dsaf_dev->mac_cb[i]);
dsaf_dev->mac_cb[i] = NULL;
}
}
@@ -975,7 +1122,7 @@ void hns_set_led_opt(struct hns_mac_cb *mac_cb)
nic_data = 0;
mac_cb->txpkt_for_led = mac_cb->hw_stats.tx_good_pkts;
mac_cb->rxpkt_for_led = mac_cb->hw_stats.rx_good_pkts;
- hns_cpld_set_led(mac_cb, (int)mac_cb->link,
+ mac_cb->dsaf_dev->misc_op->cpld_set_led(mac_cb, (int)mac_cb->link,
mac_cb->speed, nic_data);
}
@@ -985,5 +1132,5 @@ int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
if (!mac_cb || !mac_cb->cpld_ctrl)
return 0;
- return cpld_set_led_id(mac_cb, status);
+ return mac_cb->dsaf_dev->misc_op->cpld_set_led_id(mac_cb, status);
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index 97ce9a750..4cbdf14f5 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -335,10 +335,11 @@ struct hns_mac_cb {
u64 txpkt_for_led;
u64 rxpkt_for_led;
enum hnae_port_type mac_type;
+ enum hnae_media_type media_type;
phy_interface_t phy_if;
enum hnae_loop loop_mode;
- struct device_node *phy_node;
+ struct phy_device *phy_dev;
struct mac_hw_stats hw_stats;
};
@@ -448,8 +449,6 @@ int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en);
int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu);
int hns_mac_get_port_info(struct hns_mac_cb *mac_cb,
u8 *auto_neg, u16 *speed, u8 *duplex);
-phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb);
-int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en);
int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb,
enum hnae_loop loop, int en);
void hns_mac_update_stats(struct hns_mac_cb *mac_cb);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 1c2ddb25e..afb5daa37 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -7,6 +7,7 @@
* (at your option) any later version.
*/
+#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -24,6 +25,7 @@
#include "hns_dsaf_main.h"
#include "hns_dsaf_ppe.h"
#include "hns_dsaf_rcb.h"
+#include "hns_dsaf_misc.h"
const char *g_dsaf_mode_match[DSAF_MODE_MAX] = {
[DSAF_MODE_DISABLE_2PORT_64VM] = "2port-64vf",
@@ -32,6 +34,13 @@ const char *g_dsaf_mode_match[DSAF_MODE_MAX] = {
[DSAF_MODE_DISABLE_SP] = "single-port",
};
+static const struct acpi_device_id hns_dsaf_acpi_match[] = {
+ { "HISI00B1", 0 },
+ { "HISI00B2", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, hns_dsaf_acpi_match);
+
int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
{
int ret, i;
@@ -42,15 +51,27 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
const char *mode_str;
struct regmap *syscon;
struct resource *res;
- struct device_node *np = dsaf_dev->dev->of_node;
+ struct device_node *np = dsaf_dev->dev->of_node, *np_temp;
struct platform_device *pdev = to_platform_device(dsaf_dev->dev);
- if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1"))
- dsaf_dev->dsaf_ver = AE_VERSION_1;
- else
- dsaf_dev->dsaf_ver = AE_VERSION_2;
+ if (dev_of_node(dsaf_dev->dev)) {
+ if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1"))
+ dsaf_dev->dsaf_ver = AE_VERSION_1;
+ else
+ dsaf_dev->dsaf_ver = AE_VERSION_2;
+ } else if (is_acpi_node(dsaf_dev->dev->fwnode)) {
+ if (acpi_dev_found(hns_dsaf_acpi_match[0].id))
+ dsaf_dev->dsaf_ver = AE_VERSION_1;
+ else if (acpi_dev_found(hns_dsaf_acpi_match[1].id))
+ dsaf_dev->dsaf_ver = AE_VERSION_2;
+ else
+ return -ENXIO;
+ } else {
+ dev_err(dsaf_dev->dev, "cannot get cfg data from of or acpi\n");
+ return -ENXIO;
+ }
- ret = of_property_read_string(np, "mode", &mode_str);
+ ret = device_property_read_string(dsaf_dev->dev, "mode", &mode_str);
if (ret) {
dev_err(dsaf_dev->dev, "get dsaf mode fail, ret=%d!\n", ret);
return ret;
@@ -80,32 +101,41 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
else
dsaf_dev->dsaf_tc_mode = HRD_DSAF_4TC_MODE;
- syscon = syscon_node_to_regmap(
- of_parse_phandle(np, "subctrl-syscon", 0));
- if (IS_ERR_OR_NULL(syscon)) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++);
- if (!res) {
- dev_err(dsaf_dev->dev, "subctrl info is needed!\n");
- return -ENOMEM;
- }
- dsaf_dev->sc_base = devm_ioremap_resource(&pdev->dev, res);
- if (!dsaf_dev->sc_base) {
- dev_err(dsaf_dev->dev, "subctrl can not map!\n");
- return -ENOMEM;
- }
+ if (dev_of_node(dsaf_dev->dev)) {
+ np_temp = of_parse_phandle(np, "subctrl-syscon", 0);
+ syscon = syscon_node_to_regmap(np_temp);
+ of_node_put(np_temp);
+ if (IS_ERR_OR_NULL(syscon)) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ res_idx++);
+ if (!res) {
+ dev_err(dsaf_dev->dev, "subctrl info is needed!\n");
+ return -ENOMEM;
+ }
- res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++);
- if (!res) {
- dev_err(dsaf_dev->dev, "serdes-ctrl info is needed!\n");
- return -ENOMEM;
- }
- dsaf_dev->sds_base = devm_ioremap_resource(&pdev->dev, res);
- if (!dsaf_dev->sds_base) {
- dev_err(dsaf_dev->dev, "serdes-ctrl can not map!\n");
- return -ENOMEM;
+ dsaf_dev->sc_base = devm_ioremap_resource(&pdev->dev,
+ res);
+ if (IS_ERR(dsaf_dev->sc_base)) {
+ dev_err(dsaf_dev->dev, "subctrl can not map!\n");
+ return PTR_ERR(dsaf_dev->sc_base);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ res_idx++);
+ if (!res) {
+ dev_err(dsaf_dev->dev, "serdes-ctrl info is needed!\n");
+ return -ENOMEM;
+ }
+
+ dsaf_dev->sds_base = devm_ioremap_resource(&pdev->dev,
+ res);
+ if (IS_ERR(dsaf_dev->sds_base)) {
+ dev_err(dsaf_dev->dev, "serdes-ctrl can not map!\n");
+ return PTR_ERR(dsaf_dev->sds_base);
+ }
+ } else {
+ dsaf_dev->sub_ctrl = syscon;
}
- } else {
- dsaf_dev->sub_ctrl = syscon;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ppe-base");
@@ -117,9 +147,9 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
}
}
dsaf_dev->ppe_base = devm_ioremap_resource(&pdev->dev, res);
- if (!dsaf_dev->ppe_base) {
+ if (IS_ERR(dsaf_dev->ppe_base)) {
dev_err(dsaf_dev->dev, "ppe-base resource can not map!\n");
- return -ENOMEM;
+ return PTR_ERR(dsaf_dev->ppe_base);
}
dsaf_dev->ppe_paddr = res->start;
@@ -136,33 +166,34 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
}
}
dsaf_dev->io_base = devm_ioremap_resource(&pdev->dev, res);
- if (!dsaf_dev->io_base) {
+ if (IS_ERR(dsaf_dev->io_base)) {
dev_err(dsaf_dev->dev, "dsaf-base resource can not map!\n");
- return -ENOMEM;
+ return PTR_ERR(dsaf_dev->io_base);
}
}
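The converted checks above all encode the same rule: devm_ioremap_resource() returns an ERR_PTR()-encoded error on failure and never NULL, so an `if (!ptr)` test silently accepts a failed mapping. The correct shape, as a standalone sketch:

static int map_resource(struct platform_device *pdev, struct resource *res,
			void __iomem **out)
{
	void __iomem *base = devm_ioremap_resource(&pdev->dev, res);

	if (IS_ERR(base))	/* ERR_PTR on failure, never NULL */
		return PTR_ERR(base);
	*out = base;
	return 0;
}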
- ret = of_property_read_u32(np, "desc-num", &desc_num);
+ ret = device_property_read_u32(dsaf_dev->dev, "desc-num", &desc_num);
if (ret < 0 || desc_num < HNS_DSAF_MIN_DESC_CNT ||
desc_num > HNS_DSAF_MAX_DESC_CNT) {
dev_err(dsaf_dev->dev, "get desc-num(%d) fail, ret=%d!\n",
desc_num, ret);
- goto unmap_base_addr;
+ return -EINVAL;
}
dsaf_dev->desc_num = desc_num;
- ret = of_property_read_u32(np, "reset-field-offset", &reset_offset);
+ ret = device_property_read_u32(dsaf_dev->dev, "reset-field-offset",
+ &reset_offset);
if (ret < 0) {
dev_dbg(dsaf_dev->dev,
"get reset-field-offset fail, ret=%d!\r\n", ret);
}
dsaf_dev->reset_offset = reset_offset;
- ret = of_property_read_u32(np, "buf-size", &buf_size);
+ ret = device_property_read_u32(dsaf_dev->dev, "buf-size", &buf_size);
if (ret < 0) {
dev_err(dsaf_dev->dev,
"get buf-size fail, ret=%d!\r\n", ret);
- goto unmap_base_addr;
+ return ret;
}
dsaf_dev->buf_size = buf_size;
@@ -170,41 +201,19 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
if (dsaf_dev->buf_size_type < 0) {
dev_err(dsaf_dev->dev,
"buf_size(%d) is wrong!\n", buf_size);
- goto unmap_base_addr;
+ return -EINVAL;
}
+ dsaf_dev->misc_op = hns_misc_op_get(dsaf_dev);
+ if (!dsaf_dev->misc_op)
+ return -ENOMEM;
+
if (!dma_set_mask_and_coherent(dsaf_dev->dev, DMA_BIT_MASK(64ULL)))
dev_dbg(dsaf_dev->dev, "set mask to 64bit\n");
else
dev_err(dsaf_dev->dev, "set mask to 64bit fail!\n");
return 0;
-
-unmap_base_addr:
- if (dsaf_dev->io_base)
- iounmap(dsaf_dev->io_base);
- if (dsaf_dev->ppe_base)
- iounmap(dsaf_dev->ppe_base);
- if (dsaf_dev->sds_base)
- iounmap(dsaf_dev->sds_base);
- if (dsaf_dev->sc_base)
- iounmap(dsaf_dev->sc_base);
- return ret;
-}
-
-static void hns_dsaf_free_cfg(struct dsaf_device *dsaf_dev)
-{
- if (dsaf_dev->io_base)
- iounmap(dsaf_dev->io_base);
-
- if (dsaf_dev->ppe_base)
- iounmap(dsaf_dev->ppe_base);
-
- if (dsaf_dev->sds_base)
- iounmap(dsaf_dev->sds_base);
-
- if (dsaf_dev->sc_base)
- iounmap(dsaf_dev->sc_base);
}
/**
@@ -508,10 +517,10 @@ static void hns_dsafv2_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev)
o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
dsaf_set_field(o_sbm_bp_cfg,
DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_M,
- DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 110);
+ DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 48);
dsaf_set_field(o_sbm_bp_cfg,
DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M,
- DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 160);
+ DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 80);
dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
/* for the no-pfc (pfc disabled) mode */
@@ -519,29 +528,39 @@ static void hns_dsafv2_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev)
o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
dsaf_set_field(o_sbm_bp_cfg,
DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_M,
- DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S, 128);
+ DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S, 192);
dsaf_set_field(o_sbm_bp_cfg,
DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M,
- DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S, 192);
+ DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S, 240);
dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
}
/* PPE */
- reg = DSAF_SBM_BP_CFG_2_PPE_REG_0_REG + 0x80 * i;
- o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
- dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_SET_BUF_NUM_M,
- DSAFV2_SBM_CFG2_SET_BUF_NUM_S, 10);
- dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_RESET_BUF_NUM_M,
- DSAFV2_SBM_CFG2_RESET_BUF_NUM_S, 12);
- dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+ for (i = 0; i < DSAFV2_SBM_PPE_CHN; i++) {
+ reg = DSAF_SBM_BP_CFG_2_PPE_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_M,
+ DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_S, 2);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_M,
+ DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_S, 3);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_M,
+ DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_S, 52);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+ }
+
/* RoCEE */
for (i = 0; i < DASFV2_ROCEE_CRD_NUM; i++) {
reg = DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG + 0x80 * i;
o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
- dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_SET_BUF_NUM_M,
- DSAFV2_SBM_CFG2_SET_BUF_NUM_S, 2);
- dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_RESET_BUF_NUM_M,
- DSAFV2_SBM_CFG2_RESET_BUF_NUM_S, 4);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_M,
+ DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_S, 2);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_M,
+ DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_S, 4);
dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
}
}
@@ -852,6 +871,8 @@ static void hns_dsaf_single_line_tbl_cfg(
struct dsaf_device *dsaf_dev,
u32 address, struct dsaf_tbl_line_cfg *ptbl_line)
{
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+
/*Write Addr*/
hns_dsaf_tbl_line_addr_cfg(dsaf_dev, address);
@@ -860,6 +881,8 @@ static void hns_dsaf_single_line_tbl_cfg(
/*Write Plus*/
hns_dsaf_tbl_line_pul(dsaf_dev);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
}
/**
@@ -873,6 +896,8 @@ static void hns_dsaf_tcam_uc_cfg(
struct dsaf_tbl_tcam_data *ptbl_tcam_data,
struct dsaf_tbl_tcam_ucast_cfg *ptbl_tcam_ucast)
{
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+
/*Write Addr*/
hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
/*Write Tcam Data*/
@@ -881,6 +906,8 @@ static void hns_dsaf_tcam_uc_cfg(
hns_dsaf_tbl_tcam_ucast_cfg(dsaf_dev, ptbl_tcam_ucast);
/*Write Plus*/
hns_dsaf_tbl_tcam_data_ucast_pul(dsaf_dev);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
}
/**
@@ -895,6 +922,8 @@ static void hns_dsaf_tcam_mc_cfg(
struct dsaf_tbl_tcam_data *ptbl_tcam_data,
struct dsaf_tbl_tcam_mcast_cfg *ptbl_tcam_mcast)
{
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+
/*Write Addr*/
hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
/*Write Tcam Data*/
@@ -903,6 +932,8 @@ static void hns_dsaf_tcam_mc_cfg(
hns_dsaf_tbl_tcam_mcast_cfg(dsaf_dev, ptbl_tcam_mcast);
/*Write Plus*/
hns_dsaf_tbl_tcam_data_mcast_pul(dsaf_dev);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
}
/**
@@ -912,6 +943,8 @@ static void hns_dsaf_tcam_mc_cfg(
*/
static void hns_dsaf_tcam_mc_invld(struct dsaf_device *dsaf_dev, u32 address)
{
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+
/*Write Addr*/
hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
@@ -924,6 +957,8 @@ static void hns_dsaf_tcam_mc_invld(struct dsaf_device *dsaf_dev, u32 address)
/*Write Plus*/
hns_dsaf_tbl_tcam_mcast_pul(dsaf_dev);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
}
/**
@@ -941,6 +976,8 @@ static void hns_dsaf_tcam_uc_get(
u32 tcam_read_data0;
u32 tcam_read_data4;
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+
/*Write Addr*/
hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
@@ -949,9 +986,9 @@ static void hns_dsaf_tcam_uc_get(
/*read tcam data*/
ptbl_tcam_data->tbl_tcam_data_high
- = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
- ptbl_tcam_data->tbl_tcam_data_low
= dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
+ ptbl_tcam_data->tbl_tcam_data_low
+ = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
/*read tcam mcast*/
tcam_read_data0 = dsaf_read_dev(dsaf_dev,
@@ -973,6 +1010,8 @@ static void hns_dsaf_tcam_uc_get(
DSAF_TBL_UCAST_CFG1_OUT_PORT_S);
ptbl_tcam_ucast->tbl_ucast_dvc
= dsaf_get_bit(tcam_read_data0, DSAF_TBL_UCAST_CFG1_DVC_S);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
}
/**
@@ -989,6 +1028,8 @@ static void hns_dsaf_tcam_mc_get(
{
u32 data_tmp;
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+
/*Write Addr*/
hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
@@ -997,9 +1038,9 @@ static void hns_dsaf_tcam_mc_get(
/*read tcam data*/
ptbl_tcam_data->tbl_tcam_data_high =
- dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
- ptbl_tcam_data->tbl_tcam_data_low =
dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
+ ptbl_tcam_data->tbl_tcam_data_low =
+ dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
/*read tcam mcast*/
ptbl_tcam_mcast->tbl_mcast_port_msk[0] =
@@ -1019,6 +1060,8 @@ static void hns_dsaf_tcam_mc_get(
ptbl_tcam_mcast->tbl_mcast_port_msk[4] =
dsaf_get_field(data_tmp, DSAF_TBL_MCAST_CFG4_VM128_112_M,
DSAF_TBL_MCAST_CFG4_VM128_112_S);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
}
/**
@@ -1080,10 +1123,10 @@ int hns_dsaf_set_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
u32 en)
{
if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
- if (!en)
+ if (!en) {
dev_err(dsaf_dev->dev, "dsafv1 can't close rx_pause!\n");
-
- return -EINVAL;
+ return -EINVAL;
+ }
}
dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4,
@@ -1295,9 +1338,9 @@ static int hns_dsaf_init_hw(struct dsaf_device *dsaf_dev)
dev_dbg(dsaf_dev->dev,
"hns_dsaf_init_hw begin %s !\n", dsaf_dev->ae_dev.name);
- hns_dsaf_rst(dsaf_dev, 0);
+ dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 0);
mdelay(10);
- hns_dsaf_rst(dsaf_dev, 1);
+ dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 1);
hns_dsaf_comm_init(dsaf_dev);
@@ -1325,7 +1368,7 @@ static int hns_dsaf_init_hw(struct dsaf_device *dsaf_dev)
static void hns_dsaf_remove_hw(struct dsaf_device *dsaf_dev)
{
/*reset*/
- hns_dsaf_rst(dsaf_dev, 0);
+ dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 0);
}
/**
@@ -1343,6 +1386,7 @@ static int hns_dsaf_init(struct dsaf_device *dsaf_dev)
if (HNS_DSAF_IS_DEBUG(dsaf_dev))
return 0;
+ spin_lock_init(&dsaf_dev->tcam_lock);
ret = hns_dsaf_init_hw(dsaf_dev);
if (ret)
return ret;
@@ -2088,11 +2132,24 @@ void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb)
hns_dsaf_port_work_rate_cfg(dsaf_dev, mac_id, mode);
}
+static u32 hns_dsaf_get_inode_prio_reg(int index)
+{
+ int base_index, offset;
+ u32 base_addr = DSAF_INODE_IN_PRIO_PAUSE_BASE_REG;
+
+ base_index = (index + 1) / DSAF_REG_PER_ZONE;
+ offset = (index + 1) % DSAF_REG_PER_ZONE;
+
+ return base_addr + DSAF_INODE_IN_PRIO_PAUSE_BASE_OFFSET * base_index +
+ DSAF_INODE_IN_PRIO_PAUSE_OFFSET * offset;
+}
+
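A quick sanity check of the address arithmetic above, using the macro values added later in this patch (DSAF_INODE_IN_PRIO_PAUSE_BASE_REG = 0x1C00, zone stride 0x100, register stride 0x50, DSAF_REG_PER_ZONE = 3):

/* i = 0: base_index = 1/3 = 0, offset = 1  ->  0x1C00 + 0x000 + 0x050 = 0x1C50
 * i = 2: base_index = 3/3 = 1, offset = 0  ->  0x1C00 + 0x100 + 0x000 = 0x1D00
 * i = 7: base_index = 8/3 = 2, offset = 2  ->  0x1C00 + 0x200 + 0x0A0 = 0x1EA0
 */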
void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
{
struct dsaf_hw_stats *hw_stats
= &dsaf_dev->hw_stats[node_num];
bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
+ int i;
u32 reg_tmp;
hw_stats->pad_drop += dsaf_read_dev(dsaf_dev,
@@ -2127,6 +2184,18 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
hw_stats->stp_drop += dsaf_read_dev(dsaf_dev,
DSAF_INODE_IN_DATA_STP_DISC_0_REG + 0x80 * (u64)node_num);
+ /* pfc pause frame statistics stored in dsaf inode */
+ if ((node_num < DSAF_SERVICE_NW_NUM) && !is_ver1) {
+ for (i = 0; i < DSAF_PRIO_NR; i++) {
+ reg_tmp = hns_dsaf_get_inode_prio_reg(i);
+ hw_stats->rx_pfc[i] += dsaf_read_dev(dsaf_dev,
+ reg_tmp + 0x4 * (u64)node_num);
+ hw_stats->tx_pfc[i] += dsaf_read_dev(dsaf_dev,
+ DSAF_XOD_XGE_PFC_PRIO_CNT_BASE_REG +
+ DSAF_XOD_XGE_PFC_PRIO_CNT_OFFSET * i +
+ 0xF0 * (u64)node_num);
+ }
+ }
hw_stats->tx_pkts += dsaf_read_dev(dsaf_dev,
DSAF_XOD_RCVPKT_CNT_0_REG + 0x90 * (u64)node_num);
}
@@ -2464,38 +2533,53 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
p[i] = 0xdddddddd;
}
-static char *hns_dsaf_get_node_stats_strings(char *data, int node)
+static char *hns_dsaf_get_node_stats_strings(char *data, int node,
+ struct dsaf_device *dsaf_dev)
{
char *buff = data;
+ int i;
+ bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
snprintf(buff, ETH_GSTRING_LEN, "innod%d_pad_drop_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_manage_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkt_id", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pause_frame", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_release_buf_num", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_sbm_drop_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_crc_false_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_bp_drop_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_lookup_rslt_drop_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_local_rslt_fail_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_vlan_drop_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_stp_drop_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
+ if (node < DSAF_SERVICE_NW_NUM && !is_ver1) {
+ for (i = 0; i < DSAF_PRIO_NR; i++) {
+ snprintf(buff + 0 * ETH_GSTRING_LEN * DSAF_PRIO_NR,
+ ETH_GSTRING_LEN, "inod%d_pfc_prio%d_pkts",
+ node, i);
+ snprintf(buff + 1 * ETH_GSTRING_LEN * DSAF_PRIO_NR,
+ ETH_GSTRING_LEN, "onod%d_pfc_prio%d_pkts",
+ node, i);
+ buff += ETH_GSTRING_LEN;
+ }
+ buff += 1 * DSAF_PRIO_NR * ETH_GSTRING_LEN;
+ }
snprintf(buff, ETH_GSTRING_LEN, "onnod%d_tx_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
return buff;
}
@@ -2504,7 +2588,9 @@ static u64 *hns_dsaf_get_node_stats(struct dsaf_device *ddev, u64 *data,
int node_num)
{
u64 *p = data;
+ int i;
struct dsaf_hw_stats *hw_stats = &ddev->hw_stats[node_num];
+ bool is_ver1 = AE_IS_VER1(ddev->dsaf_ver);
p[0] = hw_stats->pad_drop;
p[1] = hw_stats->man_pkts;
@@ -2519,8 +2605,16 @@ static u64 *hns_dsaf_get_node_stats(struct dsaf_device *ddev, u64 *data,
p[10] = hw_stats->local_addr_false;
p[11] = hw_stats->vlan_drop;
p[12] = hw_stats->stp_drop;
- p[13] = hw_stats->tx_pkts;
+ if (node_num < DSAF_SERVICE_NW_NUM && !is_ver1) {
+ for (i = 0; i < DSAF_PRIO_NR; i++) {
+ p[13 + i + 0 * DSAF_PRIO_NR] = hw_stats->rx_pfc[i];
+ p[13 + i + 1 * DSAF_PRIO_NR] = hw_stats->tx_pfc[i];
+ }
+ p[29] = hw_stats->tx_pkts;
+ return &p[30];
+ }
+ p[13] = hw_stats->tx_pkts;
return &p[14];
}
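These per-node layouts are what make the sset counts below add up, assuming the PPE inode index (port + DSAF_PPE_INODE_BASE) falls outside DSAF_SERVICE_NW_NUM so the PPE node keeps the 14-entry layout:

/* v1: 14 (GE/XGE node) + 14 (PPE node) = 28 = DSAF_STATIC_NUM
 * v2: 13 + 8 rx_pfc + 8 tx_pfc + 1 tx_pkts = 30 per service node;
 *     30 (service node) + 14 (PPE node)  = 44 = DSAF_V2_STATIC_NUM
 */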
@@ -2548,11 +2642,16 @@ void hns_dsaf_get_stats(struct dsaf_device *ddev, u64 *data, int port)
*@stringset: type of values in data
*return dsaf string name count
*/
-int hns_dsaf_get_sset_count(int stringset)
+int hns_dsaf_get_sset_count(struct dsaf_device *dsaf_dev, int stringset)
{
- if (stringset == ETH_SS_STATS)
- return DSAF_STATIC_NUM;
+ bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
+ if (stringset == ETH_SS_STATS) {
+ if (is_ver1)
+ return DSAF_STATIC_NUM;
+ else
+ return DSAF_V2_STATIC_NUM;
+ }
return 0;
}
@@ -2562,7 +2661,8 @@ int hns_dsaf_get_sset_count(int stringset)
*@data:strings name value
*@port:port index
*/
-void hns_dsaf_get_strings(int stringset, u8 *data, int port)
+void hns_dsaf_get_strings(int stringset, u8 *data, int port,
+ struct dsaf_device *dsaf_dev)
{
char *buff = (char *)data;
int node = port;
@@ -2571,11 +2671,11 @@ void hns_dsaf_get_strings(int stringset, u8 *data, int port)
return;
/* for ge/xge node info */
- buff = hns_dsaf_get_node_stats_strings(buff, node);
+ buff = hns_dsaf_get_node_stats_strings(buff, node, dsaf_dev);
/* for ppe node info */
node = port + DSAF_PPE_INODE_BASE;
- (void)hns_dsaf_get_node_stats_strings(buff, node);
+ (void)hns_dsaf_get_node_stats_strings(buff, node, dsaf_dev);
}
/**
@@ -2611,7 +2711,7 @@ static int hns_dsaf_probe(struct platform_device *pdev)
ret = hns_dsaf_init(dsaf_dev);
if (ret)
- goto free_cfg;
+ goto free_dev;
ret = hns_mac_init(dsaf_dev);
if (ret)
@@ -2636,9 +2736,6 @@ uninit_mac:
uninit_dsaf:
hns_dsaf_free(dsaf_dev);
-free_cfg:
- hns_dsaf_free_cfg(dsaf_dev);
-
free_dev:
hns_dsaf_free_dev(dsaf_dev);
@@ -2661,8 +2758,6 @@ static int hns_dsaf_remove(struct platform_device *pdev)
hns_dsaf_free(dsaf_dev);
- hns_dsaf_free_cfg(dsaf_dev);
-
hns_dsaf_free_dev(dsaf_dev);
return 0;
@@ -2680,6 +2775,7 @@ static struct platform_driver g_dsaf_driver = {
.driver = {
.name = DSAF_DRV_NAME,
.of_match_table = g_dsaf_match,
+ .acpi_match_table = hns_dsaf_acpi_match,
},
};
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index f0502ba0a..1daf018d9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -39,6 +39,9 @@ struct hns_mac_cb;
#define DSAF_DUMP_REGS_NUM 504
#define DSAF_STATIC_NUM 28
+#define DSAF_V2_STATIC_NUM 44
+#define DSAF_PRIO_NR 8
+#define DSAF_REG_PER_ZONE 3
#define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HNS_DSAF_IS_DEBUG(dev) (dev->dsaf_mode == DSAF_MODE_DISABLE_SP)
@@ -176,6 +179,8 @@ struct dsaf_hw_stats {
u64 local_addr_false;
u64 vlan_drop;
u64 stp_drop;
+ u64 rx_pfc[DSAF_PRIO_NR];
+ u64 tx_pfc[DSAF_PRIO_NR];
u64 tx_pkts;
};
@@ -268,6 +273,27 @@ struct dsaf_int_stat {
};
+struct dsaf_misc_op {
+ void (*cpld_set_led)(struct hns_mac_cb *mac_cb, int link_status,
+ u16 speed, int data);
+ void (*cpld_reset_led)(struct hns_mac_cb *mac_cb);
+ int (*cpld_set_led_id)(struct hns_mac_cb *mac_cb,
+ enum hnae_led_state status);
+ /* reset series functions; reset is asserted when dereset is 0 */
+ void (*dsaf_reset)(struct dsaf_device *dsaf_dev, bool dereset);
+ void (*xge_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
+ void (*xge_core_srst)(struct dsaf_device *dsaf_dev, u32 port,
+ bool dereset);
+ void (*ge_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
+ void (*ppe_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
+ void (*ppe_comm_srst)(struct dsaf_device *dsaf_dev, bool dereset);
+
+ phy_interface_t (*get_phy_if)(struct hns_mac_cb *mac_cb);
+ int (*get_sfp_prsnt)(struct hns_mac_cb *mac_cb, int *sfp_prsnt);
+
+ int (*cfg_serdes_loopback)(struct hns_mac_cb *mac_cb, bool en);
+};
+
/* DSAF device struct definition, and mac -> dsaf */
struct dsaf_device {
struct device *dev;
@@ -292,9 +318,12 @@ struct dsaf_device {
struct ppe_common_cb *ppe_common[DSAF_COMM_DEV_NUM];
struct rcb_common_cb *rcb_common[DSAF_COMM_DEV_NUM];
struct hns_mac_cb *mac_cb[DSAF_MAX_PORT_NUM];
+ struct dsaf_misc_op *misc_op;
struct dsaf_hw_stats hw_stats[DSAF_NODE_NUM];
struct dsaf_int_stat int_stat;
+ /* spinlock protecting tcam table configuration */
+ spinlock_t tcam_lock;
};
static inline void *hns_dsaf_dev_priv(const struct dsaf_device *dsaf_dev)
@@ -388,27 +417,17 @@ int hns_dsaf_get_mac_entry_by_index(
u16 entry_index,
struct dsaf_drv_mac_multi_dest_entry *mac_entry);
-void hns_dsaf_rst(struct dsaf_device *dsaf_dev, u32 val);
-
-void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val);
-
-void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val);
-
void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb);
int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev);
void hns_dsaf_ae_uninit(struct dsaf_device *dsaf_dev);
-void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val);
-void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val);
-void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
- u32 port, u32 val);
-
void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 inode_num);
-int hns_dsaf_get_sset_count(int stringset);
+int hns_dsaf_get_sset_count(struct dsaf_device *dsaf_dev, int stringset);
void hns_dsaf_get_stats(struct dsaf_device *ddev, u64 *data, int port);
-void hns_dsaf_get_strings(int stringset, u8 *data, int port);
+void hns_dsaf_get_strings(int stringset, u8 *data, int port,
+ struct dsaf_device *dsaf_dev);
void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data);
int hns_dsaf_get_regs_count(void);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index a837bb9e3..611b67b6f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -12,6 +12,27 @@
#include "hns_dsaf_ppe.h"
#include "hns_dsaf_reg.h"
+enum _dsm_op_index {
+ HNS_OP_RESET_FUNC = 0x1,
+ HNS_OP_SERDES_LP_FUNC = 0x2,
+ HNS_OP_LED_SET_FUNC = 0x3,
+ HNS_OP_GET_PORT_TYPE_FUNC = 0x4,
+ HNS_OP_GET_SFP_STAT_FUNC = 0x5,
+};
+
+enum _dsm_rst_type {
+ HNS_DSAF_RESET_FUNC = 0x1,
+ HNS_PPE_RESET_FUNC = 0x2,
+ HNS_XGE_CORE_RESET_FUNC = 0x3,
+ HNS_XGE_RESET_FUNC = 0x4,
+ HNS_GE_RESET_FUNC = 0x5,
+};
+
+const u8 hns_dsaf_acpi_dsm_uuid[] = {
+ 0x1A, 0xAA, 0x85, 0x1A, 0x93, 0xE2, 0x5E, 0x41,
+ 0x8E, 0x28, 0x8D, 0x69, 0x0A, 0x0F, 0x82, 0x0A
+};
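ACPI stores _DSM UUIDs mixed-endian: the first three fields are little-endian, the trailing eight bytes are kept in order, so the array above encodes the GUID 1a85aa1a-e293-415e-8e28-8d690a0f820a. A standalone decode sketch (illustrative userspace C, not part of the patch):

#include <stdio.h>

/* same bytes as hns_dsaf_acpi_dsm_uuid above */
static const unsigned char uuid[16] = {
	0x1A, 0xAA, 0x85, 0x1A, 0x93, 0xE2, 0x5E, 0x41,
	0x8E, 0x28, 0x8D, 0x69, 0x0A, 0x0F, 0x82, 0x0A
};

int main(void)
{
	/* fields 1-3 are little-endian; bytes 8-15 print as stored */
	printf("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x\n",
	       uuid[3], uuid[2], uuid[1], uuid[0],
	       uuid[5], uuid[4], uuid[7], uuid[6],
	       uuid[8], uuid[9],
	       uuid[10], uuid[11], uuid[12], uuid[13], uuid[14], uuid[15]);
	return 0;	/* prints 1a85aa1a-e293-415e-8e28-8d690a0f820a */
}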
+
static void dsaf_write_sub(struct dsaf_device *dsaf_dev, u32 reg, u32 val)
{
if (dsaf_dev->sub_ctrl)
@@ -32,8 +53,8 @@ static u32 dsaf_read_sub(struct dsaf_device *dsaf_dev, u32 reg)
return ret;
}
-void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
- u16 speed, int data)
+static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
+ u16 speed, int data)
{
int speed_reg = 0;
u8 value;
@@ -65,13 +86,14 @@ void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
mac_cb->cpld_led_value = value;
}
} else {
- dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
- CPLD_LED_DEFAULT_VALUE);
- mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE;
+ value = (mac_cb->cpld_led_value) & (0x1 << DSAF_LED_ANCHOR_B);
+ dsaf_write_syscon(mac_cb->cpld_ctrl,
+ mac_cb->cpld_ctrl_reg, value);
+ mac_cb->cpld_led_value = value;
}
}
-void cpld_led_reset(struct hns_mac_cb *mac_cb)
+static void cpld_led_reset(struct hns_mac_cb *mac_cb)
{
if (!mac_cb || !mac_cb->cpld_ctrl)
return;
@@ -81,8 +103,8 @@ void cpld_led_reset(struct hns_mac_cb *mac_cb)
mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE;
}
-int cpld_set_led_id(struct hns_mac_cb *mac_cb,
- enum hnae_led_state status)
+static int cpld_set_led_id(struct hns_mac_cb *mac_cb,
+ enum hnae_led_state status)
{
switch (status) {
case HNAE_LED_ACTIVE:
@@ -93,7 +115,7 @@ int cpld_set_led_id(struct hns_mac_cb *mac_cb,
CPLD_LED_ON_VALUE);
dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
mac_cb->cpld_led_value);
- return 2;
+ break;
case HNAE_LED_INACTIVE:
dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
CPLD_LED_DEFAULT_VALUE);
@@ -101,7 +123,8 @@ int cpld_set_led_id(struct hns_mac_cb *mac_cb,
mac_cb->cpld_led_value);
break;
default:
- break;
+ dev_err(mac_cb->dev, "invalid led state: %d!", status);
+ return -EINVAL;
}
return 0;
@@ -109,12 +132,40 @@ int cpld_set_led_id(struct hns_mac_cb *mac_cb,
#define RESET_REQ_OR_DREQ 1
-void hns_dsaf_rst(struct dsaf_device *dsaf_dev, u32 val)
+static void hns_dsaf_acpi_srst_by_port(struct dsaf_device *dsaf_dev, u8 op_type,
+ u32 port_type, u32 port, u32 val)
+{
+ union acpi_object *obj;
+ union acpi_object obj_args[3], argv4;
+
+ obj_args[0].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[0].integer.value = port_type;
+ obj_args[1].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[1].integer.value = port;
+ obj_args[2].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[2].integer.value = val;
+
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 3;
+ argv4.package.elements = obj_args;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(dsaf_dev->dev),
+ hns_dsaf_acpi_dsm_uuid, 0, op_type, &argv4);
+ if (!obj) {
+ dev_warn(dsaf_dev->dev, "reset port_type%d port%d fail!",
+ port_type, port);
+ return;
+ }
+
+ ACPI_FREE(obj);
+}
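For a concrete invocation, the hns_dsaf_rst_acpi() wrapper just below drives this helper as:

/* _DSM rev 0, function HNS_OP_RESET_FUNC (0x1),
 * Arg3 package = { HNS_DSAF_RESET_FUNC (0x1), port = 0, val = dereset };
 * dereset = 0 asserts the reset, dereset = 1 releases it.
 */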
+
+static void hns_dsaf_rst(struct dsaf_device *dsaf_dev, bool dereset)
{
u32 xbar_reg_addr;
u32 nt_reg_addr;
- if (!val) {
+ if (!dereset) {
xbar_reg_addr = DSAF_SUB_SC_XBAR_RESET_REQ_REG;
nt_reg_addr = DSAF_SUB_SC_NT_RESET_REQ_REG;
} else {
@@ -126,7 +177,15 @@ void hns_dsaf_rst(struct dsaf_device *dsaf_dev, u32 val)
dsaf_write_sub(dsaf_dev, nt_reg_addr, RESET_REQ_OR_DREQ);
}
-void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
+static void hns_dsaf_rst_acpi(struct dsaf_device *dsaf_dev, bool dereset)
+{
+ hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+ HNS_DSAF_RESET_FUNC,
+ 0, dereset);
+}
+
+static void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
+ bool dereset)
{
u32 reg_val = 0;
u32 reg_addr;
@@ -137,7 +196,7 @@ void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
reg_val |= RESET_REQ_OR_DREQ;
reg_val |= 0x2082082 << dsaf_dev->mac_cb[port]->port_rst_off;
- if (val == 0)
+ if (!dereset)
reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG;
else
reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG;
@@ -145,8 +204,15 @@ void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
}
-void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
- u32 port, u32 val)
+static void hns_dsaf_xge_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
+ u32 port, bool dereset)
+{
+ hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+ HNS_XGE_RESET_FUNC, port, dereset);
+}
+
+static void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
+ u32 port, bool dereset)
{
u32 reg_val = 0;
u32 reg_addr;
@@ -157,7 +223,7 @@ void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
reg_val |= XGMAC_TRX_CORE_SRST_M
<< dsaf_dev->mac_cb[port]->port_rst_off;
- if (val == 0)
+ if (!dereset)
reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG;
else
reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG;
@@ -165,7 +231,16 @@ void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
}
-void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
+static void
+hns_dsaf_xge_core_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
+ u32 port, bool dereset)
+{
+ hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+ HNS_XGE_CORE_RESET_FUNC, port, dereset);
+}
+
+static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
+ bool dereset)
{
u32 reg_val_1;
u32 reg_val_2;
@@ -178,12 +253,11 @@ void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
reg_val_1 = 0x1 << port;
port_rst_off = dsaf_dev->mac_cb[port]->port_rst_off;
/* there is a difference between V1 and V2 registers. */
- if (AE_IS_VER1(dsaf_dev->dsaf_ver))
- reg_val_2 = 0x1041041 << port_rst_off;
- else
- reg_val_2 = 0x2082082 << port_rst_off;
+ reg_val_2 = AE_IS_VER1(dsaf_dev->dsaf_ver) ?
+ 0x1041041 : 0x2082082;
+ reg_val_2 <<= port_rst_off;
- if (val == 0) {
+ if (!dereset) {
dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG,
reg_val_1);
@@ -197,10 +271,13 @@ void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
reg_val_1);
}
} else {
- reg_val_1 = 0x15540 << dsaf_dev->reset_offset;
- reg_val_2 = 0x100 << dsaf_dev->reset_offset;
+ reg_val_1 = 0x15540;
+ reg_val_2 = AE_IS_VER1(dsaf_dev->dsaf_ver) ? 0x100 : 0x40;
+
+ reg_val_1 <<= dsaf_dev->reset_offset;
+ reg_val_2 <<= dsaf_dev->reset_offset;
- if (val == 0) {
+ if (!dereset) {
dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG,
reg_val_1);
@@ -216,14 +293,22 @@ void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
}
}
-void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
+static void hns_dsaf_ge_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
+ u32 port, bool dereset)
+{
+ hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+ HNS_GE_RESET_FUNC, port, dereset);
+}
+
+static void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
+ bool dereset)
{
u32 reg_val = 0;
u32 reg_addr;
reg_val |= RESET_REQ_OR_DREQ << dsaf_dev->mac_cb[port]->port_rst_off;
- if (val == 0)
+ if (!dereset)
reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG;
else
reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG;
@@ -231,15 +316,24 @@ void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
}
-void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val)
+static void
+hns_ppe_srst_by_port_acpi(struct dsaf_device *dsaf_dev, u32 port, bool dereset)
+{
+ hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+ HNS_PPE_RESET_FUNC, port, dereset);
+}
+
+static void hns_ppe_com_srst(struct dsaf_device *dsaf_dev, bool dereset)
{
- struct dsaf_device *dsaf_dev = ppe_common->dsaf_dev;
u32 reg_val;
u32 reg_addr;
+ if (!(dev_of_node(dsaf_dev->dev)))
+ return;
+
if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
reg_val = RESET_REQ_OR_DREQ;
- if (val == 0)
+ if (!dereset)
reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG;
else
reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG;
@@ -247,7 +341,7 @@ void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val)
} else {
reg_val = 0x100 << dsaf_dev->reset_offset;
- if (val == 0)
+ if (!dereset)
reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG;
else
reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG;
@@ -261,7 +355,7 @@ void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val)
* @mac_cb: mac control block
* return phy interface
*/
-phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb)
+static phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb)
{
u32 mode;
u32 reg;
@@ -293,6 +387,36 @@ phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb)
return phy_if;
}
+static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb)
+{
+ phy_interface_t phy_if = PHY_INTERFACE_MODE_NA;
+ union acpi_object *obj;
+ union acpi_object obj_args, argv4;
+
+ obj_args.integer.type = ACPI_TYPE_INTEGER;
+ obj_args.integer.value = mac_cb->mac_id;
+
+ argv4.type = ACPI_TYPE_PACKAGE,
+ argv4.package.count = 1,
+ argv4.package.elements = &obj_args,
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
+ hns_dsaf_acpi_dsm_uuid, 0,
+ HNS_OP_GET_PORT_TYPE_FUNC, &argv4);
+
+ if (!obj || obj->type != ACPI_TYPE_INTEGER)
+ return phy_if;
+
+ phy_if = obj->integer.value ?
+ PHY_INTERFACE_MODE_XGMII : PHY_INTERFACE_MODE_SGMII;
+
+ dev_dbg(mac_cb->dev, "mac_id=%d, phy_if=%d\n", mac_cb->mac_id, phy_if);
+
+ ACPI_FREE(obj);
+
+ return phy_if;
+}
+
int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
{
if (!mac_cb->cpld_ctrl)
@@ -309,13 +433,8 @@ int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
* @mac_cb: mac control block
* return 0 == success
*/
-int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en)
+static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
{
- /* port 0-3 hilink4 base is serdes_vaddr + 0x00280000
- * port 4-7 hilink3 base is serdes_vaddr + 0x00200000
- */
- u8 *base_addr = (u8 *)mac_cb->serdes_vaddr +
- (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000);
const u8 lane_id[] = {
0, /* mac 0 -> lane 0 */
1, /* mac 1 -> lane 1 */
@@ -332,7 +451,7 @@ int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en)
int sfp_prsnt;
int ret = hns_mac_get_sfp_prsnt(mac_cb, &sfp_prsnt);
- if (!mac_cb->phy_node) {
+ if (!mac_cb->phy_dev) {
if (ret)
pr_info("please confirm sfp is present or not\n");
else
@@ -341,13 +460,110 @@ int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en)
}
if (mac_cb->serdes_ctrl) {
- u32 origin = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset);
+ u32 origin;
+
+ if (!AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver)) {
+#define HILINK_ACCESS_SEL_CFG 0x40008
+ /* hilink4 & hilink3 share the same xge training and
+ * xge u adaptor; the hilink access sel cfg
+ * register selects which one is configured
+ */
+ if ((!HNS_DSAF_IS_DEBUG(mac_cb->dsaf_dev)) &&
+ (mac_cb->mac_id <= 3))
+ dsaf_write_syscon(mac_cb->serdes_ctrl,
+ HILINK_ACCESS_SEL_CFG, 0);
+ else
+ dsaf_write_syscon(mac_cb->serdes_ctrl,
+ HILINK_ACCESS_SEL_CFG, 3);
+ }
- dsaf_set_field(origin, 1ull << 10, 10, !!en);
+ origin = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset);
+
+ dsaf_set_field(origin, 1ull << 10, 10, en);
dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin);
} else {
- dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, !!en);
+ u8 *base_addr = (u8 *)mac_cb->serdes_vaddr +
+ (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000);
+ dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en);
+ }
+
+ return 0;
+}
+
+static int
+hns_mac_config_sds_loopback_acpi(struct hns_mac_cb *mac_cb, bool en)
+{
+ union acpi_object *obj;
+ union acpi_object obj_args[3], argv4;
+
+ obj_args[0].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[0].integer.value = mac_cb->mac_id;
+ obj_args[1].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[1].integer.value = !!en;
+
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 2;
+ argv4.package.elements = obj_args;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dsaf_dev->dev),
+ hns_dsaf_acpi_dsm_uuid, 0,
+ HNS_OP_SERDES_LP_FUNC, &argv4);
+ if (!obj) {
+ dev_warn(mac_cb->dsaf_dev->dev, "set port%d serdes lp fail!",
+ mac_cb->mac_id);
+
+ return -ENOTSUPP;
}
+ ACPI_FREE(obj);
+
return 0;
}
+
+struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
+{
+ struct dsaf_misc_op *misc_op;
+
+ misc_op = devm_kzalloc(dsaf_dev->dev, sizeof(*misc_op), GFP_KERNEL);
+ if (!misc_op)
+ return NULL;
+
+ if (dev_of_node(dsaf_dev->dev)) {
+ misc_op->cpld_set_led = hns_cpld_set_led;
+ misc_op->cpld_reset_led = cpld_led_reset;
+ misc_op->cpld_set_led_id = cpld_set_led_id;
+
+ misc_op->dsaf_reset = hns_dsaf_rst;
+ misc_op->xge_srst = hns_dsaf_xge_srst_by_port;
+ misc_op->xge_core_srst = hns_dsaf_xge_core_srst_by_port;
+ misc_op->ge_srst = hns_dsaf_ge_srst_by_port;
+ misc_op->ppe_srst = hns_ppe_srst_by_port;
+ misc_op->ppe_comm_srst = hns_ppe_com_srst;
+
+ misc_op->get_phy_if = hns_mac_get_phy_if;
+ misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt;
+
+ misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback;
+ } else if (is_acpi_node(dsaf_dev->dev->fwnode)) {
+ misc_op->cpld_set_led = hns_cpld_set_led;
+ misc_op->cpld_reset_led = cpld_led_reset;
+ misc_op->cpld_set_led_id = cpld_set_led_id;
+
+ misc_op->dsaf_reset = hns_dsaf_rst_acpi;
+ misc_op->xge_srst = hns_dsaf_xge_srst_by_port_acpi;
+ misc_op->xge_core_srst = hns_dsaf_xge_core_srst_by_port_acpi;
+ misc_op->ge_srst = hns_dsaf_ge_srst_by_port_acpi;
+ misc_op->ppe_srst = hns_ppe_srst_by_port_acpi;
+ misc_op->ppe_comm_srst = hns_ppe_com_srst;
+
+ misc_op->get_phy_if = hns_mac_get_phy_if_acpi;
+ misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt;
+
+ misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback_acpi;
+ } else {
+ devm_kfree(dsaf_dev->dev, (void *)misc_op);
+ misc_op = NULL;
+ }
+
+ return (void *)misc_op;
+}
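A minimal usage sketch of the resulting ops table, mirroring the hns_dsaf_init_hw() hunk earlier in this patch (every name below comes from the diff itself):

	dsaf_dev->misc_op = hns_misc_op_get(dsaf_dev);	/* picks DT or ACPI backend */
	if (!dsaf_dev->misc_op)
		return -ENOMEM;

	dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 0);	/* assert reset */
	mdelay(10);
	dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 1);	/* de-assert (dereset) */

The single indirection lets every caller stay firmware-agnostic instead of branching on dev_of_node()/is_acpi_node() at each reset site.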
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h
index 419f07aa9..f06bb03d4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h
@@ -33,11 +33,6 @@
#define DSAF_LED_DATA_B 4
#define DSAF_LED_ANCHOR_B 5
-void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
- u16 speed, int data);
-void cpld_led_reset(struct hns_mac_cb *mac_cb);
-int cpld_set_led_id(struct hns_mac_cb *mac_cb,
- enum hnae_led_state status);
-int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt);
+struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index 8cd151a52..6ea872287 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -112,7 +112,6 @@ void hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
int ppe_idx)
{
-
return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET;
}
@@ -200,11 +199,12 @@ static void hns_ppe_set_port_mode(struct hns_ppe_cb *ppe_cb,
static int hns_ppe_common_init_hw(struct ppe_common_cb *ppe_common)
{
enum ppe_qid_mode qid_mode;
- enum dsaf_mode dsaf_mode = ppe_common->dsaf_dev->dsaf_mode;
+ struct dsaf_device *dsaf_dev = ppe_common->dsaf_dev;
+ enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
- hns_ppe_com_srst(ppe_common, 0);
+ dsaf_dev->misc_op->ppe_comm_srst(dsaf_dev, 0);
mdelay(100);
- hns_ppe_com_srst(ppe_common, 1);
+ dsaf_dev->misc_op->ppe_comm_srst(dsaf_dev, 1);
mdelay(100);
if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) {
@@ -288,9 +288,9 @@ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb)
/* get default RSS key */
netdev_rss_key_fill(ppe_cb->rss_key, HNS_PPEV2_RSS_KEY_SIZE);
- hns_ppe_srst_by_port(dsaf_dev, port, 0);
+ dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 0);
mdelay(10);
- hns_ppe_srst_by_port(dsaf_dev, port, 1);
+ dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 1);
/* clr and msk except irq*/
hns_ppe_exc_irq_en(ppe_cb, 0);
@@ -330,8 +330,10 @@ static void hns_ppe_uninit_hw(struct hns_ppe_cb *ppe_cb)
u32 port;
if (ppe_cb->ppe_common_cb) {
+ struct dsaf_device *dsaf_dev = ppe_cb->ppe_common_cb->dsaf_dev;
+
port = ppe_cb->index;
- hns_ppe_srst_by_port(ppe_cb->ppe_common_cb->dsaf_dev, port, 0);
+ dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 0);
}
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 4ef6d23d9..ef1107777 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -458,7 +458,6 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
u32 i;
u32 ring_num = rcb_common->ring_num;
int base_irq_idx = hns_rcb_get_base_irq_idx(rcb_common);
- struct device_node *np = rcb_common->dsaf_dev->dev->of_node;
struct platform_device *pdev =
to_platform_device(rcb_common->dsaf_dev->dev);
bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);
@@ -473,10 +472,10 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
ring_pair_cb->port_id_in_comm =
hns_rcb_get_port_in_comm(rcb_common, i);
ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] =
- is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2) :
+ is_ver1 ? platform_get_irq(pdev, base_irq_idx + i * 2) :
platform_get_irq(pdev, base_irq_idx + i * 3 + 1);
ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] =
- is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2 + 1) :
+ is_ver1 ? platform_get_irq(pdev, base_irq_idx + i * 2 + 1) :
platform_get_irq(pdev, base_irq_idx + i * 3);
ring_pair_cb->q.phy_base =
RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i);
@@ -541,7 +540,7 @@ int hns_rcb_set_coalesce_usecs(
}
if (timeout > HNS_RCB_MAX_COALESCED_USECS) {
dev_err(rcb_common->dsaf_dev->dev,
- "error: not support coalesce %dus!\n", timeout);
+ "error: coalesce_usecs setting supports 0~1023us\n");
return -EINVAL;
}
hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
index bd54dac82..99b4e1ba0 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -40,7 +40,7 @@ struct rcb_common_cb;
#define HNS_RCB_DEF_COALESCED_FRAMES 50
#define HNS_RCB_CLK_FREQ_MHZ 350
#define HNS_RCB_MAX_COALESCED_USECS 0x3ff
-#define HNS_RCB_DEF_COALESCED_USECS 3
+#define HNS_RCB_DEF_COALESCED_USECS 50
#define HNS_RCB_COMMON_ENDIAN 1
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 7c3b5103d..235f74444 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -32,7 +32,7 @@
#define DSAFV2_SBM_NUM 8
#define DSAFV2_SBM_XGE_CHN 6
#define DSAFV2_SBM_PPE_CHN 1
-#define DASFV2_ROCEE_CRD_NUM 8
+#define DASFV2_ROCEE_CRD_NUM 1
#define DSAF_VOQ_NUM DSAF_NODE_NUM
#define DSAF_INODE_NUM DSAF_NODE_NUM
@@ -166,6 +166,9 @@
#define DSAF_INODE_GE_FC_EN_0_REG 0x1B00
#define DSAF_INODE_VC0_IN_PKT_NUM_0_REG 0x1B50
#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG 0x1C00
+#define DSAF_INODE_IN_PRIO_PAUSE_BASE_REG 0x1C00
+#define DSAF_INODE_IN_PRIO_PAUSE_BASE_OFFSET 0x100
+#define DSAF_INODE_IN_PRIO_PAUSE_OFFSET 0x50
#define DSAF_SBM_CFG_REG_0_REG 0x2000
#define DSAF_SBM_BP_CFG_0_XGE_REG_0_REG 0x2004
@@ -175,7 +178,7 @@
#define DSAF_SBM_BP_CFG_2_XGE_REG_0_REG 0x200C
#define DSAF_SBM_BP_CFG_2_PPE_REG_0_REG 0x230C
#define DSAF_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x260C
-#define DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x238C
+#define DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x238C
#define DSAF_SBM_FREE_CNT_0_0_REG 0x2010
#define DSAF_SBM_FREE_CNT_1_0_REG 0x2014
#define DSAF_SBM_BP_CNT_0_0_REG 0x2018
@@ -232,6 +235,8 @@
#define DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG 0x3074
#define DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG 0x3078
#define DSAF_XOD_FIFO_STATUS_0_REG 0x307C
+#define DSAF_XOD_XGE_PFC_PRIO_CNT_BASE_REG 0x3A00
+#define DSAF_XOD_XGE_PFC_PRIO_CNT_OFFSET 0x4
#define DSAF_VOQ_ECC_INVERT_EN_0_REG 0x4004
#define DSAF_VOQ_SRAM_PKT_NUM_0_REG 0x4008
@@ -791,6 +796,18 @@
#define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S 9
#define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M (((1ULL << 9) - 1) << 9)
+#define DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_S 0
+#define DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_M (((1ULL << 8) - 1) << 0)
+#define DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_S 8
+#define DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_M (((1ULL << 8) - 1) << 8)
+
+#define DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_S (0)
+#define DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_M (((1ULL << 6) - 1) << 0)
+#define DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_S (6)
+#define DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_M (((1ULL << 6) - 1) << 6)
+#define DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_S (12)
+#define DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_M (((1ULL << 6) - 1) << 12)
+
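As a worked example of how dsaf_set_field() consumes these mask/shift pairs in the SBM hunk above, writing 52 into the PPE useful-num field:

/* DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_M = ((1ULL << 6) - 1) << 12 = 0x3F000
 * dsaf_set_field(v, M, 12, 52):
 *	v = (v & ~0x3F000) | (52 << 12) = (v & ~0x3F000) | 0x34000
 */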
#define DSAF_TBL_TCAM_ADDR_S 0
#define DSAF_TBL_TCAM_ADDR_M ((1ULL << 9) - 1)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index fd90f3737..8f4f0e8da 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -119,7 +119,7 @@ static void hns_xgmac_enable(void *mac_drv, enum mac_commom_mode mode)
= (struct dsaf_device *)dev_get_drvdata(drv->dev);
u32 port = drv->mac_id;
- hns_dsaf_xge_core_srst_by_port(dsaf_dev, port, 1);
+ dsaf_dev->misc_op->xge_core_srst(dsaf_dev, port, 1);
mdelay(10);
/*enable XGE rX/tX */
@@ -157,7 +157,7 @@ static void hns_xgmac_disable(void *mac_drv, enum mac_commom_mode mode)
}
mdelay(10);
- hns_dsaf_xge_core_srst_by_port(dsaf_dev, port, 0);
+ dsaf_dev->misc_op->xge_core_srst(dsaf_dev, port, 0);
}
/**
@@ -198,9 +198,9 @@ static void hns_xgmac_init(void *mac_drv)
= (struct dsaf_device *)dev_get_drvdata(drv->dev);
u32 port = drv->mac_id;
- hns_dsaf_xge_srst_by_port(dsaf_dev, port, 0);
+ dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 0);
mdelay(100);
- hns_dsaf_xge_srst_by_port(dsaf_dev, port, 1);
+ dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 1);
mdelay(100);
hns_xgmac_exc_irq_en(drv, 0);
@@ -425,7 +425,7 @@ static void hns_xgmac_free(void *mac_drv)
u32 mac_id = drv->mac_id;
- hns_dsaf_xge_srst_by_port(dsaf_dev, mac_id, 0);
+ dsaf_dev->misc_op->xge_srst(dsaf_dev, mac_id, 0);
}
/**
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index e621636e6..d7e1f8c7a 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -132,6 +132,13 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
ring_ptr_move_fw(ring, next_to_use);
}
+static const struct acpi_device_id hns_enet_acpi_match[] = {
+ { "HISI00C1", 0 },
+ { "HISI00C2", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match);
+
static void fill_desc(struct hnae_ring *ring, void *priv,
int size, dma_addr_t dma, int frag_end,
int buf_num, enum hns_desc_type type, int mtu)
@@ -593,6 +600,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
ring->stats.sw_err_cnt++;
return -ENOMEM;
}
+ skb_reset_mac_header(skb);
prefetchw(skb->data);
length = le16_to_cpu(desc->rx.pkt_len);
@@ -754,16 +762,16 @@ static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
recv_pkts = 0, recv_bds = 0, clean_count = 0;
recv:
while (recv_pkts < budget && recv_bds < num) {
- /* reuse or realloc buffers*/
+ /* reuse or realloc buffers */
if (clean_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
hns_nic_alloc_rx_buffers(ring_data, clean_count);
clean_count = 0;
}
- /* poll one pkg*/
+ /* poll one pkt */
err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
if (unlikely(!skb)) /* this fault cannot be repaired */
- break;
+ goto out;
recv_bds += bnum;
clean_count += bnum;
@@ -789,6 +797,7 @@ recv:
}
}
+out:
/* make all data has been write before submit */
if (clean_count > 0)
hns_nic_alloc_rx_buffers(ring_data, clean_count);
@@ -983,8 +992,26 @@ static void hns_nic_adjust_link(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
+ int state = 1;
+
+ if (priv->phy) {
+ h->dev->ops->adjust_link(h, ndev->phydev->speed,
+ ndev->phydev->duplex);
+ state = priv->phy->link;
+ }
+ state = state && h->dev->ops->get_status(h);
- h->dev->ops->adjust_link(h, ndev->phydev->speed, ndev->phydev->duplex);
+ if (state != priv->link) {
+ if (state) {
+ netif_carrier_on(ndev);
+ netif_tx_wake_all_queues(ndev);
+ netdev_info(ndev, "link up\n");
+ } else {
+ netif_carrier_off(ndev);
+ netdev_info(ndev, "link down\n");
+ }
+ priv->link = state;
+ }
}
/**
@@ -996,19 +1023,22 @@ static void hns_nic_adjust_link(struct net_device *ndev)
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
- struct phy_device *phy_dev = NULL;
+ struct phy_device *phy_dev = h->phy_dev;
+ int ret;
- if (!h->phy_node)
+ if (!h->phy_dev)
return 0;
- if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
- phy_dev = of_phy_connect(ndev, h->phy_node,
- hns_nic_adjust_link, 0, h->phy_if);
- else
- phy_dev = of_phy_attach(ndev, h->phy_node, 0, h->phy_if);
+ if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
+ phy_dev->dev_flags = 0;
- if (unlikely(!phy_dev) || IS_ERR(phy_dev))
- return !phy_dev ? -ENODEV : PTR_ERR(phy_dev);
+ ret = phy_connect_direct(ndev, phy_dev, hns_nic_adjust_link,
+ h->phy_if);
+ } else {
+ ret = phy_attach_direct(ndev, phy_dev, 0, h->phy_if);
+ }
+ if (unlikely(ret))
+ return -ENODEV;
phy_dev->supported &= h->if_support;
phy_dev->advertising = phy_dev->supported;
@@ -1067,13 +1097,8 @@ void hns_nic_update_stats(struct net_device *netdev)
static void hns_init_mac_addr(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
- struct device_node *node = priv->dev->of_node;
- const void *mac_addr_temp;
- mac_addr_temp = of_get_mac_address(node);
- if (mac_addr_temp && is_valid_ether_addr(mac_addr_temp)) {
- memcpy(ndev->dev_addr, mac_addr_temp, ndev->addr_len);
- } else {
+ if (!device_get_mac_address(priv->dev, ndev->dev_addr, ETH_ALEN)) {
eth_hw_addr_random(ndev);
dev_warn(priv->dev, "No valid mac, use random mac %pM",
ndev->dev_addr);
@@ -1176,7 +1201,7 @@ static int hns_nic_net_up(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
- int i, j, k;
+ int i, j;
int ret;
ret = hns_nic_init_irq(priv);
@@ -1191,9 +1216,6 @@ static int hns_nic_net_up(struct net_device *ndev)
goto out_has_some_queues;
}
- for (k = 0; k < h->q_num; k++)
- h->dev->ops->toggle_queue_status(h->qs[k], 1);
-
ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
if (ret)
goto out_set_mac_addr_err;
@@ -1213,8 +1235,6 @@ static int hns_nic_net_up(struct net_device *ndev)
out_start_err:
netif_stop_queue(ndev);
out_set_mac_addr_err:
- for (k = 0; k < h->q_num; k++)
- h->dev->ops->toggle_queue_status(h->qs[k], 0);
out_has_some_queues:
for (j = i - 1; j >= 0; j--)
hns_nic_ring_close(ndev, j);
@@ -1421,7 +1441,6 @@ static int hns_nic_set_features(struct net_device *netdev,
netdev_features_t features)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
- struct hnae_handle *h = priv->ae_handle;
switch (priv->enet_ver) {
case AE_VERSION_1:
@@ -1434,11 +1453,9 @@ static int hns_nic_set_features(struct net_device *netdev,
priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
/* The chip only support 7*4096 */
netif_set_gso_max_size(netdev, 7 * 4096);
- h->dev->ops->set_tso_stats(h, 1);
} else {
priv->ops.fill_desc = fill_v2_desc;
priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
- h->dev->ops->set_tso_stats(h, 0);
}
break;
}
@@ -1571,27 +1588,14 @@ static void hns_nic_update_link_status(struct net_device *netdev)
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
- int state = 1;
- if (priv->phy) {
- if (!genphy_update_link(priv->phy))
- state = priv->phy->link;
- else
- state = 0;
- }
- state = state && h->dev->ops->get_status(h);
+ if (h->phy_dev) {
+ if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
+ return;
- if (state != priv->link) {
- if (state) {
- netif_carrier_on(netdev);
- netif_tx_wake_all_queues(netdev);
- netdev_info(netdev, "link up\n");
- } else {
- netif_carrier_off(netdev);
- netdev_info(netdev, "link down\n");
- }
- priv->link = state;
+ (void)genphy_read_status(h->phy_dev);
}
+ hns_nic_adjust_link(netdev);
}
/* for dumping key regs */
@@ -1627,7 +1631,7 @@ static void hns_nic_dump(struct hns_nic_priv *priv)
}
}
-/* for resetting suntask*/
+/* for resetting subtask */
static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
{
enum hnae_port_type type = priv->ae_handle->port_type;
@@ -1797,11 +1801,14 @@ static void hns_nic_set_priv_ops(struct net_device *netdev)
priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
/* This chip only support 7*4096 */
netif_set_gso_max_size(netdev, 7 * 4096);
- h->dev->ops->set_tso_stats(h, 1);
} else {
priv->ops.fill_desc = fill_v2_desc;
priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
}
+ /* enable tso at init time;
+ * tso is then toggled per-descriptor via the TSE bit in the bd
+ */
+ h->dev->ops->set_tso_stats(h, 1);
}
}
@@ -1812,7 +1819,7 @@ static int hns_nic_try_get_ae(struct net_device *ndev)
int ret;
h = hnae_get_handle(&priv->netdev->dev,
- priv->ae_node, priv->port_id, NULL);
+ priv->fwnode, priv->port_id, NULL);
if (IS_ERR_OR_NULL(h)) {
ret = -ENODEV;
dev_dbg(priv->dev, "has not handle, register notifier!\n");
@@ -1872,7 +1879,6 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct net_device *ndev;
struct hns_nic_priv *priv;
- struct device_node *node = dev->of_node;
u32 port_id;
int ret;
@@ -1886,22 +1892,49 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
priv->dev = dev;
priv->netdev = ndev;
- if (of_device_is_compatible(node, "hisilicon,hns-nic-v1"))
- priv->enet_ver = AE_VERSION_1;
- else
- priv->enet_ver = AE_VERSION_2;
+ if (dev_of_node(dev)) {
+ struct device_node *ae_node;
- priv->ae_node = (void *)of_parse_phandle(node, "ae-handle", 0);
- if (IS_ERR_OR_NULL(priv->ae_node)) {
- ret = PTR_ERR(priv->ae_node);
- dev_err(dev, "not find ae-handle\n");
- goto out_read_prop_fail;
+ if (of_device_is_compatible(dev->of_node,
+ "hisilicon,hns-nic-v1"))
+ priv->enet_ver = AE_VERSION_1;
+ else
+ priv->enet_ver = AE_VERSION_2;
+
+ ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0);
+ if (IS_ERR_OR_NULL(ae_node)) {
+ ret = PTR_ERR(ae_node);
+ dev_err(dev, "not find ae-handle\n");
+ goto out_read_prop_fail;
+ }
+ priv->fwnode = &ae_node->fwnode;
+ } else if (is_acpi_node(dev->fwnode)) {
+ struct acpi_reference_args args;
+
+ if (acpi_dev_found(hns_enet_acpi_match[0].id))
+ priv->enet_ver = AE_VERSION_1;
+ else if (acpi_dev_found(hns_enet_acpi_match[1].id))
+ priv->enet_ver = AE_VERSION_2;
+ else
+ return -ENXIO;
+
+ /* try to find port-idx-in-ae first */
+ ret = acpi_node_get_property_reference(dev->fwnode,
+ "ae-handle", 0, &args);
+ if (ret) {
+ dev_err(dev, "not find ae-handle\n");
+ goto out_read_prop_fail;
+ }
+ priv->fwnode = acpi_fwnode_handle(args.adev);
+ } else {
+ dev_err(dev, "cannot read cfg data from OF or acpi\n");
+ return -ENXIO;
}
- /* try to find port-idx-in-ae first */
- ret = of_property_read_u32(node, "port-idx-in-ae", &port_id);
+
+ ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id);
if (ret) {
/* only for old code compatible */
- ret = of_property_read_u32(node, "port-id", &port_id);
+ ret = device_property_read_u32(dev, "port-id", &port_id);
if (ret)
goto out_read_prop_fail;
/* for old dts, we need to calculate the port offset */
@@ -1940,7 +1973,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
dev_dbg(dev, "set mask to 64bit\n");
else
- dev_err(dev, "set mask to 32bit fail!\n");
+ dev_err(dev, "set mask to 64bit fail!\n");
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(ndev);
@@ -2014,6 +2047,7 @@ static struct platform_driver hns_nic_dev_driver = {
.driver = {
.name = "hns-nic",
.of_match_table = hns_enet_of_match,
+ .acpi_match_table = ACPI_PTR(hns_enet_acpi_match),
},
.probe = hns_nic_dev_probe,
.remove = hns_nic_dev_remove,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
index 337efa582..44bb3015e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
@@ -54,7 +54,7 @@ struct hns_nic_ops {
};
struct hns_nic_priv {
- const struct device_node *ae_node;
+ const struct fwnode_handle *fwnode;
u32 enet_ver;
u32 port_id;
int phy_mode;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 67a648c7d..ab33487a5 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -49,7 +49,7 @@ static u32 hns_nic_get_link(struct net_device *net_dev)
h = priv->ae_handle;
if (priv->phy) {
- if (!genphy_update_link(priv->phy))
+ if (!genphy_read_status(priv->phy))
link_stat = priv->phy->link;
else
link_stat = 0;
@@ -165,13 +165,21 @@ static int hns_nic_get_settings(struct net_device *net_dev,
cmd->advertising |= ADVERTISED_10000baseKR_Full;
}
- if (h->port_type == HNAE_PORT_SERVICE) {
+ switch (h->media_type) {
+ case HNAE_MEDIA_TYPE_FIBER:
cmd->port = PORT_FIBRE;
- cmd->supported |= SUPPORTED_Pause;
- } else {
+ break;
+ case HNAE_MEDIA_TYPE_COPPER:
cmd->port = PORT_TP;
+ break;
+ case HNAE_MEDIA_TYPE_UNKNOWN:
+ default:
+ break;
}
+ if (!(AE_IS_VER1(priv->enet_ver) && h->port_type == HNAE_PORT_DEBUG))
+ cmd->supported |= SUPPORTED_Pause;
+
cmd->transceiver = XCVR_EXTERNAL;
cmd->mdio_support = (ETH_MDIO_SUPPORTS_C45 | ETH_MDIO_SUPPORTS_C22);
hns_get_mdix_mode(net_dev, cmd);
@@ -242,6 +250,7 @@ static const char hns_nic_test_strs[][ETH_GSTRING_LEN] = {
static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
{
#define COPPER_CONTROL_REG 0
+#define PHY_POWER_DOWN BIT(11)
#define PHY_LOOP_BACK BIT(14)
u16 val = 0;
@@ -252,33 +261,40 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
/* speed : 1000M */
phy_write(phy_dev, HNS_PHY_PAGE_REG, 2);
phy_write(phy_dev, 21, 0x1046);
+
+ phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
/* Force Master */
phy_write(phy_dev, 9, 0x1F00);
+
/* Soft-reset */
phy_write(phy_dev, 0, 0x9140);
/* If autoneg is disabled, two soft-reset operations are needed */
phy_write(phy_dev, 0, 0x9140);
- phy_write(phy_dev, 22, 0xFA);
+
+ phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA);
/* Default is 0x0400 */
phy_write(phy_dev, 1, 0x418);
/* Force 1000M Link, Default is 0x0200 */
phy_write(phy_dev, 7, 0x20C);
- phy_write(phy_dev, 22, 0);
+ phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
- /* Enable MAC loop-back */
+ /* Enable PHY loop-back */
val = phy_read(phy_dev, COPPER_CONTROL_REG);
val |= PHY_LOOP_BACK;
+ val &= ~PHY_POWER_DOWN;
phy_write(phy_dev, COPPER_CONTROL_REG, val);
} else {
- phy_write(phy_dev, 22, 0xFA);
+ phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA);
phy_write(phy_dev, 1, 0x400);
phy_write(phy_dev, 7, 0x200);
- phy_write(phy_dev, 22, 0);
+ phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
+ phy_write(phy_dev, 9, 0xF00);
val = phy_read(phy_dev, COPPER_CONTROL_REG);
val &= ~PHY_LOOP_BACK;
+ val |= PHY_POWER_DOWN;
phy_write(phy_dev, COPPER_CONTROL_REG, val);
}
return 0;
@@ -339,28 +355,16 @@ static int __lb_up(struct net_device *ndev,
hns_nic_net_reset(ndev);
- if (priv->phy) {
- phy_disconnect(priv->phy);
- msleep(100);
-
- ret = hns_nic_init_phy(ndev, h);
- if (ret)
- return ret;
- }
-
ret = __lb_setup(ndev, loop_mode);
if (ret)
return ret;
- msleep(100);
+ msleep(200);
ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
if (ret)
return ret;
- if (priv->phy)
- phy_start(priv->phy);
-
/* link adjust duplex*/
if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII)
speed = 1000;
@@ -561,9 +565,6 @@ static int __lb_down(struct net_device *ndev)
__func__,
ret);
- if (priv->phy)
- phy_stop(priv->phy);
-
if (h->dev->ops->stop)
h->dev->ops->stop(h);
@@ -596,7 +597,7 @@ static void hns_nic_self_test(struct net_device *ndev,
st_param[1][0] = MAC_INTERNALLOOP_SERDES;
st_param[1][1] = 1; /*serdes must exist*/
st_param[2][0] = MAC_INTERNALLOOP_PHY; /* only supporte phy node*/
- st_param[2][1] = ((!!(priv->ae_handle->phy_node)) &&
+ st_param[2][1] = ((!!(priv->ae_handle->phy_dev)) &&
(priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII));
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
@@ -758,6 +759,16 @@ static int hns_get_coalesce(struct net_device *net_dev,
&ec->tx_max_coalesced_frames,
&ec->rx_max_coalesced_frames);
+ ops->get_coalesce_range(priv->ae_handle,
+ &ec->tx_max_coalesced_frames_low,
+ &ec->rx_max_coalesced_frames_low,
+ &ec->tx_max_coalesced_frames_high,
+ &ec->rx_max_coalesced_frames_high,
+ &ec->tx_coalesce_usecs_low,
+ &ec->rx_coalesce_usecs_low,
+ &ec->tx_coalesce_usecs_high,
+ &ec->rx_coalesce_usecs_high);
+
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 765ddb3dc..33f4c483a 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -7,6 +7,7 @@
* (at your option) any later version.
*/
+#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
@@ -36,9 +37,19 @@
#define MDIO_TIMEOUT 1000000
+struct hns_mdio_sc_reg {
+ u16 mdio_clk_en;
+ u16 mdio_clk_dis;
+ u16 mdio_reset_req;
+ u16 mdio_reset_dreq;
+ u16 mdio_clk_st;
+ u16 mdio_reset_st;
+};
+
struct hns_mdio_device {
void *vbase; /* mdio reg base address */
struct regmap *subctrl_vbase;
+ struct hns_mdio_sc_reg sc_reg;
};
/* mdio reg */
@@ -92,7 +103,6 @@ enum mdio_c45_op_seq {
#define MDIO_SC_CLK_DIS 0x33C
#define MDIO_SC_RESET_REQ 0xA38
#define MDIO_SC_RESET_DREQ 0xA3C
-#define MDIO_SC_CTRL 0x2010
#define MDIO_SC_CLK_ST 0x531C
#define MDIO_SC_RESET_ST 0x5A1C
@@ -352,69 +362,68 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
static int hns_mdio_reset(struct mii_bus *bus)
{
struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+ const struct hns_mdio_sc_reg *sc_reg;
int ret;
- if (!mdio_dev->subctrl_vbase) {
- dev_err(&bus->dev, "mdio sys ctl reg has not maped\n");
- return -ENODEV;
- }
-
- /*1. reset req, and read reset st check*/
- ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_RESET_REQ, 0x1,
- MDIO_SC_RESET_ST, 0x1,
- MDIO_CHECK_SET_ST);
- if (ret) {
- dev_err(&bus->dev, "MDIO reset fail\n");
- return ret;
- }
+ if (dev_of_node(bus->parent)) {
+ if (!mdio_dev->subctrl_vbase) {
+ dev_err(&bus->dev, "mdio sys ctl reg has not been mapped\n");
+ return -ENODEV;
+ }
- /*2. dis clk, and read clk st check*/
- ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_CLK_DIS,
- 0x1, MDIO_SC_CLK_ST, 0x1,
- MDIO_CHECK_CLR_ST);
- if (ret) {
- dev_err(&bus->dev, "MDIO dis clk fail\n");
- return ret;
- }
+ sc_reg = &mdio_dev->sc_reg;
+ /* 1. reset req, and read reset st check */
+ ret = mdio_sc_cfg_reg_write(mdio_dev, sc_reg->mdio_reset_req,
+ 0x1, sc_reg->mdio_reset_st, 0x1,
+ MDIO_CHECK_SET_ST);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO reset fail\n");
+ return ret;
+ }
- /*3. reset dreq, and read reset st check*/
- ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_RESET_DREQ, 0x1,
- MDIO_SC_RESET_ST, 0x1,
- MDIO_CHECK_CLR_ST);
- if (ret) {
- dev_err(&bus->dev, "MDIO dis clk fail\n");
- return ret;
- }
+ /* 2. dis clk, and read clk st check */
+ ret = mdio_sc_cfg_reg_write(mdio_dev, sc_reg->mdio_clk_dis,
+ 0x1, sc_reg->mdio_clk_st, 0x1,
+ MDIO_CHECK_CLR_ST);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO dis clk fail\n");
+ return ret;
+ }
- /*4. en clk, and read clk st check*/
- ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_CLK_EN,
- 0x1, MDIO_SC_CLK_ST, 0x1,
- MDIO_CHECK_SET_ST);
- if (ret)
- dev_err(&bus->dev, "MDIO en clk fail\n");
+ /* 3. reset dreq, and read reset st check */
+ ret = mdio_sc_cfg_reg_write(mdio_dev, sc_reg->mdio_reset_dreq,
+ 0x1, sc_reg->mdio_reset_st, 0x1,
+ MDIO_CHECK_CLR_ST);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO reset dreq fail\n");
+ return ret;
+ }
+ /* 4. en clk, and read clk st check */
+ ret = mdio_sc_cfg_reg_write(mdio_dev, sc_reg->mdio_clk_en,
+ 0x1, sc_reg->mdio_clk_st, 0x1,
+ MDIO_CHECK_SET_ST);
+ if (ret)
+ dev_err(&bus->dev, "MDIO en clk fail\n");
+ } else if (is_acpi_node(bus->parent->fwnode)) {
+ acpi_status s;
+
+ s = acpi_evaluate_object(ACPI_HANDLE(bus->parent),
+ "_RST", NULL, NULL);
+ if (ACPI_FAILURE(s)) {
+ dev_err(&bus->dev, "Reset failed, return:%#x\n", s);
+ ret = -EBUSY;
+ } else {
+ ret = 0;
+ }
+ } else {
+ dev_err(&bus->dev, "Cannot get cfg data from DT or ACPI\n");
+ ret = -ENXIO;
+ }
return ret;
}
/**
- * hns_mdio_bus_name - get mdio bus name
- * @name: mdio bus name
- * @np: mdio device node pointer
- */
-static void hns_mdio_bus_name(char *name, struct device_node *np)
-{
- const u32 *addr;
- u64 taddr = OF_BAD_ADDR;
-
- addr = of_get_address(np, 0, NULL, NULL);
- if (addr)
- taddr = of_translate_address(np, addr);
-
- snprintf(name, MII_BUS_ID_SIZE, "%s@%llx", np->name,
- (unsigned long long)taddr);
-}
-
-/**
* hns_mdio_probe - probe mdio device
* @pdev: mdio platform device
*
@@ -422,17 +431,16 @@ static void hns_mdio_bus_name(char *name, struct device_node *np)
*/
static int hns_mdio_probe(struct platform_device *pdev)
{
- struct device_node *np;
struct hns_mdio_device *mdio_dev;
struct mii_bus *new_bus;
struct resource *res;
- int ret;
+ int ret = -ENODEV;
if (!pdev) {
dev_err(NULL, "pdev is NULL!\r\n");
return -ENODEV;
}
- np = pdev->dev.of_node;
+
mdio_dev = devm_kzalloc(&pdev->dev, sizeof(*mdio_dev), GFP_KERNEL);
if (!mdio_dev)
return -ENOMEM;
@@ -448,7 +456,7 @@ static int hns_mdio_probe(struct platform_device *pdev)
new_bus->write = hns_mdio_write;
new_bus->reset = hns_mdio_reset;
new_bus->priv = mdio_dev;
- hns_mdio_bus_name(new_bus->id, np);
+ new_bus->parent = &pdev->dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mdio_dev->vbase = devm_ioremap_resource(&pdev->dev, res);
@@ -457,16 +465,73 @@ static int hns_mdio_probe(struct platform_device *pdev)
return ret;
}
- mdio_dev->subctrl_vbase =
- syscon_node_to_regmap(of_parse_phandle(np, "subctrl-vbase", 0));
- if (IS_ERR(mdio_dev->subctrl_vbase)) {
- dev_warn(&pdev->dev, "no syscon hisilicon,peri-c-subctrl\n");
- mdio_dev->subctrl_vbase = NULL;
- }
- new_bus->parent = &pdev->dev;
platform_set_drvdata(pdev, new_bus);
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%s", "Mii",
+ dev_name(&pdev->dev));
+ if (dev_of_node(&pdev->dev)) {
+ struct of_phandle_args reg_args;
+
+ ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
+ "subctrl-vbase",
+ 4,
+ 0,
+ &reg_args);
+ if (!ret) {
+ mdio_dev->subctrl_vbase =
+ syscon_node_to_regmap(reg_args.np);
+ if (IS_ERR(mdio_dev->subctrl_vbase)) {
+ dev_warn(&pdev->dev, "syscon_node_to_regmap error\n");
+ mdio_dev->subctrl_vbase = NULL;
+ } else {
+ if (reg_args.args_count == 4) {
+ mdio_dev->sc_reg.mdio_clk_en =
+ (u16)reg_args.args[0];
+ mdio_dev->sc_reg.mdio_clk_dis =
+ (u16)reg_args.args[0] + 4;
+ mdio_dev->sc_reg.mdio_reset_req =
+ (u16)reg_args.args[1];
+ mdio_dev->sc_reg.mdio_reset_dreq =
+ (u16)reg_args.args[1] + 4;
+ mdio_dev->sc_reg.mdio_clk_st =
+ (u16)reg_args.args[2];
+ mdio_dev->sc_reg.mdio_reset_st =
+ (u16)reg_args.args[3];
+ } else {
+ /* for backwards compatibility */
+ mdio_dev->sc_reg.mdio_clk_en =
+ MDIO_SC_CLK_EN;
+ mdio_dev->sc_reg.mdio_clk_dis =
+ MDIO_SC_CLK_DIS;
+ mdio_dev->sc_reg.mdio_reset_req =
+ MDIO_SC_RESET_REQ;
+ mdio_dev->sc_reg.mdio_reset_dreq =
+ MDIO_SC_RESET_DREQ;
+ mdio_dev->sc_reg.mdio_clk_st =
+ MDIO_SC_CLK_ST;
+ mdio_dev->sc_reg.mdio_reset_st =
+ MDIO_SC_RESET_ST;
+ }
+ }
+ } else {
+ dev_warn(&pdev->dev, "find syscon ret = %#x\n", ret);
+ mdio_dev->subctrl_vbase = NULL;
+ }
+
+ ret = of_mdiobus_register(new_bus, pdev->dev.of_node);
+ } else if (is_acpi_node(pdev->dev.fwnode)) {
+ /* Clear all the IRQ properties */
+ memset(new_bus->irq, PHY_POLL, 4 * PHY_MAX_ADDR);
+
+ /* Mask out all PHYs from auto probing. */
+ new_bus->phy_mask = ~0;
+
+ /* Register the MDIO bus */
+ ret = mdiobus_register(new_bus);
+ } else {
+ dev_err(&pdev->dev, "Cannot get cfg data from DT or ACPI\n");
+ ret = -ENXIO;
+ }
- ret = of_mdiobus_register(new_bus, np);
if (ret) {
dev_err(&pdev->dev, "Cannot register as MDIO bus!\n");
platform_set_drvdata(pdev, NULL);
@@ -499,12 +564,19 @@ static const struct of_device_id hns_mdio_match[] = {
{}
};
+static const struct acpi_device_id hns_mdio_acpi_match[] = {
+ { "HISI0141", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, hns_mdio_acpi_match);
+
static struct platform_driver hns_mdio_driver = {
.probe = hns_mdio_probe,
.remove = hns_mdio_remove,
.driver = {
.name = MDIO_DRV_NAME,
.of_match_table = hns_mdio_match,
+ .acpi_match_table = ACPI_PTR(hns_mdio_acpi_match),
},
};
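
The rewritten hns_mdio_reset() above dispatches on firmware type: DT platforms drive the four-step syscon sequence through the new per-device sc_reg offsets, while ACPI platforms delegate the whole sequence to the device's _RST method. A minimal sketch of that shape, assuming hypothetical helpers in place of the kernel's regmap and ACPI calls:

#include <stdbool.h>
#include <errno.h>

struct sc_reg { unsigned clk_en, clk_dis, reset_req, reset_dreq, clk_st, reset_st; };

/* stand-ins for the kernel helpers used in the hunk above */
extern bool dev_is_of(void *dev);
extern bool dev_is_acpi(void *dev);
extern int  sc_write_and_poll(unsigned reg, unsigned val, unsigned st_reg, int want_set);
extern int  acpi_rst(void *dev);   /* evaluates _RST; 0 or -EBUSY */

static int mdio_reset(void *dev, const struct sc_reg *r)
{
    int ret;

    if (dev_is_of(dev)) {
        /* same 4-step dance as above: assert reset, gate the clock,
         * de-assert reset, re-enable the clock, polling a status
         * register after each step */
        ret = sc_write_and_poll(r->reset_req,  1, r->reset_st, 1);
        if (!ret) ret = sc_write_and_poll(r->clk_dis,    1, r->clk_st,   0);
        if (!ret) ret = sc_write_and_poll(r->reset_dreq, 1, r->reset_st, 0);
        if (!ret) ret = sc_write_and_poll(r->clk_en,     1, r->clk_st,   1);
        return ret;
    }
    if (dev_is_acpi(dev))
        return acpi_rst(dev);      /* firmware owns the sequence */
    return -ENXIO;                 /* neither DT nor ACPI */
}
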
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 4c9771d57..7af09cbc5 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -977,7 +977,37 @@ static void emac_set_multicast_list(struct net_device *ndev)
dev->mcast_pending = 1;
return;
}
+
+ mutex_lock(&dev->link_lock);
__emac_set_multicast_list(dev);
+ mutex_unlock(&dev->link_lock);
+}
+
+static int emac_set_mac_address(struct net_device *ndev, void *sa)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+ struct sockaddr *addr = sa;
+ struct emac_regs __iomem *p = dev->emacp;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ mutex_lock(&dev->link_lock);
+
+ memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+
+ emac_rx_disable(dev);
+ emac_tx_disable(dev);
+ out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
+ out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
+ (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
+ ndev->dev_addr[5]);
+ emac_tx_enable(dev);
+ emac_rx_enable(dev);
+
+ mutex_unlock(&dev->link_lock);
+
+ return 0;
}
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
@@ -2686,7 +2716,7 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_do_ioctl = emac_ioctl,
.ndo_tx_timeout = emac_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = emac_set_mac_address,
.ndo_start_xmit = emac_start_xmit,
.ndo_change_mtu = eth_change_mtu,
};
@@ -2699,7 +2729,7 @@ static const struct net_device_ops emac_gige_netdev_ops = {
.ndo_do_ioctl = emac_ioctl,
.ndo_tx_timeout = emac_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = emac_set_mac_address,
.ndo_start_xmit = emac_start_xmit_sg,
.ndo_change_mtu = emac_change_mtu,
};
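
The added emac_set_mac_address() quiesces RX/TX and then splits the six address bytes across the IAHR/IALR register pair. The packing is the only subtle part; here it is as a standalone sketch that mirrors the out_be32() math above:

#include <stdint.h>

/* Pack a 6-byte MAC into the two EMAC individual-address registers.
 * Layout matches the out_be32() calls above: IAHR gets bytes 0..1,
 * IALR gets bytes 2..5, most significant byte first. */
static void emac_pack_mac(const uint8_t mac[6], uint32_t *iahr, uint32_t *ialr)
{
    *iahr = ((uint32_t)mac[0] << 8) | mac[1];
    *ialr = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
            ((uint32_t)mac[4] << 8)  |  (uint32_t)mac[5];
}

/* e.g. 02:00:00:0a:0b:0c -> iahr = 0x00000200, ialr = 0x000a0b0c */
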
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 714bd1014..c0e17433f 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -167,17 +167,6 @@ config IXGBE
To compile this driver as a module, choose M here. The module
will be called ixgbe.
-config IXGBE_VXLAN
- bool "Virtual eXtensible Local Area Network Support"
- default n
- depends on IXGBE && VXLAN && !(IXGBE=y && VXLAN=m)
- ---help---
- This allows one to create VXLAN virtual interfaces that provide
- Layer 2 Networks over Layer 3 Networks. VXLAN is often used
- to tunnel virtual network infrastructure in virtualized environments.
- Say Y here if you want to use Virtual eXtensible Local Area Network
- (VXLAN) in the driver.
-
config IXGBE_HWMON
bool "Intel(R) 10GbE PCI Express adapters HWMON support"
default y
@@ -236,27 +225,6 @@ config I40E
To compile this driver as a module, choose M here. The module
will be called i40e.
-config I40E_VXLAN
- bool "Virtual eXtensible Local Area Network Support"
- default n
- depends on I40E && VXLAN && !(I40E=y && VXLAN=m)
- ---help---
- This allows one to create VXLAN virtual interfaces that provide
- Layer 2 Networks over Layer 3 Networks. VXLAN is often used
- to tunnel virtual network infrastructure in virtualized environments.
- Say Y here if you want to use Virtual eXtensible Local Area Network
- (VXLAN) in the driver.
-
-config I40E_GENEVE
- bool "Generic Network Virtualization Encapsulation (GENEVE) Support"
- depends on I40E && GENEVE && !(I40E=y && GENEVE=m)
- default n
- ---help---
- This allows one to create GENEVE virtual interfaces that provide
- Layer 2 Networks over Layer 3 Networks. GENEVE is often used
- to tunnel virtual network infrastructure in virtualized environments.
- Say Y here if you want to use GENEVE in the driver.
-
config I40E_DCB
bool "Data Center Bridging (DCB) Support"
default n
@@ -307,15 +275,4 @@ config FM10K
To compile this driver as a module, choose M here. The module
will be called fm10k. MSI-X interrupt support is required
-config FM10K_VXLAN
- bool "Virtual eXtensible Local Area Network Support"
- default n
- depends on FM10K && VXLAN && !(FM10K=y && VXLAN=m)
- ---help---
- This allows one to create VXLAN virtual interfaces that provide
- Layer 2 Networks over Layer 3 Networks. VXLAN is often used
- to tunnel virtual network infrastructure in virtualized environments.
- Say Y here if you want to use Virtual eXtensible Local Area Network
- (VXLAN) in the driver.
-
endif # NET_VENDOR_INTEL
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 7fd4d5459..6b03c8553 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -2032,7 +2032,8 @@ const struct e1000_info e1000_82574_info = {
| FLAG2_DISABLE_ASPM_L0S
| FLAG2_DISABLE_ASPM_L1
| FLAG2_NO_DISABLE_RX
- | FLAG2_DMA_BURST,
+ | FLAG2_DMA_BURST
+ | FLAG2_CHECK_SYSTIM_OVERFLOW,
.pba = 32,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
@@ -2053,7 +2054,8 @@ const struct e1000_info e1000_82583_info = {
| FLAG_HAS_CTRLEXT_ON_LOAD,
.flags2 = FLAG2_DISABLE_ASPM_L0S
| FLAG2_DISABLE_ASPM_L1
- | FLAG2_NO_DISABLE_RX,
+ | FLAG2_NO_DISABLE_RX
+ | FLAG2_CHECK_SYSTIM_OVERFLOW,
.pba = 32,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index ef96cd11d..879cca47b 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -452,6 +452,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
#define FLAG2_PCIM2PCI_ARBITER_WA BIT(11)
#define FLAG2_DFLT_CRC_STRIPPING BIT(12)
#define FLAG2_CHECK_RX_HWTSTAMP BIT(13)
+#define FLAG2_CHECK_SYSTIM_OVERFLOW BIT(14)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 3e11322d8..f3aaca743 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -5885,7 +5885,8 @@ const struct e1000_info e1000_pch_lpt_info = {
| FLAG_HAS_JUMBO_FRAMES
| FLAG_APME_IN_WUC,
.flags2 = FLAG2_HAS_PHY_STATS
- | FLAG2_HAS_EEE,
+ | FLAG2_HAS_EEE
+ | FLAG2_CHECK_SYSTIM_OVERFLOW,
.pba = 26,
.max_hw_frame_size = 9022,
.get_variants = e1000_get_variants_ich8lan,
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 2b2e2f8c6..7017281ba 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4303,6 +4303,42 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
}
/**
+ * e1000e_sanitize_systim - sanitize raw cycle counter reads
+ * @hw: pointer to the HW structure
+ * @systim: cycle_t value read, sanitized and returned
+ *
+ * Errata for 82574/82583 possible bad bits read from SYSTIMH/L:
+ * check to see that the time is incrementing at a reasonable
+ * rate and is a multiple of incvalue.
+ **/
+static cycle_t e1000e_sanitize_systim(struct e1000_hw *hw, cycle_t systim)
+{
+ u64 time_delta, rem, temp;
+ cycle_t systim_next;
+ u32 incvalue;
+ int i;
+
+ incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
+ for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
+ /* latch SYSTIMH on read of SYSTIML */
+ systim_next = (cycle_t)er32(SYSTIML);
+ systim_next |= (cycle_t)er32(SYSTIMH) << 32;
+
+ time_delta = systim_next - systim;
+ temp = time_delta;
+ /* VMware users have seen incvalue of zero, don't divide by 0 */
+ rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0);
+
+ systim = systim_next;
+
+ if ((time_delta < E1000_82574_SYSTIM_EPSILON) && (rem == 0))
+ break;
+ }
+
+ return systim;
+}
+
+/**
* e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
* @cc: cyclecounter structure
**/
@@ -4312,7 +4348,7 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
cc);
struct e1000_hw *hw = &adapter->hw;
u32 systimel, systimeh;
- cycle_t systim, systim_next;
+ cycle_t systim;
/* SYSTIMH latching upon SYSTIML read does not work well.
* This means that if SYSTIML overflows after we read it but before
* we read SYSTIMH, the value of SYSTIMH has been incremented and we
@@ -4335,32 +4371,9 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
systim = (cycle_t)systimel;
systim |= (cycle_t)systimeh << 32;
- if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
- u64 time_delta, rem, temp;
- u32 incvalue;
- int i;
-
- /* errata for 82574/82583 possible bad bits read from SYSTIMH/L
- * check to see that the time is incrementing at a reasonable
- * rate and is a multiple of incvalue
- */
- incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
- for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
- /* latch SYSTIMH on read of SYSTIML */
- systim_next = (cycle_t)er32(SYSTIML);
- systim_next |= (cycle_t)er32(SYSTIMH) << 32;
-
- time_delta = systim_next - systim;
- temp = time_delta;
- rem = do_div(temp, incvalue);
-
- systim = systim_next;
+ if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW)
+ systim = e1000e_sanitize_systim(hw, systim);
- if ((time_delta < E1000_82574_SYSTIM_EPSILON) &&
- (rem == 0))
- break;
- }
- }
return systim;
}
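
e1000e_sanitize_systim(), factored out above, re-reads the SYSTIM pair until the delta between reads is small and a clean multiple of TIMINCA's increment; the new ternary also avoids do_div() faulting when incvalue reads back as zero (seen under VMware). A self-contained model of the loop, with read_systim() as a stand-in for the SYSTIML/SYSTIMH reads and an illustrative epsilon:

#include <stdint.h>

#define MAX_REREADS 3
#define EPSILON     12345   /* illustrative bound; the driver uses its own */

extern uint64_t read_systim(void);  /* stand-in for latched SYSTIML/SYSTIMH */

static uint64_t sanitize_systim(uint64_t systim, uint32_t incvalue)
{
    for (int i = 0; i < MAX_REREADS; i++) {
        uint64_t next  = read_systim();
        uint64_t delta = next - systim;
        /* guard: incvalue of 0 would fault in do_div(); treat any
         * nonzero delta as "not a clean multiple" instead */
        uint64_t rem = incvalue ? delta % incvalue : (delta != 0);

        systim = next;
        if (delta < EPSILON && rem == 0)
            break;              /* delta looks sane: accept this read */
    }
    return systim;
}
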
@@ -7329,8 +7342,7 @@ err_flashmap:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
pci_disable_device(pdev);
@@ -7397,8 +7409,7 @@ static void e1000_remove(struct pci_dev *pdev)
if ((adapter->hw.flash_address) &&
(adapter->hw.mac.type < e1000_pch_spt))
iounmap(adapter->hw.flash_address);
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
free_netdev(netdev);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index fcf106e54..c4cf08dcf 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -362,6 +362,7 @@ enum fm10k_state_t {
__FM10K_SERVICE_DISABLE,
__FM10K_MBX_LOCK,
__FM10K_LINK_DOWN,
+ __FM10K_UPDATING_STATS,
};
static inline void fm10k_mbx_lock(struct fm10k_intfc *interface)
@@ -406,7 +407,7 @@ static inline u16 fm10k_desc_unused(struct fm10k_ring *ring)
(&(((union fm10k_rx_desc *)((R)->desc))[i]))
#define FM10K_MAX_TXD_PWR 14
-#define FM10K_MAX_DATA_PER_TXD BIT(FM10K_MAX_TXD_PWR)
+#define FM10K_MAX_DATA_PER_TXD (1u << FM10K_MAX_TXD_PWR)
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), FM10K_MAX_DATA_PER_TXD)
@@ -457,6 +458,7 @@ __be16 fm10k_tx_encap_offload(struct sk_buff *skb);
netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
struct fm10k_ring *tx_ring);
void fm10k_tx_timeout_reset(struct fm10k_intfc *interface);
+u64 fm10k_get_tx_pending(struct fm10k_ring *ring);
bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring);
void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
index 5bbf19cfe..d6baaea8b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
@@ -519,8 +519,12 @@ s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready)
goto out;
/* interface cannot receive traffic without logical ports */
- if (mac->dglort_map == FM10K_DGLORTMAP_NONE)
+ if (mac->dglort_map == FM10K_DGLORTMAP_NONE) {
+ if (hw->mac.ops.request_lport_map)
+ ret_val = hw->mac.ops.request_lport_map(hw);
+
goto out;
+ }
/* if we passed all the tests above then the switch is ready and we no
* longer need to check for link
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 9c0d87503..c04cbe9c9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -76,6 +76,8 @@ static const struct fm10k_stats fm10k_gstrings_global_stats[] = {
FM10K_STAT("mac_rules_used", hw.swapi.mac.used),
FM10K_STAT("mac_rules_avail", hw.swapi.mac.avail),
+ FM10K_STAT("reset_while_pending", hw.mac.reset_while_pending),
+
FM10K_STAT("tx_hang_count", tx_timeout_count),
};
@@ -983,9 +985,10 @@ void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir)
/* generate a new table if we weren't given one */
for (j = 0; j < 4; j++) {
if (indir)
- n = indir[i + j];
+ n = indir[4 * i + j];
else
- n = ethtool_rxfh_indir_default(i + j, rss_i);
+ n = ethtool_rxfh_indir_default(4 * i + j,
+ rss_i);
table[j] = n;
}
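
The fm10k_write_reta() change is a pure indexing fix: each 32-bit RETA register holds four one-byte table entries, so register i must consume entries 4*i+j rather than i+j, otherwise three quarters of the table is never used. A tiny model of the corrected packing, assuming the 32-register/128-entry table this hardware uses:

#include <stdint.h>

/* Pack a 128-entry RSS indirection table into 32 registers,
 * 4 one-byte entries per 32-bit word (byte j in bits 8*j..8*j+7,
 * matching a u8 view of the register array). */
static void write_reta(const uint32_t indir[128], uint32_t table[32])
{
    for (int i = 0; i < 32; i++) {
        uint32_t reg = 0;
        for (int j = 0; j < 4; j++)
            reg |= (indir[4 * i + j] & 0xff) << (8 * j);
        table[i] = reg;   /* the driver writes this to the RETA register */
    }
}
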
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 0e166e9c9..e9767b636 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -28,7 +28,7 @@
#include "fm10k.h"
-#define DRV_VERSION "0.19.3-k"
+#define DRV_VERSION "0.21.2-k"
#define DRV_SUMMARY "Intel(R) Ethernet Switch Host Interface Driver"
const char fm10k_driver_version[] = DRV_VERSION;
char fm10k_driver_name[] = "fm10k";
@@ -56,7 +56,7 @@ static int __init fm10k_init_module(void)
pr_info("%s\n", fm10k_copyright);
/* create driver workqueue */
- fm10k_workqueue = create_workqueue("fm10k");
+ fm10k_workqueue = alloc_workqueue("fm10k", WQ_MEM_RECLAIM, 0);
fm10k_dbg_init();
@@ -77,7 +77,6 @@ static void __exit fm10k_exit_module(void)
fm10k_dbg_exit();
/* destroy driver workqueue */
- flush_workqueue(fm10k_workqueue);
destroy_workqueue(fm10k_workqueue);
}
module_exit(fm10k_exit_module);
@@ -272,7 +271,7 @@ static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
#if (PAGE_SIZE < 8192)
unsigned int truesize = FM10K_RX_BUFSZ;
#else
- unsigned int truesize = SKB_DATA_ALIGN(size);
+ unsigned int truesize = ALIGN(size, 512);
#endif
unsigned int pull_len;
@@ -1129,11 +1128,13 @@ static u64 fm10k_get_tx_completed(struct fm10k_ring *ring)
return ring->stats.packets;
}
-static u64 fm10k_get_tx_pending(struct fm10k_ring *ring)
+u64 fm10k_get_tx_pending(struct fm10k_ring *ring)
{
- /* use SW head and tail until we have real hardware */
- u32 head = ring->next_to_clean;
- u32 tail = ring->next_to_use;
+ struct fm10k_intfc *interface = ring->q_vector->interface;
+ struct fm10k_hw *hw = &interface->hw;
+
+ u32 head = fm10k_read_reg(hw, FM10K_TDH(ring->reg_idx));
+ u32 tail = fm10k_read_reg(hw, FM10K_TDT(ring->reg_idx));
return ((head <= tail) ? tail : tail + ring->count) - head;
}
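
fm10k_get_tx_pending() now reads the hardware TDH/TDT pointers instead of the software copies, and the final expression is the standard ring-occupancy calculation with wraparound correction. Spelled out:

#include <stdint.h>

/* Descriptors still owned by hardware in a ring of `count` slots.
 * head chases tail; when tail has wrapped past the end of the ring
 * it is numerically smaller than head, so add one full ring first. */
static uint32_t tx_pending(uint32_t head, uint32_t tail, uint32_t count)
{
    return ((head <= tail) ? tail : tail + count) - head;
}

/* e.g. count=256, head=250, tail=10 -> (10+256)-250 = 16 pending */
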
@@ -1857,7 +1858,7 @@ static int fm10k_init_msix_capability(struct fm10k_intfc *interface)
if (v_budget < 0) {
kfree(interface->msix_entries);
interface->msix_entries = NULL;
- return -ENOMEM;
+ return v_budget;
}
/* record the number of queues available for q_vectors */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
index b7dbc8a84..35c1dbad1 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
@@ -41,6 +41,8 @@ struct fm10k_mbx_info;
#define FM10K_MBX_ACK_INTERRUPT 0x00000010
#define FM10K_MBX_INTERRUPT_ENABLE 0x00000020
#define FM10K_MBX_INTERRUPT_DISABLE 0x00000040
+#define FM10K_MBX_GLOBAL_REQ_INTERRUPT 0x00000200
+#define FM10K_MBX_GLOBAL_ACK_INTERRUPT 0x00000400
#define FM10K_MBICR(_n) ((_n) + 0x18840)
#define FM10K_GMBX 0x18842
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 2a08d3f5b..20a5bbe3f 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -20,9 +20,7 @@
#include "fm10k.h"
#include <linux/vmalloc.h>
-#ifdef CONFIG_FM10K_VXLAN
-#include <net/vxlan.h>
-#endif /* CONFIG_FM10K_VXLAN */
+#include <net/udp_tunnel.h>
/**
* fm10k_setup_tx_resources - allocate Tx resources (Descriptors)
@@ -434,8 +432,7 @@ static void fm10k_restore_vxlan_port(struct fm10k_intfc *interface)
/**
* fm10k_add_vxlan_port
* @netdev: network interface device structure
- * @sa_family: Address family of new port
- * @port: port number used for VXLAN
+ * @ti: Tunnel endpoint information
*
* This function is called when a new VXLAN interface has added a new port
* number to the range that is currently in use for VXLAN. The new port
@@ -444,18 +441,21 @@ static void fm10k_restore_vxlan_port(struct fm10k_intfc *interface)
* is always used as the VXLAN port number for offloads.
**/
static void fm10k_add_vxlan_port(struct net_device *dev,
- sa_family_t sa_family, __be16 port) {
+ struct udp_tunnel_info *ti)
+{
struct fm10k_intfc *interface = netdev_priv(dev);
struct fm10k_vxlan_port *vxlan_port;
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
/* only the PF supports configuring tunnels */
if (interface->hw.mac.type != fm10k_mac_pf)
return;
/* existing ports are pulled out so our new entry is always last */
fm10k_vxlan_port_for_each(vxlan_port, interface) {
- if ((vxlan_port->port == port) &&
- (vxlan_port->sa_family == sa_family)) {
+ if ((vxlan_port->port == ti->port) &&
+ (vxlan_port->sa_family == ti->sa_family)) {
list_del(&vxlan_port->list);
goto insert_tail;
}
@@ -465,8 +465,8 @@ static void fm10k_add_vxlan_port(struct net_device *dev,
vxlan_port = kmalloc(sizeof(*vxlan_port), GFP_ATOMIC);
if (!vxlan_port)
return;
- vxlan_port->port = port;
- vxlan_port->sa_family = sa_family;
+ vxlan_port->port = ti->port;
+ vxlan_port->sa_family = ti->sa_family;
insert_tail:
/* add new port value to list */
@@ -478,8 +478,7 @@ insert_tail:
/**
* fm10k_del_vxlan_port
* @netdev: network interface device structure
- * @sa_family: Address family of freed port
- * @port: port number used for VXLAN
+ * @ti: Tunnel endpoint information
*
* This function is called when a new VXLAN interface has freed a port
* number from the range that is currently in use for VXLAN. The freed
@@ -487,17 +486,20 @@ insert_tail:
* the port number for offloads.
**/
static void fm10k_del_vxlan_port(struct net_device *dev,
- sa_family_t sa_family, __be16 port) {
+ struct udp_tunnel_info *ti)
+{
struct fm10k_intfc *interface = netdev_priv(dev);
struct fm10k_vxlan_port *vxlan_port;
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
if (interface->hw.mac.type != fm10k_mac_pf)
return;
/* find the port in the list and free it */
fm10k_vxlan_port_for_each(vxlan_port, interface) {
- if ((vxlan_port->port == port) &&
- (vxlan_port->sa_family == sa_family)) {
+ if ((vxlan_port->port == ti->port) &&
+ (vxlan_port->sa_family == ti->sa_family)) {
list_del(&vxlan_port->list);
kfree(vxlan_port);
break;
@@ -553,10 +555,8 @@ int fm10k_open(struct net_device *netdev)
if (err)
goto err_set_queues;
-#ifdef CONFIG_FM10K_VXLAN
/* update VXLAN port configuration */
- vxlan_get_rx_port(netdev);
-#endif
+ udp_tunnel_get_rx_info(netdev);
fm10k_up(interface);
@@ -1375,8 +1375,8 @@ static const struct net_device_ops fm10k_netdev_ops = {
.ndo_set_vf_vlan = fm10k_ndo_set_vf_vlan,
.ndo_set_vf_rate = fm10k_ndo_set_vf_bw,
.ndo_get_vf_config = fm10k_ndo_get_vf_config,
- .ndo_add_vxlan_port = fm10k_add_vxlan_port,
- .ndo_del_vxlan_port = fm10k_del_vxlan_port,
+ .ndo_udp_tunnel_add = fm10k_add_vxlan_port,
+ .ndo_udp_tunnel_del = fm10k_del_vxlan_port,
.ndo_dfwd_add_station = fm10k_dfwd_add_station,
.ndo_dfwd_del_station = fm10k_dfwd_del_station,
#ifdef CONFIG_NET_POLL_CONTROLLER
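
The VXLAN callbacks above were ported from the (sa_family, port) arguments to the generic struct udp_tunnel_info, but the bookkeeping is unchanged: a matching port is pulled out of the list and re-appended so the newest entry, the only one offloaded, is always last. A minimal array-based model of that move-to-tail rule (the driver itself uses a linked list):

#include <stdint.h>
#include <string.h>

struct tun_port { uint16_t port; int family; };

#define MAX_PORTS 8
static struct tun_port ports[MAX_PORTS];
static int nports;

/* Re-adding an existing (port, family) pair moves it to the tail;
 * only ports[nports - 1] is programmed into hardware, mirroring
 * "existing ports are pulled out so our new entry is always last". */
static void add_port(uint16_t port, int family)
{
    for (int i = 0; i < nports; i++) {
        if (ports[i].port == port && ports[i].family == family) {
            memmove(&ports[i], &ports[i + 1],
                    (nports - i - 1) * sizeof(ports[0]));
            nports--;
            break;
        }
    }
    if (nports < MAX_PORTS)
        ports[nports++] = (struct tun_port){ port, family };
}
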
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index e05aca9be..774a5654b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -123,11 +123,24 @@ static void fm10k_service_timer(unsigned long data)
static void fm10k_detach_subtask(struct fm10k_intfc *interface)
{
struct net_device *netdev = interface->netdev;
+ u32 __iomem *hw_addr;
+ u32 value;
/* do nothing if device is still present or hw_addr is set */
if (netif_device_present(netdev) || interface->hw.hw_addr)
return;
+ /* check the real address space to see if we've recovered */
+ hw_addr = READ_ONCE(interface->uc_addr);
+ value = readl(hw_addr);
+ if (~value) {
+ interface->hw.hw_addr = interface->uc_addr;
+ netif_device_attach(netdev);
+ interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+ netdev_warn(netdev, "PCIe link restored, device now attached\n");
+ return;
+ }
+
rtnl_lock();
if (netif_running(netdev))
@@ -136,11 +149,9 @@ static void fm10k_detach_subtask(struct fm10k_intfc *interface)
rtnl_unlock();
}
-static void fm10k_reinit(struct fm10k_intfc *interface)
+static void fm10k_prepare_for_reset(struct fm10k_intfc *interface)
{
struct net_device *netdev = interface->netdev;
- struct fm10k_hw *hw = &interface->hw;
- int err;
WARN_ON(in_interrupt());
@@ -165,6 +176,19 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
/* delay any future reset requests */
interface->last_reset = jiffies + (10 * HZ);
+ rtnl_unlock();
+}
+
+static int fm10k_handle_reset(struct fm10k_intfc *interface)
+{
+ struct net_device *netdev = interface->netdev;
+ struct fm10k_hw *hw = &interface->hw;
+ int err;
+
+ rtnl_lock();
+
+ pci_set_master(interface->pdev);
+
/* reset and initialize the hardware so it is in a known state */
err = hw->mac.ops.reset_hw(hw);
if (err) {
@@ -185,7 +209,7 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
goto reinit_err;
}
- /* reassociate interrupts */
+ /* re-associate interrupts */
err = fm10k_mbx_request_irq(interface);
if (err)
goto err_mbx_irq;
@@ -219,7 +243,7 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
clear_bit(__FM10K_RESETTING, &interface->state);
- return;
+ return err;
err_open:
fm10k_mbx_free_irq(interface);
err_mbx_irq:
@@ -230,6 +254,20 @@ reinit_err:
rtnl_unlock();
clear_bit(__FM10K_RESETTING, &interface->state);
+
+ return err;
+}
+
+static void fm10k_reinit(struct fm10k_intfc *interface)
+{
+ int err;
+
+ fm10k_prepare_for_reset(interface);
+
+ err = fm10k_handle_reset(interface);
+ if (err)
+ dev_err(&interface->pdev->dev,
+ "fm10k_handle_reset failed: %d\n", err);
}
static void fm10k_reset_subtask(struct fm10k_intfc *interface)
@@ -372,12 +410,19 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
u64 bytes, pkts;
int i;
+ /* ensure only one thread updates stats at a time */
+ if (test_and_set_bit(__FM10K_UPDATING_STATS, &interface->state))
+ return;
+
/* do not allow stats update via service task for next second */
interface->next_stats_update = jiffies + HZ;
/* gather some stats to the interface struct that are per queue */
for (bytes = 0, pkts = 0, i = 0; i < interface->num_tx_queues; i++) {
- struct fm10k_ring *tx_ring = interface->tx_ring[i];
+ struct fm10k_ring *tx_ring = READ_ONCE(interface->tx_ring[i]);
+
+ if (!tx_ring)
+ continue;
restart_queue += tx_ring->tx_stats.restart_queue;
tx_busy += tx_ring->tx_stats.tx_busy;
@@ -396,7 +441,10 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
/* gather some stats to the interface struct that are per queue */
for (bytes = 0, pkts = 0, i = 0; i < interface->num_rx_queues; i++) {
- struct fm10k_ring *rx_ring = interface->rx_ring[i];
+ struct fm10k_ring *rx_ring = READ_ONCE(interface->rx_ring[i]);
+
+ if (!rx_ring)
+ continue;
bytes += rx_ring->stats.bytes;
pkts += rx_ring->stats.packets;
@@ -443,6 +491,8 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
/* Fill out the OS statistics structure */
net_stats->rx_errors = rx_errors;
net_stats->rx_dropped = interface->stats.nodesc_drop.count;
+
+ clear_bit(__FM10K_UPDATING_STATS, &interface->state);
}
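
The new __FM10K_UPDATING_STATS bit makes fm10k_update_stats() single-entrant: a concurrent caller simply skips the round, while fm10k_down() acquires and holds the bit to fence stats collection off during teardown. In portable C11 atomics the pattern looks roughly like:

#include <stdatomic.h>

static atomic_flag updating = ATOMIC_FLAG_INIT;

static void update_stats(void)
{
    /* test_and_set: if someone else is mid-update, skip this round */
    if (atomic_flag_test_and_set(&updating))
        return;

    /* ... walk tx/rx rings and fold counters here ... */

    atomic_flag_clear(&updating);
}

/* teardown side: take the flag and *hold* it so no update can start */
static void block_stats_for_down(void)
{
    while (atomic_flag_test_and_set(&updating))
        ;   /* the driver sleeps with usleep_range(1000, 2000) here */
}
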
/**
@@ -1566,6 +1616,9 @@ void fm10k_up(struct fm10k_intfc *interface)
/* configure interrupts */
hw->mac.ops.update_int_moderator(hw);
+ /* enable statistics capture again */
+ clear_bit(__FM10K_UPDATING_STATS, &interface->state);
+
/* clear down bit to indicate we are ready to go */
clear_bit(__FM10K_DOWN, &interface->state);
@@ -1598,10 +1651,11 @@ void fm10k_down(struct fm10k_intfc *interface)
{
struct net_device *netdev = interface->netdev;
struct fm10k_hw *hw = &interface->hw;
- int err;
+ int err, i = 0, count = 0;
/* signal that we are down to the interrupt handler and service task */
- set_bit(__FM10K_DOWN, &interface->state);
+ if (test_and_set_bit(__FM10K_DOWN, &interface->state))
+ return;
/* call carrier off first to avoid false dev_watchdog timeouts */
netif_carrier_off(netdev);
@@ -1613,18 +1667,57 @@ void fm10k_down(struct fm10k_intfc *interface)
/* reset Rx filters */
fm10k_reset_rx_state(interface);
- /* allow 10ms for device to quiesce */
- usleep_range(10000, 20000);
-
/* disable polling routines */
fm10k_napi_disable_all(interface);
/* capture stats one last time before stopping interface */
fm10k_update_stats(interface);
+ /* prevent updating statistics while we're down */
+ while (test_and_set_bit(__FM10K_UPDATING_STATS, &interface->state))
+ usleep_range(1000, 2000);
+
+ /* skip waiting for TX DMA if we lost PCIe link */
+ if (FM10K_REMOVED(hw->hw_addr))
+ goto skip_tx_dma_drain;
+
+ /* In some rare circumstances it can take a while for Tx queues to
+ * quiesce and be fully disabled. Attempt to .stop_hw() first, and
+ * then if we get ERR_REQUESTS_PENDING, go ahead and wait in a loop
+ * until the Tx queues have emptied, or until the retries are exhausted. If
+ * we fail to clear within the retry loop, we will issue a warning
+ * indicating that Tx DMA is probably hung. Note this means we call
+ * .stop_hw() twice but this shouldn't cause any problems.
+ */
+ err = hw->mac.ops.stop_hw(hw);
+ if (err != FM10K_ERR_REQUESTS_PENDING)
+ goto skip_tx_dma_drain;
+
+#define TX_DMA_DRAIN_RETRIES 25
+ for (count = 0; count < TX_DMA_DRAIN_RETRIES; count++) {
+ usleep_range(10000, 20000);
+
+ /* start checking at the last ring to have pending Tx */
+ for (; i < interface->num_tx_queues; i++)
+ if (fm10k_get_tx_pending(interface->tx_ring[i]))
+ break;
+
+ /* if all the queues are drained, we can break now */
+ if (i == interface->num_tx_queues)
+ break;
+ }
+
+ if (count >= TX_DMA_DRAIN_RETRIES)
+ dev_err(&interface->pdev->dev,
+ "Tx queues failed to drain after %d tries. Tx DMA is probably hung.\n",
+ count);
+skip_tx_dma_drain:
/* Disable DMA engine for Tx/Rx */
err = hw->mac.ops.stop_hw(hw);
- if (err)
+ if (err == FM10K_ERR_REQUESTS_PENDING)
+ dev_err(&interface->pdev->dev,
+ "due to pending requests hw was not shut down gracefully\n");
+ else if (err)
dev_err(&interface->pdev->dev, "stop_hw failed: %d\n", err);
/* free any buffers still on the rings */
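
The drain logic in this hunk is a bounded poll: call stop_hw() once, and if requests are still pending, sleep and re-scan the queues up to 25 times before concluding the Tx DMA engine is hung; note the scan index deliberately survives across retries so each pass resumes at the last busy ring. A skeleton under those assumptions, with pending() standing in for fm10k_get_tx_pending():

#include <stdbool.h>

#define DRAIN_RETRIES 25
#define ERR_PENDING   1     /* stand-in for FM10K_ERR_REQUESTS_PENDING */

extern int  stop_hw(void);       /* returns ERR_PENDING while Tx is busy */
extern int  pending(int queue);  /* stand-in for fm10k_get_tx_pending() */
extern void sleep_ms(int ms);

static bool drain_tx(int nqueues)
{
    int count, i = 0;

    if (stop_hw() != ERR_PENDING)
        return true;                 /* already quiesced (or hard error) */

    for (count = 0; count < DRAIN_RETRIES; count++) {
        sleep_ms(10);
        /* resume scanning at the last ring seen with pending work */
        for (; i < nqueues; i++)
            if (pending(i))
                break;
        if (i == nqueues)
            break;                   /* every queue drained */
    }
    return count < DRAIN_RETRIES;    /* false: Tx DMA is probably hung */
}
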
@@ -1750,6 +1843,7 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
/* Start off interface as being down */
set_bit(__FM10K_DOWN, &interface->state);
+ set_bit(__FM10K_UPDATING_STATS, &interface->state);
return 0;
}
@@ -1869,10 +1963,7 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_dma;
}
- err = pci_request_selected_regions(pdev,
- pci_select_bars(pdev,
- IORESOURCE_MEM),
- fm10k_driver_name);
+ err = pci_request_mem_regions(pdev, fm10k_driver_name);
if (err) {
dev_err(&pdev->dev,
"pci_request_selected_regions failed: %d\n", err);
@@ -1976,8 +2067,7 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_netdev:
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
pci_disable_device(pdev);
@@ -2025,14 +2115,55 @@ static void fm10k_remove(struct pci_dev *pdev)
free_netdev(netdev);
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
}
+static void fm10k_prepare_suspend(struct fm10k_intfc *interface)
+{
+ /* the watchdog task reads from registers, which might appear like
+ * a surprise remove if the PCIe device is disabled while we're
+ * stopped. We stop the watchdog task until after we resume software
+ * activity.
+ */
+ set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
+ cancel_work_sync(&interface->service_task);
+
+ fm10k_prepare_for_reset(interface);
+}
+
+static int fm10k_handle_resume(struct fm10k_intfc *interface)
+{
+ struct fm10k_hw *hw = &interface->hw;
+ int err;
+
+ /* reset statistics starting values */
+ hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
+
+ err = fm10k_handle_reset(interface);
+ if (err)
+ return err;
+
+ /* assume host is not ready, to prevent race with watchdog in case we
+ * actually don't have connection to the switch
+ */
+ interface->host_ready = false;
+ fm10k_watchdog_host_not_ready(interface);
+
+ /* force link to stay down for a second to prevent link flutter */
+ interface->link_down_event = jiffies + (HZ);
+ set_bit(__FM10K_LINK_DOWN, &interface->state);
+
+ /* clear the service task disable bit to allow service task to start */
+ clear_bit(__FM10K_SERVICE_DISABLE, &interface->state);
+ fm10k_service_event_schedule(interface);
+
+ return err;
+}
+
#ifdef CONFIG_PM
/**
* fm10k_resume - Restore device to pre-sleep state
@@ -2069,60 +2200,13 @@ static int fm10k_resume(struct pci_dev *pdev)
/* refresh hw_addr in case it was dropped */
hw->hw_addr = interface->uc_addr;
- /* reset hardware to known state */
- err = hw->mac.ops.init_hw(&interface->hw);
- if (err) {
- dev_err(&pdev->dev, "init_hw failed: %d\n", err);
- return err;
- }
-
- /* reset statistics starting values */
- hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
-
- rtnl_lock();
-
- err = fm10k_init_queueing_scheme(interface);
- if (err)
- goto err_queueing_scheme;
-
- err = fm10k_mbx_request_irq(interface);
- if (err)
- goto err_mbx_irq;
-
- err = fm10k_hw_ready(interface);
+ err = fm10k_handle_resume(interface);
if (err)
- goto err_open;
-
- err = netif_running(netdev) ? fm10k_open(netdev) : 0;
- if (err)
- goto err_open;
-
- rtnl_unlock();
-
- /* assume host is not ready, to prevent race with watchdog in case we
- * actually don't have connection to the switch
- */
- interface->host_ready = false;
- fm10k_watchdog_host_not_ready(interface);
-
- /* clear the service task disable bit to allow service task to start */
- clear_bit(__FM10K_SERVICE_DISABLE, &interface->state);
- fm10k_service_event_schedule(interface);
-
- /* restore SR-IOV interface */
- fm10k_iov_resume(pdev);
+ return err;
netif_device_attach(netdev);
return 0;
-err_open:
- fm10k_mbx_free_irq(interface);
-err_mbx_irq:
- fm10k_clear_queueing_scheme(interface);
-err_queueing_scheme:
- rtnl_unlock();
-
- return err;
}
/**
@@ -2142,27 +2226,7 @@ static int fm10k_suspend(struct pci_dev *pdev,
netif_device_detach(netdev);
- fm10k_iov_suspend(pdev);
-
- /* the watchdog tasks may read registers, which will appear like a
- * surprise-remove event once the PCI device is disabled. This will
- * cause us to close the netdevice, so we don't retain the open/closed
- * state post-resume. Prevent this by disabling the service task while
- * suspended, until we actually resume.
- */
- set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
- cancel_work_sync(&interface->service_task);
-
- rtnl_lock();
-
- if (netif_running(netdev))
- fm10k_close(netdev);
-
- fm10k_mbx_free_irq(interface);
-
- fm10k_clear_queueing_scheme(interface);
-
- rtnl_unlock();
+ fm10k_prepare_suspend(interface);
err = pci_save_state(pdev);
if (err)
@@ -2195,17 +2259,7 @@ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev,
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
- rtnl_lock();
-
- if (netif_running(netdev))
- fm10k_close(netdev);
-
- fm10k_mbx_free_irq(interface);
-
- /* free interrupts */
- fm10k_clear_queueing_scheme(interface);
-
- rtnl_unlock();
+ fm10k_prepare_suspend(interface);
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@@ -2219,7 +2273,6 @@ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev,
*/
static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev)
{
- struct fm10k_intfc *interface = pci_get_drvdata(pdev);
pci_ers_result_t result;
if (pci_enable_device_mem(pdev)) {
@@ -2237,12 +2290,6 @@ static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev)
pci_wake_from_d3(pdev, false);
- /* refresh hw_addr in case it was dropped */
- interface->hw.hw_addr = interface->uc_addr;
-
- interface->flags |= FM10K_FLAG_RESET_REQUESTED;
- fm10k_service_event_schedule(interface);
-
result = PCI_ERS_RESULT_RECOVERED;
}
@@ -2262,50 +2309,54 @@ static void fm10k_io_resume(struct pci_dev *pdev)
{
struct fm10k_intfc *interface = pci_get_drvdata(pdev);
struct net_device *netdev = interface->netdev;
- struct fm10k_hw *hw = &interface->hw;
- int err = 0;
-
- /* reset hardware to known state */
- err = hw->mac.ops.init_hw(&interface->hw);
- if (err) {
- dev_err(&pdev->dev, "init_hw failed: %d\n", err);
- return;
- }
-
- /* reset statistics starting values */
- hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
-
- rtnl_lock();
+ int err;
- err = fm10k_init_queueing_scheme(interface);
- if (err) {
- dev_err(&interface->pdev->dev,
- "init_queueing_scheme failed: %d\n", err);
- goto unlock;
- }
+ err = fm10k_handle_resume(interface);
- /* reassociate interrupts */
- fm10k_mbx_request_irq(interface);
+ if (err)
+ dev_warn(&pdev->dev,
+ "fm10k_io_resume failed: %d\n", err);
+ else
+ netif_device_attach(netdev);
+}
- rtnl_lock();
- if (netif_running(netdev))
- err = fm10k_open(netdev);
- rtnl_unlock();
+/**
+ * fm10k_io_reset_notify - called when PCI function is reset
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the PCI function is reset such as from
+ * /sys/class/net/<enpX>/device/reset or similar. When prepare is true, it
+ * means we should prepare for a function reset. If prepare is false, it means
+ * the function reset just occurred.
+ */
+static void fm10k_io_reset_notify(struct pci_dev *pdev, bool prepare)
+{
+ struct fm10k_intfc *interface = pci_get_drvdata(pdev);
+ int err = 0;
- /* final check of hardware state before registering the interface */
- err = err ? : fm10k_hw_ready(interface);
+ if (prepare) {
+ /* warn in case we have any active VF devices */
+ if (pci_num_vf(pdev))
+ dev_warn(&pdev->dev,
+ "PCIe FLR may cause issues for any active VF devices\n");
- if (!err)
- netif_device_attach(netdev);
+ fm10k_prepare_suspend(interface);
+ } else {
+ err = fm10k_handle_resume(interface);
+ }
-unlock:
- rtnl_unlock();
+ if (err) {
+ dev_warn(&pdev->dev,
+ "fm10k_io_reset_notify failed: %d\n", err);
+ netif_device_detach(interface->netdev);
+ }
}
static const struct pci_error_handlers fm10k_err_handler = {
.error_detected = fm10k_io_error_detected,
.slot_reset = fm10k_io_slot_reset,
.resume = fm10k_io_resume,
+ .reset_notify = fm10k_io_reset_notify,
};
static struct pci_driver fm10k_driver = {
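
Earlier in this file, fm10k_detach_subtask() now probes the real BAR mapping: a surprise-removed PCIe function returns all-ones for every read, so any register value with a zero bit proves the device is reachable again and can be reattached. The sentinel test in isolation (reg_read() stands in for readl() on the uc_addr mapping):

#include <stdint.h>
#include <stdbool.h>

extern uint32_t reg_read(void);  /* stand-in for readl(hw_addr) */

/* On PCIe, reads from a removed or unreachable function complete as
 * 0xFFFFFFFF. Any cleared bit therefore proves the device answered. */
static bool device_is_back(void)
{
    uint32_t value = reg_read();

    return ~value != 0;  /* same test as `if (~value)` in the driver */
}
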
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index dc75507c9..682299dd0 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -51,34 +51,37 @@ static s32 fm10k_reset_hw_pf(struct fm10k_hw *hw)
/* shut down all rings */
err = fm10k_disable_queues_generic(hw, FM10K_MAX_QUEUES);
- if (err)
+ if (err == FM10K_ERR_REQUESTS_PENDING) {
+ hw->mac.reset_while_pending++;
+ goto force_reset;
+ } else if (err) {
return err;
+ }
/* Verify that DMA is no longer active */
reg = fm10k_read_reg(hw, FM10K_DMA_CTRL);
if (reg & (FM10K_DMA_CTRL_TX_ACTIVE | FM10K_DMA_CTRL_RX_ACTIVE))
return FM10K_ERR_DMA_PENDING;
- /* verify the switch is ready for reset */
- reg = fm10k_read_reg(hw, FM10K_DMA_CTRL2);
- if (!(reg & FM10K_DMA_CTRL2_SWITCH_READY))
- goto out;
-
+force_reset:
 	/* Initiate data path reset */
- reg |= FM10K_DMA_CTRL_DATAPATH_RESET;
+ reg = FM10K_DMA_CTRL_DATAPATH_RESET;
fm10k_write_reg(hw, FM10K_DMA_CTRL, reg);
/* Flush write and allow 100us for reset to complete */
fm10k_write_flush(hw);
udelay(FM10K_RESET_TIMEOUT);
+ /* Reset mailbox global interrupts */
+ reg = FM10K_MBX_GLOBAL_REQ_INTERRUPT | FM10K_MBX_GLOBAL_ACK_INTERRUPT;
+ fm10k_write_reg(hw, FM10K_GMBX, reg);
+
/* Verify we made it out of reset */
reg = fm10k_read_reg(hw, FM10K_IP);
if (!(reg & FM10K_IP_NOTINRESET))
- err = FM10K_ERR_RESET_FAILED;
+ return FM10K_ERR_RESET_FAILED;
-out:
- return err;
+ return 0;
}
/**
@@ -1619,25 +1622,15 @@ static s32 fm10k_request_lport_map_pf(struct fm10k_hw *hw)
**/
static s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready)
{
- s32 ret_val = 0;
u32 dma_ctrl2;
/* verify the switch is ready for interaction */
dma_ctrl2 = fm10k_read_reg(hw, FM10K_DMA_CTRL2);
if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY))
- goto out;
+ return 0;
/* retrieve generic host state info */
- ret_val = fm10k_get_host_state_generic(hw, switch_ready);
- if (ret_val)
- goto out;
-
- /* interface cannot receive traffic without logical ports */
- if (hw->mac.dglort_map == FM10K_DGLORTMAP_NONE)
- ret_val = fm10k_request_lport_map_pf(hw);
-
-out:
- return ret_val;
+ return fm10k_get_host_state_generic(hw, switch_ready);
}
 /* This structure defines the attributes to be parsed below */
@@ -1813,6 +1806,7 @@ static const struct fm10k_mac_ops mac_ops_pf = {
.set_dma_mask = fm10k_set_dma_mask_pf,
.get_fault = fm10k_get_fault_pf,
.get_host_state = fm10k_get_host_state_pf,
+ .request_lport_map = fm10k_request_lport_map_pf,
};
static const struct fm10k_iov_ops iov_ops_pf = {
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index b8bc06183..f4e75c498 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -526,6 +526,7 @@ struct fm10k_mac_ops {
s32 (*stop_hw)(struct fm10k_hw *);
s32 (*get_bus_info)(struct fm10k_hw *);
s32 (*get_host_state)(struct fm10k_hw *, bool *);
+ s32 (*request_lport_map)(struct fm10k_hw *);
s32 (*update_vlan)(struct fm10k_hw *, u32, u8, bool);
s32 (*read_mac_addr)(struct fm10k_hw *);
s32 (*update_uc_addr)(struct fm10k_hw *, u16, const u8 *,
@@ -562,6 +563,7 @@ struct fm10k_mac_info {
bool tx_ready;
u32 dglort_map;
u8 itr_scale;
+ u64 reset_while_pending;
};
struct fm10k_swapi_table_info {
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
index 3b06685ea..337ba65a9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
@@ -34,7 +34,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw)
/* we need to disable the queues before taking further steps */
err = fm10k_stop_hw_generic(hw);
- if (err)
+ if (err && err != FM10K_ERR_REQUESTS_PENDING)
return err;
/* If permanent address is set then we need to restore it */
@@ -67,7 +67,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw)
fm10k_write_reg(hw, FM10K_TDLEN(i), tdlen);
}
- return 0;
+ return err;
}
/**
@@ -83,7 +83,9 @@ static s32 fm10k_reset_hw_vf(struct fm10k_hw *hw)
/* shut down queues we own and reset DMA configuration */
err = fm10k_stop_hw_vf(hw);
- if (err)
+ if (err == FM10K_ERR_REQUESTS_PENDING)
+ hw->mac.reset_while_pending++;
+ else if (err)
return err;
 	/* Initiate VF reset */
@@ -96,9 +98,9 @@ static s32 fm10k_reset_hw_vf(struct fm10k_hw *hw)
/* Clear reset bit and verify it was cleared */
fm10k_write_reg(hw, FM10K_VFCTRL, 0);
if (fm10k_read_reg(hw, FM10K_VFCTRL) & FM10K_VFCTRL_RST)
- err = FM10K_ERR_RESET_FAILED;
+ return FM10K_ERR_RESET_FAILED;
- return err;
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 9c44739da..2a882916b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -283,6 +283,7 @@ struct i40e_pf {
#endif /* I40E_FCOE */
u16 num_lan_qps; /* num lan queues this PF has set up */
u16 num_lan_msix; /* num queue vectors for the base PF vsi */
+ u16 num_fdsb_msix; /* num queue vectors for sideband Fdir */
u16 num_iwarp_msix; /* num of iwarp vectors for this PF */
int iwarp_base_vector;
int queues_left; /* queues left unclaimed */
@@ -447,6 +448,14 @@ struct i40e_pf {
u16 phy_led_val;
};
+enum i40e_filter_state {
+ I40E_FILTER_INVALID = 0, /* Invalid state */
+ I40E_FILTER_NEW, /* New, not sent to FW yet */
+ I40E_FILTER_ACTIVE, /* Added to switch by FW */
+ I40E_FILTER_FAILED, /* Rejected by FW */
+ I40E_FILTER_REMOVE, /* To be removed */
+/* There is no 'removed' state; the filter struct is freed */
+};
struct i40e_mac_filter {
struct list_head list;
u8 macaddr[ETH_ALEN];
@@ -455,8 +464,7 @@ struct i40e_mac_filter {
u8 counter; /* number of instances of this filter */
bool is_vf; /* filter belongs to a VF */
bool is_netdev; /* filter belongs to a netdev */
- bool changed; /* filter needs to be sync'd to the HW */
- bool is_laa; /* filter is a Locally Administered Address */
+ enum i40e_filter_state state;
};
struct i40e_veb {
@@ -522,6 +530,9 @@ struct i40e_vsi {
struct i40e_ring **rx_rings;
struct i40e_ring **tx_rings;
+ u32 active_filters;
+ u32 promisc_threshold;
+
u16 work_limit;
u16 int_rate_limit; /* value in usecs */
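
The new enum i40e_filter_state above replaces the changed/is_laa booleans with an explicit per-filter lifecycle: NEW entries are pushed to firmware and become ACTIVE or FAILED, REMOVE entries are deleted and freed, and there is deliberately no 'removed' state. A toy transition function under those assumptions; fw_add() is hypothetical:

#include <stdbool.h>

enum filter_state { F_INVALID, F_NEW, F_ACTIVE, F_FAILED, F_REMOVE };
/* no 'removed' state: the filter struct is simply freed */

extern bool fw_add(const void *filter);   /* hypothetical AQ add call */

/* Next state for one filter during a sync pass; *free_me set to true
 * means the caller should delete the entry from FW and free it. */
static enum filter_state sync_one(enum filter_state s, const void *f,
                                  bool *free_me)
{
    *free_me = false;
    switch (s) {
    case F_NEW:
        return fw_add(f) ? F_ACTIVE : F_FAILED;
    case F_REMOVE:
        *free_me = true;          /* deleted from FW, then freed */
        return F_INVALID;
    default:
        return s;                 /* ACTIVE/FAILED/INVALID: no-op here */
    }
}
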
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 0e6ac8413..618f18436 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -199,6 +199,7 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi)
{
struct i40e_client_instance *cdev;
+ int ret = 0;
if (!vsi)
return;
@@ -211,7 +212,14 @@ void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi)
"Cannot locate client instance open routine\n");
continue;
}
- cdev->client->ops->open(&cdev->lan_info, cdev->client);
+ if (!(test_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cdev->state))) {
+ ret = cdev->client->ops->open(&cdev->lan_info,
+ cdev->client);
+ if (!ret)
+ set_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cdev->state);
+ }
}
}
mutex_unlock(&i40e_client_instance_mutex);
@@ -407,12 +415,14 @@ struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf,
* i40e_client_add_instance - add a client instance struct to the instance list
* @pf: pointer to the board struct
* @client: pointer to a client struct in the client list.
+ * @existing: if there was already an existing instance
*
- * Returns cdev ptr on success, NULL on failure
+ * Returns cdev ptr on success or if already exists, NULL on failure
**/
static
struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
- struct i40e_client *client)
+ struct i40e_client *client,
+ bool *existing)
{
struct i40e_client_instance *cdev;
struct netdev_hw_addr *mac = NULL;
@@ -421,7 +431,7 @@ struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
mutex_lock(&i40e_client_instance_mutex);
list_for_each_entry(cdev, &i40e_client_instances, list) {
if ((cdev->lan_info.pf == pf) && (cdev->client == client)) {
- cdev = NULL;
+ *existing = true;
goto out;
}
}
@@ -505,6 +515,7 @@ void i40e_client_subtask(struct i40e_pf *pf)
{
struct i40e_client_instance *cdev;
struct i40e_client *client;
+ bool existing = false;
int ret = 0;
if (!(pf->flags & I40E_FLAG_SERVICE_CLIENT_REQUESTED))
@@ -528,18 +539,25 @@ void i40e_client_subtask(struct i40e_pf *pf)
/* check if L2 VSI is up, if not we are not ready */
if (test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
continue;
+ } else {
+ dev_warn(&pf->pdev->dev, "This client %s is being instantiated at probe\n",
+ client->name);
}
/* Add the client instance to the instance list */
- cdev = i40e_client_add_instance(pf, client);
+ cdev = i40e_client_add_instance(pf, client, &existing);
if (!cdev)
continue;
- /* Also up the ref_cnt of no. of instances of this client */
- atomic_inc(&client->ref_cnt);
- dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n",
- client->name, pf->hw.pf_id,
- pf->hw.bus.device, pf->hw.bus.func);
+ if (!existing) {
+ /* Also up the ref_cnt for no. of instances of this
+ * client.
+ */
+ atomic_inc(&client->ref_cnt);
+ dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n",
+ client->name, pf->hw.pf_id,
+ pf->hw.bus.device, pf->hw.bus.func);
+ }
/* Send an Open request to the client */
atomic_inc(&cdev->ref_cnt);
@@ -588,7 +606,8 @@ int i40e_lan_add_device(struct i40e_pf *pf)
pf->hw.pf_id, pf->hw.bus.device, pf->hw.bus.func);
/* Since in some cases register may have happened before a device gets
- * added, we can schedule a subtask to go initiate the clients.
+ * added, we can schedule a subtask to go initiate the clients if
+ * they can be launched at probe time.
*/
pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
i40e_service_event_schedule(pf);
@@ -980,13 +999,13 @@ int i40e_unregister_client(struct i40e_client *client)
* a close for each of the client instances that were opened.
* client_release function is called to handle this.
*/
+ mutex_lock(&i40e_client_mutex);
if (!client || i40e_client_release(client)) {
ret = -EIO;
goto out;
}
/* TODO: check if device is in reset, or if that matters? */
- mutex_lock(&i40e_client_mutex);
if (!i40e_client_is_registered(client)) {
pr_info("i40e: Client %s has not been registered\n",
client->name);
@@ -1005,8 +1024,8 @@ int i40e_unregister_client(struct i40e_client *client)
client->name);
}
- mutex_unlock(&i40e_client_mutex);
out:
+ mutex_unlock(&i40e_client_mutex);
return ret;
}
EXPORT_SYMBOL(i40e_unregister_client);
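
The i40e_unregister_client() fix above widens the mutex so the early -EIO path no longer returns without the lock ever being taken, and every exit funnels through the single unlock at out:. The same shape in userspace C, with pthreads standing in for the kernel mutex:

#include <pthread.h>
#include <errno.h>

static pthread_mutex_t client_mutex = PTHREAD_MUTEX_INITIALIZER;

extern int release(void *client);        /* stand-ins for the driver's */
extern int is_registered(void *client);  /* release/lookup helpers     */

static int unregister_client(void *client)
{
    int ret = 0;

    /* lock *before* validation so the error path below still owns
     * the mutex and the single unlock at out: is always balanced */
    pthread_mutex_lock(&client_mutex);
    if (!client || release(client)) {
        ret = -EIO;
        goto out;
    }
    if (!is_registered(client))
        ret = -ENODEV;
    /* ... unlink from the client list here ... */
out:
    pthread_mutex_unlock(&client_mutex);
    return ret;
}
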
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 422b41d61..2154a34c1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -61,7 +61,6 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_1G_BASE_T_X722:
case I40E_DEV_ID_10G_BASE_T_X722:
case I40E_DEV_ID_SFP_I_X722:
- case I40E_DEV_ID_QSFP_I_X722:
hw->mac.type = I40E_MAC_X722;
break;
default:
@@ -297,13 +296,15 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
void *buffer, u16 buf_len)
{
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
- u16 len = le16_to_cpu(aq_desc->datalen);
+ u16 len;
u8 *buf = (u8 *)buffer;
u16 i = 0;
if ((!(mask & hw->debug_mask)) || (desc == NULL))
return;
+ len = le16_to_cpu(aq_desc->datalen);
+
i40e_debug(hw, mask,
"AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
le16_to_cpu(aq_desc->opcode),
@@ -1967,6 +1968,62 @@ aq_add_vsi_exit:
}
/**
+ * i40e_aq_set_default_vsi
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)
+ &desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
+ cmd->seid = cpu_to_le16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_clear_default_vsi
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)
+ &desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ cmd->promiscuous_flags = cpu_to_le16(0);
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
+ cmd->seid = cpu_to_le16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_set_vsi_unicast_promiscuous
* @hw: pointer to the hw struct
* @seid: vsi number
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index e6af8c8d7..05cf9a719 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -116,6 +116,14 @@ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
return len;
}
+static char *i40e_filter_state_string[] = {
+ "INVALID",
+ "NEW",
+ "ACTIVE",
+ "FAILED",
+ "REMOVE",
+};
+
/**
* i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum
* @pf: the i40e_pf created in command write
@@ -160,10 +168,14 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
pf->hw.mac.port_addr);
list_for_each_entry(f, &vsi->mac_filter_list, list) {
dev_info(&pf->pdev->dev,
- " mac_filter_list: %pM vid=%d, is_netdev=%d is_vf=%d counter=%d\n",
+ " mac_filter_list: %pM vid=%d, is_netdev=%d is_vf=%d counter=%d, state %s\n",
f->macaddr, f->vlan, f->is_netdev, f->is_vf,
- f->counter);
+ f->counter, i40e_filter_state_string[f->state]);
}
+ dev_info(&pf->pdev->dev, " active_filters %d, promisc_threshold %d, overflow promisc %s\n",
+ vsi->active_filters, vsi->promisc_threshold,
+ (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) ?
+ "ON" : "OFF"));
nstat = i40e_get_vsi_stats_struct(vsi);
dev_info(&pf->pdev->dev,
" net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
index d701861c6..dd4457d29 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -45,7 +45,6 @@
#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
#define I40E_DEV_ID_SFP_I_X722 0x37D3
-#define I40E_DEV_ID_QSFP_I_X722 0x37D4
#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
(d) == I40E_DEV_ID_QSFP_B || \
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 5e8d84ff7..c912e041d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -272,15 +272,16 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,
u32 *advertising)
{
enum i40e_aq_capabilities_phy_type phy_types = pf->hw.phy.phy_types;
-
+ struct i40e_link_status *hw_link_info = &pf->hw.phy.link_info;
*supported = 0x0;
*advertising = 0x0;
if (phy_types & I40E_CAP_PHY_TYPE_SGMII) {
*supported |= SUPPORTED_Autoneg |
SUPPORTED_1000baseT_Full;
- *advertising |= ADVERTISED_Autoneg |
- ADVERTISED_1000baseT_Full;
+ *advertising |= ADVERTISED_Autoneg;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+ *advertising |= ADVERTISED_1000baseT_Full;
if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) {
*supported |= SUPPORTED_100baseT_Full;
*advertising |= ADVERTISED_100baseT_Full;
@@ -299,8 +300,9 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,
phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
*supported |= SUPPORTED_Autoneg |
SUPPORTED_10000baseT_Full;
- *advertising |= ADVERTISED_Autoneg |
- ADVERTISED_10000baseT_Full;
+ *advertising |= ADVERTISED_Autoneg;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+ *advertising |= ADVERTISED_10000baseT_Full;
}
if (phy_types & I40E_CAP_PHY_TYPE_XLAUI ||
phy_types & I40E_CAP_PHY_TYPE_XLPPI ||
@@ -310,15 +312,16 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,
phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) {
*supported |= SUPPORTED_Autoneg |
SUPPORTED_40000baseCR4_Full;
- *advertising |= ADVERTISED_Autoneg |
- ADVERTISED_40000baseCR4_Full;
+ *advertising |= ADVERTISED_Autoneg;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_40GB)
+ *advertising |= ADVERTISED_40000baseCR4_Full;
}
- if ((phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) &&
- !(phy_types & I40E_CAP_PHY_TYPE_1000BASE_T)) {
+ if (phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) {
*supported |= SUPPORTED_Autoneg |
SUPPORTED_100baseT_Full;
- *advertising |= ADVERTISED_Autoneg |
- ADVERTISED_100baseT_Full;
+ *advertising |= ADVERTISED_Autoneg;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
+ *advertising |= ADVERTISED_100baseT_Full;
}
if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
@@ -326,8 +329,9 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,
phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
*supported |= SUPPORTED_Autoneg |
SUPPORTED_1000baseT_Full;
- *advertising |= ADVERTISED_Autoneg |
- ADVERTISED_1000baseT_Full;
+ *advertising |= ADVERTISED_Autoneg;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+ *advertising |= ADVERTISED_1000baseT_Full;
}
if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4)
*supported |= SUPPORTED_40000baseSR4_Full;
@@ -342,26 +346,30 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,
if (phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) {
*supported |= SUPPORTED_20000baseKR2_Full |
SUPPORTED_Autoneg;
- *advertising |= ADVERTISED_20000baseKR2_Full |
- ADVERTISED_Autoneg;
+ *advertising |= ADVERTISED_Autoneg;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_20GB)
+ *advertising |= ADVERTISED_20000baseKR2_Full;
}
if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) {
*supported |= SUPPORTED_10000baseKR_Full |
SUPPORTED_Autoneg;
- *advertising |= ADVERTISED_10000baseKR_Full |
- ADVERTISED_Autoneg;
+ *advertising |= ADVERTISED_Autoneg;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+ *advertising |= ADVERTISED_10000baseKR_Full;
}
if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {
*supported |= SUPPORTED_10000baseKX4_Full |
SUPPORTED_Autoneg;
- *advertising |= ADVERTISED_10000baseKX4_Full |
- ADVERTISED_Autoneg;
+ *advertising |= ADVERTISED_Autoneg;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+ *advertising |= ADVERTISED_10000baseKX4_Full;
}
if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) {
*supported |= SUPPORTED_1000baseKX_Full |
SUPPORTED_Autoneg;
- *advertising |= ADVERTISED_1000baseKX_Full |
- ADVERTISED_Autoneg;
+ *advertising |= ADVERTISED_Autoneg;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+ *advertising |= ADVERTISED_1000baseKX_Full;
}
}
@@ -453,6 +461,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
case I40E_PHY_TYPE_10GBASE_SFPP_CU:
case I40E_PHY_TYPE_10GBASE_AOC:
ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->advertising = ADVERTISED_10000baseT_Full;
break;
case I40E_PHY_TYPE_SGMII:
ecmd->supported = SUPPORTED_Autoneg |
@@ -663,6 +672,7 @@ static int i40e_set_settings(struct net_device *netdev,
if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
hw->phy.media_type != I40E_MEDIA_TYPE_FIBER &&
hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE &&
+ hw->phy.media_type != I40E_MEDIA_TYPE_DA &&
hw->phy.link_info.link_info & I40E_AQ_LINK_UP)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 501f15d9f..d0b3a1bb8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -31,12 +31,7 @@
/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
-#if IS_ENABLED(CONFIG_VXLAN)
-#include <net/vxlan.h>
-#endif
-#if IS_ENABLED(CONFIG_GENEVE)
-#include <net/geneve.h>
-#endif
+#include <net/udp_tunnel.h>
const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
@@ -45,8 +40,8 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 5
-#define DRV_VERSION_BUILD 16
+#define DRV_VERSION_MINOR 6
+#define DRV_VERSION_BUILD 11
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -91,7 +86,6 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
- {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_I_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
/* required last entry */
@@ -1280,8 +1274,9 @@ int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
(is_vf == f->is_vf) &&
(is_netdev == f->is_netdev)) {
f->counter--;
- f->changed = true;
changed = 1;
+ if (f->counter == 0)
+ f->state = I40E_FILTER_REMOVE;
}
}
if (changed) {
@@ -1297,29 +1292,32 @@ int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
* @vsi: the PF Main VSI - inappropriate for any other VSI
* @macaddr: the MAC address
*
- * Some older firmware configurations set up a default promiscuous VLAN
- * filter that needs to be removed.
+ * Remove whatever filter the firmware set up so the driver can manage
+ * its own filtering intelligently.
**/
-static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
+static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
struct i40e_aqc_remove_macvlan_element_data element;
struct i40e_pf *pf = vsi->back;
- i40e_status ret;
/* Only appropriate for the PF main VSI */
if (vsi->type != I40E_VSI_MAIN)
- return -EINVAL;
+ return;
memset(&element, 0, sizeof(element));
ether_addr_copy(element.mac_addr, macaddr);
element.vlan_tag = 0;
- element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
- I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
- ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
- if (ret)
- return -ENOENT;
+ /* Ignore error returns, some firmware does it this way... */
+ element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+ i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
- return 0;
+ memset(&element, 0, sizeof(element));
+ ether_addr_copy(element.mac_addr, macaddr);
+ element.vlan_tag = 0;
+ /* ...and some firmware does it this way. */
+ element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
+ I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+ i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}
/**
@@ -1340,6 +1338,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
bool is_vf, bool is_netdev)
{
struct i40e_mac_filter *f;
+ bool changed = false;
if (!vsi || !macaddr)
return NULL;
@@ -1359,8 +1358,15 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
ether_addr_copy(f->macaddr, macaddr);
f->vlan = vlan;
- f->changed = true;
-
+ /* If we're in overflow promisc mode, set the state directly
+ * to failed, so we don't bother to try sending the filter
+ * to the hardware.
+ */
+ if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))
+ f->state = I40E_FILTER_FAILED;
+ else
+ f->state = I40E_FILTER_NEW;
+ changed = true;
INIT_LIST_HEAD(&f->list);
list_add_tail(&f->list, &vsi->mac_filter_list);
}
@@ -1380,10 +1386,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
f->counter++;
}
- /* changed tells sync_filters_subtask to
- * push the filter down to the firmware
- */
- if (f->changed) {
+ if (changed) {
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
}
@@ -1402,6 +1405,9 @@ add_filter_out:
*
* NOTE: This function is expected to be called with mac_filter_list_lock
* being held.
+ * ANOTHER NOTE: This function MUST be called from within the context of
+ * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
+ * instead of list_for_each_entry().
**/
void i40e_del_filter(struct i40e_vsi *vsi,
u8 *macaddr, s16 vlan,
@@ -1441,9 +1447,18 @@ void i40e_del_filter(struct i40e_vsi *vsi,
* remove the filter from the firmware's list
*/
if (f->counter == 0) {
- f->changed = true;
- vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
- vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+ if ((f->state == I40E_FILTER_FAILED) ||
+ (f->state == I40E_FILTER_NEW)) {
+ /* this one never got added by the FW. Just remove it,
+ * no need to sync anything.
+ */
+ list_del(&f->list);
+ kfree(f);
+ } else {
+ f->state = I40E_FILTER_REMOVE;
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+ vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+ }
}
}
@@ -1465,7 +1480,6 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
struct sockaddr *addr = p;
- struct i40e_mac_filter *f;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
@@ -1486,52 +1500,23 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
else
netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+ i40e_del_mac_all_vlan(vsi, netdev->dev_addr, false, true);
+ i40e_put_mac_in_vlan(vsi, addr->sa_data, false, true);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+ ether_addr_copy(netdev->dev_addr, addr->sa_data);
if (vsi->type == I40E_VSI_MAIN) {
i40e_status ret;
ret = i40e_aq_mac_address_write(&vsi->back->hw,
I40E_AQC_WRITE_TYPE_LAA_WOL,
addr->sa_data, NULL);
- if (ret) {
- netdev_info(netdev,
- "Addr change for Main VSI failed: %d\n",
- ret);
- return -EADDRNOTAVAIL;
- }
- }
-
- if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) {
- struct i40e_aqc_remove_macvlan_element_data element;
-
- memset(&element, 0, sizeof(element));
- ether_addr_copy(element.mac_addr, netdev->dev_addr);
- element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
- i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
- } else {
- spin_lock_bh(&vsi->mac_filter_list_lock);
- i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
- false, false);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
- }
-
- if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
- struct i40e_aqc_add_macvlan_element_data element;
-
- memset(&element, 0, sizeof(element));
- ether_addr_copy(element.mac_addr, hw->mac.addr);
- element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
- i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
- } else {
- spin_lock_bh(&vsi->mac_filter_list_lock);
- f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
- false, false);
- if (f)
- f->is_laa = true;
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ if (ret)
+ netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
}
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
-
/* schedule our worker thread which will take care of
* applying the new filter changes
*/
@@ -1591,14 +1576,8 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
vsi->tc_config.numtc = numtc;
vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
/* Number of queues per enabled TC */
- /* In MFP case we can have a much lower count of MSIx
- * vectors available and so we need to lower the used
- * q count.
- */
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
- qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
- else
- qcount = vsi->alloc_queue_pairs;
+ qcount = vsi->alloc_queue_pairs;
+
num_tc_qps = qcount / numtc;
num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
@@ -1768,28 +1747,6 @@ bottom_of_search_loop:
}
/**
- * i40e_mac_filter_entry_clone - Clones a MAC filter entry
- * @src: source MAC filter entry to be clones
- *
- * Returns the pointer to newly cloned MAC filter entry or NULL
- * in case of error
- **/
-static struct i40e_mac_filter *i40e_mac_filter_entry_clone(
- struct i40e_mac_filter *src)
-{
- struct i40e_mac_filter *f;
-
- f = kzalloc(sizeof(*f), GFP_ATOMIC);
- if (!f)
- return NULL;
- *f = *src;
-
- INIT_LIST_HEAD(&f->list);
-
- return f;
-}
-
-/**
* i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
* @vsi: pointer to vsi struct
* @from: Pointer to list which contains MAC filter entries - changes to
@@ -1803,41 +1760,61 @@ static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
struct i40e_mac_filter *f, *ftmp;
list_for_each_entry_safe(f, ftmp, from, list) {
- f->changed = true;
/* Move the element back into MAC filter list*/
list_move_tail(&f->list, &vsi->mac_filter_list);
}
}
/**
- * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
- * @vsi: pointer to vsi struct
+ * i40e_update_filter_state - Update filter state based on return data
+ * from firmware
+ * @count: Number of filters added
+ * @add_list: return data from fw
+ * @add_head: pointer to first filter in current batch
+ * @aq_err: status from fw
*
- * MAC filter entries from list were slated to be added from device.
+ * MAC filter entries from list were slated to be added to device. Returns
+ * number of successful filters. Note that 0 does NOT mean success!
**/
-static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi)
+static int
+i40e_update_filter_state(int count,
+ struct i40e_aqc_add_macvlan_element_data *add_list,
+ struct i40e_mac_filter *add_head, int aq_err)
{
- struct i40e_mac_filter *f, *ftmp;
-
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
- if (!f->changed && f->counter)
- f->changed = true;
- }
-}
+ int retval = 0;
+ int i;
-/**
- * i40e_cleanup_add_list - Deletes the element from add list and release
- * memory
- * @add_list: Pointer to list which contains MAC filter entries
- **/
-static void i40e_cleanup_add_list(struct list_head *add_list)
-{
- struct i40e_mac_filter *f, *ftmp;
- list_for_each_entry_safe(f, ftmp, add_list, list) {
- list_del(&f->list);
- kfree(f);
+ if (!aq_err) {
+ retval = count;
+ /* Everything's good, mark all filters active. */
+ for (i = 0; i < count ; i++) {
+ add_head->state = I40E_FILTER_ACTIVE;
+ add_head = list_next_entry(add_head, list);
+ }
+ } else if (aq_err == I40E_AQ_RC_ENOSPC) {
+ /* Device ran out of filter space. Check the return value
+ * for each filter to see which ones are active.
+ */
+ for (i = 0; i < count ; i++) {
+ if (add_list[i].match_method ==
+ I40E_AQC_MM_ERR_NO_RES) {
+ add_head->state = I40E_FILTER_FAILED;
+ } else {
+ add_head->state = I40E_FILTER_ACTIVE;
+ retval++;
+ }
+ add_head = list_next_entry(add_head, list);
+ }
+ } else {
+ /* Some other horrible thing happened, fail all filters */
+ retval = 0;
+ for (i = 0; i < count ; i++) {
+ add_head->state = I40E_FILTER_FAILED;
+ add_head = list_next_entry(add_head, list);
+ }
}
+ return retval;
}
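A worked example of the ENOSPC branch (values hypothetical): for a batch of three adds where firmware flagged only the middle element as out of resources,

	/* add_list[0].match_method == 0                       -> ACTIVE
	 * add_list[1].match_method == I40E_AQC_MM_ERR_NO_RES  -> FAILED
	 * add_list[2].match_method == 0                       -> ACTIVE
	 *
	 * i40e_update_filter_state(3, add_list, add_head, aq_err) == 2
	 */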
/**
@@ -1850,20 +1827,22 @@ static void i40e_cleanup_add_list(struct list_head *add_list)
**/
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
- struct list_head tmp_del_list, tmp_add_list;
- struct i40e_mac_filter *f, *ftmp, *fclone;
- bool promisc_forced_on = false;
- bool add_happened = false;
+ struct i40e_mac_filter *f, *ftmp, *add_head = NULL;
+ struct list_head tmp_add_list, tmp_del_list;
+ struct i40e_hw *hw = &vsi->back->hw;
+ bool promisc_changed = false;
+ char vsi_name[16] = "PF";
int filter_list_len = 0;
u32 changed_flags = 0;
i40e_status aq_ret = 0;
- bool err_cond = false;
int retval = 0;
struct i40e_pf *pf;
int num_add = 0;
int num_del = 0;
int aq_err = 0;
u16 cmd_flags;
+ int list_size;
+ int fcnt;
/* empty array typed pointers, kcalloc later */
struct i40e_aqc_add_macvlan_element_data *add_list;
@@ -1878,72 +1857,46 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
vsi->current_netdev_flags = vsi->netdev->flags;
}
- INIT_LIST_HEAD(&tmp_del_list);
INIT_LIST_HEAD(&tmp_add_list);
+ INIT_LIST_HEAD(&tmp_del_list);
+
+ if (vsi->type == I40E_VSI_SRIOV)
+ snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
+ else if (vsi->type != I40E_VSI_MAIN)
+ snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
spin_lock_bh(&vsi->mac_filter_list_lock);
+ /* Create a list of filters to delete. */
list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
- if (!f->changed)
- continue;
-
- if (f->counter != 0)
- continue;
- f->changed = false;
-
- /* Move the element into temporary del_list */
- list_move_tail(&f->list, &tmp_del_list);
- }
-
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
- if (!f->changed)
- continue;
-
- if (f->counter == 0)
- continue;
- f->changed = false;
-
- /* Clone MAC filter entry and add into temporary list */
- fclone = i40e_mac_filter_entry_clone(f);
- if (!fclone) {
- err_cond = true;
- break;
+ if (f->state == I40E_FILTER_REMOVE) {
+ WARN_ON(f->counter != 0);
+ /* Move the element into temporary del_list */
+ list_move_tail(&f->list, &tmp_del_list);
+ vsi->active_filters--;
+ }
+ if (f->state == I40E_FILTER_NEW) {
+ WARN_ON(f->counter == 0);
+ /* Move the element into temporary add_list */
+ list_move_tail(&f->list, &tmp_add_list);
}
- list_add_tail(&fclone->list, &tmp_add_list);
- }
-
- /* if failed to clone MAC filter entry - undo */
- if (err_cond) {
- i40e_undo_del_filter_entries(vsi, &tmp_del_list);
- i40e_undo_add_filter_entries(vsi);
}
spin_unlock_bh(&vsi->mac_filter_list_lock);
-
- if (err_cond) {
- i40e_cleanup_add_list(&tmp_add_list);
- retval = -ENOMEM;
- goto out;
- }
}
/* Now process 'del_list' outside the lock */
if (!list_empty(&tmp_del_list)) {
- int del_list_size;
-
- filter_list_len = pf->hw.aq.asq_buf_size /
+ filter_list_len = hw->aq.asq_buf_size /
sizeof(struct i40e_aqc_remove_macvlan_element_data);
- del_list_size = filter_list_len *
+ list_size = filter_list_len *
sizeof(struct i40e_aqc_remove_macvlan_element_data);
- del_list = kzalloc(del_list_size, GFP_ATOMIC);
+ del_list = kzalloc(list_size, GFP_ATOMIC);
if (!del_list) {
- i40e_cleanup_add_list(&tmp_add_list);
-
/* Undo VSI's MAC filter entry element updates */
spin_lock_bh(&vsi->mac_filter_list_lock);
i40e_undo_del_filter_entries(vsi, &tmp_del_list);
- i40e_undo_add_filter_entries(vsi);
spin_unlock_bh(&vsi->mac_filter_list_lock);
retval = -ENOMEM;
goto out;
@@ -1954,9 +1907,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
/* add to delete list */
ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
- del_list[num_del].vlan_tag =
- cpu_to_le16((u16)(f->vlan ==
- I40E_VLAN_ANY ? 0 : f->vlan));
+ if (f->vlan == I40E_VLAN_ANY) {
+ del_list[num_del].vlan_tag = 0;
+ cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+ } else {
+ del_list[num_del].vlan_tag =
+ cpu_to_le16((u16)(f->vlan));
+ }
cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
del_list[num_del].flags = cmd_flags;
@@ -1964,21 +1921,23 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
/* flush a full buffer */
if (num_del == filter_list_len) {
- aq_ret = i40e_aq_remove_macvlan(&pf->hw,
- vsi->seid,
+ aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid,
del_list,
- num_del,
- NULL);
- aq_err = pf->hw.aq.asq_last_status;
+ num_del, NULL);
+ aq_err = hw->aq.asq_last_status;
num_del = 0;
- memset(del_list, 0, del_list_size);
+ memset(del_list, 0, list_size);
- if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) {
+ /* Explicitly ignore and do not report when
+ * firmware returns ENOENT.
+ */
+ if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) {
retval = -EIO;
- dev_err(&pf->pdev->dev,
- "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
+ dev_info(&pf->pdev->dev,
+ "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
+ vsi_name,
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, aq_err));
}
}
/* Release memory for MAC filter entries which were
@@ -1989,17 +1948,22 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
}
if (num_del) {
- aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
- del_list, num_del,
- NULL);
- aq_err = pf->hw.aq.asq_last_status;
+ aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, del_list,
+ num_del, NULL);
+ aq_err = hw->aq.asq_last_status;
num_del = 0;
- if (aq_ret && aq_err != I40E_AQ_RC_ENOENT)
+ /* Explicitly ignore and do not report when firmware
+ * returns ENOENT.
+ */
+ if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) {
+ retval = -EIO;
dev_info(&pf->pdev->dev,
- "ignoring delete macvlan error, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
+ "ignoring delete macvlan error on %s, err %s aq_err %s\n",
+ vsi_name,
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, aq_err));
+ }
}
kfree(del_list);
@@ -2007,84 +1971,117 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
}
if (!list_empty(&tmp_add_list)) {
- int add_list_size;
-
- /* do all the adds now */
- filter_list_len = pf->hw.aq.asq_buf_size /
- sizeof(struct i40e_aqc_add_macvlan_element_data),
- add_list_size = filter_list_len *
+ /* Do all the adds now. */
+ filter_list_len = hw->aq.asq_buf_size /
+ sizeof(struct i40e_aqc_add_macvlan_element_data);
+ list_size = filter_list_len *
sizeof(struct i40e_aqc_add_macvlan_element_data);
- add_list = kzalloc(add_list_size, GFP_ATOMIC);
+ add_list = kzalloc(list_size, GFP_ATOMIC);
if (!add_list) {
- /* Purge element from temporary lists */
- i40e_cleanup_add_list(&tmp_add_list);
-
- /* Undo add filter entries from VSI MAC filter list */
- spin_lock_bh(&vsi->mac_filter_list_lock);
- i40e_undo_add_filter_entries(vsi);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
retval = -ENOMEM;
goto out;
}
-
- list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {
-
- add_happened = true;
- cmd_flags = 0;
-
+ num_add = 0;
+ list_for_each_entry(f, &tmp_add_list, list) {
+ if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
+ &vsi->state)) {
+ f->state = I40E_FILTER_FAILED;
+ continue;
+ }
/* add to add array */
+ if (num_add == 0)
+ add_head = f;
+ cmd_flags = 0;
ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
- add_list[num_add].vlan_tag =
- cpu_to_le16(
- (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
+ if (f->vlan == I40E_VLAN_ANY) {
+ add_list[num_add].vlan_tag = 0;
+ cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
+ } else {
+ add_list[num_add].vlan_tag =
+ cpu_to_le16((u16)(f->vlan));
+ }
add_list[num_add].queue_number = 0;
-
cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
add_list[num_add].flags = cpu_to_le16(cmd_flags);
num_add++;
/* flush a full buffer */
if (num_add == filter_list_len) {
- aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+ aq_ret = i40e_aq_add_macvlan(hw, vsi->seid,
add_list, num_add,
NULL);
- aq_err = pf->hw.aq.asq_last_status;
+ aq_err = hw->aq.asq_last_status;
+ fcnt = i40e_update_filter_state(num_add,
+ add_list,
+ add_head,
+ aq_ret);
+ vsi->active_filters += fcnt;
+
+ if (fcnt != num_add) {
+ promisc_changed = true;
+ set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
+ &vsi->state);
+ vsi->promisc_threshold =
+ (vsi->active_filters * 3) / 4;
+ dev_warn(&pf->pdev->dev,
+ "Error %s adding RX filters on %s, promiscuous mode forced on\n",
+ i40e_aq_str(hw, aq_err),
+ vsi_name);
+ }
+ memset(add_list, 0, list_size);
num_add = 0;
-
- if (aq_ret)
- break;
- memset(add_list, 0, add_list_size);
}
- /* Entries from tmp_add_list were cloned from MAC
- * filter list, hence clean those cloned entries
- */
- list_del(&f->list);
- kfree(f);
}
-
if (num_add) {
- aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+ aq_ret = i40e_aq_add_macvlan(hw, vsi->seid,
add_list, num_add, NULL);
- aq_err = pf->hw.aq.asq_last_status;
- num_add = 0;
+ aq_err = hw->aq.asq_last_status;
+ fcnt = i40e_update_filter_state(num_add, add_list,
+ add_head, aq_ret);
+ vsi->active_filters += fcnt;
+ if (fcnt != num_add) {
+ promisc_changed = true;
+ set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
+ &vsi->state);
+ vsi->promisc_threshold =
+ (vsi->active_filters * 3) / 4;
+ dev_warn(&pf->pdev->dev,
+ "Error %s adding RX filters on %s, promiscuous mode forced on\n",
+ i40e_aq_str(hw, aq_err), vsi_name);
+ }
}
+ /* Now move all of the filters from the temp add list back to
+ * the VSI's list.
+ */
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+ list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {
+ list_move_tail(&f->list, &vsi->mac_filter_list);
+ }
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
kfree(add_list);
add_list = NULL;
+ }
- if (add_happened && aq_ret && aq_err != I40E_AQ_RC_EINVAL) {
- retval = i40e_aq_rc_to_posix(aq_ret, aq_err);
+ /* Check to see if we can drop out of overflow promiscuous mode. */
+ if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) &&
+ (vsi->active_filters < vsi->promisc_threshold)) {
+ int failed_count = 0;
+ /* See if we have any failed filters. We can't drop out of
+ * promiscuous until these have all been deleted.
+ */
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if (f->state == I40E_FILTER_FAILED)
+ failed_count++;
+ }
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+ if (!failed_count) {
dev_info(&pf->pdev->dev,
- "add filter failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
- if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
- !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
- &vsi->state)) {
- promisc_forced_on = true;
- set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
- &vsi->state);
- dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
- }
+ "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
+ vsi_name);
+ clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
+ promisc_changed = true;
+ vsi->promisc_threshold = 0;
}
}
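To make the threshold arithmetic concrete (numbers hypothetical): if the add path hit ENOSPC while 60 filters were active, promisc_threshold becomes (60 * 3) / 4 = 45, and the VSI leaves overflow promiscuous mode only once active_filters has fallen below 45 and the last I40E_FILTER_FAILED entry has been deleted.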
@@ -2105,15 +2102,17 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
NULL);
if (aq_ret) {
retval = i40e_aq_rc_to_posix(aq_ret,
- pf->hw.aq.asq_last_status);
+ hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
- "set multi promisc failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ "set multi promisc failed on %s, err %s aq_err %s\n",
+ vsi_name,
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
}
}
- if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
+ if ((changed_flags & IFF_PROMISC) ||
+ (promisc_changed &&
+ test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))) {
bool cur_promisc;
cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
@@ -2129,35 +2128,72 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
*/
if (pf->cur_promisc != cur_promisc) {
pf->cur_promisc = cur_promisc;
- set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ if (cur_promisc)
+ aq_ret =
+ i40e_aq_set_default_vsi(hw,
+ vsi->seid,
+ NULL);
+ else
+ aq_ret =
+ i40e_aq_clear_default_vsi(hw,
+ vsi->seid,
+ NULL);
+ if (aq_ret) {
+ retval = i40e_aq_rc_to_posix(aq_ret,
+ hw->aq.asq_last_status);
+ dev_info(&pf->pdev->dev,
+ "Set default VSI failed on %s, err %s, aq_err %s\n",
+ vsi_name,
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw,
+ hw->aq.asq_last_status));
+ }
}
} else {
aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
- &vsi->back->hw,
+ hw,
vsi->seid,
cur_promisc, NULL,
true);
if (aq_ret) {
retval =
i40e_aq_rc_to_posix(aq_ret,
- pf->hw.aq.asq_last_status);
+ hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
- "set unicast promisc failed, err %d, aq_err %d\n",
- aq_ret, pf->hw.aq.asq_last_status);
+ "set unicast promisc failed on %s, err %s, aq_err %s\n",
+ vsi_name,
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw,
+ hw->aq.asq_last_status));
}
aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
- &vsi->back->hw,
+ hw,
vsi->seid,
cur_promisc, NULL);
if (aq_ret) {
retval =
i40e_aq_rc_to_posix(aq_ret,
- pf->hw.aq.asq_last_status);
+ hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
- "set multicast promisc failed, err %d, aq_err %d\n",
- aq_ret, pf->hw.aq.asq_last_status);
+ "set multicast promisc failed on %s, err %s, aq_err %s\n",
+ vsi_name,
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw,
+ hw->aq.asq_last_status));
}
}
+ aq_ret = i40e_aq_set_vsi_broadcast(hw,
+ vsi->seid,
+ cur_promisc, NULL);
+ if (aq_ret) {
+ retval = i40e_aq_rc_to_posix(aq_ret,
+ hw->aq.asq_last_status);
+ dev_info(&pf->pdev->dev,
+ "set broadcast promisc failed, err %s, aq_err %s\n",
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw,
+ hw->aq.asq_last_status));
+ }
}
out:
/* if something went wrong then set the changed flag so we try again */
@@ -2325,7 +2361,7 @@ static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
**/
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
{
- struct i40e_mac_filter *f, *add_f;
+ struct i40e_mac_filter *f, *ftmp, *add_f;
bool is_netdev, is_vf;
is_vf = (vsi->type == I40E_VSI_SRIOV);
@@ -2346,7 +2382,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
}
}
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
if (!add_f) {
dev_info(&vsi->back->pdev->dev,
@@ -2360,7 +2396,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
/* Now if we add a vlan tag, make sure to check if it is the first
* tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
* with 0, so we now accept untagged and specified tagged traffic
- * (and not any taged and untagged)
+ * (and not all tags along with untagged)
*/
if (vid > 0) {
if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
@@ -2382,7 +2418,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
/* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
if (vid > 0 && !vsi->info.pvid) {
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
is_vf, is_netdev))
continue;
@@ -2419,7 +2455,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
{
struct net_device *netdev = vsi->netdev;
- struct i40e_mac_filter *f, *add_f;
+ struct i40e_mac_filter *f, *ftmp, *add_f;
bool is_vf, is_netdev;
int filter_count = 0;
@@ -2432,7 +2468,7 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
if (is_netdev)
i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
- list_for_each_entry(f, &vsi->mac_filter_list, list)
+ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
/* go through all the filters for this VSI and if there is only
@@ -2465,7 +2501,7 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
}
if (!filter_count) {
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
is_vf, is_netdev);
@@ -2510,8 +2546,6 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,
if (vid > 4095)
return -EINVAL;
- netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
-
/* If the network stack called us with vid = 0 then
* it is asking to receive priority tagged packets with
* vlan id 0. Our HW receives them by default when configured
@@ -2545,8 +2579,6 @@ static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
- netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
-
/* return code is ignored as there is nothing a user
* can do about failure to remove and a log message was
* already printed from the other function
@@ -2559,6 +2591,44 @@ static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
}
/**
+ * i40e_macaddr_init - explicitly write the mac address filters
+ *
+ * @vsi: pointer to the vsi
+ * @macaddr: the MAC address
+ *
+ * This is needed when the macaddr has been obtained by other
+ * means than the default, e.g., from Open Firmware or IDPROM.
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr)
+{
+ int ret;
+ struct i40e_aqc_add_macvlan_element_data element;
+
+ ret = i40e_aq_mac_address_write(&vsi->back->hw,
+ I40E_AQC_WRITE_TYPE_LAA_WOL,
+ macaddr, NULL);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "Addr change for VSI failed: %d\n", ret);
+ return -EADDRNOTAVAIL;
+ }
+
+ memset(&element, 0, sizeof(element));
+ ether_addr_copy(element.mac_addr, macaddr);
+ element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
+ ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "add filter failed err %s aq_err %s\n",
+ i40e_stat_str(&vsi->back->hw, ret),
+ i40e_aq_str(&vsi->back->hw,
+ vsi->back->hw.aq.asq_last_status));
+ }
+ return ret;
+}
+
+/**
* i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
* @vsi: the vsi being brought back up
**/
@@ -3004,8 +3074,19 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
**/
static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
{
+ struct i40e_pf *pf = vsi->back;
+ int err;
+
if (vsi->netdev)
i40e_set_rx_mode(vsi->netdev);
+
+ if (pf->flags & I40E_FLAG_PF_MAC) {
+ err = i40e_macaddr_init(vsi, pf->hw.mac.addr);
+ if (err) {
+ dev_warn(&pf->pdev->dev,
+ "could not set up macaddr; err %d\n", err);
+ }
+ }
}
/**
@@ -3947,6 +4028,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
/* clear the affinity_mask in the IRQ descriptor */
irq_set_affinity_hint(pf->msix_entries[vector].vector,
NULL);
+ synchronize_irq(pf->msix_entries[vector].vector);
free_irq(pf->msix_entries[vector].vector,
vsi->q_vectors[i]);
@@ -4472,23 +4554,38 @@ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
**/
static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
{
+ int i, tc_unused = 0;
u8 num_tc = 0;
- int i;
+ u8 ret = 0;
/* Scan the ETS Config Priority Table to find
* traffic class enabled for a given priority
- * and use the traffic class index to get the
- * number of traffic classes enabled
+ * and create a bitmask of enabled TCs
*/
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- if (dcbcfg->etscfg.prioritytable[i] > num_tc)
- num_tc = dcbcfg->etscfg.prioritytable[i];
- }
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+ num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
- /* Traffic class index starts from zero so
- * increment to return the actual count
+ /* Now scan the bitmask to check for
+ * contiguous TCs starting with TC0
*/
- return num_tc + 1;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (num_tc & BIT(i)) {
+ if (!tc_unused) {
+ ret++;
+ } else {
+ pr_err("Non-contiguous TC - Disabling DCB\n");
+ return 1;
+ }
+ } else {
+ tc_unused = 1;
+ }
+ }
+
+ /* There is always at least TC0 */
+ if (!ret)
+ ret = 1;
+
+ return ret;
}
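Two worked examples of the new contiguity check (tables hypothetical): a prioritytable of {0, 0, 1, 1, 2, 0, 0, 0} builds the bitmask 0x7, every set bit is contiguous from TC0, and the function returns 3; a table of {0, 2, 0, 0, 0, 0, 0, 0} builds 0x5, the hole at TC1 trips tc_unused when TC2 is reached, and DCB is disabled by returning 1.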
/**
@@ -4953,7 +5050,6 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
if (pf->vsi[v]->netdev)
i40e_dcbnl_set_all(pf->vsi[v]);
}
- i40e_notify_client_of_l2_param_changes(pf->vsi[v]);
}
}
@@ -5017,9 +5113,13 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
DCB_CAP_DCBX_VER_IEEE;
pf->flags |= I40E_FLAG_DCB_CAPABLE;
- /* Enable DCB tagging only when more than one TC */
+ /* Enable DCB tagging only when more than one TC
+ * or explicitly disable if only one TC
+ */
if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
pf->flags |= I40E_FLAG_DCB_ENABLED;
+ else
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
dev_dbg(&pf->pdev->dev,
"DCBX offload is supported for this PF.\n");
}
@@ -5178,12 +5278,6 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
usleep_range(1000, 2000);
i40e_down(vsi);
- /* Give a VF some time to respond to the reset. The
- * two second wait is based upon the watchdog cycle in
- * the VF driver.
- */
- if (vsi->type == I40E_VSI_SRIOV)
- msleep(2000);
i40e_up(vsi);
clear_bit(__I40E_CONFIG_BUSY, &pf->state);
}
@@ -5226,6 +5320,9 @@ void i40e_down(struct i40e_vsi *vsi)
i40e_clean_tx_ring(vsi->tx_rings[i]);
i40e_clean_rx_ring(vsi->rx_rings[i]);
}
+
+ i40e_notify_client_of_netdev_close(vsi, false);
+
}
/**
@@ -5337,15 +5434,7 @@ int i40e_open(struct net_device *netdev)
TCP_FLAG_CWR) >> 16);
wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
-#ifdef CONFIG_I40E_VXLAN
- vxlan_get_rx_port(netdev);
-#endif
-#ifdef CONFIG_I40E_GENEVE
- if (pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)
- geneve_get_rx_port(netdev);
-#endif
-
- i40e_notify_client_of_netdev_open(vsi);
+ udp_tunnel_get_rx_info(netdev);
return 0;
}
@@ -5631,7 +5720,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
u8 type;
/* Not DCB capable or capability disabled */
- if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
return ret;
/* Ignore if event is not for Nearest Bridge */
@@ -5711,6 +5800,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
i40e_service_event_schedule(pf);
} else {
i40e_pf_unquiesce_all_vsi(pf);
+ /* Notify the client for the DCB changes */
+ i40e_notify_client_of_l2_param_changes(pf->vsi[pf->lan_vsi]);
}
exit:
@@ -5935,7 +6026,6 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
}
-
}
/**
@@ -7052,7 +7142,6 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
**/
static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
{
-#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)
struct i40e_hw *hw = &pf->hw;
i40e_status ret;
__be16 port;
@@ -7087,7 +7176,6 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
}
}
}
-#endif
}
/**
@@ -7169,7 +7257,7 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
vsi->alloc_queue_pairs = 1;
vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
I40E_REQ_DESCRIPTOR_MULTIPLE);
- vsi->num_q_vectors = 1;
+ vsi->num_q_vectors = pf->num_fdsb_msix;
break;
case I40E_VSI_VMDQ2:
@@ -7553,9 +7641,11 @@ static int i40e_init_msix(struct i40e_pf *pf)
/* reserve one vector for sideband flow director */
if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
if (vectors_left) {
+ pf->num_fdsb_msix = 1;
v_budget++;
vectors_left--;
} else {
+ pf->num_fdsb_msix = 0;
pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
}
}
@@ -7810,6 +7900,7 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
#endif
I40E_FLAG_RSS_ENABLED |
I40E_FLAG_DCB_CAPABLE |
+ I40E_FLAG_DCB_ENABLED |
I40E_FLAG_SRIOV_ENABLED |
I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED |
@@ -7906,7 +7997,6 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
u8 *rss_lut;
int ret, i;
- memset(&rss_key, 0, sizeof(rss_key));
memcpy(&rss_key, seed, sizeof(rss_key));
rss_lut = kzalloc(pf->rss_table_size, GFP_KERNEL);
@@ -8580,7 +8670,9 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
/* Enable filters and mark for reset */
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
need_reset = true;
- pf->flags |= I40E_FLAG_FD_SB_ENABLED;
+ /* enable FD_SB only if there is MSI-X vector */
+ if (pf->num_fdsb_msix > 0)
+ pf->flags |= I40E_FLAG_FD_SB_ENABLED;
} else {
/* turn off filters, mark for reset and clear SW filter list */
if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
@@ -8629,7 +8721,6 @@ static int i40e_set_features(struct net_device *netdev,
return 0;
}
-#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)
/**
* i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port
* @pf: board private structure
@@ -8649,21 +8740,18 @@ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
return i;
}
-#endif
-
-#if IS_ENABLED(CONFIG_VXLAN)
/**
- * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
+ * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
* @netdev: This physical port's netdev
- * @sa_family: Socket Family that VXLAN is notifying us about
- * @port: New UDP port number that VXLAN started listening to
+ * @ti: Tunnel endpoint information
**/
-static void i40e_add_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+static void i40e_udp_tunnel_add(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
+ __be16 port = ti->port;
u8 next_idx;
u8 idx;
@@ -8671,7 +8759,7 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
/* Check if port already exists */
if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "vxlan port %d already offloaded\n",
+ netdev_info(netdev, "port %d already offloaded\n",
ntohs(port));
return;
}
@@ -8680,131 +8768,75 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
next_idx = i40e_get_udp_port_idx(pf, 0);
if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n",
+ netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
ntohs(port));
return;
}
- /* New port: add it and mark its index in the bitmap */
- pf->udp_ports[next_idx].index = port;
- pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
- pf->pending_udp_bitmap |= BIT_ULL(next_idx);
- pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
-}
-
-/**
- * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
- * @netdev: This physical port's netdev
- * @sa_family: Socket Family that VXLAN is notifying us about
- * @port: UDP port number that VXLAN stopped listening to
- **/
-static void i40e_del_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
-{
- struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_vsi *vsi = np->vsi;
- struct i40e_pf *pf = vsi->back;
- u8 idx;
-
- idx = i40e_get_udp_port_idx(pf, port);
-
- /* Check if port already exists */
- if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- /* if port exists, set it to 0 (mark for deletion)
- * and make it pending
- */
- pf->udp_ports[idx].index = 0;
- pf->pending_udp_bitmap |= BIT_ULL(idx);
- pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
- } else {
- netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
- ntohs(port));
- }
-}
-#endif
-
-#if IS_ENABLED(CONFIG_GENEVE)
-/**
- * i40e_add_geneve_port - Get notifications about GENEVE ports that come up
- * @netdev: This physical port's netdev
- * @sa_family: Socket Family that GENEVE is notifying us about
- * @port: New UDP port number that GENEVE started listening to
- **/
-static void i40e_add_geneve_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
-{
- struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_vsi *vsi = np->vsi;
- struct i40e_pf *pf = vsi->back;
- u8 next_idx;
- u8 idx;
-
- if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
- return;
-
- idx = i40e_get_udp_port_idx(pf, port);
-
- /* Check if port already exists */
- if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "udp port %d already offloaded\n",
- ntohs(port));
- return;
- }
-
- /* Now check if there is space to add the new port */
- next_idx = i40e_get_udp_port_idx(pf, 0);
-
- if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "maximum number of UDP ports reached, not adding port %d\n",
- ntohs(port));
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
+ return;
+ pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
+ break;
+ default:
return;
}
/* New port: add it and mark its index in the bitmap */
pf->udp_ports[next_idx].index = port;
- pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
pf->pending_udp_bitmap |= BIT_ULL(next_idx);
pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
-
- dev_info(&pf->pdev->dev, "adding geneve port %d\n", ntohs(port));
}
/**
- * i40e_del_geneve_port - Get notifications about GENEVE ports that go away
+ * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
* @netdev: This physical port's netdev
- * @sa_family: Socket Family that GENEVE is notifying us about
- * @port: UDP port number that GENEVE stopped listening to
+ * @ti: Tunnel endpoint information
**/
-static void i40e_del_geneve_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+static void i40e_udp_tunnel_del(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
+ __be16 port = ti->port;
u8 idx;
- if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
- return;
-
idx = i40e_get_udp_port_idx(pf, port);
/* Check if port already exists */
- if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- /* if port exists, set it to 0 (mark for deletion)
- * and make it pending
- */
- pf->udp_ports[idx].index = 0;
- pf->pending_udp_bitmap |= BIT_ULL(idx);
- pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
+ if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
+ goto not_found;
- dev_info(&pf->pdev->dev, "deleting geneve port %d\n",
- ntohs(port));
- } else {
- netdev_warn(netdev, "geneve port %d was not found, not deleting\n",
- ntohs(port));
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
+ goto not_found;
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
+ goto not_found;
+ break;
+ default:
+ goto not_found;
}
+
+ /* if port exists, set it to 0 (mark for deletion)
+ * and make it pending
+ */
+ pf->udp_ports[idx].index = 0;
+ pf->pending_udp_bitmap |= BIT_ULL(idx);
+ pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
+
+ return;
+not_found:
+ netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
+ ntohs(port));
}
-#endif
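With both protocol-specific hook pairs folded into i40e_udp_tunnel_add()/i40e_udp_tunnel_del(), port replay moves to the udp_tunnel core: the udp_tunnel_get_rx_info() call added to i40e_open() earlier in this patch asks the stack to re-announce every live VXLAN and GENEVE port through .ndo_udp_tunnel_add, so the driver no longer polls each protocol on open.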
static int i40e_get_phys_port_id(struct net_device *netdev,
struct netdev_phys_item_id *ppid)
@@ -9034,14 +9066,8 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
.ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
.ndo_set_vf_trust = i40e_ndo_set_vf_trust,
-#if IS_ENABLED(CONFIG_VXLAN)
- .ndo_add_vxlan_port = i40e_add_vxlan_port,
- .ndo_del_vxlan_port = i40e_del_vxlan_port,
-#endif
-#if IS_ENABLED(CONFIG_GENEVE)
- .ndo_add_geneve_port = i40e_add_geneve_port,
- .ndo_del_geneve_port = i40e_del_geneve_port,
-#endif
+ .ndo_udp_tunnel_add = i40e_udp_tunnel_add,
+ .ndo_udp_tunnel_del = i40e_udp_tunnel_del,
.ndo_get_phys_port_id = i40e_get_phys_port_id,
.ndo_fdb_add = i40e_ndo_fdb_add,
.ndo_features_check = i40e_features_check,
@@ -9057,7 +9083,6 @@ static const struct net_device_ops i40e_netdev_ops = {
**/
static int i40e_config_netdev(struct i40e_vsi *vsi)
{
- u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_netdev_priv *np;
@@ -9121,18 +9146,10 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
* default a MAC-VLAN filter that accepts any tagged packet
* which must be replaced by a normal filter.
*/
- if (!i40e_rm_default_mac_filter(vsi, mac_addr)) {
- spin_lock_bh(&vsi->mac_filter_list_lock);
- i40e_add_filter(vsi, mac_addr,
- I40E_VLAN_ANY, false, true);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
- }
- } else if ((pf->hw.aq.api_maj_ver > 1) ||
- ((pf->hw.aq.api_maj_ver == 1) &&
- (pf->hw.aq.api_min_ver > 4))) {
- /* Supported in FW API version higher than 1.4 */
- pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
- pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
+ i40e_rm_default_mac_filter(vsi, mac_addr);
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+ i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
} else {
/* relate the VSI_VMDQ name to the VSI_MAIN name */
snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
@@ -9144,10 +9161,6 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
spin_unlock_bh(&vsi->mac_filter_list_lock);
}
- spin_lock_bh(&vsi->mac_filter_list_lock);
- i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
-
ether_addr_copy(netdev->dev_addr, mac_addr);
ether_addr_copy(netdev->perm_addr, mac_addr);
@@ -9226,8 +9239,6 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
{
int ret = -ENODEV;
i40e_status aq_ret = 0;
- u8 laa_macaddr[ETH_ALEN];
- bool found_laa_mac_filter = false;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_vsi_context ctxt;
@@ -9428,41 +9439,16 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
}
}
+ vsi->active_filters = 0;
+ clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
spin_lock_bh(&vsi->mac_filter_list_lock);
/* If macvlan filters already exist, force them to get loaded */
list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
- f->changed = true;
+ f->state = I40E_FILTER_NEW;
f_count++;
-
- /* Expected to have only one MAC filter entry for LAA in list */
- if (f->is_laa && vsi->type == I40E_VSI_MAIN) {
- ether_addr_copy(laa_macaddr, f->macaddr);
- found_laa_mac_filter = true;
- }
}
spin_unlock_bh(&vsi->mac_filter_list_lock);
- if (found_laa_mac_filter) {
- struct i40e_aqc_remove_macvlan_element_data element;
-
- memset(&element, 0, sizeof(element));
- ether_addr_copy(element.mac_addr, laa_macaddr);
- element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
- ret = i40e_aq_remove_macvlan(hw, vsi->seid,
- &element, 1, NULL);
- if (ret) {
- /* some older FW has a different default */
- element.flags |=
- I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
- i40e_aq_remove_macvlan(hw, vsi->seid,
- &element, 1, NULL);
- }
-
- i40e_aq_mac_address_write(hw,
- I40E_AQC_WRITE_TYPE_LAA_WOL,
- laa_macaddr, NULL);
- }
-
if (f_count) {
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
pf->flags |= I40E_FLAG_FILTER_SYNC;
@@ -9673,6 +9659,8 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
+ if (vsi->type == I40E_VSI_MAIN)
+ i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
/* assign it some queues */
ret = i40e_alloc_rings(vsi);
@@ -9698,44 +9686,6 @@ err_vsi:
}
/**
- * i40e_macaddr_init - explicitly write the mac address filters.
- *
- * @vsi: pointer to the vsi.
- * @macaddr: the MAC address
- *
- * This is needed when the macaddr has been obtained by other
- * means than the default, e.g., from Open Firmware or IDPROM.
- * Returns 0 on success, negative on failure
- **/
-static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr)
-{
- int ret;
- struct i40e_aqc_add_macvlan_element_data element;
-
- ret = i40e_aq_mac_address_write(&vsi->back->hw,
- I40E_AQC_WRITE_TYPE_LAA_WOL,
- macaddr, NULL);
- if (ret) {
- dev_info(&vsi->back->pdev->dev,
- "Addr change for VSI failed: %d\n", ret);
- return -EADDRNOTAVAIL;
- }
-
- memset(&element, 0, sizeof(element));
- ether_addr_copy(element.mac_addr, macaddr);
- element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
- ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL);
- if (ret) {
- dev_info(&vsi->back->pdev->dev,
- "add filter failed err %s aq_err %s\n",
- i40e_stat_str(&vsi->back->hw, ret),
- i40e_aq_str(&vsi->back->hw,
- vsi->back->hw.aq.asq_last_status));
- }
- return ret;
-}
-
-/**
* i40e_vsi_setup - Set up a VSI by a given type
* @pf: board private structure
* @type: VSI type
@@ -10147,14 +10097,14 @@ void i40e_veb_release(struct i40e_veb *veb)
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
struct i40e_pf *pf = veb->pf;
- bool is_default = veb->pf->cur_promisc;
bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
int ret;
/* get a VEB from the hardware */
ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
- veb->enabled_tc, is_default,
+ veb->enabled_tc, false,
&veb->seid, enable_stats, NULL);
+
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't add VEB, err %s aq_err %s\n",
@@ -10557,6 +10507,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED |
I40E_FLAG_DCB_CAPABLE |
+ I40E_FLAG_DCB_ENABLED |
I40E_FLAG_SRIOV_ENABLED |
I40E_FLAG_VMDQ_ENABLED);
} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
@@ -10580,7 +10531,8 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
/* Not enough queues for all TCs */
if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
(queues_left < I40E_MAX_TRAFFIC_CLASS)) {
- pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
+ I40E_FLAG_DCB_ENABLED);
dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
}
pf->num_lan_qps = max_t(int, pf->rss_size_max,
@@ -10703,12 +10655,8 @@ static void i40e_print_features(struct i40e_pf *pf)
}
if (pf->flags & I40E_FLAG_DCB_CAPABLE)
i += snprintf(&buf[i], REMAIN(i), " DCB");
-#if IS_ENABLED(CONFIG_VXLAN)
i += snprintf(&buf[i], REMAIN(i), " VxLAN");
-#endif
-#if IS_ENABLED(CONFIG_GENEVE)
i += snprintf(&buf[i], REMAIN(i), " Geneve");
-#endif
if (pf->flags & I40E_FLAG_PTP)
i += snprintf(&buf[i], REMAIN(i), " PTP");
#ifdef I40E_FCOE
@@ -10783,8 +10731,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* set up pci connections */
- err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
- IORESOURCE_MEM), i40e_driver_name);
+ err = pci_request_mem_regions(pdev, i40e_driver_name);
if (err) {
dev_info(&pdev->dev,
"pci_request_selected_regions failed %d\n", err);
@@ -10982,7 +10929,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = i40e_init_pf_dcb(pf);
if (err) {
dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
- pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
/* Continue without DCB enabled */
}
#endif /* CONFIG_I40E_DCB */
@@ -11281,8 +11228,7 @@ err_ioremap:
kfree(pf);
err_pf_alloc:
pci_disable_pcie_error_reporting(pdev);
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
pci_disable_device(pdev);
@@ -11393,8 +11339,7 @@ static void i40e_remove(struct pci_dev *pdev)
iounmap(hw->hw_addr);
kfree(pf);
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
@@ -11539,6 +11484,7 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
struct i40e_hw *hw = &pf->hw;
+ int retval = 0;
set_bit(__I40E_SUSPENDED, &pf->state);
set_bit(__I40E_DOWN, &pf->state);
@@ -11550,10 +11496,16 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+ i40e_stop_misc_vector(pf);
+
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+
pci_wake_from_d3(pdev, pf->wol_en);
pci_set_power_state(pdev, PCI_D3hot);
- return 0;
+ return retval;
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 80403c6ee..4660c5abc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -98,6 +98,8 @@ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
bool qualified_modules, bool report_init,
struct i40e_aq_get_phy_abilities_resp *abilities,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index a8868e1bf..df7ecc957 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -740,14 +740,12 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
tx_ring->q_vector->tx.total_packets += total_packets;
if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
- unsigned int j = 0;
-
/* check to see if there are < 4 descriptors
* waiting to be written back, then kick the hardware to force
* them to be written back in case we stay in NAPI.
* In this mode on X722 we do not enable Interrupt.
*/
- j = i40e_get_tx_pending(tx_ring, false);
+ unsigned int j = i40e_get_tx_pending(tx_ring, false);
if (budget &&
((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 1fcafcfa8..6fcbf764f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -665,6 +665,8 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
goto error_alloc_vsi_res;
}
if (type == I40E_VSI_SRIOV) {
+ u64 hena = i40e_pf_get_default_rss_hena(pf);
+
vf->lan_vsi_idx = vsi->idx;
vf->lan_vsi_id = vsi->id;
/* If the port VLAN has been configured and then the
@@ -687,6 +689,10 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
vf->default_lan_addr.addr, vf->vf_id);
}
spin_unlock_bh(&vsi->mac_filter_list_lock);
+ i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id),
+ (u32)hena);
+ i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id),
+ (u32)(hena >> 32));
}
/* program mac filter */
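The HENA value chooses which packet types feed the VF's RSS hash. It is a 64-bit mask, but the VF's HENA1 registers are 32 bits wide, hence the paired writes; schematically:

u64 hena = i40e_pf_get_default_rss_hena(pf);	/* 64-bit enable mask */

/* low word into HENA1 word 0, high word into HENA1 word 1 */
i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf_id), (u32)hena);
i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf_id), (u32)(hena >> 32));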
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 8f6420400..4db0c0326 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -59,7 +59,6 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_1G_BASE_T_X722:
case I40E_DEV_ID_10G_BASE_T_X722:
case I40E_DEV_ID_SFP_I_X722:
- case I40E_DEV_ID_QSFP_I_X722:
hw->mac.type = I40E_MAC_X722;
break;
case I40E_DEV_ID_X722_VF:
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
index d34972bab..702357069 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
@@ -45,7 +45,6 @@
#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
#define I40E_DEV_ID_SFP_I_X722 0x37D3
-#define I40E_DEV_ID_QSFP_I_X722 0x37D4
#define I40E_DEV_ID_X722_VF 0x37CD
#define I40E_DEV_ID_X722_VF_HV 0x37D9
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 79d99cd91..a579193b2 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -259,13 +259,12 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
tx_ring->q_vector->tx.total_packets += total_packets;
if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
- unsigned int j = 0;
/* check to see if there are < 4 descriptors
* waiting to be written back, then kick the hardware to force
* them to be written back in case we stay in NAPI.
* In this mode on X722 we do not enable Interrupt.
*/
- j = i40evf_get_tx_pending(tx_ring, false);
+ unsigned int j = i40evf_get_tx_pending(tx_ring, false);
if (budget &&
((j / (WB_STRIDE + 1)) == 0) && (j > 0) &&
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 16c552952..600fb9c4a 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -37,8 +37,8 @@ static const char i40evf_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 5
-#define DRV_VERSION_BUILD 10
+#define DRV_VERSION_MINOR 6
+#define DRV_VERSION_BUILD 11
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
@@ -57,7 +57,9 @@ static const char i40evf_copyright[] =
*/
static const struct pci_device_id i40evf_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF_HV), 0},
/* required last entry */
{0, }
};
@@ -825,7 +827,7 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
ether_addr_copy(f->macaddr, macaddr);
- list_add(&f->list, &adapter->mac_filter_list);
+ list_add_tail(&f->list, &adapter->mac_filter_list);
f->add = true;
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
}
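A plausible reading of this one-liner: list_add() pushes at the head (LIFO) while list_add_tail() appends (FIFO), so pending MAC filters are now flushed to the PF in the order they were requested; a sketch with hypothetical names:

LIST_HEAD(mac_filter_list);

/* oldest request first; list_add() would reverse the order */
list_add_tail(&f->list, &mac_filter_list);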
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index f13445691..d76c221d4 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -434,6 +434,8 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
ether_addr_copy(veal->list[i].addr, f->macaddr);
i++;
f->add = false;
+ if (i == count)
+ break;
}
}
if (!more)
@@ -497,6 +499,8 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
i++;
list_del(&f->list);
kfree(f);
+ if (i == count)
+ break;
}
}
if (!more)
@@ -560,6 +564,8 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
vvfl->vlan_id[i] = f->vlan;
i++;
f->add = false;
+ if (i == count)
+ break;
}
}
if (!more)
@@ -623,6 +629,8 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
i++;
list_del(&f->list);
kfree(f);
+ if (i == count)
+ break;
}
}
if (!more)
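All four virtchnl builders (add/del MAC address, add/del VLAN) gain the same guard: the message buffer was sized for count entries tallied earlier under the same lock, and the break keeps the list walk from writing past that allocation if more entries qualify than were counted. The common shape, with hypothetical names:

int i = 0;

list_for_each_entry(f, &filter_list, list) {
	if (!f->add)
		continue;
	msg->entry[i++] = f->data;	/* msg allocated for count entries */
	f->add = false;
	if (i == count)			/* never overrun the buffer */
		break;
}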
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index b9609afa5..5387b3a96 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -445,6 +445,7 @@ struct igb_adapter {
unsigned long ptp_tx_start;
unsigned long last_rx_ptp_check;
unsigned long last_rx_timestamp;
+ unsigned int ptp_flags;
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
@@ -474,12 +475,15 @@ struct igb_adapter {
u16 eee_advert;
};
+/* flags controlling PTP/1588 function */
+#define IGB_PTP_ENABLED BIT(0)
+#define IGB_PTP_OVERFLOW_CHECK BIT(1)
+
#define IGB_FLAG_HAS_MSI BIT(0)
#define IGB_FLAG_DCA_ENABLED BIT(1)
#define IGB_FLAG_QUAD_PORT_A BIT(2)
#define IGB_FLAG_QUEUE_PAIRS BIT(3)
#define IGB_FLAG_DMAC BIT(4)
-#define IGB_FLAG_PTP BIT(5)
#define IGB_FLAG_RSS_FIELD_IPV4_UDP BIT(6)
#define IGB_FLAG_RSS_FIELD_IPV6_UDP BIT(7)
#define IGB_FLAG_WOL_SUPPORTED BIT(8)
@@ -546,6 +550,7 @@ void igb_set_fw_version(struct igb_adapter *);
void igb_ptp_init(struct igb_adapter *adapter);
void igb_ptp_stop(struct igb_adapter *adapter);
void igb_ptp_reset(struct igb_adapter *adapter);
+void igb_ptp_suspend(struct igb_adapter *adapter);
void igb_ptp_rx_hang(struct igb_adapter *adapter);
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ef3d642f5..942a89fb0 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2027,7 +2027,8 @@ void igb_reset(struct igb_adapter *adapter)
wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
/* Re-enable PTP, where applicable. */
- igb_ptp_reset(adapter);
+ if (adapter->ptp_flags & IGB_PTP_ENABLED)
+ igb_ptp_reset(adapter);
igb_get_phy_info(hw);
}
@@ -2323,9 +2324,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
- err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
- IORESOURCE_MEM),
- igb_driver_name);
+ err = pci_request_mem_regions(pdev, igb_driver_name);
if (err)
goto err_pci_reg;
@@ -2749,8 +2748,7 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
pci_disable_device(pdev);
@@ -2915,8 +2913,7 @@ static void igb_remove(struct pci_dev *pdev)
pci_iounmap(pdev, adapter->io_addr);
if (hw->flash_address)
iounmap(hw->flash_address);
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
kfree(adapter->shadow_vfta);
free_netdev(netdev);
@@ -6855,12 +6852,12 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
**/
static bool igb_add_rx_frag(struct igb_ring *rx_ring,
struct igb_rx_buffer *rx_buffer,
+ unsigned int size,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
struct page *page = rx_buffer->page;
unsigned char *va = page_address(page) + rx_buffer->page_offset;
- unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
unsigned int truesize = IGB_RX_BUFSZ;
#else
@@ -6912,6 +6909,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
+ unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
struct igb_rx_buffer *rx_buffer;
struct page *page;
@@ -6947,11 +6945,11 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
dma_sync_single_range_for_cpu(rx_ring->dev,
rx_buffer->dma,
rx_buffer->page_offset,
- IGB_RX_BUFSZ,
+ size,
DMA_FROM_DEVICE);
/* pull page into skb */
- if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+ if (igb_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) {
/* hand second half of page back to the ring */
igb_reuse_rx_page(rx_ring, rx_buffer);
} else {
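The receive path now reads the descriptor's write-back length once in igb_fetch_rx_buffer() and threads it down, so the CPU-side DMA sync covers only the bytes the NIC wrote rather than the full IGB_RX_BUFSZ; in outline:

unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);

/* sync just the received bytes, not the whole half-page buffer */
dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma,
			      rx_buffer->page_offset, size,
			      DMA_FROM_DEVICE);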
@@ -7527,6 +7525,8 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
if (netif_running(netdev))
__igb_close(netdev, true);
+ igb_ptp_suspend(adapter);
+
igb_clear_interrupt_scheme(adapter);
#ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 3c7bcdf76..336c103ae 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -684,6 +684,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL);
unsigned long rx_event;
+ /* Other hardware uses per-packet timestamps */
if (hw->mac.type != e1000_82576)
return;
@@ -1062,6 +1063,13 @@ int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
-EFAULT : 0;
}
+/**
+ * igb_ptp_init - Initialize PTP functionality
+ * @adapter: Board private structure
+ *
+ * This function is called at device probe to initialize the PTP
+ * functionality.
+ */
void igb_ptp_init(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -1084,8 +1092,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->cc.mask = CYCLECOUNTER_MASK(64);
adapter->cc.mult = 1;
adapter->cc.shift = IGB_82576_TSYNC_SHIFT;
- /* Dial the nominal frequency. */
- wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
+ adapter->ptp_flags |= IGB_PTP_OVERFLOW_CHECK;
break;
case e1000_82580:
case e1000_i354:
@@ -1104,8 +1111,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->cc.mask = CYCLECOUNTER_MASK(IGB_NBITS_82580);
adapter->cc.mult = 1;
adapter->cc.shift = 0;
- /* Enable the timer functions by clearing bit 31. */
- wr32(E1000_TSAUXC, 0x0);
+ adapter->ptp_flags |= IGB_PTP_OVERFLOW_CHECK;
break;
case e1000_i210:
case e1000_i211:
@@ -1130,44 +1136,24 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.settime64 = igb_ptp_settime_i210;
adapter->ptp_caps.enable = igb_ptp_feature_enable_i210;
adapter->ptp_caps.verify = igb_ptp_verify_pin;
- /* Enable the timer functions by clearing bit 31. */
- wr32(E1000_TSAUXC, 0x0);
break;
default:
adapter->ptp_clock = NULL;
return;
}
- wrfl();
-
spin_lock_init(&adapter->tmreg_lock);
INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
- /* Initialize the clock and overflow work for devices that need it. */
- if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
- struct timespec64 ts = ktime_to_timespec64(ktime_get_real());
-
- igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
- } else {
- timecounter_init(&adapter->tc, &adapter->cc,
- ktime_to_ns(ktime_get_real()));
-
+ if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
igb_ptp_overflow_check);
- schedule_delayed_work(&adapter->ptp_overflow_work,
- IGB_SYSTIM_OVERFLOW_PERIOD);
- }
-
- /* Initialize the time sync interrupts for devices that support it. */
- if (hw->mac.type >= e1000_82580) {
- wr32(E1000_TSIM, TSYNC_INTERRUPTS);
- wr32(E1000_IMS, E1000_IMS_TS);
- }
-
adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+ igb_ptp_reset(adapter);
+
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
&adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) {
@@ -1176,32 +1162,24 @@ void igb_ptp_init(struct igb_adapter *adapter)
} else {
dev_info(&adapter->pdev->dev, "added PHC on %s\n",
adapter->netdev->name);
- adapter->flags |= IGB_FLAG_PTP;
+ adapter->ptp_flags |= IGB_PTP_ENABLED;
}
}
/**
- * igb_ptp_stop - Disable PTP device and stop the overflow check.
- * @adapter: Board private structure.
+ * igb_ptp_suspend - Disable PTP work items and prepare for suspend
+ * @adapter: Board private structure
*
- * This function stops the PTP support and cancels the delayed work.
- **/
-void igb_ptp_stop(struct igb_adapter *adapter)
+ * This function stops the overflow check work and PTP Tx timestamp work, and
+ * will prepare the device for OS suspend.
+ */
+void igb_ptp_suspend(struct igb_adapter *adapter)
{
- switch (adapter->hw.mac.type) {
- case e1000_82576:
- case e1000_82580:
- case e1000_i354:
- case e1000_i350:
- cancel_delayed_work_sync(&adapter->ptp_overflow_work);
- break;
- case e1000_i210:
- case e1000_i211:
- /* No delayed work to cancel. */
- break;
- default:
+ if (!(adapter->ptp_flags & IGB_PTP_ENABLED))
return;
- }
+
+ if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
+ cancel_delayed_work_sync(&adapter->ptp_overflow_work);
cancel_work_sync(&adapter->ptp_tx_work);
if (adapter->ptp_tx_skb) {
@@ -1209,12 +1187,23 @@ void igb_ptp_stop(struct igb_adapter *adapter)
adapter->ptp_tx_skb = NULL;
clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
}
+}
+
+/**
+ * igb_ptp_stop - Disable PTP device and stop the overflow check.
+ * @adapter: Board private structure.
+ *
+ * This function stops the PTP support and cancels the delayed work.
+ **/
+void igb_ptp_stop(struct igb_adapter *adapter)
+{
+ igb_ptp_suspend(adapter);
if (adapter->ptp_clock) {
ptp_clock_unregister(adapter->ptp_clock);
dev_info(&adapter->pdev->dev, "removed PHC on %s\n",
adapter->netdev->name);
- adapter->flags &= ~IGB_FLAG_PTP;
+ adapter->ptp_flags &= ~IGB_PTP_ENABLED;
}
}
@@ -1229,9 +1218,6 @@ void igb_ptp_reset(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
unsigned long flags;
- if (!(adapter->flags & IGB_FLAG_PTP))
- return;
-
/* reset the tstamp_config */
igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
@@ -1268,4 +1254,10 @@ void igb_ptp_reset(struct igb_adapter *adapter)
}
out:
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+ wrfl();
+
+ if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
+ schedule_delayed_work(&adapter->ptp_overflow_work,
+ IGB_SYSTIM_OVERFLOW_PERIOD);
}
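Taken together, the igb PTP paths now layer as follows (a summary sketch, not literal code):

/* probe   -> igb_ptp_init():    fill caps, set ptp_flags, call igb_ptp_reset()
 * reset   -> igb_ptp_reset():   reprogram the timer hardware, flush writes,
 *                               reschedule overflow work when needed
 * suspend -> igb_ptp_suspend(): cancel work items, release any pending Tx skb
 * remove  -> igb_ptp_stop():    igb_ptp_suspend(), then unregister the PHC
 */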
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 9f2db1855..9475ff905 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -804,8 +804,6 @@ struct ixgbe_adapter {
#define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
u32 rss_key[IXGBE_RSS_KEY_SIZE / sizeof(u32)];
-
- bool need_crosstalk_fix;
};
static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 47afed74a..63b25006a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1813,9 +1813,6 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
/* We need to run link autotry after the driver loads */
hw->mac.autotry_restart = true;
- if (ret_val)
- return ret_val;
-
return ixgbe_verify_fw_version_82599(hw);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 902d2061c..c47b605e8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -277,6 +277,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
s32 ret_val;
u32 ctrl_ext;
+ u16 device_caps;
/* Set the media type */
hw->phy.media_type = hw->mac.ops.get_media_type(hw);
@@ -301,6 +302,22 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
if (ret_val)
return ret_val;
+ /* Cache bit indicating need for crosstalk fix */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ hw->mac.ops.get_device_caps(hw, &device_caps);
+ if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
+ hw->need_crosstalk_fix = false;
+ else
+ hw->need_crosstalk_fix = true;
+ break;
+ default:
+ hw->need_crosstalk_fix = false;
+ break;
+ }
+
/* Clear adapter stopped flag */
hw->adapter_stopped = false;
@@ -763,6 +780,9 @@ s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
{
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ if (index > 3)
+ return IXGBE_ERR_PARAM;
+
/* To turn on the LED, set mode to ON. */
led_reg &= ~IXGBE_LED_MODE_MASK(index);
led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
@@ -781,6 +801,9 @@ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
{
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ if (index > 3)
+ return IXGBE_ERR_PARAM;
+
/* To turn off the LED, set mode to OFF. */
led_reg &= ~IXGBE_LED_MODE_MASK(index);
led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
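LEDCTL describes at most four LEDs and IXGBE_LED_MODE_SHIFT(index) scales with the index, so an out-of-range index would shift the mode bits past the 32-bit register; the new guard makes the generic LED helpers fail fast instead. Caller sketch:

/* indexes 0..3 are valid, anything else is rejected */
s32 err = ixgbe_led_on_generic(hw, index);

if (err == IXGBE_ERR_PARAM)
	return err;	/* no such LED field in LEDCTL */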
@@ -2657,7 +2680,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
**/
s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw)
{
- int secrxreg;
+ u32 secrxreg;
secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
@@ -2698,6 +2721,9 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
bool locked = false;
s32 ret_val;
+ if (index > 3)
+ return IXGBE_ERR_PARAM;
+
/*
* Link must be up to auto-blink the LEDs;
* Force it if link is down.
@@ -2741,6 +2767,9 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
bool locked = false;
s32 ret_val;
+ if (index > 3)
+ return IXGBE_ERR_PARAM;
+
ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
if (ret_val)
return ret_val;
@@ -2929,8 +2958,10 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
}
/* was that the last pool using this rar? */
- if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
+ if (mpsar_lo == 0 && mpsar_hi == 0 &&
+ rar != 0 && rar != hw->mac.san_mac_rar_index)
hw->mac.ops.clear_rar(hw, rar);
+
return 0;
}
@@ -3188,6 +3219,31 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix
+ * @hw: pointer to hardware structure
+ *
+ * Contains the logic to identify if we need to verify link for the
+ * crosstalk fix
+ **/
+static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
+{
+ /* Does FW say we need the fix */
+ if (!hw->need_crosstalk_fix)
+ return false;
+
+ /* Only consider SFP+ PHYs i.e. media type fiber */
+ switch (hw->mac.ops.get_media_type(hw)) {
+ case ixgbe_media_type_fiber:
+ case ixgbe_media_type_fiber_qsfp:
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+/**
* ixgbe_check_mac_link_generic - Determine link and speed status
* @hw: pointer to hardware structure
* @speed: pointer to link speed
@@ -3202,6 +3258,35 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
u32 links_reg, links_orig;
u32 i;
+ /* If the crosstalk fix is enabled, do the sanity check of making sure
+ * the SFP+ cage is full.
+ */
+ if (ixgbe_need_crosstalk_fix(hw)) {
+ u32 sfp_cage_full;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
+ IXGBE_ESDP_SDP2;
+ break;
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
+ IXGBE_ESDP_SDP0;
+ break;
+ default:
+ /* sanity check - No SFP+ devices here */
+ sfp_cage_full = false;
+ break;
+ }
+
+ if (!sfp_cage_full) {
+ *link_up = false;
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ return 0;
+ }
+ }
+
/* clear the old state */
links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
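Net effect of the crosstalk rework: the decision is cached in hw->need_crosstalk_fix at start_hw time (the per-adapter copy set in ixgbe_sw_init() is deleted later in this patch), and the cage-presence test moves into the shared link check so every caller of check_mac_link sees the same answer. The test reduces to roughly:

/* sketch: with the fix active, report link down unless an SFP+ module
 * is seated; the cage-present pin (SDP0 vs SDP2) depends on the MAC */
if (ixgbe_need_crosstalk_fix(hw) &&
    !(IXGBE_READ_REG(hw, IXGBE_ESDP) & IXGBE_ESDP_SDP2)) {
	*link_up = false;
	*speed = IXGBE_LINK_SPEED_UNKNOWN;
	return 0;
}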
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 59b771b9b..0d7209eb5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2204,11 +2204,11 @@ static int ixgbe_set_phys_id(struct net_device *netdev,
return 2;
case ETHTOOL_ID_ON:
- hw->mac.ops.led_on(hw, IXGBE_LED_ON);
+ hw->mac.ops.led_on(hw, hw->bus.func);
break;
case ETHTOOL_ID_OFF:
- hw->mac.ops.led_off(hw, IXGBE_LED_ON);
+ hw->mac.ops.led_off(hw, hw->bus.func);
break;
case ETHTOOL_ID_INACTIVE:
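hw->bus.func is the port's PCI function number (0..3 on these parts) and doubles as the LED index; the replaced code passed IXGBE_LED_ON, a mode constant rather than an index, which the new bounds check above would now reject. Sketch:

/* drive the identify LED tied to this port's PCI function */
s32 err = hw->mac.ops.led_on(hw, hw->bus.func);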
@@ -2991,10 +2991,15 @@ static int ixgbe_get_ts_info(struct net_device *dev,
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
+ /* we always support timestamping disabled */
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
+
switch (adapter->hw.mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
+ /* fallthrough */
case ixgbe_mac_X540:
case ixgbe_mac_82599EB:
info->so_timestamping =
@@ -3014,8 +3019,7 @@ static int ixgbe_get_ts_info(struct net_device *dev,
BIT(HWTSTAMP_TX_OFF) |
BIT(HWTSTAMP_TX_ON);
- info->rx_filters =
- BIT(HWTSTAMP_FILTER_NONE) |
+ info->rx_filters |=
BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 58153e818..b4f03748a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -50,7 +50,7 @@
#include <linux/if_bridge.h>
#include <linux/prefetch.h>
#include <scsi/fc/fc_fcoe.h>
-#include <net/vxlan.h>
+#include <net/udp_tunnel.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
@@ -3084,7 +3084,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
free_irq(entry->vector, q_vector);
}
- free_irq(adapter->msix_entries[vector++].vector, adapter);
+ free_irq(adapter->msix_entries[vector].vector, adapter);
}
/**
@@ -5631,7 +5631,6 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
unsigned int rss, fdir;
u32 fwsm;
- u16 device_caps;
int i;
/* PCI config space info */
@@ -5728,9 +5727,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
#ifdef CONFIG_IXGBE_DCA
adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
#endif
-#ifdef CONFIG_IXGBE_VXLAN
adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE;
-#endif
break;
default:
break;
@@ -5779,22 +5776,6 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
- /* Cache bit indicating need for crosstalk fix */
- switch (hw->mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X550EM_x:
- case ixgbe_mac_x550em_a:
- hw->mac.ops.get_device_caps(hw, &device_caps);
- if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
- adapter->need_crosstalk_fix = false;
- else
- adapter->need_crosstalk_fix = true;
- break;
- default:
- adapter->need_crosstalk_fix = false;
- break;
- }
-
/* set default work limits */
adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
@@ -6164,9 +6145,7 @@ int ixgbe_open(struct net_device *netdev)
ixgbe_up_complete(adapter);
ixgbe_clear_vxlan_port(adapter);
-#ifdef CONFIG_IXGBE_VXLAN
- vxlan_get_rx_port(netdev);
-#endif
+ udp_tunnel_get_rx_info(netdev);
return 0;
@@ -6717,18 +6696,6 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
link_up = true;
}
- /* If Crosstalk fix enabled do the sanity check of making sure
- * the SFP+ cage is empty.
- */
- if (adapter->need_crosstalk_fix) {
- u32 sfp_cage_full;
-
- sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
- IXGBE_ESDP_SDP2;
- if (ixgbe_is_sfp(hw) && link_up && !sfp_cage_full)
- link_up = false;
- }
-
if (adapter->ixgbe_ieee_pfc)
pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
@@ -7075,16 +7042,6 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
s32 err;
- /* If crosstalk fix enabled verify the SFP+ cage is full */
- if (adapter->need_crosstalk_fix) {
- u32 sfp_cage_full;
-
- sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
- IXGBE_ESDP_SDP2;
- if (!sfp_cage_full)
- return;
- }
-
/* not searching for SFP so there is nothing to do here */
if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
@@ -7268,14 +7225,12 @@ static void ixgbe_service_task(struct work_struct *work)
ixgbe_service_event_complete(adapter);
return;
}
-#ifdef CONFIG_IXGBE_VXLAN
- rtnl_lock();
if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) {
+ rtnl_lock();
adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED;
- vxlan_get_rx_port(adapter->netdev);
+ udp_tunnel_get_rx_info(adapter->netdev);
+ rtnl_unlock();
}
- rtnl_unlock();
-#endif /* CONFIG_IXGBE_VXLAN */
ixgbe_reset_subtask(adapter);
ixgbe_phy_interrupt_subtask(adapter);
ixgbe_sfp_detection_subtask(adapter);
@@ -7703,7 +7658,6 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
/* snag network header to get L4 type and address */
skb = first->skb;
hdr.network = skb_network_header(skb);
-#ifdef CONFIG_IXGBE_VXLAN
if (skb->encapsulation &&
first->protocol == htons(ETH_P_IP) &&
hdr.ipv4->protocol != IPPROTO_UDP) {
@@ -7714,7 +7668,6 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
udp_hdr(skb)->dest == adapter->vxlan_port)
hdr.network = skb_inner_network_header(skb);
}
-#endif /* CONFIG_IXGBE_VXLAN */
/* Currently only IPv4/IPv6 with TCP is supported */
switch (hdr.ipv4->version) {
@@ -8314,14 +8267,53 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter,
struct tc_cls_u32_offload *cls)
{
+ u32 hdl = cls->knode.handle;
u32 uhtid = TC_U32_USERHTID(cls->knode.handle);
- u32 loc;
- int err;
+ u32 loc = cls->knode.handle & 0xfffff;
+ int err = 0, i, j;
+ struct ixgbe_jump_table *jump = NULL;
+
+ if (loc > IXGBE_MAX_HW_ENTRIES)
+ return -EINVAL;
if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE))
return -EINVAL;
- loc = cls->knode.handle & 0xfffff;
+ /* Clear this filter in the link data it is associated with */
+ if (uhtid != 0x800) {
+ jump = adapter->jump_tables[uhtid];
+ if (!jump)
+ return -EINVAL;
+ if (!test_bit(loc - 1, jump->child_loc_map))
+ return -EINVAL;
+ clear_bit(loc - 1, jump->child_loc_map);
+ }
+
+ /* Check if the filter being deleted is a link */
+ for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
+ jump = adapter->jump_tables[i];
+ if (jump && jump->link_hdl == hdl) {
+ /* Delete filters in the hardware in the child hash
+ * table associated with this link
+ */
+ for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) {
+ if (!test_bit(j, jump->child_loc_map))
+ continue;
+ spin_lock(&adapter->fdir_perfect_lock);
+ err = ixgbe_update_ethtool_fdir_entry(adapter,
+ NULL,
+ j + 1);
+ spin_unlock(&adapter->fdir_perfect_lock);
+ clear_bit(j, jump->child_loc_map);
+ }
+ /* Remove resources for this link */
+ kfree(jump->input);
+ kfree(jump->mask);
+ kfree(jump);
+ adapter->jump_tables[i] = NULL;
+ return err;
+ }
+ }
spin_lock(&adapter->fdir_perfect_lock);
err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc);
@@ -8404,12 +8396,14 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
struct tcf_exts *exts, u64 *action, u8 *queue)
{
const struct tc_action *a;
+ LIST_HEAD(actions);
int err;
if (tc_no_actions(exts))
return -EINVAL;
- tc_for_each_action(a, exts) {
+ tcf_exts_to_list(exts, &actions);
+ list_for_each_entry(a, &actions, list) {
/* Drop action */
if (is_tcf_gact_shot(a)) {
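This tracks the tc core's removal of the tc_for_each_action() macro: actions are first snapshotted into a local list, then walked with the ordinary list iterator. The shape, hedged to this kernel's API:

const struct tc_action *a;
LIST_HEAD(actions);

tcf_exts_to_list(exts, &actions);	/* copy action pointers to a list */
list_for_each_entry(a, &actions, list) {
	if (is_tcf_gact_shot(a)) {
		/* handle the gact "drop" action */
	}
}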
@@ -8555,6 +8549,18 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
if (!test_bit(link_uhtid - 1, &adapter->tables))
return err;
+ /* Multiple filters as links to the same hash table are not
+ * supported. To add a new filter with the same next header
+ * but different match/jump conditions, create a new hash table
+ * and link to it.
+ */
+ if (adapter->jump_tables[link_uhtid] &&
+ (adapter->jump_tables[link_uhtid])->link_hdl) {
+ e_err(drv, "Link filter exists for link: %x\n",
+ link_uhtid);
+ return err;
+ }
+
for (i = 0; nexthdr[i].jump; i++) {
if (nexthdr[i].o != cls->knode.sel->offoff ||
nexthdr[i].s != cls->knode.sel->offshift ||
@@ -8576,6 +8582,8 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
}
jump->input = input;
jump->mask = mask;
+ jump->link_hdl = cls->knode.handle;
+
err = ixgbe_clsu32_build_input(input, mask, cls,
field_ptr, &nexthdr[i]);
if (!err) {
@@ -8603,6 +8611,20 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
if ((adapter->jump_tables[uhtid])->mask)
memcpy(mask, (adapter->jump_tables[uhtid])->mask,
sizeof(*mask));
+
+ /* Look up in all child hash tables to see if this location is already
+ * filled with a filter
+ */
+ for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
+ struct ixgbe_jump_table *link = adapter->jump_tables[i];
+
+ if (link && (test_bit(loc - 1, link->child_loc_map))) {
+ e_err(drv, "Filter exists in location: %x\n",
+ loc);
+ err = -EINVAL;
+ goto err_out;
+ }
+ }
}
err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL);
if (err)
@@ -8634,6 +8656,9 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
spin_unlock(&adapter->fdir_perfect_lock);
+ if ((uhtid != 0x800) && (adapter->jump_tables[uhtid]))
+ set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map);
+
kfree(mask);
return err;
err_out_w_lock:
@@ -8776,14 +8801,12 @@ static int ixgbe_set_features(struct net_device *netdev,
netdev->features = features;
-#ifdef CONFIG_IXGBE_VXLAN
if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) {
if (features & NETIF_F_RXCSUM)
adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED;
else
ixgbe_clear_vxlan_port(adapter);
}
-#endif /* CONFIG_IXGBE_VXLAN */
if (need_reset)
ixgbe_do_reset(netdev);
@@ -8794,23 +8817,25 @@ static int ixgbe_set_features(struct net_device *netdev,
return 0;
}
-#ifdef CONFIG_IXGBE_VXLAN
/**
* ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up
* @dev: The port's netdev
- * @sa_family: Socket Family that VXLAN is notifiying us about
- * @port: New UDP port number that VXLAN started listening to
+ * @ti: Tunnel endpoint information
**/
-static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
- __be16 port)
+static void ixgbe_add_vxlan_port(struct net_device *dev,
+ struct udp_tunnel_info *ti)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
+ __be16 port = ti->port;
- if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
return;
- if (sa_family == AF_INET6)
+ if (ti->sa_family != AF_INET)
+ return;
+
+ if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
return;
if (adapter->vxlan_port == port)
@@ -8830,30 +8855,31 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
/**
* ixgbe_del_vxlan_port - Get notifications about VXLAN ports that go away
* @dev: The port's netdev
- * @sa_family: Socket Family that VXLAN is notifying us about
- * @port: UDP port number that VXLAN stopped listening to
+ * @ti: Tunnel endpoint information
**/
-static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
- __be16 port)
+static void ixgbe_del_vxlan_port(struct net_device *dev,
+ struct udp_tunnel_info *ti)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
- if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
return;
- if (sa_family == AF_INET6)
+ if (ti->sa_family != AF_INET)
return;
- if (adapter->vxlan_port != port) {
+ if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
+ return;
+
+ if (adapter->vxlan_port != ti->port) {
netdev_info(dev, "Port %d was not found, not deleting\n",
- ntohs(port));
+ ntohs(ti->port));
return;
}
ixgbe_clear_vxlan_port(adapter);
adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED;
}
-#endif /* CONFIG_IXGBE_VXLAN */
static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
@@ -9166,10 +9192,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
.ndo_dfwd_add_station = ixgbe_fwd_add,
.ndo_dfwd_del_station = ixgbe_fwd_del,
-#ifdef CONFIG_IXGBE_VXLAN
- .ndo_add_vxlan_port = ixgbe_add_vxlan_port,
- .ndo_del_vxlan_port = ixgbe_del_vxlan_port,
-#endif /* CONFIG_IXGBE_VXLAN */
+ .ndo_udp_tunnel_add = ixgbe_add_vxlan_port,
+ .ndo_udp_tunnel_del = ixgbe_del_vxlan_port,
.ndo_features_check = ixgbe_features_check,
};
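These are the 4.8 udp_tunnel offload hooks: instead of VXLAN-specific callbacks taking (sa_family, port), one handler pair receives a struct udp_tunnel_info carrying tunnel type, address family and port, and udp_tunnel_get_rx_info() replays all known ports after a reset. Minimal handler sketch (hypothetical function name):

#include <net/udp_tunnel.h>

static void example_udp_tunnel_add(struct net_device *dev,
				   struct udp_tunnel_info *ti)
{
	/* this driver only offloads IPv4 VXLAN */
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN || ti->sa_family != AF_INET)
		return;
	/* program ti->port (network byte order) into the hardware */
}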
@@ -9337,8 +9361,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_using_dac = 0;
}
- err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
- IORESOURCE_MEM), ixgbe_driver_name);
+ err = pci_request_mem_regions(pdev, ixgbe_driver_name);
if (err) {
dev_err(&pdev->dev,
"pci_request_selected_regions failed 0x%x\n", err);
@@ -9725,8 +9748,7 @@ err_ioremap:
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
err_alloc_etherdev:
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
if (!adapter || disable_dev)
@@ -9793,8 +9815,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
#endif
iounmap(adapter->io_addr);
- pci_release_selected_regions(pdev, pci_select_bars(pdev,
- IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
e_dev_info("complete\n");
@@ -10058,6 +10079,7 @@ static int __init ixgbe_init_module(void)
ret = pci_register_driver(&ixgbe_driver);
if (ret) {
+ destroy_workqueue(ixgbe_wq);
ixgbe_dbg_exit();
return ret;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
index a8bed3d88..538a1c547 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
@@ -42,8 +42,12 @@ struct ixgbe_jump_table {
struct ixgbe_mat_field *mat;
struct ixgbe_fdir_filter *input;
union ixgbe_atr_input *mask;
+ u32 link_hdl;
+ unsigned long child_loc_map[32];
};
+#define IXGBE_MAX_HW_ENTRIES 2045
+
static inline int ixgbe_mat_prgm_sip(struct ixgbe_fdir_filter *input,
union ixgbe_atr_input *mask,
u32 val, u32 m)
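Sizing note on child_loc_map: 32 unsigned longs give 2048 bits on a 64-bit build, just covering the 2045 hardware slots, but only 1024 bits on 32-bit. A self-sizing equivalent, as a sketch:

#include <linux/bitmap.h>

/* lets the compiler derive the array length from the entry count */
DECLARE_BITMAP(child_loc_map, IXGBE_MAX_HW_ENTRIES);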
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index c5caacdd1..8618599df 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -954,6 +954,7 @@ static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
struct ixgbe_hw *hw = &adapter->hw;
hw->mac.ops.set_mac_anti_spoofing(hw, false, vf);
+ hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
}
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index da3d8358f..1248a9936 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -3525,6 +3525,7 @@ struct ixgbe_hw {
bool force_full_reset;
bool allow_unsupported_sfp;
bool wol_enabled;
+ bool need_crosstalk_fix;
};
struct ixgbe_info {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 19b75cd98..4716ca499 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1618,6 +1618,8 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
+ mac->ops.setup_fc = ixgbe_setup_fc_x550em;
+
switch (mac->ops.get_media_type(hw)) {
case ixgbe_media_type_fiber:
/* CS4227 does not support autoneg, so disable the laser control
@@ -1627,7 +1629,6 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
mac->ops.enable_tx_laser = NULL;
mac->ops.flap_tx_laser = NULL;
mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
- mac->ops.setup_fc = ixgbe_setup_fc_x550em;
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_A_SFP_N:
mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_n;
@@ -1655,7 +1656,6 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
mac->ops.setup_link = ixgbe_setup_sgmii;
break;
default:
- mac->ops.setup_fc = ixgbe_setup_fc_x550em;
break;
}
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index ae09d60e7..8617cae2f 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -32,6 +32,7 @@
#define IXGBE_DEV_ID_X540_VF 0x1515
#define IXGBE_DEV_ID_X550_VF 0x1565
#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
+#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5
#define IXGBE_DEV_ID_82599_VF_HV 0x152E
#define IXGBE_DEV_ID_X540_VF_HV 0x1530
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index d5944c391..be52f5976 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -457,6 +457,7 @@ enum ixgbevf_boards {
board_X550_vf_hv,
board_X550EM_x_vf,
board_X550EM_x_vf_hv,
+ board_x550em_a_vf,
};
enum ixgbevf_xcast_modes {
@@ -470,6 +471,7 @@ extern const struct ixgbevf_info ixgbevf_X540_vf_info;
extern const struct ixgbevf_info ixgbevf_X550_vf_info;
extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
+extern const struct ixgbevf_info ixgbevf_x550em_a_vf_info;
extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info;
extern const struct ixgbevf_info ixgbevf_X540_vf_hv_info;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index acc24010c..d9d6616f0 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -56,7 +56,7 @@ const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
-#define DRV_VERSION "2.12.1-k"
+#define DRV_VERSION "3.2.2-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
"Copyright (c) 2009 - 2015 Intel Corporation.";
@@ -70,6 +70,7 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
[board_X550_vf_hv] = &ixgbevf_X550_vf_hv_info,
[board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
[board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info,
+ [board_x550em_a_vf] = &ixgbevf_x550em_a_vf_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
@@ -89,6 +90,7 @@ static const struct pci_device_id ixgbevf_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf },
/* required last entry */
{0, }
};
@@ -1800,16 +1802,19 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
**/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
- int i;
struct ixgbe_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
+ int i, ret;
ixgbevf_setup_psrtype(adapter);
if (hw->mac.type >= ixgbe_mac_X550_vf)
ixgbevf_setup_vfmrqc(adapter);
/* notify the PF of our intent to use this size of frame */
- hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
+ ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
+ if (ret)
+ dev_err(&adapter->pdev->dev,
+ "Failed to set MTU at %d\n", netdev->mtu);
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring
@@ -2772,12 +2777,15 @@ static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
/* If we're already down or resetting, just bail */
if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
+ test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
test_bit(__IXGBEVF_RESETTING, &adapter->state))
return;
adapter->tx_timeout_count++;
+ rtnl_lock();
ixgbevf_reinit_locked(adapter);
+ rtnl_unlock();
}
/**
@@ -3732,6 +3740,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
struct ixgbe_hw *hw = &adapter->hw;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
+ int ret;
switch (adapter->hw.api_version) {
case ixgbe_mbox_api_11:
@@ -3748,14 +3757,17 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
if ((new_mtu < 68) || (max_frame > max_possible_frame))
return -EINVAL;
+ /* notify the PF of our intent to use this size of frame */
+ ret = hw->mac.ops.set_rlpml(hw, max_frame);
+ if (ret)
+ return -EINVAL;
+
hw_dbg(hw, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
+
/* must set new MTU before calling down or up */
netdev->mtu = new_mtu;
- /* notify the PF of our intent to use this size of frame */
- hw->mac.ops.set_rlpml(hw, max_frame);
-
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index e670d3b19..a52f70ec4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -33,6 +33,18 @@
*/
#define IXGBE_HV_RESET_OFFSET 0x201
+static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
+ u32 *retmsg, u16 size)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 retval = mbx->ops.write_posted(hw, msg, size);
+
+ if (retval)
+ return retval;
+
+ return mbx->ops.read_posted(hw, retmsg, size);
+}
+
/**
* ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
* @hw: pointer to hardware structure
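The old void helper that threw away the PF's reply becomes an inline that propagates write/read errors and returns the reply in the caller's buffer; typical use later in this file:

/* sketch: send a 2-word request, reuse msgbuf for the PF's answer */
u32 msgbuf[2] = { IXGBE_VF_UPDATE_XCAST_MODE, xcast_mode };
s32 err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);

if (!err)
	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;	/* strip CTS before checking */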
@@ -255,8 +267,7 @@ static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
- u32 msgbuf[3];
+ u32 msgbuf[3], msgbuf_chk;
u8 *msg_addr = (u8 *)(&msgbuf[1]);
s32 ret_val;
@@ -268,19 +279,18 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
*/
msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
+ msgbuf_chk = msgbuf[0];
+
if (addr)
ether_addr_copy(msg_addr, addr);
- ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
- if (!ret_val)
- ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
+ ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 3);
+ if (!ret_val) {
+ msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
- msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
-
- if (!ret_val)
- if (msgbuf[0] ==
- (IXGBE_VF_SET_MACVLAN | IXGBE_VT_MSGTYPE_NACK))
- ret_val = -ENOMEM;
+ if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK))
+ return -ENOMEM;
+ }
return ret_val;
}
@@ -423,7 +433,6 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
u32 vmdq)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[3];
u8 *msg_addr = (u8 *)(&msgbuf[1]);
s32 ret_val;
@@ -431,10 +440,8 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
memset(msgbuf, 0, sizeof(msgbuf));
msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
ether_addr_copy(msg_addr, addr);
- ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
- if (!ret_val)
- ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
+ ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
@@ -468,17 +475,6 @@ static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
return -EOPNOTSUPP;
}
-static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
- u32 *msg, u16 size)
-{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
- u32 retmsg[IXGBE_VFMAILBOX_SIZE];
- s32 retval = mbx->ops.write_posted(hw, msg, size);
-
- if (!retval)
- mbx->ops.read_posted(hw, retmsg, size);
-}
-
/**
* ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
* @hw: pointer to the HW structure
@@ -519,7 +515,7 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
}
- ixgbevf_write_msg_read_ack(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
+ ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, IXGBE_VFMAILBOX_SIZE);
return 0;
}
@@ -542,7 +538,6 @@ static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
**/
static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[2];
s32 err;
@@ -556,11 +551,7 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
msgbuf[1] = xcast_mode;
- err = mbx->ops.write_posted(hw, msgbuf, 2);
- if (err)
- return err;
-
- err = mbx->ops.read_posted(hw, msgbuf, 2);
+ err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
if (err)
return err;
@@ -589,7 +580,6 @@ static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[2];
s32 err;
@@ -598,11 +588,7 @@ static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
- err = mbx->ops.write_posted(hw, msgbuf, 2);
- if (err)
- goto mbx_err;
-
- err = mbx->ops.read_posted(hw, msgbuf, 2);
+ err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
if (err)
goto mbx_err;
@@ -797,13 +783,22 @@ out:
* @hw: pointer to the HW structure
* @max_size: value to assign to max frame size
**/
-static void ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
+static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
{
u32 msgbuf[2];
+ s32 ret_val;
msgbuf[0] = IXGBE_VF_SET_LPE;
msgbuf[1] = max_size;
- ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
+
+ ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+ if (ret_val)
+ return ret_val;
+ if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
+ (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK))
+ return IXGBE_ERR_MBX;
+
+ return 0;
}
/**
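set_rlpml growing a return value is what the earlier ixgbevf_main.c hunks consume: configure_rx logs a failure, and change_mtu refuses the new MTU when the PF NACKs the frame size. Caller sketch:

/* only commit the MTU once the PF has accepted the frame size */
ret = hw->mac.ops.set_rlpml(hw, max_frame);
if (ret)
	return -EINVAL;
netdev->mtu = new_mtu;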
@@ -812,7 +807,7 @@ static void ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
* @max_size: value to assign to max frame size
* Hyper-V variant.
**/
-static void ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
+static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
{
u32 reg;
@@ -823,6 +818,8 @@ static void ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
/* CRC == 4 */
reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);
+
+ return 0;
}
/**
@@ -839,11 +836,8 @@ static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
msg[0] = IXGBE_VF_API_NEGOTIATE;
msg[1] = api;
msg[2] = 0;
- err = hw->mbx.ops.write_posted(hw, msg, 3);
-
- if (!err)
- err = hw->mbx.ops.read_posted(hw, msg, 3);
+ err = ixgbevf_write_msg_read_ack(hw, msg, msg, 3);
if (!err) {
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
@@ -892,11 +886,8 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
/* Fetch queue configuration from the PF */
msg[0] = IXGBE_VF_GET_QUEUE;
msg[1] = msg[2] = msg[3] = msg[4] = 0;
- err = hw->mbx.ops.write_posted(hw, msg, 5);
-
- if (!err)
- err = hw->mbx.ops.read_posted(hw, msg, 5);
+ err = ixgbevf_write_msg_read_ack(hw, msg, msg, 5);
if (!err) {
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
@@ -1005,3 +996,8 @@ const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {
.mac = ixgbe_mac_X550EM_x_vf,
.mac_ops = &ixgbevf_hv_mac_ops,
};
+
+const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
+ .mac = ixgbe_mac_x550em_a_vf,
+ .mac_ops = &ixgbevf_mac_ops,
+};
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 2cac610f3..04d8d4ee4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -69,7 +69,7 @@ struct ixgbe_mac_operations {
s32 (*disable_mc)(struct ixgbe_hw *);
s32 (*clear_vfta)(struct ixgbe_hw *);
s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
- void (*set_rlpml)(struct ixgbe_hw *, u16);
+ s32 (*set_rlpml)(struct ixgbe_hw *, u16);
};
enum ixgbe_mac_type {
@@ -78,6 +78,7 @@ enum ixgbe_mac_type {
ixgbe_mac_X540_vf,
ixgbe_mac_X550_vf,
ixgbe_mac_X550EM_x_vf,
+ ixgbe_mac_x550em_a_vf,
ixgbe_num_macs
};
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index dc82b1b19..91e09d68b 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -102,7 +102,6 @@ struct ltq_etop_priv {
struct resource *res;
struct mii_bus *mii_bus;
- struct phy_device *phydev;
struct ltq_etop_chan ch[MAX_DMA_CHAN];
int tx_free[MAX_DMA_CHAN >> 1];
@@ -305,34 +304,16 @@ ltq_etop_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
}
static int
-ltq_etop_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct ltq_etop_priv *priv = netdev_priv(dev);
-
- return phy_ethtool_gset(priv->phydev, cmd);
-}
-
-static int
-ltq_etop_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct ltq_etop_priv *priv = netdev_priv(dev);
-
- return phy_ethtool_sset(priv->phydev, cmd);
-}
-
-static int
ltq_etop_nway_reset(struct net_device *dev)
{
- struct ltq_etop_priv *priv = netdev_priv(dev);
-
- return phy_start_aneg(priv->phydev);
+ return phy_start_aneg(dev->phydev);
}
static const struct ethtool_ops ltq_etop_ethtool_ops = {
.get_drvinfo = ltq_etop_get_drvinfo,
- .get_settings = ltq_etop_get_settings,
- .set_settings = ltq_etop_set_settings,
.nway_reset = ltq_etop_nway_reset,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static int
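lantiq_etop here, and mvpp2 and pxa168_eth below, all follow the same conversion: drop the driver-private phy pointer in favour of dev->phydev (set by phy_connect()), delete the legacy get/set_settings pair, and let the generic phylib helpers implement the ksettings ops. The resulting pattern:

/* sketch: once phy_connect() has populated dev->phydev, the stock
 * phylib helpers can back the ethtool link-settings ops directly */
static const struct ethtool_ops example_ethtool_ops = {
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};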
@@ -401,7 +382,6 @@ ltq_etop_mdio_probe(struct net_device *dev)
| SUPPORTED_TP);
phydev->advertising = phydev->supported;
- priv->phydev = phydev;
phy_attached_info(phydev);
return 0;
@@ -411,7 +391,6 @@ static int
ltq_etop_mdio_init(struct net_device *dev)
{
struct ltq_etop_priv *priv = netdev_priv(dev);
- int i;
int err;
priv->mii_bus = mdiobus_alloc();
@@ -451,7 +430,7 @@ ltq_etop_mdio_cleanup(struct net_device *dev)
{
struct ltq_etop_priv *priv = netdev_priv(dev);
- phy_disconnect(priv->phydev);
+ phy_disconnect(dev->phydev);
mdiobus_unregister(priv->mii_bus);
mdiobus_free(priv->mii_bus);
}
@@ -470,7 +449,7 @@ ltq_etop_open(struct net_device *dev)
ltq_dma_open(&ch->dma);
napi_enable(&ch->napi);
}
- phy_start(priv->phydev);
+ phy_start(dev->phydev);
netif_tx_start_all_queues(dev);
return 0;
}
@@ -482,7 +461,7 @@ ltq_etop_stop(struct net_device *dev)
int i;
netif_tx_stop_all_queues(dev);
- phy_stop(priv->phydev);
+ phy_stop(dev->phydev);
for (i = 0; i < MAX_DMA_CHAN; i++) {
struct ltq_etop_chan *ch = &priv->ch[i];
@@ -557,10 +536,8 @@ ltq_etop_change_mtu(struct net_device *dev, int new_mtu)
static int
ltq_etop_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct ltq_etop_priv *priv = netdev_priv(dev);
-
/* TODO: mii-tool reports "No MII transceiver present!" ?! */
- return phy_mii_ioctl(priv->phydev, rq, cmd);
+ return phy_mii_ioctl(dev->phydev, rq, cmd);
}
static int
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index f92018b13..d41c28d00 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -4118,6 +4118,7 @@ static int mvneta_probe(struct platform_device *pdev)
pp->bm_priv = NULL;
}
}
+ of_node_put(bm_node);
err = mvneta_init(&pdev->dev, pp);
if (err < 0)
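Both Marvell changes here and in mvpp2 below are device-tree refcount repairs: of_parse_phandle() (and the phy-node lookup) return a node with its refcount raised, which the driver must drop on every exit path. The contract, sketched:

struct device_node *bm_node;

bm_node = of_parse_phandle(dn, "buffer-manager", 0);	/* takes a ref */
/* ... use bm_node ... */
of_node_put(bm_node);	/* balance the ref; of_node_put(NULL) is safe */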
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 868a957f2..60227a345 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -699,7 +699,6 @@ struct mvpp2_port {
u16 rx_ring_size;
struct mvpp2_pcpu_stats __percpu *stats;
- struct phy_device *phy_dev;
phy_interface_t phy_interface;
struct device_node *phy_node;
unsigned int link;
@@ -4850,7 +4849,7 @@ static irqreturn_t mvpp2_isr(int irq, void *dev_id)
static void mvpp2_link_event(struct net_device *dev)
{
struct mvpp2_port *port = netdev_priv(dev);
- struct phy_device *phydev = port->phy_dev;
+ struct phy_device *phydev = dev->phydev;
int status_change = 0;
u32 val;
@@ -5416,6 +5415,8 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
+ struct net_device *ndev = port->dev;
+
mvpp2_gmac_max_rx_size_set(port);
mvpp2_txp_max_tx_size_set(port);
@@ -5425,13 +5426,15 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
mvpp2_interrupts_enable(port);
mvpp2_port_enable(port);
- phy_start(port->phy_dev);
+ phy_start(ndev->phydev);
netif_tx_start_all_queues(port->dev);
}
/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
+ struct net_device *ndev = port->dev;
+
/* Stop new packets from arriving to RXQs */
mvpp2_ingress_disable(port);
@@ -5447,7 +5450,7 @@ static void mvpp2_stop_dev(struct mvpp2_port *port)
mvpp2_egress_disable(port);
mvpp2_port_disable(port);
- phy_stop(port->phy_dev);
+ phy_stop(ndev->phydev);
}
/* Return positive if MTU is valid */
@@ -5535,7 +5538,6 @@ static int mvpp2_phy_connect(struct mvpp2_port *port)
phy_dev->supported &= PHY_GBIT_FEATURES;
phy_dev->advertising = phy_dev->supported;
- port->phy_dev = phy_dev;
port->link = 0;
port->duplex = 0;
port->speed = 0;
@@ -5545,8 +5547,9 @@ static int mvpp2_phy_connect(struct mvpp2_port *port)
static void mvpp2_phy_disconnect(struct mvpp2_port *port)
{
- phy_disconnect(port->phy_dev);
- port->phy_dev = NULL;
+ struct net_device *ndev = port->dev;
+
+ phy_disconnect(ndev->phydev);
}
static int mvpp2_open(struct net_device *dev)
@@ -5796,13 +5799,12 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- struct mvpp2_port *port = netdev_priv(dev);
int ret;
- if (!port->phy_dev)
+ if (!dev->phydev)
return -ENOTSUPP;
- ret = phy_mii_ioctl(port->phy_dev, ifr, cmd);
+ ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
if (!ret)
mvpp2_link_event(dev);
@@ -5811,28 +5813,6 @@ static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* Ethtool methods */
-/* Get settings (phy address, speed) for ethtools */
-static int mvpp2_ethtool_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- struct mvpp2_port *port = netdev_priv(dev);
-
- if (!port->phy_dev)
- return -ENODEV;
- return phy_ethtool_gset(port->phy_dev, cmd);
-}
-
-/* Set settings (phy address, speed) for ethtools */
-static int mvpp2_ethtool_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- struct mvpp2_port *port = netdev_priv(dev);
-
- if (!port->phy_dev)
- return -ENODEV;
- return phy_ethtool_sset(port->phy_dev, cmd);
-}
-
/* Set interrupt coalescing for ethtools */
static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *c)
@@ -5967,13 +5947,13 @@ static const struct net_device_ops mvpp2_netdev_ops = {
static const struct ethtool_ops mvpp2_eth_tool_ops = {
.get_link = ethtool_op_get_link,
- .get_settings = mvpp2_ethtool_get_settings,
- .set_settings = mvpp2_ethtool_set_settings,
.set_coalesce = mvpp2_ethtool_set_coalesce,
.get_coalesce = mvpp2_ethtool_get_coalesce,
.get_drvinfo = mvpp2_ethtool_get_drvinfo,
.get_ringparam = mvpp2_ethtool_get_ringparam,
.set_ringparam = mvpp2_ethtool_set_ringparam,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
/* Driver initialization */
@@ -6254,6 +6234,7 @@ err_free_stats:
err_free_irq:
irq_dispose_mapping(port->irq);
err_free_netdev:
+ of_node_put(phy_node);
free_netdev(dev);
return err;
}
@@ -6264,6 +6245,7 @@ static void mvpp2_port_remove(struct mvpp2_port *port)
int i;
unregister_netdev(port->dev);
+ of_node_put(port->phy_node);
free_percpu(port->pcpu);
free_percpu(port->stats);
for (i = 0; i < txq_number; i++)
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 54d5154ac..5d5000c8e 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -247,7 +247,6 @@ struct pxa168_eth_private {
*/
struct timer_list timeout;
struct mii_bus *smi_bus;
- struct phy_device *phy;
/* clock */
struct clk *clk;
@@ -275,8 +274,8 @@ enum hash_table_entry {
HASH_ENTRY_RECEIVE_DISCARD_BIT = 2
};
-static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
-static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd);
+static int pxa168_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd);
static int pxa168_init_hw(struct pxa168_eth_private *pep);
static int pxa168_init_phy(struct net_device *dev);
static void eth_port_reset(struct net_device *dev);
@@ -644,7 +643,7 @@ static void eth_port_start(struct net_device *dev)
struct pxa168_eth_private *pep = netdev_priv(dev);
int tx_curr_desc, rx_curr_desc;
- phy_start(pep->phy);
+ phy_start(dev->phydev);
/* Assignment of Tx CTRP of given queue */
tx_curr_desc = pep->tx_curr_desc_q;
@@ -700,7 +699,7 @@ static void eth_port_reset(struct net_device *dev)
val &= ~PCR_EN;
wrl(pep, PORT_CONFIG, val);
- phy_stop(pep->phy);
+ phy_stop(dev->phydev);
}
/*
@@ -943,7 +942,7 @@ static int set_port_config_ext(struct pxa168_eth_private *pep)
static void pxa168_eth_adjust_link(struct net_device *dev)
{
struct pxa168_eth_private *pep = netdev_priv(dev);
- struct phy_device *phy = pep->phy;
+ struct phy_device *phy = dev->phydev;
u32 cfg, cfg_o = rdl(pep, PORT_CONFIG);
u32 cfgext, cfgext_o = rdl(pep, PORT_CONFIG_EXT);
@@ -972,35 +971,37 @@ static void pxa168_eth_adjust_link(struct net_device *dev)
static int pxa168_init_phy(struct net_device *dev)
{
struct pxa168_eth_private *pep = netdev_priv(dev);
- struct ethtool_cmd cmd;
+ struct ethtool_link_ksettings cmd;
+ struct phy_device *phy = NULL;
int err;
- if (pep->phy)
+ if (dev->phydev)
return 0;
- pep->phy = mdiobus_scan(pep->smi_bus, pep->phy_addr);
- if (IS_ERR(pep->phy))
- return PTR_ERR(pep->phy);
+ phy = mdiobus_scan(pep->smi_bus, pep->phy_addr);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
- err = phy_connect_direct(dev, pep->phy, pxa168_eth_adjust_link,
+ err = phy_connect_direct(dev, phy, pxa168_eth_adjust_link,
pep->phy_intf);
if (err)
return err;
- err = pxa168_get_settings(dev, &cmd);
+ err = pxa168_get_link_ksettings(dev, &cmd);
if (err)
return err;
- cmd.phy_address = pep->phy_addr;
- cmd.speed = pep->phy_speed;
- cmd.duplex = pep->phy_duplex;
- cmd.advertising = PHY_BASIC_FEATURES;
- cmd.autoneg = AUTONEG_ENABLE;
+ cmd.base.phy_address = pep->phy_addr;
+ cmd.base.speed = pep->phy_speed;
+ cmd.base.duplex = pep->phy_duplex;
+ ethtool_convert_legacy_u32_to_link_mode(cmd.link_modes.advertising,
+ PHY_BASIC_FEATURES);
+ cmd.base.autoneg = AUTONEG_ENABLE;
- if (cmd.speed != 0)
- cmd.autoneg = AUTONEG_DISABLE;
+ if (cmd.base.speed != 0)
+ cmd.base.autoneg = AUTONEG_DISABLE;
- return pxa168_set_settings(dev, &cmd);
+ return phy_ethtool_set_link_ksettings(dev, &cmd);
}
static int pxa168_init_hw(struct pxa168_eth_private *pep)
@@ -1366,32 +1367,24 @@ static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
int cmd)
{
- struct pxa168_eth_private *pep = netdev_priv(dev);
- if (pep->phy != NULL)
- return phy_mii_ioctl(pep->phy, ifr, cmd);
+ if (dev->phydev != NULL)
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
return -EOPNOTSUPP;
}
-static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int pxa168_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
- struct pxa168_eth_private *pep = netdev_priv(dev);
int err;
- err = phy_read_status(pep->phy);
+ err = phy_read_status(dev->phydev);
if (err == 0)
- err = phy_ethtool_gset(pep->phy, cmd);
+ err = phy_ethtool_ksettings_get(dev->phydev, cmd);
return err;
}
-static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct pxa168_eth_private *pep = netdev_priv(dev);
-
- return phy_ethtool_sset(pep->phy, cmd);
-}
-
static void pxa168_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -1402,11 +1395,11 @@ static void pxa168_get_drvinfo(struct net_device *dev,
}
static const struct ethtool_ops pxa168_ethtool_ops = {
- .get_settings = pxa168_get_settings,
- .set_settings = pxa168_set_settings,
.get_drvinfo = pxa168_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = pxa168_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static const struct net_device_ops pxa168_eth_netdev_ops = {
@@ -1513,6 +1506,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
}
of_property_read_u32(np, "reg", &pep->phy_addr);
pep->phy_intf = of_get_phy_mode(pdev->dev.of_node);
+ of_node_put(np);
}
/* Hardware supports only 3 ports */
@@ -1569,8 +1563,8 @@ static int pxa168_eth_remove(struct platform_device *pdev)
pep->htpr, pep->htpr_dma);
pep->htpr = NULL;
}
- if (pep->phy)
- phy_disconnect(pep->phy);
+ if (dev->phydev)
+ phy_disconnect(dev->phydev);
if (pep->clk) {
clk_disable_unprepare(pep->clk);
}
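The pxa168 conversion keeps one driver-specific get_link_ksettings (it wants a phy_read_status() first) but shows how legacy u32 advertising masks carry over into the new representation. A standalone illustration of the conversion helper, with illustrative names and values:

#include <linux/ethtool.h>

static void example_fill_ksettings(struct ethtool_link_ksettings *cmd,
				   int phy_addr, int speed, int duplex)
{
	cmd->base.phy_address = phy_addr;
	cmd->base.speed = speed;
	cmd->base.duplex = duplex;
	/* a forced, nonzero speed disables autoneg, mirroring
	 * pxa168_init_phy() above
	 */
	cmd->base.autoneg = speed ? AUTONEG_DISABLE : AUTONEG_ENABLE;

	/* legacy u32 ADVERTISED_* masks map onto the first 32 bits of
	 * the new link-mode bitmaps via this helper
	 */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						ADVERTISED_10baseT_Full |
						ADVERTISED_100baseT_Full);
}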
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index d1cdc2d76..3743af8f1 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -50,6 +50,10 @@ static const struct mtk_ethtool_stats {
MTK_ETHTOOL_STAT(rx_flow_control_packets),
};
+static const char * const mtk_clks_source_name[] = {
+ "ethif", "esw", "gp1", "gp2"
+};
+
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
__raw_writel(val, eth->base + reg);
@@ -76,8 +80,8 @@ static int mtk_mdio_busy_wait(struct mtk_eth *eth)
return -1;
}
-u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
- u32 phy_register, u32 write_data)
+static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
+ u32 phy_register, u32 write_data)
{
if (mtk_mdio_busy_wait(eth))
return -1;
@@ -95,7 +99,7 @@ u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
return 0;
}
-u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
+static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
{
u32 d;
@@ -245,12 +249,16 @@ static int mtk_phy_connect(struct mtk_mac *mac)
case PHY_INTERFACE_MODE_MII:
ge_mode = 1;
break;
- case PHY_INTERFACE_MODE_RMII:
+ case PHY_INTERFACE_MODE_REVMII:
ge_mode = 2;
break;
+ case PHY_INTERFACE_MODE_RMII:
+ if (!mac->id)
+ goto err_phy;
+ ge_mode = 3;
+ break;
default:
- dev_err(eth->dev, "invalid phy_mode\n");
- return -1;
+ goto err_phy;
}
/* put the gmac into the right mode */
@@ -263,19 +271,31 @@ static int mtk_phy_connect(struct mtk_mac *mac)
mac->phy_dev->autoneg = AUTONEG_ENABLE;
mac->phy_dev->speed = 0;
mac->phy_dev->duplex = 0;
+
+ if (of_phy_is_fixed_link(mac->of_node))
+ mac->phy_dev->supported |=
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+
mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
SUPPORTED_Asym_Pause;
mac->phy_dev->advertising = mac->phy_dev->supported |
ADVERTISED_Autoneg;
phy_start_aneg(mac->phy_dev);
+ of_node_put(np);
+
return 0;
+
+err_phy:
+ of_node_put(np);
+ dev_err(eth->dev, "invalid phy_mode\n");
+ return -EINVAL;
}
static int mtk_mdio_init(struct mtk_eth *eth)
{
struct device_node *mii_np;
- int err;
+ int ret;
mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
if (!mii_np) {
@@ -284,13 +304,13 @@ static int mtk_mdio_init(struct mtk_eth *eth)
}
if (!of_device_is_available(mii_np)) {
- err = 0;
+ ret = -ENODEV;
goto err_put_node;
}
- eth->mii_bus = mdiobus_alloc();
+ eth->mii_bus = devm_mdiobus_alloc(eth->dev);
if (!eth->mii_bus) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto err_put_node;
}
@@ -301,19 +321,11 @@ static int mtk_mdio_init(struct mtk_eth *eth)
eth->mii_bus->parent = eth->dev;
snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
- err = of_mdiobus_register(eth->mii_bus, mii_np);
- if (err)
- goto err_free_bus;
-
- return 0;
-
-err_free_bus:
- mdiobus_free(eth->mii_bus);
+ ret = of_mdiobus_register(eth->mii_bus, mii_np);
err_put_node:
of_node_put(mii_np);
- eth->mii_bus = NULL;
- return err;
+ return ret;
}
static void mtk_mdio_cleanup(struct mtk_eth *eth)
@@ -322,28 +334,28 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth)
return;
mdiobus_unregister(eth->mii_bus);
- of_node_put(eth->mii_bus->dev.of_node);
- mdiobus_free(eth->mii_bus);
}
static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
{
+ unsigned long flags;
u32 val;
+ spin_lock_irqsave(&eth->irq_lock, flags);
val = mtk_r32(eth, MTK_QDMA_INT_MASK);
mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
- /* flush write */
- mtk_r32(eth, MTK_QDMA_INT_MASK);
+ spin_unlock_irqrestore(&eth->irq_lock, flags);
}
static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask)
{
+ unsigned long flags;
u32 val;
+ spin_lock_irqsave(&eth->irq_lock, flags);
val = mtk_r32(eth, MTK_QDMA_INT_MASK);
mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
- /* flush write */
- mtk_r32(eth, MTK_QDMA_INT_MASK);
+ spin_unlock_irqrestore(&eth->irq_lock, flags);
}
static int mtk_set_mac_address(struct net_device *dev, void *p)
@@ -540,15 +552,15 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
return &ring->buf[idx];
}
-static void mtk_tx_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
+static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
{
if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
- dma_unmap_single(dev,
+ dma_unmap_single(eth->dev,
dma_unmap_addr(tx_buf, dma_addr0),
dma_unmap_len(tx_buf, dma_len0),
DMA_TO_DEVICE);
} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
- dma_unmap_page(dev,
+ dma_unmap_page(eth->dev,
dma_unmap_addr(tx_buf, dma_addr0),
dma_unmap_len(tx_buf, dma_len0),
DMA_TO_DEVICE);
@@ -570,14 +582,15 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
dma_addr_t mapped_addr;
unsigned int nr_frags;
int i, n_desc = 1;
- u32 txd4 = 0;
+ u32 txd4 = 0, fport;
itxd = ring->next_free;
if (itxd == ring->last_free)
return -ENOMEM;
/* set the forward port */
- txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;
+ fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
+ txd4 |= fport;
tx_buf = mtk_desc_to_tx_buf(ring, itxd);
memset(tx_buf, 0, sizeof(*tx_buf));
@@ -593,9 +606,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
if (skb_vlan_tag_present(skb))
txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
- mapped_addr = dma_map_single(&dev->dev, skb->data,
+ mapped_addr = dma_map_single(eth->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
+ if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
return -ENOMEM;
WRITE_ONCE(itxd->txd1, mapped_addr);
@@ -621,10 +634,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
n_desc++;
frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
- mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
+ mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
frag_map_size,
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
+ if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
goto err_dma;
if (i == nr_frags - 1 &&
@@ -635,7 +648,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
TX_DMA_PLEN0(frag_map_size) |
last_frag * TX_DMA_LS0));
- WRITE_ONCE(txd->txd4, 0);
+ WRITE_ONCE(txd->txd4, fport);
tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
tx_buf = mtk_desc_to_tx_buf(ring, txd);
@@ -677,7 +690,7 @@ err_dma:
tx_buf = mtk_desc_to_tx_buf(ring, itxd);
/* unmap dma */
- mtk_tx_unmap(&dev->dev, tx_buf);
+ mtk_tx_unmap(eth, tx_buf);
itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
@@ -798,7 +811,7 @@ drop:
}
static int mtk_poll_rx(struct napi_struct *napi, int budget,
- struct mtk_eth *eth, u32 rx_intr)
+ struct mtk_eth *eth)
{
struct mtk_rx_ring *ring = &eth->rx_ring;
int idx = ring->calc_idx;
@@ -834,11 +847,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
netdev->stats.rx_dropped++;
goto release_desc;
}
- dma_addr = dma_map_single(&eth->netdev[mac]->dev,
+ dma_addr = dma_map_single(eth->dev,
new_data + NET_SKB_PAD,
ring->buf_size,
DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
+ if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
skb_free_frag(new_data);
netdev->stats.rx_dropped++;
goto release_desc;
@@ -847,13 +860,13 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
/* receive data */
skb = build_skb(data, ring->frag_size);
if (unlikely(!skb)) {
- put_page(virt_to_head_page(new_data));
+ skb_free_frag(new_data);
netdev->stats.rx_dropped++;
goto release_desc;
}
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
- dma_unmap_single(&netdev->dev, trxd.rxd1,
+ dma_unmap_single(eth->dev, trxd.rxd1,
ring->buf_size, DMA_FROM_DEVICE);
pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
skb->dev = netdev;
@@ -886,22 +899,22 @@ release_desc:
}
if (done < budget)
- mtk_w32(eth, rx_intr, MTK_QMTK_INT_STATUS);
+ mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS);
return done;
}
-static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
+static int mtk_poll_tx(struct mtk_eth *eth, int budget)
{
struct mtk_tx_ring *ring = &eth->tx_ring;
struct mtk_tx_dma *desc;
struct sk_buff *skb;
struct mtk_tx_buf *tx_buf;
- int total = 0, done[MTK_MAX_DEVS];
+ unsigned int done[MTK_MAX_DEVS];
unsigned int bytes[MTK_MAX_DEVS];
u32 cpu, dma;
static int condition;
- int i;
+ int total = 0, i;
memset(done, 0, sizeof(done));
memset(bytes, 0, sizeof(bytes));
@@ -935,7 +948,7 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
done[mac]++;
budget--;
}
- mtk_tx_unmap(eth->dev, tx_buf);
+ mtk_tx_unmap(eth, tx_buf);
ring->last_free = desc;
atomic_inc(&ring->free_count);
@@ -952,15 +965,6 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
total += done[i];
}
- /* read hw index again make sure no new tx packet */
- if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR))
- *tx_again = true;
- else
- mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
-
- if (!total)
- return 0;
-
if (mtk_queue_stopped(eth) &&
(atomic_read(&ring->free_count) > ring->thresh))
mtk_wake_queue(eth);
@@ -968,49 +972,75 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
return total;
}
-static int mtk_poll(struct napi_struct *napi, int budget)
+static void mtk_handle_status_irq(struct mtk_eth *eth)
{
- struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
- u32 status, status2, mask, tx_intr, rx_intr, status_intr;
- int tx_done, rx_done;
- bool tx_again = false;
+ u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
- status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
- status2 = mtk_r32(eth, MTK_INT_STATUS2);
- tx_intr = MTK_TX_DONE_INT;
- rx_intr = MTK_RX_DONE_INT;
- status_intr = (MTK_GDM1_AF | MTK_GDM2_AF);
- tx_done = 0;
- rx_done = 0;
- tx_again = 0;
+ if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
+ mtk_stats_update(eth);
+ mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
+ MTK_INT_STATUS2);
+ }
+}
- if (status & tx_intr)
- tx_done = mtk_poll_tx(eth, budget, &tx_again);
+static int mtk_napi_tx(struct napi_struct *napi, int budget)
+{
+ struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
+ u32 status, mask;
+ int tx_done = 0;
- if (status & rx_intr)
- rx_done = mtk_poll_rx(napi, budget, eth, rx_intr);
+ mtk_handle_status_irq(eth);
+ mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
+ tx_done = mtk_poll_tx(eth, budget);
- if (unlikely(status2 & status_intr)) {
- mtk_stats_update(eth);
- mtk_w32(eth, status_intr, MTK_INT_STATUS2);
+ if (unlikely(netif_msg_intr(eth))) {
+ status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
+ mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
+ dev_info(eth->dev,
+ "done tx %d, intr 0x%08x/0x%x\n",
+ tx_done, status, mask);
}
+ if (tx_done == budget)
+ return budget;
+
+ status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
+ if (status & MTK_TX_DONE_INT)
+ return budget;
+
+ napi_complete(napi);
+ mtk_irq_enable(eth, MTK_TX_DONE_INT);
+
+ return tx_done;
+}
+
+static int mtk_napi_rx(struct napi_struct *napi, int budget)
+{
+ struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
+ u32 status, mask;
+ int rx_done = 0;
+
+ mtk_handle_status_irq(eth);
+ mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS);
+ rx_done = mtk_poll_rx(napi, budget, eth);
+
if (unlikely(netif_msg_intr(eth))) {
+ status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
- netdev_info(eth->netdev[0],
- "done tx %d, rx %d, intr 0x%08x/0x%x\n",
- tx_done, rx_done, status, mask);
+ dev_info(eth->dev,
+ "done rx %d, intr 0x%08x/0x%x\n",
+ rx_done, status, mask);
}
- if (tx_again || rx_done == budget)
+ if (rx_done == budget)
return budget;
status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
- if (status & (tx_intr | rx_intr))
+ if (status & MTK_RX_DONE_INT)
return budget;
napi_complete(napi);
- mtk_irq_enable(eth, tx_intr | rx_intr);
+ mtk_irq_enable(eth, MTK_RX_DONE_INT);
return rx_done;
}
@@ -1073,7 +1103,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
if (ring->buf) {
for (i = 0; i < MTK_DMA_SIZE; i++)
- mtk_tx_unmap(eth->dev, &ring->buf[i]);
+ mtk_tx_unmap(eth, &ring->buf[i]);
kfree(ring->buf);
ring->buf = NULL;
}
@@ -1246,22 +1276,26 @@ static void mtk_tx_timeout(struct net_device *dev)
schedule_work(&eth->pending_work);
}
-static irqreturn_t mtk_handle_irq(int irq, void *_eth)
+static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
{
struct mtk_eth *eth = _eth;
- u32 status;
- status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
- if (unlikely(!status))
- return IRQ_NONE;
+ if (likely(napi_schedule_prep(&eth->rx_napi))) {
+ __napi_schedule(&eth->rx_napi);
+ mtk_irq_disable(eth, MTK_RX_DONE_INT);
+ }
- if (likely(status & (MTK_RX_DONE_INT | MTK_TX_DONE_INT))) {
- if (likely(napi_schedule_prep(&eth->rx_napi)))
- __napi_schedule(&eth->rx_napi);
- } else {
- mtk_w32(eth, status, MTK_QMTK_INT_STATUS);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
+{
+ struct mtk_eth *eth = _eth;
+
+ if (likely(napi_schedule_prep(&eth->tx_napi))) {
+ __napi_schedule(&eth->tx_napi);
+ mtk_irq_disable(eth, MTK_TX_DONE_INT);
}
- mtk_irq_disable(eth, (MTK_RX_DONE_INT | MTK_TX_DONE_INT));
return IRQ_HANDLED;
}
@@ -1274,7 +1308,7 @@ static void mtk_poll_controller(struct net_device *dev)
u32 int_mask = MTK_TX_DONE_INT | MTK_RX_DONE_INT;
mtk_irq_disable(eth, int_mask);
- mtk_handle_irq(dev->irq, dev);
+ mtk_handle_irq_rx(eth->irq[2], dev);
mtk_irq_enable(eth, int_mask);
}
#endif
@@ -1310,6 +1344,7 @@ static int mtk_open(struct net_device *dev)
if (err)
return err;
+ napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
mtk_irq_enable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
}
@@ -1358,6 +1393,7 @@ static int mtk_stop(struct net_device *dev)
return 0;
mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
+ napi_disable(&eth->tx_napi);
napi_disable(&eth->rx_napi);
mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
@@ -1395,7 +1431,11 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
/* Enable RX VLan Offloading */
mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
- err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0,
+ err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
+ dev_name(eth->dev), eth);
+ if (err)
+ return err;
+ err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
dev_name(eth->dev), eth);
if (err)
return err;
@@ -1411,7 +1451,11 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
mtk_w32(eth, 0, MTK_RST_GL);
/* FE int grouping */
- mtk_w32(eth, 0, MTK_FE_INT_GRP);
+ mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
+ mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
+ mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
+ mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
+ mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
for (i = 0; i < 2; i++) {
u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
@@ -1457,9 +1501,7 @@ static void mtk_uninit(struct net_device *dev)
struct mtk_eth *eth = mac->hw;
phy_disconnect(mac->phy_dev);
- mtk_mdio_cleanup(eth);
mtk_irq_disable(eth, ~0);
- free_irq(dev->irq, dev);
}
static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -1717,6 +1759,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
goto free_netdev;
}
spin_lock_init(&mac->hw_stats->stats_lock);
+ u64_stats_init(&mac->hw_stats->syncp);
mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
SET_NETDEV_DEV(eth->netdev[id], eth->dev);
@@ -1733,10 +1776,10 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
dev_err(eth->dev, "error bringing up device\n");
goto free_netdev;
}
- eth->netdev[id]->irq = eth->irq;
+ eth->netdev[id]->irq = eth->irq[0];
netif_info(eth, probe, eth->netdev[id],
"mediatek frame engine at 0x%08lx, irq %d\n",
- eth->netdev[id]->base_addr, eth->netdev[id]->irq);
+ eth->netdev[id]->base_addr, eth->irq[0]);
return 0;
@@ -1753,6 +1796,7 @@ static int mtk_probe(struct platform_device *pdev)
struct mtk_soc_data *soc;
struct mtk_eth *eth;
int err;
+ int i;
match = of_match_device(of_mtk_match, &pdev->dev);
soc = (struct mtk_soc_data *)match->data;
@@ -1761,11 +1805,13 @@ static int mtk_probe(struct platform_device *pdev)
if (!eth)
return -ENOMEM;
+ eth->dev = &pdev->dev;
eth->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(eth->base))
return PTR_ERR(eth->base);
spin_lock_init(&eth->page_lock);
+ spin_lock_init(&eth->irq_lock);
eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"mediatek,ethsys");
@@ -1787,26 +1833,28 @@ static int mtk_probe(struct platform_device *pdev)
return PTR_ERR(eth->rstc);
}
- eth->irq = platform_get_irq(pdev, 0);
- if (eth->irq < 0) {
- dev_err(&pdev->dev, "no IRQ resource found\n");
- return -ENXIO;
+ for (i = 0; i < 3; i++) {
+ eth->irq[i] = platform_get_irq(pdev, i);
+ if (eth->irq[i] < 0) {
+ dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
+ return -ENXIO;
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
+ eth->clks[i] = devm_clk_get(eth->dev,
+ mtk_clks_source_name[i]);
+ if (IS_ERR(eth->clks[i])) {
+ if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ return -ENODEV;
+ }
}
- eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif");
- eth->clk_esw = devm_clk_get(&pdev->dev, "esw");
- eth->clk_gp1 = devm_clk_get(&pdev->dev, "gp1");
- eth->clk_gp2 = devm_clk_get(&pdev->dev, "gp2");
- if (IS_ERR(eth->clk_esw) || IS_ERR(eth->clk_gp1) ||
- IS_ERR(eth->clk_gp2) || IS_ERR(eth->clk_ethif))
- return -ENODEV;
-
- clk_prepare_enable(eth->clk_ethif);
- clk_prepare_enable(eth->clk_esw);
- clk_prepare_enable(eth->clk_gp1);
- clk_prepare_enable(eth->clk_gp2);
+ clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]);
+ clk_prepare_enable(eth->clks[MTK_CLK_ESW]);
+ clk_prepare_enable(eth->clks[MTK_CLK_GP1]);
+ clk_prepare_enable(eth->clks[MTK_CLK_GP2]);
- eth->dev = &pdev->dev;
eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
INIT_WORK(&eth->pending_work, mtk_pending_work);
@@ -1831,7 +1879,9 @@ static int mtk_probe(struct platform_device *pdev)
* for NAPI to work
*/
init_dummy_netdev(&eth->dummy_dev);
- netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_poll,
+ netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
+ MTK_NAPI_WEIGHT);
+ netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
MTK_NAPI_WEIGHT);
platform_set_drvdata(pdev, eth);
@@ -1846,14 +1896,24 @@ err_free_dev:
static int mtk_remove(struct platform_device *pdev)
{
struct mtk_eth *eth = platform_get_drvdata(pdev);
+ int i;
- clk_disable_unprepare(eth->clk_ethif);
- clk_disable_unprepare(eth->clk_esw);
- clk_disable_unprepare(eth->clk_gp1);
- clk_disable_unprepare(eth->clk_gp2);
+ /* stop all devices to make sure that dma is properly shut down */
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i])
+ continue;
+ mtk_stop(eth->netdev[i]);
+ }
+ clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]);
+ clk_disable_unprepare(eth->clks[MTK_CLK_ESW]);
+ clk_disable_unprepare(eth->clks[MTK_CLK_GP1]);
+ clk_disable_unprepare(eth->clks[MTK_CLK_GP2]);
+
+ netif_napi_del(&eth->tx_napi);
netif_napi_del(&eth->rx_napi);
mtk_cleanup(eth);
+ mtk_mdio_cleanup(eth);
platform_set_drvdata(pdev, NULL);
return 0;
@@ -1863,13 +1923,13 @@ const struct of_device_id of_mtk_match[] = {
{ .compatible = "mediatek,mt7623-eth" },
{},
};
+MODULE_DEVICE_TABLE(of, of_mtk_match);
static struct platform_driver mtk_driver = {
.probe = mtk_probe,
.remove = mtk_remove,
.driver = {
.name = "mtk_soc_eth",
- .owner = THIS_MODULE,
.of_match_table = of_mtk_match,
},
};
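Several mtk_eth_soc.c hunks above share one theme: the combined interrupt handler and single NAPI poll are split into independent TX and RX vectors, and because two handlers now touch the same interrupt-mask register, the read-modify-write is serialized with the new irq_lock instead of relying on a flushing read. A condensed sketch of that shape, using hypothetical bar_* names and a plain field in place of the MMIO register:

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>

#define BAR_RX_DONE_INT	BIT(16)	/* illustrative cause bit */

struct bar_eth {
	spinlock_t irq_lock;	/* serializes INT_MASK updates */
	u32 int_mask;		/* stand-in for the MMIO mask register */
	struct napi_struct rx_napi;
};

static void bar_irq_disable(struct bar_eth *eth, u32 mask)
{
	unsigned long flags;

	/* the read-modify-write must be atomic against the TX vector */
	spin_lock_irqsave(&eth->irq_lock, flags);
	eth->int_mask &= ~mask;
	spin_unlock_irqrestore(&eth->irq_lock, flags);
}

static irqreturn_t bar_handle_irq_rx(int irq, void *priv)
{
	struct bar_eth *eth = priv;

	/* mask only this vector's cause bits, then poll from NAPI */
	if (likely(napi_schedule_prep(&eth->rx_napi))) {
		__napi_schedule(&eth->rx_napi);
		bar_irq_disable(eth, BAR_RX_DONE_INT);
	}
	return IRQ_HANDLED;
}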
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index a5eb7c623..6e1ade7a2 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -68,6 +68,10 @@
/* Unicast Filter MAC Address Register - High */
#define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
+/* PDMA Interrupt grouping registers */
+#define MTK_PDMA_INT_GRP1 0xa50
+#define MTK_PDMA_INT_GRP2 0xa54
+
/* QDMA TX Queue Configuration Registers */
#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
#define QDMA_RES_THRES 4
@@ -125,6 +129,11 @@
#define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
+/* QDMA Interrupt grouping registers */
+#define MTK_QDMA_INT_GRP1 0x1a20
+#define MTK_QDMA_INT_GRP2 0x1a24
+#define MTK_RLS_DONE_INT BIT(0)
+
/* QDMA Interrupt Status Register */
#define MTK_QDMA_INT_MASK 0x1A1C
@@ -281,6 +290,17 @@ enum mtk_tx_flags {
MTK_TX_FLAGS_PAGE0 = 0x02,
};
+/* This enum identifies each required clock by its fixed position in the
+ * clock array, matching the order of mtk_clks_source_name[]
+ */
+enum mtk_clks_map {
+ MTK_CLK_ETHIF,
+ MTK_CLK_ESW,
+ MTK_CLK_GP1,
+ MTK_CLK_GP2,
+ MTK_CLK_MAX
+};
+
/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
* by the TX descriptors
* @skb: The SKB pointer of the packet being sent
@@ -356,14 +376,12 @@ struct mtk_rx_ring {
* @dma_refcnt: track how many netdevs are using the DMA engine
* @tx_ring: Pointer to the memory holding info about the TX ring
* @rx_ring: Pointer to the memory holding info about the RX ring
- * @rx_napi: The NAPI struct
+ * @tx_napi: The TX NAPI struct
+ * @rx_napi: The RX NAPI struct
* @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
* @phy_scratch_ring: physical address of scratch_ring
* @scratch_head: The scratch memory that scratch_ring points to.
- * @clk_ethif: The ethif clock
- * @clk_esw: The switch clock
- * @clk_gp1: The gmac1 clock
- * @clk_gp2: The gmac2 clock
+ * @clks: clock array for all clocks required
* @mii_bus: If there is a bus we need to create an instance for it
* @pending_work: The workqueue used to reset the dma ring
*/
@@ -373,10 +391,11 @@ struct mtk_eth {
void __iomem *base;
struct reset_control *rstc;
spinlock_t page_lock;
+ spinlock_t irq_lock;
struct net_device dummy_dev;
struct net_device *netdev[MTK_MAX_DEVS];
struct mtk_mac *mac[MTK_MAX_DEVS];
- int irq;
+ int irq[3];
u32 msg_enable;
unsigned long sysclk;
struct regmap *ethsys;
@@ -384,14 +403,13 @@ struct mtk_eth {
atomic_t dma_refcnt;
struct mtk_tx_ring tx_ring;
struct mtk_rx_ring rx_ring;
+ struct napi_struct tx_napi;
struct napi_struct rx_napi;
struct mtk_tx_dma *scratch_ring;
dma_addr_t phy_scratch_ring;
void *scratch_head;
- struct clk *clk_ethif;
- struct clk *clk_esw;
- struct clk *clk_gp1;
- struct clk *clk_gp2;
+ struct clk *clks[MTK_CLK_MAX];
+
struct mii_bus *mii_bus;
struct work_struct pending_work;
};
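The header side of the clock rework: four individual struct clk pointers become one array indexed by mtk_clks_map, which pairs with the mtk_clks_source_name[] table so probe can fetch everything in a loop. The idiom reduced to its essentials (illustrative baz_* names):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

enum baz_clks_map { BAZ_CLK_ETHIF, BAZ_CLK_ESW, BAZ_CLK_MAX };

static const char * const baz_clk_names[BAZ_CLK_MAX] = { "ethif", "esw" };

static int baz_get_clks(struct device *dev, struct clk *clks[BAZ_CLK_MAX])
{
	int i;

	for (i = 0; i < BAZ_CLK_MAX; i++) {
		clks[i] = devm_clk_get(dev, baz_clk_names[i]);
		if (IS_ERR(clks[i]))
			/* keep deferral distinct; collapse other errors,
			 * as the mtk_probe() hunk above does
			 */
			return PTR_ERR(clks[i]) == -EPROBE_DEFER ?
			       -EPROBE_DEFER : -ENODEV;
	}
	return 0;
}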
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index 9ca3734eb..5098e7f21 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -24,13 +24,6 @@ config MLX4_EN_DCB
If unsure, set to Y
-config MLX4_EN_VXLAN
- bool "VXLAN offloads Support"
- default y
- depends on MLX4_EN && VXLAN && !(MLX4_EN=y && VXLAN=m)
- ---help---
- Say Y here if you want to use VXLAN offloads in the driver.
-
config MLX4_CORE
tristate
depends on PCI
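MLX4_EN_VXLAN can go away because the driver no longer calls the VXLAN module directly: the en_netdev.c hunks below convert ndo_add_vxlan_port/ndo_del_vxlan_port to the generic ndo_udp_tunnel_add/ndo_udp_tunnel_del callbacks, which take a struct udp_tunnel_info and can be built unconditionally. The shape of such a callback, as a sketch:

#include <linux/netdevice.h>
#include <linux/socket.h>
#include <net/udp_tunnel.h>

static void example_udp_tunnel_add(struct net_device *dev,
				   struct udp_tunnel_info *ti)
{
	/* filter for IPv4 VXLAN, as the converted mlx4 callbacks do */
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;
	if (ti->sa_family != AF_INET)
		return;

	/* program ti->port into the device's tunnel-offload table here */
}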
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index f01918c63..b04760a50 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -37,6 +37,11 @@
#include "mlx4_en.h"
#include "fw_qos.h"
+enum {
+ MLX4_CEE_STATE_DOWN = 0,
+ MLX4_CEE_STATE_UP = 1,
+};
+
/* Definitions for QCN
*/
@@ -80,13 +85,205 @@ struct mlx4_congestion_control_mb_prio_802_1_qau_statistics {
__be32 reserved3[4];
};
+static u8 mlx4_en_dcbnl_getcap(struct net_device *dev, int capid, u8 *cap)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ switch (capid) {
+ case DCB_CAP_ATTR_PFC:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = priv->dcbx_cap;
+ break;
+ case DCB_CAP_ATTR_PFC_TCS:
+ *cap = 1 << mlx4_max_tc(priv->mdev->dev);
+ break;
+ default:
+ *cap = false;
+ break;
+ }
+
+ return 0;
+}
+
+static u8 mlx4_en_dcbnl_getpfcstate(struct net_device *netdev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+
+ return priv->cee_config.pfc_state;
+}
+
+static void mlx4_en_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
+{
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+
+ priv->cee_config.pfc_state = state;
+}
+
+static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
+ u8 *setting)
+{
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+
+ *setting = priv->cee_config.dcb_pfc[priority];
+}
+
+static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
+ u8 setting)
+{
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+
+ priv->cee_config.dcb_pfc[priority] = setting;
+ priv->cee_config.pfc_state = true;
+}
+
+static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
+{
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+
+ if (!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED))
+ return -EINVAL;
+
+ if (tcid == DCB_NUMTCS_ATTR_PFC)
+ *num = mlx4_max_tc(priv->mdev->dev);
+ else
+ *num = 0;
+
+ return 0;
+}
+
+static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return 1;
+
+ if (priv->cee_config.pfc_state) {
+ int tc;
+
+ priv->prof->rx_pause = 0;
+ priv->prof->tx_pause = 0;
+ for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) {
+ u8 tc_mask = 1 << tc;
+
+ switch (priv->cee_config.dcb_pfc[tc]) {
+ case pfc_disabled:
+ priv->prof->tx_ppp &= ~tc_mask;
+ priv->prof->rx_ppp &= ~tc_mask;
+ break;
+ case pfc_enabled_full:
+ priv->prof->tx_ppp |= tc_mask;
+ priv->prof->rx_ppp |= tc_mask;
+ break;
+ case pfc_enabled_tx:
+ priv->prof->tx_ppp |= tc_mask;
+ priv->prof->rx_ppp &= ~tc_mask;
+ break;
+ case pfc_enabled_rx:
+ priv->prof->tx_ppp &= ~tc_mask;
+ priv->prof->rx_ppp |= tc_mask;
+ break;
+ default:
+ break;
+ }
+ }
+ en_dbg(DRV, priv, "Set pfc on\n");
+ } else {
+ priv->prof->rx_pause = 1;
+ priv->prof->tx_pause = 1;
+ en_dbg(DRV, priv, "Set pfc off\n");
+ }
+
+ if (mlx4_SET_PORT_general(mdev->dev, priv->port,
+ priv->rx_skb_size + ETH_FCS_LEN,
+ priv->prof->tx_pause,
+ priv->prof->tx_ppp,
+ priv->prof->rx_pause,
+ priv->prof->rx_ppp)) {
+ en_err(priv, "Failed setting pause params\n");
+ return 1;
+ }
+
+ return 0;
+}
+
+static u8 mlx4_en_dcbnl_get_state(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ if (priv->flags & MLX4_EN_FLAG_DCB_ENABLED)
+ return MLX4_CEE_STATE_UP;
+
+ return MLX4_CEE_STATE_DOWN;
+}
+
+static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int num_tcs = 0;
+
+ if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return 1;
+
+ if (!!(state) == !!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED))
+ return 0;
+
+ if (state) {
+ priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
+ num_tcs = IEEE_8021QAZ_MAX_TCS;
+ } else {
+ priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
+ }
+
+ if (mlx4_en_setup_tc(dev, num_tcs))
+ return 1;
+
+ return 0;
+}
+
+/* On success returns a non-zero 802.1p user priority bitmap;
+ * otherwise returns 0 as the invalid user priority bitmap to
+ * indicate an error.
+ */
+static int mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
+{
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+ struct dcb_app app = {
+ .selector = idtype,
+ .protocol = id,
+ };
+ if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return 0;
+
+ return dcb_getapp(netdev, &app);
+}
+
+static int mlx4_en_dcbnl_setapp(struct net_device *netdev, u8 idtype,
+ u16 id, u8 up)
+{
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+ struct dcb_app app;
+
+ if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return -EINVAL;
+
+ memset(&app, 0, sizeof(struct dcb_app));
+ app.selector = idtype;
+ app.protocol = id;
+ app.priority = up;
+
+ return dcb_setapp(netdev, &app);
+}
+
static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev,
struct ieee_ets *ets)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct ieee_ets *my_ets = &priv->ets;
- /* No IEEE PFC settings available */
if (!my_ets)
return -EINVAL;
@@ -237,18 +434,51 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev)
{
- return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ return priv->dcbx_cap;
}
static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct ieee_ets ets = {0};
+ struct ieee_pfc pfc = {0};
+
+ if (mode == priv->dcbx_cap)
+ return 0;
+
if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
- (mode & DCB_CAP_DCBX_VER_CEE) ||
- !(mode & DCB_CAP_DCBX_VER_IEEE) ||
+ ((mode & DCB_CAP_DCBX_VER_IEEE) &&
+ (mode & DCB_CAP_DCBX_VER_CEE)) ||
!(mode & DCB_CAP_DCBX_HOST))
- return 1;
+ goto err;
+
+ priv->dcbx_cap = mode;
+
+ ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
+ pfc.pfc_cap = IEEE_8021QAZ_MAX_TCS;
+
+ if (mode & DCB_CAP_DCBX_VER_IEEE) {
+ if (mlx4_en_dcbnl_ieee_setets(dev, &ets))
+ goto err;
+ if (mlx4_en_dcbnl_ieee_setpfc(dev, &pfc))
+ goto err;
+ } else if (mode & DCB_CAP_DCBX_VER_CEE) {
+ if (mlx4_en_dcbnl_set_all(dev))
+ goto err;
+ } else {
+ if (mlx4_en_dcbnl_ieee_setets(dev, &ets))
+ goto err;
+ if (mlx4_en_dcbnl_ieee_setpfc(dev, &pfc))
+ goto err;
+ if (mlx4_en_setup_tc(dev, 0))
+ goto err;
+ }
return 0;
+err:
+ return 1;
}
#define MLX4_RATELIMIT_UNITS_IN_KB 100000 /* rate-limit HW unit in Kbps */
@@ -463,24 +693,46 @@ static int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev,
}
const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
- .ieee_getets = mlx4_en_dcbnl_ieee_getets,
- .ieee_setets = mlx4_en_dcbnl_ieee_setets,
- .ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate,
- .ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate,
- .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc,
- .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc,
+ .ieee_getets = mlx4_en_dcbnl_ieee_getets,
+ .ieee_setets = mlx4_en_dcbnl_ieee_setets,
+ .ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate,
+ .ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate,
+ .ieee_getqcn = mlx4_en_dcbnl_ieee_getqcn,
+ .ieee_setqcn = mlx4_en_dcbnl_ieee_setqcn,
+ .ieee_getqcnstats = mlx4_en_dcbnl_ieee_getqcnstats,
+ .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc,
+ .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc,
+
+ .getstate = mlx4_en_dcbnl_get_state,
+ .setstate = mlx4_en_dcbnl_set_state,
+ .getpfccfg = mlx4_en_dcbnl_get_pfc_cfg,
+ .setpfccfg = mlx4_en_dcbnl_set_pfc_cfg,
+ .setall = mlx4_en_dcbnl_set_all,
+ .getcap = mlx4_en_dcbnl_getcap,
+ .getnumtcs = mlx4_en_dcbnl_getnumtcs,
+ .getpfcstate = mlx4_en_dcbnl_getpfcstate,
+ .setpfcstate = mlx4_en_dcbnl_setpfcstate,
+ .getapp = mlx4_en_dcbnl_getapp,
+ .setapp = mlx4_en_dcbnl_setapp,
.getdcbx = mlx4_en_dcbnl_getdcbx,
.setdcbx = mlx4_en_dcbnl_setdcbx,
- .ieee_getqcn = mlx4_en_dcbnl_ieee_getqcn,
- .ieee_setqcn = mlx4_en_dcbnl_ieee_setqcn,
- .ieee_getqcnstats = mlx4_en_dcbnl_ieee_getqcnstats,
};
const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = {
.ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc,
.ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc,
+ .setstate = mlx4_en_dcbnl_set_state,
+ .getpfccfg = mlx4_en_dcbnl_get_pfc_cfg,
+ .setpfccfg = mlx4_en_dcbnl_set_pfc_cfg,
+ .setall = mlx4_en_dcbnl_set_all,
+ .getnumtcs = mlx4_en_dcbnl_getnumtcs,
+ .getpfcstate = mlx4_en_dcbnl_getpfcstate,
+ .setpfcstate = mlx4_en_dcbnl_setpfcstate,
+ .getapp = mlx4_en_dcbnl_getapp,
+ .setapp = mlx4_en_dcbnl_setapp,
+
.getdcbx = mlx4_en_dcbnl_getdcbx,
.setdcbx = mlx4_en_dcbnl_setdcbx,
};
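The new CEE-mode getapp/setapp callbacks keep no driver state of their own: they gate on DCB_CAP_DCBX_VER_CEE and defer to the kernel's shared per-netdev application table. Roughly, for the set side (a sketch, not the driver's exact signature):

#include <linux/netdevice.h>
#include <net/dcbnl.h>

static int example_setapp(struct net_device *dev, u8 dcbx_cap,
			  u8 selector, u16 protocol, u8 priority)
{
	struct dcb_app app = {
		.selector = selector,
		.protocol = protocol,
		.priority = priority,
	};

	if (!(dcbx_cap & DCB_CAP_DCBX_VER_CEE))
		return -EINVAL;

	/* the dcbnl core owns one app table per netdev; the driver
	 * only adds or removes entries
	 */
	return dcb_setapp(dev, &app);
}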
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 44cf16d01..bdda17d2e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1112,7 +1112,7 @@ static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- return priv->rx_ring_num;
+ return rounddown_pow_of_two(priv->rx_ring_num);
}
static u32 mlx4_en_get_rxfh_key_size(struct net_device *netdev)
@@ -1146,19 +1146,17 @@ static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
u8 *hfunc)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_rss_map *rss_map = &priv->rss_map;
- int rss_rings;
- size_t n = priv->rx_ring_num;
+ u32 n = mlx4_en_get_rxfh_indir_size(dev);
+ u32 i, rss_rings;
int err = 0;
- rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num;
- rss_rings = 1 << ilog2(rss_rings);
+ rss_rings = priv->prof->rss_rings ?: n;
+ rss_rings = rounddown_pow_of_two(rss_rings);
- while (n--) {
+ for (i = 0; i < n; i++) {
if (!ring_index)
break;
- ring_index[n] = rss_map->qps[n % rss_rings].qpn -
- rss_map->base_qpn;
+ ring_index[i] = i % rss_rings;
}
if (key)
memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE);
@@ -1171,6 +1169,7 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
const u8 *key, const u8 hfunc)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ u32 n = mlx4_en_get_rxfh_indir_size(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int port_up = 0;
int err = 0;
@@ -1180,18 +1179,18 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
/* Calculate RSS table size and make sure flows are spread evenly
* between rings
*/
- for (i = 0; i < priv->rx_ring_num; i++) {
+ for (i = 0; i < n; i++) {
if (!ring_index)
- continue;
+ break;
if (i > 0 && !ring_index[i] && !rss_rings)
rss_rings = i;
- if (ring_index[i] != (i % (rss_rings ?: priv->rx_ring_num)))
+ if (ring_index[i] != (i % (rss_rings ?: n)))
return -EINVAL;
}
if (!rss_rings)
- rss_rings = priv->rx_ring_num;
+ rss_rings = n;
/* RSS table size must be an order of 2 */
if (!is_power_of_2(rss_rings))
@@ -1730,6 +1729,12 @@ static int mlx4_en_set_channels(struct net_device *dev,
!channel->tx_count || !channel->rx_count)
return -EINVAL;
+ if (channel->tx_count * MLX4_EN_NUM_UP <= priv->xdp_ring_num) {
+ en_err(priv, "Minimum %d tx channels required with XDP on\n",
+ priv->xdp_ring_num / MLX4_EN_NUM_UP + 1);
+ return -EINVAL;
+ }
+
tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
@@ -1751,7 +1756,8 @@ static int mlx4_en_set_channels(struct net_device *dev,
mlx4_en_safe_replace_resources(priv, tmp);
- netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
+ netif_set_real_num_tx_queues(dev, priv->tx_ring_num -
+ priv->xdp_ring_num);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
if (dev->num_tc)
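After the rewrite above, the reported RSS indirection table size is the RX ring count rounded down to a power of two, and the entries are a plain round-robin over the active rings rather than being reconstructed from QP numbers. The resulting layout, in isolation (assumes a nonzero ring count):

#include <linux/log2.h>
#include <linux/types.h>

static void example_fill_rss_indir(u32 *ring_index, u32 rx_ring_num,
				   u32 rss_rings)
{
	u32 n = rounddown_pow_of_two(rx_ring_num);	/* table size */
	u32 i;

	if (!rss_rings)
		rss_rings = n;
	rss_rings = rounddown_pow_of_two(rss_rings);

	/* e.g. 4 rss rings over an 8-entry table: 0 1 2 3 0 1 2 3 */
	for (i = 0; i < n; i++)
		ring_index[i] = i % rss_rings;
}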
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 8359e9e51..fedb82927 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -31,6 +31,7 @@
*
*/
+#include <linux/bpf.h>
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
@@ -67,6 +68,18 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up)
offset += priv->num_tx_rings_p_up;
}
+#ifdef CONFIG_MLX4_EN_DCB
+ if (!mlx4_is_slave(priv->mdev->dev)) {
+ if (up) {
+ if (priv->dcbx_cap)
+ priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
+ } else {
+ priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
+ priv->cee_config.pfc_state = false;
+ }
+ }
+#endif /* CONFIG_MLX4_EN_DCB */
+
return 0;
}
@@ -1201,8 +1214,8 @@ static void mlx4_en_netpoll(struct net_device *dev)
struct mlx4_en_cq *cq;
int i;
- for (i = 0; i < priv->rx_ring_num; i++) {
- cq = priv->rx_cq[i];
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ cq = priv->tx_cq[i];
napi_schedule(&cq->napi);
}
}
@@ -1510,6 +1523,24 @@ static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
}
+static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv,
+ int tx_ring_idx)
+{
+ struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[tx_ring_idx];
+ int rr_index;
+
+ rr_index = (priv->xdp_ring_num - priv->tx_ring_num) + tx_ring_idx;
+ if (rr_index >= 0) {
+ tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc;
+ tx_ring->recycle_ring = priv->rx_ring[rr_index];
+ en_dbg(DRV, priv,
+ "Set tx_ring[%d]->recycle_ring = rx_ring[%d]\n",
+ tx_ring_idx, rr_index);
+ } else {
+ tx_ring->recycle_ring = NULL;
+ }
+}
+
int mlx4_en_start_port(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -1632,6 +1663,8 @@ int mlx4_en_start_port(struct net_device *dev)
}
tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
+ mlx4_en_init_recycle_ring(priv, i);
+
/* Arm CQ for TX completions */
mlx4_en_arm_cq(priv, cq);
@@ -1696,10 +1729,9 @@ int mlx4_en_start_port(struct net_device *dev)
/* Schedule multicast task to populate multicast list */
queue_work(mdev->workqueue, &priv->rx_mode_task);
-#ifdef CONFIG_MLX4_EN_VXLAN
if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
- vxlan_get_rx_port(dev);
-#endif
+ udp_tunnel_get_rx_info(dev);
+
priv->port_up = true;
netif_tx_start_all_queues(dev);
netif_device_attach(dev);
@@ -2177,6 +2209,11 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
en_err(priv, "Bad MTU size:%d.\n", new_mtu);
return -EPERM;
}
+ if (priv->xdp_ring_num && MLX4_EN_EFF_MTU(new_mtu) > FRAG_SZ0) {
+ en_err(priv, "MTU size:%d requires frags but XDP running\n",
+ new_mtu);
+ return -EOPNOTSUPP;
+ }
dev->mtu = new_mtu;
if (netif_running(dev)) {
@@ -2434,7 +2471,6 @@ static int mlx4_en_get_phys_port_id(struct net_device *dev,
return 0;
}
-#ifdef CONFIG_MLX4_EN_VXLAN
static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
{
int ret;
@@ -2484,15 +2520,19 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
}
static void mlx4_en_add_vxlan_port(struct net_device *dev,
- sa_family_t sa_family, __be16 port)
+ struct udp_tunnel_info *ti)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ __be16 port = ti->port;
__be16 current_port;
- if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
return;
- if (sa_family == AF_INET6)
+ if (ti->sa_family != AF_INET)
+ return;
+
+ if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
return;
current_port = priv->vxlan_port;
@@ -2507,15 +2547,19 @@ static void mlx4_en_add_vxlan_port(struct net_device *dev,
}
static void mlx4_en_del_vxlan_port(struct net_device *dev,
- sa_family_t sa_family, __be16 port)
+ struct udp_tunnel_info *ti)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ __be16 port = ti->port;
__be16 current_port;
- if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
return;
- if (sa_family == AF_INET6)
+ if (ti->sa_family != AF_INET)
+ return;
+
+ if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
return;
current_port = priv->vxlan_port;
@@ -2550,7 +2594,6 @@ static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
return features;
}
-#endif
static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate)
{
@@ -2579,6 +2622,103 @@ static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 m
return err;
}
+static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct bpf_prog *old_prog;
+ int xdp_ring_num;
+ int port_up = 0;
+ int err;
+ int i;
+
+ xdp_ring_num = prog ? ALIGN(priv->rx_ring_num, MLX4_EN_NUM_UP) : 0;
+
+ /* No need to reconfigure buffers when simply swapping the
+ * program for a new one.
+ */
+ if (priv->xdp_ring_num == xdp_ring_num) {
+ if (prog) {
+ prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
+ }
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ /* This xchg is paired with READ_ONCE in the fastpath */
+ old_prog = xchg(&priv->rx_ring[i]->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+ }
+ return 0;
+ }
+
+ if (priv->num_frags > 1) {
+ en_err(priv, "Cannot set XDP if MTU requires multiple frags\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (priv->tx_ring_num < xdp_ring_num + MLX4_EN_NUM_UP) {
+ en_err(priv,
+ "Minimum %d tx channels required to run XDP\n",
+ (xdp_ring_num + MLX4_EN_NUM_UP) / MLX4_EN_NUM_UP);
+ return -EINVAL;
+ }
+
+ if (prog) {
+ prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
+ }
+
+ mutex_lock(&mdev->state_lock);
+ if (priv->port_up) {
+ port_up = 1;
+ mlx4_en_stop_port(dev, 1);
+ }
+
+ priv->xdp_ring_num = xdp_ring_num;
+ netif_set_real_num_tx_queues(dev, priv->tx_ring_num -
+ priv->xdp_ring_num);
+
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ old_prog = xchg(&priv->rx_ring[i]->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+ }
+
+ if (port_up) {
+ err = mlx4_en_start_port(dev);
+ if (err) {
+ en_err(priv, "Failed starting port %d for XDP change\n",
+ priv->port);
+ queue_work(mdev->workqueue, &priv->watchdog_task);
+ }
+ }
+
+ mutex_unlock(&mdev->state_lock);
+ return 0;
+}
+
+static bool mlx4_xdp_attached(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ return !!priv->xdp_ring_num;
+}
+
+static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return mlx4_xdp_set(dev, xdp->prog);
+ case XDP_QUERY_PROG:
+ xdp->prog_attached = mlx4_xdp_attached(dev);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct net_device_ops mlx4_netdev_ops = {
.ndo_open = mlx4_en_open,
.ndo_stop = mlx4_en_close,
@@ -2603,12 +2743,11 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
.ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
-#ifdef CONFIG_MLX4_EN_VXLAN
- .ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
- .ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
+ .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port,
+ .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
.ndo_features_check = mlx4_en_features_check,
-#endif
.ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
+ .ndo_xdp = mlx4_xdp,
};
static const struct net_device_ops mlx4_netdev_ops_master = {
@@ -2641,12 +2780,11 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
.ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
-#ifdef CONFIG_MLX4_EN_VXLAN
- .ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
- .ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
+ .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port,
+ .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
.ndo_features_check = mlx4_en_features_check,
-#endif
.ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
+ .ndo_xdp = mlx4_xdp,
};
struct mlx4_en_bond {
@@ -2936,10 +3074,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
-#ifdef CONFIG_MLX4_EN_VXLAN
INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
-#endif
#ifdef CONFIG_RFS_ACCEL
INIT_LIST_HEAD(&priv->filters);
spin_lock_init(&priv->filters_lock);
@@ -2979,6 +3115,14 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->msg_enable = MLX4_EN_MSG_LEVEL;
#ifdef CONFIG_MLX4_EN_DCB
if (!mlx4_is_slave(priv->mdev->dev)) {
+ priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
+ DCB_CAP_DCBX_VER_IEEE;
+ priv->flags |= MLX4_EN_DCB_ENABLED;
+ priv->cee_config.pfc_state = false;
+
+ for (i = 0; i < MLX4_EN_NUM_UP; i++)
+ priv->cee_config.dcb_pfc[i] = pfc_disabled;
+
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
} else {
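When the ring layout is unchanged, mlx4_xdp_set() above swaps programs without bouncing the port: bpf_prog_add() takes one extra reference per additional RX ring, then each ring's pointer is exchanged atomically and the displaced reference is dropped. The core of that swap in isolation (prog may be NULL to detach):

#include <linux/atomic.h>
#include <linux/bpf.h>

static void example_swap_ring_progs(struct bpf_prog **ring_prog,
				    int num_rings, struct bpf_prog *prog)
{
	int i;

	for (i = 0; i < num_rings; i++) {
		/* paired with READ_ONCE() in the RX fast path */
		struct bpf_prog *old = xchg(&ring_prog[i], prog);

		if (old)
			bpf_prog_put(old);	/* drop the displaced ref */
	}
}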
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 99b5407f2..2040dad86 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -32,6 +32,7 @@
*/
#include <net/busy_poll.h>
+#include <linux/bpf.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
@@ -57,7 +58,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
struct page *page;
dma_addr_t dma;
- for (order = MLX4_EN_ALLOC_PREFER_ORDER; ;) {
+ for (order = frag_info->order; ;) {
gfp_t gfp = _gfp;
if (order)
@@ -70,7 +71,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
return -ENOMEM;
}
dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order,
- PCI_DMA_FROMDEVICE);
+ frag_info->dma_dir);
if (dma_mapping_error(priv->ddev, dma)) {
put_page(page);
return -ENOMEM;
@@ -124,7 +125,8 @@ out:
while (i--) {
if (page_alloc[i].page != ring_alloc[i].page) {
dma_unmap_page(priv->ddev, page_alloc[i].dma,
- page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
+ page_alloc[i].page_size,
+ priv->frag_info[i].dma_dir);
page = page_alloc[i].page;
/* Revert changes done by mlx4_alloc_pages */
page_ref_sub(page, page_alloc[i].page_size /
@@ -145,7 +147,7 @@ static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
if (next_frag_end > frags[i].page_size)
dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
- PCI_DMA_FROMDEVICE);
+ frag_info->dma_dir);
if (frags[i].page)
put_page(frags[i].page);
@@ -176,7 +178,8 @@ out:
page_alloc = &ring->page_alloc[i];
dma_unmap_page(priv->ddev, page_alloc->dma,
- page_alloc->page_size, PCI_DMA_FROMDEVICE);
+ page_alloc->page_size,
+ priv->frag_info[i].dma_dir);
page = page_alloc->page;
/* Revert changes done by mlx4_alloc_pages */
page_ref_sub(page, page_alloc->page_size /
@@ -201,7 +204,7 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
i, page_count(page_alloc->page));
dma_unmap_page(priv->ddev, page_alloc->dma,
- page_alloc->page_size, PCI_DMA_FROMDEVICE);
+ page_alloc->page_size, frag_info->dma_dir);
while (page_alloc->page_offset + frag_info->frag_stride <
page_alloc->page_size) {
put_page(page_alloc->page);
@@ -244,6 +247,12 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_alloc *frags = ring->rx_info +
(index << priv->log_rx_info);
+ if (ring->page_cache.index > 0) {
+ frags[0] = ring->page_cache.buf[--ring->page_cache.index];
+ rx_desc->data[0].addr = cpu_to_be64(frags[0].dma);
+ return 0;
+ }
+
return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp);
}
@@ -502,13 +511,35 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
}
}
+/* When the rx ring is running in page-per-packet mode, a released frame can go
+ * directly into a small cache, to avoid unmapping or touching the page
+ * allocator. In bpf prog performance scenarios, buffers are either forwarded
+ * or dropped, never converted to skbs, so every page can come directly from
+ * this cache when it is sized to be a multiple of the napi budget.
+ */
+bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
+ struct mlx4_en_rx_alloc *frame)
+{
+ struct mlx4_en_page_cache *cache = &ring->page_cache;
+
+ if (cache->index >= MLX4_EN_CACHE_SIZE)
+ return false;
+
+ cache->buf[cache->index++] = *frame;
+ return true;
+}
+
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
u32 size, u16 stride)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_rx_ring *ring = *pring;
+ struct bpf_prog *old_prog;
+ old_prog = READ_ONCE(ring->xdp_prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
vfree(ring->rx_info);
ring->rx_info = NULL;
@@ -519,6 +550,16 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring)
{
+ int i;
+
+ for (i = 0; i < ring->page_cache.index; i++) {
+ struct mlx4_en_rx_alloc *frame = &ring->page_cache.buf[i];
+
+ dma_unmap_page(priv->ddev, frame->dma, frame->page_size,
+ priv->frag_info[0].dma_dir);
+ put_page(frame->page);
+ }
+ ring->page_cache.index = 0;
mlx4_en_free_rx_buf(priv, ring);
if (ring->stride <= TXBB_SIZE)
ring->buf -= TXBB_SIZE;
@@ -740,7 +781,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
struct mlx4_en_rx_alloc *frags;
struct mlx4_en_rx_desc *rx_desc;
+ struct bpf_prog *xdp_prog;
+ int doorbell_pending;
struct sk_buff *skb;
+ int tx_index;
int index;
int nr;
unsigned int length;
@@ -756,6 +800,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
if (budget <= 0)
return polled;
+ xdp_prog = READ_ONCE(ring->xdp_prog);
+ doorbell_pending = 0;
+ tx_index = (priv->tx_ring_num - priv->xdp_ring_num) + cq->ring;
+
/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
* descriptor offset can be deduced from the CQE index instead of
* reading 'cqe->index' */
@@ -832,6 +880,43 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
+ /* A bpf program gets first chance to drop the packet. It may
+ * read bytes but not past the end of the frag.
+ */
+ if (xdp_prog) {
+ struct xdp_buff xdp;
+ dma_addr_t dma;
+ u32 act;
+
+ dma = be64_to_cpu(rx_desc->data[0].addr);
+ dma_sync_single_for_cpu(priv->ddev, dma,
+ priv->frag_info[0].frag_size,
+ DMA_FROM_DEVICE);
+
+ xdp.data = page_address(frags[0].page) +
+ frags[0].page_offset;
+ xdp.data_end = xdp.data + length;
+
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ if (!mlx4_en_xmit_frame(frags, dev,
+ length, tx_index,
+ &doorbell_pending))
+ goto consumed;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ case XDP_ABORTED:
+ case XDP_DROP:
+ if (mlx4_en_rx_recycle(ring, frags))
+ goto consumed;
+ goto next;
+ }
+ }
+
if (likely(dev->features & NETIF_F_RXCSUM)) {
if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
MLX4_CQE_STATUS_UDP)) {
@@ -983,6 +1068,7 @@ next:
for (nr = 0; nr < priv->num_frags; nr++)
mlx4_en_free_frag(priv, frags, nr);
+consumed:
++cq->mcq.cons_index;
index = (cq->mcq.cons_index) & ring->size_mask;
cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
@@ -991,6 +1077,9 @@ next:
}
out:
+ if (doorbell_pending)
+ mlx4_en_xmit_doorbell(priv->tx_ring[tx_index]);
+
AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
mlx4_cq_set_ci(&cq->mcq);
wmb(); /* ensure HW sees CQ consumer before we post new buffers */
@@ -1058,22 +1147,35 @@ static const int frag_sizes[] = {
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
+ enum dma_data_direction dma_dir = PCI_DMA_FROMDEVICE;
struct mlx4_en_priv *priv = netdev_priv(dev);
- /* VLAN_HLEN is added twice,to support skb vlan tagged with multiple
- * headers. (For example: ETH_P_8021Q and ETH_P_8021AD).
- */
- int eff_mtu = dev->mtu + ETH_HLEN + (2 * VLAN_HLEN);
+ int eff_mtu = MLX4_EN_EFF_MTU(dev->mtu);
+ int order = MLX4_EN_ALLOC_PREFER_ORDER;
+ u32 align = SMP_CACHE_BYTES;
int buf_size = 0;
int i = 0;
+ /* bpf requires buffers to be set up as 1 packet per page.
+ * This only works when num_frags == 1.
+ */
+ if (priv->xdp_ring_num) {
+ dma_dir = PCI_DMA_BIDIRECTIONAL;
+ /* This will gain efficient xdp frame recycling at the expense
+ * of more costly truesize accounting
+ */
+ align = PAGE_SIZE;
+ order = 0;
+ }
+
while (buf_size < eff_mtu) {
+ priv->frag_info[i].order = order;
priv->frag_info[i].frag_size =
(eff_mtu > buf_size + frag_sizes[i]) ?
frag_sizes[i] : eff_mtu - buf_size;
priv->frag_info[i].frag_prefix_size = buf_size;
priv->frag_info[i].frag_stride =
- ALIGN(priv->frag_info[i].frag_size,
- SMP_CACHE_BYTES);
+ ALIGN(priv->frag_info[i].frag_size, align);
+ priv->frag_info[i].dma_dir = dma_dir;
buf_size += priv->frag_info[i].frag_size;
i++;
}
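The page cache added to en_rx.c is a bounded stack: XDP_DROP (and recycled XDP_TX completions) push still-mapped pages, and mlx4_en_prepare_rx_desc() pops before touching the page allocator. Its behavior, reduced to the data structure with simplified field types:

#include <linux/types.h>

#define EXAMPLE_CACHE_SIZE 64	/* bounded, sized against the NAPI budget */

struct example_frame {
	void *page;	/* struct page * in the real driver */
	dma_addr_t dma;	/* the DMA mapping stays live while cached */
};

struct example_page_cache {
	int index;
	struct example_frame buf[EXAMPLE_CACHE_SIZE];
};

static bool example_recycle(struct example_page_cache *c,
			    const struct example_frame *f)
{
	if (c->index >= EXAMPLE_CACHE_SIZE)
		return false;	/* caller unmaps and frees instead */
	c->buf[c->index++] = *f;
	return true;
}

static bool example_reuse(struct example_page_cache *c,
			  struct example_frame *f)
{
	if (!c->index)
		return false;	/* fall back to the page allocator */
	*f = c->buf[--c->index];
	return true;
}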
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 76aa4d271..e2509bba3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -196,6 +196,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
ring->last_nr_txbb = 1;
memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
memset(ring->buf, 0, ring->buf_size);
+ ring->free_tx_desc = mlx4_en_free_tx_desc;
ring->qp_state = MLX4_QP_STATE_RST;
ring->doorbell_qpn = cpu_to_be32(ring->qp.qpn << 8);
@@ -265,10 +266,10 @@ static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
}
-static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
- struct mlx4_en_tx_ring *ring,
- int index, u8 owner, u64 timestamp,
- int napi_mode)
+u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring,
+ int index, u8 owner, u64 timestamp,
+ int napi_mode)
{
struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
@@ -344,6 +345,27 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
return tx_info->nr_txbb;
}
+u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring,
+ int index, u8 owner, u64 timestamp,
+ int napi_mode)
+{
+ struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
+ struct mlx4_en_rx_alloc frame = {
+ .page = tx_info->page,
+ .dma = tx_info->map0_dma,
+ .page_offset = 0,
+ .page_size = PAGE_SIZE,
+ };
+
+ if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
+ dma_unmap_page(priv->ddev, tx_info->map0_dma,
+ PAGE_SIZE, priv->frag_info[0].dma_dir);
+ put_page(tx_info->page);
+ }
+
+ return tx_info->nr_txbb;
+}
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
{
@@ -362,7 +384,7 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
}
while (ring->cons != ring->prod) {
- ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
+ ring->last_nr_txbb = ring->free_tx_desc(priv, ring,
ring->cons & ring->size_mask,
!!(ring->cons & ring->size), 0,
0 /* Non-NAPI caller */);
@@ -444,7 +466,7 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
timestamp = mlx4_en_get_cqe_ts(cqe);
/* free next descriptor */
- last_nr_txbb = mlx4_en_free_tx_desc(
+ last_nr_txbb = ring->free_tx_desc(
priv, ring, ring_index,
!!((ring_cons + txbbs_skipped) &
ring->size), timestamp, napi_budget);
@@ -476,6 +498,9 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;
+ if (ring->free_tx_desc == mlx4_en_recycle_tx_desc)
+ return done < budget;
+
netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
/* Wakeup Tx queue if this stopped, and ring is not full.
@@ -631,8 +656,7 @@ static int get_real_size(const struct sk_buff *skb,
static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
const struct sk_buff *skb,
const struct skb_shared_info *shinfo,
- int real_size, u16 *vlan_tag,
- int tx_ind, void *fragptr)
+ void *fragptr)
{
struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;
@@ -700,10 +724,66 @@ static void mlx4_bf_copy(void __iomem *dst, const void *src,
__iowrite64_copy(dst, src, bytecnt / 8);
}
+void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring)
+{
+ wmb();
+ /* Since there is no iowrite*_native() that writes the
+ * value as is, without byteswapping - using the one
+ * that doesn't do byteswapping in the relevant arch
+ * endianness.
+ */
+#if defined(__LITTLE_ENDIAN)
+ iowrite32(
+#else
+ iowrite32be(
+#endif
+ ring->doorbell_qpn,
+ ring->bf.uar->map + MLX4_SEND_DOORBELL);
+}
+
+static void mlx4_en_tx_write_desc(struct mlx4_en_tx_ring *ring,
+ struct mlx4_en_tx_desc *tx_desc,
+ union mlx4_wqe_qpn_vlan qpn_vlan,
+ int desc_size, int bf_index,
+ __be32 op_own, bool bf_ok,
+ bool send_doorbell)
+{
+ tx_desc->ctrl.qpn_vlan = qpn_vlan;
+
+ if (bf_ok) {
+ op_own |= htonl((bf_index & 0xffff) << 8);
+ /* Ensure new descriptor hits memory
+ * before setting ownership of this descriptor to HW
+ */
+ dma_wmb();
+ tx_desc->ctrl.owner_opcode = op_own;
+
+ wmb();
+
+ mlx4_bf_copy(ring->bf.reg + ring->bf.offset, &tx_desc->ctrl,
+ desc_size);
+
+ wmb();
+
+ ring->bf.offset ^= ring->bf.buf_size;
+ } else {
+ /* Ensure new descriptor hits memory
+ * before setting ownership of this descriptor to HW
+ */
+ dma_wmb();
+ tx_desc->ctrl.owner_opcode = op_own;
+ if (send_doorbell)
+ mlx4_en_xmit_doorbell(ring);
+ else
+ ring->xmit_more++;
+ }
+}
+
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
struct mlx4_en_priv *priv = netdev_priv(dev);
+ union mlx4_wqe_qpn_vlan qpn_vlan = {};
struct device *ddev = priv->ddev;
struct mlx4_en_tx_ring *ring;
struct mlx4_en_tx_desc *tx_desc;
@@ -715,7 +795,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
int real_size;
u32 index, bf_index;
__be32 op_own;
- u16 vlan_tag = 0;
u16 vlan_proto = 0;
int i_frag;
int lso_header_size;
@@ -725,6 +804,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
bool stop_queue;
bool inline_ok;
u32 ring_cons;
+ bool bf_ok;
tx_ind = skb_get_queue_mapping(skb);
ring = priv->tx_ring[tx_ind];
@@ -738,7 +818,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
&inline_ok, &fragptr);
if (unlikely(!real_size))
- goto tx_drop;
+ goto tx_drop_count;
/* Align descriptor to TXBB size */
desc_size = ALIGN(real_size, TXBB_SIZE);
@@ -746,12 +826,20 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
if (netif_msg_tx_err(priv))
en_warn(priv, "Oversized header or SG list\n");
- goto tx_drop;
+ goto tx_drop_count;
}
+ bf_ok = ring->bf_enabled;
if (skb_vlan_tag_present(skb)) {
- vlan_tag = skb_vlan_tag_get(skb);
+ qpn_vlan.vlan_tag = cpu_to_be16(skb_vlan_tag_get(skb));
vlan_proto = be16_to_cpu(skb->vlan_proto);
+ if (vlan_proto == ETH_P_8021AD)
+ qpn_vlan.ins_vlan = MLX4_WQE_CTRL_INS_SVLAN;
+ else if (vlan_proto == ETH_P_8021Q)
+ qpn_vlan.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN;
+ else
+ qpn_vlan.ins_vlan = 0;
+ bf_ok = false;
}
netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
@@ -771,6 +859,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
else {
tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
bounce = true;
+ bf_ok = false;
}
/* Save skb in tx_info ring */
@@ -907,8 +996,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
if (tx_info->inl)
- build_inline_wqe(tx_desc, skb, shinfo, real_size, &vlan_tag,
- tx_ind, fragptr);
+ build_inline_wqe(tx_desc, skb, shinfo, fragptr);
if (skb->encapsulation) {
union {
@@ -946,60 +1034,15 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
real_size = (real_size / 16) & 0x3f;
- if (ring->bf_enabled && desc_size <= MAX_BF && !bounce &&
- !skb_vlan_tag_present(skb) && send_doorbell) {
- tx_desc->ctrl.bf_qpn = ring->doorbell_qpn |
- cpu_to_be32(real_size);
+ bf_ok &= desc_size <= MAX_BF && send_doorbell;
- op_own |= htonl((bf_index & 0xffff) << 8);
- /* Ensure new descriptor hits memory
- * before setting ownership of this descriptor to HW
- */
- dma_wmb();
- tx_desc->ctrl.owner_opcode = op_own;
-
- wmb();
-
- mlx4_bf_copy(ring->bf.reg + ring->bf.offset, &tx_desc->ctrl,
- desc_size);
-
- wmb();
-
- ring->bf.offset ^= ring->bf.buf_size;
- } else {
- tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
- if (vlan_proto == ETH_P_8021AD)
- tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_SVLAN;
- else if (vlan_proto == ETH_P_8021Q)
- tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN;
- else
- tx_desc->ctrl.ins_vlan = 0;
+ if (bf_ok)
+ qpn_vlan.bf_qpn = ring->doorbell_qpn | cpu_to_be32(real_size);
+ else
+ qpn_vlan.fence_size = real_size;
- tx_desc->ctrl.fence_size = real_size;
-
- /* Ensure new descriptor hits memory
- * before setting ownership of this descriptor to HW
- */
- dma_wmb();
- tx_desc->ctrl.owner_opcode = op_own;
- if (send_doorbell) {
- wmb();
- /* Since there is no iowrite*_native() that writes the
- * value as is, without byteswapping - using the one
- * the doesn't do byteswapping in the relevant arch
- * endianness.
- */
-#if defined(__LITTLE_ENDIAN)
- iowrite32(
-#else
- iowrite32be(
-#endif
- ring->doorbell_qpn,
- ring->bf.uar->map + MLX4_SEND_DOORBELL);
- } else {
- ring->xmit_more++;
- }
- }
+ mlx4_en_tx_write_desc(ring, tx_desc, qpn_vlan, desc_size, bf_index,
+ op_own, bf_ok, send_doorbell);
if (unlikely(stop_queue)) {
/* If queue was emptied after the if (stop_queue) , and before
@@ -1028,9 +1071,114 @@ tx_drop_unmap:
PCI_DMA_TODEVICE);
}
+tx_drop_count:
+ ring->tx_dropped++;
tx_drop:
dev_kfree_skb_any(skb);
- ring->tx_dropped++;
return NETDEV_TX_OK;
}
+netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
+ struct net_device *dev, unsigned int length,
+ int tx_ind, int *doorbell_pending)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ union mlx4_wqe_qpn_vlan qpn_vlan = {};
+ struct mlx4_en_tx_ring *ring;
+ struct mlx4_en_tx_desc *tx_desc;
+ struct mlx4_wqe_data_seg *data;
+ struct mlx4_en_tx_info *tx_info;
+ int index, bf_index;
+ bool send_doorbell;
+ int nr_txbb = 1;
+ bool stop_queue;
+ dma_addr_t dma;
+ int real_size;
+ __be32 op_own;
+ u32 ring_cons;
+ bool bf_ok;
+
+ BUILD_BUG_ON_MSG(ALIGN(CTRL_SIZE + DS_SIZE, TXBB_SIZE) != TXBB_SIZE,
+ "mlx4_en_xmit_frame requires minimum size tx desc");
+
+ ring = priv->tx_ring[tx_ind];
+
+ if (!priv->port_up)
+ goto tx_drop;
+
+ if (mlx4_en_is_tx_ring_full(ring))
+ goto tx_drop_count;
+
+ /* fetch ring->cons far ahead before needing it to avoid stall */
+ ring_cons = READ_ONCE(ring->cons);
+
+ index = ring->prod & ring->size_mask;
+ tx_info = &ring->tx_info[index];
+
+ bf_ok = ring->bf_enabled;
+
+ /* Track current inflight packets for performance analysis */
+ AVG_PERF_COUNTER(priv->pstats.inflight_avg,
+ (u32)(ring->prod - ring_cons - 1));
+
+ bf_index = ring->prod;
+ tx_desc = ring->buf + index * TXBB_SIZE;
+ data = &tx_desc->data;
+
+ dma = frame->dma;
+
+ tx_info->page = frame->page;
+ frame->page = NULL;
+ tx_info->map0_dma = dma;
+ tx_info->map0_byte_count = length;
+ tx_info->nr_txbb = nr_txbb;
+ tx_info->nr_bytes = max_t(unsigned int, length, ETH_ZLEN);
+ tx_info->data_offset = (void *)data - (void *)tx_desc;
+ tx_info->ts_requested = 0;
+ tx_info->nr_maps = 1;
+ tx_info->linear = 1;
+ tx_info->inl = 0;
+
+ dma_sync_single_for_device(priv->ddev, dma, length, PCI_DMA_TODEVICE);
+
+ data->addr = cpu_to_be64(dma);
+ data->lkey = ring->mr_key;
+ dma_wmb();
+ data->byte_count = cpu_to_be32(length);
+
+ /* tx completion can avoid cache line miss for common cases */
+ tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
+
+ op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
+ ((ring->prod & ring->size) ?
+ cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
+
+ ring->packets++;
+ ring->bytes += tx_info->nr_bytes;
+ AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, length);
+
+ ring->prod += nr_txbb;
+
+ stop_queue = mlx4_en_is_tx_ring_full(ring);
+ send_doorbell = stop_queue ||
+ *doorbell_pending > MLX4_EN_DOORBELL_BUDGET;
+ bf_ok &= send_doorbell;
+
+ real_size = ((CTRL_SIZE + nr_txbb * DS_SIZE) / 16) & 0x3f;
+
+ if (bf_ok)
+ qpn_vlan.bf_qpn = ring->doorbell_qpn | cpu_to_be32(real_size);
+ else
+ qpn_vlan.fence_size = real_size;
+
+ mlx4_en_tx_write_desc(ring, tx_desc, qpn_vlan, TXBB_SIZE, bf_index,
+ op_own, bf_ok, send_doorbell);
+ *doorbell_pending = send_doorbell ? 0 : *doorbell_pending + 1;
+
+ return NETDEV_TX_OK;
+
+tx_drop_count:
+ ring->tx_dropped++;
+tx_drop:
+ return NETDEV_TX_BUSY;
+}
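
The doorbell batching in mlx4_en_xmit_frame() rings the hardware at most once per MLX4_EN_DOORBELL_BUDGET forwarded frames. A standalone model of that decision, under the simplifying assumption that the ring never fills:

#include <stdbool.h>
#include <stdio.h>

#define DOORBELL_BUDGET 8	/* mirrors MLX4_EN_DOORBELL_BUDGET */

int main(void)
{
	int doorbell_pending = 0;
	int rings = 0;

	for (int frame = 0; frame < 100; frame++) {
		bool ring_full = false;	/* stop_queue in the driver */
		bool send = ring_full || doorbell_pending > DOORBELL_BUDGET;

		if (send)
			rings++;	/* mlx4_en_xmit_doorbell() */
		doorbell_pending = send ? 0 : doorbell_pending + 1;
	}
	printf("%d doorbells for 100 frames\n", rings);
	return 0;
}

The RX polling loop flushes any leftover pending doorbell at its out: label, so the budget only bounds latency within a single NAPI poll.
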
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index f61397745..cf8f8a72a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -1305,8 +1305,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
return 0;
err_out_unmap:
- while (i >= 0)
- mlx4_free_eq(dev, &priv->eq_table.eq[i--]);
+ while (i > 0)
+ mlx4_free_eq(dev, &priv->eq_table.eq[--i]);
#ifdef CONFIG_RFS_ACCEL
for (i = 1; i <= dev->caps.num_ports; i++) {
if (mlx4_priv(dev)->port[i].rmap) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index e97094598..d728704d0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -721,6 +721,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
#define QUERY_DEV_CAP_ETH_BACKPL_OFFSET 0x9c
+#define QUERY_DEV_CAP_DIAG_RPRT_PER_PORT 0x9c
#define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d
#define QUERY_DEV_CAP_VXLAN 0x9e
#define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0
@@ -935,6 +936,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
if (field32 & (1 << 7))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT;
+ MLX4_GET(field32, outbox, QUERY_DEV_CAP_DIAG_RPRT_PER_PORT);
+ if (field32 & (1 << 17))
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT;
MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
if (field & 1<<6)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
@@ -1128,6 +1132,7 @@ int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_c
port_cap->max_pkeys = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
port_cap->max_vl = field & 0xf;
+ port_cap->max_tc_eth = field >> 4;
MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
port_cap->log_max_macs = field & 0xf;
port_cap->log_max_vlans = field >> 4;
@@ -2456,6 +2461,42 @@ int mlx4_NOP(struct mlx4_dev *dev)
MLX4_CMD_NATIVE);
}
+int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier,
+ const u32 offset[],
+ u32 value[], size_t array_len, u8 port)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 *outbox;
+ size_t i;
+ int ret;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ outbox = mailbox->buf;
+
+ ret = mlx4_cmd_box(dev, 0, mailbox->dma, port, op_modifier,
+ MLX4_CMD_DIAG_RPRT, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < array_len; i++) {
+ if (offset[i] > MLX4_MAILBOX_SIZE) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ MLX4_GET(value[i], outbox, offset[i]);
+ }
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return ret;
+}
+EXPORT_SYMBOL(mlx4_query_diag_counters);
+
int mlx4_get_phys_port_id(struct mlx4_dev *dev)
{
u8 port;
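
A hypothetical caller of the newly exported helper, assuming kernel context; the two offsets are placeholders rather than documented DIAG_RPRT layout:

/* Hypothetical caller sketch for mlx4_query_diag_counters(); the
 * offsets below are illustrative placeholders.
 */
static int example_read_diag(struct mlx4_dev *dev)
{
	static const u32 offs[] = { 0x10, 0x14 };
	u32 vals[ARRAY_SIZE(offs)];
	int err;

	err = mlx4_query_diag_counters(dev, 0 /* op_modifier */, offs,
				       vals, ARRAY_SIZE(offs), 1 /* port */);
	if (err)
		return err;

	pr_info("diag[0]=%u diag[1]=%u\n", vals[0], vals[1]);
	return 0;
}
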
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 7ea258af6..cdbd76f10 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -53,6 +53,7 @@ struct mlx4_port_cap {
int ib_mtu;
int max_port_width;
int max_vl;
+ int max_tc_eth;
int max_gids;
int max_pkeys;
u64 def_mac;
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index dec77d6f0..0e8b7c449 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -147,7 +147,7 @@ int mlx4_do_bond(struct mlx4_dev *dev, bool enable)
if (enable) {
dev->flags |= MLX4_FLAG_BONDED;
} else {
- ret = mlx4_virt2phy_port_map(dev, 1, 2);
+ ret = mlx4_virt2phy_port_map(dev, 1, 2);
if (ret) {
mlx4_err(dev, "Fail to reset port map\n");
return ret;
@@ -218,6 +218,9 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_interface *intf;
+ if (!(dev->persist->interface_state & MLX4_INTERFACE_STATE_UP))
+ return;
+
mlx4_stop_catas_poll(dev);
mutex_lock(&intf_mutex);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 546fab0ec..7183ac413 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -292,6 +292,7 @@ static int _mlx4_dev_port(struct mlx4_dev *dev, int port,
dev->caps.pkey_table_len[port] = port_cap->max_pkeys;
dev->caps.port_width_cap[port] = port_cap->max_port_width;
dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu;
+ dev->caps.max_tc_eth = port_cap->max_tc_eth;
dev->caps.def_mac[port] = port_cap->def_mac;
dev->caps.supported_type[port] = port_cap->supported_port_types;
dev->caps.suggested_type[port] = port_cap->suggested_type;
@@ -2599,7 +2600,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
err = mlx4_init_uar_table(dev);
if (err) {
mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
- return err;
+ return err;
}
err = mlx4_uar_alloc(dev, &priv->driver_uar);
@@ -2969,6 +2970,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_attr);
+ devlink_port_unregister(&info->devlink_port);
info->port = -1;
}
@@ -2983,6 +2985,8 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_mtu_attr);
+ devlink_port_unregister(&info->devlink_port);
+
#ifdef CONFIG_RFS_ACCEL
free_irq_cpu_rmap(info->rmap);
info->rmap = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index f2d092001..94b891c11 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -618,8 +618,8 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
err = mlx4_READ_ENTRY(dev,
entry->index,
mailbox);
- if (err)
- goto out_mailbox;
+ if (err)
+ goto out_mailbox;
members_count =
be32_to_cpu(mgm->members_count) &
0xffffff;
@@ -657,8 +657,8 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
err = mlx4_WRITE_ENTRY(dev,
entry->index,
mailbox);
- if (err)
- goto out_mailbox;
+ if (err)
+ goto out_mailbox;
}
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 13d297ee3..9099dbd04 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -132,6 +132,7 @@ enum {
MLX4_EN_NUM_UP)
#define MLX4_EN_DEFAULT_TX_WORK 256
+#define MLX4_EN_DOORBELL_BUDGET 8
/* Target number of packets to coalesce with interrupt moderation */
#define MLX4_EN_RX_COAL_TARGET 44
@@ -164,6 +165,10 @@ enum {
#define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN)
#define MLX4_EN_MIN_MTU 46
+/* VLAN_HLEN is added twice, to support skb vlan tagged with multiple
+ * headers. (For example: ETH_P_8021Q and ETH_P_8021AD).
+ */
+#define MLX4_EN_EFF_MTU(mtu) ((mtu) + ETH_HLEN + (2 * VLAN_HLEN))
#define ETH_BCAST 0xffffffffffffULL
#define MLX4_EN_LOOPBACK_RETRIES 5
@@ -215,7 +220,10 @@ enum cq_type {
struct mlx4_en_tx_info {
- struct sk_buff *skb;
+ union {
+ struct sk_buff *skb;
+ struct page *page;
+ };
dma_addr_t map0_dma;
u32 map0_byte_count;
u32 nr_txbb;
@@ -255,6 +263,14 @@ struct mlx4_en_rx_alloc {
u32 page_size;
};
+#define MLX4_EN_CACHE_SIZE (2 * NAPI_POLL_WEIGHT)
+struct mlx4_en_page_cache {
+ u32 index;
+ struct mlx4_en_rx_alloc buf[MLX4_EN_CACHE_SIZE];
+};
+
+struct mlx4_en_priv;
+
struct mlx4_en_tx_ring {
/* cache line used and dirtied in tx completion
* (mlx4_en_free_tx_buf())
@@ -288,6 +304,11 @@ struct mlx4_en_tx_ring {
__be32 mr_key;
void *buf;
struct mlx4_en_tx_info *tx_info;
+ struct mlx4_en_rx_ring *recycle_ring;
+ u32 (*free_tx_desc)(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring,
+ int index, u8 owner,
+ u64 timestamp, int napi_mode);
u8 *bounce_buf;
struct mlx4_qp_context context;
int qpn;
@@ -319,6 +340,8 @@ struct mlx4_en_rx_ring {
u8 fcs_del;
void *buf;
void *rx_info;
+ struct bpf_prog *xdp_prog;
+ struct mlx4_en_page_cache page_cache;
unsigned long bytes;
unsigned long packets;
unsigned long csum_ok;
@@ -440,7 +463,9 @@ struct mlx4_en_mc_list {
struct mlx4_en_frag_info {
u16 frag_size;
u16 frag_prefix_size;
- u16 frag_stride;
+ u32 frag_stride;
+ enum dma_data_direction dma_dir;
+ int order;
};
#ifdef CONFIG_MLX4_EN_DCB
@@ -450,6 +475,17 @@ struct mlx4_en_frag_info {
#define MLX4_EN_TC_ETS 7
+enum dcb_pfc_type {
+ pfc_disabled = 0,
+ pfc_enabled_full,
+ pfc_enabled_tx,
+ pfc_enabled_rx
+};
+
+struct mlx4_en_cee_config {
+ bool pfc_state;
+ enum dcb_pfc_type dcb_pfc[MLX4_EN_NUM_UP];
+};
#endif
struct ethtool_flow_id {
@@ -469,6 +505,9 @@ enum {
MLX4_EN_FLAG_RX_FILTER_NEEDED = (1 << 3),
MLX4_EN_FLAG_FORCE_PROMISC = (1 << 4),
MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP = (1 << 5),
+#ifdef CONFIG_MLX4_EN_DCB
+ MLX4_EN_FLAG_DCB_ENABLED = (1 << 6),
+#endif
};
#define PORT_BEACON_MAX_LIMIT (65535)
@@ -536,6 +575,7 @@ struct mlx4_en_priv {
struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS];
u16 num_frags;
u16 log_rx_info;
+ int xdp_ring_num;
struct mlx4_en_tx_ring **tx_ring;
struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS];
@@ -547,10 +587,8 @@ struct mlx4_en_priv {
struct work_struct linkstate_task;
struct delayed_work stats_task;
struct delayed_work service_task;
-#ifdef CONFIG_MLX4_EN_VXLAN
struct work_struct vxlan_add_task;
struct work_struct vxlan_del_task;
-#endif
struct mlx4_en_perf_stats pstats;
struct mlx4_en_pkt_stats pkstats;
struct mlx4_en_counter_stats pf_stats;
@@ -572,9 +610,12 @@ struct mlx4_en_priv {
u32 counter_index;
#ifdef CONFIG_MLX4_EN_DCB
+#define MLX4_EN_DCB_ENABLED 0x3
struct ieee_ets ets;
u16 maxrate[IEEE_8021QAZ_MAX_TCS];
enum dcbnl_cndd_states cndd_state[IEEE_8021QAZ_MAX_TCS];
+ struct mlx4_en_cee_config cee_config;
+ u8 dcbx_cap;
#endif
#ifdef CONFIG_RFS_ACCEL
spinlock_t filters_lock;
@@ -644,6 +685,12 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback);
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
+ struct net_device *dev, unsigned int length,
+ int tx_ind, int *doorbell_pending);
+void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring);
+bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
+ struct mlx4_en_rx_alloc *frame);
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring,
@@ -672,6 +719,14 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
int budget);
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget);
+u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring,
+ int index, u8 owner, u64 timestamp,
+ int napi_mode);
+u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring,
+ int index, u8 owner, u64 timestamp,
+ int napi_mode);
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
int is_tx, int rss, int qpn, int cqn, int user_prio,
struct mlx4_qp_context *context);
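
The page_cache added to the RX ring is a small bounded LIFO shared between TX completion (recycle) and RX refill. A userspace model of the intended push/pop discipline, not the driver's actual code:

#include <stdbool.h>
#include <stddef.h>

#define CACHE_SIZE 128			/* stands in for MLX4_EN_CACHE_SIZE */

struct page_cache {
	unsigned int index;
	void *buf[CACHE_SIZE];
};

/* TX completion side: keep the page if there is room; otherwise the
 * caller unmaps and frees it (see mlx4_en_recycle_tx_desc above).
 */
static bool cache_push(struct page_cache *c, void *page)
{
	if (c->index >= CACHE_SIZE)
		return false;
	c->buf[c->index++] = page;
	return true;
}

/* RX refill side: reuse a cached page before allocating a new one. */
static void *cache_pop(struct page_cache *c)
{
	return c->index ? c->buf[--c->index] : NULL;
}
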
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 93195191f..395b5463c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -248,7 +248,7 @@ static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
offset, order);
return;
}
- __mlx4_free_mtt_range(dev, offset, order);
+ __mlx4_free_mtt_range(dev, offset, order);
}
void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index b3cc3ab63..6fc156a39 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -205,7 +205,9 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
goto free_uar;
}
- uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT);
+ uar->bf_map = io_mapping_map_wc(priv->bf_mapping,
+ uar->index << PAGE_SHIFT,
+ PAGE_SIZE);
if (!uar->bf_map) {
err = -ENOMEM;
goto unamp_uar;
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 087b23b32..c5b206429 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -52,6 +52,7 @@
#define MLX4_FLAG_V_IGNORE_FCS_MASK 0x2
#define MLX4_IGNORE_FCS_MASK 0x1
+#define MLX4_TC_MAX_NUMBER 8
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
{
@@ -2015,3 +2016,14 @@ out:
return ret;
}
EXPORT_SYMBOL(mlx4_get_module_info);
+
+int mlx4_max_tc(struct mlx4_dev *dev)
+{
+ u8 num_tc = dev->caps.max_tc_eth;
+
+ if (!num_tc)
+ num_tc = MLX4_TC_MAX_NUMBER;
+
+ return num_tc;
+}
+EXPORT_SYMBOL(mlx4_max_tc);
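
A hypothetical consumer of the new export, for instance when sizing per-TC state:

/* Hypothetical caller: mlx4_max_tc() reports the firmware capability,
 * falling back to 8 when QUERY_PORT leaves max_tc_eth at zero.
 */
static int example_alloc_tc_state(struct mlx4_dev *dev)
{
	int num_tc = mlx4_max_tc(dev);

	return num_tc;	/* bound for per-TC arrays */
}
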
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index cd9b2b28d..8b81114bd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2372,16 +2372,15 @@ static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
__mlx4_mpt_release(dev, index);
break;
case RES_OP_MAP_ICM:
- index = get_param_l(&in_param);
- id = index & mpt_mask(dev);
- err = mr_res_start_move_to(dev, slave, id,
- RES_MPT_RESERVED, &mpt);
- if (err)
- return err;
-
- __mlx4_mpt_free_icm(dev, mpt->key);
- res_end_move(dev, slave, RES_MPT, id);
+ index = get_param_l(&in_param);
+ id = index & mpt_mask(dev);
+ err = mr_res_start_move_to(dev, slave, id,
+ RES_MPT_RESERVED, &mpt);
+ if (err)
+ return err;
+
+ __mlx4_mpt_free_icm(dev, mpt->key);
+ res_end_move(dev, slave, RES_MPT, id);
break;
default:
err = -EINVAL;
@@ -4253,9 +4252,8 @@ int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) &&
!(dev->caps.flags2 &
MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
- mlx4_warn(dev,
- "Src check LB for slave %d isn't supported\n",
- slave);
+ mlx4_warn(dev, "Src check LB for slave %d isn't supported\n",
+ slave);
return -ENOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 1cf722eba..aae46884b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -4,6 +4,7 @@
config MLX5_CORE
tristate "Mellanox Technologies ConnectX-4 and Connect-IB core driver"
+ depends on MAY_USE_DEVLINK
depends on PCI
default n
---help---
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 9ea7b5830..05cc1effc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -1,11 +1,13 @@
obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
- health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
- mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o fs_counters.o
+ health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
+ mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
+ fs_counters.o rl.o
-mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \
- en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \
- en_txrx.o en_clock.o vxlan.o en_tc.o en_arfs.o
+mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \
+ en_main.o en_common.o en_fs.o en_ethtool.o en_tx.o \
+ en_rx.o en_rx_am.o en_txrx.o en_clock.o vxlan.o \
+ en_tc.o en_arfs.o en_rep.o en_fs_ethtool.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 943b1bd43..bf722aa88 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -44,6 +44,7 @@
#include <linux/mlx5/vport.h>
#include <linux/mlx5/transobj.h>
#include <linux/rhashtable.h>
+#include <net/switchdev.h>
#include "wq.h"
#include "mlx5_core.h"
#include "en_stats.h"
@@ -72,13 +73,18 @@
#define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
#define MLX5_MPWRQ_STRIDES_PER_PAGE (MLX5_MPWRQ_NUM_STRIDES >> \
MLX5_MPWRQ_WQE_PAGE_ORDER)
-#define MLX5_CHANNEL_MAX_NUM_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8) * \
- BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW))
+
+#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
+#define MLX5E_REQUIRED_MTTS(rqs, wqes)\
+ (rqs * wqes * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
+#define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) <= U16_MAX)
+
#define MLX5_UMR_ALIGN (2048)
#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128)
#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
+#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
@@ -88,6 +94,7 @@
#define MLX5E_LOG_INDIR_RQT_SIZE 0x7
#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE)
#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1)
+#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
#define MLX5E_SQ_BF_BUDGET 16
@@ -126,6 +133,12 @@ static inline int mlx5_max_log_rq_size(int wq_type)
}
}
+enum {
+ MLX5E_INLINE_MODE_L2,
+ MLX5E_INLINE_MODE_VPORT_CONTEXT,
+ MLX5_INLINE_MODE_NOT_REQUIRED,
+};
+
struct mlx5e_tx_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_eth_seg eth;
@@ -143,10 +156,31 @@ struct mlx5e_umr_wqe {
struct mlx5_wqe_data_seg data;
};
+static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
+ "rx_cqe_moder",
+};
+
+enum mlx5e_priv_flag {
+ MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0),
+};
+
+#define MLX5E_SET_PRIV_FLAG(priv, pflag, enable) \
+ do { \
+ if (enable) \
+ priv->pflags |= pflag; \
+ else \
+ priv->pflags &= ~pflag; \
+ } while (0)
+
#ifdef CONFIG_MLX5_CORE_EN_DCB
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
#endif
+struct mlx5e_cq_moder {
+ u16 usec;
+ u16 pkts;
+};
+
struct mlx5e_params {
u8 log_sq_size;
u8 rq_wq_type;
@@ -155,16 +189,16 @@ struct mlx5e_params {
u8 log_rq_size;
u16 num_channels;
u8 num_tc;
+ u8 rx_cq_period_mode;
bool rx_cqe_compress_admin;
bool rx_cqe_compress;
- u16 rx_cq_moderation_usec;
- u16 rx_cq_moderation_pkts;
- u16 tx_cq_moderation_usec;
- u16 tx_cq_moderation_pkts;
+ struct mlx5e_cq_moder rx_cq_moderation;
+ struct mlx5e_cq_moder tx_cq_moderation;
u16 min_rx_wqes;
bool lro_en;
u32 lro_wqe_sz;
u16 tx_max_inline;
+ u8 tx_min_inline_mode;
u8 rss_hfunc;
u8 toeplitz_hash_key[40];
u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
@@ -172,6 +206,7 @@ struct mlx5e_params {
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct ieee_ets ets;
#endif
+ bool rx_am_enabled;
};
struct mlx5e_tstamp {
@@ -188,9 +223,9 @@ struct mlx5e_tstamp {
};
enum {
- MLX5E_RQ_STATE_POST_WQES_ENABLE,
+ MLX5E_RQ_STATE_FLUSH,
MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
- MLX5E_RQ_STATE_FLUSH_TIMEOUT,
+ MLX5E_RQ_STATE_AM,
};
struct mlx5e_cq {
@@ -198,6 +233,7 @@ struct mlx5e_cq {
struct mlx5_cqwq wq;
/* data path - accessed per napi poll */
+ u16 event_ctr;
struct napi_struct *napi;
struct mlx5_core_cq mcq;
struct mlx5e_channel *channel;
@@ -227,6 +263,30 @@ struct mlx5e_dma_info {
dma_addr_t addr;
};
+struct mlx5e_rx_am_stats {
+ int ppms; /* packets per msec */
+ int epms; /* events per msec */
+};
+
+struct mlx5e_rx_am_sample {
+ ktime_t time;
+ unsigned int pkt_ctr;
+ u16 event_ctr;
+};
+
+struct mlx5e_rx_am { /* Adaptive Moderation */
+ u8 state;
+ struct mlx5e_rx_am_stats prev_stats;
+ struct mlx5e_rx_am_sample start_sample;
+ struct work_struct work;
+ u8 profile_ix;
+ u8 mode;
+ u8 tune_state;
+ u8 steps_right;
+ u8 steps_left;
+ u8 tired;
+};
+
struct mlx5e_rq {
/* data path */
struct mlx5_wq_ll wq;
@@ -247,6 +307,9 @@ struct mlx5e_rq {
unsigned long state;
int ix;
+ u32 mpwqe_mtt_offset;
+
+ struct mlx5e_rx_am am; /* Adaptive Moderation */
/* control */
struct mlx5_wq_ctrl wq_ctrl;
@@ -306,9 +369,8 @@ struct mlx5e_sq_dma {
};
enum {
- MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
+ MLX5E_SQ_STATE_FLUSH,
MLX5E_SQ_STATE_BF_ENABLE,
- MLX5E_SQ_STATE_TX_TIMEOUT,
};
struct mlx5e_ico_wqe_info {
@@ -346,6 +408,7 @@ struct mlx5e_sq {
u32 sqn;
u16 bf_buf_size;
u16 max_inline;
+ u8 min_inline_mode;
u16 edge;
struct device *pdev;
struct mlx5e_tstamp *tstamp;
@@ -358,6 +421,7 @@ struct mlx5e_sq {
struct mlx5e_channel *channel;
int tc;
struct mlx5e_ico_wqe_info *ico_wqe_info;
+ u32 rate_limit;
} ____cacheline_aligned_in_smp;
static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
@@ -495,8 +559,24 @@ enum {
MLX5E_ARFS_FT_LEVEL
};
+struct mlx5e_ethtool_table {
+ struct mlx5_flow_table *ft;
+ int num_rules;
+};
+
+#define ETHTOOL_NUM_L3_L4_FTS 7
+#define ETHTOOL_NUM_L2_FTS 4
+
+struct mlx5e_ethtool_steering {
+ struct mlx5e_ethtool_table l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
+ struct mlx5e_ethtool_table l2_ft[ETHTOOL_NUM_L2_FTS];
+ struct list_head rules;
+ int tot_num_rules;
+};
+
struct mlx5e_flow_steering {
struct mlx5_flow_namespace *ns;
+ struct mlx5e_ethtool_steering ethtool;
struct mlx5e_tc_table tc;
struct mlx5e_vlan_table vlan;
struct mlx5e_l2_table l2;
@@ -504,9 +584,15 @@ struct mlx5e_flow_steering {
struct mlx5e_arfs_tables arfs;
};
-struct mlx5e_direct_tir {
- u32 tirn;
+struct mlx5e_rqt {
u32 rqtn;
+ bool enabled;
+};
+
+struct mlx5e_tir {
+ u32 tirn;
+ struct mlx5e_rqt rqt;
+ struct list_head list;
};
enum {
@@ -514,6 +600,22 @@ enum {
MLX5E_NIC_PRIO
};
+struct mlx5e_profile {
+ void (*init)(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile, void *ppriv);
+ void (*cleanup)(struct mlx5e_priv *priv);
+ int (*init_rx)(struct mlx5e_priv *priv);
+ void (*cleanup_rx)(struct mlx5e_priv *priv);
+ int (*init_tx)(struct mlx5e_priv *priv);
+ void (*cleanup_tx)(struct mlx5e_priv *priv);
+ void (*enable)(struct mlx5e_priv *priv);
+ void (*disable)(struct mlx5e_priv *priv);
+ void (*update_stats)(struct mlx5e_priv *priv);
+ int (*max_nch)(struct mlx5_core_dev *mdev);
+ int max_tc;
+};
+
struct mlx5e_priv {
/* priv data path fields - start */
struct mlx5e_sq **txq_to_sq_map;
@@ -522,18 +624,15 @@ struct mlx5e_priv {
unsigned long state;
struct mutex state_lock; /* Protects Interface state */
- struct mlx5_uar cq_uar;
- u32 pdn;
- u32 tdn;
- struct mlx5_core_mkey mkey;
struct mlx5_core_mkey umr_mkey;
struct mlx5e_rq drop_rq;
struct mlx5e_channel **channel;
u32 tisn[MLX5E_MAX_NUM_TC];
- u32 indir_rqtn;
- u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
- struct mlx5e_direct_tir direct_tir[MLX5E_MAX_NUM_CHANNELS];
+ struct mlx5e_rqt indir_rqt;
+ struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
+ struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS];
+ u32 tx_rates[MLX5E_MAX_NUM_SQS];
struct mlx5e_flow_steering fs;
struct mlx5e_vxlan_db vxlan;
@@ -545,11 +644,14 @@ struct mlx5e_priv {
struct work_struct tx_timeout_work;
struct delayed_work update_stats_work;
+ u32 pflags;
struct mlx5_core_dev *mdev;
struct net_device *netdev;
struct mlx5e_stats stats;
struct mlx5e_tstamp tstamp;
u16 q_counter;
+ const struct mlx5e_profile *profile;
+ void *ppriv;
};
enum mlx5e_link_mode {
@@ -567,6 +669,7 @@ enum mlx5e_link_mode {
MLX5E_10GBASE_ER = 14,
MLX5E_40GBASE_SR4 = 15,
MLX5E_40GBASE_LR4 = 16,
+ MLX5E_50GBASE_SR2 = 18,
MLX5E_100GBASE_CR4 = 20,
MLX5E_100GBASE_SR4 = 21,
MLX5E_100GBASE_KR4 = 22,
@@ -584,6 +687,9 @@ enum mlx5e_link_mode {
#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
+
+void mlx5e_build_ptys2ethtool_map(void);
+
void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw);
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback);
@@ -595,7 +701,6 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_tx_descs(struct mlx5e_sq *sq);
-void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
@@ -621,12 +726,26 @@ void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
struct mlx5e_mpw_info *wi);
struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
+void mlx5e_rx_am(struct mlx5e_rq *rq);
+void mlx5e_rx_am_work(struct work_struct *work);
+struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode);
+
void mlx5e_update_stats(struct mlx5e_priv *priv);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
+int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
+ int location);
+int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
+ struct ethtool_rxnfc *info, u32 *rule_locs);
+int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
+ struct ethtool_rx_flow_spec *fs);
+int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv,
+ int location);
+void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
+void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
void mlx5e_set_rx_mode_work(struct work_struct *work);
void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp,
@@ -656,6 +775,9 @@ void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
int num_channels);
int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
+void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
+ u8 cq_period_mode);
+
static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz)
{
@@ -694,11 +816,6 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
MLX5E_MAX_NUM_CHANNELS);
}
-static inline int mlx5e_get_mtt_octw(int npages)
-{
- return ALIGN(npages, 8) / 2;
-}
-
extern const struct ethtool_ops mlx5e_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
@@ -732,5 +849,39 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
#endif
u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
+int mlx5e_create_tir(struct mlx5_core_dev *mdev,
+ struct mlx5e_tir *tir, u32 *in, int inlen);
+void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
+ struct mlx5e_tir *tir);
+int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
+void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
+int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev);
+
+struct mlx5_eswitch_rep;
+int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep);
+void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep);
+int mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep);
+void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep);
+int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
+void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
+int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr);
+
+int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
+void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
+int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
+void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
+int mlx5e_create_tises(struct mlx5e_priv *priv);
+void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv);
+int mlx5e_close(struct net_device *netdev);
+int mlx5e_open(struct net_device *netdev);
+void mlx5e_update_stats_work(struct work_struct *work);
+void *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *profile, void *ppriv);
+void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv);
+struct rtnl_link_stats64 *
+mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
#endif /* __MLX5_EN_H__ */
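
The MTT accounting macros introduced above replace the old per-channel constant; their arithmetic can be sanity-checked standalone. The PAGES_PER_WQE value below is an assumption for illustration:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)		(((x) + (a) - 1) / (a) * (a))
#define PAGES_PER_WQE		16	/* assumed MLX5_MPWRQ_PAGES_PER_WQE */
#define MTT_OCTW(npages)	(ALIGN_UP(npages, 8) / 2)
#define REQUIRED_MTTS(rqs, wqes) \
	((rqs) * (wqes) * ALIGN_UP(PAGES_PER_WQE, 8))
#define VALID_NUM_MTTS(n)	(MTT_OCTW(n) <= UINT16_MAX)

int main(void)
{
	unsigned int mtts = REQUIRED_MTTS(16, 1024);

	/* Large ring/channel counts can overflow the 16-bit octword
	 * field, which is exactly what MLX5E_VALID_NUM_MTTS guards.
	 */
	printf("mtts=%u octwords=%u valid=%d\n",
	       mtts, MTT_OCTW(mtts), VALID_NUM_MTTS(mtts));
	return 0;
}
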
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 3515e78ba..a8cb38789 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -93,14 +93,14 @@ static enum mlx5e_traffic_types arfs_get_tt(enum arfs_type type)
static int arfs_disable(struct mlx5e_priv *priv)
{
struct mlx5_flow_destination dest;
- u32 *tirn = priv->indir_tirn;
+ struct mlx5e_tir *tir = priv->indir_tir;
int err = 0;
int tt;
int i;
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
for (i = 0; i < ARFS_NUM_TYPES; i++) {
- dest.tir_num = tirn[i];
+ dest.tir_num = tir[i].tirn;
tt = arfs_get_tt(i);
/* Modify ttc rules destination to bypass the aRFS tables*/
err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
@@ -175,15 +175,12 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
{
struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
struct mlx5_flow_destination dest;
- u8 match_criteria_enable = 0;
- u32 *tirn = priv->indir_tirn;
- u32 *match_criteria;
- u32 *match_value;
+ struct mlx5e_tir *tir = priv->indir_tir;
+ struct mlx5_flow_spec *spec;
int err = 0;
- match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- if (!match_value || !match_criteria) {
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
err = -ENOMEM;
goto out;
@@ -192,24 +189,23 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
switch (type) {
case ARFS_IPV4_TCP:
- dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
+ dest.tir_num = tir[MLX5E_TT_IPV4_TCP].tirn;
break;
case ARFS_IPV4_UDP:
- dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
+ dest.tir_num = tir[MLX5E_TT_IPV4_UDP].tirn;
break;
case ARFS_IPV6_TCP:
- dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
+ dest.tir_num = tir[MLX5E_TT_IPV6_TCP].tirn;
break;
case ARFS_IPV6_UDP:
- dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
+ dest.tir_num = tir[MLX5E_TT_IPV6_UDP].tirn;
break;
default:
err = -EINVAL;
goto out;
}
- arfs_t->default_rule = mlx5_add_flow_rule(arfs_t->ft.t, match_criteria_enable,
- match_criteria, match_value,
+ arfs_t->default_rule = mlx5_add_flow_rule(arfs_t->ft.t, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG,
&dest);
@@ -220,8 +216,7 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
__func__, type);
}
out:
- kvfree(match_criteria);
- kvfree(match_value);
+ kvfree(spec);
return err;
}
@@ -475,23 +470,20 @@ static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,
struct mlx5_flow_rule *rule = NULL;
struct mlx5_flow_destination dest;
struct arfs_table *arfs_table;
- u8 match_criteria_enable = 0;
+ struct mlx5_flow_spec *spec;
struct mlx5_flow_table *ft;
- u32 *match_criteria;
- u32 *match_value;
int err = 0;
- match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- if (!match_value || !match_criteria) {
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
err = -ENOMEM;
goto out;
}
- match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.ethertype);
- MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype,
ntohs(tuple->etype));
arfs_table = arfs_get_table(arfs, tuple->ip_proto, tuple->etype);
if (!arfs_table) {
@@ -501,59 +493,58 @@ static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,
ft = arfs_table->ft.t;
if (tuple->ip_proto == IPPROTO_TCP) {
- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.tcp_dport);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.tcp_sport);
- MLX5_SET(fte_match_param, match_value, outer_headers.tcp_dport,
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_dport,
ntohs(tuple->dst_port));
- MLX5_SET(fte_match_param, match_value, outer_headers.tcp_sport,
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_sport,
ntohs(tuple->src_port));
} else {
- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.udp_dport);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.udp_sport);
- MLX5_SET(fte_match_param, match_value, outer_headers.udp_dport,
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport,
ntohs(tuple->dst_port));
- MLX5_SET(fte_match_param, match_value, outer_headers.udp_sport,
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_sport,
ntohs(tuple->src_port));
}
if (tuple->etype == htons(ETH_P_IP)) {
- memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
&tuple->src_ipv4,
4);
- memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
&tuple->dst_ipv4,
4);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
} else {
- memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
&tuple->src_ipv6,
16);
- memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
&tuple->dst_ipv6,
16);
- memset(MLX5_ADDR_OF(fte_match_param, match_criteria,
+ memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
0xff,
16);
- memset(MLX5_ADDR_OF(fte_match_param, match_criteria,
+ memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
0xff,
16);
}
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn;
- rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria,
- match_value, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ rule = mlx5_add_flow_rule(ft, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG,
&dest);
if (IS_ERR(rule)) {
@@ -563,8 +554,7 @@ static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,
}
out:
- kvfree(match_criteria);
- kvfree(match_value);
+ kvfree(spec);
return err ? ERR_PTR(err) : rule;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
new file mode 100644
index 000000000..9cce153e1
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en.h"
+
+/* mlx5e global resources should be placed in this file.
+ * Global resources are common to all the netdevices created on the same NIC.
+ */
+
+int mlx5e_create_tir(struct mlx5_core_dev *mdev,
+ struct mlx5e_tir *tir, u32 *in, int inlen)
+{
+ int err;
+
+ err = mlx5_core_create_tir(mdev, in, inlen, &tir->tirn);
+ if (err)
+ return err;
+
+ list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
+
+ return 0;
+}
+
+void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
+ struct mlx5e_tir *tir)
+{
+ mlx5_core_destroy_tir(mdev, tir->tirn);
+ list_del(&tir->list);
+}
+
+static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
+ struct mlx5_core_mkey *mkey)
+{
+ struct mlx5_create_mkey_mbox_in *in;
+ int err;
+
+ in = mlx5_vzalloc(sizeof(*in));
+ if (!in)
+ return -ENOMEM;
+
+ in->seg.flags = MLX5_PERM_LOCAL_WRITE |
+ MLX5_PERM_LOCAL_READ |
+ MLX5_ACCESS_MODE_PA;
+ in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
+ in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+
+ err = mlx5_core_create_mkey(mdev, mkey, in, sizeof(*in), NULL, NULL,
+ NULL);
+
+ kvfree(in);
+
+ return err;
+}
+
+int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
+{
+ struct mlx5e_resources *res = &mdev->mlx5e_res;
+ int err;
+
+ err = mlx5_alloc_map_uar(mdev, &res->cq_uar, false);
+ if (err) {
+ mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
+ return err;
+ }
+
+ err = mlx5_core_alloc_pd(mdev, &res->pdn);
+ if (err) {
+ mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
+ goto err_unmap_free_uar;
+ }
+
+ err = mlx5_core_alloc_transport_domain(mdev, &res->td.tdn);
+ if (err) {
+ mlx5_core_err(mdev, "alloc td failed, %d\n", err);
+ goto err_dealloc_pd;
+ }
+
+ err = mlx5e_create_mkey(mdev, res->pdn, &res->mkey);
+ if (err) {
+ mlx5_core_err(mdev, "create mkey failed, %d\n", err);
+ goto err_dealloc_transport_domain;
+ }
+
+ INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
+
+ return 0;
+
+err_dealloc_transport_domain:
+ mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
+err_dealloc_pd:
+ mlx5_core_dealloc_pd(mdev, res->pdn);
+err_unmap_free_uar:
+ mlx5_unmap_free_uar(mdev, &res->cq_uar);
+
+ return err;
+}
+
+void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
+{
+ struct mlx5e_resources *res = &mdev->mlx5e_res;
+
+ mlx5_core_destroy_mkey(mdev, &res->mkey);
+ mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
+ mlx5_core_dealloc_pd(mdev, res->pdn);
+ mlx5_unmap_free_uar(mdev, &res->cq_uar);
+}
+
+int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev)
+{
+ struct mlx5e_tir *tir;
+ void *in;
+ int inlen;
+ int err = 0;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
+
+ list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
+ err = mlx5_core_modify_tir(mdev, tir->tirn, in, inlen);
+ if (err)
+ goto out;
+ }
+
+out:
+ kvfree(in);
+
+ return err;
+}
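
A sketch of how the shared-resource API above pairs up, assuming a kernel caller during netdev setup:

/* Hypothetical caller: TIRs created through mlx5e_create_tir() land on
 * mdev->mlx5e_res.td.tirs_list, which is what lets
 * mlx5e_refresh_tirs_self_loopback_enable() walk every TIR of every
 * netdevice on the same NIC.
 */
static int example_tir_lifecycle(struct mlx5_core_dev *mdev,
				 struct mlx5e_tir *tir,
				 u32 *in, int inlen)
{
	int err;

	err = mlx5e_create_tir(mdev, tir, in, inlen);
	if (err)
		return err;

	err = mlx5e_refresh_tirs_self_loopback_enable(mdev);
	if (err)
		mlx5e_destroy_tir(mdev, tir);

	return err;
}
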
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index c585349e0..762af16ed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -127,29 +127,40 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
return mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw);
}
-static int mlx5e_dbcnl_validate_ets(struct ieee_ets *ets)
+static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
+ struct ieee_ets *ets)
{
int bw_sum = 0;
int i;
/* Validate Priority */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY)
+ if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) {
+ netdev_err(netdev,
+ "Failed to validate ETS: priority value greater than max(%d)\n",
+ MLX5E_MAX_PRIORITY);
return -EINVAL;
+ }
}
/* Validate Bandwidth Sum */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
- if (!ets->tc_tx_bw[i])
+ if (!ets->tc_tx_bw[i]) {
+ netdev_err(netdev,
+ "Failed to validate ETS: BW 0 is illegal\n");
return -EINVAL;
+ }
bw_sum += ets->tc_tx_bw[i];
}
}
- if (bw_sum != 0 && bw_sum != 100)
+ if (bw_sum != 0 && bw_sum != 100) {
+ netdev_err(netdev,
+ "Failed to validate ETS: BW sum is illegal\n");
return -EINVAL;
+ }
return 0;
}
@@ -159,7 +170,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
- err = mlx5e_dbcnl_validate_ets(ets);
+ err = mlx5e_dbcnl_validate_ets(netdev, ets);
if (err)
return err;
@@ -195,7 +206,6 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
- enum mlx5_port_status ps;
u8 curr_pfc_en;
int ret;
@@ -204,14 +214,8 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
if (pfc->pfc_en == curr_pfc_en)
return 0;
- mlx5_query_port_admin_status(mdev, &ps);
- if (ps == MLX5_PORT_UP)
- mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
-
ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en);
-
- if (ps == MLX5_PORT_UP)
- mlx5_set_port_admin_status(mdev, MLX5_PORT_UP);
+ mlx5_toggle_port_link(mdev);
return ret;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index e667a870e..7a346bb2e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -48,123 +48,85 @@ static void mlx5e_get_drvinfo(struct net_device *dev,
sizeof(drvinfo->bus_info));
}
-static const struct {
- u32 supported;
- u32 advertised;
+struct ptys2ethtool_config {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised);
u32 speed;
-} ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER] = {
- [MLX5E_1000BASE_CX_SGMII] = {
- .supported = SUPPORTED_1000baseKX_Full,
- .advertised = ADVERTISED_1000baseKX_Full,
- .speed = 1000,
- },
- [MLX5E_1000BASE_KX] = {
- .supported = SUPPORTED_1000baseKX_Full,
- .advertised = ADVERTISED_1000baseKX_Full,
- .speed = 1000,
- },
- [MLX5E_10GBASE_CX4] = {
- .supported = SUPPORTED_10000baseKX4_Full,
- .advertised = ADVERTISED_10000baseKX4_Full,
- .speed = 10000,
- },
- [MLX5E_10GBASE_KX4] = {
- .supported = SUPPORTED_10000baseKX4_Full,
- .advertised = ADVERTISED_10000baseKX4_Full,
- .speed = 10000,
- },
- [MLX5E_10GBASE_KR] = {
- .supported = SUPPORTED_10000baseKR_Full,
- .advertised = ADVERTISED_10000baseKR_Full,
- .speed = 10000,
- },
- [MLX5E_20GBASE_KR2] = {
- .supported = SUPPORTED_20000baseKR2_Full,
- .advertised = ADVERTISED_20000baseKR2_Full,
- .speed = 20000,
- },
- [MLX5E_40GBASE_CR4] = {
- .supported = SUPPORTED_40000baseCR4_Full,
- .advertised = ADVERTISED_40000baseCR4_Full,
- .speed = 40000,
- },
- [MLX5E_40GBASE_KR4] = {
- .supported = SUPPORTED_40000baseKR4_Full,
- .advertised = ADVERTISED_40000baseKR4_Full,
- .speed = 40000,
- },
- [MLX5E_56GBASE_R4] = {
- .supported = SUPPORTED_56000baseKR4_Full,
- .advertised = ADVERTISED_56000baseKR4_Full,
- .speed = 56000,
- },
- [MLX5E_10GBASE_CR] = {
- .supported = SUPPORTED_10000baseKR_Full,
- .advertised = ADVERTISED_10000baseKR_Full,
- .speed = 10000,
- },
- [MLX5E_10GBASE_SR] = {
- .supported = SUPPORTED_10000baseKR_Full,
- .advertised = ADVERTISED_10000baseKR_Full,
- .speed = 10000,
- },
- [MLX5E_10GBASE_ER] = {
- .supported = SUPPORTED_10000baseKR_Full,
- .advertised = ADVERTISED_10000baseKR_Full,
- .speed = 10000,
- },
- [MLX5E_40GBASE_SR4] = {
- .supported = SUPPORTED_40000baseSR4_Full,
- .advertised = ADVERTISED_40000baseSR4_Full,
- .speed = 40000,
- },
- [MLX5E_40GBASE_LR4] = {
- .supported = SUPPORTED_40000baseLR4_Full,
- .advertised = ADVERTISED_40000baseLR4_Full,
- .speed = 40000,
- },
- [MLX5E_100GBASE_CR4] = {
- .speed = 100000,
- },
- [MLX5E_100GBASE_SR4] = {
- .speed = 100000,
- },
- [MLX5E_100GBASE_KR4] = {
- .speed = 100000,
- },
- [MLX5E_100GBASE_LR4] = {
- .speed = 100000,
- },
- [MLX5E_100BASE_TX] = {
- .speed = 100,
- },
- [MLX5E_1000BASE_T] = {
- .supported = SUPPORTED_1000baseT_Full,
- .advertised = ADVERTISED_1000baseT_Full,
- .speed = 1000,
- },
- [MLX5E_10GBASE_T] = {
- .supported = SUPPORTED_10000baseT_Full,
- .advertised = ADVERTISED_10000baseT_Full,
- .speed = 1000,
- },
- [MLX5E_25GBASE_CR] = {
- .speed = 25000,
- },
- [MLX5E_25GBASE_KR] = {
- .speed = 25000,
- },
- [MLX5E_25GBASE_SR] = {
- .speed = 25000,
- },
- [MLX5E_50GBASE_CR2] = {
- .speed = 50000,
- },
- [MLX5E_50GBASE_KR2] = {
- .speed = 50000,
- },
};
+static struct ptys2ethtool_config ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER];
+
+#define MLX5_BUILD_PTYS2ETHTOOL_CONFIG(reg_, speed_, ...) \
+ ({ \
+ struct ptys2ethtool_config *cfg; \
+ const unsigned int modes[] = { __VA_ARGS__ }; \
+ unsigned int i; \
+ cfg = &ptys2ethtool_table[reg_]; \
+ cfg->speed = speed_; \
+ bitmap_zero(cfg->supported, \
+ __ETHTOOL_LINK_MODE_MASK_NBITS); \
+ bitmap_zero(cfg->advertised, \
+ __ETHTOOL_LINK_MODE_MASK_NBITS); \
+ for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) { \
+ __set_bit(modes[i], cfg->supported); \
+ __set_bit(modes[i], cfg->advertised); \
+ } \
+ })
+
+void mlx5e_build_ptys2ethtool_map(void)
+{
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_CX_SGMII, SPEED_1000,
+ ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_KX, SPEED_1000,
+ ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CX4, SPEED_10000,
+ ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KX4, SPEED_10000,
+ ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KR, SPEED_10000,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_20GBASE_KR2, SPEED_20000,
+ ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_CR4, SPEED_40000,
+ ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_KR4, SPEED_40000,
+ ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_56GBASE_R4, SPEED_56000,
+ ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CR, SPEED_10000,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_SR, SPEED_10000,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_ER, SPEED_10000,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_SR4, SPEED_40000,
+ ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_LR4, SPEED_40000,
+ ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_SR2, SPEED_50000,
+ ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_CR4, SPEED_100000,
+ ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_SR4, SPEED_100000,
+ ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_KR4, SPEED_100000,
+ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, SPEED_100000,
+ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, SPEED_10000,
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, SPEED_25000,
+ ETHTOOL_LINK_MODE_25000baseCR_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_KR, SPEED_25000,
+ ETHTOOL_LINK_MODE_25000baseKR_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_SR, SPEED_25000,
+ ETHTOOL_LINK_MODE_25000baseSR_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_CR2, SPEED_50000,
+ ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_KR2, SPEED_50000,
+ ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT);
+}
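/*
 * Editor's sketch (not part of the patch): the table above can no longer
 * use static initializers because __ETHTOOL_DECLARE_LINK_MODE_MASK()
 * declares a multi-word bitmap, not a u32 of SUPPORTED_*/ADVERTISED_*
 * flags -- hence the runtime builder macro. The same pattern, reduced to
 * plain C with invented names:
 */
#include <stdint.h>
#include <string.h>

#define DEMO_MODE_NBITS  64
#define DEMO_MASK_WORDS  ((DEMO_MODE_NBITS + 31) / 32)

struct demo_link_config {
	uint32_t supported[DEMO_MASK_WORDS];
	uint32_t speed;
};

static struct demo_link_config demo_table[8];

static void demo_set_bit(unsigned int nr, uint32_t *mask)
{
	mask[nr / 32] |= 1u << (nr % 32);
}

/* GCC statement expression, like MLX5_BUILD_PTYS2ETHTOOL_CONFIG above:
 * zero the entry, record the speed, set one bit per listed link mode. */
#define DEMO_BUILD_CONFIG(reg_, speed_, ...)				\
	({								\
		const unsigned int modes[] = { __VA_ARGS__ };		\
		unsigned int i;						\
		memset(&demo_table[reg_], 0, sizeof(demo_table[0]));	\
		demo_table[reg_].speed = (speed_);			\
		for (i = 0; i < sizeof(modes) / sizeof(modes[0]); i++)	\
			demo_set_bit(modes[i], demo_table[reg_].supported); \
	})

static void demo_build_table(void)
{
	DEMO_BUILD_CONFIG(0, 1000, 3);		/* one mode bit */
	DEMO_BUILD_CONFIG(1, 40000, 23, 24);	/* two mode bits */
}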
+
static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
@@ -177,6 +139,18 @@ static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
return err ? 0 : pfc_en_tx | pfc_en_rx;
}
+static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 rx_pause;
+ u32 tx_pause;
+ int err;
+
+ err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
+
+ return err ? false : rx_pause | tx_pause;
+}
+
#define MLX5E_NUM_Q_CNTRS(priv) (NUM_Q_COUNTERS * (!!priv->q_counter))
#define MLX5E_NUM_RQ_STATS(priv) \
(NUM_RQ_STATS * priv->params.num_channels * \
@@ -185,8 +159,8 @@ static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
(NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \
test_bit(MLX5E_STATE_OPENED, &priv->state))
#define MLX5E_NUM_PFC_COUNTERS(priv) \
- (hweight8(mlx5e_query_pfc_combined(priv)) * \
- NUM_PPORT_PER_PRIO_PFC_COUNTERS)
+ ((mlx5e_query_global_pause_combined(priv) + hweight8(mlx5e_query_pfc_combined(priv))) * \
+ NUM_PPORT_PER_PRIO_PFC_COUNTERS)
static int mlx5e_get_sset_count(struct net_device *dev, int sset)
{
@@ -200,6 +174,8 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
MLX5E_NUM_RQ_STATS(priv) +
MLX5E_NUM_SQ_STATS(priv) +
MLX5E_NUM_PFC_COUNTERS(priv);
+ case ETH_SS_PRIV_FLAGS:
+ return ARRAY_SIZE(mlx5e_priv_flags);
/* fallthrough */
default:
return -EOPNOTSUPP;
@@ -246,8 +222,18 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
pfc_combined = mlx5e_query_pfc_combined(priv);
for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+ char pfc_string[ETH_GSTRING_LEN];
+
+ snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ pport_per_prio_pfc_stats_desc[i].format, pfc_string);
+ }
+ }
+
+ if (mlx5e_query_global_pause_combined(priv)) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
sprintf(data + (idx++) * ETH_GSTRING_LEN,
- pport_per_prio_pfc_stats_desc[i].format, prio);
+ pport_per_prio_pfc_stats_desc[i].format, "global");
}
}
@@ -272,9 +258,12 @@ static void mlx5e_get_strings(struct net_device *dev,
uint32_t stringset, uint8_t *data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ int i;
switch (stringset) {
case ETH_SS_PRIV_FLAGS:
+ for (i = 0; i < ARRAY_SIZE(mlx5e_priv_flags); i++)
+ strcpy(data + i * ETH_GSTRING_LEN, mlx5e_priv_flags[i]);
break;
case ETH_SS_TEST:
@@ -339,6 +328,13 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
}
}
+ if (mlx5e_query_global_pause_combined(priv)) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
+ pport_per_prio_pfc_stats_desc, i);
+ }
+ }
+
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return;
@@ -356,15 +352,61 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
sq_stats_desc, j);
}
+static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type,
+ int num_wqe)
+{
+ int packets_per_wqe;
+ int stride_size;
+ int num_strides;
+ int wqe_size;
+
+ if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+ return num_wqe;
+
+ stride_size = 1 << priv->params.mpwqe_log_stride_sz;
+ num_strides = 1 << priv->params.mpwqe_log_num_strides;
+ wqe_size = stride_size * num_strides;
+
+ packets_per_wqe = wqe_size /
+ ALIGN(ETH_DATA_LEN, stride_size);
+ return (1 << (order_base_2(num_wqe * packets_per_wqe) - 1));
+}
+
+static u32 mlx5e_packets_to_rx_wqes(struct mlx5e_priv *priv, int rq_wq_type,
+ int num_packets)
+{
+ int packets_per_wqe;
+ int stride_size;
+ int num_strides;
+ int wqe_size;
+ int num_wqes;
+
+ if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+ return num_packets;
+
+ stride_size = 1 << priv->params.mpwqe_log_stride_sz;
+ num_strides = 1 << priv->params.mpwqe_log_num_strides;
+ wqe_size = stride_size * num_strides;
+
+ num_packets = (1 << order_base_2(num_packets));
+
+ packets_per_wqe = wqe_size /
+ ALIGN(ETH_DATA_LEN, stride_size);
+ num_wqes = DIV_ROUND_UP(num_packets, packets_per_wqe);
+ return 1 << (order_base_2(num_wqes));
+}
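/*
 * Worked example (editor's note, not part of the patch), with assumed
 * striding-RQ parameters mpwqe_log_stride_sz = 11 and
 * mpwqe_log_num_strides = 6:
 *
 *   stride_size     = 1 << 11        = 2048 bytes
 *   num_strides     = 1 << 6         = 64
 *   wqe_size        = 2048 * 64      = 131072 bytes
 *   ALIGN(ETH_DATA_LEN, stride_size) = ALIGN(1500, 2048) = 2048
 *   packets_per_wqe = 131072 / 2048  = 64
 *
 * For num_wqe = 1024, order_base_2(1024 * 64) = 16, so the ring size
 * reported to ethtool is 1 << (16 - 1) = 32768 packets: a power of two
 * kept one step below the theoretical maximum, so that converting a
 * user-requested packet count back to WQEs never overshoots the ring.
 */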
+
static void mlx5e_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *param)
{
struct mlx5e_priv *priv = netdev_priv(dev);
int rq_wq_type = priv->params.rq_wq_type;
- param->rx_max_pending = 1 << mlx5_max_log_rq_size(rq_wq_type);
+ param->rx_max_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+ 1 << mlx5_max_log_rq_size(rq_wq_type));
param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
- param->rx_pending = 1 << priv->params.log_rq_size;
+ param->rx_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+ 1 << priv->params.log_rq_size);
param->tx_pending = 1 << priv->params.log_sq_size;
}
@@ -374,9 +416,13 @@ static int mlx5e_set_ringparam(struct net_device *dev,
struct mlx5e_priv *priv = netdev_priv(dev);
bool was_opened;
int rq_wq_type = priv->params.rq_wq_type;
+ u32 rx_pending_wqes;
+ u32 min_rq_size;
+ u32 max_rq_size;
u16 min_rx_wqes;
u8 log_rq_size;
u8 log_sq_size;
+ u32 num_mtts;
int err = 0;
if (param->rx_jumbo_pending) {
@@ -389,18 +435,36 @@ static int mlx5e_set_ringparam(struct net_device *dev,
__func__);
return -EINVAL;
}
- if (param->rx_pending < (1 << mlx5_min_log_rq_size(rq_wq_type))) {
+
+ min_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+ 1 << mlx5_min_log_rq_size(rq_wq_type));
+ max_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+ 1 << mlx5_max_log_rq_size(rq_wq_type));
+ rx_pending_wqes = mlx5e_packets_to_rx_wqes(priv, rq_wq_type,
+ param->rx_pending);
+
+ if (param->rx_pending < min_rq_size) {
netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n",
__func__, param->rx_pending,
- 1 << mlx5_min_log_rq_size(rq_wq_type));
+ min_rq_size);
return -EINVAL;
}
- if (param->rx_pending > (1 << mlx5_max_log_rq_size(rq_wq_type))) {
+ if (param->rx_pending > max_rq_size) {
netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n",
__func__, param->rx_pending,
- 1 << mlx5_max_log_rq_size(rq_wq_type));
+ max_rq_size);
return -EINVAL;
}
+
+ num_mtts = MLX5E_REQUIRED_MTTS(priv->params.num_channels,
+ rx_pending_wqes);
+ if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+ !MLX5E_VALID_NUM_MTTS(num_mtts)) {
+ netdev_info(dev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n",
+ __func__, param->rx_pending);
+ return -EINVAL;
+ }
+
if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
netdev_info(dev, "%s: tx_pending (%d) < min (%d)\n",
__func__, param->tx_pending,
@@ -414,9 +478,9 @@ static int mlx5e_set_ringparam(struct net_device *dev,
return -EINVAL;
}
- log_rq_size = order_base_2(param->rx_pending);
+ log_rq_size = order_base_2(rx_pending_wqes);
log_sq_size = order_base_2(param->tx_pending);
- min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, param->rx_pending);
+ min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, rx_pending_wqes);
if (log_rq_size == priv->params.log_rq_size &&
log_sq_size == priv->params.log_sq_size &&
@@ -458,6 +522,7 @@ static int mlx5e_set_channels(struct net_device *dev,
unsigned int count = ch->combined_count;
bool arfs_enabled;
bool was_opened;
+ u32 num_mtts;
int err = 0;
if (!count) {
@@ -476,6 +541,14 @@ static int mlx5e_set_channels(struct net_device *dev,
return -EINVAL;
}
+ num_mtts = MLX5E_REQUIRED_MTTS(count, BIT(priv->params.log_rq_size));
+ if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+ !MLX5E_VALID_NUM_MTTS(num_mtts)) {
+ netdev_info(dev, "%s: rx count (%d) request can't be satisfied, try to reduce.\n",
+ __func__, count);
+ return -EINVAL;
+ }
+
if (priv->params.num_channels == count)
return 0;
@@ -519,10 +592,11 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
return -ENOTSUPP;
- coal->rx_coalesce_usecs = priv->params.rx_cq_moderation_usec;
- coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts;
- coal->tx_coalesce_usecs = priv->params.tx_cq_moderation_usec;
- coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation_pkts;
+ coal->rx_coalesce_usecs = priv->params.rx_cq_moderation.usec;
+ coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts;
+ coal->tx_coalesce_usecs = priv->params.tx_cq_moderation.usec;
+ coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation.pkts;
+ coal->use_adaptive_rx_coalesce = priv->params.rx_am_enabled;
return 0;
}
@@ -533,6 +607,10 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_channel *c;
+ bool restart =
+ !!coal->use_adaptive_rx_coalesce != priv->params.rx_am_enabled;
+ bool was_opened;
+ int err = 0;
int tc;
int i;
@@ -540,12 +618,19 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
return -ENOTSUPP;
mutex_lock(&priv->state_lock);
- priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs;
- priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames;
- priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs;
- priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames;
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (was_opened && restart) {
+ mlx5e_close_locked(netdev);
+ priv->params.rx_am_enabled = !!coal->use_adaptive_rx_coalesce;
+ }
+
+ priv->params.tx_cq_moderation.usec = coal->tx_coalesce_usecs;
+ priv->params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames;
+ priv->params.rx_cq_moderation.usec = coal->rx_coalesce_usecs;
+ priv->params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames;
+
+ if (!was_opened || restart)
goto out;
for (i = 0; i < priv->params.num_channels; ++i) {
@@ -564,35 +649,39 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
}
out:
+ if (was_opened && restart)
+ err = mlx5e_open_locked(netdev);
+
mutex_unlock(&priv->state_lock);
- return 0;
+ return err;
}
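/*
 * Editor's sketch of the reopen pattern used above, reduced to its
 * shape (all names invented; not part of the patch): a parameter that
 * is baked into hardware objects at creation time (here, the adaptive
 * RX moderation mode) forces a close/open cycle, while the plain
 * moderation values can be applied to running channels.
 */
#include <stdbool.h>

struct demo_dev { bool opened; bool restart_param; };

static void demo_close(struct demo_dev *d) { d->opened = false; }
static int  demo_open(struct demo_dev *d)  { d->opened = true; return 0; }

static int demo_apply(struct demo_dev *d, bool want)
{
	bool was_opened = d->opened;
	bool restart = want != d->restart_param;
	int err = 0;

	if (was_opened && restart)
		demo_close(d);		/* value is baked into HW objects */

	d->restart_param = want;	/* live params would be set here too */

	if (was_opened && restart)
		err = demo_open(d);	/* recreate queues with the new value */
	return err;
}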
-static u32 ptys2ethtool_supported_link(u32 eth_proto_cap)
+static void ptys2ethtool_supported_link(unsigned long *supported_modes,
+ u32 eth_proto_cap)
{
- int i;
- u32 supported_modes = 0;
+ unsigned long proto_cap = eth_proto_cap;
+ int proto;
- for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
- if (eth_proto_cap & MLX5E_PROT_MASK(i))
- supported_modes |= ptys2ethtool_table[i].supported;
- }
- return supported_modes;
+ for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER)
+ bitmap_or(supported_modes, supported_modes,
+ ptys2ethtool_table[proto].supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static u32 ptys2ethtool_adver_link(u32 eth_proto_cap)
+static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
+ u32 eth_proto_cap)
{
- int i;
- u32 advertising_modes = 0;
+ unsigned long proto_cap = eth_proto_cap;
+ int proto;
- for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
- if (eth_proto_cap & MLX5E_PROT_MASK(i))
- advertising_modes |= ptys2ethtool_table[i].advertised;
- }
- return advertising_modes;
+ for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER)
+ bitmap_or(advertising_modes, advertising_modes,
+ ptys2ethtool_table[proto].advertised,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static u32 ptys2ethtool_supported_port(u32 eth_proto_cap)
+static void ptys2ethtool_supported_port(struct ethtool_link_ksettings *link_ksettings,
+ u32 eth_proto_cap)
{
if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
| MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
@@ -600,7 +689,7 @@ static u32 ptys2ethtool_supported_port(u32 eth_proto_cap)
| MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
| MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
| MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
- return SUPPORTED_FIBRE;
+ ethtool_link_ksettings_add_link_mode(link_ksettings, supported, FIBRE);
}
if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4)
@@ -608,9 +697,8 @@ static u32 ptys2ethtool_supported_port(u32 eth_proto_cap)
| MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
| MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
| MLX5E_PROT_MASK(MLX5E_1000BASE_KX))) {
- return SUPPORTED_Backplane;
+ ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Backplane);
}
- return 0;
}
int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
@@ -634,7 +722,7 @@ int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
static void get_speed_duplex(struct net_device *netdev,
u32 eth_proto_oper,
- struct ethtool_cmd *cmd)
+ struct ethtool_link_ksettings *link_ksettings)
{
int i;
u32 speed = SPEED_UNKNOWN;
@@ -651,23 +739,32 @@ static void get_speed_duplex(struct net_device *netdev,
}
}
out:
- ethtool_cmd_speed_set(cmd, speed);
- cmd->duplex = duplex;
+ link_ksettings->base.speed = speed;
+ link_ksettings->base.duplex = duplex;
}
-static void get_supported(u32 eth_proto_cap, u32 *supported)
+static void get_supported(u32 eth_proto_cap,
+ struct ethtool_link_ksettings *link_ksettings)
{
- *supported |= ptys2ethtool_supported_port(eth_proto_cap);
- *supported |= ptys2ethtool_supported_link(eth_proto_cap);
- *supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ unsigned long *supported = link_ksettings->link_modes.supported;
+
+ ptys2ethtool_supported_port(link_ksettings, eth_proto_cap);
+ ptys2ethtool_supported_link(supported, eth_proto_cap);
+ ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
+ ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Asym_Pause);
}
static void get_advertising(u32 eth_proto_cap, u8 tx_pause,
- u8 rx_pause, u32 *advertising)
+ u8 rx_pause,
+ struct ethtool_link_ksettings *link_ksettings)
{
- *advertising |= ptys2ethtool_adver_link(eth_proto_cap);
- *advertising |= tx_pause ? ADVERTISED_Pause : 0;
- *advertising |= (tx_pause ^ rx_pause) ? ADVERTISED_Asym_Pause : 0;
+ unsigned long *advertising = link_ksettings->link_modes.advertising;
+
+ ptys2ethtool_adver_link(advertising, eth_proto_cap);
+ if (tx_pause)
+ ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
+ if (tx_pause ^ rx_pause)
+ ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Asym_Pause);
}
static u8 get_connector_port(u32 eth_proto)
@@ -695,13 +792,16 @@ static u8 get_connector_port(u32 eth_proto)
return PORT_OTHER;
}
-static void get_lp_advertising(u32 eth_proto_lp, u32 *lp_advertising)
+static void get_lp_advertising(u32 eth_proto_lp,
+ struct ethtool_link_ksettings *link_ksettings)
{
- *lp_advertising = ptys2ethtool_adver_link(eth_proto_lp);
+ unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
+
+ ptys2ethtool_adver_link(lp_advertising, eth_proto_lp);
}
-static int mlx5e_get_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
+static int mlx5e_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
@@ -710,6 +810,8 @@ static int mlx5e_get_settings(struct net_device *netdev,
u32 eth_proto_admin;
u32 eth_proto_lp;
u32 eth_proto_oper;
+ u8 an_disable_admin;
+ u8 an_status;
int err;
err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
@@ -720,35 +822,49 @@ static int mlx5e_get_settings(struct net_device *netdev,
goto err_query_ptys;
}
- eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability);
- eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin);
- eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
- eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
+ eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability);
+ eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin);
+ eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
+ eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
+ an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
+ an_status = MLX5_GET(ptys_reg, out, an_status);
- cmd->supported = 0;
- cmd->advertising = 0;
+ ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
+ ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
- get_supported(eth_proto_cap, &cmd->supported);
- get_advertising(eth_proto_admin, 0, 0, &cmd->advertising);
- get_speed_duplex(netdev, eth_proto_oper, cmd);
+ get_supported(eth_proto_cap, link_ksettings);
+ get_advertising(eth_proto_admin, 0, 0, link_ksettings);
+ get_speed_duplex(netdev, eth_proto_oper, link_ksettings);
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
- cmd->port = get_connector_port(eth_proto_oper);
- get_lp_advertising(eth_proto_lp, &cmd->lp_advertising);
+ link_ksettings->base.port = get_connector_port(eth_proto_oper);
+ get_lp_advertising(eth_proto_lp, link_ksettings);
+
+ if (an_status == MLX5_AN_COMPLETE)
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ lp_advertising, Autoneg);
- cmd->transceiver = XCVR_INTERNAL;
+ link_ksettings->base.autoneg = an_disable_admin ? AUTONEG_DISABLE :
+ AUTONEG_ENABLE;
+ ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
+ Autoneg);
+ if (!an_disable_admin)
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising, Autoneg);
err_query_ptys:
return err;
}
-static u32 mlx5e_ethtool2ptys_adver_link(u32 link_modes)
+static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
{
u32 i, ptys_modes = 0;
for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
- if (ptys2ethtool_table[i].advertised & link_modes)
+ if (bitmap_intersects(ptys2ethtool_table[i].advertised,
+ link_modes,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
ptys_modes |= MLX5E_PROT_MASK(i);
}
@@ -767,21 +883,25 @@ static u32 mlx5e_ethtool2ptys_speed_link(u32 speed)
return speed_links;
}
-static int mlx5e_set_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
+static int mlx5e_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
+ u32 eth_proto_cap, eth_proto_admin;
+ bool an_changes = false;
+ u8 an_disable_admin;
+ u8 an_disable_cap;
+ bool an_disable;
u32 link_modes;
+ u8 an_status;
u32 speed;
- u32 eth_proto_cap, eth_proto_admin;
- enum mlx5_port_status ps;
int err;
- speed = ethtool_cmd_speed(cmd);
+ speed = link_ksettings->base.speed;
- link_modes = cmd->autoneg == AUTONEG_ENABLE ?
- mlx5e_ethtool2ptys_adver_link(cmd->advertising) :
+ link_modes = link_ksettings->base.autoneg == AUTONEG_ENABLE ?
+ mlx5e_ethtool2ptys_adver_link(link_ksettings->link_modes.advertising) :
mlx5e_ethtool2ptys_speed_link(speed);
err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
@@ -806,15 +926,18 @@ static int mlx5e_set_settings(struct net_device *netdev,
goto out;
}
- if (link_modes == eth_proto_admin)
+ mlx5_query_port_autoneg(mdev, MLX5_PTYS_EN, &an_status,
+ &an_disable_cap, &an_disable_admin);
+
+ an_disable = link_ksettings->base.autoneg == AUTONEG_DISABLE;
+ an_changes = ((!an_disable && an_disable_admin) ||
+ (an_disable && !an_disable_admin));
+
+ if (!an_changes && link_modes == eth_proto_admin)
goto out;
- mlx5_query_port_admin_status(mdev, &ps);
- if (ps == MLX5_PORT_UP)
- mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
- mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN);
- if (ps == MLX5_PORT_UP)
- mlx5_set_port_admin_status(mdev, MLX5_PORT_UP);
+ mlx5_set_port_ptys(mdev, an_disable, link_modes, MLX5_PTYS_EN);
+ mlx5_toggle_port_link(mdev);
out:
return err;
@@ -861,7 +984,7 @@ static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
mlx5e_build_tir_ctx_hash(tirc, priv);
for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
- mlx5_core_modify_tir(mdev, priv->indir_tirn[i], in, inlen);
+ mlx5_core_modify_tir(mdev, priv->indir_tir[i].tirn, in, inlen);
}
static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
@@ -883,7 +1006,7 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
mutex_lock(&priv->state_lock);
if (indir) {
- u32 rqtn = priv->indir_rqtn;
+ u32 rqtn = priv->indir_rqt.rqtn;
memcpy(priv->params.indirection_rqt, indir,
sizeof(priv->params.indirection_rqt));
@@ -916,6 +1039,15 @@ static int mlx5e_get_rxnfc(struct net_device *netdev,
case ETHTOOL_GRXRINGS:
info->data = priv->params.num_channels;
break;
+ case ETHTOOL_GRXCLSRLCNT:
+ info->rule_cnt = priv->fs.ethtool.tot_num_rules;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -1272,6 +1404,107 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
return 0;
}
+typedef int (*mlx5e_pflag_handler)(struct net_device *netdev, bool enable);
+
+static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ bool rx_mode_changed;
+ u8 rx_cq_period_mode;
+ int err = 0;
+ bool reset;
+
+ rx_cq_period_mode = enable ?
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
+ MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ rx_mode_changed = rx_cq_period_mode != priv->params.rx_cq_period_mode;
+
+ if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
+ !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
+ return -ENOTSUPP;
+
+ if (!rx_mode_changed)
+ return 0;
+
+ reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (reset)
+ mlx5e_close_locked(netdev);
+
+ mlx5e_set_rx_cq_mode_params(&priv->params, rx_cq_period_mode);
+
+ if (reset)
+ err = mlx5e_open_locked(netdev);
+
+ return err;
+}
+
+static int mlx5e_handle_pflag(struct net_device *netdev,
+ u32 wanted_flags,
+ enum mlx5e_priv_flag flag,
+ mlx5e_pflag_handler pflag_handler)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ bool enable = !!(wanted_flags & flag);
+ u32 changes = wanted_flags ^ priv->pflags;
+ int err;
+
+ if (!(changes & flag))
+ return 0;
+
+ err = pflag_handler(netdev, enable);
+ if (err) {
+ netdev_err(netdev, "%s private flag 0x%x failed err %d\n",
+ enable ? "Enable" : "Disable", flag, err);
+ return err;
+ }
+
+ MLX5E_SET_PRIV_FLAG(priv, flag, enable);
+ return 0;
+}
+
+static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err;
+
+ mutex_lock(&priv->state_lock);
+
+ err = mlx5e_handle_pflag(netdev, pflags,
+ MLX5E_PFLAG_RX_CQE_BASED_MODER,
+ set_pflag_rx_cqe_based_moder);
+
+ mutex_unlock(&priv->state_lock);
+ return err ? -EINVAL : 0;
+}
+
+static u32 mlx5e_get_priv_flags(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return priv->pflags;
+}
+
+static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ int err = 0;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ err = mlx5e_ethtool_flow_replace(priv, &cmd->fs);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
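/*
 * Editor's illustration (not part of the patch): a TCP/IPv4 rule shaped
 * the way validate_flow() in en_fs_ethtool.c expects it -- every field
 * that is supplied must be fully masked, and ring_cookie names either a
 * channel index or RX_CLS_FLOW_DISC to drop. Built from the real uapi
 * structs in <linux/ethtool.h>; error handling omitted.
 */
#include <arpa/inet.h>
#include <linux/ethtool.h>
#include <string.h>

static void demo_fill_rule(struct ethtool_rx_flow_spec *fs)
{
	memset(fs, 0, sizeof(*fs));
	fs->flow_type = TCP_V4_FLOW;
	fs->h_u.tcp_ip4_spec.ip4dst = inet_addr("192.0.2.1");
	fs->m_u.tcp_ip4_spec.ip4dst = 0xffffffff;	/* must be all-ones */
	fs->h_u.tcp_ip4_spec.pdst   = htons(80);
	fs->m_u.tcp_ip4_spec.pdst   = 0xffff;		/* must be all-ones */
	fs->ring_cookie = 3;	/* deliver to RX ring 3 */
	fs->location    = 0;	/* slot in the ethtool rule table */
}

/* Roughly what `ethtool -U <dev> flow-type tcp4 dst-ip 192.0.2.1
 * dst-port 80 action 3 loc 0` hands to ETHTOOL_SRXCLSRLINS. */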
+
const struct ethtool_ops mlx5e_ethtool_ops = {
.get_drvinfo = mlx5e_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -1284,13 +1517,14 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.set_channels = mlx5e_set_channels,
.get_coalesce = mlx5e_get_coalesce,
.set_coalesce = mlx5e_set_coalesce,
- .get_settings = mlx5e_get_settings,
- .set_settings = mlx5e_set_settings,
+ .get_link_ksettings = mlx5e_get_link_ksettings,
+ .set_link_ksettings = mlx5e_set_link_ksettings,
.get_rxfh_key_size = mlx5e_get_rxfh_key_size,
.get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
.get_rxfh = mlx5e_get_rxfh,
.set_rxfh = mlx5e_set_rxfh,
.get_rxnfc = mlx5e_get_rxnfc,
+ .set_rxnfc = mlx5e_set_rxnfc,
.get_tunable = mlx5e_get_tunable,
.set_tunable = mlx5e_set_tunable,
.get_pauseparam = mlx5e_get_pauseparam,
@@ -1301,4 +1535,6 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.set_wol = mlx5e_set_wol,
.get_module_info = mlx5e_get_module_info,
.get_module_eeprom = mlx5e_get_module_eeprom,
+ .get_priv_flags = mlx5e_get_priv_flags,
+ .set_priv_flags = mlx5e_set_priv_flags
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index b32740092..1587a9fd5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -156,19 +156,18 @@ enum mlx5e_vlan_rule_type {
static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
enum mlx5e_vlan_rule_type rule_type,
- u16 vid, u32 *mc, u32 *mv)
+ u16 vid, struct mlx5_flow_spec *spec)
{
struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
struct mlx5_flow_destination dest;
- u8 match_criteria_enable = 0;
struct mlx5_flow_rule **rule_p;
int err = 0;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = priv->fs.l2.ft.t;
- match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
@@ -176,17 +175,19 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
break;
case MLX5E_VLAN_RULE_TYPE_ANY_VID:
rule_p = &priv->fs.vlan.any_vlan_rule;
- MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1);
break;
default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
rule_p = &priv->fs.vlan.active_vlans_rule[vid];
- MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
- MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.first_vid);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
+ vid);
break;
}
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+ *rule_p = mlx5_add_flow_rule(ft, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG,
&dest);
@@ -203,27 +204,21 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
- u32 *match_criteria;
- u32 *match_value;
+ struct mlx5_flow_spec *spec;
int err = 0;
- match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- if (!match_value || !match_criteria) {
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
- err = -ENOMEM;
- goto add_vlan_rule_out;
+ return -ENOMEM;
}
if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
mlx5e_vport_context_update_vlans(priv);
- err = __mlx5e_add_vlan_rule(priv, rule_type, vid, match_criteria,
- match_value);
+ err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);
-add_vlan_rule_out:
- kvfree(match_criteria);
- kvfree(match_value);
+ kvfree(spec);
return err;
}
@@ -598,32 +593,27 @@ static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
u8 proto)
{
struct mlx5_flow_rule *rule;
- u8 match_criteria_enable = 0;
- u32 *match_criteria;
- u32 *match_value;
+ struct mlx5_flow_spec *spec;
int err = 0;
- match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- if (!match_value || !match_criteria) {
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
- err = -ENOMEM;
- goto out;
+ return ERR_PTR(-ENOMEM);
}
if (proto) {
- match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ip_protocol);
- MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, proto);
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
}
if (etype) {
- match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ethertype);
- MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, etype);
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
}
- rule = mlx5_add_flow_rule(ft, match_criteria_enable,
- match_criteria, match_value,
+ rule = mlx5_add_flow_rule(ft, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG,
dest);
@@ -631,9 +621,8 @@ static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
err = PTR_ERR(rule);
netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
}
-out:
- kvfree(match_criteria);
- kvfree(match_value);
+
+ kvfree(spec);
return err ? ERR_PTR(err) : rule;
}
@@ -655,7 +644,7 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
if (tt == MLX5E_TT_ANY)
dest.tir_num = priv->direct_tir[0].tirn;
else
- dest.tir_num = priv->indir_tirn[tt];
+ dest.tir_num = priv->indir_tir[tt].tirn;
rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
ttc_rules[tt].etype,
ttc_rules[tt].proto);
@@ -792,24 +781,20 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
{
struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
struct mlx5_flow_destination dest;
- u8 match_criteria_enable = 0;
- u32 *match_criteria;
- u32 *match_value;
+ struct mlx5_flow_spec *spec;
int err = 0;
u8 *mc_dmac;
u8 *mv_dmac;
- match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- if (!match_value || !match_criteria) {
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
- err = -ENOMEM;
- goto add_l2_rule_out;
+ return -ENOMEM;
}
- mc_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers.dmac_47_16);
- mv_dmac = MLX5_ADDR_OF(fte_match_param, match_value,
+ mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.dmac_47_16);
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
@@ -817,13 +802,13 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
switch (type) {
case MLX5E_FULLMATCH:
- match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
eth_broadcast_addr(mc_dmac);
ether_addr_copy(mv_dmac, ai->addr);
break;
case MLX5E_ALLMULTI:
- match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
mc_dmac[0] = 0x01;
mv_dmac[0] = 0x01;
break;
@@ -832,8 +817,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
break;
}
- ai->rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria,
- match_value,
+ ai->rule = mlx5_add_flow_rule(ft, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG, &dest);
if (IS_ERR(ai->rule)) {
@@ -843,9 +827,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
ai->rule = NULL;
}
-add_l2_rule_out:
- kvfree(match_criteria);
- kvfree(match_value);
+ kvfree(spec);
return err;
}
@@ -1102,6 +1084,8 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
goto err_destroy_l2_table;
}
+ mlx5e_ethtool_init_steering(priv);
+
return 0;
err_destroy_l2_table:
@@ -1121,4 +1105,5 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
mlx5e_destroy_l2_table(priv);
mlx5e_destroy_ttc_table(priv);
mlx5e_arfs_destroy_tables(priv);
+ mlx5e_ethtool_cleanup_steering(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
new file mode 100644
index 000000000..d17c24227
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -0,0 +1,586 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/fs.h>
+#include "en.h"
+
+struct mlx5e_ethtool_rule {
+ struct list_head list;
+ struct ethtool_rx_flow_spec flow_spec;
+ struct mlx5_flow_rule *rule;
+ struct mlx5e_ethtool_table *eth_ft;
+};
+
+static void put_flow_table(struct mlx5e_ethtool_table *eth_ft)
+{
+ if (!--eth_ft->num_rules) {
+ mlx5_destroy_flow_table(eth_ft->ft);
+ eth_ft->ft = NULL;
+ }
+}
+
+#define MLX5E_ETHTOOL_L3_L4_PRIO 0
+#define MLX5E_ETHTOOL_L2_PRIO (MLX5E_ETHTOOL_L3_L4_PRIO + ETHTOOL_NUM_L3_L4_FTS)
+#define MLX5E_ETHTOOL_NUM_ENTRIES 64000
+#define MLX5E_ETHTOOL_NUM_GROUPS 10
+static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
+ struct ethtool_rx_flow_spec *fs,
+ int num_tuples)
+{
+ struct mlx5e_ethtool_table *eth_ft;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *ft;
+ int max_tuples;
+ int table_size;
+ int prio;
+
+ switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ max_tuples = ETHTOOL_NUM_L3_L4_FTS;
+ prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
+ eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
+ break;
+ case IP_USER_FLOW:
+ max_tuples = ETHTOOL_NUM_L3_L4_FTS;
+ prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
+ eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
+ break;
+ case ETHER_FLOW:
+ max_tuples = ETHTOOL_NUM_L2_FTS;
+ prio = max_tuples - num_tuples;
+ eth_ft = &priv->fs.ethtool.l2_ft[prio];
+ prio += MLX5E_ETHTOOL_L2_PRIO;
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ eth_ft->num_rules++;
+ if (eth_ft->ft)
+ return eth_ft;
+
+ ns = mlx5_get_flow_namespace(priv->mdev,
+ MLX5_FLOW_NAMESPACE_ETHTOOL);
+ if (!ns)
+ return ERR_PTR(-ENOTSUPP);
+
+ table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
+ flow_table_properties_nic_receive.log_max_ft_size)),
+ MLX5E_ETHTOOL_NUM_ENTRIES);
+ ft = mlx5_create_auto_grouped_flow_table(ns, prio,
+ table_size,
+ MLX5E_ETHTOOL_NUM_GROUPS, 0);
+ if (IS_ERR(ft))
+ return (void *)ft;
+
+ eth_ft->ft = ft;
+ return eth_ft;
+}
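/*
 * Editor's note on the priority arithmetic above (the value of
 * ETHTOOL_NUM_L3_L4_FTS is assumed here for illustration; the real one
 * comes from en.h). With ETHTOOL_NUM_L3_L4_FTS == 7:
 *
 *   5-tuple rule: prio = MLX5E_ETHTOOL_L3_L4_PRIO + (7 - 5) = 2
 *   2-tuple rule: prio = MLX5E_ETHTOOL_L3_L4_PRIO + (7 - 2) = 5
 *
 * Lower mlx5 flow-steering priority numbers are matched first, so rules
 * carrying more match tuples land in higher-precedence tables and win
 * over broader rules without any explicit ordering logic.
 */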
+
+static void mask_spec(u8 *mask, u8 *val, size_t size)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++, mask++, val++)
+ *((u8 *)val) = *((u8 *)mask) & *((u8 *)val);
+}
+
+static void set_ips(void *outer_headers_v, void *outer_headers_c, __be32 ip4src_m,
+ __be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v)
+{
+ if (ip4src_m) {
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ &ip4src_v, sizeof(ip4src_v));
+ memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ 0xff, sizeof(ip4src_m));
+ }
+ if (ip4dst_m) {
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ &ip4dst_v, sizeof(ip4dst_v));
+ memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ 0xff, sizeof(ip4dst_m));
+ }
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ ethertype, ETH_P_IP);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+ ethertype, 0xffff);
+}
+
+static int set_flow_attrs(u32 *match_c, u32 *match_v,
+ struct ethtool_rx_flow_spec *fs)
+{
+ void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ outer_headers);
+ void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
+ outer_headers);
+ u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
+ struct ethtool_tcpip4_spec *l4_mask;
+ struct ethtool_tcpip4_spec *l4_val;
+ struct ethtool_usrip4_spec *l3_mask;
+ struct ethtool_usrip4_spec *l3_val;
+ struct ethhdr *eth_val;
+ struct ethhdr *eth_mask;
+
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ l4_mask = &fs->m_u.tcp_ip4_spec;
+ l4_val = &fs->h_u.tcp_ip4_spec;
+ set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src,
+ l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst);
+
+ if (l4_mask->psrc) {
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport,
+ 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport,
+ ntohs(l4_val->psrc));
+ }
+ if (l4_mask->pdst) {
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport,
+ 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport,
+ ntohs(l4_val->pdst));
+ }
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
+ 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
+ IPPROTO_TCP);
+ break;
+ case UDP_V4_FLOW:
+ l4_mask = &fs->m_u.tcp_ip4_spec;
+ l4_val = &fs->h_u.tcp_ip4_spec;
+ set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src,
+ l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst);
+
+ if (l4_mask->psrc) {
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport,
+ 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport,
+ ntohs(l4_val->psrc));
+ }
+ if (l4_mask->pdst) {
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport,
+ 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport,
+ ntohs(l4_val->pdst));
+ }
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
+ 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
+ IPPROTO_UDP);
+ break;
+ case IP_USER_FLOW:
+ l3_mask = &fs->m_u.usr_ip4_spec;
+ l3_val = &fs->h_u.usr_ip4_spec;
+ set_ips(outer_headers_v, outer_headers_c, l3_mask->ip4src,
+ l3_val->ip4src, l3_mask->ip4dst, l3_val->ip4dst);
+ break;
+ case ETHER_FLOW:
+ eth_mask = &fs->m_u.ether_spec;
+ eth_val = &fs->h_u.ether_spec;
+
+ mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask));
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
+ outer_headers_c, smac_47_16),
+ eth_mask->h_source);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
+ outer_headers_v, smac_47_16),
+ eth_val->h_source);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
+ outer_headers_c, dmac_47_16),
+ eth_mask->h_dest);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
+ outer_headers_v, dmac_47_16),
+ eth_val->h_dest);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ethertype,
+ ntohs(eth_mask->h_proto));
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ethertype,
+ ntohs(eth_val->h_proto));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if ((fs->flow_type & FLOW_EXT) &&
+ (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+ vlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ vlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+ first_vid, 0xfff);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ first_vid, ntohs(fs->h_ext.vlan_tci));
+ }
+ if (fs->flow_type & FLOW_MAC_EXT &&
+ !is_zero_ether_addr(fs->m_ext.h_dest)) {
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
+ outer_headers_c, dmac_47_16),
+ fs->m_ext.h_dest);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
+ outer_headers_v, dmac_47_16),
+ fs->h_ext.h_dest);
+ }
+
+ return 0;
+}
+
+static void add_rule_to_list(struct mlx5e_priv *priv,
+ struct mlx5e_ethtool_rule *rule)
+{
+ struct mlx5e_ethtool_rule *iter;
+ struct list_head *head = &priv->fs.ethtool.rules;
+
+ list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
+ if (iter->flow_spec.location > rule->flow_spec.location)
+ break;
+ head = &iter->list;
+ }
+ priv->fs.ethtool.tot_num_rules++;
+ list_add(&rule->list, head);
+}
+
+static bool outer_header_zero(u32 *match_criteria)
+{
+ int size = MLX5_ST_SZ_BYTES(fte_match_param);
+ char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ outer_headers);
+
+ return outer_headers_c[0] == 0 && !memcmp(outer_headers_c,
+ outer_headers_c + 1,
+ size - 1);
+}
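/*
 * Editor's sketch of the zero-check idiom above, standalone: if the
 * first byte is zero and every byte equals its successor, the whole
 * buffer must be zero -- one overlapping (read-only, so safe) memcmp
 * instead of a byte loop.
 */
#include <stdbool.h>
#include <string.h>

static bool demo_all_zero(const char *buf, size_t size)
{
	return buf[0] == 0 && !memcmp(buf, buf + 1, size - 1);
}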
+
+static struct mlx5_flow_rule *add_ethtool_flow_rule(struct mlx5e_priv *priv,
+ struct mlx5_flow_table *ft,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct mlx5_flow_destination *dst = NULL;
+ struct mlx5_flow_spec *spec;
+ struct mlx5_flow_rule *rule;
+ int err = 0;
+ u32 action;
+
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec)
+ return ERR_PTR(-ENOMEM);
+ err = set_flow_attrs(spec->match_criteria, spec->match_value,
+ fs);
+ if (err)
+ goto free;
+
+ if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
+ action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+ } else {
+ dst = kzalloc(sizeof(*dst), GFP_KERNEL);
+ if (!dst) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dst->tir_num = priv->direct_tir[fs->ring_cookie].tirn;
+ action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ }
+
+ spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
+ rule = mlx5_add_flow_rule(ft, spec, action,
+ MLX5_FS_DEFAULT_FLOW_TAG, dst);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
+ __func__, err);
+ goto free;
+ }
+free:
+ kvfree(spec);
+ kfree(dst);
+ return err ? ERR_PTR(err) : rule;
+}
+
+static void del_ethtool_rule(struct mlx5e_priv *priv,
+ struct mlx5e_ethtool_rule *eth_rule)
+{
+ if (eth_rule->rule)
+ mlx5_del_flow_rule(eth_rule->rule);
+ list_del(&eth_rule->list);
+ priv->fs.ethtool.tot_num_rules--;
+ put_flow_table(eth_rule->eth_ft);
+ kfree(eth_rule);
+}
+
+static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv,
+ int location)
+{
+ struct mlx5e_ethtool_rule *iter;
+
+ list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
+ if (iter->flow_spec.location == location)
+ return iter;
+ }
+ return NULL;
+}
+
+static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv,
+ int location)
+{
+ struct mlx5e_ethtool_rule *eth_rule;
+
+ eth_rule = find_ethtool_rule(priv, location);
+ if (eth_rule)
+ del_ethtool_rule(priv, eth_rule);
+
+ eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL);
+ if (!eth_rule)
+ return ERR_PTR(-ENOMEM);
+
+ add_rule_to_list(priv, eth_rule);
+ return eth_rule;
+}
+
+#define MAX_NUM_OF_ETHTOOL_RULES BIT(10)
+
+#define all_ones(field) (field == (__force typeof(field))-1)
+#define all_zeros_or_all_ones(field) \
+ ((field) == 0 || (field) == (__force typeof(field))-1)
+
+static int validate_flow(struct mlx5e_priv *priv,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_tcpip4_spec *l4_mask;
+ struct ethtool_usrip4_spec *l3_mask;
+ struct ethhdr *eth_mask;
+ int num_tuples = 0;
+
+ if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES)
+ return -EINVAL;
+
+ if (fs->ring_cookie >= priv->params.num_channels &&
+ fs->ring_cookie != RX_CLS_FLOW_DISC)
+ return -EINVAL;
+
+ switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+ case ETHER_FLOW:
+ eth_mask = &fs->m_u.ether_spec;
+ if (!is_zero_ether_addr(eth_mask->h_dest))
+ num_tuples++;
+ if (!is_zero_ether_addr(eth_mask->h_source))
+ num_tuples++;
+ if (eth_mask->h_proto)
+ num_tuples++;
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ if (fs->m_u.tcp_ip4_spec.tos)
+ return -EINVAL;
+ l4_mask = &fs->m_u.tcp_ip4_spec;
+ if (l4_mask->ip4src) {
+ if (!all_ones(l4_mask->ip4src))
+ return -EINVAL;
+ num_tuples++;
+ }
+ if (l4_mask->ip4dst) {
+ if (!all_ones(l4_mask->ip4dst))
+ return -EINVAL;
+ num_tuples++;
+ }
+ if (l4_mask->psrc) {
+ if (!all_ones(l4_mask->psrc))
+ return -EINVAL;
+ num_tuples++;
+ }
+ if (l4_mask->pdst) {
+ if (!all_ones(l4_mask->pdst))
+ return -EINVAL;
+ num_tuples++;
+ }
+ /* Flow is TCP/UDP */
+ num_tuples++;
+ break;
+ case IP_USER_FLOW:
+ l3_mask = &fs->m_u.usr_ip4_spec;
+ if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
+ fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
+ return -EINVAL;
+ if (l3_mask->ip4src) {
+ if (!all_ones(l3_mask->ip4src))
+ return -EINVAL;
+ num_tuples++;
+ }
+ if (l3_mask->ip4dst) {
+ if (!all_ones(l3_mask->ip4dst))
+ return -EINVAL;
+ num_tuples++;
+ }
+ /* Flow is IPv4 */
+ num_tuples++;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if ((fs->flow_type & FLOW_EXT)) {
+ if (fs->m_ext.vlan_etype ||
+ (fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK)))
+ return -EINVAL;
+
+ if (fs->m_ext.vlan_tci) {
+ if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
+ return -EINVAL;
+ }
+ num_tuples++;
+ }
+
+ if (fs->flow_type & FLOW_MAC_EXT &&
+ !is_zero_ether_addr(fs->m_ext.h_dest))
+ num_tuples++;
+
+ return num_tuples;
+}
+
+int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct mlx5e_ethtool_table *eth_ft;
+ struct mlx5e_ethtool_rule *eth_rule;
+ struct mlx5_flow_rule *rule;
+ int num_tuples;
+ int err;
+
+ num_tuples = validate_flow(priv, fs);
+ if (num_tuples <= 0) {
+ netdev_warn(priv->netdev, "%s: flow is not valid\n", __func__);
+ return -EINVAL;
+ }
+
+ eth_ft = get_flow_table(priv, fs, num_tuples);
+ if (IS_ERR(eth_ft))
+ return PTR_ERR(eth_ft);
+
+ eth_rule = get_ethtool_rule(priv, fs->location);
+ if (IS_ERR(eth_rule)) {
+ put_flow_table(eth_ft);
+ return PTR_ERR(eth_rule);
+ }
+
+ eth_rule->flow_spec = *fs;
+ eth_rule->eth_ft = eth_ft;
+ if (!eth_ft->ft) {
+ err = -EINVAL;
+ goto del_ethtool_rule;
+ }
+ rule = add_ethtool_flow_rule(priv, eth_ft->ft, fs);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto del_ethtool_rule;
+ }
+
+ eth_rule->rule = rule;
+
+ return 0;
+
+del_ethtool_rule:
+ del_ethtool_rule(priv, eth_rule);
+
+ return err;
+}
+
+int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv,
+ int location)
+{
+ struct mlx5e_ethtool_rule *eth_rule;
+ int err = 0;
+
+ if (location >= MAX_NUM_OF_ETHTOOL_RULES)
+ return -ENOSPC;
+
+ eth_rule = find_ethtool_rule(priv, location);
+ if (!eth_rule) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ del_ethtool_rule(priv, eth_rule);
+out:
+ return err;
+}
+
+int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
+ int location)
+{
+ struct mlx5e_ethtool_rule *eth_rule;
+
+ if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES)
+ return -EINVAL;
+
+ list_for_each_entry(eth_rule, &priv->fs.ethtool.rules, list) {
+ if (eth_rule->flow_spec.location == location) {
+ info->fs = eth_rule->flow_spec;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
+ u32 *rule_locs)
+{
+ int location = 0;
+ int idx = 0;
+ int err = 0;
+
+ while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
+ err = mlx5e_ethtool_get_flow(priv, info, location);
+ if (!err)
+ rule_locs[idx++] = location;
+ location++;
+ }
+ return err;
+}
+
+void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv)
+{
+ struct mlx5e_ethtool_rule *iter;
+ struct mlx5e_ethtool_rule *temp;
+
+ list_for_each_entry_safe(iter, temp, &priv->fs.ethtool.rules, list)
+ del_ethtool_rule(priv, iter);
+}
+
+void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
+{
+ INIT_LIST_HEAD(&priv->fs.ethtool.rules);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 5a4d88c2c..2459c7f3d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -39,22 +39,17 @@
#include "eswitch.h"
#include "vxlan.h"
-enum {
- MLX5_EN_QP_FLUSH_TIMEOUT_MS = 5000,
- MLX5_EN_QP_FLUSH_MSLEEP_QUANT = 20,
- MLX5_EN_QP_FLUSH_MAX_ITER = MLX5_EN_QP_FLUSH_TIMEOUT_MS /
- MLX5_EN_QP_FLUSH_MSLEEP_QUANT,
-};
-
struct mlx5e_rq_param {
- u32 rqc[MLX5_ST_SZ_DW(rqc)];
- struct mlx5_wq_param wq;
+ u32 rqc[MLX5_ST_SZ_DW(rqc)];
+ struct mlx5_wq_param wq;
+ bool am_enabled;
};
struct mlx5e_sq_param {
u32 sqc[MLX5_ST_SZ_DW(sqc)];
struct mlx5_wq_param wq;
u16 max_inline;
+ u8 min_inline_mode;
bool icosq;
};
@@ -62,6 +57,7 @@ struct mlx5e_cq_param {
u32 cqc[MLX5_ST_SZ_DW(cqc)];
struct mlx5_wq_param wq;
u16 eq_ix;
+ u8 cq_period_mode;
};
struct mlx5e_channel_param {
@@ -159,6 +155,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
s->tx_queue_stopped += sq_stats->stopped;
s->tx_queue_wake += sq_stats->wake;
s->tx_queue_dropped += sq_stats->dropped;
+ s->tx_xmit_more += sq_stats->xmit_more;
s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
tx_offload_none += sq_stats->csum_none;
}
@@ -254,14 +251,14 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
mlx5e_update_sw_counters(priv);
}
-static void mlx5e_update_stats_work(struct work_struct *work)
+void mlx5e_update_stats_work(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
update_stats_work);
mutex_lock(&priv->state_lock);
if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- mlx5e_update_stats(priv);
+ priv->profile->update_stats(priv);
queue_delayed_work(priv->wq, dwork,
msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
}
@@ -337,6 +334,9 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
+ rq->mpwqe_mtt_offset = c->ix *
+ MLX5E_REQUIRED_MTTS(1, BIT(priv->params.log_rq_size));
+
rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
rq->wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
@@ -367,6 +367,9 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
wqe->data.byte_count = cpu_to_be32(byte_count);
}
+ INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
+ rq->am.mode = priv->params.rx_cq_period_mode;
+
rq->wq_type = priv->params.rq_wq_type;
rq->pdev = c->pdev;
rq->netdev = c->netdev;
@@ -422,7 +425,6 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
- MLX5_SET(rqc, rqc, flush_in_error_en, 1);
MLX5_SET(rqc, rqc, vsd, priv->params.vlan_strip_disable);
MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
@@ -519,6 +521,27 @@ static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
return -ETIMEDOUT;
}
+static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
+{
+ struct mlx5_wq_ll *wq = &rq->wq;
+ struct mlx5e_rx_wqe *wqe;
+ __be16 wqe_ix_be;
+ u16 wqe_ix;
+
+ /* UMR WQE (if in progress) is always at wq->head */
+ if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
+ mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]);
+
+ while (!mlx5_wq_ll_is_empty(wq)) {
+ wqe_ix_be = *wq->tail_next;
+ wqe_ix = be16_to_cpu(wqe_ix_be);
+ wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
+ rq->dealloc_wqe(rq, wqe_ix);
+ mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
+ &wqe->next.next_wqe_index);
+ }
+}
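/*
 * Editor's sketch (names invented; not part of the patch): the drain
 * loop above walks the linked-list work queue from its tail, releasing
 * each posted WQE and following the ring's own next-pointers until it
 * reports empty -- replacing the old "move RQ to error state and poll
 * with msleep" teardown. The same shape in isolation:
 */
struct demo_wqe { unsigned short next; /* index of next posted WQE */ };

static void demo_drain(struct demo_wqe *ring, unsigned short *tail,
		       int *posted, void (*dealloc)(unsigned short ix))
{
	while (*posted) {
		unsigned short ix = *tail;

		dealloc(ix);		/* release pages/DMA for this WQE */
		*tail = ring[ix].next;	/* follow the ring's linked list */
		(*posted)--;
	}
}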
+
static int mlx5e_open_rq(struct mlx5e_channel *c,
struct mlx5e_rq_param *param,
struct mlx5e_rq *rq)
@@ -539,7 +562,8 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
if (err)
goto err_disable_rq;
- set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
+ if (param->am_enabled)
+ set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP;
sq->ico_wqe_info[pi].num_wqebbs = 1;
@@ -557,22 +581,9 @@ err_destroy_rq:
static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
- int tout = 0;
- int err;
-
- clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
+ set_bit(MLX5E_RQ_STATE_FLUSH, &rq->state);
napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
-
- err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
- while (!mlx5_wq_ll_is_empty(&rq->wq) && !err &&
- tout++ < MLX5_EN_QP_FLUSH_MAX_ITER)
- msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
-
- if (err || tout == MLX5_EN_QP_FLUSH_MAX_ITER)
- set_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state);
-
- /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
- napi_synchronize(&rq->channel->napi);
+ cancel_work_sync(&rq->am.work);
mlx5e_disable_rq(rq);
mlx5e_free_rx_descs(rq);
@@ -639,6 +650,9 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
}
sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
sq->max_inline = param->max_inline;
+ sq->min_inline_mode =
+ MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5E_INLINE_MODE_VPORT_CONTEXT ?
+ param->min_inline_mode : 0;
err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
if (err)
@@ -721,6 +735,7 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
MLX5_SET(sqc, sqc, tis_num_0, param->icosq ? 0 : priv->tisn[sq->tc]);
MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
+ MLX5_SET(sqc, sqc, min_wqe_inline_mode, sq->min_inline_mode);
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
MLX5_SET(sqc, sqc, tis_lst_sz, param->icosq ? 0 : 1);
MLX5_SET(sqc, sqc, flush_in_error_en, 1);
@@ -741,7 +756,8 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
return err;
}
-static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
+static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state,
+ int next_state, bool update_rl, int rl_index)
{
struct mlx5e_channel *c = sq->channel;
struct mlx5e_priv *priv = c->priv;
@@ -761,6 +777,10 @@ static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
MLX5_SET(modify_sq_in, in, sq_state, curr_state);
MLX5_SET(sqc, sqc, state, next_state);
+ if (update_rl && next_state == MLX5_SQC_STATE_RDY) {
+ MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
+ MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
+ }
err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);
@@ -776,6 +796,8 @@ static void mlx5e_disable_sq(struct mlx5e_sq *sq)
struct mlx5_core_dev *mdev = priv->mdev;
mlx5_core_destroy_sq(mdev, sq->sqn);
+ if (sq->rate_limit)
+ mlx5_rl_remove_rate(mdev, sq->rate_limit);
}
static int mlx5e_open_sq(struct mlx5e_channel *c,
@@ -793,12 +815,12 @@ static int mlx5e_open_sq(struct mlx5e_channel *c,
if (err)
goto err_destroy_sq;
- err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
+ err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
+ false, 0);
if (err)
goto err_disable_sq;
if (sq->txq) {
- set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
netdev_tx_reset_queue(sq->txq);
netif_tx_start_queue(sq->txq);
}
@@ -822,38 +844,20 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
- int tout = 0;
- int err;
+ set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
+ /* prevent netif_tx_wake_queue */
+ napi_synchronize(&sq->channel->napi);
if (sq->txq) {
- clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
- /* prevent netif_tx_wake_queue */
- napi_synchronize(&sq->channel->napi);
netif_tx_disable_queue(sq->txq);
- /* ensure hw is notified of all pending wqes */
+ /* ring the last doorbell so HW sees every pending WQE */
if (mlx5e_sq_has_room_for(sq, 1))
mlx5e_send_nop(sq, true);
-
- err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
- MLX5_SQC_STATE_ERR);
- if (err)
- set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
}
- /* wait till sq is empty, unless a TX timeout occurred on this SQ */
- while (sq->cc != sq->pc &&
- !test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)) {
- msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
- if (tout++ > MLX5_EN_QP_FLUSH_MAX_ITER)
- set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
- }
-
- /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
- napi_synchronize(&sq->channel->napi);
-
- mlx5e_free_tx_descs(sq);
mlx5e_disable_sq(sq);
+ mlx5e_free_tx_descs(sq);
mlx5e_destroy_sq(sq);
}
@@ -891,7 +895,7 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
mcq->comp = mlx5e_completion_event;
mcq->event = mlx5e_cq_error_event;
mcq->irqn = irqn;
- mcq->uar = &priv->cq_uar;
+ mcq->uar = &mdev->mlx5e_res.cq_uar;
for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
@@ -938,6 +942,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
+ MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
MLX5_SET(cqc, cqc, c_eqn, eqn);
MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
@@ -967,8 +972,7 @@ static void mlx5e_disable_cq(struct mlx5e_cq *cq)
static int mlx5e_open_cq(struct mlx5e_channel *c,
struct mlx5e_cq_param *param,
struct mlx5e_cq *cq,
- u16 moderation_usecs,
- u16 moderation_frames)
+ struct mlx5e_cq_moder moderation)
{
int err;
struct mlx5e_priv *priv = c->priv;
@@ -984,8 +988,8 @@ static int mlx5e_open_cq(struct mlx5e_channel *c,
if (MLX5_CAP_GEN(mdev, cq_moderation))
mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
- moderation_usecs,
- moderation_frames);
+ moderation.usec,
+ moderation.pkts);
return 0;
err_destroy_cq:
@@ -1014,8 +1018,7 @@ static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
for (tc = 0; tc < c->num_tc; tc++) {
err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
- priv->params.tx_cq_moderation_usec,
- priv->params.tx_cq_moderation_pkts);
+ priv->params.tx_cq_moderation);
if (err)
goto err_close_tx_cqs;
}
@@ -1070,19 +1073,96 @@ static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix)
{
int i;
- for (i = 0; i < MLX5E_MAX_NUM_TC; i++)
+ for (i = 0; i < priv->profile->max_tc; i++)
priv->channeltc_to_txq_map[ix][i] =
ix + i * priv->params.num_channels;
}
+static int mlx5e_set_sq_maxrate(struct net_device *dev,
+ struct mlx5e_sq *sq, u32 rate)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u16 rl_index = 0;
+ int err;
+
+ if (rate == sq->rate_limit)
+ /* nothing to do */
+ return 0;
+
+ if (sq->rate_limit)
+ /* remove the current rl index to free space for the next ones */
+ mlx5_rl_remove_rate(mdev, sq->rate_limit);
+
+ sq->rate_limit = 0;
+
+ if (rate) {
+ err = mlx5_rl_add_rate(mdev, rate, &rl_index);
+ if (err) {
+ netdev_err(dev, "Failed configuring rate %u: %d\n",
+ rate, err);
+ return err;
+ }
+ }
+
+ err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
+ MLX5_SQC_STATE_RDY, true, rl_index);
+ if (err) {
+ netdev_err(dev, "Failed configuring rate %u: %d\n",
+ rate, err);
+ /* remove the rate from the table */
+ if (rate)
+ mlx5_rl_remove_rate(mdev, rate);
+ return err;
+ }
+
+ sq->rate_limit = rate;
+ return 0;
+}
+
+static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_sq *sq = priv->txq_to_sq_map[index];
+ int err = 0;
+
+ if (!mlx5_rl_is_supported(mdev)) {
+ netdev_err(dev, "Rate limiting is not supported on this device\n");
+ return -EINVAL;
+ }
+
+ /* rate is given in Mb/sec, HW config is in Kb/sec */
+ rate = rate << 10;
+
+ /* Check whether the rate is in the valid range; 0 is always valid */
+ if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
+ netdev_err(dev, "TX rate %u, is not in range\n", rate);
+ return -ERANGE;
+ }
+
+ mutex_lock(&priv->state_lock);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ err = mlx5e_set_sq_maxrate(dev, sq, rate);
+ if (!err)
+ priv->tx_rates[index] = rate;
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
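mlx5e_set_tx_maxrate() receives the rate in Mb/s while the device table is programmed in Kb/s; "rate << 10" multiplies by 1024 rather than 1000, a cheap approximation that overshoots the cap by about 2.4%. A standalone sketch of the conversion and range check, with made-up bounds standing in for mlx5_rl_is_in_range():

#include <stdbool.h>
#include <stdint.h>

#define TOY_RL_MIN_KBPS 1024u                  /* hypothetical device limits */
#define TOY_RL_MAX_KBPS (100u * 1000u * 1000u)

static bool toy_rate_ok(uint32_t mbps, uint32_t *kbps)
{
	*kbps = mbps << 10;                     /* Mb/s -> Kb/s, x1024 approx */
	return mbps == 0 ||                     /* 0 means "unlimited" */
	       (*kbps >= TOY_RL_MIN_KBPS && *kbps <= TOY_RL_MAX_KBPS);
}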
+
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_channel_param *cparam,
struct mlx5e_channel **cp)
{
+ struct mlx5e_cq_moder icosq_cq_moder = {0, 0};
struct net_device *netdev = priv->netdev;
+ struct mlx5e_cq_moder rx_cq_profile;
int cpu = mlx5e_get_cpu(priv, ix);
struct mlx5e_channel *c;
+ struct mlx5e_sq *sq;
int err;
+ int i;
c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
if (!c)
@@ -1093,14 +1173,19 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->cpu = cpu;
c->pdev = &priv->mdev->pdev->dev;
c->netdev = priv->netdev;
- c->mkey_be = cpu_to_be32(priv->mkey.key);
+ c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
c->num_tc = priv->params.num_tc;
+ if (priv->params.rx_am_enabled)
+ rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode);
+ else
+ rx_cq_profile = priv->params.rx_cq_moderation;
+
mlx5e_build_channeltc_to_txq_map(priv, ix);
netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
- err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, 0, 0);
+ err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, icosq_cq_moder);
if (err)
goto err_napi_del;
@@ -1109,8 +1194,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
goto err_close_icosq_cq;
err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
- priv->params.rx_cq_moderation_usec,
- priv->params.rx_cq_moderation_pkts);
+ rx_cq_profile);
if (err)
goto err_close_tx_cqs;
@@ -1124,6 +1208,16 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
if (err)
goto err_close_icosq;
+ for (i = 0; i < priv->params.num_tc; i++) {
+ u32 txq_ix = priv->channeltc_to_txq_map[ix][i];
+
+ if (priv->tx_rates[txq_ix]) {
+ sq = priv->txq_to_sq_map[txq_ix];
+ mlx5e_set_sq_maxrate(priv->netdev, sq,
+ priv->tx_rates[txq_ix]);
+ }
+ }
+
err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
if (err)
goto err_close_sqs;
@@ -1195,11 +1289,13 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
- MLX5_SET(wq, wq, pd, priv->pdn);
+ MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
param->wq.linear = 1;
+
+ param->am_enabled = priv->params.rx_am_enabled;
}
static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
@@ -1218,7 +1314,7 @@ static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
- MLX5_SET(wq, wq, pd, priv->pdn);
+ MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
}
@@ -1233,6 +1329,7 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
param->max_inline = priv->params.tx_max_inline;
+ param->min_inline_mode = priv->params.tx_min_inline_mode;
}
static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
@@ -1240,7 +1337,7 @@ static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
{
void *cqc = param->cqc;
- MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
+ MLX5_SET(cqc, cqc, uar_page, priv->mdev->mlx5e_res.cq_uar.index);
}
static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
@@ -1265,6 +1362,8 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
}
mlx5e_build_common_cq_param(priv, param);
+
+ param->cq_period_mode = priv->params.rx_cq_period_mode;
}
static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
@@ -1275,6 +1374,8 @@ static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
mlx5e_build_common_cq_param(priv, param);
+
+ param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
}
static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
@@ -1286,6 +1387,8 @@ static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
mlx5e_build_common_cq_param(priv, param);
+
+ param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
}
static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
@@ -1432,7 +1535,8 @@ static void mlx5e_fill_direct_rqt_rqn(struct mlx5e_priv *priv, void *rqtc,
MLX5_SET(rqtc, rqtc, rq_num[0], rqn);
}
-static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, int ix, u32 *rqtn)
+static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz,
+ int ix, struct mlx5e_rqt *rqt)
{
struct mlx5_core_dev *mdev = priv->mdev;
void *rqtc;
@@ -1455,34 +1559,36 @@ static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, int ix, u32 *rqtn)
else
mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
- err = mlx5_core_create_rqt(mdev, in, inlen, rqtn);
+ err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
+ if (!err)
+ rqt->enabled = true;
kvfree(in);
return err;
}
-static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, u32 rqtn)
+void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
{
- mlx5_core_destroy_rqt(priv->mdev, rqtn);
+ rqt->enabled = false;
+ mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
}
-static int mlx5e_create_rqts(struct mlx5e_priv *priv)
+static int mlx5e_create_indirect_rqts(struct mlx5e_priv *priv)
{
- int nch = mlx5e_get_max_num_channels(priv->mdev);
- u32 *rqtn;
+ struct mlx5e_rqt *rqt = &priv->indir_rqt;
+
+ return mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqt);
+}
+
+int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rqt *rqt;
int err;
int ix;
- /* Indirect RQT */
- rqtn = &priv->indir_rqtn;
- err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqtn);
- if (err)
- return err;
-
- /* Direct RQTs */
- for (ix = 0; ix < nch; ix++) {
- rqtn = &priv->direct_tir[ix].rqtn;
- err = mlx5e_create_rqt(priv, 1 /*size */, ix, rqtn);
+ for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
+ rqt = &priv->direct_tir[ix].rqt;
+ err = mlx5e_create_rqt(priv, 1 /* size */, ix, rqt);
if (err)
goto err_destroy_rqts;
}
@@ -1491,24 +1597,11 @@ static int mlx5e_create_rqts(struct mlx5e_priv *priv)
err_destroy_rqts:
for (ix--; ix >= 0; ix--)
- mlx5e_destroy_rqt(priv, priv->direct_tir[ix].rqtn);
-
- mlx5e_destroy_rqt(priv, priv->indir_rqtn);
+ mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);
return err;
}
-static void mlx5e_destroy_rqts(struct mlx5e_priv *priv)
-{
- int nch = mlx5e_get_max_num_channels(priv->mdev);
- int i;
-
- for (i = 0; i < nch; i++)
- mlx5e_destroy_rqt(priv, priv->direct_tir[i].rqtn);
-
- mlx5e_destroy_rqt(priv, priv->indir_rqtn);
-}
-
int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix)
{
struct mlx5_core_dev *mdev = priv->mdev;
@@ -1544,10 +1637,15 @@ static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
u32 rqtn;
int ix;
- rqtn = priv->indir_rqtn;
- mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
+ if (priv->indir_rqt.enabled) {
+ rqtn = priv->indir_rqt.rqtn;
+ mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
+ }
+
for (ix = 0; ix < priv->params.num_channels; ix++) {
- rqtn = priv->direct_tir[ix].rqtn;
+ if (!priv->direct_tir[ix].rqt.enabled)
+ continue;
+ rqtn = priv->direct_tir[ix].rqt.rqtn;
mlx5e_redirect_rqt(priv, rqtn, 1, ix);
}
}
@@ -1607,13 +1705,13 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
mlx5e_build_tir_ctx_lro(tirc, priv);
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
- err = mlx5_core_modify_tir(mdev, priv->indir_tirn[tt], in,
+ err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
inlen);
if (err)
goto free_in;
}
- for (ix = 0; ix < mlx5e_get_max_num_channels(mdev); ix++) {
+ for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
in, inlen);
if (err)
@@ -1626,40 +1724,6 @@ free_in:
return err;
}
-static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
-{
- void *in;
- int inlen;
- int err;
- int i;
-
- inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
- in = mlx5_vzalloc(inlen);
- if (!in)
- return -ENOMEM;
-
- MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
-
- for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
- err = mlx5_core_modify_tir(priv->mdev, priv->indir_tirn[i], in,
- inlen);
- if (err)
- return err;
- }
-
- for (i = 0; i < priv->params.num_channels; i++) {
- err = mlx5_core_modify_tir(priv->mdev,
- priv->direct_tir[i].tirn, in,
- inlen);
- if (err)
- return err;
- }
-
- kvfree(in);
-
- return 0;
-}
-
static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
{
struct mlx5_core_dev *mdev = priv->mdev;
@@ -1731,6 +1795,7 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
int mlx5e_open_locked(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
int num_txqs;
int err;
@@ -1742,10 +1807,6 @@ int mlx5e_open_locked(struct net_device *netdev)
netif_set_real_num_tx_queues(netdev, num_txqs);
netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
- err = mlx5e_set_dev_port_mtu(netdev);
- if (err)
- goto err_clear_state_opened_flag;
-
err = mlx5e_open_channels(priv);
if (err) {
netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
@@ -1753,7 +1814,7 @@ int mlx5e_open_locked(struct net_device *netdev)
goto err_clear_state_opened_flag;
}
- err = mlx5e_refresh_tirs_self_loopback_enable(priv);
+ err = mlx5e_refresh_tirs_self_loopback_enable(priv->mdev);
if (err) {
netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n",
__func__, err);
@@ -1766,9 +1827,14 @@ int mlx5e_open_locked(struct net_device *netdev)
#ifdef CONFIG_RFS_ACCEL
priv->netdev->rx_cpu_rmap = priv->mdev->rmap;
#endif
+ if (priv->profile->update_stats)
+ queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
- queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
-
+ if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
+ err = mlx5e_add_sqs_fwd_rules(priv);
+ if (err)
+ goto err_close_channels;
+ }
return 0;
err_close_channels:
@@ -1778,7 +1844,7 @@ err_clear_state_opened_flag:
return err;
}
-static int mlx5e_open(struct net_device *netdev)
+int mlx5e_open(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
@@ -1793,6 +1859,7 @@ static int mlx5e_open(struct net_device *netdev)
int mlx5e_close_locked(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
/* May already be CLOSED if a previous configuration operation
* (e.g. an RX/TX queue size change) that involved close & open failed.
@@ -1802,6 +1869,9 @@ int mlx5e_close_locked(struct net_device *netdev)
clear_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (MLX5_CAP_GEN(mdev, vport_group_manager))
+ mlx5e_remove_sqs_fwd_rules(priv);
+
mlx5e_timestamp_cleanup(priv);
netif_carrier_off(priv->netdev);
mlx5e_redirect_rqts(priv);
@@ -1810,7 +1880,7 @@ int mlx5e_close_locked(struct net_device *netdev)
return 0;
}
-static int mlx5e_close(struct net_device *netdev)
+int mlx5e_close(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
@@ -1869,7 +1939,7 @@ static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
mcq->comp = mlx5e_completion_event;
mcq->event = mlx5e_cq_error_event;
mcq->irqn = irqn;
- mcq->uar = &priv->cq_uar;
+ mcq->uar = &mdev->mlx5e_res.cq_uar;
cq->priv = priv;
@@ -1935,7 +2005,7 @@ static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
memset(in, 0, sizeof(in));
MLX5_SET(tisc, tisc, prio, tc << 1);
- MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
+ MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
}
@@ -1945,12 +2015,12 @@ static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
}
-static int mlx5e_create_tises(struct mlx5e_priv *priv)
+int mlx5e_create_tises(struct mlx5e_priv *priv)
{
int err;
int tc;
- for (tc = 0; tc < MLX5E_MAX_NUM_TC; tc++) {
+ for (tc = 0; tc < priv->profile->max_tc; tc++) {
err = mlx5e_create_tis(priv, tc);
if (err)
goto err_close_tises;
@@ -1965,11 +2035,11 @@ err_close_tises:
return err;
}
-static void mlx5e_destroy_tises(struct mlx5e_priv *priv)
+void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
int tc;
- for (tc = 0; tc < MLX5E_MAX_NUM_TC; tc++)
+ for (tc = 0; tc < priv->profile->max_tc; tc++)
mlx5e_destroy_tis(priv, tc);
}
@@ -1978,7 +2048,7 @@ static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
{
void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
- MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
+ MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
MLX5_HASH_FIELD_SEL_DST_IP)
@@ -1995,7 +2065,7 @@ static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
mlx5e_build_tir_ctx_lro(tirc, priv);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
- MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqtn);
+ MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
mlx5e_build_tir_ctx_hash(tirc, priv);
switch (tt) {
@@ -2085,7 +2155,7 @@ static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
u32 rqtn)
{
- MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
+ MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
mlx5e_build_tir_ctx_lro(tirc, priv);
@@ -2094,15 +2164,13 @@ static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
}
-static int mlx5e_create_tirs(struct mlx5e_priv *priv)
+static int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
{
- int nch = mlx5e_get_max_num_channels(priv->mdev);
+ struct mlx5e_tir *tir;
void *tirc;
int inlen;
- u32 *tirn;
int err;
u32 *in;
- int ix;
int tt;
inlen = MLX5_ST_SZ_BYTES(create_tir_in);
@@ -2110,25 +2178,51 @@ static int mlx5e_create_tirs(struct mlx5e_priv *priv)
if (!in)
return -ENOMEM;
- /* indirect tirs */
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
memset(in, 0, inlen);
- tirn = &priv->indir_tirn[tt];
+ tir = &priv->indir_tir[tt];
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
mlx5e_build_indir_tir_ctx(priv, tirc, tt);
- err = mlx5_core_create_tir(priv->mdev, in, inlen, tirn);
+ err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
if (err)
goto err_destroy_tirs;
}
- /* direct tirs */
+ kvfree(in);
+
+ return 0;
+
+err_destroy_tirs:
+ for (tt--; tt >= 0; tt--)
+ mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);
+
+ kvfree(in);
+
+ return err;
+}
+
+int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
+{
+ int nch = priv->profile->max_nch(priv->mdev);
+ struct mlx5e_tir *tir;
+ void *tirc;
+ int inlen;
+ int err;
+ u32 *in;
+ int ix;
+
+ inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
for (ix = 0; ix < nch; ix++) {
memset(in, 0, inlen);
- tirn = &priv->direct_tir[ix].tirn;
+ tir = &priv->direct_tir[ix];
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
mlx5e_build_direct_tir_ctx(priv, tirc,
- priv->direct_tir[ix].rqtn);
- err = mlx5_core_create_tir(priv->mdev, in, inlen, tirn);
+ priv->direct_tir[ix].rqt.rqtn);
+ err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
if (err)
goto err_destroy_ch_tirs;
}
@@ -2139,27 +2233,28 @@ static int mlx5e_create_tirs(struct mlx5e_priv *priv)
err_destroy_ch_tirs:
for (ix--; ix >= 0; ix--)
- mlx5_core_destroy_tir(priv->mdev, priv->direct_tir[ix].tirn);
-
-err_destroy_tirs:
- for (tt--; tt >= 0; tt--)
- mlx5_core_destroy_tir(priv->mdev, priv->indir_tirn[tt]);
+ mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);
kvfree(in);
return err;
}
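The err_destroy_ch_tirs path above is the usual reverse-unwind idiom: on failure at index ix, only the ix entries that were actually created get destroyed, in reverse order. The same shape in isolation, with hypothetical toy_* helpers:

static int  toy_create_one(int ix)  { (void)ix; return 0; }  /* stub */
static void toy_destroy_one(int ix) { (void)ix; }            /* stub */

static int toy_create_all(int n)
{
	int ix, err;

	for (ix = 0; ix < n; ix++) {
		err = toy_create_one(ix);
		if (err)
			goto err_unwind;
	}
	return 0;

err_unwind:
	for (ix--; ix >= 0; ix--)       /* undo only what succeeded */
		toy_destroy_one(ix);
	return err;
}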
-static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
+static void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
{
- int nch = mlx5e_get_max_num_channels(priv->mdev);
int i;
- for (i = 0; i < nch; i++)
- mlx5_core_destroy_tir(priv->mdev, priv->direct_tir[i].tirn);
-
for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
- mlx5_core_destroy_tir(priv->mdev, priv->indir_tirn[i]);
+ mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
+}
+
+void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
+{
+ int nch = priv->profile->max_nch(priv->mdev);
+ int i;
+
+ for (i = 0; i < nch; i++)
+ mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
}
int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd)
@@ -2233,7 +2328,7 @@ mqprio:
return mlx5e_setup_tc(dev, tc->tc);
}
-static struct rtnl_link_stats64 *
+struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -2455,6 +2550,7 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
u16 max_mtu;
u16 min_mtu;
int err = 0;
+ bool reset;
mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
@@ -2470,13 +2566,18 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
mutex_lock(&priv->state_lock);
+ reset = !priv->params.lro_en &&
+ (priv->params.rq_wq_type !=
+ MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
+
was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (was_opened)
+ if (was_opened && reset)
mlx5e_close_locked(netdev);
netdev->mtu = new_mtu;
+ mlx5e_set_dev_port_mtu(netdev);
- if (was_opened)
+ if (was_opened && reset)
err = mlx5e_open_locked(netdev);
mutex_unlock(&priv->state_lock);
@@ -2585,25 +2686,31 @@ static int mlx5e_get_vf_stats(struct net_device *dev,
}
static void mlx5e_add_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+ struct udp_tunnel_info *ti)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
if (!mlx5e_vxlan_allowed(priv->mdev))
return;
- mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 1);
+ mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
}
static void mlx5e_del_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+ struct udp_tunnel_info *ti)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
if (!mlx5e_vxlan_allowed(priv->mdev))
return;
- mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 0);
+ mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
}
static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
@@ -2670,7 +2777,7 @@ static void mlx5e_tx_timeout(struct net_device *dev)
if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
continue;
sched_work = true;
- set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
+ set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
}
@@ -2693,6 +2800,7 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = {
.ndo_set_features = mlx5e_set_features,
.ndo_change_mtu = mlx5e_change_mtu,
.ndo_do_ioctl = mlx5e_ioctl,
+ .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
@@ -2713,8 +2821,9 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
.ndo_set_features = mlx5e_set_features,
.ndo_change_mtu = mlx5e_change_mtu,
.ndo_do_ioctl = mlx5e_ioctl,
- .ndo_add_vxlan_port = mlx5e_add_vxlan_port,
- .ndo_del_vxlan_port = mlx5e_del_vxlan_port,
+ .ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
+ .ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
+ .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
.ndo_features_check = mlx5e_features_check,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
@@ -2844,13 +2953,48 @@ static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
(pci_bw < 40000) && (pci_bw < link_speed));
}
-static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- int num_channels)
+void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+{
+ params->rx_cq_period_mode = cq_period_mode;
+
+ params->rx_cq_moderation.pkts =
+ MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
+ params->rx_cq_moderation.usec =
+ MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
+
+ if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
+ params->rx_cq_moderation.usec =
+ MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
+}
+
+static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev,
+ u8 *min_inline_mode)
+{
+ switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
+ case MLX5E_INLINE_MODE_L2:
+ *min_inline_mode = MLX5_INLINE_MODE_L2;
+ break;
+ case MLX5E_INLINE_MODE_VPORT_CONTEXT:
+ mlx5_query_nic_vport_min_inline(mdev,
+ min_inline_mode);
+ break;
+ case MLX5_INLINE_MODE_NOT_REQUIRED:
+ *min_inline_mode = MLX5_INLINE_MODE_NONE;
+ break;
+ }
+}
+
+static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
u32 link_speed = 0;
u32 pci_bw = 0;
+ u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
+ MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
priv->params.log_sq_size =
MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
@@ -2896,15 +3040,16 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
BIT(priv->params.log_rq_size));
- priv->params.rx_cq_moderation_usec =
- MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
- priv->params.rx_cq_moderation_pkts =
- MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
- priv->params.tx_cq_moderation_usec =
+
+ priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+ mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode);
+
+ priv->params.tx_cq_moderation.usec =
MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
- priv->params.tx_cq_moderation_pkts =
+ priv->params.tx_cq_moderation.pkts =
MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
+ mlx5e_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
priv->params.num_tc = 1;
priv->params.rss_hfunc = ETH_RSS_HASH_XOR;
@@ -2912,14 +3057,20 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
sizeof(priv->params.toeplitz_hash_key));
mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
- MLX5E_INDIR_RQT_SIZE, num_channels);
+ MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev));
priv->params.lro_wqe_sz =
MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+ /* Initialize pflags */
+ MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER,
+ priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
+
priv->mdev = mdev;
priv->netdev = netdev;
- priv->params.num_channels = num_channels;
+ priv->params.num_channels = profile->max_nch(mdev);
+ priv->profile = profile;
+ priv->ppriv = ppriv;
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_ets_init(priv);
@@ -2945,7 +3096,11 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
}
}
-static void mlx5e_build_netdev(struct net_device *netdev)
+static const struct switchdev_ops mlx5e_switchdev_ops = {
+ .switchdev_port_attr_get = mlx5e_attr_get,
+};
+
+static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
@@ -3026,31 +3181,11 @@ static void mlx5e_build_netdev(struct net_device *netdev)
netdev->priv_flags |= IFF_UNICAST_FLT;
mlx5e_set_netdev_dev_addr(netdev);
-}
-static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
- struct mlx5_core_mkey *mkey)
-{
- struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5_create_mkey_mbox_in *in;
- int err;
-
- in = mlx5_vzalloc(sizeof(*in));
- if (!in)
- return -ENOMEM;
-
- in->seg.flags = MLX5_PERM_LOCAL_WRITE |
- MLX5_PERM_LOCAL_READ |
- MLX5_ACCESS_MODE_PA;
- in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
- in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
-
- err = mlx5_core_create_mkey(mdev, mkey, in, sizeof(*in), NULL, NULL,
- NULL);
-
- kvfree(in);
-
- return err;
+#ifdef CONFIG_NET_SWITCHDEV
+ if (MLX5_CAP_GEN(mdev, vport_group_manager))
+ netdev->switchdev_ops = &mlx5e_switchdev_ops;
+#endif
}
static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
@@ -3079,8 +3214,8 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
struct mlx5_create_mkey_mbox_in *in;
struct mlx5_mkey_seg *mkc;
int inlen = sizeof(*in);
- u64 npages =
- mlx5e_get_max_num_channels(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS;
+ u64 npages = MLX5E_REQUIRED_MTTS(priv->profile->max_nch(mdev),
+ BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW));
int err;
in = mlx5_vzalloc(inlen);
@@ -3094,10 +3229,12 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
MLX5_PERM_LOCAL_WRITE |
MLX5_ACCESS_MODE_MTT;
+ npages = min_t(u32, ALIGN(U16_MAX, 4) * 2, npages);
+
mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
- mkc->flags_pd = cpu_to_be32(priv->pdn);
+ mkc->flags_pd = cpu_to_be32(mdev->mlx5e_res.pdn);
mkc->len = cpu_to_be64(npages << PAGE_SHIFT);
- mkc->xlt_oct_size = cpu_to_be32(mlx5e_get_mtt_octw(npages));
+ mkc->xlt_oct_size = cpu_to_be32(MLX5_MTT_OCTW(npages));
mkc->log2_page_size = PAGE_SHIFT;
err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen, NULL,
@@ -3108,160 +3245,236 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
return err;
}
-static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
+static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
{
- struct net_device *netdev;
- struct mlx5e_priv *priv;
- int nch = mlx5e_get_max_num_channels(mdev);
- int err;
-
- if (mlx5e_check_required_hca_cap(mdev))
- return NULL;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
- netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
- nch * MLX5E_MAX_NUM_TC,
- nch);
- if (!netdev) {
- mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
- return NULL;
- }
+ mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);
+ mlx5e_build_nic_netdev(netdev);
+ mlx5e_vxlan_init(priv);
+}
- mlx5e_build_netdev_priv(mdev, netdev, nch);
- mlx5e_build_netdev(netdev);
+static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
- netif_carrier_off(netdev);
+ mlx5e_vxlan_cleanup(priv);
- priv = netdev_priv(netdev);
+ if (MLX5_CAP_GEN(mdev, vport_group_manager))
+ mlx5_eswitch_unregister_vport_rep(esw, 0);
+}
- priv->wq = create_singlethread_workqueue("mlx5e");
- if (!priv->wq)
- goto err_free_netdev;
+static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int err;
+ int i;
- err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false);
+ err = mlx5e_create_indirect_rqts(priv);
if (err) {
- mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
- goto err_destroy_wq;
+ mlx5_core_warn(mdev, "create indirect rqts failed, %d\n", err);
+ return err;
}
- err = mlx5_core_alloc_pd(mdev, &priv->pdn);
+ err = mlx5e_create_direct_rqts(priv);
if (err) {
- mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
- goto err_unmap_free_uar;
+ mlx5_core_warn(mdev, "create direct rqts failed, %d\n", err);
+ goto err_destroy_indirect_rqts;
}
- err = mlx5_core_alloc_transport_domain(mdev, &priv->tdn);
+ err = mlx5e_create_indirect_tirs(priv);
if (err) {
- mlx5_core_err(mdev, "alloc td failed, %d\n", err);
- goto err_dealloc_pd;
+ mlx5_core_warn(mdev, "create indirect tirs failed, %d\n", err);
+ goto err_destroy_direct_rqts;
}
- err = mlx5e_create_mkey(priv, priv->pdn, &priv->mkey);
+ err = mlx5e_create_direct_tirs(priv);
if (err) {
- mlx5_core_err(mdev, "create mkey failed, %d\n", err);
- goto err_dealloc_transport_domain;
+ mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err);
+ goto err_destroy_indirect_tirs;
}
- err = mlx5e_create_umr_mkey(priv);
+ err = mlx5e_create_flow_steering(priv);
if (err) {
- mlx5_core_err(mdev, "create umr mkey failed, %d\n", err);
- goto err_destroy_mkey;
+ mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
+ goto err_destroy_direct_tirs;
}
+ err = mlx5e_tc_init(priv);
+ if (err)
+ goto err_destroy_flow_steering;
+
+ return 0;
+
+err_destroy_flow_steering:
+ mlx5e_destroy_flow_steering(priv);
+err_destroy_direct_tirs:
+ mlx5e_destroy_direct_tirs(priv);
+err_destroy_indirect_tirs:
+ mlx5e_destroy_indirect_tirs(priv);
+err_destroy_direct_rqts:
+ for (i = 0; i < priv->profile->max_nch(mdev); i++)
+ mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
+err_destroy_indirect_rqts:
+ mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+ return err;
+}
+
+static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
+{
+ int i;
+
+ mlx5e_tc_cleanup(priv);
+ mlx5e_destroy_flow_steering(priv);
+ mlx5e_destroy_direct_tirs(priv);
+ mlx5e_destroy_indirect_tirs(priv);
+ for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
+ mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
+ mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+}
+
+static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
+{
+ int err;
+
err = mlx5e_create_tises(priv);
if (err) {
- mlx5_core_warn(mdev, "create tises failed, %d\n", err);
- goto err_destroy_umr_mkey;
+ mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
+ return err;
}
- err = mlx5e_open_drop_rq(priv);
- if (err) {
- mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
- goto err_destroy_tises;
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets);
+#endif
+ return 0;
+}
+
+static void mlx5e_nic_enable(struct mlx5e_priv *priv)
+{
+ struct net_device *netdev = priv->netdev;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ struct mlx5_eswitch_rep rep;
+
+ if (mlx5e_vxlan_allowed(mdev)) {
+ rtnl_lock();
+ udp_tunnel_get_rx_info(netdev);
+ rtnl_unlock();
}
- err = mlx5e_create_rqts(priv);
- if (err) {
- mlx5_core_warn(mdev, "create rqts failed, %d\n", err);
- goto err_close_drop_rq;
+ mlx5e_enable_async_events(priv);
+ queue_work(priv->wq, &priv->set_rx_mode_work);
+
+ if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
+ mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
+ rep.load = mlx5e_nic_rep_load;
+ rep.unload = mlx5e_nic_rep_unload;
+ rep.vport = 0;
+ rep.priv_data = priv;
+ mlx5_eswitch_register_vport_rep(esw, &rep);
+ }
+}
+
+static void mlx5e_nic_disable(struct mlx5e_priv *priv)
+{
+ queue_work(priv->wq, &priv->set_rx_mode_work);
+ mlx5e_disable_async_events(priv);
+}
+
+static const struct mlx5e_profile mlx5e_nic_profile = {
+ .init = mlx5e_nic_init,
+ .cleanup = mlx5e_nic_cleanup,
+ .init_rx = mlx5e_init_nic_rx,
+ .cleanup_rx = mlx5e_cleanup_nic_rx,
+ .init_tx = mlx5e_init_nic_tx,
+ .cleanup_tx = mlx5e_cleanup_nic_tx,
+ .enable = mlx5e_nic_enable,
+ .disable = mlx5e_nic_disable,
+ .update_stats = mlx5e_update_stats,
+ .max_nch = mlx5e_get_max_num_channels,
+ .max_tc = MLX5E_MAX_NUM_TC,
+};
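mlx5e_nic_profile bundles the flavour-specific callbacks into one ops table so mlx5e_create_netdev() can drive any netdev flavour (plain NIC or the new vport representor) through the same path, NULL-checking the optional hooks before calling them. Reduced to its essentials with hypothetical toy_* names:

struct toy_profile {
	int  (*init)(void *priv);       /* mandatory */
	void (*enable)(void *priv);     /* optional, may be NULL */
};

static int toy_attach(const struct toy_profile *p, void *priv)
{
	int err = p->init(priv);

	if (err)
		return err;
	if (p->enable)                  /* mirrors "if (profile->enable)" */
		p->enable(priv);
	return 0;
}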
+
+void *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *profile, void *ppriv)
+{
+ struct net_device *netdev;
+ struct mlx5e_priv *priv;
+ int nch = profile->max_nch(mdev);
+ int err;
+
+ netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
+ nch * profile->max_tc,
+ nch);
+ if (!netdev) {
+ mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
+ return NULL;
}
- err = mlx5e_create_tirs(priv);
+ profile->init(mdev, netdev, profile, ppriv);
+
+ netif_carrier_off(netdev);
+
+ priv = netdev_priv(netdev);
+
+ priv->wq = create_singlethread_workqueue("mlx5e");
+ if (!priv->wq)
+ goto err_free_netdev;
+
+ err = mlx5e_create_umr_mkey(priv);
if (err) {
- mlx5_core_warn(mdev, "create tirs failed, %d\n", err);
- goto err_destroy_rqts;
+ mlx5_core_err(mdev, "create umr mkey failed, %d\n", err);
+ goto err_destroy_wq;
}
- err = mlx5e_create_flow_steering(priv);
+ err = profile->init_tx(priv);
+ if (err)
+ goto err_destroy_umr_mkey;
+
+ err = mlx5e_open_drop_rq(priv);
if (err) {
- mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
- goto err_destroy_tirs;
+ mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
+ goto err_cleanup_tx;
}
+ err = profile->init_rx(priv);
+ if (err)
+ goto err_close_drop_rq;
+
mlx5e_create_q_counter(priv);
mlx5e_init_l2_addr(priv);
- mlx5e_vxlan_init(priv);
-
- err = mlx5e_tc_init(priv);
- if (err)
- goto err_dealloc_q_counters;
-
-#ifdef CONFIG_MLX5_CORE_EN_DCB
- mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets);
-#endif
+ mlx5e_set_dev_port_mtu(netdev);
err = register_netdev(netdev);
if (err) {
mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
- goto err_tc_cleanup;
+ goto err_dealloc_q_counters;
}
- if (mlx5e_vxlan_allowed(mdev)) {
- rtnl_lock();
- vxlan_get_rx_port(netdev);
- rtnl_unlock();
- }
-
- mlx5e_enable_async_events(priv);
- queue_work(priv->wq, &priv->set_rx_mode_work);
+ if (profile->enable)
+ profile->enable(priv);
return priv;
-err_tc_cleanup:
- mlx5e_tc_cleanup(priv);
-
err_dealloc_q_counters:
mlx5e_destroy_q_counter(priv);
- mlx5e_destroy_flow_steering(priv);
-
-err_destroy_tirs:
- mlx5e_destroy_tirs(priv);
-
-err_destroy_rqts:
- mlx5e_destroy_rqts(priv);
+ profile->cleanup_rx(priv);
err_close_drop_rq:
mlx5e_close_drop_rq(priv);
-err_destroy_tises:
- mlx5e_destroy_tises(priv);
+err_cleanup_tx:
+ profile->cleanup_tx(priv);
err_destroy_umr_mkey:
mlx5_core_destroy_mkey(mdev, &priv->umr_mkey);
-err_destroy_mkey:
- mlx5_core_destroy_mkey(mdev, &priv->mkey);
-
-err_dealloc_transport_domain:
- mlx5_core_dealloc_transport_domain(mdev, priv->tdn);
-
-err_dealloc_pd:
- mlx5_core_dealloc_pd(mdev, priv->pdn);
-
-err_unmap_free_uar:
- mlx5_unmap_free_uar(mdev, &priv->cq_uar);
-
err_destroy_wq:
destroy_workqueue(priv->wq);
@@ -3271,15 +3484,63 @@ err_free_netdev:
return NULL;
}
-static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
+static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
{
- struct mlx5e_priv *priv = vpriv;
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ int total_vfs = MLX5_TOTAL_VPORTS(mdev);
+ int vport;
+ u8 mac[ETH_ALEN];
+
+ if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+ return;
+
+ mlx5_query_nic_vport_mac_address(mdev, 0, mac);
+
+ for (vport = 1; vport < total_vfs; vport++) {
+ struct mlx5_eswitch_rep rep;
+
+ rep.load = mlx5e_vport_rep_load;
+ rep.unload = mlx5e_vport_rep_unload;
+ rep.vport = vport;
+ ether_addr_copy(rep.hw_id, mac);
+ mlx5_eswitch_register_vport_rep(esw, &rep);
+ }
+}
+
+static void *mlx5e_add(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ void *ppriv = NULL;
+ void *ret;
+
+ if (mlx5e_check_required_hca_cap(mdev))
+ return NULL;
+
+ if (mlx5e_create_mdev_resources(mdev))
+ return NULL;
+
+ mlx5e_register_vport_rep(mdev);
+
+ if (MLX5_CAP_GEN(mdev, vport_group_manager))
+ ppriv = &esw->offloads.vport_reps[0];
+
+ ret = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, ppriv);
+ if (!ret) {
+ mlx5e_destroy_mdev_resources(mdev);
+ return NULL;
+ }
+ return ret;
+}
+
+void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
+{
+ const struct mlx5e_profile *profile = priv->profile;
struct net_device *netdev = priv->netdev;
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ if (profile->disable)
+ profile->disable(priv);
- queue_work(priv->wq, &priv->set_rx_mode_work);
- mlx5e_disable_async_events(priv);
flush_workqueue(priv->wq);
if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
netif_device_detach(netdev);
@@ -3288,26 +3549,35 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
unregister_netdev(netdev);
}
- mlx5e_tc_cleanup(priv);
- mlx5e_vxlan_cleanup(priv);
mlx5e_destroy_q_counter(priv);
- mlx5e_destroy_flow_steering(priv);
- mlx5e_destroy_tirs(priv);
- mlx5e_destroy_rqts(priv);
+ profile->cleanup_rx(priv);
mlx5e_close_drop_rq(priv);
- mlx5e_destroy_tises(priv);
+ profile->cleanup_tx(priv);
mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey);
- mlx5_core_destroy_mkey(priv->mdev, &priv->mkey);
- mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
- mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
- mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
cancel_delayed_work_sync(&priv->update_stats_work);
destroy_workqueue(priv->wq);
+ if (profile->cleanup)
+ profile->cleanup(priv);
if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state))
free_netdev(netdev);
}
+static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
+{
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ int total_vfs = MLX5_TOTAL_VPORTS(mdev);
+ struct mlx5e_priv *priv = vpriv;
+ int vport;
+
+ mlx5e_destroy_netdev(mdev, priv);
+
+ for (vport = 1; vport < total_vfs; vport++)
+ mlx5_eswitch_unregister_vport_rep(esw, vport);
+
+ mlx5e_destroy_mdev_resources(mdev);
+}
+
static void *mlx5e_get_netdev(void *vpriv)
{
struct mlx5e_priv *priv = vpriv;
@@ -3316,8 +3586,8 @@ static void *mlx5e_get_netdev(void *vpriv)
}
static struct mlx5_interface mlx5e_interface = {
- .add = mlx5e_create_netdev,
- .remove = mlx5e_destroy_netdev,
+ .add = mlx5e_add,
+ .remove = mlx5e_remove,
.event = mlx5e_async_event,
.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
.get_dev = mlx5e_get_netdev,
@@ -3325,6 +3595,7 @@ static struct mlx5_interface mlx5e_interface = {
void mlx5e_init(void)
{
+ mlx5e_build_ptys2ethtool_map();
mlx5_register_interface(&mlx5e_interface);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
new file mode 100644
index 000000000..134de4a11
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -0,0 +1,431 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <generated/utsrelease.h>
+#include <linux/mlx5/fs.h>
+#include <net/switchdev.h>
+#include <net/pkt_cls.h>
+
+#include "eswitch.h"
+#include "en.h"
+#include "en_tc.h"
+
+static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
+
+static void mlx5e_rep_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
+}
+
+static const struct counter_desc sw_rep_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
+};
+
+#define NUM_VPORT_REP_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
+
+static void mlx5e_rep_get_strings(struct net_device *dev,
+ u32 stringset, uint8_t *data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++)
+ strcpy(data + (i * ETH_GSTRING_LEN),
+ sw_rep_stats_desc[i].format);
+ break;
+ }
+}
+
+static void mlx5e_update_sw_rep_counters(struct mlx5e_priv *priv)
+{
+ struct mlx5e_sw_stats *s = &priv->stats.sw;
+ struct mlx5e_rq_stats *rq_stats;
+ struct mlx5e_sq_stats *sq_stats;
+ int i, j;
+
+ memset(s, 0, sizeof(*s));
+ for (i = 0; i < priv->params.num_channels; i++) {
+ rq_stats = &priv->channel[i]->rq.stats;
+
+ s->rx_packets += rq_stats->packets;
+ s->rx_bytes += rq_stats->bytes;
+
+ for (j = 0; j < priv->params.num_tc; j++) {
+ sq_stats = &priv->channel[i]->sq[j].stats;
+
+ s->tx_packets += sq_stats->packets;
+ s->tx_bytes += sq_stats->bytes;
+ }
+ }
+}
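The representor's software stats are just the per-channel RQ/SQ counters folded into one struct, recomputed from scratch (hence the memset) on each update. A toy version of that aggregation, with a hypothetical layout:

#include <stdint.h>

struct toy_qstats { uint64_t packets, bytes; };

static void toy_fold(struct toy_qstats *total,
		     const struct toy_qstats *per_q, int nq)
{
	int i;

	total->packets = total->bytes = 0;      /* start from zero each time */
	for (i = 0; i < nq; i++) {
		total->packets += per_q[i].packets;
		total->bytes   += per_q[i].bytes;
	}
}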
+
+static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int i;
+
+ if (!data)
+ return;
+
+ mutex_lock(&priv->state_lock);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_update_sw_rep_counters(priv);
+ mutex_unlock(&priv->state_lock);
+
+ for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++)
+ data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
+ sw_rep_stats_desc, i);
+}
+
+static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return NUM_VPORT_REP_COUNTERS;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
+ .get_drvinfo = mlx5e_rep_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = mlx5e_rep_get_strings,
+ .get_sset_count = mlx5e_rep_get_sset_count,
+ .get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
+};
+
+int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ if (esw->mode == SRIOV_NONE)
+ return -EOPNOTSUPP;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
+ attr->u.ppid.id_len = ETH_ALEN;
+ ether_addr_copy(attr->u.ppid.id, rep->hw_id);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5e_channel *c;
+ int n, tc, err, num_sqs = 0;
+ u16 *sqs;
+
+ sqs = kcalloc(priv->params.num_channels * priv->params.num_tc, sizeof(u16), GFP_KERNEL);
+ if (!sqs)
+ return -ENOMEM;
+
+ for (n = 0; n < priv->params.num_channels; n++) {
+ c = priv->channel[n];
+ for (tc = 0; tc < c->num_tc; tc++)
+ sqs[num_sqs++] = c->sq[tc].sqn;
+ }
+
+ err = mlx5_eswitch_sqs2vport_start(esw, rep, sqs, num_sqs);
+
+ kfree(sqs);
+ return err;
+}
+
+int mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5e_priv *priv = rep->priv_data;
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return mlx5e_add_sqs_fwd_rules(priv);
+ return 0;
+}
+
+void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_eswitch_rep *rep = priv->ppriv;
+
+ mlx5_eswitch_sqs2vport_stop(esw, rep);
+}
+
+void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5e_priv *priv = rep->priv_data;
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_remove_sqs_fwd_rules(priv);
+
+ /* clean (and re-init) existing uplink offloaded TC rules */
+ mlx5e_tc_cleanup(priv);
+ mlx5e_tc_init(priv);
+}
+
+static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
+ char *buf, size_t len)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_eswitch_rep *rep = priv->ppriv;
+ int ret;
+
+ ret = snprintf(buf, len, "%d", rep->vport - 1);
+ if (ret >= len)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
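The snprintf() return value above is the number of characters that would have been written, so ret >= len signals truncation and the function bails out instead of returning a clipped name. The same guard in isolation:

#include <stdio.h>

static int toy_port_name(char *buf, size_t len, int vport)
{
	int ret = snprintf(buf, len, "%d", vport - 1);

	/* ret >= len: output did not fit; ret < 0: encoding error */
	return (ret < 0 || (size_t)ret >= len) ? -1 : 0;
}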
+
+static int mlx5e_rep_ndo_setup_tc(struct net_device *dev, u32 handle,
+ __be16 proto, struct tc_to_netdev *tc)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
+ return -EOPNOTSUPP;
+
+ switch (tc->type) {
+ case TC_SETUP_CLSFLOWER:
+ switch (tc->cls_flower->command) {
+ case TC_CLSFLOWER_REPLACE:
+ return mlx5e_configure_flower(priv, proto, tc->cls_flower);
+ case TC_CLSFLOWER_DESTROY:
+ return mlx5e_delete_flower(priv, tc->cls_flower);
+ case TC_CLSFLOWER_STATS:
+ return mlx5e_stats_flower(priv, tc->cls_flower);
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
+ .switchdev_port_attr_get = mlx5e_attr_get,
+};
+
+static const struct net_device_ops mlx5e_netdev_ops_rep = {
+ .ndo_open = mlx5e_open,
+ .ndo_stop = mlx5e_close,
+ .ndo_start_xmit = mlx5e_xmit,
+ .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
+ .ndo_setup_tc = mlx5e_rep_ndo_setup_tc,
+ .ndo_get_stats64 = mlx5e_get_stats,
+};
+
+static void mlx5e_build_rep_netdev_priv(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
+ MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+
+ priv->params.log_sq_size =
+ MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
+ priv->params.rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST;
+ priv->params.log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
+
+ priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
+ BIT(priv->params.log_rq_size));
+
+ priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+ mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode);
+
+ priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
+ priv->params.num_tc = 1;
+
+ priv->params.lro_wqe_sz =
+ MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+
+ priv->mdev = mdev;
+ priv->netdev = netdev;
+ priv->params.num_channels = profile->max_nch(mdev);
+ priv->profile = profile;
+ priv->ppriv = ppriv;
+
+ mutex_init(&priv->state_lock);
+
+ INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
+}
+
+static void mlx5e_build_rep_netdev(struct net_device *netdev)
+{
+ netdev->netdev_ops = &mlx5e_netdev_ops_rep;
+
+ netdev->watchdog_timeo = 15 * HZ;
+
+ netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
+
+#ifdef CONFIG_NET_SWITCHDEV
+ netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;
+#endif
+
+ netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC;
+ netdev->hw_features |= NETIF_F_HW_TC;
+
+ eth_hw_addr_random(netdev);
+}
+
+static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
+{
+ mlx5e_build_rep_netdev_priv(mdev, netdev, profile, ppriv);
+ mlx5e_build_rep_netdev(netdev);
+}
+
+static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_flow_rule *flow_rule;
+ int err;
+ int i;
+
+ err = mlx5e_create_direct_rqts(priv);
+ if (err) {
+ mlx5_core_warn(mdev, "create direct rqts failed, %d\n", err);
+ return err;
+ }
+
+ err = mlx5e_create_direct_tirs(priv);
+ if (err) {
+ mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err);
+ goto err_destroy_direct_rqts;
+ }
+
+ flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
+ rep->vport,
+ priv->direct_tir[0].tirn);
+ if (IS_ERR(flow_rule)) {
+ err = PTR_ERR(flow_rule);
+ goto err_destroy_direct_tirs;
+ }
+ rep->vport_rx_rule = flow_rule;
+
+ err = mlx5e_tc_init(priv);
+ if (err)
+ goto err_del_flow_rule;
+
+ return 0;
+
+err_del_flow_rule:
+ mlx5_del_flow_rule(rep->vport_rx_rule);
+err_destroy_direct_tirs:
+ mlx5e_destroy_direct_tirs(priv);
+err_destroy_direct_rqts:
+ for (i = 0; i < priv->params.num_channels; i++)
+ mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
+ return err;
+}
+
+static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch_rep *rep = priv->ppriv;
+ int i;
+
+ mlx5e_tc_cleanup(priv);
+ mlx5_del_flow_rule(rep->vport_rx_rule);
+ mlx5e_destroy_direct_tirs(priv);
+ for (i = 0; i < priv->params.num_channels; i++)
+ mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
+}
+
+static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
+{
+ int err;
+
+ err = mlx5e_create_tises(priv);
+ if (err) {
+ mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
+ return err;
+ }
+ return 0;
+}
+
+static int mlx5e_get_rep_max_num_channels(struct mlx5_core_dev *mdev)
+{
+#define MLX5E_PORT_REPRESENTOR_NCH 1
+ return MLX5E_PORT_REPRESENTOR_NCH;
+}
+
+static struct mlx5e_profile mlx5e_rep_profile = {
+ .init = mlx5e_init_rep,
+ .init_rx = mlx5e_init_rep_rx,
+ .cleanup_rx = mlx5e_cleanup_rep_rx,
+ .init_tx = mlx5e_init_rep_tx,
+ .cleanup_tx = mlx5e_cleanup_nic_tx,
+ .update_stats = mlx5e_update_sw_rep_counters,
+ .max_nch = mlx5e_get_rep_max_num_channels,
+ .max_tc = 1,
+};
+
+int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep)
+{
+ rep->priv_data = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rep);
+ if (!rep->priv_data) {
+ pr_warn("Failed to create representor for vport %d\n",
+ rep->vport);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5e_priv *priv = rep->priv_data;
+
+ mlx5e_destroy_netdev(esw->dev, priv);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index e41a06675..e7c969df3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -324,9 +324,9 @@ mlx5e_copy_skb_header_fragmented_mpwqe(struct device *pdev,
}
}
-static u16 mlx5e_get_wqe_mtt_offset(u16 rq_ix, u16 wqe_ix)
+static u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
{
- return rq_ix * MLX5_CHANNEL_MAX_NUM_MTTS +
+ return rq->mpwqe_mtt_offset +
wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
}
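This hunk widens the per-WQE MTT offset from u16 to u32 (and, a few hunks down, the derived DMA offset to u64) because rq->mpwqe_mtt_offset grows with the channel index and can exceed 16 bits, and a 32-bit offset shifted left by PAGE_SHIFT can exceed 32. A tiny demonstration of both hazards:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t off = 70000;                    /* > UINT16_MAX */
	uint16_t narrow = (uint16_t)off;         /* silently wraps to 4464 */
	uint64_t dma = (uint64_t)off << 12;      /* cast BEFORE shifting */

	printf("off=%u narrow=%u dma=%llu\n",
	       off, narrow, (unsigned long long)dma);
	return 0;
}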
@@ -340,7 +340,7 @@ static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
struct mlx5_wqe_data_seg *dseg = &wqe->data;
struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
- u16 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix);
+ u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);
memset(wqe, 0, sizeof(*wqe));
cseg->opmod_idx_opcode =
@@ -353,9 +353,9 @@ static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
ucseg->klm_octowords =
- cpu_to_be16(mlx5e_get_mtt_octw(MLX5_MPWRQ_PAGES_PER_WQE));
+ cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
ucseg->bsf_octowords =
- cpu_to_be16(mlx5e_get_mtt_octw(umr_wqe_mtt_offset));
+ cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
dseg->lkey = sq->mkey_be;
@@ -423,7 +423,7 @@ static int mlx5e_alloc_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
{
struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
int mtt_sz = mlx5e_get_wqe_mtt_sz();
- u32 dma_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix) << PAGE_SHIFT;
+ u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, ix) << PAGE_SHIFT;
int i;
wi->umr.dma_info = kmalloc(sizeof(*wi->umr.dma_info) *
@@ -506,6 +506,12 @@ void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq)
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
+
+ if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state))) {
+ mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]);
+ return;
+ }
+
mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
rq->stats.mpwqe_frag++;
@@ -595,26 +601,9 @@ void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
wi->free_wqe(rq, wi);
}
-void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
-{
- struct mlx5_wq_ll *wq = &rq->wq;
- struct mlx5e_rx_wqe *wqe;
- __be16 wqe_ix_be;
- u16 wqe_ix;
-
- while (!mlx5_wq_ll_is_empty(wq)) {
- wqe_ix_be = *wq->tail_next;
- wqe_ix = be16_to_cpu(wqe_ix_be);
- wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
- rq->dealloc_wqe(rq, wqe_ix);
- mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
- &wqe->next.next_wqe_index);
- }
-}
-
#define RQ_CANNOT_POST(rq) \
- (!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \
- test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
+ (test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state) || \
+ test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
@@ -924,7 +913,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
int work_done = 0;
- if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state)))
+ if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state)))
return 0;
if (cq->decmprs_left)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
new file mode 100644
index 000000000..1fffe48a9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en.h"
+
+/* Adaptive moderation profiles */
+#define MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
+#define MLX5E_RX_AM_DEF_PROFILE_CQE 1
+#define MLX5E_RX_AM_DEF_PROFILE_EQE 1
+#define MLX5E_PARAMS_AM_NUM_PROFILES 5
+
+/* All profile sizes must be MLX5E_PARAMS_AM_NUM_PROFILES */
+#define MLX5_AM_EQE_PROFILES { \
+ {1, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {8, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {64, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {128, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {256, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+}
+
+#define MLX5_AM_CQE_PROFILES { \
+ {2, 256}, \
+ {8, 128}, \
+ {16, 64}, \
+ {32, 64}, \
+ {64, 64} \
+}
+
+static const struct mlx5e_cq_moder
+profile[MLX5_CQ_PERIOD_NUM_MODES][MLX5E_PARAMS_AM_NUM_PROFILES] = {
+ MLX5_AM_EQE_PROFILES,
+ MLX5_AM_CQE_PROFILES,
+};
+
+static inline struct mlx5e_cq_moder mlx5e_am_get_profile(u8 cq_period_mode, int ix)
+{
+ return profile[cq_period_mode][ix];
+}
+
+struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode)
+{
+ int default_profile_ix;
+
+ if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
+ default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_CQE;
+ else /* MLX5_CQ_PERIOD_MODE_START_FROM_EQE */
+ default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_EQE;
+
+ return profile[rx_cq_period_mode][default_profile_ix];
+}
+
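
mlx5e_am_get_def_profile() is a plain two-dimensional table lookup: the CQ period mode picks the row and the per-mode default index picks the column. A compilable miniature of the same scheme, with the CQE row copied from MLX5_AM_CQE_PROFILES above and a hypothetical EQE packet budget:

#include <stdio.h>

struct cq_moder { int usec; int pkts; };

#define NUM_MODES    2
#define NUM_PROFILES 5

static const struct cq_moder profile[NUM_MODES][NUM_PROFILES] = {
	/* EQE-based row, fixed packet budget of 256 */
	{ {1, 256}, {8, 256}, {64, 256}, {128, 256}, {256, 256} },
	/* CQE-based row, as in MLX5_AM_CQE_PROFILES */
	{ {2, 256}, {8, 128}, {16, 64}, {32, 64}, {64, 64} },
};

int main(void)
{
	int mode = 1, def_ix = 1;	/* CQE mode, default profile index 1 */
	struct cq_moder m = profile[mode][def_ix];

	printf("usec=%d pkts=%d\n", m.usec, m.pkts); /* 8 128 */
	return 0;
}
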
+/* Adaptive moderation logic */
+enum {
+ MLX5E_AM_START_MEASURE,
+ MLX5E_AM_MEASURE_IN_PROGRESS,
+ MLX5E_AM_APPLY_NEW_PROFILE,
+};
+
+enum {
+ MLX5E_AM_PARKING_ON_TOP,
+ MLX5E_AM_PARKING_TIRED,
+ MLX5E_AM_GOING_RIGHT,
+ MLX5E_AM_GOING_LEFT,
+};
+
+enum {
+ MLX5E_AM_STATS_WORSE,
+ MLX5E_AM_STATS_SAME,
+ MLX5E_AM_STATS_BETTER,
+};
+
+enum {
+ MLX5E_AM_STEPPED,
+ MLX5E_AM_TOO_TIRED,
+ MLX5E_AM_ON_EDGE,
+};
+
+static bool mlx5e_am_on_top(struct mlx5e_rx_am *am)
+{
+ switch (am->tune_state) {
+ case MLX5E_AM_PARKING_ON_TOP:
+ case MLX5E_AM_PARKING_TIRED:
+ WARN_ONCE(true, "mlx5e_am_on_top: PARKING\n");
+ return true;
+ case MLX5E_AM_GOING_RIGHT:
+ return (am->steps_left > 1) && (am->steps_right == 1);
+ default: /* MLX5E_AM_GOING_LEFT */
+ return (am->steps_right > 1) && (am->steps_left == 1);
+ }
+}
+
+static void mlx5e_am_turn(struct mlx5e_rx_am *am)
+{
+ switch (am->tune_state) {
+ case MLX5E_AM_PARKING_ON_TOP:
+ case MLX5E_AM_PARKING_TIRED:
+ WARN_ONCE(true, "mlx5e_am_turn: PARKING\n");
+ break;
+ case MLX5E_AM_GOING_RIGHT:
+ am->tune_state = MLX5E_AM_GOING_LEFT;
+ am->steps_left = 0;
+ break;
+ case MLX5E_AM_GOING_LEFT:
+ am->tune_state = MLX5E_AM_GOING_RIGHT;
+ am->steps_right = 0;
+ break;
+ }
+}
+
+static int mlx5e_am_step(struct mlx5e_rx_am *am)
+{
+ if (am->tired == (MLX5E_PARAMS_AM_NUM_PROFILES * 2))
+ return MLX5E_AM_TOO_TIRED;
+
+ switch (am->tune_state) {
+ case MLX5E_AM_PARKING_ON_TOP:
+ case MLX5E_AM_PARKING_TIRED:
+ WARN_ONCE(true, "mlx5e_am_step: PARKING\n");
+ break;
+ case MLX5E_AM_GOING_RIGHT:
+ if (am->profile_ix == (MLX5E_PARAMS_AM_NUM_PROFILES - 1))
+ return MLX5E_AM_ON_EDGE;
+ am->profile_ix++;
+ am->steps_right++;
+ break;
+ case MLX5E_AM_GOING_LEFT:
+ if (am->profile_ix == 0)
+ return MLX5E_AM_ON_EDGE;
+ am->profile_ix--;
+ am->steps_left++;
+ break;
+ }
+
+ am->tired++;
+ return MLX5E_AM_STEPPED;
+}
+
+static void mlx5e_am_park_on_top(struct mlx5e_rx_am *am)
+{
+ am->steps_right = 0;
+ am->steps_left = 0;
+ am->tired = 0;
+ am->tune_state = MLX5E_AM_PARKING_ON_TOP;
+}
+
+static void mlx5e_am_park_tired(struct mlx5e_rx_am *am)
+{
+ am->steps_right = 0;
+ am->steps_left = 0;
+ am->tune_state = MLX5E_AM_PARKING_TIRED;
+}
+
+static void mlx5e_am_exit_parking(struct mlx5e_rx_am *am)
+{
+ am->tune_state = am->profile_ix ? MLX5E_AM_GOING_LEFT :
+ MLX5E_AM_GOING_RIGHT;
+ mlx5e_am_step(am);
+}
+
+static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
+ struct mlx5e_rx_am_stats *prev)
+{
+ int diff;
+
+ if (!prev->ppms)
+ return curr->ppms ? MLX5E_AM_STATS_BETTER :
+ MLX5E_AM_STATS_SAME;
+
+ diff = curr->ppms - prev->ppms;
+ if (((100 * abs(diff)) / prev->ppms) > 10) /* more than 10% diff */
+ return (diff > 0) ? MLX5E_AM_STATS_BETTER :
+ MLX5E_AM_STATS_WORSE;
+
+ if (!prev->epms)
+ return curr->epms ? MLX5E_AM_STATS_WORSE :
+ MLX5E_AM_STATS_SAME;
+
+ diff = curr->epms - prev->epms;
+ if (((100 * abs(diff)) / prev->epms) > 10) /* more than 10% diff */
+ return (diff < 0) ? MLX5E_AM_STATS_BETTER :
+ MLX5E_AM_STATS_WORSE;
+
+ return MLX5E_AM_STATS_SAME;
+}
+
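
The comparison treats a metric as changed only when it moves by more than 10% of the previous value, preferring throughput (ppms) and falling back to event rate (epms). A standalone replica of the test for one metric, with two worked inputs:

#include <stdio.h>
#include <stdlib.h>

enum { WORSE, SAME, BETTER };

static int compare_ppms(int curr, int prev)
{
	int diff = curr - prev;

	if (!prev)
		return curr ? BETTER : SAME;
	if ((100 * abs(diff)) / prev > 10)	/* more than 10% diff */
		return diff > 0 ? BETTER : WORSE;
	return SAME;
}

int main(void)
{
	printf("%d\n", compare_ppms(1120, 1000)); /* 12% up -> BETTER (2) */
	printf("%d\n", compare_ppms(1050, 1000)); /* 5% change -> SAME (1) */
	return 0;
}
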
+static bool mlx5e_am_decision(struct mlx5e_rx_am_stats *curr_stats,
+ struct mlx5e_rx_am *am)
+{
+ int prev_state = am->tune_state;
+ int prev_ix = am->profile_ix;
+ int stats_res;
+ int step_res;
+
+ switch (am->tune_state) {
+ case MLX5E_AM_PARKING_ON_TOP:
+ stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats);
+ if (stats_res != MLX5E_AM_STATS_SAME)
+ mlx5e_am_exit_parking(am);
+ break;
+
+ case MLX5E_AM_PARKING_TIRED:
+ am->tired--;
+ if (!am->tired)
+ mlx5e_am_exit_parking(am);
+ break;
+
+ case MLX5E_AM_GOING_RIGHT:
+ case MLX5E_AM_GOING_LEFT:
+ stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats);
+ if (stats_res != MLX5E_AM_STATS_BETTER)
+ mlx5e_am_turn(am);
+
+ if (mlx5e_am_on_top(am)) {
+ mlx5e_am_park_on_top(am);
+ break;
+ }
+
+ step_res = mlx5e_am_step(am);
+ switch (step_res) {
+ case MLX5E_AM_ON_EDGE:
+ mlx5e_am_park_on_top(am);
+ break;
+ case MLX5E_AM_TOO_TIRED:
+ mlx5e_am_park_tired(am);
+ break;
+ }
+
+ break;
+ }
+
+ if ((prev_state != MLX5E_AM_PARKING_ON_TOP) ||
+ (am->tune_state != MLX5E_AM_PARKING_ON_TOP))
+ am->prev_stats = *curr_stats;
+
+ return am->profile_ix != prev_ix;
+}
+
+static void mlx5e_am_sample(struct mlx5e_rq *rq,
+ struct mlx5e_rx_am_sample *s)
+{
+ s->time = ktime_get();
+ s->pkt_ctr = rq->stats.packets;
+ s->event_ctr = rq->cq.event_ctr;
+}
+
+#define MLX5E_AM_NEVENTS 64
+
+static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
+ struct mlx5e_rx_am_sample *end,
+ struct mlx5e_rx_am_stats *curr_stats)
+{
+ /* u32 holds up to 71 minutes, should be enough */
+ u32 delta_us = ktime_us_delta(end->time, start->time);
+ unsigned int npkts = end->pkt_ctr - start->pkt_ctr;
+
+ if (!delta_us) {
+ WARN_ONCE(true, "mlx5e_am_calc_stats: delta_us=0\n");
+ return;
+ }
+
+ curr_stats->ppms = (npkts * USEC_PER_MSEC) / delta_us;
+ curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us;
+}
+
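
The stats are the sampled deltas normalized to per-millisecond rates: packets per ms from the packet counter delta, events per ms from the fixed MLX5E_AM_NEVENTS window. A worked example of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int npkts = 500, nevents = 64, delta_us = 2000;

	printf("ppms=%u\n", npkts * 1000 / delta_us);   /* 250 */
	printf("epms=%u\n", nevents * 1000 / delta_us); /* 32 */
	return 0;
}
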
+void mlx5e_rx_am_work(struct work_struct *work)
+{
+ struct mlx5e_rx_am *am = container_of(work, struct mlx5e_rx_am,
+ work);
+ struct mlx5e_rq *rq = container_of(am, struct mlx5e_rq, am);
+ struct mlx5e_cq_moder cur_profile = profile[am->mode][am->profile_ix];
+
+ mlx5_core_modify_cq_moderation(rq->priv->mdev, &rq->cq.mcq,
+ cur_profile.usec, cur_profile.pkts);
+
+ am->state = MLX5E_AM_START_MEASURE;
+}
+
+void mlx5e_rx_am(struct mlx5e_rq *rq)
+{
+ struct mlx5e_rx_am *am = &rq->am;
+ struct mlx5e_rx_am_sample end_sample;
+ struct mlx5e_rx_am_stats curr_stats;
+ u16 nevents;
+
+ switch (am->state) {
+ case MLX5E_AM_MEASURE_IN_PROGRESS:
+ nevents = rq->cq.event_ctr - am->start_sample.event_ctr;
+ if (nevents < MLX5E_AM_NEVENTS)
+ break;
+ mlx5e_am_sample(rq, &end_sample);
+ mlx5e_am_calc_stats(&am->start_sample, &end_sample,
+ &curr_stats);
+ if (mlx5e_am_decision(&curr_stats, am)) {
+ am->state = MLX5E_AM_APPLY_NEW_PROFILE;
+ schedule_work(&am->work);
+ break;
+ }
+ /* fall through */
+ case MLX5E_AM_START_MEASURE:
+ mlx5e_am_sample(rq, &am->start_sample);
+ am->state = MLX5E_AM_MEASURE_IN_PROGRESS;
+ break;
+ case MLX5E_AM_APPLY_NEW_PROFILE:
+ break;
+ }
+}
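
Taken together, mlx5e_rx_am() is a three-state sampler: START_MEASURE records a baseline, MEASURE_IN_PROGRESS waits for MLX5E_AM_NEVENTS CQ events before comparing, and APPLY_NEW_PROFILE parks until the work item has reprogrammed CQ moderation. A minimal userspace model of that loop (names hypothetical, one event per poll assumed):

#include <stdio.h>

enum state { START, MEASURING, APPLYING };

int main(void)
{
	enum state st = START;
	unsigned int start_ev = 0, ev = 0;
	int poll;

	for (poll = 0; poll < 200; poll++) {
		ev++;				/* pretend each poll saw one CQ event */
		switch (st) {
		case MEASURING:
			if (ev - start_ev < 64)	/* MLX5E_AM_NEVENTS */
				break;
			/* compare stats here; pretend a better profile won */
			printf("apply new profile at poll %d\n", poll);
			st = APPLYING;
			break;
		case START:
			start_ev = ev;
			st = MEASURING;
			break;
		case APPLYING:	/* the driver leaves this state from the work item */
			st = START;
			break;
		}
	}
	return 0;
}
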
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index fcd490cc5..499487ce3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -70,6 +70,7 @@ struct mlx5e_sw_stats {
u64 tx_queue_stopped;
u64 tx_queue_wake;
u64 tx_queue_dropped;
+ u64 tx_xmit_more;
u64 rx_wqe_err;
u64 rx_mpwqe_filler;
u64 rx_mpwqe_frag;
@@ -101,6 +102,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_frag) },
@@ -151,6 +153,22 @@ static const struct counter_desc vport_stats_desc[] = {
VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
{ "tx_vport_broadcast_bytes",
VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
+ { "rx_vport_rdma_unicast_packets",
+ VPORT_COUNTER_OFF(received_ib_unicast.packets) },
+ { "rx_vport_rdma_unicast_bytes",
+ VPORT_COUNTER_OFF(received_ib_unicast.octets) },
+ { "tx_vport_rdma_unicast_packets",
+ VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
+ { "tx_vport_rdma_unicast_bytes",
+ VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
+ { "rx_vport_rdma_multicast_packets",
+ VPORT_COUNTER_OFF(received_ib_multicast.packets) },
+ { "rx_vport_rdma_multicast_bytes",
+ VPORT_COUNTER_OFF(received_ib_multicast.octets) },
+ { "tx_vport_rdma_multicast_packets",
+ VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
+ { "tx_vport_rdma_multicast_bytes",
+ VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};
#define PPORT_802_3_OFF(c) \
@@ -238,11 +256,12 @@ static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
};
static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
- { "rx_prio%d_pause", PPORT_PER_PRIO_OFF(rx_pause) },
- { "rx_prio%d_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
- { "tx_prio%d_pause", PPORT_PER_PRIO_OFF(tx_pause) },
- { "tx_prio%d_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
- { "rx_prio%d_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
+ /* %s is "global" or "prio{i}" */
+ { "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
+ { "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
+ { "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
+ { "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
+ { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};
struct mlx5e_rq_stats {
@@ -281,6 +300,7 @@ struct mlx5e_sq_stats {
/* commonly accessed in data path */
u64 packets;
u64 bytes;
+ u64 xmit_more;
u64 tso_packets;
u64 tso_bytes;
u64 tso_inner_packets;
@@ -307,6 +327,7 @@ static const struct counter_desc sq_stats_desc[] = {
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
};
#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 0db51cc39..22cfc4ac1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -37,8 +37,11 @@
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
+#include <net/switchdev.h>
+#include <net/tc_act/tc_mirred.h>
#include "en.h"
#include "en_tc.h"
+#include "eswitch.h"
struct mlx5e_tc_flow {
struct rhash_head node;
@@ -49,9 +52,9 @@ struct mlx5e_tc_flow {
#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4
-static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
- u32 *match_c, u32 *match_v,
- u32 action, u32 flow_tag)
+static struct mlx5_flow_rule *mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ u32 action, u32 flow_tag)
{
struct mlx5_core_dev *dev = priv->mdev;
struct mlx5_flow_destination dest = { 0 };
@@ -62,7 +65,7 @@ static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = priv->fs.vlan.ft.t;
- } else {
+ } else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
counter = mlx5_fc_create(dev, true);
if (IS_ERR(counter))
return ERR_CAST(counter);
@@ -88,8 +91,8 @@ static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
table_created = true;
}
- rule = mlx5_add_flow_rule(priv->fs.tc.t, MLX5_MATCH_OUTER_HEADERS,
- match_c, match_v,
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ rule = mlx5_add_flow_rule(priv->fs.tc.t, spec,
action, flow_tag,
&dest);
@@ -109,6 +112,22 @@ err_create_ft:
return rule;
}
+static struct mlx5_flow_rule *mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ u32 action, u32 dst_vport)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_eswitch_rep *rep = priv->ppriv;
+ u32 src_vport;
+
+ if (rep->vport) /* set source vport for the flow */
+ src_vport = rep->vport;
+ else
+ src_vport = FDB_UPLINK_VPORT;
+
+ return mlx5_eswitch_add_offloaded_rule(esw, spec, action, src_vport, dst_vport);
+}
+
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
struct mlx5_flow_rule *rule)
{
@@ -120,18 +139,19 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
mlx5_fc_destroy(priv->mdev, counter);
- if (!mlx5e_tc_num_filters(priv)) {
+ if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
mlx5_destroy_flow_table(priv->fs.tc.t);
priv->fs.tc.t = NULL;
}
}
-static int parse_cls_flower(struct mlx5e_priv *priv,
- u32 *match_c, u32 *match_v,
+static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f)
{
- void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c, outer_headers);
- void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v, outer_headers);
+ void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers);
+ void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers);
u16 addr_type = 0;
u8 ip_proto = 0;
@@ -294,10 +314,11 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
return 0;
}
-static int parse_tc_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
- u32 *action, u32 *flow_tag)
+static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
+ u32 *action, u32 *flow_tag)
{
const struct tc_action *a;
+ LIST_HEAD(actions);
if (tc_no_actions(exts))
return -EINVAL;
@@ -305,7 +326,8 @@ static int parse_tc_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
*flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
*action = 0;
- tc_for_each_action(a, exts) {
+ tcf_exts_to_list(exts, &actions);
+ list_for_each_entry(a, &actions, list) {
/* Only support a single action per rule */
if (*action)
return -EINVAL;
@@ -338,17 +360,68 @@ static int parse_tc_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return 0;
}
+static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
+ u32 *action, u32 *dest_vport)
+{
+ const struct tc_action *a;
+ LIST_HEAD(actions);
+
+ if (tc_no_actions(exts))
+ return -EINVAL;
+
+ *action = 0;
+
+ tcf_exts_to_list(exts, &actions);
+ list_for_each_entry(a, &actions, list) {
+ /* Only support a single action per rule */
+ if (*action)
+ return -EINVAL;
+
+ if (is_tcf_gact_shot(a)) {
+ *action = MLX5_FLOW_CONTEXT_ACTION_DROP |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ continue;
+ }
+
+ if (is_tcf_mirred_redirect(a)) {
+ int ifindex = tcf_mirred_ifindex(a);
+ struct net_device *out_dev;
+ struct mlx5e_priv *out_priv;
+ struct mlx5_eswitch_rep *out_rep;
+
+ out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
+
+ if (!switchdev_port_same_parent_id(priv->netdev, out_dev)) {
+ pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
+ priv->netdev->name, out_dev->name);
+ return -EINVAL;
+ }
+
+ out_priv = netdev_priv(out_dev);
+ out_rep = out_priv->ppriv;
+ if (out_rep->vport == 0)
+ *dest_vport = FDB_UPLINK_VPORT;
+ else
+ *dest_vport = out_rep->vport;
+ *action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ continue;
+ }
+
+ return -EINVAL;
+ }
+ return 0;
+}
+
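
parse_tc_fdb_actions() accepts exactly one action per flow: a gact drop (mapped to DROP plus COUNT) or a mirred redirect whose target netdev must sit on the same switch and is translated to a destination vport. The shape of that loop, reduced to a standalone toy with hypothetical flags and action kinds:

#include <stdio.h>

#define ACT_DROP  (1u << 0)
#define ACT_COUNT (1u << 1)
#define ACT_FWD   (1u << 2)

enum tc_kind { TC_SHOT, TC_REDIRECT };

static int parse(const enum tc_kind *acts, int n, unsigned int *action)
{
	int i;

	*action = 0;
	for (i = 0; i < n; i++) {
		if (*action)			/* only one action per rule */
			return -1;
		if (acts[i] == TC_SHOT)
			*action = ACT_DROP | ACT_COUNT;
		else if (acts[i] == TC_REDIRECT)
			*action = ACT_FWD;	/* the kernel also resolves the dest vport */
		else
			return -1;
	}
	return 0;
}

int main(void)
{
	enum tc_kind one[] = { TC_REDIRECT };
	enum tc_kind two[] = { TC_SHOT, TC_REDIRECT };
	unsigned int action;

	printf("%d\n", parse(one, 1, &action));	/* 0: single redirect accepted */
	printf("%d\n", parse(two, 2, &action));	/* -1: second action rejected */
	return 0;
}
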
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
struct tc_cls_flower_offload *f)
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
- u32 *match_c;
- u32 *match_v;
int err = 0;
- u32 flow_tag;
- u32 action;
+ u32 flow_tag, action, dest_vport = 0;
struct mlx5e_tc_flow *flow;
+ struct mlx5_flow_spec *spec;
struct mlx5_flow_rule *old = NULL;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
tc->ht_params);
@@ -357,49 +430,53 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
else
flow = kzalloc(sizeof(*flow), GFP_KERNEL);
- match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
- match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
- if (!match_c || !match_v || !flow) {
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec || !flow) {
err = -ENOMEM;
goto err_free;
}
flow->cookie = f->cookie;
- err = parse_cls_flower(priv, match_c, match_v, f);
+ err = parse_cls_flower(priv, spec, f);
if (err < 0)
goto err_free;
- err = parse_tc_actions(priv, f->exts, &action, &flow_tag);
- if (err < 0)
+ if (esw && esw->mode == SRIOV_OFFLOADS) {
+ err = parse_tc_fdb_actions(priv, f->exts, &action, &dest_vport);
+ if (err < 0)
+ goto err_free;
+ flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, action, dest_vport);
+ } else {
+ err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
+ if (err < 0)
+ goto err_free;
+ flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
+ }
+
+ if (IS_ERR(flow->rule)) {
+ err = PTR_ERR(flow->rule);
goto err_free;
+ }
err = rhashtable_insert_fast(&tc->ht, &flow->node,
tc->ht_params);
if (err)
- goto err_free;
-
- flow->rule = mlx5e_tc_add_flow(priv, match_c, match_v, action,
- flow_tag);
- if (IS_ERR(flow->rule)) {
- err = PTR_ERR(flow->rule);
- goto err_hash_del;
- }
+ goto err_del_rule;
if (old)
mlx5e_tc_del_flow(priv, old);
goto out;
-err_hash_del:
- rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);
+err_del_rule:
+ mlx5_del_flow_rule(flow->rule);
err_free:
if (!old)
kfree(flow);
out:
- kfree(match_c);
- kfree(match_v);
+ kvfree(spec);
return err;
}
@@ -430,6 +507,7 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow;
struct tc_action *a;
struct mlx5_fc *counter;
+ LIST_HEAD(actions);
u64 bytes;
u64 packets;
u64 lastuse;
@@ -445,7 +523,8 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
- tc_for_each_action(a, f->exts)
+ tcf_exts_to_list(f->exts, &actions);
+ list_for_each_entry(a, &actions, list)
tcf_action_stats_update(a, bytes, packets, lastuse);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 5740b465e..eb0e72537 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -128,6 +128,50 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
return priv->channeltc_to_txq_map[channel_ix][up];
}
+static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
+{
+#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
+
+ return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
+}
+
+static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
+{
+ struct flow_keys keys;
+
+ if (skb_transport_header_was_set(skb))
+ return skb_transport_offset(skb);
+ else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
+ return keys.control.thoff;
+ else
+ return mlx5e_skb_l2_header_offset(skb);
+}
+
+static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
+ struct sk_buff *skb)
+{
+ int hlen;
+
+ switch (mode) {
+ case MLX5_INLINE_MODE_TCP_UDP:
+ hlen = eth_get_headlen(skb->data, skb_headlen(skb));
+ if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
+ hlen += VLAN_HLEN;
+ return hlen;
+ case MLX5_INLINE_MODE_IP:
+ /* When transport header is set to zero, it means no transport
+ * header. When transport header is set to 0xff's, it means
+ * transport header wasn't set.
+ */
+ if (skb_transport_offset(skb))
+ return mlx5e_skb_l3_header_offset(skb);
+ /* fall through */
+ case MLX5_INLINE_MODE_L2:
+ default:
+ return mlx5e_skb_l2_header_offset(skb);
+ }
+}
+
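
mlx5e_calc_min_inline() decides how many header bytes must be copied into the WQE: MODE_L2 inlines up to the network header (at least ETH_HLEN + VLAN_HLEN), MODE_IP through the L3 header, and MODE_TCP_UDP through the transport header as resolved by eth_get_headlen(). Worked sizes for an untagged TCP/IPv4 frame, assuming eth_get_headlen() resolves through the 20-byte TCP header:

#include <stdio.h>

#define ETH_HLEN  14
#define VLAN_HLEN 4

int main(void)
{
	int l3_off = ETH_HLEN;		/* network header offset, untagged frame */
	int l4_off = ETH_HLEN + 20;	/* transport header offset (20B IPv4) */
	int l2_min = l3_off > ETH_HLEN + VLAN_HLEN ?
		     l3_off : ETH_HLEN + VLAN_HLEN;

	printf("MODE_L2:      %d\n", l2_min);		/* 18 */
	printf("MODE_IP:      %d\n", l4_off);		/* 34 */
	printf("MODE_TCP_UDP: %d\n", l4_off + 20);	/* 54, 20B TCP header */
	return 0;
}
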
static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
struct sk_buff *skb, bool bf)
{
@@ -135,8 +179,6 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
* headers and occur before the data gather.
* Therefore these headers must be copied into the WQE
*/
-#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
-
if (bf) {
u16 ihs = skb_headlen(skb);
@@ -146,8 +188,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
if (ihs <= sq->max_inline)
return skb_headlen(skb);
}
-
- return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
+ return mlx5e_calc_min_inline(sq->min_inline_mode, skb);
}
static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
@@ -315,6 +356,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
sq->stats.stopped++;
}
+ sq->stats.xmit_more += skb->xmit_more;
if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
int bf_sz = 0;
@@ -353,35 +395,6 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
return mlx5e_sq_xmit(sq, skb);
}
-void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
-{
- struct mlx5e_tx_wqe_info *wi;
- struct sk_buff *skb;
- u16 ci;
- int i;
-
- while (sq->cc != sq->pc) {
- ci = sq->cc & sq->wq.sz_m1;
- skb = sq->skb[ci];
- wi = &sq->wqe_info[ci];
-
- if (!skb) { /* nop */
- sq->cc++;
- continue;
- }
-
- for (i = 0; i < wi->num_dma; i++) {
- struct mlx5e_sq_dma *dma =
- mlx5e_dma_get(sq, sq->dma_fifo_cc++);
-
- mlx5e_tx_dma_unmap(sq->pdev, dma);
- }
-
- dev_kfree_skb_any(skb);
- sq->cc += wi->num_wqebbs;
- }
-}
-
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
struct mlx5e_sq *sq;
@@ -393,7 +406,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
sq = container_of(cq, struct mlx5e_sq, cq);
- if (unlikely(test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)))
+ if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
return false;
npkts = 0;
@@ -471,11 +484,39 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
netdev_tx_completed_queue(sq->txq, npkts, nbytes);
if (netif_tx_queue_stopped(sq->txq) &&
- mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM) &&
- likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) {
- netif_tx_wake_queue(sq->txq);
- sq->stats.wake++;
+ mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM)) {
+ netif_tx_wake_queue(sq->txq);
+ sq->stats.wake++;
}
return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
+
+void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
+{
+ struct mlx5e_tx_wqe_info *wi;
+ struct sk_buff *skb;
+ u16 ci;
+ int i;
+
+ while (sq->cc != sq->pc) {
+ ci = sq->cc & sq->wq.sz_m1;
+ skb = sq->skb[ci];
+ wi = &sq->wqe_info[ci];
+
+ if (!skb) { /* nop */
+ sq->cc++;
+ continue;
+ }
+
+ for (i = 0; i < wi->num_dma; i++) {
+ struct mlx5e_sq_dma *dma =
+ mlx5e_dma_get(sq, sq->dma_fifo_cc++);
+
+ mlx5e_tx_dma_unmap(sq->pdev, dma);
+ }
+
+ dev_kfree_skb_any(skb);
+ sq->cc += wi->num_wqebbs;
+ }
+}
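
The xmit_more accounting in the hunks above goes hand in hand with doorbell batching: the new counter tallies how often the stack signaled that more packets are queued, and the hardware doorbell is rung only when the batch ends or the queue stops. A toy model of the batching effect:

#include <stdio.h>

int main(void)
{
	int xmit_more[] = { 1, 1, 1, 0, 1, 0 };	/* as signaled by the stack */
	int doorbells = 0, more_cnt = 0;
	unsigned int i;

	for (i = 0; i < sizeof(xmit_more) / sizeof(*xmit_more); i++) {
		more_cnt += xmit_more[i];
		if (!xmit_more[i])
			doorbells++;		/* ring once per batch */
	}
	/* six packets posted, two doorbells, xmit_more counted four times */
	printf("posted=6 doorbells=%d xmit_more=%d\n", doorbells, more_cnt);
	return 0;
}
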
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index c38781fa5..9bf33bb69 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -51,16 +51,18 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
{
+ struct mlx5e_sq *sq = container_of(cq, struct mlx5e_sq, cq);
struct mlx5_wq_cyc *wq;
struct mlx5_cqe64 *cqe;
- struct mlx5e_sq *sq;
u16 sqcc;
+ if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
+ return;
+
cqe = mlx5e_get_cqe(cq);
if (likely(!cqe))
return;
- sq = container_of(cq, struct mlx5e_sq, cq);
wq = &sq->wq;
/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
@@ -136,6 +138,10 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
for (i = 0; i < c->num_tc; i++)
mlx5e_cq_arm(&c->sq[i].cq);
+
+ if (test_bit(MLX5E_RQ_STATE_AM, &c->rq.state))
+ mlx5e_rx_am(&c->rq);
+
mlx5e_cq_arm(&c->rq.cq);
mlx5e_cq_arm(&c->icosq.cq);
@@ -146,6 +152,7 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq)
{
struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
+ cq->event_ctr++;
set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags);
napi_schedule(cq->napi);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index aebbd6ccb..b247949df 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -40,17 +40,6 @@
#define UPLINK_VPORT 0xFFFF
-#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
-
-#define esw_info(dev, format, ...) \
- pr_info("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
-
-#define esw_warn(dev, format, ...) \
- pr_warn("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
-
-#define esw_debug(dev, format, ...) \
- mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)
-
enum {
MLX5_ACTION_NONE = 0,
MLX5_ACTION_ADD = 1,
@@ -92,6 +81,9 @@ enum {
MC_ADDR_CHANGE | \
PROMISC_CHANGE)
+int esw_offloads_init(struct mlx5_eswitch *esw, int nvports);
+void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
+
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
u32 events_mask)
{
@@ -337,25 +329,23 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
MLX5_MATCH_OUTER_HEADERS);
struct mlx5_flow_rule *flow_rule = NULL;
struct mlx5_flow_destination dest;
+ struct mlx5_flow_spec *spec;
void *mv_misc = NULL;
void *mc_misc = NULL;
u8 *dmac_v = NULL;
u8 *dmac_c = NULL;
- u32 *match_v;
- u32 *match_c;
if (rx_rule)
match_header |= MLX5_MATCH_MISC_PARAMETERS;
- match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
- match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
- if (!match_v || !match_c) {
+
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
pr_warn("FDB: Failed to alloc match parameters\n");
- goto out;
+ return NULL;
}
-
- dmac_v = MLX5_ADDR_OF(fte_match_param, match_v,
+ dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.dmac_47_16);
- dmac_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers.dmac_47_16);
if (match_header & MLX5_MATCH_OUTER_HEADERS) {
@@ -364,8 +354,10 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
}
if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
- mv_misc = MLX5_ADDR_OF(fte_match_param, match_v, misc_parameters);
- mc_misc = MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters);
+ mv_misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters);
+ mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters);
MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT);
MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
}
@@ -376,11 +368,9 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
esw_debug(esw->dev,
"\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
dmac_v, dmac_c, vport);
+ spec->match_criteria_enable = match_header;
flow_rule =
- mlx5_add_flow_rule(esw->fdb_table.fdb,
- match_header,
- match_c,
- match_v,
+ mlx5_add_flow_rule(esw->fdb_table.fdb, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
0, &dest);
if (IS_ERR(flow_rule)) {
@@ -389,9 +379,8 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
flow_rule = NULL;
}
-out:
- kfree(match_v);
- kfree(match_c);
+
+ kvfree(spec);
return flow_rule;
}
@@ -428,7 +417,7 @@ esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
}
-static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
+static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_core_dev *dev = esw->dev;
@@ -479,7 +468,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
esw_warn(dev, "Failed to create flow group err(%d)\n", err);
goto out;
}
- esw->fdb_table.addr_grp = g;
+ esw->fdb_table.legacy.addr_grp = g;
/* Allmulti group : One rule that forwards any mcast traffic */
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
@@ -494,7 +483,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
goto out;
}
- esw->fdb_table.allmulti_grp = g;
+ esw->fdb_table.legacy.allmulti_grp = g;
/* Promiscuous group :
 * One rule that forwards all unmatched traffic from previous groups
@@ -511,17 +500,17 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
goto out;
}
- esw->fdb_table.promisc_grp = g;
+ esw->fdb_table.legacy.promisc_grp = g;
out:
if (err) {
- if (!IS_ERR_OR_NULL(esw->fdb_table.allmulti_grp)) {
- mlx5_destroy_flow_group(esw->fdb_table.allmulti_grp);
- esw->fdb_table.allmulti_grp = NULL;
+ if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.allmulti_grp)) {
+ mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
+ esw->fdb_table.legacy.allmulti_grp = NULL;
}
- if (!IS_ERR_OR_NULL(esw->fdb_table.addr_grp)) {
- mlx5_destroy_flow_group(esw->fdb_table.addr_grp);
- esw->fdb_table.addr_grp = NULL;
+ if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.addr_grp)) {
+ mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
+ esw->fdb_table.legacy.addr_grp = NULL;
}
if (!IS_ERR_OR_NULL(esw->fdb_table.fdb)) {
mlx5_destroy_flow_table(esw->fdb_table.fdb);
@@ -533,20 +522,20 @@ out:
return err;
}
-static void esw_destroy_fdb_table(struct mlx5_eswitch *esw)
+static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
{
if (!esw->fdb_table.fdb)
return;
esw_debug(esw->dev, "Destroy FDB Table\n");
- mlx5_destroy_flow_group(esw->fdb_table.promisc_grp);
- mlx5_destroy_flow_group(esw->fdb_table.allmulti_grp);
- mlx5_destroy_flow_group(esw->fdb_table.addr_grp);
+ mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
+ mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
+ mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
mlx5_destroy_flow_table(esw->fdb_table.fdb);
esw->fdb_table.fdb = NULL;
- esw->fdb_table.addr_grp = NULL;
- esw->fdb_table.allmulti_grp = NULL;
- esw->fdb_table.promisc_grp = NULL;
+ esw->fdb_table.legacy.addr_grp = NULL;
+ esw->fdb_table.legacy.allmulti_grp = NULL;
+ esw->fdb_table.legacy.promisc_grp = NULL;
}
/* E-Switch vport UC/MC lists management */
@@ -578,7 +567,8 @@ static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
if (err)
goto abort;
- if (esw->fdb_table.fdb) /* SRIOV is enabled: Forward UC MAC to vport */
+ /* SRIOV is enabled: Forward UC MAC to vport */
+ if (esw->fdb_table.fdb && esw->mode == SRIOV_LEGACY)
vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM index:%d fr(%p)\n",
@@ -1300,9 +1290,8 @@ static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
+ struct mlx5_flow_spec *spec;
u8 smac[ETH_ALEN];
- u32 *match_v;
- u32 *match_c;
int err = 0;
u8 *smac_v;
@@ -1336,9 +1325,8 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
"vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
vport->vport, vport->vlan, vport->qos);
- match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
- match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
- if (!match_v || !match_c) {
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
err = -ENOMEM;
esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n",
vport->vport, err);
@@ -1346,22 +1334,20 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
}
if (vport->vlan || vport->qos)
- MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
if (vport->spoofchk) {
- MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_47_16);
- MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_15_0);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
smac_v = MLX5_ADDR_OF(fte_match_param,
- match_v,
+ spec->match_value,
outer_headers.smac_47_16);
ether_addr_copy(smac_v, smac);
}
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
vport->ingress.allow_rule =
- mlx5_add_flow_rule(vport->ingress.acl,
- MLX5_MATCH_OUTER_HEADERS,
- match_c,
- match_v,
+ mlx5_add_flow_rule(vport->ingress.acl, spec,
MLX5_FLOW_CONTEXT_ACTION_ALLOW,
0, NULL);
if (IS_ERR(vport->ingress.allow_rule)) {
@@ -1372,13 +1358,9 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
goto out;
}
- memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param));
- memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param));
+ memset(spec, 0, sizeof(*spec));
vport->ingress.drop_rule =
- mlx5_add_flow_rule(vport->ingress.acl,
- 0,
- match_c,
- match_v,
+ mlx5_add_flow_rule(vport->ingress.acl, spec,
MLX5_FLOW_CONTEXT_ACTION_DROP,
0, NULL);
if (IS_ERR(vport->ingress.drop_rule)) {
@@ -1392,17 +1374,14 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
out:
if (err)
esw_vport_cleanup_ingress_rules(esw, vport);
-
- kfree(match_v);
- kfree(match_c);
+ kvfree(spec);
return err;
}
static int esw_vport_egress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- u32 *match_v;
- u32 *match_c;
+ struct mlx5_flow_spec *spec;
int err = 0;
esw_vport_cleanup_egress_rules(esw, vport);
@@ -1418,9 +1397,8 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
"vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
vport->vport, vport->vlan, vport->qos);
- match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
- match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
- if (!match_v || !match_c) {
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
err = -ENOMEM;
esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n",
vport->vport, err);
@@ -1428,16 +1406,14 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
}
/* Allowed vlan rule */
- MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, match_v, outer_headers.vlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.first_vid);
- MLX5_SET(fte_match_param, match_v, outer_headers.first_vid, vport->vlan);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->vlan);
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
vport->egress.allowed_vlan =
- mlx5_add_flow_rule(vport->egress.acl,
- MLX5_MATCH_OUTER_HEADERS,
- match_c,
- match_v,
+ mlx5_add_flow_rule(vport->egress.acl, spec,
MLX5_FLOW_CONTEXT_ACTION_ALLOW,
0, NULL);
if (IS_ERR(vport->egress.allowed_vlan)) {
@@ -1449,13 +1425,9 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
}
/* Drop others rule (star rule) */
- memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param));
- memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param));
+ memset(spec, 0, sizeof(*spec));
vport->egress.drop_rule =
- mlx5_add_flow_rule(vport->egress.acl,
- 0,
- match_c,
- match_v,
+ mlx5_add_flow_rule(vport->egress.acl, spec,
MLX5_FLOW_CONTEXT_ACTION_DROP,
0, NULL);
if (IS_ERR(vport->egress.drop_rule)) {
@@ -1465,8 +1437,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
vport->egress.drop_rule = NULL;
}
out:
- kfree(match_v);
- kfree(match_c);
+ kvfree(spec);
return err;
}
@@ -1480,7 +1451,8 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
- if (vport_num) { /* Only VFs need ACLs for VST and spoofchk filtering */
+ /* Only VFs need ACLs for VST and spoofchk filtering */
+ if (vport_num && esw->mode == SRIOV_LEGACY) {
esw_vport_ingress_config(esw, vport);
esw_vport_egress_config(esw, vport);
}
@@ -1531,7 +1503,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
*/
esw_vport_change_handle_locked(vport);
vport->enabled_events = 0;
- if (vport_num) {
+ if (vport_num && esw->mode == SRIOV_LEGACY) {
esw_vport_disable_egress_acl(esw, vport);
esw_vport_disable_ingress_acl(esw, vport);
}
@@ -1540,10 +1512,10 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
}
/* Public E-Switch API */
-int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs)
+int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
{
int err;
- int i;
+ int i, enabled_events;
if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
@@ -1561,16 +1533,20 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs)
if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
		esw_warn(esw->dev, "E-Switch egress ACL is not supported by FW\n");
- esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d)\n", nvfs);
-
+ esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode);
+ esw->mode = mode;
esw_disable_vport(esw, 0);
- err = esw_create_fdb_table(esw, nvfs + 1);
+ if (mode == SRIOV_LEGACY)
+ err = esw_create_legacy_fdb_table(esw, nvfs + 1);
+ else
+ err = esw_offloads_init(esw, nvfs + 1);
if (err)
goto abort;
+ enabled_events = (mode == SRIOV_LEGACY) ? SRIOV_VPORT_EVENTS : UC_ADDR_CHANGE;
for (i = 0; i <= nvfs; i++)
- esw_enable_vport(esw, i, SRIOV_VPORT_EVENTS);
+ esw_enable_vport(esw, i, enabled_events);
esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n",
esw->enabled_vports);
@@ -1578,22 +1554,25 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs)
abort:
esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
+ esw->mode = SRIOV_NONE;
return err;
}
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
{
struct esw_mc_addr *mc_promisc;
+ int nvports;
int i;
if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
return;
- esw_info(esw->dev, "disable SRIOV: active vports(%d)\n",
- esw->enabled_vports);
+ esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n",
+ esw->enabled_vports, esw->mode);
mc_promisc = esw->mc_promisc;
+ nvports = esw->enabled_vports;
for (i = 0; i < esw->total_vports; i++)
esw_disable_vport(esw, i);
@@ -1601,8 +1580,12 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
if (mc_promisc && mc_promisc->uplink_rule)
mlx5_del_flow_rule(mc_promisc->uplink_rule);
- esw_destroy_fdb_table(esw);
+ if (esw->mode == SRIOV_LEGACY)
+ esw_destroy_legacy_fdb_table(esw);
+ else if (esw->mode == SRIOV_OFFLOADS)
+ esw_offloads_cleanup(esw, nvports);
+ esw->mode = SRIOV_NONE;
/* VPORT 0 (PF) must be enabled back with non-sriov configuration */
esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
}
@@ -1660,6 +1643,14 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
goto abort;
}
+ esw->offloads.vport_reps =
+ kzalloc(total_vports * sizeof(struct mlx5_eswitch_rep),
+ GFP_KERNEL);
+ if (!esw->offloads.vport_reps) {
+ err = -ENOMEM;
+ goto abort;
+ }
+
mutex_init(&esw->state_lock);
for (vport_num = 0; vport_num < total_vports; vport_num++) {
@@ -1673,6 +1664,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
esw->total_vports = total_vports;
esw->enabled_vports = 0;
+ esw->mode = SRIOV_NONE;
dev->priv.eswitch = esw;
esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
@@ -1683,6 +1675,7 @@ abort:
destroy_workqueue(esw->work_queue);
kfree(esw->l2_table.bitmap);
kfree(esw->vports);
+ kfree(esw->offloads.vport_reps);
kfree(esw);
return err;
}
@@ -1700,6 +1693,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
destroy_workqueue(esw->work_queue);
kfree(esw->l2_table.bitmap);
kfree(esw->mc_promisc);
+ kfree(esw->offloads.vport_reps);
kfree(esw->vports);
kfree(esw);
}
@@ -1775,7 +1769,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
vport, err);
mutex_lock(&esw->state_lock);
- if (evport->enabled)
+ if (evport->enabled && esw->mode == SRIOV_LEGACY)
err = esw_vport_ingress_config(esw, evport);
mutex_unlock(&esw->state_lock);
return err;
@@ -1847,7 +1841,7 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
mutex_lock(&esw->state_lock);
evport->vlan = vlan;
evport->qos = qos;
- if (evport->enabled) {
+ if (evport->enabled && esw->mode == SRIOV_LEGACY) {
err = esw_vport_ingress_config(esw, evport);
if (err)
goto out;
@@ -1876,10 +1870,11 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
mutex_lock(&esw->state_lock);
pschk = evport->spoofchk;
evport->spoofchk = spoofchk;
- if (evport->enabled)
+ if (evport->enabled && esw->mode == SRIOV_LEGACY) {
err = esw_vport_ingress_config(esw, evport);
- if (err)
- evport->spoofchk = pschk;
+ if (err)
+ evport->spoofchk = pschk;
+ }
mutex_unlock(&esw->state_lock);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index fd6800256..a96140971 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -35,6 +35,7 @@
#include <linux/if_ether.h>
#include <linux/if_link.h>
+#include <net/devlink.h>
#include <linux/mlx5/device.h>
#define MLX5_MAX_UC_PER_VPORT(dev) \
@@ -46,6 +47,8 @@
#define MLX5_L2_ADDR_HASH_SIZE (BIT(BITS_PER_BYTE))
#define MLX5_L2_ADDR_HASH(addr) (addr[5])
+#define FDB_UPLINK_VPORT 0xffff
+
/* L2 -mac address based- hash helpers */
struct l2addr_node {
struct hlist_node hlist;
@@ -134,9 +137,50 @@ struct mlx5_l2_table {
struct mlx5_eswitch_fdb {
void *fdb;
- struct mlx5_flow_group *addr_grp;
- struct mlx5_flow_group *allmulti_grp;
- struct mlx5_flow_group *promisc_grp;
+ union {
+ struct legacy_fdb {
+ struct mlx5_flow_group *addr_grp;
+ struct mlx5_flow_group *allmulti_grp;
+ struct mlx5_flow_group *promisc_grp;
+ } legacy;
+
+ struct offloads_fdb {
+ struct mlx5_flow_table *fdb;
+ struct mlx5_flow_group *send_to_vport_grp;
+ struct mlx5_flow_group *miss_grp;
+ struct mlx5_flow_rule *miss_rule;
+ } offloads;
+ };
+};
+
+enum {
+ SRIOV_NONE,
+ SRIOV_LEGACY,
+ SRIOV_OFFLOADS
+};
+
+struct mlx5_esw_sq {
+ struct mlx5_flow_rule *send_to_vport_rule;
+ struct list_head list;
+};
+
+struct mlx5_eswitch_rep {
+ int (*load)(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep);
+ void (*unload)(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep);
+ u16 vport;
+ struct mlx5_flow_rule *vport_rx_rule;
+ void *priv_data;
+ struct list_head vport_sqs_list;
+ bool valid;
+ u8 hw_id[ETH_ALEN];
+};
+
+struct mlx5_esw_offload {
+ struct mlx5_flow_table *ft_offloads;
+ struct mlx5_flow_group *vport_rx_group;
+ struct mlx5_eswitch_rep *vport_reps;
};
struct mlx5_eswitch {
@@ -153,13 +197,15 @@ struct mlx5_eswitch {
*/
struct mutex state_lock;
struct esw_mc_addr *mc_promisc;
+ struct mlx5_esw_offload offloads;
+ int mode;
};
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe);
-int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs);
+int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode);
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
int vport, u8 mac[ETH_ALEN]);
@@ -177,4 +223,36 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
int vport,
struct ifla_vf_stats *vf_stats);
+struct mlx5_flow_spec;
+
+struct mlx5_flow_rule *
+mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
+ struct mlx5_flow_spec *spec,
+ u32 action, u32 src_vport, u32 dst_vport);
+struct mlx5_flow_rule *
+mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn);
+
+int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep,
+ u16 *sqns_array, int sqns_num);
+void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep);
+
+int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode);
+int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
+void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep);
+void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
+ int vport);
+
+#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
+
+#define esw_info(dev, format, ...) \
+ pr_info("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
+
+#define esw_warn(dev, format, ...) \
+ pr_warn("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
+
+#define esw_debug(dev, format, ...) \
+ mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)
#endif /* __MLX5_ESWITCH_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
new file mode 100644
index 000000000..7de40e6b0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -0,0 +1,646 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/vport.h>
+#include <linux/mlx5/fs.h>
+#include "mlx5_core.h"
+#include "eswitch.h"
+
+enum {
+ FDB_FAST_PATH = 0,
+ FDB_SLOW_PATH
+};
+
+struct mlx5_flow_rule *
+mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
+ struct mlx5_flow_spec *spec,
+ u32 action, u32 src_vport, u32 dst_vport)
+{
+ struct mlx5_flow_destination dest = { 0 };
+ struct mlx5_fc *counter = NULL;
+ struct mlx5_flow_rule *rule;
+ void *misc;
+
+ if (esw->mode != SRIOV_OFFLOADS)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest.vport_num = dst_vport;
+ action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ } else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ counter = mlx5_fc_create(esw->dev, true);
+ if (IS_ERR(counter))
+ return ERR_CAST(counter);
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter = counter;
+ }
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+ MLX5_SET(fte_match_set_misc, misc, source_port, src_vport);
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
+ MLX5_MATCH_MISC_PARAMETERS;
+
+ rule = mlx5_add_flow_rule((struct mlx5_flow_table *)esw->fdb_table.fdb,
+ spec, action, 0, &dest);
+
+ if (IS_ERR(rule))
+ mlx5_fc_destroy(esw->dev, counter);
+
+ return rule;
+}
+
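
mlx5_eswitch_add_offloaded_rule() stamps every offloaded flow with its eswitch source port: the match value carries the vport number and the criteria mask enables the misc source_port field, so one FDB can hold flows from many vports without ambiguity. A toy value/mask check in plain C:

#include <stdint.h>
#include <stdio.h>

struct match { uint32_t value; uint32_t mask; };

static int hits(struct match m, uint32_t source_port)
{
	return (source_port & m.mask) == (m.value & m.mask);
}

int main(void)
{
	struct match src_vport1 = { .value = 1, .mask = 0xffffffff };

	/* traffic from vport 1 matches, vport 2 does not */
	printf("%d %d\n", hits(src_vport1, 1), hits(src_vport1, 2)); /* 1 0 */
	return 0;
}
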
+static struct mlx5_flow_rule *
+mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
+{
+ struct mlx5_flow_destination dest;
+ struct mlx5_flow_rule *flow_rule;
+ struct mlx5_flow_spec *spec;
+ void *misc;
+
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
+ esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
+ flow_rule = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+ MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
+ MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest.vport_num = vport;
+
+ flow_rule = mlx5_add_flow_rule(esw->fdb_table.offloads.fdb, spec,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ 0, &dest);
+ if (IS_ERR(flow_rule))
+ esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
+out:
+ kvfree(spec);
+ return flow_rule;
+}
+
+void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5_esw_sq *esw_sq, *tmp;
+
+ if (esw->mode != SRIOV_OFFLOADS)
+ return;
+
+ list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
+ mlx5_del_flow_rule(esw_sq->send_to_vport_rule);
+ list_del(&esw_sq->list);
+ kfree(esw_sq);
+ }
+}
+
+int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep,
+ u16 *sqns_array, int sqns_num)
+{
+ struct mlx5_flow_rule *flow_rule;
+ struct mlx5_esw_sq *esw_sq;
+ int vport;
+ int err;
+ int i;
+
+ if (esw->mode != SRIOV_OFFLOADS)
+ return 0;
+
+ vport = rep->vport == 0 ?
+ FDB_UPLINK_VPORT : rep->vport;
+
+ for (i = 0; i < sqns_num; i++) {
+ esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
+ if (!esw_sq) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ /* Add re-inject rule to the PF/representor sqs */
+ flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
+ vport,
+ sqns_array[i]);
+ if (IS_ERR(flow_rule)) {
+ err = PTR_ERR(flow_rule);
+ kfree(esw_sq);
+ goto out_err;
+ }
+ esw_sq->send_to_vport_rule = flow_rule;
+ list_add(&esw_sq->list, &rep->vport_sqs_list);
+ }
+ return 0;
+
+out_err:
+ mlx5_eswitch_sqs2vport_stop(esw, rep);
+ return err;
+}
+
+static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
+{
+ struct mlx5_flow_destination dest;
+ struct mlx5_flow_rule *flow_rule = NULL;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
+ esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest.vport_num = 0;
+
+ flow_rule = mlx5_add_flow_rule(esw->fdb_table.offloads.fdb, spec,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ 0, &dest);
+ if (IS_ERR(flow_rule)) {
+ err = PTR_ERR(flow_rule);
+ esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
+ goto out;
+ }
+
+ esw->fdb_table.offloads.miss_rule = flow_rule;
+out:
+ kvfree(spec);
+ return err;
+}
+
+#define MAX_PF_SQ 256
+#define ESW_OFFLOADS_NUM_ENTRIES (1 << 13) /* 8K */
+#define ESW_OFFLOADS_NUM_GROUPS 4
+
+static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *root_ns;
+ struct mlx5_flow_table *fdb = NULL;
+ struct mlx5_flow_group *g;
+ u32 *flow_group_in;
+ void *match_criteria;
+ int table_size, ix, err = 0;
+
+ flow_group_in = mlx5_vzalloc(inlen);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!root_ns) {
+		esw_warn(dev, "Failed to get FDB flow namespace\n");
+		err = -EOPNOTSUPP; /* err starts at 0; report the failure */
+		goto ns_err;
+ }
+
+ esw_debug(dev, "Create offloads FDB table, log_max_size(%d)\n",
+ MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
+
+ fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
+ ESW_OFFLOADS_NUM_ENTRIES,
+ ESW_OFFLOADS_NUM_GROUPS, 0);
+ if (IS_ERR(fdb)) {
+ err = PTR_ERR(fdb);
+ esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
+ goto fast_fdb_err;
+ }
+ esw->fdb_table.fdb = fdb;
+
+ table_size = nvports + MAX_PF_SQ + 1;
+ fdb = mlx5_create_flow_table(root_ns, FDB_SLOW_PATH, table_size, 0);
+ if (IS_ERR(fdb)) {
+ err = PTR_ERR(fdb);
+ esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
+ goto slow_fdb_err;
+ }
+ esw->fdb_table.offloads.fdb = fdb;
+
+ /* create send-to-vport group */
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS);
+
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
+
+ ix = nvports + MAX_PF_SQ;
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);
+
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
+ goto send_vport_err;
+ }
+ esw->fdb_table.offloads.send_to_vport_grp = g;
+
+ /* create miss group */
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0);
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1);
+
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
+ goto miss_err;
+ }
+ esw->fdb_table.offloads.miss_grp = g;
+
+ err = esw_add_fdb_miss_rule(esw);
+ if (err)
+ goto miss_rule_err;
+
+ return 0;
+
+miss_rule_err:
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
+miss_err:
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
+send_vport_err:
+ mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
+slow_fdb_err:
+ mlx5_destroy_flow_table(esw->fdb_table.fdb);
+fast_fdb_err:
+ns_err:
+ kvfree(flow_group_in);
+ return err;
+}
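
The slow-path sizing in esw_create_offloads_fdb_table() packs two groups into one table: nvports + MAX_PF_SQ send-to-vport entries followed by the miss entries. A standalone sketch of that index arithmetic (nvports picked arbitrarily for illustration):

#include <stdio.h>

#define MAX_PF_SQ 256

int main(void)
{
	int nvports = 4;				/* example value */
	int table_size = nvports + MAX_PF_SQ + 1;	/* 261 */
	int ix = nvports + MAX_PF_SQ;			/* 260 */

	/* send-to-vport group occupies flow indices [0, ix - 1] */
	printf("send-to-vport group: 0..%d\n", ix - 1);
	/* miss group starts at ix; its end_flow_index is ix + 1 */
	printf("miss group: %d..%d (table size %d)\n", ix, ix + 1, table_size);
	return 0;
}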
+
+static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw)
+{
+ if (!esw->fdb_table.fdb)
+ return;
+
+ esw_debug(esw->dev, "Destroy offloads FDB Table\n");
+ mlx5_del_flow_rule(esw->fdb_table.offloads.miss_rule);
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
+
+ mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
+ mlx5_destroy_flow_table(esw->fdb_table.fdb);
+}
+
+static int esw_create_offloads_table(struct mlx5_eswitch *esw)
+{
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *ft_offloads;
+ struct mlx5_core_dev *dev = esw->dev;
+ int err = 0;
+
+ ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
+ if (!ns) {
+ esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
+ return -EOPNOTSUPP;
+ }
+
+ ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0);
+ if (IS_ERR(ft_offloads)) {
+ err = PTR_ERR(ft_offloads);
+ esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
+ return err;
+ }
+
+ esw->offloads.ft_offloads = ft_offloads;
+ return 0;
+}
+
+static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
+{
+ struct mlx5_esw_offload *offloads = &esw->offloads;
+
+ mlx5_destroy_flow_table(offloads->ft_offloads);
+}
+
+static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *g;
+ struct mlx5_priv *priv = &esw->dev->priv;
+ u32 *flow_group_in;
+ void *match_criteria, *misc;
+ int err = 0;
+ int nvports = priv->sriov.num_vfs + 2;
+
+ flow_group_in = mlx5_vzalloc(inlen);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ /* create vport rx group */
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS);
+
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+ misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);
+
+ g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
+
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
+ goto out;
+ }
+
+ esw->offloads.vport_rx_group = g;
+out:
+ kfree(flow_group_in);
+ return err;
+}
+
+static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
+{
+ mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
+}
+
+struct mlx5_flow_rule *
+mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
+{
+ struct mlx5_flow_destination dest;
+ struct mlx5_flow_rule *flow_rule;
+ struct mlx5_flow_spec *spec;
+ void *misc;
+
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
+ esw_warn(esw->dev, "Failed to alloc match parameters\n");
+ flow_rule = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+ MLX5_SET(fte_match_set_misc, misc, source_port, vport);
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest.tir_num = tirn;
+
+ flow_rule = mlx5_add_flow_rule(esw->offloads.ft_offloads, spec,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ 0, &dest);
+ if (IS_ERR(flow_rule)) {
+ esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
+ goto out;
+ }
+
+out:
+ kvfree(spec);
+ return flow_rule;
+}
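
mlx5_eswitch_create_vport_rx_rule() is the receive-side counterpart of the send-to-vport rules: it matches the source_port misc field against a vport and forwards to a TIR. A hypothetical caller (the vport and TIR numbers are illustrative):

/* Hypothetical: steer traffic sourced from vport 1 into an existing TIR. */
static int example_setup_rep_rx(struct mlx5_eswitch *esw, u32 tirn)
{
	struct mlx5_flow_rule *rule;

	rule = mlx5_eswitch_create_vport_rx_rule(esw, 1, tirn);
	if (IS_ERR(rule))
		return PTR_ERR(rule);

	/* keep 'rule' around; tear down later with mlx5_del_flow_rule() */
	return 0;
}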
+
+static int esw_offloads_start(struct mlx5_eswitch *esw)
+{
+ int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
+
+ if (esw->mode != SRIOV_LEGACY) {
+ esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
+ return -EINVAL;
+ }
+
+ mlx5_eswitch_disable_sriov(esw);
+ err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
+ if (err) {
+ esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
+ err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
+ if (err1)
+ esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err);
+ }
+ return err;
+}
+
+int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
+{
+ struct mlx5_eswitch_rep *rep;
+ int vport;
+ int err;
+
+ err = esw_create_offloads_fdb_table(esw, nvports);
+ if (err)
+ return err;
+
+ err = esw_create_offloads_table(esw);
+ if (err)
+ goto create_ft_err;
+
+ err = esw_create_vport_rx_group(esw);
+ if (err)
+ goto create_fg_err;
+
+ for (vport = 0; vport < nvports; vport++) {
+ rep = &esw->offloads.vport_reps[vport];
+ if (!rep->valid)
+ continue;
+
+ err = rep->load(esw, rep);
+ if (err)
+ goto err_reps;
+ }
+ return 0;
+
+err_reps:
+ for (vport--; vport >= 0; vport--) {
+ rep = &esw->offloads.vport_reps[vport];
+ if (!rep->valid)
+ continue;
+ rep->unload(esw, rep);
+ }
+ esw_destroy_vport_rx_group(esw);
+
+create_fg_err:
+ esw_destroy_offloads_table(esw);
+
+create_ft_err:
+ esw_destroy_offloads_fdb_table(esw);
+ return err;
+}
+
+static int esw_offloads_stop(struct mlx5_eswitch *esw)
+{
+ int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
+
+ mlx5_eswitch_disable_sriov(esw);
+ err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
+ if (err) {
+ esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
+ err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
+ if (err1)
+ esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
+ }
+
+ return err;
+}
+
+void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
+{
+ struct mlx5_eswitch_rep *rep;
+ int vport;
+
+ for (vport = 0; vport < nvports; vport++) {
+ rep = &esw->offloads.vport_reps[vport];
+ if (!rep->valid)
+ continue;
+ rep->unload(esw, rep);
+ }
+
+ esw_destroy_vport_rx_group(esw);
+ esw_destroy_offloads_table(esw);
+ esw_destroy_offloads_fdb_table(esw);
+}
+
+static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
+{
+ switch (mode) {
+ case DEVLINK_ESWITCH_MODE_LEGACY:
+ *mlx5_mode = SRIOV_LEGACY;
+ break;
+ case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+ *mlx5_mode = SRIOV_OFFLOADS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
+{
+ switch (mlx5_mode) {
+ case SRIOV_LEGACY:
+ *mode = DEVLINK_ESWITCH_MODE_LEGACY;
+ break;
+ case SRIOV_OFFLOADS:
+ *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
+{
+ struct mlx5_core_dev *dev;
+ u16 cur_mlx5_mode, mlx5_mode = 0;
+
+ dev = devlink_priv(devlink);
+
+ if (!MLX5_CAP_GEN(dev, vport_group_manager))
+ return -EOPNOTSUPP;
+
+ cur_mlx5_mode = dev->priv.eswitch->mode;
+
+ if (cur_mlx5_mode == SRIOV_NONE)
+ return -EOPNOTSUPP;
+
+ if (esw_mode_from_devlink(mode, &mlx5_mode))
+ return -EINVAL;
+
+ if (cur_mlx5_mode == mlx5_mode)
+ return 0;
+
+ if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ return esw_offloads_start(dev->priv.eswitch);
+ else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
+ return esw_offloads_stop(dev->priv.eswitch);
+ else
+ return -EINVAL;
+}
+
+int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+{
+ struct mlx5_core_dev *dev;
+
+ dev = devlink_priv(devlink);
+
+ if (!MLX5_CAP_GEN(dev, vport_group_manager))
+ return -EOPNOTSUPP;
+
+ if (dev->priv.eswitch->mode == SRIOV_NONE)
+ return -EOPNOTSUPP;
+
+ return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
+}
+
+void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5_esw_offload *offloads = &esw->offloads;
+
+ memcpy(&offloads->vport_reps[rep->vport], rep,
+ sizeof(struct mlx5_eswitch_rep));
+
+ INIT_LIST_HEAD(&offloads->vport_reps[rep->vport].vport_sqs_list);
+ offloads->vport_reps[rep->vport].valid = true;
+}
+
+void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
+ int vport)
+{
+ struct mlx5_esw_offload *offloads = &esw->offloads;
+ struct mlx5_eswitch_rep *rep;
+
+ rep = &offloads->vport_reps[vport];
+
+ if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport].enabled)
+ rep->unload(esw, rep);
+
+ offloads->vport_reps[vport].valid = false;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index a5bb6b695..287ade151 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -413,3 +413,70 @@ int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id,
return 0;
}
+
+struct mlx5_cmd_fc_bulk {
+ u16 id;
+ int num;
+ int outlen;
+ u32 out[0];
+};
+
+struct mlx5_cmd_fc_bulk *
+mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num)
+{
+ struct mlx5_cmd_fc_bulk *b;
+ int outlen =
+ MLX5_ST_SZ_BYTES(query_flow_counter_out) +
+ MLX5_ST_SZ_BYTES(traffic_counter) * num;
+
+ b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
+ if (!b)
+ return NULL;
+
+ b->id = id;
+ b->num = num;
+ b->outlen = outlen;
+
+ return b;
+}
+
+void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
+{
+ kfree(b);
+}
+
+int
+mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
+{
+ u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_flow_counter_in, in, opcode,
+ MLX5_CMD_OP_QUERY_FLOW_COUNTER);
+ MLX5_SET(query_flow_counter_in, in, op_mod, 0);
+ MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
+ MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
+ b->out, b->outlen);
+}
+
+void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
+ struct mlx5_cmd_fc_bulk *b, u16 id,
+ u64 *packets, u64 *bytes)
+{
+ int index = id - b->id;
+ void *stats;
+
+ if (index < 0 || index >= b->num) {
+ mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n",
+ id, b->id, b->id + b->num - 1);
+ return;
+ }
+
+ stats = MLX5_ADDR_OF(query_flow_counter_out, b->out,
+ flow_statistics[index]);
+ *packets = MLX5_GET64(traffic_counter, stats, packets);
+ *bytes = MLX5_GET64(traffic_counter, stats, octets);
+}
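
The bulk object above is a fixed header plus a flexible out[] array whose size depends only on num; mlx5_cmd_fc_bulk_get() then locates a counter by its offset id - b->id. A standalone sketch of the sizing (the struct sizes are placeholders, stand-ins for the real MLX5_ST_SZ_BYTES values from the mlx5_ifc layouts):

#include <stdio.h>

/* Assumed sizes -- illustrative only. */
#define QUERY_FLOW_COUNTER_OUT_BYTES	16
#define TRAFFIC_COUNTER_BYTES		16

int main(void)
{
	int num = 8;	/* counters per bulk query, example value */
	int outlen = QUERY_FLOW_COUNTER_OUT_BYTES +
		     TRAFFIC_COUNTER_BYTES * num;

	printf("bulk out buffer: %d bytes for %d counters\n", outlen, num);
	return 0;
}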
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index fc4f7b83f..158844cef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -76,4 +76,16 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id);
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id);
int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id,
u64 *packets, u64 *bytes);
+
+struct mlx5_cmd_fc_bulk;
+
+struct mlx5_cmd_fc_bulk *
+mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num);
+void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b);
+int
+mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b);
+void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
+ struct mlx5_cmd_fc_bulk *b, u16 id,
+ u64 *packets, u64 *bytes);
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index e912a3d25..3d6c1f65e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -67,13 +67,21 @@
#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
.caps = (long[]) {__VA_ARGS__} }
+#define FS_CHAINING_CAPS FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
+ FS_CAP(flow_table_properties_nic_receive.modify_root), \
+ FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
+ FS_CAP(flow_table_properties_nic_receive.flow_table_modify))
+
#define LEFTOVERS_NUM_LEVELS 1
#define LEFTOVERS_NUM_PRIOS 1
#define BY_PASS_PRIO_NUM_LEVELS 1
-#define BY_PASS_MIN_LEVEL (KERNEL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
+#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
LEFTOVERS_NUM_PRIOS)
+#define ETHTOOL_PRIO_NUM_LEVELS 1
+#define ETHTOOL_NUM_PRIOS 11
+#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Vlan, mac, ttc, aRFS */
#define KERNEL_NIC_PRIO_NUM_LEVELS 4
#define KERNEL_NIC_NUM_PRIOS 1
@@ -83,6 +91,11 @@
#define ANCHOR_NUM_LEVELS 1
#define ANCHOR_NUM_PRIOS 1
#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
+
+#define OFFLOADS_MAX_FT 1
+#define OFFLOADS_NUM_PRIOS 1
+#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1)
+
struct node_caps {
size_t arr_sz;
long *caps;
@@ -98,24 +111,24 @@ static struct init_tree_node {
int num_levels;
} root_fs = {
.type = FS_TYPE_NAMESPACE,
- .ar_size = 4,
+ .ar_size = 6,
.children = (struct init_tree_node[]) {
ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
- FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en),
- FS_CAP(flow_table_properties_nic_receive.modify_root),
- FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode),
- FS_CAP(flow_table_properties_nic_receive.flow_table_modify)),
+ FS_CHAINING_CAPS,
ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
BY_PASS_PRIO_NUM_LEVELS))),
+ ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
+ ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))),
+ ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
+ ETHTOOL_PRIO_NUM_LEVELS))),
ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
ADD_NS(ADD_MULTIPLE_PRIO(1, 1),
ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
KERNEL_NIC_PRIO_NUM_LEVELS))),
ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
- FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en),
- FS_CAP(flow_table_properties_nic_receive.modify_root),
- FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode),
- FS_CAP(flow_table_properties_nic_receive.flow_table_modify)),
+ FS_CHAINING_CAPS,
ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))),
ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))),
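
The min-level macros added in this hunk chain into one another, fixing the vertical order of the NIC RX namespaces. A standalone sketch of the arithmetic (KERNEL_MIN_LEVEL and MLX5_BY_PASS_NUM_PRIOS are defined outside this hunk, so the values below are illustrative only):

#include <stdio.h>

#define KERNEL_MIN_LEVEL	5	/* illustrative */
#define MLX5_BY_PASS_NUM_PRIOS	8	/* illustrative */

#define LEFTOVERS_NUM_PRIOS	1
#define ETHTOOL_NUM_PRIOS	11
#define ETHTOOL_MIN_LEVEL	(KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
#define BY_PASS_MIN_LEVEL	(ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS + \
				 LEFTOVERS_NUM_PRIOS)
#define ANCHOR_MIN_LEVEL	(BY_PASS_MIN_LEVEL + 1)
#define OFFLOADS_MIN_LEVEL	(ANCHOR_MIN_LEVEL + 1)

int main(void)
{
	printf("ethtool >= %d, bypass >= %d, anchor >= %d, offloads >= %d\n",
	       ETHTOOL_MIN_LEVEL, BY_PASS_MIN_LEVEL,
	       ANCHOR_MIN_LEVEL, OFFLOADS_MIN_LEVEL);
	return 0;
}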
@@ -1152,9 +1165,7 @@ static bool dest_is_valid(struct mlx5_flow_destination *dest,
static struct mlx5_flow_rule *
_mlx5_add_flow_rule(struct mlx5_flow_table *ft,
- u8 match_criteria_enable,
- u32 *match_criteria,
- u32 *match_value,
+ struct mlx5_flow_spec *spec,
u32 action,
u32 flow_tag,
struct mlx5_flow_destination *dest)
@@ -1168,22 +1179,23 @@ _mlx5_add_flow_rule(struct mlx5_flow_table *ft,
nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT);
fs_for_each_fg(g, ft)
if (compare_match_criteria(g->mask.match_criteria_enable,
- match_criteria_enable,
+ spec->match_criteria_enable,
g->mask.match_criteria,
- match_criteria)) {
- rule = add_rule_fg(g, match_value,
+ spec->match_criteria)) {
+ rule = add_rule_fg(g, spec->match_value,
action, flow_tag, dest);
if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC)
goto unlock;
}
- g = create_autogroup(ft, match_criteria_enable, match_criteria);
+ g = create_autogroup(ft, spec->match_criteria_enable,
+ spec->match_criteria);
if (IS_ERR(g)) {
rule = (void *)g;
goto unlock;
}
- rule = add_rule_fg(g, match_value,
+ rule = add_rule_fg(g, spec->match_value,
action, flow_tag, dest);
if (IS_ERR(rule)) {
/* Remove assumes refcount > 0 and autogroup creates a group
@@ -1207,9 +1219,7 @@ static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
struct mlx5_flow_rule *
mlx5_add_flow_rule(struct mlx5_flow_table *ft,
- u8 match_criteria_enable,
- u32 *match_criteria,
- u32 *match_value,
+ struct mlx5_flow_spec *spec,
u32 action,
u32 flow_tag,
struct mlx5_flow_destination *dest)
@@ -1240,8 +1250,7 @@ mlx5_add_flow_rule(struct mlx5_flow_table *ft,
}
}
- rule = _mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria,
- match_value, action, flow_tag, dest);
+ rule = _mlx5_add_flow_rule(ft, spec, action, flow_tag, dest);
if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
if (!IS_ERR_OR_NULL(rule) &&
@@ -1359,40 +1368,47 @@ void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type type)
{
- struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns;
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ struct mlx5_flow_root_namespace *root_ns;
int prio;
struct fs_prio *fs_prio;
struct mlx5_flow_namespace *ns;
- if (!root_ns)
+ if (!steering)
return NULL;
switch (type) {
case MLX5_FLOW_NAMESPACE_BYPASS:
+ case MLX5_FLOW_NAMESPACE_OFFLOADS:
+ case MLX5_FLOW_NAMESPACE_ETHTOOL:
case MLX5_FLOW_NAMESPACE_KERNEL:
case MLX5_FLOW_NAMESPACE_LEFTOVERS:
case MLX5_FLOW_NAMESPACE_ANCHOR:
prio = type;
break;
case MLX5_FLOW_NAMESPACE_FDB:
- if (dev->priv.fdb_root_ns)
- return &dev->priv.fdb_root_ns->ns;
+ if (steering->fdb_root_ns)
+ return &steering->fdb_root_ns->ns;
else
return NULL;
case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
- if (dev->priv.esw_egress_root_ns)
- return &dev->priv.esw_egress_root_ns->ns;
+ if (steering->esw_egress_root_ns)
+ return &steering->esw_egress_root_ns->ns;
else
return NULL;
case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
- if (dev->priv.esw_ingress_root_ns)
- return &dev->priv.esw_ingress_root_ns->ns;
+ if (steering->esw_ingress_root_ns)
+ return &steering->esw_ingress_root_ns->ns;
else
return NULL;
default:
return NULL;
}
+ root_ns = steering->root_ns;
+ if (!root_ns)
+ return NULL;
+
fs_prio = find_prio(&root_ns->ns, prio);
if (!fs_prio)
return NULL;
@@ -1478,13 +1494,13 @@ static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
return true;
}
-static int init_root_tree_recursive(struct mlx5_core_dev *dev,
+static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
struct init_tree_node *init_node,
struct fs_node *fs_parent_node,
struct init_tree_node *init_parent_node,
int prio)
{
- int max_ft_level = MLX5_CAP_FLOWTABLE(dev,
+ int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
flow_table_properties_nic_receive.
max_ft_level);
struct mlx5_flow_namespace *fs_ns;
@@ -1495,7 +1511,7 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev,
if (init_node->type == FS_TYPE_PRIO) {
if ((init_node->min_ft_level > max_ft_level) ||
- !has_required_caps(dev, &init_node->caps))
+ !has_required_caps(steering->dev, &init_node->caps))
return 0;
fs_get_obj(fs_ns, fs_parent_node);
@@ -1516,7 +1532,7 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev,
}
prio = 0;
for (i = 0; i < init_node->ar_size; i++) {
- err = init_root_tree_recursive(dev, &init_node->children[i],
+ err = init_root_tree_recursive(steering, &init_node->children[i],
base, init_node, prio);
if (err)
return err;
@@ -1529,7 +1545,7 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev,
return 0;
}
-static int init_root_tree(struct mlx5_core_dev *dev,
+static int init_root_tree(struct mlx5_flow_steering *steering,
struct init_tree_node *init_node,
struct fs_node *fs_parent_node)
{
@@ -1539,7 +1555,7 @@ static int init_root_tree(struct mlx5_core_dev *dev,
fs_get_obj(fs_ns, fs_parent_node);
for (i = 0; i < init_node->ar_size; i++) {
- err = init_root_tree_recursive(dev, &init_node->children[i],
+ err = init_root_tree_recursive(steering, &init_node->children[i],
&fs_ns->node,
init_node, i);
if (err)
@@ -1548,7 +1564,7 @@ static int init_root_tree(struct mlx5_core_dev *dev,
return 0;
}
-static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_core_dev *dev,
+static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_flow_steering *steering,
enum fs_flow_table_type
table_type)
{
@@ -1560,7 +1576,7 @@ static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_core_dev *dev
if (!root_ns)
return NULL;
- root_ns->dev = dev;
+ root_ns->dev = steering->dev;
root_ns->table_type = table_type;
ns = &root_ns->ns;
@@ -1615,220 +1631,135 @@ static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
#define ANCHOR_PRIO 0
#define ANCHOR_SIZE 1
#define ANCHOR_LEVEL 0
-static int create_anchor_flow_table(struct mlx5_core_dev
- *dev)
+static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
{
struct mlx5_flow_namespace *ns = NULL;
struct mlx5_flow_table *ft;
- ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ANCHOR);
+ ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
if (!ns)
return -EINVAL;
ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL);
if (IS_ERR(ft)) {
- mlx5_core_err(dev, "Failed to create last anchor flow table");
+ mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
return PTR_ERR(ft);
}
return 0;
}
-static int init_root_ns(struct mlx5_core_dev *dev)
+static int init_root_ns(struct mlx5_flow_steering *steering)
{
- dev->priv.root_ns = create_root_ns(dev, FS_FT_NIC_RX);
- if (IS_ERR_OR_NULL(dev->priv.root_ns))
+ steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
+ if (IS_ERR_OR_NULL(steering->root_ns))
goto cleanup;
- if (init_root_tree(dev, &root_fs, &dev->priv.root_ns->ns.node))
+ if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node))
goto cleanup;
- set_prio_attrs(dev->priv.root_ns);
+ set_prio_attrs(steering->root_ns);
- if (create_anchor_flow_table(dev))
+ if (create_anchor_flow_table(steering))
goto cleanup;
return 0;
cleanup:
- mlx5_cleanup_fs(dev);
+ mlx5_cleanup_fs(steering->dev);
return -ENOMEM;
}
-static void cleanup_single_prio_root_ns(struct mlx5_core_dev *dev,
- struct mlx5_flow_root_namespace *root_ns)
+static void clean_tree(struct fs_node *node)
{
- struct fs_node *prio;
-
- if (!root_ns)
- return;
+ if (node) {
+ struct fs_node *iter;
+ struct fs_node *temp;
- if (!list_empty(&root_ns->ns.node.children)) {
- prio = list_first_entry(&root_ns->ns.node.children,
- struct fs_node,
- list);
- if (tree_remove_node(prio))
- mlx5_core_warn(dev,
- "Flow steering priority wasn't destroyed, refcount > 1\n");
+ list_for_each_entry_safe(iter, temp, &node->children, list)
+ clean_tree(iter);
+ tree_remove_node(node);
}
- if (tree_remove_node(&root_ns->ns.node))
- mlx5_core_warn(dev,
- "Flow steering namespace wasn't destroyed, refcount > 1\n");
- root_ns = NULL;
}
-static void destroy_flow_tables(struct fs_prio *prio)
+static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
{
- struct mlx5_flow_table *iter;
- struct mlx5_flow_table *tmp;
-
- fs_for_each_ft_safe(iter, tmp, prio)
- mlx5_destroy_flow_table(iter);
-}
-
-static void cleanup_root_ns(struct mlx5_core_dev *dev)
-{
- struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns;
- struct fs_prio *iter_prio;
-
- if (!MLX5_CAP_GEN(dev, nic_flow_table))
- return;
-
if (!root_ns)
return;
- /* stage 1 */
- fs_for_each_prio(iter_prio, &root_ns->ns) {
- struct fs_node *node;
- struct mlx5_flow_namespace *iter_ns;
-
- fs_for_each_ns_or_ft(node, iter_prio) {
- if (node->type == FS_TYPE_FLOW_TABLE)
- continue;
- fs_get_obj(iter_ns, node);
- while (!list_empty(&iter_ns->node.children)) {
- struct fs_prio *obj_iter_prio2;
- struct fs_node *iter_prio2 =
- list_first_entry(&iter_ns->node.children,
- struct fs_node,
- list);
-
- fs_get_obj(obj_iter_prio2, iter_prio2);
- destroy_flow_tables(obj_iter_prio2);
- if (tree_remove_node(iter_prio2)) {
- mlx5_core_warn(dev,
- "Priority %d wasn't destroyed, refcount > 1\n",
- obj_iter_prio2->prio);
- return;
- }
- }
- }
- }
-
- /* stage 2 */
- fs_for_each_prio(iter_prio, &root_ns->ns) {
- while (!list_empty(&iter_prio->node.children)) {
- struct fs_node *iter_ns =
- list_first_entry(&iter_prio->node.children,
- struct fs_node,
- list);
- if (tree_remove_node(iter_ns)) {
- mlx5_core_warn(dev,
- "Namespace wasn't destroyed, refcount > 1\n");
- return;
- }
- }
- }
-
- /* stage 3 */
- while (!list_empty(&root_ns->ns.node.children)) {
- struct fs_prio *obj_prio_node;
- struct fs_node *prio_node =
- list_first_entry(&root_ns->ns.node.children,
- struct fs_node,
- list);
-
- fs_get_obj(obj_prio_node, prio_node);
- if (tree_remove_node(prio_node)) {
- mlx5_core_warn(dev,
- "Priority %d wasn't destroyed, refcount > 1\n",
- obj_prio_node->prio);
- return;
- }
- }
-
- if (tree_remove_node(&root_ns->ns.node)) {
- mlx5_core_warn(dev,
- "root namespace wasn't destroyed, refcount > 1\n");
- return;
- }
-
- dev->priv.root_ns = NULL;
+ clean_tree(&root_ns->ns.node);
}
void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+
if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
return;
- cleanup_root_ns(dev);
- cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns);
- cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns);
- cleanup_single_prio_root_ns(dev, dev->priv.esw_ingress_root_ns);
+ cleanup_root_ns(steering->root_ns);
+ cleanup_root_ns(steering->esw_egress_root_ns);
+ cleanup_root_ns(steering->esw_ingress_root_ns);
+ cleanup_root_ns(steering->fdb_root_ns);
mlx5_cleanup_fc_stats(dev);
+ kfree(steering);
}
-static int init_fdb_root_ns(struct mlx5_core_dev *dev)
+static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
struct fs_prio *prio;
- dev->priv.fdb_root_ns = create_root_ns(dev, FS_FT_FDB);
- if (!dev->priv.fdb_root_ns)
+ steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
+ if (!steering->fdb_root_ns)
return -ENOMEM;
- /* Create single prio */
- prio = fs_create_prio(&dev->priv.fdb_root_ns->ns, 0, 1);
- if (IS_ERR(prio)) {
- cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns);
- return PTR_ERR(prio);
- } else {
- return 0;
- }
+ prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 1);
+ if (IS_ERR(prio))
+ goto out_err;
+
+ prio = fs_create_prio(&steering->fdb_root_ns->ns, 1, 1);
+ if (IS_ERR(prio))
+ goto out_err;
+
+ set_prio_attrs(steering->fdb_root_ns);
+ return 0;
+
+out_err:
+ cleanup_root_ns(steering->fdb_root_ns);
+ steering->fdb_root_ns = NULL;
+ return PTR_ERR(prio);
}
-static int init_egress_acl_root_ns(struct mlx5_core_dev *dev)
+static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering)
{
struct fs_prio *prio;
- dev->priv.esw_egress_root_ns = create_root_ns(dev, FS_FT_ESW_EGRESS_ACL);
- if (!dev->priv.esw_egress_root_ns)
+ steering->esw_egress_root_ns = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
+ if (!steering->esw_egress_root_ns)
return -ENOMEM;
/* create 1 prio*/
- prio = fs_create_prio(&dev->priv.esw_egress_root_ns->ns, 0, MLX5_TOTAL_VPORTS(dev));
- if (IS_ERR(prio))
- return PTR_ERR(prio);
- else
- return 0;
+ prio = fs_create_prio(&steering->esw_egress_root_ns->ns, 0,
+ MLX5_TOTAL_VPORTS(steering->dev));
+ return PTR_ERR_OR_ZERO(prio);
}
-static int init_ingress_acl_root_ns(struct mlx5_core_dev *dev)
+static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering)
{
struct fs_prio *prio;
- dev->priv.esw_ingress_root_ns = create_root_ns(dev, FS_FT_ESW_INGRESS_ACL);
- if (!dev->priv.esw_ingress_root_ns)
+ steering->esw_ingress_root_ns = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
+ if (!steering->esw_ingress_root_ns)
return -ENOMEM;
/* create 1 prio*/
- prio = fs_create_prio(&dev->priv.esw_ingress_root_ns->ns, 0, MLX5_TOTAL_VPORTS(dev));
- if (IS_ERR(prio))
- return PTR_ERR(prio);
- else
- return 0;
+ prio = fs_create_prio(&steering->esw_ingress_root_ns->ns, 0,
+ MLX5_TOTAL_VPORTS(steering->dev));
+ return PTR_ERR_OR_ZERO(prio);
}
int mlx5_init_fs(struct mlx5_core_dev *dev)
{
+ struct mlx5_flow_steering *steering;
int err = 0;
if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
@@ -1838,26 +1769,32 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
if (err)
return err;
+ steering = kzalloc(sizeof(*steering), GFP_KERNEL);
+ if (!steering)
+ return -ENOMEM;
+ steering->dev = dev;
+ dev->priv.steering = steering;
+
if (MLX5_CAP_GEN(dev, nic_flow_table) &&
MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
- err = init_root_ns(dev);
+ err = init_root_ns(steering);
if (err)
goto err;
}
if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
- err = init_fdb_root_ns(dev);
+ err = init_fdb_root_ns(steering);
if (err)
goto err;
}
if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
- err = init_egress_acl_root_ns(dev);
+ err = init_egress_acl_root_ns(steering);
if (err)
goto err;
}
if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
- err = init_ingress_acl_root_ns(dev);
+ err = init_ingress_acl_root_ns(steering);
if (err)
goto err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index aa41a7314..9cffb6aeb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -55,6 +55,14 @@ enum fs_fte_status {
FS_FTE_STATUS_EXISTING = 1UL << 0,
};
+struct mlx5_flow_steering {
+ struct mlx5_core_dev *dev;
+ struct mlx5_flow_root_namespace *root_ns;
+ struct mlx5_flow_root_namespace *fdb_root_ns;
+ struct mlx5_flow_root_namespace *esw_egress_root_ns;
+ struct mlx5_flow_root_namespace *esw_ingress_root_ns;
+};
+
struct fs_node {
struct list_head list;
struct list_head children;
@@ -103,6 +111,7 @@ struct mlx5_fc_cache {
};
struct mlx5_fc {
+ struct rb_node node;
struct list_head list;
/* last{packets,bytes} members are used when calculating the delta since
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 164dc37fd..3a9195b41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -32,6 +32,7 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
+#include <linux/rbtree.h>
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"
@@ -68,32 +69,117 @@
* elapsed, the thread will actually query the hardware.
*/
+static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter)
+{
+ struct rb_node **new = &root->rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*new) {
+ struct mlx5_fc *this = container_of(*new, struct mlx5_fc, node);
+ int result = counter->id - this->id;
+
+ parent = *new;
+ if (result < 0)
+ new = &((*new)->rb_left);
+ else
+ new = &((*new)->rb_right);
+ }
+
+ /* Add new node and rebalance tree. */
+ rb_link_node(&counter->node, parent, new);
+ rb_insert_color(&counter->node, root);
+}
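
mlx5_fc_stats_insert() keys the tree by counter id; the matching lookup is the standard rbtree descent. A hypothetical helper in the same style (not part of this patch; the driver itself only walks the tree with rb_first()/rb_next()):

static struct mlx5_fc *mlx5_fc_stats_lookup(struct rb_root *root, u16 id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct mlx5_fc *this = container_of(node, struct mlx5_fc, node);

		if (id < this->id)
			node = node->rb_left;
		else if (id > this->id)
			node = node->rb_right;
		else
			return this;
	}
	return NULL;
}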
+
+static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
+ struct mlx5_fc *first,
+ u16 last_id)
+{
+ struct mlx5_cmd_fc_bulk *b;
+ struct rb_node *node = NULL;
+ u16 afirst_id;
+ int num;
+ int err;
+ int max_bulk = 1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk);
+
+ /* first id must be aligned to 4 when using bulk query */
+ afirst_id = first->id & ~0x3;
+
+ /* number of counters to query, including the last counter */
+ num = ALIGN(last_id - afirst_id + 1, 4);
+ if (num > max_bulk) {
+ num = max_bulk;
+ last_id = afirst_id + num - 1;
+ }
+
+ b = mlx5_cmd_fc_bulk_alloc(dev, afirst_id, num);
+ if (!b) {
+ mlx5_core_err(dev, "Error allocating resources for bulk query\n");
+ return NULL;
+ }
+
+ err = mlx5_cmd_fc_bulk_query(dev, b);
+ if (err) {
+ mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
+ goto out;
+ }
+
+ for (node = &first->node; node; node = rb_next(node)) {
+ struct mlx5_fc *counter = rb_entry(node, struct mlx5_fc, node);
+ struct mlx5_fc_cache *c = &counter->cache;
+ u64 packets;
+ u64 bytes;
+
+ if (counter->id > last_id)
+ break;
+
+ mlx5_cmd_fc_bulk_get(dev, b,
+ counter->id, &packets, &bytes);
+
+ if (c->packets == packets)
+ continue;
+
+ c->packets = packets;
+ c->bytes = bytes;
+ c->lastuse = jiffies;
+ }
+
+out:
+ mlx5_cmd_fc_bulk_free(b);
+
+ return node;
+}
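
The alignment logic at the top of mlx5_fc_stats_query() is easiest to follow with numbers. A standalone sketch (ignoring the max_bulk clamp for simplicity):

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int first_id = 6, last_id = 13;
	unsigned int afirst_id = first_id & ~0x3u;		/* 4 */
	unsigned int num = ALIGN(last_id - afirst_id + 1, 4);	/* 12 */

	/* The bulk query covers ids 4..15, a superset of the wanted
	 * range 6..13, satisfying the 4-alignment requirement.
	 */
	printf("query ids %u..%u (%u counters)\n",
	       afirst_id, afirst_id + num - 1, num);
	return 0;
}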
+
static void mlx5_fc_stats_work(struct work_struct *work)
{
struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
priv.fc_stats.work.work);
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
unsigned long now = jiffies;
- struct mlx5_fc *counter;
- struct mlx5_fc *tmp;
- int err = 0;
+ struct mlx5_fc *counter = NULL;
+ struct mlx5_fc *last = NULL;
+ struct rb_node *node;
+ LIST_HEAD(tmplist);
spin_lock(&fc_stats->addlist_lock);
- list_splice_tail_init(&fc_stats->addlist, &fc_stats->list);
+ list_splice_tail_init(&fc_stats->addlist, &tmplist);
- if (!list_empty(&fc_stats->list))
+ if (!list_empty(&tmplist) || !RB_EMPTY_ROOT(&fc_stats->counters))
queue_delayed_work(fc_stats->wq, &fc_stats->work, MLX5_FC_STATS_PERIOD);
spin_unlock(&fc_stats->addlist_lock);
- list_for_each_entry_safe(counter, tmp, &fc_stats->list, list) {
- struct mlx5_fc_cache *c = &counter->cache;
- u64 packets;
- u64 bytes;
+ list_for_each_entry(counter, &tmplist, list)
+ mlx5_fc_stats_insert(&fc_stats->counters, counter);
+
+ node = rb_first(&fc_stats->counters);
+ while (node) {
+ counter = rb_entry(node, struct mlx5_fc, node);
+
+ node = rb_next(node);
if (counter->deleted) {
- list_del(&counter->list);
+ rb_erase(&counter->node, &fc_stats->counters);
mlx5_cmd_fc_free(dev, counter->id);
@@ -101,26 +187,20 @@ static void mlx5_fc_stats_work(struct work_struct *work)
continue;
}
- if (time_before(now, fc_stats->next_query))
- continue;
+ last = counter;
+ }
- err = mlx5_cmd_fc_query(dev, counter->id, &packets, &bytes);
- if (err) {
- pr_err("Error querying stats for counter id %d\n",
- counter->id);
- continue;
- }
+ if (time_before(now, fc_stats->next_query) || !last)
+ return;
- if (packets == c->packets)
- continue;
+ node = rb_first(&fc_stats->counters);
+ while (node) {
+ counter = rb_entry(node, struct mlx5_fc, node);
- c->lastuse = jiffies;
- c->packets = packets;
- c->bytes = bytes;
+ node = mlx5_fc_stats_query(dev, counter, last->id);
}
- if (time_after_eq(now, fc_stats->next_query))
- fc_stats->next_query = now + MLX5_FC_STATS_PERIOD;
+ fc_stats->next_query = now + MLX5_FC_STATS_PERIOD;
}
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
@@ -176,7 +256,7 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
- INIT_LIST_HEAD(&fc_stats->list);
+ fc_stats->counters = RB_ROOT;
INIT_LIST_HEAD(&fc_stats->addlist);
spin_lock_init(&fc_stats->addlist_lock);
@@ -194,20 +274,32 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
struct mlx5_fc *counter;
struct mlx5_fc *tmp;
+ struct rb_node *node;
cancel_delayed_work_sync(&dev->priv.fc_stats.work);
destroy_workqueue(dev->priv.fc_stats.wq);
dev->priv.fc_stats.wq = NULL;
- list_splice_tail_init(&fc_stats->addlist, &fc_stats->list);
-
- list_for_each_entry_safe(counter, tmp, &fc_stats->list, list) {
+ list_for_each_entry_safe(counter, tmp, &fc_stats->addlist, list) {
list_del(&counter->list);
mlx5_cmd_fc_free(dev, counter->id);
kfree(counter);
}
+
+ node = rb_first(&fc_stats->counters);
+ while (node) {
+ counter = rb_entry(node, struct mlx5_fc, node);
+
+ node = rb_next(node);
+
+ rb_erase(&counter->node, &fc_stats->counters);
+
+ mlx5_cmd_fc_free(dev, counter->id);
+
+ kfree(counter);
+ }
}
void mlx5_fc_query_cached(struct mlx5_fc *counter,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 75c7ae6a5..77fc1aa26 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -151,6 +151,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
+ if (MLX5_CAP_GEN(dev, qos)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_QOS);
+ if (err)
+ return err;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 96a59463a..1a05fb965 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -272,7 +272,7 @@ static void poll_health(unsigned long data)
if (in_fatal(dev) && !health->sick) {
health->sick = true;
print_health_info(dev);
- queue_work(health->wq, &health->work);
+ schedule_work(&health->work);
}
}
@@ -301,7 +301,7 @@ void mlx5_health_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
- destroy_workqueue(health->wq);
+ flush_work(&health->work);
}
int mlx5_health_init(struct mlx5_core_dev *dev)
@@ -316,10 +316,7 @@ int mlx5_health_init(struct mlx5_core_dev *dev)
strcpy(name, "mlx5_health");
strcat(name, dev_name(&dev->pdev->dev));
- health->wq = create_singlethread_workqueue(name);
kfree(name);
- if (!health->wq)
- return -ENOMEM;
INIT_WORK(&health->work, health_care);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index e782d0fde..2385bae92 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -51,6 +51,7 @@
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
+#include <net/devlink.h>
#include "mlx5_core.h"
#include "fs_core.h"
#ifdef CONFIG_MLX5_CORE_EN
@@ -1144,6 +1145,13 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
dev_err(&pdev->dev, "Failed to init flow steering\n");
goto err_fs;
}
+
+ err = mlx5_init_rl_table(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init rate limiting\n");
+ goto err_rl;
+ }
+
#ifdef CONFIG_MLX5_CORE_EN
err = mlx5_eswitch_init(dev);
if (err) {
@@ -1183,6 +1191,8 @@ err_sriov:
mlx5_eswitch_cleanup(dev->priv.eswitch);
#endif
err_reg_dev:
+ mlx5_cleanup_rl_table(dev);
+err_rl:
mlx5_cleanup_fs(dev);
err_fs:
mlx5_cleanup_mkey_table(dev);
@@ -1253,6 +1263,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
mlx5_eswitch_cleanup(dev->priv.eswitch);
#endif
+ mlx5_cleanup_rl_table(dev);
mlx5_cleanup_fs(dev);
mlx5_cleanup_mkey_table(dev);
mlx5_cleanup_srq_table(dev);
@@ -1305,19 +1316,28 @@ struct mlx5_core_event_handler {
void *data);
};
+static const struct devlink_ops mlx5_devlink_ops = {
+#ifdef CONFIG_MLX5_CORE_EN
+ .eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
+ .eswitch_mode_get = mlx5_devlink_eswitch_mode_get,
+#endif
+};
static int init_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct mlx5_core_dev *dev;
+ struct devlink *devlink;
struct mlx5_priv *priv;
int err;
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
+ devlink = devlink_alloc(&mlx5_devlink_ops, sizeof(*dev));
+ if (!devlink) {
dev_err(&pdev->dev, "kzalloc failed\n");
return -ENOMEM;
}
+
+ dev = devlink_priv(devlink);
priv = &dev->priv;
priv->pci_dev_data = id->driver_data;
@@ -1354,15 +1374,21 @@ static int init_one(struct pci_dev *pdev,
goto clean_health;
}
+ err = devlink_register(devlink, &pdev->dev);
+ if (err)
+ goto clean_load;
+
return 0;
+clean_load:
+ mlx5_unload_one(dev, priv);
clean_health:
mlx5_health_cleanup(dev);
close_pci:
mlx5_pci_close(dev, priv);
clean_dev:
pci_set_drvdata(pdev, NULL);
- kfree(dev);
+ devlink_free(devlink);
return err;
}
@@ -1370,8 +1396,10 @@ clean_dev:
static void remove_one(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct devlink *devlink = priv_to_devlink(dev);
struct mlx5_priv *priv = &dev->priv;
+ devlink_unregister(devlink);
if (mlx5_unload_one(dev, priv)) {
dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n");
mlx5_health_cleanup(dev);
@@ -1380,7 +1408,7 @@ static void remove_one(struct pci_dev *pdev)
mlx5_health_cleanup(dev);
mlx5_pci_close(dev, priv);
pci_set_drvdata(pdev, NULL);
- kfree(dev);
+ devlink_free(devlink);
}
static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 3e35611b1..752c08127 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -202,15 +202,24 @@ int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL_GPL(mlx5_query_port_proto_oper);
-int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
- int proto_mask)
+int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable,
+ u32 proto_admin, int proto_mask)
{
- u32 in[MLX5_ST_SZ_DW(ptys_reg)];
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ u32 in[MLX5_ST_SZ_DW(ptys_reg)];
+ u8 an_disable_admin;
+ u8 an_disable_cap;
+ u8 an_status;
+
+ mlx5_query_port_autoneg(dev, proto_mask, &an_status,
+ &an_disable_cap, &an_disable_admin);
+ if (!an_disable_cap && an_disable)
+ return -EPERM;
memset(in, 0, sizeof(in));
MLX5_SET(ptys_reg, in, local_port, 1);
+ MLX5_SET(ptys_reg, in, an_disable_admin, an_disable);
MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
if (proto_mask == MLX5_PTYS_EN)
MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin);
@@ -220,7 +229,19 @@ int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PTYS, 0, 1);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_proto);
+EXPORT_SYMBOL_GPL(mlx5_set_port_ptys);
+
+/* This function should be used after setting a port register only */
+void mlx5_toggle_port_link(struct mlx5_core_dev *dev)
+{
+ enum mlx5_port_status ps;
+
+ mlx5_query_port_admin_status(dev, &ps);
+ mlx5_set_port_admin_status(dev, MLX5_PORT_DOWN);
+ if (ps == MLX5_PORT_UP)
+ mlx5_set_port_admin_status(dev, MLX5_PORT_UP);
+}
+EXPORT_SYMBOL_GPL(mlx5_toggle_port_link);
int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status status)
@@ -518,6 +539,25 @@ int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
}
EXPORT_SYMBOL_GPL(mlx5_query_port_pfc);
+void mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask,
+ u8 *an_status,
+ u8 *an_disable_cap, u8 *an_disable_admin)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+
+ *an_status = 0;
+ *an_disable_cap = 0;
+ *an_disable_admin = 0;
+
+ if (mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1))
+ return;
+
+ *an_status = MLX5_GET(ptys_reg, out, an_status);
+ *an_disable_cap = MLX5_GET(ptys_reg, out, an_disable_cap);
+ *an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_autoneg);
+
int mlx5_max_tc(struct mlx5_core_dev *mdev)
{
u8 num_tc = MLX5_CAP_GEN(mdev, max_tc) ? : 8;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
new file mode 100644
index 000000000..c07c28bd3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/cmd.h>
+#include "mlx5_core.h"
+
+/* Finds an entry where we can register the given rate
+ * If the rate already exists, return the entry where it is registered,
+ * otherwise return the first available entry.
+ * If the table is full, return NULL
+ */
+static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
+ u32 rate)
+{
+ struct mlx5_rl_entry *ret_entry = NULL;
+ bool empty_found = false;
+ int i;
+
+ for (i = 0; i < table->max_size; i++) {
+ if (table->rl_entry[i].rate == rate)
+ return &table->rl_entry[i];
+ if (!empty_found && !table->rl_entry[i].rate) {
+ empty_found = true;
+ ret_entry = &table->rl_entry[i];
+ }
+ }
+
+ return ret_entry;
+}
+
+static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev,
+ u32 rate, u16 index)
+{
+ u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)];
+ u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(set_rate_limit_in, in, opcode,
+ MLX5_CMD_OP_SET_RATE_LIMIT);
+ MLX5_SET(set_rate_limit_in, in, rate_limit_index, index);
+ MLX5_SET(set_rate_limit_in, in, rate_limit, rate);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
+ out, sizeof(out));
+}
+
+bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate)
+{
+ struct mlx5_rl_table *table = &dev->priv.rl_table;
+
+ return (rate <= table->max_rate && rate >= table->min_rate);
+}
+EXPORT_SYMBOL(mlx5_rl_is_in_range);
+
+int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index)
+{
+ struct mlx5_rl_table *table = &dev->priv.rl_table;
+ struct mlx5_rl_entry *entry;
+ int err = 0;
+
+ mutex_lock(&table->rl_lock);
+
+ if (!rate || !mlx5_rl_is_in_range(dev, rate)) {
+ mlx5_core_err(dev, "Invalid rate: %u, should be %u to %u\n",
+ rate, table->min_rate, table->max_rate);
+ err = -EINVAL;
+ goto out;
+ }
+
+ entry = find_rl_entry(table, rate);
+ if (!entry) {
+ mlx5_core_err(dev, "Max number of %u rates reached\n",
+ table->max_size);
+ err = -ENOSPC;
+ goto out;
+ }
+ if (entry->refcount) {
+ /* rate already configured */
+ entry->refcount++;
+ } else {
+ /* new rate limit */
+ err = mlx5_set_rate_limit_cmd(dev, rate, entry->index);
+ if (err) {
+ mlx5_core_err(dev, "Failed configuring rate: %u (%d)\n",
+ rate, err);
+ goto out;
+ }
+ entry->rate = rate;
+ entry->refcount = 1;
+ }
+ *index = entry->index;
+
+out:
+ mutex_unlock(&table->rl_lock);
+ return err;
+}
+EXPORT_SYMBOL(mlx5_rl_add_rate);
+
+void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate)
+{
+ struct mlx5_rl_table *table = &dev->priv.rl_table;
+ struct mlx5_rl_entry *entry = NULL;
+
+ /* 0 is a reserved value for unlimited rate */
+ if (rate == 0)
+ return;
+
+ mutex_lock(&table->rl_lock);
+ entry = find_rl_entry(table, rate);
+ if (!entry || !entry->refcount) {
+ mlx5_core_warn(dev, "Rate %u is not configured\n", rate);
+ goto out;
+ }
+
+ entry->refcount--;
+ if (!entry->refcount) {
+ /* need to remove rate */
+ mlx5_set_rate_limit_cmd(dev, 0, entry->index);
+ entry->rate = 0;
+ }
+
+out:
+ mutex_unlock(&table->rl_lock);
+}
+EXPORT_SYMBOL(mlx5_rl_remove_rate);
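
mlx5_rl_add_rate()/mlx5_rl_remove_rate() refcount each distinct rate, so a caller simply brackets its use of a rate and remembers the returned index. A hypothetical caller (the rate value is illustrative; judging by the init-time print, which shifts by 10 to report Mbps, rates are in Kbps):

/* Hypothetical: reserve a rate entry, program it, release it. */
static int example_use_rate(struct mlx5_core_dev *dev)
{
	u16 index;
	int err;

	err = mlx5_rl_add_rate(dev, 1024000, &index);	/* ~1000 Mbps */
	if (err)
		return err;

	/* ... set rate_limit_index = index on a send queue ... */

	mlx5_rl_remove_rate(dev, 1024000);
	return 0;
}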
+
+int mlx5_init_rl_table(struct mlx5_core_dev *dev)
+{
+ struct mlx5_rl_table *table = &dev->priv.rl_table;
+ int i;
+
+ mutex_init(&table->rl_lock);
+ if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, packet_pacing)) {
+ table->max_size = 0;
+ return 0;
+ }
+
+ /* First entry is reserved for unlimited rate */
+ table->max_size = MLX5_CAP_QOS(dev, packet_pacing_rate_table_size) - 1;
+ table->max_rate = MLX5_CAP_QOS(dev, packet_pacing_max_rate);
+ table->min_rate = MLX5_CAP_QOS(dev, packet_pacing_min_rate);
+
+ table->rl_entry = kcalloc(table->max_size, sizeof(struct mlx5_rl_entry),
+ GFP_KERNEL);
+ if (!table->rl_entry)
+ return -ENOMEM;
+
+ /* The index represents the index in HW rate limit table
+ * Index 0 is reserved for unlimited rate
+ */
+ for (i = 0; i < table->max_size; i++)
+ table->rl_entry[i].index = i + 1;
+
+ /* Index 0 is reserved */
+ mlx5_core_info(dev, "Rate limit: %u rates are supported, range: %uMbps to %uMbps\n",
+ table->max_size,
+ table->min_rate >> 10,
+ table->max_rate >> 10);
+
+ return 0;
+}
+
+void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
+{
+ struct mlx5_rl_table *table = &dev->priv.rl_table;
+ int i;
+
+ /* Clear all configured rates */
+ for (i = 0; i < table->max_size; i++)
+ if (table->rl_entry[i].rate)
+ mlx5_set_rate_limit_cmd(dev, 0,
+ table->rl_entry[i].index);
+
+ kfree(dev->priv.rl_table.rl_entry);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index d6a3f412b..b380a6bc1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -167,7 +167,7 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
mlx5_core_init_vfs(dev, num_vfs);
#ifdef CONFIG_MLX5_CORE_EN
- mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs);
+ mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
#endif
return num_vfs;
@@ -209,7 +209,8 @@ int mlx5_sriov_init(struct mlx5_core_dev *dev)
mlx5_core_init_vfs(dev, cur_vfs);
#ifdef CONFIG_MLX5_CORE_EN
if (cur_vfs)
- mlx5_eswitch_enable_sriov(dev->priv.eswitch, cur_vfs);
+ mlx5_eswitch_enable_sriov(dev->priv.eswitch, cur_vfs,
+ SRIOV_LEGACY);
#endif
enable_vfs(dev, cur_vfs);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
index 04bc52260..c07f4d01b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
@@ -63,12 +63,12 @@ void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
complete(&srq->free);
}
-static int get_pas_size(void *srqc)
+static int get_pas_size(struct mlx5_srq_attr *in)
{
- u32 log_page_size = MLX5_GET(srqc, srqc, log_page_size) + 12;
- u32 log_srq_size = MLX5_GET(srqc, srqc, log_srq_size);
- u32 log_rq_stride = MLX5_GET(srqc, srqc, log_rq_stride);
- u32 page_offset = MLX5_GET(srqc, srqc, page_offset);
+ u32 log_page_size = in->log_page_size + 12;
+ u32 log_srq_size = in->log_size;
+ u32 log_rq_stride = in->wqe_shift;
+ u32 page_offset = in->page_offset;
u32 po_quanta = 1 << (log_page_size - 6);
u32 rq_sz = 1 << (log_srq_size + 4 + log_rq_stride);
u32 page_size = 1 << log_page_size;
@@ -78,57 +78,58 @@ static int get_pas_size(void *srqc)
return rq_num_pas * sizeof(u64);
}
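
A worked case makes get_pas_size()'s log2 juggling concrete. A standalone sketch with one plausible SRQ configuration (all values illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int log_page_size = 0 + 12;	/* attr 0 -> 4 KB pages */
	unsigned int log_srq_size = 8;		/* 256 WQEs */
	unsigned int log_rq_stride = 2;		/* stride 1 << (4 + 2) = 64 B */
	unsigned int page_offset = 0;

	unsigned int po_quanta = 1u << (log_page_size - 6);	/* 64 B */
	unsigned int rq_sz = 1u << (log_srq_size + 4 + log_rq_stride); /* 16 KB */
	unsigned int page_size = 1u << log_page_size;
	unsigned int rq_sz_po = rq_sz + page_offset * po_quanta;
	unsigned int rq_num_pas = (rq_sz_po + page_size - 1) / page_size;

	/* 16 KB / 4 KB = 4 pages -> 4 * 8 = 32 bytes of PAS */
	printf("%u pages, %u bytes of PAS\n", rq_num_pas, rq_num_pas * 8);
	return 0;
}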
-static void rmpc_srqc_reformat(void *srqc, void *rmpc, bool srqc_to_rmpc)
+static void set_wq(void *wq, struct mlx5_srq_attr *in)
{
- void *wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
-
- if (srqc_to_rmpc) {
- switch (MLX5_GET(srqc, srqc, state)) {
- case MLX5_SRQC_STATE_GOOD:
- MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
- break;
- case MLX5_SRQC_STATE_ERROR:
- MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_ERR);
- break;
- default:
- pr_warn("%s: %d: Unknown srq state = 0x%x\n", __func__,
- __LINE__, MLX5_GET(srqc, srqc, state));
- MLX5_SET(rmpc, rmpc, state, MLX5_GET(srqc, srqc, state));
- }
-
- MLX5_SET(wq, wq, wq_signature, MLX5_GET(srqc, srqc, wq_signature));
- MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(srqc, srqc, log_page_size));
- MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(srqc, srqc, log_rq_stride) + 4);
- MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(srqc, srqc, log_srq_size));
- MLX5_SET(wq, wq, page_offset, MLX5_GET(srqc, srqc, page_offset));
- MLX5_SET(wq, wq, lwm, MLX5_GET(srqc, srqc, lwm));
- MLX5_SET(wq, wq, pd, MLX5_GET(srqc, srqc, pd));
- MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(srqc, srqc, dbr_addr));
- } else {
- switch (MLX5_GET(rmpc, rmpc, state)) {
- case MLX5_RMPC_STATE_RDY:
- MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_GOOD);
- break;
- case MLX5_RMPC_STATE_ERR:
- MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_ERROR);
- break;
- default:
- pr_warn("%s: %d: Unknown rmp state = 0x%x\n",
- __func__, __LINE__,
- MLX5_GET(rmpc, rmpc, state));
- MLX5_SET(srqc, srqc, state,
- MLX5_GET(rmpc, rmpc, state));
- }
-
- MLX5_SET(srqc, srqc, wq_signature, MLX5_GET(wq, wq, wq_signature));
- MLX5_SET(srqc, srqc, log_page_size, MLX5_GET(wq, wq, log_wq_pg_sz));
- MLX5_SET(srqc, srqc, log_rq_stride, MLX5_GET(wq, wq, log_wq_stride) - 4);
- MLX5_SET(srqc, srqc, log_srq_size, MLX5_GET(wq, wq, log_wq_sz));
- MLX5_SET(srqc, srqc, page_offset, MLX5_GET(wq, wq, page_offset));
- MLX5_SET(srqc, srqc, lwm, MLX5_GET(wq, wq, lwm));
- MLX5_SET(srqc, srqc, pd, MLX5_GET(wq, wq, pd));
- MLX5_SET64(srqc, srqc, dbr_addr, MLX5_GET64(wq, wq, dbr_addr));
- }
+ MLX5_SET(wq, wq, wq_signature, !!(in->flags
+ & MLX5_SRQ_FLAG_WQ_SIG));
+ MLX5_SET(wq, wq, log_wq_pg_sz, in->log_page_size);
+ MLX5_SET(wq, wq, log_wq_stride, in->wqe_shift + 4);
+ MLX5_SET(wq, wq, log_wq_sz, in->log_size);
+ MLX5_SET(wq, wq, page_offset, in->page_offset);
+ MLX5_SET(wq, wq, lwm, in->lwm);
+ MLX5_SET(wq, wq, pd, in->pd);
+ MLX5_SET64(wq, wq, dbr_addr, in->db_record);
+}
+
+static void set_srqc(void *srqc, struct mlx5_srq_attr *in)
+{
+ MLX5_SET(srqc, srqc, wq_signature, !!(in->flags
+ & MLX5_SRQ_FLAG_WQ_SIG));
+ MLX5_SET(srqc, srqc, log_page_size, in->log_page_size);
+ MLX5_SET(srqc, srqc, log_rq_stride, in->wqe_shift);
+ MLX5_SET(srqc, srqc, log_srq_size, in->log_size);
+ MLX5_SET(srqc, srqc, page_offset, in->page_offset);
+ MLX5_SET(srqc, srqc, lwm, in->lwm);
+ MLX5_SET(srqc, srqc, pd, in->pd);
+ MLX5_SET64(srqc, srqc, dbr_addr, in->db_record);
+ MLX5_SET(srqc, srqc, xrcd, in->xrcd);
+ MLX5_SET(srqc, srqc, cqn, in->cqn);
+}
+
+static void get_wq(void *wq, struct mlx5_srq_attr *in)
+{
+ if (MLX5_GET(wq, wq, wq_signature))
+ in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
+ in->log_page_size = MLX5_GET(wq, wq, log_wq_pg_sz);
+ in->wqe_shift = MLX5_GET(wq, wq, log_wq_stride) - 4;
+ in->log_size = MLX5_GET(wq, wq, log_wq_sz);
+ in->page_offset = MLX5_GET(wq, wq, page_offset);
+ in->lwm = MLX5_GET(wq, wq, lwm);
+ in->pd = MLX5_GET(wq, wq, pd);
+ in->db_record = MLX5_GET64(wq, wq, dbr_addr);
+}
+
+static void get_srqc(void *srqc, struct mlx5_srq_attr *in)
+{
+ if (MLX5_GET(srqc, srqc, wq_signature))
+ in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
+ in->log_page_size = MLX5_GET(srqc, srqc, log_page_size);
+ in->wqe_shift = MLX5_GET(srqc, srqc, log_rq_stride);
+ in->log_size = MLX5_GET(srqc, srqc, log_srq_size);
+ in->page_offset = MLX5_GET(srqc, srqc, page_offset);
+ in->lwm = MLX5_GET(srqc, srqc, lwm);
+ in->pd = MLX5_GET(srqc, srqc, pd);
+ in->db_record = MLX5_GET64(srqc, srqc, dbr_addr);
}
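
The translators above must round-trip: set_wq() stores wqe_shift + 4 as log_wq_stride and get_wq() subtracts 4 again, while the srqc form carries wqe_shift directly. A tiny standalone check of that invariant (values illustrative):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int wqe_shift = 2;			/* illustrative */
	unsigned int log_wq_stride = wqe_shift + 4;	/* as in set_wq() */

	assert(log_wq_stride - 4 == wqe_shift);		/* as in get_wq() */
	printf("WQE stride = %u bytes\n", 1u << log_wq_stride);
	return 0;
}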
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
@@ -149,19 +150,36 @@ struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
EXPORT_SYMBOL(mlx5_core_get_srq);
static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_create_srq_mbox_in *in, int inlen)
+ struct mlx5_srq_attr *in)
{
- struct mlx5_create_srq_mbox_out out;
+ u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0};
+ void *create_in;
+ void *srqc;
+ void *pas;
+ int pas_size;
+ int inlen;
int err;
- memset(&out, 0, sizeof(out));
+ pas_size = get_pas_size(in);
+ inlen = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
+ create_in = mlx5_vzalloc(inlen);
+ if (!create_in)
+ return -ENOMEM;
+
+ srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry);
+ pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ);
+ set_srqc(srqc, in);
+ memcpy(pas, in->pas, pas_size);
- err = mlx5_cmd_exec_check_status(dev, (u32 *)in, inlen, (u32 *)(&out),
- sizeof(out));
+ MLX5_SET(create_srq_in, create_in, opcode,
+ MLX5_CMD_OP_CREATE_SRQ);
- srq->srqn = be32_to_cpu(out.srqn) & 0xffffff;
+ err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out,
+ sizeof(create_out));
+ kvfree(create_in);
+ if (!err)
+ srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);
return err;
}
@@ -169,67 +187,75 @@ static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
static int destroy_srq_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq)
{
- struct mlx5_destroy_srq_mbox_in in;
- struct mlx5_destroy_srq_mbox_out out;
+ u32 srq_in[MLX5_ST_SZ_DW(destroy_srq_in)] = {0};
+ u32 srq_out[MLX5_ST_SZ_DW(destroy_srq_out)] = {0};
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
- in.srqn = cpu_to_be32(srq->srqn);
+ MLX5_SET(destroy_srq_in, srq_in, opcode,
+ MLX5_CMD_OP_DESTROY_SRQ);
+ MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);
- return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in),
- (u32 *)(&out), sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in),
+ srq_out, sizeof(srq_out));
}
static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq)
{
- struct mlx5_arm_srq_mbox_in in;
- struct mlx5_arm_srq_mbox_out out;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
+ /* arm_srq structs are missing; use the identical xrc ones */
+ u32 srq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
+ u32 srq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ);
- in.hdr.opmod = cpu_to_be16(!!is_srq);
- in.srqn = cpu_to_be32(srq->srqn);
- in.lwm = cpu_to_be16(lwm);
+ MLX5_SET(arm_xrc_srq_in, srq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
+ MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn);
+ MLX5_SET(arm_xrc_srq_in, srq_in, lwm, lwm);
- return mlx5_cmd_exec_check_status(dev, (u32 *)(&in),
- sizeof(in), (u32 *)(&out),
- sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in),
+ srq_out, sizeof(srq_out));
}
static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_query_srq_mbox_out *out)
+ struct mlx5_srq_attr *out)
{
- struct mlx5_query_srq_mbox_in in;
+ u32 srq_in[MLX5_ST_SZ_DW(query_srq_in)] = {0};
+ u32 *srq_out;
+ void *srqc;
+ int err;
- memset(&in, 0, sizeof(in));
+ srq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_srq_out));
+ if (!srq_out)
+ return -ENOMEM;
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ);
- in.srqn = cpu_to_be32(srq->srqn);
+ MLX5_SET(query_srq_in, srq_in, opcode,
+ MLX5_CMD_OP_QUERY_SRQ);
+ MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn);
+ err = mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in),
+ srq_out,
+ MLX5_ST_SZ_BYTES(query_srq_out));
+ if (err)
+ goto out;
- return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in),
- (u32 *)out, sizeof(*out));
+ srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry);
+ get_srqc(srqc, out);
+ if (MLX5_GET(srqc, srqc, state) != MLX5_SRQC_STATE_GOOD)
+ out->flags |= MLX5_SRQ_FLAG_ERR;
+out:
+ kvfree(srq_out);
+ return err;
}
static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq,
- struct mlx5_create_srq_mbox_in *in,
- int srq_inlen)
+ struct mlx5_srq_attr *in)
{
u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
void *create_in;
- void *srqc;
void *xrc_srqc;
void *pas;
int pas_size;
int inlen;
int err;
- srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
- pas_size = get_pas_size(srqc);
+ pas_size = get_pas_size(in);
inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
create_in = mlx5_vzalloc(inlen);
if (!create_in)
@@ -239,7 +265,8 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
xrc_srq_context_entry);
pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);
- memcpy(xrc_srqc, srqc, MLX5_ST_SZ_BYTES(srqc));
+ set_srqc(xrc_srqc, in);
+ MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index);
memcpy(pas, in->pas, pas_size);
MLX5_SET(create_xrc_srq_in, create_in, opcode,
MLX5_CMD_OP_CREATE_XRC_SRQ);
@@ -293,11 +320,10 @@ static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq,
- struct mlx5_query_srq_mbox_out *out)
+ struct mlx5_srq_attr *out)
{
u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
u32 *xrcsrq_out;
- void *srqc;
void *xrc_srqc;
int err;
@@ -317,8 +343,9 @@ static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
xrc_srq_context_entry);
- srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
- memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc));
+ get_srqc(xrc_srqc, out);
+ if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD)
+ out->flags |= MLX5_SRQ_FLAG_ERR;
out:
kvfree(xrcsrq_out);
@@ -326,26 +353,27 @@ out:
}
static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_create_srq_mbox_in *in, int srq_inlen)
+ struct mlx5_srq_attr *in)
{
void *create_in;
void *rmpc;
- void *srqc;
+ void *wq;
int pas_size;
int inlen;
int err;
- srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
- pas_size = get_pas_size(srqc);
+ pas_size = get_pas_size(in);
inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
create_in = mlx5_vzalloc(inlen);
if (!create_in)
return -ENOMEM;
rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
+ wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
+ MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
+ set_wq(wq, in);
memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
- rmpc_srqc_reformat(srqc, rmpc, true);
err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);
@@ -390,11 +418,10 @@ static int arm_rmp_cmd(struct mlx5_core_dev *dev,
}
static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_query_srq_mbox_out *out)
+ struct mlx5_srq_attr *out)
{
u32 *rmp_out;
void *rmpc;
- void *srqc;
int err;
rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out));
@@ -405,9 +432,10 @@ static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
if (err)
goto out;
- srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
- rmpc_srqc_reformat(srqc, rmpc, false);
+ get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out);
+ if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY)
+ out->flags |= MLX5_SRQ_FLAG_ERR;
out:
kvfree(rmp_out);
@@ -416,15 +444,14 @@ out:
static int create_srq_split(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq,
- struct mlx5_create_srq_mbox_in *in,
- int inlen, int is_xrc)
+ struct mlx5_srq_attr *in)
{
if (!dev->issi)
- return create_srq_cmd(dev, srq, in, inlen);
+ return create_srq_cmd(dev, srq, in);
else if (srq->common.res == MLX5_RES_XSRQ)
- return create_xrc_srq_cmd(dev, srq, in, inlen);
+ return create_xrc_srq_cmd(dev, srq, in);
else
- return create_rmp_cmd(dev, srq, in, inlen);
+ return create_rmp_cmd(dev, srq, in);
}
static int destroy_srq_split(struct mlx5_core_dev *dev,
@@ -439,15 +466,17 @@ static int destroy_srq_split(struct mlx5_core_dev *dev,
}
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_create_srq_mbox_in *in, int inlen,
- int is_xrc)
+ struct mlx5_srq_attr *in)
{
int err;
struct mlx5_srq_table *table = &dev->priv.srq_table;
- srq->common.res = is_xrc ? MLX5_RES_XSRQ : MLX5_RES_SRQ;
+ if (in->type == IB_SRQT_XRC)
+ srq->common.res = MLX5_RES_XSRQ;
+ else
+ srq->common.res = MLX5_RES_SRQ;
- err = create_srq_split(dev, srq, in, inlen, is_xrc);
+ err = create_srq_split(dev, srq, in);
if (err)
return err;
@@ -502,7 +531,7 @@ int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
EXPORT_SYMBOL(mlx5_core_destroy_srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_query_srq_mbox_out *out)
+ struct mlx5_srq_attr *out)
{
if (!dev->issi)
return query_srq_cmd(dev, srq, out);
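
For illustration, a caller-side sketch of the reworked attribute-based SRQ
API (the function name and error policy are illustrative assumptions, not
part of this patch; 'attr' is assumed pre-filled with sizes, pd, db_record
and pas):

static int example_srq_roundtrip(struct mlx5_core_dev *dev,
				 struct mlx5_core_srq *srq,
				 struct mlx5_srq_attr *attr)
{
	struct mlx5_srq_attr out = {0};
	int err;

	attr->type = IB_SRQT_BASIC;	/* non-XRC, selects MLX5_RES_SRQ */
	err = mlx5_core_create_srq(dev, srq, attr);
	if (err)
		return err;

	err = mlx5_core_query_srq(dev, srq, &out);
	if (!err && (out.flags & MLX5_SRQ_FLAG_ERR))
		err = -EIO;	/* SRQ context not in the GOOD state */
	return err;
}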
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index 03a5093ff..28274a6fb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -85,6 +85,7 @@ int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
return err;
}
+EXPORT_SYMBOL(mlx5_core_create_rq);
int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen)
{
@@ -110,6 +111,7 @@ void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
}
+EXPORT_SYMBOL(mlx5_core_destroy_rq);
int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out)
{
@@ -430,6 +432,7 @@ int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
return err;
}
+EXPORT_SYMBOL(mlx5_core_create_rqt);
int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
int inlen)
@@ -455,3 +458,4 @@ void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
}
+EXPORT_SYMBOL(mlx5_core_destroy_rqt);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 91846dfcb..21365d069 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -135,6 +135,18 @@ static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
}
+void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
+ u8 *min_inline_mode)
+{
+ u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
+
+ mlx5_query_nic_vport_context(mdev, 0, out, sizeof(out));
+
+ *min_inline_mode = MLX5_GET(query_nic_vport_context_out, out,
+ nic_vport_context.min_wqe_inline_mode);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);
+
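
A brief usage sketch of the new helper (the wrapper function and the
pr_debug() reporting are illustrative):

static void example_read_min_inline(struct mlx5_core_dev *mdev)
{
	u8 min_inline_mode;

	mlx5_query_nic_vport_min_inline(mdev, &min_inline_mode);
	/* e.g. cache the mode and size inline TX headers accordingly */
	pr_debug("vport min inline mode: %u\n", min_inline_mode);
}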
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, u8 *addr)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 9b5ebf84c..d20ae1838 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -7,5 +7,6 @@ obj-$(CONFIG_MLXSW_SWITCHX2) += mlxsw_switchx2.o
mlxsw_switchx2-objs := switchx2.o
obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o
mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
- spectrum_switchdev.o
+ spectrum_switchdev.o spectrum_router.o \
+ spectrum_kvdl.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
index cd63b8263..28271bedd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -105,6 +105,7 @@ enum mlxsw_cmd_opcode {
MLXSW_CMD_OPCODE_SW2HW_EQ = 0x013,
MLXSW_CMD_OPCODE_HW2SW_EQ = 0x014,
MLXSW_CMD_OPCODE_QUERY_EQ = 0x015,
+ MLXSW_CMD_OPCODE_QUERY_RESOURCES = 0x101,
};
static inline const char *mlxsw_cmd_opcode_str(u16 opcode)
@@ -144,6 +145,8 @@ static inline const char *mlxsw_cmd_opcode_str(u16 opcode)
return "HW2SW_EQ";
case MLXSW_CMD_OPCODE_QUERY_EQ:
return "QUERY_EQ";
+ case MLXSW_CMD_OPCODE_QUERY_RESOURCES:
+ return "QUERY_RESOURCES";
default:
return "*UNKNOWN*";
}
@@ -500,6 +503,35 @@ static inline int mlxsw_cmd_unmap_fa(struct mlxsw_core *mlxsw_core)
return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_UNMAP_FA, 0, 0);
}
+/* QUERY_RESOURCES - Query chip resources
+ * --------------------------------------
+ * OpMod == 0 (N/A), INmod is index
+ * ----------------------------------
+ * The QUERY_RESOURCES command retrieves information related to chip resources
+ * by resource ID. Every command returns 32 entries. INmod is used as a
+ * base index: for example, index 1 will return entries 32-63. When the
+ * table ends and there are no more resources to report, resource id
+ * 0xFFFF is returned to indicate it.
+ */
+static inline int mlxsw_cmd_query_resources(struct mlxsw_core *mlxsw_core,
+ char *out_mbox, int index)
+{
+ return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_RESOURCES,
+ 0, index, false, out_mbox,
+ MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_query_resource_id
+ * The resource id. 0xFFFF indicates table's end.
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, query_resource, id, 0x00, 16, 16, 0x8, 0, false);
+
+/* cmd_mbox_query_resource_data
+ * The resource data.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, query_resource, data,
+ 0x00, 0, 40, 0x8, 0, false);
+
/* CONFIG_PROFILE (Set) - Configure Switch Profile
* ------------------------------
* OpMod == 1 (Set), INMmod == 0 (N/A)
@@ -607,6 +639,24 @@ MLXSW_ITEM32(cmd_mbox, config_profile,
*/
MLXSW_ITEM32(cmd_mbox, config_profile, set_ar_sec, 0x0C, 15, 1);
+/* cmd_mbox_config_set_kvd_linear_size
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_linear_size, 0x0C, 24, 1);
+
+/* cmd_mbox_config_set_kvd_hash_single_size
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_single_size, 0x0C, 25, 1);
+
+/* cmd_mbox_config_set_kvd_hash_double_size
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_double_size, 0x0C, 26, 1);
+
/* cmd_mbox_config_profile_max_vepa_channels
* Maximum number of VEPA channels per port (0 through 16)
* 0 - multi-channel VEPA is disabled
@@ -733,6 +783,31 @@ MLXSW_ITEM32(cmd_mbox, config_profile, adaptive_routing_group_cap, 0x4C, 0, 16);
*/
MLXSW_ITEM32(cmd_mbox, config_profile, arn, 0x50, 31, 1);
+/* cmd_mbox_config_kvd_linear_size
+ * KVD Linear Size
+ * Valid for Spectrum only
+ * Allowed values are 128*N where N=0 or higher
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, kvd_linear_size, 0x54, 0, 24);
+
+/* cmd_mbox_config_kvd_hash_single_size
+ * KVD Hash single-entries size
+ * Valid for Spectrum only
+ * Allowed values are 128*N where N=0 or higher
+ * Must be greater than or equal to cap_min_kvd_hash_single_size
+ * Must be smaller than or equal to cap_kvd_size - kvd_linear_size
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, kvd_hash_single_size, 0x58, 0, 24);
+
+/* cmd_mbox_config_kvd_hash_double_size
+ * KVD Hash double-entries size (units of single-size entries)
+ * Valid for Spectrum only
+ * Allowed values are 128*N where N=0 or higher
+ * Must be either 0 or greater than or equal to cap_min_kvd_hash_double_size
+ * Must be smaller than or equal to cap_kvd_size - kvd_linear_size
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, kvd_hash_double_size, 0x5C, 0, 24);
+
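
To make the 128*N granularity constraint concrete, a hedged sketch of how a
caller might round its KVD partition sizes before setting them in the
profile (the constant and helper names are illustrative, not defined by
this patch):

#define EXAMPLE_KVD_GRANULARITY 128	/* illustrative; sizes must be 128*N */

static u32 example_kvd_align(u32 size)
{
	/* round down to the nearest allowed multiple */
	return rounddown(size, EXAMPLE_KVD_GRANULARITY);
}

/* e.g. in a config profile:
 *	.used_kvd_sizes		= 1,
 *	.kvd_linear_size	= example_kvd_align(98304),
 *	.kvd_hash_single_size	= example_kvd_align(163840),
 *	.kvd_hash_double_size	= example_kvd_align(32768),
 */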
/* cmd_mbox_config_profile_swid_config_mask
* Modify Switch Partition Configuration mask. When set, the configu-
* ration value for the Switch Partition are taken from the mailbox.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index b0a0b01bb..068ee65a9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -58,6 +58,7 @@
#include <linux/workqueue.h>
#include <asm/byteorder.h>
#include <net/devlink.h>
+#include <trace/events/devlink.h>
#include "core.h"
#include "item.h"
@@ -110,6 +111,7 @@ struct mlxsw_core {
struct {
u8 *mapping; /* lag_id+port_index to local_port mapping */
} lag;
+ struct mlxsw_resources resources;
struct mlxsw_hwmon *hwmon;
unsigned long driver_priv[0];
/* driver_priv has to be always the last item */
@@ -447,6 +449,10 @@ static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
if (!skb)
return -ENOMEM;
+ trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
+ skb->data + mlxsw_core->driver->txhdr_len,
+ skb->len - mlxsw_core->driver->txhdr_len);
+
atomic_set(&trans->active, 1);
err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
if (err) {
@@ -529,6 +535,9 @@ static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
struct mlxsw_core *mlxsw_core = priv;
struct mlxsw_reg_trans *trans;
+ trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
+ skb->data, skb->len);
+
if (!mlxsw_emad_is_resp(skb))
goto free_skb;
@@ -1102,7 +1111,8 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
}
}
- err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile);
+ err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
+ &mlxsw_core->resources);
if (err)
goto err_bus_init;
@@ -1110,14 +1120,14 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_emad_init;
- err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
- if (err)
- goto err_hwmon_init;
-
err = devlink_register(devlink, mlxsw_bus_info->dev);
if (err)
goto err_devlink_register;
+ err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
+ if (err)
+ goto err_hwmon_init;
+
err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
if (err)
goto err_driver_init;
@@ -1131,9 +1141,9 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
err_debugfs_init:
mlxsw_core->driver->fini(mlxsw_core);
err_driver_init:
+err_hwmon_init:
devlink_unregister(devlink);
err_devlink_register:
-err_hwmon_init:
mlxsw_emad_fini(mlxsw_core);
err_emad_init:
mlxsw_bus->fini(bus_priv);
@@ -1644,6 +1654,12 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
+struct mlxsw_resources *mlxsw_core_resources_get(struct mlxsw_core *mlxsw_core)
+{
+ return &mlxsw_core->resources;
+}
+EXPORT_SYMBOL(mlxsw_core_resources_get);
+
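
A hedged usage sketch; the point is that callers should test the
per-resource *_valid bit, since firmware that lacks the query reports
nothing (the function name and pr_debug() lines are illustrative):

static void example_check_span(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_resources *res = mlxsw_core_resources_get(mlxsw_core);

	if (res->max_span_valid)
		pr_debug("max SPAN agents: %u\n", res->max_span);
	else
		pr_debug("SPAN resource not reported by firmware\n");
}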
int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core,
struct mlxsw_core_port *mlxsw_core_port, u8 local_port,
struct net_device *dev, bool split, u32 split_group)
@@ -1736,7 +1752,7 @@ static int __init mlxsw_core_module_init(void)
{
int err;
- mlxsw_wq = create_workqueue(mlxsw_core_driver_name);
+ mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0);
if (!mlxsw_wq)
return -ENOMEM;
mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 436bc49df..d3476ead9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -190,7 +190,8 @@ struct mlxsw_config_profile {
used_max_ib_mc:1,
used_max_pkey:1,
used_ar_sec:1,
- used_adaptive_routing_group_cap:1;
+ used_adaptive_routing_group_cap:1,
+ used_kvd_sizes:1;
u8 max_vepa_channels;
u16 max_lag;
u16 max_port_per_lag;
@@ -211,6 +212,10 @@ struct mlxsw_config_profile {
u8 ar_sec;
u16 adaptive_routing_group_cap;
u8 arn;
+ u32 kvd_linear_size;
+ u32 kvd_hash_single_size;
+ u32 kvd_hash_double_size;
+ u8 resource_query_enable;
struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
};
@@ -262,10 +267,18 @@ struct mlxsw_driver {
const struct mlxsw_config_profile *profile;
};
+struct mlxsw_resources {
+ u8 max_span_valid:1;
+ u8 max_span;
+};
+
+struct mlxsw_resources *mlxsw_core_resources_get(struct mlxsw_core *mlxsw_core);
+
struct mlxsw_bus {
const char *kind;
int (*init)(void *bus_priv, struct mlxsw_core *mlxsw_core,
- const struct mlxsw_config_profile *profile);
+ const struct mlxsw_config_profile *profile,
+ struct mlxsw_resources *resources);
void (*fini)(void *bus_priv);
bool (*skb_transmit_busy)(void *bus_priv,
const struct mlxsw_tx_info *tx_info);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 7f4173c8e..1d1360c17 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1154,6 +1154,61 @@ mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
}
+#define MLXSW_RESOURCES_TABLE_END_ID 0xffff
+#define MLXSW_MAX_SPAN_ID 0x2420
+#define MLXSW_RESOURCES_QUERY_MAX_QUERIES 100
+#define MLXSW_RESOURCES_PER_QUERY 32
+
+static void mlxsw_pci_resources_query_parse(int id, u64 val,
+ struct mlxsw_resources *resources)
+{
+ switch (id) {
+ case MLXSW_MAX_SPAN_ID:
+ resources->max_span = val;
+ resources->max_span_valid = 1;
+ break;
+ default:
+ break;
+ }
+}
+
+static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ struct mlxsw_resources *resources,
+ u8 query_enabled)
+{
+ int index, i;
+ u64 data;
+ u16 id;
+ int err;
+
+ /* Not all firmware versions support the resources query */
+ if (!query_enabled)
+ return 0;
+
+ mlxsw_cmd_mbox_zero(mbox);
+
+ for (index = 0; index < MLXSW_RESOURCES_QUERY_MAX_QUERIES; index++) {
+ err = mlxsw_cmd_query_resources(mlxsw_pci->core, mbox, index);
+ if (err)
+ return err;
+
+ for (i = 0; i < MLXSW_RESOURCES_PER_QUERY; i++) {
+ id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i);
+ data = mlxsw_cmd_mbox_query_resource_data_get(mbox, i);
+
+ if (id == MLXSW_RESOURCES_TABLE_END_ID)
+ return 0;
+
+ mlxsw_pci_resources_query_parse(id, data, resources);
+ }
+ }
+
+ /* If after MLXSW_RESOURCES_QUERY_MAX_QUERIES queries we still did not
+ * get MLXSW_RESOURCES_TABLE_END_ID, something went wrong in the FW.
+ */
+ return -EIO;
+}
+
static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
const struct mlxsw_config_profile *profile)
{
@@ -1255,6 +1310,20 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
mbox, profile->adaptive_routing_group_cap);
}
+ if (profile->used_kvd_sizes) {
+ mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(
+ mbox, profile->kvd_linear_size);
+ mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(
+ mbox, profile->kvd_hash_single_size);
+ mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(
+ mbox, profile->kvd_hash_double_size);
+ }
for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
@@ -1390,7 +1459,8 @@ static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
}
static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
- const struct mlxsw_config_profile *profile)
+ const struct mlxsw_config_profile *profile,
+ struct mlxsw_resources *resources)
{
struct mlxsw_pci *mlxsw_pci = bus_priv;
struct pci_dev *pdev = mlxsw_pci->pdev;
@@ -1449,6 +1519,11 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
if (err)
goto err_boardinfo;
+ err = mlxsw_pci_resources_query(mlxsw_pci, mbox, resources,
+ profile->resource_query_enable);
+ if (err)
+ goto err_query_resources;
+
err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile);
if (err)
goto err_config_profile;
@@ -1471,6 +1546,7 @@ err_request_eq_irq:
mlxsw_pci_aqs_fini(mlxsw_pci);
err_aqs_init:
err_config_profile:
+err_query_resources:
err_boardinfo:
mlxsw_pci_fw_area_fini(mlxsw_pci);
err_fw_area_init:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
index f33b997f2..af371a82c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/port.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -56,6 +56,7 @@
#define MLXSW_PORT_PHY_BITS_MASK (MLXSW_PORT_MAX_PHY_PORTS - 1)
#define MLXSW_PORT_CPU_PORT 0x0
+#define MLXSW_PORT_ROUTER_PORT (MLXSW_PORT_MAX_PHY_PORTS + 2)
#define MLXSW_PORT_DONT_CARE (MLXSW_PORT_MAX_PORTS)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 57d48da70..1721098ee 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -1,9 +1,10 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/reg.h
* Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015-2016 Ido Schimmel <idosch@mellanox.com>
* Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015-2016 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -386,7 +387,9 @@ enum mlxsw_reg_sfd_rec_action {
/* forward and trap, trap_id is FDB_TRAP */
MLXSW_REG_SFD_REC_ACTION_MIRROR_TO_CPU = 1,
/* trap and do not forward, trap_id is FDB_TRAP */
- MLXSW_REG_SFD_REC_ACTION_TRAP = 3,
+ MLXSW_REG_SFD_REC_ACTION_TRAP = 2,
+ /* forward to IP router */
+ MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER = 3,
MLXSW_REG_SFD_REC_ACTION_DISCARD_ERROR = 15,
};
@@ -2500,6 +2503,7 @@ MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2);
enum mlxsw_reg_ppcnt_grp {
MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0,
MLXSW_REG_PPCNT_PRIO_CNT = 0x10,
+ MLXSW_REG_PPCNT_TC_CNT = 0x11,
};
/* reg_ppcnt_grp
@@ -2700,6 +2704,23 @@ MLXSW_ITEM64(reg, ppcnt, tx_pause_duration, 0x08 + 0x68, 0, 64);
*/
MLXSW_ITEM64(reg, ppcnt, tx_pause_transition, 0x08 + 0x70, 0, 64);
+/* Ethernet Per Traffic Group Counters */
+
+/* reg_ppcnt_tc_transmit_queue
+ * Contains the transmit queue depth in cells of traffic class
+ * selected by prio_tc and the port selected by local_port.
+ * The field cannot be cleared.
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, tc_transmit_queue, 0x08 + 0x00, 0, 64);
+
+/* reg_ppcnt_tc_no_buffer_discard_uc
+ * The number of unicast packets dropped due to lack of shared
+ * buffer resources.
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, tc_no_buffer_discard_uc, 0x08 + 0x08, 0, 64);
+
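
A hedged sketch of reading the new per-TC group for one port and traffic
class; mlxsw_reg_query() is the driver's existing read accessor, and the
function name and pr_debug() reporting are illustrative:

static int example_tc_counters_read(struct mlxsw_core *mlxsw_core,
				    u8 local_port, u8 tc)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, local_port,
			     MLXSW_REG_PPCNT_TC_CNT, tc);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(ppcnt), ppcnt_pl);
	if (err)
		return err;
	pr_debug("tc %u: queue depth %llu cells, uc drops %llu\n", tc,
		 (unsigned long long)
		 mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl),
		 (unsigned long long)
		 mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl));
	return 0;
}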
static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port,
enum mlxsw_reg_ppcnt_grp grp,
u8 prio_tc)
@@ -3201,6 +3222,1194 @@ static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action, u16 trap_id)
mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT);
}
+/* RGCR - Router General Configuration Register
+ * --------------------------------------------
+ * The register is used for setting up the router configuration.
+ */
+#define MLXSW_REG_RGCR_ID 0x8001
+#define MLXSW_REG_RGCR_LEN 0x28
+
+static const struct mlxsw_reg_info mlxsw_reg_rgcr = {
+ .id = MLXSW_REG_RGCR_ID,
+ .len = MLXSW_REG_RGCR_LEN,
+};
+
+/* reg_rgcr_ipv4_en
+ * IPv4 router enable.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rgcr, ipv4_en, 0x00, 31, 1);
+
+/* reg_rgcr_ipv6_en
+ * IPv6 router enable.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rgcr, ipv6_en, 0x00, 30, 1);
+
+/* reg_rgcr_max_router_interfaces
+ * Defines the maximum number of active router interfaces for all virtual
+ * routers.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rgcr, max_router_interfaces, 0x10, 0, 16);
+
+/* reg_rgcr_usp
+ * Update switch priority and packet color.
+ * 0 - Preserve the value of Switch Priority and packet color.
+ * 1 - Recalculate the value of Switch Priority and packet color.
+ * Access: RW
+ *
+ * Note: Not supported by SwitchX and SwitchX-2.
+ */
+MLXSW_ITEM32(reg, rgcr, usp, 0x18, 20, 1);
+
+/* reg_rgcr_pcp_rw
+ * Indicates how to handle the pcp_rewrite_en value:
+ * 0 - Preserve the value of pcp_rewrite_en.
+ * 2 - Disable PCP rewrite.
+ * 3 - Enable PCP rewrite.
+ * Access: RW
+ *
+ * Note: Not supported by SwitchX and SwitchX-2.
+ */
+MLXSW_ITEM32(reg, rgcr, pcp_rw, 0x18, 16, 2);
+
+/* reg_rgcr_activity_dis
+ * Activity disable:
+ * 0 - Activity will be set when an entry is hit (default).
+ * 1 - Activity will not be set when an entry is hit.
+ *
+ * Bit 0 - Disable activity bit in Router Algorithmic LPM Unicast Entry
+ * (RALUE).
+ * Bit 1 - Disable activity bit in Router Algorithmic LPM Unicast Host
+ * Entry (RAUHT).
+ * Bits 2:7 are reserved.
+ * Access: RW
+ *
+ * Note: Not supported by SwitchX, SwitchX-2 and Switch-IB.
+ */
+MLXSW_ITEM32(reg, rgcr, activity_dis, 0x20, 0, 8);
+
+static inline void mlxsw_reg_rgcr_pack(char *payload, bool ipv4_en)
+{
+ MLXSW_REG_ZERO(rgcr, payload);
+ mlxsw_reg_rgcr_ipv4_en_set(payload, ipv4_en);
+}
+
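
A hedged sketch of issuing the register; MLXSW_REG() and mlxsw_reg_write()
are the driver's existing accessors, and the function name and 'rifs'
value are illustrative:

static int example_router_enable(struct mlxsw_core *mlxsw_core, u16 rifs)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];

	mlxsw_reg_rgcr_pack(rgcr_pl, true);	/* ipv4_en = 1 */
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, rifs);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(rgcr), rgcr_pl);
}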
+/* RITR - Router Interface Table Register
+ * --------------------------------------
+ * The register is used to configure the router interface table.
+ */
+#define MLXSW_REG_RITR_ID 0x8002
+#define MLXSW_REG_RITR_LEN 0x40
+
+static const struct mlxsw_reg_info mlxsw_reg_ritr = {
+ .id = MLXSW_REG_RITR_ID,
+ .len = MLXSW_REG_RITR_LEN,
+};
+
+/* reg_ritr_enable
+ * Enables routing on the router interface.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, enable, 0x00, 31, 1);
+
+/* reg_ritr_ipv4
+ * IPv4 routing enable. Enables routing of IPv4 traffic on the router
+ * interface.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ipv4, 0x00, 29, 1);
+
+/* reg_ritr_ipv6
+ * IPv6 routing enable. Enables routing of IPv6 traffic on the router
+ * interface.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ipv6, 0x00, 28, 1);
+
+enum mlxsw_reg_ritr_if_type {
+ MLXSW_REG_RITR_VLAN_IF,
+ MLXSW_REG_RITR_FID_IF,
+ MLXSW_REG_RITR_SP_IF,
+};
+
+/* reg_ritr_type
+ * Router interface type.
+ * 0 - VLAN interface.
+ * 1 - FID interface.
+ * 2 - Sub-port interface.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, type, 0x00, 23, 3);
+
+enum {
+ MLXSW_REG_RITR_RIF_CREATE,
+ MLXSW_REG_RITR_RIF_DEL,
+};
+
+/* reg_ritr_op
+ * Opcode:
+ * 0 - Create or edit RIF.
+ * 1 - Delete RIF.
+ * Reserved for SwitchX-2. For Spectrum, editing of interface properties
+ * is not supported. An interface must be deleted and re-created in order
+ * to update properties.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, ritr, op, 0x00, 20, 2);
+
+/* reg_ritr_rif
+ * Router interface index. A pointer to the Router Interface Table.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ritr, rif, 0x00, 0, 16);
+
+/* reg_ritr_ipv4_fe
+ * IPv4 Forwarding Enable.
+ * Enables routing of IPv4 traffic on the router interface. When disabled,
+ * forwarding is blocked but local traffic (traps and IP2ME) will be enabled.
+ * Not supported in SwitchX-2.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ipv4_fe, 0x04, 29, 1);
+
+/* reg_ritr_ipv6_fe
+ * IPv6 Forwarding Enable.
+ * Enables routing of IPv6 traffic on the router interface. When disabled,
+ * forwarding is blocked but local traffic (traps and IP2ME) will be enabled.
+ * Not supported in SwitchX-2.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1);
+
+/* reg_ritr_lb_en
+ * Loop-back filter enable for unicast packets.
+ * If the flag is set then loop-back filter for unicast packets is
+ * implemented on the RIF. Multicast packets are always subject to
+ * loop-back filtering.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, lb_en, 0x04, 24, 1);
+
+/* reg_ritr_virtual_router
+ * Virtual router ID associated with the router interface.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, virtual_router, 0x04, 0, 16);
+
+/* reg_ritr_mtu
+ * Router interface MTU.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, mtu, 0x34, 0, 16);
+
+/* reg_ritr_if_swid
+ * Switch partition ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, if_swid, 0x08, 24, 8);
+
+/* reg_ritr_if_mac
+ * Router interface MAC address.
+ * In Spectrum, all MAC addresses must have the same 38 MSBits.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ritr, if_mac, 0x12, 6);
+
+/* VLAN Interface */
+
+/* reg_ritr_vlan_if_vid
+ * VLAN ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, vlan_if_vid, 0x08, 0, 12);
+
+/* FID Interface */
+
+/* reg_ritr_fid_if_fid
+ * Filtering ID. Used to connect a bridge to the router. Only FIDs from
+ * the vFID range are supported.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, fid_if_fid, 0x08, 0, 16);
+
+static inline void mlxsw_reg_ritr_fid_set(char *payload,
+ enum mlxsw_reg_ritr_if_type rif_type,
+ u16 fid)
+{
+ if (rif_type == MLXSW_REG_RITR_FID_IF)
+ mlxsw_reg_ritr_fid_if_fid_set(payload, fid);
+ else
+ mlxsw_reg_ritr_vlan_if_vid_set(payload, fid);
+}
+
+/* Sub-port Interface */
+
+/* reg_ritr_sp_if_lag
+ * LAG indication. When this bit is set the system_port field holds the
+ * LAG identifier.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, sp_if_lag, 0x08, 24, 1);
+
+/* reg_ritr_sp_if_system_port
+ * Port unique identifier. When lag bit is set, this field holds the
+ * lag_id in bits 0:9.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, sp_if_system_port, 0x08, 0, 16);
+
+/* reg_ritr_sp_if_vid
+ * VLAN ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, sp_if_vid, 0x18, 0, 12);
+
+static inline void mlxsw_reg_ritr_rif_pack(char *payload, u16 rif)
+{
+ MLXSW_REG_ZERO(ritr, payload);
+ mlxsw_reg_ritr_rif_set(payload, rif);
+}
+
+static inline void mlxsw_reg_ritr_sp_if_pack(char *payload, bool lag,
+ u16 system_port, u16 vid)
+{
+ mlxsw_reg_ritr_sp_if_lag_set(payload, lag);
+ mlxsw_reg_ritr_sp_if_system_port_set(payload, system_port);
+ mlxsw_reg_ritr_sp_if_vid_set(payload, vid);
+}
+
+static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
+ enum mlxsw_reg_ritr_if_type type,
+ u16 rif, u16 mtu, const char *mac)
+{
+ bool op = enable ? MLXSW_REG_RITR_RIF_CREATE : MLXSW_REG_RITR_RIF_DEL;
+
+ MLXSW_REG_ZERO(ritr, payload);
+ mlxsw_reg_ritr_enable_set(payload, enable);
+ mlxsw_reg_ritr_ipv4_set(payload, 1);
+ mlxsw_reg_ritr_type_set(payload, type);
+ mlxsw_reg_ritr_op_set(payload, op);
+ mlxsw_reg_ritr_rif_set(payload, rif);
+ mlxsw_reg_ritr_ipv4_fe_set(payload, 1);
+ mlxsw_reg_ritr_lb_en_set(payload, 1);
+ mlxsw_reg_ritr_mtu_set(payload, mtu);
+ mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac);
+}
+
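
A hedged sketch of creating a FID-based RIF with the packers above (all
argument values are illustrative placeholders):

static int example_rif_create(struct mlxsw_core *mlxsw_core, u16 rif,
			      u16 fid, u16 mtu, const char *mac)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_pack(ritr_pl, true, MLXSW_REG_RITR_FID_IF,
			    rif, mtu, mac);
	mlxsw_reg_ritr_fid_set(ritr_pl, MLXSW_REG_RITR_FID_IF, fid);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(ritr), ritr_pl);
}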
+/* RATR - Router Adjacency Table Register
+ * --------------------------------------
+ * The RATR register is used to configure the Router Adjacency (next-hop)
+ * Table.
+ */
+#define MLXSW_REG_RATR_ID 0x8008
+#define MLXSW_REG_RATR_LEN 0x2C
+
+static const struct mlxsw_reg_info mlxsw_reg_ratr = {
+ .id = MLXSW_REG_RATR_ID,
+ .len = MLXSW_REG_RATR_LEN,
+};
+
+enum mlxsw_reg_ratr_op {
+ /* Read */
+ MLXSW_REG_RATR_OP_QUERY_READ = 0,
+ /* Read and clear activity */
+ MLXSW_REG_RATR_OP_QUERY_READ_CLEAR = 2,
+ /* Write Adjacency entry */
+ MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY = 1,
+ /* Write Adjacency entry only if the activity is cleared.
+ * The write may not succeed if the activity is set. There is no
+ * direct feedback if the write has succeeded or not, however
+ * the get will reveal the actual entry (SW can compare the get
+ * response to the set command).
+ */
+ MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY = 3,
+};
+
+/* reg_ratr_op
+ * Note that Write operation may also be used for updating
+ * counter_set_type and counter_index. In this case all other
+ * fields must not be updated.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ratr, op, 0x00, 28, 4);
+
+/* reg_ratr_v
+ * Valid bit. Indicates if the adjacency entry is valid.
+ * Note: the device may need some time before reusing an invalidated
+ * entry. During this time the entry cannot be reused. It is
+ * recommended to use another entry before reusing an invalidated
+ * entry (e.g. software can put it at the end of the list for
+ * reusing). Trying to access an invalidated entry not yet cleared
+ * by the device results in a failure indicating "Try Again" status.
+ * When valid is '0' then egress_router_interface, trap_action,
+ * adjacency_parameters and counters are reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ratr, v, 0x00, 24, 1);
+
+/* reg_ratr_a
+ * Activity. Set for new entries. Set if a packet lookup has hit on
+ * the specific entry. To clear the a bit, use "clear activity".
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ratr, a, 0x00, 16, 1);
+
+/* reg_ratr_adjacency_index_low
+ * Bits 15:0 of index into the adjacency table.
+ * For SwitchX and SwitchX-2, the adjacency table is linear and
+ * used for adjacency entries only.
+ * For Spectrum, the index is to the KVD linear.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ratr, adjacency_index_low, 0x04, 0, 16);
+
+/* reg_ratr_egress_router_interface
+ * Range is 0 .. cap_max_router_interfaces - 1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ratr, egress_router_interface, 0x08, 0, 16);
+
+enum mlxsw_reg_ratr_trap_action {
+ MLXSW_REG_RATR_TRAP_ACTION_NOP,
+ MLXSW_REG_RATR_TRAP_ACTION_TRAP,
+ MLXSW_REG_RATR_TRAP_ACTION_MIRROR_TO_CPU,
+ MLXSW_REG_RATR_TRAP_ACTION_MIRROR,
+ MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS,
+};
+
+/* reg_ratr_trap_action
+ * see mlxsw_reg_ratr_trap_action
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ratr, trap_action, 0x0C, 28, 4);
+
+enum mlxsw_reg_ratr_trap_id {
+ MLXSW_REG_RATR_TRAP_ID_RTR_EGRESS0 = 0,
+ MLXSW_REG_RATR_TRAP_ID_RTR_EGRESS1 = 1,
+};
+
+/* reg_ratr_adjacency_index_high
+ * Bits 23:16 of the adjacency_index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ratr, adjacency_index_high, 0x0C, 16, 8);
+
+/* reg_ratr_trap_id
+ * Trap ID to be reported to CPU.
+ * Trap-ID is RTR_EGRESS0 or RTR_EGRESS1.
+ * For trap_action of NOP, MIRROR and DISCARD_ERROR
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ratr, trap_id, 0x0C, 0, 8);
+
+/* reg_ratr_eth_destination_mac
+ * MAC address of the destination next-hop.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ratr, eth_destination_mac, 0x12, 6);
+
+static inline void
+mlxsw_reg_ratr_pack(char *payload,
+ enum mlxsw_reg_ratr_op op, bool valid,
+ u32 adjacency_index, u16 egress_rif)
+{
+ MLXSW_REG_ZERO(ratr, payload);
+ mlxsw_reg_ratr_op_set(payload, op);
+ mlxsw_reg_ratr_v_set(payload, valid);
+ mlxsw_reg_ratr_adjacency_index_low_set(payload, adjacency_index);
+ mlxsw_reg_ratr_adjacency_index_high_set(payload, adjacency_index >> 16);
+ mlxsw_reg_ratr_egress_router_interface_set(payload, egress_rif);
+}
+
+static inline void mlxsw_reg_ratr_eth_entry_pack(char *payload,
+ const char *dest_mac)
+{
+ mlxsw_reg_ratr_eth_destination_mac_memcpy_to(payload, dest_mac);
+}
+
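
A hedged sketch of installing one Ethernet next-hop entry (the adjacency
index, RIF and MAC arguments are placeholders):

static int example_nexthop_write(struct mlxsw_core *mlxsw_core,
				 u32 adj_index, u16 rif,
				 const char *neigh_mac)
{
	char ratr_pl[MLXSW_REG_RATR_LEN];

	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
			    true, adj_index, rif);
	mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_mac);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(ratr), ratr_pl);
}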
+/* RALTA - Router Algorithmic LPM Tree Allocation Register
+ * -------------------------------------------------------
+ * RALTA is used to allocate the LPM trees of the SHSPM method.
+ */
+#define MLXSW_REG_RALTA_ID 0x8010
+#define MLXSW_REG_RALTA_LEN 0x04
+
+static const struct mlxsw_reg_info mlxsw_reg_ralta = {
+ .id = MLXSW_REG_RALTA_ID,
+ .len = MLXSW_REG_RALTA_LEN,
+};
+
+/* reg_ralta_op
+ * opcode (valid for Write, must be 0 on Read)
+ * 0 - allocate a tree
+ * 1 - deallocate a tree
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ralta, op, 0x00, 28, 2);
+
+enum mlxsw_reg_ralxx_protocol {
+ MLXSW_REG_RALXX_PROTOCOL_IPV4,
+ MLXSW_REG_RALXX_PROTOCOL_IPV6,
+};
+
+/* reg_ralta_protocol
+ * Protocol.
+ * Deallocation opcode: Reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralta, protocol, 0x00, 24, 4);
+
+/* reg_ralta_tree_id
+ * An identifier (numbered from 1..cap_shspm_max_trees-1) representing
+ * the tree (managed by software).
+ * Note that tree_id 0 is allocated for a default-route tree.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ralta, tree_id, 0x00, 0, 8);
+
+static inline void mlxsw_reg_ralta_pack(char *payload, bool alloc,
+ enum mlxsw_reg_ralxx_protocol protocol,
+ u8 tree_id)
+{
+ MLXSW_REG_ZERO(ralta, payload);
+ mlxsw_reg_ralta_op_set(payload, !alloc);
+ mlxsw_reg_ralta_protocol_set(payload, protocol);
+ mlxsw_reg_ralta_tree_id_set(payload, tree_id);
+}
+
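
A hedged allocation sketch; the same helper with alloc=false frees the
tree again, and since tree_id 0 is reserved for the default-route tree,
callers start at 1 (function name is illustrative):

static int example_lpm_tree_alloc(struct mlxsw_core *mlxsw_core,
				  u8 tree_id, bool alloc)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, alloc,
			     MLXSW_REG_RALXX_PROTOCOL_IPV4, tree_id);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(ralta), ralta_pl);
}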
+/* RALST - Router Algorithmic LPM Structure Tree Register
+ * ------------------------------------------------------
+ * RALST is used to set and query the structure of an LPM tree.
+ * The structure of the tree must be sorted as a sorted binary tree, while
+ * each node is a bin that is tagged as the length of the prefixes the lookup
+ * will refer to. Therefore, bin X refers to a set of entries with prefixes
+ * of X bits to match with the destination address. The bin 0 indicates
+ * the default action, when there is no match of any prefix.
+ */
+#define MLXSW_REG_RALST_ID 0x8011
+#define MLXSW_REG_RALST_LEN 0x104
+
+static const struct mlxsw_reg_info mlxsw_reg_ralst = {
+ .id = MLXSW_REG_RALST_ID,
+ .len = MLXSW_REG_RALST_LEN,
+};
+
+/* reg_ralst_root_bin
+ * The bin number of the root bin.
+ * 0 < root_bin <= (length of IP address)
+ * For a default-route tree configure 0xff
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralst, root_bin, 0x00, 16, 8);
+
+/* reg_ralst_tree_id
+ * Tree identifier numbered from 1..(cap_shspm_max_trees-1).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ralst, tree_id, 0x00, 0, 8);
+
+#define MLXSW_REG_RALST_BIN_NO_CHILD 0xff
+#define MLXSW_REG_RALST_BIN_OFFSET 0x04
+#define MLXSW_REG_RALST_BIN_COUNT 128
+
+/* reg_ralst_left_child_bin
+ * Holding the children of the bin according to the stored tree's structure.
+ * For trees composed of less than 4 blocks, the bins in excess are reserved.
+ * Note that tree_id 0 is allocated for a default-route tree, bins are 0xff
+ * Access: RW
+ */
+MLXSW_ITEM16_INDEXED(reg, ralst, left_child_bin, 0x04, 8, 8, 0x02, 0x00, false);
+
+/* reg_ralst_right_child_bin
+ * Holding the children of the bin according to the stored tree's structure.
+ * For trees composed of less than 4 blocks, the bins in excess are reserved.
+ * Note that tree_id 0 is allocated for a default-route tree, bins are 0xff
+ * Access: RW
+ */
+MLXSW_ITEM16_INDEXED(reg, ralst, right_child_bin, 0x04, 0, 8, 0x02, 0x00,
+ false);
+
+static inline void mlxsw_reg_ralst_pack(char *payload, u8 root_bin, u8 tree_id)
+{
+ MLXSW_REG_ZERO(ralst, payload);
+
+ /* Initialize all bins to have no left or right child */
+ memset(payload + MLXSW_REG_RALST_BIN_OFFSET,
+ MLXSW_REG_RALST_BIN_NO_CHILD, MLXSW_REG_RALST_BIN_COUNT * 2);
+
+ mlxsw_reg_ralst_root_bin_set(payload, root_bin);
+ mlxsw_reg_ralst_tree_id_set(payload, tree_id);
+}
+
+static inline void mlxsw_reg_ralst_bin_pack(char *payload, u8 bin_number,
+ u8 left_child_bin,
+ u8 right_child_bin)
+{
+ int bin_index = bin_number - 1;
+
+ mlxsw_reg_ralst_left_child_bin_set(payload, bin_index, left_child_bin);
+ mlxsw_reg_ralst_right_child_bin_set(payload, bin_index,
+ right_child_bin);
+}
+
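
A hedged sketch of describing a tiny tree shape: bins are prefix lengths,
and leaves keep the 0xff no-child default installed by
mlxsw_reg_ralst_pack() (the /16-/24-/32 shape is illustrative only):

static int example_lpm_tree_shape(struct mlxsw_core *mlxsw_core, u8 tree_id)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];

	mlxsw_reg_ralst_pack(ralst_pl, 16, tree_id);	/* root bin = /16 */
	/* bin 16 branches to /24 (left) and /32 (right) */
	mlxsw_reg_ralst_bin_pack(ralst_pl, 16, 24, 32);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(ralst), ralst_pl);
}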
+/* RALTB - Router Algorithmic LPM Tree Binding Register
+ * ----------------------------------------------------
+ * RALTB is used to bind virtual router and protocol to an allocated LPM tree.
+ */
+#define MLXSW_REG_RALTB_ID 0x8012
+#define MLXSW_REG_RALTB_LEN 0x04
+
+static const struct mlxsw_reg_info mlxsw_reg_raltb = {
+ .id = MLXSW_REG_RALTB_ID,
+ .len = MLXSW_REG_RALTB_LEN,
+};
+
+/* reg_raltb_virtual_router
+ * Virtual Router ID
+ * Range is 0..cap_max_virtual_routers-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, raltb, virtual_router, 0x00, 16, 16);
+
+/* reg_raltb_protocol
+ * Protocol.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, raltb, protocol, 0x00, 12, 4);
+
+/* reg_raltb_tree_id
+ * Tree to be used for the {virtual_router, protocol}
+ * Tree identifier numbered from 1..(cap_shspm_max_trees-1).
+ * By default, all Unicast IPv4 and IPv6 are bound to tree_id 0.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, raltb, tree_id, 0x00, 0, 8);
+
+static inline void mlxsw_reg_raltb_pack(char *payload, u16 virtual_router,
+ enum mlxsw_reg_ralxx_protocol protocol,
+ u8 tree_id)
+{
+ MLXSW_REG_ZERO(raltb, payload);
+ mlxsw_reg_raltb_virtual_router_set(payload, virtual_router);
+ mlxsw_reg_raltb_protocol_set(payload, protocol);
+ mlxsw_reg_raltb_tree_id_set(payload, tree_id);
+}
+
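
A hedged sketch of binding a virtual router to an already allocated tree
(function name is illustrative):

static int example_lpm_tree_bind(struct mlxsw_core *mlxsw_core,
				 u16 virtual_router, u8 tree_id)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, virtual_router,
			     MLXSW_REG_RALXX_PROTOCOL_IPV4, tree_id);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(raltb), raltb_pl);
}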
+/* RALUE - Router Algorithmic LPM Unicast Entry Register
+ * -----------------------------------------------------
+ * RALUE is used to configure and query LPM entries that serve
+ * the Unicast protocols.
+ */
+#define MLXSW_REG_RALUE_ID 0x8013
+#define MLXSW_REG_RALUE_LEN 0x38
+
+static const struct mlxsw_reg_info mlxsw_reg_ralue = {
+ .id = MLXSW_REG_RALUE_ID,
+ .len = MLXSW_REG_RALUE_LEN,
+};
+
+/* reg_ralue_protocol
+ * Protocol.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ralue, protocol, 0x00, 24, 4);
+
+enum mlxsw_reg_ralue_op {
+ /* Read operation. If entry doesn't exist, the operation fails. */
+ MLXSW_REG_RALUE_OP_QUERY_READ = 0,
+ /* Clear on read operation. Used to read entry and
+ * clear Activity bit.
+ */
+ MLXSW_REG_RALUE_OP_QUERY_CLEAR = 1,
+ /* Write operation. Used to write a new entry to the table. All RW
+ * fields are written for new entry. Activity bit is set
+ * for new entries.
+ */
+ MLXSW_REG_RALUE_OP_WRITE_WRITE = 0,
+ /* Update operation. Used to update an existing route entry and
+ * only update the RW fields that are detailed in the field
+ * op_u_mask. If entry doesn't exist, the operation fails.
+ */
+ MLXSW_REG_RALUE_OP_WRITE_UPDATE = 1,
+ /* Clear activity. The Activity bit (the field a) is cleared
+ * for the entry.
+ */
+ MLXSW_REG_RALUE_OP_WRITE_CLEAR = 2,
+ /* Delete operation. Used to delete an existing entry. If entry
+ * doesn't exist, the operation fails.
+ */
+ MLXSW_REG_RALUE_OP_WRITE_DELETE = 3,
+};
+
+/* reg_ralue_op
+ * Operation.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ralue, op, 0x00, 20, 3);
+
+/* reg_ralue_a
+ * Activity. Set for new entries. Set if a packet lookup has hit on the
+ * specific entry, only if the entry is a route. To clear the a bit, use
+ * "clear activity" op.
+ * Enabled by activity_dis in RGCR
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ralue, a, 0x00, 16, 1);
+
+/* reg_ralue_virtual_router
+ * Virtual Router ID
+ * Range is 0..cap_max_virtual_routers-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ralue, virtual_router, 0x04, 16, 16);
+
+#define MLXSW_REG_RALUE_OP_U_MASK_ENTRY_TYPE BIT(0)
+#define MLXSW_REG_RALUE_OP_U_MASK_BMP_LEN BIT(1)
+#define MLXSW_REG_RALUE_OP_U_MASK_ACTION BIT(2)
+
+/* reg_ralue_op_u_mask
+ * opcode update mask.
+ * On read operation, this field is reserved.
+ * This field is valid for update opcode, otherwise - reserved.
+ * This field is a bitmask of the fields that should be updated.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, ralue, op_u_mask, 0x04, 8, 3);
+
+/* reg_ralue_prefix_len
+ * Number of bits in the prefix of the LPM route.
+ * Note that for IPv6 prefixes, if prefix_len>64 the entry consumes
+ * two entries in the physical HW table.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ralue, prefix_len, 0x08, 0, 8);
+
+/* reg_ralue_dip*
+ * The prefix of the route or of the marker that the object of the LPM
+ * is compared with. The most significant bits of the dip are the prefix.
+ * The least significant bits must be '0' if the prefix_len is smaller
+ * than 128 for IPv6 or smaller than 32 for IPv4.
+ * IPv4 address uses bits dip[31:0] and bits dip[127:32] are reserved.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ralue, dip4, 0x18, 0, 32);
+
+enum mlxsw_reg_ralue_entry_type {
+ MLXSW_REG_RALUE_ENTRY_TYPE_MARKER_ENTRY = 1,
+ MLXSW_REG_RALUE_ENTRY_TYPE_ROUTE_ENTRY = 2,
+ MLXSW_REG_RALUE_ENTRY_TYPE_MARKER_AND_ROUTE_ENTRY = 3,
+};
+
+/* reg_ralue_entry_type
+ * Entry type.
+ * Note - for Marker entries, the action_type and action fields are reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, entry_type, 0x1C, 30, 2);
+
+/* reg_ralue_bmp_len
+ * The best match prefix length in the case that there is no match for
+ * longer prefixes.
+ * If (entry_type != MARKER_ENTRY), bmp_len must be equal to prefix_len
+ * Note for any update operation with entry_type modification this
+ * field must be set.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, bmp_len, 0x1C, 16, 8);
+
+enum mlxsw_reg_ralue_action_type {
+ MLXSW_REG_RALUE_ACTION_TYPE_REMOTE,
+ MLXSW_REG_RALUE_ACTION_TYPE_LOCAL,
+ MLXSW_REG_RALUE_ACTION_TYPE_IP2ME,
+};
+
+/* reg_ralue_action_type
+ * Action Type
+ * Indicates how the IP address is connected.
+ * It can be connected to a local subnet through local_erif or can be
+ * on a remote subnet connected through a next-hop router,
+ * or transmitted to the CPU.
+ * Reserved when entry_type = MARKER_ENTRY
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, action_type, 0x1C, 0, 2);
+
+enum mlxsw_reg_ralue_trap_action {
+ MLXSW_REG_RALUE_TRAP_ACTION_NOP,
+ MLXSW_REG_RALUE_TRAP_ACTION_TRAP,
+ MLXSW_REG_RALUE_TRAP_ACTION_MIRROR_TO_CPU,
+ MLXSW_REG_RALUE_TRAP_ACTION_MIRROR,
+ MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR,
+};
+
+/* reg_ralue_trap_action
+ * Trap action.
+ * For IP2ME action, only NOP and MIRROR are possible.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, trap_action, 0x20, 28, 4);
+
+/* reg_ralue_trap_id
+ * Trap ID to be reported to CPU.
+ * Trap ID is RTR_INGRESS0 or RTR_INGRESS1.
+ * For trap_action of NOP, MIRROR and DISCARD_ERROR, trap_id is reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, trap_id, 0x20, 0, 9);
+
+/* reg_ralue_adjacency_index
+ * Points to the first entry of the group-based ECMP.
+ * Only relevant in case of REMOTE action.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, adjacency_index, 0x24, 0, 24);
+
+/* reg_ralue_ecmp_size
+ * Number of sequential entries starting
+ * from the adjacency_index (the number of ECMPs).
+ * The valid range is 1-64, 512, 1024, 2048 and 4096.
+ * Reserved when trap_action is TRAP or DISCARD_ERROR.
+ * Only relevant in case of REMOTE action.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, ecmp_size, 0x28, 0, 13);
+
+/* reg_ralue_local_erif
+ * Egress Router Interface.
+ * Only relevant in case of LOCAL action.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, local_erif, 0x24, 0, 16);
+
+/* reg_ralue_v
+ * Valid bit for the tunnel_ptr field.
+ * If valid = 0 then trap to CPU as IP2ME trap ID.
+ * If valid = 1 and the packet format allows NVE or IPinIP tunnel
+ * decapsulation then tunnel decapsulation is done.
+ * If valid = 1 and packet format does not allow NVE or IPinIP tunnel
+ * decapsulation then trap as IP2ME trap ID.
+ * Only relevant in case of IP2ME action.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, v, 0x24, 31, 1);
+
+/* reg_ralue_tunnel_ptr
+ * Tunnel Pointer for NVE or IPinIP tunnel decapsulation.
+ * For Spectrum, pointer to KVD Linear.
+ * Only relevant in case of IP2ME action.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, tunnel_ptr, 0x24, 0, 24);
+
+static inline void mlxsw_reg_ralue_pack(char *payload,
+ enum mlxsw_reg_ralxx_protocol protocol,
+ enum mlxsw_reg_ralue_op op,
+ u16 virtual_router, u8 prefix_len)
+{
+ MLXSW_REG_ZERO(ralue, payload);
+ mlxsw_reg_ralue_protocol_set(payload, protocol);
+ mlxsw_reg_ralue_op_set(payload, op);
+ mlxsw_reg_ralue_virtual_router_set(payload, virtual_router);
+ mlxsw_reg_ralue_prefix_len_set(payload, prefix_len);
+ mlxsw_reg_ralue_entry_type_set(payload,
+ MLXSW_REG_RALUE_ENTRY_TYPE_ROUTE_ENTRY);
+ mlxsw_reg_ralue_bmp_len_set(payload, prefix_len);
+}
+
+static inline void mlxsw_reg_ralue_pack4(char *payload,
+ enum mlxsw_reg_ralxx_protocol protocol,
+ enum mlxsw_reg_ralue_op op,
+ u16 virtual_router, u8 prefix_len,
+ u32 dip)
+{
+ mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len);
+ mlxsw_reg_ralue_dip4_set(payload, dip);
+}
+
+static inline void
+mlxsw_reg_ralue_act_remote_pack(char *payload,
+ enum mlxsw_reg_ralue_trap_action trap_action,
+ u16 trap_id, u32 adjacency_index, u16 ecmp_size)
+{
+ mlxsw_reg_ralue_action_type_set(payload,
+ MLXSW_REG_RALUE_ACTION_TYPE_REMOTE);
+ mlxsw_reg_ralue_trap_action_set(payload, trap_action);
+ mlxsw_reg_ralue_trap_id_set(payload, trap_id);
+ mlxsw_reg_ralue_adjacency_index_set(payload, adjacency_index);
+ mlxsw_reg_ralue_ecmp_size_set(payload, ecmp_size);
+}
+
+static inline void
+mlxsw_reg_ralue_act_local_pack(char *payload,
+ enum mlxsw_reg_ralue_trap_action trap_action,
+ u16 trap_id, u16 local_erif)
+{
+ mlxsw_reg_ralue_action_type_set(payload,
+ MLXSW_REG_RALUE_ACTION_TYPE_LOCAL);
+ mlxsw_reg_ralue_trap_action_set(payload, trap_action);
+ mlxsw_reg_ralue_trap_id_set(payload, trap_id);
+ mlxsw_reg_ralue_local_erif_set(payload, local_erif);
+}
+
+static inline void
+mlxsw_reg_ralue_act_ip2me_pack(char *payload)
+{
+ mlxsw_reg_ralue_action_type_set(payload,
+ MLXSW_REG_RALUE_ACTION_TYPE_IP2ME);
+}
+
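
A hedged sketch of programming one IPv4 route as locally attached; virtual
router 0, the 192.168.1.0/24 prefix and the NOP trap action are
placeholders:

static int example_route_write(struct mlxsw_core *mlxsw_core, u16 rif)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];

	mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_REG_RALXX_PROTOCOL_IPV4,
			      MLXSW_REG_RALUE_OP_WRITE_WRITE, 0, 24,
			      0xc0a80100);	/* 192.168.1.0 */
	mlxsw_reg_ralue_act_local_pack(ralue_pl,
				       MLXSW_REG_RALUE_TRAP_ACTION_NOP,
				       0, rif);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(ralue), ralue_pl);
}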
+/* RAUHT - Router Algorithmic LPM Unicast Host Table Register
+ * ----------------------------------------------------------
+ * The RAUHT register is used to configure and query the Unicast Host table in
+ * devices that implement the Algorithmic LPM.
+ */
+#define MLXSW_REG_RAUHT_ID 0x8014
+#define MLXSW_REG_RAUHT_LEN 0x74
+
+static const struct mlxsw_reg_info mlxsw_reg_rauht = {
+ .id = MLXSW_REG_RAUHT_ID,
+ .len = MLXSW_REG_RAUHT_LEN,
+};
+
+enum mlxsw_reg_rauht_type {
+ MLXSW_REG_RAUHT_TYPE_IPV4,
+ MLXSW_REG_RAUHT_TYPE_IPV6,
+};
+
+/* reg_rauht_type
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauht, type, 0x00, 24, 2);
+
+enum mlxsw_reg_rauht_op {
+ MLXSW_REG_RAUHT_OP_QUERY_READ = 0,
+ /* Read operation */
+ MLXSW_REG_RAUHT_OP_QUERY_CLEAR_ON_READ = 1,
+ /* Clear on read operation. Used to read entry and clear
+ * activity bit.
+ */
+ MLXSW_REG_RAUHT_OP_WRITE_ADD = 0,
+ /* Add. Used to write a new entry to the table. All R/W fields are
+ * relevant for new entry. Activity bit is set for new entries.
+ */
+ MLXSW_REG_RAUHT_OP_WRITE_UPDATE = 1,
+ /* Update action. Used to update an existing route entry and
+ * only update the following fields:
+ * trap_action, trap_id, mac, counter_set_type, counter_index
+ */
+ MLXSW_REG_RAUHT_OP_WRITE_CLEAR_ACTIVITY = 2,
+ /* Clear activity. A bit is cleared for the entry. */
+ MLXSW_REG_RAUHT_OP_WRITE_DELETE = 3,
+ /* Delete entry */
+ MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL = 4,
+ /* Delete all host entries on a RIF. In this command, dip
+ * field is reserved.
+ */
+};
+
+/* reg_rauht_op
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, rauht, op, 0x00, 20, 3);
+
+/* reg_rauht_a
+ * Activity. Set for new entries. Set if a packet lookup has hit on
+ * the specific entry.
+ * To clear the a bit, use "clear activity" op.
+ * Enabled by activity_dis in RGCR
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, rauht, a, 0x00, 16, 1);
+
+/* reg_rauht_rif
+ * Router Interface
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauht, rif, 0x00, 0, 16);
+
+/* reg_rauht_dip*
+ * Destination address.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauht, dip4, 0x1C, 0x0, 32);
+
+enum mlxsw_reg_rauht_trap_action {
+ MLXSW_REG_RAUHT_TRAP_ACTION_NOP,
+ MLXSW_REG_RAUHT_TRAP_ACTION_TRAP,
+ MLXSW_REG_RAUHT_TRAP_ACTION_MIRROR_TO_CPU,
+ MLXSW_REG_RAUHT_TRAP_ACTION_MIRROR,
+ MLXSW_REG_RAUHT_TRAP_ACTION_DISCARD_ERRORS,
+};
+
+/* reg_rauht_trap_action
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rauht, trap_action, 0x60, 28, 4);
+
+enum mlxsw_reg_rauht_trap_id {
+ MLXSW_REG_RAUHT_TRAP_ID_RTR_EGRESS0,
+ MLXSW_REG_RAUHT_TRAP_ID_RTR_EGRESS1,
+};
+
+/* reg_rauht_trap_id
+ * Trap ID to be reported to CPU.
+ * Trap-ID is RTR_EGRESS0 or RTR_EGRESS1.
+ * For trap_action of NOP, MIRROR and DISCARD_ERROR,
+ * trap_id is reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rauht, trap_id, 0x60, 0, 9);
+
+/* reg_rauht_counter_set_type
+ * Counter set type for flow counters
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rauht, counter_set_type, 0x68, 24, 8);
+
+/* reg_rauht_counter_index
+ * Counter index for flow counters
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rauht, counter_index, 0x68, 0, 24);
+
+/* reg_rauht_mac
+ * MAC address.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, rauht, mac, 0x6E, 6);
+
+static inline void mlxsw_reg_rauht_pack(char *payload,
+ enum mlxsw_reg_rauht_op op, u16 rif,
+ const char *mac)
+{
+ MLXSW_REG_ZERO(rauht, payload);
+ mlxsw_reg_rauht_op_set(payload, op);
+ mlxsw_reg_rauht_rif_set(payload, rif);
+ mlxsw_reg_rauht_mac_memcpy_to(payload, mac);
+}
+
+static inline void mlxsw_reg_rauht_pack4(char *payload,
+ enum mlxsw_reg_rauht_op op, u16 rif,
+ const char *mac, u32 dip)
+{
+ mlxsw_reg_rauht_pack(payload, op, rif, mac);
+ mlxsw_reg_rauht_dip4_set(payload, dip);
+}
+
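
A hedged sketch of adding one IPv4 host (neighbour) entry on a RIF; the
address value is a placeholder:

static int example_neigh_add(struct mlxsw_core *mlxsw_core, u16 rif,
			     const char *mac)
{
	char rauht_pl[MLXSW_REG_RAUHT_LEN];

	mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_ADD,
			      rif, mac, 0xc0a80101);	/* 192.168.1.1 */
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(rauht), rauht_pl);
}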
+/* RALEU - Router Algorithmic LPM ECMP Update Register
+ * ---------------------------------------------------
+ * The register enables updating the ECMP section in the action for multiple
+ * LPM Unicast entries in a single operation. The update is executed to
+ * all entries of a {virtual router, protocol} tuple using the same ECMP group.
+ */
+#define MLXSW_REG_RALEU_ID 0x8015
+#define MLXSW_REG_RALEU_LEN 0x28
+
+static const struct mlxsw_reg_info mlxsw_reg_raleu = {
+ .id = MLXSW_REG_RALEU_ID,
+ .len = MLXSW_REG_RALEU_LEN,
+};
+
+/* reg_raleu_protocol
+ * Protocol.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, raleu, protocol, 0x00, 24, 4);
+
+/* reg_raleu_virtual_router
+ * Virtual Router ID
+ * Range is 0..cap_max_virtual_routers-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, raleu, virtual_router, 0x00, 0, 16);
+
+/* reg_raleu_adjacency_index
+ * Adjacency Index used for matching on the existing entries.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, raleu, adjacency_index, 0x10, 0, 24);
+
+/* reg_raleu_ecmp_size
+ * ECMP Size used for matching on the existing entries.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, raleu, ecmp_size, 0x14, 0, 13);
+
+/* reg_raleu_new_adjacency_index
+ * New Adjacency Index.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, raleu, new_adjacency_index, 0x20, 0, 24);
+
+/* reg_raleu_new_ecmp_size
+ * New ECMP Size.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, raleu, new_ecmp_size, 0x24, 0, 13);
+
+static inline void mlxsw_reg_raleu_pack(char *payload,
+ enum mlxsw_reg_ralxx_protocol protocol,
+ u16 virtual_router,
+ u32 adjacency_index, u16 ecmp_size,
+ u32 new_adjacency_index,
+ u16 new_ecmp_size)
+{
+ MLXSW_REG_ZERO(raleu, payload);
+ mlxsw_reg_raleu_protocol_set(payload, protocol);
+ mlxsw_reg_raleu_virtual_router_set(payload, virtual_router);
+ mlxsw_reg_raleu_adjacency_index_set(payload, adjacency_index);
+ mlxsw_reg_raleu_ecmp_size_set(payload, ecmp_size);
+ mlxsw_reg_raleu_new_adjacency_index_set(payload, new_adjacency_index);
+ mlxsw_reg_raleu_new_ecmp_size_set(payload, new_ecmp_size);
+}
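+
+/* Illustrative sketch only: re-pointing all LPM entries of one
+ * {virtual router, protocol} from adjacency group {old_adj, old_size}
+ * to {new_adj, new_size} in a single operation. The protocol value is
+ * assumed from the shared ralxx enum.
+ *
+ * char raleu_pl[MLXSW_REG_RALEU_LEN];
+ *
+ * mlxsw_reg_raleu_pack(raleu_pl, MLXSW_REG_RALXX_PROTOCOL_IPV4, vr_id,
+ * old_adj, old_size, new_adj, new_size);
+ * err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(raleu), raleu_pl);
+ */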
+
+/* RAUHTD - Router Algorithmic LPM Unicast Host Table Dump Register
+ * ----------------------------------------------------------------
+ * The RAUHTD register allows dumping entries from the Router Unicast Host
+ * Table. Within a given session, each entry is dumped at most once. The
+ * first RAUHTD access after reset starts a new session. A session ends when
+ * the num_rec in the response is smaller than the num_rec requested, or,
+ * for IPv4, when num_entries is smaller than 4. Clearing activity affects
+ * the current session, or the last session if a new one has not started.
+ */
+#define MLXSW_REG_RAUHTD_ID 0x8018
+#define MLXSW_REG_RAUHTD_BASE_LEN 0x20
+#define MLXSW_REG_RAUHTD_REC_LEN 0x20
+#define MLXSW_REG_RAUHTD_REC_MAX_NUM 32
+#define MLXSW_REG_RAUHTD_LEN (MLXSW_REG_RAUHTD_BASE_LEN + \
+ MLXSW_REG_RAUHTD_REC_MAX_NUM * MLXSW_REG_RAUHTD_REC_LEN)
+#define MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC 4
+
+static const struct mlxsw_reg_info mlxsw_reg_rauhtd = {
+ .id = MLXSW_REG_RAUHTD_ID,
+ .len = MLXSW_REG_RAUHTD_LEN,
+};
+
+#define MLXSW_REG_RAUHTD_FILTER_A BIT(0)
+#define MLXSW_REG_RAUHTD_FILTER_RIF BIT(3)
+
+/* reg_rauhtd_filter_fields
+ * If a bit is '0' then the relevant field is ignored and the dump is done
+ * regardless of the field value.
+ * Bit0 - filter by activity: entry_a
+ * Bit3 - filter by entry RIF: entry_rif
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauhtd, filter_fields, 0x00, 0, 8);
+
+enum mlxsw_reg_rauhtd_op {
+ MLXSW_REG_RAUHTD_OP_DUMP,
+ MLXSW_REG_RAUHTD_OP_DUMP_AND_CLEAR,
+};
+
+/* reg_rauhtd_op
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, rauhtd, op, 0x04, 24, 2);
+
+/* reg_rauhtd_num_rec
+ * At request: number of records requested
+ * At response: number of records dumped
+ * For IPv4, each record has 4 entries at request and up to 4 entries
+ * at response
+ * Range is 0..MLXSW_REG_RAUHTD_REC_MAX_NUM
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauhtd, num_rec, 0x04, 0, 8);
+
+/* reg_rauhtd_entry_a
+ * Dump only if activity has value of entry_a
+ * Reserved if filter_fields bit0 is '0'
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauhtd, entry_a, 0x08, 16, 1);
+
+enum mlxsw_reg_rauhtd_type {
+ MLXSW_REG_RAUHTD_TYPE_IPV4,
+ MLXSW_REG_RAUHTD_TYPE_IPV6,
+};
+
+/* reg_rauhtd_type
+ * Dump only if record type is:
+ * 0 - IPv4
+ * 1 - IPv6
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauhtd, type, 0x08, 0, 4);
+
+/* reg_rauhtd_entry_rif
+ * Dump only if RIF has value of entry_rif
+ * Reserved if filter_fields bit3 is '0'
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauhtd, entry_rif, 0x0C, 0, 16);
+
+static inline void mlxsw_reg_rauhtd_pack(char *payload,
+ enum mlxsw_reg_rauhtd_type type)
+{
+ MLXSW_REG_ZERO(rauhtd, payload);
+ mlxsw_reg_rauhtd_filter_fields_set(payload, MLXSW_REG_RAUHTD_FILTER_A);
+ mlxsw_reg_rauhtd_op_set(payload, MLXSW_REG_RAUHTD_OP_DUMP_AND_CLEAR);
+ mlxsw_reg_rauhtd_num_rec_set(payload, MLXSW_REG_RAUHTD_REC_MAX_NUM);
+ mlxsw_reg_rauhtd_entry_a_set(payload, 1);
+ mlxsw_reg_rauhtd_type_set(payload, type);
+}
+
+/* reg_rauhtd_ipv4_rec_num_entries
+ * Number of valid entries in this record:
+ * 0 - 1 valid entry
+ * 1 - 2 valid entries
+ * 2 - 3 valid entries
+ * 3 - 4 valid entries
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_rec_num_entries,
+ MLXSW_REG_RAUHTD_BASE_LEN, 28, 2,
+ MLXSW_REG_RAUHTD_REC_LEN, 0x00, false);
+
+/* reg_rauhtd_rec_type
+ * Record type.
+ * 0 - IPv4
+ * 1 - IPv6
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, rauhtd, rec_type, MLXSW_REG_RAUHTD_BASE_LEN, 24, 2,
+ MLXSW_REG_RAUHTD_REC_LEN, 0x00, false);
+
+#define MLXSW_REG_RAUHTD_IPV4_ENT_LEN 0x8
+
+/* reg_rauhtd_ipv4_ent_a
+ * Activity. Set for new entries. Set if a packet lookup has hit on the
+ * specific entry.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_a, MLXSW_REG_RAUHTD_BASE_LEN, 16, 1,
+ MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x00, false);
+
+/* reg_rauhtd_ipv4_ent_rif
+ * Router interface.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_rif, MLXSW_REG_RAUHTD_BASE_LEN, 0,
+ 16, MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x00, false);
+
+/* reg_rauhtd_ipv4_ent_dip
+ * Destination IPv4 address.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_dip, MLXSW_REG_RAUHTD_BASE_LEN, 0,
+ 32, MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x04, false);
+
+static inline void mlxsw_reg_rauhtd_ent_ipv4_unpack(char *payload,
+ int ent_index, u16 *p_rif,
+ u32 *p_dip)
+{
+ *p_rif = mlxsw_reg_rauhtd_ipv4_ent_rif_get(payload, ent_index);
+ *p_dip = mlxsw_reg_rauhtd_ipv4_ent_dip_get(payload, ent_index);
+}
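+
+/* Illustrative sketch only: one dump-and-clear pass over the IPv4 host
+ * table using the helpers above. Entry indices are global across the
+ * payload (4 IPv4 entries per record), so record i, entry j maps to
+ * ent_index = i * 4 + j; the num_entries field is biased by one.
+ *
+ * char rauhtd_pl[MLXSW_REG_RAUHTD_LEN];
+ * int i, j, num_rec;
+ * u16 rif;
+ * u32 dip;
+ *
+ * mlxsw_reg_rauhtd_pack(rauhtd_pl, MLXSW_REG_RAUHTD_TYPE_IPV4);
+ * err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(rauhtd), rauhtd_pl);
+ * num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
+ * for (i = 0; i < num_rec; i++) {
+ * int n = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl, i) + 1;
+ *
+ * for (j = 0; j < n; j++)
+ * mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, i * 4 + j,
+ * &rif, &dip);
+ * }
+ */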
+
/* MFCR - Management Fan Control Register
* --------------------------------------
* This register controls the settings of the Fan Speed PWM mechanism.
@@ -3435,6 +4644,123 @@ static inline void mlxsw_reg_mtmp_unpack(char *payload, unsigned int *p_temp,
mlxsw_reg_mtmp_sensor_name_memcpy_from(payload, sensor_name);
}
+/* MPAT - Monitoring Port Analyzer Table
+ * -------------------------------------
+ * The MPAT register is used to query and configure the Switch Port Analyzer
+ * Table. While an analyzer is enabled, no field other than e (enable) can
+ * be modified.
+ */
+#define MLXSW_REG_MPAT_ID 0x901A
+#define MLXSW_REG_MPAT_LEN 0x78
+
+static const struct mlxsw_reg_info mlxsw_reg_mpat = {
+ .id = MLXSW_REG_MPAT_ID,
+ .len = MLXSW_REG_MPAT_LEN,
+};
+
+/* reg_mpat_pa_id
+ * Port Analyzer ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mpat, pa_id, 0x00, 28, 4);
+
+/* reg_mpat_system_port
+ * A unique port identifier for the final destination of the packet.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, system_port, 0x00, 0, 16);
+
+/* reg_mpat_e
+ * Enable. Indicating the Port Analyzer is enabled.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, e, 0x04, 31, 1);
+
+/* reg_mpat_qos
+ * Quality Of Service Mode.
+ * 0: CONFIGURED - QoS parameters (Switch Priority and encapsulation
+ * PCP, DEI, DSCP or VL) are configured.
+ * 1: MAINTAIN - QoS parameters (Switch Priority, Color) are the
+ * same as in the original packet that triggered the mirroring. For
+ * SPAN, the PCP and DEI are also maintained.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, qos, 0x04, 26, 1);
+
+/* reg_mpat_be
+ * Best effort mode. Indicates that mirrored traffic should not cause
+ * packet drop or back pressure, but may result in the mirrored packets
+ * being discarded. Mirrored packets are forwarded in a best effort manner.
+ * 0: Do not discard mirrored packets
+ * 1: Discard mirrored packets if causing congestion
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, be, 0x04, 25, 1);
+
+static inline void mlxsw_reg_mpat_pack(char *payload, u8 pa_id,
+ u16 system_port, bool e)
+{
+ MLXSW_REG_ZERO(mpat, payload);
+ mlxsw_reg_mpat_pa_id_set(payload, pa_id);
+ mlxsw_reg_mpat_system_port_set(payload, system_port);
+ mlxsw_reg_mpat_e_set(payload, e);
+ mlxsw_reg_mpat_qos_set(payload, 1);
+ mlxsw_reg_mpat_be_set(payload, 1);
+}
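+
+/* Note that the pack helper defaults to qos = 1 (MAINTAIN) and be = 1,
+ * so mirrored copies keep the original packet's QoS parameters and are
+ * discarded rather than back-pressuring the port under congestion.
+ */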
+
+/* MPAR - Monitoring Port Analyzer Register
+ * ----------------------------------------
+ * MPAR register is used to query and configure the port analyzer port mirroring
+ * properties.
+ */
+#define MLXSW_REG_MPAR_ID 0x901B
+#define MLXSW_REG_MPAR_LEN 0x08
+
+static const struct mlxsw_reg_info mlxsw_reg_mpar = {
+ .id = MLXSW_REG_MPAR_ID,
+ .len = MLXSW_REG_MPAR_LEN,
+};
+
+/* reg_mpar_local_port
+ * The local port to mirror the packets from.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mpar, local_port, 0x00, 16, 8);
+
+enum mlxsw_reg_mpar_i_e {
+ MLXSW_REG_MPAR_TYPE_EGRESS,
+ MLXSW_REG_MPAR_TYPE_INGRESS,
+};
+
+/* reg_mpar_i_e
+ * Ingress/Egress
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mpar, i_e, 0x00, 0, 4);
+
+/* reg_mpar_enable
+ * Enable mirroring
+ * By default, port mirroring is disabled for all ports.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpar, enable, 0x04, 31, 1);
+
+/* reg_mpar_pa_id
+ * Port Analyzer ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpar, pa_id, 0x04, 0, 4);
+
+static inline void mlxsw_reg_mpar_pack(char *payload, u8 local_port,
+ enum mlxsw_reg_mpar_i_e i_e,
+ bool enable, u8 pa_id)
+{
+ MLXSW_REG_ZERO(mpar, payload);
+ mlxsw_reg_mpar_local_port_set(payload, local_port);
+ mlxsw_reg_mpar_enable_set(payload, enable);
+ mlxsw_reg_mpar_i_e_set(payload, i_e);
+ mlxsw_reg_mpar_pa_id_set(payload, pa_id);
+}
+
/* MLCR - Management LED Control Register
* --------------------------------------
* Controls the system LEDs.
@@ -3864,6 +5190,45 @@ static inline void mlxsw_reg_sbsr_rec_unpack(char *payload, int rec_index,
mlxsw_reg_sbsr_rec_max_buff_occupancy_get(payload, rec_index);
}
+/* SBIB - Shared Buffer Internal Buffer Register
+ * ---------------------------------------------
+ * The SBIB register configures per-port buffers for internal use. The
+ * internal buffers consume memory from the port buffers (note that the
+ * port buffers are also used by PBMC).
+ *
+ * For Spectrum this is used for egress mirroring.
+ */
+#define MLXSW_REG_SBIB_ID 0xB006
+#define MLXSW_REG_SBIB_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_sbib = {
+ .id = MLXSW_REG_SBIB_ID,
+ .len = MLXSW_REG_SBIB_LEN,
+};
+
+/* reg_sbib_local_port
+ * Local port number
+ * Not supported for CPU port and router port
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbib, local_port, 0x00, 16, 8);
+
+/* reg_sbib_buff_size
+ * Units represented in cells
+ * Allowed range is 0 to (cap_max_headroom_size - 1)
+ * Default is 0
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbib, buff_size, 0x08, 0, 24);
+
+static inline void mlxsw_reg_sbib_pack(char *payload, u8 local_port,
+ u32 buff_size)
+{
+ MLXSW_REG_ZERO(sbib, payload);
+ mlxsw_reg_sbib_local_port_set(payload, local_port);
+ mlxsw_reg_sbib_buff_size_set(payload, buff_size);
+}
+
static inline const char *mlxsw_reg_id_str(u16 reg_id)
{
switch (reg_id) {
@@ -3939,6 +5304,26 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
return "HTGT";
case MLXSW_REG_HPKT_ID:
return "HPKT";
+ case MLXSW_REG_RGCR_ID:
+ return "RGCR";
+ case MLXSW_REG_RITR_ID:
+ return "RITR";
+ case MLXSW_REG_RATR_ID:
+ return "RATR";
+ case MLXSW_REG_RALTA_ID:
+ return "RALTA";
+ case MLXSW_REG_RALST_ID:
+ return "RALST";
+ case MLXSW_REG_RALTB_ID:
+ return "RALTB";
+ case MLXSW_REG_RALUE_ID:
+ return "RALUE";
+ case MLXSW_REG_RAUHT_ID:
+ return "RAUHT";
+ case MLXSW_REG_RALEU_ID:
+ return "RALEU";
+ case MLXSW_REG_RAUHTD_ID:
+ return "RAUHTD";
case MLXSW_REG_MFCR_ID:
return "MFCR";
case MLXSW_REG_MFSC_ID:
@@ -3947,6 +5332,10 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
return "MFSM";
case MLXSW_REG_MTCAP_ID:
return "MTCAP";
+ case MLXSW_REG_MPAT_ID:
+ return "MPAT";
+ case MLXSW_REG_MPAR_ID:
+ return "MPAR";
case MLXSW_REG_MTMP_ID:
return "MTMP";
case MLXSW_REG_MLCR_ID:
@@ -3961,6 +5350,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
return "SBMM";
case MLXSW_REG_SBSR_ID:
return "SBSR";
+ case MLXSW_REG_SBIB_ID:
+ return "SBIB";
default:
return "*UNKNOWN*";
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 374080027..d48873bcb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -49,9 +49,14 @@
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
+#include <linux/notifier.h>
#include <linux/dcbnl.h>
+#include <linux/inetdevice.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_mirred.h>
+#include <net/netevent.h>
#include "spectrum.h"
#include "core.h"
@@ -131,6 +136,8 @@ MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
*/
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
+static bool mlxsw_sp_port_dev_check(const struct net_device *dev);
+
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info)
{
@@ -159,6 +166,303 @@ static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
return 0;
}
+static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_resources *resources;
+ int i;
+
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ if (!resources->max_span_valid)
+ return -EIO;
+
+ mlxsw_sp->span.entries_count = resources->max_span;
+ mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
+ sizeof(struct mlxsw_sp_span_entry),
+ GFP_KERNEL);
+ if (!mlxsw_sp->span.entries)
+ return -ENOMEM;
+
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++)
+ INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);
+
+ return 0;
+}
+
+static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+
+ WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
+ }
+ kfree(mlxsw_sp->span.entries);
+}
+
+static struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
+{
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ struct mlxsw_sp_span_entry *span_entry;
+ char mpat_pl[MLXSW_REG_MPAT_LEN];
+ u8 local_port = port->local_port;
+ int index;
+ int i;
+ int err;
+
+ /* find a free entry to use */
+ index = -1;
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ if (!mlxsw_sp->span.entries[i].used) {
+ index = i;
+ span_entry = &mlxsw_sp->span.entries[i];
+ break;
+ }
+ }
+ if (index < 0)
+ return NULL;
+
+ /* create a new port analyzer entry for local_port */
+ mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
+ if (err)
+ return NULL;
+
+ span_entry->used = true;
+ span_entry->id = index;
+ span_entry->ref_count = 0;
+ span_entry->local_port = local_port;
+ return span_entry;
+}
+
+static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_span_entry *span_entry)
+{
+ u8 local_port = span_entry->local_port;
+ char mpat_pl[MLXSW_REG_MPAT_LEN];
+ int pa_id = span_entry->id;
+
+ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
+ span_entry->used = false;
+}
+
+struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
+{
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ int i;
+
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+
+ if (curr->used && curr->local_port == port->local_port)
+ return curr;
+ }
+ return NULL;
+}
+
+struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
+{
+ struct mlxsw_sp_span_entry *span_entry;
+
+ span_entry = mlxsw_sp_span_entry_find(port);
+ if (span_entry) {
+ span_entry->ref_count++;
+ return span_entry;
+ }
+
+ return mlxsw_sp_span_entry_create(port);
+}
+
+static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_span_entry *span_entry)
+{
+ if (--span_entry->ref_count == 0)
+ mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
+ return 0;
+}
+
+static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
+{
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ struct mlxsw_sp_span_inspected_port *p;
+ int i;
+
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+
+ list_for_each_entry(p, &curr->bound_ports_list, list)
+ if (p->local_port == port->local_port &&
+ p->type == MLXSW_SP_SPAN_EGRESS)
+ return true;
+ }
+
+ return false;
+}
+
+static int mlxsw_sp_span_mtu_to_buffsize(int mtu)
+{
+ return MLXSW_SP_BYTES_TO_CELLS(mtu * 5 / 2) + 1;
+}
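+
+/* A worked example of the sizing above, assuming Spectrum's 96-byte
+ * cells: an MTU of 1500 gives 1500 * 5 / 2 = 3750 bytes, which rounds
+ * up to 40 cells, plus one spare cell = 41. The 2.5x factor provides
+ * headroom for mirrored copies beyond a single maximum-sized packet.
+ */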
+
+static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
+{
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ char sbib_pl[MLXSW_REG_SBIB_LEN];
+ int err;
+
+ /* If the port is egress mirrored, the shared buffer size should be
+ * updated according to the MTU value
+ */
+ if (mlxsw_sp_span_is_egress_mirror(port)) {
+ mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
+ mlxsw_sp_span_mtu_to_buffsize(mtu));
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ if (err) {
+ netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static struct mlxsw_sp_span_inspected_port *
+mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
+ struct mlxsw_sp_span_entry *span_entry)
+{
+ struct mlxsw_sp_span_inspected_port *p;
+
+ list_for_each_entry(p, &span_entry->bound_ports_list, list)
+ if (port->local_port == p->local_port)
+ return p;
+ return NULL;
+}
+
+static int
+mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
+ struct mlxsw_sp_span_entry *span_entry,
+ enum mlxsw_sp_span_type type)
+{
+ struct mlxsw_sp_span_inspected_port *inspected_port;
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ char mpar_pl[MLXSW_REG_MPAR_LEN];
+ char sbib_pl[MLXSW_REG_SBIB_LEN];
+ int pa_id = span_entry->id;
+ int err;
+
+ /* if it is an egress SPAN, bind a shared buffer to it */
+ if (type == MLXSW_SP_SPAN_EGRESS) {
+ mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
+ mlxsw_sp_span_mtu_to_buffsize(port->dev->mtu));
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ if (err) {
+ netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
+ return err;
+ }
+ }
+
+ /* bind the port to the SPAN entry */
+ mlxsw_reg_mpar_pack(mpar_pl, port->local_port, type, true, pa_id);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
+ if (err)
+ goto err_mpar_reg_write;
+
+ inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
+ if (!inspected_port) {
+ err = -ENOMEM;
+ goto err_inspected_port_alloc;
+ }
+ inspected_port->local_port = port->local_port;
+ inspected_port->type = type;
+ list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);
+
+ return 0;
+
+err_mpar_reg_write:
+err_inspected_port_alloc:
+ if (type == MLXSW_SP_SPAN_EGRESS) {
+ mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ }
+ return err;
+}
+
+static void
+mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
+ struct mlxsw_sp_span_entry *span_entry,
+ enum mlxsw_sp_span_type type)
+{
+ struct mlxsw_sp_span_inspected_port *inspected_port;
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ char mpar_pl[MLXSW_REG_MPAR_LEN];
+ char sbib_pl[MLXSW_REG_SBIB_LEN];
+ int pa_id = span_entry->id;
+
+ inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
+ if (!inspected_port)
+ return;
+
+ /* remove the inspected port */
+ mlxsw_reg_mpar_pack(mpar_pl, port->local_port, type, false, pa_id);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
+
+ /* remove the SBIB buffer if it was egress SPAN */
+ if (type == MLXSW_SP_SPAN_EGRESS) {
+ mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ }
+
+ mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
+
+ list_del(&inspected_port->list);
+ kfree(inspected_port);
+}
+
+static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
+ struct mlxsw_sp_port *to,
+ enum mlxsw_sp_span_type type)
+{
+ struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
+ struct mlxsw_sp_span_entry *span_entry;
+ int err;
+
+ span_entry = mlxsw_sp_span_entry_get(to);
+ if (!span_entry)
+ return -ENOENT;
+
+ netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
+ span_entry->id);
+
+ err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
+ if (err)
+ goto err_port_bind;
+
+ return 0;
+
+err_port_bind:
+ mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
+ return err;
+}
+
+static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
+ struct mlxsw_sp_port *to,
+ enum mlxsw_sp_span_type type)
+{
+ struct mlxsw_sp_span_entry *span_entry;
+
+ span_entry = mlxsw_sp_span_entry_find(to);
+ if (!span_entry) {
+ netdev_err(from->dev, "no span entry found\n");
+ return;
+ }
+
+ netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
+ span_entry->id);
+ mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
+}
+
static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool is_up)
{
@@ -192,23 +496,6 @@ static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
-static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid, enum mlxsw_reg_spms_state state)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char *spms_pl;
- int err;
-
- spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
- if (!spms_pl)
- return -ENOMEM;
- mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
- mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
- kfree(spms_pl);
- return err;
-}
-
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
@@ -508,6 +795,9 @@ static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
if (err)
return err;
+ err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
+ if (err)
+ goto err_span_port_mtu_update;
err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
if (err)
goto err_port_mtu_set;
@@ -515,6 +805,8 @@ static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
return 0;
err_port_mtu_set:
+ mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
+err_span_port_mtu_update:
mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
return err;
}
@@ -619,94 +911,8 @@ static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
return 0;
}
-static struct mlxsw_sp_vfid *
-mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
-{
- struct mlxsw_sp_vfid *vfid;
-
- list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) {
- if (vfid->vid == vid)
- return vfid;
- }
-
- return NULL;
-}
-
-static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
-{
- return find_first_zero_bit(mlxsw_sp->port_vfids.mapped,
- MLXSW_SP_VFID_PORT_MAX);
-}
-
-static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
-{
- u16 fid = mlxsw_sp_vfid_to_fid(vfid);
- char sfmr_pl[MLXSW_REG_SFMR_LEN];
-
- mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
-}
-
-static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
-{
- u16 fid = mlxsw_sp_vfid_to_fid(vfid);
- char sfmr_pl[MLXSW_REG_SFMR_LEN];
-
- mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
-}
-
-static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
- u16 vid)
-{
- struct device *dev = mlxsw_sp->bus_info->dev;
- struct mlxsw_sp_vfid *vfid;
- u16 n_vfid;
- int err;
-
- n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
- if (n_vfid == MLXSW_SP_VFID_PORT_MAX) {
- dev_err(dev, "No available vFIDs\n");
- return ERR_PTR(-ERANGE);
- }
-
- err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
- if (err) {
- dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
- return ERR_PTR(err);
- }
-
- vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
- if (!vfid)
- goto err_allocate_vfid;
-
- vfid->vfid = n_vfid;
- vfid->vid = vid;
-
- list_add(&vfid->list, &mlxsw_sp->port_vfids.list);
- set_bit(n_vfid, mlxsw_sp->port_vfids.mapped);
-
- return vfid;
-
-err_allocate_vfid:
- __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
- return ERR_PTR(-ENOMEM);
-}
-
-static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_vfid *vfid)
-{
- clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped);
- list_del(&vfid->list);
-
- __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);
-
- kfree(vfid);
-}
-
static struct mlxsw_sp_port *
-mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_vfid *vfid)
+mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_vport;
@@ -724,8 +930,7 @@ mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
- mlxsw_sp_vport->vport.vfid = vfid;
- mlxsw_sp_vport->vport.vid = vfid->vid;
+ mlxsw_sp_vport->vport.vid = vid;
list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);
@@ -738,13 +943,12 @@ static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
kfree(mlxsw_sp_vport);
}
-int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
- u16 vid)
+static int mlxsw_sp_port_add_vid(struct net_device *dev,
+ __be16 __always_unused proto, u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port *mlxsw_sp_vport;
- struct mlxsw_sp_vfid *vfid;
+ bool untagged = vid == 1;
int err;
/* VLAN 0 is added to HW filter when device goes up, but it is
@@ -753,37 +957,12 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
if (!vid)
return 0;
- if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
- netdev_warn(dev, "VID=%d already configured\n", vid);
+ if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid))
return 0;
- }
-
- vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
- if (!vfid) {
- vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
- if (IS_ERR(vfid)) {
- netdev_err(dev, "Failed to create vFID for VID=%d\n",
- vid);
- return PTR_ERR(vfid);
- }
- }
-
- mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid);
- if (!mlxsw_sp_vport) {
- netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
- err = -ENOMEM;
- goto err_port_vport_create;
- }
- if (!vfid->nr_vports) {
- err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid,
- true, false);
- if (err) {
- netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
- vfid->vfid);
- goto err_vport_flood_set;
- }
- }
+ mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
+ if (!mlxsw_sp_vport)
+ return -ENOMEM;
/* When adding the first VLAN interface on a bridged port we need to
* transition all the active 802.1Q bridge VLANs to use explicit
@@ -791,77 +970,36 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
*/
if (list_is_singular(&mlxsw_sp_port->vports_list)) {
err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
- if (err) {
- netdev_err(dev, "Failed to set to Virtual mode\n");
+ if (err)
goto err_port_vp_mode_trans;
- }
- }
-
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
- MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
- true,
- mlxsw_sp_vfid_to_fid(vfid->vfid),
- vid);
- if (err) {
- netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
- vid, vfid->vfid);
- goto err_port_vid_to_fid_set;
}
err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
- if (err) {
- netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
+ if (err)
goto err_port_vid_learning_set;
- }
- err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false);
- if (err) {
- netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
- vid);
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
+ if (err)
goto err_port_add_vid;
- }
-
- err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
- MLXSW_REG_SPMS_STATE_FORWARDING);
- if (err) {
- netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
- goto err_port_stp_state_set;
- }
-
- vfid->nr_vports++;
return 0;
-err_port_stp_state_set:
- mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
err_port_add_vid:
mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
- mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
- MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
- mlxsw_sp_vfid_to_fid(vfid->vfid), vid);
-err_port_vid_to_fid_set:
if (list_is_singular(&mlxsw_sp_port->vports_list))
mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
- if (!vfid->nr_vports)
- mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
- false);
-err_vport_flood_set:
mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
-err_port_vport_create:
- if (!vfid->nr_vports)
- mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
return err;
}
-int mlxsw_sp_port_kill_vid(struct net_device *dev,
- __be16 __always_unused proto, u16 vid)
+static int mlxsw_sp_port_kill_vid(struct net_device *dev,
+ __be16 __always_unused proto, u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp_port *mlxsw_sp_vport;
- struct mlxsw_sp_vfid *vfid;
- int err;
+ struct mlxsw_sp_fid *f;
/* VLAN 0 is removed from HW filter when device goes down, but
* it is reserved in our case, so simply return.
@@ -870,63 +1008,29 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
return 0;
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
- if (!mlxsw_sp_vport) {
- netdev_warn(dev, "VID=%d does not exist\n", vid);
+ if (WARN_ON(!mlxsw_sp_vport))
return 0;
- }
-
- vfid = mlxsw_sp_vport->vport.vfid;
-
- err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
- MLXSW_REG_SPMS_STATE_DISCARDING);
- if (err) {
- netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
- return err;
- }
- err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
- if (err) {
- netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
- vid);
- return err;
- }
+ mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
- err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
- if (err) {
- netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
- return err;
- }
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
- MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
- false,
- mlxsw_sp_vfid_to_fid(vfid->vfid),
- vid);
- if (err) {
- netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
- vid, vfid->vfid);
- return err;
- }
+ /* Drop FID reference. If this was the last reference, the
+ * resources will be freed.
+ */
+ f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+ if (f && !WARN_ON(!f->leave))
+ f->leave(mlxsw_sp_vport);
/* When removing the last VLAN interface on a bridged port we need to
* transition all active 802.1Q bridge VLANs to use VID to FID
* mappings and set port's mode to VLAN mode.
*/
- if (list_is_singular(&mlxsw_sp_port->vports_list)) {
- err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
- if (err) {
- netdev_err(dev, "Failed to set to VLAN mode\n");
- return err;
- }
- }
+ if (list_is_singular(&mlxsw_sp_port->vports_list))
+ mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
- vfid->nr_vports--;
mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
- /* Destroy the vFID if no vPorts are assigned to it anymore. */
- if (!vfid->nr_vports)
- mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid);
-
return 0;
}
@@ -951,16 +1055,164 @@ static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
return 0;
}
+static struct mlxsw_sp_port_mall_tc_entry *
+mlxsw_sp_port_mirror_entry_find(struct mlxsw_sp_port *port,
+ unsigned long cookie)
+{
+ struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
+
+ list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
+ if (mall_tc_entry->cookie == cookie)
+ return mall_tc_entry;
+
+ return NULL;
+}
+
+static int
+mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_cls_matchall_offload *cls,
+ const struct tc_action *a,
+ bool ingress)
+{
+ struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
+ struct net *net = dev_net(mlxsw_sp_port->dev);
+ enum mlxsw_sp_span_type span_type;
+ struct mlxsw_sp_port *to_port;
+ struct net_device *to_dev;
+ int ifindex;
+ int err;
+
+ ifindex = tcf_mirred_ifindex(a);
+ to_dev = __dev_get_by_index(net, ifindex);
+ if (!to_dev) {
+ netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
+ return -EINVAL;
+ }
+
+ if (!mlxsw_sp_port_dev_check(to_dev)) {
+ netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port");
+ return -ENOTSUPP;
+ }
+ to_port = netdev_priv(to_dev);
+
+ mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
+ if (!mall_tc_entry)
+ return -ENOMEM;
+
+ mall_tc_entry->cookie = cls->cookie;
+ mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
+ mall_tc_entry->mirror.to_local_port = to_port->local_port;
+ mall_tc_entry->mirror.ingress = ingress;
+ list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
+
+ span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
+ err = mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
+ if (err)
+ goto err_mirror_add;
+ return 0;
+
+err_mirror_add:
+ list_del(&mall_tc_entry->list);
+ kfree(mall_tc_entry);
+ return err;
+}
+
+static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
+ __be16 protocol,
+ struct tc_cls_matchall_offload *cls,
+ bool ingress)
+{
+ const struct tc_action *a;
+ LIST_HEAD(actions);
+ int err;
+
+ if (!tc_single_action(cls->exts)) {
+ netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
+ return -ENOTSUPP;
+ }
+
+ tcf_exts_to_list(cls->exts, &actions);
+ list_for_each_entry(a, &actions, list) {
+ if (!is_tcf_mirred_mirror(a) || protocol != htons(ETH_P_ALL))
+ return -ENOTSUPP;
+
+ err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port, cls,
+ a, ingress);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
+ enum mlxsw_sp_span_type span_type;
+ struct mlxsw_sp_port *to_port;
+
+ mall_tc_entry = mlxsw_sp_port_mirror_entry_find(mlxsw_sp_port,
+ cls->cookie);
+ if (!mall_tc_entry) {
+ netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
+ return;
+ }
+
+ switch (mall_tc_entry->type) {
+ case MLXSW_SP_PORT_MALL_MIRROR:
+ to_port = mlxsw_sp->ports[mall_tc_entry->mirror.to_local_port];
+ span_type = mall_tc_entry->mirror.ingress ?
+ MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
+
+ mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ list_del(&mall_tc_entry->list);
+ kfree(mall_tc_entry);
+}
+
+static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
+ __be16 proto, struct tc_to_netdev *tc)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);
+
+ if (tc->type == TC_SETUP_MATCHALL) {
+ switch (tc->cls_mall->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port,
+ proto,
+ tc->cls_mall,
+ ingress);
+ case TC_CLSMATCHALL_DESTROY:
+ mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port,
+ tc->cls_mall);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return -ENOTSUPP;
+}
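+
+/* Illustrative usage sketch only (device names hypothetical): a mirror
+ * session configured from user space reaches the matchall handler above
+ * via commands such as:
+ *
+ * tc qdisc add dev sw1p1 handle ffff: ingress
+ * tc filter add dev sw1p1 parent ffff: protocol all matchall \
+ * skip_sw action mirred egress mirror dev sw1p2
+ */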
+
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_open = mlxsw_sp_port_open,
.ndo_stop = mlxsw_sp_port_stop,
.ndo_start_xmit = mlxsw_sp_port_xmit,
+ .ndo_setup_tc = mlxsw_sp_setup_tc,
.ndo_set_rx_mode = mlxsw_sp_set_rx_mode,
.ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
.ndo_change_mtu = mlxsw_sp_port_change_mtu,
.ndo_get_stats64 = mlxsw_sp_port_get_stats64,
.ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
+ .ndo_neigh_construct = mlxsw_sp_router_neigh_construct,
+ .ndo_neigh_destroy = mlxsw_sp_router_neigh_destroy,
.ndo_fdb_add = switchdev_port_fdb_add,
.ndo_fdb_del = switchdev_port_fdb_del,
.ndo_fdb_dump = switchdev_port_fdb_dump,
@@ -1055,7 +1307,7 @@ struct mlxsw_sp_port_hw_stats {
u64 (*getter)(char *payload);
};
-static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
{
.str = "a_frames_transmitted_ok",
.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
@@ -1136,6 +1388,90 @@ static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
+ {
+ .str = "rx_octets_prio",
+ .getter = mlxsw_reg_ppcnt_rx_octets_get,
+ },
+ {
+ .str = "rx_frames_prio",
+ .getter = mlxsw_reg_ppcnt_rx_frames_get,
+ },
+ {
+ .str = "tx_octets_prio",
+ .getter = mlxsw_reg_ppcnt_tx_octets_get,
+ },
+ {
+ .str = "tx_frames_prio",
+ .getter = mlxsw_reg_ppcnt_tx_frames_get,
+ },
+ {
+ .str = "rx_pause_prio",
+ .getter = mlxsw_reg_ppcnt_rx_pause_get,
+ },
+ {
+ .str = "rx_pause_duration_prio",
+ .getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
+ },
+ {
+ .str = "tx_pause_prio",
+ .getter = mlxsw_reg_ppcnt_tx_pause_get,
+ },
+ {
+ .str = "tx_pause_duration_prio",
+ .getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
+
+static u64 mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(char *ppcnt_pl)
+{
+ u64 transmit_queue = mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
+
+ return MLXSW_SP_CELLS_TO_BYTES(transmit_queue);
+}
+
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
+ {
+ .str = "tc_transmit_queue_tc",
+ .getter = mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get,
+ },
+ {
+ .str = "tc_no_buffer_discard_uc_tc",
+ .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
+
+#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
+ (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
+ MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
+ IEEE_8021QAZ_MAX_TCS)
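+
+/* With the eight per-priority and two per-TC counters above replicated
+ * across IEEE_8021QAZ_MAX_TCS (8) traffic classes, this adds
+ * (8 + 2) * 8 = 80 entries on top of the IEEE 802.3 counters.
+ */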
+
+static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
+ snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
+ mlxsw_sp_port_hw_prio_stats[i].str, prio);
+ *p += ETH_GSTRING_LEN;
+ }
+}
+
+static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
+ snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
+ mlxsw_sp_port_hw_tc_stats[i].str, tc);
+ *p += ETH_GSTRING_LEN;
+ }
+}
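+
+/* The helpers above suffix each string with the priority or TC number,
+ * yielding ethtool stat names such as "rx_octets_prio_3" and
+ * "tc_transmit_queue_tc_7".
+ */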
+
static void mlxsw_sp_port_get_strings(struct net_device *dev,
u32 stringset, u8 *data)
{
@@ -1149,6 +1485,13 @@ static void mlxsw_sp_port_get_strings(struct net_device *dev,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ mlxsw_sp_port_get_prio_strings(&p, i);
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ mlxsw_sp_port_get_tc_strings(&p, i);
+
break;
}
}
@@ -1176,27 +1519,80 @@ static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}
-static void mlxsw_sp_port_get_stats(struct net_device *dev,
- struct ethtool_stats *stats, u64 *data)
+static int
+mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
+ int *p_len, enum mlxsw_reg_ppcnt_grp grp)
+{
+ switch (grp) {
+ case MLXSW_REG_PPCNT_IEEE_8023_CNT:
+ *p_hw_stats = mlxsw_sp_port_hw_stats;
+ *p_len = MLXSW_SP_PORT_HW_STATS_LEN;
+ break;
+ case MLXSW_REG_PPCNT_PRIO_CNT:
+ *p_hw_stats = mlxsw_sp_port_hw_prio_stats;
+ *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
+ break;
+ case MLXSW_REG_PPCNT_TC_CNT:
+ *p_hw_stats = mlxsw_sp_port_hw_tc_stats;
+ *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
+ break;
+ default:
+ WARN_ON(1);
+ return -ENOTSUPP;
+ }
+ return 0;
+}
+
+static void __mlxsw_sp_port_get_stats(struct net_device *dev,
+ enum mlxsw_reg_ppcnt_grp grp, int prio,
+ u64 *data, int data_index)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_port_hw_stats *hw_stats;
char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
- int i;
+ int i, len;
int err;
- mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port,
- MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
+ err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
+ if (err)
+ return;
+ mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
- for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
- data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
+ for (i = 0; i < len; i++)
+ data[data_index + i] = !err ? hw_stats[i].getter(ppcnt_pl) : 0;
+}
+
+static void mlxsw_sp_port_get_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ int i, data_index = 0;
+
+ /* IEEE 802.3 Counters */
+ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
+ data, data_index);
+ data_index = MLXSW_SP_PORT_HW_STATS_LEN;
+
+ /* Per-Priority Counters */
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
+ data, data_index);
+ data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
+ }
+
+ /* Per-TC Counters */
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
+ data, data_index);
+ data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
+ }
}
static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
- return MLXSW_SP_PORT_HW_STATS_LEN;
+ return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
default:
return -EOPNOTSUPP;
}
@@ -1655,6 +2051,18 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
return 0;
}
+static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_port->pvid = 1;
+
+ return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1);
+}
+
+static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
+}
+
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
bool split, u8 module, u8 width, u8 lane)
{
@@ -1686,6 +2094,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_untagged_vlans_alloc;
}
INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
+ INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
mlxsw_sp_port->pcpu_stats =
netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
@@ -1697,6 +2106,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
+ err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_swid_set;
+ }
+
err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
@@ -1707,7 +2123,8 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
netif_carrier_off(dev);
dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
- NETIF_F_HW_VLAN_CTAG_FILTER;
+ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
+ dev->hw_features |= NETIF_F_HW_TC;
/* Each packet needs to have a Tx header (metadata) on top all other
* headers.
@@ -1721,13 +2138,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_system_port_mapping_set;
}
- err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
- mlxsw_sp_port->local_port);
- goto err_port_swid_set;
- }
-
err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
@@ -1768,7 +2178,15 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_dcb_init;
}
+ err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_pvid_vport_create;
+ }
+
mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
+ mlxsw_sp->ports[local_port] = mlxsw_sp_port;
err = register_netdev(dev);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
@@ -1785,27 +2203,26 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_core_port_init;
}
- err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
- if (err)
- goto err_port_vlan_init;
-
- mlxsw_sp->ports[local_port] = mlxsw_sp_port;
return 0;
-err_port_vlan_init:
- mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
err_core_port_init:
unregister_netdev(dev);
err_register_netdev:
+ mlxsw_sp->ports[local_port] = NULL;
+ mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
+ mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
+err_port_pvid_vport_create:
+ mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
-err_port_swid_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
+ mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
+err_port_swid_set:
free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
kfree(mlxsw_sp_port->untagged_vlans);
@@ -1816,40 +2233,24 @@ err_port_active_vlans_alloc:
return err;
}
-static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
-{
- struct net_device *dev = mlxsw_sp_port->dev;
- struct mlxsw_sp_port *mlxsw_sp_vport, *tmp;
-
- list_for_each_entry_safe(mlxsw_sp_vport, tmp,
- &mlxsw_sp_port->vports_list, vport.list) {
- u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
-
- /* vPorts created for VLAN devices should already be gone
- * by now, since we unregistered the port netdev.
- */
- WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev));
- mlxsw_sp_port_kill_vid(dev, 0, vid);
- }
-}
-
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port)
return;
- mlxsw_sp->ports[local_port] = NULL;
mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
- mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
- mlxsw_sp_port_vports_fini(mlxsw_sp_port);
+ mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
+ mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
+ mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
free_percpu(mlxsw_sp_port->pcpu_stats);
kfree(mlxsw_sp_port->untagged_vlans);
kfree(mlxsw_sp_port->active_vlans);
+ WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
free_netdev(mlxsw_sp_port->dev);
}
@@ -2086,11 +2487,8 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
local_port = mlxsw_reg_pude_local_port_get(pude_pl);
mlxsw_sp_port = mlxsw_sp->ports[local_port];
- if (!mlxsw_sp_port) {
- dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
- local_port);
+ if (!mlxsw_sp_port)
return;
- }
status = mlxsw_reg_pude_oper_status_get(pude_pl);
if (status == MLXSW_PORT_OPER_STATUS_UP) {
@@ -2245,6 +2643,51 @@ static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
.local_port = MLXSW_PORT_DONT_CARE,
.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
},
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_ARPBC,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_ARPUC,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_MTUERROR,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_TTLERROR,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_LBERROR,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_OSPF,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IP2ME,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_RTR_INGRESS0,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_HOST_MISS_IPV4,
+ },
};
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
@@ -2285,7 +2728,7 @@ err_rx_trap_set:
mlxsw_sp);
err_rx_listener_register:
for (i--; i >= 0; i--) {
- mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
mlxsw_sp_rx_listener[i].trap_id);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
@@ -2302,7 +2745,7 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
int i;
for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
- mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
mlxsw_sp_rx_listener[i].trap_id);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
@@ -2381,8 +2824,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->core = mlxsw_core;
mlxsw_sp->bus_info = mlxsw_bus_info;
- INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list);
- INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list);
+ INIT_LIST_HEAD(&mlxsw_sp->fids);
+ INIT_LIST_HEAD(&mlxsw_sp->vfids.list);
INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);
err = mlxsw_sp_base_mac_get(mlxsw_sp);
@@ -2391,16 +2834,10 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
return err;
}
- err = mlxsw_sp_ports_create(mlxsw_sp);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
- return err;
- }
-
err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
- goto err_event_register;
+ return err;
}
err = mlxsw_sp_traps_init(mlxsw_sp);
@@ -2433,8 +2870,32 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_switchdev_init;
}
+ err = mlxsw_sp_router_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
+ goto err_router_init;
+ }
+
+ err = mlxsw_sp_span_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
+ goto err_span_init;
+ }
+
+ err = mlxsw_sp_ports_create(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
+ goto err_ports_create;
+ }
+
return 0;
+err_ports_create:
+ mlxsw_sp_span_fini(mlxsw_sp);
+err_span_init:
+ mlxsw_sp_router_fini(mlxsw_sp);
+err_router_init:
+ mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
err_lag_init:
mlxsw_sp_buffers_fini(mlxsw_sp);
@@ -2443,20 +2904,25 @@ err_flood_init:
mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
-err_event_register:
- mlxsw_sp_ports_remove(mlxsw_sp);
return err;
}
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ int i;
+ mlxsw_sp_ports_remove(mlxsw_sp);
+ mlxsw_sp_span_fini(mlxsw_sp);
+ mlxsw_sp_router_fini(mlxsw_sp);
mlxsw_sp_switchdev_fini(mlxsw_sp);
mlxsw_sp_buffers_fini(mlxsw_sp);
mlxsw_sp_traps_fini(mlxsw_sp);
mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
- mlxsw_sp_ports_remove(mlxsw_sp);
+ WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
+ WARN_ON(!list_empty(&mlxsw_sp->fids));
+ for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
+ WARN_ON_ONCE(mlxsw_sp->rifs[i]);
}
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
@@ -2487,12 +2953,17 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = {
.max_ib_mc = 0,
.used_max_pkey = 1,
.max_pkey = 0,
+ .used_kvd_sizes = 1,
+ .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
+ .kvd_hash_single_size = MLXSW_SP_KVD_HASH_SINGLE_SIZE,
+ .kvd_hash_double_size = MLXSW_SP_KVD_HASH_DOUBLE_SIZE,
.swid_config = {
{
.used_type = 1,
.type = MLXSW_PORT_SWID_TYPE_ETH,
}
},
+ .resource_query_enable = 1,
};
static struct mlxsw_driver mlxsw_sp_driver = {
@@ -2518,16 +2989,631 @@ static struct mlxsw_driver mlxsw_sp_driver = {
.profile = &mlxsw_sp_config_profile,
};
-static int
-mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port)
+static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
+}
+
+static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
+{
+ struct net_device *lower_dev;
+ struct list_head *iter;
+
+ if (mlxsw_sp_port_dev_check(dev))
+ return netdev_priv(dev);
+
+ netdev_for_each_all_lower_dev(dev, lower_dev, iter) {
+ if (mlxsw_sp_port_dev_check(lower_dev))
+ return netdev_priv(lower_dev);
+ }
+ return NULL;
+}
+
+static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port;
+
+ mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
+ return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
+}
+
+static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
+{
+ struct net_device *lower_dev;
+ struct list_head *iter;
+
+ if (mlxsw_sp_port_dev_check(dev))
+ return netdev_priv(dev);
+
+ netdev_for_each_all_lower_dev_rcu(dev, lower_dev, iter) {
+ if (mlxsw_sp_port_dev_check(lower_dev))
+ return netdev_priv(lower_dev);
+ }
+ return NULL;
+}
+
+struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port;
+
+ rcu_read_lock();
+ mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
+ if (mlxsw_sp_port)
+ dev_hold(mlxsw_sp_port->dev);
+ rcu_read_unlock();
+ return mlxsw_sp_port;
+}
+
+void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ dev_put(mlxsw_sp_port->dev);
+}
+
+static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
+ unsigned long event)
+{
+ switch (event) {
+ case NETDEV_UP:
+ if (!r)
+ return true;
+ r->ref_count++;
+ return false;
+ case NETDEV_DOWN:
+ if (r && --r->ref_count == 0)
+ return true;
+ /* It is possible we already removed the RIF ourselves
+ * if it was assigned to a netdev that is now a bridge
+ * or LAG slave.
+ */
+ return false;
+ }
+
+ return false;
+}
+
+static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
+ if (!mlxsw_sp->rifs[i])
+ return i;
+
+ return MLXSW_SP_RIF_MAX;
+}
+
+static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
+ bool *p_lagged, u16 *p_system_port)
+{
+ u8 local_port = mlxsw_sp_vport->local_port;
+
+ *p_lagged = mlxsw_sp_vport->lagged;
+ *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
+}
+
+static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct net_device *l3_dev, u16 rif,
+ bool create)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ bool lagged = mlxsw_sp_vport->lagged;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+ u16 system_port;
+
+ mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif,
+ l3_dev->mtu, l3_dev->dev_addr);
+
+ mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
+ mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
+ mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
+
+static struct mlxsw_sp_fid *
+mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
+{
+ struct mlxsw_sp_fid *f;
+
+ f = kzalloc(sizeof(*f), GFP_KERNEL);
+ if (!f)
+ return NULL;
+
+ f->leave = mlxsw_sp_vport_rif_sp_leave;
+ f->ref_count = 0;
+ f->dev = l3_dev;
+ f->fid = fid;
+
+ return f;
+}
+
+static struct mlxsw_sp_rif *
+mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f)
+{
+ struct mlxsw_sp_rif *r;
+
+ r = kzalloc(sizeof(*r), GFP_KERNEL);
+ if (!r)
+ return NULL;
+
+ ether_addr_copy(r->addr, l3_dev->dev_addr);
+ r->mtu = l3_dev->mtu;
+ r->ref_count = 1;
+ r->dev = l3_dev;
+ r->rif = rif;
+ r->f = f;
+
+ return r;
+}
+
+static struct mlxsw_sp_rif *
+mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct net_device *l3_dev)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ struct mlxsw_sp_fid *f;
+ struct mlxsw_sp_rif *r;
+ u16 fid, rif;
+ int err;
+
+ rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
+ if (rif == MLXSW_SP_RIF_MAX)
+ return ERR_PTR(-ERANGE);
+
+ err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true);
+ if (err)
+ return ERR_PTR(err);
+
+ fid = mlxsw_sp_rif_sp_to_fid(rif);
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
+ if (err)
+ goto err_rif_fdb_op;
+
+ f = mlxsw_sp_rfid_alloc(fid, l3_dev);
+ if (!f) {
+ err = -ENOMEM;
+ goto err_rfid_alloc;
+ }
+
+ r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
+ if (!r) {
+ err = -ENOMEM;
+ goto err_rif_alloc;
+ }
+
+ f->r = r;
+ mlxsw_sp->rifs[rif] = r;
+
+ return r;
+
+err_rif_alloc:
+ kfree(f);
+err_rfid_alloc:
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
+err_rif_fdb_op:
+ mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
+ return ERR_PTR(err);
+}
+
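mlxsw_sp_vport_rif_sp_create() uses the standard kernel unwinding idiom: each completed step has a matching error label, and the labels run the earlier steps' cleanups in reverse order, so a failure at step N undoes exactly steps 1..N-1. A self-contained illustration of the shape (made-up step names):

#include <stdio.h>

static int step(const char *name, int fail)
{
	printf("do   %s\n", name);
	return fail ? -1 : 0;
}

static void undo(const char *name)
{
	printf("undo %s\n", name);
}

/* Labels are named after the step that failed; falling through them
 * replays the earlier steps' cleanups in reverse order.
 */
static int create(int fail_at)
{
	int err;

	err = step("rif_op", fail_at == 1);
	if (err)
		return err;		/* nothing to undo yet */

	err = step("fdb_op", fail_at == 2);
	if (err)
		goto err_fdb_op;

	err = step("alloc", fail_at == 3);
	if (err)
		goto err_alloc;

	return 0;

err_alloc:
	undo("fdb_op");
err_fdb_op:
	undo("rif_op");
	return err;
}

int main(void)
{
	return create(3) ? 1 : 0;	/* do x3, then undo fdb_op, rif_op */
}
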
+static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct mlxsw_sp_rif *r)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ struct net_device *l3_dev = r->dev;
+ struct mlxsw_sp_fid *f = r->f;
+ u16 fid = f->fid;
+ u16 rif = r->rif;
+
+ mlxsw_sp->rifs[rif] = NULL;
+ f->r = NULL;
+
+ kfree(r);
+
+ kfree(f);
+
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
+
+ mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
+}
+
+static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct net_device *l3_dev)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ struct mlxsw_sp_rif *r;
+
+ r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
+ if (!r) {
+ r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
+ if (IS_ERR(r))
+ return PTR_ERR(r);
+ }
+
+ mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f);
+ r->f->ref_count++;
+
+ netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid);
+
+ return 0;
+}
+
+static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+ struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+
+ netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
+
+ mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
+ if (--f->ref_count == 0)
+ mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r);
+}
+
+static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
+ struct net_device *port_dev,
+ unsigned long event, u16 vid)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
+ struct mlxsw_sp_port *mlxsw_sp_vport;
+
+ mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
+ if (WARN_ON(!mlxsw_sp_vport))
+ return -EINVAL;
+
+ switch (event) {
+ case NETDEV_UP:
+ return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
+ case NETDEV_DOWN:
+ mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
+ break;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
+ unsigned long event)
+{
+ if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev))
+ return 0;
+
+ return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
+}
+
+static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
+ struct net_device *lag_dev,
+ unsigned long event, u16 vid)
+{
+ struct net_device *port_dev;
+ struct list_head *iter;
+ int err;
+
+ netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
+ if (mlxsw_sp_port_dev_check(port_dev)) {
+ err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
+ event, vid);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
+ unsigned long event)
+{
+ if (netif_is_bridge_port(lag_dev))
+ return 0;
+
+ return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
+}
+
+static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *l3_dev)
+{
+ u16 fid;
+
+ if (is_vlan_dev(l3_dev))
+ fid = vlan_dev_vlan_id(l3_dev);
+ else if (mlxsw_sp->master_bridge.dev == l3_dev)
+ fid = 1;
+ else
+ return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
+
+ return mlxsw_sp_fid_find(mlxsw_sp, fid);
+}
+
+static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
+{
+ return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
+ MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
+}
+
+static u16 mlxsw_sp_flood_table_index_get(u16 fid)
+{
+ return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
+}
+
+static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
+ bool set)
+{
+ enum mlxsw_flood_table_type table_type;
+ char *sftr_pl;
+ u16 index;
+ int err;
+
+ sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+ if (!sftr_pl)
+ return -ENOMEM;
+
+ table_type = mlxsw_sp_flood_table_type_get(fid);
+ index = mlxsw_sp_flood_table_index_get(fid);
+ mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, index, table_type,
+ 1, MLXSW_PORT_ROUTER_PORT, set);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+
+ kfree(sftr_pl);
+ return err;
+}
+
+static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
+{
+ if (mlxsw_sp_fid_is_vfid(fid))
+ return MLXSW_REG_RITR_FID_IF;
+ else
+ return MLXSW_REG_RITR_VLAN_IF;
+}
+
+static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *l3_dev,
+ u16 fid, u16 rif,
+ bool create)
+{
+ enum mlxsw_reg_ritr_if_type rif_type;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+
+ rif_type = mlxsw_sp_rif_type_get(fid);
+ mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu,
+ l3_dev->dev_addr);
+ mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *l3_dev,
+ struct mlxsw_sp_fid *f)
+{
+ struct mlxsw_sp_rif *r;
+ u16 rif;
+ int err;
+
+ rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
+ if (rif == MLXSW_SP_RIF_MAX)
+ return -ERANGE;
+
+ err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
+ if (err)
+ goto err_rif_bridge_op;
+
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
+ if (err)
+ goto err_rif_fdb_op;
+
+ r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
+ if (!r) {
+ err = -ENOMEM;
+ goto err_rif_alloc;
+ }
+
+ f->r = r;
+ mlxsw_sp->rifs[rif] = r;
+
+ netdev_dbg(l3_dev, "RIF=%d created\n", rif);
+
+ return 0;
+
+err_rif_alloc:
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
+err_rif_fdb_op:
+ mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
+err_rif_bridge_op:
+ mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
+ return err;
+}
+
+void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *r)
+{
+ struct net_device *l3_dev = r->dev;
+ struct mlxsw_sp_fid *f = r->f;
+ u16 rif = r->rif;
+
+ mlxsw_sp->rifs[rif] = NULL;
+ f->r = NULL;
+
+ kfree(r);
+
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
+
+ mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
+
+ mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
+
+ netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
+}
+
+static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
+ struct net_device *br_dev,
+ unsigned long event)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
+ struct mlxsw_sp_fid *f;
+
+ /* The FID is an actual FID if the L3 device is the VLAN-aware
+ * bridge or a VLAN device on top of it. Otherwise, the L3 device
+ * is a VLAN-unaware bridge and we get a vFID.
+ */
+ f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
+ if (WARN_ON(!f))
+ return -EINVAL;
+
+ switch (event) {
+ case NETDEV_UP:
+ return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
+ case NETDEV_DOWN:
+ mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
+ break;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
+ unsigned long event)
+{
+ struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
+ u16 vid = vlan_dev_vlan_id(vlan_dev);
+
+ if (mlxsw_sp_port_dev_check(real_dev))
+ return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
+ vid);
+ else if (netif_is_lag_master(real_dev))
+ return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
+ vid);
+ else if (netif_is_bridge_master(real_dev) &&
+ mlxsw_sp->master_bridge.dev == real_dev)
+ return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
+ event);
+
+ return 0;
+}
+
+static int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
+ struct net_device *dev = ifa->ifa_dev->dev;
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_rif *r;
+ int err = 0;
+
+ mlxsw_sp = mlxsw_sp_lower_get(dev);
+ if (!mlxsw_sp)
+ goto out;
+
+ r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!mlxsw_sp_rif_should_config(r, event))
+ goto out;
+
+ if (mlxsw_sp_port_dev_check(dev))
+ err = mlxsw_sp_inetaddr_port_event(dev, event);
+ else if (netif_is_lag_master(dev))
+ err = mlxsw_sp_inetaddr_lag_event(dev, event);
+ else if (netif_is_bridge_master(dev))
+ err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
+ else if (is_vlan_dev(dev))
+ err = mlxsw_sp_inetaddr_vlan_event(dev, event);
+
+out:
+ return notifier_from_errno(err);
+}
+
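The handler returns through notifier_from_errno(), which folds a negative errno into the notifier return convention: success becomes NOTIFY_OK, while an error sets NOTIFY_STOP_MASK (stop calling further chain entries) and stashes the errno so notifier_to_errno() can recover it at the call site. A userspace re-implementation of the two helpers, with values as I believe they appear in the kernel's <linux/notifier.h>:

#include <assert.h>
#include <stdio.h>

#define NOTIFY_DONE		0x0000
#define NOTIFY_OK		0x0001
#define NOTIFY_STOP_MASK	0x8000

/* 0 -> NOTIFY_OK; -errno -> stop the chain and carry the errno */
static int notifier_from_errno(int err)
{
	if (err)
		return NOTIFY_STOP_MASK | (NOTIFY_OK - err);
	return NOTIFY_OK;
}

/* Recover the (negative) errno from a notifier return value */
static int notifier_to_errno(int ret)
{
	ret &= ~NOTIFY_STOP_MASK;
	return ret > NOTIFY_OK ? NOTIFY_OK - ret : 0;
}

int main(void)
{
	/* the errno survives the round trip through the chain */
	assert(notifier_to_errno(notifier_from_errno(-22)) == -22);
	assert(notifier_from_errno(0) == NOTIFY_OK);
	assert(notifier_to_errno(NOTIFY_DONE) == 0);
	puts("ok");
	return 0;
}
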
+static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif,
+ const char *mac, int mtu)
+{
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+ int err;
+
+ mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
+ mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
+ mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
+{
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_rif *r;
+ int err;
+
+ mlxsw_sp = mlxsw_sp_lower_get(dev);
+ if (!mlxsw_sp)
+ return 0;
+
+ r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!r)
+ return 0;
+
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu);
+ if (err)
+ goto err_rif_edit;
+
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true);
+ if (err)
+ goto err_rif_fdb_op;
+
+ ether_addr_copy(r->addr, dev->dev_addr);
+ r->mtu = dev->mtu;
+
+ netdev_dbg(dev, "Updated RIF=%d\n", r->rif);
+
+ return 0;
+
+err_rif_fdb_op:
+ mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu);
+err_rif_edit:
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true);
+ return err;
+}
+
+static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
+ u16 fid)
+{
+ if (mlxsw_sp_fid_is_vfid(fid))
+ return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
+ else
+ return test_bit(fid, lag_port->active_vlans);
+}
+
+static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 fid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char sfdf_pl[MLXSW_REG_SFDF_LEN];
+ u8 local_port = mlxsw_sp_port->local_port;
+ u16 lag_id = mlxsw_sp_port->lag_id;
+ int i, count = 0;
- mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT);
- mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port);
+ if (!mlxsw_sp_port->lagged)
+ return true;
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
+ for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
+ struct mlxsw_sp_port *lag_port;
+
+ lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
+ if (!lag_port || lag_port->local_port == local_port)
+ continue;
+ if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
+ count++;
+ }
+
+ return !count;
}
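
mlxsw_sp_port_fdb_should_flush() guards against wiping FDB entries that sibling LAG members still rely on: a {LAG ID, FID} flush affects every member port, so it is only safe once the leaving port is the last member mapped to that FID. A compact model of the decision (hypothetical helpers, not the driver code):

#include <stdbool.h>
#include <stdio.h>

#define PORTS_PER_LAG 4

/* Flushing by {LAG, FID} hits all member ports, so flush only when
 * no other member of the LAG still uses the FID.
 */
static bool should_flush(const bool uses_fid[PORTS_PER_LAG], int leaving)
{
	int i;

	for (i = 0; i < PORTS_PER_LAG; i++) {
		if (i == leaving)
			continue;
		if (uses_fid[i])
			return false;	/* a peer still needs the entries */
	}
	return true;
}

int main(void)
{
	bool uses_fid[PORTS_PER_LAG] = { true, true, false, false };

	printf("%d\n", should_flush(uses_fid, 0));	/* 0: port 1 remains */
	uses_fid[1] = false;
	printf("%d\n", should_flush(uses_fid, 0));	/* 1: last user */
	return 0;
}
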
static int
@@ -2542,17 +3628,8 @@ mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
mlxsw_sp_port->local_port);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
-}
-
-static int
-mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char sfdf_pl[MLXSW_REG_SFDF_LEN];
-
- mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG);
- mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
+ netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
+ mlxsw_sp_port->local_port, fid);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
@@ -2568,71 +3645,64 @@ mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
+ netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
+ mlxsw_sp_port->lag_id, fid);
+
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
-static int
-__mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port)
+int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
- int err, last_err = 0;
- u16 vid;
-
- for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
- err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid);
- if (err)
- last_err = err;
- }
+ if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
+ return 0;
- return last_err;
+ if (mlxsw_sp_port->lagged)
+ return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
+ fid);
+ else
+ return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
}
-static int
-__mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port)
+static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp)
{
- int err, last_err = 0;
- u16 vid;
-
- for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
- err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid);
- if (err)
- last_err = err;
- }
+ struct mlxsw_sp_fid *f, *tmp;
- return last_err;
+ list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list)
+ if (--f->ref_count == 0)
+ mlxsw_sp_fid_destroy(mlxsw_sp, f);
+ else
+ WARN_ON_ONCE(1);
}
-static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port)
+static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *br_dev)
{
- if (!list_empty(&mlxsw_sp_port->vports_list))
- if (mlxsw_sp_port->lagged)
- return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port);
- else
- return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port);
- else
- if (mlxsw_sp_port->lagged)
- return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port);
- else
- return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port);
+ return !mlxsw_sp->master_bridge.dev ||
+ mlxsw_sp->master_bridge.dev == br_dev;
}
-static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport)
+static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *br_dev)
{
- u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport);
- u16 fid = mlxsw_sp_vfid_to_fid(vfid);
-
- if (mlxsw_sp_vport->lagged)
- return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport,
- fid);
- else
- return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid);
+ mlxsw_sp->master_bridge.dev = br_dev;
+ mlxsw_sp->master_bridge.ref_count++;
}
-static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
+static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
{
- return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
+ if (--mlxsw_sp->master_bridge.ref_count == 0) {
+ mlxsw_sp->master_bridge.dev = NULL;
+ /* It's possible upper VLAN devices are still holding
+ * references to underlying FIDs. Drop the reference
+ * and release the resources if it was the last one.
+ * If it wasn't, then something bad happened.
+ */
+ mlxsw_sp_master_bridge_gone_sync(mlxsw_sp);
+ }
}
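
The check/inc/dec trio above implements a refcounted singleton: the hardware supports a single VLAN-aware bridge, so the first port to join pins the bridge device, later joins must name the same device, and the last leave releases it. A minimal sketch, with a string standing in for struct net_device:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct master_bridge {
	const char *dev;	/* stands in for struct net_device * */
	unsigned int ref_count;
};

/* Joining is allowed if no bridge is pinned yet or it is the same one */
static int bridge_check(const struct master_bridge *mb, const char *dev)
{
	return !mb->dev || mb->dev == dev;
}

static void bridge_inc(struct master_bridge *mb, const char *dev)
{
	mb->dev = dev;
	mb->ref_count++;
}

static void bridge_dec(struct master_bridge *mb)
{
	if (--mb->ref_count == 0)
		mb->dev = NULL;	/* a different bridge may now be used */
}

int main(void)
{
	struct master_bridge mb = { NULL, 0 };
	const char *br0 = "br0", *br1 = "br1";

	assert(bridge_check(&mb, br0));
	bridge_inc(&mb, br0);
	assert(!bridge_check(&mb, br1));	/* second bridge refused */
	bridge_dec(&mb);
	assert(bridge_check(&mb, br1));		/* slot is free again */
	puts("ok");
	return 0;
}
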
-static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
+static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *br_dev)
{
struct net_device *dev = mlxsw_sp_port->dev;
int err;
@@ -2646,6 +3716,8 @@ static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
if (err)
return err;
+ mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);
+
mlxsw_sp_port->learning = 1;
mlxsw_sp_port->learning_sync = 1;
mlxsw_sp_port->uc_flood = 1;
@@ -2654,16 +3726,14 @@ static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
return 0;
}
-static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
- bool flush_fdb)
+static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct net_device *dev = mlxsw_sp_port->dev;
- if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
- netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
-
mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
+ mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);
+
mlxsw_sp_port->learning = 0;
mlxsw_sp_port->learning_sync = 0;
mlxsw_sp_port->uc_flood = 0;
@@ -2672,28 +3742,7 @@ static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
/* Add an implicit VLAN interface to the device, so that untagged
* packets will be classified to the default vFID.
*/
- return mlxsw_sp_port_add_vid(dev, 0, 1);
-}
-
-static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
- struct net_device *br_dev)
-{
- return !mlxsw_sp->master_bridge.dev ||
- mlxsw_sp->master_bridge.dev == br_dev;
-}
-
-static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
- struct net_device *br_dev)
-{
- mlxsw_sp->master_bridge.dev = br_dev;
- mlxsw_sp->master_bridge.ref_count++;
-}
-
-static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
- struct net_device *br_dev)
-{
- if (--mlxsw_sp->master_bridge.ref_count == 0)
- mlxsw_sp->master_bridge.dev = NULL;
+ mlxsw_sp_port_add_vid(dev, 0, 1);
}
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
@@ -2809,6 +3858,45 @@ static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
return -EBUSY;
}
+static void
+mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 lag_id)
+{
+ struct mlxsw_sp_port *mlxsw_sp_vport;
+ struct mlxsw_sp_fid *f;
+
+ mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
+ if (WARN_ON(!mlxsw_sp_vport))
+ return;
+
+ /* If vPort is assigned a RIF, then leave it since it's no
+ * longer valid.
+ */
+ f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+ if (f)
+ f->leave(mlxsw_sp_vport);
+
+ mlxsw_sp_vport->lag_id = lag_id;
+ mlxsw_sp_vport->lagged = 1;
+}
+
+static void
+mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_port *mlxsw_sp_vport;
+ struct mlxsw_sp_fid *f;
+
+ mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
+ if (WARN_ON(!mlxsw_sp_vport))
+ return;
+
+ f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+ if (f)
+ f->leave(mlxsw_sp_vport);
+
+ mlxsw_sp_vport->lagged = 0;
+}
+
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *lag_dev)
{
@@ -2844,6 +3932,9 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port->lag_id = lag_id;
mlxsw_sp_port->lagged = 1;
lag->ref_count++;
+
+ mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_id);
+
return 0;
err_col_port_enable:
@@ -2854,65 +3945,35 @@ err_col_port_add:
return err;
}
-static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct net_device *br_dev,
- bool flush_fdb);
-
-static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
- struct net_device *lag_dev)
+static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *lag_dev)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_port *mlxsw_sp_vport;
- struct mlxsw_sp_upper *lag;
u16 lag_id = mlxsw_sp_port->lag_id;
- int err;
+ struct mlxsw_sp_upper *lag;
if (!mlxsw_sp_port->lagged)
- return 0;
+ return;
lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
WARN_ON(lag->ref_count == 0);
- err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
- if (err)
- return err;
- err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
- if (err)
- return err;
-
- /* In case we leave a LAG device that has bridges built on top,
- * then their teardown sequence is never issued and we need to
- * invoke the necessary cleanup routines ourselves.
- */
- list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
- vport.list) {
- struct net_device *br_dev;
-
- if (!mlxsw_sp_vport->bridged)
- continue;
-
- br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
- mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false);
- }
+ mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
+ mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
if (mlxsw_sp_port->bridged) {
mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
- mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
- mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
}
- if (lag->ref_count == 1) {
- if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port))
- netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
- err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
- if (err)
- return err;
- }
+ if (lag->ref_count == 1)
+ mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
mlxsw_sp_port->local_port);
mlxsw_sp_port->lagged = 0;
lag->ref_count--;
- return 0;
+
+ mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
}
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -2961,42 +4022,25 @@ static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid = vlan_dev_vlan_id(vlan_dev);
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
- if (!mlxsw_sp_vport) {
- WARN_ON(!mlxsw_sp_vport);
+ if (WARN_ON(!mlxsw_sp_vport))
return -EINVAL;
- }
mlxsw_sp_vport->dev = vlan_dev;
return 0;
}
-static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
- struct net_device *vlan_dev)
+static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *vlan_dev)
{
struct mlxsw_sp_port *mlxsw_sp_vport;
u16 vid = vlan_dev_vlan_id(vlan_dev);
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
- if (!mlxsw_sp_vport) {
- WARN_ON(!mlxsw_sp_vport);
- return -EINVAL;
- }
-
- /* When removing a VLAN device while still bridged we should first
- * remove it from the bridge, as we receive the bridge's notification
- * when the vPort is already gone.
- */
- if (mlxsw_sp_vport->bridged) {
- struct net_device *br_dev;
-
- br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
- mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true);
- }
+ if (WARN_ON(!mlxsw_sp_vport))
+ return;
mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
-
- return 0;
}
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
@@ -3006,7 +4050,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port;
struct net_device *upper_dev;
struct mlxsw_sp *mlxsw_sp;
- int err;
+ int err = 0;
mlxsw_sp_port = netdev_priv(dev);
mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
@@ -3015,73 +4059,56 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
switch (event) {
case NETDEV_PRECHANGEUPPER:
upper_dev = info->upper_dev;
- if (!info->master || !info->linking)
+ if (!is_vlan_dev(upper_dev) &&
+ !netif_is_lag_master(upper_dev) &&
+ !netif_is_bridge_master(upper_dev))
+ return -EINVAL;
+ if (!info->linking)
break;
/* HW limitation forbids putting ports in multiple bridges. */
if (netif_is_bridge_master(upper_dev) &&
!mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
- return NOTIFY_BAD;
+ return -EINVAL;
if (netif_is_lag_master(upper_dev) &&
!mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
info->upper_info))
- return NOTIFY_BAD;
+ return -EINVAL;
+ if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
+ return -EINVAL;
+ if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
+ !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
+ return -EINVAL;
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
if (is_vlan_dev(upper_dev)) {
- if (info->linking) {
+ if (info->linking)
err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
upper_dev);
- if (err) {
- netdev_err(dev, "Failed to link VLAN device\n");
- return NOTIFY_BAD;
- }
- } else {
- err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
- upper_dev);
- if (err) {
- netdev_err(dev, "Failed to unlink VLAN device\n");
- return NOTIFY_BAD;
- }
- }
+ else
+ mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
+ upper_dev);
} else if (netif_is_bridge_master(upper_dev)) {
- if (info->linking) {
- err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
- if (err) {
- netdev_err(dev, "Failed to join bridge\n");
- return NOTIFY_BAD;
- }
- mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
- } else {
- err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
- true);
- mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
- if (err) {
- netdev_err(dev, "Failed to leave bridge\n");
- return NOTIFY_BAD;
- }
- }
+ if (info->linking)
+ err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
+ upper_dev);
+ else
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
} else if (netif_is_lag_master(upper_dev)) {
- if (info->linking) {
+ if (info->linking)
err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
upper_dev);
- if (err) {
- netdev_err(dev, "Failed to join link aggregation\n");
- return NOTIFY_BAD;
- }
- } else {
- err = mlxsw_sp_port_lag_leave(mlxsw_sp_port,
- upper_dev);
- if (err) {
- netdev_err(dev, "Failed to leave link aggregation\n");
- return NOTIFY_BAD;
- }
- }
+ else
+ mlxsw_sp_port_lag_leave(mlxsw_sp_port,
+ upper_dev);
+ } else {
+ err = -EINVAL;
+ WARN_ON(1);
}
break;
}
- return NOTIFY_DONE;
+ return err;
}
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
@@ -3105,7 +4132,7 @@ static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
break;
}
- return NOTIFY_DONE;
+ return 0;
}
static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
@@ -3119,7 +4146,7 @@ static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
}
- return NOTIFY_DONE;
+ return 0;
}
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
@@ -3132,218 +4159,230 @@ static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
netdev_for_each_lower_dev(lag_dev, dev, iter) {
if (mlxsw_sp_port_dev_check(dev)) {
ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
- if (ret == NOTIFY_BAD)
+ if (ret)
return ret;
}
}
- return NOTIFY_DONE;
+ return 0;
}
-static struct mlxsw_sp_vfid *
-mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp,
- const struct net_device *br_dev)
+static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *vlan_dev)
{
- struct mlxsw_sp_vfid *vfid;
+ u16 fid = vlan_dev_vlan_id(vlan_dev);
+ struct mlxsw_sp_fid *f;
- list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) {
- if (vfid->br_dev == br_dev)
- return vfid;
+ f = mlxsw_sp_fid_find(mlxsw_sp, fid);
+ if (!f) {
+ f = mlxsw_sp_fid_create(mlxsw_sp, fid);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
}
- return NULL;
+ f->ref_count++;
+
+ return 0;
}
-static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid)
+static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *vlan_dev)
{
- return vfid - MLXSW_SP_VFID_PORT_MAX;
+ u16 fid = vlan_dev_vlan_id(vlan_dev);
+ struct mlxsw_sp_fid *f;
+
+ f = mlxsw_sp_fid_find(mlxsw_sp, fid);
+ if (f && f->r)
+ mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
+ if (f && --f->ref_count == 0)
+ mlxsw_sp_fid_destroy(mlxsw_sp, f);
}
-static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid)
+static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
+ unsigned long event, void *ptr)
{
- return MLXSW_SP_VFID_PORT_MAX + br_vfid;
+ struct netdev_notifier_changeupper_info *info;
+ struct net_device *upper_dev;
+ struct mlxsw_sp *mlxsw_sp;
+ int err;
+
+ mlxsw_sp = mlxsw_sp_lower_get(br_dev);
+ if (!mlxsw_sp)
+ return 0;
+ if (br_dev != mlxsw_sp->master_bridge.dev)
+ return 0;
+
+ info = ptr;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (!is_vlan_dev(upper_dev))
+ break;
+ if (info->linking) {
+ err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
+ upper_dev);
+ if (err)
+ return err;
+ } else {
+ mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, upper_dev);
+ }
+ break;
+ }
+
+ return 0;
}
-static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp)
+static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
- return find_first_zero_bit(mlxsw_sp->br_vfids.mapped,
- MLXSW_SP_VFID_BR_MAX);
+ return find_first_zero_bit(mlxsw_sp->vfids.mapped,
+ MLXSW_SP_VFID_MAX);
}
-static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
- struct net_device *br_dev)
+static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
+{
+ char sfmr_pl[MLXSW_REG_SFMR_LEN];
+
+ mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+}
+
+static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
+
+static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *br_dev)
{
struct device *dev = mlxsw_sp->bus_info->dev;
- struct mlxsw_sp_vfid *vfid;
- u16 n_vfid;
+ struct mlxsw_sp_fid *f;
+ u16 vfid, fid;
int err;
- n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
- if (n_vfid == MLXSW_SP_VFID_MAX) {
+ vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
+ if (vfid == MLXSW_SP_VFID_MAX) {
dev_err(dev, "No available vFIDs\n");
return ERR_PTR(-ERANGE);
}
- err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
+ fid = mlxsw_sp_vfid_to_fid(vfid);
+ err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
if (err) {
- dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
+ dev_err(dev, "Failed to create FID=%d\n", fid);
return ERR_PTR(err);
}
- vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
- if (!vfid)
+ f = kzalloc(sizeof(*f), GFP_KERNEL);
+ if (!f)
goto err_allocate_vfid;
- vfid->vfid = n_vfid;
- vfid->br_dev = br_dev;
+ f->leave = mlxsw_sp_vport_vfid_leave;
+ f->fid = fid;
+ f->dev = br_dev;
- list_add(&vfid->list, &mlxsw_sp->br_vfids.list);
- set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped);
+ list_add(&f->list, &mlxsw_sp->vfids.list);
+ set_bit(vfid, mlxsw_sp->vfids.mapped);
- return vfid;
+ return f;
err_allocate_vfid:
- __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
+ mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
return ERR_PTR(-ENOMEM);
}
-static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_vfid *vfid)
+static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fid *f)
{
- u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid);
+ u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
+ u16 fid = f->fid;
- clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped);
- list_del(&vfid->list);
+ clear_bit(vfid, mlxsw_sp->vfids.mapped);
+ list_del(&f->list);
- __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);
+ if (f->r)
+ mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
- kfree(vfid);
+ kfree(f);
+
+ mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
}
-static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct net_device *br_dev,
- bool flush_fdb)
+static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
+ bool valid)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
- struct net_device *dev = mlxsw_sp_vport->dev;
- struct mlxsw_sp_vfid *vfid, *new_vfid;
- int err;
-
- vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
- if (!vfid) {
- WARN_ON(!vfid);
- return -EINVAL;
- }
- /* We need a vFID to go back to after leaving the bridge's vFID. */
- new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
- if (!new_vfid) {
- new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
- if (IS_ERR(new_vfid)) {
- netdev_err(dev, "Failed to create vFID for VID=%d\n",
- vid);
- return PTR_ERR(new_vfid);
- }
- }
-
- /* Invalidate existing {Port, VID} to vFID mapping and create a new
- * one for the new vFID.
- */
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
- MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
- false,
- mlxsw_sp_vfid_to_fid(vfid->vfid),
- vid);
- if (err) {
- netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
- vfid->vfid);
- goto err_port_vid_to_fid_invalidate;
- }
+ return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
+ vid);
+}
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
- MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
- true,
- mlxsw_sp_vfid_to_fid(new_vfid->vfid),
- vid);
- if (err) {
- netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
- new_vfid->vfid);
- goto err_port_vid_to_fid_validate;
- }
+static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct net_device *br_dev)
+{
+ struct mlxsw_sp_fid *f;
+ int err;
- err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
- if (err) {
- netdev_err(dev, "Failed to disable learning\n");
- goto err_port_vid_learning_set;
+ f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
+ if (!f) {
+ f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
}
- err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
- false);
- if (err) {
- netdev_err(dev, "Failed clear to clear flooding\n");
+ err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
+ if (err)
goto err_vport_flood_set;
- }
- err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
- MLXSW_REG_SPMS_STATE_FORWARDING);
- if (err) {
- netdev_err(dev, "Failed to set STP state\n");
- goto err_port_stp_state_set;
- }
-
- if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
- netdev_err(dev, "Failed to flush FDB\n");
+ err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
+ if (err)
+ goto err_vport_fid_map;
- /* Switch between the vFIDs and destroy the old one if needed. */
- new_vfid->nr_vports++;
- mlxsw_sp_vport->vport.vfid = new_vfid;
- vfid->nr_vports--;
- if (!vfid->nr_vports)
- mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
+ mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
+ f->ref_count++;
- mlxsw_sp_vport->learning = 0;
- mlxsw_sp_vport->learning_sync = 0;
- mlxsw_sp_vport->uc_flood = 0;
- mlxsw_sp_vport->bridged = 0;
+ netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);
return 0;
-err_port_stp_state_set:
+err_vport_fid_map:
+ mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
err_vport_flood_set:
-err_port_vid_learning_set:
-err_port_vid_to_fid_validate:
-err_port_vid_to_fid_invalidate:
- /* Rollback vFID only if new. */
- if (!new_vfid->nr_vports)
- mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid);
+ if (!f->ref_count)
+ mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
return err;
}
+static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+ struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+
+ netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
+
+ mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);
+
+ mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
+
+ mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);
+
+ mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
+ if (--f->ref_count == 0)
+ mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
+}
+
static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
struct net_device *br_dev)
{
- struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid;
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
struct net_device *dev = mlxsw_sp_vport->dev;
- struct mlxsw_sp_vfid *vfid;
int err;
- vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
- if (!vfid) {
- vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev);
- if (IS_ERR(vfid)) {
- netdev_err(dev, "Failed to create bridge vFID\n");
- return PTR_ERR(vfid);
- }
- }
+ if (f && !WARN_ON(!f->leave))
+ f->leave(mlxsw_sp_vport);
- err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false);
+ err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
if (err) {
- netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
- vfid->vfid);
- goto err_port_flood_set;
+ netdev_err(dev, "Failed to join vFID\n");
+ return err;
}
err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
@@ -3352,38 +4391,6 @@ static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
goto err_port_vid_learning_set;
}
- /* We need to invalidate existing {Port, VID} to vFID mapping and
- * create a new one for the bridge's vFID.
- */
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
- MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
- false,
- mlxsw_sp_vfid_to_fid(old_vfid->vfid),
- vid);
- if (err) {
- netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
- old_vfid->vfid);
- goto err_port_vid_to_fid_invalidate;
- }
-
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
- MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
- true,
- mlxsw_sp_vfid_to_fid(vfid->vfid),
- vid);
- if (err) {
- netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
- vfid->vfid);
- goto err_port_vid_to_fid_validate;
- }
-
- /* Switch between the vFIDs and destroy the old one if needed. */
- vfid->nr_vports++;
- mlxsw_sp_vport->vport.vfid = vfid;
- old_vfid->nr_vports--;
- if (!old_vfid->nr_vports)
- mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid);
-
mlxsw_sp_vport->learning = 1;
mlxsw_sp_vport->learning_sync = 1;
mlxsw_sp_vport->uc_flood = 1;
@@ -3391,20 +4398,25 @@ static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
return 0;
-err_port_vid_to_fid_validate:
- mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
- MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
- mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid);
-err_port_vid_to_fid_invalidate:
- mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
err_port_vid_learning_set:
- mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false);
-err_port_flood_set:
- if (!vfid->nr_vports)
- mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
+ mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
return err;
}
+static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+ u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
+
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
+
+ mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
+
+ mlxsw_sp_vport->learning = 0;
+ mlxsw_sp_vport->learning_sync = 0;
+ mlxsw_sp_vport->uc_flood = 0;
+ mlxsw_sp_vport->bridged = 0;
+}
+
static bool
mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
const struct net_device *br_dev)
@@ -3413,7 +4425,9 @@ mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
vport.list) {
- if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev)
+ struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);
+
+ if (dev && dev == br_dev)
return false;
}
@@ -3428,56 +4442,39 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
struct netdev_notifier_changeupper_info *info = ptr;
struct mlxsw_sp_port *mlxsw_sp_vport;
struct net_device *upper_dev;
- int err;
+ int err = 0;
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
switch (event) {
case NETDEV_PRECHANGEUPPER:
upper_dev = info->upper_dev;
- if (!info->master || !info->linking)
- break;
if (!netif_is_bridge_master(upper_dev))
- return NOTIFY_BAD;
+ return -EINVAL;
+ if (!info->linking)
+ break;
/* We can't have multiple VLAN interfaces configured on
* the same port that are members of the same bridge.
*/
if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
upper_dev))
- return NOTIFY_BAD;
+ return -EINVAL;
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
- if (!info->master)
- break;
if (info->linking) {
- if (!mlxsw_sp_vport) {
- WARN_ON(!mlxsw_sp_vport);
- return NOTIFY_BAD;
- }
+ if (WARN_ON(!mlxsw_sp_vport))
+ return -EINVAL;
err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
upper_dev);
- if (err) {
- netdev_err(dev, "Failed to join bridge\n");
- return NOTIFY_BAD;
- }
} else {
- /* We ignore bridge's unlinking notifications if vPort
- * is gone, since we already left the bridge when the
- * VLAN device was unlinked from the real device.
- */
if (!mlxsw_sp_vport)
- return NOTIFY_DONE;
- err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport,
- upper_dev, true);
- if (err) {
- netdev_err(dev, "Failed to leave bridge\n");
- return NOTIFY_BAD;
- }
+ return 0;
+ mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
}
}
- return NOTIFY_DONE;
+ return err;
}
static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
@@ -3492,12 +4489,12 @@ static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
if (mlxsw_sp_port_dev_check(dev)) {
ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
vid);
- if (ret == NOTIFY_BAD)
+ if (ret)
return ret;
}
}
- return NOTIFY_DONE;
+ return 0;
}
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
@@ -3513,41 +4510,58 @@ static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
vid);
- return NOTIFY_DONE;
+ return 0;
}
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ int err = 0;
- if (mlxsw_sp_port_dev_check(dev))
- return mlxsw_sp_netdevice_port_event(dev, event, ptr);
-
- if (netif_is_lag_master(dev))
- return mlxsw_sp_netdevice_lag_event(dev, event, ptr);
+ if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
+ err = mlxsw_sp_netdevice_router_port_event(dev);
+ else if (mlxsw_sp_port_dev_check(dev))
+ err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
+ else if (netif_is_lag_master(dev))
+ err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
+ else if (netif_is_bridge_master(dev))
+ err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
+ else if (is_vlan_dev(dev))
+ err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
- if (is_vlan_dev(dev))
- return mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
-
- return NOTIFY_DONE;
+ return notifier_from_errno(err);
}
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
.notifier_call = mlxsw_sp_netdevice_event,
};
+static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
+ .notifier_call = mlxsw_sp_inetaddr_event,
+ .priority = 10, /* Must be called before FIB notifier block */
+};
+
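The explicit .priority = 10 relies on notifier chains invoking higher-priority entries first (registration inserts in descending priority order), so this block runs ahead of default-priority (0) subscribers, per the comment above. A toy chain showing the ordering; the callback names are illustrative only:

#include <stdio.h>

struct notifier_block {
	int (*call)(unsigned long event);
	int priority;			/* higher runs first */
	struct notifier_block *next;
};

/* Insertion sort by descending priority, as the kernel chain does */
static void chain_register(struct notifier_block **head,
			   struct notifier_block *nb)
{
	while (*head && (*head)->priority >= nb->priority)
		head = &(*head)->next;
	nb->next = *head;
	*head = nb;
}

static void chain_call(struct notifier_block *head, unsigned long event)
{
	for (; head; head = head->next)
		head->call(event);
}

static int inetaddr_cb(unsigned long e)
{
	(void)e;
	puts("inetaddr: set up RIF");
	return 0;
}

static int fib_cb(unsigned long e)
{
	(void)e;
	puts("fib: offload route");
	return 0;
}

int main(void)
{
	struct notifier_block *chain = NULL;
	struct notifier_block fib = { fib_cb, 0, NULL };
	struct notifier_block inet = { inetaddr_cb, 10, NULL };

	chain_register(&chain, &fib);
	chain_register(&chain, &inet);
	chain_call(chain, 0);	/* inetaddr runs before fib */
	return 0;
}
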
+static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
+ .notifier_call = mlxsw_sp_router_netevent_event,
+};
+
static int __init mlxsw_sp_module_init(void)
{
int err;
register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
+ register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
+ register_netevent_notifier(&mlxsw_sp_router_netevent_nb);
+
err = mlxsw_core_driver_register(&mlxsw_sp_driver);
if (err)
goto err_core_driver_register;
return 0;
err_core_driver_register:
+ unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
+ unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
return err;
}
@@ -3555,6 +4569,8 @@ err_core_driver_register:
static void __exit mlxsw_sp_module_exit(void)
{
mlxsw_core_driver_unregister(&mlxsw_sp_driver);
+ unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
+ unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 13b30eaa1..ac48abebe 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -39,19 +39,22 @@
#include <linux/types.h>
#include <linux/netdevice.h>
+#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/list.h>
#include <linux/dcbnl.h>
+#include <linux/in6.h>
#include <net/switchdev.h>
#include "port.h"
#include "core.h"
#define MLXSW_SP_VFID_BASE VLAN_N_VID
-#define MLXSW_SP_VFID_PORT_MAX 512 /* Non-bridged VLAN interfaces */
-#define MLXSW_SP_VFID_BR_MAX 6144 /* Bridged VLAN interfaces */
-#define MLXSW_SP_VFID_MAX (MLXSW_SP_VFID_PORT_MAX + MLXSW_SP_VFID_BR_MAX)
+#define MLXSW_SP_VFID_MAX 6656 /* Bridged VLAN interfaces */
+
+#define MLXSW_SP_RFID_BASE 15360
+#define MLXSW_SP_RIF_MAX 800
#define MLXSW_SP_LAG_MAX 64
#define MLXSW_SP_PORT_PER_LAG_MAX 16
@@ -60,6 +63,12 @@
#define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4
+#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */
+#define MLXSW_SP_LPM_TREE_MAX 22
+#define MLXSW_SP_LPM_TREE_COUNT (MLXSW_SP_LPM_TREE_MAX - MLXSW_SP_LPM_TREE_MIN)
+
+#define MLXSW_SP_VIRTUAL_ROUTER_MAX 256
+
#define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */
#define MLXSW_SP_BYTES_PER_CELL 96
@@ -67,6 +76,10 @@
#define MLXSW_SP_BYTES_TO_CELLS(b) DIV_ROUND_UP(b, MLXSW_SP_BYTES_PER_CELL)
#define MLXSW_SP_CELLS_TO_BYTES(c) (c * MLXSW_SP_BYTES_PER_CELL)
+#define MLXSW_SP_KVD_LINEAR_SIZE 65536 /* entries */
+#define MLXSW_SP_KVD_HASH_SINGLE_SIZE 163840 /* entries */
+#define MLXSW_SP_KVD_HASH_DOUBLE_SIZE 32768 /* entries */
+
/* Maximum delay buffer needed in case of PAUSE frames, in cells.
* Assumes 100m cable and maximum MTU.
*/
@@ -87,12 +100,22 @@ struct mlxsw_sp_upper {
unsigned int ref_count;
};
-struct mlxsw_sp_vfid {
+struct mlxsw_sp_fid {
+ void (*leave)(struct mlxsw_sp_port *mlxsw_sp_vport);
struct list_head list;
- u16 nr_vports;
- u16 vfid; /* Starting at 0 */
- struct net_device *br_dev;
- u16 vid;
+ unsigned int ref_count;
+ struct net_device *dev;
+ struct mlxsw_sp_rif *r;
+ u16 fid;
+};
+
+struct mlxsw_sp_rif {
+ struct net_device *dev;
+ unsigned int ref_count;
+ struct mlxsw_sp_fid *f;
+ unsigned char addr[ETH_ALEN];
+ int mtu;
+ u16 rif;
};
struct mlxsw_sp_mid {
@@ -115,7 +138,17 @@ static inline u16 mlxsw_sp_fid_to_vfid(u16 fid)
static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
{
- return fid >= MLXSW_SP_VFID_BASE;
+ return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_RFID_BASE;
+}
+
+static inline bool mlxsw_sp_fid_is_rfid(u16 fid)
+{
+ return fid >= MLXSW_SP_RFID_BASE;
+}
+
+static inline u16 mlxsw_sp_rif_sp_to_fid(u16 rif)
+{
+ return MLXSW_SP_RFID_BASE + rif;
}
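
With these helpers the 16-bit FID space splits into three ranges: [0, VLAN_N_VID) for FIDs of the VLAN-aware bridge, [MLXSW_SP_VFID_BASE, MLXSW_SP_RFID_BASE) for vFIDs of VLAN-unaware bridges, and [MLXSW_SP_RFID_BASE, ...) for router FIDs, one per RIF; the widened upper bound in mlxsw_sp_fid_is_vfid() is what keeps the new rFIDs out of the vFID range. A quick userspace check of the arithmetic:

#include <assert.h>
#include <stdio.h>

#define VLAN_N_VID		4096
#define MLXSW_SP_VFID_BASE	VLAN_N_VID
#define MLXSW_SP_RFID_BASE	15360

/* Userspace copies of the spectrum.h classification helpers */
static int fid_is_vfid(unsigned short fid)
{
	return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_RFID_BASE;
}

static int fid_is_rfid(unsigned short fid)
{
	return fid >= MLXSW_SP_RFID_BASE;
}

static unsigned short rif_sp_to_fid(unsigned short rif)
{
	return MLXSW_SP_RFID_BASE + rif;
}

int main(void)
{
	assert(!fid_is_vfid(1));		/* plain VLAN FID */
	assert(fid_is_vfid(4096));		/* first vFID */
	assert(fid_is_vfid(15359));		/* top of the vFID range */
	assert(fid_is_rfid(rif_sp_to_fid(0)));	/* RIF 0 -> FID 15360 */
	assert(!fid_is_vfid(rif_sp_to_fid(0)));	/* excluded by new bound */
	puts("ok");
	return 0;
}
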
struct mlxsw_sp_sb_pr {
@@ -152,20 +185,97 @@ struct mlxsw_sp_sb {
} ports[MLXSW_PORT_MAX_PORTS];
};
-struct mlxsw_sp {
+#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)
+
+struct mlxsw_sp_prefix_usage {
+ DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
+};
+
+enum mlxsw_sp_l3proto {
+ MLXSW_SP_L3_PROTO_IPV4,
+ MLXSW_SP_L3_PROTO_IPV6,
+};
+
+struct mlxsw_sp_lpm_tree {
+ u8 id; /* tree ID */
+ unsigned int ref_count;
+ enum mlxsw_sp_l3proto proto;
+ struct mlxsw_sp_prefix_usage prefix_usage;
+};
+
+struct mlxsw_sp_fib;
+
+struct mlxsw_sp_vr {
+ u16 id; /* virtual router ID */
+ bool used;
+ enum mlxsw_sp_l3proto proto;
+ u32 tb_id; /* kernel fib table id */
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+ struct mlxsw_sp_fib *fib;
+};
+
+enum mlxsw_sp_span_type {
+ MLXSW_SP_SPAN_EGRESS,
+ MLXSW_SP_SPAN_INGRESS
+};
+
+struct mlxsw_sp_span_inspected_port {
+ struct list_head list;
+ enum mlxsw_sp_span_type type;
+ u8 local_port;
+};
+
+struct mlxsw_sp_span_entry {
+ u8 local_port;
+ bool used;
+ struct list_head bound_ports_list;
+ int ref_count;
+ int id;
+};
+
+enum mlxsw_sp_port_mall_action_type {
+ MLXSW_SP_PORT_MALL_MIRROR,
+};
+
+struct mlxsw_sp_port_mall_mirror_tc_entry {
+ u8 to_local_port;
+ bool ingress;
+};
+
+struct mlxsw_sp_port_mall_tc_entry {
+ struct list_head list;
+ unsigned long cookie;
+ enum mlxsw_sp_port_mall_action_type type;
+ union {
+ struct mlxsw_sp_port_mall_mirror_tc_entry mirror;
+ };
+};
+
+struct mlxsw_sp_router {
+ struct mlxsw_sp_lpm_tree lpm_trees[MLXSW_SP_LPM_TREE_COUNT];
+ struct mlxsw_sp_vr vrs[MLXSW_SP_VIRTUAL_ROUTER_MAX];
+ struct rhashtable neigh_ht;
struct {
- struct list_head list;
- unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_VFID_PORT_MAX)];
- } port_vfids;
+ struct delayed_work dw;
+ unsigned long interval; /* ms */
+ } neighs_update;
+ struct delayed_work nexthop_probe_dw;
+#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
+ struct list_head nexthop_group_list;
+ struct list_head nexthop_neighs_list;
+};
+
+struct mlxsw_sp {
struct {
struct list_head list;
- unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_VFID_BR_MAX)];
- } br_vfids;
+ DECLARE_BITMAP(mapped, MLXSW_SP_VFID_MAX);
+ } vfids;
struct {
struct list_head list;
- unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_MID_MAX)];
+ DECLARE_BITMAP(mapped, MLXSW_SP_MID_MAX);
} br_mids;
- unsigned long active_fids[BITS_TO_LONGS(VLAN_N_VID)];
+ struct list_head fids; /* VLAN-aware bridge FIDs */
+ struct mlxsw_sp_rif *rifs[MLXSW_SP_RIF_MAX];
struct mlxsw_sp_port **ports;
struct mlxsw_core *core;
const struct mlxsw_bus_info *bus_info;
@@ -183,6 +293,15 @@ struct mlxsw_sp {
struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX];
u8 port_to_module[MLXSW_PORT_MAX_PORTS];
struct mlxsw_sp_sb sb;
+ struct mlxsw_sp_router router;
+ struct {
+ DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE);
+ } kvdl;
+
+ struct {
+ struct mlxsw_sp_span_entry *entries;
+ int entries_count;
+ } span;
};
static inline struct mlxsw_sp_upper *
@@ -217,7 +336,7 @@ struct mlxsw_sp_port {
u16 lag_id;
struct {
struct list_head list;
- struct mlxsw_sp_vfid *vfid;
+ struct mlxsw_sp_fid *f;
u16 vid;
} vport;
struct {
@@ -239,8 +358,13 @@ struct mlxsw_sp_port {
unsigned long *untagged_vlans;
/* VLAN interfaces */
struct list_head vports_list;
+ /* TC handles */
+ struct list_head mall_tc_list;
};
+struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
+void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
+
static inline bool
mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port)
{
@@ -259,28 +383,38 @@ mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index)
return mlxsw_sp_port && mlxsw_sp_port->lagged ? mlxsw_sp_port : NULL;
}
+static inline u16
+mlxsw_sp_vport_vid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+ return mlxsw_sp_vport->vport.vid;
+}
+
static inline bool
mlxsw_sp_port_is_vport(const struct mlxsw_sp_port *mlxsw_sp_port)
{
- return mlxsw_sp_port->vport.vfid;
+ u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
+
+ return vid != 0;
}
-static inline struct net_device *
-mlxsw_sp_vport_br_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
+static inline void mlxsw_sp_vport_fid_set(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct mlxsw_sp_fid *f)
{
- return mlxsw_sp_vport->vport.vfid->br_dev;
+ mlxsw_sp_vport->vport.f = f;
}
-static inline u16
-mlxsw_sp_vport_vid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
+static inline struct mlxsw_sp_fid *
+mlxsw_sp_vport_fid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
- return mlxsw_sp_vport->vport.vid;
+ return mlxsw_sp_vport->vport.f;
}
-static inline u16
-mlxsw_sp_vport_vfid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
+static inline struct net_device *
+mlxsw_sp_vport_dev_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
- return mlxsw_sp_vport->vport.vfid->vfid;
+ struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+
+ return f ? f->dev : NULL;
}
static inline struct mlxsw_sp_port *
@@ -298,20 +432,60 @@ mlxsw_sp_port_vport_find(const struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
}
static inline struct mlxsw_sp_port *
-mlxsw_sp_port_vport_find_by_vfid(const struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vfid)
+mlxsw_sp_port_vport_find_by_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 fid)
{
struct mlxsw_sp_port *mlxsw_sp_vport;
list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
vport.list) {
- if (mlxsw_sp_vport_vfid_get(mlxsw_sp_vport) == vfid)
+ struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+
+ if (f && f->fid == fid)
return mlxsw_sp_vport;
}
return NULL;
}
+static inline struct mlxsw_sp_fid *mlxsw_sp_fid_find(struct mlxsw_sp *mlxsw_sp,
+ u16 fid)
+{
+ struct mlxsw_sp_fid *f;
+
+ list_for_each_entry(f, &mlxsw_sp->fids, list)
+ if (f->fid == fid)
+ return f;
+
+ return NULL;
+}
+
+static inline struct mlxsw_sp_fid *
+mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev)
+{
+ struct mlxsw_sp_fid *f;
+
+ list_for_each_entry(f, &mlxsw_sp->vfids.list, list)
+ if (f->dev == br_dev)
+ return f;
+
+ return NULL;
+}
+
+static inline struct mlxsw_sp_rif *
+mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
+ if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
+ return mlxsw_sp->rifs[i];
+
+ return NULL;
+}
+
enum mlxsw_sp_flood_table {
MLXSW_SP_FLOOD_TABLE_UC,
MLXSW_SP_FLOOD_TABLE_BM,
@@ -362,14 +536,17 @@ int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid);
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
u16 vid_end, bool is_member, bool untagged);
-int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
- u16 vid);
-int mlxsw_sp_port_kill_vid(struct net_device *dev,
- __be16 __always_unused proto, u16 vid);
-int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
- bool set, bool only_uc);
+int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
+ bool set);
void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
+int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid);
+int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
+ bool adding);
+struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid);
+void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f);
+void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *r);
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
bool dwrr, u8 dwrr_weight);
@@ -399,4 +576,21 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
#endif
+int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_router_fib4_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_ipv4_fib *fib4,
+ struct switchdev_trans *trans);
+int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_ipv4_fib *fib4);
+int mlxsw_sp_router_neigh_construct(struct net_device *dev,
+ struct neighbour *n);
+void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
+ struct neighbour *n);
+int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
+ unsigned long event, void *ptr);
+
+int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count);
+void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 074cdda7b..953b214f3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -330,7 +330,7 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
- MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 0, 0),
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
@@ -717,22 +717,18 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
u8 local_port = mlxsw_sp_port->local_port;
u8 pg_buff = tc_index;
enum mlxsw_reg_sbxx_dir dir = pool_type;
- u8 pool = pool_index;
+ u8 pool = pool_get(pool_index);
u32 max_buff;
int err;
+ if (dir != dir_get(pool_index))
+ return -EINVAL;
+
err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
threshold, &max_buff);
if (err)
return err;
- if (pool_type == DEVLINK_SB_POOL_TYPE_EGRESS) {
- if (pool < MLXSW_SP_SB_POOL_COUNT)
- return -EINVAL;
- pool -= MLXSW_SP_SB_POOL_COUNT;
- } else if (pool >= MLXSW_SP_SB_POOL_COUNT) {
- return -EINVAL;
- }
return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir,
0, max_buff, pool);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
index 01cfb7512..b6ed7f7c5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
@@ -341,6 +341,8 @@ static int mlxsw_sp_port_pfc_set(struct mlxsw_sp_port *mlxsw_sp_port,
char pfcc_pl[MLXSW_REG_PFCC_LEN];
mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
+ mlxsw_reg_pfcc_pprx_set(pfcc_pl, mlxsw_sp_port->link.rx_pause);
+ mlxsw_reg_pfcc_pptx_set(pfcc_pl, mlxsw_sp_port->link.tx_pause);
mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc->pfc_en);
return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
@@ -351,17 +353,17 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
int err;
- if ((mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) &&
- pfc->pfc_en) {
+ if (pause_en && pfc->pfc_en) {
netdev_err(dev, "PAUSE frames already enabled on port\n");
return -EINVAL;
}
err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
mlxsw_sp_port->dcb.ets->prio_tc,
- false, pfc);
+ pause_en, pfc);
if (err) {
netdev_err(dev, "Failed to configure port's headroom for PFC\n");
return err;
@@ -380,7 +382,7 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
err_port_pfc_set:
__mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
- mlxsw_sp_port->dcb.ets->prio_tc, false,
+ mlxsw_sp_port->dcb.ets->prio_tc, pause_en,
mlxsw_sp_port->dcb.pfc);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
new file mode 100644
index 000000000..ac321e8e5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
@@ -0,0 +1,91 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+
+#include "spectrum.h"
+
+#define MLXSW_SP_KVDL_SINGLE_BASE 0
+#define MLXSW_SP_KVDL_SINGLE_SIZE 16384
+#define MLXSW_SP_KVDL_CHUNKS_BASE \
+ (MLXSW_SP_KVDL_SINGLE_BASE + MLXSW_SP_KVDL_SINGLE_SIZE)
+#define MLXSW_SP_KVDL_CHUNKS_SIZE \
+ (MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP_KVDL_CHUNKS_BASE)
+#define MLXSW_SP_CHUNK_MAX 32
+
+int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count)
+{
+ int entry_index;
+ int size;
+ int type_base;
+ int type_size;
+ int type_entries;
+
+ if (entry_count == 0 || entry_count > MLXSW_SP_CHUNK_MAX) {
+ return -EINVAL;
+ } else if (entry_count == 1) {
+ type_base = MLXSW_SP_KVDL_SINGLE_BASE;
+ type_size = MLXSW_SP_KVDL_SINGLE_SIZE;
+ type_entries = 1;
+ } else {
+ type_base = MLXSW_SP_KVDL_CHUNKS_BASE;
+ type_size = MLXSW_SP_KVDL_CHUNKS_SIZE;
+ type_entries = MLXSW_SP_CHUNK_MAX;
+ }
+
+ entry_index = type_base;
+ size = type_base + type_size;
+ for_each_clear_bit_from(entry_index, mlxsw_sp->kvdl.usage, size) {
+ int i;
+
+ for (i = 0; i < type_entries; i++)
+ set_bit(entry_index + i, mlxsw_sp->kvdl.usage);
+ return entry_index;
+ }
+ return -ENOBUFS;
+}
+
+void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index)
+{
+ int type_entries;
+ int i;
+
+ if (entry_index < MLXSW_SP_KVDL_CHUNKS_BASE)
+ type_entries = 1;
+ else
+ type_entries = MLXSW_SP_CHUNK_MAX;
+ for (i = 0; i < type_entries; i++)
+ clear_bit(entry_index + i, mlxsw_sp->kvdl.usage);
+}
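
The allocator above carves the KVD linear area into a single-entry region and a 32-entry-chunk region and scans a shared usage bitmap first-fit. A self-contained userspace model of the same scheme, with sizes shrunk for illustration and the first-clear-bit scan simplified to chunk-aligned stepping (the kernel version claims entries starting at the first clear bit it finds):

#include <stdio.h>
#include <string.h>

/* Model of the KVDL allocator: one usage bitmap split into a single-entry
 * region and a 32-entry-chunk region; a request for one entry comes from
 * the first region, anything up to CHUNK_MAX claims a whole chunk. */
#define SINGLE_BASE	0
#define SINGLE_SIZE	8
#define CHUNKS_BASE	(SINGLE_BASE + SINGLE_SIZE)
#define CHUNKS_SIZE	64
#define CHUNK_MAX	32
#define KVDL_SIZE	(CHUNKS_BASE + CHUNKS_SIZE)

static unsigned char usage[KVDL_SIZE];	/* 1 = entry in use */

static int kvdl_alloc(unsigned int entry_count)
{
	int base, size, step, i;

	if (entry_count == 0 || entry_count > CHUNK_MAX)
		return -1;
	base = entry_count == 1 ? SINGLE_BASE : CHUNKS_BASE;
	size = entry_count == 1 ? SINGLE_SIZE : CHUNKS_SIZE;
	step = entry_count == 1 ? 1 : CHUNK_MAX;

	for (i = base; i < base + size; i += step) {
		if (usage[i])
			continue;
		memset(&usage[i], 1, step);
		return i;
	}
	return -1;	/* out of space; the driver falls back to a trap */
}

static void kvdl_free(int entry_index)
{
	int step = entry_index < CHUNKS_BASE ? 1 : CHUNK_MAX;

	memset(&usage[entry_index], 0, step);
}

int main(void)
{
	int s = kvdl_alloc(1);	/* lands at 0 */
	int c = kvdl_alloc(4);	/* claims a whole chunk at 8 */

	printf("single at %d, chunk at %d\n", s, c);
	kvdl_free(c);
	kvdl_free(s);
	return 0;
}

Note that the free path infers the allocation size from the index alone, which is why the two regions must not overlap.
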
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
new file mode 100644
index 000000000..3f5c51da6
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -0,0 +1,1863 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/rhashtable.h>
+#include <linux/bitops.h>
+#include <linux/in6.h>
+#include <linux/notifier.h>
+#include <net/netevent.h>
+#include <net/neighbour.h>
+#include <net/arp.h>
+
+#include "spectrum.h"
+#include "core.h"
+#include "reg.h"
+
+#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
+ for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
+
+static bool
+mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage *prefix_usage1,
+ struct mlxsw_sp_prefix_usage *prefix_usage2)
+{
+ unsigned char prefix;
+
+ mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage1) {
+ if (!test_bit(prefix, prefix_usage2->b))
+ return false;
+ }
+ return true;
+}
+
+static bool
+mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
+ struct mlxsw_sp_prefix_usage *prefix_usage2)
+{
+ return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
+}
+
+static bool
+mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
+{
+ struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };
+
+ return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
+}
+
+static void
+mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
+ struct mlxsw_sp_prefix_usage *prefix_usage2)
+{
+ memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
+}
+
+static void
+mlxsw_sp_prefix_usage_zero(struct mlxsw_sp_prefix_usage *prefix_usage)
+{
+ memset(prefix_usage, 0, sizeof(*prefix_usage));
+}
+
+static void
+mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
+ unsigned char prefix_len)
+{
+ set_bit(prefix_len, prefix_usage->b);
+}
+
+static void
+mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
+ unsigned char prefix_len)
+{
+ clear_bit(prefix_len, prefix_usage->b);
+}
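
These helpers treat the set of prefix lengths used by a virtual router as a bitmap, so that "can this LPM tree serve this FIB" reduces to subset and equality tests. A compact sketch with the bitmap collapsed into one 64-bit word (the driver's bitmap is wide enough for all IPv6 prefix lengths; 64 bits suffice to show the logic):

#include <assert.h>
#include <stdio.h>

typedef unsigned long long prefix_usage_t;

static void prefix_set(prefix_usage_t *u, unsigned len)
{
	*u |= 1ULL << len;
}

static int prefix_subset(prefix_usage_t a, prefix_usage_t b)
{
	return (a & ~b) == 0; /* every prefix in a is also in b */
}

static int prefix_eq(prefix_usage_t a, prefix_usage_t b)
{
	return a == b;
}

int main(void)
{
	prefix_usage_t have = 0, want = 0;

	prefix_set(&have, 0);
	prefix_set(&have, 24);
	prefix_set(&have, 32);
	prefix_set(&want, 24);

	/* {/24} is a proper subset of {/0, /24, /32}: an existing LPM
	 * tree can keep serving the router even when a tighter tree
	 * cannot be allocated, which is what the tree-check logic
	 * further below relies on. */
	assert(prefix_subset(want, have));
	assert(!prefix_eq(want, have));
	printf("subset holds\n");
	return 0;
}
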
+
+struct mlxsw_sp_fib_key {
+ struct net_device *dev;
+ unsigned char addr[sizeof(struct in6_addr)];
+ unsigned char prefix_len;
+};
+
+enum mlxsw_sp_fib_entry_type {
+ MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
+ MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
+ MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
+};
+
+struct mlxsw_sp_nexthop_group;
+
+struct mlxsw_sp_fib_entry {
+ struct rhash_head ht_node;
+ struct mlxsw_sp_fib_key key;
+ enum mlxsw_sp_fib_entry_type type;
+ unsigned int ref_count;
+ u16 rif; /* used for action local */
+ struct mlxsw_sp_vr *vr;
+ struct list_head nexthop_group_node;
+ struct mlxsw_sp_nexthop_group *nh_group;
+};
+
+struct mlxsw_sp_fib {
+ struct rhashtable ht;
+ unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
+ struct mlxsw_sp_prefix_usage prefix_usage;
+};
+
+static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
+ .key_offset = offsetof(struct mlxsw_sp_fib_entry, key),
+ .head_offset = offsetof(struct mlxsw_sp_fib_entry, ht_node),
+ .key_len = sizeof(struct mlxsw_sp_fib_key),
+ .automatic_shrinking = true,
+};
+
+static int mlxsw_sp_fib_entry_insert(struct mlxsw_sp_fib *fib,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ unsigned char prefix_len = fib_entry->key.prefix_len;
+ int err;
+
+ err = rhashtable_insert_fast(&fib->ht, &fib_entry->ht_node,
+ mlxsw_sp_fib_ht_params);
+ if (err)
+ return err;
+ if (fib->prefix_ref_count[prefix_len]++ == 0)
+ mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
+ return 0;
+}
+
+static void mlxsw_sp_fib_entry_remove(struct mlxsw_sp_fib *fib,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ unsigned char prefix_len = fib_entry->key.prefix_len;
+
+ if (--fib->prefix_ref_count[prefix_len] == 0)
+ mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
+ rhashtable_remove_fast(&fib->ht, &fib_entry->ht_node,
+ mlxsw_sp_fib_ht_params);
+}
+
+static struct mlxsw_sp_fib_entry *
+mlxsw_sp_fib_entry_create(struct mlxsw_sp_fib *fib, const void *addr,
+ size_t addr_len, unsigned char prefix_len,
+ struct net_device *dev)
+{
+ struct mlxsw_sp_fib_entry *fib_entry;
+
+ fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
+ if (!fib_entry)
+ return NULL;
+ fib_entry->key.dev = dev;
+ memcpy(fib_entry->key.addr, addr, addr_len);
+ fib_entry->key.prefix_len = prefix_len;
+ return fib_entry;
+}
+
+static void mlxsw_sp_fib_entry_destroy(struct mlxsw_sp_fib_entry *fib_entry)
+{
+ kfree(fib_entry);
+}
+
+static struct mlxsw_sp_fib_entry *
+mlxsw_sp_fib_entry_lookup(struct mlxsw_sp_fib *fib, const void *addr,
+ size_t addr_len, unsigned char prefix_len,
+ struct net_device *dev)
+{
+ struct mlxsw_sp_fib_key key;
+
+ memset(&key, 0, sizeof(key));
+ key.dev = dev;
+ memcpy(key.addr, addr, addr_len);
+ key.prefix_len = prefix_len;
+ return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
+}
+
+static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
+{
+ struct mlxsw_sp_fib *fib;
+ int err;
+
+ fib = kzalloc(sizeof(*fib), GFP_KERNEL);
+ if (!fib)
+ return ERR_PTR(-ENOMEM);
+ err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
+ if (err)
+ goto err_rhashtable_init;
+ return fib;
+
+err_rhashtable_init:
+ kfree(fib);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
+{
+ rhashtable_destroy(&fib->ht);
+ kfree(fib);
+}
+
+static struct mlxsw_sp_lpm_tree *
+mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp, bool one_reserved)
+{
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
+ lpm_tree = &mlxsw_sp->router.lpm_trees[i];
+ if (lpm_tree->ref_count == 0) {
+ if (one_reserved)
+ one_reserved = false;
+ else
+ return lpm_tree;
+ }
+ }
+ return NULL;
+}
+
+static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_lpm_tree *lpm_tree)
+{
+ char ralta_pl[MLXSW_REG_RALTA_LEN];
+
+ mlxsw_reg_ralta_pack(ralta_pl, true, lpm_tree->proto, lpm_tree->id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
+}
+
+static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_lpm_tree *lpm_tree)
+{
+ char ralta_pl[MLXSW_REG_RALTA_LEN];
+
+ mlxsw_reg_ralta_pack(ralta_pl, false, lpm_tree->proto, lpm_tree->id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
+}
+
+static int
+mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_prefix_usage *prefix_usage,
+ struct mlxsw_sp_lpm_tree *lpm_tree)
+{
+ char ralst_pl[MLXSW_REG_RALST_LEN];
+ u8 root_bin = 0;
+ u8 prefix;
+ u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
+
+ mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
+ root_bin = prefix;
+
+ mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
+ mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
+ if (prefix == 0)
+ continue;
+ mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
+ MLXSW_REG_RALST_BIN_NO_CHILD);
+ last_prefix = prefix;
+ }
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
+}
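
mlxsw_sp_lpm_tree_left_struct_set() walks the used prefix lengths in ascending order, makes the longest one the root bin and links each bin's left child to the next shorter used prefix, yielding a left-deep chain the hardware descends during longest-prefix match. A sketch of the chain emitted for the prefix set {/0, /16, /24, /32}:

#include <stdio.h>

#define NO_CHILD 0xff

int main(void)
{
	unsigned prefixes[] = { 0, 16, 24, 32 }; /* ascending, as the
						  * set-bit walk yields */
	unsigned n = sizeof(prefixes) / sizeof(prefixes[0]);
	unsigned last = NO_CHILD;
	unsigned i;

	printf("root bin: /%u\n", prefixes[n - 1]);
	for (i = 0; i < n; i++) {
		if (prefixes[i] == 0)
			continue; /* prefix 0 is skipped, like above */
		if (last == NO_CHILD)
			printf("bin /%u: no left child\n", prefixes[i]);
		else
			printf("bin /%u: left child /%u\n", prefixes[i],
			       last);
		last = prefixes[i];
	}
	return 0;
}
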
+
+static struct mlxsw_sp_lpm_tree *
+mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_prefix_usage *prefix_usage,
+ enum mlxsw_sp_l3proto proto, bool one_reserved)
+{
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+ int err;
+
+ lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp, one_reserved);
+ if (!lpm_tree)
+ return ERR_PTR(-EBUSY);
+ lpm_tree->proto = proto;
+ err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
+ if (err)
+ return ERR_PTR(err);
+
+ err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
+ lpm_tree);
+ if (err)
+ goto err_left_struct_set;
+ return lpm_tree;
+
+err_left_struct_set:
+ mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
+ return ERR_PTR(err);
+}
+
+static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_lpm_tree *lpm_tree)
+{
+ return mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
+}
+
+static struct mlxsw_sp_lpm_tree *
+mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_prefix_usage *prefix_usage,
+ enum mlxsw_sp_l3proto proto, bool one_reserved)
+{
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
+ lpm_tree = &mlxsw_sp->router.lpm_trees[i];
+ if (lpm_tree->proto == proto &&
+ mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
+ prefix_usage))
+ goto inc_ref_count;
+ }
+ lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
+ proto, one_reserved);
+ if (IS_ERR(lpm_tree))
+ return lpm_tree;
+
+inc_ref_count:
+ lpm_tree->ref_count++;
+ return lpm_tree;
+}
+
+static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_lpm_tree *lpm_tree)
+{
+ if (--lpm_tree->ref_count == 0)
+ return mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
+ return 0;
+}
+
+static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
+ lpm_tree = &mlxsw_sp->router.lpm_trees[i];
+ lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
+ }
+}
+
+static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_vr *vr;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
+ vr = &mlxsw_sp->router.vrs[i];
+ if (!vr->used)
+ return vr;
+ }
+ return NULL;
+}
+
+static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_vr *vr)
+{
+ char raltb_pl[MLXSW_REG_RALTB_LEN];
+
+ mlxsw_reg_raltb_pack(raltb_pl, vr->id, vr->proto, vr->lpm_tree->id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
+}
+
+static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_vr *vr)
+{
+ char raltb_pl[MLXSW_REG_RALTB_LEN];
+
+ /* Bind to tree 0 which is default */
+ mlxsw_reg_raltb_pack(raltb_pl, vr->id, vr->proto, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
+}
+
+static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
+{
+ /* For our purpose, squash main and local table into one */
+ if (tb_id == RT_TABLE_LOCAL)
+ tb_id = RT_TABLE_MAIN;
+ return tb_id;
+}
+
+static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
+ u32 tb_id,
+ enum mlxsw_sp_l3proto proto)
+{
+ struct mlxsw_sp_vr *vr;
+ int i;
+
+ tb_id = mlxsw_sp_fix_tb_id(tb_id);
+ for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
+ vr = &mlxsw_sp->router.vrs[i];
+ if (vr->used && vr->proto == proto && vr->tb_id == tb_id)
+ return vr;
+ }
+ return NULL;
+}
+
+static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
+ unsigned char prefix_len,
+ u32 tb_id,
+ enum mlxsw_sp_l3proto proto)
+{
+ struct mlxsw_sp_prefix_usage req_prefix_usage;
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+ struct mlxsw_sp_vr *vr;
+ int err;
+
+ vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
+ if (!vr)
+ return ERR_PTR(-EBUSY);
+ vr->fib = mlxsw_sp_fib_create();
+ if (IS_ERR(vr->fib))
+ return ERR_CAST(vr->fib);
+
+ vr->proto = proto;
+ vr->tb_id = tb_id;
+ mlxsw_sp_prefix_usage_zero(&req_prefix_usage);
+ mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
+ lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
+ proto, true);
+ if (IS_ERR(lpm_tree)) {
+ err = PTR_ERR(lpm_tree);
+ goto err_tree_get;
+ }
+ vr->lpm_tree = lpm_tree;
+ err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
+ if (err)
+ goto err_tree_bind;
+
+ vr->used = true;
+ return vr;
+
+err_tree_bind:
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
+err_tree_get:
+ mlxsw_sp_fib_destroy(vr->fib);
+
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_vr *vr)
+{
+ mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
+ mlxsw_sp_fib_destroy(vr->fib);
+ vr->used = false;
+}
+
+static int
+mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
+ struct mlxsw_sp_prefix_usage *req_prefix_usage)
+{
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+
+ if (mlxsw_sp_prefix_usage_eq(req_prefix_usage,
+ &vr->lpm_tree->prefix_usage))
+ return 0;
+
+ lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
+ vr->proto, false);
+ if (IS_ERR(lpm_tree)) {
+ /* We failed to get a tree according to the required
+ * prefix usage. However, the current tree might still be good
+ * for us if our requirement is a subset of the prefixes used
+ * in the tree.
+ */
+ if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
+ &vr->lpm_tree->prefix_usage))
+ return 0;
+ return PTR_ERR(lpm_tree);
+ }
+
+ mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
+ vr->lpm_tree = lpm_tree;
+ return mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
+}
+
+static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp,
+ unsigned char prefix_len,
+ u32 tb_id,
+ enum mlxsw_sp_l3proto proto)
+{
+ struct mlxsw_sp_vr *vr;
+ int err;
+
+ tb_id = mlxsw_sp_fix_tb_id(tb_id);
+ vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id, proto);
+ if (!vr) {
+ vr = mlxsw_sp_vr_create(mlxsw_sp, prefix_len, tb_id, proto);
+ if (IS_ERR(vr))
+ return vr;
+ } else {
+ struct mlxsw_sp_prefix_usage req_prefix_usage;
+
+ mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
+ &vr->fib->prefix_usage);
+ mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
+ /* Need to replace LPM tree in case new prefix is required. */
+ err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
+ &req_prefix_usage);
+ if (err)
+ return ERR_PTR(err);
+ }
+ return vr;
+}
+
+static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
+{
+ /* Destroy the virtual router entity in case the associated FIB is
+ * empty, allowing it to be reused for other tables in the future.
+ * Otherwise, check whether some prefix usage disappeared and switch
+ * to a smaller tree if so. Note that if a new, smaller tree cannot
+ * be allocated, the original one is kept in use.
+ */
+ if (mlxsw_sp_prefix_usage_none(&vr->fib->prefix_usage))
+ mlxsw_sp_vr_destroy(mlxsw_sp, vr);
+ else
+ mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
+ &vr->fib->prefix_usage);
+}
+
+static void mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_vr *vr;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
+ vr = &mlxsw_sp->router.vrs[i];
+ vr->id = i;
+ }
+}
+
+struct mlxsw_sp_neigh_key {
+ unsigned char addr[sizeof(struct in6_addr)];
+ struct net_device *dev;
+};
+
+struct mlxsw_sp_neigh_entry {
+ struct rhash_head ht_node;
+ struct mlxsw_sp_neigh_key key;
+ u16 rif;
+ struct neighbour *n;
+ bool offloaded;
+ struct delayed_work dw;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ unsigned char ha[ETH_ALEN];
+ struct list_head nexthop_list; /* list of nexthops using
+ * this neigh entry
+ */
+ struct list_head nexthop_neighs_list_node;
+};
+
+static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
+ .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
+ .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
+ .key_len = sizeof(struct mlxsw_sp_neigh_key),
+};
+
+static int
+mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry)
+{
+ return rhashtable_insert_fast(&mlxsw_sp->router.neigh_ht,
+ &neigh_entry->ht_node,
+ mlxsw_sp_neigh_ht_params);
+}
+
+static void
+mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry)
+{
+ rhashtable_remove_fast(&mlxsw_sp->router.neigh_ht,
+ &neigh_entry->ht_node,
+ mlxsw_sp_neigh_ht_params);
+}
+
+static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work);
+
+static struct mlxsw_sp_neigh_entry *
+mlxsw_sp_neigh_entry_create(const void *addr, size_t addr_len,
+ struct net_device *dev, u16 rif,
+ struct neighbour *n)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+
+ neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_ATOMIC);
+ if (!neigh_entry)
+ return NULL;
+ memcpy(neigh_entry->key.addr, addr, addr_len);
+ neigh_entry->key.dev = dev;
+ neigh_entry->rif = rif;
+ neigh_entry->n = n;
+ INIT_DELAYED_WORK(&neigh_entry->dw, mlxsw_sp_router_neigh_update_hw);
+ INIT_LIST_HEAD(&neigh_entry->nexthop_list);
+ return neigh_entry;
+}
+
+static void
+mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp_neigh_entry *neigh_entry)
+{
+ kfree(neigh_entry);
+}
+
+static struct mlxsw_sp_neigh_entry *
+mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, const void *addr,
+ size_t addr_len, struct net_device *dev)
+{
+ struct mlxsw_sp_neigh_key key = {{ 0 } };
+
+ memcpy(key.addr, addr, addr_len);
+ key.dev = dev;
+ return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht,
+ &key, mlxsw_sp_neigh_ht_params);
+}
+
+int mlxsw_sp_router_neigh_construct(struct net_device *dev,
+ struct neighbour *n)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+ struct mlxsw_sp_rif *r;
+ u32 dip;
+ int err;
+
+ if (n->tbl != &arp_tbl)
+ return 0;
+
+ dip = ntohl(*((__be32 *) n->primary_key));
+ neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip),
+ n->dev);
+ if (neigh_entry) {
+ WARN_ON(neigh_entry->n != n);
+ return 0;
+ }
+
+ r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
+ if (WARN_ON(!r))
+ return -EINVAL;
+
+ neigh_entry = mlxsw_sp_neigh_entry_create(&dip, sizeof(dip), n->dev,
+ r->rif, n);
+ if (!neigh_entry)
+ return -ENOMEM;
+ err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
+ if (err)
+ goto err_neigh_entry_insert;
+ return 0;
+
+err_neigh_entry_insert:
+ mlxsw_sp_neigh_entry_destroy(neigh_entry);
+ return err;
+}
+
+void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
+ struct neighbour *n)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+ u32 dip;
+
+ if (n->tbl != &arp_tbl)
+ return;
+
+ dip = ntohl(*((__be32 *) n->primary_key));
+ neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip),
+ n->dev);
+ if (!neigh_entry)
+ return;
+ mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
+ mlxsw_sp_neigh_entry_destroy(neigh_entry);
+}
+
+static void
+mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
+{
+ unsigned long interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
+
+ mlxsw_sp->router.neighs_update.interval = jiffies_to_msecs(interval);
+}
+
+static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
+ char *rauhtd_pl,
+ int ent_index)
+{
+ struct net_device *dev;
+ struct neighbour *n;
+ __be32 dipn;
+ u32 dip;
+ u16 rif;
+
+ mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
+
+ if (!mlxsw_sp->rifs[rif]) {
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
+ return;
+ }
+
+ dipn = htonl(dip);
+ dev = mlxsw_sp->rifs[rif]->dev;
+ n = neigh_lookup(&arp_tbl, &dipn, dev);
+ if (!n) {
+ netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
+ &dip);
+ return;
+ }
+
+ netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
+ neigh_event_send(n, NULL);
+ neigh_release(n);
+}
+
+static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
+ char *rauhtd_pl,
+ int rec_index)
+{
+ u8 num_entries;
+ int i;
+
+ num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
+ rec_index);
+ /* Hardware starts counting at 0, so add 1. */
+ num_entries++;
+
+ /* Each record consists of several neighbour entries. */
+ for (i = 0; i < num_entries; i++) {
+ int ent_index;
+
+ ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
+ mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
+ ent_index);
+ }
+}
+
+static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
+ char *rauhtd_pl, int rec_index)
+{
+ switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
+ case MLXSW_REG_RAUHTD_TYPE_IPV4:
+ mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
+ rec_index);
+ break;
+ case MLXSW_REG_RAUHTD_TYPE_IPV6:
+ WARN_ON_ONCE(1);
+ break;
+ }
+}
+
+static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
+{
+ char *rauhtd_pl;
+ u8 num_rec;
+ int i, err;
+
+ rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
+ if (!rauhtd_pl)
+ return -ENOMEM;
+
+ /* Make sure the neighbour's netdev isn't removed in the
+ * process.
+ */
+ rtnl_lock();
+ do {
+ mlxsw_reg_rauhtd_pack(rauhtd_pl, MLXSW_REG_RAUHTD_TYPE_IPV4);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
+ rauhtd_pl);
+ if (err) {
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
+ break;
+ }
+ num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
+ for (i = 0; i < num_rec; i++)
+ mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
+ i);
+ } while (num_rec);
+ rtnl_unlock();
+
+ kfree(rauhtd_pl);
+ return err;
+}
+
+static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+
+ /* Take the RTNL mutex here to prevent the lists from changing */
+ rtnl_lock();
+ list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
+ nexthop_neighs_list_node) {
+ /* If this neigh has nexthops, make the kernel think it is
+ * active regardless of the traffic.
+ */
+ if (!list_empty(&neigh_entry->nexthop_list))
+ neigh_event_send(neigh_entry->n, NULL);
+ }
+ rtnl_unlock();
+}
+
+static void
+mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
+{
+ unsigned long interval = mlxsw_sp->router.neighs_update.interval;
+
+ mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw,
+ msecs_to_jiffies(interval));
+}
+
+static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
+{
+ struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
+ router.neighs_update.dw.work);
+ int err;
+
+ err = mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp);
+ if (err)
+ dev_err(mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity\n");
+
+ mlxsw_sp_router_neighs_update_nh(mlxsw_sp);
+
+ mlxsw_sp_router_neighs_update_work_schedule(mlxsw_sp);
+}
+
+static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+ struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
+ router.nexthop_probe_dw.work);
+
+ /* Iterate over the nexthop neighbours, find the unresolved ones and
+ * send ARP requests for them. This solves the chicken-and-egg problem
+ * in which a nexthop would not get offloaded until its neighbour is
+ * resolved, but the neighbour would never get resolved as long as
+ * traffic flows in HW via a different nexthop.
+ *
+ * Take the RTNL mutex here to prevent the lists from changing.
+ */
+ rtnl_lock();
+ list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
+ nexthop_neighs_list_node) {
+ if (!(neigh_entry->n->nud_state & NUD_VALID) &&
+ !list_empty(&neigh_entry->nexthop_list))
+ neigh_event_send(neigh_entry->n, NULL);
+ }
+ rtnl_unlock();
+
+ mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw,
+ MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
+}
+
+static void
+mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry,
+ bool removing);
+
+static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry =
+ container_of(work, struct mlxsw_sp_neigh_entry, dw.work);
+ struct neighbour *n = neigh_entry->n;
+ struct mlxsw_sp_port *mlxsw_sp_port = neigh_entry->mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char rauht_pl[MLXSW_REG_RAUHT_LEN];
+ struct net_device *dev;
+ bool entry_connected;
+ u8 nud_state;
+ bool updating;
+ bool removing;
+ bool adding;
+ u32 dip;
+ int err;
+
+ read_lock_bh(&n->lock);
+ dip = ntohl(*((__be32 *) n->primary_key));
+ memcpy(neigh_entry->ha, n->ha, sizeof(neigh_entry->ha));
+ nud_state = n->nud_state;
+ dev = n->dev;
+ read_unlock_bh(&n->lock);
+
+ entry_connected = nud_state & NUD_VALID;
+ adding = (!neigh_entry->offloaded) && entry_connected;
+ updating = neigh_entry->offloaded && entry_connected;
+ removing = neigh_entry->offloaded && !entry_connected;
+
+ if (adding || updating) {
+ mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_ADD,
+ neigh_entry->rif,
+ neigh_entry->ha, dip);
+ err = mlxsw_reg_write(mlxsw_sp->core,
+ MLXSW_REG(rauht), rauht_pl);
+ if (err) {
+ netdev_err(dev, "Could not add neigh %pI4h\n", &dip);
+ neigh_entry->offloaded = false;
+ } else {
+ neigh_entry->offloaded = true;
+ }
+ mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, false);
+ } else if (removing) {
+ mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE,
+ neigh_entry->rif,
+ neigh_entry->ha, dip);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht),
+ rauht_pl);
+ if (err) {
+ netdev_err(dev, "Could not delete neigh %pI4h\n", &dip);
+ neigh_entry->offloaded = true;
+ } else {
+ neigh_entry->offloaded = false;
+ }
+ mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, true);
+ }
+
+ neigh_release(n);
+ mlxsw_sp_port_dev_put(mlxsw_sp_port);
+}
+
+int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp;
+ unsigned long interval;
+ struct net_device *dev;
+ struct neigh_parms *p;
+ struct neighbour *n;
+ u32 dip;
+
+ switch (event) {
+ case NETEVENT_DELAY_PROBE_TIME_UPDATE:
+ p = ptr;
+
+ /* We don't care about changes in the default table. */
+ if (!p->dev || p->tbl != &arp_tbl)
+ return NOTIFY_DONE;
+
+ /* We are in atomic context and can't take RTNL mutex,
+ * so use RCU variant to walk the device chain.
+ */
+ mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
+ if (!mlxsw_sp_port)
+ return NOTIFY_DONE;
+
+ mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
+ mlxsw_sp->router.neighs_update.interval = interval;
+
+ mlxsw_sp_port_dev_put(mlxsw_sp_port);
+ break;
+ case NETEVENT_NEIGH_UPDATE:
+ n = ptr;
+ dev = n->dev;
+
+ if (n->tbl != &arp_tbl)
+ return NOTIFY_DONE;
+
+ mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(dev);
+ if (!mlxsw_sp_port)
+ return NOTIFY_DONE;
+
+ mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ dip = ntohl(*((__be32 *) n->primary_key));
+ neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp,
+ &dip,
+ sizeof(__be32),
+ dev);
+ if (WARN_ON(!neigh_entry) || WARN_ON(neigh_entry->n != n)) {
+ mlxsw_sp_port_dev_put(mlxsw_sp_port);
+ return NOTIFY_DONE;
+ }
+ neigh_entry->mlxsw_sp_port = mlxsw_sp_port;
+
+ /* Take a reference to ensure the neighbour won't be
+ * destructed until we drop the reference in the delayed
+ * work.
+ */
+ neigh_clone(n);
+ if (!mlxsw_core_schedule_dw(&neigh_entry->dw, 0)) {
+ neigh_release(n);
+ mlxsw_sp_port_dev_put(mlxsw_sp_port);
+ }
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ err = rhashtable_init(&mlxsw_sp->router.neigh_ht,
+ &mlxsw_sp_neigh_ht_params);
+ if (err)
+ return err;
+
+ /* Initialize the polling interval according to the default
+ * table.
+ */
+ mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
+
+ /* Create the delayed works for neigh activity update and for
+ * probing unresolved nexthops.
+ */
+ INIT_DELAYED_WORK(&mlxsw_sp->router.neighs_update.dw,
+ mlxsw_sp_router_neighs_update_work);
+ INIT_DELAYED_WORK(&mlxsw_sp->router.nexthop_probe_dw,
+ mlxsw_sp_router_probe_unresolved_nexthops);
+ mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw, 0);
+ mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, 0);
+ return 0;
+}
+
+static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ cancel_delayed_work_sync(&mlxsw_sp->router.neighs_update.dw);
+ cancel_delayed_work_sync(&mlxsw_sp->router.nexthop_probe_dw);
+ rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
+}
+
+struct mlxsw_sp_nexthop {
+ struct list_head neigh_list_node; /* member of neigh entry list */
+ struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
+ * this belongs to
+ */
+ u8 should_offload:1, /* set indicates this neigh is connected and
+ * should be put to KVD linear area of this group.
+ */
+ offloaded:1, /* set in case the neigh is actually put into
+ * KVD linear area of this group.
+ */
+ update:1; /* set indicates that MAC of this neigh should be
+ * updated in HW
+ */
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+};
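
The three flag bits split responsibility between two paths: neighbour events only adjust should_offload and mark the nexthop dirty, while the group refresh further below reconciles offloaded with should_offload and recomputes the ECMP size. A minimal model of that protocol (a sketch, not driver code):

#include <stdio.h>

struct nh {
	unsigned int should_offload:1,
		     offloaded:1,
		     update:1;
};

static void neigh_event(struct nh *nh, int resolved)
{
	nh->should_offload = resolved ? 1 : 0;
	nh->update = 1;
}

static int refresh(struct nh *nh)
{
	if (nh->update && nh->should_offload) {
		/* the driver writes the neigh MAC via RATR here */
		nh->offloaded = 1;
		nh->update = 0;
	} else if (!nh->should_offload) {
		nh->offloaded = 0;
	}
	return nh->offloaded; /* contributes one slot to ecmp_size */
}

int main(void)
{
	struct nh nh = { 0, 0, 0 };

	neigh_event(&nh, 1);
	printf("resolved -> offloaded=%d\n", refresh(&nh));
	neigh_event(&nh, 0);
	printf("unresolved -> offloaded=%d\n", refresh(&nh));
	return 0;
}
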
+
+struct mlxsw_sp_nexthop_group {
+ struct list_head list; /* node in mlxsw->router.nexthop_group_list */
+ struct list_head fib_list; /* list of fib entries that use this group */
+ u8 adj_index_valid:1;
+ u32 adj_index;
+ u16 ecmp_size;
+ u16 count;
+ struct mlxsw_sp_nexthop nexthops[0];
+};
+
+static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_vr *vr,
+ u32 adj_index, u16 ecmp_size,
+ u32 new_adj_index,
+ u16 new_ecmp_size)
+{
+ char raleu_pl[MLXSW_REG_RALEU_LEN];
+
+ mlxsw_reg_raleu_pack(raleu_pl, vr->proto, vr->id,
+ adj_index, ecmp_size,
+ new_adj_index, new_ecmp_size);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
+}
+
+static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp,
+ u32 old_adj_index, u16 old_ecmp_size)
+{
+ struct mlxsw_sp_fib_entry *fib_entry;
+ struct mlxsw_sp_vr *vr = NULL;
+ int err;
+
+ list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
+ if (vr == fib_entry->vr)
+ continue;
+ vr = fib_entry->vr;
+ err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr,
+ old_adj_index,
+ old_ecmp_size,
+ nh_grp->adj_index,
+ nh_grp->ecmp_size);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
+ struct mlxsw_sp_nexthop *nh)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
+ char ratr_pl[MLXSW_REG_RATR_LEN];
+
+ mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
+ true, adj_index, neigh_entry->rif);
+ mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
+}
+
+static int
+mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ u32 adj_index = nh_grp->adj_index; /* base */
+ struct mlxsw_sp_nexthop *nh;
+ int i;
+ int err;
+
+ for (i = 0; i < nh_grp->count; i++) {
+ nh = &nh_grp->nexthops[i];
+
+ if (!nh->should_offload) {
+ nh->offloaded = 0;
+ continue;
+ }
+
+ if (nh->update) {
+ err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
+ adj_index, nh);
+ if (err)
+ return err;
+ nh->update = 0;
+ nh->offloaded = 1;
+ }
+ adj_index++;
+ }
+ return 0;
+}
+
+static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry);
+
+static int
+mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ struct mlxsw_sp_fib_entry *fib_entry;
+ int err;
+
+ list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static void
+mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ struct mlxsw_sp_nexthop *nh;
+ bool offload_change = false;
+ u32 adj_index;
+ u16 ecmp_size = 0;
+ bool old_adj_index_valid;
+ u32 old_adj_index;
+ u16 old_ecmp_size;
+ int ret;
+ int i;
+ int err;
+
+ for (i = 0; i < nh_grp->count; i++) {
+ nh = &nh_grp->nexthops[i];
+
+ if (nh->should_offload ^ nh->offloaded) {
+ offload_change = true;
+ if (nh->should_offload)
+ nh->update = 1;
+ }
+ if (nh->should_offload)
+ ecmp_size++;
+ }
+ if (!offload_change) {
+ /* Nothing was added or removed, so no need to reallocate. Just
+ * update MAC on existing adjacency indexes.
+ */
+ err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
+ if (err) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
+ goto set_trap;
+ }
+ return;
+ }
+ if (!ecmp_size)
+ /* No neigh of this group is connected so we just set
+ * the trap and let everything flow through the kernel.
+ */
+ goto set_trap;
+
+ ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size);
+ if (ret < 0) {
+ /* We ran out of KVD linear space, just set the
+ * trap and let everything flow through the kernel.
+ */
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
+ goto set_trap;
+ }
+ adj_index = ret;
+ old_adj_index_valid = nh_grp->adj_index_valid;
+ old_adj_index = nh_grp->adj_index;
+ old_ecmp_size = nh_grp->ecmp_size;
+ nh_grp->adj_index_valid = 1;
+ nh_grp->adj_index = adj_index;
+ nh_grp->ecmp_size = ecmp_size;
+ err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
+ if (err) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
+ goto set_trap;
+ }
+
+ if (!old_adj_index_valid) {
+ /* The trap was set for fib entries, so we have to call
+ * fib entry update to unset it and use the adjacency index.
+ */
+ err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
+ if (err) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
+ goto set_trap;
+ }
+ return;
+ }
+
+ err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
+ old_adj_index, old_ecmp_size);
+ mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
+ if (err) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
+ goto set_trap;
+ }
+ return;
+
+set_trap:
+ old_adj_index_valid = nh_grp->adj_index_valid;
+ nh_grp->adj_index_valid = 0;
+ for (i = 0; i < nh_grp->count; i++) {
+ nh = &nh_grp->nexthops[i];
+ nh->offloaded = 0;
+ }
+ err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
+ if (err)
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
+ if (old_adj_index_valid)
+ mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
+}
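
The refresh routine above is careful to be make-before-break: a new adjacency block is allocated and fully written before the FIB entries are mass-updated to point at it, and the old block is freed only afterwards, so forwarding never references unwritten entries. A sketch of that ordering, where every name is an illustrative stand-in for the driver calls (mlxsw_sp_kvdl_alloc(), the RATR writes, the RALEU mass update and mlxsw_sp_kvdl_free()):

#include <stdio.h>

static int next_free;

static int alloc_block(int n)
{
	int base = next_free;

	next_free += n;	/* bump allocator stands in for the KVDL bitmap */
	return base;
}

static void write_entries(int base, int n)
{
	printf("write %d adjacency entries at %d\n", n, base);
}

static void repoint_routes(int base, int n)
{
	printf("routes now use [%d, %d)\n", base, base + n);
}

static void free_block(int base)
{
	printf("free block at %d\n", base);
}

int main(void)
{
	int old_base, new_base;

	old_base = alloc_block(2);
	write_entries(old_base, 2);
	repoint_routes(old_base, 2);

	/* A third nexthop became resolved: grow the group from 2 to 3. */
	new_base = alloc_block(3);
	write_entries(new_base, 3);	/* 1. fill the new block first */
	repoint_routes(new_base, 3);	/* 2. then mass-update the routes */
	free_block(old_base);		/* 3. only now release the old one */
	return 0;
}

If any step fails, the driver instead falls back to the set_trap path above, which re-points all FIB entries at the CPU trap before releasing the adjacency block.
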
+
+static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
+ bool removing)
+{
+ if (!removing && !nh->should_offload)
+ nh->should_offload = 1;
+ else if (removing && nh->offloaded)
+ nh->should_offload = 0;
+ nh->update = 1;
+}
+
+static void
+mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry,
+ bool removing)
+{
+ struct mlxsw_sp_nexthop *nh;
+
+ /* Take the RTNL mutex here to prevent the lists from changing */
+ rtnl_lock();
+ list_for_each_entry(nh, &neigh_entry->nexthop_list,
+ neigh_list_node) {
+ __mlxsw_sp_nexthop_neigh_update(nh, removing);
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
+ }
+ rtnl_unlock();
+}
+
+static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp,
+ struct mlxsw_sp_nexthop *nh,
+ struct fib_nh *fib_nh)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+ u32 gwip = ntohl(fib_nh->nh_gw);
+ struct net_device *dev = fib_nh->nh_dev;
+ struct neighbour *n;
+ u8 nud_state;
+
+ neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip,
+ sizeof(gwip), dev);
+ if (!neigh_entry) {
+ __be32 gwipn = htonl(gwip);
+
+ n = neigh_create(&arp_tbl, &gwipn, dev);
+ if (IS_ERR(n))
+ return PTR_ERR(n);
+ neigh_event_send(n, NULL);
+ neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip,
+ sizeof(gwip), dev);
+ if (!neigh_entry) {
+ neigh_release(n);
+ return -EINVAL;
+ }
+ } else {
+ /* Take a reference on the neigh here, ensuring that it is
+ * not destructed before the nexthop entry is finished.
+ * The other branch takes its reference in neigh_create().
+ */
+ n = neigh_entry->n;
+ neigh_clone(n);
+ }
+
+ /* If that is the first nexthop connected to that neigh, add to
+ * nexthop_neighs_list
+ */
+ if (list_empty(&neigh_entry->nexthop_list))
+ list_add_tail(&neigh_entry->nexthop_neighs_list_node,
+ &mlxsw_sp->router.nexthop_neighs_list);
+
+ nh->nh_grp = nh_grp;
+ nh->neigh_entry = neigh_entry;
+ list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
+ read_lock_bh(&n->lock);
+ nud_state = n->nud_state;
+ read_unlock_bh(&n->lock);
+ __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID));
+
+ return 0;
+}
+
+static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
+
+ list_del(&nh->neigh_list_node);
+
+ /* If that is the last nexthop connected to that neigh, remove from
+ * nexthop_neighs_list
+ */
+ if (list_empty(&nh->neigh_entry->nexthop_list))
+ list_del(&nh->neigh_entry->nexthop_neighs_list_node);
+
+ neigh_release(neigh_entry->n);
+}
+
+static struct mlxsw_sp_nexthop_group *
+mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp;
+ struct mlxsw_sp_nexthop *nh;
+ struct fib_nh *fib_nh;
+ size_t alloc_size;
+ int i;
+ int err;
+
+ alloc_size = sizeof(*nh_grp) +
+ fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
+ nh_grp = kzalloc(alloc_size, GFP_KERNEL);
+ if (!nh_grp)
+ return ERR_PTR(-ENOMEM);
+ INIT_LIST_HEAD(&nh_grp->fib_list);
+ nh_grp->count = fi->fib_nhs;
+ for (i = 0; i < nh_grp->count; i++) {
+ nh = &nh_grp->nexthops[i];
+ fib_nh = &fi->fib_nh[i];
+ err = mlxsw_sp_nexthop_init(mlxsw_sp, nh_grp, nh, fib_nh);
+ if (err)
+ goto err_nexthop_init;
+ }
+ list_add_tail(&nh_grp->list, &mlxsw_sp->router.nexthop_group_list);
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ return nh_grp;
+
+err_nexthop_init:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
+ kfree(nh_grp);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ struct mlxsw_sp_nexthop *nh;
+ int i;
+
+ list_del(&nh_grp->list);
+ for (i = 0; i < nh_grp->count; i++) {
+ nh = &nh_grp->nexthops[i];
+ mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
+ }
+ kfree(nh_grp);
+}
+
+static bool mlxsw_sp_nexthop_match(struct mlxsw_sp_nexthop *nh,
+ struct fib_info *fi)
+{
+ int i;
+
+ for (i = 0; i < fi->fib_nhs; i++) {
+ struct fib_nh *fib_nh = &fi->fib_nh[i];
+ u32 gwip = ntohl(fib_nh->nh_gw);
+
+ if (memcmp(nh->neigh_entry->key.addr,
+ &gwip, sizeof(u32)) == 0 &&
+ nh->neigh_entry->key.dev == fib_nh->nh_dev)
+ return true;
+ }
+ return false;
+}
+
+static bool mlxsw_sp_nexthop_group_match(struct mlxsw_sp_nexthop_group *nh_grp,
+ struct fib_info *fi)
+{
+ int i;
+
+ if (nh_grp->count != fi->fib_nhs)
+ return false;
+ for (i = 0; i < nh_grp->count; i++) {
+ struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
+
+ if (!mlxsw_sp_nexthop_match(nh, fi))
+ return false;
+ }
+ return true;
+}
+
+static struct mlxsw_sp_nexthop_group *
+mlxsw_sp_nexthop_group_find(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp;
+
+ list_for_each_entry(nh_grp, &mlxsw_sp->router.nexthop_group_list,
+ list) {
+ if (mlxsw_sp_nexthop_group_match(nh_grp, fi))
+ return nh_grp;
+ }
+ return NULL;
+}
+
+static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ struct fib_info *fi)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp;
+
+ nh_grp = mlxsw_sp_nexthop_group_find(mlxsw_sp, fi);
+ if (!nh_grp) {
+ nh_grp = mlxsw_sp_nexthop_group_create(mlxsw_sp, fi);
+ if (IS_ERR(nh_grp))
+ return PTR_ERR(nh_grp);
+ }
+ list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
+ fib_entry->nh_group = nh_grp;
+ return 0;
+}
+
+static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
+
+ list_del(&fib_entry->nexthop_group_node);
+ if (!list_empty(&nh_grp->fib_list))
+ return;
+ mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp);
+}
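
Routes with an identical nexthop set share a single nexthop group: the _get() path finds an existing group by comparing gateways or creates a new one, and _put() destroys the group once its fib_list empties. A userspace sketch of the same find-or-create sharing, where the string key and user count are simplifications of the gateway comparison and the fib_list membership test:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct group {
	char key[32];
	int users;
	struct group *next;
};

static struct group *groups;

static struct group *group_get(const char *key)
{
	struct group *g;

	for (g = groups; g; g = g->next)
		if (strcmp(g->key, key) == 0)
			goto found;
	g = calloc(1, sizeof(*g));
	if (!g)
		return NULL;
	snprintf(g->key, sizeof(g->key), "%s", key);
	g->next = groups;
	groups = g;
found:
	g->users++;
	return g;
}

static void group_put(struct group *g)
{
	struct group **p;

	if (--g->users)
		return;
	for (p = &groups; *p != g; p = &(*p)->next)
		;
	*p = g->next;	/* unlink the last user's group, then destroy */
	free(g);
}

int main(void)
{
	struct group *a = group_get("gw 10.0.0.1 + gw 10.0.0.2");
	struct group *b = group_get("gw 10.0.0.1 + gw 10.0.0.2");

	printf("shared group, %d users\n", a->users);	/* 2; a == b */
	group_put(b);
	group_put(a);
	return 0;
}
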
+
+static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
+{
+ char rgcr_pl[MLXSW_REG_RGCR_LEN];
+
+ mlxsw_reg_rgcr_pack(rgcr_pl, true);
+ mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, MLXSW_SP_RIF_MAX);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
+}
+
+static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ char rgcr_pl[MLXSW_REG_RGCR_LEN];
+
+ mlxsw_reg_rgcr_pack(rgcr_pl, false);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
+}
+
+int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_neighs_list);
+ INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_group_list);
+ err = __mlxsw_sp_router_init(mlxsw_sp);
+ if (err)
+ return err;
+ mlxsw_sp_lpm_init(mlxsw_sp);
+ mlxsw_sp_vrs_init(mlxsw_sp);
+ err = mlxsw_sp_neigh_init(mlxsw_sp);
+ if (err)
+ goto err_neigh_init;
+ return 0;
+
+err_neigh_init:
+ __mlxsw_sp_router_fini(mlxsw_sp);
+ return err;
+}
+
+void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp_neigh_fini(mlxsw_sp);
+ __mlxsw_sp_router_fini(mlxsw_sp);
+}
+
+static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
+ u32 *p_dip = (u32 *) fib_entry->key.addr;
+ struct mlxsw_sp_vr *vr = fib_entry->vr;
+ enum mlxsw_reg_ralue_trap_action trap_action;
+ u16 trap_id = 0;
+ u32 adjacency_index = 0;
+ u16 ecmp_size = 0;
+
+ /* In case the nexthop group adjacency index is valid, use it
+ * with the provided ECMP size. Otherwise, set up a trap and pass
+ * traffic to the kernel.
+ */
+ if (fib_entry->nh_group->adj_index_valid) {
+ trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
+ adjacency_index = fib_entry->nh_group->adj_index;
+ ecmp_size = fib_entry->nh_group->ecmp_size;
+ } else {
+ trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
+ trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
+ }
+
+ mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
+ fib_entry->key.prefix_len, *p_dip);
+ mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
+ adjacency_index, ecmp_size);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+}
+
+static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
+ u32 *p_dip = (u32 *) fib_entry->key.addr;
+ struct mlxsw_sp_vr *vr = fib_entry->vr;
+
+ mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
+ fib_entry->key.prefix_len, *p_dip);
+ mlxsw_reg_ralue_act_local_pack(ralue_pl,
+ MLXSW_REG_RALUE_TRAP_ACTION_NOP, 0,
+ fib_entry->rif);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+}
+
+static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
+ u32 *p_dip = (u32 *) fib_entry->key.addr;
+ struct mlxsw_sp_vr *vr = fib_entry->vr;
+
+ mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
+ fib_entry->key.prefix_len, *p_dip);
+ mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+}
+
+static int mlxsw_sp_fib_entry_op4(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ switch (fib_entry->type) {
+ case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
+ return mlxsw_sp_fib_entry_op4_remote(mlxsw_sp, fib_entry, op);
+ case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
+ return mlxsw_sp_fib_entry_op4_local(mlxsw_sp, fib_entry, op);
+ case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
+ return mlxsw_sp_fib_entry_op4_trap(mlxsw_sp, fib_entry, op);
+ }
+ return -EINVAL;
+}
+
+static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ switch (fib_entry->vr->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ return mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
+ case MLXSW_SP_L3_PROTO_IPV6:
+ return -EINVAL;
+ }
+ return -EINVAL;
+}
+
+static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
+ MLXSW_REG_RALUE_OP_WRITE_WRITE);
+}
+
+static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
+ MLXSW_REG_RALUE_OP_WRITE_DELETE);
+}
+
+struct mlxsw_sp_router_fib4_add_info {
+ struct switchdev_trans_item tritem;
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_fib_entry *fib_entry;
+};
+
+static void mlxsw_sp_router_fib4_add_info_destroy(void const *data)
+{
+ const struct mlxsw_sp_router_fib4_add_info *info = data;
+ struct mlxsw_sp_fib_entry *fib_entry = info->fib_entry;
+ struct mlxsw_sp *mlxsw_sp = info->mlxsw_sp;
+ struct mlxsw_sp_vr *vr = fib_entry->vr;
+
+ mlxsw_sp_fib_entry_destroy(fib_entry);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
+ kfree(info);
+}
+
+static int
+mlxsw_sp_router_fib4_entry_init(struct mlxsw_sp *mlxsw_sp,
+ const struct switchdev_obj_ipv4_fib *fib4,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ struct fib_info *fi = fib4->fi;
+
+ if (fib4->type == RTN_LOCAL || fib4->type == RTN_BROADCAST) {
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
+ return 0;
+ }
+ if (fib4->type != RTN_UNICAST)
+ return -EINVAL;
+
+ if (fi->fib_scope != RT_SCOPE_UNIVERSE) {
+ struct mlxsw_sp_rif *r;
+
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
+ r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fi->fib_dev);
+ if (!r)
+ return -EINVAL;
+ fib_entry->rif = r->rif;
+ return 0;
+ }
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
+ return mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fi);
+}
+
+static void
+mlxsw_sp_router_fib4_entry_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_REMOTE)
+ return;
+ mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
+}
+
+static struct mlxsw_sp_fib_entry *
+mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp,
+ const struct switchdev_obj_ipv4_fib *fib4)
+{
+ struct mlxsw_sp_fib_entry *fib_entry;
+ struct fib_info *fi = fib4->fi;
+ struct mlxsw_sp_vr *vr;
+ int err;
+
+ vr = mlxsw_sp_vr_get(mlxsw_sp, fib4->dst_len, fib4->tb_id,
+ MLXSW_SP_L3_PROTO_IPV4);
+ if (IS_ERR(vr))
+ return ERR_CAST(vr);
+
+ fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
+ sizeof(fib4->dst),
+ fib4->dst_len, fi->fib_dev);
+ if (fib_entry) {
+ /* Already exists, just take a reference */
+ fib_entry->ref_count++;
+ return fib_entry;
+ }
+ fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fib4->dst,
+ sizeof(fib4->dst),
+ fib4->dst_len, fi->fib_dev);
+ if (!fib_entry) {
+ err = -ENOMEM;
+ goto err_fib_entry_create;
+ }
+ fib_entry->vr = vr;
+ fib_entry->ref_count = 1;
+
+ err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fib4, fib_entry);
+ if (err)
+ goto err_fib4_entry_init;
+
+ return fib_entry;
+
+err_fib4_entry_init:
+ mlxsw_sp_fib_entry_destroy(fib_entry);
+err_fib_entry_create:
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
+
+ return ERR_PTR(err);
+}
+
+static struct mlxsw_sp_fib_entry *
+mlxsw_sp_fib_entry_find(struct mlxsw_sp *mlxsw_sp,
+ const struct switchdev_obj_ipv4_fib *fib4)
+{
+ struct mlxsw_sp_vr *vr;
+
+ vr = mlxsw_sp_vr_find(mlxsw_sp, fib4->tb_id, MLXSW_SP_L3_PROTO_IPV4);
+ if (!vr)
+ return NULL;
+
+ return mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
+ sizeof(fib4->dst), fib4->dst_len,
+ fib4->fi->fib_dev);
+}
+
+void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ struct mlxsw_sp_vr *vr = fib_entry->vr;
+
+ if (--fib_entry->ref_count == 0) {
+ mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
+ mlxsw_sp_fib_entry_destroy(fib_entry);
+ }
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
+}
+
+static int
+mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_ipv4_fib *fib4,
+ struct switchdev_trans *trans)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_router_fib4_add_info *info;
+ struct mlxsw_sp_fib_entry *fib_entry;
+ int err;
+
+ fib_entry = mlxsw_sp_fib_entry_get(mlxsw_sp, fib4);
+ if (IS_ERR(fib_entry))
+ return PTR_ERR(fib_entry);
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ err = -ENOMEM;
+ goto err_alloc_info;
+ }
+ info->mlxsw_sp = mlxsw_sp;
+ info->fib_entry = fib_entry;
+ switchdev_trans_item_enqueue(trans, info,
+ mlxsw_sp_router_fib4_add_info_destroy,
+ &info->tritem);
+ return 0;
+
+err_alloc_info:
+ mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
+ return err;
+}
+
+static int
+mlxsw_sp_router_fib4_add_commit(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_ipv4_fib *fib4,
+ struct switchdev_trans *trans)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_router_fib4_add_info *info;
+ struct mlxsw_sp_fib_entry *fib_entry;
+ struct mlxsw_sp_vr *vr;
+ int err;
+
+ info = switchdev_trans_item_dequeue(trans);
+ fib_entry = info->fib_entry;
+ kfree(info);
+
+ if (fib_entry->ref_count != 1)
+ return 0;
+
+ vr = fib_entry->vr;
+ err = mlxsw_sp_fib_entry_insert(vr->fib, fib_entry);
+ if (err)
+ goto err_fib_entry_insert;
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp_port->mlxsw_sp, fib_entry);
+ if (err)
+ goto err_fib_entry_add;
+ return 0;
+
+err_fib_entry_add:
+ mlxsw_sp_fib_entry_remove(vr->fib, fib_entry);
+err_fib_entry_insert:
+ mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
+ return err;
+}
+
+int mlxsw_sp_router_fib4_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_ipv4_fib *fib4,
+ struct switchdev_trans *trans)
+{
+ if (switchdev_trans_ph_prepare(trans))
+ return mlxsw_sp_router_fib4_add_prepare(mlxsw_sp_port,
+ fib4, trans);
+ return mlxsw_sp_router_fib4_add_commit(mlxsw_sp_port,
+ fib4, trans);
+}
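
mlxsw_sp_router_fib4_add() follows switchdev's two-phase model: everything that can fail (entry, VR and nexthop-group allocation) happens in the prepare phase and is queued on the transaction so an abort can free it, while the commit phase only inserts and programs the already-prepared entry. A minimal sketch of that dispatch, with an int flag standing in for the queued info item (types here are illustrative):

#include <stdio.h>

enum phase { PHASE_PREPARE, PHASE_COMMIT };

static int fib4_add(enum phase ph, int *prepared)
{
	if (ph == PHASE_PREPARE) {
		*prepared = 1;	/* get entry, VR and nexthop group here */
		return 0;	/* nonzero would abort the transaction */
	}
	if (!*prepared)
		return -1;
	printf("insert into FIB hash and write RALUE\n");
	return 0;
}

int main(void)
{
	int prepared = 0;

	if (fib4_add(PHASE_PREPARE, &prepared) == 0)
		fib4_add(PHASE_COMMIT, &prepared);
	return 0;
}
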
+
+int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_ipv4_fib *fib4)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_fib_entry *fib_entry;
+
+ fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fib4);
+ if (!fib_entry) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n");
+ return -ENOENT;
+ }
+
+ if (fib_entry->ref_count == 1) {
+ mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
+ mlxsw_sp_fib_entry_remove(fib_entry->vr->fib, fib_entry);
+ }
+
+ mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 3710f19ed..7b654c517 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -55,13 +55,10 @@
static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid)
{
+ struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
u16 fid = vid;
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
- u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
-
- fid = mlxsw_sp_vfid_to_fid(vfid);
- }
+ fid = f ? f->fid : fid;
if (!fid)
fid = mlxsw_sp_port->pvid;
@@ -169,14 +166,9 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}
-static bool mlxsw_sp_vfid_is_vport_br(u16 vfid)
-{
- return vfid >= MLXSW_SP_VFID_PORT_MAX;
-}
-
static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 idx_begin, u16 idx_end, bool set,
- bool only_uc)
+ u16 idx_begin, u16 idx_end, bool uc_set,
+ bool bm_set)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u16 local_port = mlxsw_sp_port->local_port;
@@ -185,43 +177,32 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
char *sftr_pl;
int err;
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
+ if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
- if (mlxsw_sp_vfid_is_vport_br(idx_begin))
- local_port = mlxsw_sp_port->local_port;
- else
- local_port = MLXSW_PORT_CPU_PORT;
- } else {
+ else
table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
- }
sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
if (!sftr_pl)
return -ENOMEM;
mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
- table_type, range, local_port, set);
+ table_type, range, local_port, uc_set);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
if (err)
goto buffer_out;
- /* Flooding control allows one to decide whether a given port will
- * flood unicast traffic for which there is no FDB entry.
- */
- if (only_uc)
- goto buffer_out;
-
mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
- table_type, range, local_port, set);
+ table_type, range, local_port, bm_set);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
if (err)
goto err_flood_bm_set;
- else
- goto buffer_out;
+
+ goto buffer_out;
err_flood_bm_set:
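+	/* Roll back the unicast flood configuration set above. */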
mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
- table_type, range, local_port, !set);
+ table_type, range, local_port, !uc_set);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
buffer_out:
kfree(sftr_pl);
@@ -236,7 +217,8 @@ static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
int err;
if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
- u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
+ u16 fid = mlxsw_sp_vport_fid_get(mlxsw_sp_port)->fid;
+ u16 vfid = mlxsw_sp_fid_to_vfid(fid);
return __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid,
set, true);
@@ -260,14 +242,16 @@ err_port_flood_set:
return err;
}
-int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
- bool set, bool only_uc)
+int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
+ bool set)
{
+ u16 vfid;
+
	/* In case of vFIDs, the index into the flooding table is relative to
* the start of the vFIDs range.
*/
- return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set,
- only_uc);
+ vfid = mlxsw_sp_fid_to_vfid(fid);
+ return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set);
}
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -383,6 +367,192 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
return err;
}
+static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
+{
+ char sfmr_pl[MLXSW_REG_SFMR_LEN];
+
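+	/* Relies on MLXSW_REG_SFMR_OP_CREATE_FID being 0 and
+	 * MLXSW_REG_SFMR_OP_DESTROY_FID being 1, so !create yields the
+	 * correct SFMR operation.
+	 */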
+ mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, fid);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+}
+
+static int mlxsw_sp_fid_map(struct mlxsw_sp *mlxsw_sp, u16 fid, bool valid)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
+ char svfa_pl[MLXSW_REG_SVFA_LEN];
+
+ mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid, fid);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static struct mlxsw_sp_fid *mlxsw_sp_fid_alloc(u16 fid)
+{
+ struct mlxsw_sp_fid *f;
+
+ f = kzalloc(sizeof(*f), GFP_KERNEL);
+ if (!f)
+ return NULL;
+
+ f->fid = fid;
+
+ return f;
+}
+
+struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
+{
+ struct mlxsw_sp_fid *f;
+ int err;
+
+ err = mlxsw_sp_fid_op(mlxsw_sp, fid, true);
+ if (err)
+ return ERR_PTR(err);
+
+	/* Although all the ports that are members of the FID might be
+	 * using a {Port, VID} to FID mapping, we create a global
+	 * VID-to-FID mapping. This allows a port to transition to VLAN
+	 * mode, knowing the global mapping exists.
+	 */
+ err = mlxsw_sp_fid_map(mlxsw_sp, fid, true);
+ if (err)
+ goto err_fid_map;
+
+ f = mlxsw_sp_fid_alloc(fid);
+ if (!f) {
+ err = -ENOMEM;
+ goto err_allocate_fid;
+ }
+
+ list_add(&f->list, &mlxsw_sp->fids);
+
+ return f;
+
+err_allocate_fid:
+ mlxsw_sp_fid_map(mlxsw_sp, fid, false);
+err_fid_map:
+ mlxsw_sp_fid_op(mlxsw_sp, fid, false);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
+{
+ u16 fid = f->fid;
+
+ list_del(&f->list);
+
+ if (f->r)
+ mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
+
+ kfree(f);
+
+ mlxsw_sp_fid_map(mlxsw_sp, fid, false);
+
+ mlxsw_sp_fid_op(mlxsw_sp, fid, false);
+}
+
+static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 fid)
+{
+ struct mlxsw_sp_fid *f;
+
+ if (test_bit(fid, mlxsw_sp_port->active_vlans))
+ return 0;
+
+ f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
+ if (!f) {
+ f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+ }
+
+ f->ref_count++;
+
+ netdev_dbg(mlxsw_sp_port->dev, "Joined FID=%d\n", fid);
+
+ return 0;
+}
+
+static void __mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 fid)
+{
+ struct mlxsw_sp_fid *f;
+
+ f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
+ if (WARN_ON(!f))
+ return;
+
+ netdev_dbg(mlxsw_sp_port->dev, "Left FID=%d\n", fid);
+
+ mlxsw_sp_port_fdb_flush(mlxsw_sp_port, fid);
+
+ if (--f->ref_count == 0)
+ mlxsw_sp_fid_destroy(mlxsw_sp_port->mlxsw_sp, f);
+}
+
+static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid,
+ bool valid)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+
+	/* If the port doesn't have vPorts, then it can use the global
+ * VID-to-FID mapping.
+ */
+ if (list_empty(&mlxsw_sp_port->vports_list))
+ return 0;
+
+ return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, valid, fid, fid);
+}
+
+static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 fid_begin, u16 fid_end)
+{
+ int fid, err;
+
+ for (fid = fid_begin; fid <= fid_end; fid++) {
+ err = __mlxsw_sp_port_fid_join(mlxsw_sp_port, fid);
+ if (err)
+ goto err_port_fid_join;
+ }
+
+ err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end,
+ mlxsw_sp_port->uc_flood, true);
+ if (err)
+ goto err_port_flood_set;
+
+ for (fid = fid_begin; fid <= fid_end; fid++) {
+ err = mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, true);
+ if (err)
+ goto err_port_fid_map;
+ }
+
+ return 0;
+
+err_port_fid_map:
+ for (fid--; fid >= fid_begin; fid--)
+ mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
+ __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
+ false);
+err_port_flood_set:
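+	/* All FIDs were joined at this point; fall through and leave them. */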
+ fid = fid_end;
+err_port_fid_join:
+ for (fid--; fid >= fid_begin; fid--)
+ __mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
+ return err;
+}
+
+static void mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 fid_begin, u16 fid_end)
+{
+ int fid;
+
+ for (fid = fid_begin; fid <= fid_end; fid++)
+ mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
+
+ __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
+ false);
+
+ for (fid = fid_begin; fid <= fid_end; fid++)
+ __mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
+}
+
static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid)
{
@@ -440,74 +610,6 @@ err_port_allow_untagged_set:
return err;
}
-static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
-{
- char sfmr_pl[MLXSW_REG_SFMR_LEN];
- int err;
-
- mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, fid);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
-
- if (err)
- return err;
-
- set_bit(fid, mlxsw_sp->active_fids);
- return 0;
-}
-
-static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, u16 fid)
-{
- char sfmr_pl[MLXSW_REG_SFMR_LEN];
-
- clear_bit(fid, mlxsw_sp->active_fids);
-
- mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
- fid, fid);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
-}
-
-static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
-{
- enum mlxsw_reg_svfa_mt mt;
-
- if (!list_empty(&mlxsw_sp_port->vports_list))
- mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
- else
- mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
-
- return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid, fid);
-}
-
-static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
-{
- enum mlxsw_reg_svfa_mt mt;
-
- if (list_empty(&mlxsw_sp_port->vports_list))
- return 0;
-
- mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
- return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid);
-}
-
-static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin,
- u16 vid_end)
-{
- u16 vid;
- int err;
-
- for (vid = vid_begin; vid <= vid_end; vid++) {
- err = mlxsw_sp_port_add_vid(dev, 0, vid);
- if (err)
- goto err_port_add_vid;
- }
- return 0;
-
-err_port_add_vid:
- for (vid--; vid >= vid_begin; vid--)
- mlxsw_sp_port_kill_vid(dev, 0, vid);
- return err;
-}
-
static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid_begin, u16 vid_end, bool is_member,
bool untagged)
@@ -533,57 +635,17 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid_begin, u16 vid_end,
bool flag_untagged, bool flag_pvid)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct net_device *dev = mlxsw_sp_port->dev;
- u16 vid, last_visited_vid, old_pvid;
- enum mlxsw_reg_svfa_mt mt;
+ u16 vid, old_pvid;
int err;
- /* In case this is invoked with BRIDGE_FLAGS_SELF and port is
- * not bridged, then packets ingressing through the port with
- * the specified VIDs will be directed to CPU.
- */
if (!mlxsw_sp_port->bridged)
- return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end);
-
- for (vid = vid_begin; vid <= vid_end; vid++) {
- if (!test_bit(vid, mlxsw_sp->active_fids)) {
- err = mlxsw_sp_fid_create(mlxsw_sp, vid);
- if (err) {
- netdev_err(dev, "Failed to create FID=%d\n",
- vid);
- return err;
- }
-
- /* When creating a FID, we set a VID to FID mapping
- * regardless of the port's mode.
- */
- mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt,
- true, vid, vid);
- if (err) {
- netdev_err(dev, "Failed to create FID=VID=%d mapping\n",
- vid);
- goto err_port_vid_to_fid_set;
- }
- }
- }
-
- /* Set FID mapping according to port's mode */
- for (vid = vid_begin; vid <= vid_end; vid++) {
- err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid);
- if (err) {
- netdev_err(dev, "Failed to map FID=%d", vid);
- last_visited_vid = --vid;
- goto err_port_fid_map;
- }
- }
+ return -EINVAL;
- err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
- true, false);
+ err = mlxsw_sp_port_fid_join(mlxsw_sp_port, vid_begin, vid_end);
if (err) {
- netdev_err(dev, "Failed to configure flooding\n");
- goto err_port_flood_set;
+ netdev_err(dev, "Failed to join FIDs\n");
+ return err;
}
err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
@@ -628,10 +690,6 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
-err_port_vid_to_fid_set:
- mlxsw_sp_fid_destroy(mlxsw_sp, vid);
- return err;
-
err_port_stp_state_set:
for (vid = vid_begin; vid <= vid_end; vid++)
clear_bit(vid, mlxsw_sp_port->active_vlans);
@@ -641,13 +699,7 @@ err_port_pvid_set:
__mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
false);
err_port_vlans_set:
- __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, false,
- false);
-err_port_flood_set:
- last_visited_vid = vid_end;
-err_port_fid_map:
- for (vid = last_visited_vid; vid >= vid_begin; vid--)
- mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
+ mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
return err;
}
@@ -678,9 +730,10 @@ static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
MLXSW_REG_SFD_OP_WRITE_REMOVE;
}
-static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- const char *mac, u16 fid, bool adding,
- bool dynamic)
+static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ const char *mac, u16 fid, bool adding,
+ enum mlxsw_reg_sfd_rec_action action,
+ bool dynamic)
{
char *sfd_pl;
int err;
@@ -691,14 +744,29 @@ static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
- mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
- local_port);
+ mac, fid, action, local_port);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
kfree(sfd_pl);
return err;
}
+static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ const char *mac, u16 fid, bool adding,
+ bool dynamic)
+{
+ return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
+ MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
+}
+
+int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
+ bool adding)
+{
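+	/* Router FDB records are not tied to a local port, so use port 0
+	 * with the FORWARD_IP_ROUTER action.
+	 */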
+ return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
+ MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
+ false);
+}
+
static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
const char *mac, u16 fid, u16 lag_vid,
bool adding, bool dynamic)
@@ -903,6 +971,11 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
SWITCHDEV_OBJ_PORT_VLAN(obj),
trans);
break;
+ case SWITCHDEV_OBJ_ID_IPV4_FIB:
+ err = mlxsw_sp_router_fib4_add(mlxsw_sp_port,
+ SWITCHDEV_OBJ_IPV4_FIB(obj),
+ trans);
+ break;
case SWITCHDEV_OBJ_ID_PORT_FDB:
err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_FDB(obj),
@@ -921,34 +994,15 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
return err;
}
-static int mlxsw_sp_port_kill_vids(struct net_device *dev, u16 vid_begin,
- u16 vid_end)
-{
- u16 vid;
- int err;
-
- for (vid = vid_begin; vid <= vid_end; vid++) {
- err = mlxsw_sp_port_kill_vid(dev, 0, vid);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid_begin, u16 vid_end, bool init)
+ u16 vid_begin, u16 vid_end)
{
struct net_device *dev = mlxsw_sp_port->dev;
u16 vid, pvid;
int err;
- /* In case this is invoked with BRIDGE_FLAGS_SELF and port is
- * not bridged, then prevent packets ingressing through the
- * port with the specified VIDs from being trapped to CPU.
- */
- if (!init && !mlxsw_sp_port->bridged)
- return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end);
+ if (!mlxsw_sp_port->bridged)
+ return -EINVAL;
err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
false, false);
@@ -958,9 +1012,6 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
return err;
}
- if (init)
- goto out;
-
pvid = mlxsw_sp_port->pvid;
if (pvid >= vid_begin && pvid <= vid_end) {
err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
@@ -970,23 +1021,8 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
- err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
- false, false);
- if (err) {
- netdev_err(dev, "Failed to clear flooding\n");
- return err;
- }
+ mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
- for (vid = vid_begin; vid <= vid_end; vid++) {
- /* Remove FID mapping in case of Virtual mode */
- err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
- if (err) {
- netdev_err(dev, "Failed to unmap FID=%d", vid);
- return err;
- }
- }
-
-out:
	/* Changing activity bits only if HW operation succeeded */
for (vid = vid_begin; vid <= vid_end; vid++)
clear_bit(vid, mlxsw_sp_port->active_vlans);
@@ -997,8 +1033,8 @@ out:
static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_vlan *vlan)
{
- return __mlxsw_sp_port_vlans_del(mlxsw_sp_port,
- vlan->vid_begin, vlan->vid_end, false);
+ return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vlan->vid_begin,
+ vlan->vid_end);
}
void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
@@ -1006,7 +1042,7 @@ void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
u16 vid;
for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
- __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid, false);
+ __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid);
}
static int
@@ -1081,6 +1117,10 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev,
err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
+ case SWITCHDEV_OBJ_ID_IPV4_FIB:
+ err = mlxsw_sp_router_fib4_del(mlxsw_sp_port,
+ SWITCHDEV_OBJ_IPV4_FIB(obj));
+ break;
case SWITCHDEV_OBJ_ID_PORT_FDB:
err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_FDB(obj));
@@ -1118,7 +1158,8 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port *tmp;
- u16 vport_fid = 0;
+ struct mlxsw_sp_fid *f;
+ u16 vport_fid;
char *sfd_pl;
char mac[ETH_ALEN];
u16 fid;
@@ -1133,12 +1174,8 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
if (!sfd_pl)
return -ENOMEM;
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
- u16 tmp;
-
- tmp = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
- vport_fid = mlxsw_sp_vfid_to_fid(tmp);
- }
+ f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
+ vport_fid = f ? f->fid : 0;
mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
do {
@@ -1310,11 +1347,10 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
}
if (mlxsw_sp_fid_is_vfid(fid)) {
- u16 vfid = mlxsw_sp_fid_to_vfid(fid);
struct mlxsw_sp_port *mlxsw_sp_vport;
- mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
- vfid);
+ mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
+ fid);
if (!mlxsw_sp_vport) {
netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
goto just_remove;
@@ -1370,11 +1406,10 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
}
if (mlxsw_sp_fid_is_vfid(fid)) {
- u16 vfid = mlxsw_sp_fid_to_vfid(fid);
struct mlxsw_sp_port *mlxsw_sp_vport;
- mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
- vfid);
+ mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
+ fid);
if (!mlxsw_sp_vport) {
netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
goto just_remove;
@@ -1495,14 +1530,6 @@ static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
}
-static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
-{
- u16 fid;
-
- for_each_set_bit(fid, mlxsw_sp->active_fids, VLAN_N_VID)
- mlxsw_sp_fid_destroy(mlxsw_sp, fid);
-}
-
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
return mlxsw_sp_fdb_init(mlxsw_sp);
@@ -1511,33 +1538,6 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
mlxsw_sp_fdb_fini(mlxsw_sp);
- mlxsw_sp_fids_fini(mlxsw_sp);
-}
-
-int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
-{
- struct net_device *dev = mlxsw_sp_port->dev;
- int err;
-
- /* Allow only untagged packets to ingress and tag them internally
- * with VID 1.
- */
- mlxsw_sp_port->pvid = 1;
- err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1,
- true);
- if (err) {
- netdev_err(dev, "Unable to init VLANs\n");
- return err;
- }
-
- /* Add implicit VLAN interface in the device, so that untagged
- * packets will be classified to the default vFID.
- */
- err = mlxsw_sp_port_add_vid(dev, 0, 1);
- if (err)
- netdev_err(dev, "Failed to configure default vFID\n");
-
- return err;
}
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 25f658b38..377daa4d5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -1541,6 +1541,7 @@ static struct mlxsw_config_profile mlxsw_sx_config_profile = {
.type = MLXSW_PORT_SWID_TYPE_ETH,
}
},
+ .resource_query_enable = 0,
};
static struct mlxsw_driver mlxsw_sx_driver = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 53a9550be..ed8e30186 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -54,6 +54,15 @@ enum {
MLXSW_TRAP_ID_IGMP_V2_REPORT = 0x32,
MLXSW_TRAP_ID_IGMP_V2_LEAVE = 0x33,
MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
+ MLXSW_TRAP_ID_ARPBC = 0x50,
+ MLXSW_TRAP_ID_ARPUC = 0x51,
+ MLXSW_TRAP_ID_MTUERROR = 0x52,
+ MLXSW_TRAP_ID_TTLERROR = 0x53,
+ MLXSW_TRAP_ID_LBERROR = 0x54,
+ MLXSW_TRAP_ID_OSPF = 0x55,
+ MLXSW_TRAP_ID_IP2ME = 0x5F,
+ MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70,
+ MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90,
MLXSW_TRAP_ID_MAX = 0x1FF
};
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 2874dffe7..eaa37c079 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -7412,7 +7412,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
((!ring_data->lro) ||
- (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
+ (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG))) &&
(dev->features & NETIF_F_RXCSUM)) {
l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index e744acc18..690635660 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -63,7 +63,7 @@
#define NFP_NET_POLL_TIMEOUT 5
/* Bar allocation */
-#define NFP_NET_CRTL_BAR 0
+#define NFP_NET_CTRL_BAR 0
#define NFP_NET_Q0_BAR 2
#define NFP_NET_Q1_BAR 4 /* OBSOLETE */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index ba26bb356..39dadfca8 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -41,7 +41,6 @@
* Chris Telfer <chris.telfer@netronome.com>
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -1441,10 +1440,6 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
nfp_net_set_hash(nn->netdev, skb, rxd);
- /* Pad small frames to minimum */
- if (skb_put_padto(skb, 60))
- break;
-
/* Stats update */
u64_stats_update_begin(&r_vec->rx_sync);
r_vec->rx_pkts++;
@@ -1845,13 +1840,14 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
}
/**
- * nfp_net_write_mac_addr() - Write mac address to device registers
+ * nfp_net_write_mac_addr() - Write mac address to the device control BAR
* @nn: NFP Net device to reconfigure
- * @mac: Six-byte MAC address to be written
*
- * We do a bit of byte swapping dance because firmware is LE.
+ * Writes the MAC address from the netdev to the device control BAR. Does not
+ * perform the required reconfig. We do a bit of a byte-swapping dance
+ * because the firmware is LE.
*/
-static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *mac)
+static void nfp_net_write_mac_addr(struct nfp_net *nn)
{
nn_writel(nn, NFP_NET_CFG_MACADDR + 0,
get_unaligned_be32(nn->netdev->dev_addr));
@@ -1952,7 +1948,7 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->num_rx_rings == 64 ?
0xffffffffffffffffULL : ((u64)1 << nn->num_rx_rings) - 1);
- nfp_net_write_mac_addr(nn, nn->netdev->dev_addr);
+ nfp_net_write_mac_addr(nn);
nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu);
nn_writel(nn, NFP_NET_CFG_FLBUFSZ, nn->fl_bufsz);
@@ -1979,7 +1975,7 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
if (nn->ctrl & NFP_NET_CFG_CTRL_VXLAN) {
memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports));
memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt));
- vxlan_get_rx_port(nn->netdev);
+ udp_tunnel_get_rx_info(nn->netdev);
}
return err;
@@ -2048,12 +2044,16 @@ static int nfp_net_netdev_open(struct net_device *netdev)
nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings),
GFP_KERNEL);
- if (!nn->rx_rings)
+ if (!nn->rx_rings) {
+ err = -ENOMEM;
goto err_free_lsc;
+ }
nn->tx_rings = kcalloc(nn->num_tx_rings, sizeof(*nn->tx_rings),
GFP_KERNEL);
- if (!nn->tx_rings)
+ if (!nn->tx_rings) {
+ err = -ENOMEM;
goto err_free_rx_rings;
+ }
for (r = 0; r < nn->num_r_vecs; r++) {
err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
@@ -2551,27 +2551,33 @@ static int nfp_net_find_vxlan_idx(struct nfp_net *nn, __be16 port)
}
static void nfp_net_add_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+ struct udp_tunnel_info *ti)
{
struct nfp_net *nn = netdev_priv(netdev);
int idx;
- idx = nfp_net_find_vxlan_idx(nn, port);
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
+ idx = nfp_net_find_vxlan_idx(nn, ti->port);
if (idx == -ENOSPC)
return;
if (!nn->vxlan_usecnt[idx]++)
- nfp_net_set_vxlan_port(nn, idx, port);
+ nfp_net_set_vxlan_port(nn, idx, ti->port);
}
static void nfp_net_del_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+ struct udp_tunnel_info *ti)
{
struct nfp_net *nn = netdev_priv(netdev);
int idx;
- idx = nfp_net_find_vxlan_idx(nn, port);
- if (!nn->vxlan_usecnt[idx] || idx == -ENOSPC)
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
+ idx = nfp_net_find_vxlan_idx(nn, ti->port);
+ if (idx == -ENOSPC || !nn->vxlan_usecnt[idx])
return;
if (!--nn->vxlan_usecnt[idx])
@@ -2589,8 +2595,8 @@ static const struct net_device_ops nfp_net_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_set_features = nfp_net_set_features,
.ndo_features_check = nfp_net_features_check,
- .ndo_add_vxlan_port = nfp_net_add_vxlan_port,
- .ndo_del_vxlan_port = nfp_net_del_vxlan_port,
+ .ndo_udp_tunnel_add = nfp_net_add_vxlan_port,
+ .ndo_udp_tunnel_del = nfp_net_del_vxlan_port,
};
/**
@@ -2733,7 +2739,7 @@ int nfp_net_netdev_init(struct net_device *netdev)
nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
- nfp_net_write_mac_addr(nn, nn->netdev->dev_addr);
+ nfp_net_write_mac_addr(nn);
/* Set default MTU and Freelist buffer size */
if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index ccfef1f17..4c9897220 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -40,7 +40,6 @@
* Brad Petrus <brad.petrus@netronome.com>
*/
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -605,6 +604,7 @@ static int nfp_net_set_coalesce(struct net_device *netdev,
static const struct ethtool_ops nfp_net_ethtool_ops = {
.get_drvinfo = nfp_net_get_drvinfo,
+ .get_link = ethtool_op_get_link,
.get_ringparam = nfp_net_get_ringparam,
.set_ringparam = nfp_net_set_ringparam,
.get_strings = nfp_net_get_strings,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index e2b22b8a2..f7062cb64 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -38,7 +38,6 @@
* Rolf Neugebauer <rolf.neugebauer@netronome.com>
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -124,17 +123,17 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
* first NFP_NET_CFG_BAR_SZ of the BAR. This keeps the code
* identical for PF and VF drivers.
*/
- ctrl_bar = ioremap_nocache(pci_resource_start(pdev, NFP_NET_CRTL_BAR),
+ ctrl_bar = ioremap_nocache(pci_resource_start(pdev, NFP_NET_CTRL_BAR),
NFP_NET_CFG_BAR_SZ);
if (!ctrl_bar) {
dev_err(&pdev->dev,
- "Failed to map resource %d\n", NFP_NET_CRTL_BAR);
+ "Failed to map resource %d\n", NFP_NET_CTRL_BAR);
err = -EIO;
goto err_pci_regions;
}
nfp_net_get_fw_version(&fw_ver, ctrl_bar);
- if (fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
+ if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
dev_err(&pdev->dev, "Unknown Firmware ABI %d.%d.%d.%d\n",
fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
err = -EINVAL;
@@ -142,9 +141,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
}
/* Determine stride */
- if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 0) ||
- nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1) ||
- nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0x12, 0x48)) {
+ if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
stride = 2;
tx_bar_no = NFP_NET_Q0_BAR;
rx_bar_no = NFP_NET_Q1_BAR;
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index b1ce7aaa8..8e13ec84c 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -425,7 +425,6 @@ struct netdata_local {
unsigned int last_tx_idx;
unsigned int num_used_tx_buffs;
struct mii_bus *mii_bus;
- struct phy_device *phy_dev;
struct clk *clk;
dma_addr_t dma_buff_base_p;
void *dma_buff_base_v;
@@ -476,14 +475,6 @@ static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac)
mac[5] = tmp >> 8;
}
-static void __lpc_eth_clock_enable(struct netdata_local *pldat, bool enable)
-{
- if (enable)
- clk_prepare_enable(pldat->clk);
- else
- clk_disable_unprepare(pldat->clk);
-}
-
static void __lpc_params_setup(struct netdata_local *pldat)
{
u32 tmp;
@@ -750,7 +741,7 @@ static int lpc_mdio_reset(struct mii_bus *bus)
static void lpc_handle_link_change(struct net_device *ndev)
{
struct netdata_local *pldat = netdev_priv(ndev);
- struct phy_device *phydev = pldat->phy_dev;
+ struct phy_device *phydev = ndev->phydev;
unsigned long flags;
bool status_change = false;
@@ -814,7 +805,6 @@ static int lpc_mii_probe(struct net_device *ndev)
pldat->link = 0;
pldat->speed = 0;
pldat->duplex = -1;
- pldat->phy_dev = phydev;
phy_attached_info(phydev);
@@ -1048,8 +1038,8 @@ static int lpc_eth_close(struct net_device *ndev)
napi_disable(&pldat->napi);
netif_stop_queue(ndev);
- if (pldat->phy_dev)
- phy_stop(pldat->phy_dev);
+ if (ndev->phydev)
+ phy_stop(ndev->phydev);
spin_lock_irqsave(&pldat->lock, flags);
__lpc_eth_reset(pldat);
@@ -1058,7 +1048,7 @@ static int lpc_eth_close(struct net_device *ndev)
writel(0, LPC_ENET_MAC2(pldat->net_base));
spin_unlock_irqrestore(&pldat->lock, flags);
- __lpc_eth_clock_enable(pldat, false);
+ clk_disable_unprepare(pldat->clk);
return 0;
}
@@ -1185,8 +1175,7 @@ static void lpc_eth_set_multicast_list(struct net_device *ndev)
static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
- struct netdata_local *pldat = netdev_priv(ndev);
- struct phy_device *phydev = pldat->phy_dev;
+ struct phy_device *phydev = ndev->phydev;
if (!netif_running(ndev))
return -EINVAL;
@@ -1200,21 +1189,24 @@ static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
static int lpc_eth_open(struct net_device *ndev)
{
struct netdata_local *pldat = netdev_priv(ndev);
+ int ret;
if (netif_msg_ifup(pldat))
dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name);
- __lpc_eth_clock_enable(pldat, true);
+ ret = clk_prepare_enable(pldat->clk);
+ if (ret)
+ return ret;
/* Suspended PHY makes LPC ethernet core block, so resume now */
- phy_resume(pldat->phy_dev);
+ phy_resume(ndev->phydev);
/* Reset and initialize */
__lpc_eth_reset(pldat);
__lpc_eth_init(pldat);
/* schedule a link state check */
- phy_start(pldat->phy_dev);
+ phy_start(ndev->phydev);
netif_start_queue(ndev);
napi_enable(&pldat->napi);
@@ -1247,37 +1239,13 @@ static void lpc_eth_ethtool_setmsglevel(struct net_device *ndev, u32 level)
pldat->msg_enable = level;
}
-static int lpc_eth_ethtool_getsettings(struct net_device *ndev,
- struct ethtool_cmd *cmd)
-{
- struct netdata_local *pldat = netdev_priv(ndev);
- struct phy_device *phydev = pldat->phy_dev;
-
- if (!phydev)
- return -EOPNOTSUPP;
-
- return phy_ethtool_gset(phydev, cmd);
-}
-
-static int lpc_eth_ethtool_setsettings(struct net_device *ndev,
- struct ethtool_cmd *cmd)
-{
- struct netdata_local *pldat = netdev_priv(ndev);
- struct phy_device *phydev = pldat->phy_dev;
-
- if (!phydev)
- return -EOPNOTSUPP;
-
- return phy_ethtool_sset(phydev, cmd);
-}
-
static const struct ethtool_ops lpc_eth_ethtool_ops = {
.get_drvinfo = lpc_eth_ethtool_getdrvinfo,
- .get_settings = lpc_eth_ethtool_getsettings,
- .set_settings = lpc_eth_ethtool_setsettings,
.get_msglevel = lpc_eth_ethtool_getmsglevel,
.set_msglevel = lpc_eth_ethtool_setmsglevel,
.get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static const struct net_device_ops lpc_netdev_ops = {
@@ -1347,7 +1315,9 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
}
/* Enable network clock */
- __lpc_eth_clock_enable(pldat, true);
+ ret = clk_prepare_enable(pldat->clk);
+ if (ret)
+ goto err_out_clk_put;
/* Map IO space */
pldat->net_base = ioremap(res->start, resource_size(res));
@@ -1460,7 +1430,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
netdev_info(ndev, "LPC mac at 0x%08x irq %d\n",
res->start, ndev->irq);
- phydev = pldat->phy_dev;
+ phydev = ndev->phydev;
device_init_wakeup(&pdev->dev, 1);
device_set_wakeup_enable(&pdev->dev, 0);
@@ -1481,6 +1451,7 @@ err_out_iounmap:
iounmap(pldat->net_base);
err_out_disable_clocks:
clk_disable_unprepare(pldat->clk);
+err_out_clk_put:
clk_put(pldat->clk);
err_out_free_dev:
free_netdev(ndev);
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index af54df52a..2f4a837f0 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -989,7 +989,7 @@ static void pasemi_adjust_link(struct net_device *dev)
unsigned int flags;
unsigned int new_flags;
- if (!mac->phydev->link) {
+ if (!dev->phydev->link) {
/* If no link, MAC speed settings don't matter. Just report
* link down and return.
*/
@@ -1010,10 +1010,10 @@ static void pasemi_adjust_link(struct net_device *dev)
new_flags = flags & ~(PAS_MAC_CFG_PCFG_HD | PAS_MAC_CFG_PCFG_SPD_M |
PAS_MAC_CFG_PCFG_TSR_M);
- if (!mac->phydev->duplex)
+ if (!dev->phydev->duplex)
new_flags |= PAS_MAC_CFG_PCFG_HD;
- switch (mac->phydev->speed) {
+ switch (dev->phydev->speed) {
case 1000:
new_flags |= PAS_MAC_CFG_PCFG_SPD_1G |
PAS_MAC_CFG_PCFG_TSR_1G;
@@ -1027,15 +1027,15 @@ static void pasemi_adjust_link(struct net_device *dev)
PAS_MAC_CFG_PCFG_TSR_10M;
break;
default:
- printk("Unsupported speed %d\n", mac->phydev->speed);
+ printk("Unsupported speed %d\n", dev->phydev->speed);
}
/* Print on link or speed/duplex change */
- msg = mac->link != mac->phydev->link || flags != new_flags;
+ msg = mac->link != dev->phydev->link || flags != new_flags;
- mac->duplex = mac->phydev->duplex;
- mac->speed = mac->phydev->speed;
- mac->link = mac->phydev->link;
+ mac->duplex = dev->phydev->duplex;
+ mac->speed = dev->phydev->speed;
+ mac->link = dev->phydev->link;
if (new_flags != flags)
write_mac_reg(mac, PAS_MAC_CFG_PCFG, new_flags);
@@ -1067,8 +1067,6 @@ static int pasemi_mac_phy_init(struct net_device *dev)
return -ENODEV;
}
- mac->phydev = phydev;
-
return 0;
}
@@ -1198,8 +1196,8 @@ static int pasemi_mac_open(struct net_device *dev)
goto out_rx_int;
}
- if (mac->phydev)
- phy_start(mac->phydev);
+ if (dev->phydev)
+ phy_start(dev->phydev);
setup_timer(&mac->tx->clean_timer, pasemi_mac_tx_timer,
(unsigned long)mac->tx);
@@ -1293,9 +1291,9 @@ static int pasemi_mac_close(struct net_device *dev)
rxch = rx_ring(mac)->chan.chno;
txch = tx_ring(mac)->chan.chno;
- if (mac->phydev) {
- phy_stop(mac->phydev);
- phy_disconnect(mac->phydev);
+ if (dev->phydev) {
+ phy_stop(dev->phydev);
+ phy_disconnect(dev->phydev);
}
del_timer_sync(&mac->tx->clean_timer);
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.h b/drivers/net/ethernet/pasemi/pasemi_mac.h
index 161c99a98..7c47e263b 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.h
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.h
@@ -70,7 +70,6 @@ struct pasemi_mac {
struct pci_dev *pdev;
struct pci_dev *dma_pdev;
struct pci_dev *iob_pdev;
- struct phy_device *phydev;
struct napi_struct napi;
int bufsz; /* RX ring buffer size */
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
index f046bfc18..d0afc2b8f 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
@@ -62,32 +62,6 @@ static struct {
{ "tx-1024-1518-byte-packets" },
};
-static int
-pasemi_mac_ethtool_get_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
-{
- struct pasemi_mac *mac = netdev_priv(netdev);
- struct phy_device *phydev = mac->phydev;
-
- if (!phydev)
- return -EOPNOTSUPP;
-
- return phy_ethtool_gset(phydev, cmd);
-}
-
-static int
-pasemi_mac_ethtool_set_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
-{
- struct pasemi_mac *mac = netdev_priv(netdev);
- struct phy_device *phydev = mac->phydev;
-
- if (!phydev)
- return -EOPNOTSUPP;
-
- return phy_ethtool_sset(phydev, cmd);
-}
-
static u32
pasemi_mac_ethtool_get_msglevel(struct net_device *netdev)
{
@@ -145,8 +119,6 @@ static void pasemi_mac_get_strings(struct net_device *netdev, u32 stringset,
}
const struct ethtool_ops pasemi_mac_ethtool_ops = {
- .get_settings = pasemi_mac_ethtool_get_settings,
- .set_settings = pasemi_mac_ethtool_set_settings,
.get_msglevel = pasemi_mac_ethtool_get_msglevel,
.set_msglevel = pasemi_mac_ethtool_set_msglevel,
.get_link = ethtool_op_get_link,
@@ -154,5 +126,7 @@ const struct ethtool_ops pasemi_mac_ethtool_ops = {
.get_strings = pasemi_mac_get_strings,
.get_sset_count = pasemi_mac_get_sset_count,
.get_ethtool_stats = pasemi_mac_get_ethtool_stats,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 680d8c736..6ba484068 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -54,16 +54,6 @@ config QLCNIC_DCB
mode of DCB is supported. PG and PFC values are related only
to Tx.
-config QLCNIC_VXLAN
- bool "Virtual eXtensible Local Area Network (VXLAN) offload support"
- default n
- depends on QLCNIC && VXLAN && !(QLCNIC=y && VXLAN=m)
- ---help---
- This enables hardware offload support for VXLAN protocol over QLogic's
- 84XX series adapters.
- Say Y here if you want to enable hardware offload support for
- Virtual eXtensible Local Area Network (VXLAN) in the driver.
-
config QLCNIC_HWMON
bool "QLOGIC QLCNIC 82XX and 83XX family HWMON support"
depends on QLCNIC && HWMON && !(QLCNIC=y && HWMON=m)
@@ -114,24 +104,4 @@ config QEDE
---help---
This enables the support for ...
-config QEDE_VXLAN
- bool "Virtual eXtensible Local Area Network support"
- default n
- depends on QEDE && VXLAN && !(QEDE=y && VXLAN=m)
- ---help---
- This enables hardware offload support for VXLAN protocol over
- qede module. Say Y here if you want to enable hardware offload
- support for Virtual eXtensible Local Area Network (VXLAN)
- in the driver.
-
-config QEDE_GENEVE
- bool "Generic Network Virtualization Encapsulation (GENEVE) support"
- depends on QEDE && GENEVE && !(QEDE=y && GENEVE=m)
- ---help---
- This allows one to create GENEVE virtual interfaces that provide
- Layer 2 Networks over Layer 3 Networks. GENEVE is often used
- to tunnel virtual network infrastructure in virtualized environments.
- Say Y here if you want to enable hardware offload support for
- Generic Network Virtualization Encapsulation (GENEVE) in the driver.
-
endif # NET_VENDOR_QLOGIC
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 1042f2af8..45ab74676 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -127,6 +127,8 @@ struct qed_tunn_update_params {
*/
enum qed_pci_personality {
QED_PCI_ETH,
+ QED_PCI_ISCSI,
+ QED_PCI_ETH_ROCE,
QED_PCI_DEFAULT /* default in shmem */
};
@@ -170,6 +172,8 @@ enum QED_PORT_MODE {
enum qed_dev_cap {
QED_DEV_CAP_ETH,
+ QED_DEV_CAP_ISCSI,
+ QED_DEV_CAP_ROCE,
};
struct qed_hw_info {
@@ -183,6 +187,8 @@ struct qed_hw_info {
#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
+#define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \
+ RESC_NUM(_p_hwfn, resc))
#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
u8 num_tc;
@@ -255,6 +261,7 @@ struct qed_qm_info {
u8 pure_lb_pq;
u8 offload_pq;
u8 pure_ack_pq;
+ u8 ooo_pq;
u8 vf_queues_offset;
u16 num_pqs;
u16 num_vf_pqs;
@@ -267,6 +274,7 @@ struct qed_qm_info {
u8 pf_wfq;
u32 pf_rl;
struct qed_wfq_data *wfq_data;
+ u8 num_pf_rls;
};
struct storm_stats {
@@ -312,6 +320,7 @@ struct qed_hwfn {
bool hw_init_done;
u8 num_funcs_on_engine;
+ u8 enabled_func_idx;
/* BAR access */
void __iomem *regview;
@@ -350,6 +359,9 @@ struct qed_hwfn {
/* Protocol related */
struct qed_pf_params pf_params;
+ bool b_rdma_enabled_in_prs;
+ u32 rdma_prs_search_reg;
+
/* Array of sb_info of all status blocks */
struct qed_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
u16 num_sbs;
@@ -477,8 +489,8 @@ struct qed_dev {
u32 int_mode;
enum qed_coalescing_mode int_coalescing_mode;
- u8 rx_coalesce_usecs;
- u8 tx_coalesce_usecs;
+ u16 rx_coalesce_usecs;
+ u16 tx_coalesce_usecs;
/* Start Bar offset of first hwfn */
void __iomem *regview;
@@ -549,12 +561,22 @@ struct qed_dev {
static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
u32 concrete_fid)
{
+ u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
+ u8 vf_valid = GET_FIELD(concrete_fid,
+ PXP_CONCRETE_FID_VFVALID);
+ u8 sw_fid;
+
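+	/* The SW FID space enumerates the PFs first, with VFs following
+	 * after MAX_NUM_PFS.
+	 */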
+ if (vf_valid)
+ sw_fid = vfid + MAX_NUM_PFS;
+ else
+ sw_fid = pfid;
- return pfid;
+ return sw_fid;
}
#define PURE_LB_TC 8
+#define OOO_LB_TC 9
int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index ac284c58d..1c35f3761 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -39,6 +39,14 @@
#define DQ_RANGE_SHIFT 4
#define DQ_RANGE_ALIGN BIT(DQ_RANGE_SHIFT)
+/* Searcher constants */
+#define SRC_MIN_NUM_ELEMS 256
+
+/* Timers constants */
+#define TM_SHIFT 7
+#define TM_ALIGN BIT(TM_SHIFT)
+#define TM_ELEM_SIZE 4
+
/* ILT constants */
#define ILT_DEFAULT_HW_P_SIZE 3
#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
@@ -56,26 +64,71 @@
union conn_context {
struct core_conn_context core_ctx;
struct eth_conn_context eth_ctx;
+ struct iscsi_conn_context iscsi_ctx;
+ struct roce_conn_context roce_ctx;
+};
+
+/* TYPE-0 task context - iSCSI */
+union type0_task_context {
+ struct iscsi_task_context iscsi_ctx;
};
+/* TYPE-1 task context - ROCE */
+union type1_task_context {
+ struct rdma_task_context roce_ctx;
+};
+
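+/* A single searcher (SRC) T2 entry: 56 opaque bytes followed by a
+ * physical pointer to the next entry, forming a linked list.
+ */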
+struct src_ent {
+ u8 opaque[56];
+ u64 next;
+};
+
+#define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */
+#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12))
+
#define CONN_CXT_SIZE(p_hwfn) \
ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
+#define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context))
+
+#define TYPE0_TASK_CXT_SIZE(p_hwfn) \
+ ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)
+
+/* Alignment is inherent to the type1_task_context structure */
+#define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)
+
/* PF per protocol configuration object */
+#define TASK_SEGMENTS (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
+#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
+
+struct qed_tid_seg {
+ u32 count;
+ u8 type;
+ bool has_fl_mem;
+};
+
struct qed_conn_type_cfg {
u32 cid_count;
u32 cid_start;
u32 cids_per_vf;
+ struct qed_tid_seg tid_seg[TASK_SEGMENTS];
};
/* ILT Client configuration, Per connection type (protocol) resources. */
#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
#define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2)
#define CDUC_BLK (0)
+#define SRQ_BLK (0)
+#define CDUT_SEG_BLK(n) (1 + (u8)(n))
+#define CDUT_FL_SEG_BLK(n, X) (1 + (n) + NUM_TASK_ ## X ## _SEGMENTS)
enum ilt_clients {
ILT_CLI_CDUC,
+ ILT_CLI_CDUT,
ILT_CLI_QM,
+ ILT_CLI_TM,
+ ILT_CLI_SRC,
+ ILT_CLI_TSDM,
ILT_CLI_MAX
};
@@ -88,6 +141,7 @@ struct qed_ilt_cli_blk {
u32 total_size; /* 0 means not active */
u32 real_size_in_page;
u32 start_line;
+ u32 dynamic_line_cnt;
};
struct qed_ilt_client_cfg {
@@ -131,18 +185,44 @@ struct qed_cxt_mngr {
/* computed ILT structure */
struct qed_ilt_client_cfg clients[ILT_CLI_MAX];
+ /* Task type sizes */
+ u32 task_type_size[NUM_TASK_TYPES];
+
/* total number of VFs for this hwfn -
* ALL VFs are symmetric in terms of HW resources
*/
u32 vf_count;
+ /* total number of SRQ's for this hwfn */
+ u32 srq_count;
+
/* Acquired CIDs */
struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
/* ILT shadow table */
struct qed_dma_mem *ilt_shadow;
u32 pf_start_line;
+
+ /* Mutex for a dynamic ILT allocation */
+ struct mutex mutex;
+
+ /* SRC T2 */
+ struct qed_dma_mem *t2;
+ u32 t2_num_pages;
+ u64 first_free;
+ u64 last_free;
};
+static bool src_proto(enum protocol_type type)
+{
+ return type == PROTOCOLID_ISCSI ||
+ type == PROTOCOLID_ROCE;
+}
+
+static bool tm_cid_proto(enum protocol_type type)
+{
+ return type == PROTOCOLID_ISCSI ||
+ type == PROTOCOLID_ROCE;
+}
/* counts the iids for the CDU/CDUC ILT client configuration */
struct qed_cdu_iids {
@@ -161,21 +241,120 @@ static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr,
}
}
+/* counts the iids for the Searcher block configuration */
+struct qed_src_iids {
+ u32 pf_cids;
+ u32 per_vf_cids;
+};
+
+static void qed_cxt_src_iids(struct qed_cxt_mngr *p_mngr,
+ struct qed_src_iids *iids)
+{
+ u32 i;
+
+ for (i = 0; i < MAX_CONN_TYPES; i++) {
+ if (!src_proto(i))
+ continue;
+
+ iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
+ iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
+ }
+}
+
+/* counts the iids for the Timers block configuration */
+struct qed_tm_iids {
+ u32 pf_cids;
+ u32 pf_tids[NUM_TASK_PF_SEGMENTS]; /* per segment */
+ u32 pf_tids_total;
+ u32 per_vf_cids;
+ u32 per_vf_tids;
+};
+
+static void qed_cxt_tm_iids(struct qed_cxt_mngr *p_mngr,
+ struct qed_tm_iids *iids)
+{
+ u32 i, j;
+
+ for (i = 0; i < MAX_CONN_TYPES; i++) {
+ struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];
+
+ if (tm_cid_proto(i)) {
+ iids->pf_cids += p_cfg->cid_count;
+ iids->per_vf_cids += p_cfg->cids_per_vf;
+ }
+ }
+
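+	/* The timers block expects each of the counts to be aligned to
+	 * TM_ALIGN (128) elements.
+	 */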
+ iids->pf_cids = roundup(iids->pf_cids, TM_ALIGN);
+ iids->per_vf_cids = roundup(iids->per_vf_cids, TM_ALIGN);
+ iids->per_vf_tids = roundup(iids->per_vf_tids, TM_ALIGN);
+
+ for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) {
+ iids->pf_tids[j] = roundup(iids->pf_tids[j], TM_ALIGN);
+ iids->pf_tids_total += iids->pf_tids[j];
+ }
+}
+
static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
struct qed_qm_iids *iids)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- u32 vf_cids = 0, type;
+ struct qed_tid_seg *segs;
+ u32 vf_cids = 0, type, j;
+ u32 vf_tids = 0;
for (type = 0; type < MAX_CONN_TYPES; type++) {
iids->cids += p_mngr->conn_cfg[type].cid_count;
vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
+
+ segs = p_mngr->conn_cfg[type].tid_seg;
+ /* for each segment there is at most one
+ * protocol for which count is not 0.
+ */
+ for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
+ iids->tids += segs[j].count;
+
+		 * The last array element is for the VFs. As with PF
+		 * segments, there can be only one protocol for
+		 * which this value is not 0.
+ */
+ vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
}
iids->vf_cids += vf_cids * p_mngr->vf_count;
+ iids->tids += vf_tids * p_mngr->vf_count;
+
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
- "iids: CIDS %08x vf_cids %08x\n",
- iids->cids, iids->vf_cids);
+ "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n",
+ iids->cids, iids->vf_cids, iids->tids, vf_tids);
+}
+
+static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
+ u32 seg)
+{
+ struct qed_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr;
+ u32 i;
+
+ /* Find the protocol with tid count > 0 for this segment.
+ * Note: there can only be one and this is already validated.
+ */
+ for (i = 0; i < MAX_CONN_TYPES; i++)
+ if (p_cfg->conn_cfg[i].tid_seg[seg].count)
+ return &p_cfg->conn_cfg[i].tid_seg[seg];
+ return NULL;
+}
+
+void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
+{
+ struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+
+ p_mgr->srq_count = num_srqs;
+}
+
+u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+
+ return p_mgr->srq_count;
}
/* set the iids count per protocol */
@@ -188,6 +367,14 @@ static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN);
+
+ if (type == PROTOCOLID_ROCE) {
+ u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
+ u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
+ u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+
+ p_conn->cid_count = roundup(p_conn->cid_count, elems_per_page);
+ }
}
u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
@@ -200,6 +387,37 @@ u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
}
+u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
+ enum protocol_type type)
+{
+ return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
+}
+
+u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type type)
+{
+ u32 cnt = 0;
+ int i;
+
+ for (i = 0; i < TASK_SEGMENTS; i++)
+ cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count;
+
+ return cnt;
+}
+
+static void
+qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type proto,
+ u8 seg, u8 seg_type, u32 count, bool has_fl)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];
+
+ p_seg->count = count;
+ p_seg->has_fl_mem = has_fl;
+ p_seg->type = seg_type;
+}
+
static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
struct qed_ilt_cli_blk *p_blk,
u32 start_line, u32 total_size,
@@ -241,17 +459,42 @@ static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
p_blk->real_size_in_page, p_blk->start_line);
}
+static u32 qed_ilt_get_dynamic_line_cnt(struct qed_hwfn *p_hwfn,
+ enum ilt_clients ilt_client)
+{
+ u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count;
+ struct qed_ilt_client_cfg *p_cli;
+ u32 lines_to_skip = 0;
+ u32 cxts_per_p;
+
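+	/* CDUC lines backing ROCE CIDs are allocated dynamically at run
+	 * time, so they are skipped when sizing the static ILT shadow.
+	 */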
+ if (ilt_client == ILT_CLI_CDUC) {
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+
+ cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
+ (u32) CONN_CXT_SIZE(p_hwfn);
+
+ lines_to_skip = cid_count / cxts_per_p;
+ }
+
+ return lines_to_skip;
+}
+
int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 curr_line, total, i, task_size, line;
struct qed_ilt_client_cfg *p_cli;
struct qed_ilt_cli_blk *p_blk;
struct qed_cdu_iids cdu_iids;
+ struct qed_src_iids src_iids;
struct qed_qm_iids qm_iids;
- u32 curr_line, total, i;
+ struct qed_tm_iids tm_iids;
+ struct qed_tid_seg *p_seg;
memset(&qm_iids, 0, sizeof(qm_iids));
memset(&cdu_iids, 0, sizeof(cdu_iids));
+ memset(&src_iids, 0, sizeof(src_iids));
+ memset(&tm_iids, 0, sizeof(tm_iids));
p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
@@ -279,6 +522,9 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ p_blk->dynamic_line_cnt = qed_ilt_get_dynamic_line_cnt(p_hwfn,
+ ILT_CLI_CDUC);
+
/* CDUC VF */
p_blk = &p_cli->vf_blks[CDUC_BLK];
total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
@@ -293,21 +539,128 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_CDUC);
+ /* CDUT PF */
+ p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+ p_cli->first.val = curr_line;
+
+ /* first the 'working' task memory */
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
+ if (!p_seg || p_seg->count == 0)
+ continue;
+
+ p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
+ total = p_seg->count * p_mngr->task_type_size[p_seg->type];
+ qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
+ p_mngr->task_type_size[p_seg->type]);
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+ }
+
+ /* next the 'init' task memory (forced load memory) */
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
+ if (!p_seg || p_seg->count == 0)
+ continue;
+
+ p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
+
+ if (!p_seg->has_fl_mem) {
+			/* The segment is active (total size of 'working'
+			 * memory is > 0) but has no FL (forced-load, Init)
+			 * memory. Thus:
+			 *
+			 * 1. The total-size in the corresponding FL block of
+			 *    the ILT client is set to 0 - no ILT lines are
+			 *    provisioned and no ILT memory allocated.
+			 *
+			 * 2. The start-line of said block is set to the
+			 *    start line of the matching working memory
+			 *    block in the ILT client. This is later used to
+			 *    configure the CDU segment offset registers, and
+			 *    results in an FL command for TIDs of this
+			 *    segment behaving like a regular load command
+			 *    (loading TIDs from the working memory).
+ */
+ line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line;
+
+ qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
+ continue;
+ }
+ total = p_seg->count * p_mngr->task_type_size[p_seg->type];
+
+ qed_ilt_cli_blk_fill(p_cli, p_blk,
+ curr_line, total,
+ p_mngr->task_type_size[p_seg->type]);
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+ }
+ p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line;
+
+ /* CDUT VF */
+ p_seg = qed_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
+ if (p_seg && p_seg->count) {
+		/* Strictly speaking, we need to iterate over all VF
+ * task segment types, but a VF has only 1 segment
+ */
+
+ /* 'working' memory */
+ total = p_seg->count * p_mngr->task_type_size[p_seg->type];
+
+ p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
+ qed_ilt_cli_blk_fill(p_cli, p_blk,
+ curr_line, total,
+ p_mngr->task_type_size[p_seg->type]);
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+
+ /* 'init' memory */
+ p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
+ if (!p_seg->has_fl_mem) {
+ /* see comment above */
+ line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
+ qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
+ } else {
+ task_size = p_mngr->task_type_size[p_seg->type];
+ qed_ilt_cli_blk_fill(p_cli, p_blk,
+ curr_line, total, task_size);
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+ }
+ p_cli->vf_total_lines = curr_line -
+ p_cli->vf_blks[0].start_line;
+
+ /* Now for the rest of the VFs */
+ for (i = 1; i < p_mngr->vf_count; i++) {
+ p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+
+ p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+ }
+ }
+
/* QM */
p_cli = &p_mngr->clients[ILT_CLI_QM];
p_blk = &p_cli->pf_blks[0];
qed_cxt_qm_iids(p_hwfn, &qm_iids);
total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
- qm_iids.vf_cids, 0,
+ qm_iids.vf_cids, qm_iids.tids,
p_hwfn->qm_info.num_pqs,
p_hwfn->qm_info.num_vf_pqs);
DP_VERBOSE(p_hwfn,
QED_MSG_ILT,
- "QM ILT Info, (cids=%d, vf_cids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n",
+ "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n",
qm_iids.cids,
qm_iids.vf_cids,
+ qm_iids.tids,
p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);
qed_ilt_cli_blk_fill(p_cli, p_blk,
@@ -317,6 +670,75 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ /* SRC */
+ p_cli = &p_mngr->clients[ILT_CLI_SRC];
+ qed_cxt_src_iids(p_mngr, &src_iids);
+
+	/* Both the PF's and the VFs' searcher connections are stored in the
+	 * per-PF database. Thus, sum the PF searcher cids and all the VFs'
+	 * searcher cids.
+ */
+ total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
+ if (total) {
+ u32 local_max = max_t(u32, total,
+ SRC_MIN_NUM_ELEMS);
+
+ total = roundup_pow_of_two(local_max);
+
+ p_blk = &p_cli->pf_blks[0];
+ qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total * sizeof(struct src_ent),
+ sizeof(struct src_ent));
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_SRC);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ }
+
+ /* TM PF */
+ p_cli = &p_mngr->clients[ILT_CLI_TM];
+ qed_cxt_tm_iids(p_mngr, &tm_iids);
+ total = tm_iids.pf_cids + tm_iids.pf_tids_total;
+ if (total) {
+ p_blk = &p_cli->pf_blks[0];
+ qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total * TM_ELEM_SIZE, TM_ELEM_SIZE);
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_TM);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ }
+
+ /* TM VF */
+ total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
+ if (total) {
+ p_blk = &p_cli->vf_blks[0];
+ qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total * TM_ELEM_SIZE, TM_ELEM_SIZE);
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_TM);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+ for (i = 1; i < p_mngr->vf_count; i++)
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_TM);
+ }
+
+ /* TSDM (SRQ CONTEXT) */
+ total = qed_cxt_get_srq_count(p_hwfn);
+
+ if (total) {
+ p_cli = &p_mngr->clients[ILT_CLI_TSDM];
+ p_blk = &p_cli->pf_blks[SRQ_BLK];
+ qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_TSDM);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ }
+
if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
RESC_NUM(p_hwfn, QED_ILT)) {
DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
@@ -327,8 +749,122 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
return 0;
}
+static void qed_cxt_src_t2_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 i;
+
+ if (!p_mngr->t2)
+ return;
+
+ for (i = 0; i < p_mngr->t2_num_pages; i++)
+ if (p_mngr->t2[i].p_virt)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_mngr->t2[i].size,
+ p_mngr->t2[i].p_virt,
+ p_mngr->t2[i].p_phys);
+
+ kfree(p_mngr->t2);
+ p_mngr->t2 = NULL;
+}
+
+static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 conn_num, total_size, ent_per_page, psz, i;
+ struct qed_ilt_client_cfg *p_src;
+ struct qed_src_iids src_iids;
+ struct qed_dma_mem *p_t2;
+ int rc;
+
+ memset(&src_iids, 0, sizeof(src_iids));
+
+ /* If the SRC ILT client is inactive there are no connections
+ * requiring the searcher, so leave.
+ */
+ p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC];
+ if (!p_src->active)
+ return 0;
+
+ qed_cxt_src_iids(p_mngr, &src_iids);
+ conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
+ total_size = conn_num * sizeof(struct src_ent);
+
+ /* use the same page size as the SRC ILT client */
+ psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
+ p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);
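+ /* Illustrative sizing (assumed numbers): with 32K pages and a
+ * 64-byte src_ent, 10,000 connections need 640,000 bytes, i.e.
+ * DIV_ROUND_UP(640000, 32768) = 20 pages.
+ */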
+
+ /* allocate t2 */
+ p_mngr->t2 = kzalloc(p_mngr->t2_num_pages * sizeof(struct qed_dma_mem),
+ GFP_KERNEL);
+ if (!p_mngr->t2) {
+ DP_NOTICE(p_hwfn, "Failed to allocate t2 table\n");
+ rc = -ENOMEM;
+ goto t2_fail;
+ }
+
+ /* allocate t2 pages */
+ for (i = 0; i < p_mngr->t2_num_pages; i++) {
+ u32 size = min_t(u32, total_size, psz);
+ void **p_virt = &p_mngr->t2[i].p_virt;
+
+ *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ size,
+ &p_mngr->t2[i].p_phys, GFP_KERNEL);
+ if (!p_mngr->t2[i].p_virt) {
+ rc = -ENOMEM;
+ goto t2_fail;
+ }
+ memset(*p_virt, 0, size);
+ p_mngr->t2[i].size = size;
+ total_size -= size;
+ }
+
+ /* Set the t2 pointers */
+
+ /* entries per page - must be a power of two */
+ ent_per_page = psz / sizeof(struct src_ent);
+
+ p_mngr->first_free = (u64) p_mngr->t2[0].p_phys;
+
+ p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page];
+ p_mngr->last_free = (u64) p_t2->p_phys +
+ ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);
+
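+ /* Chain the entries: within a page each src_ent's 'next' holds the
+ * physical address of the following entry; the last entry of a page
+ * points at the first entry of the next page, and the final entry
+ * terminates the chain with 0.
+ */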
+ for (i = 0; i < p_mngr->t2_num_pages; i++) {
+ u32 ent_num = min_t(u32,
+ ent_per_page,
+ conn_num);
+ struct src_ent *entries = p_mngr->t2[i].p_virt;
+ u64 p_ent_phys = (u64) p_mngr->t2[i].p_phys, val;
+ u32 j;
+
+ for (j = 0; j < ent_num - 1; j++) {
+ val = p_ent_phys + (j + 1) * sizeof(struct src_ent);
+ entries[j].next = cpu_to_be64(val);
+ }
+
+ if (i < p_mngr->t2_num_pages - 1)
+ val = (u64) p_mngr->t2[i + 1].p_phys;
+ else
+ val = 0;
+ entries[j].next = cpu_to_be64(val);
+
+ conn_num -= ent_num;
+ }
+
+ return 0;
+
+t2_fail:
+ qed_cxt_src_t2_free(p_hwfn);
+ return rc;
+}
+
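+/* Note: the 'if { continue; } else' construction below lets the macro
+ * prefix the caller's statement: inactive clients are skipped via
+ * 'continue' while active ones fall through to the statement bound by
+ * the trailing 'else', without swallowing braces at the use site.
+ */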
#define for_each_ilt_valid_client(pos, clients) \
- for (pos = 0; pos < ILT_CLI_MAX; pos++)
+ for (pos = 0; pos < ILT_CLI_MAX; pos++) \
+ if (!clients[pos].active) { \
+ continue; \
+ } else \
/* Total number of ILT lines used by this PF */
static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
@@ -336,12 +872,8 @@ static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
u32 size = 0;
u32 i;
- for_each_ilt_valid_client(i, ilt_clients) {
- if (!ilt_clients[i].active)
- continue;
- size += (ilt_clients[i].last.val -
- ilt_clients[i].first.val + 1);
- }
+ for_each_ilt_valid_client(i, ilt_clients)
+ size += (ilt_clients[i].last.val - ilt_clients[i].first.val + 1);
return size;
}
@@ -372,15 +904,22 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
u32 start_line_offset)
{
struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
- u32 lines, line, sz_left;
+ u32 lines, line, sz_left, lines_to_skip = 0;
+
+ /* Special handling for RoCE that supports dynamic allocation */
+ if ((p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) &&
+ ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM))
+ return 0;
+
+ lines_to_skip = p_blk->dynamic_line_cnt;
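+ /* Lines counted in dynamic_line_cnt are backed on demand (see
+ * qed_cxt_dynamic_ilt_alloc) instead of being allocated here.
+ */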
if (!p_blk->total_size)
return 0;
sz_left = p_blk->total_size;
- lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page);
+ lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;
line = p_blk->start_line + start_line_offset -
- p_hwfn->p_cxt_mngr->pf_start_line;
+ p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip;
for (; lines; lines--) {
dma_addr_t p_phys;
@@ -434,8 +973,6 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
(u32)(size * sizeof(struct qed_dma_mem)));
for_each_ilt_valid_client(i, clients) {
- if (!clients[i].active)
- continue;
for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
p_blk = &clients[i].pf_blks[j];
rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
@@ -514,6 +1051,7 @@ cid_map_fail:
int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
{
+ struct qed_ilt_client_cfg *clients;
struct qed_cxt_mngr *p_mngr;
u32 i;
@@ -524,20 +1062,42 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
}
/* Initialize ILT client registers */
- p_mngr->clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
- p_mngr->clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
- p_mngr->clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
-
- p_mngr->clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
- p_mngr->clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
- p_mngr->clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
-
+ clients = p_mngr->clients;
+ clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
+ clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
+ clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
+
+ clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
+ clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
+ clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
+
+ clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT);
+ clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT);
+ clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE);
+
+ clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT);
+ clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT);
+ clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE);
+
+ clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT);
+ clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT);
+ clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE);
+
+ clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
+ clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
+ clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
/* default ILT page size for all clients is 32K */
for (i = 0; i < ILT_CLI_MAX; i++)
p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
+ /* Initialize task sizes */
+ p_mngr->task_type_size[0] = TYPE0_TASK_CXT_SIZE(p_hwfn);
+ p_mngr->task_type_size[1] = TYPE1_TASK_CXT_SIZE(p_hwfn);
+
if (p_hwfn->cdev->p_iov_info)
p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;
+ /* Initialize the dynamic ILT allocation mutex */
+ mutex_init(&p_mngr->mutex);
/* Set the cxt mngr pointer prior to further allocations */
p_hwfn->p_cxt_mngr = p_mngr;
@@ -556,6 +1116,13 @@ int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
goto tables_alloc_fail;
}
+ /* Allocate the T2 table */
+ rc = qed_cxt_src_t2_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to allocate T2 memory\n");
+ goto tables_alloc_fail;
+ }
+
/* Allocate and initialize the acquired cids bitmaps */
rc = qed_cid_map_alloc(p_hwfn);
if (rc) {
@@ -576,6 +1143,7 @@ void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
return;
qed_cid_map_free(p_hwfn);
+ qed_cxt_src_t2_free(p_hwfn);
qed_ilt_shadow_free(p_hwfn);
kfree(p_hwfn->p_cxt_mngr);
@@ -620,6 +1188,48 @@ void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
#define CDUC_NCIB_MASK \
(CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)
+#define CDUT_TYPE0_CXT_SIZE_SHIFT \
+ CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT
+
+#define CDUT_TYPE0_CXT_SIZE_MASK \
+ (CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >> \
+ CDUT_TYPE0_CXT_SIZE_SHIFT)
+
+#define CDUT_TYPE0_BLOCK_WASTE_SHIFT \
+ CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT
+
+#define CDUT_TYPE0_BLOCK_WASTE_MASK \
+ (CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >> \
+ CDUT_TYPE0_BLOCK_WASTE_SHIFT)
+
+#define CDUT_TYPE0_NCIB_SHIFT \
+ CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT
+
+#define CDUT_TYPE0_NCIB_MASK \
+ (CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >> \
+ CDUT_TYPE0_NCIB_SHIFT)
+
+#define CDUT_TYPE1_CXT_SIZE_SHIFT \
+ CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT
+
+#define CDUT_TYPE1_CXT_SIZE_MASK \
+ (CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >> \
+ CDUT_TYPE1_CXT_SIZE_SHIFT)
+
+#define CDUT_TYPE1_BLOCK_WASTE_SHIFT \
+ CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT
+
+#define CDUT_TYPE1_BLOCK_WASTE_MASK \
+ (CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >> \
+ CDUT_TYPE1_BLOCK_WASTE_SHIFT)
+
+#define CDUT_TYPE1_NCIB_SHIFT \
+ CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT
+
+#define CDUT_TYPE1_NCIB_MASK \
+ (CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >> \
+ CDUT_TYPE1_NCIB_SHIFT)
+
static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
{
u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;
@@ -634,6 +1244,92 @@ static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
+
+ /* CDUT - type-0 tasks configuration */
+ page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val;
+ cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0];
+ elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+ block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
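+ /* E.g. (illustrative sizes): a 32K page with 96-byte contexts holds
+ * 341 contexts and wastes 32 bytes.
+ */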
+
+ /* cxt size and block-waste are multiples of 8 */
+ cdu_params = 0;
+ SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3));
+ SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3));
+ SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page);
+ STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params);
+
+ /* CDUT - type-1 tasks configuration */
+ cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1];
+ elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+ block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+ /* cxt size and block-waste are multiples of 8 */
+ cdu_params = 0;
+ SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3));
+ SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3));
+ SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page);
+ STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params);
+}
+
+/* CDU PF */
+#define CDU_SEG_REG_TYPE_SHIFT CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT
+#define CDU_SEG_REG_TYPE_MASK 0x1
+#define CDU_SEG_REG_OFFSET_SHIFT 0
+#define CDU_SEG_REG_OFFSET_MASK CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK
+
+static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_tid_seg *p_seg;
+ u32 cdu_seg_params, offset;
+ int i;
+
+ static const u32 rt_type_offset_arr[] = {
+ CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET
+ };
+
+ static const u32 rt_type_offset_fl_arr[] = {
+ CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET
+ };
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+
+ /* There are initializations only for CDUT during the PF phase */
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ /* Segment 0 */
+ p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
+ if (!p_seg)
+ continue;
+
+ /* Note: start_line is already adjusted for the CDU
+ * segment register granularity, so we just need to
+ * divide. Adjustment is implicit as we assume ILT
+ * Page size is larger than 32K!
+ */
+ offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
+ (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line -
+ p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
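+ /* i.e. the segment's byte offset from the client's first line,
+ * expressed in units of CDUT_SEG_ALIGNMET_IN_BYTES.
+ */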
+
+ cdu_seg_params = 0;
+ SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
+ SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
+ STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params);
+
+ offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
+ (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line -
+ p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
+
+ cdu_seg_params = 0;
+ SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
+ SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
+ STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params);
+ }
}
void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
@@ -742,14 +1438,11 @@ static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
ilt_clients = p_hwfn->p_cxt_mngr->clients;
for_each_ilt_valid_client(i, ilt_clients) {
- if (!ilt_clients[i].active)
- continue;
STORE_RT_REG(p_hwfn,
ilt_clients[i].first.reg,
ilt_clients[i].first.val);
STORE_RT_REG(p_hwfn,
- ilt_clients[i].last.reg,
- ilt_clients[i].last.val);
+ ilt_clients[i].last.reg, ilt_clients[i].last.val);
STORE_RT_REG(p_hwfn,
ilt_clients[i].p_size.reg,
ilt_clients[i].p_size.val);
@@ -786,6 +1479,33 @@ static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn)
PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
p_cli->vf_total_lines);
}
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
+ if (p_cli->active) {
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET,
+ blk_factor);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
+ p_cli->pf_total_lines);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET,
+ p_cli->vf_total_lines);
+ }
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM];
+ blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
+ if (p_cli->active) {
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
+ p_cli->pf_total_lines);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET,
+ p_cli->vf_total_lines);
+ }
}
/* ILT (PSWRQ2) PF */
@@ -804,9 +1524,6 @@ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
clients = p_hwfn->p_cxt_mngr->clients;
for_each_ilt_valid_client(i, clients) {
- if (!clients[i].active)
- continue;
-
/** Client's 1st val and RT array are absolute, ILT shadows'
* lines are relative.
*/
@@ -837,6 +1554,137 @@ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
}
}
+/* SRC (Searcher) PF */
+static void qed_src_init_pf(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 rounded_conn_num, conn_num, conn_max;
+ struct qed_src_iids src_iids;
+
+ memset(&src_iids, 0, sizeof(src_iids));
+ qed_cxt_src_iids(p_mngr, &src_iids);
+ conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
+ if (!conn_num)
+ return;
+
+ conn_max = max_t(u32, conn_num, SRC_MIN_NUM_ELEMS);
+ rounded_conn_num = roundup_pow_of_two(conn_max);
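+ /* e.g. conn_num = 3000 rounds up to 4096 and programs 12 hash bits */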
+
+ STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num);
+ STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET,
+ ilog2(rounded_conn_num));
+
+ STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
+ p_hwfn->p_cxt_mngr->first_free);
+ STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
+ p_hwfn->p_cxt_mngr->last_free);
+}
+
+/* Timers PF */
+#define TM_CFG_NUM_IDS_SHIFT 0
+#define TM_CFG_NUM_IDS_MASK 0xFFFFULL
+#define TM_CFG_PRE_SCAN_OFFSET_SHIFT 16
+#define TM_CFG_PRE_SCAN_OFFSET_MASK 0x1FFULL
+#define TM_CFG_PARENT_PF_SHIFT 25
+#define TM_CFG_PARENT_PF_MASK 0x7ULL
+
+#define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT 30
+#define TM_CFG_CID_PRE_SCAN_ROWS_MASK 0x1FFULL
+
+#define TM_CFG_TID_OFFSET_SHIFT 30
+#define TM_CFG_TID_OFFSET_MASK 0x7FFFFULL
+#define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT 49
+#define TM_CFG_TID_PRE_SCAN_ROWS_MASK 0x1FFULL
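+
+/* Resulting 64-bit cfg_word layouts (from the masks above) -
+ * connection: [15:0] NUM_IDS, [24:16] PRE_SCAN_OFFSET, [27:25] PARENT_PF,
+ * [38:30] CID_PRE_SCAN_ROWS;
+ * task: [15:0] NUM_IDS, [24:16] PRE_SCAN_OFFSET, [27:25] PARENT_PF,
+ * [48:30] TID_OFFSET, [57:49] TID_PRE_SCAN_ROWS.
+ */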
+
+static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 active_seg_mask = 0, tm_offset, rt_reg;
+ struct qed_tm_iids tm_iids;
+ u64 cfg_word;
+ u8 i;
+
+ memset(&tm_iids, 0, sizeof(tm_iids));
+ qed_cxt_tm_iids(p_mngr, &tm_iids);
+
+ /* @@@TBD No pre-scan for now */
+
+ /* Note: We assume consecutive VFs for a PF */
+ for (i = 0; i < p_mngr->vf_count; i++) {
+ cfg_word = 0;
+ SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
+ SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+ SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
+ SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);
+ rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
+ (sizeof(cfg_word) / sizeof(u32)) *
+ (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+ STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+ }
+
+ cfg_word = 0;
+ SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
+ SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+ SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0); /* n/a for PF */
+ SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */
+
+ rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
+ (sizeof(cfg_word) / sizeof(u32)) *
+ (NUM_OF_VFS(p_hwfn->cdev) + p_hwfn->rel_pf_id);
+ STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+
+ /* enable scan */
+ STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
+ tm_iids.pf_cids ? 0x1 : 0x0);
+
+ /* @@@TBD how to enable the scan for the VFs */
+
+ tm_offset = tm_iids.per_vf_cids;
+
+ /* Note: We assume consecutive VFs for a PF */
+ for (i = 0; i < p_mngr->vf_count; i++) {
+ cfg_word = 0;
+ SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids);
+ SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+ SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
+ SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
+ SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
+
+ rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
+ (sizeof(cfg_word) / sizeof(u32)) *
+ (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+
+ STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+ }
+
+ tm_offset = tm_iids.pf_cids;
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ cfg_word = 0;
+ SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]);
+ SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+ SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);
+ SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
+ SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
+
+ rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
+ (sizeof(cfg_word) / sizeof(u32)) *
+ (NUM_OF_VFS(p_hwfn->cdev) +
+ p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
+
+ STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+ active_seg_mask |= (tm_iids.pf_tids[i] ? (1 << i) : 0);
+
+ tm_offset += tm_iids.pf_tids[i];
+ }
+
+ if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE)
+ active_seg_mask = 0;
+
+ STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);
+
+ /* @@@TBD how to enable the scan for the VFs */
+}
+
void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
{
qed_cdu_init_common(p_hwfn);
@@ -847,7 +1695,10 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
qed_qm_init_pf(p_hwfn);
qed_cm_init_pf(p_hwfn);
qed_dq_init_pf(p_hwfn);
+ qed_cdu_init_pf(p_hwfn);
qed_ilt_init_pf(p_hwfn);
+ qed_src_init_pf(p_hwfn);
+ qed_tm_init_pf(p_hwfn);
}
int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
@@ -968,17 +1819,439 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
return 0;
}
-int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
+void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_pf_params *p_params)
{
- struct qed_eth_pf_params *p_params = &p_hwfn->pf_params.eth_pf_params;
+ u32 num_cons, num_tasks, num_qps, num_mrs, num_srqs;
+ enum protocol_type proto;
+
+ num_mrs = min_t(u32, RDMA_MAX_TIDS, p_params->num_mrs);
+ num_tasks = num_mrs; /* each mr uses a single task id */
+ num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs);
+
+ switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_ETH_ROCE:
+ num_qps = min_t(u32, ROCE_MAX_QPS, p_params->num_qps);
+ num_cons = num_qps * 2; /* each QP requires two connections */
+ proto = PROTOCOLID_ROCE;
+ break;
+ default:
+ return;
+ }
+
+ if (num_cons && num_tasks) {
+ qed_cxt_set_proto_cid_count(p_hwfn, proto, num_cons, 0);
+
+ /* Deliberately passing ROCE for the task id. This is because
+ * iWARP / RoCE share the task id.
+ */
+ qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ROCE,
+ QED_CXT_ROCE_TID_SEG, 1,
+ num_tasks, false);
+ qed_cxt_set_srq_count(p_hwfn, num_srqs);
+ } else {
+ DP_INFO(p_hwfn->cdev,
+ "RDMA personality used without setting params!\n");
+ }
+}
+int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
+{
/* Set the number of required CORE connections */
u32 core_cids = 1; /* SPQ */
qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
- qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
- p_params->num_cons, 1);
+ switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_ETH_ROCE:
+ {
+ qed_rdma_set_pf_params(p_hwfn,
+ &p_hwfn->
+ pf_params.rdma_pf_params);
+ /* no need for break since RoCE coexist with Ethernet */
+ }
+ case QED_PCI_ETH:
+ {
+ struct qed_eth_pf_params *p_params =
+ &p_hwfn->pf_params.eth_pf_params;
+
+ qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+ p_params->num_cons, 1);
+ break;
+ }
+ case QED_PCI_ISCSI:
+ {
+ struct qed_iscsi_pf_params *p_params;
+
+ p_params = &p_hwfn->pf_params.iscsi_pf_params;
+
+ if (p_params->num_cons && p_params->num_tasks) {
+ qed_cxt_set_proto_cid_count(p_hwfn,
+ PROTOCOLID_ISCSI,
+ p_params->num_cons,
+ 0);
+
+ qed_cxt_set_proto_tid_count(p_hwfn,
+ PROTOCOLID_ISCSI,
+ QED_CXT_ISCSI_TID_SEG,
+ 0,
+ p_params->num_tasks,
+ true);
+ } else {
+ DP_INFO(p_hwfn->cdev,
+ "Iscsi personality used without setting params!\n");
+ }
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
+ struct qed_tid_mem *p_info)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 proto, seg, total_lines, i, shadow_line;
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_fl_seg;
+ struct qed_tid_seg *p_seg_info;
+
+ /* Verify the personality */
+ switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_ISCSI:
+ proto = PROTOCOLID_ISCSI;
+ seg = QED_CXT_ISCSI_TID_SEG;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+ if (!p_cli->active)
+ return -EINVAL;
+
+ p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
+ if (!p_seg_info->has_fl_mem)
+ return -EINVAL;
+
+ p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
+ total_lines = DIV_ROUND_UP(p_fl_seg->total_size,
+ p_fl_seg->real_size_in_page);
+
+ for (i = 0; i < total_lines; i++) {
+ shadow_line = i + p_fl_seg->start_line -
+ p_hwfn->p_cxt_mngr->pf_start_line;
+ p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt;
+ }
+ p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
+ p_fl_seg->real_size_in_page;
+ p_info->tid_size = p_mngr->task_type_size[p_seg_info->type];
+ p_info->num_tids_per_block = p_fl_seg->real_size_in_page /
+ p_info->tid_size;
+
+ return 0;
+}
+
+/* This function is very RoCE-oriented; if another protocol wants this
+ * feature in the future we'll need to modify the function to be more generic
+ */
+int
+qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
+ enum qed_cxt_elem_type elem_type, u32 iid)
+{
+ u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line;
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_blk;
+ struct qed_ptt *p_ptt;
+ dma_addr_t p_phys;
+ u64 ilt_hw_entry;
+ void *p_virt;
+ int rc = 0;
+
+ switch (elem_type) {
+ case QED_ELEM_CXT:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+ elem_size = CONN_CXT_SIZE(p_hwfn);
+ p_blk = &p_cli->pf_blks[CDUC_BLK];
+ break;
+ case QED_ELEM_SRQ:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
+ elem_size = SRQ_CXT_SIZE;
+ p_blk = &p_cli->pf_blks[SRQ_BLK];
+ break;
+ case QED_ELEM_TASK:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
+ p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "-EINVALID elem type = %d", elem_type);
+ return -EINVAL;
+ }
+
+ /* Calculate line in ilt */
+ hw_p_size = p_cli->p_size.val;
+ elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
+ line = p_blk->start_line + (iid / elems_per_p);
+ shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line;
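+ /* E.g. (illustrative sizes): a 32K page with 128-byte elements gives
+ * elems_per_p = 256, so iid 1000 maps to p_blk->start_line + 3.
+ */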
+
+ /* If line is already allocated, do nothing, otherwise allocate it and
+ * write it to the PSWRQ2 registers.
+ * This section can be run in parallel from different contexts and thus
+ * a mutex protection is needed.
+ */
+
+ mutex_lock(&p_hwfn->p_cxt_mngr->mutex);
+
+ if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt)
+ goto out0;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_NOTICE(p_hwfn,
+ "QED_TIME_OUT on ptt acquire - dynamic allocation");
+ rc = -EBUSY;
+ goto out0;
+ }
+
+ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_blk->real_size_in_page,
+ &p_phys, GFP_KERNEL);
+ if (!p_virt) {
+ rc = -ENOMEM;
+ goto out1;
+ }
+ memset(p_virt, 0, p_blk->real_size_in_page);
+
+ /* configuration of refTagMask to 0xF is required for RoCE DIF MR only,
+ * to compensate for a HW bug, but it is configured even if DIF is not
+ * enabled. This is harmless and allows us to avoid a dedicated API. We
+ * configure the field for all of the contexts on the newly allocated
+ * page.
+ */
+ if (elem_type == QED_ELEM_TASK) {
+ u32 elem_i;
+ u8 *elem_start = (u8 *)p_virt;
+ union type1_task_context *elem;
+
+ for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
+ elem = (union type1_task_context *)elem_start;
+ SET_FIELD(elem->roce_ctx.tdif_context.flags1,
+ TDIF_TASK_CONTEXT_REFTAGMASK, 0xf);
+ elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
+ }
+ }
+
+ p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt = p_virt;
+ p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys = p_phys;
+ p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
+ p_blk->real_size_in_page;
+
+ /* compute absolute offset */
+ reg_offset = PSWRQ2_REG_ILT_MEMORY +
+ (line * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS);
+
+ ilt_hw_entry = 0;
+ SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
+ SET_FIELD(ilt_hw_entry,
+ ILT_ENTRY_PHY_ADDR,
+ (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys >> 12));
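+ /* The entry stores the page's physical address in 4K units,
+ * hence the '>> 12' above.
+ */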
+
+ /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
+ qed_dmae_host2grc(p_hwfn, p_ptt, (u64) (uintptr_t)&ilt_hw_entry,
+ reg_offset, sizeof(ilt_hw_entry) / sizeof(u32), 0);
+
+ if (elem_type == QED_ELEM_CXT) {
+ u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
+ elems_per_p;
+
+ /* Update the relevant register in the parser */
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF,
+ last_cid_allocated - 1);
+
+ if (!p_hwfn->b_rdma_enabled_in_prs) {
+ /* Enable RoCE search */
+ qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
+ p_hwfn->b_rdma_enabled_in_prs = true;
+ }
+ }
+
+out1:
+ qed_ptt_release(p_hwfn, p_ptt);
+out0:
+ mutex_unlock(&p_hwfn->p_cxt_mngr->mutex);
+
+ return rc;
+}
+
+/* This function is very RoCE-oriented; if another protocol wants this
+ * feature in the future we'll need to modify the function to be more generic
+ */
+static int
+qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn,
+ enum qed_cxt_elem_type elem_type,
+ u32 start_iid, u32 count)
+{
+ u32 start_line, end_line, shadow_start_line, shadow_end_line;
+ u32 reg_offset, elem_size, hw_p_size, elems_per_p;
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_blk;
+ u32 end_iid = start_iid + count;
+ struct qed_ptt *p_ptt;
+ u64 ilt_hw_entry = 0;
+ u32 i;
+
+ switch (elem_type) {
+ case QED_ELEM_CXT:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+ elem_size = CONN_CXT_SIZE(p_hwfn);
+ p_blk = &p_cli->pf_blks[CDUC_BLK];
+ break;
+ case QED_ELEM_SRQ:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
+ elem_size = SRQ_CXT_SIZE;
+ p_blk = &p_cli->pf_blks[SRQ_BLK];
+ break;
+ case QED_ELEM_TASK:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
+ p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "-EINVALID elem type = %d", elem_type);
+ return -EINVAL;
+ }
+
+ /* Calculate line in ilt */
+ hw_p_size = p_cli->p_size.val;
+ elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
+ start_line = p_blk->start_line + (start_iid / elems_per_p);
+ end_line = p_blk->start_line + (end_iid / elems_per_p);
+ if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p))
+ end_line--;
+
+ shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line;
+ shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_NOTICE(p_hwfn,
+ "QED_TIME_OUT on ptt acquire - dynamic allocation");
+ return -EBUSY;
+ }
+
+ for (i = shadow_start_line; i < shadow_end_line; i++) {
+ if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt)
+ continue;
+
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].size,
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt,
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys);
+
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = NULL;
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0;
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
+
+ /* compute absolute offset */
+ reg_offset = PSWRQ2_REG_ILT_MEMORY +
+ ((start_line++) * ILT_REG_SIZE_IN_BYTES *
+ ILT_ENTRY_IN_REGS);
+
+ /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a
+ * wide-bus.
+ */
+ qed_dmae_host2grc(p_hwfn, p_ptt,
+ (u64) (uintptr_t) &ilt_hw_entry,
+ reg_offset,
+ sizeof(ilt_hw_entry) / sizeof(u32),
+ 0);
+ }
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return 0;
+}
+
+int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto)
+{
+ int rc;
+ u32 cid;
+
+ /* Free Connection CXT */
+ rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_CXT,
+ qed_cxt_get_proto_cid_start(p_hwfn,
+ proto),
+ qed_cxt_get_proto_cid_count(p_hwfn,
+ proto, &cid));
+
+ if (rc)
+ return rc;
+
+ /* Free Task CXT */
+ rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0,
+ qed_cxt_get_proto_tid_count(p_hwfn, proto));
+ if (rc)
+ return rc;
+
+ /* Free TSDM CXT */
+ rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ, 0,
+ qed_cxt_get_srq_count(p_hwfn));
+
+ return rc;
+}
+
+int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
+ u32 tid, u8 ctx_type, void **pp_task_ctx)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_seg;
+ struct qed_tid_seg *p_seg_info;
+ u32 proto, seg;
+ u32 total_lines;
+ u32 tid_size, ilt_idx;
+ u32 num_tids_per_block;
+
+ /* Verify the personality */
+ switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_ISCSI:
+ proto = PROTOCOLID_ISCSI;
+ seg = QED_CXT_ISCSI_TID_SEG;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+ if (!p_cli->active)
+ return -EINVAL;
+
+ p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
+
+ if (ctx_type == QED_CTX_WORKING_MEM) {
+ p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)];
+ } else if (ctx_type == QED_CTX_FL_MEM) {
+ if (!p_seg_info->has_fl_mem)
+ return -EINVAL;
+ p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
+ } else {
+ return -EINVAL;
+ }
+ total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page);
+ tid_size = p_mngr->task_type_size[p_seg_info->type];
+ num_tids_per_block = p_seg->real_size_in_page / tid_size;
+
+ if (total_lines < tid / num_tids_per_block)
+ return -EINVAL;
+
+ ilt_idx = tid / num_tids_per_block + p_seg->start_line -
+ p_mngr->pf_start_line;
+ *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].p_virt +
+ (tid % num_tids_per_block) * tid_size;
return 0;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index 234c0fa8d..c6f6f2e81 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -21,6 +21,14 @@ struct qed_cxt_info {
enum protocol_type type;
};
+#define MAX_TID_BLOCKS 512
+struct qed_tid_mem {
+ u32 tid_size;
+ u32 num_tids_per_block;
+ u32 waste;
+ u8 *blocks[MAX_TID_BLOCKS]; /* 4K */
+};
+
/**
* @brief qed_cxt_acquire - Acquire a new cid of a specific protocol type
*
@@ -46,8 +54,22 @@ int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
struct qed_cxt_info *p_info);
+/**
+ * @brief qed_cxt_get_tid_mem_info
+ *
+ * @param p_hwfn
+ * @param p_info
+ *
+ * @return int
+ */
+int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
+ struct qed_tid_mem *p_info);
+
+#define QED_CXT_ISCSI_TID_SEG PROTOCOLID_ISCSI
+#define QED_CXT_ROCE_TID_SEG PROTOCOLID_ROCE
enum qed_cxt_elem_type {
QED_ELEM_CXT,
+ QED_ELEM_SRQ,
QED_ELEM_TASK
};
@@ -149,4 +171,6 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
u32 cid);
+#define QED_CTX_WORKING_MEM 0
+#define QED_CTX_FL_MEM 1
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 21ec1c2df..3656d2fd6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
+#include <linux/dcbnl.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -18,6 +19,10 @@
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_sp.h"
+#include "qed_sriov.h"
+#ifdef CONFIG_DCB
+#include <linux/qed/qed_eth_if.h>
+#endif
#define QED_DCBX_MAX_MIB_READ_TRY (100)
#define QED_ETH_TYPE_DEFAULT (0)
@@ -48,40 +53,94 @@ static bool qed_dcbx_app_ethtype(u32 app_info_bitmap)
DCBX_APP_SF_ETHTYPE);
}
+static bool qed_dcbx_ieee_app_ethtype(u32 app_info_bitmap)
+{
+ u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
+
+ /* Old MFW */
+ if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
+ return qed_dcbx_app_ethtype(app_info_bitmap);
+
+ return !!(mfw_val == DCBX_APP_SF_IEEE_ETHTYPE);
+}
+
static bool qed_dcbx_app_port(u32 app_info_bitmap)
{
return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
DCBX_APP_SF_PORT);
}
-static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id)
+static bool qed_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type)
+{
+ u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
+
+ /* Old MFW */
+ if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
+ return qed_dcbx_app_port(app_info_bitmap);
+
+ return !!(mfw_val == type || mfw_val == DCBX_APP_SF_IEEE_TCP_UDP_PORT);
+}
+
+static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
{
- return !!(qed_dcbx_app_ethtype(app_info_bitmap) &&
- proto_id == QED_ETH_TYPE_DEFAULT);
+ bool ethtype;
+
+ if (ieee)
+ ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
+ else
+ ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
+
+ return !!(ethtype && (proto_id == QED_ETH_TYPE_DEFAULT));
}
-static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id)
+static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
{
- return !!(qed_dcbx_app_port(app_info_bitmap) &&
- proto_id == QED_TCP_PORT_ISCSI);
+ bool port;
+
+ if (ieee)
+ port = qed_dcbx_ieee_app_port(app_info_bitmap,
+ DCBX_APP_SF_IEEE_TCP_PORT);
+ else
+ port = qed_dcbx_app_port(app_info_bitmap);
+
+ return !!(port && (proto_id == QED_TCP_PORT_ISCSI));
}
-static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id)
+static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
{
- return !!(qed_dcbx_app_ethtype(app_info_bitmap) &&
- proto_id == QED_ETH_TYPE_FCOE);
+ bool ethtype;
+
+ if (ieee)
+ ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
+ else
+ ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
+
+ return !!(ethtype && (proto_id == QED_ETH_TYPE_FCOE));
}
-static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id)
+static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
{
- return !!(qed_dcbx_app_ethtype(app_info_bitmap) &&
- proto_id == QED_ETH_TYPE_ROCE);
+ bool ethtype;
+
+ if (ieee)
+ ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
+ else
+ ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
+
+ return !!(ethtype && (proto_id == QED_ETH_TYPE_ROCE));
}
-static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id)
+static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
{
- return !!(qed_dcbx_app_port(app_info_bitmap) &&
- proto_id == QED_UDP_PORT_TYPE_ROCE_V2);
+ bool port;
+
+ if (ieee)
+ port = qed_dcbx_ieee_app_port(app_info_bitmap,
+ DCBX_APP_SF_IEEE_UDP_PORT);
+ else
+ port = qed_dcbx_app_port(app_info_bitmap);
+
+ return !!(port && (proto_id == QED_UDP_PORT_TYPE_ROCE_V2));
}
static void
@@ -160,17 +219,17 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
static bool
qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
u32 app_prio_bitmap,
- u16 id, enum dcbx_protocol_type *type)
+ u16 id, enum dcbx_protocol_type *type, bool ieee)
{
- if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id)) {
+ if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id, ieee)) {
*type = DCBX_PROTOCOL_FCOE;
- } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id)) {
+ } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id, ieee)) {
*type = DCBX_PROTOCOL_ROCE;
- } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id)) {
+ } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id, ieee)) {
*type = DCBX_PROTOCOL_ISCSI;
- } else if (qed_dcbx_default_tlv(app_prio_bitmap, id)) {
+ } else if (qed_dcbx_default_tlv(app_prio_bitmap, id, ieee)) {
*type = DCBX_PROTOCOL_ETH;
- } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id)) {
+ } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id, ieee)) {
*type = DCBX_PROTOCOL_ROCE_V2;
} else {
*type = DCBX_MAX_PROTOCOL_TYPE;
@@ -190,17 +249,18 @@ static int
qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
struct qed_dcbx_results *p_data,
struct dcbx_app_priority_entry *p_tbl,
- u32 pri_tc_tbl, int count, bool dcbx_enabled)
+ u32 pri_tc_tbl, int count, u8 dcbx_version)
{
u8 tc, priority_map;
enum dcbx_protocol_type type;
+ bool enable, ieee;
u16 protocol_id;
int priority;
- bool enable;
int i;
DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count);
+ ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE);
/* Parse APP TLV */
for (i = 0; i < count; i++) {
protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
@@ -215,7 +275,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority);
if (qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
- protocol_id, &type)) {
+ protocol_id, &type, ieee)) {
/* ETH always have the enable bit reset, as it gets
* vlan information per packet. For other protocols,
* should be set according to the dcbx_enabled
@@ -252,7 +312,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
if (p_data->arr[type].update)
continue;
- enable = (type == DCBX_PROTOCOL_ETH) ? false : dcbx_enabled;
+ enable = !(type == DCBX_PROTOCOL_ETH);
qed_dcbx_update_app_info(p_data, p_hwfn, enable, true,
priority, tc, type);
}
@@ -271,15 +331,12 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
struct dcbx_ets_feature *p_ets;
struct qed_hw_info *p_info;
u32 pri_tc_tbl, flags;
- bool dcbx_enabled;
+ u8 dcbx_version;
int num_entries;
int rc = 0;
- /* If DCBx version is non zero, then negotiation was
- * successfuly performed
- */
flags = p_hwfn->p_dcbx_info->operational.flags;
- dcbx_enabled = !!QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION);
+ dcbx_version = QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION);
p_app = &p_hwfn->p_dcbx_info->operational.features.app;
p_tbl = p_app->app_pri_tbl;
@@ -291,13 +348,13 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl,
- num_entries, dcbx_enabled);
+ num_entries, dcbx_version);
if (rc)
return rc;
p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
data.pf_id = p_hwfn->rel_pf_id;
- data.dcbx_enabled = dcbx_enabled;
+ data.dcbx_enabled = !!dcbx_version;
qed_dcbx_dp_protocol(p_hwfn, &data);
@@ -351,6 +408,325 @@ qed_dcbx_copy_mib(struct qed_hwfn *p_hwfn,
return rc;
}
+#ifdef CONFIG_DCB
+static void
+qed_dcbx_get_priority_info(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_app_prio *p_prio,
+ struct qed_dcbx_results *p_results)
+{
+ u8 val;
+
+ p_prio->roce = QED_DCBX_INVALID_PRIORITY;
+ p_prio->roce_v2 = QED_DCBX_INVALID_PRIORITY;
+ p_prio->iscsi = QED_DCBX_INVALID_PRIORITY;
+ p_prio->fcoe = QED_DCBX_INVALID_PRIORITY;
+
+ if (p_results->arr[DCBX_PROTOCOL_ROCE].update &&
+ p_results->arr[DCBX_PROTOCOL_ROCE].enable)
+ p_prio->roce = p_results->arr[DCBX_PROTOCOL_ROCE].priority;
+
+ if (p_results->arr[DCBX_PROTOCOL_ROCE_V2].update &&
+ p_results->arr[DCBX_PROTOCOL_ROCE_V2].enable) {
+ val = p_results->arr[DCBX_PROTOCOL_ROCE_V2].priority;
+ p_prio->roce_v2 = val;
+ }
+
+ if (p_results->arr[DCBX_PROTOCOL_ISCSI].update &&
+ p_results->arr[DCBX_PROTOCOL_ISCSI].enable)
+ p_prio->iscsi = p_results->arr[DCBX_PROTOCOL_ISCSI].priority;
+
+ if (p_results->arr[DCBX_PROTOCOL_FCOE].update &&
+ p_results->arr[DCBX_PROTOCOL_FCOE].enable)
+ p_prio->fcoe = p_results->arr[DCBX_PROTOCOL_FCOE].priority;
+
+ if (p_results->arr[DCBX_PROTOCOL_ETH].update &&
+ p_results->arr[DCBX_PROTOCOL_ETH].enable)
+ p_prio->eth = p_results->arr[DCBX_PROTOCOL_ETH].priority;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "Priorities: iscsi %d, roce %d, roce v2 %d, fcoe %d, eth %d\n",
+ p_prio->iscsi, p_prio->roce, p_prio->roce_v2, p_prio->fcoe,
+ p_prio->eth);
+}
+
+static void
+qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn,
+ struct dcbx_app_priority_feature *p_app,
+ struct dcbx_app_priority_entry *p_tbl,
+ struct qed_dcbx_params *p_params, bool ieee)
+{
+ struct qed_app_entry *entry;
+ u8 pri_map;
+ int i;
+
+ p_params->app_willing = QED_MFW_GET_FIELD(p_app->flags,
+ DCBX_APP_WILLING);
+ p_params->app_valid = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_ENABLED);
+ p_params->app_error = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_ERROR);
+ p_params->num_app_entries = QED_MFW_GET_FIELD(p_app->flags,
+ DCBX_APP_NUM_ENTRIES);
+ for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &p_params->app_entry[i];
+ if (ieee) {
+ u8 sf_ieee;
+ u32 val;
+
+ sf_ieee = QED_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_SF_IEEE);
+ switch (sf_ieee) {
+ case DCBX_APP_SF_IEEE_RESERVED:
+ /* Old MFW */
+ val = QED_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_SF);
+ entry->sf_ieee = val ?
+ QED_DCBX_SF_IEEE_TCP_UDP_PORT :
+ QED_DCBX_SF_IEEE_ETHTYPE;
+ break;
+ case DCBX_APP_SF_IEEE_ETHTYPE:
+ entry->sf_ieee = QED_DCBX_SF_IEEE_ETHTYPE;
+ break;
+ case DCBX_APP_SF_IEEE_TCP_PORT:
+ entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_PORT;
+ break;
+ case DCBX_APP_SF_IEEE_UDP_PORT:
+ entry->sf_ieee = QED_DCBX_SF_IEEE_UDP_PORT;
+ break;
+ case DCBX_APP_SF_IEEE_TCP_UDP_PORT:
+ entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_UDP_PORT;
+ break;
+ }
+ } else {
+ entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_SF));
+ }
+
+ pri_map = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP);
+ entry->prio = ffs(pri_map) - 1;
+ entry->proto_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_PROTOCOL_ID);
+ qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
+ entry->proto_id,
+ &entry->proto_type, ieee);
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "APP params: willing %d, valid %d error = %d\n",
+ p_params->app_willing, p_params->app_valid,
+ p_params->app_error);
+}
+
+static void
+qed_dcbx_get_pfc_data(struct qed_hwfn *p_hwfn,
+ u32 pfc, struct qed_dcbx_params *p_params)
+{
+ u8 pfc_map;
+
+ p_params->pfc.willing = QED_MFW_GET_FIELD(pfc, DCBX_PFC_WILLING);
+ p_params->pfc.max_tc = QED_MFW_GET_FIELD(pfc, DCBX_PFC_CAPS);
+ p_params->pfc.enabled = QED_MFW_GET_FIELD(pfc, DCBX_PFC_ENABLED);
+ pfc_map = QED_MFW_GET_FIELD(pfc, DCBX_PFC_PRI_EN_BITMAP);
+ p_params->pfc.prio[0] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_0);
+ p_params->pfc.prio[1] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_1);
+ p_params->pfc.prio[2] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_2);
+ p_params->pfc.prio[3] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_3);
+ p_params->pfc.prio[4] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_4);
+ p_params->pfc.prio[5] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_5);
+ p_params->pfc.prio[6] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_6);
+ p_params->pfc.prio[7] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_7);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "PFC params: willing %d, pfc_bitmap %d\n",
+ p_params->pfc.willing, pfc_map);
+}
+
+static void
+qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn,
+ struct dcbx_ets_feature *p_ets,
+ struct qed_dcbx_params *p_params)
+{
+ u32 bw_map[2], tsa_map[2], pri_map;
+ int i;
+
+ p_params->ets_willing = QED_MFW_GET_FIELD(p_ets->flags,
+ DCBX_ETS_WILLING);
+ p_params->ets_enabled = QED_MFW_GET_FIELD(p_ets->flags,
+ DCBX_ETS_ENABLED);
+ p_params->ets_cbs = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_CBS);
+ p_params->max_ets_tc = QED_MFW_GET_FIELD(p_ets->flags,
+ DCBX_ETS_MAX_TCS);
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "ETS params: willing %d, ets_cbs %d pri_tc_tbl_0 %x max_ets_tc %d\n",
+ p_params->ets_willing,
+ p_params->ets_cbs,
+ p_ets->pri_tc_tbl[0], p_params->max_ets_tc);
+
+ /* The 8-bit tsa and bw values for each of the 8 TCs are
+ * encoded in a u32 array of size 2.
+ */
+ bw_map[0] = be32_to_cpu(p_ets->tc_bw_tbl[0]);
+ bw_map[1] = be32_to_cpu(p_ets->tc_bw_tbl[1]);
+ tsa_map[0] = be32_to_cpu(p_ets->tc_tsa_tbl[0]);
+ tsa_map[1] = be32_to_cpu(p_ets->tc_tsa_tbl[1]);
+ pri_map = p_ets->pri_tc_tbl[0];
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
+ p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i];
+ p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i];
+ p_params->ets_pri_tc_tbl[i] = QED_DCBX_PRIO2TC(pri_map, i);
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "elem %d bw_tbl %x tsa_tbl %x\n",
+ i, p_params->ets_tc_bw_tbl[i],
+ p_params->ets_tc_tsa_tbl[i]);
+ }
+}
+
+static void
+qed_dcbx_get_common_params(struct qed_hwfn *p_hwfn,
+ struct dcbx_app_priority_feature *p_app,
+ struct dcbx_app_priority_entry *p_tbl,
+ struct dcbx_ets_feature *p_ets,
+ u32 pfc, struct qed_dcbx_params *p_params, bool ieee)
+{
+ qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params, ieee);
+ qed_dcbx_get_ets_data(p_hwfn, p_ets, p_params);
+ qed_dcbx_get_pfc_data(p_hwfn, pfc, p_params);
+}
+
+static void
+qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct qed_dcbx_get *params)
+{
+ struct dcbx_features *p_feat;
+
+ p_feat = &p_hwfn->p_dcbx_info->local_admin.features;
+ qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->local.params, false);
+ params->local.valid = true;
+}
+
+static void
+qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct qed_dcbx_get *params)
+{
+ struct dcbx_features *p_feat;
+
+ p_feat = &p_hwfn->p_dcbx_info->remote.features;
+ qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->remote.params, false);
+ params->remote.valid = true;
+}
+
+static void
+qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_dcbx_get *params)
+{
+ struct qed_dcbx_operational_params *p_operational;
+ struct qed_dcbx_results *p_results;
+ struct dcbx_features *p_feat;
+ bool enabled, err;
+ u32 flags;
+ bool val;
+
+ flags = p_hwfn->p_dcbx_info->operational.flags;
+
+ /* If the DCBx version is non-zero, then negotiation
+ * was successfully performed
+ */
+ p_operational = &params->operational;
+ enabled = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) !=
+ DCBX_CONFIG_VERSION_DISABLED);
+ if (!enabled) {
+ p_operational->enabled = enabled;
+ p_operational->valid = false;
+ return;
+ }
+
+ p_feat = &p_hwfn->p_dcbx_info->operational.features;
+ p_results = &p_hwfn->p_dcbx_info->results;
+
+ val = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_IEEE);
+ p_operational->ieee = val;
+ val = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_CEE);
+ p_operational->cee = val;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Version support: ieee %d, cee %d\n",
+ p_operational->ieee, p_operational->cee);
+
+ qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->operational.params,
+ p_operational->ieee);
+ qed_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio, p_results);
+ err = QED_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR);
+ p_operational->err = err;
+ p_operational->enabled = enabled;
+ p_operational->valid = true;
+}
+
+static void
+qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_dcbx_get *params)
+{
+ struct lldp_config_params_s *p_local;
+
+ p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE];
+
+ memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id,
+ ARRAY_SIZE(p_local->local_chassis_id));
+ memcpy(params->lldp_local.local_port_id, p_local->local_port_id,
+ ARRAY_SIZE(p_local->local_port_id));
+}
+
+static void
+qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_dcbx_get *params)
+{
+ struct lldp_status_params_s *p_remote;
+
+ p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE];
+
+ memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id,
+ ARRAY_SIZE(p_remote->peer_chassis_id));
+ memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
+ ARRAY_SIZE(p_remote->peer_port_id));
+}
+
+static int
+qed_dcbx_get_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_dcbx_get *p_params,
+ enum qed_mib_read_type type)
+{
+ switch (type) {
+ case QED_DCBX_REMOTE_MIB:
+ qed_dcbx_get_remote_params(p_hwfn, p_ptt, p_params);
+ break;
+ case QED_DCBX_LOCAL_MIB:
+ qed_dcbx_get_local_params(p_hwfn, p_ptt, p_params);
+ break;
+ case QED_DCBX_OPERATIONAL_MIB:
+ qed_dcbx_get_operational_params(p_hwfn, p_ptt, p_params);
+ break;
+ case QED_DCBX_REMOTE_LLDP_MIB:
+ qed_dcbx_get_remote_lldp_params(p_hwfn, p_ptt, p_params);
+ break;
+ case QED_DCBX_LOCAL_LLDP_MIB:
+ qed_dcbx_get_local_lldp_params(p_hwfn, p_ptt, p_params);
+ break;
+ default:
+ DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#endif
+
static int
qed_dcbx_read_local_lldp_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
@@ -561,3 +937,1377 @@ void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
p_dcb_data = &p_dest->eth_dcb_data;
qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ETH);
}
+
+#ifdef CONFIG_DCB
+static int qed_dcbx_query_params(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_get *p_get,
+ enum qed_mib_read_type type)
+{
+ struct qed_ptt *p_ptt;
+ int rc;
+
+ if (IS_VF(p_hwfn->cdev))
+ return -EINVAL;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ rc = qed_dcbx_read_mib(p_hwfn, p_ptt, type);
+ if (rc)
+ goto out;
+
+ rc = qed_dcbx_get_params(p_hwfn, p_ptt, p_get, type);
+
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+ return rc;
+}
+
+static void
+qed_dcbx_set_pfc_data(struct qed_hwfn *p_hwfn,
+ u32 *pfc, struct qed_dcbx_params *p_params)
+{
+ u8 pfc_map = 0;
+ int i;
+
+ if (p_params->pfc.willing)
+ *pfc |= DCBX_PFC_WILLING_MASK;
+ else
+ *pfc &= ~DCBX_PFC_WILLING_MASK;
+
+ if (p_params->pfc.enabled)
+ *pfc |= DCBX_PFC_ENABLED_MASK;
+ else
+ *pfc &= ~DCBX_PFC_ENABLED_MASK;
+
+ *pfc &= ~DCBX_PFC_CAPS_MASK;
+ *pfc |= (u32)p_params->pfc.max_tc << DCBX_PFC_CAPS_SHIFT;
+
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
+ if (p_params->pfc.prio[i])
+ pfc_map |= BIT(i);
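+ /* e.g. enabling only priorities 3 and 4 yields pfc_map = 0x18 */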
+
+ *pfc &= ~DCBX_PFC_PRI_EN_BITMAP_MASK;
+ *pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_SHIFT);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "pfc = 0x%x\n", *pfc);
+}
+
+static void
+qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn,
+ struct dcbx_ets_feature *p_ets,
+ struct qed_dcbx_params *p_params)
+{
+ u8 *bw_map, *tsa_map;
+ u32 val;
+ int i;
+
+ if (p_params->ets_willing)
+ p_ets->flags |= DCBX_ETS_WILLING_MASK;
+ else
+ p_ets->flags &= ~DCBX_ETS_WILLING_MASK;
+
+ if (p_params->ets_cbs)
+ p_ets->flags |= DCBX_ETS_CBS_MASK;
+ else
+ p_ets->flags &= ~DCBX_ETS_CBS_MASK;
+
+ if (p_params->ets_enabled)
+ p_ets->flags |= DCBX_ETS_ENABLED_MASK;
+ else
+ p_ets->flags &= ~DCBX_ETS_ENABLED_MASK;
+
+ p_ets->flags &= ~DCBX_ETS_MAX_TCS_MASK;
+ p_ets->flags |= (u32)p_params->max_ets_tc << DCBX_ETS_MAX_TCS_SHIFT;
+
+ bw_map = (u8 *)&p_ets->tc_bw_tbl[0];
+ tsa_map = (u8 *)&p_ets->tc_tsa_tbl[0];
+ p_ets->pri_tc_tbl[0] = 0;
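+ /* pri_tc_tbl[0] packs one 4-bit TC per priority: priority 0 lands
+ * in bits 31:28 and priority 7 in bits 3:0 (see the shift below).
+ */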
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
+ bw_map[i] = p_params->ets_tc_bw_tbl[i];
+ tsa_map[i] = p_params->ets_tc_tsa_tbl[i];
+ /* Copy the priority value to the corresponding 4 bits in the
+ * traffic class table.
+ */
+ val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4));
+ p_ets->pri_tc_tbl[0] |= val;
+ }
+ for (i = 0; i < 2; i++) {
+ p_ets->tc_bw_tbl[i] = cpu_to_be32(p_ets->tc_bw_tbl[i]);
+ p_ets->tc_tsa_tbl[i] = cpu_to_be32(p_ets->tc_tsa_tbl[i]);
+ }
+}
+
+static void
+qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn,
+ struct dcbx_app_priority_feature *p_app,
+ struct qed_dcbx_params *p_params, bool ieee)
+{
+ u32 *entry;
+ int i;
+
+ if (p_params->app_willing)
+ p_app->flags |= DCBX_APP_WILLING_MASK;
+ else
+ p_app->flags &= ~DCBX_APP_WILLING_MASK;
+
+ if (p_params->app_valid)
+ p_app->flags |= DCBX_APP_ENABLED_MASK;
+ else
+ p_app->flags &= ~DCBX_APP_ENABLED_MASK;
+
+ p_app->flags &= ~DCBX_APP_NUM_ENTRIES_MASK;
+ p_app->flags |= (u32)p_params->num_app_entries <<
+ DCBX_APP_NUM_ENTRIES_SHIFT;
+
+ for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &p_app->app_pri_tbl[i].entry;
+ *entry = 0;
+ if (ieee) {
+ *entry &= ~(DCBX_APP_SF_IEEE_MASK | DCBX_APP_SF_MASK);
+ switch (p_params->app_entry[i].sf_ieee) {
+ case QED_DCBX_SF_IEEE_ETHTYPE:
+ *entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE <<
+ DCBX_APP_SF_IEEE_SHIFT);
+ *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
+ DCBX_APP_SF_SHIFT);
+ break;
+ case QED_DCBX_SF_IEEE_TCP_PORT:
+ *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT <<
+ DCBX_APP_SF_IEEE_SHIFT);
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
+ break;
+ case QED_DCBX_SF_IEEE_UDP_PORT:
+ *entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT <<
+ DCBX_APP_SF_IEEE_SHIFT);
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
+ break;
+ case QED_DCBX_SF_IEEE_TCP_UDP_PORT:
+ *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT <<
+ DCBX_APP_SF_IEEE_SHIFT);
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
+ break;
+ }
+ } else {
+ *entry &= ~DCBX_APP_SF_MASK;
+ if (p_params->app_entry[i].ethtype)
+ *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
+ DCBX_APP_SF_SHIFT);
+ else
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
+ }
+
+ *entry &= ~DCBX_APP_PROTOCOL_ID_MASK;
+ *entry |= ((u32)p_params->app_entry[i].proto_id <<
+ DCBX_APP_PROTOCOL_ID_SHIFT);
+ *entry &= ~DCBX_APP_PRI_MAP_MASK;
+ *entry |= ((u32)(p_params->app_entry[i].prio) <<
+ DCBX_APP_PRI_MAP_SHIFT);
+ }
+}
+
+static void
+qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn,
+ struct dcbx_local_params *local_admin,
+ struct qed_dcbx_set *params)
+{
+ bool ieee = false;
+
+ local_admin->flags = 0;
+ memcpy(&local_admin->features,
+ &p_hwfn->p_dcbx_info->operational.features,
+ sizeof(local_admin->features));
+
+ if (params->enabled) {
+ local_admin->config = params->ver_num;
+ ieee = !!(params->ver_num & DCBX_CONFIG_VERSION_IEEE);
+ } else {
+ local_admin->config = DCBX_CONFIG_VERSION_DISABLED;
+ }
+
+ if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG)
+ qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc,
+ &params->config.params);
+
+ if (params->override_flags & QED_DCBX_OVERRIDE_ETS_CFG)
+ qed_dcbx_set_ets_data(p_hwfn, &local_admin->features.ets,
+ &params->config.params);
+
+ if (params->override_flags & QED_DCBX_OVERRIDE_APP_CFG)
+ qed_dcbx_set_app_data(p_hwfn, &local_admin->features.app,
+ &params->config.params, ieee);
+}
+
+int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_dcbx_set *params, bool hw_commit)
+{
+ struct dcbx_local_params local_admin;
+ struct qed_dcbx_mib_meta_data data;
+ u32 resp = 0, param = 0;
+ int rc = 0;
+
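+ /* Without a hardware commit, only cache the requested configuration;
+ * it is flushed to the MFW once a commit is requested (e.g., via
+ * dcbnl setall).
+ */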
+ if (!hw_commit) {
+ memcpy(&p_hwfn->p_dcbx_info->set, params,
+ sizeof(struct qed_dcbx_set));
+ return 0;
+ }
+
+ /* Clear the cached set-params */
+ memset(&p_hwfn->p_dcbx_info->set, 0, sizeof(p_hwfn->p_dcbx_info->set));
+
+ memset(&local_admin, 0, sizeof(local_admin));
+ qed_dcbx_set_local_params(p_hwfn, &local_admin, params);
+
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, local_admin_dcbx_mib);
+ data.local_admin = &local_admin;
+ data.size = sizeof(struct dcbx_local_params);
+ qed_memcpy_to(p_hwfn, p_ptt, data.addr, data.local_admin, data.size);
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_DCBX,
+ 1 << DRV_MB_PARAM_LLDP_SEND_SHIFT, &resp, &param);
+ if (rc)
+ DP_NOTICE(p_hwfn, "Failed to send DCBX update request\n");
+
+ return rc;
+}
+
+int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_set *params)
+{
+ struct qed_dcbx_get *dcbx_info;
+ int rc;
+
+ if (p_hwfn->p_dcbx_info->set.config.valid) {
+ memcpy(params, &p_hwfn->p_dcbx_info->set,
+ sizeof(struct qed_dcbx_set));
+ return 0;
+ }
+
+ dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
+ if (!dcbx_info) {
+ DP_ERR(p_hwfn, "Failed to allocate struct qed_dcbx_info\n");
+ return -ENOMEM;
+ }
+
+ rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB);
+ if (rc) {
+ kfree(dcbx_info);
+ return rc;
+ }
+
+ p_hwfn->p_dcbx_info->set.override_flags = 0;
+ p_hwfn->p_dcbx_info->set.ver_num = DCBX_CONFIG_VERSION_DISABLED;
+ if (dcbx_info->operational.cee)
+ p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_CEE;
+ if (dcbx_info->operational.ieee)
+ p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_IEEE;
+
+ p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
+ memcpy(&p_hwfn->p_dcbx_info->set.config.params,
+ &dcbx_info->operational.params,
+ sizeof(dcbx_info->operational.params));
+ p_hwfn->p_dcbx_info->set.config.valid = true;
+
+ memcpy(params, &p_hwfn->p_dcbx_info->set, sizeof(struct qed_dcbx_set));
+
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
+ enum qed_mib_read_type type)
+{
+ struct qed_dcbx_get *dcbx_info;
+
+ dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
+ if (!dcbx_info) {
+ DP_ERR(hwfn->cdev, "Failed to allocate memory for dcbx_info\n");
+ return NULL;
+ }
+
+ if (qed_dcbx_query_params(hwfn, dcbx_info, type)) {
+ kfree(dcbx_info);
+ return NULL;
+ }
+
+ if ((type == QED_DCBX_OPERATIONAL_MIB) &&
+ !dcbx_info->operational.enabled) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational\n");
+ kfree(dcbx_info);
+ return NULL;
+ }
+
+ return dcbx_info;
+}
+
+static u8 qed_dcbnl_getstate(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ bool enabled;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return 0;
+
+ enabled = dcbx_info->operational.enabled;
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "DCB state = %d\n", enabled);
+ kfree(dcbx_info);
+
+ return enabled;
+}
+
+static u8 qed_dcbnl_setstate(struct qed_dev *cdev, u8 state)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "DCB state = %d\n", state);
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return 1;
+
+ dcbx_set.enabled = !!state;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return 1;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc ? 1 : 0;
+}
+
+static void qed_dcbnl_getpgtccfgtx(struct qed_dev *cdev, int tc, u8 *prio_type,
+ u8 *pgid, u8 *bw_pct, u8 *up_map)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "tc = %d\n", tc);
+ *prio_type = *pgid = *bw_pct = *up_map = 0;
+ if (tc < 0 || tc >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid tc %d\n", tc);
+ return;
+ }
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return;
+
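+ /* Only the priority -> PG mapping is tracked; the remaining CEE
+ * Tx attributes are not maintained and are reported as zero.
+ */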
+ *pgid = dcbx_info->operational.params.ets_pri_tc_tbl[tc];
+ kfree(dcbx_info);
+}
+
+static void qed_dcbnl_getpgbwgcfgtx(struct qed_dev *cdev, int pgid, u8 *bw_pct)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+
+ *bw_pct = 0;
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "pgid = %d\n", pgid);
+ if (pgid < 0 || pgid >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid pgid %d\n", pgid);
+ return;
+ }
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return;
+
+ *bw_pct = dcbx_info->operational.params.ets_tc_bw_tbl[pgid];
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "bw_pct = %d\n", *bw_pct);
+ kfree(dcbx_info);
+}
+
+static void qed_dcbnl_getpgtccfgrx(struct qed_dev *cdev, int tc, u8 *prio,
+ u8 *bwg_id, u8 *bw_pct, u8 *up_map)
+{
+ DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n");
+ *prio = *bwg_id = *bw_pct = *up_map = 0;
+}
+
+static void qed_dcbnl_getpgbwgcfgrx(struct qed_dev *cdev,
+ int bwg_id, u8 *bw_pct)
+{
+ DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n");
+ *bw_pct = 0;
+}
+
+static void qed_dcbnl_getpfccfg(struct qed_dev *cdev,
+ int priority, u8 *setting)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "priority = %d\n", priority);
+ if (priority < 0 || priority >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid priority %d\n", priority);
+ return;
+ }
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return;
+
+ *setting = dcbx_info->operational.params.pfc.prio[priority];
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "setting = %d\n", *setting);
+ kfree(dcbx_info);
+}
+
+static void qed_dcbnl_setpfccfg(struct qed_dev *cdev, int priority, u8 setting)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "priority = %d setting = %d\n",
+ priority, setting);
+ if (priority < 0 || priority >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid priority %d\n", priority);
+ return;
+ }
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return;
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
+ dcbx_set.config.params.pfc.prio[priority] = !!setting;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+}
+
+static u8 qed_dcbnl_getcap(struct qed_dev *cdev, int capid, u8 *cap)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ int rc = 0;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "capid = %d\n", capid);
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return 1;
+
+ switch (capid) {
+ case DCB_CAP_ATTR_PG:
+ case DCB_CAP_ATTR_PFC:
+ case DCB_CAP_ATTR_UP2TC:
+ case DCB_CAP_ATTR_GSP:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_PG_TCS:
+ case DCB_CAP_ATTR_PFC_TCS:
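+ /* Bitmap encoding - bit 7 set means 8 traffic classes */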
+ *cap = 0x80;
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = (DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE |
+ DCB_CAP_DCBX_VER_IEEE);
+ break;
+ default:
+ *cap = false;
+ rc = 1;
+ }
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "id = %d caps = %d\n", capid, *cap);
+ kfree(dcbx_info);
+
+ return rc;
+}
+
+static int qed_dcbnl_getnumtcs(struct qed_dev *cdev, int tcid, u8 *num)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ int rc = 0;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "tcid = %d\n", tcid);
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ switch (tcid) {
+ case DCB_NUMTCS_ATTR_PG:
+ *num = dcbx_info->operational.params.max_ets_tc;
+ break;
+ case DCB_NUMTCS_ATTR_PFC:
+ *num = dcbx_info->operational.params.pfc.max_tc;
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ kfree(dcbx_info);
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "numtcs = %d\n", *num);
+
+ return rc;
+}
+
+static u8 qed_dcbnl_getpfcstate(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ bool enabled;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return 0;
+
+ enabled = dcbx_info->operational.params.pfc.enabled;
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "pfc state = %d\n", enabled);
+ kfree(dcbx_info);
+
+ return enabled;
+}
+
+static u8 qed_dcbnl_getdcbx(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ u8 mode = 0;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return 0;
+
+ if (dcbx_info->operational.enabled)
+ mode |= DCB_CAP_DCBX_LLD_MANAGED;
+ if (dcbx_info->operational.ieee)
+ mode |= DCB_CAP_DCBX_VER_IEEE;
+ if (dcbx_info->operational.cee)
+ mode |= DCB_CAP_DCBX_VER_CEE;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "dcb mode = %d\n", mode);
+ kfree(dcbx_info);
+
+ return mode;
+}
+
+static void qed_dcbnl_setpgtccfgtx(struct qed_dev *cdev,
+ int tc,
+ u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB,
+ "tc = %d pri_type = %d pgid = %d bw_pct = %d up_map = %d\n",
+ tc, pri_type, pgid, bw_pct, up_map);
+
+ if (tc < 0 || tc >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid tc %d\n", tc);
+ return;
+ }
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return;
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
+ dcbx_set.config.params.ets_pri_tc_tbl[tc] = pgid;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+}
+
+static void qed_dcbnl_setpgtccfgrx(struct qed_dev *cdev, int prio,
+ u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map)
+{
+ DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n");
+}
+
+static void qed_dcbnl_setpgbwgcfgtx(struct qed_dev *cdev, int pgid, u8 bw_pct)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "pgid = %d bw_pct = %d\n", pgid, bw_pct);
+ if (pgid < 0 || pgid >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid pgid %d\n", pgid);
+ return;
+ }
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return;
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
+ dcbx_set.config.params.ets_tc_bw_tbl[pgid] = bw_pct;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+}
+
+static void qed_dcbnl_setpgbwgcfgrx(struct qed_dev *cdev, int pgid, u8 bw_pct)
+{
+ DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n");
+}
+
+static u8 qed_dcbnl_setall(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return 1;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return 1;
+
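+ /* Commit the cached configuration to the hardware */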
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 1);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+static int qed_dcbnl_setnumtcs(struct qed_dev *cdev, int tcid, u8 num)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "tcid = %d num = %d\n", tcid, num);
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return 1;
+
+ switch (tcid) {
+ case DCB_NUMTCS_ATTR_PG:
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
+ dcbx_set.config.params.max_ets_tc = num;
+ break;
+ case DCB_NUMTCS_ATTR_PFC:
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
+ dcbx_set.config.params.pfc.max_tc = num;
+ break;
+ default:
+ DP_INFO(hwfn, "Invalid tcid %d\n", tcid);
+ return -EINVAL;
+ }
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EINVAL;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return 0;
+}
+
+static void qed_dcbnl_setpfcstate(struct qed_dev *cdev, u8 state)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "new state = %d\n", state);
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return;
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
+ dcbx_set.config.params.pfc.enabled = !!state;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+}
+
+static int qed_dcbnl_getapp(struct qed_dev *cdev, u8 idtype, u16 idval)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ struct qed_app_entry *entry;
+ bool ethtype;
+ u8 prio = 0;
+ int i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ ethtype = !!(idtype == DCB_APP_IDTYPE_ETHTYPE);
+ for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &dcbx_info->operational.params.app_entry[i];
+ if ((entry->ethtype == ethtype) && (entry->proto_id == idval)) {
+ prio = entry->prio;
+ break;
+ }
+ }
+
+ if (i == QED_DCBX_MAX_APP_PROTOCOL) {
+ DP_ERR(cdev, "App entry (%d, %d) not found\n", idtype, idval);
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ kfree(dcbx_info);
+
+ return prio;
+}
+
+static int qed_dcbnl_setapp(struct qed_dev *cdev,
+ u8 idtype, u16 idval, u8 pri_map)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_app_entry *entry;
+ struct qed_ptt *ptt;
+ bool ethtype;
+ int rc, i;
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return -EINVAL;
+
+ ethtype = !!(idtype == DCB_APP_IDTYPE_ETHTYPE);
+ for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &dcbx_set.config.params.app_entry[i];
+ if ((entry->ethtype == ethtype) && (entry->proto_id == idval))
+ break;
+ /* First empty slot */
+ if (!entry->proto_id) {
+ dcbx_set.config.params.num_app_entries++;
+ break;
+ }
+ }
+
+ if (i == QED_DCBX_MAX_APP_PROTOCOL) {
+ DP_ERR(cdev, "App table is full\n");
+ return -EBUSY;
+ }
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG;
+ dcbx_set.config.params.app_entry[i].ethtype = ethtype;
+ dcbx_set.config.params.app_entry[i].proto_id = idval;
+ dcbx_set.config.params.app_entry[i].prio = pri_map;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EBUSY;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+static u8 qed_dcbnl_setdcbx(struct qed_dev *cdev, u8 mode)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "new mode = %x\n", mode);
+
+ if (!(mode & DCB_CAP_DCBX_VER_IEEE) && !(mode & DCB_CAP_DCBX_VER_CEE)) {
+ DP_INFO(hwfn, "Allowed mode is cee, ieee or both\n");
+ return 1;
+ }
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return 1;
+
+ dcbx_set.ver_num = 0;
+ if (mode & DCB_CAP_DCBX_VER_CEE) {
+ dcbx_set.ver_num |= DCBX_CONFIG_VERSION_CEE;
+ dcbx_set.enabled = true;
+ }
+
+ if (mode & DCB_CAP_DCBX_VER_IEEE) {
+ dcbx_set.ver_num |= DCBX_CONFIG_VERSION_IEEE;
+ dcbx_set.enabled = true;
+ }
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return 1;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return 0;
+}
+
+static u8 qed_dcbnl_getfeatcfg(struct qed_dev *cdev, int featid, u8 *flags)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "Feature id = %d\n", featid);
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return 1;
+
+ *flags = 0;
+ switch (featid) {
+ case DCB_FEATCFG_ATTR_PG:
+ if (dcbx_info->operational.params.ets_enabled)
+ *flags = DCB_FEATCFG_ENABLE;
+ else
+ *flags = DCB_FEATCFG_ERROR;
+ break;
+ case DCB_FEATCFG_ATTR_PFC:
+ if (dcbx_info->operational.params.pfc.enabled)
+ *flags = DCB_FEATCFG_ENABLE;
+ else
+ *flags = DCB_FEATCFG_ERROR;
+ break;
+ case DCB_FEATCFG_ATTR_APP:
+ if (dcbx_info->operational.params.app_valid)
+ *flags = DCB_FEATCFG_ENABLE;
+ else
+ *flags = DCB_FEATCFG_ERROR;
+ break;
+ default:
+ DP_INFO(hwfn, "Invalid feature-ID %d\n", featid);
+ kfree(dcbx_info);
+ return 1;
+ }
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "flags = %d\n", *flags);
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static u8 qed_dcbnl_setfeatcfg(struct qed_dev *cdev, int featid, u8 flags)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ bool enabled, willing;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "featid = %d flags = %d\n",
+ featid, flags);
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return 1;
+
+ enabled = !!(flags & DCB_FEATCFG_ENABLE);
+ willing = !!(flags & DCB_FEATCFG_WILLING);
+ switch (featid) {
+ case DCB_FEATCFG_ATTR_PG:
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
+ dcbx_set.config.params.ets_enabled = enabled;
+ dcbx_set.config.params.ets_willing = willing;
+ break;
+ case DCB_FEATCFG_ATTR_PFC:
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
+ dcbx_set.config.params.pfc.enabled = enabled;
+ dcbx_set.config.params.pfc.willing = willing;
+ break;
+ case DCB_FEATCFG_ATTR_APP:
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG;
+ dcbx_set.config.params.app_willing = willing;
+ break;
+ default:
+ DP_INFO(hwfn, "Invalid feature-ID %d\n", featid);
+ return 1;
+ }
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return 1;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return 0;
+}
+
+static int qed_dcbnl_peer_getappinfo(struct qed_dev *cdev,
+ struct dcb_peer_app_info *info,
+ u16 *app_count)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ info->willing = dcbx_info->remote.params.app_willing;
+ info->error = dcbx_info->remote.params.app_error;
+ *app_count = dcbx_info->remote.params.num_app_entries;
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static int qed_dcbnl_peer_getapptable(struct qed_dev *cdev,
+ struct dcb_app *table)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ int i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ for (i = 0; i < dcbx_info->remote.params.num_app_entries; i++) {
+ if (dcbx_info->remote.params.app_entry[i].ethtype)
+ table[i].selector = DCB_APP_IDTYPE_ETHTYPE;
+ else
+ table[i].selector = DCB_APP_IDTYPE_PORTNUM;
+ table[i].priority = dcbx_info->remote.params.app_entry[i].prio;
+ table[i].protocol =
+ dcbx_info->remote.params.app_entry[i].proto_id;
+ }
+
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static int qed_dcbnl_cee_peer_getpfc(struct qed_dev *cdev, struct cee_pfc *pfc)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ int i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
+ if (dcbx_info->remote.params.pfc.prio[i])
+ pfc->pfc_en |= BIT(i);
+
+ pfc->tcs_supported = dcbx_info->remote.params.pfc.max_tc;
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "pfc state = %d tcs_supported = %d\n",
+ pfc->pfc_en, pfc->tcs_supported);
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static int qed_dcbnl_cee_peer_getpg(struct qed_dev *cdev, struct cee_pg *pg)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ int i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ pg->willing = dcbx_info->remote.params.ets_willing;
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
+ pg->pg_bw[i] = dcbx_info->remote.params.ets_tc_bw_tbl[i];
+ pg->prio_pg[i] = dcbx_info->remote.params.ets_pri_tc_tbl[i];
+ }
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "willing = %d", pg->willing);
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static int qed_dcbnl_get_ieee_pfc(struct qed_dev *cdev,
+ struct ieee_pfc *pfc, bool remote)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_params *params;
+ struct qed_dcbx_get *dcbx_info;
+ int rc, i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ if (!dcbx_info->operational.ieee) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ if (remote) {
+ memset(dcbx_info, 0, sizeof(*dcbx_info));
+ rc = qed_dcbx_query_params(hwfn, dcbx_info,
+ QED_DCBX_REMOTE_MIB);
+ if (rc) {
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ params = &dcbx_info->remote.params;
+ } else {
+ params = &dcbx_info->operational.params;
+ }
+
+ pfc->pfc_cap = params->pfc.max_tc;
+ pfc->pfc_en = 0;
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
+ if (params->pfc.prio[i])
+ pfc->pfc_en |= BIT(i);
+
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static int qed_dcbnl_ieee_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
+{
+ return qed_dcbnl_get_ieee_pfc(cdev, pfc, false);
+}
+
+static int qed_dcbnl_ieee_setpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc, i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ if (!dcbx_info->operational.ieee) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ kfree(dcbx_info);
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return -EINVAL;
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
+ dcbx_set.config.params.pfc.prio[i] = !!(pfc->pfc_en & BIT(i));
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EINVAL;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+static int qed_dcbnl_get_ieee_ets(struct qed_dev *cdev,
+ struct ieee_ets *ets, bool remote)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ struct qed_dcbx_params *params;
+ int rc;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ if (!dcbx_info->operational.ieee) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ if (remote) {
+ memset(dcbx_info, 0, sizeof(*dcbx_info));
+ rc = qed_dcbx_query_params(hwfn, dcbx_info,
+ QED_DCBX_REMOTE_MIB);
+ if (rc) {
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ params = &dcbx_info->remote.params;
+ } else {
+ params = &dcbx_info->operational.params;
+ }
+
+ ets->ets_cap = params->max_ets_tc;
+ ets->willing = params->ets_willing;
+ ets->cbs = params->ets_cbs;
+ memcpy(ets->tc_tx_bw, params->ets_tc_bw_tbl, sizeof(ets->tc_tx_bw));
+ memcpy(ets->tc_tsa, params->ets_tc_tsa_tbl, sizeof(ets->tc_tsa));
+ memcpy(ets->prio_tc, params->ets_pri_tc_tbl, sizeof(ets->prio_tc));
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static int qed_dcbnl_ieee_getets(struct qed_dev *cdev, struct ieee_ets *ets)
+{
+ return qed_dcbnl_get_ieee_ets(cdev, ets, false);
+}
+
+static int qed_dcbnl_ieee_setets(struct qed_dev *cdev, struct ieee_ets *ets)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ if (!dcbx_info->operational.ieee) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ kfree(dcbx_info);
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return -EINVAL;
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
+ dcbx_set.config.params.max_ets_tc = ets->ets_cap;
+ dcbx_set.config.params.ets_willing = ets->willing;
+ dcbx_set.config.params.ets_cbs = ets->cbs;
+ memcpy(dcbx_set.config.params.ets_tc_bw_tbl, ets->tc_tx_bw,
+ sizeof(ets->tc_tx_bw));
+ memcpy(dcbx_set.config.params.ets_tc_tsa_tbl, ets->tc_tsa,
+ sizeof(ets->tc_tsa));
+ memcpy(dcbx_set.config.params.ets_pri_tc_tbl, ets->prio_tc,
+ sizeof(ets->prio_tc));
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EINVAL;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+int qed_dcbnl_ieee_peer_getets(struct qed_dev *cdev, struct ieee_ets *ets)
+{
+ return qed_dcbnl_get_ieee_ets(cdev, ets, true);
+}
+
+int qed_dcbnl_ieee_peer_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
+{
+ return qed_dcbnl_get_ieee_pfc(cdev, pfc, true);
+}
+
+int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ struct qed_app_entry *entry;
+ bool ethtype;
+ u8 prio = 0;
+ int i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ if (!dcbx_info->operational.ieee) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ /* ieee defines the selector field value for ethertype to be 1 */
+ ethtype = !!((app->selector - 1) == DCB_APP_IDTYPE_ETHTYPE);
+ for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &dcbx_info->operational.params.app_entry[i];
+ if ((entry->ethtype == ethtype) &&
+ (entry->proto_id == app->protocol)) {
+ prio = entry->prio;
+ break;
+ }
+ }
+
+ if (i == QED_DCBX_MAX_APP_PROTOCOL) {
+ DP_ERR(cdev, "App entry (%d, %d) not found\n", app->selector,
+ app->protocol);
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
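+ /* prio is kept as a bitmap (see qed_dcbnl_ieee_setapp); IEEE expects
+ * the priority value itself, so report the lowest set bit.
+ */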
+ app->priority = ffs(prio) - 1;
+
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ struct qed_dcbx_set dcbx_set;
+ struct qed_app_entry *entry;
+ struct qed_ptt *ptt;
+ bool ethtype;
+ int rc, i;
+
+ if (app->priority >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid priority %d\n", app->priority);
+ return -EINVAL;
+ }
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ if (!dcbx_info->operational.ieee) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ kfree(dcbx_info);
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return -EINVAL;
+
+ /* ieee defines the selector field value for ethertype to be 1 */
+ ethtype = !!((app->selector - 1) == DCB_APP_IDTYPE_ETHTYPE);
+ for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &dcbx_set.config.params.app_entry[i];
+ if ((entry->ethtype == ethtype) &&
+ (entry->proto_id == app->protocol))
+ break;
+ /* First empty slot */
+ if (!entry->proto_id) {
+ dcbx_set.config.params.num_app_entries++;
+ break;
+ }
+ }
+
+ if (i == QED_DCBX_MAX_APP_PROTOCOL) {
+ DP_ERR(cdev, "App table is full\n");
+ return -EBUSY;
+ }
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG;
+ dcbx_set.config.params.app_entry[i].ethtype = ethtype;
+ dcbx_set.config.params.app_entry[i].proto_id = app->protocol;
+ dcbx_set.config.params.app_entry[i].prio = BIT(app->priority);
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EBUSY;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass = {
+ .getstate = qed_dcbnl_getstate,
+ .setstate = qed_dcbnl_setstate,
+ .getpgtccfgtx = qed_dcbnl_getpgtccfgtx,
+ .getpgbwgcfgtx = qed_dcbnl_getpgbwgcfgtx,
+ .getpgtccfgrx = qed_dcbnl_getpgtccfgrx,
+ .getpgbwgcfgrx = qed_dcbnl_getpgbwgcfgrx,
+ .getpfccfg = qed_dcbnl_getpfccfg,
+ .setpfccfg = qed_dcbnl_setpfccfg,
+ .getcap = qed_dcbnl_getcap,
+ .getnumtcs = qed_dcbnl_getnumtcs,
+ .getpfcstate = qed_dcbnl_getpfcstate,
+ .getdcbx = qed_dcbnl_getdcbx,
+ .setpgtccfgtx = qed_dcbnl_setpgtccfgtx,
+ .setpgtccfgrx = qed_dcbnl_setpgtccfgrx,
+ .setpgbwgcfgtx = qed_dcbnl_setpgbwgcfgtx,
+ .setpgbwgcfgrx = qed_dcbnl_setpgbwgcfgrx,
+ .setall = qed_dcbnl_setall,
+ .setnumtcs = qed_dcbnl_setnumtcs,
+ .setpfcstate = qed_dcbnl_setpfcstate,
+ .setapp = qed_dcbnl_setapp,
+ .setdcbx = qed_dcbnl_setdcbx,
+ .setfeatcfg = qed_dcbnl_setfeatcfg,
+ .getfeatcfg = qed_dcbnl_getfeatcfg,
+ .getapp = qed_dcbnl_getapp,
+ .peer_getappinfo = qed_dcbnl_peer_getappinfo,
+ .peer_getapptable = qed_dcbnl_peer_getapptable,
+ .cee_peer_getpfc = qed_dcbnl_cee_peer_getpfc,
+ .cee_peer_getpg = qed_dcbnl_cee_peer_getpg,
+ .ieee_getpfc = qed_dcbnl_ieee_getpfc,
+ .ieee_setpfc = qed_dcbnl_ieee_setpfc,
+ .ieee_getets = qed_dcbnl_ieee_getets,
+ .ieee_setets = qed_dcbnl_ieee_setets,
+ .ieee_peer_getpfc = qed_dcbnl_ieee_peer_getpfc,
+ .ieee_peer_getets = qed_dcbnl_ieee_peer_getets,
+ .ieee_getapp = qed_dcbnl_ieee_getapp,
+ .ieee_setapp = qed_dcbnl_ieee_setapp,
+};
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
index e7f834dbd..9ba681643 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
@@ -33,6 +33,24 @@ struct qed_dcbx_app_data {
u8 tc; /* Traffic Class */
};
+#ifdef CONFIG_DCB
+#define QED_DCBX_VERSION_DISABLED 0
+#define QED_DCBX_VERSION_IEEE 1
+#define QED_DCBX_VERSION_CEE 2
+
+struct qed_dcbx_set {
+#define QED_DCBX_OVERRIDE_STATE BIT(0)
+#define QED_DCBX_OVERRIDE_PFC_CFG BIT(1)
+#define QED_DCBX_OVERRIDE_ETS_CFG BIT(2)
+#define QED_DCBX_OVERRIDE_APP_CFG BIT(3)
+#define QED_DCBX_OVERRIDE_DSCP_CFG BIT(4)
+ u32 override_flags;
+ bool enabled;
+ struct qed_dcbx_admin_params config;
+ u32 ver_num;
+};
+#endif
+
struct qed_dcbx_results {
bool dcbx_enabled;
u8 pf_id;
@@ -55,6 +73,9 @@ struct qed_dcbx_info {
struct qed_dcbx_results results;
struct dcbx_mib operational;
struct dcbx_mib remote;
+#ifdef CONFIG_DCB
+ struct qed_dcbx_set set;
+#endif
u8 dcbx_cap;
};
@@ -67,6 +88,13 @@ struct qed_dcbx_mib_meta_data {
u32 addr;
};
+#ifdef CONFIG_DCB
+int qed_dcbx_get_config_params(struct qed_hwfn *, struct qed_dcbx_set *);
+
+int qed_dcbx_config_params(struct qed_hwfn *,
+ struct qed_ptt *, struct qed_dcbx_set *, bool);
+#endif
+
/* QED local interface routines */
int
qed_dcbx_mib_update_event(struct qed_hwfn *,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 2d89e8c16..0e4f4a930 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -17,6 +17,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
@@ -160,9 +161,13 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct init_qm_port_params *p_qm_port;
+ bool init_rdma_offload_pq = false;
+ bool init_pure_ack_pq = false;
+ bool init_ooo_pq = false;
u16 num_pqs, multi_cos_tcs = 1;
u8 pf_wfq = qm_info->pf_wfq;
u32 pf_rl = qm_info->pf_rl;
+ u16 num_pf_rls = 0;
u16 num_vfs = 0;
#ifdef CONFIG_QED_SRIOV
@@ -174,6 +179,25 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
num_pqs = multi_cos_tcs + num_vfs + 1; /* The '1' is for pure-LB */
num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
+ if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+ num_pqs++; /* for RoCE queue */
+ init_rdma_offload_pq = true;
+ /* Subtract num_vfs because each VF requires a rate limiter,
+ * plus one for the default rate limiter.
+ */
+ if (p_hwfn->pf_params.rdma_pf_params.enable_dcqcn)
+ num_pf_rls = RESC_NUM(p_hwfn, QED_RL) - num_vfs - 1;
+
+ num_pqs += num_pf_rls;
+ qm_info->num_pf_rls = (u8) num_pf_rls;
+ }
+
+ if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
+ num_pqs += 2; /* for iSCSI pure-ACK / OOO queue */
+ init_pure_ack_pq = true;
+ init_ooo_pq = true;
+ }
+
/* Sanity checking that setup requires legal number of resources */
if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
DP_ERR(p_hwfn,
@@ -211,12 +235,22 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
+ /* First init rate limited queues */
+ for (curr_queue = 0; curr_queue < num_pf_rls; curr_queue++) {
+ qm_info->qm_pq_params[curr_queue].vport_id = vport_id++;
+ qm_info->qm_pq_params[curr_queue].tc_id =
+ p_hwfn->hw_info.non_offload_tc;
+ qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+ qm_info->qm_pq_params[curr_queue].rl_valid = 1;
+ }
+
/* First init per-TC PQs */
for (i = 0; i < multi_cos_tcs; i++) {
struct init_qm_pq_params *params =
&qm_info->qm_pq_params[curr_queue++];
- if (p_hwfn->hw_info.personality == QED_PCI_ETH) {
+ if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE ||
+ p_hwfn->hw_info.personality == QED_PCI_ETH) {
params->vport_id = vport_id;
params->tc_id = p_hwfn->hw_info.non_offload_tc;
params->wrr_group = 1;
@@ -236,6 +270,32 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
curr_queue++;
qm_info->offload_pq = 0;
+ if (init_rdma_offload_pq) {
+ qm_info->offload_pq = curr_queue;
+ qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
+ qm_info->qm_pq_params[curr_queue].tc_id =
+ p_hwfn->hw_info.offload_tc;
+ qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+ curr_queue++;
+ }
+
+ if (init_pure_ack_pq) {
+ qm_info->pure_ack_pq = curr_queue;
+ qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
+ qm_info->qm_pq_params[curr_queue].tc_id =
+ p_hwfn->hw_info.offload_tc;
+ qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+ curr_queue++;
+ }
+
+ if (init_ooo_pq) {
+ qm_info->ooo_pq = curr_queue;
+ qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
+ qm_info->qm_pq_params[curr_queue].tc_id = DCBX_ISCSI_OOO_TC;
+ qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+ curr_queue++;
+ }
+
/* Then init per-VF PQs */
vf_offset = curr_queue;
for (i = 0; i < num_vfs; i++) {
@@ -244,6 +304,7 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
qm_info->qm_pq_params[curr_queue].tc_id =
p_hwfn->hw_info.non_offload_tc;
qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+ qm_info->qm_pq_params[curr_queue].rl_valid = 1;
curr_queue++;
}
@@ -256,7 +317,10 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
for (i = 0; i < num_ports; i++) {
p_qm_port = &qm_info->qm_port_params[i];
p_qm_port->active = 1;
- p_qm_port->num_active_phys_tcs = 4;
+ if (num_ports == 4)
+ p_qm_port->active_phys_tcs = 0x7;
+ else
+ p_qm_port->active_phys_tcs = 0x9f;
p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
}
@@ -366,21 +430,20 @@ int qed_resc_alloc(struct qed_dev *cdev)
if (!p_hwfn->p_tx_cids) {
DP_NOTICE(p_hwfn,
"Failed to allocate memory for Tx Cids\n");
- rc = -ENOMEM;
- goto alloc_err;
+ goto alloc_no_mem;
}
p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
if (!p_hwfn->p_rx_cids) {
DP_NOTICE(p_hwfn,
"Failed to allocate memory for Rx Cids\n");
- rc = -ENOMEM;
- goto alloc_err;
+ goto alloc_no_mem;
}
}
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ u32 n_eqes, num_cons;
/* First allocate the context manager structure */
rc = qed_cxt_mngr_alloc(p_hwfn);
@@ -429,18 +492,35 @@ int qed_resc_alloc(struct qed_dev *cdev)
goto alloc_err;
/* EQ */
- p_eq = qed_eq_alloc(p_hwfn, 256);
- if (!p_eq) {
- rc = -ENOMEM;
+ n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain);
+ if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+ num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
+ PROTOCOLID_ROCE,
+ 0) * 2;
+ n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
+ } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
+ num_cons =
+ qed_cxt_get_proto_cid_count(p_hwfn,
+ PROTOCOLID_ISCSI, 0);
+ n_eqes += 2 * num_cons;
+ }
+
+ if (n_eqes > 0xFFFF) {
+ DP_ERR(p_hwfn,
+ "Cannot allocate 0x%x EQ elements. The maximum of a u16 chain is 0x%x\n",
+ n_eqes, 0xFFFF);
+ rc = -EINVAL;
goto alloc_err;
}
+
+ p_eq = qed_eq_alloc(p_hwfn, (u16) n_eqes);
+ if (!p_eq)
+ goto alloc_no_mem;
p_hwfn->p_eq = p_eq;
p_consq = qed_consq_alloc(p_hwfn);
- if (!p_consq) {
- rc = -ENOMEM;
- goto alloc_err;
- }
+ if (!p_consq)
+ goto alloc_no_mem;
p_hwfn->p_consq = p_consq;
/* DMA info initialization */
@@ -469,6 +549,8 @@ int qed_resc_alloc(struct qed_dev *cdev)
return 0;
+alloc_no_mem:
+ rc = -ENOMEM;
alloc_err:
qed_resc_free(cdev);
return rc;
@@ -634,6 +716,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct qed_qm_common_rt_init_params params;
struct qed_dev *cdev = p_hwfn->cdev;
+ u16 num_pfs, pf_id;
u32 concrete_fid;
int rc = 0;
u8 vf_id;
@@ -682,9 +765,16 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
- /* Disable relaxed ordering in the PCI config space */
- qed_wr(p_hwfn, p_ptt, 0x20b4,
- qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);
+ if (QED_IS_BB(p_hwfn->cdev)) {
+ num_pfs = NUM_OF_ENG_PFS(p_hwfn->cdev);
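+ /* Disable the RoCE and TCP searcher inputs for each PF on
+ * the engine.
+ */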
+ for (pf_id = 0; pf_id < num_pfs; pf_id++) {
+ qed_fid_pretend(p_hwfn, p_ptt, pf_id);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+ }
+ /* pretend to original PF */
+ qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+ }
for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) {
concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
@@ -703,8 +793,31 @@ static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
{
int rc = 0;
- rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
- hw_mode);
+ rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode);
+ if (rc != 0)
+ return rc;
+
+ if (hw_mode & (1 << MODE_MF_SI)) {
+ u8 pf_id = 0;
+
+ if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
+ "PF[%08x] is first eth on engine\n", pf_id);
+
+ /* We should have configured BIT for ppfid, i.e., the
+ * relative function number in the port. But there's a
+ * bug in LLH in BB where the ppfid is actually engine
+ * based, so we need to take this into account.
+ */
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 1 << pf_id);
+ }
+
+ /* Take the protocol-based hit vector if there is a hit,
+ * otherwise take the other vector.
+ */
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_CLS_TYPE_DUALMODE, 0x2);
+ }
return rc;
}
@@ -751,7 +864,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
}
 /* Protocol Configuration */
- STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
+ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
+ (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0);
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
@@ -773,6 +887,21 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
/* Pure runtime initializations - directly to the HW */
qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
+ if (hw_mode & (1 << MODE_MF_SI)) {
+ u8 pf_id = 0;
+ u32 val = 0;
+
+ if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) {
+ if (p_hwfn->rel_pf_id == pf_id) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
+ "PF[%d] is first ETH on engine\n",
+ pf_id);
+ val = 1;
+ }
+ qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, val);
+ }
+ }
+
if (b_hw_start) {
/* enable interrupts */
qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
@@ -1213,8 +1342,9 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
num_features);
}
-static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
+static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
{
+ u8 enabled_func_idx = p_hwfn->enabled_func_idx;
u32 *resc_start = p_hwfn->hw_info.resc_start;
u8 num_funcs = p_hwfn->num_funcs_on_engine;
u32 *resc_num = p_hwfn->hw_info.resc_num;
@@ -1238,14 +1368,22 @@ static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
- resc_num[QED_RL] = 8;
+ resc_num[QED_RL] = min_t(u32, 64, resc_num[QED_VPORT]);
resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
num_funcs;
- resc_num[QED_ILT] = 950;
+ resc_num[QED_ILT] = PXP_NUM_ILT_RECORDS_BB / num_funcs;
for (i = 0; i < QED_MAX_RESC; i++)
- resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;
+ resc_start[i] = resc_num[i] * enabled_func_idx;
+
+ /* Sanity for ILT */
+ if (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB) {
+ DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n",
+ RESC_START(p_hwfn, QED_ILT),
+ RESC_END(p_hwfn, QED_ILT) - 1);
+ return -EINVAL;
+ }
qed_hw_set_feat(p_hwfn);
@@ -1275,6 +1413,8 @@ static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
p_hwfn->hw_info.resc_start[QED_VLAN],
p_hwfn->hw_info.resc_num[QED_ILT],
p_hwfn->hw_info.resc_start[QED_ILT]);
+
+ return 0;
}
static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
@@ -1304,31 +1444,31 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
break;
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
break;
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
break;
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
break;
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
break;
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
break;
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
break;
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
break;
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
break;
default:
@@ -1373,7 +1513,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
link->speed.forced_speed = 50000;
break;
- case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G:
link->speed.forced_speed = 100000;
break;
default:
@@ -1429,14 +1569,20 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
__set_bit(QED_DEV_CAP_ETH,
&p_hwfn->hw_info.device_capabilities);
+ if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI)
+ __set_bit(QED_DEV_CAP_ISCSI,
+ &p_hwfn->hw_info.device_capabilities);
+ if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE)
+ __set_bit(QED_DEV_CAP_ROCE,
+ &p_hwfn->hw_info.device_capabilities);
return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}
static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- u32 reg_function_hide, tmp, eng_mask;
- u8 num_funcs;
+ u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
+ u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;
num_funcs = MAX_NUM_PFS_BB;
@@ -1466,9 +1612,19 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
num_funcs++;
tmp >>= 0x1;
}
+
+ /* Get the PF index within the enabled functions */
+ low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1;
+ tmp = reg_function_hide & eng_mask & low_pfs_mask;
+ while (tmp) {
+ if (tmp & 0x1)
+ enabled_func_idx--;
+ tmp >>= 0x1;
+ }
}
p_hwfn->num_funcs_on_engine = num_funcs;
+ p_hwfn->enabled_func_idx = enabled_func_idx;
DP_VERBOSE(p_hwfn,
NETIF_MSG_PROBE,
@@ -1538,9 +1694,7 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
qed_get_num_funcs(p_hwfn, p_ptt);
- qed_hw_get_resc(p_hwfn);
-
- return rc;
+ return qed_hw_get_resc(p_hwfn);
}
static int qed_get_dev_info(struct qed_dev *cdev)
@@ -1737,92 +1891,285 @@ void qed_hw_remove(struct qed_dev *cdev)
qed_iov_free_hw_info(cdev);
}
-int qed_chain_alloc(struct qed_dev *cdev,
- enum qed_chain_use_mode intended_use,
- enum qed_chain_mode mode,
- u16 num_elems,
- size_t elem_size,
- struct qed_chain *p_chain)
+static void qed_chain_free_next_ptr(struct qed_dev *cdev,
+ struct qed_chain *p_chain)
+{
+ void *p_virt = p_chain->p_virt_addr, *p_virt_next = NULL;
+ dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0;
+ struct qed_chain_next *p_next;
+ u32 size, i;
+
+ if (!p_virt)
+ return;
+
+ size = p_chain->elem_size * p_chain->usable_per_page;
+
+ for (i = 0; i < p_chain->page_cnt; i++) {
+ if (!p_virt)
+ break;
+
+ p_next = (struct qed_chain_next *)((u8 *)p_virt + size);
+ p_virt_next = p_next->next_virt;
+ p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys);
+
+ dma_free_coherent(&cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE, p_virt, p_phys);
+
+ p_virt = p_virt_next;
+ p_phys = p_phys_next;
+ }
+}
+
+static void qed_chain_free_single(struct qed_dev *cdev,
+ struct qed_chain *p_chain)
{
- dma_addr_t p_pbl_phys = 0;
- void *p_pbl_virt = NULL;
+ if (!p_chain->p_virt_addr)
+ return;
+
+ dma_free_coherent(&cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ p_chain->p_virt_addr, p_chain->p_phys_addr);
+}
+
+static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
+{
+ void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
+ u32 page_cnt = p_chain->page_cnt, i, pbl_size;
+ u8 *p_pbl_virt = p_chain->pbl.p_virt_table;
+
+ if (!pp_virt_addr_tbl)
+ return;
+
+ if (!p_chain->pbl.p_virt_table)
+ goto out;
+
+ for (i = 0; i < page_cnt; i++) {
+ if (!pp_virt_addr_tbl[i])
+ break;
+
+ dma_free_coherent(&cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ pp_virt_addr_tbl[i],
+ *(dma_addr_t *)p_pbl_virt);
+
+ p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
+ }
+
+ pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
+ dma_free_coherent(&cdev->pdev->dev,
+ pbl_size,
+ p_chain->pbl.p_virt_table, p_chain->pbl.p_phys_table);
+out:
+ vfree(p_chain->pbl.pp_virt_addr_tbl);
+}
+
+void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain)
+{
+ switch (p_chain->mode) {
+ case QED_CHAIN_MODE_NEXT_PTR:
+ qed_chain_free_next_ptr(cdev, p_chain);
+ break;
+ case QED_CHAIN_MODE_SINGLE:
+ qed_chain_free_single(cdev, p_chain);
+ break;
+ case QED_CHAIN_MODE_PBL:
+ qed_chain_free_pbl(cdev, p_chain);
+ break;
+ }
+}
+
+static int
+qed_chain_alloc_sanity_check(struct qed_dev *cdev,
+ enum qed_chain_cnt_type cnt_type,
+ size_t elem_size, u32 page_cnt)
+{
+ u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;
+
+ /* The actual chain size can be larger than the maximal possible value
+ * after rounding up the requested elements number to pages, and after
+ * taking into account the unusable elements (next-ptr elements).
+ * The size of a "u16" chain can be (U16_MAX + 1) since the chain
+ * size/capacity fields are of a u32 type.
+ */
+ if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 &&
+ chain_size > 0x10000) ||
+ (cnt_type == QED_CHAIN_CNT_TYPE_U32 &&
+ chain_size > 0x100000000ULL)) {
+ DP_NOTICE(cdev,
+ "The actual chain size (0x%llx) is larger than the maximal possible value\n",
+ chain_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *p_chain)
+{
+ void *p_virt = NULL, *p_virt_prev = NULL;
dma_addr_t p_phys = 0;
- void *p_virt = NULL;
- u16 page_cnt = 0;
- size_t size;
+ u32 i;
- if (mode == QED_CHAIN_MODE_SINGLE)
- page_cnt = 1;
- else
- page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
+ for (i = 0; i < p_chain->page_cnt; i++) {
+ p_virt = dma_alloc_coherent(&cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ &p_phys, GFP_KERNEL);
+ if (!p_virt) {
+ DP_NOTICE(cdev, "Failed to allocate chain memory\n");
+ return -ENOMEM;
+ }
+
+ if (i == 0) {
+ qed_chain_init_mem(p_chain, p_virt, p_phys);
+ qed_chain_reset(p_chain);
+ } else {
+ qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
+ p_virt, p_phys);
+ }
+
+ p_virt_prev = p_virt;
+ }
+ /* Last page's next element should point to the beginning of the
+ * chain.
+ */
+ qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
+ p_chain->p_virt_addr,
+ p_chain->p_phys_addr);
+
+ return 0;
+}
+
+static int
+qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain)
+{
+ dma_addr_t p_phys = 0;
+ void *p_virt = NULL;
- size = page_cnt * QED_CHAIN_PAGE_SIZE;
p_virt = dma_alloc_coherent(&cdev->pdev->dev,
- size, &p_phys, GFP_KERNEL);
+ QED_CHAIN_PAGE_SIZE, &p_phys, GFP_KERNEL);
if (!p_virt) {
- DP_NOTICE(cdev, "Failed to allocate chain mem\n");
- goto nomem;
+ DP_NOTICE(cdev, "Failed to allocate chain memory\n");
+ return -ENOMEM;
}
- if (mode == QED_CHAIN_MODE_PBL) {
- size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
- p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
- size, &p_pbl_phys,
- GFP_KERNEL);
- if (!p_pbl_virt) {
- DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
- goto nomem;
- }
+ qed_chain_init_mem(p_chain, p_virt, p_phys);
+ qed_chain_reset(p_chain);
- qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
- (u8)elem_size, intended_use,
- p_pbl_phys, p_pbl_virt);
- } else {
- qed_chain_init(p_chain, p_virt, p_phys, page_cnt,
- (u8)elem_size, intended_use, mode);
+ return 0;
+}
+
+static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
+{
+ u32 page_cnt = p_chain->page_cnt, size, i;
+ dma_addr_t p_phys = 0, p_pbl_phys = 0;
+ void **pp_virt_addr_tbl = NULL;
+ u8 *p_pbl_virt = NULL;
+ void *p_virt = NULL;
+
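+ /* The address table can grow fairly large, so allocate it with
+ * vmalloc rather than kmalloc.
+ */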
+ size = page_cnt * sizeof(*pp_virt_addr_tbl);
+ pp_virt_addr_tbl = vmalloc(size);
+ if (!pp_virt_addr_tbl) {
+ DP_NOTICE(cdev,
+ "Failed to allocate memory for the chain virtual addresses table\n");
+ return -ENOMEM;
}
+ memset(pp_virt_addr_tbl, 0, size);
- return 0;
+ /* The PBL table is allocated with its full size, since it must
+ * be physically contiguous.
+ * qed_chain_init_pbl_mem() is called even in a case of an allocation
+ * failure, since pp_virt_addr_tbl was previously allocated, and it
+ * should be saved to allow its freeing during the error flow.
+ */
+ size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
+ p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
+ size, &p_pbl_phys, GFP_KERNEL);
+ qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
+ pp_virt_addr_tbl);
+ if (!p_pbl_virt) {
+ DP_NOTICE(cdev, "Failed to allocate chain pbl memory\n");
+ return -ENOMEM;
+ }
-nomem:
- dma_free_coherent(&cdev->pdev->dev,
- page_cnt * QED_CHAIN_PAGE_SIZE,
- p_virt, p_phys);
- dma_free_coherent(&cdev->pdev->dev,
- page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
- p_pbl_virt, p_pbl_phys);
+ for (i = 0; i < page_cnt; i++) {
+ p_virt = dma_alloc_coherent(&cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ &p_phys, GFP_KERNEL);
+ if (!p_virt) {
+ DP_NOTICE(cdev, "Failed to allocate chain memory\n");
+ return -ENOMEM;
+ }
- return -ENOMEM;
+ if (i == 0) {
+ qed_chain_init_mem(p_chain, p_virt, p_phys);
+ qed_chain_reset(p_chain);
+ }
+
+ /* Fill the PBL table with the physical address of the page */
+ *(dma_addr_t *)p_pbl_virt = p_phys;
+ /* Keep the virtual address of the page */
+ p_chain->pbl.pp_virt_addr_tbl[i] = p_virt;
+
+ p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
+ }
+
+ return 0;
}
-void qed_chain_free(struct qed_dev *cdev,
- struct qed_chain *p_chain)
+int qed_chain_alloc(struct qed_dev *cdev,
+ enum qed_chain_use_mode intended_use,
+ enum qed_chain_mode mode,
+ enum qed_chain_cnt_type cnt_type,
+ u32 num_elems, size_t elem_size, struct qed_chain *p_chain)
{
- size_t size;
+ u32 page_cnt;
+ int rc = 0;
- if (!p_chain->p_virt_addr)
- return;
+ if (mode == QED_CHAIN_MODE_SINGLE)
+ page_cnt = 1;
+ else
+ page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
- if (p_chain->mode == QED_CHAIN_MODE_PBL) {
- size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
- dma_free_coherent(&cdev->pdev->dev, size,
- p_chain->pbl.p_virt_table,
- p_chain->pbl.p_phys_table);
+ rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt);
+ if (rc) {
+ DP_NOTICE(cdev,
+ "Cannot allocate a chain with the given arguments:\n"
+ "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
+ intended_use, mode, cnt_type, num_elems, elem_size);
+ return rc;
}
- size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE;
- dma_free_coherent(&cdev->pdev->dev, size,
- p_chain->p_virt_addr,
- p_chain->p_phys_addr);
+ qed_chain_init_params(p_chain, page_cnt, (u8) elem_size, intended_use,
+ mode, cnt_type);
+
+ switch (mode) {
+ case QED_CHAIN_MODE_NEXT_PTR:
+ rc = qed_chain_alloc_next_ptr(cdev, p_chain);
+ break;
+ case QED_CHAIN_MODE_SINGLE:
+ rc = qed_chain_alloc_single(cdev, p_chain);
+ break;
+ case QED_CHAIN_MODE_PBL:
+ rc = qed_chain_alloc_pbl(cdev, p_chain);
+ break;
+ }
+ if (rc)
+ goto nomem;
+
+ return 0;
+
+nomem:
+ qed_chain_free(cdev, p_chain);
+ return rc;
}
-int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
- u16 src_id, u16 *dst_id)
+int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
{
if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
u16 min, max;
- min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
+ min = (u16) RESC_START(p_hwfn, QED_L2_QUEUE);
max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
DP_NOTICE(p_hwfn,
"l2_queue id [%d] is not valid, available indices [%d - %d]\n",
@@ -1876,6 +2223,110 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
return 0;
}
+static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u32 hw_addr, void *p_eth_qzone,
+ size_t eth_qzone_size, u8 timeset)
+{
+ struct coalescing_timeset *p_coal_timeset;
+
+ if (p_hwfn->cdev->int_coalescing_mode != QED_COAL_MODE_ENABLE) {
+ DP_NOTICE(p_hwfn, "Coalescing configuration not enabled\n");
+ return -EINVAL;
+ }
+
+ p_coal_timeset = p_eth_qzone;
+ memset(p_coal_timeset, 0, eth_qzone_size);
+ SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset);
+ SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1);
+ qed_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size);
+
+ return 0;
+}
+
+int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 coalesce, u8 qid, u16 sb_id)
+{
+ struct ustorm_eth_queue_zone eth_qzone;
+ u8 timeset, timer_res;
+ u16 fw_qid = 0;
+ u32 address;
+ int rc;
+
+ /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
+ if (coalesce <= 0x7F) {
+ timer_res = 0;
+ } else if (coalesce <= 0xFF) {
+ timer_res = 1;
+ } else if (coalesce <= 0x1FF) {
+ timer_res = 2;
+ } else {
+ DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
+ return -EINVAL;
+ }
+ timeset = (u8)(coalesce >> timer_res);
+
+ rc = qed_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
+ if (rc)
+ return rc;
+
+ rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, false);
+ if (rc)
+ goto out;
+
+ address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
+
+ rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
+ sizeof(struct ustorm_eth_queue_zone), timeset);
+ if (rc)
+ goto out;
+
+ p_hwfn->cdev->rx_coalesce_usecs = coalesce;
+out:
+ return rc;
+}
+
+int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 coalesce, u8 qid, u16 sb_id)
+{
+ struct xstorm_eth_queue_zone eth_qzone;
+ u8 timeset, timer_res;
+ u16 fw_qid = 0;
+ u32 address;
+ int rc;
+
+	/* Coalesce = (timeset << timer-resolution), timeset is 7 bits wide */
+ if (coalesce <= 0x7F) {
+ timer_res = 0;
+ } else if (coalesce <= 0xFF) {
+ timer_res = 1;
+ } else if (coalesce <= 0x1FF) {
+ timer_res = 2;
+ } else {
+ DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
+ return -EINVAL;
+ }
+ timeset = (u8)(coalesce >> timer_res);
+
+ rc = qed_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
+ if (rc)
+ return rc;
+
+ rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, true);
+ if (rc)
+ goto out;
+
+ address = BAR0_MAP_REG_XSDM_RAM + XSTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
+
+ rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
+ sizeof(struct xstorm_eth_queue_zone), timeset);
+ if (rc)
+ goto out;
+
+ p_hwfn->cdev->tx_coalesce_usecs = coalesce;
+out:
+ return rc;
+}
+
/* Calculate final WFQ values for all vports and configure them.
* After this configuration each vport will have
* approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
@@ -2089,7 +2540,7 @@ int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);
- if (!rc) {
+ if (rc) {
qed_ptt_release(p_hwfn, p_ptt);
return rc;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index dde364d6f..343bb0344 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -212,6 +212,20 @@ qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
u32 size_in_dwords,
u32 flags);
+/**
+ * @brief qed_dmae_grc2host - Read data from the given GRC address
+ * (dmae data offset) into a host destination address using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param grc_addr (dmae_data_offset)
+ * @param dest_addr
+ * @param size_in_dwords
+ * @param flags - one of the flags defined above
+ */
+int qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u32 grc_addr, dma_addr_t dest_addr, u32 size_in_dwords,
+ u32 flags);
+
/**
* @brief qed_dmae_host2host - copy data from a source address
* to a destination address (for SRIOV) using the given ptt
@@ -245,9 +259,8 @@ int
qed_chain_alloc(struct qed_dev *cdev,
enum qed_chain_use_mode intended_use,
enum qed_chain_mode mode,
- u16 num_elems,
- size_t elem_size,
- struct qed_chain *p_chain);
+ enum qed_chain_cnt_type cnt_type,
+ u32 num_elems, size_t elem_size, struct qed_chain *p_chain);
/**
* @brief qed_chain_free - Free chain DMA memory
@@ -255,8 +268,7 @@ qed_chain_alloc(struct qed_dev *cdev,
* @param p_hwfn
* @param p_chain
*/
-void qed_chain_free(struct qed_dev *cdev,
- struct qed_chain *p_chain);
+void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain);
/**
* @brief qed_fw_l2_queue - Get absolute L2 queue ID
@@ -310,4 +322,37 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 id, bool is_vf);
+/**
+ * @brief qed_set_rxq_coalesce - Configure coalesce parameters for an Rx queue
+ * Coalescing can be configured up to 511 microseconds, but with decreasing
+ * accuracy [the bigger the value, the less accurate] up to a worst-case error
+ * of 3 usec for the highest values.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param coalesce - Coalesce value in microseconds.
+ * @param qid - Queue index.
+ * @param sb_id - SB Id
+ *
+ * @return int
+ */
+int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 coalesce, u8 qid, u16 sb_id);
+
+/**
+ * @brief qed_set_txq_coalesce - Configure coalesce parameters for a Tx queue
+ * While the API allows setting coalescing per-qid, all Tx queues sharing an
+ * SB should be kept in the same range [i.e., either 0-0x7f, 0x80-0xff or
+ * 0x100-0x1ff]; otherwise the configuration would break.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param coalesce - Coalesce value in microseconds.
+ * @param qid - Queue index.
+ * @param sb_id - SB Id
+ *
+ * @return int
+ */
+int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 coalesce, u8 qid, u16 sb_id);
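+
+/* A minimal usage sketch (assumed caller context, not taken from this
+ * header): program 64 usec Rx and 128 usec Tx coalescing on queue 0,
+ * status block 0.
+ *
+ *	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ *
+ *	if (p_ptt) {
+ *		qed_set_rxq_coalesce(p_hwfn, p_ptt, 64, 0, 0);
+ *		qed_set_txq_coalesce(p_hwfn, p_ptt, 128, 0, 0);
+ *		qed_ptt_release(p_hwfn, p_ptt);
+ *	}
+ */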
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index e29ed5a69..6f9d3b831 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -17,13 +17,15 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>
+#include <linux/qed/storage_common.h>
+#include <linux/qed/tcp_common.h>
#include <linux/qed/eth_common.h>
+#include <linux/qed/iscsi_common.h>
+#include <linux/qed/rdma_common.h>
+#include <linux/qed/roce_common.h>
struct qed_hwfn;
struct qed_ptt;
-/********************************/
-/* Add include to common target */
-/********************************/
/* opcodes for the event ring */
enum common_event_opcode {
@@ -32,9 +34,10 @@ enum common_event_opcode {
COMMON_EVENT_VF_START,
COMMON_EVENT_VF_STOP,
COMMON_EVENT_VF_PF_CHANNEL,
- COMMON_EVENT_RESERVED4,
- COMMON_EVENT_RESERVED5,
- COMMON_EVENT_RESERVED6,
+ COMMON_EVENT_VF_FLR,
+ COMMON_EVENT_PF_UPDATE,
+ COMMON_EVENT_MALICIOUS_VF,
+ COMMON_EVENT_RL_UPDATE,
COMMON_EVENT_EMPTY,
MAX_COMMON_EVENT_OPCODE
};
@@ -42,11 +45,12 @@ enum common_event_opcode {
/* Common Ramrod Command IDs */
enum common_ramrod_cmd_id {
COMMON_RAMROD_UNUSED,
- COMMON_RAMROD_PF_START /* PF Function Start Ramrod */,
- COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
+ COMMON_RAMROD_PF_START,
+ COMMON_RAMROD_PF_STOP,
COMMON_RAMROD_VF_START,
COMMON_RAMROD_VF_STOP,
COMMON_RAMROD_PF_UPDATE,
+ COMMON_RAMROD_RL_UPDATE,
COMMON_RAMROD_EMPTY,
MAX_COMMON_RAMROD_CMD_ID
};
@@ -63,448 +67,448 @@ struct pstorm_core_conn_st_ctx {
/* Core Slowpath Connection storm context of Xstorm */
struct xstorm_core_conn_st_ctx {
- __le32 spq_base_lo /* SPQ Ring Base Address low dword */;
- __le32 spq_base_hi /* SPQ Ring Base Address high dword */;
- struct regpair consolid_base_addr;
- __le16 spq_cons /* SPQ Ring Consumer */;
- __le16 consolid_cons /* Consolidation Ring Consumer */;
- __le32 reserved0[55] /* Pad to 15 cycles */;
+ __le32 spq_base_lo;
+ __le32 spq_base_hi;
+ struct regpair consolid_base_addr;
+ __le16 spq_cons;
+ __le16 consolid_cons;
+ __le32 reserved0[55];
};
struct xstorm_core_conn_ag_ctx {
- u8 reserved0 /* cdu_validation */;
- u8 core_state /* state */;
- u8 flags0;
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1 /* bit6 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1 /* bit7 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 reserved0;
+ u8 core_state;
+ u8 flags0;
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1 /* bit8 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1 /* bit9 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1 /* bit10 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1 /* bit11 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1 /* bit12 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1 /* bit13 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 /* bit14 */
-#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 /* bit15 */
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
-#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
-#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
-#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
-#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
-#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */
-#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */
-#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */
-#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3 /* cf14 */
-#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */
-#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3 /* cf16 */
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3 /* cf18 */
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 /* cf19 */
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3 /* cf21 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
-#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
-#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
-#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
-#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1 /* cf11en */
-#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1 /* cf12en */
-#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1 /* cf13en */
-#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1 /* cf14en */
-#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1 /* cf15en */
-#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1 /* cf16en */
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 /* cf18en */
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 /* cf19en */
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf20en */
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1 /* cf21en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 /* cf22en */
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1 /* cf23en */
-#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1 /* rule0en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1 /* rule1en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1 /* rule2en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1 /* rule3en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 /* rule4en */
-#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 /* rule8en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1 /* rule9en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1 /* rule10en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1 /* rule11en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 /* rule12en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 /* rule13en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1 /* rule14en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1 /* rule15en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1 /* rule16en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1 /* rule17en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1 /* rule18en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1 /* rule19en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 /* rule20en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 /* rule21en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 /* rule22en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 /* rule23en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 /* rule24en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 /* rule25en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1 /* bit16 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1 /* bit17 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1 /* bit18 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1 /* bit19 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1 /* bit20 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1 /* bit21 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3 /* cf23 */
-#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
- u8 byte2 /* byte2 */;
- __le16 physical_q0 /* physical_q0 */;
- __le16 consolid_prod /* physical_q1 */;
- __le16 reserved16 /* physical_q2 */;
- __le16 tx_bd_cons /* word3 */;
- __le16 tx_bd_or_spq_prod /* word4 */;
- __le16 word5 /* word5 */;
- __le16 conn_dpi /* conn_dpi */;
- u8 byte3 /* byte3 */;
- u8 byte4 /* byte4 */;
- u8 byte5 /* byte5 */;
- u8 byte6 /* byte6 */;
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le32 reg2 /* reg2 */;
- __le32 reg3 /* reg3 */;
- __le32 reg4 /* reg4 */;
- __le32 reg5 /* cf_array0 */;
- __le32 reg6 /* cf_array1 */;
- __le16 word7 /* word7 */;
- __le16 word8 /* word8 */;
- __le16 word9 /* word9 */;
- __le16 word10 /* word10 */;
- __le32 reg7 /* reg7 */;
- __le32 reg8 /* reg8 */;
- __le32 reg9 /* reg9 */;
- u8 byte7 /* byte7 */;
- u8 byte8 /* byte8 */;
- u8 byte9 /* byte9 */;
- u8 byte10 /* byte10 */;
- u8 byte11 /* byte11 */;
- u8 byte12 /* byte12 */;
- u8 byte13 /* byte13 */;
- u8 byte14 /* byte14 */;
- u8 byte15 /* byte15 */;
- u8 byte16 /* byte16 */;
- __le16 word11 /* word11 */;
- __le32 reg10 /* reg10 */;
- __le32 reg11 /* reg11 */;
- __le32 reg12 /* reg12 */;
- __le32 reg13 /* reg13 */;
- __le32 reg14 /* reg14 */;
- __le32 reg15 /* reg15 */;
- __le32 reg16 /* reg16 */;
- __le32 reg17 /* reg17 */;
- __le32 reg18 /* reg18 */;
- __le32 reg19 /* reg19 */;
- __le16 word12 /* word12 */;
- __le16 word13 /* word13 */;
- __le16 word14 /* word14 */;
- __le16 word15 /* word15 */;
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 consolid_prod;
+ __le16 reserved16;
+ __le16 tx_bd_cons;
+ __le16 tx_bd_or_spq_prod;
+ __le16 word5;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le16 word7;
+ __le16 word8;
+ __le16 word9;
+ __le16 word10;
+ __le32 reg7;
+ __le32 reg8;
+ __le32 reg9;
+ u8 byte7;
+ u8 byte8;
+ u8 byte9;
+ u8 byte10;
+ u8 byte11;
+ u8 byte12;
+ u8 byte13;
+ u8 byte14;
+ u8 byte15;
+ u8 byte16;
+ __le16 word11;
+ __le32 reg10;
+ __le32 reg11;
+ __le32 reg12;
+ __le32 reg13;
+ __le32 reg14;
+ __le32 reg15;
+ __le32 reg16;
+ __le32 reg17;
+ __le32 reg18;
+ __le32 reg19;
+ __le16 word12;
+ __le16 word13;
+ __le16 word14;
+ __le16 word15;
};
struct tstorm_core_conn_ag_ctx {
- u8 byte0 /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
-#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
-#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
-#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
-#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
-#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
+#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le32 reg2 /* reg2 */;
- __le32 reg3 /* reg3 */;
- __le32 reg4 /* reg4 */;
- __le32 reg5 /* reg5 */;
- __le32 reg6 /* reg6 */;
- __le32 reg7 /* reg7 */;
- __le32 reg8 /* reg8 */;
- u8 byte2 /* byte2 */;
- u8 byte3 /* byte3 */;
- __le16 word0 /* word0 */;
- u8 byte4 /* byte4 */;
- u8 byte5 /* byte5 */;
- __le16 word1 /* word1 */;
- __le16 word2 /* conn_dpi */;
- __le16 word3 /* word3 */;
- __le32 reg9 /* reg9 */;
- __le32 reg10 /* reg10 */;
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ u8 byte4;
+ u8 byte5;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le32 reg9;
+ __le32 reg10;
};
struct ustorm_core_conn_ag_ctx {
- u8 reserved /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 reserved;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
-#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
-#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
-#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
- u8 byte2 /* byte2 */;
- u8 byte3 /* byte3 */;
- __le16 word0 /* conn_dpi */;
- __le16 word1 /* word1 */;
- __le32 rx_producers /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le32 reg2 /* reg2 */;
- __le32 reg3 /* reg3 */;
- __le16 word2 /* word2 */;
- __le16 word3 /* word3 */;
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 word1;
+ __le32 rx_producers;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le16 word2;
+ __le16 word3;
};
/* The core storm context for the Mstorm */
@@ -519,122 +523,186 @@ struct ustorm_core_conn_st_ctx {
/* core connection context */
struct core_conn_context {
- struct ystorm_core_conn_st_ctx ystorm_st_context;
- struct regpair ystorm_st_padding[2] /* padding */;
- struct pstorm_core_conn_st_ctx pstorm_st_context;
- struct regpair pstorm_st_padding[2];
- struct xstorm_core_conn_st_ctx xstorm_st_context;
- struct xstorm_core_conn_ag_ctx xstorm_ag_context;
- struct tstorm_core_conn_ag_ctx tstorm_ag_context;
- struct ustorm_core_conn_ag_ctx ustorm_ag_context;
- struct mstorm_core_conn_st_ctx mstorm_st_context;
- struct ustorm_core_conn_st_ctx ustorm_st_context;
- struct regpair ustorm_st_padding[2] /* padding */;
+ struct ystorm_core_conn_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2];
+ struct pstorm_core_conn_st_ctx pstorm_st_context;
+ struct regpair pstorm_st_padding[2];
+ struct xstorm_core_conn_st_ctx xstorm_st_context;
+ struct xstorm_core_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_core_conn_ag_ctx tstorm_ag_context;
+ struct ustorm_core_conn_ag_ctx ustorm_ag_context;
+ struct mstorm_core_conn_st_ctx mstorm_st_context;
+ struct ustorm_core_conn_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
+};
+
+struct eth_mstorm_per_pf_stat {
+ struct regpair gre_discard_pkts;
+ struct regpair vxlan_discard_pkts;
+ struct regpair geneve_discard_pkts;
+ struct regpair lb_discard_pkts;
};
struct eth_mstorm_per_queue_stat {
- struct regpair ttl0_discard;
- struct regpair packet_too_big_discard;
- struct regpair no_buff_discard;
- struct regpair not_active_discard;
- struct regpair tpa_coalesced_pkts;
- struct regpair tpa_coalesced_events;
- struct regpair tpa_aborts_num;
- struct regpair tpa_coalesced_bytes;
+ struct regpair ttl0_discard;
+ struct regpair packet_too_big_discard;
+ struct regpair no_buff_discard;
+ struct regpair not_active_discard;
+ struct regpair tpa_coalesced_pkts;
+ struct regpair tpa_coalesced_events;
+ struct regpair tpa_aborts_num;
+ struct regpair tpa_coalesced_bytes;
+};
+
+/* Ethernet TX Per PF */
+struct eth_pstorm_per_pf_stat {
+ struct regpair sent_lb_ucast_bytes;
+ struct regpair sent_lb_mcast_bytes;
+ struct regpair sent_lb_bcast_bytes;
+ struct regpair sent_lb_ucast_pkts;
+ struct regpair sent_lb_mcast_pkts;
+ struct regpair sent_lb_bcast_pkts;
+ struct regpair sent_gre_bytes;
+ struct regpair sent_vxlan_bytes;
+ struct regpair sent_geneve_bytes;
+ struct regpair sent_gre_pkts;
+ struct regpair sent_vxlan_pkts;
+ struct regpair sent_geneve_pkts;
+ struct regpair gre_drop_pkts;
+ struct regpair vxlan_drop_pkts;
+ struct regpair geneve_drop_pkts;
+};
+
+/* Ethernet TX Per Queue Stats */
+struct eth_pstorm_per_queue_stat {
+ struct regpair sent_ucast_bytes;
+ struct regpair sent_mcast_bytes;
+ struct regpair sent_bcast_bytes;
+ struct regpair sent_ucast_pkts;
+ struct regpair sent_mcast_pkts;
+ struct regpair sent_bcast_pkts;
+ struct regpair error_drop_pkts;
+};
+
+/* ETH Rx rate limiting data */
+struct eth_rx_rate_limit {
+ __le16 mult;
+ __le16 cnst;
+ u8 add_sub_cnst;
+ u8 reserved0;
+ __le16 reserved1;
};
-struct eth_pstorm_per_queue_stat {
- struct regpair sent_ucast_bytes;
- struct regpair sent_mcast_bytes;
- struct regpair sent_bcast_bytes;
- struct regpair sent_ucast_pkts;
- struct regpair sent_mcast_pkts;
- struct regpair sent_bcast_pkts;
- struct regpair error_drop_pkts;
+struct eth_ustorm_per_pf_stat {
+ struct regpair rcv_lb_ucast_bytes;
+ struct regpair rcv_lb_mcast_bytes;
+ struct regpair rcv_lb_bcast_bytes;
+ struct regpair rcv_lb_ucast_pkts;
+ struct regpair rcv_lb_mcast_pkts;
+ struct regpair rcv_lb_bcast_pkts;
+ struct regpair rcv_gre_bytes;
+ struct regpair rcv_vxlan_bytes;
+ struct regpair rcv_geneve_bytes;
+ struct regpair rcv_gre_pkts;
+ struct regpair rcv_vxlan_pkts;
+ struct regpair rcv_geneve_pkts;
};
struct eth_ustorm_per_queue_stat {
- struct regpair rcv_ucast_bytes;
- struct regpair rcv_mcast_bytes;
- struct regpair rcv_bcast_bytes;
- struct regpair rcv_ucast_pkts;
- struct regpair rcv_mcast_pkts;
- struct regpair rcv_bcast_pkts;
+ struct regpair rcv_ucast_bytes;
+ struct regpair rcv_mcast_bytes;
+ struct regpair rcv_bcast_bytes;
+ struct regpair rcv_ucast_pkts;
+ struct regpair rcv_mcast_pkts;
+ struct regpair rcv_bcast_pkts;
};
/* Event Ring Next Page Address */
struct event_ring_next_addr {
- struct regpair addr /* Next Page Address */;
- __le32 reserved[2] /* Reserved */;
+ struct regpair addr;
+ __le32 reserved[2];
};
+/* Event Ring Element */
union event_ring_element {
- struct event_ring_entry entry /* Event Ring Entry */;
- struct event_ring_next_addr next_addr;
+ struct event_ring_entry entry;
+ struct event_ring_next_addr next_addr;
+};
+
+/* Major and minor HSI versions */
+struct hsi_fp_ver_struct {
+ u8 minor_ver_arr[2];
+ u8 major_ver_arr[2];
};
+/* Mstorm non-triggering VF zone */
struct mstorm_non_trigger_vf_zone {
struct eth_mstorm_per_queue_stat eth_queue_stat;
+	struct eth_rx_prod_data
+		eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF];
};
+/* Mstorm VF zone */
struct mstorm_vf_zone {
struct mstorm_non_trigger_vf_zone non_trigger;
+
};
+/* personality per PF */
enum personality_type {
BAD_PERSONALITY_TYP,
- PERSONALITY_RESERVED,
+ PERSONALITY_ISCSI,
PERSONALITY_RESERVED2,
- PERSONALITY_RDMA_AND_ETH /* Roce or Iwarp */,
+ PERSONALITY_RDMA_AND_ETH,
PERSONALITY_RESERVED3,
PERSONALITY_CORE,
- PERSONALITY_ETH /* Ethernet */,
+ PERSONALITY_ETH,
PERSONALITY_RESERVED4,
MAX_PERSONALITY_TYPE
};
+/* tunnel configuration */
struct pf_start_tunnel_config {
- u8 set_vxlan_udp_port_flg;
- u8 set_geneve_udp_port_flg;
- u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
- u8 tx_enable_l2geneve;
- u8 tx_enable_ipgeneve;
- u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */;
- u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */;
- u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
- u8 tunnel_clss_l2geneve;
- u8 tunnel_clss_ipgeneve;
- u8 tunnel_clss_l2gre;
- u8 tunnel_clss_ipgre;
- __le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
- __le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
+ u8 set_vxlan_udp_port_flg;
+ u8 set_geneve_udp_port_flg;
+ u8 tx_enable_vxlan;
+ u8 tx_enable_l2geneve;
+ u8 tx_enable_ipgeneve;
+ u8 tx_enable_l2gre;
+ u8 tx_enable_ipgre;
+ u8 tunnel_clss_vxlan;
+ u8 tunnel_clss_l2geneve;
+ u8 tunnel_clss_ipgeneve;
+ u8 tunnel_clss_l2gre;
+ u8 tunnel_clss_ipgre;
+ __le16 vxlan_udp_port;
+ __le16 geneve_udp_port;
};
/* Ramrod data for PF start ramrod */
struct pf_start_ramrod_data {
- struct regpair event_ring_pbl_addr;
- struct regpair consolid_q_pbl_addr;
- struct pf_start_tunnel_config tunnel_config;
- __le16 event_ring_sb_id;
- u8 base_vf_id;
- u8 num_vfs;
- u8 event_ring_num_pages;
- u8 event_ring_sb_index;
- u8 path_id;
- u8 warning_as_error;
- u8 dont_log_ramrods;
- u8 personality;
- __le16 log_type_mask;
- u8 mf_mode /* Multi function mode */;
- u8 integ_phase /* Integration phase */;
- u8 allow_npar_tx_switching;
- u8 inner_to_outer_pri_map[8];
- u8 pri_map_valid;
- u32 outer_tag;
- u8 reserved0[4];
-};
-
-/* Data for port update ramrod */
+ struct regpair event_ring_pbl_addr;
+ struct regpair consolid_q_pbl_addr;
+ struct pf_start_tunnel_config tunnel_config;
+ __le16 event_ring_sb_id;
+ u8 base_vf_id;
+ u8 num_vfs;
+ u8 event_ring_num_pages;
+ u8 event_ring_sb_index;
+ u8 path_id;
+ u8 warning_as_error;
+ u8 dont_log_ramrods;
+ u8 personality;
+ __le16 log_type_mask;
+ u8 mf_mode;
+ u8 integ_phase;
+ u8 allow_npar_tx_switching;
+ u8 inner_to_outer_pri_map[8];
+ u8 pri_map_valid;
+ __le32 outer_tag;
+ struct hsi_fp_ver_struct hsi_fp_ver;
+
+};
+
struct protocol_dcb_data {
u8 dcb_enable_flag;
u8 dcb_priority;
@@ -642,25 +710,24 @@ struct protocol_dcb_data {
u8 reserved;
};
-/* tunnel configuration */
struct pf_update_tunnel_config {
- u8 update_rx_pf_clss;
- u8 update_tx_pf_clss;
- u8 set_vxlan_udp_port_flg;
- u8 set_geneve_udp_port_flg;
- u8 tx_enable_vxlan;
- u8 tx_enable_l2geneve;
- u8 tx_enable_ipgeneve;
- u8 tx_enable_l2gre;
- u8 tx_enable_ipgre;
- u8 tunnel_clss_vxlan;
- u8 tunnel_clss_l2geneve;
- u8 tunnel_clss_ipgeneve;
- u8 tunnel_clss_l2gre;
- u8 tunnel_clss_ipgre;
- __le16 vxlan_udp_port;
- __le16 geneve_udp_port;
- __le16 reserved[3];
+ u8 update_rx_pf_clss;
+ u8 update_tx_pf_clss;
+ u8 set_vxlan_udp_port_flg;
+ u8 set_geneve_udp_port_flg;
+ u8 tx_enable_vxlan;
+ u8 tx_enable_l2geneve;
+ u8 tx_enable_ipgeneve;
+ u8 tx_enable_l2gre;
+ u8 tx_enable_ipgre;
+ u8 tunnel_clss_vxlan;
+ u8 tunnel_clss_l2geneve;
+ u8 tunnel_clss_ipgeneve;
+ u8 tunnel_clss_l2gre;
+ u8 tunnel_clss_ipgre;
+ __le16 vxlan_udp_port;
+ __le16 geneve_udp_port;
+ __le16 reserved[3];
};
struct pf_update_ramrod_data {
@@ -669,38 +736,43 @@ struct pf_update_ramrod_data {
u8 update_fcoe_dcb_data_flag;
u8 update_iscsi_dcb_data_flag;
u8 update_roce_dcb_data_flag;
+ u8 update_iwarp_dcb_data_flag;
u8 update_mf_vlan_flag;
- __le16 mf_vlan;
+ u8 reserved;
struct protocol_dcb_data eth_dcb_data;
struct protocol_dcb_data fcoe_dcb_data;
struct protocol_dcb_data iscsi_dcb_data;
struct protocol_dcb_data roce_dcb_data;
- struct pf_update_tunnel_config tunnel_config;
-};
-
-/* Tunnel classification scheme */
-enum tunnel_clss {
- TUNNEL_CLSS_MAC_VLAN = 0,
- TUNNEL_CLSS_MAC_VNI,
- TUNNEL_CLSS_INNER_MAC_VLAN,
- TUNNEL_CLSS_INNER_MAC_VNI,
- MAX_TUNNEL_CLSS
+ struct protocol_dcb_data iwarp_dcb_data;
+ __le16 mf_vlan;
+ __le16 reserved2;
+ struct pf_update_tunnel_config tunnel_config;
};
+/* Ports mode */
enum ports_mode {
- ENGX2_PORTX1 /* 2 engines x 1 port */,
- ENGX2_PORTX2 /* 2 engines x 2 ports */,
- ENGX1_PORTX1 /* 1 engine x 1 port */,
- ENGX1_PORTX2 /* 1 engine x 2 ports */,
- ENGX1_PORTX4 /* 1 engine x 4 ports */,
+ ENGX2_PORTX1,
+ ENGX2_PORTX2,
+ ENGX1_PORTX1,
+ ENGX1_PORTX2,
+ ENGX1_PORTX4,
MAX_PORTS_MODE
};
+/* used to index into hsi_fp_[major|minor]_ver_arr per protocol */
+enum protocol_version_array_key {
+ ETH_VER_KEY = 0,
+ ROCE_VER_KEY,
+ MAX_PROTOCOL_VERSION_ARRAY_KEY
+};
+
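+/* An illustrative use of the version-array key (a sketch, assuming the
+ * ETH_HSI_VER_* constants from eth_common.h): a PF start ramrod reports the
+ * fastpath HSI version it was built against, indexed per protocol.
+ *
+ *	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
+ *	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
+ */
+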
+/* Pstorm non-triggering VF zone */
struct pstorm_non_trigger_vf_zone {
struct eth_pstorm_per_queue_stat eth_queue_stat;
struct regpair reserved[2];
};
+/* Pstorm VF zone */
struct pstorm_vf_zone {
struct pstorm_non_trigger_vf_zone non_trigger;
struct regpair reserved[7];
@@ -708,56 +780,89 @@ struct pstorm_vf_zone {
/* Ramrod Header of SPQE */
struct ramrod_header {
- __le32 cid /* Slowpath Connection CID */;
- u8 cmd_id /* Ramrod Cmd (Per Protocol Type) */;
- u8 protocol_id /* Ramrod Protocol ID */;
- __le16 echo /* Ramrod echo */;
+ __le32 cid;
+ u8 cmd_id;
+ u8 protocol_id;
+ __le16 echo;
};
/* Slowpath Element (SPQE) */
struct slow_path_element {
- struct ramrod_header hdr /* Ramrod Header */;
- struct regpair data_ptr;
+ struct ramrod_header hdr;
+ struct regpair data_ptr;
+};
+
+/* Tstorm non-triggering VF zone */
+struct tstorm_non_trigger_vf_zone {
+ struct regpair reserved[2];
};
struct tstorm_per_port_stat {
- struct regpair trunc_error_discard;
- struct regpair mac_error_discard;
- struct regpair mftag_filter_discard;
- struct regpair eth_mac_filter_discard;
- struct regpair ll2_mac_filter_discard;
- struct regpair ll2_conn_disabled_discard;
- struct regpair iscsi_irregular_pkt;
- struct regpair fcoe_irregular_pkt;
- struct regpair roce_irregular_pkt;
- struct regpair eth_irregular_pkt;
- struct regpair toe_irregular_pkt;
- struct regpair preroce_irregular_pkt;
+ struct regpair trunc_error_discard;
+ struct regpair mac_error_discard;
+ struct regpair mftag_filter_discard;
+ struct regpair eth_mac_filter_discard;
+ struct regpair reserved[5];
+ struct regpair eth_irregular_pkt;
+ struct regpair reserved1[2];
+ struct regpair eth_gre_tunn_filter_discard;
+ struct regpair eth_vxlan_tunn_filter_discard;
+ struct regpair eth_geneve_tunn_filter_discard;
+};
+
+/* Tstorm VF zone */
+struct tstorm_vf_zone {
+ struct tstorm_non_trigger_vf_zone non_trigger;
+};
+
+/* Tunnel classification scheme */
+enum tunnel_clss {
+ TUNNEL_CLSS_MAC_VLAN = 0,
+ TUNNEL_CLSS_MAC_VNI,
+ TUNNEL_CLSS_INNER_MAC_VLAN,
+ TUNNEL_CLSS_INNER_MAC_VNI,
+ TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE,
+ MAX_TUNNEL_CLSS
};
+/* Ustorm non-triggering VF zone */
struct ustorm_non_trigger_vf_zone {
struct eth_ustorm_per_queue_stat eth_queue_stat;
struct regpair vf_pf_msg_addr;
};
+/* Ustorm triggering VF zone */
struct ustorm_trigger_vf_zone {
u8 vf_pf_msg_valid;
u8 reserved[7];
};
+/* Ustorm VF zone */
struct ustorm_vf_zone {
struct ustorm_non_trigger_vf_zone non_trigger;
struct ustorm_trigger_vf_zone trigger;
};
+/* VF-PF channel data */
+struct vf_pf_channel_data {
+ __le32 ready;
+ u8 valid;
+ u8 reserved0;
+ __le16 reserved1;
+};
+
+/* Ramrod data for VF start ramrod */
struct vf_start_ramrod_data {
u8 vf_id;
u8 enable_flr_ack;
__le16 opaque_fid;
u8 personality;
- u8 reserved[3];
+ u8 reserved[7];
+ struct hsi_fp_ver_struct hsi_fp_ver;
+
};
+/* Ramrod data for VF stop ramrod */
struct vf_stop_ramrod_data {
u8 vf_id;
u8 reserved0;
@@ -765,94 +870,474 @@ struct vf_stop_ramrod_data {
__le32 reserved2;
};
+/* Attention status block */
struct atten_status_block {
- __le32 atten_bits;
- __le32 atten_ack;
- __le16 reserved0;
- __le16 sb_index /* status block running index */;
- __le32 reserved1;
+ __le32 atten_bits;
+ __le32 atten_ack;
+ __le16 reserved0;
+ __le16 sb_index;
+ __le32 reserved1;
+};
+
+enum command_type_bit {
+ IGU_COMMAND_TYPE_NOP = 0,
+ IGU_COMMAND_TYPE_SET = 1,
+ MAX_COMMAND_TYPE_BIT
+};
+
+/* DMAE command */
+struct dmae_cmd {
+ __le32 opcode;
+#define DMAE_CMD_SRC_MASK 0x1
+#define DMAE_CMD_SRC_SHIFT 0
+#define DMAE_CMD_DST_MASK 0x3
+#define DMAE_CMD_DST_SHIFT 1
+#define DMAE_CMD_C_DST_MASK 0x1
+#define DMAE_CMD_C_DST_SHIFT 3
+#define DMAE_CMD_CRC_RESET_MASK 0x1
+#define DMAE_CMD_CRC_RESET_SHIFT 4
+#define DMAE_CMD_SRC_ADDR_RESET_MASK 0x1
+#define DMAE_CMD_SRC_ADDR_RESET_SHIFT 5
+#define DMAE_CMD_DST_ADDR_RESET_MASK 0x1
+#define DMAE_CMD_DST_ADDR_RESET_SHIFT 6
+#define DMAE_CMD_COMP_FUNC_MASK 0x1
+#define DMAE_CMD_COMP_FUNC_SHIFT 7
+#define DMAE_CMD_COMP_WORD_EN_MASK 0x1
+#define DMAE_CMD_COMP_WORD_EN_SHIFT 8
+#define DMAE_CMD_COMP_CRC_EN_MASK 0x1
+#define DMAE_CMD_COMP_CRC_EN_SHIFT 9
+#define DMAE_CMD_COMP_CRC_OFFSET_MASK 0x7
+#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
+#define DMAE_CMD_RESERVED1_MASK 0x1
+#define DMAE_CMD_RESERVED1_SHIFT 13
+#define DMAE_CMD_ENDIANITY_MODE_MASK 0x3
+#define DMAE_CMD_ENDIANITY_MODE_SHIFT 14
+#define DMAE_CMD_ERR_HANDLING_MASK 0x3
+#define DMAE_CMD_ERR_HANDLING_SHIFT 16
+#define DMAE_CMD_PORT_ID_MASK 0x3
+#define DMAE_CMD_PORT_ID_SHIFT 18
+#define DMAE_CMD_SRC_PF_ID_MASK 0xF
+#define DMAE_CMD_SRC_PF_ID_SHIFT 20
+#define DMAE_CMD_DST_PF_ID_MASK 0xF
+#define DMAE_CMD_DST_PF_ID_SHIFT 24
+#define DMAE_CMD_SRC_VF_ID_VALID_MASK 0x1
+#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
+#define DMAE_CMD_DST_VF_ID_VALID_MASK 0x1
+#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
+#define DMAE_CMD_RESERVED2_MASK 0x3
+#define DMAE_CMD_RESERVED2_SHIFT 30
+ __le32 src_addr_lo;
+ __le32 src_addr_hi;
+ __le32 dst_addr_lo;
+ __le32 dst_addr_hi;
+ __le16 length_dw;
+ __le16 opcode_b;
+#define DMAE_CMD_SRC_VF_ID_MASK 0xFF
+#define DMAE_CMD_SRC_VF_ID_SHIFT 0
+#define DMAE_CMD_DST_VF_ID_MASK 0xFF
+#define DMAE_CMD_DST_VF_ID_SHIFT 8
+ __le32 comp_addr_lo;
+ __le32 comp_addr_hi;
+ __le32 comp_val;
+ __le32 crc32;
+ __le32 crc_32_c;
+ __le16 crc16;
+ __le16 crc16_c;
+ __le16 crc10;
+ __le16 reserved;
+ __le16 xsum16;
+ __le16 xsum8;
+};
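The _MASK/_SHIFT pairs above describe bitfields packed into little-endian command words. A minimal sketch of how such fields are typically read and written; GET_FIELD/SET_FIELD here are illustrative helpers patterned on the driver's conventions, not necessarily its exact definitions:

#define GET_FIELD(value, name) \
	(((value) >> (name ## _SHIFT)) & (name ## _MASK))
#define SET_FIELD(value, name, val) \
	do { \
		(value) &= ~((name ## _MASK) << (name ## _SHIFT)); \
		(value) |= (((val) & (name ## _MASK)) << (name ## _SHIFT)); \
	} while (0)

/* Sketch: packing a DMAE opcode for a PCIe-to-GRC copy */
struct dmae_cmd cmd = {};
u32 opcode = 0;

SET_FIELD(opcode, DMAE_CMD_SRC, dmae_cmd_src_pcie);
SET_FIELD(opcode, DMAE_CMD_DST, dmae_cmd_dst_grc);
SET_FIELD(opcode, DMAE_CMD_COMP_FUNC, dmae_cmd_comp_func_to_src);
cmd.opcode = cpu_to_le32(opcode);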
+
+enum dmae_cmd_comp_crc_en_enum {
+ dmae_cmd_comp_crc_disabled,
+ dmae_cmd_comp_crc_enabled,
+ MAX_DMAE_CMD_COMP_CRC_EN_ENUM
+};
+
+enum dmae_cmd_comp_func_enum {
+ dmae_cmd_comp_func_to_src,
+ dmae_cmd_comp_func_to_dst,
+ MAX_DMAE_CMD_COMP_FUNC_ENUM
+};
+
+enum dmae_cmd_comp_word_en_enum {
+ dmae_cmd_comp_word_disabled,
+ dmae_cmd_comp_word_enabled,
+ MAX_DMAE_CMD_COMP_WORD_EN_ENUM
+};
+
+enum dmae_cmd_c_dst_enum {
+ dmae_cmd_c_dst_pcie,
+ dmae_cmd_c_dst_grc,
+ MAX_DMAE_CMD_C_DST_ENUM
+};
+
+enum dmae_cmd_dst_enum {
+ dmae_cmd_dst_none_0,
+ dmae_cmd_dst_pcie,
+ dmae_cmd_dst_grc,
+ dmae_cmd_dst_none_3,
+ MAX_DMAE_CMD_DST_ENUM
+};
+
+enum dmae_cmd_error_handling_enum {
+ dmae_cmd_error_handling_send_regular_comp,
+ dmae_cmd_error_handling_send_comp_with_err,
+ dmae_cmd_error_handling_dont_send_comp,
+ MAX_DMAE_CMD_ERROR_HANDLING_ENUM
+};
+
+enum dmae_cmd_src_enum {
+ dmae_cmd_src_pcie,
+ dmae_cmd_src_grc,
+ MAX_DMAE_CMD_SRC_ENUM
+};
+
+/* IGU cleanup command */
+struct igu_cleanup {
+ __le32 sb_id_and_flags;
+#define IGU_CLEANUP_RESERVED0_MASK 0x7FFFFFF
+#define IGU_CLEANUP_RESERVED0_SHIFT 0
+#define IGU_CLEANUP_CLEANUP_SET_MASK 0x1
+#define IGU_CLEANUP_CLEANUP_SET_SHIFT 27
+#define IGU_CLEANUP_CLEANUP_TYPE_MASK 0x7
+#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
+#define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1
+#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
+ __le32 reserved1;
+};
+
+/* IGU firmware driver command */
+union igu_command {
+ struct igu_prod_cons_update prod_cons_update;
+ struct igu_cleanup cleanup;
+};
+
+/* IGU firmware driver command */
+struct igu_command_reg_ctrl {
+ __le16 opaque_fid;
+ __le16 igu_command_reg_ctrl_fields;
+#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK 0xFFF
+#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
+#define IGU_COMMAND_REG_CTRL_RESERVED_MASK 0x7
+#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT 12
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK 0x1
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
};
+/* IGU mapping line structure */
+struct igu_mapping_line {
+ __le32 igu_mapping_line_fields;
+#define IGU_MAPPING_LINE_VALID_MASK 0x1
+#define IGU_MAPPING_LINE_VALID_SHIFT 0
+#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK 0xFF
+#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT 1
+#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK 0xFF
+#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9
+#define IGU_MAPPING_LINE_PF_VALID_MASK 0x1
+#define IGU_MAPPING_LINE_PF_VALID_SHIFT 17
+#define IGU_MAPPING_LINE_IPS_GROUP_MASK 0x3F
+#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT 18
+#define IGU_MAPPING_LINE_RESERVED_MASK 0xFF
+#define IGU_MAPPING_LINE_RESERVED_SHIFT 24
+};
+
+/* IGU MSIX line structure */
+struct igu_msix_vector {
+ struct regpair address;
+ __le32 data;
+ __le32 msix_vector_fields;
+#define IGU_MSIX_VECTOR_MASK_BIT_MASK 0x1
+#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT 0
+#define IGU_MSIX_VECTOR_RESERVED0_MASK 0x7FFF
+#define IGU_MSIX_VECTOR_RESERVED0_SHIFT 1
+#define IGU_MSIX_VECTOR_STEERING_TAG_MASK 0xFF
+#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16
+#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF
+#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24
+};
+
+struct mstorm_core_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+/* per encapsulation type enabling flags */
+struct prs_reg_encapsulation_type_en {
+ u8 flags;
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT 0
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT 1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT 2
+#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT 3
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT 5
+#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK 0x3
+#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT 6
+};
+
+enum pxp_tph_st_hint {
+ TPH_ST_HINT_BIDIR,
+ TPH_ST_HINT_REQUESTER,
+ TPH_ST_HINT_TARGET,
+ TPH_ST_HINT_TARGET_PRIO,
+ MAX_PXP_TPH_ST_HINT
+};
+
+/* QM hardware structure of enable bypass credit mask */
+struct qm_rf_bypass_mask {
+ u8 flags;
+#define QM_RF_BYPASS_MASK_LINEVOQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT 0
+#define QM_RF_BYPASS_MASK_RESERVED0_MASK 0x1
+#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1
+#define QM_RF_BYPASS_MASK_PFWFQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT 2
+#define QM_RF_BYPASS_MASK_VPWFQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT 3
+#define QM_RF_BYPASS_MASK_PFRL_MASK 0x1
+#define QM_RF_BYPASS_MASK_PFRL_SHIFT 4
+#define QM_RF_BYPASS_MASK_VPQCNRL_MASK 0x1
+#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT 5
+#define QM_RF_BYPASS_MASK_FWPAUSE_MASK 0x1
+#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT 6
+#define QM_RF_BYPASS_MASK_RESERVED1_MASK 0x1
+#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7
+};
+
+/* QM hardware structure of opportunistic credit mask */
+struct qm_rf_opportunistic_mask {
+ __le16 flags;
+#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT 0
+#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT 1
+#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT 2
+#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT 3
+#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT 4
+#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT 5
+#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT 6
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT 7
+#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK 0x7F
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT 9
+};
+
+/* QM hardware structure of QM map memory */
+struct qm_rf_pq_map {
+ __le32 reg;
+#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1
+#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0
+#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF
+#define QM_RF_PQ_MAP_RL_ID_SHIFT 1
+#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF
+#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9
+#define QM_RF_PQ_MAP_VOQ_MASK 0x1F
+#define QM_RF_PQ_MAP_VOQ_SHIFT 18
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
+#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1
+#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25
+#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F
+#define QM_RF_PQ_MAP_RESERVED_SHIFT 26
+};
+
+/* Completion params for aggregated interrupt completion */
+struct sdm_agg_int_comp_params {
+ __le16 params;
+#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK 0x3F
+#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT 0
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK 0x1
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK 0x1FF
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT 7
+};
+
+/* SDM operation gen command (generate aggregative interrupt) */
+struct sdm_op_gen {
+ __le32 command;
+#define SDM_OP_GEN_COMP_PARAM_MASK 0xFFFF
+#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
+#define SDM_OP_GEN_COMP_TYPE_MASK 0xF
+#define SDM_OP_GEN_COMP_TYPE_SHIFT 16
+#define SDM_OP_GEN_RESERVED_MASK 0xFFF
+#define SDM_OP_GEN_RESERVED_SHIFT 20
+};
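For example, generating an aggregative interrupt means packing the 16-bit completion parameters and the 4-bit completion type into sdm_op_gen.command. A hedged sketch reusing the illustrative SET_FIELD from the DMAE example above (comp_param and comp_type stand for caller-supplied values):

struct sdm_op_gen op_gen = {};
u32 command = 0;
u16 comp_param = 0;	/* assumed caller-supplied completion params */
u8 comp_type = 0;	/* assumed caller-supplied completion type */

SET_FIELD(command, SDM_OP_GEN_COMP_PARAM, comp_param);
SET_FIELD(command, SDM_OP_GEN_COMP_TYPE, comp_type);
op_gen.command = cpu_to_le32(command);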
+
+struct ystorm_core_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le32 reg0;
+ __le32 reg1;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
+/****************************************/
+/* Debug Tools HSI constants and macros */
+/****************************************/
+
enum block_addr {
- GRCBASE_GRC = 0x50000,
- GRCBASE_MISCS = 0x9000,
- GRCBASE_MISC = 0x8000,
- GRCBASE_DBU = 0xa000,
- GRCBASE_PGLUE_B = 0x2a8000,
- GRCBASE_CNIG = 0x218000,
- GRCBASE_CPMU = 0x30000,
- GRCBASE_NCSI = 0x40000,
- GRCBASE_OPTE = 0x53000,
- GRCBASE_BMB = 0x540000,
- GRCBASE_PCIE = 0x54000,
- GRCBASE_MCP = 0xe00000,
- GRCBASE_MCP2 = 0x52000,
- GRCBASE_PSWHST = 0x2a0000,
- GRCBASE_PSWHST2 = 0x29e000,
- GRCBASE_PSWRD = 0x29c000,
- GRCBASE_PSWRD2 = 0x29d000,
- GRCBASE_PSWWR = 0x29a000,
- GRCBASE_PSWWR2 = 0x29b000,
- GRCBASE_PSWRQ = 0x280000,
- GRCBASE_PSWRQ2 = 0x240000,
- GRCBASE_PGLCS = 0x0,
- GRCBASE_PTU = 0x560000,
- GRCBASE_DMAE = 0xc000,
- GRCBASE_TCM = 0x1180000,
- GRCBASE_MCM = 0x1200000,
- GRCBASE_UCM = 0x1280000,
- GRCBASE_XCM = 0x1000000,
- GRCBASE_YCM = 0x1080000,
- GRCBASE_PCM = 0x1100000,
- GRCBASE_QM = 0x2f0000,
- GRCBASE_TM = 0x2c0000,
- GRCBASE_DORQ = 0x100000,
- GRCBASE_BRB = 0x340000,
- GRCBASE_SRC = 0x238000,
- GRCBASE_PRS = 0x1f0000,
- GRCBASE_TSDM = 0xfb0000,
- GRCBASE_MSDM = 0xfc0000,
- GRCBASE_USDM = 0xfd0000,
- GRCBASE_XSDM = 0xf80000,
- GRCBASE_YSDM = 0xf90000,
- GRCBASE_PSDM = 0xfa0000,
- GRCBASE_TSEM = 0x1700000,
- GRCBASE_MSEM = 0x1800000,
- GRCBASE_USEM = 0x1900000,
- GRCBASE_XSEM = 0x1400000,
- GRCBASE_YSEM = 0x1500000,
- GRCBASE_PSEM = 0x1600000,
- GRCBASE_RSS = 0x238800,
- GRCBASE_TMLD = 0x4d0000,
- GRCBASE_MULD = 0x4e0000,
- GRCBASE_YULD = 0x4c8000,
- GRCBASE_XYLD = 0x4c0000,
- GRCBASE_PRM = 0x230000,
- GRCBASE_PBF_PB1 = 0xda0000,
- GRCBASE_PBF_PB2 = 0xda4000,
- GRCBASE_RPB = 0x23c000,
- GRCBASE_BTB = 0xdb0000,
- GRCBASE_PBF = 0xd80000,
- GRCBASE_RDIF = 0x300000,
- GRCBASE_TDIF = 0x310000,
- GRCBASE_CDU = 0x580000,
- GRCBASE_CCFC = 0x2e0000,
- GRCBASE_TCFC = 0x2d0000,
- GRCBASE_IGU = 0x180000,
- GRCBASE_CAU = 0x1c0000,
- GRCBASE_UMAC = 0x51000,
- GRCBASE_XMAC = 0x210000,
- GRCBASE_DBG = 0x10000,
- GRCBASE_NIG = 0x500000,
- GRCBASE_WOL = 0x600000,
- GRCBASE_BMBN = 0x610000,
- GRCBASE_IPC = 0x20000,
- GRCBASE_NWM = 0x800000,
- GRCBASE_NWS = 0x700000,
- GRCBASE_MS = 0x6a0000,
- GRCBASE_PHY_PCIE = 0x620000,
- GRCBASE_MISC_AEU = 0x8000,
- GRCBASE_BAR0_MAP = 0x1c00000,
+ GRCBASE_GRC = 0x50000,
+ GRCBASE_MISCS = 0x9000,
+ GRCBASE_MISC = 0x8000,
+ GRCBASE_DBU = 0xa000,
+ GRCBASE_PGLUE_B = 0x2a8000,
+ GRCBASE_CNIG = 0x218000,
+ GRCBASE_CPMU = 0x30000,
+ GRCBASE_NCSI = 0x40000,
+ GRCBASE_OPTE = 0x53000,
+ GRCBASE_BMB = 0x540000,
+ GRCBASE_PCIE = 0x54000,
+ GRCBASE_MCP = 0xe00000,
+ GRCBASE_MCP2 = 0x52000,
+ GRCBASE_PSWHST = 0x2a0000,
+ GRCBASE_PSWHST2 = 0x29e000,
+ GRCBASE_PSWRD = 0x29c000,
+ GRCBASE_PSWRD2 = 0x29d000,
+ GRCBASE_PSWWR = 0x29a000,
+ GRCBASE_PSWWR2 = 0x29b000,
+ GRCBASE_PSWRQ = 0x280000,
+ GRCBASE_PSWRQ2 = 0x240000,
+ GRCBASE_PGLCS = 0x0,
+ GRCBASE_DMAE = 0xc000,
+ GRCBASE_PTU = 0x560000,
+ GRCBASE_TCM = 0x1180000,
+ GRCBASE_MCM = 0x1200000,
+ GRCBASE_UCM = 0x1280000,
+ GRCBASE_XCM = 0x1000000,
+ GRCBASE_YCM = 0x1080000,
+ GRCBASE_PCM = 0x1100000,
+ GRCBASE_QM = 0x2f0000,
+ GRCBASE_TM = 0x2c0000,
+ GRCBASE_DORQ = 0x100000,
+ GRCBASE_BRB = 0x340000,
+ GRCBASE_SRC = 0x238000,
+ GRCBASE_PRS = 0x1f0000,
+ GRCBASE_TSDM = 0xfb0000,
+ GRCBASE_MSDM = 0xfc0000,
+ GRCBASE_USDM = 0xfd0000,
+ GRCBASE_XSDM = 0xf80000,
+ GRCBASE_YSDM = 0xf90000,
+ GRCBASE_PSDM = 0xfa0000,
+ GRCBASE_TSEM = 0x1700000,
+ GRCBASE_MSEM = 0x1800000,
+ GRCBASE_USEM = 0x1900000,
+ GRCBASE_XSEM = 0x1400000,
+ GRCBASE_YSEM = 0x1500000,
+ GRCBASE_PSEM = 0x1600000,
+ GRCBASE_RSS = 0x238800,
+ GRCBASE_TMLD = 0x4d0000,
+ GRCBASE_MULD = 0x4e0000,
+ GRCBASE_YULD = 0x4c8000,
+ GRCBASE_XYLD = 0x4c0000,
+ GRCBASE_PRM = 0x230000,
+ GRCBASE_PBF_PB1 = 0xda0000,
+ GRCBASE_PBF_PB2 = 0xda4000,
+ GRCBASE_RPB = 0x23c000,
+ GRCBASE_BTB = 0xdb0000,
+ GRCBASE_PBF = 0xd80000,
+ GRCBASE_RDIF = 0x300000,
+ GRCBASE_TDIF = 0x310000,
+ GRCBASE_CDU = 0x580000,
+ GRCBASE_CCFC = 0x2e0000,
+ GRCBASE_TCFC = 0x2d0000,
+ GRCBASE_IGU = 0x180000,
+ GRCBASE_CAU = 0x1c0000,
+ GRCBASE_UMAC = 0x51000,
+ GRCBASE_XMAC = 0x210000,
+ GRCBASE_DBG = 0x10000,
+ GRCBASE_NIG = 0x500000,
+ GRCBASE_WOL = 0x600000,
+ GRCBASE_BMBN = 0x610000,
+ GRCBASE_IPC = 0x20000,
+ GRCBASE_NWM = 0x800000,
+ GRCBASE_NWS = 0x700000,
+ GRCBASE_MS = 0x6a0000,
+ GRCBASE_PHY_PCIE = 0x620000,
+ GRCBASE_LED = 0x6b8000,
+ GRCBASE_MISC_AEU = 0x8000,
+ GRCBASE_BAR0_MAP = 0x1c00000,
MAX_BLOCK_ADDR
};
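Each GRCBASE_* constant is the byte offset of one hardware block inside the GRC register space, so an individual register address is the block base plus the register's offset within that block. A one-line hedged sketch (the 0x24 offset is hypothetical):

u32 reg_addr = GRCBASE_DMAE + 0x24;	/* hypothetical DMAE register */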
@@ -879,8 +1364,8 @@ enum block_id {
BLOCK_PSWRQ,
BLOCK_PSWRQ2,
BLOCK_PGLCS,
- BLOCK_PTU,
BLOCK_DMAE,
+ BLOCK_PTU,
BLOCK_TCM,
BLOCK_MCM,
BLOCK_UCM,
@@ -934,141 +1419,216 @@ enum block_id {
BLOCK_NWS,
BLOCK_MS,
BLOCK_PHY_PCIE,
+ BLOCK_LED,
BLOCK_MISC_AEU,
BLOCK_BAR0_MAP,
MAX_BLOCK_ID
};
-enum command_type_bit {
- IGU_COMMAND_TYPE_NOP = 0,
- IGU_COMMAND_TYPE_SET = 1,
- MAX_COMMAND_TYPE_BIT
+/* binary debug buffer types */
+enum bin_dbg_buffer_type {
+ BIN_BUF_DBG_MODE_TREE,
+ BIN_BUF_DBG_DUMP_REG,
+ BIN_BUF_DBG_DUMP_MEM,
+ BIN_BUF_DBG_IDLE_CHK_REGS,
+ BIN_BUF_DBG_IDLE_CHK_IMMS,
+ BIN_BUF_DBG_IDLE_CHK_RULES,
+ BIN_BUF_DBG_IDLE_CHK_PARSING_DATA,
+ BIN_BUF_DBG_ATTN_BLOCKS,
+ BIN_BUF_DBG_ATTN_REGS,
+ BIN_BUF_DBG_ATTN_INDEXES,
+ BIN_BUF_DBG_ATTN_NAME_OFFSETS,
+ BIN_BUF_DBG_PARSING_STRINGS,
+ MAX_BIN_DBG_BUFFER_TYPE
};
-struct dmae_cmd {
- __le32 opcode;
-#define DMAE_CMD_SRC_MASK 0x1
-#define DMAE_CMD_SRC_SHIFT 0
-#define DMAE_CMD_DST_MASK 0x3
-#define DMAE_CMD_DST_SHIFT 1
-#define DMAE_CMD_C_DST_MASK 0x1
-#define DMAE_CMD_C_DST_SHIFT 3
-#define DMAE_CMD_CRC_RESET_MASK 0x1
-#define DMAE_CMD_CRC_RESET_SHIFT 4
-#define DMAE_CMD_SRC_ADDR_RESET_MASK 0x1
-#define DMAE_CMD_SRC_ADDR_RESET_SHIFT 5
-#define DMAE_CMD_DST_ADDR_RESET_MASK 0x1
-#define DMAE_CMD_DST_ADDR_RESET_SHIFT 6
-#define DMAE_CMD_COMP_FUNC_MASK 0x1
-#define DMAE_CMD_COMP_FUNC_SHIFT 7
-#define DMAE_CMD_COMP_WORD_EN_MASK 0x1
-#define DMAE_CMD_COMP_WORD_EN_SHIFT 8
-#define DMAE_CMD_COMP_CRC_EN_MASK 0x1
-#define DMAE_CMD_COMP_CRC_EN_SHIFT 9
-#define DMAE_CMD_COMP_CRC_OFFSET_MASK 0x7
-#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
-#define DMAE_CMD_RESERVED1_MASK 0x1
-#define DMAE_CMD_RESERVED1_SHIFT 13
-#define DMAE_CMD_ENDIANITY_MODE_MASK 0x3
-#define DMAE_CMD_ENDIANITY_MODE_SHIFT 14
-#define DMAE_CMD_ERR_HANDLING_MASK 0x3
-#define DMAE_CMD_ERR_HANDLING_SHIFT 16
-#define DMAE_CMD_PORT_ID_MASK 0x3
-#define DMAE_CMD_PORT_ID_SHIFT 18
-#define DMAE_CMD_SRC_PF_ID_MASK 0xF
-#define DMAE_CMD_SRC_PF_ID_SHIFT 20
-#define DMAE_CMD_DST_PF_ID_MASK 0xF
-#define DMAE_CMD_DST_PF_ID_SHIFT 24
-#define DMAE_CMD_SRC_VF_ID_VALID_MASK 0x1
-#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
-#define DMAE_CMD_DST_VF_ID_VALID_MASK 0x1
-#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
-#define DMAE_CMD_RESERVED2_MASK 0x3
-#define DMAE_CMD_RESERVED2_SHIFT 30
- __le32 src_addr_lo;
- __le32 src_addr_hi;
- __le32 dst_addr_lo;
- __le32 dst_addr_hi;
- __le16 length /* Length in DW */;
- __le16 opcode_b;
-#define DMAE_CMD_SRC_VF_ID_MASK 0xFF /* Source VF id */
-#define DMAE_CMD_SRC_VF_ID_SHIFT 0
-#define DMAE_CMD_DST_VF_ID_MASK 0xFF /* Destination VF id */
-#define DMAE_CMD_DST_VF_ID_SHIFT 8
- __le32 comp_addr_lo /* PCIe completion address low or grc address */;
- __le32 comp_addr_hi;
- __le32 comp_val /* Value to write to copmletion address */;
- __le32 crc32 /* crc16 result */;
- __le32 crc_32_c /* crc32_c result */;
- __le16 crc16 /* crc16 result */;
- __le16 crc16_c /* crc16_c result */;
- __le16 crc10 /* crc_t10 result */;
- __le16 reserved;
- __le16 xsum16 /* checksum16 result */;
- __le16 xsum8 /* checksum8 result */;
+/* Chip IDs */
+enum chip_ids {
+ CHIP_RESERVED,
+ CHIP_BB_B0,
+ CHIP_RESERVED2,
+ MAX_CHIP_IDS
};
-struct igu_cleanup {
- __le32 sb_id_and_flags;
-#define IGU_CLEANUP_RESERVED0_MASK 0x7FFFFFF
-#define IGU_CLEANUP_RESERVED0_SHIFT 0
-#define IGU_CLEANUP_CLEANUP_SET_MASK 0x1 /* cleanup clear - 0, set - 1 */
-#define IGU_CLEANUP_CLEANUP_SET_SHIFT 27
-#define IGU_CLEANUP_CLEANUP_TYPE_MASK 0x7
-#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
-#define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1
-#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
- __le32 reserved1;
+/* Attention bit mapping */
+struct dbg_attn_bit_mapping {
+ __le16 data;
+#define DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF
+#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0
+#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK 0x1
+#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT 15
};
-union igu_command {
- struct igu_prod_cons_update prod_cons_update;
- struct igu_cleanup cleanup;
+/* Attention block per-type data */
+struct dbg_attn_block_type_data {
+ __le16 names_offset;
+ __le16 reserved1;
+ u8 num_regs;
+ u8 reserved2;
+ __le16 regs_offset;
};
-struct igu_command_reg_ctrl {
- __le16 opaque_fid;
- __le16 igu_command_reg_ctrl_fields;
-#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK 0xFFF
-#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
-#define IGU_COMMAND_REG_CTRL_RESERVED_MASK 0x7
-#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT 12
-#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK 0x1
-#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
+/* Block attentions */
+struct dbg_attn_block {
+ struct dbg_attn_block_type_data per_type_data[2];
};
-struct igu_mapping_line {
- __le32 igu_mapping_line_fields;
-#define IGU_MAPPING_LINE_VALID_MASK 0x1
-#define IGU_MAPPING_LINE_VALID_SHIFT 0
-#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK 0xFF
-#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT 1
-#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK 0xFF
-#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9
-#define IGU_MAPPING_LINE_PF_VALID_MASK 0x1 /* PF-1, VF-0 */
-#define IGU_MAPPING_LINE_PF_VALID_SHIFT 17
-#define IGU_MAPPING_LINE_IPS_GROUP_MASK 0x3F
-#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT 18
-#define IGU_MAPPING_LINE_RESERVED_MASK 0xFF
-#define IGU_MAPPING_LINE_RESERVED_SHIFT 24
+/* Attention register result */
+struct dbg_attn_reg_result {
+ __le32 data;
+#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF
+#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0
+#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_MASK 0xFF
+#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_SHIFT 24
+ __le16 attn_idx_offset;
+ __le16 reserved;
+ __le32 sts_val;
+ __le32 mask_val;
+};
+
+/* Attention block result */
+struct dbg_attn_block_result {
+ u8 block_id;
+ u8 data;
+#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK 0x3
+#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0
+#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F
+#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2
+ __le16 names_offset;
+ struct dbg_attn_reg_result reg_results[15];
+};
+
+/* mode header */
+struct dbg_mode_hdr {
+ __le16 data;
+#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1
+#define DBG_MODE_HDR_EVAL_MODE_SHIFT 0
+#define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK 0x7FFF
+#define DBG_MODE_HDR_MODES_BUF_OFFSET_SHIFT 1
+};
+
+/* Attention register */
+struct dbg_attn_reg {
+ struct dbg_mode_hdr mode;
+ __le16 attn_idx_offset;
+ __le32 data;
+#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF
+#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
+#define DBG_ATTN_REG_NUM_ATTN_IDX_MASK 0xFF
+#define DBG_ATTN_REG_NUM_ATTN_IDX_SHIFT 24
+ __le32 sts_clr_address;
+ __le32 mask_address;
+};
+
+/* attention types */
+enum dbg_attn_type {
+ ATTN_TYPE_INTERRUPT,
+ ATTN_TYPE_PARITY,
+ MAX_DBG_ATTN_TYPE
+};
+
+/* Debug status codes */
+enum dbg_status {
+ DBG_STATUS_OK,
+ DBG_STATUS_APP_VERSION_NOT_SET,
+ DBG_STATUS_UNSUPPORTED_APP_VERSION,
+ DBG_STATUS_DBG_BLOCK_NOT_RESET,
+ DBG_STATUS_INVALID_ARGS,
+ DBG_STATUS_OUTPUT_ALREADY_SET,
+ DBG_STATUS_INVALID_PCI_BUF_SIZE,
+ DBG_STATUS_PCI_BUF_ALLOC_FAILED,
+ DBG_STATUS_PCI_BUF_NOT_ALLOCATED,
+ DBG_STATUS_TOO_MANY_INPUTS,
+ DBG_STATUS_INPUT_OVERLAP,
+ DBG_STATUS_HW_ONLY_RECORDING,
+ DBG_STATUS_STORM_ALREADY_ENABLED,
+ DBG_STATUS_STORM_NOT_ENABLED,
+ DBG_STATUS_BLOCK_ALREADY_ENABLED,
+ DBG_STATUS_BLOCK_NOT_ENABLED,
+ DBG_STATUS_NO_INPUT_ENABLED,
+ DBG_STATUS_NO_FILTER_TRIGGER_64B,
+ DBG_STATUS_FILTER_ALREADY_ENABLED,
+ DBG_STATUS_TRIGGER_ALREADY_ENABLED,
+ DBG_STATUS_TRIGGER_NOT_ENABLED,
+ DBG_STATUS_CANT_ADD_CONSTRAINT,
+ DBG_STATUS_TOO_MANY_TRIGGER_STATES,
+ DBG_STATUS_TOO_MANY_CONSTRAINTS,
+ DBG_STATUS_RECORDING_NOT_STARTED,
+ DBG_STATUS_DATA_DIDNT_TRIGGER,
+ DBG_STATUS_NO_DATA_RECORDED,
+ DBG_STATUS_DUMP_BUF_TOO_SMALL,
+ DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED,
+ DBG_STATUS_UNKNOWN_CHIP,
+ DBG_STATUS_VIRT_MEM_ALLOC_FAILED,
+ DBG_STATUS_BLOCK_IN_RESET,
+ DBG_STATUS_INVALID_TRACE_SIGNATURE,
+ DBG_STATUS_INVALID_NVRAM_BUNDLE,
+ DBG_STATUS_NVRAM_GET_IMAGE_FAILED,
+ DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE,
+ DBG_STATUS_NVRAM_READ_FAILED,
+ DBG_STATUS_IDLE_CHK_PARSE_FAILED,
+ DBG_STATUS_MCP_TRACE_BAD_DATA,
+ DBG_STATUS_MCP_TRACE_NO_META,
+ DBG_STATUS_MCP_COULD_NOT_HALT,
+ DBG_STATUS_MCP_COULD_NOT_RESUME,
+ DBG_STATUS_DMAE_FAILED,
+ DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
+ DBG_STATUS_IGU_FIFO_BAD_DATA,
+ DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
+ DBG_STATUS_FW_ASSERTS_PARSE_FAILED,
+ DBG_STATUS_REG_FIFO_BAD_DATA,
+ DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
+ DBG_STATUS_DBG_ARRAY_NOT_SET,
+ MAX_DBG_STATUS
};
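Callers of the debug API typically translate dbg_status codes into printable messages; a trivial hedged sketch (the strings are illustrative, not the driver's own):

static const char *dbg_status_str(enum dbg_status status)
{
	switch (status) {
	case DBG_STATUS_OK:
		return "operation completed successfully";
	case DBG_STATUS_INVALID_ARGS:
		return "invalid arguments";
	case DBG_STATUS_DUMP_BUF_TOO_SMALL:
		return "dump buffer is too small";
	default:
		return "unknown debug status";
	}
}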
-struct igu_msix_vector {
- struct regpair address;
- __le32 data;
- __le32 msix_vector_fields;
-#define IGU_MSIX_VECTOR_MASK_BIT_MASK 0x1
-#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT 0
-#define IGU_MSIX_VECTOR_RESERVED0_MASK 0x7FFF
-#define IGU_MSIX_VECTOR_RESERVED0_SHIFT 1
-#define IGU_MSIX_VECTOR_STEERING_TAG_MASK 0xFF
-#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16
-#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF
-#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24
+/********************************/
+/* HSI Init Functions constants */
+/********************************/
+
+/* Number of VLAN priorities */
+#define NUM_OF_VLAN_PRIORITIES 8
+
+/* QM per-port init parameters */
+struct init_qm_port_params {
+ u8 active;
+ u8 active_phys_tcs;
+ __le16 num_pbf_cmd_lines;
+ __le16 num_btb_blocks;
+ __le16 reserved;
};
+/* QM per-PQ init parameters */
+struct init_qm_pq_params {
+ u8 vport_id;
+ u8 tc_id;
+ u8 wrr_group;
+ u8 rl_valid;
+};
+
+/* QM per-vport init parameters */
+struct init_qm_vport_params {
+ __le32 vport_rl;
+ __le16 vport_wfq;
+ __le16 first_tx_pq_id[NUM_OF_TCS];
+};
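A hedged sketch of filling the per-PQ parameter array, e.g. one PQ per traffic class on a single VPORT (all values illustrative):

struct init_qm_pq_params pq_params[NUM_OF_TCS];
int tc;

for (tc = 0; tc < NUM_OF_TCS; tc++) {
	pq_params[tc].vport_id = 0;	/* all PQs on VPORT 0 */
	pq_params[tc].tc_id = tc;	/* one PQ per TC */
	pq_params[tc].wrr_group = 0;	/* single WRR group */
	pq_params[tc].rl_valid = 0;	/* no per-PQ rate limiter */
}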
+
+/**************************************/
+/* Init Tool HSI constants and macros */
+/**************************************/
+
+/* Width of GRC address in bits (addresses are specified in dwords) */
+#define GRC_ADDR_BITS 23
+#define MAX_GRC_ADDR ((1 << GRC_ADDR_BITS) - 1)
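Since GRC addresses are specified in dwords, the 23-bit address space spans 32 MB of bytes; converting a dword address to a byte address is a shift by two (grc_dword_addr is an assumed variable name):

u32 grc_byte_addr = grc_dword_addr << 2;	/* dword -> byte address */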
+
+/* indicates an init that should be applied to any phase ID */
+#define ANY_PHASE_ID 0xffff
+
+/* Max size in dwords of a zipped array */
+#define MAX_ZIPPED_SIZE 8192
+
enum init_modes {
- MODE_BB_A0,
+ MODE_RESERVED,
MODE_BB_B0,
MODE_RESERVED2,
MODE_ASIC,
@@ -1083,7 +1643,8 @@ enum init_modes {
MODE_PORTS_PER_ENG_2,
MODE_PORTS_PER_ENG_4,
MODE_100G,
- MODE_EAGLE_ENG1_WORKAROUND,
+ MODE_40G,
+ MODE_RESERVED7,
MAX_INIT_MODES
};
@@ -1096,484 +1657,302 @@ enum init_phases {
MAX_INIT_PHASES
};
-/* per encapsulation type enabling flags */
-struct prs_reg_encapsulation_type_en {
- u8 flags;
-#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK 0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT 0
-#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK 0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT 1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK 0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT 2
-#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK 0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT 3
-#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK 0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4
-#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK 0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT 5
-#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK 0x3
-#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT 6
-};
-
-enum pxp_tph_st_hint {
- TPH_ST_HINT_BIDIR /* Read/Write access by Host and Device */,
- TPH_ST_HINT_REQUESTER /* Read/Write access by Device */,
- TPH_ST_HINT_TARGET,
- TPH_ST_HINT_TARGET_PRIO,
- MAX_PXP_TPH_ST_HINT
-};
-
-/* QM hardware structure of enable bypass credit mask */
-struct qm_rf_bypass_mask {
- u8 flags;
-#define QM_RF_BYPASS_MASK_LINEVOQ_MASK 0x1
-#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT 0
-#define QM_RF_BYPASS_MASK_RESERVED0_MASK 0x1
-#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1
-#define QM_RF_BYPASS_MASK_PFWFQ_MASK 0x1
-#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT 2
-#define QM_RF_BYPASS_MASK_VPWFQ_MASK 0x1
-#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT 3
-#define QM_RF_BYPASS_MASK_PFRL_MASK 0x1
-#define QM_RF_BYPASS_MASK_PFRL_SHIFT 4
-#define QM_RF_BYPASS_MASK_VPQCNRL_MASK 0x1
-#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT 5
-#define QM_RF_BYPASS_MASK_FWPAUSE_MASK 0x1
-#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT 6
-#define QM_RF_BYPASS_MASK_RESERVED1_MASK 0x1
-#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7
-};
-
-/* QM hardware structure of opportunistic credit mask */
-struct qm_rf_opportunistic_mask {
- __le16 flags;
-#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT 0
-#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT 1
-#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT 2
-#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT 3
-#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT 4
-#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT 5
-#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT 6
-#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT 7
-#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8
-#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK 0x7F
-#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT 9
-};
-
-/* QM hardware structure of QM map memory */
-struct qm_rf_pq_map {
- u32 reg;
-#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1 /* PQ active */
-#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0
-#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF /* RL ID */
-#define QM_RF_PQ_MAP_RL_ID_SHIFT 1
-#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF
-#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9
-#define QM_RF_PQ_MAP_VOQ_MASK 0x1F /* VOQ */
-#define QM_RF_PQ_MAP_VOQ_SHIFT 18
-#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3 /* WRR weight */
-#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
-#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1 /* RL active */
-#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25
-#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F
-#define QM_RF_PQ_MAP_RESERVED_SHIFT 26
-};
-
-/* Completion params for aggregated interrupt completion */
-struct sdm_agg_int_comp_params {
- __le16 params;
-#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK 0x3F
-#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT 0
-#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK 0x1
-#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6
-#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK 0x1FF
-#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT 7
+enum init_split_types {
+ SPLIT_TYPE_NONE,
+ SPLIT_TYPE_PORT,
+ SPLIT_TYPE_PF,
+ SPLIT_TYPE_PORT_PF,
+ SPLIT_TYPE_VF,
+ MAX_INIT_SPLIT_TYPES
};
-/* SDM operation gen command (generate aggregative interrupt) */
-struct sdm_op_gen {
- __le32 command;
-#define SDM_OP_GEN_COMP_PARAM_MASK 0xFFFF /* completion parameters 0-15 */
-#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
-#define SDM_OP_GEN_COMP_TYPE_MASK 0xF /* completion type 16-19 */
-#define SDM_OP_GEN_COMP_TYPE_SHIFT 16
-#define SDM_OP_GEN_RESERVED_MASK 0xFFF /* reserved 20-31 */
-#define SDM_OP_GEN_RESERVED_SHIFT 20
-};
-
-/*********************************** Init ************************************/
-
-/* Width of GRC address in bits (addresses are specified in dwords) */
-#define GRC_ADDR_BITS 23
-#define MAX_GRC_ADDR ((1 << GRC_ADDR_BITS) - 1)
-
-/* indicates an init that should be applied to any phase ID */
-#define ANY_PHASE_ID 0xffff
-
-/* init pattern size in bytes */
-#define INIT_PATTERN_SIZE_BITS 4
-#define MAX_INIT_PATTERN_SIZE BIT(INIT_PATTERN_SIZE_BITS)
-
-/* Max size in dwords of a zipped array */
-#define MAX_ZIPPED_SIZE 8192
-
-/* Global PXP window */
-#define NUM_OF_PXP_WIN 19
-#define PXP_WIN_DWORD_SIZE_BITS 10
-#define PXP_WIN_DWORD_SIZE BIT(PXP_WIN_DWORD_SIZE_BITS)
-#define PXP_WIN_BYTE_SIZE_BITS (PXP_WIN_DWORD_SIZE_BITS + 2)
-#define PXP_WIN_BYTE_SIZE (PXP_WIN_DWORD_SIZE * 4)
-
-/********************************* GRC Dump **********************************/
-
-/* width of GRC dump register sequence length in bits */
-#define DUMP_SEQ_LEN_BITS 8
-#define DUMP_SEQ_LEN_MAX_VAL ((1 << DUMP_SEQ_LEN_BITS) - 1)
-
-/* width of GRC dump memory length in bits */
-#define DUMP_MEM_LEN_BITS 18
-#define DUMP_MEM_LEN_MAX_VAL ((1 << DUMP_MEM_LEN_BITS) - 1)
-
-/* width of register type ID in bits */
-#define REG_TYPE_ID_BITS 6
-#define REG_TYPE_ID_MAX_VAL ((1 << REG_TYPE_ID_BITS) - 1)
-
-/* width of block ID in bits */
-#define BLOCK_ID_BITS 8
-#define BLOCK_ID_MAX_VAL ((1 << BLOCK_ID_BITS) - 1)
-
-/******************************** Idle Check *********************************/
-
-/* max number of idle check predicate immediates */
-#define MAX_IDLE_CHK_PRED_IMM 3
-
-/* max number of idle check argument registers */
-#define MAX_IDLE_CHK_READ_REGS 3
-
-/* max number of idle check loops */
-#define MAX_IDLE_CHK_LOOPS 0x10000
-
-/* max idle check address increment */
-#define MAX_IDLE_CHK_INCREMENT 0x10000
-
-/* inicates an undefined idle check line index */
-#define IDLE_CHK_UNDEFINED_LINE_IDX 0xffffff
-
-/* max number of register values following the idle check header */
-#define IDLE_CHK_MAX_DUMP_REGS 2
-
-/* arguments for IDLE_CHK_MACRO_TYPE_QM_RD_WR */
-#define IDLE_CHK_QM_RD_WR_PTR 0
-#define IDLE_CHK_QM_RD_WR_BANK 1
-
-/**************************************/
-/* HSI Functions constants and macros */
-/**************************************/
-
-/* Number of VLAN priorities */
-#define NUM_OF_VLAN_PRIORITIES 8
-
-/* the MCP Trace meta data signautre is duplicated in the perl script that
- * generats the NVRAM images.
- */
-#define MCP_TRACE_META_IMAGE_SIGNATURE 0x669955aa
-
/* Binary buffer header */
struct bin_buffer_hdr {
- u32 offset;
- u32 length /* buffer length in bytes */;
-};
-
-/* binary buffer types */
-enum bin_buffer_type {
- BIN_BUF_FW_VER_INFO /* fw_ver_info struct */,
- BIN_BUF_INIT_CMD /* init commands */,
- BIN_BUF_INIT_VAL /* init data */,
- BIN_BUF_INIT_MODE_TREE /* init modes tree */,
- BIN_BUF_IRO /* internal RAM offsets array */,
- MAX_BIN_BUFFER_TYPE
+ __le32 offset;
+ __le32 length;
};
-/* Chip IDs */
-enum chip_ids {
- CHIP_BB_A0 /* BB A0 chip ID */,
- CHIP_BB_B0 /* BB B0 chip ID */,
- CHIP_K2 /* AH chip ID */,
- MAX_CHIP_IDS
+/* binary init buffer types */
+enum bin_init_buffer_type {
+ BIN_BUF_FW_VER_INFO,
+ BIN_BUF_INIT_CMD,
+ BIN_BUF_INIT_VAL,
+ BIN_BUF_INIT_MODE_TREE,
+ BIN_BUF_IRO,
+ MAX_BIN_INIT_BUFFER_TYPE
};
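The firmware image starts with a table of bin_buffer_hdr entries indexed by bin_init_buffer_type, each giving the byte offset and length of one section. A hedged parsing sketch (fw_data is an assumed pointer to the raw image):

static void parse_fw_sections(const u8 *fw_data)
{
	const struct bin_buffer_hdr *hdrs =
		(const struct bin_buffer_hdr *)fw_data;
	const struct bin_buffer_hdr *cmd_hdr = &hdrs[BIN_BUF_INIT_CMD];
	const u8 *init_cmds = fw_data + le32_to_cpu(cmd_hdr->offset);
	u32 init_cmds_len = le32_to_cpu(cmd_hdr->length);

	/* ... walk init_cmds_len bytes of init commands at init_cmds ... */
}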
+/* init array header: raw */
struct init_array_raw_hdr {
__le32 data;
-#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF
-#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0
-#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF /* init array params */
-#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4
+#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF
+#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4
};
+/* init array header: standard */
struct init_array_standard_hdr {
__le32 data;
-#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF
-#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0
-#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF
-#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4
+#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF
+#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4
};
+/* init array header: zipped */
struct init_array_zipped_hdr {
__le32 data;
-#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF
-#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0
-#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF
-#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4
};
+/* init array header: pattern */
struct init_array_pattern_hdr {
__le32 data;
-#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF
-#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0
-#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF
-#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4
-#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK 0xFFFFFF
-#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT 8
+#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK 0xFFFFFF
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT 8
};
+/* init array header union */
union init_array_hdr {
- struct init_array_raw_hdr raw /* raw init array header */;
- struct init_array_standard_hdr standard;
- struct init_array_zipped_hdr zipped /* zipped init array header */;
- struct init_array_pattern_hdr pattern /* pattern init array header */;
+ struct init_array_raw_hdr raw;
+ struct init_array_standard_hdr standard;
+ struct init_array_zipped_hdr zipped;
+ struct init_array_pattern_hdr pattern;
};
+/* init array types */
enum init_array_types {
- INIT_ARR_STANDARD /* standard init array */,
- INIT_ARR_ZIPPED /* zipped init array */,
- INIT_ARR_PATTERN /* a repeated pattern */,
+ INIT_ARR_STANDARD,
+ INIT_ARR_ZIPPED,
+ INIT_ARR_PATTERN,
MAX_INIT_ARRAY_TYPES
};
/* init operation: callback */
struct init_callback_op {
- __le32 op_data;
-#define INIT_CALLBACK_OP_OP_MASK 0xF
-#define INIT_CALLBACK_OP_OP_SHIFT 0
-#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF
-#define INIT_CALLBACK_OP_RESERVED_SHIFT 4
- __le16 callback_id /* Callback ID */;
- __le16 block_id /* Blocks ID */;
+ __le32 op_data;
+#define INIT_CALLBACK_OP_OP_MASK 0xF
+#define INIT_CALLBACK_OP_OP_SHIFT 0
+#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF
+#define INIT_CALLBACK_OP_RESERVED_SHIFT 4
+ __le16 callback_id;
+ __le16 block_id;
};
/* init operation: delay */
struct init_delay_op {
- __le32 op_data;
-#define INIT_DELAY_OP_OP_MASK 0xF
-#define INIT_DELAY_OP_OP_SHIFT 0
-#define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF
-#define INIT_DELAY_OP_RESERVED_SHIFT 4
- __le32 delay /* delay in us */;
+ __le32 op_data;
+#define INIT_DELAY_OP_OP_MASK 0xF
+#define INIT_DELAY_OP_OP_SHIFT 0
+#define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF
+#define INIT_DELAY_OP_RESERVED_SHIFT 4
+ __le32 delay;
};
/* init operation: if_mode */
struct init_if_mode_op {
__le32 op_data;
-#define INIT_IF_MODE_OP_OP_MASK 0xF
-#define INIT_IF_MODE_OP_OP_SHIFT 0
-#define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF
-#define INIT_IF_MODE_OP_RESERVED1_SHIFT 4
-#define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF
-#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16
- __le16 reserved2;
- __le16 modes_buf_offset;
+#define INIT_IF_MODE_OP_OP_MASK 0xF
+#define INIT_IF_MODE_OP_OP_SHIFT 0
+#define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF
+#define INIT_IF_MODE_OP_RESERVED1_SHIFT 4
+#define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF
+#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16
+ __le16 reserved2;
+ __le16 modes_buf_offset;
};
-/* init operation: if_phase */
+/* init operation: if_phase */
struct init_if_phase_op {
__le32 op_data;
-#define INIT_IF_PHASE_OP_OP_MASK 0xF
-#define INIT_IF_PHASE_OP_OP_SHIFT 0
-#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK 0x1
-#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4
-#define INIT_IF_PHASE_OP_RESERVED1_MASK 0x7FF
-#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 5
-#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF
-#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16
+#define INIT_IF_PHASE_OP_OP_MASK 0xF
+#define INIT_IF_PHASE_OP_OP_SHIFT 0
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK 0x1
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4
+#define INIT_IF_PHASE_OP_RESERVED1_MASK 0x7FF
+#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 5
+#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF
+#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16
__le32 phase_data;
-#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF /* Init phase */
-#define INIT_IF_PHASE_OP_PHASE_SHIFT 0
-#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF
-#define INIT_IF_PHASE_OP_RESERVED2_SHIFT 8
-#define INIT_IF_PHASE_OP_PHASE_ID_MASK 0xFFFF /* Init phase ID */
-#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT 16
+#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF
+#define INIT_IF_PHASE_OP_PHASE_SHIFT 0
+#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF
+#define INIT_IF_PHASE_OP_RESERVED2_SHIFT 8
+#define INIT_IF_PHASE_OP_PHASE_ID_MASK 0xFFFF
+#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT 16
};
/* init mode operators */
enum init_mode_ops {
- INIT_MODE_OP_NOT /* init mode not operator */,
- INIT_MODE_OP_OR /* init mode or operator */,
- INIT_MODE_OP_AND /* init mode and operator */,
+ INIT_MODE_OP_NOT,
+ INIT_MODE_OP_OR,
+ INIT_MODE_OP_AND,
MAX_INIT_MODE_OPS
};
/* init operation: raw */
struct init_raw_op {
- __le32 op_data;
-#define INIT_RAW_OP_OP_MASK 0xF
-#define INIT_RAW_OP_OP_SHIFT 0
-#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF /* init param 1 */
-#define INIT_RAW_OP_PARAM1_SHIFT 4
- __le32 param2 /* Init param 2 */;
+ __le32 op_data;
+#define INIT_RAW_OP_OP_MASK 0xF
+#define INIT_RAW_OP_OP_SHIFT 0
+#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF
+#define INIT_RAW_OP_PARAM1_SHIFT 4
+ __le32 param2;
};
/* init array params */
struct init_op_array_params {
- __le16 size /* array size in dwords */;
- __le16 offset /* array start offset in dwords */;
+ __le16 size;
+ __le16 offset;
};
/* Write init operation arguments */
union init_write_args {
- __le32 inline_val;
- __le32 zeros_count;
- __le32 array_offset;
- struct init_op_array_params runtime;
+ __le32 inline_val;
+ __le32 zeros_count;
+ __le32 array_offset;
+ struct init_op_array_params runtime;
};
/* init operation: write */
struct init_write_op {
__le32 data;
-#define INIT_WRITE_OP_OP_MASK 0xF
-#define INIT_WRITE_OP_OP_SHIFT 0
-#define INIT_WRITE_OP_SOURCE_MASK 0x7
-#define INIT_WRITE_OP_SOURCE_SHIFT 4
-#define INIT_WRITE_OP_RESERVED_MASK 0x1
-#define INIT_WRITE_OP_RESERVED_SHIFT 7
-#define INIT_WRITE_OP_WIDE_BUS_MASK 0x1
-#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8
-#define INIT_WRITE_OP_ADDRESS_MASK 0x7FFFFF
-#define INIT_WRITE_OP_ADDRESS_SHIFT 9
- union init_write_args args /* Write init operation arguments */;
+#define INIT_WRITE_OP_OP_MASK 0xF
+#define INIT_WRITE_OP_OP_SHIFT 0
+#define INIT_WRITE_OP_SOURCE_MASK 0x7
+#define INIT_WRITE_OP_SOURCE_SHIFT 4
+#define INIT_WRITE_OP_RESERVED_MASK 0x1
+#define INIT_WRITE_OP_RESERVED_SHIFT 7
+#define INIT_WRITE_OP_WIDE_BUS_MASK 0x1
+#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8
+#define INIT_WRITE_OP_ADDRESS_MASK 0x7FFFFF
+#define INIT_WRITE_OP_ADDRESS_SHIFT 9
+ union init_write_args args;
};
/* init operation: read */
struct init_read_op {
__le32 op_data;
-#define INIT_READ_OP_OP_MASK 0xF
-#define INIT_READ_OP_OP_SHIFT 0
-#define INIT_READ_OP_POLL_TYPE_MASK 0xF
-#define INIT_READ_OP_POLL_TYPE_SHIFT 4
-#define INIT_READ_OP_RESERVED_MASK 0x1
-#define INIT_READ_OP_RESERVED_SHIFT 8
-#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF
-#define INIT_READ_OP_ADDRESS_SHIFT 9
+#define INIT_READ_OP_OP_MASK 0xF
+#define INIT_READ_OP_OP_SHIFT 0
+#define INIT_READ_OP_POLL_TYPE_MASK 0xF
+#define INIT_READ_OP_POLL_TYPE_SHIFT 4
+#define INIT_READ_OP_RESERVED_MASK 0x1
+#define INIT_READ_OP_RESERVED_SHIFT 8
+#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF
+#define INIT_READ_OP_ADDRESS_SHIFT 9
__le32 expected_val;
};
/* Init operations union */
union init_op {
- struct init_raw_op raw /* raw init operation */;
- struct init_write_op write /* write init operation */;
- struct init_read_op read /* read init operation */;
- struct init_if_mode_op if_mode /* if_mode init operation */;
- struct init_if_phase_op if_phase /* if_phase init operation */;
- struct init_callback_op callback /* callback init operation */;
- struct init_delay_op delay /* delay init operation */;
+ struct init_raw_op raw;
+ struct init_write_op write;
+ struct init_read_op read;
+ struct init_if_mode_op if_mode;
+ struct init_if_phase_op if_phase;
+ struct init_callback_op callback;
+ struct init_delay_op delay;
};
/* Init command operation types */
enum init_op_types {
- INIT_OP_READ /* GRC read init command */,
- INIT_OP_WRITE /* GRC write init command */,
+ INIT_OP_READ,
+ INIT_OP_WRITE,
INIT_OP_IF_MODE,
INIT_OP_IF_PHASE,
- INIT_OP_DELAY /* delay init command */,
- INIT_OP_CALLBACK /* callback init command */,
+ INIT_OP_DELAY,
+ INIT_OP_CALLBACK,
MAX_INIT_OP_TYPES
};
+/* init polling types */
enum init_poll_types {
- INIT_POLL_NONE /* No polling */,
- INIT_POLL_EQ /* init value is included in the init command */,
- INIT_POLL_OR /* init value is all zeros */,
- INIT_POLL_AND /* init value is an array of values */,
+ INIT_POLL_NONE,
+ INIT_POLL_EQ,
+ INIT_POLL_OR,
+ INIT_POLL_AND,
MAX_INIT_POLL_TYPES
};
/* init source types */
enum init_source_types {
- INIT_SRC_INLINE /* init value is included in the init command */,
- INIT_SRC_ZEROS /* init value is all zeros */,
- INIT_SRC_ARRAY /* init value is an array of values */,
- INIT_SRC_RUNTIME /* init value is provided during runtime */,
+ INIT_SRC_INLINE,
+ INIT_SRC_ZEROS,
+ INIT_SRC_ARRAY,
+ INIT_SRC_RUNTIME,
MAX_INIT_SOURCE_TYPES
};
/* Internal RAM Offsets macro data */
struct iro {
- u32 base /* RAM field offset */;
- u16 m1 /* multiplier 1 */;
- u16 m2 /* multiplier 2 */;
- u16 m3 /* multiplier 3 */;
- u16 size /* RAM field size */;
+ __le32 base;
+ __le16 m1;
+ __le16 m2;
+ __le16 m3;
+ __le16 size;
};
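Each iro entry parameterizes an internal-RAM offset of the form base + idx1 * m1 + idx2 * m2 + idx3 * m3, with size giving the field width; the *_OFFSET macros removed near the end of this header expand to exactly this pattern. An illustrative helper (byte-order conversion is shown explicitly here; actual users may index the table directly):

static inline u32 iro_offset(const struct iro *p,
			     u16 idx1, u16 idx2, u16 idx3)
{
	return le32_to_cpu(p->base) +
	       idx1 * le16_to_cpu(p->m1) +
	       idx2 * le16_to_cpu(p->m2) +
	       idx3 * le16_to_cpu(p->m3);
}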
-/* QM per-port init parameters */
-struct init_qm_port_params {
- u8 active /* Indicates if this port is active */;
- u8 num_active_phys_tcs;
- u16 num_pbf_cmd_lines;
- u16 num_btb_blocks;
- __le16 reserved;
-};
-
-/* QM per-PQ init parameters */
-struct init_qm_pq_params {
- u8 vport_id /* VPORT ID */;
- u8 tc_id /* TC ID */;
- u8 wrr_group /* WRR group */;
- u8 reserved;
-};
+/**
+ * @brief qed_dbg_print_attn - Prints the attention register values in the
+ *	specified results struct.
+ *
+ * @param p_hwfn
+ * @param results - Pointer to the attention read results
+ *
+ * @return error if the version wasn't set; ok otherwise.
+ */
+enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
+ struct dbg_attn_block_result *results);
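A hedged usage sketch (attn_results is assumed to have been filled by a prior attention read; DP_NOTICE is the driver's logging macro):

struct dbg_attn_block_result attn_results;
enum dbg_status rc;

rc = qed_dbg_print_attn(p_hwfn, &attn_results);
if (rc != DBG_STATUS_OK)
	DP_NOTICE(p_hwfn, "Failed to parse attention data, status %d\n", rc);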
-/* QM per-vport init parameters */
-struct init_qm_vport_params {
- u32 vport_rl;
- u16 vport_wfq;
- u16 first_tx_pq_id[NUM_OF_TCS];
-};
+#define MAX_NAME_LEN 16
/* Win 2 */
#define GTT_BAR0_MAP_REG_IGU_CMD \
0x00f000UL
+
/* Win 3 */
#define GTT_BAR0_MAP_REG_TSDM_RAM \
0x010000UL
+
/* Win 4 */
#define GTT_BAR0_MAP_REG_MSDM_RAM \
0x011000UL
+
/* Win 5 */
#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 \
0x012000UL
+
/* Win 6 */
#define GTT_BAR0_MAP_REG_USDM_RAM \
0x013000UL
+
/* Win 7 */
#define GTT_BAR0_MAP_REG_USDM_RAM_1024 \
0x014000UL
+
/* Win 8 */
#define GTT_BAR0_MAP_REG_USDM_RAM_2048 \
0x015000UL
+
/* Win 9 */
#define GTT_BAR0_MAP_REG_XSDM_RAM \
0x016000UL
+
/* Win 10 */
#define GTT_BAR0_MAP_REG_YSDM_RAM \
0x017000UL
+
/* Win 11 */
#define GTT_BAR0_MAP_REG_PSDM_RAM \
0x018000UL
@@ -1584,785 +1963,718 @@ struct init_qm_vport_params {
* Returns the required host memory size in 4KB units.
* Must be called before all QM init HSI functions.
*
- * @param pf_id - physical function ID
- * @param num_pf_cids - number of connections used by this PF
- * @param num_vf_cids - number of connections used by VFs of this PF
- * @param num_tids - number of tasks used by this PF
- * @param num_pf_pqs - number of PQs used by this PF
- * @param num_vf_pqs - number of PQs used by VFs of this PF
+ * @param pf_id - physical function ID
+ * @param num_pf_cids - number of connections used by this PF
+ * @param num_vf_cids - number of connections used by VFs of this PF
+ * @param num_tids - number of tasks used by this PF
+ * @param num_pf_pqs - number of PQs used by this PF
+ * @param num_vf_pqs - number of PQs used by VFs of this PF
*
* @return The required host memory size in 4KB units.
*/
-u32 qed_qm_pf_mem_size(u8 pf_id,
- u32 num_pf_cids,
- u32 num_vf_cids,
- u32 num_tids,
- u16 num_pf_pqs,
- u16 num_vf_pqs);
+u32 qed_qm_pf_mem_size(u8 pf_id,
+ u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs);
struct qed_qm_common_rt_init_params {
- u8 max_ports_per_engine;
- u8 max_phys_tcs_per_port;
- bool pf_rl_en;
- bool pf_wfq_en;
- bool vport_rl_en;
- bool vport_wfq_en;
- struct init_qm_port_params *port_params;
+ u8 max_ports_per_engine;
+ u8 max_phys_tcs_per_port;
+ bool pf_rl_en;
+ bool pf_wfq_en;
+ bool vport_rl_en;
+ bool vport_wfq_en;
+ struct init_qm_port_params *port_params;
};
+int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_qm_common_rt_init_params *p_params);
+
+struct qed_qm_pf_rt_init_params {
+ u8 port_id;
+ u8 pf_id;
+ u8 max_phys_tcs_per_port;
+ bool is_first_pf;
+ u32 num_pf_cids;
+ u32 num_vf_cids;
+ u32 num_tids;
+ u16 start_pq;
+ u16 num_pf_pqs;
+ u16 num_vf_pqs;
+ u8 start_vport;
+ u8 num_vports;
+ u8 pf_wfq;
+ u32 pf_rl;
+ struct init_qm_pq_params *pq_params;
+ struct init_qm_vport_params *vport_params;
+};
+
+int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_qm_pf_rt_init_params *p_params);
+
/**
- * @brief qed_qm_common_rt_init - Prepare QM runtime init values for the
- * engine phase.
+ * @brief qed_init_pf_wfq - Initializes the WFQ weight of the specified PF
*
* @param p_hwfn
- * @param max_ports_per_engine - max number of ports per engine in HW
- * @param max_phys_tcs_per_port - max number of physical TCs per port in HW
- * @param pf_rl_en - enable per-PF rate limiters
- * @param pf_wfq_en - enable per-PF WFQ
- * @param vport_rl_en - enable per-VPORT rate limiters
- * @param vport_wfq_en - enable per-VPORT WFQ
- * @param port_params - array of size MAX_NUM_PORTS with
- * arameters for each port
+ * @param p_ptt - ptt window used for writing the registers
+ * @param pf_id - PF ID
+ * @param pf_wfq - WFQ weight. Must be non-zero.
*
* @return 0 on success, -1 on error.
*/
-int qed_qm_common_rt_init(
- struct qed_hwfn *p_hwfn,
- struct qed_qm_common_rt_init_params *p_params);
-
-struct qed_qm_pf_rt_init_params {
- u8 port_id;
- u8 pf_id;
- u8 max_phys_tcs_per_port;
- bool is_first_pf;
- u32 num_pf_cids;
- u32 num_vf_cids;
- u32 num_tids;
- u16 start_pq;
- u16 num_pf_pqs;
- u16 num_vf_pqs;
- u8 start_vport;
- u8 num_vports;
- u8 pf_wfq;
- u32 pf_rl;
- struct init_qm_pq_params *pq_params;
- struct init_qm_vport_params *vport_params;
-};
-
-int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_qm_pf_rt_init_params *p_params);
+int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq);
/**
- * @brief qed_init_pf_rl Initializes the rate limit of the specified PF
+ * @brief qed_init_pf_rl - Initializes the rate limit of the specified PF
*
* @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
- * @param pf_id - PF ID
- * @param pf_rl - rate limit in Mb/sec units
+ * @param p_ptt - ptt window used for writing the registers
+ * @param pf_id - PF ID
+ * @param pf_rl - rate limit in Mb/sec units
*
* @return 0 on success, -1 on error.
*/
-int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u8 pf_id,
- u32 pf_rl);
+int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl);
/**
- * @brief qed_init_vport_rl Initializes the rate limit of the specified VPORT
+ * @brief qed_init_vport_wfq - Initializes the WFQ weight of the specified VPORT
*
* @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
- * @param vport_id - VPORT ID
- * @param vport_rl - rate limit in Mb/sec units
+ * @param p_ptt - ptt window used for writing the registers
+ * @param first_tx_pq_id - An array containing the first Tx PQ ID associated
+ *			  with the VPORT for each TC. This array is filled by
+ *			  qed_qm_pf_rt_init.
+ * @param vport_wfq - WFQ weight. Must be non-zero.
*
* @return 0 on success, -1 on error.
*/
+int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq);
-int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u8 vport_id,
- u32 vport_rl);
+/**
+ * @brief qed_init_vport_rl - Initializes the rate limit of the specified VPORT
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param vport_id - VPORT ID
+ * @param vport_rl - rate limit in Mb/sec units
+ *
+ * @return 0 on success, -1 on error.
+ */
+int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl);
/**
* @brief qed_send_qm_stop_cmd Sends a stop command to the QM
*
* @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
+ * @param p_ptt
* @param is_release_cmd - true for release, false for stop.
- * @param is_tx_pq - true for Tx PQs, false for Other PQs.
- * @param start_pq - first PQ ID to stop
- * @param num_pqs - Number of PQs to stop, starting from start_pq.
+ * @param is_tx_pq - true for Tx PQs, false for Other PQs.
+ * @param start_pq - first PQ ID to stop
+ * @param num_pqs - Number of PQs to stop, starting from start_pq.
*
- * @return bool, true if successful, false if timeout occurred while waiting
- * for QM command done.
+ * @return bool, true if successful, false if timeout occurred while waiting
+ * for QM command done.
*/
+bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool is_release_cmd,
+ bool is_tx_pq, u16 start_pq, u16 num_pqs);
-bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- bool is_release_cmd,
- bool is_tx_pq,
- u16 start_pq,
- u16 num_pqs);
-
+/**
+ * @brief qed_set_vxlan_dest_port - initializes VXLAN tunnel destination UDP port
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param dest_port - VXLAN destination UDP port.
+ */
void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u16 dest_port);
+ struct qed_ptt *p_ptt, u16 dest_port);
+
+/**
+ * @brief qed_set_vxlan_enable - enable or disable VXLAN tunnel in HW
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param vxlan_enable - vxlan enable flag.
+ */
void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool vxlan_enable);
+
+/**
+ * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param eth_gre_enable - eth GRE enable flag.
+ * @param ip_gre_enable - IP GRE enable flag.
+ */
void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, bool eth_gre_enable,
- bool ip_gre_enable);
+ struct qed_ptt *p_ptt,
+ bool eth_gre_enable, bool ip_gre_enable);
+
+/**
+ * @brief qed_set_geneve_dest_port - initializes GENEVE tunnel destination UDP port
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param dest_port - GENEVE destination UDP port.
+ */
void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 dest_port);
+
+/**
+ * @brief qed_set_geneve_enable - enable or disable GENEVE tunnel in HW
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param eth_geneve_enable - eth GENEVE enable flag.
+ * @param ip_geneve_enable - IP GENEVE enable flag.
+ */
void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, bool eth_geneve_enable,
- bool ip_geneve_enable);
-
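The tunnel helpers above are naturally used in pairs: first program the destination UDP port, then enable the tunnel type. A hedged sketch using the IANA-assigned ports:

qed_set_vxlan_dest_port(p_hwfn, p_ptt, 4789);	/* IANA VXLAN port */
qed_set_vxlan_enable(p_hwfn, p_ptt, true);
qed_set_geneve_dest_port(p_hwfn, p_ptt, 6081);	/* IANA GENEVE port */
qed_set_geneve_enable(p_hwfn, p_ptt, true, true);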
-/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
-#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
-#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
-/* Tstorm port statistics */
-#define TSTORM_PORT_STAT_OFFSET(port_id) (IRO[1].base + ((port_id) * IRO[1].m1))
-#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
-/* Tstorm ll2 port statistics */
-#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \
- (IRO[2].base + ((port_id) * IRO[2].m1))
-#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size)
-/* Ustorm VF-PF Channel ready flag */
-#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
- (IRO[3].base + ((vf_id) * IRO[3].m1))
-#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
-/* Ustorm Final flr cleanup ack */
-#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) (IRO[4].base + ((pf_id) * IRO[4].m1))
-#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size)
-/* Ustorm Event ring consumer */
-#define USTORM_EQE_CONS_OFFSET(pf_id) (IRO[5].base + ((pf_id) * IRO[5].m1))
-#define USTORM_EQE_CONS_SIZE (IRO[5].size)
-/* Ustorm Common Queue ring consumer */
-#define USTORM_COMMON_QUEUE_CONS_OFFSET(global_queue_id) \
- (IRO[6].base + ((global_queue_id) * IRO[6].m1))
-#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[6].size)
-/* Xstorm Integration Test Data */
-#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[7].base)
-#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[7].size)
-/* Ystorm Integration Test Data */
-#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base)
-#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size)
-/* Pstorm Integration Test Data */
-#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
-#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
-/* Tstorm Integration Test Data */
-#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
-#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
-/* Mstorm Integration Test Data */
-#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
-#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
-/* Ustorm Integration Test Data */
-#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base)
-#define USTORM_INTEG_TEST_DATA_SIZE (IRO[12].size)
-/* Tstorm producers */
-#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \
- (IRO[13].base + ((core_rx_queue_id) * IRO[13].m1))
-#define TSTORM_LL2_RX_PRODS_SIZE (IRO[13].size)
-/* Tstorm LightL2 queue statistics */
-#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
- (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1))
-#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[14].size)
-/* Ustorm LiteL2 queue statistics */
-#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
- (IRO[15].base + ((core_rx_queue_id) * IRO[15].m1))
-#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[15].size)
-/* Pstorm LiteL2 queue statistics */
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
- (IRO[16].base + ((core_tx_stats_id) * IRO[16].m1))
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[16].size)
-/* Mstorm queue statistics */
-#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[17].base + ((stat_counter_id) * IRO[17].m1))
-#define MSTORM_QUEUE_STAT_SIZE (IRO[17].size)
-/* Mstorm producers */
-#define MSTORM_PRODS_OFFSET(queue_id) (IRO[18].base + ((queue_id) * IRO[18].m1))
-#define MSTORM_PRODS_SIZE (IRO[18].size)
-/* TPA agregation timeout in us resolution (on ASIC) */
-#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[19].base)
-#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[19].size)
-/* Ustorm queue statistics */
-#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[20].base + ((stat_counter_id) * IRO[20].m1))
-#define USTORM_QUEUE_STAT_SIZE (IRO[20].size)
-/* Ustorm queue zone */
-#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
- (IRO[21].base + ((queue_id) * IRO[21].m1))
-#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[21].size)
-/* Pstorm queue statistics */
-#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[22].base + ((stat_counter_id) * IRO[22].m1))
-#define PSTORM_QUEUE_STAT_SIZE (IRO[22].size)
-/* Tstorm last parser message */
-#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[23].base)
-#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[23].size)
-/* Tstorm Eth limit Rx rate */
-#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) (IRO[24].base + ((pf_id) * IRO[24].m1))
-#define ETH_RX_RATE_LIMIT_SIZE (IRO[24].size)
-/* Ystorm queue zone */
-#define YSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
- (IRO[25].base + ((queue_id) * IRO[25].m1))
-#define YSTORM_ETH_QUEUE_ZONE_SIZE (IRO[25].size)
-/* Ystorm cqe producer */
-#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
- (IRO[26].base + ((rss_id) * IRO[26].m1))
-#define YSTORM_TOE_CQ_PROD_SIZE (IRO[26].size)
-/* Ustorm cqe producer */
-#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
- (IRO[27].base + ((rss_id) * IRO[27].m1))
-#define USTORM_TOE_CQ_PROD_SIZE (IRO[27].size)
-/* Ustorm grq producer */
-#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
- (IRO[28].base + ((pf_id) * IRO[28].m1))
-#define USTORM_TOE_GRQ_PROD_SIZE (IRO[28].size)
-/* Tstorm cmdq-cons of given command queue-id */
-#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
- (IRO[29].base + ((cmdq_queue_id) * IRO[29].m1))
-#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[29].size)
-/* Mstorm rq-cons of given queue-id */
-#define MSTORM_SCSI_RQ_CONS_OFFSET(rq_queue_id) \
- (IRO[30].base + ((rq_queue_id) * IRO[30].m1))
-#define MSTORM_SCSI_RQ_CONS_SIZE (IRO[30].size)
-/* Mstorm bdq-external-producer of given BDQ function ID, BDqueue-id */
-#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
- (IRO[31].base + ((func_id) * IRO[31].m1) + ((bdq_id) * IRO[31].m2))
-#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[31].size)
-/* Tstorm (reflects M-Storm) bdq-external-producer of given fn ID, BDqueue-id */
-#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
- (IRO[32].base + ((func_id) * IRO[32].m1) + ((bdq_id) * IRO[32].m2))
-#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[32].size)
-/* Tstorm iSCSI RX stats */
-#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[33].base + ((pf_id) * IRO[33].m1))
-#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[33].size)
-/* Mstorm iSCSI RX stats */
-#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[34].base + ((pf_id) * IRO[34].m1))
-#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[34].size)
-/* Ustorm iSCSI RX stats */
-#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[35].base + ((pf_id) * IRO[35].m1))
-#define USTORM_ISCSI_RX_STATS_SIZE (IRO[35].size)
-/* Xstorm iSCSI TX stats */
-#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[36].base + ((pf_id) * IRO[36].m1))
-#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[36].size)
-/* Ystorm iSCSI TX stats */
-#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[37].base + ((pf_id) * IRO[37].m1))
-#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[37].size)
-/* Pstorm iSCSI TX stats */
-#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[38].base + ((pf_id) * IRO[38].m1))
-#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[38].size)
-/* Tstorm FCoE RX stats */
-#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
- (IRO[39].base + ((pf_id) * IRO[39].m1))
-#define TSTORM_FCOE_RX_STATS_SIZE (IRO[39].size)
-/* Mstorm FCoE RX stats */
-#define MSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
- (IRO[40].base + ((pf_id) * IRO[40].m1))
-#define MSTORM_FCOE_RX_STATS_SIZE (IRO[40].size)
-/* Pstorm FCoE TX stats */
-#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
- (IRO[41].base + ((pf_id) * IRO[41].m1))
-#define PSTORM_FCOE_TX_STATS_SIZE (IRO[41].size)
-/* Pstorm RoCE statistics */
-#define PSTORM_ROCE_STAT_OFFSET(stat_counter_id) \
- (IRO[42].base + ((stat_counter_id) * IRO[42].m1))
-#define PSTORM_ROCE_STAT_SIZE (IRO[42].size)
-/* Tstorm RoCE statistics */
-#define TSTORM_ROCE_STAT_OFFSET(stat_counter_id) \
- (IRO[43].base + ((stat_counter_id) * IRO[43].m1))
-#define TSTORM_ROCE_STAT_SIZE (IRO[43].size)
-
-static const struct iro iro_arr[44] = {
- { 0x10, 0x0, 0x0, 0x0, 0x8 },
- { 0x47c8, 0x60, 0x0, 0x0, 0x60 },
- { 0x5e30, 0x20, 0x0, 0x0, 0x20 },
- { 0x510, 0x8, 0x0, 0x0, 0x4 },
- { 0x490, 0x8, 0x0, 0x0, 0x4 },
- { 0x10, 0x8, 0x0, 0x0, 0x2 },
- { 0x90, 0x8, 0x0, 0x0, 0x2 },
- { 0x4940, 0x0, 0x0, 0x0, 0x78 },
- { 0x3de0, 0x0, 0x0, 0x0, 0x78 },
- { 0x2998, 0x0, 0x0, 0x0, 0x78 },
- { 0x4750, 0x0, 0x0, 0x0, 0x78 },
- { 0x56d0, 0x0, 0x0, 0x0, 0x78 },
- { 0x7e50, 0x0, 0x0, 0x0, 0x78 },
- { 0x100, 0x8, 0x0, 0x0, 0x8 },
- { 0x5c10, 0x10, 0x0, 0x0, 0x10 },
- { 0xb508, 0x30, 0x0, 0x0, 0x30 },
- { 0x95c0, 0x30, 0x0, 0x0, 0x30 },
- { 0x58a0, 0x40, 0x0, 0x0, 0x40 },
- { 0x200, 0x10, 0x0, 0x0, 0x8 },
- { 0xa230, 0x0, 0x0, 0x0, 0x4 },
- { 0x8058, 0x40, 0x0, 0x0, 0x30 },
- { 0xd00, 0x8, 0x0, 0x0, 0x8 },
- { 0x2b30, 0x80, 0x0, 0x0, 0x38 },
- { 0xa808, 0x0, 0x0, 0x0, 0xf0 },
- { 0xa8f8, 0x8, 0x0, 0x0, 0x8 },
- { 0x80, 0x8, 0x0, 0x0, 0x8 },
- { 0xac0, 0x8, 0x0, 0x0, 0x8 },
- { 0x2580, 0x8, 0x0, 0x0, 0x8 },
- { 0x2500, 0x8, 0x0, 0x0, 0x8 },
- { 0x440, 0x8, 0x0, 0x0, 0x2 },
- { 0x1800, 0x8, 0x0, 0x0, 0x2 },
- { 0x1a00, 0x10, 0x8, 0x0, 0x2 },
- { 0x640, 0x10, 0x8, 0x0, 0x2 },
- { 0xd9b8, 0x38, 0x0, 0x0, 0x24 },
- { 0x11048, 0x10, 0x0, 0x0, 0x8 },
- { 0x11678, 0x38, 0x0, 0x0, 0x18 },
- { 0xaec0, 0x30, 0x0, 0x0, 0x10 },
- { 0x8700, 0x28, 0x0, 0x0, 0x18 },
- { 0xec00, 0x10, 0x0, 0x0, 0x10 },
- { 0xde38, 0x40, 0x0, 0x0, 0x30 },
- { 0x121a8, 0x38, 0x0, 0x0, 0x8 },
- { 0xf068, 0x20, 0x0, 0x0, 0x20 },
- { 0x2b68, 0x80, 0x0, 0x0, 0x10 },
- { 0x4ab8, 0x10, 0x0, 0x0, 0x10 },
+ struct qed_ptt *p_ptt,
+ bool eth_geneve_enable, bool ip_geneve_enable);
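
Taken together, a minimal configuration sketch for the tunnel hooks
declared above (the sequence and port values are illustrative; 4789 and
6081 are the IANA defaults for VXLAN and GENEVE):

    /* Program standard tunnel UDP ports, then enable HW parsing. */
    qed_set_vxlan_dest_port(p_hwfn, p_ptt, 4789);
    qed_set_vxlan_enable(p_hwfn, p_ptt, true);
    qed_set_geneve_dest_port(p_hwfn, p_ptt, 6081);
    qed_set_geneve_enable(p_hwfn, p_ptt, true, true);
    qed_set_gre_enable(p_hwfn, p_ptt, true, true);
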
+
+#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
+#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
+#define TSTORM_PORT_STAT_OFFSET(port_id) \
+ (IRO[1].base + ((port_id) * IRO[1].m1))
+#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
+#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
+ (IRO[3].base + ((vf_id) * IRO[3].m1))
+#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
+#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \
+ (IRO[4].base + ((pf_id) * IRO[4].m1))
+#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size)
+#define USTORM_EQE_CONS_OFFSET(pf_id) \
+ (IRO[5].base + ((pf_id) * IRO[5].m1))
+#define USTORM_EQE_CONS_SIZE (IRO[5].size)
+#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \
+ (IRO[6].base + ((queue_zone_id) * IRO[6].m1))
+#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[6].size)
+#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \
+ (IRO[7].base + ((queue_zone_id) * IRO[7].m1))
+#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size)
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[18].base + ((stat_counter_id) * IRO[18].m1))
+#define MSTORM_QUEUE_STAT_SIZE (IRO[18].size)
+#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
+ (IRO[19].base + ((queue_id) * IRO[19].m1))
+#define MSTORM_ETH_PF_PRODS_SIZE (IRO[19].size)
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[20].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[20].size)
+#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[21].base + ((pf_id) * IRO[21].m1))
+#define MSTORM_ETH_PF_STAT_SIZE (IRO[21].size)
+#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[22].base + ((stat_counter_id) * IRO[22].m1))
+#define USTORM_QUEUE_STAT_SIZE (IRO[22].size)
+#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[23].base + ((pf_id) * IRO[23].m1))
+#define USTORM_ETH_PF_STAT_SIZE (IRO[23].size)
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[24].base + ((stat_counter_id) * IRO[24].m1))
+#define PSTORM_QUEUE_STAT_SIZE (IRO[24].size)
+#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[25].base + ((pf_id) * IRO[25].m1))
+#define PSTORM_ETH_PF_STAT_SIZE (IRO[25].size)
+#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethtype) \
+ (IRO[26].base + ((ethtype) * IRO[26].m1))
+#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[26].size)
+#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[27].base)
+#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[27].size)
+#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
+ (IRO[28].base + ((pf_id) * IRO[28].m1))
+#define ETH_RX_RATE_LIMIT_SIZE (IRO[28].size)
+#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
+ (IRO[29].base + ((queue_id) * IRO[29].m1))
+#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[29].size)
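
Each OFFSET macro above resolves to a storm-RAM address of the form
base + index * stride, taken from the iro_arr entry it names. A hedged
usage sketch (variable names assumed):

    /* Offset and size of port 2's Tstorm statistics block:
     * IRO[1].base + 2 * IRO[1].m1, with IRO[1].size bytes per port.
     */
    u32 off = TSTORM_PORT_STAT_OFFSET(2);
    u32 len = TSTORM_PORT_STAT_SIZE;
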
+
+static const struct iro iro_arr[46] = {
+ {0x0, 0x0, 0x0, 0x0, 0x8},
+ {0x4cb0, 0x78, 0x0, 0x0, 0x78},
+ {0x6318, 0x20, 0x0, 0x0, 0x20},
+ {0xb00, 0x8, 0x0, 0x0, 0x4},
+ {0xa80, 0x8, 0x0, 0x0, 0x4},
+ {0x0, 0x8, 0x0, 0x0, 0x2},
+ {0x80, 0x8, 0x0, 0x0, 0x4},
+ {0x84, 0x8, 0x0, 0x0, 0x2},
+ {0x4bc0, 0x0, 0x0, 0x0, 0x78},
+ {0x3df0, 0x0, 0x0, 0x0, 0x78},
+ {0x29b0, 0x0, 0x0, 0x0, 0x78},
+ {0x4c38, 0x0, 0x0, 0x0, 0x78},
+ {0x4a48, 0x0, 0x0, 0x0, 0x78},
+ {0x7e48, 0x0, 0x0, 0x0, 0x78},
+ {0xa28, 0x8, 0x0, 0x0, 0x8},
+ {0x60f8, 0x10, 0x0, 0x0, 0x10},
+ {0xb820, 0x30, 0x0, 0x0, 0x30},
+ {0x95b8, 0x30, 0x0, 0x0, 0x30},
+ {0x4c18, 0x80, 0x0, 0x0, 0x40},
+ {0x1f8, 0x4, 0x0, 0x0, 0x4},
+ {0xc9a8, 0x0, 0x0, 0x0, 0x4},
+ {0x4c58, 0x80, 0x0, 0x0, 0x20},
+ {0x8050, 0x40, 0x0, 0x0, 0x30},
+ {0xe770, 0x60, 0x0, 0x0, 0x60},
+ {0x2b48, 0x80, 0x0, 0x0, 0x38},
+ {0xdf88, 0x78, 0x0, 0x0, 0x78},
+ {0x1f8, 0x4, 0x0, 0x0, 0x4},
+ {0xacf0, 0x0, 0x0, 0x0, 0xf0},
+ {0xade0, 0x8, 0x0, 0x0, 0x8},
+ {0x1f8, 0x8, 0x0, 0x0, 0x8},
+ {0xac0, 0x8, 0x0, 0x0, 0x8},
+ {0x2578, 0x8, 0x0, 0x0, 0x8},
+ {0x24f8, 0x8, 0x0, 0x0, 0x8},
+ {0x0, 0x8, 0x0, 0x0, 0x8},
+ {0x200, 0x10, 0x8, 0x0, 0x8},
+ {0xb78, 0x10, 0x8, 0x0, 0x2},
+ {0xd888, 0x38, 0x0, 0x0, 0x24},
+ {0x12120, 0x10, 0x0, 0x0, 0x8},
+ {0x11b20, 0x38, 0x0, 0x0, 0x18},
+ {0xa8c0, 0x30, 0x0, 0x0, 0x10},
+ {0x86f8, 0x28, 0x0, 0x0, 0x18},
+ {0xeff8, 0x10, 0x0, 0x0, 0x10},
+ {0xdd08, 0x48, 0x0, 0x0, 0x38},
+ {0xf460, 0x20, 0x0, 0x0, 0x20},
+ {0x2b80, 0x80, 0x0, 0x0, 0x10},
+ {0x5000, 0x10, 0x0, 0x0, 0x10},
};
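
Each initializer above supplies five values per entry. A struct iro
layout consistent with the macros (field names inferred from their use
above, not shown in this hunk) would be:

    struct iro {
        u32 base;    /* RAM base offset */
        u16 m1;      /* stride for the first index */
        u16 m2;      /* stride for the second index */
        u16 m3;      /* stride for a third index (unused here) */
        u16 size;    /* per-element size */
    };
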
/* Runtime array offsets */
-#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
-#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
-#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
-#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
-#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
-#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
-#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
-#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
-#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
-#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
-#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
-#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
-#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
-#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
-#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
-#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
-#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
-#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
-#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
-#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
-#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
-#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
-#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
-#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
-#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
-#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
-#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
-#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
-#define CAU_REG_PI_MEMORY_RT_SIZE 4416
-#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
-#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
-#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
-#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
-#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
-#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
-#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655
-#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656
-#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657
-#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658
-#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
-#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
-#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
-#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
-#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
-#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
-#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
-#define SRC_REG_FIRSTFREE_RT_SIZE 2
-#define SRC_REG_LASTFREE_RT_OFFSET 6667
-#define SRC_REG_LASTFREE_RT_SIZE 2
-#define SRC_REG_COUNTFREE_RT_OFFSET 6669
-#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
-#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
-#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
-#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
-#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
-#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
-#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6676
-#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6677
-#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6678
-#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6679
-#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6680
-#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6681
-#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6682
-#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6683
-#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6684
-#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6685
-#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6686
-#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6687
-#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6688
-#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
-#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
-#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6691
-#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6692
-#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6693
-#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6694
-#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6695
-#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6696
-#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6697
-#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6698
-#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6699
-#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6700
-#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6701
-#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6702
-#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6703
-#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
-#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28703
-#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28704
-#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28705
-#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28706
-#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28707
-#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28708
-#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28709
-#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28710
-#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28711
-#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28712
-#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28713
-#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
-#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29129
-#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29641
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29642
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29643
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29644
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29645
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29646
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29647
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29648
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29649
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29650
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29651
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29652
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29653
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29654
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29655
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29656
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29657
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29658
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29659
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29660
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29661
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29662
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29663
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29664
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29665
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29666
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29667
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29668
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29669
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29670
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29671
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29672
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29673
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29674
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29675
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29676
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29677
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29678
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29679
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29680
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29681
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29682
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29683
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29684
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29685
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29686
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29687
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29688
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29689
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29690
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29691
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29692
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29693
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29694
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29695
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29696
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29697
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29698
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29699
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29700
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29701
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29702
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29703
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29704
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29705
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29706
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29707
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29708
-#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
-#define QM_REG_VOQCRDLINE_RT_OFFSET 29836
-#define QM_REG_VOQCRDLINE_RT_SIZE 20
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29856
-#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29876
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29877
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29878
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29879
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29880
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29881
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29882
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29883
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29884
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29885
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29886
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29887
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29888
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29889
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29890
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29891
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29892
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29893
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29894
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29895
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29896
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29897
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29898
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29899
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29900
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29901
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29902
-#define QM_REG_PQTX2PF_0_RT_OFFSET 29903
-#define QM_REG_PQTX2PF_1_RT_OFFSET 29904
-#define QM_REG_PQTX2PF_2_RT_OFFSET 29905
-#define QM_REG_PQTX2PF_3_RT_OFFSET 29906
-#define QM_REG_PQTX2PF_4_RT_OFFSET 29907
-#define QM_REG_PQTX2PF_5_RT_OFFSET 29908
-#define QM_REG_PQTX2PF_6_RT_OFFSET 29909
-#define QM_REG_PQTX2PF_7_RT_OFFSET 29910
-#define QM_REG_PQTX2PF_8_RT_OFFSET 29911
-#define QM_REG_PQTX2PF_9_RT_OFFSET 29912
-#define QM_REG_PQTX2PF_10_RT_OFFSET 29913
-#define QM_REG_PQTX2PF_11_RT_OFFSET 29914
-#define QM_REG_PQTX2PF_12_RT_OFFSET 29915
-#define QM_REG_PQTX2PF_13_RT_OFFSET 29916
-#define QM_REG_PQTX2PF_14_RT_OFFSET 29917
-#define QM_REG_PQTX2PF_15_RT_OFFSET 29918
-#define QM_REG_PQTX2PF_16_RT_OFFSET 29919
-#define QM_REG_PQTX2PF_17_RT_OFFSET 29920
-#define QM_REG_PQTX2PF_18_RT_OFFSET 29921
-#define QM_REG_PQTX2PF_19_RT_OFFSET 29922
-#define QM_REG_PQTX2PF_20_RT_OFFSET 29923
-#define QM_REG_PQTX2PF_21_RT_OFFSET 29924
-#define QM_REG_PQTX2PF_22_RT_OFFSET 29925
-#define QM_REG_PQTX2PF_23_RT_OFFSET 29926
-#define QM_REG_PQTX2PF_24_RT_OFFSET 29927
-#define QM_REG_PQTX2PF_25_RT_OFFSET 29928
-#define QM_REG_PQTX2PF_26_RT_OFFSET 29929
-#define QM_REG_PQTX2PF_27_RT_OFFSET 29930
-#define QM_REG_PQTX2PF_28_RT_OFFSET 29931
-#define QM_REG_PQTX2PF_29_RT_OFFSET 29932
-#define QM_REG_PQTX2PF_30_RT_OFFSET 29933
-#define QM_REG_PQTX2PF_31_RT_OFFSET 29934
-#define QM_REG_PQTX2PF_32_RT_OFFSET 29935
-#define QM_REG_PQTX2PF_33_RT_OFFSET 29936
-#define QM_REG_PQTX2PF_34_RT_OFFSET 29937
-#define QM_REG_PQTX2PF_35_RT_OFFSET 29938
-#define QM_REG_PQTX2PF_36_RT_OFFSET 29939
-#define QM_REG_PQTX2PF_37_RT_OFFSET 29940
-#define QM_REG_PQTX2PF_38_RT_OFFSET 29941
-#define QM_REG_PQTX2PF_39_RT_OFFSET 29942
-#define QM_REG_PQTX2PF_40_RT_OFFSET 29943
-#define QM_REG_PQTX2PF_41_RT_OFFSET 29944
-#define QM_REG_PQTX2PF_42_RT_OFFSET 29945
-#define QM_REG_PQTX2PF_43_RT_OFFSET 29946
-#define QM_REG_PQTX2PF_44_RT_OFFSET 29947
-#define QM_REG_PQTX2PF_45_RT_OFFSET 29948
-#define QM_REG_PQTX2PF_46_RT_OFFSET 29949
-#define QM_REG_PQTX2PF_47_RT_OFFSET 29950
-#define QM_REG_PQTX2PF_48_RT_OFFSET 29951
-#define QM_REG_PQTX2PF_49_RT_OFFSET 29952
-#define QM_REG_PQTX2PF_50_RT_OFFSET 29953
-#define QM_REG_PQTX2PF_51_RT_OFFSET 29954
-#define QM_REG_PQTX2PF_52_RT_OFFSET 29955
-#define QM_REG_PQTX2PF_53_RT_OFFSET 29956
-#define QM_REG_PQTX2PF_54_RT_OFFSET 29957
-#define QM_REG_PQTX2PF_55_RT_OFFSET 29958
-#define QM_REG_PQTX2PF_56_RT_OFFSET 29959
-#define QM_REG_PQTX2PF_57_RT_OFFSET 29960
-#define QM_REG_PQTX2PF_58_RT_OFFSET 29961
-#define QM_REG_PQTX2PF_59_RT_OFFSET 29962
-#define QM_REG_PQTX2PF_60_RT_OFFSET 29963
-#define QM_REG_PQTX2PF_61_RT_OFFSET 29964
-#define QM_REG_PQTX2PF_62_RT_OFFSET 29965
-#define QM_REG_PQTX2PF_63_RT_OFFSET 29966
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29967
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29968
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29969
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29970
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29971
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29972
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29973
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29974
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29975
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29976
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29977
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29978
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29979
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29980
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29981
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29982
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29983
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29984
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29985
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29986
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29987
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29988
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29989
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29990
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29991
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29992
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29993
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29994
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29995
-#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30251
-#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
-#define QM_REG_RLGLBLCRD_RT_OFFSET 30507
-#define QM_REG_RLGLBLCRD_RT_SIZE 256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET 30763
-#define QM_REG_RLPFPERIOD_RT_OFFSET 30764
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30765
-#define QM_REG_RLPFINCVAL_RT_OFFSET 30766
-#define QM_REG_RLPFINCVAL_RT_SIZE 16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30782
-#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_RLPFCRD_RT_OFFSET 30798
-#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET 30814
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30815
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30816
-#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30832
-#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET 30848
-#define QM_REG_WFQPFCRD_RT_SIZE 160
-#define QM_REG_WFQPFENABLE_RT_OFFSET 31008
-#define QM_REG_WFQVPENABLE_RT_OFFSET 31009
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31010
-#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
-#define QM_REG_TXPQMAP_RT_OFFSET 31522
-#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32034
-#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 32546
-#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 33058
-#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33570
-#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33730
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33731
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33732
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33733
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33734
-#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33735
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33736
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33737
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33741
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33745
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33749
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33750
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33782
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33798
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33814
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33830
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33846
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33847
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33848
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33849
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33850
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33851
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33852
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33853
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33854
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33855
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33856
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33857
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33858
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33859
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33860
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33861
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33862
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33863
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33864
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33865
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33866
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33867
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33868
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33869
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33870
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33871
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33872
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33873
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33874
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33875
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33876
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33877
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33878
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33879
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33880
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33881
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33882
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33883
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33884
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33885
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33886
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33887
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33888
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33889
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33890
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33891
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33892
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33893
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33894
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33895
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33896
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33897
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33898
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33899
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33900
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33901
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33902
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33903
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33904
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33905
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33906
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33907
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33908
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33909
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33910
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33911
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33912
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33913
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33914
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33915
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33916
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33917
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33918
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33919
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33920
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33921
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33922
-
-#define RUNTIME_ARRAY_SIZE 33923
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
+#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
+#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
+#define CAU_REG_PI_MEMORY_RT_SIZE 4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
+#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
+#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
+#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
+#define SRC_REG_FIRSTFREE_RT_SIZE 2
+#define SRC_REG_LASTFREE_RT_OFFSET 6667
+#define SRC_REG_LASTFREE_RT_SIZE 2
+#define SRC_REG_COUNTFREE_RT_OFFSET 6669
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
+#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
+#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700
+#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29642
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29643
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29644
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29645
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29646
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29647
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29648
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29649
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29650
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29651
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29652
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29653
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29654
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29655
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29656
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29657
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29658
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29659
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29660
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29661
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29662
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29663
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29664
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29665
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29666
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29667
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29668
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29669
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29670
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29671
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29672
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29673
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29674
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29675
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29676
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29677
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29678
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29679
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29680
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29681
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29682
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29683
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29684
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29685
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29686
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29687
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29688
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29689
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29690
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29691
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29692
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29693
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29694
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29695
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29696
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29697
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29698
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29699
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29700
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29701
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29702
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29703
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29704
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29705
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29706
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29707
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29708
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29709
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
+#define QM_REG_VOQCRDLINE_RT_OFFSET 29837
+#define QM_REG_VOQCRDLINE_RT_SIZE 20
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29857
+#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29877
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29878
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29879
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29880
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29881
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29882
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29883
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29884
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29885
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29886
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29887
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29888
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29889
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29890
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29891
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29892
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29893
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29894
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29895
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29896
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29897
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29898
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29899
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29900
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29901
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29902
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29903
+#define QM_REG_PQTX2PF_0_RT_OFFSET 29904
+#define QM_REG_PQTX2PF_1_RT_OFFSET 29905
+#define QM_REG_PQTX2PF_2_RT_OFFSET 29906
+#define QM_REG_PQTX2PF_3_RT_OFFSET 29907
+#define QM_REG_PQTX2PF_4_RT_OFFSET 29908
+#define QM_REG_PQTX2PF_5_RT_OFFSET 29909
+#define QM_REG_PQTX2PF_6_RT_OFFSET 29910
+#define QM_REG_PQTX2PF_7_RT_OFFSET 29911
+#define QM_REG_PQTX2PF_8_RT_OFFSET 29912
+#define QM_REG_PQTX2PF_9_RT_OFFSET 29913
+#define QM_REG_PQTX2PF_10_RT_OFFSET 29914
+#define QM_REG_PQTX2PF_11_RT_OFFSET 29915
+#define QM_REG_PQTX2PF_12_RT_OFFSET 29916
+#define QM_REG_PQTX2PF_13_RT_OFFSET 29917
+#define QM_REG_PQTX2PF_14_RT_OFFSET 29918
+#define QM_REG_PQTX2PF_15_RT_OFFSET 29919
+#define QM_REG_PQTX2PF_16_RT_OFFSET 29920
+#define QM_REG_PQTX2PF_17_RT_OFFSET 29921
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29922
+#define QM_REG_PQTX2PF_19_RT_OFFSET 29923
+#define QM_REG_PQTX2PF_20_RT_OFFSET 29924
+#define QM_REG_PQTX2PF_21_RT_OFFSET 29925
+#define QM_REG_PQTX2PF_22_RT_OFFSET 29926
+#define QM_REG_PQTX2PF_23_RT_OFFSET 29927
+#define QM_REG_PQTX2PF_24_RT_OFFSET 29928
+#define QM_REG_PQTX2PF_25_RT_OFFSET 29929
+#define QM_REG_PQTX2PF_26_RT_OFFSET 29930
+#define QM_REG_PQTX2PF_27_RT_OFFSET 29931
+#define QM_REG_PQTX2PF_28_RT_OFFSET 29932
+#define QM_REG_PQTX2PF_29_RT_OFFSET 29933
+#define QM_REG_PQTX2PF_30_RT_OFFSET 29934
+#define QM_REG_PQTX2PF_31_RT_OFFSET 29935
+#define QM_REG_PQTX2PF_32_RT_OFFSET 29936
+#define QM_REG_PQTX2PF_33_RT_OFFSET 29937
+#define QM_REG_PQTX2PF_34_RT_OFFSET 29938
+#define QM_REG_PQTX2PF_35_RT_OFFSET 29939
+#define QM_REG_PQTX2PF_36_RT_OFFSET 29940
+#define QM_REG_PQTX2PF_37_RT_OFFSET 29941
+#define QM_REG_PQTX2PF_38_RT_OFFSET 29942
+#define QM_REG_PQTX2PF_39_RT_OFFSET 29943
+#define QM_REG_PQTX2PF_40_RT_OFFSET 29944
+#define QM_REG_PQTX2PF_41_RT_OFFSET 29945
+#define QM_REG_PQTX2PF_42_RT_OFFSET 29946
+#define QM_REG_PQTX2PF_43_RT_OFFSET 29947
+#define QM_REG_PQTX2PF_44_RT_OFFSET 29948
+#define QM_REG_PQTX2PF_45_RT_OFFSET 29949
+#define QM_REG_PQTX2PF_46_RT_OFFSET 29950
+#define QM_REG_PQTX2PF_47_RT_OFFSET 29951
+#define QM_REG_PQTX2PF_48_RT_OFFSET 29952
+#define QM_REG_PQTX2PF_49_RT_OFFSET 29953
+#define QM_REG_PQTX2PF_50_RT_OFFSET 29954
+#define QM_REG_PQTX2PF_51_RT_OFFSET 29955
+#define QM_REG_PQTX2PF_52_RT_OFFSET 29956
+#define QM_REG_PQTX2PF_53_RT_OFFSET 29957
+#define QM_REG_PQTX2PF_54_RT_OFFSET 29958
+#define QM_REG_PQTX2PF_55_RT_OFFSET 29959
+#define QM_REG_PQTX2PF_56_RT_OFFSET 29960
+#define QM_REG_PQTX2PF_57_RT_OFFSET 29961
+#define QM_REG_PQTX2PF_58_RT_OFFSET 29962
+#define QM_REG_PQTX2PF_59_RT_OFFSET 29963
+#define QM_REG_PQTX2PF_60_RT_OFFSET 29964
+#define QM_REG_PQTX2PF_61_RT_OFFSET 29965
+#define QM_REG_PQTX2PF_62_RT_OFFSET 29966
+#define QM_REG_PQTX2PF_63_RT_OFFSET 29967
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29968
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29969
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29970
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29971
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29972
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29973
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29974
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29975
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29976
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29977
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29978
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29979
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29980
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29981
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29982
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29983
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29984
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29985
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29986
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29987
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29988
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29989
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29990
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29991
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29992
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29993
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29994
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29995
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29996
+#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30252
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
+#define QM_REG_RLGLBLCRD_RT_OFFSET 30508
+#define QM_REG_RLGLBLCRD_RT_SIZE 256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 30764
+#define QM_REG_RLPFPERIOD_RT_OFFSET 30765
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30766
+#define QM_REG_RLPFINCVAL_RT_OFFSET 30767
+#define QM_REG_RLPFINCVAL_RT_SIZE 16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30783
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_RLPFCRD_RT_OFFSET 30799
+#define QM_REG_RLPFCRD_RT_SIZE 16
+#define QM_REG_RLPFENABLE_RT_OFFSET 30815
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30816
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30817
+#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30833
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_WFQPFCRD_RT_OFFSET 30849
+#define QM_REG_WFQPFCRD_RT_SIZE 160
+#define QM_REG_WFQPFENABLE_RT_OFFSET 31009
+#define QM_REG_WFQVPENABLE_RT_OFFSET 31010
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31011
+#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
+#define QM_REG_TXPQMAP_RT_OFFSET 31523
+#define QM_REG_TXPQMAP_RT_SIZE 512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32035
+#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
+#define QM_REG_WFQVPCRD_RT_OFFSET 32547
+#define QM_REG_WFQVPCRD_RT_SIZE 512
+#define QM_REG_WFQVPMAP_RT_OFFSET 33059
+#define QM_REG_WFQVPMAP_RT_SIZE 512
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33571
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33731
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33732
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33733
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33734
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33735
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33736
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33737
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33738
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33742
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33746
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33750
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33751
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33783
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33799
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33815
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33831
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33847
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 33848
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33849
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33850
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33851
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33852
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33853
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33854
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33855
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33856
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33857
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33858
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33859
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33860
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33861
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33862
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33863
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33864
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33865
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33866
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33867
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33868
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33869
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33870
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33871
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33872
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33873
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33874
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33875
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33876
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33877
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33878
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33879
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33880
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33881
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33882
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33883
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33884
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33885
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33886
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33887
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33888
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33889
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33890
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33891
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33892
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33893
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33894
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33895
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33896
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33897
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33898
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33899
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33900
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33901
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33902
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33903
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33904
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33905
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33906
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33907
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33908
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33909
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33910
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33911
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33912
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33913
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33914
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33915
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33916
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33917
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33918
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33919
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33920
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33921
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33922
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33923
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33924
+
+#define RUNTIME_ARRAY_SIZE 33925
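
A hedged sketch of how these runtime offsets are consumed during init
(STORE_RT_REG is assumed to be the driver's runtime-store helper):

    /* Stage a value at its runtime-array slot; the init flow later
     * flushes the staged array to the matching chip registers.
     */
    STORE_RT_REG(p_hwfn, DORQ_REG_PF_WAKE_ALL_RT_OFFSET, 0x1);
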
/* The eth storm context for the Tstorm */
struct tstorm_eth_conn_st_ctx {
@@ -2380,266 +2692,266 @@ struct xstorm_eth_conn_st_ctx {
};
struct xstorm_eth_conn_ag_ctx {
- u8 reserved0 /* cdu_validation */;
- u8 eth_state /* state */;
- u8 flags0;
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1 /* bit4 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1 /* bit6 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1 /* bit7 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
- u8 flags1;
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1 /* bit8 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1 /* bit9 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1 /* bit10 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1 /* bit11 */
-#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1 /* bit12 */
-#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1 /* bit13 */
-#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 /* bit14 */
-#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 /* bit15 */
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 reserved0;
+ u8 eth_state;
+ u8 flags0;
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
-#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
-#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
- u8 flags4;
-#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
-#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
-#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
-#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */
-#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */
-#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */
-#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3 /* cf14 */
-#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */
-#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 /* cf16 */
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3 /* cf18 */
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 /* cf19 */
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3 /* cf21 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
-#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
-#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
-#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
-#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1 /* cf11en */
-#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1 /* cf12en */
-#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1 /* cf13en */
-#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1 /* cf14en */
-#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1 /* cf15en */
-#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 /* cf16en */
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
u8 flags10;
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 /* cf18en */
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 /* cf19en */
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf20en */
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1 /* cf21en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 /* cf22en */
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 /* cf23en */
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1 /* rule0en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1 /* rule1en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1 /* rule2en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1 /* rule3en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 /* rule4en */
-#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 /* rule8en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1 /* rule9en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1 /* rule10en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1 /* rule11en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 /* rule12en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 /* rule13en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1 /* rule14en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1 /* rule15en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1 /* rule16en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1 /* rule17en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1 /* rule18en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1 /* rule19en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 /* rule20en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 /* rule21en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 /* rule22en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 /* rule23en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 /* rule24en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 /* rule25en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 /* bit16 */
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 /* bit17 */
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 /* bit18 */
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 /* bit19 */
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 /* bit20 */
-#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 /* bit21 */
-#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 /* cf23 */
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
- u8 edpm_event_id /* byte2 */;
- __le16 physical_q0 /* physical_q0 */;
- __le16 word1 /* physical_q1 */;
- __le16 edpm_num_bds /* physical_q2 */;
- __le16 tx_bd_cons /* word3 */;
- __le16 tx_bd_prod /* word4 */;
- __le16 go_to_bd_cons /* word5 */;
- __le16 conn_dpi /* conn_dpi */;
- u8 byte3 /* byte3 */;
- u8 byte4 /* byte4 */;
- u8 byte5 /* byte5 */;
- u8 byte6 /* byte6 */;
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le32 reg2 /* reg2 */;
- __le32 reg3 /* reg3 */;
- __le32 reg4 /* reg4 */;
- __le32 reg5 /* cf_array0 */;
- __le32 reg6 /* cf_array1 */;
- __le16 word7 /* word7 */;
- __le16 word8 /* word8 */;
- __le16 word9 /* word9 */;
- __le16 word10 /* word10 */;
- __le32 reg7 /* reg7 */;
- __le32 reg8 /* reg8 */;
- __le32 reg9 /* reg9 */;
- u8 byte7 /* byte7 */;
- u8 byte8 /* byte8 */;
- u8 byte9 /* byte9 */;
- u8 byte10 /* byte10 */;
- u8 byte11 /* byte11 */;
- u8 byte12 /* byte12 */;
- u8 byte13 /* byte13 */;
- u8 byte14 /* byte14 */;
- u8 byte15 /* byte15 */;
- u8 byte16 /* byte16 */;
- __le16 word11 /* word11 */;
- __le32 reg10 /* reg10 */;
- __le32 reg11 /* reg11 */;
- __le32 reg12 /* reg12 */;
- __le32 reg13 /* reg13 */;
- __le32 reg14 /* reg14 */;
- __le32 reg15 /* reg15 */;
- __le32 reg16 /* reg16 */;
- __le32 reg17 /* reg17 */;
- __le32 reg18 /* reg18 */;
- __le32 reg19 /* reg19 */;
- __le16 word12 /* word12 */;
- __le16 word13 /* word13 */;
- __le16 word14 /* word14 */;
- __le16 word15 /* word15 */;
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id;
+ __le16 physical_q0;
+ __le16 quota;
+ __le16 edpm_num_bds;
+ __le16 tx_bd_cons;
+ __le16 tx_bd_prod;
+ __le16 tx_class;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le16 word7;
+ __le16 word8;
+ __le16 word9;
+ __le16 word10;
+ __le32 reg7;
+ __le32 reg8;
+ __le32 reg9;
+ u8 byte7;
+ u8 byte8;
+ u8 byte9;
+ u8 byte10;
+ u8 byte11;
+ u8 byte12;
+ u8 byte13;
+ u8 byte14;
+ u8 byte15;
+ u8 byte16;
+ __le16 word11;
+ __le32 reg10;
+ __le32 reg11;
+ __le32 reg12;
+ __le32 reg13;
+ __le32 reg14;
+ __le32 reg15;
+ __le32 reg16;
+ __le32 reg17;
+ __le32 reg18;
+ __le32 reg19;
+ __le16 word12;
+ __le16 word13;
+ __le16 word14;
+ __le16 word15;
};
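Each flagsN byte in the aggregative context above packs several sub-fields, each described by a paired _MASK/_SHIFT macro. Access goes through generic mask-and-shift helpers; the sketch below is a hedged reconstruction of that idiom, not a quote of the driver's own macros:

    /* Extract a sub-field from a flags byte/word. */
    #define GET_FIELD(value, name) \
            (((value) >> (name##_SHIFT)) & (name##_MASK))

    /* Overwrite a sub-field, clearing its previous contents first. */
    #define SET_FIELD(value, name, flag)                                    \
            do {                                                            \
                    (value) &= ~((name##_MASK) << (name##_SHIFT));          \
                    (value) |= (((flag) & (name##_MASK)) << (name##_SHIFT)); \
            } while (0)

    /* Example: enable the DQ completion-flow bit in flags10. */
    static void example_enable_dq_cf(struct xstorm_eth_conn_ag_ctx *ctx)
    {
            SET_FIELD(ctx->flags10, XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN, 1);
    }

Later sketches below reuse GET_FIELD/SET_FIELD as defined here.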
/* The eth storm context for the Ystorm */
@@ -2648,220 +2960,220 @@ struct ystorm_eth_conn_st_ctx {
};
struct ystorm_eth_conn_ag_ctx {
- u8 byte0 /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf0 */
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 /* cf1 */
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
-#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
-#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 byte0;
+ u8 state;
+ u8 flags0;
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
+#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf0en */
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf1en */
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
-#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
- u8 byte2 /* byte2 */;
- u8 byte3 /* byte3 */;
- __le16 word0 /* word0 */;
- __le32 terminate_spqe /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le16 tx_bd_cons_upd /* word1 */;
- __le16 word2 /* word2 */;
- __le16 word3 /* word3 */;
- __le16 word4 /* word4 */;
- __le32 reg2 /* reg2 */;
- __le32 reg3 /* reg3 */;
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 tx_q0_int_coallecing_timeset;
+ u8 byte3;
+ __le16 word0;
+ __le32 terminate_spqe;
+ __le32 reg1;
+ __le16 tx_bd_cons_upd;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
};
struct tstorm_eth_conn_ag_ctx {
- u8 byte0 /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
-#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
-#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
-#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
-#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
-#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
+#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 /* rule6en */
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le32 reg2 /* reg2 */;
- __le32 reg3 /* reg3 */;
- __le32 reg4 /* reg4 */;
- __le32 reg5 /* reg5 */;
- __le32 reg6 /* reg6 */;
- __le32 reg7 /* reg7 */;
- __le32 reg8 /* reg8 */;
- u8 byte2 /* byte2 */;
- u8 byte3 /* byte3 */;
- __le16 rx_bd_cons /* word0 */;
- u8 byte4 /* byte4 */;
- u8 byte5 /* byte5 */;
- __le16 rx_bd_prod /* word1 */;
- __le16 word2 /* conn_dpi */;
- __le16 word3 /* word3 */;
- __le32 reg9 /* reg9 */;
- __le32 reg10 /* reg10 */;
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 byte2;
+ u8 byte3;
+ __le16 rx_bd_cons;
+ u8 byte4;
+ u8 byte5;
+ __le16 rx_bd_prod;
+ __le16 word2;
+ __le16 word3;
+ __le32 reg9;
+ __le32 reg10;
};
struct ustorm_eth_conn_ag_ctx {
- u8 byte0 /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3 /* timer0cf */
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3 /* timer1cf */
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
-#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 /* cf4 */
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 /* cf5 */
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf6 */
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
u8 flags2;
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf0en */
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf1en */
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
-#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 /* cf4en */
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 /* cf5en */
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf6en */
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
-#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
- u8 byte2 /* byte2 */;
- u8 byte3 /* byte3 */;
- __le16 word0 /* conn_dpi */;
- __le16 tx_bd_cons /* word1 */;
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le32 reg2 /* reg2 */;
- __le32 tx_int_coallecing_timeset /* reg3 */;
- __le16 tx_drv_bd_cons /* word2 */;
- __le16 rx_drv_cqe_cons /* word3 */;
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 tx_bd_cons;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 tx_int_coallecing_timeset;
+ __le16 tx_drv_bd_cons;
+ __le16 rx_drv_cqe_cons;
};
/* The eth storm context for the Ustorm */
@@ -2876,47 +3188,75 @@ struct mstorm_eth_conn_st_ctx {
/* eth connection context */
struct eth_conn_context {
- struct tstorm_eth_conn_st_ctx tstorm_st_context;
- struct regpair tstorm_st_padding[2];
- struct pstorm_eth_conn_st_ctx pstorm_st_context;
- struct xstorm_eth_conn_st_ctx xstorm_st_context;
- struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
- struct ystorm_eth_conn_st_ctx ystorm_st_context;
- struct ystorm_eth_conn_ag_ctx ystorm_ag_context;
- struct tstorm_eth_conn_ag_ctx tstorm_ag_context;
- struct ustorm_eth_conn_ag_ctx ustorm_ag_context;
- struct ustorm_eth_conn_st_ctx ustorm_st_context;
- struct mstorm_eth_conn_st_ctx mstorm_st_context;
+ struct tstorm_eth_conn_st_ctx tstorm_st_context;
+ struct regpair tstorm_st_padding[2];
+ struct pstorm_eth_conn_st_ctx pstorm_st_context;
+ struct xstorm_eth_conn_st_ctx xstorm_st_context;
+ struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
+ struct ystorm_eth_conn_st_ctx ystorm_st_context;
+ struct ystorm_eth_conn_ag_ctx ystorm_ag_context;
+ struct tstorm_eth_conn_ag_ctx tstorm_ag_context;
+ struct ustorm_eth_conn_ag_ctx ustorm_ag_context;
+ struct ustorm_eth_conn_st_ctx ustorm_st_context;
+ struct mstorm_eth_conn_st_ctx mstorm_st_context;
};
+/* opcodes for the event ring */
+enum eth_event_opcode {
+ ETH_EVENT_UNUSED,
+ ETH_EVENT_VPORT_START,
+ ETH_EVENT_VPORT_UPDATE,
+ ETH_EVENT_VPORT_STOP,
+ ETH_EVENT_TX_QUEUE_START,
+ ETH_EVENT_TX_QUEUE_STOP,
+ ETH_EVENT_RX_QUEUE_START,
+ ETH_EVENT_RX_QUEUE_UPDATE,
+ ETH_EVENT_RX_QUEUE_STOP,
+ ETH_EVENT_FILTERS_UPDATE,
+ ETH_EVENT_RESERVED,
+ ETH_EVENT_RESERVED2,
+ ETH_EVENT_RESERVED3,
+ ETH_EVENT_RX_ADD_UDP_FILTER,
+ ETH_EVENT_RX_DELETE_UDP_FILTER,
+ ETH_EVENT_RESERVED4,
+ ETH_EVENT_RESERVED5,
+ MAX_ETH_EVENT_OPCODE
+};
+
+/* Actions for a classification rule */
enum eth_filter_action {
+ ETH_FILTER_ACTION_UNUSED,
ETH_FILTER_ACTION_REMOVE,
ETH_FILTER_ACTION_ADD,
ETH_FILTER_ACTION_REMOVE_ALL,
MAX_ETH_FILTER_ACTION
};
+/* Command for adding/removing a classification rule $$KEEP_ENDIANNESS$$ */
struct eth_filter_cmd {
- u8 type /* Filter Type (MAC/VLAN/Pair/VNI) */;
- u8 vport_id /* the vport id */;
- u8 action /* filter command action: add/remove/replace */;
- u8 reserved0;
- __le32 vni;
- __le16 mac_lsb;
- __le16 mac_mid;
- __le16 mac_msb;
- __le16 vlan_id;
+ u8 type;
+ u8 vport_id;
+ u8 action;
+ u8 reserved0;
+ __le32 vni;
+ __le16 mac_lsb;
+ __le16 mac_mid;
+ __le16 mac_msb;
+ __le16 vlan_id;
};
+/* Command header for a sequence of classification rules $$KEEP_ENDIANNESS$$ */
struct eth_filter_cmd_header {
- u8 rx;
- u8 tx;
- u8 cmd_cnt;
- u8 assert_on_error;
- u8 reserved1[4];
+ u8 rx;
+ u8 tx;
+ u8 cmd_cnt;
+ u8 assert_on_error;
+ u8 reserved1[4];
};
+/* Ethernet filter types: mac/vlan/pair */
enum eth_filter_type {
+ ETH_FILTER_TYPE_UNUSED,
ETH_FILTER_TYPE_MAC,
ETH_FILTER_TYPE_VLAN,
ETH_FILTER_TYPE_PAIR,
@@ -2929,463 +3269,3515 @@ enum eth_filter_type {
MAX_ETH_FILTER_TYPE
};
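A filter update carries one eth_filter_cmd_header followed by cmd_cnt eth_filter_cmd entries, with a MAC address split into three little-endian 16-bit words. A sketch of packing a single add-MAC rule; the most-significant-word-first byte order is an assumption of this example:

    static void example_add_mac_filter(struct eth_filter_cmd_header *hdr,
                                       struct eth_filter_cmd *cmd,
                                       const u8 *mac, u8 vport_id)
    {
            hdr->rx = 1;            /* apply the rule on the RX path */
            hdr->tx = 0;
            hdr->cmd_cnt = 1;       /* a single command follows */
            hdr->assert_on_error = 0;

            cmd->type = ETH_FILTER_TYPE_MAC;
            cmd->vport_id = vport_id;
            cmd->action = ETH_FILTER_ACTION_ADD;
            /* MAC bytes 0..5 packed as three LE words (assumed order). */
            cmd->mac_msb = cpu_to_le16((mac[0] << 8) | mac[1]);
            cmd->mac_mid = cpu_to_le16((mac[2] << 8) | mac[3]);
            cmd->mac_lsb = cpu_to_le16((mac[4] << 8) | mac[5]);
    }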
+/* Ethernet Ramrod Command IDs */
enum eth_ramrod_cmd_id {
ETH_RAMROD_UNUSED,
- ETH_RAMROD_VPORT_START /* VPort Start Ramrod */,
- ETH_RAMROD_VPORT_UPDATE /* VPort Update Ramrod */,
- ETH_RAMROD_VPORT_STOP /* VPort Stop Ramrod */,
- ETH_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */,
- ETH_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
- ETH_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */,
- ETH_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
- ETH_RAMROD_FILTERS_UPDATE /* Add or Remove Mac/Vlan/Pair filters */,
- ETH_RAMROD_RX_QUEUE_UPDATE /* RX Queue Update Ramrod */,
- ETH_RAMROD_RESERVED,
- ETH_RAMROD_RESERVED2,
- ETH_RAMROD_RESERVED3,
- ETH_RAMROD_RESERVED4,
- ETH_RAMROD_RESERVED5,
- ETH_RAMROD_RESERVED6,
- ETH_RAMROD_RESERVED7,
- ETH_RAMROD_RESERVED8,
+ ETH_RAMROD_VPORT_START,
+ ETH_RAMROD_VPORT_UPDATE,
+ ETH_RAMROD_VPORT_STOP,
+ ETH_RAMROD_RX_QUEUE_START,
+ ETH_RAMROD_RX_QUEUE_STOP,
+ ETH_RAMROD_TX_QUEUE_START,
+ ETH_RAMROD_TX_QUEUE_STOP,
+ ETH_RAMROD_FILTERS_UPDATE,
+ ETH_RAMROD_RX_QUEUE_UPDATE,
+ ETH_RAMROD_RX_CREATE_OPENFLOW_ACTION,
+ ETH_RAMROD_RX_ADD_OPENFLOW_FILTER,
+ ETH_RAMROD_RX_DELETE_OPENFLOW_FILTER,
+ ETH_RAMROD_RX_ADD_UDP_FILTER,
+ ETH_RAMROD_RX_DELETE_UDP_FILTER,
+ ETH_RAMROD_RX_CREATE_GFT_ACTION,
+ ETH_RAMROD_GFT_UPDATE_FILTER,
MAX_ETH_RAMROD_CMD_ID
};
+/* return code from eth sp ramrods */
+struct eth_return_code {
+ u8 value;
+#define ETH_RETURN_CODE_ERR_CODE_MASK 0x1F
+#define ETH_RETURN_CODE_ERR_CODE_SHIFT 0
+#define ETH_RETURN_CODE_RESERVED_MASK 0x3
+#define ETH_RETURN_CODE_RESERVED_SHIFT 5
+#define ETH_RETURN_CODE_RX_TX_MASK 0x1
+#define ETH_RETURN_CODE_RX_TX_SHIFT 7
+};
+
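Decoding a ramrod return code uses the same GET_FIELD idiom sketched earlier; a tiny example, assuming a nonzero err_code field denotes failure:

    static bool example_ramrod_failed(struct eth_return_code rc)
    {
            return GET_FIELD(rc.value, ETH_RETURN_CODE_ERR_CODE) != 0;
    }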
+/* What to do in case an error occurs */
enum eth_tx_err {
- ETH_TX_ERR_DROP /* Drop erronous packet. */,
+ ETH_TX_ERR_DROP,
ETH_TX_ERR_ASSERT_MALICIOUS,
MAX_ETH_TX_ERR
};
+/* Per-error-type behavior selection */
struct eth_tx_err_vals {
__le16 values;
-#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_MASK 0x1
-#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_SHIFT 0
-#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_MASK 0x1
-#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_SHIFT 1
-#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_MASK 0x1
-#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_SHIFT 2
-#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_MASK 0x1
-#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_SHIFT 3
-#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_MASK 0x1
-#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_SHIFT 4
-#define ETH_TX_ERR_VALS_MTU_VIOLATION_MASK 0x1
-#define ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT 5
-#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_MASK 0x1
-#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_SHIFT 6
-#define ETH_TX_ERR_VALS_RESERVED_MASK 0x1FF
-#define ETH_TX_ERR_VALS_RESERVED_SHIFT 7
-};
-
+#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_SHIFT 0
+#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_MASK 0x1
+#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_SHIFT 1
+#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_MASK 0x1
+#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_SHIFT 2
+#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_SHIFT 3
+#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_MASK 0x1
+#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_SHIFT 4
+#define ETH_TX_ERR_VALS_MTU_VIOLATION_MASK 0x1
+#define ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT 5
+#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_SHIFT 6
+#define ETH_TX_ERR_VALS_RESERVED_MASK 0x1FF
+#define ETH_TX_ERR_VALS_RESERVED_SHIFT 7
+};
+
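Each bit of `values` selects the eth_tx_err behavior for one error class (with these single-bit fields, 0 = ETH_TX_ERR_DROP, 1 = ETH_TX_ERR_ASSERT_MALICIOUS). An illustrative policy, dropping undersized packets while treating MTU violations as malicious:

    static void example_tx_err_policy(struct eth_tx_err_vals *vals)
    {
            u16 v = 0;

            SET_FIELD(v, ETH_TX_ERR_VALS_PACKET_TOO_SMALL, ETH_TX_ERR_DROP);
            SET_FIELD(v, ETH_TX_ERR_VALS_MTU_VIOLATION,
                      ETH_TX_ERR_ASSERT_MALICIOUS);
            vals->values = cpu_to_le16(v);
    }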
+/* vport rss configuration data */
struct eth_vport_rss_config {
__le16 capabilities;
-#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT 0
-#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT 1
-#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT 2
-#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT 3
-#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_SHIFT 4
-#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT 5
-#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6
-#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x1FF
-#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 7
- u8 rss_id;
- u8 rss_mode;
- u8 update_rss_key;
- u8 update_rss_ind_table;
- u8 update_rss_capabilities;
- u8 tbl_size;
- __le32 reserved2[2];
- __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM];
- __le32 rss_key[ETH_RSS_KEY_SIZE_REGS];
- __le32 reserved3[2];
-};
-
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT 0
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT 1
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT 2
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT 3
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_SHIFT 4
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT 5
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x1FF
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 7
+ u8 rss_id;
+ u8 rss_mode;
+ u8 update_rss_key;
+ u8 update_rss_ind_table;
+ u8 update_rss_capabilities;
+ u8 tbl_size;
+ __le32 reserved2[2];
+ __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM];
+
+ __le32 rss_key[ETH_RSS_KEY_SIZE_REGS];
+ __le32 reserved3[2];
+};
+
+/* eth vport RSS mode */
enum eth_vport_rss_mode {
ETH_VPORT_RSS_MODE_DISABLED,
ETH_VPORT_RSS_MODE_REGULAR,
MAX_ETH_VPORT_RSS_MODE
};
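Filling the RSS configuration for a vport: a hedged sketch that enables IPv4/IPv6 TCP hashing and spreads the indirection table round-robin over the RX queues. The queue-id encoding of the table entries and the tbl_size value are illustrative assumptions:

    static void example_rss_config(struct eth_vport_rss_config *rss,
                                   u16 num_queues)
    {
            u16 caps = 0;
            int i;

            SET_FIELD(caps, ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY, 1);
            SET_FIELD(caps, ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY, 1);
            rss->capabilities = cpu_to_le16(caps);

            rss->rss_mode = ETH_VPORT_RSS_MODE_REGULAR;
            rss->update_rss_key = 1;
            rss->update_rss_ind_table = 1;
            rss->update_rss_capabilities = 1;
            rss->tbl_size = 1;      /* illustrative */

            /* num_queues must be nonzero. */
            for (i = 0; i < ETH_RSS_IND_TABLE_ENTRIES_NUM; i++)
                    rss->indirection_table[i] = cpu_to_le16(i % num_queues);
    }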
+/* Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$ */
struct eth_vport_rx_mode {
__le16 state;
-#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK 0x1
-#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT 0
-#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
-#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
-#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK 0x1
-#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2
-#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK 0x1
-#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT 3
-#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
-#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT 4
-#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
-#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5
-#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF
-#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT 0
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT 3
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5
+#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF
+#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6
__le16 reserved2[3];
};
+/* Command for setting tpa parameters */
struct eth_vport_tpa_param {
- u8 tpa_ipv4_en_flg;
- u8 tpa_ipv6_en_flg;
- u8 tpa_ipv4_tunn_en_flg;
- u8 tpa_ipv6_tunn_en_flg;
- u8 tpa_pkt_split_flg;
- u8 tpa_hdr_data_split_flg;
- u8 tpa_gro_consistent_flg;
- u8 tpa_max_aggs_num;
- u16 tpa_max_size;
- u16 tpa_min_size_to_start;
- u16 tpa_min_size_to_cont;
- u8 max_buff_num;
- u8 reserved;
+ u8 tpa_ipv4_en_flg;
+ u8 tpa_ipv6_en_flg;
+ u8 tpa_ipv4_tunn_en_flg;
+ u8 tpa_ipv6_tunn_en_flg;
+ u8 tpa_pkt_split_flg;
+ u8 tpa_hdr_data_split_flg;
+ u8 tpa_gro_consistent_flg;
+
+ u8 tpa_max_aggs_num;
+
+ __le16 tpa_max_size;
+ __le16 tpa_min_size_to_start;
+
+ __le16 tpa_min_size_to_cont;
+ u8 max_buff_num;
+ u8 reserved;
};
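An illustrative TPA setup enabling IPv4 and IPv6 aggregation; all numeric thresholds here are examples, not recommendations:

    static void example_tpa_param(struct eth_vport_tpa_param *tpa, u16 mtu)
    {
            tpa->tpa_ipv4_en_flg = 1;
            tpa->tpa_ipv6_en_flg = 1;
            tpa->tpa_max_aggs_num = 8;                      /* illustrative */
            tpa->tpa_max_size = cpu_to_le16(0xffff);
            tpa->tpa_min_size_to_start = cpu_to_le16(mtu / 2);
            tpa->tpa_min_size_to_cont = cpu_to_le16(mtu / 2);
            tpa->max_buff_num = 2;                          /* illustrative */
    }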
+/* Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$ */
struct eth_vport_tx_mode {
__le16 state;
-#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK 0x1
-#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT 0
-#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
-#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
-#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK 0x1
-#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT 2
-#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
-#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT 3
-#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
-#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4
-#define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF
-#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT 0
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT 2
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT 3
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF
+#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5
__le16 reserved2[3];
};
+/* Ramrod data for rx queue start ramrod */
struct rx_queue_start_ramrod_data {
- __le16 rx_queue_id;
- __le16 num_of_pbl_pages;
- __le16 bd_max_bytes;
- __le16 sb_id;
- u8 sb_index;
- u8 vport_id;
- u8 default_rss_queue_flg;
- u8 complete_cqe_flg;
- u8 complete_event_flg;
- u8 stats_counter_id;
- u8 pin_context;
- u8 pxp_tph_valid_bd;
- u8 pxp_tph_valid_pkt;
- u8 pxp_st_hint;
- __le16 pxp_st_index;
- u8 pmd_mode;
- u8 notify_en;
- u8 toggle_val;
- u8 reserved[7];
- __le16 reserved1;
- struct regpair cqe_pbl_addr;
- struct regpair bd_base;
- struct regpair reserved2;
+ __le16 rx_queue_id;
+ __le16 num_of_pbl_pages;
+ __le16 bd_max_bytes;
+ __le16 sb_id;
+ u8 sb_index;
+ u8 vport_id;
+ u8 default_rss_queue_flg;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 stats_counter_id;
+ u8 pin_context;
+ u8 pxp_tph_valid_bd;
+ u8 pxp_tph_valid_pkt;
+ u8 pxp_st_hint;
+
+ __le16 pxp_st_index;
+ u8 pmd_mode;
+
+ u8 notify_en;
+ u8 toggle_val;
+
+ u8 vf_rx_prod_index;
+
+ u8 reserved[6];
+ __le16 reserved1;
+ struct regpair cqe_pbl_addr;
+ struct regpair bd_base;
+ struct regpair reserved2;
};
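The regpair members above carry 64-bit DMA addresses as two little-endian 32-bit halves. A sketch of filling them; the lo/hi field names follow the driver's common HSI definitions and should be read as an assumption here:

    static void example_set_rxq_addrs(struct rx_queue_start_ramrod_data *p,
                                      dma_addr_t cqe_pbl, dma_addr_t bd_ring)
    {
            p->cqe_pbl_addr.hi = cpu_to_le32(upper_32_bits(cqe_pbl));
            p->cqe_pbl_addr.lo = cpu_to_le32(lower_32_bits(cqe_pbl));
            p->bd_base.hi = cpu_to_le32(upper_32_bits(bd_ring));
            p->bd_base.lo = cpu_to_le32(lower_32_bits(bd_ring));
    }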
+/* Ramrod data for rx queue stop ramrod */
struct rx_queue_stop_ramrod_data {
- __le16 rx_queue_id;
- u8 complete_cqe_flg;
- u8 complete_event_flg;
- u8 vport_id;
- u8 reserved[3];
+ __le16 rx_queue_id;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 vport_id;
+ u8 reserved[3];
};
+/* Ramrod data for rx queue update ramrod */
struct rx_queue_update_ramrod_data {
- __le16 rx_queue_id;
- u8 complete_cqe_flg;
- u8 complete_event_flg;
- u8 vport_id;
- u8 reserved[4];
- u8 reserved1;
- u8 reserved2;
- u8 reserved3;
- __le16 reserved4;
- __le16 reserved5;
+ __le16 rx_queue_id;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 vport_id;
+ u8 reserved[4];
+ u8 reserved1;
+ u8 reserved2;
+ u8 reserved3;
+ __le16 reserved4;
+ __le16 reserved5;
struct regpair reserved6;
};
-struct tx_queue_start_ramrod_data {
- __le16 sb_id;
- u8 sb_index;
- u8 vport_id;
- u8 reserved0;
- u8 stats_counter_id;
- __le16 qm_pq_id;
- u8 flags;
-#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0
-#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1
-#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2
-#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 3
-#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 4
-#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 5
-#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x3
-#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 6
- u8 pxp_st_hint;
- u8 pxp_tph_valid_bd;
- u8 pxp_tph_valid_pkt;
- __le16 pxp_st_index;
- __le16 comp_agg_size;
- __le16 queue_zone_id;
- __le16 test_dup_count;
- __le16 pbl_size;
- __le16 tx_queue_id;
- struct regpair pbl_base_addr;
- struct regpair bd_cons_address;
+/* Ramrod data for rx Add UDP Filter */
+struct rx_udp_filter_data {
+ __le16 action_icid;
+ __le16 vlan_id;
+ u8 ip_type;
+ u8 tenant_id_exists;
+ __le16 reserved1;
+ __le32 ip_dst_addr[4];
+ __le32 ip_src_addr[4];
+ __le16 udp_dst_port;
+ __le16 udp_src_port;
+ __le32 tenant_id;
};
+/* Ramrod data for tx queue start ramrod */
+struct tx_queue_start_ramrod_data {
+ __le16 sb_id;
+ u8 sb_index;
+ u8 vport_id;
+ u8 reserved0;
+ u8 stats_counter_id;
+ __le16 qm_pq_id;
+ u8 flags;
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 3
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 4
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 5
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x3
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 6
+ u8 pxp_st_hint;
+ u8 pxp_tph_valid_bd;
+ u8 pxp_tph_valid_pkt;
+ __le16 pxp_st_index;
+ __le16 comp_agg_size;
+ __le16 queue_zone_id;
+ __le16 test_dup_count;
+ __le16 pbl_size;
+ __le16 tx_queue_id;
+
+ struct regpair pbl_base_addr;
+ struct regpair bd_cons_address;
+};
+
+/* Ramrod data for tx queue stop ramrod */
struct tx_queue_stop_ramrod_data {
__le16 reserved[4];
};
+/* Ramrod data for vport filter update ramrod */
struct vport_filter_update_ramrod_data {
- struct eth_filter_cmd_header filter_cmd_hdr;
- struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT];
+ struct eth_filter_cmd_header filter_cmd_hdr;
+ struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT];
};
+/* Ramrod data for vport start ramrod */
struct vport_start_ramrod_data {
- u8 vport_id;
- u8 sw_fid;
- __le16 mtu;
- u8 drop_ttl0_en;
- u8 inner_vlan_removal_en;
- struct eth_vport_rx_mode rx_mode;
- struct eth_vport_tx_mode tx_mode;
- struct eth_vport_tpa_param tpa_param;
- __le16 default_vlan;
- u8 tx_switching_en;
- u8 anti_spoofing_en;
- u8 default_vlan_en;
- u8 handle_ptp_pkts;
- u8 silent_vlan_removal_en;
- u8 untagged;
- struct eth_tx_err_vals tx_err_behav;
- u8 zero_placement_offset;
- u8 reserved[7];
-};
-
+ u8 vport_id;
+ u8 sw_fid;
+ __le16 mtu;
+ u8 drop_ttl0_en;
+ u8 inner_vlan_removal_en;
+ struct eth_vport_rx_mode rx_mode;
+ struct eth_vport_tx_mode tx_mode;
+ struct eth_vport_tpa_param tpa_param;
+ __le16 default_vlan;
+ u8 tx_switching_en;
+ u8 anti_spoofing_en;
+
+ u8 default_vlan_en;
+
+ u8 handle_ptp_pkts;
+ u8 silent_vlan_removal_en;
+ u8 untagged;
+ struct eth_tx_err_vals tx_err_behav;
+
+ u8 zero_placement_offset;
+ u8 ctl_frame_mac_check_en;
+ u8 ctl_frame_ethtype_check_en;
+ u8 reserved[5];
+};
+
+/* Ramrod data for vport stop ramrod */
struct vport_stop_ramrod_data {
- u8 vport_id;
- u8 reserved[7];
+ u8 vport_id;
+ u8 reserved[7];
};
+/* Ramrod data for vport update ramrod */
struct vport_update_ramrod_data_cmn {
- u8 vport_id;
- u8 update_rx_active_flg;
- u8 rx_active_flg;
- u8 update_tx_active_flg;
- u8 tx_active_flg;
- u8 update_rx_mode_flg;
- u8 update_tx_mode_flg;
- u8 update_approx_mcast_flg;
- u8 update_rss_flg;
- u8 update_inner_vlan_removal_en_flg;
- u8 inner_vlan_removal_en;
- u8 update_tpa_param_flg;
- u8 update_tpa_en_flg;
- u8 update_tx_switching_en_flg;
- u8 tx_switching_en;
- u8 update_anti_spoofing_en_flg;
- u8 anti_spoofing_en;
- u8 update_handle_ptp_pkts;
- u8 handle_ptp_pkts;
- u8 update_default_vlan_en_flg;
- u8 default_vlan_en;
- u8 update_default_vlan_flg;
- __le16 default_vlan;
- u8 update_accept_any_vlan_flg;
- u8 accept_any_vlan;
- u8 silent_vlan_removal_en;
- u8 update_mtu_flg;
- __le16 mtu;
- u8 reserved[2];
+ u8 vport_id;
+ u8 update_rx_active_flg;
+ u8 rx_active_flg;
+ u8 update_tx_active_flg;
+ u8 tx_active_flg;
+ u8 update_rx_mode_flg;
+ u8 update_tx_mode_flg;
+ u8 update_approx_mcast_flg;
+
+ u8 update_rss_flg;
+ u8 update_inner_vlan_removal_en_flg;
+
+ u8 inner_vlan_removal_en;
+ u8 update_tpa_param_flg;
+ u8 update_tpa_en_flg;
+ u8 update_tx_switching_en_flg;
+
+ u8 tx_switching_en;
+ u8 update_anti_spoofing_en_flg;
+
+ u8 anti_spoofing_en;
+ u8 update_handle_ptp_pkts;
+
+ u8 handle_ptp_pkts;
+ u8 update_default_vlan_en_flg;
+
+ u8 default_vlan_en;
+
+ u8 update_default_vlan_flg;
+
+ __le16 default_vlan;
+ u8 update_accept_any_vlan_flg;
+
+ u8 accept_any_vlan;
+ u8 silent_vlan_removal_en;
+ u8 update_mtu_flg;
+
+ __le16 mtu;
+ u8 reserved[2];
};
struct vport_update_ramrod_mcast {
__le32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
};
+/* Ramrod data for vport update ramrod */
struct vport_update_ramrod_data {
- struct vport_update_ramrod_data_cmn common;
- struct eth_vport_rx_mode rx_mode;
- struct eth_vport_tx_mode tx_mode;
- struct eth_vport_tpa_param tpa_param;
- struct vport_update_ramrod_mcast approx_mcast;
- struct eth_vport_rss_config rss_config;
+ struct vport_update_ramrod_data_cmn common;
+
+ struct eth_vport_rx_mode rx_mode;
+ struct eth_vport_tx_mode tx_mode;
+ struct eth_vport_tpa_param tpa_param;
+ struct vport_update_ramrod_mcast approx_mcast;
+ struct eth_vport_rss_config rss_config;
+};
+
+struct mstorm_rdma_task_st_ctx {
+ struct regpair temp[4];
+};
+
+struct rdma_close_func_ramrod_data {
+ u8 cnq_start_offset;
+ u8 num_cnqs;
+ u8 vf_id;
+ u8 vf_valid;
+ u8 reserved[4];
+};
+
+struct rdma_cnq_params {
+ __le16 sb_num;
+ u8 sb_index;
+ u8 num_pbl_pages;
+ __le32 reserved;
+ struct regpair pbl_base_addr;
+ __le16 queue_zone_num;
+ u8 reserved1[6];
+};
+
+struct rdma_create_cq_ramrod_data {
+ struct regpair cq_handle;
+ struct regpair pbl_addr;
+ __le32 max_cqes;
+ __le16 pbl_num_pages;
+ __le16 dpi;
+ u8 is_two_level_pbl;
+ u8 cnq_id;
+ u8 pbl_log_page_size;
+ u8 toggle_bit;
+ __le16 int_timeout;
+ __le16 reserved1;
};
-#define VF_MAX_STATIC 192 /* In case of K2 */
+struct rdma_deregister_tid_ramrod_data {
+ __le32 itid;
+ __le32 reserved;
+};
-#define MCP_GLOB_PATH_MAX 2
-#define MCP_PORT_MAX 2 /* Global */
-#define MCP_GLOB_PORT_MAX 4 /* Global */
-#define MCP_GLOB_FUNC_MAX 16 /* Global */
+struct rdma_destroy_cq_output_params {
+ __le16 cnq_num;
+ __le16 reserved0;
+ __le32 reserved1;
+};
+
+struct rdma_destroy_cq_ramrod_data {
+ struct regpair output_params_addr;
+};
+
+enum rdma_event_opcode {
+ RDMA_EVENT_UNUSED,
+ RDMA_EVENT_FUNC_INIT,
+ RDMA_EVENT_FUNC_CLOSE,
+ RDMA_EVENT_REGISTER_MR,
+ RDMA_EVENT_DEREGISTER_MR,
+ RDMA_EVENT_CREATE_CQ,
+ RDMA_EVENT_RESIZE_CQ,
+ RDMA_EVENT_DESTROY_CQ,
+ RDMA_EVENT_CREATE_SRQ,
+ RDMA_EVENT_MODIFY_SRQ,
+ RDMA_EVENT_DESTROY_SRQ,
+ MAX_RDMA_EVENT_OPCODE
+};
+
+enum rdma_fw_return_code {
+ RDMA_RETURN_OK = 0,
+ RDMA_RETURN_REGISTER_MR_BAD_STATE_ERR,
+ RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR,
+ RDMA_RETURN_RESIZE_CQ_ERR,
+ RDMA_RETURN_NIG_DRAIN_REQ,
+ MAX_RDMA_FW_RETURN_CODE
+};
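
A caller that receives one of these firmware return codes would typically fold it into an errno before reporting upward. The mapping below is purely illustrative, not the driver's actual table:

	static int example_rdma_fw_rc_to_errno(enum rdma_fw_return_code rc)
	{
		switch (rc) {
		case RDMA_RETURN_OK:
			return 0;
		case RDMA_RETURN_REGISTER_MR_BAD_STATE_ERR:
		case RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR:
			return -EINVAL;	/* illustrative choice */
		case RDMA_RETURN_RESIZE_CQ_ERR:
			return -EIO;	/* illustrative choice */
		case RDMA_RETURN_NIG_DRAIN_REQ:
			return -EAGAIN;	/* drain requested; retry later */
		default:
			return -EINVAL;
		}
	}
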
+
+struct rdma_init_func_hdr {
+ u8 cnq_start_offset;
+ u8 num_cnqs;
+ u8 cq_ring_mode;
+ u8 cnp_vlan_priority;
+ __le32 cnp_send_timeout;
+ u8 cnp_dscp;
+ u8 vf_id;
+ u8 vf_valid;
+ u8 reserved[5];
+};
+
+struct rdma_init_func_ramrod_data {
+ struct rdma_init_func_hdr params_header;
+ struct rdma_cnq_params cnq_params[NUM_OF_GLOBAL_QUEUES];
+};
+
+enum rdma_ramrod_cmd_id {
+ RDMA_RAMROD_UNUSED,
+ RDMA_RAMROD_FUNC_INIT,
+ RDMA_RAMROD_FUNC_CLOSE,
+ RDMA_RAMROD_REGISTER_MR,
+ RDMA_RAMROD_DEREGISTER_MR,
+ RDMA_RAMROD_CREATE_CQ,
+ RDMA_RAMROD_RESIZE_CQ,
+ RDMA_RAMROD_DESTROY_CQ,
+ RDMA_RAMROD_CREATE_SRQ,
+ RDMA_RAMROD_MODIFY_SRQ,
+ RDMA_RAMROD_DESTROY_SRQ,
+ MAX_RDMA_RAMROD_CMD_ID
+};
+
+struct rdma_register_tid_ramrod_data {
+ __le32 flags;
+#define RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID_MASK 0x3FFFF
+#define RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID_SHIFT 0
+#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_MASK 0x1F
+#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_SHIFT 18
+#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_SHIFT 23
+#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT 24
+#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT 25
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT 26
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_SHIFT 27
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_SHIFT 28
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT 29
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT 30
+#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_SHIFT 31
+ u8 flags1;
+#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_MASK 0x1F
+#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_SHIFT 0
+#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK 0x7
+#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT 5
+ u8 flags2;
+#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT 0
+#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_SHIFT 1
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK 0x3F
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT 2
+ u8 key;
+ u8 length_hi;
+ u8 vf_id;
+ u8 vf_valid;
+ __le16 pd;
+ __le32 length_lo;
+ __le32 itid;
+ __le32 reserved2;
+ struct regpair va;
+ struct regpair pbl_base;
+ struct regpair dif_error_addr;
+ struct regpair dif_runt_addr;
+ __le32 reserved3[2];
+};
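
The _MASK/_SHIFT pairs describe packed bitfields inside the little-endian flags words: a field value is masked, shifted into place, and OR-ed in, then the assembled word is converted with cpu_to_le32(). A self-contained sketch follows; the qed driver carries its own SET_FIELD()/GET_FIELD() helpers for this pattern, and the local macro here exists only to keep the example standalone:

	#define EX_SET_FIELD(val, name, field_val)			\
		((val) |= (((field_val) & name##_MASK) << name##_SHIFT))

	static void example_fill_tid_flags(struct rdma_register_tid_ramrod_data *p,
					   u32 max_id, u32 page_size_log)
	{
		u32 flags = 0;

		EX_SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID,
			     max_id);
		EX_SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
			     page_size_log);
		EX_SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ, 1);
		EX_SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE, 1);

		p->flags = cpu_to_le32(flags);
	}
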
+
+struct rdma_resize_cq_output_params {
+ __le32 old_cq_cons;
+ __le32 old_cq_prod;
+};
+
+struct rdma_resize_cq_ramrod_data {
+ u8 flags;
+#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK 0x1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_SHIFT 0
+#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_MASK 0x1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_SHIFT 1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK 0x3F
+#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT 2
+ u8 pbl_log_page_size;
+ __le16 pbl_num_pages;
+ __le32 max_cqes;
+ struct regpair pbl_addr;
+ struct regpair output_params_addr;
+};
+
+struct rdma_srq_context {
+ struct regpair temp[8];
+};
+
+struct rdma_srq_create_ramrod_data {
+ struct regpair pbl_base_addr;
+ __le16 pages_in_srq_pbl;
+ __le16 pd_id;
+ struct rdma_srq_id srq_id;
+ __le16 page_size;
+ __le16 reserved1;
+ __le32 reserved2;
+ struct regpair producers_addr;
+};
+
+struct rdma_srq_destroy_ramrod_data {
+ struct rdma_srq_id srq_id;
+ __le32 reserved;
+};
+
+struct rdma_srq_modify_ramrod_data {
+ struct rdma_srq_id srq_id;
+ __le32 wqe_limit;
+};
+
+struct ystorm_rdma_task_st_ctx {
+ struct regpair temp[4];
+};
+
+struct ystorm_rdma_task_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ __le16 msem_ctx_upd_seq;
+ u8 flags0;
+#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6
+#define YSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
+ u8 flags1;
+#define YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
+#define YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
+#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
+#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+#define YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
+#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
+ u8 key;
+ __le32 mw_cnt;
+ u8 ref_cnt_seq;
+ u8 ctx_upd_seq;
+ __le16 dif_flags;
+ __le16 tx_ref_count;
+ __le16 last_used_ltid;
+ __le16 parent_mr_lo;
+ __le16 parent_mr_hi;
+ __le32 fbo_lo;
+ __le32 fbo_hi;
+};
+
+struct mstorm_rdma_task_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ __le16 icid;
+ u8 flags0;
+#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
+#define MSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
+ u8 flags1;
+#define MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
+#define MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
+#define MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
+#define MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4
+#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
+#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0
+#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
+ u8 key;
+ __le32 mw_cnt;
+ u8 ref_cnt_seq;
+ u8 ctx_upd_seq;
+ __le16 dif_flags;
+ __le16 tx_ref_count;
+ __le16 last_used_ltid;
+ __le16 parent_mr_lo;
+ __le16 parent_mr_hi;
+ __le32 fbo_lo;
+ __le32 fbo_hi;
+};
+
+struct ustorm_rdma_task_st_ctx {
+ struct regpair temp[2];
+};
+
+struct ustorm_rdma_task_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ __le16 icid;
+ u8 flags0;
+#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT 5
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6
+ u8 flags1;
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2
+#define USTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 4
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+ u8 flags2;
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2
+#define USTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5
+#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7
+ u8 flags3;
+#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 2
+#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+ __le32 dif_err_intervals;
+ __le32 dif_error_1st_interval;
+ __le32 reg2;
+ __le32 dif_runt_value;
+ __le32 reg4;
+ __le32 reg5;
+};
+
+struct rdma_task_context {
+ struct ystorm_rdma_task_st_ctx ystorm_st_context;
+ struct ystorm_rdma_task_ag_ctx ystorm_ag_context;
+ struct tdif_task_context tdif_context;
+ struct mstorm_rdma_task_ag_ctx mstorm_ag_context;
+ struct mstorm_rdma_task_st_ctx mstorm_st_context;
+ struct rdif_task_context rdif_context;
+ struct ustorm_rdma_task_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
+ struct ustorm_rdma_task_ag_ctx ustorm_ag_context;
+};
+
+enum rdma_tid_type {
+ RDMA_TID_REGISTERED_MR,
+ RDMA_TID_FMR,
+ RDMA_TID_MW_TYPE1,
+ RDMA_TID_MW_TYPE2A,
+ MAX_RDMA_TID_TYPE
+};
+
+struct mstorm_rdma_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+struct tstorm_rdma_conn_ag_ctx {
+ u8 reserved0;
+ u8 byte1;
+ u8 flags0;
+#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_RDMA_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define TSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+ u8 flags2;
+#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define TSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_RDMA_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
+#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ u8 byte4;
+ u8 byte5;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le32 reg9;
+ __le32 reg10;
+};
+
+struct tstorm_rdma_task_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ __le16 word0;
+ u8 flags0;
+#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
+ u8 flags1;
+#define TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1
+#define TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6
+ u8 flags2;
+#define TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6
+ u8 flags3;
+#define TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3
+#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5
+#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7
+ u8 byte2;
+ __le16 word1;
+ __le32 reg0;
+ u8 byte3;
+ u8 byte4;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg1;
+ __le32 reg2;
+};
+
+struct ustorm_rdma_conn_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
+#define USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
+ u8 flags3;
+#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 conn_dpi;
+ __le16 word1;
+ __le32 cq_cons;
+ __le32 cq_se_prod;
+ __le32 cq_prod;
+ __le32 reg3;
+ __le16 int_timeout;
+ __le16 word3;
+};
+
+struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT 7
+ u8 flags1;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT13_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT13_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7
+ u8 flags2;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT 6
+ u8 flags4;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT 6
+ u8 flags7;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT 7
+ u8 flags11;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le16 word5;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 snd_nxt_psn;
+ __le32 reg4;
+};
+
+struct xstorm_rdma_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_BIT6_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT6_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_BIT7_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT7_SHIFT 7
+ u8 flags1;
+#define XSTORM_RDMA_CONN_AG_CTX_BIT8_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT8_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_BIT9_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT9_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+ u8 flags2;
+#define XSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_RDMA_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+ u8 flags4;
+#define XSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_RDMA_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_RDMA_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF19_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF19_SHIFT 6
+ u8 flags7;
+#define XSTORM_RDMA_CONN_AG_CTX_CF20_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF20_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF21_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF21_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_RDMA_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF19EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF19EN_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_CF20EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF20EN_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF21EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF21EN_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 7
+ u8 flags11;
+#define XSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_RDMA_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_RDMA_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_RDMA_CONN_AG_CTX_MIGRATION_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_MIGRATION_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_RESERVED_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RESERVED_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le16 word5;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 snd_nxt_psn;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+};
+
+struct ystorm_rdma_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le32 reg0;
+ __le32 reg1;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
+struct mstorm_roce_conn_st_ctx {
+ struct regpair temp[6];
+};
+
+struct pstorm_roce_conn_st_ctx {
+ struct regpair temp[16];
+};
+
+struct ystorm_roce_conn_st_ctx {
+ struct regpair temp[2];
+};
+
+struct xstorm_roce_conn_st_ctx {
+ struct regpair temp[22];
+};
+
+struct tstorm_roce_conn_st_ctx {
+ struct regpair temp[30];
+};
+
+struct ustorm_roce_conn_st_ctx {
+ struct regpair temp[12];
+};
+
+struct roce_conn_context {
+ struct ystorm_roce_conn_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2];
+ struct pstorm_roce_conn_st_ctx pstorm_st_context;
+ struct xstorm_roce_conn_st_ctx xstorm_st_context;
+ struct regpair xstorm_st_padding[2];
+ struct xstorm_rdma_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_rdma_conn_ag_ctx tstorm_ag_context;
+ struct timers_context timer_context;
+ struct ustorm_rdma_conn_ag_ctx ustorm_ag_context;
+ struct tstorm_roce_conn_st_ctx tstorm_st_context;
+ struct mstorm_roce_conn_st_ctx mstorm_st_context;
+ struct ustorm_roce_conn_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
+};
+
+struct roce_create_qp_req_ramrod_data {
+ __le16 flags;
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 2
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_SHIFT 3
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_MASK 0x7
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_SHIFT 4
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 7
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 8
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 12
+ u8 max_ord;
+ u8 traffic_class;
+ u8 hop_limit;
+ u8 orq_num_pages;
+ __le16 p_key;
+ __le32 flow_label;
+ __le32 dst_qp_id;
+ __le32 ack_timeout_val;
+ __le32 initial_psn;
+ __le16 mtu;
+ __le16 pd;
+ __le16 sq_num_pages;
+	__le16 reserved2;
+ struct regpair sq_pbl_addr;
+ struct regpair orq_pbl_addr;
+ __le16 local_mac_addr[3];
+ __le16 remote_mac_addr[3];
+ __le16 vlan_id;
+ __le16 udp_src_port;
+ __le32 src_gid[4];
+ __le32 dst_gid[4];
+ struct regpair qp_handle_for_cqe;
+ struct regpair qp_handle_for_async;
+ u8 stats_counter_id;
+ u8 reserved3[7];
+ __le32 cq_cid;
+ __le16 physical_queue0;
+ __le16 dpi;
+};
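
The 16-bit flags word of the requester create-QP ramrod packs the RoCE flavor and the retry counters with the same mask/shift pattern as above. An illustrative sketch, assuming host-order inputs:

	static void example_fill_qp_req_flags(struct roce_create_qp_req_ramrod_data *p,
					      u16 flavor, u16 err_retry_cnt,
					      u16 rnr_nak_cnt)
	{
		u16 flags = 0;

		/* field names come from the macros above */
		flags |= (flavor &
			  ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_MASK)
			 << ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_SHIFT;
		flags |= (err_retry_cnt &
			  ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK)
			 << ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT;
		flags |= (rnr_nak_cnt &
			  ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK)
			 << ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT;

		p->flags = cpu_to_le16(flags);
	}
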
+
+struct roce_create_qp_resp_ramrod_data {
+ __le16 flags;
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 4
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_SHIFT 5
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_SHIFT 6
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED0_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED0_SHIFT 7
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT 8
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 11
+ u8 max_ird;
+ u8 traffic_class;
+ u8 hop_limit;
+ u8 irq_num_pages;
+ __le16 p_key;
+ __le32 flow_label;
+ __le32 dst_qp_id;
+ u8 stats_counter_id;
+ u8 reserved1;
+ __le16 mtu;
+ __le32 initial_psn;
+ __le16 pd;
+ __le16 rq_num_pages;
+ struct rdma_srq_id srq_id;
+ struct regpair rq_pbl_addr;
+ struct regpair irq_pbl_addr;
+ __le16 local_mac_addr[3];
+ __le16 remote_mac_addr[3];
+ __le16 vlan_id;
+ __le16 udp_src_port;
+ __le32 src_gid[4];
+ __le32 dst_gid[4];
+ struct regpair qp_handle_for_cqe;
+ struct regpair qp_handle_for_async;
+ __le32 reserved2[2];
+ __le32 cq_cid;
+ __le16 physical_queue0;
+ __le16 dpi;
+};
+
+struct roce_destroy_qp_req_output_params {
+ __le32 num_bound_mw;
+ __le32 reserved;
+};
+
+struct roce_destroy_qp_req_ramrod_data {
+ struct regpair output_params_addr;
+};
+
+struct roce_destroy_qp_resp_output_params {
+ __le32 num_invalidated_mw;
+ __le32 reserved;
+};
+
+struct roce_destroy_qp_resp_ramrod_data {
+ struct regpair output_params_addr;
+};
+
+enum roce_event_opcode {
+ ROCE_EVENT_CREATE_QP = 11,
+ ROCE_EVENT_MODIFY_QP,
+ ROCE_EVENT_QUERY_QP,
+ ROCE_EVENT_DESTROY_QP,
+ MAX_ROCE_EVENT_OPCODE
+};
+
+struct roce_modify_qp_req_ramrod_data {
+ __le16 flags;
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_SHIFT 1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_SHIFT 2
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_SHIFT 3
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 4
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_SHIFT 5
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_SHIFT 6
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_SHIFT 7
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_SHIFT 8
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT 9
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK 0x7
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT 10
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x7
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 13
+ u8 fields;
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 0
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 4
+ u8 max_ord;
+ u8 traffic_class;
+ u8 hop_limit;
+ __le16 p_key;
+ __le32 flow_label;
+ __le32 ack_timeout_val;
+ __le16 mtu;
+ __le16 reserved2;
+ __le32 reserved3[3];
+ __le32 src_gid[4];
+ __le32 dst_gid[4];
+};
+
+struct roce_modify_qp_resp_ramrod_data {
+ __le16 flags;
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 2
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 3
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_SHIFT 4
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 5
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_SHIFT 6
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_SHIFT 7
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_SHIFT 8
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 9
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0x3F
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 10
+ u8 fields;
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT 0
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 3
+ u8 max_ird;
+ u8 traffic_class;
+ u8 hop_limit;
+ __le16 p_key;
+ __le32 flow_label;
+ __le16 mtu;
+ __le16 reserved2;
+ __le32 src_gid[4];
+ __le32 dst_gid[4];
+};
+
+struct roce_query_qp_req_output_params {
+ __le32 psn;
+ __le32 flags;
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK 0x1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT 0
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_MASK 0x1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_SHIFT 1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK 0x3FFFFFFF
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT 2
+};
+
+struct roce_query_qp_req_ramrod_data {
+ struct regpair output_params_addr;
+};
+
+struct roce_query_qp_resp_output_params {
+ __le32 psn;
+ __le32 err_flag;
+#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG_MASK 0x1
+#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0
+#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF
+#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_SHIFT 1
+};
+
+struct roce_query_qp_resp_ramrod_data {
+ struct regpair output_params_addr;
+};
+
+enum roce_ramrod_cmd_id {
+ ROCE_RAMROD_CREATE_QP = 11,
+ ROCE_RAMROD_MODIFY_QP,
+ ROCE_RAMROD_QUERY_QP,
+ ROCE_RAMROD_DESTROY_QP,
+ MAX_ROCE_RAMROD_CMD_ID
+};
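+
+/*
+ * Note: roce_ramrod_cmd_id mirrors roce_event_opcode above (both
+ * ranges start at 11, in the same order), which suggests the EQ
+ * completion event raised for a ramrod carries the matching opcode
+ * value.
+ */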
+
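+/*
+ * The structs below are per-connection aggregative contexts, one per
+ * firmware "storm" engine (mstorm/tstorm/ustorm/xstorm/ystorm), split
+ * into requester and responder variants. Fields with generic names
+ * (bitN/cfN/ruleN, byteN/wordN/regN) are engine-internal and only fix
+ * the layout; named fields are the ones the host actually interprets.
+ * (Reading of the naming convention; not stated in this header.)
+ */
+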
+struct mstorm_roce_req_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+struct mstorm_roce_resp_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+enum roce_flavor {
+ PLAIN_ROCE, /* RoCE v1 */
+ RROCE_IPV4, /* RoCE v2 (Routable RoCE) over IPv4 */
+ RROCE_IPV6, /* RoCE v2 (Routable RoCE) over IPv6 */
+ MAX_ROCE_FLAVOR
+};
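+
+/*
+ * roce_flavor selects the wire encapsulation: RoCE v1 frames are
+ * carried directly over Ethernet, while the routable v2 flavors add
+ * an IPv4+UDP or IPv6+UDP header -- presumably why fields such as
+ * udp_src_port in the create-QP ramrods matter only for the RROCE_*
+ * flavors (an inference; the header itself does not say so).
+ */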
+
+struct tstorm_roce_req_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURED_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURED_SHIFT 1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURED_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURED_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6
+ u8 flags1;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+ u8 flags2;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT 6
+ u8 flags3;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT 3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 snd_nxt_psn;
+ __le32 snd_max_psn;
+ __le32 orq_prod;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 tx_cqe_error_type;
+ u8 orq_cache_idx;
+ __le16 snd_sq_cons_th;
+ u8 byte4;
+ u8 byte5;
+ __le16 snd_sq_cons;
+ __le16 word2;
+ __le16 word3;
+ __le32 reg9;
+ __le32 reg10;
+};
+
+struct tstorm_roce_resp_conn_ag_ctx {
+ u8 byte0;
+ u8 state;
+ u8 flags0;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+ u8 flags2;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 psn_and_rxmit_id_echo;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 tx_async_error_type;
+ u8 byte3;
+ __le16 rq_cons;
+ u8 byte4;
+ u8 byte5;
+ __le16 rq_prod;
+ __le16 conn_dpi;
+ __le16 irq_cons;
+ __le32 num_invlidated_mw;
+ __le32 reg10;
+};
+
+struct ustorm_roce_req_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le16 word2;
+ __le16 word3;
+};
+
+struct ustorm_roce_resp_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le16 word2;
+ __le16 word3;
+};
+
+struct xstorm_roce_req_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+ u8 flags2;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+ u8 flags4;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT 6
+ u8 flags7;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 7
+ u8 flags11;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 word1;
+ __le16 sq_cmp_cons;
+ __le16 sq_cons;
+ __le16 sq_prod;
+ __le16 word5;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 lsn;
+ __le32 ssn;
+ __le32 snd_una_psn;
+ __le32 snd_nxt_psn;
+ __le32 reg4;
+ __le32 orq_cons_th;
+ __le32 orq_cons;
+};
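+
+/*
+ * Requester SQ bookkeeping (a reading of the field names, not
+ * documented here): sq_prod/sq_cons/sq_cmp_cons appear to track the
+ * send-queue ring, and snd_una_psn/snd_nxt_psn the oldest-unacked and
+ * next PSNs, with snd_una_psn trailing snd_nxt_psn modulo the 24-bit
+ * RoCE PSN space.
+ */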
+
+struct xstorm_roce_resp_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+ u8 flags2;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+ u8 flags4;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT 6
+ u8 flags7;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 7
+ u8 flags11;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 word1;
+ __le16 irq_prod;
+ __le16 word3;
+ __le16 word4;
+ __le16 word5;
+ __le16 irq_cons;
+ u8 rxmit_opcode;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 rxmit_psn_and_id;
+ __le32 rxmit_bytes_length;
+ __le32 psn;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 msn_and_syndrome;
+};
+
+struct ystorm_roce_req_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le32 reg0;
+ __le32 reg1;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
+struct ystorm_roce_resp_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le32 reg0;
+ __le32 reg1;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
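+/*
+ * iSCSI connection context. The st_ctx ("storm context") structs
+ * below are opaque to the driver -- their tcp[]/iscsi[]/reserved[]
+ * arrays only pin down the per-storm layout sizes -- while the ag_ctx
+ * structs that follow expose aggregative-context flags in the same
+ * MASK/SHIFT style as the RoCE contexts above.
+ */
+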
+struct ystorm_iscsi_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+struct pstorm_iscsi_tcp_conn_st_ctx {
+ __le32 tcp[32];
+ __le32 iscsi[4];
+};
+
+struct xstorm_iscsi_tcp_conn_st_ctx {
+ __le32 reserved_iscsi[40];
+ __le32 reserved_tcp[4];
+};
+
+struct xstorm_iscsi_conn_ag_ctx {
+ u8 cdu_validation;
+ u8 state;
+ u8 flags0;
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT 7
+ u8 flags1;
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT 7
+ u8 flags2;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
+ u8 flags3;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT 6
+ u8 flags6;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
+ u8 flags7;
+#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7
+ u8 flags11;
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 physical_q1;
+ __le16 dummy_dorq_var;
+ __le16 sq_cons;
+ __le16 sq_prod;
+ __le16 word5;
+ __le16 slow_io_total_data_tx_update;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 more_to_send_seq;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 hq_scan_next_relevant_ack;
+ __le16 r2tq_prod;
+ __le16 r2tq_cons;
+ __le16 hq_prod;
+ __le16 hq_cons;
+ __le32 remain_seq;
+ __le32 bytes_to_next_pdu;
+ __le32 hq_tcp_seq;
+ u8 byte7;
+ u8 byte8;
+ u8 byte9;
+ u8 byte10;
+ u8 byte11;
+ u8 byte12;
+ u8 byte13;
+ u8 byte14;
+ u8 byte15;
+ u8 byte16;
+ __le16 word11;
+ __le32 reg10;
+ __le32 reg11;
+ __le32 exp_stat_sn;
+ __le32 reg13;
+ __le32 reg14;
+ __le32 reg15;
+ __le32 reg16;
+ __le32 reg17;
+};
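
All of the *_MASK/*_SHIFT pairs above (and in the storm contexts that follow) describe sub-fields packed into the flags bytes; the mask is given unshifted and the shift positions the field. A minimal sketch of how such a field is read and written, using helpers along the lines of qed's GET_FIELD/SET_FIELD (the ctx pointer is illustrative):

/* Sketch: accessors for the unshifted-MASK/SHIFT convention used above. */
#define GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)
#define SET_FIELD(value, name, field_val)				\
	do {								\
		(value) &= ~(name##_MASK << name##_SHIFT);		\
		(value) |= ((field_val) & name##_MASK) << name##_SHIFT;	\
	} while (0)

/* Enable the DQ-flush event in flags10 of the xstorm iSCSI AG context: */
SET_FIELD(ctx->flags10, XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN, 1);
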
+
+struct tstorm_iscsi_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define TSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+};
+
+struct ustorm_iscsi_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le16 word2;
+ __le16 word3;
+};
+
+struct tstorm_iscsi_conn_st_ctx {
+ __le32 reserved[40];
+};
+
+struct mstorm_iscsi_conn_ag_ctx {
+ u8 reserved;
+ u8 state;
+ u8 flags0;
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+struct mstorm_iscsi_tcp_conn_st_ctx {
+ __le32 reserved_tcp[20];
+ __le32 reserved_iscsi[8];
+};
+
+struct ustorm_iscsi_conn_st_ctx {
+ __le32 reserved[52];
+};
+
+struct iscsi_conn_context {
+ struct ystorm_iscsi_conn_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2];
+ struct pstorm_iscsi_tcp_conn_st_ctx pstorm_st_context;
+ struct regpair pstorm_st_padding[2];
+ struct pb_context xpb2_context;
+ struct xstorm_iscsi_tcp_conn_st_ctx xstorm_st_context;
+ struct regpair xstorm_st_padding[2];
+ struct xstorm_iscsi_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_iscsi_conn_ag_ctx tstorm_ag_context;
+ struct regpair tstorm_ag_padding[2];
+ struct timers_context timer_context;
+ struct ustorm_iscsi_conn_ag_ctx ustorm_ag_context;
+ struct pb_context upb_context;
+ struct tstorm_iscsi_conn_st_ctx tstorm_st_context;
+ struct regpair tstorm_st_padding[2];
+ struct mstorm_iscsi_conn_ag_ctx mstorm_ag_context;
+ struct mstorm_iscsi_tcp_conn_st_ctx mstorm_st_context;
+ struct ustorm_iscsi_conn_st_ctx ustorm_st_context;
+};
+
+struct iscsi_init_ramrod_params {
+ struct iscsi_spe_func_init iscsi_init_spe;
+ struct tcp_init_params tcp_init;
+};
+
+struct ystorm_iscsi_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le32 reg0;
+ __le32 reg1;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+#define VF_MAX_STATIC 192
+
+#define MCP_GLOB_PATH_MAX 2
+#define MCP_PORT_MAX 2
+#define MCP_GLOB_PORT_MAX 4
+#define MCP_GLOB_FUNC_MAX 16
-typedef u32 offsize_t; /* In DWORDS !!! */
/* Offset from the beginning of the MCP scratchpad */
-#define OFFSIZE_OFFSET_SHIFT 0
-#define OFFSIZE_OFFSET_MASK 0x0000ffff
+#define OFFSIZE_OFFSET_SHIFT 0
+#define OFFSIZE_OFFSET_MASK 0x0000ffff
/* Size of specific element (not the whole array if any) */
-#define OFFSIZE_SIZE_SHIFT 16
-#define OFFSIZE_SIZE_MASK 0xffff0000
+#define OFFSIZE_SIZE_SHIFT 16
+#define OFFSIZE_SIZE_MASK 0xffff0000
-/* SECTION_OFFSET is calculating the offset in bytes out of offsize */
-#define SECTION_OFFSET(_offsize) ((((_offsize & \
- OFFSIZE_OFFSET_MASK) >> \
- OFFSIZE_OFFSET_SHIFT) << 2))
+#define SECTION_OFFSET(_offsize) ((((_offsize & \
+ OFFSIZE_OFFSET_MASK) >> \
+ OFFSIZE_OFFSET_SHIFT) << 2))
-/* QED_SECTION_SIZE is calculating the size in bytes out of offsize */
-#define QED_SECTION_SIZE(_offsize) (((_offsize & \
- OFFSIZE_SIZE_MASK) >> \
- OFFSIZE_SIZE_SHIFT) << 2)
+#define QED_SECTION_SIZE(_offsize) (((_offsize & \
+ OFFSIZE_SIZE_MASK) >> \
+ OFFSIZE_SIZE_SHIFT) << 2)
-/* SECTION_ADDR returns the GRC addr of a section, given offsize and index
- * within section.
- */
-#define SECTION_ADDR(_offsize, idx) (MCP_REG_SCRATCH + \
- SECTION_OFFSET(_offsize) + \
- (QED_SECTION_SIZE(_offsize) * idx))
+#define SECTION_ADDR(_offsize, idx) (MCP_REG_SCRATCH + \
+ SECTION_OFFSET(_offsize) + \
+ (QED_SECTION_SIZE(_offsize) * idx))
-/* SECTION_OFFSIZE_ADDR returns the GRC addr to the offsize address.
- * Use offsetof, since the OFFSETUP collide with the firmware definition
- */
-#define SECTION_OFFSIZE_ADDR(_pub_base, _section) (_pub_base + \
- offsetof(struct \
- mcp_public_data, \
- sections[_section]))
+
+#define SECTION_OFFSIZE_ADDR(_pub_base, _section) \
+ (_pub_base + offsetof(struct mcp_public_data, sections[_section]))
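
As a worked example of the offsize encoding (offset in the low 16 bits, element size in the high 16 bits, both counted in dwords): for an offsize word of 0x00200004 (an invented value), SECTION_OFFSET yields (0x0004 << 2) = 16 bytes from MCP_REG_SCRATCH, QED_SECTION_SIZE yields (0x0020 << 2) = 128 bytes per element, and SECTION_ADDR(_offsize, 1) therefore resolves to MCP_REG_SCRATCH + 16 + 128. A sketch of the same arithmetic done by hand:

u32 offsize = 0x00200004;	/* illustrative value, not from this diff */
u32 off  = ((offsize & OFFSIZE_OFFSET_MASK) >> OFFSIZE_OFFSET_SHIFT) << 2;	/* 16 */
u32 size = ((offsize & OFFSIZE_SIZE_MASK) >> OFFSIZE_SIZE_SHIFT) << 2;		/* 128 */
u32 addr = MCP_REG_SCRATCH + off + size * 1;	/* == SECTION_ADDR(offsize, 1) */
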
/* PHY configuration */
-struct pmm_phy_cfg {
- u32 speed;
-#define PMM_SPEED_AUTONEG 0
-
- u32 pause; /* bitmask */
-#define PMM_PAUSE_NONE 0x0
-#define PMM_PAUSE_AUTONEG 0x1
-#define PMM_PAUSE_RX 0x2
-#define PMM_PAUSE_TX 0x4
-
- u32 adv_speed; /* Default should be the speed_cap_mask */
- u32 loopback_mode;
-#define PMM_LOOPBACK_NONE 0
-#define PMM_LOOPBACK_INT_PHY 1
-#define PMM_LOOPBACK_EXT_PHY 2
-#define PMM_LOOPBACK_EXT 3
-#define PMM_LOOPBACK_MAC 4
-
- /* features */
+struct eth_phy_cfg {
+ u32 speed;
+#define ETH_SPEED_AUTONEG 0
+#define ETH_SPEED_SMARTLINQ 0x8
+
+ u32 pause;
+#define ETH_PAUSE_NONE 0x0
+#define ETH_PAUSE_AUTONEG 0x1
+#define ETH_PAUSE_RX 0x2
+#define ETH_PAUSE_TX 0x4
+
+ u32 adv_speed;
+ u32 loopback_mode;
+#define ETH_LOOPBACK_NONE (0)
+#define ETH_LOOPBACK_INT_PHY (1)
+#define ETH_LOOPBACK_EXT_PHY (2)
+#define ETH_LOOPBACK_EXT (3)
+#define ETH_LOOPBACK_MAC (4)
+
u32 feature_config_flags;
+#define ETH_EEE_MODE_ADV_LPI (1 << 0)
};
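
A sketch of filling the renamed eth_phy_cfg before handing it to the management FW; all values are illustrative, and per the removed comment adv_speed would normally default to the port's speed capability mask (speed_cap_mask below is a hypothetical variable):

struct eth_phy_cfg phy_cfg = { 0 };

phy_cfg.speed = ETH_SPEED_AUTONEG;
phy_cfg.pause = ETH_PAUSE_AUTONEG | ETH_PAUSE_RX | ETH_PAUSE_TX;	/* bitmask */
phy_cfg.adv_speed = speed_cap_mask;	/* hypothetical: advertised speeds */
phy_cfg.loopback_mode = ETH_LOOPBACK_NONE;
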
struct port_mf_cfg {
- u32 dynamic_cfg; /* device control channel */
-#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff
-#define PORT_MF_CFG_OV_TAG_SHIFT 0
-#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK
-
- u32 reserved[1];
-};
-
-/* DO NOT add new fields in the middle
- * MUST be synced with struct pmm_stats_map
- */
-struct pmm_stats {
- u64 r64; /* 0x00 (Offset 0x00 ) RX 64-byte frame counter*/
- u64 r127; /* 0x01 (Offset 0x08 ) RX 65 to 127 byte frame counter*/
- u64 r255;
- u64 r511;
- u64 r1023;
- u64 r1518;
- u64 r1522;
- u64 r2047;
- u64 r4095;
- u64 r9216;
- u64 r16383;
- u64 rfcs; /* 0x0F (Offset 0x58 ) RX FCS error frame counter*/
- u64 rxcf; /* 0x10 (Offset 0x60 ) RX control frame counter*/
- u64 rxpf; /* 0x11 (Offset 0x68 ) RX pause frame counter*/
- u64 rxpp; /* 0x12 (Offset 0x70 ) RX PFC frame counter*/
- u64 raln; /* 0x16 (Offset 0x78 ) RX alignment error counter*/
- u64 rfcr; /* 0x19 (Offset 0x80 ) RX false carrier counter */
- u64 rovr; /* 0x1A (Offset 0x88 ) RX oversized frame counter*/
- u64 rjbr; /* 0x1B (Offset 0x90 ) RX jabber frame counter */
- u64 rund; /* 0x34 (Offset 0x98 ) RX undersized frame counter */
- u64 rfrg; /* 0x35 (Offset 0xa0 ) RX fragment counter */
- u64 t64; /* 0x40 (Offset 0xa8 ) TX 64-byte frame counter */
- u64 t127;
- u64 t255;
- u64 t511;
- u64 t1023;
- u64 t1518;
- u64 t2047;
- u64 t4095;
- u64 t9216;
- u64 t16383;
- u64 txpf; /* 0x50 (Offset 0xf8 ) TX pause frame counter */
- u64 txpp; /* 0x51 (Offset 0x100) TX PFC frame counter */
- u64 tlpiec;
- u64 tncl;
- u64 rbyte; /* 0x3d (Offset 0x118) RX byte counter */
- u64 rxuca; /* 0x0c (Offset 0x120) RX UC frame counter */
- u64 rxmca; /* 0x0d (Offset 0x128) RX MC frame counter */
- u64 rxbca; /* 0x0e (Offset 0x130) RX BC frame counter */
- u64 rxpok;
- u64 tbyte; /* 0x6f (Offset 0x140) TX byte counter */
- u64 txuca; /* 0x4d (Offset 0x148) TX UC frame counter */
- u64 txmca; /* 0x4e (Offset 0x150) TX MC frame counter */
- u64 txbca; /* 0x4f (Offset 0x158) TX BC frame counter */
- u64 txcf; /* 0x54 (Offset 0x160) TX control frame counter */
+ u32 dynamic_cfg;
+#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff
+#define PORT_MF_CFG_OV_TAG_SHIFT 0
+#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK
+
+ u32 reserved[1];
+};
+
+struct eth_stats {
+ u64 r64;
+ u64 r127;
+ u64 r255;
+ u64 r511;
+ u64 r1023;
+ u64 r1518;
+ u64 r1522;
+ u64 r2047;
+ u64 r4095;
+ u64 r9216;
+ u64 r16383;
+ u64 rfcs;
+ u64 rxcf;
+ u64 rxpf;
+ u64 rxpp;
+ u64 raln;
+ u64 rfcr;
+ u64 rovr;
+ u64 rjbr;
+ u64 rund;
+ u64 rfrg;
+ u64 t64;
+ u64 t127;
+ u64 t255;
+ u64 t511;
+ u64 t1023;
+ u64 t1518;
+ u64 t2047;
+ u64 t4095;
+ u64 t9216;
+ u64 t16383;
+ u64 txpf;
+ u64 txpp;
+ u64 tlpiec;
+ u64 tncl;
+ u64 rbyte;
+ u64 rxuca;
+ u64 rxmca;
+ u64 rxbca;
+ u64 rxpok;
+ u64 tbyte;
+ u64 txuca;
+ u64 txmca;
+ u64 txbca;
+ u64 txcf;
};
struct brb_stats {
- u64 brb_truncate[8];
- u64 brb_discard[8];
+ u64 brb_truncate[8];
+ u64 brb_discard[8];
};
struct port_stats {
- struct brb_stats brb;
- struct pmm_stats pmm;
+ struct brb_stats brb;
+ struct eth_stats eth;
};
-#define CMT_TEAM0 0
-#define CMT_TEAM1 1
-#define CMT_TEAM_MAX 2
-
struct couple_mode_teaming {
u8 port_cmt[MCP_GLOB_PORT_MAX];
-#define PORT_CMT_IN_TEAM BIT(0)
-#define PORT_CMT_PORT_ROLE BIT(1)
-#define PORT_CMT_PORT_INACTIVE (0 << 1)
-#define PORT_CMT_PORT_ACTIVE BIT(1)
-#define PORT_CMT_TEAM_MASK BIT(2)
-#define PORT_CMT_TEAM0 (0 << 2)
-#define PORT_CMT_TEAM1 BIT(2)
+#define PORT_CMT_IN_TEAM (1 << 0)
+#define PORT_CMT_PORT_ROLE (1 << 1)
+#define PORT_CMT_PORT_INACTIVE (0 << 1)
+#define PORT_CMT_PORT_ACTIVE (1 << 1)
+#define PORT_CMT_TEAM_MASK (1 << 2)
+#define PORT_CMT_TEAM0 (0 << 2)
+#define PORT_CMT_TEAM1 (1 << 2)
};
-/**************************************
-* LLDP and DCBX HSI structures
-**************************************/
-#define LLDP_CHASSIS_ID_STAT_LEN 4
-#define LLDP_PORT_ID_STAT_LEN 4
-#define DCBX_MAX_APP_PROTOCOL 32
-#define MAX_SYSTEM_LLDP_TLV_DATA 32
+#define LLDP_CHASSIS_ID_STAT_LEN 4
+#define LLDP_PORT_ID_STAT_LEN 4
+#define DCBX_MAX_APP_PROTOCOL 32
+#define MAX_SYSTEM_LLDP_TLV_DATA 32
-enum lldp_agent_e {
+enum _lldp_agent {
LLDP_NEAREST_BRIDGE = 0,
LLDP_NEAREST_NON_TPMR_BRIDGE,
LLDP_NEAREST_CUSTOMER_BRIDGE,
@@ -3394,690 +6786,525 @@ enum lldp_agent_e {
struct lldp_config_params_s {
u32 config;
-#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff
-#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0
-#define LLDP_CONFIG_HOLD_MASK 0x00000f00
-#define LLDP_CONFIG_HOLD_SHIFT 8
-#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000
-#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12
-#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000
-#define LLDP_CONFIG_ENABLE_RX_SHIFT 30
-#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000
-#define LLDP_CONFIG_ENABLE_TX_SHIFT 31
- u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
- u32 local_port_id[LLDP_PORT_ID_STAT_LEN];
+#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff
+#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0
+#define LLDP_CONFIG_HOLD_MASK 0x00000f00
+#define LLDP_CONFIG_HOLD_SHIFT 8
+#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000
+#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12
+#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000
+#define LLDP_CONFIG_ENABLE_RX_SHIFT 30
+#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000
+#define LLDP_CONFIG_ENABLE_TX_SHIFT 31
+ u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+ u32 local_port_id[LLDP_PORT_ID_STAT_LEN];
};
struct lldp_status_params_s {
- u32 prefix_seq_num;
- u32 status; /* TBD */
-
- /* Holds remote Chassis ID TLV header, subtype and 9B of payload. */
- u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
-
- /* Holds remote Port ID TLV header, subtype and 9B of payload. */
- u32 peer_port_id[LLDP_PORT_ID_STAT_LEN];
- u32 suffix_seq_num;
+ u32 prefix_seq_num;
+ u32 status;
+ u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+ u32 peer_port_id[LLDP_PORT_ID_STAT_LEN];
+ u32 suffix_seq_num;
};
struct dcbx_ets_feature {
u32 flags;
-#define DCBX_ETS_ENABLED_MASK 0x00000001
-#define DCBX_ETS_ENABLED_SHIFT 0
-#define DCBX_ETS_WILLING_MASK 0x00000002
-#define DCBX_ETS_WILLING_SHIFT 1
-#define DCBX_ETS_ERROR_MASK 0x00000004
-#define DCBX_ETS_ERROR_SHIFT 2
-#define DCBX_ETS_CBS_MASK 0x00000008
-#define DCBX_ETS_CBS_SHIFT 3
-#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
-#define DCBX_ETS_MAX_TCS_SHIFT 4
- u32 pri_tc_tbl[1];
-#define DCBX_ISCSI_OOO_TC 4
-#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_ISCSI_OOO_TC + 1)
- u32 tc_bw_tbl[2];
- u32 tc_tsa_tbl[2];
-#define DCBX_ETS_TSA_STRICT 0
-#define DCBX_ETS_TSA_CBS 1
-#define DCBX_ETS_TSA_ETS 2
+#define DCBX_ETS_ENABLED_MASK 0x00000001
+#define DCBX_ETS_ENABLED_SHIFT 0
+#define DCBX_ETS_WILLING_MASK 0x00000002
+#define DCBX_ETS_WILLING_SHIFT 1
+#define DCBX_ETS_ERROR_MASK 0x00000004
+#define DCBX_ETS_ERROR_SHIFT 2
+#define DCBX_ETS_CBS_MASK 0x00000008
+#define DCBX_ETS_CBS_SHIFT 3
+#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
+#define DCBX_ETS_MAX_TCS_SHIFT 4
+#define DCBX_ISCSI_OOO_TC_MASK 0x00000f00
+#define DCBX_ISCSI_OOO_TC_SHIFT 8
+ u32 pri_tc_tbl[1];
+#define DCBX_ISCSI_OOO_TC (4)
+
+#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_ISCSI_OOO_TC + 1)
+#define DCBX_CEE_STRICT_PRIORITY 0xf
+ u32 tc_bw_tbl[2];
+ u32 tc_tsa_tbl[2];
+#define DCBX_ETS_TSA_STRICT 0
+#define DCBX_ETS_TSA_CBS 1
+#define DCBX_ETS_TSA_ETS 2
};
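
Unlike the AG-context fields earlier, these DCBX masks are pre-shifted, so a field is isolated with the mask first and then shifted down. A short sketch (the ets pointer is illustrative):

u32 flags = ets->flags;	/* ets: struct dcbx_ets_feature *, illustrative */
u8 max_tcs = (flags & DCBX_ETS_MAX_TCS_MASK) >> DCBX_ETS_MAX_TCS_SHIFT;
u8 willing = (flags & DCBX_ETS_WILLING_MASK) >> DCBX_ETS_WILLING_SHIFT;
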
struct dcbx_app_priority_entry {
u32 entry;
-#define DCBX_APP_PRI_MAP_MASK 0x000000ff
-#define DCBX_APP_PRI_MAP_SHIFT 0
-#define DCBX_APP_PRI_0 0x01
-#define DCBX_APP_PRI_1 0x02
-#define DCBX_APP_PRI_2 0x04
-#define DCBX_APP_PRI_3 0x08
-#define DCBX_APP_PRI_4 0x10
-#define DCBX_APP_PRI_5 0x20
-#define DCBX_APP_PRI_6 0x40
-#define DCBX_APP_PRI_7 0x80
-#define DCBX_APP_SF_MASK 0x00000300
-#define DCBX_APP_SF_SHIFT 8
-#define DCBX_APP_SF_ETHTYPE 0
-#define DCBX_APP_SF_PORT 1
-#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
-#define DCBX_APP_PROTOCOL_ID_SHIFT 16
-};
-
-/* FW structure in BE */
+#define DCBX_APP_PRI_MAP_MASK 0x000000ff
+#define DCBX_APP_PRI_MAP_SHIFT 0
+#define DCBX_APP_PRI_0 0x01
+#define DCBX_APP_PRI_1 0x02
+#define DCBX_APP_PRI_2 0x04
+#define DCBX_APP_PRI_3 0x08
+#define DCBX_APP_PRI_4 0x10
+#define DCBX_APP_PRI_5 0x20
+#define DCBX_APP_PRI_6 0x40
+#define DCBX_APP_PRI_7 0x80
+#define DCBX_APP_SF_MASK 0x00000300
+#define DCBX_APP_SF_SHIFT 8
+#define DCBX_APP_SF_ETHTYPE 0
+#define DCBX_APP_SF_PORT 1
+#define DCBX_APP_SF_IEEE_MASK 0x0000f000
+#define DCBX_APP_SF_IEEE_SHIFT 12
+#define DCBX_APP_SF_IEEE_RESERVED 0
+#define DCBX_APP_SF_IEEE_ETHTYPE 1
+#define DCBX_APP_SF_IEEE_TCP_PORT 2
+#define DCBX_APP_SF_IEEE_UDP_PORT 3
+#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4
+
+#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
+#define DCBX_APP_PROTOCOL_ID_SHIFT 16
+};
+
struct dcbx_app_priority_feature {
u32 flags;
-#define DCBX_APP_ENABLED_MASK 0x00000001
-#define DCBX_APP_ENABLED_SHIFT 0
-#define DCBX_APP_WILLING_MASK 0x00000002
-#define DCBX_APP_WILLING_SHIFT 1
-#define DCBX_APP_ERROR_MASK 0x00000004
-#define DCBX_APP_ERROR_SHIFT 2
-/* Not in use
- * #define DCBX_APP_DEFAULT_PRI_MASK 0x00000f00
- * #define DCBX_APP_DEFAULT_PRI_SHIFT 8
- */
-#define DCBX_APP_MAX_TCS_MASK 0x0000f000
-#define DCBX_APP_MAX_TCS_SHIFT 12
-#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000
-#define DCBX_APP_NUM_ENTRIES_SHIFT 16
+#define DCBX_APP_ENABLED_MASK 0x00000001
+#define DCBX_APP_ENABLED_SHIFT 0
+#define DCBX_APP_WILLING_MASK 0x00000002
+#define DCBX_APP_WILLING_SHIFT 1
+#define DCBX_APP_ERROR_MASK 0x00000004
+#define DCBX_APP_ERROR_SHIFT 2
+#define DCBX_APP_MAX_TCS_MASK 0x0000f000
+#define DCBX_APP_MAX_TCS_SHIFT 12
+#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000
+#define DCBX_APP_NUM_ENTRIES_SHIFT 16
struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
};
-/* FW structure in BE */
struct dcbx_features {
- /* PG feature */
struct dcbx_ets_feature ets;
- /* PFC feature */
- u32 pfc;
-#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff
-#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80
-
-#define DCBX_PFC_FLAGS_MASK 0x0000ff00
-#define DCBX_PFC_FLAGS_SHIFT 8
-#define DCBX_PFC_CAPS_MASK 0x00000f00
-#define DCBX_PFC_CAPS_SHIFT 8
-#define DCBX_PFC_MBC_MASK 0x00004000
-#define DCBX_PFC_MBC_SHIFT 14
-#define DCBX_PFC_WILLING_MASK 0x00008000
-#define DCBX_PFC_WILLING_SHIFT 15
-#define DCBX_PFC_ENABLED_MASK 0x00010000
-#define DCBX_PFC_ENABLED_SHIFT 16
-#define DCBX_PFC_ERROR_MASK 0x00020000
-#define DCBX_PFC_ERROR_SHIFT 17
-
- /* APP feature */
+ u32 pfc;
+#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff
+#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80
+
+#define DCBX_PFC_FLAGS_MASK 0x0000ff00
+#define DCBX_PFC_FLAGS_SHIFT 8
+#define DCBX_PFC_CAPS_MASK 0x00000f00
+#define DCBX_PFC_CAPS_SHIFT 8
+#define DCBX_PFC_MBC_MASK 0x00004000
+#define DCBX_PFC_MBC_SHIFT 14
+#define DCBX_PFC_WILLING_MASK 0x00008000
+#define DCBX_PFC_WILLING_SHIFT 15
+#define DCBX_PFC_ENABLED_MASK 0x00010000
+#define DCBX_PFC_ENABLED_SHIFT 16
+#define DCBX_PFC_ERROR_MASK 0x00020000
+#define DCBX_PFC_ERROR_SHIFT 17
struct dcbx_app_priority_feature app;
};
struct dcbx_local_params {
u32 config;
-#define DCBX_CONFIG_VERSION_MASK 0x00000003
-#define DCBX_CONFIG_VERSION_SHIFT 0
-#define DCBX_CONFIG_VERSION_DISABLED 0
-#define DCBX_CONFIG_VERSION_IEEE 1
-#define DCBX_CONFIG_VERSION_CEE 2
+#define DCBX_CONFIG_VERSION_MASK 0x00000007
+#define DCBX_CONFIG_VERSION_SHIFT 0
+#define DCBX_CONFIG_VERSION_DISABLED 0
+#define DCBX_CONFIG_VERSION_IEEE 1
+#define DCBX_CONFIG_VERSION_CEE 2
+#define DCBX_CONFIG_VERSION_STATIC 4
- u32 flags;
- struct dcbx_features features;
+ u32 flags;
+ struct dcbx_features features;
};
struct dcbx_mib {
- u32 prefix_seq_num;
- u32 flags;
- struct dcbx_features features;
- u32 suffix_seq_num;
+ u32 prefix_seq_num;
+ u32 flags;
+ struct dcbx_features features;
+ u32 suffix_seq_num;
};
struct lldp_system_tlvs_buffer_s {
- u16 valid;
- u16 length;
- u32 data[MAX_SYSTEM_LLDP_TLV_DATA];
+ u16 valid;
+ u16 length;
+ u32 data[MAX_SYSTEM_LLDP_TLV_DATA];
};
-/**************************************/
-/* */
-/* P U B L I C G L O B A L */
-/* */
-/**************************************/
-struct public_global {
- u32 max_path;
-#define MAX_PATH_BIG_BEAR 2
-#define MAX_PATH_K2 1
- u32 max_ports;
-#define MODE_1P 1
-#define MODE_2P 2
-#define MODE_3P 3
-#define MODE_4P 4
- u32 debug_mb_offset;
- u32 phymod_dbg_mb_offset;
- struct couple_mode_teaming cmt;
- s32 internal_temperature;
- u32 mfw_ver;
- u32 running_bundle_id;
+struct dcb_dscp_map {
+ u32 flags;
+#define DCB_DSCP_ENABLE_MASK 0x1
+#define DCB_DSCP_ENABLE_SHIFT 0
+#define DCB_DSCP_ENABLE 1
+ u32 dscp_pri_map[8];
};
-/**************************************/
-/* */
-/* P U B L I C P A T H */
-/* */
-/**************************************/
+struct public_global {
+ u32 max_path;
+ u32 max_ports;
+ u32 debug_mb_offset;
+ u32 phymod_dbg_mb_offset;
+ struct couple_mode_teaming cmt;
+ s32 internal_temperature;
+ u32 mfw_ver;
+ u32 running_bundle_id;
+ s32 external_temperature;
+ u32 mdump_reason;
+};
-/****************************************************************************
-* Shared Memory 2 Region *
-****************************************************************************/
-/* The fw_flr_ack is actually built in the following way: */
-/* 8 bit: PF ack */
-/* 128 bit: VF ack */
-/* 8 bit: ios_dis_ack */
-/* In order to maintain endianity in the mailbox hsi, we want to keep using */
-/* u32. The fw must have the VF right after the PF since this is how it */
-/* access arrays(it expects always the VF to reside after the PF, and that */
-/* makes the calculation much easier for it. ) */
-/* In order to answer both limitations, and keep the struct small, the code */
-/* will abuse the structure defined here to achieve the actual partition */
-/* above */
-/****************************************************************************/
struct fw_flr_mb {
- u32 aggint;
- u32 opgen_addr;
- u32 accum_ack; /* 0..15:PF, 16..207:VF, 256..271:IOV_DIS */
-#define ACCUM_ACK_PF_BASE 0
-#define ACCUM_ACK_PF_SHIFT 0
-
-#define ACCUM_ACK_VF_BASE 8
-#define ACCUM_ACK_VF_SHIFT 3
-
-#define ACCUM_ACK_IOV_DIS_BASE 256
-#define ACCUM_ACK_IOV_DIS_SHIFT 8
+ u32 aggint;
+ u32 opgen_addr;
+ u32 accum_ack;
};
struct public_path {
- struct fw_flr_mb flr_mb;
- u32 mcp_vf_disabled[VF_MAX_STATIC / 32];
-
- u32 process_kill;
-#define PROCESS_KILL_COUNTER_MASK 0x0000ffff
-#define PROCESS_KILL_COUNTER_SHIFT 0
-#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000
-#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16
+ struct fw_flr_mb flr_mb;
+ u32 mcp_vf_disabled[VF_MAX_STATIC / 32];
+
+ u32 process_kill;
+#define PROCESS_KILL_COUNTER_MASK 0x0000ffff
+#define PROCESS_KILL_COUNTER_SHIFT 0
+#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000
+#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16
#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) (aeu_reg_id * 32 + aeu_bit)
};
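
As a worked example, GLOBAL_AEU_BIT(2, 5) evaluates to 2 * 32 + 5 = 69, i.e. bit 5 of the third 32-bit AEU register, which is the flattened index carried in the upper half of process_kill.
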
-/**************************************/
-/* */
-/* P U B L I C P O R T */
-/* */
-/**************************************/
-
-/****************************************************************************
-* Driver <-> FW Mailbox *
-****************************************************************************/
-
struct public_port {
- u32 validity_map; /* 0x0 (4*2 = 0x8) */
-
- /* validity bits */
-#define MCP_VALIDITY_PCI_CFG 0x00100000
-#define MCP_VALIDITY_MB 0x00200000
-#define MCP_VALIDITY_DEV_INFO 0x00400000
-#define MCP_VALIDITY_RESERVED 0x00000007
-
- /* One licensing bit should be set */
-#define MCP_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038
-#define MCP_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008
-#define MCP_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010
-#define MCP_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020
-
- /* Active MFW */
-#define MCP_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000
-#define MCP_VALIDITY_ACTIVE_MFW_MASK 0x000001c0
-#define MCP_VALIDITY_ACTIVE_MFW_NCSI 0x00000040
-#define MCP_VALIDITY_ACTIVE_MFW_NONE 0x000001c0
+ u32 validity_map;
u32 link_status;
-#define LINK_STATUS_LINK_UP \
- 0x00000001
-#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e
-#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD BIT(1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1)
-
-#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
-
-#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
-#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
-
-#define LINK_STATUS_PFC_ENABLED \
- 0x00000100
-#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
-#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
-#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800
-#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000
-#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000
-#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000
-#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000
-#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000
-
-#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000
-#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18)
-#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE BIT(18)
-#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18)
-#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18)
-
-#define LINK_STATUS_SFP_TX_FAULT \
- 0x00100000
-#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000
-#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000
-
- u32 link_status1;
- u32 ext_phy_fw_version;
- u32 drv_phy_cfg_addr;
-
- u32 port_stx;
-
- u32 stat_nig_timer;
-
- struct port_mf_cfg port_mf_config;
- struct port_stats stats;
-
- u32 media_type;
-#define MEDIA_UNSPECIFIED 0x0
-#define MEDIA_SFPP_10G_FIBER 0x1
-#define MEDIA_XFP_FIBER 0x2
-#define MEDIA_DA_TWINAX 0x3
-#define MEDIA_BASE_T 0x4
-#define MEDIA_SFP_1G_FIBER 0x5
-#define MEDIA_MODULE_FIBER 0x6
-#define MEDIA_KR 0xf0
-#define MEDIA_NOT_PRESENT 0xff
+#define LINK_STATUS_LINK_UP 0x00000001
+#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (1 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1)
+
+#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
+
+#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
+#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
+
+#define LINK_STATUS_PFC_ENABLED 0x00000100
+#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
+#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
+#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800
+#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000
+#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000
+#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000
+#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000
+#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000
+
+#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000
+#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18)
+#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1 << 18)
+#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18)
+#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18)
+
+#define LINK_STATUS_SFP_TX_FAULT 0x00100000
+#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000
+#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000
+#define LINK_STATUS_RX_SIGNAL_PRESENT 0x00800000
+#define LINK_STATUS_MAC_LOCAL_FAULT 0x01000000
+#define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000
+#define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000
+
+ u32 link_status1;
+ u32 ext_phy_fw_version;
+ u32 drv_phy_cfg_addr;
+
+ u32 port_stx;
+
+ u32 stat_nig_timer;
+
+ struct port_mf_cfg port_mf_config;
+ struct port_stats stats;
+
+ u32 media_type;
+#define MEDIA_UNSPECIFIED 0x0
+#define MEDIA_SFPP_10G_FIBER 0x1
+#define MEDIA_XFP_FIBER 0x2
+#define MEDIA_DA_TWINAX 0x3
+#define MEDIA_BASE_T 0x4
+#define MEDIA_SFP_1G_FIBER 0x5
+#define MEDIA_MODULE_FIBER 0x6
+#define MEDIA_KR 0xf0
+#define MEDIA_NOT_PRESENT 0xff
u32 lfa_status;
-#define LFA_LINK_FLAP_REASON_OFFSET 0
-#define LFA_LINK_FLAP_REASON_MASK 0x000000ff
-#define LFA_NO_REASON (0 << 0)
-#define LFA_LINK_DOWN BIT(0)
-#define LFA_FORCE_INIT BIT(1)
-#define LFA_LOOPBACK_MISMATCH BIT(2)
-#define LFA_SPEED_MISMATCH BIT(3)
-#define LFA_FLOW_CTRL_MISMATCH BIT(4)
-#define LFA_ADV_SPEED_MISMATCH BIT(5)
-#define LINK_FLAP_AVOIDANCE_COUNT_OFFSET 8
-#define LINK_FLAP_AVOIDANCE_COUNT_MASK 0x0000ff00
-#define LINK_FLAP_COUNT_OFFSET 16
-#define LINK_FLAP_COUNT_MASK 0x00ff0000
-
- u32 link_change_count;
-
- /* LLDP params */
- struct lldp_config_params_s lldp_config_params[
- LLDP_MAX_LLDP_AGENTS];
- struct lldp_status_params_s lldp_status_params[
- LLDP_MAX_LLDP_AGENTS];
- struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf;
+ u32 link_change_count;
+
+ struct lldp_config_params_s lldp_config_params[LLDP_MAX_LLDP_AGENTS];
+ struct lldp_status_params_s lldp_status_params[LLDP_MAX_LLDP_AGENTS];
+ struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf;
/* DCBX related MIB */
- struct dcbx_local_params local_admin_dcbx_mib;
- struct dcbx_mib remote_dcbx_mib;
- struct dcbx_mib operational_dcbx_mib;
+ struct dcbx_local_params local_admin_dcbx_mib;
+ struct dcbx_mib remote_dcbx_mib;
+ struct dcbx_mib operational_dcbx_mib;
- u32 fc_npiv_nvram_tbl_addr;
- u32 fc_npiv_nvram_tbl_size;
- u32 transceiver_data;
-#define PMM_TRANSCEIVER_STATE_MASK 0x000000FF
-#define PMM_TRANSCEIVER_STATE_SHIFT 0x00000000
-#define PMM_TRANSCEIVER_STATE_PRESENT 0x00000001
-};
-/**************************************/
-/* */
-/* P U B L I C F U N C */
-/* */
-/**************************************/
+ u32 reserved[2];
+ u32 transceiver_data;
+#define ETH_TRANSCEIVER_STATE_MASK 0x000000FF
+#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000
+#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000
+#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001
+#define ETH_TRANSCEIVER_STATE_VALID 0x00000003
+#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008
+ u32 wol_info;
+ u32 wol_pkt_len;
+ u32 wol_pkt_details;
+ struct dcb_dscp_map dcb_dscp_map;
+};
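
A sketch of decoding the link_status word with the masks above; p_port is an illustrative pointer to struct public_port, and speed/duplex is the 4-bit enumeration at bits 1-4:

u32 status = p_port->link_status;	/* p_port: illustrative */
bool link_up = status & LINK_STATUS_LINK_UP;
u32 speed = status & LINK_STATUS_SPEED_AND_DUPLEX_MASK;

if (link_up && speed == LINK_STATUS_SPEED_AND_DUPLEX_40G)
	;	/* link is up at 40G */
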
struct public_func {
- u32 iscsi_boot_signature;
- u32 iscsi_boot_block_offset;
-
- u32 mtu_size;
- u32 c2s_pcp_map_lower;
- u32 c2s_pcp_map_upper;
- u32 c2s_pcp_map_default;
- u32 reserved[4];
-
- u32 config;
-
- /* E/R/I/D */
- /* function 0 of each port cannot be hidden */
-#define FUNC_MF_CFG_FUNC_HIDE 0x00000001
-#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002
-#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001
-
-#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0
-#define FUNC_MF_CFG_PROTOCOL_SHIFT 4
-#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000
+ u32 reserved0[2];
+
+ u32 mtu_size;
+
+ u32 reserved[7];
+
+ u32 config;
+#define FUNC_MF_CFG_FUNC_HIDE 0x00000001
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001
+
+#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0
+#define FUNC_MF_CFG_PROTOCOL_SHIFT 4
+#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000
#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010
-#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020
#define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030
-#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030
+#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030
- /* MINBW, MAXBW */
- /* value range - 0..100, increments in 1 % */
-#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00
-#define FUNC_MF_CFG_MIN_BW_SHIFT 8
-#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
-#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000
-#define FUNC_MF_CFG_MAX_BW_SHIFT 16
-#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000
+#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00
+#define FUNC_MF_CFG_MIN_BW_SHIFT 8
+#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
+#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000
+#define FUNC_MF_CFG_MAX_BW_SHIFT 16
+#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000
- u32 status;
-#define FUNC_STATUS_VLINK_DOWN 0x00000001
+ u32 status;
+#define FUNC_STATUS_VLINK_DOWN 0x00000001
- u32 mac_upper; /* MAC */
-#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
-#define FUNC_MF_CFG_UPPERMAC_SHIFT 0
-#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
- u32 mac_lower;
-#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
+ u32 mac_upper;
+#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
+#define FUNC_MF_CFG_UPPERMAC_SHIFT 0
+#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
+ u32 mac_lower;
+#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
- u32 fcoe_wwn_port_name_upper;
- u32 fcoe_wwn_port_name_lower;
+ u32 fcoe_wwn_port_name_upper;
+ u32 fcoe_wwn_port_name_lower;
- u32 fcoe_wwn_node_name_upper;
- u32 fcoe_wwn_node_name_lower;
+ u32 fcoe_wwn_node_name_upper;
+ u32 fcoe_wwn_node_name_lower;
- u32 ovlan_stag; /* tags */
-#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff
-#define FUNC_MF_CFG_OV_STAG_SHIFT 0
-#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK
+ u32 ovlan_stag;
+#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff
+#define FUNC_MF_CFG_OV_STAG_SHIFT 0
+#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK
- u32 pf_allocation; /* vf per pf */
+ u32 pf_allocation;
- u32 preserve_data; /* Will be used bt CCM */
+ u32 preserve_data;
- u32 driver_last_activity_ts;
+ u32 driver_last_activity_ts;
- u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32]; /* 0x0044 */
+ u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32];
- u32 drv_id;
-#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
-#define DRV_ID_PDA_COMP_VER_SHIFT 0
+ u32 drv_id;
+#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
+#define DRV_ID_PDA_COMP_VER_SHIFT 0
-#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
-#define DRV_ID_MCP_HSI_VER_SHIFT 16
-#define DRV_ID_MCP_HSI_VER_CURRENT BIT(DRV_ID_MCP_HSI_VER_SHIFT)
+#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
+#define DRV_ID_MCP_HSI_VER_SHIFT 16
+#define DRV_ID_MCP_HSI_VER_CURRENT (1 << DRV_ID_MCP_HSI_VER_SHIFT)
-#define DRV_ID_DRV_TYPE_MASK 0x7f000000
-#define DRV_ID_DRV_TYPE_SHIFT 24
-#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_WINDOWS (2 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_DIAG (3 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_PREBOOT (4 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_SOLARIS (5 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_VMWARE (6 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_FREEBSD (7 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_AIX (8 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_MASK 0x7f000000
+#define DRV_ID_DRV_TYPE_SHIFT 24
+#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_INIT_HW_MASK 0x80000000
-#define DRV_ID_DRV_INIT_HW_SHIFT 31
-#define DRV_ID_DRV_INIT_HW_FLAG BIT(DRV_ID_DRV_INIT_HW_SHIFT)
+#define DRV_ID_DRV_INIT_HW_MASK 0x80000000
+#define DRV_ID_DRV_INIT_HW_SHIFT 31
+#define DRV_ID_DRV_INIT_HW_FLAG (1 << DRV_ID_DRV_INIT_HW_SHIFT)
};
-/**************************************/
-/* */
-/* P U B L I C M B */
-/* */
-/**************************************/
-/* This is the only section that the driver can write to, and each */
-/* Basically each driver request to set feature parameters,
- * will be done using a different command, which will be linked
- * to a specific data structure from the union below.
- * For huge strucuture, the common blank structure should be used.
- */
-
struct mcp_mac {
- u32 mac_upper; /* Upper 16 bits are always zeroes */
- u32 mac_lower;
+ u32 mac_upper;
+ u32 mac_lower;
};
struct mcp_val64 {
- u32 lo;
- u32 hi;
+ u32 lo;
+ u32 hi;
};
struct mcp_file_att {
- u32 nvm_start_addr;
- u32 len;
+ u32 nvm_start_addr;
+ u32 len;
+};
+
+struct bist_nvm_image_att {
+ u32 return_code;
+ u32 image_type;
+ u32 nvm_start_addr;
+ u32 len;
};
#define MCP_DRV_VER_STR_SIZE 16
#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32))
#define MCP_DRV_NVM_BUF_LEN 32
struct drv_version_stc {
- u32 version;
- u8 name[MCP_DRV_VER_STR_SIZE - 4];
+ u32 version;
+ u8 name[MCP_DRV_VER_STR_SIZE - 4];
+};
+
+struct lan_stats_stc {
+ u64 ucast_rx_pkts;
+ u64 ucast_tx_pkts;
+ u32 fcs_err;
+ u32 rserved;
+};
+
+struct ocbb_data_stc {
+ u32 ocbb_host_addr;
+ u32 ocsd_host_addr;
+ u32 ocsd_req_update_interval;
+};
+
+#define MAX_NUM_OF_SENSORS 7
+struct temperature_status_stc {
+ u32 num_of_sensors;
+ u32 sensor[MAX_NUM_OF_SENSORS];
+};
+
+/* crash dump configuration header */
+struct mdump_config_stc {
+ u32 version;
+ u32 config;
+ u32 epoc;
+ u32 num_of_logs;
+ u32 valid_logs;
};
union drv_union_data {
- u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
- struct mcp_mac wol_mac;
- struct pmm_phy_cfg drv_phy_cfg;
- struct mcp_val64 val64; /* For PHY / AVS commands */
- u8 raw_data[MCP_DRV_NVM_BUF_LEN];
- struct mcp_file_att file_att;
- u32 ack_vf_disabled[VF_MAX_STATIC / 32];
- struct drv_version_stc drv_version;
+ u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
+ struct mcp_mac wol_mac;
+
+ struct eth_phy_cfg drv_phy_cfg;
+ struct mcp_val64 val64;
+ u8 raw_data[MCP_DRV_NVM_BUF_LEN];
+ struct mcp_file_att file_att;
+ u32 ack_vf_disabled[VF_MAX_STATIC / 32];
+ struct drv_version_stc drv_version;
+ struct lan_stats_stc lan_stats;
+ u64 reserved_stats[11];
+ struct ocbb_data_stc ocbb_info;
+ struct temperature_status_stc temp_info;
+ struct bist_nvm_image_att nvm_image_att;
+ struct mdump_config_stc mdump_config;
};
struct public_drv_mb {
u32 drv_mb_header;
-#define DRV_MSG_CODE_MASK 0xffff0000
-#define DRV_MSG_CODE_LOAD_REQ 0x10000000
-#define DRV_MSG_CODE_LOAD_DONE 0x11000000
-#define DRV_MSG_CODE_INIT_HW 0x12000000
-#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000
-#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
-#define DRV_MSG_CODE_INIT_PHY 0x22000000
- /* Params - FORCE - Reinitialize the link regardless of LFA */
- /* - DONT_CARE - Don't flap the link if up */
-#define DRV_MSG_CODE_LINK_RESET 0x23000000
-
-#define DRV_MSG_CODE_SET_LLDP 0x24000000
-#define DRV_MSG_CODE_SET_DCBX 0x25000000
+#define DRV_MSG_CODE_MASK 0xffff0000
+#define DRV_MSG_CODE_LOAD_REQ 0x10000000
+#define DRV_MSG_CODE_LOAD_DONE 0x11000000
+#define DRV_MSG_CODE_INIT_HW 0x12000000
+#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000
+#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
+#define DRV_MSG_CODE_INIT_PHY 0x22000000
+#define DRV_MSG_CODE_LINK_RESET 0x23000000
+#define DRV_MSG_CODE_SET_DCBX 0x25000000
+
#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
-#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
-
-#define DRV_MSG_CODE_INITIATE_FLR 0x02000000
-#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
-#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
-#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN 0x00010000
-#define DRV_MSG_CODE_NVM_PUT_FILE_DATA 0x00020000
-#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000
-#define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000
-#define DRV_MSG_CODE_NVM_WRITE_NVRAM 0x00060000
-#define DRV_MSG_CODE_NVM_DEL_FILE 0x00080000
-#define DRV_MSG_CODE_MCP_RESET 0x00090000
-#define DRV_MSG_CODE_SET_SECURE_MODE 0x000a0000
-#define DRV_MSG_CODE_PHY_RAW_READ 0x000b0000
-#define DRV_MSG_CODE_PHY_RAW_WRITE 0x000c0000
-#define DRV_MSG_CODE_PHY_CORE_READ 0x000d0000
-#define DRV_MSG_CODE_PHY_CORE_WRITE 0x000e0000
-#define DRV_MSG_CODE_SET_VERSION 0x000f0000
-
-#define DRV_MSG_CODE_BIST_TEST 0x001e0000
-#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
-
-#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
+#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
+#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
+#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
+#define DRV_MSG_CODE_MCP_RESET 0x00090000
+#define DRV_MSG_CODE_SET_VERSION 0x000f0000
+
+#define DRV_MSG_CODE_BIST_TEST 0x001e0000
+#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
+
+#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
u32 drv_mb_param;
- /* UNLOAD_REQ params */
-#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000
-#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001
-#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002
-#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003
-
- /* UNLOAD_DONE_params */
-#define DRV_MB_PARAM_UNLOAD_NON_D3_POWER 0x00000001
-
- /* INIT_PHY params */
-#define DRV_MB_PARAM_INIT_PHY_FORCE 0x00000001
-#define DRV_MB_PARAM_INIT_PHY_DONT_CARE 0x00000002
-
- /* LLDP / DCBX params*/
-#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001
-#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0
-#define DRV_MB_PARAM_LLDP_AGENT_MASK 0x00000006
-#define DRV_MB_PARAM_LLDP_AGENT_SHIFT 1
-#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x00000008
-#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
-
-#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_MASK 0x000000FF
-#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_SHIFT 0
-
-#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW 0x1
-#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_IMAGE 0x2
-
-#define DRV_MB_PARAM_NVM_OFFSET_SHIFT 0
-#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF
-#define DRV_MB_PARAM_NVM_LEN_SHIFT 24
-#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000
-
-#define DRV_MB_PARAM_PHY_ADDR_SHIFT 0
-#define DRV_MB_PARAM_PHY_ADDR_MASK 0x1FF0FFFF
-#define DRV_MB_PARAM_PHY_LANE_SHIFT 16
-#define DRV_MB_PARAM_PHY_LANE_MASK 0x000F0000
-#define DRV_MB_PARAM_PHY_SELECT_PORT_SHIFT 29
-#define DRV_MB_PARAM_PHY_SELECT_PORT_MASK 0x20000000
-#define DRV_MB_PARAM_PHY_PORT_SHIFT 30
-#define DRV_MB_PARAM_PHY_PORT_MASK 0xc0000000
-
-/* configure vf MSIX params*/
-#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
-#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
-#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8
-#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00
-
-#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
-#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
-#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
-
-#define DRV_MB_PARAM_BIST_UNKNOWN_TEST 0
-#define DRV_MB_PARAM_BIST_REGISTER_TEST 1
-#define DRV_MB_PARAM_BIST_CLOCK_TEST 2
-
-#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0
-#define DRV_MB_PARAM_BIST_RC_PASSED 1
-#define DRV_MB_PARAM_BIST_RC_FAILED 2
-#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3
-
-#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0
-#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000FF
+#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001
+#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x000000FF
+#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00
+#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001
+#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0
+
+#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
+#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
+#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
+
+#define DRV_MB_PARAM_BIST_REGISTER_TEST 1
+#define DRV_MB_PARAM_BIST_CLOCK_TEST 2
+
+#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0
+#define DRV_MB_PARAM_BIST_RC_PASSED 1
+#define DRV_MB_PARAM_BIST_RC_FAILED 2
+#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3
+#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0
+#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000FF
u32 fw_mb_header;
-#define FW_MSG_CODE_MASK 0xffff0000
-#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000
-#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
-#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10210000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000
-#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
-#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000
-#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000
-#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000
-#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
-#define FW_MSG_CODE_INIT_PHY_DONE 0x21200000
-#define FW_MSG_CODE_INIT_PHY_ERR_INVALID_ARGS 0x21300000
-#define FW_MSG_CODE_LINK_RESET_DONE 0x23000000
-#define FW_MSG_CODE_SET_LLDP_DONE 0x24000000
-#define FW_MSG_CODE_SET_LLDP_UNSUPPORTED_AGENT 0x24010000
-#define FW_MSG_CODE_SET_DCBX_DONE 0x25000000
-#define FW_MSG_CODE_NIG_DRAIN_DONE 0x30000000
-#define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000
-#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000
-#define FW_MSG_CODE_FLR_ACK 0x02000000
-#define FW_MSG_CODE_FLR_NACK 0x02100000
-
-#define FW_MSG_CODE_NVM_OK 0x00010000
-#define FW_MSG_CODE_NVM_INVALID_MODE 0x00020000
-#define FW_MSG_CODE_NVM_PREV_CMD_WAS_NOT_FINISHED 0x00030000
-#define FW_MSG_CODE_NVM_FAILED_TO_ALLOCATE_PAGE 0x00040000
-#define FW_MSG_CODE_NVM_INVALID_DIR_FOUND 0x00050000
-#define FW_MSG_CODE_NVM_PAGE_NOT_FOUND 0x00060000
-#define FW_MSG_CODE_NVM_FAILED_PARSING_BNDLE_HEADER 0x00070000
-#define FW_MSG_CODE_NVM_FAILED_PARSING_IMAGE_HEADER 0x00080000
-#define FW_MSG_CODE_NVM_PARSING_OUT_OF_SYNC 0x00090000
-#define FW_MSG_CODE_NVM_FAILED_UPDATING_DIR 0x000a0000
-#define FW_MSG_CODE_NVM_FAILED_TO_FREE_PAGE 0x000b0000
-#define FW_MSG_CODE_NVM_FILE_NOT_FOUND 0x000c0000
-#define FW_MSG_CODE_NVM_OPERATION_FAILED 0x000d0000
-#define FW_MSG_CODE_NVM_FAILED_UNALIGNED 0x000e0000
-#define FW_MSG_CODE_NVM_BAD_OFFSET 0x000f0000
-#define FW_MSG_CODE_NVM_BAD_SIGNATURE 0x00100000
-#define FW_MSG_CODE_NVM_FILE_READ_ONLY 0x00200000
-#define FW_MSG_CODE_NVM_UNKNOWN_FILE 0x00300000
-#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK 0x00400000
-#define FW_MSG_CODE_MCP_RESET_REJECT 0x00600000
-#define FW_MSG_CODE_PHY_OK 0x00110000
-#define FW_MSG_CODE_PHY_ERROR 0x00120000
-#define FW_MSG_CODE_SET_SECURE_MODE_ERROR 0x00130000
-#define FW_MSG_CODE_SET_SECURE_MODE_OK 0x00140000
-#define FW_MSG_MODE_PHY_PRIVILEGE_ERROR 0x00150000
-#define FW_MSG_CODE_OK 0x00160000
-
-#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
-
- u32 fw_mb_param;
-
- u32 drv_pulse_mb;
-#define DRV_PULSE_SEQ_MASK 0x00007fff
-#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
-#define DRV_PULSE_ALWAYS_ALIVE 0x00008000
+#define FW_MSG_CODE_MASK 0xffff0000
+#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000
+#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
+#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10210000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000
+#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
+#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000
+#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000
+#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000
+#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
+#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000
+#define FW_MSG_CODE_OK 0x00160000
+
+#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
+
+ u32 fw_mb_param;
+
+ u32 drv_pulse_mb;
+#define DRV_PULSE_SEQ_MASK 0x00007fff
+#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
+#define DRV_PULSE_ALWAYS_ALIVE 0x00008000
+
u32 mcp_pulse_mb;
-#define MCP_PULSE_SEQ_MASK 0x00007fff
-#define MCP_PULSE_ALWAYS_ALIVE 0x00008000
-#define MCP_EVENT_MASK 0xffff0000
-#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
+#define MCP_PULSE_SEQ_MASK 0x00007fff
+#define MCP_PULSE_ALWAYS_ALIVE 0x00008000
+#define MCP_EVENT_MASK 0xffff0000
+#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
union drv_union_data union_data;
};
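
A minimal sketch of how a driver command word is composed from the defines above: the command occupies the upper 16 bits of drv_mb_header (DRV_MSG_CODE_MASK) and a rolling sequence number the lower 16 (DRV_MSG_SEQ_NUMBER_MASK); the firmware echoes the sequence back in fw_mb_header. prev_header is an illustrative variable holding the previously written header:

u32 seq = (prev_header + 1) & DRV_MSG_SEQ_NUMBER_MASK;	/* illustrative */
u32 cmd = DRV_MSG_CODE_LOAD_REQ | seq;

/* Write cmd to drv_mb_header, then poll fw_mb_header until its
 * FW_MSG_SEQ_NUMBER_MASK half matches seq; the FW_MSG_CODE_MASK half
 * (e.g. FW_MSG_CODE_DRV_LOAD_ENGINE) carries the response.
 */
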
-/* MFW - DRV MB */
-/**********************************************************************
-* Description
-* Incremental Aggregative
-* 8-bit MFW counter per message
-* 8-bit ack-counter per message
-* Capabilities
-* Provides up to 256 aggregative message per type
-* Provides 4 message types in dword
-* Message type pointers to byte offset
-* Backward Compatibility by using sizeof for the counters.
-* No lock requires for 32bit messages
-* Limitations:
-* In case of messages greater than 32bit, a dedicated mechanism(e.g lock)
-* is required to prevent data corruption.
-**********************************************************************/
enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_LINK_CHANGE,
MFW_DRV_MSG_FLR_FW_ACK_FAILED,
@@ -4085,37 +7312,33 @@ enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_LLDP_DATA_UPDATED,
MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED,
MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED,
- MFW_DRV_MSG_ERROR_RECOVERY,
+ MFW_DRV_MSG_RESERVED4,
MFW_DRV_MSG_BW_UPDATE,
- MFW_DRV_MSG_S_TAG_UPDATE,
- MFW_DRV_MSG_GET_LAN_STATS,
- MFW_DRV_MSG_GET_FCOE_STATS,
- MFW_DRV_MSG_GET_ISCSI_STATS,
- MFW_DRV_MSG_GET_RDMA_STATS,
- MFW_DRV_MSG_FAILURE_DETECTED,
+ MFW_DRV_MSG_BW_UPDATE5,
+ MFW_DRV_MSG_BW_UPDATE6,
+ MFW_DRV_MSG_BW_UPDATE7,
+ MFW_DRV_MSG_BW_UPDATE8,
+ MFW_DRV_MSG_BW_UPDATE9,
+ MFW_DRV_MSG_BW_UPDATE10,
MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
+ MFW_DRV_MSG_BW_UPDATE11,
MFW_DRV_MSG_MAX
};
-#define MFW_DRV_MSG_MAX_DWORDS(msgs) (((msgs - 1) >> 2) + 1)
-#define MFW_DRV_MSG_DWORD(msg_id) (msg_id >> 2)
-#define MFW_DRV_MSG_OFFSET(msg_id) ((msg_id & 0x3) << 3)
-#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id))
+#define MFW_DRV_MSG_MAX_DWORDS(msgs) (((msgs - 1) >> 2) + 1)
+#define MFW_DRV_MSG_DWORD(msg_id) (msg_id >> 2)
+#define MFW_DRV_MSG_OFFSET(msg_id) ((msg_id & 0x3) << 3)
+#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id))
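
Each message type above owns an 8-bit counter, packed four to a dword, and the removed comment block earlier describes the protocol: an event is pending while the MFW's counter differs from the driver's ack counter. A hedged sketch (mfw_mb is an illustrative pointer to the struct defined just below):

u16 msg_id = MFW_DRV_MSG_LINK_CHANGE;
u32 msgs = mfw_mb->msg[MFW_DRV_MSG_DWORD(msg_id)];	/* mfw_mb: illustrative */
u32 acks = mfw_mb->ack[MFW_DRV_MSG_DWORD(msg_id)];
bool pending = (msgs & MFW_DRV_MSG_MASK(msg_id)) !=
	       (acks & MFW_DRV_MSG_MASK(msg_id));
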
struct public_mfw_mb {
- u32 sup_msgs;
- u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
- u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+ u32 sup_msgs;
+ u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+ u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
};
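Reviewer note: each dword of msg[]/ack[] packs four 8-bit counters, one per message type, and a message is pending while its counter in msg[] differs from the driver's copy in ack[]. A sketch using the packing macros above (illustrative only, not part of the patch):

static inline bool qed_mfw_msg_pending(const struct public_mfw_mb *mb,
				       enum MFW_DRV_MSG_TYPE msg_id)
{
	u32 mask = MFW_DRV_MSG_MASK(msg_id);
	u32 dword = MFW_DRV_MSG_DWORD(msg_id);

	/* counters differ => at least one unacknowledged message */
	return (mb->msg[dword] & mask) != (mb->ack[dword] & mask);
}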
-/**************************************/
-/* */
-/* P U B L I C D A T A */
-/* */
-/**************************************/
enum public_sections {
- PUBLIC_DRV_MB, /* Points to the first drv_mb of path0 */
- PUBLIC_MFW_MB, /* Points to the first mfw_mb of path0 */
+ PUBLIC_DRV_MB,
+ PUBLIC_MFW_MB,
PUBLIC_GLOBAL,
PUBLIC_PATH,
PUBLIC_PORT,
@@ -4123,1080 +7346,179 @@ enum public_sections {
PUBLIC_MAX_SECTIONS
};
-struct drv_ver_info_stc {
- u32 ver;
- u8 name[32];
-};
-
struct mcp_public_data {
- /* The sections fields is an array */
- u32 num_sections;
- offsize_t sections[PUBLIC_MAX_SECTIONS];
- struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX];
- struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX];
- struct public_global global;
- struct public_path path[MCP_GLOB_PATH_MAX];
- struct public_port port[MCP_GLOB_PORT_MAX];
- struct public_func func[MCP_GLOB_FUNC_MAX];
- struct drv_ver_info_stc drv_info;
+ u32 num_sections;
+ u32 sections[PUBLIC_MAX_SECTIONS];
+ struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX];
+ struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX];
+ struct public_global global;
+ struct public_path path[MCP_GLOB_PATH_MAX];
+ struct public_port port[MCP_GLOB_PORT_MAX];
+ struct public_func func[MCP_GLOB_FUNC_MAX];
};
struct nvm_cfg_mac_address {
- u32 mac_addr_hi;
-#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF
-#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0
-
- u32 mac_addr_lo;
+ u32 mac_addr_hi;
+#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF
+#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0
+ u32 mac_addr_lo;
};
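Reviewer note: the MAC address is split across two dwords, the upper two octets in mac_addr_hi (under NVM_CFG_MAC_ADDRESS_HI_MASK) and the lower four in mac_addr_lo. A sketch of unpacking it, assuming the usual hi-word-first byte order (helper not part of the patch):

static void nvm_cfg_mac_to_bytes(const struct nvm_cfg_mac_address *a, u8 *mac)
{
	u32 hi = a->mac_addr_hi & NVM_CFG_MAC_ADDRESS_HI_MASK;

	mac[0] = (u8)(hi >> 8);
	mac[1] = (u8)hi;
	mac[2] = (u8)(a->mac_addr_lo >> 24);
	mac[3] = (u8)(a->mac_addr_lo >> 16);
	mac[4] = (u8)(a->mac_addr_lo >> 8);
	mac[5] = (u8)a->mac_addr_lo;
}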
-/******************************************
-* nvm_cfg1 structs
-******************************************/
-
struct nvm_cfg1_glob {
- u32 generic_cont0; /* 0x0 */
-#define NVM_CFG1_GLOB_BOARD_SWAP_MASK 0x0000000F
-#define NVM_CFG1_GLOB_BOARD_SWAP_OFFSET 0
-#define NVM_CFG1_GLOB_BOARD_SWAP_NONE 0x0
-#define NVM_CFG1_GLOB_BOARD_SWAP_PATH 0x1
-#define NVM_CFG1_GLOB_BOARD_SWAP_PORT 0x2
-#define NVM_CFG1_GLOB_BOARD_SWAP_BOTH 0x3
-#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000FF0
-#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4
-#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0
-#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1
-#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2
-#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3
-#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4
-#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5
-#define NVM_CFG1_GLOB_MF_MODE_BD 0x6
-#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7
-#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_MASK 0x00001000
-#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_OFFSET 12
-#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_DISABLED 0x0
-#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_ENABLED 0x1
-#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_MASK 0x001FE000
-#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_OFFSET 13
-#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_MASK 0x1FE00000
-#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_OFFSET 21
-#define NVM_CFG1_GLOB_ENABLE_SRIOV_MASK 0x20000000
-#define NVM_CFG1_GLOB_ENABLE_SRIOV_OFFSET 29
-#define NVM_CFG1_GLOB_ENABLE_SRIOV_DISABLED 0x0
-#define NVM_CFG1_GLOB_ENABLE_SRIOV_ENABLED 0x1
-#define NVM_CFG1_GLOB_ENABLE_ATC_MASK 0x40000000
-#define NVM_CFG1_GLOB_ENABLE_ATC_OFFSET 30
-#define NVM_CFG1_GLOB_ENABLE_ATC_DISABLED 0x0
-#define NVM_CFG1_GLOB_ENABLE_ATC_ENABLED 0x1
-#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_MASK 0x80000000
-#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_OFFSET 31
-#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_DISABLED 0x0
-#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_ENABLED 0x1
-
- u32 engineering_change[3]; /* 0x4 */
-
- u32 manufacturing_id; /* 0x10 */
-
- u32 serial_number[4]; /* 0x14 */
-
- u32 pcie_cfg; /* 0x24 */
-#define NVM_CFG1_GLOB_PCI_GEN_MASK 0x00000003
-#define NVM_CFG1_GLOB_PCI_GEN_OFFSET 0
-#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN1 0x0
-#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN2 0x1
-#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN3 0x2
-#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_MASK 0x00000004
-#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_OFFSET 2
-#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_DISABLED 0x0
-#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_ENABLED 0x1
-#define NVM_CFG1_GLOB_ASPM_SUPPORT_MASK 0x00000018
-#define NVM_CFG1_GLOB_ASPM_SUPPORT_OFFSET 3
-#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_ENABLED 0x0
-#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_DISABLED 0x1
-#define NVM_CFG1_GLOB_ASPM_SUPPORT_L1_DISABLED 0x2
-#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_DISABLED 0x3
-#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_MASK 0x00000020
-#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_OFFSET 5
-#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_DISABLED 0x0
-#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_ENABLED 0x1
-#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_MASK 0x000003C0
-#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_OFFSET 6
-#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_MASK 0x00001C00
-#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_OFFSET 10
-#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_HW 0x0
-#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_0DB 0x1
-#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_3_5DB 0x2
-#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_6_0DB 0x3
-#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_MASK 0x001FE000
-#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_OFFSET 13
-#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_MASK 0x1FE00000
-#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_OFFSET 21
-#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_MASK 0x60000000
-#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_OFFSET 29
-
- u32 mgmt_traffic; /* 0x28 */
-#define NVM_CFG1_GLOB_RESERVED60_MASK 0x00000001
-#define NVM_CFG1_GLOB_RESERVED60_OFFSET 0
-#define NVM_CFG1_GLOB_RESERVED60_100KHZ 0x0
-#define NVM_CFG1_GLOB_RESERVED60_400KHZ 0x1
-#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_MASK 0x000001FE
-#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_OFFSET 1
-#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_MASK 0x0001FE00
-#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_OFFSET 9
-#define NVM_CFG1_GLOB_SMBUS_ADDRESS_MASK 0x01FE0000
-#define NVM_CFG1_GLOB_SMBUS_ADDRESS_OFFSET 17
-#define NVM_CFG1_GLOB_SIDEBAND_MODE_MASK 0x06000000
-#define NVM_CFG1_GLOB_SIDEBAND_MODE_OFFSET 25
-#define NVM_CFG1_GLOB_SIDEBAND_MODE_DISABLED 0x0
-#define NVM_CFG1_GLOB_SIDEBAND_MODE_RMII 0x1
-#define NVM_CFG1_GLOB_SIDEBAND_MODE_SGMII 0x2
-
- u32 core_cfg; /* 0x2C */
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000FF
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G 0x0
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G 0x1
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G 0x2
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F 0x3
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E 0x4
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G 0x5
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G 0xB
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G 0xC
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G 0xD
-#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_MASK 0x00000100
-#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_OFFSET 8
-#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_DISABLED 0x0
-#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_ENABLED 0x1
-#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_MASK 0x00000200
-#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_OFFSET 9
-#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_DISABLED 0x0
-#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_ENABLED 0x1
-#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_MASK 0x0003FC00
-#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_OFFSET 10
-#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_MASK 0x03FC0000
-#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_OFFSET 18
-#define NVM_CFG1_GLOB_AVS_MODE_MASK 0x1C000000
-#define NVM_CFG1_GLOB_AVS_MODE_OFFSET 26
-#define NVM_CFG1_GLOB_AVS_MODE_CLOSE_LOOP 0x0
-#define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP 0x1
-#define NVM_CFG1_GLOB_AVS_MODE_DISABLED 0x3
-#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_MASK 0x60000000
-#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_OFFSET 29
-#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_DISABLED 0x0
-#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_ENABLED 0x1
-
- u32 e_lane_cfg1; /* 0x30 */
-#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F
-#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0
-#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0
-#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4
-#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00
-#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8
-#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000
-#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12
-#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000
-#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16
-#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000
-#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20
-#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000
-#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24
-#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000
-#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28
-
- u32 e_lane_cfg2; /* 0x34 */
-#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001
-#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0
-#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002
-#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1
-#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004
-#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2
-#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008
-#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3
-#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010
-#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4
-#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020
-#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5
-#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040
-#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6
-#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080
-#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7
-#define NVM_CFG1_GLOB_SMBUS_MODE_MASK 0x00000F00
-#define NVM_CFG1_GLOB_SMBUS_MODE_OFFSET 8
-#define NVM_CFG1_GLOB_SMBUS_MODE_DISABLED 0x0
-#define NVM_CFG1_GLOB_SMBUS_MODE_100KHZ 0x1
-#define NVM_CFG1_GLOB_SMBUS_MODE_400KHZ 0x2
-#define NVM_CFG1_GLOB_NCSI_MASK 0x0000F000
-#define NVM_CFG1_GLOB_NCSI_OFFSET 12
-#define NVM_CFG1_GLOB_NCSI_DISABLED 0x0
-#define NVM_CFG1_GLOB_NCSI_ENABLED 0x1
-
- u32 f_lane_cfg1; /* 0x38 */
-#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F
-#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0
-#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0
-#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4
-#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00
-#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8
-#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000
-#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12
-#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000
-#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16
-#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000
-#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20
-#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000
-#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24
-#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000
-#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28
-
- u32 f_lane_cfg2; /* 0x3C */
-#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001
-#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0
-#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002
-#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1
-#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004
-#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2
-#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008
-#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3
-#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010
-#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4
-#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020
-#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5
-#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040
-#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6
-#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080
-#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7
-
- u32 eagle_preemphasis; /* 0x40 */
-#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF
-#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0
-#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8
-#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16
-#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000
-#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24
-
- u32 eagle_driver_current; /* 0x44 */
-#define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF
-#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0
-#define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8
-#define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16
-#define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000
-#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24
-
- u32 falcon_preemphasis; /* 0x48 */
-#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF
-#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0
-#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8
-#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16
-#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000
-#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24
-
- u32 falcon_driver_current; /* 0x4C */
-#define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF
-#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0
-#define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8
-#define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16
-#define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000
-#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24
-
- u32 pci_id; /* 0x50 */
-#define NVM_CFG1_GLOB_VENDOR_ID_MASK 0x0000FFFF
-#define NVM_CFG1_GLOB_VENDOR_ID_OFFSET 0
-
- u32 pci_subsys_id; /* 0x54 */
-#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFF
-#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_OFFSET 0
-#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_MASK 0xFFFF0000
-#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_OFFSET 16
-
- u32 bar; /* 0x58 */
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_MASK 0x0000000F
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_OFFSET 0
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_DISABLED 0x0
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2K 0x1
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4K 0x2
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8K 0x3
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16K 0x4
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32K 0x5
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_64K 0x6
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_128K 0x7
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_256K 0x8
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_512K 0x9
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_1M 0xA
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2M 0xB
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4M 0xC
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8M 0xD
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16M 0xE
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32M 0xF
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_MASK 0x000000F0
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_OFFSET 4
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_DISABLED 0x0
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4K 0x1
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8K 0x2
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16K 0x3
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32K 0x4
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64K 0x5
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_128K 0x6
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_256K 0x7
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_512K 0x8
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_1M 0x9
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_2M 0xA
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4M 0xB
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8M 0xC
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16M 0xD
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32M 0xE
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64M 0xF
-#define NVM_CFG1_GLOB_BAR2_SIZE_MASK 0x00000F00
-#define NVM_CFG1_GLOB_BAR2_SIZE_OFFSET 8
-#define NVM_CFG1_GLOB_BAR2_SIZE_DISABLED 0x0
-#define NVM_CFG1_GLOB_BAR2_SIZE_64K 0x1
-#define NVM_CFG1_GLOB_BAR2_SIZE_128K 0x2
-#define NVM_CFG1_GLOB_BAR2_SIZE_256K 0x3
-#define NVM_CFG1_GLOB_BAR2_SIZE_512K 0x4
-#define NVM_CFG1_GLOB_BAR2_SIZE_1M 0x5
-#define NVM_CFG1_GLOB_BAR2_SIZE_2M 0x6
-#define NVM_CFG1_GLOB_BAR2_SIZE_4M 0x7
-#define NVM_CFG1_GLOB_BAR2_SIZE_8M 0x8
-#define NVM_CFG1_GLOB_BAR2_SIZE_16M 0x9
-#define NVM_CFG1_GLOB_BAR2_SIZE_32M 0xA
-#define NVM_CFG1_GLOB_BAR2_SIZE_64M 0xB
-#define NVM_CFG1_GLOB_BAR2_SIZE_128M 0xC
-#define NVM_CFG1_GLOB_BAR2_SIZE_256M 0xD
-#define NVM_CFG1_GLOB_BAR2_SIZE_512M 0xE
-#define NVM_CFG1_GLOB_BAR2_SIZE_1G 0xF
-
- u32 eagle_txfir_main; /* 0x5C */
-#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF
-#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0
-#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8
-#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16
-#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000
-#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24
-
- u32 eagle_txfir_post; /* 0x60 */
-#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF
-#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0
-#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8
-#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16
-#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000
-#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24
-
- u32 falcon_txfir_main; /* 0x64 */
-#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF
-#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0
-#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8
-#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16
-#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000
-#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24
-
- u32 falcon_txfir_post; /* 0x68 */
-#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF
-#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0
-#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8
-#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16
-#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000
-#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24
-
- u32 manufacture_ver; /* 0x6C */
-#define NVM_CFG1_GLOB_MANUF0_VER_MASK 0x0000003F
-#define NVM_CFG1_GLOB_MANUF0_VER_OFFSET 0
-#define NVM_CFG1_GLOB_MANUF1_VER_MASK 0x00000FC0
-#define NVM_CFG1_GLOB_MANUF1_VER_OFFSET 6
-#define NVM_CFG1_GLOB_MANUF2_VER_MASK 0x0003F000
-#define NVM_CFG1_GLOB_MANUF2_VER_OFFSET 12
-#define NVM_CFG1_GLOB_MANUF3_VER_MASK 0x00FC0000
-#define NVM_CFG1_GLOB_MANUF3_VER_OFFSET 18
-#define NVM_CFG1_GLOB_MANUF4_VER_MASK 0x3F000000
-#define NVM_CFG1_GLOB_MANUF4_VER_OFFSET 24
-
- u32 manufacture_time; /* 0x70 */
-#define NVM_CFG1_GLOB_MANUF0_TIME_MASK 0x0000003F
-#define NVM_CFG1_GLOB_MANUF0_TIME_OFFSET 0
-#define NVM_CFG1_GLOB_MANUF1_TIME_MASK 0x00000FC0
-#define NVM_CFG1_GLOB_MANUF1_TIME_OFFSET 6
-#define NVM_CFG1_GLOB_MANUF2_TIME_MASK 0x0003F000
-#define NVM_CFG1_GLOB_MANUF2_TIME_OFFSET 12
-
- u32 led_global_settings; /* 0x74 */
-#define NVM_CFG1_GLOB_LED_SWAP_0_MASK 0x0000000F
-#define NVM_CFG1_GLOB_LED_SWAP_0_OFFSET 0
-#define NVM_CFG1_GLOB_LED_SWAP_1_MASK 0x000000F0
-#define NVM_CFG1_GLOB_LED_SWAP_1_OFFSET 4
-#define NVM_CFG1_GLOB_LED_SWAP_2_MASK 0x00000F00
-#define NVM_CFG1_GLOB_LED_SWAP_2_OFFSET 8
-#define NVM_CFG1_GLOB_LED_SWAP_3_MASK 0x0000F000
-#define NVM_CFG1_GLOB_LED_SWAP_3_OFFSET 12
-
- u32 generic_cont1; /* 0x78 */
-#define NVM_CFG1_GLOB_AVS_DAC_CODE_MASK 0x000003FF
-#define NVM_CFG1_GLOB_AVS_DAC_CODE_OFFSET 0
-
- u32 mbi_version; /* 0x7C */
-#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000FF
-#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0
-#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8
-#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16
-
- u32 mbi_date; /* 0x80 */
-
- u32 misc_sig; /* 0x84 */
-
- /* Define the GPIO mapping to switch i2c mux */
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_MASK 0x000000FF
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_OFFSET 0
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_OFFSET 8
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__NA 0x0
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO0 0x1
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO1 0x2
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO2 0x3
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO3 0x4
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO4 0x5
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO5 0x6
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO6 0x7
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO7 0x8
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO8 0x9
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO9 0xA
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO10 0xB
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO11 0xC
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO12 0xD
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO13 0xE
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO14 0xF
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO15 0x10
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO16 0x11
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO17 0x12
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO18 0x13
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO19 0x14
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO20 0x15
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO21 0x16
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO22 0x17
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO23 0x18
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO24 0x19
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO25 0x1A
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO26 0x1B
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO27 0x1C
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO28 0x1D
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO29 0x1E
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO30 0x1F
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO31 0x20
- u32 device_capabilities; /* 0x88 */
-#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1
- u32 power_dissipated; /* 0x8C */
- u32 power_consumed; /* 0x90 */
- u32 efi_version; /* 0x94 */
- u32 reserved[42]; /* 0x98 */
+ u32 generic_cont0;
+#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000FF0
+#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4
+#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0
+#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1
+#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4
+#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5
+#define NVM_CFG1_GLOB_MF_MODE_BD 0x6
+#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7
+ u32 engineering_change[3];
+ u32 manufacturing_id;
+ u32 serial_number[4];
+ u32 pcie_cfg;
+ u32 mgmt_traffic;
+ u32 core_cfg;
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000FF
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G 0x0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G 0x1
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G 0x2
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F 0x3
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E 0x4
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G 0x5
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G 0xB
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xC
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xD
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xE
+ u32 e_lane_cfg1;
+ u32 e_lane_cfg2;
+ u32 f_lane_cfg1;
+ u32 f_lane_cfg2;
+ u32 mps10_preemphasis;
+ u32 mps10_driver_current;
+ u32 mps25_preemphasis;
+ u32 mps25_driver_current;
+ u32 pci_id;
+ u32 pci_subsys_id;
+ u32 bar;
+ u32 mps10_txfir_main;
+ u32 mps10_txfir_post;
+ u32 mps25_txfir_main;
+ u32 mps25_txfir_post;
+ u32 manufacture_ver;
+ u32 manufacture_time;
+ u32 led_global_settings;
+ u32 generic_cont1;
+ u32 mbi_version;
+ u32 mbi_date;
+ u32 misc_sig;
+ u32 device_capabilities;
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI 0x4
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE 0x8
+ u32 power_dissipated;
+ u32 power_consumed;
+ u32 efi_version;
+ u32 multi_network_modes_capability;
+ u32 reserved[41];
};
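Reviewer note: every nvm_cfg option follows the same _MASK/_OFFSET convention, so a field always reads as (dword & MASK) >> OFFSET. For example, the multi-function mode (sketch; 'glob' is an assumed pointer to this struct):

	u32 mf_mode = (glob->generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
		      NVM_CFG1_GLOB_MF_MODE_OFFSET; /* one of the MF_MODE_* values */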
struct nvm_cfg1_path {
- u32 reserved[30]; /* 0x0 */
+ u32 reserved[30];
};
struct nvm_cfg1_port {
- u32 reserved__m_relocated_to_option_123; /* 0x0 */
- u32 reserved__m_relocated_to_option_124; /* 0x4 */
- u32 generic_cont0; /* 0x8 */
-#define NVM_CFG1_PORT_LED_MODE_MASK 0x000000FF
-#define NVM_CFG1_PORT_LED_MODE_OFFSET 0
-#define NVM_CFG1_PORT_LED_MODE_MAC1 0x0
-#define NVM_CFG1_PORT_LED_MODE_PHY1 0x1
-#define NVM_CFG1_PORT_LED_MODE_PHY2 0x2
-#define NVM_CFG1_PORT_LED_MODE_PHY3 0x3
-#define NVM_CFG1_PORT_LED_MODE_MAC2 0x4
-#define NVM_CFG1_PORT_LED_MODE_PHY4 0x5
-#define NVM_CFG1_PORT_LED_MODE_PHY5 0x6
-#define NVM_CFG1_PORT_LED_MODE_PHY6 0x7
-#define NVM_CFG1_PORT_LED_MODE_MAC3 0x8
-#define NVM_CFG1_PORT_LED_MODE_PHY7 0x9
-#define NVM_CFG1_PORT_LED_MODE_PHY8 0xA
-#define NVM_CFG1_PORT_LED_MODE_PHY9 0xB
-#define NVM_CFG1_PORT_LED_MODE_MAC4 0xC
-#define NVM_CFG1_PORT_LED_MODE_PHY10 0xD
-#define NVM_CFG1_PORT_LED_MODE_PHY11 0xE
-#define NVM_CFG1_PORT_LED_MODE_PHY12 0xF
-#define NVM_CFG1_PORT_ROCE_PRIORITY_MASK 0x0000FF00
-#define NVM_CFG1_PORT_ROCE_PRIORITY_OFFSET 8
-#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000F0000
-#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16
-#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0
-#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1
-#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2
-#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00F00000
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1
- u32 pcie_cfg; /* 0xC */
-#define NVM_CFG1_PORT_RESERVED15_MASK 0x00000007
-#define NVM_CFG1_PORT_RESERVED15_OFFSET 0
-
- u32 features; /* 0x10 */
-#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_MASK 0x00000001
-#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_OFFSET 0
-#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_DISABLED 0x0
-#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_ENABLED 0x1
-#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_MASK 0x00000002
-#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_OFFSET 1
-#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_DISABLED 0x0
-#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_ENABLED 0x1
-
- u32 speed_cap_mask; /* 0x14 */
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000FFFF
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G 0x40
-#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_MASK 0xFFFF0000
-#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_OFFSET 16
-#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_1G 0x1
-#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_10G 0x2
-#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_25G 0x8
-#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_40G 0x10
-#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_50G 0x20
-#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_100G 0x40
-
- u32 link_settings; /* 0x18 */
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000F
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_100G 0x7
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_MASK 0x00000780
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_OFFSET 7
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_AUTONEG 0x0
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_1G 0x1
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_10G 0x2
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_25G 0x4
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_40G 0x5
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_50G 0x6
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_100G 0x7
-#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_MASK 0x00003800
-#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_OFFSET 11
-#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_AUTONEG 0x1
-#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_RX 0x2
-#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_TX 0x4
-#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_MASK 0x00004000
-#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_OFFSET 14
-#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_DISABLED 0x0
-#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_ENABLED 0x1
-
- u32 phy_cfg; /* 0x1C */
-#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_MASK 0x0000FFFF
-#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_OFFSET 0
-#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_HIGIG 0x1
-#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_SCRAMBLER 0x2
-#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_FIBER 0x4
-#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_CL72_AN 0x8
-#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_FEC_AN 0x10
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_MASK 0x00FF0000
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_OFFSET 16
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_BYPASS 0x0
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR 0x2
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR2 0x3
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR4 0x4
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XFI 0x8
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SFI 0x9
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_1000X 0xB
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SGMII 0xC
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI 0x11
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI 0x12
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI 0x21
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI 0x22
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_25GAUI 0x31
-#define NVM_CFG1_PORT_AN_MODE_MASK 0xFF000000
-#define NVM_CFG1_PORT_AN_MODE_OFFSET 24
-#define NVM_CFG1_PORT_AN_MODE_NONE 0x0
-#define NVM_CFG1_PORT_AN_MODE_CL73 0x1
-#define NVM_CFG1_PORT_AN_MODE_CL37 0x2
-#define NVM_CFG1_PORT_AN_MODE_CL73_BAM 0x3
-#define NVM_CFG1_PORT_AN_MODE_CL37_BAM 0x4
-#define NVM_CFG1_PORT_AN_MODE_HPAM 0x5
-#define NVM_CFG1_PORT_AN_MODE_SGMII 0x6
-
- u32 mgmt_traffic; /* 0x20 */
-#define NVM_CFG1_PORT_RESERVED61_MASK 0x0000000F
-#define NVM_CFG1_PORT_RESERVED61_OFFSET 0
-
- u32 ext_phy; /* 0x24 */
-#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_MASK 0x000000FF
-#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_OFFSET 0
-#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_NONE 0x0
-#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM84844 0x1
-#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_MASK 0x0000FF00
-#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET 8
-
- u32 mba_cfg1; /* 0x28 */
-#define NVM_CFG1_PORT_PREBOOT_OPROM_MASK 0x00000001
-#define NVM_CFG1_PORT_PREBOOT_OPROM_OFFSET 0
-#define NVM_CFG1_PORT_PREBOOT_OPROM_DISABLED 0x0
-#define NVM_CFG1_PORT_PREBOOT_OPROM_ENABLED 0x1
-#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_MASK 0x00000006
-#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_OFFSET 1
-#define NVM_CFG1_PORT_MBA_DELAY_TIME_MASK 0x00000078
-#define NVM_CFG1_PORT_MBA_DELAY_TIME_OFFSET 3
-#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_MASK 0x00000080
-#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_OFFSET 7
-#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_S 0x0
-#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_B 0x1
-#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_MASK 0x00000100
-#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_OFFSET 8
-#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_DISABLED 0x0
-#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_ENABLED 0x1
-#define NVM_CFG1_PORT_RESERVED5_MASK 0x0001FE00
-#define NVM_CFG1_PORT_RESERVED5_OFFSET 9
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_MASK 0x001E0000
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_OFFSET 17
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_AUTONEG 0x0
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_1G 0x1
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_10G 0x2
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_25G 0x4
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_40G 0x5
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_50G 0x6
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_100G 0x7
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_SMARTLINQ 0x8
-#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_MASK 0x00E00000
-#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_OFFSET 21
-
- u32 mba_cfg2; /* 0x2C */
-#define NVM_CFG1_PORT_RESERVED65_MASK 0x0000FFFF
-#define NVM_CFG1_PORT_RESERVED65_OFFSET 0
-#define NVM_CFG1_PORT_RESERVED66_MASK 0x00010000
-#define NVM_CFG1_PORT_RESERVED66_OFFSET 16
-
- u32 vf_cfg; /* 0x30 */
-#define NVM_CFG1_PORT_RESERVED8_MASK 0x0000FFFF
-#define NVM_CFG1_PORT_RESERVED8_OFFSET 0
-#define NVM_CFG1_PORT_RESERVED6_MASK 0x000F0000
-#define NVM_CFG1_PORT_RESERVED6_OFFSET 16
-
- struct nvm_cfg_mac_address lldp_mac_address; /* 0x34 */
-
- u32 led_port_settings; /* 0x3C */
-#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_MASK 0x000000FF
-#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_OFFSET 0
-#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_MASK 0x0000FF00
-#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_OFFSET 8
-#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_MASK 0x00FF0000
-#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_OFFSET 16
-#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_1G 0x1
-#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_10G 0x2
-#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_25G 0x8
-#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_40G 0x10
-#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_50G 0x20
-#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_100G 0x40
-
- u32 transceiver_00; /* 0x40 */
-
- /* Define for mapping of transceiver signal module absent */
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_MASK 0x000000FF
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_OFFSET 0
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_NA 0x0
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO0 0x1
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO1 0x2
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO2 0x3
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO3 0x4
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO4 0x5
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO5 0x6
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO6 0x7
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO7 0x8
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO8 0x9
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO9 0xA
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO10 0xB
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO11 0xC
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO12 0xD
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO13 0xE
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO14 0xF
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO15 0x10
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO16 0x11
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO17 0x12
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO18 0x13
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO19 0x14
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO20 0x15
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO21 0x16
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO22 0x17
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO23 0x18
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO24 0x19
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO25 0x1A
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO26 0x1B
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO27 0x1C
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO28 0x1D
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO29 0x1E
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO30 0x1F
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO31 0x20
- /* Define the GPIO mux settings to switch i2c mux to this port */
-#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_MASK 0x00000F00
-#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_OFFSET 8
-#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_MASK 0x0000F000
-#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_OFFSET 12
-
- u32 reserved[133]; /* 0x44 */
+ u32 reserved__m_relocated_to_option_123;
+ u32 reserved__m_relocated_to_option_124;
+ u32 generic_cont0;
+#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000F0000
+#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16
+#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0
+#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1
+#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2
+#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00F00000
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE 0x2
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI 0x4
+ u32 pcie_cfg;
+ u32 features;
+ u32 speed_cap_mask;
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000FFFF
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40
+ u32 link_settings;
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000F
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G 0x7
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_SMARTLINQ 0x8
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4
+ u32 phy_cfg;
+ u32 mgmt_traffic;
+ u32 ext_phy;
+ u32 mba_cfg1;
+ u32 mba_cfg2;
+ u32 vf_cfg;
+ struct nvm_cfg_mac_address lldp_mac_address;
+ u32 led_port_settings;
+ u32 transceiver_00;
+ u32 device_ids;
+ u32 board_cfg;
+ u32 mnm_10g_cap;
+ u32 mnm_10g_ctrl;
+ u32 mnm_10g_misc;
+ u32 mnm_25g_cap;
+ u32 mnm_25g_ctrl;
+ u32 mnm_25g_misc;
+ u32 mnm_40g_cap;
+ u32 mnm_40g_ctrl;
+ u32 mnm_40g_misc;
+ u32 mnm_50g_cap;
+ u32 mnm_50g_ctrl;
+ u32 mnm_50g_misc;
+ u32 mnm_100g_cap;
+ u32 mnm_100g_ctrl;
+ u32 mnm_100g_misc;
+ u32 reserved[116];
};
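Reviewer note: unlike the enum-valued fields, speed_cap_mask holds a bit mask, so capabilities are tested bit by bit after extraction (sketch; 'port' is an assumed pointer to this struct):

	u32 caps = (port->speed_cap_mask &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK) >>
		   NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET;

	if (caps & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		; /* NVRAM permits advertising 25G on this port */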
struct nvm_cfg1_func {
- struct nvm_cfg_mac_address mac_address; /* 0x0 */
-
- u32 rsrv1; /* 0x8 */
-#define NVM_CFG1_FUNC_RESERVED1_MASK 0x0000FFFF
-#define NVM_CFG1_FUNC_RESERVED1_OFFSET 0
-#define NVM_CFG1_FUNC_RESERVED2_MASK 0xFFFF0000
-#define NVM_CFG1_FUNC_RESERVED2_OFFSET 16
-
- u32 rsrv2; /* 0xC */
-#define NVM_CFG1_FUNC_RESERVED3_MASK 0x0000FFFF
-#define NVM_CFG1_FUNC_RESERVED3_OFFSET 0
-#define NVM_CFG1_FUNC_RESERVED4_MASK 0xFFFF0000
-#define NVM_CFG1_FUNC_RESERVED4_OFFSET 16
-
- u32 device_id; /* 0x10 */
-#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK 0x0000FFFF
-#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET 0
-#define NVM_CFG1_FUNC_RESERVED77_MASK 0xFFFF0000
-#define NVM_CFG1_FUNC_RESERVED77_OFFSET 16
-
- u32 cmn_cfg; /* 0x14 */
-#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_MASK 0x00000007
-#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_OFFSET 0
-#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_PXE 0x0
-#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_ISCSI_BOOT 0x3
-#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_FCOE_BOOT 0x4
-#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_NONE 0x7
-#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_MASK 0x0007FFF8
-#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_OFFSET 3
-#define NVM_CFG1_FUNC_PERSONALITY_MASK 0x00780000
-#define NVM_CFG1_FUNC_PERSONALITY_OFFSET 19
-#define NVM_CFG1_FUNC_PERSONALITY_ETHERNET 0x0
-#define NVM_CFG1_FUNC_PERSONALITY_ISCSI 0x1
-#define NVM_CFG1_FUNC_PERSONALITY_FCOE 0x2
-#define NVM_CFG1_FUNC_PERSONALITY_ROCE 0x3
-#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_MASK 0x7F800000
-#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_OFFSET 23
-#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_MASK 0x80000000
-#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_OFFSET 31
-#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_DISABLED 0x0
-#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_ENABLED 0x1
-
- u32 pci_cfg; /* 0x18 */
-#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_MASK 0x0000007F
-#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_OFFSET 0
-#define NVM_CFG1_FUNC_RESERVESD12_MASK 0x00003F80
-#define NVM_CFG1_FUNC_RESERVESD12_OFFSET 7
-#define NVM_CFG1_FUNC_BAR1_SIZE_MASK 0x0003C000
-#define NVM_CFG1_FUNC_BAR1_SIZE_OFFSET 14
-#define NVM_CFG1_FUNC_BAR1_SIZE_DISABLED 0x0
-#define NVM_CFG1_FUNC_BAR1_SIZE_64K 0x1
-#define NVM_CFG1_FUNC_BAR1_SIZE_128K 0x2
-#define NVM_CFG1_FUNC_BAR1_SIZE_256K 0x3
-#define NVM_CFG1_FUNC_BAR1_SIZE_512K 0x4
-#define NVM_CFG1_FUNC_BAR1_SIZE_1M 0x5
-#define NVM_CFG1_FUNC_BAR1_SIZE_2M 0x6
-#define NVM_CFG1_FUNC_BAR1_SIZE_4M 0x7
-#define NVM_CFG1_FUNC_BAR1_SIZE_8M 0x8
-#define NVM_CFG1_FUNC_BAR1_SIZE_16M 0x9
-#define NVM_CFG1_FUNC_BAR1_SIZE_32M 0xA
-#define NVM_CFG1_FUNC_BAR1_SIZE_64M 0xB
-#define NVM_CFG1_FUNC_BAR1_SIZE_128M 0xC
-#define NVM_CFG1_FUNC_BAR1_SIZE_256M 0xD
-#define NVM_CFG1_FUNC_BAR1_SIZE_512M 0xE
-#define NVM_CFG1_FUNC_BAR1_SIZE_1G 0xF
-#define NVM_CFG1_FUNC_MAX_BANDWIDTH_MASK 0x03FC0000
-#define NVM_CFG1_FUNC_MAX_BANDWIDTH_OFFSET 18
-
- struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr; /* 0x1C */
-
- struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr; /* 0x24 */
- u32 preboot_generic_cfg; /* 0x2C */
- u32 reserved[8]; /* 0x30 */
+ struct nvm_cfg_mac_address mac_address;
+ u32 rsrv1;
+ u32 rsrv2;
+ u32 device_id;
+ u32 cmn_cfg;
+ u32 pci_cfg;
+ struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr;
+ struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr;
+ u32 preboot_generic_cfg;
+ u32 reserved[8];
};
struct nvm_cfg1 {
- struct nvm_cfg1_glob glob; /* 0x0 */
-
- struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; /* 0x140 */
-
- struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX]; /* 0x230 */
-
- struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX]; /* 0xB90 */
-};
-
-/******************************************
-* nvm_cfg structs
-******************************************/
-
-enum nvm_cfg_sections {
- NVM_CFG_SECTION_NVM_CFG1,
- NVM_CFG_SECTION_MAX
-};
-
-struct nvm_cfg {
- u32 num_sections;
- u32 sections_offset[NVM_CFG_SECTION_MAX];
- struct nvm_cfg1 cfg1;
-};
-
-#define PORT_0 0
-#define PORT_1 1
-#define PORT_2 2
-#define PORT_3 3
-
-extern struct spad_layout g_spad;
-
-#define MCP_SPAD_SIZE 0x00028000 /* 160 KB */
-
-#define SPAD_OFFSET(addr) (((u32)addr - (u32)CPU_SPAD_BASE))
-
-#define TO_OFFSIZE(_offset, _size) \
- (u32)((((u32)(_offset) >> 2) << OFFSIZE_OFFSET_SHIFT) | \
- (((u32)(_size) >> 2) << OFFSIZE_SIZE_SHIFT))
-
-enum spad_sections {
- SPAD_SECTION_TRACE,
- SPAD_SECTION_NVM_CFG,
- SPAD_SECTION_PUBLIC,
- SPAD_SECTION_PRIVATE,
- SPAD_SECTION_MAX
-};
-
-struct spad_layout {
- struct nvm_cfg nvm_cfg;
- struct mcp_public_data public_data;
+ struct nvm_cfg1_glob glob;
+ struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX];
+ struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX];
+ struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX];
};
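Reviewer note: the four members are indexed by path, port and PF id, so one function's NVRAM view is the shared glob plus its own slices (sketch; index variables are assumed):

	const struct nvm_cfg1_func *f = &cfg1->func[pf_id];   /* pf_id < MCP_GLOB_FUNC_MAX */
	const struct nvm_cfg1_port *p = &cfg1->port[port_id]; /* port_id < MCP_GLOB_PORT_MAX */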
-
-#define CRC_MAGIC_VALUE 0xDEBB20E3
-#define CRC32_POLYNOMIAL 0xEDB88320
-#define NVM_CRC_SIZE (sizeof(u32))
-
-enum nvm_sw_arbitrator {
- NVM_SW_ARB_HOST,
- NVM_SW_ARB_MCP,
- NVM_SW_ARB_UART,
- NVM_SW_ARB_RESERVED
-};
-
-/****************************************************************************
-* Boot Strap Region *
-****************************************************************************/
-struct legacy_bootstrap_region {
- u32 magic_value;
-#define NVM_MAGIC_VALUE 0x669955aa
- u32 sram_start_addr;
- u32 code_len; /* boot code length (in dwords) */
- u32 code_start_addr;
- u32 crc; /* 32-bit CRC */
-};
-
-/****************************************************************************
-* Directories Region *
-****************************************************************************/
-struct nvm_code_entry {
- u32 image_type; /* Image type */
- u32 nvm_start_addr; /* NVM address of the image */
- u32 len; /* Include CRC */
- u32 sram_start_addr;
- u32 sram_run_addr; /* Relevant in case of MIM only */
-};
-
-enum nvm_image_type {
- NVM_TYPE_TIM1 = 0x01,
- NVM_TYPE_TIM2 = 0x02,
- NVM_TYPE_MIM1 = 0x03,
- NVM_TYPE_MIM2 = 0x04,
- NVM_TYPE_MBA = 0x05,
- NVM_TYPE_MODULES_PN = 0x06,
- NVM_TYPE_VPD = 0x07,
- NVM_TYPE_MFW_TRACE1 = 0x08,
- NVM_TYPE_MFW_TRACE2 = 0x09,
- NVM_TYPE_NVM_CFG1 = 0x0a,
- NVM_TYPE_L2B = 0x0b,
- NVM_TYPE_DIR1 = 0x0c,
- NVM_TYPE_EAGLE_FW1 = 0x0d,
- NVM_TYPE_FALCON_FW1 = 0x0e,
- NVM_TYPE_PCIE_FW1 = 0x0f,
- NVM_TYPE_HW_SET = 0x10,
- NVM_TYPE_LIM = 0x11,
- NVM_TYPE_AVS_FW1 = 0x12,
- NVM_TYPE_DIR2 = 0x13,
- NVM_TYPE_CCM = 0x14,
- NVM_TYPE_EAGLE_FW2 = 0x15,
- NVM_TYPE_FALCON_FW2 = 0x16,
- NVM_TYPE_PCIE_FW2 = 0x17,
- NVM_TYPE_AVS_FW2 = 0x18,
-
- NVM_TYPE_MAX,
-};
-
-#define MAX_NVM_DIR_ENTRIES 200
-
-struct nvm_dir {
- s32 seq;
-#define NVM_DIR_NEXT_MFW_MASK 0x00000001
-#define NVM_DIR_SEQ_MASK 0xfffffffe
-#define NVM_DIR_NEXT_MFW(seq) ((seq) & NVM_DIR_NEXT_MFW_MASK)
-
-#define IS_DIR_SEQ_VALID(seq) ((seq & NVM_DIR_SEQ_MASK) != NVM_DIR_SEQ_MASK)
-
- u32 num_images;
- u32 rsrv;
- struct nvm_code_entry code[1]; /* Up to MAX_NVM_DIR_ENTRIES */
-};
-
-#define NVM_DIR_SIZE(_num_images) (sizeof(struct nvm_dir) + \
- (_num_images - \
- 1) * sizeof(struct nvm_code_entry) + \
- NVM_CRC_SIZE)
-
-struct nvm_vpd_image {
- u32 format_revision;
-#define VPD_IMAGE_VERSION 1
-
- /* This array length depends on the number of VPD fields */
- u8 vpd_data[1];
-};
-
-/****************************************************************************
-* NVRAM FULL MAP *
-****************************************************************************/
-#define DIR_ID_1 (0)
-#define DIR_ID_2 (1)
-#define MAX_DIR_IDS (2)
-
-#define MFW_BUNDLE_1 (0)
-#define MFW_BUNDLE_2 (1)
-#define MAX_MFW_BUNDLES (2)
-
-#define FLASH_PAGE_SIZE 0x1000
-#define NVM_DIR_MAX_SIZE (FLASH_PAGE_SIZE) /* 4Kb */
-#define ASIC_MIM_MAX_SIZE (300 * FLASH_PAGE_SIZE) /* 1.2Mb */
-#define FPGA_MIM_MAX_SIZE (25 * FLASH_PAGE_SIZE) /* 60Kb */
-
-#define LIM_MAX_SIZE ((2 * \
- FLASH_PAGE_SIZE) - \
- sizeof(struct legacy_bootstrap_region) - \
- NVM_RSV_SIZE)
-#define LIM_OFFSET (NVM_OFFSET(lim_image))
-#define NVM_RSV_SIZE (44)
-#define MIM_MAX_SIZE(is_asic) ((is_asic) ? ASIC_MIM_MAX_SIZE : \
- FPGA_MIM_MAX_SIZE)
-#define MIM_OFFSET(idx, is_asic) (NVM_OFFSET(dir[MAX_MFW_BUNDLES]) + \
- ((idx == \
- NVM_TYPE_MIM2) ? MIM_MAX_SIZE(is_asic) : 0))
-#define NVM_FIXED_AREA_SIZE(is_asic) (sizeof(struct nvm_image) + \
- MIM_MAX_SIZE(is_asic) * 2)
-
-union nvm_dir_union {
- struct nvm_dir dir;
- u8 page[FLASH_PAGE_SIZE];
-};
-
-/* Address
- * +-------------------+ 0x000000
- * | Bootstrap: |
- * | magic_number |
- * | sram_start_addr |
- * | code_len |
- * | code_start_addr |
- * | crc |
- * +-------------------+ 0x000014
- * | rsrv |
- * +-------------------+ 0x000040
- * | LIM |
- * +-------------------+ 0x002000
- * | Dir1 |
- * +-------------------+ 0x003000
- * | Dir2 |
- * +-------------------+ 0x004000
- * | MIM1 |
- * +-------------------+ 0x130000
- * | MIM2 |
- * +-------------------+ 0x25C000
- * | Rest Images: |
- * | TIM1/2 |
- * | MFW_TRACE1/2 |
- * | Eagle/Falcon FW |
- * | PCIE/AVS FW |
- * | MBA/CCM/L2B |
- * | VPD |
- * | optic_modules |
- * | ... |
- * +-------------------+ 0x400000
- */
-struct nvm_image {
-/*********** !!! FIXED SECTIONS !!! DO NOT MODIFY !!! **********************/
- /* NVM Offset (size) */
- struct legacy_bootstrap_region bootstrap;
- u8 rsrv[NVM_RSV_SIZE];
- u8 lim_image[LIM_MAX_SIZE];
- union nvm_dir_union dir[MAX_MFW_BUNDLES];
-
- /* MIM1_IMAGE 0x004000 (0x12c000) */
- /* MIM2_IMAGE 0x130000 (0x12c000) */
-/*********** !!! FIXED SECTIONS !!! DO NOT MODIFY !!! **********************/
-}; /* 0x134 */
-
-#define NVM_OFFSET(f) ((u32_t)((int_ptr_t)(&(((struct nvm_image *)0)->f))))
-
-struct hw_set_info {
- u32 reg_type;
-#define GRC_REG_TYPE 1
-#define PHY_REG_TYPE 2
-#define PCI_REG_TYPE 4
-
- u32 bank_num;
- u32 pf_num;
- u32 operation;
-#define READ_OP 1
-#define WRITE_OP 2
-#define RMW_SET_OP 3
-#define RMW_CLR_OP 4
-
- u32 reg_addr;
- u32 reg_data;
-
- u32 reset_type;
-#define POR_RESET_TYPE BIT(0)
-#define HARD_RESET_TYPE BIT(1)
-#define CORE_RESET_TYPE BIT(2)
-#define MCP_RESET_TYPE BIT(3)
-#define PERSET_ASSERT BIT(4)
-#define PERSET_DEASSERT BIT(5)
-};
-
-struct hw_set_image {
- u32 format_version;
-#define HW_SET_IMAGE_VERSION 1
- u32 no_hw_sets;
-
- /* This array length depends on the no_hw_sets */
- struct hw_set_info hw_sets[1];
-};
-
-int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- u8 pf_id, u16 pf_wfq);
-int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq);
#endif
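Reviewer note: the deleted NVM_OFFSET() above was an open-coded offsetof(); if this map ever returns, the standard macro would be the idiomatic form (sketch only):

#define NVM_OFFSET(f) ((u32)offsetof(struct nvm_image, f))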
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index 0ada7fdb9..e17885321 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -446,7 +446,7 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn,
idx_cmd,
le32_to_cpu(command->opcode),
le16_to_cpu(command->opcode_b),
- le16_to_cpu(command->length),
+ le16_to_cpu(command->length_dw),
le32_to_cpu(command->src_addr_hi),
le32_to_cpu(command->src_addr_lo),
le32_to_cpu(command->dst_addr_hi),
@@ -461,7 +461,7 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn,
idx_cmd,
le32_to_cpu(command->opcode),
le16_to_cpu(command->opcode_b),
- le16_to_cpu(command->length),
+ le16_to_cpu(command->length_dw),
le32_to_cpu(command->src_addr_hi),
le32_to_cpu(command->src_addr_lo),
le32_to_cpu(command->dst_addr_hi),
@@ -645,7 +645,7 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
- cmd->length = cpu_to_le16((u16)length);
+ cmd->length_dw = cpu_to_le16((u16)length);
qed_dmae_post_command(p_hwfn, p_ptt);
@@ -769,6 +769,29 @@ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
}
int
+qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 grc_addr,
+ dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
+{
+ u32 grc_addr_in_dw = grc_addr / sizeof(u32);
+ struct qed_dmae_params params;
+ int rc;
+
+ memset(&params, 0, sizeof(struct qed_dmae_params));
+ params.flags = flags;
+
+ mutex_lock(&p_hwfn->dmae_info.mutex);
+
+ rc = qed_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
+ dest_addr, QED_DMAE_ADDRESS_GRC,
+ QED_DMAE_ADDRESS_HOST_VIRT,
+ size_in_dwords, &params);
+
+ mutex_unlock(&p_hwfn->dmae_info.mutex);
+
+ return rc;
+}
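Reviewer note: a usage sketch for the new helper, reading 'len' dwords from a GRC address into a DMA-coherent buffer; cdev, p_hwfn, p_ptt, grc_addr and len are assumed to be in scope, and the buffer handling is illustrative rather than part of the patch:

	dma_addr_t phys;
	u32 *buf = dma_alloc_coherent(&cdev->pdev->dev, len * sizeof(u32),
				      &phys, GFP_KERNEL);

	if (buf) {
		if (!qed_dmae_grc2host(p_hwfn, p_ptt, grc_addr, phys, len, 0))
			; /* buf[0..len-1] now holds the GRC contents */
		dma_free_coherent(&cdev->pdev->dev, len * sizeof(u32),
				  buf, phys);
	}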
+
+int
qed_dmae_host2host(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
dma_addr_t source_addr,
@@ -791,16 +814,16 @@ qed_dmae_host2host(struct qed_hwfn *p_hwfn,
}
u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
- enum protocol_type proto,
- union qed_qm_pq_params *p_params)
+ enum protocol_type proto, union qed_qm_pq_params *p_params)
{
u16 pq_id = 0;
- if ((proto == PROTOCOLID_CORE || proto == PROTOCOLID_ETH) &&
- !p_params) {
+ if ((proto == PROTOCOLID_CORE ||
+ proto == PROTOCOLID_ETH ||
+ proto == PROTOCOLID_ISCSI ||
+ proto == PROTOCOLID_ROCE) && !p_params) {
DP_NOTICE(p_hwfn,
- "Protocol %d received NULL PQ params\n",
- proto);
+ "Protocol %d received NULL PQ params\n", proto);
return 0;
}
@@ -808,6 +831,8 @@ u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
case PROTOCOLID_CORE:
if (p_params->core.tc == LB_TC)
pq_id = p_hwfn->qm_info.pure_lb_pq;
+ else if (p_params->core.tc == OOO_LB_TC)
+ pq_id = p_hwfn->qm_info.ooo_pq;
else
pq_id = p_hwfn->qm_info.offload_pq;
break;
@@ -817,6 +842,18 @@ u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
pq_id += p_hwfn->qm_info.vf_queues_offset +
p_params->eth.vf_id;
break;
+ case PROTOCOLID_ISCSI:
+ if (p_params->iscsi.q_idx == 1)
+ pq_id = p_hwfn->qm_info.pure_ack_pq;
+ break;
+ case PROTOCOLID_ROCE:
+ if (p_params->roce.dcqcn)
+ pq_id = p_params->roce.qpid;
+ else
+ pq_id = p_hwfn->qm_info.offload_pq;
+ if (pq_id > p_hwfn->qm_info.num_pf_rls)
+ pq_id = p_hwfn->qm_info.offload_pq;
+ break;
default:
pq_id = 0;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
index 4367363ad..d01557092 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -254,6 +254,10 @@ void qed_dmae_info_free(struct qed_hwfn *p_hwfn);
union qed_qm_pq_params {
struct {
+ u8 q_idx;
+ } iscsi;
+
+ struct {
u8 tc;
} core;
@@ -262,11 +266,15 @@ union qed_qm_pq_params {
u8 vf_id;
u8 tc;
} eth;
+
+ struct {
+ u8 dcqcn;
+ u8 qpid; /* roce relative */
+ } roce;
};
u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
- enum protocol_type proto,
- union qed_qm_pq_params *params);
+ enum protocol_type proto, union qed_qm_pq_params *params);
int qed_init_fw_data(struct qed_dev *cdev,
const u8 *fw_data);
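Reviewer note: a sketch of selecting a PQ through the extended union, here the pure-ACK queue for an iSCSI connection (the q_idx == 1 mapping is handled in the qed_hw.c hunk above; illustrative only):

	union qed_qm_pq_params pq_params;
	u16 pq_id;

	memset(&pq_params, 0, sizeof(pq_params));
	pq_params.iscsi.q_idx = 1; /* q_idx == 1 maps to the pure-ACK PQ */
	pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ISCSI, &pq_params);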
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index e8a3b9da5..23e455f22 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -31,7 +31,6 @@ enum cminterface {
};
/* general constants */
-#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */
#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
QM_PQ_ELEMENT_SIZE, \
0x1000) : 0)
@@ -44,28 +43,28 @@ enum cminterface {
/* other PQ constants */
#define QM_OTHER_PQS_PER_PF 4
/* WFQ constants */
-#define QM_WFQ_UPPER_BOUND 6250000
+#define QM_WFQ_UPPER_BOUND 62500000
#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
#define QM_WFQ_VP_PQ_PF_SHIFT 5
#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
-#define QM_WFQ_MAX_INC_VAL 4375000
-#define QM_WFQ_INIT_CRD(inc_val) (2 * (inc_val))
+#define QM_WFQ_MAX_INC_VAL 43750000
+
/* RL constants */
-#define QM_RL_UPPER_BOUND 6250000
+#define QM_RL_UPPER_BOUND 62500000
#define QM_RL_PERIOD 5 /* in us */
#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
+#define QM_RL_MAX_INC_VAL 43750000
#define QM_RL_INC_VAL(rate) max_t(u32, \
- (((rate ? rate : 1000000) \
- * QM_RL_PERIOD) / 8), 1)
-#define QM_RL_MAX_INC_VAL 4375000
+ (u32)(((rate ? rate : \
+ 1000000) * \
+ QM_RL_PERIOD * \
+ 101) / (8 * 100)), 1)
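Reviewer note on the new QM_RL_INC_VAL: the credit granted per QM_RL_PERIOD (5 us) is rate * period / 8 (bits to bytes), now inflated by 101/100 (about 1%) to compensate for rounding, with a zero rate falling back to 1,000,000 and max_t() enforcing a floor of 1. For the default rate: (1000000 * 5 * 101) / (8 * 100) = 631250.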
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF 1
#define QM_OPPOR_FW_STOP_DEF 0
#define QM_OPPOR_PQ_EMPTY_DEF 1
-#define EAGLE_WORKAROUND_TC 7
/* Command Queue constants */
#define PBF_CMDQ_PURE_LB_LINES 150
-#define PBF_CMDQ_EAGLE_WORKAROUND_LINES 8
#define PBF_CMDQ_LINES_RT_OFFSET(voq) ( \
PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
(PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
@@ -80,7 +79,6 @@ enum cminterface {
/* BTB: blocks constants (block size = 256B) */
#define BTB_JUMBO_PKT_BLOCKS 38
#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
-#define BTB_EAGLE_WORKAROUND_BLOCKS 4
#define BTB_PURE_LB_FACTOR 10
#define BTB_PURE_LB_RATIO 7
/* QM stop command constants */
@@ -107,9 +105,9 @@ enum cminterface {
cmd ## _ ## field, \
value)
/* QM: VOQ macros */
-#define PHYS_VOQ(port, tc, max_phy_tcs_pr_port) ((port) * \
- (max_phy_tcs_pr_port) \
- + (tc))
+#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) ((port) * \
+ (max_phys_tcs_per_port) + \
+ (tc))
#define LB_VOQ(port) ( \
MAX_PHYS_VOQS + (port))
#define VOQ(port, tc, max_phy_tcs_pr_port) \
@@ -120,8 +118,7 @@ enum cminterface {
: LB_VOQ(port))
/******************** INTERNAL IMPLEMENTATION *********************/
/* Prepare PF RL enable/disable runtime init values */
-static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn,
- bool pf_rl_en)
+static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
if (pf_rl_en) {
@@ -130,8 +127,7 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn,
(1 << MAX_NUM_VOQS) - 1);
/* write RL period */
STORE_RT_REG(p_hwfn,
- QM_REG_RLPFPERIOD_RT_OFFSET,
- QM_RL_PERIOD_CLK_25M);
+ QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
STORE_RT_REG(p_hwfn,
QM_REG_RLPFPERIODTIMER_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
@@ -144,8 +140,7 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn,
}
/* Prepare PF WFQ enable/disable runtime init values */
-static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn,
- bool pf_wfq_en)
+static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
{
STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
/* set credit threshold for QM bypass flow */
@@ -156,8 +151,7 @@ static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn,
}
/* Prepare VPORT RL enable/disable runtime init values */
-static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn,
- bool vport_rl_en)
+static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
vport_rl_en ? 1 : 0);
@@ -178,8 +172,7 @@ static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn,
}
/* Prepare VPORT WFQ enable/disable runtime init values */
-static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn,
- bool vport_wfq_en)
+static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
{
STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
vport_wfq_en ? 1 : 0);
@@ -194,8 +187,7 @@ static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn,
* the specified VOQ
*/
static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
- u8 voq,
- u16 cmdq_lines)
+ u8 voq, u16 cmdq_lines)
{
u32 qm_line_crd;
@@ -221,7 +213,7 @@ static void qed_cmdq_lines_rt_init(
u8 max_phys_tcs_per_port,
struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
- u8 tc, voq, port_id;
+ u8 tc, voq, port_id, num_tcs_in_port;
/* clear PBF lines for all VOQs */
for (voq = 0; voq < MAX_NUM_VOQS; voq++)
@@ -229,22 +221,31 @@ static void qed_cmdq_lines_rt_init(
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
if (port_params[port_id].active) {
u16 phys_lines, phys_lines_per_tc;
- u8 phys_tcs = port_params[port_id].num_active_phys_tcs;
- /* find #lines to divide between the active
- * physical TCs.
- */
+ /* find #lines to divide between active phys TCs */
phys_lines = port_params[port_id].num_pbf_cmd_lines -
PBF_CMDQ_PURE_LB_LINES;
/* find #lines per active physical TC */
- phys_lines_per_tc = phys_lines / phys_tcs;
+ num_tcs_in_port = 0;
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ if (((port_params[port_id].active_phys_tcs >>
+ tc) & 0x1) == 1)
+ num_tcs_in_port++;
+ }
+
+ phys_lines_per_tc = phys_lines / num_tcs_in_port;
/* init registers per active TC */
- for (tc = 0; tc < phys_tcs; tc++) {
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ if (((port_params[port_id].active_phys_tcs >>
+ tc) & 0x1) != 1)
+ continue;
+
voq = PHYS_VOQ(port_id, tc,
max_phys_tcs_per_port);
qed_cmdq_lines_voq_rt_init(p_hwfn, voq,
phys_lines_per_tc);
}
+
/* init registers for pure LB TC */
qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
PBF_CMDQ_PURE_LB_LINES);
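
This hunk (and the BTB one that follows) switches from a pre-computed TC count to an active_phys_tcs bitmask, so the number of active TCs becomes the population count of the low NUM_OF_PHYS_TCS bits. A standalone sketch of that idiom, assuming NUM_OF_PHYS_TCS is 8 for illustration only:

#include <stdio.h>

#define NUM_OF_PHYS_TCS 8

static unsigned int count_active_tcs(unsigned int active_phys_tcs)
{
	unsigned int tc, num_tcs_in_port = 0;

	for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
		if ((active_phys_tcs >> tc) & 0x1)
			num_tcs_in_port++;

	return num_tcs_in_port;
}

int main(void)
{
	/* TCs 0, 3 and 5 active -> mask 0x29 -> 3 TCs */
	printf("%u\n", count_active_tcs(0x29));
	return 0;
}
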
@@ -259,34 +260,42 @@ static void qed_btb_blocks_rt_init(
struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
u32 usable_blocks, pure_lb_blocks, phys_blocks;
- u8 tc, voq, port_id;
+ u8 tc, voq, port_id, num_tcs_in_port;
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
u32 temp;
- u8 phys_tcs;
if (!port_params[port_id].active)
continue;
- phys_tcs = port_params[port_id].num_active_phys_tcs;
-
/* subtract headroom blocks */
usable_blocks = port_params[port_id].num_btb_blocks -
BTB_HEADROOM_BLOCKS;
- /* find blocks per physical TC. use factor to avoid
- * floating arithmethic.
- */
+ /* find blocks per physical TC */
+ num_tcs_in_port = 0;
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ if (((port_params[port_id].active_phys_tcs >>
+ tc) & 0x1) == 1)
+ num_tcs_in_port++;
+ }
+
pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
- (phys_tcs * BTB_PURE_LB_FACTOR +
+ (num_tcs_in_port * BTB_PURE_LB_FACTOR +
BTB_PURE_LB_RATIO);
pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
pure_lb_blocks / BTB_PURE_LB_FACTOR);
- phys_blocks = (usable_blocks - pure_lb_blocks) / phys_tcs;
+ phys_blocks = (usable_blocks - pure_lb_blocks) /
+ num_tcs_in_port;
/* init physical TCs */
- for (tc = 0; tc < phys_tcs; tc++) {
- voq = PHYS_VOQ(port_id, tc, max_phys_tcs_per_port);
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ if (((port_params[port_id].active_phys_tcs >>
+ tc) & 0x1) != 1)
+ continue;
+
+ voq = PHYS_VOQ(port_id, tc,
+ max_phys_tcs_per_port);
STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
phys_blocks);
}
@@ -360,10 +369,11 @@ static void qed_tx_pq_map_rt_init(
memset(&tx_pq_map, 0, sizeof(tx_pq_map));
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
- is_vf_pq ? 1 : 0);
+ p_params->pq_params[i].rl_valid ? 1 : 0);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
- is_vf_pq ? p_params->pq_params[i].vport_id : 0);
+ p_params->pq_params[i].rl_valid ?
+ p_params->pq_params[i].vport_id : 0);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
p_params->pq_params[i].wrr_group);
@@ -390,25 +400,11 @@ static void qed_tx_pq_map_rt_init(
/* store Tx PQ VF mask to size select register */
for (i = 0; i < num_tx_pq_vf_masks; i++) {
if (tx_pq_vf_mask[i]) {
- if (is_bb_a0) {
- u32 curr_mask = 0, addr;
-
- addr = QM_REG_MAXPQSIZETXSEL_0 + (i * 4);
- if (!p_params->is_first_pf)
- curr_mask = qed_rd(p_hwfn, p_ptt,
- addr);
-
- addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
-
- STORE_RT_REG(p_hwfn, addr,
- curr_mask | tx_pq_vf_mask[i]);
- } else {
- u32 addr;
+ u32 addr;
- addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
- STORE_RT_REG(p_hwfn, addr,
- tx_pq_vf_mask[i]);
- }
+ addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
+ STORE_RT_REG(p_hwfn, addr,
+ tx_pq_vf_mask[i]);
}
}
}
@@ -418,8 +414,7 @@ static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
u8 port_id,
u8 pf_id,
u32 num_pf_cids,
- u32 num_tids,
- u32 base_mem_addr_4kb)
+ u32 num_tids, u32 base_mem_addr_4kb)
{
u16 i, pq_id;
@@ -465,15 +460,10 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
(p_params->pf_id % MAX_NUM_PFS_BB);
inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
- if (inc_val > QM_WFQ_MAX_INC_VAL) {
+ if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
return -1;
}
- STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
- inc_val);
- STORE_RT_REG(p_hwfn,
- QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
- QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
for (i = 0; i < num_tx_pqs; i++) {
u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
@@ -481,19 +471,21 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
OVERWRITE_RT_REG(p_hwfn,
crd_reg_offset + voq * MAX_NUM_PFS_BB,
- QM_WFQ_INIT_CRD(inc_val) |
QM_WFQ_CRD_REG_SIGN_BIT);
}
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
+ inc_val);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
+ QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
return 0;
}
/* Prepare PF RL runtime init values for the specified PF.
* Return -1 on error.
*/
-static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn,
- u8 pf_id,
- u32 pf_rl)
+static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
u32 inc_val = QM_RL_INC_VAL(pf_rl);
@@ -607,9 +599,7 @@ static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 cmd_addr,
- u32 cmd_data_lsb,
- u32 cmd_data_msb)
+ u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
{
if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
return false;
@@ -627,9 +617,7 @@ static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
u32 qed_qm_pf_mem_size(u8 pf_id,
u32 num_pf_cids,
u32 num_vf_cids,
- u32 num_tids,
- u16 num_pf_pqs,
- u16 num_vf_pqs)
+ u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
{
return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
@@ -713,8 +701,7 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
}
int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u8 pf_id, u16 pf_wfq)
+ struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
{
u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);
@@ -728,9 +715,7 @@ int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
}
int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u8 pf_id,
- u32 pf_rl)
+ struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl)
{
u32 inc_val = QM_RL_INC_VAL(pf_rl);
@@ -749,8 +734,7 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u16 first_tx_pq_id[NUM_OF_TCS],
- u16 vport_wfq)
+ u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
{
u32 inc_val = QM_WFQ_INC_VAL(vport_wfq);
u8 tc;
@@ -773,9 +757,7 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
}
int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u8 vport_id,
- u32 vport_rl)
+ struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl)
{
u32 inc_val = QM_RL_INC_VAL(vport_rl);
@@ -795,9 +777,7 @@ int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool is_release_cmd,
- bool is_tx_pq,
- u16 start_pq,
- u16 num_pqs)
+ bool is_tx_pq, u16 start_pq, u16 num_pqs)
{
u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
@@ -841,17 +821,15 @@ qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable)
#define PRS_ETH_TUNN_FIC_FORMAT -188897008
void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u16 dest_port)
+ struct qed_ptt *p_ptt, u16 dest_port)
{
qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
- qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_PORT, dest_port);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}
void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- bool vxlan_enable)
+ struct qed_ptt *p_ptt, bool vxlan_enable)
{
unsigned long reg_val = 0;
u8 shift;
@@ -908,8 +886,7 @@ void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
}
void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u16 dest_port)
+ struct qed_ptt *p_ptt, u16 dest_port)
{
qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
@@ -918,8 +895,7 @@ void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- bool eth_geneve_enable,
- bool ip_geneve_enable)
+ bool eth_geneve_enable, bool ip_geneve_enable)
{
unsigned long reg_val = 0;
u8 shift;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index d358c3bb1..9866a20d2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -543,8 +543,7 @@ void qed_gtt_init(struct qed_hwfn *p_hwfn)
pxp_global_win[i]);
}
-int qed_init_fw_data(struct qed_dev *cdev,
- const u8 *data)
+int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
{
struct qed_fw_data *fw = cdev->fw_data;
struct bin_buffer_hdr *buf_hdr;
@@ -555,7 +554,11 @@ int qed_init_fw_data(struct qed_dev *cdev,
return -EINVAL;
}
- buf_hdr = (struct bin_buffer_hdr *)data;
+ /* First Dword contains metadata and should be skipped */
+ buf_hdr = (struct bin_buffer_hdr *)(data + sizeof(u32));
+
+ offset = buf_hdr[BIN_BUF_FW_VER_INFO].offset;
+ fw->fw_ver_info = (struct fw_ver_info *)(data + offset);
offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
fw->init_ops = (union init_op *)(data + offset);
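
The layout implied by this hunk: the firmware blob now carries one metadata dword up front, with the bin_buffer_hdr array immediately after it and each header's offset relative to the blob start. A sketch under those assumptions, using a hypothetical two-field header:

#include <stdint.h>

struct bin_hdr {		/* illustrative stand-in */
	uint32_t offset;
	uint32_t length;
};

static const void *fw_section(const uint8_t *data, unsigned int idx)
{
	/* header array follows the leading metadata dword */
	const struct bin_hdr *hdr =
		(const struct bin_hdr *)(data + sizeof(uint32_t));

	/* section offsets are taken from the blob start itself */
	return data + hdr[idx].offset;
}
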
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 09a6ad3d2..8fa50fa23 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -2418,6 +2418,7 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
{
struct qed_dev *cdev = p_hwfn->cdev;
u32 cau_state;
+ u8 timer_res;
memset(p_sb_entry, 0, sizeof(*p_sb_entry));
@@ -2443,6 +2444,23 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS;
}
+ /* Coalesce = (timeset << timer-res), timeset is 7bit wide */
+ if (cdev->rx_coalesce_usecs <= 0x7F)
+ timer_res = 0;
+ else if (cdev->rx_coalesce_usecs <= 0xFF)
+ timer_res = 1;
+ else
+ timer_res = 2;
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
+
+ if (cdev->tx_coalesce_usecs <= 0x7F)
+ timer_res = 0;
+ else if (cdev->tx_coalesce_usecs <= 0xFF)
+ timer_res = 1;
+ else
+ timer_res = 2;
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
+
SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
}
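
The resolution ladder above encodes coalescing as a 7-bit timeset plus a timer resolution, with coalesce_usecs = timeset << timer_res. A minimal sketch of the round trip, picking the smallest resolution whose shifted timeset still fits in 7 bits:

#include <stdio.h>
#include <stdint.h>

static void coal_to_hw(uint16_t usecs, uint8_t *timer_res, uint8_t *timeset)
{
	if (usecs <= 0x7F)
		*timer_res = 0;
	else if (usecs <= 0xFF)
		*timer_res = 1;
	else
		*timer_res = 2;

	*timeset = (uint8_t)(usecs >> *timer_res);
}

int main(void)
{
	uint8_t res, set;

	coal_to_hw(200, &res, &set);	/* 200 us -> res 1, timeset 100 */
	printf("res=%u timeset=%u -> %u us\n", res, set, set << res);
	return 0;
}
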
@@ -2484,17 +2502,28 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
/* Configure pi coalescing if set */
if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
- u8 timeset = p_hwfn->cdev->rx_coalesce_usecs >>
- (QED_CAU_DEF_RX_TIMER_RES + 1);
+ u8 timeset, timer_res;
u8 num_tc = 1, i;
+ /* timeset = (coalesce >> timer-res), timeset is 7bit wide */
+ if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F)
+ timer_res = 0;
+ else if (p_hwfn->cdev->rx_coalesce_usecs <= 0xFF)
+ timer_res = 1;
+ else
+ timer_res = 2;
+ timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res);
qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
QED_COAL_RX_STATE_MACHINE,
timeset);
- timeset = p_hwfn->cdev->tx_coalesce_usecs >>
- (QED_CAU_DEF_TX_TIMER_RES + 1);
-
+ if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F)
+ timer_res = 0;
+ else if (p_hwfn->cdev->tx_coalesce_usecs <= 0xFF)
+ timer_res = 1;
+ else
+ timer_res = 2;
+ timeset = (u8)(p_hwfn->cdev->tx_coalesce_usecs >> timer_res);
for (i = 0; i < num_tc; i++) {
qed_int_cau_conf_pi(p_hwfn, p_ptt,
igu_sb_id, TX_PI(i),
@@ -3199,3 +3228,39 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev)
for_each_hwfn(cdev, i)
cdev->hwfns[i].b_int_requested = false;
}
+
+int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u8 timer_res, u16 sb_id, bool tx)
+{
+ struct cau_sb_entry sb_entry;
+ int rc;
+
+ if (!p_hwfn->hw_init_done) {
+ DP_ERR(p_hwfn, "hardware not initialized yet\n");
+ return -EINVAL;
+ }
+
+ rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
+ sb_id * sizeof(u64),
+ (u64)(uintptr_t)&sb_entry, 2, 0);
+ if (rc) {
+ DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
+ return rc;
+ }
+
+ if (tx)
+ SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
+ else
+ SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
+
+ rc = qed_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(uintptr_t)&sb_entry,
+ CAU_REG_SB_VAR_MEMORY +
+ sb_id * sizeof(u64), 2, 0);
+ if (rc) {
+ DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
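
qed_int_set_timer_res() above is a read-modify-write against device memory. A userspace sketch of the flow, where memcpy() stands in for the two DMAE transfers and the field positions are made up for illustration:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct sb_entry { uint32_t data; uint32_t params; };

static struct sb_entry device_mem;	/* stands in for CAU SB memory */

static void set_timer_res(int tx, uint8_t timer_res)
{
	struct sb_entry sb;
	int shift = tx ? 2 : 0;		/* hypothetical RES1/RES0 fields */
	uint32_t mask = 0x3u << shift;

	memcpy(&sb, &device_mem, sizeof(sb));		/* "grc2host" */
	sb.params = (sb.params & ~mask) | ((uint32_t)timer_res << shift);
	memcpy(&device_mem, &sb, sizeof(sb));		/* "host2grc" */
}

int main(void)
{
	set_timer_res(1, 2);				/* Tx res -> 2 */
	printf("params=0x%08x\n", device_mem.params);	/* 0x00000008 */
	return 0;
}
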
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 20b468637..0948be64d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -389,6 +389,9 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
u16 vf_number,
u8 vf_valid);
+int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u8 timer_res, u16 sb_id, bool tx);
+
#define QED_MAPPING_MEMORY_SIZE(dev) (NUM_OF_SBS(dev))
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index aada4c7e0..401e73854 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -572,9 +572,12 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
- rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ p_ramrod->vf_rx_prod_index = params->vf_qid;
+ if (params->vf_qid)
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Queue is meant for VF rxq[%04x]\n", params->vf_qid);
- return rc;
+ return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int
@@ -587,7 +590,7 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
u16 cqe_pbl_size, void __iomem **pp_prod)
{
struct qed_hw_cid_data *p_rx_cid;
- u64 init_prod_val = 0;
+ u32 init_prod_val = 0;
u16 abs_l2_queue = 0;
u8 abs_stats_id = 0;
int rc;
@@ -612,10 +615,10 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
*pp_prod = (u8 __iomem *)p_hwfn->regview +
GTT_BAR0_MAP_REG_MSDM_RAM +
- MSTORM_PRODS_OFFSET(abs_l2_queue);
+ MSTORM_ETH_PF_PRODS_OFFSET(abs_l2_queue);
/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
- __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
+ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
(u32 *)(&init_prod_val));
/* Allocate a CID for the queue */
@@ -756,9 +759,9 @@ int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
struct qed_hw_cid_data *p_tx_cid;
- u8 abs_vport_id;
+ u16 pq_id, abs_tx_q_id = 0;
int rc = -EINVAL;
- u16 pq_id;
+ u8 abs_vport_id;
/* Store information for the stop */
p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
@@ -769,6 +772,10 @@ int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
+ rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_tx_q_id);
+ if (rc)
+ return rc;
+
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = cid;
@@ -788,6 +795,7 @@ int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
p_ramrod->sb_index = p_params->sb_idx;
p_ramrod->stats_counter_id = stats_id;
+ p_ramrod->queue_zone_id = cpu_to_le16(abs_tx_q_id);
p_ramrod->pbl_size = cpu_to_le16(pbl_size);
DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
@@ -1482,51 +1490,51 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
offsetof(struct public_port, stats),
sizeof(port_stats));
- p_stats->rx_64_byte_packets += port_stats.pmm.r64;
- p_stats->rx_65_to_127_byte_packets += port_stats.pmm.r127;
- p_stats->rx_128_to_255_byte_packets += port_stats.pmm.r255;
- p_stats->rx_256_to_511_byte_packets += port_stats.pmm.r511;
- p_stats->rx_512_to_1023_byte_packets += port_stats.pmm.r1023;
- p_stats->rx_1024_to_1518_byte_packets += port_stats.pmm.r1518;
- p_stats->rx_1519_to_1522_byte_packets += port_stats.pmm.r1522;
- p_stats->rx_1519_to_2047_byte_packets += port_stats.pmm.r2047;
- p_stats->rx_2048_to_4095_byte_packets += port_stats.pmm.r4095;
- p_stats->rx_4096_to_9216_byte_packets += port_stats.pmm.r9216;
- p_stats->rx_9217_to_16383_byte_packets += port_stats.pmm.r16383;
- p_stats->rx_crc_errors += port_stats.pmm.rfcs;
- p_stats->rx_mac_crtl_frames += port_stats.pmm.rxcf;
- p_stats->rx_pause_frames += port_stats.pmm.rxpf;
- p_stats->rx_pfc_frames += port_stats.pmm.rxpp;
- p_stats->rx_align_errors += port_stats.pmm.raln;
- p_stats->rx_carrier_errors += port_stats.pmm.rfcr;
- p_stats->rx_oversize_packets += port_stats.pmm.rovr;
- p_stats->rx_jabbers += port_stats.pmm.rjbr;
- p_stats->rx_undersize_packets += port_stats.pmm.rund;
- p_stats->rx_fragments += port_stats.pmm.rfrg;
- p_stats->tx_64_byte_packets += port_stats.pmm.t64;
- p_stats->tx_65_to_127_byte_packets += port_stats.pmm.t127;
- p_stats->tx_128_to_255_byte_packets += port_stats.pmm.t255;
- p_stats->tx_256_to_511_byte_packets += port_stats.pmm.t511;
- p_stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023;
- p_stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518;
- p_stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047;
- p_stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095;
- p_stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216;
- p_stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383;
- p_stats->tx_pause_frames += port_stats.pmm.txpf;
- p_stats->tx_pfc_frames += port_stats.pmm.txpp;
- p_stats->tx_lpi_entry_count += port_stats.pmm.tlpiec;
- p_stats->tx_total_collisions += port_stats.pmm.tncl;
- p_stats->rx_mac_bytes += port_stats.pmm.rbyte;
- p_stats->rx_mac_uc_packets += port_stats.pmm.rxuca;
- p_stats->rx_mac_mc_packets += port_stats.pmm.rxmca;
- p_stats->rx_mac_bc_packets += port_stats.pmm.rxbca;
- p_stats->rx_mac_frames_ok += port_stats.pmm.rxpok;
- p_stats->tx_mac_bytes += port_stats.pmm.tbyte;
- p_stats->tx_mac_uc_packets += port_stats.pmm.txuca;
- p_stats->tx_mac_mc_packets += port_stats.pmm.txmca;
- p_stats->tx_mac_bc_packets += port_stats.pmm.txbca;
- p_stats->tx_mac_ctrl_frames += port_stats.pmm.txcf;
+ p_stats->rx_64_byte_packets += port_stats.eth.r64;
+ p_stats->rx_65_to_127_byte_packets += port_stats.eth.r127;
+ p_stats->rx_128_to_255_byte_packets += port_stats.eth.r255;
+ p_stats->rx_256_to_511_byte_packets += port_stats.eth.r511;
+ p_stats->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
+ p_stats->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
+ p_stats->rx_1519_to_1522_byte_packets += port_stats.eth.r1522;
+ p_stats->rx_1519_to_2047_byte_packets += port_stats.eth.r2047;
+ p_stats->rx_2048_to_4095_byte_packets += port_stats.eth.r4095;
+ p_stats->rx_4096_to_9216_byte_packets += port_stats.eth.r9216;
+ p_stats->rx_9217_to_16383_byte_packets += port_stats.eth.r16383;
+ p_stats->rx_crc_errors += port_stats.eth.rfcs;
+ p_stats->rx_mac_crtl_frames += port_stats.eth.rxcf;
+ p_stats->rx_pause_frames += port_stats.eth.rxpf;
+ p_stats->rx_pfc_frames += port_stats.eth.rxpp;
+ p_stats->rx_align_errors += port_stats.eth.raln;
+ p_stats->rx_carrier_errors += port_stats.eth.rfcr;
+ p_stats->rx_oversize_packets += port_stats.eth.rovr;
+ p_stats->rx_jabbers += port_stats.eth.rjbr;
+ p_stats->rx_undersize_packets += port_stats.eth.rund;
+ p_stats->rx_fragments += port_stats.eth.rfrg;
+ p_stats->tx_64_byte_packets += port_stats.eth.t64;
+ p_stats->tx_65_to_127_byte_packets += port_stats.eth.t127;
+ p_stats->tx_128_to_255_byte_packets += port_stats.eth.t255;
+ p_stats->tx_256_to_511_byte_packets += port_stats.eth.t511;
+ p_stats->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
+ p_stats->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
+ p_stats->tx_1519_to_2047_byte_packets += port_stats.eth.t2047;
+ p_stats->tx_2048_to_4095_byte_packets += port_stats.eth.t4095;
+ p_stats->tx_4096_to_9216_byte_packets += port_stats.eth.t9216;
+ p_stats->tx_9217_to_16383_byte_packets += port_stats.eth.t16383;
+ p_stats->tx_pause_frames += port_stats.eth.txpf;
+ p_stats->tx_pfc_frames += port_stats.eth.txpp;
+ p_stats->tx_lpi_entry_count += port_stats.eth.tlpiec;
+ p_stats->tx_total_collisions += port_stats.eth.tncl;
+ p_stats->rx_mac_bytes += port_stats.eth.rbyte;
+ p_stats->rx_mac_uc_packets += port_stats.eth.rxuca;
+ p_stats->rx_mac_mc_packets += port_stats.eth.rxmca;
+ p_stats->rx_mac_bc_packets += port_stats.eth.rxbca;
+ p_stats->rx_mac_frames_ok += port_stats.eth.rxpok;
+ p_stats->tx_mac_bytes += port_stats.eth.tbyte;
+ p_stats->tx_mac_uc_packets += port_stats.eth.txuca;
+ p_stats->tx_mac_mc_packets += port_stats.eth.txmca;
+ p_stats->tx_mac_bc_packets += port_stats.eth.txbca;
+ p_stats->tx_mac_ctrl_frames += port_stats.eth.txcf;
for (j = 0; j < 8; j++) {
p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
p_stats->brb_discards += port_stats.brb.brb_discard[j];
@@ -1656,6 +1664,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
info->num_tc = 1;
if (IS_PF(cdev)) {
+ int max_vf_vlan_filters = 0;
+
if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
for_each_hwfn(cdev, i)
info->num_queues +=
@@ -1668,7 +1678,12 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
info->num_queues = cdev->num_hwfns;
}
- info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
+ if (IS_QED_SRIOV(cdev))
+ max_vf_vlan_filters = cdev->p_iov_info->total_vfs *
+ QED_ETH_VF_NUM_VLAN_FILTERS;
+ info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN) -
+ max_vf_vlan_filters;
+
ether_addr_copy(info->port_mac,
cdev->hwfns[0].hw_info.hw_mac_addr);
} else {
@@ -2156,11 +2171,18 @@ static int qed_fp_cqe_completion(struct qed_dev *dev,
extern const struct qed_iov_hv_ops qed_iov_ops_pass;
#endif
+#ifdef CONFIG_DCB
+extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;
+#endif
+
static const struct qed_eth_ops qed_eth_ops_pass = {
.common = &qed_common_ops_pass,
#ifdef CONFIG_QED_SRIOV
.iov = &qed_iov_ops_pass,
#endif
+#ifdef CONFIG_DCB
+ .dcb = &qed_dcbnl_ops_pass,
+#endif
.fill_dev_info = &qed_fill_eth_dev_info,
.register_ops = &qed_register_eth_ops,
.check_mac = &qed_check_mac,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index a41c6b559..d5a32a1e6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -207,6 +207,8 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->pci_mem_start = cdev->pci_params.mem_start;
dev_info->pci_mem_end = cdev->pci_params.mem_end;
dev_info->pci_irq = cdev->pci_params.irq;
+ dev_info->rdma_supported =
+ (cdev->hwfns[0].hw_info.personality == QED_PCI_ETH_ROCE);
dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
@@ -657,8 +659,13 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
struct qed_sb_cnt_info sb_cnt_info;
int rc;
int i;
- memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
+ if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
+ DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
+ return -EINVAL;
+ }
+
+ memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
cdev->int_params.in.int_mode = int_mode;
for_each_hwfn(cdev, i) {
memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
@@ -832,7 +839,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
goto err2;
}
- data = cdev->firmware->data;
+ /* First Dword used to differentiate between various sources */
+ data = cdev->firmware->data + sizeof(u32);
}
memset(&tunn_info, 0, sizeof(tunn_info));
@@ -900,7 +908,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
if (IS_PF(cdev)) {
qed_free_stream_mem(cdev);
- qed_sriov_disable(cdev, true);
+ if (IS_QED_ETH_IF(cdev))
+ qed_sriov_disable(cdev, true);
qed_nic_stop(cdev);
qed_slowpath_irq_free(cdev);
@@ -991,8 +1000,7 @@ static bool qed_can_link_change(struct qed_dev *cdev)
return true;
}
-static int qed_set_link(struct qed_dev *cdev,
- struct qed_link_params *params)
+static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
{
struct qed_hwfn *hwfn;
struct qed_mcp_link_params *link_params;
@@ -1032,7 +1040,7 @@ static int qed_set_link(struct qed_dev *cdev,
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
if (params->adv_speeds & 0)
link_params->speed.advertised_speeds |=
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G;
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
}
if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
link_params->speed.forced_speed = params->forced_speed;
@@ -1053,19 +1061,19 @@ static int qed_set_link(struct qed_dev *cdev,
if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
switch (params->loopback_mode) {
case QED_LINK_LOOPBACK_INT_PHY:
- link_params->loopback_mode = PMM_LOOPBACK_INT_PHY;
+ link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
break;
case QED_LINK_LOOPBACK_EXT_PHY:
- link_params->loopback_mode = PMM_LOOPBACK_EXT_PHY;
+ link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
break;
case QED_LINK_LOOPBACK_EXT:
- link_params->loopback_mode = PMM_LOOPBACK_EXT;
+ link_params->loopback_mode = ETH_LOOPBACK_EXT;
break;
case QED_LINK_LOOPBACK_MAC:
- link_params->loopback_mode = PMM_LOOPBACK_MAC;
+ link_params->loopback_mode = ETH_LOOPBACK_MAC;
break;
default:
- link_params->loopback_mode = PMM_LOOPBACK_NONE;
+ link_params->loopback_mode = ETH_LOOPBACK_NONE;
break;
}
}
@@ -1185,7 +1193,7 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
if_link->advertised_caps |= 0;
if (params.speed.advertised_speeds &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G)
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
if_link->advertised_caps |= 0;
if (link_caps.speed_capabilities &
@@ -1202,7 +1210,7 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
if_link->supported_caps |= 0;
if (link_caps.speed_capabilities &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G)
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
if_link->supported_caps |= 0;
if (link.link_up)
@@ -1301,6 +1309,38 @@ static int qed_drain(struct qed_dev *cdev)
return 0;
}
+static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal)
+{
+ *rx_coal = cdev->rx_coalesce_usecs;
+ *tx_coal = cdev->tx_coalesce_usecs;
+}
+
+static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
+ u8 qid, u16 sb_id)
+{
+ struct qed_hwfn *hwfn;
+ struct qed_ptt *ptt;
+ int hwfn_index;
+ int status = 0;
+
+ hwfn_index = qid % cdev->num_hwfns;
+ hwfn = &cdev->hwfns[hwfn_index];
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ status = qed_set_rxq_coalesce(hwfn, ptt, rx_coal,
+ qid / cdev->num_hwfns, sb_id);
+ if (status)
+ goto out;
+ status = qed_set_txq_coalesce(hwfn, ptt, tx_coal,
+ qid / cdev->num_hwfns, sb_id);
+out:
+ qed_ptt_release(hwfn, ptt);
+
+ return status;
+}
+
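
qed_set_coalesce() above assumes the CMT fan-out in which queues are interleaved across engines: global queue qid lives on hwfn (qid % num_hwfns) as in-engine queue (qid / num_hwfns). A sketch of that mapping:

#include <stdio.h>

int main(void)
{
	unsigned int num_hwfns = 2, qid;	/* e.g. a 100G CMT device */

	for (qid = 0; qid < 6; qid++)
		printf("qid %u -> hwfn %u, local queue %u\n",
		       qid, qid % num_hwfns, qid / num_hwfns);
	return 0;
}
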
static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
@@ -1347,5 +1387,7 @@ const struct qed_common_ops qed_common_ops_pass = {
.update_msglvl = &qed_init_dp,
.chain_alloc = &qed_chain_alloc,
.chain_free = &qed_chain_free,
+ .get_coalesce = &qed_get_coalesce,
+ .set_coalesce = &qed_set_coalesce,
.set_led = &qed_set_led,
};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 118236179..f776a7779 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -531,9 +531,9 @@ static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
transceiver_data)));
transceiver_state = GET_FIELD(transceiver_state,
- PMM_TRANSCEIVER_STATE);
+ ETH_TRANSCEIVER_STATE);
- if (transceiver_state == PMM_TRANSCEIVER_STATE_PRESENT)
+ if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
DP_NOTICE(p_hwfn, "Transceiver is present.\n");
else
DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
@@ -668,14 +668,12 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
qed_link_update(p_hwfn);
}
-int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- bool b_up)
+int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
{
struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
struct qed_mcp_mb_params mb_params;
union drv_union_data union_data;
- struct pmm_phy_cfg *phy_cfg;
+ struct eth_phy_cfg *phy_cfg;
int rc = 0;
u32 cmd;
@@ -685,9 +683,9 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
if (!params->speed.autoneg)
phy_cfg->speed = params->speed.forced_speed;
- phy_cfg->pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0;
- phy_cfg->pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0;
- phy_cfg->pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0;
+ phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
+ phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
+ phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
phy_cfg->adv_speed = params->speed.advertised_speeds;
phy_cfg->loopback_mode = params->loopback_mode;
@@ -773,6 +771,34 @@ static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
return size;
}
+int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_pf)
+{
+ struct public_func shmem_info;
+ int i;
+
+ /* Find first Ethernet interface in port */
+ for (i = 0; i < NUM_OF_ENG_PFS(p_hwfn->cdev);
+ i += p_hwfn->cdev->num_ports_in_engines) {
+ qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+ MCP_PF_ID_BY_REL(p_hwfn, i));
+
+ if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
+ continue;
+
+ if ((shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK) ==
+ FUNC_MF_CFG_PROTOCOL_ETHERNET) {
+ *p_pf = (u8)i;
+ return 0;
+ }
+ }
+
+ DP_NOTICE(p_hwfn,
+ "Failed to find on port an ethernet interface in MF_SI mode\n");
+
+ return -EINVAL;
+}
+
static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
@@ -951,7 +977,18 @@ qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
case FUNC_MF_CFG_PROTOCOL_ETHERNET:
- *p_proto = QED_PCI_ETH;
+ if (test_bit(QED_DEV_CAP_ROCE,
+ &p_hwfn->hw_info.device_capabilities))
+ *p_proto = QED_PCI_ETH_ROCE;
+ else
+ *p_proto = QED_PCI_ETH;
+ break;
+ case FUNC_MF_CFG_PROTOCOL_ISCSI:
+ *p_proto = QED_PCI_ISCSI;
+ break;
+ case FUNC_MF_CFG_PROTOCOL_ROCE:
+ DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
+ rc = -EINVAL;
break;
default:
rc = -EINVAL;
@@ -1116,8 +1153,8 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
p_drv_version = &union_data.drv_version;
p_drv_version->version = p_ver->version;
- for (i = 0; i < MCP_DRV_VER_STR_SIZE - 1; i += 4) {
- val = cpu_to_be32(p_ver->name[i]);
+ for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
+ val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
*(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val;
}
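
The reworked loop above packs the driver-version string as big-endian dwords. A userspace sketch of the same packing, with htonl() playing the role of cpu_to_be32():

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	char name[16] = "4.8.2", out[16];
	unsigned int i;

	for (i = 0; i < sizeof(name) / sizeof(uint32_t); i++) {
		uint32_t val;

		memcpy(&val, &name[i * sizeof(uint32_t)], sizeof(val));
		val = htonl(val);	/* byte-swap on little-endian hosts */
		memcpy(&out[i * sizeof(uint32_t)], &val, sizeof(val));
	}

	/* each 4-byte chunk of 'out' is now in big-endian byte order */
	printf("%c%c%c%c\n", out[3], out[2], out[1], out[0]);
	return 0;
}
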
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 6dd59eb7f..7f319aa1b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -457,4 +457,7 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_mcp_link_state *p_link,
u8 min_bw);
+
+int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_pf);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index 3a6c506f0..f6b86ca1f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -27,6 +27,35 @@
#define CDU_REG_CID_ADDR_PARAMS_NCIB ( \
0xff << 24)
+#define CDU_REG_SEGMENT0_PARAMS \
+ 0x580904UL
+#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK \
+ (0xfff << 0)
+#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT \
+ 0
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE \
+ (0xff << 16)
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT \
+ 16
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE \
+ (0xff << 24)
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT \
+ 24
+#define CDU_REG_SEGMENT1_PARAMS \
+ 0x580908UL
+#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK \
+ (0xfff << 0)
+#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT \
+ 0
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE \
+ (0xff << 16)
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT \
+ 16
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE \
+ (0xff << 24)
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT \
+ 24
+
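
The new CDU segment registers come as mask/shift pairs; extracting a field is the usual AND-then-shift. A sketch with illustrative names (the unsigned literal avoids the signed-shift pitfall in standalone code):

#include <stdio.h>
#include <stdint.h>

#define SEG_PARAMS_TID_SIZE		(0xffU << 24)
#define SEG_PARAMS_TID_SIZE_SHIFT	24

int main(void)
{
	uint32_t reg = 0x28000123;	/* sample register value */
	uint32_t tid_size = (reg & SEG_PARAMS_TID_SIZE) >>
			    SEG_PARAMS_TID_SIZE_SHIFT;

	printf("tid_size = 0x%x\n", tid_size);	/* 0x28 */
	return 0;
}
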
#define XSDM_REG_OPERATION_GEN \
0xf80408UL
#define NIG_REG_RX_BRB_OUT_EN \
@@ -51,6 +80,8 @@
0x1f00000UL
#define BAR0_MAP_REG_TSDM_RAM \
0x1c80000UL
+#define BAR0_MAP_REG_XSDM_RAM \
+ 0x1e00000UL
#define NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \
0x5011f4UL
#define PRS_REG_SEARCH_TCP \
@@ -167,6 +198,10 @@
0x1800004UL
#define NIG_REG_CM_HDR \
0x500840UL
+#define NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR \
+ 0x50196cUL
+#define NIG_REG_LLH_CLS_TYPE_DUALMODE \
+ 0x501964UL
#define NCSI_REG_CONFIG \
0x040200UL
#define PBF_REG_INIT \
@@ -219,6 +254,10 @@
0x230000UL
#define PRS_REG_SOFT_RST \
0x1f0000UL
+#define PRS_REG_MSG_INFO \
+ 0x1f0a1cUL
+#define PRS_REG_ROCE_DEST_QP_MAX_PF \
+ 0x1f0430UL
#define PSDM_REG_ENABLE_IN1 \
0xfa0004UL
#define PSEM_REG_ENABLE_IN \
@@ -227,6 +266,8 @@
0x280020UL
#define PSWRQ2_REG_CDUT_P_SIZE \
0x24000cUL
+#define PSWRQ2_REG_ILT_MEMORY \
+ 0x260000UL
#define PSWHST_REG_DISCARD_INTERNAL_WRITES \
0x2a0040UL
#define PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \
@@ -460,7 +501,7 @@
#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE (0x1 << 2)
#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT 2
-#define NIG_REG_VXLAN_PORT 0x50105cUL
+#define NIG_REG_VXLAN_CTRL 0x50105cUL
#define PBF_REG_VXLAN_PORT 0xd80518UL
#define PBF_REG_NGE_PORT 0xd8051cUL
#define PRS_REG_NGE_PORT 0x1f086cUL
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index ea4e9ce53..a548504c3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -63,6 +63,32 @@ union ramrod_data {
struct vport_update_ramrod_data vport_update;
struct vport_filter_update_ramrod_data vport_filter_update;
+ struct rdma_init_func_ramrod_data rdma_init_func;
+ struct rdma_close_func_ramrod_data rdma_close_func;
+ struct rdma_register_tid_ramrod_data rdma_register_tid;
+ struct rdma_deregister_tid_ramrod_data rdma_deregister_tid;
+ struct roce_create_qp_resp_ramrod_data roce_create_qp_resp;
+ struct roce_create_qp_req_ramrod_data roce_create_qp_req;
+ struct roce_modify_qp_resp_ramrod_data roce_modify_qp_resp;
+ struct roce_modify_qp_req_ramrod_data roce_modify_qp_req;
+ struct roce_query_qp_resp_ramrod_data roce_query_qp_resp;
+ struct roce_query_qp_req_ramrod_data roce_query_qp_req;
+ struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp;
+ struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req;
+ struct rdma_create_cq_ramrod_data rdma_create_cq;
+ struct rdma_resize_cq_ramrod_data rdma_resize_cq;
+ struct rdma_destroy_cq_ramrod_data rdma_destroy_cq;
+ struct rdma_srq_create_ramrod_data rdma_create_srq;
+ struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
+ struct rdma_srq_modify_ramrod_data rdma_modify_srq;
+
+ struct iscsi_slow_path_hdr iscsi_empty;
+ struct iscsi_init_ramrod_params iscsi_init;
+ struct iscsi_spe_func_dstry iscsi_destroy;
+ struct iscsi_spe_conn_offload iscsi_conn_offload;
+ struct iscsi_conn_update_ramrod_params iscsi_conn_update;
+ struct iscsi_spe_conn_termination iscsi_conn_terminate;
+
struct vf_start_ramrod_data vf_start;
struct vf_stop_ramrod_data vf_stop;
};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 67f6ce3c8..a52f3fc05 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -308,6 +308,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL;
+ u8 page_cnt;
/* update initial eq producer */
qed_eq_prod_update(p_hwfn,
@@ -332,7 +333,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
p_ramrod->path_id = QED_PATH_ID(p_hwfn);
p_ramrod->dont_log_ramrods = 0;
p_ramrod->log_type_mask = cpu_to_le16(0xf);
- p_ramrod->mf_mode = mode;
+
switch (mode) {
case QED_MF_DEFAULT:
case QED_MF_NPAR:
@@ -350,24 +351,41 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
/* Place EQ address in RAMROD */
DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
p_hwfn->p_eq->chain.pbl.p_phys_table);
- p_ramrod->event_ring_num_pages = (u8)p_hwfn->p_eq->chain.page_cnt;
-
+ page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
+ p_ramrod->event_ring_num_pages = page_cnt;
DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
p_hwfn->p_consq->chain.pbl.p_phys_table);
qed_tunn_set_pf_start_params(p_hwfn, p_tunn,
&p_ramrod->tunnel_config);
- p_hwfn->hw_info.personality = PERSONALITY_ETH;
if (IS_MF_SI(p_hwfn))
p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
+ switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_ETH:
+ p_ramrod->personality = PERSONALITY_ETH;
+ break;
+ case QED_PCI_ISCSI:
+ p_ramrod->personality = PERSONALITY_ISCSI;
+ break;
+ case QED_PCI_ETH_ROCE:
+ p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unkown personality %d\n",
+ p_hwfn->hw_info.personality);
+ p_ramrod->personality = PERSONALITY_ETH;
+ }
+
if (p_hwfn->cdev->p_iov_info) {
struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf;
p_ramrod->num_vfs = (u8) p_iov->total_vfs;
}
+ p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
+ p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
"Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 03601dfc0..d73456eab 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -339,6 +339,7 @@ struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
if (qed_chain_alloc(p_hwfn->cdev,
QED_CHAIN_USE_TO_PRODUCE,
QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U16,
num_elem,
sizeof(union event_ring_element),
&p_eq->chain)) {
@@ -412,10 +413,10 @@ int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
***************************************************************************/
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
- struct qed_spq *p_spq = p_hwfn->p_spq;
- struct qed_spq_entry *p_virt = NULL;
- dma_addr_t p_phys = 0;
- unsigned int i = 0;
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+ struct qed_spq_entry *p_virt = NULL;
+ dma_addr_t p_phys = 0;
+ u32 i, capacity;
INIT_LIST_HEAD(&p_spq->pending);
INIT_LIST_HEAD(&p_spq->completion_pending);
@@ -427,7 +428,8 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn)
p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
p_virt = p_spq->p_virt;
- for (i = 0; i < p_spq->chain.capacity; i++) {
+ capacity = qed_chain_get_capacity(&p_spq->chain);
+ for (i = 0; i < capacity; i++) {
DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);
list_add_tail(&p_virt->list, &p_spq->free_pool);
@@ -455,9 +457,10 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn)
int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
- struct qed_spq *p_spq = NULL;
- dma_addr_t p_phys = 0;
- struct qed_spq_entry *p_virt = NULL;
+ struct qed_spq_entry *p_virt = NULL;
+ struct qed_spq *p_spq = NULL;
+ dma_addr_t p_phys = 0;
+ u32 capacity;
/* SPQ struct */
p_spq =
@@ -471,6 +474,7 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
if (qed_chain_alloc(p_hwfn->cdev,
QED_CHAIN_USE_TO_PRODUCE,
QED_CHAIN_MODE_SINGLE,
+ QED_CHAIN_CNT_TYPE_U16,
0, /* N/A when the mode is SINGLE */
sizeof(struct slow_path_element),
&p_spq->chain)) {
@@ -479,11 +483,11 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
}
/* allocate and fill the SPQ elements (incl. ramrod data list) */
+ capacity = qed_chain_get_capacity(&p_spq->chain);
p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- p_spq->chain.capacity *
+ capacity *
sizeof(struct qed_spq_entry),
- &p_phys,
- GFP_KERNEL);
+ &p_phys, GFP_KERNEL);
if (!p_virt)
goto spq_allocate_fail;
@@ -503,16 +507,18 @@ spq_allocate_fail:
void qed_spq_free(struct qed_hwfn *p_hwfn)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
+ u32 capacity;
if (!p_spq)
return;
- if (p_spq->p_virt)
+ if (p_spq->p_virt) {
+ capacity = qed_chain_get_capacity(&p_spq->chain);
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- p_spq->chain.capacity *
+ capacity *
sizeof(struct qed_spq_entry),
- p_spq->p_virt,
- p_spq->p_phys);
+ p_spq->p_virt, p_spq->p_phys);
+ }
qed_chain_free(p_hwfn->cdev, &p_spq->chain);
;
@@ -881,9 +887,9 @@ struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
if (qed_chain_alloc(p_hwfn->cdev,
QED_CHAIN_USE_TO_PRODUCE,
QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U16,
QED_CHAIN_PAGE_SIZE / 0x80,
- 0x80,
- &p_consq->chain)) {
+ 0x80, &p_consq->chain)) {
DP_NOTICE(p_hwfn, "Failed to allocate consq chain");
goto consq_allocate_fail;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index c325ee857..15399da26 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -21,18 +21,18 @@
#include "qed_vf.h"
/* IOV ramrods */
-static int qed_sp_vf_start(struct qed_hwfn *p_hwfn,
- u32 concrete_vfid, u16 opaque_vfid)
+static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
{
struct vf_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL;
+ u8 fp_minor;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = qed_spq_get_cid(p_hwfn);
- init_data.opaque_fid = opaque_vfid;
+ init_data.opaque_fid = p_vf->opaque_fid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent,
@@ -43,10 +43,39 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn,
p_ramrod = &p_ent->ramrod.vf_start;
- p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
- p_ramrod->opaque_fid = cpu_to_le16(opaque_vfid);
+ p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
+ p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid);
- p_ramrod->personality = PERSONALITY_ETH;
+ switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_ETH:
+ p_ramrod->personality = PERSONALITY_ETH;
+ break;
+ case QED_PCI_ETH_ROCE:
+ p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
+ p_hwfn->hw_info.personality);
+ return -EINVAL;
+ }
+
+ fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
+ if (fp_minor > ETH_HSI_VER_MINOR) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n",
+ p_vf->abs_vf_id,
+ ETH_HSI_VER_MAJOR,
+ fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
+ fp_minor = ETH_HSI_VER_MINOR;
+ }
+
+ p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
+ p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] - Starting using HSI %02x.%02x\n",
+ p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);
return qed_spq_post(p_hwfn, p_ent, NULL);
}
@@ -117,6 +146,45 @@ static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
return vf;
}
+static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, u16 rx_qid)
+{
+ if (rx_qid >= p_vf->num_rxqs)
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
+ p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
+ return rx_qid < p_vf->num_rxqs;
+}
+
+static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, u16 tx_qid)
+{
+ if (tx_qid >= p_vf->num_txqs)
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
+ p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
+ return tx_qid < p_vf->num_txqs;
+}
+
+static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, u16 sb_idx)
+{
+ int i;
+
+ for (i = 0; i < p_vf->num_sbs; i++)
+ if (p_vf->igu_sbs[i] == sb_idx)
+ return true;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[0%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
+ p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);
+
+ return false;
+}
+
int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
int vfid, struct qed_ptt *p_ptt)
{
@@ -293,6 +361,9 @@ static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
(vf->abs_vf_id << 8);
vf->vport_id = idx + 1;
+
+ vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
+ vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
}
}
@@ -598,17 +669,6 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
/* unpretend */
qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
- if (vf->state != VF_STOPPED) {
- DP_NOTICE(p_hwfn, "VF[%02x] is already started\n",
- vf->abs_vf_id);
- return -EINVAL;
- }
-
- /* Start VF */
- rc = qed_sp_vf_start(p_hwfn, vf->concrete_fid, vf->opaque_fid);
- if (rc)
- DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
-
vf->state = VF_FREE;
return rc;
@@ -852,7 +912,6 @@ static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_params params;
struct qed_mcp_link_state link;
struct qed_vf_info *vf = NULL;
- int rc = 0;
vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
if (!vf) {
@@ -874,18 +933,8 @@ static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);
- if (vf->state != VF_STOPPED) {
- /* Stopping the VF */
- rc = qed_sp_vf_stop(p_hwfn, vf->concrete_fid, vf->opaque_fid);
-
- if (rc != 0) {
- DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
- rc);
- return rc;
- }
-
- vf->state = VF_STOPPED;
- }
+ /* Forget the VF's acquisition message */
+ memset(&vf->acquire, 0, sizeof(vf->acquire));
/* disablng interrupts and resetting permission table was done during
* vf-close, however, we could get here without going through vf_close
@@ -1116,8 +1165,6 @@ static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
p_vf->vf_bulletin = 0;
p_vf->vport_instance = 0;
- p_vf->num_mac_filters = 0;
- p_vf->num_vlan_filters = 0;
p_vf->configured_features = 0;
/* If VF previously requested less resources, go back to default */
@@ -1130,9 +1177,95 @@ static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
p_vf->vf_queues[i].rxq_active = 0;
memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
+ memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
}
+static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *p_vf,
+ struct vf_pf_resc_request *p_req,
+ struct pf_vf_resc *p_resp)
+{
+ int i;
+
+ /* Queue related information */
+ p_resp->num_rxqs = p_vf->num_rxqs;
+ p_resp->num_txqs = p_vf->num_txqs;
+ p_resp->num_sbs = p_vf->num_sbs;
+
+ for (i = 0; i < p_resp->num_sbs; i++) {
+ p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
+ p_resp->hw_sbs[i].sb_qid = 0;
+ }
+
+ /* These fields are filled for backward compatibility.
+ * Unused by modern vfs.
+ */
+ for (i = 0; i < p_resp->num_rxqs; i++) {
+ qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
+ (u16 *)&p_resp->hw_qid[i]);
+ p_resp->cid[i] = p_vf->vf_queues[i].fw_cid;
+ }
+
+ /* Filter related information */
+ p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters,
+ p_req->num_mac_filters);
+ p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
+ p_req->num_vlan_filters);
+
+ /* This isn't really needed/enforced, but some legacy VFs might depend
+ * on the correct filling of this field.
+ */
+ p_resp->num_mc_filters = QED_MAX_MC_ADDRS;
+
+ /* Validate sufficient resources for VF */
+ if (p_resp->num_rxqs < p_req->num_rxqs ||
+ p_resp->num_txqs < p_req->num_txqs ||
+ p_resp->num_sbs < p_req->num_sbs ||
+ p_resp->num_mac_filters < p_req->num_mac_filters ||
+ p_resp->num_vlan_filters < p_req->num_vlan_filters ||
+ p_resp->num_mc_filters < p_req->num_mc_filters) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]\n",
+ p_vf->abs_vf_id,
+ p_req->num_rxqs,
+ p_resp->num_rxqs,
+ p_req->num_txqs,
+ p_resp->num_txqs,
+ p_req->num_sbs,
+ p_resp->num_sbs,
+ p_req->num_mac_filters,
+ p_resp->num_mac_filters,
+ p_req->num_vlan_filters,
+ p_resp->num_vlan_filters,
+ p_req->num_mc_filters, p_resp->num_mc_filters);
+ return PFVF_STATUS_NO_RESOURCE;
+ }
+
+ return PFVF_STATUS_SUCCESS;
+}
+
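
The negotiation in qed_iov_vf_mbx_acquire_resc() grants each resource as min(reserved, requested) and fails the acquire if any grant falls short. A sketch of that pattern for a single resource type:

#include <stdio.h>

static unsigned int grant(unsigned int reserved, unsigned int requested)
{
	return reserved < requested ? reserved : requested;
}

int main(void)
{
	unsigned int req_vlans = 4, pf_vlans = 2;
	unsigned int granted = grant(pf_vlans, req_vlans);

	if (granted < req_vlans)
		printf("NO_RESOURCE: wanted %u, granted %u\n",
		       req_vlans, granted);
	return 0;
}
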
+static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn,
+ struct pfvf_stats_info *p_stats)
+{
+ p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
+ offsetof(struct mstorm_vf_zone,
+ non_trigger.eth_queue_stat);
+ p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
+ p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
+ offsetof(struct ustorm_vf_zone,
+ non_trigger.eth_queue_stat);
+ p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
+ p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
+ offsetof(struct pstorm_vf_zone,
+ non_trigger.eth_queue_stat);
+ p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
+ p_stats->tstats.address = 0;
+ p_stats->tstats.len = 0;
+}
+
static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf)
@@ -1141,25 +1274,27 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
- u8 i, vfpf_status = PFVF_STATUS_SUCCESS;
+ u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
struct pf_vf_resc *resc = &resp->resc;
+ int rc;
+
+ memset(resp, 0, sizeof(*resp));
/* Validate FW compatibility */
- if (req->vfdev_info.fw_major != FW_MAJOR_VERSION ||
- req->vfdev_info.fw_minor != FW_MINOR_VERSION ||
- req->vfdev_info.fw_revision != FW_REVISION_VERSION ||
- req->vfdev_info.fw_engineering != FW_ENGINEERING_VERSION) {
+ if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
DP_INFO(p_hwfn,
- "VF[%d] is running an incompatible driver [VF needs FW %02x:%02x:%02x:%02x but Hypervisor is using %02x:%02x:%02x:%02x]\n",
+ "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n",
vf->abs_vf_id,
- req->vfdev_info.fw_major,
- req->vfdev_info.fw_minor,
- req->vfdev_info.fw_revision,
- req->vfdev_info.fw_engineering,
- FW_MAJOR_VERSION,
- FW_MINOR_VERSION,
- FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
- vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
+ req->vfdev_info.eth_fp_hsi_major,
+ req->vfdev_info.eth_fp_hsi_minor,
+ ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
+
+ /* Write the PF version so that VF would know which version
+ * is supported.
+ */
+ pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
+ pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
+
goto out;
}
@@ -1169,16 +1304,13 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
DP_INFO(p_hwfn,
"VF[%d] is running an old driver that doesn't support 100g\n",
vf->abs_vf_id);
- vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
goto out;
}
- memset(resp, 0, sizeof(*resp));
+ /* Store the acquire message */
+ memcpy(&vf->acquire, req, sizeof(vf->acquire));
- /* Fill in vf info stuff */
vf->opaque_fid = req->vfdev_info.opaque_fid;
- vf->num_mac_filters = 1;
- vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
vf->vf_bulletin = req->bulletin_addr;
vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
@@ -1194,26 +1326,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
if (p_hwfn->cdev->num_hwfns > 1)
pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
- pfdev_info->stats_info.mstats.address =
- PXP_VF_BAR0_START_MSDM_ZONE_B +
- offsetof(struct mstorm_vf_zone, non_trigger.eth_queue_stat);
- pfdev_info->stats_info.mstats.len =
- sizeof(struct eth_mstorm_per_queue_stat);
-
- pfdev_info->stats_info.ustats.address =
- PXP_VF_BAR0_START_USDM_ZONE_B +
- offsetof(struct ustorm_vf_zone, non_trigger.eth_queue_stat);
- pfdev_info->stats_info.ustats.len =
- sizeof(struct eth_ustorm_per_queue_stat);
-
- pfdev_info->stats_info.pstats.address =
- PXP_VF_BAR0_START_PSDM_ZONE_B +
- offsetof(struct pstorm_vf_zone, non_trigger.eth_queue_stat);
- pfdev_info->stats_info.pstats.len =
- sizeof(struct eth_pstorm_per_queue_stat);
-
- pfdev_info->stats_info.tstats.address = 0;
- pfdev_info->stats_info.tstats.len = 0;
+ qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
@@ -1221,36 +1334,31 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
pfdev_info->fw_minor = FW_MINOR_VERSION;
pfdev_info->fw_rev = FW_REVISION_VERSION;
pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
+ pfdev_info->minor_fp_hsi = min_t(u8,
+ ETH_HSI_VER_MINOR,
+ req->vfdev_info.eth_fp_hsi_minor);
pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);
pfdev_info->dev_type = p_hwfn->cdev->type;
pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;
- resc->num_rxqs = vf->num_rxqs;
- resc->num_txqs = vf->num_txqs;
- resc->num_sbs = vf->num_sbs;
- for (i = 0; i < resc->num_sbs; i++) {
- resc->hw_sbs[i].hw_sb_id = vf->igu_sbs[i];
- resc->hw_sbs[i].sb_qid = 0;
- }
+ /* Fill resources available to VF; Make sure there are enough to
+ * satisfy the VF's request.
+ */
+ vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
+ &req->resc_request, resc);
+ if (vfpf_status != PFVF_STATUS_SUCCESS)
+ goto out;
- for (i = 0; i < resc->num_rxqs; i++) {
- qed_fw_l2_queue(p_hwfn, vf->vf_queues[i].fw_rx_qid,
- (u16 *)&resc->hw_qid[i]);
- resc->cid[i] = vf->vf_queues[i].fw_cid;
+ /* Start the VF in FW */
+ rc = qed_sp_vf_start(p_hwfn, vf);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
+ vfpf_status = PFVF_STATUS_FAILURE;
+ goto out;
}
- resc->num_mac_filters = min_t(u8, vf->num_mac_filters,
- req->resc_request.num_mac_filters);
- resc->num_vlan_filters = min_t(u8, vf->num_vlan_filters,
- req->resc_request.num_vlan_filters);
-
- /* This isn't really required as VF isn't limited, but some VFs might
- * actually test this value, so need to provide it.
- */
- resc->num_mc_filters = req->resc_request.num_mc_filters;
-
/* Fill agreed size of bulletin board in response */
resp->bulletin_size = vf->bulletin.size;
qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
@@ -1296,7 +1404,7 @@ static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
params.anti_spoofing_en = val;
rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
- if (rc) {
+ if (!rc) {
p_vf->spoof_chk = val;
p_vf->req_spoofchk_val = p_vf->spoof_chk;
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
@@ -1585,10 +1693,6 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
sizeof(struct pfvf_def_resp_tlv), status);
}
-#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A
-#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
- (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
-
static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf, u8 status)
@@ -1606,16 +1710,11 @@ static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
/* Update the TLV with the response */
if (status == PFVF_STATUS_SUCCESS) {
- u16 hw_qid = 0;
-
req = &mbx->req_virt->start_rxq;
- qed_fw_l2_queue(p_hwfn, vf->vf_queues[req->rx_qid].fw_rx_qid,
- &hw_qid);
-
- p_tlv->offset = MSTORM_QZONE_START(p_hwfn->cdev) +
- hw_qid * MSTORM_QZONE_SIZE +
- offsetof(struct mstorm_eth_queue_zone,
- rx_producers);
+ p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
+ offsetof(struct mstorm_vf_zone,
+ non_trigger.eth_rx_queue_producers) +
+ sizeof(struct eth_rx_prod_data) * req->rx_qid;
}
qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status);
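A note on the hunk above: the Rx producer for a VF queue now lives at a fixed per-VF offset inside the MSDM zone of the VF's own BAR0, computed purely from the queue index, instead of being derived from the PF-global hardware queue id as before. Spelled out (using only names visible in this patch; N is the requested rx_qid):

    /* Producer of VF rx-queue N, relative to the VF's BAR0 view */
    offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
             offsetof(struct mstorm_vf_zone,
                      non_trigger.eth_rx_queue_producers) +
             N * sizeof(struct eth_rx_prod_data);

This presumably also explains the VF-side change further down, where the producer init shrinks from a sizeof(u64) write to a sizeof(u32) write: struct eth_rx_prod_data is the per-queue unit being indexed here.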
@@ -1627,13 +1726,19 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
{
struct qed_queue_start_common_params params;
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
- u8 status = PFVF_STATUS_SUCCESS;
+ u8 status = PFVF_STATUS_NO_RESOURCE;
struct vfpf_start_rxq_tlv *req;
int rc;
memset(&params, 0, sizeof(params));
req = &mbx->req_virt->start_rxq;
+
+ if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
+ !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
+ goto out;
+
params.queue_id = vf->vf_queues[req->rx_qid].fw_rx_qid;
+ params.vf_qid = req->rx_qid;
params.vport_id = vf->vport_id;
params.sb = req->hw_sb;
params.sb_idx = req->sb_index;
@@ -1649,22 +1754,48 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
if (rc) {
status = PFVF_STATUS_FAILURE;
} else {
+ status = PFVF_STATUS_SUCCESS;
vf->vf_queues[req->rx_qid].rxq_active = true;
vf->num_active_rxqs++;
}
+out:
qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status);
}
+static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *p_vf, u8 status)
+{
+ struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct pfvf_start_queue_resp_tlv *p_tlv;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
+ sizeof(*p_tlv));
+ qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* Update the TLV with the response */
+ if (status == PFVF_STATUS_SUCCESS) {
+ u16 qid = mbx->req_virt->start_txq.tx_qid;
+
+ p_tlv->offset = qed_db_addr(p_vf->vf_queues[qid].fw_cid,
+ DQ_DEMS_LEGACY);
+ }
+
+ qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_tlv), status);
+}
+
static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *vf)
{
- u16 length = sizeof(struct pfvf_def_resp_tlv);
struct qed_queue_start_common_params params;
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ u8 status = PFVF_STATUS_NO_RESOURCE;
union qed_qm_pq_params pq_params;
- u8 status = PFVF_STATUS_SUCCESS;
struct vfpf_start_txq_tlv *req;
int rc;
@@ -1675,6 +1806,11 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
memset(&params, 0, sizeof(params));
req = &mbx->req_virt->start_txq;
+
+ if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
+ !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
+ goto out;
+
params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid;
params.vport_id = vf->vport_id;
params.sb = req->hw_sb;
@@ -1688,13 +1824,15 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
req->pbl_addr,
req->pbl_size, &pq_params);
- if (rc)
+ if (rc) {
status = PFVF_STATUS_FAILURE;
- else
+ } else {
+ status = PFVF_STATUS_SUCCESS;
vf->vf_queues[req->tx_qid].txq_active = true;
+ }
- qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_TXQ,
- length, status);
+out:
+ qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status);
}
static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
@@ -2119,6 +2257,16 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
u16 length;
int rc;
+ /* Validate the VF can issue such a request */
+ if (!vf->vport_instance) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "No VPORT instance available for VF[%d], failing vport update\n",
+ vf->abs_vf_id);
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
memset(&params, 0, sizeof(params));
params.opaque_fid = vf->opaque_fid;
params.vport_id = vf->vport_id;
@@ -2161,15 +2309,12 @@ out:
qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}
-static int qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
- struct qed_vf_info *p_vf,
- struct qed_filter_ucast *p_params)
+static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf,
+ struct qed_filter_ucast *p_params)
{
int i;
- if (p_params->type == QED_FILTER_MAC)
- return 0;
-
/* First remove entries and then add new ones */
if (p_params->opcode == QED_FILTER_REMOVE) {
for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
@@ -2222,6 +2367,80 @@ static int qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
return 0;
}
+static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf,
+ struct qed_filter_ucast *p_params)
+{
+ int i;
+
+ /* If we're in forced-mode, we don't allow any change */
+ if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
+ return 0;
+
+ /* First remove entries and then add new ones */
+ if (p_params->opcode == QED_FILTER_REMOVE) {
+ for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
+ if (ether_addr_equal(p_vf->shadow_config.macs[i],
+ p_params->mac)) {
+ memset(p_vf->shadow_config.macs[i], 0,
+ ETH_ALEN);
+ break;
+ }
+ }
+
+ if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "MAC isn't configured\n");
+ return -EINVAL;
+ }
+ } else if (p_params->opcode == QED_FILTER_REPLACE ||
+ p_params->opcode == QED_FILTER_FLUSH) {
+ for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++)
+ memset(p_vf->shadow_config.macs[i], 0, ETH_ALEN);
+ }
+
+ /* List the new MAC address */
+ if (p_params->opcode != QED_FILTER_ADD &&
+ p_params->opcode != QED_FILTER_REPLACE)
+ return 0;
+
+ for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
+ if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) {
+ ether_addr_copy(p_vf->shadow_config.macs[i],
+ p_params->mac);
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Added MAC at %d entry in shadow\n", i);
+ break;
+ }
+ }
+
+ if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf,
+ struct qed_filter_ucast *p_params)
+{
+ int rc = 0;
+
+ if (p_params->type == QED_FILTER_MAC) {
+ rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
+ if (rc)
+ return rc;
+ }
+
+ if (p_params->type == QED_FILTER_VLAN)
+ rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
+
+ return rc;
+}
+
int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
int vfid, struct qed_filter_ucast *params)
{
@@ -2366,11 +2585,27 @@ static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
struct qed_vf_info *p_vf)
{
u16 length = sizeof(struct pfvf_def_resp_tlv);
+ u8 status = PFVF_STATUS_SUCCESS;
+ int rc = 0;
qed_iov_vf_cleanup(p_hwfn, p_vf);
+ if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
+ /* Stopping the VF */
+ rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
+ p_vf->opaque_fid);
+
+ if (rc) {
+ DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
+ rc);
+ status = PFVF_STATUS_FAILURE;
+ }
+
+ p_vf->state = VF_STOPPED;
+ }
+
qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
- length, PFVF_STATUS_SUCCESS);
+ length, status);
}
static int
@@ -2622,7 +2857,6 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
{
struct qed_iov_vf_mbx *mbx;
struct qed_vf_info *p_vf;
- int i;
p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
if (!p_vf)
@@ -2631,9 +2865,8 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
mbx = &p_vf->vf_mbx;
/* qed_iov_process_mbx_request */
- DP_VERBOSE(p_hwfn,
- QED_MSG_IOV,
- "qed_iov_process_mbx_req vfid %d\n", p_vf->abs_vf_id);
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);
mbx->first_tlv = mbx->req_virt->first_tlv;
@@ -2687,15 +2920,28 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
* support them. Or this may be because someone wrote a crappy
* VF driver and is sending garbage over the channel.
*/
- DP_ERR(p_hwfn,
- "unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
- mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);
-
- for (i = 0; i < 20; i++) {
+ DP_NOTICE(p_hwfn,
+ "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n",
+ p_vf->abs_vf_id,
+ mbx->first_tlv.tl.type,
+ mbx->first_tlv.tl.length,
+ mbx->first_tlv.padding, mbx->first_tlv.reply_address);
+
+ /* Try replying in case reply address matches the acquisition's
+ * posted address.
+ */
+ if (p_vf->acquire.first_tlv.reply_address &&
+ (mbx->first_tlv.reply_address ==
+ p_vf->acquire.first_tlv.reply_address)) {
+ qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
+ mbx->first_tlv.tl.type,
+ sizeof(struct pfvf_def_resp_tlv),
+ PFVF_STATUS_NOT_SUPPORTED);
+ } else {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
- "%x ",
- mbx->req_virt->tlv_buf_size.tlv_buffer[i]);
+ "VF[%02x]: Can't respond to TLV - no valid reply address\n",
+ p_vf->abs_vf_id);
}
}
}
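Design note on the unknown-TLV path above: the PF now replies with PFVF_STATUS_NOT_SUPPORTED only when the reply address in the offending message matches the address recorded from the VF's original ACQUIRE. Replying unconditionally would mean DMA-writing a response to an arbitrary VF-supplied address, so the saved acquire TLV (added to struct qed_vf_info below) doubles as the trust anchor; when the addresses differ, the PF stays silent and merely logs the event.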
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index c90b2b6ad..0dd23e409 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -10,6 +10,9 @@
#define _QED_SRIOV_H
#include <linux/types.h>
#include "qed_vf.h"
+
+#define QED_ETH_VF_NUM_MAC_FILTERS 1
+#define QED_ETH_VF_NUM_VLAN_FILTERS 2
#define QED_VF_ARRAY_LENGTH (3)
#ifdef CONFIG_QED_SRIOV
@@ -24,7 +27,6 @@
#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info))
#define QED_MAX_VF_CHAINS_PER_PF 16
-#define QED_ETH_VF_NUM_VLAN_FILTERS 2
#define QED_ETH_MAX_VF_NUM_VLAN_FILTERS \
(MAX_NUM_VFS * QED_ETH_VF_NUM_VLAN_FILTERS)
@@ -120,6 +122,8 @@ struct qed_vf_shadow_config {
/* Shadow copy of all guest vlans */
struct qed_vf_vlan_shadow vlans[QED_ETH_VF_NUM_VLAN_FILTERS + 1];
+ /* Shadow copy of all configured MACs; Empty if forcing MACs */
+ u8 macs[QED_ETH_VF_NUM_MAC_FILTERS][ETH_ALEN];
u8 inner_vlan_removal;
};
@@ -133,6 +137,9 @@ struct qed_vf_info {
struct qed_bulletin bulletin;
dma_addr_t vf_bulletin;
+ /* PF saves a copy of the last VF acquire message */
+ struct vfpf_acquire_tlv acquire;
+
u32 concrete_fid;
u16 opaque_fid;
u16 mtu;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 72e69c0ec..9b780b31b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -117,36 +117,64 @@ exit:
}
#define VF_ACQUIRE_THRESH 3
-#define VF_ACQUIRE_MAC_FILTERS 1
+static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
+ struct vf_pf_resc_request *p_req,
+ struct pf_vf_resc *p_resp)
+{
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "PF unwilling to fullill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]. Try PF recommended amount\n",
+ p_req->num_rxqs,
+ p_resp->num_rxqs,
+ p_req->num_txqs,
+ p_resp->num_txqs,
+ p_req->num_sbs,
+ p_resp->num_sbs,
+ p_req->num_mac_filters,
+ p_resp->num_mac_filters,
+ p_req->num_vlan_filters,
+ p_resp->num_vlan_filters,
+ p_req->num_mc_filters, p_resp->num_mc_filters);
+
+ /* humble our request */
+ p_req->num_txqs = p_resp->num_txqs;
+ p_req->num_rxqs = p_resp->num_rxqs;
+ p_req->num_sbs = p_resp->num_sbs;
+ p_req->num_mac_filters = p_resp->num_mac_filters;
+ p_req->num_vlan_filters = p_resp->num_vlan_filters;
+ p_req->num_mc_filters = p_resp->num_mc_filters;
+}
static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
- u8 rx_count = 1, tx_count = 1, num_sbs = 1;
- u8 num_mac = VF_ACQUIRE_MAC_FILTERS;
+ struct vf_pf_resc_request *p_resc;
bool resources_acquired = false;
struct vfpf_acquire_tlv *req;
int rc = 0, attempts = 0;
/* clear mailbox and prep first tlv */
req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
+ p_resc = &req->resc_request;
/* starting filling the request */
req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;
- req->resc_request.num_rxqs = rx_count;
- req->resc_request.num_txqs = tx_count;
- req->resc_request.num_sbs = num_sbs;
- req->resc_request.num_mac_filters = num_mac;
- req->resc_request.num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
+ p_resc->num_rxqs = QED_MAX_VF_CHAINS_PER_PF;
+ p_resc->num_txqs = QED_MAX_VF_CHAINS_PER_PF;
+ p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF;
+ p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
+ p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
req->vfdev_info.fw_major = FW_MAJOR_VERSION;
req->vfdev_info.fw_minor = FW_MINOR_VERSION;
req->vfdev_info.fw_revision = FW_REVISION_VERSION;
req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
+ req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
+ req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;
/* Fill capability field with any non-deprecated config we support */
req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;
@@ -185,21 +213,21 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
resources_acquired = true;
} else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
attempts < VF_ACQUIRE_THRESH) {
- DP_VERBOSE(p_hwfn,
- QED_MSG_IOV,
- "PF unwilling to fullfill resource request. Try PF recommended amount\n");
-
- /* humble our request */
- req->resc_request.num_txqs = resp->resc.num_txqs;
- req->resc_request.num_rxqs = resp->resc.num_rxqs;
- req->resc_request.num_sbs = resp->resc.num_sbs;
- req->resc_request.num_mac_filters =
- resp->resc.num_mac_filters;
- req->resc_request.num_vlan_filters =
- resp->resc.num_vlan_filters;
+ qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
+ &resp->resc);
/* Clear response buffer */
memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
+ } else if ((resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) &&
+ pfdev_info->major_fp_hsi &&
+ (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
+ DP_NOTICE(p_hwfn,
+ "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
+ pfdev_info->major_fp_hsi,
+ pfdev_info->minor_fp_hsi,
+ ETH_HSI_VER_MAJOR,
+ ETH_HSI_VER_MINOR, pfdev_info->major_fp_hsi);
+ return -EINVAL;
} else {
DP_ERR(p_hwfn,
"PF returned error %d to VF acquisition request\n",
@@ -225,6 +253,13 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
}
}
+ if (ETH_HSI_VER_MINOR &&
+ (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
+ DP_INFO(p_hwfn,
+ "PF is using older fastpath HSI; %02x.%02x is configured\n",
+ ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi);
+ }
+
return 0;
}
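Taken together, the hunks above turn VF acquisition into a short negotiation. The resulting control flow looks roughly like this (a condensed sketch, not the literal driver code: the mailbox send and some error handling are elided, and the return values are illustrative):

    /* Sketch of qed_vf_pf_acquire() after this patch */
    while (!resources_acquired) {
            /* send CHANNEL_TLV_ACQUIRE, receive resp (elided) */

            if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
                    resources_acquired = true;
            } else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
                       attempts < VF_ACQUIRE_THRESH) {
                    /* adopt the PF-recommended amounts and retry */
                    qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
                                                  &resp->resc);
            } else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED &&
                       pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR) {
                    return -EINVAL; /* fastpath HSI majors disagree */
            } else {
                    return -EAGAIN; /* PF returned a hard error */
            }
    }

Note the asymmetry: a major HSI mismatch is fatal, while a PF running an older minor HSI is merely reported via DP_INFO and accepted.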
@@ -353,7 +388,7 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
/* Learn the address of the producer from the response */
if (pp_prod) {
- u64 init_prod_val = 0;
+ u32 init_prod_val = 0;
*pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
@@ -361,7 +396,7 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
rx_qid, *pp_prod, resp->offset);
/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
- __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
+ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
(u32 *)&init_prod_val);
}
@@ -405,8 +440,8 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
u16 pbl_size, void __iomem **pp_doorbell)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_start_queue_resp_tlv *resp;
struct vfpf_start_txq_tlv *req;
- struct pfvf_def_resp_tlv *resp;
int rc;
/* clear mailbox and prep first tlv */
@@ -424,20 +459,24 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
qed_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
- resp = &p_iov->pf2vf_reply->default_resp;
+ resp = &p_iov->pf2vf_reply->queue_start;
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return -EINVAL;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
if (pp_doorbell) {
- u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];
+ *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset;
- *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
- qed_db_addr(cid, DQ_DEMS_LEGACY);
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
+ tx_queue_id, *pp_doorbell, resp->offset);
}
+exit:
return rc;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
index b82fda964..b23ce58e9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -96,7 +96,9 @@ struct vfpf_acquire_tlv {
u32 driver_version;
u16 opaque_fid; /* ME register value */
u8 os_type; /* VFPF_ACQUIRE_OS_* value */
- u8 padding[5];
+ u8 eth_fp_hsi_major;
+ u8 eth_fp_hsi_minor;
+ u8 padding[3];
} vfdev_info;
struct vf_pf_resc_request resc_request;
@@ -171,7 +173,14 @@ struct pfvf_acquire_resp_tlv {
struct pfvf_stats_info stats_info;
u8 port_mac[ETH_ALEN];
- u8 padding2[2];
+
+ /* It's possible PF had to configure an older fastpath HSI
+ * [in case VF is newer than PF]. This is communicated back
+ * to the VF. It can also be used in case of error due to
+ * non-matching versions to shed light in the VF about the failure.
+ */
+ u8 major_fp_hsi;
+ u8 minor_fp_hsi;
} pfdev_info;
struct pf_vf_resc {
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile
index 06ff90d87..74a49850d 100644
--- a/drivers/net/ethernet/qlogic/qede/Makefile
+++ b/drivers/net/ethernet/qlogic/qede/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_QEDE) := qede.o
qede-y := qede_main.o qede_ethtool.o
+qede-$(CONFIG_DCB) += qede_dcbnl.o
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 47d6b2225..02b06d4e4 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -24,7 +24,7 @@
#include <linux/qed/qed_eth_if.h>
#define QEDE_MAJOR_VERSION 8
-#define QEDE_MINOR_VERSION 7
+#define QEDE_MINOR_VERSION 10
#define QEDE_REVISION_VERSION 1
#define QEDE_ENGINEERING_VERSION 20
#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
@@ -143,6 +143,8 @@ struct qede_dev {
struct mutex qede_lock;
u32 state; /* Protected by qede_lock */
u16 rx_buf_size;
+ u32 rx_copybreak;
+
/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
#define ETH_OVERHEAD (ETH_HLEN + 8 + 8)
/* Max supported alignment is 256 (8 shift)
@@ -235,6 +237,7 @@ struct qede_rx_queue {
u64 rx_hw_errors;
u64 rx_alloc_errors;
+ u64 rx_ip_frags;
};
union db_prod {
@@ -304,6 +307,9 @@ union qede_reload_args {
u16 mtu;
};
+#ifdef CONFIG_DCB
+void qede_set_dcbnl_ops(struct net_device *ndev);
+#endif
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
void qede_set_ethtool_ops(struct net_device *netdev);
void qede_reload(struct qede_dev *edev,
@@ -329,6 +335,7 @@ void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
#define NUM_TX_BDS_MIN 128
#define NUM_TX_BDS_DEF NUM_TX_BDS_MAX
+#define QEDE_MIN_PKT_LEN 64
#define QEDE_RX_HDR_SIZE 256
#define for_each_rss(i) for (i = 0; i < edev->num_rss; i++)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
new file mode 100644
index 000000000..03e8c0212
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
@@ -0,0 +1,348 @@
+/* QLogic qede NIC Driver
+* Copyright (c) 2015 QLogic Corporation
+*
+* This software is available under the terms of the GNU General Public License
+* (GPL) Version 2, available from the file COPYING in the main directory of
+* this source tree.
+*/
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <net/dcbnl.h>
+#include "qede.h"
+
+static u8 qede_dcbnl_getstate(struct net_device *netdev)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getstate(edev->cdev);
+}
+
+static u8 qede_dcbnl_setstate(struct net_device *netdev, u8 state)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setstate(edev->cdev, state);
+}
+
+static void qede_dcbnl_getpermhwaddr(struct net_device *netdev,
+ u8 *perm_addr)
+{
+ memcpy(perm_addr, netdev->dev_addr, netdev->addr_len);
+}
+
+static void qede_dcbnl_getpgtccfgtx(struct net_device *netdev, int prio,
+ u8 *prio_type, u8 *pgid, u8 *bw_pct,
+ u8 *up_map)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->getpgtccfgtx(edev->cdev, prio, prio_type,
+ pgid, bw_pct, up_map);
+}
+
+static void qede_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
+ int pgid, u8 *bw_pct)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->getpgbwgcfgtx(edev->cdev, pgid, bw_pct);
+}
+
+static void qede_dcbnl_getpgtccfgrx(struct net_device *netdev, int prio,
+ u8 *prio_type, u8 *pgid, u8 *bw_pct,
+ u8 *up_map)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->getpgtccfgrx(edev->cdev, prio, prio_type, pgid, bw_pct,
+ up_map);
+}
+
+static void qede_dcbnl_getpgbwgcfgrx(struct net_device *netdev,
+ int pgid, u8 *bw_pct)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->getpgbwgcfgrx(edev->cdev, pgid, bw_pct);
+}
+
+static void qede_dcbnl_getpfccfg(struct net_device *netdev, int prio,
+ u8 *setting)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->getpfccfg(edev->cdev, prio, setting);
+}
+
+static void qede_dcbnl_setpfccfg(struct net_device *netdev, int prio,
+ u8 setting)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->setpfccfg(edev->cdev, prio, setting);
+}
+
+static u8 qede_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getcap(edev->cdev, capid, cap);
+}
+
+static int qede_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getnumtcs(edev->cdev, tcid, num);
+}
+
+static u8 qede_dcbnl_getpfcstate(struct net_device *netdev)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getpfcstate(edev->cdev);
+}
+
+static int qede_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getapp(edev->cdev, idtype, id);
+}
+
+static u8 qede_dcbnl_getdcbx(struct net_device *netdev)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getdcbx(edev->cdev);
+}
+
+static void qede_dcbnl_setpgtccfgtx(struct net_device *netdev, int prio,
+ u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setpgtccfgtx(edev->cdev, prio, pri_type, pgid,
+ bw_pct, up_map);
+}
+
+static void qede_dcbnl_setpgtccfgrx(struct net_device *netdev, int prio,
+ u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setpgtccfgrx(edev->cdev, prio, pri_type, pgid,
+ bw_pct, up_map);
+}
+
+static void qede_dcbnl_setpgbwgcfgtx(struct net_device *netdev, int pgid,
+ u8 bw_pct)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setpgbwgcfgtx(edev->cdev, pgid, bw_pct);
+}
+
+static void qede_dcbnl_setpgbwgcfgrx(struct net_device *netdev, int pgid,
+ u8 bw_pct)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setpgbwgcfgrx(edev->cdev, pgid, bw_pct);
+}
+
+static u8 qede_dcbnl_setall(struct net_device *netdev)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setall(edev->cdev);
+}
+
+static int qede_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setnumtcs(edev->cdev, tcid, num);
+}
+
+static void qede_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setpfcstate(edev->cdev, state);
+}
+
+static int qede_dcbnl_setapp(struct net_device *netdev, u8 idtype, u16 idval,
+ u8 up)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setapp(edev->cdev, idtype, idval, up);
+}
+
+static u8 qede_dcbnl_setdcbx(struct net_device *netdev, u8 state)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setdcbx(edev->cdev, state);
+}
+
+static u8 qede_dcbnl_getfeatcfg(struct net_device *netdev, int featid,
+ u8 *flags)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getfeatcfg(edev->cdev, featid, flags);
+}
+
+static u8 qede_dcbnl_setfeatcfg(struct net_device *netdev, int featid, u8 flags)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setfeatcfg(edev->cdev, featid, flags);
+}
+
+static int qede_dcbnl_peer_getappinfo(struct net_device *netdev,
+ struct dcb_peer_app_info *info,
+ u16 *count)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->peer_getappinfo(edev->cdev, info, count);
+}
+
+static int qede_dcbnl_peer_getapptable(struct net_device *netdev,
+ struct dcb_app *app)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->peer_getapptable(edev->cdev, app);
+}
+
+static int qede_dcbnl_cee_peer_getpfc(struct net_device *netdev,
+ struct cee_pfc *pfc)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->cee_peer_getpfc(edev->cdev, pfc);
+}
+
+static int qede_dcbnl_cee_peer_getpg(struct net_device *netdev,
+ struct cee_pg *pg)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->cee_peer_getpg(edev->cdev, pg);
+}
+
+static int qede_dcbnl_ieee_getpfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_getpfc(edev->cdev, pfc);
+}
+
+static int qede_dcbnl_ieee_setpfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_setpfc(edev->cdev, pfc);
+}
+
+static int qede_dcbnl_ieee_getets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_getets(edev->cdev, ets);
+}
+
+static int qede_dcbnl_ieee_setets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_setets(edev->cdev, ets);
+}
+
+static int qede_dcbnl_ieee_getapp(struct net_device *netdev,
+ struct dcb_app *app)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_getapp(edev->cdev, app);
+}
+
+static int qede_dcbnl_ieee_setapp(struct net_device *netdev,
+ struct dcb_app *app)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_setapp(edev->cdev, app);
+}
+
+static int qede_dcbnl_ieee_peer_getpfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_peer_getpfc(edev->cdev, pfc);
+}
+
+static int qede_dcbnl_ieee_peer_getets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_peer_getets(edev->cdev, ets);
+}
+
+static const struct dcbnl_rtnl_ops qede_dcbnl_ops = {
+ .ieee_getpfc = qede_dcbnl_ieee_getpfc,
+ .ieee_setpfc = qede_dcbnl_ieee_setpfc,
+ .ieee_getets = qede_dcbnl_ieee_getets,
+ .ieee_setets = qede_dcbnl_ieee_setets,
+ .ieee_getapp = qede_dcbnl_ieee_getapp,
+ .ieee_setapp = qede_dcbnl_ieee_setapp,
+ .getdcbx = qede_dcbnl_getdcbx,
+ .ieee_peer_getpfc = qede_dcbnl_ieee_peer_getpfc,
+ .ieee_peer_getets = qede_dcbnl_ieee_peer_getets,
+ .getstate = qede_dcbnl_getstate,
+ .setstate = qede_dcbnl_setstate,
+ .getpermhwaddr = qede_dcbnl_getpermhwaddr,
+ .getpgtccfgtx = qede_dcbnl_getpgtccfgtx,
+ .getpgbwgcfgtx = qede_dcbnl_getpgbwgcfgtx,
+ .getpgtccfgrx = qede_dcbnl_getpgtccfgrx,
+ .getpgbwgcfgrx = qede_dcbnl_getpgbwgcfgrx,
+ .getpfccfg = qede_dcbnl_getpfccfg,
+ .setpfccfg = qede_dcbnl_setpfccfg,
+ .getcap = qede_dcbnl_getcap,
+ .getnumtcs = qede_dcbnl_getnumtcs,
+ .getpfcstate = qede_dcbnl_getpfcstate,
+ .getapp = qede_dcbnl_getapp,
+ .setpgtccfgtx = qede_dcbnl_setpgtccfgtx,
+ .setpgtccfgrx = qede_dcbnl_setpgtccfgrx,
+ .setpgbwgcfgtx = qede_dcbnl_setpgbwgcfgtx,
+ .setpgbwgcfgrx = qede_dcbnl_setpgbwgcfgrx,
+ .setall = qede_dcbnl_setall,
+ .setnumtcs = qede_dcbnl_setnumtcs,
+ .setpfcstate = qede_dcbnl_setpfcstate,
+ .setapp = qede_dcbnl_setapp,
+ .setdcbx = qede_dcbnl_setdcbx,
+ .setfeatcfg = qede_dcbnl_setfeatcfg,
+ .getfeatcfg = qede_dcbnl_getfeatcfg,
+ .peer_getappinfo = qede_dcbnl_peer_getappinfo,
+ .peer_getapptable = qede_dcbnl_peer_getapptable,
+ .cee_peer_getpfc = qede_dcbnl_cee_peer_getpfc,
+ .cee_peer_getpg = qede_dcbnl_cee_peer_getpg,
+};
+
+void qede_set_dcbnl_ops(struct net_device *dev)
+{
+ dev->dcbnl_ops = &qede_dcbnl_ops;
+}
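The new qede_dcbnl.c above is deliberately thin glue: every dcbnl callback forwards to the matching edev->ops->dcb handler exported by the qed core, covering both the CEE-style and the IEEE 802.1Qaz entry points. It is wired up only for PFs, via the CONFIG_DCB hunk in __qede_probe() further down, so VFs never expose DCB configuration.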
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index ad3cae3b7..f8492cac9 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -37,6 +37,7 @@ static const struct {
} qede_rqstats_arr[] = {
QEDE_RQSTAT(rx_hw_errors),
QEDE_RQSTAT(rx_alloc_errors),
+ QEDE_RQSTAT(rx_ip_frags),
};
#define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr)
@@ -426,6 +427,59 @@ static u32 qede_get_link(struct net_device *dev)
return current_link.link_up;
}
+static int qede_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u16 rxc, txc;
+
+ memset(coal, 0, sizeof(struct ethtool_coalesce));
+ edev->ops->common->get_coalesce(edev->cdev, &rxc, &txc);
+
+ coal->rx_coalesce_usecs = rxc;
+ coal->tx_coalesce_usecs = txc;
+
+ return 0;
+}
+
+static int qede_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ int i, rc = 0;
+ u16 rxc, txc;
+ u8 sb_id;
+
+ if (!netif_running(dev)) {
+ DP_INFO(edev, "Interface is down\n");
+ return -EINVAL;
+ }
+
+ if (coal->rx_coalesce_usecs > QED_COALESCE_MAX ||
+ coal->tx_coalesce_usecs > QED_COALESCE_MAX) {
+ DP_INFO(edev,
+ "Can't support requested %s coalesce value [max supported value %d]\n",
+ coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx"
+ : "tx",
+ QED_COALESCE_MAX);
+ return -EINVAL;
+ }
+
+ rxc = (u16)coal->rx_coalesce_usecs;
+ txc = (u16)coal->tx_coalesce_usecs;
+ for_each_rss(i) {
+ sb_id = edev->fp_array[i].sb_info->igu_sb_id;
+ rc = edev->ops->common->set_coalesce(edev->cdev, rxc, txc,
+ (u8)i, sb_id);
+ if (rc) {
+ DP_INFO(edev, "Set coalesce error, rc = %d\n", rc);
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
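These two callbacks back the standard ETHTOOL_GCOALESCE/ETHTOOL_SCOALESCE requests, so interrupt moderation can now be driven from userspace with something like "ethtool -C ethX rx-usecs 24 tx-usecs 48" (interface name and values purely illustrative). The set path clamps both values to QED_COALESCE_MAX and applies them per RSS queue through each queue's status-block id.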
static void qede_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *ering)
{
@@ -910,6 +964,8 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
memset(first_bd, 0, sizeof(*first_bd));
val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
first_bd->data.bd_flags.bitfields = val;
+ val = skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK;
+ first_bd->data.bitfields |= (val << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
/* Map skb linear data for DMA and set in the first BD */
mapping = dma_map_single(&edev->pdev->dev, skb->data,
@@ -1129,6 +1185,48 @@ static void qede_self_test(struct net_device *dev,
}
}
+static int qede_set_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u32 val;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ val = *(u32 *)data;
+ if (val < QEDE_MIN_PKT_LEN || val > QEDE_RX_HDR_SIZE) {
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Invalid rx copy break value, range is [%u, %u]",
+ QEDE_MIN_PKT_LEN, QEDE_RX_HDR_SIZE);
+ return -EINVAL;
+ }
+
+ edev->rx_copybreak = *(u32 *)data;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int qede_get_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna, void *data)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = edev->rx_copybreak;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
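The tunable pair above exposes ETHTOOL_RX_COPYBREAK, settable from userspace with "ethtool --set-tunable <dev> rx-copybreak <bytes>" and bounded to [QEDE_MIN_PKT_LEN, QEDE_RX_HDR_SIZE]. The Rx fast path consumes the value later in this patch; a minimal sketch of that consumer, with reuse_rx_page() standing in for the existing page-recycling code:

    /* Frames at or below the copybreak are memcpy'd into a fresh skb so
     * the DMA page can be recycled immediately; larger frames keep the
     * page attached as an skb fragment instead.
     */
    if (len + pad <= edev->rx_copybreak) {
            memcpy(skb_put(skb, len),
                   page_address(data) + pad + sw_rx_data->page_offset,
                   len);
            reuse_rx_page(rxq, sw_rx_data); /* hypothetical helper */
    } else {
            /* map the page into the skb (existing path, elided) */
    }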
static const struct ethtool_ops qede_ethtool_ops = {
.get_settings = qede_get_settings,
.set_settings = qede_set_settings,
@@ -1137,6 +1235,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
.set_msglevel = qede_set_msglevel,
.nway_reset = qede_nway_reset,
.get_link = qede_get_link,
+ .get_coalesce = qede_get_coalesce,
+ .set_coalesce = qede_set_coalesce,
.get_ringparam = qede_get_ringparam,
.set_ringparam = qede_set_ringparam,
.get_pauseparam = qede_get_pauseparam,
@@ -1155,6 +1255,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
.get_channels = qede_get_channels,
.set_channels = qede_set_channels,
.self_test = qede_self_test,
+ .get_tunable = qede_get_tunable,
+ .set_tunable = qede_set_tunable,
};
static const struct ethtool_ops qede_vf_ethtool_ops = {
@@ -1177,6 +1279,8 @@ static const struct ethtool_ops qede_vf_ethtool_ops = {
.set_rxfh = qede_set_rxfh,
.get_channels = qede_get_channels,
.set_channels = qede_set_channels,
+ .get_tunable = qede_get_tunable,
+ .set_tunable = qede_set_tunable,
};
void qede_set_ethtool_ops(struct net_device *dev)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index f8e11f953..9544e4c41 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -24,12 +24,7 @@
#include <linux/netdev_features.h>
#include <linux/udp.h>
#include <linux/tcp.h>
-#ifdef CONFIG_QEDE_VXLAN
-#include <net/vxlan.h>
-#endif
-#ifdef CONFIG_QEDE_GENEVE
-#include <net/geneve.h>
-#endif
+#include <net/udp_tunnel.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
@@ -490,6 +485,24 @@ static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
}
#endif
+static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
+{
+ /* wmb makes sure that the BDs data is updated before updating the
+ * producer, otherwise FW may read old data from the BDs.
+ */
+ wmb();
+ barrier();
+ writel(txq->tx_db.raw, txq->doorbell_addr);
+
+ /* mmiowb is needed to synchronize doorbell writes from more than one
+ * processor. It guarantees that the write arrives to the device before
+ * the queue lock is released and another start_xmit is called (possibly
+ * on another CPU). Without this barrier, the next doorbell can bypass
+ * this doorbell. This is applicable to IA64/Altix systems.
+ */
+ mmiowb();
+}
+
/* Main transmit function */
static
netdev_tx_t qede_start_xmit(struct sk_buff *skb,
@@ -548,6 +561,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
DP_NOTICE(edev, "SKB mapping failed\n");
qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false);
+ qede_update_tx_producer(txq);
return NETDEV_TX_OK;
}
nbd++;
@@ -579,8 +593,6 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
/* Fill the parsing flags & params according to the requested offload */
if (xmit_type & XMIT_L4_CSUM) {
- u16 temp = 1 << ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT;
-
/* We don't re-calculate IP checksum as it is already done by
* the upper stack
*/
@@ -590,14 +602,8 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
if (xmit_type & XMIT_ENC) {
first_bd->data.bd_flags.bitfields |=
1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
- } else {
- /* In cases when OS doesn't indicate for inner offloads
- * when packet is tunnelled, we need to override the HW
- * tunnel configuration so that packets are treated as
- * regular non tunnelled packets and no inner offloads
- * are done by the hardware.
- */
- first_bd->data.bitfields |= cpu_to_le16(temp);
+ first_bd->data.bitfields |=
+ 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
}
/* If the packet is IPv6 with extension header, indicate that
@@ -655,6 +661,10 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
tx_data_bd = (struct eth_tx_bd *)third_bd;
data_split = true;
}
+ } else {
+ first_bd->data.bitfields |=
+ (skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+ ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
}
/* Handle fragmented skb */
@@ -666,6 +676,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
if (rc) {
qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
data_split);
+ qede_update_tx_producer(txq);
return NETDEV_TX_OK;
}
@@ -690,6 +701,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
if (rc) {
qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
data_split);
+ qede_update_tx_producer(txq);
return NETDEV_TX_OK;
}
}
@@ -710,23 +722,14 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
txq->tx_db.data.bd_prod =
cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
- /* wmb makes sure that the BDs data is updated before updating the
- * producer, otherwise FW may read old data from the BDs.
- */
- wmb();
- barrier();
- writel(txq->tx_db.raw, txq->doorbell_addr);
-
- /* mmiowb is needed to synchronize doorbell writes from more than one
- * processor. It guarantees that the write arrives to the device before
- * the queue lock is released and another start_xmit is called (possibly
- * on another CPU). Without this barrier, the next doorbell can bypass
- * this doorbell. This is applicable to IA64/Altix systems.
- */
- mmiowb();
+ if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
+ qede_update_tx_producer(txq);
if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
< (MAX_SKB_FRAGS + 1))) {
+ if (skb->xmit_more)
+ qede_update_tx_producer(txq);
+
netif_tx_stop_queue(netdev_txq);
DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
"Stop queue was called\n");
@@ -1357,6 +1360,20 @@ static u8 qede_check_csum(u16 flag)
return qede_check_tunn_csum(flag);
}
+static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
+ u16 flag)
+{
+ u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;
+
+ if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
+ ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
+ (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
+ PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
+ return true;
+
+ return false;
+}
+
static int qede_rx_int(struct qede_fastpath *fp, int budget)
{
struct qede_dev *edev = fp->edev;
@@ -1435,6 +1452,12 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
csum_flag = qede_check_csum(parse_flag);
if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
+ if (qede_pkt_is_ip_fragmented(&cqe->fast_path_regular,
+ parse_flag)) {
+ rxq->rx_ip_frags++;
+ goto alloc_skb;
+ }
+
DP_NOTICE(edev,
"CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
sw_comp_cons, parse_flag);
@@ -1443,6 +1466,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
goto next_cqe;
}
+alloc_skb:
skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
if (unlikely(!skb)) {
DP_NOTICE(edev,
@@ -1453,7 +1477,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
}
/* Copy data into SKB */
- if (len + pad <= QEDE_RX_HDR_SIZE) {
+ if (len + pad <= edev->rx_copybreak) {
memcpy(skb_put(skb, len),
page_address(data) + pad +
sw_rx_data->page_offset, len);
@@ -1585,56 +1609,49 @@ next_cqe: /* don't consume bd rx buffer */
static int qede_poll(struct napi_struct *napi, int budget)
{
- int work_done = 0;
struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
- napi);
+ napi);
struct qede_dev *edev = fp->edev;
+ int rx_work_done = 0;
+ u8 tc;
- while (1) {
- u8 tc;
-
- for (tc = 0; tc < edev->num_tc; tc++)
- if (qede_txq_has_work(&fp->txqs[tc]))
- qede_tx_int(edev, &fp->txqs[tc]);
-
- if (qede_has_rx_work(fp->rxq)) {
- work_done += qede_rx_int(fp, budget - work_done);
-
- /* must not complete if we consumed full budget */
- if (work_done >= budget)
- break;
- }
+ for (tc = 0; tc < edev->num_tc; tc++)
+ if (qede_txq_has_work(&fp->txqs[tc]))
+ qede_tx_int(edev, &fp->txqs[tc]);
+
+ rx_work_done = qede_has_rx_work(fp->rxq) ?
+ qede_rx_int(fp, budget) : 0;
+ if (rx_work_done < budget) {
+ qed_sb_update_sb_idx(fp->sb_info);
+ /* *_has_*_work() reads the status block,
+ * thus we need to ensure that status block indices
+ * have been actually read (qed_sb_update_sb_idx)
+ * prior to this check (*_has_*_work) so that
+ * we won't write the "newer" value of the status block
+ * to HW (if there was a DMA right after
+ * qede_has_rx_work and if there is no rmb, the memory
+ * reading (qed_sb_update_sb_idx) may be postponed
+ * to right before *_ack_sb). In this case there
+ * will never be another interrupt until there is
+ * another update of the status block, while there
+ * is still unhandled work.
+ */
+ rmb();
/* Fall out from the NAPI loop if needed */
- if (!(qede_has_rx_work(fp->rxq) || qede_has_tx_work(fp))) {
- qed_sb_update_sb_idx(fp->sb_info);
- /* *_has_*_work() reads the status block,
- * thus we need to ensure that status block indices
- * have been actually read (qed_sb_update_sb_idx)
- * prior to this check (*_has_*_work) so that
- * we won't write the "newer" value of the status block
- * to HW (if there was a DMA right after
- * qede_has_rx_work and if there is no rmb, the memory
- * reading (qed_sb_update_sb_idx) may be postponed
- * to right before *_ack_sb). In this case there
- * will never be another interrupt until there is
- * another update of the status block, while there
- * is still unhandled work.
- */
- rmb();
-
- if (!(qede_has_rx_work(fp->rxq) ||
- qede_has_tx_work(fp))) {
- napi_complete(napi);
- /* Update and reenable interrupts */
- qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
- 1 /*update*/);
- break;
- }
+ if (!(qede_has_rx_work(fp->rxq) ||
+ qede_has_tx_work(fp))) {
+ napi_complete(napi);
+
+ /* Update and reenable interrupts */
+ qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
+ 1 /*update*/);
+ } else {
+ rx_work_done = budget;
}
}
- return work_done;
+ return rx_work_done;
}
static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
@@ -2050,10 +2067,13 @@ static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
}
/* Remove vlan */
- rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL, vid);
- if (rc) {
- DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
- return -EINVAL;
+ if (vlan->configured) {
+ rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
+ vid);
+ if (rc) {
+ DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
+ return -EINVAL;
+ }
}
qede_del_vlan_from_list(edev, vlan);
@@ -2116,75 +2136,75 @@ int qede_set_features(struct net_device *dev, netdev_features_t features)
return 0;
}
-#ifdef CONFIG_QEDE_VXLAN
-static void qede_add_vxlan_port(struct net_device *dev,
- sa_family_t sa_family, __be16 port)
+static void qede_udp_tunnel_add(struct net_device *dev,
+ struct udp_tunnel_info *ti)
{
struct qede_dev *edev = netdev_priv(dev);
- u16 t_port = ntohs(port);
+ u16 t_port = ntohs(ti->port);
- if (edev->vxlan_dst_port)
- return;
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ if (edev->vxlan_dst_port)
+ return;
- edev->vxlan_dst_port = t_port;
+ edev->vxlan_dst_port = t_port;
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d", t_port);
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d",
+ t_port);
- set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
- schedule_delayed_work(&edev->sp_task, 0);
-}
+ set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (edev->geneve_dst_port)
+ return;
-static void qede_del_vxlan_port(struct net_device *dev,
- sa_family_t sa_family, __be16 port)
-{
- struct qede_dev *edev = netdev_priv(dev);
- u16 t_port = ntohs(port);
+ edev->geneve_dst_port = t_port;
- if (t_port != edev->vxlan_dst_port)
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d",
+ t_port);
+ set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+ break;
+ default:
return;
+ }
- edev->vxlan_dst_port = 0;
-
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d", t_port);
-
- set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
schedule_delayed_work(&edev->sp_task, 0);
}
-#endif
-#ifdef CONFIG_QEDE_GENEVE
-static void qede_add_geneve_port(struct net_device *dev,
- sa_family_t sa_family, __be16 port)
+static void qede_udp_tunnel_del(struct net_device *dev,
+ struct udp_tunnel_info *ti)
{
struct qede_dev *edev = netdev_priv(dev);
- u16 t_port = ntohs(port);
+ u16 t_port = ntohs(ti->port);
- if (edev->geneve_dst_port)
- return;
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ if (t_port != edev->vxlan_dst_port)
+ return;
- edev->geneve_dst_port = t_port;
+ edev->vxlan_dst_port = 0;
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d", t_port);
- set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
- schedule_delayed_work(&edev->sp_task, 0);
-}
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d",
+ t_port);
-static void qede_del_geneve_port(struct net_device *dev,
- sa_family_t sa_family, __be16 port)
-{
- struct qede_dev *edev = netdev_priv(dev);
- u16 t_port = ntohs(port);
+ set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (t_port != edev->geneve_dst_port)
+ return;
- if (t_port != edev->geneve_dst_port)
- return;
+ edev->geneve_dst_port = 0;
- edev->geneve_dst_port = 0;
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d",
+ t_port);
+ set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+ break;
+ default:
+ return;
+ }
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d", t_port);
- set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
schedule_delayed_work(&edev->sp_task, 0);
}
-#endif
static const struct net_device_ops qede_netdev_ops = {
.ndo_open = qede_open,
@@ -2208,14 +2228,8 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_get_vf_config = qede_get_vf_config,
.ndo_set_vf_rate = qede_set_vf_rate,
#endif
-#ifdef CONFIG_QEDE_VXLAN
- .ndo_add_vxlan_port = qede_add_vxlan_port,
- .ndo_del_vxlan_port = qede_del_vxlan_port,
-#endif
-#ifdef CONFIG_QEDE_GENEVE
- .ndo_add_geneve_port = qede_add_geneve_port,
- .ndo_del_geneve_port = qede_del_geneve_port,
-#endif
+ .ndo_udp_tunnel_add = qede_udp_tunnel_add,
+ .ndo_udp_tunnel_del = qede_udp_tunnel_del,
};
/* -------------------------------------------------------------------------
@@ -2505,8 +2519,14 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
edev->ops->register_ops(cdev, &qede_ll_ops, edev);
+#ifdef CONFIG_DCB
+ if (!IS_VF(edev))
+ qede_set_dcbnl_ops(edev->ndev);
+#endif
+
INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
mutex_init(&edev->qede_lock);
+ edev->rx_copybreak = QEDE_RX_HDR_SIZE;
DP_INFO(edev, "Ending successfully qede probe\n");
@@ -2823,6 +2843,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
rc = edev->ops->common->chain_alloc(edev->cdev,
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_NEXT_PTR,
+ QED_CHAIN_CNT_TYPE_U16,
RX_RING_SIZE,
sizeof(struct eth_rx_bd),
&rxq->rx_bd_ring);
@@ -2834,6 +2855,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
rc = edev->ops->common->chain_alloc(edev->cdev,
QED_CHAIN_USE_TO_CONSUME,
QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U16,
RX_RING_SIZE,
sizeof(union eth_rx_cqe),
&rxq->rx_comp_ring);
@@ -2885,9 +2907,9 @@ static int qede_alloc_mem_txq(struct qede_dev *edev,
rc = edev->ops->common->chain_alloc(edev->cdev,
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U16,
NUM_TX_BDS_MAX,
- sizeof(*p_virt),
- &txq->tx_pbl);
+ sizeof(*p_virt), &txq->tx_pbl);
if (rc)
goto err;
@@ -3253,6 +3275,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
start.vport_id = 0;
start.drop_ttl0 = true;
start.remove_inner_vlan = vlan_removal_en;
+ start.clear_stats = clear_stats;
rc = edev->ops->vport_start(cdev, &start);
@@ -3578,12 +3601,8 @@ static int qede_open(struct net_device *ndev)
if (rc)
return rc;
-#ifdef CONFIG_QEDE_VXLAN
- vxlan_get_rx_port(ndev);
-#endif
-#ifdef CONFIG_QEDE_GENEVE
- geneve_get_rx_port(ndev);
-#endif
+ udp_tunnel_get_rx_info(ndev);
+
return 0;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 94128abf2..18ebcc710 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -37,8 +37,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 64
-#define QLCNIC_LINUX_VERSIONID "5.3.64"
+#define _QLCNIC_LINUX_SUBVERSION 65
+#define QLCNIC_LINUX_VERSIONID "5.3.65"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -1026,10 +1026,8 @@ struct qlcnic_ipaddr {
#define QLCNIC_HAS_PHYS_PORT_ID 0x40000
#define QLCNIC_TSS_RSS 0x80000
-#ifdef CONFIG_QLCNIC_VXLAN
#define QLCNIC_ADD_VXLAN_PORT 0x100000
#define QLCNIC_DEL_VXLAN_PORT 0x200000
-#endif
#define QLCNIC_VLAN_FILTERING 0x800000
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index f9640d5ce..bdbcd2b08 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -2159,7 +2159,6 @@ int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
struct qlcnic_cmd_args cmd;
u32 mac_low, mac_high;
- function = 0;
err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
if (err)
return err;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 13dc6646a..74105c6d0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1020,7 +1020,6 @@ static int qlcnic_83xx_idc_check_state_validity(struct qlcnic_adapter *adapter,
return 0;
}
-#ifdef CONFIG_QLCNIC_VXLAN
#define QLC_83XX_ENCAP_TYPE_VXLAN BIT_1
#define QLC_83XX_MATCH_ENCAP_ID BIT_2
#define QLC_83XX_SET_VXLAN_UDP_DPORT BIT_3
@@ -1089,14 +1088,12 @@ static int qlcnic_set_vxlan_parsing(struct qlcnic_adapter *adapter,
return ret;
}
-#endif
static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter)
{
if (adapter->fhash.fnum)
qlcnic_prune_lb_filters(adapter);
-#ifdef CONFIG_QLCNIC_VXLAN
if (adapter->flags & QLCNIC_ADD_VXLAN_PORT) {
if (qlcnic_set_vxlan_port(adapter))
return;
@@ -1112,7 +1109,6 @@ static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter)
adapter->ahw->vxlan_port = 0;
adapter->flags &= ~QLCNIC_DEL_VXLAN_PORT;
}
-#endif
}
/**
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
index 9777e5713..f4aa6331b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
@@ -45,7 +45,6 @@ struct qlcnic_dcb {
static inline void qlcnic_clear_dcb_ops(struct qlcnic_dcb *dcb)
{
kfree(dcb);
- dcb = NULL;
}
static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 87c642d3b..fedd73667 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -102,7 +102,6 @@
#define QLCNIC_RESPONSE_DESC 0x05
#define QLCNIC_LRO_DESC 0x12
-#define QLCNIC_TX_POLL_BUDGET 128
#define QLCNIC_TCP_HDR_SIZE 20
#define QLCNIC_TCP_TS_OPTION_SIZE 12
#define QLCNIC_FETCH_RING_ID(handle) ((handle) >> 63)
@@ -2008,7 +2007,6 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_adapter *adapter;
- budget = QLCNIC_TX_POLL_BUDGET;
tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
adapter = tx_ring->adapter;
work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 9fedc8f62..6d03721a6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -16,9 +16,7 @@
#include <linux/aer.h>
#include <linux/log2.h>
#include <linux/pci.h>
-#ifdef CONFIG_QLCNIC_VXLAN
#include <net/vxlan.h>
-#endif
#include "qlcnic.h"
#include "qlcnic_sriov.h"
@@ -474,13 +472,15 @@ static int qlcnic_get_phys_port_id(struct net_device *netdev,
return 0;
}
-#ifdef CONFIG_QLCNIC_VXLAN
static void qlcnic_add_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+ struct udp_tunnel_info *ti)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
/* Adapter supports only one VXLAN port. Use very first port
* for enabling offload
*/
@@ -488,23 +488,26 @@ static void qlcnic_add_vxlan_port(struct net_device *netdev,
return;
if (!ahw->vxlan_port_count) {
ahw->vxlan_port_count = 1;
- ahw->vxlan_port = ntohs(port);
+ ahw->vxlan_port = ntohs(ti->port);
adapter->flags |= QLCNIC_ADD_VXLAN_PORT;
return;
}
- if (ahw->vxlan_port == ntohs(port))
+ if (ahw->vxlan_port == ntohs(ti->port))
ahw->vxlan_port_count++;
}
static void qlcnic_del_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+ struct udp_tunnel_info *ti)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port_count ||
- (ahw->vxlan_port != ntohs(port)))
+ (ahw->vxlan_port != ntohs(ti->port)))
return;
ahw->vxlan_port_count--;
@@ -519,7 +522,6 @@ static netdev_features_t qlcnic_features_check(struct sk_buff *skb,
features = vlan_features_check(skb, features);
return vxlan_features_check(skb, features);
}
-#endif
static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_open = qlcnic_open,
@@ -539,11 +541,9 @@ static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_fdb_del = qlcnic_fdb_del,
.ndo_fdb_dump = qlcnic_fdb_dump,
.ndo_get_phys_port_id = qlcnic_get_phys_port_id,
-#ifdef CONFIG_QLCNIC_VXLAN
- .ndo_add_vxlan_port = qlcnic_add_vxlan_port,
- .ndo_del_vxlan_port = qlcnic_del_vxlan_port,
+ .ndo_udp_tunnel_add = qlcnic_add_vxlan_port,
+ .ndo_udp_tunnel_del = qlcnic_del_vxlan_port,
.ndo_features_check = qlcnic_features_check,
-#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = qlcnic_poll_controller,
#endif
@@ -2015,10 +2015,8 @@ qlcnic_attach(struct qlcnic_adapter *adapter)
qlcnic_create_sysfs_entries(adapter);
-#ifdef CONFIG_QLCNIC_VXLAN
if (qlcnic_encap_rx_offload(adapter))
- vxlan_get_rx_port(netdev);
-#endif
+ udp_tunnel_get_rx_info(netdev);
adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
return 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 017d8c2c8..24061b9b9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -156,10 +156,8 @@ struct qlcnic_vf_info {
spinlock_t vlan_list_lock; /* Lock for VLAN list */
};
-struct qlcnic_async_work_list {
+struct qlcnic_async_cmd {
struct list_head list;
- struct work_struct work;
- void *ptr;
struct qlcnic_cmd_args *cmd;
};
@@ -168,7 +166,10 @@ struct qlcnic_back_channel {
struct workqueue_struct *bc_trans_wq;
struct workqueue_struct *bc_async_wq;
struct workqueue_struct *bc_flr_wq;
- struct list_head async_list;
+ struct qlcnic_adapter *adapter;
+ struct list_head async_cmd_list;
+ struct work_struct vf_async_work;
+ spinlock_t queue_lock; /* async_cmd_list queue lock */
};
struct qlcnic_sriov {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 7327b729b..d7107055e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -29,6 +29,7 @@
#define QLC_83XX_VF_RESET_FAIL_THRESH 8
#define QLC_BC_CMD_MAX_RETRY_CNT 5
+static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work);
static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *);
static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
@@ -177,7 +178,10 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
}
bc->bc_async_wq = wq;
- INIT_LIST_HEAD(&bc->async_list);
+ INIT_LIST_HEAD(&bc->async_cmd_list);
+ INIT_WORK(&bc->vf_async_work, qlcnic_sriov_handle_async_issue_cmd);
+ spin_lock_init(&bc->queue_lock);
+ bc->adapter = adapter;
for (i = 0; i < num_vfs; i++) {
vf = &sriov->vf_info[i];
@@ -1517,17 +1521,21 @@ static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac,
void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
{
- struct list_head *head = &bc->async_list;
- struct qlcnic_async_work_list *entry;
+ struct list_head *head = &bc->async_cmd_list;
+ struct qlcnic_async_cmd *entry;
flush_workqueue(bc->bc_async_wq);
+ cancel_work_sync(&bc->vf_async_work);
+
+ spin_lock(&bc->queue_lock);
while (!list_empty(head)) {
- entry = list_entry(head->next, struct qlcnic_async_work_list,
+ entry = list_entry(head->next, struct qlcnic_async_cmd,
list);
- cancel_work_sync(&entry->work);
list_del(&entry->list);
+ kfree(entry->cmd);
kfree(entry);
}
+ spin_unlock(&bc->queue_lock);
}
void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
@@ -1587,57 +1595,64 @@ void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work)
{
- struct qlcnic_async_work_list *entry;
- struct qlcnic_adapter *adapter;
+ struct qlcnic_async_cmd *entry, *tmp;
+ struct qlcnic_back_channel *bc;
struct qlcnic_cmd_args *cmd;
+ struct list_head *head;
+ LIST_HEAD(del_list);
+
+ bc = container_of(work, struct qlcnic_back_channel, vf_async_work);
+ head = &bc->async_cmd_list;
+
+ spin_lock(&bc->queue_lock);
+ list_splice_init(head, &del_list);
+ spin_unlock(&bc->queue_lock);
+
+ list_for_each_entry_safe(entry, tmp, &del_list, list) {
+ list_del(&entry->list);
+ cmd = entry->cmd;
+ __qlcnic_sriov_issue_cmd(bc->adapter, cmd);
+ kfree(entry);
+ }
+
+ if (!list_empty(head))
+ queue_work(bc->bc_async_wq, &bc->vf_async_work);
- entry = container_of(work, struct qlcnic_async_work_list, work);
- adapter = entry->ptr;
- cmd = entry->cmd;
- __qlcnic_sriov_issue_cmd(adapter, cmd);
return;
}
-static struct qlcnic_async_work_list *
-qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
+static struct qlcnic_async_cmd *
+qlcnic_sriov_alloc_async_cmd(struct qlcnic_back_channel *bc,
+ struct qlcnic_cmd_args *cmd)
{
- struct list_head *node;
- struct qlcnic_async_work_list *entry = NULL;
- u8 empty = 0;
+ struct qlcnic_async_cmd *entry = NULL;
- list_for_each(node, &bc->async_list) {
- entry = list_entry(node, struct qlcnic_async_work_list, list);
- if (!work_pending(&entry->work)) {
- empty = 1;
- break;
- }
- }
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry)
+ return NULL;
- if (!empty) {
- entry = kzalloc(sizeof(struct qlcnic_async_work_list),
- GFP_ATOMIC);
- if (entry == NULL)
- return NULL;
- list_add_tail(&entry->list, &bc->async_list);
- }
+ entry->cmd = cmd;
+
+ spin_lock(&bc->queue_lock);
+ list_add_tail(&entry->list, &bc->async_cmd_list);
+ spin_unlock(&bc->queue_lock);
return entry;
}
static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc,
- work_func_t func, void *data,
struct qlcnic_cmd_args *cmd)
{
- struct qlcnic_async_work_list *entry = NULL;
+ struct qlcnic_async_cmd *entry = NULL;
- entry = qlcnic_sriov_get_free_node_async_work(bc);
- if (!entry)
+ entry = qlcnic_sriov_alloc_async_cmd(bc, cmd);
+ if (!entry) {
+ qlcnic_free_mbx_args(cmd);
+ kfree(cmd);
return;
+ }
- entry->ptr = data;
- entry->cmd = cmd;
- INIT_WORK(&entry->work, func);
- queue_work(bc->bc_async_wq, &entry->work);
+ queue_work(bc->bc_async_wq, &bc->vf_async_work);
}
static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
@@ -1649,8 +1664,8 @@ static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
if (adapter->need_fw_reset)
return -EIO;
- qlcnic_sriov_schedule_async_cmd(bc, qlcnic_sriov_handle_async_issue_cmd,
- adapter, cmd);
+ qlcnic_sriov_schedule_async_cmd(bc, cmd);
+
return 0;
}
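The qlcnic_sriov rework above replaces one work_struct per queued command with a single work item draining a spinlock-protected list, which removes the work_pending() scan over recycled entries and turns cleanup into a splice-and-free. A boiled-down sketch of the pattern, all foo_* names hypothetical:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct foo_cmd {
	struct list_head list;
	int payload;
};

static LIST_HEAD(foo_cmd_list);
static DEFINE_SPINLOCK(foo_queue_lock);

static void foo_drain_cmds(struct work_struct *work)
{
	struct foo_cmd *entry, *tmp;
	LIST_HEAD(del_list);

	/* Detach the whole queue under the lock, then issue the
	 * commands without holding it, as the driver now does. */
	spin_lock(&foo_queue_lock);
	list_splice_init(&foo_cmd_list, &del_list);
	spin_unlock(&foo_queue_lock);

	list_for_each_entry_safe(entry, tmp, &del_list, list) {
		list_del(&entry->list);
		/* issue entry->payload to the device here */
		kfree(entry);
	}
}

Producers only allocate an entry, link it in under the lock, and queue_work() the single work item; note the driver above also re-queues itself if new entries arrived while it was draining.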
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index fd5d1c93b..fd4a8e473 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1892,7 +1892,6 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
skb->len += length;
skb->data_len += length;
skb->truesize += length;
- length -= length;
ql_update_mac_hdr_len(qdev, ib_mac_rsp,
lbq_desc->p.pg_chunk.va,
&hlen);
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 6b541e57c..cb29ee24c 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -4,7 +4,7 @@
* Copyright (C) 2004 Sten Wang <sten.wang@rdc.com.tw>
* Copyright (C) 2007
* Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>
- * Copyright (C) 2007-2012 Florian Fainelli <florian@openwrt.org>
+ * Copyright (C) 2007-2012 Florian Fainelli <f.fainelli@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -48,8 +48,8 @@
#include <asm/processor.h>
#define DRV_NAME "r6040"
-#define DRV_VERSION "0.28"
-#define DRV_RELDATE "07Oct2011"
+#define DRV_VERSION "0.29"
+#define DRV_RELDATE "04Jul2016"
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (6000 * HZ / 1000)
@@ -162,7 +162,7 @@
MODULE_AUTHOR("Sten Wang <sten.wang@rdc.com.tw>,"
"Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>,"
- "Florian Fainelli <florian@openwrt.org>");
+ "Florian Fainelli <f.fainelli@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RDC R6040 NAPI PCI FastEthernet driver");
MODULE_VERSION(DRV_VERSION " " DRV_RELDATE);
@@ -200,7 +200,6 @@ struct r6040_private {
struct mii_bus *mii_bus;
struct napi_struct napi;
void __iomem *base;
- struct phy_device *phydev;
int old_link;
int old_duplex;
};
@@ -474,7 +473,7 @@ static void r6040_down(struct net_device *dev)
iowrite16(adrp[1], ioaddr + MID_0M);
iowrite16(adrp[2], ioaddr + MID_0H);
- phy_stop(lp->phydev);
+ phy_stop(dev->phydev);
}
static int r6040_close(struct net_device *dev)
@@ -515,12 +514,10 @@ static int r6040_close(struct net_device *dev)
static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct r6040_private *lp = netdev_priv(dev);
-
- if (!lp->phydev)
+ if (!dev->phydev)
return -EINVAL;
- return phy_mii_ioctl(lp->phydev, rq, cmd);
+ return phy_mii_ioctl(dev->phydev, rq, cmd);
}
static int r6040_rx(struct net_device *dev, int limit)
@@ -617,10 +614,15 @@ static void r6040_tx(struct net_device *dev)
if (descptr->status & DSC_OWNER_MAC)
break; /* Not complete */
skb_ptr = descptr->skb_ptr;
+
+ /* Update statistics counters */
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb_ptr->len;
+
pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
skb_ptr->len, PCI_DMA_TODEVICE);
/* Free buffer */
- dev_kfree_skb_irq(skb_ptr);
+ dev_kfree_skb(skb_ptr);
descptr->skb_ptr = NULL;
/* To next descriptor */
descptr = descptr->vndescp;
@@ -641,12 +643,15 @@ static int r6040_poll(struct napi_struct *napi, int budget)
void __iomem *ioaddr = priv->base;
int work_done;
+ r6040_tx(dev);
+
work_done = r6040_rx(dev, budget);
if (work_done < budget) {
- napi_complete(napi);
- /* Enable RX interrupt */
- iowrite16(ioread16(ioaddr + MIER) | RX_INTS, ioaddr + MIER);
+ napi_complete_done(napi, work_done);
+ /* Enable RX/TX interrupt */
+ iowrite16(ioread16(ioaddr + MIER) | RX_INTS | TX_INTS,
+ ioaddr + MIER);
}
return work_done;
}
@@ -673,7 +678,7 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
}
/* RX interrupt request */
- if (status & RX_INTS) {
+ if (status & (RX_INTS | TX_INTS)) {
if (status & RX_NO_DESC) {
/* RX descriptor unavailable */
dev->stats.rx_dropped++;
@@ -684,15 +689,11 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
if (likely(napi_schedule_prep(&lp->napi))) {
/* Mask off RX interrupt */
- misr &= ~RX_INTS;
- __napi_schedule(&lp->napi);
+ misr &= ~(RX_INTS | TX_INTS);
+ __napi_schedule_irqoff(&lp->napi);
}
}
- /* TX interrupt request */
- if (status & TX_INTS)
- r6040_tx(dev);
-
/* Restore RDC MAC interrupt */
iowrite16(misr, ioaddr + MIER);
@@ -732,7 +733,7 @@ static int r6040_up(struct net_device *dev)
/* Initialize all MAC registers */
r6040_init_mac_regs(dev);
- phy_start(lp->phydev);
+ phy_start(dev->phydev);
return 0;
}
@@ -813,6 +814,9 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
void __iomem *ioaddr = lp->base;
unsigned long flags;
+ if (skb_put_padto(skb, ETH_ZLEN) < 0)
+ return NETDEV_TX_OK;
+
/* Critical Section */
spin_lock_irqsave(&lp->lock, flags);
@@ -824,17 +828,10 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- /* Statistic Counter */
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
/* Set TX descriptor & Transmit it */
lp->tx_free_desc--;
descptr = lp->tx_insert_ptr;
- if (skb->len < ETH_ZLEN)
- descptr->len = ETH_ZLEN;
- else
- descptr->len = skb->len;
-
+ descptr->len = skb->len;
descptr->skb_ptr = skb;
descptr->buf = cpu_to_le32(pci_map_single(lp->pdev,
skb->data, skb->len, PCI_DMA_TODEVICE));
@@ -843,7 +840,8 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
skb_tx_timestamp(skb);
/* Trigger the MAC to check the TX descriptor */
- iowrite16(TM2TX, ioaddr + MTPR);
+ if (!skb->xmit_more || netif_queue_stopped(dev))
+ iowrite16(TM2TX, ioaddr + MTPR);
lp->tx_insert_ptr = descptr->vndescp;
/* If no tx resource, stop */
@@ -957,26 +955,12 @@ static void netdev_get_drvinfo(struct net_device *dev,
strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
}
-static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct r6040_private *rp = netdev_priv(dev);
-
- return phy_ethtool_gset(rp->phydev, cmd);
-}
-
-static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct r6040_private *rp = netdev_priv(dev);
-
- return phy_ethtool_sset(rp->phydev, cmd);
-}
-
static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
- .get_settings = netdev_get_settings,
- .set_settings = netdev_set_settings,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static const struct net_device_ops r6040_netdev_ops = {
@@ -998,7 +982,7 @@ static const struct net_device_ops r6040_netdev_ops = {
static void r6040_adjust_link(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
- struct phy_device *phydev = lp->phydev;
+ struct phy_device *phydev = dev->phydev;
int status_changed = 0;
void __iomem *ioaddr = lp->base;
@@ -1018,14 +1002,8 @@ static void r6040_adjust_link(struct net_device *dev)
lp->old_duplex = phydev->duplex;
}
- if (status_changed) {
- pr_info("%s: link %s", dev->name, phydev->link ?
- "UP" : "DOWN");
- if (phydev->link)
- pr_cont(" - %d/%s", phydev->speed,
- DUPLEX_FULL == phydev->duplex ? "full" : "half");
- pr_cont("\n");
- }
+ if (status_changed)
+ phy_print_status(phydev);
}
static int r6040_mii_probe(struct net_device *dev)
@@ -1057,7 +1035,6 @@ static int r6040_mii_probe(struct net_device *dev)
| SUPPORTED_TP);
phydev->advertising = phydev->supported;
- lp->phydev = phydev;
lp->old_link = 0;
lp->old_duplex = -1;
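The r6040 hunks above are one theme: TX completion moves from the interrupt handler into NAPI poll (so dev_kfree_skb() replaces dev_kfree_skb_irq() and stats are counted at completion, not submission), short frames are padded with skb_put_padto(), and the TM2TX doorbell is written only when skb->xmit_more says the batch has ended. A condensed sketch of the resulting transmit path, with foo_queue_desc() a hypothetical stand-in for the descriptor setup:

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct r6040_private *lp = netdev_priv(dev);

	/* Pad runt frames; on failure the skb is already freed, so
	 * report success to avoid a double free. */
	if (skb_put_padto(skb, ETH_ZLEN) < 0)
		return NETDEV_TX_OK;

	foo_queue_desc(lp, skb);	/* hypothetical: fill one descriptor */

	/* Ring the doorbell only for the last skb of a batch, or when
	 * the queue is stopping, so bursts share one MMIO write. */
	if (!skb->xmit_more || netif_queue_stopped(dev))
		iowrite16(TM2TX, lp->base + MTPR);

	return NETDEV_TX_OK;
}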
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index deae10d74..5297bf772 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -467,8 +467,8 @@ static int cp_rx_poll(struct napi_struct *napi, int budget)
unsigned int rx_tail = cp->rx_tail;
int rx;
-rx_status_loop:
rx = 0;
+rx_status_loop:
cpw16(IntrStatus, cp_rx_intr_mask);
while (rx < budget) {
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index ef668d300..da4c2d8a4 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -1667,6 +1667,10 @@ static void rtl8139_tx_timeout_task (struct work_struct *work)
int i;
u8 tmp8;
+ napi_disable(&tp->napi);
+ netif_stop_queue(dev);
+ synchronize_sched();
+
netdev_dbg(dev, "Transmit timeout, status %02x %04x %04x media %02x\n",
RTL_R8(ChipCmd), RTL_R16(IntrStatus),
RTL_R16(IntrMask), RTL_R8(MediaStatus));
@@ -1696,10 +1700,10 @@ static void rtl8139_tx_timeout_task (struct work_struct *work)
spin_unlock_irq(&tp->lock);
/* ...and finally, reset everything */
- if (netif_running(dev)) {
- rtl8139_hw_start (dev);
- netif_wake_queue (dev);
- }
+ napi_enable(&tp->napi);
+ rtl8139_hw_start(dev);
+ netif_wake_queue(dev);
+
spin_unlock_bh(&tp->rx_lock);
}
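The 8139too timeout handler above now quiesces the device before resetting it, and since a watchdog only fires while the interface is running, the netif_running() check at the tail becomes unconditional restart code. The bracketing, reduced to a sketch in which foo_reset_chip() is hypothetical:

static void foo_tx_timeout_work(struct net_device *dev,
				struct rtl8139_private *tp)
{
	napi_disable(&tp->napi);	/* no poll can run after this */
	netif_stop_queue(dev);		/* block new transmits */
	synchronize_sched();		/* wait out in-flight ISRs (RCU-sched) */

	foo_reset_chip(dev);		/* safe: nothing else touches the chip */

	napi_enable(&tp->napi);
	rtl8139_hw_start(dev);
	netif_wake_queue(dev);
}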
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index e4030c3d7..c071624e7 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -1731,13 +1731,21 @@ static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct rtl8169_private *tp = netdev_priv(dev);
+ struct device *d = &tp->pci_dev->dev;
+
+ pm_runtime_get_noresume(d);
rtl_lock_work(tp);
wol->supported = WAKE_ANY;
- wol->wolopts = __rtl8169_get_wol(tp);
+ if (pm_runtime_active(d))
+ wol->wolopts = __rtl8169_get_wol(tp);
+ else
+ wol->wolopts = tp->saved_wolopts;
rtl_unlock_work(tp);
+
+ pm_runtime_put_noidle(d);
}
static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
@@ -1827,6 +1835,9 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct rtl8169_private *tp = netdev_priv(dev);
+ struct device *d = &tp->pci_dev->dev;
+
+ pm_runtime_get_noresume(d);
rtl_lock_work(tp);
@@ -1834,12 +1845,17 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
tp->features |= RTL_FEATURE_WOL;
else
tp->features &= ~RTL_FEATURE_WOL;
- __rtl8169_set_wol(tp, wol->wolopts);
+ if (pm_runtime_active(d))
+ __rtl8169_set_wol(tp, wol->wolopts);
+ else
+ tp->saved_wolopts = wol->wolopts;
rtl_unlock_work(tp);
device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
+ pm_runtime_put_noidle(d);
+
return 0;
}
@@ -2274,11 +2290,17 @@ static void rtl8169_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct rtl8169_private *tp = netdev_priv(dev);
+ struct device *d = &tp->pci_dev->dev;
struct rtl8169_counters *counters = tp->counters;
ASSERT_RTNL();
- rtl8169_update_counters(dev);
+ pm_runtime_get_noresume(d);
+
+ if (pm_runtime_active(d))
+ rtl8169_update_counters(dev);
+
+ pm_runtime_put_noidle(d);
data[0] = le64_to_cpu(counters->tx_packets);
data[1] = le64_to_cpu(counters->rx_packets);
@@ -4440,6 +4462,7 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
static int rtl_set_mac_address(struct net_device *dev, void *p)
{
struct rtl8169_private *tp = netdev_priv(dev);
+ struct device *d = &tp->pci_dev->dev;
struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
@@ -4447,7 +4470,12 @@ static int rtl_set_mac_address(struct net_device *dev, void *p)
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
- rtl_rar_set(tp, dev->dev_addr);
+ pm_runtime_get_noresume(d);
+
+ if (pm_runtime_active(d))
+ rtl_rar_set(tp, dev->dev_addr);
+
+ pm_runtime_put_noidle(d);
return 0;
}
@@ -7850,6 +7878,7 @@ static int rtl8169_runtime_resume(struct device *device)
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
+ rtl_rar_set(tp, dev->dev_addr);
if (!tp->TxDescArray)
return 0;
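The r8169 hunks above share one guard: pm_runtime_get_noresume() pins the runtime-PM usage count without waking the device, pm_runtime_active() decides whether registers may be touched now or the request must be parked (tp->saved_wolopts, or the MAC address replayed by rtl8169_runtime_resume() above), and pm_runtime_put_noidle() unpins without scheduling idling. As a freestanding sketch, with foo_touch_hw() and foo_defer_request() hypothetical:

#include <linux/pm_runtime.h>

static void foo_guarded_op(struct device *d)
{
	pm_runtime_get_noresume(d);	/* pin usage count, don't wake device */

	if (pm_runtime_active(d))
		foo_touch_hw();		/* powered: safe to poke registers */
	else
		foo_defer_request();	/* suspended: replay on runtime resume */

	pm_runtime_put_noidle(d);	/* unpin without triggering idle */
}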
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 867caf6e7..1e1cc0fad 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -362,8 +362,6 @@ static void ravb_emac_init(struct net_device *ndev)
ravb_write(ndev,
(ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
- ravb_write(ndev, 1, MPR);
-
/* E-MAC status register clear */
ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);
@@ -402,7 +400,8 @@ static int ravb_dmac_init(struct net_device *ndev)
#endif
/* Set AVB RX */
- ravb_write(ndev, RCR_EFFS | RCR_ENCF | RCR_ETS0 | 0x18000000, RCR);
+ ravb_write(ndev,
+ RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);
/* Set FIFO size */
ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC);
@@ -1006,6 +1005,7 @@ static int ravb_phy_init(struct net_device *ndev)
}
phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0,
priv->phy_interface);
+ of_node_put(pn);
if (!phydev) {
netdev_err(ndev, "failed to connect PHY\n");
return -ENOENT;
@@ -1909,7 +1909,6 @@ static int ravb_probe(struct platform_device *pdev)
/* The Ether-specific entries in the device structure. */
ndev->base_addr = res->start;
- ndev->dma = -1;
chip_id = (enum ravb_chip_id)of_device_get_match_data(&pdev->dev);
@@ -2111,8 +2110,7 @@ static int ravb_runtime_nop(struct device *dev)
}
static const struct dev_pm_ops ravb_dev_pm_ops = {
- .runtime_suspend = ravb_runtime_nop,
- .runtime_resume = ravb_runtime_nop,
+ SET_RUNTIME_PM_OPS(ravb_runtime_nop, ravb_runtime_nop, NULL)
};
#define RAVB_PM_OPS (&ravb_dev_pm_ops)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 04cd39f66..054e795df 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -201,9 +201,14 @@ static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
[ARSTR] = 0x0000,
[TSU_CTRST] = 0x0004,
+ [TSU_FWSLC] = 0x0038,
[TSU_VTAG0] = 0x0058,
[TSU_ADSBSY] = 0x0060,
[TSU_TEN] = 0x0064,
+ [TSU_POST1] = 0x0070,
+ [TSU_POST2] = 0x0074,
+ [TSU_POST3] = 0x0078,
+ [TSU_POST4] = 0x007c,
[TSU_ADRH0] = 0x0100,
[TXNLCR0] = 0x0080,
@@ -1780,6 +1785,7 @@ static int sh_eth_phy_init(struct net_device *ndev)
sh_eth_adjust_link, 0,
mdp->phy_interface);
+ of_node_put(pn);
if (!phydev)
phydev = ERR_PTR(-ENOENT);
} else {
@@ -2785,6 +2791,8 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp)
{
if (sh_eth_is_rz_fast_ether(mdp)) {
sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
+ sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL,
+ TSU_FWSLC); /* Enable POST registers */
return;
}
@@ -2996,7 +3004,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
if (devno < 0)
devno = 0;
- ndev->dma = -1;
ret = platform_get_irq(pdev, 0);
if (ret < 0)
goto out_release;
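Both ravb and sh_eth above plug the same leak: of_parse_phandle() returns the PHY node with its refcount elevated, and of_phy_connect() takes a reference of its own, so the caller must drop its reference whether or not the connect succeeds. The shape of the fix, sketched with a hypothetical foo_adjust_link() callback:

static int foo_phy_init(struct net_device *ndev, struct device_node *np,
			phy_interface_t iface)
{
	struct device_node *pn = of_parse_phandle(np, "phy-handle", 0);
	struct phy_device *phydev;

	phydev = of_phy_connect(ndev, pn, foo_adjust_link, 0, iface);
	of_node_put(pn);	/* drop our ref; phylib holds its own */

	return phydev ? 0 : -ENOENT;
}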
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 28b775e5a..f0b09b05e 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1996,7 +1996,8 @@ static int rocker_port_change_proto_down(struct net_device *dev,
return 0;
}
-static void rocker_port_neigh_destroy(struct neighbour *n)
+static void rocker_port_neigh_destroy(struct net_device *dev,
+ struct neighbour *n)
{
struct rocker_port *rocker_port = netdev_priv(n->dev);
int err;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
index 45019649b..5cb51b609 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
@@ -475,7 +475,6 @@ struct sxgbe_priv_data {
int rxcsum_insertion;
spinlock_t stats_lock; /* lock for tx/rx statistics */
- struct phy_device *phydev;
int oldlink;
int speed;
int oldduplex;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
index c0981ae45..542b67d43 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -147,7 +147,7 @@ static int sxgbe_get_eee(struct net_device *dev,
edata->eee_active = priv->eee_active;
edata->tx_lpi_timer = priv->tx_lpi_timer;
- return phy_ethtool_get_eee(priv->phydev, edata);
+ return phy_ethtool_get_eee(dev->phydev, edata);
}
static int sxgbe_set_eee(struct net_device *dev,
@@ -172,7 +172,7 @@ static int sxgbe_set_eee(struct net_device *dev,
priv->tx_lpi_timer = edata->tx_lpi_timer;
}
- return phy_ethtool_set_eee(priv->phydev, edata);
+ return phy_ethtool_set_eee(dev->phydev, edata);
}
static void sxgbe_getdrvinfo(struct net_device *dev,
@@ -182,27 +182,6 @@ static void sxgbe_getdrvinfo(struct net_device *dev,
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
-static int sxgbe_getsettings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- struct sxgbe_priv_data *priv = netdev_priv(dev);
-
- if (priv->phydev)
- return phy_ethtool_gset(priv->phydev, cmd);
-
- return -EOPNOTSUPP;
-}
-
-static int sxgbe_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct sxgbe_priv_data *priv = netdev_priv(dev);
-
- if (priv->phydev)
- return phy_ethtool_sset(priv->phydev, cmd);
-
- return -EOPNOTSUPP;
-}
-
static u32 sxgbe_getmsglevel(struct net_device *dev)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
@@ -255,7 +234,7 @@ static void sxgbe_get_ethtool_stats(struct net_device *dev,
char *p;
if (priv->eee_enabled) {
- int val = phy_get_eee_err(priv->phydev);
+ int val = phy_get_eee_err(dev->phydev);
if (val)
priv->xstats.eee_wakeup_error_n = val;
@@ -499,8 +478,6 @@ static int sxgbe_get_regs_len(struct net_device *dev)
static const struct ethtool_ops sxgbe_ethtool_ops = {
.get_drvinfo = sxgbe_getdrvinfo,
- .get_settings = sxgbe_getsettings,
- .set_settings = sxgbe_setsettings,
.get_msglevel = sxgbe_getmsglevel,
.set_msglevel = sxgbe_setmsglevel,
.get_link = ethtool_op_get_link,
@@ -516,6 +493,8 @@ static const struct ethtool_ops sxgbe_ethtool_ops = {
.get_regs_len = sxgbe_get_regs_len,
.get_eee = sxgbe_get_eee,
.set_eee = sxgbe_set_eee,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
void sxgbe_set_ethtool_ops(struct net_device *netdev)
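With the PHY pointer now kept in net_device itself (dev->phydev) instead of driver private data, r6040 and sxgbe can both delete their get_settings/set_settings wrappers and plug in the generic phylib helpers, which look up dev->phydev themselves and return -ENODEV if it is absent. The whole conversion reduces to:

#include <linux/ethtool.h>
#include <linux/phy.h>

static const struct ethtool_ops foo_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	/* Generic phylib-backed link settings; both rely on
	 * dev->phydev, which the hunks above keep populated. */
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};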
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 413ea14ab..ea44a2456 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -124,12 +124,13 @@ static void sxgbe_eee_ctrl_timer(unsigned long arg)
*/
bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
{
+ struct net_device *ndev = priv->dev;
bool ret = false;
/* MAC core supports the EEE feature. */
if (priv->hw_cap.eee) {
/* Check if the PHY supports EEE */
- if (phy_init_eee(priv->phydev, 1))
+ if (phy_init_eee(ndev->phydev, 1))
return false;
priv->eee_active = 1;
@@ -152,12 +153,14 @@ bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
static void sxgbe_eee_adjust(const struct sxgbe_priv_data *priv)
{
+ struct net_device *ndev = priv->dev;
+
/* When EEE has already been initialised, we have to
* modify the PLS bit in the LPI ctrl & status reg according
* to the PHY link status.
*/
if (priv->eee_enabled)
- priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
+ priv->hw->mac->set_eee_pls(priv->ioaddr, ndev->phydev->link);
}
/**
@@ -203,7 +206,7 @@ static inline u32 sxgbe_tx_avail(struct sxgbe_tx_queue *queue, int tx_qsize)
static void sxgbe_adjust_link(struct net_device *dev)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = dev->phydev;
u8 new_state = 0;
u8 speed = 0xff;
@@ -306,9 +309,6 @@ static int sxgbe_init_phy(struct net_device *ndev)
netdev_dbg(ndev, "%s: attached to PHY (UID 0x%x) Link = %d\n",
__func__, phydev->phy_id, phydev->link);
- /* save phy device in private structure */
- priv->phydev = phydev;
-
return 0;
}
@@ -1173,8 +1173,8 @@ static int sxgbe_open(struct net_device *dev)
priv->hw->dma->start_tx(priv->ioaddr, SXGBE_TX_QUEUES);
priv->hw->dma->start_rx(priv->ioaddr, SXGBE_RX_QUEUES);
- if (priv->phydev)
- phy_start(priv->phydev);
+ if (dev->phydev)
+ phy_start(dev->phydev);
/* initialise TX coalesce parameters */
sxgbe_tx_init_coalesce(priv);
@@ -1194,8 +1194,8 @@ static int sxgbe_open(struct net_device *dev)
init_error:
free_dma_desc_resources(priv);
- if (priv->phydev)
- phy_disconnect(priv->phydev);
+ if (dev->phydev)
+ phy_disconnect(dev->phydev);
phy_error:
clk_disable_unprepare(priv->sxgbe_clk);
@@ -1216,10 +1216,9 @@ static int sxgbe_release(struct net_device *dev)
del_timer_sync(&priv->eee_ctrl_timer);
/* Stop and disconnect the PHY */
- if (priv->phydev) {
- phy_stop(priv->phydev);
- phy_disconnect(priv->phydev);
- priv->phydev = NULL;
+ if (dev->phydev) {
+ phy_stop(dev->phydev);
+ phy_disconnect(dev->phydev);
}
netif_tx_stop_all_queues(dev);
@@ -1969,7 +1968,6 @@ static void sxgbe_poll_controller(struct net_device *dev)
*/
static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct sxgbe_priv_data *priv = netdev_priv(dev);
int ret = -EOPNOTSUPP;
if (!netif_running(dev))
@@ -1979,9 +1977,9 @@ static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
- if (!priv->phydev)
+ if (!dev->phydev)
return -EINVAL;
- ret = phy_mii_ioctl(priv->phydev, rq, cmd);
+ ret = phy_mii_ioctl(dev->phydev, rq, cmd);
break;
default:
break;
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 1f3091274..e00a669e9 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -50,14 +50,34 @@ enum {
#define HUNT_FILTER_TBL_ROWS 8192
#define EFX_EF10_FILTER_ID_INVALID 0xffff
+
+#define EFX_EF10_FILTER_DEV_UC_MAX 32
+#define EFX_EF10_FILTER_DEV_MC_MAX 256
+
+/* VLAN list entry */
+struct efx_ef10_vlan {
+ struct list_head list;
+ u16 vid;
+};
+
+/* Per-VLAN filters information */
+struct efx_ef10_filter_vlan {
+ struct list_head list;
+ u16 vid;
+ u16 uc[EFX_EF10_FILTER_DEV_UC_MAX];
+ u16 mc[EFX_EF10_FILTER_DEV_MC_MAX];
+ u16 ucdef;
+ u16 bcast;
+ u16 mcdef;
+};
+
struct efx_ef10_dev_addr {
u8 addr[ETH_ALEN];
- u16 id;
};
struct efx_ef10_filter_table {
-/* The RX match field masks supported by this fw & hw, in order of priority */
- enum efx_filter_match_flags rx_match_flags[
+/* The MCDI match masks supported by this fw & hw, in order of priority */
+ u32 rx_match_mcdi_flags[
MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM];
unsigned int rx_match_count;
@@ -73,16 +93,16 @@ struct efx_ef10_filter_table {
} *entry;
wait_queue_head_t waitq;
/* Shadow of net_device address lists, guarded by mac_lock */
-#define EFX_EF10_FILTER_DEV_UC_MAX 32
-#define EFX_EF10_FILTER_DEV_MC_MAX 256
struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
int dev_uc_count;
int dev_mc_count;
-/* Indices (like efx_ef10_dev_addr.id) for promisc/allmulti filters */
- u16 ucdef_id;
- u16 bcast_id;
- u16 mcdef_id;
+ bool uc_promisc;
+ bool mc_promisc;
+/* Whether in multicast promiscuous mode when last changed */
+ bool mc_promisc_last;
+ bool vlan_filter;
+ struct list_head vlan_list;
};
/* An arbitrary search limit for the software hash table */
@@ -90,6 +110,10 @@ struct efx_ef10_filter_table {
static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
static void efx_ef10_filter_table_remove(struct efx_nic *efx);
+static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid);
+static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
+ struct efx_ef10_filter_vlan *vlan);
+static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid);
static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
{
@@ -275,6 +299,131 @@ static ssize_t efx_ef10_show_primary_flag(struct device *dev,
? 1 : 0);
}
+static struct efx_ef10_vlan *efx_ef10_find_vlan(struct efx_nic *efx, u16 vid)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_ef10_vlan *vlan;
+
+ WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));
+
+ list_for_each_entry(vlan, &nic_data->vlan_list, list) {
+ if (vlan->vid == vid)
+ return vlan;
+ }
+
+ return NULL;
+}
+
+static int efx_ef10_add_vlan(struct efx_nic *efx, u16 vid)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_ef10_vlan *vlan;
+ int rc;
+
+ mutex_lock(&nic_data->vlan_lock);
+
+ vlan = efx_ef10_find_vlan(efx, vid);
+ if (vlan) {
+ /* We add VID 0 on init. 8021q adds it on module init
+ * for all interfaces with the VLAN filtering feature.
+ */
+ if (vid == 0)
+ goto done_unlock;
+ netif_warn(efx, drv, efx->net_dev,
+ "VLAN %u already added\n", vid);
+ rc = -EALREADY;
+ goto fail_exist;
+ }
+
+ rc = -ENOMEM;
+ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan)
+ goto fail_alloc;
+
+ vlan->vid = vid;
+
+ list_add_tail(&vlan->list, &nic_data->vlan_list);
+
+ if (efx->filter_state) {
+ mutex_lock(&efx->mac_lock);
+ down_write(&efx->filter_sem);
+ rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
+ up_write(&efx->filter_sem);
+ mutex_unlock(&efx->mac_lock);
+ if (rc)
+ goto fail_filter_add_vlan;
+ }
+
+done_unlock:
+ mutex_unlock(&nic_data->vlan_lock);
+ return 0;
+
+fail_filter_add_vlan:
+ list_del(&vlan->list);
+ kfree(vlan);
+fail_alloc:
+fail_exist:
+ mutex_unlock(&nic_data->vlan_lock);
+ return rc;
+}
+
+static void efx_ef10_del_vlan_internal(struct efx_nic *efx,
+ struct efx_ef10_vlan *vlan)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));
+
+ if (efx->filter_state) {
+ down_write(&efx->filter_sem);
+ efx_ef10_filter_del_vlan(efx, vlan->vid);
+ up_write(&efx->filter_sem);
+ }
+
+ list_del(&vlan->list);
+ kfree(vlan);
+}
+
+static int efx_ef10_del_vlan(struct efx_nic *efx, u16 vid)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_ef10_vlan *vlan;
+ int rc = 0;
+
+ /* 8021q removes VID 0 on module unload for all interfaces
+ * with the VLAN filtering feature. We need to keep it to receive
+ * untagged traffic.
+ */
+ if (vid == 0)
+ return 0;
+
+ mutex_lock(&nic_data->vlan_lock);
+
+ vlan = efx_ef10_find_vlan(efx, vid);
+ if (!vlan) {
+ netif_err(efx, drv, efx->net_dev,
+ "VLAN %u to be deleted not found\n", vid);
+ rc = -ENOENT;
+ } else {
+ efx_ef10_del_vlan_internal(efx, vlan);
+ }
+
+ mutex_unlock(&nic_data->vlan_lock);
+
+ return rc;
+}
+
+static void efx_ef10_cleanup_vlans(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_ef10_vlan *vlan, *next_vlan;
+
+ mutex_lock(&nic_data->vlan_lock);
+ list_for_each_entry_safe(vlan, next_vlan, &nic_data->vlan_list, list)
+ efx_ef10_del_vlan_internal(efx, vlan);
+ mutex_unlock(&nic_data->vlan_lock);
+}
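efx_ef10_cleanup_vlans() above is the standard teardown idiom for a mutex-guarded list: the _safe iterator is required because each entry is unlinked and freed mid-walk. The same shape in isolation, foo_* names hypothetical:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>

struct foo_vlan {
	struct list_head list;
	u16 vid;
};

static void foo_cleanup_vlans(struct list_head *vlan_list, struct mutex *lock)
{
	struct foo_vlan *vlan, *next;

	mutex_lock(lock);
	/* _safe form: vlan is freed while the walk continues via next */
	list_for_each_entry_safe(vlan, next, vlan_list, list) {
		list_del(&vlan->list);
		kfree(vlan);
	}
	mutex_unlock(lock);
}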
+
static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag,
NULL);
static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL);
@@ -421,8 +570,30 @@ static int efx_ef10_probe(struct efx_nic *efx)
#endif
ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr);
+ INIT_LIST_HEAD(&nic_data->vlan_list);
+ mutex_init(&nic_data->vlan_lock);
+
+ /* Add unspecified VID to support VLAN filtering being disabled */
+ rc = efx_ef10_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
+ if (rc)
+ goto fail_add_vid_unspec;
+
+ /* If VLAN filtering is enabled, we need VID 0 to get untagged
+ * traffic. It is added automatically if the 8021q module is loaded,
+ * but we can't rely on that since the module may not be loaded.
+ */
+ rc = efx_ef10_add_vlan(efx, 0);
+ if (rc)
+ goto fail_add_vid_0;
+
return 0;
+fail_add_vid_0:
+ efx_ef10_cleanup_vlans(efx);
+fail_add_vid_unspec:
+ mutex_destroy(&nic_data->vlan_lock);
+ efx_ptp_remove(efx);
+ efx_mcdi_mon_remove(efx);
fail5:
device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
fail4:
@@ -676,6 +847,9 @@ static void efx_ef10_remove(struct efx_nic *efx)
}
#endif
+ efx_ef10_cleanup_vlans(efx);
+ mutex_destroy(&nic_data->vlan_lock);
+
efx_ptp_remove(efx);
efx_mcdi_mon_remove(efx);
@@ -704,6 +878,45 @@ static int efx_ef10_probe_pf(struct efx_nic *efx)
return efx_ef10_probe(efx);
}
+int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id,
+ u32 *port_flags, u32 *vadaptor_flags,
+ unsigned int *vlan_tags)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_QUERY_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_VADAPTOR_QUERY_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ if (nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN)) {
+ MCDI_SET_DWORD(inbuf, VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID,
+ port_id);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_QUERY, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+
+ if (outlen < sizeof(outbuf)) {
+ rc = -EIO;
+ return rc;
+ }
+ }
+
+ if (port_flags)
+ *port_flags = MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_PORT_FLAGS);
+ if (vadaptor_flags)
+ *vadaptor_flags =
+ MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS);
+ if (vlan_tags)
+ *vlan_tags =
+ MCDI_DWORD(outbuf,
+ VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS);
+
+ return 0;
+}
+
int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);
@@ -1304,13 +1517,14 @@ static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
}
#if BITS_PER_LONG == 64
+ BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2);
mask[0] = raw_mask[0];
mask[1] = raw_mask[1];
#else
+ BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 3);
mask[0] = raw_mask[0] & 0xffffffff;
mask[1] = raw_mask[0] >> 32;
mask[2] = raw_mask[1] & 0xffffffff;
- mask[3] = raw_mask[1] >> 32;
#endif
}
@@ -3040,15 +3254,55 @@ static int efx_ef10_filter_push(struct efx_nic *efx,
return rc;
}
-static int efx_ef10_filter_rx_match_pri(struct efx_ef10_filter_table *table,
- enum efx_filter_match_flags match_flags)
+static u32 efx_ef10_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec)
{
+ unsigned int match_flags = spec->match_flags;
+ u32 mcdi_flags = 0;
+
+ if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) {
+ match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG;
+ mcdi_flags |=
+ is_multicast_ether_addr(spec->loc_mac) ?
+ (1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN) :
+ (1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN);
+ }
+
+#define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field) { \
+ unsigned int old_match_flags = match_flags; \
+ match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag; \
+ if (match_flags != old_match_flags) \
+ mcdi_flags |= \
+ (1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
+ mcdi_field ## _LBN); \
+ }
+ MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP);
+ MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP);
+ MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC);
+ MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT);
+ MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC);
+ MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT);
+ MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE);
+ MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN);
+ MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN);
+ MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO);
+#undef MAP_FILTER_TO_MCDI_FLAG
+
+ /* Did we map them all? */
+ WARN_ON_ONCE(match_flags);
+
+ return mcdi_flags;
+}
+
+static int efx_ef10_filter_pri(struct efx_ef10_filter_table *table,
+ const struct efx_filter_spec *spec)
+{
+ u32 mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec);
unsigned int match_pri;
for (match_pri = 0;
match_pri < table->rx_match_count;
match_pri++)
- if (table->rx_match_flags[match_pri] == match_flags)
+ if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags)
return match_pri;
return -EPROTONOSUPPORT;
@@ -3074,7 +3328,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
EFX_FILTER_FLAG_RX)
return -EINVAL;
- rc = efx_ef10_filter_rx_match_pri(table, spec->match_flags);
+ rc = efx_ef10_filter_pri(table, spec);
if (rc < 0)
return rc;
match_pri = rc;
@@ -3313,7 +3567,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
spec = efx_ef10_filter_entry_spec(table, filter_idx);
if (!spec ||
(!by_index &&
- efx_ef10_filter_rx_match_pri(table, spec->match_flags) !=
+ efx_ef10_filter_pri(table, spec) !=
filter_id / HUNT_FILTER_TBL_ROWS)) {
rc = -ENOENT;
goto out_unlock;
@@ -3394,12 +3648,13 @@ static u32 efx_ef10_filter_get_unsafe_id(struct efx_nic *efx, u32 filter_id)
return filter_id % HUNT_FILTER_TBL_ROWS;
}
-static int efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id)
+static void efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id)
{
- return efx_ef10_filter_remove_internal(efx, 1U << priority,
- filter_id, true);
+ if (filter_id == EFX_EF10_FILTER_ID_INVALID)
+ return;
+ efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id, true);
}
static int efx_ef10_filter_get_safe(struct efx_nic *efx,
@@ -3414,7 +3669,7 @@ static int efx_ef10_filter_get_safe(struct efx_nic *efx,
spin_lock_bh(&efx->filter_lock);
saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
if (saved_spec && saved_spec->priority == priority &&
- efx_ef10_filter_rx_match_pri(table, saved_spec->match_flags) ==
+ efx_ef10_filter_pri(table, saved_spec) ==
filter_id / HUNT_FILTER_TBL_ROWS) {
*spec = *saved_spec;
rc = 0;
@@ -3487,8 +3742,7 @@ static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
count = -EMSGSIZE;
break;
}
- buf[count++] = (efx_ef10_filter_rx_match_pri(
- table, spec->match_flags) *
+ buf[count++] = (efx_ef10_filter_pri(table, spec) *
HUNT_FILTER_TBL_ROWS +
filter_idx);
}
@@ -3724,15 +3978,58 @@ static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags)
return match_flags;
}
+static void efx_ef10_filter_cleanup_vlans(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_ef10_filter_vlan *vlan, *next_vlan;
+
+ /* See comment in efx_ef10_filter_table_remove() */
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return;
+
+ if (!table)
+ return;
+
+ list_for_each_entry_safe(vlan, next_vlan, &table->vlan_list, list)
+ efx_ef10_filter_del_vlan_internal(efx, vlan);
+}
+
+static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table,
+ enum efx_filter_match_flags match_flags)
+{
+ unsigned int match_pri;
+ int mf;
+
+ for (match_pri = 0;
+ match_pri < table->rx_match_count;
+ match_pri++) {
+ mf = efx_ef10_filter_match_flags_from_mcdi(
+ table->rx_match_mcdi_flags[match_pri]);
+ if (mf == match_flags)
+ return true;
+ }
+
+ return false;
+}
+
static int efx_ef10_filter_table_probe(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct net_device *net_dev = efx->net_dev;
unsigned int pd_match_pri, pd_match_count;
struct efx_ef10_filter_table *table;
+ struct efx_ef10_vlan *vlan;
size_t outlen;
int rc;
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return -EINVAL;
+
+ if (efx->filter_state) /* already probed */
+ return 0;
+
table = kzalloc(sizeof(*table), GFP_KERNEL);
if (!table)
return -ENOMEM;
@@ -3765,24 +4062,48 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx)
"%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
__func__, mcdi_flags, pd_match_pri,
rc, table->rx_match_count);
- table->rx_match_flags[table->rx_match_count++] = rc;
+ table->rx_match_mcdi_flags[table->rx_match_count] = mcdi_flags;
+ table->rx_match_count++;
}
}
+ if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ !(efx_ef10_filter_match_supported(table,
+ (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) &&
+ efx_ef10_filter_match_supported(table,
+ (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) {
+ netif_info(efx, probe, net_dev,
+ "VLAN filters are not supported in this firmware variant\n");
+ net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ net_dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ }
+
table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry));
if (!table->entry) {
rc = -ENOMEM;
goto fail;
}
- table->ucdef_id = EFX_EF10_FILTER_ID_INVALID;
- table->bcast_id = EFX_EF10_FILTER_ID_INVALID;
- table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
+ table->mc_promisc_last = false;
+ table->vlan_filter =
+ !!(efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
+ INIT_LIST_HEAD(&table->vlan_list);
efx->filter_state = table;
init_waitqueue_head(&table->waitq);
+
+ list_for_each_entry(vlan, &nic_data->vlan_list, list) {
+ rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
+ if (rc)
+ goto fail_add_vlan;
+ }
+
return 0;
+fail_add_vlan:
+ efx_ef10_filter_cleanup_vlans(efx);
+ efx->filter_state = NULL;
fail:
kfree(table);
return rc;
@@ -3843,7 +4164,6 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
nic_data->must_restore_filters = false;
}
-/* Caller must hold efx->filter_sem for write */
static void efx_ef10_filter_table_remove(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
@@ -3852,7 +4172,17 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
unsigned int filter_idx;
int rc;
+ efx_ef10_filter_cleanup_vlans(efx);
efx->filter_state = NULL;
+ /* If we were called without locking, then it's not safe to free
+ * the table as others might be using it. So we just WARN, leak
+ * the memory, and potentially get an inconsistent filter table
+ * state.
+ * This should never actually happen.
+ */
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return;
+
if (!table)
return;
@@ -3880,37 +4210,54 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
kfree(table);
}
-#define EFX_EF10_FILTER_DO_MARK_OLD(id) \
- if (id != EFX_EF10_FILTER_ID_INVALID) { \
- filter_idx = efx_ef10_filter_get_unsafe_id(efx, id); \
- if (!table->entry[filter_idx].spec) \
- netif_dbg(efx, drv, efx->net_dev, \
- "%s: marked null spec old %04x:%04x\n", \
- __func__, id, filter_idx); \
- table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;\
- }
-static void efx_ef10_filter_mark_old(struct efx_nic *efx)
+static void efx_ef10_filter_mark_one_old(struct efx_nic *efx, uint16_t *id)
{
struct efx_ef10_filter_table *table = efx->filter_state;
- unsigned int filter_idx, i;
+ unsigned int filter_idx;
- if (!table)
- return;
+ if (*id != EFX_EF10_FILTER_ID_INVALID) {
+ filter_idx = efx_ef10_filter_get_unsafe_id(efx, *id);
+ if (!table->entry[filter_idx].spec)
+ netif_dbg(efx, drv, efx->net_dev,
+ "marked null spec old %04x:%04x\n", *id,
+ filter_idx);
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
+ *id = EFX_EF10_FILTER_ID_INVALID;
+ }
+}
+
+/* Mark old per-VLAN filters that may need to be removed */
+static void _efx_ef10_filter_vlan_mark_old(struct efx_nic *efx,
+ struct efx_ef10_filter_vlan *vlan)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ unsigned int i;
- /* Mark old filters that may need to be removed */
- spin_lock_bh(&efx->filter_lock);
for (i = 0; i < table->dev_uc_count; i++)
- EFX_EF10_FILTER_DO_MARK_OLD(table->dev_uc_list[i].id);
+ efx_ef10_filter_mark_one_old(efx, &vlan->uc[i]);
for (i = 0; i < table->dev_mc_count; i++)
- EFX_EF10_FILTER_DO_MARK_OLD(table->dev_mc_list[i].id);
- EFX_EF10_FILTER_DO_MARK_OLD(table->ucdef_id);
- EFX_EF10_FILTER_DO_MARK_OLD(table->bcast_id);
- EFX_EF10_FILTER_DO_MARK_OLD(table->mcdef_id);
+ efx_ef10_filter_mark_one_old(efx, &vlan->mc[i]);
+ efx_ef10_filter_mark_one_old(efx, &vlan->ucdef);
+ efx_ef10_filter_mark_one_old(efx, &vlan->bcast);
+ efx_ef10_filter_mark_one_old(efx, &vlan->mcdef);
+}
+
+/* Mark old filters that may need to be removed.
+ * Caller must hold efx->filter_sem for read if race against
+ * efx_ef10_filter_table_remove() is possible
+ */
+static void efx_ef10_filter_mark_old(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_ef10_filter_vlan *vlan;
+
+ spin_lock_bh(&efx->filter_lock);
+ list_for_each_entry(vlan, &table->vlan_list, list)
+ _efx_ef10_filter_vlan_mark_old(efx, vlan);
spin_unlock_bh(&efx->filter_lock);
}
-#undef EFX_EF10_FILTER_DO_MARK_OLD
-static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx, bool *promisc)
+static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
struct net_device *net_dev = efx->net_dev;
@@ -3918,45 +4265,38 @@ static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx, bool *promisc)
int addr_count;
unsigned int i;
- table->ucdef_id = EFX_EF10_FILTER_ID_INVALID;
addr_count = netdev_uc_count(net_dev);
- if (net_dev->flags & IFF_PROMISC)
- *promisc = true;
+ table->uc_promisc = !!(net_dev->flags & IFF_PROMISC);
table->dev_uc_count = 1 + addr_count;
ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
i = 1;
netdev_for_each_uc_addr(uc, net_dev) {
if (i >= EFX_EF10_FILTER_DEV_UC_MAX) {
- *promisc = true;
+ table->uc_promisc = true;
break;
}
ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
- table->dev_uc_list[i].id = EFX_EF10_FILTER_ID_INVALID;
i++;
}
}
-static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx, bool *promisc)
+static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
struct net_device *net_dev = efx->net_dev;
struct netdev_hw_addr *mc;
unsigned int i, addr_count;
- table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
- table->bcast_id = EFX_EF10_FILTER_ID_INVALID;
- if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI))
- *promisc = true;
+ table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI));
addr_count = netdev_mc_count(net_dev);
i = 0;
netdev_for_each_mc_addr(mc, net_dev) {
if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
- *promisc = true;
+ table->mc_promisc = true;
break;
}
ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
- table->dev_mc_list[i].id = EFX_EF10_FILTER_ID_INVALID;
i++;
}
@@ -3964,7 +4304,8 @@ static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx, bool *promisc)
}
static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
- bool multicast, bool rollback)
+ struct efx_ef10_filter_vlan *vlan,
+ bool multicast, bool rollback)
{
struct efx_ef10_filter_table *table = efx->filter_state;
struct efx_ef10_dev_addr *addr_list;
@@ -3973,14 +4314,17 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
u8 baddr[ETH_ALEN];
unsigned int i, j;
int addr_count;
+ u16 *ids;
int rc;
if (multicast) {
addr_list = table->dev_mc_list;
addr_count = table->dev_mc_count;
+ ids = vlan->mc;
} else {
addr_list = table->dev_uc_list;
addr_count = table->dev_uc_count;
+ ids = vlan->uc;
}
filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
@@ -3988,8 +4332,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
/* Insert/renew filters */
for (i = 0; i < addr_count; i++) {
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
- efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
- addr_list[i].addr);
+ efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
if (rollback) {
@@ -3998,12 +4341,10 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
rc);
/* Fall back to promiscuous */
for (j = 0; j < i; j++) {
- if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID)
- continue;
efx_ef10_filter_remove_unsafe(
efx, EFX_FILTER_PRI_AUTO,
- addr_list[j].id);
- addr_list[j].id = EFX_EF10_FILTER_ID_INVALID;
+ ids[j]);
+ ids[j] = EFX_EF10_FILTER_ID_INVALID;
}
return rc;
} else {
@@ -4011,40 +4352,40 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
rc = EFX_EF10_FILTER_ID_INVALID;
}
}
- addr_list[i].id = efx_ef10_filter_get_unsafe_id(efx, rc);
+ ids[i] = efx_ef10_filter_get_unsafe_id(efx, rc);
}
if (multicast && rollback) {
/* Also need an Ethernet broadcast filter */
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
eth_broadcast_addr(baddr);
- efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr);
+ efx_filter_set_eth_local(&spec, vlan->vid, baddr);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
netif_warn(efx, drv, efx->net_dev,
"Broadcast filter insert failed rc=%d\n", rc);
/* Fall back to promiscuous */
for (j = 0; j < i; j++) {
- if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID)
- continue;
efx_ef10_filter_remove_unsafe(
efx, EFX_FILTER_PRI_AUTO,
- addr_list[j].id);
- addr_list[j].id = EFX_EF10_FILTER_ID_INVALID;
+ ids[j]);
+ ids[j] = EFX_EF10_FILTER_ID_INVALID;
}
return rc;
} else {
- table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc);
+ EFX_WARN_ON_PARANOID(vlan->bcast !=
+ EFX_EF10_FILTER_ID_INVALID);
+ vlan->bcast = efx_ef10_filter_get_unsafe_id(efx, rc);
}
}
return 0;
}
-static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
- bool rollback)
+static int efx_ef10_filter_insert_def(struct efx_nic *efx,
+ struct efx_ef10_filter_vlan *vlan,
+ bool multicast, bool rollback)
{
- struct efx_ef10_filter_table *table = efx->filter_state;
struct efx_ef10_nic_data *nic_data = efx->nic_data;
enum efx_filter_flags filter_flags;
struct efx_filter_spec spec;
@@ -4060,6 +4401,9 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
else
efx_filter_set_uc_def(&spec);
+ if (vlan->vid != EFX_FILTER_VID_UNSPEC)
+ efx_filter_set_eth_local(&spec, vlan->vid, NULL);
+
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
netif_printk(efx, drv, rc == -EPERM ? KERN_DEBUG : KERN_WARNING,
@@ -4067,14 +4411,14 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
"%scast mismatch filter insert failed rc=%d\n",
multicast ? "Multi" : "Uni", rc);
} else if (multicast) {
- table->mcdef_id = efx_ef10_filter_get_unsafe_id(efx, rc);
+ EFX_WARN_ON_PARANOID(vlan->mcdef != EFX_EF10_FILTER_ID_INVALID);
+ vlan->mcdef = efx_ef10_filter_get_unsafe_id(efx, rc);
if (!nic_data->workaround_26807) {
/* Also need an Ethernet broadcast filter */
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
filter_flags, 0);
eth_broadcast_addr(baddr);
- efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
- baddr);
+ efx_filter_set_eth_local(&spec, vlan->vid, baddr);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
netif_warn(efx, drv, efx->net_dev,
@@ -4084,17 +4428,20 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
/* Roll back the mc_def filter */
efx_ef10_filter_remove_unsafe(
efx, EFX_FILTER_PRI_AUTO,
- table->mcdef_id);
- table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
+ vlan->mcdef);
+ vlan->mcdef = EFX_EF10_FILTER_ID_INVALID;
return rc;
}
} else {
- table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc);
+ EFX_WARN_ON_PARANOID(vlan->bcast !=
+ EFX_EF10_FILTER_ID_INVALID);
+ vlan->bcast = efx_ef10_filter_get_unsafe_id(efx, rc);
}
}
rc = 0;
} else {
- table->ucdef_id = rc;
+ EFX_WARN_ON_PARANOID(vlan->ucdef != EFX_EF10_FILTER_ID_INVALID);
+ vlan->ucdef = rc;
rc = 0;
}
return rc;
@@ -4203,64 +4550,55 @@ reset_nic:
/* Caller must hold efx->filter_sem for read if race against
* efx_ef10_filter_table_remove() is possible
*/
-static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
+static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
+ struct efx_ef10_filter_vlan *vlan)
{
struct efx_ef10_filter_table *table = efx->filter_state;
struct efx_ef10_nic_data *nic_data = efx->nic_data;
- struct net_device *net_dev = efx->net_dev;
- bool uc_promisc = false, mc_promisc = false;
- if (!efx_dev_registered(efx))
- return;
-
- if (!table)
- return;
-
- efx_ef10_filter_mark_old(efx);
-
- /* Copy/convert the address lists; add the primary station
- * address and broadcast address
+ /* Do not install unspecified VID if VLAN filtering is enabled.
+ * Do not install per-VID filters if VLAN filtering is disabled.
*/
- netif_addr_lock_bh(net_dev);
- efx_ef10_filter_uc_addr_list(efx, &uc_promisc);
- efx_ef10_filter_mc_addr_list(efx, &mc_promisc);
- netif_addr_unlock_bh(net_dev);
+ if ((vlan->vid == EFX_FILTER_VID_UNSPEC) == table->vlan_filter)
+ return;
/* Insert/renew unicast filters */
- if (uc_promisc) {
- efx_ef10_filter_insert_def(efx, false, false);
- efx_ef10_filter_insert_addr_list(efx, false, false);
+ if (table->uc_promisc) {
+ efx_ef10_filter_insert_def(efx, vlan, false, false);
+ efx_ef10_filter_insert_addr_list(efx, vlan, false, false);
} else {
/* If any of the filters failed to insert, fall back to
* promiscuous mode - add in the uc_def filter. But keep
* our individual unicast filters.
*/
- if (efx_ef10_filter_insert_addr_list(efx, false, false))
- efx_ef10_filter_insert_def(efx, false, false);
+ if (efx_ef10_filter_insert_addr_list(efx, vlan, false, false))
+ efx_ef10_filter_insert_def(efx, vlan, false, false);
}
/* Insert/renew multicast filters */
/* If changing promiscuous state with cascaded multicast filters, remove
* old filters first, so that packets are dropped rather than duplicated
*/
- if (nic_data->workaround_26807 && efx->mc_promisc != mc_promisc)
+ if (nic_data->workaround_26807 &&
+ table->mc_promisc_last != table->mc_promisc)
efx_ef10_filter_remove_old(efx);
- if (mc_promisc) {
+ if (table->mc_promisc) {
if (nic_data->workaround_26807) {
/* If we failed to insert promiscuous filters, rollback
* and fall back to individual multicast filters
*/
- if (efx_ef10_filter_insert_def(efx, true, true)) {
+ if (efx_ef10_filter_insert_def(efx, vlan, true, true)) {
/* Changing promisc state, so remove old filters */
efx_ef10_filter_remove_old(efx);
- efx_ef10_filter_insert_addr_list(efx, true, false);
+ efx_ef10_filter_insert_addr_list(efx, vlan,
+ true, false);
}
} else {
/* If we failed to insert promiscuous filters, don't
* rollback. Regardless, also insert the mc_list
*/
- efx_ef10_filter_insert_def(efx, true, false);
- efx_ef10_filter_insert_addr_list(efx, true, false);
+ efx_ef10_filter_insert_def(efx, vlan, true, false);
+ efx_ef10_filter_insert_addr_list(efx, vlan, true, false);
}
} else {
/* If any filters failed to insert, rollback and fall back to
@@ -4268,17 +4606,153 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
* that fails, roll back again and insert as many of our
* individual multicast filters as we can.
*/
- if (efx_ef10_filter_insert_addr_list(efx, true, true)) {
+ if (efx_ef10_filter_insert_addr_list(efx, vlan, true, true)) {
/* Changing promisc state, so remove old filters */
if (nic_data->workaround_26807)
efx_ef10_filter_remove_old(efx);
- if (efx_ef10_filter_insert_def(efx, true, true))
- efx_ef10_filter_insert_addr_list(efx, true, false);
+ if (efx_ef10_filter_insert_def(efx, vlan, true, true))
+ efx_ef10_filter_insert_addr_list(efx, vlan,
+ true, false);
}
}
+}
+
+/* Caller must hold efx->filter_sem for read if race against
+ * efx_ef10_filter_table_remove() is possible
+ */
+static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct net_device *net_dev = efx->net_dev;
+ struct efx_ef10_filter_vlan *vlan;
+ bool vlan_filter;
+
+ if (!efx_dev_registered(efx))
+ return;
+
+ if (!table)
+ return;
+
+ efx_ef10_filter_mark_old(efx);
+
+ /* Copy/convert the address lists; add the primary station
+ * address and broadcast address
+ */
+ netif_addr_lock_bh(net_dev);
+ efx_ef10_filter_uc_addr_list(efx);
+ efx_ef10_filter_mc_addr_list(efx);
+ netif_addr_unlock_bh(net_dev);
+
+ /* If VLAN filtering changes, all old filters are finally removed.
+ * Do it in advance to avoid conflicts for unicast untagged and
+ * VLAN 0 tagged filters.
+ */
+ vlan_filter = !!(net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
+ if (table->vlan_filter != vlan_filter) {
+ table->vlan_filter = vlan_filter;
+ efx_ef10_filter_remove_old(efx);
+ }
+
+ list_for_each_entry(vlan, &table->vlan_list, list)
+ efx_ef10_filter_vlan_sync_rx_mode(efx, vlan);
efx_ef10_filter_remove_old(efx);
- efx->mc_promisc = mc_promisc;
+ table->mc_promisc_last = table->mc_promisc;
+}
+
+static struct efx_ef10_filter_vlan *efx_ef10_filter_find_vlan(struct efx_nic *efx, u16 vid)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_ef10_filter_vlan *vlan;
+
+ WARN_ON(!rwsem_is_locked(&efx->filter_sem));
+
+ list_for_each_entry(vlan, &table->vlan_list, list) {
+ if (vlan->vid == vid)
+ return vlan;
+ }
+
+ return NULL;
+}
+
+static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_ef10_filter_vlan *vlan;
+ unsigned int i;
+
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return -EINVAL;
+
+ vlan = efx_ef10_filter_find_vlan(efx, vid);
+ if (WARN_ON(vlan)) {
+ netif_err(efx, drv, efx->net_dev,
+ "VLAN %u already added\n", vid);
+ return -EALREADY;
+ }
+
+ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan)
+ return -ENOMEM;
+
+ vlan->vid = vid;
+
+ for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
+ vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID;
+ for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
+ vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID;
+ vlan->ucdef = EFX_EF10_FILTER_ID_INVALID;
+ vlan->bcast = EFX_EF10_FILTER_ID_INVALID;
+ vlan->mcdef = EFX_EF10_FILTER_ID_INVALID;
+
+ list_add_tail(&vlan->list, &table->vlan_list);
+
+ if (efx_dev_registered(efx))
+ efx_ef10_filter_vlan_sync_rx_mode(efx, vlan);
+
+ return 0;
+}
+
+static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
+ struct efx_ef10_filter_vlan *vlan)
+{
+ unsigned int i;
+
+ /* See comment in efx_ef10_filter_table_remove() */
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return;
+
+ list_del(&vlan->list);
+
+ for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
+ efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
+ vlan->uc[i]);
+ for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
+ efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
+ vlan->mc[i]);
+ efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->ucdef);
+ efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->bcast);
+ efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->mcdef);
+
+ kfree(vlan);
+}
+
+static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid)
+{
+ struct efx_ef10_filter_vlan *vlan;
+
+ /* See comment in efx_ef10_filter_table_remove() */
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return;
+
+ vlan = efx_ef10_filter_find_vlan(efx, vid);
+ if (!vlan) {
+ netif_err(efx, drv, efx->net_dev,
+ "VLAN %u not found in filter state\n", vid);
+ return;
+ }
+
+ efx_ef10_filter_del_vlan_internal(efx, vlan);
}
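
The additions above keep one efx_ef10_filter_vlan per VID on table->vlan_list and prime every filter ID with EFX_EF10_FILTER_ID_INVALID, so teardown can hand each slot to efx_ef10_filter_remove_unsafe() unconditionally. A minimal userspace sketch of that list-plus-sentinel pattern, with toy types standing in for the driver's structures:

#include <stdio.h>
#include <stdlib.h>

#define ID_INVALID 0xffffffffu          /* stands in for EFX_EF10_FILTER_ID_INVALID */
#define N_SLOTS 4

struct vlan_entry {
        struct vlan_entry *next;
        unsigned short vid;
        unsigned int filter_id[N_SLOTS];
};

static struct vlan_entry *vlan_list;

static struct vlan_entry *find_vlan(unsigned short vid)
{
        struct vlan_entry *v;

        for (v = vlan_list; v; v = v->next)
                if (v->vid == vid)
                        return v;
        return NULL;
}

static int add_vlan(unsigned short vid)
{
        struct vlan_entry *v;
        int i;

        if (find_vlan(vid))
                return -1;              /* mirrors the -EALREADY path */
        v = calloc(1, sizeof(*v));
        if (!v)
                return -1;
        v->vid = vid;
        for (i = 0; i < N_SLOTS; i++)   /* prime every slot */
                v->filter_id[i] = ID_INVALID;
        v->next = vlan_list;
        vlan_list = v;
        return 0;
}

static void del_vlan(unsigned short vid)
{
        struct vlan_entry **vp, *v;

        for (vp = &vlan_list; (v = *vp); vp = &v->next) {
                if (v->vid != vid)
                        continue;
                *vp = v->next;
                /* the driver hands every slot to remove_unsafe(), which
                 * treats ID_INVALID as a no-op; no per-slot bookkeeping */
                free(v);
                return;
        }
}

int main(void)
{
        add_vlan(0);
        add_vlan(100);
        printf("vid 100: %s\n", find_vlan(100) ? "present" : "absent");
        del_vlan(100);
        printf("vid 100: %s\n", find_vlan(100) ? "present" : "absent");
        return 0;
}
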
static int efx_ef10_set_mac_address(struct efx_nic *efx)
@@ -4290,6 +4764,8 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
efx_device_detach_sync(efx);
efx_net_stop(efx->net_dev);
+
+ mutex_lock(&efx->mac_lock);
down_write(&efx->filter_sem);
efx_ef10_filter_table_remove(efx);
@@ -4302,6 +4778,8 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
efx_ef10_filter_table_probe(efx);
up_write(&efx->filter_sem);
+ mutex_unlock(&efx->mac_lock);
+
if (was_enabled)
efx_net_open(efx->net_dev);
netif_device_attach(efx->net_dev);
@@ -4703,6 +5181,29 @@ static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
}
}
+static int efx_ef10_vlan_rx_add_vid(struct efx_nic *efx, __be16 proto, u16 vid)
+{
+ if (proto != htons(ETH_P_8021Q))
+ return -EINVAL;
+
+ return efx_ef10_add_vlan(efx, vid);
+}
+
+static int efx_ef10_vlan_rx_kill_vid(struct efx_nic *efx, __be16 proto, u16 vid)
+{
+ if (proto != htons(ETH_P_8021Q))
+ return -EINVAL;
+
+ return efx_ef10_del_vlan(efx, vid);
+}
+
+#define EF10_OFFLOAD_FEATURES \
+ (NETIF_F_IP_CSUM | \
+ NETIF_F_HW_VLAN_CTAG_FILTER | \
+ NETIF_F_IPV6_CSUM | \
+ NETIF_F_RXHASH | \
+ NETIF_F_NTUPLE)
+
const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.is_vf = true,
.mem_bar = EFX_MEM_VF_BAR,
@@ -4780,6 +5281,8 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
#endif
.ptp_write_host_time = efx_ef10_ptp_write_host_time_vf,
.ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf,
+ .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
+ .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
#ifdef CONFIG_SFC_SRIOV
.vswitching_probe = efx_ef10_vswitching_probe_vf,
.vswitching_restore = efx_ef10_vswitching_restore_vf,
@@ -4798,8 +5301,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.always_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
- .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXHASH | NETIF_F_NTUPLE),
+ .offload_features = EF10_OFFLOAD_FEATURES,
.mcdi_max_ver = 2,
.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
@@ -4891,6 +5393,8 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.ptp_write_host_time = efx_ef10_ptp_write_host_time,
.ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
.ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
+ .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
+ .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
#ifdef CONFIG_SFC_SRIOV
.sriov_configure = efx_ef10_sriov_configure,
.sriov_init = efx_ef10_sriov_init,
@@ -4919,8 +5423,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.always_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
- .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXHASH | NETIF_F_NTUPLE),
+ .offload_features = EF10_OFFLOAD_FEATURES,
.mcdi_max_ver = 2,
.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 3c17f274e..a949b9d27 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -232,6 +232,35 @@ fail:
return rc;
}
+static int efx_ef10_vadaptor_alloc_set_features(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u32 port_flags;
+ int rc;
+
+ rc = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+ if (rc)
+ goto fail_vadaptor_alloc;
+
+ rc = efx_ef10_vadaptor_query(efx, nic_data->vport_id,
+ &port_flags, NULL, NULL);
+ if (rc)
+ goto fail_vadaptor_query;
+
+ if (port_flags &
+ (1 << MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN))
+ efx->fixed_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ else
+ efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ return 0;
+
+fail_vadaptor_query:
+ efx_ef10_vadaptor_free(efx, EVB_PORT_ID_ASSIGNED);
+fail_vadaptor_alloc:
+ return rc;
+}
+
/* On top of the default firmware vswitch setup, create a VEB vswitch and
* expansion vport for use by this function.
*/
@@ -243,7 +272,7 @@ int efx_ef10_vswitching_probe_pf(struct efx_nic *efx)
if (pci_sriov_get_totalvfs(efx->pci_dev) <= 0) {
/* vswitch not needed as we have no VFs */
- efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+ efx_ef10_vadaptor_alloc_set_features(efx);
return 0;
}
@@ -263,7 +292,7 @@ int efx_ef10_vswitching_probe_pf(struct efx_nic *efx)
goto fail3;
ether_addr_copy(nic_data->vport_mac, net_dev->dev_addr);
- rc = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+ rc = efx_ef10_vadaptor_alloc_set_features(efx);
if (rc)
goto fail4;
@@ -282,9 +311,7 @@ fail1:
int efx_ef10_vswitching_probe_vf(struct efx_nic *efx)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
-
- return efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+ return efx_ef10_vadaptor_alloc_set_features(efx);
}
int efx_ef10_vswitching_restore_pf(struct efx_nic *efx)
@@ -554,6 +581,7 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
efx_device_detach_sync(vf->efx);
efx_net_stop(vf->efx->net_dev);
+ mutex_lock(&vf->efx->mac_lock);
down_write(&vf->efx->filter_sem);
vf->efx->type->filter_table_remove(vf->efx);
@@ -630,6 +658,7 @@ restore_filters:
goto reset_nic_up_write;
up_write(&vf->efx->filter_sem);
+ mutex_unlock(&vf->efx->mac_lock);
up_write(&vf->efx->filter_sem);
@@ -642,9 +671,10 @@ restore_filters:
return rc;
reset_nic_up_write:
- if (vf->efx)
+ if (vf->efx) {
up_write(&vf->efx->filter_sem);
-
+ mutex_unlock(&vf->efx->mac_lock);
+ }
reset_nic:
if (vf->efx) {
netif_err(efx, drv, efx->net_dev,
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h
index 6d25b92cb..9ceb7ef0a 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.h
+++ b/drivers/net/ethernet/sfc/ef10_sriov.h
@@ -70,6 +70,9 @@ int efx_ef10_vport_add_mac(struct efx_nic *efx,
int efx_ef10_vport_del_mac(struct efx_nic *efx,
unsigned int port_id, u8 *mac);
int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id);
+int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id,
+ u32 *port_flags, u32 *vadaptor_flags,
+ unsigned int *vlan_tags);
int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id);
#endif /* EF10_SRIOV_H */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 097f363f1..14b821b1c 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -600,6 +600,7 @@ fail:
*/
static void efx_start_datapath(struct efx_nic *efx)
{
+ netdev_features_t old_features = efx->net_dev->features;
bool old_rx_scatter = efx->rx_scatter;
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
@@ -644,6 +645,15 @@ static void efx_start_datapath(struct efx_nic *efx)
efx->rx_dma_len, efx->rx_page_buf_step,
efx->rx_bufs_per_page, efx->rx_pages_per_batch);
+ /* Restore previously fixed features in hw_features and remove
+ * features which are fixed now
+ */
+ efx->net_dev->hw_features |= efx->net_dev->features;
+ efx->net_dev->hw_features &= ~efx->fixed_features;
+ efx->net_dev->features |= efx->fixed_features;
+ if (efx->net_dev->features != old_features)
+ netdev_features_change(efx->net_dev);
+
/* RX filters may also have scatter-enabled flags */
if (efx->rx_scatter != old_rx_scatter)
efx->type->filter_update_rx_scatter(efx);
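
The three assignments above shuttle bits between the user-toggleable set (hw_features) and the forced-on set (fixed_features). The same algebra on bare 64-bit masks, with made-up feature bits standing in for the NETIF_F_* flags:

#include <stdint.h>
#include <stdio.h>

#define F_RXCSUM      (1ull << 0)
#define F_VLAN_FILTER (1ull << 1)   /* stands in for NETIF_F_HW_VLAN_CTAG_FILTER */

int main(void)
{
        uint64_t features = F_RXCSUM;                     /* currently enabled */
        uint64_t hw_features = F_RXCSUM | F_VLAN_FILTER;  /* user-toggleable */
        uint64_t fixed = F_VLAN_FILTER;  /* now forced on, e.g. by a vPort restriction */

        /* the same three steps as the hunk above */
        hw_features |= features;        /* restore anything previously fixed */
        hw_features &= ~fixed;          /* fixed bits are no longer toggleable */
        features |= fixed;              /* and are forced on */

        printf("features=%#llx hw_features=%#llx\n",
               (unsigned long long)features, (unsigned long long)hw_features);
        return 0;
}
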
@@ -1719,6 +1729,7 @@ static int efx_probe_filters(struct efx_nic *efx)
spin_lock_init(&efx->filter_lock);
init_rwsem(&efx->filter_sem);
+ mutex_lock(&efx->mac_lock);
down_write(&efx->filter_sem);
rc = efx->type->filter_table_probe(efx);
if (rc)
@@ -1757,6 +1768,7 @@ static int efx_probe_filters(struct efx_nic *efx)
#endif
out_unlock:
up_write(&efx->filter_sem);
+ mutex_unlock(&efx->mac_lock);
return rc;
}
@@ -2312,14 +2324,46 @@ static void efx_set_rx_mode(struct net_device *net_dev)
static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
{
struct efx_nic *efx = netdev_priv(net_dev);
+ int rc;
/* If disabling RX n-tuple filtering, clear existing filters */
- if (net_dev->features & ~data & NETIF_F_NTUPLE)
- return efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
+ if (net_dev->features & ~data & NETIF_F_NTUPLE) {
+ rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
+ if (rc)
+ return rc;
+ }
+
+ /* If Rx VLAN filter is changed, update filters via mac_reconfigure */
+ if ((net_dev->features ^ data) & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ /* efx_set_rx_mode() will schedule MAC work to update filters
+ * when the new features are finally set in net_dev.
+ */
+ efx_set_rx_mode(net_dev);
+ }
return 0;
}
+static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->vlan_rx_add_vid)
+ return efx->type->vlan_rx_add_vid(efx, proto, vid);
+ else
+ return -EOPNOTSUPP;
+}
+
+static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vid)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->vlan_rx_kill_vid)
+ return efx->type->vlan_rx_kill_vid(efx, proto, vid);
+ else
+ return -EOPNOTSUPP;
+}
+
static const struct net_device_ops efx_netdev_ops = {
.ndo_open = efx_net_open,
.ndo_stop = efx_net_stop,
@@ -2332,6 +2376,8 @@ static const struct net_device_ops efx_netdev_ops = {
.ndo_set_mac_address = efx_set_mac_address,
.ndo_set_rx_mode = efx_set_rx_mode,
.ndo_set_features = efx_set_features,
+ .ndo_vlan_rx_add_vid = efx_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = efx_vlan_rx_kill_vid,
#ifdef CONFIG_SFC_SRIOV
.ndo_set_vf_mac = efx_sriov_set_vf_mac,
.ndo_set_vf_vlan = efx_sriov_set_vf_vlan,
@@ -3147,17 +3193,25 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
return -ENOMEM;
efx = netdev_priv(net_dev);
efx->type = (const struct efx_nic_type *) entry->driver_data;
+ efx->fixed_features |= NETIF_F_HIGHDMA;
net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
- NETIF_F_HIGHDMA | NETIF_F_TSO |
- NETIF_F_RXCSUM);
+ NETIF_F_TSO | NETIF_F_RXCSUM);
if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
net_dev->features |= NETIF_F_TSO6;
/* Mask for features that also apply to VLAN devices */
net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
NETIF_F_RXCSUM);
- /* All offloads can be toggled */
- net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
+
+ net_dev->hw_features = net_dev->features & ~efx->fixed_features;
+
+ /* Disable VLAN filtering by default. It may be enforced if
+ * the feature is fixed (i.e. VLAN filters are required to
+ * receive VLAN tagged packets due to vPort restrictions).
+ */
+ net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ net_dev->features |= efx->fixed_features;
+
pci_set_drvdata(pci_dev, efx);
SET_NETDEV_DEV(net_dev, &pci_dev->dev);
rc = efx_init_struct(efx, pci_dev, net_dev);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 5e3f93f04..c3ae739e9 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -274,4 +274,13 @@ static inline void efx_device_detach_sync(struct efx_nic *efx)
netif_tx_unlock_bh(dev);
}
+static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem)
+{
+ if (WARN_ON(down_read_trylock(sem))) {
+ up_read(sem);
+ return false;
+ }
+ return true;
+}
+
#endif /* EFX_EFX_H */
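
efx_rwsem_assert_write_locked() relies on read and write locking being mutually exclusive: if down_read_trylock() succeeds, no writer holds the semaphore, so the caller cannot have taken it for write. A userspace analogue of the same trick using a POSIX rwlock (an analogue only; kernel rwsem semantics differ in detail):

#include <pthread.h>
#include <stdio.h>

/* If a read lock can be taken, nobody holds the lock for write. Note the
 * check passes whenever *some* writer holds it, not specifically the
 * caller, exactly like the kernel helper. */
static int assert_write_locked(pthread_rwlock_t *l)
{
        if (pthread_rwlock_tryrdlock(l) == 0) {
                pthread_rwlock_unlock(l);
                return 0;       /* not write-locked: assertion fails */
        }
        return 1;
}

int main(void)
{
        pthread_rwlock_t l = PTHREAD_RWLOCK_INITIALIZER;

        pthread_rwlock_wrlock(&l);
        printf("write-locked: %d\n", assert_write_locked(&l));  /* 1 */
        pthread_rwlock_unlock(&l);
        printf("unlocked:     %d\n", assert_write_locked(&l));  /* 0 */
        return 0;
}
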
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 4cc772164..c9a5b003c 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -273,6 +273,9 @@
* have already installed filters. See the comment at
* MC_CMD_WORKAROUND_BUG26807. */
#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
+/* The clock whose frequency you've attempted to set
+ * doesn't exist on this NIC */
+#define MC_CMD_ERR_NO_CLOCK 0x1015
#define MC_CMD_ERR_CODE_OFST 0
@@ -292,9 +295,11 @@
/* Point to the copycode entry point. */
#define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4)
#define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4)
+#define MEDFORD_MC_BOOTROM_COPYCODE_VEC (0x10000 - 3 * 0x4)
/* Points to the recovery mode entry point. */
#define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4)
#define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4)
+#define MEDFORD_MC_BOOTROM_NOFLASH_VEC (0x10000 - 2 * 0x4)
/* The command set exported by the boot ROM (MCDI v0) */
#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \
@@ -686,6 +691,12 @@
#define FCDI_EVENT_CODE_PTP_STATUS 0x9
/* enum: Port id config to map MC-FC port idx */
#define FCDI_EVENT_CODE_PORT_CONFIG 0xa
+/* enum: Boot result or error code */
+#define FCDI_EVENT_CODE_BOOT_RESULT 0xb
+#define FCDI_EVENT_REBOOT_SRC_LBN 36
+#define FCDI_EVENT_REBOOT_SRC_WIDTH 8
+#define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */
+#define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
@@ -717,6 +728,11 @@
#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
+#define FCDI_EVENT_BOOT_RESULT_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */
+#define FCDI_EVENT_BOOT_RESULT_LBN 0
+#define FCDI_EVENT_BOOT_RESULT_WIDTH 32
/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
 * to the MC. Note that this structure is overlaid over a normal FCDI event
@@ -1649,15 +1665,30 @@
/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
-/* Uncorrected error on transmit timestamps in NIC clock format */
+/* Uncorrected error on PTP transmit timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0
-/* Uncorrected error on receive timestamps in NIC clock format */
+/* Uncorrected error on PTP receive timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4
/* Uncorrected error on PPS output in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8
/* Uncorrected error on PPS input in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12
+/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2 msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN 24
+/* Uncorrected error on PTP transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_OFST 0
+/* Uncorrected error on PTP receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_OFST 4
+/* Uncorrected error on PPS output in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_OFST 8
+/* Uncorrected error on PPS input in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_OFST 12
+/* Uncorrected error on non-PTP transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_OFST 16
+/* Uncorrected error on non-PTP receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_OFST 20
+
/* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */
#define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4
/* Results of testing */
@@ -2158,8 +2189,12 @@
/* MC_CMD_DRV_ATTACH_IN msgrequest */
#define MC_CMD_DRV_ATTACH_IN_LEN 12
-/* new state (0=detached, 1=attached) to set if UPDATE=1 */
+/* new state to set if UPDATE=1 */
#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_LBN 0
+#define MC_CMD_DRV_ATTACH_WIDTH 1
+#define MC_CMD_DRV_PREBOOT_LBN 1
+#define MC_CMD_DRV_PREBOOT_WIDTH 1
/* 1 to set new state, or 0 to just report the existing state */
#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
/* preferred datapath firmware (for Huntington; ignored for Siena) */
@@ -2181,12 +2216,12 @@
/* MC_CMD_DRV_ATTACH_OUT msgresponse */
#define MC_CMD_DRV_ATTACH_OUT_LEN 4
-/* previous or existing state (0=detached, 1=attached) */
+/* previous or existing state, see the bitmask at NEW_STATE */
#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
/* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */
#define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8
-/* previous or existing state (0=detached, 1=attached) */
+/* previous or existing state, see the bitmask at NEW_STATE */
#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0
/* Flags associated with this function */
#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
@@ -2198,6 +2233,10 @@
#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL 0x1
/* enum: The function can perform privileged operations */
#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED 0x2
+/* enum: The function does not have an active port associated with it. The port
+ * refers to the Sorrento external FPGA port.
+ */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT 0x3
/***********************************/
@@ -2892,7 +2931,7 @@
*/
#define MC_CMD_SET_MAC 0x2c
-#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_LINK
+#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_SET_MAC_IN msgrequest */
#define MC_CMD_SET_MAC_IN_LEN 28
@@ -2927,9 +2966,66 @@
#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0
#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1
+/* MC_CMD_SET_MAC_EXT_IN msgrequest */
+#define MC_CMD_SET_MAC_EXT_IN_LEN 32
+/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
+ * EtherII, VLAN, bug16011 padding).
+ */
+#define MC_CMD_SET_MAC_EXT_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_EXT_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20
+/* enum: Flow control is off. */
+/* MC_CMD_FCNTL_OFF 0x0 */
+/* enum: Respond to flow control. */
+/* MC_CMD_FCNTL_RESPOND 0x1 */
+/* enum: Respond to and issue flow control. */
+/* MC_CMD_FCNTL_BIDIR 0x2 */
+/* enum: Auto neg flow control. */
+/* MC_CMD_FCNTL_AUTO 0x3 */
+/* enum: Priority flow control (eftest builds only). */
+/* MC_CMD_FCNTL_QBB 0x4 */
+/* enum: Issue flow control. */
+/* MC_CMD_FCNTL_GENERATE 0x5 */
+#define MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1
+/* Select which parameters to configure. A parameter will only be modified if
+ * the corresponding control flag is set. If SET_MAC_ENHANCED is not set in
+ * capabilities then this field is ignored (and all flags are assumed to be
+ * set).
+ */
+#define MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28
+#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_LBN 2
+#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_LBN 3
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_LBN 4
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_WIDTH 1
+
/* MC_CMD_SET_MAC_OUT msgresponse */
#define MC_CMD_SET_MAC_OUT_LEN 0
+/* MC_CMD_SET_MAC_V2_OUT msgresponse */
+#define MC_CMD_SET_MAC_V2_OUT_LEN 4
+/* MTU as configured after processing the request. See comment at
+ * MC_CMD_SET_MAC_IN/MTU. To query MTU without doing any changes, set CONTROL
+ * to 0.
+ */
+#define MC_CMD_SET_MAC_V2_OUT_MTU_OFST 0
+
/***********************************/
/* MC_CMD_PHY_STATS
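
With SET_MAC_ENHANCED firmware, the CONTROL word selects which MAC parameters a SET_MAC_EXT request touches. A sketch of composing it to update only the MTU, with the MC_CMD_SET_MAC_EXT_IN_CFG_* bit positions abbreviated and the marshalling of the full 32-byte request omitted:

#include <stdint.h>
#include <stdio.h>

/* Control-flag bit positions, abbreviated from the defines above */
#define CFG_MTU_LBN    0
#define CFG_DRAIN_LBN  1
#define CFG_REJECT_LBN 2
#define CFG_FCNTL_LBN  3
#define CFG_FCS_LBN    4

int main(void)
{
        /* Touch only the MTU; every other MAC parameter is left as-is */
        uint32_t control = 1u << CFG_MTU_LBN;

        printf("CONTROL = %#x (update MTU only)\n", control);

        /* CONTROL = 0 modifies nothing, but MC_CMD_SET_MAC_V2_OUT still
         * carries the MTU in effect, i.e. a pure query */
        control = 0;
        printf("CONTROL = %#x (query)\n", control);
        return 0;
}
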
@@ -3521,6 +3617,26 @@
#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
+/* MC_CMD_NVRAM_INFO_V2_OUT msgresponse */
+#define MC_CMD_NVRAM_INFO_V2_OUT_LEN 28
+#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_OFST 4
+#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_OFST 8
+#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_LBN 0
+#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7
+#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20
+/* Writes must be multiples of this size. Added to support the MUM on Sorrento.
+ */
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_OFST 24
+
/***********************************/
/* MC_CMD_NVRAM_UPDATE_START
@@ -3561,6 +3677,37 @@
/* amount to read in bytes */
#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
+/* MC_CMD_NVRAM_READ_IN_V2 msgrequest */
+#define MC_CMD_NVRAM_READ_IN_V2_LEN 16
+#define MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST 4
+/* amount to read in bytes */
+#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST 8
+/* Optional control info. If a partition is stored with an A/B versioning
+ * scheme (i.e. in more than one physical partition in NVRAM) the host can set
+ * this to control which underlying physical partition is used to read data
+ * from. This allows it to perform a read-modify-write-verify with the write
+ * lock continuously held by calling NVRAM_UPDATE_START, reading the old
+ * contents using MODE=TARGET_CURRENT, overwriting the old partition and then
+ * verifying by reading with MODE=TARGET_BACKUP.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_MODE_OFST 12
+/* enum: Same as omitting MODE: caller sees data in current partition unless it
+ * holds the write lock, in which case it sees data in the partition it is
+ * updating.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_DEFAULT 0x0
+/* enum: Read from the current partition of an A/B pair, even if holding the
+ * write lock.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT 0x1
+/* enum: Read from the non-current (i.e. to be updated) partition of an A/B
+ * pair
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_TARGET_BACKUP 0x2
+
/* MC_CMD_NVRAM_READ_OUT msgresponse */
#define MC_CMD_NVRAM_READ_OUT_LENMIN 1
#define MC_CMD_NVRAM_READ_OUT_LENMAX 252
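
The MODE comment above describes a read-modify-write-verify cycle on an A/B partition with the write lock held throughout. A sketch of that call sequence; the wrapper functions are stand-ins stubbed to trace the MCDI commands involved, not driver API:

#include <stdio.h>

#define MODE_TARGET_CURRENT 0x1
#define MODE_TARGET_BACKUP  0x2

/* Stand-in wrappers, stubbed to trace the MCDI sequence */
static void update_start(void)  { puts("NVRAM_UPDATE_START  (take write lock)"); }
static void read_v2(int mode)   { printf("NVRAM_READ_IN_V2    MODE=%#x\n", mode); }
static void write_blob(void)    { puts("NVRAM_WRITE         (overwrite old partition)"); }
static void update_finish(void) { puts("NVRAM_UPDATE_FINISH (drop write lock)"); }

int main(void)
{
        update_start();
        read_v2(MODE_TARGET_CURRENT);   /* old contents, despite holding the lock */
        /* ... modify the buffer here ... */
        write_blob();
        read_v2(MODE_TARGET_BACKUP);    /* verify the copy just written */
        update_finish();
        return 0;
}
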
@@ -3895,6 +4042,8 @@
#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39
/* enum: CCOM AVREG 1v8 supply (external ADC): mV */
#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a
+/* enum: CCOM RTS temperature: degC */
+#define MC_CMD_SENSOR_CONTROLLER_RTS 0x3b
/* enum: Not a sensor: reserved for the next page flag */
#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f
/* enum: controller internal temperature sensor voltage on master core
@@ -3931,6 +4080,12 @@
#define MC_CMD_SENSOR_PHY0_VCC 0x4c
/* enum: Voltage supplied to the QSFP #1 from their power supply: mV */
#define MC_CMD_SENSOR_PHY1_VCC 0x4d
+/* enum: Controller die temperature (TDIODE): degC */
+#define MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP 0x4e
+/* enum: Board temperature (front): degC */
+#define MC_CMD_SENSOR_BOARD_FRONT_TEMP 0x4f
+/* enum: Board temperature (back): degC */
+#define MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50
/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
#define MC_CMD_SENSOR_ENTRY_OFST 4
#define MC_CMD_SENSOR_ENTRY_LEN 8
@@ -4007,7 +4162,7 @@
/* MC_CMD_READ_SENSORS_EXT_IN msgrequest */
#define MC_CMD_READ_SENSORS_EXT_IN_LEN 12
-/* DMA address of host buffer for sensor readings */
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */
#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0
#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8
#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0
@@ -4608,6 +4763,10 @@
* operations
*/
#define MC_CMD_MUM_OP_QSFP 0xc
+/* enum: Request discrete and SODIMM DDR info (type, size, speed grade, voltage
+ * level) from MUM
+ */
+#define MC_CMD_MUM_OP_READ_DDR_INFO 0xd
/* MC_CMD_MUM_IN_NULL msgrequest */
#define MC_CMD_MUM_IN_NULL_LEN 4
@@ -4793,6 +4952,10 @@
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_WIDTH 1
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_LBN 2
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_WIDTH 1
/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */
#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
@@ -4862,6 +5025,11 @@
#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
+/* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */
+#define MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+
/* MC_CMD_MUM_OUT msgresponse */
#define MC_CMD_MUM_OUT_LEN 0
@@ -5004,6 +5172,69 @@
#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
+/* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX 248
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num))
+/* Discrete (soldered) DDR resistor strap info */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16
+/* Number of SODIMM info records */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4
+/* Array of SODIMM info records */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_OFST 12
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MINNUM 2
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM 30
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_LBN 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_WIDTH 8
+/* enum: SODIMM bank 1 (Top SODIMM for Sorrento) */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK1 0x0
+/* enum: SODIMM bank 2 (Bottom SODIMM for Sorrento) */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK2 0x1
+/* enum: Total number of SODIMM banks */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_BANKS 0x2
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_LBN 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_WIDTH 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_LBN 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_WIDTH 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_LBN 20
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_WIDTH 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_POWERED 0x0 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V25 0x1 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V35 0x2 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V5 0x3 /* enum */
+/* enum: Values 5-15 are reserved for future usage */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V8 0x4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_LBN 24
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_WIDTH 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_LBN 32
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_WIDTH 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_LBN 48
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_WIDTH 4
+/* enum: No module present */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_ABSENT 0x0
+/* enum: Module present supported and powered on */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_POWERED 0x1
+/* enum: Module present but bad type */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_TYPE 0x2
+/* enum: Module present but incompatible voltage */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_VOLTAGE 0x3
+/* enum: Module present but unknown SPD */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SPD 0x4
+/* enum: Module present but slot cannot support it */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SLOT 0x5
+/* enum: Modules may or may not be present, but cannot establish contact by I2C
+ */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_REACHABLE 0x6
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_LBN 52
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_WIDTH 12
+
/* MC_CMD_RESOURCE_SPECIFIER enum */
/* enum: Any */
#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
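
A READ_DDR_INFO record packs its fields into 64 bits using the LBN (low bit number) and WIDTH pairs above, the convention used throughout this header. A decode sketch over a fabricated record:

#include <stdint.h>
#include <stdio.h>

/* Generic LBN/WIDTH field extraction, the convention used by this header */
#define EXTRACT(rec, lbn, width) \
        (((rec) >> (lbn)) & ((1ull << (width)) - 1))

int main(void)
{
        uint64_t rec = 0;       /* fabricated SODIMM info record */

        rec |= 0x1ull  << 0;    /* BANK_ID: bank 2 */
        rec |= 0x3ull  << 20;   /* VOLTAGE: 1V5 */
        rec |= 2133ull << 32;   /* SPEED */
        rec |= 0x1ull  << 48;   /* STATE: PRESENT_POWERED */

        printf("bank=%llu voltage=%llu speed=%llu state=%llu\n",
               (unsigned long long)EXTRACT(rec, 0, 8),    /* BANK_ID */
               (unsigned long long)EXTRACT(rec, 20, 4),   /* VOLTAGE */
               (unsigned long long)EXTRACT(rec, 32, 16),  /* SPEED */
               (unsigned long long)EXTRACT(rec, 48, 4));  /* STATE */
        return 0;
}
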
@@ -5076,6 +5307,8 @@
#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500
/* enum: Expansion ROM configuration data for port 0 */
#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600
+/* enum: Synonym for EXPROM_CONFIG_PORT0 as used in pmap files */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG 0x600
/* enum: Expansion ROM configuration data for port 1 */
#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601
/* enum: Expansion ROM configuration data for port 2 */
@@ -5084,6 +5317,8 @@
#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603
/* enum: Non-volatile log output partition */
#define NVRAM_PARTITION_TYPE_LOG 0x700
+/* enum: Non-volatile log output of second core on dual-core device */
+#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701
/* enum: Device state dump output partition */
#define NVRAM_PARTITION_TYPE_DUMP 0x800
/* enum: Application license key storage partition */
@@ -5116,6 +5351,20 @@
#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05
/* enum: MUM fuses and lockbits partition. */
#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06
+/* enum: UEFI expansion ROM if separate from PXE */
+#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00
+/* enum: Spare partition 0 */
+#define NVRAM_PARTITION_TYPE_SPARE_0 0x1000
+/* enum: Spare partition 1 */
+#define NVRAM_PARTITION_TYPE_SPARE_1 0x1100
+/* enum: Spare partition 2 */
+#define NVRAM_PARTITION_TYPE_SPARE_2 0x1200
+/* enum: Spare partition 3 */
+#define NVRAM_PARTITION_TYPE_SPARE_3 0x1300
+/* enum: Spare partition 4 */
+#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400
+/* enum: Spare partition 5 */
+#define NVRAM_PARTITION_TYPE_SPARE_5 0x1500
/* enum: Start of reserved value range (firmware may use for any purpose) */
#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
/* enum: End of reserved value range (firmware may use for any purpose) */
@@ -5149,6 +5398,90 @@
#define LICENSED_APP_ID_ID_LBN 0
#define LICENSED_APP_ID_ID_WIDTH 32
+/* LICENSED_FEATURES structuredef */
+#define LICENSED_FEATURES_LEN 8
+/* Bitmask of licensed firmware features */
+#define LICENSED_FEATURES_MASK_OFST 0
+#define LICENSED_FEATURES_MASK_LEN 8
+#define LICENSED_FEATURES_MASK_LO_OFST 0
+#define LICENSED_FEATURES_MASK_HI_OFST 4
+#define LICENSED_FEATURES_RX_CUT_THROUGH_LBN 0
+#define LICENSED_FEATURES_RX_CUT_THROUGH_WIDTH 1
+#define LICENSED_FEATURES_PIO_LBN 1
+#define LICENSED_FEATURES_PIO_WIDTH 1
+#define LICENSED_FEATURES_EVQ_TIMER_LBN 2
+#define LICENSED_FEATURES_EVQ_TIMER_WIDTH 1
+#define LICENSED_FEATURES_CLOCK_LBN 3
+#define LICENSED_FEATURES_CLOCK_WIDTH 1
+#define LICENSED_FEATURES_RX_TIMESTAMPS_LBN 4
+#define LICENSED_FEATURES_RX_TIMESTAMPS_WIDTH 1
+#define LICENSED_FEATURES_TX_TIMESTAMPS_LBN 5
+#define LICENSED_FEATURES_TX_TIMESTAMPS_WIDTH 1
+#define LICENSED_FEATURES_RX_SNIFF_LBN 6
+#define LICENSED_FEATURES_RX_SNIFF_WIDTH 1
+#define LICENSED_FEATURES_TX_SNIFF_LBN 7
+#define LICENSED_FEATURES_TX_SNIFF_WIDTH 1
+#define LICENSED_FEATURES_PROXY_FILTER_OPS_LBN 8
+#define LICENSED_FEATURES_PROXY_FILTER_OPS_WIDTH 1
+#define LICENSED_FEATURES_EVENT_CUT_THROUGH_LBN 9
+#define LICENSED_FEATURES_EVENT_CUT_THROUGH_WIDTH 1
+#define LICENSED_FEATURES_MASK_LBN 0
+#define LICENSED_FEATURES_MASK_WIDTH 64
+
+/* LICENSED_V3_APPS structuredef */
+#define LICENSED_V3_APPS_LEN 8
+/* Bitmask of licensed applications */
+#define LICENSED_V3_APPS_MASK_OFST 0
+#define LICENSED_V3_APPS_MASK_LEN 8
+#define LICENSED_V3_APPS_MASK_LO_OFST 0
+#define LICENSED_V3_APPS_MASK_HI_OFST 4
+#define LICENSED_V3_APPS_ONLOAD_LBN 0
+#define LICENSED_V3_APPS_ONLOAD_WIDTH 1
+#define LICENSED_V3_APPS_PTP_LBN 1
+#define LICENSED_V3_APPS_PTP_WIDTH 1
+#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_LBN 2
+#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_WIDTH 1
+#define LICENSED_V3_APPS_SOLARSECURE_LBN 3
+#define LICENSED_V3_APPS_SOLARSECURE_WIDTH 1
+#define LICENSED_V3_APPS_PERF_MONITOR_LBN 4
+#define LICENSED_V3_APPS_PERF_MONITOR_WIDTH 1
+#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_LBN 5
+#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_WIDTH 1
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_LBN 6
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_WIDTH 1
+#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_LBN 7
+#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_WIDTH 1
+#define LICENSED_V3_APPS_MASK_LBN 0
+#define LICENSED_V3_APPS_MASK_WIDTH 64
+
+/* LICENSED_V3_FEATURES structuredef */
+#define LICENSED_V3_FEATURES_LEN 8
+/* Bitmask of licensed firmware features */
+#define LICENSED_V3_FEATURES_MASK_OFST 0
+#define LICENSED_V3_FEATURES_MASK_LEN 8
+#define LICENSED_V3_FEATURES_MASK_LO_OFST 0
+#define LICENSED_V3_FEATURES_MASK_HI_OFST 4
+#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_LBN 0
+#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_WIDTH 1
+#define LICENSED_V3_FEATURES_PIO_LBN 1
+#define LICENSED_V3_FEATURES_PIO_WIDTH 1
+#define LICENSED_V3_FEATURES_EVQ_TIMER_LBN 2
+#define LICENSED_V3_FEATURES_EVQ_TIMER_WIDTH 1
+#define LICENSED_V3_FEATURES_CLOCK_LBN 3
+#define LICENSED_V3_FEATURES_CLOCK_WIDTH 1
+#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_LBN 4
+#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_WIDTH 1
+#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN 5
+#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_WIDTH 1
+#define LICENSED_V3_FEATURES_RX_SNIFF_LBN 6
+#define LICENSED_V3_FEATURES_RX_SNIFF_WIDTH 1
+#define LICENSED_V3_FEATURES_TX_SNIFF_LBN 7
+#define LICENSED_V3_FEATURES_TX_SNIFF_WIDTH 1
+#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_LBN 8
+#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_WIDTH 1
+#define LICENSED_V3_FEATURES_MASK_LBN 0
+#define LICENSED_V3_FEATURES_MASK_WIDTH 64
+
/* TX_TIMESTAMP_EVENT structuredef */
#define TX_TIMESTAMP_EVENT_LEN 6
/* lower 16 bits of timestamp data */
@@ -5258,6 +5591,8 @@
#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_WIDTH 1
#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_LBN 5
#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6
+#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1
#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
/* enum: Disabled */
#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0
@@ -5362,6 +5697,8 @@
#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1
#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9
#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_FORCE_EV_MERGING_LBN 10
+#define MC_CMD_INIT_RXQ_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
@@ -5422,6 +5759,8 @@
#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_LBN 19
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
@@ -5535,6 +5874,8 @@
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_LBN 12
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
@@ -5747,6 +6088,46 @@
#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
+/* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size STATUS_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REQUEST_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REPLY_BLOCK_SIZE. This buffer is only needed if
+ * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
+ */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32
+/* Must be a power of 2, or zero if this buffer is not provided */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36
+/* Applies to all three buffers */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40
+/* A bit mask defining which MCDI operations may be proxied */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108
+
/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
#define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
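
The EXT variant takes three host DMA buffers, each holding at least NUM_BLOCKS blocks whose block size must be a power of 2 (zero for the REPLY buffer means it is absent). A sketch of the corresponding sanity checks:

#include <stdint.h>
#include <stdio.h>

/* Block sizes must be powers of 2; REPLY may be 0 (buffer not provided) */
static int block_size_ok(uint32_t sz, int may_be_zero)
{
        if (sz == 0)
                return may_be_zero;
        return (sz & (sz - 1)) == 0;
}

int main(void)
{
        uint32_t num_blocks = 16;
        uint32_t status_sz = 64, request_sz = 512, reply_sz = 0;

        printf("status ok=%d request ok=%d reply ok=%d\n",
               block_size_ok(status_sz, 0),
               block_size_ok(request_sz, 0),
               block_size_ok(reply_sz, 1));
        /* each buffer must hold at least NUM_BLOCKS blocks */
        printf("status buffer >= %u bytes\n", num_blocks * status_sz);
        return 0;
}
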
@@ -6323,6 +6704,15 @@
* client
*/
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2
+/* enum: read properties relating to security rules (Medford-only; for use by
+ * SolarSecure apps, not directly by drivers. See SF-114946-SW.)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SECURITY_RULE_INFO 0x3
+/* enum: read the list of supported RX filter matches for VXLAN/NVGRE
+ * encapsulated frames, which follow a different match sequence to normal
+ * frames (Medford only)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES 0x4
/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
@@ -6356,7 +6746,10 @@
/***********************************/
/* MC_CMD_PARSER_DISP_RW
- * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging
+ * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging.
+ * Please note that this interface is only of use to debug tools which have
+ * knowledge of firmware and hardware data structures; nothing here is intended
+ * for use by normal driver code.
*/
#define MC_CMD_PARSER_DISP_RW 0xe5
@@ -6374,6 +6767,12 @@
#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
/* enum: Lookup engine (with requested metadata format) */
#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3
+/* enum: RX0 dispatcher CPU (alias for RX_DICPU; Medford has 2 RX DICPUs) */
+#define MC_CMD_PARSER_DISP_RW_IN_RX0_DICPU 0x0
+/* enum: RX1 dispatcher CPU (only valid for Medford) */
+#define MC_CMD_PARSER_DISP_RW_IN_RX1_DICPU 0x4
+/* enum: Miscellaneous other state (only valid for Medford) */
+#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5
/* identifies the type of operation requested */
#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
/* enum: read a word of DICPU DMEM or a LUE entry */
@@ -6382,8 +6781,12 @@
#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1
/* enum: read-modify-write a word of DICPU DMEM (not valid for LUE) */
#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2
-/* data memory address or LUE index */
+/* data memory address (DICPU targets) or LUE index (LUE targets) */
#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
+/* selector (for MISC_STATE target) */
+#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8
+/* enum: Port to datapath mapping */
+#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1
/* value to write (for DMEM writes) */
#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
@@ -6408,6 +6811,12 @@
*/
#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_OFST 20
#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_LEN 32
+/* datapath(s) used for each port (for MISC_STATE PORT_DP_MAPPING selector) */
+#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_OFST 0
+#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_LEN 4
+#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_NUM 4
+#define MC_CMD_PARSER_DISP_RW_OUT_DP0 0x1 /* enum */
+#define MC_CMD_PARSER_DISP_RW_OUT_DP1 0x2 /* enum */
/***********************************/
@@ -7071,6 +7480,24 @@
#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20
/* First word of flags. */
#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN 13
@@ -7138,6 +7565,8 @@
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
/* enum: RXDP Test firmware image 8 */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
/* TxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6
#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2
@@ -7153,6 +7582,8 @@
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
/* enum: TXDP Test firmware image 2 */
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_CSR 0x103
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0
@@ -7227,6 +7658,258 @@
/* Licensed capabilities */
#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16
+/* MC_CMD_GET_CAPABILITIES_V2_IN msgrequest */
+#define MC_CMD_GET_CAPABILITIES_V2_IN_LEN 0
+
+/* MC_CMD_GET_CAPABILITIES_V2_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LEN 72
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Virtual switching (full feature) RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Virtual switching (full feature) TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_OFST 16
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
+ * on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex scheme of mapping PFs to ports is in use.
+ * A future driver should look for a new field supporting the new scheme; a
+ * current/old driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
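
/* [Editor's sketch, not part of this patch] A minimal illustration of
 * decoding the per-PF port assignment bytes above, including the special
 * values; the caller is assumed to have length-checked the
 * GET_CAPABILITIES response (as the FLAGS2 comment above advises) and to
 * be using the standard sfc MCDI helpers from mcdi.h.
 */
static int example_port_of_pf(efx_dword_t *caps, unsigned int pf)
{
        const u8 *map = MCDI_PTR(caps,
                                 GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT);

        if (pf >= MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM)
                return -EINVAL;
        switch (map[pf]) {
        case MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED:
                return -EPERM;
        case MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT:
                return -ENODEV;
        case MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED:
        case MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT:
                /* an old/current driver treats these two cases alike */
                return -ENOENT;
        default:
                return map[pf];	/* external port number */
        }
}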
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2
+
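/* [Editor's sketch, not part of this patch] Because FLAGS2 and the fields
 * after it are absent on older firmware, a driver must size-check the
 * response before reading them.  A hedged example, assuming the usual sfc
 * helpers (MCDI_DECLARE_BUF, MCDI_DWORD, efx_mcdi_rpc), the V2_OUT_LEN
 * constant defined earlier in this header, and the standard 'efx' NIC
 * pointer:
 */
static bool example_has_tx_tso_v2(struct efx_nic *efx)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V2_OUT_LEN);
        size_t outlen;

        if (efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
                         outbuf, sizeof(outbuf), &outlen))
                return false;
        /* Older firmware stops short of the FLAGS2 word */
        if (outlen < MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST + 4)
                return false;
        return MCDI_DWORD(outbuf, GET_CAPABILITIES_V2_OUT_FLAGS2) &
               (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN);
}
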
/***********************************/
/* MC_CMD_V2_EXTN
@@ -7475,6 +8158,25 @@
/***********************************/
+/* MC_CMD_VSWITCH_QUERY
+ * read some config of a v-switch. For now this command is an empty
+ * placeholder: it may be used to check whether a v-switch is connected to a
+ * given EVB port (if not, the command returns ENOENT).
+ */
+#define MC_CMD_VSWITCH_QUERY 0x63
+
+#define MC_CMD_0x63_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_QUERY_IN msgrequest */
+#define MC_CMD_VSWITCH_QUERY_IN_LEN 4
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VSWITCH_QUERY_OUT msgresponse */
+#define MC_CMD_VSWITCH_QUERY_OUT_LEN 0
+
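/* [Editor's sketch, not part of this patch] The one documented use today:
 * probing whether a v-switch exists on an EVB port, where -ENOENT from the
 * MC means "no v-switch".  A hypothetical helper using the standard MCDI
 * wrappers:
 */
static bool example_vswitch_present(struct efx_nic *efx, u32 upstream_port_id)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_VSWITCH_QUERY_IN_LEN);

        MCDI_SET_DWORD(inbuf, VSWITCH_QUERY_IN_UPSTREAM_PORT_ID,
                       upstream_port_id);
        /* Zero-length response, so no output buffer is needed */
        return efx_mcdi_rpc(efx, MC_CMD_VSWITCH_QUERY, inbuf, sizeof(inbuf),
                            NULL, 0, NULL) == 0;
}
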
+
+/***********************************/
/* MC_CMD_VPORT_ALLOC
* allocate a v-port.
*/
@@ -7510,6 +8212,8 @@
#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN 1
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_WIDTH 1
/* The number of VLAN tags to insert/remove. An error will be returned if
* incompatible with the number of VLAN tags specified for the upstream
* v-switch.
@@ -7561,6 +8265,8 @@
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 1
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
/* The number of VLAN tags to strip on receive */
#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
/* The number of VLAN tags to transparently insert/remove. */
@@ -7639,6 +8345,29 @@
/***********************************/
+/* MC_CMD_VADAPTOR_QUERY
+ * read some config of a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_QUERY 0x61
+
+#define MC_CMD_0x61_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_QUERY_IN msgrequest */
+#define MC_CMD_VADAPTOR_QUERY_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VADAPTOR_QUERY_OUT msgresponse */
+#define MC_CMD_VADAPTOR_QUERY_OUT_LEN 12
+/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
+#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_OFST 0
+/* The v-adaptor flags as defined at MC_CMD_VADAPTOR_ALLOC. */
+#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_OFST 4
+/* The number of VLAN tags that may still be added */
+#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 8
+
+
+/***********************************/
/* MC_CMD_EVB_PORT_ASSIGN
* assign a port to a PCI function.
*/
@@ -7875,10 +8604,17 @@
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
-/* Hash control flags. The _EN bits are always supported. The _MODE bits only
- * work when the firmware reports ADDITIONAL_RSS_MODES in
- * MC_CMD_GET_CAPABILITIES and override the _EN bits if any of them are not 0.
- * See the RSS_MODE structure for the meaning of the mode bits.
+/* Hash control flags. The _EN bits are always supported, but new modes are
+ * available when ADDITIONAL_RSS_MODES is reported by MC_CMD_GET_CAPABILITIES:
+ * in this case, the MODE fields may be set to non-zero values, and will take
+ * effect regardless of the settings of the _EN flags. See the RSS_MODE
+ * structure for the meaning of the mode bits. Drivers must check the
+ * capability before trying to set any _MODE fields, as older firmware will
+ * reject any attempt to set the FLAGS field to a value > 0xff with EINVAL. In
+ * the case where all the _MODE flags are zero, the _EN flags take effect,
+ * providing backward compatibility for existing drivers. (Setting all _MODE
+ * *and* all _EN flags to zero is valid, to disable RSS spreading for that
+ * particular packet type.)
*/
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
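
/* [Editor's sketch, not part of this patch] Gating the _MODE nibbles on the
 * ADDITIONAL_RSS_MODES capability, as the comment above requires; the
 * 'has_additional_rss_modes' argument stands in for whatever cached
 * capability test the driver uses.
 */
static int example_set_rss_flags(struct efx_nic *efx, u32 context_id,
                                 u32 en_flags, u32 mode_flags,
                                 bool has_additional_rss_modes)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN);
        u32 flags = en_flags;

        if (has_additional_rss_modes)
                flags |= mode_flags;	/* non-zero _MODE overrides _EN */
        else if (mode_flags)
                return -EOPNOTSUPP;	/* old firmware would give EINVAL */

        MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID,
                       context_id);
        MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, flags);
        return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_FLAGS,
                            inbuf, sizeof(inbuf), NULL, 0, NULL);
}
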
@@ -7923,11 +8659,18 @@
/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
-/* Hash control flags. If any _MODE bits are non-zero (which will only be true
- * when the firmware reports ADDITIONAL_RSS_MODES) then the _EN bits should be
- * disregarded (but are guaranteed to be consistent with the _MODE bits if
- * RSS_CONTEXT_SET_FLAGS has never been called for this context since it was
- * allocated).
+/* Hash control flags. If all _MODE bits are zero (which will always be true
+ * for older firmware which does not report the ADDITIONAL_RSS_MODES
+ * capability), the _EN bits report the state. If any _MODE bits are non-zero
+ * (which will only be true when the firmware reports ADDITIONAL_RSS_MODES)
+ * then the _EN bits should be disregarded, although the _MODE flags are
+ * guaranteed to be consistent with the _EN flags for a freshly-allocated RSS
+ * context and in the case where the _EN flags were used in the SET. This
+ * provides backward compatibility: old drivers will not be attempting to
+ * derive any meaning from the _MODE bits (and can never set them to any value
+ * not representable by the _EN bits); new drivers can always determine the
+ * mode by looking only at the _MODE bits; the value returned by a GET can
+ * always be used for a SET regardless of old/new driver vs. old/new firmware.
*/
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
@@ -8155,6 +8898,74 @@
/***********************************/
+/* MC_CMD_VPORT_RECONFIGURE
+ * Replace VLAN tags and/or MAC addresses of an existing v-port. If the v-port
+ * has already been passed to another function (v-port's user), then that
+ * function will be reset before applying the changes.
+ */
+#define MC_CMD_VPORT_RECONFIGURE 0xeb
+
+#define MC_CMD_0xeb_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_RECONFIGURE_IN msgrequest */
+#define MC_CMD_VPORT_RECONFIGURE_IN_LEN 44
+/* The handle of the v-port */
+#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_OFST 0
+/* Flags requesting what should be changed. */
+#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_WIDTH 1
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN 1
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_WIDTH 1
+/* The number of VLAN tags to insert/remove. An error will be returned if
+ * incompatible with the number of VLAN tags specified for the upstream
+ * v-switch.
+ */
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_OFST 8
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_OFST 12
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_WIDTH 16
+/* The number of MAC addresses to add */
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_OFST 16
+/* MAC addresses to add */
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_OFST 20
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_LEN 6
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_NUM 4
+
+/* MC_CMD_VPORT_RECONFIGURE_OUT msgresponse */
+#define MC_CMD_VPORT_RECONFIGURE_OUT_LEN 4
+#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1
+
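/* [Editor's sketch, not part of this patch] Replacing a v-port's MAC
 * address list with a single address; the VLAN tags are untouched because
 * REPLACE_VLAN_TAGS stays clear (NUM_VLAN_TAGS is still written so the
 * request carries no stale data).  RESET_DONE in the response tells the
 * caller whether the v-port's user was reset to apply the change.
 */
static int example_vport_set_mac(struct efx_nic *efx, u32 vport_id,
                                 const u8 *mac, bool *reset_done)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_RECONFIGURE_IN_LEN);
        MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_RECONFIGURE_OUT_LEN);
        size_t outlen;
        int rc;

        MCDI_SET_DWORD(inbuf, VPORT_RECONFIGURE_IN_VPORT_ID, vport_id);
        MCDI_SET_DWORD(inbuf, VPORT_RECONFIGURE_IN_FLAGS,
                       1 << MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN);
        MCDI_SET_DWORD(inbuf, VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS, 0);
        MCDI_SET_DWORD(inbuf, VPORT_RECONFIGURE_IN_NUM_MACADDRS, 1);
        ether_addr_copy(MCDI_PTR(inbuf, VPORT_RECONFIGURE_IN_MACADDRS), mac);

        rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_RECONFIGURE, inbuf, sizeof(inbuf),
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;
        *reset_done = MCDI_DWORD(outbuf, VPORT_RECONFIGURE_OUT_FLAGS) &
                      (1 << MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN);
        return 0;
}
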
+
+/***********************************/
+/* MC_CMD_EVB_PORT_QUERY
+ * read some config of a v-port.
+ */
+#define MC_CMD_EVB_PORT_QUERY 0x62
+
+#define MC_CMD_0x62_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_EVB_PORT_QUERY_IN msgrequest */
+#define MC_CMD_EVB_PORT_QUERY_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0
+
+/* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */
+#define MC_CMD_EVB_PORT_QUERY_OUT_LEN 8
+/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
+#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0
+/* The number of VLAN tags that may be used on a v-adaptor connected to this
+ * EVB port.
+ */
+#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4
+
+
+/***********************************/
/* MC_CMD_DUMP_BUFTBL_ENTRIES
* Dump buffer table entries, mainly for command client debug use. Dumps
* absolute entries, and does not use chunk handles. All entries must be in
@@ -8196,6 +9007,14 @@
#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_WIDTH 2
+/* enum: pad to 64 bytes */
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64 0x0
+/* enum: pad to 128 bytes (Medford only) */
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 0x1
+/* enum: pad to 256 bytes (Medford only) */
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256 0x2
/* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */
#define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0
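
/* [Editor's sketch, not part of this patch] Requesting 128-byte padding of
 * RX packets DMAed to the host (Medford only).  MCDI_POPULATE_DWORD_2 is
 * the usual sfc helper for packing several bitfields into one dword; IN_LEN
 * is defined in the surrounding context of this header.
 */
static int example_set_rx_pad_128(struct efx_nic *efx)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_RXDP_CONFIG_IN_LEN);

        MCDI_POPULATE_DWORD_2(inbuf, SET_RXDP_CONFIG_IN_DATA,
                              SET_RXDP_CONFIG_IN_PAD_HOST_DMA, 1,
                              SET_RXDP_CONFIG_IN_PAD_HOST_LEN,
                              MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128);
        return efx_mcdi_rpc(efx, MC_CMD_SET_RXDP_CONFIG, inbuf, sizeof(inbuf),
                            NULL, 0, NULL);
}
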
@@ -8217,6 +9036,10 @@
#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_WIDTH 2
+/* Enum values, see field(s): */
+/* MC_CMD_SET_RXDP_CONFIG/MC_CMD_SET_RXDP_CONFIG_IN/PAD_HOST_LEN */
/***********************************/
@@ -8788,32 +9611,38 @@
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: Attenuation (0-15, TBD for Medford) */
+/* enum: Attenuation (0-15, Huntington) */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0
-/* enum: CTLE Boost (0-15, TBD for Medford) */
+/* enum: CTLE Boost (0-15, Huntington) */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1
-/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive, TBD
- * for Medford)
+/* enum: Edge DFE Tap1 (Huntington - 0 - max negative, 64 - zero, 127 - max
+ * positive, Medford - 0-31)
*/
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2
-/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive, TBD for
- * Medford)
+/* enum: Edge DFE Tap2 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-31)
*/
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3
-/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive, TBD for
- * Medford)
+/* enum: Edge DFE Tap3 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-16)
*/
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4
-/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive, TBD for
- * Medford)
+/* enum: Edge DFE Tap4 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-16)
*/
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5
-/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive, TBD for
- * Medford)
+/* enum: Edge DFE Tap5 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-16)
*/
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6
-/* enum: Edge DFE DLEV (TBD for Medford) */
+/* enum: Edge DFE DLEV (0-128 for Medford) */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV 0x7
+/* enum: Variable Gain Amplifier (0-15, Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_VGA 0x8
+/* enum: CTLE EQ Capacitor (0-15, Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
+/* enum: CTLE EQ Resistor (0-7, Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
@@ -8885,26 +9714,32 @@
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: TX Amplitude */
+/* enum: TX Amplitude (Huntington, Medford) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0
-/* enum: De-Emphasis Tap1 Magnitude (0-7) */
+/* enum: De-Emphasis Tap1 Magnitude (0-7) (Huntington) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1
/* enum: De-Emphasis Tap1 Fine */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2
-/* enum: De-Emphasis Tap2 Magnitude (0-6) */
+/* enum: De-Emphasis Tap2 Magnitude (0-6) (Huntington) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3
-/* enum: De-Emphasis Tap2 Fine */
+/* enum: De-Emphasis Tap2 Fine (Huntington) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4
-/* enum: Pre-Emphasis Magnitude */
+/* enum: Pre-Emphasis Magnitude (Huntington) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5
-/* enum: Pre-Emphasis Fine */
+/* enum: Pre-Emphasis Fine (Huntington) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6
-/* enum: TX Slew Rate Coarse control */
+/* enum: TX Slew Rate Coarse control (Huntington) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7
-/* enum: TX Slew Rate Fine control */
+/* enum: TX Slew Rate Fine control (Huntington) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8
-/* enum: TX Termination Impedance control */
+/* enum: TX Termination Impedance control (Huntington) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9
+/* enum: TX Amplitude Fine control (Medford) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE 0xa
+/* enum: Pre-shoot Tap (Medford) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV 0xb
+/* enum: De-emphasis Tap (Medford) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY 0xc
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */
@@ -9086,8 +9921,16 @@
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5
/* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6
+/* enum: DFE DLev */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_DLEV 0x7
+/* enum: Figure of Merit */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_FOM 0x8
+/* enum: CTLE EQ Capacitor (HF Gain) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
+/* enum: CTLE EQ Resistor (DC Gain) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 4
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 5
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
@@ -9096,12 +9939,57 @@
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x8 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 12
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_8 0x8 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_9 0x9 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_10 0xa /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_11 0xb /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_12 0xc /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_13 0xd /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 13
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 14
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 10
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+/* MC_CMD_PCIE_TUNE_RXEQ_SET_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMIN 8
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX 252
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_LEN 3
+/* RXEQ Parameter */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_OFST 4
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LEN 4
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_ID */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 5
+/* Enum values, see field(s): */
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 13
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_LBN 14
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 2
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_PCIE_TUNE_RXEQ_SET_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_OUT_LEN 0
+
/* MC_CMD_PCIE_TUNE_TXEQ_GET_IN msgrequest */
#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_LEN 4
/* Requested operation */
@@ -9176,6 +10064,7 @@
/***********************************/
/* MC_CMD_LICENSING
* Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
+ * - not used for V3 licensing
*/
#define MC_CMD_LICENSING 0xf3
@@ -9220,6 +10109,93 @@
/***********************************/
+/* MC_CMD_LICENSING_V3
+ * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
+ * - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSING_V3 0xd0
+
+#define MC_CMD_0xd0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_V3_IN msgrequest */
+#define MC_CMD_LICENSING_V3_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_LICENSING_V3_IN_OP_OFST 0
+/* enum: re-read and apply licenses after a license key partition update; note
+ * that this operation returns a zero-length response
+ */
+#define MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE 0x0
+/* enum: report counts of installed licenses */
+#define MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE 0x1
+
+/* MC_CMD_LICENSING_V3_OUT msgresponse */
+#define MC_CMD_LICENSING_V3_OUT_LEN 88
+/* count of keys which are valid */
+#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_OFST 0
+/* sum of UNVERIFIABLE_KEYS + WRONG_NODE_KEYS (for compatibility with
+ * MC_CMD_FC_OP_LICENSE)
+ */
+#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_OFST 4
+/* count of keys which are invalid due to being unverifiable */
+#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_OFST 8
+/* count of keys which are invalid due to being for the wrong node */
+#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_OFST 12
+/* licensing state (for diagnostics; the exact meaning of the bits in this
+ * field is private to the firmware)
+ */
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_OFST 16
+/* licensing subsystem self-test report (for manftest) */
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_OFST 20
+/* enum: licensing subsystem self-test failed */
+#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL 0x0
+/* enum: licensing subsystem self-test passed */
+#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_PASS 0x1
+/* bitmask of licensed applications */
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_OFST 24
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LEN 8
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_OFST 24
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_OFST 28
+/* reserved for future use */
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_OFST 32
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_LEN 24
+/* bitmask of licensed features */
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_OFST 56
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LEN 8
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_OFST 56
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_OFST 60
+/* reserved for future use */
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_OFST 64
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_LEN 24
+
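/* [Editor's sketch, not part of this patch] Running a REPORT_LICENSE
 * operation and reassembling the 64-bit LICENSED_APPS mask from its LO/HI
 * words; MCDI_QWORD is the standard sfc accessor for 64-bit little-endian
 * fields.
 */
static int example_report_v3_licenses(struct efx_nic *efx, u64 *apps)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_V3_IN_LEN);
        MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_V3_OUT_LEN);
        size_t outlen;
        int rc;

        MCDI_SET_DWORD(inbuf, LICENSING_V3_IN_OP,
                       MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE);
        rc = efx_mcdi_rpc(efx, MC_CMD_LICENSING_V3, inbuf, sizeof(inbuf),
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;
        if (outlen < MC_CMD_LICENSING_V3_OUT_LEN)
                return -EIO;
        *apps = MCDI_QWORD(outbuf, LICENSING_V3_OUT_LICENSED_APPS);
        return 0;
}
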
+
+/***********************************/
+/* MC_CMD_LICENSING_GET_ID_V3
+ * Get ID and type from the NVRAM_PARTITION_TYPE_LICENSE application license
+ * partition - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSING_GET_ID_V3 0xd1
+
+#define MC_CMD_0xd1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_GET_ID_V3_IN msgrequest */
+#define MC_CMD_LICENSING_GET_ID_V3_IN_LEN 0
+
+/* MC_CMD_LICENSING_GET_ID_V3_OUT msgresponse */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN 8
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX 252
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num))
+/* type of license (e.g. 3) */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0
+/* length of the license ID (in bytes) */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4
+/* the unique license ID of the adapter */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MINNUM 0
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM 244
+
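/* [Editor's sketch, not part of this patch] The license ID is variable
 * length, so callers must trust LICENSE_ID_LENGTH and the actual response
 * length rather than a fixed size; 'buf'/'buflen' are a caller-supplied
 * byte buffer.
 */
static int example_get_license_id(struct efx_nic *efx, u8 *buf, size_t buflen)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX);
        size_t outlen, id_len;
        int rc;

        rc = efx_mcdi_rpc(efx, MC_CMD_LICENSING_GET_ID_V3, NULL, 0,
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;
        if (outlen < MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN)
                return -EIO;
        id_len = MCDI_DWORD(outbuf, LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH);
        /* The ID must fit in both the response and the caller's buffer */
        if (id_len > outlen - MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST ||
            id_len > buflen)
                return -EIO;
        memcpy(buf, MCDI_PTR(outbuf, LICENSING_GET_ID_V3_OUT_LICENSE_ID),
               id_len);
        return 0;
}
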
+
+/***********************************/
/* MC_CMD_MC2MC_PROXY
* Execute an arbitrary MCDI command on the slave MC of a dual-core device.
* This will fail on a single-core system.
@@ -9239,7 +10215,7 @@
/* MC_CMD_GET_LICENSED_APP_STATE
* Query the state of an individual licensed application. (Note that the actual
* state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation
- * or a reboot of the MC.)
+ * or a reboot of the MC.) Not used for V3 licensing.
*/
#define MC_CMD_GET_LICENSED_APP_STATE 0xf5
@@ -9261,8 +10237,68 @@
/***********************************/
+/* MC_CMD_GET_LICENSED_V3_APP_STATE
+ * Query the state of an individual licensed application. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
+ * operation or a reboot of the MC.) Used for V3 licensing (Medford)
+ */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE 0xd2
+
+#define MC_CMD_0xd2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_V3_APP_STATE_IN msgrequest */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN 8
+/* application ID to query (LICENSED_V3_APPS_xxx) expressed as a single bit
+ * mask
+ */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LEN 8
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_OFST 4
+
+/* MC_CMD_GET_LICENSED_V3_APP_STATE_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
+/* state of this application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0
+/* enum: no (or invalid) license is present for the application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0
+/* enum: a valid license is present for the application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES
+ * Query the state of one or more licensed features. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
+ * operation or a reboot of the MC.) Used for V3 licensing (Medford)
+ */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES 0xd3
+
+#define MC_CMD_0xd3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN msgrequest */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_LEN 8
+/* features to query (LICENSED_V3_FEATURES_xxx) expressed as a mask with one or
+ * more bits set
+ */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LEN 8
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_OFST 4
+
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN 8
+/* states of these features - bit set for licensed, clear for not licensed */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LEN 8
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_OFST 4
+
+
+/***********************************/
/* MC_CMD_LICENSED_APP_OP
- * Perform an action for an individual licensed application.
+ * Perform an action for an individual licensed application - not used for V3
+ * licensing.
*/
#define MC_CMD_LICENSED_APP_OP 0xf6
@@ -9328,6 +10364,67 @@
/***********************************/
+/* MC_CMD_LICENSED_V3_VALIDATE_APP
+ * Perform validation for an individual licensed application - V3 licensing
+ * (Medford)
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP 0xd4
+
+#define MC_CMD_0xd4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSED_V3_VALIDATE_APP_IN msgrequest */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_LEN 72
+/* application ID expressed as a single bit mask */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LEN 8
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_OFST 0
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_OFST 4
+/* challenge for validation */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_OFST 8
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_LEN 64
+
+/* MC_CMD_LICENSED_V3_VALIDATE_APP_OUT msgresponse */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN 72
+/* application expiry time */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 0
+/* application expiry units */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 4
+/* enum: expiry units are accounting units */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0
+/* enum: expiry units are calendar days */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1
+/* validation response to challenge */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_OFST 8
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 64
+
+
+/***********************************/
+/* MC_CMD_LICENSED_V3_MASK_FEATURES
+ * Mask features - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5
+
+#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12
+/* mask to be applied to features to be changed */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_OFST 0
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LEN 8
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_OFST 0
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4
+/* whether to turn on or turn off the masked features */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8
+/* enum: turn the features off */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0
+/* enum: turn the features back on */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1
+
+/* MC_CMD_LICENSED_V3_MASK_FEATURES_OUT msgresponse */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_OUT_LEN 0
+
+
+/***********************************/
/* MC_CMD_SET_PORT_SNIFF_CONFIG
* Configure RX port sniffing for the physical port associated with the calling
* function. Only a privileged function may change the port sniffing
@@ -9696,12 +10793,27 @@
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20 /* enum */
+/* enum: Deprecated. Equivalent to MAC_SPOOFING_TX combined with CHANGE_MAC. */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */
+/* enum: Allows the TX packets' source MAC address to be set to any arbitrary
+ * MAC address.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX 0x800
+/* enum: Privilege that allows a Function to change the MAC address configured
+ * in its associated vAdapter/vPort.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_CHANGE_MAC 0x1000
+/* enum: Privilege that allows a Function to install filters that specify VLANs
+ * that are not in the permit list for the associated vPort. This privilege is
+ * primarily to support ESX where vPorts are created that restrict traffic to
+ * only a set of permitted VLANs. See the vPort flag FLAG_VLAN_RESTRICT.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN 0x2000
/* enum: Set this bit to indicate that a new privilege mask is to be set,
* otherwise the command will only read the existing mask.
*/
@@ -9951,7 +11063,7 @@
/* Sector type */
#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
/* Enum values, see field(s): */
-/* MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
+/* MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
/* Sector size */
#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
/* Sector data */
@@ -10067,4 +11179,123 @@
#define MC_CMD_XPM_WRITE_TEST_OUT_LEN 0
+/***********************************/
+/* MC_CMD_EXEC_SIGNED
+ * Check the CMAC of the contents of IMEM and DMEM against the value supplied
+ * and if correct begin execution from the start of IMEM. The caller supplies a
+ * key ID, the length of IMEM and DMEM to validate and the expected CMAC. CMAC
+ * computation runs from the start of IMEM, and from the start of DMEM + 16k,
+ * to match flash booting. The command will respond with EINVAL if the CMAC
+ * does not match, otherwise it will respond with success before it jumps to
+ * IMEM.
+ */
+#define MC_CMD_EXEC_SIGNED 0x10c
+
+#define MC_CMD_0x10c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_EXEC_SIGNED_IN msgrequest */
+#define MC_CMD_EXEC_SIGNED_IN_LEN 28
+/* the length of code to include in the CMAC */
+#define MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0
+/* the length of data to include in the CMAC */
+#define MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4
+/* the XPM sector containing the key to use */
+#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8
+/* the expected CMAC value */
+#define MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12
+#define MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16
+
+/* MC_CMD_EXEC_SIGNED_OUT msgresponse */
+#define MC_CMD_EXEC_SIGNED_OUT_LEN 0
+
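/* [Editor's sketch, not part of this patch] Filling in the fixed-layout
 * request above.  'cmac' is the caller's expected 16-byte CMAC and
 * 'keysector' the XPM sector holding the key; on success the MC replies
 * before jumping to IMEM, so no output buffer is needed.
 */
static int example_exec_signed(struct efx_nic *efx, u32 codelen, u32 datalen,
                               u32 keysector, const u8 *cmac)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_EXEC_SIGNED_IN_LEN);

        MCDI_SET_DWORD(inbuf, EXEC_SIGNED_IN_CODELEN, codelen);
        MCDI_SET_DWORD(inbuf, EXEC_SIGNED_IN_DATALEN, datalen);
        MCDI_SET_DWORD(inbuf, EXEC_SIGNED_IN_KEYSECTOR, keysector);
        memcpy(MCDI_PTR(inbuf, EXEC_SIGNED_IN_CMAC), cmac,
               MC_CMD_EXEC_SIGNED_IN_CMAC_LEN);
        return efx_mcdi_rpc(efx, MC_CMD_EXEC_SIGNED, inbuf, sizeof(inbuf),
                            NULL, 0, NULL);
}
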
+
+/***********************************/
+/* MC_CMD_PREPARE_SIGNED
+ * Prepare to upload a signed image. This will scrub the specified length of
+ * the data region, which must be at least as large as the DATALEN supplied to
+ * MC_CMD_EXEC_SIGNED.
+ */
+#define MC_CMD_PREPARE_SIGNED 0x10d
+
+#define MC_CMD_0x10d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PREPARE_SIGNED_IN msgrequest */
+#define MC_CMD_PREPARE_SIGNED_IN_LEN 4
+/* the length of data area to clear */
+#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0
+
+/* MC_CMD_PREPARE_SIGNED_OUT msgresponse */
+#define MC_CMD_PREPARE_SIGNED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS
+ * Configure UDP ports for tunnel encapsulation hardware acceleration. The
+ * parser-dispatcher will attempt to parse traffic on these ports as tunnel
+ * encapsulation PDUs and filter them using the tunnel encapsulation filter
+ * chain rather than the standard filter chain. Note that this command can
+ * cause all functions to see a reset. (Available on Medford only.)
+ */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS 0x117
+
+#define MC_CMD_0x117_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN msgrequest */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMIN 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX 68
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num) (4+4*(num))
+/* Flags */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_LEN 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_LBN 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_WIDTH 1
+/* The number of entries in the ENTRIES array */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN 2
+/* Entries defining the UDP port to protocol mapping, each laid out as a
+ * TUNNEL_ENCAP_UDP_PORT_ENTRY
+ */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_OFST 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_LEN 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MINNUM 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM 16
+
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT msgresponse */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN 2
+/* Flags */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_OFST 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_LEN 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1
+
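/* [Editor's sketch, not part of this patch] A hypothetical "driver
 * unloading" call that clears all tunnel-encapsulation UDP ports.  The
 * FLAGS and NUM_ENTRIES fields are 2 bytes wide, so they are written
 * through MCDI_PTR here; the caller must honour OUT.RESETTING, since this
 * command can make every function see an MC reset.
 */
static int example_clear_tunnel_ports(struct efx_nic *efx)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(0));
        MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN);
        size_t outlen;
        int rc;

        /* UNLOADING flag set, zero port-to-protocol entries */
        *(__le16 *)MCDI_PTR(inbuf, SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS) =
                cpu_to_le16(1 <<
                        MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_LBN);
        *(__le16 *)MCDI_PTR(inbuf, SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES) =
                cpu_to_le16(0);
        rc = efx_mcdi_rpc(efx, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS,
                          inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
                          &outlen);
        if (rc)
                return rc;
        if (outlen >= MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN &&
            (MCDI_WORD(outbuf, SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS) &
             (1 << MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN)))
                netif_info(efx, drv, efx->net_dev,
                           "expecting MC reset after tunnel port update\n");
        return 0;
}
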
+
+/***********************************/
+/* MC_CMD_RX_BALANCING
+ * Configure a port upconverter to distribute packets across both RX engines.
+ * Packets are distributed according to a table that gives the destination
+ * vFIFO. The table is indexed by a hash of the IPv4 source and destination
+ * addresses and the VLAN priority.
+ */
+#define MC_CMD_RX_BALANCING 0x118
+
+#define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_RX_BALANCING_IN msgrequest */
+#define MC_CMD_RX_BALANCING_IN_LEN 4
+/* The RX port whose upconverter table will be modified */
+#define MC_CMD_RX_BALANCING_IN_PORT_OFST 0
+#define MC_CMD_RX_BALANCING_IN_PORT_LEN 1
+/* The VLAN priority associated to the table index and vFIFO */
+#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 1
+#define MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 1
+/* The resulting bit of SRC^DST for indexing the table */
+#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 2
+#define MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 1
+/* The RX engine to which the vFIFO in the table entry will point */
+#define MC_CMD_RX_BALANCING_IN_ENG_OFST 3
+#define MC_CMD_RX_BALANCING_IN_ENG_LEN 1
+
+/* MC_CMD_RX_BALANCING_OUT msgresponse */
+#define MC_CMD_RX_BALANCING_OUT_LEN 0
+
+
#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index d13ddf970..9ff062a36 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -868,6 +868,7 @@ struct vfdi_status;
* be held to modify it.
* @port_initialized: Port initialized?
* @net_dev: Operating system network device. Consider holding the rtnl lock
+ * @fixed_features: Features which cannot be turned off
* @stats_buffer: DMA buffer for statistics
* @phy_type: PHY type
* @phy_op: PHY interface
@@ -916,7 +917,6 @@ struct vfdi_status;
* @stats_lock: Statistics update lock. Must be held when calling
* efx_nic_type::{update,start,stop}_stats.
* @n_rx_noskb_drops: Count of RX packets dropped due to failure to allocate an skb
- * @mc_promisc: Whether in multicast promiscuous mode when last changed
*
* This is stored in the private area of the &struct net_device.
*/
@@ -1008,6 +1008,8 @@ struct efx_nic {
bool port_initialized;
struct net_device *net_dev;
+ netdev_features_t fixed_features;
+
struct efx_buffer stats_buffer;
u64 rx_nodesc_drops_total;
u64 rx_nodesc_drops_while_down;
@@ -1065,7 +1067,6 @@ struct efx_nic {
int last_irq_cpu;
spinlock_t stats_lock;
atomic_t n_rx_noskb_drops;
- bool mc_promisc;
};
static inline int efx_dev_registered(struct efx_nic *efx)
@@ -1333,6 +1334,8 @@ struct efx_nic_type {
int (*ptp_set_ts_config)(struct efx_nic *efx,
struct hwtstamp_config *init);
int (*sriov_configure)(struct efx_nic *efx, int num_vfs);
+ int (*vlan_rx_add_vid)(struct efx_nic *efx, __be16 proto, u16 vid);
+ int (*vlan_rx_kill_vid)(struct efx_nic *efx, __be16 proto, u16 vid);
int (*sriov_init)(struct efx_nic *efx);
void (*sriov_fini)(struct efx_nic *efx);
bool (*sriov_wanted)(struct efx_nic *efx);
@@ -1521,4 +1524,16 @@ static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb)
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
}
+/* Get all supported features.
+ * If a feature is not fixed, it is present in hw_features.
+ * If a feature is fixed, it is not present in hw_features, but is
+ * always present in features.
+ */
+static inline netdev_features_t efx_supported_features(const struct efx_nic *efx)
+{
+ const struct net_device *net_dev = efx->net_dev;
+
+ return net_dev->features | net_dev->hw_features;
+}
+
#endif /* EFX_NET_DRIVER_H */
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 0b536e27d..96944c3c9 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -519,6 +519,9 @@ enum {
#ifdef CONFIG_SFC_SRIOV
* @vf: Pointer to VF data structure
#endif
+ * @vport_mac: The MAC address on the vport; only used for PFs (zero on VFs)
+ * @vlan_list: List of VLANs added over the interface. Serialised by vlan_lock.
+ * @vlan_lock: Lock to serialise access to vlan_list.
*/
struct efx_ef10_nic_data {
struct efx_buffer mcdi_buf;
@@ -550,6 +553,8 @@ struct efx_ef10_nic_data {
struct ef10_vf *vf;
#endif
u8 vport_mac[ETH_ALEN];
+ struct list_head vlan_list;
+ struct mutex vlan_lock;
};
int efx_init_sriov(void);
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index b69d0e1e8..503a3b6dc 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2195,6 +2195,12 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device *
}
}
+static const struct acpi_device_id smc91x_acpi_match[] = {
+ { "LNRO0003", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, smc91x_acpi_match);
+
#if IS_BUILTIN(CONFIG_OF)
static const struct of_device_id smc91x_match[] = {
{ .compatible = "smsc,lan91c94", },
@@ -2281,7 +2287,6 @@ static int smc_drv_probe(struct platform_device *pdev)
#if IS_BUILTIN(CONFIG_OF)
match = of_match_device(of_match_ptr(smc91x_match), &pdev->dev);
if (match) {
- struct device_node *np = pdev->dev.of_node;
u32 val;
/* Optional pwrdwn GPIO configured? */
@@ -2307,7 +2312,8 @@ static int smc_drv_probe(struct platform_device *pdev)
usleep_range(750, 1000);
/* Combination of IO widths supported, default to 16-bit */
- if (!of_property_read_u32(np, "reg-io-width", &val)) {
+ if (!device_property_read_u32(&pdev->dev, "reg-io-width",
+ &val)) {
if (val & 1)
lp->cfg.flags |= SMC91X_USE_8BIT;
if ((val == 0) || (val & 2))
@@ -2485,7 +2491,8 @@ static struct platform_driver smc_driver = {
.driver = {
.name = CARDNAME,
.pm = &smc_drv_pm_ops,
- .of_match_table = of_match_ptr(smc91x_match),
+ .of_match_table = of_match_ptr(smc91x_match),
+ .acpi_match_table = smc91x_acpi_match,
},
};
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index e17671c9d..ea8465467 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -470,7 +470,9 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
#endif
#if ! SMC_CAN_USE_8BIT
+#undef SMC_inb
#define SMC_inb(ioaddr, reg) ({ BUG(); 0; })
+#undef SMC_outb
#define SMC_outb(x, ioaddr, reg) BUG()
#define SMC_insb(a, r, p, l) BUG()
#define SMC_outsb(a, r, p, l) BUG()
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index b5ab5e120..4f8910b7d 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -114,7 +114,6 @@ struct smsc911x_data {
/* spinlock to ensure register accesses are serialised */
spinlock_t dev_lock;
- struct phy_device *phy_dev;
struct mii_bus *mii_bus;
unsigned int using_extphy;
int last_duplex;
@@ -833,7 +832,7 @@ static int smsc911x_phy_reset(struct smsc911x_data *pdata)
static int smsc911x_phy_loopbacktest(struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
- struct phy_device *phy_dev = pdata->phy_dev;
+ struct phy_device *phy_dev = dev->phydev;
int result = -EIO;
unsigned int i, val;
unsigned long flags;
@@ -903,7 +902,8 @@ static int smsc911x_phy_loopbacktest(struct net_device *dev)
static void smsc911x_phy_update_flowcontrol(struct smsc911x_data *pdata)
{
- struct phy_device *phy_dev = pdata->phy_dev;
+ struct net_device *ndev = pdata->dev;
+ struct phy_device *phy_dev = ndev->phydev;
u32 afc = smsc911x_reg_read(pdata, AFC_CFG);
u32 flow;
unsigned long flags;
@@ -944,7 +944,7 @@ static void smsc911x_phy_update_flowcontrol(struct smsc911x_data *pdata)
static void smsc911x_phy_adjust_link(struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
- struct phy_device *phy_dev = pdata->phy_dev;
+ struct phy_device *phy_dev = dev->phydev;
unsigned long flags;
int carrier;
@@ -1037,7 +1037,6 @@ static int smsc911x_mii_probe(struct net_device *dev)
SUPPORTED_Asym_Pause);
phydev->advertising = phydev->supported;
- pdata->phy_dev = phydev;
pdata->last_duplex = -1;
pdata->last_carrier = -1;
@@ -1100,15 +1099,8 @@ static int smsc911x_mii_init(struct platform_device *pdev,
goto err_out_free_bus_2;
}
- if (smsc911x_mii_probe(dev) < 0) {
- SMSC_WARN(pdata, probe, "Error registering mii bus");
- goto err_out_unregister_bus_3;
- }
-
return 0;
-err_out_unregister_bus_3:
- mdiobus_unregister(pdata->mii_bus);
err_out_free_bus_2:
mdiobus_free(pdata->mii_bus);
err_out_1:
@@ -1338,9 +1330,11 @@ static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata)
static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata)
{
+ struct net_device *ndev = pdata->dev;
+ struct phy_device *phy_dev = ndev->phydev;
int rc = 0;
- if (!pdata->phy_dev)
+ if (!phy_dev)
return rc;
/* If the internal PHY is in General Power-Down mode, all, except the
@@ -1350,7 +1344,7 @@ static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata)
* In that case, clear the bit 0.11, so the PHY powers up and we can
* access to the phy registers.
*/
- rc = phy_read(pdata->phy_dev, MII_BMCR);
+ rc = phy_read(phy_dev, MII_BMCR);
if (rc < 0) {
SMSC_WARN(pdata, drv, "Failed reading PHY control reg");
return rc;
@@ -1360,7 +1354,7 @@ static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata)
* disable the general power down-mode.
*/
if (rc & BMCR_PDOWN) {
- rc = phy_write(pdata->phy_dev, MII_BMCR, rc & ~BMCR_PDOWN);
+ rc = phy_write(phy_dev, MII_BMCR, rc & ~BMCR_PDOWN);
if (rc < 0) {
SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
return rc;
@@ -1374,12 +1368,14 @@ static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata)
static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
{
+ struct net_device *ndev = pdata->dev;
+ struct phy_device *phy_dev = ndev->phydev;
int rc = 0;
- if (!pdata->phy_dev)
+ if (!phy_dev)
return rc;
- rc = phy_read(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS);
+ rc = phy_read(phy_dev, MII_LAN83C185_CTRL_STATUS);
if (rc < 0) {
SMSC_WARN(pdata, drv, "Failed reading PHY control reg");
@@ -1389,7 +1385,7 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
/* Only disable if energy detect mode is already enabled */
if (rc & MII_LAN83C185_EDPWRDOWN) {
/* Disable energy detect mode for this SMSC Transceivers */
- rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
+ rc = phy_write(phy_dev, MII_LAN83C185_CTRL_STATUS,
rc & (~MII_LAN83C185_EDPWRDOWN));
if (rc < 0) {
@@ -1405,12 +1401,14 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata)
{
+ struct net_device *ndev = pdata->dev;
+ struct phy_device *phy_dev = ndev->phydev;
int rc = 0;
- if (!pdata->phy_dev)
+ if (!phy_dev)
return rc;
- rc = phy_read(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS);
+ rc = phy_read(phy_dev, MII_LAN83C185_CTRL_STATUS);
if (rc < 0) {
SMSC_WARN(pdata, drv, "Failed reading PHY control reg");
@@ -1420,7 +1418,7 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata)
/* Only enable if energy detect mode is already disabled */
if (!(rc & MII_LAN83C185_EDPWRDOWN)) {
/* Enable energy detect mode for this SMSC Transceivers */
- rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
+ rc = phy_write(phy_dev, MII_LAN83C185_CTRL_STATUS,
rc | MII_LAN83C185_EDPWRDOWN);
if (rc < 0) {
@@ -1509,23 +1507,90 @@ static void smsc911x_disable_irq_chip(struct net_device *dev)
smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
}
+static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ u32 intsts = smsc911x_reg_read(pdata, INT_STS);
+ u32 inten = smsc911x_reg_read(pdata, INT_EN);
+ int serviced = IRQ_NONE;
+ u32 temp;
+
+ if (unlikely(intsts & inten & INT_STS_SW_INT_)) {
+ temp = smsc911x_reg_read(pdata, INT_EN);
+ temp &= (~INT_EN_SW_INT_EN_);
+ smsc911x_reg_write(pdata, INT_EN, temp);
+ smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_);
+ pdata->software_irq_signal = 1;
+ smp_wmb();
+ serviced = IRQ_HANDLED;
+ }
+
+ if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) {
+ /* Called when there is a multicast update scheduled and
+ * it is now safe to complete the update */
+ SMSC_TRACE(pdata, intr, "RX Stop interrupt");
+ smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_);
+ if (pdata->multicast_update_pending)
+ smsc911x_rx_multicast_update_workaround(pdata);
+ serviced = IRQ_HANDLED;
+ }
+
+ if (intsts & inten & INT_STS_TDFA_) {
+ temp = smsc911x_reg_read(pdata, FIFO_INT);
+ temp |= FIFO_INT_TX_AVAIL_LEVEL_;
+ smsc911x_reg_write(pdata, FIFO_INT, temp);
+ smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_);
+ netif_wake_queue(dev);
+ serviced = IRQ_HANDLED;
+ }
+
+ if (unlikely(intsts & inten & INT_STS_RXE_)) {
+ SMSC_TRACE(pdata, intr, "RX Error interrupt");
+ smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_);
+ serviced = IRQ_HANDLED;
+ }
+
+ if (likely(intsts & inten & INT_STS_RSFL_)) {
+ if (likely(napi_schedule_prep(&pdata->napi))) {
+ /* Disable Rx interrupts */
+ temp = smsc911x_reg_read(pdata, INT_EN);
+ temp &= (~INT_EN_RSFL_EN_);
+ smsc911x_reg_write(pdata, INT_EN, temp);
+ /* Schedule a NAPI poll */
+ __napi_schedule(&pdata->napi);
+ } else {
+ SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed");
+ }
+ serviced = IRQ_HANDLED;
+ }
+
+ return serviced;
+}
+
static int smsc911x_open(struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
unsigned int timeout;
unsigned int temp;
unsigned int intcfg;
+ int retval;
+ int irq_flags;
- /* if the phy is not yet registered, retry later*/
- if (!pdata->phy_dev) {
- SMSC_WARN(pdata, hw, "phy_dev is NULL");
- return -EAGAIN;
+ /* find and start the given phy */
+ if (!dev->phydev) {
+ retval = smsc911x_mii_probe(dev);
+ if (retval < 0) {
+ SMSC_WARN(pdata, probe, "Error starting phy");
+ goto out;
+ }
}
/* Reset the LAN911x */
- if (smsc911x_soft_reset(pdata)) {
+ retval = smsc911x_soft_reset(pdata);
+ if (retval) {
SMSC_WARN(pdata, hw, "soft reset failed");
- return -EIO;
+ goto mii_free_out;
}
smsc911x_reg_write(pdata, HW_CFG, 0x00050000);
@@ -1581,6 +1646,15 @@ static int smsc911x_open(struct net_device *dev)
pdata->software_irq_signal = 0;
smp_wmb();
+ irq_flags = irq_get_trigger_type(dev->irq);
+ retval = request_irq(dev->irq, smsc911x_irqhandler,
+ irq_flags | IRQF_SHARED, dev->name, dev);
+ if (retval) {
+ SMSC_WARN(pdata, probe,
+ "Unable to claim requested irq: %d", dev->irq);
+ goto mii_free_out;
+ }
+
temp = smsc911x_reg_read(pdata, INT_EN);
temp |= INT_EN_SW_INT_EN_;
smsc911x_reg_write(pdata, INT_EN, temp);
@@ -1595,7 +1669,8 @@ static int smsc911x_open(struct net_device *dev)
if (!pdata->software_irq_signal) {
netdev_warn(dev, "ISR failed signaling test (IRQ %d)\n",
dev->irq);
- return -ENODEV;
+ retval = -ENODEV;
+ goto irq_stop_out;
}
SMSC_TRACE(pdata, ifup, "IRQ handler passed test using IRQ %d",
dev->irq);
@@ -1608,7 +1683,7 @@ static int smsc911x_open(struct net_device *dev)
pdata->last_carrier = -1;
/* Bring the PHY up */
- phy_start(pdata->phy_dev);
+ phy_start(dev->phydev);
temp = smsc911x_reg_read(pdata, HW_CFG);
/* Preserve TX FIFO size and external PHY configuration */
@@ -1641,6 +1716,14 @@ static int smsc911x_open(struct net_device *dev)
netif_start_queue(dev);
return 0;
+
+irq_stop_out:
+ free_irq(dev->irq, dev);
+mii_free_out:
+ phy_disconnect(dev->phydev);
+ dev->phydev = NULL;
+out:
+ return retval;
}
/* Entry point for stopping the interface */
@@ -1662,9 +1745,15 @@ static int smsc911x_stop(struct net_device *dev)
dev->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP);
smsc911x_tx_update_txcounters(dev);
+ free_irq(dev->irq, dev);
+
/* Bring the PHY down */
- if (pdata->phy_dev)
- phy_stop(pdata->phy_dev);
+ if (dev->phydev) {
+ phy_stop(dev->phydev);
+ phy_disconnect(dev->phydev);
+ dev->phydev = NULL;
+ }
+ netif_carrier_off(dev);
SMSC_TRACE(pdata, ifdown, "Interface stopped");
return 0;
@@ -1806,67 +1895,6 @@ static void smsc911x_set_multicast_list(struct net_device *dev)
spin_unlock_irqrestore(&pdata->mac_lock, flags);
}
-static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct smsc911x_data *pdata = netdev_priv(dev);
- u32 intsts = smsc911x_reg_read(pdata, INT_STS);
- u32 inten = smsc911x_reg_read(pdata, INT_EN);
- int serviced = IRQ_NONE;
- u32 temp;
-
- if (unlikely(intsts & inten & INT_STS_SW_INT_)) {
- temp = smsc911x_reg_read(pdata, INT_EN);
- temp &= (~INT_EN_SW_INT_EN_);
- smsc911x_reg_write(pdata, INT_EN, temp);
- smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_);
- pdata->software_irq_signal = 1;
- smp_wmb();
- serviced = IRQ_HANDLED;
- }
-
- if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) {
- /* Called when there is a multicast update scheduled and
- * it is now safe to complete the update */
- SMSC_TRACE(pdata, intr, "RX Stop interrupt");
- smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_);
- if (pdata->multicast_update_pending)
- smsc911x_rx_multicast_update_workaround(pdata);
- serviced = IRQ_HANDLED;
- }
-
- if (intsts & inten & INT_STS_TDFA_) {
- temp = smsc911x_reg_read(pdata, FIFO_INT);
- temp |= FIFO_INT_TX_AVAIL_LEVEL_;
- smsc911x_reg_write(pdata, FIFO_INT, temp);
- smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_);
- netif_wake_queue(dev);
- serviced = IRQ_HANDLED;
- }
-
- if (unlikely(intsts & inten & INT_STS_RXE_)) {
- SMSC_TRACE(pdata, intr, "RX Error interrupt");
- smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_);
- serviced = IRQ_HANDLED;
- }
-
- if (likely(intsts & inten & INT_STS_RSFL_)) {
- if (likely(napi_schedule_prep(&pdata->napi))) {
- /* Disable Rx interrupts */
- temp = smsc911x_reg_read(pdata, INT_EN);
- temp &= (~INT_EN_RSFL_EN_);
- smsc911x_reg_write(pdata, INT_EN, temp);
- /* Schedule a NAPI poll */
- __napi_schedule(&pdata->napi);
- } else {
- SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed");
- }
- serviced = IRQ_HANDLED;
- }
-
- return serviced;
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
static void smsc911x_poll_controller(struct net_device *dev)
{
@@ -1904,30 +1932,10 @@ static int smsc911x_set_mac_address(struct net_device *dev, void *p)
/* Standard ioctls for mii-tool */
static int smsc911x_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- struct smsc911x_data *pdata = netdev_priv(dev);
-
- if (!netif_running(dev) || !pdata->phy_dev)
+ if (!netif_running(dev) || !dev->phydev)
return -EINVAL;
- return phy_mii_ioctl(pdata->phy_dev, ifr, cmd);
-}
-
-static int
-smsc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct smsc911x_data *pdata = netdev_priv(dev);
-
- cmd->maxtxpkt = 1;
- cmd->maxrxpkt = 1;
- return phy_ethtool_gset(pdata->phy_dev, cmd);
-}
-
-static int
-smsc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct smsc911x_data *pdata = netdev_priv(dev);
-
- return phy_ethtool_sset(pdata->phy_dev, cmd);
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
}
static void smsc911x_ethtool_getdrvinfo(struct net_device *dev,
@@ -1941,9 +1949,7 @@ static void smsc911x_ethtool_getdrvinfo(struct net_device *dev,
static int smsc911x_ethtool_nwayreset(struct net_device *dev)
{
- struct smsc911x_data *pdata = netdev_priv(dev);
-
- return phy_start_aneg(pdata->phy_dev);
+ return phy_start_aneg(dev->phydev);
}
static u32 smsc911x_ethtool_getmsglevel(struct net_device *dev)
@@ -1969,7 +1975,7 @@ smsc911x_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs,
void *buf)
{
struct smsc911x_data *pdata = netdev_priv(dev);
- struct phy_device *phy_dev = pdata->phy_dev;
+ struct phy_device *phy_dev = dev->phydev;
unsigned long flags;
unsigned int i;
unsigned int j = 0;
@@ -2115,8 +2121,6 @@ static int smsc911x_ethtool_set_eeprom(struct net_device *dev,
}
static const struct ethtool_ops smsc911x_ethtool_ops = {
- .get_settings = smsc911x_ethtool_getsettings,
- .set_settings = smsc911x_ethtool_setsettings,
.get_link = ethtool_op_get_link,
.get_drvinfo = smsc911x_ethtool_getdrvinfo,
.nway_reset = smsc911x_ethtool_nwayreset,
@@ -2128,6 +2132,8 @@ static const struct ethtool_ops smsc911x_ethtool_ops = {
.get_eeprom = smsc911x_ethtool_get_eeprom,
.set_eeprom = smsc911x_ethtool_set_eeprom,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static const struct net_device_ops smsc911x_netdev_ops = {
@@ -2308,17 +2314,14 @@ static int smsc911x_drv_remove(struct platform_device *pdev)
pdata = netdev_priv(dev);
BUG_ON(!pdata);
BUG_ON(!pdata->ioaddr);
- BUG_ON(!pdata->phy_dev);
+ WARN_ON(dev->phydev);
SMSC_TRACE(pdata, ifdown, "Stopping driver");
- phy_disconnect(pdata->phy_dev);
- pdata->phy_dev = NULL;
mdiobus_unregister(pdata->mii_bus);
mdiobus_free(pdata->mii_bus);
unregister_netdev(dev);
- free_irq(dev->irq, dev);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"smsc911x-memory");
if (!res)
@@ -2403,8 +2406,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
struct smsc911x_data *pdata;
struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev);
struct resource *res;
- unsigned int intcfg = 0;
- int res_size, irq, irq_flags;
+ int res_size, irq;
int retval;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -2443,7 +2445,6 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
pdata = netdev_priv(dev);
dev->irq = irq;
- irq_flags = irq_get_trigger_type(irq);
pdata->ioaddr = ioremap_nocache(res->start, res_size);
pdata->dev = dev;
@@ -2490,43 +2491,23 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
if (retval < 0)
goto out_disable_resources;
- /* configure irq polarity and type before connecting isr */
- if (pdata->config.irq_polarity == SMSC911X_IRQ_POLARITY_ACTIVE_HIGH)
- intcfg |= INT_CFG_IRQ_POL_;
-
- if (pdata->config.irq_type == SMSC911X_IRQ_TYPE_PUSH_PULL)
- intcfg |= INT_CFG_IRQ_TYPE_;
-
- smsc911x_reg_write(pdata, INT_CFG, intcfg);
-
- /* Ensure interrupts are globally disabled before connecting ISR */
- smsc911x_disable_irq_chip(dev);
+ netif_carrier_off(dev);
- retval = request_irq(dev->irq, smsc911x_irqhandler,
- irq_flags | IRQF_SHARED, dev->name, dev);
+ retval = smsc911x_mii_init(pdev, dev);
if (retval) {
- SMSC_WARN(pdata, probe,
- "Unable to claim requested irq: %d", dev->irq);
+ SMSC_WARN(pdata, probe, "Error %i initialising mii", retval);
goto out_disable_resources;
}
- netif_carrier_off(dev);
-
retval = register_netdev(dev);
if (retval) {
SMSC_WARN(pdata, probe, "Error %i registering device", retval);
- goto out_free_irq;
+ goto out_disable_resources;
} else {
SMSC_TRACE(pdata, probe,
"Network interface: \"%s\"", dev->name);
}
- retval = smsc911x_mii_init(pdev, dev);
- if (retval) {
- SMSC_WARN(pdata, probe, "Error %i initialising mii", retval);
- goto out_unregister_netdev_5;
- }
-
spin_lock_irq(&pdata->mac_lock);
/* Check if mac address has been specified when bringing interface up */
@@ -2562,10 +2543,6 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
return 0;
-out_unregister_netdev_5:
- unregister_netdev(dev);
-out_free_irq:
- free_irq(dev->irq, dev);
out_disable_resources:
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
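
The smsc911x hunks above drop the driver-private phy_dev pointer in favour of the core's dev->phydev and hand the ethtool link-settings path to phylib wholesale. A minimal sketch of the resulting pattern, assuming a hypothetical "foo" driver (phy_ethtool_get_link_ksettings/phy_ethtool_set_link_ksettings are the generic helpers this tree provides):

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

/* Once dev->phydev is the single source of truth, the old
 * .get_settings/.set_settings wrappers built on phy_ethtool_gset()
 * can be deleted and replaced by the phylib ksettings helpers.
 */
static const struct ethtool_ops foo_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
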
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 8594b9e8b..b7bfed4bc 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -76,7 +76,6 @@ struct smsc9420_pdata {
bool rx_csum;
u32 msg_enable;
- struct phy_device *phy_dev;
struct mii_bus *mii_bus;
int last_duplex;
int last_carrier;
@@ -226,36 +225,10 @@ static int smsc9420_eeprom_reload(struct smsc9420_pdata *pd)
/* Standard ioctls for mii-tool */
static int smsc9420_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- struct smsc9420_pdata *pd = netdev_priv(dev);
-
- if (!netif_running(dev) || !pd->phy_dev)
+ if (!netif_running(dev) || !dev->phydev)
return -EINVAL;
- return phy_mii_ioctl(pd->phy_dev, ifr, cmd);
-}
-
-static int smsc9420_ethtool_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- struct smsc9420_pdata *pd = netdev_priv(dev);
-
- if (!pd->phy_dev)
- return -ENODEV;
-
- cmd->maxtxpkt = 1;
- cmd->maxrxpkt = 1;
- return phy_ethtool_gset(pd->phy_dev, cmd);
-}
-
-static int smsc9420_ethtool_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- struct smsc9420_pdata *pd = netdev_priv(dev);
-
- if (!pd->phy_dev)
- return -ENODEV;
-
- return phy_ethtool_sset(pd->phy_dev, cmd);
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
}
static void smsc9420_ethtool_get_drvinfo(struct net_device *netdev,
@@ -283,12 +256,10 @@ static void smsc9420_ethtool_set_msglevel(struct net_device *netdev, u32 data)
static int smsc9420_ethtool_nway_reset(struct net_device *netdev)
{
- struct smsc9420_pdata *pd = netdev_priv(netdev);
-
- if (!pd->phy_dev)
+ if (!netdev->phydev)
return -ENODEV;
- return phy_start_aneg(pd->phy_dev);
+ return phy_start_aneg(netdev->phydev);
}
static int smsc9420_ethtool_getregslen(struct net_device *dev)
@@ -302,7 +273,7 @@ smsc9420_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs,
void *buf)
{
struct smsc9420_pdata *pd = netdev_priv(dev);
- struct phy_device *phy_dev = pd->phy_dev;
+ struct phy_device *phy_dev = dev->phydev;
unsigned int i, j = 0;
u32 *data = buf;
@@ -443,8 +414,6 @@ static int smsc9420_ethtool_set_eeprom(struct net_device *dev,
}
static const struct ethtool_ops smsc9420_ethtool_ops = {
- .get_settings = smsc9420_ethtool_get_settings,
- .set_settings = smsc9420_ethtool_set_settings,
.get_drvinfo = smsc9420_ethtool_get_drvinfo,
.get_msglevel = smsc9420_ethtool_get_msglevel,
.set_msglevel = smsc9420_ethtool_set_msglevel,
@@ -456,6 +425,8 @@ static const struct ethtool_ops smsc9420_ethtool_ops = {
.get_regs_len = smsc9420_ethtool_getregslen,
.get_regs = smsc9420_ethtool_getregs,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
/* Sets the device MAC address to dev_addr */
@@ -736,7 +707,7 @@ static int smsc9420_stop(struct net_device *dev)
ulong flags;
BUG_ON(!pd);
- BUG_ON(!pd->phy_dev);
+ BUG_ON(!dev->phydev);
/* disable master interrupt */
spin_lock_irqsave(&pd->int_lock, flags);
@@ -757,10 +728,9 @@ static int smsc9420_stop(struct net_device *dev)
smsc9420_dmac_soft_reset(pd);
- phy_stop(pd->phy_dev);
+ phy_stop(dev->phydev);
- phy_disconnect(pd->phy_dev);
- pd->phy_dev = NULL;
+ phy_disconnect(dev->phydev);
mdiobus_unregister(pd->mii_bus);
mdiobus_free(pd->mii_bus);
@@ -1093,7 +1063,8 @@ static void smsc9420_set_multicast_list(struct net_device *dev)
static void smsc9420_phy_update_flowcontrol(struct smsc9420_pdata *pd)
{
- struct phy_device *phy_dev = pd->phy_dev;
+ struct net_device *dev = pd->dev;
+ struct phy_device *phy_dev = dev->phydev;
u32 flow;
if (phy_dev->duplex == DUPLEX_FULL) {
@@ -1122,7 +1093,7 @@ static void smsc9420_phy_update_flowcontrol(struct smsc9420_pdata *pd)
static void smsc9420_phy_adjust_link(struct net_device *dev)
{
struct smsc9420_pdata *pd = netdev_priv(dev);
- struct phy_device *phy_dev = pd->phy_dev;
+ struct phy_device *phy_dev = dev->phydev;
int carrier;
if (phy_dev->duplex != pd->last_duplex) {
@@ -1155,7 +1126,7 @@ static int smsc9420_mii_probe(struct net_device *dev)
struct smsc9420_pdata *pd = netdev_priv(dev);
struct phy_device *phydev = NULL;
- BUG_ON(pd->phy_dev);
+ BUG_ON(dev->phydev);
/* Device only supports internal PHY at address 1 */
phydev = mdiobus_get_phy(pd->mii_bus, 1);
@@ -1179,7 +1150,6 @@ static int smsc9420_mii_probe(struct net_device *dev)
phy_attached_info(phydev);
- pd->phy_dev = phydev;
pd->last_duplex = -1;
pd->last_carrier = -1;
@@ -1440,7 +1410,7 @@ static int smsc9420_open(struct net_device *dev)
}
/* Bring the PHY up */
- phy_start(pd->phy_dev);
+ phy_start(dev->phydev);
napi_enable(&pd->napi);
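
smsc9420 gets the same conversion; the teardown ordering it converges on is worth spelling out. A sketch under the same "foo" assumption (foo_stop is invented; the ordering mirrors smsc9420_stop above):

/* Stop the PHY state machine before detaching. phy_disconnect()
 * clears dev->phydev, so there is no driver-private pointer left
 * to reset by hand.
 */
static int foo_stop(struct net_device *dev)
{
	if (dev->phydev) {
		phy_stop(dev->phydev);
		phy_disconnect(dev->phydev);
	}
	return 0;
}
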
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index cec147d1d..8f06a6621 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -40,7 +40,7 @@ config DWMAC_GENERIC
config DWMAC_IPQ806X
tristate "QCA IPQ806x DWMAC support"
default ARCH_QCOM
- depends on OF
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
select MFD_SYSCON
help
Support for QCA IPQ806X DWMAC Ethernet.
@@ -53,7 +53,7 @@ config DWMAC_IPQ806X
config DWMAC_LPC18XX
tristate "NXP LPC18xx/43xx DWMAC support"
default ARCH_LPC18XX
- depends on OF
+ depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
select MFD_SYSCON
---help---
Support for NXP LPC18xx/43xx DWMAC Ethernet.
@@ -61,7 +61,7 @@ config DWMAC_LPC18XX
config DWMAC_MESON
tristate "Amlogic Meson dwmac support"
default ARCH_MESON
- depends on OF
+ depends on OF && (ARCH_MESON || COMPILE_TEST)
help
Support for Ethernet controller on Amlogic Meson SoCs.
@@ -72,7 +72,7 @@ config DWMAC_MESON
config DWMAC_ROCKCHIP
tristate "Rockchip dwmac support"
default ARCH_ROCKCHIP
- depends on OF
+ depends on OF && (ARCH_ROCKCHIP || COMPILE_TEST)
select MFD_SYSCON
help
Support for Ethernet controller on Rockchip RK3288 SoC.
@@ -83,7 +83,7 @@ config DWMAC_ROCKCHIP
config DWMAC_SOCFPGA
tristate "SOCFPGA dwmac support"
default ARCH_SOCFPGA
- depends on OF
+ depends on OF && (ARCH_SOCFPGA || COMPILE_TEST)
select MFD_SYSCON
help
Support for ethernet controller on Altera SOCFPGA
@@ -95,7 +95,7 @@ config DWMAC_SOCFPGA
config DWMAC_STI
tristate "STi GMAC support"
default ARCH_STI
- depends on OF
+ depends on OF && (ARCH_STI || COMPILE_TEST)
select MFD_SYSCON
---help---
Support for ethernet controller on STi SOCs.
@@ -107,7 +107,7 @@ config DWMAC_STI
config DWMAC_SUNXI
tristate "Allwinner GMAC support"
default ARCH_SUNXI
- depends on OF
+ depends on OF && (ARCH_SUNXI || COMPILE_TEST)
---help---
Support for Allwinner A20/A31 GMAC ethernet controllers.
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 0fb362d5a..44b630cd1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -11,11 +11,12 @@ obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o
obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o
obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o
obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
-obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-socfpga.o
+obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o
obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o
stmmac-platform-objs:= stmmac_platform.o
+dwmac-altr-socfpga-objs := altr_tse_pcs.o dwmac-socfpga.o
obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o
stmmac-pci-objs:= stmmac_pci.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
new file mode 100644
index 000000000..2920e2ee3
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
@@ -0,0 +1,274 @@
+/* Copyright Altera Corporation (C) 2016. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author: Tien Hock Loh <thloh@altera.com>
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/stmmac.h>
+
+#include "stmmac.h"
+#include "stmmac_platform.h"
+#include "altr_tse_pcs.h"
+
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII BIT(1)
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII BIT(2)
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_WIDTH 2
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK GENMASK(1, 0)
+
+#define TSE_PCS_CONTROL_AN_EN_MASK BIT(12)
+#define TSE_PCS_CONTROL_REG 0x00
+#define TSE_PCS_CONTROL_RESTART_AN_MASK BIT(9)
+#define TSE_PCS_IF_MODE_REG 0x28
+#define TSE_PCS_LINK_TIMER_0_REG 0x24
+#define TSE_PCS_LINK_TIMER_1_REG 0x26
+#define TSE_PCS_SIZE 0x40
+#define TSE_PCS_STATUS_AN_COMPLETED_MASK BIT(5)
+#define TSE_PCS_STATUS_LINK_MASK 0x0004
+#define TSE_PCS_STATUS_REG 0x02
+#define TSE_PCS_SGMII_SPEED_1000 BIT(3)
+#define TSE_PCS_SGMII_SPEED_100 BIT(2)
+#define TSE_PCS_SGMII_SPEED_10 0x0
+#define TSE_PCS_SW_RST_MASK 0x8000
+#define TSE_PCS_PARTNER_ABILITY_REG 0x0A
+#define TSE_PCS_PARTNER_DUPLEX_FULL 0x1000
+#define TSE_PCS_PARTNER_DUPLEX_HALF 0x0000
+#define TSE_PCS_PARTNER_DUPLEX_MASK 0x1000
+#define TSE_PCS_PARTNER_SPEED_MASK GENMASK(11, 10)
+#define TSE_PCS_PARTNER_SPEED_1000 BIT(11)
+#define TSE_PCS_PARTNER_SPEED_100 BIT(10)
+#define TSE_PCS_PARTNER_SPEED_10 0x0000
+#define TSE_PCS_SGMII_SPEED_MASK GENMASK(3, 2)
+#define TSE_PCS_SGMII_LINK_TIMER_0 0x0D40
+#define TSE_PCS_SGMII_LINK_TIMER_1 0x0003
+#define TSE_PCS_SW_RESET_TIMEOUT 100
+#define TSE_PCS_USE_SGMII_AN_MASK BIT(2)
+#define TSE_PCS_USE_SGMII_ENA BIT(1)
+
+#define SGMII_ADAPTER_CTRL_REG 0x00
+#define SGMII_ADAPTER_DISABLE 0x0001
+#define SGMII_ADAPTER_ENABLE 0x0000
+
+#define AUTONEGO_LINK_TIMER 20
+
+static int tse_pcs_reset(void __iomem *base, struct tse_pcs *pcs)
+{
+ int counter = 0;
+ u16 val;
+
+ val = readw(base + TSE_PCS_CONTROL_REG);
+ val |= TSE_PCS_SW_RST_MASK;
+ writew(val, base + TSE_PCS_CONTROL_REG);
+
+ while (counter < TSE_PCS_SW_RESET_TIMEOUT) {
+ val = readw(base + TSE_PCS_CONTROL_REG);
+ val &= TSE_PCS_SW_RST_MASK;
+ if (val == 0)
+ break;
+ counter++;
+ udelay(1);
+ }
+ if (counter >= TSE_PCS_SW_RESET_TIMEOUT) {
+ dev_err(pcs->dev, "PCS could not get out of sw reset\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+int tse_pcs_init(void __iomem *base, struct tse_pcs *pcs)
+{
+ int ret = 0;
+
+ writew(TSE_PCS_USE_SGMII_ENA, base + TSE_PCS_IF_MODE_REG);
+
+ writew(TSE_PCS_SGMII_LINK_TIMER_0, base + TSE_PCS_LINK_TIMER_0_REG);
+ writew(TSE_PCS_SGMII_LINK_TIMER_1, base + TSE_PCS_LINK_TIMER_1_REG);
+
+ ret = tse_pcs_reset(base, pcs);
+ if (ret == 0)
+ writew(SGMII_ADAPTER_ENABLE,
+ pcs->sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
+
+ return ret;
+}
+
+static void pcs_link_timer_callback(unsigned long data)
+{
+ u16 val = 0;
+ struct tse_pcs *pcs = (struct tse_pcs *)data;
+ void __iomem *tse_pcs_base = pcs->tse_pcs_base;
+ void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base;
+
+ val = readw(tse_pcs_base + TSE_PCS_STATUS_REG);
+ val &= TSE_PCS_STATUS_LINK_MASK;
+
+ if (val != 0) {
+ dev_dbg(pcs->dev, "Adapter: Link is established\n");
+ writew(SGMII_ADAPTER_ENABLE,
+ sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
+ } else {
+ mod_timer(&pcs->aneg_link_timer, jiffies +
+ msecs_to_jiffies(AUTONEGO_LINK_TIMER));
+ }
+}
+
+static void auto_nego_timer_callback(unsigned long data)
+{
+ u16 val = 0;
+ u16 speed = 0;
+ u16 duplex = 0;
+ struct tse_pcs *pcs = (struct tse_pcs *)data;
+ void __iomem *tse_pcs_base = pcs->tse_pcs_base;
+ void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base;
+
+ val = readw(tse_pcs_base + TSE_PCS_STATUS_REG);
+ val &= TSE_PCS_STATUS_AN_COMPLETED_MASK;
+
+ if (val != 0) {
+ dev_dbg(pcs->dev, "Adapter: Auto Negotiation is completed\n");
+ val = readw(tse_pcs_base + TSE_PCS_PARTNER_ABILITY_REG);
+ speed = val & TSE_PCS_PARTNER_SPEED_MASK;
+ duplex = val & TSE_PCS_PARTNER_DUPLEX_MASK;
+
+ if (speed == TSE_PCS_PARTNER_SPEED_10 &&
+ duplex == TSE_PCS_PARTNER_DUPLEX_FULL)
+ dev_dbg(pcs->dev,
+ "Adapter: Link Partner is Up - 10/Full\n");
+ else if (speed == TSE_PCS_PARTNER_SPEED_100 &&
+ duplex == TSE_PCS_PARTNER_DUPLEX_FULL)
+ dev_dbg(pcs->dev,
+ "Adapter: Link Partner is Up - 100/Full\n");
+ else if (speed == TSE_PCS_PARTNER_SPEED_1000 &&
+ duplex == TSE_PCS_PARTNER_DUPLEX_FULL)
+ dev_dbg(pcs->dev,
+ "Adapter: Link Partner is Up - 1000/Full\n");
+ else if (speed == TSE_PCS_PARTNER_SPEED_10 &&
+ duplex == TSE_PCS_PARTNER_DUPLEX_HALF)
+ dev_err(pcs->dev,
+ "Adapter does not support Half Duplex\n");
+ else if (speed == TSE_PCS_PARTNER_SPEED_100 &&
+ duplex == TSE_PCS_PARTNER_DUPLEX_HALF)
+ dev_err(pcs->dev,
+ "Adapter does not support Half Duplex\n");
+ else if (speed == TSE_PCS_PARTNER_SPEED_1000 &&
+ duplex == TSE_PCS_PARTNER_DUPLEX_HALF)
+ dev_err(pcs->dev,
+ "Adapter does not support Half Duplex\n");
+ else
+ dev_err(pcs->dev,
+ "Adapter: Invalid Partner Speed and Duplex\n");
+
+ if (duplex == TSE_PCS_PARTNER_DUPLEX_FULL &&
+ (speed == TSE_PCS_PARTNER_SPEED_10 ||
+ speed == TSE_PCS_PARTNER_SPEED_100 ||
+ speed == TSE_PCS_PARTNER_SPEED_1000))
+ writew(SGMII_ADAPTER_ENABLE,
+ sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
+ } else {
+ val = readw(tse_pcs_base + TSE_PCS_CONTROL_REG);
+ val |= TSE_PCS_CONTROL_RESTART_AN_MASK;
+ writew(val, tse_pcs_base + TSE_PCS_CONTROL_REG);
+
+ tse_pcs_reset(tse_pcs_base, pcs);
+ mod_timer(&pcs->aneg_link_timer, jiffies +
+ msecs_to_jiffies(AUTONEGO_LINK_TIMER));
+ }
+}
+
+static void aneg_link_timer_callback(unsigned long data)
+{
+ struct tse_pcs *pcs = (struct tse_pcs *)data;
+
+ if (pcs->autoneg == AUTONEG_ENABLE)
+ auto_nego_timer_callback(data);
+ else if (pcs->autoneg == AUTONEG_DISABLE)
+ pcs_link_timer_callback(data);
+}
+
+void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev,
+ unsigned int speed)
+{
+ void __iomem *tse_pcs_base = pcs->tse_pcs_base;
+ void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base;
+ u32 val;
+
+ writew(SGMII_ADAPTER_ENABLE,
+ sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
+
+ pcs->autoneg = phy_dev->autoneg;
+
+ if (phy_dev->autoneg == AUTONEG_ENABLE) {
+ val = readw(tse_pcs_base + TSE_PCS_CONTROL_REG);
+ val |= TSE_PCS_CONTROL_AN_EN_MASK;
+ writew(val, tse_pcs_base + TSE_PCS_CONTROL_REG);
+
+ val = readw(tse_pcs_base + TSE_PCS_IF_MODE_REG);
+ val |= TSE_PCS_USE_SGMII_AN_MASK;
+ writew(val, tse_pcs_base + TSE_PCS_IF_MODE_REG);
+
+ val = readw(tse_pcs_base + TSE_PCS_CONTROL_REG);
+ val |= TSE_PCS_CONTROL_RESTART_AN_MASK;
+
+ tse_pcs_reset(tse_pcs_base, pcs);
+
+ setup_timer(&pcs->aneg_link_timer,
+ aneg_link_timer_callback, (unsigned long)pcs);
+ mod_timer(&pcs->aneg_link_timer, jiffies +
+ msecs_to_jiffies(AUTONEGO_LINK_TIMER));
+ } else if (phy_dev->autoneg == AUTONEG_DISABLE) {
+ val = readw(tse_pcs_base + TSE_PCS_CONTROL_REG);
+ val &= ~TSE_PCS_CONTROL_AN_EN_MASK;
+ writew(val, tse_pcs_base + TSE_PCS_CONTROL_REG);
+
+ val = readw(tse_pcs_base + TSE_PCS_IF_MODE_REG);
+ val &= ~TSE_PCS_USE_SGMII_AN_MASK;
+ writew(val, tse_pcs_base + TSE_PCS_IF_MODE_REG);
+
+ val = readw(tse_pcs_base + TSE_PCS_IF_MODE_REG);
+ val &= ~TSE_PCS_SGMII_SPEED_MASK;
+
+ switch (speed) {
+ case 1000:
+ val |= TSE_PCS_SGMII_SPEED_1000;
+ break;
+ case 100:
+ val |= TSE_PCS_SGMII_SPEED_100;
+ break;
+ case 10:
+ val |= TSE_PCS_SGMII_SPEED_10;
+ break;
+ default:
+ return;
+ }
+ writew(val, tse_pcs_base + TSE_PCS_IF_MODE_REG);
+
+ tse_pcs_reset(tse_pcs_base, pcs);
+
+ setup_timer(&pcs->aneg_link_timer,
+ aneg_link_timer_callback, (unsigned long)pcs);
+ mod_timer(&pcs->aneg_link_timer, jiffies +
+ msecs_to_jiffies(AUTONEGO_LINK_TIMER));
+ }
+}
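
tse_pcs_reset() above open-codes the usual "set self-clearing bit, poll until it drops" idiom. Assuming the generic iopoll helpers are acceptable in this context, the same wait could be written as below (tse_pcs_reset_alt is a sketch, not part of the patch):

#include <linux/iopoll.h>

static int tse_pcs_reset_alt(void __iomem *base)
{
	u16 val;

	/* request the self-clearing software reset */
	writew(readw(base + TSE_PCS_CONTROL_REG) | TSE_PCS_SW_RST_MASK,
	       base + TSE_PCS_CONTROL_REG);

	/* poll every 1us, give up after TSE_PCS_SW_RESET_TIMEOUT us */
	return readw_poll_timeout(base + TSE_PCS_CONTROL_REG, val,
				  !(val & TSE_PCS_SW_RST_MASK),
				  1, TSE_PCS_SW_RESET_TIMEOUT);
}
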
diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h
new file mode 100644
index 000000000..2f5882450
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h
@@ -0,0 +1,36 @@
+/* Copyright Altera Corporation (C) 2016. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author: Tien Hock Loh <thloh@altera.com>
+ */
+
+#ifndef __TSE_PCS_H__
+#define __TSE_PCS_H__
+
+#include <linux/phy.h>
+#include <linux/timer.h>
+
+struct tse_pcs {
+ struct device *dev;
+ void __iomem *tse_pcs_base;
+ void __iomem *sgmii_adapter_base;
+ struct timer_list aneg_link_timer;
+ int autoneg;
+};
+
+int tse_pcs_init(void __iomem *base, struct tse_pcs *pcs);
+void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev,
+ unsigned int speed);
+
+#endif /* __TSE_PCS_H__ */
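
The header exposes a deliberately small surface: embed a struct tse_pcs, map the two register windows, call tse_pcs_init() once, and forward speed changes from the MAC glue. A hypothetical consumer sketch (all foo_ names invented; the real wiring is in dwmac-socfpga.c later in this patch):

struct foo_glue {
	struct net_device *ndev;
	struct tse_pcs pcs;	/* tse_pcs_base/sgmii_adapter_base mapped at probe */
};

static int foo_pcs_setup(struct foo_glue *glue)
{
	return tse_pcs_init(glue->pcs.tse_pcs_base, &glue->pcs);
}

/* fix_mac_speed() hook: let the PCS re-run SGMII autoneg for the new speed */
static void foo_fix_mac_speed(void *priv, unsigned int speed)
{
	struct foo_glue *glue = priv;

	tse_pcs_fix_mac_speed(&glue->pcs, glue->ndev->phydev, speed);
}
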
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index fc60368df..2533b91f1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -232,6 +232,11 @@ struct stmmac_extra_stats {
#define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY iface */
#define DEFAULT_DMA_PBL 8
+/* PCS status and mask defines */
+#define PCS_ANE_IRQ BIT(2) /* PCS Auto-Negotiation */
+#define PCS_LINK_IRQ BIT(1) /* PCS Link */
+#define PCS_RGSMIIIS_IRQ BIT(0) /* RGMII or SMII Interrupt */
+
/* Max/Min RI Watchdog Timer count value */
#define MAX_DMA_RIWT 0xff
#define MIN_DMA_RIWT 0x20
@@ -272,9 +277,6 @@ enum dma_irq_status {
#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2)
#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 3)
-#define CORE_PCS_ANE_COMPLETE (1 << 5)
-#define CORE_PCS_LINK_STATUS (1 << 6)
-#define CORE_RGMII_IRQ (1 << 7)
#define CORE_IRQ_MTL_RX_OVERFLOW BIT(8)
/* Physical Coding Sublayer */
@@ -469,9 +471,12 @@ struct stmmac_ops {
void (*reset_eee_mode)(struct mac_device_info *hw);
void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
void (*set_eee_pls)(struct mac_device_info *hw, int link);
- void (*ctrl_ane)(struct mac_device_info *hw, bool restart);
- void (*get_adv)(struct mac_device_info *hw, struct rgmii_adv *adv);
void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x);
+ /* PCS calls */
+ void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral,
+ bool loopback);
+ void (*pcs_rane)(void __iomem *ioaddr, bool restart);
+ void (*pcs_get_adv_lp)(void __iomem *ioaddr, struct rgmii_adv *adv);
};
/* PTP and HW Timer helpers */
@@ -524,6 +529,9 @@ struct mac_device_info {
int unicast_filter_entries;
int mcast_bits_log2;
unsigned int rx_csum;
+ unsigned int pcs;
+ unsigned int pmt;
+ unsigned int ps;
};
struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
@@ -546,6 +554,7 @@ void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable);
void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
+
extern const struct stmmac_mode_ops ring_mode_ops;
extern const struct stmmac_mode_ops chain_mode_ops;
extern const struct stmmac_desc_ops dwmac4_desc_ops;
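
The stmmac_ops rework above swaps the mac_device_info-based ctrl_ane/get_adv pair for three ioaddr-based PCS callbacks, so shared dwmac_* helpers can back both dwmac1000 and dwmac4. A caller sketch, following the shape stmmac_ethtool.c uses later in this patch (foo_restart_pcs_an is an invented name):

static void foo_restart_pcs_an(struct stmmac_priv *priv)
{
	/* enable ANE; SGMII RAL tracks the programmed port speed,
	 * loopback stays off
	 */
	if (priv->hw->mac->pcs_ctrl_ane)
		priv->hw->mac->pcs_ctrl_ane(priv->ioaddr, true,
					    !!priv->hw->ps, false);

	/* explicitly restart an in-progress autonegotiation */
	if (priv->hw->mac->pcs_rane)
		priv->hw->mac->pcs_rane(priv->ioaddr, true);
}
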
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 0cd3ecff7..92105916e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -46,6 +46,7 @@ struct rk_priv_data {
struct platform_device *pdev;
int phy_iface;
struct regulator *regulator;
+ bool suspended;
const struct rk_gmac_ops *ops;
bool clk_enabled;
@@ -72,6 +73,122 @@ struct rk_priv_data {
#define GRF_BIT(nr) (BIT(nr) | BIT(nr+16))
#define GRF_CLR_BIT(nr) (BIT(nr+16))
+#define RK3228_GRF_MAC_CON0 0x0900
+#define RK3228_GRF_MAC_CON1 0x0904
+
+/* RK3228_GRF_MAC_CON0 */
+#define RK3228_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
+#define RK3228_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+
+/* RK3228_GRF_MAC_CON1 */
+#define RK3228_GMAC_PHY_INTF_SEL_RGMII \
+ (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
+#define RK3228_GMAC_PHY_INTF_SEL_RMII \
+ (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
+#define RK3228_GMAC_FLOW_CTRL GRF_BIT(3)
+#define RK3228_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
+#define RK3228_GMAC_SPEED_10M GRF_CLR_BIT(2)
+#define RK3228_GMAC_SPEED_100M GRF_BIT(2)
+#define RK3228_GMAC_RMII_CLK_25M GRF_BIT(7)
+#define RK3228_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(7)
+#define RK3228_GMAC_CLK_125M (GRF_CLR_BIT(8) | GRF_CLR_BIT(9))
+#define RK3228_GMAC_CLK_25M (GRF_BIT(8) | GRF_BIT(9))
+#define RK3228_GMAC_CLK_2_5M (GRF_CLR_BIT(8) | GRF_BIT(9))
+#define RK3228_GMAC_RMII_MODE GRF_BIT(10)
+#define RK3228_GMAC_RMII_MODE_CLR GRF_CLR_BIT(10)
+#define RK3228_GMAC_TXCLK_DLY_ENABLE GRF_BIT(0)
+#define RK3228_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(0)
+#define RK3228_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1)
+#define RK3228_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(1)
+
+static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
+ RK3228_GMAC_PHY_INTF_SEL_RGMII |
+ RK3228_GMAC_RMII_MODE_CLR |
+ RK3228_GMAC_RXCLK_DLY_ENABLE |
+ RK3228_GMAC_TXCLK_DLY_ENABLE);
+
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON0,
+ RK3228_GMAC_CLK_RX_DL_CFG(rx_delay) |
+ RK3228_GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rk3228_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
+ RK3228_GMAC_PHY_INTF_SEL_RMII |
+ RK3228_GMAC_RMII_MODE);
+
+ /* set MAC to RMII mode */
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, GRF_BIT(11));
+}
+
+static void rk3228_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ if (speed == 10)
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
+ RK3228_GMAC_CLK_2_5M);
+ else if (speed == 100)
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
+ RK3228_GMAC_CLK_25M);
+ else if (speed == 1000)
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
+ RK3228_GMAC_CLK_125M);
+ else
+ dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
+}
+
+static void rk3228_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ if (speed == 10)
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
+ RK3228_GMAC_RMII_CLK_2_5M |
+ RK3228_GMAC_SPEED_10M);
+ else if (speed == 100)
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
+ RK3228_GMAC_RMII_CLK_25M |
+ RK3228_GMAC_SPEED_100M);
+ else
+ dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+}
+
+static const struct rk_gmac_ops rk3228_ops = {
+ .set_to_rgmii = rk3228_set_to_rgmii,
+ .set_to_rmii = rk3228_set_to_rmii,
+ .set_rgmii_speed = rk3228_set_rgmii_speed,
+ .set_rmii_speed = rk3228_set_rmii_speed,
+};
+
#define RK3288_GRF_SOC_CON1 0x0248
#define RK3288_GRF_SOC_CON3 0x0250
@@ -529,9 +646,8 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
return bsp_priv;
}
-static int rk_gmac_init(struct platform_device *pdev, void *priv)
+static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
{
- struct rk_priv_data *bsp_priv = priv;
int ret;
ret = phy_power_on(bsp_priv, true);
@@ -545,14 +661,50 @@ static int rk_gmac_init(struct platform_device *pdev, void *priv)
return 0;
}
-static void rk_gmac_exit(struct platform_device *pdev, void *priv)
+static void rk_gmac_powerdown(struct rk_priv_data *gmac)
{
- struct rk_priv_data *gmac = priv;
-
phy_power_on(gmac, false);
gmac_clk_enable(gmac, false);
}
+static int rk_gmac_init(struct platform_device *pdev, void *priv)
+{
+ struct rk_priv_data *bsp_priv = priv;
+
+ return rk_gmac_powerup(bsp_priv);
+}
+
+static void rk_gmac_exit(struct platform_device *pdev, void *priv)
+{
+ struct rk_priv_data *bsp_priv = priv;
+
+ rk_gmac_powerdown(bsp_priv);
+}
+
+static void rk_gmac_suspend(struct platform_device *pdev, void *priv)
+{
+ struct rk_priv_data *bsp_priv = priv;
+
+ /* Keep the PHY up if we use Wake-on-LAN. */
+ if (device_may_wakeup(&pdev->dev))
+ return;
+
+ rk_gmac_powerdown(bsp_priv);
+ bsp_priv->suspended = true;
+}
+
+static void rk_gmac_resume(struct platform_device *pdev, void *priv)
+{
+ struct rk_priv_data *bsp_priv = priv;
+
+ /* The PHY was up for Wake-on-LAN. */
+ if (!bsp_priv->suspended)
+ return;
+
+ rk_gmac_powerup(bsp_priv);
+ bsp_priv->suspended = false;
+}
+
static void rk_fix_speed(void *priv, unsigned int speed)
{
struct rk_priv_data *bsp_priv = priv;
@@ -591,6 +743,8 @@ static int rk_gmac_probe(struct platform_device *pdev)
plat_dat->init = rk_gmac_init;
plat_dat->exit = rk_gmac_exit;
plat_dat->fix_mac_speed = rk_fix_speed;
+ plat_dat->suspend = rk_gmac_suspend;
+ plat_dat->resume = rk_gmac_resume;
plat_dat->bsp_priv = rk_gmac_setup(pdev, data);
if (IS_ERR(plat_dat->bsp_priv))
@@ -604,6 +758,7 @@ static int rk_gmac_probe(struct platform_device *pdev)
}
static const struct of_device_id rk_gmac_dwmac_match[] = {
+ { .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops },
{ .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
{ .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops },
{ }
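
The RK3228 defines above rely on the Rockchip GRF convention: the top 16 bits of each register are per-bit write strobes, so a single regmap_write() flips individual bits with no read-modify-write and no locking. A sketch of the idiom (foo_set_rmii is invented; the register offset is the RK3228_GRF_MAC_CON1 defined above):

#include <linux/bitops.h>
#include <linux/regmap.h>

/* data bit in the low half, matching write-enable in the high half */
#define GRF_BIT(nr)	(BIT(nr) | BIT((nr) + 16))
#define GRF_CLR_BIT(nr)	 BIT((nr) + 16)

static void foo_set_rmii(struct regmap *grf)
{
	/* one write: clear bits 4-5, set bit 6, leave all other bits alone */
	regmap_write(grf, RK3228_GRF_MAC_CON1,
		     GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6));
}
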
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index f13499fa1..bec6963ac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -27,6 +27,11 @@
#include "stmmac.h"
#include "stmmac_platform.h"
+#include "altr_tse_pcs.h"
+
+#define SGMII_ADAPTER_CTRL_REG 0x00
+#define SGMII_ADAPTER_DISABLE 0x0001
+
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2
@@ -52,35 +57,46 @@ struct socfpga_dwmac {
struct reset_control *stmmac_rst;
void __iomem *splitter_base;
bool f2h_ptp_ref_clk;
+ struct tse_pcs pcs;
};
static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
{
struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv;
void __iomem *splitter_base = dwmac->splitter_base;
+ void __iomem *tse_pcs_base = dwmac->pcs.tse_pcs_base;
+ void __iomem *sgmii_adapter_base = dwmac->pcs.sgmii_adapter_base;
+ struct device *dev = dwmac->dev;
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct phy_device *phy_dev = ndev->phydev;
u32 val;
- if (!splitter_base)
- return;
-
- val = readl(splitter_base + EMAC_SPLITTER_CTRL_REG);
- val &= ~EMAC_SPLITTER_CTRL_SPEED_MASK;
-
- switch (speed) {
- case 1000:
- val |= EMAC_SPLITTER_CTRL_SPEED_1000;
- break;
- case 100:
- val |= EMAC_SPLITTER_CTRL_SPEED_100;
- break;
- case 10:
- val |= EMAC_SPLITTER_CTRL_SPEED_10;
- break;
- default:
- return;
+ if ((tse_pcs_base) && (sgmii_adapter_base))
+ writew(SGMII_ADAPTER_DISABLE,
+ sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
+
+ if (splitter_base) {
+ val = readl(splitter_base + EMAC_SPLITTER_CTRL_REG);
+ val &= ~EMAC_SPLITTER_CTRL_SPEED_MASK;
+
+ switch (speed) {
+ case 1000:
+ val |= EMAC_SPLITTER_CTRL_SPEED_1000;
+ break;
+ case 100:
+ val |= EMAC_SPLITTER_CTRL_SPEED_100;
+ break;
+ case 10:
+ val |= EMAC_SPLITTER_CTRL_SPEED_10;
+ break;
+ default:
+ return;
+ }
+ writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG);
}
- writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG);
+ if (tse_pcs_base && sgmii_adapter_base)
+ tse_pcs_fix_mac_speed(&dwmac->pcs, phy_dev, speed);
}
static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *dev)
@@ -88,9 +104,12 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *
struct device_node *np = dev->of_node;
struct regmap *sys_mgr_base_addr;
u32 reg_offset, reg_shift;
- int ret;
- struct device_node *np_splitter;
+ int ret, index;
+ struct device_node *np_splitter = NULL;
+ struct device_node *np_sgmii_adapter = NULL;
struct resource res_splitter;
+ struct resource res_tse_pcs;
+ struct resource res_sgmii_adapter;
dwmac->interface = of_get_phy_mode(np);
@@ -116,7 +135,9 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *
np_splitter = of_parse_phandle(np, "altr,emac-splitter", 0);
if (np_splitter) {
- if (of_address_to_resource(np_splitter, 0, &res_splitter)) {
+ ret = of_address_to_resource(np_splitter, 0, &res_splitter);
+ of_node_put(np_splitter);
+ if (ret) {
dev_info(dev, "Missing emac splitter address\n");
return -EINVAL;
}
@@ -128,12 +149,86 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *
}
}
+ np_sgmii_adapter = of_parse_phandle(np,
+ "altr,gmii-to-sgmii-converter", 0);
+ if (np_sgmii_adapter) {
+ index = of_property_match_string(np_sgmii_adapter, "reg-names",
+ "hps_emac_interface_splitter_avalon_slave");
+
+ if (index >= 0) {
+ if (of_address_to_resource(np_sgmii_adapter, index,
+ &res_splitter)) {
+ dev_err(dev,
+ "%s: ERROR: missing emac splitter address\n",
+ __func__);
+ ret = -EINVAL;
+ goto err_node_put;
+ }
+
+ dwmac->splitter_base =
+ devm_ioremap_resource(dev, &res_splitter);
+
+ if (IS_ERR(dwmac->splitter_base)) {
+ ret = PTR_ERR(dwmac->splitter_base);
+ goto err_node_put;
+ }
+ }
+
+ index = of_property_match_string(np_sgmii_adapter, "reg-names",
+ "gmii_to_sgmii_adapter_avalon_slave");
+
+ if (index >= 0) {
+ if (of_address_to_resource(np_sgmii_adapter, index,
+ &res_sgmii_adapter)) {
+ dev_err(dev,
+ "%s: ERROR: failed mapping adapter\n",
+ __func__);
+ ret = -EINVAL;
+ goto err_node_put;
+ }
+
+ dwmac->pcs.sgmii_adapter_base =
+ devm_ioremap_resource(dev, &res_sgmii_adapter);
+
+ if (IS_ERR(dwmac->pcs.sgmii_adapter_base)) {
+ ret = PTR_ERR(dwmac->pcs.sgmii_adapter_base);
+ goto err_node_put;
+ }
+ }
+
+ index = of_property_match_string(np_sgmii_adapter, "reg-names",
+ "eth_tse_control_port");
+
+ if (index >= 0) {
+ if (of_address_to_resource(np_sgmii_adapter, index,
+ &res_tse_pcs)) {
+ dev_err(dev,
+ "%s: ERROR: failed mapping tse control port\n",
+ __func__);
+ ret = -EINVAL;
+ goto err_node_put;
+ }
+
+ dwmac->pcs.tse_pcs_base =
+ devm_ioremap_resource(dev, &res_tse_pcs);
+
+ if (IS_ERR(dwmac->pcs.tse_pcs_base)) {
+ ret = PTR_ERR(dwmac->pcs.tse_pcs_base);
+ goto err_node_put;
+ }
+ }
+ }
dwmac->reg_offset = reg_offset;
dwmac->reg_shift = reg_shift;
dwmac->sys_mgr_base_addr = sys_mgr_base_addr;
dwmac->dev = dev;
+ of_node_put(np_sgmii_adapter);
return 0;
+
+err_node_put:
+ of_node_put(np_sgmii_adapter);
+ return ret;
}
static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
@@ -151,6 +246,7 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
break;
case PHY_INTERFACE_MODE_MII:
case PHY_INTERFACE_MODE_GMII:
+ case PHY_INTERFACE_MODE_SGMII:
val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
break;
default:
@@ -191,6 +287,12 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
*/
if (dwmac->stmmac_rst)
reset_control_deassert(dwmac->stmmac_rst);
+ if (phymode == PHY_INTERFACE_MODE_SGMII) {
+ if (tse_pcs_init(dwmac->pcs.tse_pcs_base, &dwmac->pcs) != 0) {
+ dev_err(dwmac->dev, "Unable to initialize TSE PCS");
+ return -EINVAL;
+ }
+ }
return 0;
}
@@ -225,6 +327,7 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+
if (!ret) {
struct net_device *ndev = platform_get_drvdata(pdev);
struct stmmac_priv *stpriv = netdev_priv(ndev);
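
The probe additions above repeat one lookup pattern three times: resolve a named "reg" entry on the converter node by index, map it, and treat absence as optional. Condensed into a sketch (foo_map_named is an invented helper, not part of the patch):

#include <linux/err.h>
#include <linux/of_address.h>

static void __iomem *foo_map_named(struct device *dev,
				   struct device_node *np, const char *name)
{
	struct resource res;
	int index;

	index = of_property_match_string(np, "reg-names", name);
	if (index < 0)
		return NULL;			/* block absent: optional */

	if (of_address_to_resource(np, index, &res))
		return ERR_PTR(-EINVAL);	/* named but unmappable */

	return devm_ioremap_resource(dev, &res);
}
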
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index b0593a426..ff3e5ab39 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -38,19 +38,26 @@
#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */
#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */
-enum dwmac1000_irq_status {
- lpiis_irq = 0x400,
- time_stamp_irq = 0x0200,
- mmc_rx_csum_offload_irq = 0x0080,
- mmc_tx_irq = 0x0040,
- mmc_rx_irq = 0x0020,
- mmc_irq = 0x0010,
- pmt_irq = 0x0008,
- pcs_ane_irq = 0x0004,
- pcs_link_irq = 0x0002,
- rgmii_irq = 0x0001,
-};
-#define GMAC_INT_MASK 0x0000003c /* interrupt mask register */
+#define GMAC_INT_STATUS_PMT BIT(3)
+#define GMAC_INT_STATUS_MMCIS BIT(4)
+#define GMAC_INT_STATUS_MMCRIS BIT(5)
+#define GMAC_INT_STATUS_MMCTIS BIT(6)
+#define GMAC_INT_STATUS_MMCCSUM BIT(7)
+#define GMAC_INT_STATUS_TSTAMP BIT(9)
+#define GMAC_INT_STATUS_LPIIS BIT(10)
+
+/* interrupt mask register */
+#define GMAC_INT_MASK 0x0000003c
+#define GMAC_INT_DISABLE_RGMII BIT(0)
+#define GMAC_INT_DISABLE_PCSLINK BIT(1)
+#define GMAC_INT_DISABLE_PCSAN BIT(2)
+#define GMAC_INT_DISABLE_PMT BIT(3)
+#define GMAC_INT_DISABLE_TIMESTAMP BIT(9)
+#define GMAC_INT_DISABLE_PCS (GMAC_INT_DISABLE_RGMII | \
+ GMAC_INT_DISABLE_PCSLINK | \
+ GMAC_INT_DISABLE_PCSAN)
+#define GMAC_INT_DEFAULT_MASK (GMAC_INT_DISABLE_TIMESTAMP | \
+ GMAC_INT_DISABLE_PCS)
/* PMT Control and Status */
#define GMAC_PMT 0x0000002c
@@ -90,42 +97,23 @@ enum power_event {
(reg * 8))
#define GMAC_MAX_PERFECT_ADDRESSES 1
-/* PCS registers (AN/TBI/SGMII/RGMII) offset */
-#define GMAC_AN_CTRL 0x000000c0 /* AN control */
-#define GMAC_AN_STATUS 0x000000c4 /* AN status */
-#define GMAC_ANE_ADV 0x000000c8 /* Auto-Neg. Advertisement */
-#define GMAC_ANE_LPA 0x000000cc /* Auto-Neg. link partener ability */
-#define GMAC_ANE_EXP 0x000000d0 /* ANE expansion */
-#define GMAC_TBI 0x000000d4 /* TBI extend status */
-#define GMAC_S_R_GMII 0x000000d8 /* SGMII RGMII status */
-
-/* AN Configuration defines */
-#define GMAC_AN_CTRL_RAN 0x00000200 /* Restart Auto-Negotiation */
-#define GMAC_AN_CTRL_ANE 0x00001000 /* Auto-Negotiation Enable */
-#define GMAC_AN_CTRL_ELE 0x00004000 /* External Loopback Enable */
-#define GMAC_AN_CTRL_ECD 0x00010000 /* Enable Comma Detect */
-#define GMAC_AN_CTRL_LR 0x00020000 /* Lock to Reference */
-#define GMAC_AN_CTRL_SGMRAL 0x00040000 /* SGMII RAL Control */
-
-/* AN Status defines */
-#define GMAC_AN_STATUS_LS 0x00000004 /* Link Status 0:down 1:up */
-#define GMAC_AN_STATUS_ANA 0x00000008 /* Auto-Negotiation Ability */
-#define GMAC_AN_STATUS_ANC 0x00000020 /* Auto-Negotiation Complete */
-#define GMAC_AN_STATUS_ES 0x00000100 /* Extended Status */
-
-/* Register 54 (SGMII/RGMII status register) */
-#define GMAC_S_R_GMII_LINK 0x8
-#define GMAC_S_R_GMII_SPEED 0x5
-#define GMAC_S_R_GMII_SPEED_SHIFT 0x1
-#define GMAC_S_R_GMII_MODE 0x1
-#define GMAC_S_R_GMII_SPEED_125 2
-#define GMAC_S_R_GMII_SPEED_25 1
-
-/* Common ADV and LPA defines */
-#define GMAC_ANE_FD (1 << 5)
-#define GMAC_ANE_HD (1 << 6)
-#define GMAC_ANE_PSE (3 << 7)
-#define GMAC_ANE_PSE_SHIFT 7
+#define GMAC_PCS_BASE 0x000000c0 /* PCS register base */
+#define GMAC_RGSMIIIS 0x000000d8 /* RGMII/SMII status */
+
+/* SGMII/RGMII status register */
+#define GMAC_RGSMIIIS_LNKMODE BIT(0)
+#define GMAC_RGSMIIIS_SPEED GENMASK(2, 1)
+#define GMAC_RGSMIIIS_SPEED_SHIFT 1
+#define GMAC_RGSMIIIS_LNKSTS BIT(3)
+#define GMAC_RGSMIIIS_JABTO BIT(4)
+#define GMAC_RGSMIIIS_FALSECARDET BIT(5)
+#define GMAC_RGSMIIIS_SMIDRXS BIT(16)
+/* LNKMOD */
+#define GMAC_RGSMIIIS_LNKMOD_MASK 0x1
+/* LNKSPEED */
+#define GMAC_RGSMIIIS_SPEED_125 0x2
+#define GMAC_RGSMIIIS_SPEED_25 0x1
+#define GMAC_RGSMIIIS_SPEED_2_5 0x0
/* GMAC Configuration defines */
#define GMAC_CONTROL_2K 0x08000000 /* IEEE 802.3as 2K packets */
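
A note on the interrupt-mask rework above: GMAC_INT_DEFAULT_MASK (timestamp plus the three RGMII/PCS disable bits) works out to BIT(9)|BIT(2)|BIT(1)|BIT(0) = 0x207, exactly the magic number the old core_init wrote. A usage sketch within this header's scope (foo_ name invented; dwmac1000_core_init below does the same):

static void foo_mask_gmac_irqs(struct mac_device_info *hw,
			       void __iomem *ioaddr)
{
	/* GMAC_INT_MASK is a mask register: a set bit suppresses the irq */
	u32 mask = GMAC_INT_DEFAULT_MASK;	/* == the old 0x207 */

	if (hw->pmt)
		mask &= ~GMAC_INT_DISABLE_PMT;	/* deliver PMT wake events */
	if (hw->pcs)
		mask &= ~GMAC_INT_DISABLE_PCS;	/* deliver RGMII/PCS events */

	writel(mask, ioaddr + GMAC_INT_MASK);
}
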
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index fb1eb578e..885a5e645 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -30,22 +30,48 @@
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <asm/io.h>
+#include "stmmac_pcs.h"
#include "dwmac1000.h"
static void dwmac1000_core_init(struct mac_device_info *hw, int mtu)
{
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_CONTROL);
+
+ /* Configure GMAC core */
value |= GMAC_CORE_INIT;
+
if (mtu > 1500)
value |= GMAC_CONTROL_2K;
if (mtu > 2000)
value |= GMAC_CONTROL_JE;
+ if (hw->ps) {
+ value |= GMAC_CONTROL_TE;
+
+ if (hw->ps == SPEED_1000) {
+ value &= ~GMAC_CONTROL_PS;
+ } else {
+ value |= GMAC_CONTROL_PS;
+
+ if (hw->ps == SPEED_10)
+ value &= ~GMAC_CONTROL_FES;
+ else
+ value |= GMAC_CONTROL_FES;
+ }
+ }
+
writel(value, ioaddr + GMAC_CONTROL);
/* Mask GMAC interrupts */
- writel(0x207, ioaddr + GMAC_INT_MASK);
+ value = GMAC_INT_DEFAULT_MASK;
+
+ if (hw->pmt)
+ value &= ~GMAC_INT_DISABLE_PMT;
+ if (hw->pcs)
+ value &= ~GMAC_INT_DISABLE_PCS;
+
+ writel(value, ioaddr + GMAC_INT_MASK);
#ifdef STMMAC_VLAN_TAG_USED
/* Tag detection without filtering */
@@ -235,12 +261,45 @@ static void dwmac1000_pmt(struct mac_device_info *hw, unsigned long mode)
}
if (mode & WAKE_UCAST) {
pr_debug("GMAC: WOL on global unicast\n");
- pmt |= global_unicast;
+ pmt |= power_down | global_unicast | wake_up_frame_en;
}
writel(pmt, ioaddr + GMAC_PMT);
}
+/* RGMII or SMII interface */
+static void dwmac1000_rgsmii(void __iomem *ioaddr, struct stmmac_extra_stats *x)
+{
+ u32 status;
+
+ status = readl(ioaddr + GMAC_RGSMIIIS);
+ x->irq_rgmii_n++;
+
+ /* Check the link status */
+ if (status & GMAC_RGSMIIIS_LNKSTS) {
+ int speed_value;
+
+ x->pcs_link = 1;
+
+ speed_value = ((status & GMAC_RGSMIIIS_SPEED) >>
+ GMAC_RGSMIIIS_SPEED_SHIFT);
+ if (speed_value == GMAC_RGSMIIIS_SPEED_125)
+ x->pcs_speed = SPEED_1000;
+ else if (speed_value == GMAC_RGSMIIIS_SPEED_25)
+ x->pcs_speed = SPEED_100;
+ else
+ x->pcs_speed = SPEED_10;
+
+ x->pcs_duplex = (status & GMAC_RGSMIIIS_LNKMOD_MASK);
+
+ pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
+ x->pcs_duplex ? "Full" : "Half");
+ } else {
+ x->pcs_link = 0;
+ pr_info("Link is Down\n");
+ }
+}
+
static int dwmac1000_irq_status(struct mac_device_info *hw,
struct stmmac_extra_stats *x)
{
@@ -249,19 +308,20 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
int ret = 0;
/* Not used events (e.g. MMC interrupts) are not handled. */
- if ((intr_status & mmc_tx_irq))
+ if ((intr_status & GMAC_INT_STATUS_MMCTIS))
x->mmc_tx_irq_n++;
- if (unlikely(intr_status & mmc_rx_irq))
+ if (unlikely(intr_status & GMAC_INT_STATUS_MMCRIS))
x->mmc_rx_irq_n++;
- if (unlikely(intr_status & mmc_rx_csum_offload_irq))
+ if (unlikely(intr_status & GMAC_INT_STATUS_MMCCSUM))
x->mmc_rx_csum_offload_irq_n++;
- if (unlikely(intr_status & pmt_irq)) {
+ if (unlikely(intr_status & GMAC_INT_DISABLE_PMT)) {
/* clear the PMT bits 5 and 6 by reading the PMT status reg */
readl(ioaddr + GMAC_PMT);
x->irq_receive_pmt_irq_n++;
}
- /* MAC trx/rx EEE LPI entry/exit interrupts */
- if (intr_status & lpiis_irq) {
+
+ /* MAC tx/rx EEE LPI entry/exit interrupts */
+ if (intr_status & GMAC_INT_STATUS_LPIIS) {
/* Clean LPI interrupt by reading the Reg 12 */
ret = readl(ioaddr + LPI_CTRL_STATUS);
@@ -275,36 +335,10 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
x->irq_rx_path_exit_lpi_mode_n++;
}
- if ((intr_status & pcs_ane_irq) || (intr_status & pcs_link_irq)) {
- readl(ioaddr + GMAC_AN_STATUS);
- x->irq_pcs_ane_n++;
- }
- if (intr_status & rgmii_irq) {
- u32 status = readl(ioaddr + GMAC_S_R_GMII);
- x->irq_rgmii_n++;
-
- /* Save and dump the link status. */
- if (status & GMAC_S_R_GMII_LINK) {
- int speed_value = (status & GMAC_S_R_GMII_SPEED) >>
- GMAC_S_R_GMII_SPEED_SHIFT;
- x->pcs_duplex = (status & GMAC_S_R_GMII_MODE);
-
- if (speed_value == GMAC_S_R_GMII_SPEED_125)
- x->pcs_speed = SPEED_1000;
- else if (speed_value == GMAC_S_R_GMII_SPEED_25)
- x->pcs_speed = SPEED_100;
- else
- x->pcs_speed = SPEED_10;
+ dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
- x->pcs_link = 1;
- pr_debug("%s: Link is Up - %d/%s\n", __func__,
- (int)x->pcs_speed,
- x->pcs_duplex ? "Full" : "Half");
- } else {
- x->pcs_link = 0;
- pr_debug("%s: Link is Down\n", __func__);
- }
- }
+ if (intr_status & PCS_RGSMIIIS_IRQ)
+ dwmac1000_rgsmii(ioaddr, x);
return ret;
}
@@ -363,38 +397,20 @@ static void dwmac1000_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
writel(value, ioaddr + LPI_TIMER_CTRL);
}
-static void dwmac1000_ctrl_ane(struct mac_device_info *hw, bool restart)
+static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
+ bool loopback)
{
- void __iomem *ioaddr = hw->pcsr;
- /* auto negotiation enable and External Loopback enable */
- u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
-
- if (restart)
- value |= GMAC_AN_CTRL_RAN;
-
- writel(value, ioaddr + GMAC_AN_CTRL);
+ dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
}
-static void dwmac1000_get_adv(struct mac_device_info *hw, struct rgmii_adv *adv)
+static void dwmac1000_rane(void __iomem *ioaddr, bool restart)
{
- void __iomem *ioaddr = hw->pcsr;
- u32 value = readl(ioaddr + GMAC_ANE_ADV);
-
- if (value & GMAC_ANE_FD)
- adv->duplex = DUPLEX_FULL;
- if (value & GMAC_ANE_HD)
- adv->duplex |= DUPLEX_HALF;
-
- adv->pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
-
- value = readl(ioaddr + GMAC_ANE_LPA);
-
- if (value & GMAC_ANE_FD)
- adv->lp_duplex = DUPLEX_FULL;
- if (value & GMAC_ANE_HD)
- adv->lp_duplex = DUPLEX_HALF;
+ dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
+}
- adv->lp_pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
+static void dwmac1000_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
+{
+ dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
}
static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
@@ -485,9 +501,10 @@ static const struct stmmac_ops dwmac1000_ops = {
.reset_eee_mode = dwmac1000_reset_eee_mode,
.set_eee_timer = dwmac1000_set_eee_timer,
.set_eee_pls = dwmac1000_set_eee_pls,
- .ctrl_ane = dwmac1000_ctrl_ane,
- .get_adv = dwmac1000_get_adv,
.debug = dwmac1000_debug,
+ .pcs_ctrl_ane = dwmac1000_ctrl_ane,
+ .pcs_rane = dwmac1000_rane,
+ .pcs_get_adv_lp = dwmac1000_get_adv_lp,
};
struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index bc50952a1..6f4f5ce25 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -24,10 +24,8 @@
#define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4)
#define GMAC_INT_STATUS 0x000000b0
#define GMAC_INT_EN 0x000000b4
-#define GMAC_AN_CTRL 0x000000e0
-#define GMAC_AN_STATUS 0x000000e4
-#define GMAC_AN_ADV 0x000000e8
-#define GMAC_AN_LPA 0x000000ec
+#define GMAC_PCS_BASE 0x000000e0
+#define GMAC_PHYIF_CONTROL_STATUS 0x000000f8
#define GMAC_PMT 0x000000c0
#define GMAC_VERSION 0x00000110
#define GMAC_DEBUG 0x00000114
@@ -54,9 +52,18 @@
#define GMAC_TX_FLOW_CTRL_PT_SHIFT 16
/* MAC Interrupt bitmap*/
+#define GMAC_INT_RGSMIIS BIT(0)
+#define GMAC_INT_PCS_LINK BIT(1)
+#define GMAC_INT_PCS_ANE BIT(2)
+#define GMAC_INT_PCS_PHYIS BIT(3)
#define GMAC_INT_PMT_EN BIT(4)
#define GMAC_INT_LPI_EN BIT(5)
+#define GMAC_PCS_IRQ_DEFAULT (GMAC_INT_RGSMIIS | GMAC_INT_PCS_LINK | \
+ GMAC_INT_PCS_ANE)
+
+#define GMAC_INT_DEFAULT_MASK GMAC_INT_PMT_EN
+
enum dwmac4_irq_status {
time_stamp_irq = 0x00001000,
mmc_rx_csum_offload_irq = 0x00000800,
@@ -64,19 +71,8 @@ enum dwmac4_irq_status {
mmc_rx_irq = 0x00000200,
mmc_irq = 0x00000100,
pmt_irq = 0x00000010,
- pcs_ane_irq = 0x00000004,
- pcs_link_irq = 0x00000002,
};
-/* MAC Auto-Neg bitmap*/
-#define GMAC_AN_CTRL_RAN BIT(9)
-#define GMAC_AN_CTRL_ANE BIT(12)
-#define GMAC_AN_CTRL_ELE BIT(14)
-#define GMAC_AN_FD BIT(5)
-#define GMAC_AN_HD BIT(6)
-#define GMAC_AN_PSE_MASK GENMASK(8, 7)
-#define GMAC_AN_PSE_SHIFT 7
-
/* MAC PMT bitmap */
enum power_event {
pointer_reset = 0x80000000,
@@ -250,6 +246,23 @@ enum power_event {
#define MTL_DEBUG_RRCSTS_FLUSH 3
#define MTL_DEBUG_RWCSTS BIT(0)
+/* SGMII/RGMII status register */
+#define GMAC_PHYIF_CTRLSTATUS_TC BIT(0)
+#define GMAC_PHYIF_CTRLSTATUS_LUD BIT(1)
+#define GMAC_PHYIF_CTRLSTATUS_SMIDRXS BIT(4)
+#define GMAC_PHYIF_CTRLSTATUS_LNKMOD BIT(16)
+#define GMAC_PHYIF_CTRLSTATUS_SPEED GENMASK(18, 17)
+#define GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT 17
+#define GMAC_PHYIF_CTRLSTATUS_LNKSTS BIT(19)
+#define GMAC_PHYIF_CTRLSTATUS_JABTO BIT(20)
+#define GMAC_PHYIF_CTRLSTATUS_FALSECARDET BIT(21)
+/* LNKMOD */
+#define GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK 0x1
+/* LNKSPEED */
+#define GMAC_PHYIF_CTRLSTATUS_SPEED_125 0x2
+#define GMAC_PHYIF_CTRLSTATUS_SPEED_25 0x1
+#define GMAC_PHYIF_CTRLSTATUS_SPEED_2_5 0x0
+
extern const struct stmmac_dma_ops dwmac4_dma_ops;
extern const struct stmmac_dma_ops dwmac410_dma_ops;
#endif /* __DWMAC4_H__ */
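
The new PHYIF SPEED field encodes the recovered line clock, not the link rate directly: 0x2 means 125 MHz (1000 Mb/s), 0x1 means 25 MHz (100 Mb/s), 0x0 means 2.5 MHz (10 Mb/s). A decode sketch within this header's scope (foo_phyif_speed is invented; dwmac4_phystatus below open-codes the same mapping):

static int foo_phyif_speed(u32 status)
{
	switch ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >>
		GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT) {
	case GMAC_PHYIF_CTRLSTATUS_SPEED_125:	/* 125 MHz line clock */
		return SPEED_1000;
	case GMAC_PHYIF_CTRLSTATUS_SPEED_25:	/* 25 MHz line clock */
		return SPEED_100;
	default:				/* 2.5 MHz line clock */
		return SPEED_10;
	}
}
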
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 44da877d2..51019b794 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/io.h>
+#include "stmmac_pcs.h"
#include "dwmac4.h"
static void dwmac4_core_init(struct mac_device_info *hw, int mtu)
@@ -31,10 +32,31 @@ static void dwmac4_core_init(struct mac_device_info *hw, int mtu)
if (mtu > 2000)
value |= GMAC_CONFIG_JE;
+ if (hw->ps) {
+ value |= GMAC_CONFIG_TE;
+
+ if (hw->ps == SPEED_1000) {
+ value &= ~GMAC_CONFIG_PS;
+ } else {
+ value |= GMAC_CONFIG_PS;
+
+ if (hw->ps == SPEED_10)
+ value &= ~GMAC_CONFIG_FES;
+ else
+ value |= GMAC_CONFIG_FES;
+ }
+ }
+
writel(value, ioaddr + GMAC_CONFIG);
/* Mask GMAC interrupts */
- writel(GMAC_INT_PMT_EN, ioaddr + GMAC_INT_EN);
+ value = GMAC_INT_DEFAULT_MASK;
+ if (hw->pmt)
+ value |= GMAC_INT_PMT_EN;
+ if (hw->pcs)
+ value |= GMAC_PCS_IRQ_DEFAULT;
+
+ writel(value, ioaddr + GMAC_INT_EN);
}
static void dwmac4_dump_regs(struct mac_device_info *hw)
@@ -80,7 +102,7 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
}
if (mode & WAKE_UCAST) {
pr_debug("GMAC: WOL on global unicast\n");
- pmt |= global_unicast;
+ pmt |= power_down | global_unicast | wake_up_frame_en;
}
writel(pmt, ioaddr + GMAC_PMT);
@@ -190,39 +212,53 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
}
}
-static void dwmac4_ctrl_ane(struct mac_device_info *hw, bool restart)
+static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
+ bool loopback)
{
- void __iomem *ioaddr = hw->pcsr;
-
- /* auto negotiation enable and External Loopback enable */
- u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
+ dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
+}
- if (restart)
- value |= GMAC_AN_CTRL_RAN;
+static void dwmac4_rane(void __iomem *ioaddr, bool restart)
+{
+ dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
+}
- writel(value, ioaddr + GMAC_AN_CTRL);
+static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
+{
+ dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
}
-static void dwmac4_get_adv(struct mac_device_info *hw, struct rgmii_adv *adv)
+/* RGMII or SMII interface */
+static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
{
- void __iomem *ioaddr = hw->pcsr;
- u32 value = readl(ioaddr + GMAC_AN_ADV);
+ u32 status;
- if (value & GMAC_AN_FD)
- adv->duplex = DUPLEX_FULL;
- if (value & GMAC_AN_HD)
- adv->duplex |= DUPLEX_HALF;
+ status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS);
+ x->irq_rgmii_n++;
- adv->pause = (value & GMAC_AN_PSE_MASK) >> GMAC_AN_PSE_SHIFT;
+ /* Check the link status */
+ if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) {
+ int speed_value;
- value = readl(ioaddr + GMAC_AN_LPA);
+ x->pcs_link = 1;
+
+ speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >>
+ GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT);
+ if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125)
+ x->pcs_speed = SPEED_1000;
+ else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25)
+ x->pcs_speed = SPEED_100;
+ else
+ x->pcs_speed = SPEED_10;
- if (value & GMAC_AN_FD)
- adv->lp_duplex = DUPLEX_FULL;
- if (value & GMAC_AN_HD)
- adv->lp_duplex = DUPLEX_HALF;
+ x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK);
- adv->lp_pause = (value & GMAC_AN_PSE_MASK) >> GMAC_AN_PSE_SHIFT;
+ pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
+ x->pcs_duplex ? "Full" : "Half");
+ } else {
+ x->pcs_link = 0;
+ pr_info("Link is Down\n");
+ }
}
static int dwmac4_irq_status(struct mac_device_info *hw,
@@ -248,11 +284,6 @@ static int dwmac4_irq_status(struct mac_device_info *hw,
x->irq_receive_pmt_irq_n++;
}
- if ((intr_status & pcs_ane_irq) || (intr_status & pcs_link_irq)) {
- readl(ioaddr + GMAC_AN_STATUS);
- x->irq_pcs_ane_n++;
- }
-
mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
/* Check MTL Interrupt: Currently only one queue is used: Q0. */
if (mtl_int_qx_status & MTL_INT_Q0) {
@@ -267,6 +298,10 @@ static int dwmac4_irq_status(struct mac_device_info *hw,
}
}
+ dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
+ if (intr_status & PCS_RGSMIIIS_IRQ)
+ dwmac4_phystatus(ioaddr, x);
+
return ret;
}
@@ -363,8 +398,9 @@ static const struct stmmac_ops dwmac4_ops = {
.pmt = dwmac4_pmt,
.set_umac_addr = dwmac4_set_umac_addr,
.get_umac_addr = dwmac4_get_umac_addr,
- .ctrl_ane = dwmac4_ctrl_ane,
- .get_adv = dwmac4_get_adv,
+ .pcs_ctrl_ane = dwmac4_ctrl_ane,
+ .pcs_rane = dwmac4_rane,
+ .pcs_get_adv_lp = dwmac4_get_adv_lp,
.debug = dwmac4_debug,
.set_filter = dwmac4_set_filter,
};
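
One polarity trap in the dwmac4 half of this rework, sketched below: GMAC_INT_EN on dwmac4 is an enable register (set bit = interrupt delivered), the inverse of dwmac1000's GMAC_INT_MASK, so the PCS bits are OR-ed in rather than cleared (foo_ name invented; dwmac4_core_init above does the same):

static void foo_dwmac4_irq_enable(struct mac_device_info *hw,
				  void __iomem *ioaddr)
{
	u32 en = GMAC_INT_DEFAULT_MASK;		/* dwmac4: just PMT by default */

	if (hw->pmt)
		en |= GMAC_INT_PMT_EN;
	if (hw->pcs)
		en |= GMAC_PCS_IRQ_DEFAULT;	/* RGSMII + PCS link/ANE */

	writel(en, ioaddr + GMAC_INT_EN);
}
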
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 59ae6088c..8dc9056c1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -117,7 +117,6 @@ struct stmmac_priv {
int eee_enabled;
int eee_active;
int tx_lpi_timer;
- int pcs;
unsigned int mode;
int extend_desc;
struct ptp_clock *ptp_clock;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index e2b98b016..1e06173fc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -276,7 +276,8 @@ static int stmmac_ethtool_getsettings(struct net_device *dev,
struct phy_device *phy = priv->phydev;
int rc;
- if ((priv->pcs & STMMAC_PCS_RGMII) || (priv->pcs & STMMAC_PCS_SGMII)) {
+ if (priv->hw->pcs & STMMAC_PCS_RGMII ||
+ priv->hw->pcs & STMMAC_PCS_SGMII) {
struct rgmii_adv adv;
if (!priv->xstats.pcs_link) {
@@ -289,10 +290,10 @@ static int stmmac_ethtool_getsettings(struct net_device *dev,
ethtool_cmd_speed_set(cmd, priv->xstats.pcs_speed);
/* Get and convert ADV/LP_ADV from the HW AN registers */
- if (!priv->hw->mac->get_adv)
+ if (!priv->hw->mac->pcs_get_adv_lp)
return -EOPNOTSUPP; /* should never happen indeed */
- priv->hw->mac->get_adv(priv->hw, &adv);
+ priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv);
/* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */
@@ -361,7 +362,8 @@ static int stmmac_ethtool_setsettings(struct net_device *dev,
struct phy_device *phy = priv->phydev;
int rc;
- if ((priv->pcs & STMMAC_PCS_RGMII) || (priv->pcs & STMMAC_PCS_SGMII)) {
+ if (priv->hw->pcs & STMMAC_PCS_RGMII ||
+ priv->hw->pcs & STMMAC_PCS_SGMII) {
u32 mask = ADVERTISED_Autoneg | ADVERTISED_Pause;
/* Only support ANE */
@@ -376,8 +378,11 @@ static int stmmac_ethtool_setsettings(struct net_device *dev,
ADVERTISED_10baseT_Full);
spin_lock(&priv->lock);
- if (priv->hw->mac->ctrl_ane)
- priv->hw->mac->ctrl_ane(priv->hw, 1);
+
+ if (priv->hw->mac->pcs_ctrl_ane)
+ priv->hw->mac->pcs_ctrl_ane(priv->ioaddr, 1,
+ priv->hw->ps, 0);
+
spin_unlock(&priv->lock);
return 0;
@@ -452,11 +457,22 @@ stmmac_get_pauseparam(struct net_device *netdev,
{
struct stmmac_priv *priv = netdev_priv(netdev);
- if (priv->pcs) /* FIXME */
- return;
-
pause->rx_pause = 0;
pause->tx_pause = 0;
+
+ if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) {
+ struct rgmii_adv adv_lp;
+
+ pause->autoneg = 1;
+ priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv_lp);
+ if (!adv_lp.pause)
+ return;
+ } else {
+ if (!(priv->phydev->supported & SUPPORTED_Pause) ||
+ !(priv->phydev->supported & SUPPORTED_Asym_Pause))
+ return;
+ }
+
pause->autoneg = priv->phydev->autoneg;
if (priv->flow_ctrl & FLOW_RX)
@@ -473,10 +489,19 @@ stmmac_set_pauseparam(struct net_device *netdev,
struct stmmac_priv *priv = netdev_priv(netdev);
struct phy_device *phy = priv->phydev;
int new_pause = FLOW_OFF;
- int ret = 0;
- if (priv->pcs) /* FIXME */
- return -EOPNOTSUPP;
+ if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) {
+ struct rgmii_adv adv_lp;
+
+ pause->autoneg = 1;
+ priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv_lp);
+ if (!adv_lp.pause)
+ return -EOPNOTSUPP;
+ } else {
+ if (!(phy->supported & SUPPORTED_Pause) ||
+ !(phy->supported & SUPPORTED_Asym_Pause))
+ return -EOPNOTSUPP;
+ }
if (pause->rx_pause)
new_pause |= FLOW_RX;
@@ -488,11 +513,12 @@ stmmac_set_pauseparam(struct net_device *netdev,
if (phy->autoneg) {
if (netif_running(netdev))
- ret = phy_start_aneg(phy);
- } else
- priv->hw->mac->flow_ctrl(priv->hw, phy->duplex,
- priv->flow_ctrl, priv->pause);
- return ret;
+ return phy_start_aneg(phy);
+ }
+
+ priv->hw->mac->flow_ctrl(priv->hw, phy->duplex, priv->flow_ctrl,
+ priv->pause);
+ return 0;
}
static void stmmac_get_ethtool_stats(struct net_device *dev,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index e4071265b..4c8c60af7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -285,8 +285,9 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
/* Using PCS we cannot deal with the phy registers at this stage
* so we do not support extra features like EEE.
*/
- if ((priv->pcs == STMMAC_PCS_RGMII) || (priv->pcs == STMMAC_PCS_TBI) ||
- (priv->pcs == STMMAC_PCS_RTBI))
+ if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
+ (priv->hw->pcs == STMMAC_PCS_TBI) ||
+ (priv->hw->pcs == STMMAC_PCS_RTBI))
goto out;
/* MAC core supports the EEE feature. */
@@ -799,10 +800,10 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
(interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
(interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
pr_debug("STMMAC: PCS RGMII support enable\n");
- priv->pcs = STMMAC_PCS_RGMII;
+ priv->hw->pcs = STMMAC_PCS_RGMII;
} else if (interface == PHY_INTERFACE_MODE_SGMII) {
pr_debug("STMMAC: PCS SGMII support enable\n");
- priv->pcs = STMMAC_PCS_SGMII;
+ priv->hw->pcs = STMMAC_PCS_SGMII;
}
}
}
@@ -1665,6 +1666,19 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
if (priv->plat->bus_setup)
priv->plat->bus_setup(priv->ioaddr);
+ /* PS and related bits will be programmed according to the speed */
+ if (priv->hw->pcs) {
+ int speed = priv->plat->mac_port_sel_speed;
+
+ if ((speed == SPEED_10) || (speed == SPEED_100) ||
+ (speed == SPEED_1000)) {
+ priv->hw->ps = speed;
+ } else {
+ dev_warn(priv->device, "invalid port speed\n");
+ priv->hw->ps = 0;
+ }
+ }
+
/* Initialize the MAC Core */
priv->hw->mac->core_init(priv->hw, dev->mtu);
@@ -1714,8 +1728,8 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
}
- if (priv->pcs && priv->hw->mac->ctrl_ane)
- priv->hw->mac->ctrl_ane(priv->hw, 0);
+ if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
+ priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
/* set TX ring length */
if (priv->hw->dma->set_tx_ring_len)
@@ -1748,8 +1762,9 @@ static int stmmac_open(struct net_device *dev)
stmmac_check_ether_addr(priv);
- if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
- priv->pcs != STMMAC_PCS_RTBI) {
+ if (priv->hw->pcs != STMMAC_PCS_RGMII &&
+ priv->hw->pcs != STMMAC_PCS_TBI &&
+ priv->hw->pcs != STMMAC_PCS_RTBI) {
ret = stmmac_init_phy(dev);
if (ret) {
pr_err("%s: Cannot attach to PHY (error: %d)\n",
@@ -2809,6 +2824,14 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
priv->rx_tail_addr,
STMMAC_CHAN0);
}
+
+ /* PCS link status */
+ if (priv->hw->pcs) {
+ if (priv->xstats.pcs_link)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+ }
}
/* To handle DMA interrupts */
@@ -3130,6 +3153,7 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
*/
priv->plat->enh_desc = priv->dma_cap.enh_desc;
priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
+ priv->hw->pmt = priv->plat->pmt;
/* TXCOE doesn't work in thresh DMA mode */
if (priv->plat->force_thresh_dma_mode)
@@ -3325,8 +3349,9 @@ int stmmac_dvr_probe(struct device *device,
stmmac_check_pcs_mode(priv);
- if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
- priv->pcs != STMMAC_PCS_RTBI) {
+ if (priv->hw->pcs != STMMAC_PCS_RGMII &&
+ priv->hw->pcs != STMMAC_PCS_TBI &&
+ priv->hw->pcs != STMMAC_PCS_RTBI) {
/* MDIO bus Registration */
ret = stmmac_mdio_register(ndev);
if (ret < 0) {
@@ -3372,12 +3397,14 @@ int stmmac_dvr_remove(struct device *dev)
stmmac_set_mac(priv->ioaddr, false);
netif_carrier_off(ndev);
unregister_netdev(ndev);
+ of_node_put(priv->plat->phy_node);
if (priv->stmmac_rst)
reset_control_assert(priv->stmmac_rst);
clk_disable_unprepare(priv->pclk);
clk_disable_unprepare(priv->stmmac_clk);
- if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
- priv->pcs != STMMAC_PCS_RTBI)
+ if (priv->hw->pcs != STMMAC_PCS_RGMII &&
+ priv->hw->pcs != STMMAC_PCS_TBI &&
+ priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
free_netdev(ndev);
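
The stmmac_hw_setup() hunk above validates the DT-provided port-select speed inline before writing hw->ps. A hypothetical condensation of that check (stmmac_valid_ps is not a real symbol, just an illustration; the SPEED_* constants come from linux/ethtool.h):

#include <linux/types.h>
#include <linux/ethtool.h>

/* Hypothetical helper: return the speed if the PCS port-select logic
 * understands it, else 0, which the code above treats as "warn and
 * leave PS unprogrammed". */
static u32 stmmac_valid_ps(u32 speed)
{
	switch (speed) {
	case SPEED_10:
	case SPEED_100:
	case SPEED_1000:
		return speed;
	default:
		return 0;
	}
}
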
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
new file mode 100644
index 000000000..eba41c24b
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
@@ -0,0 +1,159 @@
+/*
+ * stmmac_pcs.h: Physical Coding Sublayer Header File
+ *
+ * Copyright (C) 2016 STMicroelectronics (R&D) Limited
+ * Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __STMMAC_PCS_H__
+#define __STMMAC_PCS_H__
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include "common.h"
+
+/* PCS registers (AN/TBI/SGMII/RGMII) offsets */
+#define GMAC_AN_CTRL(x) (x) /* AN control */
+#define GMAC_AN_STATUS(x) (x + 0x4) /* AN status */
+#define GMAC_ANE_ADV(x) (x + 0x8) /* ANE Advertisement */
+#define GMAC_ANE_LPA(x) (x + 0xc) /* ANE link partner ability */
+#define GMAC_ANE_EXP(x) (x + 0x10) /* ANE expansion */
+#define GMAC_TBI(x) (x + 0x14) /* TBI extend status */
+
+/* AN Configuration defines */
+#define GMAC_AN_CTRL_RAN BIT(9) /* Restart Auto-Negotiation */
+#define GMAC_AN_CTRL_ANE BIT(12) /* Auto-Negotiation Enable */
+#define GMAC_AN_CTRL_ELE BIT(14) /* External Loopback Enable */
+#define GMAC_AN_CTRL_ECD BIT(16) /* Enable Comma Detect */
+#define GMAC_AN_CTRL_LR BIT(17) /* Lock to Reference */
+#define GMAC_AN_CTRL_SGMRAL BIT(18) /* SGMII RAL Control */
+
+/* AN Status defines */
+#define GMAC_AN_STATUS_LS BIT(2) /* Link Status 0:down 1:up */
+#define GMAC_AN_STATUS_ANA BIT(3) /* Auto-Negotiation Ability */
+#define GMAC_AN_STATUS_ANC BIT(5) /* Auto-Negotiation Complete */
+#define GMAC_AN_STATUS_ES BIT(8) /* Extended Status */
+
+/* ADV and LPA defines */
+#define GMAC_ANE_FD BIT(5)
+#define GMAC_ANE_HD BIT(6)
+#define GMAC_ANE_PSE GENMASK(8, 7)
+#define GMAC_ANE_PSE_SHIFT 7
+#define GMAC_ANE_RFE GENMASK(13, 12)
+#define GMAC_ANE_RFE_SHIFT 12
+#define GMAC_ANE_ACK BIT(14)
+
+/**
+ * dwmac_pcs_isr - TBI, RTBI, or SGMII PHY ISR
+ * @ioaddr: IO registers pointer
+ * @reg: Base address of the AN Control Register.
+ * @intr_status: GMAC core interrupt status
+ * @x: pointer to log these events as stats
+ * Description: it is the ISR for PCS events: Auto-Negotiation Completed and
+ * Link status.
+ */
+static inline void dwmac_pcs_isr(void __iomem *ioaddr, u32 reg,
+ unsigned int intr_status,
+ struct stmmac_extra_stats *x)
+{
+ u32 val = readl(ioaddr + GMAC_AN_STATUS(reg));
+
+ if (intr_status & PCS_ANE_IRQ) {
+ x->irq_pcs_ane_n++;
+ if (val & GMAC_AN_STATUS_ANC)
+ pr_info("stmmac_pcs: ANE process completed\n");
+ }
+
+ if (intr_status & PCS_LINK_IRQ) {
+ x->irq_pcs_link_n++;
+ if (val & GMAC_AN_STATUS_LS)
+ pr_info("stmmac_pcs: Link Up\n");
+ else
+ pr_info("stmmac_pcs: Link Down\n");
+ }
+}
+
+/**
+ * dwmac_rane - To restart ANE
+ * @ioaddr: IO registers pointer
+ * @reg: Base address of the AN Control Register.
+ * @restart: to restart ANE
+ * Description: this just restarts the Auto-Negotiation.
+ */
+static inline void dwmac_rane(void __iomem *ioaddr, u32 reg, bool restart)
+{
+ u32 value = readl(ioaddr + GMAC_AN_CTRL(reg));
+
+ if (restart)
+ value |= GMAC_AN_CTRL_RAN;
+
+ writel(value, ioaddr + GMAC_AN_CTRL(reg));
+}
+
+/**
+ * dwmac_ctrl_ane - To program the AN Control Register.
+ * @ioaddr: IO registers pointer
+ * @reg: Base address of the AN Control Register.
+ * @ane: to enable the auto-negotiation
+ * @srgmi_ral: to manage MAC-2-MAC SGMII connections.
+ * @loopback: to cause the PHY to loopback tx data into rx path.
+ * Description: this is the main function to configure the AN control register
+ * and init the ANE, select loopback (usually for debugging purposes) and
+ * configure SGMII RAL.
+ */
+static inline void dwmac_ctrl_ane(void __iomem *ioaddr, u32 reg, bool ane,
+ bool srgmi_ral, bool loopback)
+{
+ u32 value = readl(ioaddr + GMAC_AN_CTRL(reg));
+
+ /* Enable and restart the Auto-Negotiation */
+ if (ane)
+ value |= GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_RAN;
+
+ /* In case of a MAC-2-MAC connection, the block is configured to
+ * operate according to the MAC conf register.
+ */
+ if (srgmi_ral)
+ value |= GMAC_AN_CTRL_SGMRAL;
+
+ if (loopback)
+ value |= GMAC_AN_CTRL_ELE;
+
+ writel(value, ioaddr + GMAC_AN_CTRL(reg));
+}
+
+/**
+ * dwmac_get_adv_lp - Get ADV and LP cap
+ * @ioaddr: IO registers pointer
+ * @reg: Base address of the AN Control Register.
+ * @adv_lp: structure to store the adv,lp status
+ * Description: this is to expose the ANE advertisement and Link partner ability
+ * status to ethtool support.
+ */
+static inline void dwmac_get_adv_lp(void __iomem *ioaddr, u32 reg,
+ struct rgmii_adv *adv_lp)
+{
+ u32 value = readl(ioaddr + GMAC_ANE_ADV(reg));
+
+ if (value & GMAC_ANE_FD)
+ adv_lp->duplex = DUPLEX_FULL;
+ if (value & GMAC_ANE_HD)
+ adv_lp->duplex |= DUPLEX_HALF;
+
+ adv_lp->pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
+
+ value = readl(ioaddr + GMAC_ANE_LPA(reg));
+
+ if (value & GMAC_ANE_FD)
+ adv_lp->lp_duplex = DUPLEX_FULL;
+ if (value & GMAC_ANE_HD)
+ adv_lp->lp_duplex = DUPLEX_HALF;
+
+ adv_lp->lp_pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
+}
+#endif /* __STMMAC_PCS_H__ */
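
The helpers in the new stmmac_pcs.h take the AN register base as a parameter because each GMAC core maps the PCS block at a different offset. A minimal sketch of how a core file is expected to wrap them; the wrapper name and the GMAC_PCS_BASE value are illustrative only, since the actual glue lives in the dwmac core files, not in this hunk:

#include <linux/io.h>
#include "stmmac_pcs.h"

#define GMAC_PCS_BASE	0x000000c0	/* illustrative per-core offset */

/* Per-core wrapper: bakes in the register base so the mac ops keep
 * the base-free signature used by the ethtool hunks above. */
static void dwmac_core_ctrl_ane(void __iomem *ioaddr, bool ane,
				bool srgmi_ral, bool loopback)
{
	dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
}
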
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 409db913b..756bb548e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -113,8 +113,10 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
return NULL;
axi = kzalloc(sizeof(*axi), GFP_KERNEL);
- if (!axi)
+ if (!axi) {
+ of_node_put(np);
return ERR_PTR(-ENOMEM);
+ }
axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en");
axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm");
@@ -127,6 +129,7 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt);
of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt);
of_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN);
+ of_node_put(np);
return axi;
}
@@ -302,7 +305,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
GFP_KERNEL);
if (!dma_cfg) {
- of_node_put(np);
+ of_node_put(plat->phy_node);
return ERR_PTR(-ENOMEM);
}
plat->dma_cfg = dma_cfg;
@@ -319,6 +322,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.");
}
+ of_property_read_u32(np, "snps,ps-speed", &plat->mac_port_sel_speed);
+
plat->axi = stmmac_axi_setup(pdev);
return plat;
@@ -411,7 +416,9 @@ static int stmmac_pltfr_suspend(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
ret = stmmac_suspend(dev);
- if (priv->plat->exit)
+ if (priv->plat->suspend)
+ priv->plat->suspend(pdev, priv->plat->bsp_priv);
+ else if (priv->plat->exit)
priv->plat->exit(pdev, priv->plat->bsp_priv);
return ret;
@@ -430,7 +437,9 @@ static int stmmac_pltfr_resume(struct device *dev)
struct stmmac_priv *priv = netdev_priv(ndev);
struct platform_device *pdev = to_platform_device(dev);
- if (priv->plat->init)
+ if (priv->plat->resume)
+ priv->plat->resume(pdev, priv->plat->bsp_priv);
+ else if (priv->plat->init)
priv->plat->init(pdev, priv->plat->bsp_priv);
return stmmac_resume(dev);
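
With the stmmac_platform.c change above, a platform glue layer can supply dedicated suspend/resume callbacks instead of reusing exit/init. A sketch of what a BSP might register, assuming plat_stmmacenet_data gained suspend/resume members with the same signature as the existing exit/init hooks:

#include <linux/platform_device.h>

/* Sketch, assuming plat->suspend/resume mirror exit/init. */
static void my_bsp_suspend(struct platform_device *pdev, void *bsp_priv)
{
	/* e.g. gate a board-specific PHY reference clock */
}

static void my_bsp_resume(struct platform_device *pdev, void *bsp_priv)
{
	/* e.g. ungate the clock and re-apply pad settings */
}

/* wired up at probe time:
 *	plat->suspend = my_bsp_suspend;
 *	plat->resume  = my_bsp_resume;
 */
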
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
index 158213cd6..4490ebaed 100644
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -46,7 +46,6 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
-#include <linux/version.h>
#include <linux/device.h>
#include <linux/bitrev.h>
@@ -598,7 +597,6 @@ struct net_local {
struct work_struct txtimeout_reinit;
phy_interface_t phy_interface;
- struct phy_device *phy_dev;
struct mii_bus *mii_bus;
unsigned int link;
@@ -816,7 +814,7 @@ static int dwceqos_mdio_write(struct mii_bus *bus, int mii_id, int phyreg,
static int dwceqos_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
struct net_local *lp = netdev_priv(ndev);
- struct phy_device *phydev = lp->phy_dev;
+ struct phy_device *phydev = ndev->phydev;
if (!netif_running(ndev))
return -EINVAL;
@@ -850,6 +848,7 @@ static void dwceqos_link_down(struct net_local *lp)
static void dwceqos_link_up(struct net_local *lp)
{
+ struct net_device *ndev = lp->ndev;
u32 regval;
unsigned long flags;
@@ -860,7 +859,7 @@ static void dwceqos_link_up(struct net_local *lp)
dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
spin_unlock_irqrestore(&lp->hw_lock, flags);
- lp->eee_active = !phy_init_eee(lp->phy_dev, 0);
+ lp->eee_active = !phy_init_eee(ndev->phydev, 0);
/* Check for changed EEE capability */
if (!lp->eee_active && lp->eee_enabled) {
@@ -876,7 +875,8 @@ static void dwceqos_link_up(struct net_local *lp)
static void dwceqos_set_speed(struct net_local *lp)
{
- struct phy_device *phydev = lp->phy_dev;
+ struct net_device *ndev = lp->ndev;
+ struct phy_device *phydev = ndev->phydev;
u32 regval;
regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
@@ -903,7 +903,7 @@ static void dwceqos_set_speed(struct net_local *lp)
static void dwceqos_adjust_link(struct net_device *ndev)
{
struct net_local *lp = netdev_priv(ndev);
- struct phy_device *phydev = lp->phy_dev;
+ struct phy_device *phydev = ndev->phydev;
int status_change = 0;
if (lp->phy_defer)
@@ -987,7 +987,6 @@ static int dwceqos_mii_probe(struct net_device *ndev)
lp->link = 0;
lp->speed = 0;
lp->duplex = DUPLEX_UNKNOWN;
- lp->phy_dev = phydev;
return 0;
}
@@ -1247,7 +1246,7 @@ static int dwceqos_mii_init(struct net_local *lp)
lp->mii_bus->read = &dwceqos_mdio_read;
lp->mii_bus->write = &dwceqos_mdio_write;
lp->mii_bus->priv = lp;
- lp->mii_bus->parent = &lp->ndev->dev;
+ lp->mii_bus->parent = &lp->pdev->dev;
of_address_to_resource(lp->pdev->dev.of_node, 0, &res);
snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx",
@@ -1531,6 +1530,7 @@ static void dwceqos_configure_bus(struct net_local *lp)
static void dwceqos_init_hw(struct net_local *lp)
{
+ struct net_device *ndev = lp->ndev;
u32 regval;
u32 buswidth;
u32 dma_skip;
@@ -1622,13 +1622,7 @@ static void dwceqos_init_hw(struct net_local *lp)
DWCEQOS_MMC_CTRL_RSTONRD);
dwceqos_enable_mmc_interrupt(lp);
- /* Enable Interrupts */
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE,
- DWCEQOS_DMA_CH0_IE_NIE |
- DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE |
- DWCEQOS_DMA_CH0_IE_AIE |
- DWCEQOS_DMA_CH0_IE_FBEE);
-
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, 0);
dwceqos_write(lp, REG_DWCEQOS_MAC_IE, 0);
dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, DWCEQOS_MAC_CFG_IPC |
@@ -1645,10 +1639,10 @@ static void dwceqos_init_hw(struct net_local *lp)
regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE);
lp->phy_defer = false;
- mutex_lock(&lp->phy_dev->lock);
- phy_read_status(lp->phy_dev);
+ mutex_lock(&ndev->phydev->lock);
+ phy_read_status(ndev->phydev);
dwceqos_adjust_link(lp->ndev);
- mutex_unlock(&lp->phy_dev->lock);
+ mutex_unlock(&ndev->phydev->lock);
}
static void dwceqos_tx_reclaim(unsigned long data)
@@ -1898,13 +1892,22 @@ static int dwceqos_open(struct net_device *ndev)
* hence the unusual init order with phy_start first.
*/
lp->phy_defer = true;
- phy_start(lp->phy_dev);
+ phy_start(ndev->phydev);
dwceqos_init_hw(lp);
napi_enable(&lp->napi);
netif_start_queue(ndev);
tasklet_enable(&lp->tx_bdreclaim_tasklet);
+ /* Enable Interrupts -- do this only after we enable NAPI and the
+ * tasklet.
+ */
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE,
+ DWCEQOS_DMA_CH0_IE_NIE |
+ DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE |
+ DWCEQOS_DMA_CH0_IE_AIE |
+ DWCEQOS_DMA_CH0_IE_FBEE);
+
return 0;
}
@@ -1943,7 +1946,7 @@ static int dwceqos_stop(struct net_device *ndev)
dwceqos_drain_dma(lp);
dwceqos_reset_hw(lp);
- phy_stop(lp->phy_dev);
+ phy_stop(ndev->phydev);
dwceqos_descriptor_free(lp);
@@ -2523,30 +2526,6 @@ dwceqos_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *s)
return s;
}
-static int
-dwceqos_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
-{
- struct net_local *lp = netdev_priv(ndev);
- struct phy_device *phydev = lp->phy_dev;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_gset(phydev, ecmd);
-}
-
-static int
-dwceqos_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
-{
- struct net_local *lp = netdev_priv(ndev);
- struct phy_device *phydev = lp->phy_dev;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_sset(phydev, ecmd);
-}
-
static void
dwceqos_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed)
{
@@ -2574,17 +2553,17 @@ static int dwceqos_set_pauseparam(struct net_device *ndev,
lp->flowcontrol.autoneg = pp->autoneg;
if (pp->autoneg) {
- lp->phy_dev->advertising |= ADVERTISED_Pause;
- lp->phy_dev->advertising |= ADVERTISED_Asym_Pause;
+ ndev->phydev->advertising |= ADVERTISED_Pause;
+ ndev->phydev->advertising |= ADVERTISED_Asym_Pause;
} else {
- lp->phy_dev->advertising &= ~ADVERTISED_Pause;
- lp->phy_dev->advertising &= ~ADVERTISED_Asym_Pause;
+ ndev->phydev->advertising &= ~ADVERTISED_Pause;
+ ndev->phydev->advertising &= ~ADVERTISED_Asym_Pause;
lp->flowcontrol.rx = pp->rx_pause;
lp->flowcontrol.tx = pp->tx_pause;
}
if (netif_running(ndev))
- ret = phy_start_aneg(lp->phy_dev);
+ ret = phy_start_aneg(ndev->phydev);
return ret;
}
@@ -2705,7 +2684,7 @@ static int dwceqos_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
dwceqos_get_tx_lpi_state(regval));
}
- return phy_ethtool_get_eee(lp->phy_dev, edata);
+ return phy_ethtool_get_eee(ndev->phydev, edata);
}
static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
@@ -2747,7 +2726,7 @@ static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
spin_unlock_irqrestore(&lp->hw_lock, flags);
}
- return phy_ethtool_set_eee(lp->phy_dev, edata);
+ return phy_ethtool_set_eee(ndev->phydev, edata);
}
static u32 dwceqos_get_msglevel(struct net_device *ndev)
@@ -2765,8 +2744,6 @@ static void dwceqos_set_msglevel(struct net_device *ndev, u32 msglevel)
}
static struct ethtool_ops dwceqos_ethtool_ops = {
- .get_settings = dwceqos_get_settings,
- .set_settings = dwceqos_set_settings,
.get_drvinfo = dwceqos_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_pauseparam = dwceqos_get_pauseparam,
@@ -2780,6 +2757,8 @@ static struct ethtool_ops dwceqos_ethtool_ops = {
.set_eee = dwceqos_set_eee,
.get_msglevel = dwceqos_get_msglevel,
.set_msglevel = dwceqos_set_msglevel,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static struct net_device_ops netdev_ops = {
@@ -2874,25 +2853,17 @@ static int dwceqos_probe(struct platform_device *pdev)
ndev->features = ndev->hw_features;
- netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);
-
- ret = register_netdev(ndev);
- if (ret) {
- dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
- goto err_out_clk_dis_aper;
- }
-
lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk");
if (IS_ERR(lp->phy_ref_clk)) {
dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
ret = PTR_ERR(lp->phy_ref_clk);
- goto err_out_unregister_netdev;
+ goto err_out_clk_dis_aper;
}
ret = clk_prepare_enable(lp->phy_ref_clk);
if (ret) {
dev_err(&pdev->dev, "Unable to enable device clock.\n");
- goto err_out_unregister_netdev;
+ goto err_out_clk_dis_aper;
}
lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node,
@@ -2901,7 +2872,7 @@ static int dwceqos_probe(struct platform_device *pdev)
ret = of_phy_register_fixed_link(lp->pdev->dev.of_node);
if (ret < 0) {
dev_err(&pdev->dev, "invalid fixed-link");
- goto err_out_unregister_netdev;
+ goto err_out_clk_dis_phy;
}
lp->phy_node = of_node_get(lp->pdev->dev.of_node);
@@ -2910,7 +2881,7 @@ static int dwceqos_probe(struct platform_device *pdev)
ret = of_get_phy_mode(lp->pdev->dev.of_node);
if (ret < 0) {
dev_err(&lp->pdev->dev, "error in getting phy i/f\n");
- goto err_out_unregister_clk_notifier;
+ goto err_out_clk_dis_phy;
}
lp->phy_interface = ret;
@@ -2918,14 +2889,14 @@ static int dwceqos_probe(struct platform_device *pdev)
ret = dwceqos_mii_init(lp);
if (ret) {
dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n");
- goto err_out_unregister_clk_notifier;
+ goto err_out_clk_dis_phy;
}
ret = dwceqos_mii_probe(ndev);
if (ret != 0) {
netdev_err(ndev, "mii_probe fail.\n");
ret = -ENXIO;
- goto err_out_unregister_clk_notifier;
+ goto err_out_clk_dis_phy;
}
dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
@@ -2934,7 +2905,8 @@ static int dwceqos_probe(struct platform_device *pdev)
(unsigned long)ndev);
tasklet_disable(&lp->tx_bdreclaim_tasklet);
- lp->txtimeout_handler_wq = create_singlethread_workqueue(DRIVER_NAME);
+ lp->txtimeout_handler_wq = alloc_workqueue(DRIVER_NAME,
+ WQ_MEM_RECLAIM, 0);
INIT_WORK(&lp->txtimeout_reinit, dwceqos_reinit_for_txtimeout);
platform_set_drvdata(pdev, ndev);
@@ -2942,7 +2914,7 @@ static int dwceqos_probe(struct platform_device *pdev)
if (ret) {
dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n",
ret);
- goto err_out_unregister_clk_notifier;
+ goto err_out_clk_dis_phy;
}
dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n",
pdev->id, ndev->base_addr, ndev->irq);
@@ -2952,18 +2924,24 @@ static int dwceqos_probe(struct platform_device *pdev)
if (ret) {
dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n",
ndev->irq, ret);
- goto err_out_unregister_clk_notifier;
+ goto err_out_clk_dis_phy;
}
if (netif_msg_probe(lp))
netdev_dbg(ndev, "net_local@%p\n", lp);
+ netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+ goto err_out_clk_dis_phy;
+ }
+
return 0;
-err_out_unregister_clk_notifier:
+err_out_clk_dis_phy:
clk_disable_unprepare(lp->phy_ref_clk);
-err_out_unregister_netdev:
- unregister_netdev(ndev);
err_out_clk_dis_aper:
clk_disable_unprepare(lp->apb_pclk);
err_out_free_netdev:
@@ -2981,8 +2959,8 @@ static int dwceqos_remove(struct platform_device *pdev)
if (ndev) {
lp = netdev_priv(ndev);
- if (lp->phy_dev)
- phy_disconnect(lp->phy_dev);
+ if (ndev->phydev)
+ phy_disconnect(ndev->phydev);
mdiobus_unregister(lp->mii_bus);
mdiobus_free(lp->mii_bus);
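
Besides dropping the driver-private phy_dev pointer in favour of ndev->phydev (which is what allows the generic ksettings ethtool helpers), the dwc_eth_qos probe path above is reordered so register_netdev() runs last: once it returns, ndo_open() may be called at any time, so everything it needs must already exist. A minimal sketch of that rule, under hypothetical names:

#include <linux/etherdevice.h>
#include <linux/platform_device.h>

static int sketch_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	int ret;

	ndev = alloc_etherdev(0);
	if (!ndev)
		return -ENOMEM;

	/* request clocks, IRQs, MDIO, NAPI, etc. here -- before the
	 * device is published to the stack */

	ret = register_netdev(ndev);	/* publish last */
	if (ret) {
		free_netdev(ndev);
		return ret;
	}
	return 0;
}
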
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index f7540dd6b..f5e931e50 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1987,7 +1987,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) {
err = pci_enable_msi(pdev);
if (err)
- pr_err("Can't eneble msi. error is %d\n", err);
+ pr_err("Can't enable msi. error is %d\n", err);
else
nic->irq_type = IRQ_MSI;
} else
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index e7f0b7d95..9904d740d 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -48,8 +48,7 @@ config TI_DAVINCI_CPDMA
will be called davinci_cpdma. This is recommended.
config TI_CPSW_PHY_SEL
- bool "TI CPSW Switch Phy sel Support"
- depends on TI_CPSW
+ bool
---help---
This driver supports configuring the mode of the phy connected to
the CPSW.
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 7eef45e6d..d300d536d 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -205,7 +205,6 @@ struct cpmac_priv {
dma_addr_t dma_ring;
void __iomem *regs;
struct mii_bus *mii_bus;
- struct phy_device *phy;
char phy_name[MII_BUS_ID_SIZE + 3];
int oldlink, oldspeed, oldduplex;
u32 msg_enable;
@@ -830,37 +829,12 @@ static void cpmac_tx_timeout(struct net_device *dev)
static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- struct cpmac_priv *priv = netdev_priv(dev);
-
if (!(netif_running(dev)))
return -EINVAL;
- if (!priv->phy)
+ if (!dev->phydev)
return -EINVAL;
- return phy_mii_ioctl(priv->phy, ifr, cmd);
-}
-
-static int cpmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct cpmac_priv *priv = netdev_priv(dev);
-
- if (priv->phy)
- return phy_ethtool_gset(priv->phy, cmd);
-
- return -EINVAL;
-}
-
-static int cpmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct cpmac_priv *priv = netdev_priv(dev);
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (priv->phy)
- return phy_ethtool_sset(priv->phy, cmd);
-
- return -EINVAL;
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
}
static void cpmac_get_ringparam(struct net_device *dev,
@@ -900,12 +874,12 @@ static void cpmac_get_drvinfo(struct net_device *dev,
}
static const struct ethtool_ops cpmac_ethtool_ops = {
- .get_settings = cpmac_get_settings,
- .set_settings = cpmac_set_settings,
.get_drvinfo = cpmac_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = cpmac_get_ringparam,
.set_ringparam = cpmac_set_ringparam,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static void cpmac_adjust_link(struct net_device *dev)
@@ -914,16 +888,16 @@ static void cpmac_adjust_link(struct net_device *dev)
int new_state = 0;
spin_lock(&priv->lock);
- if (priv->phy->link) {
+ if (dev->phydev->link) {
netif_tx_start_all_queues(dev);
- if (priv->phy->duplex != priv->oldduplex) {
+ if (dev->phydev->duplex != priv->oldduplex) {
new_state = 1;
- priv->oldduplex = priv->phy->duplex;
+ priv->oldduplex = dev->phydev->duplex;
}
- if (priv->phy->speed != priv->oldspeed) {
+ if (dev->phydev->speed != priv->oldspeed) {
new_state = 1;
- priv->oldspeed = priv->phy->speed;
+ priv->oldspeed = dev->phydev->speed;
}
if (!priv->oldlink) {
@@ -938,7 +912,7 @@ static void cpmac_adjust_link(struct net_device *dev)
}
if (new_state && netif_msg_link(priv) && net_ratelimit())
- phy_print_status(priv->phy);
+ phy_print_status(dev->phydev);
spin_unlock(&priv->lock);
}
@@ -1016,8 +990,8 @@ static int cpmac_open(struct net_device *dev)
cpmac_hw_start(dev);
napi_enable(&priv->napi);
- priv->phy->state = PHY_CHANGELINK;
- phy_start(priv->phy);
+ dev->phydev->state = PHY_CHANGELINK;
+ phy_start(dev->phydev);
return 0;
@@ -1032,8 +1006,10 @@ fail_desc:
kfree_skb(priv->rx_head[i].skb);
}
}
+ dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) * size,
+ priv->desc_ring, priv->dma_ring);
+
fail_alloc:
- kfree(priv->desc_ring);
iounmap(priv->regs);
fail_remap:
@@ -1053,7 +1029,7 @@ static int cpmac_stop(struct net_device *dev)
cancel_work_sync(&priv->reset_work);
napi_disable(&priv->napi);
- phy_stop(priv->phy);
+ phy_stop(dev->phydev);
cpmac_hw_stop(dev);
@@ -1106,6 +1082,7 @@ static int cpmac_probe(struct platform_device *pdev)
struct cpmac_priv *priv;
struct net_device *dev;
struct plat_cpmac_data *pdata;
+ struct phy_device *phydev = NULL;
pdata = dev_get_platdata(&pdev->dev);
@@ -1142,7 +1119,7 @@ static int cpmac_probe(struct platform_device *pdev)
mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
if (!mem) {
rc = -ENODEV;
- goto out;
+ goto fail;
}
dev->irq = platform_get_irq_byname(pdev, "irq");
@@ -1162,15 +1139,15 @@ static int cpmac_probe(struct platform_device *pdev)
snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT,
mdio_bus_id, phy_id);
- priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link,
- PHY_INTERFACE_MODE_MII);
+ phydev = phy_connect(dev, priv->phy_name, cpmac_adjust_link,
+ PHY_INTERFACE_MODE_MII);
- if (IS_ERR(priv->phy)) {
+ if (IS_ERR(phydev)) {
if (netif_msg_drv(priv))
dev_err(&pdev->dev, "Could not attach to PHY\n");
- rc = PTR_ERR(priv->phy);
- goto out;
+ rc = PTR_ERR(phydev);
+ goto fail;
}
rc = register_netdev(dev);
@@ -1189,7 +1166,6 @@ static int cpmac_probe(struct platform_device *pdev)
fail:
free_netdev(dev);
-out:
return rc;
}
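
The cpmac error-path fix above matters because the descriptor ring comes from dma_alloc_coherent(), so releasing it with kfree() corrupts the allocator state: coherent DMA memory must go back through dma_free_coherent() with the same size and DMA handle. Sketched pairing, with field names following the hunk (the allocation site is earlier in cpmac_open() and not shown here):

#include <linux/dma-mapping.h>

/* allocation (sketch of the earlier, unshown call): */
priv->desc_ring = dma_alloc_coherent(&dev->dev,
				     sizeof(struct cpmac_desc) * size,
				     &priv->dma_ring, GFP_KERNEL);

/* matching release on the unwind path, as added above: */
dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) * size,
		  priv->desc_ring, priv->dma_ring);
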
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 53190894f..f85d605e4 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -364,7 +364,6 @@ static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
}
struct cpsw_priv {
- spinlock_t lock;
struct platform_device *pdev;
struct net_device *ndev;
struct napi_struct napi_rx;
@@ -735,6 +734,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
netif_receive_skb(skb);
ndev->stats.rx_bytes += len;
ndev->stats.rx_packets++;
+ kmemleak_not_leak(new_skb);
} else {
ndev->stats.rx_dropped++;
new_skb = skb;
@@ -1244,6 +1244,7 @@ static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
slave->phy = NULL;
cpsw_ale_control_set(priv->ale, slave_port,
ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+ soft_reset_slave(slave);
}
static int cpsw_ndo_open(struct net_device *ndev)
@@ -1252,7 +1253,11 @@ static int cpsw_ndo_open(struct net_device *ndev)
int i, ret;
u32 reg;
- pm_runtime_get_sync(&priv->pdev->dev);
+ ret = pm_runtime_get_sync(&priv->pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&priv->pdev->dev);
+ return ret;
+ }
if (!cpsw_common_res_usage_state(priv))
cpsw_intr_disable(priv);
@@ -1278,6 +1283,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
if (!cpsw_common_res_usage_state(priv)) {
struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0);
+ int buf_num;
/* setup tx dma to fixed prio and zero offset */
cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
@@ -1305,10 +1311,8 @@ static int cpsw_ndo_open(struct net_device *ndev)
enable_irq(priv->irqs_table[0]);
}
- if (WARN_ON(!priv->data.rx_descs))
- priv->data.rx_descs = 128;
-
- for (i = 0; i < priv->data.rx_descs; i++) {
+ buf_num = cpdma_chan_get_rx_buf_num(priv->dma);
+ for (i = 0; i < buf_num; i++) {
struct sk_buff *skb;
ret = -ENOMEM;
@@ -1322,6 +1326,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
kfree_skb(skb);
goto err_cleanup;
}
+ kmemleak_not_leak(skb);
}
/* continue even if we didn't manage to submit all
* receive descs
@@ -1611,10 +1616,17 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
struct sockaddr *addr = (struct sockaddr *)p;
int flags = 0;
u16 vid = 0;
+ int ret;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
+ ret = pm_runtime_get_sync(&priv->pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&priv->pdev->dev);
+ return ret;
+ }
+
if (priv->data.dual_emac) {
vid = priv->slaves[priv->emac_port].port_vlan;
flags = ALE_VLAN;
@@ -1629,6 +1641,8 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
for_each_slave(priv, cpsw_set_slave_mac, priv);
+ pm_runtime_put(&priv->pdev->dev);
+
return 0;
}
@@ -1693,10 +1707,17 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
__be16 proto, u16 vid)
{
struct cpsw_priv *priv = netdev_priv(ndev);
+ int ret;
if (vid == priv->data.default_vlan)
return 0;
+ ret = pm_runtime_get_sync(&priv->pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&priv->pdev->dev);
+ return ret;
+ }
+
if (priv->data.dual_emac) {
/* In dual EMAC, reserved VLAN id should not be used for
* creating VLAN interfaces as this can break the dual
@@ -1711,7 +1732,10 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
}
dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
- return cpsw_add_vlan_ale_entry(priv, vid);
+ ret = cpsw_add_vlan_ale_entry(priv, vid);
+
+ pm_runtime_put(&priv->pdev->dev);
+ return ret;
}
static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
@@ -1723,6 +1747,12 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
if (vid == priv->data.default_vlan)
return 0;
+ ret = pm_runtime_get_sync(&priv->pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&priv->pdev->dev);
+ return ret;
+ }
+
if (priv->data.dual_emac) {
int i;
@@ -1742,8 +1772,10 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
if (ret != 0)
return ret;
- return cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast,
- 0, ALE_VLAN, vid);
+ ret = cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast,
+ 0, ALE_VLAN, vid);
+ pm_runtime_put(&priv->pdev->dev);
+ return ret;
}
static const struct net_device_ops cpsw_netdev_ops = {
@@ -1902,10 +1934,33 @@ static int cpsw_set_pauseparam(struct net_device *ndev,
priv->tx_pause = pause->tx_pause ? true : false;
for_each_slave(priv, _cpsw_adjust_link, priv, &link);
-
return 0;
}
+static int cpsw_ethtool_op_begin(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = pm_runtime_get_sync(&priv->pdev->dev);
+ if (ret < 0) {
+ cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);
+ pm_runtime_put_noidle(&priv->pdev->dev);
+ }
+
+ return ret;
+}
+
+static void cpsw_ethtool_op_complete(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = pm_runtime_put(&priv->pdev->dev);
+ if (ret < 0)
+ cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
+}
+
static const struct ethtool_ops cpsw_ethtool_ops = {
.get_drvinfo = cpsw_get_drvinfo,
.get_msglevel = cpsw_get_msglevel,
@@ -1925,6 +1980,8 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
.set_wol = cpsw_set_wol,
.get_regs_len = cpsw_get_regs_len,
.get_regs = cpsw_get_regs,
+ .begin = cpsw_ethtool_op_begin,
+ .complete = cpsw_ethtool_op_complete,
};
static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
@@ -1999,12 +2056,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
data->bd_ram_size = prop;
- if (of_property_read_u32(node, "rx_descs", &prop)) {
- dev_err(&pdev->dev, "Missing rx_descs property in the DT.\n");
- return -EINVAL;
- }
- data->rx_descs = prop;
-
if (of_property_read_u32(node, "mac_control", &prop)) {
dev_err(&pdev->dev, "Missing mac_control property in the DT.\n");
return -EINVAL;
@@ -2022,7 +2073,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
if (ret)
dev_warn(&pdev->dev, "Doesn't have any child node\n");
- for_each_child_of_node(node, slave_node) {
+ for_each_available_child_of_node(node, slave_node) {
struct cpsw_slave_data *slave_data = data->slave_data + i;
const void *mac_addr = NULL;
int lenp;
@@ -2124,7 +2175,6 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
}
priv_sl2 = netdev_priv(ndev);
- spin_lock_init(&priv_sl2->lock);
priv_sl2->data = *data;
priv_sl2->pdev = pdev;
priv_sl2->ndev = ndev;
@@ -2243,7 +2293,6 @@ static int cpsw_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
priv = netdev_priv(ndev);
- spin_lock_init(&priv->lock);
priv->pdev = pdev;
priv->ndev = ndev;
priv->dev = &ndev->dev;
@@ -2321,7 +2370,11 @@ static int cpsw_probe(struct platform_device *pdev)
/* Need to enable clocks with runtime PM api to access module
* registers
*/
- pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
+ goto clean_runtime_disable_ret;
+ }
priv->version = readl(&priv->regs->id_ver);
pm_runtime_put_sync(&pdev->dev);
@@ -2513,19 +2566,17 @@ clean_ndev_ret:
return ret;
}
-static int cpsw_remove_child_device(struct device *dev, void *c)
-{
- struct platform_device *pdev = to_platform_device(dev);
-
- of_device_unregister(pdev);
-
- return 0;
-}
-
static int cpsw_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct cpsw_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
+ return ret;
+ }
if (priv->data.dual_emac)
unregister_netdev(cpsw_get_slave_ndev(priv, 1));
@@ -2533,8 +2584,9 @@ static int cpsw_remove(struct platform_device *pdev)
cpsw_ale_destroy(priv->ale);
cpdma_ctlr_destroy(priv->dma);
+ of_platform_depopulate(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- device_for_each_child(&pdev->dev, NULL, cpsw_remove_child_device);
if (priv->data.dual_emac)
free_netdev(cpsw_get_slave_ndev(priv, 1));
free_netdev(ndev);
@@ -2554,16 +2606,12 @@ static int cpsw_suspend(struct device *dev)
for (i = 0; i < priv->data.slaves; i++) {
if (netif_running(priv->slaves[i].ndev))
cpsw_ndo_stop(priv->slaves[i].ndev);
- soft_reset_slave(priv->slaves + i);
}
} else {
if (netif_running(ndev))
cpsw_ndo_stop(ndev);
- for_each_slave(priv, soft_reset_slave);
}
- pm_runtime_put_sync(&pdev->dev);
-
/* Select sleep pin state */
pinctrl_pm_select_sleep_state(&pdev->dev);
@@ -2576,8 +2624,6 @@ static int cpsw_resume(struct device *dev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct cpsw_priv *priv = netdev_priv(ndev);
- pm_runtime_get_sync(&pdev->dev);
-
/* Select default pin state */
pinctrl_pm_select_default_state(&pdev->dev);
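
The recurring pattern in this cpsw patch: pm_runtime_get_sync() raises the usage count even when it fails, so every error path must drop the reference with pm_runtime_put_noidle(), or the device can never runtime-suspend again. The idiom in isolation:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int sketch_touch_registers(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* the failed get still incremented the usage count */
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... access module registers safely here ... */

	pm_runtime_put(dev);
	return 0;
}
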
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index e50afd1b2..16b54c6f3 100644
--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -35,7 +35,6 @@ struct cpsw_platform_data {
u32 cpts_clock_shift; /* convert input clock ticks to nanoseconds */
u32 ale_entries; /* ale table size */
u32 bd_ram_size; /*buffer descriptor ram size */
- u32 rx_descs; /* Number of Rx Descriptios */
u32 mac_control; /* Mac control register */
u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode*/
bool dual_emac; /* Enable Dual EMAC mode */
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 18bf3a8fd..19e5f32a8 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -21,7 +21,7 @@
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>
-
+#include <linux/genalloc.h>
#include "davinci_cpdma.h"
/* DMA Registers */
@@ -87,9 +87,8 @@ struct cpdma_desc_pool {
void *cpumap; /* dma_alloc map */
int desc_size, mem_size;
int num_desc, used_desc;
- unsigned long *bitmap;
struct device *dev;
- spinlock_t lock;
+ struct gen_pool *gen_pool;
};
enum cpdma_state {
@@ -98,8 +97,6 @@ enum cpdma_state {
CPDMA_STATE_TEARDOWN,
};
-static const char *cpdma_state_str[] = { "idle", "active", "teardown" };
-
struct cpdma_ctlr {
enum cpdma_state state;
struct cpdma_params params;
@@ -117,6 +114,7 @@ struct cpdma_chan {
int chan_num;
spinlock_t lock;
int count;
+ u32 desc_num;
u32 mask;
cpdma_handler_fn handler;
enum dma_data_direction dir;
@@ -145,6 +143,19 @@ struct cpdma_chan {
(directed << CPDMA_TO_PORT_SHIFT)); \
} while (0)
+static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
+{
+ if (!pool)
+ return;
+
+ WARN_ON(pool->used_desc);
+ if (pool->cpumap)
+ dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
+ pool->phys);
+ else
+ iounmap(pool->iomap);
+}
+
/*
* Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci
* emac) have dedicated on-chip memory for these descriptors. Some other
@@ -155,24 +166,25 @@ static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
int size, int align)
{
- int bitmap_size;
struct cpdma_desc_pool *pool;
+ int ret;
pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
if (!pool)
- goto fail;
-
- spin_lock_init(&pool->lock);
+ goto gen_pool_create_fail;
pool->dev = dev;
pool->mem_size = size;
pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align);
pool->num_desc = size / pool->desc_size;
- bitmap_size = (pool->num_desc / BITS_PER_LONG) * sizeof(long);
- pool->bitmap = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
- if (!pool->bitmap)
- goto fail;
+ pool->gen_pool = devm_gen_pool_create(dev, ilog2(pool->desc_size), -1,
+ "cpdma");
+ if (IS_ERR(pool->gen_pool)) {
+ dev_err(dev, "pool create failed %ld\n",
+ PTR_ERR(pool->gen_pool));
+ goto gen_pool_create_fail;
+ }
if (phys) {
pool->phys = phys;
@@ -185,24 +197,22 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
}
- if (pool->iomap)
- return pool;
-fail:
- return NULL;
-}
+ if (!pool->iomap)
+ goto gen_pool_create_fail;
-static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
-{
- if (!pool)
- return;
-
- WARN_ON(pool->used_desc);
- if (pool->cpumap) {
- dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
- pool->phys);
- } else {
- iounmap(pool->iomap);
+ ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
+ pool->phys, pool->mem_size, -1);
+ if (ret < 0) {
+ dev_err(dev, "pool add failed %d\n", ret);
+ goto gen_pool_add_virt_fail;
}
+
+ return pool;
+
+gen_pool_add_virt_fail:
+ cpdma_desc_pool_destroy(pool);
+gen_pool_create_fail:
+ return NULL;
}
static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
@@ -220,47 +230,23 @@ desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
}
static struct cpdma_desc __iomem *
-cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx)
+cpdma_desc_alloc(struct cpdma_desc_pool *pool)
{
- unsigned long flags;
- int index;
- int desc_start;
- int desc_end;
struct cpdma_desc __iomem *desc = NULL;
- spin_lock_irqsave(&pool->lock, flags);
-
- if (is_rx) {
- desc_start = 0;
- desc_end = pool->num_desc/2;
- } else {
- desc_start = pool->num_desc/2;
- desc_end = pool->num_desc;
- }
-
- index = bitmap_find_next_zero_area(pool->bitmap,
- desc_end, desc_start, num_desc, 0);
- if (index < desc_end) {
- bitmap_set(pool->bitmap, index, num_desc);
- desc = pool->iomap + pool->desc_size * index;
+ desc = (struct cpdma_desc __iomem *)gen_pool_alloc(pool->gen_pool,
+ pool->desc_size);
+ if (desc)
pool->used_desc++;
- }
- spin_unlock_irqrestore(&pool->lock, flags);
return desc;
}
static void cpdma_desc_free(struct cpdma_desc_pool *pool,
struct cpdma_desc __iomem *desc, int num_desc)
{
- unsigned long flags, index;
-
- index = ((unsigned long)desc - (unsigned long)pool->iomap) /
- pool->desc_size;
- spin_lock_irqsave(&pool->lock, flags);
- bitmap_clear(pool->bitmap, index, num_desc);
+ gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
pool->used_desc--;
- spin_unlock_irqrestore(&pool->lock, flags);
}
struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
@@ -369,86 +355,13 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);
-int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
-{
- struct device *dev = ctlr->dev;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&ctlr->lock, flags);
-
- dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]);
-
- dev_info(dev, "CPDMA: txidver: %x",
- dma_reg_read(ctlr, CPDMA_TXIDVER));
- dev_info(dev, "CPDMA: txcontrol: %x",
- dma_reg_read(ctlr, CPDMA_TXCONTROL));
- dev_info(dev, "CPDMA: txteardown: %x",
- dma_reg_read(ctlr, CPDMA_TXTEARDOWN));
- dev_info(dev, "CPDMA: rxidver: %x",
- dma_reg_read(ctlr, CPDMA_RXIDVER));
- dev_info(dev, "CPDMA: rxcontrol: %x",
- dma_reg_read(ctlr, CPDMA_RXCONTROL));
- dev_info(dev, "CPDMA: softreset: %x",
- dma_reg_read(ctlr, CPDMA_SOFTRESET));
- dev_info(dev, "CPDMA: rxteardown: %x",
- dma_reg_read(ctlr, CPDMA_RXTEARDOWN));
- dev_info(dev, "CPDMA: txintstatraw: %x",
- dma_reg_read(ctlr, CPDMA_TXINTSTATRAW));
- dev_info(dev, "CPDMA: txintstatmasked: %x",
- dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED));
- dev_info(dev, "CPDMA: txintmaskset: %x",
- dma_reg_read(ctlr, CPDMA_TXINTMASKSET));
- dev_info(dev, "CPDMA: txintmaskclear: %x",
- dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR));
- dev_info(dev, "CPDMA: macinvector: %x",
- dma_reg_read(ctlr, CPDMA_MACINVECTOR));
- dev_info(dev, "CPDMA: maceoivector: %x",
- dma_reg_read(ctlr, CPDMA_MACEOIVECTOR));
- dev_info(dev, "CPDMA: rxintstatraw: %x",
- dma_reg_read(ctlr, CPDMA_RXINTSTATRAW));
- dev_info(dev, "CPDMA: rxintstatmasked: %x",
- dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED));
- dev_info(dev, "CPDMA: rxintmaskset: %x",
- dma_reg_read(ctlr, CPDMA_RXINTMASKSET));
- dev_info(dev, "CPDMA: rxintmaskclear: %x",
- dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR));
- dev_info(dev, "CPDMA: dmaintstatraw: %x",
- dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW));
- dev_info(dev, "CPDMA: dmaintstatmasked: %x",
- dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED));
- dev_info(dev, "CPDMA: dmaintmaskset: %x",
- dma_reg_read(ctlr, CPDMA_DMAINTMASKSET));
- dev_info(dev, "CPDMA: dmaintmaskclear: %x",
- dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR));
-
- if (!ctlr->params.has_ext_regs) {
- dev_info(dev, "CPDMA: dmacontrol: %x",
- dma_reg_read(ctlr, CPDMA_DMACONTROL));
- dev_info(dev, "CPDMA: dmastatus: %x",
- dma_reg_read(ctlr, CPDMA_DMASTATUS));
- dev_info(dev, "CPDMA: rxbuffofs: %x",
- dma_reg_read(ctlr, CPDMA_RXBUFFOFS));
- }
-
- for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
- if (ctlr->channels[i])
- cpdma_chan_dump(ctlr->channels[i]);
-
- spin_unlock_irqrestore(&ctlr->lock, flags);
- return 0;
-}
-EXPORT_SYMBOL_GPL(cpdma_ctlr_dump);
-
int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
- unsigned long flags;
int ret = 0, i;
if (!ctlr)
return -EINVAL;
- spin_lock_irqsave(&ctlr->lock, flags);
if (ctlr->state != CPDMA_STATE_IDLE)
cpdma_ctlr_stop(ctlr);
@@ -456,7 +369,6 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
cpdma_chan_destroy(ctlr->channels[i]);
cpdma_desc_pool_destroy(ctlr->pool);
- spin_unlock_irqrestore(&ctlr->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);
@@ -516,6 +428,7 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
chan->state = CPDMA_STATE_IDLE;
chan->chan_num = chan_num;
chan->handler = handler;
+ chan->desc_num = ctlr->pool->num_desc / 2;
if (is_rx_chan(chan)) {
chan->hdp = ctlr->params.rxhdp + offset;
@@ -543,6 +456,12 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);
+int cpdma_chan_get_rx_buf_num(struct cpdma_ctlr *ctlr)
+{
+ return ctlr->pool->num_desc / 2;
+}
+EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num);
+
int cpdma_chan_destroy(struct cpdma_chan *chan)
{
struct cpdma_ctlr *ctlr;
@@ -574,54 +493,6 @@ int cpdma_chan_get_stats(struct cpdma_chan *chan,
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);
-int cpdma_chan_dump(struct cpdma_chan *chan)
-{
- unsigned long flags;
- struct device *dev = chan->ctlr->dev;
-
- spin_lock_irqsave(&chan->lock, flags);
-
- dev_info(dev, "channel %d (%s %d) state %s",
- chan->chan_num, is_rx_chan(chan) ? "rx" : "tx",
- chan_linear(chan), cpdma_state_str[chan->state]);
- dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp));
- dev_info(dev, "\tcp: %x\n", chan_read(chan, cp));
- if (chan->rxfree) {
- dev_info(dev, "\trxfree: %x\n",
- chan_read(chan, rxfree));
- }
-
- dev_info(dev, "\tstats head_enqueue: %d\n",
- chan->stats.head_enqueue);
- dev_info(dev, "\tstats tail_enqueue: %d\n",
- chan->stats.tail_enqueue);
- dev_info(dev, "\tstats pad_enqueue: %d\n",
- chan->stats.pad_enqueue);
- dev_info(dev, "\tstats misqueued: %d\n",
- chan->stats.misqueued);
- dev_info(dev, "\tstats desc_alloc_fail: %d\n",
- chan->stats.desc_alloc_fail);
- dev_info(dev, "\tstats pad_alloc_fail: %d\n",
- chan->stats.pad_alloc_fail);
- dev_info(dev, "\tstats runt_receive_buff: %d\n",
- chan->stats.runt_receive_buff);
- dev_info(dev, "\tstats runt_transmit_buff: %d\n",
- chan->stats.runt_transmit_buff);
- dev_info(dev, "\tstats empty_dequeue: %d\n",
- chan->stats.empty_dequeue);
- dev_info(dev, "\tstats busy_dequeue: %d\n",
- chan->stats.busy_dequeue);
- dev_info(dev, "\tstats good_dequeue: %d\n",
- chan->stats.good_dequeue);
- dev_info(dev, "\tstats requeue: %d\n",
- chan->stats.requeue);
- dev_info(dev, "\tstats teardown_dequeue: %d\n",
- chan->stats.teardown_dequeue);
-
- spin_unlock_irqrestore(&chan->lock, flags);
- return 0;
-}
-
static void __cpdma_chan_submit(struct cpdma_chan *chan,
struct cpdma_desc __iomem *desc)
{
@@ -675,7 +546,13 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
goto unlock_ret;
}
- desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan));
+ if (chan->count >= chan->desc_num) {
+ chan->stats.desc_alloc_fail++;
+ ret = -ENOMEM;
+ goto unlock_ret;
+ }
+
+ desc = cpdma_desc_alloc(ctlr->pool);
if (!desc) {
chan->stats.desc_alloc_fail++;
ret = -ENOMEM;
@@ -721,24 +598,16 @@ EXPORT_SYMBOL_GPL(cpdma_chan_submit);
bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
- unsigned long flags;
- int index;
- bool ret;
struct cpdma_ctlr *ctlr = chan->ctlr;
struct cpdma_desc_pool *pool = ctlr->pool;
+ bool free_tx_desc;
+ unsigned long flags;
- spin_lock_irqsave(&pool->lock, flags);
-
- index = bitmap_find_next_zero_area(pool->bitmap,
- pool->num_desc, pool->num_desc/2, 1, 0);
-
- if (index < pool->num_desc)
- ret = true;
- else
- ret = false;
-
- spin_unlock_irqrestore(&pool->lock, flags);
- return ret;
+ spin_lock_irqsave(&chan->lock, flags);
+ free_tx_desc = (chan->count < chan->desc_num) &&
+ gen_pool_avail(pool->gen_pool);
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return free_tx_desc;
}
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);
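
The cpdma rework above replaces the hand-rolled bitmap allocator (and its fixed rx/tx halves) with genalloc plus a per-channel desc_num quota. A minimal sketch of the genalloc lifecycle as used here, assuming a device-managed pool over an already-mapped descriptor region:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/log2.h>

/* Sketch: one pool granule per descriptor, backed by the mapped
 * descriptor RAM; gen_pool_alloc()/gen_pool_free() then replace the
 * old bitmap search under the pool's own internal locking. */
static struct gen_pool *sketch_pool_init(struct device *dev,
					 void __iomem *iomap,
					 phys_addr_t phys,
					 size_t mem_size, size_t desc_size)
{
	struct gen_pool *pool;

	pool = devm_gen_pool_create(dev, ilog2(desc_size), -1, "cpdma");
	if (IS_ERR(pool))
		return pool;

	if (gen_pool_add_virt(pool, (unsigned long)iomap, phys,
			      mem_size, -1) < 0)
		return ERR_PTR(-ENOMEM);

	return pool;
}
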
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h
index 86dee487f..4b46cd6e9 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.h
+++ b/drivers/net/ethernet/ti/davinci_cpdma.h
@@ -77,14 +77,13 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params);
int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr);
int cpdma_ctlr_start(struct cpdma_ctlr *ctlr);
int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr);
-int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr);
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
cpdma_handler_fn handler);
+int cpdma_chan_get_rx_buf_num(struct cpdma_ctlr *ctlr);
int cpdma_chan_destroy(struct cpdma_chan *chan);
int cpdma_chan_start(struct cpdma_chan *chan);
int cpdma_chan_stop(struct cpdma_chan *chan);
-int cpdma_chan_dump(struct cpdma_chan *chan);
int cpdma_chan_get_stats(struct cpdma_chan *chan,
struct cpdma_chan_stats *stats);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index f56d66e6e..727a79f3c 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -348,7 +348,6 @@ struct emac_priv {
u32 rx_addr_type;
const char *phy_id;
struct device_node *phy_node;
- struct phy_device *phydev;
spinlock_t lock;
/*platform specific members*/
void (*int_enable) (void);
@@ -380,97 +379,6 @@ static char *emac_rxhost_errcodes[16] = {
#define emac_ctrl_write(reg, val) iowrite32(val, (priv->ctrl_base + (reg)))
/**
- * emac_dump_regs - Dump important EMAC registers to debug terminal
- * @priv: The DaVinci EMAC private adapter structure
- *
- * Executes ethtool set cmd & sets phy mode
- *
- */
-static void emac_dump_regs(struct emac_priv *priv)
-{
- struct device *emac_dev = &priv->ndev->dev;
-
- /* Print important registers in EMAC */
- dev_info(emac_dev, "EMAC Basic registers\n");
- if (priv->version == EMAC_VERSION_1) {
- dev_info(emac_dev, "EMAC: EWCTL: %08X, EWINTTCNT: %08X\n",
- emac_ctrl_read(EMAC_CTRL_EWCTL),
- emac_ctrl_read(EMAC_CTRL_EWINTTCNT));
- }
- dev_info(emac_dev, "EMAC: EmuControl:%08X, FifoControl: %08X\n",
- emac_read(EMAC_EMCONTROL), emac_read(EMAC_FIFOCONTROL));
- dev_info(emac_dev, "EMAC: MBPEnable:%08X, RXUnicastSet: %08X, "\
- "RXMaxLen=%08X\n", emac_read(EMAC_RXMBPENABLE),
- emac_read(EMAC_RXUNICASTSET), emac_read(EMAC_RXMAXLEN));
- dev_info(emac_dev, "EMAC: MacControl:%08X, MacStatus: %08X, "\
- "MacConfig=%08X\n", emac_read(EMAC_MACCONTROL),
- emac_read(EMAC_MACSTATUS), emac_read(EMAC_MACCONFIG));
- dev_info(emac_dev, "EMAC Statistics\n");
- dev_info(emac_dev, "EMAC: rx_good_frames:%d\n",
- emac_read(EMAC_RXGOODFRAMES));
- dev_info(emac_dev, "EMAC: rx_broadcast_frames:%d\n",
- emac_read(EMAC_RXBCASTFRAMES));
- dev_info(emac_dev, "EMAC: rx_multicast_frames:%d\n",
- emac_read(EMAC_RXMCASTFRAMES));
- dev_info(emac_dev, "EMAC: rx_pause_frames:%d\n",
- emac_read(EMAC_RXPAUSEFRAMES));
- dev_info(emac_dev, "EMAC: rx_crcerrors:%d\n",
- emac_read(EMAC_RXCRCERRORS));
- dev_info(emac_dev, "EMAC: rx_align_code_errors:%d\n",
- emac_read(EMAC_RXALIGNCODEERRORS));
- dev_info(emac_dev, "EMAC: rx_oversized_frames:%d\n",
- emac_read(EMAC_RXOVERSIZED));
- dev_info(emac_dev, "EMAC: rx_jabber_frames:%d\n",
- emac_read(EMAC_RXJABBER));
- dev_info(emac_dev, "EMAC: rx_undersized_frames:%d\n",
- emac_read(EMAC_RXUNDERSIZED));
- dev_info(emac_dev, "EMAC: rx_fragments:%d\n",
- emac_read(EMAC_RXFRAGMENTS));
- dev_info(emac_dev, "EMAC: rx_filtered_frames:%d\n",
- emac_read(EMAC_RXFILTERED));
- dev_info(emac_dev, "EMAC: rx_qos_filtered_frames:%d\n",
- emac_read(EMAC_RXQOSFILTERED));
- dev_info(emac_dev, "EMAC: rx_octets:%d\n",
- emac_read(EMAC_RXOCTETS));
- dev_info(emac_dev, "EMAC: tx_goodframes:%d\n",
- emac_read(EMAC_TXGOODFRAMES));
- dev_info(emac_dev, "EMAC: tx_bcastframes:%d\n",
- emac_read(EMAC_TXBCASTFRAMES));
- dev_info(emac_dev, "EMAC: tx_mcastframes:%d\n",
- emac_read(EMAC_TXMCASTFRAMES));
- dev_info(emac_dev, "EMAC: tx_pause_frames:%d\n",
- emac_read(EMAC_TXPAUSEFRAMES));
- dev_info(emac_dev, "EMAC: tx_deferred_frames:%d\n",
- emac_read(EMAC_TXDEFERRED));
- dev_info(emac_dev, "EMAC: tx_collision_frames:%d\n",
- emac_read(EMAC_TXCOLLISION));
- dev_info(emac_dev, "EMAC: tx_single_coll_frames:%d\n",
- emac_read(EMAC_TXSINGLECOLL));
- dev_info(emac_dev, "EMAC: tx_mult_coll_frames:%d\n",
- emac_read(EMAC_TXMULTICOLL));
- dev_info(emac_dev, "EMAC: tx_excessive_collisions:%d\n",
- emac_read(EMAC_TXEXCESSIVECOLL));
- dev_info(emac_dev, "EMAC: tx_late_collisions:%d\n",
- emac_read(EMAC_TXLATECOLL));
- dev_info(emac_dev, "EMAC: tx_underrun:%d\n",
- emac_read(EMAC_TXUNDERRUN));
- dev_info(emac_dev, "EMAC: tx_carrier_sense_errors:%d\n",
- emac_read(EMAC_TXCARRIERSENSE));
- dev_info(emac_dev, "EMAC: tx_octets:%d\n",
- emac_read(EMAC_TXOCTETS));
- dev_info(emac_dev, "EMAC: net_octets:%d\n",
- emac_read(EMAC_NETOCTETS));
- dev_info(emac_dev, "EMAC: rx_sof_overruns:%d\n",
- emac_read(EMAC_RXSOFOVERRUNS));
- dev_info(emac_dev, "EMAC: rx_mof_overruns:%d\n",
- emac_read(EMAC_RXMOFOVERRUNS));
- dev_info(emac_dev, "EMAC: rx_dma_overruns:%d\n",
- emac_read(EMAC_RXDMAOVERRUNS));
-
- cpdma_ctlr_dump(priv->dma);
-}
-
-/**
* emac_get_drvinfo - Get EMAC driver information
* @ndev: The DaVinci EMAC network adapter
* @info: ethtool info structure containing name and version
@@ -486,43 +394,6 @@ static void emac_get_drvinfo(struct net_device *ndev,
}
/**
- * emac_get_settings - Get EMAC settings
- * @ndev: The DaVinci EMAC network adapter
- * @ecmd: ethtool command
- *
- * Executes ethool get command
- *
- */
-static int emac_get_settings(struct net_device *ndev,
- struct ethtool_cmd *ecmd)
-{
- struct emac_priv *priv = netdev_priv(ndev);
- if (priv->phydev)
- return phy_ethtool_gset(priv->phydev, ecmd);
- else
- return -EOPNOTSUPP;
-
-}
-
-/**
- * emac_set_settings - Set EMAC settings
- * @ndev: The DaVinci EMAC network adapter
- * @ecmd: ethtool command
- *
- * Executes ethool set command
- *
- */
-static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
-{
- struct emac_priv *priv = netdev_priv(ndev);
- if (priv->phydev)
- return phy_ethtool_sset(priv->phydev, ecmd);
- else
- return -EOPNOTSUPP;
-
-}
-
-/**
* emac_get_coalesce - Get interrupt coalesce settings for this device
* @ndev : The DaVinci EMAC network adapter
* @coal : ethtool coalesce settings structure
@@ -625,12 +496,12 @@ static int emac_set_coalesce(struct net_device *ndev,
*/
static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = emac_get_drvinfo,
- .get_settings = emac_get_settings,
- .set_settings = emac_set_settings,
.get_link = ethtool_op_get_link,
.get_coalesce = emac_get_coalesce,
.set_coalesce = emac_set_coalesce,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
/**
@@ -651,8 +522,8 @@ static void emac_update_phystatus(struct emac_priv *priv)
mac_control = emac_read(EMAC_MACCONTROL);
cur_duplex = (mac_control & EMAC_MACCONTROL_FULLDUPLEXEN) ?
DUPLEX_FULL : DUPLEX_HALF;
- if (priv->phydev)
- new_duplex = priv->phydev->duplex;
+ if (ndev->phydev)
+ new_duplex = ndev->phydev->duplex;
else
new_duplex = DUPLEX_FULL;
@@ -1134,8 +1005,6 @@ static void emac_dev_tx_timeout(struct net_device *ndev)
if (netif_msg_tx_err(priv))
dev_err(emac_dev, "DaVinci EMAC: xmit timeout, restarting TX");
- emac_dump_regs(priv);
-
ndev->stats.tx_errors++;
emac_int_disable(priv);
cpdma_chan_stop(priv->txchan);
@@ -1454,7 +1323,7 @@ static void emac_poll_controller(struct net_device *ndev)
static void emac_adjust_link(struct net_device *ndev)
{
struct emac_priv *priv = netdev_priv(ndev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = ndev->phydev;
unsigned long flags;
int new_state = 0;
@@ -1483,7 +1352,7 @@ static void emac_adjust_link(struct net_device *ndev)
}
if (new_state) {
emac_update_phystatus(priv);
- phy_print_status(priv->phydev);
+ phy_print_status(ndev->phydev);
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1505,15 +1374,13 @@ static void emac_adjust_link(struct net_device *ndev)
*/
static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
{
- struct emac_priv *priv = netdev_priv(ndev);
-
if (!(netif_running(ndev)))
return -EINVAL;
/* TODO: Add phy read and write and private statistics get feature */
- if (priv->phydev)
- return phy_mii_ioctl(priv->phydev, ifrq, cmd);
+ if (ndev->phydev)
+ return phy_mii_ioctl(ndev->phydev, ifrq, cmd);
else
return -EOPNOTSUPP;
}
@@ -1542,6 +1409,7 @@ static int emac_dev_open(struct net_device *ndev)
int res_num = 0, irq_num = 0;
int i = 0;
struct emac_priv *priv = netdev_priv(ndev);
+ struct phy_device *phydev = NULL;
ret = pm_runtime_get_sync(&priv->pdev->dev);
if (ret < 0) {
@@ -1607,12 +1475,10 @@ static int emac_dev_open(struct net_device *ndev)
cpdma_ctlr_start(priv->dma);
- priv->phydev = NULL;
-
if (priv->phy_node) {
- priv->phydev = of_phy_connect(ndev, priv->phy_node,
- &emac_adjust_link, 0, 0);
- if (!priv->phydev) {
+ phydev = of_phy_connect(ndev, priv->phy_node,
+ &emac_adjust_link, 0, 0);
+ if (!phydev) {
dev_err(emac_dev, "could not connect to phy %s\n",
priv->phy_node->full_name);
ret = -ENODEV;
@@ -1621,7 +1487,7 @@ static int emac_dev_open(struct net_device *ndev)
}
/* use the first phy on the bus if pdata did not give us a phy id */
- if (!priv->phydev && !priv->phy_id) {
+ if (!phydev && !priv->phy_id) {
struct device *phy;
phy = bus_find_device(&mdio_bus_type, NULL, NULL,
@@ -1630,16 +1496,15 @@ static int emac_dev_open(struct net_device *ndev)
priv->phy_id = dev_name(phy);
}
- if (!priv->phydev && priv->phy_id && *priv->phy_id) {
- priv->phydev = phy_connect(ndev, priv->phy_id,
- &emac_adjust_link,
- PHY_INTERFACE_MODE_MII);
+ if (!phydev && priv->phy_id && *priv->phy_id) {
+ phydev = phy_connect(ndev, priv->phy_id,
+ &emac_adjust_link,
+ PHY_INTERFACE_MODE_MII);
- if (IS_ERR(priv->phydev)) {
+ if (IS_ERR(phydev)) {
dev_err(emac_dev, "could not connect to phy %s\n",
priv->phy_id);
- ret = PTR_ERR(priv->phydev);
- priv->phydev = NULL;
+ ret = PTR_ERR(phydev);
goto err;
}
@@ -1647,10 +1512,10 @@ static int emac_dev_open(struct net_device *ndev)
priv->speed = 0;
priv->duplex = ~0;
- phy_attached_info(priv->phydev);
+ phy_attached_info(phydev);
}
- if (!priv->phydev) {
+ if (!phydev) {
/* No PHY , fix the link, speed and duplex settings */
dev_notice(emac_dev, "no phy, defaulting to 100/full\n");
priv->link = 1;
@@ -1659,14 +1524,11 @@ static int emac_dev_open(struct net_device *ndev)
emac_update_phystatus(priv);
}
- if (!netif_running(ndev)) /* debug only - to avoid compiler warning */
- emac_dump_regs(priv);
-
if (netif_msg_drv(priv))
dev_notice(emac_dev, "DaVinci EMAC: Opened %s\n", ndev->name);
- if (priv->phydev)
- phy_start(priv->phydev);
+ if (phydev)
+ phy_start(phydev);
return 0;
@@ -1717,8 +1579,8 @@ static int emac_dev_stop(struct net_device *ndev)
cpdma_ctlr_stop(priv->dma);
emac_write(EMAC_SOFTRESET, 1);
- if (priv->phydev)
- phy_disconnect(priv->phydev);
+ if (ndev->phydev)
+ phy_disconnect(ndev->phydev);
/* Free IRQ */
while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) {
@@ -2102,6 +1964,7 @@ static int davinci_emac_remove(struct platform_device *pdev)
cpdma_ctlr_destroy(priv->dma);
unregister_netdev(ndev);
+ of_node_put(priv->phy_node);
pm_runtime_disable(&pdev->dev);
free_netdev(ndev);
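
davinci_emac_remove() now drops the phy_node reference with of_node_put(): of_parse_phandle() at probe time returns the node with its refcount elevated, and that reference would otherwise leak on driver unbind. The pairing, reduced to a sketch (priv layout and function names hypothetical):

	#include <linux/of.h>
	#include <linux/platform_device.h>

	struct example_priv {
		struct device_node *phy_node;
	};

	static void example_parse(struct platform_device *pdev,
				  struct example_priv *priv)
	{
		/* of_parse_phandle() returns the node with an extra ref */
		priv->phy_node = of_parse_phandle(pdev->dev.of_node,
						  "phy-handle", 0);
	}

	static void example_teardown(struct example_priv *priv)
	{
		of_node_put(priv->phy_node);	/* NULL-safe; drops that ref */
	}
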
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 4e7c9b9b0..33df340db 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -53,6 +53,10 @@
#define DEF_OUT_FREQ 2200000 /* 2.2 MHz */
+struct davinci_mdio_of_param {
+ int autosuspend_delay_ms;
+};
+
struct davinci_mdio_regs {
u32 version;
u32 control;
@@ -90,19 +94,19 @@ static const struct mdio_platform_data default_pdata = {
struct davinci_mdio_data {
struct mdio_platform_data pdata;
struct davinci_mdio_regs __iomem *regs;
- spinlock_t lock;
struct clk *clk;
struct device *dev;
struct mii_bus *bus;
- bool suspended;
+ bool active_in_suspend;
unsigned long access_time; /* jiffies */
/* Indicates that driver shouldn't modify phy_mask in case
* if MDIO bus is registered from DT.
*/
bool skip_scan;
+ u32 clk_div;
};
-static void __davinci_mdio_reset(struct davinci_mdio_data *data)
+static void davinci_mdio_init_clk(struct davinci_mdio_data *data)
{
u32 mdio_in, div, mdio_out_khz, access_time;
@@ -111,9 +115,7 @@ static void __davinci_mdio_reset(struct davinci_mdio_data *data)
if (div > CONTROL_MAX_DIV)
div = CONTROL_MAX_DIV;
- /* set enable and clock divider */
- __raw_writel(div | CONTROL_ENABLE, &data->regs->control);
-
+ data->clk_div = div;
/*
* One mdio transaction consists of:
* 32 bits of preamble
@@ -134,12 +136,23 @@ static void __davinci_mdio_reset(struct davinci_mdio_data *data)
data->access_time = 1;
}
+static void davinci_mdio_enable(struct davinci_mdio_data *data)
+{
+ /* set enable and clock divider */
+ __raw_writel(data->clk_div | CONTROL_ENABLE, &data->regs->control);
+}
+
static int davinci_mdio_reset(struct mii_bus *bus)
{
struct davinci_mdio_data *data = bus->priv;
u32 phy_mask, ver;
+ int ret;
- __davinci_mdio_reset(data);
+ ret = pm_runtime_get_sync(data->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(data->dev);
+ return ret;
+ }
/* wait for scan logic to settle */
msleep(PHY_MAX_ADDR * data->access_time);
@@ -150,7 +163,7 @@ static int davinci_mdio_reset(struct mii_bus *bus)
(ver >> 8) & 0xff, ver & 0xff);
if (data->skip_scan)
- return 0;
+ goto done;
/* get phy mask from the alive register */
phy_mask = __raw_readl(&data->regs->alive);
@@ -165,6 +178,10 @@ static int davinci_mdio_reset(struct mii_bus *bus)
}
data->bus->phy_mask = phy_mask;
+done:
+ pm_runtime_mark_last_busy(data->dev);
+ pm_runtime_put_autosuspend(data->dev);
+
return 0;
}
@@ -190,7 +207,7 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data)
* operation
*/
dev_warn(data->dev, "resetting idled controller\n");
- __davinci_mdio_reset(data);
+ davinci_mdio_enable(data);
return -EAGAIN;
}
@@ -225,11 +242,10 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
return -EINVAL;
- spin_lock(&data->lock);
-
- if (data->suspended) {
- spin_unlock(&data->lock);
- return -ENODEV;
+ ret = pm_runtime_get_sync(data->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(data->dev);
+ return ret;
}
reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
@@ -255,8 +271,8 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
break;
}
- spin_unlock(&data->lock);
-
+ pm_runtime_mark_last_busy(data->dev);
+ pm_runtime_put_autosuspend(data->dev);
return ret;
}
@@ -270,11 +286,10 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
return -EINVAL;
- spin_lock(&data->lock);
-
- if (data->suspended) {
- spin_unlock(&data->lock);
- return -ENODEV;
+ ret = pm_runtime_get_sync(data->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(data->dev);
+ return ret;
}
reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
@@ -295,9 +310,10 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
break;
}
- spin_unlock(&data->lock);
+ pm_runtime_mark_last_busy(data->dev);
+ pm_runtime_put_autosuspend(data->dev);
- return 0;
+ return ret;
}
#if IS_ENABLED(CONFIG_OF)
@@ -320,6 +336,19 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
}
#endif
+#if IS_ENABLED(CONFIG_OF)
+static const struct davinci_mdio_of_param of_cpsw_mdio_data = {
+ .autosuspend_delay_ms = 100,
+};
+
+static const struct of_device_id davinci_mdio_of_mtable[] = {
+ { .compatible = "ti,davinci_mdio", },
+ { .compatible = "ti,cpsw-mdio", .data = &of_cpsw_mdio_data},
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable);
+#endif
+
static int davinci_mdio_probe(struct platform_device *pdev)
{
struct mdio_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -328,6 +357,7 @@ static int davinci_mdio_probe(struct platform_device *pdev)
struct resource *res;
struct phy_device *phy;
int ret, addr;
+ int autosuspend_delay_ms = -1;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -340,9 +370,22 @@ static int davinci_mdio_probe(struct platform_device *pdev)
}
if (dev->of_node) {
- if (davinci_mdio_probe_dt(&data->pdata, pdev))
- data->pdata = default_pdata;
+ const struct of_device_id *of_id;
+
+ ret = davinci_mdio_probe_dt(&data->pdata, pdev);
+ if (ret)
+ return ret;
snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
+
+ of_id = of_match_device(davinci_mdio_of_mtable, &pdev->dev);
+ if (of_id) {
+ const struct davinci_mdio_of_param *of_mdio_data;
+
+ of_mdio_data = of_id->data;
+ if (of_mdio_data)
+ autosuspend_delay_ms =
+ of_mdio_data->autosuspend_delay_ms;
+ }
} else {
data->pdata = pdata ? (*pdata) : default_pdata;
snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
@@ -356,26 +399,25 @@ static int davinci_mdio_probe(struct platform_device *pdev)
data->bus->parent = dev;
data->bus->priv = data;
- pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
data->clk = devm_clk_get(dev, "fck");
if (IS_ERR(data->clk)) {
dev_err(dev, "failed to get device clock\n");
- ret = PTR_ERR(data->clk);
- data->clk = NULL;
- goto bail_out;
+ return PTR_ERR(data->clk);
}
dev_set_drvdata(dev, data);
data->dev = dev;
- spin_lock_init(&data->lock);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
data->regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(data->regs)) {
- ret = PTR_ERR(data->regs);
- goto bail_out;
- }
+ if (IS_ERR(data->regs))
+ return PTR_ERR(data->regs);
+
+ davinci_mdio_init_clk(data);
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, autosuspend_delay_ms);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
/* register the mii bus
* Create PHYs from DT only in case if PHY child nodes are explicitly
@@ -404,9 +446,8 @@ static int davinci_mdio_probe(struct platform_device *pdev)
return 0;
bail_out:
- pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
return ret;
}
@@ -417,29 +458,47 @@ static int davinci_mdio_remove(struct platform_device *pdev)
if (data->bus)
mdiobus_unregister(data->bus);
- pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int davinci_mdio_suspend(struct device *dev)
+#ifdef CONFIG_PM
+static int davinci_mdio_runtime_suspend(struct device *dev)
{
struct davinci_mdio_data *data = dev_get_drvdata(dev);
u32 ctrl;
- spin_lock(&data->lock);
-
/* shutdown the scan state machine */
ctrl = __raw_readl(&data->regs->control);
ctrl &= ~CONTROL_ENABLE;
__raw_writel(ctrl, &data->regs->control);
wait_for_idle(data);
- data->suspended = true;
- spin_unlock(&data->lock);
- pm_runtime_put_sync(data->dev);
+ return 0;
+}
+
+static int davinci_mdio_runtime_resume(struct device *dev)
+{
+ struct davinci_mdio_data *data = dev_get_drvdata(dev);
+
+ davinci_mdio_enable(data);
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int davinci_mdio_suspend(struct device *dev)
+{
+ struct davinci_mdio_data *data = dev_get_drvdata(dev);
+ int ret = 0;
+
+ data->active_in_suspend = !pm_runtime_status_suspended(dev);
+ if (data->active_in_suspend)
+ ret = pm_runtime_force_suspend(dev);
+ if (ret < 0)
+ return ret;
/* Select sleep pin state */
pinctrl_pm_select_sleep_state(dev);
@@ -454,31 +513,19 @@ static int davinci_mdio_resume(struct device *dev)
/* Select default pin state */
pinctrl_pm_select_default_state(dev);
- pm_runtime_get_sync(data->dev);
-
- spin_lock(&data->lock);
- /* restart the scan state machine */
- __davinci_mdio_reset(data);
-
- data->suspended = false;
- spin_unlock(&data->lock);
+ if (data->active_in_suspend)
+ pm_runtime_force_resume(dev);
return 0;
}
#endif
static const struct dev_pm_ops davinci_mdio_pm_ops = {
+ SET_RUNTIME_PM_OPS(davinci_mdio_runtime_suspend,
+ davinci_mdio_runtime_resume, NULL)
SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume)
};
-#if IS_ENABLED(CONFIG_OF)
-static const struct of_device_id davinci_mdio_of_mtable[] = {
- { .compatible = "ti,davinci_mdio", },
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable);
-#endif
-
static struct platform_driver davinci_mdio_driver = {
.driver = {
.name = "davinci_mdio",
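The davinci_mdio rework above trades the driver-private spinlock and suspended flag for runtime PM: each bus access is bracketed by a get/put pair, the clock divider is computed once in davinci_mdio_init_clk(), and the controller is gated in the new runtime callbacks. The access bracket, reduced to a sketch (names hypothetical):

	#include <linux/pm_runtime.h>

	static int example_mdio_access(struct device *dev)
	{
		int ret;

		ret = pm_runtime_get_sync(dev);	/* powers up if suspended */
		if (ret < 0) {
			/* get_sync bumps the usage count even on failure */
			pm_runtime_put_noidle(dev);
			return ret;
		}

		/* ... MDIO register access goes here ... */

		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);	/* idles after the delay */
		return 0;
	}
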
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 561703317..ece0ea0f6 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -1651,7 +1651,6 @@ static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int)
dma_addr_t head_list_phys;
u32 ack = 1;
- host_int = 0;
if (priv->tlan_rev < 0x30) {
TLAN_DBG(TLAN_DEBUG_TX,
"TRANSMIT: handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 922a443e3..4ef605a90 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -588,7 +588,7 @@ static bool tile_net_lepp_free_comps(struct net_device *dev, bool all)
static void tile_net_schedule_egress_timer(struct tile_net_cpu *info)
{
if (!info->egress_timer_scheduled) {
- mod_timer_pinned(&info->egress_timer, jiffies + 1);
+ mod_timer(&info->egress_timer, jiffies + 1);
info->egress_timer_scheduled = true;
}
}
@@ -1004,7 +1004,7 @@ static void tile_net_register(void *dev_ptr)
BUG();
/* Initialize the egress timer. */
- init_timer(&info->egress_timer);
+ init_timer_pinned(&info->egress_timer);
info->egress_timer.data = (long)info;
info->egress_timer.function = tile_net_handle_egress_timer;
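
The tilepro change illustrates the 4.8 timer-API shift: mod_timer_pinned() is gone, and pinning is now a property declared once via init_timer_pinned(), after which plain mod_timer() re-arms the timer on the local CPU. A sketch of the idiom (names hypothetical, using the 4.8-era unsigned long callback signature):

	#include <linux/timer.h>

	static void example_timer_fn(unsigned long data)
	{
		/* runs on the CPU that armed the timer */
	}

	static void example_arm(struct timer_list *t)
	{
		init_timer_pinned(t);	/* pinned once, for the timer's lifetime */
		t->data = 0;
		t->function = example_timer_fn;
		mod_timer(t, jiffies + 1);	/* no _pinned variant needed */
	}
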
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 548747834..5b01b3fa9 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -280,7 +280,7 @@ struct tc35815_regs {
* Descriptors
*/
-/* Frame descripter */
+/* Frame descriptor */
struct FDesc {
volatile __u32 FDNext;
volatile __u32 FDSystem;
@@ -288,7 +288,7 @@ struct FDesc {
volatile __u32 FDCtl;
};
-/* Buffer descripter */
+/* Buffer descriptor */
struct BDesc {
volatile __u32 BuffData;
volatile __u32 BDCtl;
@@ -296,7 +296,7 @@ struct BDesc {
#define FD_ALIGN 16
-/* Frame Descripter bit assign ---------------------------------------------- */
+/* Frame Descriptor bit assign ---------------------------------------------- */
#define FD_FDLength_MASK 0x0000FFFF /* Length MASK */
#define FD_BDCnt_MASK 0x001F0000 /* BD count MASK in FD */
#define FD_FrmOpt_MASK 0x7C000000 /* Frame option MASK */
@@ -309,7 +309,7 @@ struct BDesc {
#define FD_Next_EOL 0x00000001 /* FD EOL indicator */
#define FD_BDCnt_SHIFT 16
-/* Buffer Descripter bit assign --------------------------------------------- */
+/* Buffer Descriptor bit assign --------------------------------------------- */
#define BD_BuffLength_MASK 0x0000FFFF /* Receive Data Size */
#define BD_RxBDID_MASK 0x00FF0000 /* BD ID Number MASK */
#define BD_RxBDSeqN_MASK 0x7F000000 /* Rx BD Sequence Number */
@@ -405,7 +405,6 @@ struct tc35815_local {
spinlock_t rx_lock;
struct mii_bus *mii_bus;
- struct phy_device *phy_dev;
int duplex;
int speed;
int link;
@@ -539,7 +538,7 @@ static int tc_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 val)
static void tc_handle_link_change(struct net_device *dev)
{
struct tc35815_local *lp = netdev_priv(dev);
- struct phy_device *phydev = lp->phy_dev;
+ struct phy_device *phydev = dev->phydev;
unsigned long flags;
int status_change = 0;
@@ -645,7 +644,6 @@ static int tc_mii_probe(struct net_device *dev)
lp->link = 0;
lp->speed = 0;
lp->duplex = -1;
- lp->phy_dev = phydev;
return 0;
}
@@ -853,7 +851,7 @@ static void tc35815_remove_one(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct tc35815_local *lp = netdev_priv(dev);
- phy_disconnect(lp->phy_dev);
+ phy_disconnect(dev->phydev);
mdiobus_unregister(lp->mii_bus);
mdiobus_free(lp->mii_bus);
unregister_netdev(dev);
@@ -1143,8 +1141,8 @@ static void tc35815_restart(struct net_device *dev)
struct tc35815_local *lp = netdev_priv(dev);
int ret;
- if (lp->phy_dev) {
- ret = phy_init_hw(lp->phy_dev);
+ if (dev->phydev) {
+ ret = phy_init_hw(dev->phydev);
if (ret)
printk(KERN_ERR "%s: PHY init failed.\n", dev->name);
}
@@ -1236,7 +1234,7 @@ tc35815_open(struct net_device *dev)
netif_carrier_off(dev);
/* schedule a link state check */
- phy_start(lp->phy_dev);
+ phy_start(dev->phydev);
/* We are now ready to accept transmit requeusts from
* the queueing layer of the networking.
@@ -1387,7 +1385,7 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit)
if (status & Int_IntExBD) {
if (netif_msg_rx_err(lp))
dev_warn(&dev->dev,
- "Excessive Buffer Descriptiors (%#x).\n",
+ "Excessive Buffer Descriptors (%#x).\n",
status);
dev->stats.rx_length_errors++;
ret = 0;
@@ -1819,8 +1817,8 @@ tc35815_close(struct net_device *dev)
netif_stop_queue(dev);
napi_disable(&lp->napi);
- if (lp->phy_dev)
- phy_stop(lp->phy_dev);
+ if (dev->phydev)
+ phy_stop(dev->phydev);
cancel_work_sync(&lp->restart_work);
/* Flush the Tx and disable Rx here. */
@@ -1946,24 +1944,6 @@ static void tc35815_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *
strlcpy(info->bus_info, pci_name(lp->pci_dev), sizeof(info->bus_info));
}
-static int tc35815_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct tc35815_local *lp = netdev_priv(dev);
-
- if (!lp->phy_dev)
- return -ENODEV;
- return phy_ethtool_gset(lp->phy_dev, cmd);
-}
-
-static int tc35815_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct tc35815_local *lp = netdev_priv(dev);
-
- if (!lp->phy_dev)
- return -ENODEV;
- return phy_ethtool_sset(lp->phy_dev, cmd);
-}
-
static u32 tc35815_get_msglevel(struct net_device *dev)
{
struct tc35815_local *lp = netdev_priv(dev);
@@ -2013,25 +1993,23 @@ static void tc35815_get_strings(struct net_device *dev, u32 stringset, u8 *data)
static const struct ethtool_ops tc35815_ethtool_ops = {
.get_drvinfo = tc35815_get_drvinfo,
- .get_settings = tc35815_get_settings,
- .set_settings = tc35815_set_settings,
.get_link = ethtool_op_get_link,
.get_msglevel = tc35815_get_msglevel,
.set_msglevel = tc35815_set_msglevel,
.get_strings = tc35815_get_strings,
.get_sset_count = tc35815_get_sset_count,
.get_ethtool_stats = tc35815_get_ethtool_stats,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct tc35815_local *lp = netdev_priv(dev);
-
if (!netif_running(dev))
return -EINVAL;
- if (!lp->phy_dev)
+ if (!dev->phydev)
return -ENODEV;
- return phy_mii_ioctl(lp->phy_dev, rq, cmd);
+ return phy_mii_ioctl(dev->phydev, rq, cmd);
}
static void tc35815_chip_reset(struct net_device *dev)
@@ -2116,7 +2094,7 @@ static void tc35815_chip_init(struct net_device *dev)
if (lp->chiptype == TC35815_TX4939)
txctl &= ~Tx_EnLCarr;
/* WORKAROUND: ignore LostCrS in full duplex operation */
- if (!lp->phy_dev || !lp->link || lp->duplex == DUPLEX_FULL)
+ if (!dev->phydev || !lp->link || lp->duplex == DUPLEX_FULL)
txctl &= ~Tx_EnLCarr;
tc_writel(txctl, &tr->Tx_Ctl);
}
@@ -2132,8 +2110,8 @@ static int tc35815_suspend(struct pci_dev *pdev, pm_message_t state)
if (!netif_running(dev))
return 0;
netif_device_detach(dev);
- if (lp->phy_dev)
- phy_stop(lp->phy_dev);
+ if (dev->phydev)
+ phy_stop(dev->phydev);
spin_lock_irqsave(&lp->lock, flags);
tc35815_chip_reset(dev);
spin_unlock_irqrestore(&lp->lock, flags);
@@ -2144,7 +2122,6 @@ static int tc35815_suspend(struct pci_dev *pdev, pm_message_t state)
static int tc35815_resume(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
- struct tc35815_local *lp = netdev_priv(dev);
pci_restore_state(pdev);
if (!netif_running(dev))
@@ -2152,8 +2129,8 @@ static int tc35815_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
tc35815_restart(dev);
netif_carrier_off(dev);
- if (lp->phy_dev)
- phy_start(lp->phy_dev);
+ if (dev->phydev)
+ phy_start(dev->phydev);
netif_device_attach(dev);
return 0;
}
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 01a77145a..8fd131207 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -166,6 +166,7 @@ static struct platform_driver tsi_eth_driver = {
static void tsi108_timed_checker(unsigned long dev_ptr);
+#ifdef DEBUG
static void dump_eth_one(struct net_device *dev)
{
struct tsi108_prv_data *data = netdev_priv(dev);
@@ -190,6 +191,7 @@ static void dump_eth_one(struct net_device *dev)
TSI_READ(TSI108_EC_RXESTAT),
TSI_READ(TSI108_EC_RXERR), data->rxpending);
}
+#endif
/* Synchronization is needed between the thread and up/down events.
* Note that the PHY is accessed through the same registers for both
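
Guarding dump_eth_one() with #ifdef DEBUG appears intended to keep the helper from tripping -Wunused-function in non-debug builds, since its callers are themselves compiled out. The general shape (names hypothetical):

	#ifdef DEBUG
	static void example_dump(struct net_device *dev)
	{
		/* register dump, debug builds only */
	}
	#endif

	static void example_hotpath(struct net_device *dev)
	{
	#ifdef DEBUG
		example_dump(dev);	/* guard matches the definition */
	#endif
	}
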
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 4f6255cf6..37ab46cdb 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -1154,7 +1154,7 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops,
if (err < 0)
goto err_register;
- priv->xfer_wq = create_workqueue(netdev_name(ndev));
+ priv->xfer_wq = alloc_workqueue(netdev_name(ndev), WQ_MEM_RECLAIM, 0);
if (!priv->xfer_wq) {
err = -ENOMEM;
goto err_wq;
@@ -1233,7 +1233,6 @@ int w5100_remove(struct device *dev)
flush_work(&priv->setrx_work);
flush_work(&priv->restart_work);
- flush_workqueue(priv->xfer_wq);
destroy_workqueue(priv->xfer_wq);
unregister_netdev(ndev);
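
The w5100 change swaps the legacy create_workqueue() wrapper for alloc_workqueue() with an explicit WQ_MEM_RECLAIM flag, and drops flush_workqueue() on teardown because destroy_workqueue() already drains pending work. The lifecycle, sketched with hypothetical names:

	#include <linux/netdevice.h>
	#include <linux/workqueue.h>

	static int example_probe_wq(struct net_device *ndev,
				    struct workqueue_struct **wq)
	{
		/* WQ_MEM_RECLAIM adds a rescuer thread so queued work can
		 * make progress under memory pressure, which work on the
		 * packet path generally requires */
		*wq = alloc_workqueue(netdev_name(ndev), WQ_MEM_RECLAIM, 0);
		if (!*wq)
			return -ENOMEM;
		return 0;
	}

	static void example_remove_wq(struct workqueue_struct *wq)
	{
		/* no flush_workqueue() first: destroy already drains */
		destroy_workqueue(wq);
	}
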
diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h
index 902457e43..7d06e3e1a 100644
--- a/drivers/net/ethernet/xilinx/ll_temac.h
+++ b/drivers/net/ethernet/xilinx/ll_temac.h
@@ -332,7 +332,6 @@ struct temac_local {
struct device *dev;
/* Connection to PHY device */
- struct phy_device *phy_dev; /* Pointer to PHY device */
struct device_node *phy_node;
/* MDIO bus data */
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 739708712..a9bd665fd 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -590,7 +590,7 @@ static void temac_device_reset(struct net_device *ndev)
static void temac_adjust_link(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
- struct phy_device *phy = lp->phy_dev;
+ struct phy_device *phy = ndev->phydev;
u32 mii_speed;
int link_state;
@@ -843,19 +843,20 @@ static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
static int temac_open(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = NULL;
int rc;
dev_dbg(&ndev->dev, "temac_open()\n");
if (lp->phy_node) {
- lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
- temac_adjust_link, 0, 0);
- if (!lp->phy_dev) {
+ phydev = of_phy_connect(lp->ndev, lp->phy_node,
+ temac_adjust_link, 0, 0);
+ if (!phydev) {
dev_err(lp->dev, "of_phy_connect() failed\n");
return -ENODEV;
}
- phy_start(lp->phy_dev);
+ phy_start(phydev);
}
temac_device_reset(ndev);
@@ -872,9 +873,8 @@ static int temac_open(struct net_device *ndev)
err_rx_irq:
free_irq(lp->tx_irq, ndev);
err_tx_irq:
- if (lp->phy_dev)
- phy_disconnect(lp->phy_dev);
- lp->phy_dev = NULL;
+ if (phydev)
+ phy_disconnect(phydev);
dev_err(lp->dev, "request_irq() failed\n");
return rc;
}
@@ -882,15 +882,15 @@ static int temac_open(struct net_device *ndev)
static int temac_stop(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
dev_dbg(&ndev->dev, "temac_close()\n");
free_irq(lp->tx_irq, ndev);
free_irq(lp->rx_irq, ndev);
- if (lp->phy_dev)
- phy_disconnect(lp->phy_dev);
- lp->phy_dev = NULL;
+ if (phydev)
+ phy_disconnect(phydev);
temac_dma_bd_release(ndev);
@@ -916,15 +916,13 @@ temac_poll_controller(struct net_device *ndev)
static int temac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
- struct temac_local *lp = netdev_priv(ndev);
-
if (!netif_running(ndev))
return -EINVAL;
- if (!lp->phy_dev)
+ if (!ndev->phydev)
return -EINVAL;
- return phy_mii_ioctl(lp->phy_dev, rq, cmd);
+ return phy_mii_ioctl(ndev->phydev, rq, cmd);
}
static const struct net_device_ops temac_netdev_ops = {
@@ -969,30 +967,17 @@ static const struct attribute_group temac_attr_group = {
};
/* ethtool support */
-static int temac_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
-{
- struct temac_local *lp = netdev_priv(ndev);
- return phy_ethtool_gset(lp->phy_dev, cmd);
-}
-
-static int temac_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
-{
- struct temac_local *lp = netdev_priv(ndev);
- return phy_ethtool_sset(lp->phy_dev, cmd);
-}
-
static int temac_nway_reset(struct net_device *ndev)
{
- struct temac_local *lp = netdev_priv(ndev);
- return phy_start_aneg(lp->phy_dev);
+ return phy_start_aneg(ndev->phydev);
}
static const struct ethtool_ops temac_ethtool_ops = {
- .get_settings = temac_get_settings,
- .set_settings = temac_set_settings,
.nway_reset = temac_nway_reset,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static int temac_of_probe(struct platform_device *op)
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 9ead4e269..af27f7d1c 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -382,7 +382,6 @@ struct axidma_bd {
* struct axienet_local - axienet private per device data
* @ndev: Pointer for net_device to which it will be attached.
* @dev: Pointer to device structure
- * @phy_dev: Pointer to PHY device structure attached to the axienet_local
* @phy_node: Pointer to device node structure
* @mii_bus: Pointer to MII bus structure
* @regs: Base address for the axienet_local device address space
@@ -420,7 +419,6 @@ struct axienet_local {
struct device *dev;
/* Connection to PHY device */
- struct phy_device *phy_dev; /* Pointer to PHY device */
struct device_node *phy_node;
/* MDIO bus data */
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 8c7f5be51..36ee7ab30 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -525,7 +525,7 @@ static void axienet_adjust_link(struct net_device *ndev)
u32 link_state;
u32 setspeed = 1;
struct axienet_local *lp = netdev_priv(ndev);
- struct phy_device *phy = lp->phy_dev;
+ struct phy_device *phy = ndev->phydev;
link_state = phy->speed | (phy->duplex << 1) | phy->link;
if (lp->last_link != link_state) {
@@ -911,6 +911,7 @@ static int axienet_open(struct net_device *ndev)
{
int ret, mdio_mcreg;
struct axienet_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = NULL;
dev_dbg(&ndev->dev, "axienet_open()\n");
@@ -934,19 +935,19 @@ static int axienet_open(struct net_device *ndev)
if (lp->phy_node) {
if (lp->phy_type == XAE_PHY_TYPE_GMII) {
- lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
- axienet_adjust_link, 0,
- PHY_INTERFACE_MODE_GMII);
+ phydev = of_phy_connect(lp->ndev, lp->phy_node,
+ axienet_adjust_link, 0,
+ PHY_INTERFACE_MODE_GMII);
} else if (lp->phy_type == XAE_PHY_TYPE_RGMII_2_0) {
- lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
- axienet_adjust_link, 0,
- PHY_INTERFACE_MODE_RGMII_ID);
+ phydev = of_phy_connect(lp->ndev, lp->phy_node,
+ axienet_adjust_link, 0,
+ PHY_INTERFACE_MODE_RGMII_ID);
}
- if (!lp->phy_dev)
+ if (!phydev)
dev_err(lp->dev, "of_phy_connect() failed\n");
else
- phy_start(lp->phy_dev);
+ phy_start(phydev);
}
/* Enable tasklets for Axi DMA error handling */
@@ -967,9 +968,8 @@ static int axienet_open(struct net_device *ndev)
err_rx_irq:
free_irq(lp->tx_irq, ndev);
err_tx_irq:
- if (lp->phy_dev)
- phy_disconnect(lp->phy_dev);
- lp->phy_dev = NULL;
+ if (phydev)
+ phy_disconnect(phydev);
tasklet_kill(&lp->dma_err_tasklet);
dev_err(lp->dev, "request_irq() failed\n");
return ret;
@@ -1006,9 +1006,8 @@ static int axienet_stop(struct net_device *ndev)
free_irq(lp->tx_irq, ndev);
free_irq(lp->rx_irq, ndev);
- if (lp->phy_dev)
- phy_disconnect(lp->phy_dev);
- lp->phy_dev = NULL;
+ if (ndev->phydev)
+ phy_disconnect(ndev->phydev);
axienet_dma_bd_release(ndev);
return 0;
@@ -1078,51 +1077,6 @@ static const struct net_device_ops axienet_netdev_ops = {
};
/**
- * axienet_ethtools_get_settings - Get Axi Ethernet settings related to PHY.
- * @ndev: Pointer to net_device structure
- * @ecmd: Pointer to ethtool_cmd structure
- *
- * This implements ethtool command for getting PHY settings. If PHY could
- * not be found, the function returns -ENODEV. This function calls the
- * relevant PHY ethtool API to get the PHY settings.
- * Issue "ethtool ethX" under linux prompt to execute this function.
- *
- * Return: 0 on success, -ENODEV if PHY doesn't exist
- */
-static int axienet_ethtools_get_settings(struct net_device *ndev,
- struct ethtool_cmd *ecmd)
-{
- struct axienet_local *lp = netdev_priv(ndev);
- struct phy_device *phydev = lp->phy_dev;
- if (!phydev)
- return -ENODEV;
- return phy_ethtool_gset(phydev, ecmd);
-}
-
-/**
- * axienet_ethtools_set_settings - Set PHY settings as passed in the argument.
- * @ndev: Pointer to net_device structure
- * @ecmd: Pointer to ethtool_cmd structure
- *
- * This implements ethtool command for setting various PHY settings. If PHY
- * could not be found, the function returns -ENODEV. This function calls the
- * relevant PHY ethtool API to set the PHY.
- * Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this
- * function.
- *
- * Return: 0 on success, -ENODEV if PHY doesn't exist
- */
-static int axienet_ethtools_set_settings(struct net_device *ndev,
- struct ethtool_cmd *ecmd)
-{
- struct axienet_local *lp = netdev_priv(ndev);
- struct phy_device *phydev = lp->phy_dev;
- if (!phydev)
- return -ENODEV;
- return phy_ethtool_sset(phydev, ecmd);
-}
-
-/**
* axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
* @ndev: Pointer to net_device structure
* @ed: Pointer to ethtool_drvinfo structure
@@ -1344,8 +1298,6 @@ static int axienet_ethtools_set_coalesce(struct net_device *ndev,
}
static struct ethtool_ops axienet_ethtool_ops = {
- .get_settings = axienet_ethtools_get_settings,
- .set_settings = axienet_ethtools_set_settings,
.get_drvinfo = axienet_ethtools_get_drvinfo,
.get_regs_len = axienet_ethtools_get_regs_len,
.get_regs = axienet_ethtools_get_regs,
@@ -1354,6 +1306,8 @@ static struct ethtool_ops axienet_ethtool_ops = {
.set_pauseparam = axienet_ethtools_set_pauseparam,
.get_coalesce = axienet_ethtools_get_coalesce,
.set_coalesce = axienet_ethtools_set_coalesce,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
/**
@@ -1587,9 +1541,9 @@ static int axienet_probe(struct platform_device *pdev)
/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
- if (IS_ERR(np)) {
+ if (!np) {
dev_err(&pdev->dev, "could not find DMA node\n");
- ret = PTR_ERR(np);
+ ret = -ENODEV;
goto free_netdev;
}
ret = of_address_to_resource(np, 0, &dmares);
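
The axienet fix above corrects an error check: of_parse_phandle() signals a missing phandle by returning NULL, never an ERR_PTR(), so the old IS_ERR()/PTR_ERR() path could not fire and a missing DMA node slipped through. The corrected shape, sketched (names hypothetical):

	#include <linux/of.h>
	#include <linux/platform_device.h>

	static int example_find_dma_node(struct platform_device *pdev,
					 struct device_node **np)
	{
		*np = of_parse_phandle(pdev->dev.of_node,
				       "axistream-connected", 0);
		if (!*np)		/* NULL, not IS_ERR(), on failure */
			return -ENODEV;
		return 0;
	}
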
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 3cee84a24..93dc10b10 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1131,11 +1131,13 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong");
mac_address = of_get_mac_address(ofdev->dev.of_node);
- if (mac_address)
+ if (mac_address) {
/* Set the MAC address. */
memcpy(ndev->dev_addr, mac_address, ETH_ALEN);
- else
- dev_warn(dev, "No MAC address found\n");
+ } else {
+ dev_warn(dev, "No MAC address found, using random\n");
+ eth_hw_addr_random(ndev);
+ }
/* Clear the Tx CSR's in case this is a restart */
__raw_writel(0, lp->base_addr + XEL_TSR_OFFSET);
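
emaclite now falls back to a random MAC instead of warning and leaving dev_addr unset. eth_hw_addr_random() generates a locally administered unicast address and records NET_ADDR_RANDOM in addr_assign_type, so userspace can tell the address is not hardware-provided. The fallback in isolation, as a sketch (function name hypothetical):

	#include <linux/etherdevice.h>
	#include <linux/of_net.h>
	#include <linux/string.h>

	static void example_set_mac(struct net_device *ndev,
				    struct device_node *np)
	{
		const void *mac = of_get_mac_address(np);

		if (mac)
			memcpy(ndev->dev_addr, mac, ETH_ALEN);
		else
			eth_hw_addr_random(ndev);	/* valid, locally administered */
	}
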
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index 7b44968e0..ddced28e8 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1144,8 +1144,8 @@ xirc2ps_interrupt(int irq, void *dev_id)
dev->stats.tx_packets += lp->last_ptr_value - n;
netif_wake_queue(dev);
}
- if (tx_status & 0x0002) { /* Execessive collissions */
- pr_debug("tx restarted due to execssive collissions\n");
+ if (tx_status & 0x0002) { /* Excessive collisions */
+ pr_debug("tx restarted due to excessive collisions\n");
PutByte(XIRCREG_CR, RestartTx); /* restart transmitter process */
}
if (tx_status & 0x0040)
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 513840794..7f127dc1b 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -171,7 +171,6 @@ struct port {
struct npe *npe;
struct net_device *netdev;
struct napi_struct napi;
- struct phy_device *phydev;
struct eth_plat_info *plat;
buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
struct desc *desc_tab; /* coherent */
@@ -562,7 +561,7 @@ static void ixp4xx_mdio_remove(void)
static void ixp4xx_adjust_link(struct net_device *dev)
{
struct port *port = netdev_priv(dev);
- struct phy_device *phydev = port->phydev;
+ struct phy_device *phydev = dev->phydev;
if (!phydev->link) {
if (port->speed) {
@@ -976,8 +975,6 @@ static void eth_set_mcast_list(struct net_device *dev)
static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
- struct port *port = netdev_priv(dev);
-
if (!netif_running(dev))
return -EINVAL;
@@ -988,7 +985,7 @@ static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
return hwtstamp_get(dev, req);
}
- return phy_mii_ioctl(port->phydev, req, cmd);
+ return phy_mii_ioctl(dev->phydev, req, cmd);
}
/* ethtool support */
@@ -1005,22 +1002,9 @@ static void ixp4xx_get_drvinfo(struct net_device *dev,
strlcpy(info->bus_info, "internal", sizeof(info->bus_info));
}
-static int ixp4xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct port *port = netdev_priv(dev);
- return phy_ethtool_gset(port->phydev, cmd);
-}
-
-static int ixp4xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct port *port = netdev_priv(dev);
- return phy_ethtool_sset(port->phydev, cmd);
-}
-
static int ixp4xx_nway_reset(struct net_device *dev)
{
- struct port *port = netdev_priv(dev);
- return phy_start_aneg(port->phydev);
+ return phy_start_aneg(dev->phydev);
}
int ixp46x_phc_index = -1;
@@ -1054,11 +1038,11 @@ static int ixp4xx_get_ts_info(struct net_device *dev,
static const struct ethtool_ops ixp4xx_ethtool_ops = {
.get_drvinfo = ixp4xx_get_drvinfo,
- .get_settings = ixp4xx_get_settings,
- .set_settings = ixp4xx_set_settings,
.nway_reset = ixp4xx_nway_reset,
.get_link = ethtool_op_get_link,
.get_ts_info = ixp4xx_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
@@ -1259,7 +1243,7 @@ static int eth_open(struct net_device *dev)
}
port->speed = 0; /* force "link up" message */
- phy_start(port->phydev);
+ phy_start(dev->phydev);
for (i = 0; i < ETH_ALEN; i++)
__raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
@@ -1380,7 +1364,7 @@ static int eth_close(struct net_device *dev)
printk(KERN_CRIT "%s: unable to disable loopback\n",
dev->name);
- phy_stop(port->phydev);
+ phy_stop(dev->phydev);
if (!ports_open)
qmgr_disable_irq(TXDONE_QUEUE);
@@ -1405,6 +1389,7 @@ static int eth_init_one(struct platform_device *pdev)
struct port *port;
struct net_device *dev;
struct eth_plat_info *plat = dev_get_platdata(&pdev->dev);
+ struct phy_device *phydev = NULL;
u32 regs_phys;
char phy_id[MII_BUS_ID_SIZE + 3];
int err;
@@ -1466,14 +1451,14 @@ static int eth_init_one(struct platform_device *pdev)
snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
mdio_bus->id, plat->phy);
- port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link,
- PHY_INTERFACE_MODE_MII);
- if (IS_ERR(port->phydev)) {
- err = PTR_ERR(port->phydev);
+ phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(phydev)) {
+ err = PTR_ERR(phydev);
goto err_free_mem;
}
- port->phydev->irq = PHY_POLL;
+ phydev->irq = PHY_POLL;
if ((err = register_netdev(dev)))
goto err_phy_dis;
@@ -1484,7 +1469,7 @@ static int eth_init_one(struct platform_device *pdev)
return 0;
err_phy_dis:
- phy_disconnect(port->phydev);
+ phy_disconnect(phydev);
err_free_mem:
npe_port_tab[NPE_ID(port->id)] = NULL;
release_resource(port->mem_res);
@@ -1498,10 +1483,11 @@ err_free:
static int eth_remove_one(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
+ struct phy_device *phydev = dev->phydev;
struct port *port = netdev_priv(dev);
unregister_netdev(dev);
- phy_disconnect(port->phydev);
+ phy_disconnect(phydev);
npe_port_tab[NPE_ID(port->id)] = NULL;
npe_release(port->npe);
release_resource(port->mem_res);
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index 86c331bb5..9006877c5 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -1187,8 +1187,9 @@ static int fjes_probe(struct platform_device *plat_dev)
adapter->force_reset = false;
adapter->open_guard = false;
- adapter->txrx_wq = create_workqueue(DRV_NAME "/txrx");
- adapter->control_wq = create_workqueue(DRV_NAME "/control");
+ adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
+ adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
+ WQ_MEM_RECLAIM, 0);
INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
INIT_WORK(&adapter->raise_intr_rxdata_task,
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 9b3dc3c61..3c20e87bb 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/hash.h>
#include <net/dst_metadata.h>
@@ -397,23 +396,6 @@ static struct socket *geneve_create_sock(struct net *net, bool ipv6,
return sock;
}
-static void geneve_notify_add_rx_port(struct geneve_sock *gs)
-{
- struct net_device *dev;
- struct sock *sk = gs->sock->sk;
- struct net *net = sock_net(sk);
- sa_family_t sa_family = geneve_get_sk_family(gs);
- __be16 port = inet_sk(sk)->inet_sport;
-
- rcu_read_lock();
- for_each_netdev_rcu(net, dev) {
- if (dev->netdev_ops->ndo_add_geneve_port)
- dev->netdev_ops->ndo_add_geneve_port(dev, sa_family,
- port);
- }
- rcu_read_unlock();
-}
-
static int geneve_hlen(struct genevehdr *gh)
{
return sizeof(*gh) + gh->opt_len * 4;
@@ -533,7 +515,7 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
INIT_HLIST_HEAD(&gs->vni_list[h]);
/* Initialize the geneve udp offloads structure */
- geneve_notify_add_rx_port(gs);
+ udp_tunnel_notify_add_rx_port(gs->sock, UDP_TUNNEL_TYPE_GENEVE);
/* Mark socket as an encapsulation socket */
memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
@@ -548,31 +530,13 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
return gs;
}
-static void geneve_notify_del_rx_port(struct geneve_sock *gs)
-{
- struct net_device *dev;
- struct sock *sk = gs->sock->sk;
- struct net *net = sock_net(sk);
- sa_family_t sa_family = geneve_get_sk_family(gs);
- __be16 port = inet_sk(sk)->inet_sport;
-
- rcu_read_lock();
- for_each_netdev_rcu(net, dev) {
- if (dev->netdev_ops->ndo_del_geneve_port)
- dev->netdev_ops->ndo_del_geneve_port(dev, sa_family,
- port);
- }
-
- rcu_read_unlock();
-}
-
static void __geneve_sock_release(struct geneve_sock *gs)
{
if (!gs || --gs->refcnt)
return;
list_del(&gs->list);
- geneve_notify_del_rx_port(gs);
+ udp_tunnel_notify_del_rx_port(gs->sock, UDP_TUNNEL_TYPE_GENEVE);
udp_tunnel_sock_release(gs->sock);
kfree_rcu(gs, rcu);
}
@@ -1170,29 +1134,20 @@ static struct device_type geneve_type = {
.name = "geneve",
};
-/* Calls the ndo_add_geneve_port of the caller in order to
+/* Calls the ndo_udp_tunnel_add of the caller in order to
* supply the listening GENEVE udp ports. Callers are expected
- * to implement the ndo_add_geneve_port.
+ * to implement the ndo_udp_tunnel_add.
*/
static void geneve_push_rx_ports(struct net_device *dev)
{
struct net *net = dev_net(dev);
struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_sock *gs;
- sa_family_t sa_family;
- struct sock *sk;
- __be16 port;
-
- if (!dev->netdev_ops->ndo_add_geneve_port)
- return;
rcu_read_lock();
- list_for_each_entry_rcu(gs, &gn->sock_list, list) {
- sk = gs->sock->sk;
- sa_family = sk->sk_family;
- port = inet_sk(sk)->inet_sport;
- dev->netdev_ops->ndo_add_geneve_port(dev, sa_family, port);
- }
+ list_for_each_entry_rcu(gs, &gn->sock_list, list)
+ udp_tunnel_push_rx_port(dev, gs->sock,
+ UDP_TUNNEL_TYPE_GENEVE);
rcu_read_unlock();
}
@@ -1555,7 +1510,7 @@ static int geneve_netdevice_event(struct notifier_block *unused,
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- if (event == NETDEV_OFFLOAD_PUSH_GENEVE)
+ if (event == NETDEV_UDP_TUNNEL_PUSH_INFO)
geneve_push_rx_ports(dev);
return NOTIFY_DONE;
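
geneve's hand-rolled for_each_netdev_rcu() loops are replaced by the generic udp_tunnel helpers, which funnel tunnel-port offload advertisement for all UDP tunnel types through a single ndo_udp_tunnel_add/del pair in the NIC drivers. The socket-lifecycle calls, sketched (function names hypothetical):

	#include <net/udp_tunnel.h>

	static void example_sock_ready(struct socket *sock)
	{
		/* advertise the listening port to offload-capable NICs */
		udp_tunnel_notify_add_rx_port(sock, UDP_TUNNEL_TYPE_GENEVE);
	}

	static void example_sock_release(struct socket *sock)
	{
		/* withdraw the advertisement before the socket goes away */
		udp_tunnel_notify_del_rx_port(sock, UDP_TUNNEL_TYPE_GENEVE);
	}
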
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 4e976a0d5..97e0cbca0 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -16,7 +16,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/rculist.h>
diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c
index acb636963..072cddce9 100644
--- a/drivers/net/hamradio/baycom_par.c
+++ b/drivers/net/hamradio/baycom_par.c
@@ -156,7 +156,7 @@ struct baycom_state {
/* --------------------------------------------------------------------- */
-static void __inline__ baycom_int_freq(struct baycom_state *bc)
+static inline void baycom_int_freq(struct baycom_state *bc)
{
#ifdef BAYCOM_DEBUG
unsigned long cur_jiffies = jiffies;
@@ -192,7 +192,7 @@ static void __inline__ baycom_int_freq(struct baycom_state *bc)
/* --------------------------------------------------------------------- */
-static __inline__ void par96_tx(struct net_device *dev, struct baycom_state *bc)
+static inline void par96_tx(struct net_device *dev, struct baycom_state *bc)
{
int i;
unsigned int data = hdlcdrv_getbits(&bc->hdrv);
@@ -216,7 +216,7 @@ static __inline__ void par96_tx(struct net_device *dev, struct baycom_state *bc)
/* --------------------------------------------------------------------- */
-static __inline__ void par96_rx(struct net_device *dev, struct baycom_state *bc)
+static inline void par96_rx(struct net_device *dev, struct baycom_state *bc)
{
int i;
unsigned int data, mask, mask2, descx;
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index c270c5a54..591af71ea 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -173,6 +173,7 @@ struct rndis_device {
/* Interface */
struct rndis_message;
+struct netvsc_device;
int netvsc_device_add(struct hv_device *device, void *additional_info);
int netvsc_device_remove(struct hv_device *device);
int netvsc_send(struct hv_device *device,
@@ -189,8 +190,8 @@ int netvsc_recv_callback(struct hv_device *device_obj,
struct vmbus_channel *channel,
u16 vlan_tci);
void netvsc_channel_cb(void *context);
-int rndis_filter_open(struct hv_device *dev);
-int rndis_filter_close(struct hv_device *dev);
+int rndis_filter_open(struct netvsc_device *nvdev);
+int rndis_filter_close(struct netvsc_device *nvdev);
int rndis_filter_device_add(struct hv_device *dev,
void *additional_info);
void rndis_filter_device_remove(struct hv_device *dev);
@@ -200,7 +201,7 @@ int rndis_filter_receive(struct hv_device *dev,
struct vmbus_channel *channel);
int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter);
-int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac);
+int rndis_filter_set_device_mac(struct net_device *ndev, char *mac);
void netvsc_switch_datapath(struct net_device *nv_dev, bool vf);
@@ -643,12 +644,6 @@ struct netvsc_reconfig {
u32 event;
};
-struct garp_wrk {
- struct work_struct dwrk;
- struct net_device *netdev;
- struct netvsc_device *netvsc_dev;
-};
-
/* The context of the netvsc device */
struct net_device_context {
/* point back to our device context */
@@ -666,7 +661,6 @@ struct net_device_context {
struct work_struct work;
u32 msg_enable; /* debug level */
- struct garp_wrk gwrk;
struct netvsc_stats __percpu *tx_stats;
struct netvsc_stats __percpu *rx_stats;
@@ -677,6 +671,15 @@ struct net_device_context {
/* the device is going away */
bool start_remove;
+
+ /* State to manage the associated VF interface. */
+ struct net_device *vf_netdev;
+ bool vf_inject;
+ atomic_t vf_use_cnt;
+ /* 1: allocated, serial number is valid. 0: not allocated */
+ u32 vf_alloc;
+ /* Serial number of the VF to team with */
+ u32 vf_serial;
};
/* Per netvsc device */
@@ -732,17 +735,21 @@ struct netvsc_device {
u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
u32 pkt_align; /* alignment bytes, e.g. 8 */
- /* 1: allocated, serial number is valid. 0: not allocated */
- u32 vf_alloc;
- /* Serial number of the VF to team with */
- u32 vf_serial;
atomic_t open_cnt;
- /* State to manage the associated VF interface. */
- bool vf_inject;
- struct net_device *vf_netdev;
- atomic_t vf_use_cnt;
};
+static inline struct netvsc_device *
+net_device_to_netvsc_device(struct net_device *ndev)
+{
+ return ((struct net_device_context *)netdev_priv(ndev))->nvdev;
+}
+
+static inline struct netvsc_device *
+hv_device_to_netvsc_device(struct hv_device *device)
+{
+ return net_device_to_netvsc_device(hv_get_drvdata(device));
+}
+
/* NdisInitialize message */
struct rndis_initialize_request {
u32 req_id;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 719cb3578..410fb8e81 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -77,13 +77,9 @@ static struct netvsc_device *alloc_net_device(void)
init_waitqueue_head(&net_device->wait_drain);
net_device->destroy = false;
atomic_set(&net_device->open_cnt, 0);
- atomic_set(&net_device->vf_use_cnt, 0);
net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
- net_device->vf_netdev = NULL;
- net_device->vf_inject = false;
-
return net_device;
}
@@ -95,9 +91,7 @@ static void free_netvsc_device(struct netvsc_device *nvdev)
static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
{
- struct net_device *ndev = hv_get_drvdata(device);
- struct net_device_context *net_device_ctx = netdev_priv(ndev);
- struct netvsc_device *net_device = net_device_ctx->nvdev;
+ struct netvsc_device *net_device = hv_device_to_netvsc_device(device);
if (net_device && net_device->destroy)
net_device = NULL;
@@ -107,9 +101,7 @@ static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
{
- struct net_device *ndev = hv_get_drvdata(device);
- struct net_device_context *net_device_ctx = netdev_priv(ndev);
- struct netvsc_device *net_device = net_device_ctx->nvdev;
+ struct netvsc_device *net_device = hv_device_to_netvsc_device(device);
if (!net_device)
goto get_in_err;
@@ -128,8 +120,7 @@ static int netvsc_destroy_buf(struct hv_device *device)
struct nvsp_message *revoke_packet;
int ret = 0;
struct net_device *ndev = hv_get_drvdata(device);
- struct net_device_context *net_device_ctx = netdev_priv(ndev);
- struct netvsc_device *net_device = net_device_ctx->nvdev;
+ struct netvsc_device *net_device = net_device_to_netvsc_device(ndev);
/*
* If we got a section count, it means we received a
@@ -249,7 +240,6 @@ static int netvsc_destroy_buf(struct hv_device *device)
static int netvsc_init_buf(struct hv_device *device)
{
int ret = 0;
- unsigned long t;
struct netvsc_device *net_device;
struct nvsp_message *init_packet;
struct net_device *ndev;
@@ -310,9 +300,7 @@ static int netvsc_init_buf(struct hv_device *device)
goto cleanup;
}
- t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
- BUG_ON(t == 0);
-
+ wait_for_completion(&net_device->channel_init_wait);
/* Check the response */
if (init_packet->msg.v1_msg.
@@ -395,8 +383,7 @@ static int netvsc_init_buf(struct hv_device *device)
goto cleanup;
}
- t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
- BUG_ON(t == 0);
+ wait_for_completion(&net_device->channel_init_wait);
/* Check the response */
if (init_packet->msg.v1_msg.
@@ -450,7 +437,6 @@ static int negotiate_nvsp_ver(struct hv_device *device,
{
struct net_device *ndev = hv_get_drvdata(device);
int ret;
- unsigned long t;
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
@@ -467,10 +453,7 @@ static int negotiate_nvsp_ver(struct hv_device *device,
if (ret != 0)
return ret;
- t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
-
- if (t == 0)
- return -ETIMEDOUT;
+ wait_for_completion(&net_device->channel_init_wait);
if (init_packet->msg.init_msg.init_complete.status !=
NVSP_STAT_SUCCESS)
@@ -1119,16 +1102,16 @@ static void netvsc_send_table(struct hv_device *hdev,
nvscdev->send_table[i] = tab[i];
}
-static void netvsc_send_vf(struct netvsc_device *nvdev,
+static void netvsc_send_vf(struct net_device_context *net_device_ctx,
struct nvsp_message *nvmsg)
{
- nvdev->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
- nvdev->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
+ net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
+ net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
}
static inline void netvsc_receive_inband(struct hv_device *hdev,
- struct netvsc_device *nvdev,
- struct nvsp_message *nvmsg)
+ struct net_device_context *net_device_ctx,
+ struct nvsp_message *nvmsg)
{
switch (nvmsg->hdr.msg_type) {
case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
@@ -1136,11 +1119,45 @@ static inline void netvsc_receive_inband(struct hv_device *hdev,
break;
case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
- netvsc_send_vf(nvdev, nvmsg);
+ netvsc_send_vf(net_device_ctx, nvmsg);
break;
}
}
+static void netvsc_process_raw_pkt(struct hv_device *device,
+ struct vmbus_channel *channel,
+ struct netvsc_device *net_device,
+ struct net_device *ndev,
+ u64 request_id,
+ struct vmpacket_descriptor *desc)
+{
+ struct nvsp_message *nvmsg;
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+
+ nvmsg = (struct nvsp_message *)((unsigned long)
+ desc + (desc->offset8 << 3));
+
+ switch (desc->type) {
+ case VM_PKT_COMP:
+ netvsc_send_completion(net_device, channel, device, desc);
+ break;
+
+ case VM_PKT_DATA_USING_XFER_PAGES:
+ netvsc_receive(net_device, channel, device, desc);
+ break;
+
+ case VM_PKT_DATA_INBAND:
+ netvsc_receive_inband(device, net_device_ctx, nvmsg);
+ break;
+
+ default:
+ netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
+ desc->type, request_id);
+ break;
+ }
+}
+
+
void netvsc_channel_cb(void *context)
{
int ret;
@@ -1153,7 +1170,7 @@ void netvsc_channel_cb(void *context)
unsigned char *buffer;
int bufferlen = NETVSC_PACKET_SIZE;
struct net_device *ndev;
- struct nvsp_message *nvmsg;
+ bool need_to_commit = false;
if (channel->primary_channel != NULL)
device = channel->primary_channel->device_obj;
@@ -1167,39 +1184,36 @@ void netvsc_channel_cb(void *context)
buffer = get_per_channel_state(channel);
do {
+ desc = get_next_pkt_raw(channel);
+ if (desc != NULL) {
+ netvsc_process_raw_pkt(device,
+ channel,
+ net_device,
+ ndev,
+ desc->trans_id,
+ desc);
+
+ put_pkt_raw(channel, desc);
+ need_to_commit = true;
+ continue;
+ }
+ if (need_to_commit) {
+ need_to_commit = false;
+ commit_rd_index(channel);
+ }
+
ret = vmbus_recvpacket_raw(channel, buffer, bufferlen,
&bytes_recvd, &request_id);
if (ret == 0) {
if (bytes_recvd > 0) {
desc = (struct vmpacket_descriptor *)buffer;
- nvmsg = (struct nvsp_message *)((unsigned long)
- desc + (desc->offset8 << 3));
- switch (desc->type) {
- case VM_PKT_COMP:
- netvsc_send_completion(net_device,
- channel,
- device, desc);
- break;
-
- case VM_PKT_DATA_USING_XFER_PAGES:
- netvsc_receive(net_device, channel,
- device, desc);
- break;
-
- case VM_PKT_DATA_INBAND:
- netvsc_receive_inband(device,
- net_device,
- nvmsg);
- break;
-
- default:
- netdev_err(ndev,
- "unhandled packet type %d, "
- "tid %llx len %d\n",
- desc->type, request_id,
- bytes_recvd);
- break;
- }
+ netvsc_process_raw_pkt(device,
+ channel,
+ net_device,
+ ndev,
+ request_id,
+ desc);
+
} else {
/*
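
netvsc_channel_cb() now prefers the in-place ring API: get_next_pkt_raw() hands back descriptors directly from the VMBus ring buffer, put_pkt_raw() advances a private read cursor, and commit_rd_index() publishes the new index to the host once per batch instead of per packet. The loop structure, reduced to a sketch (function name hypothetical):

	#include <linux/hyperv.h>

	static void example_drain(struct vmbus_channel *chan)
	{
		struct vmpacket_descriptor *desc;
		bool dirty = false;

		while ((desc = get_next_pkt_raw(chan)) != NULL) {
			/* process desc in place, no bounce-buffer copy */
			put_pkt_raw(chan, desc);
			dirty = true;
		}
		if (dirty)
			commit_rd_index(chan);	/* one host-visible update */
	}
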
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 6a69b5cc9..3ba29fc80 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -98,16 +98,14 @@ static void netvsc_set_multicast_list(struct net_device *net)
static int netvsc_open(struct net_device *net)
{
- struct net_device_context *net_device_ctx = netdev_priv(net);
- struct hv_device *device_obj = net_device_ctx->device_ctx;
- struct netvsc_device *nvdev = net_device_ctx->nvdev;
+ struct netvsc_device *nvdev = net_device_to_netvsc_device(net);
struct rndis_device *rdev;
int ret = 0;
netif_carrier_off(net);
/* Open up the device */
- ret = rndis_filter_open(device_obj);
+ ret = rndis_filter_open(nvdev);
if (ret != 0) {
netdev_err(net, "unable to open device (ret %d).\n", ret);
return ret;
@@ -125,7 +123,6 @@ static int netvsc_open(struct net_device *net)
static int netvsc_close(struct net_device *net)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
- struct hv_device *device_obj = net_device_ctx->device_ctx;
struct netvsc_device *nvdev = net_device_ctx->nvdev;
int ret;
u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
@@ -135,7 +132,7 @@ static int netvsc_close(struct net_device *net)
/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
cancel_work_sync(&net_device_ctx->work);
- ret = rndis_filter_close(device_obj);
+ ret = rndis_filter_close(nvdev);
if (ret != 0) {
netdev_err(net, "unable to close device (ret %d).\n", ret);
return ret;
@@ -661,20 +658,19 @@ int netvsc_recv_callback(struct hv_device *device_obj,
struct sk_buff *skb;
struct sk_buff *vf_skb;
struct netvsc_stats *rx_stats;
- struct netvsc_device *netvsc_dev = net_device_ctx->nvdev;
u32 bytes_recvd = packet->total_data_buflen;
int ret = 0;
if (!net || net->reg_state != NETREG_REGISTERED)
return NVSP_STAT_FAIL;
- if (READ_ONCE(netvsc_dev->vf_inject)) {
- atomic_inc(&netvsc_dev->vf_use_cnt);
- if (!READ_ONCE(netvsc_dev->vf_inject)) {
+ if (READ_ONCE(net_device_ctx->vf_inject)) {
+ atomic_inc(&net_device_ctx->vf_use_cnt);
+ if (!READ_ONCE(net_device_ctx->vf_inject)) {
/*
* We raced; just move on.
*/
- atomic_dec(&netvsc_dev->vf_use_cnt);
+ atomic_dec(&net_device_ctx->vf_use_cnt);
goto vf_injection_done;
}
@@ -686,22 +682,23 @@ int netvsc_recv_callback(struct hv_device *device_obj,
* the host). Deliver these via the VF interface
* in the guest.
*/
- vf_skb = netvsc_alloc_recv_skb(netvsc_dev->vf_netdev, packet,
- csum_info, *data, vlan_tci);
+ vf_skb = netvsc_alloc_recv_skb(net_device_ctx->vf_netdev,
+ packet, csum_info, *data,
+ vlan_tci);
if (vf_skb != NULL) {
- ++netvsc_dev->vf_netdev->stats.rx_packets;
- netvsc_dev->vf_netdev->stats.rx_bytes += bytes_recvd;
+ ++net_device_ctx->vf_netdev->stats.rx_packets;
+ net_device_ctx->vf_netdev->stats.rx_bytes +=
+ bytes_recvd;
netif_receive_skb(vf_skb);
} else {
++net->stats.rx_dropped;
ret = NVSP_STAT_FAIL;
}
- atomic_dec(&netvsc_dev->vf_use_cnt);
+ atomic_dec(&net_device_ctx->vf_use_cnt);
return ret;
}
vf_injection_done:
- net_device_ctx = netdev_priv(net);
rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);
/* Allocate a skb - TODO direct I/O to pages? */
@@ -986,8 +983,6 @@ static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
- struct net_device_context *ndevctx = netdev_priv(ndev);
- struct hv_device *hdev = ndevctx->device_ctx;
struct sockaddr *addr = p;
char save_adr[ETH_ALEN];
unsigned char save_aatype;
@@ -1000,7 +995,7 @@ static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
if (err != 0)
return err;
- err = rndis_filter_set_device_mac(hdev, addr->sa_data);
+ err = rndis_filter_set_device_mac(ndev, addr->sa_data);
if (err != 0) {
/* roll back to saved MAC */
memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
@@ -1156,17 +1151,6 @@ static void netvsc_free_netdev(struct net_device *netdev)
free_netdev(netdev);
}
-static void netvsc_notify_peers(struct work_struct *wrk)
-{
- struct garp_wrk *gwrk;
-
- gwrk = container_of(wrk, struct garp_wrk, dwrk);
-
- netdev_notify_peers(gwrk->netdev);
-
- atomic_dec(&gwrk->netvsc_dev->vf_use_cnt);
-}
-
static struct net_device *get_netvsc_net_device(char *mac)
{
struct net_device *dev, *found = NULL;
@@ -1209,7 +1193,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
net_device_ctx = netdev_priv(ndev);
netvsc_dev = net_device_ctx->nvdev;
- if (netvsc_dev == NULL)
+ if (!netvsc_dev || net_device_ctx->vf_netdev)
return NOTIFY_DONE;
netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
@@ -1217,10 +1201,23 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
* Take a reference on the module.
*/
try_module_get(THIS_MODULE);
- netvsc_dev->vf_netdev = vf_netdev;
+ net_device_ctx->vf_netdev = vf_netdev;
return NOTIFY_OK;
}
+static void netvsc_inject_enable(struct net_device_context *net_device_ctx)
+{
+ net_device_ctx->vf_inject = true;
+}
+
+static void netvsc_inject_disable(struct net_device_context *net_device_ctx)
+{
+ net_device_ctx->vf_inject = false;
+
+ /* Wait for currently active users to drain out. */
+ while (atomic_read(&net_device_ctx->vf_use_cnt) != 0)
+ udelay(50);
+}
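
netvsc_inject_disable() is the writer half of that handshake: clear the flag first so no new readers enter, then spin until every reader that got in has dropped vf_use_cnt. Paired with the reader sketch above (same hypothetical names; the driver sleeps 50us per poll with udelay()):

    /* Writer side: stop new entries, then drain current users. */
    static void injection_disable(void)
    {
            atomic_store(&inject_enabled, false);
            /* Readers that already incremented use_cnt either see the
             * cleared flag and back out, or finish delivery; the count
             * therefore reaches zero in bounded time. */
            while (atomic_load(&use_cnt) != 0)
                    ;       /* the driver udelay(50)s here */
    }
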
static int netvsc_vf_up(struct net_device *vf_netdev)
{
@@ -1239,16 +1236,16 @@ static int netvsc_vf_up(struct net_device *vf_netdev)
net_device_ctx = netdev_priv(ndev);
netvsc_dev = net_device_ctx->nvdev;
- if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL))
+ if (!netvsc_dev || !net_device_ctx->vf_netdev)
return NOTIFY_DONE;
netdev_info(ndev, "VF up: %s\n", vf_netdev->name);
- netvsc_dev->vf_inject = true;
+ netvsc_inject_enable(net_device_ctx);
/*
* Open the device before switching data path.
*/
- rndis_filter_open(net_device_ctx->device_ctx);
+ rndis_filter_open(netvsc_dev);
/*
* notify the host to switch the data path.
@@ -1258,15 +1255,8 @@ static int netvsc_vf_up(struct net_device *vf_netdev)
netif_carrier_off(ndev);
- /*
- * Now notify peers. We are scheduling work to
- * notify peers; take a reference to prevent
- * the VF interface from vanishing.
- */
- atomic_inc(&netvsc_dev->vf_use_cnt);
- net_device_ctx->gwrk.netdev = vf_netdev;
- net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
- schedule_work(&net_device_ctx->gwrk.dwrk);
+ /* Now notify peers through VF device. */
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, vf_netdev);
return NOTIFY_OK;
}
@@ -1289,29 +1279,18 @@ static int netvsc_vf_down(struct net_device *vf_netdev)
net_device_ctx = netdev_priv(ndev);
netvsc_dev = net_device_ctx->nvdev;
- if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL))
+ if (!netvsc_dev || !net_device_ctx->vf_netdev)
return NOTIFY_DONE;
netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
- netvsc_dev->vf_inject = false;
- /*
- * Wait for currently active users to
- * drain out.
- */
-
- while (atomic_read(&netvsc_dev->vf_use_cnt) != 0)
- udelay(50);
+ netvsc_inject_disable(net_device_ctx);
netvsc_switch_datapath(ndev, false);
netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
- rndis_filter_close(net_device_ctx->device_ctx);
+ rndis_filter_close(netvsc_dev);
netif_carrier_on(ndev);
- /*
- * Notify peers.
- */
- atomic_inc(&netvsc_dev->vf_use_cnt);
- net_device_ctx->gwrk.netdev = ndev;
- net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
- schedule_work(&net_device_ctx->gwrk.dwrk);
+
+ /* Now notify peers through netvsc device. */
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev);
return NOTIFY_OK;
}
@@ -1333,11 +1312,11 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
net_device_ctx = netdev_priv(ndev);
netvsc_dev = net_device_ctx->nvdev;
- if (netvsc_dev == NULL)
+ if (!netvsc_dev || !net_device_ctx->vf_netdev)
return NOTIFY_DONE;
netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
-
- netvsc_dev->vf_netdev = NULL;
+ netvsc_inject_disable(net_device_ctx);
+ net_device_ctx->vf_netdev = NULL;
module_put(THIS_MODULE);
return NOTIFY_OK;
}
@@ -1383,11 +1362,14 @@ static int netvsc_probe(struct hv_device *dev,
INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
INIT_WORK(&net_device_ctx->work, do_set_multicast);
- INIT_WORK(&net_device_ctx->gwrk.dwrk, netvsc_notify_peers);
spin_lock_init(&net_device_ctx->lock);
INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
+ atomic_set(&net_device_ctx->vf_use_cnt, 0);
+ net_device_ctx->vf_netdev = NULL;
+ net_device_ctx->vf_inject = false;
+
net->netdev_ops = &device_ops;
net->hw_features = NETVSC_HW_FEATURES;
@@ -1500,6 +1482,15 @@ static int netvsc_netdev_event(struct notifier_block *this,
{
struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
+ /* Avoid Vlan dev with same MAC registering as VF */
+ if (event_dev->priv_flags & IFF_802_1Q_VLAN)
+ return NOTIFY_DONE;
+
+ /* Avoid Bonding master dev with same MAC registering as VF */
+ if (event_dev->priv_flags & IFF_BONDING &&
+ event_dev->flags & IFF_MASTER)
+ return NOTIFY_DONE;
+
switch (event) {
case NETDEV_REGISTER:
return netvsc_register_vf(event_dev);
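
These notifier events fire for every net_device in the system, and the VF match is done by MAC address; a VLAN upper or a bonding master stacked on the netvsc interface shares that MAC, so without the priv_flags checks above it would be mistaken for the VF. A sketch of the same classification using a stock helper (illustrative only):

    #include <linux/netdevice.h>
    #include <linux/if_vlan.h>

    /* True for devices that merely inherit a lower device's MAC. */
    static bool looks_like_stacked_dev(struct net_device *dev)
    {
            if (is_vlan_dev(dev))                   /* 802.1q upper */
                    return true;
            return (dev->priv_flags & IFF_BONDING) &&
                   (dev->flags & IFF_MASTER);       /* bond master */
    }
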
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 97c292b7d..8e830f741 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -466,7 +466,6 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
struct rndis_query_request *query;
struct rndis_query_complete *query_complete;
int ret = 0;
- unsigned long t;
if (!result)
return -EINVAL;
@@ -503,11 +502,7 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
if (ret != 0)
goto cleanup;
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
- if (t == 0) {
- ret = -ETIMEDOUT;
- goto cleanup;
- }
+ wait_for_completion(&request->wait_event);
/* Copy the response back */
query_complete = &request->response_msg.msg.query_complete;
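
This is the first of several hunks in rndis_filter.c that trade wait_for_completion_timeout() for an unbounded wait_for_completion(), on the reasoning that the host always answers the request eventually and the -ETIMEDOUT/-EBUSY unwind paths (which could not even free the request safely) were dead weight. For reference, the two APIs from <linux/completion.h> differ only in return convention:

    /* Unbounded: returns only after complete() has been called. */
    void wait_for_completion(struct completion *x);

    /* Bounded: returns 0 on timeout, else the jiffies left (>= 1);
     * hence the "t == 0" timeout test in the removed code. */
    unsigned long wait_for_completion_timeout(struct completion *x,
                                              unsigned long timeout);
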
@@ -543,11 +538,9 @@ static int rndis_filter_query_device_mac(struct rndis_device *dev)
#define NWADR_STR "NetworkAddress"
#define NWADR_STRLEN 14
-int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac)
+int rndis_filter_set_device_mac(struct net_device *ndev, char *mac)
{
- struct net_device *ndev = hv_get_drvdata(hdev);
- struct net_device_context *net_device_ctx = netdev_priv(ndev);
- struct netvsc_device *nvdev = net_device_ctx->nvdev;
+ struct netvsc_device *nvdev = net_device_to_netvsc_device(ndev);
struct rndis_device *rdev = nvdev->extension;
struct rndis_request *request;
struct rndis_set_request *set;
@@ -558,7 +551,6 @@ int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac)
u32 extlen = sizeof(struct rndis_config_parameter_info) +
2*NWADR_STRLEN + 4*ETH_ALEN;
int ret;
- unsigned long t;
request = get_rndis_request(rdev, RNDIS_MSG_SET,
RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
@@ -599,21 +591,13 @@ int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac)
if (ret != 0)
goto cleanup;
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
- if (t == 0) {
- netdev_err(ndev, "timeout before we got a set response...\n");
- /*
- * can't put_rndis_request, since we may still receive a
- * send-completion.
- */
- return -EBUSY;
- } else {
- set_complete = &request->response_msg.msg.set_complete;
- if (set_complete->status != RNDIS_STATUS_SUCCESS) {
- netdev_err(ndev, "Fail to set MAC on host side:0x%x\n",
- set_complete->status);
- ret = -EINVAL;
- }
+ wait_for_completion(&request->wait_event);
+
+ set_complete = &request->response_msg.msg.set_complete;
+ if (set_complete->status != RNDIS_STATUS_SUCCESS) {
+ netdev_err(ndev, "Fail to set MAC on host side:0x%x\n",
+ set_complete->status);
+ ret = -EINVAL;
}
cleanup:
@@ -622,12 +606,10 @@ cleanup:
}
static int
-rndis_filter_set_offload_params(struct hv_device *hdev,
+rndis_filter_set_offload_params(struct net_device *ndev,
struct ndis_offload_params *req_offloads)
{
- struct net_device *ndev = hv_get_drvdata(hdev);
- struct net_device_context *net_device_ctx = netdev_priv(ndev);
- struct netvsc_device *nvdev = net_device_ctx->nvdev;
+ struct netvsc_device *nvdev = net_device_to_netvsc_device(ndev);
struct rndis_device *rdev = nvdev->extension;
struct rndis_request *request;
struct rndis_set_request *set;
@@ -635,7 +617,6 @@ rndis_filter_set_offload_params(struct hv_device *hdev,
struct rndis_set_complete *set_complete;
u32 extlen = sizeof(struct ndis_offload_params);
int ret;
- unsigned long t;
u32 vsp_version = nvdev->nvsp_version;
if (vsp_version <= NVSP_PROTOCOL_VERSION_4) {
@@ -669,20 +650,12 @@ rndis_filter_set_offload_params(struct hv_device *hdev,
if (ret != 0)
goto cleanup;
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
- if (t == 0) {
- netdev_err(ndev, "timeout before we got aOFFLOAD set response...\n");
- /* can't put_rndis_request, since we may still receive a
- * send-completion.
- */
- return -EBUSY;
- } else {
- set_complete = &request->response_msg.msg.set_complete;
- if (set_complete->status != RNDIS_STATUS_SUCCESS) {
- netdev_err(ndev, "Fail to set offload on host side:0x%x\n",
- set_complete->status);
- ret = -EINVAL;
- }
+ wait_for_completion(&request->wait_event);
+ set_complete = &request->response_msg.msg.set_complete;
+ if (set_complete->status != RNDIS_STATUS_SUCCESS) {
+ netdev_err(ndev, "Fail to set offload on host side:0x%x\n",
+ set_complete->status);
+ ret = -EINVAL;
}
cleanup:
@@ -710,7 +683,6 @@ static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
u32 *itab;
u8 *keyp;
int i, ret;
- unsigned long t;
request = get_rndis_request(
rdev, RNDIS_MSG_SET,
@@ -753,20 +725,12 @@ static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
if (ret != 0)
goto cleanup;
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
- if (t == 0) {
- netdev_err(ndev, "timeout before we got a set response...\n");
- /* can't put_rndis_request, since we may still receive a
- * send-completion.
- */
- return -ETIMEDOUT;
- } else {
- set_complete = &request->response_msg.msg.set_complete;
- if (set_complete->status != RNDIS_STATUS_SUCCESS) {
- netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
- set_complete->status);
- ret = -EINVAL;
- }
+ wait_for_completion(&request->wait_event);
+ set_complete = &request->response_msg.msg.set_complete;
+ if (set_complete->status != RNDIS_STATUS_SUCCESS) {
+ netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
+ set_complete->status);
+ ret = -EINVAL;
}
cleanup:
@@ -795,8 +759,6 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
struct rndis_set_complete *set_complete;
u32 status;
int ret;
- unsigned long t;
- struct net_device *ndev = dev->ndev;
request = get_rndis_request(dev, RNDIS_MSG_SET,
RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
@@ -819,26 +781,14 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
if (ret != 0)
goto cleanup;
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ wait_for_completion(&request->wait_event);
- if (t == 0) {
- netdev_err(ndev,
- "timeout before we got a set response...\n");
- ret = -ETIMEDOUT;
- /*
- * We can't deallocate the request since we may still receive a
- * send completion for it.
- */
- goto exit;
- } else {
- set_complete = &request->response_msg.msg.set_complete;
- status = set_complete->status;
- }
+ set_complete = &request->response_msg.msg.set_complete;
+ status = set_complete->status;
cleanup:
if (request)
put_rndis_request(dev, request);
-exit:
return ret;
}
@@ -850,9 +800,7 @@ static int rndis_filter_init_device(struct rndis_device *dev)
struct rndis_initialize_complete *init_complete;
u32 status;
int ret;
- unsigned long t;
- struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
- struct netvsc_device *nvdev = net_device_ctx->nvdev;
+ struct netvsc_device *nvdev = net_device_to_netvsc_device(dev->ndev);
request = get_rndis_request(dev, RNDIS_MSG_INIT,
RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
@@ -875,12 +823,7 @@ static int rndis_filter_init_device(struct rndis_device *dev)
goto cleanup;
}
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
-
- if (t == 0) {
- ret = -ETIMEDOUT;
- goto cleanup;
- }
+ wait_for_completion(&request->wait_event);
init_complete = &request->response_msg.msg.init_complete;
status = init_complete->status;
@@ -977,8 +920,7 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
{
struct net_device *ndev =
hv_get_drvdata(new_sc->primary_channel->device_obj);
- struct net_device_context *net_device_ctx = netdev_priv(ndev);
- struct netvsc_device *nvscdev = net_device_ctx->nvdev;
+ struct netvsc_device *nvscdev = net_device_to_netvsc_device(ndev);
u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
int ret;
unsigned long flags;
@@ -1014,7 +956,6 @@ int rndis_filter_device_add(struct hv_device *dev,
struct netvsc_device_info *device_info = additional_info;
struct ndis_offload_params offloads;
struct nvsp_message *init_packet;
- unsigned long t;
struct ndis_recv_scale_cap rsscap;
u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
u32 mtu, size;
@@ -1088,7 +1029,7 @@ int rndis_filter_device_add(struct hv_device *dev,
offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
- ret = rndis_filter_set_offload_params(dev, &offloads);
+ ret = rndis_filter_set_offload_params(net, &offloads);
if (ret)
goto err_dev_remv;
@@ -1157,11 +1098,8 @@ int rndis_filter_device_add(struct hv_device *dev,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret)
goto out;
- t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
- if (t == 0) {
- ret = -ETIMEDOUT;
- goto out;
- }
+ wait_for_completion(&net_device->channel_init_wait);
+
if (init_packet->msg.v5_msg.subchn_comp.status !=
NVSP_STAT_SUCCESS) {
ret = -ENODEV;
@@ -1196,21 +1134,14 @@ err_dev_remv:
void rndis_filter_device_remove(struct hv_device *dev)
{
- struct net_device *ndev = hv_get_drvdata(dev);
- struct net_device_context *net_device_ctx = netdev_priv(ndev);
- struct netvsc_device *net_dev = net_device_ctx->nvdev;
+ struct netvsc_device *net_dev = hv_device_to_netvsc_device(dev);
struct rndis_device *rndis_dev = net_dev->extension;
- unsigned long t;
/* If not all subchannel offers are complete, wait for them until
* completion to avoid race.
*/
- while (net_dev->num_sc_offered > 0) {
- t = wait_for_completion_timeout(&net_dev->channel_init_wait,
- 10 * HZ);
- if (t == 0)
- WARN(1, "Netvsc: Waiting for sub-channel processing");
- }
+ if (net_dev->num_sc_offered > 0)
+ wait_for_completion(&net_dev->channel_init_wait);
/* Halt and release the rndis device */
rndis_filter_halt_device(rndis_dev);
@@ -1222,27 +1153,19 @@ void rndis_filter_device_remove(struct hv_device *dev)
}
-int rndis_filter_open(struct hv_device *dev)
+int rndis_filter_open(struct netvsc_device *nvdev)
{
- struct net_device *ndev = hv_get_drvdata(dev);
- struct net_device_context *net_device_ctx = netdev_priv(ndev);
- struct netvsc_device *net_device = net_device_ctx->nvdev;
-
- if (!net_device)
+ if (!nvdev)
return -EINVAL;
- if (atomic_inc_return(&net_device->open_cnt) != 1)
+ if (atomic_inc_return(&nvdev->open_cnt) != 1)
return 0;
- return rndis_filter_open_device(net_device->extension);
+ return rndis_filter_open_device(nvdev->extension);
}
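
rndis_filter_open() and rndis_filter_close() now take the netvsc_device directly, and their open_cnt guard means only the 0-to-1 transition performs the real open. The same first-user pattern, sketched with a C11 atomic and a hypothetical do_hw_open():

    #include <stdatomic.h>

    static atomic_int open_cnt;
    static int do_hw_open(void);    /* hypothetical hardware open */

    /* Mirrors "if (atomic_inc_return(&open_cnt) != 1) return 0;":
     * later openers just take another reference. */
    static int filter_open(void)
    {
            if (atomic_fetch_add(&open_cnt, 1) != 0)
                    return 0;
            return do_hw_open();
    }
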
-int rndis_filter_close(struct hv_device *dev)
+int rndis_filter_close(struct netvsc_device *nvdev)
{
- struct net_device *ndev = hv_get_drvdata(dev);
- struct net_device_context *net_device_ctx = netdev_priv(ndev);
- struct netvsc_device *nvdev = net_device_ctx->nvdev;
-
if (!nvdev)
return -EINVAL;
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
index 52c9051f3..1056ed142 100644
--- a/drivers/net/ieee802154/atusb.c
+++ b/drivers/net/ieee802154/atusb.c
@@ -366,11 +366,7 @@ static int atusb_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
struct atusb *atusb = hw->priv;
int ret;
- /* This implicitly sets the CCA (Clear Channel Assessment) mode to 0,
- * "Mode 3a, Carrier sense OR energy above threshold".
- * We should probably make this configurable. @@@
- */
- ret = atusb_write_reg(atusb, RG_PHY_CC_CCA, channel);
+ ret = atusb_write_subreg(atusb, SR_CHANNEL, channel);
if (ret < 0)
return ret;
msleep(1); /* @@@ ugly synchronization */
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index 860d4aed8..0becf0ac3 100644
--- a/drivers/net/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
@@ -112,6 +112,12 @@ static void fakelb_hw_stop(struct ieee802154_hw *hw)
write_unlock_bh(&fakelb_ifup_phys_lock);
}
+static int
+fakelb_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on)
+{
+ return 0;
+}
+
static const struct ieee802154_ops fakelb_ops = {
.owner = THIS_MODULE,
.xmit_async = fakelb_hw_xmit,
@@ -119,6 +125,7 @@ static const struct ieee802154_ops fakelb_ops = {
.set_channel = fakelb_hw_channel,
.start = fakelb_hw_start,
.stop = fakelb_hw_stop,
+ .set_promiscuous_mode = fakelb_set_promiscuous_mode,
};
/* Number of dummy devices to be set up by this module. */
@@ -174,6 +181,7 @@ static int fakelb_add_one(struct device *dev)
hw->phy->current_channel = 13;
phy->channel = hw->phy->current_channel;
+ hw->flags = IEEE802154_HW_PROMISCUOUS;
hw->parent = dev;
err = ieee802154_register_hw(hw);
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index f446db828..7b131f8e4 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -1054,6 +1054,8 @@ static irqreturn_t mrf24j40_isr(int irq, void *data)
disable_irq_nosync(irq);
devrec->irq_buf[0] = MRF24J40_READSHORT(REG_INTSTAT);
+ devrec->irq_buf[1] = 0;
+
/* Read the interrupt status */
ret = spi_async(devrec->spi, &devrec->irq_msg);
if (ret) {
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index d6d0524ee..b5f9511d8 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -254,6 +254,18 @@ acct:
}
}
+static void ipvlan_skb_crossing_ns(struct sk_buff *skb, struct net_device *dev)
+{
+ bool xnet = true;
+
+ if (dev)
+ xnet = !net_eq(dev_net(skb->dev), dev_net(dev));
+
+ skb_scrub_packet(skb, xnet);
+ if (dev)
+ skb->dev = dev;
+}
+
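
The new helper funnels every ipvlan transmit and enqueue path through one namespace decision: scrub namespace-local skb state exactly when the packet crosses a netns boundary, and scrub unconditionally when the target device is not yet known (the multicast enqueue case). The test reduces to comparing the namespaces of the two devices, as in this sketch (assumed helper name):

    /* Does handing this skb to 'target' cross network namespaces? */
    static bool skb_crosses_netns(const struct sk_buff *skb,
                                  const struct net_device *target)
    {
            if (!target)
                    return true;    /* unknown destination: be safe */
            /* dev_net() is the netns a device lives in; net_eq()
             * compares namespaces for identity. */
            return !net_eq(dev_net(skb->dev), dev_net(target));
    }
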
static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
bool local)
{
@@ -280,7 +292,7 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
*pskb = skb;
}
- skb->dev = dev;
+ ipvlan_skb_crossing_ns(skb, dev);
if (local) {
skb->pkt_type = PACKET_HOST;
@@ -347,7 +359,7 @@ static struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port,
return addr;
}
-static int ipvlan_process_v4_outbound(struct sk_buff *skb, bool xnet)
+static int ipvlan_process_v4_outbound(struct sk_buff *skb)
{
const struct iphdr *ip4h = ip_hdr(skb);
struct net_device *dev = skb->dev;
@@ -370,7 +382,6 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb, bool xnet)
ip_rt_put(rt);
goto err;
}
- skb_scrub_packet(skb, xnet);
skb_dst_set(skb, &rt->dst);
err = ip_local_out(net, skb->sk, skb);
if (unlikely(net_xmit_eval(err)))
@@ -385,7 +396,7 @@ out:
return ret;
}
-static int ipvlan_process_v6_outbound(struct sk_buff *skb, bool xnet)
+static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
struct net_device *dev = skb->dev;
@@ -408,7 +419,6 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb, bool xnet)
dst_release(dst);
goto err;
}
- skb_scrub_packet(skb, xnet);
skb_dst_set(skb, dst);
err = ip6_local_out(net, skb->sk, skb);
if (unlikely(net_xmit_eval(err)))
@@ -423,7 +433,7 @@ out:
return ret;
}
-static int ipvlan_process_outbound(struct sk_buff *skb, bool xnet)
+static int ipvlan_process_outbound(struct sk_buff *skb)
{
struct ethhdr *ethh = eth_hdr(skb);
int ret = NET_XMIT_DROP;
@@ -447,9 +457,9 @@ static int ipvlan_process_outbound(struct sk_buff *skb, bool xnet)
}
if (skb->protocol == htons(ETH_P_IPV6))
- ret = ipvlan_process_v6_outbound(skb, xnet);
+ ret = ipvlan_process_v6_outbound(skb);
else if (skb->protocol == htons(ETH_P_IP))
- ret = ipvlan_process_v4_outbound(skb, xnet);
+ ret = ipvlan_process_v4_outbound(skb);
else {
pr_warn_ratelimited("Dropped outbound packet type=%x\n",
ntohs(skb->protocol));
@@ -485,7 +495,6 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
void *lyr3h;
struct ipvl_addr *addr;
int addr_type;
- bool xnet;
lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
if (!lyr3h)
@@ -496,9 +505,8 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
return ipvlan_rcv_frame(addr, &skb, true);
out:
- xnet = !net_eq(dev_net(skb->dev), dev_net(ipvlan->phy_dev));
- skb->dev = ipvlan->phy_dev;
- return ipvlan_process_outbound(skb, xnet);
+ ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
+ return ipvlan_process_outbound(skb);
}
static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
@@ -528,11 +536,12 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
return dev_forward_skb(ipvlan->phy_dev, skb);
} else if (is_multicast_ether_addr(eth->h_dest)) {
+ ipvlan_skb_crossing_ns(skb, NULL);
ipvlan_multicast_enqueue(ipvlan->port, skb);
return NET_XMIT_SUCCESS;
}
- skb->dev = ipvlan->phy_dev;
+ ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
return dev_queue_xmit(skb);
}
@@ -622,8 +631,10 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
* when work-queue processes this frame. This is
* achieved by returning RX_HANDLER_PASS.
*/
- if (nskb)
+ if (nskb) {
+ ipvlan_skb_crossing_ns(nskb, NULL);
ipvlan_multicast_enqueue(port, nskb);
+ }
}
} else {
struct ipvl_addr *addr;
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 1c4d395fb..18b4e8c7f 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -80,13 +80,6 @@ static void ipvlan_port_destroy(struct net_device *dev)
kfree_rcu(port, rcu);
}
-/* ipvlan network devices have devices nesting below it and are a special
- * "super class" of normal network devices; split their locks off into a
- * separate class since they always nest.
- */
-static struct lock_class_key ipvlan_netdev_xmit_lock_key;
-static struct lock_class_key ipvlan_netdev_addr_lock_key;
-
#define IPVLAN_FEATURES \
(NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
@@ -96,19 +89,6 @@ static struct lock_class_key ipvlan_netdev_addr_lock_key;
#define IPVLAN_STATE_MASK \
((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
-static void ipvlan_set_lockdep_class_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *_unused)
-{
- lockdep_set_class(&txq->_xmit_lock, &ipvlan_netdev_xmit_lock_key);
-}
-
-static void ipvlan_set_lockdep_class(struct net_device *dev)
-{
- lockdep_set_class(&dev->addr_list_lock, &ipvlan_netdev_addr_lock_key);
- netdev_for_each_tx_queue(dev, ipvlan_set_lockdep_class_one, NULL);
-}
-
static int ipvlan_init(struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
@@ -123,7 +103,7 @@ static int ipvlan_init(struct net_device *dev)
dev->gso_max_segs = phy_dev->gso_max_segs;
dev->hard_header_len = phy_dev->hard_header_len;
- ipvlan_set_lockdep_class(dev);
+ netdev_lockdep_set_classes(dev);
ipvlan->pcpu_stats = alloc_percpu(struct ipvl_pcpu_stats);
if (!ipvlan->pcpu_stats)
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index a400288cb..6255973e3 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -169,10 +169,9 @@ static void loopback_setup(struct net_device *dev)
dev->flags = IFF_LOOPBACK;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
netif_keep_dst(dev);
- dev->hw_features = NETIF_F_ALL_TSO | NETIF_F_UFO;
+ dev->hw_features = NETIF_F_GSO_SOFTWARE;
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
- | NETIF_F_ALL_TSO
- | NETIF_F_UFO
+ | NETIF_F_GSO_SOFTWARE
| NETIF_F_HW_CSUM
| NETIF_F_RXCSUM
| NETIF_F_SCTP_CRC
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index a70b6c460..351e701eb 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -18,6 +18,7 @@
#include <linux/rtnetlink.h>
#include <net/genetlink.h>
#include <net/sock.h>
+#include <net/gro_cells.h>
#include <uapi/linux/if_macsec.h>
@@ -268,6 +269,8 @@ struct macsec_dev {
struct net_device *real_dev;
struct pcpu_secy_stats __percpu *stats;
struct list_head secys;
+ struct gro_cells gro_cells;
+ unsigned int nest_level;
};
/**
@@ -342,7 +345,6 @@ static void free_rxsa(struct rcu_head *head)
crypto_free_aead(sa->key.tfm);
free_percpu(sa->stats);
- macsec_rxsc_put(sa->sc);
kfree(sa);
}
@@ -508,7 +510,7 @@ static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
}
#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
-#define MACSEC_NEEDED_TAILROOM MACSEC_MAX_ICV_LEN
+#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN
static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
@@ -861,6 +863,7 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err)
struct net_device *dev = skb->dev;
struct macsec_dev *macsec = macsec_priv(dev);
struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
+ struct macsec_rx_sc *rx_sc = rx_sa->sc;
int len, ret;
u32 pn;
@@ -879,7 +882,7 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err)
macsec_reset_skb(skb, macsec->secy.netdev);
len = skb->len;
- ret = netif_rx(skb);
+ ret = gro_cells_receive(&macsec->gro_cells, skb);
if (ret == NET_RX_SUCCESS)
count_rx(dev, len);
else
@@ -889,6 +892,7 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err)
out:
macsec_rxsa_put(rx_sa);
+ macsec_rxsc_put(rx_sc);
dev_put(dev);
}
@@ -1051,6 +1055,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
struct pcpu_rx_sc_stats *rxsc_stats;
struct pcpu_secy_stats *secy_stats;
bool pulled_sci;
+ int ret;
if (skb_headroom(skb) < ETH_HLEN)
goto drop_direct;
@@ -1103,6 +1108,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);
+ sc = sc ? macsec_rxsc_get(sc) : NULL;
if (sc) {
secy = &macsec->secy;
@@ -1177,8 +1183,10 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
if (IS_ERR(skb)) {
/* the decrypt callback needs the reference */
- if (PTR_ERR(skb) != -EINPROGRESS)
+ if (PTR_ERR(skb) != -EINPROGRESS) {
macsec_rxsa_put(rx_sa);
+ macsec_rxsc_put(rx_sc);
+ }
rcu_read_unlock();
*pskb = NULL;
return RX_HANDLER_CONSUMED;
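
The decrypt is asynchronous, so the rx_sc reference taken at lookup time must survive until macsec_decrypt_done() runs; only the non--EINPROGRESS failure, where the callback will never fire, may drop it synchronously. The general shape of holding a reference across an async operation, sketched with stand-in names:

    #include <errno.h>

    struct obj;                             /* opaque, refcounted */
    void obj_get(struct obj *o);
    void obj_put(struct obj *o);
    int submit_async(struct obj *o, void (*cb)(struct obj *));

    static void done_cb(struct obj *o)
    {
            /* ... consume the result ... */
            obj_put(o);             /* the callback's reference */
    }

    static int start_async(struct obj *o)
    {
            int err;

            obj_get(o);             /* reference owned by done_cb() */
            err = submit_async(o, done_cb);
            if (err && err != -EINPROGRESS) {
                    obj_put(o);     /* callback will never run */
                    return err;
            }
            return 0;               /* done_cb() drops it later */
    }
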
@@ -1194,16 +1202,23 @@ deliver:
if (rx_sa)
macsec_rxsa_put(rx_sa);
- count_rx(dev, skb->len);
+ macsec_rxsc_put(rx_sc);
+
+ ret = gro_cells_receive(&macsec->gro_cells, skb);
+ if (ret == NET_RX_SUCCESS)
+ count_rx(dev, skb->len);
+ else
+ macsec->secy.netdev->stats.rx_dropped++;
rcu_read_unlock();
- *pskb = skb;
- return RX_HANDLER_ANOTHER;
+ *pskb = NULL;
+ return RX_HANDLER_CONSUMED;
drop:
macsec_rxsa_put(rx_sa);
drop_nosa:
+ macsec_rxsc_put(rx_sc);
rcu_read_unlock();
drop_direct:
kfree_skb(skb);
@@ -1219,7 +1234,6 @@ nosci:
list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
struct sk_buff *nskb;
- int ret;
secy_stats = this_cpu_ptr(macsec->stats);
@@ -1264,22 +1278,22 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
int ret;
tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
- if (!tfm || IS_ERR(tfm))
- return NULL;
+
+ if (IS_ERR(tfm))
+ return tfm;
ret = crypto_aead_setkey(tfm, key, key_len);
- if (ret < 0) {
- crypto_free_aead(tfm);
- return NULL;
- }
+ if (ret < 0)
+ goto fail;
ret = crypto_aead_setauthsize(tfm, icv_len);
- if (ret < 0) {
- crypto_free_aead(tfm);
- return NULL;
- }
+ if (ret < 0)
+ goto fail;
return tfm;
+fail:
+ crypto_free_aead(tfm);
+ return ERR_PTR(ret);
}
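
macsec_alloc_tfm() now reports failures with the kernel's ERR_PTR convention instead of collapsing everything to NULL, which is what lets init_rx_sa()/init_tx_sa() below return the real errno (-ENOMEM vs. a crypto setup error). The convention in miniature, with a hypothetical struct foo:

    #include <linux/err.h>
    #include <linux/slab.h>

    struct foo { int x; };          /* hypothetical */

    static struct foo *foo_alloc(void)
    {
            struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

            if (!f)
                    return ERR_PTR(-ENOMEM); /* errno in the pointer */
            return f;
    }

    static int foo_user(void)
    {
            struct foo *f = foo_alloc();

            if (IS_ERR(f))
                    return PTR_ERR(f);  /* recover the negative errno */
            kfree(f);
            return 0;
    }
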
static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
@@ -1287,12 +1301,12 @@ static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
{
rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
if (!rx_sa->stats)
- return -1;
+ return -ENOMEM;
rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
- if (!rx_sa->key.tfm) {
+ if (IS_ERR(rx_sa->key.tfm)) {
free_percpu(rx_sa->stats);
- return -1;
+ return PTR_ERR(rx_sa->key.tfm);
}
rx_sa->active = false;
@@ -1385,12 +1399,12 @@ static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
{
tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
if (!tx_sa->stats)
- return -1;
+ return -ENOMEM;
tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
- if (!tx_sa->key.tfm) {
+ if (IS_ERR(tx_sa->key.tfm)) {
free_percpu(tx_sa->stats);
- return -1;
+ return PTR_ERR(tx_sa->key.tfm);
}
tx_sa->active = false;
@@ -1623,6 +1637,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
unsigned char assoc_num;
struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
+ int err;
if (!attrs[MACSEC_ATTR_IFINDEX])
return -EINVAL;
@@ -1638,7 +1653,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
- if (IS_ERR(rx_sc) || !macsec_rxsc_get(rx_sc)) {
+ if (IS_ERR(rx_sc)) {
rtnl_unlock();
return PTR_ERR(rx_sc);
}
@@ -1659,13 +1674,19 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
}
rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
- if (!rx_sa || init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
- secy->key_len, secy->icv_len)) {
- kfree(rx_sa);
+ if (!rx_sa) {
rtnl_unlock();
return -ENOMEM;
}
+ err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
+ secy->key_len, secy->icv_len);
+ if (err < 0) {
+ kfree(rx_sa);
+ rtnl_unlock();
+ return err;
+ }
+
if (tb_sa[MACSEC_SA_ATTR_PN]) {
spin_lock_bh(&rx_sa->lock);
rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
@@ -1771,6 +1792,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
struct macsec_tx_sa *tx_sa;
unsigned char assoc_num;
struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
+ int err;
if (!attrs[MACSEC_ATTR_IFINDEX])
return -EINVAL;
@@ -1807,13 +1829,19 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
}
tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
- if (!tx_sa || init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
- secy->key_len, secy->icv_len)) {
- kfree(tx_sa);
+ if (!tx_sa) {
rtnl_unlock();
return -ENOMEM;
}
+ err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
+ secy->key_len, secy->icv_len);
+ if (err < 0) {
+ kfree(tx_sa);
+ rtnl_unlock();
+ return err;
+ }
+
nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
spin_lock_bh(&tx_sa->lock);
@@ -2672,15 +2700,24 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
#define MACSEC_FEATURES \
(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
+static struct lock_class_key macsec_netdev_addr_lock_key;
+
static int macsec_dev_init(struct net_device *dev)
{
struct macsec_dev *macsec = macsec_priv(dev);
struct net_device *real_dev = macsec->real_dev;
+ int err;
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
+ err = gro_cells_init(&macsec->gro_cells, dev);
+ if (err) {
+ free_percpu(dev->tstats);
+ return err;
+ }
+
dev->features = real_dev->features & MACSEC_FEATURES;
dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
@@ -2699,6 +2736,9 @@ static int macsec_dev_init(struct net_device *dev)
static void macsec_dev_uninit(struct net_device *dev)
{
+ struct macsec_dev *macsec = macsec_priv(dev);
+
+ gro_cells_destroy(&macsec->gro_cells);
free_percpu(dev->tstats);
}
@@ -2708,8 +2748,9 @@ static netdev_features_t macsec_fix_features(struct net_device *dev,
struct macsec_dev *macsec = macsec_priv(dev);
struct net_device *real_dev = macsec->real_dev;
- features &= real_dev->features & MACSEC_FEATURES;
- features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
+ features &= (real_dev->features & MACSEC_FEATURES) |
+ NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
+ features |= NETIF_F_LLTX;
return features;
}
@@ -2872,6 +2913,13 @@ static int macsec_get_iflink(const struct net_device *dev)
return macsec_priv(dev)->real_dev->ifindex;
}
+
+static int macsec_get_nest_level(struct net_device *dev)
+{
+ return macsec_priv(dev)->nest_level;
+}
+
+
static const struct net_device_ops macsec_netdev_ops = {
.ndo_init = macsec_dev_init,
.ndo_uninit = macsec_dev_uninit,
@@ -2885,6 +2933,7 @@ static const struct net_device_ops macsec_netdev_ops = {
.ndo_start_xmit = macsec_start_xmit,
.ndo_get_stats64 = macsec_get_stats64,
.ndo_get_iflink = macsec_get_iflink,
+ .ndo_get_lock_subclass = macsec_get_nest_level,
};
static const struct device_type macsec_type = {
@@ -3009,22 +3058,31 @@ static void macsec_del_dev(struct macsec_dev *macsec)
}
}
+static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct macsec_dev *macsec = macsec_priv(dev);
+ struct net_device *real_dev = macsec->real_dev;
+
+ unregister_netdevice_queue(dev, head);
+ list_del_rcu(&macsec->secys);
+ macsec_del_dev(macsec);
+ netdev_upper_dev_unlink(real_dev, dev);
+
+ macsec_generation++;
+}
+
static void macsec_dellink(struct net_device *dev, struct list_head *head)
{
struct macsec_dev *macsec = macsec_priv(dev);
struct net_device *real_dev = macsec->real_dev;
struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
- macsec_generation++;
+ macsec_common_dellink(dev, head);
- unregister_netdevice_queue(dev, head);
- list_del_rcu(&macsec->secys);
if (list_empty(&rxd->secys)) {
netdev_rx_handler_unregister(real_dev);
kfree(rxd);
}
-
- macsec_del_dev(macsec);
}
static int register_macsec_dev(struct net_device *real_dev,
@@ -3141,6 +3199,18 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
if (err < 0)
return err;
+ dev_hold(real_dev);
+
+ macsec->nest_level = dev_get_nest_level(real_dev) + 1;
+ netdev_lockdep_set_classes(dev);
+ lockdep_set_class_and_subclass(&dev->addr_list_lock,
+ &macsec_netdev_addr_lock_key,
+ macsec_get_nest_level(dev));
+
+ err = netdev_upper_dev_link(real_dev, dev);
+ if (err < 0)
+ goto unregister;
+
/* need to be already registered so that ->init has run and
* the MAC addr is set
*/
@@ -3153,12 +3223,12 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
if (rx_handler && sci_exists(real_dev, sci)) {
err = -EBUSY;
- goto unregister;
+ goto unlink;
}
err = macsec_add_dev(dev, sci, icv_len);
if (err)
- goto unregister;
+ goto unlink;
if (data)
macsec_changelink_common(dev, data);
@@ -3169,12 +3239,12 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
macsec_generation++;
- dev_hold(real_dev);
-
return 0;
del_dev:
macsec_del_dev(macsec);
+unlink:
+ netdev_upper_dev_unlink(real_dev, dev);
unregister:
unregister_netdevice(dev);
return err;
@@ -3193,14 +3263,26 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
if (data[IFLA_MACSEC_CIPHER_SUITE])
csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);
- if (data[IFLA_MACSEC_ICV_LEN])
+ if (data[IFLA_MACSEC_ICV_LEN]) {
icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
+ if (icv_len != DEFAULT_ICV_LEN) {
+ char dummy_key[DEFAULT_SAK_LEN] = { 0 };
+ struct crypto_aead *dummy_tfm;
+
+ dummy_tfm = macsec_alloc_tfm(dummy_key,
+ DEFAULT_SAK_LEN,
+ icv_len);
+ if (IS_ERR(dummy_tfm))
+ return PTR_ERR(dummy_tfm);
+ crypto_free_aead(dummy_tfm);
+ }
+ }
switch (csid) {
case MACSEC_DEFAULT_CIPHER_ID:
case MACSEC_DEFAULT_CIPHER_ALT:
if (icv_len < MACSEC_MIN_ICV_LEN ||
- icv_len > MACSEC_MAX_ICV_LEN)
+ icv_len > MACSEC_STD_ICV_LEN)
return -EINVAL;
break;
default:
@@ -3332,8 +3414,12 @@ static int macsec_notify(struct notifier_block *this, unsigned long event,
rxd = macsec_data_rtnl(real_dev);
list_for_each_entry_safe(m, n, &rxd->secys, secys) {
- macsec_dellink(m->secy.netdev, &head);
+ macsec_common_dellink(m->secy.netdev, &head);
}
+
+ netdev_rx_handler_unregister(real_dev);
+ kfree(rxd);
+
unregister_netdevice_many(&head);
break;
}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index cb01023ea..3234fcdea 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -49,6 +49,7 @@ struct macvlan_port {
bool passthru;
int count;
struct hlist_head vlan_source_hash[MACVLAN_HASH_SIZE];
+ DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ);
};
struct macvlan_source_entry {
@@ -305,11 +306,14 @@ static void macvlan_process_broadcast(struct work_struct *w)
rcu_read_unlock();
+ if (src)
+ dev_put(src->dev);
kfree_skb(skb);
}
}
static void macvlan_broadcast_enqueue(struct macvlan_port *port,
+ const struct macvlan_dev *src,
struct sk_buff *skb)
{
struct sk_buff *nskb;
@@ -319,8 +323,12 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
if (!nskb)
goto err;
+ MACVLAN_SKB_CB(nskb)->src = src;
+
spin_lock(&port->bc_queue.lock);
if (skb_queue_len(&port->bc_queue) < MACVLAN_BC_QUEUE_LEN) {
+ if (src)
+ dev_hold(src->dev);
__skb_queue_tail(&port->bc_queue, nskb);
err = 0;
}
@@ -412,6 +420,8 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
port = macvlan_port_get_rcu(skb->dev);
if (is_multicast_ether_addr(eth->h_dest)) {
+ unsigned int hash;
+
skb = ip_check_defrag(dev_net(skb->dev), skb, IP_DEFRAG_MACVLAN);
if (!skb)
return RX_HANDLER_CONSUMED;
@@ -429,8 +439,9 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
goto out;
}
- MACVLAN_SKB_CB(skb)->src = src;
- macvlan_broadcast_enqueue(port, skb);
+ hash = mc_hash(NULL, eth->h_dest);
+ if (test_bit(hash, port->mc_filter))
+ macvlan_broadcast_enqueue(port, src, skb);
return RX_HANDLER_PASS;
}
@@ -716,12 +727,12 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
}
}
-static void macvlan_set_mac_lists(struct net_device *dev)
+static void macvlan_compute_filter(unsigned long *mc_filter,
+ struct net_device *dev,
+ struct macvlan_dev *vlan)
{
- struct macvlan_dev *vlan = netdev_priv(dev);
-
if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
- bitmap_fill(vlan->mc_filter, MACVLAN_MC_FILTER_SZ);
+ bitmap_fill(mc_filter, MACVLAN_MC_FILTER_SZ);
} else {
struct netdev_hw_addr *ha;
DECLARE_BITMAP(filter, MACVLAN_MC_FILTER_SZ);
@@ -733,10 +744,33 @@ static void macvlan_set_mac_lists(struct net_device *dev)
__set_bit(mc_hash(vlan, dev->broadcast), filter);
- bitmap_copy(vlan->mc_filter, filter, MACVLAN_MC_FILTER_SZ);
+ bitmap_copy(mc_filter, filter, MACVLAN_MC_FILTER_SZ);
}
+}
+
+static void macvlan_set_mac_lists(struct net_device *dev)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+
+ macvlan_compute_filter(vlan->mc_filter, dev, vlan);
+
dev_uc_sync(vlan->lowerdev, dev);
dev_mc_sync(vlan->lowerdev, dev);
+
+ /* This is slightly inaccurate as we're including the subscription
+ * list of vlan->lowerdev too.
+ *
+ * Bug alert: This only works if everyone has the same broadcast
+ * address as lowerdev. As soon as someone changes theirs this
+ * will break.
+ *
+ * However, this is already broken as when you change your broadcast
+ * address we don't get called.
+ *
+ * The solution is to maintain a list of broadcast addresses like
+ * we do for uc/mc, if you care.
+ */
+ macvlan_compute_filter(vlan->port->mc_filter, vlan->lowerdev, NULL);
}
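
macvlan_compute_filter() now has two consumers: each macvlan's own mc_filter, and the new port-wide filter computed over the lower device's address list (the vlan == NULL call). The receive path consults the port filter before cloning a multicast frame for the broadcast queue, so unsubscribed groups are dropped without the clone. The underlying structure is a hash-indexed bitmap, roughly as below (illustrative hash; macvlan's real mc_hash and width differ):

    #include <linux/bitmap.h>
    #include <linux/crc32.h>
    #include <linux/etherdevice.h>

    #define FILTER_BITS 256

    static DECLARE_BITMAP(mc_filter, FILTER_BITS);

    static void filter_add(const unsigned char *addr)
    {
            __set_bit(ether_crc(ETH_ALEN, addr) % FILTER_BITS, mc_filter);
    }

    static bool filter_may_match(const unsigned char *addr)
    {
            /* Hash collisions give false positives, never false
             * negatives, so dropping on a miss is always safe. */
            return test_bit(ether_crc(ETH_ALEN, addr) % FILTER_BITS,
                            mc_filter);
    }
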
static int macvlan_change_mtu(struct net_device *dev, int new_mtu)
@@ -754,7 +788,6 @@ static int macvlan_change_mtu(struct net_device *dev, int new_mtu)
* "super class" of normal network devices; split their locks off into a
* separate class since they always nest.
*/
-static struct lock_class_key macvlan_netdev_xmit_lock_key;
static struct lock_class_key macvlan_netdev_addr_lock_key;
#define ALWAYS_ON_FEATURES \
@@ -775,20 +808,12 @@ static int macvlan_get_nest_level(struct net_device *dev)
return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
}
-static void macvlan_set_lockdep_class_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *_unused)
-{
- lockdep_set_class(&txq->_xmit_lock,
- &macvlan_netdev_xmit_lock_key);
-}
-
static void macvlan_set_lockdep_class(struct net_device *dev)
{
+ netdev_lockdep_set_classes(dev);
lockdep_set_class_and_subclass(&dev->addr_list_lock,
&macvlan_netdev_addr_lock_key,
macvlan_get_nest_level(dev));
- netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL);
}
static int macvlan_init(struct net_device *dev)
@@ -1290,7 +1315,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
vlan->dev = dev;
vlan->port = port;
vlan->set_features = MACVLAN_FEATURES;
- vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1;
+ vlan->nest_level = dev_get_nest_level(lowerdev) + 1;
vlan->mode = MACVLAN_MODE_VEPA;
if (data && data[IFLA_MACVLAN_MODE])
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index bd6720962..070e3290a 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -21,6 +21,7 @@
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/virtio_net.h>
+#include <linux/skb_array.h>
/*
* A macvtap queue is the central object of this driver, it connects
@@ -43,6 +44,7 @@ struct macvtap_queue {
u16 queue_index;
bool enabled;
struct list_head next;
+ struct skb_array skb_array;
};
#define MACVTAP_FEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)
@@ -299,6 +301,9 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
if (!numvtaps)
goto out;
+ if (numvtaps == 1)
+ goto single;
+
/* Check if we can use flow to select a queue */
rxq = skb_get_hash(skb);
if (rxq) {
@@ -316,6 +321,7 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
goto out;
}
+single:
tap = rcu_dereference(vlan->taps[0]);
out:
return tap;
@@ -362,7 +368,7 @@ static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
if (!q)
return RX_HANDLER_PASS;
- if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
+ if (__skb_array_full(&q->skb_array))
goto drop;
skb_push(skb, ETH_HLEN);
@@ -380,7 +386,8 @@ static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
goto drop;
if (!segs) {
- skb_queue_tail(&q->sk.sk_receive_queue, skb);
+ if (skb_array_produce(&q->skb_array, skb))
+ goto drop;
goto wake_up;
}
@@ -389,7 +396,11 @@ static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
struct sk_buff *nskb = segs->next;
segs->next = NULL;
- skb_queue_tail(&q->sk.sk_receive_queue, segs);
+ if (skb_array_produce(&q->skb_array, segs)) {
+ kfree_skb(segs);
+ kfree_skb_list(nskb);
+ break;
+ }
segs = nskb;
}
} else {
@@ -402,7 +413,8 @@ static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
!(features & NETIF_F_CSUM_MASK) &&
skb_checksum_help(skb))
goto drop;
- skb_queue_tail(&q->sk.sk_receive_queue, skb);
+ if (skb_array_produce(&q->skb_array, skb))
+ goto drop;
}
wake_up:
@@ -519,7 +531,9 @@ static void macvtap_sock_write_space(struct sock *sk)
static void macvtap_sock_destruct(struct sock *sk)
{
- skb_queue_purge(&sk->sk_receive_queue);
+ struct macvtap_queue *q = container_of(sk, struct macvtap_queue, sk);
+
+ skb_array_cleanup(&q->skb_array);
}
static int macvtap_open(struct inode *inode, struct file *file)
@@ -532,13 +546,13 @@ static int macvtap_open(struct inode *inode, struct file *file)
rtnl_lock();
dev = dev_get_by_macvtap_minor(iminor(inode));
if (!dev)
- goto out;
+ goto err;
err = -ENOMEM;
q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
&macvtap_proto, 0);
if (!q)
- goto out;
+ goto err;
RCU_INIT_POINTER(q->sock.wq, &q->wq);
init_waitqueue_head(&q->wq.wait);
@@ -562,11 +576,24 @@ static int macvtap_open(struct inode *inode, struct file *file)
if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG))
sock_set_flag(&q->sk, SOCK_ZEROCOPY);
+ err = -ENOMEM;
+ if (skb_array_init(&q->skb_array, dev->tx_queue_len, GFP_KERNEL))
+ goto err_array;
+
err = macvtap_set_queue(dev, file, q);
if (err)
- sock_put(&q->sk);
+ goto err_queue;
-out:
+ dev_put(dev);
+
+ rtnl_unlock();
+ return err;
+
+err_queue:
+ skb_array_cleanup(&q->skb_array);
+err_array:
+ sock_put(&q->sk);
+err:
if (dev)
dev_put(dev);
@@ -592,7 +619,7 @@ static unsigned int macvtap_poll(struct file *file, poll_table * wait)
mask = 0;
poll_wait(file, &q->wq.wait, wait);
- if (!skb_queue_empty(&q->sk.sk_receive_queue))
+ if (!skb_array_empty(&q->skb_array))
mask |= POLLIN | POLLRDNORM;
if (sock_writeable(&q->sk) ||
@@ -627,93 +654,6 @@ static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
return skb;
}
-/*
- * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
- * be shared with the tun/tap driver.
- */
-static int macvtap_skb_from_vnet_hdr(struct macvtap_queue *q,
- struct sk_buff *skb,
- struct virtio_net_hdr *vnet_hdr)
-{
- unsigned short gso_type = 0;
- if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
- switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
- case VIRTIO_NET_HDR_GSO_TCPV4:
- gso_type = SKB_GSO_TCPV4;
- break;
- case VIRTIO_NET_HDR_GSO_TCPV6:
- gso_type = SKB_GSO_TCPV6;
- break;
- case VIRTIO_NET_HDR_GSO_UDP:
- gso_type = SKB_GSO_UDP;
- break;
- default:
- return -EINVAL;
- }
-
- if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
- gso_type |= SKB_GSO_TCP_ECN;
-
- if (vnet_hdr->gso_size == 0)
- return -EINVAL;
- }
-
- if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
- if (!skb_partial_csum_set(skb, macvtap16_to_cpu(q, vnet_hdr->csum_start),
- macvtap16_to_cpu(q, vnet_hdr->csum_offset)))
- return -EINVAL;
- }
-
- if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
- skb_shinfo(skb)->gso_size = macvtap16_to_cpu(q, vnet_hdr->gso_size);
- skb_shinfo(skb)->gso_type = gso_type;
-
- /* Header must be checked, and gso_segs computed. */
- skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
- skb_shinfo(skb)->gso_segs = 0;
- }
- return 0;
-}
-
-static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
- const struct sk_buff *skb,
- struct virtio_net_hdr *vnet_hdr)
-{
- memset(vnet_hdr, 0, sizeof(*vnet_hdr));
-
- if (skb_is_gso(skb)) {
- struct skb_shared_info *sinfo = skb_shinfo(skb);
-
- /* This is a hint as to how much should be linear. */
- vnet_hdr->hdr_len = cpu_to_macvtap16(q, skb_headlen(skb));
- vnet_hdr->gso_size = cpu_to_macvtap16(q, sinfo->gso_size);
- if (sinfo->gso_type & SKB_GSO_TCPV4)
- vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
- else if (sinfo->gso_type & SKB_GSO_TCPV6)
- vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
- else if (sinfo->gso_type & SKB_GSO_UDP)
- vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
- else
- BUG();
- if (sinfo->gso_type & SKB_GSO_TCP_ECN)
- vnet_hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
- } else
- vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
-
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- if (skb_vlan_tag_present(skb))
- vnet_hdr->csum_start = cpu_to_macvtap16(q,
- skb_checksum_start_offset(skb) + VLAN_HLEN);
- else
- vnet_hdr->csum_start = cpu_to_macvtap16(q,
- skb_checksum_start_offset(skb));
- vnet_hdr->csum_offset = cpu_to_macvtap16(q, skb->csum_offset);
- } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
- vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
- } /* else everything is zero */
-}
-
/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN)
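
The two converters deleted above were an acknowledged duplicate of tun's; they are superseded by shared helpers in <linux/virtio_net.h>, leaving only the endianness choice (macvtap_is_little_endian()) in the driver. Both helpers are static inlines with these shapes:

    /* Userspace virtio_net_hdr -> skb GSO/csum state; -EINVAL on a
     * malformed header. */
    int virtio_net_hdr_to_skb(struct sk_buff *skb,
                              const struct virtio_net_hdr *hdr,
                              bool little_endian);

    /* The reverse: describe an outgoing skb as a virtio_net_hdr. */
    int virtio_net_hdr_from_skb(const struct sk_buff *skb,
                                struct virtio_net_hdr *hdr,
                                bool little_endian);
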
@@ -812,7 +752,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
skb->protocol = eth_hdr(skb)->h_proto;
if (vnet_hdr_len) {
- err = macvtap_skb_from_vnet_hdr(q, skb, &vnet_hdr);
+ err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
+ macvtap_is_little_endian(q));
if (err)
goto err_kfree;
}
@@ -880,7 +821,10 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
if (iov_iter_count(iter) < vnet_hdr_len)
return -EINVAL;
- macvtap_skb_to_vnet_hdr(q, skb, &vnet_hdr);
+ ret = virtio_net_hdr_from_skb(skb, &vnet_hdr,
+ macvtap_is_little_endian(q));
+ if (ret)
+ BUG();
if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
sizeof(vnet_hdr))
@@ -935,7 +879,7 @@ static ssize_t macvtap_do_read(struct macvtap_queue *q,
TASK_INTERRUPTIBLE);
/* Read frames from the queue */
- skb = skb_dequeue(&q->sk.sk_receive_queue);
+ skb = skb_array_consume(&q->skb_array);
if (skb)
break;
if (noblock) {
@@ -1259,10 +1203,18 @@ static int macvtap_recvmsg(struct socket *sock, struct msghdr *m,
return ret;
}
+static int macvtap_peek_len(struct socket *sock)
+{
+ struct macvtap_queue *q = container_of(sock, struct macvtap_queue,
+ sock);
+ return skb_array_peek_len(&q->skb_array);
+}
+
/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops macvtap_socket_ops = {
.sendmsg = macvtap_sendmsg,
.recvmsg = macvtap_recvmsg,
+ .peek_len = macvtap_peek_len,
};
/* Get an underlying socket object from tun file. Returns error unless file is
@@ -1281,6 +1233,28 @@ struct socket *macvtap_get_socket(struct file *file)
}
EXPORT_SYMBOL_GPL(macvtap_get_socket);
+static int macvtap_queue_resize(struct macvlan_dev *vlan)
+{
+ struct net_device *dev = vlan->dev;
+ struct macvtap_queue *q;
+ struct skb_array **arrays;
+ int n = vlan->numqueues;
+ int ret, i = 0;
+
+ arrays = kmalloc(sizeof *arrays * n, GFP_KERNEL);
+ if (!arrays)
+ return -ENOMEM;
+
+ list_for_each_entry(q, &vlan->queue_list, next)
+ arrays[i++] = &q->skb_array;
+
+ ret = skb_array_resize_multiple(arrays, n,
+ dev->tx_queue_len, GFP_KERNEL);
+
+ kfree(arrays);
+ return ret;
+}
+
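
macvtap_queue_resize() collects one skb_array per queue and resizes them all in a single call, which is why skb_array_resize_multiple() takes an array of rings; it is hooked up below to the new NETDEV_CHANGE_TX_QUEUE_LEN notifier event. The skb_array calls this patch leans on, all from <linux/skb_array.h> (sizes are in packets, not bytes):

    int skb_array_init(struct skb_array *a, int size, gfp_t gfp);
    int skb_array_produce(struct skb_array *a, struct sk_buff *skb);
    struct sk_buff *skb_array_consume(struct skb_array *a);
    int skb_array_peek_len(struct skb_array *a);
    void skb_array_cleanup(struct skb_array *a);
    int skb_array_resize_multiple(struct skb_array **rings, int nrings,
                                  int size, gfp_t gfp);
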
static int macvtap_device_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
@@ -1328,6 +1302,10 @@ static int macvtap_device_event(struct notifier_block *unused,
device_destroy(&macvtap_class, devt);
macvtap_free_minor(vlan);
break;
+ case NETDEV_CHANGE_TX_QUEUE_LEN:
+ if (macvtap_queue_resize(vlan))
+ return NOTIFY_BAD;
+ break;
}
return NOTIFY_DONE;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 6dad9a9c3..b4863e4e5 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -12,6 +12,9 @@ menuconfig PHYLIB
if PHYLIB
+config SWPHY
+ bool
+
comment "MII PHY device drivers"
config AQUANTIA_PHY
@@ -159,6 +162,7 @@ config MICROCHIP_PHY
config FIXED_PHY
tristate "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
depends on PHYLIB
+ select SWPHY
---help---
Adds the platform "fixed" MDIO Bus to cover the boards that use
PHYs that are not connected to the real MDIO bus.
@@ -254,6 +258,17 @@ config MDIO_BUS_MUX_MMIOREG
Currently, only 8-bit registers are supported.
+config MDIO_BUS_MUX_BCM_IPROC
+ tristate "Support for iProc based MDIO bus multiplexers"
+ depends on OF && OF_MDIO && (ARCH_BCM_IPROC || COMPILE_TEST)
+ select MDIO_BUS_MUX
+ default ARCH_BCM_IPROC
+ help
+ This module provides a driver for MDIO bus multiplexers found in
+ iProc based Broadcom SoCs. This multiplexer connects one of several
+ child MDIO buses to a parent bus. Buses can be internal as well as
+ external, and the selection logic lives inside the multiplexer itself.
+
config MDIO_BCM_UNIMAC
tristate "Broadcom UniMAC MDIO bus controller"
depends on HAS_IOMEM
@@ -271,6 +286,28 @@ config MDIO_BCM_IPROC
This module provides a driver for the MDIO busses found in the
Broadcom iProc SoCs.
+config INTEL_XWAY_PHY
+ tristate "Driver for Intel XWAY PHYs"
+ ---help---
+ Supports the Intel XWAY (formerly Lantiq) 11G and 22E PHYs.
+ These PHYs are marked as standalone chips under the names
+ PEF 7061, PEF 7071 and PEF 7072 or integrated into the Intel
+ SoCs xRX200, xRX300, xRX330, xRX350 and xRX550.
+
+config MDIO_HISI_FEMAC
+ tristate "Hisilicon FEMAC MDIO bus controller"
+ depends on HAS_IOMEM && OF_MDIO
+ help
+ This module provides a driver for the MDIO busses found in the
+ Hisilicon SoCs that have a Fast Ethernet MAC.
+
+config MDIO_XGENE
+ tristate "APM X-Gene SoC MDIO bus controller"
+ depends on ARCH_XGENE || COMPILE_TEST
+ help
+ This module provides a driver for the MDIO busses found in the
+ APM X-Gene SoCs.
+
endif # PHYLIB
config MICREL_KS8995MA
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index fcdbb9299..534dfa74d 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -1,6 +1,7 @@
# Makefile for Linux PHY drivers
-libphy-objs := phy.o phy_device.o mdio_bus.o mdio_device.o
+libphy-y := phy.o phy_device.o mdio_bus.o mdio_device.o
+libphy-$(CONFIG_SWPHY) += swphy.o
obj-$(CONFIG_PHYLIB) += libphy.o
obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o
@@ -39,8 +40,12 @@ obj-$(CONFIG_AMD_PHY) += amd.o
obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
+obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o
obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o
obj-$(CONFIG_MICROCHIP_PHY) += microchip.o
obj-$(CONFIG_MDIO_BCM_IPROC) += mdio-bcm-iproc.o
+obj-$(CONFIG_INTEL_XWAY_PHY) += intel-xway.o
+obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o
+obj-$(CONFIG_MDIO_XGENE) += mdio-xgene.o
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 9ec7f7353..c649c101b 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -23,9 +23,10 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/gpio.h>
+#include <linux/seqlock.h>
#include <linux/idr.h>
-#define MII_REGS_NUM 29
+#include "swphy.h"
struct fixed_mdio_bus {
struct mii_bus *mii_bus;
@@ -34,8 +35,8 @@ struct fixed_mdio_bus {
struct fixed_phy {
int addr;
- u16 regs[MII_REGS_NUM];
struct phy_device *phydev;
+ seqcount_t seqcount;
struct fixed_phy_status status;
int (*link_update)(struct net_device *, struct fixed_phy_status *);
struct list_head node;
@@ -47,103 +48,10 @@ static struct fixed_mdio_bus platform_fmb = {
.phys = LIST_HEAD_INIT(platform_fmb.phys),
};
-static int fixed_phy_update_regs(struct fixed_phy *fp)
+static void fixed_phy_update(struct fixed_phy *fp)
{
- u16 bmsr = BMSR_ANEGCAPABLE;
- u16 bmcr = 0;
- u16 lpagb = 0;
- u16 lpa = 0;
-
if (gpio_is_valid(fp->link_gpio))
fp->status.link = !!gpio_get_value_cansleep(fp->link_gpio);
-
- if (fp->status.duplex) {
- switch (fp->status.speed) {
- case 1000:
- bmsr |= BMSR_ESTATEN;
- break;
- case 100:
- bmsr |= BMSR_100FULL;
- break;
- case 10:
- bmsr |= BMSR_10FULL;
- break;
- default:
- break;
- }
- } else {
- switch (fp->status.speed) {
- case 1000:
- bmsr |= BMSR_ESTATEN;
- break;
- case 100:
- bmsr |= BMSR_100HALF;
- break;
- case 10:
- bmsr |= BMSR_10HALF;
- break;
- default:
- break;
- }
- }
-
- if (fp->status.link) {
- bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE;
-
- if (fp->status.duplex) {
- bmcr |= BMCR_FULLDPLX;
-
- switch (fp->status.speed) {
- case 1000:
- bmcr |= BMCR_SPEED1000;
- lpagb |= LPA_1000FULL;
- break;
- case 100:
- bmcr |= BMCR_SPEED100;
- lpa |= LPA_100FULL;
- break;
- case 10:
- lpa |= LPA_10FULL;
- break;
- default:
- pr_warn("fixed phy: unknown speed\n");
- return -EINVAL;
- }
- } else {
- switch (fp->status.speed) {
- case 1000:
- bmcr |= BMCR_SPEED1000;
- lpagb |= LPA_1000HALF;
- break;
- case 100:
- bmcr |= BMCR_SPEED100;
- lpa |= LPA_100HALF;
- break;
- case 10:
- lpa |= LPA_10HALF;
- break;
- default:
- pr_warn("fixed phy: unknown speed\n");
- return -EINVAL;
- }
- }
-
- if (fp->status.pause)
- lpa |= LPA_PAUSE_CAP;
-
- if (fp->status.asym_pause)
- lpa |= LPA_PAUSE_ASYM;
- }
-
- fp->regs[MII_PHYSID1] = 0;
- fp->regs[MII_PHYSID2] = 0;
-
- fp->regs[MII_BMSR] = bmsr;
- fp->regs[MII_BMCR] = bmcr;
- fp->regs[MII_LPA] = lpa;
- fp->regs[MII_STAT1000] = lpagb;
-
- return 0;
}
static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num)
@@ -151,29 +59,23 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num)
struct fixed_mdio_bus *fmb = bus->priv;
struct fixed_phy *fp;
- if (reg_num >= MII_REGS_NUM)
- return -1;
-
- /* We do not support emulating Clause 45 over Clause 22 register reads
- * return an error instead of bogus data.
- */
- switch (reg_num) {
- case MII_MMD_CTRL:
- case MII_MMD_DATA:
- return -1;
- default:
- break;
- }
-
list_for_each_entry(fp, &fmb->phys, node) {
if (fp->addr == phy_addr) {
- /* Issue callback if user registered it. */
- if (fp->link_update) {
- fp->link_update(fp->phydev->attached_dev,
- &fp->status);
- fixed_phy_update_regs(fp);
- }
- return fp->regs[reg_num];
+ struct fixed_phy_status state;
+ int s;
+
+ do {
+ s = read_seqcount_begin(&fp->seqcount);
+ /* Issue callback if user registered it. */
+ if (fp->link_update) {
+ fp->link_update(fp->phydev->attached_dev,
+ &fp->status);
+ fixed_phy_update(fp);
+ }
+ state = fp->status;
+ } while (read_seqcount_retry(&fp->seqcount, s));
+
+ return swphy_read_reg(reg_num, &state);
}
}
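
With the emulated register file gone, fixed_mdio_read() derives register values on demand from a status snapshot taken under a seqcount, so a concurrent fixed_phy_update_state() (which brackets its writes with write_seqcount_begin/end) can never be observed half-applied. The reader/writer pattern in isolation, with a hypothetical status struct:

    #include <linux/seqlock.h>

    struct status { int link, speed, duplex; }; /* hypothetical */

    static seqcount_t seq;  /* seqcount_init(&seq) at probe time */
    static struct status cur;

    /* Writer: the sequence is odd while an update is in flight.
     * (Writers must already be serialized against each other.) */
    static void status_update(const struct status *s)
    {
            write_seqcount_begin(&seq);
            cur = *s;
            write_seqcount_end(&seq);
    }

    /* Reader: retry until a pass completes with no writer activity. */
    static struct status status_snapshot(void)
    {
            struct status s;
            unsigned int start;

            do {
                    start = read_seqcount_begin(&seq);
                    s = cur;
            } while (read_seqcount_retry(&seq, start));
            return s;
    }
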
@@ -225,6 +127,7 @@ int fixed_phy_update_state(struct phy_device *phydev,
list_for_each_entry(fp, &fmb->phys, node) {
if (fp->addr == phydev->mdio.addr) {
+ write_seqcount_begin(&fp->seqcount);
#define _UPD(x) if (changed->x) \
fp->status.x = status->x
_UPD(link);
@@ -233,7 +136,8 @@ int fixed_phy_update_state(struct phy_device *phydev,
_UPD(pause);
_UPD(asym_pause);
#undef _UPD
- fixed_phy_update_regs(fp);
+ fixed_phy_update(fp);
+ write_seqcount_end(&fp->seqcount);
return 0;
}
}
@@ -250,11 +154,15 @@ int fixed_phy_add(unsigned int irq, int phy_addr,
struct fixed_mdio_bus *fmb = &platform_fmb;
struct fixed_phy *fp;
+ ret = swphy_validate_state(status);
+ if (ret < 0)
+ return ret;
+
fp = kzalloc(sizeof(*fp), GFP_KERNEL);
if (!fp)
return -ENOMEM;
- memset(fp->regs, 0xFF, sizeof(fp->regs[0]) * MII_REGS_NUM);
+ seqcount_init(&fp->seqcount);
if (irq != PHY_POLL)
fmb->mii_bus->irq[phy_addr] = irq;
@@ -270,17 +178,12 @@ int fixed_phy_add(unsigned int irq, int phy_addr,
goto err_regs;
}
- ret = fixed_phy_update_regs(fp);
- if (ret)
- goto err_gpio;
+ fixed_phy_update(fp);
list_add_tail(&fp->node, &fmb->phys);
return 0;
-err_gpio:
- if (gpio_is_valid(fp->link_gpio))
- gpio_free(fp->link_gpio);
err_regs:
kfree(fp);
return ret;
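
The read path above is the classic seqcount retry loop: the reader keeps re-snapshotting fp->status until no writer raced with it, while fixed_phy_update_state() brackets its updates with write_seqcount_begin()/write_seqcount_end(). A minimal sketch of the same pattern, with a hypothetical struct sample standing in for the fixed-PHY state (illustration only, not part of this patch):

#include <linux/seqlock.h>

struct sample {
	seqcount_t seq;		/* guards speed and duplex below */
	int speed;
	int duplex;
};

/* Writer side: writers must be serialized externally (e.g. under a lock);
 * the seqcount only protects readers against in-progress updates.
 */
static void sample_write(struct sample *s, int speed, int duplex)
{
	write_seqcount_begin(&s->seq);
	s->speed = speed;
	s->duplex = duplex;
	write_seqcount_end(&s->seq);
}

/* Reader side: lock-free; retry until a consistent snapshot is observed. */
static void sample_read(struct sample *s, int *speed, int *duplex)
{
	unsigned int start;

	do {
		start = read_seqcount_begin(&s->seq);
		*speed = s->speed;
		*duplex = s->duplex;
	} while (read_seqcount_retry(&s->seq, start));
}
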
diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
new file mode 100644
index 000000000..c300ab558
--- /dev/null
+++ b/drivers/net/phy/intel-xway.c
@@ -0,0 +1,376 @@
+/*
+ * Copyright (C) 2012 Daniel Schwierzeck <daniel.schwierzeck@googlemail.com>
+ * Copyright (C) 2016 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+
+#define XWAY_MDIO_IMASK 0x19 /* interrupt mask */
+#define XWAY_MDIO_ISTAT 0x1A /* interrupt status */
+
+#define XWAY_MDIO_INIT_WOL BIT(15) /* Wake-On-LAN */
+#define XWAY_MDIO_INIT_MSRE BIT(14)
+#define XWAY_MDIO_INIT_NPRX BIT(13)
+#define XWAY_MDIO_INIT_NPTX BIT(12)
+#define XWAY_MDIO_INIT_ANE BIT(11) /* Auto-Neg error */
+#define XWAY_MDIO_INIT_ANC BIT(10) /* Auto-Neg complete */
+#define XWAY_MDIO_INIT_ADSC BIT(5) /* Link auto-downspeed detect */
+#define XWAY_MDIO_INIT_MPIPC BIT(4)
+#define XWAY_MDIO_INIT_MDIXC BIT(3)
+#define XWAY_MDIO_INIT_DXMC BIT(2) /* Duplex mode change */
+#define XWAY_MDIO_INIT_LSPC BIT(1) /* Link speed change */
+#define XWAY_MDIO_INIT_LSTC BIT(0) /* Link state change */
+#define XWAY_MDIO_INIT_MASK (XWAY_MDIO_INIT_LSTC | \
+ XWAY_MDIO_INIT_ADSC)
+
+#define ADVERTISED_MPD BIT(10) /* Multi-port device */
+
+/* LED Configuration */
+#define XWAY_MMD_LEDCH 0x01E0
+/* Inverse of SCAN Function */
+#define XWAY_MMD_LEDCH_NACS_NONE 0x0000
+#define XWAY_MMD_LEDCH_NACS_LINK 0x0001
+#define XWAY_MMD_LEDCH_NACS_PDOWN 0x0002
+#define XWAY_MMD_LEDCH_NACS_EEE 0x0003
+#define XWAY_MMD_LEDCH_NACS_ANEG 0x0004
+#define XWAY_MMD_LEDCH_NACS_ABIST 0x0005
+#define XWAY_MMD_LEDCH_NACS_CDIAG 0x0006
+#define XWAY_MMD_LEDCH_NACS_TEST 0x0007
+/* Slow Blink Frequency */
+#define XWAY_MMD_LEDCH_SBF_F02HZ 0x0000
+#define XWAY_MMD_LEDCH_SBF_F04HZ 0x0010
+#define XWAY_MMD_LEDCH_SBF_F08HZ 0x0020
+#define XWAY_MMD_LEDCH_SBF_F16HZ 0x0030
+/* Fast Blink Frequency */
+#define XWAY_MMD_LEDCH_FBF_F02HZ 0x0000
+#define XWAY_MMD_LEDCH_FBF_F04HZ 0x0040
+#define XWAY_MMD_LEDCH_FBF_F08HZ 0x0080
+#define XWAY_MMD_LEDCH_FBF_F16HZ 0x00C0
+/* LED Configuration */
+#define XWAY_MMD_LEDCL 0x01E1
+/* Complex Blinking Configuration */
+#define XWAY_MMD_LEDCH_CBLINK_NONE 0x0000
+#define XWAY_MMD_LEDCH_CBLINK_LINK 0x0001
+#define XWAY_MMD_LEDCH_CBLINK_PDOWN 0x0002
+#define XWAY_MMD_LEDCH_CBLINK_EEE 0x0003
+#define XWAY_MMD_LEDCH_CBLINK_ANEG 0x0004
+#define XWAY_MMD_LEDCH_CBLINK_ABIST 0x0005
+#define XWAY_MMD_LEDCH_CBLINK_CDIAG 0x0006
+#define XWAY_MMD_LEDCH_CBLINK_TEST 0x0007
+/* Complex SCAN Configuration */
+#define XWAY_MMD_LEDCH_SCAN_NONE 0x0000
+#define XWAY_MMD_LEDCH_SCAN_LINK 0x0010
+#define XWAY_MMD_LEDCH_SCAN_PDOWN 0x0020
+#define XWAY_MMD_LEDCH_SCAN_EEE 0x0030
+#define XWAY_MMD_LEDCH_SCAN_ANEG 0x0040
+#define XWAY_MMD_LEDCH_SCAN_ABIST 0x0050
+#define XWAY_MMD_LEDCH_SCAN_CDIAG 0x0060
+#define XWAY_MMD_LEDCH_SCAN_TEST 0x0070
+/* Configuration for LED Pin x */
+#define XWAY_MMD_LED0H 0x01E2
+/* Fast Blinking Configuration */
+#define XWAY_MMD_LEDxH_BLINKF_MASK 0x000F
+#define XWAY_MMD_LEDxH_BLINKF_NONE 0x0000
+#define XWAY_MMD_LEDxH_BLINKF_LINK10 0x0001
+#define XWAY_MMD_LEDxH_BLINKF_LINK100 0x0002
+#define XWAY_MMD_LEDxH_BLINKF_LINK10X 0x0003
+#define XWAY_MMD_LEDxH_BLINKF_LINK1000 0x0004
+#define XWAY_MMD_LEDxH_BLINKF_LINK10_0 0x0005
+#define XWAY_MMD_LEDxH_BLINKF_LINK100X 0x0006
+#define XWAY_MMD_LEDxH_BLINKF_LINK10XX 0x0007
+#define XWAY_MMD_LEDxH_BLINKF_PDOWN 0x0008
+#define XWAY_MMD_LEDxH_BLINKF_EEE 0x0009
+#define XWAY_MMD_LEDxH_BLINKF_ANEG 0x000A
+#define XWAY_MMD_LEDxH_BLINKF_ABIST 0x000B
+#define XWAY_MMD_LEDxH_BLINKF_CDIAG 0x000C
+/* Constant On Configuration */
+#define XWAY_MMD_LEDxH_CON_MASK 0x00F0
+#define XWAY_MMD_LEDxH_CON_NONE 0x0000
+#define XWAY_MMD_LEDxH_CON_LINK10 0x0010
+#define XWAY_MMD_LEDxH_CON_LINK100 0x0020
+#define XWAY_MMD_LEDxH_CON_LINK10X 0x0030
+#define XWAY_MMD_LEDxH_CON_LINK1000 0x0040
+#define XWAY_MMD_LEDxH_CON_LINK10_0 0x0050
+#define XWAY_MMD_LEDxH_CON_LINK100X 0x0060
+#define XWAY_MMD_LEDxH_CON_LINK10XX 0x0070
+#define XWAY_MMD_LEDxH_CON_PDOWN 0x0080
+#define XWAY_MMD_LEDxH_CON_EEE 0x0090
+#define XWAY_MMD_LEDxH_CON_ANEG 0x00A0
+#define XWAY_MMD_LEDxH_CON_ABIST 0x00B0
+#define XWAY_MMD_LEDxH_CON_CDIAG 0x00C0
+#define XWAY_MMD_LEDxH_CON_COPPER 0x00D0
+#define XWAY_MMD_LEDxH_CON_FIBER 0x00E0
+/* Configuration for LED Pin x */
+#define XWAY_MMD_LED0L 0x01E3
+/* Pulsing Configuration */
+#define XWAY_MMD_LEDxL_PULSE_MASK 0x000F
+#define XWAY_MMD_LEDxL_PULSE_NONE 0x0000
+#define XWAY_MMD_LEDxL_PULSE_TXACT 0x0001
+#define XWAY_MMD_LEDxL_PULSE_RXACT 0x0002
+#define XWAY_MMD_LEDxL_PULSE_COL 0x0004
+/* Slow Blinking Configuration */
+#define XWAY_MMD_LEDxL_BLINKS_MASK 0x00F0
+#define XWAY_MMD_LEDxL_BLINKS_NONE 0x0000
+#define XWAY_MMD_LEDxL_BLINKS_LINK10 0x0010
+#define XWAY_MMD_LEDxL_BLINKS_LINK100 0x0020
+#define XWAY_MMD_LEDxL_BLINKS_LINK10X 0x0030
+#define XWAY_MMD_LEDxL_BLINKS_LINK1000 0x0040
+#define XWAY_MMD_LEDxL_BLINKS_LINK10_0 0x0050
+#define XWAY_MMD_LEDxL_BLINKS_LINK100X 0x0060
+#define XWAY_MMD_LEDxL_BLINKS_LINK10XX 0x0070
+#define XWAY_MMD_LEDxL_BLINKS_PDOWN 0x0080
+#define XWAY_MMD_LEDxL_BLINKS_EEE 0x0090
+#define XWAY_MMD_LEDxL_BLINKS_ANEG 0x00A0
+#define XWAY_MMD_LEDxL_BLINKS_ABIST 0x00B0
+#define XWAY_MMD_LEDxL_BLINKS_CDIAG 0x00C0
+#define XWAY_MMD_LED1H 0x01E4
+#define XWAY_MMD_LED1L 0x01E5
+#define XWAY_MMD_LED2H 0x01E6
+#define XWAY_MMD_LED2L 0x01E7
+#define XWAY_MMD_LED3H 0x01E8
+#define XWAY_MMD_LED3L 0x01E9
+
+#define PHY_ID_PHY11G_1_3 0x030260D1
+#define PHY_ID_PHY22F_1_3 0x030260E1
+#define PHY_ID_PHY11G_1_4 0xD565A400
+#define PHY_ID_PHY22F_1_4 0xD565A410
+#define PHY_ID_PHY11G_1_5 0xD565A401
+#define PHY_ID_PHY22F_1_5 0xD565A411
+#define PHY_ID_PHY11G_VR9 0xD565A409
+#define PHY_ID_PHY22F_VR9 0xD565A419
+
+static int xway_gphy_config_init(struct phy_device *phydev)
+{
+ int err;
+ u32 ledxh;
+ u32 ledxl;
+
+ /* Mask all interrupts */
+ err = phy_write(phydev, XWAY_MDIO_IMASK, 0);
+ if (err)
+ return err;
+
+ /* Clear all pending interrupts */
+ phy_read(phydev, XWAY_MDIO_ISTAT);
+
+ phy_write_mmd_indirect(phydev, XWAY_MMD_LEDCH, MDIO_MMD_VEND2,
+ XWAY_MMD_LEDCH_NACS_NONE |
+ XWAY_MMD_LEDCH_SBF_F02HZ |
+ XWAY_MMD_LEDCH_FBF_F16HZ);
+ phy_write_mmd_indirect(phydev, XWAY_MMD_LEDCL, MDIO_MMD_VEND2,
+ XWAY_MMD_LEDCH_CBLINK_NONE |
+ XWAY_MMD_LEDCH_SCAN_NONE);
+
+	/* In most cases only one LED is connected to this PHY, so
+	 * configure them all to constant on and pulse mode. LED3 is
+	 * only available in some packages; leave it in its reset
+	 * configuration.
+	 */
+ ledxh = XWAY_MMD_LEDxH_BLINKF_NONE | XWAY_MMD_LEDxH_CON_LINK10XX;
+ ledxl = XWAY_MMD_LEDxL_PULSE_TXACT | XWAY_MMD_LEDxL_PULSE_RXACT |
+ XWAY_MMD_LEDxL_BLINKS_NONE;
+ phy_write_mmd_indirect(phydev, XWAY_MMD_LED0H, MDIO_MMD_VEND2, ledxh);
+ phy_write_mmd_indirect(phydev, XWAY_MMD_LED0L, MDIO_MMD_VEND2, ledxl);
+ phy_write_mmd_indirect(phydev, XWAY_MMD_LED1H, MDIO_MMD_VEND2, ledxh);
+ phy_write_mmd_indirect(phydev, XWAY_MMD_LED1L, MDIO_MMD_VEND2, ledxl);
+ phy_write_mmd_indirect(phydev, XWAY_MMD_LED2H, MDIO_MMD_VEND2, ledxh);
+ phy_write_mmd_indirect(phydev, XWAY_MMD_LED2L, MDIO_MMD_VEND2, ledxl);
+
+ return 0;
+}
+
+static int xway_gphy14_config_aneg(struct phy_device *phydev)
+{
+ int reg, err;
+
+	/* Advertise as a multi-port device, see IEEE 802.3-2002 40.5.1.1.
+	 * This is a workaround for an erratum in rev < 1.5 devices.
+	 */
+ reg = phy_read(phydev, MII_CTRL1000);
+ reg |= ADVERTISED_MPD;
+ err = phy_write(phydev, MII_CTRL1000, reg);
+ if (err)
+ return err;
+
+ return genphy_config_aneg(phydev);
+}
+
+static int xway_gphy_ack_interrupt(struct phy_device *phydev)
+{
+ int reg;
+
+ reg = phy_read(phydev, XWAY_MDIO_ISTAT);
+ return (reg < 0) ? reg : 0;
+}
+
+static int xway_gphy_did_interrupt(struct phy_device *phydev)
+{
+ int reg;
+
+ reg = phy_read(phydev, XWAY_MDIO_ISTAT);
+ return reg & XWAY_MDIO_INIT_MASK;
+}
+
+static int xway_gphy_config_intr(struct phy_device *phydev)
+{
+ u16 mask = 0;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ mask = XWAY_MDIO_INIT_MASK;
+
+ return phy_write(phydev, XWAY_MDIO_IMASK, mask);
+}
+
+static struct phy_driver xway_gphy[] = {
+ {
+ .phy_id = PHY_ID_PHY11G_1_3,
+ .phy_id_mask = 0xffffffff,
+ .name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.3",
+ .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = xway_gphy_config_init,
+ .config_aneg = xway_gphy14_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = xway_gphy_ack_interrupt,
+ .did_interrupt = xway_gphy_did_interrupt,
+ .config_intr = xway_gphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ }, {
+ .phy_id = PHY_ID_PHY22F_1_3,
+ .phy_id_mask = 0xffffffff,
+ .name = "Intel XWAY PHY22F (PEF 7061) v1.3",
+ .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = xway_gphy_config_init,
+ .config_aneg = xway_gphy14_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = xway_gphy_ack_interrupt,
+ .did_interrupt = xway_gphy_did_interrupt,
+ .config_intr = xway_gphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ }, {
+ .phy_id = PHY_ID_PHY11G_1_4,
+ .phy_id_mask = 0xffffffff,
+ .name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.4",
+ .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = xway_gphy_config_init,
+ .config_aneg = xway_gphy14_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = xway_gphy_ack_interrupt,
+ .did_interrupt = xway_gphy_did_interrupt,
+ .config_intr = xway_gphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ }, {
+ .phy_id = PHY_ID_PHY22F_1_4,
+ .phy_id_mask = 0xffffffff,
+ .name = "Intel XWAY PHY22F (PEF 7061) v1.4",
+ .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = xway_gphy_config_init,
+ .config_aneg = xway_gphy14_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = xway_gphy_ack_interrupt,
+ .did_interrupt = xway_gphy_did_interrupt,
+ .config_intr = xway_gphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ }, {
+ .phy_id = PHY_ID_PHY11G_1_5,
+ .phy_id_mask = 0xffffffff,
+ .name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.5 / v1.6",
+ .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = xway_gphy_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = xway_gphy_ack_interrupt,
+ .did_interrupt = xway_gphy_did_interrupt,
+ .config_intr = xway_gphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ }, {
+ .phy_id = PHY_ID_PHY22F_1_5,
+ .phy_id_mask = 0xffffffff,
+ .name = "Intel XWAY PHY22F (PEF 7061) v1.5 / v1.6",
+ .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = xway_gphy_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = xway_gphy_ack_interrupt,
+ .did_interrupt = xway_gphy_did_interrupt,
+ .config_intr = xway_gphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ }, {
+ .phy_id = PHY_ID_PHY11G_VR9,
+ .phy_id_mask = 0xffffffff,
+ .name = "Intel XWAY PHY11G (xRX integrated)",
+ .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = xway_gphy_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = xway_gphy_ack_interrupt,
+ .did_interrupt = xway_gphy_did_interrupt,
+ .config_intr = xway_gphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ }, {
+ .phy_id = PHY_ID_PHY22F_VR9,
+ .phy_id_mask = 0xffffffff,
+ .name = "Intel XWAY PHY22F (xRX integrated)",
+ .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = xway_gphy_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = xway_gphy_ack_interrupt,
+ .did_interrupt = xway_gphy_did_interrupt,
+ .config_intr = xway_gphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ },
+};
+module_phy_driver(xway_gphy);
+
+static struct mdio_device_id __maybe_unused xway_gphy_tbl[] = {
+ { PHY_ID_PHY11G_1_3, 0xffffffff },
+ { PHY_ID_PHY22F_1_3, 0xffffffff },
+ { PHY_ID_PHY11G_1_4, 0xffffffff },
+ { PHY_ID_PHY22F_1_4, 0xffffffff },
+ { PHY_ID_PHY11G_1_5, 0xffffffff },
+ { PHY_ID_PHY22F_1_5, 0xffffffff },
+ { PHY_ID_PHY11G_VR9, 0xffffffff },
+ { PHY_ID_PHY22F_VR9, 0xffffffff },
+ { }
+};
+MODULE_DEVICE_TABLE(mdio, xway_gphy_tbl);
+
+MODULE_DESCRIPTION("Intel XWAY PHY driver");
+MODULE_LICENSE("GPL");
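
The LED registers written in xway_gphy_config_init() are plain bit-field compositions: one value from each field group, ORed together. As a hedged illustration, a hypothetical helper (xway_gphy_setup_leds_alt, not in this patch) programming a different policy — LED0 constant-on for any link, LED1 pulsing on RX/TX activity — would compose its values the same way:

static int xway_gphy_setup_leds_alt(struct phy_device *phydev)
{
	/* Constant on for any link speed, no fast blinking. */
	u16 led0h = XWAY_MMD_LEDxH_CON_LINK10XX | XWAY_MMD_LEDxH_BLINKF_NONE;
	/* Pulse on transmit and receive activity, no slow blinking. */
	u16 led1l = XWAY_MMD_LEDxL_PULSE_TXACT | XWAY_MMD_LEDxL_PULSE_RXACT |
		    XWAY_MMD_LEDxL_BLINKS_NONE;

	phy_write_mmd_indirect(phydev, XWAY_MMD_LED0H, MDIO_MMD_VEND2, led0h);
	phy_write_mmd_indirect(phydev, XWAY_MMD_LED1L, MDIO_MMD_VEND2, led1l);
	return 0;
}
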
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index ec2c1eee6..c2dcf02df 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -138,6 +138,21 @@
#define MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII 0x1 /* SGMII to copper */
#define MII_88E1510_GEN_CTRL_REG_1_RESET 0x8000 /* Soft reset */
+#define LPA_FIBER_1000HALF 0x40
+#define LPA_FIBER_1000FULL 0x20
+
+#define LPA_PAUSE_FIBER 0x180
+#define LPA_PAUSE_ASYM_FIBER 0x100
+
+#define ADVERTISE_FIBER_1000HALF 0x40
+#define ADVERTISE_FIBER_1000FULL 0x20
+
+#define ADVERTISE_PAUSE_FIBER 0x180
+#define ADVERTISE_PAUSE_ASYM_FIBER 0x100
+
+#define REGISTER_LINK_STATUS 0x400
+#define NB_FIBER_STATS 1
+
MODULE_DESCRIPTION("Marvell PHY driver");
MODULE_AUTHOR("Andy Fleming");
MODULE_LICENSE("GPL");
@@ -150,8 +165,9 @@ struct marvell_hw_stat {
};
static struct marvell_hw_stat marvell_hw_stats[] = {
- { "phy_receive_errors", 0, 21, 16},
+ { "phy_receive_errors_copper", 0, 21, 16},
{ "phy_idle_errors", 0, 10, 8 },
+ { "phy_receive_errors_fiber", 1, 21, 16},
};
struct marvell_priv {
@@ -477,15 +493,122 @@ static int m88e1318_config_aneg(struct phy_device *phydev)
return m88e1121_config_aneg(phydev);
}
+/**
+ * ethtool_adv_to_fiber_adv_t
+ * @ethadv: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertisement
+ * settings to PHY autonegotiation advertisements for the
+ * MII_ADVERTISE register on a fiber link.
+ */
+static inline u32 ethtool_adv_to_fiber_adv_t(u32 ethadv)
+{
+ u32 result = 0;
+
+ if (ethadv & ADVERTISED_1000baseT_Half)
+ result |= ADVERTISE_FIBER_1000HALF;
+ if (ethadv & ADVERTISED_1000baseT_Full)
+ result |= ADVERTISE_FIBER_1000FULL;
+
+ if ((ethadv & ADVERTISE_PAUSE_ASYM) && (ethadv & ADVERTISE_PAUSE_CAP))
+		result |= ADVERTISE_PAUSE_ASYM_FIBER;
+ else if (ethadv & ADVERTISE_PAUSE_CAP)
+ result |= (ADVERTISE_PAUSE_FIBER
+ & (~ADVERTISE_PAUSE_ASYM_FIBER));
+
+ return result;
+}
+
+/**
+ * marvell_config_aneg_fiber - restart auto-negotiation or write BMCR
+ * @phydev: target phy_device struct
+ *
+ * Description: If auto-negotiation is enabled, we configure the
+ * advertising, and then restart auto-negotiation. If it is not
+ * enabled, then we write the BMCR. Adapted for the fiber link on
+ * some Marvell devices.
+ */
+static int marvell_config_aneg_fiber(struct phy_device *phydev)
+{
+ int changed = 0;
+ int err;
+ int adv, oldadv;
+ u32 advertise;
+
+ if (phydev->autoneg != AUTONEG_ENABLE)
+ return genphy_setup_forced(phydev);
+
+ /* Only allow advertising what this PHY supports */
+ phydev->advertising &= phydev->supported;
+ advertise = phydev->advertising;
+
+ /* Setup fiber advertisement */
+ adv = phy_read(phydev, MII_ADVERTISE);
+ if (adv < 0)
+ return adv;
+
+ oldadv = adv;
+ adv &= ~(ADVERTISE_FIBER_1000HALF | ADVERTISE_FIBER_1000FULL
+		 | ADVERTISE_PAUSE_FIBER);
+ adv |= ethtool_adv_to_fiber_adv_t(advertise);
+
+ if (adv != oldadv) {
+ err = phy_write(phydev, MII_ADVERTISE, adv);
+ if (err < 0)
+ return err;
+
+ changed = 1;
+ }
+
+ if (changed == 0) {
+ /* Advertisement hasn't changed, but maybe aneg was never on to
+ * begin with? Or maybe phy was isolated?
+ */
+ int ctl = phy_read(phydev, MII_BMCR);
+
+ if (ctl < 0)
+ return ctl;
+
+ if (!(ctl & BMCR_ANENABLE) || (ctl & BMCR_ISOLATE))
+ changed = 1; /* do restart aneg */
+ }
+
+ /* Only restart aneg if we are advertising something different
+ * than we were before.
+ */
+ if (changed > 0)
+ changed = genphy_restart_aneg(phydev);
+
+ return changed;
+}
+
static int m88e1510_config_aneg(struct phy_device *phydev)
{
int err;
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER);
+ if (err < 0)
+ goto error;
+
+ /* Configure the copper link first */
err = m88e1318_config_aneg(phydev);
if (err < 0)
- return err;
+ goto error;
- return 0;
+ /* Then the fiber link */
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_FIBER);
+ if (err < 0)
+ goto error;
+
+ err = marvell_config_aneg_fiber(phydev);
+ if (err < 0)
+ goto error;
+
+ return phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER);
+
+error:
+ phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER);
+ return err;
}
static int marvell_config_init(struct phy_device *phydev)
@@ -890,26 +1013,79 @@ static int m88e1145_config_init(struct phy_device *phydev)
return 0;
}
-/* marvell_read_status
+/**
+ * fiber_lpa_to_ethtool_lpa_t
+ * @lpa: value of the MII_LPA register for fiber link
+ *
+ * A small helper function that translates MII_LPA
+ * bits to ethtool LP advertisement settings.
+ */
+static u32 fiber_lpa_to_ethtool_lpa_t(u32 lpa)
+{
+ u32 result = 0;
+
+ if (lpa & LPA_FIBER_1000HALF)
+ result |= ADVERTISED_1000baseT_Half;
+ if (lpa & LPA_FIBER_1000FULL)
+ result |= ADVERTISED_1000baseT_Full;
+
+ return result;
+}
+
+/**
+ * marvell_update_link - update link status in real time in @phydev
+ * @phydev: target phy_device struct
+ * @fiber: non-zero to read the link from the fiber page, zero for copper
+ *
+ * Description: Update the value in phydev->link to reflect the
+ * current link value.
+ */
+static int marvell_update_link(struct phy_device *phydev, int fiber)
+{
+ int status;
+
+	/* Use the generic register for the copper link, or the
+	 * PHY-specific status register for the fiber case.
+	 */
+ if (fiber) {
+ status = phy_read(phydev, MII_M1011_PHY_STATUS);
+ if (status < 0)
+ return status;
+
+ if ((status & REGISTER_LINK_STATUS) == 0)
+ phydev->link = 0;
+ else
+ phydev->link = 1;
+ } else {
+ return genphy_update_link(phydev);
+ }
+
+ return 0;
+}
+
+/* marvell_read_status_page
*
- * Generic status code does not detect Fiber correctly!
* Description:
* Check the link, then figure out the current state
* by comparing what we advertise with what the link partner
* advertises. Start by checking the gigabit possibilities,
* then move on to 10/100.
*/
-static int marvell_read_status(struct phy_device *phydev)
+static int marvell_read_status_page(struct phy_device *phydev, int page)
{
int adv;
int err;
int lpa;
int lpagb;
int status = 0;
+ int fiber;
- /* Update the link, but return if there
+ /* Detect and update the link, but return if there
* was an error */
- err = genphy_update_link(phydev);
+ if (page == MII_M1111_FIBER)
+ fiber = 1;
+ else
+ fiber = 0;
+
+ err = marvell_update_link(phydev, fiber);
if (err)
return err;
@@ -930,9 +1106,6 @@ static int marvell_read_status(struct phy_device *phydev)
if (adv < 0)
return adv;
- phydev->lp_advertising = mii_stat1000_to_ethtool_lpa_t(lpagb) |
- mii_lpa_to_ethtool_lpa_t(lpa);
-
lpa &= adv;
if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
@@ -957,9 +1130,30 @@ static int marvell_read_status(struct phy_device *phydev)
break;
}
- if (phydev->duplex == DUPLEX_FULL) {
- phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
- phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
+ if (!fiber) {
+ phydev->lp_advertising = mii_stat1000_to_ethtool_lpa_t(lpagb) |
+ mii_lpa_to_ethtool_lpa_t(lpa);
+
+ if (phydev->duplex == DUPLEX_FULL) {
+ phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
+ phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
+ }
+ } else {
+ /* The fiber link is only 1000M capable */
+ phydev->lp_advertising = fiber_lpa_to_ethtool_lpa_t(lpa);
+
+ if (phydev->duplex == DUPLEX_FULL) {
+ if (!(lpa & LPA_PAUSE_FIBER)) {
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+			} else if (lpa & LPA_PAUSE_ASYM_FIBER) {
+ phydev->pause = 1;
+ phydev->asym_pause = 1;
+ } else {
+ phydev->pause = 1;
+ phydev->asym_pause = 0;
+ }
+ }
}
} else {
int bmcr = phy_read(phydev, MII_BMCR);
@@ -986,6 +1180,119 @@ static int marvell_read_status(struct phy_device *phydev)
return 0;
}
+/* marvell_read_status
+ *
+ * Some Marvell PHYs have two modes: fiber and copper.
+ * Both need their status checked.
+ * Description:
+ *   First, check the fiber link and status.
+ *   If the fiber link is down, check the copper link and status, which
+ *   will be the reported state when both links are down.
+ */
+static int marvell_read_status(struct phy_device *phydev)
+{
+ int err;
+
+ /* Check the fiber mode first */
+ if (phydev->supported & SUPPORTED_FIBRE) {
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_FIBER);
+ if (err < 0)
+ goto error;
+
+ err = marvell_read_status_page(phydev, MII_M1111_FIBER);
+ if (err < 0)
+ goto error;
+
+		/* If the fiber link is up, it is the selected and active
+		 * link, so stay on the fiber page. Be careful not to
+		 * restore the copper page in other functions, as that
+		 * could break the behaviour of some fiber PHYs such as
+		 * the 88E1512.
+		 */
+ if (phydev->link)
+ return 0;
+
+ /* If fiber link is down, check and save copper mode state */
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER);
+ if (err < 0)
+ goto error;
+ }
+
+ return marvell_read_status_page(phydev, MII_M1111_COPPER);
+
+error:
+ phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER);
+ return err;
+}
+
+/* marvell_suspend
+ *
+ * Some Marvell PHYs have two modes: fiber and copper.
+ * Both need to be suspended.
+ */
+static int marvell_suspend(struct phy_device *phydev)
+{
+ int err;
+
+ /* Suspend the fiber mode first */
+	if (phydev->supported & SUPPORTED_FIBRE) {
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_FIBER);
+ if (err < 0)
+ goto error;
+
+ /* With the page set, use the generic suspend */
+ err = genphy_suspend(phydev);
+ if (err < 0)
+ goto error;
+
+ /* Then, the copper link */
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER);
+ if (err < 0)
+ goto error;
+ }
+
+ /* With the page set, use the generic suspend */
+ return genphy_suspend(phydev);
+
+error:
+ phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER);
+ return err;
+}
+
+/* marvell_resume
+ *
+ * Some Marvell PHYs have two modes: fiber and copper.
+ * Both need to be resumed.
+ */
+static int marvell_resume(struct phy_device *phydev)
+{
+ int err;
+
+ /* Resume the fiber mode first */
+	if (phydev->supported & SUPPORTED_FIBRE) {
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_FIBER);
+ if (err < 0)
+ goto error;
+
+ /* With the page set, use the generic resume */
+ err = genphy_resume(phydev);
+ if (err < 0)
+ goto error;
+
+ /* Then, the copper link */
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER);
+ if (err < 0)
+ goto error;
+ }
+
+ /* With the page set, use the generic resume */
+ return genphy_resume(phydev);
+
+error:
+ phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER);
+ return err;
+}
+
static int marvell_aneg_done(struct phy_device *phydev)
{
int retval = phy_read(phydev, MII_M1011_PHY_STATUS);
@@ -1107,7 +1414,10 @@ static int m88e1318_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *w
static int marvell_get_sset_count(struct phy_device *phydev)
{
- return ARRAY_SIZE(marvell_hw_stats);
+ if (phydev->supported & SUPPORTED_FIBRE)
+ return ARRAY_SIZE(marvell_hw_stats);
+ else
+ return ARRAY_SIZE(marvell_hw_stats) - NB_FIBER_STATS;
}
static void marvell_get_strings(struct phy_device *phydev, u8 *data)
@@ -1361,7 +1671,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E1510,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1510",
- .features = PHY_GBIT_FEATURES,
+ .features = PHY_GBIT_FEATURES | SUPPORTED_FIBRE,
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1510_config_init,
@@ -1370,8 +1680,8 @@ static struct phy_driver marvell_drivers[] = {
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
.did_interrupt = &m88e1121_did_interrupt,
- .resume = &genphy_resume,
- .suspend = &genphy_suspend,
+ .resume = &marvell_resume,
+ .suspend = &marvell_suspend,
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
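
Every fiber-capable path in this patch follows the same discipline: select the fiber page, do the work, then restore the copper page even on failure. A sketch of that pattern as a standalone helper — hypothetical name marvell_on_fiber_page; the patch itself open-codes it in m88e1510_config_aneg(), marvell_read_status(), marvell_suspend() and marvell_resume():

static int marvell_on_fiber_page(struct phy_device *phydev,
				 int (*fn)(struct phy_device *))
{
	int err, ret;

	ret = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_FIBER);
	if (ret < 0)
		return ret;

	ret = fn(phydev);

	/* Restore the copper page unconditionally, mirroring the error
	 * paths above.
	 */
	err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER);
	return ret < 0 ? ret : err;
}
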
diff --git a/drivers/net/phy/mdio-hisi-femac.c b/drivers/net/phy/mdio-hisi-femac.c
new file mode 100644
index 000000000..b03fedd6c
--- /dev/null
+++ b/drivers/net/phy/mdio-hisi-femac.c
@@ -0,0 +1,166 @@
+/*
+ * Hisilicon Fast Ethernet MDIO Bus Driver
+ *
+ * Copyright (c) 2016 HiSilicon Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/platform_device.h>
+
+#define MDIO_RWCTRL 0x00
+#define MDIO_RO_DATA 0x04
+#define MDIO_WRITE BIT(13)
+#define MDIO_RW_FINISH BIT(15)
+#define BIT_PHY_ADDR_OFFSET 8
+#define BIT_WR_DATA_OFFSET 16
+
+struct hisi_femac_mdio_data {
+ struct clk *clk;
+ void __iomem *membase;
+};
+
+static int hisi_femac_mdio_wait_ready(struct hisi_femac_mdio_data *data)
+{
+ u32 val;
+
+ return readl_poll_timeout(data->membase + MDIO_RWCTRL,
+ val, val & MDIO_RW_FINISH, 20, 10000);
+}
+
+static int hisi_femac_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+ struct hisi_femac_mdio_data *data = bus->priv;
+ int ret;
+
+ ret = hisi_femac_mdio_wait_ready(data);
+ if (ret)
+ return ret;
+
+ writel((mii_id << BIT_PHY_ADDR_OFFSET) | regnum,
+ data->membase + MDIO_RWCTRL);
+
+ ret = hisi_femac_mdio_wait_ready(data);
+ if (ret)
+ return ret;
+
+ return readl(data->membase + MDIO_RO_DATA) & 0xFFFF;
+}
+
+static int hisi_femac_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
+{
+ struct hisi_femac_mdio_data *data = bus->priv;
+ int ret;
+
+ ret = hisi_femac_mdio_wait_ready(data);
+ if (ret)
+ return ret;
+
+ writel(MDIO_WRITE | (value << BIT_WR_DATA_OFFSET) |
+ (mii_id << BIT_PHY_ADDR_OFFSET) | regnum,
+ data->membase + MDIO_RWCTRL);
+
+ return hisi_femac_mdio_wait_ready(data);
+}
+
+static int hisi_femac_mdio_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct mii_bus *bus;
+ struct hisi_femac_mdio_data *data;
+ struct resource *res;
+ int ret;
+
+ bus = mdiobus_alloc_size(sizeof(*data));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "hisi_femac_mii_bus";
+ bus->read = &hisi_femac_mdio_read;
+ bus->write = &hisi_femac_mdio_write;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
+ bus->parent = &pdev->dev;
+
+ data = bus->priv;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->membase)) {
+ ret = PTR_ERR(data->membase);
+ goto err_out_free_mdiobus;
+ }
+
+ data->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(data->clk)) {
+ ret = PTR_ERR(data->clk);
+ goto err_out_free_mdiobus;
+ }
+
+ ret = clk_prepare_enable(data->clk);
+ if (ret)
+ goto err_out_free_mdiobus;
+
+ ret = of_mdiobus_register(bus, np);
+ if (ret)
+ goto err_out_disable_clk;
+
+ platform_set_drvdata(pdev, bus);
+
+ return 0;
+
+err_out_disable_clk:
+ clk_disable_unprepare(data->clk);
+err_out_free_mdiobus:
+ mdiobus_free(bus);
+ return ret;
+}
+
+static int hisi_femac_mdio_remove(struct platform_device *pdev)
+{
+ struct mii_bus *bus = platform_get_drvdata(pdev);
+ struct hisi_femac_mdio_data *data = bus->priv;
+
+ mdiobus_unregister(bus);
+ clk_disable_unprepare(data->clk);
+ mdiobus_free(bus);
+
+ return 0;
+}
+
+static const struct of_device_id hisi_femac_mdio_dt_ids[] = {
+ { .compatible = "hisilicon,hisi-femac-mdio" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, hisi_femac_mdio_dt_ids);
+
+static struct platform_driver hisi_femac_mdio_driver = {
+ .probe = hisi_femac_mdio_probe,
+ .remove = hisi_femac_mdio_remove,
+ .driver = {
+ .name = "hisi-femac-mdio",
+ .of_match_table = hisi_femac_mdio_dt_ids,
+ },
+};
+
+module_platform_driver(hisi_femac_mdio_driver);
+
+MODULE_DESCRIPTION("Hisilicon Fast Ethernet MAC MDIO interface driver");
+MODULE_AUTHOR("Dongpo Li <lidongpo@hisilicon.com>");
+MODULE_LICENSE("GPL v2");
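
hisi_femac_mdio_wait_ready() leans entirely on readl_poll_timeout() from <linux/iopoll.h>. Roughly, that macro behaves like the open-coded sketch below (simplified: the real macro re-reads the register one last time after the deadline before giving up, and derives its sleep range from the 20us argument); hisi_wait_ready_open_coded is a hypothetical name used only for illustration:

static int hisi_wait_ready_open_coded(void __iomem *membase)
{
	ktime_t timeout = ktime_add_us(ktime_get(), 10000);
	u32 val;

	for (;;) {
		val = readl(membase + MDIO_RWCTRL);
		if (val & MDIO_RW_FINISH)
			return 0;	/* controller is idle */
		if (ktime_compare(ktime_get(), timeout) > 0)
			return -ETIMEDOUT;
		usleep_range(20, 40);	/* ~20us poll interval, as above */
	}
}
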
diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c
new file mode 100644
index 000000000..0a0412524
--- /dev/null
+++ b/drivers/net/phy/mdio-mux-bcm-iproc.c
@@ -0,0 +1,248 @@
+/*
+ * Copyright 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation (the "GPL").
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 (GPLv2) for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 (GPLv2) along with this source code.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/of_mdio.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/mdio-mux.h>
+#include <linux/delay.h>
+
+#define MDIO_PARAM_OFFSET 0x00
+#define MDIO_PARAM_MIIM_CYCLE 29
+#define MDIO_PARAM_INTERNAL_SEL 25
+#define MDIO_PARAM_BUS_ID 22
+#define MDIO_PARAM_C45_SEL 21
+#define MDIO_PARAM_PHY_ID 16
+#define MDIO_PARAM_PHY_DATA 0
+
+#define MDIO_READ_OFFSET 0x04
+#define MDIO_READ_DATA_MASK 0xffff
+#define MDIO_ADDR_OFFSET 0x08
+
+#define MDIO_CTRL_OFFSET 0x0C
+#define MDIO_CTRL_WRITE_OP 0x1
+#define MDIO_CTRL_READ_OP 0x2
+
+#define MDIO_STAT_OFFSET 0x10
+#define MDIO_STAT_DONE 1
+
+#define BUS_MAX_ADDR 32
+#define EXT_BUS_START_ADDR 16
+
+struct iproc_mdiomux_desc {
+ void *mux_handle;
+ void __iomem *base;
+ struct device *dev;
+ struct mii_bus *mii_bus;
+};
+
+static int iproc_mdio_wait_for_idle(void __iomem *base, bool result)
+{
+	unsigned int timeout = 1000;	/* poll for roughly 1-2s */
+ u32 val;
+
+ do {
+ val = readl(base + MDIO_STAT_OFFSET);
+ if ((val & MDIO_STAT_DONE) == result)
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (timeout--);
+
+ return -ETIMEDOUT;
+}
+
+/* start_miim_ops - program and start an MDIO transaction over the MDIO bus.
+ * @base: base address of the MDIO block
+ * @phyid: PHY id on the selected bus
+ * @reg: register offset to be read/written
+ * @val: 0 for a read op, else the value to be written to @reg
+ * @op: operation to be carried out:
+ *      MDIO_CTRL_READ_OP: read transaction
+ *      MDIO_CTRL_WRITE_OP: write transaction
+ *
+ * Return value: a successful read returns the register value and a
+ * successful write returns 0; on failure a negative error code is returned.
+ */
+static int start_miim_ops(void __iomem *base,
+ u16 phyid, u32 reg, u16 val, u32 op)
+{
+ u32 param;
+ int ret;
+
+ writel(0, base + MDIO_CTRL_OFFSET);
+ ret = iproc_mdio_wait_for_idle(base, 0);
+ if (ret)
+ goto err;
+
+ param = readl(base + MDIO_PARAM_OFFSET);
+ param |= phyid << MDIO_PARAM_PHY_ID;
+ param |= val << MDIO_PARAM_PHY_DATA;
+ if (reg & MII_ADDR_C45)
+ param |= BIT(MDIO_PARAM_C45_SEL);
+
+ writel(param, base + MDIO_PARAM_OFFSET);
+
+ writel(reg, base + MDIO_ADDR_OFFSET);
+
+ writel(op, base + MDIO_CTRL_OFFSET);
+
+ ret = iproc_mdio_wait_for_idle(base, 1);
+ if (ret)
+ goto err;
+
+ if (op == MDIO_CTRL_READ_OP)
+ ret = readl(base + MDIO_READ_OFFSET) & MDIO_READ_DATA_MASK;
+err:
+ return ret;
+}
+
+static int iproc_mdiomux_read(struct mii_bus *bus, int phyid, int reg)
+{
+ struct iproc_mdiomux_desc *md = bus->priv;
+ int ret;
+
+ ret = start_miim_ops(md->base, phyid, reg, 0, MDIO_CTRL_READ_OP);
+ if (ret < 0)
+		dev_err(&bus->dev, "mdiomux read operation failed\n");
+
+ return ret;
+}
+
+static int iproc_mdiomux_write(struct mii_bus *bus,
+ int phyid, int reg, u16 val)
+{
+ struct iproc_mdiomux_desc *md = bus->priv;
+ int ret;
+
+ /* Write val at reg offset */
+ ret = start_miim_ops(md->base, phyid, reg, val, MDIO_CTRL_WRITE_OP);
+ if (ret < 0)
+		dev_err(&bus->dev, "mdiomux write operation failed\n");
+
+ return ret;
+}
+
+static int mdio_mux_iproc_switch_fn(int current_child, int desired_child,
+ void *data)
+{
+ struct iproc_mdiomux_desc *md = data;
+ u32 param, bus_id;
+ bool bus_dir;
+
+ /* select bus and its properties */
+ bus_dir = (desired_child < EXT_BUS_START_ADDR);
+ bus_id = bus_dir ? desired_child : (desired_child - EXT_BUS_START_ADDR);
+
+ param = (bus_dir ? 1 : 0) << MDIO_PARAM_INTERNAL_SEL;
+ param |= (bus_id << MDIO_PARAM_BUS_ID);
+
+ writel(param, md->base + MDIO_PARAM_OFFSET);
+ return 0;
+}
+
+static int mdio_mux_iproc_probe(struct platform_device *pdev)
+{
+ struct iproc_mdiomux_desc *md;
+ struct mii_bus *bus;
+ struct resource *res;
+ int rc;
+
+ md = devm_kzalloc(&pdev->dev, sizeof(*md), GFP_KERNEL);
+ if (!md)
+ return -ENOMEM;
+ md->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ md->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(md->base)) {
+ dev_err(&pdev->dev, "failed to ioremap register\n");
+ return PTR_ERR(md->base);
+ }
+
+ md->mii_bus = mdiobus_alloc();
+ if (!md->mii_bus) {
+ dev_err(&pdev->dev, "mdiomux bus alloc failed\n");
+ return -ENOMEM;
+ }
+
+ bus = md->mii_bus;
+ bus->priv = md;
+ bus->name = "iProc MDIO mux bus";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id);
+ bus->parent = &pdev->dev;
+ bus->read = iproc_mdiomux_read;
+ bus->write = iproc_mdiomux_write;
+
+ bus->phy_mask = ~0;
+ bus->dev.of_node = pdev->dev.of_node;
+ rc = mdiobus_register(bus);
+ if (rc) {
+ dev_err(&pdev->dev, "mdiomux registration failed\n");
+ goto out;
+ }
+
+ platform_set_drvdata(pdev, md);
+
+ rc = mdio_mux_init(md->dev, mdio_mux_iproc_switch_fn,
+ &md->mux_handle, md, md->mii_bus);
+ if (rc) {
+ dev_info(md->dev, "mdiomux initialization failed\n");
+ goto out;
+ }
+
+ dev_info(md->dev, "iProc mdiomux registered\n");
+ return 0;
+out:
+ mdiobus_free(bus);
+ return rc;
+}
+
+static int mdio_mux_iproc_remove(struct platform_device *pdev)
+{
+ struct iproc_mdiomux_desc *md = dev_get_platdata(&pdev->dev);
+
+ mdio_mux_uninit(md->mux_handle);
+ mdiobus_unregister(md->mii_bus);
+ mdiobus_free(md->mii_bus);
+
+ return 0;
+}
+
+static const struct of_device_id mdio_mux_iproc_match[] = {
+ {
+ .compatible = "brcm,mdio-mux-iproc",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mdio_mux_iproc_match);
+
+static struct platform_driver mdiomux_iproc_driver = {
+ .driver = {
+ .name = "mdio-mux-iproc",
+ .of_match_table = mdio_mux_iproc_match,
+ },
+ .probe = mdio_mux_iproc_probe,
+ .remove = mdio_mux_iproc_remove,
+};
+
+module_platform_driver(mdiomux_iproc_driver);
+
+MODULE_DESCRIPTION("iProc MDIO Mux Bus Driver");
+MODULE_AUTHOR("Pramod Kumar <pramod.kumar@broadcom.com>");
+MODULE_LICENSE("GPL v2");
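
The child-bus encoding in mdio_mux_iproc_switch_fn() packs two facts into MDIO_PARAM: whether the selected bus is internal, and its index. Restated as a pure function for clarity (hypothetical helper, not in the patch) — for example, desired_child 18 yields internal = false and bus_id = 2:

static u32 iproc_mux_param_for_child(int desired_child)
{
	bool internal = desired_child < EXT_BUS_START_ADDR;
	u32 bus_id = internal ? desired_child
			      : desired_child - EXT_BUS_START_ADDR;

	return ((internal ? 1 : 0) << MDIO_PARAM_INTERNAL_SEL) |
	       (bus_id << MDIO_PARAM_BUS_ID);
}
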
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
index 7ddb1ab70..919949960 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -55,7 +55,7 @@ static int mdio_mux_gpio_probe(struct platform_device *pdev)
return PTR_ERR(s->gpios);
r = mdio_mux_init(&pdev->dev,
- mdio_mux_gpio_switch_fn, &s->mux_handle, s);
+ mdio_mux_gpio_switch_fn, &s->mux_handle, s, NULL);
if (r != 0) {
gpiod_put_array(s->gpios);
diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c
index 7fde454fb..d0bed52c8 100644
--- a/drivers/net/phy/mdio-mux-mmioreg.c
+++ b/drivers/net/phy/mdio-mux-mmioreg.c
@@ -126,7 +126,7 @@ static int mdio_mux_mmioreg_probe(struct platform_device *pdev)
}
ret = mdio_mux_init(&pdev->dev, mdio_mux_mmioreg_switch_fn,
- &s->mux_handle, s);
+ &s->mux_handle, s, NULL);
if (ret) {
dev_err(&pdev->dev, "failed to register mdio-mux bus %s\n",
np->full_name);
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 5c81d6faf..963838d4f 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -89,7 +89,8 @@ static int parent_count;
int mdio_mux_init(struct device *dev,
int (*switch_fn)(int cur, int desired, void *data),
void **mux_handle,
- void *data)
+ void *data,
+ struct mii_bus *mux_bus)
{
struct device_node *parent_bus_node;
struct device_node *child_bus_node;
@@ -101,10 +102,22 @@ int mdio_mux_init(struct device *dev,
if (!dev->of_node)
return -ENODEV;
- parent_bus_node = of_parse_phandle(dev->of_node, "mdio-parent-bus", 0);
+ if (!mux_bus) {
+ parent_bus_node = of_parse_phandle(dev->of_node,
+ "mdio-parent-bus", 0);
- if (!parent_bus_node)
- return -ENODEV;
+ if (!parent_bus_node)
+ return -ENODEV;
+
+ parent_bus = of_mdio_find_bus(parent_bus_node);
+ if (!parent_bus) {
+ ret_val = -EPROBE_DEFER;
+ goto err_parent_bus;
+ }
+ } else {
+ parent_bus_node = NULL;
+ parent_bus = mux_bus;
+ }
pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL);
if (pb == NULL) {
@@ -112,11 +125,6 @@ int mdio_mux_init(struct device *dev,
goto err_parent_bus;
}
- parent_bus = of_mdio_find_bus(parent_bus_node);
- if (parent_bus == NULL) {
- ret_val = -EPROBE_DEFER;
- goto err_parent_bus;
- }
pb->switch_data = data;
pb->switch_fn = switch_fn;
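
After this change mdio_mux_init() takes a fifth argument. Existing DT-based callers pass NULL and keep resolving the parent through the "mdio-parent-bus" phandle; callers that already own the parent mii_bus, like mdio-mux-bcm-iproc.c above, hand it in directly. A sketch of both call styles (dev, s, my_switch_fn and my_bus are placeholders):

/* 1) Legacy callers: parent bus comes from the "mdio-parent-bus" phandle. */
ret = mdio_mux_init(dev, my_switch_fn, &s->mux_handle, s, NULL);

/* 2) New-style callers: pass the parent mii_bus directly and skip the
 *    of_mdio_find_bus() lookup (and its -EPROBE_DEFER path) entirely.
 */
ret = mdio_mux_init(dev, my_switch_fn, &s->mux_handle, s, my_bus);
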
diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c
new file mode 100644
index 000000000..92af18295
--- /dev/null
+++ b/drivers/net/phy/mdio-xgene.c
@@ -0,0 +1,473 @@
+/* Applied Micro X-Gene SoC MDIO Driver
+ *
+ * Copyright (c) 2016, Applied Micro Circuits Corporation
+ * Author: Iyappan Subramanian <isubramanian@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/efi.h>
+#include <linux/if_vlan.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/prefetch.h>
+#include <linux/phy.h>
+#include <net/ip.h>
+#include "mdio-xgene.h"
+
+static bool xgene_mdio_status;
+
+static u32 xgene_enet_rd_mac(void __iomem *base_addr, u32 rd_addr)
+{
+ void __iomem *addr, *rd, *cmd, *cmd_done;
+ u32 done, rd_data = BUSY_MASK;
+ u8 wait = 10;
+
+ addr = base_addr + MAC_ADDR_REG_OFFSET;
+ rd = base_addr + MAC_READ_REG_OFFSET;
+ cmd = base_addr + MAC_COMMAND_REG_OFFSET;
+ cmd_done = base_addr + MAC_COMMAND_DONE_REG_OFFSET;
+
+ iowrite32(rd_addr, addr);
+ iowrite32(XGENE_ENET_RD_CMD, cmd);
+
+ while (wait--) {
+ done = ioread32(cmd_done);
+ if (done)
+ break;
+ udelay(1);
+ }
+
+ if (!done)
+ return rd_data;
+
+ rd_data = ioread32(rd);
+ iowrite32(0, cmd);
+
+ return rd_data;
+}
+
+static void xgene_enet_wr_mac(void __iomem *base_addr, u32 wr_addr, u32 wr_data)
+{
+ void __iomem *addr, *wr, *cmd, *cmd_done;
+ u8 wait = 10;
+ u32 done;
+
+ addr = base_addr + MAC_ADDR_REG_OFFSET;
+ wr = base_addr + MAC_WRITE_REG_OFFSET;
+ cmd = base_addr + MAC_COMMAND_REG_OFFSET;
+ cmd_done = base_addr + MAC_COMMAND_DONE_REG_OFFSET;
+
+ iowrite32(wr_addr, addr);
+ iowrite32(wr_data, wr);
+ iowrite32(XGENE_ENET_WR_CMD, cmd);
+
+ while (wait--) {
+ done = ioread32(cmd_done);
+ if (done)
+ break;
+ udelay(1);
+ }
+
+ if (!done)
+ pr_err("MCX mac write failed, addr: 0x%04x\n", wr_addr);
+
+ iowrite32(0, cmd);
+}
+
+int xgene_mdio_rgmii_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ void __iomem *addr = (void __iomem *)bus->priv;
+ u32 data, done;
+ u8 wait = 10;
+
+ data = SET_VAL(PHY_ADDR, phy_id) | SET_VAL(REG_ADDR, reg);
+ xgene_enet_wr_mac(addr, MII_MGMT_ADDRESS_ADDR, data);
+ xgene_enet_wr_mac(addr, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK);
+ do {
+ usleep_range(5, 10);
+ done = xgene_enet_rd_mac(addr, MII_MGMT_INDICATORS_ADDR);
+ } while ((done & BUSY_MASK) && wait--);
+
+ if (done & BUSY_MASK) {
+ dev_err(&bus->dev, "MII_MGMT read failed\n");
+ return -EBUSY;
+ }
+
+ data = xgene_enet_rd_mac(addr, MII_MGMT_STATUS_ADDR);
+ xgene_enet_wr_mac(addr, MII_MGMT_COMMAND_ADDR, 0);
+
+ return data;
+}
+EXPORT_SYMBOL(xgene_mdio_rgmii_read);
+
+int xgene_mdio_rgmii_write(struct mii_bus *bus, int phy_id, int reg, u16 data)
+{
+ void __iomem *addr = (void __iomem *)bus->priv;
+ u32 val, done;
+ u8 wait = 10;
+
+ val = SET_VAL(PHY_ADDR, phy_id) | SET_VAL(REG_ADDR, reg);
+ xgene_enet_wr_mac(addr, MII_MGMT_ADDRESS_ADDR, val);
+
+ xgene_enet_wr_mac(addr, MII_MGMT_CONTROL_ADDR, data);
+ do {
+ usleep_range(5, 10);
+ done = xgene_enet_rd_mac(addr, MII_MGMT_INDICATORS_ADDR);
+ } while ((done & BUSY_MASK) && wait--);
+
+ if (done & BUSY_MASK) {
+ dev_err(&bus->dev, "MII_MGMT write failed\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(xgene_mdio_rgmii_write);
+
+static u32 xgene_menet_rd_diag_csr(struct xgene_mdio_pdata *pdata, u32 offset)
+{
+ return ioread32(pdata->diag_csr_addr + offset);
+}
+
+static void xgene_menet_wr_diag_csr(struct xgene_mdio_pdata *pdata,
+ u32 offset, u32 val)
+{
+ iowrite32(val, pdata->diag_csr_addr + offset);
+}
+
+static int xgene_enet_ecc_init(struct xgene_mdio_pdata *pdata)
+{
+ u32 data;
+ u8 wait = 10;
+
+ xgene_menet_wr_diag_csr(pdata, MENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0);
+ do {
+ usleep_range(100, 110);
+ data = xgene_menet_rd_diag_csr(pdata, MENET_BLOCK_MEM_RDY_ADDR);
+ } while ((data != 0xffffffff) && wait--);
+
+ if (data != 0xffffffff) {
+ dev_err(pdata->dev, "Failed to release memory from shutdown\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void xgene_gmac_reset(struct xgene_mdio_pdata *pdata)
+{
+ xgene_enet_wr_mac(pdata->mac_csr_addr, MAC_CONFIG_1_ADDR, SOFT_RESET);
+ xgene_enet_wr_mac(pdata->mac_csr_addr, MAC_CONFIG_1_ADDR, 0);
+}
+
+static int xgene_mdio_reset(struct xgene_mdio_pdata *pdata)
+{
+ int ret;
+
+ if (pdata->dev->of_node) {
+ clk_prepare_enable(pdata->clk);
+ udelay(5);
+ clk_disable_unprepare(pdata->clk);
+ udelay(5);
+ clk_prepare_enable(pdata->clk);
+ udelay(5);
+ } else {
+#ifdef CONFIG_ACPI
+ acpi_evaluate_object(ACPI_HANDLE(pdata->dev),
+ "_RST", NULL, NULL);
+#endif
+ }
+
+ ret = xgene_enet_ecc_init(pdata);
+ if (ret)
+ return ret;
+ xgene_gmac_reset(pdata);
+
+ return 0;
+}
+
+static void xgene_enet_rd_mdio_csr(void __iomem *base_addr,
+ u32 offset, u32 *val)
+{
+ void __iomem *addr = base_addr + offset;
+
+ *val = ioread32(addr);
+}
+
+static void xgene_enet_wr_mdio_csr(void __iomem *base_addr,
+ u32 offset, u32 val)
+{
+ void __iomem *addr = base_addr + offset;
+
+ iowrite32(val, addr);
+}
+
+static int xgene_xfi_mdio_write(struct mii_bus *bus, int phy_id,
+ int reg, u16 data)
+{
+ void __iomem *addr = (void __iomem *)bus->priv;
+ int timeout = 100;
+ u32 status, val;
+
+ val = SET_VAL(HSTPHYADX, phy_id) | SET_VAL(HSTREGADX, reg) |
+ SET_VAL(HSTMIIMWRDAT, data);
+	xgene_enet_wr_mdio_csr(addr, MIIM_FIELD_ADDR, val);
+
+ val = HSTLDCMD | SET_VAL(HSTMIIMCMD, MIIM_CMD_LEGACY_WRITE);
+ xgene_enet_wr_mdio_csr(addr, MIIM_COMMAND_ADDR, val);
+
+ do {
+ usleep_range(5, 10);
+ xgene_enet_rd_mdio_csr(addr, MIIM_INDICATOR_ADDR, &status);
+ } while ((status & BUSY_MASK) && timeout--);
+
+ xgene_enet_wr_mdio_csr(addr, MIIM_COMMAND_ADDR, 0);
+
+ return 0;
+}
+
+static int xgene_xfi_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ void __iomem *addr = (void __iomem *)bus->priv;
+ u32 data, status, val;
+ int timeout = 100;
+
+ val = SET_VAL(HSTPHYADX, phy_id) | SET_VAL(HSTREGADX, reg);
+ xgene_enet_wr_mdio_csr(addr, MIIM_FIELD_ADDR, val);
+
+ val = HSTLDCMD | SET_VAL(HSTMIIMCMD, MIIM_CMD_LEGACY_READ);
+ xgene_enet_wr_mdio_csr(addr, MIIM_COMMAND_ADDR, val);
+
+ do {
+ usleep_range(5, 10);
+ xgene_enet_rd_mdio_csr(addr, MIIM_INDICATOR_ADDR, &status);
+ } while ((status & BUSY_MASK) && timeout--);
+
+ if (status & BUSY_MASK) {
+		pr_err("XGENET_MII_MGMT read failed\n");
+ return -EBUSY;
+ }
+
+ xgene_enet_rd_mdio_csr(addr, MIIMRD_FIELD_ADDR, &data);
+ xgene_enet_wr_mdio_csr(addr, MIIM_COMMAND_ADDR, 0);
+
+ return data;
+}
+
+struct phy_device *xgene_enet_phy_register(struct mii_bus *bus, int phy_addr)
+{
+ struct phy_device *phy_dev;
+
+ phy_dev = get_phy_device(bus, phy_addr, false);
+ if (!phy_dev || IS_ERR(phy_dev))
+ return NULL;
+
+ if (phy_device_register(phy_dev))
+ phy_device_free(phy_dev);
+
+ return phy_dev;
+}
+EXPORT_SYMBOL(xgene_enet_phy_register);
+
+#ifdef CONFIG_ACPI
+static acpi_status acpi_register_phy(acpi_handle handle, u32 lvl,
+ void *context, void **ret)
+{
+ struct mii_bus *mdio = context;
+ struct acpi_device *adev;
+ struct phy_device *phy_dev;
+ const union acpi_object *obj;
+ u32 phy_addr;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+
+ if (acpi_dev_get_property(adev, "phy-channel", ACPI_TYPE_INTEGER, &obj))
+ return AE_OK;
+ phy_addr = obj->integer.value;
+
+ phy_dev = xgene_enet_phy_register(mdio, phy_addr);
+ adev->driver_data = phy_dev;
+
+ return AE_OK;
+}
+#endif
+
+static int xgene_mdio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mii_bus *mdio_bus;
+ const struct of_device_id *of_id;
+ struct resource *res;
+ struct xgene_mdio_pdata *pdata;
+ void __iomem *csr_base;
+ int mdio_id = 0, ret = 0;
+
+ of_id = of_match_device(xgene_mdio_of_match, &pdev->dev);
+ if (of_id) {
+ mdio_id = (enum xgene_mdio_id)of_id->data;
+ } else {
+#ifdef CONFIG_ACPI
+ const struct acpi_device_id *acpi_id;
+
+ acpi_id = acpi_match_device(xgene_mdio_acpi_match, &pdev->dev);
+ if (acpi_id)
+ mdio_id = (enum xgene_mdio_id)acpi_id->driver_data;
+#endif
+ }
+
+ if (!mdio_id)
+ return -ENODEV;
+
+ pdata = devm_kzalloc(dev, sizeof(struct xgene_mdio_pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+ pdata->mdio_id = mdio_id;
+ pdata->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ csr_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(csr_base))
+ return PTR_ERR(csr_base);
+ pdata->mac_csr_addr = csr_base;
+ pdata->mdio_csr_addr = csr_base + BLOCK_XG_MDIO_CSR_OFFSET;
+ pdata->diag_csr_addr = csr_base + BLOCK_DIAG_CSR_OFFSET;
+
+ if (dev->of_node) {
+ pdata->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(pdata->clk)) {
+ dev_err(dev, "Unable to retrieve clk\n");
+ return PTR_ERR(pdata->clk);
+ }
+ }
+
+ ret = xgene_mdio_reset(pdata);
+ if (ret)
+ return ret;
+
+ mdio_bus = mdiobus_alloc();
+ if (!mdio_bus)
+ return -ENOMEM;
+
+ mdio_bus->name = "APM X-Gene MDIO bus";
+
+ if (mdio_id == XGENE_MDIO_RGMII) {
+ mdio_bus->read = xgene_mdio_rgmii_read;
+ mdio_bus->write = xgene_mdio_rgmii_write;
+ mdio_bus->priv = (void __force *)pdata->mac_csr_addr;
+ snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s",
+ "xgene-mii-rgmii");
+ } else {
+ mdio_bus->read = xgene_xfi_mdio_read;
+ mdio_bus->write = xgene_xfi_mdio_write;
+ mdio_bus->priv = (void __force *)pdata->mdio_csr_addr;
+ snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s",
+ "xgene-mii-xfi");
+ }
+
+ mdio_bus->parent = dev;
+ platform_set_drvdata(pdev, pdata);
+
+ if (dev->of_node) {
+ ret = of_mdiobus_register(mdio_bus, dev->of_node);
+ } else {
+#ifdef CONFIG_ACPI
+ /* Mask out all PHYs from auto probing. */
+ mdio_bus->phy_mask = ~0;
+ ret = mdiobus_register(mdio_bus);
+ if (ret)
+ goto out;
+
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_HANDLE(dev), 1,
+ acpi_register_phy, NULL, mdio_bus, NULL);
+#endif
+ }
+
+ if (ret)
+ goto out;
+
+ pdata->mdio_bus = mdio_bus;
+ xgene_mdio_status = true;
+
+ return 0;
+
+out:
+ mdiobus_free(mdio_bus);
+
+ return ret;
+}
+
+static int xgene_mdio_remove(struct platform_device *pdev)
+{
+ struct xgene_mdio_pdata *pdata = platform_get_drvdata(pdev);
+ struct mii_bus *mdio_bus = pdata->mdio_bus;
+ struct device *dev = &pdev->dev;
+
+ mdiobus_unregister(mdio_bus);
+ mdiobus_free(mdio_bus);
+
+ if (dev->of_node)
+ clk_disable_unprepare(pdata->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id xgene_mdio_of_match[] = {
+ {
+ .compatible = "apm,xgene-mdio-rgmii",
+ .data = (void *)XGENE_MDIO_RGMII
+ },
+ {
+ .compatible = "apm,xgene-mdio-xfi",
+ .data = (void *)XGENE_MDIO_XFI
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, xgene_mdio_of_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_mdio_acpi_match[] = {
+ { "APMC0D65", XGENE_MDIO_RGMII },
+ { "APMC0D66", XGENE_MDIO_XFI },
+ { }
+};
+
+MODULE_DEVICE_TABLE(acpi, xgene_mdio_acpi_match);
+#endif
+
+static struct platform_driver xgene_mdio_driver = {
+ .driver = {
+ .name = "xgene-mdio",
+ .of_match_table = of_match_ptr(xgene_mdio_of_match),
+ .acpi_match_table = ACPI_PTR(xgene_mdio_acpi_match),
+ },
+ .probe = xgene_mdio_probe,
+ .remove = xgene_mdio_remove,
+};
+
+module_platform_driver(xgene_mdio_driver);
+
+MODULE_DESCRIPTION("APM X-Gene SoC MDIO driver");
+MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
+MODULE_LICENSE("GPL");
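
The RGMII accessors pack PHY and register addresses with the SET_VAL() helper from mdio-xgene.h (below). A worked example as a throwaway check function (hypothetical, illustration only): with PHY_ADDR at bit 8 and REG_ADDR at bit 0, phy_id 3 and reg 2 pack to 0x0302:

static void xgene_addr_packing_example(void)
{
	u64 v = SET_VAL(PHY_ADDR, 3) | SET_VAL(REG_ADDR, 2);

	WARN_ON(v != 0x0302);	/* (3 << 8) | (2 << 0) */
}
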
diff --git a/drivers/net/phy/mdio-xgene.h b/drivers/net/phy/mdio-xgene.h
new file mode 100644
index 000000000..354241b53
--- /dev/null
+++ b/drivers/net/phy/mdio-xgene.h
@@ -0,0 +1,143 @@
+/* Applied Micro X-Gene SoC MDIO Driver
+ *
+ * Copyright (c) 2016, Applied Micro Circuits Corporation
+ * Author: Iyappan Subramanian <isubramanian@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MDIO_XGENE_H__
+#define __MDIO_XGENE_H__
+
+#define BLOCK_XG_MDIO_CSR_OFFSET 0x5000
+#define BLOCK_DIAG_CSR_OFFSET 0xd000
+#define XGENET_CONFIG_REG_ADDR 0x20
+
+#define MAC_ADDR_REG_OFFSET 0x00
+#define MAC_COMMAND_REG_OFFSET 0x04
+#define MAC_WRITE_REG_OFFSET 0x08
+#define MAC_READ_REG_OFFSET 0x0c
+#define MAC_COMMAND_DONE_REG_OFFSET 0x10
+
+#define CLKEN_OFFSET 0x08
+#define SRST_OFFSET 0x00
+
+#define MENET_CFG_MEM_RAM_SHUTDOWN_ADDR 0x70
+#define MENET_BLOCK_MEM_RDY_ADDR 0x74
+
+#define MAC_CONFIG_1_ADDR 0x00
+#define MII_MGMT_COMMAND_ADDR 0x24
+#define MII_MGMT_ADDRESS_ADDR 0x28
+#define MII_MGMT_CONTROL_ADDR 0x2c
+#define MII_MGMT_STATUS_ADDR 0x30
+#define MII_MGMT_INDICATORS_ADDR 0x34
+#define SOFT_RESET BIT(31)
+
+#define MII_MGMT_CONFIG_ADDR 0x20
+
+#define MIIM_COMMAND_ADDR 0x20
+#define MIIM_FIELD_ADDR 0x24
+#define MIIM_CONFIGURATION_ADDR 0x28
+#define MIIM_LINKFAILVECTOR_ADDR 0x2c
+#define MIIM_INDICATOR_ADDR 0x30
+#define MIIMRD_FIELD_ADDR 0x34
+
+#define MDIO_CSR_OFFSET 0x5000
+
+#define REG_ADDR_POS 0
+#define REG_ADDR_LEN 5
+#define PHY_ADDR_POS 8
+#define PHY_ADDR_LEN 5
+
+#define HSTMIIMWRDAT_POS 0
+#define HSTMIIMWRDAT_LEN 16
+#define HSTPHYADX_POS 23
+#define HSTPHYADX_LEN 5
+#define HSTREGADX_POS 18
+#define HSTREGADX_LEN 5
+#define HSTLDCMD BIT(3)
+#define HSTMIIMCMD_POS 0
+#define HSTMIIMCMD_LEN 3
+
+#define BUSY_MASK BIT(0)
+#define READ_CYCLE_MASK BIT(0)
+
+enum xgene_enet_cmd {
+ XGENE_ENET_WR_CMD = BIT(31),
+ XGENE_ENET_RD_CMD = BIT(30)
+};
+
+enum {
+ MIIM_CMD_IDLE,
+ MIIM_CMD_LEGACY_WRITE,
+ MIIM_CMD_LEGACY_READ,
+};
+
+enum xgene_mdio_id {
+ XGENE_MDIO_RGMII = 1,
+ XGENE_MDIO_XFI
+};
+
+struct xgene_mdio_pdata {
+ struct clk *clk;
+ struct device *dev;
+ void __iomem *mac_csr_addr;
+ void __iomem *diag_csr_addr;
+ void __iomem *mdio_csr_addr;
+ struct mii_bus *mdio_bus;
+ int mdio_id;
+};
+
+/* Set the specified value into a bit-field defined by its starting position
+ * and length within a single u64.
+ */
+static inline u64 xgene_enet_set_field_value(int pos, int len, u64 val)
+{
+ return (val & ((1ULL << len) - 1)) << pos;
+}
+
+#define SET_VAL(field, val) \
+ xgene_enet_set_field_value(field ## _POS, field ## _LEN, val)
+
+#define SET_BIT(field) \
+ xgene_enet_set_field_value(field ## _POS, 1, 1)
+
+/* Get the value from a bit-field defined by its starting position
+ * and length within the specified u64.
+ */
+static inline u64 xgene_enet_get_field_value(int pos, int len, u64 src)
+{
+ return (src >> pos) & ((1ULL << len) - 1);
+}
+
+#define GET_VAL(field, src) \
+ xgene_enet_get_field_value(field ## _POS, field ## _LEN, src)
+
+#define GET_BIT(field, src) \
+ xgene_enet_get_field_value(field ## _POS, 1, src)
+
+static const struct of_device_id xgene_mdio_of_match[];
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_mdio_acpi_match[];
+#endif
+int xgene_mdio_rgmii_read(struct mii_bus *bus, int phy_id, int reg);
+int xgene_mdio_rgmii_write(struct mii_bus *bus, int phy_id, int reg, u16 data);
+struct phy_device *xgene_enet_phy_register(struct mii_bus *bus, int phy_addr);
+
+#endif /* __MDIO_XGENE_H__ */
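
SET_VAL() masks the value to the field width before shifting, and GET_VAL() reverses it, so the two round-trip for any in-range value. A hedged sketch of that property (hypothetical helper, not in the header):

static inline bool xgene_field_roundtrip_ok(u64 x)
{
	u64 packed = SET_VAL(HSTREGADX, x);
	u64 mask = (1ULL << HSTREGADX_LEN) - 1;

	/* Holds for every x: out-of-range bits are truncated on the way in. */
	return GET_VAL(HSTREGADX, packed) == (x & mask);
}
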
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 5a8fefc25..885ac9cba 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -311,6 +311,36 @@ static int kszphy_config_init(struct phy_device *phydev)
return 0;
}
+static int ksz8041_config_init(struct phy_device *phydev)
+{
+ struct device_node *of_node = phydev->mdio.dev.of_node;
+
+ /* Limit supported and advertised modes in fiber mode */
+ if (of_property_read_bool(of_node, "micrel,fiber-mode")) {
+ phydev->dev_flags |= MICREL_PHY_FXEN;
+ phydev->supported &= SUPPORTED_FIBRE |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_100baseT_Half;
+ phydev->advertising &= ADVERTISED_FIBRE |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_100baseT_Half;
+ phydev->autoneg = AUTONEG_DISABLE;
+ }
+
+ return kszphy_config_init(phydev);
+}
+
+static int ksz8041_config_aneg(struct phy_device *phydev)
+{
+ /* Skip auto-negotiation in fiber mode */
+ if (phydev->dev_flags & MICREL_PHY_FXEN) {
+ phydev->speed = SPEED_100;
+ return 0;
+ }
+
+ return genphy_config_aneg(phydev);
+}
+
static int ksz9021_load_values_from_of(struct phy_device *phydev,
const struct device_node *of_node,
u16 reg,
@@ -647,17 +677,28 @@ static void kszphy_get_stats(struct phy_device *phydev,
data[i] = kszphy_get_stat(phydev, i);
}
-static int kszphy_resume(struct phy_device *phydev)
+static int kszphy_suspend(struct phy_device *phydev)
{
- int value;
+ /* Disable PHY Interrupts */
+ if (phy_interrupt_is_valid(phydev)) {
+ phydev->interrupts = PHY_INTERRUPT_DISABLED;
+ if (phydev->drv->config_intr)
+ phydev->drv->config_intr(phydev);
+ }
- mutex_lock(&phydev->lock);
+ return genphy_suspend(phydev);
+}
- value = phy_read(phydev, MII_BMCR);
- phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN);
+static int kszphy_resume(struct phy_device *phydev)
+{
+ genphy_resume(phydev);
- kszphy_config_intr(phydev);
- mutex_unlock(&phydev->lock);
+ /* Enable PHY Interrupts */
+ if (phy_interrupt_is_valid(phydev)) {
+ phydev->interrupts = PHY_INTERRUPT_ENABLED;
+ if (phydev->drv->config_intr)
+ phydev->drv->config_intr(phydev);
+ }
return 0;
}
@@ -788,8 +829,8 @@ static struct phy_driver ksphy_driver[] = {
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.driver_data = &ksz8041_type,
.probe = kszphy_probe,
- .config_init = kszphy_config_init,
- .config_aneg = genphy_config_aneg,
+ .config_init = ksz8041_config_init,
+ .config_aneg = ksz8041_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
@@ -839,7 +880,7 @@ static struct phy_driver ksphy_driver[] = {
}, {
.phy_id = PHY_ID_KSZ8001,
.name = "Micrel KSZ8001 or KS8721",
- .phy_id_mask = 0x00ffffff,
+ .phy_id_mask = 0x00fffffc,
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.driver_data = &ksz8041_type,
@@ -870,7 +911,7 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
- .suspend = genphy_suspend,
+ .suspend = kszphy_suspend,
.resume = kszphy_resume,
}, {
.phy_id = PHY_ID_KSZ8061,
@@ -923,7 +964,7 @@ static struct phy_driver ksphy_driver[] = {
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
- .resume = genphy_resume,
+ .resume = kszphy_resume,
}, {
.phy_id = PHY_ID_KSZ8873MLL,
.phy_id_mask = MICREL_PHY_ID_MASK,
@@ -963,7 +1004,7 @@ MODULE_LICENSE("GPL");
static struct mdio_device_id __maybe_unused micrel_tbl[] = {
{ PHY_ID_KSZ9021, 0x000ffffe },
{ PHY_ID_KSZ9031, MICREL_PHY_ID_MASK },
- { PHY_ID_KSZ8001, 0x00ffffff },
+ { PHY_ID_KSZ8001, 0x00fffffc },
{ PHY_ID_KS8737, MICREL_PHY_ID_MASK },
{ PHY_ID_KSZ8021, 0x00ffffff },
{ PHY_ID_KSZ8031, 0x00ffffff },
diff --git a/drivers/net/phy/swphy.c b/drivers/net/phy/swphy.c
new file mode 100644
index 000000000..34f58f234
--- /dev/null
+++ b/drivers/net/phy/swphy.c
@@ -0,0 +1,179 @@
+/*
+ * Software PHY emulation
+ *
+ * Code taken from fixed_phy.c by Russell King <rmk+kernel@arm.linux.org.uk>
+ *
+ * Author: Vitaly Bordug <vbordug@ru.mvista.com>
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * Copyright (c) 2006-2007 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/export.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+
+#include "swphy.h"
+
+#define MII_REGS_NUM 29
+
+struct swmii_regs {
+ u16 bmcr;
+ u16 bmsr;
+ u16 lpa;
+ u16 lpagb;
+};
+
+enum {
+ SWMII_SPEED_10 = 0,
+ SWMII_SPEED_100,
+ SWMII_SPEED_1000,
+ SWMII_DUPLEX_HALF = 0,
+ SWMII_DUPLEX_FULL,
+};
+
+/*
+ * These two tables get bitwise-anded together to produce the final result.
+ * This means the speed table must contain both duplex settings, and the
+ * duplex table must contain all speed settings.
+ */
+static const struct swmii_regs speed[] = {
+ [SWMII_SPEED_10] = {
+ .bmcr = BMCR_FULLDPLX,
+ .lpa = LPA_10FULL | LPA_10HALF,
+ },
+ [SWMII_SPEED_100] = {
+ .bmcr = BMCR_FULLDPLX | BMCR_SPEED100,
+ .bmsr = BMSR_100FULL | BMSR_100HALF,
+ .lpa = LPA_100FULL | LPA_100HALF,
+ },
+ [SWMII_SPEED_1000] = {
+ .bmcr = BMCR_FULLDPLX | BMCR_SPEED1000,
+ .bmsr = BMSR_ESTATEN,
+ .lpagb = LPA_1000FULL | LPA_1000HALF,
+ },
+};
+
+static const struct swmii_regs duplex[] = {
+ [SWMII_DUPLEX_HALF] = {
+ .bmcr = ~BMCR_FULLDPLX,
+ .bmsr = BMSR_ESTATEN | BMSR_100HALF,
+ .lpa = LPA_10HALF | LPA_100HALF,
+ .lpagb = LPA_1000HALF,
+ },
+ [SWMII_DUPLEX_FULL] = {
+ .bmcr = ~0,
+ .bmsr = BMSR_ESTATEN | BMSR_100FULL,
+ .lpa = LPA_10FULL | LPA_100FULL,
+ .lpagb = LPA_1000FULL,
+ },
+};
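To make the AND-composition concrete, a worked example (commentary only, not
driver code): for a 100 Mb/s half-duplex link,

	/*
	 * speed[SWMII_SPEED_100].lpa    = LPA_100FULL | LPA_100HALF
	 * duplex[SWMII_DUPLEX_HALF].lpa = LPA_10HALF  | LPA_100HALF
	 *
	 * ANDed together, only LPA_100HALF survives, which is exactly the
	 * link-partner ability a 100/half link should report.
	 */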
+
+static int swphy_decode_speed(int speed)
+{
+ switch (speed) {
+ case 1000:
+ return SWMII_SPEED_1000;
+ case 100:
+ return SWMII_SPEED_100;
+ case 10:
+ return SWMII_SPEED_10;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * swphy_validate_state - validate the software phy status
+ * @state: software phy status
+ *
+ * This checks that the state stored in @state can be
+ * represented in the emulated MII registers. Returns 0 if it can,
+ * otherwise returns -EINVAL.
+ */
+int swphy_validate_state(const struct fixed_phy_status *state)
+{
+ int err;
+
+ if (state->link) {
+ err = swphy_decode_speed(state->speed);
+ if (err < 0) {
+ pr_warn("swphy: unknown speed\n");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(swphy_validate_state);
+
+/**
+ * swphy_read_reg - return a MII register from the fixed phy state
+ * @reg: MII register
+ * @state: fixed phy status
+ *
+ * Return the MII @reg register generated from the fixed phy state @state.
+ */
+int swphy_read_reg(int reg, const struct fixed_phy_status *state)
+{
+ int speed_index, duplex_index;
+ u16 bmsr = BMSR_ANEGCAPABLE;
+ u16 bmcr = 0;
+ u16 lpagb = 0;
+ u16 lpa = 0;
+
+ if (reg > MII_REGS_NUM)
+ return -1;
+
+ speed_index = swphy_decode_speed(state->speed);
+ if (WARN_ON(speed_index < 0))
+ return 0;
+
+ duplex_index = state->duplex ? SWMII_DUPLEX_FULL : SWMII_DUPLEX_HALF;
+
+ bmsr |= speed[speed_index].bmsr & duplex[duplex_index].bmsr;
+
+ if (state->link) {
+ bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE;
+
+ bmcr |= speed[speed_index].bmcr & duplex[duplex_index].bmcr;
+ lpa |= speed[speed_index].lpa & duplex[duplex_index].lpa;
+ lpagb |= speed[speed_index].lpagb & duplex[duplex_index].lpagb;
+
+ if (state->pause)
+ lpa |= LPA_PAUSE_CAP;
+
+ if (state->asym_pause)
+ lpa |= LPA_PAUSE_ASYM;
+ }
+
+ switch (reg) {
+ case MII_BMCR:
+ return bmcr;
+ case MII_BMSR:
+ return bmsr;
+ case MII_PHYSID1:
+ case MII_PHYSID2:
+ return 0;
+ case MII_LPA:
+ return lpa;
+ case MII_STAT1000:
+ return lpagb;
+
+ /*
+ * We do not support emulating Clause 45 over Clause 22 register
+ * reads. Return an error instead of bogus data.
+ */
+ case MII_MMD_CTRL:
+ case MII_MMD_DATA:
+ return -1;
+
+ default:
+ return 0xffff;
+ }
+}
+EXPORT_SYMBOL_GPL(swphy_read_reg);
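A hedged sketch of a typical caller (the bus->priv layout is an assumption
made for illustration; only the two exported swphy helpers are real):

	/* Sketch: an MDIO read op for a bus fronting a software PHY. */
	static int swphy_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
	{
		const struct fixed_phy_status *state = bus->priv; /* assumed */

		return swphy_read_reg(reg, state);
	}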
diff --git a/drivers/net/phy/swphy.h b/drivers/net/phy/swphy.h
new file mode 100644
index 000000000..2f09ac324
--- /dev/null
+++ b/drivers/net/phy/swphy.h
@@ -0,0 +1,9 @@
+#ifndef SWPHY_H
+#define SWPHY_H
+
+struct fixed_phy_status;
+
+int swphy_validate_state(const struct fixed_phy_status *state);
+int swphy_read_reg(int reg, const struct fixed_phy_status *state);
+
+#endif
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index a30ee427e..f226db461 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1312,10 +1312,9 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
return stats64;
}
-static struct lock_class_key ppp_tx_busylock;
static int ppp_dev_init(struct net_device *dev)
{
- dev->qdisc_tx_busylock = &ppp_tx_busylock;
+ netdev_lockdep_set_classes(dev);
return 0;
}
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index fdee77207..a380649bf 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1576,23 +1576,6 @@ static const struct team_option team_options[] = {
},
};
-static struct lock_class_key team_netdev_xmit_lock_key;
-static struct lock_class_key team_netdev_addr_lock_key;
-static struct lock_class_key team_tx_busylock_key;
-
-static void team_set_lockdep_class_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *unused)
-{
- lockdep_set_class(&txq->_xmit_lock, &team_netdev_xmit_lock_key);
-}
-
-static void team_set_lockdep_class(struct net_device *dev)
-{
- lockdep_set_class(&dev->addr_list_lock, &team_netdev_addr_lock_key);
- netdev_for_each_tx_queue(dev, team_set_lockdep_class_one, NULL);
- dev->qdisc_tx_busylock = &team_tx_busylock_key;
-}
static int team_init(struct net_device *dev)
{
@@ -1628,7 +1611,7 @@ static int team_init(struct net_device *dev)
goto err_options_register;
netif_carrier_off(dev);
- team_set_lockdep_class(dev);
+ netdev_lockdep_set_classes(dev);
return 0;
@@ -2019,6 +2002,8 @@ static const struct net_device_ops team_netdev_ops = {
.ndo_add_slave = team_add_slave,
.ndo_del_slave = team_del_slave,
.ndo_fix_features = team_fix_features,
+ .ndo_neigh_construct = netdev_default_l2upper_neigh_construct,
+ .ndo_neigh_destroy = netdev_default_l2upper_neigh_destroy,
.ndo_change_carrier = team_change_carrier,
.ndo_bridge_setlink = switchdev_port_bridge_setlink,
.ndo_bridge_getlink = switchdev_port_bridge_getlink,
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index cdb19b385..b228bea79 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -14,9 +14,23 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/if_team.h>
+static rx_handler_result_t lb_receive(struct team *team, struct team_port *port,
+ struct sk_buff *skb)
+{
+ if (unlikely(skb->protocol == htons(ETH_P_SLOW))) {
+ /* LACPDU packets should go to exact delivery */
+ const unsigned char *dest = eth_hdr(skb)->h_dest;
+
+ if (is_link_local_ether_addr(dest) && dest[5] == 0x02)
+ return RX_HANDLER_EXACT;
+ }
+ return RX_HANDLER_ANOTHER;
+}
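The address test above, spelled out (illustrative equivalent only): LACPDUs
are sent to the IEEE Slow Protocols multicast address 01:80:C2:00:00:02, so
the same check could read

	static const u8 lacpdu_dest[ETH_ALEN] = {
		0x01, 0x80, 0xc2, 0x00, 0x00, 0x02
	};

	if (ether_addr_equal(dest, lacpdu_dest))
		return RX_HANDLER_EXACT;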
+
struct lb_priv;
typedef struct team_port *lb_select_tx_port_func_t(struct team *,
@@ -652,6 +666,7 @@ static const struct team_mode_ops lb_mode_ops = {
.port_enter = lb_port_enter,
.port_leave = lb_port_leave,
.port_disabled = lb_port_disabled,
+ .receive = lb_receive,
.transmit = lb_transmit,
};
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 34259bd0a..6f9df375c 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -71,6 +71,7 @@
#include <net/sock.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
+#include <linux/skb_array.h>
#include <asm/uaccess.h>
@@ -167,6 +168,7 @@ struct tun_file {
};
struct list_head next;
struct tun_struct *detached;
+ struct skb_array tx_array;
};
struct tun_flow_entry {
@@ -515,7 +517,11 @@ static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
static void tun_queue_purge(struct tun_file *tfile)
{
- skb_queue_purge(&tfile->sk.sk_receive_queue);
+ struct sk_buff *skb;
+
+ while ((skb = skb_array_consume(&tfile->tx_array)) != NULL)
+ kfree_skb(skb);
+
skb_queue_purge(&tfile->sk.sk_error_queue);
}
@@ -560,6 +566,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
tun->dev->reg_state == NETREG_REGISTERED)
unregister_netdevice(tun->dev);
}
+ if (tun)
+ skb_array_cleanup(&tfile->tx_array);
sock_put(&tfile->sk);
}
}
@@ -613,6 +621,7 @@ static void tun_detach_all(struct net_device *dev)
static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filter)
{
struct tun_file *tfile = file->private_data;
+ struct net_device *dev = tun->dev;
int err;
err = security_tun_dev_attach(tfile->socket.sk, tun->security);
@@ -642,6 +651,13 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
if (!err)
goto out;
}
+
+ if (!tfile->detached &&
+ skb_array_init(&tfile->tx_array, dev->tx_queue_len, GFP_KERNEL)) {
+ err = -ENOMEM;
+ goto out;
+ }
+
tfile->queue_index = tun->numqueues;
tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
rcu_assign_pointer(tfile->tun, tun);
@@ -887,8 +903,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
nf_reset(skb);
- /* Enqueue packet */
- skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb);
+ if (skb_array_produce(&tfile->tx_array, skb))
+ goto drop;
/* Notify and wake up reader process */
if (tfile->flags & TUN_FASYNC)
@@ -1103,7 +1119,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
poll_wait(file, sk_sleep(sk), wait);
- if (!skb_queue_empty(&sk->sk_receive_queue))
+ if (!skb_array_empty(&tfile->tx_array))
mask |= POLLIN | POLLRDNORM;
if (sock_writeable(sk) ||
@@ -1250,13 +1266,11 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
return -EFAULT;
}
- if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
- if (!skb_partial_csum_set(skb, tun16_to_cpu(tun, gso.csum_start),
- tun16_to_cpu(tun, gso.csum_offset))) {
- this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
- kfree_skb(skb);
- return -EINVAL;
- }
+ err = virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun));
+ if (err) {
+ this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
+ kfree_skb(skb);
+ return -EINVAL;
}
switch (tun->flags & TUN_TYPE_MASK) {
@@ -1285,39 +1299,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
break;
}
- if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
- pr_debug("GSO!\n");
- switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
- case VIRTIO_NET_HDR_GSO_TCPV4:
- skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
- break;
- case VIRTIO_NET_HDR_GSO_TCPV6:
- skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
- break;
- case VIRTIO_NET_HDR_GSO_UDP:
- skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
- break;
- default:
- this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
- kfree_skb(skb);
- return -EINVAL;
- }
-
- if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN)
- skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
-
- skb_shinfo(skb)->gso_size = tun16_to_cpu(tun, gso.gso_size);
- if (skb_shinfo(skb)->gso_size == 0) {
- this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
- kfree_skb(skb);
- return -EINVAL;
- }
-
- /* Header must be checked, and gso_segs computed. */
- skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
- skb_shinfo(skb)->gso_segs = 0;
- }
-
/* copy skb_ubuf_info for callback when skb has no error */
if (zerocopy) {
skb_shinfo(skb)->destructor_arg = msg_control;
@@ -1395,46 +1376,26 @@ static ssize_t tun_put_user(struct tun_struct *tun,
if (vnet_hdr_sz) {
struct virtio_net_hdr gso = { 0 }; /* no info leak */
+ int ret;
+
if (iov_iter_count(iter) < vnet_hdr_sz)
return -EINVAL;
- if (skb_is_gso(skb)) {
+ ret = virtio_net_hdr_from_skb(skb, &gso,
+ tun_is_little_endian(tun));
+ if (ret) {
struct skb_shared_info *sinfo = skb_shinfo(skb);
-
- /* This is a hint as to how much should be linear. */
- gso.hdr_len = cpu_to_tun16(tun, skb_headlen(skb));
- gso.gso_size = cpu_to_tun16(tun, sinfo->gso_size);
- if (sinfo->gso_type & SKB_GSO_TCPV4)
- gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
- else if (sinfo->gso_type & SKB_GSO_TCPV6)
- gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
- else if (sinfo->gso_type & SKB_GSO_UDP)
- gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
- else {
- pr_err("unexpected GSO type: "
- "0x%x, gso_size %d, hdr_len %d\n",
- sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
- tun16_to_cpu(tun, gso.hdr_len));
- print_hex_dump(KERN_ERR, "tun: ",
- DUMP_PREFIX_NONE,
- 16, 1, skb->head,
- min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
- WARN_ON_ONCE(1);
- return -EINVAL;
- }
- if (sinfo->gso_type & SKB_GSO_TCP_ECN)
- gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
- } else
- gso.gso_type = VIRTIO_NET_HDR_GSO_NONE;
-
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- gso.csum_start = cpu_to_tun16(tun, skb_checksum_start_offset(skb) +
- vlan_hlen);
- gso.csum_offset = cpu_to_tun16(tun, skb->csum_offset);
- } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
- gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
- } /* else everything is zero */
+ pr_err("unexpected GSO type: "
+ "0x%x, gso_size %d, hdr_len %d\n",
+ sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
+ tun16_to_cpu(tun, gso.hdr_len));
+ print_hex_dump(KERN_ERR, "tun: ",
+ DUMP_PREFIX_NONE,
+ 16, 1, skb->head,
+ min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
return -EFAULT;
@@ -1477,22 +1438,63 @@ done:
return total;
}
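For orientation, the contract of the shared helpers this file now delegates
to (both live in <linux/virtio_net.h>; summarized here, not quoted):

	/*
	 * virtio_net_hdr_to_skb(skb, hdr, little_endian):
	 *	apply csum_start/csum_offset and the GSO type/size carried
	 *	in *hdr to the skb; non-zero return means a malformed header.
	 *
	 * virtio_net_hdr_from_skb(skb, hdr, little_endian):
	 *	fill *hdr from the skb's checksum and GSO state; non-zero
	 *	return means the skb's GSO type cannot be expressed.
	 */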
+static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,
+ int *err)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ struct sk_buff *skb = NULL;
+ int error = 0;
+
+ skb = skb_array_consume(&tfile->tx_array);
+ if (skb)
+ goto out;
+ if (noblock) {
+ error = -EAGAIN;
+ goto out;
+ }
+
+ add_wait_queue(&tfile->wq.wait, &wait);
+ current->state = TASK_INTERRUPTIBLE;
+
+ while (1) {
+ skb = skb_array_consume(&tfile->tx_array);
+ if (skb)
+ break;
+ if (signal_pending(current)) {
+ error = -ERESTARTSYS;
+ break;
+ }
+ if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
+ error = -EFAULT;
+ break;
+ }
+
+ schedule();
+ }
+
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&tfile->wq.wait, &wait);
+
+out:
+ *err = error;
+ return skb;
+}
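For context, a minimal sketch of the skb_array API used above (process
context assumed; the ring size is arbitrary and error handling is trimmed):

	struct skb_array ring;

	skb_array_init(&ring, 256, GFP_KERNEL); /* allocate a 256-slot FIFO */

	if (skb_array_produce(&ring, skb))      /* non-zero: ring full */
		kfree_skb(skb);                 /* producer drops on overflow */

	skb = skb_array_consume(&ring);         /* NULL when the ring is empty */

	skb_array_cleanup(&ring);               /* release the ring */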
+
static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
struct iov_iter *to,
int noblock)
{
struct sk_buff *skb;
ssize_t ret;
- int peeked, err, off = 0;
+ int err;
tun_debug(KERN_INFO, tun, "tun_do_read\n");
if (!iov_iter_count(to))
return 0;
- /* Read frames from queue */
- skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0,
- &peeked, &off, &err);
+ /* Read frames from ring */
+ skb = tun_ring_recv(tfile, noblock, &err);
if (!skb)
return err;
@@ -1625,8 +1627,25 @@ out:
return ret;
}
+static int tun_peek_len(struct socket *sock)
+{
+ struct tun_file *tfile = container_of(sock, struct tun_file, socket);
+ struct tun_struct *tun;
+ int ret = 0;
+
+ tun = __tun_get(tfile);
+ if (!tun)
+ return 0;
+
+ ret = skb_array_peek_len(&tfile->tx_array);
+ tun_put(tun);
+
+ return ret;
+}
+
/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tun_socket_ops = {
+ .peek_len = tun_peek_len,
.sendmsg = tun_sendmsg,
.recvmsg = tun_recvmsg,
};
@@ -2448,6 +2467,56 @@ static const struct ethtool_ops tun_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
};
+static int tun_queue_resize(struct tun_struct *tun)
+{
+ struct net_device *dev = tun->dev;
+ struct tun_file *tfile;
+ struct skb_array **arrays;
+ int n = tun->numqueues + tun->numdisabled;
+ int ret, i;
+
+ arrays = kmalloc(sizeof *arrays * n, GFP_KERNEL);
+ if (!arrays)
+ return -ENOMEM;
+
+ for (i = 0; i < tun->numqueues; i++) {
+ tfile = rtnl_dereference(tun->tfiles[i]);
+ arrays[i] = &tfile->tx_array;
+ }
+ list_for_each_entry(tfile, &tun->disabled, next)
+ arrays[i++] = &tfile->tx_array;
+
+ ret = skb_array_resize_multiple(arrays, n,
+ dev->tx_queue_len, GFP_KERNEL);
+
+ kfree(arrays);
+ return ret;
+}
+
+static int tun_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct tun_struct *tun = netdev_priv(dev);
+
+ if (dev->rtnl_link_ops != &tun_link_ops)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_CHANGE_TX_QUEUE_LEN:
+ if (tun_queue_resize(tun))
+ return NOTIFY_BAD;
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block tun_notifier_block __read_mostly = {
+ .notifier_call = tun_device_event,
+};
static int __init tun_init(void)
{
@@ -2467,6 +2536,8 @@ static int __init tun_init(void)
pr_err("Can't register misc device %d\n", TUN_MINOR);
goto err_misc;
}
+
+ register_netdevice_notifier(&tun_notifier_block);
return 0;
err_misc:
rtnl_link_unregister(&tun_link_ops);
@@ -2478,6 +2549,7 @@ static void tun_cleanup(void)
{
misc_deregister(&tun_miscdev);
rtnl_link_unregister(&tun_link_ops);
+ unregister_netdevice_notifier(&tun_notifier_block);
}
/* Get an underlying socket object from tun file. Returns error unless file is
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index cf77f2dff..163a2c576 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -149,24 +149,6 @@ static const struct net_device_ops ax88172a_netdev_ops = {
.ndo_set_rx_mode = asix_set_multicast,
};
-static int ax88172a_get_settings(struct net_device *net,
- struct ethtool_cmd *cmd)
-{
- if (!net->phydev)
- return -ENODEV;
-
- return phy_ethtool_gset(net->phydev, cmd);
-}
-
-static int ax88172a_set_settings(struct net_device *net,
- struct ethtool_cmd *cmd)
-{
- if (!net->phydev)
- return -ENODEV;
-
- return phy_ethtool_sset(net->phydev, cmd);
-}
-
static int ax88172a_nway_reset(struct net_device *net)
{
if (!net->phydev)
@@ -185,9 +167,9 @@ static const struct ethtool_ops ax88172a_ethtool_ops = {
.get_eeprom_len = asix_get_eeprom_len,
.get_eeprom = asix_get_eeprom,
.set_eeprom = asix_set_eeprom,
- .get_settings = ax88172a_get_settings,
- .set_settings = ax88172a_set_settings,
.nway_reset = ax88172a_nway_reset,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static int ax88172a_reset_phy(struct usbnet *dev, int embd_phy)
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 7cba2c375..c47ec0a04 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -388,6 +388,12 @@ void usbnet_cdc_status(struct usbnet *dev, struct urb *urb)
case USB_CDC_NOTIFY_NETWORK_CONNECTION:
netif_dbg(dev, timer, dev->net, "CDC: carrier %s\n",
event->wValue ? "on" : "off");
+
+ /* Work-around for devices with broken off-notifications */
+ if (event->wValue &&
+ !test_bit(__LINK_STATE_NOCARRIER, &dev->net->state))
+ usbnet_link_change(dev, 0, 0);
+
usbnet_link_change(dev, !!event->wValue, 0);
break;
case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */
@@ -432,6 +438,34 @@ int usbnet_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
}
EXPORT_SYMBOL_GPL(usbnet_cdc_bind);
+static int usbnet_cdc_zte_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ int status = usbnet_cdc_bind(dev, intf);
+
+ if (!status && (dev->net->dev_addr[0] & 0x02))
+ eth_hw_addr_random(dev->net);
+
+ return status;
+}
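The "& 0x02" test above keys off the locally administered bit of the first
MAC octet; a helper-based equivalent (purely illustrative) would be

	/* A set locally-administered bit suggests the firmware never
	 * provided a globally unique address, so randomize instead.
	 */
	if (!status && is_local_ether_addr(dev->net->dev_addr))
		eth_hw_addr_random(dev->net);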
+
+/* Make sure packets have the correct destination MAC address
+ *
+ * A firmware bug observed on some devices (ZTE MF823/831/910) is that the
+ * device sends packets with a static, bogus, random MAC address (even if
+ * the device MAC address has been updated). Always set the MAC address to
+ * that of the device.
+ */
+static int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+ if (skb->len < ETH_HLEN || !(skb->data[0] & 0x02))
+ return 1;
+
+ skb_reset_mac_header(skb);
+ ether_addr_copy(eth_hdr(skb)->h_dest, dev->net->dev_addr);
+
+ return 1;
+}
+
static const struct driver_info cdc_info = {
.description = "CDC Ethernet Device",
.flags = FLAG_ETHER | FLAG_POINTTOPOINT,
@@ -442,6 +476,17 @@ static const struct driver_info cdc_info = {
.manage_power = usbnet_manage_power,
};
+static const struct driver_info zte_cdc_info = {
+ .description = "ZTE CDC Ethernet Device",
+ .flags = FLAG_ETHER | FLAG_POINTTOPOINT,
+ .bind = usbnet_cdc_zte_bind,
+ .unbind = usbnet_cdc_unbind,
+ .status = usbnet_cdc_status,
+ .set_rx_mode = usbnet_cdc_update_filter,
+ .manage_power = usbnet_manage_power,
+ .rx_fixup = usbnet_cdc_zte_rx_fixup,
+};
+
static const struct driver_info wwan_info = {
.description = "Mobile Broadband Network Device",
.flags = FLAG_WWAN,
@@ -707,6 +752,12 @@ static const struct usb_device_id products[] = {
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
.driver_info = (kernel_ulong_t)&wwan_info,
}, {
+ /* ZTE modules */
+ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&zte_cdc_info,
+}, {
USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long) &cdc_info,
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 52c72ed22..ff95b2857 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -1006,6 +1006,7 @@ static int kaweth_probe(
struct net_device *netdev;
const eth_addr_t bcast_addr = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
int result = 0;
+ int rv = -EIO;
dev_dbg(dev,
"Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x\n",
@@ -1026,6 +1027,7 @@ static int kaweth_probe(
kaweth = netdev_priv(netdev);
kaweth->dev = udev;
kaweth->net = netdev;
+ kaweth->intf = intf;
spin_lock_init(&kaweth->device_lock);
init_waitqueue_head(&kaweth->term_wait);
@@ -1045,6 +1047,10 @@ static int kaweth_probe(
/* Download the firmware */
dev_info(dev, "Downloading firmware...\n");
kaweth->firmware_buf = (__u8 *)__get_free_page(GFP_KERNEL);
+ if (!kaweth->firmware_buf) {
+ rv = -ENOMEM;
+ goto err_free_netdev;
+ }
if ((result = kaweth_download_firmware(kaweth,
"/*(DEBLOBBED)*/",
100,
@@ -1136,8 +1142,6 @@ err_fw:
dev_dbg(dev, "Initializing net device.\n");
- kaweth->intf = intf;
-
kaweth->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!kaweth->tx_urb)
goto err_free_netdev;
@@ -1201,7 +1205,7 @@ err_only_tx:
err_free_netdev:
free_netdev(netdev);
- return -EIO;
+ return rv;
}
/****************************************************************
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index e9654a685..c25424886 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -32,7 +32,7 @@
#define NETNEXT_VERSION "08"
/* Information for net */
-#define NET_VERSION "5"
+#define NET_VERSION "6"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -613,7 +613,7 @@ struct r8152 {
struct list_head rx_done, tx_free;
struct sk_buff_head tx_queue, rx_queue;
spinlock_t rx_lock, tx_lock;
- struct delayed_work schedule;
+ struct delayed_work schedule, hw_phy_work;
struct mii_if_info mii;
struct mutex control; /* use for hw setting */
#ifdef CONFIG_PM_SLEEP
@@ -630,6 +630,7 @@ struct r8152 {
int (*eee_get)(struct r8152 *, struct ethtool_eee *);
int (*eee_set)(struct r8152 *, struct ethtool_eee *);
bool (*in_nway)(struct r8152 *);
+ void (*hw_phy_cfg)(struct r8152 *);
void (*autosuspend_en)(struct r8152 *tp, bool enable);
} rtl_ops;
@@ -639,8 +640,11 @@ struct r8152 {
u32 tx_qlen;
u32 coalesce;
u16 ocp_base;
+ u16 speed;
u8 *intr_buff;
u8 version;
+ u8 duplex;
+ u8 autoneg;
};
enum rtl_version {
@@ -1820,7 +1824,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
pkt_len -= CRC_SIZE;
rx_data += sizeof(struct rx_desc);
- skb = netdev_alloc_skb_ip_align(netdev, pkt_len);
+ skb = napi_alloc_skb(&tp->napi, pkt_len);
if (!skb) {
stats->rx_dropped++;
goto find_next_rx;
@@ -2512,27 +2516,6 @@ static void rtl8153_runtime_enable(struct r8152 *tp, bool enable)
}
}
-static void rtl_phy_reset(struct r8152 *tp)
-{
- u16 data;
- int i;
-
- data = r8152_mdio_read(tp, MII_BMCR);
-
- /* don't reset again before the previous one complete */
- if (data & BMCR_RESET)
- return;
-
- data |= BMCR_RESET;
- r8152_mdio_write(tp, MII_BMCR, data);
-
- for (i = 0; i < 50; i++) {
- msleep(20);
- if ((r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET) == 0)
- break;
- }
-}
-
static void r8153_teredo_off(struct r8152 *tp)
{
u32 ocp_data;
@@ -2569,6 +2552,77 @@ static void r8152_aldps_en(struct r8152 *tp, bool enable)
}
}
+static inline void r8152_mmd_indirect(struct r8152 *tp, u16 dev, u16 reg)
+{
+ ocp_reg_write(tp, OCP_EEE_AR, FUN_ADDR | dev);
+ ocp_reg_write(tp, OCP_EEE_DATA, reg);
+ ocp_reg_write(tp, OCP_EEE_AR, FUN_DATA | dev);
+}
+
+static u16 r8152_mmd_read(struct r8152 *tp, u16 dev, u16 reg)
+{
+ u16 data;
+
+ r8152_mmd_indirect(tp, dev, reg);
+ data = ocp_reg_read(tp, OCP_EEE_DATA);
+ ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
+
+ return data;
+}
+
+static void r8152_mmd_write(struct r8152 *tp, u16 dev, u16 reg, u16 data)
+{
+ r8152_mmd_indirect(tp, dev, reg);
+ ocp_reg_write(tp, OCP_EEE_DATA, data);
+ ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
+}
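A hedged usage note for the indirect MMD helpers above (the MDIO_* constants
come from <linux/mdio.h>; the surrounding context is assumed):

	/* Read the current EEE advertisement from the AN MMD, then extend it. */
	u16 adv = r8152_mmd_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV);

	r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv | MDIO_EEE_100TX);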
+
+static void r8152_eee_en(struct r8152 *tp, bool enable)
+{
+ u16 config1, config2, config3;
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
+ config1 = ocp_reg_read(tp, OCP_EEE_CONFIG1) & ~sd_rise_time_mask;
+ config2 = ocp_reg_read(tp, OCP_EEE_CONFIG2);
+ config3 = ocp_reg_read(tp, OCP_EEE_CONFIG3) & ~fast_snr_mask;
+
+ if (enable) {
+ ocp_data |= EEE_RX_EN | EEE_TX_EN;
+ config1 |= EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN | RX_QUIET_EN;
+ config1 |= sd_rise_time(1);
+ config2 |= RG_DACQUIET_EN | RG_LDVQUIET_EN;
+ config3 |= fast_snr(42);
+ } else {
+ ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
+ config1 &= ~(EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN |
+ RX_QUIET_EN);
+ config1 |= sd_rise_time(7);
+ config2 &= ~(RG_DACQUIET_EN | RG_LDVQUIET_EN);
+ config3 |= fast_snr(511);
+ }
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
+ ocp_reg_write(tp, OCP_EEE_CONFIG1, config1);
+ ocp_reg_write(tp, OCP_EEE_CONFIG2, config2);
+ ocp_reg_write(tp, OCP_EEE_CONFIG3, config3);
+}
+
+static void r8152b_enable_eee(struct r8152 *tp)
+{
+ r8152_eee_en(tp, true);
+ r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, MDIO_EEE_100TX);
+}
+
+static void r8152b_enable_fc(struct r8152 *tp)
+{
+ u16 anar;
+
+ anar = r8152_mdio_read(tp, MII_ADVERTISE);
+ anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ r8152_mdio_write(tp, MII_ADVERTISE, anar);
+}
+
static void rtl8152_disable(struct r8152 *tp)
{
r8152_aldps_en(tp, false);
@@ -2578,13 +2632,9 @@ static void rtl8152_disable(struct r8152 *tp)
static void r8152b_hw_phy_cfg(struct r8152 *tp)
{
- u16 data;
-
- data = r8152_mdio_read(tp, MII_BMCR);
- if (data & BMCR_PDOWN) {
- data &= ~BMCR_PDOWN;
- r8152_mdio_write(tp, MII_BMCR, data);
- }
+ r8152b_enable_eee(tp);
+ r8152_aldps_en(tp, true);
+ r8152b_enable_fc(tp);
set_bit(PHY_RESET, &tp->flags);
}
@@ -2600,8 +2650,6 @@ static void r8152b_exit_oob(struct r8152 *tp)
rxdy_gated_en(tp, true);
r8153_teredo_off(tp);
- r8152b_hw_phy_cfg(tp);
-
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, 0x00);
@@ -2720,20 +2768,52 @@ static void r8152b_enter_oob(struct r8152 *tp)
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
}
+static void r8153_aldps_en(struct r8152 *tp, bool enable)
+{
+ u16 data;
+
+ data = ocp_reg_read(tp, OCP_POWER_CFG);
+ if (enable) {
+ data |= EN_ALDPS;
+ ocp_reg_write(tp, OCP_POWER_CFG, data);
+ } else {
+ data &= ~EN_ALDPS;
+ ocp_reg_write(tp, OCP_POWER_CFG, data);
+ msleep(20);
+ }
+}
+
+static void r8153_eee_en(struct r8152 *tp, bool enable)
+{
+ u32 ocp_data;
+ u16 config;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
+ config = ocp_reg_read(tp, OCP_EEE_CFG);
+
+ if (enable) {
+ ocp_data |= EEE_RX_EN | EEE_TX_EN;
+ config |= EEE10_EN;
+ } else {
+ ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
+ config &= ~EEE10_EN;
+ }
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
+ ocp_reg_write(tp, OCP_EEE_CFG, config);
+}
+
static void r8153_hw_phy_cfg(struct r8152 *tp)
{
u32 ocp_data;
u16 data;
- if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 ||
- tp->version == RTL_VER_05)
- ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
+ /* disable ALDPS before updating the PHY parameters */
+ r8153_aldps_en(tp, false);
- data = r8152_mdio_read(tp, MII_BMCR);
- if (data & BMCR_PDOWN) {
- data &= ~BMCR_PDOWN;
- r8152_mdio_write(tp, MII_BMCR, data);
- }
+ /* disable EEE before updating the PHY parameters */
+ r8153_eee_en(tp, false);
+ ocp_reg_write(tp, OCP_EEE_ADV, 0);
if (tp->version == RTL_VER_03) {
data = ocp_reg_read(tp, OCP_EEE_CFG);
@@ -2764,6 +2844,12 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
sram_write(tp, SRAM_10M_AMP1, 0x00af);
sram_write(tp, SRAM_10M_AMP2, 0x0208);
+ r8153_eee_en(tp, true);
+ ocp_reg_write(tp, OCP_EEE_ADV, MDIO_EEE_1000T | MDIO_EEE_100TX);
+
+ r8153_aldps_en(tp, true);
+ r8152b_enable_fc(tp);
+
set_bit(PHY_RESET, &tp->flags);
}
@@ -2779,8 +2865,6 @@ static void r8153_first_init(struct r8152 *tp)
ocp_data &= ~RCR_ACPT_ALL;
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
- r8153_hw_phy_cfg(tp);
-
rtl8152_nic_reset(tp);
rtl_reset_bmu(tp);
@@ -2887,21 +2971,6 @@ static void r8153_enter_oob(struct r8152 *tp)
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
}
-static void r8153_aldps_en(struct r8152 *tp, bool enable)
-{
- u16 data;
-
- data = ocp_reg_read(tp, OCP_POWER_CFG);
- if (enable) {
- data |= EN_ALDPS;
- ocp_reg_write(tp, OCP_POWER_CFG, data);
- } else {
- data &= ~EN_ALDPS;
- ocp_reg_write(tp, OCP_POWER_CFG, data);
- msleep(20);
- }
-}
-
static void rtl8153_disable(struct r8152 *tp)
{
r8153_aldps_en(tp, false);
@@ -2916,7 +2985,6 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
u16 bmcr, anar, gbcr;
int ret = 0;
- cancel_delayed_work_sync(&tp->schedule);
anar = r8152_mdio_read(tp, MII_ADVERTISE);
anar &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
ADVERTISE_100HALF | ADVERTISE_100FULL);
@@ -2976,7 +3044,7 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
}
- if (test_bit(PHY_RESET, &tp->flags))
+ if (test_and_clear_bit(PHY_RESET, &tp->flags))
bmcr |= BMCR_RESET;
if (tp->mii.supports_gmii)
@@ -2985,7 +3053,7 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
r8152_mdio_write(tp, MII_ADVERTISE, anar);
r8152_mdio_write(tp, MII_BMCR, bmcr);
- if (test_and_clear_bit(PHY_RESET, &tp->flags)) {
+ if (bmcr & BMCR_RESET) {
int i;
for (i = 0; i < 50; i++) {
@@ -3135,15 +3203,33 @@ static void rtl_work_func_t(struct work_struct *work)
netif_carrier_ok(tp->netdev))
napi_schedule(&tp->napi);
- if (test_and_clear_bit(PHY_RESET, &tp->flags))
- rtl_phy_reset(tp);
-
mutex_unlock(&tp->control);
out1:
usb_autopm_put_interface(tp->intf);
}
+static void rtl_hw_phy_work_func_t(struct work_struct *work)
+{
+ struct r8152 *tp = container_of(work, struct r8152, hw_phy_work.work);
+
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return;
+
+ if (usb_autopm_get_interface(tp->intf) < 0)
+ return;
+
+ mutex_lock(&tp->control);
+
+ tp->rtl_ops.hw_phy_cfg(tp);
+
+ rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex);
+
+ mutex_unlock(&tp->control);
+
+ usb_autopm_put_interface(tp->intf);
+}
+
#ifdef CONFIG_PM_SLEEP
static int rtl_notifier(struct notifier_block *nb, unsigned long action,
void *data)
@@ -3180,8 +3266,6 @@ static int rtl8152_open(struct net_device *netdev)
if (res)
goto out;
- netif_carrier_off(netdev);
-
res = usb_autopm_get_interface(tp->intf);
if (res < 0) {
free_all_mem(tp);
@@ -3192,9 +3276,6 @@ static int rtl8152_open(struct net_device *netdev)
tp->rtl_ops.up(tp);
- rtl8152_set_speed(tp, AUTONEG_ENABLE,
- tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
- DUPLEX_FULL);
netif_carrier_off(netdev);
netif_start_queue(netdev);
set_bit(WORK_ENABLE, &tp->flags);
@@ -3255,103 +3336,6 @@ static int rtl8152_close(struct net_device *netdev)
return res;
}
-static inline void r8152_mmd_indirect(struct r8152 *tp, u16 dev, u16 reg)
-{
- ocp_reg_write(tp, OCP_EEE_AR, FUN_ADDR | dev);
- ocp_reg_write(tp, OCP_EEE_DATA, reg);
- ocp_reg_write(tp, OCP_EEE_AR, FUN_DATA | dev);
-}
-
-static u16 r8152_mmd_read(struct r8152 *tp, u16 dev, u16 reg)
-{
- u16 data;
-
- r8152_mmd_indirect(tp, dev, reg);
- data = ocp_reg_read(tp, OCP_EEE_DATA);
- ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
-
- return data;
-}
-
-static void r8152_mmd_write(struct r8152 *tp, u16 dev, u16 reg, u16 data)
-{
- r8152_mmd_indirect(tp, dev, reg);
- ocp_reg_write(tp, OCP_EEE_DATA, data);
- ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
-}
-
-static void r8152_eee_en(struct r8152 *tp, bool enable)
-{
- u16 config1, config2, config3;
- u32 ocp_data;
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
- config1 = ocp_reg_read(tp, OCP_EEE_CONFIG1) & ~sd_rise_time_mask;
- config2 = ocp_reg_read(tp, OCP_EEE_CONFIG2);
- config3 = ocp_reg_read(tp, OCP_EEE_CONFIG3) & ~fast_snr_mask;
-
- if (enable) {
- ocp_data |= EEE_RX_EN | EEE_TX_EN;
- config1 |= EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN | RX_QUIET_EN;
- config1 |= sd_rise_time(1);
- config2 |= RG_DACQUIET_EN | RG_LDVQUIET_EN;
- config3 |= fast_snr(42);
- } else {
- ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
- config1 &= ~(EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN |
- RX_QUIET_EN);
- config1 |= sd_rise_time(7);
- config2 &= ~(RG_DACQUIET_EN | RG_LDVQUIET_EN);
- config3 |= fast_snr(511);
- }
-
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
- ocp_reg_write(tp, OCP_EEE_CONFIG1, config1);
- ocp_reg_write(tp, OCP_EEE_CONFIG2, config2);
- ocp_reg_write(tp, OCP_EEE_CONFIG3, config3);
-}
-
-static void r8152b_enable_eee(struct r8152 *tp)
-{
- r8152_eee_en(tp, true);
- r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, MDIO_EEE_100TX);
-}
-
-static void r8153_eee_en(struct r8152 *tp, bool enable)
-{
- u32 ocp_data;
- u16 config;
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
- config = ocp_reg_read(tp, OCP_EEE_CFG);
-
- if (enable) {
- ocp_data |= EEE_RX_EN | EEE_TX_EN;
- config |= EEE10_EN;
- } else {
- ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
- config &= ~EEE10_EN;
- }
-
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
- ocp_reg_write(tp, OCP_EEE_CFG, config);
-}
-
-static void r8153_enable_eee(struct r8152 *tp)
-{
- r8153_eee_en(tp, true);
- ocp_reg_write(tp, OCP_EEE_ADV, MDIO_EEE_1000T | MDIO_EEE_100TX);
-}
-
-static void r8152b_enable_fc(struct r8152 *tp)
-{
- u16 anar;
-
- anar = r8152_mdio_read(tp, MII_ADVERTISE);
- anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
- r8152_mdio_write(tp, MII_ADVERTISE, anar);
-}
-
static void rtl_tally_reset(struct r8152 *tp)
{
u32 ocp_data;
@@ -3364,10 +3348,17 @@ static void rtl_tally_reset(struct r8152 *tp)
static void r8152b_init(struct r8152 *tp)
{
u32 ocp_data;
+ u16 data;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
+ data = r8152_mdio_read(tp, MII_BMCR);
+ if (data & BMCR_PDOWN) {
+ data &= ~BMCR_PDOWN;
+ r8152_mdio_write(tp, MII_BMCR, data);
+ }
+
r8152_aldps_en(tp, false);
if (tp->version == RTL_VER_01) {
@@ -3389,9 +3380,6 @@ static void r8152b_init(struct r8152 *tp)
SPDWN_RXDV_MSK | SPDWN_LINKCHG_MSK;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_GPHY_INTR_IMR, ocp_data);
- r8152b_enable_eee(tp);
- r8152_aldps_en(tp, true);
- r8152b_enable_fc(tp);
rtl_tally_reset(tp);
/* enable rx aggregation */
@@ -3403,12 +3391,12 @@ static void r8152b_init(struct r8152 *tp)
static void r8153_init(struct r8152 *tp)
{
u32 ocp_data;
+ u16 data;
int i;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
- r8153_aldps_en(tp, false);
r8153_u1u2en(tp, false);
for (i = 0; i < 500; i++) {
@@ -3425,6 +3413,23 @@ static void r8153_init(struct r8152 *tp)
msleep(20);
}
+ if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 ||
+ tp->version == RTL_VER_05)
+ ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
+
+ data = r8152_mdio_read(tp, MII_BMCR);
+ if (data & BMCR_PDOWN) {
+ data &= ~BMCR_PDOWN;
+ r8152_mdio_write(tp, MII_BMCR, data);
+ }
+
+ for (i = 0; i < 500; i++) {
+ ocp_data = ocp_reg_read(tp, OCP_PHY_STATUS) & PHY_STAT_MASK;
+ if (ocp_data == PHY_STAT_LAN_ON)
+ break;
+ msleep(20);
+ }
+
usb_disable_lpm(tp->udev);
r8153_u2p3en(tp, false);
@@ -3492,9 +3497,6 @@ static void r8153_init(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0);
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0);
- r8153_enable_eee(tp);
- r8153_aldps_en(tp, true);
- r8152b_enable_fc(tp);
rtl_tally_reset(tp);
r8153_u2p3en(tp, true);
}
@@ -3618,6 +3620,7 @@ static int rtl8152_resume(struct usb_interface *intf)
if (!test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
tp->rtl_ops.init(tp);
+ queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
netif_device_attach(tp->netdev);
}
@@ -3632,10 +3635,6 @@ static int rtl8152_resume(struct usb_interface *intf)
napi_enable(&tp->napi);
} else {
tp->rtl_ops.up(tp);
- rtl8152_set_speed(tp, AUTONEG_ENABLE,
- tp->mii.supports_gmii ?
- SPEED_1000 : SPEED_100,
- DUPLEX_FULL);
netif_carrier_off(tp->netdev);
set_bit(WORK_ENABLE, &tp->flags);
}
@@ -3765,6 +3764,11 @@ static int rtl8152_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
mutex_lock(&tp->control);
ret = rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex);
+ if (!ret) {
+ tp->autoneg = cmd->autoneg;
+ tp->speed = cmd->speed;
+ tp->duplex = cmd->duplex;
+ }
mutex_unlock(&tp->control);
@@ -4222,6 +4226,7 @@ static int rtl_ops_init(struct r8152 *tp)
ops->eee_get = r8152_get_eee;
ops->eee_set = r8152_set_eee;
ops->in_nway = rtl8152_in_nway;
+ ops->hw_phy_cfg = r8152b_hw_phy_cfg;
ops->autosuspend_en = rtl_runtime_suspend_enable;
break;
@@ -4238,6 +4243,7 @@ static int rtl_ops_init(struct r8152 *tp)
ops->eee_get = r8153_get_eee;
ops->eee_set = r8153_set_eee;
ops->in_nway = rtl8153_in_nway;
+ ops->hw_phy_cfg = r8153_hw_phy_cfg;
ops->autosuspend_en = rtl8153_runtime_enable;
break;
@@ -4285,6 +4291,7 @@ static int rtl8152_probe(struct usb_interface *intf,
mutex_init(&tp->control);
INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
+ INIT_DELAYED_WORK(&tp->hw_phy_work, rtl_hw_phy_work_func_t);
netdev->netdev_ops = &rtl8152_netdev_ops;
netdev->watchdog_timeo = RTL8152_TX_TIMEOUT;
@@ -4324,9 +4331,14 @@ static int rtl8152_probe(struct usb_interface *intf,
break;
}
+ tp->autoneg = AUTONEG_ENABLE;
+ tp->speed = tp->mii.supports_gmii ? SPEED_1000 : SPEED_100;
+ tp->duplex = DUPLEX_FULL;
+
intf->needs_remote_wakeup = 1;
tp->rtl_ops.init(tp);
+ queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
set_ethernet_addr(tp);
usb_set_intfdata(intf, tp);
@@ -4372,6 +4384,7 @@ static void rtl8152_disconnect(struct usb_interface *intf)
netif_napi_del(&tp->napi);
unregister_netdev(tp->netdev);
+ cancel_delayed_work_sync(&tp->hw_phy_work);
tp->rtl_ops.unload(tp);
free_netdev(tp->netdev);
}
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 524a47a28..4f4f71b29 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -428,7 +428,11 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
dev_err(&intf->dev, "rndis get ethaddr, %d\n", retval);
goto halt_fail_and_release;
}
- memcpy(net->dev_addr, bp, ETH_ALEN);
+
+ if (bp[0] & 0x02)
+ eth_hw_addr_random(net);
+ else
+ ether_addr_copy(net->dev_addr, bp);
/* set a nonzero filter to enable data transfers */
memset(u.set, 0, sizeof *u.set);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 6086a0163..3bfb59209 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -42,7 +42,6 @@
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/usb/usbnet.h>
-#include <linux/usb/cdc.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>
@@ -1972,143 +1971,6 @@ out:
return err;
}
-int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
- struct usb_interface *intf,
- u8 *buffer,
- int buflen)
-{
- /* duplicates are ignored */
- struct usb_cdc_union_desc *union_header = NULL;
-
- /* duplicates are not tolerated */
- struct usb_cdc_header_desc *header = NULL;
- struct usb_cdc_ether_desc *ether = NULL;
- struct usb_cdc_mdlm_detail_desc *detail = NULL;
- struct usb_cdc_mdlm_desc *desc = NULL;
-
- unsigned int elength;
- int cnt = 0;
-
- memset(hdr, 0x00, sizeof(struct usb_cdc_parsed_header));
- hdr->phonet_magic_present = false;
- while (buflen > 0) {
- elength = buffer[0];
- if (!elength) {
- dev_err(&intf->dev, "skipping garbage byte\n");
- elength = 1;
- goto next_desc;
- }
- if (buffer[1] != USB_DT_CS_INTERFACE) {
- dev_err(&intf->dev, "skipping garbage\n");
- goto next_desc;
- }
-
- switch (buffer[2]) {
- case USB_CDC_UNION_TYPE: /* we've found it */
- if (elength < sizeof(struct usb_cdc_union_desc))
- goto next_desc;
- if (union_header) {
- dev_err(&intf->dev, "More than one union descriptor, skipping ...\n");
- goto next_desc;
- }
- union_header = (struct usb_cdc_union_desc *)buffer;
- break;
- case USB_CDC_COUNTRY_TYPE:
- if (elength < sizeof(struct usb_cdc_country_functional_desc))
- goto next_desc;
- hdr->usb_cdc_country_functional_desc =
- (struct usb_cdc_country_functional_desc *)buffer;
- break;
- case USB_CDC_HEADER_TYPE:
- if (elength != sizeof(struct usb_cdc_header_desc))
- goto next_desc;
- if (header)
- return -EINVAL;
- header = (struct usb_cdc_header_desc *)buffer;
- break;
- case USB_CDC_ACM_TYPE:
- if (elength < sizeof(struct usb_cdc_acm_descriptor))
- goto next_desc;
- hdr->usb_cdc_acm_descriptor =
- (struct usb_cdc_acm_descriptor *)buffer;
- break;
- case USB_CDC_ETHERNET_TYPE:
- if (elength != sizeof(struct usb_cdc_ether_desc))
- goto next_desc;
- if (ether)
- return -EINVAL;
- ether = (struct usb_cdc_ether_desc *)buffer;
- break;
- case USB_CDC_CALL_MANAGEMENT_TYPE:
- if (elength < sizeof(struct usb_cdc_call_mgmt_descriptor))
- goto next_desc;
- hdr->usb_cdc_call_mgmt_descriptor =
- (struct usb_cdc_call_mgmt_descriptor *)buffer;
- break;
- case USB_CDC_DMM_TYPE:
- if (elength < sizeof(struct usb_cdc_dmm_desc))
- goto next_desc;
- hdr->usb_cdc_dmm_desc =
- (struct usb_cdc_dmm_desc *)buffer;
- break;
- case USB_CDC_MDLM_TYPE:
- if (elength < sizeof(struct usb_cdc_mdlm_desc *))
- goto next_desc;
- if (desc)
- return -EINVAL;
- desc = (struct usb_cdc_mdlm_desc *)buffer;
- break;
- case USB_CDC_MDLM_DETAIL_TYPE:
- if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *))
- goto next_desc;
- if (detail)
- return -EINVAL;
- detail = (struct usb_cdc_mdlm_detail_desc *)buffer;
- break;
- case USB_CDC_NCM_TYPE:
- if (elength < sizeof(struct usb_cdc_ncm_desc))
- goto next_desc;
- hdr->usb_cdc_ncm_desc = (struct usb_cdc_ncm_desc *)buffer;
- break;
- case USB_CDC_MBIM_TYPE:
- if (elength < sizeof(struct usb_cdc_mbim_desc))
- goto next_desc;
-
- hdr->usb_cdc_mbim_desc = (struct usb_cdc_mbim_desc *)buffer;
- break;
- case USB_CDC_MBIM_EXTENDED_TYPE:
- if (elength < sizeof(struct usb_cdc_mbim_extended_desc))
- break;
- hdr->usb_cdc_mbim_extended_desc =
- (struct usb_cdc_mbim_extended_desc *)buffer;
- break;
- case CDC_PHONET_MAGIC_NUMBER:
- hdr->phonet_magic_present = true;
- break;
- default:
- /*
- * there are LOTS more CDC descriptors that
- * could legitimately be found here.
- */
- dev_dbg(&intf->dev, "Ignoring descriptor: type %02x, length %ud\n",
- buffer[2], elength);
- goto next_desc;
- }
- cnt++;
-next_desc:
- buflen -= elength;
- buffer += elength;
- }
- hdr->usb_cdc_union_desc = union_header;
- hdr->usb_cdc_header_desc = header;
- hdr->usb_cdc_mdlm_detail_desc = detail;
- hdr->usb_cdc_mdlm_desc = desc;
- hdr->usb_cdc_ether_desc = ether;
- return cnt;
-}
-
-EXPORT_SYMBOL(cdc_parse_cdc_header);
-
/*
* The function can't be called inside suspend/resume callback,
* otherwise deadlock will be caused.
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index e0638e556..1b5f531ee 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -144,8 +144,10 @@ struct virtnet_info {
/* Control VQ buffers: protected by the rtnl lock */
struct virtio_net_ctrl_hdr ctrl_hdr;
virtio_net_ctrl_ack ctrl_status;
+ struct virtio_net_ctrl_mq ctrl_mq;
u8 ctrl_promisc;
u8 ctrl_allmulti;
+ u16 ctrl_vid;
/* Ethtool settings */
u8 duplex;
@@ -479,53 +481,21 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
stats->rx_packets++;
u64_stats_update_end(&stats->rx_syncp);
- if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
- pr_debug("Needs csum!\n");
- if (!skb_partial_csum_set(skb,
- virtio16_to_cpu(vi->vdev, hdr->hdr.csum_start),
- virtio16_to_cpu(vi->vdev, hdr->hdr.csum_offset)))
- goto frame_err;
- } else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
+ if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
+ virtio_is_little_endian(vi->vdev))) {
+ net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
+ dev->name, hdr->hdr.gso_type,
+ hdr->hdr.gso_size);
+ goto frame_err;
}
skb->protocol = eth_type_trans(skb, dev);
pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
ntohs(skb->protocol), skb->len, skb->pkt_type);
- if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
- pr_debug("GSO!\n");
- switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
- case VIRTIO_NET_HDR_GSO_TCPV4:
- skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
- break;
- case VIRTIO_NET_HDR_GSO_UDP:
- skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
- break;
- case VIRTIO_NET_HDR_GSO_TCPV6:
- skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
- break;
- default:
- net_warn_ratelimited("%s: bad gso type %u.\n",
- dev->name, hdr->hdr.gso_type);
- goto frame_err;
- }
-
- if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
- skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
-
- skb_shinfo(skb)->gso_size = virtio16_to_cpu(vi->vdev,
- hdr->hdr.gso_size);
- if (skb_shinfo(skb)->gso_size == 0) {
- net_warn_ratelimited("%s: zero gso size.\n", dev->name);
- goto frame_err;
- }
-
- /* Header must be checked, and gso_segs computed. */
- skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
- skb_shinfo(skb)->gso_segs = 0;
- }
-
napi_gro_receive(&rq->napi, skb);
return;
@@ -868,35 +838,9 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
else
hdr = skb_vnet_hdr(skb);
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- hdr->hdr.csum_start = cpu_to_virtio16(vi->vdev,
- skb_checksum_start_offset(skb));
- hdr->hdr.csum_offset = cpu_to_virtio16(vi->vdev,
- skb->csum_offset);
- } else {
- hdr->hdr.flags = 0;
- hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
- }
-
- if (skb_is_gso(skb)) {
- hdr->hdr.hdr_len = cpu_to_virtio16(vi->vdev, skb_headlen(skb));
- hdr->hdr.gso_size = cpu_to_virtio16(vi->vdev,
- skb_shinfo(skb)->gso_size);
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
- hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
- else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
- hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
- else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
- hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
- else
- BUG();
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
- hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
- } else {
- hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
- hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
- }
+ if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
+ virtio_is_little_endian(vi->vdev)))
+ BUG();
if (vi->mergeable_rx_bufs)
hdr->num_buffers = 0;
@@ -1116,14 +1060,13 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
struct scatterlist sg;
- struct virtio_net_ctrl_mq s;
struct net_device *dev = vi->dev;
if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
return 0;
- s.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
- sg_init_one(&sg, &s, sizeof(s));
+ vi->ctrl_mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
+ sg_init_one(&sg, &vi->ctrl_mq, sizeof(vi->ctrl_mq));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
@@ -1230,7 +1173,8 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev,
struct virtnet_info *vi = netdev_priv(dev);
struct scatterlist sg;
- sg_init_one(&sg, &vid, sizeof(vid));
+ vi->ctrl_vid = vid;
+ sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_ADD, &sg))
@@ -1244,7 +1188,8 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
struct virtnet_info *vi = netdev_priv(dev);
struct scatterlist sg;
- sg_init_one(&sg, &vid, sizeof(vid));
+ vi->ctrl_vid = vid;
+ sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_DEL, &sg))
@@ -1780,6 +1725,7 @@ static int virtnet_probe(struct virtio_device *vdev)
struct net_device *dev;
struct virtnet_info *vi;
u16 max_queue_pairs;
+ int mtu;
if (!vdev->config->get) {
dev_err(&vdev->dev, "%s failure: config access disabled\n",
@@ -1896,6 +1842,14 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
vi->has_cvq = true;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
+ mtu = virtio_cread16(vdev,
+ offsetof(struct virtio_net_config,
+ mtu));
+ if (virtnet_change_mtu(dev, mtu))
+ __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
+ }
+
if (vi->any_header_sg)
dev->needed_headroom = vi->hdr_len;
@@ -2067,6 +2021,7 @@ static unsigned int features[] = {
VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
VIRTIO_NET_F_CTRL_MAC_ADDR,
VIRTIO_F_ANY_LAYOUT,
+ VIRTIO_NET_F_MTU,
};
static struct virtio_driver virtio_net_driver = {
diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile
index 880f5098e..8cdbb63d1 100644
--- a/drivers/net/vmxnet3/Makefile
+++ b/drivers/net/vmxnet3/Makefile
@@ -2,7 +2,7 @@
#
# Linux driver for VMware's vmxnet3 ethernet NIC.
#
-# Copyright (C) 2007-2009, VMware, Inc. All Rights Reserved.
+# Copyright (C) 2007-2016, VMware, Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
@@ -21,7 +21,7 @@
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
#
-# Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
+# Maintained by: pv-drivers@vmware.com
#
#
################################################################################
diff --git a/drivers/net/vmxnet3/upt1_defs.h b/drivers/net/vmxnet3/upt1_defs.h
index 969c751ee..db9f1fde3 100644
--- a/drivers/net/vmxnet3/upt1_defs.h
+++ b/drivers/net/vmxnet3/upt1_defs.h
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -20,7 +20,7 @@
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
- * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
+ * Maintained by: pv-drivers@vmware.com
*
*/
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index 72ba8ae7f..c3a316461 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2015, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -20,7 +20,7 @@
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
- * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
+ * Maintained by: pv-drivers@vmware.com
*
*/
@@ -76,7 +76,12 @@ enum {
VMXNET3_CMD_UPDATE_IML,
VMXNET3_CMD_UPDATE_PMCFG,
VMXNET3_CMD_UPDATE_FEATURE,
+ VMXNET3_CMD_RESERVED1,
VMXNET3_CMD_LOAD_PLUGIN,
+ VMXNET3_CMD_RESERVED2,
+ VMXNET3_CMD_RESERVED3,
+ VMXNET3_CMD_SET_COALESCE,
+ VMXNET3_CMD_REGISTER_MEMREGS,
VMXNET3_CMD_FIRST_GET = 0xF00D0000,
VMXNET3_CMD_GET_QUEUE_STATUS = VMXNET3_CMD_FIRST_GET,
@@ -87,7 +92,10 @@ enum {
VMXNET3_CMD_GET_DID_LO,
VMXNET3_CMD_GET_DID_HI,
VMXNET3_CMD_GET_DEV_EXTRA_INFO,
- VMXNET3_CMD_GET_CONF_INTR
+ VMXNET3_CMD_GET_CONF_INTR,
+ VMXNET3_CMD_GET_RESERVED1,
+ VMXNET3_CMD_GET_TXDATA_DESC_SIZE,
+ VMXNET3_CMD_GET_COALESCE,
};
/*
@@ -169,6 +177,8 @@ struct Vmxnet3_TxDataDesc {
u8 data[VMXNET3_HDR_COPY_SIZE];
};
+typedef u8 Vmxnet3_RxDataDesc;
+
#define VMXNET3_TCD_GEN_SHIFT 31
#define VMXNET3_TCD_GEN_SIZE 1
#define VMXNET3_TCD_TXIDX_SHIFT 0
@@ -373,6 +383,14 @@ union Vmxnet3_GenericDesc {
#define VMXNET3_RING_SIZE_ALIGN 32
#define VMXNET3_RING_SIZE_MASK (VMXNET3_RING_SIZE_ALIGN - 1)
+/* Tx Data Ring buffer size must be a multiple of 64 */
+#define VMXNET3_TXDATA_DESC_SIZE_ALIGN 64
+#define VMXNET3_TXDATA_DESC_SIZE_MASK (VMXNET3_TXDATA_DESC_SIZE_ALIGN - 1)
+
+/* Rx Data Ring buffer size must be a multiple of 64 */
+#define VMXNET3_RXDATA_DESC_SIZE_ALIGN 64
+#define VMXNET3_RXDATA_DESC_SIZE_MASK (VMXNET3_RXDATA_DESC_SIZE_ALIGN - 1)
+
/* Max ring size */
#define VMXNET3_TX_RING_MAX_SIZE 4096
#define VMXNET3_TC_RING_MAX_SIZE 4096
@@ -380,6 +398,11 @@ union Vmxnet3_GenericDesc {
#define VMXNET3_RX_RING2_MAX_SIZE 4096
#define VMXNET3_RC_RING_MAX_SIZE 8192
+#define VMXNET3_TXDATA_DESC_MIN_SIZE 128
+#define VMXNET3_TXDATA_DESC_MAX_SIZE 2048
+
+#define VMXNET3_RXDATA_DESC_MAX_SIZE 2048
+
/* a list of reasons for queue stop */
enum {
@@ -466,7 +489,9 @@ struct Vmxnet3_TxQueueConf {
__le32 compRingSize; /* # of comp desc */
__le32 ddLen; /* size of driver data */
u8 intrIdx;
- u8 _pad[7];
+ u8 _pad1[1];
+ __le16 txDataRingDescSize;
+ u8 _pad2[4];
};
@@ -474,12 +499,14 @@ struct Vmxnet3_RxQueueConf {
__le64 rxRingBasePA[2];
__le64 compRingBasePA;
__le64 ddPA; /* driver data */
- __le64 reserved;
+ __le64 rxDataRingBasePA;
__le32 rxRingSize[2]; /* # of rx desc */
__le32 compRingSize; /* # of rx comp desc */
__le32 ddLen; /* size of driver data */
u8 intrIdx;
- u8 _pad[7];
+ u8 _pad1[1];
+ __le16 rxDataRingDescSize; /* size of rx data ring buffer */
+ u8 _pad2[4];
};
@@ -609,6 +636,63 @@ struct Vmxnet3_RxQueueDesc {
u8 __pad[88]; /* 128 aligned */
};
+struct Vmxnet3_SetPolling {
+ u8 enablePolling;
+};
+
+#define VMXNET3_COAL_STATIC_MAX_DEPTH 128
+#define VMXNET3_COAL_RBC_MIN_RATE 100
+#define VMXNET3_COAL_RBC_MAX_RATE 100000
+
+enum Vmxnet3_CoalesceMode {
+ VMXNET3_COALESCE_DISABLED = 0,
+ VMXNET3_COALESCE_ADAPT = 1,
+ VMXNET3_COALESCE_STATIC = 2,
+ VMXNET3_COALESCE_RBC = 3
+};
+
+struct Vmxnet3_CoalesceRbc {
+ u32 rbc_rate;
+};
+
+struct Vmxnet3_CoalesceStatic {
+ u32 tx_depth;
+ u32 tx_comp_depth;
+ u32 rx_depth;
+};
+
+struct Vmxnet3_CoalesceScheme {
+ enum Vmxnet3_CoalesceMode coalMode;
+ union {
+ struct Vmxnet3_CoalesceRbc coalRbc;
+ struct Vmxnet3_CoalesceStatic coalStatic;
+ } coalPara;
+};
+
+struct Vmxnet3_MemoryRegion {
+ __le64 startPA;
+ __le32 length;
+ __le16 txQueueBits;
+ __le16 rxQueueBits;
+};
+
+#define MAX_MEMORY_REGION_PER_QUEUE 16
+#define MAX_MEMORY_REGION_PER_DEVICE 256
+
+struct Vmxnet3_MemRegs {
+ __le16 numRegs;
+ __le16 pad[3];
+ struct Vmxnet3_MemoryRegion memRegs[1];
+};
+
+/* If the command data is <= 16 bytes, use the shared memory directly;
+ * otherwise, use the variable-length configuration descriptor.
+ */
+union Vmxnet3_CmdInfo {
+ struct Vmxnet3_VariableLenConfDesc varConf;
+ struct Vmxnet3_SetPolling setPolling;
+ __le64 data[2];
+};
struct Vmxnet3_DSDevRead {
/* read-only region for device, read by dev in response to a SET cmd */
@@ -627,7 +711,14 @@ struct Vmxnet3_DriverShared {
__le32 pad;
struct Vmxnet3_DSDevRead devRead;
__le32 ecr;
- __le32 reserved[5];
+ __le32 reserved;
+ union {
+ __le32 reserved1[4];
+ union Vmxnet3_CmdInfo cmdInfo; /* only valid in the context of
+ * executing the relevant
+ * command
+ */
+ } cu;
};
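
The new cu union at the end of Vmxnet3_DriverShared reuses the 16 bytes that were previously reserved, which is why the comment above says command data of at most 16 bytes lives in shared memory directly. A userspace sketch that checks the window size; struct Vmxnet3_VariableLenConfDesc is not part of this diff, so a 16-byte stand-in with the confVer/confLen/confPA fields touched by the driver code below is assumed:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct Vmxnet3_SetPolling {
	uint8_t enablePolling;
};

/* assumed stand-in; only the field names are taken from the driver */
struct Vmxnet3_VariableLenConfDesc {
	uint32_t confVer;
	uint32_t confLen;
	uint64_t confPA;
};

union Vmxnet3_CmdInfo {
	struct Vmxnet3_VariableLenConfDesc varConf;
	struct Vmxnet3_SetPolling setPolling;
	uint64_t data[2];
};

/* every member must fit the 16-byte shared-memory window */
static_assert(sizeof(union Vmxnet3_CmdInfo) == 16, "cmd window");

int main(void)
{
	printf("cmdInfo window: %zu bytes\n", sizeof(union Vmxnet3_CmdInfo));
	return 0;
}
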
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 08885bc8d..4244b9d44 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -20,7 +20,7 @@
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
- * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
+ * Maintained by: pv-drivers@vmware.com
*
*/
@@ -435,8 +435,8 @@ vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
tq->tx_ring.base = NULL;
}
if (tq->data_ring.base) {
- dma_free_coherent(&adapter->pdev->dev, tq->data_ring.size *
- sizeof(struct Vmxnet3_TxDataDesc),
+ dma_free_coherent(&adapter->pdev->dev,
+ tq->data_ring.size * tq->txdata_desc_size,
tq->data_ring.base, tq->data_ring.basePA);
tq->data_ring.base = NULL;
}
@@ -478,8 +478,8 @@ vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
tq->tx_ring.gen = VMXNET3_INIT_GEN;
- memset(tq->data_ring.base, 0, tq->data_ring.size *
- sizeof(struct Vmxnet3_TxDataDesc));
+ memset(tq->data_ring.base, 0,
+ tq->data_ring.size * tq->txdata_desc_size);
/* reset the tx comp ring contents to 0 and reset comp ring states */
memset(tq->comp_ring.base, 0, tq->comp_ring.size *
@@ -514,10 +514,10 @@ vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
}
tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
- tq->data_ring.size * sizeof(struct Vmxnet3_TxDataDesc),
+ tq->data_ring.size * tq->txdata_desc_size,
&tq->data_ring.basePA, GFP_KERNEL);
if (!tq->data_ring.base) {
- netdev_err(adapter->netdev, "failed to allocate data ring\n");
+ netdev_err(adapter->netdev, "failed to allocate tx data ring\n");
goto err;
}
@@ -689,7 +689,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
if (ctx->copy_size) {
ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
tq->tx_ring.next2fill *
- sizeof(struct Vmxnet3_TxDataDesc));
+ tq->txdata_desc_size);
ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
ctx->sop_txd->dword[3] = 0;
@@ -873,8 +873,9 @@ vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
ctx->eth_ip_hdr_size = 0;
ctx->l4_hdr_size = 0;
/* copy as much as allowed */
- ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE
- , skb_headlen(skb));
+ ctx->copy_size = min_t(unsigned int,
+ tq->txdata_desc_size,
+ skb_headlen(skb));
}
if (skb->len <= VMXNET3_HDR_COPY_SIZE)
@@ -885,7 +886,7 @@ vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
goto err;
}
- if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
+ if (unlikely(ctx->copy_size > tq->txdata_desc_size)) {
tq->stats.oversized_hdr++;
ctx->copy_size = 0;
return 0;
@@ -913,7 +914,9 @@ vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
{
struct Vmxnet3_TxDataDesc *tdd;
- tdd = tq->data_ring.base + tq->tx_ring.next2fill;
+ tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base +
+ tq->tx_ring.next2fill *
+ tq->txdata_desc_size);
memcpy(tdd->data, skb->data, ctx->copy_size);
netdev_dbg(adapter->netdev,
@@ -1283,9 +1286,10 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
*/
break;
}
- BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
+ BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
+ rcd->rqID != rq->dataRingQid);
idx = rcd->rxdIdx;
- ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
+ ring_idx = VMXNET3_GET_RING_IDX(adapter, rcd->rqID);
ring = rq->rx_ring + ring_idx;
vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
&rxCmdDesc);
@@ -1300,8 +1304,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
}
if (rcd->sop) { /* first buf of the pkt */
+ bool rxDataRingUsed;
+ u16 len;
+
BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
- rcd->rqID != rq->qid);
+ (rcd->rqID != rq->qid &&
+ rcd->rqID != rq->dataRingQid));
BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
@@ -1317,8 +1325,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
skip_page_frags = false;
ctx->skb = rbi->skb;
+
+ rxDataRingUsed =
+ VMXNET3_RX_DATA_RING(adapter, rcd->rqID);
+ len = rxDataRingUsed ? rcd->len : rbi->len;
new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
- rbi->len);
+ len);
if (new_skb == NULL) {
/* Skb allocation failed, do not handover this
* skb to stack. Reuse it. Drop the existing pkt
@@ -1329,25 +1341,48 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
skip_page_frags = true;
goto rcd_done;
}
- new_dma_addr = dma_map_single(&adapter->pdev->dev,
- new_skb->data, rbi->len,
- PCI_DMA_FROMDEVICE);
- if (dma_mapping_error(&adapter->pdev->dev,
- new_dma_addr)) {
- dev_kfree_skb(new_skb);
- /* Skb allocation failed, do not handover this
- * skb to stack. Reuse it. Drop the existing pkt
- */
- rq->stats.rx_buf_alloc_failure++;
- ctx->skb = NULL;
- rq->stats.drop_total++;
- skip_page_frags = true;
- goto rcd_done;
- }
- dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr,
- rbi->len,
- PCI_DMA_FROMDEVICE);
+ if (rxDataRingUsed) {
+ size_t sz;
+
+ BUG_ON(rcd->len > rq->data_ring.desc_size);
+
+ ctx->skb = new_skb;
+ sz = rcd->rxdIdx * rq->data_ring.desc_size;
+ memcpy(new_skb->data,
+ &rq->data_ring.base[sz], rcd->len);
+ } else {
+ ctx->skb = rbi->skb;
+
+ new_dma_addr =
+ dma_map_single(&adapter->pdev->dev,
+ new_skb->data, rbi->len,
+ PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ new_dma_addr)) {
+ dev_kfree_skb(new_skb);
+ /* Skb allocation failed, do not
+ * handover this skb to stack. Reuse
+ * it. Drop the existing pkt.
+ */
+ rq->stats.rx_buf_alloc_failure++;
+ ctx->skb = NULL;
+ rq->stats.drop_total++;
+ skip_page_frags = true;
+ goto rcd_done;
+ }
+
+ dma_unmap_single(&adapter->pdev->dev,
+ rbi->dma_addr,
+ rbi->len,
+ PCI_DMA_FROMDEVICE);
+
+ /* Immediate refill */
+ rbi->skb = new_skb;
+ rbi->dma_addr = new_dma_addr;
+ rxd->addr = cpu_to_le64(rbi->dma_addr);
+ rxd->len = rbi->len;
+ }
#ifdef VMXNET3_RSS
if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
@@ -1358,12 +1393,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
#endif
skb_put(ctx->skb, rcd->len);
- /* Immediate refill */
- rbi->skb = new_skb;
- rbi->dma_addr = new_dma_addr;
- rxd->addr = cpu_to_le64(rbi->dma_addr);
- rxd->len = rbi->len;
- if (adapter->version == 2 &&
+ if (VMXNET3_VERSION_GE_2(adapter) &&
rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
struct Vmxnet3_RxCompDescExt *rcdlro;
rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
@@ -1589,6 +1619,13 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
rq->buf_info[i] = NULL;
}
+ if (rq->data_ring.base) {
+ dma_free_coherent(&adapter->pdev->dev,
+ rq->rx_ring[0].size * rq->data_ring.desc_size,
+ rq->data_ring.base, rq->data_ring.basePA);
+ rq->data_ring.base = NULL;
+ }
+
if (rq->comp_ring.base) {
dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
* sizeof(struct Vmxnet3_RxCompDesc),
@@ -1604,6 +1641,25 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
}
}
+void
+vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
+
+ if (rq->data_ring.base) {
+ dma_free_coherent(&adapter->pdev->dev,
+ (rq->rx_ring[0].size *
+ rq->data_ring.desc_size),
+ rq->data_ring.base,
+ rq->data_ring.basePA);
+ rq->data_ring.base = NULL;
+ rq->data_ring.desc_size = 0;
+ }
+ }
+}
static int
vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
@@ -1697,6 +1753,22 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
}
}
+ if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) {
+ sz = rq->rx_ring[0].size * rq->data_ring.desc_size;
+ rq->data_ring.base =
+ dma_alloc_coherent(&adapter->pdev->dev, sz,
+ &rq->data_ring.basePA,
+ GFP_KERNEL);
+ if (!rq->data_ring.base) {
+ netdev_err(adapter->netdev,
+ "rx data ring will be disabled\n");
+ adapter->rxdataring_enabled = false;
+ }
+ } else {
+ rq->data_ring.base = NULL;
+ rq->data_ring.desc_size = 0;
+ }
+
sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
&rq->comp_ring.basePA,
@@ -1729,6 +1801,8 @@ vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
{
int i, err = 0;
+ adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
+
for (i = 0; i < adapter->num_rx_queues; i++) {
err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
if (unlikely(err)) {
@@ -1738,6 +1812,10 @@ vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
goto err_out;
}
}
+
+ if (!adapter->rxdataring_enabled)
+ vmxnet3_rq_destroy_all_rxdataring(adapter);
+
return err;
err_out:
vmxnet3_rq_destroy_all(adapter);
@@ -2045,10 +2123,9 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
rq->qid = i;
rq->qid2 = i + adapter->num_rx_queues;
+ rq->dataRingQid = i + 2 * adapter->num_rx_queues;
}
-
-
/* init our intr settings */
for (i = 0; i < intr->num_intrs; i++)
intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
@@ -2336,6 +2413,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
tqc->ddPA = cpu_to_le64(tq->buf_info_pa);
tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
+ tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
tqc->ddLen = cpu_to_le32(
sizeof(struct vmxnet3_tx_buf_info) *
@@ -2360,6 +2438,12 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
(rqc->rxRingSize[0] +
rqc->rxRingSize[1]));
rqc->intrIdx = rq->comp_ring.intr_idx;
+ if (VMXNET3_VERSION_GE_3(adapter)) {
+ rqc->rxDataRingBasePA =
+ cpu_to_le64(rq->data_ring.basePA);
+ rqc->rxDataRingDescSize =
+ cpu_to_le16(rq->data_ring.desc_size);
+ }
}
#ifdef VMXNET3_RSS
@@ -2409,6 +2493,32 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
/* the rest are already zeroed */
}
+static void
+vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter)
+{
+ struct Vmxnet3_DriverShared *shared = adapter->shared;
+ union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
+ unsigned long flags;
+
+ if (!VMXNET3_VERSION_GE_3(adapter))
+ return;
+
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ cmdInfo->varConf.confVer = 1;
+ cmdInfo->varConf.confLen =
+ cpu_to_le32(sizeof(*adapter->coal_conf));
+ cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa);
+
+ if (adapter->default_coal_mode) {
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_COALESCE);
+ } else {
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_SET_COALESCE);
+ }
+
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+}
int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
@@ -2458,6 +2568,8 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
goto activate_err;
}
+ vmxnet3_init_coalesce(adapter);
+
for (i = 0; i < adapter->num_rx_queues; i++) {
VMXNET3_WRITE_BAR0_REG(adapter,
VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
@@ -2689,7 +2801,8 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
- u32 rx_ring_size, u32 rx_ring2_size)
+ u32 rx_ring_size, u32 rx_ring2_size,
+ u16 txdata_desc_size, u16 rxdata_desc_size)
{
int err = 0, i;
@@ -2698,6 +2811,7 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
tq->tx_ring.size = tx_ring_size;
tq->data_ring.size = tx_ring_size;
tq->comp_ring.size = tx_ring_size;
+ tq->txdata_desc_size = txdata_desc_size;
tq->shared = &adapter->tqd_start[i].ctrl;
tq->stopped = true;
tq->adapter = adapter;
@@ -2714,12 +2828,15 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
vmxnet3_adjust_rx_ring_size(adapter);
+
+ adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
for (i = 0; i < adapter->num_rx_queues; i++) {
struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
/* qid and qid2 for rx queues will be assigned later when num
* of rx queues is finalized after allocating intrs */
rq->shared = &adapter->rqd_start[i].ctrl;
rq->adapter = adapter;
+ rq->data_ring.desc_size = rxdata_desc_size;
err = vmxnet3_rq_create(rq, adapter);
if (err) {
if (i == 0) {
@@ -2737,6 +2854,10 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
}
}
}
+
+ if (!adapter->rxdataring_enabled)
+ vmxnet3_rq_destroy_all_rxdataring(adapter);
+
return err;
queue_err:
vmxnet3_tq_destroy_all(adapter);
@@ -2754,9 +2875,35 @@ vmxnet3_open(struct net_device *netdev)
for (i = 0; i < adapter->num_tx_queues; i++)
spin_lock_init(&adapter->tx_queue[i].tx_lock);
- err = vmxnet3_create_queues(adapter, adapter->tx_ring_size,
+ if (VMXNET3_VERSION_GE_3(adapter)) {
+ unsigned long flags;
+ u16 txdata_desc_size;
+
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
+ txdata_desc_size = VMXNET3_READ_BAR1_REG(adapter,
+ VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+
+ if ((txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE) ||
+ (txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE) ||
+ (txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK)) {
+ adapter->txdata_desc_size =
+ sizeof(struct Vmxnet3_TxDataDesc);
+ } else {
+ adapter->txdata_desc_size = txdata_desc_size;
+ }
+ } else {
+ adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc);
+ }
+
+ err = vmxnet3_create_queues(adapter,
+ adapter->tx_ring_size,
adapter->rx_ring_size,
- adapter->rx_ring2_size);
+ adapter->rx_ring2_size,
+ adapter->txdata_desc_size,
+ adapter->rxdata_desc_size);
if (err)
goto queue_err;
@@ -3200,12 +3347,21 @@ vmxnet3_probe_device(struct pci_dev *pdev,
goto err_alloc_pci;
ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
- if (ver & 2) {
- VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 2);
- adapter->version = 2;
- } else if (ver & 1) {
- VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
- adapter->version = 1;
+ if (ver & (1 << VMXNET3_REV_3)) {
+ VMXNET3_WRITE_BAR1_REG(adapter,
+ VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_3);
+ adapter->version = VMXNET3_REV_3 + 1;
+ } else if (ver & (1 << VMXNET3_REV_2)) {
+ VMXNET3_WRITE_BAR1_REG(adapter,
+ VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_2);
+ adapter->version = VMXNET3_REV_2 + 1;
+ } else if (ver & (1 << VMXNET3_REV_1)) {
+ VMXNET3_WRITE_BAR1_REG(adapter,
+ VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_1);
+ adapter->version = VMXNET3_REV_1 + 1;
} else {
dev_err(&pdev->dev,
"Incompatible h/w version (0x%x) for adapter\n", ver);
@@ -3224,9 +3380,28 @@ vmxnet3_probe_device(struct pci_dev *pdev,
goto err_ver;
}
+ if (VMXNET3_VERSION_GE_3(adapter)) {
+ adapter->coal_conf =
+ dma_alloc_coherent(&adapter->pdev->dev,
+ sizeof(struct Vmxnet3_CoalesceScheme),
+ &adapter->coal_conf_pa,
+ GFP_KERNEL);
+ if (!adapter->coal_conf) {
+ err = -ENOMEM;
+ goto err_ver;
+ }
+ memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
+ adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
+ adapter->default_coal_mode = true;
+ }
+
SET_NETDEV_DEV(netdev, &pdev->dev);
vmxnet3_declare_features(adapter, dma64);
+ adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
+ VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
+
if (adapter->num_tx_queues == adapter->num_rx_queues)
adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
else
@@ -3283,6 +3458,11 @@ vmxnet3_probe_device(struct pci_dev *pdev,
return 0;
err_register:
+ if (VMXNET3_VERSION_GE_3(adapter)) {
+ dma_free_coherent(&adapter->pdev->dev,
+ sizeof(struct Vmxnet3_CoalesceScheme),
+ adapter->coal_conf, adapter->coal_conf_pa);
+ }
vmxnet3_free_intr_resources(adapter);
err_ver:
vmxnet3_free_pci_resources(adapter);
@@ -3333,6 +3513,11 @@ vmxnet3_remove_device(struct pci_dev *pdev)
vmxnet3_free_intr_resources(adapter);
vmxnet3_free_pci_resources(adapter);
+ if (VMXNET3_VERSION_GE_3(adapter)) {
+ dma_free_coherent(&adapter->pdev->dev,
+ sizeof(struct Vmxnet3_CoalesceScheme),
+ adapter->coal_conf, adapter->coal_conf_pa);
+ }
#ifdef VMXNET3_RSS
dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
adapter->rss_conf, adapter->rss_conf_pa);
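
The probe path above replaces the flat version checks with a highest-bit-first scan of the VRRS register: each bit advertises one hardware revision, the driver activates the best one it supports, and adapter->version stays 1-based. A standalone sketch of just the selection logic, with the register write stubbed out:

#include <stdint.h>
#include <stdio.h>

#define VMXNET3_REV_1 0
#define VMXNET3_REV_2 1
#define VMXNET3_REV_3 2

/* returns the 1-based adapter version, or 0 if no revision matches */
static unsigned int pick_version(uint32_t vrrs)
{
	int rev;

	for (rev = VMXNET3_REV_3; rev >= VMXNET3_REV_1; rev--) {
		if (vrrs & (1u << rev)) {
			/* the real driver writes (1 << rev) back to
			 * VMXNET3_REG_VRRS here to activate the revision */
			return (unsigned int)rev + 1;
		}
	}
	return 0;
}

int main(void)
{
	printf("%u\n", pick_version(0x7));	/* revs 1-3 -> version 3 */
	printf("%u\n", pick_version(0x1));	/* rev 1 only -> version 1 */
	printf("%u\n", pick_version(0x8));	/* unknown bit -> incompatible */
	return 0;
}
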
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 9ba11d737..aabc6ef36 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -20,7 +20,7 @@
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
- * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
+ * Maintained by: pv-drivers@vmware.com
*
*/
@@ -396,8 +396,7 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
buf[j++] = VMXNET3_GET_ADDR_LO(tq->data_ring.basePA);
buf[j++] = VMXNET3_GET_ADDR_HI(tq->data_ring.basePA);
buf[j++] = tq->data_ring.size;
- /* transmit data ring buffer size */
- buf[j++] = VMXNET3_HDR_COPY_SIZE;
+ buf[j++] = tq->txdata_desc_size;
buf[j++] = VMXNET3_GET_ADDR_LO(tq->comp_ring.basePA);
buf[j++] = VMXNET3_GET_ADDR_HI(tq->comp_ring.basePA);
@@ -431,11 +430,10 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
buf[j++] = rq->rx_ring[1].next2comp;
buf[j++] = rq->rx_ring[1].gen;
- /* receive data ring */
- buf[j++] = 0;
- buf[j++] = 0;
- buf[j++] = 0;
- buf[j++] = 0;
+ buf[j++] = VMXNET3_GET_ADDR_LO(rq->data_ring.basePA);
+ buf[j++] = VMXNET3_GET_ADDR_HI(rq->data_ring.basePA);
+ buf[j++] = rq->rx_ring[0].size;
+ buf[j++] = rq->data_ring.desc_size;
buf[j++] = VMXNET3_GET_ADDR_LO(rq->comp_ring.basePA);
buf[j++] = VMXNET3_GET_ADDR_HI(rq->comp_ring.basePA);
@@ -504,12 +502,14 @@ vmxnet3_get_ringparam(struct net_device *netdev,
param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE;
param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE;
- param->rx_mini_max_pending = 0;
+ param->rx_mini_max_pending = VMXNET3_VERSION_GE_3(adapter) ?
+ VMXNET3_RXDATA_DESC_MAX_SIZE : 0;
param->rx_jumbo_max_pending = VMXNET3_RX_RING2_MAX_SIZE;
param->rx_pending = adapter->rx_ring_size;
param->tx_pending = adapter->tx_ring_size;
- param->rx_mini_pending = 0;
+ param->rx_mini_pending = VMXNET3_VERSION_GE_3(adapter) ?
+ adapter->rxdata_desc_size : 0;
param->rx_jumbo_pending = adapter->rx_ring2_size;
}
@@ -520,6 +520,7 @@ vmxnet3_set_ringparam(struct net_device *netdev,
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
u32 new_tx_ring_size, new_rx_ring_size, new_rx_ring2_size;
+ u16 new_rxdata_desc_size;
u32 sz;
int err = 0;
@@ -542,6 +543,15 @@ vmxnet3_set_ringparam(struct net_device *netdev,
return -EOPNOTSUPP;
}
+ if (VMXNET3_VERSION_GE_3(adapter)) {
+ if (param->rx_mini_pending > VMXNET3_RXDATA_DESC_MAX_SIZE)
+ return -EINVAL;
+ } else if (param->rx_mini_pending != 0) {
+ return -EINVAL;
+ }
+
/* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */
new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) &
~VMXNET3_RING_SIZE_MASK;
@@ -568,9 +578,19 @@ vmxnet3_set_ringparam(struct net_device *netdev,
new_rx_ring2_size = min_t(u32, new_rx_ring2_size,
VMXNET3_RX_RING2_MAX_SIZE);
+ /* rx data ring buffer size has to be a multiple of
+ * VMXNET3_RXDATA_DESC_SIZE_ALIGN
+ */
+ new_rxdata_desc_size =
+ (param->rx_mini_pending + VMXNET3_RXDATA_DESC_SIZE_MASK) &
+ ~VMXNET3_RXDATA_DESC_SIZE_MASK;
+ new_rxdata_desc_size = min_t(u16, new_rxdata_desc_size,
+ VMXNET3_RXDATA_DESC_MAX_SIZE);
+
if (new_tx_ring_size == adapter->tx_ring_size &&
new_rx_ring_size == adapter->rx_ring_size &&
- new_rx_ring2_size == adapter->rx_ring2_size) {
+ new_rx_ring2_size == adapter->rx_ring2_size &&
+ new_rxdata_desc_size == adapter->rxdata_desc_size) {
return 0;
}
@@ -591,8 +611,9 @@ vmxnet3_set_ringparam(struct net_device *netdev,
vmxnet3_rq_destroy_all(adapter);
err = vmxnet3_create_queues(adapter, new_tx_ring_size,
- new_rx_ring_size, new_rx_ring2_size);
-
+ new_rx_ring_size, new_rx_ring2_size,
+ adapter->txdata_desc_size,
+ new_rxdata_desc_size);
if (err) {
/* failed, most likely because of OOM, try default
* size */
@@ -601,10 +622,15 @@ vmxnet3_set_ringparam(struct net_device *netdev,
new_rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
new_rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
new_tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
+ new_rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
+ VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
+
err = vmxnet3_create_queues(adapter,
new_tx_ring_size,
new_rx_ring_size,
- new_rx_ring2_size);
+ new_rx_ring2_size,
+ adapter->txdata_desc_size,
+ new_rxdata_desc_size);
if (err) {
netdev_err(netdev, "failed to create queues "
"with default sizes. Closing it\n");
@@ -620,6 +646,7 @@ vmxnet3_set_ringparam(struct net_device *netdev,
adapter->tx_ring_size = new_tx_ring_size;
adapter->rx_ring_size = new_rx_ring_size;
adapter->rx_ring2_size = new_rx_ring2_size;
+ adapter->rxdata_desc_size = new_rxdata_desc_size;
out:
clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
@@ -698,6 +725,162 @@ vmxnet3_set_rss(struct net_device *netdev, const u32 *p, const u8 *key,
}
#endif
+static int
+vmxnet3_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ if (!VMXNET3_VERSION_GE_3(adapter))
+ return -EOPNOTSUPP;
+
+ switch (adapter->coal_conf->coalMode) {
+ case VMXNET3_COALESCE_DISABLED:
+ /* struct ethtool_coalesce is already initialized to 0 */
+ break;
+ case VMXNET3_COALESCE_ADAPT:
+ ec->use_adaptive_rx_coalesce = true;
+ break;
+ case VMXNET3_COALESCE_STATIC:
+ ec->tx_max_coalesced_frames =
+ adapter->coal_conf->coalPara.coalStatic.tx_comp_depth;
+ ec->rx_max_coalesced_frames =
+ adapter->coal_conf->coalPara.coalStatic.rx_depth;
+ break;
+ case VMXNET3_COALESCE_RBC: {
+ u32 rbc_rate;
+
+ rbc_rate = adapter->coal_conf->coalPara.coalRbc.rbc_rate;
+ ec->rx_coalesce_usecs = VMXNET3_COAL_RBC_USECS(rbc_rate);
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+vmxnet3_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ struct Vmxnet3_DriverShared *shared = adapter->shared;
+ union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
+ unsigned long flags;
+
+ if (!VMXNET3_VERSION_GE_3(adapter))
+ return -EOPNOTSUPP;
+
+ if (ec->rx_coalesce_usecs_irq ||
+ ec->rx_max_coalesced_frames_irq ||
+ ec->tx_coalesce_usecs ||
+ ec->tx_coalesce_usecs_irq ||
+ ec->tx_max_coalesced_frames_irq ||
+ ec->stats_block_coalesce_usecs ||
+ ec->use_adaptive_tx_coalesce ||
+ ec->pkt_rate_low ||
+ ec->rx_coalesce_usecs_low ||
+ ec->rx_max_coalesced_frames_low ||
+ ec->tx_coalesce_usecs_low ||
+ ec->tx_max_coalesced_frames_low ||
+ ec->pkt_rate_high ||
+ ec->rx_coalesce_usecs_high ||
+ ec->rx_max_coalesced_frames_high ||
+ ec->tx_coalesce_usecs_high ||
+ ec->tx_max_coalesced_frames_high ||
+ ec->rate_sample_interval) {
+ return -EINVAL;
+ }
+
+ if ((ec->rx_coalesce_usecs == 0) &&
+ (ec->use_adaptive_rx_coalesce == 0) &&
+ (ec->tx_max_coalesced_frames == 0) &&
+ (ec->rx_max_coalesced_frames == 0)) {
+ memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
+ adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
+ goto done;
+ }
+
+ if (ec->rx_coalesce_usecs != 0) {
+ u32 rbc_rate;
+
+ if ((ec->use_adaptive_rx_coalesce != 0) ||
+ (ec->tx_max_coalesced_frames != 0) ||
+ (ec->rx_max_coalesced_frames != 0)) {
+ return -EINVAL;
+ }
+
+ rbc_rate = VMXNET3_COAL_RBC_RATE(ec->rx_coalesce_usecs);
+ if (rbc_rate < VMXNET3_COAL_RBC_MIN_RATE ||
+ rbc_rate > VMXNET3_COAL_RBC_MAX_RATE) {
+ return -EINVAL;
+ }
+
+ memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
+ adapter->coal_conf->coalMode = VMXNET3_COALESCE_RBC;
+ adapter->coal_conf->coalPara.coalRbc.rbc_rate = rbc_rate;
+ goto done;
+ }
+
+ if (ec->use_adaptive_rx_coalesce != 0) {
+ if ((ec->rx_coalesce_usecs != 0) ||
+ (ec->tx_max_coalesced_frames != 0) ||
+ (ec->rx_max_coalesced_frames != 0)) {
+ return -EINVAL;
+ }
+ memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
+ adapter->coal_conf->coalMode = VMXNET3_COALESCE_ADAPT;
+ goto done;
+ }
+
+ if ((ec->tx_max_coalesced_frames != 0) ||
+ (ec->rx_max_coalesced_frames != 0)) {
+ if ((ec->rx_coalesce_usecs != 0) ||
+ (ec->use_adaptive_rx_coalesce != 0)) {
+ return -EINVAL;
+ }
+
+ if ((ec->tx_max_coalesced_frames >
+ VMXNET3_COAL_STATIC_MAX_DEPTH) ||
+ (ec->rx_max_coalesced_frames >
+ VMXNET3_COAL_STATIC_MAX_DEPTH)) {
+ return -EINVAL;
+ }
+
+ memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
+ adapter->coal_conf->coalMode = VMXNET3_COALESCE_STATIC;
+
+ adapter->coal_conf->coalPara.coalStatic.tx_comp_depth =
+ (ec->tx_max_coalesced_frames ?
+ ec->tx_max_coalesced_frames :
+ VMXNET3_COAL_STATIC_DEFAULT_DEPTH);
+
+ adapter->coal_conf->coalPara.coalStatic.rx_depth =
+ (ec->rx_max_coalesced_frames ?
+ ec->rx_max_coalesced_frames :
+ VMXNET3_COAL_STATIC_DEFAULT_DEPTH);
+
+ adapter->coal_conf->coalPara.coalStatic.tx_depth =
+ VMXNET3_COAL_STATIC_DEFAULT_DEPTH;
+ goto done;
+ }
+
+done:
+ adapter->default_coal_mode = false;
+ if (netif_running(netdev)) {
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ cmdInfo->varConf.confVer = 1;
+ cmdInfo->varConf.confLen =
+ cpu_to_le32(sizeof(*adapter->coal_conf));
+ cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_SET_COALESCE);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ }
+
+ return 0;
+}
+
static const struct ethtool_ops vmxnet3_ethtool_ops = {
.get_settings = vmxnet3_get_settings,
.get_drvinfo = vmxnet3_get_drvinfo,
@@ -706,6 +889,8 @@ static const struct ethtool_ops vmxnet3_ethtool_ops = {
.get_wol = vmxnet3_get_wol,
.set_wol = vmxnet3_set_wol,
.get_link = ethtool_op_get_link,
+ .get_coalesce = vmxnet3_get_coalesce,
+ .set_coalesce = vmxnet3_set_coalesce,
.get_strings = vmxnet3_get_strings,
.get_sset_count = vmxnet3_get_sset_count,
.get_ethtool_stats = vmxnet3_get_ethtool_stats,
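
vmxnet3_set_coalesce() above maps ethtool's rx_coalesce_usecs onto a rate-based scheme via VMXNET3_COAL_RBC_RATE and reports it back through VMXNET3_COAL_RBC_USECS; both are integer divisions of 1,000,000, so the round trip is exact only when the requested interval divides one second evenly. A small demo of the conversion and the 100..100000 interrupts/s bounds it is checked against (macros reproduced with argument parentheses added):

#include <stdio.h>

#define VMXNET3_COAL_RBC_MIN_RATE 100
#define VMXNET3_COAL_RBC_MAX_RATE 100000

/* usecs must be non-zero; the driver guarantees this before converting */
#define VMXNET3_COAL_RBC_RATE(usecs)     (1000000 / (usecs))
#define VMXNET3_COAL_RBC_USECS(rbc_rate) (1000000 / (rbc_rate))

int main(void)
{
	unsigned int usecs[] = { 10, 64, 250, 3000, 1000000 };
	unsigned int i;

	for (i = 0; i < sizeof(usecs) / sizeof(usecs[0]); i++) {
		unsigned int rate = VMXNET3_COAL_RBC_RATE(usecs[i]);
		int ok = rate >= VMXNET3_COAL_RBC_MIN_RATE &&
			 rate <= VMXNET3_COAL_RBC_MAX_RATE;

		printf("%7u us -> %6u irq/s -> %7u us (%s)\n",
		       usecs[i], rate,
		       rate ? VMXNET3_COAL_RBC_USECS(rate) : 0,
		       ok ? "accepted" : "rejected");
	}
	return 0;
}

Note the 3000 us case: it maps to 333 irq/s and reads back as 3003 us, which is why get_coalesce can report a slightly different value than the one set.
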
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 3d2b64e63..7dc37a090 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -20,7 +20,7 @@
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
- * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
+ * Maintained by: pv-drivers@vmware.com
*
*/
@@ -69,16 +69,20 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.4.8.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.4.a.0-k"
/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01040800
+#define VMXNET3_DRIVER_VERSION_NUM 0x01040a00
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
#define VMXNET3_RSS
#endif
+#define VMXNET3_REV_3 2 /* Vmxnet3 Rev. 3 */
+#define VMXNET3_REV_2 1 /* Vmxnet3 Rev. 2 */
+#define VMXNET3_REV_1 0 /* Vmxnet3 Rev. 1 */
+
/*
* Capabilities
*/
@@ -237,6 +241,7 @@ struct vmxnet3_tx_queue {
int num_stop; /* # of times the queue is
* stopped */
int qid;
+ u16 txdata_desc_size;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
enum vmxnet3_rx_buf_type {
@@ -267,15 +272,23 @@ struct vmxnet3_rq_driver_stats {
u64 rx_buf_alloc_failure;
};
+struct vmxnet3_rx_data_ring {
+ Vmxnet3_RxDataDesc *base;
+ dma_addr_t basePA;
+ u16 desc_size;
+};
+
struct vmxnet3_rx_queue {
char name[IFNAMSIZ + 8]; /* To identify interrupt */
struct vmxnet3_adapter *adapter;
struct napi_struct napi;
struct vmxnet3_cmd_ring rx_ring[2];
+ struct vmxnet3_rx_data_ring data_ring;
struct vmxnet3_comp_ring comp_ring;
struct vmxnet3_rx_ctx rx_ctx;
u32 qid; /* rqID in RCD for buffer from 1st ring */
u32 qid2; /* rqID in RCD for buffer from 2nd ring */
+ u32 dataRingQid; /* rqID in RCD for buffer from data ring */
struct vmxnet3_rx_buf_info *buf_info[2];
dma_addr_t buf_info_pa;
struct Vmxnet3_RxQueueCtrl *shared;
@@ -345,6 +358,7 @@ struct vmxnet3_adapter {
int rx_buf_per_pkt; /* only apply to the 1st ring */
dma_addr_t shared_pa;
dma_addr_t queue_desc_pa;
+ dma_addr_t coal_conf_pa;
/* Wake-on-LAN */
u32 wol;
@@ -359,12 +373,21 @@ struct vmxnet3_adapter {
u32 rx_ring_size;
u32 rx_ring2_size;
+ /* Size of buffer in the data ring */
+ u16 txdata_desc_size;
+ u16 rxdata_desc_size;
+
+ bool rxdataring_enabled;
+
struct work_struct work;
unsigned long state; /* VMXNET3_STATE_BIT_xxx */
int share_intr;
+ struct Vmxnet3_CoalesceScheme *coal_conf;
+ bool default_coal_mode;
+
dma_addr_t adapter_pa;
dma_addr_t pm_conf_pa;
dma_addr_t rss_conf_pa;
@@ -387,14 +410,34 @@ struct vmxnet3_adapter {
#define VMXNET3_GET_ADDR_LO(dma) ((u32)(dma))
#define VMXNET3_GET_ADDR_HI(dma) ((u32)(((u64)(dma)) >> 32))
+#define VMXNET3_VERSION_GE_2(adapter) \
+ (adapter->version >= VMXNET3_REV_2 + 1)
+#define VMXNET3_VERSION_GE_3(adapter) \
+ (adapter->version >= VMXNET3_REV_3 + 1)
+
/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
#define VMXNET3_DEF_TX_RING_SIZE 512
#define VMXNET3_DEF_RX_RING_SIZE 256
#define VMXNET3_DEF_RX_RING2_SIZE 128
+#define VMXNET3_DEF_RXDATA_DESC_SIZE 128
+
#define VMXNET3_MAX_ETH_HDR_SIZE 22
#define VMXNET3_MAX_SKB_BUF_SIZE (3*1024)
+#define VMXNET3_GET_RING_IDX(adapter, rqID) \
+ ((rqID >= adapter->num_rx_queues && \
+ rqID < 2 * adapter->num_rx_queues) ? 1 : 0)
+
+#define VMXNET3_RX_DATA_RING(adapter, rqID) \
+ (rqID >= 2 * adapter->num_rx_queues && \
+ rqID < 3 * adapter->num_rx_queues)
+
+#define VMXNET3_COAL_STATIC_DEFAULT_DEPTH 64
+
+#define VMXNET3_COAL_RBC_RATE(usecs) (1000000 / usecs)
+#define VMXNET3_COAL_RBC_USECS(rbc_rate) (1000000 / rbc_rate)
+
int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);
@@ -418,7 +461,8 @@ vmxnet3_set_features(struct net_device *netdev, netdev_features_t features);
int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
- u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size);
+ u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size,
+ u16 txdata_desc_size, u16 rxdata_desc_size);
void vmxnet3_set_ethtool_ops(struct net_device *netdev);
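
With the rx data ring in place, each rx queue now owns three completion IDs, assigned in vmxnet3_request_irqs() as qid = i, qid2 = i + n and dataRingQid = i + 2n for n rx queues; the two macros above decode a completion's rqID back into a ring. A quick demo of that partitioning (macros reproduced with argument parentheses added):

#include <stdio.h>

struct adapter { int num_rx_queues; };

#define VMXNET3_GET_RING_IDX(adapter, rqID) \
	(((rqID) >= (adapter)->num_rx_queues && \
	  (rqID) < 2 * (adapter)->num_rx_queues) ? 1 : 0)

#define VMXNET3_RX_DATA_RING(adapter, rqID) \
	((rqID) >= 2 * (adapter)->num_rx_queues && \
	 (rqID) < 3 * (adapter)->num_rx_queues)

int main(void)
{
	struct adapter a = { .num_rx_queues = 4 };
	int rqid;

	/* IDs 0-3 hit ring 0, 4-7 hit ring 1, 8-11 hit the data ring */
	for (rqid = 0; rqid < 12; rqid++)
		printf("rqID %2d: ring %d, data ring: %s\n", rqid,
		       VMXNET3_GET_RING_IDX(&a, rqid),
		       VMXNET3_RX_DATA_RING(&a, rqid) ? "yes" : "no");
	return 0;
}
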
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 8bd8c7e1e..1ce742032 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -35,6 +35,7 @@
#include <net/route.h>
#include <net/addrconf.h>
#include <net/l3mdev.h>
+#include <net/fib_rules.h>
#define RT_FL_TOS(oldflp4) \
((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
@@ -42,9 +43,14 @@
#define DRV_NAME "vrf"
#define DRV_VERSION "1.0"
+#define FIB_RULE_PREF 1000 /* default preference for FIB rules */
+static bool add_fib_rules = true;
+
struct net_vrf {
struct rtable __rcu *rth;
+ struct rtable __rcu *rth_local;
struct rt6_info __rcu *rt6;
+ struct rt6_info __rcu *rt6_local;
u32 tb_id;
};
@@ -54,9 +60,20 @@ struct pcpu_dstats {
u64 tx_drps;
u64 rx_pkts;
u64 rx_bytes;
+ u64 rx_drps;
struct u64_stats_sync syncp;
};
+static void vrf_rx_stats(struct net_device *dev, int len)
+{
+ struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+ u64_stats_update_begin(&dstats->syncp);
+ dstats->rx_pkts++;
+ dstats->rx_bytes += len;
+ u64_stats_update_end(&dstats->syncp);
+}
+
static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
vrf_dev->stats.tx_errors++;
@@ -91,6 +108,34 @@ static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
return stats;
}
+/* Local traffic destined to a local address. Reinsert the packet into
+ * the rx path, similar to loopback handling.
+ */
+static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
+ struct dst_entry *dst)
+{
+ int len = skb->len;
+
+ skb_orphan(skb);
+
+ skb_dst_set(skb, dst);
+ skb_dst_force(skb);
+
+ /* set pkt_type to avoid skb hitting packet taps twice -
+ * once on Tx and again in Rx processing
+ */
+ skb->pkt_type = PACKET_LOOPBACK;
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ if (likely(netif_rx(skb) == NET_RX_SUCCESS))
+ vrf_rx_stats(dev, len);
+ else
+ this_cpu_inc(dev->dstats->rx_drps);
+
+ return NETDEV_TX_OK;
+}
+
#if IS_ENABLED(CONFIG_IPV6)
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
struct net_device *dev)
@@ -117,8 +162,51 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
goto err;
skb_dst_drop(skb);
+
+ /* if dst.dev is the loopback or the VRF device again, this is
+ * locally originated traffic destined to a local address. Short
+ * circuit to the Rx path using our local dst
+ */
+ if (dst->dev == net->loopback_dev || dst->dev == dev) {
+ struct net_vrf *vrf = netdev_priv(dev);
+ struct rt6_info *rt6_local;
+
+ /* release looked up dst and use cached local dst */
+ dst_release(dst);
+
+ rcu_read_lock();
+
+ rt6_local = rcu_dereference(vrf->rt6_local);
+ if (unlikely(!rt6_local)) {
+ rcu_read_unlock();
+ goto err;
+ }
+
+ /* Ordering issue: cached local dst is created on newlink
+ * before the IPv6 initialization. Using the local dst
+ * requires rt6i_idev to be set so make sure it is.
+ */
+ if (unlikely(!rt6_local->rt6i_idev)) {
+ rt6_local->rt6i_idev = in6_dev_get(dev);
+ if (!rt6_local->rt6i_idev) {
+ rcu_read_unlock();
+ goto err;
+ }
+ }
+
+ dst = &rt6_local->dst;
+ dst_hold(dst);
+
+ rcu_read_unlock();
+
+ return vrf_local_xmit(skb, dev, &rt6_local->dst);
+ }
+
skb_dst_set(skb, dst);
+ /* strip the ethernet header added for pass through VRF device */
+ __skb_pull(skb, skb_network_offset(skb));
+
ret = ip6_local_out(net, skb->sk, skb);
if (unlikely(net_xmit_eval(ret)))
dev->stats.tx_errors++;
@@ -139,29 +227,6 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
}
#endif
-static int vrf_send_v4_prep(struct sk_buff *skb, struct flowi4 *fl4,
- struct net_device *vrf_dev)
-{
- struct rtable *rt;
- int err = 1;
-
- rt = ip_route_output_flow(dev_net(vrf_dev), fl4, NULL);
- if (IS_ERR(rt))
- goto out;
-
- /* TO-DO: what about broadcast ? */
- if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
- ip_rt_put(rt);
- goto out;
- }
-
- skb_dst_drop(skb);
- skb_dst_set(skb, &rt->dst);
- err = 0;
-out:
- return err;
-}
-
static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
struct net_device *vrf_dev)
{
@@ -176,9 +241,51 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
FLOWI_FLAG_SKIP_NH_OIF,
.daddr = ip4h->daddr,
};
+ struct net *net = dev_net(vrf_dev);
+ struct rtable *rt;
- if (vrf_send_v4_prep(skb, &fl4, vrf_dev))
+ rt = ip_route_output_flow(net, &fl4, NULL);
+ if (IS_ERR(rt))
+ goto err;
+
+ if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
+ ip_rt_put(rt);
goto err;
+ }
+
+ skb_dst_drop(skb);
+
+ /* if dst.dev is the loopback or the VRF device again, this is
+ * locally originated traffic destined to a local address. Short
+ * circuit to the Rx path using our local dst
+ */
+ if (rt->dst.dev == net->loopback_dev || rt->dst.dev == vrf_dev) {
+ struct net_vrf *vrf = netdev_priv(vrf_dev);
+ struct rtable *rth_local;
+ struct dst_entry *dst = NULL;
+
+ ip_rt_put(rt);
+
+ rcu_read_lock();
+
+ rth_local = rcu_dereference(vrf->rth_local);
+ if (likely(rth_local)) {
+ dst = &rth_local->dst;
+ dst_hold(dst);
+ }
+
+ rcu_read_unlock();
+
+ if (unlikely(!dst))
+ goto err;
+
+ return vrf_local_xmit(skb, vrf_dev, dst);
+ }
+
+ skb_dst_set(skb, &rt->dst);
+
+ /* strip the ethernet header added for pass through VRF device */
+ __skb_pull(skb, skb_network_offset(skb));
if (!ip4h->saddr) {
ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
@@ -200,9 +307,6 @@ err:
static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
{
- /* strip the ethernet header added for pass through VRF device */
- __skb_pull(skb, skb_network_offset(skb));
-
switch (skb->protocol) {
case htons(ETH_P_IP):
return vrf_process_v4_outbound(skb, dev);
@@ -274,45 +378,92 @@ static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
}
/* holding rtnl */
-static void vrf_rt6_release(struct net_vrf *vrf)
+static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
struct rt6_info *rt6 = rtnl_dereference(vrf->rt6);
+ struct rt6_info *rt6_local = rtnl_dereference(vrf->rt6_local);
+ struct net *net = dev_net(dev);
+ struct dst_entry *dst;
- rcu_assign_pointer(vrf->rt6, NULL);
+ RCU_INIT_POINTER(vrf->rt6, NULL);
+ RCU_INIT_POINTER(vrf->rt6_local, NULL);
+ synchronize_rcu();
+
+ /* move dev in dst's to loopback so this VRF device can be deleted
+ * - based on dst_ifdown
+ */
+ if (rt6) {
+ dst = &rt6->dst;
+ dev_put(dst->dev);
+ dst->dev = net->loopback_dev;
+ dev_hold(dst->dev);
+ dst_release(dst);
+ }
- if (rt6)
- dst_release(&rt6->dst);
+ if (rt6_local) {
+ if (rt6_local->rt6i_idev)
+ in6_dev_put(rt6_local->rt6i_idev);
+
+ dst = &rt6_local->dst;
+ dev_put(dst->dev);
+ dst->dev = net->loopback_dev;
+ dev_hold(dst->dev);
+ dst_release(dst);
+ }
}
static int vrf_rt6_create(struct net_device *dev)
{
+ int flags = DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE;
struct net_vrf *vrf = netdev_priv(dev);
struct net *net = dev_net(dev);
struct fib6_table *rt6i_table;
- struct rt6_info *rt6;
+ struct rt6_info *rt6, *rt6_local;
int rc = -ENOMEM;
+ /* IPv6 can be CONFIG-enabled and then disabled at runtime */
+ if (!ipv6_mod_enabled())
+ return 0;
+
rt6i_table = fib6_new_table(net, vrf->tb_id);
if (!rt6i_table)
goto out;
- rt6 = ip6_dst_alloc(net, dev,
- DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE);
+ /* create a dst for routing packets out a VRF device */
+ rt6 = ip6_dst_alloc(net, dev, flags);
if (!rt6)
goto out;
dst_hold(&rt6->dst);
rt6->rt6i_table = rt6i_table;
- rt6->dst.output = vrf_output6;
+ rt6->dst.output = vrf_output6;
+
+ /* create a dst for local routing - packets sent locally
+ * to local address via the VRF device as a loopback
+ */
+ rt6_local = ip6_dst_alloc(net, dev, flags);
+ if (!rt6_local) {
+ dst_release(&rt6->dst);
+ goto out;
+ }
+
+ dst_hold(&rt6_local->dst);
+
+ rt6_local->rt6i_idev = in6_dev_get(dev);
+ rt6_local->rt6i_flags = RTF_UP | RTF_NONEXTHOP | RTF_LOCAL;
+ rt6_local->rt6i_table = rt6i_table;
+ rt6_local->dst.input = ip6_input;
+
rcu_assign_pointer(vrf->rt6, rt6);
+ rcu_assign_pointer(vrf->rt6_local, rt6_local);
rc = 0;
out:
return rc;
}
#else
-static void vrf_rt6_release(struct net_vrf *vrf)
+static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
}
@@ -381,32 +532,66 @@ static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
}
/* holding rtnl */
-static void vrf_rtable_release(struct net_vrf *vrf)
+static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf)
{
struct rtable *rth = rtnl_dereference(vrf->rth);
+ struct rtable *rth_local = rtnl_dereference(vrf->rth_local);
+ struct net *net = dev_net(dev);
+ struct dst_entry *dst;
- rcu_assign_pointer(vrf->rth, NULL);
+ RCU_INIT_POINTER(vrf->rth, NULL);
+ RCU_INIT_POINTER(vrf->rth_local, NULL);
+ synchronize_rcu();
+
+ /* move dev in dst's to loopback so this VRF device can be deleted
+ * - based on dst_ifdown
+ */
+ if (rth) {
+ dst = &rth->dst;
+ dev_put(dst->dev);
+ dst->dev = net->loopback_dev;
+ dev_hold(dst->dev);
+ dst_release(dst);
+ }
- if (rth)
- dst_release(&rth->dst);
+ if (rth_local) {
+ dst = &rth_local->dst;
+ dev_put(dst->dev);
+ dst->dev = net->loopback_dev;
+ dev_hold(dst->dev);
+ dst_release(dst);
+ }
}
static int vrf_rtable_create(struct net_device *dev)
{
struct net_vrf *vrf = netdev_priv(dev);
- struct rtable *rth;
+ struct rtable *rth, *rth_local;
if (!fib_new_table(dev_net(dev), vrf->tb_id))
return -ENOMEM;
+ /* create a dst for routing packets out through a VRF device */
rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
if (!rth)
return -ENOMEM;
- rth->dst.output = vrf_output;
+ /* create a dst for local ingress routing - packets sent locally
+ * to local address via the VRF device as a loopback
+ */
+ rth_local = rt_dst_alloc(dev, RTCF_LOCAL, RTN_LOCAL, 1, 1, 0);
+ if (!rth_local) {
+ dst_release(&rth->dst);
+ return -ENOMEM;
+ }
+
+ rth->dst.output = vrf_output;
rth->rt_table_id = vrf->tb_id;
+ rth_local->rt_table_id = vrf->tb_id;
+
rcu_assign_pointer(vrf->rth, rth);
+ rcu_assign_pointer(vrf->rth_local, rth_local);
return 0;
}
@@ -477,8 +662,8 @@ static void vrf_dev_uninit(struct net_device *dev)
struct net_device *port_dev;
struct list_head *iter;
- vrf_rtable_release(vrf);
- vrf_rt6_release(vrf);
+ vrf_rtable_release(dev, vrf);
+ vrf_rt6_release(dev, vrf);
netdev_for_each_lower_dev(dev, port_dev, iter)
vrf_del_slave(dev, port_dev);
@@ -504,10 +689,16 @@ static int vrf_dev_init(struct net_device *dev)
dev->flags = IFF_MASTER | IFF_NOARP;
+ /* MTU is irrelevant for VRF device; set to 64k similar to lo */
+ dev->mtu = 64 * 1024;
+
+ /* similarly, oper state is irrelevant; set to up to avoid confusion */
+ dev->operstate = IF_OPER_UP;
+ netdev_lockdep_set_classes(dev);
return 0;
out_rth:
- vrf_rtable_release(vrf);
+ vrf_rtable_release(dev, vrf);
out_stats:
free_percpu(dev->dstats);
dev->dstats = NULL;
@@ -588,6 +779,25 @@ static int vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4)
return rc;
}
+static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+ return 0;
+}
+
+static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook,
+ struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct net *net = dev_net(dev);
+
+ nf_reset(skb);
+
+ if (NF_HOOK(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) < 0)
+ skb = NULL; /* kfree_skb(skb) handled by nf code */
+
+ return skb;
+}
+
#if IS_ENABLED(CONFIG_IPV6)
/* neighbor handling is done with actual device; do not want
* to flip skb->dev for those ndisc packets. This really fails
@@ -623,11 +833,78 @@ out:
return rc;
}
+static struct rt6_info *vrf_ip6_route_lookup(struct net *net,
+ const struct net_device *dev,
+ struct flowi6 *fl6,
+ int ifindex,
+ int flags)
+{
+ struct net_vrf *vrf = netdev_priv(dev);
+ struct fib6_table *table = NULL;
+ struct rt6_info *rt6;
+
+ rcu_read_lock();
+
+ /* fib6_table does not have a refcnt and can not be freed */
+ rt6 = rcu_dereference(vrf->rt6);
+ if (likely(rt6))
+ table = rt6->rt6i_table;
+
+ rcu_read_unlock();
+
+ if (!table)
+ return NULL;
+
+ return ip6_pol_route(net, table, ifindex, fl6, flags);
+}
+
+static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev,
+ int ifindex)
+{
+ const struct ipv6hdr *iph = ipv6_hdr(skb);
+ struct flowi6 fl6 = {
+ .daddr = iph->daddr,
+ .saddr = iph->saddr,
+ .flowlabel = ip6_flowinfo(iph),
+ .flowi6_mark = skb->mark,
+ .flowi6_proto = iph->nexthdr,
+ .flowi6_iif = ifindex,
+ };
+ struct net *net = dev_net(vrf_dev);
+ struct rt6_info *rt6;
+
+ rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex,
+ RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE);
+ if (unlikely(!rt6))
+ return;
+
+ if (unlikely(&rt6->dst == &net->ipv6.ip6_null_entry->dst))
+ return;
+
+ skb_dst_set(skb, &rt6->dst);
+}
+
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
struct sk_buff *skb)
{
- /* if packet is NDISC keep the ingress interface */
- if (!ipv6_ndisc_frame(skb)) {
+ int orig_iif = skb->skb_iif;
+ bool need_strict;
+
+ /* loopback traffic; do not push through packet taps again.
+ * Reset pkt_type for upper layers to process skb
+ */
+ if (skb->pkt_type == PACKET_LOOPBACK) {
+ skb->dev = vrf_dev;
+ skb->skb_iif = vrf_dev->ifindex;
+ skb->pkt_type = PACKET_HOST;
+ goto out;
+ }
+
+ /* if packet is NDISC or addressed to multicast or link-local
+ * then keep the ingress interface
+ */
+ need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
+ if (!ipv6_ndisc_frame(skb) && !need_strict) {
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
@@ -638,6 +915,11 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
}
+ if (need_strict)
+ vrf_ip6_input_dst(skb, vrf_dev, orig_iif);
+
+ skb = vrf_rcv_nfhook(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, vrf_dev);
+out:
return skb;
}
@@ -655,10 +937,20 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
+ /* loopback traffic; do not push through packet taps again.
+ * Reset pkt_type for upper layers to process skb
+ */
+ if (skb->pkt_type == PACKET_LOOPBACK) {
+ skb->pkt_type = PACKET_HOST;
+ goto out;
+ }
+
skb_push(skb, skb->mac_len);
dev_queue_xmit_nit(skb, vrf_dev);
skb_pull(skb, skb->mac_len);
+ skb = vrf_rcv_nfhook(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, vrf_dev);
+out:
return skb;
}
@@ -679,13 +971,37 @@ static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
- const struct flowi6 *fl6)
+ struct flowi6 *fl6)
{
+ bool need_strict = rt6_need_strict(&fl6->daddr);
+ struct net_vrf *vrf = netdev_priv(dev);
+ struct net *net = dev_net(dev);
struct dst_entry *dst = NULL;
+ struct rt6_info *rt;
- if (!(fl6->flowi6_flags & FLOWI_FLAG_L3MDEV_SRC)) {
- struct net_vrf *vrf = netdev_priv(dev);
- struct rt6_info *rt;
+ /* send to link-local or multicast address */
+ if (need_strict) {
+ int flags = RT6_LOOKUP_F_IFACE;
+
+ /* VRF device does not have a link-local address and
+ * sending packets to link-local or mcast addresses over
+ * a VRF device does not make sense
+ */
+ if (fl6->flowi6_oif == dev->ifindex) {
+ struct dst_entry *dst = &net->ipv6.ip6_null_entry->dst;
+
+ dst_hold(dst);
+ return dst;
+ }
+
+ if (!ipv6_addr_any(&fl6->saddr))
+ flags |= RT6_LOOKUP_F_HAS_SADDR;
+
+ rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, flags);
+ if (rt)
+ dst = &rt->dst;
+
+ } else if (!(fl6->flowi6_flags & FLOWI_FLAG_L3MDEV_SRC)) {
rcu_read_lock();
@@ -698,8 +1014,52 @@ static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
rcu_read_unlock();
}
+ /* make sure oif is set to VRF device for lookup */
+ if (!need_strict)
+ fl6->flowi6_oif = dev->ifindex;
+
return dst;
}
+
+/* called under rcu_read_lock */
+static int vrf_get_saddr6(struct net_device *dev, const struct sock *sk,
+ struct flowi6 *fl6)
+{
+ struct net *net = dev_net(dev);
+ struct dst_entry *dst;
+ struct rt6_info *rt;
+ int err;
+
+ if (rt6_need_strict(&fl6->daddr)) {
+ rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif,
+ RT6_LOOKUP_F_IFACE);
+ if (unlikely(!rt))
+ return 0;
+
+ dst = &rt->dst;
+ } else {
+ __u8 flags = fl6->flowi6_flags;
+
+ fl6->flowi6_flags |= FLOWI_FLAG_L3MDEV_SRC;
+ fl6->flowi6_flags |= FLOWI_FLAG_SKIP_NH_OIF;
+
+ dst = ip6_route_output(net, sk, fl6);
+ rt = (struct rt6_info *)dst;
+
+ fl6->flowi6_flags = flags;
+ }
+
+ err = dst->error;
+ if (!err) {
+ err = ip6_route_get_saddr(net, rt, &fl6->daddr,
+ sk ? inet6_sk(sk)->srcprefs : 0,
+ &fl6->saddr);
+ }
+
+ dst_release(dst);
+
+ return err;
+}
#endif
static const struct l3mdev_ops vrf_l3mdev_ops = {
@@ -709,6 +1069,7 @@ static const struct l3mdev_ops vrf_l3mdev_ops = {
.l3mdev_l3_rcv = vrf_l3_rcv,
#if IS_ENABLED(CONFIG_IPV6)
.l3mdev_get_rt6_dst = vrf_get_rt6_dst,
+ .l3mdev_get_saddr6 = vrf_get_saddr6,
#endif
};
@@ -723,6 +1084,94 @@ static const struct ethtool_ops vrf_ethtool_ops = {
.get_drvinfo = vrf_get_drvinfo,
};
+static inline size_t vrf_fib_rule_nl_size(void)
+{
+ size_t sz;
+
+ sz = NLMSG_ALIGN(sizeof(struct fib_rule_hdr));
+ sz += nla_total_size(sizeof(u8)); /* FRA_L3MDEV */
+ sz += nla_total_size(sizeof(u32)); /* FRA_PRIORITY */
+
+ return sz;
+}
+
+static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
+{
+ struct fib_rule_hdr *frh;
+ struct nlmsghdr *nlh;
+ struct sk_buff *skb;
+ int err;
+
+ if (family == AF_INET6 && !ipv6_mod_enabled())
+ return 0;
+
+ skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*frh), 0);
+ if (!nlh)
+ goto nla_put_failure;
+
+ /* rule only needs to appear once */
+ nlh->nlmsg_flags |= NLM_F_EXCL;
+
+ frh = nlmsg_data(nlh);
+ memset(frh, 0, sizeof(*frh));
+ frh->family = family;
+ frh->action = FR_ACT_TO_TBL;
+
+ if (nla_put_u8(skb, FRA_L3MDEV, 1))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF))
+ goto nla_put_failure;
+
+ nlmsg_end(skb, nlh);
+
+ /* fib_nl_{new,del}rule handling looks for net from skb->sk */
+ skb->sk = dev_net(dev)->rtnl;
+ if (add_it) {
+ err = fib_nl_newrule(skb, nlh);
+ if (err == -EEXIST)
+ err = 0;
+ } else {
+ err = fib_nl_delrule(skb, nlh);
+ if (err == -ENOENT)
+ err = 0;
+ }
+ nlmsg_free(skb);
+
+ return err;
+
+nla_put_failure:
+ nlmsg_free(skb);
+
+ return -EMSGSIZE;
+}
+
+static int vrf_add_fib_rules(const struct net_device *dev)
+{
+ int err;
+
+ err = vrf_fib_rule(dev, AF_INET, true);
+ if (err < 0)
+ goto out_err;
+
+ err = vrf_fib_rule(dev, AF_INET6, true);
+ if (err < 0)
+ goto ipv6_err;
+
+ return 0;
+
+ipv6_err:
+ vrf_fib_rule(dev, AF_INET, false);
+
+out_err:
+ netdev_err(dev, "Failed to add FIB rules.\n");
+ return err;
+}
+
static void vrf_setup(struct net_device *dev)
{
ether_setup(dev);
@@ -741,6 +1190,20 @@ static void vrf_setup(struct net_device *dev)
/* don't allow vrf devices to change network namespaces. */
dev->features |= NETIF_F_NETNS_LOCAL;
+
+ /* does not make sense for a VLAN to be added to a vrf device */
+ dev->features |= NETIF_F_VLAN_CHALLENGED;
+
+ /* enable offload features */
+ dev->features |= NETIF_F_GSO_SOFTWARE;
+ dev->features |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM;
+ dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
+
+ dev->hw_features = dev->features;
+ dev->hw_enc_features = dev->features;
+
+ /* default to no qdisc; user can add if desired */
+ dev->priv_flags |= IFF_NO_QUEUE;
}
static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -763,6 +1226,7 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct net_vrf *vrf = netdev_priv(dev);
+ int err;
if (!data || !data[IFLA_VRF_TABLE])
return -EINVAL;
@@ -771,7 +1235,21 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
dev->priv_flags |= IFF_L3MDEV_MASTER;
- return register_netdevice(dev);
+ err = register_netdevice(dev);
+ if (err)
+ goto out;
+
+ if (add_fib_rules) {
+ err = vrf_add_fib_rules(dev);
+ if (err) {
+ unregister_netdevice(dev);
+ goto out;
+ }
+ add_fib_rules = false;
+ }
+
+out:
+ return err;
}
static size_t vrf_nl_getsize(const struct net_device *dev)
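
vrf_fib_rule_nl_size() above sizes the request as the aligned fib_rule_hdr plus one attribute for FRA_L3MDEV (u8) and one for FRA_PRIORITY (u32). With the standard 4-byte netlink alignment and 4-byte attribute header, that works out to 12 + 8 + 8 = 28 bytes; a sketch of the arithmetic, where the fib_rule_hdr mirror is an assumption for illustration rather than a kernel ABI reference:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define NLMSG_ALIGNTO	4u
#define NLMSG_ALIGN(len)	(((len) + NLMSG_ALIGNTO - 1) & ~(NLMSG_ALIGNTO - 1))
#define NLA_HDRLEN	4
#define nla_total_size(payload)	NLMSG_ALIGN(NLA_HDRLEN + (payload))

struct fib_rule_hdr {		/* mirrors uapi/linux/fib_rules.h */
	uint8_t family, dst_len, src_len, tos;
	uint8_t table, res1, res2, action;
	uint32_t flags;
};

int main(void)
{
	size_t sz = NLMSG_ALIGN(sizeof(struct fib_rule_hdr));

	sz += nla_total_size(sizeof(uint8_t));	/* FRA_L3MDEV */
	sz += nla_total_size(sizeof(uint32_t));	/* FRA_PRIORITY */

	/* 12 (aligned header) + 8 + 8 = 28 bytes passed to nlmsg_new() */
	printf("FIB rule request payload: %zu bytes\n", sz);
	return 0;
}
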
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index b3b9db68f..6e6583205 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -11,32 +11,18 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
-#include <linux/types.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
-#include <linux/skbuff.h>
-#include <linux/rculist.h>
-#include <linux/netdevice.h>
-#include <linux/in.h>
-#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/igmp.h>
-#include <linux/etherdevice.h>
#include <linux/if_ether.h>
-#include <linux/if_vlan.h>
-#include <linux/hash.h>
#include <linux/ethtool.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/ip.h>
-#include <net/ip_tunnels.h>
#include <net/icmp.h>
-#include <net/udp.h>
-#include <net/udp_tunnel.h>
#include <net/rtnetlink.h>
-#include <net/route.h>
-#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
@@ -44,12 +30,9 @@
#include <net/protocol.h>
#if IS_ENABLED(CONFIG_IPV6)
-#include <net/ipv6.h>
-#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/ip6_checksum.h>
#endif
-#include <net/dst_metadata.h>
#define VXLAN_VERSION "0.1"
@@ -619,42 +602,6 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
}
-/* Notify netdevs that UDP port started listening */
-static void vxlan_notify_add_rx_port(struct vxlan_sock *vs)
-{
- struct net_device *dev;
- struct sock *sk = vs->sock->sk;
- struct net *net = sock_net(sk);
- sa_family_t sa_family = vxlan_get_sk_family(vs);
- __be16 port = inet_sk(sk)->inet_sport;
-
- rcu_read_lock();
- for_each_netdev_rcu(net, dev) {
- if (dev->netdev_ops->ndo_add_vxlan_port)
- dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
- port);
- }
- rcu_read_unlock();
-}
-
-/* Notify netdevs that UDP port is no more listening */
-static void vxlan_notify_del_rx_port(struct vxlan_sock *vs)
-{
- struct net_device *dev;
- struct sock *sk = vs->sock->sk;
- struct net *net = sock_net(sk);
- sa_family_t sa_family = vxlan_get_sk_family(vs);
- __be16 port = inet_sk(sk)->inet_sport;
-
- rcu_read_lock();
- for_each_netdev_rcu(net, dev) {
- if (dev->netdev_ops->ndo_del_vxlan_port)
- dev->netdev_ops->ndo_del_vxlan_port(dev, sa_family,
- port);
- }
- rcu_read_unlock();
-}
-
/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
const u8 *mac, union vxlan_addr *ip,
@@ -1050,7 +997,10 @@ static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id);
spin_lock(&vn->sock_lock);
hlist_del_rcu(&vs->hlist);
- vxlan_notify_del_rx_port(vs);
+ udp_tunnel_notify_del_rx_port(vs->sock,
+ (vs->flags & VXLAN_F_GPE) ?
+ UDP_TUNNEL_TYPE_VXLAN_GPE :
+ UDP_TUNNEL_TYPE_VXLAN);
spin_unlock(&vn->sock_lock);
return true;
@@ -1861,7 +1811,7 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan,
fl4.flowi4_mark = skb->mark;
fl4.flowi4_proto = IPPROTO_UDP;
fl4.daddr = daddr;
- fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr;
+ fl4.saddr = *saddr;
rt = ip_route_output_key(vxlan->net, &fl4);
if (!IS_ERR(rt)) {
@@ -1897,7 +1847,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_oif = oif;
fl6.daddr = *daddr;
- fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr;
+ fl6.saddr = *saddr;
fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
fl6.flowi6_mark = skb->mark;
fl6.flowi6_proto = IPPROTO_UDP;
@@ -1970,7 +1920,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
struct rtable *rt = NULL;
const struct iphdr *old_iph;
union vxlan_addr *dst;
- union vxlan_addr remote_ip;
+ union vxlan_addr remote_ip, local_ip;
+ union vxlan_addr *src;
struct vxlan_metadata _md;
struct vxlan_metadata *md = &_md;
__be16 src_port = 0, dst_port;
@@ -1988,6 +1939,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
vni = rdst->remote_vni;
dst = &rdst->remote_ip;
+ src = &vxlan->cfg.saddr;
dst_cache = &rdst->dst_cache;
} else {
if (!info) {
@@ -1998,11 +1950,15 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
vni = vxlan_tun_id_to_vni(info->key.tun_id);
remote_ip.sa.sa_family = ip_tunnel_info_af(info);
- if (remote_ip.sa.sa_family == AF_INET)
+ if (remote_ip.sa.sa_family == AF_INET) {
remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
- else
+ local_ip.sin.sin_addr.s_addr = info->key.u.ipv4.src;
+ } else {
remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
+ local_ip.sin6.sin6_addr = info->key.u.ipv6.src;
+ }
dst = &remote_ip;
+ src = &local_ip;
dst_cache = &info->dst_cache;
}
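/* Editor's note (added comment, not part of the patch): in the
 * COLLECT_METADATA path above, the local endpoint now follows the
 * per-flow tunnel metadata as well: both info->key.u.ipv4/ipv6 .dst and
 * .src are honoured, instead of always falling back to the device-wide
 * vxlan->cfg.saddr.
 */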
@@ -2042,15 +1998,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
if (dst->sa.sa_family == AF_INET) {
- __be32 saddr;
-
if (!vxlan->vn4_sock)
goto drop;
sk = vxlan->vn4_sock->sock->sk;
rt = vxlan_get_route(vxlan, skb,
rdst ? rdst->remote_ifindex : 0, tos,
- dst->sin.sin_addr.s_addr, &saddr,
+ dst->sin.sin_addr.s_addr,
+ &src->sin.sin_addr.s_addr,
dst_cache, info);
if (IS_ERR(rt)) {
netdev_dbg(dev, "no route to %pI4\n",
@@ -2067,7 +2022,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
/* Bypass encapsulation if the destination is local */
- if (rt->rt_flags & RTCF_LOCAL &&
+ if (!info && rt->rt_flags & RTCF_LOCAL &&
!(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
struct vxlan_dev *dst_vxlan;
@@ -2093,13 +2048,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
if (err < 0)
goto xmit_tx_error;
- udp_tunnel_xmit_skb(rt, sk, skb, saddr,
+ udp_tunnel_xmit_skb(rt, sk, skb, src->sin.sin_addr.s_addr,
dst->sin.sin_addr.s_addr, tos, ttl, df,
src_port, dst_port, xnet, !udp_sum);
#if IS_ENABLED(CONFIG_IPV6)
} else {
struct dst_entry *ndst;
- struct in6_addr saddr;
u32 rt6i_flags;
if (!vxlan->vn6_sock)
@@ -2108,7 +2062,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
ndst = vxlan6_get_route(vxlan, skb,
rdst ? rdst->remote_ifindex : 0, tos,
- label, &dst->sin6.sin6_addr, &saddr,
+ label, &dst->sin6.sin6_addr,
+ &src->sin6.sin6_addr,
dst_cache, info);
if (IS_ERR(ndst)) {
netdev_dbg(dev, "no route to %pI6\n",
@@ -2127,7 +2082,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
/* Bypass encapsulation if the destination is local */
rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
- if (rt6i_flags & RTF_LOCAL &&
+ if (!info && rt6i_flags & RTF_LOCAL &&
!(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
struct vxlan_dev *dst_vxlan;
@@ -2154,7 +2109,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
return;
}
udp_tunnel6_xmit_skb(ndst, sk, skb, dev,
- &saddr, &dst->sin6.sin6_addr, tos, ttl,
+ &src->sin6.sin6_addr,
+ &dst->sin6.sin6_addr, tos, ttl,
label, src_port, dst_port, !udp_sum);
#endif
}
@@ -2525,30 +2481,24 @@ static struct device_type vxlan_type = {
.name = "vxlan",
};
-/* Calls the ndo_add_vxlan_port of the caller in order to
+/* Calls the ndo_udp_tunnel_add of the caller in order to
* supply the listening VXLAN udp ports. Callers are expected
- * to implement the ndo_add_vxlan_port.
+ * to implement the ndo_udp_tunnel_add.
*/
static void vxlan_push_rx_ports(struct net_device *dev)
{
struct vxlan_sock *vs;
struct net *net = dev_net(dev);
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
- sa_family_t sa_family;
- __be16 port;
unsigned int i;
- if (!dev->netdev_ops->ndo_add_vxlan_port)
- return;
-
spin_lock(&vn->sock_lock);
for (i = 0; i < PORT_HASH_SIZE; ++i) {
- hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
- port = inet_sk(vs->sock->sk)->inet_sport;
- sa_family = vxlan_get_sk_family(vs);
- dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
- port);
- }
+ hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist)
+ udp_tunnel_push_rx_port(dev, vs->sock,
+ (vs->flags & VXLAN_F_GPE) ?
+ UDP_TUNNEL_TYPE_VXLAN_GPE :
+ UDP_TUNNEL_TYPE_VXLAN);
}
spin_unlock(&vn->sock_lock);
}
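/* Editor's sketch (not part of this patch): how a NIC driver asks tunnel
 * drivers to replay their listening ports after a reset. This assumes
 * the v4.8 helper udp_tunnel_get_rx_info(), which raises
 * NETDEV_UDP_TUNNEL_PUSH_INFO and ends up in vxlan_push_rx_ports()
 * above; "example_nic_open" is a hypothetical driver function.
 */
static int example_nic_open(struct net_device *dev)
{
	/* ndo_open already runs under RTNL, which the helper asserts */
	udp_tunnel_get_rx_info(dev);
	return 0;
}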
@@ -2750,7 +2700,10 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
spin_lock(&vn->sock_lock);
hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
- vxlan_notify_add_rx_port(vs);
+ udp_tunnel_notify_add_rx_port(sock,
+ (vs->flags & VXLAN_F_GPE) ?
+ UDP_TUNNEL_TYPE_VXLAN_GPE :
+ UDP_TUNNEL_TYPE_VXLAN);
spin_unlock(&vn->sock_lock);
/* Mark socket as an encapsulation socket. */
@@ -2829,14 +2782,15 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
struct net_device *lowerdev = NULL;
if (conf->flags & VXLAN_F_GPE) {
- if (conf->flags & ~VXLAN_F_ALLOWED_GPE)
- return -EINVAL;
/* For now, allow GPE only together with COLLECT_METADATA.
* This can be relaxed later; in such case, the other side
* of the PtP link will have to be provided.
*/
- if (!(conf->flags & VXLAN_F_COLLECT_METADATA))
+ if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) ||
+ !(conf->flags & VXLAN_F_COLLECT_METADATA)) {
+ pr_info("unsupported combination of extensions\n");
return -EINVAL;
+ }
vxlan_raw_setup(dev);
} else {
@@ -2889,6 +2843,9 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
needed_headroom = lowerdev->hard_header_len;
+ } else if (vxlan_addr_multicast(&dst->remote_ip)) {
+ pr_info("multicast destination requires interface to be specified\n");
+ return -EINVAL;
}
if (conf->mtu) {
@@ -2921,8 +2878,10 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 &&
tmp->cfg.dst_port == vxlan->cfg.dst_port &&
(tmp->flags & VXLAN_F_RCV_FLAGS) ==
- (vxlan->flags & VXLAN_F_RCV_FLAGS))
- return -EEXIST;
+ (vxlan->flags & VXLAN_F_RCV_FLAGS)) {
+ pr_info("duplicate VNI %u\n", be32_to_cpu(conf->vni));
+ return -EEXIST;
+ }
}
dev->ethtool_ops = &vxlan_ethtool_ops;
@@ -2956,7 +2915,6 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct vxlan_config conf;
- int err;
memset(&conf, 0, sizeof(conf));
@@ -3065,26 +3023,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
if (tb[IFLA_MTU])
conf.mtu = nla_get_u32(tb[IFLA_MTU]);
- err = vxlan_dev_configure(src_net, dev, &conf);
- switch (err) {
- case -ENODEV:
- pr_info("ifindex %d does not exist\n", conf.remote_ifindex);
- break;
-
- case -EPERM:
- pr_info("IPv6 is disabled via sysctl\n");
- break;
-
- case -EEXIST:
- pr_info("duplicate VNI %u\n", be32_to_cpu(conf.vni));
- break;
-
- case -EINVAL:
- pr_info("unsupported combination of extensions\n");
- break;
- }
-
- return err;
+ return vxlan_dev_configure(src_net, dev, &conf);
}
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
@@ -3308,7 +3247,7 @@ static int vxlan_netdevice_event(struct notifier_block *unused,
if (event == NETDEV_UNREGISTER)
vxlan_handle_lowerdev_unregister(vn, dev);
- else if (event == NETDEV_OFFLOAD_PUSH_VXLAN)
+ else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO)
vxlan_push_rx_ports(dev);
return NOTIFY_DONE;
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index a2fdd15f2..33ab3345d 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -280,6 +280,28 @@ config DSCC4
To compile this driver as a module, choose M here: the
module will be called dscc4.
+config FSL_UCC_HDLC
+ tristate "Freescale QUICC Engine HDLC support"
+ depends on HDLC
+ depends on QUICC_ENGINE
+ help
+ Driver for Freescale QUICC Engine HDLC controller. The driver
+ supports HDLC in NMSI and TDM mode.
+
+ To compile this driver as a module, choose M here: the
+ module will be called fsl_ucc_hdlc.
+
+config SLIC_DS26522
+ tristate "Slic Maxim ds26522 card support"
+ depends on SPI
+ depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
+ help
+ This module initializes and configures the SLIC Maxim ds26522
+ card in T1 or E1 mode.
+
+ To compile this driver as a module, choose M here: the
+ module will be called slic_ds26522.
+
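# Editor's sketch (not part of this patch): a config fragment that builds
# both new WAN drivers as modules, matching the help texts above.
CONFIG_FSL_UCC_HDLC=m
CONFIG_SLIC_DS26522=m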
config DSCC4_PCISYNC
bool "Etinc PCISYNC features"
depends on DSCC4
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index c135ef47c..73c232660 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -32,6 +32,8 @@ obj-$(CONFIG_WANXL) += wanxl.o
obj-$(CONFIG_PCI200SYN) += pci200syn.o
obj-$(CONFIG_PC300TOO) += pc300too.o
obj-$(CONFIG_IXP4XX_HSS) += ixp4xx_hss.o
+obj-$(CONFIG_FSL_UCC_HDLC) += fsl_ucc_hdlc.o
+obj-$(CONFIG_SLIC_DS26522) += slic_ds26522.o
clean-files := wanxlfw.inc
$(obj)/wanxl.o: $(obj)/wanxlfw.inc
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
new file mode 100644
index 000000000..6f044450b
--- /dev/null
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -0,0 +1,1178 @@
+/* Freescale QUICC Engine HDLC Device Driver
+ *
+ * Copyright 2016 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/hdlc.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <soc/fsl/qe/qe_tdm.h>
+#include <uapi/linux/if_arp.h>
+
+#include "fsl_ucc_hdlc.h"
+
+#define DRV_DESC "Freescale QE UCC HDLC Driver"
+#define DRV_NAME "ucc_hdlc"
+
+#define TDM_PPPOHT_SLIC_MAXIN
+#define BROKEN_FRAME_INFO
+
+static struct ucc_tdm_info utdm_primary_info = {
+ .uf_info = {
+ .tsa = 0,
+ .cdp = 0,
+ .cds = 1,
+ .ctsp = 1,
+ .ctss = 1,
+ .revd = 0,
+ .urfs = 256,
+ .utfs = 256,
+ .urfet = 128,
+ .urfset = 192,
+ .utfet = 128,
+ .utftt = 0x40,
+ .ufpt = 256,
+ .mode = UCC_FAST_PROTOCOL_MODE_HDLC,
+ .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
+ .tenc = UCC_FAST_TX_ENCODING_NRZ,
+ .renc = UCC_FAST_RX_ENCODING_NRZ,
+ .tcrc = UCC_FAST_16_BIT_CRC,
+ .synl = UCC_FAST_SYNC_LEN_NOT_USED,
+ },
+
+ .si_info = {
+#ifdef TDM_PPPOHT_SLIC_MAXIN
+ .simr_rfsd = 1,
+ .simr_tfsd = 2,
+#else
+ .simr_rfsd = 0,
+ .simr_tfsd = 0,
+#endif
+ .simr_crt = 0,
+ .simr_sl = 0,
+ .simr_ce = 1,
+ .simr_fe = 1,
+ .simr_gm = 0,
+ },
+};
+
+static struct ucc_tdm_info utdm_info[MAX_HDLC_NUM];
+
+static int uhdlc_init(struct ucc_hdlc_private *priv)
+{
+ struct ucc_tdm_info *ut_info;
+ struct ucc_fast_info *uf_info;
+ u32 cecr_subblock;
+ u16 bd_status;
+ int ret, i;
+ void *bd_buffer;
+ dma_addr_t bd_dma_addr;
+ s32 riptr;
+ s32 tiptr;
+ u32 gumr;
+
+ ut_info = priv->ut_info;
+ uf_info = &ut_info->uf_info;
+
+ if (priv->tsa) {
+ uf_info->tsa = 1;
+ uf_info->ctsp = 1;
+ }
+ uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
+ UCC_HDLC_UCCE_TXB) << 16);
+
+ ret = ucc_fast_init(uf_info, &priv->uccf);
+ if (ret) {
+ dev_err(priv->dev, "Failed to init uccf.");
+ return ret;
+ }
+
+ priv->uf_regs = priv->uccf->uf_regs;
+ ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
+
+ /* Loopback mode */
+ if (priv->loopback) {
+ dev_info(priv->dev, "Loopback Mode\n");
+ gumr = ioread32be(&priv->uf_regs->gumr);
+ gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
+ UCC_FAST_GUMR_TCI);
+ gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
+ iowrite32be(gumr, &priv->uf_regs->gumr);
+ }
+
+ /* Initialize SI */
+ if (priv->tsa)
+ ucc_tdm_init(priv->utdm, priv->ut_info);
+
+ /* Write to QE CECR, UCCx channel to Stop Transmission */
+ cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
+ ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
+ QE_CR_PROTOCOL_UNSPECIFIED, 0);
+
+ /* Set UPSMR normal mode (needs fixing) */
+ iowrite32be(0, &priv->uf_regs->upsmr);
+
+ priv->rx_ring_size = RX_BD_RING_LEN;
+ priv->tx_ring_size = TX_BD_RING_LEN;
+ /* Alloc Rx BD */
+ priv->rx_bd_base = dma_alloc_coherent(priv->dev,
+ RX_BD_RING_LEN * sizeof(struct qe_bd *),
+ &priv->dma_rx_bd, GFP_KERNEL);
+
+ if (!priv->rx_bd_base) {
+ dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n");
+ ret = -ENOMEM;
+ goto free_uccf;
+ }
+
+ /* Alloc Tx BD */
+ priv->tx_bd_base = dma_alloc_coherent(priv->dev,
+ TX_BD_RING_LEN * sizeof(struct qe_bd *),
+ &priv->dma_tx_bd, GFP_KERNEL);
+
+ if (!priv->tx_bd_base) {
+ dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n");
+ ret = -ENOMEM;
+ goto free_rx_bd;
+ }
+
+ /* Alloc parameter ram for ucc hdlc */
+ priv->ucc_pram_offset = qe_muram_alloc(sizeof(priv->ucc_pram),
+ ALIGNMENT_OF_UCC_HDLC_PRAM);
+
+ if (priv->ucc_pram_offset < 0) {
+ dev_err(priv->dev, "Can not allocate MURAM for hdlc prameter.\n");
+ ret = -ENOMEM;
+ goto free_tx_bd;
+ }
+
+ priv->rx_skbuff = kzalloc(priv->rx_ring_size * sizeof(*priv->rx_skbuff),
+ GFP_KERNEL);
+ if (!priv->rx_skbuff) {
+ ret = -ENOMEM;
+ goto free_ucc_pram;
+ }
+
+ priv->tx_skbuff = kzalloc(priv->tx_ring_size * sizeof(*priv->tx_skbuff),
+ GFP_KERNEL);
+ if (!priv->tx_skbuff) {
+ ret = -ENOMEM;
+ goto free_rx_skbuff;
+ }
+
+ priv->skb_curtx = 0;
+ priv->skb_dirtytx = 0;
+ priv->curtx_bd = priv->tx_bd_base;
+ priv->dirty_tx = priv->tx_bd_base;
+ priv->currx_bd = priv->rx_bd_base;
+ priv->currx_bdnum = 0;
+
+ /* init parameter base */
+ cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
+ ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
+ QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
+
+ priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
+ qe_muram_addr(priv->ucc_pram_offset);
+
+ /* Zero out parameter ram */
+ memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param));
+
+ /* Alloc riptr, tiptr */
+ riptr = qe_muram_alloc(32, 32);
+ if (riptr < 0) {
+ dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
+ ret = -ENOMEM;
+ goto free_tx_skbuff;
+ }
+
+ tiptr = qe_muram_alloc(32, 32);
+ if (tiptr < 0) {
+ dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
+ ret = -ENOMEM;
+ goto free_riptr;
+ }
+
+ /* Set RIPTR, TIPTR */
+ iowrite16be(riptr, &priv->ucc_pram->riptr);
+ iowrite16be(tiptr, &priv->ucc_pram->tiptr);
+
+ /* Set MRBLR */
+ iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr);
+
+ /* Set RBASE, TBASE */
+ iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase);
+ iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase);
+
+ /* Set RSTATE, TSTATE */
+ iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate);
+ iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate);
+
+ /* Set C_MASK, C_PRES for 16bit CRC */
+ iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask);
+ iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);
+
+ iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
+ iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
+ iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
+ iowrite16be(DEFAULT_ADDR_MASK, &priv->ucc_pram->hmask);
+ iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
+ iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
+ iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
+ iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);
+
+ /* Get BD buffer */
+ bd_buffer = dma_alloc_coherent(priv->dev,
+ (RX_BD_RING_LEN + TX_BD_RING_LEN) *
+ MAX_RX_BUF_LENGTH,
+ &bd_dma_addr, GFP_KERNEL);
+
+ if (!bd_buffer) {
+ dev_err(priv->dev, "Could not allocate buffer descriptors\n");
+ ret = -ENOMEM;
+ goto free_tiptr;
+ }
+
+ memset(bd_buffer, 0, (RX_BD_RING_LEN + TX_BD_RING_LEN)
+ * MAX_RX_BUF_LENGTH);
+
+ priv->rx_buffer = bd_buffer;
+ priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
+
+ priv->dma_rx_addr = bd_dma_addr;
+ priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
+
+ for (i = 0; i < RX_BD_RING_LEN; i++) {
+ if (i < (RX_BD_RING_LEN - 1))
+ bd_status = R_E_S | R_I_S;
+ else
+ bd_status = R_E_S | R_I_S | R_W_S;
+
+ iowrite16be(bd_status, &priv->rx_bd_base[i].status);
+ iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
+ &priv->rx_bd_base[i].buf);
+ }
+
+ for (i = 0; i < TX_BD_RING_LEN; i++) {
+ if (i < (TX_BD_RING_LEN - 1))
+ bd_status = T_I_S | T_TC_S;
+ else
+ bd_status = T_I_S | T_TC_S | T_W_S;
+
+ iowrite16be(bd_status, &priv->tx_bd_base[i].status);
+ iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
+ &priv->tx_bd_base[i].buf);
+ }
+
+ return 0;
+
+free_tiptr:
+ qe_muram_free(tiptr);
+free_riptr:
+ qe_muram_free(riptr);
+free_tx_skbuff:
+ kfree(priv->tx_skbuff);
+free_rx_skbuff:
+ kfree(priv->rx_skbuff);
+free_ucc_pram:
+ qe_muram_free(priv->ucc_pram_offset);
+free_tx_bd:
+ dma_free_coherent(priv->dev,
+ TX_BD_RING_LEN * sizeof(struct qe_bd),
+ priv->tx_bd_base, priv->dma_tx_bd);
+free_rx_bd:
+ dma_free_coherent(priv->dev,
+ RX_BD_RING_LEN * sizeof(struct qe_bd),
+ priv->rx_bd_base, priv->dma_rx_bd);
+free_uccf:
+ ucc_fast_free(priv->uccf);
+
+ return ret;
+}
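/* Editor's note (added comment, not part of the patch): uhdlc_init()
 * above leaves both rings with only the final BD carrying the wrap bit,
 * so the QE SDMA engine circles back to the base descriptor:
 *
 *   Rx: 31 x (R_E_S | R_I_S), then R_E_S | R_I_S | R_W_S
 *   Tx: 15 x (T_I_S | T_TC_S), then T_I_S | T_TC_S | T_W_S
 *
 * with BD i pointing at buffer base + i * MAX_RX_BUF_LENGTH.
 */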
+
+static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
+ struct qe_bd __iomem *bd;
+ u16 bd_status;
+ unsigned long flags;
+ u8 *send_buf;
+ int i;
+ u16 *proto_head;
+
+ switch (dev->type) {
+ case ARPHRD_RAWHDLC:
+ if (skb_headroom(skb) < HDLC_HEAD_LEN) {
+ dev->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ netdev_err(dev, "No enough space for hdlc head\n");
+ return NETDEV_TX_OK;
+ }
+
+ skb_push(skb, HDLC_HEAD_LEN);
+
+ proto_head = (u16 *)skb->data;
+ *proto_head = htons(DEFAULT_HDLC_HEAD);
+
+ dev->stats.tx_bytes += skb->len;
+ break;
+
+ case ARPHRD_PPP:
+ proto_head = (u16 *)skb->data;
+ if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
+ dev->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ netdev_err(dev, "Wrong ppp header\n");
+ return NETDEV_TX_OK;
+ }
+
+ dev->stats.tx_bytes += skb->len;
+ break;
+
+ default:
+ dev->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ pr_info("Tx data skb->len:%d ", skb->len);
+ send_buf = (u8 *)skb->data;
+ pr_info("\nTransmitted data:\n");
+ for (i = 0; i < 16; i++) {
+ if (i == skb->len)
+ pr_info("++++");
+ else
+ pr_info("%02x\n", send_buf[i]);
+ }
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Start from the next BD that should be filled */
+ bd = priv->curtx_bd;
+ bd_status = ioread16be(&bd->status);
+ /* Save the skb pointer so we can free it later */
+ priv->tx_skbuff[priv->skb_curtx] = skb;
+
+ /* Update the current skb pointer (wrapping if this was the last) */
+ priv->skb_curtx =
+ (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);
+
+ /* copy skb data to tx buffer for sdma processing */
+ memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
+ skb->data, skb->len);
+
+ /* set bd length, then status (the ready bit must be written last) */
+ bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;
+
+ iowrite16be(skb->len, &bd->length);
+ iowrite16be(bd_status, &bd->status);
+
+ /* Move to next BD in the ring */
+ if (!(bd_status & T_W_S))
+ bd += 1;
+ else
+ bd = priv->tx_bd_base;
+
+ if (bd == priv->dirty_tx) {
+ if (!netif_queue_stopped(dev))
+ netif_stop_queue(dev);
+ }
+
+ priv->curtx_bd = bd;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+static int hdlc_tx_done(struct ucc_hdlc_private *priv)
+{
+ /* Start from the next BD that should be filled */
+ struct net_device *dev = priv->ndev;
+ struct qe_bd *bd; /* BD pointer */
+ u16 bd_status;
+
+ bd = priv->dirty_tx;
+ bd_status = ioread16be(&bd->status);
+
+ /* Normal processing. */
+ while ((bd_status & T_R_S) == 0) {
+ struct sk_buff *skb;
+
+ /* BD contains already transmitted buffer. */
+ /* Handle the transmitted buffer and release */
+ /* the BD to be used with the current frame */
+
+ skb = priv->tx_skbuff[priv->skb_dirtytx];
+ if (!skb)
+ break;
+ pr_info("TxBD: %x\n", bd_status);
+ dev->stats.tx_packets++;
+ memset(priv->tx_buffer +
+ (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
+ 0, skb->len);
+ dev_kfree_skb_irq(skb);
+
+ priv->tx_skbuff[priv->skb_dirtytx] = NULL;
+ priv->skb_dirtytx =
+ (priv->skb_dirtytx +
+ 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);
+
+ /* We freed a buffer, so now we can restart transmission */
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+
+ /* Advance the confirmation BD pointer */
+ if (!(bd_status & T_W_S))
+ bd += 1;
+ else
+ bd = priv->tx_bd_base;
+ bd_status = ioread16be(&bd->status);
+ }
+ priv->dirty_tx = bd;
+
+ return 0;
+}
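/* Editor's note (added comment, not part of the patch): curtx_bd is the
 * producer pointer advanced in ucc_hdlc_tx() and dirty_tx the consumer
 * advanced in hdlc_tx_done() above; the queue is stopped when the
 * producer wraps onto the consumer and woken again once a BD has been
 * reclaimed.
 */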
+
+static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
+{
+ struct net_device *dev = priv->ndev;
+ struct sk_buff *skb;
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ struct qe_bd *bd;
+ u32 bd_status;
+ u16 length, howmany = 0;
+ u8 *bdbuffer;
+ int i;
+ static int entry;
+
+ bd = priv->currx_bd;
+ bd_status = ioread16be(&bd->status);
+
+ /* while there are received buffers and BD is full (~R_E) */
+ while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
+ if (bd_status & R_OV_S)
+ dev->stats.rx_over_errors++;
+ if (bd_status & R_CR_S) {
+#ifdef BROKEN_FRAME_INFO
+ pr_info("Broken Frame with RxBD: %x\n", bd_status);
+#endif
+ dev->stats.rx_crc_errors++;
+ dev->stats.rx_dropped++;
+ goto recycle;
+ }
+ bdbuffer = priv->rx_buffer +
+ (priv->currx_bdnum * MAX_RX_BUF_LENGTH);
+ length = ioread16be(&bd->length);
+
+ pr_info("Received data length:%d", length);
+ pr_info("while entry times:%d", entry++);
+
+ pr_info("\nReceived data:\n");
+ for (i = 0; (i < 16); i++) {
+ if (i == length)
+ pr_info("++++");
+ else
+ pr_info("%02x\n", bdbuffer[i]);
+ }
+
+ switch (dev->type) {
+ case ARPHRD_RAWHDLC:
+ bdbuffer += HDLC_HEAD_LEN;
+ length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE);
+
+ skb = dev_alloc_skb(length);
+ if (!skb) {
+ dev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+
+ skb_put(skb, length);
+ skb->len = length;
+ skb->dev = dev;
+ memcpy(skb->data, bdbuffer, length);
+ break;
+
+ case ARPHRD_PPP:
+ length -= HDLC_CRC_SIZE;
+
+ skb = dev_alloc_skb(length);
+ if (!skb) {
+ dev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+
+ skb_put(skb, length);
+ skb->len = length;
+ skb->dev = dev;
+ memcpy(skb->data, bdbuffer, length);
+ break;
+
+ default:
+ dev->stats.rx_dropped++;
+ goto recycle;
+ }
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
+ howmany++;
+ if (hdlc->proto)
+ skb->protocol = hdlc_type_trans(skb, dev);
+ pr_info("skb->protocol:%x\n", skb->protocol);
+ netif_receive_skb(skb);
+
+recycle:
+ iowrite16be(bd_status | R_E_S | R_I_S, &bd->status);
+
+ /* update to point at the next bd */
+ if (bd_status & R_W_S) {
+ priv->currx_bdnum = 0;
+ bd = priv->rx_bd_base;
+ } else {
+ if (priv->currx_bdnum < (RX_BD_RING_LEN - 1))
+ priv->currx_bdnum += 1;
+ else
+ priv->currx_bdnum = RX_BD_RING_LEN - 1;
+
+ bd += 1;
+ }
+
+ bd_status = ioread16be(&bd->status);
+ }
+
+ priv->currx_bd = bd;
+ return howmany;
+}
+
+static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
+{
+ struct ucc_hdlc_private *priv = container_of(napi,
+ struct ucc_hdlc_private,
+ napi);
+ int howmany;
+
+ /* Tx event processing */
+ spin_lock(&priv->lock);
+ hdlc_tx_done(priv);
+ spin_unlock(&priv->lock);
+
+ howmany = 0;
+ howmany += hdlc_rx_done(priv, budget - howmany);
+
+ if (howmany < budget) {
+ napi_complete(napi);
+ qe_setbits32(priv->uccf->p_uccm,
+ (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
+ }
+
+ return howmany;
+}
+
+static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
+{
+ struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
+ struct net_device *dev = priv->ndev;
+ struct ucc_fast_private *uccf;
+ struct ucc_tdm_info *ut_info;
+ u32 ucce;
+ u32 uccm;
+
+ ut_info = priv->ut_info;
+ uccf = priv->uccf;
+
+ ucce = ioread32be(uccf->p_ucce);
+ uccm = ioread32be(uccf->p_uccm);
+ ucce &= uccm;
+ iowrite32be(ucce, uccf->p_ucce);
+ pr_info("irq ucce:%x\n", ucce);
+ if (!ucce)
+ return IRQ_NONE;
+
+ if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) {
+ if (napi_schedule_prep(&priv->napi)) {
+ uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)
+ << 16);
+ iowrite32be(uccm, uccf->p_uccm);
+ __napi_schedule(&priv->napi);
+ }
+ }
+
+ /* Errors and other events */
+ if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
+ dev->stats.rx_errors++;
+ if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
+ dev->stats.tx_errors++;
+
+ return IRQ_HANDLED;
+}
+
+static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ const size_t size = sizeof(te1_settings);
+ te1_settings line;
+ struct ucc_hdlc_private *priv = netdev_priv(dev);
+
+ if (cmd != SIOCWANDEV)
+ return hdlc_ioctl(dev, ifr, cmd);
+
+ switch (ifr->ifr_settings.type) {
+ case IF_GET_IFACE:
+ ifr->ifr_settings.type = IF_IFACE_E1;
+ if (ifr->ifr_settings.size < size) {
+ ifr->ifr_settings.size = size; /* data size wanted */
+ return -ENOBUFS;
+ }
+ memset(&line, 0, sizeof(line));
+ line.clock_type = priv->clocking;
+
+ if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
+ return -EFAULT;
+ return 0;
+
+ default:
+ return hdlc_ioctl(dev, ifr, cmd);
+ }
+}
+
+static int uhdlc_open(struct net_device *dev)
+{
+ u32 cecr_subblock;
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+ struct ucc_hdlc_private *priv = hdlc->priv;
+ struct ucc_tdm *utdm = priv->utdm;
+
+ if (priv->hdlc_busy != 1) {
+ if (request_irq(priv->ut_info->uf_info.irq,
+ ucc_hdlc_irq_handler, 0, "hdlc", priv))
+ return -ENODEV;
+
+ cecr_subblock = ucc_fast_get_qe_cr_subblock(
+ priv->ut_info->uf_info.ucc_num);
+
+ qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
+ QE_CR_PROTOCOL_UNSPECIFIED, 0);
+
+ ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
+
+ /* Enable the TDM port */
+ if (priv->tsa)
+ utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
+
+ priv->hdlc_busy = 1;
+ netif_device_attach(priv->ndev);
+ napi_enable(&priv->napi);
+ netif_start_queue(dev);
+ hdlc_open(dev);
+ }
+
+ return 0;
+}
+
+static void uhdlc_memclean(struct ucc_hdlc_private *priv)
+{
+ qe_muram_free(ioread16be(&priv->ucc_pram->riptr));
+ qe_muram_free(ioread16be(&priv->ucc_pram->tiptr));
+
+ if (priv->rx_bd_base) {
+ dma_free_coherent(priv->dev,
+ RX_BD_RING_LEN * sizeof(struct qe_bd),
+ priv->rx_bd_base, priv->dma_rx_bd);
+
+ priv->rx_bd_base = NULL;
+ priv->dma_rx_bd = 0;
+ }
+
+ if (priv->tx_bd_base) {
+ dma_free_coherent(priv->dev,
+ TX_BD_RING_LEN * sizeof(struct qe_bd),
+ priv->tx_bd_base, priv->dma_tx_bd);
+
+ priv->tx_bd_base = NULL;
+ priv->dma_tx_bd = 0;
+ }
+
+ if (priv->ucc_pram) {
+ qe_muram_free(priv->ucc_pram_offset);
+ priv->ucc_pram = NULL;
+ priv->ucc_pram_offset = 0;
+ }
+
+ kfree(priv->rx_skbuff);
+ priv->rx_skbuff = NULL;
+
+ kfree(priv->tx_skbuff);
+ priv->tx_skbuff = NULL;
+
+ if (priv->uf_regs) {
+ iounmap(priv->uf_regs);
+ priv->uf_regs = NULL;
+ }
+
+ if (priv->uccf) {
+ ucc_fast_free(priv->uccf);
+ priv->uccf = NULL;
+ }
+
+ if (priv->rx_buffer) {
+ dma_free_coherent(priv->dev,
+ RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
+ priv->rx_buffer, priv->dma_rx_addr);
+ priv->rx_buffer = NULL;
+ priv->dma_rx_addr = 0;
+ }
+
+ if (priv->tx_buffer) {
+ dma_free_coherent(priv->dev,
+ TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
+ priv->tx_buffer, priv->dma_tx_addr);
+ priv->tx_buffer = NULL;
+ priv->dma_tx_addr = 0;
+ }
+}
+
+static int uhdlc_close(struct net_device *dev)
+{
+ struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
+ struct ucc_tdm *utdm = priv->utdm;
+ u32 cecr_subblock;
+
+ napi_disable(&priv->napi);
+ cecr_subblock = ucc_fast_get_qe_cr_subblock(
+ priv->ut_info->uf_info.ucc_num);
+
+ qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
+ (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
+ qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock,
+ (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
+
+ if (priv->tsa)
+ utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port);
+
+ ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
+
+ free_irq(priv->ut_info->uf_info.irq, priv);
+ netif_stop_queue(dev);
+ priv->hdlc_busy = 0;
+
+ return 0;
+}
+
+static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
+ unsigned short parity)
+{
+ struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
+
+ if (encoding != ENCODING_NRZ &&
+ encoding != ENCODING_NRZI)
+ return -EINVAL;
+
+ if (parity != PARITY_NONE &&
+ parity != PARITY_CRC32_PR1_CCITT &&
+ parity != PARITY_CRC16_PR1_CCITT)
+ return -EINVAL;
+
+ priv->encoding = encoding;
+ priv->parity = parity;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static void store_clk_config(struct ucc_hdlc_private *priv)
+{
+ struct qe_mux *qe_mux_reg = &qe_immr->qmx;
+
+ /* store si clk */
+ priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
+ priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l);
+
+ /* store si sync */
+ priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr);
+
+ /* store ucc clk */
+ memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32));
+}
+
+static void resume_clk_config(struct ucc_hdlc_private *priv)
+{
+ struct qe_mux *qe_mux_reg = &qe_immr->qmx;
+
+ memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));
+
+ iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h);
+ iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l);
+
+ iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr);
+}
+
+static int uhdlc_suspend(struct device *dev)
+{
+ struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
+ struct ucc_tdm_info *ut_info;
+ struct ucc_fast __iomem *uf_regs;
+
+ if (!priv)
+ return -EINVAL;
+
+ if (!netif_running(priv->ndev))
+ return 0;
+
+ netif_device_detach(priv->ndev);
+ napi_disable(&priv->napi);
+
+ ut_info = priv->ut_info;
+ uf_regs = priv->uf_regs;
+
+ /* backup gumr guemr*/
+ priv->gumr = ioread32be(&uf_regs->gumr);
+ priv->guemr = ioread8(&uf_regs->guemr);
+
+ priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak),
+ GFP_KERNEL);
+ if (!priv->ucc_pram_bak)
+ return -ENOMEM;
+
+ /* backup HDLC parameter */
+ memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram,
+ sizeof(struct ucc_hdlc_param));
+
+ /* store the clk configuration */
+ store_clk_config(priv);
+
+ /* save power */
+ ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
+
+ dev_dbg(dev, "ucc hdlc suspend\n");
+ return 0;
+}
+
+static int uhdlc_resume(struct device *dev)
+{
+ struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
+ struct ucc_tdm *utdm;
+ struct ucc_tdm_info *ut_info;
+ struct ucc_fast __iomem *uf_regs;
+ struct ucc_fast_private *uccf;
+ struct ucc_fast_info *uf_info;
+ int ret, i;
+ u32 cecr_subblock;
+ u16 bd_status;
+
+ if (!priv)
+ return -EINVAL;
+
+ if (!netif_running(priv->ndev))
+ return 0;
+
+ utdm = priv->utdm;
+ ut_info = priv->ut_info;
+ uf_info = &ut_info->uf_info;
+ uf_regs = priv->uf_regs;
+ uccf = priv->uccf;
+
+ /* restore gumr guemr */
+ iowrite8(priv->guemr, &uf_regs->guemr);
+ iowrite32be(priv->gumr, &uf_regs->gumr);
+
+ /* Set Virtual Fifo registers */
+ iowrite16be(uf_info->urfs, &uf_regs->urfs);
+ iowrite16be(uf_info->urfet, &uf_regs->urfet);
+ iowrite16be(uf_info->urfset, &uf_regs->urfset);
+ iowrite16be(uf_info->utfs, &uf_regs->utfs);
+ iowrite16be(uf_info->utfet, &uf_regs->utfet);
+ iowrite16be(uf_info->utftt, &uf_regs->utftt);
+ /* utfb, urfb are offsets from MURAM base */
+ iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
+ iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);
+
+ /* Rx Tx and sync clock routing */
+ resume_clk_config(priv);
+
+ iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
+ iowrite32be(0xffffffff, &uf_regs->ucce);
+
+ ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
+
+ /* rebuild SIRAM */
+ if (priv->tsa)
+ ucc_tdm_init(priv->utdm, priv->ut_info);
+
+ /* Write to QE CECR, UCCx channel to Stop Transmission */
+ cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
+ ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
+ (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
+
+ /* Set UPSMR normal mode */
+ iowrite32be(0, &uf_regs->upsmr);
+
+ /* init parameter base */
+ cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
+ ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
+ QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
+
+ priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
+ qe_muram_addr(priv->ucc_pram_offset);
+
+ /* restore ucc parameter */
+ memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak,
+ sizeof(struct ucc_hdlc_param));
+ kfree(priv->ucc_pram_bak);
+
+ /* rebuild BD entry */
+ for (i = 0; i < RX_BD_RING_LEN; i++) {
+ if (i < (RX_BD_RING_LEN - 1))
+ bd_status = R_E_S | R_I_S;
+ else
+ bd_status = R_E_S | R_I_S | R_W_S;
+
+ iowrite16be(bd_status, &priv->rx_bd_base[i].status);
+ iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
+ &priv->rx_bd_base[i].buf);
+ }
+
+ for (i = 0; i < TX_BD_RING_LEN; i++) {
+ if (i < (TX_BD_RING_LEN - 1))
+ bd_status = T_I_S | T_TC_S;
+ else
+ bd_status = T_I_S | T_TC_S | T_W_S;
+
+ iowrite16be(bd_status, &priv->tx_bd_base[i].status);
+ iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
+ &priv->tx_bd_base[i].buf);
+ }
+
+ /* if hdlc is busy enable TX and RX */
+ if (priv->hdlc_busy == 1) {
+ cecr_subblock = ucc_fast_get_qe_cr_subblock(
+ priv->ut_info->uf_info.ucc_num);
+
+ qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
+ (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
+
+ ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
+
+ /* Enable the TDM port */
+ if (priv->tsa)
+ utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
+ }
+
+ napi_enable(&priv->napi);
+ netif_device_attach(priv->ndev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops uhdlc_pm_ops = {
+ .suspend = uhdlc_suspend,
+ .resume = uhdlc_resume,
+ .freeze = uhdlc_suspend,
+ .thaw = uhdlc_resume,
+};
+
+#define HDLC_PM_OPS (&uhdlc_pm_ops)
+
+#else
+
+#define HDLC_PM_OPS NULL
+
+#endif
+static const struct net_device_ops uhdlc_ops = {
+ .ndo_open = uhdlc_open,
+ .ndo_stop = uhdlc_close,
+ .ndo_change_mtu = hdlc_change_mtu,
+ .ndo_start_xmit = hdlc_start_xmit,
+ .ndo_do_ioctl = uhdlc_ioctl,
+};
+
+static int ucc_hdlc_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct ucc_hdlc_private *uhdlc_priv = NULL;
+ struct ucc_tdm_info *ut_info;
+ struct ucc_tdm *utdm;
+ struct resource res;
+ struct net_device *dev;
+ hdlc_device *hdlc;
+ int ucc_num;
+ const char *sprop;
+ int ret;
+ u32 val;
+
+ ret = of_property_read_u32_index(np, "cell-index", 0, &val);
+ if (ret) {
+ dev_err(&pdev->dev, "Invalid ucc property\n");
+ return -ENODEV;
+ }
+
+ ucc_num = val - 1;
+ if ((ucc_num > 3) || (ucc_num < 0)) {
+ dev_err(&pdev->dev, ": Invalid UCC num\n");
+ return -EINVAL;
+ }
+
+ memcpy(&utdm_info[ucc_num], &utdm_primary_info,
+ sizeof(utdm_primary_info));
+
+ ut_info = &utdm_info[ucc_num];
+ ut_info->uf_info.ucc_num = ucc_num;
+
+ sprop = of_get_property(np, "rx-clock-name", NULL);
+ if (sprop) {
+ ut_info->uf_info.rx_clock = qe_clock_source(sprop);
+ if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) ||
+ (ut_info->uf_info.rx_clock > QE_CLK24)) {
+ dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
+ return -EINVAL;
+ }
+ } else {
+ dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
+ return -EINVAL;
+ }
+
+ sprop = of_get_property(np, "tx-clock-name", NULL);
+ if (sprop) {
+ ut_info->uf_info.tx_clock = qe_clock_source(sprop);
+ if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) ||
+ (ut_info->uf_info.tx_clock > QE_CLK24)) {
+ dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
+ return -EINVAL;
+ }
+ } else {
+ dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
+ return -EINVAL;
+ }
+
+ /* use the same clock when work in loopback */
+ if (ut_info->uf_info.rx_clock == ut_info->uf_info.tx_clock)
+ qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret)
+ return -EINVAL;
+
+ ut_info->uf_info.regs = res.start;
+ ut_info->uf_info.irq = irq_of_parse_and_map(np, 0);
+
+ uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL);
+ if (!uhdlc_priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, uhdlc_priv);
+ uhdlc_priv->dev = &pdev->dev;
+ uhdlc_priv->ut_info = ut_info;
+
+ if (of_get_property(np, "fsl,tdm-interface", NULL))
+ uhdlc_priv->tsa = 1;
+
+ if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
+ uhdlc_priv->loopback = 1;
+
+ if (uhdlc_priv->tsa == 1) {
+ utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
+ if (!utdm) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n");
+ goto free_uhdlc_priv;
+ }
+ uhdlc_priv->utdm = utdm;
+ ret = ucc_of_parse_tdm(np, utdm, ut_info);
+ if (ret)
+ goto free_utdm;
+ }
+
+ ret = uhdlc_init(uhdlc_priv);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to init uhdlc\n");
+ goto free_utdm;
+ }
+
+ dev = alloc_hdlcdev(uhdlc_priv);
+ if (!dev) {
+ ret = -ENOMEM;
+ pr_err("ucc_hdlc: unable to allocate memory\n");
+ goto undo_uhdlc_init;
+ }
+
+ uhdlc_priv->ndev = dev;
+ hdlc = dev_to_hdlc(dev);
+ dev->tx_queue_len = 16;
+ dev->netdev_ops = &uhdlc_ops;
+ hdlc->attach = ucc_hdlc_attach;
+ hdlc->xmit = ucc_hdlc_tx;
+ netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
+ if (register_hdlc_device(dev)) {
+ ret = -ENOBUFS;
+ pr_err("ucc_hdlc: unable to register hdlc device\n");
+ goto free_dev;
+ }
+
+ return 0;
+
+free_dev:
+ free_netdev(dev);
+undo_uhdlc_init:
+free_utdm:
+ if (uhdlc_priv->tsa)
+ kfree(utdm);
+free_uhdlc_priv:
+ kfree(uhdlc_priv);
+ return ret;
+}
+
+static int ucc_hdlc_remove(struct platform_device *pdev)
+{
+ struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev);
+
+ uhdlc_memclean(priv);
+
+ if (priv->utdm->si_regs) {
+ iounmap(priv->utdm->si_regs);
+ priv->utdm->si_regs = NULL;
+ }
+
+ if (priv->utdm->siram) {
+ iounmap(priv->utdm->siram);
+ priv->utdm->siram = NULL;
+ }
+ kfree(priv);
+
+ dev_info(&pdev->dev, "UCC based hdlc module removed\n");
+
+ return 0;
+}
+
+static const struct of_device_id fsl_ucc_hdlc_of_match[] = {
+ {
+ .compatible = "fsl,ucc-hdlc",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match);
+
+static struct platform_driver ucc_hdlc_driver = {
+ .probe = ucc_hdlc_probe,
+ .remove = ucc_hdlc_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .pm = HDLC_PM_OPS,
+ .of_match_table = fsl_ucc_hdlc_of_match,
+ },
+};
+
+module_platform_driver(ucc_hdlc_driver);
diff --git a/drivers/net/wan/fsl_ucc_hdlc.h b/drivers/net/wan/fsl_ucc_hdlc.h
new file mode 100644
index 000000000..881ecdeef
--- /dev/null
+++ b/drivers/net/wan/fsl_ucc_hdlc.h
@@ -0,0 +1,147 @@
+/* Freescale QUICC Engine HDLC Device Driver
+ *
+ * Copyright 2014 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _UCC_HDLC_H_
+#define _UCC_HDLC_H_
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+
+#include <soc/fsl/qe/ucc.h>
+#include <soc/fsl/qe/ucc_fast.h>
+
+/* UCC HDLC event register */
+#define UCCE_HDLC_RX_EVENTS \
+(UCC_HDLC_UCCE_RXF | UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_BSY)
+#define UCCE_HDLC_TX_EVENTS (UCC_HDLC_UCCE_TXB | UCC_HDLC_UCCE_TXE)
+
+struct ucc_hdlc_param {
+ __be16 riptr;
+ __be16 tiptr;
+ __be16 res0;
+ __be16 mrblr;
+ __be32 rstate;
+ __be32 rbase;
+ __be16 rbdstat;
+ __be16 rbdlen;
+ __be32 rdptr;
+ __be32 tstate;
+ __be32 tbase;
+ __be16 tbdstat;
+ __be16 tbdlen;
+ __be32 tdptr;
+ __be32 rbptr;
+ __be32 tbptr;
+ __be32 rcrc;
+ __be32 res1;
+ __be32 tcrc;
+ __be32 res2;
+ __be32 res3;
+ __be32 c_mask;
+ __be32 c_pres;
+ __be16 disfc;
+ __be16 crcec;
+ __be16 abtsc;
+ __be16 nmarc;
+ __be32 max_cnt;
+ __be16 mflr;
+ __be16 rfthr;
+ __be16 rfcnt;
+ __be16 hmask;
+ __be16 haddr1;
+ __be16 haddr2;
+ __be16 haddr3;
+ __be16 haddr4;
+ __be16 ts_tmp;
+ __be16 tmp_mb;
+};
+
+struct ucc_hdlc_private {
+ struct ucc_tdm *utdm;
+ struct ucc_tdm_info *ut_info;
+ struct ucc_fast_private *uccf;
+ struct device *dev;
+ struct net_device *ndev;
+ struct napi_struct napi;
+ struct ucc_fast __iomem *uf_regs; /* UCC Fast registers */
+ struct ucc_hdlc_param __iomem *ucc_pram;
+ u16 tsa;
+ bool hdlc_busy;
+ bool loopback;
+
+ u8 *tx_buffer;
+ u8 *rx_buffer;
+ dma_addr_t dma_tx_addr;
+ dma_addr_t dma_rx_addr;
+
+ struct qe_bd *tx_bd_base;
+ struct qe_bd *rx_bd_base;
+ dma_addr_t dma_tx_bd;
+ dma_addr_t dma_rx_bd;
+ struct qe_bd *curtx_bd;
+ struct qe_bd *currx_bd;
+ struct qe_bd *dirty_tx;
+ u16 currx_bdnum;
+
+ struct sk_buff **tx_skbuff;
+ struct sk_buff **rx_skbuff;
+ u16 skb_curtx;
+ u16 skb_currx;
+ unsigned short skb_dirtytx;
+
+ unsigned short tx_ring_size;
+ unsigned short rx_ring_size;
+ s32 ucc_pram_offset;
+
+ unsigned short encoding;
+ unsigned short parity;
+ u32 clocking;
+ spinlock_t lock; /* lock for Tx BD and Tx buffer */
+#ifdef CONFIG_PM
+ struct ucc_hdlc_param *ucc_pram_bak;
+ u32 gumr;
+ u8 guemr;
+ u32 cmxsi1cr_l, cmxsi1cr_h;
+ u32 cmxsi1syr;
+ u32 cmxucr[4];
+#endif
+};
+
+#define TX_BD_RING_LEN 0x10
+#define RX_BD_RING_LEN 0x20
+#define RX_CLEAN_MAX 0x10
+#define NUM_OF_BUF 4
+#define MAX_RX_BUF_LENGTH (48 * 0x20)
+#define MAX_FRAME_LENGTH (MAX_RX_BUF_LENGTH + 8)
+#define ALIGNMENT_OF_UCC_HDLC_PRAM 64
+#define SI_BANK_SIZE 128
+#define MAX_HDLC_NUM 4
+#define HDLC_HEAD_LEN 2
+#define HDLC_CRC_SIZE 2
+#define TX_RING_MOD_MASK(size) (size - 1)
+#define RX_RING_MOD_MASK(size) (size - 1)
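/* Editor's note (added comment, not part of the patch): the (size - 1)
 * masks above assume power-of-two ring lengths, which TX_BD_RING_LEN
 * (0x10) and RX_BD_RING_LEN (0x20) satisfy; e.g. in ucc_hdlc_tx(),
 * (15 + 1) & TX_RING_MOD_MASK(16) wraps the Tx index back to 0.
 */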
+
+#define HDLC_HEAD_MASK 0x0000
+#define DEFAULT_HDLC_HEAD 0xff44
+#define DEFAULT_ADDR_MASK 0x00ff
+#define DEFAULT_HDLC_ADDR 0x00ff
+
+#define BMR_GBL 0x20000000
+#define BMR_BIG_ENDIAN 0x10000000
+#define CRC_16BIT_MASK 0x0000F0B8
+#define CRC_16BIT_PRES 0x0000FFFF
+#define DEFAULT_RFTHR 1
+
+#define DEFAULT_PPP_HEAD 0xff03
+
+#endif
diff --git a/drivers/net/wan/slic_ds26522.c b/drivers/net/wan/slic_ds26522.c
new file mode 100644
index 000000000..d06a887a2
--- /dev/null
+++ b/drivers/net/wan/slic_ds26522.c
@@ -0,0 +1,255 @@
+/*
+ * drivers/net/wan/slic_ds26522.c
+ *
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ *
+ * Author: Zhao Qiang <qiang.zhao@nxp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/bitrev.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+#include <linux/spi/spi.h>
+#include <linux/wait.h>
+#include <linux/param.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include "slic_ds26522.h"
+
+#define DRV_NAME "ds26522"
+
+#define SLIC_TRANS_LEN 1
+#define SLIC_TWO_LEN 2
+#define SLIC_THREE_LEN 3
+
+static struct spi_device *g_spi;
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Zhao Qiang<B45475@freescale.com>");
+
+/* the read/write format of address is
+ * w/r|A13|A12|A11|A10|A9|A8|A7|A6|A5|A4|A3|A2|A1|A0|x
+ */
+static void slic_write(struct spi_device *spi, u16 addr,
+ u8 data)
+{
+ u8 temp[3];
+
+ addr = bitrev16(addr) >> 1;
+ data = bitrev8(data);
+ temp[0] = (u8)((addr >> 8) & 0x7f);
+ temp[1] = (u8)(addr & 0xfe);
+ temp[2] = data;
+
+ /* write spi addr and value */
+ spi_write(spi, &temp[0], SLIC_THREE_LEN);
+}
+
+static u8 slic_read(struct spi_device *spi, u16 addr)
+{
+ u8 temp[2];
+ u8 data;
+
+ addr = bitrev16(addr) >> 1;
+ temp[0] = (u8)(((addr >> 8) & 0x7f) | 0x80);
+ temp[1] = (u8)(addr & 0xfe);
+
+ spi_write_then_read(spi, &temp[0], SLIC_TWO_LEN, &data,
+ SLIC_TRANS_LEN);
+
+ data = bitrev8(data);
+ return data;
+}
+
+static bool get_slic_product_code(struct spi_device *spi)
+{
+ u8 device_id;
+
+ device_id = slic_read(spi, DS26522_IDR_ADDR);
+ if ((device_id & 0xf8) == 0x68)
+ return true;
+ else
+ return false;
+}
+
+static void ds26522_e1_spec_config(struct spi_device *spi)
+{
+ /* Receive E1 Mode, Framer Disabled */
+ slic_write(spi, DS26522_RMMR_ADDR, DS26522_RMMR_E1);
+
+ /* Transmit E1 Mode, Framer Disabled */
+ slic_write(spi, DS26522_TMMR_ADDR, DS26522_TMMR_E1);
+
+ /* Receive E1 Mode Framer Enable */
+ slic_write(spi, DS26522_RMMR_ADDR,
+ slic_read(spi, DS26522_RMMR_ADDR) | DS26522_RMMR_FRM_EN);
+
+ /* Transmit E1 Mode Framer Enable */
+ slic_write(spi, DS26522_TMMR_ADDR,
+ slic_read(spi, DS26522_TMMR_ADDR) | DS26522_TMMR_FRM_EN);
+
+ /* RCR1, receive E1 HDB3 & CCS */
+ slic_write(spi, DS26522_RCR1_ADDR,
+ DS26522_RCR1_E1_HDB3 | DS26522_RCR1_E1_CCS);
+
+ /* RSYSCLK=2.048MHz, RSYNC-Output */
+ slic_write(spi, DS26522_RIOCR_ADDR,
+ DS26522_RIOCR_2048KHZ | DS26522_RIOCR_RSIO_OUT);
+
+ /* TCR1 Transmit E1 b8zs */
+ slic_write(spi, DS26522_TCR1_ADDR, DS26522_TCR1_TB8ZS);
+
+ /* TSYSCLK=2.048MHz, TSYNC-Output */
+ slic_write(spi, DS26522_TIOCR_ADDR,
+ DS26522_TIOCR_2048KHZ | DS26522_TIOCR_TSIO_OUT);
+
+ /* Set E1TAF */
+ slic_write(spi, DS26522_E1TAF_ADDR, DS26522_E1TAF_DEFAULT);
+
+ /* Set E1TNAF register */
+ slic_write(spi, DS26522_E1TNAF_ADDR, DS26522_E1TNAF_DEFAULT);
+
+ /* Receive E1 Mode Framer Enable & init Done */
+ slic_write(spi, DS26522_RMMR_ADDR, slic_read(spi, DS26522_RMMR_ADDR) |
+ DS26522_RMMR_INIT_DONE);
+
+ /* Transmit E1 Mode Framer Enable & init Done */
+ slic_write(spi, DS26522_TMMR_ADDR, slic_read(spi, DS26522_TMMR_ADDR) |
+ DS26522_TMMR_INIT_DONE);
+
+ /* Configure LIU E1 mode */
+ slic_write(spi, DS26522_LTRCR_ADDR, DS26522_LTRCR_E1);
+
+ /* E1 Mode default 75 ohm w/Transmit Impedance Matching */
+ slic_write(spi, DS26522_LTITSR_ADDR,
+ DS26522_LTITSR_TLIS_75OHM | DS26522_LTITSR_LBOS_75OHM);
+
+ /* E1 Mode default 75 ohm Long Haul w/Receive Impedance Matching */
+ slic_write(spi, DS26522_LRISMR_ADDR,
+ DS26522_LRISMR_75OHM | DS26522_LRISMR_MAX);
+
+ /* Enable Transmit output */
+ slic_write(spi, DS26522_LMCR_ADDR, DS26522_LMCR_TE);
+}
+
+static int slic_ds26522_init_configure(struct spi_device *spi)
+{
+ u16 addr;
+
+ /* set clock */
+ slic_write(spi, DS26522_GTCCR_ADDR, DS26522_GTCCR_BPREFSEL_REFCLKIN |
+ DS26522_GTCCR_BFREQSEL_2048KHZ |
+ DS26522_GTCCR_FREQSEL_2048KHZ);
+ slic_write(spi, DS26522_GTCR2_ADDR, DS26522_GTCR2_TSSYNCOUT);
+ slic_write(spi, DS26522_GFCR_ADDR, DS26522_GFCR_BPCLK_2048KHZ);
+
+ /* set gtcr */
+ slic_write(spi, DS26522_GTCR1_ADDR, DS26522_GTCR1);
+
+ /* Global LIU Software Reset Register */
+ slic_write(spi, DS26522_GLSRR_ADDR, DS26522_GLSRR_RESET);
+
+ /* Global Framer and BERT Software Reset Register */
+ slic_write(spi, DS26522_GFSRR_ADDR, DS26522_GFSRR_RESET);
+
+ usleep_range(100, 120);
+
+ slic_write(spi, DS26522_GLSRR_ADDR, DS26522_GLSRR_NORMAL);
+ slic_write(spi, DS26522_GFSRR_ADDR, DS26522_GFSRR_NORMAL);
+
+ /* Perform RX/TX SRESET, reset receiver */
+ slic_write(spi, DS26522_RMMR_ADDR, DS26522_RMMR_SFTRST);
+
+ /* Reset transceiver */
+ slic_write(spi, DS26522_TMMR_ADDR, DS26522_TMMR_SFTRST);
+
+ usleep_range(100, 120);
+
+ /* Zero all Framer Registers */
+ for (addr = DS26522_RF_ADDR_START; addr <= DS26522_RF_ADDR_END;
+ addr++)
+ slic_write(spi, addr, 0);
+
+ for (addr = DS26522_TF_ADDR_START; addr <= DS26522_TF_ADDR_END;
+ addr++)
+ slic_write(spi, addr, 0);
+
+ for (addr = DS26522_LIU_ADDR_START; addr <= DS26522_LIU_ADDR_END;
+ addr++)
+ slic_write(spi, addr, 0);
+
+ for (addr = DS26522_BERT_ADDR_START; addr <= DS26522_BERT_ADDR_END;
+ addr++)
+ slic_write(spi, addr, 0);
+
+ /* setup ds26522 for E1 specification */
+ ds26522_e1_spec_config(spi);
+
+ slic_write(spi, DS26522_GTCR1_ADDR, 0x00);
+
+ return 0;
+}
+
+static int slic_ds26522_remove(struct spi_device *spi)
+{
+ pr_info("DS26522 module uninstalled\n");
+ return 0;
+}
+
+static int slic_ds26522_probe(struct spi_device *spi)
+{
+ int ret = 0;
+
+ g_spi = spi;
+ spi->bits_per_word = 8;
+
+ if (!get_slic_product_code(spi))
+ return ret;
+
+ ret = slic_ds26522_init_configure(spi);
+ if (ret == 0)
+ pr_info("DS26522 cs%d configurated\n", spi->chip_select);
+
+ return ret;
+}
+
+static const struct of_device_id slic_ds26522_match[] = {
+ {
+ .compatible = "maxim,ds26522",
+ },
+ {},
+};
+
+static struct spi_driver slic_ds26522_driver = {
+ .driver = {
+ .name = "ds26522",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ .of_match_table = slic_ds26522_match,
+ },
+ .probe = slic_ds26522_probe,
+ .remove = slic_ds26522_remove,
+};
+
+static int __init slic_ds26522_init(void)
+{
+ return spi_register_driver(&slic_ds26522_driver);
+}
+
+static void __exit slic_ds26522_exit(void)
+{
+ spi_unregister_driver(&slic_ds26522_driver);
+}
+
+module_init(slic_ds26522_init);
+module_exit(slic_ds26522_exit);
diff --git a/drivers/net/wan/slic_ds26522.h b/drivers/net/wan/slic_ds26522.h
new file mode 100644
index 000000000..22aa0ecbd
--- /dev/null
+++ b/drivers/net/wan/slic_ds26522.h
@@ -0,0 +1,134 @@
+/*
+ * drivers/net/wan/slic_ds26522.h
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ *
+ * Author: Zhao Qiang <B45475@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#define DS26522_RF_ADDR_START 0x00
+#define DS26522_RF_ADDR_END 0xef
+#define DS26522_GLB_ADDR_START 0xf0
+#define DS26522_GLB_ADDR_END 0xff
+#define DS26522_TF_ADDR_START 0x100
+#define DS26522_TF_ADDR_END 0x1ef
+#define DS26522_LIU_ADDR_START 0x1000
+#define DS26522_LIU_ADDR_END 0x101f
+#define DS26522_TEST_ADDR_START 0x1008
+#define DS26522_TEST_ADDR_END 0x101f
+#define DS26522_BERT_ADDR_START 0x1100
+#define DS26522_BERT_ADDR_END 0x110f
+
+#define DS26522_RMMR_ADDR 0x80
+#define DS26522_RCR1_ADDR 0x81
+#define DS26522_RCR3_ADDR 0x83
+#define DS26522_RIOCR_ADDR 0x84
+
+#define DS26522_GTCR1_ADDR 0xf0
+#define DS26522_GFCR_ADDR 0xf1
+#define DS26522_GTCR2_ADDR 0xf2
+#define DS26522_GTCCR_ADDR 0xf3
+#define DS26522_GLSRR_ADDR 0xf5
+#define DS26522_GFSRR_ADDR 0xf6
+#define DS26522_IDR_ADDR 0xf8
+
+#define DS26522_E1TAF_ADDR 0x164
+#define DS26522_E1TNAF_ADDR 0x165
+#define DS26522_TMMR_ADDR 0x180
+#define DS26522_TCR1_ADDR 0x181
+#define DS26522_TIOCR_ADDR 0x184
+
+#define DS26522_LTRCR_ADDR 0x1000
+#define DS26522_LTITSR_ADDR 0x1001
+#define DS26522_LMCR_ADDR 0x1002
+#define DS26522_LRISMR_ADDR 0x1007
+
+#define MAX_NUM_OF_CHANNELS 8
+#define PQ_MDS_8E1T1_BRD_REV 0x00
+#define PQ_MDS_8E1T1_PLD_REV 0x00
+
+#define DS26522_GTCCR_BPREFSEL_REFCLKIN 0xa0
+#define DS26522_GTCCR_BFREQSEL_1544KHZ 0x08
+#define DS26522_GTCCR_FREQSEL_1544KHZ 0x04
+#define DS26522_GTCCR_BFREQSEL_2048KHZ 0x00
+#define DS26522_GTCCR_FREQSEL_2048KHZ 0x00
+
+#define DS26522_GFCR_BPCLK_2048KHZ 0x00
+
+#define DS26522_GTCR2_TSSYNCOUT 0x02
+#define DS26522_GTCR1 0x00
+
+#define DS26522_GFSRR_RESET 0x01
+#define DS26522_GFSRR_NORMAL 0x00
+
+#define DS26522_GLSRR_RESET 0x01
+#define DS26522_GLSRR_NORMAL 0x00
+
+#define DS26522_RMMR_SFTRST 0x02
+#define DS26522_RMMR_FRM_EN 0x80
+#define DS26522_RMMR_INIT_DONE 0x40
+#define DS26522_RMMR_T1 0x00
+#define DS26522_RMMR_E1 0x01
+
+#define DS26522_E1TAF_DEFAULT 0x1b
+#define DS26522_E1TNAF_DEFAULT 0x40
+
+#define DS26522_TMMR_SFTRST 0x02
+#define DS26522_TMMR_FRM_EN 0x80
+#define DS26522_TMMR_INIT_DONE 0x40
+#define DS26522_TMMR_T1 0x00
+#define DS26522_TMMR_E1 0x01
+
+#define DS26522_RCR1_T1_SYNCT 0x80
+#define DS26522_RCR1_T1_RB8ZS 0x40
+#define DS26522_RCR1_T1_SYNCC 0x08
+
+#define DS26522_RCR1_E1_HDB3 0x40
+#define DS26522_RCR1_E1_CCS 0x20
+
+#define DS26522_RIOCR_1544KHZ 0x00
+#define DS26522_RIOCR_2048KHZ 0x10
+#define DS26522_RIOCR_RSIO_OUT 0x00
+
+#define DS26522_RCR3_FLB 0x01
+
+#define DS26522_TIOCR_1544KHZ 0x00
+#define DS26522_TIOCR_2048KHZ 0x10
+#define DS26522_TIOCR_TSIO_OUT 0x04
+
+#define DS26522_TCR1_TB8ZS 0x04
+
+#define DS26522_LTRCR_T1 0x02
+#define DS26522_LTRCR_E1 0x00
+
+#define DS26522_LTITSR_TLIS_75OHM 0x00
+#define DS26522_LTITSR_LBOS_75OHM 0x00
+#define DS26522_LTITSR_TLIS_100OHM 0x10
+#define DS26522_LTITSR_TLIS_0DB_CSU 0x00
+
+#define DS26522_LRISMR_75OHM 0x00
+#define DS26522_LRISMR_100OHM 0x10
+#define DS26522_LRISMR_MAX 0x03
+
+#define DS26522_LMCR_TE 0x01
+
+enum line_rate {
+ LINE_RATE_T1, /* T1 line rate (1.544 Mbps) */
+ LINE_RATE_E1 /* E1 line rate (2.048 Mbps) */
+};
+
+enum tdm_trans_mode {
+ NORMAL = 0,
+ FRAMER_LB
+};
+
+enum card_support_type {
+ LM_CARD = 0,
+ DS26522_CARD,
+ NO_CARD
+};
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index bd62bc19e..acec16b9c 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -25,10 +25,9 @@
#include "ahb.h"
static const struct of_device_id ath10k_ahb_of_match[] = {
- /* TODO: enable this entry once everything in place.
- * { .compatible = "qcom,ipq4019-wifi",
- * .data = (void *)ATH10K_HW_QCA4019 },
- */
+ { .compatible = "qcom,ipq4019-wifi",
+ .data = (void *)ATH10K_HW_QCA4019
+ },
{ }
};
@@ -476,6 +475,7 @@ static irqreturn_t ath10k_ahb_interrupt_handler(int irq, void *arg)
static int ath10k_ahb_request_irq_legacy(struct ath10k *ar)
{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
int ret;
@@ -487,6 +487,7 @@ static int ath10k_ahb_request_irq_legacy(struct ath10k *ar)
ar_ahb->irq, ret);
return ret;
}
+ ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
return 0;
}
@@ -918,8 +919,6 @@ int ath10k_ahb_init(void)
{
int ret;
- printk(KERN_ERR "AHB support is still work in progress\n");
-
ret = platform_driver_register(&ath10k_ahb_driver);
if (ret)
printk(KERN_ERR "failed to register ath10k ahb driver: %d\n",
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 697f8d054..d037ecef2 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/of.h>
+#include <asm/byteorder.h>
#include "core.h"
#include "mac.h"
@@ -55,7 +56,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca988x hw2.0",
.patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
- .has_shifted_cc_wraparound = true,
+ .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
@@ -69,6 +70,25 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
},
},
{
+ .id = QCA9887_HW_1_0_VERSION,
+ .dev_id = QCA9887_1_0_DEVICE_ID,
+ .name = "qca9887 hw1.0",
+ .patch_load_addr = QCA9887_HW_1_0_PATCH_LOAD_ADDR,
+ .uart_pin = 7,
+ .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
+ .max_probe_resp_desc_thres = 0,
+ .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
+ .cal_data_len = 2116,
+ .fw = {
+ .dir = QCA9887_HW_1_0_FW_DIR,
+ .board = QCA9887_HW_1_0_BOARD_DATA_FILE,
+ .board_size = QCA9887_BOARD_DATA_SZ,
+ .board_ext_size = QCA9887_BOARD_EXT_DATA_SZ,
+ },
+ },
+ {
.id = QCA6174_HW_2_1_VERSION,
.dev_id = QCA6164_2_1_DEVICE_ID,
.name = "qca6164 hw2.1",
@@ -148,6 +168,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.uart_pin = 7,
.otp_exe_param = 0x00000700,
.continuous_frag_desc = true,
+ .cck_rate_map_rev2 = true,
.channel_counters_freq_hz = 150000,
.max_probe_resp_desc_thres = 24,
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
@@ -163,6 +184,51 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
},
},
{
+ .id = QCA9984_HW_1_0_DEV_VERSION,
+ .dev_id = QCA9984_1_0_DEVICE_ID,
+ .name = "qca9984/qca9994 hw1.0",
+ .patch_load_addr = QCA9984_HW_1_0_PATCH_LOAD_ADDR,
+ .uart_pin = 7,
+ .otp_exe_param = 0x00000700,
+ .continuous_frag_desc = true,
+ .cck_rate_map_rev2 = true,
+ .channel_counters_freq_hz = 150000,
+ .max_probe_resp_desc_thres = 24,
+ .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
+ .tx_chain_mask = 0xf,
+ .rx_chain_mask = 0xf,
+ .max_spatial_stream = 4,
+ .cal_data_len = 12064,
+ .fw = {
+ .dir = QCA9984_HW_1_0_FW_DIR,
+ .board = QCA9984_HW_1_0_BOARD_DATA_FILE,
+ .board_size = QCA99X0_BOARD_DATA_SZ,
+ .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
+ },
+ },
+ {
+ .id = QCA9888_HW_2_0_DEV_VERSION,
+ .dev_id = QCA9888_2_0_DEVICE_ID,
+ .name = "qca9888 hw2.0",
+ .patch_load_addr = QCA9888_HW_2_0_PATCH_LOAD_ADDR,
+ .uart_pin = 7,
+ .otp_exe_param = 0x00000700,
+ .continuous_frag_desc = true,
+ .channel_counters_freq_hz = 150000,
+ .max_probe_resp_desc_thres = 24,
+ .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
+ .tx_chain_mask = 3,
+ .rx_chain_mask = 3,
+ .max_spatial_stream = 2,
+ .cal_data_len = 12064,
+ .fw = {
+ .dir = QCA9888_HW_2_0_FW_DIR,
+ .board = QCA9888_HW_2_0_BOARD_DATA_FILE,
+ .board_size = QCA99X0_BOARD_DATA_SZ,
+ .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
+ },
+ },
+ {
.id = QCA9377_HW_1_0_DEV_VERSION,
.dev_id = QCA9377_1_0_DEVICE_ID,
.name = "qca9377 hw1.0",
@@ -202,9 +268,10 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca4019 hw1.0",
.patch_load_addr = QCA4019_HW_1_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
- .has_shifted_cc_wraparound = true,
+ .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH,
.otp_exe_param = 0x0010000,
.continuous_frag_desc = true,
+ .cck_rate_map_rev2 = true,
.channel_counters_freq_hz = 125000,
.max_probe_resp_desc_thres = 24,
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
@@ -236,6 +303,7 @@ static const char *const ath10k_core_fw_feature_str[] = {
[ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA] = "adaptive-cca",
[ATH10K_FW_FEATURE_MFP_SUPPORT] = "mfp",
[ATH10K_FW_FEATURE_PEER_FLOW_CONTROL] = "peer-flow-ctrl",
+ [ATH10K_FW_FEATURE_BTCOEX_PARAM] = "btcoex-param",
};
static unsigned int ath10k_core_get_fw_feature_str(char *buf,
@@ -531,6 +599,35 @@ out:
return ret;
}
+static int ath10k_download_cal_eeprom(struct ath10k *ar)
+{
+ size_t data_len;
+ void *data = NULL;
+ int ret;
+
+ ret = ath10k_hif_fetch_cal_eeprom(ar, &data, &data_len);
+ if (ret) {
+ if (ret != -EOPNOTSUPP)
+ ath10k_warn(ar, "failed to read calibration data from EEPROM: %d\n",
+ ret);
+ goto out_free;
+ }
+
+ ret = ath10k_download_board_data(ar, data, data_len);
+ if (ret) {
+ ath10k_warn(ar, "failed to download calibration data from EEPROM: %d\n",
+ ret);
+ goto out_free;
+ }
+
+ ret = 0;
+
+out_free:
+ kfree(data);
+
+ return ret;
+}
+
static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
{
u32 result, address;
@@ -1293,7 +1390,17 @@ static int ath10k_download_cal_data(struct ath10k *ar)
}
ath10k_dbg(ar, ATH10K_DBG_BOOT,
- "boot did not find DT entry, try OTP next: %d\n",
+ "boot did not find DT entry, try target EEPROM next: %d\n",
+ ret);
+
+ ret = ath10k_download_cal_eeprom(ar);
+ if (ret == 0) {
+ ar->cal_mode = ATH10K_CAL_MODE_EEPROM;
+ goto done;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot did not find target EEPROM entry, try OTP next: %d\n",
ret);
ret = ath10k_download_and_run_otp(ar);
@@ -1590,7 +1697,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
case ATH10K_FW_WMI_OP_VERSION_10_4:
case ATH10K_FW_WMI_OP_VERSION_UNSET:
case ATH10K_FW_WMI_OP_VERSION_MAX:
- WARN_ON(1);
+ ath10k_err(ar, "htt op version not found in fw meta data\n");
return -EINVAL;
}
}
@@ -1733,6 +1840,16 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map))
val |= WMI_10_4_BSS_CHANNEL_INFO_64;
+ /* 10.4 firmware supports toggling BT-Coex via a pdev param
+ * without reloading the firmware. For the Bluetooth coexistence
+ * pdev param to work, WMI_COEX_GPIO_SUPPORT in the extended
+ * resource config must always be enabled.
+ */
+ if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) &&
+ test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
+ ar->running_fw->fw_file.fw_features))
+ val |= WMI_10_4_COEX_GPIO_SUPPORT;
+
status = ath10k_mac_ext_resource_config(ar, val);
if (status) {
ath10k_err(ar,
@@ -2062,6 +2179,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
switch (hw_rev) {
case ATH10K_HW_QCA988X:
+ case ATH10K_HW_QCA9887:
ar->regs = &qca988x_regs;
ar->hw_values = &qca988x_values;
break;
@@ -2071,9 +2189,14 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
ar->hw_values = &qca6174_values;
break;
case ATH10K_HW_QCA99X0:
+ case ATH10K_HW_QCA9984:
ar->regs = &qca99x0_regs;
ar->hw_values = &qca99x0_values;
break;
+ case ATH10K_HW_QCA9888:
+ ar->regs = &qca99x0_regs;
+ ar->hw_values = &qca9888_values;
+ break;
case ATH10K_HW_QCA4019:
ar->regs = &qca4019_regs;
ar->hw_values = &qca4019_values;
@@ -2159,5 +2282,5 @@ void ath10k_core_destroy(struct ath10k *ar)
EXPORT_SYMBOL(ath10k_core_destroy);
MODULE_AUTHOR("Qualcomm Atheros");
-MODULE_DESCRIPTION("Core module for QCA988X PCIe devices.");
+MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11ac wireless LAN cards.");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 1852e0ee3..30ae5bf81 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -165,6 +165,13 @@ struct ath10k_fw_stats_peer {
u32 rx_duration;
};
+struct ath10k_fw_extd_stats_peer {
+ struct list_head list;
+
+ u8 peer_macaddr[ETH_ALEN];
+ u32 rx_duration;
+};
+
struct ath10k_fw_stats_vdev {
struct list_head list;
@@ -256,9 +263,11 @@ struct ath10k_fw_stats_pdev {
};
struct ath10k_fw_stats {
+ bool extended;
struct list_head pdevs;
struct list_head vdevs;
struct list_head peers;
+ struct list_head peers_extd;
};
#define ATH10K_TPC_TABLE_TYPE_FLAG 1
@@ -535,6 +544,13 @@ enum ath10k_fw_features {
*/
ATH10K_FW_FEATURE_PEER_FLOW_CONTROL = 13,
+ /* Firmware supports toggling BT-Coex via a pdev param without
+ * reloading the firmware. For the Bluetooth coexistence pdev param
+ * to work, WMI_COEX_GPIO_SUPPORT in the extended resource config
+ * must always be enabled. This firmware IE is used to configure
+ * WMI_COEX_GPIO_SUPPORT.
+ */
+ ATH10K_FW_FEATURE_BTCOEX_PARAM = 14,
+
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
@@ -571,6 +587,7 @@ enum ath10k_cal_mode {
ATH10K_CAL_MODE_DT,
ATH10K_PRE_CAL_MODE_FILE,
ATH10K_PRE_CAL_MODE_DT,
+ ATH10K_CAL_MODE_EEPROM,
};
enum ath10k_crypt_mode {
@@ -593,6 +610,8 @@ static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode)
return "pre-cal-file";
case ATH10K_PRE_CAL_MODE_DT:
return "pre-cal-dt";
+ case ATH10K_CAL_MODE_EEPROM:
+ return "eeprom";
}
return "unknown";
@@ -657,6 +676,7 @@ struct ath10k_fw_components {
struct ath10k {
struct ath_common ath_common;
struct ieee80211_hw *hw;
+ struct ieee80211_ops *ops;
struct device *dev;
u8 mac_addr[ETH_ALEN];
@@ -703,12 +723,10 @@ struct ath10k {
int uart_pin;
u32 otp_exe_param;
- /* This is true if given HW chip has a quirky Cycle Counter
- * wraparound which resets to 0x7fffffff instead of 0. All
- * other CC related counters (e.g. Rx Clear Count) are divided
- * by 2 so they never wraparound themselves.
+ /* Type of hw cycle counter wraparound logic; for more info
+ * see enum ath10k_hw_cc_wraparound_type.
*/
- bool has_shifted_cc_wraparound;
+ enum ath10k_hw_cc_wraparound_type cc_wraparound_type;
/* Some of chip expects fragment descriptor to be continuous
* memory for any TX operation. Set continuous_frag_desc flag
@@ -716,6 +734,12 @@ struct ath10k {
*/
bool continuous_frag_desc;
+ /* The CCK hardware rate table mapping was revised for newer
+ * chipsets such as QCA99X0 and QCA4019; the CCK h/w rate values
+ * are now ordered consistently with rate/preamble
+ */
+ bool cck_rate_map_rev2;
+
u32 channel_counters_freq_hz;
/* Mgmt tx descriptors threshold for limiting probe response
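
The new peers_extd list is filled by the WMI stats parser and drained by the free helper shown in debug.c below. A hedged sketch of the producer side; the function name and calling convention here are illustrative, not the driver's actual WMI code.

/* Sketch of how a WMI stats parser would populate the new list; stats
 * parsing runs under ar->data_lock, hence GFP_ATOMIC. Requires
 * <linux/etherdevice.h> for ether_addr_copy().
 */
static int ath10k_extd_peer_stats_append_sketch(struct ath10k_fw_stats *stats,
						const u8 *macaddr,
						u32 rx_duration)
{
	struct ath10k_fw_extd_stats_peer *peer;

	peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
	if (!peer)
		return -ENOMEM;

	ether_addr_copy(peer->peer_macaddr, macaddr);
	peer->rx_duration = rx_duration;
	list_add_tail(&peer->list, &stats->peers_extd);

	return 0;
}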
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index e2511550f..8f0fd41df 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -139,11 +139,11 @@ void ath10k_debug_print_hwfw_info(struct ath10k *ar)
ar->id.subsystem_vendor, ar->id.subsystem_device);
ath10k_info(ar, "kconfig debug %d debugfs %d tracing %d dfs %d testmode %d\n",
- config_enabled(CONFIG_ATH10K_DEBUG),
- config_enabled(CONFIG_ATH10K_DEBUGFS),
- config_enabled(CONFIG_ATH10K_TRACING),
- config_enabled(CONFIG_ATH10K_DFS_CERTIFIED),
- config_enabled(CONFIG_NL80211_TESTMODE));
+ IS_ENABLED(CONFIG_ATH10K_DEBUG),
+ IS_ENABLED(CONFIG_ATH10K_DEBUGFS),
+ IS_ENABLED(CONFIG_ATH10K_TRACING),
+ IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED),
+ IS_ENABLED(CONFIG_NL80211_TESTMODE));
firmware = ar->normal_mode_fw.fw_file.firmware;
if (firmware)
@@ -313,13 +313,25 @@ static void ath10k_fw_stats_peers_free(struct list_head *head)
}
}
+static void ath10k_fw_extd_stats_peers_free(struct list_head *head)
+{
+ struct ath10k_fw_extd_stats_peer *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
static void ath10k_debug_fw_stats_reset(struct ath10k *ar)
{
spin_lock_bh(&ar->data_lock);
ar->debug.fw_stats_done = false;
+ ar->debug.fw_stats.extended = false;
ath10k_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs);
ath10k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
ath10k_fw_stats_peers_free(&ar->debug.fw_stats.peers);
+ ath10k_fw_extd_stats_peers_free(&ar->debug.fw_stats.peers_extd);
spin_unlock_bh(&ar->data_lock);
}
@@ -334,6 +346,7 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
INIT_LIST_HEAD(&stats.pdevs);
INIT_LIST_HEAD(&stats.vdevs);
INIT_LIST_HEAD(&stats.peers);
+ INIT_LIST_HEAD(&stats.peers_extd);
spin_lock_bh(&ar->data_lock);
ret = ath10k_wmi_pull_fw_stats(ar, skb, &stats);
@@ -354,7 +367,7 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
* delivered which is treated as end-of-data and is itself discarded
*/
if (ath10k_peer_stats_enabled(ar))
- ath10k_sta_update_rx_duration(ar, &stats.peers);
+ ath10k_sta_update_rx_duration(ar, &stats);
if (ar->debug.fw_stats_done) {
if (!ath10k_peer_stats_enabled(ar))
@@ -396,6 +409,8 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
list_splice_tail_init(&stats.peers, &ar->debug.fw_stats.peers);
list_splice_tail_init(&stats.vdevs, &ar->debug.fw_stats.vdevs);
+ list_splice_tail_init(&stats.peers_extd,
+ &ar->debug.fw_stats.peers_extd);
}
complete(&ar->debug.fw_stats_complete);
@@ -407,6 +422,7 @@ free:
ath10k_fw_stats_pdevs_free(&stats.pdevs);
ath10k_fw_stats_vdevs_free(&stats.vdevs);
ath10k_fw_stats_peers_free(&stats.peers);
+ ath10k_fw_extd_stats_peers_free(&stats.peers_extd);
spin_unlock_bh(&ar->data_lock);
}
@@ -609,25 +625,23 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
char buf[32];
int ret;
- mutex_lock(&ar->conf_mutex);
-
simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
/* make sure that buf is null terminated */
buf[sizeof(buf) - 1] = 0;
+ /* drop the possible '\n' from the end */
+ if (buf[count - 1] == '\n')
+ buf[count - 1] = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
if (ar->state != ATH10K_STATE_ON &&
ar->state != ATH10K_STATE_RESTARTED) {
ret = -ENETDOWN;
goto exit;
}
- /* drop the possible '\n' from the end */
- if (buf[count - 1] == '\n') {
- buf[count - 1] = 0;
- count--;
- }
-
if (!strcmp(buf, "soft")) {
ath10k_info(ar, "simulating soft firmware crash\n");
ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
@@ -2127,6 +2141,7 @@ static ssize_t ath10k_write_btcoex(struct file *file,
size_t buf_size;
int ret;
bool val;
+ u32 pdev_param;
buf_size = min(count, (sizeof(buf) - 1));
if (copy_from_user(buf, ubuf, buf_size))
@@ -2150,14 +2165,25 @@ static ssize_t ath10k_write_btcoex(struct file *file,
goto exit;
}
+ pdev_param = ar->wmi.pdev_param->enable_btcoex;
+ if (test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
+ ar->running_fw->fw_file.fw_features)) {
+ ret = ath10k_wmi_pdev_set_param(ar, pdev_param, val);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable btcoex: %d\n", ret);
+ ret = count;
+ goto exit;
+ }
+ } else {
+ ath10k_info(ar, "restarting firmware due to btcoex change");
+ queue_work(ar->workqueue, &ar->restart_work);
+ }
+
if (val)
set_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
else
clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
- ath10k_info(ar, "restarting firmware due to btcoex change");
-
- queue_work(ar->workqueue, &ar->restart_work);
ret = count;
exit:
@@ -2320,6 +2346,7 @@ int ath10k_debug_create(struct ath10k *ar)
INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
INIT_LIST_HEAD(&ar->debug.fw_stats.peers);
+ INIT_LIST_HEAD(&ar->debug.fw_stats.peers_extd);
return 0;
}
@@ -2397,7 +2424,7 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("nf_cal_period", S_IRUSR | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_nf_cal_period);
- if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
debugfs_create_file("dfs_simulate_radar", S_IWUSR,
ar->debug.debugfs_phy, ar,
&fops_simulate_radar);
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 75c89e362..c458fa96a 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -154,10 +154,15 @@ ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
#ifdef CONFIG_MAC80211_DEBUGFS
void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir);
-void ath10k_sta_update_rx_duration(struct ath10k *ar, struct list_head *peer);
+void ath10k_sta_update_rx_duration(struct ath10k *ar,
+ struct ath10k_fw_stats *stats);
+void ath10k_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo);
#else
-static inline void ath10k_sta_update_rx_duration(struct ath10k *ar,
- struct list_head *peer)
+static inline
+void ath10k_sta_update_rx_duration(struct ath10k *ar,
+ struct ath10k_fw_stats *stats)
{
}
#endif /* CONFIG_MAC80211_DEBUGFS */
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
index 67ef75b60..9955fea08 100644
--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -18,13 +18,34 @@
#include "wmi-ops.h"
#include "debug.h"
-void ath10k_sta_update_rx_duration(struct ath10k *ar, struct list_head *head)
-{
- struct ieee80211_sta *sta;
+static void ath10k_sta_update_extd_stats_rx_duration(struct ath10k *ar,
+ struct ath10k_fw_stats *stats)
+{
+ struct ath10k_fw_extd_stats_peer *peer;
+ struct ieee80211_sta *sta;
+ struct ath10k_sta *arsta;
+
+ rcu_read_lock();
+ list_for_each_entry(peer, &stats->peers_extd, list) {
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer->peer_macaddr,
+ NULL);
+ if (!sta)
+ continue;
+ arsta = (struct ath10k_sta *)sta->drv_priv;
+ arsta->rx_duration += (u64)peer->rx_duration;
+ }
+ rcu_read_unlock();
+}
+
+static void ath10k_sta_update_stats_rx_duration(struct ath10k *ar,
+ struct ath10k_fw_stats *stats)
+{
struct ath10k_fw_stats_peer *peer;
+ struct ieee80211_sta *sta;
struct ath10k_sta *arsta;
rcu_read_lock();
- list_for_each_entry(peer, head, list) {
+ list_for_each_entry(peer, &stats->peers, list) {
sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer->peer_macaddr,
NULL);
if (!sta)
@@ -35,6 +56,29 @@ void ath10k_sta_update_rx_duration(struct ath10k *ar, struct list_head *head)
rcu_read_unlock();
}
+void ath10k_sta_update_rx_duration(struct ath10k *ar,
+ struct ath10k_fw_stats *stats)
+{
+ if (stats->extended)
+ ath10k_sta_update_extd_stats_rx_duration(ar, stats);
+ else
+ ath10k_sta_update_stats_rx_duration(ar, stats);
+}
+
+void ath10k_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+{
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+
+ if (!ath10k_peer_stats_enabled(ar))
+ return;
+
+ sinfo->rx_duration = arsta->rx_duration;
+ sinfo->filled |= 1ULL << NL80211_STA_INFO_RX_DURATION;
+}
+
static ssize_t ath10k_dbg_sta_read_aggr_mode(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -249,28 +293,6 @@ static const struct file_operations fops_delba = {
.llseek = default_llseek,
};
-static ssize_t ath10k_dbg_sta_read_rx_duration(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_sta *sta = file->private_data;
- struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
- char buf[100];
- int len = 0;
-
- len = scnprintf(buf, sizeof(buf),
- "%llu usecs\n", arsta->rx_duration);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static const struct file_operations fops_rx_duration = {
- .read = ath10k_dbg_sta_read_rx_duration,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir)
{
@@ -279,6 +301,4 @@ void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
debugfs_create_file("addba", S_IWUSR, dir, sta, &fops_addba);
debugfs_create_file("addba_resp", S_IWUSR, dir, sta, &fops_addba_resp);
debugfs_create_file("delba", S_IWUSR, dir, sta, &fops_delba);
- debugfs_create_file("rx_duration", S_IRUGO, dir, sta,
- &fops_rx_duration);
}
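
The per-station rx_duration debugfs file is dropped in favor of the standard sta_statistics hook, so the value now reaches userspace through nl80211 (for example via `iw dev wlan0 station dump`). The contract mac80211 expects is just two assignments, shown here in isolation:

/* mac80211 sta_statistics contract in miniature: write the field and
 * set the matching NL80211_STA_INFO_* bit in sinfo->filled so
 * cfg80211 reports it to userspace.
 */
static void sta_statistics_sketch(struct station_info *sinfo,
				  u64 rx_duration)
{
	sinfo->rx_duration = rx_duration;
	sinfo->filled |= 1ULL << NL80211_STA_INFO_RX_DURATION;
}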
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
index 89e7076c9..b2566b06e 100644
--- a/drivers/net/wireless/ath/ath10k/hif.h
+++ b/drivers/net/wireless/ath/ath10k/hif.h
@@ -87,6 +87,10 @@ struct ath10k_hif_ops {
int (*suspend)(struct ath10k *ar);
int (*resume)(struct ath10k *ar);
+
+ /* fetch calibration data from target eeprom */
+ int (*fetch_cal_eeprom)(struct ath10k *ar, void **data,
+ size_t *data_len);
};
static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
@@ -202,4 +206,14 @@ static inline void ath10k_hif_write32(struct ath10k *ar,
ar->hif.ops->write32(ar, address, data);
}
+static inline int ath10k_hif_fetch_cal_eeprom(struct ath10k *ar,
+ void **data,
+ size_t *data_len)
+{
+ if (!ar->hif.ops->fetch_cal_eeprom)
+ return -EOPNOTSUPP;
+
+ return ar->hif.ops->fetch_cal_eeprom(ar, data, data_len);
+}
+
#endif /* _HIF_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index cc827185d..0c55cd92a 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -22,7 +22,6 @@
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/skbuff.h>
-#include <linux/semaphore.h>
#include <linux/timer.h>
struct ath10k;
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 911c535d0..430a83e14 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -485,10 +485,10 @@ struct htt_mgmt_tx_completion {
__le32 status;
} __packed;
-#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK (0x3F)
+#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK (0x1F)
#define HTT_RX_INDICATION_INFO0_EXT_TID_LSB (0)
-#define HTT_RX_INDICATION_INFO0_FLUSH_VALID (1 << 6)
-#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 7)
+#define HTT_RX_INDICATION_INFO0_FLUSH_VALID (1 << 5)
+#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 6)
#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_MASK 0x0000003F
#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_LSB 0
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 813cdd262..24c8d65bc 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -748,7 +748,7 @@ ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
if (WARN_ON_ONCE(!arvif))
return NULL;
- if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
+ if (ath10k_mac_vif_chan(arvif->vif, &def))
return NULL;
return def.chan;
@@ -939,7 +939,8 @@ static void ath10k_process_rx(struct ath10k *ar,
is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
"mcast" : "ucast",
(__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
- status->flag == 0 ? "legacy" : "",
+ (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) == 0 ?
+ "legacy" : "",
status->flag & RX_FLAG_HT ? "ht" : "",
status->flag & RX_FLAG_VHT ? "vht" : "",
status->flag & RX_FLAG_40MHZ ? "40" : "",
@@ -1524,7 +1525,7 @@ static void ath10k_htt_rx_h_filter(struct ath10k *ar,
static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
- static struct ieee80211_rx_status rx_status;
+ struct ieee80211_rx_status *rx_status = &htt->rx_status;
struct sk_buff_head amsdu;
int ret;
@@ -1548,11 +1549,11 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
return ret;
}
- ath10k_htt_rx_h_ppdu(ar, &amsdu, &rx_status, 0xffff);
+ ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
- ath10k_htt_rx_h_filter(ar, &amsdu, &rx_status);
- ath10k_htt_rx_h_mpdu(ar, &amsdu, &rx_status);
- ath10k_htt_rx_h_deliver(ar, &amsdu, &rx_status);
+ ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
return 0;
}
@@ -2181,34 +2182,6 @@ static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
ath10k_mac_tx_push_pending(ar);
}
-static inline enum nl80211_band phy_mode_to_band(u32 phy_mode)
-{
- enum nl80211_band band;
-
- switch (phy_mode) {
- case MODE_11A:
- case MODE_11NA_HT20:
- case MODE_11NA_HT40:
- case MODE_11AC_VHT20:
- case MODE_11AC_VHT40:
- case MODE_11AC_VHT80:
- band = NL80211_BAND_5GHZ;
- break;
- case MODE_11G:
- case MODE_11B:
- case MODE_11GONLY:
- case MODE_11NG_HT20:
- case MODE_11NG_HT40:
- case MODE_11AC_VHT20_2G:
- case MODE_11AC_VHT40_2G:
- case MODE_11AC_VHT80_2G:
- default:
- band = NL80211_BAND_2GHZ;
- }
-
- return band;
-}
-
void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
bool release;
@@ -2290,7 +2263,6 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
ath10k_htt_tx_mgmt_dec_pending(htt);
spin_unlock_bh(&htt->tx_lock);
}
- ath10k_mac_tx_push_pending(ar);
break;
}
case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
@@ -2335,12 +2307,10 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
ath10k_htt_rx_delba(ar, resp);
break;
case HTT_T2H_MSG_TYPE_PKTLOG: {
- struct ath10k_pktlog_hdr *hdr =
- (struct ath10k_pktlog_hdr *)resp->pktlog_msg.payload;
-
trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
- sizeof(*hdr) +
- __le16_to_cpu(hdr->size));
+ skb->len -
+ offsetof(struct htt_resp,
+ pktlog_msg.payload));
break;
}
case HTT_T2H_MSG_TYPE_RX_FLUSH: {
@@ -2441,8 +2411,6 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
dev_kfree_skb_any(skb);
}
- ath10k_mac_tx_push_pending(ar);
-
num_mpdus = atomic_read(&htt->num_mpdus_ready);
while (num_mpdus) {
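
The pktlog change above is a hardening fix: the trace length used to come from a size field inside the firmware-controlled payload, and now it is bounded by what was actually received. The minimal form of the new computation:

/* Bound the pktlog trace length by the received skb instead of a
 * firmware-provided header field; payload_off corresponds to
 * offsetof(struct htt_resp, pktlog_msg.payload) in the driver.
 */
static size_t pktlog_len_sketch(const struct sk_buff *skb, size_t payload_off)
{
	if (skb->len < payload_off)
		return 0;	/* truncated message, nothing to trace */

	return skb->len - payload_off;
}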
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 6269c610b..7c072b605 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -49,7 +49,7 @@ static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
struct ieee80211_txq *txq)
{
struct ath10k *ar = hw->priv;
- struct ath10k_sta *arsta = (void *)txq->sta->drv_priv;
+ struct ath10k_sta *arsta;
struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
unsigned long frame_cnt;
unsigned long byte_cnt;
@@ -67,10 +67,12 @@ static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
return;
- if (txq->sta)
+ if (txq->sta) {
+ arsta = (void *)txq->sta->drv_priv;
peer_id = arsta->peer_id;
- else
+ } else {
peer_id = arvif->peer_id;
+ }
tid = txq->tid;
bit = BIT(peer_id % 32);
@@ -388,6 +390,8 @@ void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
int size;
+ tasklet_kill(&htt->txrx_compl_task);
+
idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
idr_destroy(&htt->pending_tx);
@@ -733,16 +737,18 @@ static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
- struct ath10k_vif *arvif = (void *)cb->vif->drv_priv;
+ struct ath10k_vif *arvif;
- if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
+ if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
return ar->scan.vdev_id;
- else if (cb->vif)
+ } else if (cb->vif) {
+ arvif = (void *)cb->vif->drv_priv;
return arvif->vdev_id;
- else if (ar->monitor_started)
+ } else if (ar->monitor_started) {
return ar->monitor_vdev_id;
- else
+ } else {
return 0;
+ }
}
static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index f544d4851..f903d468d 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -19,7 +19,6 @@
#include "hw.h"
const struct ath10k_hw_regs qca988x_regs = {
- .rtc_state_cold_reset_mask = 0x00000400,
.rtc_soc_base_address = 0x00004000,
.rtc_wmac_base_address = 0x00005000,
.soc_core_base_address = 0x00009000,
@@ -46,7 +45,6 @@ const struct ath10k_hw_regs qca988x_regs = {
};
const struct ath10k_hw_regs qca6174_regs = {
- .rtc_state_cold_reset_mask = 0x00002000,
.rtc_soc_base_address = 0x00000800,
.rtc_wmac_base_address = 0x00001000,
.soc_core_base_address = 0x0003a000,
@@ -73,7 +71,6 @@ const struct ath10k_hw_regs qca6174_regs = {
};
const struct ath10k_hw_regs qca99x0_regs = {
- .rtc_state_cold_reset_mask = 0x00000400,
.rtc_soc_base_address = 0x00080000,
.rtc_wmac_base_address = 0x00000000,
.soc_core_base_address = 0x00082000,
@@ -168,6 +165,15 @@ const struct ath10k_hw_values qca99x0_values = {
.ce_desc_meta_data_lsb = 4,
};
+const struct ath10k_hw_values qca9888_values = {
+ .rtc_state_val_on = 3,
+ .ce_count = 12,
+ .msi_assign_ce_max = 12,
+ .num_target_ce_config_wlan = 10,
+ .ce_desc_meta_data_mask = 0xFFF0,
+ .ce_desc_meta_data_lsb = 4,
+};
+
const struct ath10k_hw_values qca4019_values = {
.ce_count = 12,
.num_target_ce_config_wlan = 10,
@@ -179,17 +185,36 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev)
{
u32 cc_fix = 0;
+ u32 rcc_fix = 0;
+ enum ath10k_hw_cc_wraparound_type wraparound_type;
survey->filled |= SURVEY_INFO_TIME |
SURVEY_INFO_TIME_BUSY;
- if (ar->hw_params.has_shifted_cc_wraparound && cc < cc_prev) {
- cc_fix = 0x7fffffff;
- survey->filled &= ~SURVEY_INFO_TIME_BUSY;
+ wraparound_type = ar->hw_params.cc_wraparound_type;
+
+ if (cc < cc_prev || rcc < rcc_prev) {
+ switch (wraparound_type) {
+ case ATH10K_HW_CC_WRAP_SHIFTED_ALL:
+ if (cc < cc_prev) {
+ cc_fix = 0x7fffffff;
+ survey->filled &= ~SURVEY_INFO_TIME_BUSY;
+ }
+ break;
+ case ATH10K_HW_CC_WRAP_SHIFTED_EACH:
+ if (cc < cc_prev)
+ cc_fix = 0x7fffffff;
+
+ if (rcc < rcc_prev)
+ rcc_fix = 0x7fffffff;
+ break;
+ case ATH10K_HW_CC_WRAP_DISABLED:
+ break;
+ }
}
cc -= cc_prev - cc_fix;
- rcc -= rcc_prev;
+ rcc -= rcc_prev - rcc_fix;
survey->time = CCNT_TO_MSEC(ar, cc);
survey->time_busy = CCNT_TO_MSEC(ar, rcc);
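
A worked example of the wraparound fixup: the counters wrap at 0x7fffffff, so a reading that appears to move backwards gets that span added back before the delta is taken. The arithmetic, reduced to one counter:

/* Per-counter delta with the 0x7fffffff wraparound correction, as in
 * ath10k_hw_fill_survey_time() above. For prev = 0x7ffffff0 and
 * cur = 0x10 this returns 0x1f ticks instead of a huge bogus delta.
 */
static u32 cc_delta_sketch(u32 cur, u32 prev)
{
	u32 fix = (cur < prev) ? 0x7fffffff : 0;

	return cur - (prev - fix);
}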
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 1f9774f7a..bf0015476 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -26,7 +26,10 @@
#define QCA6164_2_1_DEVICE_ID (0x0041)
#define QCA6174_2_1_DEVICE_ID (0x003e)
#define QCA99X0_2_0_DEVICE_ID (0x0040)
+#define QCA9888_2_0_DEVICE_ID (0x0056)
+#define QCA9984_1_0_DEVICE_ID (0x0046)
#define QCA9377_1_0_DEVICE_ID (0x0042)
+#define QCA9887_1_0_DEVICE_ID (0x0050)
/* QCA988X 1.0 definitions (unsupported) */
#define QCA988X_HW_1_0_CHIP_ID_REV 0x0
@@ -38,6 +41,13 @@
#define QCA988X_HW_2_0_BOARD_DATA_FILE "/*(DEBLOBBED)*/"
#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
+/* QCA9887 1.0 definitions */
+#define QCA9887_HW_1_0_VERSION 0x4100016d
+#define QCA9887_HW_1_0_CHIP_ID_REV 0
+#define QCA9887_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9887/hw1.0"
+#define QCA9887_HW_1_0_BOARD_DATA_FILE "/*(DEBLOBBED)*/"
+#define QCA9887_HW_1_0_PATCH_LOAD_ADDR 0x1234
+
/* QCA6174 target BMI version signatures */
#define QCA6174_HW_1_0_VERSION 0x05000000
#define QCA6174_HW_1_1_VERSION 0x05000001
@@ -91,6 +101,22 @@ enum qca9377_chip_id_rev {
#define QCA99X0_HW_2_0_BOARD_DATA_FILE "/*(DEBLOBBED)*/"
#define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234
+/* QCA9984 1.0 defines */
+#define QCA9984_HW_1_0_DEV_VERSION 0x1000000
+#define QCA9984_HW_DEV_TYPE 0xa
+#define QCA9984_HW_1_0_CHIP_ID_REV 0x0
+#define QCA9984_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9984/hw1.0"
+#define QCA9984_HW_1_0_BOARD_DATA_FILE "/*(DEBLOBBED)*/"
+#define QCA9984_HW_1_0_PATCH_LOAD_ADDR 0x1234
+
+/* QCA9888 2.0 defines */
+#define QCA9888_HW_2_0_DEV_VERSION 0x1000000
+#define QCA9888_HW_DEV_TYPE 0xc
+#define QCA9888_HW_2_0_CHIP_ID_REV 0x0
+#define QCA9888_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA9888/hw2.0"
+#define QCA9888_HW_2_0_BOARD_DATA_FILE "/*(DEBLOBBED)*/"
+#define QCA9888_HW_2_0_PATCH_LOAD_ADDR 0x1234
+
/* QCA9377 1.0 definitions */
#define QCA9377_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9377/hw1.0"
#define QCA9377_HW_1_0_BOARD_DATA_FILE "/*(DEBLOBBED)*/"
@@ -193,12 +219,14 @@ enum ath10k_hw_rev {
ATH10K_HW_QCA988X,
ATH10K_HW_QCA6174,
ATH10K_HW_QCA99X0,
+ ATH10K_HW_QCA9888,
+ ATH10K_HW_QCA9984,
ATH10K_HW_QCA9377,
ATH10K_HW_QCA4019,
+ ATH10K_HW_QCA9887,
};
struct ath10k_hw_regs {
- u32 rtc_state_cold_reset_mask;
u32 rtc_soc_base_address;
u32 rtc_wmac_base_address;
u32 soc_core_base_address;
@@ -241,14 +269,18 @@ struct ath10k_hw_values {
extern const struct ath10k_hw_values qca988x_values;
extern const struct ath10k_hw_values qca6174_values;
extern const struct ath10k_hw_values qca99x0_values;
+extern const struct ath10k_hw_values qca9888_values;
extern const struct ath10k_hw_values qca4019_values;
void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev);
#define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
+#define QCA_REV_9887(ar) ((ar)->hw_rev == ATH10K_HW_QCA9887)
#define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
#define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0)
+#define QCA_REV_9888(ar) ((ar)->hw_rev == ATH10K_HW_QCA9888)
+#define QCA_REV_9984(ar) ((ar)->hw_rev == ATH10K_HW_QCA9984)
#define QCA_REV_9377(ar) ((ar)->hw_rev == ATH10K_HW_QCA9377)
#define QCA_REV_40XX(ar) ((ar)->hw_rev == ATH10K_HW_QCA4019)
@@ -275,25 +307,6 @@ enum ath10k_mcast2ucast_mode {
ATH10K_MCAST2UCAST_ENABLED = 1,
};
-struct ath10k_pktlog_hdr {
- __le16 flags;
- __le16 missed_cnt;
- __le16 log_type;
- __le16 size;
- __le32 timestamp;
- u8 payload[0];
-} __packed;
-
-struct ath10k_pktlog_10_4_hdr {
- __le16 flags;
- __le16 missed_cnt;
- __le16 log_type;
- __le16 size;
- __le32 timestamp;
- __le32 type_specific_data;
- u8 payload[0];
-} __packed;
-
enum ath10k_hw_rate_ofdm {
ATH10K_HW_RATE_OFDM_48M = 0,
ATH10K_HW_RATE_OFDM_24M,
@@ -315,11 +328,41 @@ enum ath10k_hw_rate_cck {
ATH10K_HW_RATE_CCK_SP_2M,
};
+enum ath10k_hw_rate_rev2_cck {
+ ATH10K_HW_RATE_REV2_CCK_LP_1M = 1,
+ ATH10K_HW_RATE_REV2_CCK_LP_2M,
+ ATH10K_HW_RATE_REV2_CCK_LP_5_5M,
+ ATH10K_HW_RATE_REV2_CCK_LP_11M,
+ ATH10K_HW_RATE_REV2_CCK_SP_2M,
+ ATH10K_HW_RATE_REV2_CCK_SP_5_5M,
+ ATH10K_HW_RATE_REV2_CCK_SP_11M,
+};
+
enum ath10k_hw_4addr_pad {
ATH10K_HW_4ADDR_PAD_AFTER,
ATH10K_HW_4ADDR_PAD_BEFORE,
};
+enum ath10k_hw_cc_wraparound_type {
+ ATH10K_HW_CC_WRAP_DISABLED = 0,
+
+ /* This type is used when the HW chip has a quirky Cycle Counter
+ * wraparound which resets to 0x7fffffff instead of 0. All
+ * other CC related counters (e.g. Rx Clear Count) are divided
+ * by 2 so they never wraparound themselves.
+ */
+ ATH10K_HW_CC_WRAP_SHIFTED_ALL = 1,
+
+ /* Each hw counter wraps around independently. When a
+ * counter overflows, that counter is right shifted
+ * by 1, i.e. reset to 0x7fffffff, while the other counters keep
+ * running unaffected. With this type of wraparound it should
+ * be possible to report accurate Rx busy time, unlike with the
+ * first type.
+ */
+ ATH10K_HW_CC_WRAP_SHIFTED_EACH = 2,
+};
+
/* Target specific defines for MAIN firmware */
#define TARGET_NUM_VDEVS 8
#define TARGET_NUM_PEER_AST 2
@@ -484,7 +527,6 @@ enum ath10k_hw_4addr_pad {
/* as of IP3.7.1 */
#define RTC_STATE_V_ON ar->hw_values->rtc_state_val_on
-#define RTC_STATE_COLD_RESET_MASK ar->regs->rtc_state_cold_reset_mask
#define RTC_STATE_V_LSB 0
#define RTC_STATE_V_MASK 0x00000007
#define RTC_STATE_ADDRESS 0x0000
@@ -547,7 +589,10 @@ enum ath10k_hw_4addr_pad {
#define WLAN_SYSTEM_SLEEP_DISABLE_MASK 0x00000001
#define WLAN_GPIO_PIN0_ADDRESS 0x00000028
+#define WLAN_GPIO_PIN0_CONFIG_LSB 11
#define WLAN_GPIO_PIN0_CONFIG_MASK 0x00007800
+#define WLAN_GPIO_PIN0_PAD_PULL_LSB 5
+#define WLAN_GPIO_PIN0_PAD_PULL_MASK 0x00000060
#define WLAN_GPIO_PIN1_ADDRESS 0x0000002c
#define WLAN_GPIO_PIN1_CONFIG_MASK 0x00007800
#define WLAN_GPIO_PIN10_ADDRESS 0x00000050
@@ -560,6 +605,8 @@ enum ath10k_hw_4addr_pad {
#define CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0
#define SI_CONFIG_OFFSET 0x00000000
+#define SI_CONFIG_ERR_INT_LSB 19
+#define SI_CONFIG_ERR_INT_MASK 0x00080000
#define SI_CONFIG_BIDIR_OD_DATA_LSB 18
#define SI_CONFIG_BIDIR_OD_DATA_MASK 0x00040000
#define SI_CONFIG_I2C_LSB 16
@@ -573,7 +620,9 @@ enum ath10k_hw_4addr_pad {
#define SI_CONFIG_DIVIDER_LSB 0
#define SI_CONFIG_DIVIDER_MASK 0x0000000f
#define SI_CS_OFFSET 0x00000004
+#define SI_CS_DONE_ERR_LSB 10
#define SI_CS_DONE_ERR_MASK 0x00000400
+#define SI_CS_DONE_INT_LSB 9
#define SI_CS_DONE_INT_MASK 0x00000200
#define SI_CS_START_LSB 8
#define SI_CS_START_MASK 0x00000100
@@ -624,7 +673,10 @@ enum ath10k_hw_4addr_pad {
#define GPIO_BASE_ADDRESS WLAN_GPIO_BASE_ADDRESS
#define GPIO_PIN0_OFFSET WLAN_GPIO_PIN0_ADDRESS
#define GPIO_PIN1_OFFSET WLAN_GPIO_PIN1_ADDRESS
+#define GPIO_PIN0_CONFIG_LSB WLAN_GPIO_PIN0_CONFIG_LSB
#define GPIO_PIN0_CONFIG_MASK WLAN_GPIO_PIN0_CONFIG_MASK
+#define GPIO_PIN0_PAD_PULL_LSB WLAN_GPIO_PIN0_PAD_PULL_LSB
+#define GPIO_PIN0_PAD_PULL_MASK WLAN_GPIO_PIN0_PAD_PULL_MASK
#define GPIO_PIN1_CONFIG_MASK WLAN_GPIO_PIN1_CONFIG_MASK
#define SI_BASE_ADDRESS WLAN_SI_BASE_ADDRESS
#define SCRATCH_BASE_ADDRESS SOC_CORE_BASE_ADDRESS
@@ -679,6 +731,18 @@ enum ath10k_hw_4addr_pad {
#define WINDOW_READ_ADDR_ADDRESS MISSING
#define WINDOW_WRITE_ADDR_ADDRESS MISSING
+#define QCA9887_1_0_I2C_SDA_GPIO_PIN 5
+#define QCA9887_1_0_I2C_SDA_PIN_CONFIG 3
+#define QCA9887_1_0_SI_CLK_GPIO_PIN 17
+#define QCA9887_1_0_SI_CLK_PIN_CONFIG 3
+#define QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS 0x00000010
+
+#define QCA9887_EEPROM_SELECT_READ 0xa10000a0
+#define QCA9887_EEPROM_ADDR_HI_MASK 0x0000ff00
+#define QCA9887_EEPROM_ADDR_HI_LSB 8
+#define QCA9887_EEPROM_ADDR_LO_MASK 0x00ff0000
+#define QCA9887_EEPROM_ADDR_LO_LSB 16
+
#define RTC_STATE_V_GET(x) (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB)
#endif /* _HW_H_ */
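
The new *_LSB defines paired with existing masks are not cosmetic: ath10k's SM()/MS() helpers in hw.h token-paste both the _LSB and _MASK suffixes, so a field is only usable with them once both are defined. Equivalent expansion, shown for the newly added SI_CONFIG_ERR_INT field:

/* Equivalent of ath10k's SM()/MS() field helpers, applied to the new
 * SI_CONFIG_ERR_INT field (LSB 19, mask 0x00080000):
 *
 *   SM(1, SI_CONFIG_ERR_INT) == ((1 << 19) & 0x00080000) == 0x00080000
 *   MS(0x00080000, SI_CONFIG_ERR_INT) == 1
 */
#define SM_SKETCH(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
#define MS_SKETCH(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)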
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 4040f9413..0bbd0a00e 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -62,6 +62,32 @@ static struct ieee80211_rate ath10k_rates[] = {
{ .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
};
+static struct ieee80211_rate ath10k_rates_rev2[] = {
+ { .bitrate = 10,
+ .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M },
+ { .bitrate = 20,
+ .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M,
+ .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 55,
+ .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M,
+ .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 110,
+ .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M,
+ .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+
+ { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
+ { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
+ { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
+ { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
+ { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
+ { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
+ { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
+ { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
+};
+
#define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4
#define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
@@ -70,6 +96,9 @@ static struct ieee80211_rate ath10k_rates[] = {
#define ath10k_g_rates (ath10k_rates + 0)
#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
+#define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0)
+#define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2))
+
static bool ath10k_mac_bitrate_is_cck(int bitrate)
{
switch (bitrate) {
@@ -773,6 +802,7 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
{
struct ath10k_peer *peer, *tmp;
int peer_id;
+ int i;
lockdep_assert_held(&ar->conf_mutex);
@@ -789,6 +819,17 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
ar->peer_map[peer_id] = NULL;
}
+ /* Double check that peer is properly un-referenced from
+ * the peer_map
+ */
+ for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
+ if (ar->peer_map[i] == peer) {
+ ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %p idx %d)\n",
+ peer->addr, peer, i);
+ ar->peer_map[i] = NULL;
+ }
+ }
+
list_del(&peer->list);
kfree(peer);
ar->num_peers--;
@@ -799,6 +840,7 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
static void ath10k_peer_cleanup_all(struct ath10k *ar)
{
struct ath10k_peer *peer, *tmp;
+ int i;
lockdep_assert_held(&ar->conf_mutex);
@@ -807,6 +849,10 @@ static void ath10k_peer_cleanup_all(struct ath10k *ar)
list_del(&peer->list);
kfree(peer);
}
+
+ for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++)
+ ar->peer_map[i] = NULL;
+
spin_unlock_bh(&ar->data_lock);
ar->num_peers = 0;
@@ -2910,7 +2956,7 @@ static int ath10k_update_channel_list(struct ath10k *ar)
if (channel->flags & IEEE80211_CHAN_DISABLED)
continue;
- ch->allow_ht = true;
+ ch->allow_ht = true;
/* FIXME: when should we really allow VHT? */
ch->allow_vht = true;
@@ -2993,7 +3039,7 @@ static void ath10k_regd_update(struct ath10k *ar)
regpair = ar->ath_common.regulatory.regpair;
- if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
nl_dfs_reg = ar->dfs_detector->region;
wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg);
} else {
@@ -3022,7 +3068,7 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
- if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n",
request->dfs_region);
result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
@@ -3646,17 +3692,18 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
static void ath10k_mac_txq_init(struct ieee80211_txq *txq)
{
- struct ath10k_txq *artxq = (void *)txq->drv_priv;
+ struct ath10k_txq *artxq;
if (!txq)
return;
+ artxq = (void *)txq->drv_priv;
INIT_LIST_HEAD(&artxq->list);
}
static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
{
- struct ath10k_txq *artxq = (void *)txq->drv_priv;
+ struct ath10k_txq *artxq;
struct ath10k_skb_cb *cb;
struct sk_buff *msdu;
int msdu_id;
@@ -3664,6 +3711,7 @@ static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
if (!txq)
return;
+ artxq = (void *)txq->drv_priv;
spin_lock_bh(&ar->txqs_lock);
if (!list_empty(&artxq->list))
list_del_init(&artxq->list);
@@ -3781,6 +3829,9 @@ void ath10k_mac_tx_push_pending(struct ath10k *ar)
int ret;
int max;
+ if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2))
+ return;
+
spin_lock_bh(&ar->txqs_lock);
rcu_read_lock();
@@ -3826,12 +3877,16 @@ void __ath10k_scan_finish(struct ath10k *ar)
break;
case ATH10K_SCAN_RUNNING:
case ATH10K_SCAN_ABORTING:
- if (!ar->scan.is_roc)
- ieee80211_scan_completed(ar->hw,
- (ar->scan.state ==
- ATH10K_SCAN_ABORTING));
- else if (ar->scan.roc_notify)
+ if (!ar->scan.is_roc) {
+ struct cfg80211_scan_info info = {
+ .aborted = (ar->scan.state ==
+ ATH10K_SCAN_ABORTING),
+ };
+
+ ieee80211_scan_completed(ar->hw, &info);
+ } else if (ar->scan.roc_notify) {
ieee80211_remain_on_channel_expired(ar->hw);
+ }
/* fall through */
case ATH10K_SCAN_STARTING:
ar->scan.state = ATH10K_SCAN_IDLE;
@@ -4051,9 +4106,7 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
list_add_tail(&artxq->list, &ar->txqs);
spin_unlock_bh(&ar->txqs_lock);
- if (ath10k_mac_tx_can_push(hw, txq))
- tasklet_schedule(&ar->htt.txrx_compl_task);
-
+ ath10k_mac_tx_push_pending(ar);
ath10k_htt_tx_txq_update(hw, txq);
}
@@ -4194,6 +4247,9 @@ static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
}
+ if (ar->cfg_tx_chainmask <= 1)
+ vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC;
+
vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
@@ -4231,7 +4287,7 @@ static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
ht_cap.cap |= smps;
}
- if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC)
+ if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC && (ar->cfg_tx_chainmask > 1))
ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) {
@@ -4467,6 +4523,19 @@ static int ath10k_start(struct ieee80211_hw *hw)
}
}
+ param = ar->wmi.pdev_param->enable_btcoex;
+ if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) &&
+ test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
+ ar->running_fw->fw_file.fw_features)) {
+ ret = ath10k_wmi_pdev_set_param(ar, param, 0);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to set btcoex param: %d\n", ret);
+ goto err_core_stop;
+ }
+ clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
+ }
+
ar->num_started_vdevs = 0;
ath10k_regd_update(ar);
@@ -5932,9 +6001,17 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
continue;
if (peer->sta == sta) {
- ath10k_warn(ar, "found sta peer %pM entry on vdev %i after it was supposedly removed\n",
- sta->addr, arvif->vdev_id);
+ ath10k_warn(ar, "found sta peer %pM (ptr %p id %d) entry on vdev %i after it was supposedly removed\n",
+ sta->addr, peer, i, arvif->vdev_id);
peer->sta = NULL;
+
+ /* Clean up the peer object as well since we
+ * must have failed to do this above.
+ */
+ list_del(&peer->list);
+ ar->peer_map[i] = NULL;
+ kfree(peer);
+ ar->num_peers--;
}
}
spin_unlock_bh(&ar->data_lock);
@@ -7359,6 +7436,7 @@ static const struct ieee80211_ops ath10k_ops = {
#endif
#ifdef CONFIG_MAC80211_DEBUGFS
.sta_add_debugfs = ath10k_sta_add_debugfs,
+ .sta_statistics = ath10k_sta_statistics,
#endif
};
@@ -7428,21 +7506,32 @@ static const struct ieee80211_channel ath10k_5ghz_channels[] = {
struct ath10k *ath10k_mac_create(size_t priv_size)
{
struct ieee80211_hw *hw;
+ struct ieee80211_ops *ops;
struct ath10k *ar;
- hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, &ath10k_ops);
- if (!hw)
+ ops = kmemdup(&ath10k_ops, sizeof(ath10k_ops), GFP_KERNEL);
+ if (!ops)
return NULL;
+ hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, ops);
+ if (!hw) {
+ kfree(ops);
+ return NULL;
+ }
+
ar = hw->priv;
ar->hw = hw;
+ ar->ops = ops;
return ar;
}
void ath10k_mac_destroy(struct ath10k *ar)
{
+ struct ieee80211_ops *ops = ar->ops;
+
ieee80211_free_hw(ar->hw);
+ kfree(ops);
}
static const struct ieee80211_iface_limit ath10k_if_limits[] = {
@@ -7695,8 +7784,14 @@ int ath10k_mac_register(struct ath10k *ar)
band = &ar->mac.sbands[NL80211_BAND_2GHZ];
band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
band->channels = channels;
- band->n_bitrates = ath10k_g_rates_size;
- band->bitrates = ath10k_g_rates;
+
+ if (ar->hw_params.cck_rate_map_rev2) {
+ band->n_bitrates = ath10k_g_rates_rev2_size;
+ band->bitrates = ath10k_g_rates_rev2;
+ } else {
+ band->n_bitrates = ath10k_g_rates_size;
+ band->bitrates = ath10k_g_rates;
+ }
ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
}
@@ -7860,7 +7955,7 @@ int ath10k_mac_register(struct ath10k *ar)
if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
ar->hw->netdev_features = NETIF_F_HW_CSUM;
- if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
/* Init ath dfs pattern detector */
ar->ath_common.debug_mask = ATH_DBG_DFS;
ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
@@ -7870,6 +7965,15 @@ int ath10k_mac_register(struct ath10k *ar)
ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
}
+ /* Current wake_tx_queue implementation imposes a significant
+ * performance penalty in some setups. The tx scheduling code needs
+ * more work anyway so disable the wake_tx_queue unless firmware
+ * supports the pull-push mechanism.
+ */
+ if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+ ar->running_fw->fw_file.fw_features))
+ ar->ops->wake_tx_queue = NULL;
+
ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
ath10k_reg_notifier);
if (ret) {
@@ -7899,7 +8003,7 @@ err_unregister:
ieee80211_unregister_hw(ar->hw);
err_dfs_detector_exit:
- if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
ar->dfs_detector->exit(ar->dfs_detector);
err_free:
@@ -7914,7 +8018,7 @@ void ath10k_mac_unregister(struct ath10k *ar)
{
ieee80211_unregister_hw(ar->hw);
- if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
ar->dfs_detector->exit(ar->dfs_detector);
kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
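
Duplicating the ops table with kmemdup() is what makes the wake_tx_queue opt-out possible: the static const template stays shared, while each device instance may NULL out a callback before ieee80211_alloc_hw() sees it. The pattern in isolation, with push_pull standing in for the firmware feature test done in ath10k_mac_register():

/* Per-device copy of a shared const ops template so one callback can
 * be dropped at runtime without touching the template itself.
 */
static struct ieee80211_ops *ath10k_ops_dup_sketch(bool push_pull)
{
	struct ieee80211_ops *ops;

	ops = kmemdup(&ath10k_ops, sizeof(ath10k_ops), GFP_KERNEL);
	if (!ops)
		return NULL;

	if (!push_pull)
		ops->wake_tx_queue = NULL; /* mac80211 uses its default path */

	return ops;
}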
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index d2674ade4..b4eb1cd60 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -56,7 +56,10 @@ static const struct pci_device_id ath10k_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
{ PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
+ { PCI_VDEVICE(ATHEROS, QCA9888_2_0_DEVICE_ID) }, /* PCI-E QCA9888 V2 */
+ { PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */
{ PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
+ { PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */
{0}
};
@@ -81,8 +84,14 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
{ QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },
+ { QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV },
+
+ { QCA9888_2_0_DEVICE_ID, QCA9888_HW_2_0_CHIP_ID_REV },
+
{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },
+
+ { QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV },
};
static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
@@ -837,13 +846,16 @@ static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
switch (ar->hw_rev) {
case ATH10K_HW_QCA988X:
+ case ATH10K_HW_QCA9887:
case ATH10K_HW_QCA6174:
case ATH10K_HW_QCA9377:
val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
CORE_CTRL_ADDRESS) &
0x7ff) << 21;
break;
+ case ATH10K_HW_QCA9888:
case ATH10K_HW_QCA99X0:
+ case ATH10K_HW_QCA9984:
case ATH10K_HW_QCA4019:
val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
break;
@@ -864,7 +876,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret = 0;
u32 *buf;
- unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
+ unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
struct ath10k_ce_pipe *ce_diag;
/* Host buffer address in CE space */
u32 ce_data;
@@ -882,9 +894,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
* 1) 4-byte alignment
* 2) Buffer in DMA-able space
*/
- orig_nbytes = nbytes;
+ alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
+
data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
- orig_nbytes,
+ alloc_nbytes,
&ce_data_base,
GFP_ATOMIC);
@@ -892,9 +905,9 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
ret = -ENOMEM;
goto done;
}
- memset(data_buf, 0, orig_nbytes);
+ memset(data_buf, 0, alloc_nbytes);
- remaining_bytes = orig_nbytes;
+ remaining_bytes = nbytes;
ce_data = ce_data_base;
while (remaining_bytes) {
nbytes = min_t(unsigned int, remaining_bytes,
@@ -954,19 +967,22 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
}
remaining_bytes -= nbytes;
+
+ if (ret) {
+ ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
+ address, ret);
+ break;
+ }
+ memcpy(data, data_buf, nbytes);
+
address += nbytes;
- ce_data += nbytes;
+ data += nbytes;
}
done:
- if (ret == 0)
- memcpy(data, data_buf, orig_nbytes);
- else
- ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
- address, ret);
if (data_buf)
- dma_free_coherent(ar->dev, orig_nbytes, data_buf,
+ dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
ce_data_base);
spin_unlock_bh(&ar_pci->ce_lock);
@@ -1560,6 +1576,7 @@ static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
switch (ar->hw_rev) {
case ATH10K_HW_QCA988X:
+ case ATH10K_HW_QCA9887:
case ATH10K_HW_QCA6174:
case ATH10K_HW_QCA9377:
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
@@ -1569,6 +1586,8 @@ static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
CORE_CTRL_ADDRESS, val);
break;
case ATH10K_HW_QCA99X0:
+ case ATH10K_HW_QCA9984:
+ case ATH10K_HW_QCA9888:
case ATH10K_HW_QCA4019:
/* TODO: Find appropriate register configuration for QCA99X0
* to mask irq/MSI.
@@ -1583,6 +1602,7 @@ static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
switch (ar->hw_rev) {
case ATH10K_HW_QCA988X:
+ case ATH10K_HW_QCA9887:
case ATH10K_HW_QCA6174:
case ATH10K_HW_QCA9377:
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
@@ -1592,6 +1612,8 @@ static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
CORE_CTRL_ADDRESS, val);
break;
case ATH10K_HW_QCA99X0:
+ case ATH10K_HW_QCA9984:
+ case ATH10K_HW_QCA9888:
case ATH10K_HW_QCA4019:
/* TODO: Find appropriate register configuration for QCA99X0
* to unmask irq/MSI.
@@ -1932,6 +1954,9 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar)
switch (ar_pci->pdev->device) {
case QCA988X_2_0_DEVICE_ID:
case QCA99X0_2_0_DEVICE_ID:
+ case QCA9888_2_0_DEVICE_ID:
+ case QCA9984_1_0_DEVICE_ID:
+ case QCA9887_1_0_DEVICE_ID:
return 1;
case QCA6164_2_1_DEVICE_ID:
case QCA6174_2_1_DEVICE_ID:
@@ -2198,6 +2223,14 @@ static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
}
+static bool ath10k_pci_has_device_gone(struct ath10k *ar)
+{
+ u32 val;
+
+ val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
+ return (val == 0xffffffff);
+}
+
/* this function effectively clears target memory controller assert line */
static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
{
@@ -2293,16 +2326,20 @@ static int ath10k_pci_warm_reset(struct ath10k *ar)
return 0;
}
+static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar)
+{
+ ath10k_pci_irq_disable(ar);
+ return ath10k_pci_qca99x0_chip_reset(ar);
+}
+
static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
{
- if (QCA_REV_988X(ar) || QCA_REV_6174(ar)) {
- return ath10k_pci_warm_reset(ar);
- } else if (QCA_REV_99X0(ar)) {
- ath10k_pci_irq_disable(ar);
- return ath10k_pci_qca99x0_chip_reset(ar);
- } else {
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ if (!ar_pci->pci_soft_reset)
return -ENOTSUPP;
- }
+
+ return ar_pci->pci_soft_reset(ar);
}
static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
@@ -2437,16 +2474,12 @@ static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
static int ath10k_pci_chip_reset(struct ath10k *ar)
{
- if (QCA_REV_988X(ar))
- return ath10k_pci_qca988x_chip_reset(ar);
- else if (QCA_REV_6174(ar))
- return ath10k_pci_qca6174_chip_reset(ar);
- else if (QCA_REV_9377(ar))
- return ath10k_pci_qca6174_chip_reset(ar);
- else if (QCA_REV_99X0(ar))
- return ath10k_pci_qca99x0_chip_reset(ar);
- else
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ if (WARN_ON(!ar_pci->pci_hard_reset))
return -ENOTSUPP;
+
+ return ar_pci->pci_hard_reset(ar);
}
static int ath10k_pci_hif_power_up(struct ath10k *ar)
@@ -2559,6 +2592,144 @@ static int ath10k_pci_hif_resume(struct ath10k *ar)
}
#endif
+static bool ath10k_pci_validate_cal(void *data, size_t size)
+{
+ __le16 *cal_words = data;
+ u16 checksum = 0;
+ size_t i;
+
+ if (size % 2 != 0)
+ return false;
+
+ for (i = 0; i < size / 2; i++)
+ checksum ^= le16_to_cpu(cal_words[i]);
+
+ return checksum == 0xffff;
+}
+
+static void ath10k_pci_enable_eeprom(struct ath10k *ar)
+{
+ /* Enable SI clock */
+ ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0);
+
+ /* Configure GPIOs for I2C operation */
+ ath10k_pci_write32(ar,
+ GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
+ 4 * QCA9887_1_0_I2C_SDA_GPIO_PIN,
+ SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG,
+ GPIO_PIN0_CONFIG) |
+ SM(1, GPIO_PIN0_PAD_PULL));
+
+ ath10k_pci_write32(ar,
+ GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
+ 4 * QCA9887_1_0_SI_CLK_GPIO_PIN,
+ SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) |
+ SM(1, GPIO_PIN0_PAD_PULL));
+
+ ath10k_pci_write32(ar,
+ GPIO_BASE_ADDRESS +
+ QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS,
+ 1u << QCA9887_1_0_SI_CLK_GPIO_PIN);
+
+ /* In the Swift ASIC the EEPROM clock will be (110 MHz / 512) = ~215 kHz */
+ ath10k_pci_write32(ar,
+ SI_BASE_ADDRESS + SI_CONFIG_OFFSET,
+ SM(1, SI_CONFIG_ERR_INT) |
+ SM(1, SI_CONFIG_BIDIR_OD_DATA) |
+ SM(1, SI_CONFIG_I2C) |
+ SM(1, SI_CONFIG_POS_SAMPLE) |
+ SM(1, SI_CONFIG_INACTIVE_DATA) |
+ SM(1, SI_CONFIG_INACTIVE_CLK) |
+ SM(8, SI_CONFIG_DIVIDER));
+}
+
+static int ath10k_pci_read_eeprom(struct ath10k *ar, u16 addr, u8 *out)
+{
+ u32 reg;
+ int wait_limit;
+
+ /* set the device select byte and address for the read operation */
+ reg = QCA9887_EEPROM_SELECT_READ |
+ SM(addr, QCA9887_EEPROM_ADDR_LO) |
+ SM(addr >> 8, QCA9887_EEPROM_ADDR_HI);
+ ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg);
+
+ /* write transmit data, transfer length, and START bit */
+ ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET,
+ SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) |
+ SM(4, SI_CS_TX_CNT));
+
+ /* wait max 1 sec */
+ wait_limit = 100000;
+
+ /* wait for SI_CS_DONE_INT */
+ do {
+ reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET);
+ if (MS(reg, SI_CS_DONE_INT))
+ break;
+
+ wait_limit--;
+ udelay(10);
+ } while (wait_limit > 0);
+
+ if (!MS(reg, SI_CS_DONE_INT)) {
+ ath10k_err(ar, "timeout while reading device EEPROM at %04x\n",
+ addr);
+ return -ETIMEDOUT;
+ }
+
+ /* clear SI_CS_DONE_INT */
+ ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg);
+
+ if (MS(reg, SI_CS_DONE_ERR)) {
+ ath10k_err(ar, "failed to read device EEPROM at %04x\n", addr);
+ return -EIO;
+ }
+
+ /* extract receive data */
+ reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET);
+ *out = reg;
+
+ return 0;
+}
+
+static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data,
+ size_t *data_len)
+{
+ u8 *caldata = NULL;
+ size_t calsize, i;
+ int ret;
+
+ if (!QCA_REV_9887(ar))
+ return -EOPNOTSUPP;
+
+ calsize = ar->hw_params.cal_data_len;
+ caldata = kmalloc(calsize, GFP_KERNEL);
+ if (!caldata)
+ return -ENOMEM;
+
+ ath10k_pci_enable_eeprom(ar);
+
+ for (i = 0; i < calsize; i++) {
+ ret = ath10k_pci_read_eeprom(ar, i, &caldata[i]);
+ if (ret)
+ goto err_free;
+ }
+
+ if (!ath10k_pci_validate_cal(caldata, calsize))
+ goto err_free;
+
+ *data = caldata;
+ *data_len = calsize;
+
+ return 0;
+
+err_free:
+ kfree(caldata);
+
+ return -EINVAL;
+}
+
static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
.tx_sg = ath10k_pci_hif_tx_sg,
.diag_read = ath10k_pci_hif_diag_read,
@@ -2578,6 +2749,7 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
.suspend = ath10k_pci_hif_suspend,
.resume = ath10k_pci_hif_resume,
#endif
+ .fetch_cal_eeprom = ath10k_pci_hif_fetch_cal_eeprom,
};
/*
@@ -2591,6 +2763,9 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;
+ if (ath10k_pci_has_device_gone(ar))
+ return IRQ_NONE;
+
ret = ath10k_pci_force_wake(ar);
if (ret) {
ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret);
@@ -2976,24 +3151,52 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
enum ath10k_hw_rev hw_rev;
u32 chip_id;
bool pci_ps;
+ int (*pci_soft_reset)(struct ath10k *ar);
+ int (*pci_hard_reset)(struct ath10k *ar);
switch (pci_dev->device) {
case QCA988X_2_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA988X;
pci_ps = false;
+ pci_soft_reset = ath10k_pci_warm_reset;
+ pci_hard_reset = ath10k_pci_qca988x_chip_reset;
+ break;
+ case QCA9887_1_0_DEVICE_ID:
+ hw_rev = ATH10K_HW_QCA9887;
+ pci_ps = false;
+ pci_soft_reset = ath10k_pci_warm_reset;
+ pci_hard_reset = ath10k_pci_qca988x_chip_reset;
break;
case QCA6164_2_1_DEVICE_ID:
case QCA6174_2_1_DEVICE_ID:
hw_rev = ATH10K_HW_QCA6174;
pci_ps = true;
+ pci_soft_reset = ath10k_pci_warm_reset;
+ pci_hard_reset = ath10k_pci_qca6174_chip_reset;
break;
case QCA99X0_2_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA99X0;
pci_ps = false;
+ pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
+ pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
+ break;
+ case QCA9984_1_0_DEVICE_ID:
+ hw_rev = ATH10K_HW_QCA9984;
+ pci_ps = false;
+ pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
+ pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
+ break;
+ case QCA9888_2_0_DEVICE_ID:
+ hw_rev = ATH10K_HW_QCA9888;
+ pci_ps = false;
+ pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
+ pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
break;
case QCA9377_1_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA9377;
pci_ps = true;
+ pci_soft_reset = NULL;
+ pci_hard_reset = ath10k_pci_qca6174_chip_reset;
break;
default:
WARN_ON(1);
@@ -3018,6 +3221,8 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ar->dev_id = pci_dev->device;
ar_pci->pci_ps = pci_ps;
ar_pci->bus_ops = &ath10k_pci_bus_ops;
+ ar_pci->pci_soft_reset = pci_soft_reset;
+ ar_pci->pci_hard_reset = pci_hard_reset;
ar->id.vendor = pdev->vendor;
ar->id.device = pdev->device;
@@ -3169,12 +3374,15 @@ static void __exit ath10k_pci_exit(void)
module_exit(ath10k_pci_exit);
MODULE_AUTHOR("Qualcomm Atheros");
-MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
+MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN PCIe/AHB devices");
MODULE_LICENSE("Dual BSD/GPL");
/* QCA988x 2.0 firmware files */
/*(DEBLOBBED)*/
+/* QCA9887 1.0 firmware files */
+/*(DEBLOBBED)*/
+
/* QCA6174 2.1 firmware files */
/*(DEBLOBBED)*/
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 959dc321b..6eca1df2c 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -234,6 +234,12 @@ struct ath10k_pci {
const struct ath10k_bus_ops *bus_ops;
+ /* Chip specific pci reset routine used to do a safe reset */
+ int (*pci_soft_reset)(struct ath10k *ar);
+
+ /* Chip specific pci full reset function */
+ int (*pci_hard_reset)(struct ath10k *ar);
+
/* Keep this entry in the last, memory for struct ath10k_ahb is
* allocated (ahb support enabled case) in the continuation of
* this struct.
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index ca8d16884..034e7a54c 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -656,26 +656,6 @@ struct rx_msdu_end {
* Reserved: HW should fill with zero. FW should ignore.
*/
-#define RX_PPDU_START_SIG_RATE_SELECT_OFDM 0
-#define RX_PPDU_START_SIG_RATE_SELECT_CCK 1
-
-#define RX_PPDU_START_SIG_RATE_OFDM_48 0
-#define RX_PPDU_START_SIG_RATE_OFDM_24 1
-#define RX_PPDU_START_SIG_RATE_OFDM_12 2
-#define RX_PPDU_START_SIG_RATE_OFDM_6 3
-#define RX_PPDU_START_SIG_RATE_OFDM_54 4
-#define RX_PPDU_START_SIG_RATE_OFDM_36 5
-#define RX_PPDU_START_SIG_RATE_OFDM_18 6
-#define RX_PPDU_START_SIG_RATE_OFDM_9 7
-
-#define RX_PPDU_START_SIG_RATE_CCK_LP_11 0
-#define RX_PPDU_START_SIG_RATE_CCK_LP_5_5 1
-#define RX_PPDU_START_SIG_RATE_CCK_LP_2 2
-#define RX_PPDU_START_SIG_RATE_CCK_LP_1 3
-#define RX_PPDU_START_SIG_RATE_CCK_SP_11 4
-#define RX_PPDU_START_SIG_RATE_CCK_SP_5_5 5
-#define RX_PPDU_START_SIG_RATE_CCK_SP_2 6
-
#define HTT_RX_PPDU_START_PREAMBLE_LEGACY 0x04
#define HTT_RX_PPDU_START_PREAMBLE_HT 0x08
#define HTT_RX_PPDU_START_PREAMBLE_HT_WITH_TXBF 0x09
@@ -711,25 +691,6 @@ struct rx_msdu_end {
/* No idea what this flag means. It seems to be always set in rate. */
#define RX_PPDU_START_RATE_FLAG BIT(3)
-enum rx_ppdu_start_rate {
- RX_PPDU_START_RATE_OFDM_48M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_48M,
- RX_PPDU_START_RATE_OFDM_24M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_24M,
- RX_PPDU_START_RATE_OFDM_12M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_12M,
- RX_PPDU_START_RATE_OFDM_6M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_6M,
- RX_PPDU_START_RATE_OFDM_54M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_54M,
- RX_PPDU_START_RATE_OFDM_36M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_36M,
- RX_PPDU_START_RATE_OFDM_18M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_18M,
- RX_PPDU_START_RATE_OFDM_9M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_9M,
-
- RX_PPDU_START_RATE_CCK_LP_11M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_11M,
- RX_PPDU_START_RATE_CCK_LP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_5_5M,
- RX_PPDU_START_RATE_CCK_LP_2M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_2M,
- RX_PPDU_START_RATE_CCK_LP_1M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_1M,
- RX_PPDU_START_RATE_CCK_SP_11M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_11M,
- RX_PPDU_START_RATE_CCK_SP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_5_5M,
- RX_PPDU_START_RATE_CCK_SP_2M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_2M,
-};
-
struct rx_ppdu_start {
struct {
u8 pri20_mhz;
@@ -994,7 +955,41 @@ struct rx_pkt_end {
__le32 info0; /* %RX_PKT_END_INFO0_ */
__le32 phy_timestamp_1;
__le32 phy_timestamp_2;
- __le32 rx_location_info; /* %RX_LOCATION_INFO_ */
+} __packed;
+
+#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_MASK 0x00003fff
+#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_LSB 0
+#define RX_LOCATION_INFO0_RTT_FAC_VHT_MASK 0x1fff8000
+#define RX_LOCATION_INFO0_RTT_FAC_VHT_LSB 15
+#define RX_LOCATION_INFO0_RTT_STRONGEST_CHAIN_MASK 0xc0000000
+#define RX_LOCATION_INFO0_RTT_STRONGEST_CHAIN_LSB 30
+#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_STATUS BIT(14)
+#define RX_LOCATION_INFO0_RTT_FAC_VHT_STATUS BIT(29)
+
+#define RX_LOCATION_INFO1_RTT_PREAMBLE_TYPE_MASK 0x0000000c
+#define RX_LOCATION_INFO1_RTT_PREAMBLE_TYPE_LSB 2
+#define RX_LOCATION_INFO1_PKT_BW_MASK 0x00000030
+#define RX_LOCATION_INFO1_PKT_BW_LSB 4
+#define RX_LOCATION_INFO1_SKIP_P_SKIP_BTCF_MASK 0x0000ff00
+#define RX_LOCATION_INFO1_SKIP_P_SKIP_BTCF_LSB 8
+#define RX_LOCATION_INFO1_RTT_MSC_RATE_MASK 0x000f0000
+#define RX_LOCATION_INFO1_RTT_MSC_RATE_LSB 16
+#define RX_LOCATION_INFO1_RTT_PBD_LEG_BW_MASK 0x00300000
+#define RX_LOCATION_INFO1_RTT_PBD_LEG_BW_LSB 20
+#define RX_LOCATION_INFO1_TIMING_BACKOFF_MASK 0x07c00000
+#define RX_LOCATION_INFO1_TIMING_BACKOFF_LSB 22
+#define RX_LOCATION_INFO1_RTT_TX_FRAME_PHASE_MASK 0x18000000
+#define RX_LOCATION_INFO1_RTT_TX_FRAME_PHASE_LSB 27
+#define RX_LOCATION_INFO1_RTT_CFR_STATUS BIT(0)
+#define RX_LOCATION_INFO1_RTT_CIR_STATUS BIT(1)
+#define RX_LOCATION_INFO1_RTT_GI_TYPE BIT(7)
+#define RX_LOCATION_INFO1_RTT_MAC_PHY_PHASE BIT(29)
+#define RX_LOCATION_INFO1_RTT_TX_DATA_START_X_PHASE BIT(30)
+#define RX_LOCATION_INFO1_RX_LOCATION_VALID BIT(31)
+
+struct rx_location_info {
+ __le32 rx_location_info0; /* %RX_LOCATION_INFO0_ */
+ __le32 rx_location_info1; /* %RX_LOCATION_INFO1_ */
} __packed;
enum rx_phy_ppdu_end_info0 {
@@ -1067,6 +1062,17 @@ struct rx_phy_ppdu_end {
struct rx_ppdu_end_qca99x0 {
struct rx_pkt_end rx_pkt_end;
+ __le32 rx_location_info; /* %RX_LOCATION_INFO_ */
+ struct rx_phy_ppdu_end rx_phy_ppdu_end;
+ __le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */
+ __le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */
+ __le16 bb_length;
+ __le16 info1; /* %RX_PPDU_END_INFO1_ */
+} __packed;
+
+struct rx_ppdu_end_qca9984 {
+ struct rx_pkt_end rx_pkt_end;
+ struct rx_location_info rx_location_info;
struct rx_phy_ppdu_end rx_phy_ppdu_end;
__le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */
__le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */
@@ -1080,6 +1086,7 @@ struct rx_ppdu_end {
struct rx_ppdu_end_qca988x qca988x;
struct rx_ppdu_end_qca6174 qca6174;
struct rx_ppdu_end_qca99x0 qca99x0;
+ struct rx_ppdu_end_qca9984 qca9984;
} __packed;
} __packed;
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c
index 4671cfbcd..7d9b0da1b 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.c
+++ b/drivers/net/wireless/ath/ath10k/spectral.c
@@ -101,9 +101,9 @@ int ath10k_spectral_process_fft(struct ath10k *ar,
break;
case 80:
/* TODO: As experiments with an analogue sender and various
- * configuaritions (fft-sizes of 64/128/256 and 20/40/80 Mhz)
+ * configurations (fft-sizes of 64/128/256 and 20/40/80 MHz)
* show, the particular configuration of 80 MHz/64 bins does
- * not match with the other smaples at all. Until the reason
+ * not match with the other samples at all. Until the reason
* for that is found, don't report these samples.
*/
if (bin_len == 64)
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
index 8e24099fa..aaf53a81e 100644
--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -447,6 +447,9 @@ Fw Mode/SubMode Mask
#define QCA988X_BOARD_DATA_SZ 7168
#define QCA988X_BOARD_EXT_DATA_SZ 0
+#define QCA9887_BOARD_DATA_SZ 7168
+#define QCA9887_BOARD_EXT_DATA_SZ 0
+
#define QCA6174_BOARD_DATA_SZ 8192
#define QCA6174_BOARD_EXT_DATA_SZ 0
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 576e7c42e..b29a86a26 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -81,10 +81,11 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
skb_cb = ATH10K_SKB_CB(msdu);
txq = skb_cb->txq;
- artxq = (void *)txq->drv_priv;
- if (txq)
+ if (txq) {
+ artxq = (void *)txq->drv_priv;
artxq->num_fw_queued--;
+ }
ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
ath10k_htt_tx_dec_pending(htt);
@@ -117,6 +118,9 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
ieee80211_tx_status(htt->ar->hw, msdu);
/* we do not own the msdu anymore */
+
+ ath10k_mac_tx_push_pending(ar);
+
return 0;
}
@@ -213,6 +217,7 @@ void ath10k_peer_map_event(struct ath10k_htt *htt,
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
ev->vdev_id, ev->addr, ev->peer_id);
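+ /* Firmware must never remap an in-use peer ID to a different peer. */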
+ WARN_ON(ar->peer_map[ev->peer_id] && (ar->peer_map[ev->peer_id] != peer));
ar->peer_map[ev->peer_id] = peer;
set_bit(ev->peer_id, peer->peer_ids);
exit:
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 2c300329e..d2462886b 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1104,6 +1104,7 @@ static struct wmi_pdev_param_map wmi_pdev_param_map = {
.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
};
static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
@@ -1199,6 +1200,7 @@ static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
};
static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
@@ -1294,6 +1296,7 @@ static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
};
/* firmware 10.2 specific mappings */
@@ -1550,6 +1553,7 @@ static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = {
.wapi_mbssid_offset = WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
.arp_srcaddr = WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
.arp_dstaddr = WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
+ .enable_btcoex = WMI_10_4_PDEV_PARAM_ENABLE_BTCOEX,
};
static const struct wmi_peer_flags_map wmi_peer_flags_map = {
@@ -1822,7 +1826,7 @@ static struct sk_buff *
ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
- struct ath10k_vif *arvif = (void *)cb->vif->drv_priv;
+ struct ath10k_vif *arvif;
struct wmi_mgmt_tx_cmd *cmd;
struct ieee80211_hdr *hdr;
struct sk_buff *skb;
@@ -1834,10 +1838,12 @@ ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
hdr = (struct ieee80211_hdr *)msdu->data;
fc = le16_to_cpu(hdr->frame_control);
- if (cb->vif)
+ if (cb->vif) {
+ arvif = (void *)cb->vif->drv_priv;
vdev_id = arvif->vdev_id;
- else
+ } else {
vdev_id = 0;
+ }
if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
return ERR_PTR(-EINVAL);
@@ -2920,6 +2926,7 @@ static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
u32 num_pdev_ext_stats;
u32 num_vdev_stats;
u32 num_peer_stats;
+ u32 num_bcnflt_stats;
u32 stats_id;
int i;
@@ -2930,6 +2937,7 @@ static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+ num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
stats_id = __le32_to_cpu(ev->stats_id);
for (i = 0; i < num_pdev_stats; i++) {
@@ -2970,32 +2978,57 @@ static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
/* fw doesn't implement vdev stats */
for (i = 0; i < num_peer_stats; i++) {
- const struct wmi_10_4_peer_extd_stats *src;
+ const struct wmi_10_4_peer_stats *src;
struct ath10k_fw_stats_peer *dst;
- int stats_len;
- bool extd_peer_stats = !!(stats_id & WMI_10_4_STAT_PEER_EXTD);
-
- if (extd_peer_stats)
- stats_len = sizeof(struct wmi_10_4_peer_extd_stats);
- else
- stats_len = sizeof(struct wmi_10_4_peer_stats);
src = (void *)skb->data;
- if (!skb_pull(skb, stats_len))
+ if (!skb_pull(skb, sizeof(*src)))
return -EPROTO;
dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
if (!dst)
continue;
- ath10k_wmi_10_4_pull_peer_stats(&src->common, dst);
- /* FIXME: expose 10.4 specific values */
- if (extd_peer_stats)
- dst->rx_duration = __le32_to_cpu(src->rx_duration);
-
+ ath10k_wmi_10_4_pull_peer_stats(src, dst);
list_add_tail(&dst->list, &stats->peers);
}
+ for (i = 0; i < num_bcnflt_stats; i++) {
+ const struct wmi_10_4_bss_bcn_filter_stats *src;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ /* FIXME: expose values to userspace
+ *
+ * Note: Even though this loop seems to do nothing, it is
+ * required to parse the following sub-structures properly.
+ */
+ }
+
+ if ((stats_id & WMI_10_4_STAT_PEER_EXTD) == 0)
+ return 0;
+
+ stats->extended = true;
+
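+ /* The 10.4 firmware places the extended peer stats after the regular
+ * peer and beacon filter stats in the same event buffer.
+ */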
+ for (i = 0; i < num_peer_stats; i++) {
+ const struct wmi_10_4_peer_extd_stats *src;
+ struct ath10k_fw_extd_stats_peer *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
+ dst->rx_duration = __le32_to_cpu(src->rx_duration);
+ list_add_tail(&dst->list, &stats->peers_extd);
+ }
+
return 0;
}
@@ -3671,7 +3704,7 @@ void ath10k_wmi_event_dfs(struct ath10k *ar,
phyerr->tsf_timestamp, tsf, buf_len);
/* Skip event if DFS disabled */
- if (!config_enabled(CONFIG_ATH10K_DFS_CERTIFIED))
+ if (!IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED))
return;
ATH10K_DFS_STAT_INC(ar, pulses_total);
@@ -5253,6 +5286,9 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_10_4_PEER_STA_KICKOUT_EVENTID:
ath10k_wmi_event_peer_sta_kickout(ar, skb);
break;
+ case WMI_10_4_ROAM_EVENTID:
+ ath10k_wmi_event_roam(ar, skb);
+ break;
case WMI_10_4_HOST_SWBA_EVENTID:
ath10k_wmi_event_host_swba(ar, skb);
break;
@@ -7899,6 +7935,7 @@ static const struct wmi_ops wmi_10_4_ops = {
.pull_phyerr = ath10k_wmi_10_4_op_pull_phyerr_ev,
.pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+ .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
.get_txbf_conf_scheme = ath10k_wmi_10_4_txbf_conf_scheme,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 9fdf47ea2..3ef468893 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -3447,6 +3447,7 @@ struct wmi_pdev_param_map {
u32 wapi_mbssid_offset;
u32 arp_srcaddr;
u32 arp_dstaddr;
+ u32 enable_btcoex;
};
#define WMI_PDEV_PARAM_UNSUPPORTED 0
@@ -3760,6 +3761,9 @@ enum wmi_10_4_pdev_param {
WMI_10_4_PDEV_PARAM_ATF_OBSS_NOISE_SCH,
WMI_10_4_PDEV_PARAM_ATF_OBSS_NOISE_SCALING_FACTOR,
WMI_10_4_PDEV_PARAM_CUST_TXPOWER_SCALE,
+ WMI_10_4_PDEV_PARAM_ATF_DYNAMIC_ENABLE,
+ WMI_10_4_PDEV_PARAM_ATF_SSID_GROUP_POLICY,
+ WMI_10_4_PDEV_PARAM_ENABLE_BTCOEX,
};
struct wmi_pdev_set_param_cmd {
@@ -4336,7 +4340,6 @@ struct wmi_10_4_peer_stats {
} __packed;
struct wmi_10_4_peer_extd_stats {
- struct wmi_10_4_peer_stats common;
struct wmi_mac_addr peer_macaddr;
__le32 inactive_time;
__le32 peer_chain_rssi;
@@ -4344,6 +4347,19 @@ struct wmi_10_4_peer_extd_stats {
__le32 reserved[10];
} __packed;
+struct wmi_10_4_bss_bcn_stats {
+ __le32 vdev_id;
+ __le32 bss_bcns_dropped;
+ __le32 bss_bcn_delivered;
+} __packed;
+
+struct wmi_10_4_bss_bcn_filter_stats {
+ __le32 bcns_dropped;
+ __le32 bcns_delivered;
+ __le32 active_filters;
+ struct wmi_10_4_bss_bcn_stats bss_stats;
+} __packed;
+
struct wmi_10_2_pdev_ext_stats {
__le32 rx_rssi_comb;
__le32 rx_rssi[4];
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index fc47b7098..f23c85176 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -219,8 +219,8 @@ ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
sifs = AR5K_INIT_SIFS_QUARTER_RATE;
break;
case AR5K_BWMODE_DEFAULT:
- sifs = AR5K_INIT_SIFS_DEFAULT_BG;
default:
+ sifs = AR5K_INIT_SIFS_DEFAULT_BG;
if (channel->band == NL80211_BAND_5GHZ)
sifs = AR5K_INIT_SIFS_DEFAULT_A;
break;
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 4e11ba06f..72e2ec677 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -847,8 +847,6 @@ static int ath6kl_cfg80211_disconnect(struct wiphy *wiphy,
up(&ar->sem);
- vif->sme_state = SME_DISCONNECTED;
-
return 0;
}
@@ -859,7 +857,11 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
struct ath6kl *ar = vif->ar;
if (vif->scan_req) {
- cfg80211_scan_done(vif->scan_req, true);
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
+ cfg80211_scan_done(vif->scan_req, &info);
vif->scan_req = NULL;
}
@@ -1069,6 +1071,9 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy,
void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted)
{
struct ath6kl *ar = vif->ar;
+ struct cfg80211_scan_info info = {
+ .aborted = aborted,
+ };
int i;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: status%s\n", __func__,
@@ -1089,7 +1094,7 @@ void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted)
}
out:
- cfg80211_scan_done(vif->scan_req, aborted);
+ cfg80211_scan_done(vif->scan_req, &info);
vif->scan_req = NULL;
}
@@ -1104,7 +1109,8 @@ void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
cfg80211_chandef_create(&chandef,
ieee80211_get_channel(vif->ar->wiphy, freq),
- (mode == WMI_11G_HT20) ?
+ (mode == WMI_11G_HT20 &&
+ ath6kl_band_2ghz.ht_cap.ht_supported) ?
NL80211_CHAN_HT20 : NL80211_CHAN_NO_HT);
mutex_lock(&vif->wdev.mtx);
@@ -2971,6 +2977,7 @@ static int ath6kl_stop_ap(struct wiphy *wiphy, struct net_device *dev)
ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx);
clear_bit(CONNECTED, &vif->flags);
+ netif_carrier_off(vif->ndev);
/* Restore ht setting in firmware */
return ath6kl_restore_htcap(vif);
@@ -3614,7 +3621,11 @@ void ath6kl_cfg80211_vif_stop(struct ath6kl_vif *vif, bool wmi_ready)
}
if (vif->scan_req) {
- cfg80211_scan_done(vif->scan_req, true);
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
+ cfg80211_scan_done(vif->scan_req, &info);
vif->scan_req = NULL;
}
@@ -3870,7 +3881,7 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
BIT(NL80211_IFTYPE_P2P_CLIENT);
}
- if (config_enabled(CONFIG_ATH6KL_REGDOMAIN) &&
+ if (IS_ENABLED(CONFIG_ATH6KL_REGDOMAIN) &&
test_bit(ATH6KL_FW_CAPABILITY_REGDOMAIN, ar->fw_capabilities)) {
wiphy->reg_notifier = ath6kl_cfg80211_reg_notify;
ar->wiphy->features |= NL80211_FEATURE_CELL_BASE_REG_HINTS;
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index 7067b87d0..0bf4592a3 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -148,7 +148,7 @@ enum ath6kl_fw_capability {
/* ratetable is the 2 stream version (max MCS15) */
ATH6KL_FW_CAPABILITY_RATETABLE_MCS15,
- /* firmare doesn't support IP checksumming */
+ /* firmware doesn't support IP checksumming */
ATH6KL_FW_CAPABILITY_NO_IP_CHECKSUM,
/* this needs to be last */
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index 40432fe7a..9df41d5e3 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -1401,6 +1401,10 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
return;
}
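+ /* Compute the pad length early: the AP power-save path below needs
+ * it to locate the start of the payload.
+ */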
+ pad_before_data_start =
+ (le16_to_cpu(dhdr->info3) >> WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT)
+ & WMI_DATA_HDR_PAD_BEFORE_DATA_MASK;
+
/* Get the Power save state of the STA */
if (vif->nw_type == AP_NETWORK) {
meta_type = wmi_data_hdr_get_meta(dhdr);
@@ -1408,7 +1412,7 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
WMI_DATA_HDR_PS_MASK);
- offset = sizeof(struct wmi_data_hdr);
+ offset = sizeof(struct wmi_data_hdr) + pad_before_data_start;
trig_state = !!(le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_TRIG);
switch (meta_type) {
@@ -1523,9 +1527,6 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
seq_no = wmi_data_hdr_get_seqno(dhdr);
meta_type = wmi_data_hdr_get_meta(dhdr);
dot11_hdr = wmi_data_hdr_get_dot11(dhdr);
- pad_before_data_start =
- (le16_to_cpu(dhdr->info3) >> WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT)
- & WMI_DATA_HDR_PAD_BEFORE_DATA_MASK;
skb_pull(skb, sizeof(struct wmi_data_hdr));
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 631c3a0c5..b8cf04d11 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -2544,8 +2544,7 @@ int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, u8 if_idx,
s32 nominal_phy = 0;
int ret;
- if (!((params->user_pri < 8) &&
- (params->user_pri <= 0x7) &&
+ if (!((params->user_pri <= 0x7) &&
(up_to_ac[params->user_pri & 0x7] == params->traffic_class) &&
(params->traffic_direc == UPLINK_TRAFFIC ||
params->traffic_direc == DNLINK_TRAFFIC ||
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index bd4a1a655..bea6186f7 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -18,7 +18,6 @@
#include <linux/nl80211.h>
#include <linux/platform_device.h>
-#include <linux/ath9k_platform.h>
#include <linux/module.h>
#include "ath9k.h"
@@ -58,20 +57,9 @@ static void ath_ahb_read_cachesize(struct ath_common *common, int *csz)
static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
{
- struct ath_softc *sc = (struct ath_softc *)common->priv;
- struct platform_device *pdev = to_platform_device(sc->dev);
- struct ath9k_platform_data *pdata;
-
- pdata = dev_get_platdata(&pdev->dev);
- if (off >= (ARRAY_SIZE(pdata->eeprom_data))) {
- ath_err(common,
- "%s: flash read failed, offset %08x is out of range\n",
- __func__, off);
- return false;
- }
-
- *data = pdata->eeprom_data[off];
- return true;
+ ath_err(common, "%s: eeprom data has to be provided externally\n",
+ __func__);
+ return false;
}
static struct ath_bus_ops ath_ahb_bus_ops = {
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index 53d7445a5..61a9b8504 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -476,6 +476,7 @@ static void ar9002_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
static void ar9002_hw_spectral_scan_config(struct ath_hw *ah,
struct ath_spec_scan *param)
{
+ u32 repeat_bit;
u8 count;
if (!param->enabled) {
@@ -486,12 +487,15 @@ static void ar9002_hw_spectral_scan_config(struct ath_hw *ah,
REG_SET_BIT(ah, AR_PHY_RADAR_0, AR_PHY_RADAR_0_FFT_ENA);
REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, AR_PHY_SPECTRAL_SCAN_ENABLE);
+ if (AR_SREV_9280(ah))
+ repeat_bit = AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT;
+ else
+ repeat_bit = AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_KIWI;
+
if (param->short_repeat)
- REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN,
- AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT);
+ REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, repeat_bit);
else
- REG_CLR_BIT(ah, AR_PHY_SPECTRAL_SCAN,
- AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT);
+ REG_CLR_BIT(ah, AR_PHY_SPECTRAL_SCAN, repeat_bit);
/* on AR92xx, the highest bit of count will make the chip send
* spectral samples endlessly. Check if this really was intended,
@@ -499,15 +503,25 @@ static void ar9002_hw_spectral_scan_config(struct ath_hw *ah,
*/
count = param->count;
if (param->endless) {
- if (AR_SREV_9271(ah))
- count = 0;
- else
+ if (AR_SREV_9280(ah))
count = 0x80;
+ else
+ count = 0;
} else if (count & 0x80)
count = 0x7f;
+ else if (!count)
+ count = 1;
+
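+ /* AR9280 keeps the 8-bit count field; the other AR92xx chips use the
+ * wider Kiwi register layout and need the PHYERR mask select bit.
+ */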
+ if (AR_SREV_9280(ah)) {
+ REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_COUNT, count);
+ } else {
+ REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_COUNT_KIWI, count);
+ REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN,
+ AR_PHY_SPECTRAL_SCAN_PHYERR_MASK_SELECT);
+ }
- REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN,
- AR_PHY_SPECTRAL_SCAN_COUNT, count);
REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN,
AR_PHY_SPECTRAL_SCAN_PERIOD, param->period);
REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN,
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.h b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
index 9d17a5375..2b58245f7 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
@@ -177,8 +177,11 @@
#define AR_PHY_SPECTRAL_SCAN_PERIOD_S 8
#define AR_PHY_SPECTRAL_SCAN_COUNT 0x00FF0000 /* Number of reports, reg 68, bits 16-23*/
#define AR_PHY_SPECTRAL_SCAN_COUNT_S 16
+#define AR_PHY_SPECTRAL_SCAN_COUNT_KIWI 0x0FFF0000 /* Number of reports, reg 68, bits 16-27*/
+#define AR_PHY_SPECTRAL_SCAN_COUNT_KIWI_S 16
#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x01000000 /* Short repeat, reg 68, bit 24*/
-#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 24 /* Short repeat, reg 68, bit 24*/
+#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_KIWI 0x10000000 /* Short repeat, reg 68, bit 28*/
+#define AR_PHY_SPECTRAL_SCAN_PHYERR_MASK_SELECT 0x40000000
#define AR_PHY_RX_DELAY 0x9914
#define AR_PHY_SEARCH_START_DELAY 0x9918
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 518e649ec..b6f064a8d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -33,6 +33,7 @@ struct coeff {
enum ar9003_cal_types {
IQ_MISMATCH_CAL = BIT(0),
+ TEMP_COMP_CAL = BIT(1),
};
static void ar9003_hw_setup_calibration(struct ath_hw *ah,
@@ -58,6 +59,12 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah,
/* Kick-off cal */
REG_SET_BIT(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_DO_CAL);
break;
+ case TEMP_COMP_CAL:
+ ath_dbg(common, CALIBRATE,
+ "starting Temperature Compensation Calibration\n");
+ REG_SET_BIT(ah, AR_CH0_THERM, AR_CH0_THERM_LOCAL);
+ REG_SET_BIT(ah, AR_CH0_THERM, AR_CH0_THERM_START);
+ break;
default:
ath_err(common, "Invalid calibration type\n");
break;
@@ -75,50 +82,51 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah,
struct ath9k_cal_list *currCal)
{
struct ath9k_hw_cal_data *caldata = ah->caldata;
- /* Cal is assumed not done until explicitly set below */
- bool iscaldone = false;
+ const struct ath9k_percal_data *cur_caldata = currCal->calData;
/* Calibration in progress. */
if (currCal->calState == CAL_RUNNING) {
/* Check to see if it has finished. */
- if (!(REG_READ(ah, AR_PHY_TIMING4) & AR_PHY_TIMING4_DO_CAL)) {
+ if (REG_READ(ah, AR_PHY_TIMING4) & AR_PHY_TIMING4_DO_CAL)
+ return false;
+
+ /*
+ * Accumulate cal measures for active chains
+ */
+ if (cur_caldata->calCollect)
+ cur_caldata->calCollect(ah);
+ ah->cal_samples++;
+
+ if (ah->cal_samples >= cur_caldata->calNumSamples) {
+ unsigned int i, numChains = 0;
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (rxchainmask & (1 << i))
+ numChains++;
+ }
+
/*
- * Accumulate cal measures for active chains
+ * Process accumulated data
*/
- currCal->calData->calCollect(ah);
- ah->cal_samples++;
-
- if (ah->cal_samples >=
- currCal->calData->calNumSamples) {
- unsigned int i, numChains = 0;
- for (i = 0; i < AR9300_MAX_CHAINS; i++) {
- if (rxchainmask & (1 << i))
- numChains++;
- }
-
- /*
- * Process accumulated data
- */
- currCal->calData->calPostProc(ah, numChains);
+ if (cur_caldata->calPostProc)
+ cur_caldata->calPostProc(ah, numChains);
- /* Calibration has finished. */
- caldata->CalValid |= currCal->calData->calType;
- currCal->calState = CAL_DONE;
- iscaldone = true;
- } else {
+ /* Calibration has finished. */
+ caldata->CalValid |= cur_caldata->calType;
+ currCal->calState = CAL_DONE;
+ return true;
+ } else {
/*
* Set-up collection of another sub-sample until we
* get desired number
*/
ar9003_hw_setup_calibration(ah, currCal);
- }
}
- } else if (!(caldata->CalValid & currCal->calData->calType)) {
+ } else if (!(caldata->CalValid & cur_caldata->calType)) {
/* If current cal is marked invalid in channel, kick it off */
ath9k_hw_reset_calibration(ah, currCal);
}
- return iscaldone;
+ return false;
}
static int ar9003_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
@@ -315,9 +323,16 @@ static const struct ath9k_percal_data iq_cal_single_sample = {
ar9003_hw_iqcalibrate
};
+static const struct ath9k_percal_data temp_cal_single_sample = {
+ TEMP_COMP_CAL,
+ MIN_CAL_SAMPLES,
+ PER_MAX_LOG_COUNT,
+};
+
static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
{
ah->iq_caldata.calData = &iq_cal_single_sample;
+ ah->temp_caldata.calData = &temp_cal_single_sample;
if (AR_SREV_9300_20_OR_LATER(ah)) {
ah->enabled_cals |= TX_IQ_CAL;
@@ -325,7 +340,7 @@ static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
}
- ah->supp_cals = IQ_MISMATCH_CAL;
+ ah->supp_cals = IQ_MISMATCH_CAL | TEMP_COMP_CAL;
}
#define OFF_UPPER_LT 24
@@ -1374,6 +1389,29 @@ static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable)
}
}
+static void ar9003_hw_init_cal_common(struct ath_hw *ah)
+{
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
+
+ /* Initialize list pointers */
+ ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
+
+ INIT_CAL(&ah->iq_caldata);
+ INSERT_CAL(ah, &ah->iq_caldata);
+
+ INIT_CAL(&ah->temp_caldata);
+ INSERT_CAL(ah, &ah->temp_caldata);
+
+ /* Initialize current pointer to first element in list */
+ ah->cal_list_curr = ah->cal_list;
+
+ if (ah->cal_list_curr)
+ ath9k_hw_reset_calibration(ah, ah->cal_list_curr);
+
+ if (caldata)
+ caldata->CalValid = 0;
+}
+
static bool ar9003_hw_init_cal_pcoem(struct ath_hw *ah,
struct ath9k_channel *chan)
{
@@ -1533,21 +1571,7 @@ skip_tx_iqcal:
/* Revert chainmask to runtime parameters */
ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
- /* Initialize list pointers */
- ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
-
- INIT_CAL(&ah->iq_caldata);
- INSERT_CAL(ah, &ah->iq_caldata);
- ath_dbg(common, CALIBRATE, "enabling IQ Calibration\n");
-
- /* Initialize current pointer to first element in list */
- ah->cal_list_curr = ah->cal_list;
-
- if (ah->cal_list_curr)
- ath9k_hw_reset_calibration(ah, ah->cal_list_curr);
-
- if (caldata)
- caldata->CalValid = 0;
+ ar9003_hw_init_cal_common(ah);
return true;
}
@@ -1578,8 +1602,6 @@ static bool do_ar9003_agc_cal(struct ath_hw *ah)
static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
struct ath9k_channel *chan)
{
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath9k_hw_cal_data *caldata = ah->caldata;
bool txiqcal_done = false;
bool status = true;
bool run_agc_cal = false, sep_iq_cal = false;
@@ -1677,21 +1699,7 @@ skip_tx_iqcal:
/* Revert chainmask to runtime parameters */
ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
- /* Initialize list pointers */
- ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
-
- INIT_CAL(&ah->iq_caldata);
- INSERT_CAL(ah, &ah->iq_caldata);
- ath_dbg(common, CALIBRATE, "enabling IQ Calibration\n");
-
- /* Initialize current pointer to first element in list */
- ah->cal_list_curr = ah->cal_list;
-
- if (ah->cal_list_curr)
- ath9k_hw_reset_calibration(ah, ah->cal_list_curr);
-
- if (caldata)
- caldata->CalValid = 0;
+ ar9003_hw_init_cal_common(ah);
return true;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index dec1a317a..5bd2cbaf5 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3202,8 +3202,7 @@ static int ar9300_compress_decision(struct ath_hw *ah,
it, length);
break;
case _CompressBlock:
- if (reference == 0) {
- } else {
+ if (reference != 0) {
eep = ar9003_eeprom_struct_find_by_id(reference);
if (eep == NULL) {
ath_dbg(common, EEPROM,
@@ -4176,7 +4175,7 @@ static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah) && !AR_SREV_9531(ah))
ar9003_hw_internal_regulator_apply(ah);
ar9003_hw_apply_tuning_caps(ah);
- ar9003_hw_apply_minccapwr_thresh(ah, chan);
+ ar9003_hw_apply_minccapwr_thresh(ah, is2ghz);
ar9003_hw_txend_to_xpa_off_apply(ah, is2ghz);
ar9003_hw_thermometer_apply(ah);
ar9003_hw_thermo_cal_apply(ah);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 566da789f..a171dbb29 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -689,13 +689,6 @@
#define AR_CH0_TOP_XPABIASLVL (AR_SREV_9550(ah) ? 0x3c0 : 0x300)
#define AR_CH0_TOP_XPABIASLVL_S (AR_SREV_9550(ah) ? 6 : 8)
-#define AR_CH0_THERM (AR_SREV_9300(ah) ? 0x16290 : \
- ((AR_SREV_9485(ah) ? 0x1628c : 0x16294)))
-#define AR_CH0_THERM_XPABIASLVL_MSB 0x3
-#define AR_CH0_THERM_XPABIASLVL_MSB_S 0
-#define AR_CH0_THERM_XPASHORT2GND 0x4
-#define AR_CH0_THERM_XPASHORT2GND_S 2
-
#define AR_SWITCH_TABLE_COM_ALL (0xffff)
#define AR_SWITCH_TABLE_COM_ALL_S (0)
#define AR_SWITCH_TABLE_COM_AR9462_ALL (0xffffff)
@@ -712,15 +705,17 @@
#define AR_SWITCH_TABLE_ALL (0xfff)
#define AR_SWITCH_TABLE_ALL_S (0)
-#define AR_PHY_65NM_CH0_THERM (AR_SREV_9300(ah) ? 0x16290 :\
- ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16294 : 0x1628c))
+#define AR_CH0_THERM (AR_SREV_9300(ah) ? 0x16290 :\
+ ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16294 : 0x1628c))
+#define AR_CH0_THERM_XPABIASLVL_MSB 0x3
+#define AR_CH0_THERM_XPABIASLVL_MSB_S 0
+#define AR_CH0_THERM_XPASHORT2GND 0x4
+#define AR_CH0_THERM_XPASHORT2GND_S 2
-#define AR_PHY_65NM_CH0_THERM_LOCAL 0x80000000
-#define AR_PHY_65NM_CH0_THERM_LOCAL_S 31
-#define AR_PHY_65NM_CH0_THERM_START 0x20000000
-#define AR_PHY_65NM_CH0_THERM_START_S 29
-#define AR_PHY_65NM_CH0_THERM_SAR_ADC_OUT 0x0000ff00
-#define AR_PHY_65NM_CH0_THERM_SAR_ADC_OUT_S 8
+#define AR_CH0_THERM_LOCAL 0x80000000
+#define AR_CH0_THERM_START 0x20000000
+#define AR_CH0_THERM_SAR_ADC_OUT 0x0000ff00
+#define AR_CH0_THERM_SAR_ADC_OUT_S 8
#define AR_CH0_TOP2 (AR_SREV_9300(ah) ? 0x1628c : \
(AR_SREV_9462(ah) ? 0x16290 : 0x16284))
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 93b3793cc..26fc8ecfe 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -637,6 +637,8 @@ struct ath9k_vif_iter_data {
int nwds; /* number of WDS vifs */
int nadhocs; /* number of adhoc vifs */
int nocbs; /* number of OCB vifs */
+ int nbcnvifs; /* number of beaconing vifs */
+ struct ieee80211_vif *primary_beacon_vif;
struct ieee80211_vif *primary_sta;
};
@@ -685,10 +687,11 @@ struct ath_beacon {
};
void ath9k_beacon_tasklet(unsigned long data);
-void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
- u32 changed);
+void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *main_vif,
+ bool beacons);
void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
+void ath9k_beacon_ensure_primary_slot(struct ath_softc *sc);
void ath9k_set_beacon(struct ath_softc *sc);
bool ath9k_csa_is_finished(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_csa_update(struct ath_softc *sc);
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 5cf0cd7cb..e36f947e1 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -50,7 +50,7 @@ static void ath9k_beaconq_config(struct ath_softc *sc)
txq = sc->tx.txq_map[IEEE80211_AC_BE];
ath9k_hw_get_txq_props(ah, txq->axq_qnum, &qi_be);
qi.tqi_aifs = qi_be.tqi_aifs;
- if (ah->slottime == ATH9K_SLOT_TIME_20)
+ if (ah->slottime == 20)
qi.tqi_cwmin = 2*qi_be.tqi_cwmin;
else
qi.tqi_cwmin = 4*qi_be.tqi_cwmin;
@@ -209,7 +209,6 @@ void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif)
}
sc->beacon.bslot[avp->av_bslot] = vif;
- sc->nbcnvifs++;
ath_dbg(common, CONFIG, "Added interface at beacon slot: %d\n",
avp->av_bslot);
@@ -220,15 +219,12 @@ void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif)
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_vif *avp = (void *)vif->drv_priv;
struct ath_buf *bf = avp->av_bcbuf;
- struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon;
ath_dbg(common, CONFIG, "Removing interface at beacon slot: %d\n",
avp->av_bslot);
tasklet_disable(&sc->bcon_tasklet);
- cur_conf->enable_beacon &= ~BIT(avp->av_bslot);
-
if (bf && bf->bf_mpdu) {
struct sk_buff *skb = bf->bf_mpdu;
dma_unmap_single(sc->dev, bf->bf_buf_addr,
@@ -240,12 +236,73 @@ void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif)
avp->av_bcbuf = NULL;
sc->beacon.bslot[avp->av_bslot] = NULL;
- sc->nbcnvifs--;
list_add_tail(&bf->list, &sc->beacon.bbuf);
tasklet_enable(&sc->bcon_tasklet);
}
+void ath9k_beacon_ensure_primary_slot(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ieee80211_vif *vif;
+ struct ath_vif *avp;
+ s64 tsfadjust;
+ u32 offset;
+ int first_slot = ATH_BCBUF;
+ int slot;
+
+ tasklet_disable(&sc->bcon_tasklet);
+
+ /* Find first taken slot. */
+ for (slot = 0; slot < ATH_BCBUF; slot++) {
+ if (sc->beacon.bslot[slot]) {
+ first_slot = slot;
+ break;
+ }
+ }
+ if (first_slot == 0)
+ goto out;
+
+ /* Re-enumarate all slots, moving them forward. */
+ for (slot = 0; slot < ATH_BCBUF; slot++) {
+ if (slot + first_slot < ATH_BCBUF) {
+ vif = sc->beacon.bslot[slot + first_slot];
+ sc->beacon.bslot[slot] = vif;
+
+ if (vif) {
+ avp = (void *)vif->drv_priv;
+ avp->av_bslot = slot;
+ }
+ } else {
+ sc->beacon.bslot[slot] = NULL;
+ }
+ }
+
+ vif = sc->beacon.bslot[0];
+ if (WARN_ON(!vif))
+ goto out;
+
+ /* Get the tsf_adjust value for the new first slot. */
+ avp = (void *)vif->drv_priv;
+ tsfadjust = le64_to_cpu(avp->tsf_adjust);
+
+ ath_dbg(common, CONFIG,
+ "Adjusting global TSF after beacon slot reassignment: %lld\n",
+ (signed long long)tsfadjust);
+
+ /* Modify TSF as required and update the HW. */
+ avp->chanctx->tsf_val += tsfadjust;
+ if (sc->cur_chan == avp->chanctx) {
+ offset = ath9k_hw_get_tsf_offset(&avp->chanctx->tsf_ts, NULL);
+ ath9k_hw_settsf64(sc->sc_ah, avp->chanctx->tsf_val + offset);
+ }
+
+ /* The slots' tsf_adjust will be updated by ath9k_beacon_config() later. */
+
+out:
+ tasklet_enable(&sc->bcon_tasklet);
+}
+
static int ath9k_beacon_choose_slot(struct ath_softc *sc)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -274,22 +331,33 @@ static int ath9k_beacon_choose_slot(struct ath_softc *sc)
return slot;
}
-static void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif)
+static void ath9k_set_tsfadjust(struct ath_softc *sc,
+ struct ath_beacon_config *cur_conf)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_vif *avp = (void *)vif->drv_priv;
- struct ath_beacon_config *cur_conf = &avp->chanctx->beacon;
- u32 tsfadjust;
+ s64 tsfadjust;
+ int slot;
- if (avp->av_bslot == 0)
- return;
+ for (slot = 0; slot < ATH_BCBUF; slot++) {
+ struct ath_vif *avp;
- tsfadjust = cur_conf->beacon_interval * avp->av_bslot;
- tsfadjust = TU_TO_USEC(tsfadjust) / ATH_BCBUF;
- avp->tsf_adjust = cpu_to_le64(tsfadjust);
+ if (!sc->beacon.bslot[slot])
+ continue;
- ath_dbg(common, CONFIG, "tsfadjust is: %llu for bslot: %d\n",
- (unsigned long long)tsfadjust, avp->av_bslot);
+ avp = (void *)sc->beacon.bslot[slot]->drv_priv;
+
+ /* tsf_adjust is added to the TSF value. We send out the
+ * beacon late, so we need to adjust the TSF starting point to be
+ * later in time (i.e. the theoretical first beacon has a TSF
+ * of 0 after correction).
+ */
+ tsfadjust = cur_conf->beacon_interval * avp->av_bslot;
+ tsfadjust = -TU_TO_USEC(tsfadjust) / ATH_BCBUF;
+ avp->tsf_adjust = cpu_to_le64(tsfadjust);
+
+ ath_dbg(common, CONFIG, "tsfadjust is: %lld for bslot: %d\n",
+ (signed long long)tsfadjust, avp->av_bslot);
+ }
}
bool ath9k_csa_is_finished(struct ath_softc *sc, struct ieee80211_vif *vif)
@@ -443,20 +511,28 @@ void ath9k_beacon_tasklet(unsigned long data)
* Both nexttbtt and intval have to be in usecs.
*/
static void ath9k_beacon_init(struct ath_softc *sc, u32 nexttbtt,
- u32 intval, bool reset_tsf)
+ u32 intval)
{
struct ath_hw *ah = sc->sc_ah;
ath9k_hw_disable_interrupts(ah);
- if (reset_tsf)
- ath9k_hw_reset_tsf(ah);
ath9k_beaconq_config(sc);
ath9k_hw_beaconinit(ah, nexttbtt, intval);
+ ah->imask |= ATH9K_INT_SWBA;
sc->beacon.bmisscnt = 0;
ath9k_hw_set_interrupts(ah);
ath9k_hw_enable_interrupts(ah);
}
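+/* Counterpart to ath9k_beacon_init(): mask out the SWBA and BMISS
+ * interrupts and reset the beacon-miss counter.
+ */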
+static void ath9k_beacon_stop(struct ath_softc *sc)
+{
+ ath9k_hw_disable_interrupts(sc->sc_ah);
+ sc->sc_ah->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
+ sc->beacon.bmisscnt = 0;
+ ath9k_hw_set_interrupts(sc->sc_ah);
+ ath9k_hw_enable_interrupts(sc->sc_ah);
+}
+
/*
* For multi-bss ap support beacons are either staggered evenly over N slots or
* burst together. For the former arrange for the SWBA to be delivered for each
@@ -468,7 +544,7 @@ static void ath9k_beacon_config_ap(struct ath_softc *sc,
struct ath_hw *ah = sc->sc_ah;
ath9k_cmn_beacon_config_ap(ah, conf, ATH_BCBUF);
- ath9k_beacon_init(sc, conf->nexttbtt, conf->intval, false);
+ ath9k_beacon_init(sc, conf->nexttbtt, conf->intval);
}
static void ath9k_beacon_config_sta(struct ath_hw *ah,
@@ -497,7 +573,7 @@ static void ath9k_beacon_config_adhoc(struct ath_softc *sc,
ath9k_cmn_beacon_config_adhoc(ah, conf);
- ath9k_beacon_init(sc, conf->nexttbtt, conf->intval, conf->ibss_creator);
+ ath9k_beacon_init(sc, conf->nexttbtt, conf->intval);
/*
* Set the global 'beacon has been configured' flag for the
@@ -507,44 +583,6 @@ static void ath9k_beacon_config_adhoc(struct ath_softc *sc,
set_bit(ATH_OP_BEACONS, &common->op_flags);
}
-static bool ath9k_allow_beacon_config(struct ath_softc *sc,
- struct ieee80211_vif *vif)
-{
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_vif *avp = (void *)vif->drv_priv;
-
- if (ath9k_is_chanctx_enabled()) {
- /*
- * If the VIF is not present in the current channel context,
- * then we can't do the usual opmode checks. Allow the
- * beacon config for the VIF to be updated in this case and
- * return immediately.
- */
- if (sc->cur_chan != avp->chanctx)
- return true;
- }
-
- if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) {
- if (vif->type != NL80211_IFTYPE_AP) {
- ath_dbg(common, CONFIG,
- "An AP interface is already present !\n");
- return false;
- }
- }
-
- if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) {
- if ((vif->type == NL80211_IFTYPE_STATION) &&
- test_bit(ATH_OP_BEACONS, &common->op_flags) &&
- vif != sc->cur_chan->primary_sta) {
- ath_dbg(common, CONFIG,
- "Beacon already configured for a station interface\n");
- return false;
- }
- }
-
- return true;
-}
-
static void ath9k_cache_beacon_config(struct ath_softc *sc,
struct ath_chanctx *ctx,
struct ieee80211_bss_conf *bss_conf)
@@ -580,87 +618,79 @@ static void ath9k_cache_beacon_config(struct ath_softc *sc,
if (cur_conf->dtim_period == 0)
cur_conf->dtim_period = 1;
+ ath9k_set_tsfadjust(sc, cur_conf);
}
-void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
- u32 changed)
+void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *main_vif,
+ bool beacons)
{
- struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_vif *avp = (void *)vif->drv_priv;
- struct ath_chanctx *ctx = avp->chanctx;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_vif *avp;
+ struct ath_chanctx *ctx;
struct ath_beacon_config *cur_conf;
unsigned long flags;
+ bool enabled;
bool skip_beacon = false;
- if (!ctx)
+ if (!beacons) {
+ clear_bit(ATH_OP_BEACONS, &common->op_flags);
+ ath9k_beacon_stop(sc);
return;
+ }
- cur_conf = &avp->chanctx->beacon;
- if (vif->type == NL80211_IFTYPE_AP)
- ath9k_set_tsfadjust(sc, vif);
-
- if (!ath9k_allow_beacon_config(sc, vif))
+ if (WARN_ON(!main_vif))
return;
- if (vif->type == NL80211_IFTYPE_STATION) {
- ath9k_cache_beacon_config(sc, ctx, bss_conf);
- if (ctx != sc->cur_chan)
- return;
+ avp = (void *)main_vif->drv_priv;
+ ctx = avp->chanctx;
+ cur_conf = &ctx->beacon;
+ enabled = cur_conf->enable_beacon;
+ cur_conf->enable_beacon = beacons;
+
+ if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) {
+ ath9k_cache_beacon_config(sc, ctx, &main_vif->bss_conf);
ath9k_set_beacon(sc);
set_bit(ATH_OP_BEACONS, &common->op_flags);
return;
}
- /*
- * Take care of multiple interfaces when
- * enabling/disabling SWBA.
- */
- if (changed & BSS_CHANGED_BEACON_ENABLED) {
- bool enabled = cur_conf->enable_beacon;
-
- if (!bss_conf->enable_beacon) {
- cur_conf->enable_beacon &= ~BIT(avp->av_bslot);
- } else {
- cur_conf->enable_beacon |= BIT(avp->av_bslot);
- if (!enabled)
- ath9k_cache_beacon_config(sc, ctx, bss_conf);
- }
- }
-
- if (ctx != sc->cur_chan)
- return;
+ /* Update the beacon configuration. */
+ ath9k_cache_beacon_config(sc, ctx, &main_vif->bss_conf);
/*
* Configure the HW beacon registers only when we have a valid
* beacon interval.
*/
if (cur_conf->beacon_interval) {
- /*
- * If we are joining an existing IBSS network, start beaconing
- * only after a TSF-sync has taken place. Ensure that this
- * happens by setting the appropriate flags.
+ /* Special case to sync the TSF when joining an existing IBSS.
+ * This is only done if no AP interface is active.
+ * Note that mac80211 always resets the TSF when creating a new
+ * IBSS interface.
*/
- if ((changed & BSS_CHANGED_IBSS) && !bss_conf->ibss_creator &&
- bss_conf->enable_beacon) {
+ if (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC &&
+ !enabled && beacons && !main_vif->bss_conf.ibss_creator) {
spin_lock_irqsave(&sc->sc_pm_lock, flags);
sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
skip_beacon = true;
- } else {
- ath9k_set_beacon(sc);
}
/*
* Do not set the ATH_OP_BEACONS flag for IBSS joiner mode
* here, it is done in ath9k_beacon_config_adhoc().
*/
- if (cur_conf->enable_beacon && !skip_beacon)
+ if (beacons && !skip_beacon) {
set_bit(ATH_OP_BEACONS, &common->op_flags);
- else
+ ath9k_set_beacon(sc);
+ } else {
clear_bit(ATH_OP_BEACONS, &common->op_flags);
+ ath9k_beacon_stop(sc);
+ }
+ } else {
+ clear_bit(ATH_OP_BEACONS, &common->op_flags);
+ ath9k_beacon_stop(sc);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index e56bafcf5..57e26a640 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -960,6 +960,9 @@ void ath_roc_complete(struct ath_softc *sc, enum ath_roc_complete_reason reason)
void ath_scan_complete(struct ath_softc *sc, bool abort)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct cfg80211_scan_info info = {
+ .aborted = abort,
+ };
if (abort)
ath_dbg(common, CHAN_CTX, "HW scan aborted\n");
@@ -969,7 +972,7 @@ void ath_scan_complete(struct ath_softc *sc, bool abort)
sc->offchannel.scan_req = NULL;
sc->offchannel.scan_vif = NULL;
sc->offchannel.state = ATH_OFFCHANNEL_IDLE;
- ieee80211_scan_completed(sc->hw, abort);
+ ieee80211_scan_completed(sc->hw, &info);
clear_bit(ATH_OP_SCANNING, &common->op_flags);
spin_lock_bh(&sc->chan_lock);
if (test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags))
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
index a8762711a..e2512d5bc 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.c
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
@@ -731,7 +731,7 @@ void ath9k_cmn_spectral_scan_trigger(struct ath_common *common,
struct ath_hw *ah = spec_priv->ah;
u32 rxfilter;
- if (config_enabled(CONFIG_ATH9K_TX99))
+ if (IS_ENABLED(CONFIG_ATH9K_TX99))
return;
if (!ath9k_hw_ops(ah)->spectral_scan_trigger) {
@@ -806,7 +806,7 @@ static ssize_t write_file_spec_scan_ctl(struct file *file,
char buf[32];
ssize_t len;
- if (config_enabled(CONFIG_ATH9K_TX99))
+ if (IS_ENABLED(CONFIG_ATH9K_TX99))
return -EOPNOTSUPP;
len = min(count, sizeof(buf) - 1);
@@ -1072,7 +1072,7 @@ static struct rchan_callbacks rfs_spec_scan_cb = {
void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv)
{
- if (config_enabled(CONFIG_ATH9K_DEBUGFS)) {
+ if (IS_ENABLED(CONFIG_ATH9K_DEBUGFS)) {
relay_close(spec_priv->rfs_chan_spec_scan);
spec_priv->rfs_chan_spec_scan = NULL;
}
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index d23737342..f0ab6f995 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -50,6 +50,7 @@
#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
struct ath_beacon_config {
+ struct ieee80211_vif *main_vif;
int beacon_interval;
u16 dtim_period;
u16 bmiss_timeout;
diff --git a/drivers/net/wireless/ath/ath9k/dynack.c b/drivers/net/wireless/ath/ath9k/dynack.c
index d2ff0fc04..7334c9b09 100644
--- a/drivers/net/wireless/ath/ath9k/dynack.c
+++ b/drivers/net/wireless/ath/ath9k/dynack.c
@@ -280,7 +280,7 @@ EXPORT_SYMBOL(ath_dynack_sample_ack_ts);
void ath_dynack_node_init(struct ath_hw *ah, struct ath_node *an)
{
/* ackto = slottime + sifs + air delay */
- u32 ackto = ATH9K_SLOT_TIME_9 + 16 + 64;
+ u32 ackto = 9 + 16 + 64;
struct ath_dynack *da = &ah->dynack;
an->ackto = ackto;
@@ -315,7 +315,7 @@ EXPORT_SYMBOL(ath_dynack_node_deinit);
void ath_dynack_reset(struct ath_hw *ah)
{
/* ackto = slottime + sifs + air delay */
- u32 ackto = ATH9K_SLOT_TIME_9 + 16 + 64;
+ u32 ackto = 9 + 16 + 64;
struct ath_dynack *da = &ah->dynack;
da->lto = jiffies;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index a794157a1..a449588a8 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -15,6 +15,7 @@
*/
#include "hw.h"
+#include <linux/ath9k_platform.h>
void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val)
{
@@ -108,26 +109,42 @@ void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data,
}
}
-static bool ath9k_hw_nvram_read_blob(struct ath_hw *ah, u32 off,
- u16 *data)
+static bool ath9k_hw_nvram_read_array(u16 *blob, size_t blob_size,
+ off_t offset, u16 *data)
{
- u16 *blob_data;
-
- if (off * sizeof(u16) > ah->eeprom_blob->size)
+ if (offset >= blob_size)
return false;
- blob_data = (u16 *)ah->eeprom_blob->data;
- *data = blob_data[off];
+ *data = blob[offset];
return true;
}
+static bool ath9k_hw_nvram_read_pdata(struct ath9k_platform_data *pdata,
+ off_t offset, u16 *data)
+{
+ return ath9k_hw_nvram_read_array(pdata->eeprom_data,
+ ARRAY_SIZE(pdata->eeprom_data),
+ offset, data);
+}
+
+static bool ath9k_hw_nvram_read_firmware(const struct firmware *eeprom_blob,
+ off_t offset, u16 *data)
+{
+ return ath9k_hw_nvram_read_array((u16 *) eeprom_blob->data,
+ eeprom_blob->size / sizeof(u16),
+ offset, data);
+}
+
bool ath9k_hw_nvram_read(struct ath_hw *ah, u32 off, u16 *data)
{
struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_platform_data *pdata = ah->dev->platform_data;
bool ret;
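+ /* Lookup order: firmware-loaded blob first, then platform data, then
+ * the bus EEPROM ops.
+ */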
if (ah->eeprom_blob)
- ret = ath9k_hw_nvram_read_blob(ah, off, data);
+ ret = ath9k_hw_nvram_read_firmware(ah->eeprom_blob, off, data);
+ else if (pdata && !pdata->use_eeprom && pdata->eeprom_data)
+ ret = ath9k_hw_nvram_read_pdata(pdata, off, data);
else
ret = common->bus_ops->eeprom_read(common, off, data);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index e6bcb4c90..2c0e4d26e 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -45,7 +45,7 @@ void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv)
* Long slot time : 2x cwmin
* Short slot time : 4x cwmin
*/
- if (ah->slottime == ATH9K_SLOT_TIME_20)
+ if (ah->slottime == 20)
qi.tqi_cwmin = 2*qi_be.tqi_cwmin;
else
qi.tqi_cwmin = 4*qi_be.tqi_cwmin;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index c148c6c50..b65c1b661 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -678,7 +678,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
for (i = 0; i < ATH9K_HTC_MAX_BCN_VIF; i++)
priv->beacon.bslot[i] = NULL;
- priv->beacon.slottime = ATH9K_SLOT_TIME_9;
+ priv->beacon.slottime = 9;
ath9k_cmn_init_channels_rates(common);
ath9k_cmn_init_crypto(ah);
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 8b2895f9a..14b13f07c 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -454,7 +454,7 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
if (AR_SREV_9100(ah))
ah->sta_id1_defaults |= AR_STA_ID1_AR9100_BA_FIX;
- ah->slottime = ATH9K_SLOT_TIME_9;
+ ah->slottime = 9;
ah->globaltxtimeout = (u32) -1;
ah->power_mode = ATH9K_PM_UNDEFINED;
ah->htc_reset_init = true;
@@ -471,33 +471,34 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S);
}
-static int ath9k_hw_init_macaddr(struct ath_hw *ah)
+static void ath9k_hw_init_macaddr(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- u32 sum;
int i;
u16 eeval;
static const u32 EEP_MAC[] = { EEP_MAC_LSW, EEP_MAC_MID, EEP_MAC_MSW };
- sum = 0;
+ /* MAC address may already be loaded via ath9k_platform_data */
+ if (is_valid_ether_addr(common->macaddr))
+ return;
+
for (i = 0; i < 3; i++) {
eeval = ah->eep_ops->get_eeprom(ah, EEP_MAC[i]);
- sum += eeval;
common->macaddr[2 * i] = eeval >> 8;
common->macaddr[2 * i + 1] = eeval & 0xff;
}
- if (!is_valid_ether_addr(common->macaddr)) {
- ath_err(common,
- "eeprom contains invalid mac address: %pM\n",
- common->macaddr);
- random_ether_addr(common->macaddr);
- ath_err(common,
- "random mac address will be used: %pM\n",
- common->macaddr);
- }
+ if (is_valid_ether_addr(common->macaddr))
+ return;
- return 0;
+ ath_err(common, "eeprom contains invalid mac address: %pM\n",
+ common->macaddr);
+
+ random_ether_addr(common->macaddr);
+ ath_err(common, "random mac address will be used: %pM\n",
+ common->macaddr);
}
static int ath9k_hw_post_init(struct ath_hw *ah)
@@ -636,12 +637,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
if (r)
return r;
- r = ath9k_hw_init_macaddr(ah);
- if (r) {
- ath_err(common, "Failed to initialize MAC address\n");
- return r;
- }
-
+ ath9k_hw_init_macaddr(ah);
ath9k_hw_init_hang_checks(ah);
common->state = ATH_HW_INITIALIZED;
@@ -1832,8 +1828,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
u32 saveLedState;
u32 saveDefAntenna;
u32 macStaId1;
+ struct timespec tsf_ts;
+ u32 tsf_offset;
u64 tsf = 0;
- s64 usec = 0;
int r;
bool start_mci_reset = false;
bool save_fullsleep = ah->chip_fullsleep;
@@ -1877,8 +1874,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;
/* Save TSF before chip reset, a cold reset clears it */
+ getrawmonotonic(&tsf_ts);
tsf = ath9k_hw_gettsf64(ah);
- usec = ktime_to_us(ktime_get_raw());
saveLedState = REG_READ(ah, AR_CFG_LED) &
(AR_CFG_LED_ASSOC_CTL | AR_CFG_LED_MODE_SEL |
@@ -1911,8 +1908,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
}
/* Restore TSF */
- usec = ktime_to_us(ktime_get_raw()) - usec;
- ath9k_hw_settsf64(ah, tsf + usec);
+ tsf_offset = ath9k_hw_get_tsf_offset(&tsf_ts, NULL);
+ ath9k_hw_settsf64(ah, tsf + tsf_offset);
if (AR_SREV_9280_20_OR_LATER(ah))
REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
@@ -1932,12 +1929,11 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
/*
* Some AR91xx SoC devices frequently fail to accept TSF writes
* right after the chip reset. When that happens, write a new
- * value after the initvals have been applied, with an offset
- * based on measured time difference
+ * value after the initvals have been applied.
*/
if (AR_SREV_9100(ah) && (ath9k_hw_gettsf64(ah) < tsf)) {
- tsf += 1500;
- ath9k_hw_settsf64(ah, tsf);
+ tsf_offset = ath9k_hw_get_tsf_offset(&tsf_ts, NULL);
+ ath9k_hw_settsf64(ah, tsf + tsf_offset);
}
ath9k_hw_init_mfp(ah);
@@ -2486,6 +2482,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
return -EINVAL;
}
+ ath9k_gpio_cap_init(ah);
+
if (AR_SREV_9485(ah) ||
AR_SREV_9285(ah) ||
AR_SREV_9330(ah) ||
@@ -2535,8 +2533,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
else
pCap->hw_caps &= ~ATH9K_HW_CAP_HT;
- ath9k_gpio_cap_init(ah);
-
if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah))
pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
else
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 9cbca1229..2a5d3ad11 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -830,6 +830,7 @@ struct ath_hw {
/* Calibration */
u32 supp_cals;
struct ath9k_cal_list iq_caldata;
+ struct ath9k_cal_list temp_caldata;
struct ath9k_cal_list adcgain_caldata;
struct ath9k_cal_list adcdc_caldata;
struct ath9k_cal_list *cal_list;
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 2ee862475..cfa3fe82a 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -372,7 +372,7 @@ static void ath9k_init_misc(struct ath_softc *sc)
common->last_rssi = ATH_RSSI_DUMMY_MARKER;
memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
- sc->beacon.slottime = ATH9K_SLOT_TIME_9;
+ sc->beacon.slottime = 9;
for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++)
sc->beacon.bslot[i] = NULL;
@@ -512,31 +512,52 @@ static void ath9k_eeprom_release(struct ath_softc *sc)
release_firmware(sc->sc_ah->eeprom_blob);
}
-static int ath9k_init_soc_platform(struct ath_softc *sc)
+static int ath9k_init_platform(struct ath_softc *sc)
{
struct ath9k_platform_data *pdata = sc->dev->platform_data;
struct ath_hw *ah = sc->sc_ah;
- int ret = 0;
+ struct ath_common *common = ath9k_hw_common(ah);
+ int ret;
if (!pdata)
return 0;
+ if (!pdata->use_eeprom) {
+ ah->ah_flags &= ~AH_USE_EEPROM;
+ ah->gpio_mask = pdata->gpio_mask;
+ ah->gpio_val = pdata->gpio_val;
+ ah->led_pin = pdata->led_pin;
+ ah->is_clk_25mhz = pdata->is_clk_25mhz;
+ ah->get_mac_revision = pdata->get_mac_revision;
+ ah->external_reset = pdata->external_reset;
+ ah->disable_2ghz = pdata->disable_2ghz;
+ ah->disable_5ghz = pdata->disable_5ghz;
+
+ if (!pdata->endian_check)
+ ah->ah_flags |= AH_NO_EEP_SWAP;
+ }
+
if (pdata->eeprom_name) {
ret = ath9k_eeprom_request(sc, pdata->eeprom_name);
if (ret)
return ret;
}
+ if (pdata->led_active_high)
+ ah->config.led_active_high = true;
+
if (pdata->tx_gain_buffalo)
ah->config.tx_gain_buffalo = true;
- return ret;
+ if (pdata->macaddr)
+ ether_addr_copy(common->macaddr, pdata->macaddr);
+
+ return 0;
}
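
For reference, platform data reaches this function from a board file. A hedged example of a minimal ath9k_platform_data instance (all values are made up; field names follow include/linux/ath9k_platform.h):

    #include <linux/ath9k_platform.h>
    #include <linux/etherdevice.h>

    static u8 example_wmac_addr[ETH_ALEN] = {
        0x00, 0x11, 0x22, 0x33, 0x44, 0x55      /* made-up address */
    };

    static struct ath9k_platform_data example_wmac_pdata = {
        .macaddr      = example_wmac_addr,
        .led_pin      = 1,
        .is_clk_25mhz = true,
        .use_eeprom   = false,  /* calibration comes from eeprom_data instead */
    };
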
static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
const struct ath_bus_ops *bus_ops)
{
- struct ath9k_platform_data *pdata = sc->dev->platform_data;
struct ath_hw *ah = NULL;
struct ath9k_hw_capabilities *pCap;
struct ath_common *common;
@@ -550,6 +571,8 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
ah->dev = sc->dev;
ah->hw = sc->hw;
ah->hw_version.devid = devid;
+ ah->ah_flags |= AH_USE_EEPROM;
+ ah->led_pin = -1;
ah->reg_ops.read = ath9k_ioread32;
ah->reg_ops.multi_read = ath9k_multi_ioread32;
ah->reg_ops.write = ath9k_iowrite32;
@@ -569,22 +592,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
if (!ath9k_is_chanctx_enabled())
sc->cur_chan->hw_queue_base = 0;
- if (!pdata || pdata->use_eeprom) {
- ah->ah_flags |= AH_USE_EEPROM;
- sc->sc_ah->led_pin = -1;
- } else {
- sc->sc_ah->gpio_mask = pdata->gpio_mask;
- sc->sc_ah->gpio_val = pdata->gpio_val;
- sc->sc_ah->led_pin = pdata->led_pin;
- ah->is_clk_25mhz = pdata->is_clk_25mhz;
- ah->get_mac_revision = pdata->get_mac_revision;
- ah->external_reset = pdata->external_reset;
- ah->disable_2ghz = pdata->disable_2ghz;
- ah->disable_5ghz = pdata->disable_5ghz;
- if (!pdata->endian_check)
- ah->ah_flags |= AH_NO_EEP_SWAP;
- }
-
common->ops = &ah->reg_ops;
common->bus_ops = bus_ops;
common->ps_ops = &ath9k_ps_ops;
@@ -600,7 +607,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
*/
ath9k_init_pcoem_platform(sc);
- ret = ath9k_init_soc_platform(sc);
+ ret = ath9k_init_platform(sc);
if (ret)
return ret;
@@ -646,9 +653,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
if (ret)
goto err_hw;
- if (pdata && pdata->macaddr)
- memcpy(common->macaddr, pdata->macaddr, ETH_ALEN);
-
ret = ath9k_init_queues(sc);
if (ret)
goto err_queues;
@@ -839,7 +843,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
NL80211_FEATURE_P2P_GO_CTWIN;
- if (!config_enabled(CONFIG_ATH9K_TX99)) {
+ if (!IS_ENABLED(CONFIG_ATH9K_TX99)) {
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 7fbf7f965..3bab01435 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -65,10 +65,6 @@
#define INIT_SSH_RETRY 32
#define INIT_SLG_RETRY 32
-#define ATH9K_SLOT_TIME_6 6
-#define ATH9K_SLOT_TIME_9 9
-#define ATH9K_SLOT_TIME_20 20
-
#define ATH9K_TXERR_XRETRY 0x01
#define ATH9K_TXERR_FILT 0x02
#define ATH9K_TXERR_FIFO 0x04
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 4b59a4c15..7cb65c303 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -915,6 +915,22 @@ static bool ath9k_uses_beacons(int type)
}
}
+static void ath9k_vif_iter_set_beacon(struct ath9k_vif_iter_data *iter_data,
+ struct ieee80211_vif *vif)
+{
+ /* Use the first (configured) interface, preferring AP interfaces. */
+ if (!iter_data->primary_beacon_vif) {
+ iter_data->primary_beacon_vif = vif;
+ } else {
+ if (iter_data->primary_beacon_vif->type != NL80211_IFTYPE_AP &&
+ vif->type == NL80211_IFTYPE_AP)
+ iter_data->primary_beacon_vif = vif;
+ }
+
+ iter_data->beacons = true;
+ iter_data->nbcnvifs += 1;
+}
+
static void ath9k_vif_iter(struct ath9k_vif_iter_data *iter_data,
u8 *mac, struct ieee80211_vif *vif)
{
@@ -931,11 +947,13 @@ static void ath9k_vif_iter(struct ath9k_vif_iter_data *iter_data,
}
if (!vif->bss_conf.use_short_slot)
- iter_data->slottime = ATH9K_SLOT_TIME_20;
+ iter_data->slottime = 20;
switch (vif->type) {
case NL80211_IFTYPE_AP:
iter_data->naps++;
+ if (vif->bss_conf.enable_beacon)
+ ath9k_vif_iter_set_beacon(iter_data, vif);
break;
case NL80211_IFTYPE_STATION:
iter_data->nstations++;
@@ -948,12 +966,12 @@ static void ath9k_vif_iter(struct ath9k_vif_iter_data *iter_data,
case NL80211_IFTYPE_ADHOC:
iter_data->nadhocs++;
if (vif->bss_conf.enable_beacon)
- iter_data->beacons = true;
+ ath9k_vif_iter_set_beacon(iter_data, vif);
break;
case NL80211_IFTYPE_MESH_POINT:
iter_data->nmeshes++;
if (vif->bss_conf.enable_beacon)
- iter_data->beacons = true;
+ ath9k_vif_iter_set_beacon(iter_data, vif);
break;
case NL80211_IFTYPE_WDS:
iter_data->nwds++;
@@ -1004,7 +1022,7 @@ void ath9k_calculate_iter_data(struct ath_softc *sc,
*/
memset(iter_data, 0, sizeof(*iter_data));
eth_broadcast_addr(iter_data->mask);
- iter_data->slottime = ATH9K_SLOT_TIME_9;
+ iter_data->slottime = 9;
list_for_each_entry(avp, &ctx->vifs, list)
ath9k_vif_iter(iter_data, avp->vif->addr, avp->vif);
@@ -1066,7 +1084,7 @@ static void ath9k_set_offchannel_state(struct ath_softc *sc)
ah->opmode = vif->type;
ah->imask &= ~ATH9K_INT_SWBA;
ah->imask &= ~ATH9K_INT_TSFOOR;
- ah->slottime = ATH9K_SLOT_TIME_9;
+ ah->slottime = 9;
ath_hw_setbssidmask(common);
ath9k_hw_setopmode(ah);
@@ -1086,7 +1104,6 @@ void ath9k_calculate_summary_state(struct ath_softc *sc,
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_vif_iter_data iter_data;
- struct ath_beacon_config *cur_conf;
ath_chanctx_check_active(sc, ctx);
@@ -1108,13 +1125,12 @@ void ath9k_calculate_summary_state(struct ath_softc *sc,
ath_hw_setbssidmask(common);
if (iter_data.naps > 0) {
- cur_conf = &ctx->beacon;
ath9k_hw_set_tsfadjust(ah, true);
ah->opmode = NL80211_IFTYPE_AP;
- if (cur_conf->enable_beacon)
- iter_data.beacons = true;
} else {
ath9k_hw_set_tsfadjust(ah, false);
+ if (iter_data.beacons)
+ ath9k_beacon_ensure_primary_slot(sc);
if (iter_data.nmeshes)
ah->opmode = NL80211_IFTYPE_MESH_POINT;
@@ -1139,11 +1155,11 @@ void ath9k_calculate_summary_state(struct ath_softc *sc,
ctx->switch_after_beacon = true;
}
- ah->imask &= ~ATH9K_INT_SWBA;
if (ah->opmode == NL80211_IFTYPE_STATION) {
bool changed = (iter_data.primary_sta != ctx->primary_sta);
if (iter_data.primary_sta) {
+ iter_data.primary_beacon_vif = iter_data.primary_sta;
iter_data.beacons = true;
ath9k_set_assoc_state(sc, iter_data.primary_sta,
changed);
@@ -1156,16 +1172,12 @@ void ath9k_calculate_summary_state(struct ath_softc *sc,
if (ath9k_hw_mci_is_enabled(sc->sc_ah))
ath9k_mci_update_wlan_channels(sc, true);
}
- } else if (iter_data.beacons) {
- ah->imask |= ATH9K_INT_SWBA;
}
+ sc->nbcnvifs = iter_data.nbcnvifs;
+ ath9k_beacon_config(sc, iter_data.primary_beacon_vif,
+ iter_data.beacons);
ath9k_hw_set_interrupts(ah);
- if (iter_data.beacons)
- set_bit(ATH_OP_BEACONS, &common->op_flags);
- else
- clear_bit(ATH_OP_BEACONS, &common->op_flags);
-
if (ah->slottime != iter_data.slottime) {
ah->slottime = iter_data.slottime;
ath9k_hw_init_global_settings(ah);
@@ -1244,7 +1256,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
- if (config_enabled(CONFIG_ATH9K_TX99)) {
+ if (IS_ENABLED(CONFIG_ATH9K_TX99)) {
if (sc->cur_chan->nvifs >= 1) {
mutex_unlock(&sc->mutex);
return -EOPNOTSUPP;
@@ -1294,7 +1306,7 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
- if (config_enabled(CONFIG_ATH9K_TX99)) {
+ if (IS_ENABLED(CONFIG_ATH9K_TX99)) {
mutex_unlock(&sc->mutex);
return -EOPNOTSUPP;
}
@@ -1354,7 +1366,7 @@ static void ath9k_enable_ps(struct ath_softc *sc)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- if (config_enabled(CONFIG_ATH9K_TX99))
+ if (IS_ENABLED(CONFIG_ATH9K_TX99))
return;
sc->ps_enabled = true;
@@ -1373,7 +1385,7 @@ static void ath9k_disable_ps(struct ath_softc *sc)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- if (config_enabled(CONFIG_ATH9K_TX99))
+ if (IS_ENABLED(CONFIG_ATH9K_TX99))
return;
sc->ps_enabled = false;
@@ -1782,9 +1794,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
if ((changed & BSS_CHANGED_BEACON_ENABLED) ||
(changed & BSS_CHANGED_BEACON_INT) ||
(changed & BSS_CHANGED_BEACON_INFO)) {
- ath9k_beacon_config(sc, vif, changed);
- if (changed & BSS_CHANGED_BEACON_ENABLED)
- ath9k_calculate_summary_state(sc, avp->chanctx);
+ ath9k_calculate_summary_state(sc, avp->chanctx);
}
if ((avp->chanctx == sc->cur_chan) &&
@@ -1793,6 +1803,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
slottime = 9;
else
slottime = 20;
+
if (vif->type == NL80211_IFTYPE_AP) {
/*
* Defer update, so that connected stations can adjust
@@ -1828,11 +1839,19 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
static u64 ath9k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct ath_softc *sc = hw->priv;
+ struct ath_vif *avp = (void *)vif->drv_priv;
u64 tsf;
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
- tsf = ath9k_hw_gettsf64(sc->sc_ah);
+ /* Get current TSF either from HW or kernel time. */
+ if (sc->cur_chan == avp->chanctx) {
+ tsf = ath9k_hw_gettsf64(sc->sc_ah);
+ } else {
+ tsf = sc->cur_chan->tsf_val +
+ ath9k_hw_get_tsf_offset(&sc->cur_chan->tsf_ts, NULL);
+ }
+ tsf += le64_to_cpu(avp->tsf_adjust);
ath9k_ps_restore(sc);
mutex_unlock(&sc->mutex);
@@ -1844,10 +1863,15 @@ static void ath9k_set_tsf(struct ieee80211_hw *hw,
u64 tsf)
{
struct ath_softc *sc = hw->priv;
+ struct ath_vif *avp = (void *)vif->drv_priv;
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
- ath9k_hw_settsf64(sc->sc_ah, tsf);
+ tsf -= le64_to_cpu(avp->tsf_adjust);
+ getrawmonotonic(&avp->chanctx->tsf_ts);
+ if (sc->cur_chan == avp->chanctx)
+ ath9k_hw_settsf64(sc->sc_ah, tsf);
+ avp->chanctx->tsf_val = tsf;
ath9k_ps_restore(sc);
mutex_unlock(&sc->mutex);
}
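
When the vif's channel context is not the one currently programmed into the hardware, the TSF is reconstructed in software from the stored tsf_val/tsf_ts pair plus the per-vif beacon-staggering offset. A condensed sketch of that combination, using only names visible in the hunks above:

    /* Sketch: software TSF for a channel context, combined as above */
    static u64 example_ctx_tsf(struct ath_chanctx *ctx, struct ath_vif *avp)
    {
        u64 tsf = ctx->tsf_val +
                  ath9k_hw_get_tsf_offset(&ctx->tsf_ts, NULL);

        return tsf + le64_to_cpu(avp->tsf_adjust);
    }
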
@@ -1855,11 +1879,15 @@ static void ath9k_set_tsf(struct ieee80211_hw *hw,
static void ath9k_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct ath_softc *sc = hw->priv;
+ struct ath_vif *avp = (void *)vif->drv_priv;
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
- ath9k_hw_reset_tsf(sc->sc_ah);
+ getrawmonotonic(&avp->chanctx->tsf_ts);
+ if (sc->cur_chan == avp->chanctx)
+ ath9k_hw_reset_tsf(sc->sc_ah);
+ avp->chanctx->tsf_val = 0;
ath9k_ps_restore(sc);
mutex_unlock(&sc->mutex);
@@ -1931,7 +1959,7 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
struct ieee80211_channel *chan;
int pos;
- if (config_enabled(CONFIG_ATH9K_TX99))
+ if (IS_ENABLED(CONFIG_ATH9K_TX99))
return -EOPNOTSUPP;
spin_lock_bh(&common->cc_lock);
@@ -1981,7 +2009,7 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw,
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
- if (config_enabled(CONFIG_ATH9K_TX99))
+ if (IS_ENABLED(CONFIG_ATH9K_TX99))
return;
mutex_lock(&sc->mutex);
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 7cdaf40c3..0dd454acf 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -19,7 +19,6 @@
#include <linux/nl80211.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
-#include <linux/ath9k_platform.h>
#include <linux/module.h>
#include "ath9k.h"
@@ -786,35 +785,21 @@ static void ath_pci_read_cachesize(struct ath_common *common, int *csz)
static bool ath_pci_eeprom_read(struct ath_common *common, u32 off, u16 *data)
{
- struct ath_softc *sc = (struct ath_softc *) common->priv;
- struct ath9k_platform_data *pdata = sc->dev->platform_data;
-
- if (pdata && !pdata->use_eeprom) {
- if (off >= (ARRAY_SIZE(pdata->eeprom_data))) {
- ath_err(common,
- "%s: eeprom read failed, offset %08x is out of range\n",
- __func__, off);
- }
-
- *data = pdata->eeprom_data[off];
- } else {
- struct ath_hw *ah = (struct ath_hw *) common->ah;
-
- common->ops->read(ah, AR5416_EEPROM_OFFSET +
- (off << AR5416_EEPROM_S));
-
- if (!ath9k_hw_wait(ah,
- AR_EEPROM_STATUS_DATA,
- AR_EEPROM_STATUS_DATA_BUSY |
- AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0,
- AH_WAIT_TIMEOUT)) {
- return false;
- }
-
- *data = MS(common->ops->read(ah, AR_EEPROM_STATUS_DATA),
- AR_EEPROM_STATUS_DATA_VAL);
+ struct ath_hw *ah = (struct ath_hw *) common->ah;
+
+ common->ops->read(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S));
+
+ if (!ath9k_hw_wait(ah,
+ AR_EEPROM_STATUS_DATA,
+ AR_EEPROM_STATUS_DATA_BUSY |
+ AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0,
+ AH_WAIT_TIMEOUT)) {
+ return false;
}
+ *data = MS(common->ops->read(ah, AR_EEPROM_STATUS_DATA),
+ AR_EEPROM_STATUS_DATA_VAL);
+
return true;
}
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 32160fca8..669734252 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -377,7 +377,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
u32 rfilt;
- if (config_enabled(CONFIG_ATH9K_TX99))
+ if (IS_ENABLED(CONFIG_ATH9K_TX99))
return 0;
rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c
index ac4781f37..16aca9e28 100644
--- a/drivers/net/wireless/ath/ath9k/tx99.c
+++ b/drivers/net/wireless/ath/ath9k/tx99.c
@@ -132,7 +132,6 @@ static int ath9k_tx99_init(struct ath_softc *sc)
ath9k_ps_wakeup(sc);
ath9k_hw_disable_interrupts(ah);
- atomic_set(&ah->intr_ref_cnt, -1);
ath_drain_all_txq(sc);
ath_stoprecv(sc);
@@ -266,7 +265,7 @@ static const struct file_operations fops_tx99_power = {
void ath9k_tx99_init_debug(struct ath_softc *sc)
{
- if (!AR_SREV_9300_20_OR_LATER(sc->sc_ah))
+ if (!AR_SREV_9280_20_OR_LATER(sc->sc_ah))
return;
debugfs_create_file("tx99", S_IRUSR | S_IWUSR,
diff --git a/drivers/net/wireless/ath/carl9170/Kconfig b/drivers/net/wireless/ath/carl9170/Kconfig
index 1a796e5f6..2e34baeaf 100644
--- a/drivers/net/wireless/ath/carl9170/Kconfig
+++ b/drivers/net/wireless/ath/carl9170/Kconfig
@@ -5,12 +5,10 @@ config CARL9170
select FW_LOADER
select CRC32
help
- This is another driver for the Atheros "otus" 802.11n USB devices.
+ This is the mainline driver for the Atheros "otus" 802.11n USB devices.
- This driver provides more features than the original,
- but it needs a special firmware (carl9170-1.fw) to do that.
-
- The firmware can be downloaded from our wiki here:
+ It needs a special firmware (carl9170-1.fw), which can be downloaded
+ from our wiki here:
<http://wireless.kernel.org/en/users/Drivers/carl9170>
If you choose to build a module, it'll be called carl9170.
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index 2303ef962..2f8136d50 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -352,7 +352,7 @@ dfs_pattern_detector_init(struct ath_common *common,
{
struct dfs_pattern_detector *dpd;
- if (!config_enabled(CONFIG_CFG80211_CERTIFICATION_ONUS))
+ if (!IS_ENABLED(CONFIG_CFG80211_CERTIFICATION_ONUS))
return NULL;
dpd = kmalloc(sizeof(*dpd), GFP_KERNEL);
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 7e15ed9ed..f85060377 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -116,7 +116,7 @@ static const struct ieee80211_regdomain ath_world_regdom_67_68_6A_6C = {
static bool dynamic_country_user_possible(struct ath_regulatory *reg)
{
- if (config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_CERT_TESTING))
+ if (IS_ENABLED(CONFIG_ATH_REG_DYNAMIC_USER_CERT_TESTING))
return true;
switch (reg->country_code) {
@@ -188,7 +188,7 @@ static bool dynamic_country_user_possible(struct ath_regulatory *reg)
static bool ath_reg_dyn_country_user_allow(struct ath_regulatory *reg)
{
- if (!config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS))
+ if (!IS_ENABLED(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS))
return false;
if (!dynamic_country_user_possible(reg))
return false;
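
config_enabled() was a private kconfig.h helper (true only for built-in options) that mainline was removing; IS_ENABLED() is the public replacement and is true for both =y and =m, which is equivalent for boolean symbols such as CONFIG_ATH9K_TX99. A small illustration, assuming nothing beyond <linux/kconfig.h>:

    #include <linux/kconfig.h>
    #include <linux/printk.h>

    static void example_report_tx99(void)
    {
        /* folds to a compile-time 0/1; both branches stay type-checked */
        if (IS_ENABLED(CONFIG_ATH9K_TX99))
            pr_info("TX99 test mode compiled in\n");
        else
            pr_info("TX99 test mode disabled\n");
    }
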
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
index 8643801f3..231fd022f 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -35,26 +35,27 @@ void *wcn36xx_dxe_get_next_bd(struct wcn36xx *wcn, bool is_low)
return ch->head_blk_ctl->bd_cpu_addr;
}
+static void wcn36xx_ccu_write_register(struct wcn36xx *wcn, int addr, int data)
+{
+ wcn36xx_dbg(WCN36XX_DBG_DXE,
+ "wcn36xx_ccu_write_register: addr=%x, data=%x\n",
+ addr, data);
+
+ writel(data, wcn->ccu_base + addr);
+}
+
static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
{
wcn36xx_dbg(WCN36XX_DBG_DXE,
"wcn36xx_dxe_write_register: addr=%x, data=%x\n",
addr, data);
- writel(data, wcn->mmio + addr);
+ writel(data, wcn->dxe_base + addr);
}
-#define wcn36xx_dxe_write_register_x(wcn, reg, reg_data) \
-do { \
- if (wcn->chip_version == WCN36XX_CHIP_3680) \
- wcn36xx_dxe_write_register(wcn, reg ## _3680, reg_data); \
- else \
- wcn36xx_dxe_write_register(wcn, reg ## _3660, reg_data); \
-} while (0) \
-
static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
{
- *data = readl(wcn->mmio + addr);
+ *data = readl(wcn->dxe_base + addr);
wcn36xx_dbg(WCN36XX_DBG_DXE,
"wcn36xx_dxe_read_register: addr=%x, data=%x\n",
@@ -701,9 +702,13 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
reg_data = WCN36XX_DXE_REG_RESET;
wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);
- /* Setting interrupt path */
- reg_data = WCN36XX_DXE_CCU_INT;
- wcn36xx_dxe_write_register_x(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);
+ /* Select channels for rx avail and xfer done interrupts... */
+ reg_data = (WCN36XX_DXE_INT_CH3_MASK | WCN36XX_DXE_INT_CH1_MASK) << 16 |
+ WCN36XX_DXE_INT_CH0_MASK | WCN36XX_DXE_INT_CH4_MASK;
+ if (wcn->is_pronto)
+ wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_PRONTO, reg_data);
+ else
+ wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_RIVA, reg_data);
/***************************************/
/* Init descriptors for TX LOW channel */
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.h b/drivers/net/wireless/ath/wcn36xx/dxe.h
index 3eca4f959..c012e8077 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.h
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.h
@@ -28,11 +28,10 @@ H2H_TEST_RX_TX = DMA2
*/
/* DXE registers */
-#define WCN36XX_DXE_MEM_REG 0x202000
+#define WCN36XX_DXE_MEM_REG 0
-#define WCN36XX_DXE_CCU_INT 0xA0011
-#define WCN36XX_DXE_REG_CCU_INT_3660 0x200b10
-#define WCN36XX_DXE_REG_CCU_INT_3680 0x2050dc
+#define WCN36XX_CCU_DXE_INT_SELECT_RIVA 0x310
+#define WCN36XX_CCU_DXE_INT_SELECT_PRONTO 0x10dc
/* TODO This must be calculated properly, not hardcoded */
#define WCN36XX_DXE_CTRL_TX_L 0x328a44
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index 658bfb8ba..4f87ef1e1 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -4123,7 +4123,7 @@ struct wcn36xx_hal_update_scan_params_req {
/* Update scan params - sent from host to PNO to be used during PNO
* scanning */
-struct update_scan_params_req_ex {
+struct wcn36xx_hal_update_scan_params_req_ex {
struct wcn36xx_hal_msg_header header;
@@ -4151,7 +4151,7 @@ struct update_scan_params_req_ex {
/* Cb State */
enum phy_chan_bond_state state;
-};
+} __packed;
/* Update scan params - sent from host to PNO to be used during PNO
* scanning */
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 8e2c5ff7a..22e584246 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -19,6 +19,8 @@
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
#include "wcn36xx.h"
unsigned int wcn36xx_dbg_mask;
@@ -259,17 +261,6 @@ static void wcn36xx_feat_caps_info(struct wcn36xx *wcn)
}
}
-static void wcn36xx_detect_chip_version(struct wcn36xx *wcn)
-{
- if (get_feat_caps(wcn->fw_feat_caps, DOT11AC)) {
- wcn36xx_info("Chip is 3680\n");
- wcn->chip_version = WCN36XX_CHIP_3680;
- } else {
- wcn36xx_info("Chip is 3660\n");
- wcn->chip_version = WCN36XX_CHIP_3660;
- }
-}
-
static int wcn36xx_start(struct ieee80211_hw *hw)
{
struct wcn36xx *wcn = hw->priv;
@@ -324,9 +315,6 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
wcn36xx_feat_caps_info(wcn);
}
- wcn36xx_detect_chip_version(wcn);
- wcn36xx_smd_update_cfg(wcn, WCN36XX_HAL_CFG_ENABLE_MC_ADDR_LIST, 1);
-
/* DMA channel initialization */
ret = wcn36xx_dxe_init(wcn);
if (ret) {
@@ -1064,7 +1052,11 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
struct platform_device *pdev)
{
+ struct device_node *mmio_node;
struct resource *res;
+ int index;
+ int ret;
+
/* Set TX IRQ */
res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
"wcnss_wlantx_irq");
@@ -1083,19 +1075,40 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
}
wcn->rx_irq = res->start;
- /* Map the memory */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "wcnss_mmio");
- if (!res) {
- wcn36xx_err("failed to get mmio\n");
- return -ENOENT;
+ mmio_node = of_parse_phandle(pdev->dev.parent->of_node, "qcom,mmio", 0);
+ if (!mmio_node) {
+ wcn36xx_err("failed to acquire qcom,mmio reference\n");
+ return -EINVAL;
}
- wcn->mmio = ioremap(res->start, resource_size(res));
- if (!wcn->mmio) {
- wcn36xx_err("failed to map io memory\n");
- return -ENOMEM;
+
+ wcn->is_pronto = !!of_device_is_compatible(mmio_node, "qcom,pronto");
+
+ /* Map the CCU memory */
+ index = of_property_match_string(mmio_node, "reg-names", "ccu");
+ wcn->ccu_base = of_iomap(mmio_node, index);
+ if (!wcn->ccu_base) {
+ wcn36xx_err("failed to map ccu memory\n");
+ ret = -ENOMEM;
+ goto put_mmio_node;
}
+
+ /* Map the DXE memory */
+ index = of_property_match_string(mmio_node, "reg-names", "dxe");
+ wcn->dxe_base = of_iomap(mmio_node, index);
+ if (!wcn->dxe_base) {
+ wcn36xx_err("failed to map dxe memory\n");
+ ret = -ENOMEM;
+ goto unmap_ccu;
+ }
+
+ of_node_put(mmio_node);
return 0;
+
+unmap_ccu:
+ iounmap(wcn->ccu_base);
+put_mmio_node:
+ of_node_put(mmio_node);
+ return ret;
}
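
The wcnss register ranges are now found through a qcom,mmio phandle whose reg entries are matched by name. A reduced sketch of that lookup pattern (property and node names are taken from the hunk; the wrapper itself is hypothetical):

    #include <linux/of.h>
    #include <linux/of_address.h>

    static void __iomem *example_map_named(struct device_node *parent,
                                           const char *name)
    {
        struct device_node *mmio;
        void __iomem *base = NULL;
        int index;

        mmio = of_parse_phandle(parent, "qcom,mmio", 0);
        if (!mmio)
            return NULL;

        index = of_property_match_string(mmio, "reg-names", name);
        if (index >= 0)
            base = of_iomap(mmio, index);   /* NULL on failure */

        of_node_put(mmio);                  /* drop the phandle reference */
        return base;
    }
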
static int wcn36xx_probe(struct platform_device *pdev)
@@ -1138,7 +1151,8 @@ static int wcn36xx_probe(struct platform_device *pdev)
return 0;
out_unmap:
- iounmap(wcn->mmio);
+ iounmap(wcn->ccu_base);
+ iounmap(wcn->dxe_base);
out_wq:
ieee80211_free_hw(hw);
out_err:
@@ -1154,7 +1168,8 @@ static int wcn36xx_remove(struct platform_device *pdev)
mutex_destroy(&wcn->hal_mutex);
ieee80211_unregister_hw(hw);
- iounmap(wcn->mmio);
+ iounmap(wcn->dxe_base);
+ iounmap(wcn->ccu_base);
ieee80211_free_hw(hw);
return 0;
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 06c5ddcd3..f93299107 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -674,22 +674,25 @@ static int wcn36xx_smd_update_scan_params_rsp(void *buf, size_t len)
return 0;
}
-int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn)
+int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn,
+ u8 *channels, size_t channel_count)
{
- struct wcn36xx_hal_update_scan_params_req msg_body;
+ struct wcn36xx_hal_update_scan_params_req_ex msg_body;
int ret = 0;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_SCAN_PARAM_REQ);
- msg_body.dot11d_enabled = 0;
- msg_body.dot11d_resolved = 0;
- msg_body.channel_count = 26;
+ msg_body.dot11d_enabled = false;
+ msg_body.dot11d_resolved = true;
+
+ msg_body.channel_count = channel_count;
+ memcpy(msg_body.channels, channels, channel_count);
msg_body.active_min_ch_time = 60;
msg_body.active_max_ch_time = 120;
msg_body.passive_min_ch_time = 60;
msg_body.passive_max_ch_time = 110;
- msg_body.state = 0;
+ msg_body.state = PHY_SINGLE_CHANNEL_CENTERED;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
@@ -2226,17 +2229,12 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
case WCN36XX_HAL_COEX_IND:
case WCN36XX_HAL_AVOID_FREQ_RANGE_IND:
+ case WCN36XX_HAL_DEL_BA_IND:
case WCN36XX_HAL_OTA_TX_COMPL_IND:
case WCN36XX_HAL_MISSED_BEACON_IND:
case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
- msg_ind = kmalloc(sizeof(*msg_ind), GFP_KERNEL);
- if (!msg_ind)
- goto nomem;
- msg_ind->msg_len = len;
- msg_ind->msg = kmemdup(buf, len, GFP_KERNEL);
- if (!msg_ind->msg) {
- kfree(msg_ind);
-nomem:
+ msg_ind = kmalloc(sizeof(*msg_ind) + len, GFP_KERNEL);
+ if (!msg_ind) {
/*
* FIXME: Do something smarter than just
* printing an error.
@@ -2245,10 +2243,14 @@ nomem:
msg_header->msg_type);
break;
}
- mutex_lock(&wcn->hal_ind_mutex);
+
+ msg_ind->msg_len = len;
+ memcpy(msg_ind->msg, buf, len);
+
+ spin_lock(&wcn->hal_ind_lock);
list_add_tail(&msg_ind->list, &wcn->hal_ind_queue);
queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work);
- mutex_unlock(&wcn->hal_ind_mutex);
+ spin_unlock(&wcn->hal_ind_lock);
wcn36xx_dbg(WCN36XX_DBG_HAL, "indication arrived\n");
break;
default:
@@ -2262,8 +2264,9 @@ static void wcn36xx_ind_smd_work(struct work_struct *work)
container_of(work, struct wcn36xx, hal_ind_work);
struct wcn36xx_hal_msg_header *msg_header;
struct wcn36xx_hal_ind_msg *hal_ind_msg;
+ unsigned long flags;
- mutex_lock(&wcn->hal_ind_mutex);
+ spin_lock_irqsave(&wcn->hal_ind_lock, flags);
hal_ind_msg = list_first_entry(&wcn->hal_ind_queue,
struct wcn36xx_hal_ind_msg,
@@ -2273,6 +2276,7 @@ static void wcn36xx_ind_smd_work(struct work_struct *work)
switch (msg_header->msg_type) {
case WCN36XX_HAL_COEX_IND:
+ case WCN36XX_HAL_DEL_BA_IND:
case WCN36XX_HAL_AVOID_FREQ_RANGE_IND:
break;
case WCN36XX_HAL_OTA_TX_COMPL_IND:
@@ -2295,9 +2299,8 @@ static void wcn36xx_ind_smd_work(struct work_struct *work)
msg_header->msg_type);
}
list_del(wcn->hal_ind_queue.next);
- kfree(hal_ind_msg->msg);
+ spin_unlock_irqrestore(&wcn->hal_ind_lock, flags);
kfree(hal_ind_msg);
- mutex_unlock(&wcn->hal_ind_mutex);
}
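
Indications can be queued from atomic context, which is why the mutex becomes a spinlock. Note the hunk above still dispatches the message with the lock held; detaching the entry first and processing it unlocked, as sketched below, is the conventional refinement and is assumed here rather than taken from the patch:

    /* producer side: may run in atomic context, so no mutex */
    spin_lock(&wcn->hal_ind_lock);
    list_add_tail(&new_msg->list, &wcn->hal_ind_queue);
    queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work);
    spin_unlock(&wcn->hal_ind_lock);

    /* consumer side (work item): detach one entry, process it unlocked */
    spin_lock_irqsave(&wcn->hal_ind_lock, flags);
    msg = list_first_entry(&wcn->hal_ind_queue,
                           struct wcn36xx_hal_ind_msg, list);
    list_del(&msg->list);
    spin_unlock_irqrestore(&wcn->hal_ind_lock, flags);
    /* handle msg->msg here, then kfree(msg) */
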
int wcn36xx_smd_open(struct wcn36xx *wcn)
{
@@ -2310,7 +2313,7 @@ int wcn36xx_smd_open(struct wcn36xx *wcn)
}
INIT_WORK(&wcn->hal_ind_work, wcn36xx_ind_smd_work);
INIT_LIST_HEAD(&wcn->hal_ind_queue);
- mutex_init(&wcn->hal_ind_mutex);
+ spin_lock_init(&wcn->hal_ind_lock);
ret = wcn->ctrl_ops->open(wcn, wcn36xx_smd_rsp_process);
if (ret) {
@@ -2330,5 +2333,4 @@ void wcn36xx_smd_close(struct wcn36xx *wcn)
{
wcn->ctrl_ops->close();
destroy_workqueue(wcn->hal_ind_wq);
- mutex_destroy(&wcn->hal_ind_mutex);
}
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
index d93e3fd73..df80cbbd9 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.h
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -46,8 +46,8 @@ struct wcn36xx_fw_msg_status_rsp {
struct wcn36xx_hal_ind_msg {
struct list_head list;
- u8 *msg;
size_t msg_len;
+ u8 msg[];
};
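
The indication message now carries its payload in a C99 flexible array member, so one allocation covers the list node, the length and the data, replacing the old two-step kmalloc/kmemdup. A minimal sketch of the idiom:

    #include <linux/slab.h>
    #include <linux/string.h>

    /* Sketch: a single allocation for header plus payload */
    static struct wcn36xx_hal_ind_msg *example_alloc_ind(const void *buf,
                                                         size_t len)
    {
        struct wcn36xx_hal_ind_msg *msg_ind;

        msg_ind = kmalloc(sizeof(*msg_ind) + len, GFP_KERNEL);
        if (!msg_ind)
            return NULL;

        msg_ind->msg_len = len;
        memcpy(msg_ind->msg, buf, len); /* fills the flexible array */
        return msg_ind;                 /* one kfree() releases everything */
    }
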
struct wcn36xx;
@@ -63,7 +63,7 @@ int wcn36xx_smd_start_scan(struct wcn36xx *wcn);
int wcn36xx_smd_end_scan(struct wcn36xx *wcn);
int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
enum wcn36xx_hal_sys_mode mode);
-int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn);
+int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn, u8 *channels, size_t channel_count);
int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif);
int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr);
int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index);
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index 051fb3338..a9a721b6b 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -193,7 +193,7 @@ struct wcn36xx {
u8 fw_minor;
u8 fw_major;
u32 fw_feat_caps[WCN36XX_HAL_CAPS_SIZE];
- u32 chip_version;
+ bool is_pronto;
/* extra byte for the NULL termination */
u8 crm_version[WCN36XX_HAL_VERSION_LENGTH + 1];
@@ -202,7 +202,8 @@ struct wcn36xx {
/* IRQs */
int tx_irq;
int rx_irq;
- void __iomem *mmio;
+ void __iomem *ccu_base;
+ void __iomem *dxe_base;
struct wcn36xx_platform_ctrl_ops *ctrl_ops;
/*
@@ -215,7 +216,7 @@ struct wcn36xx {
struct completion hal_rsp_compl;
struct workqueue_struct *hal_ind_wq;
struct work_struct hal_ind_work;
- struct mutex hal_ind_mutex;
+ spinlock_t hal_ind_lock;
struct list_head hal_ind_queue;
/* DXE channels */
@@ -241,9 +242,6 @@ struct wcn36xx {
};
-#define WCN36XX_CHIP_3660 0
-#define WCN36XX_CHIP_3680 1
-
static inline bool wcn36xx_is_fw_version(struct wcn36xx *wcn,
u8 major,
u8 minor,
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 576981129..f0e1175fb 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -378,6 +378,10 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
/* social scan on P2P_DEVICE is handled as p2p search */
if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE &&
wil_p2p_is_social_scan(request)) {
+ if (!wil->p2p.p2p_dev_started) {
+ wil_err(wil, "P2P search requested on stopped P2P device\n");
+ return -EIO;
+ }
wil->scan_request = request;
wil->radio_wdev = wdev;
rc = wil_p2p_search(wil, request);
@@ -1351,6 +1355,7 @@ static int wil_cfg80211_start_p2p_device(struct wiphy *wiphy,
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
wil_dbg_misc(wil, "%s: entered\n", __func__);
+ wil->p2p.p2p_dev_started = 1;
return 0;
}
@@ -1358,8 +1363,23 @@ static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy,
struct wireless_dev *wdev)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ u8 started;
wil_dbg_misc(wil, "%s: entered\n", __func__);
+ mutex_lock(&wil->mutex);
+ started = wil_p2p_stop_discovery(wil);
+ if (started && wil->scan_request) {
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
+ cfg80211_scan_done(wil->scan_request, &info);
+ wil->scan_request = NULL;
+ wil->radio_wdev = wil->wdev;
+ }
+ mutex_unlock(&wil->mutex);
+
+ wil->p2p.p2p_dev_started = 0;
}
static struct cfg80211_ops wil_cfg80211_ops = {
diff --git a/drivers/net/wireless/ath/wil6210/debug.c b/drivers/net/wireless/ath/wil6210/debug.c
index c312a667c..217a4591b 100644
--- a/drivers/net/wireless/ath/wil6210/debug.c
+++ b/drivers/net/wireless/ath/wil6210/debug.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2013,2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -19,34 +19,31 @@
void __wil_err(struct wil6210_priv *wil, const char *fmt, ...)
{
- struct net_device *ndev = wil_to_ndev(wil);
- struct va_format vaf = {
- .fmt = fmt,
- };
+ struct va_format vaf;
va_list args;
va_start(args, fmt);
+ vaf.fmt = fmt;
vaf.va = &args;
- netdev_err(ndev, "%pV", &vaf);
+ netdev_err(wil_to_ndev(wil), "%pV", &vaf);
trace_wil6210_log_err(&vaf);
va_end(args);
}
void __wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...)
{
- if (net_ratelimit()) {
- struct net_device *ndev = wil_to_ndev(wil);
- struct va_format vaf = {
- .fmt = fmt,
- };
- va_list args;
+ struct va_format vaf;
+ va_list args;
- va_start(args, fmt);
- vaf.va = &args;
- netdev_err(ndev, "%pV", &vaf);
- trace_wil6210_log_err(&vaf);
- va_end(args);
- }
+ if (!net_ratelimit())
+ return;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ netdev_err(wil_to_ndev(wil), "%pV", &vaf);
+ trace_wil6210_log_err(&vaf);
+ va_end(args);
}
void wil_dbg_ratelimited(const struct wil6210_priv *wil, const char *fmt, ...)
@@ -67,27 +64,24 @@ void wil_dbg_ratelimited(const struct wil6210_priv *wil, const char *fmt, ...)
void __wil_info(struct wil6210_priv *wil, const char *fmt, ...)
{
- struct net_device *ndev = wil_to_ndev(wil);
- struct va_format vaf = {
- .fmt = fmt,
- };
+ struct va_format vaf;
va_list args;
va_start(args, fmt);
+ vaf.fmt = fmt;
vaf.va = &args;
- netdev_info(ndev, "%pV", &vaf);
+ netdev_info(wil_to_ndev(wil), "%pV", &vaf);
trace_wil6210_log_info(&vaf);
va_end(args);
}
void wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...)
{
- struct va_format vaf = {
- .fmt = fmt,
- };
+ struct va_format vaf;
va_list args;
va_start(args, fmt);
+ vaf.fmt = fmt;
vaf.va = &args;
trace_wil6210_log_dbg(&vaf);
va_end(args);
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 8e31d755b..4bc92e549 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -850,10 +850,14 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
mutex_unlock(&wil->wmi_mutex);
if (wil->scan_request) {
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
wil_dbg_misc(wil, "Abort scan_request 0x%p\n",
wil->scan_request);
del_timer_sync(&wil->scan_timer);
- cfg80211_scan_done(wil->scan_request, true);
+ cfg80211_scan_done(wil->scan_request, &info);
wil->scan_request = NULL;
}
@@ -1049,10 +1053,14 @@ int __wil_down(struct wil6210_priv *wil)
(void)wil_p2p_stop_discovery(wil);
if (wil->scan_request) {
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
wil_dbg_misc(wil, "Abort scan_request 0x%p\n",
wil->scan_request);
del_timer_sync(&wil->scan_timer);
- cfg80211_scan_done(wil->scan_request, true);
+ cfg80211_scan_done(wil->scan_request, &info);
wil->scan_request = NULL;
}
diff --git a/drivers/net/wireless/ath/wil6210/p2p.c b/drivers/net/wireless/ath/wil6210/p2p.c
index 1c9153894..e0f8aa0eb 100644
--- a/drivers/net/wireless/ath/wil6210/p2p.c
+++ b/drivers/net/wireless/ath/wil6210/p2p.c
@@ -114,8 +114,10 @@ int wil_p2p_listen(struct wil6210_priv *wil, unsigned int duration,
u8 channel = P2P_DMG_SOCIAL_CHANNEL;
int rc;
- if (chan)
- channel = chan->hw_value;
+ if (!chan)
+ return -EINVAL;
+
+ channel = chan->hw_value;
wil_dbg_misc(wil, "%s: duration %d\n", __func__, duration);
@@ -250,8 +252,12 @@ void wil_p2p_search_expired(struct work_struct *work)
mutex_unlock(&wil->mutex);
if (started) {
+ struct cfg80211_scan_info info = {
+ .aborted = false,
+ };
+
mutex_lock(&wil->p2p_wdev_mutex);
- cfg80211_scan_done(wil->scan_request, 0);
+ cfg80211_scan_done(wil->scan_request, &info);
wil->scan_request = NULL;
wil->radio_wdev = wil->wdev;
mutex_unlock(&wil->p2p_wdev_mutex);
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index aeb72c438..7b5c4222b 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -18,13 +18,20 @@
#include <linux/pci.h>
#include <linux/moduleparam.h>
#include <linux/interrupt.h>
-
+#include <linux/suspend.h>
#include "wil6210.h"
static bool use_msi = true;
module_param(use_msi, bool, S_IRUGO);
MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true");
+#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
+static int wil6210_pm_notify(struct notifier_block *notify_block,
+ unsigned long mode, void *unused);
+#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM */
+
static
void wil_set_capabilities(struct wil6210_priv *wil)
{
@@ -238,6 +245,18 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto bus_disable;
}
+#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
+ wil->pm_notify.notifier_call = wil6210_pm_notify;
+ rc = register_pm_notifier(&wil->pm_notify);
+ if (rc)
+ /* Do not fail the driver initialization, as suspend can
+ * be prevented in a later phase if needed
+ */
+ wil_err(wil, "register_pm_notifier failed: %d\n", rc);
+#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM */
+
wil6210_debugfs_init(wil);
@@ -267,6 +286,12 @@ static void wil_pcie_remove(struct pci_dev *pdev)
wil_dbg_misc(wil, "%s()\n", __func__);
+#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
+ unregister_pm_notifier(&wil->pm_notify);
+#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM */
+
wil6210_debugfs_remove(wil);
wil_if_remove(wil);
wil_if_pcie_disable(wil);
@@ -335,6 +360,45 @@ static int wil6210_resume(struct device *dev, bool is_runtime)
return rc;
}
+static int wil6210_pm_notify(struct notifier_block *notify_block,
+ unsigned long mode, void *unused)
+{
+ struct wil6210_priv *wil = container_of(
+ notify_block, struct wil6210_priv, pm_notify);
+ int rc = 0;
+ enum wil_platform_event evt;
+
+ wil_dbg_pm(wil, "%s: mode (%ld)\n", __func__, mode);
+
+ switch (mode) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ case PM_RESTORE_PREPARE:
+ rc = wil_can_suspend(wil, false);
+ if (rc)
+ break;
+ evt = WIL_PLATFORM_EVT_PRE_SUSPEND;
+ if (wil->platform_ops.notify)
+ rc = wil->platform_ops.notify(wil->platform_handle,
+ evt);
+ break;
+ case PM_POST_SUSPEND:
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+ evt = WIL_PLATFORM_EVT_POST_SUSPEND;
+ if (wil->platform_ops.notify)
+ rc = wil->platform_ops.notify(wil->platform_handle,
+ evt);
+ break;
+ default:
+ wil_dbg_pm(wil, "unhandled notify mode %ld\n", mode);
+ break;
+ }
+
+ wil_dbg_pm(wil, "notification mode %ld: rc (%d)\n", mode, rc);
+ return rc;
+}
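
The callback registered above is a standard PM notifier. A generic sketch of the same hook follows; it uses notifier_from_errno() to veto suspend, whereas the driver's callback returns the raw error code, and the busy predicate is hypothetical:

    #include <linux/notifier.h>
    #include <linux/suspend.h>

    static int example_pm_notify(struct notifier_block *nb,
                                 unsigned long mode, void *unused)
    {
        switch (mode) {
        case PM_SUSPEND_PREPARE:
            if (example_device_busy())          /* hypothetical predicate */
                return notifier_from_errno(-EBUSY);
            break;
        case PM_POST_SUSPEND:
            example_resume_housekeeping();      /* hypothetical */
            break;
        }
        return NOTIFY_DONE;
    }
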
+
static int wil6210_pm_suspend(struct device *dev)
{
return wil6210_suspend(dev, false);
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index 0b7ecbcac..11ee24d50 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014,2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -24,10 +24,32 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
wil_dbg_pm(wil, "%s(%s)\n", __func__,
is_runtime ? "runtime" : "system");
+ if (!netif_running(wil_to_ndev(wil))) {
+ /* can always sleep when down */
+ wil_dbg_pm(wil, "Interface is down\n");
+ goto out;
+ }
+ if (test_bit(wil_status_resetting, wil->status)) {
+ wil_dbg_pm(wil, "Delay suspend when resetting\n");
+ rc = -EBUSY;
+ goto out;
+ }
+ if (wil->recovery_state != fw_recovery_idle) {
+ wil_dbg_pm(wil, "Delay suspend during recovery\n");
+ rc = -EBUSY;
+ goto out;
+ }
+
+ /* interface is running */
switch (wdev->iftype) {
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
+ if (test_bit(wil_status_fwconnecting, wil->status)) {
+ wil_dbg_pm(wil, "Delay suspend when connecting\n");
+ rc = -EBUSY;
+ goto out;
+ }
break;
/* AP-like interface - can't suspend */
default:
@@ -36,6 +58,7 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
break;
}
+out:
wil_dbg_pm(wil, "%s(%s) => %s (%d)\n", __func__,
is_runtime ? "runtime" : "system", rc ? "No" : "Yes", rc);
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index a4e43796a..f2f6a404d 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -184,6 +184,13 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
&vring->va[vring->swtail].tx;
ctx = &vring->ctx[vring->swtail];
+ if (!ctx) {
+ wil_dbg_txrx(wil,
+ "ctx(%d) was already completed\n",
+ vring->swtail);
+ vring->swtail = wil_vring_next_tail(vring);
+ continue;
+ }
*d = *_d;
wil_txdesc_unmap(dev, d, ctx);
if (ctx->skb)
@@ -544,6 +551,12 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
break;
}
}
+
+ /* make sure all writes to descriptors (shared memory) are done before
+ * committing them to HW
+ */
+ wmb();
+
wil_w(wil, v->hwtail, v->swtail);
return rc;
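
Every barrier added in this file follows the same pattern: complete all stores to the shared descriptor ring, then wmb(), then the single store that publishes the work to the consumer (the hardware tail register here, the completion thread elsewhere). A condensed fragment; the descriptor field names are illustrative only:

    /* 1. fill the DMA descriptor(s) in shared memory */
    desc->addr = cpu_to_le32(dma_addr);     /* illustrative fields */
    desc->len  = cpu_to_le16(len);

    /* 2. order the descriptor stores before the doorbell write */
    wmb();

    /* 3. publish: the device may fetch the descriptor from here on */
    wil_w(wil, v->hwtail, v->swtail);
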
@@ -969,6 +982,13 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
txdata->dot1x_open = false;
txdata->enabled = 0; /* no Tx can be in progress or start anew */
spin_unlock_bh(&txdata->lock);
+ /* napi_synchronize waits for completion of the current NAPI but will
+ * not prevent the next NAPI run.
+ * Add a memory barrier to guarantee that txdata->enabled is zeroed
+ * before napi_synchronize so that the next scheduled NAPI will not
+ * handle this vring
+ */
+ wmb();
/* make sure NAPI won't touch this vring */
if (test_bit(wil_status_napi_en, wil->status))
napi_synchronize(&wil->napi_tx);
@@ -1551,6 +1571,13 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
vring_index, used, used + descs_used);
}
+ /* Make sure to advance the head only after descriptor update is done.
+ * This will prevent a race condition where the completion thread
+ * will see the DU bit set from previous run and will handle the
+ * skb before it was completed.
+ */
+ wmb();
+
/* advance swhead */
wil_vring_advance_head(vring, descs_used);
wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);
@@ -1567,7 +1594,7 @@ mem_error:
while (descs_used > 0) {
struct wil_ctx *ctx;
- i = (swhead + descs_used) % vring->size;
+ i = (swhead + descs_used - 1) % vring->size;
d = (struct vring_tx_desc *)&vring->va[i].tx;
_desc = &vring->va[i].tx;
*d = *_desc;
@@ -1691,6 +1718,13 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
vring_index, used, used + nr_frags + 1);
}
+ /* Make sure to advance the head only after descriptor update is done.
+ * This will prevent a race condition where the completion thread
+ * will see the DU bit set from previous run and will handle the
+ * skb before it was completed.
+ */
+ wmb();
+
/* advance swhead */
wil_vring_advance_head(vring, nr_frags + 1);
wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead,
@@ -1914,6 +1948,12 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
wil_consume_skb(skb, d->dma.error == 0);
}
memset(ctx, 0, sizeof(*ctx));
+ /* Make sure the ctx is zeroed before updating the tail
+ * to prevent a case where wil_tx_vring will see
+ * this descriptor as used and handle it before ctx zero
+ * is completed.
+ */
+ wmb();
/* There is no need to touch HW descriptor:
* - status bit TX_DMA_STATUS_DU is set by design,
* so hardware will not try to process this desc.,
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 1abdd45c3..98d302935 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -458,6 +458,7 @@ struct wil_tid_crypto_rx {
struct wil_p2p_info {
struct ieee80211_channel listen_chan;
u8 discovery_started;
+ u8 p2p_dev_started;
u64 cookie;
struct timer_list discovery_timer; /* listen/search duration */
struct work_struct discovery_expired_work; /* listen/search expire */
@@ -662,6 +663,11 @@ struct wil6210_priv {
/* High Access Latency Policy voting */
struct wil_halp halp;
+#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
+ struct notifier_block pm_notify;
+#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM */
};
#define wil_to_wiphy(i) (i->wdev->wiphy)
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h
index 33d4a34b3..f8c41172a 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.h
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -23,6 +23,8 @@ enum wil_platform_event {
WIL_PLATFORM_EVT_FW_CRASH = 0,
WIL_PLATFORM_EVT_PRE_RESET = 1,
WIL_PLATFORM_EVT_FW_RDY = 2,
+ WIL_PLATFORM_EVT_PRE_SUSPEND = 3,
+ WIL_PLATFORM_EVT_POST_SUSPEND = 4,
};
/**
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index b80c5d850..4d9254191 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -426,15 +426,17 @@ static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
{
if (wil->scan_request) {
struct wmi_scan_complete_event *data = d;
- bool aborted = (data->status != WMI_SCAN_SUCCESS);
+ struct cfg80211_scan_info info = {
+ .aborted = (data->status != WMI_SCAN_SUCCESS),
+ };
wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", data->status);
wil_dbg_misc(wil, "Complete scan_request 0x%p aborted %d\n",
- wil->scan_request, aborted);
+ wil->scan_request, info.aborted);
del_timer_sync(&wil->scan_timer);
mutex_lock(&wil->p2p_wdev_mutex);
- cfg80211_scan_done(wil->scan_request, aborted);
+ cfg80211_scan_done(wil->scan_request, &info);
wil->radio_wdev = wil->wdev;
mutex_unlock(&wil->p2p_wdev_mutex);
wil->scan_request = NULL;
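
Mainline changed the scan-completion API from a bool to struct cfg80211_scan_info, which also leaves room for TSF reporting; every call site in this patch is the same mechanical conversion. The cfg80211 form, as used above (mac80211 drivers such as at76c50x do the same through ieee80211_scan_completed(hw, &info)):

    struct cfg80211_scan_info info = {
        .aborted = true,    /* the old bool argument, now a struct field */
    };

    cfg80211_scan_done(wil->scan_request, &info);
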
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
index 488c08c30..2e3ece345 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.c
+++ b/drivers/net/wireless/atmel/at76c50x-usb.c
@@ -1915,6 +1915,9 @@ static void at76_dwork_hw_scan(struct work_struct *work)
{
struct at76_priv *priv = container_of(work, struct at76_priv,
dwork_hw_scan.work);
+ struct cfg80211_scan_info info = {
+ .aborted = false,
+ };
int ret;
if (priv->device_unplugged)
@@ -1941,7 +1944,7 @@ static void at76_dwork_hw_scan(struct work_struct *work)
mutex_unlock(&priv->mtx);
- ieee80211_scan_completed(priv->hw, false);
+ ieee80211_scan_completed(priv->hw, &info);
ieee80211_wake_queues(priv->hw);
}
diff --git a/drivers/net/wireless/broadcom/b43/Makefile b/drivers/net/wireless/broadcom/b43/Makefile
index ddc4df466..27fab958e 100644
--- a/drivers/net/wireless/broadcom/b43/Makefile
+++ b/drivers/net/wireless/broadcom/b43/Makefile
@@ -1,6 +1,6 @@
b43-y += main.o
b43-y += bus.o
-b43-$(CONFIG_B43_PHY_G) += phy_a.o phy_g.o tables.o lo.o wa.o
+b43-$(CONFIG_B43_PHY_G) += phy_g.o tables.o lo.o wa.o
b43-$(CONFIG_B43_PHY_N) += tables_nphy.o
b43-$(CONFIG_B43_PHY_N) += radio_2055.o
b43-$(CONFIG_B43_PHY_N) += radio_2056.o
diff --git a/drivers/net/wireless/broadcom/b43/leds.c b/drivers/net/wireless/broadcom/b43/leds.c
index d79ab2a22..cb987c2ec 100644
--- a/drivers/net/wireless/broadcom/b43/leds.c
+++ b/drivers/net/wireless/broadcom/b43/leds.c
@@ -222,7 +222,7 @@ static void b43_led_get_sprominfo(struct b43_wldev *dev,
sprom[2] = dev->dev->bus_sprom->gpio2;
sprom[3] = dev->dev->bus_sprom->gpio3;
- if (sprom[led_index] == 0xFF) {
+ if ((sprom[0] & sprom[1] & sprom[2] & sprom[3]) == 0xff) {
/* There is no LED information in the SPROM
* for this LED. Hardcode it here. */
*activelow = false;
@@ -250,7 +250,11 @@ static void b43_led_get_sprominfo(struct b43_wldev *dev,
return;
}
} else {
- *behaviour = sprom[led_index] & B43_LED_BEHAVIOUR;
+ /* keep LED disabled if no mapping is defined */
+ if (sprom[led_index] == 0xff)
+ *behaviour = B43_LED_OFF;
+ else
+ *behaviour = sprom[led_index] & B43_LED_BEHAVIOUR;
*activelow = !!(sprom[led_index] & B43_LED_ACTIVELOW);
}
}
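
The old test indexed a single SPROM entry, so a board that programs some LEDs but not others took the hardcoded-defaults path for everything. The new predicate relies on a bitwise identity: the AND of the four bytes equals 0xff only when every byte is 0xff, i.e. the whole LED block is unprogrammed. A tiny demonstration:

    #include <linux/types.h>

    static bool example_led_block_unset(const u16 sprom[4])
    {
        /* AND of all four entries is 0xff only if each one is 0xff */
        return (sprom[0] & sprom[1] & sprom[2] & sprom[3]) == 0xff;
    }
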
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index f8c7aa9db..c522719fc 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -3176,7 +3176,6 @@ static void b43_rate_memory_write(struct b43_wldev *dev, u16 rate, int is_ofdm)
static void b43_rate_memory_init(struct b43_wldev *dev)
{
switch (dev->phy.type) {
- case B43_PHYTYPE_A:
case B43_PHYTYPE_G:
case B43_PHYTYPE_N:
case B43_PHYTYPE_LP:
@@ -3190,8 +3189,6 @@ static void b43_rate_memory_init(struct b43_wldev *dev)
b43_rate_memory_write(dev, B43_OFDM_RATE_36MB, 1);
b43_rate_memory_write(dev, B43_OFDM_RATE_48MB, 1);
b43_rate_memory_write(dev, B43_OFDM_RATE_54MB, 1);
- if (dev->phy.type == B43_PHYTYPE_A)
- break;
/* fallthrough */
case B43_PHYTYPE_B:
b43_rate_memory_write(dev, B43_CCK_RATE_1MB, 0);
@@ -4600,14 +4597,6 @@ static int b43_phy_versioning(struct b43_wldev *dev)
if (radio_manuf != 0x17F /* Broadcom */)
unsupported = 1;
switch (phy_type) {
- case B43_PHYTYPE_A:
- if (radio_id != 0x2060)
- unsupported = 1;
- if (radio_rev != 1)
- unsupported = 1;
- if (radio_manuf != 0x17F)
- unsupported = 1;
- break;
case B43_PHYTYPE_B:
if ((radio_id & 0xFFF0) != 0x2050)
unsupported = 1;
@@ -4762,10 +4751,7 @@ static void b43_set_synth_pu_delay(struct b43_wldev *dev, bool idle)
u16 pu_delay;
/* The time value is in microseconds. */
- if (dev->phy.type == B43_PHYTYPE_A)
- pu_delay = 3700;
- else
- pu_delay = 1050;
+ pu_delay = 1050;
if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC) || idle)
pu_delay = 500;
if ((dev->phy.radio_ver == 0x2050) && (dev->phy.radio_rev == 8))
@@ -4780,14 +4766,10 @@ static void b43_set_pretbtt(struct b43_wldev *dev)
u16 pretbtt;
/* The time value is in microseconds. */
- if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC)) {
+ if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC))
pretbtt = 2;
- } else {
- if (dev->phy.type == B43_PHYTYPE_A)
- pretbtt = 120;
- else
- pretbtt = 250;
- }
+ else
+ pretbtt = 250;
b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_PRETBTT, pretbtt);
b43_write16(dev, B43_MMIO_TSF_CFP_PRETBTT, pretbtt);
}
@@ -5376,10 +5358,6 @@ static void b43_supported_bands(struct b43_wldev *dev, bool *have_2ghz_phy,
/* As a fallback, try to guess using PHY type */
switch (dev->phy.type) {
- case B43_PHYTYPE_A:
- *have_2ghz_phy = false;
- *have_5ghz_phy = true;
- return;
case B43_PHYTYPE_G:
case B43_PHYTYPE_N:
case B43_PHYTYPE_LP:
@@ -5451,7 +5429,6 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
/* We don't support 5 GHz on some PHYs yet */
if (have_5ghz_phy) {
switch (dev->phy.type) {
- case B43_PHYTYPE_A:
case B43_PHYTYPE_G:
case B43_PHYTYPE_LP:
case B43_PHYTYPE_HT:
diff --git a/drivers/net/wireless/broadcom/b43/phy_a.h b/drivers/net/wireless/broadcom/b43/phy_a.h
index f7d0d929a..0a92d01c2 100644
--- a/drivers/net/wireless/broadcom/b43/phy_a.h
+++ b/drivers/net/wireless/broadcom/b43/phy_a.h
@@ -101,26 +101,4 @@ u32 b43_ofdmtab_read32(struct b43_wldev *dev, u16 table, u16 offset);
void b43_ofdmtab_write32(struct b43_wldev *dev, u16 table,
u16 offset, u32 value);
-
-struct b43_phy_a {
- /* Pointer to the table used to convert a
- * TSSI value to dBm-Q5.2 */
- const s8 *tssi2dbm;
- /* Target idle TSSI */
- int tgt_idle_tssi;
- /* Current idle TSSI */
- int cur_idle_tssi;//FIXME value currently not set
-
- /* A-PHY TX Power control value. */
- u16 txpwr_offset;
-
- //TODO lots of missing stuff
-};
-
-/**
- * b43_phy_inita - Lowlevel A-PHY init routine.
- * This is _only_ used by the G-PHY code.
- */
-void b43_phy_inita(struct b43_wldev *dev);
-
#endif /* LINUX_B43_PHY_A_H_ */
diff --git a/drivers/net/wireless/broadcom/b43/phy_common.h b/drivers/net/wireless/broadcom/b43/phy_common.h
index 78d865267..ced054a98 100644
--- a/drivers/net/wireless/broadcom/b43/phy_common.h
+++ b/drivers/net/wireless/broadcom/b43/phy_common.h
@@ -190,7 +190,6 @@ struct b43_phy_operations {
void (*pwork_60sec)(struct b43_wldev *dev);
};
-struct b43_phy_a;
struct b43_phy_g;
struct b43_phy_n;
struct b43_phy_lp;
@@ -210,8 +209,6 @@ struct b43_phy {
#else
union {
#endif
- /* A-PHY specific information */
- struct b43_phy_a *a;
/* G-PHY specific information */
struct b43_phy_g *g;
/* N-PHY specific information */
diff --git a/drivers/net/wireless/broadcom/b43/phy_g.c b/drivers/net/wireless/broadcom/b43/phy_g.c
index 462310e6e..822dcaa8a 100644
--- a/drivers/net/wireless/broadcom/b43/phy_g.c
+++ b/drivers/net/wireless/broadcom/b43/phy_g.c
@@ -31,6 +31,7 @@
#include "phy_common.h"
#include "lo.h"
#include "main.h"
+#include "wa.h"
#include <linux/bitrev.h>
#include <linux/slab.h>
@@ -1987,6 +1988,25 @@ static void b43_phy_init_pctl(struct b43_wldev *dev)
b43_shm_clear_tssi(dev);
}
+static void b43_phy_inita(struct b43_wldev *dev)
+{
+ struct b43_phy *phy = &dev->phy;
+
+ might_sleep();
+
+ if (phy->rev >= 6) {
+ if (b43_phy_read(dev, B43_PHY_ENCORE) & B43_PHY_ENCORE_EN)
+ b43_phy_set(dev, B43_PHY_ENCORE, 0x0010);
+ else
+ b43_phy_mask(dev, B43_PHY_ENCORE, ~0x1010);
+ }
+
+ b43_wa_all(dev);
+
+ if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_PACTRL)
+ b43_phy_maskset(dev, B43_PHY_OFDM(0x6E), 0xE000, 0x3CF);
+}
+
static void b43_phy_initg(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
@@ -2150,11 +2170,6 @@ static void default_radio_attenuation(struct b43_wldev *dev,
}
}
- if (phy->type == B43_PHYTYPE_A) {
- rf->att = 0x60;
- return;
- }
-
switch (phy->radio_ver) {
case 0x2053:
switch (phy->radio_rev) {
diff --git a/drivers/net/wireless/broadcom/b43/wa.c b/drivers/net/wireless/broadcom/b43/wa.c
index c218c08fb..0e96c08d1 100644
--- a/drivers/net/wireless/broadcom/b43/wa.c
+++ b/drivers/net/wireless/broadcom/b43/wa.c
@@ -30,33 +30,6 @@
#include "phy_common.h"
#include "wa.h"
-static void b43_wa_papd(struct b43_wldev *dev)
-{
- u16 backup;
-
- backup = b43_ofdmtab_read16(dev, B43_OFDMTAB_PWRDYN2, 0);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_PWRDYN2, 0, 7);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_APHY, 0, 0);
- b43_dummy_transmission(dev, true, true);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_PWRDYN2, 0, backup);
-}
-
-static void b43_wa_auxclipthr(struct b43_wldev *dev)
-{
- b43_phy_write(dev, B43_PHY_OFDM(0x8E), 0x3800);
-}
-
-static void b43_wa_afcdac(struct b43_wldev *dev)
-{
- b43_phy_write(dev, 0x0035, 0x03FF);
- b43_phy_write(dev, 0x0036, 0x0400);
-}
-
-static void b43_wa_txdc_offset(struct b43_wldev *dev)
-{
- b43_ofdmtab_write16(dev, B43_OFDMTAB_DC, 0, 0x0051);
-}
-
void b43_wa_initgains(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
@@ -81,41 +54,6 @@ void b43_wa_initgains(struct b43_wldev *dev)
b43_phy_write(dev, 0x00BA, 0x3ED5);
}
-static void b43_wa_divider(struct b43_wldev *dev)
-{
- b43_phy_mask(dev, 0x002B, ~0x0100);
- b43_phy_write(dev, 0x008E, 0x58C1);
-}
-
-static void b43_wa_gt(struct b43_wldev *dev) /* Gain table. */
-{
- if (dev->phy.rev <= 2) {
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 0, 15);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 1, 31);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 2, 42);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 3, 48);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 4, 58);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 0, 19);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 1, 19);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 2, 19);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 3, 19);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 4, 21);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 5, 21);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 6, 25);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN1, 0, 3);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN1, 1, 3);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN1, 2, 7);
- } else {
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 0, 19);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 1, 19);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 2, 19);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 3, 19);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 4, 21);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 5, 21);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 6, 25);
- }
-}
-
static void b43_wa_rssi_lt(struct b43_wldev *dev) /* RSSI lookup table */
{
int i;
@@ -133,15 +71,11 @@ static void b43_wa_rssi_lt(struct b43_wldev *dev) /* RSSI lookup table */
static void b43_wa_analog(struct b43_wldev *dev)
{
- struct b43_phy *phy = &dev->phy;
u16 ofdmrev;
ofdmrev = b43_phy_read(dev, B43_PHY_VERSION_OFDM) & B43_PHYVER_VERSION;
if (ofdmrev > 2) {
- if (phy->type == B43_PHYTYPE_A)
- b43_phy_write(dev, B43_PHY_PWRDOWN, 0x1808);
- else
- b43_phy_write(dev, B43_PHY_PWRDOWN, 0x1000);
+ b43_phy_write(dev, B43_PHY_PWRDOWN, 0x1000);
} else {
b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 3, 0x1044);
b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 4, 0x7201);
@@ -149,26 +83,13 @@ static void b43_wa_analog(struct b43_wldev *dev)
}
}
-static void b43_wa_dac(struct b43_wldev *dev)
-{
- if (dev->phy.analog == 1)
- b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 1,
- (b43_ofdmtab_read16(dev, B43_OFDMTAB_DAC, 1) & ~0x0034) | 0x0008);
- else
- b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 1,
- (b43_ofdmtab_read16(dev, B43_OFDMTAB_DAC, 1) & ~0x0078) | 0x0010);
-}
-
static void b43_wa_fft(struct b43_wldev *dev) /* Fine frequency table */
{
int i;
- if (dev->phy.type == B43_PHYTYPE_A)
- for (i = 0; i < B43_TAB_FINEFREQA_SIZE; i++)
- b43_ofdmtab_write16(dev, B43_OFDMTAB_DACRFPABB, i, b43_tab_finefreqa[i]);
- else
- for (i = 0; i < B43_TAB_FINEFREQG_SIZE; i++)
- b43_ofdmtab_write16(dev, B43_OFDMTAB_DACRFPABB, i, b43_tab_finefreqg[i]);
+ for (i = 0; i < B43_TAB_FINEFREQG_SIZE; i++)
+ b43_ofdmtab_write16(dev, B43_OFDMTAB_DACRFPABB, i,
+ b43_tab_finefreqg[i]);
}
static void b43_wa_nft(struct b43_wldev *dev) /* Noise figure table */
@@ -176,21 +97,14 @@ static void b43_wa_nft(struct b43_wldev *dev) /* Noise figure table */
struct b43_phy *phy = &dev->phy;
int i;
- if (phy->type == B43_PHYTYPE_A) {
- if (phy->rev == 2)
- for (i = 0; i < B43_TAB_NOISEA2_SIZE; i++)
- b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, b43_tab_noisea2[i]);
- else
- for (i = 0; i < B43_TAB_NOISEA3_SIZE; i++)
- b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, b43_tab_noisea3[i]);
- } else {
- if (phy->rev == 1)
- for (i = 0; i < B43_TAB_NOISEG1_SIZE; i++)
- b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, b43_tab_noiseg1[i]);
- else
- for (i = 0; i < B43_TAB_NOISEG2_SIZE; i++)
- b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, b43_tab_noiseg2[i]);
- }
+ if (phy->rev == 1)
+ for (i = 0; i < B43_TAB_NOISEG1_SIZE; i++)
+ b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i,
+ b43_tab_noiseg1[i]);
+ else
+ for (i = 0; i < B43_TAB_NOISEG2_SIZE; i++)
+ b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i,
+ b43_tab_noiseg2[i]);
}
static void b43_wa_rt(struct b43_wldev *dev) /* Rotor table */
@@ -201,14 +115,6 @@ static void b43_wa_rt(struct b43_wldev *dev) /* Rotor table */
b43_ofdmtab_write32(dev, B43_OFDMTAB_ROTOR, i, b43_tab_rotor[i]);
}
-static void b43_write_null_nst(struct b43_wldev *dev)
-{
- int i;
-
- for (i = 0; i < B43_TAB_NOISESCALE_SIZE; i++)
- b43_ofdmtab_write16(dev, B43_OFDMTAB_NOISESCALE, i, 0);
-}
-
static void b43_write_nst(struct b43_wldev *dev, const u16 *nst)
{
int i;
@@ -221,24 +127,13 @@ static void b43_wa_nst(struct b43_wldev *dev) /* Noise scale table */
{
struct b43_phy *phy = &dev->phy;
- if (phy->type == B43_PHYTYPE_A) {
- if (phy->rev <= 1)
- b43_write_null_nst(dev);
- else if (phy->rev == 2)
- b43_write_nst(dev, b43_tab_noisescalea2);
- else if (phy->rev == 3)
- b43_write_nst(dev, b43_tab_noisescalea3);
- else
+ if (phy->rev >= 6) {
+ if (b43_phy_read(dev, B43_PHY_ENCORE) & B43_PHY_ENCORE_EN)
b43_write_nst(dev, b43_tab_noisescaleg3);
+ else
+ b43_write_nst(dev, b43_tab_noisescaleg2);
} else {
- if (phy->rev >= 6) {
- if (b43_phy_read(dev, B43_PHY_ENCORE) & B43_PHY_ENCORE_EN)
- b43_write_nst(dev, b43_tab_noisescaleg3);
- else
- b43_write_nst(dev, b43_tab_noisescaleg2);
- } else {
- b43_write_nst(dev, b43_tab_noisescaleg1);
- }
+ b43_write_nst(dev, b43_tab_noisescaleg1);
}
}
@@ -251,41 +146,13 @@ static void b43_wa_art(struct b43_wldev *dev) /* ADV retard table */
i, b43_tab_retard[i]);
}
-static void b43_wa_txlna_gain(struct b43_wldev *dev)
-{
- b43_ofdmtab_write16(dev, B43_OFDMTAB_DC, 13, 0x0000);
-}
-
-static void b43_wa_crs_reset(struct b43_wldev *dev)
-{
- b43_phy_write(dev, 0x002C, 0x0064);
-}
-
-static void b43_wa_2060txlna_gain(struct b43_wldev *dev)
-{
- b43_hf_write(dev, b43_hf_read(dev) |
- B43_HF_2060W);
-}
-
-static void b43_wa_lms(struct b43_wldev *dev)
-{
- b43_phy_maskset(dev, 0x0055, 0xFFC0, 0x0004);
-}
-
-static void b43_wa_mixedsignal(struct b43_wldev *dev)
-{
- b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 1, 3);
-}
-
static void b43_wa_msst(struct b43_wldev *dev) /* Min sigma square table */
{
struct b43_phy *phy = &dev->phy;
int i;
const u16 *tab;
- if (phy->type == B43_PHYTYPE_A) {
- tab = b43_tab_sigmasqr1;
- } else if (phy->type == B43_PHYTYPE_G) {
+ if (phy->type == B43_PHYTYPE_G) {
tab = b43_tab_sigmasqr2;
} else {
B43_WARN_ON(1);
@@ -298,13 +165,6 @@ static void b43_wa_msst(struct b43_wldev *dev) /* Min sigma square table */
}
}
-static void b43_wa_iqadc(struct b43_wldev *dev)
-{
- if (dev->phy.analog == 4)
- b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 0,
- b43_ofdmtab_read16(dev, B43_OFDMTAB_DAC, 0) & ~0xF000);
-}
-
static void b43_wa_crs_ed(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
@@ -450,38 +310,6 @@ static void b43_wa_cpll_nonpilot(struct b43_wldev *dev)
b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_11, 1, 0);
}
-static void b43_wa_rssi_adc(struct b43_wldev *dev)
-{
- if (dev->phy.analog == 4)
- b43_phy_write(dev, 0x00DC, 0x7454);
-}
-
-static void b43_wa_boards_a(struct b43_wldev *dev)
-{
- if (dev->dev->board_vendor == SSB_BOARDVENDOR_BCM &&
- dev->dev->board_type == SSB_BOARD_BU4306 &&
- dev->dev->board_rev < 0x30) {
- b43_phy_write(dev, 0x0010, 0xE000);
- b43_phy_write(dev, 0x0013, 0x0140);
- b43_phy_write(dev, 0x0014, 0x0280);
- } else {
- if (dev->dev->board_type == SSB_BOARD_MP4318 &&
- dev->dev->board_rev < 0x20) {
- b43_phy_write(dev, 0x0013, 0x0210);
- b43_phy_write(dev, 0x0014, 0x0840);
- } else {
- b43_phy_write(dev, 0x0013, 0x0140);
- b43_phy_write(dev, 0x0014, 0x0280);
- }
- if (dev->phy.rev <= 4)
- b43_phy_write(dev, 0x0010, 0xE000);
- else
- b43_phy_write(dev, 0x0010, 0x2000);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_DC, 1, 0x0039);
- b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_APHY, 7, 0x0040);
- }
-}
-
static void b43_wa_boards_g(struct b43_wldev *dev)
{
struct ssb_sprom *sprom = dev->dev->bus_sprom;
@@ -518,80 +346,7 @@ void b43_wa_all(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
- if (phy->type == B43_PHYTYPE_A) {
- switch (phy->rev) {
- case 2:
- b43_wa_papd(dev);
- b43_wa_auxclipthr(dev);
- b43_wa_afcdac(dev);
- b43_wa_txdc_offset(dev);
- b43_wa_initgains(dev);
- b43_wa_divider(dev);
- b43_wa_gt(dev);
- b43_wa_rssi_lt(dev);
- b43_wa_analog(dev);
- b43_wa_dac(dev);
- b43_wa_fft(dev);
- b43_wa_nft(dev);
- b43_wa_rt(dev);
- b43_wa_nst(dev);
- b43_wa_art(dev);
- b43_wa_txlna_gain(dev);
- b43_wa_crs_reset(dev);
- b43_wa_2060txlna_gain(dev);
- b43_wa_lms(dev);
- break;
- case 3:
- b43_wa_papd(dev);
- b43_wa_mixedsignal(dev);
- b43_wa_rssi_lt(dev);
- b43_wa_txdc_offset(dev);
- b43_wa_initgains(dev);
- b43_wa_dac(dev);
- b43_wa_nft(dev);
- b43_wa_nst(dev);
- b43_wa_msst(dev);
- b43_wa_analog(dev);
- b43_wa_gt(dev);
- b43_wa_txpuoff_rxpuon(dev);
- b43_wa_txlna_gain(dev);
- break;
- case 5:
- b43_wa_iqadc(dev);
- case 6:
- b43_wa_papd(dev);
- b43_wa_rssi_lt(dev);
- b43_wa_txdc_offset(dev);
- b43_wa_initgains(dev);
- b43_wa_dac(dev);
- b43_wa_nft(dev);
- b43_wa_nst(dev);
- b43_wa_msst(dev);
- b43_wa_analog(dev);
- b43_wa_gt(dev);
- b43_wa_txpuoff_rxpuon(dev);
- b43_wa_txlna_gain(dev);
- break;
- case 7:
- b43_wa_iqadc(dev);
- b43_wa_papd(dev);
- b43_wa_rssi_lt(dev);
- b43_wa_txdc_offset(dev);
- b43_wa_initgains(dev);
- b43_wa_dac(dev);
- b43_wa_nft(dev);
- b43_wa_nst(dev);
- b43_wa_msst(dev);
- b43_wa_analog(dev);
- b43_wa_gt(dev);
- b43_wa_txpuoff_rxpuon(dev);
- b43_wa_txlna_gain(dev);
- b43_wa_rssi_adc(dev);
- default:
- B43_WARN_ON(1);
- }
- b43_wa_boards_a(dev);
- } else if (phy->type == B43_PHYTYPE_G) {
+ if (phy->type == B43_PHYTYPE_G) {
switch (phy->rev) {
case 1: /* XXX review rev1 */
b43_wa_crs_ed(dev);
diff --git a/drivers/net/wireless/broadcom/b43/xmit.c b/drivers/net/wireless/broadcom/b43/xmit.c
index f6201264d..b068d5aee 100644
--- a/drivers/net/wireless/broadcom/b43/xmit.c
+++ b/drivers/net/wireless/broadcom/b43/xmit.c
@@ -205,7 +205,7 @@ static u16 b43_generate_tx_phy_ctl1(struct b43_wldev *dev, u8 bitrate)
return control;
}
-static u8 b43_calc_fallback_rate(u8 bitrate)
+static u8 b43_calc_fallback_rate(u8 bitrate, int gmode)
{
switch (bitrate) {
case B43_CCK_RATE_1MB:
@@ -216,8 +216,15 @@ static u8 b43_calc_fallback_rate(u8 bitrate)
return B43_CCK_RATE_2MB;
case B43_CCK_RATE_11MB:
return B43_CCK_RATE_5MB;
+ /*
+ * Don't just fall back to CCK; the device may be operating in the
+ * 5GHz band, where falling back to CCK won't work out very well.
+ */
case B43_OFDM_RATE_6MB:
- return B43_CCK_RATE_5MB;
+ if (gmode)
+ return B43_CCK_RATE_5MB;
+ else
+ return B43_OFDM_RATE_6MB;
case B43_OFDM_RATE_9MB:
return B43_OFDM_RATE_6MB;
case B43_OFDM_RATE_12MB:
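The fallback-rate hunk above is the behavioural core of this change: on a 5GHz device (gmode == 0) the 6Mb/s OFDM rate now falls back to itself rather than to a CCK rate the band doesn't support. A self-contained mirror of just that branch, using stand-in constants rather than the driver's real rate encodings:

	#include <stdio.h>

	/* Illustrative stand-ins for the driver's rate constants. */
	enum { CCK_5MB = 5, OFDM_6MB = 6 };

	static int calc_fallback_rate(int bitrate, int gmode)
	{
		switch (bitrate) {
		case OFDM_6MB:
			/* 2.4GHz (gmode) may drop to CCK; 5GHz must not. */
			return gmode ? CCK_5MB : OFDM_6MB;
		default:
			return bitrate;
		}
	}

	int main(void)
	{
		printf("gmode=1: %d\n", calc_fallback_rate(OFDM_6MB, 1)); /* 5 */
		printf("gmode=0: %d\n", calc_fallback_rate(OFDM_6MB, 0)); /* 6 */
		return 0;
	}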
@@ -438,7 +445,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
rts_rate = rts_cts_rate ? rts_cts_rate->hw_value : B43_CCK_RATE_1MB;
rts_rate_ofdm = b43_is_ofdm_rate(rts_rate);
- rts_rate_fb = b43_calc_fallback_rate(rts_rate);
+ rts_rate_fb = b43_calc_fallback_rate(rts_rate, phy->gmode);
rts_rate_fb_ofdm = b43_is_ofdm_rate(rts_rate_fb);
if (rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
@@ -642,11 +649,7 @@ static s8 b43_rssinoise_postprocess(struct b43_wldev *dev, u8 in_rssi)
struct b43_phy *phy = &dev->phy;
s8 ret;
- if (phy->type == B43_PHYTYPE_A) {
- //TODO: Incomplete specs.
- ret = 0;
- } else
- ret = b43_rssi_postprocess(dev, in_rssi, 0, 1, 1);
+ ret = b43_rssi_postprocess(dev, in_rssi, 0, 1, 1);
return ret;
}
@@ -663,7 +666,6 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
u16 uninitialized_var(chanstat), uninitialized_var(mactime);
u32 uninitialized_var(macstat);
u16 chanid;
- u16 phytype;
int padding, rate_idx;
memset(&status, 0, sizeof(status));
@@ -684,7 +686,6 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
chanstat = le16_to_cpu(rxhdr->format_351.channel);
break;
}
- phytype = chanstat & B43_RX_CHAN_PHYTYPE;
if (unlikely(macstat & B43_RX_MAC_FCSERR)) {
dev->wl->ieee_stats.dot11FCSErrorCount++;
@@ -755,7 +756,6 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
else
status.signal = max(rxhdr->power0, rxhdr->power1);
break;
- case B43_PHYTYPE_A:
case B43_PHYTYPE_B:
case B43_PHYTYPE_G:
case B43_PHYTYPE_LP:
@@ -802,14 +802,6 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
chanid = (chanstat & B43_RX_CHAN_ID) >> B43_RX_CHAN_ID_SHIFT;
switch (chanstat & B43_RX_CHAN_PHYTYPE) {
- case B43_PHYTYPE_A:
- status.band = NL80211_BAND_5GHZ;
- B43_WARN_ON(1);
- /* FIXME: We don't really know which value the "chanid" contains.
- * So the following assignment might be wrong. */
- status.freq =
- ieee80211_channel_to_frequency(chanid, status.band);
- break;
case B43_PHYTYPE_G:
status.band = NL80211_BAND_2GHZ;
/* Somewhere between 478.104 and 508.1084 firmware for G-PHY
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index c7550dab6..f549c2560 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -166,41 +166,45 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
sdio_claim_irq(sdiodev->func[1], brcmf_sdiod_ib_irqhandler);
sdio_claim_irq(sdiodev->func[2], brcmf_sdiod_dummy_irqhandler);
sdio_release_host(sdiodev->func[1]);
+ sdiodev->sd_irq_requested = true;
}
return 0;
}
-int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
+void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
{
- struct brcmfmac_sdio_pd *pdata;
- brcmf_dbg(SDIO, "Entering\n");
+ brcmf_dbg(SDIO, "Entering oob=%d sd=%d\n",
+ sdiodev->oob_irq_requested,
+ sdiodev->sd_irq_requested);
- pdata = &sdiodev->settings->bus.sdio;
- if (pdata->oob_irq_supported) {
+ if (sdiodev->oob_irq_requested) {
+ struct brcmfmac_sdio_pd *pdata;
+
+ pdata = &sdiodev->settings->bus.sdio;
sdio_claim_host(sdiodev->func[1]);
brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
sdio_release_host(sdiodev->func[1]);
- if (sdiodev->oob_irq_requested) {
- sdiodev->oob_irq_requested = false;
- if (sdiodev->irq_wake) {
- disable_irq_wake(pdata->oob_irq_nr);
- sdiodev->irq_wake = false;
- }
- free_irq(pdata->oob_irq_nr, &sdiodev->func[1]->dev);
- sdiodev->irq_en = false;
+ sdiodev->oob_irq_requested = false;
+ if (sdiodev->irq_wake) {
+ disable_irq_wake(pdata->oob_irq_nr);
+ sdiodev->irq_wake = false;
}
- } else {
+ free_irq(pdata->oob_irq_nr, &sdiodev->func[1]->dev);
+ sdiodev->irq_en = false;
+ sdiodev->oob_irq_requested = false;
+ }
+
+ if (sdiodev->sd_irq_requested) {
sdio_claim_host(sdiodev->func[1]);
sdio_release_irq(sdiodev->func[2]);
sdio_release_irq(sdiodev->func[1]);
sdio_release_host(sdiodev->func[1]);
+ sdiodev->sd_irq_requested = false;
}
-
- return 0;
}
void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
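brcmf_sdiod_intr_unregister() above becomes state-driven: instead of consulting platform data, it releases only what registration actually recorded in the oob_irq_requested/sd_irq_requested flags and clears them again, which makes the call safe to repeat. That is what lets brcmf_ops_sdio_remove() in the next hunk invoke it for every SDIO function. A condensed sketch of the pairing (driver names, control flow simplified):

	/* register: record exactly what was claimed */
	if (pdata->oob_irq_supported)
		sdiodev->oob_irq_requested = true;  /* after request_irq()    */
	else
		sdiodev->sd_irq_requested = true;   /* after sdio_claim_irq() */

	/* unregister: release only what the flags say we own */
	if (sdiodev->oob_irq_requested) {
		/* free_irq(), then ... */
		sdiodev->oob_irq_requested = false;
	}
	if (sdiodev->sd_irq_requested) {
		/* sdio_release_irq(), then ... */
		sdiodev->sd_irq_requested = false;
	}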
@@ -722,8 +726,10 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
return -ENOMEM;
err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
glom_skb);
- if (err)
+ if (err) {
+ brcmu_pkt_buf_free_skb(glom_skb);
goto done;
+ }
skb_queue_walk(pktq, skb) {
memcpy(skb->data, glom_skb->data, skb->len);
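The two added lines above fix a leak: brcmf_sdiod_recv_chain() owns glom_skb until its contents have been copied into the packet queue, so the error path has to free it before jumping to done. The ownership rule in a self-contained sketch:

	#include <stdlib.h>

	static int do_io(void *buf) { (void)buf; return -1; } /* pretend the I/O failed */

	static int recv_chain(void)
	{
		void *glom = malloc(4096);
		int err;

		if (!glom)
			return -1;
		err = do_io(glom);
		if (err) {
			free(glom);  /* still ours on the error path */
			goto done;
		}
		/* ... copy out, then free as usual ... */
		free(glom);
	done:
		return err;
	}

	int main(void)
	{
		return recv_chain() ? 1 : 0;
	}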
@@ -1197,12 +1203,17 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
brcmf_dbg(SDIO, "Function: %d\n", func->num);
- if (func->num != 1)
- return;
-
bus_if = dev_get_drvdata(&func->dev);
if (bus_if) {
sdiodev = bus_if->bus_priv.sdio;
+
+ /* start by unregistering irqs */
+ brcmf_sdiod_intr_unregister(sdiodev);
+
+ if (func->num != 1)
+ return;
+
+ /* only proceed with rest of cleanup if func 1 */
brcmf_sdiod_remove(sdiodev);
dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 121baba7a..b8aec5e5e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -541,6 +541,21 @@ brcmf_cfg80211_update_proto_addr_mode(struct wireless_dev *wdev)
ADDR_INDIRECT);
}
+static int brcmf_get_first_free_bsscfgidx(struct brcmf_pub *drvr)
+{
+ int bsscfgidx;
+
+ for (bsscfgidx = 0; bsscfgidx < BRCMF_MAX_IFS; bsscfgidx++) {
+ /* bsscfgidx 1 is reserved for legacy P2P */
+ if (bsscfgidx == 1)
+ continue;
+ if (!drvr->iflist[bsscfgidx])
+ return bsscfgidx;
+ }
+
+ return -ENOMEM;
+}
+
static int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp)
{
struct brcmf_mbss_ssid_le mbss_ssid_le;
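brcmf_get_first_free_bsscfgidx() above replaces the more convoluted brcmf_get_next_free_bsscfgidx() deleted from core.c later in this diff: a plain first-fit scan over iflist that skips index 1, which is reserved for legacy P2P. A standalone mirror of the allocation behaviour, using a 4-entry iflist for brevity:

	#include <stdio.h>

	#define MAX_IFS 4

	/* First-fit scan; index 1 is reserved for legacy P2P. */
	static int first_free_bsscfgidx(const void *iflist[MAX_IFS])
	{
		int i;

		for (i = 0; i < MAX_IFS; i++) {
			if (i == 1)
				continue;
			if (!iflist[i])
				return i;
		}
		return -1;
	}

	int main(void)
	{
		const void *iflist[MAX_IFS] = { "primary", NULL, NULL, NULL };

		printf("%d\n", first_free_bsscfgidx(iflist)); /* prints 2, not 1 */
		return 0;
	}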
@@ -548,7 +563,7 @@ static int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp)
int err;
memset(&mbss_ssid_le, 0, sizeof(mbss_ssid_le));
- bsscfgidx = brcmf_get_next_free_bsscfgidx(ifp->drvr);
+ bsscfgidx = brcmf_get_first_free_bsscfgidx(ifp->drvr);
if (bsscfgidx < 0)
return bsscfgidx;
@@ -586,7 +601,7 @@ struct wireless_dev *brcmf_ap_add_vif(struct wiphy *wiphy, const char *name,
brcmf_dbg(INFO, "Adding vif \"%s\"\n", name);
- vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_AP, false);
+ vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_AP);
if (IS_ERR(vif))
return (struct wireless_dev *)vif;
@@ -669,20 +684,24 @@ static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
return ERR_PTR(-EOPNOTSUPP);
case NL80211_IFTYPE_AP:
wdev = brcmf_ap_add_vif(wiphy, name, flags, params);
- if (!IS_ERR(wdev))
- brcmf_cfg80211_update_proto_addr_mode(wdev);
- return wdev;
+ break;
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_GO:
case NL80211_IFTYPE_P2P_DEVICE:
wdev = brcmf_p2p_add_vif(wiphy, name, name_assign_type, type, flags, params);
- if (!IS_ERR(wdev))
- brcmf_cfg80211_update_proto_addr_mode(wdev);
- return wdev;
+ break;
case NL80211_IFTYPE_UNSPECIFIED:
default:
return ERR_PTR(-EINVAL);
}
+
+ if (IS_ERR(wdev))
+ brcmf_err("add iface %s type %d failed: err=%d\n",
+ name, type, (int)PTR_ERR(wdev));
+ else
+ brcmf_cfg80211_update_proto_addr_mode(wdev);
+
+ return wdev;
}
static void brcmf_scan_config_mpc(struct brcmf_if *ifp, int mpc)
@@ -756,9 +775,13 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
if (!aborted)
cfg80211_sched_scan_results(cfg_to_wiphy(cfg));
} else if (scan_request) {
+ struct cfg80211_scan_info info = {
+ .aborted = aborted,
+ };
+
brcmf_dbg(SCAN, "ESCAN Completed scan: %s\n",
aborted ? "Aborted" : "Done");
- cfg80211_scan_done(scan_request, aborted);
+ cfg80211_scan_done(scan_request, &info);
}
if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status))
brcmf_dbg(SCAN, "Scan complete, probably P2P scan\n");
@@ -766,12 +789,48 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
return err;
}
+static int brcmf_cfg80211_del_ap_iface(struct wiphy *wiphy,
+ struct wireless_dev *wdev)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
+ struct net_device *ndev = wdev->netdev;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ int ret;
+ int err;
+
+ brcmf_cfg80211_arm_vif_event(cfg, ifp->vif);
+
+ err = brcmf_fil_bsscfg_data_set(ifp, "interface_remove", NULL, 0);
+ if (err) {
+ brcmf_err("interface_remove failed %d\n", err);
+ goto err_unarm;
+ }
+
+ /* wait for firmware event */
+ ret = brcmf_cfg80211_wait_vif_event(cfg, BRCMF_E_IF_DEL,
+ BRCMF_VIF_EVENT_TIMEOUT);
+ if (!ret) {
+ brcmf_err("timeout occurred\n");
+ err = -EIO;
+ goto err_unarm;
+ }
+
+ brcmf_remove_interface(ifp, true);
+
+err_unarm:
+ brcmf_cfg80211_arm_vif_event(cfg, NULL);
+ return err;
+}
+
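brcmf_cfg80211_del_ap_iface() above is built around the driver's arm/wait/disarm protocol for firmware interface events; note that the disarm under err_unarm runs on every path, success included. The sequence, condensed from the function:

	brcmf_cfg80211_arm_vif_event(cfg, ifp->vif);           /* arm first     */
	err = brcmf_fil_bsscfg_data_set(ifp, "interface_remove", NULL, 0);
	ret = brcmf_cfg80211_wait_vif_event(cfg, BRCMF_E_IF_DEL,
					    BRCMF_VIF_EVENT_TIMEOUT);
	/* ret == 0 means timeout; otherwise remove the interface */
	brcmf_cfg80211_arm_vif_event(cfg, NULL);               /* always disarm */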
static
int brcmf_cfg80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev)
{
struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
struct net_device *ndev = wdev->netdev;
+ if (ndev && ndev == cfg_to_ndev(cfg))
+ return -ENOTSUPP;
+
/* vif event pending in firmware */
if (brcmf_cfg80211_vif_event_armed(cfg))
return -EBUSY;
@@ -788,12 +847,13 @@ int brcmf_cfg80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev)
switch (wdev->iftype) {
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_MESH_POINT:
return -EOPNOTSUPP;
+ case NL80211_IFTYPE_AP:
+ return brcmf_cfg80211_del_ap_iface(wiphy, wdev);
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_GO:
case NL80211_IFTYPE_P2P_DEVICE:
@@ -2750,7 +2810,7 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
if (!bi->ctl_ch) {
ch.chspec = le16_to_cpu(bi->chanspec);
cfg->d11inf.decchspec(&ch);
- bi->ctl_ch = ch.chnum;
+ bi->ctl_ch = ch.control_ch_num;
}
channel = bi->ctl_ch;
@@ -2868,7 +2928,7 @@ static s32 brcmf_inform_ibss(struct brcmf_cfg80211_info *cfg,
else
band = wiphy->bands[NL80211_BAND_5GHZ];
- freq = ieee80211_channel_to_frequency(ch.chnum, band->band);
+ freq = ieee80211_channel_to_frequency(ch.control_ch_num, band->band);
cfg->channel = freq;
notify_channel = ieee80211_get_channel(wiphy, freq);
@@ -2878,7 +2938,7 @@ static s32 brcmf_inform_ibss(struct brcmf_cfg80211_info *cfg,
notify_ielen = le32_to_cpu(bi->ie_length);
notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100;
- brcmf_dbg(CONN, "channel: %d(%d)\n", ch.chnum, freq);
+ brcmf_dbg(CONN, "channel: %d(%d)\n", ch.control_ch_num, freq);
brcmf_dbg(CONN, "capability: %X\n", notify_capability);
brcmf_dbg(CONN, "beacon interval: %d\n", notify_interval);
brcmf_dbg(CONN, "signal: %d\n", notify_signal);
@@ -4439,7 +4499,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
struct brcmf_join_params join_params;
enum nl80211_iftype dev_role;
struct brcmf_fil_bss_enable_le bss_enable;
- u16 chanspec;
+ u16 chanspec = chandef_to_chanspec(&cfg->d11inf, &settings->chandef);
bool mbss;
int is_11d;
@@ -4515,16 +4575,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
brcmf_config_ap_mgmt_ie(ifp->vif, &settings->beacon);
+ /* Parameters shared by all radio interfaces */
if (!mbss) {
- chanspec = chandef_to_chanspec(&cfg->d11inf,
- &settings->chandef);
- err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
- if (err < 0) {
- brcmf_err("Set Channel failed: chspec=%d, %d\n",
- chanspec, err);
- goto exit;
- }
-
if (is_11d != ifp->vif->is_11d) {
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY,
is_11d);
@@ -4572,6 +4624,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
err = -EINVAL;
goto exit;
}
+
+ /* Interface specific setup */
if (dev_role == NL80211_IFTYPE_AP) {
if ((brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) && (!mbss))
brcmf_fil_iovar_int_set(ifp, "mbss", 1);
@@ -4581,6 +4635,17 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
brcmf_err("setting AP mode failed %d\n", err);
goto exit;
}
+ if (!mbss) {
+ /* Firmware 10.x requires setting channel after enabling
+ * AP and before bringing interface up.
+ */
+ err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
+ if (err < 0) {
+ brcmf_err("Set Channel failed: chspec=%d, %d\n",
+ chanspec, err);
+ goto exit;
+ }
+ }
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1);
if (err < 0) {
brcmf_err("BRCMF_C_UP error (%d)\n", err);
@@ -4601,8 +4666,23 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
brcmf_err("SET SSID error (%d)\n", err);
goto exit;
}
+
+ if (settings->hidden_ssid) {
+ err = brcmf_fil_iovar_int_set(ifp, "closednet", 1);
+ if (err) {
+ brcmf_err("closednet error (%d)\n", err);
+ goto exit;
+ }
+ }
+
brcmf_dbg(TRACE, "AP mode configuration complete\n");
- } else {
+ } else if (dev_role == NL80211_IFTYPE_P2P_GO) {
+ err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);
+ if (err < 0) {
+ brcmf_err("Set Channel failed: chspec=%d, %d\n",
+ chanspec, err);
+ goto exit;
+ }
err = brcmf_fil_bsscfg_data_set(ifp, "ssid", &ssid_le,
sizeof(ssid_le));
if (err < 0) {
@@ -4619,7 +4699,10 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
}
brcmf_dbg(TRACE, "GO mode configuration complete\n");
+ } else {
+ WARN_ON(1);
}
+
set_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
brcmf_net_setcarrier(ifp, true);
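Taken together, the start_ap hunks above enforce one bring-up order for a non-MBSS AP: enable AP mode first, then program the channel (the comment pins this requirement on firmware 10.x), then bring the interface up and set the SSID. Condensed from the surrounding calls (names as used elsewhere in this file):

	brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 1);        /* 1. AP mode */
	brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec);   /* 2. channel */
	brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1);            /* 3. up      */
	brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
			       &join_params, sizeof(join_params)); /* 4. SSID */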
@@ -4650,6 +4733,10 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
return err;
}
+ /* First BSS doesn't get a full reset */
+ if (ifp->bsscfgidx == 0)
+ brcmf_fil_iovar_int_set(ifp, "closednet", 0);
+
memset(&join_params, 0, sizeof(join_params));
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
&join_params, sizeof(join_params));
@@ -4908,6 +4995,68 @@ exit:
return err;
}
+static int brcmf_cfg80211_get_channel(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ struct cfg80211_chan_def *chandef)
+{
+ struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct net_device *ndev = wdev->netdev;
+ struct brcmf_if *ifp;
+ struct brcmu_chan ch;
+ enum nl80211_band band = 0;
+ enum nl80211_chan_width width = 0;
+ u32 chanspec;
+ int freq, err;
+
+ if (!ndev)
+ return -ENODEV;
+ ifp = netdev_priv(ndev);
+
+ err = brcmf_fil_iovar_int_get(ifp, "chanspec", &chanspec);
+ if (err) {
+ brcmf_err("chanspec failed (%d)\n", err);
+ return err;
+ }
+
+ ch.chspec = chanspec;
+ cfg->d11inf.decchspec(&ch);
+
+ switch (ch.band) {
+ case BRCMU_CHAN_BAND_2G:
+ band = NL80211_BAND_2GHZ;
+ break;
+ case BRCMU_CHAN_BAND_5G:
+ band = NL80211_BAND_5GHZ;
+ break;
+ }
+
+ switch (ch.bw) {
+ case BRCMU_CHAN_BW_80:
+ width = NL80211_CHAN_WIDTH_80;
+ break;
+ case BRCMU_CHAN_BW_40:
+ width = NL80211_CHAN_WIDTH_40;
+ break;
+ case BRCMU_CHAN_BW_20:
+ width = NL80211_CHAN_WIDTH_20;
+ break;
+ case BRCMU_CHAN_BW_80P80:
+ width = NL80211_CHAN_WIDTH_80P80;
+ break;
+ case BRCMU_CHAN_BW_160:
+ width = NL80211_CHAN_WIDTH_160;
+ break;
+ }
+
+ freq = ieee80211_channel_to_frequency(ch.control_ch_num, band);
+ chandef->chan = ieee80211_get_channel(wiphy, freq);
+ chandef->width = width;
+ chandef->center_freq1 = ieee80211_channel_to_frequency(ch.chnum, band);
+ chandef->center_freq2 = 0;
+
+ return 0;
+}
+
static int brcmf_cfg80211_crit_proto_start(struct wiphy *wiphy,
struct wireless_dev *wdev,
enum nl80211_crit_proto_id proto,
@@ -5070,6 +5219,7 @@ static struct cfg80211_ops brcmf_cfg80211_ops = {
.mgmt_tx = brcmf_cfg80211_mgmt_tx,
.remain_on_channel = brcmf_p2p_remain_on_channel,
.cancel_remain_on_channel = brcmf_cfg80211_cancel_remain_on_channel,
+ .get_channel = brcmf_cfg80211_get_channel,
.start_p2p_device = brcmf_p2p_start_device,
.stop_p2p_device = brcmf_p2p_stop_device,
.crit_proto_start = brcmf_cfg80211_crit_proto_start,
@@ -5078,8 +5228,7 @@ static struct cfg80211_ops brcmf_cfg80211_ops = {
};
struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
- enum nl80211_iftype type,
- bool pm_block)
+ enum nl80211_iftype type)
{
struct brcmf_cfg80211_vif *vif_walk;
struct brcmf_cfg80211_vif *vif;
@@ -5094,8 +5243,6 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
vif->wdev.wiphy = cfg->wiphy;
vif->wdev.iftype = type;
- vif->pm_block = pm_block;
-
brcmf_init_prof(&vif->profile);
if (type == NL80211_IFTYPE_AP) {
@@ -5296,7 +5443,7 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
else
band = wiphy->bands[NL80211_BAND_5GHZ];
- freq = ieee80211_channel_to_frequency(ch.chnum, band->band);
+ freq = ieee80211_channel_to_frequency(ch.control_ch_num, band->band);
notify_channel = ieee80211_get_channel(wiphy, freq);
done:
@@ -5352,7 +5499,6 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
struct net_device *ndev,
const struct brcmf_event_msg *e, void *data)
{
- struct brcmf_if *ifp = netdev_priv(ndev);
static int generation;
u32 event = e->event_code;
u32 reason = e->reason;
@@ -5363,8 +5509,6 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
ndev != cfg_to_ndev(cfg)) {
brcmf_dbg(CONN, "AP mode link down\n");
complete(&cfg->vif_disabled);
- if (ifp->vif->mbss)
- brcmf_remove_interface(ifp);
return 0;
}
@@ -5491,7 +5635,7 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
ifevent->action, ifevent->flags, ifevent->ifidx,
ifevent->bsscfgidx);
- mutex_lock(&event->vif_event_lock);
+ spin_lock(&event->vif_event_lock);
event->action = ifevent->action;
vif = event->vif;
@@ -5499,7 +5643,7 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
case BRCMF_E_IF_ADD:
/* waiting process may have timed out */
if (!cfg->vif_event.vif) {
- mutex_unlock(&event->vif_event_lock);
+ spin_unlock(&event->vif_event_lock);
return -EBADF;
}
@@ -5510,24 +5654,24 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
ifp->ndev->ieee80211_ptr = &vif->wdev;
SET_NETDEV_DEV(ifp->ndev, wiphy_dev(cfg->wiphy));
}
- mutex_unlock(&event->vif_event_lock);
+ spin_unlock(&event->vif_event_lock);
wake_up(&event->vif_wq);
return 0;
case BRCMF_E_IF_DEL:
- mutex_unlock(&event->vif_event_lock);
+ spin_unlock(&event->vif_event_lock);
/* event may not be upon user request */
if (brcmf_cfg80211_vif_event_armed(cfg))
wake_up(&event->vif_wq);
return 0;
case BRCMF_E_IF_CHANGE:
- mutex_unlock(&event->vif_event_lock);
+ spin_unlock(&event->vif_event_lock);
wake_up(&event->vif_wq);
return 0;
default:
- mutex_unlock(&event->vif_event_lock);
+ spin_unlock(&event->vif_event_lock);
break;
}
return -EINVAL;
@@ -5648,7 +5792,7 @@ static void wl_deinit_priv(struct brcmf_cfg80211_info *cfg)
static void init_vif_event(struct brcmf_cfg80211_vif_event *event)
{
init_waitqueue_head(&event->vif_wq);
- mutex_init(&event->vif_event_lock);
+ spin_lock_init(&event->vif_event_lock);
}
static s32 brcmf_dongle_roam(struct brcmf_if *ifp)
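The vif_event_lock conversion above and in the cfg80211.h hunk below swaps the mutex for a spinlock without changing any critical section; the likely motive (not stated in the diff) is that the firmware event path taking this lock must not sleep. The resulting idiom:

	spin_lock_init(&event->vif_event_lock);   /* init_vif_event()    */

	spin_lock(&event->vif_event_lock);        /* short, non-sleeping */
	event->vif = vif;
	event->action = 0;
	spin_unlock(&event->vif_event_lock);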
@@ -5818,14 +5962,15 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
channel = band->channels;
index = band->n_channels;
for (j = 0; j < band->n_channels; j++) {
- if (channel[j].hw_value == ch.chnum) {
+ if (channel[j].hw_value == ch.control_ch_num) {
index = j;
break;
}
}
channel[index].center_freq =
- ieee80211_channel_to_frequency(ch.chnum, band->band);
- channel[index].hw_value = ch.chnum;
+ ieee80211_channel_to_frequency(ch.control_ch_num,
+ band->band);
+ channel[index].hw_value = ch.control_ch_num;
/* assuming the chanspecs order is HT20,
* HT40 upper, HT40 lower, and VHT80.
@@ -5927,7 +6072,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
if (WARN_ON(ch.bw != BRCMU_CHAN_BW_40))
continue;
for (j = 0; j < band->n_channels; j++) {
- if (band->channels[j].hw_value == ch.chnum)
+ if (band->channels[j].hw_value == ch.control_ch_num)
break;
}
if (WARN_ON(j == band->n_channels))
@@ -6193,29 +6338,15 @@ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
if (!combo)
goto err;
- c0_limits = kcalloc(p2p ? 3 : 2, sizeof(*c0_limits), GFP_KERNEL);
- if (!c0_limits)
- goto err;
-
- if (p2p) {
- p2p_limits = kcalloc(4, sizeof(*p2p_limits), GFP_KERNEL);
- if (!p2p_limits)
- goto err;
- }
-
- if (mbss) {
- mbss_limits = kcalloc(1, sizeof(*mbss_limits), GFP_KERNEL);
- if (!mbss_limits)
- goto err;
- }
-
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_AP);
c = 0;
i = 0;
- combo[c].num_different_channels = 1;
+ c0_limits = kcalloc(p2p ? 3 : 2, sizeof(*c0_limits), GFP_KERNEL);
+ if (!c0_limits)
+ goto err;
c0_limits[i].max = 1;
c0_limits[i++].types = BIT(NL80211_IFTYPE_STATION);
if (p2p) {
@@ -6233,6 +6364,7 @@ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
c0_limits[i].max = 1;
c0_limits[i++].types = BIT(NL80211_IFTYPE_AP);
}
+ combo[c].num_different_channels = 1;
combo[c].max_interfaces = i;
combo[c].n_limits = i;
combo[c].limits = c0_limits;
@@ -6240,7 +6372,9 @@ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
if (p2p) {
c++;
i = 0;
- combo[c].num_different_channels = 1;
+ p2p_limits = kcalloc(4, sizeof(*p2p_limits), GFP_KERNEL);
+ if (!p2p_limits)
+ goto err;
p2p_limits[i].max = 1;
p2p_limits[i++].types = BIT(NL80211_IFTYPE_STATION);
p2p_limits[i].max = 1;
@@ -6249,6 +6383,7 @@ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
p2p_limits[i++].types = BIT(NL80211_IFTYPE_P2P_CLIENT);
p2p_limits[i].max = 1;
p2p_limits[i++].types = BIT(NL80211_IFTYPE_P2P_DEVICE);
+ combo[c].num_different_channels = 1;
combo[c].max_interfaces = i;
combo[c].n_limits = i;
combo[c].limits = p2p_limits;
@@ -6256,14 +6391,19 @@ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
if (mbss) {
c++;
+ i = 0;
+ mbss_limits = kcalloc(1, sizeof(*mbss_limits), GFP_KERNEL);
+ if (!mbss_limits)
+ goto err;
+ mbss_limits[i].max = 4;
+ mbss_limits[i++].types = BIT(NL80211_IFTYPE_AP);
combo[c].beacon_int_infra_match = true;
combo[c].num_different_channels = 1;
- mbss_limits[0].max = 4;
- mbss_limits[0].types = BIT(NL80211_IFTYPE_AP);
combo[c].max_interfaces = 4;
- combo[c].n_limits = 1;
+ combo[c].n_limits = i;
combo[c].limits = mbss_limits;
}
+
wiphy->n_iface_combinations = n_combos;
wiphy->iface_combinations = combo;
return 0;
@@ -6551,9 +6691,9 @@ static inline bool vif_event_equals(struct brcmf_cfg80211_vif_event *event,
{
u8 evt_action;
- mutex_lock(&event->vif_event_lock);
+ spin_lock(&event->vif_event_lock);
evt_action = event->action;
- mutex_unlock(&event->vif_event_lock);
+ spin_unlock(&event->vif_event_lock);
return evt_action == action;
}
@@ -6562,10 +6702,10 @@ void brcmf_cfg80211_arm_vif_event(struct brcmf_cfg80211_info *cfg,
{
struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
- mutex_lock(&event->vif_event_lock);
+ spin_lock(&event->vif_event_lock);
event->vif = vif;
event->action = 0;
- mutex_unlock(&event->vif_event_lock);
+ spin_unlock(&event->vif_event_lock);
}
bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg)
@@ -6573,9 +6713,9 @@ bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg)
struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
bool armed;
- mutex_lock(&event->vif_event_lock);
+ spin_lock(&event->vif_event_lock);
armed = event->vif != NULL;
- mutex_unlock(&event->vif_event_lock);
+ spin_unlock(&event->vif_event_lock);
return armed;
}
@@ -6715,11 +6855,10 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
return NULL;
}
- ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+ ops = kmemdup(&brcmf_cfg80211_ops, sizeof(*ops), GFP_KERNEL);
if (!ops)
return NULL;
- memcpy(ops, &brcmf_cfg80211_ops, sizeof(*ops));
ifp = netdev_priv(ndev);
#ifdef CONFIG_PM
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL_GTK))
@@ -6740,7 +6879,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
init_vif_event(&cfg->vif_event);
INIT_LIST_HEAD(&cfg->vif_list);
- vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_STATION, false);
+ vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_STATION);
if (IS_ERR(vif))
goto wiphy_out;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index 95e35bcc1..8889832c1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -20,6 +20,10 @@
/* for brcmu_d11inf */
#include <brcmu_d11.h>
+#include "core.h"
+#include "fwil_types.h"
+#include "p2p.h"
+
#define WL_NUM_SCAN_MAX 10
#define WL_TLV_INFO_MAX 1024
#define WL_BSS_INFO_MAX 2048
@@ -167,7 +171,6 @@ struct vif_saved_ie {
* @wdev: wireless device.
* @profile: profile information.
* @sme_state: SME state using enum brcmf_vif_status bits.
- * @pm_block: power-management blocked.
* @list: linked list.
* @mgmt_rx_reg: registered rx mgmt frame types.
* @mbss: Multiple BSS type, set if not first AP (not relevant for P2P).
@@ -177,7 +180,6 @@ struct brcmf_cfg80211_vif {
struct wireless_dev wdev;
struct brcmf_cfg80211_profile profile;
unsigned long sme_state;
- bool pm_block;
struct vif_saved_ie saved_ie;
struct list_head list;
u16 mgmt_rx_reg;
@@ -225,7 +227,7 @@ struct escan_info {
*/
struct brcmf_cfg80211_vif_event {
wait_queue_head_t vif_wq;
- struct mutex vif_event_lock;
+ spinlock_t vif_event_lock;
u8 action;
struct brcmf_cfg80211_vif *vif;
};
@@ -388,8 +390,7 @@ s32 brcmf_cfg80211_down(struct net_device *ndev);
enum nl80211_iftype brcmf_cfg80211_get_iftype(struct brcmf_if *ifp);
struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
- enum nl80211_iftype type,
- bool pm_block);
+ enum nl80211_iftype type);
void brcmf_free_vif(struct brcmf_cfg80211_vif *vif);
s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index d3fd6b1db..05f22ff81 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -685,6 +685,8 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
case BRCM_CC_43602_CHIP_ID:
case BRCM_CC_4371_CHIP_ID:
return 0x180000;
+ case BRCM_CC_43465_CHIP_ID:
+ case BRCM_CC_43525_CHIP_ID:
case BRCM_CC_4365_CHIP_ID:
case BRCM_CC_4366_CHIP_ID:
return 0x200000;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index b590499f6..65e8c8766 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -516,7 +516,7 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
/* set appropriate operations */
ndev->netdev_ops = &brcmf_netdev_ops_pri;
- ndev->hard_header_len += drvr->hdrlen;
+ ndev->needed_headroom += drvr->hdrlen;
ndev->ethtool_ops = &brcmf_ethtool_ops;
drvr->rxsz = ndev->mtu + ndev->hard_header_len +
@@ -548,12 +548,16 @@ fail:
return -EBADE;
}
-static void brcmf_net_detach(struct net_device *ndev)
+static void brcmf_net_detach(struct net_device *ndev, bool rtnl_locked)
{
- if (ndev->reg_state == NETREG_REGISTERED)
- unregister_netdev(ndev);
- else
+ if (ndev->reg_state == NETREG_REGISTERED) {
+ if (rtnl_locked)
+ unregister_netdevice(ndev);
+ else
+ unregister_netdev(ndev);
+ } else {
brcmf_cfg80211_free_netdev(ndev);
+ }
}
void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on)
@@ -634,7 +638,7 @@ fail:
}
struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bsscfgidx, s32 ifidx,
- bool is_p2pdev, char *name, u8 *mac_addr)
+ bool is_p2pdev, const char *name, u8 *mac_addr)
{
struct brcmf_if *ifp;
struct net_device *ndev;
@@ -651,7 +655,7 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bsscfgidx, s32 ifidx,
brcmf_err("ERROR: netdev:%s already exists\n",
ifp->ndev->name);
netif_stop_queue(ifp->ndev);
- brcmf_net_detach(ifp->ndev);
+ brcmf_net_detach(ifp->ndev, false);
drvr->iflist[bsscfgidx] = NULL;
} else {
brcmf_dbg(INFO, "netdev:%s ignore IF event\n",
@@ -699,7 +703,8 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bsscfgidx, s32 ifidx,
return ifp;
}
-static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx)
+static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
+ bool rtnl_locked)
{
struct brcmf_if *ifp;
@@ -729,7 +734,7 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx)
cancel_work_sync(&ifp->multicast_work);
cancel_work_sync(&ifp->ndoffload_work);
}
- brcmf_net_detach(ifp->ndev);
+ brcmf_net_detach(ifp->ndev, rtnl_locked);
} else {
/* Only p2p device interfaces which get dynamically created
* end up here. In this case the p2p module should be informed
@@ -738,43 +743,19 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx)
* then p2p commands towards the firmware will cause some
* serious troublesome side effects. The p2p module will clean
* up the ifp if needed.
*/
- brcmf_p2p_ifp_removed(ifp);
+ brcmf_p2p_ifp_removed(ifp, rtnl_locked);
kfree(ifp);
}
}
-void brcmf_remove_interface(struct brcmf_if *ifp)
+void brcmf_remove_interface(struct brcmf_if *ifp, bool rtnl_locked)
{
if (!ifp || WARN_ON(ifp->drvr->iflist[ifp->bsscfgidx] != ifp))
return;
brcmf_dbg(TRACE, "Enter, bsscfgidx=%d, ifidx=%d\n", ifp->bsscfgidx,
ifp->ifidx);
brcmf_fws_del_interface(ifp);
- brcmf_del_if(ifp->drvr, ifp->bsscfgidx);
-}
-
-int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr)
-{
- int ifidx;
- int bsscfgidx;
- bool available;
- int highest;
-
- available = false;
- bsscfgidx = 2;
- highest = 2;
- for (ifidx = 0; ifidx < BRCMF_MAX_IFS; ifidx++) {
- if (drvr->iflist[ifidx]) {
- if (drvr->iflist[ifidx]->bsscfgidx == bsscfgidx)
- bsscfgidx = highest + 1;
- else if (drvr->iflist[ifidx]->bsscfgidx > highest)
- highest = drvr->iflist[ifidx]->bsscfgidx;
- } else {
- available = true;
- }
- }
-
- return available ? bsscfgidx : -ENOMEM;
+ brcmf_del_if(ifp->drvr, ifp->bsscfgidx, rtnl_locked);
}
#ifdef CONFIG_INET
@@ -1081,9 +1062,9 @@ fail:
brcmf_fws_deinit(drvr);
}
if (ifp)
- brcmf_net_detach(ifp->ndev);
+ brcmf_net_detach(ifp->ndev, false);
if (p2p_ifp)
- brcmf_net_detach(p2p_ifp->ndev);
+ brcmf_net_detach(p2p_ifp->ndev, false);
drvr->iflist[0] = NULL;
drvr->iflist[1] = NULL;
if (drvr->settings->ignore_probe_fail)
@@ -1152,7 +1133,7 @@ void brcmf_detach(struct device *dev)
/* make sure primary interface removed last */
for (i = BRCMF_MAX_IFS-1; i > -1; i--)
- brcmf_remove_interface(drvr->iflist[i]);
+ brcmf_remove_interface(drvr->iflist[i], false);
brcmf_cfg80211_detach(drvr->config);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index 647d3cc2a..8fa34cad5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -215,9 +215,8 @@ char *brcmf_ifname(struct brcmf_if *ifp);
struct brcmf_if *brcmf_get_ifp(struct brcmf_pub *drvr, int ifidx);
int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bsscfgidx, s32 ifidx,
- bool is_p2pdev, char *name, u8 *mac_addr);
-void brcmf_remove_interface(struct brcmf_if *ifp);
-int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr);
+ bool is_p2pdev, const char *name, u8 *mac_addr);
+void brcmf_remove_interface(struct brcmf_if *ifp, bool rtnl_locked);
void brcmf_txflowblock_if(struct brcmf_if *ifp,
enum brcmf_netif_stop_reason reason, bool state);
void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index b39056125..79c081fd5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -18,6 +18,7 @@
#include "brcmu_wifi.h"
#include "brcmu_utils.h"
+#include "cfg80211.h"
#include "core.h"
#include "debug.h"
#include "tracepoint.h"
@@ -182,8 +183,13 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data);
- if (ifp && ifevent->action == BRCMF_E_IF_DEL)
- brcmf_remove_interface(ifp);
+ if (ifp && ifevent->action == BRCMF_E_IF_DEL) {
+ bool armed = brcmf_cfg80211_vif_event_armed(drvr->config);
+
+ /* Default handling in case no-one waits for this event */
+ if (!armed)
+ brcmf_remove_interface(ifp, false);
+ }
}
/**
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index 2ce319903..9f9024a7b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -2101,7 +2101,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
brcmf_dbg(DATA, "tx proto=0x%X\n", ntohs(eh->h_proto));
/* determine the priority */
- if (!skb->priority)
+ if ((skb->priority == 0) || (skb->priority > 7))
skb->priority = cfg80211_classify8021d(skb, NULL);
drvr->tx_multicast += !!multicast;
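The widened check above exists because 802.11 QoS defines only eight user priorities (TIDs 0-7): a priority above 7 handed down by the stack is as unusable as an unset one, so both cases are now re-derived via cfg80211_classify8021d(). The guard, annotated:

	/* skb->priority must be a valid 802.1d UP (0..7) before the
	 * driver uses it to pick a queue; reclassify anything else */
	if ((skb->priority == 0) || (skb->priority > 7))
		skb->priority = cfg80211_classify8021d(skb, NULL);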
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index a70cda6c0..de19c7c92 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -1246,7 +1246,7 @@ bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg,
if (!bi->ctl_ch) {
ch.chspec = le16_to_cpu(bi->chanspec);
cfg->d11inf.decchspec(&ch);
- bi->ctl_ch = ch.chnum;
+ bi->ctl_ch = ch.control_ch_num;
}
afx_hdl->peer_chan = bi->ctl_ch;
brcmf_dbg(TRACE, "ACTION FRAME SCAN : Peer %pM found, channel : %d\n",
@@ -1385,7 +1385,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
&p2p->status) &&
(ether_addr_equal(afx_hdl->tx_dst_addr, e->addr))) {
- afx_hdl->peer_chan = ch.chnum;
+ afx_hdl->peer_chan = ch.control_ch_num;
brcmf_dbg(INFO, "GON request: Peer found, channel=%d\n",
afx_hdl->peer_chan);
complete(&afx_hdl->act_frm_scan);
@@ -1428,7 +1428,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
memcpy(&mgmt_frame->u, frame, mgmt_frame_len);
mgmt_frame_len += offsetof(struct ieee80211_mgmt, u);
- freq = ieee80211_channel_to_frequency(ch.chnum,
+ freq = ieee80211_channel_to_frequency(ch.control_ch_num,
ch.band == BRCMU_CHAN_BAND_2G ?
NL80211_BAND_2GHZ :
NL80211_BAND_5GHZ);
@@ -1873,7 +1873,7 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status) &&
(ether_addr_equal(afx_hdl->tx_dst_addr, e->addr))) {
- afx_hdl->peer_chan = ch.chnum;
+ afx_hdl->peer_chan = ch.control_ch_num;
brcmf_dbg(INFO, "PROBE REQUEST: Peer found, channel=%d\n",
afx_hdl->peer_chan);
complete(&afx_hdl->act_frm_scan);
@@ -1898,7 +1898,7 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
mgmt_frame = (u8 *)(rxframe + 1);
mgmt_frame_len = e->datalen - sizeof(*rxframe);
- freq = ieee80211_channel_to_frequency(ch.chnum,
+ freq = ieee80211_channel_to_frequency(ch.control_ch_num,
ch.band == BRCMU_CHAN_BAND_2G ?
NL80211_BAND_2GHZ :
NL80211_BAND_5GHZ);
@@ -2030,8 +2030,6 @@ static int brcmf_p2p_request_p2p_if(struct brcmf_p2p_info *p2p,
err = brcmf_fil_iovar_data_set(ifp, "p2p_ifadd", &if_request,
sizeof(if_request));
- if (err)
- return err;
return err;
}
@@ -2076,8 +2074,7 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p,
if (p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif)
return ERR_PTR(-ENOSPC);
- p2p_vif = brcmf_alloc_vif(p2p->cfg, NL80211_IFTYPE_P2P_DEVICE,
- false);
+ p2p_vif = brcmf_alloc_vif(p2p->cfg, NL80211_IFTYPE_P2P_DEVICE);
if (IS_ERR(p2p_vif)) {
brcmf_err("could not create discovery vif\n");
return (struct wireless_dev *)p2p_vif;
@@ -2177,7 +2174,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
return ERR_PTR(-EOPNOTSUPP);
}
- vif = brcmf_alloc_vif(cfg, type, false);
+ vif = brcmf_alloc_vif(cfg, type);
if (IS_ERR(vif))
return (struct wireless_dev *)vif;
brcmf_cfg80211_arm_vif_event(cfg, vif);
@@ -2264,6 +2261,8 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
return 0;
brcmf_p2p_cancel_remain_on_channel(vif->ifp);
brcmf_p2p_deinit_discovery(p2p);
+ break;
+
default:
return -ENOTSUPP;
}
@@ -2289,8 +2288,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
else
err = 0;
}
- if (err)
- brcmf_remove_interface(vif->ifp);
+ brcmf_remove_interface(vif->ifp, true);
brcmf_cfg80211_arm_vif_event(cfg, NULL);
if (vif->wdev.iftype != NL80211_IFTYPE_P2P_DEVICE)
@@ -2299,7 +2297,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
return err;
}
-void brcmf_p2p_ifp_removed(struct brcmf_if *ifp)
+void brcmf_p2p_ifp_removed(struct brcmf_if *ifp, bool rtnl_locked)
{
struct brcmf_cfg80211_info *cfg;
struct brcmf_cfg80211_vif *vif;
@@ -2308,9 +2306,11 @@ void brcmf_p2p_ifp_removed(struct brcmf_if *ifp)
vif = ifp->vif;
cfg = wdev_to_cfg(&vif->wdev);
cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
- rtnl_lock();
+ if (!rtnl_locked)
+ rtnl_lock();
cfg80211_unregister_wdev(&vif->wdev);
- rtnl_unlock();
+ if (!rtnl_locked)
+ rtnl_unlock();
brcmf_free_vif(vif);
}
@@ -2396,7 +2396,7 @@ void brcmf_p2p_detach(struct brcmf_p2p_info *p2p)
if (vif != NULL) {
brcmf_p2p_cancel_remain_on_channel(vif->ifp);
brcmf_p2p_deinit_discovery(p2p);
- brcmf_remove_interface(vif->ifp);
+ brcmf_remove_interface(vif->ifp, false);
}
/* just set it all to zero */
memset(p2p, 0, sizeof(*p2p));
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
index a3bd18c23..8ce944753 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
@@ -155,7 +155,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev);
int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
enum brcmf_fil_p2p_if_types if_type);
-void brcmf_p2p_ifp_removed(struct brcmf_if *ifp);
+void brcmf_p2p_ifp_removed(struct brcmf_if *ifp, bool rtnl_locked);
int brcmf_p2p_start_device(struct wiphy *wiphy, struct wireless_dev *wdev);
void brcmf_p2p_stop_device(struct wiphy *wiphy, struct wireless_dev *wdev);
int brcmf_p2p_scan_prep(struct wiphy *wiphy,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 099879ce8..b3b2c27e7 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -54,21 +54,25 @@ BRCMF_FW_NVRAM_DEF(43570, "/*(DEBLOBBED)*/", "brcmfmac43570-pcie.txt");
BRCMF_FW_NVRAM_DEF(4358, "/*(DEBLOBBED)*/", "brcmfmac4358-pcie.txt");
BRCMF_FW_NVRAM_DEF(4359, "/*(DEBLOBBED)*/", "brcmfmac4359-pcie.txt");
BRCMF_FW_NVRAM_DEF(4365B, "/*(DEBLOBBED)*/", "brcmfmac4365b-pcie.txt");
+BRCMF_FW_NVRAM_DEF(4365C, "/*(DEBLOBBED)*/", "brcmfmac4365c-pcie.txt");
BRCMF_FW_NVRAM_DEF(4366B, "/*(DEBLOBBED)*/", "brcmfmac4366b-pcie.txt");
BRCMF_FW_NVRAM_DEF(4366C, "/*(DEBLOBBED)*/", "brcmfmac4366c-pcie.txt");
BRCMF_FW_NVRAM_DEF(4371, "/*(DEBLOBBED)*/", "brcmfmac4371-pcie.txt");
static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602),
+ BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43465_CHIP_ID, 0xFFFFFFF0, 4366C),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4350_CHIP_ID, 0x000000FF, 4350C),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4350_CHIP_ID, 0xFFFFFF00, 4350),
+ BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43525_CHIP_ID, 0xFFFFFFF0, 4365C),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43567_CHIP_ID, 0xFFFFFFFF, 43570),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43569_CHIP_ID, 0xFFFFFFFF, 43570),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43570_CHIP_ID, 0xFFFFFFFF, 43570),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4358_CHIP_ID, 0xFFFFFFFF, 4358),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
- BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFFF, 4365B),
+ BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4365_CHIP_ID, 0x0000000F, 4365B),
+ BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFF0, 4365C),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4366_CHIP_ID, 0xFFFFFFF0, 4366C),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371),
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index af4652047..88159a833 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -1384,8 +1384,7 @@ static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
return -ENXIO;
}
if (rd->seq_num != rx_seq) {
- brcmf_err("seq %d: sequence number error, expect %d\n",
- rx_seq, rd->seq_num);
+ brcmf_dbg(SDIO, "seq %d, expected %d\n", rx_seq, rd->seq_num);
bus->sdcnt.rx_badseq++;
rd->seq_num = rx_seq;
}
@@ -3306,10 +3305,6 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
goto err;
}
- /* Allow full data communication using DPC from now on. */
- brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA);
- bcmerror = 0;
-
err:
brcmf_sdio_clkctl(bus, CLK_SDONLY, false);
sdio_release_host(bus->sdiodev->func[1]);
@@ -3666,7 +3661,7 @@ brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
str_shift = 11;
break;
default:
- brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
+ brcmf_dbg(INFO, "No SDIO drive strength init needed for chip %s rev %d pmurev %d\n",
ci->name, ci->chiprev, ci->pmurev);
break;
}
@@ -4047,6 +4042,9 @@ static void brcmf_sdio_firmware_callback(struct device *dev,
}
if (err == 0) {
+ /* Allow full data communication using DPC from now on. */
+ brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA);
+
err = brcmf_sdiod_intr_register(sdiodev);
if (err != 0)
brcmf_err("intr register failed:%d\n", err);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
index dcf0ce8cd..f3da32fc6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
@@ -186,6 +186,7 @@ struct brcmf_sdio_dev {
struct brcmf_bus *bus_if;
struct brcmf_mp_device *settings;
bool oob_irq_requested;
+ bool sd_irq_requested;
bool irq_en; /* irq enable flags */
spinlock_t irq_en_lock;
bool irq_wake; /* irq wake enable flags */
@@ -293,7 +294,7 @@ struct sdpcmd_regs {
/* Register/deregister interrupt handler. */
int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev);
-int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev);
/* sdio device register access interface */
u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
index 796f5f9d5..b7df576bb 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
@@ -1079,8 +1079,10 @@ bool dma_rxfill(struct dma_pub *pub)
pa = dma_map_single(di->dmadev, p->data, di->rxbufsize,
DMA_FROM_DEVICE);
- if (dma_mapping_error(di->dmadev, pa))
+ if (dma_mapping_error(di->dmadev, pa)) {
+ brcmu_pkt_buf_free_skb(p);
return false;
+ }
/* save the free packet pointer */
di->rxp[rxout] = p;
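Same ownership rule as the bcmsdh.c recv-chain fix earlier in this diff: when dma_map_single() fails, dma_rxfill() still owns the freshly allocated packet, so it must be freed before bailing out. Annotated:

	pa = dma_map_single(di->dmadev, p->data, di->rxbufsize,
			    DMA_FROM_DEVICE);
	if (dma_mapping_error(di->dmadev, pa)) {
		brcmu_pkt_buf_free_skb(p);  /* never handed to the ring */
		return false;
	}
	di->rxp[rxout] = p;                 /* ring owns it from here   */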
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
index e16ee6063..c2a938b59 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
@@ -3349,8 +3349,8 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
dma_rxfill(wlc_hw->di[RX_FIFO]);
}
-void
-static brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec) {
+static void brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec)
+{
u32 macintmask;
bool fastclk;
struct brcms_c_info *wlc = wlc_hw->wlc;
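The brcms_b_init() hunk is a pure style fix: C allows a storage-class specifier after other declaration specifiers, but C11 6.11.5 marks that placement obsolescent, and gcc's -Wold-style-declaration (enabled by -Wextra) warns about it. The ipw2200 hunk later in this diff (static void inline -> static inline void) fixes the same class of problem. Side by side:

	void static brcms_b_init_old(void);  /* legal, but obsolescent placement  */
	static void brcms_b_init_new(void);  /* conventional: storage class first */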
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
index 99dac9b8a..b3aab2fe9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
@@ -27017,7 +27017,7 @@ wlc_phy_rxcal_gainctrl_nphy_rev5(struct brcms_phy *pi, u8 rx_core,
tx_core = 1 - rx_core;
num_samps = 1024;
- desired_log2_pwr = (cal_type == 0) ? 13 : 13;
+ desired_log2_pwr = 13;
wlc_phy_rx_iq_coeffs_nphy(pi, 0, &save_comp);
zero_comp.a0 = zero_comp.b0 = zero_comp.a1 = zero_comp.b1 = 0x0;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.c
index dd9162722..0ab865de1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.c
@@ -87,7 +87,7 @@ void
brcms_c_stf_ss_algo_channel_get(struct brcms_c_info *wlc, u16 *ss_algo_channel,
u16 chanspec)
{
- struct tx_power power;
+ struct tx_power power = { };
u8 siso_mcs_id, cdd_mcs_id, stbc_mcs_id;
/* Clear previous settings */
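The empty initializer added above zero-fills the whole tx_power structure up front, so any field the subsequent power query leaves untouched reads as zero rather than stack garbage. { } is the GNU spelling of the idiom the kernel uses throughout; standard C before C23 would write { 0 }:

	struct tx_power power = { };   /* every member starts at zero */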
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
index 2b2522bdd..d8b79cb72 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
@@ -107,6 +107,7 @@ static void brcmu_d11n_decchspec(struct brcmu_chan *ch)
u16 val;
ch->chnum = (u8)(ch->chspec & BRCMU_CHSPEC_CH_MASK);
+ ch->control_ch_num = ch->chnum;
switch (ch->chspec & BRCMU_CHSPEC_D11N_BW_MASK) {
case BRCMU_CHSPEC_D11N_BW_20:
@@ -118,10 +119,10 @@ static void brcmu_d11n_decchspec(struct brcmu_chan *ch)
val = ch->chspec & BRCMU_CHSPEC_D11N_SB_MASK;
if (val == BRCMU_CHSPEC_D11N_SB_L) {
ch->sb = BRCMU_CHAN_SB_L;
- ch->chnum -= CH_10MHZ_APART;
+ ch->control_ch_num -= CH_10MHZ_APART;
} else {
ch->sb = BRCMU_CHAN_SB_U;
- ch->chnum += CH_10MHZ_APART;
+ ch->control_ch_num += CH_10MHZ_APART;
}
break;
default:
@@ -147,6 +148,7 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
u16 val;
ch->chnum = (u8)(ch->chspec & BRCMU_CHSPEC_CH_MASK);
+ ch->control_ch_num = ch->chnum;
switch (ch->chspec & BRCMU_CHSPEC_D11AC_BW_MASK) {
case BRCMU_CHSPEC_D11AC_BW_20:
@@ -158,10 +160,10 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
val = ch->chspec & BRCMU_CHSPEC_D11AC_SB_MASK;
if (val == BRCMU_CHSPEC_D11AC_SB_L) {
ch->sb = BRCMU_CHAN_SB_L;
- ch->chnum -= CH_10MHZ_APART;
+ ch->control_ch_num -= CH_10MHZ_APART;
} else if (val == BRCMU_CHSPEC_D11AC_SB_U) {
ch->sb = BRCMU_CHAN_SB_U;
- ch->chnum += CH_10MHZ_APART;
+ ch->control_ch_num += CH_10MHZ_APART;
} else {
WARN_ON_ONCE(1);
}
@@ -172,16 +174,16 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
BRCMU_CHSPEC_D11AC_SB_SHIFT);
switch (ch->sb) {
case BRCMU_CHAN_SB_LL:
- ch->chnum -= CH_30MHZ_APART;
+ ch->control_ch_num -= CH_30MHZ_APART;
break;
case BRCMU_CHAN_SB_LU:
- ch->chnum -= CH_10MHZ_APART;
+ ch->control_ch_num -= CH_10MHZ_APART;
break;
case BRCMU_CHAN_SB_UL:
- ch->chnum += CH_10MHZ_APART;
+ ch->control_ch_num += CH_10MHZ_APART;
break;
case BRCMU_CHAN_SB_UU:
- ch->chnum += CH_30MHZ_APART;
+ ch->control_ch_num += CH_30MHZ_APART;
break;
default:
WARN_ON_ONCE(1);
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
index 699f2c278..3cc42bef6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
@@ -40,7 +40,9 @@
#define BRCM_CC_4339_CHIP_ID 0x4339
#define BRCM_CC_43430_CHIP_ID 43430
#define BRCM_CC_4345_CHIP_ID 0x4345
+#define BRCM_CC_43465_CHIP_ID 43465
#define BRCM_CC_4350_CHIP_ID 0x4350
+#define BRCM_CC_43525_CHIP_ID 43525
#define BRCM_CC_4354_CHIP_ID 0x4354
#define BRCM_CC_4356_CHIP_ID 0x4356
#define BRCM_CC_43566_CHIP_ID 43566
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h
index f9745ea8b..8b8b2ecb3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h
@@ -125,14 +125,36 @@ enum brcmu_chan_sb {
BRCMU_CHAN_SB_UU = BRCMU_CHAN_SB_LUU,
};
+/**
+ * struct brcmu_chan - stores channel formats
+ *
+ * This structure can be used with functions that translate a chanspec into
+ * generic channel info and back.
+ *
+ * @chspec: firmware specific format
+ * @chnum: center channel number
+ * @control_ch_num: control channel number
+ * @band: frequency band
+ * @bw: channel width
+ * @sb: control sideband (location of the control channel relative to the center one)
+ */
struct brcmu_chan {
u16 chspec;
u8 chnum;
+ u8 control_ch_num;
u8 band;
enum brcmu_chan_bw bw;
enum brcmu_chan_sb sb;
};
+/**
+ * struct brcmu_d11inf - provides functions translating channel format
+ *
+ * @io_type: determines version of channel format used by firmware
+ * @encchspec: encodes channel info into a chanspec; requires the center
+ * channel number and ignores the control one
+ * @decchspec: decodes chanspec into generic info
+ */
struct brcmu_d11inf {
u8 io_type;
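A hedged usage sketch of the updated structure (chspec and d11inf are assumed to come from the caller):

	struct brcmu_chan ch = { .chspec = chspec };

	d11inf->decchspec(&ch);
	/* ch.chnum is the center channel; ch.control_ch_num the control channel */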
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index ca3cd2102..69b826d22 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -1102,8 +1102,8 @@ static const char version[] = "airo.c 0.6 (Ben Reed & Javier Achirica)";
struct airo_info;
static int get_dec_u16( char *buffer, int *start, int limit );
-static void OUT4500( struct airo_info *, u16 register, u16 value );
-static unsigned short IN4500( struct airo_info *, u16 register );
+static void OUT4500( struct airo_info *, u16 reg, u16 value );
+static unsigned short IN4500( struct airo_info *, u16 reg );
static u16 setup_card(struct airo_info*, u8 *mac, int lock);
static int enable_MAC(struct airo_info *ai, int lock);
static void disable_MAC(struct airo_info *ai, int lock);
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index 9f02e7e78..85d91de1e 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -4093,7 +4093,7 @@ static const char *ipw_get_status_code(u16 status)
return "Unknown status value.";
}
-static void inline average_init(struct average *avg)
+static inline void average_init(struct average *avg)
{
memset(avg, 0, sizeof(*avg));
}
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index eb24b9241..140b6ea8f 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -1305,10 +1305,14 @@ il_send_scan_abort(struct il_priv *il)
static void
il_complete_scan(struct il_priv *il, bool aborted)
{
+ struct cfg80211_scan_info info = {
+ .aborted = aborted,
+ };
+
/* check if scan was requested from mac80211 */
if (il->scan_request) {
D_SCAN("Complete scan in mac80211\n");
- ieee80211_scan_completed(il->hw, aborted);
+ ieee80211_scan_completed(il->hw, &info);
}
il->scan_vif = NULL;
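This hunk tracks a mac80211 API change: ieee80211_scan_completed() now takes a struct cfg80211_scan_info rather than a bare bool, so completion data can grow without further signature churn. The caller pattern, as a sketch:

	struct cfg80211_scan_info info = {
		.aborted = aborted,
	};

	ieee80211_scan_completed(hw, &info);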
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index 05828c61d..6e7ed908d 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -8,7 +8,7 @@ iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o
-iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o iwl-9000.o
+iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o iwl-9000.o iwl-a000.o
iwlwifi-objs += iwl-trans.o
iwlwifi-objs += $(iwlwifi-m)
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
index 8dda52ae3..6c2d6da7e 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
@@ -205,23 +205,6 @@ static const __le32 iwlagn_def_3w_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = {
cpu_to_le32(0xf0005000),
};
-
-/* Loose Coex */
-static const __le32 iwlagn_loose_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = {
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaeaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xcc00ff28),
- cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0xcc00aaaa),
- cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0xf0005000),
- cpu_to_le32(0xf0005000),
-};
-
/* Full concurrency */
static const __le32 iwlagn_concurrent_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = {
cpu_to_le32(0xaaaaaaaa),
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index 37b32a6f6..b49848683 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -1317,6 +1317,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
switch (iwlwifi_mod_params.amsdu_size) {
+ case IWL_AMSDU_DEF:
case IWL_AMSDU_4K:
trans_cfg.rx_buf_size = IWL_AMSDU_4K;
break;
@@ -1336,6 +1337,8 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
trans_cfg.command_groups_size = ARRAY_SIZE(iwl_dvm_groups);
trans_cfg.cmd_fifo = IWLAGN_CMD_FIFO_NUM;
+ trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info,
+ driver_data[2]);
WARN_ON(sizeof(priv->transport_queue_stop) * BITS_PER_BYTE <
priv->cfg->base_params->num_of_queues);
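With cb_data_offs set, the transport no longer hardcodes ieee80211_tx_info::driver_data and instead indexes into skb->cb at the offset the op mode reserved for it. A sketch of the consumer side (slot names here are illustrative assumptions):

	void **slot = (void **)(skb->cb + cb_data_offs);

	slot[0] = page_ptr;	/* transport-private pointer #1 */
	slot[1] = dev_cmd;	/* transport-private pointer #2; two pointers of space are guaranteed */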
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
index b22855218..087e57985 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
@@ -523,11 +523,6 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv,
return ret;
}
- if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION &&
- priv->cfg->ht_params && priv->cfg->ht_params->smps_mode)
- ieee80211_request_smps(ctx->vif,
- priv->cfg->ht_params->smps_mode);
-
return 0;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
index d01766f16..17e6a3238 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
@@ -94,10 +94,14 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
static void iwl_complete_scan(struct iwl_priv *priv, bool aborted)
{
+ struct cfg80211_scan_info info = {
+ .aborted = aborted,
+ };
+
/* check if scan was requested from mac80211 */
if (priv->scan_request) {
IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n");
- ieee80211_scan_completed(priv->hw, aborted);
+ ieee80211_scan_completed(priv->hw, &info);
}
priv->scan_type = IWL_SCAN_NORMAL;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
index 8d164d896..bea15510c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
@@ -73,8 +73,8 @@
/* Highest firmware API version supported */
#define IWL7260_UCODE_API_MAX 17
#define IWL7265_UCODE_API_MAX 17
-#define IWL7265D_UCODE_API_MAX 21
-#define IWL3168_UCODE_API_MAX 21
+#define IWL7265D_UCODE_API_MAX 24
+#define IWL3168_UCODE_API_MAX 24
/* Lowest firmware API version supported */
#define IWL7260_UCODE_API_MIN 16
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index 0b4684234..b50aee86b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -70,8 +70,8 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX 21
-#define IWL8265_UCODE_API_MAX 21
+#define IWL8000_UCODE_API_MAX 24
+#define IWL8265_UCODE_API_MAX 24
/* Lowest firmware API version supported */
#define IWL8000_UCODE_API_MIN 16
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
index 19569fff2..03b26168c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
@@ -55,7 +55,7 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL9000_UCODE_API_MAX 21
+#define IWL9000_UCODE_API_MAX 24
/* Lowest firmware API version supported */
#define IWL9000_UCODE_API_MIN 16
@@ -178,6 +178,7 @@ const struct iwl_cfg iwl5165_2ac_cfg = {
.nvm_ver = IWL9000_NVM_VERSION,
.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
};
/*(DEBLOBBED)*/
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-a000.c b/drivers/net/wireless/intel/iwlwifi/iwl-a000.c
new file mode 100644
index 000000000..ac29c8301
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-a000.c
@@ -0,0 +1,131 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015-2016 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015-2016 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include "iwl-config.h"
+#include "iwl-agn-hw.h"
+
+/* Highest firmware API version supported */
+#define IWL_A000_UCODE_API_MAX 24
+
+/* Lowest firmware API version supported */
+#define IWL_A000_UCODE_API_MIN 24
+
+/* NVM versions */
+#define IWL_A000_NVM_VERSION 0x0a1d
+#define IWL_A000_TX_POWER_VERSION 0xffff /* meaningless */
+
+/* Memory offsets and lengths */
+#define IWL_A000_DCCM_OFFSET 0x800000
+#define IWL_A000_DCCM_LEN 0x18000
+#define IWL_A000_DCCM2_OFFSET 0x880000
+#define IWL_A000_DCCM2_LEN 0x8000
+#define IWL_A000_SMEM_OFFSET 0x400000
+#define IWL_A000_SMEM_LEN 0x68000
+
+#define IWL_A000_FW_PRE "/*(DEBLOBBED)*/"
+#define IWL_A000_MODULE_FIRMWARE(api) \
+ IWL_A000_FW_PRE /*(DEBLOBBED)*/
+
+#define NVM_HW_SECTION_NUM_FAMILY_A000 10
+
+static const struct iwl_base_params iwl_a000_base_params = {
+ .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_A000,
+ .num_of_queues = 31,
+ .shadow_ram_support = true,
+ .led_compensation = 57,
+ .wd_timeout = IWL_LONG_WD_TIMEOUT,
+ .max_event_log_size = 512,
+ .shadow_reg_enable = true,
+ .pcie_l1_allowed = true,
+};
+
+static const struct iwl_ht_params iwl_a000_ht_params = {
+ .stbc = true,
+ .ldpc = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+};
+
+#define IWL_DEVICE_A000 \
+ .ucode_api_max = IWL_A000_UCODE_API_MAX, \
+ .ucode_api_min = IWL_A000_UCODE_API_MIN, \
+ .device_family = IWL_DEVICE_FAMILY_8000, \
+ .max_inst_size = IWL60_RTC_INST_SIZE, \
+ .max_data_size = IWL60_RTC_DATA_SIZE, \
+ .base_params = &iwl_a000_base_params, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_A000, \
+ .non_shared_ant = ANT_A, \
+ .dccm_offset = IWL_A000_DCCM_OFFSET, \
+ .dccm_len = IWL_A000_DCCM_LEN, \
+ .dccm2_offset = IWL_A000_DCCM2_OFFSET, \
+ .dccm2_len = IWL_A000_DCCM2_LEN, \
+ .smem_offset = IWL_A000_SMEM_OFFSET, \
+ .smem_len = IWL_A000_SMEM_LEN, \
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
+ .apmg_not_supported = true, \
+ .mq_rx_supported = true, \
+ .vht_mu_mimo_supported = true, \
+ .mac_addr_from_csr = true, \
+ .use_tfh = true
+
+const struct iwl_cfg iwla000_2ac_cfg = {
+ .name = "Intel(R) Dual Band Wireless AC a000",
+ .fw_name_pre = IWL_A000_FW_PRE,
+ IWL_DEVICE_A000,
+ .ht_params = &iwl_a000_ht_params,
+ .nvm_ver = IWL_A000_NVM_VERSION,
+ .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+/*(DEBLOBBED)*/
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 4a0af7de8..423b23320 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -66,8 +66,9 @@
#define __IWL_CONFIG_H__
#include <linux/types.h>
-#include <net/mac80211.h>
-
+#include <linux/netdevice.h>
+#include <linux/ieee80211.h>
+#include <linux/nl80211.h>
enum iwl_device_family {
IWL_DEVICE_FAMILY_UNDEFINED,
@@ -192,7 +193,6 @@ struct iwl_base_params {
* @ht40_bands: bitmap of bands (using %NL80211_BAND_*) that support HT40
*/
struct iwl_ht_params {
- enum ieee80211_smps_mode smps_mode;
u8 ht_greenfield_support:1,
stbc:1,
ldpc:1,
@@ -261,6 +261,7 @@ struct iwl_tt_params {
#define OTP_LOW_IMAGE_SIZE_FAMILY_7000 (16 * 512 * sizeof(u16)) /* 16 KB */
#define OTP_LOW_IMAGE_SIZE_FAMILY_8000 (32 * 512 * sizeof(u16)) /* 32 KB */
#define OTP_LOW_IMAGE_SIZE_FAMILY_9000 OTP_LOW_IMAGE_SIZE_FAMILY_8000
+#define OTP_LOW_IMAGE_SIZE_FAMILY_A000 OTP_LOW_IMAGE_SIZE_FAMILY_9000
struct iwl_eeprom_params {
const u8 regulatory_bands[7];
@@ -319,6 +320,7 @@ struct iwl_pwr_tx_backoff {
* @mq_rx_supported: multi-queue rx support
* @vht_mu_mimo_supported: VHT MU-MIMO support
* @rf_id: need to read rf_id to determine the firmware image
+ * @integrated: set for devices integrated into the SoC, clear for discrete ones
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@@ -362,7 +364,9 @@ struct iwl_cfg {
apmg_not_supported:1,
mq_rx_supported:1,
vht_mu_mimo_supported:1,
- rf_id:1;
+ rf_id:1,
+ integrated:1,
+ use_tfh:1;
u8 valid_tx_ant;
u8 valid_rx_ant;
u8 non_shared_ant;
@@ -448,6 +452,7 @@ extern const struct iwl_cfg iwl4165_2ac_sdio_cfg;
extern const struct iwl_cfg iwl9260_2ac_cfg;
extern const struct iwl_cfg iwl9260lc_2ac_cfg;
extern const struct iwl_cfg iwl5165_2ac_cfg;
+extern const struct iwl_cfg iwla000_2ac_cfg;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index b52913448..871ad02fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -145,8 +145,10 @@
#define CSR_LED_REG (CSR_BASE+0x094)
#define CSR_DRAM_INT_TBL_REG (CSR_BASE+0x0A0)
-#define CSR_MAC_SHADOW_REG_CTRL (CSR_BASE+0x0A8) /* 6000 and up */
-
+#define CSR_MAC_SHADOW_REG_CTRL (CSR_BASE + 0x0A8) /* 6000 and up */
+#define CSR_MAC_SHADOW_REG_CTRL_RX_WAKE BIT(20)
+#define CSR_MAC_SHADOW_REG_CTL2 (CSR_BASE + 0x0AC)
+#define CSR_MAC_SHADOW_REG_CTL2_RX_WAKE 0xFFFF
/* GIO Chicken Bits (PCI Express bus link power management) */
#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
index 110333208..cd77c6971 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
@@ -41,6 +41,7 @@ static inline bool iwl_have_debug_level(u32 level)
#endif
}
+struct device;
void __iwl_err(struct device *dev, bool rfkill_prefix, bool only_trace,
const char *fmt, ...) __printf(4, 5);
void __iwl_warn(struct device *dev, const char *fmt, ...) __printf(2, 3);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h
index 27914eedc..1dccae653 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h
@@ -1,6 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -83,6 +84,23 @@ TRACE_EVENT(iwlwifi_dev_iowrite32,
__get_str(dev), __entry->offs, __entry->val)
);
+TRACE_EVENT(iwlwifi_dev_iowrite64,
+ TP_PROTO(const struct device *dev, u64 offs, u64 val),
+ TP_ARGS(dev, offs, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u64, offs)
+ __field(u64, val)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->offs = offs;
+ __entry->val = val;
+ ),
+ TP_printk("[%s] write io[%llu] = %llu)",
+ __get_str(dev), __entry->offs, __entry->val)
+);
+
TRACE_EVENT(iwlwifi_dev_iowrite_prph32,
TP_PROTO(const struct device *dev, u32 offs, u32 val),
TP_ARGS(dev, offs, val),
@@ -100,6 +118,23 @@ TRACE_EVENT(iwlwifi_dev_iowrite_prph32,
__get_str(dev), __entry->offs, __entry->val)
);
+TRACE_EVENT(iwlwifi_dev_iowrite_prph64,
+ TP_PROTO(const struct device *dev, u64 offs, u64 val),
+ TP_ARGS(dev, offs, val),
+ TP_STRUCT__entry(
+ DEV_ENTRY
+ __field(u64, offs)
+ __field(u64, val)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->offs = offs;
+ __entry->val = val;
+ ),
+ TP_printk("[%s] write PRPH[%llu] = %llu)",
+ __get_str(dev), __entry->offs, __entry->val)
+);
+
TRACE_EVENT(iwlwifi_dev_ioread_prph32,
TP_PROTO(const struct device *dev, u32 offs, u32 val),
TP_ARGS(dev, offs, val),
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
index f4d3cd010..545d14b0b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
@@ -1,6 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(C) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -33,11 +34,29 @@
static inline bool iwl_trace_data(struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ __le16 fc = hdr->frame_control;
+ int offs = 24; /* start with normal header length */
- if (!ieee80211_is_data(hdr->frame_control))
+ if (!ieee80211_is_data(fc))
return false;
- return !(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO);
+
+ /* Try to determine if the frame is EAPOL. This might have false
+ * positives (if there's no RFC 1042 header and we compare to some
+ * payload instead) but since we're only doing tracing that's not
+ * a problem.
+ */
+
+ if (ieee80211_has_a4(fc))
+ offs += 6;
+ if (ieee80211_is_data_qos(fc))
+ offs += 2;
+ /* don't account for crypto - these are unencrypted */
+
+ /* also account for the RFC 1042 header, of course */
+ offs += 6;
+
+ return skb->len > offs + 2 &&
+ *(__be16 *)(skb->data + offs) == cpu_to_be16(ETH_P_PAE);
}
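A worked example of the offset arithmetic above: for a QoS data frame with four addresses, the SNAP ethertype lands at 24 (base header) + 6 (fourth address) + 2 (QoS control) + 6 (LLC/SNAP bytes before the ethertype) = byte 38, so the check becomes:

	int offs = 24 + 6 + 2 + 6;	/* == 38 */
	bool is_eapol = skb->len > offs + 2 &&
			*(__be16 *)(skb->data + offs) == cpu_to_be16(ETH_P_PAE);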
static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index c695125d0..f47ff2607 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -129,8 +129,8 @@ struct iwl_drv {
};
enum {
- DVM_OP_MODE = 0,
- MVM_OP_MODE = 1,
+ DVM_OP_MODE,
+ MVM_OP_MODE,
};
/* Protects the table contents, i.e. the ops pointer & drv list */
@@ -326,8 +326,6 @@ static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len)
int i, j;
struct iwl_fw_cscheme_list *l = (struct iwl_fw_cscheme_list *)data;
struct iwl_fw_cipher_scheme *fwcs;
- struct ieee80211_cipher_scheme *cs;
- u32 cipher;
if (len < sizeof(*l) ||
len < sizeof(l->size) + l->size * sizeof(l->cs[0]))
@@ -335,22 +333,12 @@ static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len)
for (i = 0, j = 0; i < IWL_UCODE_MAX_CS && i < l->size; i++) {
fwcs = &l->cs[j];
- cipher = le32_to_cpu(fwcs->cipher);
/* we skip schemes with zero cipher suite selector */
- if (!cipher)
+ if (!fwcs->cipher)
continue;
- cs = &fw->cs[j++];
- cs->cipher = cipher;
- cs->iftype = BIT(NL80211_IFTYPE_STATION);
- cs->hdr_len = fwcs->hdr_len;
- cs->pn_len = fwcs->pn_len;
- cs->pn_off = fwcs->pn_off;
- cs->key_idx_off = fwcs->key_idx_off;
- cs->key_idx_mask = fwcs->key_idx_mask;
- cs->key_idx_shift = fwcs->key_idx_shift;
- cs->mic_len = fwcs->mic_len;
+ fw->cs[j++] = *fwcs;
}
return 0;
@@ -795,17 +783,17 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
case IWL_UCODE_TLV_SEC_RT:
iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
tlv_len);
- drv->fw.mvm_fw = true;
+ drv->fw.type = IWL_FW_MVM;
break;
case IWL_UCODE_TLV_SEC_INIT:
iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT,
tlv_len);
- drv->fw.mvm_fw = true;
+ drv->fw.type = IWL_FW_MVM;
break;
case IWL_UCODE_TLV_SEC_WOWLAN:
iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN,
tlv_len);
- drv->fw.mvm_fw = true;
+ drv->fw.type = IWL_FW_MVM;
break;
case IWL_UCODE_TLV_DEF_CALIB:
if (tlv_len != sizeof(struct iwl_tlv_calib_data))
@@ -827,17 +815,17 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
case IWL_UCODE_TLV_SECURE_SEC_RT:
iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
tlv_len);
- drv->fw.mvm_fw = true;
+ drv->fw.type = IWL_FW_MVM;
break;
case IWL_UCODE_TLV_SECURE_SEC_INIT:
iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT,
tlv_len);
- drv->fw.mvm_fw = true;
+ drv->fw.type = IWL_FW_MVM;
break;
case IWL_UCODE_TLV_SECURE_SEC_WOWLAN:
iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN,
tlv_len);
- drv->fw.mvm_fw = true;
+ drv->fw.type = IWL_FW_MVM;
break;
case IWL_UCODE_TLV_NUM_OF_CPU:
if (tlv_len != sizeof(u32))
@@ -1275,7 +1263,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
* In mvm uCode there is no difference between data and instructions
* sections.
*/
- if (!fw->mvm_fw && validate_sec_sizes(drv, pieces, drv->cfg))
+ if (fw->type == IWL_FW_DVM && validate_sec_sizes(drv, pieces, drv->cfg))
goto try_again;
/* Allocate ucode buffers for card's bus-master loading ... */
@@ -1403,10 +1391,16 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
release_firmware(ucode_raw);
mutex_lock(&iwlwifi_opmode_table_mtx);
- if (fw->mvm_fw)
- op = &iwlwifi_opmode_table[MVM_OP_MODE];
- else
+ switch (fw->type) {
+ case IWL_FW_DVM:
op = &iwlwifi_opmode_table[DVM_OP_MODE];
+ break;
+ default:
+ WARN(1, "Invalid fw type %d\n", fw->type);
+ case IWL_FW_MVM:
+ op = &iwlwifi_opmode_table[MVM_OP_MODE];
+ break;
+ }
IWL_INFO(drv, "loaded firmware version %s op_mode %s\n",
drv->fw.fw_version, op->name);
@@ -1658,7 +1652,8 @@ MODULE_PARM_DESC(11n_disable,
"disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX");
module_param_named(amsdu_size, iwlwifi_mod_params.amsdu_size,
int, S_IRUGO);
-MODULE_PARM_DESC(amsdu_size, "amsdu size 0:4K 1:8K 2:12K (default 0)");
+MODULE_PARM_DESC(amsdu_size,
+ "amsdu size 0: 12K for multi Rx queue devices, 4K for other devices 1:4K 2:8K 3:12K (default 0)");
module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, bool, S_IRUGO);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)");
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
index bf1b69aec..3199d345b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
@@ -766,7 +766,9 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
if (cfg->ht_params->ldpc)
ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
- if (iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K)
+ if ((cfg->mq_rx_supported &&
+ iwlwifi_mod_params.amsdu_size != IWL_AMSDU_4K) ||
+ iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K)
ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
ht_info->ampdu_factor = cfg->max_ht_ampdu_exponent;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
index 1f4e50289..e04a91d70 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
@@ -66,6 +66,7 @@
#include <linux/types.h>
#include <linux/if_ether.h>
+#include <net/cfg80211.h>
#include "iwl-trans.h"
struct iwl_nvm_data {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index 270f39ecd..1d6f5d21a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -77,6 +77,7 @@
*/
#define FH_MEM_LOWER_BOUND (0x1000)
#define FH_MEM_UPPER_BOUND (0x2000)
+#define TFH_MEM_LOWER_BOUND (0xA06000)
/**
* Keep-Warm (KW) buffer base address.
@@ -118,10 +119,17 @@
#define FH_MEM_CBBC_16_19_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
#define FH_MEM_CBBC_20_31_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xB20)
#define FH_MEM_CBBC_20_31_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xB80)
+/* a000 TFD table address, 64 bit */
+#define TFH_TFDQ_CBB_TABLE (TFH_MEM_LOWER_BOUND + 0x1C00)
/* Find TFD CB base pointer for given queue */
-static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
+static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
+ unsigned int chnl)
{
+ if (trans->cfg->use_tfh) {
+ WARN_ON_ONCE(chnl >= 64);
+ return TFH_TFDQ_CBB_TABLE + 8 * chnl;
+ }
if (chnl < 16)
return FH_MEM_CBBC_0_15_LOWER_BOUND + 4 * chnl;
if (chnl < 20)
@@ -130,6 +138,65 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
return FH_MEM_CBBC_20_31_LOWER_BOUND + 4 * (chnl - 20);
}
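A worked example of the new lookup: on use_tfh hardware every queue owns an 8-byte (64-bit pointer) slot in the TFD table, versus 4 bytes in the legacy layout. For queue 5:

	unsigned int tfh_addr = FH_MEM_CBBC_QUEUE(trans, 5);		/* TFH_TFDQ_CBB_TABLE + 8 * 5 */
	unsigned int legacy_addr = FH_MEM_CBBC_0_15_LOWER_BOUND + 4 * 5;	/* pre-a000 layout */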
+/* a000 configuration registers */
+
+/*
+ * TFH Configuration register.
+ *
+ * BIT fields:
+ *
+ * Bits 3:0:
+ * Define the maximum number of pending read requests.
+ * Maximum configuration value allowed is 0xC
+ * Bits 9:8:
+ * Define the maximum transfer size. (64 / 128 / 256)
+ * Bit 10:
+ * When bit is set and transfer size is set to 128B, the TFH will enable
+ * reading chunks of more than 64B only if the read address is aligned to 128B.
+ * If the DRAM read address is not aligned to 128B, the TFH will use a
+ * transfer size that does not cross a 64B DRAM address boundary.
+ */
+#define TFH_TRANSFER_MODE (TFH_MEM_LOWER_BOUND + 0x1F40)
+#define TFH_TRANSFER_MAX_PENDING_REQ 0xc
+#define TFH_CHUNK_SIZE_128 BIT(8)
+#define TFH_CHUNK_SPLIT_MODE BIT(10)
+/*
+ * Defines the offset, in dwords, from the beginning of the Tx CMD that
+ * will be updated in DRAM.
+ * Note that the TFH offset address for the Tx CMD update is always relative
+ * to the start of the TFD's first TB.
+ * In case of a DRAM Tx CMD update, the TFH updates the PN and the Key ID.
+ */
+#define TFH_TXCMD_UPDATE_CFG (TFH_MEM_LOWER_BOUND + 0x1F48)
+/*
+ * Controls TX DMA operation
+ *
+ * BIT fields:
+ *
+ * Bits 31:30: Enable the SRAM DMA channel.
+ * Turning on bit 31 will kick the SRAM2DRAM DMA.
+ * Note that the SRAM2DRAM DMA may be enabled only after configuring the DRAM
+ * and SRAM address registers and the byte count register.
+ * Bits 25:24: Define the interrupt target upon dram2sram transfer completion.
+ * When set to 1, an interrupt is sent to the driver.
+ * Bit 0: Indicates the snoop configuration.
+ */
+#define TFH_SRV_DMA_CHNL0_CTRL (TFH_MEM_LOWER_BOUND + 0x1F60)
+#define TFH_SRV_DMA_SNOOP BIT(0)
+#define TFH_SRV_DMA_TO_DRIVER BIT(24)
+#define TFH_SRV_DMA_START BIT(31)
+
+/* Defines the DMA SRAM write start address to transfer a data block */
+#define TFH_SRV_DMA_CHNL0_SRAM_ADDR (TFH_MEM_LOWER_BOUND + 0x1F64)
+
+/* Defines the 64bits DRAM start address to read the DMA data block from */
+#define TFH_SRV_DMA_CHNL0_DRAM_ADDR (TFH_MEM_LOWER_BOUND + 0x1F68)
+
+/*
+ * Defines the number of bytes to transfer from DRAM to SRAM.
+ * Note that this register may be configured with non-dword aligned size.
+ */
+#define TFH_SRV_DMA_CHNL0_BC (TFH_MEM_LOWER_BOUND + 0x1F70)
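A hedged sketch of the programming order the comments above imply (not driver code; sram_addr, dram_addr and num_bytes are assumed caller values, and it is assumed these registers live in the same PRPH space the RFH registers below are accessed through): addresses and byte count are configured before the channel is kicked with the START bit.

	iwl_write_prph(trans, TFH_SRV_DMA_CHNL0_SRAM_ADDR, sram_addr);
	iwl_write_prph(trans, TFH_SRV_DMA_CHNL0_DRAM_ADDR, lower_32_bits(dram_addr));
	iwl_write_prph(trans, TFH_SRV_DMA_CHNL0_DRAM_ADDR + 4, upper_32_bits(dram_addr));
	iwl_write_prph(trans, TFH_SRV_DMA_CHNL0_BC, num_bytes);
	iwl_write_prph(trans, TFH_SRV_DMA_CHNL0_CTRL,
		       TFH_SRV_DMA_SNOOP | TFH_SRV_DMA_TO_DRIVER | TFH_SRV_DMA_START);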
/**
* Rx SRAM Control and Status Registers (RSCSR)
@@ -344,6 +411,32 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
#define RFH_RBDBUF_RBD0_LSB 0xA08300
#define RFH_RBDBUF_RBD_LSB(q) (RFH_RBDBUF_RBD0_LSB + (q) * 8)
+/**
+ * RFH Status Register
+ *
+ * Bit fields:
+ *
+ * Bit 29: RBD_FETCH_IDLE
+ * This status flag is set by the RFH when there is no active RBD fetch from
+ * DRAM.
+ * Once the RFH RBD controller starts fetching (or when there is a pending
+ * RBD read response from DRAM), this flag is immediately turned off.
+ *
+ * Bit 30: SRAM_DMA_IDLE
+ * This status flag is set by the RFH when there is no active transaction from
+ * SRAM to DRAM.
+ * Once the SRAM to DRAM DMA is active, this flag is immediately turned off.
+ *
+ * Bit 31: RXF_DMA_IDLE
+ * This status flag is set by the RFH when there is no active transaction from
+ * RXF to DRAM.
+ * Once the RXF-to-DRAM DMA is active, this flag is immediately turned off.
+ */
+#define RFH_GEN_STATUS 0xA09808
+#define RBD_FETCH_IDLE BIT(29)
+#define SRAM_DMA_IDLE BIT(30)
+#define RXF_DMA_IDLE BIT(31)
+
/* DMA configuration */
#define RFH_RXF_DMA_CFG 0xA09820
/* RB size */
@@ -384,7 +477,9 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
#define RFH_GEN_CFG 0xA09800
#define RFH_GEN_CFG_SERVICE_DMA_SNOOP BIT(0)
#define RFH_GEN_CFG_RFH_DMA_SNOOP BIT(1)
-#define RFH_GEN_CFG_RB_CHUNK_SIZE BIT(4) /* 0 - 64B, 1- 128B */
+#define RFH_GEN_CFG_RB_CHUNK_SIZE_POS 4
+#define RFH_GEN_CFG_RB_CHUNK_SIZE_128 1
+#define RFH_GEN_CFG_RB_CHUNK_SIZE_64 0
#define RFH_GEN_CFG_DEFAULT_RXQ_NUM_MASK 0xF00
#define RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS 8
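With the single bit replaced by a position/value pair, the chunk size is now written as a field. A sketch (illustrative only):

	u32 cfg = iwl_read_prph(trans, RFH_GEN_CFG);

	cfg |= RFH_GEN_CFG_RB_CHUNK_SIZE_128 << RFH_GEN_CFG_RB_CHUNK_SIZE_POS;
	iwl_write_prph(trans, RFH_GEN_CFG, cfg);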
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h
index 09b7ea28f..420c31dab 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h
@@ -89,6 +89,9 @@
* @IWL_FW_ERROR_DUMP_PAGING: UMAC's image memory segments which were
* paged to the DRAM.
* @IWL_FW_ERROR_DUMP_RADIO_REG: Dump the radio registers.
+ * @IWL_FW_ERROR_DUMP_EXTERNAL: used only by external code utilities, and
+ * for that reason is not in use in any other place in the Linux Wi-Fi
+ * stack.
*/
enum iwl_fw_error_dump_type {
/* 0 is deprecated */
@@ -106,6 +109,7 @@ enum iwl_fw_error_dump_type {
IWL_FW_ERROR_DUMP_PAGING = 12,
IWL_FW_ERROR_DUMP_RADIO_REG = 13,
IWL_FW_ERROR_DUMP_INTERNAL_TXF = 14,
+ IWL_FW_ERROR_DUMP_EXTERNAL = 15, /* Do not move */
IWL_FW_ERROR_DUMP_MAX,
};
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
index 37dc09e8b..1b1e045f8 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
@@ -301,7 +301,8 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command
* @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload
* @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
- * @IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD: support p2p standalone U-APSD
+ * @IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD: supports U-APSD on the p2p interface
+ * when it is standalone or paired with a BSS station interface in the same
+ * binding.
* @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running
* @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different
* sources for the MCC. This TLV bit is a future replacement to
@@ -312,6 +313,9 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement
* @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts
* @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT
+ * @IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD: the firmware supports CSA
+ * countdown offloading. Beacon notifications are not sent to the host.
+ * The fw also offloads TBTT alignment.
* @IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION: firmware will decide on what
* antenna the beacon should be transmitted
* @IWL_UCODE_TLV_CAPA_BEACON_STORING: firmware will store the latest beacon
@@ -326,6 +330,9 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG: support getting more shared
* memory addresses from the firmware.
* @IWL_UCODE_TLV_CAPA_LQM_SUPPORT: supports Link Quality Measurement
+ * @IWL_UCODE_TLV_CAPA_TX_POWER_ACK: the reduced TX power API has a larger
+ * command size (command version 4) that supports toggling ACK TX
+ * power reduction.
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@@ -347,7 +354,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT = (__force iwl_ucode_tlv_capa_t)19,
IWL_UCODE_TLV_CAPA_CSUM_SUPPORT = (__force iwl_ucode_tlv_capa_t)21,
IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22,
- IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD = (__force iwl_ucode_tlv_capa_t)26,
+ IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD = (__force iwl_ucode_tlv_capa_t)26,
IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = (__force iwl_ucode_tlv_capa_t)28,
IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29,
IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30,
@@ -356,6 +363,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65,
IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67,
IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT = (__force iwl_ucode_tlv_capa_t)68,
+ IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD = (__force iwl_ucode_tlv_capa_t)70,
IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION = (__force iwl_ucode_tlv_capa_t)71,
IWL_UCODE_TLV_CAPA_BEACON_STORING = (__force iwl_ucode_tlv_capa_t)72,
IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2 = (__force iwl_ucode_tlv_capa_t)73,
@@ -365,6 +373,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED = (__force iwl_ucode_tlv_capa_t)77,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG = (__force iwl_ucode_tlv_capa_t)80,
IWL_UCODE_TLV_CAPA_LQM_SUPPORT = (__force iwl_ucode_tlv_capa_t)81,
+ IWL_UCODE_TLV_CAPA_TX_POWER_ACK = (__force iwl_ucode_tlv_capa_t)84,
NUM_IWL_UCODE_TLV_CAPA
#ifdef __CHECKER__
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h
index e461d6318..74ea68d10 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h
@@ -67,7 +67,6 @@
#ifndef __iwl_fw_h__
#define __iwl_fw_h__
#include <linux/types.h>
-#include <net/mac80211.h>
#include "iwl-fw-file.h"
#include "iwl-fw-error-dump.h"
@@ -231,6 +230,16 @@ struct iwl_gscan_capabilities {
};
/**
+ * enum iwl_fw_type - iwlwifi firmware type
+ * @IWL_FW_DVM: DVM firmware
+ * @IWL_FW_MVM: MVM firmware
+ */
+enum iwl_fw_type {
+ IWL_FW_DVM,
+ IWL_FW_MVM,
+};
+
+/**
* struct iwl_fw - variables associated with the firmware
*
* @ucode_ver: ucode version from the ucode file
@@ -244,7 +253,7 @@ struct iwl_gscan_capabilities {
* @inst_evtlog_ptr: event log offset for runtime ucode.
* @inst_evtlog_size: event log size for runtime ucode.
* @inst_errlog_ptr: error log offset for runtime ucode.
- * @mvm_fw: indicates this is MVM firmware
+ * @type: firmware type (&enum iwl_fw_type)
* @cipher_scheme: optional external cipher scheme.
* @human_readable: human readable version
* @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
@@ -275,9 +284,9 @@ struct iwl_fw {
u8 valid_tx_ant;
u8 valid_rx_ant;
- bool mvm_fw;
+ enum iwl_fw_type type;
- struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
+ struct iwl_fw_cipher_scheme cs[IWL_UCODE_MAX_CS];
u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
u32 sdio_adma_addr;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index 32c8f84ae..92c8b5f9a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -1,7 +1,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
*
* Portions of this file are derived from the ipw3945 project.
*
@@ -51,6 +51,14 @@ void iwl_write32(struct iwl_trans *trans, u32 ofs, u32 val)
}
IWL_EXPORT_SYMBOL(iwl_write32);
+void iwl_write64(struct iwl_trans *trans, u64 ofs, u64 val)
+{
+ trace_iwlwifi_dev_iowrite64(trans->dev, ofs, val);
+ iwl_trans_write32(trans, ofs, val & 0xffffffff);
+ iwl_trans_write32(trans, ofs + 4, val >> 32);
+}
+IWL_EXPORT_SYMBOL(iwl_write64);
+
u32 iwl_read32(struct iwl_trans *trans, u32 ofs)
{
u32 val = iwl_trans_read32(trans, ofs);
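A worked example of the split performed by the new helper: the low dword goes to ofs and the high dword to ofs + 4, matching the device's little-endian 64-bit register layout.

	iwl_write64(trans, ofs, 0x0000000123456789ULL);
	/* equivalent to:
	 *	iwl_trans_write32(trans, ofs, 0x23456789);
	 *	iwl_trans_write32(trans, ofs + 4, 0x00000001);
	 */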
@@ -102,6 +110,17 @@ void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value)
}
IWL_EXPORT_SYMBOL(iwl_write_direct32);
+void iwl_write_direct64(struct iwl_trans *trans, u64 reg, u64 value)
+{
+ unsigned long flags;
+
+ if (iwl_trans_grab_nic_access(trans, &flags)) {
+ iwl_write64(trans, reg, value);
+ iwl_trans_release_nic_access(trans, &flags);
+ }
+}
+IWL_EXPORT_SYMBOL(iwl_write_direct64);
+
int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
int timeout)
{
@@ -133,6 +152,14 @@ void iwl_write_prph_no_grab(struct iwl_trans *trans, u32 ofs, u32 val)
}
IWL_EXPORT_SYMBOL(iwl_write_prph_no_grab);
+void iwl_write_prph64_no_grab(struct iwl_trans *trans, u64 ofs, u64 val)
+{
+ trace_iwlwifi_dev_iowrite_prph64(trans->dev, ofs, val);
+ iwl_write_prph_no_grab(trans, ofs, val & 0xffffffff);
+ iwl_write_prph_no_grab(trans, ofs + 4, val >> 32);
+}
+IWL_EXPORT_SYMBOL(iwl_write_prph64_no_grab);
+
u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs)
{
unsigned long flags;
@@ -228,9 +255,117 @@ void iwl_force_nmi(struct iwl_trans *trans)
}
IWL_EXPORT_SYMBOL(iwl_force_nmi);
-static const char *get_fh_string(int cmd)
+static const char *get_rfh_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
+#define IWL_CMD_MQ(arg, reg, q) { if (arg == reg(q)) return #reg; }
+
+ int i;
+
+ for (i = 0; i < IWL_MAX_RX_HW_QUEUES; i++) {
+ IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_BA_LSB, i);
+ IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_WIDX, i);
+ IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_RIDX, i);
+ IWL_CMD_MQ(cmd, RFH_Q_URBD_STTS_WPTR_LSB, i);
+ }
+
+ switch (cmd) {
+ IWL_CMD(RFH_RXF_DMA_CFG);
+ IWL_CMD(RFH_GEN_CFG);
+ IWL_CMD(RFH_GEN_STATUS);
+ IWL_CMD(FH_TSSR_TX_STATUS_REG);
+ IWL_CMD(FH_TSSR_TX_ERROR_REG);
+ default:
+ return "UNKNOWN";
+ }
+#undef IWL_CMD_MQ
+}
+
+struct reg {
+ u32 addr;
+ bool is64;
+};
+
+static int iwl_dump_rfh(struct iwl_trans *trans, char **buf)
+{
+ int i, q;
+ int num_q = trans->num_rx_queues;
+ static const u32 rfh_tbl[] = {
+ RFH_RXF_DMA_CFG,
+ RFH_GEN_CFG,
+ RFH_GEN_STATUS,
+ FH_TSSR_TX_STATUS_REG,
+ FH_TSSR_TX_ERROR_REG,
+ };
+ static const struct reg rfh_mq_tbl[] = {
+ { RFH_Q0_FRBDCB_BA_LSB, true },
+ { RFH_Q0_FRBDCB_WIDX, false },
+ { RFH_Q0_FRBDCB_RIDX, false },
+ { RFH_Q0_URBD_STTS_WPTR_LSB, true },
+ };
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (buf) {
+ int pos = 0;
+ /*
+ * Register (up to 34 for name + 8 blank/q for MQ): 40 chars
+ * Colon + space: 2 characters
+ * 0X%08x: 10 characters
+ * New line: 1 character
+ * Total of 53 characters
+ */
+ size_t bufsz = ARRAY_SIZE(rfh_tbl) * 53 +
+ ARRAY_SIZE(rfh_mq_tbl) * 53 * num_q + 40;
+
+ *buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!*buf)
+ return -ENOMEM;
+
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "RFH register values:\n");
+
+ for (i = 0; i < ARRAY_SIZE(rfh_tbl); i++)
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "%40s: 0X%08x\n",
+ get_rfh_string(rfh_tbl[i]),
+ iwl_read_prph(trans, rfh_tbl[i]));
+
+ for (i = 0; i < ARRAY_SIZE(rfh_mq_tbl); i++)
+ for (q = 0; q < num_q; q++) {
+ u32 addr = rfh_mq_tbl[i].addr;
+
+ addr += q * (rfh_mq_tbl[i].is64 ? 8 : 4);
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "%34s(q %2d): 0X%08x\n",
+ get_rfh_string(addr), q,
+ iwl_read_prph(trans, addr));
+ }
+
+ return pos;
+ }
+#endif
+
+ IWL_ERR(trans, "RFH register values:\n");
+ for (i = 0; i < ARRAY_SIZE(rfh_tbl); i++)
+ IWL_ERR(trans, " %34s: 0X%08x\n",
+ get_rfh_string(rfh_tbl[i]),
+ iwl_read_prph(trans, rfh_tbl[i]));
+
+ for (i = 0; i < ARRAY_SIZE(rfh_mq_tbl); i++)
+ for (q = 0; q < num_q; q++) {
+ u32 addr = rfh_mq_tbl[i].addr;
+
+ addr += q * (rfh_mq_tbl[i].is64 ? 8 : 4);
+ IWL_ERR(trans, " %34s(q %d): 0X%08x\n",
+ get_rfh_string(addr), q,
+ iwl_read_prph(trans, addr));
+ }
+
+ return 0;
+}
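A hedged usage sketch: iwl_dump_fh() (below) now routes multi-queue devices here and keeps its dual personality. With a buffer pointer the dump is returned in a kmalloc'd string for debugfs (CONFIG_IWLWIFI_DEBUGFS only); with NULL it is printed to the error log:

	char *buf;
	int len = iwl_dump_fh(trans, &buf);	/* debugfs path; caller kfree()s buf */

	iwl_dump_fh(trans, NULL);		/* error path; IWL_ERR() output */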
+
+static const char *get_fh_string(int cmd)
+{
switch (cmd) {
IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
@@ -262,6 +397,9 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf)
FH_TSSR_TX_ERROR_REG
};
+ if (trans->cfg->mq_rx_supported)
+ return iwl_dump_rfh(trans, buf);
+
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (buf) {
int pos = 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
index a9bcc788c..5c8c0e130 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
@@ -34,6 +34,7 @@
void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val);
void iwl_write32(struct iwl_trans *trans, u32 ofs, u32 val);
+void iwl_write64(struct iwl_trans *trans, u64 ofs, u64 val);
u32 iwl_read32(struct iwl_trans *trans, u32 ofs);
static inline void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
@@ -53,11 +54,13 @@ int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg);
void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value);
+void iwl_write_direct64(struct iwl_trans *trans, u64 reg, u64 value);
u32 iwl_read_prph_no_grab(struct iwl_trans *trans, u32 ofs);
u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs);
void iwl_write_prph_no_grab(struct iwl_trans *trans, u32 ofs, u32 val);
+void iwl_write_prph64_no_grab(struct iwl_trans *trans, u64 ofs, u64 val);
void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
index 6c5c2f9f7..4d32b10fe 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -66,7 +66,6 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
-#include <net/mac80211.h>
extern struct iwl_mod_params iwlwifi_mod_params;
@@ -87,9 +86,10 @@ enum iwl_disable_11n {
};
enum iwl_amsdu_size {
- IWL_AMSDU_4K = 0,
- IWL_AMSDU_8K = 1,
- IWL_AMSDU_12K = 2,
+ IWL_AMSDU_DEF = 0,
+ IWL_AMSDU_4K = 1,
+ IWL_AMSDU_8K = 2,
+ IWL_AMSDU_12K = 3,
};
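The renumbering makes 0 mean "device default" rather than 4K. A sketch of the effective buffer size, inferred from the dvm/main.c and nvm-parse hunks elsewhere in this patch (an assumption, not driver code):

	static unsigned int amsdu_bytes(enum iwl_amsdu_size sz, bool mq_rx_supported)
	{
		switch (sz) {
		case IWL_AMSDU_DEF:
			return mq_rx_supported ? 12 * 1024 : 4 * 1024;
		case IWL_AMSDU_4K:
			return 4 * 1024;
		case IWL_AMSDU_8K:
			return 8 * 1024;
		case IWL_AMSDU_12K:
			return 12 * 1024;
		}
		return 4 * 1024;
	}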
enum iwl_uapsd_disable {
@@ -105,7 +105,7 @@ enum iwl_uapsd_disable {
* @sw_crypto: using hardware encryption, default = 0
* @disable_11n: disable 11n capabilities, default = 0,
* use IWL_[DIS,EN]ABLE_HT_* constants
- * @amsdu_size: enable 8K amsdu size, default = 4K. enum iwl_amsdu_size.
+ * @amsdu_size: See &enum iwl_amsdu_size.
* @restart_fw: restart firmware, default = 1
* @bt_coex_active: enable bt coex, default = true
* @led_mode: system default, default = 0
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 21653fee8..43f8f7d45 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -397,6 +397,13 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
switch (iwlwifi_mod_params.amsdu_size) {
+ case IWL_AMSDU_DEF:
+ if (cfg->mq_rx_supported)
+ vht_cap->cap |=
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
+ else
+ vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
+ break;
case IWL_AMSDU_4K:
vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
break;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 6c1d20ded..459bf736f 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -417,5 +417,6 @@ enum {
};
#define UREG_CHICK (0xA05C00)
+#define UREG_CHICK_MSI_ENABLE BIT(24)
#define UREG_CHICK_MSIX_ENABLE BIT(25)
#endif /* __iwl_prph_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 8193d36ae..5535e2238 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -211,6 +211,9 @@ struct iwl_cmd_header_wide {
#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */
#define FH_RSCSR_FRAME_INVALID 0x55550000
#define FH_RSCSR_FRAME_ALIGN 0x40
+#define FH_RSCSR_RPA_EN BIT(25)
+#define FH_RSCSR_RXQ_POS 16
+#define FH_RSCSR_RXQ_MASK 0x3F0000
struct iwl_rx_packet {
/*
@@ -220,7 +223,13 @@ struct iwl_rx_packet {
* 31: flag flush RB request
* 30: flag ignore TC (terminal counter) request
* 29: flag fast IRQ request
- * 28-14: Reserved
+ * 28-26: Reserved
+ * 25: Offload enabled
+ * 24: RPF enabled
+ * 23: RSS enabled
+ * 22: Checksum enabled
+ * 21-16: RX queue
+ * 15-14: Reserved
* 13-00: RX frame size
*/
__le32 len_n_flags;
@@ -383,11 +392,6 @@ static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
#define MAX_NO_RECLAIM_CMDS 6
-/*
- * The first entry in driver_data array in ieee80211_tx_info
- * that can be used by the transport.
- */
-#define IWL_TRANS_FIRST_DRIVER_DATA 2
#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
/*
@@ -491,6 +495,8 @@ struct iwl_hcmd_arr {
* @command_groups_size: number of command groups, to avoid illegal access
* @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
* we get the ALIVE from the uCode
+ * @cb_data_offs: offset inside skb->cb to store transport data at, must have
+ * space for at least two pointers
*/
struct iwl_trans_config {
struct iwl_op_mode *op_mode;
@@ -510,6 +516,8 @@ struct iwl_trans_config {
int command_groups_size;
u32 sdio_adma_addr;
+
+ u8 cb_data_offs;
};
struct iwl_trans_dump_data {
@@ -574,6 +582,7 @@ struct iwl_trans_txq_scd_cfg {
* configured. May sleep.
* @txq_disable: de-configure a Tx queue to send AMPDUs
* Must be atomic
+ * @txq_set_shared_mode: change Tx queue shared/unshared marking
* @wait_tx_queue_empty: wait until tx queues are empty. May sleep.
* @freeze_txq_timer: prevents the timer of the queue from firing until the
* queue is set to awake. Must be atomic.
@@ -637,6 +646,9 @@ struct iwl_trans_ops {
void (*txq_disable)(struct iwl_trans *trans, int queue,
bool configure_scd);
+ void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
+ bool shared);
+
int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
bool freeze);
@@ -749,6 +761,7 @@ enum iwl_plat_pm_mode {
* @ops - pointer to iwl_trans_ops
* @op_mode - pointer to the op_mode
* @cfg - pointer to the configuration
+ * @drv - pointer to iwl_drv
* @status: a bit-mask of transport status flags
* @dev - pointer to struct device * that represents the device
* @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
@@ -792,6 +805,7 @@ struct iwl_trans {
const struct iwl_trans_ops *ops;
struct iwl_op_mode *op_mode;
const struct iwl_cfg *cfg;
+ struct iwl_drv *drv;
enum iwl_trans_state state;
unsigned long status;
@@ -1052,6 +1066,13 @@ iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
trans->ops->txq_enable(trans, queue, ssn, cfg, queue_wdg_timeout);
}
+static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
+ int queue, bool shared_mode)
+{
+ if (trans->ops->txq_set_shared_mode)
+ trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
+}
+
static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
int fifo, int sta_id, int tid,
int frame_limit, u16 ssn,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index a63f5bbb1..5bdb6c2c8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -142,7 +142,7 @@ static const __le64 iwl_ci_mask[][3] = {
cpu_to_le64(0x0)
},
{
- cpu_to_le64(0xFFC0000000ULL),
+ cpu_to_le64(0xFE00000000ULL),
cpu_to_le64(0x0ULL),
cpu_to_le64(0x0ULL)
},
@@ -615,8 +615,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
* don't reduce the Tx power if one of these is true:
* we are in LOOSE
* single share antenna product
- * BT is active
- * we are associated
+ * BT is inactive
+ * we are not associated
*/
if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 406cf1cb9..b34489817 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1020,6 +1020,8 @@ static ssize_t iwl_dbgfs_max_amsdu_len_write(struct iwl_mvm *mvm,
int ret;
ret = kstrtouint(buf, 0, &max_amsdu_len);
+ if (ret)
+ return ret;
if (max_amsdu_len > IEEE80211_MAX_MPDU_LEN_VHT_11454)
return -EINVAL;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h
index 2a33b694b..204c1b139 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h
@@ -70,85 +70,6 @@
#define BITS(nb) (BIT(nb) - 1)
-/**
- * enum iwl_bt_coex_flags - flags for BT_COEX command
- * @BT_COEX_MODE_POS:
- * @BT_COEX_MODE_MSK:
- * @BT_COEX_DISABLE_OLD:
- * @BT_COEX_2W_OLD:
- * @BT_COEX_3W_OLD:
- * @BT_COEX_NW_OLD:
- * @BT_COEX_AUTO_OLD:
- * @BT_COEX_BT_OLD: Antenna is for BT (manufacuring tests)
- * @BT_COEX_WIFI_OLD: Antenna is for BT (manufacuring tests)
- * @BT_COEX_SYNC2SCO:
- * @BT_COEX_CORUNNING:
- * @BT_COEX_MPLUT:
- * @BT_COEX_TTC:
- * @BT_COEX_RRC:
- *
- * The COEX_MODE must be set for each command. Even if it is not changed.
- */
-enum iwl_bt_coex_flags {
- BT_COEX_MODE_POS = 3,
- BT_COEX_MODE_MSK = BITS(3) << BT_COEX_MODE_POS,
- BT_COEX_DISABLE_OLD = 0x0 << BT_COEX_MODE_POS,
- BT_COEX_2W_OLD = 0x1 << BT_COEX_MODE_POS,
- BT_COEX_3W_OLD = 0x2 << BT_COEX_MODE_POS,
- BT_COEX_NW_OLD = 0x3 << BT_COEX_MODE_POS,
- BT_COEX_AUTO_OLD = 0x5 << BT_COEX_MODE_POS,
- BT_COEX_BT_OLD = 0x6 << BT_COEX_MODE_POS,
- BT_COEX_WIFI_OLD = 0x7 << BT_COEX_MODE_POS,
- BT_COEX_SYNC2SCO = BIT(7),
- BT_COEX_CORUNNING = BIT(8),
- BT_COEX_MPLUT = BIT(9),
- BT_COEX_TTC = BIT(20),
- BT_COEX_RRC = BIT(21),
-};
-
-/*
- * indicates what has changed in the BT_COEX command.
- * BT_VALID_ENABLE must be set for each command. Commands without this bit will
- * discarded by the firmware
- */
-enum iwl_bt_coex_valid_bit_msk {
- BT_VALID_ENABLE = BIT(0),
- BT_VALID_BT_PRIO_BOOST = BIT(1),
- BT_VALID_MAX_KILL = BIT(2),
- BT_VALID_3W_TMRS = BIT(3),
- BT_VALID_KILL_ACK = BIT(4),
- BT_VALID_KILL_CTS = BIT(5),
- BT_VALID_REDUCED_TX_POWER = BIT(6),
- BT_VALID_LUT = BIT(7),
- BT_VALID_WIFI_RX_SW_PRIO_BOOST = BIT(8),
- BT_VALID_WIFI_TX_SW_PRIO_BOOST = BIT(9),
- BT_VALID_MULTI_PRIO_LUT = BIT(10),
- BT_VALID_TRM_KICK_FILTER = BIT(11),
- BT_VALID_CORUN_LUT_20 = BIT(12),
- BT_VALID_CORUN_LUT_40 = BIT(13),
- BT_VALID_ANT_ISOLATION = BIT(14),
- BT_VALID_ANT_ISOLATION_THRS = BIT(15),
- BT_VALID_TXTX_DELTA_FREQ_THRS = BIT(16),
- BT_VALID_TXRX_MAX_FREQ_0 = BIT(17),
- BT_VALID_SYNC_TO_SCO = BIT(18),
- BT_VALID_TTC = BIT(20),
- BT_VALID_RRC = BIT(21),
-};
-
-/**
- * enum iwl_bt_reduced_tx_power - allows to reduce txpower for WiFi frames.
- * @BT_REDUCED_TX_POWER_CTL: reduce Tx power for control frames
- * @BT_REDUCED_TX_POWER_DATA: reduce Tx power for data frames
- *
- * This mechanism allows to have BT and WiFi run concurrently. Since WiFi
- * reduces its Tx power, it can work along with BT, hence reducing the amount
- * of WiFi frames being killed by BT.
- */
-enum iwl_bt_reduced_tx_power {
- BT_REDUCED_TX_POWER_CTL = BIT(0),
- BT_REDUCED_TX_POWER_DATA = BIT(1),
-};
-
enum iwl_bt_coex_lut_type {
BT_COEX_TIGHT_LUT = 0,
BT_COEX_LOOSE_LUT,
@@ -158,64 +79,9 @@ enum iwl_bt_coex_lut_type {
BT_COEX_INVALID_LUT = 0xff,
}; /* BT_COEX_DECISION_LUT_INDEX_API_E_VER_1 */
-#define BT_COEX_LUT_SIZE (12)
#define BT_COEX_CORUN_LUT_SIZE (32)
-#define BT_COEX_MULTI_PRIO_LUT_SIZE (2)
-#define BT_COEX_BOOST_SIZE (4)
#define BT_REDUCED_TX_POWER_BIT BIT(7)
-/**
- * struct iwl_bt_coex_cmd_old - bt coex configuration command
- * @flags:&enum iwl_bt_coex_flags
- * @max_kill:
- * @bt_reduced_tx_power: enum %iwl_bt_reduced_tx_power
- * @override_primary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
- * should be set by default
- * @override_secondary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
- * should be set by default
- * @bt4_antenna_isolation: antenna isolation
- * @bt4_antenna_isolation_thr: antenna threshold value
- * @bt4_tx_tx_delta_freq_thr: TxTx delta frequency
- * @bt4_tx_rx_max_freq0: TxRx max frequency
- * @bt_prio_boost: BT priority boost registers
- * @wifi_tx_prio_boost: SW boost of wifi tx priority
- * @wifi_rx_prio_boost: SW boost of wifi rx priority
- * @kill_ack_msk: kill ACK mask. 1 - Tx ACK, 0 - kill Tx of ACK.
- * @kill_cts_msk: kill CTS mask. 1 - Tx CTS, 0 - kill Tx of CTS.
- * @decision_lut: PTA decision LUT, per Prio-Ch
- * @bt4_multiprio_lut: multi priority LUT configuration
- * @bt4_corun_lut20: co-running 20 MHz LUT configuration
- * @bt4_corun_lut40: co-running 40 MHz LUT configuration
- * @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk
- *
- * The structure is used for the BT_COEX command.
- */
-struct iwl_bt_coex_cmd_old {
- __le32 flags;
- u8 max_kill;
- u8 bt_reduced_tx_power;
- u8 override_primary_lut;
- u8 override_secondary_lut;
-
- u8 bt4_antenna_isolation;
- u8 bt4_antenna_isolation_thr;
- u8 bt4_tx_tx_delta_freq_thr;
- u8 bt4_tx_rx_max_freq0;
-
- __le32 bt_prio_boost[BT_COEX_BOOST_SIZE];
- __le32 wifi_tx_prio_boost;
- __le32 wifi_rx_prio_boost;
- __le32 kill_ack_msk;
- __le32 kill_cts_msk;
-
- __le32 decision_lut[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE];
- __le32 bt4_multiprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE];
- __le32 bt4_corun_lut20[BT_COEX_CORUN_LUT_SIZE];
- __le32 bt4_corun_lut40[BT_COEX_CORUN_LUT_SIZE];
-
- __le32 valid_bit_msk;
-} __packed; /* BT_COEX_CMD_API_S_VER_5 */
-
enum iwl_bt_coex_mode {
BT_COEX_DISABLE = 0x0,
BT_COEX_NW = 0x1,
@@ -385,92 +251,4 @@ struct iwl_bt_coex_profile_notif {
u8 reserved[3];
} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_4 */
-enum iwl_bt_coex_prio_table_event {
- BT_COEX_PRIO_TBL_EVT_INIT_CALIB1 = 0,
- BT_COEX_PRIO_TBL_EVT_INIT_CALIB2 = 1,
- BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1 = 2,
- BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2 = 3,
- BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1 = 4,
- BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2 = 5,
- BT_COEX_PRIO_TBL_EVT_DTIM = 6,
- BT_COEX_PRIO_TBL_EVT_SCAN52 = 7,
- BT_COEX_PRIO_TBL_EVT_SCAN24 = 8,
- BT_COEX_PRIO_TBL_EVT_IDLE = 9,
- BT_COEX_PRIO_TBL_EVT_MAX = 16,
-}; /* BT_COEX_PRIO_TABLE_EVENTS_API_E_VER_1 */
-
-enum iwl_bt_coex_prio_table_prio {
- BT_COEX_PRIO_TBL_DISABLED = 0,
- BT_COEX_PRIO_TBL_PRIO_LOW = 1,
- BT_COEX_PRIO_TBL_PRIO_HIGH = 2,
- BT_COEX_PRIO_TBL_PRIO_BYPASS = 3,
- BT_COEX_PRIO_TBL_PRIO_COEX_OFF = 4,
- BT_COEX_PRIO_TBL_PRIO_COEX_ON = 5,
- BT_COEX_PRIO_TBL_PRIO_COEX_IDLE = 6,
- BT_COEX_PRIO_TBL_MAX = 8,
-}; /* BT_COEX_PRIO_TABLE_PRIORITIES_API_E_VER_1 */
-
-#define BT_COEX_PRIO_TBL_SHRD_ANT_POS (0)
-#define BT_COEX_PRIO_TBL_PRIO_POS (1)
-#define BT_COEX_PRIO_TBL_RESERVED_POS (4)
-
-/**
- * struct iwl_bt_coex_prio_tbl_cmd - priority table for BT coex
- * @prio_tbl:
- */
-struct iwl_bt_coex_prio_tbl_cmd {
- u8 prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX];
-} __packed;
-
-/**
- * struct iwl_bt_coex_ci_cmd_old - bt coex channel inhibition command
- * @bt_primary_ci:
- * @bt_secondary_ci:
- * @co_run_bw_primary:
- * @co_run_bw_secondary:
- * @primary_ch_phy_id:
- * @secondary_ch_phy_id:
- *
- * Used for BT_COEX_CI command
- */
-struct iwl_bt_coex_ci_cmd_old {
- __le64 bt_primary_ci;
- __le64 bt_secondary_ci;
-
- u8 co_run_bw_primary;
- u8 co_run_bw_secondary;
- u8 primary_ch_phy_id;
- u8 secondary_ch_phy_id;
-} __packed; /* BT_CI_MSG_API_S_VER_1 */
-
-/**
- * struct iwl_bt_coex_profile_notif_old - notification about BT coex
- * @mbox_msg: message from BT to WiFi
- * @msg_idx: the index of the message
- * @bt_status: 0 - off, 1 - on
- * @bt_open_conn: number of BT connections open
- * @bt_traffic_load: load of BT traffic
- * @bt_agg_traffic_load: aggregated load of BT traffic
- * @bt_ci_compliance: 0 - no CI compliance, 1 - CI compliant
- * @primary_ch_lut: LUT used for primary channel
- * @secondary_ch_lut: LUT used for secondary channel
- * @bt_activity_grading: the activity of BT enum %iwl_bt_activity_grading
- */
-struct iwl_bt_coex_profile_notif_old {
- __le32 mbox_msg[4];
- __le32 msg_idx;
- u8 bt_status;
- u8 bt_open_conn;
- u8 bt_traffic_load;
- u8 bt_agg_traffic_load;
- u8 bt_ci_compliance;
- u8 ttc_enabled;
- u8 rrc_enabled;
- u8 reserved;
-
- __le32 primary_ch_lut;
- __le32 secondary_ch_lut;
- __le32 bt_activity_grading;
-} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_3 */
-
#endif /* __fw_api_bt_coex_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h
index 95ac59d08..0246506ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h
@@ -72,6 +72,9 @@
#define NUM_MAC_INDEX_DRIVER MAC_INDEX_AUX
#define NUM_MAC_INDEX (MAC_INDEX_AUX + 1)
+#define IWL_MVM_STATION_COUNT 16
+#define IWL_MVM_TDLS_STA_COUNT 4
+
enum iwl_ac {
AC_BK,
AC_BE,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h
index 65a7c8a4c..404b0de9e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -310,7 +310,8 @@ enum iwl_dev_tx_power_cmd_mode {
IWL_TX_POWER_MODE_SET_MAC = 0,
IWL_TX_POWER_MODE_SET_DEVICE = 1,
IWL_TX_POWER_MODE_SET_CHAINS = 2,
-}; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_2 */;
+ IWL_TX_POWER_MODE_SET_ACK = 3,
+}; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_4 */
/**
* struct iwl_dev_tx_power_cmd_v2 - TX power reduction command
@@ -338,7 +339,7 @@ struct iwl_dev_tx_power_cmd_v2 {
* @v2: version 2 of the command, embedded here for easier software handling
* @per_chain_restriction: per chain restrictions
*/
-struct iwl_dev_tx_power_cmd {
+struct iwl_dev_tx_power_cmd_v3 {
/* v3 is just an extension of v2 - keep this here */
struct iwl_dev_tx_power_cmd_v2 v2;
__le16 per_chain_restriction[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
@@ -347,6 +348,19 @@ struct iwl_dev_tx_power_cmd {
#define IWL_DEV_MAX_TX_POWER 0x7FFF
/**
+ * struct iwl_dev_tx_power_cmd - TX power reduction command
+ * @v3: version 3 of the command, embedded here for easier software handling
+ * @enable_ack_reduction: enable or disable close range ack TX power
+ * reduction.
+ */
+struct iwl_dev_tx_power_cmd {
+ /* v4 is just an extension of v3 - keep this here */
+ struct iwl_dev_tx_power_cmd_v3 v3;
+ u8 enable_ack_reduction;
+ u8 reserved[3];
+} __packed; /* TX_REDUCED_POWER_API_S_VER_4 */
+
+/**
* struct iwl_beacon_filter_cmd
* REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
 * @id_and_color: MAC context identifier
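The v2/v3/v4 nesting above is a versioning idiom: each new command revision only appends fields, so the driver keeps one struct and simply truncates the buffer length for older firmware (the fw_has_api()/fw_has_capa() checks later in this patch do exactly that). A minimal userspace sketch of the pattern; the field sizes are illustrative, not the real firmware layout:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

struct tx_power_cmd_v2 {
	uint32_t set_mode;
	uint32_t mac_context_id;
	uint16_t pwr_restriction;
	uint16_t padding;
} __attribute__((packed));

struct tx_power_cmd_v3 {
	struct tx_power_cmd_v2 v2;	/* v3 is just an extension of v2 */
	uint16_t per_chain_restriction[2][5];
} __attribute__((packed));

struct tx_power_cmd {
	struct tx_power_cmd_v3 v3;	/* v4 is just an extension of v3 */
	uint8_t enable_ack_reduction;
	uint8_t reserved[3];
} __attribute__((packed));

/* Trim the payload to the newest version the firmware understands. */
static size_t cmd_len(bool has_chain_api, bool has_ack_capa)
{
	if (!has_chain_api)
		return sizeof(struct tx_power_cmd_v2);
	if (!has_ack_capa)
		return sizeof(struct tx_power_cmd_v3);
	return sizeof(struct tx_power_cmd);
}

int main(void)
{
	printf("send %zu, %zu or %zu bytes\n",
	       cmd_len(false, false), cmd_len(true, false),
	       cmd_len(true, true));
	return 0;
}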
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
index 1ca8e4988..acc5cd53e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
@@ -296,7 +296,7 @@ enum iwl_rx_mpdu_status {
IWL_RX_MPDU_STATUS_OVERRUN_OK = BIT(1),
IWL_RX_MPDU_STATUS_SRC_STA_FOUND = BIT(2),
IWL_RX_MPDU_STATUS_KEY_VALID = BIT(3),
- IWL_RX_MPDU_STATUS_KEY_ERROR = BIT(4),
+ IWL_RX_MPDU_STATUS_KEY_PARAM_OK = BIT(4),
IWL_RX_MPDU_STATUS_ICV_OK = BIT(5),
IWL_RX_MPDU_STATUS_MIC_OK = BIT(6),
IWL_RX_MPDU_RES_STATUS_TTAK_OK = BIT(7),
@@ -311,7 +311,7 @@ enum iwl_rx_mpdu_status {
IWL_RX_MPDU_STATUS_WEP_MATCH = BIT(12),
IWL_RX_MPDU_STATUS_EXT_IV_MATCH = BIT(13),
IWL_RX_MPDU_STATUS_KEY_ID_MATCH = BIT(14),
- IWL_RX_MPDU_STATUS_KEY_COLOR = BIT(15),
+ IWL_RX_MPDU_STATUS_ROBUST_MNG_FRAME = BIT(15),
};
enum iwl_rx_mpdu_hash_filter {
@@ -336,6 +336,18 @@ enum iwl_rx_mpdu_reorder_data {
IWL_RX_MPDU_REORDER_BA_OLD_SN = 0x80000000,
};
+enum iwl_rx_mpdu_phy_info {
+ IWL_RX_MPDU_PHY_AMPDU = BIT(5),
+ IWL_RX_MPDU_PHY_AMPDU_TOGGLE = BIT(6),
+ IWL_RX_MPDU_PHY_SHORT_PREAMBLE = BIT(7),
+ IWL_RX_MPDU_PHY_TSF_OVERLOAD = BIT(8),
+};
+
+enum iwl_rx_mpdu_mac_info {
+ IWL_RX_MPDU_PHY_MAC_INDEX_MASK = 0x0f,
+ IWL_RX_MPDU_PHY_PHY_INDEX_MASK = 0xf0,
+};
+
struct iwl_rx_mpdu_desc {
/* DW2 */
__le16 mpdu_len;
@@ -343,9 +355,9 @@ struct iwl_rx_mpdu_desc {
u8 mac_flags2;
/* DW3 */
u8 amsdu_info;
- __le16 reserved_for_software;
+ __le16 phy_info;
u8 mac_phy_idx;
- /* DW4 */
+ /* DW4 - carries csum data only when rpa_en == 1 */
	__le16 raw_csum; /* allegedly unreliable */
__le16 l3l4_flags;
/* DW5 */
@@ -354,17 +366,17 @@ struct iwl_rx_mpdu_desc {
u8 sta_id_flags;
/* DW6 */
__le32 reorder_data;
- /* DW7 */
+ /* DW7 - carries rss_hash only when rpa_en == 1 */
__le32 rss_hash;
- /* DW8 */
+ /* DW8 - carries filter_match only when rpa_en == 1 */
__le32 filter_match;
/* DW9 */
__le32 rate_n_flags;
/* DW10 */
- u8 energy_a, energy_b, channel, reserved;
+ u8 energy_a, energy_b, channel, mac_context;
/* DW11 */
__le32 gp2_on_air_rise;
- /* DW12 & DW13 */
+	/* DW12 & DW13 - carries TSF only when TSF_OVERLOAD bit == 0 */
__le64 tsf_on_air_rise;
} __packed;
@@ -435,26 +447,26 @@ struct iwl_rxq_sync_notification {
} __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */
/**
-* Internal message identifier
-*
-* @IWL_MVM_RXQ_EMPTY: empty sync notification
-* @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA
-*/
+ * Internal message identifier
+ *
+ * @IWL_MVM_RXQ_EMPTY: empty sync notification
+ * @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA
+ */
enum iwl_mvm_rxq_notif_type {
IWL_MVM_RXQ_EMPTY,
IWL_MVM_RXQ_NOTIF_DEL_BA,
};
/**
-* struct iwl_mvm_internal_rxq_notif - Internal representation of the data sent
-* in &iwl_rxq_sync_cmd. Should be DWORD aligned.
-* FW is agnostic to the payload, so there are no endianity requirements.
-*
-* @type: value from &iwl_mvm_rxq_notif_type
-* @sync: ctrl path is waiting for all notifications to be received
-* @cookie: internal cookie to identify old notifications
-* @data: payload
-*/
+ * struct iwl_mvm_internal_rxq_notif - Internal representation of the data sent
+ * in &iwl_rxq_sync_cmd. Should be DWORD aligned.
+ * FW is agnostic to the payload, so there are no endianness requirements.
+ *
+ * @type: value from &iwl_mvm_rxq_notif_type
+ * @sync: ctrl path is waiting for all notifications to be received
+ * @cookie: internal cookie to identify old notifications
+ * @data: payload
+ */
struct iwl_mvm_internal_rxq_notif {
u16 type;
u16 sync;
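Several of the DW comments added above make descriptor fields conditional: raw_csum, rss_hash and filter_match are only valid when rpa_en == 1, and the DW12/DW13 TSF is only valid while the new TSF_OVERLOAD bit in phy_info is clear. A hedged sketch of a consumer honoring that contract; the bit position is copied from the patch, the struct is a cut-down stand-in:

#include <stdio.h>
#include <stdint.h>

#define RX_MPDU_PHY_TSF_OVERLOAD (1u << 8)	/* BIT(8), from the patch */

struct rx_desc {
	uint16_t phy_info;
	uint64_t tsf_on_air_rise;
};

static void handle_rx(const struct rx_desc *d)
{
	if (d->phy_info & RX_MPDU_PHY_TSF_OVERLOAD)
		printf("TSF reused by firmware, do not read it\n");
	else
		printf("TSF on air rise: %llu\n",
		       (unsigned long long)d->tsf_on_air_rise);
}

int main(void)
{
	struct rx_desc with_tsf = { .phy_info = 0, .tsf_on_air_rise = 123456 };
	struct rx_desc overloaded = { .phy_info = RX_MPDU_PHY_TSF_OVERLOAD };

	handle_rx(&with_tsf);
	handle_rx(&overloaded);
	return 0;
}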
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
index 38b1d045b..d1c4fb849 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
@@ -141,6 +141,7 @@ enum iwl_sta_flags {
* @STA_KEY_FLG_CCM: CCMP encryption algorithm
* @STA_KEY_FLG_TKIP: TKIP encryption algorithm
* @STA_KEY_FLG_EXT: extended cipher algorithm (depends on the FW support)
+ * @STA_KEY_FLG_GCMP: GCMP encryption algorithm
* @STA_KEY_FLG_CMAC: CMAC encryption algorithm
* @STA_KEY_FLG_ENC_UNKNOWN: unknown encryption algorithm
 * @STA_KEY_FLG_EN_MSK: mask for encryption algorithm value
@@ -149,6 +150,7 @@ enum iwl_sta_flags {
* @STA_KEY_FLG_KEYID_MSK: the index of the key
* @STA_KEY_NOT_VALID: key is invalid
* @STA_KEY_FLG_WEP_13BYTES: set for 13 bytes WEP key
+ * @STA_KEY_FLG_KEY_32BYTES: for a non-WEP key, set for a 32-byte key
 * @STA_KEY_MULTICAST: set for multicast key
* @STA_KEY_MFP: key is used for Management Frame Protection
*/
@@ -158,6 +160,7 @@ enum iwl_sta_key_flag {
STA_KEY_FLG_CCM = (2 << 0),
STA_KEY_FLG_TKIP = (3 << 0),
STA_KEY_FLG_EXT = (4 << 0),
+ STA_KEY_FLG_GCMP = (5 << 0),
STA_KEY_FLG_CMAC = (6 << 0),
STA_KEY_FLG_ENC_UNKNOWN = (7 << 0),
STA_KEY_FLG_EN_MSK = (7 << 0),
@@ -167,6 +170,7 @@ enum iwl_sta_key_flag {
STA_KEY_FLG_KEYID_MSK = (3 << STA_KEY_FLG_KEYID_POS),
STA_KEY_NOT_VALID = BIT(11),
STA_KEY_FLG_WEP_13BYTES = BIT(12),
+ STA_KEY_FLG_KEY_32BYTES = BIT(12),
STA_KEY_MULTICAST = BIT(14),
STA_KEY_MFP = BIT(15),
};
@@ -388,7 +392,6 @@ struct iwl_mvm_add_sta_cmd {
* @key_offset: key offset in key storage
* @key_flags: type %iwl_sta_key_flag
* @key: key material data
- * @key2: key material data
* @rx_secur_seq_cnt: RX security sequence counter for the key
* @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
* @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
@@ -397,8 +400,7 @@ struct iwl_mvm_add_sta_key_cmd {
u8 sta_id;
u8 key_offset;
__le16 key_flags;
- u8 key[16];
- u8 key2[16];
+ u8 key[32];
u8 rx_secur_seq_cnt[16];
u8 tkip_rx_tsc_byte2;
u8 reserved;
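Widening key[16]/key2[16] into a single key[32] pairs with the new STA_KEY_FLG_KEY_32BYTES flag: a GCMP-256 key is 32 bytes and no longer fits the old split. A simplified sketch of filling the command, with the flag value taken from the patch and the struct trimmed to the relevant fields:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define STA_KEY_FLG_KEY_32BYTES (1u << 12)	/* BIT(12), from the patch */

struct add_sta_key {
	uint16_t key_flags;
	uint8_t key[32];	/* was key[16] + key2[16] */
};

static void set_key(struct add_sta_key *cmd, const uint8_t *key, size_t len)
{
	memcpy(cmd->key, key, len);
	if (len == 32)	/* e.g. a GCMP-256 pairwise key */
		cmd->key_flags |= STA_KEY_FLG_KEY_32BYTES;
}

int main(void)
{
	uint8_t gcmp256_key[32] = { 0 };
	struct add_sta_key cmd = { 0 };

	set_key(&cmd, gcmp256_key, sizeof(gcmp256_key));
	printf("key_flags=0x%x\n", cmd.key_flags);
	return 0;
}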
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h
index 438665a54..4e638a44b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h
@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -252,6 +253,20 @@ struct mvm_statistics_general_v8 {
u8 reserved[4 - (NUM_MAC_INDEX % 4)];
} __packed; /* STATISTICS_GENERAL_API_S_VER_8 */
+/**
+ * struct mvm_statistics_load - RX statistics for multi-queue devices
+ * @air_time: accumulated air time, per mac
+ * @byte_count: accumulated byte count, per mac
+ * @pkt_count: accumulated packet count, per mac
+ * @avg_energy: average RSSI, per station
+ */
+struct mvm_statistics_load {
+ __le32 air_time[NUM_MAC_INDEX];
+ __le32 byte_count[NUM_MAC_INDEX];
+ __le32 pkt_count[NUM_MAC_INDEX];
+ u8 avg_energy[IWL_MVM_STATION_COUNT];
+} __packed; /* STATISTICS_RX_MAC_STATION_S_VER_1 */
+
struct mvm_statistics_rx {
struct mvm_statistics_rx_phy ofdm;
struct mvm_statistics_rx_phy cck;
@@ -266,7 +281,6 @@ struct mvm_statistics_rx {
* while associated. To disable this behavior, set DISABLE_NOTIF flag in the
* STATISTICS_CMD (0x9c), below.
*/
-
struct iwl_notif_statistics_v10 {
__le32 flag;
struct mvm_statistics_rx rx;
@@ -274,6 +288,14 @@ struct iwl_notif_statistics_v10 {
struct mvm_statistics_general_v8 general;
} __packed; /* STATISTICS_NTFY_API_S_VER_10 */
+struct iwl_notif_statistics_v11 {
+ __le32 flag;
+ struct mvm_statistics_rx rx;
+ struct mvm_statistics_tx tx;
+ struct mvm_statistics_general_v8 general;
+ struct mvm_statistics_load load_stats;
+} __packed; /* STATISTICS_NTFY_API_S_VER_11 */
+
#define IWL_STATISTICS_FLG_CLEAR 0x1
#define IWL_STATISTICS_FLG_DISABLE_NOTIF 0x2
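Since iwl_notif_statistics_v11 is exactly v10 plus the trailing mvm_statistics_load, a receiver can tell the two notification versions apart by payload length alone. A sketch of that check with stand-in sizes rather than the real firmware layouts:

#include <stdio.h>
#include <stddef.h>

struct stats_v10 { char rx[64]; char tx[64]; char general[32]; };
struct stats_load { char air_time[16]; char byte_count[16]; };
struct stats_v11 { struct stats_v10 v10; struct stats_load load; };

static const char *classify(size_t payload_len)
{
	if (payload_len == sizeof(struct stats_v11))
		return "v11 (has load stats)";
	if (payload_len == sizeof(struct stats_v10))
		return "v10";
	return "unknown";
}

int main(void)
{
	printf("%s\n", classify(sizeof(struct stats_v11)));
	printf("%s\n", classify(sizeof(struct stats_v10)));
	return 0;
}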
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
index dadcccd88..4144623e1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
@@ -137,17 +137,32 @@ enum iwl_tx_pm_timeouts {
PM_FRAME_ASSOC = 3,
};
-/*
- * TX command security control
- */
-#define TX_CMD_SEC_WEP 0x01
-#define TX_CMD_SEC_CCM 0x02
-#define TX_CMD_SEC_TKIP 0x03
-#define TX_CMD_SEC_EXT 0x04
#define TX_CMD_SEC_MSK 0x07
#define TX_CMD_SEC_WEP_KEY_IDX_POS 6
#define TX_CMD_SEC_WEP_KEY_IDX_MSK 0xc0
-#define TX_CMD_SEC_KEY128 0x08
+
+/**
+ * enum iwl_tx_cmd_sec_ctrl - bitmasks for security control in TX command
+ * @TX_CMD_SEC_WEP: WEP encryption algorithm.
+ * @TX_CMD_SEC_CCM: CCM encryption algorithm.
+ * @TX_CMD_SEC_TKIP: TKIP encryption algorithm.
+ * @TX_CMD_SEC_EXT: extended cipher algorithm.
+ * @TX_CMD_SEC_GCMP: GCMP encryption algorithm.
+ * @TX_CMD_SEC_KEY128: set for a 104-bit WEP key.
+ * @TC_CMD_SEC_KEY_FROM_TABLE: for a non-WEP key, set if the key should be taken
+ * from the table instead of from the TX command.
+ * If the key is taken from the key table its index should be given by the
+ * first byte of the TX command key field.
+ */
+enum iwl_tx_cmd_sec_ctrl {
+ TX_CMD_SEC_WEP = 0x01,
+ TX_CMD_SEC_CCM = 0x02,
+ TX_CMD_SEC_TKIP = 0x03,
+ TX_CMD_SEC_EXT = 0x04,
+ TX_CMD_SEC_GCMP = 0x05,
+ TX_CMD_SEC_KEY128 = 0x08,
+ TC_CMD_SEC_KEY_FROM_TABLE = 0x08,
+};
/* TODO: how are these values OK with only a 16-bit variable??? */
/*
@@ -562,8 +577,8 @@ struct iwl_mvm_ba_notif {
u8 reserved1;
} __packed;
-/*
- * struct iwl_mac_beacon_cmd - beacon template command
+/**
+ * struct iwl_mac_beacon_cmd_v6 - beacon template command
* @tx: the tx commands associated with the beacon frame
 * @template_id: currently equal to the mac context id of the corresponding
* mac.
@@ -571,13 +586,34 @@ struct iwl_mvm_ba_notif {
* @tim_size: the length of the tim IE
* @frame: the template of the beacon frame
*/
+struct iwl_mac_beacon_cmd_v6 {
+ struct iwl_tx_cmd tx;
+ __le32 template_id;
+ __le32 tim_idx;
+ __le32 tim_size;
+ struct ieee80211_hdr frame[0];
+} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_6 */
+
+/**
+ * struct iwl_mac_beacon_cmd - beacon template command with offloaded CSA
+ * @tx: the tx commands associated with the beacon frame
+ * @template_id: currently equal to the mac context id of the corresponding
+ * mac.
+ * @tim_idx: the offset of the tim IE in the beacon
+ * @tim_size: the length of the tim IE
+ * @ecsa_offset: offset to the ECSA IE if present
+ * @csa_offset: offset to the CSA IE if present
+ * @frame: the template of the beacon frame
+ */
struct iwl_mac_beacon_cmd {
struct iwl_tx_cmd tx;
__le32 template_id;
__le32 tim_idx;
__le32 tim_size;
+ __le32 ecsa_offset;
+ __le32 csa_offset;
struct ieee80211_hdr frame[0];
-} __packed;
+} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_7 */
struct iwl_beacon_notif {
struct iwl_mvm_tx_resp beacon_notify_hdr;
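The reworked security-control enum also documents the key-from-table convention: for a non-WEP key taken from the key table, the table index travels in the first byte of the TX command's key field. A hedged sketch composing the control byte; the bit values are copied from the enum, while the command struct and key index are illustrative:

#include <stdio.h>
#include <stdint.h>

#define TX_CMD_SEC_GCMP           0x05
#define TX_CMD_SEC_KEY_FROM_TABLE 0x08	/* spelled TC_CMD_... in the patch */

struct tx_cmd {
	uint8_t sec_ctl;
	uint8_t key[16];
};

int main(void)
{
	struct tx_cmd cmd = { 0 };
	uint8_t hw_key_idx = 3;	/* hypothetical key-table slot */

	cmd.sec_ctl = TX_CMD_SEC_GCMP | TX_CMD_SEC_KEY_FROM_TABLE;
	cmd.key[0] = hw_key_idx;	/* index goes in the first key byte */

	printf("sec_ctl=0x%02x key_idx=%u\n", cmd.sec_ctl, cmd.key[0]);
	return 0;
}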
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
index 41b80ae2d..71076f027 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
@@ -90,6 +90,7 @@ enum {
* DQA queue numbers
*
* @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW
+ * @IWL_MVM_DQA_AUX_QUEUE: a queue reserved for aux frames
* @IWL_MVM_DQA_P2P_DEVICE_QUEUE: a queue reserved for P2P device frames
* @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames
* @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure
@@ -108,6 +109,7 @@ enum {
*/
enum iwl_mvm_dqa_txq {
IWL_MVM_DQA_CMD_QUEUE = 0,
+ IWL_MVM_DQA_AUX_QUEUE = 1,
IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2,
IWL_MVM_DQA_GCAST_QUEUE = 3,
IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4,
@@ -127,9 +129,6 @@ enum iwl_mvm_tx_fifo {
IWL_MVM_TX_FIFO_CMD = 7,
};
-#define IWL_MVM_STATION_COUNT 16
-
-#define IWL_MVM_TDLS_STA_COUNT 4
/* commands */
enum {
@@ -314,6 +313,7 @@ enum {
enum iwl_mac_conf_subcmd_ids {
LINK_QUALITY_MEASUREMENT_CMD = 0x1,
LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF = 0xFE,
+ CHANNEL_SWITCH_NOA_NOTIF = 0xFF,
};
enum iwl_phy_ops_subcmd_ids {
@@ -329,6 +329,7 @@ enum iwl_system_subcmd_ids {
};
enum iwl_data_path_subcmd_ids {
+ DQA_ENABLE_CMD = 0x0,
UPDATE_MU_GROUPS_CMD = 0x1,
TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
MU_GROUP_MGMT_NOTIF = 0xFE,
@@ -359,6 +360,14 @@ struct iwl_cmd_response {
};
/*
+ * struct iwl_dqa_enable_cmd
+ * @cmd_queue: the TXQ number of the command queue
+ */
+struct iwl_dqa_enable_cmd {
+ __le32 cmd_queue;
+} __packed; /* DQA_CONTROL_CMD_API_S_VER_1 */
+
+/*
* struct iwl_tx_ant_cfg_cmd
* @valid: valid antenna configuration
*/
@@ -732,7 +741,7 @@ enum iwl_time_event_type {
/* P2P GO Events */
TE_P2P_GO_ASSOC_PROT,
- TE_P2P_GO_REPETITIVE_NOA,
+ TE_P2P_GO_REPETITIVET_NOA,
TE_P2P_GO_CT_WINDOW,
/* WiDi Sync Events */
@@ -2111,4 +2120,13 @@ struct iwl_link_qual_msrmnt_notif {
__le32 reserved[3];
} __packed; /* LQM_MEASUREMENT_COMPLETE_NTF_API_S_VER1 */
+/**
+ * Channel switch NOA notification
+ *
+ * @id_and_color: ID and color of the MAC
+ */
+struct iwl_channel_switch_noa_notif {
+ __le32 id_and_color;
+} __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */
+
#endif /* __fw_api_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
index e1b6b2c66..46b52bf70 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
@@ -288,7 +288,8 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
fifo_hdr->fifo_num = cpu_to_le32(i);
/* Mark the number of TXF we're pulling now */
- iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i);
+ iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i +
+ ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size));
fifo_hdr->available_bytes =
cpu_to_le32(iwl_trans_read_prph(mvm->trans,
@@ -959,5 +960,6 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
}
mvm->fw_dbg_conf = conf_id;
- return ret;
+
+ return 0;
}
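The TXF_CPU2_NUM change fixes the FIFO numbering for the second CPU: its TX FIFOs follow CPU1's in the global numbering, so the loop index is offset by the size of the CPU1 FIFO table. A toy model of that indexing with made-up FIFO counts:

#include <stdio.h>

#define CPU1_NUM_TXF 6	/* hypothetical ARRAY_SIZE(txfifo_size) */
#define CPU2_NUM_TXF 3	/* also made up */

int main(void)
{
	for (int i = 0; i < CPU2_NUM_TXF; i++)
		printf("CPU2 fifo %d -> global fifo %d\n", i, i + CPU1_NUM_TXF);
	return 0;
}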
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h
index f7dff7612..e9f1be9da 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h
@@ -105,7 +105,8 @@ iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv *trig,
{
u32 trig_vif = le32_to_cpu(trig->vif_type);
- return trig_vif == IWL_FW_DBG_CONF_VIF_ANY || vif->type == trig_vif;
+ return trig_vif == IWL_FW_DBG_CONF_VIF_ANY ||
+ ieee80211_vif_type_p2p(vif) == trig_vif;
}
static inline bool
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 7057f35cb..7e0cdbf8b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -65,6 +65,7 @@
*****************************************************************************/
#include <net/mac80211.h>
#include <linux/netdevice.h>
+#include <linux/acpi.h>
#include "iwl-trans.h"
#include "iwl-op-mode.h"
@@ -122,6 +123,9 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
};
+ if (mvm->trans->num_rx_queues == 1)
+ return 0;
+
/* Do not direct RSS traffic to Q 0 which is our fallback queue */
for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
cmd.indirection_table[i] =
@@ -131,6 +135,23 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
}
+static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
+{
+ struct iwl_dqa_enable_cmd dqa_cmd = {
+ .cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE),
+ };
+ u32 cmd_id = iwl_cmd_id(DQA_ENABLE_CMD, DATA_PATH_GROUP, 0);
+ int ret;
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to send DQA enabling command: %d\n", ret);
+ else
+ IWL_DEBUG_FW(mvm, "Working in DQA mode\n");
+
+ return ret;
+}
+
void iwl_free_fw_paging(struct iwl_mvm *mvm)
{
int i;
@@ -139,17 +160,21 @@ void iwl_free_fw_paging(struct iwl_mvm *mvm)
return;
for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
- if (!mvm->fw_paging_db[i].fw_paging_block) {
+ struct iwl_fw_paging *paging = &mvm->fw_paging_db[i];
+
+ if (!paging->fw_paging_block) {
IWL_DEBUG_FW(mvm,
"Paging: block %d already freed, continue to next page\n",
i);
continue;
}
+ dma_unmap_page(mvm->trans->dev, paging->fw_paging_phys,
+ paging->fw_paging_size, DMA_BIDIRECTIONAL);
- __free_pages(mvm->fw_paging_db[i].fw_paging_block,
- get_order(mvm->fw_paging_db[i].fw_paging_size));
- mvm->fw_paging_db[i].fw_paging_block = NULL;
+ __free_pages(paging->fw_paging_block,
+ get_order(paging->fw_paging_size));
+ paging->fw_paging_block = NULL;
}
kfree(mvm->trans->paging_download_buf);
mvm->trans->paging_download_buf = NULL;
@@ -882,6 +907,177 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
sizeof(cmd), &cmd);
}
+#define ACPI_WRDS_METHOD "WRDS"
+#define ACPI_WRDS_WIFI (0x07)
+#define ACPI_WRDS_TABLE_SIZE 10
+
+struct iwl_mvm_sar_table {
+ bool enabled;
+ u8 values[ACPI_WRDS_TABLE_SIZE];
+};
+
+#ifdef CONFIG_ACPI
+static int iwl_mvm_sar_get_wrds(struct iwl_mvm *mvm, union acpi_object *wrds,
+ struct iwl_mvm_sar_table *sar_table)
+{
+ union acpi_object *data_pkg;
+ u32 i;
+
+ /* We need at least two packages, one for the revision and one
+ * for the data itself. Also check that the revision is valid
+ * (i.e. it is an integer set to 0).
+ */
+ if (wrds->type != ACPI_TYPE_PACKAGE ||
+ wrds->package.count < 2 ||
+ wrds->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ wrds->package.elements[0].integer.value != 0) {
+ IWL_DEBUG_RADIO(mvm, "Unsupported wrds structure\n");
+ return -EINVAL;
+ }
+
+ /* loop through all the packages to find the one for WiFi */
+ for (i = 1; i < wrds->package.count; i++) {
+ union acpi_object *domain;
+
+ data_pkg = &wrds->package.elements[i];
+
+ /* Skip anything that is not a package with the right
+	 * number of elements (i.e. domain_type,
+	 * enabled/disabled plus the SAR table size).
+ */
+ if (data_pkg->type != ACPI_TYPE_PACKAGE ||
+ data_pkg->package.count != ACPI_WRDS_TABLE_SIZE + 2)
+ continue;
+
+ domain = &data_pkg->package.elements[0];
+ if (domain->type == ACPI_TYPE_INTEGER &&
+ domain->integer.value == ACPI_WRDS_WIFI)
+ break;
+
+ data_pkg = NULL;
+ }
+
+ if (!data_pkg)
+ return -ENOENT;
+
+ if (data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
+ return -EINVAL;
+
+ sar_table->enabled = !!(data_pkg->package.elements[1].integer.value);
+
+ for (i = 0; i < ACPI_WRDS_TABLE_SIZE; i++) {
+ union acpi_object *entry;
+
+ entry = &data_pkg->package.elements[i + 2];
+ if ((entry->type != ACPI_TYPE_INTEGER) ||
+ (entry->integer.value > U8_MAX))
+ return -EINVAL;
+
+ sar_table->values[i] = entry->integer.value;
+ }
+
+ return 0;
+}
+
+static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm,
+ struct iwl_mvm_sar_table *sar_table)
+{
+ acpi_handle root_handle;
+ acpi_handle handle;
+ struct acpi_buffer wrds = {ACPI_ALLOCATE_BUFFER, NULL};
+ acpi_status status;
+ int ret;
+
+ root_handle = ACPI_HANDLE(mvm->dev);
+ if (!root_handle) {
+ IWL_DEBUG_RADIO(mvm,
+ "Could not retrieve root port ACPI handle\n");
+ return -ENOENT;
+ }
+
+ /* Get the method's handle */
+ status = acpi_get_handle(root_handle, (acpi_string)ACPI_WRDS_METHOD,
+ &handle);
+ if (ACPI_FAILURE(status)) {
+ IWL_DEBUG_RADIO(mvm, "WRDS method not found\n");
+ return -ENOENT;
+ }
+
+ /* Call WRDS with no arguments */
+ status = acpi_evaluate_object(handle, NULL, NULL, &wrds);
+ if (ACPI_FAILURE(status)) {
+ IWL_DEBUG_RADIO(mvm, "WRDS invocation failed (0x%x)\n", status);
+ return -ENOENT;
+ }
+
+ ret = iwl_mvm_sar_get_wrds(mvm, wrds.pointer, sar_table);
+ kfree(wrds.pointer);
+
+ return ret;
+}
+#else /* CONFIG_ACPI */
+static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm,
+ struct iwl_mvm_sar_table *sar_table)
+{
+ return -ENOENT;
+}
+#endif /* CONFIG_ACPI */
+
+static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_sar_table sar_table;
+ struct iwl_dev_tx_power_cmd cmd = {
+ .v3.v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
+ };
+ int ret, i, j, idx;
+ int len = sizeof(cmd);
+
+ /* we can't do anything with the table if the FW doesn't support it */
+ if (!fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_TX_POWER_CHAIN)) {
+ IWL_DEBUG_RADIO(mvm,
+ "FW doesn't support per-chain TX power settings.\n");
+ return 0;
+ }
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
+ len = sizeof(cmd.v3);
+
+ ret = iwl_mvm_sar_get_table(mvm, &sar_table);
+ if (ret < 0) {
+ IWL_DEBUG_RADIO(mvm,
+ "SAR BIOS table invalid or unavailable. (%d)\n",
+ ret);
+ /* we don't fail if the table is not available */
+ return 0;
+ }
+
+ if (!sar_table.enabled)
+ return 0;
+
+ IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
+
+ BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS * IWL_NUM_SUB_BANDS !=
+ ACPI_WRDS_TABLE_SIZE);
+
+ for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
+ IWL_DEBUG_RADIO(mvm, " Chain[%d]:\n", i);
+ for (j = 0; j < IWL_NUM_SUB_BANDS; j++) {
+ idx = (i * IWL_NUM_SUB_BANDS) + j;
+ cmd.v3.per_chain_restriction[i][j] =
+ cpu_to_le16(sar_table.values[idx]);
+ IWL_DEBUG_RADIO(mvm, " Band[%d] = %d * .125dBm\n",
+ j, sar_table.values[idx]);
+ }
+ }
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
+ if (ret)
+ IWL_ERR(mvm, "failed to set per-chain TX power: %d\n", ret);
+
+ return ret;
+}
+
int iwl_mvm_up(struct iwl_mvm *mvm)
{
int ret, i;
@@ -976,6 +1172,15 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
/* reset quota debouncing buffer - 0xff will yield invalid data */
memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));
+ /* Enable DQA-mode if required */
+ if (iwl_mvm_is_dqa_supported(mvm)) {
+ ret = iwl_mvm_send_dqa_cmd(mvm);
+ if (ret)
+ goto error;
+ } else {
+ IWL_DEBUG_FW(mvm, "Working in non-DQA mode\n");
+ }
+
/* Add auxiliary station for scanning */
ret = iwl_mvm_add_aux_sta(mvm);
if (ret)
@@ -1048,6 +1253,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
+ ret = iwl_mvm_sar_init(mvm);
+ if (ret)
+ goto error;
+
IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
return 0;
error:
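The new SAR path flattens the 10-entry WRDS table as idx = chain * IWL_NUM_SUB_BANDS + subband; the BUILD_BUG_ON pins the table size to IWL_NUM_CHAIN_LIMITS * IWL_NUM_SUB_BANDS (presumably 2 x 5, though the constants are defined outside this patch), and each entry is in units of 0.125 dBm per the debug print. A worked example with hypothetical table values:

#include <stdio.h>

#define N_CHAINS   2	/* assumed IWL_NUM_CHAIN_LIMITS */
#define N_SUBBANDS 5	/* assumed IWL_NUM_SUB_BANDS */

int main(void)
{
	/* hypothetical WRDS values, one byte per chain/sub-band pair */
	unsigned char wrds[N_CHAINS * N_SUBBANDS] = {
		80, 80, 76, 72, 72,
		80, 80, 76, 72, 72,
	};

	for (int i = 0; i < N_CHAINS; i++)
		for (int j = 0; j < N_SUBBANDS; j++) {
			int idx = i * N_SUBBANDS + j;
			printf("chain %d band %d limit %.3f dBm\n",
			       i, j, wrds[idx] * 0.125);
		}
	return 0;
}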
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 7aae068c0..69c42ce45 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -1006,7 +1006,7 @@ static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm,
}
static void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm,
- struct iwl_mac_beacon_cmd *beacon_cmd,
+ struct iwl_mac_beacon_cmd_v6 *beacon_cmd,
u8 *beacon, u32 frame_size)
{
u32 tim_idx;
@@ -1030,6 +1030,23 @@ static void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm,
}
}
+static u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size)
+{
+ struct ieee80211_mgmt *mgmt = (void *)beacon;
+ const u8 *ie;
+
+ if (WARN_ON_ONCE(frame_size <= (mgmt->u.beacon.variable - beacon)))
+ return 0;
+
+ frame_size -= mgmt->u.beacon.variable - beacon;
+
+ ie = cfg80211_find_ie(eid, mgmt->u.beacon.variable, frame_size);
+ if (!ie)
+ return 0;
+
+ return ie - beacon;
+}
+
static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct sk_buff *beacon)
@@ -1039,7 +1056,10 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
.id = BEACON_TEMPLATE_CMD,
.flags = CMD_ASYNC,
};
- struct iwl_mac_beacon_cmd beacon_cmd = {};
+ union {
+ struct iwl_mac_beacon_cmd_v6 beacon_cmd_v6;
+ struct iwl_mac_beacon_cmd beacon_cmd;
+ } u = {};
struct ieee80211_tx_info *info;
u32 beacon_skb_len;
u32 rate, tx_flags;
@@ -1051,18 +1071,18 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
/* TODO: for now the beacon template id is set to be the mac context id.
* Might be better to handle it as another resource ... */
- beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id);
+ u.beacon_cmd_v6.template_id = cpu_to_le32((u32)mvmvif->id);
info = IEEE80211_SKB_CB(beacon);
/* Set up TX command fields */
- beacon_cmd.tx.len = cpu_to_le16((u16)beacon_skb_len);
- beacon_cmd.tx.sta_id = mvmvif->bcast_sta.sta_id;
- beacon_cmd.tx.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
+ u.beacon_cmd_v6.tx.len = cpu_to_le16((u16)beacon_skb_len);
+ u.beacon_cmd_v6.tx.sta_id = mvmvif->bcast_sta.sta_id;
+ u.beacon_cmd_v6.tx.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
tx_flags = TX_CMD_FLG_SEQ_CTL | TX_CMD_FLG_TSF;
tx_flags |=
iwl_mvm_bt_coex_tx_prio(mvm, (void *)beacon->data, info, 0) <<
TX_CMD_FLG_BT_PRIO_POS;
- beacon_cmd.tx.tx_flags = cpu_to_le32(tx_flags);
+ u.beacon_cmd_v6.tx.tx_flags = cpu_to_le32(tx_flags);
if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION)) {
@@ -1071,7 +1091,7 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
mvm->mgmt_last_antenna_idx);
}
- beacon_cmd.tx.rate_n_flags =
+ u.beacon_cmd_v6.tx.rate_n_flags =
cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) <<
RATE_MCS_ANT_POS);
@@ -1079,20 +1099,37 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
rate = IWL_FIRST_OFDM_RATE;
} else {
rate = IWL_FIRST_CCK_RATE;
- beacon_cmd.tx.rate_n_flags |= cpu_to_le32(RATE_MCS_CCK_MSK);
+ u.beacon_cmd_v6.tx.rate_n_flags |=
+ cpu_to_le32(RATE_MCS_CCK_MSK);
}
- beacon_cmd.tx.rate_n_flags |=
+ u.beacon_cmd_v6.tx.rate_n_flags |=
cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate));
/* Set up TX beacon command fields */
if (vif->type == NL80211_IFTYPE_AP)
- iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd,
+ iwl_mvm_mac_ctxt_set_tim(mvm, &u.beacon_cmd_v6,
beacon->data,
beacon_skb_len);
/* Submit command */
- cmd.len[0] = sizeof(beacon_cmd);
- cmd.data[0] = &beacon_cmd;
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD)) {
+ u.beacon_cmd.csa_offset =
+ cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
+ WLAN_EID_CHANNEL_SWITCH,
+ beacon_skb_len));
+ u.beacon_cmd.ecsa_offset =
+ cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
+ WLAN_EID_EXT_CHANSWITCH_ANN,
+ beacon_skb_len));
+
+ cmd.len[0] = sizeof(u.beacon_cmd);
+ } else {
+ cmd.len[0] = sizeof(u.beacon_cmd_v6);
+ }
+
+ cmd.data[0] = &u;
cmd.dataflags[0] = 0;
cmd.len[1] = beacon_skb_len;
cmd.data[1] = beacon->data;
@@ -1538,3 +1575,48 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
/* pass it as regular rx to mac80211 */
ieee80211_rx_napi(mvm->hw, NULL, skb, NULL);
}
+
+void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_channel_switch_noa_notif *notif = (void *)pkt->data;
+ struct ieee80211_vif *csa_vif;
+ struct iwl_mvm_vif *mvmvif;
+ int len = iwl_rx_packet_payload_len(pkt);
+ u32 id_n_color;
+
+ if (WARN_ON_ONCE(len < sizeof(*notif)))
+ return;
+
+ rcu_read_lock();
+
+ csa_vif = rcu_dereference(mvm->csa_vif);
+ if (WARN_ON(!csa_vif || !csa_vif->csa_active))
+ goto out_unlock;
+
+ id_n_color = le32_to_cpu(notif->id_and_color);
+
+ mvmvif = iwl_mvm_vif_from_mac80211(csa_vif);
+ if (WARN(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color) != id_n_color,
+ "channel switch noa notification on unexpected vif (csa_vif=%d, notif=%d)",
+ FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color), id_n_color))
+ goto out_unlock;
+
+ IWL_DEBUG_INFO(mvm, "Channel Switch Started Notification\n");
+
+ queue_delayed_work(system_wq, &mvm->cs_tx_unblock_dwork,
+ msecs_to_jiffies(IWL_MVM_CS_UNBLOCK_TX_TIMEOUT *
+ csa_vif->bss_conf.beacon_int));
+
+ ieee80211_csa_finish(csa_vif);
+
+ rcu_read_unlock();
+
+ RCU_INIT_POINTER(mvm->csa_vif, NULL);
+
+ return;
+
+out_unlock:
+ rcu_read_unlock();
+}
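The unblock delay queued above is IWL_MVM_CS_UNBLOCK_TX_TIMEOUT beacon intervals; beacon_int is in TU (1 TU = 1.024 ms) and the product is handed to msecs_to_jiffies() as if it were milliseconds, a small approximation the code tolerates. A worked example; the timeout constant is not shown in this patch, so the value 3 below is purely hypothetical:

#include <stdio.h>

int main(void)
{
	unsigned int cs_unblock_timeout = 3;	/* hypothetical, in beacons */
	unsigned int beacon_int_tu = 100;	/* typical beacon interval */

	unsigned int approx_ms = cs_unblock_timeout * beacon_int_tu;
	double exact_ms = approx_ms * 1.024;	/* 1 TU = 1.024 ms */

	printf("queued delay ~%u ms (true beacon time %.1f ms)\n",
	       approx_ms, exact_ms);
	return 0;
}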
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 18a8474b5..5dd77e336 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -465,11 +465,20 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
- BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 2);
+ BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 4);
memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
hw->wiphy->cipher_suites = mvm->ciphers;
+ if (iwl_mvm_has_new_rx_api(mvm)) {
+ mvm->ciphers[hw->wiphy->n_cipher_suites] =
+ WLAN_CIPHER_SUITE_GCMP;
+ hw->wiphy->n_cipher_suites++;
+ mvm->ciphers[hw->wiphy->n_cipher_suites] =
+ WLAN_CIPHER_SUITE_GCMP_256;
+ hw->wiphy->n_cipher_suites++;
+ }
+
/*
* Enable 11w if advertised by firmware and software crypto
* is not enabled (as the firmware will interpret some mgmt
@@ -485,10 +494,23 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
/* currently FW API supports only one optional cipher scheme */
if (mvm->fw->cs[0].cipher) {
+ const struct iwl_fw_cipher_scheme *fwcs = &mvm->fw->cs[0];
+ struct ieee80211_cipher_scheme *cs = &mvm->cs[0];
+
mvm->hw->n_cipher_schemes = 1;
- mvm->hw->cipher_schemes = &mvm->fw->cs[0];
- mvm->ciphers[hw->wiphy->n_cipher_suites] =
- mvm->fw->cs[0].cipher;
+
+ cs->cipher = le32_to_cpu(fwcs->cipher);
+ cs->iftype = BIT(NL80211_IFTYPE_STATION);
+ cs->hdr_len = fwcs->hdr_len;
+ cs->pn_len = fwcs->pn_len;
+ cs->pn_off = fwcs->pn_off;
+ cs->key_idx_off = fwcs->key_idx_off;
+ cs->key_idx_mask = fwcs->key_idx_mask;
+ cs->key_idx_shift = fwcs->key_idx_shift;
+ cs->mic_len = fwcs->mic_len;
+
+ mvm->hw->cipher_schemes = mvm->cs;
+ mvm->ciphers[hw->wiphy->n_cipher_suites] = cs->cipher;
hw->wiphy->n_cipher_suites++;
}
@@ -602,6 +624,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
NL80211_FEATURE_LOW_PRIORITY_SCAN |
NL80211_FEATURE_P2P_GO_OPPPS |
+ NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
NL80211_FEATURE_DYNAMIC_SMPS |
NL80211_FEATURE_STATIC_SMPS |
NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
@@ -1011,11 +1034,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
- memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
- memset(&mvm->last_bt_ci_cmd_old, 0, sizeof(mvm->last_bt_ci_cmd_old));
- memset(&mvm->bt_ack_kill_msk, 0, sizeof(mvm->bt_ack_kill_msk));
- memset(&mvm->bt_cts_kill_msk, 0, sizeof(mvm->bt_cts_kill_msk));
ieee80211_wake_queues(mvm->hw);
@@ -1199,6 +1218,8 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
flush_work(&mvm->async_handlers_wk);
flush_work(&mvm->add_stream_wk);
cancel_delayed_work_sync(&mvm->fw_dump_wk);
+ cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
+ cancel_delayed_work_sync(&mvm->scan_timeout_dwork);
iwl_mvm_free_fw_dump_desc(mvm);
mutex_lock(&mvm->mutex);
@@ -1230,18 +1251,20 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
s16 tx_power)
{
struct iwl_dev_tx_power_cmd cmd = {
- .v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
- .v2.mac_context_id =
+ .v3.v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
+ .v3.v2.mac_context_id =
cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
- .v2.pwr_restriction = cpu_to_le16(8 * tx_power),
+ .v3.v2.pwr_restriction = cpu_to_le16(8 * tx_power),
};
int len = sizeof(cmd);
if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
- cmd.v2.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
+ cmd.v3.v2.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
+ len = sizeof(cmd.v3);
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_CHAIN))
- len = sizeof(cmd.v2);
+ len = sizeof(cmd.v3.v2);
return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
}
@@ -2360,7 +2383,7 @@ static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT))
return;
- if (vif->p2p && !iwl_mvm_is_p2p_standalone_uapsd_supported(mvm)) {
+ if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) {
vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
return;
}
@@ -2719,6 +2742,8 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
break;
case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
@@ -2780,7 +2805,8 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
sta && iwl_mvm_has_new_rx_api(mvm) &&
key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
(key->cipher == WLAN_CIPHER_SUITE_CCMP ||
- key->cipher == WLAN_CIPHER_SUITE_GCMP)) {
+ key->cipher == WLAN_CIPHER_SUITE_GCMP ||
+ key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
struct ieee80211_key_seq seq;
int tid, q;
@@ -2834,7 +2860,8 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
if (sta && iwl_mvm_has_new_rx_api(mvm) &&
key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
(key->cipher == WLAN_CIPHER_SUITE_CCMP ||
- key->cipher == WLAN_CIPHER_SUITE_GCMP)) {
+ key->cipher == WLAN_CIPHER_SUITE_GCMP ||
+ key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
mvmsta = iwl_mvm_sta_from_mac80211(sta);
ptk_pn = rcu_dereference_protected(
mvmsta->ptk_pn[keyidx],
@@ -3687,6 +3714,13 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
goto out_unlock;
}
+	/* we haven't unblocked TX yet; prevent a new CS meanwhile */
+ if (rcu_dereference_protected(mvm->csa_tx_blocked_vif,
+ lockdep_is_held(&mvm->mutex))) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
rcu_assign_pointer(mvm->csa_vif, vif);
if (WARN_ONCE(mvmvif->csa_countdown,
@@ -3695,6 +3729,8 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
goto out_unlock;
}
+ mvmvif->csa_target_freq = chsw->chandef.chan->center_freq;
+
break;
case NL80211_IFTYPE_STATION:
if (mvmvif->lqm_active)
@@ -3898,6 +3934,11 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ if (mvmsta->avg_energy) {
+ sinfo->signal_avg = mvmsta->avg_energy;
+ sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
+ }
+
if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
return;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index ffbd41dcc..6a615bb73 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -452,6 +452,7 @@ struct iwl_mvm_vif {
/* Indicates that CSA countdown may be started */
bool csa_countdown;
bool csa_failed;
+ u16 csa_target_freq;
/* TCP Checksum Offload */
netdev_features_t features;
@@ -466,6 +467,8 @@ struct iwl_mvm_vif {
static inline struct iwl_mvm_vif *
iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
{
+ if (!vif)
+ return NULL;
return (void *)vif->drv_priv;
}
@@ -686,13 +689,28 @@ struct iwl_mvm_baid_data {
* This is the state of a queue that has been fully configured (including
* SCD pointers, etc), has a specific RA/TID assigned to it, and can be
* used to send traffic.
+ * @IWL_MVM_QUEUE_SHARED: queue is shared, or in the process of becoming shared
+ *	This is a state in which a single queue serves more than one TID, none
+ *	of which is aggregated. Note that the queue is associated with only one
+ *	RA.
+ * @IWL_MVM_QUEUE_INACTIVE: queue is allocated but no traffic on it
+ *	This is the state of a queue that has had traffic on it, but during the
+ *	last %IWL_MVM_DQA_QUEUE_TIMEOUT time period there has been no traffic on
+ *	it. In this state, when a new queue needs to be allocated but no free
+ *	queue exists, an inactive queue might be freed and given to the new
+ *	RA/TID.
*/
enum iwl_mvm_queue_status {
IWL_MVM_QUEUE_FREE,
IWL_MVM_QUEUE_RESERVED,
IWL_MVM_QUEUE_READY,
+ IWL_MVM_QUEUE_SHARED,
+ IWL_MVM_QUEUE_INACTIVE,
};
+#define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ)
+#define IWL_MVM_NUM_CIPHERS 8
+
struct iwl_mvm {
/* for logger access */
struct device *dev;
@@ -731,6 +749,7 @@ struct iwl_mvm {
struct iwl_sf_region sf_space;
u32 ampdu_ref;
+ bool ampdu_toggle;
struct iwl_notif_wait_data notif_wait;
@@ -748,11 +767,16 @@ struct iwl_mvm {
u32 hw_queue_to_mac80211;
u8 hw_queue_refcount;
u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
+ bool reserved; /* Is this the TXQ reserved for a STA */
+ u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */
u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
+ /* Timestamp for inactivation per TID of this queue */
+	/* Per-TID timestamp of the last frame, used to decide inactivity */
enum iwl_mvm_queue_status status;
} queue_info[IWL_MAX_HW_QUEUES];
spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
struct work_struct add_stream_wk; /* To add streams to queues */
+
atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
const char *nvm_file_name;
@@ -787,7 +811,7 @@ struct iwl_mvm {
struct iwl_mcast_filter_cmd *mcast_filter_cmd;
enum iwl_mvm_scan_type scan_type;
enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all;
- struct timer_list scan_timer;
+ struct delayed_work scan_timeout_dwork;
/* max number of simultaneous scans the FW supports */
unsigned int max_scans;
@@ -910,11 +934,6 @@ struct iwl_mvm {
wait_queue_head_t d0i3_exit_waitq;
/* BT-Coex */
- u8 bt_ack_kill_msk[NUM_PHY_CTX];
- u8 bt_cts_kill_msk[NUM_PHY_CTX];
-
- struct iwl_bt_coex_profile_notif_old last_bt_notif_old;
- struct iwl_bt_coex_ci_cmd_old last_bt_ci_cmd_old;
struct iwl_bt_coex_profile_notif last_bt_notif;
struct iwl_bt_coex_ci_cmd last_bt_ci_cmd;
@@ -994,7 +1013,8 @@ struct iwl_mvm {
struct iwl_mvm_shared_mem_cfg shared_mem_cfg;
- u32 ciphers[6];
+ u32 ciphers[IWL_MVM_NUM_CIPHERS];
+ struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
struct iwl_mvm_tof_data tof_data;
struct ieee80211_vif *nan_vif;
@@ -1006,6 +1026,8 @@ struct iwl_mvm {
* clients.
*/
bool drop_bcn_ap_mode;
+
+ struct delayed_work cs_tx_unblock_dwork;
};
/* Extract MVM priv from op_mode and _hw */
@@ -1158,10 +1180,10 @@ static inline bool iwl_mvm_is_mplut_supported(struct iwl_mvm *mvm)
}
static inline
-bool iwl_mvm_is_p2p_standalone_uapsd_supported(struct iwl_mvm *mvm)
+bool iwl_mvm_is_p2p_scm_uapsd_supported(struct iwl_mvm *mvm)
{
return fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD) &&
+ IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD) &&
!(iwlwifi_mod_params.uapsd_disable &
IWL_DISABLE_UAPSD_P2P_CLIENT);
}
@@ -1321,7 +1343,6 @@ bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb);
-void iwl_mvm_rx_phy_cmd_mq(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue);
void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
@@ -1381,6 +1402,8 @@ void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
struct ieee80211_vif *exclude_vif);
+void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
/* Bindings */
int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@@ -1397,7 +1420,7 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm);
int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify);
int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
-void iwl_mvm_scan_timeout(unsigned long data);
+void iwl_mvm_scan_timeout_wk(struct work_struct *work);
/* Scheduled scan */
void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
@@ -1613,7 +1636,7 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
*/
void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
u8 tid, u8 flags);
-int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq);
+int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq);
/* Return a bitmask with all the hw supported queues, except for the
* command queue, which can't be flushed.
@@ -1720,6 +1743,8 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
void iwl_mvm_reorder_timer_expired(unsigned long data);
struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
+void iwl_mvm_inactivity_check(struct iwl_mvm *mvm);
+
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error);
unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
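The new per-TID last_frame_time array backs the IWL_MVM_QUEUE_INACTIVE state documented above: every frame stamps its TID, and a queue becomes reclaimable once all mapped TIDs have been quiet for IWL_MVM_DQA_QUEUE_TIMEOUT. A plain userspace model of that rule, with jiffies replaced by a bare counter:

#include <stdio.h>
#include <stdbool.h>

#define QUEUE_TIMEOUT 5	/* stands in for IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ) */
#define N_TIDS 9	/* stands in for IWL_MAX_TID_COUNT + 1 */

struct queue {
	unsigned long last_frame_time[N_TIDS];
	unsigned int tid_bitmap;
};

static bool queue_inactive(const struct queue *q, unsigned long now)
{
	for (int tid = 0; tid < N_TIDS; tid++)
		if ((q->tid_bitmap & (1u << tid)) &&
		    now - q->last_frame_time[tid] <= QUEUE_TIMEOUT)
			return false;	/* this TID saw recent traffic */
	return true;
}

int main(void)
{
	struct queue q = { .tid_bitmap = 1u << 2 };

	q.last_frame_time[2] = 100;	/* frame on TID 2 at "time" 100 */
	printf("inactive at 103? %d\n", queue_inactive(&q, 103));
	printf("inactive at 110? %d\n", queue_inactive(&q, 110));
	return 0;
}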
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index 9a8cf2f41..fcfb7aac6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -66,7 +66,6 @@
*****************************************************************************/
#include <linux/firmware.h>
#include <linux/rtnetlink.h>
-#include <linux/pci.h>
#include <linux/acpi.h>
#include "iwl-trans.h"
#include "iwl-csr.h"
@@ -667,8 +666,7 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
.mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]),
.source_id = (u8)src_id,
};
- struct iwl_mcc_update_resp *mcc_resp, *resp_cp = NULL;
- struct iwl_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
+ struct iwl_mcc_update_resp *resp_cp;
struct iwl_rx_packet *pkt;
struct iwl_host_cmd cmd = {
.id = MCC_UPDATE_CMD,
@@ -701,34 +699,36 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
/* Extract MCC response */
if (resp_v2) {
- mcc_resp = (void *)pkt->data;
+ struct iwl_mcc_update_resp *mcc_resp = (void *)pkt->data;
+
n_channels = __le32_to_cpu(mcc_resp->n_channels);
+ resp_len = sizeof(struct iwl_mcc_update_resp) +
+ n_channels * sizeof(__le32);
+ resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL);
} else {
- mcc_resp_v1 = (void *)pkt->data;
+ struct iwl_mcc_update_resp_v1 *mcc_resp_v1 = (void *)pkt->data;
+
n_channels = __le32_to_cpu(mcc_resp_v1->n_channels);
+ resp_len = sizeof(struct iwl_mcc_update_resp) +
+ n_channels * sizeof(__le32);
+ resp_cp = kzalloc(resp_len, GFP_KERNEL);
+
+ if (resp_cp) {
+ resp_cp->status = mcc_resp_v1->status;
+ resp_cp->mcc = mcc_resp_v1->mcc;
+ resp_cp->cap = mcc_resp_v1->cap;
+ resp_cp->source_id = mcc_resp_v1->source_id;
+ resp_cp->n_channels = mcc_resp_v1->n_channels;
+ memcpy(resp_cp->channels, mcc_resp_v1->channels,
+ n_channels * sizeof(__le32));
+ }
}
- resp_len = sizeof(struct iwl_mcc_update_resp) + n_channels *
- sizeof(__le32);
-
- resp_cp = kzalloc(resp_len, GFP_KERNEL);
if (!resp_cp) {
ret = -ENOMEM;
goto exit;
}
- if (resp_v2) {
- memcpy(resp_cp, mcc_resp, resp_len);
- } else {
- resp_cp->status = mcc_resp_v1->status;
- resp_cp->mcc = mcc_resp_v1->mcc;
- resp_cp->cap = mcc_resp_v1->cap;
- resp_cp->source_id = mcc_resp_v1->source_id;
- resp_cp->n_channels = mcc_resp_v1->n_channels;
- memcpy(resp_cp->channels, mcc_resp_v1->channels,
- n_channels * sizeof(__le32));
- }
-
status = le32_to_cpu(resp_cp->status);
mcc = le16_to_cpu(resp_cp->mcc);
@@ -802,9 +802,8 @@ static int iwl_mvm_get_bios_mcc(struct iwl_mvm *mvm, char *mcc)
struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
acpi_status status;
u32 mcc_val;
- struct pci_dev *pdev = to_pci_dev(mvm->dev);
- root_handle = ACPI_HANDLE(&pdev->dev);
+ root_handle = ACPI_HANDLE(mvm->dev);
if (!root_handle) {
IWL_DEBUG_LAR(mvm,
"Could not retrieve root port ACPI handle\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index a68054f12..55d9096da 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -431,6 +431,7 @@ static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
HCMD_NAME(LINK_QUALITY_MEASUREMENT_CMD),
HCMD_NAME(LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF),
+ HCMD_NAME(CHANNEL_SWITCH_NOA_NOTIF),
};
/* Please keep this array *SORTED* by hex value.
@@ -494,6 +495,29 @@ static u32 calc_min_backoff(struct iwl_trans *trans, const struct iwl_cfg *cfg)
static void iwl_mvm_fw_error_dump_wk(struct work_struct *work);
+static void iwl_mvm_tx_unblock_dwork(struct work_struct *work)
+{
+ struct iwl_mvm *mvm =
+ container_of(work, struct iwl_mvm, cs_tx_unblock_dwork.work);
+ struct ieee80211_vif *tx_blocked_vif;
+ struct iwl_mvm_vif *mvmvif;
+
+ mutex_lock(&mvm->mutex);
+
+ tx_blocked_vif =
+ rcu_dereference_protected(mvm->csa_tx_blocked_vif,
+ lockdep_is_held(&mvm->mutex));
+
+ if (!tx_blocked_vif)
+ goto unlock;
+
+ mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif);
+ iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
+ RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
+unlock:
+ mutex_unlock(&mvm->mutex);
+}
+
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const struct iwl_fw *fw, struct dentry *dbgfs_dir)
@@ -553,18 +577,21 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0;
- mvm->aux_queue = 15;
if (!iwl_mvm_is_dqa_supported(mvm)) {
- mvm->first_agg_queue = 16;
mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1;
+
+ if (mvm->cfg->base_params->num_of_queues == 16) {
+ mvm->aux_queue = 11;
+ mvm->first_agg_queue = 12;
+ } else {
+ mvm->aux_queue = 15;
+ mvm->first_agg_queue = 16;
+ }
} else {
+ mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
mvm->first_agg_queue = IWL_MVM_DQA_MIN_DATA_QUEUE;
mvm->last_agg_queue = IWL_MVM_DQA_MAX_DATA_QUEUE;
}
- if (mvm->cfg->base_params->num_of_queues == 16) {
- mvm->aux_queue = 11;
- mvm->first_agg_queue = 12;
- }
mvm->sf_state = SF_UNINIT;
mvm->cur_ucode = IWL_UCODE_INIT;
mvm->drop_bcn_ap_mode = true;
@@ -584,6 +611,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
INIT_DELAYED_WORK(&mvm->fw_dump_wk, iwl_mvm_fw_error_dump_wk);
INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
+ INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
spin_lock_init(&mvm->d0i3_tx_lock);
@@ -595,6 +623,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
+ INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork);
+
/*
* Populate the state variables that the transport layer needs
* to know about.
@@ -603,6 +633,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
switch (iwlwifi_mod_params.amsdu_size) {
+ case IWL_AMSDU_DEF:
case IWL_AMSDU_4K:
trans_cfg.rx_buf_size = IWL_AMSDU_4K;
break;
@@ -617,6 +648,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwlwifi_mod_params.amsdu_size);
trans_cfg.rx_buf_size = IWL_AMSDU_4K;
}
+
+ /* the hardware splits the A-MSDU */
+ if (mvm->cfg->mq_rx_supported)
+ trans_cfg.rx_buf_size = IWL_AMSDU_4K;
trans_cfg.wide_cmd_header = fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_WIDE_CMD_HDR);
@@ -633,6 +668,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
trans_cfg.scd_set_active = true;
+ trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info,
+ driver_data[2]);
+
trans_cfg.sdio_adma_addr = fw->sdio_adma_addr;
trans_cfg.sw_csum_tx = IWL_MVM_SW_TX_CSUM_OFFLOAD;
@@ -735,9 +773,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_mvm_tof_init(mvm);
- setup_timer(&mvm->scan_timer, iwl_mvm_scan_timeout,
- (unsigned long)mvm);
-
return op_mode;
out_unregister:
@@ -791,8 +826,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
iwl_mvm_tof_clean(mvm);
- del_timer_sync(&mvm->scan_timer);
-
mutex_destroy(&mvm->mutex);
mutex_destroy(&mvm->d0i3_suspend_mutex);
@@ -936,8 +969,6 @@ static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
- else if (pkt->hdr.cmd == FRAME_RELEASE)
- iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
else
@@ -953,11 +984,11 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0);
- else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
- iwl_mvm_rx_phy_cmd_mq(mvm, rxb);
else if (unlikely(pkt->hdr.group_id == DATA_PATH_GROUP &&
pkt->hdr.cmd == RX_QUEUES_NOTIFICATION))
iwl_mvm_rx_queue_notif(mvm, rxb, 0);
+ else if (pkt->hdr.cmd == FRAME_RELEASE)
+ iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
else
iwl_mvm_rx_common(mvm, rxb, pkt);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index 7b1f6ad60..ff85865b1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -308,7 +308,7 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
/* Allow U-APSD only if p2p is stand alone */
bool is_p2p_standalone = true;
- if (!iwl_mvm_is_p2p_standalone_uapsd_supported(mvm))
+ if (!iwl_mvm_is_p2p_scm_uapsd_supported(mvm))
return false;
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 81dd2f6a4..227c5ed9c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -211,6 +211,9 @@ static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (is_ht80(rate) && (vht_cap->cap &
IEEE80211_VHT_CAP_SHORT_GI_80))
return true;
+ if (is_ht160(rate) && (vht_cap->cap &
+ IEEE80211_VHT_CAP_SHORT_GI_160))
+ return true;
return false;
}
@@ -399,7 +402,7 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
static void rs_rate_scale_perform(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta,
- int tid);
+ int tid, bool ndp);
static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta,
@@ -445,6 +448,13 @@ static const u16 expected_tpt_siso_80MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 241, 0, 475, 701, 921, 1343, 1741, 1931, 2117, 2468, 2691},
};
+static const u16 expected_tpt_siso_160MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 191, 0, 244, 288, 298, 308, 313, 318, 323, 328, 330},
+ {0, 0, 0, 0, 200, 0, 251, 293, 302, 312, 317, 322, 327, 332, 334},
+ {0, 0, 0, 0, 439, 0, 875, 1307, 1736, 2584, 3419, 3831, 4240, 5049, 5581},
+ {0, 0, 0, 0, 488, 0, 972, 1451, 1925, 2864, 3785, 4240, 4691, 5581, 6165},
+};
+
static const u16 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250, 261, 0},
{0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256, 267, 0},
@@ -466,6 +476,13 @@ static const u16 expected_tpt_mimo2_80MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 474, 0, 920, 1338, 1732, 2464, 3116, 3418, 3705, 4225, 4545},
};
+static const u16 expected_tpt_mimo2_160MHz[4][IWL_RATE_COUNT] = {
+ {0, 0, 0, 0, 240, 0, 278, 308, 313, 319, 322, 324, 328, 330, 334},
+ {0, 0, 0, 0, 247, 0, 282, 310, 315, 320, 323, 325, 329, 332, 338},
+ {0, 0, 0, 0, 875, 0, 1735, 2582, 3414, 5043, 6619, 7389, 8147, 9629, 10592},
+ {0, 0, 0, 0, 971, 0, 1925, 2861, 3779, 5574, 7304, 8147, 8976, 10592, 11640},
+};
+
/* mbps, mcs */
static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
{ "1", "BPSK DSSS"},
@@ -901,7 +918,6 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
}
}
- WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_160);
WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_80 &&
!is_vht(rate));
@@ -1161,7 +1177,7 @@ static u8 rs_get_tid(struct ieee80211_hdr *hdr)
}
void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- int tid, struct ieee80211_tx_info *info)
+ int tid, struct ieee80211_tx_info *info, bool ndp)
{
int legacy_success;
int retries;
@@ -1384,7 +1400,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
done:
/* See if there's a better rate or modulation mode to try. */
if (sta->supp_rates[info->band])
- rs_rate_scale_perform(mvm, sta, lq_sta, tid);
+ rs_rate_scale_perform(mvm, sta, lq_sta, tid, ndp);
}
/*
@@ -1407,7 +1423,8 @@ static void rs_mac80211_tx_status(void *mvm_r,
info->flags & IEEE80211_TX_CTL_NO_ACK)
return;
- iwl_mvm_rs_tx_status(mvm, sta, rs_get_tid(hdr), info);
+ iwl_mvm_rs_tx_status(mvm, sta, rs_get_tid(hdr), info,
+ ieee80211_is_qos_nullfunc(hdr->frame_control));
}
/*
@@ -1494,6 +1511,9 @@ static const u16 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta,
case RATE_MCS_CHAN_WIDTH_80:
ht_tbl_pointer = expected_tpt_siso_80MHz;
break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ ht_tbl_pointer = expected_tpt_siso_160MHz;
+ break;
default:
WARN_ON_ONCE(1);
}
@@ -1508,6 +1528,9 @@ static const u16 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta,
case RATE_MCS_CHAN_WIDTH_80:
ht_tbl_pointer = expected_tpt_mimo2_80MHz;
break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ ht_tbl_pointer = expected_tpt_mimo2_160MHz;
+ break;
default:
WARN_ON_ONCE(1);
}
@@ -1582,12 +1605,17 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta)
{
- if (sta->bandwidth >= IEEE80211_STA_RX_BW_80)
+ switch (sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_160:
+ return RATE_MCS_CHAN_WIDTH_160;
+ case IEEE80211_STA_RX_BW_80:
return RATE_MCS_CHAN_WIDTH_80;
- else if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
+ case IEEE80211_STA_RX_BW_40:
return RATE_MCS_CHAN_WIDTH_40;
-
- return RATE_MCS_CHAN_WIDTH_20;
+ case IEEE80211_STA_RX_BW_20:
+ default:
+ return RATE_MCS_CHAN_WIDTH_20;
+ }
}
/*
@@ -2213,7 +2241,7 @@ static bool rs_tpc_perform(struct iwl_mvm *mvm,
static void rs_rate_scale_perform(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta,
- int tid)
+ int tid, bool ndp)
{
int low = IWL_RATE_INVALID;
int high = IWL_RATE_INVALID;
@@ -2512,7 +2540,7 @@ lq_update:
(lq_sta->tx_agg_tid_en & (1 << tid)) &&
(tid != IWL_MAX_TID_COUNT)) {
tid_data = &sta_priv->tid_data[tid];
- if (tid_data->state == IWL_AGG_OFF) {
+ if (tid_data->state == IWL_AGG_OFF && !ndp) {
IWL_DEBUG_RATE(mvm,
"try to aggregate tid %d\n",
tid);
@@ -2565,6 +2593,9 @@ static const struct rs_init_rate_info rs_optimal_rates_ht[] = {
{ S8_MIN, IWL_RATE_MCS_0_INDEX},
};
+/* MCS index 9 is not valid for 20MHz VHT channel width,
+ * but is ok for 40, 80 and 160MHz channels.
+ */
static const struct rs_init_rate_info rs_optimal_rates_vht_20mhz[] = {
{ -60, IWL_RATE_MCS_8_INDEX },
{ -64, IWL_RATE_MCS_7_INDEX },
@@ -2577,7 +2608,7 @@ static const struct rs_init_rate_info rs_optimal_rates_vht_20mhz[] = {
{ S8_MIN, IWL_RATE_MCS_0_INDEX},
};
-static const struct rs_init_rate_info rs_optimal_rates_vht_40_80mhz[] = {
+static const struct rs_init_rate_info rs_optimal_rates_vht[] = {
{ -60, IWL_RATE_MCS_9_INDEX },
{ -64, IWL_RATE_MCS_8_INDEX },
{ -68, IWL_RATE_MCS_7_INDEX },
@@ -2640,9 +2671,9 @@ static void rs_init_optimal_rate(struct iwl_mvm *mvm,
lq_sta->optimal_nentries =
ARRAY_SIZE(rs_optimal_rates_vht_20mhz);
} else {
- lq_sta->optimal_rates = rs_optimal_rates_vht_40_80mhz;
+ lq_sta->optimal_rates = rs_optimal_rates_vht;
lq_sta->optimal_nentries =
- ARRAY_SIZE(rs_optimal_rates_vht_40_80mhz);
+ ARRAY_SIZE(rs_optimal_rates_vht);
}
} else if (is_ht(rate)) {
lq_sta->optimal_rates = rs_optimal_rates_ht;
@@ -2734,23 +2765,25 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
*/
if (sta->vht_cap.vht_supported &&
best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) {
- if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
- initial_rates = rs_optimal_rates_vht_40_80mhz;
- nentries = ARRAY_SIZE(rs_optimal_rates_vht_40_80mhz);
- if (sta->bandwidth >= IEEE80211_STA_RX_BW_80)
- rate->bw = RATE_MCS_CHAN_WIDTH_80;
- else
- rate->bw = RATE_MCS_CHAN_WIDTH_40;
- } else if (sta->bandwidth == IEEE80211_STA_RX_BW_20) {
+ switch (sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_160:
+ case IEEE80211_STA_RX_BW_80:
+ case IEEE80211_STA_RX_BW_40:
+ initial_rates = rs_optimal_rates_vht;
+ nentries = ARRAY_SIZE(rs_optimal_rates_vht);
+ break;
+ case IEEE80211_STA_RX_BW_20:
initial_rates = rs_optimal_rates_vht_20mhz;
nentries = ARRAY_SIZE(rs_optimal_rates_vht_20mhz);
- rate->bw = RATE_MCS_CHAN_WIDTH_20;
- } else {
+ break;
+ default:
IWL_ERR(mvm, "Invalid BW %d\n", sta->bandwidth);
goto out;
}
+
active_rate = lq_sta->active_siso_rate;
rate->type = LQ_VHT_SISO;
+ rate->bw = rs_bw_from_sta_bw(sta);
} else if (sta->ht_cap.ht_supported &&
best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) {
initial_rates = rs_optimal_rates_ht;
@@ -3057,6 +3090,9 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
case RATE_MCS_CHAN_WIDTH_80:
mvm->drv_rx_stats.bw_80_frames++;
break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ mvm->drv_rx_stats.bw_160_frames++;
+ break;
default:
WARN_ONCE(1, "bad BW. rate 0x%x", rate);
}
@@ -3705,7 +3741,8 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
desc += sprintf(buff + desc, " %s",
(is_ht20(rate)) ? "20MHz" :
(is_ht40(rate)) ? "40MHz" :
- (is_ht80(rate)) ? "80Mhz" : "BAD BW");
+ (is_ht80(rate)) ? "80MHz" :
+ (is_ht160(rate)) ? "160MHz" : "BAD BW");
desc += sprintf(buff + desc, " %s %s %s %s\n",
(rate->sgi) ? "SGI" : "NGI",
(rate->ldpc) ? "LDPC" : "BCC",
@@ -3787,9 +3824,10 @@ static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
lq_sta->active_tbl == i ? "*" : "x",
rate->type,
rate->sgi,
- is_ht20(rate) ? "20Mhz" :
- is_ht40(rate) ? "40Mhz" :
- is_ht80(rate) ? "80Mhz" : "ERR",
+ is_ht20(rate) ? "20MHz" :
+ is_ht40(rate) ? "40MHz" :
+ is_ht80(rate) ? "80MHz" :
+ is_ht160(rate) ? "160MHz" : "ERR",
rate->index);
for (j = 0; j < IWL_RATE_COUNT; j++) {
desc += sprintf(buff+desc,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index 90d046fb2..ee207f2c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -205,6 +205,7 @@ struct rs_rate {
#define is_ht20(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_20)
#define is_ht40(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_40)
#define is_ht80(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_80)
+#define is_ht160(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_160)
#define IWL_MAX_MCS_DISPLAY_SIZE 12
@@ -362,7 +363,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
/* Notify RS about Tx status */
void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- int tid, struct ieee80211_tx_info *info);
+ int tid, struct ieee80211_tx_info *info, bool ndp);
/**
* iwl_rate_control_register - Register the rate control algorithm callbacks
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index ab7f7eda9..0e60e38b2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -101,7 +101,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
struct napi_struct *napi,
struct sk_buff *skb,
struct ieee80211_hdr *hdr, u16 len,
- u32 ampdu_status, u8 crypt_len,
+ u8 crypt_len,
struct iwl_rx_cmd_buffer *rxb)
{
unsigned int hdrlen, fraglen;
@@ -268,7 +268,6 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
struct ieee80211_sta *sta = NULL;
struct sk_buff *skb;
u32 len;
- u32 ampdu_status;
u32 rate_n_flags;
u32 rx_pkt_status;
u8 crypt_len = 0;
@@ -354,13 +353,22 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
if (sta) {
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct ieee80211_vif *tx_blocked_vif =
+ rcu_dereference(mvm->csa_tx_blocked_vif);
/* We have tx blocked stations (with CS bit). If we heard
* frames from a blocked station on a new channel we can
* TX to it again.
*/
- if (unlikely(mvm->csa_tx_block_bcn_timeout))
- iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, false);
+ if (unlikely(tx_blocked_vif) &&
+ mvmsta->vif == tx_blocked_vif) {
+ struct iwl_mvm_vif *mvmvif =
+ iwl_mvm_vif_from_mac80211(tx_blocked_vif);
+
+ if (mvmvif->csa_target_freq == rx_status->freq)
+ iwl_mvm_sta_modify_disable_tx_ap(mvm, sta,
+ false);
+ }
rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);
@@ -471,7 +479,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
iwl_mvm_ref(mvm, IWL_MVM_REF_RX);
iwl_mvm_pass_packet_to_mac80211(mvm, sta, napi, skb, hdr, len,
- ampdu_status, crypt_len, rxb);
+ crypt_len, rxb);
if (take_ref)
iwl_mvm_unref(mvm, IWL_MVM_REF_RX);
@@ -490,6 +498,7 @@ struct iwl_mvm_stat_data {
__le32 mac_id;
u8 beacon_filter_average_energy;
struct mvm_statistics_general_v8 *general;
+ struct mvm_statistics_load *load;
};
static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
@@ -606,13 +615,15 @@ iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
- struct iwl_notif_statistics_v10 *stats = (void *)&pkt->data;
+ struct iwl_notif_statistics_v11 *stats = (void *)&pkt->data;
struct iwl_mvm_stat_data data = {
.mvm = mvm,
};
+ int expected_size = iwl_mvm_has_new_rx_api(mvm) ? sizeof(*stats) :
+ sizeof(struct iwl_notif_statistics_v10);
u32 temperature;
- if (iwl_rx_packet_payload_len(pkt) != sizeof(*stats))
+ if (iwl_rx_packet_payload_len(pkt) != expected_size)
goto invalid;
temperature = le32_to_cpu(stats->general.radio_temperature);
@@ -630,6 +641,25 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
le64_to_cpu(stats->general.on_time_scan);
data.general = &stats->general;
+ if (iwl_mvm_has_new_rx_api(mvm)) {
+ int i;
+
+ data.load = &stats->load_stats;
+
+ rcu_read_lock();
+ for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+ struct iwl_mvm_sta *sta;
+
+ if (!data.load->avg_energy[i])
+ continue;
+
+ sta = iwl_mvm_sta_from_staid_rcu(mvm, i);
+ if (!sta)
+ continue;
+ sta->avg_energy = data.load->avg_energy[i];
+ }
+ rcu_read_unlock();
+ }
iwl_mvm_rx_stats_check_trigger(mvm, pkt);
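The avg_energy update in the hunk above walks a firmware-indexed station table under rcu_read_lock(). A minimal stand-alone sketch of that read-side pattern, with hypothetical names (my_table, my_entry) standing in for the driver's fw_id_to_mac_id machinery:

/* Sketch of the RCU read-side pattern above; names are hypothetical. */
#include <linux/rcupdate.h>
#include <linux/types.h>

#define MY_COUNT 16

struct my_entry {
	u8 avg_energy;
};

static struct my_entry __rcu *my_table[MY_COUNT];

static void my_update(const u8 *avg_energy)
{
	int i;

	rcu_read_lock();
	for (i = 0; i < MY_COUNT; i++) {
		struct my_entry *e;

		if (!avg_energy[i])	/* 0 means "no sample" */
			continue;

		e = rcu_dereference(my_table[i]);
		if (!e)			/* entry may vanish under us */
			continue;
		e->avg_energy = avg_energy[i];
	}
	rcu_read_unlock();
}

The point of the pattern is that entries may be freed concurrently; dereferencing and using each pointer only inside the read-side critical section keeps the walk safe.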
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 2c61516d0..df6c32caa 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -65,19 +65,6 @@
#include "fw-api.h"
#include "fw-dbg.h"
-void iwl_mvm_rx_phy_cmd_mq(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
-{
- mvm->ampdu_ref++;
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (mvm->last_phy_info.phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
- spin_lock(&mvm->drv_stats_lock);
- mvm->drv_rx_stats.ampdu_count++;
- spin_unlock(&mvm->drv_stats_lock);
- }
-#endif
-}
-
static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
int queue, struct ieee80211_sta *sta)
{
@@ -489,6 +476,9 @@ void iwl_mvm_reorder_timer_expired(unsigned long data)
rcu_read_lock();
sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[buf->sta_id]);
/* SN is set to the last expired frame + 1 */
+ IWL_DEBUG_HT(buf->mvm,
+ "Releasing expired frames for sta %u, sn %d\n",
+ buf->sta_id, sn);
iwl_mvm_release_frames(buf->mvm, sta, NULL, buf, sn);
rcu_read_unlock();
} else if (buf->num_stored) {
@@ -587,6 +577,8 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
struct sk_buff *tail;
u32 reorder = le32_to_cpu(desc->reorder_data);
bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
+ bool last_subframe =
+ desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;
u8 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
u8 sub_frame_idx = desc->amsdu_info &
IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
@@ -653,7 +645,8 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
/* release immediately if allowed by nssn and no stored frames */
if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
if (iwl_mvm_is_sn_less(buffer->head_sn, nssn,
- buffer->buf_size))
+ buffer->buf_size) &&
+ (!amsdu || last_subframe))
buffer->head_sn = nssn;
/* No need to update AMSDU last SN - we are moving the head */
spin_unlock_bh(&buffer->lock);
@@ -687,7 +680,20 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
buffer->last_sub_index = sub_frame_idx;
}
- iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn);
+ /*
+ * We cannot trust NSSN for AMSDU sub-frames that are not the last.
+ * The reason is that NSSN advances on the first sub-frame, and may
+ * cause the reorder buffer to advance before all the sub-frames arrive.
+	 * Example: the reorder buffer contains SN 0 & 2, and we receive an A-MSDU
+	 * with SN 1. The NSSN for the first sub-frame will be 3, causing the
+	 * driver to release SN 0, 1 and 2. When sub-frame 1 arrives, the reorder
+	 * buffer is already ahead of it, so it will be dropped.
+	 * If the last sub-frame is not on this queue, we will get a frame
+	 * release notification with an up-to-date NSSN.
+ */
+ if (!amsdu || last_subframe)
+ iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn);
+
spin_unlock_bh(&buffer->lock);
return true;
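The comment above encodes one rule: the NSSN may advance the reorder window for a plain MPDU or for the last A-MSDU sub-frame, never for an earlier sub-frame. A stand-alone model of that truth table (illustrative names, not the driver's):

#include <stdbool.h>
#include <stdio.h>

/* Model of the release decision: NSSN may move the window forward only
 * once the last A-MSDU sub-frame of an MPDU has been seen. */
static bool may_release(bool amsdu, bool last_subframe)
{
	return !amsdu || last_subframe;
}

int main(void)
{
	printf("plain MPDU:             %d\n", may_release(false, false)); /* 1 */
	printf("A-MSDU, mid sub-frame:  %d\n", may_release(true, false));  /* 0 */
	printf("A-MSDU, last sub-frame: %d\n", may_release(true, true));   /* 1 */
	return 0;
}

In the SN 0/1/2 example from the comment, the mid sub-frame case is exactly what prevents the premature release.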
@@ -736,6 +742,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct ieee80211_hdr *hdr = (void *)(pkt->data + sizeof(*desc));
u32 len = le16_to_cpu(desc->mpdu_len);
u32 rate_n_flags = le32_to_cpu(desc->rate_n_flags);
+ u16 phy_info = le16_to_cpu(desc->phy_info);
struct ieee80211_sta *sta = NULL;
struct sk_buff *skb;
u8 crypt_len = 0;
@@ -766,16 +773,34 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
le16_to_cpu(desc->status));
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
}
-
- rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise);
+ /* set the preamble flag if appropriate */
+ if (phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
+ rx_status->flag |= RX_FLAG_SHORTPRE;
+
+ if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
+ rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise);
+ /* TSF as indicated by the firmware is at INA time */
+ rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
+ }
rx_status->device_timestamp = le32_to_cpu(desc->gp2_on_air_rise);
rx_status->band = desc->channel > 14 ? NL80211_BAND_5GHZ :
NL80211_BAND_2GHZ;
rx_status->freq = ieee80211_channel_to_frequency(desc->channel,
rx_status->band);
iwl_mvm_get_signal_strength(mvm, desc, rx_status);
- /* TSF as indicated by the firmware is at INA time */
- rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
+
+	/* update aggregation data for monitor's sake on the default queue */
+ if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
+ bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
+
+ rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+ rx_status->ampdu_reference = mvm->ampdu_ref;
+ /* toggle is switched whenever new aggregation starts */
+ if (toggle_bit != mvm->ampdu_toggle) {
+ mvm->ampdu_ref++;
+ mvm->ampdu_toggle = toggle_bit;
+ }
+ }
rcu_read_lock();
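A stand-alone model of the toggle-bit bookkeeping added above: the firmware flips one PHY bit whenever a new aggregation starts, so a change of the bit - not its absolute value - is what bumps ampdu_ref. Names below are made up:

#include <stdbool.h>
#include <stdio.h>

struct agg_state {
	bool toggle;		/* last toggle bit seen */
	unsigned int ref;	/* distinct A-MPDUs counted */
};

static void rx_frame(struct agg_state *s, bool frame_toggle)
{
	if (frame_toggle != s->toggle) {
		s->ref++;	/* a new aggregation started */
		s->toggle = frame_toggle;
	}
}

int main(void)
{
	struct agg_state s = { .toggle = false, .ref = 0 };
	bool frames[] = { true, true, false, false, false, true };

	for (unsigned int i = 0; i < sizeof(frames) / sizeof(frames[0]); i++)
		rx_frame(&s, frames[i]);

	printf("A-MPDUs seen: %u\n", s.ref);	/* prints 3 */
	return 0;
}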
@@ -797,6 +822,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (sta) {
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct ieee80211_vif *tx_blocked_vif =
+ rcu_dereference(mvm->csa_tx_blocked_vif);
u8 baid = (u8)((le32_to_cpu(desc->reorder_data) &
IWL_RX_MPDU_REORDER_BAID_MASK) >>
IWL_RX_MPDU_REORDER_BAID_SHIFT);
@@ -806,8 +833,15 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
* frames from a blocked station on a new channel we can
* TX to it again.
*/
- if (unlikely(mvm->csa_tx_block_bcn_timeout))
- iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, false);
+ if (unlikely(tx_blocked_vif) &&
+ tx_blocked_vif == mvmsta->vif) {
+ struct iwl_mvm_vif *mvmvif =
+ iwl_mvm_vif_from_mac80211(tx_blocked_vif);
+
+ if (mvmvif->csa_target_freq == rx_status->freq)
+ iwl_mvm_sta_modify_disable_tx_ap(mvm, sta,
+ false);
+ }
rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);
@@ -830,8 +864,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL);
}
- /* TODO: multi queue TCM */
-
if (ieee80211_is_data(hdr->frame_control))
iwl_mvm_rx_csum(sta, skb, desc);
@@ -856,14 +888,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
iwl_mvm_agg_rx_received(mvm, baid);
}
- /*
- * TODO: PHY info.
- * Verify we don't have the information in the MPDU descriptor and
- * that it is not needed.
- * Make sure for monitor mode that we are on default queue, update
- * ampdu_ref and the rest of phy info then
- */
-
/* Set up the HT phy flags */
switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
case RATE_MCS_CHAN_WIDTH_20:
@@ -907,8 +931,18 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->band);
}
- /* TODO: PHY info - update ampdu queue statistics (for debugfs) */
- /* TODO: PHY info - gscan */
+ /* management stuff on default queue */
+ if (!queue) {
+ if (unlikely((ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control)) &&
+ mvm->sched_scan_pass_all ==
+ SCHED_SCAN_PASS_ALL_ENABLED))
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND;
+
+ if (unlikely(ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control)))
+ rx_status->boottime_ns = ktime_get_boot_ns();
+ }
iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
@@ -927,6 +961,9 @@ void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
int baid = release->baid;
+ IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n",
+ release->baid, le16_to_cpu(release->nssn));
+
if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
return;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index e78fc567f..dac120f88 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -391,15 +391,18 @@ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
ieee80211_sched_scan_stopped(mvm->hw);
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
} else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
+ struct cfg80211_scan_info info = {
+ .aborted = aborted,
+ };
+
IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n",
aborted ? "aborted" : "completed",
iwl_mvm_ebs_status_str(scan_notif->ebs_status));
mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR;
- ieee80211_scan_completed(mvm->hw,
- scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
+ ieee80211_scan_completed(mvm->hw, &info);
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
- del_timer(&mvm->scan_timer);
+ cancel_delayed_work(&mvm->scan_timeout_dwork);
} else {
IWL_ERR(mvm,
"got scan complete notification but no scan is running\n");
@@ -1222,15 +1225,16 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
return -EIO;
}
-#define SCAN_TIMEOUT (20 * HZ)
+#define SCAN_TIMEOUT 20000
-void iwl_mvm_scan_timeout(unsigned long data)
+void iwl_mvm_scan_timeout_wk(struct work_struct *work)
{
- struct iwl_mvm *mvm = (struct iwl_mvm *)data;
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm,
+ scan_timeout_dwork);
IWL_ERR(mvm, "regular scan timed out\n");
- del_timer(&mvm->scan_timer);
iwl_force_nmi(mvm->trans);
}
@@ -1313,7 +1317,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
- mod_timer(&mvm->scan_timer, jiffies + SCAN_TIMEOUT);
+ queue_delayed_work(system_wq, &mvm->scan_timeout_dwork,
+ msecs_to_jiffies(SCAN_TIMEOUT));
return 0;
}
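The hunk above swaps a kernel timer for a delayed work item; presumably the handler then runs in process context, where forcing the NMI and any follow-up can sleep, which a timer callback may not. A minimal sketch of the same conversion with hypothetical names (my_dev, my_timeout_wk):

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_dev {
	struct delayed_work timeout_dwork;
};

static void my_timeout_wk(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct my_dev *dev = container_of(dwork, struct my_dev,
					  timeout_dwork);

	/* runs in process context - unlike a timer callback, may sleep */
	(void)dev;
}

static void my_dev_init(struct my_dev *dev)
{
	INIT_DELAYED_WORK(&dev->timeout_dwork, my_timeout_wk);
}

static void my_arm(struct my_dev *dev)
{
	queue_delayed_work(system_wq, &dev->timeout_dwork,
			   msecs_to_jiffies(20000));
}

static void my_disarm(struct my_dev *dev)
{
	cancel_delayed_work(&dev->timeout_dwork);
}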
@@ -1430,9 +1435,13 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
/* if the scan is already stopping, we don't need to notify mac80211 */
if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
- ieee80211_scan_completed(mvm->hw, aborted);
+ struct cfg80211_scan_info info = {
+ .aborted = aborted,
+ };
+
+ ieee80211_scan_completed(mvm->hw, &info);
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
- del_timer(&mvm->scan_timer);
+ cancel_delayed_work(&mvm->scan_timeout_dwork);
} else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
ieee80211_sched_scan_stopped(mvm->hw);
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
@@ -1564,7 +1573,11 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_REGULAR);
if (uid >= 0) {
- ieee80211_scan_completed(mvm->hw, true);
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
+ ieee80211_scan_completed(mvm->hw, &info);
mvm->scan_uid_status[uid] = 0;
}
uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED);
@@ -1585,8 +1598,13 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
mvm->scan_uid_status[i] = 0;
}
} else {
- if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
- ieee80211_scan_completed(mvm->hw, true);
+ if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
+ ieee80211_scan_completed(mvm->hw, &info);
+ }
/* Sched scan will be restarted by mac80211 in
* restart_hw, so do not report if FW is about to be
@@ -1628,9 +1646,14 @@ out:
* to release the scan reference here.
*/
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
- del_timer(&mvm->scan_timer);
- if (notify)
- ieee80211_scan_completed(mvm->hw, true);
+ cancel_delayed_work(&mvm->scan_timeout_dwork);
+ if (notify) {
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
+ ieee80211_scan_completed(mvm->hw, &info);
+ }
} else if (notify) {
ieee80211_sched_scan_stopped(mvm->hw);
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
index 443a42855..101fb04a8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
@@ -215,7 +215,7 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
enum iwl_sf_state new_state)
{
struct iwl_sf_cfg_cmd sf_cmd = {
- .state = cpu_to_le32(SF_FULL_ON),
+ .state = cpu_to_le32(new_state),
};
struct ieee80211_sta *sta;
int ret = 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index b23ab4a45..3130b9c68 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -310,6 +310,304 @@ static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
}
+/* Disable aggregations for a bitmap of TIDs for a given station */
+static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
+ unsigned long disable_agg_tids,
+ bool remove_queue)
+{
+ struct iwl_mvm_add_sta_cmd cmd = {};
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ u32 status;
+ u8 sta_id;
+ int ret;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ sta_id = mvm->queue_info[queue].ra_sta_id;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ rcu_read_lock();
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ mvmsta->tid_disable_agg |= disable_agg_tids;
+
+ cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
+ cmd.sta_id = mvmsta->sta_id;
+ cmd.add_modify = STA_MODE_MODIFY;
+ cmd.modify_mask = STA_MODIFY_QUEUES;
+ if (disable_agg_tids)
+ cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
+ if (remove_queue)
+ cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
+ cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
+ cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
+
+ rcu_read_unlock();
+
+ /* Notify FW of queue removal from the STA queues */
+ status = ADD_STA_SUCCESS;
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+ iwl_mvm_add_sta_cmd_size(mvm),
+ &cmd, &status);
+
+ return ret;
+}
+
+static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ unsigned long tid_bitmap;
+ unsigned long agg_tids = 0;
+ s8 sta_id;
+ int tid;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ sta_id = mvm->queue_info[queue].ra_sta_id;
+ tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+ return -EINVAL;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ spin_lock_bh(&mvmsta->lock);
+ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+ if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
+ agg_tids |= BIT(tid);
+ }
+ spin_unlock_bh(&mvmsta->lock);
+
+ return agg_tids;
+}
+
+/*
+ * Remove a queue from a station's resources.
+ * Note that this only marks the queue as free. It DOESN'T delete the BA
+ * agreement, and doesn't disable the queue.
+ */
+static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ unsigned long tid_bitmap;
+ unsigned long disable_agg_tids = 0;
+ u8 sta_id;
+ int tid;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ sta_id = mvm->queue_info[queue].ra_sta_id;
+ tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ rcu_read_lock();
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ spin_lock_bh(&mvmsta->lock);
+ /* Unmap MAC queues and TIDs from this queue */
+ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+ if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
+ disable_agg_tids |= BIT(tid);
+ mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE;
+ }
+
+ mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
+ spin_unlock_bh(&mvmsta->lock);
+
+ rcu_read_unlock();
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ /* Unmap MAC queues and TIDs from this queue */
+ mvm->queue_info[queue].hw_queue_to_mac80211 = 0;
+ mvm->queue_info[queue].hw_queue_refcount = 0;
+ mvm->queue_info[queue].tid_bitmap = 0;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ return disable_agg_tids;
+}
+
+static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
+ unsigned long tfd_queue_mask, u8 ac)
+{
+ int queue = 0;
+ u8 ac_to_queue[IEEE80211_NUM_ACS];
+ int i;
+
+ lockdep_assert_held(&mvm->queue_info_lock);
+
+ memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));
+
+ /* See what ACs the existing queues for this STA have */
+ for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
+ /* Only DATA queues can be shared */
+ if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
+ i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
+ continue;
+
+ ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
+ }
+
+ /*
+ * The queue to share is chosen only from DATA queues as follows (in
+ * descending priority):
+ * 1. An AC_BE queue
+ * 2. Same AC queue
+ * 3. Highest AC queue that is lower than new AC
+ * 4. Any existing AC (there always is at least 1 DATA queue)
+ */
+
+ /* Priority 1: An AC_BE queue */
+ if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
+ queue = ac_to_queue[IEEE80211_AC_BE];
+ /* Priority 2: Same AC queue */
+ else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
+ queue = ac_to_queue[ac];
+ /* Priority 3a: If new AC is VO and VI exists - use VI */
+ else if (ac == IEEE80211_AC_VO &&
+ ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
+ queue = ac_to_queue[IEEE80211_AC_VI];
+	/* Priority 3b: no BE, so the only AC lower than the new one is BK */
+ else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
+ queue = ac_to_queue[IEEE80211_AC_BK];
+ /* Priority 4a: No BE nor BK - use VI if exists */
+ else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
+ queue = ac_to_queue[IEEE80211_AC_VI];
+ /* Priority 4b: No BE, BK nor VI - use VO if exists */
+ else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
+ queue = ac_to_queue[IEEE80211_AC_VO];
+
+	/* Make sure the queue found (if any) is in a legal range */
+ if (!((queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE &&
+ queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE) ||
+ (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE &&
+ queue <= IWL_MVM_DQA_MAX_DATA_QUEUE) ||
+ (queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE))) {
+ IWL_ERR(mvm, "No DATA queues available to share\n");
+ queue = -ENOSPC;
+ }
+
+ return queue;
+}
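The fallback order in iwl_mvm_get_shared_queue() can be exercised in isolation. The stand-alone sketch below models it in plain C with made-up values; note that mac80211's AC enum puts VO at 0 and BK at 3, and -1 stands in for IEEE80211_INVAL_HW_QUEUE:

#include <stdio.h>

enum { AC_VO, AC_VI, AC_BE, AC_BK, NUM_ACS };

static int pick_shared_queue(const int ac_to_queue[NUM_ACS], int new_ac)
{
	if (ac_to_queue[AC_BE] >= 0)			/* 1: an AC_BE queue */
		return ac_to_queue[AC_BE];
	if (ac_to_queue[new_ac] >= 0)			/* 2: same AC */
		return ac_to_queue[new_ac];
	if (new_ac == AC_VO && ac_to_queue[AC_VI] >= 0)	/* 3a: VO falls to VI */
		return ac_to_queue[AC_VI];
	if (ac_to_queue[AC_BK] >= 0)			/* 3b: only BK is lower */
		return ac_to_queue[AC_BK];
	if (ac_to_queue[AC_VI] >= 0)			/* 4a */
		return ac_to_queue[AC_VI];
	if (ac_to_queue[AC_VO] >= 0)			/* 4b */
		return ac_to_queue[AC_VO];
	return -1;					/* no DATA queue at all */
}

int main(void)
{
	int queues[NUM_ACS] = { -1, 7, -1, 9 };	/* VI on 7, BK on 9 */

	/* no BE and no VO queue, so a new VO stream falls back to VI (7) */
	printf("share queue %d\n", pick_shared_queue(queues, AC_VO));
	return 0;
}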
+
+/*
+ * If a given queue has a higher AC than the TID stream that is being added to
+ * it, the queue needs to be redirected to the lower AC. This function does that
+ * in such a case; otherwise - when no redirection is required - it does
+ * nothing, unless the %force param is true.
+ */
+static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
+ int ac, int ssn, unsigned int wdg_timeout,
+ bool force)
+{
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .enable = 0,
+ };
+ bool shared_queue;
+ unsigned long mq;
+ int ret;
+
+ /*
+ * If the AC is lower than current one - FIFO needs to be redirected to
+ * the lowest one of the streams in the queue. Check if this is needed
+ * here.
+	 * Notice that the enum ieee80211_ac_numbers is "flipped": BK has
+	 * value 3 and VO has value 0, so to check if ac X is lower than ac Y
+	 * we need to check if the numerical value of X is LARGER than that of Y.
+ */
+ spin_lock_bh(&mvm->queue_info_lock);
+ if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "No redirection needed on TXQ #%d\n",
+ queue);
+ return 0;
+ }
+
+ cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
+ cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
+ mq = mvm->queue_info[queue].hw_queue_to_mac80211;
+ shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Redirecting shared TXQ #%d to FIFO #%d\n",
+ queue, iwl_mvm_ac_to_tx_fifo[ac]);
+
+ /* Stop MAC queues and wait for this queue to empty */
+ iwl_mvm_stop_mac_queues(mvm, mq);
+ ret = iwl_trans_wait_tx_queue_empty(mvm->trans, BIT(queue));
+ if (ret) {
+ IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
+ queue);
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Before redirecting the queue we need to de-activate it */
+ iwl_trans_txq_disable(mvm->trans, queue, false);
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
+ ret);
+
+ /* Make sure the SCD wrptr is correctly set before reconfiguring */
+ iwl_trans_txq_enable(mvm->trans, queue, iwl_mvm_ac_to_tx_fifo[ac],
+ cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
+ ssn, wdg_timeout);
+
+ /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */
+
+ /* Redirect to lower AC */
+ iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
+ cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
+ ssn);
+
+ /* Update AC marking of the queue */
+ spin_lock_bh(&mvm->queue_info_lock);
+ mvm->queue_info[queue].mac80211_ac = ac;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ /*
+ * Mark queue as shared in transport if shared
+ * Note this has to be done after queue enablement because enablement
+	 * can also set this value, and there is no indication of shared
+	 * queues there.
+ */
+ if (shared_queue)
+ iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
+
+out:
+ /* Continue using the MAC queues */
+ iwl_mvm_start_mac_queues(mvm, mq);
+
+ return ret;
+}
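The "flipped" comparison that iwl_mvm_scd_queue_redirect() is built around is easy to get backwards, so here is a tiny stand-alone illustration: a redirect is needed exactly when the new stream's AC value is numerically larger (i.e. lower priority) than the queue's current AC:

#include <stdio.h>

enum { AC_VO = 0, AC_VI = 1, AC_BE = 2, AC_BK = 3 };

/* lower priority == larger numerical value in mac80211's AC enum */
static int needs_redirect(int queue_ac, int new_ac)
{
	return new_ac > queue_ac;
}

int main(void)
{
	printf("BE tid on a VO queue: %d\n", needs_redirect(AC_VO, AC_BE)); /* 1 */
	printf("VO tid on a BE queue: %d\n", needs_redirect(AC_BE, AC_VO)); /* 0 */
	return 0;
}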
+
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
struct ieee80211_sta *sta, u8 ac, int tid,
struct ieee80211_hdr *hdr)
@@ -325,11 +623,20 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
u8 mac_queue = mvmsta->vif->hw_queue[ac];
int queue = -1;
+ bool using_inactive_queue = false;
+ unsigned long disable_agg_tids = 0;
+ enum iwl_mvm_agg_state queue_state;
+ bool shared_queue = false;
int ssn;
+ unsigned long tfd_queue_mask;
int ret;
lockdep_assert_held(&mvm->mutex);
+ spin_lock_bh(&mvmsta->lock);
+ tfd_queue_mask = mvmsta->tfd_queue_msk;
+ spin_unlock_bh(&mvmsta->lock);
+
spin_lock_bh(&mvm->queue_info_lock);
/*
@@ -338,7 +645,8 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
*/
if (!ieee80211_is_data_qos(hdr->frame_control) ||
ieee80211_is_qos_nullfunc(hdr->frame_control)) {
- queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_MGMT_QUEUE,
+ queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
+ IWL_MVM_DQA_MIN_MGMT_QUEUE,
IWL_MVM_DQA_MAX_MGMT_QUEUE);
if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
@@ -347,29 +655,62 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
/* If no such queue is found, we'll use a DATA queue instead */
}
- if (queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
+ if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
+ (mvm->queue_info[mvmsta->reserved_queue].status ==
+ IWL_MVM_QUEUE_RESERVED ||
+ mvm->queue_info[mvmsta->reserved_queue].status ==
+ IWL_MVM_QUEUE_INACTIVE)) {
queue = mvmsta->reserved_queue;
+ mvm->queue_info[queue].reserved = true;
IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
}
if (queue < 0)
- queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
+ queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
+ IWL_MVM_DQA_MIN_DATA_QUEUE,
IWL_MVM_DQA_MAX_DATA_QUEUE);
/*
+ * Check if this queue is already allocated but inactive.
+ * In such a case, we'll need to first free this queue before enabling
+ * it again, so we'll mark it as reserved to make sure no new traffic
+ * arrives on it
+ */
+ if (queue > 0 &&
+ mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
+ using_inactive_queue = true;
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
+ queue, mvmsta->sta_id, tid);
+ }
+
+ /* No free queue - we'll have to share */
+ if (queue <= 0) {
+ queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
+ if (queue > 0) {
+ shared_queue = true;
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
+ }
+ }
+
+ /*
* Mark TXQ as ready, even though it hasn't been fully configured yet,
* to make sure no one else takes it.
	 * This allows us to avoid re-acquiring the lock at the end of the
* configuration. On error we'll mark it back as free.
*/
- if (queue >= 0)
+ if ((queue > 0) && !shared_queue)
mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
spin_unlock_bh(&mvm->queue_info_lock);
- /* TODO: support shared queues for same RA */
- if (queue < 0)
+ /* This shouldn't happen - out of queues */
+ if (WARN_ON(queue <= 0)) {
+ IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
+ tid, cfg.sta_id);
return -ENOSPC;
+ }
/*
* Actual en/disablement of aggregations is through the ADD_STA HCMD,
@@ -380,24 +721,103 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
- IWL_DEBUG_TX_QUEUES(mvm, "Allocating queue #%d to sta %d on tid %d\n",
- queue, mvmsta->sta_id, tid);
+ /*
+ * If this queue was previously inactive (idle) - we need to free it
+ * first
+ */
+ if (using_inactive_queue) {
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .enable = 0,
+ };
+ u8 ac;
+
+ disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ ac = mvm->queue_info[queue].mac80211_ac;
+ cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
+ cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[ac];
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ /* Disable the queue */
+ iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids,
+ true);
+ iwl_trans_txq_disable(mvm->trans, queue, false);
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
+ &cmd);
+ if (ret) {
+ IWL_ERR(mvm,
+ "Failed to free inactive queue %d (ret=%d)\n",
+ queue, ret);
+
+ /* Re-mark the inactive queue as inactive */
+ spin_lock_bh(&mvm->queue_info_lock);
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ return ret;
+ }
+ }
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Allocating %squeue #%d to sta %d on tid %d\n",
+ shared_queue ? "shared " : "", queue,
+ mvmsta->sta_id, tid);
+
+ if (shared_queue) {
+ /* Disable any open aggs on this queue */
+ disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);
+
+ if (disable_agg_tids) {
+ IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
+ queue);
+ iwl_mvm_invalidate_sta_queue(mvm, queue,
+ disable_agg_tids, false);
+ }
+ }
ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
wdg_timeout);
+ /*
+ * Mark queue as shared in transport if shared
+ * Note this has to be done after queue enablement because enablement
+	 * can also set this value, and there is no indication of shared
+	 * queues there.
+ */
+ if (shared_queue)
+ iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
+
spin_lock_bh(&mvmsta->lock);
mvmsta->tid_data[tid].txq_id = queue;
+ mvmsta->tid_data[tid].is_tid_active = true;
mvmsta->tfd_queue_msk |= BIT(queue);
+ queue_state = mvmsta->tid_data[tid].state;
if (mvmsta->reserved_queue == queue)
mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
spin_unlock_bh(&mvmsta->lock);
- ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
- if (ret)
- goto out_err;
+ if (!shared_queue) {
+ ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
+ if (ret)
+ goto out_err;
+
+ /* If we need to re-enable aggregations... */
+ if (queue_state == IWL_AGG_ON) {
+ ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
+ if (ret)
+ goto out_err;
+ }
+ } else {
+ /* Redirect queue, if needed */
+ ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
+ wdg_timeout, false);
+ if (ret)
+ goto out_err;
+ }
return 0;
@@ -476,6 +896,9 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
unsigned long deferred_tid_traffic;
int sta_id, tid;
+ /* Check inactivity of queues */
+ iwl_mvm_inactivity_check(mvm);
+
mutex_lock(&mvm->mutex);
/* Go over all stations with deferred traffic */
@@ -505,6 +928,12 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
int queue;
+ /*
+ * Check for inactive queues, so we don't reach a situation where we
+	 * can't add a STA due to a shortage of queues that doesn't really exist
+ */
+ iwl_mvm_inactivity_check(mvm);
+
spin_lock_bh(&mvm->queue_info_lock);
/* Make sure we have free resources for this STA */
@@ -514,7 +943,8 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
IWL_MVM_QUEUE_FREE))
queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
else
- queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
+ queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
+ IWL_MVM_DQA_MIN_DATA_QUEUE,
IWL_MVM_DQA_MAX_DATA_QUEUE);
if (queue < 0) {
spin_unlock_bh(&mvm->queue_info_lock);
@@ -568,8 +998,11 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
mvm_sta->tfd_queue_msk = 0;
- /* allocate new queues for a TDLS station */
- if (sta->tdls) {
+ /*
+ * Allocate new queues for a TDLS station, unless we're in DQA mode,
+	 * in which case they'll be allocated dynamically
+ */
+ if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) {
ret = iwl_mvm_tdls_sta_init(mvm, sta);
if (ret)
return ret;
@@ -633,7 +1066,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
return 0;
err:
- iwl_mvm_tdls_sta_deinit(mvm, sta);
+ if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
+ iwl_mvm_tdls_sta_deinit(mvm, sta);
return ret;
}
@@ -819,8 +1253,9 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
if (iwl_mvm_has_new_rx_api(mvm))
kfree(mvm_sta->dup_data);
- if (vif->type == NL80211_IFTYPE_STATION &&
- mvmvif->ap_sta_id == mvm_sta->sta_id) {
+ if ((vif->type == NL80211_IFTYPE_STATION &&
+ mvmvif->ap_sta_id == mvm_sta->sta_id) ||
+	     iwl_mvm_is_dqa_supported(mvm)) {
ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
if (ret)
return ret;
@@ -838,16 +1273,19 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
if (iwl_mvm_is_dqa_supported(mvm))
iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
- /* if we are associated - we can't remove the AP STA now */
- if (vif->bss_conf.assoc)
- return ret;
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ mvmvif->ap_sta_id == mvm_sta->sta_id) {
+ /* if associated - we can't remove the AP STA now */
+ if (vif->bss_conf.assoc)
+ return ret;
- /* unassoc - go ahead - remove the AP STA now */
- mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+ /* unassoc - go ahead - remove the AP STA now */
+ mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
- /* clear d0i3_ap_sta_id if no longer relevant */
- if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
- mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
+ /* clear d0i3_ap_sta_id if no longer relevant */
+ if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
+ mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
+ }
}
/*
@@ -885,7 +1323,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
} else {
spin_unlock_bh(&mvm_sta->lock);
- if (sta->tdls)
+ if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
iwl_mvm_tdls_sta_deinit(mvm, sta);
ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
@@ -983,8 +1421,9 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
lockdep_assert_held(&mvm->mutex);
/* Map Aux queue to fifo - needs to happen before adding Aux station */
- iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
- IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
+ if (!iwl_mvm_is_dqa_supported(mvm))
+ iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
+ IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
/* Allocate aux station and assign to it the aux queue */
ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
@@ -992,6 +1431,19 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
if (ret)
return ret;
+ if (iwl_mvm_is_dqa_supported(mvm)) {
+ struct iwl_trans_txq_scd_cfg cfg = {
+ .fifo = IWL_MVM_TX_FIFO_MCAST,
+ .sta_id = mvm->aux_sta.sta_id,
+ .tid = IWL_MAX_TID_COUNT,
+ .aggregate = false,
+ .frame_limit = IWL_FRAME_LIMIT,
+ };
+
+ iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
+ wdg_timeout);
+ }
+
ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
MAC_INDEX_AUX, 0);
@@ -1316,8 +1768,8 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
switch (status & IWL_ADD_STA_STATUS_MASK) {
case ADD_STA_SUCCESS:
- IWL_DEBUG_INFO(mvm, "RX BA Session %sed in fw\n",
- start ? "start" : "stopp");
+ IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
+ start ? "start" : "stopp");
break;
case ADD_STA_IMMEDIATE_BA_FAILURE:
IWL_WARN(mvm, "RX BA Session refused by fw\n");
@@ -1372,13 +1824,16 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
* supposed to happen) and we will free the session data while
* RX is being processed in parallel
*/
+ IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
+ mvm_sta->sta_id, tid, baid);
WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
rcu_assign_pointer(mvm->baid_map[baid], baid_data);
- } else if (mvm->rx_ba_sessions > 0) {
+ } else {
u8 baid = mvm_sta->tid_to_baid[tid];
- /* check that restart flow didn't zero the counter */
- mvm->rx_ba_sessions--;
+ if (mvm->rx_ba_sessions > 0)
+ /* check that restart flow didn't zero the counter */
+ mvm->rx_ba_sessions--;
if (!iwl_mvm_has_new_rx_api(mvm))
return 0;
@@ -1394,6 +1849,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
del_timer_sync(&baid_data->session_timer);
RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
kfree_rcu(baid_data, rcu_head);
+ IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
}
return 0;
@@ -1402,8 +1858,8 @@ out_free:
return ret;
}
-static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- int tid, u8 queue, bool start)
+int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ int tid, u8 queue, bool start)
{
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_add_sta_cmd cmd = {};
@@ -1458,6 +1914,7 @@ const u8 tid_to_mac80211_ac[] = {
IEEE80211_AC_VI,
IEEE80211_AC_VO,
IEEE80211_AC_VO,
+ IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
};
static const u8 tid_to_ucode_ac[] = {
@@ -1512,7 +1969,8 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
txq_id = mvmsta->tid_data[tid].txq_id;
if (!iwl_mvm_is_dqa_supported(mvm) ||
mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
- txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue,
+ txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
+ mvm->first_agg_queue,
mvm->last_agg_queue);
if (txq_id < 0) {
ret = txq_id;
@@ -1907,6 +2365,13 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
break;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
+ /* fall through */
+ case WLAN_CIPHER_SUITE_GCMP:
+ key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
+ memcpy(cmd.key, keyconf->key, keyconf->keylen);
+ break;
default:
key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
memcpy(cmd.key, keyconf->key, keyconf->keylen);
@@ -2035,6 +2500,8 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
0, NULL, 0, key_offset);
break;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index d2c58f134..bbc1cab2c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -321,6 +321,9 @@ enum iwl_mvm_agg_state {
* Basically when next_reclaimed reaches ssn, we can tell mac80211 that
* we are ready to finish the Tx AGG stop / start flow.
* @tx_time: medium time consumed by this A-MPDU
+ * @is_tid_active: true if this TID has sent traffic within the last
+ * %IWL_MVM_DQA_QUEUE_TIMEOUT time period. If %txq_id is invalid, this
+ * field should be ignored.
*/
struct iwl_mvm_tid_data {
struct sk_buff_head deferred_tx_frames;
@@ -333,6 +336,7 @@ struct iwl_mvm_tid_data {
u16 txq_id;
u16 ssn;
u16 tx_time;
+ bool is_tid_active;
};
static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data)
@@ -434,6 +438,7 @@ struct iwl_mvm_sta {
bool tlc_amsdu;
u8 agg_tids;
u8 sleep_tx_count;
+ u8 avg_energy;
};
static inline struct iwl_mvm_sta *
@@ -509,6 +514,9 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid);
+int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ int tid, u8 queue, bool start);
+
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm);
void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index b92b75fea..b3a87a31d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -138,28 +138,19 @@ static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
protocol = ipv6h->nexthdr;
while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
+ struct ipv6_opt_hdr *hp;
+
/* only supported extension headers */
if (protocol != NEXTHDR_ROUTING &&
protocol != NEXTHDR_HOP &&
- protocol != NEXTHDR_DEST &&
- protocol != NEXTHDR_FRAGMENT) {
+ protocol != NEXTHDR_DEST) {
skb_checksum_help(skb);
return;
}
- if (protocol == NEXTHDR_FRAGMENT) {
- struct frag_hdr *hp =
- OPT_HDR(struct frag_hdr, skb, off);
-
- protocol = hp->nexthdr;
- off += sizeof(struct frag_hdr);
- } else {
- struct ipv6_opt_hdr *hp =
- OPT_HDR(struct ipv6_opt_hdr, skb, off);
-
- protocol = hp->nexthdr;
- off += ipv6_optlen(hp);
- }
+ hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
+ protocol = hp->nexthdr;
+ off += ipv6_optlen(hp);
}
/* if we get here - protocol now should be TCP/UDP */
#endif
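With the fragment special case gone, every extension header the hunk above still accepts (hop-by-hop, routing, destination) shares the generic option-header layout, whose length field counts 8-octet units beyond the first eight bytes. A simplified stand-alone model of that walk, with a made-up two-header packet:

#include <stdint.h>
#include <stdio.h>

#define NEXTHDR_HOP	0
#define NEXTHDR_ROUTING	43
#define NEXTHDR_DEST	60

struct opt_hdr {
	uint8_t nexthdr;
	uint8_t hdrlen;	/* in 8-octet units, excluding the first */
};

#define OPTLEN(h)	(((h)->hdrlen + 1) << 3)

static int is_ext_hdr(uint8_t p)
{
	return p == NEXTHDR_HOP || p == NEXTHDR_ROUTING || p == NEXTHDR_DEST;
}

int main(void)
{
	/* hop-by-hop (next = routing), then routing (next = 6, i.e. TCP) */
	uint8_t pkt[] = { 43, 0, 0, 0, 0, 0, 0, 0,
			   6, 0, 0, 0, 0, 0, 0, 0 };
	unsigned int off = 0;
	uint8_t proto = NEXTHDR_HOP;	/* from the fixed IPv6 header */

	while (is_ext_hdr(proto)) {
		struct opt_hdr *h = (struct opt_hdr *)(pkt + off);

		proto = h->nexthdr;
		off += OPTLEN(h);
	}
	printf("payload protocol %u at offset %u\n", proto, off); /* 6, 16 */
	return 0;
}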
@@ -388,6 +379,23 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
tx_cmd->rate_n_flags = cpu_to_le32((u32)rate_plcp | rate_flags);
}
+static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
+ u8 *crypto_hdr)
+{
+ struct ieee80211_key_conf *keyconf = info->control.hw_key;
+ u64 pn;
+
+ pn = atomic64_inc_return(&keyconf->tx_pn);
+ crypto_hdr[0] = pn;
+ crypto_hdr[2] = 0;
+ crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
+ crypto_hdr[1] = pn >> 8;
+ crypto_hdr[4] = pn >> 16;
+ crypto_hdr[5] = pn >> 24;
+ crypto_hdr[6] = pn >> 32;
+ crypto_hdr[7] = pn >> 40;
+}
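The helper factored out above scatters a 48-bit packet number into the CCMP/GCMP header low byte first, with byte 3 carrying the Ext IV bit (0x20) and the key index in its top two bits. A worked stand-alone example with a made-up PN and key index:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pn = 0x0000A1B2C3D4E5F6ULL;	/* hypothetical 48-bit PN */
	uint8_t keyidx = 1;
	uint8_t hdr[8];

	hdr[0] = pn;			/* PN0 (LSB)       -> 0xf6 */
	hdr[1] = pn >> 8;		/* PN1             -> 0xe5 */
	hdr[2] = 0;			/* reserved        */
	hdr[3] = 0x20 | (keyidx << 6);	/* Ext IV | key id -> 0x60 */
	hdr[4] = pn >> 16;		/* PN2             -> 0xd4 */
	hdr[5] = pn >> 24;		/* PN3             -> 0xc3 */
	hdr[6] = pn >> 32;		/* PN4             -> 0xb2 */
	hdr[7] = pn >> 40;		/* PN5 (MSB)       -> 0xa1 */

	for (int i = 0; i < 8; i++)
		printf("hdr[%d] = 0x%02x\n", i, hdr[i]);
	return 0;
}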
+
/*
* Sets the fields in the Tx cmd that are crypto related
*/
@@ -405,15 +413,7 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
- pn = atomic64_inc_return(&keyconf->tx_pn);
- crypto_hdr[0] = pn;
- crypto_hdr[2] = 0;
- crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
- crypto_hdr[1] = pn >> 8;
- crypto_hdr[4] = pn >> 16;
- crypto_hdr[5] = pn >> 24;
- crypto_hdr[6] = pn >> 32;
- crypto_hdr[7] = pn >> 40;
+ iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
break;
case WLAN_CIPHER_SUITE_TKIP:
@@ -433,6 +433,18 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ /* TODO: Taking the key from the table might introduce a race
+		 * when PTK rekeying is done: an old packet may carry a PN
+		 * based on the old key while the message is encrypted with
+		 * the new one.
+ * Need to handle this.
+ */
+ tx_cmd->sec_ctl |= TX_CMD_SEC_GCMP | TC_CMD_SEC_KEY_FROM_TABLE;
+ tx_cmd->key[0] = keyconf->hw_key_idx;
+ iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
+ break;
default:
tx_cmd->sec_ctl |= TX_CMD_SEC_EXT;
}
@@ -533,6 +545,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
	 * (this is not possible for unicast packets as a TDLS discovery
	 * response is sent without a station entry); otherwise use the
* AUX station.
+ * In DQA mode, if vif is of type STATION and frames are not multicast,
+ * they should be sent from the BSS queue. For example, TDLS setup
+ * frames should be sent on this queue, as they go through the AP.
*/
sta_id = mvm->aux_sta.sta_id;
if (info.control.vif) {
@@ -550,6 +565,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
if (ap_sta_id != IWL_MVM_STATION_COUNT)
sta_id = ap_sta_id;
+ } else if (iwl_mvm_is_dqa_supported(mvm) &&
+ info.control.vif->type == NL80211_IFTYPE_STATION) {
+ queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
}
}
@@ -883,15 +901,17 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
* nullfunc frames should go to the MGMT queue regardless of QOS
*/
tid = IWL_MAX_TID_COUNT;
- txq_id = mvmsta->tid_data[tid].txq_id;
}
+ if (iwl_mvm_is_dqa_supported(mvm))
+ txq_id = mvmsta->tid_data[tid].txq_id;
+
/* Copy MAC header from skb into command buffer */
memcpy(tx_cmd->hdr, hdr, hdrlen);
WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
- if (sta->tdls) {
+ if (sta->tdls && !iwl_mvm_is_dqa_supported(mvm)) {
/* default to TID 0 for non-QoS packets */
u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid;
@@ -904,9 +924,12 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
txq_id = mvmsta->tid_data[tid].txq_id;
}
- if (iwl_mvm_is_dqa_supported(mvm)) {
- if (unlikely(mvmsta->tid_data[tid].txq_id ==
- IEEE80211_INVAL_HW_QUEUE)) {
+ /* Check if TXQ needs to be allocated or re-activated */
+ if (unlikely(txq_id == IEEE80211_INVAL_HW_QUEUE ||
+ !mvmsta->tid_data[tid].is_tid_active) &&
+ iwl_mvm_is_dqa_supported(mvm)) {
+ /* If TXQ needs to be allocated... */
+ if (txq_id == IEEE80211_INVAL_HW_QUEUE) {
iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
/*
@@ -916,11 +939,22 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
spin_unlock(&mvmsta->lock);
return 0;
+
}
- txq_id = mvmsta->tid_data[tid].txq_id;
+ /* If we are here - TXQ exists and needs to be re-activated */
+ spin_lock(&mvm->queue_info_lock);
+ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
+ mvmsta->tid_data[tid].is_tid_active = true;
+ spin_unlock(&mvm->queue_info_lock);
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Re-activating queue %d for TX\n",
+ txq_id);
}
+ /* Keep track of the time of the last frame for this RA/TID */
+ mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
+
IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
@@ -1312,7 +1346,15 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
bool send_eosp_ndp = false;
spin_lock_bh(&mvmsta->lock);
- txq_agg = (mvmsta->tid_data[tid].state == IWL_AGG_ON);
+ if (iwl_mvm_is_dqa_supported(mvm)) {
+ enum iwl_mvm_agg_state state;
+
+ state = mvmsta->tid_data[tid].state;
+ txq_agg = (state == IWL_AGG_ON ||
+ state == IWL_EMPTYING_HW_QUEUE_DELBA);
+ } else {
+ txq_agg = txq_id >= mvm->first_agg_queue;
+ }
if (!is_ndp) {
tid_data->next_reclaimed = next_reclaimed;
@@ -1643,7 +1685,7 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
iwl_mvm_tx_info_from_ba_notif(&ba_info, ba_notif, tid_data);
IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n");
- iwl_mvm_rs_tx_status(mvm, sta, tid, &ba_info);
+ iwl_mvm_rs_tx_status(mvm, sta, tid, &ba_info, false);
}
out:
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 161b99efd..68f4e7fdf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -579,17 +579,29 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
iwl_mvm_dump_umac_error_log(mvm);
}
-int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq)
+int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq)
{
int i;
lockdep_assert_held(&mvm->queue_info_lock);
+ /* Start by looking for a free queue */
for (i = minq; i <= maxq; i++)
if (mvm->queue_info[i].hw_queue_refcount == 0 &&
mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
return i;
+ /*
+	 * If no free queue was found, settle for an inactive one to reconfigure.
+	 * Make sure that the inactive queue either already belongs to this STA,
+	 * or, if it belongs to another one, that it isn't that STA's reserved queue.
+ */
+ for (i = minq; i <= maxq; i++)
+ if (mvm->queue_info[i].status == IWL_MVM_QUEUE_INACTIVE &&
+ (sta_id == mvm->queue_info[i].ra_sta_id ||
+ !mvm->queue_info[i].reserved))
+ return i;
+
return -ENOSPC;
}
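iwl_mvm_find_free_queue() now degrades gracefully: first a genuinely free queue, then an inactive one that is safe to reclaim. A stand-alone model of the two passes, with simplified fields and made-up values:

#include <stdbool.h>
#include <stdio.h>

enum q_status { Q_FREE, Q_READY, Q_INACTIVE };

struct q_info {
	enum q_status status;
	int refcount;
	int ra_sta_id;
	bool reserved;
};

static int find_queue(const struct q_info *q, int minq, int maxq, int sta_id)
{
	/* pass 1: a truly free queue */
	for (int i = minq; i <= maxq; i++)
		if (q[i].refcount == 0 && q[i].status == Q_FREE)
			return i;

	/* pass 2: an inactive queue we may reclaim - either already ours,
	 * or another STA's as long as it isn't that STA's reserved one */
	for (int i = minq; i <= maxq; i++)
		if (q[i].status == Q_INACTIVE &&
		    (q[i].ra_sta_id == sta_id || !q[i].reserved))
			return i;

	return -1;	/* the driver returns -ENOSPC here */
}

int main(void)
{
	struct q_info q[3] = {
		{ Q_READY,    1, 0, false },
		{ Q_INACTIVE, 1, 2, true  },	/* reserved for STA 2 */
		{ Q_INACTIVE, 1, 5, true  },	/* reserved for STA 5 */
	};

	printf("STA 5 gets queue %d\n", find_queue(q, 0, 2, 5));	/* 2 */
	printf("STA 9 gets queue %d\n", find_queue(q, 0, 2, 9));	/* -1 */
	return 0;
}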
@@ -643,13 +655,21 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
}
/* Update mappings and refcounts */
+ if (mvm->queue_info[queue].hw_queue_refcount > 0)
+ enable_queue = false;
+
mvm->queue_info[queue].hw_queue_to_mac80211 |= BIT(mac80211_queue);
mvm->queue_info[queue].hw_queue_refcount++;
- if (mvm->queue_info[queue].hw_queue_refcount > 1)
- enable_queue = false;
- else
- mvm->queue_info[queue].ra_sta_id = cfg->sta_id;
mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid);
+ mvm->queue_info[queue].ra_sta_id = cfg->sta_id;
+
+ if (enable_queue) {
+ if (cfg->tid != IWL_MAX_TID_COUNT)
+ mvm->queue_info[queue].mac80211_ac =
+ tid_to_mac80211_ac[cfg->tid];
+ else
+ mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
+ }
IWL_DEBUG_TX_QUEUES(mvm,
"Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
@@ -671,6 +691,10 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
.tid = cfg->tid,
};
+ /* Set sta_id in the command, if it exists */
+ if (iwl_mvm_is_dqa_supported(mvm))
+ cmd.sta_id = cfg->sta_id;
+
iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL,
wdg_timeout);
WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
@@ -752,6 +776,9 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
mvm->queue_info[queue].tid_bitmap = 0;
mvm->queue_info[queue].hw_queue_to_mac80211 = 0;
+	/* Whether or not this is a STA's reserved TXQ - mark it as not reserved */
+ mvm->queue_info[queue].reserved = false;
+
spin_unlock_bh(&mvm->queue_info_lock);
iwl_trans_txq_disable(mvm->trans, queue, false);
@@ -1039,6 +1066,155 @@ out:
ieee80211_connection_loss(vif);
}
+/*
+ * Remove inactive TIDs of a given queue.
+ * If all queue TIDs are inactive - mark the queue as inactive
+ * If only some the queue TIDs are inactive - unmap them from the queue
+ */
+static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta, int queue,
+ unsigned long tid_bitmap)
+{
+ int tid;
+
+ lockdep_assert_held(&mvmsta->lock);
+ lockdep_assert_held(&mvm->queue_info_lock);
+
+ /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
+ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+ /* If some TFDs are still queued - don't mark TID as inactive */
+ if (iwl_mvm_tid_queued(&mvmsta->tid_data[tid]))
+ tid_bitmap &= ~BIT(tid);
+ }
+
+ /* If all TIDs in the queue are inactive - mark queue as inactive. */
+ if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
+
+ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1)
+ mvmsta->tid_data[tid].is_tid_active = false;
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Queue %d marked as inactive\n",
+ queue);
+ return;
+ }
+
+ /*
+ * If we are here, this is a shared queue and not all of its TIDs
+ * timed out. Remove the ones that did.
+ */
+ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+ int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
+
+ mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE;
+ mvm->queue_info[queue].hw_queue_to_mac80211 &= ~BIT(mac_queue);
+ mvm->queue_info[queue].hw_queue_refcount--;
+ mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
+ mvmsta->tid_data[tid].is_tid_active = false;
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Removing inactive TID %d from shared Q:%d\n",
+ tid, queue);
+ }
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "TXQ #%d left with tid bitmap 0x%x\n", queue,
+ mvm->queue_info[queue].tid_bitmap);
+
+ /*
+ * Different TIDs may map to the same mac queue, so make sure the mac
+ * queues corresponding to all remaining TIDs stay enabled.
+ */
+ tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+ mvm->queue_info[queue].hw_queue_to_mac80211 |=
+ BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
+ }
+
+ /* TODO: if queue was shared - need to re-enable AGGs */
+}
+
+void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
+{
+ unsigned long timeout_queues_map = 0;
+ unsigned long now = jiffies;
+ int i;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ for (i = 0; i < IWL_MAX_HW_QUEUES; i++)
+ if (mvm->queue_info[i].hw_queue_refcount > 0)
+ timeout_queues_map |= BIT(i);
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ rcu_read_lock();
+
+ /*
+ * If a queue times out, mark it as INACTIVE (don't remove it right
+ * away if we don't have to). This is an optimization in case traffic
+ * comes back later, since we don't HAVE to repurpose a
+ * currently-inactive queue.
+ */
+ for_each_set_bit(i, &timeout_queues_map, IWL_MAX_HW_QUEUES) {
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ u8 sta_id;
+ int tid;
+ unsigned long inactive_tid_bitmap = 0;
+ unsigned long queue_tid_bitmap;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
+
+ /* If TXQ isn't in active use anyway - nothing to do here... */
+ if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
+ mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED) {
+ spin_unlock_bh(&mvm->queue_info_lock);
+ continue;
+ }
+
+ /* Check to see if there are inactive TIDs on this queue */
+ for_each_set_bit(tid, &queue_tid_bitmap,
+ IWL_MAX_TID_COUNT + 1) {
+ if (time_after(mvm->queue_info[i].last_frame_time[tid] +
+ IWL_MVM_DQA_QUEUE_TIMEOUT, now))
+ continue;
+
+ inactive_tid_bitmap |= BIT(tid);
+ }
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ /* If all TIDs are active - finish check on this queue */
+ if (!inactive_tid_bitmap)
+ continue;
+
+ /*
+ * If we are here, the queue hasn't been served recently and is
+ * still in use.
+ */
+
+ sta_id = mvm->queue_info[i].ra_sta_id;
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+ /*
+ * If the STA doesn't exist anymore, it isn't an error. It could be
+ * that it was removed since we collected the queues, in which case
+ * its queues should have been inactivated anyway.
+ */
+ if (IS_ERR_OR_NULL(sta))
+ continue;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ spin_lock_bh(&mvmsta->lock);
+ spin_lock(&mvm->queue_info_lock);
+ iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
+ inactive_tid_bitmap);
+ spin_unlock(&mvm->queue_info_lock);
+ spin_unlock_bh(&mvmsta->lock);
+ }
+
+ rcu_read_unlock();
+}
+
int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif,
enum iwl_lqm_cmd_operatrions operation,
u32 duration, u32 timeout)
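
The two-pass lookup the utils.c hunk adds to iwl_mvm_find_free_queue() is worth seeing in isolation: prefer a truly free queue, and only then settle for an inactive one that is safe to take over. Below is a minimal standalone sketch of that pattern; the types and names are illustrative stand-ins, not driver code.

#include <errno.h>
#include <stdbool.h>

enum queue_status { QUEUE_FREE, QUEUE_INACTIVE, QUEUE_READY };

struct queue_info {
	int refcount;			/* users currently mapped to the queue */
	enum queue_status status;
	unsigned char ra_sta_id;	/* station the queue was assigned to */
	bool reserved;			/* reserved for its station's use */
};

static int find_queue(const struct queue_info *q, int minq, int maxq,
		      unsigned char sta_id)
{
	int i;

	/* First pass: a completely free queue is always preferred */
	for (i = minq; i <= maxq; i++)
		if (q[i].refcount == 0 && q[i].status == QUEUE_FREE)
			return i;

	/*
	 * Second pass: an inactive queue may be reconfigured, but only if
	 * it already belongs to this station or isn't reserved for another
	 */
	for (i = minq; i <= maxq; i++)
		if (q[i].status == QUEUE_INACTIVE &&
		    (q[i].ra_sta_id == sta_id || !q[i].reserved))
			return i;

	return -ENOSPC;	/* caller treats this as "no queue available" */
}
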
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 6f020e4ec..78cf9a7f3 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -516,6 +516,9 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2526, 0x1420, iwl5165_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl5165_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)},
+
+/* a000 Series */
+ {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg)},
#endif /* CONFIG_IWLMVM */
{0}
@@ -607,7 +610,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
const struct iwl_cfg *cfg_9260lc __maybe_unused = NULL;
struct iwl_trans *iwl_trans;
- struct iwl_trans_pcie *trans_pcie;
int ret;
iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg);
@@ -645,12 +647,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#endif
pci_set_drvdata(pdev, iwl_trans);
+ iwl_trans->drv = iwl_drv_start(iwl_trans, cfg);
- trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
- trans_pcie->drv = iwl_drv_start(iwl_trans, cfg);
-
- if (IS_ERR(trans_pcie->drv)) {
- ret = PTR_ERR(trans_pcie->drv);
+ if (IS_ERR(iwl_trans->drv)) {
+ ret = PTR_ERR(iwl_trans->drv);
goto out_free_trans;
}
@@ -689,7 +689,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
out_free_drv:
- iwl_drv_stop(trans_pcie->drv);
+ iwl_drv_stop(iwl_trans->drv);
out_free_trans:
iwl_trans_pcie_free(iwl_trans);
return ret;
@@ -698,7 +698,6 @@ out_free_trans:
static void iwl_pci_remove(struct pci_dev *pdev)
{
struct iwl_trans *trans = pci_get_drvdata(pdev);
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
/* if RTPM was in use, restore it to the state before probe */
if (trans->runtime_pm_mode != IWL_PLAT_PM_MODE_DISABLED) {
@@ -709,7 +708,7 @@ static void iwl_pci_remove(struct pci_dev *pdev)
pm_runtime_forbid(trans->dev);
}
- iwl_drv_stop(trans_pcie->drv);
+ iwl_drv_stop(trans->drv);
iwl_trans_pcie_free(trans);
}
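
The drv.c and internal.h hunks move the ->drv handle from the PCIe-private struct onto the shared iwl_trans, which is what lets iwl_pci_remove() above drop the IWL_TRANS_GET_PCIE_TRANS() cast. A condensed sketch of the resulting shape, with illustrative names (only pci_get_drvdata() is the real API):

#include <linux/pci.h>

struct example_drv { int opaque; };

struct example_trans {
	struct example_drv *drv;	/* now lives on the shared object */
};

static void example_drv_stop(struct example_drv *drv)
{
	/* stand-in for iwl_drv_stop() */
}

static void example_remove(struct pci_dev *pdev)
{
	/* one drvdata lookup, no transport-specific cast required */
	struct example_trans *trans = pci_get_drvdata(pdev);

	example_drv_stop(trans->drv);
}
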
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 2d8cce290..11e347dd4 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -68,12 +68,14 @@ struct iwl_host_cmd;
* struct iwl_rx_mem_buffer
* @page_dma: bus address of rxb page
* @page: driver's pointer to the rxb page
+ * @invalid: rxb is in driver ownership - not owned by HW
* @vid: index of this rxb in the global table
*/
struct iwl_rx_mem_buffer {
dma_addr_t page_dma;
struct page *page;
u16 vid;
+ bool invalid;
struct list_head list;
};
@@ -230,15 +232,16 @@ struct iwl_queue {
#define TFD_CMD_SLOTS 32
/*
- * The FH will write back to the first TB only, so we need
- * to copy some data into the buffer regardless of whether
- * it should be mapped or not. This indicates how big the
- * first TB must be to include the scratch buffer. Since
- * the scratch is 4 bytes at offset 12, it's 16 now. If we
- * make it bigger then allocations will be bigger and copy
- * slower, so that's probably not useful.
+ * The FH will write back to the first TB only, so we need to copy some data
+ * into the buffer regardless of whether it should be mapped or not.
+ * This indicates how big the first TB must be to include the scratch buffer
+ * and the assigned PN.
+ * Since the PN is 16 bytes at offset 24, the size is now 40.
+ * If we make it bigger, allocations will be bigger and copying will be
+ * slower, so that's probably not useful.
*/
-#define IWL_HCMD_SCRATCHBUF_SIZE 16
+#define IWL_FIRST_TB_SIZE 40
+#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
struct iwl_pcie_txq_entry {
struct iwl_device_cmd *cmd;
@@ -248,20 +251,18 @@ struct iwl_pcie_txq_entry {
struct iwl_cmd_meta meta;
};
-struct iwl_pcie_txq_scratch_buf {
- struct iwl_cmd_header hdr;
- u8 buf[8];
- __le32 scratch;
+struct iwl_pcie_first_tb_buf {
+ u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};
/**
* struct iwl_txq - Tx Queue for DMA
* @q: generic Rx/Tx queue descriptor
* @tfds: transmit frame descriptors (DMA memory)
- * @scratchbufs: start of command headers, including scratch buffers, for
+ * @first_tb_bufs: start of command headers, including scratch buffers, for
* the writeback -- this is DMA memory and an array holding one buffer
* for each command on the queue
- * @scratchbufs_dma: DMA address for the scratchbufs start
+ * @first_tb_dma: DMA address for the first_tb_bufs start
* @entries: transmit entries (driver state)
* @lock: queue lock
* @stuck_timer: timer that fires if queue gets stuck
@@ -279,8 +280,8 @@ struct iwl_pcie_txq_scratch_buf {
struct iwl_txq {
struct iwl_queue q;
struct iwl_tfd *tfds;
- struct iwl_pcie_txq_scratch_buf *scratchbufs;
- dma_addr_t scratchbufs_dma;
+ struct iwl_pcie_first_tb_buf *first_tb_bufs;
+ dma_addr_t first_tb_dma;
struct iwl_pcie_txq_entry *entries;
spinlock_t lock;
unsigned long frozen_expiry_remainder;
@@ -296,10 +297,10 @@ struct iwl_txq {
};
static inline dma_addr_t
-iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
+iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
- return txq->scratchbufs_dma +
- sizeof(struct iwl_pcie_txq_scratch_buf) * idx;
+ return txq->first_tb_dma +
+ sizeof(struct iwl_pcie_first_tb_buf) * idx;
}
struct iwl_tso_hdr_page {
@@ -313,7 +314,6 @@ struct iwl_tso_hdr_page {
* @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
* @global_table: table mapping received VID from hw to rxb
* @rba: allocator for RX replenishing
- * @drv - pointer to iwl_drv
* @trans: pointer to the generic transport area
* @scd_base_addr: scheduler sram base address in SRAM
* @scd_bc_tbls: pointer to the byte count table of the scheduler
@@ -351,7 +351,6 @@ struct iwl_trans_pcie {
struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
struct iwl_rb_allocator rba;
struct iwl_trans *trans;
- struct iwl_drv *drv;
struct net_device napi_dev;
@@ -385,6 +384,8 @@ struct iwl_trans_pcie {
wait_queue_head_t wait_command_queue;
wait_queue_head_t d0i3_waitq;
+ u8 page_offs, dev_cmd_offs;
+
u8 cmd_queue;
u8 cmd_fifo;
unsigned int cmd_q_wdg_timeout;
@@ -471,6 +472,10 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
bool configure_scd);
+void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
+ bool shared_mode);
+void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
+ struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
@@ -690,4 +695,6 @@ static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);
+void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable);
+
#endif /* __iwl_trans_int_pcie_h__ */
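
The arithmetic behind the new IWL_FIRST_TB_SIZE above can be checked at compile time: the old scratch value was 4 bytes at offset 12, hence the previous size of 16; the assigned PN is 16 bytes at offset 24, hence 40; and each per-entry buffer is padded out to a 64-byte slot. A small self-contained check, where EXAMPLE_ALIGN mirrors the kernel's ALIGN() for illustration:

#define EXAMPLE_ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

#define OLD_SCRATCH_OFFSET	12
#define OLD_SCRATCH_LEN		4
#define PN_OFFSET		24
#define PN_LEN			16

_Static_assert(OLD_SCRATCH_OFFSET + OLD_SCRATCH_LEN == 16,
	       "old IWL_HCMD_SCRATCHBUF_SIZE");
_Static_assert(PN_OFFSET + PN_LEN == 40, "new IWL_FIRST_TB_SIZE");
_Static_assert(EXAMPLE_ALIGN(40, 64) == 64, "aligned per-entry slot size");
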
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index aaaf2ad6e..5c36e6d00 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -161,21 +161,21 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
return cpu_to_le32((u32)(dma_addr >> 8));
}
-static void iwl_pcie_write_prph_64_no_grab(struct iwl_trans *trans, u64 ofs,
- u64 val)
-{
- iwl_write_prph_no_grab(trans, ofs, val & 0xffffffff);
- iwl_write_prph_no_grab(trans, ofs + 4, val >> 32);
-}
-
/*
* iwl_pcie_rx_stop - stops the Rx DMA
*/
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
- iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
- return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
- FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+ if (trans->cfg->mq_rx_supported) {
+ iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
+ return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
+ RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
+ } else {
+ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
+ FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
+ 1000);
+ }
}
/*
@@ -211,12 +211,8 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
if (trans->cfg->mq_rx_supported)
iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
rxq->write_actual);
- /*
- * write to FH_RSCSR_CHNL0_WPTR register even in MQ as a W/A to
- * hardware shadow registers bug - writing to RFH_Q_FRBDCB_WIDX will
- * not wake the NIC.
- */
- iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
+ else
+ iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}
static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
@@ -237,10 +233,10 @@ static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
}
/*
- * iwl_pcie_rxq_mq_restock - restock implementation for multi-queue rx
+ * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
*/
-static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans,
- struct iwl_rxq *rxq)
+static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
+ struct iwl_rxq *rxq)
{
struct iwl_rx_mem_buffer *rxb;
@@ -263,7 +259,7 @@ static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans,
rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
list);
list_del(&rxb->list);
-
+ rxb->invalid = false;
/* first 12 bits are expected to be empty */
WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
/* Point to Rx buffer via next RBD in circular buffer */
@@ -285,10 +281,10 @@ static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans,
}
/*
- * iwl_pcie_rxq_sq_restock - restock implementation for single queue rx
+ * iwl_pcie_rxsq_restock - restock implementation for single queue rx
*/
-static void iwl_pcie_rxq_sq_restock(struct iwl_trans *trans,
- struct iwl_rxq *rxq)
+static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
+ struct iwl_rxq *rxq)
{
struct iwl_rx_mem_buffer *rxb;
@@ -314,6 +310,7 @@ static void iwl_pcie_rxq_sq_restock(struct iwl_trans *trans,
rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
list);
list_del(&rxb->list);
+ rxb->invalid = false;
/* Point to Rx buffer via next RBD in circular buffer */
bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
@@ -347,9 +344,9 @@ static
void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
if (trans->cfg->mq_rx_supported)
- iwl_pcie_rxq_mq_restock(trans, rxq);
+ iwl_pcie_rxmq_restock(trans, rxq);
else
- iwl_pcie_rxq_sq_restock(trans, rxq);
+ iwl_pcie_rxsq_restock(trans, rxq);
}
/*
@@ -764,6 +761,23 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}
+void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable)
+{
+ /*
+ * Turn on the chicken-bits that cause MAC wakeup for RX-related
+ * values.
+ * This costs some power, but is needed as a W/A for a 9000-series
+ * integrated A-step bug where shadow registers are not in the
+ * retention list, so their value is lost when the NIC powers down.
+ */
+ if (trans->cfg->integrated) {
+ iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
+ CSR_MAC_SHADOW_REG_CTRL_RX_WAKE);
+ iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTL2,
+ CSR_MAC_SHADOW_REG_CTL2_RX_WAKE);
+ }
+}
+
static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -796,17 +810,17 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
for (i = 0; i < trans->num_rx_queues; i++) {
/* Tell device where to find RBD free table in DRAM */
- iwl_pcie_write_prph_64_no_grab(trans,
- RFH_Q_FRBDCB_BA_LSB(i),
- trans_pcie->rxq[i].bd_dma);
+ iwl_write_prph64_no_grab(trans,
+ RFH_Q_FRBDCB_BA_LSB(i),
+ trans_pcie->rxq[i].bd_dma);
/* Tell device where to find RBD used table in DRAM */
- iwl_pcie_write_prph_64_no_grab(trans,
- RFH_Q_URBDCB_BA_LSB(i),
- trans_pcie->rxq[i].used_bd_dma);
+ iwl_write_prph64_no_grab(trans,
+ RFH_Q_URBDCB_BA_LSB(i),
+ trans_pcie->rxq[i].used_bd_dma);
/* Tell device where in DRAM to update its Rx status */
- iwl_pcie_write_prph_64_no_grab(trans,
- RFH_Q_URBD_STTS_WPTR_LSB(i),
- trans_pcie->rxq[i].rb_stts_dma);
+ iwl_write_prph64_no_grab(trans,
+ RFH_Q_URBD_STTS_WPTR_LSB(i),
+ trans_pcie->rxq[i].rb_stts_dma);
/* Reset device index tables */
iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
@@ -815,33 +829,32 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
enabled |= BIT(i) | BIT(i + 16);
}
- /* restock default queue */
- iwl_pcie_rxq_mq_restock(trans, &trans_pcie->rxq[0]);
-
/*
* Enable Rx DMA
- * Single frame mode
* Rx buffer size 4 or 8k or 12k
* Min RB size 4 or 8
* Drop frames that exceed RB size
* 512 RBDs
*/
iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
- RFH_DMA_EN_ENABLE_VAL |
- rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK |
+ RFH_DMA_EN_ENABLE_VAL | rb_size |
RFH_RXF_DMA_MIN_RB_4_8 |
RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
RFH_RXF_DMA_RBDCB_SIZE_512);
/*
* Activate DMA snooping.
- * Set RX DMA chunk size to 64B
+ * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
* Default queue is 0
*/
iwl_write_prph_no_grab(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP |
(DEFAULT_RXQ_NUM <<
RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) |
- RFH_GEN_CFG_SERVICE_DMA_SNOOP);
+ RFH_GEN_CFG_SERVICE_DMA_SNOOP |
+ (trans->cfg->integrated ?
+ RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
+ RFH_GEN_CFG_RB_CHUNK_SIZE_128) <<
+ RFH_GEN_CFG_RB_CHUNK_SIZE_POS);
/* Enable the relevant rx queues */
iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);
@@ -849,6 +862,8 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
/* Set interrupt coalescing timer to default (2048 usecs) */
iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+
+ iwl_pcie_enable_rx_wake(trans, true);
}
static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
@@ -939,16 +954,18 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
else
list_add(&rxb->list, &def_rxq->rx_used);
trans_pcie->global_table[i] = rxb;
- rxb->vid = (u16)i;
+ rxb->vid = (u16)(i + 1);
+ rxb->invalid = true;
}
iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
- if (trans->cfg->mq_rx_supported) {
+
+ if (trans->cfg->mq_rx_supported)
iwl_pcie_rx_mq_hw_init(trans);
- } else {
- iwl_pcie_rxq_sq_restock(trans, def_rxq);
+ else
iwl_pcie_rx_hw_init(trans, def_rxq);
- }
+
+ iwl_pcie_rxq_restock(trans, def_rxq);
spin_lock(&def_rxq->lock);
iwl_pcie_rxq_inc_wr_ptr(trans, def_rxq);
@@ -1087,6 +1104,9 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
break;
+ WARN_ON((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
+ FH_RSCSR_RXQ_POS != rxq->id);
+
IWL_DEBUG_RX(trans,
"cmd at offset %d: %s (0x%.2x, seq 0x%x)\n",
rxcb._offset,
@@ -1224,10 +1244,19 @@ restart:
*/
u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF;
- if (WARN(vid >= ARRAY_SIZE(trans_pcie->global_table),
- "Invalid rxb index from HW %u\n", (u32)vid))
+ if (WARN(!vid ||
+ vid > ARRAY_SIZE(trans_pcie->global_table),
+ "Invalid rxb index from HW %u\n", (u32)vid)) {
+ iwl_force_nmi(trans);
goto out;
- rxb = trans_pcie->global_table[vid];
+ }
+ rxb = trans_pcie->global_table[vid - 1];
+ if (WARN(rxb->invalid,
+ "Invalid rxb from HW %u\n", (u32)vid)) {
+ iwl_force_nmi(trans);
+ goto out;
+ }
+ rxb->invalid = true;
} else {
rxb = rxq->queue[i];
rxq->queue[i] = NULL;
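
The rx.c hunks combine two defenses: VIDs handed to hardware become 1-based, so 0 can never alias a valid table entry, and each buffer carries an ownership flag so a buffer returned twice by hardware is caught before use. A minimal sketch of that validation with illustrative types (the real code fires an NMI instead of returning NULL):

#include <stdbool.h>
#include <stddef.h>

struct rx_buf {
	bool invalid;	/* true while the driver owns the buffer */
};

static struct rx_buf *lookup_rxb(struct rx_buf **table, size_t n,
				 unsigned int vid)
{
	struct rx_buf *rxb;

	/* vid 0 and out-of-range values can only come from a HW bug */
	if (!vid || vid > n)
		return NULL;

	rxb = table[vid - 1];	/* stored 1-based, indexed 0-based */

	/* a buffer the driver still owns must not come back from HW */
	if (rxb->invalid)
		return NULL;

	rxb->invalid = true;	/* ownership returns to the driver */
	return rxb;
}
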
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index d9f139462..74f2f035b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -608,18 +608,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
/*
* ucode
*/
-static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
- dma_addr_t phy_addr, u32 byte_cnt)
+static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans,
+ u32 dst_addr, dma_addr_t phy_addr,
+ u32 byte_cnt)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
- int ret;
-
- trans_pcie->ucode_write_complete = false;
-
- if (!iwl_trans_grab_nic_access(trans, &flags))
- return -EIO;
-
iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
@@ -642,7 +634,50 @@ static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
+}
+
+static void iwl_pcie_load_firmware_chunk_tfh(struct iwl_trans *trans,
+ u32 dst_addr, dma_addr_t phy_addr,
+ u32 byte_cnt)
+{
+ /* Stop DMA channel */
+ iwl_write32(trans, TFH_SRV_DMA_CHNL0_CTRL, 0);
+
+ /* Configure SRAM address */
+ iwl_write32(trans, TFH_SRV_DMA_CHNL0_SRAM_ADDR,
+ dst_addr);
+
+ /* Configure DRAM address - 64 bit */
+ iwl_write64(trans, TFH_SRV_DMA_CHNL0_DRAM_ADDR, phy_addr);
+ /* Configure byte count to transfer */
+ iwl_write32(trans, TFH_SRV_DMA_CHNL0_BC, byte_cnt);
+
+ /* Enable the DRAM2SRAM to start */
+ iwl_write32(trans, TFH_SRV_DMA_CHNL0_CTRL, TFH_SRV_DMA_SNOOP |
+ TFH_SRV_DMA_TO_DRIVER |
+ TFH_SRV_DMA_START);
+}
+
+static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
+ u32 dst_addr, dma_addr_t phy_addr,
+ u32 byte_cnt)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ unsigned long flags;
+ int ret;
+
+ trans_pcie->ucode_write_complete = false;
+
+ if (!iwl_trans_grab_nic_access(trans, &flags))
+ return -EIO;
+
+ if (trans->cfg->use_tfh)
+ iwl_pcie_load_firmware_chunk_tfh(trans, dst_addr, phy_addr,
+ byte_cnt);
+ else
+ iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
+ byte_cnt);
iwl_trans_release_nic_access(trans, &flags);
ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
@@ -1285,6 +1320,8 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ iwl_pcie_enable_rx_wake(trans, false);
+
if (reset) {
/*
* reset TX queues -- some of their registers reset during S3
@@ -1310,6 +1347,8 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
return 0;
}
+ iwl_pcie_enable_rx_wake(trans, true);
+
/*
* Also enables interrupts - none will happen as the device doesn't
* know we're waking it up, only when the opmode actually tells it
@@ -1388,8 +1427,12 @@ static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
max_rx_vector = trans_pcie->allocated_vector - 1;
- if (!trans_pcie->msix_enabled)
+ if (!trans_pcie->msix_enabled) {
+ if (trans->cfg->mq_rx_supported)
+ iwl_write_prph(trans, UREG_CHICK,
+ UREG_CHICK_MSI_ENABLE);
return;
+ }
iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);
@@ -1634,6 +1677,9 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
trans_pcie->scd_set_active = trans_cfg->scd_set_active;
trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx;
+ trans_pcie->page_offs = trans_cfg->cb_data_offs;
+ trans_pcie->dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
+
trans->command_groups = trans_cfg->command_groups;
trans->command_groups_size = trans_cfg->command_groups_size;
@@ -1904,6 +1950,48 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
#define IWL_FLUSH_WAIT_MS 2000
+void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 scd_sram_addr;
+ u8 buf[16];
+ int cnt;
+
+ IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
+ txq->q.read_ptr, txq->q.write_ptr);
+
+ scd_sram_addr = trans_pcie->scd_base_addr +
+ SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
+ iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
+
+ iwl_print_hex_error(trans, buf, sizeof(buf));
+
+ for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
+ IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
+ iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));
+
+ for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
+ u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
+ u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
+ bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
+ u32 tbl_dw =
+ iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
+ SCD_TRANS_TBL_OFFSET_QUEUE(cnt));
+
+ if (cnt & 0x1)
+ tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
+ else
+ tbl_dw = tbl_dw & 0x0000FFFF;
+
+ IWL_ERR(trans,
+ "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
+ cnt, active ? "" : "in", fifo, tbl_dw,
+ iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
+ (TFD_QUEUE_SIZE_MAX - 1),
+ iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
+ }
+}
+
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1911,8 +1999,6 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
struct iwl_queue *q;
int cnt;
unsigned long now = jiffies;
- u32 scd_sram_addr;
- u8 buf[16];
int ret = 0;
/* waiting for all the tx frames complete might take a while */
@@ -1952,42 +2038,8 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
}
- if (!ret)
- return 0;
-
- IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
- txq->q.read_ptr, txq->q.write_ptr);
-
- scd_sram_addr = trans_pcie->scd_base_addr +
- SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
- iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
-
- iwl_print_hex_error(trans, buf, sizeof(buf));
-
- for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
- IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
- iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));
-
- for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
- u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
- u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
- bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
- u32 tbl_dw =
- iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
- SCD_TRANS_TBL_OFFSET_QUEUE(cnt));
-
- if (cnt & 0x1)
- tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
- else
- tbl_dw = tbl_dw & 0x0000FFFF;
-
- IWL_ERR(trans,
- "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
- cnt, active ? "" : "in", fifo, tbl_dw,
- iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
- (TFD_QUEUE_SIZE_MAX - 1),
- iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
- }
+ if (ret)
+ iwl_trans_pcie_log_scd_error(trans, txq);
return ret;
}
@@ -2736,6 +2788,8 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.txq_disable = iwl_trans_pcie_txq_disable,
.txq_enable = iwl_trans_pcie_txq_enable,
+ .txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,
+
.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
.freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,
.block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,
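
One detail of iwl_trans_pcie_log_scd_error() above deserves a note: the scheduler's translation table packs two 16-bit queue entries into each 32-bit word, which is why the code shifts for odd queue numbers and masks for even ones. A sketch of just that extraction:

#include <stdint.h>

static uint16_t scd_trans_tbl_entry(uint32_t word, unsigned int queue)
{
	/* odd queue -> high half-word, even queue -> low half-word */
	if (queue & 0x1)
		return (uint16_t)(word >> 16);
	return (uint16_t)(word & 0xFFFF);
}
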
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index d6beac9af..18650dccd 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -70,6 +70,7 @@
* Tx queue resumed.
*
***************************************************/
+
static int iwl_queue_space(const struct iwl_queue *q)
{
unsigned int max;
@@ -154,10 +155,6 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
struct iwl_txq *txq = (void *)data;
struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
- u32 scd_sram_addr = trans_pcie->scd_base_addr +
- SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
- u8 buf[16];
- int i;
spin_lock(&txq->lock);
/* check if triggered erroneously */
@@ -169,38 +166,8 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
jiffies_to_msecs(txq->wd_timeout));
- IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
- txq->q.read_ptr, txq->q.write_ptr);
-
- iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
-
- iwl_print_hex_error(trans, buf, sizeof(buf));
-
- for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
- IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
- iwl_read_direct32(trans, FH_TX_TRB_REG(i)));
-
- for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
- u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
- u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
- bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
- u32 tbl_dw =
- iwl_trans_read_mem32(trans,
- trans_pcie->scd_base_addr +
- SCD_TRANS_TBL_OFFSET_QUEUE(i));
-
- if (i & 0x1)
- tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
- else
- tbl_dw = tbl_dw & 0x0000FFFF;
- IWL_ERR(trans,
- "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
- i, active ? "" : "in", fifo, tbl_dw,
- iwl_read_prph(trans, SCD_QUEUE_RDPTR(i)) &
- (TFD_QUEUE_SIZE_MAX - 1),
- iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
- }
+ iwl_trans_pcie_log_scd_error(trans, txq);
iwl_force_nmi(trans);
}
@@ -393,7 +360,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
return;
}
- /* first TB is never freed - it's the scratchbuf data */
+ /* first TB is never freed - it's the bidirectional DMA data */
for (i = 1; i < num_tbs; i++) {
if (meta->flags & BIT(i + CMD_TB_BITMAP_POS))
@@ -491,7 +458,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
- size_t scratchbuf_sz;
+ size_t tb0_buf_sz;
int i;
if (WARN_ON(txq->entries || txq->tfds))
@@ -526,17 +493,14 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
if (!txq->tfds)
goto error;
- BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs));
- BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) !=
- sizeof(struct iwl_cmd_header) +
- offsetof(struct iwl_tx_cmd, scratch));
+ BUILD_BUG_ON(IWL_FIRST_TB_SIZE_ALIGN != sizeof(*txq->first_tb_bufs));
- scratchbuf_sz = sizeof(*txq->scratchbufs) * slots_num;
+ tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;
- txq->scratchbufs = dma_alloc_coherent(trans->dev, scratchbuf_sz,
- &txq->scratchbufs_dma,
+ txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
+ &txq->first_tb_dma,
GFP_KERNEL);
- if (!txq->scratchbufs)
+ if (!txq->first_tb_bufs)
goto err_free_tfds;
txq->q.id = txq_id;
@@ -578,22 +542,27 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
* Tell nic where to find circular buffer of Tx Frame Descriptors for
* given Tx queue, and enable the DMA channel used for that queue.
* Circular buffer (TFD queue in DRAM) physical base address */
- iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
- txq->q.dma_addr >> 8);
+ if (trans->cfg->use_tfh)
+ iwl_write_direct64(trans,
+ FH_MEM_CBBC_QUEUE(trans, txq_id),
+ txq->q.dma_addr);
+ else
+ iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
+ txq->q.dma_addr >> 8);
return 0;
}
-static void iwl_pcie_free_tso_page(struct sk_buff *skb)
+static void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
+ struct sk_buff *skb)
{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct page **page_ptr;
- if (info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA]) {
- struct page *page =
- info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA];
+ page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
- __free_page(page);
- info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA] = NULL;
+ if (*page_ptr) {
+ __free_page(*page_ptr);
+ *page_ptr = NULL;
}
}
@@ -639,7 +608,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
if (WARN_ON_ONCE(!skb))
continue;
- iwl_pcie_free_tso_page(skb);
+ iwl_pcie_free_tso_page(trans_pcie, skb);
}
iwl_pcie_txq_free_tfd(trans, txq);
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
@@ -708,8 +677,8 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
txq->tfds = NULL;
dma_free_coherent(dev,
- sizeof(*txq->scratchbufs) * txq->q.n_window,
- txq->scratchbufs, txq->scratchbufs_dma);
+ sizeof(*txq->first_tb_bufs) * txq->q.n_window,
+ txq->first_tb_bufs, txq->first_tb_dma);
}
kfree(txq->entries);
@@ -786,9 +755,14 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
txq_id++) {
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
-
- iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
- txq->q.dma_addr >> 8);
+ if (trans->cfg->use_tfh)
+ iwl_write_direct64(trans,
+ FH_MEM_CBBC_QUEUE(trans, txq_id),
+ txq->q.dma_addr);
+ else
+ iwl_write_direct32(trans,
+ FH_MEM_CBBC_QUEUE(trans, txq_id),
+ txq->q.dma_addr >> 8);
iwl_pcie_txq_unmap(trans, txq_id);
txq->q.read_ptr = 0;
txq->q.write_ptr = 0;
@@ -996,6 +970,12 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
}
}
+ if (trans->cfg->use_tfh)
+ iwl_write_direct32(trans, TFH_TRANSFER_MODE,
+ TFH_TRANSFER_MAX_PENDING_REQ |
+ TFH_CHUNK_SIZE_128 |
+ TFH_CHUNK_SPLIT_MODE);
+
iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
if (trans->cfg->base_params->num_of_queues > 20)
iwl_set_bits_prph(trans, SCD_GP_CTRL,
@@ -1084,7 +1064,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
if (WARN_ON_ONCE(!skb))
continue;
- iwl_pcie_free_tso_page(skb);
+ iwl_pcie_free_tso_page(trans_pcie, skb);
__skb_queue_tail(skbs, skb);
@@ -1115,17 +1095,17 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
while (!skb_queue_empty(&overflow_skbs)) {
struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- u8 dev_cmd_idx = IWL_TRANS_FIRST_DRIVER_DATA + 1;
- struct iwl_device_cmd *dev_cmd =
- info->driver_data[dev_cmd_idx];
+ struct iwl_device_cmd *dev_cmd_ptr;
+
+ dev_cmd_ptr = *(void **)((u8 *)skb->cb +
+ trans_pcie->dev_cmd_offs);
/*
* Note that we can very well be overflowing again.
* In that case, iwl_queue_space will be small again
* and we won't wake mac80211's queue.
*/
- iwl_trans_pcie_tx(trans, skb, dev_cmd, txq_id);
+ iwl_trans_pcie_tx(trans, skb, dev_cmd_ptr, txq_id);
}
spin_lock_bh(&txq->lock);
@@ -1354,6 +1334,15 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
txq->active = true;
}
+void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
+ bool shared_mode)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+
+ txq->ampdu = !shared_mode;
+}
+
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
bool configure_scd)
{
@@ -1413,7 +1402,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
void *dup_buf = NULL;
dma_addr_t phys_addr;
int idx;
- u16 copy_size, cmd_size, scratch_size;
+ u16 copy_size, cmd_size, tb0_size;
bool had_nocopy = false;
u8 group_id = iwl_cmd_groupid(cmd->id);
int i, ret;
@@ -1444,9 +1433,9 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
if (!cmd->len[i])
continue;
- /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
- if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
- int copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
+ /* need at least IWL_FIRST_TB_SIZE copied */
+ if (copy_size < IWL_FIRST_TB_SIZE) {
+ int copy = IWL_FIRST_TB_SIZE - copy_size;
if (copy > cmdlen[i])
copy = cmdlen[i];
@@ -1567,8 +1556,8 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
}
/*
- * Otherwise we need at least IWL_HCMD_SCRATCHBUF_SIZE copied
- * in total (for the scratchbuf handling), but copy up to what
+ * Otherwise we need at least IWL_FIRST_TB_SIZE copied
+ * in total (for bi-directional DMA), but copy up to what
* we can fit into the payload for debug dump purposes.
*/
copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
@@ -1577,8 +1566,8 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
cmd_pos += copy;
/* However, treat copy_size the proper way, we need it below */
- if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
- copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
+ if (copy_size < IWL_FIRST_TB_SIZE) {
+ copy = IWL_FIRST_TB_SIZE - copy_size;
if (copy > cmd->len[i])
copy = cmd->len[i];
@@ -1593,18 +1582,18 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
le16_to_cpu(out_cmd->hdr.sequence),
cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
- /* start the TFD with the scratchbuf */
- scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE);
- memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size);
+ /* start the TFD with the minimum copy bytes */
+ tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
+ memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
iwl_pcie_txq_build_tfd(trans, txq,
- iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr),
- scratch_size, true);
+ iwl_pcie_get_first_tb_dma(txq, idx),
+ tb0_size, true);
/* map first command fragment, if any remains */
- if (copy_size > scratch_size) {
+ if (copy_size > tb0_size) {
phys_addr = dma_map_single(trans->dev,
- ((u8 *)&out_cmd->hdr) + scratch_size,
- copy_size - scratch_size,
+ ((u8 *)&out_cmd->hdr) + tb0_size,
+ copy_size - tb0_size,
DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {
iwl_pcie_tfd_unmap(trans, out_meta,
@@ -1614,7 +1603,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
}
iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
- copy_size - scratch_size, false);
+ copy_size - tb0_size, false);
}
/* map the remaining (adjusted) nocopy/dup fragments */
@@ -1959,7 +1948,7 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
trace_iwlwifi_dev_tx(trans->dev, skb,
&txq->tfds[txq->q.write_ptr],
sizeof(struct iwl_tfd),
- &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
+ &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
skb->data + hdr_len, tb2_len);
trace_iwlwifi_dev_tx_data(trans->dev, skb,
hdr_len, skb->len - hdr_len);
@@ -2015,7 +2004,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_cmd_meta *out_meta,
struct iwl_device_cmd *dev_cmd, u16 tb1_len)
{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
@@ -2024,6 +2012,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
u16 length, iv_len, amsdu_pad;
u8 *start_hdr;
struct iwl_tso_hdr_page *hdr_page;
+ struct page **page_ptr;
int ret;
struct tso_t tso;
@@ -2035,7 +2024,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
trace_iwlwifi_dev_tx(trans->dev, skb,
&txq->tfds[txq->q.write_ptr],
sizeof(struct iwl_tfd),
- &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
+ &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
NULL, 0);
ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
@@ -2054,7 +2043,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
get_page(hdr_page->page);
start_hdr = hdr_page->pos;
- info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA] = hdr_page->page;
+ page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+ *page_ptr = hdr_page->page;
memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
hdr_page->pos += iv_len;
@@ -2264,10 +2254,12 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
/* don't put the packet on the ring, if there is no room */
if (unlikely(iwl_queue_space(q) < 3)) {
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct iwl_device_cmd **dev_cmd_ptr;
+
+ dev_cmd_ptr = (void *)((u8 *)skb->cb +
+ trans_pcie->dev_cmd_offs);
- info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA + 1] =
- dev_cmd;
+ *dev_cmd_ptr = dev_cmd;
__skb_queue_tail(&txq->overflow_q, skb);
spin_unlock(&txq->lock);
@@ -2294,7 +2286,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
INDEX_TO_SEQ(q->write_ptr)));
- tb0_phys = iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr);
+ tb0_phys = iwl_pcie_get_first_tb_dma(txq, q->write_ptr);
scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
offsetof(struct iwl_tx_cmd, scratch);
@@ -2312,7 +2304,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
* setup of the first TB)
*/
len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
- hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
+ hdr_len - IWL_FIRST_TB_SIZE;
/* do not align A-MSDU to dword as the subframe header aligns it */
amsdu = ieee80211_is_data_qos(fc) &&
(*ieee80211_get_qos_ctl(hdr) &
@@ -2326,17 +2318,17 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
tb1_len = len;
}
- /* The first TB points to the scratchbuf data - min_copy bytes */
- memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
- IWL_HCMD_SCRATCHBUF_SIZE);
+ /* The first TB points to bi-directional DMA data */
+ memcpy(&txq->first_tb_bufs[q->write_ptr], &dev_cmd->hdr,
+ IWL_FIRST_TB_SIZE);
iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
- IWL_HCMD_SCRATCHBUF_SIZE, true);
+ IWL_FIRST_TB_SIZE, true);
/* there must be data left over for TB1 or this code must be changed */
- BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE);
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);
/* map the data for TB1 */
- tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_HCMD_SCRATCHBUF_SIZE;
+ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
goto out_err;
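
Several tx.c hunks switch from mac80211's info->driver_data slots to stashing pointers directly in skb->cb at offsets the opmode configures (page_offs and dev_cmd_offs above). A minimal sketch of that storage pattern, with a plain char array standing in for skb->cb:

#include <stddef.h>

#define CB_SIZE 48	/* matches sizeof(skb->cb) in the kernel */

struct fake_skb { char cb[CB_SIZE]; };

static void stash_ptr(struct fake_skb *skb, size_t offs, void *ptr)
{
	void **slot = (void *)(skb->cb + offs);	/* as the driver casts it */

	*slot = ptr;
}

static void *fetch_ptr(struct fake_skb *skb, size_t offs)
{
	void **slot = (void *)(skb->cb + offs);

	return *slot;
}

The offsets must leave room for a pointer and stay inside the control buffer, which is why the driver derives dev_cmd_offs as cb_data_offs + sizeof(void *).
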
diff --git a/drivers/net/wireless/intersil/orinoco/scan.c b/drivers/net/wireless/intersil/orinoco/scan.c
index d0ceb06c7..6d1d08485 100644
--- a/drivers/net/wireless/intersil/orinoco/scan.c
+++ b/drivers/net/wireless/intersil/orinoco/scan.c
@@ -237,7 +237,11 @@ void orinoco_add_hostscan_results(struct orinoco_private *priv,
scan_abort:
if (priv->scan_request) {
- cfg80211_scan_done(priv->scan_request, abort);
+ struct cfg80211_scan_info info = {
+ .aborted = abort,
+ };
+
+ cfg80211_scan_done(priv->scan_request, &info);
priv->scan_request = NULL;
}
}
@@ -245,7 +249,11 @@ void orinoco_add_hostscan_results(struct orinoco_private *priv,
void orinoco_scan_done(struct orinoco_private *priv, bool abort)
{
if (priv->scan_request) {
- cfg80211_scan_done(priv->scan_request, abort);
+ struct cfg80211_scan_info info = {
+ .aborted = abort,
+ };
+
+ cfg80211_scan_done(priv->scan_request, &info);
priv->scan_request = NULL;
}
}
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 4dd5adcdd..8c35ac838 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -30,6 +30,8 @@
#include <linux/module.h>
#include <linux/ktime.h>
#include <net/genetlink.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
#include "mac80211_hwsim.h"
#define WARN_QUEUE 100
@@ -39,8 +41,6 @@ MODULE_AUTHOR("Jouni Malinen");
MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211");
MODULE_LICENSE("GPL");
-static u32 wmediumd_portid;
-
static int radios = 2;
module_param(radios, int, 0444);
MODULE_PARM_DESC(radios, "Number of simulated radios");
@@ -250,6 +250,43 @@ static inline void hwsim_clear_chanctx_magic(struct ieee80211_chanctx_conf *c)
cp->magic = 0;
}
+static int hwsim_net_id;
+
+static int hwsim_netgroup;
+
+struct hwsim_net {
+ int netgroup;
+ u32 wmediumd;
+};
+
+static inline int hwsim_net_get_netgroup(struct net *net)
+{
+ struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id);
+
+ return hwsim_net->netgroup;
+}
+
+static inline void hwsim_net_set_netgroup(struct net *net)
+{
+ struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id);
+
+ hwsim_net->netgroup = hwsim_netgroup++;
+}
+
+static inline u32 hwsim_net_get_wmediumd(struct net *net)
+{
+ struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id);
+
+ return hwsim_net->wmediumd;
+}
+
+static inline void hwsim_net_set_wmediumd(struct net *net, u32 portid)
+{
+ struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id);
+
+ hwsim_net->wmediumd = portid;
+}
+
static struct class *hwsim_class;
static struct net_device *hwsim_mon; /* global monitor netdev */
@@ -420,10 +457,6 @@ static const struct ieee80211_iface_limit hwsim_if_limits[] = {
{ .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) }
};
-static const struct ieee80211_iface_limit hwsim_if_dfs_limits[] = {
- { .max = 8, .types = BIT(NL80211_IFTYPE_AP) },
-};
-
static const struct ieee80211_iface_combination hwsim_if_comb[] = {
{
.limits = hwsim_if_limits,
@@ -431,18 +464,12 @@ static const struct ieee80211_iface_combination hwsim_if_comb[] = {
.n_limits = ARRAY_SIZE(hwsim_if_limits) - 1,
.max_interfaces = 2048,
.num_different_channels = 1,
- },
- {
- .limits = hwsim_if_dfs_limits,
- .n_limits = ARRAY_SIZE(hwsim_if_dfs_limits),
- .max_interfaces = 8,
- .num_different_channels = 1,
.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80) |
BIT(NL80211_CHAN_WIDTH_160),
- }
+ },
};
static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = {
@@ -451,18 +478,12 @@ static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = {
.n_limits = ARRAY_SIZE(hwsim_if_limits),
.max_interfaces = 2048,
.num_different_channels = 1,
- },
- {
- .limits = hwsim_if_dfs_limits,
- .n_limits = ARRAY_SIZE(hwsim_if_dfs_limits),
- .max_interfaces = 8,
- .num_different_channels = 1,
.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80) |
BIT(NL80211_CHAN_WIDTH_160),
- }
+ },
};
static spinlock_t hwsim_radio_lock;
@@ -526,6 +547,11 @@ struct mac80211_hwsim_data {
*/
u64 group;
+ /* group shared by radios created in the same netns */
+ int netgroup;
+ /* wmediumd portid responsible for netgroup of this radio */
+ u32 wmediumd;
+
int power_level;
/* difference between this hw's clock and the real clock, in usecs */
@@ -568,6 +594,7 @@ static struct genl_family hwsim_genl_family = {
.name = "MAC80211_HWSIM",
.version = 1,
.maxattr = HWSIM_ATTR_MAX,
+ .netnsok = true,
};
enum hwsim_multicast_groups {
@@ -955,6 +982,29 @@ static bool hwsim_ps_rx_ok(struct mac80211_hwsim_data *data,
return true;
}
+static int hwsim_unicast_netgroup(struct mac80211_hwsim_data *data,
+ struct sk_buff *skb, int portid)
+{
+ struct net *net;
+ bool found = false;
+ int res = -ENOENT;
+
+ rcu_read_lock();
+ for_each_net_rcu(net) {
+ if (data->netgroup == hwsim_net_get_netgroup(net)) {
+ res = genlmsg_unicast(net, skb, portid);
+ found = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ if (!found)
+ nlmsg_free(skb);
+
+ return res;
+}
+
static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
struct sk_buff *my_skb,
int dst_portid)
@@ -1034,7 +1084,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
goto nla_put_failure;
genlmsg_end(skb, msg_head);
- if (genlmsg_unicast(&init_net, skb, dst_portid))
+ if (hwsim_unicast_netgroup(data, skb, dst_portid))
goto err_free_txskb;
/* Enqueue the packet */
@@ -1202,6 +1252,9 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
if (!(data->group & data2->group))
continue;
+ if (data->netgroup != data2->netgroup)
+ continue;
+
if (!hwsim_chans_compat(chan, data2->tmp_chan) &&
!hwsim_chans_compat(chan, data2->channel)) {
ieee80211_iterate_active_interfaces_atomic(
@@ -1324,7 +1377,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
mac80211_hwsim_monitor_rx(hw, skb, channel);
/* wmediumd mode check */
- _portid = ACCESS_ONCE(wmediumd_portid);
+ _portid = ACCESS_ONCE(data->wmediumd);
if (_portid)
return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
@@ -1420,7 +1473,8 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
struct sk_buff *skb,
struct ieee80211_channel *chan)
{
- u32 _pid = ACCESS_ONCE(wmediumd_portid);
+ struct mac80211_hwsim_data *data = hw->priv;
+ u32 _pid = ACCESS_ONCE(data->wmediumd);
if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) {
struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
@@ -1887,8 +1941,12 @@ static void hw_scan_work(struct work_struct *work)
mutex_lock(&hwsim->mutex);
if (hwsim->scan_chan_idx >= req->n_channels) {
+ struct cfg80211_scan_info info = {
+ .aborted = false,
+ };
+
wiphy_debug(hwsim->hw->wiphy, "hw scan complete\n");
- ieee80211_scan_completed(hwsim->hw, false);
+ ieee80211_scan_completed(hwsim->hw, &info);
hwsim->hw_scan_request = NULL;
hwsim->hw_scan_vif = NULL;
hwsim->tmp_chan = NULL;
@@ -1973,13 +2031,16 @@ static void mac80211_hwsim_cancel_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct mac80211_hwsim_data *hwsim = hw->priv;
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
wiphy_debug(hw->wiphy, "hwsim cancel_hw_scan\n");
cancel_delayed_work_sync(&hwsim->hw_scan);
mutex_lock(&hwsim->mutex);
- ieee80211_scan_completed(hwsim->hw, true);
+ ieee80211_scan_completed(hwsim->hw, &info);
hwsim->tmp_chan = NULL;
hwsim->hw_scan_request = NULL;
hwsim->hw_scan_vif = NULL;
@@ -2349,6 +2410,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
struct ieee80211_hw *hw;
enum nl80211_band band;
const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
+ struct net *net;
int idx;
if (WARN_ON(param->channels > 1 && !param->use_chanctx))
@@ -2366,6 +2428,13 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
err = -ENOMEM;
goto failed;
}
+
+ if (info)
+ net = genl_info_net(info);
+ else
+ net = &init_net;
+ wiphy_net_set(hw->wiphy, net);
+
data = hw->priv;
data->hw = hw;
@@ -2409,13 +2478,14 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
hw->wiphy->max_scan_ssids = 255;
hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
hw->wiphy->max_remain_on_channel_duration = 1000;
- /* For channels > 1 DFS is not allowed */
- hw->wiphy->n_iface_combinations = 1;
hw->wiphy->iface_combinations = &data->if_combination;
if (param->p2p_device)
data->if_combination = hwsim_if_comb_p2p_dev[0];
else
data->if_combination = hwsim_if_comb[0];
+ hw->wiphy->n_iface_combinations = 1;
+ /* For channels > 1 DFS is not allowed */
+ data->if_combination.radar_detect_widths = 0;
data->if_combination.num_different_channels = data->channels;
} else if (param->p2p_device) {
hw->wiphy->iface_combinations = hwsim_if_comb_p2p_dev;
@@ -2541,6 +2611,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
data->group = 1;
mutex_init(&data->mutex);
+ data->netgroup = hwsim_net_get_netgroup(net);
+
/* Enable frame retransmissions for lossy channels */
hw->max_rates = 4;
hw->max_rate_tries = 11;
@@ -2755,6 +2827,20 @@ static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(const u8 *addr)
return data;
}
+static void hwsim_register_wmediumd(struct net *net, u32 portid)
+{
+ struct mac80211_hwsim_data *data;
+
+ hwsim_net_set_wmediumd(net, portid);
+
+ spin_lock_bh(&hwsim_radio_lock);
+ list_for_each_entry(data, &hwsim_radios, list) {
+ if (data->netgroup == hwsim_net_get_netgroup(net))
+ data->wmediumd = portid;
+ }
+ spin_unlock_bh(&hwsim_radio_lock);
+}
+
static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
struct genl_info *info)
{
@@ -2770,9 +2856,6 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
int i;
bool found = false;
- if (info->snd_portid != wmediumd_portid)
- return -EINVAL;
-
if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
!info->attrs[HWSIM_ATTR_FLAGS] ||
!info->attrs[HWSIM_ATTR_COOKIE] ||
@@ -2788,6 +2871,12 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
if (!data2)
goto out;
+ if (hwsim_net_get_netgroup(genl_info_net(info)) != data2->netgroup)
+ goto out;
+
+ if (info->snd_portid != data2->wmediumd)
+ goto out;
+
/* look for the skb matching the cookie passed back from user */
skb_queue_walk_safe(&data2->pending, skb, tmp) {
u64 skb_cookie;
@@ -2851,9 +2940,6 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
void *frame_data;
struct sk_buff *skb = NULL;
- if (info->snd_portid != wmediumd_portid)
- return -EINVAL;
-
if (!info->attrs[HWSIM_ATTR_ADDR_RECEIVER] ||
!info->attrs[HWSIM_ATTR_FRAME] ||
!info->attrs[HWSIM_ATTR_RX_RATE] ||
@@ -2879,6 +2965,12 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
if (!data2)
goto out;
+ if (hwsim_net_get_netgroup(genl_info_net(info)) != data2->netgroup)
+ goto out;
+
+ if (info->snd_portid != data2->wmediumd)
+ goto out;
+
/* check if radio is configured properly */
if (data2->idle || !data2->started)
@@ -2925,6 +3017,7 @@ out:
static int hwsim_register_received_nl(struct sk_buff *skb_2,
struct genl_info *info)
{
+ struct net *net = genl_info_net(info);
struct mac80211_hwsim_data *data;
int chans = 1;
@@ -2941,10 +3034,10 @@ static int hwsim_register_received_nl(struct sk_buff *skb_2,
if (chans > 1)
return -EOPNOTSUPP;
- if (wmediumd_portid)
+ if (hwsim_net_get_wmediumd(net))
return -EBUSY;
- wmediumd_portid = info->snd_portid;
+ hwsim_register_wmediumd(net, info->snd_portid);
printk(KERN_DEBUG "mac80211_hwsim: received a REGISTER, "
"switching to wmediumd mode with pid %d\n", info->snd_portid);
@@ -3014,6 +3107,9 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
continue;
}
+ if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info)))
+ continue;
+
list_del(&data->list);
spin_unlock_bh(&hwsim_radio_lock);
mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy),
@@ -3040,6 +3136,9 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
if (data->idx != idx)
continue;
+ if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info)))
+ continue;
+
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb) {
res = -ENOMEM;
@@ -3079,6 +3178,9 @@ static int hwsim_dump_radio_nl(struct sk_buff *skb,
if (data->idx < idx)
continue;
+ if (!net_eq(wiphy_net(data->hw->wiphy), sock_net(skb->sk)))
+ continue;
+
res = mac80211_hwsim_get_radio(skb, data,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, cb,
@@ -3102,7 +3204,7 @@ static const struct genl_ops hwsim_ops[] = {
.cmd = HWSIM_CMD_REGISTER,
.policy = hwsim_genl_policy,
.doit = hwsim_register_received_nl,
- .flags = GENL_ADMIN_PERM,
+ .flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = HWSIM_CMD_FRAME,
@@ -3118,13 +3220,13 @@ static const struct genl_ops hwsim_ops[] = {
.cmd = HWSIM_CMD_NEW_RADIO,
.policy = hwsim_genl_policy,
.doit = hwsim_new_radio_nl,
- .flags = GENL_ADMIN_PERM,
+ .flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = HWSIM_CMD_DEL_RADIO,
.policy = hwsim_genl_policy,
.doit = hwsim_del_radio_nl,
- .flags = GENL_ADMIN_PERM,
+ .flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = HWSIM_CMD_GET_RADIO,
@@ -3168,10 +3270,10 @@ static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
remove_user_radios(notify->portid);
- if (notify->portid == wmediumd_portid) {
+ if (notify->portid == hwsim_net_get_wmediumd(notify->net)) {
printk(KERN_INFO "mac80211_hwsim: wmediumd released netlink"
" socket, switching to perfect channel medium\n");
- wmediumd_portid = 0;
+ hwsim_register_wmediumd(notify->net, 0);
}
return NOTIFY_DONE;
@@ -3206,6 +3308,40 @@ failure:
return -EINVAL;
}
+static __net_init int hwsim_init_net(struct net *net)
+{
+ hwsim_net_set_netgroup(net);
+
+ return 0;
+}
+
+static void __net_exit hwsim_exit_net(struct net *net)
+{
+ struct mac80211_hwsim_data *data, *tmp;
+
+ spin_lock_bh(&hwsim_radio_lock);
+ list_for_each_entry_safe(data, tmp, &hwsim_radios, list) {
+ if (!net_eq(wiphy_net(data->hw->wiphy), net))
+ continue;
+
+ /* Radios created in init_net are returned to init_net. */
+ if (data->netgroup == hwsim_net_get_netgroup(&init_net))
+ continue;
+
+ list_del(&data->list);
+ INIT_WORK(&data->destroy_work, destroy_radio);
+ schedule_work(&data->destroy_work);
+ }
+ spin_unlock_bh(&hwsim_radio_lock);
+}
+
+static struct pernet_operations hwsim_net_ops = {
+ .init = hwsim_init_net,
+ .exit = hwsim_exit_net,
+ .id = &hwsim_net_id,
+ .size = sizeof(struct hwsim_net),
+};
+
static void hwsim_exit_netlink(void)
{
/* unregister the notifier */
@@ -3242,10 +3378,14 @@ static int __init init_mac80211_hwsim(void)
spin_lock_init(&hwsim_radio_lock);
INIT_LIST_HEAD(&hwsim_radios);
- err = platform_driver_register(&mac80211_hwsim_driver);
+ err = register_pernet_device(&hwsim_net_ops);
if (err)
return err;
+ err = platform_driver_register(&mac80211_hwsim_driver);
+ if (err)
+ goto out_unregister_pernet;
+
hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim");
if (IS_ERR(hwsim_class)) {
err = PTR_ERR(hwsim_class);
@@ -3363,6 +3503,8 @@ out_free_radios:
mac80211_hwsim_free();
out_unregister_driver:
platform_driver_unregister(&mac80211_hwsim_driver);
+out_unregister_pernet:
+ unregister_pernet_device(&hwsim_net_ops);
return err;
}
module_init(init_mac80211_hwsim);
@@ -3376,5 +3518,6 @@ static void __exit exit_mac80211_hwsim(void)
mac80211_hwsim_free();
unregister_netdev(hwsim_mon);
platform_driver_unregister(&mac80211_hwsim_driver);
+ unregister_pernet_device(&hwsim_net_ops);
}
module_exit(exit_mac80211_hwsim);
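
The hwsim changes above are the standard recipe for per-network-namespace driver state: a pernet_operations with .id/.size makes the core allocate a zeroed private area for every namespace, net_generic() retrieves it, and register_pernet_device()/unregister_pernet_device() bracket the module lifetime (note the init-order fix: the pernet registration now happens first and is unwound last). The GENL_UNS_ADMIN_PERM switch complements this by checking CAP_NET_ADMIN in the user namespace that owns the request's netns rather than the initial namespace. A minimal sketch of the same plumbing, with hypothetical demo_* names:

	#include <net/net_namespace.h>
	#include <net/netns/generic.h>

	struct demo_net {
		u32 wmediumd_portid;	/* per-namespace, as in hwsim_net */
	};

	static int demo_net_id;

	static __net_init int demo_init_net(struct net *net)
	{
		struct demo_net *dn = net_generic(net, demo_net_id);

		dn->wmediumd_portid = 0;	/* the area arrives zeroed anyway */
		return 0;
	}

	static struct pernet_operations demo_net_ops = {
		.init = demo_init_net,
		.id   = &demo_net_id,
		.size = sizeof(struct demo_net),
	};

	/* module init:  register_pernet_device(&demo_net_ops);
	 * module exit:  unregister_pernet_device(&demo_net_ops); */
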
diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
index 776b44bfd..7ff2efadc 100644
--- a/drivers/net/wireless/marvell/libertas/cfg.c
+++ b/drivers/net/wireless/marvell/libertas/cfg.c
@@ -796,10 +796,15 @@ void lbs_scan_done(struct lbs_private *priv)
{
WARN_ON(!priv->scan_req);
- if (priv->internal_scan)
+ if (priv->internal_scan) {
kfree(priv->scan_req);
- else
- cfg80211_scan_done(priv->scan_req, false);
+ } else {
+ struct cfg80211_scan_info info = {
+ .aborted = false,
+ };
+
+ cfg80211_scan_done(priv->scan_req, &info);
+ }
priv->scan_req = NULL;
}
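
This is the 4.8 scan-completion API: cfg80211_scan_done() now takes a struct cfg80211_scan_info instead of a bare bool, so further completion metadata can be added without another signature change. The calling shape, used throughout this patch:

	struct cfg80211_scan_info info = {
		.aborted = false,	/* true if the scan did not complete */
	};

	cfg80211_scan_done(priv->scan_req, &info);
	priv->scan_req = NULL;		/* must not be touched again after scan_done */
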
@@ -2039,8 +2044,8 @@ static int lbs_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
-int lbs_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
- bool enabled, int timeout)
+static int lbs_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ bool enabled, int timeout)
{
struct lbs_private *priv = wiphy_priv(wiphy);
diff --git a/drivers/net/wireless/marvell/libertas/cmdresp.c b/drivers/net/wireless/marvell/libertas/cmdresp.c
index c95bf6dc9..c753e36c2 100644
--- a/drivers/net/wireless/marvell/libertas/cmdresp.c
+++ b/drivers/net/wireless/marvell/libertas/cmdresp.c
@@ -27,6 +27,8 @@
void lbs_mac_event_disconnected(struct lbs_private *priv,
bool locally_generated)
{
+ unsigned long flags;
+
if (priv->connect_status != LBS_CONNECTED)
return;
@@ -46,9 +48,11 @@ void lbs_mac_event_disconnected(struct lbs_private *priv,
netif_carrier_off(priv->dev);
/* Free Tx and Rx packets */
+ spin_lock_irqsave(&priv->driver_lock, flags);
kfree_skb(priv->currenttxskb);
priv->currenttxskb = NULL;
priv->tx_pending_len = 0;
+ spin_unlock_irqrestore(&priv->driver_lock, flags);
priv->connect_status = LBS_DISCONNECTED;
diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
index 5b6cf1a7b..7fd8b68e5 100644
--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
+++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
@@ -1215,7 +1215,7 @@ static int if_sdio_probe(struct sdio_func *func,
}
spin_lock_init(&card->lock);
- card->workqueue = create_workqueue("libertas_sdio");
+ card->workqueue = alloc_workqueue("libertas_sdio", WQ_MEM_RECLAIM, 0);
INIT_WORK(&card->packet_worker, if_sdio_host_to_card_worker);
init_waitqueue_head(&card->pwron_waitq);
@@ -1313,7 +1313,6 @@ static void if_sdio_remove(struct sdio_func *func)
lbs_stop_card(card->priv);
lbs_remove_card(card->priv);
- flush_workqueue(card->workqueue);
destroy_workqueue(card->workqueue);
while (card->packets) {
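
The create_workqueue() -> alloc_workqueue(name, WQ_MEM_RECLAIM, 0) conversions across these libertas files make the reclaim dependency explicit (a rescuer thread guarantees forward progress when these packet-path queues are needed to free memory) and drop the legacy max_active limit of 1 in favor of the default. The removed flush_workqueue() calls were redundant: destroy_workqueue() drains all pending work before tearing the queue down. The resulting lifecycle:

	struct workqueue_struct *wq;

	wq = alloc_workqueue("example_wq", WQ_MEM_RECLAIM, 0);	/* name illustrative */
	if (!wq)
		return -ENOMEM;

	queue_work(wq, &card->packet_worker);	/* work items as above */

	destroy_workqueue(wq);	/* implies a final drain; no flush_workqueue() needed */
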
diff --git a/drivers/net/wireless/marvell/libertas/if_spi.c b/drivers/net/wireless/marvell/libertas/if_spi.c
index d51293895..e1ee7e68e 100644
--- a/drivers/net/wireless/marvell/libertas/if_spi.c
+++ b/drivers/net/wireless/marvell/libertas/if_spi.c
@@ -1172,7 +1172,7 @@ static int if_spi_probe(struct spi_device *spi)
priv->fw_ready = 1;
/* Initialize interrupt handling stuff. */
- card->workqueue = create_workqueue("libertas_spi");
+ card->workqueue = alloc_workqueue("libertas_spi", WQ_MEM_RECLAIM, 0);
INIT_WORK(&card->packet_work, if_spi_host_to_card_worker);
INIT_WORK(&card->resume_work, if_spi_resume_worker);
@@ -1200,7 +1200,6 @@ static int if_spi_probe(struct spi_device *spi)
release_irq:
free_irq(spi->irq, card);
terminate_workqueue:
- flush_workqueue(card->workqueue);
destroy_workqueue(card->workqueue);
lbs_remove_card(priv); /* will call free_netdev */
free_card:
@@ -1227,7 +1226,6 @@ static int libertas_spi_remove(struct spi_device *spi)
lbs_remove_card(priv); /* will call free_netdev */
free_irq(spi->irq, card);
- flush_workqueue(card->workqueue);
destroy_workqueue(card->workqueue);
if (card->pdata->teardown)
card->pdata->teardown(spi);
diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c
index 0bf8916a0..54e426c1e 100644
--- a/drivers/net/wireless/marvell/libertas_tf/main.c
+++ b/drivers/net/wireless/marvell/libertas_tf/main.c
@@ -16,7 +16,6 @@
#include <linux/module.h>
#include "libertas_tf.h"
-#define DRIVER_RELEASE_VERSION "004.p0"
/* thinfirm version: 5.132.X.pX */
#define LBTF_FW_VER_MIN 0x05840300
#define LBTF_FW_VER_MAX 0x0584ffff
@@ -27,12 +26,6 @@ unsigned int lbtf_debug;
EXPORT_SYMBOL_GPL(lbtf_debug);
module_param_named(libertas_tf_debug, lbtf_debug, int, 0644);
-static const char lbtf_driver_version[] = "THINFIRM-USB8388-" DRIVER_RELEASE_VERSION
-#ifdef DEBUG
- "-dbg"
-#endif
- "";
-
struct workqueue_struct *lbtf_wq;
static const struct ieee80211_channel lbtf_channels[] = {
@@ -742,7 +735,7 @@ EXPORT_SYMBOL_GPL(lbtf_bcn_sent);
static int __init lbtf_init_module(void)
{
lbtf_deb_enter(LBTF_DEB_MAIN);
- lbtf_wq = create_workqueue("libertastf");
+ lbtf_wq = alloc_workqueue("libertastf", WQ_MEM_RECLAIM, 0);
if (lbtf_wq == NULL) {
printk(KERN_ERR "libertastf: couldn't create workqueue\n");
return -ENOMEM;
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
index 1efef3b82..c47d63668 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
@@ -184,7 +184,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
tx_info_src = MWIFIEX_SKB_TXCB(skb_src);
skb_aggr = mwifiex_alloc_dma_align_buf(adapter->tx_buf_size,
- GFP_ATOMIC | GFP_DMA);
+ GFP_ATOMIC);
if (!skb_aggr) {
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
ra_list_flags);
@@ -205,7 +205,8 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
do {
/* Check if AMSDU can accommodate this MSDU */
- if (skb_tailroom(skb_aggr) < (skb_src->len + LLC_SNAP_LEN))
+ if ((skb_aggr->len + skb_src->len + LLC_SNAP_LEN) >
+ adapter->tx_buf_size)
break;
skb_src = skb_dequeue(&pra_list->skb_head);
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index ff948a922..a8ff969c9 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -377,6 +377,29 @@ mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy,
}
/*
+ * CFG802.11 operation handler to get Tx power.
+ */
+static int
+mwifiex_cfg80211_get_tx_power(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ int *dbm)
+{
+ struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
+ struct mwifiex_private *priv = mwifiex_get_priv(adapter,
+ MWIFIEX_BSS_ROLE_ANY);
+ int ret = mwifiex_send_cmd(priv, HostCmd_CMD_RF_TX_PWR,
+ HostCmd_ACT_GEN_GET, 0, NULL, true);
+
+ if (ret < 0)
+ return ret;
+
+ /* tx_power_level is set in HostCmd_CMD_RF_TX_PWR command handler */
+ *dbm = priv->tx_power_level;
+
+ return 0;
+}
+
+/*
* CFG802.11 operation handler to set Power Save option.
*
* The timeout value, if provided, is currently ignored.
@@ -1672,6 +1695,9 @@ static int mwifiex_cfg80211_change_beacon(struct wiphy *wiphy,
struct cfg80211_beacon_data *data)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ struct mwifiex_adapter *adapter = priv->adapter;
+
+ mwifiex_cancel_scan(adapter);
if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP) {
mwifiex_dbg(priv->adapter, ERROR,
@@ -1804,6 +1830,21 @@ mwifiex_cfg80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
HostCmd_ACT_GEN_SET, 0, &ant_cfg, true);
}
+static int
+mwifiex_cfg80211_get_antenna(struct wiphy *wiphy, u32 *tx_ant, u32 *rx_ant)
+{
+ struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
+ struct mwifiex_private *priv = mwifiex_get_priv(adapter,
+ MWIFIEX_BSS_ROLE_ANY);
+ mwifiex_send_cmd(priv, HostCmd_CMD_RF_ANTENNA,
+ HostCmd_ACT_GEN_GET, 0, NULL, true);
+
+ *tx_ant = priv->tx_ant;
+ *rx_ant = priv->rx_ant;
+
+ return 0;
+}
+
/* cfg80211 operation handler for stop ap.
* Function stops BSS running at uAP interface.
*/
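
Like the get_tx_power handler earlier in this file, this GET path follows the usual mwifiex read idiom: issue the HostCmd synchronously (the final true argument makes mwifiex_send_cmd() block until the response arrives), let the sta_cmdresp.c handler cache the result in struct mwifiex_private, then return the cached copy. A condensed form, here with the send result checked (which the antenna handler above happens to skip):

	static int example_get_antenna(struct mwifiex_private *priv,
				       u32 *tx_ant, u32 *rx_ant)
	{
		int ret = mwifiex_send_cmd(priv, HostCmd_CMD_RF_ANTENNA,
					   HostCmd_ACT_GEN_GET, 0, NULL, true);
		if (ret)
			return ret;

		*tx_ant = priv->tx_ant;	/* cached by mwifiex_ret_rf_antenna() */
		*rx_ant = priv->rx_ant;
		return 0;
	}
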
@@ -1895,10 +1936,9 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
mwifiex_set_uap_rates(bss_cfg, params);
if (mwifiex_set_secure_params(priv, bss_cfg, params)) {
- kfree(bss_cfg);
mwifiex_dbg(priv->adapter, ERROR,
"Failed to parse secuirty parameters!\n");
- return -1;
+ goto out;
}
mwifiex_set_ht_params(priv, bss_cfg, params);
@@ -1927,7 +1967,7 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
if (mwifiex_11h_activate(priv, false)) {
mwifiex_dbg(priv->adapter, ERROR,
"Failed to disable 11h extensions!!");
- return -1;
+ goto out;
}
priv->state_11h.is_11h_active = false;
}
@@ -1935,12 +1975,11 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
if (mwifiex_config_start_uap(priv, bss_cfg)) {
mwifiex_dbg(priv->adapter, ERROR,
"Failed to start AP\n");
- kfree(bss_cfg);
- return -1;
+ goto out;
}
if (mwifiex_set_mgmt_ies(priv, &params->beacon))
- return -1;
+ goto out;
if (!netif_carrier_ok(priv->netdev))
netif_carrier_on(priv->netdev);
@@ -1949,6 +1988,10 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
memcpy(&priv->bss_cfg, bss_cfg, sizeof(priv->bss_cfg));
kfree(bss_cfg);
return 0;
+
+out:
+ kfree(bss_cfg);
+ return -1;
}
/*
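
The start_ap rework above is the canonical kernel cleanup idiom: bss_cfg is allocated once, and every exit, success or failure, funnels through a single point that owns the kfree(), which closes the leak on the mwifiex_set_mgmt_ies() failure path and removes the scattered frees. Skeleton of the shape (helper names hypothetical):

	static int example_start_ap(void)
	{
		struct mwifiex_uap_bss_param *bss_cfg;
		int ret = -1;

		bss_cfg = kzalloc(sizeof(*bss_cfg), GFP_KERNEL);
		if (!bss_cfg)
			return -ENOMEM;

		if (configure_step(bss_cfg))	/* hypothetical */
			goto out;
		if (activate_step(bss_cfg))	/* hypothetical */
			goto out;

		ret = 0;
	out:
		kfree(bss_cfg);		/* the one and only free */
		return ret;
	}
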
@@ -2209,6 +2252,9 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
return -EALREADY;
}
+ if (priv->scan_block)
+ priv->scan_block = false;
+
if (adapter->surprise_removed || adapter->is_cmd_timedout) {
mwifiex_dbg(adapter, ERROR,
"%s: Ignore connection.\t"
@@ -2427,6 +2473,9 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
return -EBUSY;
}
+ if (!priv->wdev.current_bss && priv->scan_block)
+ priv->scan_block = false;
+
if (!mwifiex_stop_bg_scan(priv))
cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy);
@@ -2734,6 +2783,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
struct mwifiex_private *priv;
struct net_device *dev;
void *mdev_priv;
+ int ret;
if (!adapter)
return ERR_PTR(-EFAULT);
@@ -2859,6 +2909,15 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
mwifiex_init_priv_params(priv, dev);
priv->netdev = dev;
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
+ HostCmd_ACT_GEN_SET, 0, NULL, true);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = mwifiex_sta_init_cmd(priv, false, false);
+ if (ret)
+ return ERR_PTR(ret);
+
mwifiex_setup_ht_caps(&wiphy->bands[NL80211_BAND_2GHZ]->ht_cap, priv);
if (adapter->is_hw_11ac_capable)
mwifiex_setup_vht_caps(
@@ -3262,7 +3321,10 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
struct mwifiex_ds_hs_cfg hs_cfg;
int i, ret = 0, retry_num = 10;
struct mwifiex_private *priv;
+ struct mwifiex_private *sta_priv =
+ mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
+ sta_priv->scan_aborting = true;
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
mwifiex_abort_cac(priv);
@@ -3291,21 +3353,21 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
if (!wowlan) {
mwifiex_dbg(adapter, ERROR,
"None of the WOWLAN triggers enabled\n");
- return 0;
+ ret = 0;
+ goto done;
}
- priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
-
- if (!priv->media_connected && !wowlan->nd_config) {
+ if (!sta_priv->media_connected && !wowlan->nd_config) {
mwifiex_dbg(adapter, ERROR,
"Can not configure WOWLAN in disconnected state\n");
- return 0;
+ ret = 0;
+ goto done;
}
- ret = mwifiex_set_mef_filter(priv, wowlan);
+ ret = mwifiex_set_mef_filter(sta_priv, wowlan);
if (ret) {
mwifiex_dbg(adapter, ERROR, "Failed to set MEF filter\n");
- return ret;
+ goto done;
}
memset(&hs_cfg, 0, sizeof(hs_cfg));
@@ -3314,26 +3376,25 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
if (wowlan->nd_config) {
mwifiex_dbg(adapter, INFO, "Wake on net detect\n");
hs_cfg.conditions |= HS_CFG_COND_MAC_EVENT;
- mwifiex_cfg80211_sched_scan_start(wiphy, priv->netdev,
+ mwifiex_cfg80211_sched_scan_start(wiphy, sta_priv->netdev,
wowlan->nd_config);
}
if (wowlan->disconnect) {
hs_cfg.conditions |= HS_CFG_COND_MAC_EVENT;
- mwifiex_dbg(priv->adapter, INFO, "Wake on device disconnect\n");
+ mwifiex_dbg(sta_priv->adapter, INFO, "Wake on device disconnect\n");
}
hs_cfg.is_invoke_hostcmd = false;
hs_cfg.gpio = adapter->hs_cfg.gpio;
hs_cfg.gap = adapter->hs_cfg.gap;
- ret = mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET,
+ ret = mwifiex_set_hs_params(sta_priv, HostCmd_ACT_GEN_SET,
MWIFIEX_SYNC_CMD, &hs_cfg);
- if (ret) {
- mwifiex_dbg(adapter, ERROR,
- "Failed to set HS params\n");
- return ret;
- }
+ if (ret)
+ mwifiex_dbg(adapter, ERROR, "Failed to set HS params\n");
+done:
+ sta_priv->scan_aborting = false;
return ret;
}
@@ -3940,12 +4001,14 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
.set_default_key = mwifiex_cfg80211_set_default_key,
.set_power_mgmt = mwifiex_cfg80211_set_power_mgmt,
.set_tx_power = mwifiex_cfg80211_set_tx_power,
+ .get_tx_power = mwifiex_cfg80211_get_tx_power,
.set_bitrate_mask = mwifiex_cfg80211_set_bitrate_mask,
.start_ap = mwifiex_cfg80211_start_ap,
.stop_ap = mwifiex_cfg80211_stop_ap,
.change_beacon = mwifiex_cfg80211_change_beacon,
.set_cqm_rssi_config = mwifiex_cfg80211_set_cqm_rssi_config,
.set_antenna = mwifiex_cfg80211_set_antenna,
+ .get_antenna = mwifiex_cfg80211_get_antenna,
.del_station = mwifiex_cfg80211_del_station,
.sched_scan_start = mwifiex_cfg80211_sched_scan_start,
.sched_scan_stop = mwifiex_cfg80211_sched_scan_stop,
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index 6bc2011d8..c29f26d8b 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -1020,8 +1020,6 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
{
struct cmd_ctrl_node *cmd_node = NULL, *tmp_node;
unsigned long flags, cmd_flags;
- struct mwifiex_private *priv;
- int i;
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
/* Cancel current cmd */
@@ -1046,23 +1044,7 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
- mwifiex_cancel_pending_scan_cmd(adapter);
-
- if (adapter->scan_processing) {
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
- adapter->scan_processing = false;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
- for (i = 0; i < adapter->priv_num; i++) {
- priv = adapter->priv[i];
- if (!priv)
- continue;
- if (priv->scan_request) {
- mwifiex_dbg(adapter, WARN, "info: aborting scan\n");
- cfg80211_scan_done(priv->scan_request, 1);
- priv->scan_request = NULL;
- }
- }
- }
+ mwifiex_cancel_scan(adapter);
}
/*
@@ -1080,8 +1062,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
{
struct cmd_ctrl_node *cmd_node = NULL;
unsigned long cmd_flags;
- struct mwifiex_private *priv;
- int i;
if ((adapter->curr_cmd) &&
(adapter->curr_cmd->wait_q_enabled)) {
@@ -1101,23 +1081,7 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
mwifiex_recycle_cmd_node(adapter, cmd_node);
}
- mwifiex_cancel_pending_scan_cmd(adapter);
-
- if (adapter->scan_processing) {
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
- adapter->scan_processing = false;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
- for (i = 0; i < adapter->priv_num; i++) {
- priv = adapter->priv[i];
- if (!priv)
- continue;
- if (priv->scan_request) {
- mwifiex_dbg(adapter, WARN, "info: aborting scan\n");
- cfg80211_scan_done(priv->scan_request, 1);
- priv->scan_request = NULL;
- }
- }
- }
+ mwifiex_cancel_scan(adapter);
}
/*
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 8e4145abd..5596b6be1 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -462,6 +462,9 @@ enum P2P_MODES {
#define HostCmd_ACT_SET_RX 0x0001
#define HostCmd_ACT_SET_TX 0x0002
#define HostCmd_ACT_SET_BOTH 0x0003
+#define HostCmd_ACT_GET_RX 0x0004
+#define HostCmd_ACT_GET_TX 0x0008
+#define HostCmd_ACT_GET_BOTH 0x000c
#define RF_ANTENNA_AUTO 0xFFFF
@@ -1958,8 +1961,8 @@ struct mwifiex_ie_types_btcoex_scan_time {
struct mwifiex_ie_types_header header;
u8 coex_scan;
u8 reserved;
- u16 min_scan_time;
- u16 max_scan_time;
+ __le16 min_scan_time;
+ __le16 max_scan_time;
} __packed;
struct mwifiex_ie_types_btcoex_aggr_win_size {
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index 78c532f0d..1489c9019 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -60,7 +60,7 @@ static void wakeup_timer_fn(unsigned long data)
adapter->hw_status = MWIFIEX_HW_STATUS_RESET;
mwifiex_cancel_all_pending_cmd(adapter);
- if (adapter->if_ops.card_reset)
+ if (adapter->if_ops.card_reset && !adapter->hs_activated)
adapter->if_ops.card_reset(adapter);
}
@@ -110,6 +110,8 @@ int mwifiex_init_priv(struct mwifiex_private *priv)
priv->tx_power_level = 0;
priv->max_tx_power_level = 0;
priv->min_tx_power_level = 0;
+ priv->tx_ant = 0;
+ priv->rx_ant = 0;
priv->tx_rate = 0;
priv->rxpd_htinfo = 0;
priv->rxpd_rate = 0;
@@ -788,3 +790,4 @@ poll_fw:
return ret;
}
+EXPORT_SYMBOL_GPL(mwifiex_dnld_fw);
diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h
index a5a48c183..70429815f 100644
--- a/drivers/net/wireless/marvell/mwifiex/ioctl.h
+++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h
@@ -83,6 +83,8 @@ struct wep_key {
#define MWIFIEX_AUTH_MODE_AUTO 0xFF
#define BAND_CONFIG_BG 0x00
#define BAND_CONFIG_A 0x01
+#define MWIFIEX_SEC_CHAN_BELOW 0x30
+#define MWIFIEX_SEC_CHAN_ABOVE 0x10
#define MWIFIEX_SUPPORTED_RATES 14
#define MWIFIEX_SUPPORTED_RATES_EXT 32
#define MWIFIEX_TDLS_SUPPORTED_RATES 8
@@ -341,16 +343,16 @@ enum {
};
struct mwifiex_ds_reg_rw {
- __le32 type;
- __le32 offset;
- __le32 value;
+ u32 type;
+ u32 offset;
+ u32 value;
};
#define MAX_EEPROM_DATA 256
struct mwifiex_ds_read_eeprom {
- __le16 offset;
- __le16 byte_count;
+ u16 offset;
+ u16 byte_count;
u8 value[MAX_EEPROM_DATA];
};
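
This settles the endianness convention for these host-side ioctl structs: driver-internal state stays native-endian, and byte-swapping happens exactly once, at the firmware boundary in the sta_cmd.c builders below (with the inverse in sta_cmdresp.c). The old code converted to __leXX when filling the struct only to convert straight back in the builder, a pointless round-trip. The intended split, illustratively:

	struct host_side {		/* driver internal: native endian */
		u32 offset;
		u32 value;
	};

	struct wire_side {		/* firmware ABI: fixed little endian */
		__le16 offset;
		__le32 value;
	} __packed;

	static void pack_reg(struct wire_side *w, const struct host_side *h)
	{
		w->offset = cpu_to_le16((u16)h->offset);
		w->value  = cpu_to_le32(h->value);
	}
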
diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c
index 62211fca9..1c7b00630 100644
--- a/drivers/net/wireless/marvell/mwifiex/join.c
+++ b/drivers/net/wireless/marvell/mwifiex/join.c
@@ -647,6 +647,12 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
const u8 *ie_ptr;
struct ieee80211_ht_operation *assoc_resp_ht_oper;
+ if (!priv->attempted_bss_desc) {
+ mwifiex_dbg(priv->adapter, ERROR,
+ "ASSOC_RESP: failed, association terminated by host\n");
+ goto done;
+ }
+
assoc_rsp = (struct ieee_types_assoc_rsp *) &resp->params;
cap_info = le16_to_cpu(assoc_rsp->cap_info_bitmap);
@@ -1270,6 +1276,12 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv,
u16 cmd = le16_to_cpu(resp->command);
u8 result;
+ if (!priv->attempted_bss_desc) {
+ mwifiex_dbg(priv->adapter, ERROR,
+ "ADHOC_RESP: failed, association terminated by host\n");
+ goto done;
+ }
+
if (cmd == HostCmd_CMD_802_11_AD_HOC_START)
result = start_result->result;
else
@@ -1281,7 +1293,7 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv,
if (result) {
mwifiex_dbg(priv->adapter, ERROR, "ADHOC_RESP: failed\n");
if (priv->media_connected)
- mwifiex_reset_connect_state(priv, result);
+ mwifiex_reset_connect_state(priv, result, true);
memset(&priv->curr_bss_params.bss_descriptor,
0x00, sizeof(struct mwifiex_bssdescriptor));
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 4dde1ae35..1afd6a94c 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -526,10 +526,12 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
fw.fw_buf = (u8 *) adapter->firmware->data;
fw.fw_len = adapter->firmware->size;
- if (adapter->if_ops.dnld_fw)
+ if (adapter->if_ops.dnld_fw) {
ret = adapter->if_ops.dnld_fw(adapter, &fw);
- else
+ } else {
ret = mwifiex_dnld_fw(adapter, &fw);
+ }
+
if (ret == -1)
goto err_dnld_fw;
@@ -695,9 +697,13 @@ mwifiex_close(struct net_device *dev)
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
if (priv->scan_request) {
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
mwifiex_dbg(priv->adapter, INFO,
"aborting scan on ndo_stop\n");
- cfg80211_scan_done(priv->scan_request, 1);
+ cfg80211_scan_done(priv->scan_request, &info);
priv->scan_request = NULL;
priv->scan_aborting = true;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 0207af00b..9f6bb400b 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -533,6 +533,8 @@ struct mwifiex_private {
u16 tx_power_level;
u8 max_tx_power_level;
u8 min_tx_power_level;
+ u32 tx_ant;
+ u32 rx_ant;
u8 tx_rate;
u8 tx_htinfo;
u8 rxpd_htinfo;
@@ -1054,6 +1056,7 @@ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter);
void mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter);
void mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter);
void mwifiex_cancel_pending_scan_cmd(struct mwifiex_adapter *adapter);
+void mwifiex_cancel_scan(struct mwifiex_adapter *adapter);
void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter,
struct cmd_ctrl_node *cmd_node);
@@ -1128,7 +1131,8 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc);
int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp);
-void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason);
+void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason,
+ bool from_ap);
u8 mwifiex_band_to_radio_type(u8 band);
int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac);
void mwifiex_deauthenticate_all(struct mwifiex_adapter *adapter);
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 0c7937eb6..453ab6ad4 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -202,7 +202,6 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
if (mwifiex_add_card(card, &add_remove_card_sem, &pcie_ops,
MWIFIEX_PCIE)) {
pr_err("%s failed\n", __func__);
- kfree(card);
return -1;
}
@@ -440,6 +439,11 @@ static int mwifiex_pcie_disable_host_int(struct mwifiex_adapter *adapter)
return 0;
}
+static void mwifiex_pcie_disable_host_int_noerr(struct mwifiex_adapter *adapter)
+{
+ WARN_ON(mwifiex_pcie_disable_host_int(adapter));
+}
+
/*
* This function enables the host interrupt.
*
@@ -507,7 +511,7 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter)
for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) {
/* Allocate skb here so that firmware can DMA data from it */
skb = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE,
- GFP_KERNEL | GFP_DMA);
+ GFP_KERNEL);
if (!skb) {
mwifiex_dbg(adapter, ERROR,
"Unable to allocate skb for RX ring.\n");
@@ -1319,7 +1323,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
}
skb_tmp = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE,
- GFP_KERNEL | GFP_DMA);
+ GFP_KERNEL);
if (!skb_tmp) {
mwifiex_dbg(adapter, ERROR,
"Unable to allocate skb.\n");
@@ -1612,6 +1616,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
pkt_len = *((__le16 *)skb->data);
rx_len = le16_to_cpu(pkt_len);
+ skb_put(skb, MWIFIEX_UPLD_SIZE - skb->len);
skb_trim(skb, rx_len);
skb_pull(skb, INTF_HEADER_LEN);
@@ -2086,6 +2091,13 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter,
unsigned long flags;
struct pcie_service_card *card = adapter->card;
+ if (card->msi_enable) {
+ spin_lock_irqsave(&adapter->int_lock, flags);
+ adapter->int_status = 1;
+ spin_unlock_irqrestore(&adapter->int_lock, flags);
+ return;
+ }
+
if (!mwifiex_pcie_ok_to_access_hw(adapter))
return;
@@ -2187,15 +2199,44 @@ exit:
static int mwifiex_process_pcie_int(struct mwifiex_adapter *adapter)
{
int ret;
- u32 pcie_ireg;
+ u32 pcie_ireg = 0;
unsigned long flags;
+ struct pcie_service_card *card = adapter->card;
spin_lock_irqsave(&adapter->int_lock, flags);
- /* Clear out unused interrupts */
- pcie_ireg = adapter->int_status;
+ if (!card->msi_enable) {
+ /* Clear out unused interrupts */
+ pcie_ireg = adapter->int_status;
+ }
adapter->int_status = 0;
spin_unlock_irqrestore(&adapter->int_lock, flags);
+ if (card->msi_enable) {
+ if (mwifiex_pcie_ok_to_access_hw(adapter)) {
+ if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS,
+ &pcie_ireg)) {
+ mwifiex_dbg(adapter, ERROR,
+ "Read register failed\n");
+ return -1;
+ }
+
+ if ((pcie_ireg != 0xFFFFFFFF) && (pcie_ireg)) {
+ if (mwifiex_write_reg(adapter,
+ PCIE_HOST_INT_STATUS,
+ ~pcie_ireg)) {
+ mwifiex_dbg(adapter, ERROR,
+ "Write register failed\n");
+ return -1;
+ }
+ if (!adapter->pps_uapsd_mode &&
+ adapter->ps_state == PS_STATE_SLEEP) {
+ adapter->ps_state = PS_STATE_AWAKE;
+ adapter->pm_wakeup_fw_try = false;
+ del_timer(&adapter->wakeup_timer);
+ }
+ }
+ }
+ }
while (pcie_ireg & HOST_INTR_MASK) {
if (pcie_ireg & HOST_INTR_DNLD_DONE) {
pcie_ireg &= ~HOST_INTR_DNLD_DONE;
@@ -2235,6 +2276,12 @@ static int mwifiex_process_pcie_int(struct mwifiex_adapter *adapter)
return ret;
}
+ if (card->msi_enable) {
+ spin_lock_irqsave(&adapter->int_lock, flags);
+ adapter->int_status = 0;
+ spin_unlock_irqrestore(&adapter->int_lock, flags);
+ }
+
if (mwifiex_pcie_ok_to_access_hw(adapter)) {
if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS,
&pcie_ireg)) {
@@ -2254,11 +2301,17 @@ static int mwifiex_process_pcie_int(struct mwifiex_adapter *adapter)
}
}
+ if (!card->msi_enable) {
+ spin_lock_irqsave(&adapter->int_lock, flags);
+ pcie_ireg |= adapter->int_status;
+ adapter->int_status = 0;
+ spin_unlock_irqrestore(&adapter->int_lock, flags);
+ }
}
mwifiex_dbg(adapter, INTR,
"info: cmd_sent=%d data_sent=%d\n",
adapter->cmd_sent, adapter->data_sent);
- if (adapter->ps_state != PS_STATE_SLEEP)
+ if (!card->msi_enable && adapter->ps_state != PS_STATE_SLEEP)
mwifiex_pcie_enable_host_int(adapter);
return 0;
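
A hedged reading of the MSI split above: with MSI there is no level-triggered status for the ISR to latch, so the interrupt handler only marks work pending (int_status = 1) and this thread reads PCIE_HOST_INT_STATUS itself, acknowledging by writing back the complement of the serviced bits; an all-ones read is treated as the device having gone away. The read/ack core:

	u32 ireg;

	if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS, &ireg))
		return -1;			/* register read failed */

	if (ireg && ireg != 0xFFFFFFFF)		/* all-ones: device likely gone */
		mwifiex_write_reg(adapter, PCIE_HOST_INT_STATUS, ~ireg);
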
@@ -2796,7 +2849,6 @@ static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter)
"MRVL_PCIE", &card->share_irq_ctx);
if (ret) {
pr_err("request_irq failed: ret=%d\n", ret);
- adapter->card = NULL;
return -1;
}
@@ -2804,7 +2856,7 @@ static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter)
}
/*
- * This function get firmare name for downloading by revision id
+ * This function gets the firmware name for downloading by revision id
*
* Read revision id register to get revision id
*/
@@ -2841,20 +2893,20 @@ static void mwifiex_pcie_get_fw_name(struct mwifiex_adapter *adapter)
version &= 0x7;
switch (revision_id) {
case PCIE8997_V2:
- if (version == CHIP_VER_PCIEUSB)
+ if (version == CHIP_VER_PCIEUART)
strcpy(adapter->fw_name,
- PCIEUSB8997_FW_NAME_V2);
+ PCIEUART8997_FW_NAME_V2);
else
strcpy(adapter->fw_name,
- PCIEUART8997_FW_NAME_V2);
+ PCIEUSB8997_FW_NAME_V2);
break;
case PCIE8997_Z:
- if (version == CHIP_VER_PCIEUSB)
+ if (version == CHIP_VER_PCIEUART)
strcpy(adapter->fw_name,
- PCIEUSB8997_FW_NAME_Z);
+ PCIEUART8997_FW_NAME_Z);
else
strcpy(adapter->fw_name,
- PCIEUART8997_FW_NAME_Z);
+ PCIEUSB8997_FW_NAME_Z);
break;
default:
strcpy(adapter->fw_name, PCIE8997_DEFAULT_FW_NAME);
@@ -2901,10 +2953,11 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
const struct mwifiex_pcie_card_reg *reg;
- struct pci_dev *pdev = card->dev;
+ struct pci_dev *pdev;
int i;
if (card) {
+ pdev = card->dev;
if (card->msix_enable) {
for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++)
synchronize_irq(card->msix_entries[i].vector);
@@ -2945,6 +2998,7 @@ static struct mwifiex_if_ops pcie_ops = {
.register_dev = mwifiex_register_dev,
.unregister_dev = mwifiex_unregister_dev,
.enable_int = mwifiex_pcie_enable_host_int,
+ .disable_int = mwifiex_pcie_disable_host_int_noerr,
.process_int_status = mwifiex_process_int_status,
.host_to_card = mwifiex_pcie_host_to_card,
.wakeup = mwifiex_pm_wakeup_card,
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h
index db18ac6f5..609588400 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.h
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.h
@@ -48,7 +48,7 @@
#define PCIE8897_B0 0x1200
#define PCIE8997_Z 0x0
#define PCIE8997_V2 0x471
-#define CHIP_VER_PCIEUSB 0x2
+#define CHIP_VER_PCIEUART 0x3
/* Constants for Buffer Descriptor (BD) rings */
#define MWIFIEX_MAX_TXRX_BD 0x20
@@ -258,7 +258,7 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = {
.fw_dump_end = 0xcff,
.fw_dump_host_ready = 0xcc,
.fw_dump_read_done = 0xdd,
- .msix_support = 1,
+ .msix_support = 0,
};
static struct memory_type_mapping mem_type_mapping_tbl_w8897[] = {
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index bc5e52ceb..21ec84794 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -1896,7 +1896,8 @@ mwifiex_active_scan_req_for_passive_chan(struct mwifiex_private *priv)
u8 id = 0;
struct mwifiex_user_scan_cfg *user_scan_cfg;
- if (adapter->active_scan_triggered || !priv->scan_request) {
+ if (adapter->active_scan_triggered || !priv->scan_request ||
+ priv->scan_aborting) {
adapter->active_scan_triggered = false;
return 0;
}
@@ -1956,10 +1957,15 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
mwifiex_complete_scan(priv);
if (priv->scan_request) {
+ struct cfg80211_scan_info info = {
+ .aborted = false,
+ };
+
mwifiex_dbg(adapter, INFO,
"info: notifying scan done\n");
- cfg80211_scan_done(priv->scan_request, 0);
+ cfg80211_scan_done(priv->scan_request, &info);
priv->scan_request = NULL;
+ priv->scan_aborting = false;
} else {
priv->scan_aborting = false;
mwifiex_dbg(adapter, INFO,
@@ -1977,10 +1983,15 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
if (!adapter->active_scan_triggered) {
if (priv->scan_request) {
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
mwifiex_dbg(adapter, INFO,
"info: aborting scan\n");
- cfg80211_scan_done(priv->scan_request, 1);
+ cfg80211_scan_done(priv->scan_request, &info);
priv->scan_request = NULL;
+ priv->scan_aborting = false;
} else {
priv->scan_aborting = false;
mwifiex_dbg(adapter, INFO,
@@ -2001,6 +2012,37 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
return;
}
+void mwifiex_cancel_scan(struct mwifiex_adapter *adapter)
+{
+ struct mwifiex_private *priv;
+ unsigned long cmd_flags;
+ int i;
+
+ mwifiex_cancel_pending_scan_cmd(adapter);
+
+ if (adapter->scan_processing) {
+ spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
+ adapter->scan_processing = false;
+ spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
+ for (i = 0; i < adapter->priv_num; i++) {
+ priv = adapter->priv[i];
+ if (!priv)
+ continue;
+ if (priv->scan_request) {
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
+ mwifiex_dbg(adapter, INFO,
+ "info: aborting scan\n");
+ cfg80211_scan_done(priv->scan_request, &info);
+ priv->scan_request = NULL;
+ priv->scan_aborting = false;
+ }
+ }
+ }
+}
+
/*
* This function handles the command response of scan.
*
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index e757dfc51..f49bfdb7f 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -102,10 +102,9 @@ static int mwifiex_sdio_probe_of(struct device *dev, struct sdio_mmc_card *card)
struct mwifiex_plt_wake_cfg *cfg;
int ret;
- if (!dev->of_node ||
- !of_match_node(mwifiex_sdio_of_match_table, dev->of_node)) {
- dev_err(dev, "sdio platform data not available\n");
- return -1;
+ if (!of_match_node(mwifiex_sdio_of_match_table, dev->of_node)) {
+ dev_err(dev, "required compatible string missing\n");
+ return -EINVAL;
}
card->plt_of_node = dev->of_node;
@@ -115,7 +114,7 @@ static int mwifiex_sdio_probe_of(struct device *dev, struct sdio_mmc_card *card)
if (cfg && card->plt_of_node) {
cfg->irq_wifi = irq_of_parse_and_map(card->plt_of_node, 0);
if (!cfg->irq_wifi) {
- dev_err(dev,
+ dev_dbg(dev,
"fail to parse irq_wifi from device tree\n");
} else {
ret = devm_request_irq(dev, cfg->irq_wifi,
@@ -183,24 +182,35 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
sdio_release_host(func);
if (ret) {
- pr_err("%s: failed to enable function\n", __func__);
- kfree(card);
- return -EIO;
+ dev_err(&func->dev, "failed to enable function\n");
+ goto err_free;
}
/* device tree node parsing and platform specific configuration*/
- mwifiex_sdio_probe_of(&func->dev, card);
-
- if (mwifiex_add_card(card, &add_remove_card_sem, &sdio_ops,
- MWIFIEX_SDIO)) {
- pr_err("%s: add card failed\n", __func__);
- kfree(card);
- sdio_claim_host(func);
- ret = sdio_disable_func(func);
- sdio_release_host(func);
- ret = -1;
+ if (func->dev.of_node) {
+ ret = mwifiex_sdio_probe_of(&func->dev, card);
+ if (ret) {
+ dev_err(&func->dev, "SDIO dt node parse failed\n");
+ goto err_disable;
+ }
+ }
+
+ ret = mwifiex_add_card(card, &add_remove_card_sem, &sdio_ops,
+ MWIFIEX_SDIO);
+ if (ret) {
+ dev_err(&func->dev, "add card failed\n");
+ goto err_disable;
}
+ return 0;
+
+err_disable:
+ sdio_claim_host(func);
+ sdio_disable_func(func);
+ sdio_release_host(func);
+err_free:
+ kfree(card);
+
return ret;
}
@@ -544,6 +554,19 @@ static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter)
return mwifiex_write_reg(adapter, CONFIGURATION_REG, 0);
}
+static int mwifiex_sdio_dnld_fw(struct mwifiex_adapter *adapter,
+ struct mwifiex_fw_image *fw)
+{
+ struct sdio_mmc_card *card = adapter->card;
+ int ret;
+
+ sdio_claim_host(card->func);
+ ret = mwifiex_dnld_fw(adapter, fw);
+ sdio_release_host(card->func);
+
+ return ret;
+}
+
/*
* This function is used to initialize IO ports for the
* chipsets supporting SDIO new mode eg SD8897.
@@ -1492,7 +1515,7 @@ rx_curr_single:
mwifiex_dbg(adapter, INFO, "info: RX: port: %d, rx_len: %d\n",
port, rx_len);
- skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL | GFP_DMA);
+ skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL);
if (!skb) {
mwifiex_dbg(adapter, ERROR,
"single skb allocated fail,\t"
@@ -1597,7 +1620,7 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
mwifiex_dbg(adapter, INFO, "info: rx_len = %d\n", rx_len);
- skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL | GFP_DMA);
+ skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL);
if (!skb)
return -1;
@@ -2732,6 +2755,7 @@ static struct mwifiex_if_ops sdio_ops = {
.cleanup_mpa_buf = mwifiex_cleanup_mpa_buf,
.cmdrsp_complete = mwifiex_sdio_cmdrsp_complete,
.event_complete = mwifiex_sdio_event_complete,
+ .dnld_fw = mwifiex_sdio_dnld_fw,
.card_reset = mwifiex_sdio_card_reset,
.reg_dump = mwifiex_sdio_reg_dump,
.device_dump = mwifiex_sdio_device_dump,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index e436574b1..7897037b0 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -313,23 +313,41 @@ static int mwifiex_cmd_rf_antenna(struct mwifiex_private *priv,
cmd->command = cpu_to_le16(HostCmd_CMD_RF_ANTENNA);
- if (cmd_action != HostCmd_ACT_GEN_SET)
- return 0;
-
- if (priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2) {
- cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_rf_ant_mimo) +
- S_DS_GEN);
- ant_mimo->action_tx = cpu_to_le16(HostCmd_ACT_SET_TX);
- ant_mimo->tx_ant_mode = cpu_to_le16((u16)ant_cfg->tx_ant);
- ant_mimo->action_rx = cpu_to_le16(HostCmd_ACT_SET_RX);
- ant_mimo->rx_ant_mode = cpu_to_le16((u16)ant_cfg->rx_ant);
- } else {
- cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_rf_ant_siso) +
- S_DS_GEN);
- ant_siso->action = cpu_to_le16(HostCmd_ACT_SET_BOTH);
- ant_siso->ant_mode = cpu_to_le16((u16)ant_cfg->tx_ant);
+ switch (cmd_action) {
+ case HostCmd_ACT_GEN_SET:
+ if (priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2) {
+ cmd->size = cpu_to_le16(sizeof(struct
+ host_cmd_ds_rf_ant_mimo)
+ + S_DS_GEN);
+ ant_mimo->action_tx = cpu_to_le16(HostCmd_ACT_SET_TX);
+ ant_mimo->tx_ant_mode = cpu_to_le16((u16)ant_cfg->
+ tx_ant);
+ ant_mimo->action_rx = cpu_to_le16(HostCmd_ACT_SET_RX);
+ ant_mimo->rx_ant_mode = cpu_to_le16((u16)ant_cfg->
+ rx_ant);
+ } else {
+ cmd->size = cpu_to_le16(sizeof(struct
+ host_cmd_ds_rf_ant_siso) +
+ S_DS_GEN);
+ ant_siso->action = cpu_to_le16(HostCmd_ACT_SET_BOTH);
+ ant_siso->ant_mode = cpu_to_le16((u16)ant_cfg->tx_ant);
+ }
+ break;
+ case HostCmd_ACT_GEN_GET:
+ if (priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2) {
+ cmd->size = cpu_to_le16(sizeof(struct
+ host_cmd_ds_rf_ant_mimo) +
+ S_DS_GEN);
+ ant_mimo->action_tx = cpu_to_le16(HostCmd_ACT_GET_TX);
+ ant_mimo->action_rx = cpu_to_le16(HostCmd_ACT_GET_RX);
+ } else {
+ cmd->size = cpu_to_le16(sizeof(struct
+ host_cmd_ds_rf_ant_siso) +
+ S_DS_GEN);
+ ant_siso->action = cpu_to_le16(HostCmd_ACT_GET_BOTH);
+ }
+ break;
}
-
return 0;
}
@@ -1130,9 +1148,8 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
cmd->size = cpu_to_le16(sizeof(*mac_reg) + S_DS_GEN);
mac_reg = &cmd->params.mac_reg;
mac_reg->action = cpu_to_le16(cmd_action);
- mac_reg->offset =
- cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
- mac_reg->value = reg_rw->value;
+ mac_reg->offset = cpu_to_le16((u16) reg_rw->offset);
+ mac_reg->value = cpu_to_le32(reg_rw->value);
break;
}
case HostCmd_CMD_BBP_REG_ACCESS:
@@ -1142,9 +1159,8 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
cmd->size = cpu_to_le16(sizeof(*bbp_reg) + S_DS_GEN);
bbp_reg = &cmd->params.bbp_reg;
bbp_reg->action = cpu_to_le16(cmd_action);
- bbp_reg->offset =
- cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
- bbp_reg->value = (u8) le32_to_cpu(reg_rw->value);
+ bbp_reg->offset = cpu_to_le16((u16) reg_rw->offset);
+ bbp_reg->value = (u8) reg_rw->value;
break;
}
case HostCmd_CMD_RF_REG_ACCESS:
@@ -1154,8 +1170,8 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
cmd->size = cpu_to_le16(sizeof(*rf_reg) + S_DS_GEN);
rf_reg = &cmd->params.rf_reg;
rf_reg->action = cpu_to_le16(cmd_action);
- rf_reg->offset = cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
- rf_reg->value = (u8) le32_to_cpu(reg_rw->value);
+ rf_reg->offset = cpu_to_le16((u16) reg_rw->offset);
+ rf_reg->value = (u8) reg_rw->value;
break;
}
case HostCmd_CMD_PMIC_REG_ACCESS:
@@ -1165,9 +1181,8 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
cmd->size = cpu_to_le16(sizeof(*pmic_reg) + S_DS_GEN);
pmic_reg = &cmd->params.pmic_reg;
pmic_reg->action = cpu_to_le16(cmd_action);
- pmic_reg->offset =
- cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
- pmic_reg->value = (u8) le32_to_cpu(reg_rw->value);
+ pmic_reg->offset = cpu_to_le16((u16) reg_rw->offset);
+ pmic_reg->value = (u8) reg_rw->value;
break;
}
case HostCmd_CMD_CAU_REG_ACCESS:
@@ -1177,9 +1192,8 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
cmd->size = cpu_to_le16(sizeof(*cau_reg) + S_DS_GEN);
cau_reg = &cmd->params.rf_reg;
cau_reg->action = cpu_to_le16(cmd_action);
- cau_reg->offset =
- cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
- cau_reg->value = (u8) le32_to_cpu(reg_rw->value);
+ cau_reg->offset = cpu_to_le16((u16) reg_rw->offset);
+ cau_reg->value = (u8) reg_rw->value;
break;
}
case HostCmd_CMD_802_11_EEPROM_ACCESS:
@@ -1190,8 +1204,8 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
cmd->size = cpu_to_le16(sizeof(*cmd_eeprom) + S_DS_GEN);
cmd_eeprom->action = cpu_to_le16(cmd_action);
- cmd_eeprom->offset = rd_eeprom->offset;
- cmd_eeprom->byte_count = rd_eeprom->byte_count;
+ cmd_eeprom->offset = cpu_to_le16(rd_eeprom->offset);
+ cmd_eeprom->byte_count = cpu_to_le16(rd_eeprom->byte_count);
cmd_eeprom->value = 0;
break;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
index d18c7979d..ccf54932e 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
@@ -469,7 +469,9 @@ static int mwifiex_ret_rf_antenna(struct mwifiex_private *priv,
struct host_cmd_ds_rf_ant_siso *ant_siso = &resp->params.ant_siso;
struct mwifiex_adapter *adapter = priv->adapter;
- if (adapter->hw_dev_mcs_support == HT_STREAM_2X2)
+ if (adapter->hw_dev_mcs_support == HT_STREAM_2X2) {
+ priv->tx_ant = le16_to_cpu(ant_mimo->tx_ant_mode);
+ priv->rx_ant = le16_to_cpu(ant_mimo->rx_ant_mode);
mwifiex_dbg(adapter, INFO,
"RF_ANT_RESP: Tx action = 0x%x, Tx Mode = 0x%04x\t"
"Rx action = 0x%x, Rx Mode = 0x%04x\n",
@@ -477,12 +479,14 @@ static int mwifiex_ret_rf_antenna(struct mwifiex_private *priv,
le16_to_cpu(ant_mimo->tx_ant_mode),
le16_to_cpu(ant_mimo->action_rx),
le16_to_cpu(ant_mimo->rx_ant_mode));
- else
+ } else {
+ priv->tx_ant = le16_to_cpu(ant_siso->ant_mode);
+ priv->rx_ant = le16_to_cpu(ant_siso->ant_mode);
mwifiex_dbg(adapter, INFO,
"RF_ANT_RESP: action = 0x%x, Mode = 0x%04x\n",
le16_to_cpu(ant_siso->action),
le16_to_cpu(ant_siso->ant_mode));
-
+ }
return 0;
}
@@ -553,7 +557,8 @@ static int mwifiex_ret_802_11_deauthenticate(struct mwifiex_private *priv,
if (!memcmp(resp->params.deauth.mac_addr,
&priv->curr_bss_params.bss_descriptor.mac_address,
sizeof(resp->params.deauth.mac_addr)))
- mwifiex_reset_connect_state(priv, WLAN_REASON_DEAUTH_LEAVING);
+ mwifiex_reset_connect_state(priv, WLAN_REASON_DEAUTH_LEAVING,
+ false);
return 0;
}
@@ -566,7 +571,7 @@ static int mwifiex_ret_802_11_deauthenticate(struct mwifiex_private *priv,
static int mwifiex_ret_802_11_ad_hoc_stop(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp)
{
- mwifiex_reset_connect_state(priv, WLAN_REASON_DEAUTH_LEAVING);
+ mwifiex_reset_connect_state(priv, WLAN_REASON_DEAUTH_LEAVING, false);
return 0;
}
@@ -781,45 +786,44 @@ static int mwifiex_ret_reg_access(u16 type, struct host_cmd_ds_command *resp,
switch (type) {
case HostCmd_CMD_MAC_REG_ACCESS:
r.mac = &resp->params.mac_reg;
- reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.mac->offset));
- reg_rw->value = r.mac->value;
+ reg_rw->offset = (u32) le16_to_cpu(r.mac->offset);
+ reg_rw->value = le32_to_cpu(r.mac->value);
break;
case HostCmd_CMD_BBP_REG_ACCESS:
r.bbp = &resp->params.bbp_reg;
- reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.bbp->offset));
- reg_rw->value = cpu_to_le32((u32) r.bbp->value);
+ reg_rw->offset = (u32) le16_to_cpu(r.bbp->offset);
+ reg_rw->value = (u32) r.bbp->value;
break;
case HostCmd_CMD_RF_REG_ACCESS:
r.rf = &resp->params.rf_reg;
- reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.rf->offset));
- reg_rw->value = cpu_to_le32((u32) r.bbp->value);
+ reg_rw->offset = (u32) le16_to_cpu(r.rf->offset);
+ reg_rw->value = (u32) r.bbp->value;
break;
case HostCmd_CMD_PMIC_REG_ACCESS:
r.pmic = &resp->params.pmic_reg;
- reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.pmic->offset));
- reg_rw->value = cpu_to_le32((u32) r.pmic->value);
+ reg_rw->offset = (u32) le16_to_cpu(r.pmic->offset);
+ reg_rw->value = (u32) r.pmic->value;
break;
case HostCmd_CMD_CAU_REG_ACCESS:
r.rf = &resp->params.rf_reg;
- reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.rf->offset));
- reg_rw->value = cpu_to_le32((u32) r.rf->value);
+ reg_rw->offset = (u32) le16_to_cpu(r.rf->offset);
+ reg_rw->value = (u32) r.rf->value;
break;
case HostCmd_CMD_802_11_EEPROM_ACCESS:
r.eeprom = &resp->params.eeprom;
- pr_debug("info: EEPROM read len=%x\n", r.eeprom->byte_count);
- if (le16_to_cpu(eeprom->byte_count) <
- le16_to_cpu(r.eeprom->byte_count)) {
- eeprom->byte_count = cpu_to_le16(0);
+ pr_debug("info: EEPROM read len=%x\n",
+ le16_to_cpu(r.eeprom->byte_count));
+ if (eeprom->byte_count < le16_to_cpu(r.eeprom->byte_count)) {
+ eeprom->byte_count = 0;
pr_debug("info: EEPROM read length is too big\n");
return -1;
}
- eeprom->offset = r.eeprom->offset;
- eeprom->byte_count = r.eeprom->byte_count;
- if (le16_to_cpu(eeprom->byte_count) > 0)
+ eeprom->offset = le16_to_cpu(r.eeprom->offset);
+ eeprom->byte_count = le16_to_cpu(r.eeprom->byte_count);
+ if (eeprom->byte_count > 0)
memcpy(&eeprom->value, &r.eeprom->value,
- le16_to_cpu(r.eeprom->byte_count));
-
+ min((u16)MAX_EEPROM_DATA, eeprom->byte_count));
break;
default:
return -1;
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index 0104108b4..a422f3306 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -40,8 +40,8 @@
* - Erases current SSID and BSSID information
* - Sends a disconnect event to upper layers/applications.
*/
-void
-mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
+void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code,
+ bool from_ap)
{
struct mwifiex_adapter *adapter = priv->adapter;
@@ -140,7 +140,7 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
if (priv->bss_mode == NL80211_IFTYPE_STATION ||
priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
cfg80211_disconnected(priv->netdev, reason_code, NULL, 0,
- false, GFP_KERNEL);
+ !from_ap, GFP_KERNEL);
}
eth_zero_addr(priv->cfg_bssid);
@@ -474,8 +474,8 @@ void mwifiex_bt_coex_wlan_param_update_event(struct mwifiex_private *priv,
scantlv =
(struct mwifiex_ie_types_btcoex_scan_time *)tlv;
adapter->coex_scan = scantlv->coex_scan;
- adapter->coex_min_scan_time = scantlv->min_scan_time;
- adapter->coex_max_scan_time = scantlv->max_scan_time;
+ adapter->coex_min_scan_time = le16_to_cpu(scantlv->min_scan_time);
+ adapter->coex_max_scan_time = le16_to_cpu(scantlv->max_scan_time);
break;
default:
@@ -574,7 +574,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
if (priv->media_connected) {
reason_code =
le16_to_cpu(*(__le16 *)adapter->event_body);
- mwifiex_reset_connect_state(priv, reason_code);
+ mwifiex_reset_connect_state(priv, reason_code, true);
}
break;
@@ -589,7 +589,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
if (priv->media_connected) {
reason_code =
le16_to_cpu(*(__le16 *)adapter->event_body);
- mwifiex_reset_connect_state(priv, reason_code);
+ mwifiex_reset_connect_state(priv, reason_code, true);
}
break;
@@ -599,7 +599,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
if (priv->media_connected) {
reason_code =
le16_to_cpu(*(__le16 *)adapter->event_body);
- mwifiex_reset_connect_state(priv, reason_code);
+ mwifiex_reset_connect_state(priv, reason_code, true);
}
break;
@@ -708,7 +708,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
case EVENT_EXT_SCAN_REPORT:
mwifiex_dbg(adapter, EVENT, "event: EXT_SCAN Report\n");
- if (adapter->ext_scan)
+ if (adapter->ext_scan && !priv->scan_aborting)
ret = mwifiex_handle_event_ext_scan_report(priv,
adapter->event_skb->data);
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index 8e0862657..e06647a32 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -426,6 +426,10 @@ done:
if (bss_desc)
kfree(bss_desc->beacon_buf);
kfree(bss_desc);
+
+ if (ret < 0)
+ priv->attempted_bss_desc = NULL;
+
return ret;
}
@@ -1247,7 +1251,7 @@ static int mwifiex_reg_mem_ioctl_reg_rw(struct mwifiex_private *priv,
{
u16 cmd_no;
- switch (le32_to_cpu(reg_rw->type)) {
+ switch (reg_rw->type) {
case MWIFIEX_REG_MAC:
cmd_no = HostCmd_CMD_MAC_REG_ACCESS;
break;
@@ -1282,9 +1286,9 @@ mwifiex_reg_write(struct mwifiex_private *priv, u32 reg_type,
{
struct mwifiex_ds_reg_rw reg_rw;
- reg_rw.type = cpu_to_le32(reg_type);
- reg_rw.offset = cpu_to_le32(reg_offset);
- reg_rw.value = cpu_to_le32(reg_value);
+ reg_rw.type = reg_type;
+ reg_rw.offset = reg_offset;
+ reg_rw.value = reg_value;
return mwifiex_reg_mem_ioctl_reg_rw(priv, &reg_rw, HostCmd_ACT_GEN_SET);
}
@@ -1302,14 +1306,14 @@ mwifiex_reg_read(struct mwifiex_private *priv, u32 reg_type,
int ret;
struct mwifiex_ds_reg_rw reg_rw;
- reg_rw.type = cpu_to_le32(reg_type);
- reg_rw.offset = cpu_to_le32(reg_offset);
+ reg_rw.type = reg_type;
+ reg_rw.offset = reg_offset;
ret = mwifiex_reg_mem_ioctl_reg_rw(priv, &reg_rw, HostCmd_ACT_GEN_GET);
if (ret)
goto done;
- *value = le32_to_cpu(reg_rw.value);
+ *value = reg_rw.value;
done:
return ret;
@@ -1328,15 +1332,16 @@ mwifiex_eeprom_read(struct mwifiex_private *priv, u16 offset, u16 bytes,
int ret;
struct mwifiex_ds_read_eeprom rd_eeprom;
- rd_eeprom.offset = cpu_to_le16((u16) offset);
- rd_eeprom.byte_count = cpu_to_le16((u16) bytes);
+ rd_eeprom.offset = offset;
+ rd_eeprom.byte_count = bytes;
/* Send request to firmware */
ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_EEPROM_ACCESS,
HostCmd_ACT_GEN_GET, 0, &rd_eeprom, true);
if (!ret)
- memcpy(value, rd_eeprom.value, MAX_EEPROM_DATA);
+ memcpy(value, rd_eeprom.value, min((u16)MAX_EEPROM_DATA,
+ rd_eeprom.byte_count));
return ret;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
index f79d00d1e..a7e9f544f 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
@@ -19,6 +19,7 @@
#include "main.h"
#include "11ac.h"
+#include "11n.h"
/* This function parses security related parameters from cfg80211_ap_settings
* and sets into FW understandable bss_config structure.
@@ -521,9 +522,9 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
tlv += sizeof(struct host_cmd_tlv_rates) + i;
}
if (bss_cfg->channel &&
- ((bss_cfg->band_cfg == BAND_CONFIG_BG &&
+ (((bss_cfg->band_cfg & BIT(0)) == BAND_CONFIG_BG &&
bss_cfg->channel <= MAX_CHANNEL_BAND_BG) ||
- (bss_cfg->band_cfg == BAND_CONFIG_A &&
+ ((bss_cfg->band_cfg & BIT(0)) == BAND_CONFIG_A &&
bss_cfg->channel <= MAX_CHANNEL_BAND_A))) {
chan_band = (struct host_cmd_tlv_channel_band *)tlv;
chan_band->header.type = cpu_to_le16(TLV_TYPE_CHANNELBANDLIST);
@@ -833,6 +834,31 @@ void mwifiex_uap_set_channel(struct mwifiex_private *priv,
config_bands |= BAND_AAC;
}
+ switch (chandef.width) {
+ case NL80211_CHAN_WIDTH_5:
+ case NL80211_CHAN_WIDTH_10:
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ if (chandef.center_freq1 < chandef.chan->center_freq)
+ bss_cfg->band_cfg |= MWIFIEX_SEC_CHAN_BELOW;
+ else
+ bss_cfg->band_cfg |= MWIFIEX_SEC_CHAN_ABOVE;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ bss_cfg->band_cfg |=
+ mwifiex_get_sec_chan_offset(bss_cfg->channel) << 4;
+ break;
+ default:
+ mwifiex_dbg(priv->adapter,
+ WARN, "Unknown channel width: %d\n",
+ chandef.width);
+ break;
+ }
+
priv->adapter->config_bands = config_bands;
if (old_bands != config_bands) {
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
index 666e91af5..bf5660eb2 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -272,7 +272,7 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
struct sk_buff *skb)
{
- struct mwifiex_adapter *adapter = adapter;
+ struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_sta_node *src_node;
struct ethhdr *p_ethhdr;
struct sk_buff *skb_uap;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 870c9cd5c..4341d5680 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -135,7 +135,8 @@ struct rtl8xxxu_rxdesc16 {
u32 seq:12;
u32 frag:4;
- u32 nextpktlen:14;
+ u32 pkt_cnt:8;
+ u32 reserved:6;
u32 nextind:1;
u32 reserved0:1;
@@ -198,7 +199,8 @@ struct rtl8xxxu_rxdesc16 {
u32 reserved0:1;
u32 nextind:1;
- u32 nextpktlen:14;
+ u32 reserved:6;
+ u32 pkt_cnt:8;
u32 frag:4;
u32 seq:12;
@@ -1245,6 +1247,7 @@ struct rtl8xxxu_priv {
u32 ep_tx_normal_queue:1;
u32 ep_tx_low_queue:1;
u32 has_xtalk:1;
+ u32 rx_buf_aggregation:1;
u8 xtalk;
unsigned int pipe_interrupt;
unsigned int pipe_in;
@@ -1315,8 +1318,7 @@ struct rtl8xxxu_fileops {
void (*phy_init_antenna_selection) (struct rtl8xxxu_priv *priv);
void (*phy_iq_calibrate) (struct rtl8xxxu_priv *priv);
void (*config_channel) (struct ieee80211_hw *hw);
- int (*parse_rx_desc) (struct rtl8xxxu_priv *priv, struct sk_buff *skb,
- struct ieee80211_rx_status *rx_status);
+ int (*parse_rx_desc) (struct rtl8xxxu_priv *priv, struct sk_buff *skb);
void (*init_aggregation) (struct rtl8xxxu_priv *priv);
void (*init_statistics) (struct rtl8xxxu_priv *priv);
void (*enable_rf) (struct rtl8xxxu_priv *priv);
@@ -1329,6 +1331,7 @@ struct rtl8xxxu_fileops {
void (*report_connect) (struct rtl8xxxu_priv *priv,
u8 macid, bool connect);
int writeN_block_size;
+ int rx_agg_buf_size;
char tx_desc_size;
char rx_desc_size;
char has_s0s1;
@@ -1409,13 +1412,12 @@ void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv,
u8 macid, bool connect);
void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
u8 macid, bool connect);
+void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen1_enable_rf(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen1_disable_rf(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen2_disable_rf(struct rtl8xxxu_priv *priv);
-int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb,
- struct ieee80211_rx_status *rx_status);
-int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb,
- struct ieee80211_rx_status *rx_status);
+int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb);
+int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb);
int rtl8xxxu_gen2_channel_to_group(int channel);
bool rtl8xxxu_gen2_simularity_compare(struct rtl8xxxu_priv *priv,
int result[][8], int c1, int c2);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
index 08066dec1..1bb6ba015 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
@@ -413,13 +413,8 @@ static int rtl8192cu_parse_efuse(struct rtl8xxxu_priv *priv)
dev_info(&priv->udev->dev,
"%s: dumping efuse (0x%02zx bytes):\n",
__func__, sizeof(struct rtl8192cu_efuse));
- for (i = 0; i < sizeof(struct rtl8192cu_efuse); i += 8) {
- dev_info(&priv->udev->dev, "%02x: "
- "%02x %02x %02x %02x %02x %02x %02x %02x\n", i,
- raw[i], raw[i + 1], raw[i + 2],
- raw[i + 3], raw[i + 4], raw[i + 5],
- raw[i + 6], raw[i + 7]);
- }
+ for (i = 0; i < sizeof(struct rtl8192cu_efuse); i += 8)
+ dev_info(&priv->udev->dev, "%02x: %8ph\n", i, &raw[i]);
}
return 0;
}
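
This dump loop (and its twins in the 8192e and 8723b files below) leans on the kernel's %*ph printk extension, which hex-dumps a small buffer, up to 64 bytes, through a single specifier:

	u8 raw[8] = { 0xde, 0xad, 0xbe, 0xef, 0x01, 0x02, 0x03, 0x04 };

	pr_info("%8ph\n", raw);		/* prints: de ad be ef 01 02 03 04 */
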
@@ -565,6 +560,7 @@ struct rtl8xxxu_fileops rtl8192cu_fops = {
.phy_iq_calibrate = rtl8xxxu_gen1_phy_iq_calibrate,
.config_channel = rtl8xxxu_gen1_config_channel,
.parse_rx_desc = rtl8xxxu_parse_rxdesc16,
+ .init_aggregation = rtl8xxxu_gen1_init_aggregation,
.enable_rf = rtl8xxxu_gen1_enable_rf,
.disable_rf = rtl8xxxu_gen1_disable_rf,
.usb_quirks = rtl8xxxu_gen1_usb_quirks,
@@ -572,6 +568,7 @@ struct rtl8xxxu_fileops rtl8192cu_fops = {
.update_rate_mask = rtl8xxxu_update_rate_mask,
.report_connect = rtl8xxxu_gen1_report_connect,
.writeN_block_size = 128,
+ .rx_agg_buf_size = 16000,
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc16),
.adda_1t_init = 0x0b1b25a0,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
index 9461ecd31..70e7c42cc 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -622,13 +622,8 @@ static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv)
dev_info(&priv->udev->dev,
"%s: dumping efuse (0x%02zx bytes):\n",
__func__, sizeof(struct rtl8192eu_efuse));
- for (i = 0; i < sizeof(struct rtl8192eu_efuse); i += 8) {
- dev_info(&priv->udev->dev, "%02x: "
- "%02x %02x %02x %02x %02x %02x %02x %02x\n", i,
- raw[i], raw[i + 1], raw[i + 2],
- raw[i + 3], raw[i + 4], raw[i + 5],
- raw[i + 6], raw[i + 7]);
- }
+ for (i = 0; i < sizeof(struct rtl8192eu_efuse); i += 8)
+ dev_info(&priv->udev->dev, "%02x: %8ph\n", i, &raw[i]);
}
return 0;
}
@@ -1249,11 +1244,9 @@ static void rtl8192eu_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
reg_e94 = result[i][0];
reg_e9c = result[i][1];
reg_ea4 = result[i][2];
- reg_eac = result[i][3];
reg_eb4 = result[i][4];
reg_ebc = result[i][5];
reg_ec4 = result[i][6];
- reg_ecc = result[i][7];
}
if (candidate >= 0) {
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
index cd6bf209d..52688d5cd 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
@@ -377,6 +377,7 @@ struct rtl8xxxu_fileops rtl8723au_fops = {
.phy_iq_calibrate = rtl8xxxu_gen1_phy_iq_calibrate,
.config_channel = rtl8xxxu_gen1_config_channel,
.parse_rx_desc = rtl8xxxu_parse_rxdesc16,
+ .init_aggregation = rtl8xxxu_gen1_init_aggregation,
.enable_rf = rtl8xxxu_gen1_enable_rf,
.disable_rf = rtl8xxxu_gen1_disable_rf,
.usb_quirks = rtl8xxxu_gen1_usb_quirks,
@@ -384,6 +385,7 @@ struct rtl8xxxu_fileops rtl8723au_fops = {
.update_rate_mask = rtl8xxxu_update_rate_mask,
.report_connect = rtl8xxxu_gen1_report_connect,
.writeN_block_size = 1024,
+ .rx_agg_buf_size = 16000,
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc16),
.adda_1t_init = 0x0b1b25a0,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
index 6f42f0a16..1348819f6 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
@@ -466,13 +466,8 @@ static int rtl8723bu_parse_efuse(struct rtl8xxxu_priv *priv)
dev_info(&priv->udev->dev,
"%s: dumping efuse (0x%02zx bytes):\n",
__func__, sizeof(struct rtl8723bu_efuse));
- for (i = 0; i < sizeof(struct rtl8723bu_efuse); i += 8) {
- dev_info(&priv->udev->dev, "%02x: "
- "%02x %02x %02x %02x %02x %02x %02x %02x\n", i,
- raw[i], raw[i + 1], raw[i + 2],
- raw[i + 3], raw[i + 4], raw[i + 5],
- raw[i + 6], raw[i + 7]);
- }
+ for (i = 0; i < sizeof(struct rtl8723bu_efuse); i += 8)
+ dev_info(&priv->udev->dev, "%02x: %8ph\n", i, &raw[i]);
}
return 0;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 05a1ff26f..4b56f3921 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -44,6 +44,9 @@
int rtl8xxxu_debug = RTL8XXXU_DEBUG_EFUSE;
static bool rtl8xxxu_ht40_2g;
+static bool rtl8xxxu_dma_aggregation;
+static int rtl8xxxu_dma_agg_timeout = -1;
+static int rtl8xxxu_dma_agg_pages = -1;
MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@redhat.com>");
MODULE_DESCRIPTION("RTL8XXXu USB mac80211 Wireless LAN Driver");
@@ -54,10 +57,14 @@ module_param_named(debug, rtl8xxxu_debug, int, 0600);
MODULE_PARM_DESC(debug, "Set debug mask");
module_param_named(ht40_2g, rtl8xxxu_ht40_2g, bool, 0600);
MODULE_PARM_DESC(ht40_2g, "Enable HT40 support on the 2.4GHz band");
+module_param_named(dma_aggregation, rtl8xxxu_dma_aggregation, bool, 0600);
+MODULE_PARM_DESC(dma_aggregation, "Enable DMA packet aggregation");
+module_param_named(dma_agg_timeout, rtl8xxxu_dma_agg_timeout, int, 0600);
+MODULE_PARM_DESC(dma_agg_timeout, "Set DMA aggregation timeout (range 1-127)");
+module_param_named(dma_agg_pages, rtl8xxxu_dma_agg_pages, int, 0600);
+MODULE_PARM_DESC(dma_agg_pages, "Set DMA aggregation pages (range 1-127, 0 to disable)");
#define USB_VENDOR_ID_REALTEK 0x0bda
-/* Minimum IEEE80211_MAX_FRAME_LEN */
-#define RTL_RX_BUFFER_SIZE IEEE80211_MAX_FRAME_LEN
#define RTL8XXXU_RX_URBS 32
#define RTL8XXXU_RX_URB_PENDING_WATER 8
#define RTL8XXXU_TX_URBS 64
@@ -4399,6 +4406,73 @@ void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.media_status_rpt));
}
+void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv)
+{
+ u8 agg_ctrl, usb_spec, page_thresh, timeout;
+
+ usb_spec = rtl8xxxu_read8(priv, REG_USB_SPECIAL_OPTION);
+ usb_spec &= ~USB_SPEC_USB_AGG_ENABLE;
+ rtl8xxxu_write8(priv, REG_USB_SPECIAL_OPTION, usb_spec);
+
+ agg_ctrl = rtl8xxxu_read8(priv, REG_TRXDMA_CTRL);
+ agg_ctrl &= ~TRXDMA_CTRL_RXDMA_AGG_EN;
+
+ if (!rtl8xxxu_dma_aggregation) {
+ rtl8xxxu_write8(priv, REG_TRXDMA_CTRL, agg_ctrl);
+ return;
+ }
+
+ agg_ctrl |= TRXDMA_CTRL_RXDMA_AGG_EN;
+ rtl8xxxu_write8(priv, REG_TRXDMA_CTRL, agg_ctrl);
+
+ /*
+ * The number of packets we can take looks to be buffer size / 512
+ * which matches the 512 byte rounding we have to do when de-muxing
+ * the packets.
+ *
+ * Sample numbers from the vendor driver:
+ * USB High-Speed mode values:
+ * RxAggBlockCount = 8 : 512 byte unit
+ * RxAggBlockTimeout = 6
+ * RxAggPageCount = 48 : 128 byte unit
+ * RxAggPageTimeout = 4 or 6 (absolute time 34ms/(2^6))
+ */
+
+ page_thresh = (priv->fops->rx_agg_buf_size / 512);
+ if (rtl8xxxu_dma_agg_pages >= 0) {
+ if (rtl8xxxu_dma_agg_pages < 6)
+ dev_err(&priv->udev->dev,
+ "%s: dma_agg_pages=%i too small, minimum is 6\n",
+ __func__, rtl8xxxu_dma_agg_pages);
+ else if (rtl8xxxu_dma_agg_pages <= page_thresh)
+ page_thresh = rtl8xxxu_dma_agg_pages;
+ else
+ dev_err(&priv->udev->dev,
+ "%s: dma_agg_pages=%i larger than limit %i\n",
+ __func__, rtl8xxxu_dma_agg_pages, page_thresh);
+ }
+ rtl8xxxu_write8(priv, REG_RXDMA_AGG_PG_TH, page_thresh);
+ /*
+ * REG_RXDMA_AGG_PG_TH + 1 seems to be the timeout register on
+ * gen2 chips and rtl8188eu. The rtl8723au seems unhappy if we
+ * don't set it, so better set both.
+ */
+ timeout = 4;
+
+ if (rtl8xxxu_dma_agg_timeout >= 0) {
+ if (rtl8xxxu_dma_agg_timeout <= 127)
+ timeout = rtl8xxxu_dma_agg_timeout;
+ else
+ dev_err(&priv->udev->dev,
+ "%s: Invalid dma_agg_timeout: %i\n",
+ __func__, rtl8xxxu_dma_agg_timeout);
+ }
+
+ rtl8xxxu_write8(priv, REG_RXDMA_AGG_PG_TH + 1, timeout);
+ rtl8xxxu_write8(priv, REG_USB_DMA_AGG_TO, timeout);
+ priv->rx_buf_aggregation = 1;
+}
+
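A minimal host-side sketch of the arithmetic described in the comment above, assuming the 512-byte block unit and the 16000-byte gen1 aggregation buffer from the fops tables earlier in this patch; agg_page_thresh() is an invented name, not driver code:

#include <stdio.h>

/* Sketch: how many 512-byte blocks one aggregated URB may carry. */
static int agg_page_thresh(int rx_agg_buf_size, int dma_agg_pages)
{
	int page_thresh = rx_agg_buf_size / 512;	/* 16000 / 512 = 31 */

	/* A user-supplied page count may only lower the threshold. */
	if (dma_agg_pages >= 6 && dma_agg_pages <= page_thresh)
		page_thresh = dma_agg_pages;
	return page_thresh;
}

int main(void)
{
	printf("default: %d pages\n", agg_page_thresh(16000, -1));	/* 31 */
	printf("clamped: %d pages\n", agg_page_thresh(16000, 8));	/* 8 */
	return 0;
}

The timeout byte is independent of this threshold: the function above defaults it to 4 and writes it to both REG_RXDMA_AGG_PG_TH + 1 and REG_USB_DMA_AGG_TO, since different chips appear to honour different registers.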
static void rtl8xxxu_set_basic_rates(struct rtl8xxxu_priv *priv, u32 rate_cfg)
{
u32 val32;
@@ -5037,55 +5111,143 @@ static void rtl8xxxu_rx_urb_work(struct work_struct *work)
}
}
-int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb,
- struct ieee80211_rx_status *rx_status)
+static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv,
+ struct sk_buff *skb)
+{
+ struct rtl8723bu_c2h *c2h = (struct rtl8723bu_c2h *)skb->data;
+ struct device *dev = &priv->udev->dev;
+ int len;
+
+ len = skb->len - 2;
+
+ dev_dbg(dev, "C2H ID %02x seq %02x, len %02x source %02x\n",
+ c2h->id, c2h->seq, len, c2h->bt_info.response_source);
+
+ switch (c2h->id) {
+ case C2H_8723B_BT_INFO:
+ if (c2h->bt_info.response_source >
+ BT_INFO_SRC_8723B_BT_ACTIVE_SEND)
+ dev_dbg(dev, "C2H_BT_INFO WiFi only firmware\n");
+ else
+ dev_dbg(dev, "C2H_BT_INFO BT/WiFi coexist firmware\n");
+
+ if (c2h->bt_info.bt_has_reset)
+ dev_dbg(dev, "BT has been reset\n");
+ if (c2h->bt_info.tx_rx_mask)
+ dev_dbg(dev, "BT TRx mask\n");
+
+ break;
+ case C2H_8723B_BT_MP_INFO:
+ dev_dbg(dev, "C2H_MP_INFO ext ID %02x, status %02x\n",
+ c2h->bt_mp_info.ext_id, c2h->bt_mp_info.status);
+ break;
+ case C2H_8723B_RA_REPORT:
+ dev_dbg(dev,
+ "C2H RA RPT: rate %02x, unk %i, macid %02x, noise %i\n",
+ c2h->ra_report.rate, c2h->ra_report.dummy0_0,
+ c2h->ra_report.macid, c2h->ra_report.noisy_state);
+ break;
+ default:
+ dev_info(dev, "Unhandled C2H event %02x seq %02x\n",
+ c2h->id, c2h->seq);
+ print_hex_dump(KERN_INFO, "C2H content: ", DUMP_PREFIX_NONE,
+ 16, 1, c2h->raw.payload, len, false);
+ break;
+ }
+}
+
+int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
{
- struct rtl8xxxu_rxdesc16 *rx_desc =
- (struct rtl8xxxu_rxdesc16 *)skb->data;
+ struct ieee80211_hw *hw = priv->hw;
+ struct ieee80211_rx_status *rx_status;
+ struct rtl8xxxu_rxdesc16 *rx_desc;
struct rtl8723au_phy_stats *phy_stats;
- __le32 *_rx_desc_le = (__le32 *)skb->data;
- u32 *_rx_desc = (u32 *)skb->data;
+ struct sk_buff *next_skb = NULL;
+ __le32 *_rx_desc_le;
+ u32 *_rx_desc;
int drvinfo_sz, desc_shift;
- int i;
+ int i, pkt_cnt, pkt_len, urb_len, pkt_offset;
- for (i = 0; i < (sizeof(struct rtl8xxxu_rxdesc16) / sizeof(u32)); i++)
- _rx_desc[i] = le32_to_cpu(_rx_desc_le[i]);
+ urb_len = skb->len;
+ pkt_cnt = 0;
- skb_pull(skb, sizeof(struct rtl8xxxu_rxdesc16));
+ do {
+ rx_desc = (struct rtl8xxxu_rxdesc16 *)skb->data;
+ _rx_desc_le = (__le32 *)skb->data;
+ _rx_desc = (u32 *)skb->data;
- phy_stats = (struct rtl8723au_phy_stats *)skb->data;
+ for (i = 0;
+ i < (sizeof(struct rtl8xxxu_rxdesc16) / sizeof(u32)); i++)
+ _rx_desc[i] = le32_to_cpu(_rx_desc_le[i]);
- drvinfo_sz = rx_desc->drvinfo_sz * 8;
- desc_shift = rx_desc->shift;
- skb_pull(skb, drvinfo_sz + desc_shift);
+ /*
+ * Only read pkt_cnt from the header if we're parsing the
+ * first packet
+ */
+ if (!pkt_cnt)
+ pkt_cnt = rx_desc->pkt_cnt;
+ pkt_len = rx_desc->pktlen;
- if (rx_desc->phy_stats)
- rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats,
- rx_desc->rxmcs);
+ drvinfo_sz = rx_desc->drvinfo_sz * 8;
+ desc_shift = rx_desc->shift;
+ pkt_offset = roundup(pkt_len + drvinfo_sz + desc_shift +
+ sizeof(struct rtl8xxxu_rxdesc16), 128);
- rx_status->mactime = le32_to_cpu(rx_desc->tsfl);
- rx_status->flag |= RX_FLAG_MACTIME_START;
+ next_skb = NULL;
+ if (pkt_cnt > 1)
+ next_skb = skb_clone(skb, GFP_ATOMIC);
- if (!rx_desc->swdec)
- rx_status->flag |= RX_FLAG_DECRYPTED;
- if (rx_desc->crc32)
- rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
- if (rx_desc->bw)
- rx_status->flag |= RX_FLAG_40MHZ;
+ rx_status = IEEE80211_SKB_RXCB(skb);
+ memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
- if (rx_desc->rxht) {
- rx_status->flag |= RX_FLAG_HT;
- rx_status->rate_idx = rx_desc->rxmcs - DESC_RATE_MCS0;
- } else {
- rx_status->rate_idx = rx_desc->rxmcs;
- }
+ skb_pull(skb, sizeof(struct rtl8xxxu_rxdesc16));
+
+ phy_stats = (struct rtl8723au_phy_stats *)skb->data;
+
+ skb_pull(skb, drvinfo_sz + desc_shift);
+
+ skb_trim(skb, pkt_len);
+
+ if (rx_desc->phy_stats)
+ rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats,
+ rx_desc->rxmcs);
+
+ rx_status->mactime = le32_to_cpu(rx_desc->tsfl);
+ rx_status->flag |= RX_FLAG_MACTIME_START;
+
+ if (!rx_desc->swdec)
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ if (rx_desc->crc32)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (rx_desc->bw)
+ rx_status->flag |= RX_FLAG_40MHZ;
+
+ if (rx_desc->rxht) {
+ rx_status->flag |= RX_FLAG_HT;
+ rx_status->rate_idx = rx_desc->rxmcs - DESC_RATE_MCS0;
+ } else {
+ rx_status->rate_idx = rx_desc->rxmcs;
+ }
+
+ rx_status->freq = hw->conf.chandef.chan->center_freq;
+ rx_status->band = hw->conf.chandef.chan->band;
+
+ ieee80211_rx_irqsafe(hw, skb);
+
+ skb = next_skb;
+ if (skb)
+ skb_pull(next_skb, pkt_offset);
+
+ pkt_cnt--;
+ urb_len -= pkt_offset;
+ } while (skb && urb_len > 0 && pkt_cnt > 0);
return RX_TYPE_DATA_PKT;
}
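The loop above demultiplexes one URB holding several frames: each frame consists of a descriptor, drvinfo, optional shift padding and the payload, and the start of the next frame is that total rounded up to 128 bytes. A standalone sketch of the stepping, with fabricated packet lengths and an assumed 16-byte descriptor:

#include <stdio.h>

#define RXDESC16_SIZE 16	/* assumed sizeof(struct rtl8xxxu_rxdesc16) */

static int round_up_128(int v)
{
	return (v + 127) & ~127;	/* same effect as roundup(v, 128) */
}

int main(void)
{
	int pkt_len[] = { 300, 1500, 60 };	/* fabricated lengths */
	int drvinfo_sz = 32, shift = 0, offset = 0;

	for (int i = 0; i < 3; i++) {
		int pkt_offset = round_up_128(pkt_len[i] + drvinfo_sz +
					      shift + RXDESC16_SIZE);
		printf("pkt %d at URB offset %d, next at +%d\n",
		       i, offset, pkt_offset);
		offset += pkt_offset;
	}
	return 0;
}

Cloning the skb before each skb_pull() lets every frame go to mac80211 with its own rx_status while all clones keep sharing the single URB buffer.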
-int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb,
- struct ieee80211_rx_status *rx_status)
+int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
{
+ struct ieee80211_hw *hw = priv->hw;
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
struct rtl8xxxu_rxdesc24 *rx_desc =
(struct rtl8xxxu_rxdesc24 *)skb->data;
struct rtl8723au_phy_stats *phy_stats;
@@ -5097,6 +5259,8 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb,
for (i = 0; i < (sizeof(struct rtl8xxxu_rxdesc24) / sizeof(u32)); i++)
_rx_desc[i] = le32_to_cpu(_rx_desc_le[i]);
+ memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
+
skb_pull(skb, sizeof(struct rtl8xxxu_rxdesc24));
phy_stats = (struct rtl8723au_phy_stats *)skb->data;
@@ -5108,6 +5272,8 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb,
if (rx_desc->rpt_sel) {
struct device *dev = &priv->udev->dev;
dev_dbg(dev, "%s: C2H packet\n", __func__);
+ rtl8723bu_handle_c2h(priv, skb);
+ dev_kfree_skb(skb);
return RX_TYPE_C2H;
}
@@ -5132,52 +5298,11 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb,
rx_status->rate_idx = rx_desc->rxmcs;
}
- return RX_TYPE_DATA_PKT;
-}
-
-static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv,
- struct sk_buff *skb)
-{
- struct rtl8723bu_c2h *c2h = (struct rtl8723bu_c2h *)skb->data;
- struct device *dev = &priv->udev->dev;
- int len;
-
- len = skb->len - 2;
-
- dev_dbg(dev, "C2H ID %02x seq %02x, len %02x source %02x\n",
- c2h->id, c2h->seq, len, c2h->bt_info.response_source);
-
- switch(c2h->id) {
- case C2H_8723B_BT_INFO:
- if (c2h->bt_info.response_source >
- BT_INFO_SRC_8723B_BT_ACTIVE_SEND)
- dev_dbg(dev, "C2H_BT_INFO WiFi only firmware\n");
- else
- dev_dbg(dev, "C2H_BT_INFO BT/WiFi coexist firmware\n");
-
- if (c2h->bt_info.bt_has_reset)
- dev_dbg(dev, "BT has been reset\n");
- if (c2h->bt_info.tx_rx_mask)
- dev_dbg(dev, "BT TRx mask\n");
+ rx_status->freq = hw->conf.chandef.chan->center_freq;
+ rx_status->band = hw->conf.chandef.chan->band;
- break;
- case C2H_8723B_BT_MP_INFO:
- dev_dbg(dev, "C2H_MP_INFO ext ID %02x, status %02x\n",
- c2h->bt_mp_info.ext_id, c2h->bt_mp_info.status);
- break;
- case C2H_8723B_RA_REPORT:
- dev_dbg(dev,
- "C2H RA RPT: rate %02x, unk %i, macid %02x, noise %i\n",
- c2h->ra_report.rate, c2h->ra_report.dummy0_0,
- c2h->ra_report.macid, c2h->ra_report.noisy_state);
- break;
- default:
- dev_info(dev, "Unhandled C2H event %02x seq %02x\n",
- c2h->id, c2h->seq);
- print_hex_dump(KERN_INFO, "C2H content: ", DUMP_PREFIX_NONE,
- 16, 1, c2h->raw.payload, len, false);
- break;
- }
+ ieee80211_rx_irqsafe(hw, skb);
+ return RX_TYPE_DATA_PKT;
}
static void rtl8xxxu_rx_complete(struct urb *urb)
@@ -5187,26 +5312,12 @@ static void rtl8xxxu_rx_complete(struct urb *urb)
struct ieee80211_hw *hw = rx_urb->hw;
struct rtl8xxxu_priv *priv = hw->priv;
struct sk_buff *skb = (struct sk_buff *)urb->context;
- struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
struct device *dev = &priv->udev->dev;
- int rx_type;
skb_put(skb, urb->actual_length);
if (urb->status == 0) {
- memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
-
- rx_type = priv->fops->parse_rx_desc(priv, skb, rx_status);
-
- rx_status->freq = hw->conf.chandef.chan->center_freq;
- rx_status->band = hw->conf.chandef.chan->band;
-
- if (rx_type == RX_TYPE_DATA_PKT)
- ieee80211_rx_irqsafe(hw, skb);
- else {
- rtl8723bu_handle_c2h(priv, skb);
- dev_kfree_skb(skb);
- }
+ priv->fops->parse_rx_desc(priv, skb);
skb = NULL;
rx_urb->urb.context = NULL;
@@ -5226,12 +5337,20 @@ cleanup:
static int rtl8xxxu_submit_rx_urb(struct rtl8xxxu_priv *priv,
struct rtl8xxxu_rx_urb *rx_urb)
{
+ struct rtl8xxxu_fileops *fops = priv->fops;
struct sk_buff *skb;
int skb_size;
int ret, rx_desc_sz;
- rx_desc_sz = priv->fops->rx_desc_size;
- skb_size = rx_desc_sz + RTL_RX_BUFFER_SIZE;
+ rx_desc_sz = fops->rx_desc_size;
+
+ if (priv->rx_buf_aggregation && fops->rx_agg_buf_size) {
+ skb_size = fops->rx_agg_buf_size;
+ skb_size += (rx_desc_sz + sizeof(struct rtl8723au_phy_stats));
+ } else {
+ skb_size = IEEE80211_MAX_FRAME_LEN;
+ }
+
skb = __netdev_alloc_skb(NULL, skb_size, GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -5259,7 +5378,7 @@ static void rtl8xxxu_int_complete(struct urb *urb)
if (ret)
usb_unanchor_urb(urb);
} else {
- dev_info(dev, "%s: Error %i\n", __func__, urb->status);
+ dev_dbg(dev, "%s: Error %i\n", __func__, urb->status);
}
}
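With aggregation enabled a single RX URB must hold the whole aggregate plus one descriptor and a phy_stats block, so the sizing branch above switches between fops->rx_agg_buf_size and the single-frame IEEE80211_MAX_FRAME_LEN. A host-side sketch with assumed struct sizes:

#include <stdio.h>

#define IEEE80211_MAX_FRAME_LEN	2352	/* mac80211's maximum frame */
#define RX_DESC_SZ		16	/* assumed descriptor size */
#define PHY_STATS_SZ		32	/* assumed phy_stats size */

static int rx_skb_size(int aggregating, int rx_agg_buf_size)
{
	if (aggregating && rx_agg_buf_size)
		return rx_agg_buf_size + RX_DESC_SZ + PHY_STATS_SZ;
	return IEEE80211_MAX_FRAME_LEN;	/* one frame per URB */
}

int main(void)
{
	printf("agg off: %d bytes\n", rx_skb_size(0, 0));	/* 2352 */
	printf("agg on:  %d bytes\n", rx_skb_size(1, 16000));	/* 16048 */
	return 0;
}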
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
index b0e0c6423..921c5653f 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
@@ -405,7 +405,11 @@
#define REG_DWBCN1_CTRL_8723B 0x0228
/* 0x0280 ~ 0x02FF RXDMA Configuration */
-#define REG_RXDMA_AGG_PG_TH 0x0280
+#define REG_RXDMA_AGG_PG_TH 0x0280 /* 0-7 : USB DMA size bits
+ 8-14: USB DMA timeout
+ 15 : Aggregation enable
+ Only seems to be used
+ on 8723bu/8192eu */
#define RXDMA_USB_AGG_ENABLE BIT(31)
#define REG_RXPKT_NUM 0x0284
#define RXPKT_NUM_RXDMA_IDLE BIT(17)
@@ -1052,10 +1056,14 @@
#define USB_HIMR_ROK BIT(0) /* Receive DMA OK Interrupt */
#define REG_USB_SPECIAL_OPTION 0xfe55
+#define USB_SPEC_USB_AGG_ENABLE BIT(3) /* Enable USB aggregation */
+#define USB_SPEC_INT_BULK_SELECT BIT(4) /* Use interrupt endpoint to
+ deliver interrupt packet.
+ 0: Use int, 1: use bulk */
#define REG_USB_HRPWM 0xfe58
#define REG_USB_DMA_AGG_TO 0xfe5b
-#define REG_USB_AGG_TO 0xfe5c
-#define REG_USB_AGG_TH 0xfe5d
+#define REG_USB_AGG_TIMEOUT 0xfe5c
+#define REG_USB_AGG_THRESH 0xfe5d
#define REG_NORMAL_SIE_VID 0xfe60 /* 0xfe60 - 0xfe61 */
#define REG_NORMAL_SIE_PID 0xfe62 /* 0xfe62 - 0xfe63 */
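Going by the new bit-layout comment on REG_RXDMA_AGG_PG_TH, the low byte holds the page threshold and bits 8-14 the timeout, which is why the driver writes the threshold at the register address and the timeout at address + 1. A sketch of the same value packed as one 16-bit word; the helper name is invented, and the bit-15 enable follows the comment rather than a datasheet:

#include <stdint.h>

static uint16_t rxdma_agg_pg_th(uint8_t pages, uint8_t timeout, int enable)
{
	return (uint16_t)pages |			/* bits 0-7: size */
	       ((uint16_t)(timeout & 0x7f) << 8) |	/* bits 8-14: timeout */
	       (enable ? (uint16_t)1 << 15 : 0);	/* bit 15: enable */
}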
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
index b660c214d..91cc1397b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -901,7 +901,7 @@ void exhalbtc_stack_update_profile_info(void)
{
}
-void exhalbtc_update_min_bt_rssi(char bt_rssi)
+void exhalbtc_update_min_bt_rssi(s8 bt_rssi)
{
struct btc_coexist *btcoexist = &gl_bt_coexist;
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
index 3cbe34c53..3d308ebbe 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
@@ -433,7 +433,7 @@ struct btc_stack_info {
u8 num_of_hid;
bool pan_exist;
bool unknown_acl_exist;
- char min_bt_rssi;
+ s8 min_bt_rssi;
};
struct btc_statistics {
@@ -537,7 +537,7 @@ void exhalbtc_dbg_control(struct btc_coexist *btcoexist, u8 code, u8 len,
void exhalbtc_stack_update_profile_info(void);
void exhalbtc_set_hci_version(u16 hci_version);
void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version);
-void exhalbtc_update_min_bt_rssi(char bt_rssi);
+void exhalbtc_update_min_bt_rssi(s8 bt_rssi);
void exhalbtc_set_bt_exist(bool bt_exist);
void exhalbtc_set_chip_type(u8 chip_type);
void exhalbtc_set_ant_num(struct rtl_priv *rtlpriv, u8 type, u8 ant_num);
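The char-to-s8 conversions here and in the hunks below are not cosmetic: these fields hold negative RSSI and dB values, and whether plain char is signed is implementation-defined in C. Linux targets such as ARM, PowerPC and s390 build with unsigned char, where storing -6 in a char yields 250 and every sign test silently changes meaning. A two-line demonstration, compilable on any host:

#include <stdio.h>

int main(void)
{
	char c = -6;		/* implementation-defined signedness */
	signed char s = -6;	/* what the kernel's s8 guarantees */

	/* Built with -funsigned-char (the ARM default) this prints
	 * "char: 250, s8: -6"; with signed char both print -6. */
	printf("char: %d, s8: %d\n", (int)c, (int)s);
	return 0;
}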
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index e6e81296c..9698cc583 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -526,7 +526,7 @@ static void _rtl_add_wowlan_patterns(struct ieee80211_hw *hw,
/* 3. calculate crc */
rtl_pattern.crc = _calculate_wol_pattern_crc(content, len);
RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "CRC_Remainder = 0x%x", rtl_pattern.crc);
+ "CRC_Remainder = 0x%x\n", rtl_pattern.crc);
/* 4. write crc & mask_for_hw to hw */
rtlpriv->cfg->ops->add_wowlan_pattern(hw, &rtl_pattern, i);
diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.c b/drivers/net/wireless/realtek/rtlwifi/debug.c
index fd25abad2..33905bbac 100644
--- a/drivers/net/wireless/realtek/rtlwifi/debug.c
+++ b/drivers/net/wireless/realtek/rtlwifi/debug.c
@@ -48,3 +48,28 @@ void rtl_dbgp_flag_init(struct ieee80211_hw *hw)
/*Init Debug flag enable condition */
}
EXPORT_SYMBOL_GPL(rtl_dbgp_flag_init);
+
+#ifdef CONFIG_RTLWIFI_DEBUG
+void _rtl_dbg_trace(struct rtl_priv *rtlpriv, int comp, int level,
+ const char *modname, const char *fmt, ...)
+{
+ if (unlikely((comp & rtlpriv->dbg.global_debugcomponents) &&
+ (level <= rtlpriv->dbg.global_debuglevel))) {
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ printk(KERN_DEBUG "%s:%ps:<%lx-%x> %pV",
+ modname, __builtin_return_address(0),
+ in_interrupt(), in_atomic(),
+ &vaf);
+
+ va_end(args);
+ }
+}
+EXPORT_SYMBOL_GPL(_rtl_dbg_trace);
+#endif
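The new _rtl_dbg_trace() leans on the kernel's %pV printk extension: struct va_format bundles a format string with a pointer to its va_list, so a wrapper can forward its caller's varargs through a single printk without pre-formatting into a buffer. The pattern reduced to a minimal kernel-context sketch (example_dbg is an invented name):

#include <linux/kernel.h>	/* struct va_format, printk, %pV */

__printf(1, 2)
static void example_dbg(const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_DEBUG "example: %pV", &vaf);	/* expands fmt/args */
	va_end(args);
}

Moving RT_TRACE out of line this way also shrinks every call site from an inlined printk to a single function call.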
diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.h b/drivers/net/wireless/realtek/rtlwifi/debug.h
index fc794b3e9..6156a7932 100644
--- a/drivers/net/wireless/realtek/rtlwifi/debug.h
+++ b/drivers/net/wireless/realtek/rtlwifi/debug.h
@@ -174,15 +174,16 @@ do { \
} \
} while (0)
+
+struct rtl_priv;
+
+__printf(5, 6)
+void _rtl_dbg_trace(struct rtl_priv *rtlpriv, int comp, int level,
+ const char *modname, const char *fmt, ...);
+
#define RT_TRACE(rtlpriv, comp, level, fmt, ...) \
-do { \
- if (unlikely(((comp) & rtlpriv->dbg.global_debugcomponents) && \
- ((level) <= rtlpriv->dbg.global_debuglevel))) { \
- printk(KERN_DEBUG KBUILD_MODNAME ":%s():<%lx-%x> " fmt, \
- __func__, in_interrupt(), in_atomic(), \
- ##__VA_ARGS__); \
- } \
-} while (0)
+ _rtl_dbg_trace(rtlpriv, comp, level, \
+ KBUILD_MODNAME, fmt, ##__VA_ARGS__)
#define RTPRINT(rtlpriv, dbgtype, dbgflag, fmt, ...) \
do { \
diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c
index 0b4082c92..7becfef6c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/efuse.c
+++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c
@@ -24,6 +24,7 @@
*****************************************************************************/
#include "wifi.h"
#include "efuse.h"
+#include "pci.h"
#include <linux/export.h>
static const u8 MAX_PGPKT_SIZE = 9;
@@ -1243,3 +1244,80 @@ static u8 efuse_calculate_word_cnts(u8 word_en)
return word_cnts;
}
+int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv,
+ int max_size, u8 *hwinfo, int *params)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+ struct device *dev = &rtlpcipriv->dev.pdev->dev;
+ u16 eeprom_id;
+ u16 i, usvalue;
+
+ switch (rtlefuse->epromtype) {
+ case EEPROM_BOOT_EFUSE:
+ rtl_efuse_shadow_map_update(hw);
+ break;
+
+ case EEPROM_93C46:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ "RTL8XXX did not boot from eeprom, check it !!\n");
+ return 1;
+
+ default:
+ dev_warn(dev, "no efuse data\n");
+ return 1;
+ }
+
+ memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], max_size);
+
+ RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP",
+ hwinfo, max_size);
+
+ eeprom_id = *((u16 *)&hwinfo[0]);
+ if (eeprom_id != params[0]) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
+ rtlefuse->autoload_failflag = true;
+ } else {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+ rtlefuse->autoload_failflag = false;
+ }
+
+ if (rtlefuse->autoload_failflag)
+ return 1;
+
+ rtlefuse->eeprom_vid = *(u16 *)&hwinfo[params[1]];
+ rtlefuse->eeprom_did = *(u16 *)&hwinfo[params[2]];
+ rtlefuse->eeprom_svid = *(u16 *)&hwinfo[params[3]];
+ rtlefuse->eeprom_smid = *(u16 *)&hwinfo[params[4]];
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROMId = 0x%4x\n", eeprom_id);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
+
+ for (i = 0; i < 6; i += 2) {
+ usvalue = *(u16 *)&hwinfo[params[5] + i];
+ *((u16 *)(&rtlefuse->dev_addr[i])) = usvalue;
+ }
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr);
+
+ rtlefuse->eeprom_channelplan = hwinfo[params[6]];
+ rtlefuse->eeprom_version = *(u16 *)&hwinfo[params[7]];
+ rtlefuse->txpwr_fromeprom = true;
+ rtlefuse->eeprom_oemid = hwinfo[params[8]];
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
+
+ /* set channel plan to world wide 13 */
+ rtlefuse->channel_plan = params[9];
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl_get_hwinfo);
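rtl_get_hwinfo() treats params[] as a positional contract: slot 0 is the expected EEPROM id, slots 1-8 are byte offsets into the efuse shadow map, and slot 9 is a default channel-plan value (the 8192cu caller below passes 0 there). The enum below merely names those positions for readability; the names are invented and the drivers pass bare int arrays:

/* Sketch: invented names for the rtl_get_hwinfo() params[] slots. */
enum rtl_hwinfo_param {
	HWINFO_EEPROM_ID,		/* expected EEPROM id word (a value) */
	HWINFO_VID_OFFSET,		/* offsets into the efuse map ... */
	HWINFO_DID_OFFSET,
	HWINFO_SVID_OFFSET,
	HWINFO_SMID_OFFSET,
	HWINFO_MAC_ADDR_OFFSET,
	HWINFO_CHANNELPLAN_OFFSET,
	HWINFO_VERSION_OFFSET,
	HWINFO_CUSTOMER_ID_OFFSET,
	HWINFO_DEFAULT_CHANNELPLAN,	/* a value, not an offset */
};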
diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.h b/drivers/net/wireless/realtek/rtlwifi/efuse.h
index be02e7894..51aa1210d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/efuse.h
+++ b/drivers/net/wireless/realtek/rtlwifi/efuse.h
@@ -109,5 +109,7 @@ bool efuse_shadow_update_chk(struct ieee80211_hw *hw);
void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw);
void efuse_force_write_vendor_Id(struct ieee80211_hw *hw);
void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
+int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv,
+ int max_size, u8 *hwinfo, int *params);
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index 93579cac0..9a64f9b70 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -76,9 +76,9 @@ bool rtl_ps_disable_nic(struct ieee80211_hw *hw)
}
EXPORT_SYMBOL(rtl_ps_disable_nic);
-bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
- enum rf_pwrstate state_toset,
- u32 changesource, bool protect_or_not)
+static bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate state_toset,
+ u32 changesource)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
@@ -86,9 +86,6 @@ bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
bool actionallowed = false;
u16 rfwait_cnt = 0;
- if (protect_or_not)
- goto no_protect;
-
/*Only one thread can change
*the RF state at one time, and others
*should wait to be executed.
@@ -119,7 +116,6 @@ bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
}
}
-no_protect:
rtstate = ppsc->rfpwr_state;
switch (state_toset) {
@@ -162,15 +158,12 @@ no_protect:
if (actionallowed)
rtlpriv->cfg->ops->set_rf_power_state(hw, state_toset);
- if (!protect_or_not) {
- spin_lock(&rtlpriv->locks.rf_ps_lock);
- ppsc->rfchange_inprogress = false;
- spin_unlock(&rtlpriv->locks.rf_ps_lock);
- }
+ spin_lock(&rtlpriv->locks.rf_ps_lock);
+ ppsc->rfchange_inprogress = false;
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
return actionallowed;
}
-EXPORT_SYMBOL(rtl_ps_set_rf_state);
static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw)
{
@@ -191,7 +184,7 @@ static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw)
}
rtl_ps_set_rf_state(hw, ppsc->inactive_pwrstate,
- RF_CHANGE_BY_IPS, false);
+ RF_CHANGE_BY_IPS);
if (ppsc->inactive_pwrstate == ERFOFF &&
rtlhal->interface == INTF_PCI) {
@@ -587,7 +580,7 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
}
spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
- rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS, false);
+ rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS);
spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
}
@@ -630,7 +623,7 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
spin_unlock(&rtlpriv->locks.rf_ps_lock);
spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
- rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS , false);
+ rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS);
spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.h b/drivers/net/wireless/realtek/rtlwifi/ps.h
index 29dfc5142..0df2b5203 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.h
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.h
@@ -28,9 +28,6 @@
#define MAX_SW_LPS_SLEEP_INTV 5
-bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
- enum rf_pwrstate state_toset, u32 changesource,
- bool protect_or_not);
bool rtl_ps_enable_nic(struct ieee80211_hw *hw);
bool rtl_ps_disable_nic(struct ieee80211_hw *hw);
void rtl_ips_nic_off(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c
index 1aca77719..ce8621a0f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rc.c
@@ -94,7 +94,7 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
struct ieee80211_sta *sta,
struct ieee80211_tx_rate *rate,
struct ieee80211_tx_rate_control *txrc,
- u8 tries, char rix, int rtsctsenable,
+ u8 tries, s8 rix, int rtsctsenable,
bool not_data)
{
struct rtl_mac *mac = rtl_mac(rtlpriv);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile
index a85419a37..676e7de27 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile
@@ -12,4 +12,4 @@ rtl8188ee-objs := \
obj-$(CONFIG_RTL8188EE) += rtl8188ee.o
-ccflags-y += -Idrivers/net/wireless/rtlwifi -D__CHECK_ENDIAN__
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
index db9a7829d..f936a4913 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
@@ -886,7 +886,7 @@ static void dm_txpower_track_cb_therm(struct ieee80211_hw *hw)
u8 thermalvalue_avg_count = 0;
u32 thermalvalue_avg = 0;
long ele_d, temp_cck;
- char ofdm_index[2], cck_index = 0,
+ s8 ofdm_index[2], cck_index = 0,
ofdm_index_old[2] = {0, 0}, cck_index_old = 0;
int i = 0;
/*bool is2t = false;*/
@@ -898,7 +898,7 @@ static void dm_txpower_track_cb_therm(struct ieee80211_hw *hw)
/*0.1 the following TWO tables decide the
*final index of OFDM/CCK swing table
*/
- char delta_swing_table_idx[2][15] = {
+ s8 delta_swing_table_idx[2][15] = {
{0, 0, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11},
{0, 0, -1, -2, -3, -4, -4, -4, -4, -5, -7, -8, -9, -9, -10}
};
@@ -1790,6 +1790,7 @@ void rtl88e_dm_watchdog(struct ieee80211_hw *hw)
if (ppsc->p2p_ps_info.p2p_ps_mode)
fw_ps_awake = false;
+ spin_lock(&rtlpriv->locks.rf_ps_lock);
if ((ppsc->rfpwr_state == ERFON) &&
((!fw_current_inpsmode) && fw_ps_awake) &&
(!ppsc->rfchange_inprogress)) {
@@ -1802,4 +1803,5 @@ void rtl88e_dm_watchdog(struct ieee80211_hw *hw)
rtl88e_dm_check_edca_turbo(hw);
rtl88e_dm_antenna_diversity(hw);
}
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
index 8ee83b093..4ab6201da 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
@@ -1835,74 +1835,24 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u16 i, usvalue;
- u8 hwinfo[HWSET_MAX_SIZE];
- u16 eeprom_id;
-
- if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
- rtl_efuse_shadow_map_update(hw);
-
- memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
- HWSET_MAX_SIZE);
- } else if (rtlefuse->epromtype == EEPROM_93C46) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "RTL819X Not boot from eeprom, check it !!");
- return;
- } else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "boot from neither eeprom nor efuse, check it !!");
+ int params[] = {RTL8188E_EEPROM_ID, EEPROM_VID, EEPROM_DID,
+ EEPROM_SVID, EEPROM_SMID, EEPROM_MAC_ADDR,
+ EEPROM_CHANNELPLAN, EEPROM_VERSION, EEPROM_CUSTOMER_ID,
+ COUNTRY_CODE_WORLD_WIDE_13};
+ u8 *hwinfo;
+
+ hwinfo = kzalloc(HWSET_MAX_SIZE, GFP_KERNEL);
+ if (!hwinfo)
return;
- }
-
- RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP\n",
- hwinfo, HWSET_MAX_SIZE);
- eeprom_id = *((u16 *)&hwinfo[0]);
- if (eeprom_id != RTL8188E_EEPROM_ID) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
- rtlefuse->autoload_failflag = true;
- } else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
- rtlefuse->autoload_failflag = false;
- }
+ if (rtl_get_hwinfo(hw, rtlpriv, HWSET_MAX_SIZE, hwinfo, params))
+ goto exit;
- if (rtlefuse->autoload_failflag == true)
- return;
- /*VID DID SVID SDID*/
- rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID];
- rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID];
- rtlefuse->eeprom_svid = *(u16 *)&hwinfo[EEPROM_SVID];
- rtlefuse->eeprom_smid = *(u16 *)&hwinfo[EEPROM_SMID];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROMId = 0x%4x\n", eeprom_id);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
- /*customer ID*/
- rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID];
if (rtlefuse->eeprom_oemid == 0xFF)
- rtlefuse->eeprom_oemid = 0;
+ rtlefuse->eeprom_oemid = 0;
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
- /*EEPROM version*/
- rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
- /*mac address*/
- for (i = 0; i < 6; i += 2) {
- usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i];
- *((u16 *)(&rtlefuse->dev_addr[i])) = usvalue;
- }
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "dev_addr: %pM\n", rtlefuse->dev_addr);
- /*channel plan */
- rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
/* set channel plan from efuse */
rtlefuse->channel_plan = rtlefuse->eeprom_channelplan;
/*tx power*/
@@ -1976,6 +1926,8 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw)
}
}
+exit:
+ kfree(hwinfo);
}
static void _rtl88ee_hal_customized_behavior(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
index 416a9ba63..7498a1218 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
@@ -373,7 +373,7 @@ static bool _rtl88e_phy_bb8188e_config_parafile(struct ieee80211_hw *hw)
rtstatus = phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_PHY_REG);
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!");
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n");
return false;
}
@@ -383,7 +383,7 @@ static bool _rtl88e_phy_bb8188e_config_parafile(struct ieee80211_hw *hw)
phy_config_bb_with_pghdr(hw, BASEBAND_CONFIG_PHY_REG);
}
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!");
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n");
return false;
}
rtstatus =
@@ -1239,7 +1239,7 @@ u8 rtl88e_phy_sw_chnl(struct ieee80211_hw *hw)
if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
rtl88e_phy_sw_chnl_callback(hw);
RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false schdule workitem current channel %d\n",
+ "sw_chnl_inprogress false schedule workitem current channel %d\n",
rtlphy->current_channel);
rtlphy->sw_chnl_inprogress = false;
} else {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c
index 40893cef7..26ac4c290 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c
@@ -498,7 +498,7 @@ static bool _rtl88e_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
if (rtstatus != true) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio[%d] Fail!!", rfpath);
+ "Radio[%d] Fail!!\n", rfpath);
return false;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
index 11701064b..3e3b88664 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
@@ -59,7 +59,7 @@ static void _rtl88ee_query_rxphystatus(struct ieee80211_hw *hw,
struct phy_status_rpt *phystrpt =
(struct phy_status_rpt *)p_drvinfo;
struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
- char rx_pwr_all = 0, rx_pwr[4];
+ s8 rx_pwr_all = 0, rx_pwr[4];
u8 rf_rx_num = 0, evm, pwdb_all;
u8 i, max_spatial_stream;
u32 rssi, total_rssi = 0;
@@ -540,7 +540,7 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error");
+ "DMA mapping error\n");
return;
}
CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_88e));
@@ -703,7 +703,7 @@ void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw,
if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error");
+ "DMA mapping error\n");
return;
}
CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h
index 5a24d194a..9a1c2087a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h
@@ -593,8 +593,8 @@ struct rx_fwinfo_88e {
u8 pwdb_all;
u8 cfosho[4];
u8 cfotail[4];
- char rxevm[2];
- char rxsnr[4];
+ s8 rxevm[2];
+ s8 rxsnr[4];
u8 pdsnr[2];
u8 csi_current[2];
u8 csi_target[2];
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h
index 4422e31fe..6a72d0c8a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h
@@ -135,7 +135,7 @@ void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw);
void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw);
void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal);
-void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
+void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta);
void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw);
void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery);
void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
index 77e61b19b..60ab2ec4f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
@@ -213,7 +213,7 @@ bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
rtstatus = rtlpriv->cfg->ops->config_bb_with_headerfile(hw,
BASEBAND_CONFIG_PHY_REG);
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!");
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n");
return false;
}
if (rtlphy->rf_type == RF_1T2R) {
@@ -226,7 +226,7 @@ bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
BASEBAND_CONFIG_PHY_REG);
}
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!");
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n");
return false;
}
rtstatus = rtlpriv->cfg->ops->config_bb_with_headerfile(hw,
@@ -757,7 +757,7 @@ u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw)
if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
rtl92c_phy_sw_chnl_callback(hw);
RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false schdule workitem\n");
+ "sw_chnl_inprogress false schedule workitem\n");
rtlphy->sw_chnl_inprogress = false;
} else {
RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
@@ -1353,7 +1353,7 @@ static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
}
static void _rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw,
- char delta, bool is2t)
+ s8 delta, bool is2t)
{
}
@@ -1518,7 +1518,7 @@ void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw)
}
EXPORT_SYMBOL(rtl92c_phy_lc_calibrate);
-void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta)
+void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h
index 64bc49f4d..202412577 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h
@@ -210,7 +210,7 @@ u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw);
void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw,
u16 beaconinterval);
-void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
+void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta);
void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw);
void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
index 04eb5c3f8..244607951 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
@@ -1680,58 +1680,18 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u16 i, usvalue;
- u8 hwinfo[HWSET_MAX_SIZE];
- u16 eeprom_id;
-
- if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
- rtl_efuse_shadow_map_update(hw);
-
- memcpy((void *)hwinfo,
- (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
- HWSET_MAX_SIZE);
- } else if (rtlefuse->epromtype == EEPROM_93C46) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "RTL819X Not boot from eeprom, check it !!");
- }
-
- RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP",
- hwinfo, HWSET_MAX_SIZE);
-
- eeprom_id = *((u16 *)&hwinfo[0]);
- if (eeprom_id != RTL8190_EEPROM_ID) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
- rtlefuse->autoload_failflag = true;
- } else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
- rtlefuse->autoload_failflag = false;
- }
-
- if (rtlefuse->autoload_failflag)
+ int params[] = {RTL8190_EEPROM_ID, EEPROM_VID, EEPROM_DID,
+ EEPROM_SVID, EEPROM_SMID, EEPROM_MAC_ADDR,
+ EEPROM_CHANNELPLAN, EEPROM_VERSION, EEPROM_CUSTOMER_ID,
+ COUNTRY_CODE_WORLD_WIDE_13};
+ u8 *hwinfo;
+
+ hwinfo = kzalloc(HWSET_MAX_SIZE, GFP_KERNEL);
+ if (!hwinfo)
return;
- rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID];
- rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID];
- rtlefuse->eeprom_svid = *(u16 *)&hwinfo[EEPROM_SVID];
- rtlefuse->eeprom_smid = *(u16 *)&hwinfo[EEPROM_SMID];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROMId = 0x%4x\n", eeprom_id);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
-
- for (i = 0; i < 6; i += 2) {
- usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i];
- *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue;
- }
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr);
+ if (rtl_get_hwinfo(hw, rtlpriv, HWSET_MAX_SIZE, hwinfo, params))
+ goto exit;
_rtl92ce_read_txpower_info_from_hwpg(hw,
rtlefuse->autoload_failflag,
@@ -1740,18 +1700,6 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
rtl8192ce_read_bt_coexist_info_from_hwpg(hw,
rtlefuse->autoload_failflag,
hwinfo);
-
- rtlefuse->eeprom_channelplan = *&hwinfo[EEPROM_CHANNELPLAN];
- rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
- rtlefuse->txpwr_fromeprom = true;
- rtlefuse->eeprom_oemid = *&hwinfo[EEPROM_CUSTOMER_ID];
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
-
- /* set channel paln to world wide 13 */
- rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;
-
if (rtlhal->oem_id == RT_CID_DEFAULT) {
switch (rtlefuse->eeprom_oemid) {
case EEPROM_CID_DEFAULT:
@@ -1775,10 +1723,10 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
default:
rtlhal->oem_id = RT_CID_DEFAULT;
break;
-
}
}
-
+exit:
+ kfree(hwinfo);
}
static void _rtl92ce_hal_customized_behavior(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h
index e5e1353a9..dadc02b5d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h
@@ -102,7 +102,7 @@ void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw);
u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw);
void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw, u16 beaconinterval);
-void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
+void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta);
void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw);
void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
index 84ddd4d07..781af1b99 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
@@ -49,7 +49,7 @@ static u8 _rtl92ce_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
return skb->priority;
}
-static u8 _rtl92c_query_rxpwrpercentage(char antpower)
+static u8 _rtl92c_query_rxpwrpercentage(s8 antpower)
{
if ((antpower <= -100) || (antpower >= 20))
return 0;
@@ -59,9 +59,9 @@ static u8 _rtl92c_query_rxpwrpercentage(char antpower)
return 100 + antpower;
}
-static u8 _rtl92c_evm_db_to_percentage(char value)
+static u8 _rtl92c_evm_db_to_percentage(s8 value)
{
- char ret_val;
+ s8 ret_val;
ret_val = value;
if (ret_val >= 0)
@@ -449,7 +449,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error");
+ "DMA mapping error\n");
return;
}
rcu_read_lock();
@@ -615,7 +615,7 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error");
+ "DMA mapping error\n");
return;
}
CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h
index 4bec4b07e..607304586 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h
@@ -537,8 +537,8 @@ struct rx_fwinfo_92c {
u8 pwdb_all;
u8 cfosho[4];
u8 cfotail[4];
- char rxevm[2];
- char rxsnr[4];
+ s8 rxevm[2];
+ s8 rxsnr[4];
u8 pdsnr[2];
u8 csi_current[2];
u8 csi_target[2];
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index 34ce06441..8789752f8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
@@ -347,50 +347,24 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u16 i, usvalue;
- u8 hwinfo[HWSET_MAX_SIZE] = {0};
- u16 eeprom_id;
-
- if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
- rtl_efuse_shadow_map_update(hw);
- memcpy((void *)hwinfo,
- (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
- HWSET_MAX_SIZE);
- } else if (rtlefuse->epromtype == EEPROM_93C46) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "RTL819X Not boot from eeprom, check it !!\n");
- }
- RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, "MAP",
- hwinfo, HWSET_MAX_SIZE);
- eeprom_id = le16_to_cpu(*((__le16 *)&hwinfo[0]));
- if (eeprom_id != RTL8190_EEPROM_ID) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
- rtlefuse->autoload_failflag = true;
- } else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
- rtlefuse->autoload_failflag = false;
- }
- if (rtlefuse->autoload_failflag)
+ int params[] = {RTL8190_EEPROM_ID, EEPROM_VID, EEPROM_DID,
+ EEPROM_SVID, EEPROM_SMID, EEPROM_MAC_ADDR,
+ EEPROM_CHANNELPLAN, EEPROM_VERSION, EEPROM_CUSTOMER_ID,
+ 0};
+ u8 *hwinfo;
+
+ hwinfo = kzalloc(HWSET_MAX_SIZE, GFP_KERNEL);
+ if (!hwinfo)
return;
- for (i = 0; i < 6; i += 2) {
- usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i];
- *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue;
- }
- pr_info("MAC address: %pM\n", rtlefuse->dev_addr);
+
+ if (rtl_get_hwinfo(hw, rtlpriv, HWSET_MAX_SIZE, hwinfo, params))
+ goto exit;
+
_rtl92cu_read_txpower_info_from_hwpg(hw,
rtlefuse->autoload_failflag, hwinfo);
- rtlefuse->eeprom_vid = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VID]);
- rtlefuse->eeprom_did = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_DID]);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, " VID = 0x%02x PID = 0x%02x\n",
- rtlefuse->eeprom_vid, rtlefuse->eeprom_did);
- rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
- rtlefuse->eeprom_version =
- le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VERSION]);
+ _rtl92cu_read_board_type(hw, hwinfo);
+
rtlefuse->txpwr_fromeprom = true;
- rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM Customer ID: 0x%2x\n",
- rtlefuse->eeprom_oemid);
if (rtlhal->oem_id == RT_CID_DEFAULT) {
switch (rtlefuse->eeprom_oemid) {
case EEPROM_CID_DEFAULT:
@@ -416,7 +390,8 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
break;
}
}
- _rtl92cu_read_board_type(hw, hwinfo);
+exit:
+ kfree(hwinfo);
}
static void _rtl92cu_hal_customized_behavior(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
index 035713311..68ca73485 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
@@ -596,7 +596,7 @@ void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T)
/*==============================================================*/
-static u8 _rtl92c_query_rxpwrpercentage(char antpower)
+static u8 _rtl92c_query_rxpwrpercentage(s8 antpower)
{
if ((antpower <= -100) || (antpower >= 20))
return 0;
@@ -606,9 +606,9 @@ static u8 _rtl92c_query_rxpwrpercentage(char antpower)
return 100 + antpower;
}
-static u8 _rtl92c_evm_db_to_percentage(char value)
+static u8 _rtl92c_evm_db_to_percentage(s8 value)
{
- char ret_val;
+ s8 ret_val;
ret_val = value;
if (ret_val >= 0)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h
index 553a4bfac..20a49ec84 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h
@@ -79,8 +79,8 @@ struct rx_fwinfo_92c {
u8 pwdb_all;
u8 cfosho[4];
u8 cfotail[4];
- char rxevm[2];
- char rxsnr[4];
+ s8 rxevm[2];
+ s8 rxsnr[4];
u8 pdsnr[2];
u8 csi_current[2];
u8 csi_target[2];
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
index 5624ade92..ec2ea56f7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
@@ -465,7 +465,7 @@ static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
}
if (!rtstatus) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio[%d] Fail!!", rfpath);
+ "Radio[%d] Fail!!\n", rfpath);
goto phy_rf_cfg_fail;
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
index f49b60d31..b0f632462 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
@@ -1744,65 +1744,26 @@ static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u16 i, usvalue;
- u8 hwinfo[HWSET_MAX_SIZE];
- u16 eeprom_id;
- unsigned long flags;
+ int params[] = {RTL8190_EEPROM_ID, EEPROM_VID, EEPROM_DID,
+ EEPROM_SVID, EEPROM_SMID, EEPROM_MAC_ADDR_MAC0_92D,
+ EEPROM_CHANNEL_PLAN, EEPROM_VERSION, EEPROM_CUSTOMER_ID,
+ COUNTRY_CODE_WORLD_WIDE_13};
+ int i;
+ u16 usvalue;
+ u8 *hwinfo;
- if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
- spin_lock_irqsave(&globalmutex_for_power_and_efuse, flags);
- rtl_efuse_shadow_map_update(hw);
- _rtl92de_efuse_update_chip_version(hw);
- spin_unlock_irqrestore(&globalmutex_for_power_and_efuse, flags);
- memcpy((void *)hwinfo, (void *)&rtlefuse->efuse_map
- [EFUSE_INIT_MAP][0],
- HWSET_MAX_SIZE);
- } else if (rtlefuse->epromtype == EEPROM_93C46) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "RTL819X Not boot from eeprom, check it !!\n");
- }
- RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP",
- hwinfo, HWSET_MAX_SIZE);
+ hwinfo = kzalloc(HWSET_MAX_SIZE, GFP_KERNEL);
+ if (!hwinfo)
+ return;
- eeprom_id = *((u16 *)&hwinfo[0]);
- if (eeprom_id != RTL8190_EEPROM_ID) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
- rtlefuse->autoload_failflag = true;
- } else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
- rtlefuse->autoload_failflag = false;
- }
- if (rtlefuse->autoload_failflag) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "RTL819X Not boot from eeprom, check it !!\n");
+ if (rtl_get_hwinfo(hw, rtlpriv, HWSET_MAX_SIZE, hwinfo, params)) {
+ kfree(hwinfo);
return;
 }
- rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID];
- _rtl92de_read_macphymode_and_bandtype(hw, hwinfo);
- /* VID, DID SE 0xA-D */
- rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID];
- rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID];
- rtlefuse->eeprom_svid = *(u16 *)&hwinfo[EEPROM_SVID];
- rtlefuse->eeprom_smid = *(u16 *)&hwinfo[EEPROM_SMID];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROMId = 0x%4x\n", eeprom_id);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
+ _rtl92de_efuse_update_chip_version(hw);
+ _rtl92de_read_macphymode_and_bandtype(hw, hwinfo);
- /* Read Permanent MAC address */
- if (rtlhal->interfaceindex == 0) {
- for (i = 0; i < 6; i += 2) {
- usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR_MAC0_92D + i];
- *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue;
- }
- } else {
+ /* Read Permanent MAC address for 2nd interface */
+ if (rtlhal->interfaceindex != 0) {
for (i = 0; i < 6; i += 2) {
usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR_MAC1_92D + i];
*((u16 *) (&rtlefuse->dev_addr[i])) = usvalue;
@@ -1828,10 +1789,8 @@ static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw)
rtlefuse->channel_plan = COUNTRY_CODE_FCC;
break;
}
- rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
rtlefuse->txpwr_fromeprom = true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
+ kfree(hwinfo);
}
void rtl92de_read_eeprom_info(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
index 7810fe87d..d334d2a5e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
@@ -2695,7 +2695,7 @@ void rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw)
RTPRINT(rtlpriv, FINIT, INIT_IQK, "LCK:Finish!!!\n");
}
-void rtl92d_phy_ap_calibrate(struct ieee80211_hw *hw, char delta)
+void rtl92d_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta)
{
return;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h
index 48d5c6835..8115bf4ac 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h
@@ -160,7 +160,7 @@ void rtl92d_phy_config_maccoexist_rfpage(struct ieee80211_hw *hw);
bool rtl92d_phy_check_poweroff(struct ieee80211_hw *hw);
void rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw);
void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw);
-void rtl92d_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
+void rtl92d_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta);
void rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw);
void rtl92d_phy_reset_iqk_result(struct ieee80211_hw *hw);
void rtl92d_release_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
index 6a6ac540d..2f479d397 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
@@ -601,7 +601,7 @@ bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw)
}
if (!rtstatus) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio[%d] Fail!!", rfpath);
+ "Radio[%d] Fail!!\n", rfpath);
goto phy_rf_cfg_fail;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
index 1feaa629d..e998e98d7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
@@ -48,7 +48,7 @@ static u8 _rtl92de_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
return skb->priority;
}
-static u8 _rtl92d_query_rxpwrpercentage(char antpower)
+static u8 _rtl92d_query_rxpwrpercentage(s8 antpower)
{
if ((antpower <= -100) || (antpower >= 20))
return 0;
@@ -58,9 +58,9 @@ static u8 _rtl92d_query_rxpwrpercentage(char antpower)
return 100 + antpower;
}
-static u8 _rtl92d_evm_db_to_percentage(char value)
+static u8 _rtl92d_evm_db_to_percentage(s8 value)
{
- char ret_val = value;
+ s8 ret_val = value;
if (ret_val >= 0)
ret_val = 0;
@@ -586,7 +586,7 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error");
+ "DMA mapping error\n");
return;
}
CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_92d));
@@ -744,7 +744,7 @@ void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw,
if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error");
+ "DMA mapping error\n");
return;
}
CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
index fb5cf0634..194d99f8b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
@@ -554,8 +554,8 @@ struct rx_fwinfo_92d {
u8 pwdb_all;
u8 cfosho[4];
u8 cfotail[4];
- char rxevm[2];
- char rxsnr[4];
+ s8 rxevm[2];
+ s8 rxsnr[4];
u8 pdsnr[2];
u8 csi_current[2];
u8 csi_target[2];
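
Note: the char to s8 substitutions running through trx.h, phy.c and stats.c address the fact that the signedness of plain char is implementation-defined: it is signed on x86 but unsigned on ARM and s390, so an RSSI of, say, -75 dBm stored in a char becomes a large positive value there and every range check misfires. A standalone sketch mirroring _rtl92d_query_rxpwrpercentage() above, with signed char playing the role of the kernel's s8:

    #include <stdio.h>

    static unsigned char pwr_percentage(signed char antpower)
    {
    	if (antpower <= -100 || antpower >= 20)
    		return 0;
    	if (antpower >= 0)
    		return 100;
    	return 100 + antpower;          /* -75 dBm -> 25% */
    }

    int main(void)
    {
    	/* with plain 'char' on an unsigned-char ABI, -75 would arrive as
    	 * 181 and the first check would wrongly return 0 */
    	printf("%u\n", pwr_percentage(-75));
    	return 0;
    }
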
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
index 459f3d0ef..e6b5786c7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
@@ -496,7 +496,7 @@ static void rtl92ee_dm_find_minimum_rssi(struct ieee80211_hw *hw)
rtl_dm_dig->min_undec_pwdb_for_dm =
rtlpriv->dm.entry_min_undec_sm_pwdb;
RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "AP Ext Port or disconnet PWDB = 0x%x\n",
+ "AP Ext Port or disconnect PWDB = 0x%x\n",
rtl_dm_dig->min_undec_pwdb_for_dm);
}
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
@@ -983,7 +983,7 @@ static bool _rtl92ee_dm_ra_state_check(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "wrong rssi level setting %d !", *ratr_state);
+ "wrong rssi level setting %d !\n", *ratr_state);
break;
}
@@ -1219,6 +1219,7 @@ void rtl92ee_dm_watchdog(struct ieee80211_hw *hw)
if (ppsc->p2p_ps_info.p2p_ps_mode)
fw_ps_awake = false;
+ spin_lock(&rtlpriv->locks.rf_ps_lock);
if ((ppsc->rfpwr_state == ERFON) &&
((!fw_current_inpsmode) && fw_ps_awake) &&
(!ppsc->rfchange_inprogress)) {
@@ -1233,4 +1234,5 @@ void rtl92ee_dm_watchdog(struct ieee80211_hw *hw)
rtl92ee_dm_dynamic_atc_switch(hw);
rtl92ee_dm_dynamic_primary_cca_ckeck(hw);
}
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
}
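
Note: the dm.c change brackets the watchdog's RF-state test and the dynamic-mechanism calls with rtlpriv->locks.rf_ps_lock, presumably so the power-save path cannot flip rfpwr_state or rfchange_inprogress between the check and the work it gates. A userspace sketch of the pattern, with a pthread mutex standing in for the spinlock; apart from the lock's name, the identifiers are illustrative:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t rf_ps_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool rf_on = true;           /* stands in for rfpwr_state == ERFON */
    static bool rfchange_inprogress;    /* toggled by the power-save path */

    static void watchdog(void)
    {
    	pthread_mutex_lock(&rf_ps_lock);
    	if (rf_on && !rfchange_inprogress)
    		puts("run dynamic mechanisms"); /* dig, tx power tracking, ... */
    	pthread_mutex_unlock(&rf_ps_lock);
    }

    int main(void)
    {
    	watchdog();
    	return 0;
    }
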
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
index 9fd3f1b6e..b07af8d15 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
@@ -2098,73 +2098,24 @@ static void _rtl92ee_read_adapter_info(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u16 i, usvalue;
- u8 hwinfo[HWSET_MAX_SIZE];
- u16 eeprom_id;
-
- if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
- rtl_efuse_shadow_map_update(hw);
-
- memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
- HWSET_MAX_SIZE);
- } else if (rtlefuse->epromtype == EEPROM_93C46) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "RTL819X Not boot from eeprom, check it !!");
- return;
- } else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "boot from neither eeprom nor efuse, check it !!");
+ int params[] = {RTL8192E_EEPROM_ID, EEPROM_VID, EEPROM_DID,
+ EEPROM_SVID, EEPROM_SMID, EEPROM_MAC_ADDR,
+ EEPROM_CHANNELPLAN, EEPROM_VERSION, EEPROM_CUSTOMER_ID,
+ COUNTRY_CODE_WORLD_WIDE_13};
+ u8 *hwinfo;
+
+ hwinfo = kzalloc(HWSET_MAX_SIZE, GFP_KERNEL);
+ if (!hwinfo)
return;
- }
- RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP\n",
- hwinfo, HWSET_MAX_SIZE);
+ if (rtl_get_hwinfo(hw, rtlpriv, HWSET_MAX_SIZE, hwinfo, params))
+ goto exit;
- eeprom_id = *((u16 *)&hwinfo[0]);
- if (eeprom_id != RTL8192E_EEPROM_ID) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
- rtlefuse->autoload_failflag = true;
- } else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
- rtlefuse->autoload_failflag = false;
- }
-
- if (rtlefuse->autoload_failflag)
- return;
- /*VID DID SVID SDID*/
- rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID];
- rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID];
- rtlefuse->eeprom_svid = *(u16 *)&hwinfo[EEPROM_SVID];
- rtlefuse->eeprom_smid = *(u16 *)&hwinfo[EEPROM_SMID];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROMId = 0x%4x\n", eeprom_id);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
- /*customer ID*/
- rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
if (rtlefuse->eeprom_oemid == 0xFF)
rtlefuse->eeprom_oemid = 0;
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
- /*EEPROM version*/
- rtlefuse->eeprom_version = *(u8 *)&hwinfo[EEPROM_VERSION];
- /*mac address*/
- for (i = 0; i < 6; i += 2) {
- usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i];
- *((u16 *)(&rtlefuse->dev_addr[i])) = usvalue;
- }
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "dev_addr: %pM\n", rtlefuse->dev_addr);
- /*channel plan */
- rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
/* set channel plan from efuse */
rtlefuse->channel_plan = rtlefuse->eeprom_channelplan;
/*tx power*/
@@ -2206,6 +2157,8 @@ static void _rtl92ee_read_adapter_info(struct ieee80211_hw *hw)
break;
}
}
+exit:
+ kfree(hwinfo);
}
static void _rtl92ee_hal_customized_behavior(struct ieee80211_hw *hw)
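
Note: _rtl92ee_read_adapter_info() now delegates the boilerplate (shadow-map update, EEPROM ID check, VID/DID/SVID/SMID, MAC, channel plan, version, OEM ID) to rtl_get_hwinfo(), driven by the params[] table: expected EEPROM ID first, then field offsets, with a fallback country code last, judging by the initializer. The buffer moves to the heap with a single exit label for cleanup. A userspace sketch of that alloc/single-exit idiom (names illustrative, not the rtlwifi API):

    #include <stdlib.h>
    #include <string.h>

    static int parse_hwinfo(const unsigned char *map, size_t len)
    {
    	unsigned char *hwinfo;
    	int ret = -1;

    	hwinfo = calloc(1, len);       /* kzalloc(HWSET_MAX_SIZE, GFP_KERNEL) */
    	if (!hwinfo)
    		return -1;

    	memcpy(hwinfo, map, len);
    	if (hwinfo[0] == 0xff)         /* stand-in for the EEPROM ID check */
    		goto exit;

    	/* ... parse the individual fields out of hwinfo ... */
    	ret = 0;
    exit:
    	free(hwinfo);                  /* kfree(hwinfo): one cleanup point */
    	return ret;
    }

    int main(void)
    {
    	unsigned char map[4] = { 0x29, 0x81, 0x00, 0x00 };
    	return parse_hwinfo(map, sizeof(map));
    }
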
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
index 018340aed..beafc9a10 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
@@ -547,7 +547,7 @@ static void _rtl92ee_phy_store_txpower_by_rate_base(struct ieee80211_hw *hw)
static void _phy_convert_txpower_dbm_to_relative_value(u32 *data, u8 start,
u8 end, u8 base)
{
- char i = 0;
+ s8 i = 0;
u8 tmp = 0;
u32 temp_data = 0;
@@ -650,7 +650,7 @@ static bool _rtl92ee_phy_bb8192ee_config_parafile(struct ieee80211_hw *hw)
rtstatus = phy_config_bb_with_hdr_file(hw, BASEBAND_CONFIG_PHY_REG);
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!");
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n");
return false;
}
@@ -662,7 +662,7 @@ static bool _rtl92ee_phy_bb8192ee_config_parafile(struct ieee80211_hw *hw)
}
_rtl92ee_phy_txpower_by_rate_configuration(hw);
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!");
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n");
return false;
}
rtstatus = phy_config_bb_with_hdr_file(hw, BASEBAND_CONFIG_AGC_TAB);
@@ -1189,7 +1189,7 @@ static u8 _rtl92ee_get_txpower_by_rate(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &rtlpriv->phy;
u8 shift = 0, sec, tx_num;
- char diff = 0;
+ s8 diff = 0;
sec = _rtl92ee_phy_get_ratesection_intxpower_byrate(rf, rate);
tx_num = RF_TX_NUM_NONIMPLEMENT;
@@ -1265,14 +1265,14 @@ static u8 _rtl92ee_get_txpower_index(struct ieee80211_hw *hw,
"Illegal channel!!\n");
}
- if (IS_CCK_RATE(rate))
+ if (IS_CCK_RATE((s8)rate))
tx_power = rtlefuse->txpwrlevel_cck[rfpath][index];
else if (DESC92C_RATE6M <= rate)
tx_power = rtlefuse->txpwrlevel_ht40_1s[rfpath][index];
/* OFDM-1T*/
if (DESC92C_RATE6M <= rate && rate <= DESC92C_RATE54M &&
- !IS_CCK_RATE(rate))
+ !IS_CCK_RATE((s8)rate))
tx_power += rtlefuse->txpwr_legacyhtdiff[rfpath][TX_1S];
/* BW20-1S, BW20-2S */
@@ -1819,7 +1819,7 @@ u8 rtl92ee_phy_sw_chnl(struct ieee80211_hw *hw)
if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
rtl92ee_phy_sw_chnl_callback(hw);
RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false schdule workitem current channel %d\n",
+ "sw_chnl_inprogress false schedule workitem current channel %d\n",
rtlphy->current_channel);
rtlphy->sw_chnl_inprogress = false;
} else {
@@ -2414,19 +2414,10 @@ static void _rtl92ee_phy_reload_mac_registers(struct ieee80211_hw *hw,
static void _rtl92ee_phy_path_adda_on(struct ieee80211_hw *hw, u32 *addareg,
bool is_patha_on, bool is2t)
{
- u32 pathon;
u32 i;
- pathon = is_patha_on ? 0x0fc01616 : 0x0fc01616;
- if (!is2t) {
- pathon = 0x0fc01616;
- rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0fc01616);
- } else {
- rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathon);
- }
-
- for (i = 1; i < IQK_ADDA_REG_NUM; i++)
- rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathon);
+ for (i = 0; i < IQK_ADDA_REG_NUM; i++)
+ rtl_set_bbreg(hw, addareg[i], MASKDWORD, 0x0fc01616);
}
static void _rtl92ee_phy_mac_setting_calibration(struct ieee80211_hw *hw,
@@ -2978,7 +2969,7 @@ void rtl92ee_phy_lc_calibrate(struct ieee80211_hw *hw)
rtlphy->lck_inprogress = false;
}
-void rtl92ee_phy_ap_calibrate(struct ieee80211_hw *hw, char delta)
+void rtl92ee_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta)
{
}
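
Note: in _rtl92ee_phy_path_adda_on() both arms of the removed ternary were the same constant (is_patha_on ? 0x0fc01616 : 0x0fc01616) and the !is2t branch wrote that value again, so the conditional carried no information; the hunk collapses the function to one loop over all ADDA registers. The parameters it no longer reads are apparently kept for the callers' sake. Reduced standalone form, with an array standing in for the rtl_set_bbreg() register writes:

    #include <stdio.h>

    #define IQK_ADDA_REG_NUM 16    /* sketch constant */

    static void adda_path_on(unsigned int *addareg)
    {
    	unsigned int i;

    	for (i = 0; i < IQK_ADDA_REG_NUM; i++)
    		addareg[i] = 0x0fc01616; /* rtl_set_bbreg(hw, addareg[i], ...) */
    }

    int main(void)
    {
    	unsigned int regs[IQK_ADDA_REG_NUM] = { 0 };

    	adda_path_on(regs);
    	printf("0x%08x\n", regs[0]);
    	return 0;
    }
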
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h
index c6e97c8df..49bd0e554 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h
@@ -141,7 +141,7 @@ void rtl92ee_phy_set_bw_mode(struct ieee80211_hw *hw,
void rtl92ee_phy_sw_chnl_callback(struct ieee80211_hw *hw);
u8 rtl92ee_phy_sw_chnl(struct ieee80211_hw *hw);
void rtl92ee_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
-void rtl92ee_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
+void rtl92ee_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta);
void rtl92ee_phy_lc_calibrate(struct ieee80211_hw *hw);
void rtl92ee_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
bool rtl92ee_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c
index c9bc33cd1..73716c07d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c
@@ -142,7 +142,7 @@ static bool _rtl92ee_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
if (!rtstatus) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio[%d] Fail!!", rfpath);
+ "Radio[%d] Fail!!\n", rfpath);
return false;
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
index 35e6bf7e2..2d48ccd02 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
@@ -56,7 +56,7 @@ static void _rtl92ee_query_rxphystatus(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct phy_status_rpt *p_phystrpt = (struct phy_status_rpt *)p_drvinfo;
- char rx_pwr_all = 0, rx_pwr[4];
+ s8 rx_pwr_all = 0, rx_pwr[4];
u8 rf_rx_num = 0, evm, pwdb_all;
u8 i, max_spatial_stream;
u32 rssi, total_rssi = 0;
@@ -703,7 +703,7 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error");
+ "DMA mapping error\n");
return;
}
@@ -867,7 +867,7 @@ void rtl92ee_tx_fill_cmddesc(struct ieee80211_hw *hw,
if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error");
+ "DMA mapping error\n");
return;
}
CLEAR_PCI_TX_DESC_CONTENT(pdesc, txdesc_len);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h
index a4c383452..8053d1b12 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h
@@ -650,8 +650,8 @@ struct rx_fwinfo {
u8 pwdb_all;
u8 cfosho[4];
u8 cfotail[4];
- char rxevm[2];
- char rxsnr[4];
+ s8 rxevm[2];
+ s8 rxsnr[4];
u8 pdsnr[2];
u8 csi_current[2];
u8 csi_target[2];
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
index 12b0978ba..ddfa0aee5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
@@ -1673,23 +1673,31 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct device *dev = &rtl_pcipriv(hw)->dev.pdev->dev;
u16 i, usvalue;
u16 eeprom_id;
u8 tempval;
u8 hwinfo[HWSET_MAX_SIZE_92S];
u8 rf_path, index;
- if (rtlefuse->epromtype == EEPROM_93C46) {
+ switch (rtlefuse->epromtype) {
+ case EEPROM_BOOT_EFUSE:
+ rtl_efuse_shadow_map_update(hw);
+ break;
+
+ case EEPROM_93C46:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"RTL819X Not boot from eeprom, check it !!\n");
- } else if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
- rtl_efuse_shadow_map_update(hw);
+ return;
- memcpy((void *)hwinfo, (void *)
- &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
- HWSET_MAX_SIZE_92S);
+ default:
+ dev_warn(dev, "no efuse data\n");
+ return;
}
+ memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+ HWSET_MAX_SIZE_92S);
+
RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP",
hwinfo, HWSET_MAX_SIZE_92S);
@@ -1995,7 +2003,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
rtlefuse->b1ss_support = rtlefuse->b1x1_recvcombine;
rtlefuse->eeprom_oemid = *&hwinfo[EEPROM_CUSTOMID];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM Customer ID: 0x%2x",
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM Customer ID: 0x%2x\n",
rtlefuse->eeprom_oemid);
/* set channel paln to world wide 13 */
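
Note: the rtl8192se conversion to a switch also fixes a control-flow wart: the old code logged the EEPROM_93C46 case but did not return, and for any other type it reached RT_PRINT_DATA with a hwinfo buffer that had never been filled. The new shape bails out early for both non-efuse cases (the unknown one with a dev_warn) so only the efuse path reaches the memcpy. Sketch of the pattern, with enum and names illustrative:

    #include <stdio.h>
    #include <string.h>

    enum eprom_type { BOOT_EFUSE, EEPROM_93C46, EEPROM_NONE };

    static void read_adapter_info(enum eprom_type t, const unsigned char *map,
    			      unsigned char *hwinfo, size_t len)
    {
    	switch (t) {
    	case BOOT_EFUSE:
    		break;                  /* only this source fills 'map' */
    	case EEPROM_93C46:
    		fprintf(stderr, "not booted from eeprom\n");
    		return;
    	default:
    		fprintf(stderr, "no efuse data\n");
    		return;
    	}
    	memcpy(hwinfo, map, len);       /* never reached with stale data */
    }

    int main(void)
    {
    	unsigned char map[8] = { 0 }, hwinfo[8];

    	read_adapter_info(BOOT_EFUSE, map, hwinfo, sizeof(hwinfo));
    	return 0;
    }
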
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
index 9475aa2a8..34e88a3f6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
@@ -137,7 +137,7 @@ static void _rtl92s_set_antennadiff(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct rtl_phy *rtlphy = &(rtlpriv->phy);
- char ant_pwr_diff = 0;
+ s8 ant_pwr_diff = 0;
u32 u4reg_val = 0;
if (rtlphy->rf_type == RF_2T2R) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
index 125b29bd2..d53bbf6be 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
@@ -360,7 +360,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error");
+ "DMA mapping error\n");
return;
}
if (mac->opmode == NL80211_IFTYPE_STATION) {
@@ -529,7 +529,7 @@ void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error");
+ "DMA mapping error\n");
return;
}
/* Clear all status */
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
index 4c1c96c96..42a6fba90 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
@@ -816,6 +816,7 @@ void rtl8723e_dm_watchdog(struct ieee80211_hw *hw)
if (ppsc->p2p_ps_info.p2p_ps_mode)
fw_ps_awake = false;
+ spin_lock(&rtlpriv->locks.rf_ps_lock);
if ((ppsc->rfpwr_state == ERFON) &&
((!fw_current_inpsmode) && fw_ps_awake) &&
(!ppsc->rfchange_inprogress)) {
@@ -829,6 +830,7 @@ void rtl8723e_dm_watchdog(struct ieee80211_hw *hw)
rtl8723e_dm_bt_coexist(hw);
rtl8723e_dm_check_edca_turbo(hw);
}
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
if (rtlpriv->btcoexist.init_set)
rtl_write_byte(rtlpriv, 0x76e, 0xc);
}
@@ -874,8 +876,8 @@ void rtl8723e_dm_bt_coexist(struct ieee80211_hw *hw)
tmp_byte = rtl_read_byte(rtlpriv, 0x40);
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[DM][BT], 0x40 is 0x%x", tmp_byte);
+ "[DM][BT], 0x40 is 0x%x\n", tmp_byte);
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[DM][BT], bt_dm_coexist start");
+ "[DM][BT], bt_dm_coexist start\n");
rtl8723e_dm_bt_coexist_8723(hw);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
index 44de695dc..ec9bcf32f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
@@ -185,7 +185,7 @@ static void rtl8723e_dm_bt_set_hw_pta_mode(struct ieee80211_hw *hw, bool b_mode)
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (BT_PTA_MODE_ON == b_mode) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "PTA mode on, ");
+ RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "PTA mode on\n");
/* Enable GPIO 0/1/2/3/8 pins for bt */
rtl_write_byte(rtlpriv, 0x40, 0x20);
rtlpriv->btcoexist.hw_coexist_all_off = false;
@@ -1401,7 +1401,7 @@ static void rtl8723e_dm_bt_inq_page_monitor(struct ieee80211_hw *hw)
(long)hal_coex_8723.bt_inq_page_start_time) / HZ)
>= 10) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT Inquiry/page >= 10sec!!!");
+ "[BTCoex], BT Inquiry/page >= 10sec!!!\n");
hal_coex_8723.bt_inq_page_start_time = 0;
rtlpriv->btcoexist.cstate &=
~BT_COEX_STATE_BT_INQ_PAGE;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
index a4b7eac68..b88c7ee72 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
@@ -1630,62 +1630,22 @@ static void _rtl8723e_read_adapter_info(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u16 i, usvalue;
- u8 hwinfo[HWSET_MAX_SIZE];
- u16 eeprom_id;
+ int params[] = {RTL8190_EEPROM_ID, EEPROM_VID, EEPROM_DID,
+ EEPROM_SVID, EEPROM_SMID, EEPROM_MAC_ADDR,
+ EEPROM_CHANNELPLAN, EEPROM_VERSION, EEPROM_CUSTOMER_ID,
+ COUNTRY_CODE_WORLD_WIDE_13};
+ u8 *hwinfo;
if (b_pseudo_test) {
/* need add */
return;
}
- if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
- rtl_efuse_shadow_map_update(hw);
-
- memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
- HWSET_MAX_SIZE);
- } else if (rtlefuse->epromtype == EEPROM_93C46) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "RTL819X Not boot from eeprom, check it !!");
- }
-
- RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP\n",
- hwinfo, HWSET_MAX_SIZE);
-
- eeprom_id = *((u16 *)&hwinfo[0]);
- if (eeprom_id != RTL8190_EEPROM_ID) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
- rtlefuse->autoload_failflag = true;
- } else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
- rtlefuse->autoload_failflag = false;
- }
-
- if (rtlefuse->autoload_failflag)
+ hwinfo = kzalloc(HWSET_MAX_SIZE, GFP_KERNEL);
+ if (!hwinfo)
return;
- rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID];
- rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID];
- rtlefuse->eeprom_svid = *(u16 *)&hwinfo[EEPROM_SVID];
- rtlefuse->eeprom_smid = *(u16 *)&hwinfo[EEPROM_SMID];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROMId = 0x%4x\n", eeprom_id);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
-
- for (i = 0; i < 6; i += 2) {
- usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i];
- *((u16 *)(&rtlefuse->dev_addr[i])) = usvalue;
- }
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "dev_addr: %pM\n", rtlefuse->dev_addr);
+ if (rtl_get_hwinfo(hw, rtlpriv, HWSET_MAX_SIZE, hwinfo, params))
+ goto exit;
_rtl8723e_read_txpower_info_from_hwpg(hw, rtlefuse->autoload_failflag,
hwinfo);
@@ -1693,144 +1653,139 @@ static void _rtl8723e_read_adapter_info(struct ieee80211_hw *hw,
rtl8723e_read_bt_coexist_info_from_hwpg(hw,
rtlefuse->autoload_failflag, hwinfo);
- rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
- rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
- rtlefuse->txpwr_fromeprom = true;
- rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID];
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
-
- /* set channel paln to world wide 13 */
- rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;
-
- if (rtlhal->oem_id == RT_CID_DEFAULT) {
- switch (rtlefuse->eeprom_oemid) {
- case EEPROM_CID_DEFAULT:
- if (rtlefuse->eeprom_did == 0x8176) {
- if (CHK_SVID_SMID(0x10EC, 0x6151) ||
- CHK_SVID_SMID(0x10EC, 0x6152) ||
- CHK_SVID_SMID(0x10EC, 0x6154) ||
- CHK_SVID_SMID(0x10EC, 0x6155) ||
- CHK_SVID_SMID(0x10EC, 0x6177) ||
- CHK_SVID_SMID(0x10EC, 0x6178) ||
- CHK_SVID_SMID(0x10EC, 0x6179) ||
- CHK_SVID_SMID(0x10EC, 0x6180) ||
- CHK_SVID_SMID(0x10EC, 0x7151) ||
- CHK_SVID_SMID(0x10EC, 0x7152) ||
- CHK_SVID_SMID(0x10EC, 0x7154) ||
- CHK_SVID_SMID(0x10EC, 0x7155) ||
- CHK_SVID_SMID(0x10EC, 0x7177) ||
- CHK_SVID_SMID(0x10EC, 0x7178) ||
- CHK_SVID_SMID(0x10EC, 0x7179) ||
- CHK_SVID_SMID(0x10EC, 0x7180) ||
- CHK_SVID_SMID(0x10EC, 0x8151) ||
- CHK_SVID_SMID(0x10EC, 0x8152) ||
- CHK_SVID_SMID(0x10EC, 0x8154) ||
- CHK_SVID_SMID(0x10EC, 0x8155) ||
- CHK_SVID_SMID(0x10EC, 0x8181) ||
- CHK_SVID_SMID(0x10EC, 0x8182) ||
- CHK_SVID_SMID(0x10EC, 0x8184) ||
- CHK_SVID_SMID(0x10EC, 0x8185) ||
- CHK_SVID_SMID(0x10EC, 0x9151) ||
- CHK_SVID_SMID(0x10EC, 0x9152) ||
- CHK_SVID_SMID(0x10EC, 0x9154) ||
- CHK_SVID_SMID(0x10EC, 0x9155) ||
- CHK_SVID_SMID(0x10EC, 0x9181) ||
- CHK_SVID_SMID(0x10EC, 0x9182) ||
- CHK_SVID_SMID(0x10EC, 0x9184) ||
- CHK_SVID_SMID(0x10EC, 0x9185))
+ if (rtlhal->oem_id != RT_CID_DEFAULT)
+ goto exit; /* hwinfo is heap-allocated here; a bare return would leak it */
+
+ switch (rtlefuse->eeprom_oemid) {
+ case EEPROM_CID_DEFAULT:
+ switch (rtlefuse->eeprom_did) {
+ case 0x8176:
+ switch (rtlefuse->eeprom_svid) {
+ case 0x10EC:
+ switch (rtlefuse->eeprom_smid) {
+ case 0x6151 ... 0x6152:
+ case 0x6154 ... 0x6155:
+ case 0x6177 ... 0x6180:
+ case 0x7151 ... 0x7152:
+ case 0x7154 ... 0x7155:
+ case 0x7177 ... 0x7180:
+ case 0x8151 ... 0x8152:
+ case 0x8154 ... 0x8155:
+ case 0x8181 ... 0x8182:
+ case 0x8184 ... 0x8185:
+ case 0x9151 ... 0x9152:
+ case 0x9154 ... 0x9155:
+ case 0x9181 ... 0x9182:
+ case 0x9184 ... 0x9185:
rtlhal->oem_id = RT_CID_TOSHIBA;
- else if (rtlefuse->eeprom_svid == 0x1025)
- rtlhal->oem_id = RT_CID_819X_ACER;
- else if (CHK_SVID_SMID(0x10EC, 0x6191) ||
- CHK_SVID_SMID(0x10EC, 0x6192) ||
- CHK_SVID_SMID(0x10EC, 0x6193) ||
- CHK_SVID_SMID(0x10EC, 0x7191) ||
- CHK_SVID_SMID(0x10EC, 0x7192) ||
- CHK_SVID_SMID(0x10EC, 0x7193) ||
- CHK_SVID_SMID(0x10EC, 0x8191) ||
- CHK_SVID_SMID(0x10EC, 0x8192) ||
- CHK_SVID_SMID(0x10EC, 0x8193) ||
- CHK_SVID_SMID(0x10EC, 0x9191) ||
- CHK_SVID_SMID(0x10EC, 0x9192) ||
- CHK_SVID_SMID(0x10EC, 0x9193))
+ break;
+ case 0x6191 ... 0x6193:
+ case 0x7191 ... 0x7193:
+ case 0x8191 ... 0x8193:
+ case 0x9191 ... 0x9193:
rtlhal->oem_id = RT_CID_819X_SAMSUNG;
- else if (CHK_SVID_SMID(0x10EC, 0x8195) ||
- CHK_SVID_SMID(0x10EC, 0x9195) ||
- CHK_SVID_SMID(0x10EC, 0x7194) ||
- CHK_SVID_SMID(0x10EC, 0x8200) ||
- CHK_SVID_SMID(0x10EC, 0x8201) ||
- CHK_SVID_SMID(0x10EC, 0x8202) ||
- CHK_SVID_SMID(0x10EC, 0x9200))
- rtlhal->oem_id = RT_CID_819X_LENOVO;
- else if (CHK_SVID_SMID(0x10EC, 0x8197) ||
- CHK_SVID_SMID(0x10EC, 0x9196))
+ break;
+ case 0x8197:
+ case 0x9196:
rtlhal->oem_id = RT_CID_819X_CLEVO;
- else if (CHK_SVID_SMID(0x1028, 0x8194) ||
- CHK_SVID_SMID(0x1028, 0x8198) ||
- CHK_SVID_SMID(0x1028, 0x9197) ||
- CHK_SVID_SMID(0x1028, 0x9198))
+ break;
+ case 0x8203:
+ rtlhal->oem_id = RT_CID_819X_PRONETS;
+ break;
+ case 0x8195:
+ case 0x9195:
+ case 0x7194:
+ case 0x8200 ... 0x8202:
+ case 0x9200:
+ rtlhal->oem_id = RT_CID_819X_LENOVO;
+ break;
+ }
+ break;
+ case 0x1025:
+ rtlhal->oem_id = RT_CID_819X_ACER;
+ break;
+ case 0x1028:
+ switch (rtlefuse->eeprom_smid) {
+ case 0x8194:
+ case 0x8198:
+ case 0x9197 ... 0x9198:
rtlhal->oem_id = RT_CID_819X_DELL;
- else if (CHK_SVID_SMID(0x103C, 0x1629))
+ break;
+ }
+ break;
+ case 0x103C:
+ switch (rtlefuse->eeprom_smid) {
+ case 0x1629:
rtlhal->oem_id = RT_CID_819X_HP;
- else if (CHK_SVID_SMID(0x1A32, 0x2315))
+ }
+ break;
+ case 0x1A32:
+ switch (rtlefuse->eeprom_smid) {
+ case 0x2315:
rtlhal->oem_id = RT_CID_819X_QMI;
- else if (CHK_SVID_SMID(0x10EC, 0x8203))
- rtlhal->oem_id = RT_CID_819X_PRONETS;
- else if (CHK_SVID_SMID(0x1043, 0x84B5))
- rtlhal->oem_id =
- RT_CID_819X_EDIMAX_ASUS;
- else
- rtlhal->oem_id = RT_CID_DEFAULT;
- } else if (rtlefuse->eeprom_did == 0x8178) {
- if (CHK_SVID_SMID(0x10EC, 0x6181) ||
- CHK_SVID_SMID(0x10EC, 0x6182) ||
- CHK_SVID_SMID(0x10EC, 0x6184) ||
- CHK_SVID_SMID(0x10EC, 0x6185) ||
- CHK_SVID_SMID(0x10EC, 0x7181) ||
- CHK_SVID_SMID(0x10EC, 0x7182) ||
- CHK_SVID_SMID(0x10EC, 0x7184) ||
- CHK_SVID_SMID(0x10EC, 0x7185) ||
- CHK_SVID_SMID(0x10EC, 0x8181) ||
- CHK_SVID_SMID(0x10EC, 0x8182) ||
- CHK_SVID_SMID(0x10EC, 0x8184) ||
- CHK_SVID_SMID(0x10EC, 0x8185) ||
- CHK_SVID_SMID(0x10EC, 0x9181) ||
- CHK_SVID_SMID(0x10EC, 0x9182) ||
- CHK_SVID_SMID(0x10EC, 0x9184) ||
- CHK_SVID_SMID(0x10EC, 0x9185))
- rtlhal->oem_id = RT_CID_TOSHIBA;
- else if (rtlefuse->eeprom_svid == 0x1025)
- rtlhal->oem_id = RT_CID_819X_ACER;
- else if (CHK_SVID_SMID(0x10EC, 0x8186))
- rtlhal->oem_id = RT_CID_819X_PRONETS;
- else if (CHK_SVID_SMID(0x1043, 0x8486))
+ break;
+ }
+ break;
+ case 0x1043:
+ switch (rtlefuse->eeprom_smid) {
+ case 0x84B5:
rtlhal->oem_id =
- RT_CID_819X_EDIMAX_ASUS;
- else
- rtlhal->oem_id = RT_CID_DEFAULT;
- } else {
- rtlhal->oem_id = RT_CID_DEFAULT;
+ RT_CID_819X_EDIMAX_ASUS;
+ }
+ break;
}
break;
- case EEPROM_CID_TOSHIBA:
- rtlhal->oem_id = RT_CID_TOSHIBA;
- break;
- case EEPROM_CID_CCX:
- rtlhal->oem_id = RT_CID_CCX;
- break;
- case EEPROM_CID_QMI:
- rtlhal->oem_id = RT_CID_819X_QMI;
- break;
- case EEPROM_CID_WHQL:
+ case 0x8178:
+ switch (rtlefuse->eeprom_svid) {
+ case 0x10EC:
+ switch (rtlefuse->eeprom_smid) {
+ case 0x6181 ... 0x6182:
+ case 0x6184 ... 0x6185:
+ case 0x7181 ... 0x7182:
+ case 0x7184 ... 0x7185:
+ case 0x8181 ... 0x8182:
+ case 0x8184 ... 0x8185:
+ case 0x9181 ... 0x9182:
+ case 0x9184 ... 0x9185:
+ rtlhal->oem_id = RT_CID_TOSHIBA;
+ break;
+ case 0x8186:
+ rtlhal->oem_id =
+ RT_CID_819X_PRONETS;
+ break;
+ }
break;
- default:
- rtlhal->oem_id = RT_CID_DEFAULT;
+ case 0x1025:
+ rtlhal->oem_id = RT_CID_819X_ACER;
+ break;
+ case 0x1043:
+ switch (rtlefuse->eeprom_smid) {
+ case 0x8486:
+ rtlhal->oem_id =
+ RT_CID_819X_EDIMAX_ASUS;
+ }
+ break;
+ }
break;
-
}
+ break;
+ case EEPROM_CID_TOSHIBA:
+ rtlhal->oem_id = RT_CID_TOSHIBA;
+ break;
+ case EEPROM_CID_CCX:
+ rtlhal->oem_id = RT_CID_CCX;
+ break;
+ case EEPROM_CID_QMI:
+ rtlhal->oem_id = RT_CID_819X_QMI;
+ break;
+ case EEPROM_CID_WHQL:
+ break;
+ default:
+ rtlhal->oem_id = RT_CID_DEFAULT;
+ break;
}
+exit:
+ kfree(hwinfo);
}
static void _rtl8723e_hal_customized_behavior(struct ieee80211_hw *hw)
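
Note: the rtl8723ae OEM-ID rewrite trades the CHK_SVID_SMID(...) || chains for nested switches over did/svid/smid using the GCC/Clang case-range extension (case low ... high:, spaces around the ellipsis required), an extension the kernel relies on elsewhere as well. Note the break restored above between the 0x10EC block and case 0x1025, without which every Realtek SVID would fall through into the Acer assignment. Standalone demo with values taken from the table:

    #include <stdio.h>

    static const char *oem_for_smid(unsigned int smid)
    {
    	switch (smid) {
    	case 0x6151 ... 0x6152:      /* GCC/Clang case-range extension */
    	case 0x6154 ... 0x6155:
    		return "toshiba";
    	case 0x8200 ... 0x8202:
    	case 0x9200:
    		return "lenovo";
    	default:
    		return "default";
    	}
    }

    int main(void)
    {
    	printf("%s\n", oem_for_smid(0x8201));   /* -> lenovo */
    	return 0;
    }
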
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
index d367097f4..601b78efe 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
@@ -213,7 +213,7 @@ static bool _rtl8723e_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
rtstatus = _rtl8723e_phy_config_bb_with_headerfile(hw,
BASEBAND_CONFIG_PHY_REG);
if (rtstatus != true) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!");
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n");
return false;
}
@@ -227,7 +227,7 @@ static bool _rtl8723e_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
BASEBAND_CONFIG_PHY_REG);
}
if (rtstatus != true) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!");
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n");
return false;
}
rtstatus =
@@ -893,7 +893,7 @@ u8 rtl8723e_phy_sw_chnl(struct ieee80211_hw *hw)
if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
rtl8723e_phy_sw_chnl_callback(hw);
RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false schdule workitem\n");
+ "sw_chnl_inprogress false schedule workitem\n");
rtlphy->sw_chnl_inprogress = false;
} else {
RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c
index 9ebc8281f..422771778 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c
@@ -504,7 +504,7 @@ static bool _rtl8723e_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
if (rtstatus != true) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio[%d] Fail!!", rfpath);
+ "Radio[%d] Fail!!\n", rfpath);
return false;
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
index 7b4a9b635..e93125ebe 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
@@ -389,7 +389,7 @@ void rtl8723e_tx_fill_desc(struct ieee80211_hw *hw,
if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error");
+ "DMA mapping error\n");
return;
}
if (mac->opmode == NL80211_IFTYPE_STATION) {
@@ -557,7 +557,7 @@ void rtl8723e_tx_fill_cmddesc(struct ieee80211_hw *hw,
if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error");
+ "DMA mapping error\n");
return;
}
CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h
index 32970bf18..43d4c791d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h
@@ -522,8 +522,8 @@ struct rx_fwinfo_8723e {
u8 pwdb_all;
u8 cfosho[4];
u8 cfotail[4];
- char rxevm[2];
- char rxsnr[4];
+ s8 rxevm[2];
+ s8 rxsnr[4];
u8 pdsnr[2];
u8 csi_current[2];
u8 csi_target[2];
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c
index 3a81cdba8..131c0d1d6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c
@@ -758,11 +758,11 @@ static void rtl8723be_dm_txpower_tracking_callback_thermalmeter(
u8 ofdm_min_index = 6;
u8 index_for_channel = 0;
- char delta_swing_table_idx_tup_a[TXSCALE_TABLE_SIZE] = {
+ s8 delta_swing_table_idx_tup_a[TXSCALE_TABLE_SIZE] = {
0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 5,
5, 6, 6, 7, 7, 8, 8, 9, 9, 9, 10,
10, 11, 11, 12, 12, 13, 14, 15};
- char delta_swing_table_idx_tdown_a[TXSCALE_TABLE_SIZE] = {
+ s8 delta_swing_table_idx_tdown_a[TXSCALE_TABLE_SIZE] = {
0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 5,
5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 9,
9, 10, 10, 11, 12, 13, 14, 15};
@@ -1279,6 +1279,7 @@ void rtl8723be_dm_watchdog(struct ieee80211_hw *hw)
if (ppsc->p2p_ps_info.p2p_ps_mode)
fw_ps_awake = false;
+ spin_lock(&rtlpriv->locks.rf_ps_lock);
if ((ppsc->rfpwr_state == ERFON) &&
((!fw_current_inpsmode) && fw_ps_awake) &&
(!ppsc->rfchange_inprogress)) {
@@ -1294,5 +1295,6 @@ void rtl8723be_dm_watchdog(struct ieee80211_hw *hw)
rtl8723be_dm_check_txpower_tracking(hw);
rtl8723be_dm_dynamic_txpower(hw);
}
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
rtlpriv->dm.dbginfo.num_qry_beacon_pkt = 0;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index 5a3df9198..82e4476ca 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -1474,7 +1474,7 @@ static enum version_8723e _rtl8723be_read_chip_version(struct ieee80211_hw *hw)
value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG1);
if ((value32 & (CHIP_8723B)) != CHIP_8723B)
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "unkown chip version\n");
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "unknown chip version\n");
else
version = (enum version_8723e)CHIP_8723B;
@@ -2026,9 +2026,12 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u16 i, usvalue;
- u8 hwinfo[HWSET_MAX_SIZE];
- u16 eeprom_id;
+ int params[] = {RTL8723BE_EEPROM_ID, EEPROM_VID, EEPROM_DID,
+ EEPROM_SVID, EEPROM_SMID, EEPROM_MAC_ADDR,
+ EEPROM_CHANNELPLAN, EEPROM_VERSION, EEPROM_CUSTOMER_ID,
+ COUNTRY_CODE_WORLD_WIDE_13};
+ u8 *hwinfo;
+ int i;
bool is_toshiba_smid1 = false;
bool is_toshiba_smid2 = false;
bool is_samsung_smid = false;
@@ -2055,52 +2058,13 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw,
/* needs to be added */
return;
}
- if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
- rtl_efuse_shadow_map_update(hw);
- memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
- HWSET_MAX_SIZE);
- } else if (rtlefuse->epromtype == EEPROM_93C46) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "RTL819X Not boot from eeprom, check it !!");
- }
- RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("MAP\n"),
- hwinfo, HWSET_MAX_SIZE);
-
- eeprom_id = *((u16 *)&hwinfo[0]);
- if (eeprom_id != RTL8723BE_EEPROM_ID) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
- rtlefuse->autoload_failflag = true;
- } else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
- rtlefuse->autoload_failflag = false;
- }
-
- if (rtlefuse->autoload_failflag)
+ hwinfo = kzalloc(HWSET_MAX_SIZE, GFP_KERNEL);
+ if (!hwinfo)
return;
- rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID];
- rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID];
- rtlefuse->eeprom_svid = *(u16 *)&hwinfo[EEPROM_SVID];
- rtlefuse->eeprom_smid = *(u16 *)&hwinfo[EEPROM_SMID];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROMId = 0x%4x\n", eeprom_id);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
-
- for (i = 0; i < 6; i += 2) {
- usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i];
- *((u16 *)(&rtlefuse->dev_addr[i])) = usvalue;
- }
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "dev_addr: %pM\n",
- rtlefuse->dev_addr);
+ if (rtl_get_hwinfo(hw, rtlpriv, HWSET_MAX_SIZE, hwinfo, params))
+ goto exit;
/*parse xtal*/
rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_8723BE];
@@ -2114,14 +2078,6 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw,
rtlefuse->autoload_failflag,
hwinfo);
- rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
- rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
- rtlefuse->txpwr_fromeprom = true;
- rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID];
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
-
/* set channel plan from efuse */
rtlefuse->channel_plan = rtlefuse->eeprom_channelplan;
@@ -2232,6 +2188,8 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw,
break;
}
}
+exit:
+ kfree(hwinfo);
}
static void _rtl8723be_hal_customized_behavior(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
index 445f681d0..285818df1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
@@ -379,7 +379,7 @@ static void _rtl8723be_phy_store_txpower_by_rate_base(struct ieee80211_hw *hw)
static void _phy_convert_txpower_dbm_to_relative_value(u32 *data, u8 start,
u8 end, u8 base_val)
{
- char i = 0;
+ s8 i = 0;
u8 temp_value = 0;
u32 temp_data = 0;
@@ -467,7 +467,7 @@ static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw)
rtstatus = _rtl8723be_phy_config_bb_with_headerfile(hw,
BASEBAND_CONFIG_PHY_REG);
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!");
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n");
return false;
}
_rtl8723be_phy_init_tx_power_by_rate(hw);
@@ -478,7 +478,7 @@ static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw)
}
phy_txpower_by_rate_config(hw);
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!");
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n");
return false;
}
rtstatus = _rtl8723be_phy_config_bb_with_headerfile(hw,
@@ -953,7 +953,7 @@ static u8 _rtl8723be_get_txpower_by_rate(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &rtlpriv->phy;
u8 shift = 0, rate_section, tx_num;
- char tx_pwr_diff = 0;
+ s8 tx_pwr_diff = 0;
rate_section = _rtl8723be_phy_get_ratesection_intxpower_byrate(rfpath,
rate);
@@ -1019,7 +1019,7 @@ static u8 _rtl8723be_get_txpower_index(struct ieee80211_hw *hw, u8 path,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
u8 index = (channel - 1);
- u8 txpower;
+ u8 txpower = 0;
u8 power_diff_byrate = 0;
if (channel > 14 || channel < 1) {
@@ -1395,7 +1395,7 @@ u8 rtl8723be_phy_sw_chnl(struct ieee80211_hw *hw)
if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
rtl8723be_phy_sw_chnl_callback(hw);
RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false schdule workitem current channel %d\n",
+ "sw_chnl_inprogress false schedule workitem current channel %d\n",
rtlphy->current_channel);
rtlphy->sw_chnl_inprogress = false;
} else {
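
Note: the one-line "u8 txpower = 0;" initializer in _rtl8723be_get_txpower_index() matters because the function validates the channel after deriving the index and, on the out-of-range path, previously went on to return whatever happened to be on the stack. Minimal illustration, with the table lookup as a stand-in:

    #include <stdio.h>

    static unsigned char get_txpower_index(int channel)
    {
    	unsigned char txpower = 0;   /* the fix: defined value on all paths */

    	if (channel > 14 || channel < 1)
    		fprintf(stderr, "Illegal channel!!\n");
    	else
    		txpower = 40;        /* stand-in for the per-channel table */

    	return txpower;
    }

    int main(void)
    {
    	printf("%u\n", get_txpower_index(99));
    	return 0;
    }
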
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
index 97f5a0377..78f4f18d8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
@@ -502,7 +502,7 @@ static bool _rtl8723be_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
if (!rtstatus) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio[%d] Fail!!", rfpath);
+ "Radio[%d] Fail!!\n", rfpath);
return false;
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
index 60345975f..2175aecbb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
@@ -56,7 +56,7 @@ static void _rtl8723be_query_rxphystatus(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct phy_status_rpt *p_phystrpt = (struct phy_status_rpt *)p_drvinfo;
- char rx_pwr_all = 0, rx_pwr[4];
+ s8 rx_pwr_all = 0, rx_pwr[4];
u8 rf_rx_num = 0, evm, pwdb_all, pwdb_all_bt = 0;
u8 i, max_spatial_stream;
u32 rssi, total_rssi = 0;
@@ -464,7 +464,7 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw,
mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "DMA mapping error");
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "DMA mapping error\n");
return;
}
CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_8723be));
@@ -616,7 +616,7 @@ void rtl8723be_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error");
+ "DMA mapping error\n");
return;
}
CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h
index 40c36607b..8a9fe41ac 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h
@@ -385,9 +385,9 @@ struct phy_status_rpt {
u8 cck_rpt_b_ofdm_cfosho_b;
u8 rsvd_1;/* ch_corr_msb; */
u8 noise_power_db_msb;
- char path_cfotail[2];
+ s8 path_cfotail[2];
u8 pcts_mask[2];
- char stream_rxevm[2];
+ s8 stream_rxevm[2];
u8 path_rxsnr[2];
u8 noise_power_db_lsb;
u8 rsvd_2[3];
@@ -422,8 +422,8 @@ struct rx_fwinfo_8723be {
u8 pwdb_all;
u8 cfosho[4];
u8 cfotail[4];
- char rxevm[2];
- char rxsnr[2];
+ s8 rxevm[2];
+ s8 rxsnr[2];
u8 pcts_msk_rpt[2];
u8 pdsnr[2];
u8 csi_current[2];
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
index 17a681788..bdfd44495 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
@@ -843,7 +843,7 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
dm_digtable->rssi_val_min + offset;
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "dm_digtable->rssi_val_min=0x%x,dm_digtable->rx_gain_max = 0x%x",
+ "dm_digtable->rssi_val_min=0x%x,dm_digtable->rx_gain_max = 0x%x\n",
dm_digtable->rssi_val_min,
dm_digtable->rx_gain_max);
if (rtlpriv->dm.one_entry_only) {
@@ -1355,7 +1355,7 @@ void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
u32 final_swing_idx[2];
u8 pwr_tracking_limit = 26; /*+1.0dB*/
u8 tx_rate = 0xFF;
- char final_ofdm_swing_index = 0;
+ s8 final_ofdm_swing_index = 0;
if (rtldm->tx_rate != 0xFF)
tx_rate =
@@ -2045,7 +2045,7 @@ void rtl8821ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
u32 final_swing_idx[1];
u8 pwr_tracking_limit = 26; /*+1.0dB*/
u8 tx_rate = 0xFF;
- char final_ofdm_swing_index = 0;
+ s8 final_ofdm_swing_index = 0;
if (rtldm->tx_rate != 0xFF)
tx_rate = rtl8821ae_hw_rate_to_mrate(hw, rtldm->tx_rate);
@@ -2682,9 +2682,9 @@ static void rtl8821ae_dm_check_edca_turbo(struct ieee80211_hw *hw)
bool b_edca_turbo_on = false;
RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD,
- "rtl8821ae_dm_check_edca_turbo=====>");
+ "rtl8821ae_dm_check_edca_turbo=====>\n");
RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD,
- "Orginial BE PARAM: 0x%x\n",
+ "Original BE PARAM: 0x%x\n",
rtl_read_dword(rtlpriv, DM_REG_EDCA_BE_11N));
if (rtlpriv->dm.dbginfo.num_non_be_pkt > 0x100)
@@ -2949,6 +2949,7 @@ void rtl8821ae_dm_watchdog(struct ieee80211_hw *hw)
if (ppsc->p2p_ps_info.p2p_ps_mode)
fw_ps_awake = false;
+ spin_lock(&rtlpriv->locks.rf_ps_lock);
if ((ppsc->rfpwr_state == ERFON) &&
((!fw_current_inpsmode) && fw_ps_awake) &&
(!ppsc->rfchange_inprogress)) {
@@ -2967,6 +2968,7 @@ void rtl8821ae_dm_watchdog(struct ieee80211_hw *hw)
rtl8821ae_dm_check_txpower_tracking_thermalmeter(hw);
rtl8821ae_dm_iq_calibrate(hw);
}
+ spin_unlock(&rtlpriv->locks.rf_ps_lock);
rtlpriv->dm.dbginfo.num_qry_beacon_pkt = 0;
RT_TRACE(rtlpriv, COMP_DIG, DBG_DMESG, "\n");
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 71e4dd996..0cddf1ad0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -3101,79 +3101,22 @@ static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
- u16 i, usvalue;
- u8 hwinfo[HWSET_MAX_SIZE];
- u16 eeprom_id;
+ int params[] = {RTL_EEPROM_ID, EEPROM_VID, EEPROM_DID,
+ EEPROM_SVID, EEPROM_SMID, EEPROM_MAC_ADDR,
+ EEPROM_CHANNELPLAN, EEPROM_VERSION, EEPROM_CUSTOMER_ID,
+ COUNTRY_CODE_WORLD_WIDE_13};
+ u8 *hwinfo;
if (b_pseudo_test) {
;/* need add */
}
- if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
- rtl_efuse_shadow_map_update(hw);
- memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
- HWSET_MAX_SIZE);
- } else if (rtlefuse->epromtype == EEPROM_93C46) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "RTL819X Not boot from eeprom, check it !!");
- }
-
- RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP\n",
- hwinfo, HWSET_MAX_SIZE);
-
- eeprom_id = *((u16 *)&hwinfo[0]);
- if (eeprom_id != RTL_EEPROM_ID) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
- rtlefuse->autoload_failflag = true;
- } else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
- rtlefuse->autoload_failflag = false;
- }
-
- if (rtlefuse->autoload_failflag) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
- "RTL8812AE autoload_failflag, check it !!");
+ hwinfo = kzalloc(HWSET_MAX_SIZE, GFP_KERNEL);
+ if (!hwinfo)
return;
- }
-
- rtlefuse->eeprom_version = *(u8 *)&hwinfo[EEPROM_VERSION];
- if (rtlefuse->eeprom_version == 0xff)
- rtlefuse->eeprom_version = 0;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM version: 0x%2x\n", rtlefuse->eeprom_version);
-
- rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID];
- rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID];
- rtlefuse->eeprom_svid = *(u16 *)&hwinfo[EEPROM_SVID];
- rtlefuse->eeprom_smid = *(u16 *)&hwinfo[EEPROM_SMID];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROMId = 0x%4x\n", eeprom_id);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
-
- /*customer ID*/
- rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
- if (rtlefuse->eeprom_oemid == 0xFF)
- rtlefuse->eeprom_oemid = 0;
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
-
- for (i = 0; i < 6; i += 2) {
- usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i];
- *((u16 *)(&rtlefuse->dev_addr[i])) = usvalue;
- }
-
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "dev_addr: %pM\n", rtlefuse->dev_addr);
+ if (rtl_get_hwinfo(hw, rtlpriv, HWSET_MAX_SIZE, hwinfo, params))
+ goto exit;
_rtl8821ae_read_txpower_info_from_hwpg(hw, rtlefuse->autoload_failflag,
hwinfo);
@@ -3273,6 +3216,8 @@ static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_
break;
}
}
+exit:
+ kfree(hwinfo);
}
/*static void _rtl8821ae_hal_customized_behavior(struct ieee80211_hw *hw)
@@ -3829,7 +3774,7 @@ void rtl8821ae_update_hal_rate_tbl(struct ieee80211_hw *hw,
rtl8821ae_update_hal_rate_mask(hw, sta, rssi_level);
else
/*RT_TRACE(rtlpriv, COMP_RATR,DBG_LOUD,
- "rtl8821ae_update_hal_rate_tbl() Error! 8821ae FW RA Only");*/
+ "rtl8821ae_update_hal_rate_tbl() Error! 8821ae FW RA Only\n");*/
rtl8821ae_update_hal_rate_table(hw, sta);
}
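
Note: in rtl8821ae_update_hal_rate_tbl() the else branch is a block comment followed by the real call, and the hunk only adds the newline inside the comment. The shape deserves a flag: a comment is not a statement, so the else binds to rtl8821ae_update_hal_rate_table(). That is correct here, but easy to misread, and braces would make it explicit. Reduced runnable sketch:

    #include <stdio.h>

    static void update_rate_mask(void)  { puts("mask");  }
    static void update_rate_table(void) { puts("table"); }

    static void update_hal_rate_tbl(int use_mask)
    {
    	if (use_mask)
    		update_rate_mask();
    	else
    		/* trace("FW RA only\n"); -- a comment is not a statement, */
    		update_rate_table();  /* so this call is the else body */
    }

    int main(void)
    {
    	update_hal_rate_tbl(0);
    	return 0;
    }
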
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index 0c3b9ce86..a71bfe38e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -366,12 +366,12 @@ u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band,
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_dm *rtldm = rtl_dm(rtlpriv);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- char reg_swing_2g = -1;/* 0xff; */
- char reg_swing_5g = -1;/* 0xff; */
- char swing_2g = -1 * reg_swing_2g;
- char swing_5g = -1 * reg_swing_5g;
+ s8 reg_swing_2g = -1;/* 0xff; */
+ s8 reg_swing_5g = -1;/* 0xff; */
+ s8 swing_2g = -1 * reg_swing_2g;
+ s8 swing_5g = -1 * reg_swing_5g;
u32 out = 0x200;
- const char auto_temp = -1;
+ const s8 auto_temp = -1;
RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
"===> PHY_GetTxBBSwing_8812A, bbSwing_2G: %d, bbSwing_5G: %d,autoload_failflag=%d.\n",
@@ -524,7 +524,7 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
struct rtl_dm *rtldm = rtl_dm(rtlpriv);
u8 current_band = rtlhal->current_bandtype;
u32 txpath, rxpath;
- char bb_diff_between_band;
+ s8 bb_diff_between_band;
txpath = rtl8821ae_phy_query_bb_reg(hw, RTXPATH, 0xf0);
rxpath = rtl8821ae_phy_query_bb_reg(hw, RCCK_RX, 0x0f000000);
@@ -581,7 +581,7 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
count = 0;
reg_41a = rtl_read_word(rtlpriv, REG_TXPKT_EMPTY);
RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "Reg41A value %d", reg_41a);
+ "Reg41A value %d\n", reg_41a);
reg_41a &= 0x30;
while ((reg_41a != 0x30) && (count < 50)) {
udelay(50);
@@ -591,7 +591,7 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
reg_41a &= 0x30;
count++;
RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "Reg41A value %d", reg_41a);
+ "Reg41A value %d\n", reg_41a);
}
if (count != 0)
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
@@ -986,7 +986,7 @@ static void _rtl8812ae_phy_cross_reference_ht_and_vht_txpower_limit(struct ieee8
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &rtlpriv->phy;
u8 regulation, bw, channel, rate_section;
- char temp_pwrlmt = 0;
+ s8 temp_pwrlmt = 0;
for (regulation = 0; regulation < MAX_REGULATION_NUM; ++regulation) {
for (bw = 0; bw < MAX_5G_BANDWITH_NUM; ++bw) {
@@ -1013,7 +1013,7 @@ static void _rtl8812ae_phy_cross_reference_ht_and_vht_txpower_limit(struct ieee8
rtlphy->txpwr_limit_5g[regulation][bw][3][channel][RF90_PATH_A];
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "use other value %d", temp_pwrlmt);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "use other value %d\n", temp_pwrlmt);
}
}
}
@@ -1155,7 +1155,7 @@ static void _rtl8812ae_phy_convert_txpower_limit_to_power_index(struct ieee80211
u8 regulation, bw, channel, rate_section;
u8 base_index2_4G = 0;
u8 base_index5G = 0;
- char temp_value = 0, temp_pwrlmt = 0;
+ s8 temp_value = 0, temp_pwrlmt = 0;
u8 rf_path = 0;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
@@ -1467,11 +1467,11 @@ static bool _rtl8812ae_eq_n_byte(u8 *str1, u8 *str2, u32 num)
return true;
}
-static char _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(struct ieee80211_hw *hw,
+static s8 _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(struct ieee80211_hw *hw,
u8 band, u8 channel)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- char channel_index = -1;
+ s8 channel_index = -1;
u8 i = 0;
if (band == BAND_ON_2_4G)
@@ -1482,12 +1482,12 @@ static char _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(struct ieee80211_hw *hw,
channel_index = i;
}
} else
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Invalid Band %d in %s",
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Invalid Band %d in %s\n",
band, __func__);
if (channel_index == -1)
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Invalid Channel %d of Band %d in %s", channel,
+ "Invalid Channel %d of Band %d in %s\n", channel,
band, __func__);
return channel_index;
@@ -1502,7 +1502,7 @@ static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw, u8 *pregul
struct rtl_phy *rtlphy = &rtlpriv->phy;
u8 regulation = 0, bandwidth = 0, rate_section = 0, channel;
u8 channel_index;
- char power_limit = 0, prev_power_limit, ret;
+ s8 power_limit = 0, prev_power_limit, ret;
if (!_rtl8812ae_get_integer_from_string((char *)pchannel, &channel) ||
!_rtl8812ae_get_integer_from_string((char *)ppower_limit,
@@ -1665,7 +1665,7 @@ static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw)
rtstatus = _rtl8821ae_phy_config_bb_with_headerfile(hw,
BASEBAND_CONFIG_PHY_REG);
if (rtstatus != true) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!");
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n");
return false;
}
_rtl8821ae_phy_init_tx_power_by_rate(hw);
@@ -1674,7 +1674,7 @@ static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw)
BASEBAND_CONFIG_PHY_REG);
}
if (rtstatus != true) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!");
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n");
return false;
}
@@ -2254,9 +2254,9 @@ static bool _rtl8821ae_phy_get_chnl_index(u8 channel, u8 *chnl_index)
return in_24g;
}
-static char _rtl8821ae_phy_get_ratesection_intxpower_byrate(u8 path, u8 rate)
+static s8 _rtl8821ae_phy_get_ratesection_intxpower_byrate(u8 path, u8 rate)
{
- char rate_section = 0;
+ s8 rate_section = 0;
switch (rate) {
case DESC_RATE1M:
case DESC_RATE2M:
@@ -2338,9 +2338,9 @@ static char _rtl8821ae_phy_get_ratesection_intxpower_byrate(u8 path, u8 rate)
return rate_section;
}
-static char _rtl8812ae_phy_get_world_wide_limit(char *limit_table)
+static s8 _rtl8812ae_phy_get_world_wide_limit(s8 *limit_table)
{
- char min = limit_table[0];
+ s8 min = limit_table[0];
u8 i = 0;
for (i = 0; i < MAX_REGULATION_NUM; ++i) {
@@ -2350,7 +2350,7 @@ static char _rtl8812ae_phy_get_world_wide_limit(char *limit_table)
return min;
}
-static char _rtl8812ae_phy_get_txpower_limit(struct ieee80211_hw *hw,
+static s8 _rtl8812ae_phy_get_txpower_limit(struct ieee80211_hw *hw,
u8 band,
enum ht_channel_width bandwidth,
enum radio_path rf_path,
@@ -2362,7 +2362,7 @@ static char _rtl8812ae_phy_get_txpower_limit(struct ieee80211_hw *hw,
short band_temp = -1, regulation = -1, bandwidth_temp = -1,
rate_section = -1, channel_temp = -1;
u16 bd, regu, bdwidth, sec, chnl;
- char power_limit = MAX_POWER_INDEX;
+ s8 power_limit = MAX_POWER_INDEX;
if (rtlefuse->eeprom_regulatory == 2)
return MAX_POWER_INDEX;
@@ -2489,7 +2489,7 @@ static char _rtl8812ae_phy_get_txpower_limit(struct ieee80211_hw *hw,
chnl = channel_temp;
if (band == BAND_ON_2_4G) {
- char limits[10] = {0};
+ s8 limits[10] = {0};
u8 i;
for (i = 0; i < 4; ++i)
@@ -2501,7 +2501,7 @@ static char _rtl8812ae_phy_get_txpower_limit(struct ieee80211_hw *hw,
rtlphy->txpwr_limit_2_4g[regu][bdwidth]
[sec][chnl][rf_path];
} else if (band == BAND_ON_5G) {
- char limits[10] = {0};
+ s8 limits[10] = {0};
u8 i;
for (i = 0; i < MAX_REGULATION_NUM; ++i)
@@ -2519,14 +2519,14 @@ static char _rtl8812ae_phy_get_txpower_limit(struct ieee80211_hw *hw,
return power_limit;
}
-static char _rtl8821ae_phy_get_txpower_by_rate(struct ieee80211_hw *hw,
+static s8 _rtl8821ae_phy_get_txpower_by_rate(struct ieee80211_hw *hw,
u8 band, u8 path, u8 rate)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &rtlpriv->phy;
u8 shift = 0, rate_section, tx_num;
- char tx_pwr_diff = 0;
- char limit = 0;
+ s8 tx_pwr_diff = 0;
+ s8 limit = 0;
rate_section = _rtl8821ae_phy_get_ratesection_intxpower_byrate(path, rate);
tx_num = RF_TX_NUM_NONIMPLEMENT;
@@ -2639,7 +2639,7 @@ static u8 _rtl8821ae_get_txpower_index(struct ieee80211_hw *hw, u8 path,
u8 index = (channel - 1);
u8 txpower = 0;
bool in_24g = false;
- char powerdiff_byrate = 0;
+ s8 powerdiff_byrate = 0;
if (((rtlhal->current_bandtype == BAND_ON_2_4G) &&
(channel > 14 || channel < 1)) ||
@@ -4637,7 +4637,7 @@ void rtl8821ae_phy_lc_calibrate(struct ieee80211_hw *hw)
{
}
-void rtl8821ae_phy_ap_calibrate(struct ieee80211_hw *hw, char delta)
+void rtl8821ae_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta)
{
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h
index c411f0a95..1285e1adf 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h
@@ -236,7 +236,7 @@ void rtl8821ae_phy_iq_calibrate(struct ieee80211_hw *hw,
bool b_recovery);
void rtl8812ae_phy_iq_calibrate(struct ieee80211_hw *hw,
bool b_recovery);
-void rtl8821ae_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
+void rtl8821ae_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta);
void rtl8821ae_phy_lc_calibrate(struct ieee80211_hw *hw);
void rtl8821ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c
index 292253816..c6ab95702 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c
@@ -454,7 +454,7 @@ static bool _rtl8821ae_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
if (!rtstatus) {
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio[%d] Fail!!", rfpath);
+ "Radio[%d] Fail!!\n", rfpath);
return false;
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
index 41efaa148..27727186b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
@@ -48,7 +48,7 @@ static u8 _rtl8821ae_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
return skb->priority;
}
-static u16 odm_cfo(char value)
+static u16 odm_cfo(s8 value)
{
int ret_val;
@@ -64,9 +64,9 @@ static u16 odm_cfo(char value)
return ret_val;
}
-static u8 _rtl8821ae_evm_dbm_jaguar(char value)
+static u8 _rtl8821ae_evm_dbm_jaguar(s8 value)
{
- char ret_val = value;
+ s8 ret_val = value;
/* -33dB~0dB to 33dB ~ 0dB*/
if (ret_val == -128)
@@ -88,7 +88,7 @@ static void query_rxphystatus(struct ieee80211_hw *hw,
struct phy_status_rpt *p_phystrpt = (struct phy_status_rpt *)p_drvinfo;
struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
struct rtl_phy *rtlphy = &rtlpriv->phy;
- char rx_pwr_all = 0, rx_pwr[4];
+ s8 rx_pwr_all = 0, rx_pwr[4];
u8 rf_rx_num = 0, evm, evmdbm, pwdb_all;
u8 i, max_spatial_stream;
u32 rssi, total_rssi = 0;
@@ -170,7 +170,7 @@ static void query_rxphystatus(struct ieee80211_hw *hw,
pwdb_all = 100;
}
} else { /* 8821 */
- char pout = -6;
+ s8 pout = -6;
switch (lan_idx) {
case 5:
@@ -275,7 +275,7 @@ static void query_rxphystatus(struct ieee80211_hw *hw,
if (bpacket_match_bssid) {
for (i = RF90_PATH_A; i <= RF90_PATH_B; i++)
rtl_priv(hw)->dm.cfo_tail[i] =
- (char)p_phystrpt->cfotail[i];
+ (s8)p_phystrpt->cfotail[i];
rtl_priv(hw)->dm.packet_count++;
}
@@ -716,7 +716,7 @@ void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw,
PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error");
+ "DMA mapping error\n");
return;
}
CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_8821ae));
@@ -857,7 +857,7 @@ void rtl8821ae_tx_fill_cmddesc(struct ieee80211_hw *hw,
if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error");
+ "DMA mapping error\n");
return;
}
CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h
index ad565bebf..b6f3c564b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h
@@ -390,11 +390,11 @@ struct phy_status_rpt {
u8 cfosho[4]; /* DW 1 byte 1 DW 2 byte 0 */
/* DWORD 2 */
- char cfotail[4]; /* DW 2 byte 1 DW 3 byte 0 */
+ s8 cfotail[4]; /* DW 2 byte 1 DW 3 byte 0 */
/* DWORD 3 */
- char rxevm[2]; /* DW 3 byte 1 DW 3 byte 2 */
- char rxsnr[2]; /* DW 3 byte 3 DW 4 byte 0 */
+ s8 rxevm[2]; /* DW 3 byte 1 DW 3 byte 2 */
+ s8 rxsnr[2]; /* DW 3 byte 3 DW 4 byte 0 */
/* DWORD 4 */
u8 pcts_msk_rpt[2];
@@ -418,8 +418,8 @@ struct rx_fwinfo_8821ae {
u8 pwdb_all;
u8 cfosho[4];
u8 cfotail[4];
- char rxevm[2];
- char rxsnr[4];
+ s8 rxevm[2];
+ s8 rxsnr[4];
u8 pdsnr[2];
u8 csi_current[2];
u8 csi_target[2];
diff --git a/drivers/net/wireless/realtek/rtlwifi/stats.c b/drivers/net/wireless/realtek/rtlwifi/stats.c
index d8b30690b..61700fa05 100644
--- a/drivers/net/wireless/realtek/rtlwifi/stats.c
+++ b/drivers/net/wireless/realtek/rtlwifi/stats.c
@@ -26,7 +26,7 @@
#include "stats.h"
#include <linux/export.h>
-u8 rtl_query_rxpwrpercentage(char antpower)
+u8 rtl_query_rxpwrpercentage(s8 antpower)
{
if ((antpower <= -100) || (antpower >= 20))
return 0;
@@ -37,9 +37,9 @@ u8 rtl_query_rxpwrpercentage(char antpower)
}
EXPORT_SYMBOL(rtl_query_rxpwrpercentage);
-u8 rtl_evm_db_to_percentage(char value)
+u8 rtl_evm_db_to_percentage(s8 value)
{
- char ret_val = clamp(-value, 0, 33) * 3;
+ s8 ret_val = clamp(-value, 0, 33) * 3;
if (ret_val == 99)
ret_val = 100;
diff --git a/drivers/net/wireless/realtek/rtlwifi/stats.h b/drivers/net/wireless/realtek/rtlwifi/stats.h
index 2b57dffef..bd0108f93 100644
--- a/drivers/net/wireless/realtek/rtlwifi/stats.h
+++ b/drivers/net/wireless/realtek/rtlwifi/stats.h
@@ -33,8 +33,8 @@
/* Rx smooth factor */
#define RX_SMOOTH_FACTOR 20
-u8 rtl_query_rxpwrpercentage(char antpower);
-u8 rtl_evm_db_to_percentage(char value);
+u8 rtl_query_rxpwrpercentage(s8 antpower);
+u8 rtl_evm_db_to_percentage(s8 value);
long rtl_signal_scale_mapping(struct ieee80211_hw *hw, long currsig);
void rtl_process_phyinfo(struct ieee80211_hw *hw, u8 *buffer,
struct rtl_stats *pstatus);
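
The char-to-s8 conversions throughout these rtlwifi hunks matter because the signedness of plain char is implementation-defined: on architectures where char is unsigned (arm, powerpc, s390), a negative dBm value stored in a char wraps to a large positive number and range checks like the one in rtl_query_rxpwrpercentage() misfire. A minimal standalone sketch of the failure mode (userspace C; int8_t stands in for the kernel's s8 typedef, an assumption for portability):

#include <stdint.h>
#include <stdio.h>

typedef int8_t s8;   /* userspace stand-in for the kernel typedef */
typedef uint8_t u8;

/* Same shape as rtl_query_rxpwrpercentage() in the hunk above. */
static u8 query_rxpwrpercentage(s8 antpower)
{
	if ((antpower <= -100) || (antpower >= 20))
		return 0;
	else if (antpower >= 0)
		return 100;
	else
		return 100 + antpower;
}

int main(void)
{
	s8 good = (s8)0xA0;        /* -96 dBm */
	unsigned char bad = 0xA0;  /* what an unsigned-char arch would see */

	printf("as s8: %d dBm -> %d%%\n", good, query_rxpwrpercentage(good));
	printf("as unsigned char: %d, so the >= 20 branch fires and 0%% is reported\n",
	       bad);
	return 0;
}
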
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 4e0ab4d42..c5086c222 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -1089,7 +1089,7 @@ struct dynamic_primary_cca {
};
struct rtl_regulatory {
- char alpha2[2];
+ s8 alpha2[2];
u16 country_code;
u16 max_power_level;
u32 tp_scale;
@@ -1256,16 +1256,16 @@ struct rtl_phy {
u8 cur_bw20_txpwridx;
u8 cur_bw40_txpwridx;
- char txpwr_limit_2_4g[MAX_REGULATION_NUM]
- [MAX_2_4G_BANDWITH_NUM]
- [MAX_RATE_SECTION_NUM]
- [CHANNEL_MAX_NUMBER_2G]
- [MAX_RF_PATH_NUM];
- char txpwr_limit_5g[MAX_REGULATION_NUM]
- [MAX_5G_BANDWITH_NUM]
+ s8 txpwr_limit_2_4g[MAX_REGULATION_NUM]
+ [MAX_2_4G_BANDWITH_NUM]
[MAX_RATE_SECTION_NUM]
- [CHANNEL_MAX_NUMBER_5G]
+ [CHANNEL_MAX_NUMBER_2G]
[MAX_RF_PATH_NUM];
+ s8 txpwr_limit_5g[MAX_REGULATION_NUM]
+ [MAX_5G_BANDWITH_NUM]
+ [MAX_RATE_SECTION_NUM]
+ [CHANNEL_MAX_NUMBER_5G]
+ [MAX_RF_PATH_NUM];
u32 rfreg_chnlval[2];
bool apk_done;
@@ -1639,7 +1639,7 @@ struct fast_ant_training {
};
struct dm_phy_dbg_info {
- char rx_snrdb[4];
+ s8 rx_snrdb[4];
u64 num_qry_phy_status;
u64 num_qry_phy_status_cck;
u64 num_qry_phy_status_ofdm;
@@ -1688,16 +1688,16 @@ struct rtl_dm {
u8 txpower_track_control;
bool interrupt_migration;
bool disable_tx_int;
- char ofdm_index[MAX_RF_PATH];
+ s8 ofdm_index[MAX_RF_PATH];
u8 default_ofdm_index;
u8 default_cck_index;
- char cck_index;
- char delta_power_index[MAX_RF_PATH];
- char delta_power_index_last[MAX_RF_PATH];
- char power_index_offset[MAX_RF_PATH];
- char absolute_ofdm_swing_idx[MAX_RF_PATH];
- char remnant_ofdm_swing_idx[MAX_RF_PATH];
- char remnant_cck_idx;
+ s8 cck_index;
+ s8 delta_power_index[MAX_RF_PATH];
+ s8 delta_power_index_last[MAX_RF_PATH];
+ s8 power_index_offset[MAX_RF_PATH];
+ s8 absolute_ofdm_swing_idx[MAX_RF_PATH];
+ s8 remnant_ofdm_swing_idx[MAX_RF_PATH];
+ s8 remnant_cck_idx;
bool modify_txagc_flag_path_a;
bool modify_txagc_flag_path_b;
@@ -1726,8 +1726,8 @@ struct rtl_dm {
u8 swing_idx_cck_base;
bool swing_flag_cck;
- char swing_diff_2g;
- char swing_diff_5g;
+ s8 swing_diff_2g;
+ s8 swing_diff_5g;
u8 delta_swing_table_idx_24gccka_p[DEL_SW_IDX_SZ];
u8 delta_swing_table_idx_24gccka_n[DEL_SW_IDX_SZ];
@@ -1838,17 +1838,17 @@ struct rtl_efuse {
*
* Sizes of these arrays are decided by the larger ones.
*/
- char txpwr_cckdiff[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
- char txpwr_ht20diff[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
- char txpwr_ht40diff[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
- char txpwr_legacyhtdiff[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+ s8 txpwr_cckdiff[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+ s8 txpwr_ht20diff[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+ s8 txpwr_ht40diff[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+ s8 txpwr_legacyhtdiff[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
u8 txpwr_5g_bw40base[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
u8 txpwr_5g_bw80base[MAX_RF_PATH][CHANNEL_MAX_NUMBER_5G_80M];
- char txpwr_5g_ofdmdiff[MAX_RF_PATH][MAX_TX_COUNT];
- char txpwr_5g_bw20diff[MAX_RF_PATH][MAX_TX_COUNT];
- char txpwr_5g_bw40diff[MAX_RF_PATH][MAX_TX_COUNT];
- char txpwr_5g_bw80diff[MAX_RF_PATH][MAX_TX_COUNT];
+ s8 txpwr_5g_ofdmdiff[MAX_RF_PATH][MAX_TX_COUNT];
+ s8 txpwr_5g_bw20diff[MAX_RF_PATH][MAX_TX_COUNT];
+ s8 txpwr_5g_bw40diff[MAX_RF_PATH][MAX_TX_COUNT];
+ s8 txpwr_5g_bw80diff[MAX_RF_PATH][MAX_TX_COUNT];
u8 txpwr_safetyflag; /* Band edge enable flag */
u16 eeprom_txpowerdiff;
@@ -2006,7 +2006,7 @@ struct rtl_stats {
bool is_ht;
bool packet_toself;
bool packet_beacon; /*for rssi */
- char cck_adc_pwdb[4]; /*for rx path selection */
+ s8 cck_adc_pwdb[4]; /*for rx path selection */
bool is_vht;
bool is_short_gi;
@@ -2413,9 +2413,9 @@ struct dig_t {
u8 presta_cstate;
u8 curmultista_cstate;
u8 stop_dig;
- char back_val;
- char back_range_max;
- char back_range_min;
+ s8 back_val;
+ s8 back_range_max;
+ s8 back_range_min;
u8 rx_gain_max;
u8 rx_gain_min;
u8 min_undec_pwdb_for_dm;
@@ -2441,8 +2441,8 @@ struct dig_t {
u8 cur_cs_ratiostate;
u8 pre_cs_ratiostate;
u8 backoff_enable_flag;
- char backoffval_range_max;
- char backoffval_range_min;
+ s8 backoffval_range_max;
+ s8 backoffval_range_min;
u8 dig_min_0;
u8 dig_min_1;
u8 bt30_cur_igi;
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 569918c48..603c90470 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2134,6 +2134,7 @@ static void rndis_get_scan_results(struct work_struct *work)
struct rndis_wlan_private *priv =
container_of(work, struct rndis_wlan_private, scan_work.work);
struct usbnet *usbdev = priv->usbdev;
+ struct cfg80211_scan_info info = {};
int ret;
netdev_dbg(usbdev->net, "get_scan_results\n");
@@ -2143,7 +2144,8 @@ static void rndis_get_scan_results(struct work_struct *work)
ret = rndis_check_bssid_list(usbdev, NULL, NULL);
- cfg80211_scan_done(priv->scan_request, ret < 0);
+ info.aborted = ret < 0;
+ cfg80211_scan_done(priv->scan_request, &info);
priv->scan_request = NULL;
}
@@ -3574,7 +3576,11 @@ static int rndis_wlan_stop(struct usbnet *usbdev)
flush_workqueue(priv->workqueue);
if (priv->scan_request) {
- cfg80211_scan_done(priv->scan_request, true);
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
+ cfg80211_scan_done(priv->scan_request, &info);
priv->scan_request = NULL;
}
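
All the scan-completion changes in this series follow one mechanical pattern: the old boolean aborted argument to cfg80211_scan_done()/ieee80211_scan_completed() becomes a struct cfg80211_scan_info passed by pointer. A reduced standalone sketch of the call-site conversion (the struct and function here are stubs; the real 4.8 struct carries more fields):

#include <stdbool.h>
#include <stdio.h>

struct cfg80211_scan_request;                 /* opaque stub */
struct cfg80211_scan_info { bool aborted; };  /* reduced stub */

static void cfg80211_scan_done(struct cfg80211_scan_request *req,
			       struct cfg80211_scan_info *info)
{
	(void)req;
	printf("scan done, aborted=%d\n", info->aborted);
}

static void report_scan_result(struct cfg80211_scan_request *req, int ret)
{
	/* old call site:  cfg80211_scan_done(req, ret < 0); */
	struct cfg80211_scan_info info = {
		.aborted = ret < 0,
	};

	cfg80211_scan_done(req, &info);
}

int main(void)
{
	report_scan_result(NULL, -5);  /* aborted */
	report_scan_result(NULL, 0);   /* completed */
	return 0;
}
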
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index 40658b62d..35c14cc3f 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -398,7 +398,7 @@ static int rsi_mgmt_pkt_to_core(struct rsi_common *common,
return -ENOLINK;
msg_len -= pad_bytes;
- if ((msg_len <= 0) || (!msg)) {
+ if (msg_len <= 0) {
rsi_dbg(MGMT_RX_ZONE,
"%s: Invalid rx msg of len = %d\n",
__func__, msg_len);
diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c
index 983788156..0a0ff7e31 100644
--- a/drivers/net/wireless/st/cw1200/scan.c
+++ b/drivers/net/wireless/st/cw1200/scan.c
@@ -167,6 +167,10 @@ void cw1200_scan_work(struct work_struct *work)
}
if (!priv->scan.req || (priv->scan.curr == priv->scan.end)) {
+ struct cfg80211_scan_info info = {
+ .aborted = priv->scan.status ? 1 : 0,
+ };
+
if (priv->scan.output_power != priv->output_power)
wsm_set_output_power(priv, priv->output_power * 10);
if (priv->join_status == CW1200_JOIN_STATUS_STA &&
@@ -188,7 +192,7 @@ void cw1200_scan_work(struct work_struct *work)
cw1200_scan_restart_delayed(priv);
wsm_unlock_tx(priv);
mutex_unlock(&priv->conf_mutex);
- ieee80211_scan_completed(priv->hw, priv->scan.status ? 1 : 0);
+ ieee80211_scan_completed(priv->hw, &info);
up(&priv->scan.lock);
return;
} else {
diff --git a/drivers/net/wireless/ti/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c
index c98630394..d0593bc1f 100644
--- a/drivers/net/wireless/ti/wl1251/event.c
+++ b/drivers/net/wireless/ti/wl1251/event.c
@@ -36,7 +36,11 @@ static int wl1251_event_scan_complete(struct wl1251 *wl,
mbox->scheduled_scan_channels);
if (wl->scanning) {
- ieee80211_scan_completed(wl->hw, false);
+ struct cfg80211_scan_info info = {
+ .aborted = false,
+ };
+
+ ieee80211_scan_completed(wl->hw, &info);
wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan completed");
wl->scanning = false;
if (wl->hw->conf.flags & IEEE80211_CONF_IDLE)
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index e74d60dad..ea81be2c6 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -448,7 +448,11 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
WARN_ON(wl->state != WL1251_STATE_ON);
if (wl->scanning) {
- ieee80211_scan_completed(wl->hw, true);
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
+ ieee80211_scan_completed(wl->hw, &info);
wl->scanning = false;
}
diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c
index ef811848d..2c5df43b8 100644
--- a/drivers/net/wireless/ti/wl18xx/event.c
+++ b/drivers/net/wireless/ti/wl18xx/event.c
@@ -112,12 +112,18 @@ static int wlcore_smart_config_decode_event(struct wl1271 *wl,
return 0;
}
-static void wlcore_event_time_sync(struct wl1271 *wl, u16 tsf_msb, u16 tsf_lsb)
+static void wlcore_event_time_sync(struct wl1271 *wl,
+ u16 tsf_high_msb, u16 tsf_high_lsb,
+ u16 tsf_low_msb, u16 tsf_low_lsb)
{
- u32 clock;
- /* convert the MSB+LSB to a u32 TSF value */
- clock = (tsf_msb << 16) | tsf_lsb;
- wl1271_info("TIME_SYNC_EVENT_ID: clock %u", clock);
+ u32 clock_low;
+ u32 clock_high;
+
+ clock_high = (tsf_high_msb << 16) | tsf_high_lsb;
+ clock_low = (tsf_low_msb << 16) | tsf_low_lsb;
+
+ wl1271_info("TIME_SYNC_EVENT_ID: clock_high %u, clock low %u",
+ clock_high, clock_low);
}
int wl18xx_process_mailbox_events(struct wl1271 *wl)
@@ -138,8 +144,10 @@ int wl18xx_process_mailbox_events(struct wl1271 *wl)
if (vector & TIME_SYNC_EVENT_ID)
wlcore_event_time_sync(wl,
- mbox->time_sync_tsf_msb,
- mbox->time_sync_tsf_lsb);
+ mbox->time_sync_tsf_high_msb,
+ mbox->time_sync_tsf_high_lsb,
+ mbox->time_sync_tsf_low_msb,
+ mbox->time_sync_tsf_low_lsb);
if (vector & RADAR_DETECTED_EVENT_ID) {
wl1271_info("radar event: channel %d type %s",
@@ -187,11 +195,11 @@ int wl18xx_process_mailbox_events(struct wl1271 *wl)
*/
if (vector & MAX_TX_FAILURE_EVENT_ID)
wlcore_event_max_tx_failure(wl,
- le32_to_cpu(mbox->tx_retry_exceeded_bitmap));
+ le16_to_cpu(mbox->tx_retry_exceeded_bitmap));
if (vector & INACTIVE_STA_EVENT_ID)
wlcore_event_inactive_sta(wl,
- le32_to_cpu(mbox->inactive_sta_bitmap));
+ le16_to_cpu(mbox->inactive_sta_bitmap));
if (vector & REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID)
wlcore_event_roc_complete(wl);
diff --git a/drivers/net/wireless/ti/wl18xx/event.h b/drivers/net/wireless/ti/wl18xx/event.h
index 070de1274..ce8ea9c04 100644
--- a/drivers/net/wireless/ti/wl18xx/event.h
+++ b/drivers/net/wireless/ti/wl18xx/event.h
@@ -74,10 +74,16 @@ struct wl18xx_event_mailbox {
__le16 bss_loss_bitmap;
/* bitmap of stations (by HLID) which exceeded max tx retries */
- __le32 tx_retry_exceeded_bitmap;
+ __le16 tx_retry_exceeded_bitmap;
+
+ /* time sync high msb*/
+ __le16 time_sync_tsf_high_msb;
/* bitmap of inactive stations (by HLID) */
- __le32 inactive_sta_bitmap;
+ __le16 inactive_sta_bitmap;
+
+ /* time sync high lsb*/
+ __le16 time_sync_tsf_high_lsb;
/* rx BA win size indicated by RX_BA_WIN_SIZE_CHANGE_EVENT_ID */
u8 rx_ba_role_id;
@@ -98,14 +104,15 @@ struct wl18xx_event_mailbox {
u8 sc_sync_channel;
u8 sc_sync_band;
- /* time sync msb*/
- u16 time_sync_tsf_msb;
+ /* time sync low msb*/
+ __le16 time_sync_tsf_low_msb;
+
/* radar detect */
u8 radar_channel;
u8 radar_type;
- /* time sync lsb*/
- u16 time_sync_tsf_lsb;
+ /* time sync low lsb*/
+ __le16 time_sync_tsf_low_lsb;
} __packed;
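
The wl18xx mailbox now delivers the TSF as four little-endian 16-bit words, and wlcore_event_time_sync() recombines each MSB/LSB pair into a 32-bit half. A standalone sketch of the recombination (host byte order assumed; the driver's mailbox fields are __le16):

#include <stdint.h>
#include <stdio.h>

static void time_sync(uint16_t tsf_high_msb, uint16_t tsf_high_lsb,
		      uint16_t tsf_low_msb, uint16_t tsf_low_lsb)
{
	uint32_t clock_high = ((uint32_t)tsf_high_msb << 16) | tsf_high_lsb;
	uint32_t clock_low  = ((uint32_t)tsf_low_msb << 16) | tsf_low_lsb;

	printf("clock_high 0x%08x, clock_low 0x%08x\n",
	       (unsigned)clock_high, (unsigned)clock_low);
}

int main(void)
{
	/* 64-bit TSF 0x0123456789abcdef split into 16-bit words */
	time_sync(0x0123, 0x4567, 0x89ab, 0xcdef);
	return 0;
}
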
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index fc02c27bc..3f451467f 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -1214,6 +1214,10 @@ static void wl18xx_convert_fw_status(struct wl1271 *wl, void *raw_fw_status,
int_fw_status->counters.tx_voice_released_blks;
fw_status->counters.tx_last_rate =
int_fw_status->counters.tx_last_rate;
+ fw_status->counters.tx_last_rate_mbps =
+ int_fw_status->counters.tx_last_rate_mbps;
+ fw_status->counters.hlid =
+ int_fw_status->counters.hlid;
fw_status->log_start_addr = le32_to_cpu(int_fw_status->log_start_addr);
@@ -1821,9 +1825,12 @@ static const struct ieee80211_iface_limit wl18xx_iface_limits[] = {
},
{
.max = 1,
- .types = BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_P2P_GO) |
- BIT(NL80211_IFTYPE_P2P_CLIENT),
+ .types = BIT(NL80211_IFTYPE_AP)
+ | BIT(NL80211_IFTYPE_P2P_GO)
+ | BIT(NL80211_IFTYPE_P2P_CLIENT)
+#ifdef CONFIG_MAC80211_MESH
+ | BIT(NL80211_IFTYPE_MESH_POINT)
+#endif
},
{
.max = 1,
@@ -1836,6 +1843,12 @@ static const struct ieee80211_iface_limit wl18xx_iface_ap_limits[] = {
.max = 2,
.types = BIT(NL80211_IFTYPE_AP),
},
+#ifdef CONFIG_MAC80211_MESH
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_MESH_POINT),
+ },
+#endif
{
.max = 1,
.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
diff --git a/drivers/net/wireless/ti/wl18xx/tx.c b/drivers/net/wireless/ti/wl18xx/tx.c
index ebaf66ef3..876aef10f 100644
--- a/drivers/net/wireless/ti/wl18xx/tx.c
+++ b/drivers/net/wireless/ti/wl18xx/tx.c
@@ -30,9 +30,9 @@
static
void wl18xx_get_last_tx_rate(struct wl1271 *wl, struct ieee80211_vif *vif,
- u8 band, struct ieee80211_tx_rate *rate)
+ u8 band, struct ieee80211_tx_rate *rate, u8 hlid)
{
- u8 fw_rate = wl->fw_status->counters.tx_last_rate;
+ u8 fw_rate = wl->links[hlid].fw_rate_idx;
if (fw_rate > CONF_HW_RATE_INDEX_MAX) {
wl1271_error("last Tx rate invalid: %d", fw_rate);
@@ -79,6 +79,7 @@ static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte)
struct sk_buff *skb;
int id = tx_stat_byte & WL18XX_TX_STATUS_DESC_ID_MASK;
bool tx_success;
+ struct wl1271_tx_hw_descr *tx_desc;
/* check for id legality */
if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) {
@@ -91,6 +92,7 @@ static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte)
skb = wl->tx_frames[id];
info = IEEE80211_SKB_CB(skb);
+ tx_desc = (struct wl1271_tx_hw_descr *)skb->data;
if (wl12xx_is_dummy_packet(wl, skb)) {
wl1271_free_tx_id(wl, id);
@@ -105,7 +107,9 @@ static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte)
* the info->status structures
*/
wl18xx_get_last_tx_rate(wl, info->control.vif,
- info->band, &info->status.rates[0]);
+ info->band,
+ &info->status.rates[0],
+ tx_desc->hlid);
info->status.rates[0].count = 1; /* no data about retries */
info->status.ack_signal = -1;
@@ -144,12 +148,22 @@ void wl18xx_tx_immediate_complete(struct wl1271 *wl)
struct wl18xx_fw_status_priv *status_priv =
(struct wl18xx_fw_status_priv *)wl->fw_status->priv;
struct wl18xx_priv *priv = wl->priv;
- u8 i;
+ u8 i, hlid;
/* nothing to do here */
if (priv->last_fw_rls_idx == status_priv->fw_release_idx)
return;
+ /* update rates per link */
+ hlid = wl->fw_status->counters.hlid;
+
+ if (hlid < WLCORE_MAX_LINKS) {
+ wl->links[hlid].fw_rate_idx =
+ wl->fw_status->counters.tx_last_rate;
+ wl->links[hlid].fw_rate_mbps =
+ wl->fw_status->counters.tx_last_rate_mbps;
+ }
+
/* freed Tx descriptors */
wl1271_debug(DEBUG_TX, "last released desc = %d, current idx = %d",
priv->last_fw_rls_idx, status_priv->fw_release_idx);
diff --git a/drivers/net/wireless/ti/wl18xx/wl18xx.h b/drivers/net/wireless/ti/wl18xx/wl18xx.h
index 71e9e382c..5371cbdd5 100644
--- a/drivers/net/wireless/ti/wl18xx/wl18xx.h
+++ b/drivers/net/wireless/ti/wl18xx/wl18xx.h
@@ -29,7 +29,7 @@
#define WL18XX_IFTYPE_VER 9
#define WL18XX_MAJOR_VER WLCORE_FW_VER_IGNORE
#define WL18XX_SUBTYPE_VER WLCORE_FW_VER_IGNORE
-#define WL18XX_MINOR_VER 11
+#define WL18XX_MINOR_VER 58
#define WL18XX_CMD_MAX_SIZE 740
@@ -125,7 +125,11 @@ struct wl18xx_fw_packet_counters {
/* Tx rate of the last transmitted packet */
u8 tx_last_rate;
- u8 padding[2];
+ /* Tx rate or Tx rate estimate pre-calculated by fw in Mbps units */
+ u8 tx_last_rate_mbps;
+
+ /* hlid for which the rates were reported */
+ u8 hlid;
} __packed;
/* FW status registers */
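
Note that the two new u8 counters exactly replace the old u8 padding[2], so the packed struct keeps its size and the firmware status layout is unchanged. A quick standalone check of that invariant (reduced, hypothetical stand-ins for the real struct; GCC packed attribute assumed):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct counters_old {
	uint8_t tx_last_rate;
	uint8_t padding[2];
} __attribute__((packed));

struct counters_new {
	uint8_t tx_last_rate;
	uint8_t tx_last_rate_mbps;
	uint8_t hlid;
} __attribute__((packed));

int main(void)
{
	assert(sizeof(struct counters_old) == sizeof(struct counters_new));
	printf("both layouts are %zu bytes\n", sizeof(struct counters_new));
	return 0;
}
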
diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h
index 0d61fae88..6321ed472 100644
--- a/drivers/net/wireless/ti/wlcore/acx.h
+++ b/drivers/net/wireless/ti/wlcore/acx.h
@@ -105,6 +105,7 @@ enum wl12xx_role {
WL1271_ROLE_DEVICE,
WL1271_ROLE_P2P_CL,
WL1271_ROLE_P2P_GO,
+ WL1271_ROLE_MESH_POINT,
WL12XX_INVALID_ROLE_TYPE = 0xff
};
diff --git a/drivers/net/wireless/ti/wlcore/boot.c b/drivers/net/wireless/ti/wlcore/boot.c
index 19b7ec7b6..f75d30444 100644
--- a/drivers/net/wireless/ti/wlcore/boot.c
+++ b/drivers/net/wireless/ti/wlcore/boot.c
@@ -130,7 +130,7 @@ fail:
wl1271_error("Your WiFi FW version (%u.%u.%u.%u.%u) is invalid.\n"
"Please use at least FW %s\n"
"You can get the latest firmwares at:\n"
- "git://github.com/TI-OpenLink/firmwares.git",
+ "git://git.ti.com/wilink8-wlan/wl18xx_fw.git",
fw_ver[FW_VER_CHIP], fw_ver[FW_VER_IF_TYPE],
fw_ver[FW_VER_MAJOR], fw_ver[FW_VER_SUBTYPE],
fw_ver[FW_VER_MINOR], min_fw_str);
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 33153565a..7f4da727b 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -629,11 +629,14 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wlvif->role_id);
- /* trying to use hidden SSID with an old hostapd version */
- if (wlvif->ssid_len == 0 && !bss_conf->hidden_ssid) {
- wl1271_error("got a null SSID from beacon/bss");
- ret = -EINVAL;
- goto out;
+ /* If MESH --> ssid_len is always 0 */
+ if (!ieee80211_vif_is_mesh(vif)) {
+ /* trying to use hidden SSID with an old hostapd version */
+ if (wlvif->ssid_len == 0 && !bss_conf->hidden_ssid) {
+ wl1271_error("got a null SSID from beacon/bss");
+ ret = -EINVAL;
+ goto out;
+ }
}
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -1566,6 +1569,13 @@ int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
cpu_to_le32(wl1271_tx_enabled_rates_get(wl, sta_rates,
wlvif->band));
+ if (!cmd->supported_rates) {
+ wl1271_debug(DEBUG_CMD,
+ "peer has no supported rates yet, configuring basic rates: 0x%x",
+ wlvif->basic_rate_set);
+ cmd->supported_rates = cpu_to_le32(wlvif->basic_rate_set);
+ }
+
wl1271_debug(DEBUG_CMD, "new peer rates=0x%x queues=0x%x",
cmd->supported_rates, sta->uapsd_queues);
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 2509c0050..04e907660 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -221,6 +221,7 @@ static void wlcore_rc_update_work(struct work_struct *work)
struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
rc_update_work);
struct wl1271 *wl = wlvif->wl;
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
mutex_lock(&wl->mutex);
@@ -231,8 +232,16 @@ static void wlcore_rc_update_work(struct work_struct *work)
if (ret < 0)
goto out;
- wlcore_hw_sta_rc_update(wl, wlvif);
+ if (ieee80211_vif_is_mesh(vif)) {
+ ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
+ true, wlvif->sta.hlid);
+ if (ret < 0)
+ goto out_sleep;
+ } else {
+ wlcore_hw_sta_rc_update(wl, wlvif);
+ }
+out_sleep:
wl1271_ps_elp_sleep(wl);
out:
mutex_unlock(&wl->mutex);
@@ -2153,10 +2162,14 @@ static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+
switch (wlvif->bss_type) {
case BSS_TYPE_AP_BSS:
if (wlvif->p2p)
return WL1271_ROLE_P2P_GO;
+ else if (ieee80211_vif_is_mesh(vif))
+ return WL1271_ROLE_MESH_POINT;
else
return WL1271_ROLE_AP;
@@ -2198,6 +2211,7 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
wlvif->p2p = 1;
/* fall-through */
case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_MESH_POINT:
wlvif->bss_type = BSS_TYPE_AP_BSS;
break;
default:
@@ -2615,6 +2629,10 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
wl->scan_wlvif == wlvif) {
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
/*
* Rearm the tx watchdog just before idling scan. This
* prevents just-finished scans from triggering the watchdog
@@ -2625,7 +2643,7 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
wl->scan_wlvif = NULL;
wl->scan.req = NULL;
- ieee80211_scan_completed(wl->hw, true);
+ ieee80211_scan_completed(wl->hw, &info);
}
if (wl->sched_vif == wlvif)
@@ -3649,6 +3667,9 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
@@ -3681,7 +3702,7 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
wl->scan_wlvif = NULL;
wl->scan.req = NULL;
- ieee80211_scan_completed(wl->hw, true);
+ ieee80211_scan_completed(wl->hw, &info);
out_sleep:
wl1271_ps_elp_sleep(wl);
@@ -4124,9 +4145,14 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
if (ret < 0)
goto out;
- ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
- if (ret < 0)
- goto out;
+ /* No need to set probe resp template for mesh */
+ if (!ieee80211_vif_is_mesh(vif)) {
+ ret = wl1271_ap_set_probe_resp_tmpl(wl,
+ wlvif->basic_rate,
+ vif);
+ if (ret < 0)
+ goto out;
+ }
ret = wlcore_set_beacon_template(wl, vif, true);
if (ret < 0)
@@ -4960,6 +4986,7 @@ static int wl12xx_sta_add(struct wl1271 *wl,
return ret;
wl_sta = (struct wl1271_station *)sta->drv_priv;
+ wl_sta->wl = wl;
hlid = wl_sta->hlid;
ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
@@ -5091,6 +5118,11 @@ static int wl12xx_update_sta_state(struct wl1271 *wl,
if (ret < 0)
return ret;
+ /* reconfigure rates */
+ ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid);
+ if (ret < 0)
+ return ret;
+
ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
wl_sta->hlid);
if (ret)
@@ -5629,6 +5661,7 @@ static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
/* this callback is atomic, so schedule a new work */
wlvif->rc_update_bw = sta->bandwidth;
+ memcpy(&wlvif->rc_ht_cap, &sta->ht_cap, sizeof(sta->ht_cap));
ieee80211_queue_work(hw, &wlvif->rc_update_work);
}
@@ -5667,6 +5700,17 @@ out:
mutex_unlock(&wl->mutex);
}
+static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta)
+{
+ struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
+ struct wl1271 *wl = hw->priv;
+ u8 hlid = wl_sta->hlid;
+
+ /* return in units of Kbps */
+ return (wl->links[hlid].fw_rate_mbps * 1000);
+}
+
static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
{
struct wl1271 *wl = hw->priv;
@@ -5867,6 +5911,7 @@ static const struct ieee80211_ops wl1271_ops = {
.switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
.sta_rc_update = wlcore_op_sta_rc_update,
.sta_statistics = wlcore_op_sta_statistics,
+ .get_expected_throughput = wlcore_op_get_expected_throughput,
CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
};
@@ -6050,7 +6095,11 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_DEVICE) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
BIT(NL80211_IFTYPE_P2P_GO);
+
wl->hw->wiphy->max_scan_ssids = 1;
wl->hw->wiphy->max_sched_scan_ssids = 16;
wl->hw->wiphy->max_match_sets = 16;
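
The new wlcore_op_get_expected_throughput() hook above is a bounds-checked table lookup plus a unit conversion: the firmware reports the last Tx rate in Mbps, while mac80211 consumes the estimate in Kbps, hence the multiply by 1000. A standalone sketch of the per-link caching and lookup (WLCORE_MAX_LINKS value assumed here):

#include <stdint.h>
#include <stdio.h>

#define WLCORE_MAX_LINKS 16  /* assumed; matches the driver's bound check */

struct link { uint8_t fw_rate_idx; uint8_t fw_rate_mbps; };
static struct link links[WLCORE_MAX_LINKS];

/* Mirrors the bounds-checked per-link update done in
 * wl18xx_tx_immediate_complete() and wlcore_rx() above. */
static void update_link_rates(uint8_t hlid, uint8_t rate_idx, uint8_t rate_mbps)
{
	if (hlid < WLCORE_MAX_LINKS) {
		links[hlid].fw_rate_idx = rate_idx;
		links[hlid].fw_rate_mbps = rate_mbps;
	}
}

/* Mirrors wlcore_op_get_expected_throughput(): fw Mbps -> mac80211 Kbps. */
static uint32_t expected_throughput_kbps(uint8_t hlid)
{
	return (uint32_t)links[hlid].fw_rate_mbps * 1000;
}

int main(void)
{
	update_link_rates(3, 7, 72);   /* 72 Mbps on link 3 */
	update_link_rates(200, 0, 0);  /* out of range: silently ignored */
	printf("link 3: %u Kbps\n", (unsigned)expected_throughput_kbps(3));
	return 0;
}
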
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index c9bd294a0..b9e140451 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -222,6 +222,13 @@ int wlcore_rx(struct wl1271 *wl, struct wl_fw_status *status)
enum wl_rx_buf_align rx_align;
int ret = 0;
+ /* update rates per link */
+ hlid = status->counters.hlid;
+
+ if (hlid < WLCORE_MAX_LINKS)
+ wl->links[hlid].fw_rate_mbps =
+ status->counters.tx_last_rate_mbps;
+
while (drv_rx_counter != fw_rx_counter) {
buf_size = 0;
rx_counter = drv_rx_counter;
diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
index 233436432..5612f5916 100644
--- a/drivers/net/wireless/ti/wlcore/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -36,6 +36,9 @@ void wl1271_scan_complete_work(struct work_struct *work)
struct delayed_work *dwork;
struct wl1271 *wl;
struct wl12xx_vif *wlvif;
+ struct cfg80211_scan_info info = {
+ .aborted = false,
+ };
int ret;
dwork = to_delayed_work(work);
@@ -82,7 +85,7 @@ void wl1271_scan_complete_work(struct work_struct *work)
wlcore_cmd_regdomain_config_locked(wl);
- ieee80211_scan_completed(wl->hw, false);
+ ieee80211_scan_completed(wl->hw, &info);
out:
mutex_unlock(&wl->mutex);
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index c172da56b..5839acbbc 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -241,7 +241,6 @@ static int wlcore_probe_of(struct device *dev, int *irq,
*irq = irq_of_parse_and_map(np, 0);
if (!*irq) {
dev_err(dev, "No irq in platform data\n");
- kfree(pdev_data);
return -EINVAL;
}
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index cea9443c2..6d2404088 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -70,16 +70,30 @@
#define WSPI_MAX_CHUNK_SIZE 4092
/*
- * only support SPI for 12xx - this code should be reworked when 18xx
- * support is introduced
+ * wl18xx driver aggregation buffer size is (13 * PAGE_SIZE) compared to
+ * (4 * PAGE_SIZE) for wl12xx, so use the larger buffer needed for wl18xx
*/
-#define SPI_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
+#define SPI_AGGR_BUFFER_SIZE (13 * PAGE_SIZE)
/* Maximum number of SPI write chunks */
#define WSPI_MAX_NUM_OF_CHUNKS \
((SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE) + 1)
+struct wilink_familiy_data {
+ char name[8];
+};
+
+const struct wilink_familiy_data *wilink_data;
+
+static const struct wilink_familiy_data wl18xx_data = {
+ .name = "wl18xx",
+};
+
+static const struct wilink_familiy_data wl12xx_data = {
+ .name = "wl12xx",
+};
+
struct wl12xx_spi_glue {
struct device *dev;
struct platform_device *core;
@@ -119,6 +133,7 @@ static void wl12xx_spi_init(struct device *child)
struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
struct spi_transfer t;
struct spi_message m;
+ struct spi_device *spi = to_spi_device(glue->dev);
u8 *cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
if (!cmd) {
@@ -151,6 +166,7 @@ static void wl12xx_spi_init(struct device *child)
cmd[6] |= WSPI_INIT_CMD_EN_FIXEDBUSY;
cmd[7] = crc7_be(0, cmd+2, WSPI_INIT_CMD_CRC_LEN) | WSPI_INIT_CMD_END;
+
/*
* The above is the logical order; it must actually be stored
* in the buffer byte-swapped.
@@ -163,6 +179,28 @@ static void wl12xx_spi_init(struct device *child)
spi_message_add_tail(&t, &m);
spi_sync(to_spi_device(glue->dev), &m);
+
+ /* Send extra clocks with inverted CS (high). This is required
+ * by the wilink family in order to successfully enter WSPI mode.
+ */
+ spi->mode ^= SPI_CS_HIGH;
+ memset(&m, 0, sizeof(m));
+ spi_message_init(&m);
+
+ cmd[0] = 0xff;
+ cmd[1] = 0xff;
+ cmd[2] = 0xff;
+ cmd[3] = 0xff;
+ __swab32s((u32 *)cmd);
+
+ t.tx_buf = cmd;
+ t.len = 4;
+ spi_message_add_tail(&t, &m);
+
+ spi_sync(to_spi_device(glue->dev), &m);
+
+ /* Restore chip select configuration to normal */
+ spi->mode ^= SPI_CS_HIGH;
kfree(cmd);
}
@@ -270,22 +308,25 @@ static int __must_check wl12xx_spi_raw_read(struct device *child, int addr,
return 0;
}
-static int __must_check wl12xx_spi_raw_write(struct device *child, int addr,
- void *buf, size_t len, bool fixed)
+static int __wl12xx_spi_raw_write(struct device *child, int addr,
+ void *buf, size_t len, bool fixed)
{
struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
- /* SPI write buffers - 2 for each chunk */
- struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS];
+ struct spi_transfer *t;
struct spi_message m;
u32 commands[WSPI_MAX_NUM_OF_CHUNKS]; /* 1 command per chunk */
u32 *cmd;
u32 chunk_len;
int i;
+ /* SPI write buffers - 2 for each chunk */
+ t = kzalloc(sizeof(*t) * 2 * WSPI_MAX_NUM_OF_CHUNKS, GFP_KERNEL);
+ if (!t)
+ return -ENOMEM;
+
WARN_ON(len > SPI_AGGR_BUFFER_SIZE);
spi_message_init(&m);
- memset(t, 0, sizeof(t));
cmd = &commands[0];
i = 0;
@@ -318,9 +359,26 @@ static int __must_check wl12xx_spi_raw_write(struct device *child, int addr,
spi_sync(to_spi_device(glue->dev), &m);
+ kfree(t);
return 0;
}
+static int __must_check wl12xx_spi_raw_write(struct device *child, int addr,
+ void *buf, size_t len, bool fixed)
+{
+ int ret;
+
+ /* The ELP wakeup write may fail the first time due to internal
+ * hardware latency. It is safer to send the wakeup command twice to
+ * avoid unexpected failures.
+ */
+ if (addr == HW_ACCESS_ELP_CTRL_REG)
+ ret = __wl12xx_spi_raw_write(child, addr, buf, len, fixed);
+ ret = __wl12xx_spi_raw_write(child, addr, buf, len, fixed);
+
+ return ret;
+}
+
/**
* wl12xx_spi_set_power - power on/off the wl12xx unit
* @child: wl12xx device handle.
@@ -349,17 +407,38 @@ static int wl12xx_spi_set_power(struct device *child, bool enable)
return ret;
}
+/**
+ * wl12xx_spi_set_block_size
+ *
+ * This function is not needed for SPI mode, but it must be present.
+ * Without it defined, wlcore falls back to using the wrong packet
+ * alignment on tx.
+ */
+static void wl12xx_spi_set_block_size(struct device *child,
+ unsigned int blksz)
+{
+}
+
static struct wl1271_if_operations spi_ops = {
.read = wl12xx_spi_raw_read,
.write = wl12xx_spi_raw_write,
.reset = wl12xx_spi_reset,
.init = wl12xx_spi_init,
.power = wl12xx_spi_set_power,
- .set_block_size = NULL,
+ .set_block_size = wl12xx_spi_set_block_size,
};
static const struct of_device_id wlcore_spi_of_match_table[] = {
- { .compatible = "ti,wl1271" },
+ { .compatible = "ti,wl1271", .data = &wl12xx_data},
+ { .compatible = "ti,wl1273", .data = &wl12xx_data},
+ { .compatible = "ti,wl1281", .data = &wl12xx_data},
+ { .compatible = "ti,wl1283", .data = &wl12xx_data},
+ { .compatible = "ti,wl1801", .data = &wl18xx_data},
+ { .compatible = "ti,wl1805", .data = &wl18xx_data},
+ { .compatible = "ti,wl1807", .data = &wl18xx_data},
+ { .compatible = "ti,wl1831", .data = &wl18xx_data},
+ { .compatible = "ti,wl1835", .data = &wl18xx_data},
+ { .compatible = "ti,wl1837", .data = &wl18xx_data},
{ }
};
MODULE_DEVICE_TABLE(of, wlcore_spi_of_match_table);
@@ -375,18 +454,24 @@ static int wlcore_probe_of(struct spi_device *spi, struct wl12xx_spi_glue *glue,
struct wlcore_platdev_data *pdev_data)
{
struct device_node *dt_node = spi->dev.of_node;
- int ret;
+ const struct of_device_id *of_id;
+
+ of_id = of_match_node(wlcore_spi_of_match_table, dt_node);
+ if (!of_id)
+ return -ENODEV;
+
+ wilink_data = of_id->data;
+ dev_info(&spi->dev, "selected chip family is %s\n",
+ wilink_data->name);
if (of_find_property(dt_node, "clock-xtal", NULL))
pdev_data->ref_clock_xtal = true;
- ret = of_property_read_u32(dt_node, "ref-clock-frequency",
- &pdev_data->ref_clock_freq);
- if (ret) {
- dev_err(glue->dev,
- "can't get reference clock frequency (%d)\n", ret);
- return ret;
- }
+ /* optional clock frequency params */
+ of_property_read_u32(dt_node, "ref-clock-frequency",
+ &pdev_data->ref_clock_freq);
+ of_property_read_u32(dt_node, "tcxo-clock-frequency",
+ &pdev_data->tcxo_clock_freq);
return 0;
}
@@ -437,7 +522,8 @@ static int wl1271_probe(struct spi_device *spi)
return ret;
}
- glue->core = platform_device_alloc("wl12xx", PLATFORM_DEVID_AUTO);
+ glue->core = platform_device_alloc(wilink_data->name,
+ PLATFORM_DEVID_AUTO);
if (!glue->core) {
dev_err(glue->dev, "can't allocate platform_device\n");
return -ENOMEM;
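
With the aggregation buffer grown from 4 to 13 pages, WSPI_MAX_NUM_OF_CHUNKS grows with it, and the struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS] array that used to sit on the stack becomes large enough to justify the kzalloc() now done in __wl12xx_spi_raw_write(). A standalone sketch of the chunk arithmetic:

#include <stdio.h>

#define PAGE_SIZE            4096
#define WSPI_MAX_CHUNK_SIZE  4092
#define SPI_AGGR_BUFFER_SIZE (13 * PAGE_SIZE)
#define WSPI_MAX_NUM_OF_CHUNKS \
	((SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE) + 1)

int main(void)
{
	unsigned int len = SPI_AGGR_BUFFER_SIZE, off = 0, chunks = 0;

	/* Same loop shape as __wl12xx_spi_raw_write(): one command and
	 * one data spi_transfer per chunk of at most WSPI_MAX_CHUNK_SIZE. */
	while (off < len) {
		unsigned int chunk = len - off;

		if (chunk > WSPI_MAX_CHUNK_SIZE)
			chunk = WSPI_MAX_CHUNK_SIZE;
		off += chunk;
		chunks++;
	}

	printf("%u chunks, max %d -> up to %d spi_transfer structs\n",
	       chunks, WSPI_MAX_NUM_OF_CHUNKS, 2 * WSPI_MAX_NUM_OF_CHUNKS);
	return 0;
}
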
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index 7d43b8057..d274b0b13 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -171,6 +171,12 @@ struct wl_fw_status {
/* Tx rate of the last transmitted packet */
u8 tx_last_rate;
+
+ /* Tx rate or Tx rate estimate pre-calculated by fw in Mbps */
+ u8 tx_last_rate_mbps;
+
+ /* hlid for which the rates were reported */
+ u8 hlid;
} counters;
u32 log_start_addr;
@@ -273,6 +279,12 @@ struct wl1271_link {
/* bitmap of TIDs where RX BA sessions are active for this link */
u8 ba_bitmap;
+ /* the last fw rate index we used for this link */
+ u8 fw_rate_idx;
+
+ /* the last fw rate [Mbps] we used for this link */
+ u8 fw_rate_mbps;
+
/* The wlvif this link belongs to. Might be null for global links */
struct wl12xx_vif *wlvif;
@@ -335,6 +347,7 @@ struct wl1271_station {
* Used in both AP and STA mode.
*/
u64 total_freed_pkts;
+ struct wl1271 *wl;
};
struct wl12xx_vif {
@@ -472,6 +485,7 @@ struct wl12xx_vif {
/* update rate control */
enum ieee80211_sta_rx_bandwidth rc_update_bw;
+ struct ieee80211_sta_ht_cap rc_ht_cap;
struct work_struct rc_update_work;
/*
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 13fd734b6..82d94f83b 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -378,8 +378,7 @@ static int wl3501_esbq_exec(struct wl3501_card *this, void *sig, int sig_size)
return rc;
}
-static int wl3501_get_mib_value(struct wl3501_card *this, u8 index,
- void *bf, int size)
+static int wl3501_request_mib(struct wl3501_card *this, u8 index, void *bf)
{
struct wl3501_get_req sig = {
.sig_id = WL3501_SIG_GET_REQ,
@@ -395,20 +394,32 @@ static int wl3501_get_mib_value(struct wl3501_card *this, u8 index,
wl3501_set_to_wla(this, ptr, &sig, sizeof(sig));
wl3501_esbq_req(this, &ptr);
this->sig_get_confirm.mib_status = 255;
- spin_unlock_irqrestore(&this->lock, flags);
- rc = wait_event_interruptible(this->wait,
- this->sig_get_confirm.mib_status != 255);
- if (!rc)
- memcpy(bf, this->sig_get_confirm.mib_value,
- size);
- goto out;
+ rc = 0;
}
}
spin_unlock_irqrestore(&this->lock, flags);
-out:
+
return rc;
}
+static int wl3501_get_mib_value(struct wl3501_card *this, u8 index,
+ void *bf, int size)
+{
+ int rc;
+
+ rc = wl3501_request_mib(this, index, bf);
+ if (rc)
+ return rc;
+
+ rc = wait_event_interruptible(this->wait,
+ this->sig_get_confirm.mib_status != 255);
+ if (rc)
+ return rc;
+
+ memcpy(bf, this->sig_get_confirm.mib_value, size);
+ return 0;
+}
+
static int wl3501_pwr_mgmt(struct wl3501_card *this, int suspend)
{
struct wl3501_pwr_mgmt_req sig = {
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 6a31f2610..daf4c7867 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -271,6 +271,11 @@ static int netback_probe(struct xenbus_device *dev,
be->dev = dev;
dev_set_drvdata(&dev->dev, be);
+ be->state = XenbusStateInitialising;
+ err = xenbus_switch_state(dev, XenbusStateInitialising);
+ if (err)
+ goto fail;
+
sg = 1;
do {
@@ -383,11 +388,6 @@ static int netback_probe(struct xenbus_device *dev,
be->hotplug_script = script;
- err = xenbus_switch_state(dev, XenbusStateInitWait);
- if (err)
- goto fail;
-
- be->state = XenbusStateInitWait;
/* This kicks hotplug scripts, so do it immediately. */
err = backend_create_xenvif(be);
@@ -492,20 +492,20 @@ static inline void backend_switch_state(struct backend_info *be,
/* Handle backend state transitions:
*
- * The backend state starts in InitWait and the following transitions are
+ * The backend state starts in Initialising and the following transitions are
* allowed.
*
- * InitWait -> Connected
- *
- *    ^    \         |
- *    |     \        |
- *    |      \       |
- *    |       \      |
- *    |        \     |
- *    |         \    |
- *    |          V   V
+ * Initialising -> InitWait -> Connected
+ *          \
+ *           \        ^    \         |
+ *            \       |     \        |
+ *             \      |      \       |
+ *              \     |       \      |
+ *               \    |        \     |
+ *                \   |         \    |
+ *                 V  |          V   V
 *
- *  Closed  <-> Closing
+ *                  Closed  <-> Closing
*
* The state argument specifies the eventual state of the backend and the
* function transitions to that state via the shortest path.
@@ -515,6 +515,20 @@ static void set_backend_state(struct backend_info *be,
{
while (be->state != state) {
switch (be->state) {
+ case XenbusStateInitialising:
+ switch (state) {
+ case XenbusStateInitWait:
+ case XenbusStateConnected:
+ case XenbusStateClosing:
+ backend_switch_state(be, XenbusStateInitWait);
+ break;
+ case XenbusStateClosed:
+ backend_switch_state(be, XenbusStateClosed);
+ break;
+ default:
+ BUG();
+ }
+ break;
case XenbusStateClosed:
switch (state) {
case XenbusStateInitWait:
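
set_backend_state() loops, taking one legal hop at a time until the requested target state is reached; with the new Initialising start state the probe path no longer jumps straight to InitWait. A reduced userspace model of the stepping loop (hypothetical stand-in; the real hops also write the state to xenstore via backend_switch_state()):

#include <stdio.h>

enum state { INITIALISING, INIT_WAIT, CONNECTED, CLOSING, CLOSED };

static const char *name[] = {
	"Initialising", "InitWait", "Connected", "Closing", "Closed"
};

/* One legal hop toward the target, mirroring the nested switches in
 * set_backend_state() above. */
static enum state next_hop(enum state cur, enum state target)
{
	switch (cur) {
	case INITIALISING:
		return target == CLOSED ? CLOSED : INIT_WAIT;
	case INIT_WAIT:
		return target == CONNECTED ? CONNECTED : CLOSING;
	case CONNECTED:
		return CLOSING;
	case CLOSING:
		return CLOSED;
	case CLOSED:
		return INIT_WAIT;
	}
	return cur;
}

int main(void)
{
	enum state cur = INITIALISING;
	const enum state target = CONNECTED;

	printf("%s", name[cur]);
	while (cur != target) {
		cur = next_hop(cur, target);
		printf(" -> %s", name[cur]);
	}
	printf("\n");
	return 0;
}
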
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index ea8321a48..9d2369269 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -40,6 +40,7 @@ config NFC_MEI_PHY
config NFC_SIM
tristate "NFC hardware simulator driver"
+ depends on NFC_DIGITAL
help
This driver declares two virtual NFC devices supporting NFC-DEP
protocol. An LLCP connection can be established between them and
diff --git a/drivers/nfc/fdp/fdp.c b/drivers/nfc/fdp/fdp.c
index 5a26a1189..9cae454b5 100644
--- a/drivers/nfc/fdp/fdp.c
+++ b/drivers/nfc/fdp/fdp.c
@@ -345,7 +345,7 @@ static void fdp_nci_release_firmware(struct nci_dev *ndev)
if (info->ram_patch) {
release_firmware(info->ram_patch);
- info->otp_patch = NULL;
+ info->ram_patch = NULL;
}
}
@@ -353,7 +353,7 @@ static int fdp_nci_patch_otp(struct nci_dev *ndev)
{
struct fdp_nci_info *info = nci_get_drvdata(ndev);
struct device *dev = &info->phy->i2c_dev->dev;
- u8 conn_id;
+ int conn_id;
int r = 0;
if (info->otp_version >= info->otp_patch_version)
@@ -424,7 +424,7 @@ static int fdp_nci_patch_ram(struct nci_dev *ndev)
{
struct fdp_nci_info *info = nci_get_drvdata(ndev);
struct device *dev = &info->phy->i2c_dev->dev;
- u8 conn_id;
+ int conn_id;
int r = 0;
if (info->ram_version >= info->ram_patch_version)
diff --git a/drivers/nfc/nfcsim.c b/drivers/nfc/nfcsim.c
index 93aaca586..a466e7978 100644
--- a/drivers/nfc/nfcsim.c
+++ b/drivers/nfc/nfcsim.c
@@ -16,525 +16,492 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
#include <linux/nfc.h>
#include <net/nfc/nfc.h>
+#include <net/nfc/digital.h>
-#define DEV_ERR(_dev, fmt, args...) nfc_err(&_dev->nfc_dev->dev, \
- "%s: " fmt, __func__, ## args)
+#define NFCSIM_ERR(d, fmt, args...) nfc_err(&d->nfc_digital_dev->nfc_dev->dev, \
+ "%s: " fmt, __func__, ## args)
-#define DEV_DBG(_dev, fmt, args...) dev_dbg(&_dev->nfc_dev->dev, \
- "%s: " fmt, __func__, ## args)
+#define NFCSIM_DBG(d, fmt, args...) dev_dbg(&d->nfc_digital_dev->nfc_dev->dev, \
+ "%s: " fmt, __func__, ## args)
-#define NFCSIM_VERSION "0.1"
+#define NFCSIM_VERSION "0.2"
-#define NFCSIM_POLL_NONE 0
-#define NFCSIM_POLL_INITIATOR 1
-#define NFCSIM_POLL_TARGET 2
-#define NFCSIM_POLL_DUAL (NFCSIM_POLL_INITIATOR | NFCSIM_POLL_TARGET)
+#define NFCSIM_MODE_NONE 0
+#define NFCSIM_MODE_INITIATOR 1
+#define NFCSIM_MODE_TARGET 2
-#define RX_DEFAULT_DELAY 5
+#define NFCSIM_CAPABILITIES (NFC_DIGITAL_DRV_CAPS_IN_CRC | \
+ NFC_DIGITAL_DRV_CAPS_TG_CRC)
struct nfcsim {
- struct nfc_dev *nfc_dev;
+ struct nfc_digital_dev *nfc_digital_dev;
- struct mutex lock;
+ struct work_struct recv_work;
+ struct delayed_work send_work;
- struct delayed_work recv_work;
+ struct nfcsim_link *link_in;
+ struct nfcsim_link *link_out;
- struct sk_buff *clone_skb;
+ bool up;
+ u8 mode;
+ u8 rf_tech;
- struct delayed_work poll_work;
- u8 polling_mode;
- u8 curr_polling_mode;
+ u16 recv_timeout;
- u8 shutting_down;
+ nfc_digital_cmd_complete_t cb;
+ void *arg;
- u8 up;
+ u8 dropframe;
+};
- u8 initiator;
+struct nfcsim_link {
+ struct mutex lock;
- u32 rx_delay;
+ u8 rf_tech;
+ u8 mode;
- data_exchange_cb_t cb;
- void *cb_context;
+ u8 shutdown;
- struct nfcsim *peer_dev;
+ struct sk_buff *skb;
+ wait_queue_head_t recv_wait;
+ u8 cond;
};
-static struct nfcsim *dev0;
-static struct nfcsim *dev1;
-
-static struct workqueue_struct *wq;
-
-static void nfcsim_cleanup_dev(struct nfcsim *dev, u8 shutdown)
+static struct nfcsim_link *nfcsim_link_new(void)
{
- DEV_DBG(dev, "shutdown=%d\n", shutdown);
+ struct nfcsim_link *link;
- mutex_lock(&dev->lock);
+ link = kzalloc(sizeof(struct nfcsim_link), GFP_KERNEL);
+ if (!link)
+ return NULL;
- dev->polling_mode = NFCSIM_POLL_NONE;
- dev->shutting_down = shutdown;
- dev->cb = NULL;
- dev_kfree_skb(dev->clone_skb);
- dev->clone_skb = NULL;
+ mutex_init(&link->lock);
+ init_waitqueue_head(&link->recv_wait);
- mutex_unlock(&dev->lock);
-
- cancel_delayed_work_sync(&dev->poll_work);
- cancel_delayed_work_sync(&dev->recv_work);
+ return link;
}
-static int nfcsim_target_found(struct nfcsim *dev)
+static void nfcsim_link_free(struct nfcsim_link *link)
{
- struct nfc_target nfc_tgt;
+ dev_kfree_skb(link->skb);
+ kfree(link);
+}
- DEV_DBG(dev, "\n");
+static void nfcsim_link_recv_wake(struct nfcsim_link *link)
+{
+ link->cond = 1;
+ wake_up_interruptible(&link->recv_wait);
+}
- memset(&nfc_tgt, 0, sizeof(struct nfc_target));
+static void nfcsim_link_set_skb(struct nfcsim_link *link, struct sk_buff *skb,
+ u8 rf_tech, u8 mode)
+{
+ mutex_lock(&link->lock);
- nfc_tgt.supported_protocols = NFC_PROTO_NFC_DEP_MASK;
- nfc_targets_found(dev->nfc_dev, &nfc_tgt, 1);
+ dev_kfree_skb(link->skb);
+ link->skb = skb;
+ link->rf_tech = rf_tech;
+ link->mode = mode;
- return 0;
+ mutex_unlock(&link->lock);
}
-static int nfcsim_dev_up(struct nfc_dev *nfc_dev)
+static void nfcsim_link_recv_cancel(struct nfcsim_link *link)
{
- struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
+ mutex_lock(&link->lock);
- DEV_DBG(dev, "\n");
+ link->mode = NFCSIM_MODE_NONE;
- mutex_lock(&dev->lock);
+ mutex_unlock(&link->lock);
- dev->up = 1;
-
- mutex_unlock(&dev->lock);
-
- return 0;
+ nfcsim_link_recv_wake(link);
}
-static int nfcsim_dev_down(struct nfc_dev *nfc_dev)
+static void nfcsim_link_shutdown(struct nfcsim_link *link)
{
- struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
-
- DEV_DBG(dev, "\n");
+ mutex_lock(&link->lock);
- mutex_lock(&dev->lock);
+ link->shutdown = 1;
+ link->mode = NFCSIM_MODE_NONE;
- dev->up = 0;
+ mutex_unlock(&link->lock);
- mutex_unlock(&dev->lock);
-
- return 0;
+ nfcsim_link_recv_wake(link);
}
-static int nfcsim_dep_link_up(struct nfc_dev *nfc_dev,
- struct nfc_target *target,
- u8 comm_mode, u8 *gb, size_t gb_len)
+static struct sk_buff *nfcsim_link_recv_skb(struct nfcsim_link *link,
+ int timeout, u8 rf_tech, u8 mode)
{
int rc;
- struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
- struct nfcsim *peer = dev->peer_dev;
- u8 *remote_gb;
- size_t remote_gb_len;
+ struct sk_buff *skb;
- DEV_DBG(dev, "target_idx: %d, comm_mode: %d\n", target->idx, comm_mode);
+ rc = wait_event_interruptible_timeout(link->recv_wait,
+ link->cond,
+ msecs_to_jiffies(timeout));
- mutex_lock(&peer->lock);
+ mutex_lock(&link->lock);
- nfc_tm_activated(peer->nfc_dev, NFC_PROTO_NFC_DEP_MASK,
- NFC_COMM_ACTIVE, gb, gb_len);
+ skb = link->skb;
+ link->skb = NULL;
- remote_gb = nfc_get_local_general_bytes(peer->nfc_dev, &remote_gb_len);
- if (!remote_gb) {
- DEV_ERR(peer, "Can't get remote general bytes\n");
+ if (!rc) {
+ rc = -ETIMEDOUT;
+ goto done;
+ }
- mutex_unlock(&peer->lock);
- return -EINVAL;
+ if (!skb || link->rf_tech != rf_tech || link->mode == mode) {
+ rc = -EINVAL;
+ goto done;
}
- mutex_unlock(&peer->lock);
+ if (link->shutdown) {
+ rc = -ENODEV;
+ goto done;
+ }
- mutex_lock(&dev->lock);
+done:
+ mutex_unlock(&link->lock);
- rc = nfc_set_remote_general_bytes(nfc_dev, remote_gb, remote_gb_len);
- if (rc) {
- DEV_ERR(dev, "Can't set remote general bytes\n");
- mutex_unlock(&dev->lock);
- return rc;
+ if (rc < 0) {
+ dev_kfree_skb(skb);
+ skb = ERR_PTR(rc);
}
- rc = nfc_dep_link_is_up(nfc_dev, target->idx, NFC_COMM_ACTIVE,
- NFC_RF_INITIATOR);
-
- mutex_unlock(&dev->lock);
+ link->cond = 0;
- return rc;
+ return skb;
}
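
The `if (!rc) rc = -ETIMEDOUT` translation in nfcsim_link_recv_skb() above follows from wait_event_interruptible_timeout()'s return convention: 0 means the timeout elapsed with the condition still false, a negative value means a signal interrupted the sleep, and a positive value is the remaining time. A standalone illustration of just that convention:

#include <stdio.h>

#define ETIMEDOUT   110
#define ERESTARTSYS 512

/* Maps a wait_event_interruptible_timeout()-style result to an errno,
 * the way nfcsim_link_recv_skb() treats rc == 0 as a timeout. */
static int classify_wait_result(long rc)
{
	if (rc < 0)
		return (int)rc;     /* interrupted by a signal */
	if (rc == 0)
		return -ETIMEDOUT;  /* timed out, condition never true */
	return 0;                   /* woken with time remaining */
}

int main(void)
{
	printf("%d %d %d\n",
	       classify_wait_result(-ERESTARTSYS),
	       classify_wait_result(0),
	       classify_wait_result(25));
	return 0;
}
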
-static int nfcsim_dep_link_down(struct nfc_dev *nfc_dev)
+static void nfcsim_send_wq(struct work_struct *work)
{
- struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
+ struct nfcsim *dev = container_of(work, struct nfcsim, send_work.work);
- DEV_DBG(dev, "\n");
-
- nfcsim_cleanup_dev(dev, 0);
-
- return 0;
+ /*
+ * To effectively send data, the device just wakes up its link_out, which
+ * is the link_in of the peer device. The exchanged skb has already been
+ * stored in dev->link_out through nfcsim_link_set_skb().
+ */
+ nfcsim_link_recv_wake(dev->link_out);
}
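
The simulator wires the two virtual devices over a shared pair of links, each device's link_out being the peer's link_in, so a "send" is just a wake-up on the peer's receive queue. A tiny stand-in showing the crossover (hypothetical types; the pairing mirrors how nfcsim_device_new(link_in, link_out) is called for the two devices):

#include <stdio.h>

struct link { const char *owner_in; };

struct dev {
	const char *name;
	struct link *link_in;
	struct link *link_out;
};

int main(void)
{
	struct link link0 = { "dev0" }, link1 = { "dev1" };
	/* dev0 receives on link0 and sends on link1; dev1 the reverse. */
	struct dev dev0 = { "dev0", &link0, &link1 };
	struct dev dev1 = { "dev1", &link1, &link0 };

	printf("%s sends on the link %s receives on: %s\n",
	       dev0.name, dev1.name,
	       dev0.link_out == dev1.link_in ? "yes" : "no");
	return 0;
}
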
-static int nfcsim_start_poll(struct nfc_dev *nfc_dev,
- u32 im_protocols, u32 tm_protocols)
+static void nfcsim_recv_wq(struct work_struct *work)
{
- struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
- int rc;
-
- mutex_lock(&dev->lock);
+ struct nfcsim *dev = container_of(work, struct nfcsim, recv_work);
+ struct sk_buff *skb;
- if (dev->polling_mode != NFCSIM_POLL_NONE) {
- DEV_ERR(dev, "Already in polling mode\n");
- rc = -EBUSY;
- goto exit;
- }
+ skb = nfcsim_link_recv_skb(dev->link_in, dev->recv_timeout,
+ dev->rf_tech, dev->mode);
- if (im_protocols & NFC_PROTO_NFC_DEP_MASK)
- dev->polling_mode |= NFCSIM_POLL_INITIATOR;
+ if (!dev->up) {
+ NFCSIM_ERR(dev, "Device is down\n");
- if (tm_protocols & NFC_PROTO_NFC_DEP_MASK)
- dev->polling_mode |= NFCSIM_POLL_TARGET;
+ if (!IS_ERR(skb))
+ dev_kfree_skb(skb);
- if (dev->polling_mode == NFCSIM_POLL_NONE) {
- DEV_ERR(dev, "Unsupported polling mode\n");
- rc = -EINVAL;
- goto exit;
+ skb = ERR_PTR(-ENODEV);
}
- dev->initiator = 0;
- dev->curr_polling_mode = NFCSIM_POLL_NONE;
+ dev->cb(dev->nfc_digital_dev, dev->arg, skb);
+}
- queue_delayed_work(wq, &dev->poll_work, 0);
+static int nfcsim_send(struct nfc_digital_dev *ddev, struct sk_buff *skb,
+ u16 timeout, nfc_digital_cmd_complete_t cb, void *arg)
+{
+ struct nfcsim *dev = nfc_digital_get_drvdata(ddev);
+ u8 delay;
- DEV_DBG(dev, "Start polling: im: 0x%X, tm: 0x%X\n", im_protocols,
- tm_protocols);
+ if (!dev->up) {
+ NFCSIM_ERR(dev, "Device is down\n");
+ return -ENODEV;
+ }
- rc = 0;
-exit:
- mutex_unlock(&dev->lock);
+ dev->recv_timeout = timeout;
+ dev->cb = cb;
+ dev->arg = arg;
- return rc;
-}
+ schedule_work(&dev->recv_work);
-static void nfcsim_stop_poll(struct nfc_dev *nfc_dev)
-{
- struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
+ if (dev->dropframe) {
+ NFCSIM_DBG(dev, "dropping frame (out of %d)\n", dev->dropframe);
+ dev_kfree_skb(skb);
+ dev->dropframe--;
- DEV_DBG(dev, "Stop poll\n");
+ return 0;
+ }
- mutex_lock(&dev->lock);
+ if (skb) {
+ nfcsim_link_set_skb(dev->link_out, skb, dev->rf_tech,
+ dev->mode);
- dev->polling_mode = NFCSIM_POLL_NONE;
+ /* Add random delay (between 3 and 10 ms) before sending data */
+ get_random_bytes(&delay, 1);
+ delay = 3 + (delay & 0x07);
- mutex_unlock(&dev->lock);
+ schedule_delayed_work(&dev->send_work, msecs_to_jiffies(delay));
+ }
- cancel_delayed_work_sync(&dev->poll_work);
+ return 0;
}
-static int nfcsim_activate_target(struct nfc_dev *nfc_dev,
- struct nfc_target *target, u32 protocol)
+static void nfcsim_abort_cmd(struct nfc_digital_dev *ddev)
{
- struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
-
- DEV_DBG(dev, "\n");
+ struct nfcsim *dev = nfc_digital_get_drvdata(ddev);
- return -ENOTSUPP;
+ nfcsim_link_recv_cancel(dev->link_in);
}
-static void nfcsim_deactivate_target(struct nfc_dev *nfc_dev,
- struct nfc_target *target, u8 mode)
+static int nfcsim_switch_rf(struct nfc_digital_dev *ddev, bool on)
{
- struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
+ struct nfcsim *dev = nfc_digital_get_drvdata(ddev);
+
+ dev->up = on;
- DEV_DBG(dev, "\n");
+ return 0;
}
-static void nfcsim_wq_recv(struct work_struct *work)
+static int nfcsim_in_configure_hw(struct nfc_digital_dev *ddev,
+ int type, int param)
{
- struct nfcsim *dev = container_of(work, struct nfcsim,
- recv_work.work);
+ struct nfcsim *dev = nfc_digital_get_drvdata(ddev);
- mutex_lock(&dev->lock);
+ switch (type) {
+ case NFC_DIGITAL_CONFIG_RF_TECH:
+ dev->up = true;
+ dev->mode = NFCSIM_MODE_INITIATOR;
+ dev->rf_tech = param;
+ break;
- if (dev->shutting_down || !dev->up || !dev->clone_skb) {
- dev_kfree_skb(dev->clone_skb);
- goto exit;
- }
+ case NFC_DIGITAL_CONFIG_FRAMING:
+ break;
- if (dev->initiator) {
- if (!dev->cb) {
- DEV_ERR(dev, "Null recv callback\n");
- dev_kfree_skb(dev->clone_skb);
- goto exit;
- }
-
- dev->cb(dev->cb_context, dev->clone_skb, 0);
- dev->cb = NULL;
- } else {
- nfc_tm_data_received(dev->nfc_dev, dev->clone_skb);
+ default:
+ NFCSIM_ERR(dev, "Invalid configuration type: %d\n", type);
+ return -EINVAL;
}
-exit:
- dev->clone_skb = NULL;
+ return 0;
+}
- mutex_unlock(&dev->lock);
+static int nfcsim_in_send_cmd(struct nfc_digital_dev *ddev,
+ struct sk_buff *skb, u16 timeout,
+ nfc_digital_cmd_complete_t cb, void *arg)
+{
+ return nfcsim_send(ddev, skb, timeout, cb, arg);
}
-static int nfcsim_tx(struct nfc_dev *nfc_dev, struct nfc_target *target,
- struct sk_buff *skb, data_exchange_cb_t cb,
- void *cb_context)
+static int nfcsim_tg_configure_hw(struct nfc_digital_dev *ddev,
+ int type, int param)
{
- struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
- struct nfcsim *peer = dev->peer_dev;
- int err;
+ struct nfcsim *dev = nfc_digital_get_drvdata(ddev);
- mutex_lock(&dev->lock);
+ switch (type) {
+ case NFC_DIGITAL_CONFIG_RF_TECH:
+ dev->up = true;
+ dev->mode = NFCSIM_MODE_TARGET;
+ dev->rf_tech = param;
+ break;
- if (dev->shutting_down || !dev->up) {
- mutex_unlock(&dev->lock);
- err = -ENODEV;
- goto exit;
+ case NFC_DIGITAL_CONFIG_FRAMING:
+ break;
+
+ default:
+ NFCSIM_ERR(dev, "Invalid configuration type: %d\n", type);
+ return -EINVAL;
}
- dev->cb = cb;
- dev->cb_context = cb_context;
+ return 0;
+}
- mutex_unlock(&dev->lock);
+static int nfcsim_tg_send_cmd(struct nfc_digital_dev *ddev,
+ struct sk_buff *skb, u16 timeout,
+ nfc_digital_cmd_complete_t cb, void *arg)
+{
+ return nfcsim_send(ddev, skb, timeout, cb, arg);
+}
- mutex_lock(&peer->lock);
+static int nfcsim_tg_listen(struct nfc_digital_dev *ddev, u16 timeout,
+ nfc_digital_cmd_complete_t cb, void *arg)
+{
+ return nfcsim_send(ddev, NULL, timeout, cb, arg);
+}
- peer->clone_skb = skb_clone(skb, GFP_KERNEL);
+static struct nfc_digital_ops nfcsim_digital_ops = {
+ .in_configure_hw = nfcsim_in_configure_hw,
+ .in_send_cmd = nfcsim_in_send_cmd,
- if (!peer->clone_skb) {
- DEV_ERR(dev, "skb_clone failed\n");
- mutex_unlock(&peer->lock);
- err = -ENOMEM;
- goto exit;
- }
+ .tg_listen = nfcsim_tg_listen,
+ .tg_configure_hw = nfcsim_tg_configure_hw,
+ .tg_send_cmd = nfcsim_tg_send_cmd,
- /* This simulates an arbitrary transmission delay between the 2 devices.
- * If packet transmission occurs immediately between them, we have a
- * non-stop flow of several tens of thousands SYMM packets per second
- * and a burning cpu.
- */
- queue_delayed_work(wq, &peer->recv_work,
- msecs_to_jiffies(dev->rx_delay));
+ .abort_cmd = nfcsim_abort_cmd,
+ .switch_rf = nfcsim_switch_rf,
+};
+
+static struct dentry *nfcsim_debugfs_root;
- mutex_unlock(&peer->lock);
+static void nfcsim_debugfs_init(void)
+{
+ nfcsim_debugfs_root = debugfs_create_dir("nfcsim", NULL);
- err = 0;
-exit:
- dev_kfree_skb(skb);
+ if (!nfcsim_debugfs_root)
+ pr_err("Could not create debugfs entry\n");
- return err;
}
-static int nfcsim_im_transceive(struct nfc_dev *nfc_dev,
- struct nfc_target *target, struct sk_buff *skb,
- data_exchange_cb_t cb, void *cb_context)
+static void nfcsim_debugfs_remove(void)
{
- return nfcsim_tx(nfc_dev, target, skb, cb, cb_context);
+ debugfs_remove_recursive(nfcsim_debugfs_root);
}
-static int nfcsim_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
+static void nfcsim_debugfs_init_dev(struct nfcsim *dev)
{
- return nfcsim_tx(nfc_dev, NULL, skb, NULL, NULL);
-}
-
-static struct nfc_ops nfcsim_nfc_ops = {
- .dev_up = nfcsim_dev_up,
- .dev_down = nfcsim_dev_down,
- .dep_link_up = nfcsim_dep_link_up,
- .dep_link_down = nfcsim_dep_link_down,
- .start_poll = nfcsim_start_poll,
- .stop_poll = nfcsim_stop_poll,
- .activate_target = nfcsim_activate_target,
- .deactivate_target = nfcsim_deactivate_target,
- .im_transceive = nfcsim_im_transceive,
- .tm_send = nfcsim_tm_send,
-};
+ struct dentry *dev_dir;
+ char devname[5]; /* nfcX\0 */
+ u32 idx;
+ int n;
-static void nfcsim_set_polling_mode(struct nfcsim *dev)
-{
- if (dev->polling_mode == NFCSIM_POLL_NONE) {
- dev->curr_polling_mode = NFCSIM_POLL_NONE;
+ if (!nfcsim_debugfs_root) {
+ NFCSIM_ERR(dev, "nfcsim debugfs not initialized\n");
return;
}
- if (dev->curr_polling_mode == NFCSIM_POLL_NONE) {
- if (dev->polling_mode & NFCSIM_POLL_INITIATOR)
- dev->curr_polling_mode = NFCSIM_POLL_INITIATOR;
- else
- dev->curr_polling_mode = NFCSIM_POLL_TARGET;
-
+ idx = dev->nfc_digital_dev->nfc_dev->idx;
+ n = snprintf(devname, sizeof(devname), "nfc%d", idx);
+ if (n >= sizeof(devname)) {
+ NFCSIM_ERR(dev, "Could not compute dev name for dev %d\n", idx);
return;
}
- if (dev->polling_mode == NFCSIM_POLL_DUAL) {
- if (dev->curr_polling_mode == NFCSIM_POLL_TARGET)
- dev->curr_polling_mode = NFCSIM_POLL_INITIATOR;
- else
- dev->curr_polling_mode = NFCSIM_POLL_TARGET;
+ dev_dir = debugfs_create_dir(devname, nfcsim_debugfs_root);
+ if (!dev_dir) {
+ NFCSIM_ERR(dev, "Could not create debugfs entries for nfc%d\n",
+ idx);
+ return;
}
+
+ debugfs_create_u8("dropframe", 0664, dev_dir, &dev->dropframe);
}
-static void nfcsim_wq_poll(struct work_struct *work)
+static struct nfcsim *nfcsim_device_new(struct nfcsim_link *link_in,
+ struct nfcsim_link *link_out)
{
- struct nfcsim *dev = container_of(work, struct nfcsim, poll_work.work);
- struct nfcsim *peer = dev->peer_dev;
+ struct nfcsim *dev;
+ int rc;
- /* These work items run on an ordered workqueue and are therefore
- * serialized. So we can take both mutexes without being dead locked.
- */
- mutex_lock(&dev->lock);
- mutex_lock(&peer->lock);
+ dev = kzalloc(sizeof(struct nfcsim), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
- nfcsim_set_polling_mode(dev);
+ INIT_DELAYED_WORK(&dev->send_work, nfcsim_send_wq);
+ INIT_WORK(&dev->recv_work, nfcsim_recv_wq);
- if (dev->curr_polling_mode == NFCSIM_POLL_NONE) {
- DEV_DBG(dev, "Not polling\n");
- goto unlock;
+ dev->nfc_digital_dev =
+ nfc_digital_allocate_device(&nfcsim_digital_ops,
+ NFC_PROTO_NFC_DEP_MASK,
+ NFCSIM_CAPABILITIES,
+ 0, 0);
+ if (!dev->nfc_digital_dev) {
+ kfree(dev);
+ return ERR_PTR(-ENOMEM);
}
- DEV_DBG(dev, "Polling as %s",
- dev->curr_polling_mode == NFCSIM_POLL_INITIATOR ?
- "initiator\n" : "target\n");
+ nfc_digital_set_drvdata(dev->nfc_digital_dev, dev);
- if (dev->curr_polling_mode == NFCSIM_POLL_TARGET)
- goto sched_work;
+ dev->link_in = link_in;
+ dev->link_out = link_out;
- if (peer->curr_polling_mode == NFCSIM_POLL_TARGET) {
- peer->polling_mode = NFCSIM_POLL_NONE;
- dev->polling_mode = NFCSIM_POLL_NONE;
-
- dev->initiator = 1;
-
- nfcsim_target_found(dev);
+ rc = nfc_digital_register_device(dev->nfc_digital_dev);
+ if (rc) {
+ pr_err("Could not register digital device (%d)\n", rc);
+ nfc_digital_free_device(dev->nfc_digital_dev);
+ kfree(dev);
- goto unlock;
+ return ERR_PTR(rc);
}
-sched_work:
- /* This defines the delay for an initiator to check if the other device
- * is polling in target mode.
- * If the device starts in dual mode polling, it switches between
- * initiator and target at every round.
- * Because the wq is ordered and only 1 work item is executed at a time,
- * we'll always have one device polling as initiator and the other as
- * target at some point, even if both are started in dual mode.
- */
- queue_delayed_work(wq, &dev->poll_work, msecs_to_jiffies(200));
+ nfcsim_debugfs_init_dev(dev);
-unlock:
- mutex_unlock(&peer->lock);
- mutex_unlock(&dev->lock);
+ return dev;
}
-static struct nfcsim *nfcsim_init_dev(void)
+static void nfcsim_device_free(struct nfcsim *dev)
{
- struct nfcsim *dev;
- int rc = -ENOMEM;
+ nfc_digital_unregister_device(dev->nfc_digital_dev);
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (dev == NULL)
- return ERR_PTR(-ENOMEM);
+ dev->up = false;
- mutex_init(&dev->lock);
+ nfcsim_link_shutdown(dev->link_in);
- INIT_DELAYED_WORK(&dev->recv_work, nfcsim_wq_recv);
- INIT_DELAYED_WORK(&dev->poll_work, nfcsim_wq_poll);
+ cancel_delayed_work_sync(&dev->send_work);
+ cancel_work_sync(&dev->recv_work);
- dev->nfc_dev = nfc_allocate_device(&nfcsim_nfc_ops,
- NFC_PROTO_NFC_DEP_MASK,
- 0, 0);
- if (!dev->nfc_dev)
- goto error;
+ nfc_digital_free_device(dev->nfc_digital_dev);
- nfc_set_drvdata(dev->nfc_dev, dev);
-
- rc = nfc_register_device(dev->nfc_dev);
- if (rc)
- goto free_nfc_dev;
-
- dev->rx_delay = RX_DEFAULT_DELAY;
- return dev;
-
-free_nfc_dev:
- nfc_free_device(dev->nfc_dev);
-
-error:
kfree(dev);
-
- return ERR_PTR(rc);
}
-static void nfcsim_free_device(struct nfcsim *dev)
-{
- nfc_unregister_device(dev->nfc_dev);
-
- nfc_free_device(dev->nfc_dev);
-
- kfree(dev);
-}
+static struct nfcsim *dev0;
+static struct nfcsim *dev1;
static int __init nfcsim_init(void)
{
+ struct nfcsim_link *link0, *link1;
int rc;
- /* We need an ordered wq to ensure that poll_work items are executed
- * one at a time.
- */
- wq = alloc_ordered_workqueue("nfcsim", 0);
- if (!wq) {
+ link0 = nfcsim_link_new();
+ link1 = nfcsim_link_new();
+ if (!link0 || !link1) {
rc = -ENOMEM;
- goto exit;
+ goto exit_err;
}
- dev0 = nfcsim_init_dev();
+ nfcsim_debugfs_init();
+
+ dev0 = nfcsim_device_new(link0, link1);
if (IS_ERR(dev0)) {
rc = PTR_ERR(dev0);
- goto exit;
+ goto exit_err;
}
- dev1 = nfcsim_init_dev();
+ dev1 = nfcsim_device_new(link1, link0);
if (IS_ERR(dev1)) {
- kfree(dev0);
+ nfcsim_device_free(dev0);
rc = PTR_ERR(dev1);
- goto exit;
+ goto exit_err;
}
- dev0->peer_dev = dev1;
- dev1->peer_dev = dev0;
+ pr_info("nfcsim " NFCSIM_VERSION " initialized\n");
+
+ return 0;
- pr_debug("NFCsim " NFCSIM_VERSION " initialized\n");
+exit_err:
+ pr_err("Failed to initialize nfcsim driver (%d)\n", rc);
- rc = 0;
-exit:
- if (rc)
- pr_err("Failed to initialize nfcsim driver (%d)\n",
- rc);
+ nfcsim_link_free(link0);
+ nfcsim_link_free(link1);
return rc;
}
static void __exit nfcsim_exit(void)
{
- nfcsim_cleanup_dev(dev0, 1);
- nfcsim_cleanup_dev(dev1, 1);
+ struct nfcsim_link *link0, *link1;
+
+ link0 = dev0->link_in;
+ link1 = dev0->link_out;
+
+ nfcsim_device_free(dev0);
+ nfcsim_device_free(dev1);
- nfcsim_free_device(dev0);
- nfcsim_free_device(dev1);
+ nfcsim_link_free(link0);
+ nfcsim_link_free(link1);
- destroy_workqueue(wq);
+ nfcsim_debugfs_remove();
}
module_init(nfcsim_init);
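
The per-device dropframe knob added above is exercised from userspace through debugfs. A hypothetical session, assuming the usual debugfs mount point (the exact drop semantics live in the driver's send path, elided from this hunk):

	echo 1 > /sys/kernel/debug/nfcsim/nfc0/dropframe	# arm a frame drop on nfc0
	echo 0 > /sys/kernel/debug/nfcsim/nfc0/dropframe	# disarm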
diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
index 0b6439996..8fd3d02ef 100644
--- a/drivers/nfc/nfcwilink.c
+++ b/drivers/nfc/nfcwilink.c
@@ -94,7 +94,7 @@ struct nfcwilink {
struct nci_dev *ndev;
unsigned long flags;
- char st_register_cb_status;
+ int st_register_cb_status;
long (*st_write) (struct sk_buff *);
struct completion completed;
@@ -320,7 +320,7 @@ exit:
}
/* Called by ST when registration is complete */
-static void nfcwilink_register_complete(void *priv_data, char data)
+static void nfcwilink_register_complete(void *priv_data, int data)
{
struct nfcwilink *drv = priv_data;
diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
index 8ca060324..33ed78be2 100644
--- a/drivers/nfc/pn533/usb.c
+++ b/drivers/nfc/pn533/usb.c
@@ -464,10 +464,8 @@ static int pn533_usb_probe(struct usb_interface *interface,
return -ENOMEM;
in_buf = kzalloc(in_buf_len, GFP_KERNEL);
- if (!in_buf) {
- rc = -ENOMEM;
- goto out_free_phy;
- }
+ if (!in_buf)
+ return -ENOMEM;
phy->udev = usb_get_dev(interface_to_usbdev(interface));
phy->interface = interface;
@@ -554,8 +552,7 @@ error:
usb_free_urb(phy->out_urb);
usb_put_dev(phy->udev);
kfree(in_buf);
-out_free_phy:
- kfree(phy);
+
return rc;
}
diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
index 87d509996..2b2330b23 100644
--- a/drivers/nfc/port100.c
+++ b/drivers/nfc/port100.c
@@ -343,7 +343,26 @@ in_protocols[][PORT100_IN_MAX_NUM_PROTOCOLS + 1] = {
},
[NFC_DIGITAL_FRAMING_NFCF_NFC_DEP] = {
/* nfc_digital_framing_nfcf */
- { PORT100_IN_PROT_END, 0 },
+ { PORT100_IN_PROT_INITIAL_GUARD_TIME, 18 },
+ { PORT100_IN_PROT_ADD_CRC, 1 },
+ { PORT100_IN_PROT_CHECK_CRC, 1 },
+ { PORT100_IN_PROT_MULTI_CARD, 0 },
+ { PORT100_IN_PROT_ADD_PARITY, 0 },
+ { PORT100_IN_PROT_CHECK_PARITY, 0 },
+ { PORT100_IN_PROT_BITWISE_AC_RECV_MODE, 0 },
+ { PORT100_IN_PROT_VALID_BIT_NUMBER, 8 },
+ { PORT100_IN_PROT_CRYPTO1, 0 },
+ { PORT100_IN_PROT_ADD_SOF, 0 },
+ { PORT100_IN_PROT_CHECK_SOF, 0 },
+ { PORT100_IN_PROT_ADD_EOF, 0 },
+ { PORT100_IN_PROT_CHECK_EOF, 0 },
+ { PORT100_IN_PROT_DEAF_TIME, 4 },
+ { PORT100_IN_PROT_CRM, 0 },
+ { PORT100_IN_PROT_CRM_MIN_LEN, 0 },
+ { PORT100_IN_PROT_T1_TAG_FRAME, 0 },
+ { PORT100_IN_PROT_RFCA, 0 },
+ { PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR, 6 },
+ { PORT100_IN_PROT_END, 0 },
},
[NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED] = {
{ PORT100_IN_PROT_END, 0 },
@@ -437,6 +456,12 @@ struct port100 {
struct urb *out_urb;
struct urb *in_urb;
+ /* This mutex protects the out_urb and prevents submitting a new command
+ * through port100_send_frame_async() while the previous one is being
+ * canceled through port100_abort_cmd().
+ */
+ struct mutex out_urb_lock;
+
struct work_struct cmd_complete_work;
u8 cmd_type;
@@ -445,6 +470,9 @@ struct port100 {
* for any queuing/locking mechanism at driver level.
*/
struct port100_cmd *cmd;
+
+ bool cmd_cancel;
+ struct completion cmd_cancel_done;
};
struct port100_cmd {
@@ -699,10 +727,27 @@ static int port100_send_ack(struct port100 *dev)
{
int rc;
+ mutex_lock(&dev->out_urb_lock);
+
+ init_completion(&dev->cmd_cancel_done);
+
+ usb_kill_urb(dev->out_urb);
+
dev->out_urb->transfer_buffer = ack_frame;
dev->out_urb->transfer_buffer_length = sizeof(ack_frame);
rc = usb_submit_urb(dev->out_urb, GFP_KERNEL);
+ /* Set the cmd_cancel flag only if the URB has been successfully
+ * submitted. It will be reset by the out URB completion callback
+ * port100_send_complete().
+ */
+ dev->cmd_cancel = !rc;
+
+ mutex_unlock(&dev->out_urb_lock);
+
+ if (!rc)
+ wait_for_completion(&dev->cmd_cancel_done);
+
return rc;
}
@@ -711,6 +756,16 @@ static int port100_send_frame_async(struct port100 *dev, struct sk_buff *out,
{
int rc;
+ mutex_lock(&dev->out_urb_lock);
+
+ /* A command cancel frame has been sent through dev->out_urb. Don't try
+ * to submit a new one.
+ */
+ if (dev->cmd_cancel) {
+ rc = -EAGAIN;
+ goto exit;
+ }
+
dev->out_urb->transfer_buffer = out->data;
dev->out_urb->transfer_buffer_length = out->len;
@@ -722,16 +777,15 @@ static int port100_send_frame_async(struct port100 *dev, struct sk_buff *out,
rc = usb_submit_urb(dev->out_urb, GFP_KERNEL);
if (rc)
- return rc;
+ goto exit;
rc = port100_submit_urb_for_ack(dev, GFP_KERNEL);
if (rc)
- goto error;
+ usb_unlink_urb(dev->out_urb);
- return 0;
+exit:
+ mutex_unlock(&dev->out_urb_lock);
-error:
- usb_unlink_urb(dev->out_urb);
return rc;
}
@@ -790,6 +844,12 @@ static int port100_send_cmd_async(struct port100 *dev, u8 cmd_code,
PORT100_FRAME_MAX_PAYLOAD_LEN +
PORT100_FRAME_TAIL_LEN;
+ if (dev->cmd) {
+ nfc_err(&dev->interface->dev,
+ "A command is still in process\n");
+ return -EBUSY;
+ }
+
resp = alloc_skb(resp_len, GFP_KERNEL);
if (!resp)
return -ENOMEM;
@@ -867,6 +927,11 @@ static void port100_send_complete(struct urb *urb)
{
struct port100 *dev = urb->context;
+ if (dev->cmd_cancel) {
+ dev->cmd_cancel = false;
+ complete(&dev->cmd_cancel_done);
+ }
+
switch (urb->status) {
case 0:
break; /* success */
@@ -985,6 +1050,10 @@ static int port100_switch_rf(struct nfc_digital_dev *ddev, bool on)
*skb_put(skb, 1) = on ? 1 : 0;
+ /* Cancel the last command if the device is being switched off */
+ if (!on)
+ port100_abort_cmd(ddev);
+
resp = port100_send_cmd_sync(dev, PORT100_CMD_SWITCH_RF, skb);
if (IS_ERR(resp))
@@ -1430,6 +1499,7 @@ static int port100_probe(struct usb_interface *interface,
if (!dev)
return -ENOMEM;
+ mutex_init(&dev->out_urb_lock);
dev->udev = usb_get_dev(interface_to_usbdev(interface));
dev->interface = interface;
usb_set_intfdata(interface, dev);
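
Taken together, the port100 hunks above form a small cancel handshake around the single out URB. A condensed sketch of that scheme with hypothetical names (xfer_*), not the driver's actual API:

	struct xfer_ctx {
		struct mutex lock;		/* serializes use of the one out URB */
		bool cancel_pending;		/* a cancel frame currently owns the URB */
		struct completion cancel_done;
	};

	static int xfer_send(struct xfer_ctx *ctx)
	{
		int rc = 0;

		mutex_lock(&ctx->lock);
		if (ctx->cancel_pending)
			rc = -EAGAIN;		/* back off while the cancel is in flight */
		/* else: submit the command URB here */
		mutex_unlock(&ctx->lock);
		return rc;
	}

	static void xfer_cancel(struct xfer_ctx *ctx)
	{
		mutex_lock(&ctx->lock);
		init_completion(&ctx->cancel_done);
		/* kill any in-flight URB and submit the cancel frame here */
		ctx->cancel_pending = true;	/* only if the submit succeeded */
		mutex_unlock(&ctx->lock);

		/* the URB completion callback clears the flag and completes */
		wait_for_completion(&ctx->cancel_done);
	}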
diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
index 10842b705..26c9dbbcc 100644
--- a/drivers/nfc/trf7970a.c
+++ b/drivers/nfc/trf7970a.c
@@ -1048,6 +1048,10 @@ static int trf7970a_init(struct trf7970a *trf)
if (ret)
goto err_out;
+ ret = trf7970a_write(trf, TRF7970A_NFC_TARGET_LEVEL, 0);
+ if (ret)
+ goto err_out;
+
usleep_range(1000, 2000);
trf->chip_status_ctrl &= ~TRF7970A_CHIP_STATUS_RF_ON;
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c
index 40d04ef5d..0d5c29ae5 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.c
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.c
@@ -551,13 +551,15 @@ static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *offp)
{
struct intel_ntb_dev *ndev;
+ struct pci_dev *pdev;
void __iomem *mmio;
char *buf;
size_t buf_size;
ssize_t ret, off;
- union { u64 v64; u32 v32; u16 v16; } u;
+ union { u64 v64; u32 v32; u16 v16; u8 v8; } u;
ndev = filp->private_data;
+ pdev = ndev_pdev(ndev);
mmio = ndev->self_mmio;
buf_size = min(count, 0x800ul);
@@ -632,6 +634,41 @@ static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
"Doorbell Bell -\t\t%#llx\n", u.v64);
off += scnprintf(buf + off, buf_size - off,
+ "\nNTB Window Size:\n");
+
+ pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &u.v8);
+ off += scnprintf(buf + off, buf_size - off,
+ "PBAR23SZ %hhu\n", u.v8);
+ if (!ndev->bar4_split) {
+ pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &u.v8);
+ off += scnprintf(buf + off, buf_size - off,
+ "PBAR45SZ %hhu\n", u.v8);
+ } else {
+ pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &u.v8);
+ off += scnprintf(buf + off, buf_size - off,
+ "PBAR4SZ %hhu\n", u.v8);
+ pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &u.v8);
+ off += scnprintf(buf + off, buf_size - off,
+ "PBAR5SZ %hhu\n", u.v8);
+ }
+
+ pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &u.v8);
+ off += scnprintf(buf + off, buf_size - off,
+ "SBAR23SZ %hhu\n", u.v8);
+ if (!ndev->bar4_split) {
+ pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &u.v8);
+ off += scnprintf(buf + off, buf_size - off,
+ "SBAR45SZ %hhu\n", u.v8);
+ } else {
+ pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &u.v8);
+ off += scnprintf(buf + off, buf_size - off,
+ "SBAR4SZ %hhu\n", u.v8);
+ pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &u.v8);
+ off += scnprintf(buf + off, buf_size - off,
+ "SBAR5SZ %hhu\n", u.v8);
+ }
+
+ off += scnprintf(buf + off, buf_size - off,
"\nNTB Incoming XLAT:\n");
u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 2));
@@ -669,7 +706,7 @@ static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
"LMT45 -\t\t\t%#018llx\n", u.v64);
}
- if (pdev_is_xeon(ndev->ntb.pdev)) {
+ if (pdev_is_xeon(pdev)) {
if (ntb_topo_is_b2b(ndev->ntb.topo)) {
off += scnprintf(buf + off, buf_size - off,
"\nNTB Outgoing B2B XLAT:\n");
@@ -750,22 +787,22 @@ static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
off += scnprintf(buf + off, buf_size - off,
"\nXEON NTB Hardware Errors:\n");
- if (!pci_read_config_word(ndev->ntb.pdev,
+ if (!pci_read_config_word(pdev,
XEON_DEVSTS_OFFSET, &u.v16))
off += scnprintf(buf + off, buf_size - off,
"DEVSTS -\t\t%#06x\n", u.v16);
- if (!pci_read_config_word(ndev->ntb.pdev,
+ if (!pci_read_config_word(pdev,
XEON_LINK_STATUS_OFFSET, &u.v16))
off += scnprintf(buf + off, buf_size - off,
"LNKSTS -\t\t%#06x\n", u.v16);
- if (!pci_read_config_dword(ndev->ntb.pdev,
+ if (!pci_read_config_dword(pdev,
XEON_UNCERRSTS_OFFSET, &u.v32))
off += scnprintf(buf + off, buf_size - off,
"UNCERRSTS -\t\t%#06x\n", u.v32);
- if (!pci_read_config_dword(ndev->ntb.pdev,
+ if (!pci_read_config_dword(pdev,
XEON_CORERRSTS_OFFSET, &u.v32))
off += scnprintf(buf + off, buf_size - off,
"CORERRSTS -\t\t%#06x\n", u.v32);
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 2ef9d9130..d5c5894f2 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -153,6 +153,7 @@ struct ntb_transport_qp {
unsigned int rx_index;
unsigned int rx_max_entry;
unsigned int rx_max_frame;
+ unsigned int rx_alloc_entry;
dma_cookie_t last_cookie;
struct tasklet_struct rxc_db_work;
@@ -480,7 +481,9 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"rx_index - \t%u\n", qp->rx_index);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "rx_max_entry - \t%u\n\n", qp->rx_max_entry);
+ "rx_max_entry - \t%u\n", qp->rx_max_entry);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "rx_alloc_entry - \t%u\n\n", qp->rx_alloc_entry);
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"tx_bytes - \t%llu\n", qp->tx_bytes);
@@ -597,9 +600,12 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
{
struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
struct ntb_transport_mw *mw;
+ struct ntb_dev *ndev = nt->ndev;
+ struct ntb_queue_entry *entry;
unsigned int rx_size, num_qps_mw;
unsigned int mw_num, mw_count, qp_count;
unsigned int i;
+ int node;
mw_count = nt->mw_count;
qp_count = nt->qp_count;
@@ -626,6 +632,23 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
qp->rx_max_entry = rx_size / qp->rx_max_frame;
qp->rx_index = 0;
+ /*
+ * Check whether we now have more entries than the default. If so,
+ * allocate the additional entries so the receive ring stays in
+ * sync with the transport frames.
+ */
+ node = dev_to_node(&ndev->dev);
+ for (i = qp->rx_alloc_entry; i < qp->rx_max_entry; i++) {
+ entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->qp = qp;
+ ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
+ &qp->rx_free_q);
+ qp->rx_alloc_entry++;
+ }
+
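
A worked example of the top-up loop above, assuming the module default of NTB_QP_DEF_NUM_ENTRIES == 100 preallocated entries: if the memory-window geometry yields rx_max_entry == 256, the loop allocates the missing entries so every transport frame slot has a matching queue entry:

	/* e.g. rx_alloc_entry 100, rx_max_entry 256: loop adds 156 entries */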
qp->remote_rx_info->entry = qp->rx_max_entry - 1;
/* setup the hdr offsets with 0's */
@@ -1037,6 +1060,13 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
int node;
int rc, i;
+ mw_count = ntb_mw_count(ndev);
+ if (ntb_spad_count(ndev) < (NUM_MWS + 1 + mw_count * 2)) {
+ dev_err(&ndev->dev, "Not enough scratch pad registers for %s",
+ NTB_TRANSPORT_NAME);
+ return -EIO;
+ }
+
if (ntb_db_is_unsafe(ndev))
dev_dbg(&ndev->dev,
"doorbell is unsafe, proceed anyway...\n");
@@ -1052,8 +1082,6 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
nt->ndev = ndev;
- mw_count = ntb_mw_count(ndev);
-
nt->mw_count = mw_count;
nt->mw_vec = kzalloc_node(mw_count * sizeof(*nt->mw_vec),
@@ -1722,8 +1750,9 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
&qp->rx_free_q);
}
+ qp->rx_alloc_entry = NTB_QP_DEF_NUM_ENTRIES;
- for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
+ for (i = 0; i < qp->tx_max_entry; i++) {
entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
if (!entry)
goto err2;
@@ -1744,6 +1773,7 @@ err2:
while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
kfree(entry);
err1:
+ qp->rx_alloc_entry = 0;
while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
kfree(entry);
if (qp->tx_dma_chan)
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index 8dfce9c9a..6a50f20bf 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -58,6 +58,7 @@
#include <linux/delay.h>
#include <linux/sizes.h>
#include <linux/ntb.h>
+#include <linux/mutex.h>
#define DRIVER_NAME "ntb_perf"
#define DRIVER_DESCRIPTION "PCIe NTB Performance Measurement Tool"
@@ -83,6 +84,10 @@ MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
static struct dentry *perf_debugfs_dir;
+static unsigned long max_mw_size;
+module_param(max_mw_size, ulong, 0644);
+MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");
+
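
The new parameter caps the advertised memory window at load time; for example, with an arbitrarily chosen limit:

	modprobe ntb_perf max_mw_size=0x40000000	# clamp windows to 1 GiB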
static unsigned int seg_order = 19; /* 512K */
module_param(seg_order, uint, 0644);
MODULE_PARM_DESC(seg_order, "size order [n^2] of buffer segment for testing");
@@ -117,6 +122,10 @@ struct pthr_ctx {
int dma_prep_err;
int src_idx;
void *srcs[MAX_SRCS];
+ wait_queue_head_t *wq;
+ int status;
+ u64 copied;
+ u64 diff_us;
};
struct perf_ctx {
@@ -124,23 +133,23 @@ struct perf_ctx {
spinlock_t db_lock;
struct perf_mw mw;
bool link_is_up;
- struct work_struct link_cleanup;
struct delayed_work link_work;
+ wait_queue_head_t link_wq;
struct dentry *debugfs_node_dir;
struct dentry *debugfs_run;
struct dentry *debugfs_threads;
u8 perf_threads;
- bool run;
+ /* mutex ensures only one set of threads runs at once */
+ struct mutex run_mutex;
struct pthr_ctx pthr_ctx[MAX_THREADS];
atomic_t tsync;
+ atomic_t tdone;
};
enum {
VERSION = 0,
MW_SZ_HIGH,
MW_SZ_LOW,
- SPAD_MSG,
- SPAD_ACK,
MAX_SPAD
};
@@ -148,10 +157,16 @@ static void perf_link_event(void *ctx)
{
struct perf_ctx *perf = ctx;
- if (ntb_link_is_up(perf->ntb, NULL, NULL) == 1)
+ if (ntb_link_is_up(perf->ntb, NULL, NULL) == 1) {
schedule_delayed_work(&perf->link_work, 2*HZ);
- else
- schedule_work(&perf->link_cleanup);
+ } else {
+ dev_dbg(&perf->ntb->pdev->dev, "link down\n");
+
+ if (!perf->link_is_up)
+ cancel_delayed_work_sync(&perf->link_work);
+
+ perf->link_is_up = false;
+ }
}
static void perf_db_event(void *ctx, int vec)
@@ -271,6 +286,7 @@ static int perf_move_data(struct pthr_ctx *pctx, char __iomem *dst, char *src,
char __iomem *tmp = dst;
u64 perf, diff_us;
ktime_t kstart, kstop, kdiff;
+ unsigned long last_sleep = jiffies;
chunks = div64_u64(win_size, buf_size);
total_chunks = div64_u64(total, buf_size);
@@ -286,30 +302,40 @@ static int perf_move_data(struct pthr_ctx *pctx, char __iomem *dst, char *src,
} else
tmp += buf_size;
- /* Probably should schedule every 4GB to prevent soft hang. */
- if (((copied % SZ_4G) == 0) && !use_dma) {
+ /* Reschedule every 5s to prevent a soft hang. */
+ if (unlikely((jiffies - last_sleep) > 5 * HZ)) {
+ last_sleep = jiffies;
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(1);
}
+
+ if (unlikely(kthread_should_stop()))
+ break;
}
if (use_dma) {
- pr_info("%s: All DMA descriptors submitted\n", current->comm);
- while (atomic_read(&pctx->dma_sync) != 0)
+ pr_debug("%s: All DMA descriptors submitted\n", current->comm);
+ while (atomic_read(&pctx->dma_sync) != 0) {
+ if (kthread_should_stop())
+ break;
msleep(20);
+ }
}
kstop = ktime_get();
kdiff = ktime_sub(kstop, kstart);
diff_us = ktime_to_us(kdiff);
- pr_info("%s: copied %llu bytes\n", current->comm, copied);
+ pr_debug("%s: copied %llu bytes\n", current->comm, copied);
- pr_info("%s: lasted %llu usecs\n", current->comm, diff_us);
+ pr_debug("%s: lasted %llu usecs\n", current->comm, diff_us);
perf = div64_u64(copied, diff_us);
- pr_info("%s: MBytes/s: %llu\n", current->comm, perf);
+ pr_debug("%s: MBytes/s: %llu\n", current->comm, perf);
+
+ pctx->copied = copied;
+ pctx->diff_us = diff_us;
return 0;
}
@@ -331,7 +357,7 @@ static int ntb_perf_thread(void *data)
int rc, node, i;
struct dma_chan *dma_chan = NULL;
- pr_info("kthread %s starting...\n", current->comm);
+ pr_debug("kthread %s starting...\n", current->comm);
node = dev_to_node(&pdev->dev);
@@ -389,7 +415,10 @@ static int ntb_perf_thread(void *data)
pctx->srcs[i] = NULL;
}
- return 0;
+ atomic_inc(&perf->tdone);
+ wake_up(pctx->wq);
+ rc = 0;
+ goto done;
err:
for (i = 0; i < MAX_SRCS; i++) {
@@ -402,6 +431,16 @@ err:
pctx->dma_chan = NULL;
}
+done:
+ /* Wait until we are told to stop */
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (kthread_should_stop())
+ break;
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+
return rc;
}
@@ -472,6 +511,10 @@ static void perf_link_work(struct work_struct *work)
dev_dbg(&perf->ntb->pdev->dev, "%s called\n", __func__);
size = perf->mw.phys_size;
+
+ if (max_mw_size && size > max_mw_size)
+ size = max_mw_size;
+
ntb_peer_spad_write(ndev, MW_SZ_HIGH, upper_32_bits(size));
ntb_peer_spad_write(ndev, MW_SZ_LOW, lower_32_bits(size));
ntb_peer_spad_write(ndev, VERSION, PERF_VERSION);
@@ -496,6 +539,7 @@ static void perf_link_work(struct work_struct *work)
goto out1;
perf->link_is_up = true;
+ wake_up(&perf->link_wq);
return;
@@ -508,18 +552,6 @@ out:
msecs_to_jiffies(PERF_LINK_DOWN_TIMEOUT));
}
-static void perf_link_cleanup(struct work_struct *work)
-{
- struct perf_ctx *perf = container_of(work,
- struct perf_ctx,
- link_cleanup);
-
- dev_dbg(&perf->ntb->pdev->dev, "%s called\n", __func__);
-
- if (!perf->link_is_up)
- cancel_delayed_work_sync(&perf->link_work);
-}
-
static int perf_setup_mw(struct ntb_dev *ntb, struct perf_ctx *perf)
{
struct perf_mw *mw;
@@ -544,16 +576,44 @@ static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf,
{
struct perf_ctx *perf = filp->private_data;
char *buf;
- ssize_t ret, out_offset;
+ ssize_t ret, out_off = 0;
+ struct pthr_ctx *pctx;
+ int i;
+ u64 rate;
if (!perf)
return 0;
- buf = kmalloc(64, GFP_KERNEL);
+ buf = kmalloc(1024, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- out_offset = snprintf(buf, 64, "%d\n", perf->run);
- ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
+
+ if (mutex_is_locked(&perf->run_mutex)) {
+ out_off = snprintf(buf, 64, "running\n");
+ goto read_from_buf;
+ }
+
+ for (i = 0; i < MAX_THREADS; i++) {
+ pctx = &perf->pthr_ctx[i];
+
+ if (pctx->status == -ENODATA)
+ break;
+
+ if (pctx->status) {
+ out_off += snprintf(buf + out_off, 1024 - out_off,
+ "%d: error %d\n", i,
+ pctx->status);
+ continue;
+ }
+
+ rate = div64_u64(pctx->copied, pctx->diff_us);
+ out_off += snprintf(buf + out_off, 1024 - out_off,
+ "%d: copied %llu bytes in %llu usecs, %llu MBytes/s\n",
+ i, pctx->copied, pctx->diff_us, rate);
+ }
+
+read_from_buf:
+ ret = simple_read_from_buffer(ubuf, count, offp, buf, out_off);
kfree(buf);
return ret;
@@ -564,80 +624,90 @@ static void threads_cleanup(struct perf_ctx *perf)
struct pthr_ctx *pctx;
int i;
- perf->run = false;
for (i = 0; i < MAX_THREADS; i++) {
pctx = &perf->pthr_ctx[i];
if (pctx->thread) {
- kthread_stop(pctx->thread);
+ pctx->status = kthread_stop(pctx->thread);
pctx->thread = NULL;
}
}
}
+static void perf_clear_thread_status(struct perf_ctx *perf)
+{
+ int i;
+
+ for (i = 0; i < MAX_THREADS; i++)
+ perf->pthr_ctx[i].status = -ENODATA;
+}
+
static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf,
size_t count, loff_t *offp)
{
struct perf_ctx *perf = filp->private_data;
int node, i;
+ DECLARE_WAIT_QUEUE_HEAD(wq);
- if (!perf->link_is_up)
- return 0;
+ if (wait_event_interruptible(perf->link_wq, perf->link_is_up))
+ return -ENOLINK;
if (perf->perf_threads == 0)
- return 0;
+ return -EINVAL;
- if (atomic_read(&perf->tsync) == 0)
- perf->run = false;
+ if (!mutex_trylock(&perf->run_mutex))
+ return -EBUSY;
- if (perf->run)
- threads_cleanup(perf);
- else {
- perf->run = true;
+ perf_clear_thread_status(perf);
- if (perf->perf_threads > MAX_THREADS) {
- perf->perf_threads = MAX_THREADS;
- pr_info("Reset total threads to: %u\n", MAX_THREADS);
- }
+ if (perf->perf_threads > MAX_THREADS) {
+ perf->perf_threads = MAX_THREADS;
+ pr_info("Reset total threads to: %u\n", MAX_THREADS);
+ }
- /* no greater than 1M */
- if (seg_order > MAX_SEG_ORDER) {
- seg_order = MAX_SEG_ORDER;
- pr_info("Fix seg_order to %u\n", seg_order);
- }
+ /* no greater than 1M */
+ if (seg_order > MAX_SEG_ORDER) {
+ seg_order = MAX_SEG_ORDER;
+ pr_info("Fix seg_order to %u\n", seg_order);
+ }
- if (run_order < seg_order) {
- run_order = seg_order;
- pr_info("Fix run_order to %u\n", run_order);
- }
+ if (run_order < seg_order) {
+ run_order = seg_order;
+ pr_info("Fix run_order to %u\n", run_order);
+ }
- node = dev_to_node(&perf->ntb->pdev->dev);
- /* launch kernel thread */
- for (i = 0; i < perf->perf_threads; i++) {
- struct pthr_ctx *pctx;
-
- pctx = &perf->pthr_ctx[i];
- atomic_set(&pctx->dma_sync, 0);
- pctx->perf = perf;
- pctx->thread =
- kthread_create_on_node(ntb_perf_thread,
- (void *)pctx,
- node, "ntb_perf %d", i);
- if (IS_ERR(pctx->thread)) {
- pctx->thread = NULL;
- goto err;
- } else
- wake_up_process(pctx->thread);
-
- if (perf->run == false)
- return -ENXIO;
- }
+ node = dev_to_node(&perf->ntb->pdev->dev);
+ atomic_set(&perf->tdone, 0);
+ /* launch kernel thread */
+ for (i = 0; i < perf->perf_threads; i++) {
+ struct pthr_ctx *pctx;
+
+ pctx = &perf->pthr_ctx[i];
+ atomic_set(&pctx->dma_sync, 0);
+ pctx->perf = perf;
+ pctx->wq = &wq;
+ pctx->thread =
+ kthread_create_on_node(ntb_perf_thread,
+ (void *)pctx,
+ node, "ntb_perf %d", i);
+ if (IS_ERR(pctx->thread)) {
+ pctx->thread = NULL;
+ goto err;
+ } else {
+ wake_up_process(pctx->thread);
+ }
}
+ wait_event_interruptible(wq,
+ atomic_read(&perf->tdone) == perf->perf_threads);
+
+ threads_cleanup(perf);
+ mutex_unlock(&perf->run_mutex);
return count;
err:
threads_cleanup(perf);
+ mutex_unlock(&perf->run_mutex);
return -ENXIO;
}
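
The rewritten debugfs_run_write() is now fully synchronous; stripped of the parameter clamping, its control flow reduces to this sketch (a condensation of the hunk above, not additional driver code):

	if (!mutex_trylock(&perf->run_mutex))
		return -EBUSY;			/* a measurement is already running */
	/* spawn perf->perf_threads kthreads; each increments tdone and
	 * wakes the local wait queue when its copy loop finishes */
	wait_event_interruptible(wq,
		atomic_read(&perf->tdone) == perf->perf_threads);
	threads_cleanup(perf);			/* kthread_stop() collects each status */
	mutex_unlock(&perf->run_mutex);
	return count;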
@@ -688,6 +758,12 @@ static int perf_probe(struct ntb_client *client, struct ntb_dev *ntb)
int node;
int rc = 0;
+ if (ntb_spad_count(ntb) < MAX_SPAD) {
+ dev_err(&ntb->dev, "Not enough scratch pad registers for %s",
+ DRIVER_NAME);
+ return -EIO;
+ }
+
node = dev_to_node(&pdev->dev);
perf = kzalloc_node(sizeof(*perf), GFP_KERNEL, node);
@@ -699,11 +775,11 @@ static int perf_probe(struct ntb_client *client, struct ntb_dev *ntb)
perf->ntb = ntb;
perf->perf_threads = 1;
atomic_set(&perf->tsync, 0);
- perf->run = false;
+ mutex_init(&perf->run_mutex);
spin_lock_init(&perf->db_lock);
perf_setup_mw(ntb, perf);
+ init_waitqueue_head(&perf->link_wq);
INIT_DELAYED_WORK(&perf->link_work, perf_link_work);
- INIT_WORK(&perf->link_cleanup, perf_link_cleanup);
rc = ntb_set_ctx(ntb, perf, &perf_ops);
if (rc)
@@ -717,11 +793,12 @@ static int perf_probe(struct ntb_client *client, struct ntb_dev *ntb)
if (rc)
goto err_ctx;
+ perf_clear_thread_status(perf);
+
return 0;
err_ctx:
cancel_delayed_work_sync(&perf->link_work);
- cancel_work_sync(&perf->link_cleanup);
kfree(perf);
err_perf:
return rc;
@@ -734,8 +811,9 @@ static void perf_remove(struct ntb_client *client, struct ntb_dev *ntb)
dev_dbg(&perf->ntb->dev, "%s called\n", __func__);
+ mutex_lock(&perf->run_mutex);
+
cancel_delayed_work_sync(&perf->link_work);
- cancel_work_sync(&perf->link_cleanup);
ntb_clear_ctx(ntb);
ntb_link_disable(ntb);
diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c
index fe1600566..7d311799f 100644
--- a/drivers/ntb/test/ntb_pingpong.c
+++ b/drivers/ntb/test/ntb_pingpong.c
@@ -61,6 +61,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/debugfs.h>
#include <linux/ntb.h>
@@ -96,8 +97,13 @@ struct pp_ctx {
spinlock_t db_lock;
struct timer_list db_timer;
unsigned long db_delay;
+ struct dentry *debugfs_node_dir;
+ struct dentry *debugfs_count;
+ atomic_t count;
};
+static struct dentry *pp_debugfs_dir;
+
static void pp_ping(unsigned long ctx)
{
struct pp_ctx *pp = (void *)ctx;
@@ -171,10 +177,32 @@ static void pp_db_event(void *ctx, int vec)
dev_dbg(&pp->ntb->dev,
"Pong vec %d bits %#llx\n",
vec, db_bits);
+ atomic_inc(&pp->count);
}
spin_unlock_irqrestore(&pp->db_lock, irqflags);
}
+static int pp_debugfs_setup(struct pp_ctx *pp)
+{
+ struct pci_dev *pdev = pp->ntb->pdev;
+
+ if (!pp_debugfs_dir)
+ return -ENODEV;
+
+ pp->debugfs_node_dir = debugfs_create_dir(pci_name(pdev),
+ pp_debugfs_dir);
+ if (!pp->debugfs_node_dir)
+ return -ENODEV;
+
+ pp->debugfs_count = debugfs_create_atomic_t("count", S_IRUSR | S_IWUSR,
+ pp->debugfs_node_dir,
+ &pp->count);
+ if (!pp->debugfs_count)
+ return -ENODEV;
+
+ return 0;
+}
+
static const struct ntb_ctx_ops pp_ops = {
.link_event = pp_link_event,
.db_event = pp_db_event,
@@ -210,6 +238,7 @@ static int pp_probe(struct ntb_client *client,
pp->ntb = ntb;
pp->db_bits = 0;
+ atomic_set(&pp->count, 0);
spin_lock_init(&pp->db_lock);
setup_timer(&pp->db_timer, pp_ping, (unsigned long)pp);
pp->db_delay = msecs_to_jiffies(delay_ms);
@@ -218,6 +247,10 @@ static int pp_probe(struct ntb_client *client,
if (rc)
goto err_ctx;
+ rc = pp_debugfs_setup(pp);
+ if (rc)
+ goto err_ctx;
+
ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
ntb_link_event(ntb);
@@ -234,6 +267,8 @@ static void pp_remove(struct ntb_client *client,
{
struct pp_ctx *pp = ntb->ctx;
+ debugfs_remove_recursive(pp->debugfs_node_dir);
+
ntb_clear_ctx(ntb);
del_timer_sync(&pp->db_timer);
ntb_link_disable(ntb);
@@ -247,4 +282,29 @@ static struct ntb_client pp_client = {
.remove = pp_remove,
},
};
-module_ntb_client(pp_client);
+
+static int __init pp_init(void)
+{
+ int rc;
+
+ if (debugfs_initialized())
+ pp_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+ rc = ntb_register_client(&pp_client);
+ if (rc)
+ goto err_client;
+
+ return 0;
+
+err_client:
+ debugfs_remove_recursive(pp_debugfs_dir);
+ return rc;
+}
+module_init(pp_init);
+
+static void __exit pp_exit(void)
+{
+ ntb_unregister_client(&pp_client);
+ debugfs_remove_recursive(pp_debugfs_dir);
+}
+module_exit(pp_exit);
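
With the module-level debugfs directory in place, the new pong counter can be read per device. A hypothetical session (the PCI name will differ per system):

	cat /sys/kernel/debug/ntb_pingpong/0000:00:03.0/count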
diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c
index 6f5dc6ca6..61bf2ef87 100644
--- a/drivers/ntb/test/ntb_tool.c
+++ b/drivers/ntb/test/ntb_tool.c
@@ -59,6 +59,12 @@
*
* Eg: check if clearing the doorbell mask generates an interrupt.
*
+ * # Check the link status
+ * root@self# cat $DBG_DIR/link
+ *
+ * # Block until the link is up
+ * root@self# echo Y > $DBG_DIR/link_event
+ *
* # Set the doorbell mask
* root@self# echo 's 1' > $DBG_DIR/mask
*
@@ -79,6 +85,13 @@
* root@self# cat $DBG_DIR/spad
*
* Observe that spad 0 and 1 have the values set by the peer.
+ *
+ * # Check the memory window translation info
+ * root@self# cat $DBG_DIR/peer_trans0
+ *
+ * # Set up a 16k memory window buffer
+ * root@self# echo 16384 > $DBG_DIR/peer_trans0
+ *
*/
#include <linux/init.h>
@@ -89,6 +102,7 @@
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/slab.h>
+#include <linux/uaccess.h>
#include <linux/ntb.h>
@@ -105,11 +119,27 @@ MODULE_VERSION(DRIVER_VERSION);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
+#define MAX_MWS 16
+
static struct dentry *tool_dbgfs;
+struct tool_mw {
+ int idx;
+ struct tool_ctx *tc;
+ resource_size_t win_size;
+ resource_size_t size;
+ u8 __iomem *local;
+ u8 *peer;
+ dma_addr_t peer_dma;
+ struct dentry *peer_dbg_file;
+};
+
struct tool_ctx {
struct ntb_dev *ntb;
struct dentry *dbgfs;
+ wait_queue_head_t link_wq;
+ int mw_count;
+ struct tool_mw mws[MAX_MWS];
};
#define SPAD_FNAME_SIZE 0x10
@@ -135,6 +165,8 @@ static void tool_link_event(void *ctx)
dev_dbg(&tc->ntb->dev, "link is %s speed %d width %d\n",
up ? "up" : "down", speed, width);
+
+ wake_up(&tc->link_wq);
}
static void tool_db_event(void *ctx, int vec)
@@ -239,7 +271,14 @@ static ssize_t tool_spadfn_read(struct tool_ctx *tc, char __user *ubuf,
if (!spad_read_fn)
return -EINVAL;
- buf_size = min_t(size_t, size, 0x100);
+ spad_count = ntb_spad_count(tc->ntb);
+
+ /*
+ * We multiply the number of spads by 15 to get the buffer size:
+ * 3 for the %d, 10 for the widest hex value (0x00000000), and
+ * 2 for the tab and line feed.
+ */
+ buf_size = min_t(size_t, size, spad_count * 15);
buf = kmalloc(buf_size, GFP_KERNEL);
if (!buf)
@@ -247,7 +286,6 @@ static ssize_t tool_spadfn_read(struct tool_ctx *tc, char __user *ubuf,
pos = 0;
- spad_count = ntb_spad_count(tc->ntb);
for (i = 0; i < spad_count; ++i) {
pos += scnprintf(buf + pos, buf_size - pos, "%d\t%#x\n",
i, spad_read_fn(tc->ntb, i));
@@ -268,7 +306,7 @@ static ssize_t tool_spadfn_write(struct tool_ctx *tc,
{
int spad_idx;
u32 spad_val;
- char *buf;
+ char *buf, *buf_ptr;
int pos, n;
ssize_t rc;
@@ -288,14 +326,15 @@ static ssize_t tool_spadfn_write(struct tool_ctx *tc,
}
buf[size] = 0;
-
- n = sscanf(buf, "%d %i%n", &spad_idx, &spad_val, &pos);
+ buf_ptr = buf;
+ n = sscanf(buf_ptr, "%d %i%n", &spad_idx, &spad_val, &pos);
while (n == 2) {
+ buf_ptr += pos;
rc = spad_write_fn(tc->ntb, spad_idx, spad_val);
if (rc)
break;
- n = sscanf(buf + pos, "%d %i%n", &spad_idx, &spad_val, &pos);
+ n = sscanf(buf_ptr, "%d %i%n", &spad_idx, &spad_val, &pos);
}
if (n < 0)
@@ -442,8 +481,384 @@ static TOOL_FOPS_RDWR(tool_peer_spad_fops,
tool_peer_spad_read,
tool_peer_spad_write);
+static ssize_t tool_link_read(struct file *filep, char __user *ubuf,
+ size_t size, loff_t *offp)
+{
+ struct tool_ctx *tc = filep->private_data;
+ char buf[3];
+
+ buf[0] = ntb_link_is_up(tc->ntb, NULL, NULL) ? 'Y' : 'N';
+ buf[1] = '\n';
+ buf[2] = '\0';
+
+ return simple_read_from_buffer(ubuf, size, offp, buf, 2);
+}
+
+static ssize_t tool_link_write(struct file *filep, const char __user *ubuf,
+ size_t size, loff_t *offp)
+{
+ struct tool_ctx *tc = filep->private_data;
+ char buf[32];
+ size_t buf_size;
+ bool val;
+ int rc;
+
+ buf_size = min(size, (sizeof(buf) - 1));
+ if (copy_from_user(buf, ubuf, buf_size))
+ return -EFAULT;
+
+ buf[buf_size] = '\0';
+
+ rc = strtobool(buf, &val);
+ if (rc)
+ return rc;
+
+ if (val)
+ rc = ntb_link_enable(tc->ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
+ else
+ rc = ntb_link_disable(tc->ntb);
+
+ if (rc)
+ return rc;
+
+ return size;
+}
+
+static TOOL_FOPS_RDWR(tool_link_fops,
+ tool_link_read,
+ tool_link_write);
+
+static ssize_t tool_link_event_write(struct file *filep,
+ const char __user *ubuf,
+ size_t size, loff_t *offp)
+{
+ struct tool_ctx *tc = filep->private_data;
+ char buf[32];
+ size_t buf_size;
+ bool val;
+ int rc;
+
+ buf_size = min(size, (sizeof(buf) - 1));
+ if (copy_from_user(buf, ubuf, buf_size))
+ return -EFAULT;
+
+ buf[buf_size] = '\0';
+
+ rc = strtobool(buf, &val);
+ if (rc)
+ return rc;
+
+ if (wait_event_interruptible(tc->link_wq,
+ ntb_link_is_up(tc->ntb, NULL, NULL) == val))
+ return -ERESTART;
+
+ return size;
+}
+
+static TOOL_FOPS_RDWR(tool_link_event_fops,
+ NULL,
+ tool_link_event_write);
+
+static ssize_t tool_mw_read(struct file *filep, char __user *ubuf,
+ size_t size, loff_t *offp)
+{
+ struct tool_mw *mw = filep->private_data;
+ ssize_t rc;
+ loff_t pos = *offp;
+ void *buf;
+
+ if (mw->local == NULL)
+ return -EIO;
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= mw->win_size || !size)
+ return 0;
+ if (size > mw->win_size - pos)
+ size = mw->win_size - pos;
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy_fromio(buf, mw->local + pos, size);
+ rc = copy_to_user(ubuf, buf, size);
+ if (rc == size) {
+ rc = -EFAULT;
+ goto err_free;
+ }
+
+ size -= rc;
+ *offp = pos + size;
+ rc = size;
+
+err_free:
+ kfree(buf);
+
+ return rc;
+}
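
A note on the error handling above: copy_to_user() returns the number of bytes it could not copy, so the read only fails hard when nothing reached userspace and otherwise reports the partial length (offset bookkeeping elided in this simplification):

	rc = copy_to_user(ubuf, buf, size);	/* rc == bytes NOT copied */
	if (rc == size)				/* nothing was copied */
		return -EFAULT;
	return size - rc;			/* a partial copy still succeeds */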
+
+static ssize_t tool_mw_write(struct file *filep, const char __user *ubuf,
+ size_t size, loff_t *offp)
+{
+ struct tool_mw *mw = filep->private_data;
+ ssize_t rc;
+ loff_t pos = *offp;
+ void *buf;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= mw->win_size || !size)
+ return 0;
+ if (size > mw->win_size - pos)
+ size = mw->win_size - pos;
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ rc = copy_from_user(buf, ubuf, size);
+ if (rc == size) {
+ rc = -EFAULT;
+ goto err_free;
+ }
+
+ size -= rc;
+ *offp = pos + size;
+ rc = size;
+
+ memcpy_toio(mw->local + pos, buf, size);
+
+err_free:
+ kfree(buf);
+
+ return rc;
+}
+
+static TOOL_FOPS_RDWR(tool_mw_fops,
+ tool_mw_read,
+ tool_mw_write);
+
+static ssize_t tool_peer_mw_read(struct file *filep, char __user *ubuf,
+ size_t size, loff_t *offp)
+{
+ struct tool_mw *mw = filep->private_data;
+
+ if (!mw->peer)
+ return -ENXIO;
+
+ return simple_read_from_buffer(ubuf, size, offp, mw->peer, mw->size);
+}
+
+static ssize_t tool_peer_mw_write(struct file *filep, const char __user *ubuf,
+ size_t size, loff_t *offp)
+{
+ struct tool_mw *mw = filep->private_data;
+
+ if (!mw->peer)
+ return -ENXIO;
+
+ return simple_write_to_buffer(mw->peer, mw->size, offp, ubuf, size);
+}
+
+static TOOL_FOPS_RDWR(tool_peer_mw_fops,
+ tool_peer_mw_read,
+ tool_peer_mw_write);
+
+static int tool_setup_mw(struct tool_ctx *tc, int idx, size_t req_size)
+{
+ int rc;
+ struct tool_mw *mw = &tc->mws[idx];
+ phys_addr_t base;
+ resource_size_t size, align, align_size;
+ char buf[16];
+
+ if (mw->peer)
+ return 0;
+
+ rc = ntb_mw_get_range(tc->ntb, idx, &base, &size, &align,
+ &align_size);
+ if (rc)
+ return rc;
+
+ mw->size = min_t(resource_size_t, req_size, size);
+ mw->size = round_up(mw->size, align);
+ mw->size = round_up(mw->size, align_size);
+ mw->peer = dma_alloc_coherent(&tc->ntb->pdev->dev, mw->size,
+ &mw->peer_dma, GFP_KERNEL);
+
+ if (!mw->peer)
+ return -ENOMEM;
+
+ rc = ntb_mw_set_trans(tc->ntb, idx, mw->peer_dma, mw->size);
+ if (rc)
+ goto err_free_dma;
+
+ snprintf(buf, sizeof(buf), "peer_mw%d", idx);
+ mw->peer_dbg_file = debugfs_create_file(buf, S_IRUSR | S_IWUSR,
+ mw->tc->dbgfs, mw,
+ &tool_peer_mw_fops);
+
+ return 0;
+
+err_free_dma:
+ dma_free_coherent(&tc->ntb->pdev->dev, mw->size,
+ mw->peer,
+ mw->peer_dma);
+ mw->peer = NULL;
+ mw->peer_dma = 0;
+ mw->size = 0;
+
+ return rc;
+}
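
For illustration, assuming the hardware reports align == align_size == 4096: a peer_trans request of 10000 bytes is first clamped to the window size and then rounded, so the coherent buffer and the programmed translation are always alignment-sized:

	/* e.g. req_size 10000, align 4096: mw->size = round_up(10000, 4096) = 12288 */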
+
+static void tool_free_mw(struct tool_ctx *tc, int idx)
+{
+ struct tool_mw *mw = &tc->mws[idx];
+
+ if (mw->peer) {
+ ntb_mw_clear_trans(tc->ntb, idx);
+ dma_free_coherent(&tc->ntb->pdev->dev, mw->size,
+ mw->peer,
+ mw->peer_dma);
+ }
+
+ mw->peer = NULL;
+ mw->peer_dma = 0;
+
+ debugfs_remove(mw->peer_dbg_file);
+
+ mw->peer_dbg_file = NULL;
+}
+
+static ssize_t tool_peer_mw_trans_read(struct file *filep,
+ char __user *ubuf,
+ size_t size, loff_t *offp)
+{
+ struct tool_mw *mw = filep->private_data;
+
+ char *buf;
+ size_t buf_size;
+ ssize_t ret, off = 0;
+
+ phys_addr_t base;
+ resource_size_t mw_size;
+ resource_size_t align;
+ resource_size_t align_size;
+
+ buf_size = min_t(size_t, size, 512);
+
+ buf = kmalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ntb_mw_get_range(mw->tc->ntb, mw->idx,
+ &base, &mw_size, &align, &align_size);
+
+ off += scnprintf(buf + off, buf_size - off,
+ "Peer MW %d Information:\n", mw->idx);
+
+ off += scnprintf(buf + off, buf_size - off,
+ "Physical Address \t%pa[p]\n",
+ &base);
+
+ off += scnprintf(buf + off, buf_size - off,
+ "Window Size \t%lld\n",
+ (unsigned long long)mw_size);
+
+ off += scnprintf(buf + off, buf_size - off,
+ "Alignment \t%lld\n",
+ (unsigned long long)align);
+
+ off += scnprintf(buf + off, buf_size - off,
+ "Size Alignment \t%lld\n",
+ (unsigned long long)align_size);
+
+ off += scnprintf(buf + off, buf_size - off,
+ "Ready \t%c\n",
+ (mw->peer) ? 'Y' : 'N');
+
+ off += scnprintf(buf + off, buf_size - off,
+ "Allocated Size \t%zd\n",
+ (mw->peer) ? (size_t)mw->size : 0);
+
+ ret = simple_read_from_buffer(ubuf, size, offp, buf, off);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t tool_peer_mw_trans_write(struct file *filep,
+ const char __user *ubuf,
+ size_t size, loff_t *offp)
+{
+ struct tool_mw *mw = filep->private_data;
+
+ char buf[32];
+ size_t buf_size;
+ unsigned long long val;
+ int rc;
+
+ buf_size = min(size, (sizeof(buf) - 1));
+ if (copy_from_user(buf, ubuf, buf_size))
+ return -EFAULT;
+
+ buf[buf_size] = '\0';
+
+ rc = kstrtoull(buf, 0, &val);
+ if (rc)
+ return rc;
+
+ tool_free_mw(mw->tc, mw->idx);
+ if (val)
+ rc = tool_setup_mw(mw->tc, mw->idx, val);
+
+ if (rc)
+ return rc;
+
+ return size;
+}
+
+static TOOL_FOPS_RDWR(tool_peer_mw_trans_fops,
+ tool_peer_mw_trans_read,
+ tool_peer_mw_trans_write);
+
+static int tool_init_mw(struct tool_ctx *tc, int idx)
+{
+ struct tool_mw *mw = &tc->mws[idx];
+ phys_addr_t base;
+ int rc;
+
+ rc = ntb_mw_get_range(tc->ntb, idx, &base, &mw->win_size,
+ NULL, NULL);
+ if (rc)
+ return rc;
+
+ mw->tc = tc;
+ mw->idx = idx;
+ mw->local = ioremap_wc(base, mw->win_size);
+ if (!mw->local)
+ return -EFAULT;
+
+ return 0;
+}
+
+static void tool_free_mws(struct tool_ctx *tc)
+{
+ int i;
+
+ for (i = 0; i < tc->mw_count; i++) {
+ tool_free_mw(tc, i);
+
+ if (tc->mws[i].local)
+ iounmap(tc->mws[i].local);
+
+ tc->mws[i].local = NULL;
+ }
+}
+
static void tool_setup_dbgfs(struct tool_ctx *tc)
{
+ int i;
+
/* This module is useless without debugfs... */
if (!tool_dbgfs) {
tc->dbgfs = NULL;
@@ -472,12 +887,31 @@ static void tool_setup_dbgfs(struct tool_ctx *tc)
debugfs_create_file("peer_spad", S_IRUSR | S_IWUSR, tc->dbgfs,
tc, &tool_peer_spad_fops);
+
+ debugfs_create_file("link", S_IRUSR | S_IWUSR, tc->dbgfs,
+ tc, &tool_link_fops);
+
+ debugfs_create_file("link_event", S_IWUSR, tc->dbgfs,
+ tc, &tool_link_event_fops);
+
+ for (i = 0; i < tc->mw_count; i++) {
+ char buf[30];
+
+ snprintf(buf, sizeof(buf), "mw%d", i);
+ debugfs_create_file(buf, S_IRUSR | S_IWUSR, tc->dbgfs,
+ &tc->mws[i], &tool_mw_fops);
+
+ snprintf(buf, sizeof(buf), "peer_trans%d", i);
+ debugfs_create_file(buf, S_IRUSR | S_IWUSR, tc->dbgfs,
+ &tc->mws[i], &tool_peer_mw_trans_fops);
+ }
}
static int tool_probe(struct ntb_client *self, struct ntb_dev *ntb)
{
struct tool_ctx *tc;
int rc;
+ int i;
if (ntb_db_is_unsafe(ntb))
dev_dbg(&ntb->dev, "doorbell is unsafe\n");
@@ -485,13 +919,21 @@ static int tool_probe(struct ntb_client *self, struct ntb_dev *ntb)
if (ntb_spad_is_unsafe(ntb))
dev_dbg(&ntb->dev, "scratchpad is unsafe\n");
- tc = kmalloc(sizeof(*tc), GFP_KERNEL);
+ tc = kzalloc(sizeof(*tc), GFP_KERNEL);
if (!tc) {
rc = -ENOMEM;
goto err_tc;
}
tc->ntb = ntb;
+ init_waitqueue_head(&tc->link_wq);
+
+ tc->mw_count = min(ntb_mw_count(tc->ntb), MAX_MWS);
+ for (i = 0; i < tc->mw_count; i++) {
+ rc = tool_init_mw(tc, i);
+ if (rc)
+ goto err_ctx;
+ }
tool_setup_dbgfs(tc);
@@ -505,6 +947,7 @@ static int tool_probe(struct ntb_client *self, struct ntb_dev *ntb)
return 0;
err_ctx:
+ tool_free_mws(tc);
debugfs_remove_recursive(tc->dbgfs);
kfree(tc);
err_tc:
@@ -515,6 +958,8 @@ static void tool_remove(struct ntb_client *self, struct ntb_dev *ntb)
{
struct tool_ctx *tc = ntb->ctx;
+ tool_free_mws(tc);
+
ntb_clear_ctx(ntb);
ntb_link_disable(ntb);
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index 7c8a3bf07..124c2432a 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -1,6 +1,7 @@
menuconfig LIBNVDIMM
tristate "NVDIMM (Non-Volatile Memory Device) Support"
depends on PHYS_ADDR_T_64BIT
+ depends on HAS_IOMEM
depends on BLK_DEV
help
Generic support for non-volatile memory devices including
@@ -19,7 +20,6 @@ if LIBNVDIMM
config BLK_DEV_PMEM
tristate "PMEM: Persistent memory block device support"
default LIBNVDIMM
- depends on HAS_IOMEM
select ND_BTT if BTT
select ND_PFN if NVDIMM_PFN
help
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 495e06d9f..9faaa9694 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -267,10 +267,8 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
q = blk_alloc_queue(GFP_KERNEL);
if (!q)
return -ENOMEM;
- if (devm_add_action(dev, nd_blk_release_queue, q)) {
- blk_cleanup_queue(q);
+ if (devm_add_action_or_reset(dev, nd_blk_release_queue, q))
return -ENOMEM;
- }
blk_queue_make_request(q, nd_blk_make_request);
blk_queue_max_hw_sectors(q, UINT_MAX);
@@ -282,19 +280,17 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
disk = alloc_disk(0);
if (!disk)
return -ENOMEM;
- if (devm_add_action(dev, nd_blk_release_disk, disk)) {
- put_disk(disk);
- return -ENOMEM;
- }
- disk->driverfs_dev = dev;
disk->first_minor = 0;
disk->fops = &nd_blk_fops;
disk->queue = q;
disk->flags = GENHD_FL_EXT_DEVT;
nvdimm_namespace_disk_name(&nsblk->common, disk->disk_name);
set_capacity(disk, 0);
- add_disk(disk);
+ device_add_disk(dev, disk);
+
+ if (devm_add_action_or_reset(dev, nd_blk_release_disk, disk))
+ return -ENOMEM;
if (nsblk_meta_size(nsblk)) {
int rc = nd_integrity_init(disk, nsblk_meta_size(nsblk));
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 68a7c3c1e..368795aad 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1133,11 +1133,11 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
struct page *page, unsigned int len, unsigned int off,
- int rw, sector_t sector)
+ bool is_write, sector_t sector)
{
int ret;
- if (rw == READ) {
+ if (!is_write) {
ret = btt_read_pg(btt, bip, page, off, sector, len);
flush_dcache_page(page);
} else {
@@ -1155,7 +1155,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
struct bvec_iter iter;
unsigned long start;
struct bio_vec bvec;
- int err = 0, rw;
+ int err = 0;
bool do_acct;
/*
@@ -1170,7 +1170,6 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
}
do_acct = nd_iostat_start(bio, &start);
- rw = bio_data_dir(bio);
bio_for_each_segment(bvec, bio, iter) {
unsigned int len = bvec.bv_len;
@@ -1181,11 +1180,12 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
BUG_ON(len % btt->sector_size);
err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
- rw, iter.bi_sector);
+ op_is_write(bio_op(bio)), iter.bi_sector);
if (err) {
dev_info(&btt->nd_btt->dev,
"io error in %s sector %lld, len %d,\n",
- (rw == READ) ? "READ" : "WRITE",
+ (op_is_write(bio_op(bio))) ? "WRITE" :
+ "READ",
(unsigned long long) iter.bi_sector, len);
bio->bi_error = err;
break;
@@ -1200,12 +1200,12 @@ out:
}
static int btt_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, int rw)
+ struct page *page, bool is_write)
{
struct btt *btt = bdev->bd_disk->private_data;
- btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, rw, sector);
- page_endio(page, rw & WRITE, 0);
+ btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, is_write, sector);
+ page_endio(page, is_write, 0);
return 0;
}
@@ -1243,7 +1243,6 @@ static int btt_blk_init(struct btt *btt)
}
nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
- btt->btt_disk->driverfs_dev = &btt->nd_btt->dev;
btt->btt_disk->first_minor = 0;
btt->btt_disk->fops = &btt_fops;
btt->btt_disk->private_data = btt;
@@ -1258,7 +1257,7 @@ static int btt_blk_init(struct btt *btt)
btt->btt_queue->queuedata = btt;
set_capacity(btt->btt_disk, 0);
- add_disk(btt->btt_disk);
+ device_add_disk(&btt->nd_btt->dev, btt->btt_disk);
if (btt_meta_size(btt)) {
int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));
@@ -1270,6 +1269,7 @@ static int btt_blk_init(struct btt *btt)
}
}
set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
+ btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
revalidate_disk(btt->btt_disk);
return 0;
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 816d0dae6..97dd2925e 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -140,10 +140,30 @@ static ssize_t namespace_store(struct device *dev,
}
static DEVICE_ATTR_RW(namespace);
+static ssize_t size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nd_btt *nd_btt = to_nd_btt(dev);
+ ssize_t rc;
+
+ device_lock(dev);
+ if (dev->driver)
+ rc = sprintf(buf, "%llu\n", nd_btt->size);
+ else {
+ /* no size to convey if the btt instance is disabled */
+ rc = -ENXIO;
+ }
+ device_unlock(dev);
+
+ return rc;
+}
+static DEVICE_ATTR_RO(size);
+
static struct attribute *nd_btt_attributes[] = {
&dev_attr_sector_size.attr,
&dev_attr_namespace.attr,
&dev_attr_uuid.attr,
+ &dev_attr_size.attr,
NULL,
};
@@ -198,8 +218,7 @@ struct device *nd_btt_create(struct nd_region *nd_region)
{
struct device *dev = __nd_btt_create(nd_region, 0, NULL, NULL);
- if (dev)
- __nd_device_register(dev);
+ __nd_device_register(dev);
return dev;
}
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index f085f8bce..935866fe5 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -31,6 +31,7 @@
int nvdimm_major;
static int nvdimm_bus_major;
static struct class *nd_class;
+static DEFINE_IDA(nd_ida);
static int to_nd_device_type(struct device *dev)
{
@@ -60,20 +61,13 @@ static int nvdimm_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
to_nd_device_type(dev));
}
-static int nvdimm_bus_match(struct device *dev, struct device_driver *drv)
-{
- struct nd_device_driver *nd_drv = to_nd_device_driver(drv);
-
- return !!test_bit(to_nd_device_type(dev), &nd_drv->type);
-}
-
static struct module *to_bus_provider(struct device *dev)
{
/* pin bus providers while regions are enabled */
if (is_nd_pmem(dev) || is_nd_blk(dev)) {
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
- return nvdimm_bus->module;
+ return nvdimm_bus->nd_desc->module;
}
return NULL;
}
@@ -136,6 +130,21 @@ static int nvdimm_bus_remove(struct device *dev)
return rc;
}
+static void nvdimm_bus_shutdown(struct device *dev)
+{
+ struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
+ struct nd_device_driver *nd_drv = NULL;
+
+ if (dev->driver)
+ nd_drv = to_nd_device_driver(dev->driver);
+
+ if (nd_drv && nd_drv->shutdown) {
+ nd_drv->shutdown(dev);
+ dev_dbg(&nvdimm_bus->dev, "%s.shutdown(%s)\n",
+ dev->driver->name, dev_name(dev));
+ }
+}
+
void nd_device_notify(struct device *dev, enum nvdimm_event event)
{
device_lock(dev);
@@ -176,8 +185,12 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
return -ENXIO;
nd_desc = nvdimm_bus->nd_desc;
+ /*
+ * If ndctl does not exist, this is a PMEM_LEGACY device and
+ * we want to just pretend everything is handled.
+ */
if (!nd_desc->ndctl)
- return -ENXIO;
+ return len;
memset(&ars_cap, 0, sizeof(ars_cap));
ars_cap.address = phys;
@@ -208,14 +221,187 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
}
EXPORT_SYMBOL_GPL(nvdimm_clear_poison);
+static int nvdimm_bus_match(struct device *dev, struct device_driver *drv);
+
static struct bus_type nvdimm_bus_type = {
.name = "nd",
.uevent = nvdimm_bus_uevent,
.match = nvdimm_bus_match,
.probe = nvdimm_bus_probe,
.remove = nvdimm_bus_remove,
+ .shutdown = nvdimm_bus_shutdown,
};
+static void nvdimm_bus_release(struct device *dev)
+{
+ struct nvdimm_bus *nvdimm_bus;
+
+ nvdimm_bus = container_of(dev, struct nvdimm_bus, dev);
+ ida_simple_remove(&nd_ida, nvdimm_bus->id);
+ kfree(nvdimm_bus);
+}
+
+static bool is_nvdimm_bus(struct device *dev)
+{
+ return dev->release == nvdimm_bus_release;
+}
+
+struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev)
+{
+ struct device *dev;
+
+ for (dev = nd_dev; dev; dev = dev->parent)
+ if (is_nvdimm_bus(dev))
+ break;
+ dev_WARN_ONCE(nd_dev, !dev, "invalid dev, not on nd bus\n");
+ if (dev)
+ return to_nvdimm_bus(dev);
+ return NULL;
+}
+
+struct nvdimm_bus *to_nvdimm_bus(struct device *dev)
+{
+ struct nvdimm_bus *nvdimm_bus;
+
+ nvdimm_bus = container_of(dev, struct nvdimm_bus, dev);
+ WARN_ON(!is_nvdimm_bus(dev));
+ return nvdimm_bus;
+}
+EXPORT_SYMBOL_GPL(to_nvdimm_bus);
+
+struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
+ struct nvdimm_bus_descriptor *nd_desc)
+{
+ struct nvdimm_bus *nvdimm_bus;
+ int rc;
+
+ nvdimm_bus = kzalloc(sizeof(*nvdimm_bus), GFP_KERNEL);
+ if (!nvdimm_bus)
+ return NULL;
+ INIT_LIST_HEAD(&nvdimm_bus->list);
+ INIT_LIST_HEAD(&nvdimm_bus->mapping_list);
+ INIT_LIST_HEAD(&nvdimm_bus->poison_list);
+ init_waitqueue_head(&nvdimm_bus->probe_wait);
+ nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL);
+ mutex_init(&nvdimm_bus->reconfig_mutex);
+ if (nvdimm_bus->id < 0) {
+ kfree(nvdimm_bus);
+ return NULL;
+ }
+ nvdimm_bus->nd_desc = nd_desc;
+ nvdimm_bus->dev.parent = parent;
+ nvdimm_bus->dev.release = nvdimm_bus_release;
+ nvdimm_bus->dev.groups = nd_desc->attr_groups;
+ nvdimm_bus->dev.bus = &nvdimm_bus_type;
+ dev_set_name(&nvdimm_bus->dev, "ndbus%d", nvdimm_bus->id);
+ rc = device_register(&nvdimm_bus->dev);
+ if (rc) {
+ dev_dbg(&nvdimm_bus->dev, "registration failed: %d\n", rc);
+ goto err;
+ }
+
+ return nvdimm_bus;
+ err:
+ put_device(&nvdimm_bus->dev);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(nvdimm_bus_register);
+
+void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus)
+{
+ if (!nvdimm_bus)
+ return;
+ device_unregister(&nvdimm_bus->dev);
+}
+EXPORT_SYMBOL_GPL(nvdimm_bus_unregister);
+
+static int child_unregister(struct device *dev, void *data)
+{
+ /*
+ * the singular ndctl class device per bus needs to be
+ * "device_destroy"ed, so skip it here
+ *
+ * i.e. remove classless children
+ */
+ if (dev->class)
+ /* pass */;
+ else
+ nd_device_unregister(dev, ND_SYNC);
+ return 0;
+}
+
+static void free_poison_list(struct list_head *poison_list)
+{
+ struct nd_poison *pl, *next;
+
+ list_for_each_entry_safe(pl, next, poison_list, list) {
+ list_del(&pl->list);
+ kfree(pl);
+ }
+ list_del_init(poison_list);
+}
+
+static int nd_bus_remove(struct device *dev)
+{
+ struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
+
+ mutex_lock(&nvdimm_bus_list_mutex);
+ list_del_init(&nvdimm_bus->list);
+ mutex_unlock(&nvdimm_bus_list_mutex);
+
+ nd_synchronize();
+ device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister);
+
+ nvdimm_bus_lock(&nvdimm_bus->dev);
+ free_poison_list(&nvdimm_bus->poison_list);
+ nvdimm_bus_unlock(&nvdimm_bus->dev);
+
+ nvdimm_bus_destroy_ndctl(nvdimm_bus);
+
+ return 0;
+}
+
+static int nd_bus_probe(struct device *dev)
+{
+ struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
+ int rc;
+
+ rc = nvdimm_bus_create_ndctl(nvdimm_bus);
+ if (rc)
+ return rc;
+
+ mutex_lock(&nvdimm_bus_list_mutex);
+ list_add_tail(&nvdimm_bus->list, &nvdimm_bus_list);
+ mutex_unlock(&nvdimm_bus_list_mutex);
+
+ /* enable bus provider attributes to look up their local context */
+ dev_set_drvdata(dev, nvdimm_bus->nd_desc);
+
+ return 0;
+}
+
+static struct nd_device_driver nd_bus_driver = {
+ .probe = nd_bus_probe,
+ .remove = nd_bus_remove,
+ .drv = {
+ .name = "nd_bus",
+ .suppress_bind_attrs = true,
+ .bus = &nvdimm_bus_type,
+ .owner = THIS_MODULE,
+ .mod_name = KBUILD_MODNAME,
+ },
+};
+
+static int nvdimm_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct nd_device_driver *nd_drv = to_nd_device_driver(drv);
+
+ if (is_nvdimm_bus(dev) && nd_drv == &nd_bus_driver)
+ return true;
+
+ return !!test_bit(to_nd_device_type(dev), &nd_drv->type);
+}
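
The forward declaration added earlier in this file exists only so nvdimm_bus_type can reference this function before its body. With it, matching splits in two (device names assumed for illustration):

	/* ndbus0       vs nd_bus  -> true: is_nvdimm_bus(dev) && nd_drv == &nd_bus_driver */
	/* namespace0.0 vs nd_pmem -> falls through to the type-bitmask test */

so the bus device itself binds only to the internal nd_bus driver while every other nd device keeps the old behavior.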
+
static ASYNC_DOMAIN_EXCLUSIVE(nd_async_domain);
void nd_synchronize(void)
@@ -312,7 +498,7 @@ EXPORT_SYMBOL(__nd_driver_register);
int nvdimm_revalidate_disk(struct gendisk *disk)
{
- struct device *dev = disk->driverfs_dev;
+ struct device *dev = disk_to_dev(disk)->parent;
struct nd_region *nd_region = to_nd_region(dev->parent);
const char *pol = nd_region->ro ? "only" : "write";
@@ -395,12 +581,10 @@ int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus)
dev = device_create(nd_class, &nvdimm_bus->dev, devt, nvdimm_bus,
"ndctl%d", nvdimm_bus->id);
- if (IS_ERR(dev)) {
+ if (IS_ERR(dev))
dev_dbg(&nvdimm_bus->dev, "failed to register ndctl%d: %ld\n",
nvdimm_bus->id, PTR_ERR(dev));
- return PTR_ERR(dev);
- }
- return 0;
+ return PTR_ERR_OR_ZERO(dev);
}
void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus)
@@ -850,8 +1034,14 @@ int __init nvdimm_bus_init(void)
goto err_class;
}
+ rc = driver_register(&nd_bus_driver.drv);
+ if (rc)
+ goto err_nd_bus;
+
return 0;
+ err_nd_bus:
+ class_destroy(nd_class);
err_class:
unregister_chrdev(nvdimm_major, "dimmctl");
err_dimm_chrdev:
@@ -864,8 +1054,10 @@ int __init nvdimm_bus_init(void)
void nvdimm_bus_exit(void)
{
+ driver_unregister(&nd_bus_driver.drv);
class_destroy(nd_class);
unregister_chrdev(nvdimm_bus_major, "ndctl");
unregister_chrdev(nvdimm_major, "dimmctl");
bus_unregister(&nvdimm_bus_type);
+ ida_destroy(&nd_ida);
}
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index 8b2e3c4fb..d5dc80c48 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -240,7 +240,7 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
return memcpy_from_pmem(buf, nsio->addr + offset, size);
} else {
memcpy_to_pmem(nsio->addr + offset, buf, size);
- wmb_pmem();
+ nvdimm_flush(to_nd_region(ndns->dev.parent));
}
return 0;
@@ -266,9 +266,8 @@ int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
nsio->addr = devm_memremap(dev, res->start, resource_size(res),
ARCH_MEMREMAP_PMEM);
- if (IS_ERR(nsio->addr))
- return PTR_ERR(nsio->addr);
- return 0;
+
+ return PTR_ERR_OR_ZERO(nsio->addr);
}
EXPORT_SYMBOL_GPL(devm_nsio_enable);
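The PTR_ERR_OR_ZERO() conversion above is a pure simplification of the
IS_ERR()/PTR_ERR() tail. A minimal sketch of the idiom, with
example_enable() as a hypothetical caller (MEMREMAP_WB chosen only for
illustration):

    /* Sketch: PTR_ERR_OR_ZERO() collapses the common error-pointer tail. */
    static int example_enable(struct device *dev, struct resource *res)
    {
            void *addr = devm_memremap(dev, res->start,
                            resource_size(res), MEMREMAP_WB);

            /* same as: if (IS_ERR(addr)) return PTR_ERR(addr); return 0; */
            return PTR_ERR_OR_ZERO(addr);
    }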
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index be8976431..4d7bbd2df 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -20,12 +20,12 @@
#include <linux/ndctl.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include "nd-core.h"
#include "nd.h"
LIST_HEAD(nvdimm_bus_list);
DEFINE_MUTEX(nvdimm_bus_list_mutex);
-static DEFINE_IDA(nd_ida);
void nvdimm_bus_lock(struct device *dev)
{
@@ -57,6 +57,133 @@ bool is_nvdimm_bus_locked(struct device *dev)
}
EXPORT_SYMBOL(is_nvdimm_bus_locked);
+struct nvdimm_map {
+ struct nvdimm_bus *nvdimm_bus;
+ struct list_head list;
+ resource_size_t offset;
+ unsigned long flags;
+ size_t size;
+ union {
+ void *mem;
+ void __iomem *iomem;
+ };
+ struct kref kref;
+};
+
+static struct nvdimm_map *find_nvdimm_map(struct device *dev,
+ resource_size_t offset)
+{
+ struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
+ struct nvdimm_map *nvdimm_map;
+
+ list_for_each_entry(nvdimm_map, &nvdimm_bus->mapping_list, list)
+ if (nvdimm_map->offset == offset)
+ return nvdimm_map;
+ return NULL;
+}
+
+static struct nvdimm_map *alloc_nvdimm_map(struct device *dev,
+ resource_size_t offset, size_t size, unsigned long flags)
+{
+ struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
+ struct nvdimm_map *nvdimm_map;
+
+ nvdimm_map = kzalloc(sizeof(*nvdimm_map), GFP_KERNEL);
+ if (!nvdimm_map)
+ return NULL;
+
+ INIT_LIST_HEAD(&nvdimm_map->list);
+ nvdimm_map->nvdimm_bus = nvdimm_bus;
+ nvdimm_map->offset = offset;
+ nvdimm_map->flags = flags;
+ nvdimm_map->size = size;
+ kref_init(&nvdimm_map->kref);
+
+ if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev))) {
+ dev_err(&nvdimm_bus->dev, "failed to request %pa + %zd for %s\n",
+ &offset, size, dev_name(dev));
+ goto err_request_region;
+ }
+
+ if (flags)
+ nvdimm_map->mem = memremap(offset, size, flags);
+ else
+ nvdimm_map->iomem = ioremap(offset, size);
+
+ if (!nvdimm_map->mem)
+ goto err_map;
+
+ dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev), "%s: bus unlocked!",
+ __func__);
+ list_add(&nvdimm_map->list, &nvdimm_bus->mapping_list);
+
+ return nvdimm_map;
+
+ err_map:
+ release_mem_region(offset, size);
+ err_request_region:
+ kfree(nvdimm_map);
+ return NULL;
+}
+
+static void nvdimm_map_release(struct kref *kref)
+{
+ struct nvdimm_bus *nvdimm_bus;
+ struct nvdimm_map *nvdimm_map;
+
+ nvdimm_map = container_of(kref, struct nvdimm_map, kref);
+ nvdimm_bus = nvdimm_map->nvdimm_bus;
+
+ dev_dbg(&nvdimm_bus->dev, "%s: %pa\n", __func__, &nvdimm_map->offset);
+ list_del(&nvdimm_map->list);
+ if (nvdimm_map->flags)
+ memunmap(nvdimm_map->mem);
+ else
+ iounmap(nvdimm_map->iomem);
+ release_mem_region(nvdimm_map->offset, nvdimm_map->size);
+ kfree(nvdimm_map);
+}
+
+static void nvdimm_map_put(void *data)
+{
+ struct nvdimm_map *nvdimm_map = data;
+ struct nvdimm_bus *nvdimm_bus = nvdimm_map->nvdimm_bus;
+
+ nvdimm_bus_lock(&nvdimm_bus->dev);
+ kref_put(&nvdimm_map->kref, nvdimm_map_release);
+ nvdimm_bus_unlock(&nvdimm_bus->dev);
+}
+
+/**
+ * devm_nvdimm_memremap - map a resource that is shared across regions
+ * @dev: device that will own a reference to the shared mapping
+ * @offset: physical base address of the mapping
+ * @size: mapping size
+ * @flags: memremap flags, or, if zero, perform an ioremap instead
+ */
+void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
+ size_t size, unsigned long flags)
+{
+ struct nvdimm_map *nvdimm_map;
+
+ nvdimm_bus_lock(dev);
+ nvdimm_map = find_nvdimm_map(dev, offset);
+ if (!nvdimm_map)
+ nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags);
+ else
+ kref_get(&nvdimm_map->kref);
+ nvdimm_bus_unlock(dev);
+
+ if (!nvdimm_map)
+ return NULL;
+
+ if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map))
+ return NULL;
+
+ return nvdimm_map->mem;
+}
+EXPORT_SYMBOL_GPL(devm_nvdimm_memremap);
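A hedged sketch of how a bus provider might use this interface;
example_region_init(), ctl_base, and ctl_size are hypothetical names, and
the flags semantics follow the kernel-doc above (a zero flags value would
select ioremap() instead of memremap()):

    /* Sketch: repeat callers for the same @offset share one refcounted map. */
    static int example_region_init(struct device *region_dev,
                    resource_size_t ctl_base, size_t ctl_size)
    {
            void *ctl = devm_nvdimm_memremap(region_dev, ctl_base,
                            ctl_size, MEMREMAP_WB);

            if (!ctl)
                    return -ENXIO;
            /* released when the last devm reference is unwound */
            return 0;
    }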
+
u64 nd_fletcher64(void *addr, size_t len, bool le)
{
u32 *buf = addr;
@@ -73,25 +200,6 @@ u64 nd_fletcher64(void *addr, size_t len, bool le)
}
EXPORT_SYMBOL_GPL(nd_fletcher64);
-static void nvdimm_bus_release(struct device *dev)
-{
- struct nvdimm_bus *nvdimm_bus;
-
- nvdimm_bus = container_of(dev, struct nvdimm_bus, dev);
- ida_simple_remove(&nd_ida, nvdimm_bus->id);
- kfree(nvdimm_bus);
-}
-
-struct nvdimm_bus *to_nvdimm_bus(struct device *dev)
-{
- struct nvdimm_bus *nvdimm_bus;
-
- nvdimm_bus = container_of(dev, struct nvdimm_bus, dev);
- WARN_ON(nvdimm_bus->dev.release != nvdimm_bus_release);
- return nvdimm_bus;
-}
-EXPORT_SYMBOL_GPL(to_nvdimm_bus);
-
struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus)
{
/* struct nvdimm_bus definition is private to libnvdimm */
@@ -99,18 +207,12 @@ struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus)
}
EXPORT_SYMBOL_GPL(to_nd_desc);
-struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev)
+struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus)
{
- struct device *dev;
-
- for (dev = nd_dev; dev; dev = dev->parent)
- if (dev->release == nvdimm_bus_release)
- break;
- dev_WARN_ONCE(nd_dev, !dev, "invalid dev, not on nd bus\n");
- if (dev)
- return to_nvdimm_bus(dev);
- return NULL;
+ /* struct nvdimm_bus definition is private to libnvdimm */
+ return &nvdimm_bus->dev;
}
+EXPORT_SYMBOL_GPL(to_nvdimm_bus_dev);
static bool is_uuid_sep(char sep)
{
@@ -325,51 +427,6 @@ struct attribute_group nvdimm_bus_attribute_group = {
};
EXPORT_SYMBOL_GPL(nvdimm_bus_attribute_group);
-struct nvdimm_bus *__nvdimm_bus_register(struct device *parent,
- struct nvdimm_bus_descriptor *nd_desc, struct module *module)
-{
- struct nvdimm_bus *nvdimm_bus;
- int rc;
-
- nvdimm_bus = kzalloc(sizeof(*nvdimm_bus), GFP_KERNEL);
- if (!nvdimm_bus)
- return NULL;
- INIT_LIST_HEAD(&nvdimm_bus->list);
- INIT_LIST_HEAD(&nvdimm_bus->poison_list);
- init_waitqueue_head(&nvdimm_bus->probe_wait);
- nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL);
- mutex_init(&nvdimm_bus->reconfig_mutex);
- if (nvdimm_bus->id < 0) {
- kfree(nvdimm_bus);
- return NULL;
- }
- nvdimm_bus->nd_desc = nd_desc;
- nvdimm_bus->module = module;
- nvdimm_bus->dev.parent = parent;
- nvdimm_bus->dev.release = nvdimm_bus_release;
- nvdimm_bus->dev.groups = nd_desc->attr_groups;
- dev_set_name(&nvdimm_bus->dev, "ndbus%d", nvdimm_bus->id);
- rc = device_register(&nvdimm_bus->dev);
- if (rc) {
- dev_dbg(&nvdimm_bus->dev, "registration failed: %d\n", rc);
- goto err;
- }
-
- rc = nvdimm_bus_create_ndctl(nvdimm_bus);
- if (rc)
- goto err;
-
- mutex_lock(&nvdimm_bus_list_mutex);
- list_add_tail(&nvdimm_bus->list, &nvdimm_bus_list);
- mutex_unlock(&nvdimm_bus_list_mutex);
-
- return nvdimm_bus;
- err:
- put_device(&nvdimm_bus->dev);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(__nvdimm_bus_register);
-
static void set_badblock(struct badblocks *bb, sector_t s, int num)
{
dev_dbg(bb->dev, "Found a poison range (0x%llx, 0x%llx)\n",
@@ -545,54 +602,6 @@ int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
}
EXPORT_SYMBOL_GPL(nvdimm_bus_add_poison);
-static void free_poison_list(struct list_head *poison_list)
-{
- struct nd_poison *pl, *next;
-
- list_for_each_entry_safe(pl, next, poison_list, list) {
- list_del(&pl->list);
- kfree(pl);
- }
- list_del_init(poison_list);
-}
-
-static int child_unregister(struct device *dev, void *data)
-{
- /*
- * the singular ndctl class device per bus needs to be
- * "device_destroy"ed, so skip it here
- *
- * i.e. remove classless children
- */
- if (dev->class)
- /* pass */;
- else
- nd_device_unregister(dev, ND_SYNC);
- return 0;
-}
-
-void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus)
-{
- if (!nvdimm_bus)
- return;
-
- mutex_lock(&nvdimm_bus_list_mutex);
- list_del_init(&nvdimm_bus->list);
- mutex_unlock(&nvdimm_bus_list_mutex);
-
- nd_synchronize();
- device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister);
-
- nvdimm_bus_lock(&nvdimm_bus->dev);
- free_poison_list(&nvdimm_bus->poison_list);
- nvdimm_bus_unlock(&nvdimm_bus->dev);
-
- nvdimm_bus_destroy_ndctl(nvdimm_bus);
-
- device_unregister(&nvdimm_bus->dev);
-}
-EXPORT_SYMBOL_GPL(nvdimm_bus_unregister);
-
#ifdef CONFIG_BLK_DEV_INTEGRITY
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
@@ -601,7 +610,8 @@ int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
if (meta_size == 0)
return 0;
- bi.profile = NULL;
+ memset(&bi, 0, sizeof(bi));
+
bi.tuple_size = meta_size;
bi.tag_size = meta_size;
@@ -650,7 +660,6 @@ static __exit void libnvdimm_exit(void)
nvdimm_bus_exit();
nd_region_devs_exit();
nvdimm_devs_exit();
- ida_destroy(&nd_ida);
}
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index bbde28d3d..d9bba5edd 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -346,7 +346,8 @@ EXPORT_SYMBOL_GPL(nvdimm_attribute_group);
struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
const struct attribute_group **groups, unsigned long flags,
- unsigned long cmd_mask)
+ unsigned long cmd_mask, int num_flush,
+ struct resource *flush_wpq)
{
struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
struct device *dev;
@@ -362,6 +363,8 @@ struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
nvdimm->provider_data = provider_data;
nvdimm->flags = flags;
nvdimm->cmd_mask = cmd_mask;
+ nvdimm->num_flush = num_flush;
+ nvdimm->flush_wpq = flush_wpq;
atomic_set(&nvdimm->busy, 0);
dev = &nvdimm->dev;
dev_set_name(dev, "nmem%d", nvdimm->id);
diff --git a/drivers/nvdimm/e820.c b/drivers/nvdimm/e820.c
index 95825b385..11ea90120 100644
--- a/drivers/nvdimm/e820.c
+++ b/drivers/nvdimm/e820.c
@@ -47,6 +47,7 @@ static int e820_pmem_probe(struct platform_device *pdev)
nd_desc.attr_groups = e820_pmem_attribute_groups;
nd_desc.provider_name = "e820";
+ nd_desc.module = THIS_MODULE;
nvdimm_bus = nvdimm_bus_register(dev, &nd_desc);
if (!nvdimm_bus)
goto err;
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 284cdaa26..38ce6bbbc 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -26,11 +26,11 @@ extern int nvdimm_major;
struct nvdimm_bus {
struct nvdimm_bus_descriptor *nd_desc;
wait_queue_head_t probe_wait;
- struct module *module;
struct list_head list;
struct device dev;
int id, probe_active;
struct list_head poison_list;
+ struct list_head mapping_list;
struct mutex reconfig_mutex;
};
@@ -40,7 +40,8 @@ struct nvdimm {
unsigned long cmd_mask;
struct device dev;
atomic_t busy;
- int id;
+ int id, num_flush;
+ struct resource *flush_wpq;
};
bool is_nvdimm(struct device *dev);
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index d0ac93c31..0b78a8211 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -49,11 +49,31 @@ struct nvdimm_drvdata {
struct kref kref;
};
-struct nd_region_namespaces {
- int count;
- int active;
+struct nd_region_data {
+ int ns_count;
+ int ns_active;
+ unsigned int hints_shift;
+ void __iomem *flush_wpq[0];
};
+static inline void __iomem *ndrd_get_flush_wpq(struct nd_region_data *ndrd,
+ int dimm, int hint)
+{
+ unsigned int num = 1 << ndrd->hints_shift;
+ unsigned int mask = num - 1;
+
+ return ndrd->flush_wpq[dimm * num + (hint & mask)];
+}
+
+static inline void ndrd_set_flush_wpq(struct nd_region_data *ndrd, int dimm,
+ int hint, void __iomem *flush)
+{
+ unsigned int num = 1 << ndrd->hints_shift;
+ unsigned int mask = num - 1;
+
+ ndrd->flush_wpq[dimm * num + (hint & mask)] = flush;
+}
+
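The accessors above flatten each dimm's flush-hint slots into a single
array; a worked example of the indexing, assuming hints_shift == 2:

    /*
     * With hints_shift == 2: num == 4 and mask == 3, so dimm 1, hint 5
     * resolves to flush_wpq[1 * 4 + (5 & 3)] == flush_wpq[5]; hint
     * numbers wrap within each dimm's group of four slots.
     */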
static inline struct nd_namespace_index *to_namespace_index(
struct nvdimm_drvdata *ndd, int i)
{
@@ -119,7 +139,6 @@ struct nd_region {
struct nd_blk_region {
int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
- void (*disable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
void *iobuf, u64 len, int rw);
void *blk_provider_data;
@@ -142,6 +161,7 @@ struct nd_btt {
struct nd_namespace_common *ndns;
struct btt *btt;
unsigned long lbasize;
+ u64 size;
u8 *uuid;
int id;
};
@@ -325,6 +345,7 @@ static inline void devm_nsio_disable(struct device *dev,
}
#endif
int nd_blk_region_init(struct nd_region *nd_region);
+int nd_region_activate(struct nd_region *nd_region);
void __nd_iostat_start(struct bio *bio, unsigned long *start);
static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
{
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 608fc4464..571a6c7ee 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -29,27 +29,28 @@
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/nd.h>
+#include "pmem.h"
#include "pfn.h"
#include "nd.h"
-struct pmem_device {
- /* One contiguous memory region per device */
- phys_addr_t phys_addr;
- /* when non-zero this device is hosting a 'pfn' instance */
- phys_addr_t data_offset;
- u64 pfn_flags;
- void __pmem *virt_addr;
- /* immutable base size of the namespace */
- size_t size;
- /* trim size when namespace capacity has been section aligned */
- u32 pfn_pad;
- struct badblocks bb;
-};
+static struct device *to_dev(struct pmem_device *pmem)
+{
+ /*
+ * nvdimm bus services need a 'dev' parameter, and we record the device
+ * at init in bb.dev.
+ */
+ return pmem->bb.dev;
+}
+
+static struct nd_region *to_region(struct pmem_device *pmem)
+{
+ return to_nd_region(to_dev(pmem)->parent);
+}
static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
unsigned int len)
{
- struct device *dev = pmem->bb.dev;
+ struct device *dev = to_dev(pmem);
sector_t sector;
long cleared;
@@ -57,7 +58,7 @@ static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
if (cleared > 0 && cleared / 512) {
- dev_dbg(dev, "%s: %llx clear %ld sector%s\n",
+ dev_dbg(dev, "%s: %#llx clear %ld sector%s\n",
__func__, (unsigned long long) sector,
cleared / 512, cleared / 512 > 1 ? "s" : "");
badblocks_clear(&pmem->bb, sector, cleared / 512);
@@ -66,19 +67,19 @@ static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
}
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
- unsigned int len, unsigned int off, int rw,
+ unsigned int len, unsigned int off, bool is_write,
sector_t sector)
{
int rc = 0;
bool bad_pmem = false;
void *mem = kmap_atomic(page);
phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
- void __pmem *pmem_addr = pmem->virt_addr + pmem_off;
+ void *pmem_addr = pmem->virt_addr + pmem_off;
if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
bad_pmem = true;
- if (rw == READ) {
+ if (!is_write) {
if (unlikely(bad_pmem))
rc = -EIO;
else {
@@ -112,6 +113,11 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
return rc;
}
+/* account for REQ_FLUSH rename, replace with REQ_PREFLUSH after v4.8-rc1 */
+#ifndef REQ_FLUSH
+#define REQ_FLUSH REQ_PREFLUSH
+#endif
+
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
int rc = 0;
@@ -120,11 +126,15 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
struct bio_vec bvec;
struct bvec_iter iter;
struct pmem_device *pmem = q->queuedata;
+ struct nd_region *nd_region = to_region(pmem);
+
+ if (bio->bi_opf & REQ_FLUSH)
+ nvdimm_flush(nd_region);
do_acct = nd_iostat_start(bio, &start);
bio_for_each_segment(bvec, bio, iter) {
rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
- bvec.bv_offset, bio_data_dir(bio),
+ bvec.bv_offset, op_is_write(bio_op(bio)),
iter.bi_sector);
if (rc) {
bio->bi_error = rc;
@@ -134,22 +144,20 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
if (do_acct)
nd_iostat_end(bio, start);
- if (bio_data_dir(bio))
- wmb_pmem();
+ if (bio->bi_opf & REQ_FUA)
+ nvdimm_flush(nd_region);
bio_endio(bio);
return BLK_QC_T_NONE;
}
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, int rw)
+ struct page *page, bool is_write)
{
struct pmem_device *pmem = bdev->bd_queue->queuedata;
int rc;
- rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector);
- if (rw & WRITE)
- wmb_pmem();
+ rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, is_write, sector);
/*
* The ->rw_page interface is subtle and tricky. The core
@@ -158,13 +166,14 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
* caused by double completion.
*/
if (rc == 0)
- page_endio(page, rw & WRITE, 0);
+ page_endio(page, is_write, 0);
return rc;
}
-static long pmem_direct_access(struct block_device *bdev, sector_t sector,
- void __pmem **kaddr, pfn_t *pfn, long size)
+/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
+__weak long pmem_direct_access(struct block_device *bdev, sector_t sector,
+ void **kaddr, pfn_t *pfn, long size)
{
struct pmem_device *pmem = bdev->bd_queue->queuedata;
resource_size_t offset = sector * 512 + pmem->data_offset;
@@ -195,7 +204,7 @@ static void pmem_release_queue(void *q)
blk_cleanup_queue(q);
}
-void pmem_release_disk(void *disk)
+static void pmem_release_disk(void *disk)
{
del_gendisk(disk);
put_disk(disk);
@@ -205,6 +214,7 @@ static int pmem_attach_disk(struct device *dev,
struct nd_namespace_common *ndns)
{
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+ struct nd_region *nd_region = to_nd_region(dev->parent);
struct vmem_altmap __altmap, *altmap = NULL;
struct resource *res = &nsio->res;
struct nd_pfn *nd_pfn = NULL;
@@ -234,7 +244,7 @@ static int pmem_attach_disk(struct device *dev,
dev_set_drvdata(dev, pmem);
pmem->phys_addr = res->start;
pmem->size = resource_size(res);
- if (!arch_has_wmb_pmem())
+ if (nvdimm_has_flush(nd_region) < 0)
dev_warn(dev, "unable to guarantee persistence of writes\n");
if (!devm_request_mem_region(dev, res->start, resource_size(res),
@@ -269,42 +279,41 @@ static int pmem_attach_disk(struct device *dev,
* At release time the queue must be dead before
* devm_memremap_pages is unwound
*/
- if (devm_add_action(dev, pmem_release_queue, q)) {
- blk_cleanup_queue(q);
+ if (devm_add_action_or_reset(dev, pmem_release_queue, q))
return -ENOMEM;
- }
if (IS_ERR(addr))
return PTR_ERR(addr);
- pmem->virt_addr = (void __pmem *) addr;
+ pmem->virt_addr = addr;
+ blk_queue_write_cache(q, true, true);
blk_queue_make_request(q, pmem_make_request);
blk_queue_physical_block_size(q, PAGE_SIZE);
blk_queue_max_hw_sectors(q, UINT_MAX);
blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+ queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
q->queuedata = pmem;
disk = alloc_disk_node(0, nid);
if (!disk)
return -ENOMEM;
- if (devm_add_action(dev, pmem_release_disk, disk)) {
- put_disk(disk);
- return -ENOMEM;
- }
disk->fops = &pmem_fops;
disk->queue = q;
disk->flags = GENHD_FL_EXT_DEVT;
nvdimm_namespace_disk_name(ndns, disk->disk_name);
- disk->driverfs_dev = dev;
set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
/ 512);
if (devm_init_badblocks(dev, &pmem->bb))
return -ENOMEM;
- nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb, res);
+ nvdimm_badblocks_populate(nd_region, &pmem->bb, res);
disk->bb = &pmem->bb;
- add_disk(disk);
+ device_add_disk(dev, disk);
+
+ if (devm_add_action_or_reset(dev, pmem_release_disk, disk))
+ return -ENOMEM;
+
revalidate_disk(disk);
return 0;
@@ -340,13 +349,20 @@ static int nd_pmem_remove(struct device *dev)
{
if (is_nd_btt(dev))
nvdimm_namespace_detach_btt(to_nd_btt(dev));
+ nvdimm_flush(to_nd_region(dev->parent));
+
return 0;
}
+static void nd_pmem_shutdown(struct device *dev)
+{
+ nvdimm_flush(to_nd_region(dev->parent));
+}
+
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
- struct nd_region *nd_region = to_nd_region(dev->parent);
struct pmem_device *pmem = dev_get_drvdata(dev);
+ struct nd_region *nd_region = to_region(pmem);
resource_size_t offset = 0, end_trunc = 0;
struct nd_namespace_common *ndns;
struct nd_namespace_io *nsio;
@@ -382,6 +398,7 @@ static struct nd_device_driver nd_pmem_driver = {
.probe = nd_pmem_probe,
.remove = nd_pmem_remove,
.notify = nd_pmem_notify,
+ .shutdown = nd_pmem_shutdown,
.drv = {
.name = "nd_pmem",
},
diff --git a/drivers/nvdimm/pmem.h b/drivers/nvdimm/pmem.h
new file mode 100644
index 000000000..b4ee4f71b
--- /dev/null
+++ b/drivers/nvdimm/pmem.h
@@ -0,0 +1,24 @@
+#ifndef __NVDIMM_PMEM_H__
+#define __NVDIMM_PMEM_H__
+#include <linux/badblocks.h>
+#include <linux/types.h>
+#include <linux/pfn_t.h>
+#include <linux/fs.h>
+
+long pmem_direct_access(struct block_device *bdev, sector_t sector,
+ void **kaddr, pfn_t *pfn, long size);
+/* this definition is in its own header for tools/testing/nvdimm to consume */
+struct pmem_device {
+ /* One contiguous memory region per device */
+ phys_addr_t phys_addr;
+ /* when non-zero this device is hosting a 'pfn' instance */
+ phys_addr_t data_offset;
+ u64 pfn_flags;
+ void *virt_addr;
+ /* immutable base size of the namespace */
+ size_t size;
+ /* trim size when namespace capacity has been section aligned */
+ u32 pfn_pad;
+ struct badblocks bb;
+};
+#endif /* __NVDIMM_PMEM_H__ */
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index 05a912359..8f241772e 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -20,7 +20,7 @@ static int nd_region_probe(struct device *dev)
{
int err, rc;
static unsigned long once;
- struct nd_region_namespaces *num_ns;
+ struct nd_region_data *ndrd;
struct nd_region *nd_region = to_nd_region(dev);
if (nd_region->num_lanes > num_online_cpus()
@@ -33,21 +33,21 @@ static int nd_region_probe(struct device *dev)
nd_region->num_lanes);
}
+ rc = nd_region_activate(nd_region);
+ if (rc)
+ return rc;
+
rc = nd_blk_region_init(nd_region);
if (rc)
return rc;
rc = nd_region_register_namespaces(nd_region, &err);
- num_ns = devm_kzalloc(dev, sizeof(*num_ns), GFP_KERNEL);
- if (!num_ns)
- return -ENOMEM;
-
if (rc < 0)
return rc;
- num_ns->active = rc;
- num_ns->count = rc + err;
- dev_set_drvdata(dev, num_ns);
+ ndrd = dev_get_drvdata(dev);
+ ndrd->ns_active = rc;
+ ndrd->ns_count = rc + err;
if (rc && err && rc == err)
return -ENODEV;
@@ -82,6 +82,8 @@ static int nd_region_remove(struct device *dev)
{
struct nd_region *nd_region = to_nd_region(dev);
+ device_for_each_child(dev, NULL, child_unregister);
+
/* flush attribute readers and disable */
nvdimm_bus_lock(dev);
nd_region->ns_seed = NULL;
@@ -91,7 +93,6 @@ static int nd_region_remove(struct device *dev)
dev_set_drvdata(dev, NULL);
nvdimm_bus_unlock(dev);
- device_for_each_child(dev, NULL, child_unregister);
return 0;
}
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 40fcfea26..4c0ac4abb 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -14,13 +14,101 @@
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/hash.h>
+#include <linux/pmem.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"
+/*
+ * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
+ * irrelevant.
+ */
+#include <linux/io-64-nonatomic-hi-lo.h>
+
static DEFINE_IDA(region_ida);
+static DEFINE_PER_CPU(int, flush_idx);
+
+static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
+ struct nd_region_data *ndrd)
+{
+ int i, j;
+
+ dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
+ nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
+ for (i = 0; i < (1 << ndrd->hints_shift); i++) {
+ struct resource *res = &nvdimm->flush_wpq[i];
+ unsigned long pfn = PHYS_PFN(res->start);
+ void __iomem *flush_page;
+
+ /* check if flush hints share a page */
+ for (j = 0; j < i; j++) {
+ struct resource *res_j = &nvdimm->flush_wpq[j];
+ unsigned long pfn_j = PHYS_PFN(res_j->start);
+
+ if (pfn == pfn_j)
+ break;
+ }
+
+ if (j < i)
+ flush_page = (void __iomem *) ((unsigned long)
+ ndrd_get_flush_wpq(ndrd, dimm, j)
+ & PAGE_MASK);
+ else
+ flush_page = devm_nvdimm_ioremap(dev,
+ PFN_PHYS(pfn), PAGE_SIZE);
+ if (!flush_page)
+ return -ENXIO;
+ ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
+ + (res->start & ~PAGE_MASK));
+ }
+
+ return 0;
+}
+
+int nd_region_activate(struct nd_region *nd_region)
+{
+ int i, num_flush = 0;
+ struct nd_region_data *ndrd;
+ struct device *dev = &nd_region->dev;
+ size_t flush_data_size = sizeof(void *);
+
+ nvdimm_bus_lock(&nd_region->dev);
+ for (i = 0; i < nd_region->ndr_mappings; i++) {
+ struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+ struct nvdimm *nvdimm = nd_mapping->nvdimm;
+
+ /* at least one null hint slot per dimm for the "no-hint" case */
+ flush_data_size += sizeof(void *);
+ num_flush = min_not_zero(num_flush, nvdimm->num_flush);
+ if (!nvdimm->num_flush)
+ continue;
+ flush_data_size += nvdimm->num_flush * sizeof(void *);
+ }
+ nvdimm_bus_unlock(&nd_region->dev);
+
+ ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
+ if (!ndrd)
+ return -ENOMEM;
+ dev_set_drvdata(dev, ndrd);
+
+ if (!num_flush)
+ return 0;
+
+ ndrd->hints_shift = ilog2(num_flush);
+ for (i = 0; i < nd_region->ndr_mappings; i++) {
+ struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+ struct nvdimm *nvdimm = nd_mapping->nvdimm;
+ int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);
+
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
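A worked sizing example for the allocation above, assuming a region with
two mapped dimms whose num_flush values are 2 and 0:

    /*
     * flush_data_size = sizeof(void *)         base slot
     *                 + 2 * sizeof(void *)     one null hint slot per dimm
     *                 + 2 * sizeof(void *)     dimm0's two flush hints
     *                 = 40 bytes on a 64-bit build.
     * num_flush ends up min_not_zero(2, 0) == 2, so hints_shift == 1 and
     * each dimm owns a group of two wpq slots (dimm1's slots stay NULL).
     */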
static void nd_region_release(struct device *dev)
{
@@ -242,12 +330,12 @@ static DEVICE_ATTR_RO(available_size);
static ssize_t init_namespaces_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct nd_region_namespaces *num_ns = dev_get_drvdata(dev);
+ struct nd_region_data *ndrd = dev_get_drvdata(dev);
ssize_t rc;
nvdimm_bus_lock(dev);
- if (num_ns)
- rc = sprintf(buf, "%d/%d\n", num_ns->active, num_ns->count);
+ if (ndrd)
+ rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
else
rc = -ENXIO;
nvdimm_bus_unlock(dev);
@@ -433,8 +521,6 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
if (is_nd_pmem(dev))
return;
-
- to_nd_blk_region(dev)->disable(nvdimm_bus, dev);
}
if (dev->parent && is_nd_blk(dev->parent) && probe) {
nd_region = to_nd_region(dev->parent);
@@ -698,7 +784,6 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
if (ndbr) {
nd_region = &ndbr->nd_region;
ndbr->enable = ndbr_desc->enable;
- ndbr->disable = ndbr_desc->disable;
ndbr->do_io = ndbr_desc->do_io;
}
region_buf = ndbr;
@@ -794,6 +879,67 @@ struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);
+/**
+ * nvdimm_flush - flush any posted write queues between the cpu and pmem media
+ * @nd_region: blk or interleaved pmem region
+ */
+void nvdimm_flush(struct nd_region *nd_region)
+{
+ struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
+ int i, idx;
+
+ /*
+ * Try to encourage some diversity in flush hint addresses
+ * across cpus assuming a limited number of flush hints.
+ */
+ idx = this_cpu_read(flush_idx);
+ idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));
+
+ /*
+ * The first wmb() is needed to 'sfence' all previous writes
+ * such that they are architecturally visible for the platform
+ * buffer flush. Note that we've already arranged for pmem
+ * writes to avoid the cache via arch_memcpy_to_pmem(). The
+ * final wmb() ensures ordering for the NVDIMM flush write.
+ */
+ wmb();
+ for (i = 0; i < nd_region->ndr_mappings; i++)
+ if (ndrd_get_flush_wpq(ndrd, i, 0))
+ writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
+ wmb();
+}
+EXPORT_SYMBOL_GPL(nvdimm_flush);
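A minimal sketch of the intended call pattern, mirroring the pmem request
path earlier in this patch (example_flush_bio() is hypothetical, and
REQ_PREFLUSH follows the post-v4.8-rc1 naming noted there):

    /* Sketch: drain posted writes before a flush, and again for FUA. */
    static void example_flush_bio(struct nd_region *nd_region, struct bio *bio)
    {
            if (bio->bi_opf & REQ_PREFLUSH)   /* drain prior posted writes */
                    nvdimm_flush(nd_region);

            /* ... memcpy_to_pmem() the payload ... */

            if (bio->bi_opf & REQ_FUA)        /* make this bio's writes durable */
                    nvdimm_flush(nd_region);
    }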
+
+/**
+ * nvdimm_has_flush - determine write flushing requirements
+ * @nd_region: blk or interleaved pmem region
+ *
+ * Returns 1 if writes require flushing
+ * Returns 0 if writes do not require flushing
+ * Returns -ENXIO if flushing capability cannot be determined
+ */
+int nvdimm_has_flush(struct nd_region *nd_region)
+{
+ struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
+ int i;
+
+ /* no nvdimm == flushing capability unknown */
+ if (nd_region->ndr_mappings == 0)
+ return -ENXIO;
+
+ for (i = 0; i < nd_region->ndr_mappings; i++)
+ /* flush hints present, flushing required */
+ if (ndrd_get_flush_wpq(ndrd, i, 0))
+ return 1;
+
+ /*
+ * The platform defines dimm devices without hints; assume a
+ * platform persistence mechanism like ADR.
+ */
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvdimm_has_flush);
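A short sketch of the probe-time check, matching the pmem_attach_disk()
usage earlier in this patch (example_check_flush() is a hypothetical
wrapper):

    static void example_check_flush(struct device *dev,
                    struct nd_region *nd_region)
    {
            /* < 0: capability unknown; 0: ADR-style platform; 1: hints present */
            if (nvdimm_has_flush(nd_region) < 0)
                    dev_warn(dev, "unable to guarantee persistence of writes\n");
    }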
+
void __exit nd_region_devs_exit(void)
{
ida_destroy(&region_ida);
diff --git a/drivers/nvme/Kconfig b/drivers/nvme/Kconfig
index a39d9431e..b7c78a5b1 100644
--- a/drivers/nvme/Kconfig
+++ b/drivers/nvme/Kconfig
@@ -1 +1,2 @@
source "drivers/nvme/host/Kconfig"
+source "drivers/nvme/target/Kconfig"
diff --git a/drivers/nvme/Makefile b/drivers/nvme/Makefile
index 9421e829d..0096a7fd1 100644
--- a/drivers/nvme/Makefile
+++ b/drivers/nvme/Makefile
@@ -1,2 +1,3 @@
obj-y += host/
+obj-y += target/
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index d296fc3ae..f7d37a62f 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -24,3 +24,22 @@ config BLK_DEV_NVME_SCSI
to say N here, unless you run a distro that abuses the SCSI
emulation to provide stable device names for mount by id, like
some OpenSuSE and SLES versions.
+
+config NVME_FABRICS
+ tristate
+
+config NVME_RDMA
+ tristate "NVM Express over Fabrics RDMA host driver"
+ depends on INFINIBAND && BLOCK
+ select NVME_CORE
+ select NVME_FABRICS
+ select SG_POOL
+ help
+ This provides support for the NVMe over Fabrics protocol using
+ the RDMA (Infiniband, RoCE, iWarp) transport. This allows you
+ to use remote block devices exported using the NVMe protocol set.
+
+ To configure an NVMe over Fabrics controller, use the nvme-cli tool
+ from https://github.com/linux-nvme/nvme-cli.
+
+ If unsure, say N.
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index 9a3ca892b..47abcec23 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -1,8 +1,14 @@
obj-$(CONFIG_NVME_CORE) += nvme-core.o
obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
+obj-$(CONFIG_NVME_FABRICS) += nvme-fabrics.o
+obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o
nvme-core-y := core.o
nvme-core-$(CONFIG_BLK_DEV_NVME_SCSI) += scsi.o
nvme-core-$(CONFIG_NVM) += lightnvm.o
nvme-y += pci.o
+
+nvme-fabrics-y += fabrics.o
+
+nvme-rdma-y += rdma.o
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index d5fb55c0a..2feacc70b 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -30,6 +30,7 @@
#include <asm/unaligned.h>
#include "nvme.h"
+#include "fabrics.h"
#define NVME_MINORS (1U << MINORBITS)
@@ -47,8 +48,10 @@ unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
-static int nvme_major;
-module_param(nvme_major, int, 0);
+unsigned int nvme_max_retries = 5;
+module_param_named(max_retries, nvme_max_retries, uint, 0644);
+MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
+EXPORT_SYMBOL_GPL(nvme_max_retries);
static int nvme_char_major;
module_param(nvme_char_major, int, 0);
@@ -58,17 +61,38 @@ static DEFINE_SPINLOCK(dev_list_lock);
static struct class *nvme_class;
+void nvme_cancel_request(struct request *req, void *data, bool reserved)
+{
+ int status;
+
+ if (!blk_mq_request_started(req))
+ return;
+
+ dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
+ "Cancelling I/O %d", req->tag);
+
+ status = NVME_SC_ABORT_REQ;
+ if (blk_queue_dying(req->q))
+ status |= NVME_SC_DNR;
+ blk_mq_complete_request(req, status);
+}
+EXPORT_SYMBOL_GPL(nvme_cancel_request);
+
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
enum nvme_ctrl_state new_state)
{
- enum nvme_ctrl_state old_state = ctrl->state;
+ enum nvme_ctrl_state old_state;
bool changed = false;
spin_lock_irq(&ctrl->lock);
+
+ old_state = ctrl->state;
switch (new_state) {
case NVME_CTRL_LIVE:
switch (old_state) {
+ case NVME_CTRL_NEW:
case NVME_CTRL_RESETTING:
+ case NVME_CTRL_RECONNECTING:
changed = true;
/* FALLTHRU */
default:
@@ -79,6 +103,16 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
switch (old_state) {
case NVME_CTRL_NEW:
case NVME_CTRL_LIVE:
+ case NVME_CTRL_RECONNECTING:
+ changed = true;
+ /* FALLTHRU */
+ default:
+ break;
+ }
+ break;
+ case NVME_CTRL_RECONNECTING:
+ switch (old_state) {
+ case NVME_CTRL_LIVE:
changed = true;
/* FALLTHRU */
default:
@@ -89,6 +123,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
switch (old_state) {
case NVME_CTRL_LIVE:
case NVME_CTRL_RESETTING:
+ case NVME_CTRL_RECONNECTING:
changed = true;
/* FALLTHRU */
default:
@@ -107,11 +142,12 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
default:
break;
}
- spin_unlock_irq(&ctrl->lock);
if (changed)
ctrl->state = new_state;
+ spin_unlock_irq(&ctrl->lock);
+
return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
@@ -174,21 +210,21 @@ void nvme_requeue_req(struct request *req)
EXPORT_SYMBOL_GPL(nvme_requeue_req);
struct request *nvme_alloc_request(struct request_queue *q,
- struct nvme_command *cmd, unsigned int flags)
+ struct nvme_command *cmd, unsigned int flags, int qid)
{
- bool write = cmd->common.opcode & 1;
struct request *req;
- req = blk_mq_alloc_request(q, write, flags);
+ if (qid == NVME_QID_ANY) {
+ req = blk_mq_alloc_request(q, nvme_is_write(cmd), flags);
+ } else {
+ req = blk_mq_alloc_request_hctx(q, nvme_is_write(cmd), flags,
+ qid ? qid - 1 : 0);
+ }
if (IS_ERR(req))
return req;
req->cmd_type = REQ_TYPE_DRV_PRIV;
req->cmd_flags |= REQ_FAILFAST_DRIVER;
- req->__data_len = 0;
- req->__sector = (sector_t) -1;
- req->bio = req->biotail = NULL;
-
req->cmd = (unsigned char *)cmd;
req->cmd_len = sizeof(struct nvme_command);
@@ -290,9 +326,9 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
if (req->cmd_type == REQ_TYPE_DRV_PRIV)
memcpy(cmd, req->cmd, sizeof(*cmd));
- else if (req->cmd_flags & REQ_FLUSH)
+ else if (req_op(req) == REQ_OP_FLUSH)
nvme_setup_flush(ns, cmd);
- else if (req->cmd_flags & REQ_DISCARD)
+ else if (req_op(req) == REQ_OP_DISCARD)
ret = nvme_setup_discard(ns, req, cmd);
else
nvme_setup_rw(ns, req, cmd);
@@ -307,12 +343,12 @@ EXPORT_SYMBOL_GPL(nvme_setup_cmd);
*/
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
struct nvme_completion *cqe, void *buffer, unsigned bufflen,
- unsigned timeout)
+ unsigned timeout, int qid, int at_head, int flags)
{
struct request *req;
int ret;
- req = nvme_alloc_request(q, cmd, 0);
+ req = nvme_alloc_request(q, cmd, flags, qid);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -325,17 +361,19 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
goto out;
}
- blk_execute_rq(req->q, NULL, req, 0);
+ blk_execute_rq(req->q, NULL, req, at_head);
ret = req->errors;
out:
blk_mq_free_request(req);
return ret;
}
+EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buffer, unsigned bufflen)
{
- return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0);
+ return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
+ NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
@@ -344,7 +382,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
u32 *result, unsigned timeout)
{
- bool write = cmd->common.opcode & 1;
+ bool write = nvme_is_write(cmd);
struct nvme_completion cqe;
struct nvme_ns *ns = q->queuedata;
struct gendisk *disk = ns ? ns->disk : NULL;
@@ -353,7 +391,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
void *meta = NULL;
int ret;
- req = nvme_alloc_request(q, cmd, 0);
+ req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -439,6 +477,74 @@ int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
result, timeout);
}
+static void nvme_keep_alive_end_io(struct request *rq, int error)
+{
+ struct nvme_ctrl *ctrl = rq->end_io_data;
+
+ blk_mq_free_request(rq);
+
+ if (error) {
+ dev_err(ctrl->device,
+ "failed nvme_keep_alive_end_io error=%d\n", error);
+ return;
+ }
+
+ schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+}
+
+static int nvme_keep_alive(struct nvme_ctrl *ctrl)
+{
+ struct nvme_command c;
+ struct request *rq;
+
+ memset(&c, 0, sizeof(c));
+ c.common.opcode = nvme_admin_keep_alive;
+
+ rq = nvme_alloc_request(ctrl->admin_q, &c, BLK_MQ_REQ_RESERVED,
+ NVME_QID_ANY);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ rq->timeout = ctrl->kato * HZ;
+ rq->end_io_data = ctrl;
+
+ blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);
+
+ return 0;
+}
+
+static void nvme_keep_alive_work(struct work_struct *work)
+{
+ struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
+ struct nvme_ctrl, ka_work);
+
+ if (nvme_keep_alive(ctrl)) {
+ /* allocation failure, reset the controller */
+ dev_err(ctrl->device, "keep-alive failed\n");
+ ctrl->ops->reset_ctrl(ctrl);
+ return;
+ }
+}
+
+void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
+{
+ if (unlikely(ctrl->kato == 0))
+ return;
+
+ INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
+ schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+}
+EXPORT_SYMBOL_GPL(nvme_start_keep_alive);
+
+void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
+{
+ if (unlikely(ctrl->kato == 0))
+ return;
+
+ cancel_delayed_work_sync(&ctrl->ka_work);
+}
+EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);
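A hedged sketch of how a fabrics transport might bracket controller
live-time with these helpers (both function names are hypothetical):

    static int example_ctrl_live(struct nvme_ctrl *ctrl)
    {
            nvme_start_keep_alive(ctrl);    /* no-op when ctrl->kato == 0 */
            return 0;
    }

    static void example_ctrl_teardown(struct nvme_ctrl *ctrl)
    {
            nvme_stop_keep_alive(ctrl);     /* cancel_delayed_work_sync() inside */
    }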
+
int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
struct nvme_command c = { };
@@ -500,11 +606,12 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
memset(&c, 0, sizeof(c));
c.features.opcode = nvme_admin_get_features;
c.features.nsid = cpu_to_le32(nsid);
- c.features.prp1 = cpu_to_le64(dma_addr);
+ c.features.dptr.prp1 = cpu_to_le64(dma_addr);
c.features.fid = cpu_to_le32(fid);
- ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0);
- if (ret >= 0)
+ ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
+ NVME_QID_ANY, 0, 0);
+ if (ret >= 0 && result)
*result = le32_to_cpu(cqe.result);
return ret;
}
@@ -518,12 +625,13 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
memset(&c, 0, sizeof(c));
c.features.opcode = nvme_admin_set_features;
- c.features.prp1 = cpu_to_le64(dma_addr);
+ c.features.dptr.prp1 = cpu_to_le64(dma_addr);
c.features.fid = cpu_to_le32(fid);
c.features.dword11 = cpu_to_le32(dword11);
- ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0);
- if (ret >= 0)
+ ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0,
+ NVME_QID_ANY, 0, 0);
+ if (ret >= 0 && result)
*result = le32_to_cpu(cqe.result);
return ret;
}
@@ -558,11 +666,22 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, 0,
&result);
- if (status)
+ if (status < 0)
return status;
- nr_io_queues = min(result & 0xffff, result >> 16) + 1;
- *count = min(*count, nr_io_queues);
+ /*
+ * Degraded controllers might return an error when setting the queue
+ * count. We still want to be able to bring them online and offer
+ * access to the admin queue, as that might be the only way to fix them up.
+ */
+ if (status > 0) {
+ dev_err(ctrl->dev, "Could not set queue count (%d)\n", status);
+ *count = 0;
+ } else {
+ nr_io_queues = min(result & 0xffff, result >> 16) + 1;
+ *count = min(*count, nr_io_queues);
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);
@@ -726,6 +845,7 @@ static void nvme_init_integrity(struct nvme_ns *ns)
{
struct blk_integrity integrity;
+ memset(&integrity, 0, sizeof(integrity));
switch (ns->pi_type) {
case NVME_NS_DPS_PI_TYPE3:
integrity.profile = &t10_pi_type3_crc;
@@ -764,7 +884,7 @@ static void nvme_config_discard(struct nvme_ns *ns)
ns->queue->limits.discard_alignment = logical_block_size;
ns->queue->limits.discard_granularity = logical_block_size;
- blk_queue_max_discard_sectors(ns->queue, 0xffffffff);
+ blk_queue_max_discard_sectors(ns->queue, UINT_MAX);
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
}
@@ -991,6 +1111,15 @@ int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
if (ret)
return ret;
+
+ /* Checking for ctrl->tagset is a trick to avoid sleeping on module
+ * load, since we only need the quirk on reset_controller. Notice
+ * that the HGST device needs this delay only in firmware activation
+ * procedure; unfortunately we have no (easy) way to verify this.
+ */
+ if ((ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) && ctrl->tagset)
+ msleep(NVME_QUIRK_DELAY_AMOUNT);
+
return nvme_wait_ready(ctrl, cap, false);
}
EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
@@ -1088,6 +1217,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
struct nvme_id_ctrl *id;
u64 cap;
int ret, page_shift;
+ u32 max_hw_sectors;
ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
if (ret) {
@@ -1120,9 +1250,11 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
memcpy(ctrl->model, id->mn, sizeof(id->mn));
memcpy(ctrl->firmware_rev, id->fr, sizeof(id->fr));
if (id->mdts)
- ctrl->max_hw_sectors = 1 << (id->mdts + page_shift - 9);
+ max_hw_sectors = 1 << (id->mdts + page_shift - 9);
else
- ctrl->max_hw_sectors = UINT_MAX;
+ max_hw_sectors = UINT_MAX;
+ ctrl->max_hw_sectors =
+ min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && id->vs[3]) {
unsigned int max_hw_sectors;
@@ -1138,9 +1270,33 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
}
nvme_set_queue_limits(ctrl, ctrl->admin_q);
+ ctrl->sgls = le32_to_cpu(id->sgls);
+ ctrl->kas = le16_to_cpu(id->kas);
+
+ if (ctrl->ops->is_fabrics) {
+ ctrl->icdoff = le16_to_cpu(id->icdoff);
+ ctrl->ioccsz = le32_to_cpu(id->ioccsz);
+ ctrl->iorcsz = le32_to_cpu(id->iorcsz);
+ ctrl->maxcmd = le16_to_cpu(id->maxcmd);
+
+ /*
+ * In fabrics we need to verify the cntlid matches the
+ * admin connect
+ */
+ if (ctrl->cntlid != le16_to_cpu(id->cntlid))
+ ret = -EINVAL;
+
+ if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
+ dev_err(ctrl->dev,
+ "keep-alive support is mandatory for fabrics\n");
+ ret = -EINVAL;
+ }
+ } else {
+ ctrl->cntlid = le16_to_cpu(id->cntlid);
+ }
kfree(id);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_identify);
@@ -1322,7 +1478,7 @@ static struct attribute *nvme_ns_attrs[] = {
NULL,
};
-static umode_t nvme_attrs_are_visible(struct kobject *kobj,
+static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -1341,7 +1497,7 @@ static umode_t nvme_attrs_are_visible(struct kobject *kobj,
static const struct attribute_group nvme_ns_attr_group = {
.attrs = nvme_ns_attrs,
- .is_visible = nvme_attrs_are_visible,
+ .is_visible = nvme_ns_attrs_are_visible,
};
#define nvme_show_str_function(field) \
@@ -1367,6 +1523,49 @@ nvme_show_str_function(serial);
nvme_show_str_function(firmware_rev);
nvme_show_int_function(cntlid);
+static ssize_t nvme_sysfs_delete(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (device_remove_file_self(dev, attr))
+ ctrl->ops->delete_ctrl(ctrl);
+ return count;
+}
+static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);
+
+static ssize_t nvme_sysfs_show_transport(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
+}
+static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);
+
+static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ ctrl->ops->get_subsysnqn(ctrl));
+}
+static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
+
+static ssize_t nvme_sysfs_show_address(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
+}
+static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
+
static struct attribute *nvme_dev_attrs[] = {
&dev_attr_reset_controller.attr,
&dev_attr_rescan_controller.attr,
@@ -1374,11 +1573,38 @@ static struct attribute *nvme_dev_attrs[] = {
&dev_attr_serial.attr,
&dev_attr_firmware_rev.attr,
&dev_attr_cntlid.attr,
+ &dev_attr_delete_controller.attr,
+ &dev_attr_transport.attr,
+ &dev_attr_subsysnqn.attr,
+ &dev_attr_address.attr,
NULL
};
+#define CHECK_ATTR(ctrl, a, name) \
+ if ((a) == &dev_attr_##name.attr && \
+ !(ctrl)->ops->get_##name) \
+ return 0
+
+static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ if (a == &dev_attr_delete_controller.attr) {
+ if (!ctrl->ops->delete_ctrl)
+ return 0;
+ }
+
+ CHECK_ATTR(ctrl, a, subsysnqn);
+ CHECK_ATTR(ctrl, a, address);
+
+ return a->mode;
+}
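For reference, the invocation CHECK_ATTR(ctrl, a, subsysnqn); above
expands to:

    if ((a) == &dev_attr_subsysnqn.attr &&
                    !(ctrl)->ops->get_subsysnqn)
            return 0;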
+
static struct attribute_group nvme_dev_attrs_group = {
- .attrs = nvme_dev_attrs,
+ .attrs = nvme_dev_attrs,
+ .is_visible = nvme_dev_attrs_are_visible,
};
static const struct attribute_group *nvme_dev_attr_groups[] = {
@@ -1446,12 +1672,9 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
nvme_set_queue_limits(ctrl, ns->queue);
- disk->major = nvme_major;
- disk->first_minor = 0;
disk->fops = &nvme_fops;
disk->private_data = ns;
disk->queue = ns->queue;
- disk->driverfs_dev = ctrl->device;
disk->flags = GENHD_FL_EXT_DEVT;
sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, ns->instance);
@@ -1466,7 +1689,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
if (ns->type == NVME_NS_LIGHTNVM)
return;
- add_disk(ns->disk);
+ device_add_disk(ctrl->device, ns->disk);
if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
&nvme_ns_attr_group))
pr_warn("%s: failed to create sysfs group for identification\n",
@@ -1517,6 +1740,17 @@ static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
nvme_alloc_ns(ctrl, nsid);
}
+static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
+ unsigned nsid)
+{
+ struct nvme_ns *ns, *next;
+
+ list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
+ if (ns->ns_id > nsid)
+ nvme_ns_remove(ns);
+ }
+}
+
static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
{
struct nvme_ns *ns;
@@ -1531,7 +1765,7 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
for (i = 0; i < num_lists; i++) {
ret = nvme_identify_ns_list(ctrl, prev, ns_list);
if (ret)
- goto out;
+ goto free;
for (j = 0; j < min(nn, 1024U); j++) {
nsid = le32_to_cpu(ns_list[j]);
@@ -1551,22 +1785,20 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
nn -= j;
}
out:
+ nvme_remove_invalid_namespaces(ctrl, prev);
+ free:
kfree(ns_list);
return ret;
}
static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
{
- struct nvme_ns *ns, *next;
unsigned i;
for (i = 1; i <= nn; i++)
nvme_validate_ns(ctrl, i);
- list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
- if (ns->ns_id > nn)
- nvme_ns_remove(ns);
- }
+ nvme_remove_invalid_namespaces(ctrl, nn);
}
static void nvme_scan_work(struct work_struct *work)
@@ -1852,16 +2084,10 @@ int __init nvme_core_init(void)
{
int result;
- result = register_blkdev(nvme_major, "nvme");
- if (result < 0)
- return result;
- else if (result > 0)
- nvme_major = result;
-
result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
&nvme_dev_fops);
if (result < 0)
- goto unregister_blkdev;
+ return result;
else if (result > 0)
nvme_char_major = result;
@@ -1875,8 +2101,6 @@ int __init nvme_core_init(void)
unregister_chrdev:
__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
- unregister_blkdev:
- unregister_blkdev(nvme_major, "nvme");
return result;
}
@@ -1884,7 +2108,6 @@ void nvme_core_exit(void)
{
class_destroy(nvme_class);
__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
- unregister_blkdev(nvme_major, "nvme");
}
MODULE_LICENSE("GPL");
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
new file mode 100644
index 000000000..4eff49174
--- /dev/null
+++ b/drivers/nvme/host/fabrics.c
@@ -0,0 +1,961 @@
+/*
+ * NVMe over Fabrics common host code.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/parser.h>
+#include <linux/seq_file.h>
+#include "nvme.h"
+#include "fabrics.h"
+
+static LIST_HEAD(nvmf_transports);
+static DEFINE_MUTEX(nvmf_transports_mutex);
+
+static LIST_HEAD(nvmf_hosts);
+static DEFINE_MUTEX(nvmf_hosts_mutex);
+
+static struct nvmf_host *nvmf_default_host;
+
+static struct nvmf_host *__nvmf_host_find(const char *hostnqn)
+{
+ struct nvmf_host *host;
+
+ list_for_each_entry(host, &nvmf_hosts, list) {
+ if (!strcmp(host->nqn, hostnqn))
+ return host;
+ }
+
+ return NULL;
+}
+
+static struct nvmf_host *nvmf_host_add(const char *hostnqn)
+{
+ struct nvmf_host *host;
+
+ mutex_lock(&nvmf_hosts_mutex);
+ host = __nvmf_host_find(hostnqn);
+ if (host) {
+ kref_get(&host->ref);
+ goto out_unlock;
+ }
+
+ host = kmalloc(sizeof(*host), GFP_KERNEL);
+ if (!host)
+ goto out_unlock;
+
+ kref_init(&host->ref);
+ memcpy(host->nqn, hostnqn, NVMF_NQN_SIZE);
+ uuid_be_gen(&host->id);
+
+ list_add_tail(&host->list, &nvmf_hosts);
+out_unlock:
+ mutex_unlock(&nvmf_hosts_mutex);
+ return host;
+}
+
+static struct nvmf_host *nvmf_host_default(void)
+{
+ struct nvmf_host *host;
+
+ host = kmalloc(sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return NULL;
+
+ kref_init(&host->ref);
+ uuid_be_gen(&host->id);
+ snprintf(host->nqn, NVMF_NQN_SIZE,
+ "nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUb", &host->id);
+
+ mutex_lock(&nvmf_hosts_mutex);
+ list_add_tail(&host->list, &nvmf_hosts);
+ mutex_unlock(&nvmf_hosts_mutex);
+
+ return host;
+}
+
+static void nvmf_host_destroy(struct kref *ref)
+{
+ struct nvmf_host *host = container_of(ref, struct nvmf_host, ref);
+
+ mutex_lock(&nvmf_hosts_mutex);
+ list_del(&host->list);
+ mutex_unlock(&nvmf_hosts_mutex);
+
+ kfree(host);
+}
+
+static void nvmf_host_put(struct nvmf_host *host)
+{
+ if (host)
+ kref_put(&host->ref, nvmf_host_destroy);
+}
+
+/**
+ * nvmf_get_address() - Get address/port
+ * @ctrl: Host NVMe controller instance from which to get the address
+ * @buf: OUTPUT parameter that will contain the address/port
+ * @size: buffer size
+ */
+int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
+{
+ return snprintf(buf, size, "traddr=%s,trsvcid=%s\n",
+ ctrl->opts->traddr, ctrl->opts->trsvcid);
+}
+EXPORT_SYMBOL_GPL(nvmf_get_address);
+
+/**
+ * nvmf_get_subsysnqn() - Get subsystem NQN
+ * @ctrl: Host NVMe controller instance from which to get the NQN
+ */
+const char *nvmf_get_subsysnqn(struct nvme_ctrl *ctrl)
+{
+ return ctrl->opts->subsysnqn;
+}
+EXPORT_SYMBOL_GPL(nvmf_get_subsysnqn);
+
+/**
+ * nvmf_reg_read32() - NVMe Fabrics "Property Get" API function.
+ * @ctrl: Host NVMe controller instance maintaining the admin
+ * queue used to submit the property read command to
+ * the allocated NVMe controller resource on the target system.
+ * @off: Starting offset value of the targeted property
+ * register (see the fabrics section of the NVMe standard).
+ * @val: OUTPUT parameter that will contain the value of
+ * the property after a successful read.
+ *
+ * Used by the host system to retrieve a 32-bit capsule property value
+ * from an NVMe controller on the target system.
+ *
+ * ("Capsule property" is an "PCIe register concept" applied to the
+ * NVMe fabrics space.)
+ *
+ * Return:
+ * 0: successful read
+ * > 0: NVMe error status code
+ * < 0: Linux errno error code
+ */
+int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
+{
+ struct nvme_command cmd;
+ struct nvme_completion cqe;
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.prop_get.opcode = nvme_fabrics_command;
+ cmd.prop_get.fctype = nvme_fabrics_type_property_get;
+ cmd.prop_get.offset = cpu_to_le32(off);
+
+ ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &cqe, NULL, 0, 0,
+ NVME_QID_ANY, 0, 0);
+
+ if (ret >= 0)
+ *val = le64_to_cpu(cqe.result64);
+ if (unlikely(ret != 0))
+ dev_err(ctrl->device,
+ "Property Get error: %d, offset %#x\n",
+ ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvmf_reg_read32);
+
+/**
+ * nvmf_reg_read64() - NVMe Fabrics "Property Get" API function.
+ * @ctrl: Host NVMe controller instance maintaining the admin
+ * queue used to submit the property read command to
+ * the allocated controller resource on the target system.
+ * @off: Starting offset value of the targeted property
+ * register (see the fabrics section of the NVMe standard).
+ * @val: OUTPUT parameter that will contain the value of
+ * the property after a successful read.
+ *
+ * Used by the host system to retrieve a 64-bit capsule property value
+ * from an NVMe controller on the target system.
+ *
+ * ("Capsule property" is an "PCIe register concept" applied to the
+ * NVMe fabrics space.)
+ *
+ * Return:
+ * 0: successful read
+ * > 0: NVMe error status code
+ * < 0: Linux errno error code
+ */
+int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
+{
+ struct nvme_command cmd;
+ struct nvme_completion cqe;
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.prop_get.opcode = nvme_fabrics_command;
+ cmd.prop_get.fctype = nvme_fabrics_type_property_get;
+ cmd.prop_get.attrib = 1;
+ cmd.prop_get.offset = cpu_to_le32(off);
+
+ ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &cqe, NULL, 0, 0,
+ NVME_QID_ANY, 0, 0);
+
+ if (ret >= 0)
+ *val = le64_to_cpu(cqe.result64);
+ if (unlikely(ret != 0))
+ dev_err(ctrl->device,
+ "Property Get error: %d, offset %#x\n",
+ ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvmf_reg_read64);
+
+/**
+ * nvmf_reg_write32() - NVMe Fabrics "Property Write" API function.
+ * @ctrl: Host NVMe controller instance maintaining the admin
+ * queue used to submit the property read command to
+ * the allocated NVMe controller resource on the target system.
+ * @off: Starting offset value of the targeted property
+ * register (see the fabrics section of the NVMe standard).
+ * @val: Input parameter that contains the value to be
+ * written to the property.
+ *
+ * Used by the NVMe host system to write a 32-bit capsule property value
+ * to an NVMe controller on the target system.
+ *
+ * ("Capsule property" is an "PCIe register concept" applied to the
+ * NVMe fabrics space.)
+ *
+ * Return:
+ * 0: successful write
+ * > 0: NVMe error status code
+ * < 0: Linux errno error code
+ */
+int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
+{
+ struct nvme_command cmd;
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.prop_set.opcode = nvme_fabrics_command;
+ cmd.prop_set.fctype = nvme_fabrics_type_property_set;
+ cmd.prop_set.attrib = 0;
+ cmd.prop_set.offset = cpu_to_le32(off);
+ cmd.prop_set.value = cpu_to_le64(val);
+
+ ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, NULL, 0, 0,
+ NVME_QID_ANY, 0, 0);
+ if (unlikely(ret))
+ dev_err(ctrl->device,
+ "Property Set error: %d, offset %#x\n",
+ ret > 0 ? ret & ~NVME_SC_DNR : ret, off);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvmf_reg_write32);
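
The write-side pairing is how the generic controller-enable path drives the CC property for fabrics controllers; a sketch (NVME_REG_CC and NVME_CC_ENABLE are the standard register definitions, ctrl_config is the cached CC value in struct nvme_ctrl):

	ctrl->ctrl_config |= NVME_CC_ENABLE;
	ret = nvmf_reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;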
+
+/**
+ * nvmf_log_connect_error() - Decode and log "Connect" command
+ * errors in a human-friendly way.
+ *
+ * @ctrl: the specific /dev/nvmeX device that had the error.
+ *
+ * @errval: Error code to be decoded in a more human-friendly
+ * printout.
+ *
+ * @offset: For use with the NVMe error code NVME_SC_CONNECT_INVALID_PARAM.
+ *
+ * @cmd: This is the SQE portion of a submission capsule.
+ *
+ * @data: This is the "Data" portion of a submission capsule.
+ */
+static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
+ int errval, int offset, struct nvme_command *cmd,
+ struct nvmf_connect_data *data)
+{
+ int err_sctype = errval & (~NVME_SC_DNR);
+
+ switch (err_sctype) {
+
+ case (NVME_SC_CONNECT_INVALID_PARAM):
+ if (offset >> 16) {
+ char *inv_data = "Connect Invalid Data Parameter";
+
+ switch (offset & 0xffff) {
+ case (offsetof(struct nvmf_connect_data, cntlid)):
+ dev_err(ctrl->device,
+ "%s, cntlid: %d\n",
+ inv_data, data->cntlid);
+ break;
+ case (offsetof(struct nvmf_connect_data, hostnqn)):
+ dev_err(ctrl->device,
+ "%s, hostnqn \"%s\"\n",
+ inv_data, data->hostnqn);
+ break;
+ case (offsetof(struct nvmf_connect_data, subsysnqn)):
+ dev_err(ctrl->device,
+ "%s, subsysnqn \"%s\"\n",
+ inv_data, data->subsysnqn);
+ break;
+ default:
+ dev_err(ctrl->device,
+ "%s, starting byte offset: %d\n",
+ inv_data, offset & 0xffff);
+ break;
+ }
+ } else {
+ char *inv_sqe = "Connect Invalid SQE Parameter";
+
+ switch (offset) {
+ case (offsetof(struct nvmf_connect_command, qid)):
+ dev_err(ctrl->device,
+ "%s, qid %d\n",
+ inv_sqe, cmd->connect.qid);
+ break;
+ default:
+ dev_err(ctrl->device,
+ "%s, starting byte offset: %d\n",
+ inv_sqe, offset);
+ }
+ }
+ break;
+ default:
+ dev_err(ctrl->device,
+ "Connect command failed, error wo/DNR bit: %d\n",
+ err_sctype);
+ break;
+ } /* switch (err_sctype) */
+}
+
+/**
+ * nvmf_connect_admin_queue() - NVMe Fabrics Admin Queue "Connect"
+ * API function.
+ * @ctrl: Host nvme controller instance used to request
+ * a new NVMe controller allocation on the target
+ * system and establish an NVMe Admin connection to
+ * that controller.
+ *
+ * This function enables an NVMe host device to request a new allocation of
+ * an NVMe controller resource on a target system, as well as establish a
+ * fabrics-protocol connection of the NVMe Admin queue between the
+ * host system device and the allocated NVMe controller on the
+ * target system via an NVMe Fabrics "Connect" command.
+ *
+ * Return:
+ * 0: success
+ * > 0: NVMe error status code
+ * < 0: Linux errno error code
+ *
+ */
+int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
+{
+ struct nvme_command cmd;
+ struct nvme_completion cqe;
+ struct nvmf_connect_data *data;
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.connect.opcode = nvme_fabrics_command;
+ cmd.connect.fctype = nvme_fabrics_type_connect;
+ cmd.connect.qid = 0;
+
+ /*
+ * the fabrics spec requires a minimum queue depth of 32 for the
+ * admin queue, so always use that depth until there is
+ * justification to do otherwise.
+ */
+ cmd.connect.sqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
+
+ /*
+ * The KATO field is expressed in milliseconds, so convert the
+ * keep-alive timeout from seconds (kato * 1000) and add a grace
+ * period for controller kato enforcement
+ */
+ cmd.connect.kato = ctrl->opts->discovery_nqn ? 0 :
+ cpu_to_le32((ctrl->kato + NVME_KATO_GRACE) * 1000);
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_be));
+ data->cntlid = cpu_to_le16(0xffff);
+ strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
+ strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
+
+ ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &cqe,
+ data, sizeof(*data), 0, NVME_QID_ANY, 1,
+ BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+ if (ret) {
+ nvmf_log_connect_error(ctrl, ret, le32_to_cpu(cqe.result),
+ &cmd, data);
+ goto out_free_data;
+ }
+
+ ctrl->cntlid = le16_to_cpu(cqe.result16);
+
+out_free_data:
+ kfree(data);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue);
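
A transport's admin bring-up therefore looks roughly like the sequence below (a sketch modeled on the reconnect path of the RDMA driver later in this patch, assuming a transport ctrl that caches CAP in a cap member as struct nvme_rdma_ctrl does):

	ret = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (ret)
		return ret;

	ret = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
	if (ret)
		return ret;

	ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);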
+
+/**
+ * nvmf_connect_io_queue() - NVMe Fabrics I/O Queue "Connect"
+ * API function.
+ * @ctrl: Host nvme controller instance used to establish an
+ * NVMe I/O queue connection to the already allocated NVMe
+ * controller on the target system.
+ * @qid: NVMe I/O queue number for the new I/O connection between
+ * host and target (note qid == 0 is illegal as this is
+ * the Admin queue, per NVMe standard).
+ *
+ * This function issues a fabrics-protocol connection
+ * of an NVMe I/O queue (via the NVMe Fabrics "Connect" command)
+ * between the host system device and the allocated NVMe controller
+ * on the target system.
+ *
+ * Return:
+ * 0: success
+ * > 0: NVMe error status code
+ * < 0: Linux errno error code
+ */
+int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
+{
+ struct nvme_command cmd;
+ struct nvmf_connect_data *data;
+ struct nvme_completion cqe;
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.connect.opcode = nvme_fabrics_command;
+ cmd.connect.fctype = nvme_fabrics_type_connect;
+ cmd.connect.qid = cpu_to_le16(qid);
+ cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize);
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_be));
+ data->cntlid = cpu_to_le16(ctrl->cntlid);
+ strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
+ strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
+
+ ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &cqe,
+ data, sizeof(*data), 0, qid, 1,
+ BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+ if (ret) {
+ nvmf_log_connect_error(ctrl, ret, le32_to_cpu(cqe.result),
+ &cmd, data);
+ }
+ kfree(data);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvmf_connect_io_queue);
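
Transports call this once per I/O queue after the queues exist; the RDMA driver below wraps exactly this loop in nvme_rdma_connect_io_queues():

	int i, ret = 0;

	for (i = 1; i < ctrl->queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			break;
	}
	return ret;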
+
+/**
+ * nvmf_register_transport() - NVMe Fabrics Library registration function.
+ * @ops: Transport ops instance to be registered to the
+ * common fabrics library.
+ *
+ * API function that registers a specific transport fabric
+ * implementation with the common NVMe fabrics library. Part of
+ * the overall init sequence of starting up a fabrics driver.
+ */
+void nvmf_register_transport(struct nvmf_transport_ops *ops)
+{
+ mutex_lock(&nvmf_transports_mutex);
+ list_add_tail(&ops->entry, &nvmf_transports);
+ mutex_unlock(&nvmf_transports_mutex);
+}
+EXPORT_SYMBOL_GPL(nvmf_register_transport);
+
+/**
+ * nvmf_unregister_transport() - NVMe Fabrics Library unregistration function.
+ * @ops: Transport ops instance to be unregistered from the
+ * common fabrics library.
+ *
+ * Fabrics API function that unregisters a specific transport fabric
+ * implementation from the common NVMe fabrics library.
+ * Part of the overall exit sequence of unloading the implemented driver.
+ */
+void nvmf_unregister_transport(struct nvmf_transport_ops *ops)
+{
+ mutex_lock(&nvmf_transports_mutex);
+ list_del(&ops->entry);
+ mutex_unlock(&nvmf_transports_mutex);
+}
+EXPORT_SYMBOL_GPL(nvmf_unregister_transport);
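
A fabrics driver pairs these two calls in its module init/exit; a minimal sketch for a hypothetical "foo" transport (nvme_foo_transport being its static struct nvmf_transport_ops):

	static int __init nvme_foo_init_module(void)
	{
		nvmf_register_transport(&nvme_foo_transport);
		return 0;
	}

	static void __exit nvme_foo_cleanup_module(void)
	{
		nvmf_unregister_transport(&nvme_foo_transport);
	}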
+
+static struct nvmf_transport_ops *nvmf_lookup_transport(
+ struct nvmf_ctrl_options *opts)
+{
+ struct nvmf_transport_ops *ops;
+
+ lockdep_assert_held(&nvmf_transports_mutex);
+
+ list_for_each_entry(ops, &nvmf_transports, entry) {
+ if (strcmp(ops->name, opts->transport) == 0)
+ return ops;
+ }
+
+ return NULL;
+}
+
+static const match_table_t opt_tokens = {
+ { NVMF_OPT_TRANSPORT, "transport=%s" },
+ { NVMF_OPT_TRADDR, "traddr=%s" },
+ { NVMF_OPT_TRSVCID, "trsvcid=%s" },
+ { NVMF_OPT_NQN, "nqn=%s" },
+ { NVMF_OPT_QUEUE_SIZE, "queue_size=%d" },
+ { NVMF_OPT_NR_IO_QUEUES, "nr_io_queues=%d" },
+ { NVMF_OPT_RECONNECT_DELAY, "reconnect_delay=%d" },
+ { NVMF_OPT_KATO, "keep_alive_tmo=%d" },
+ { NVMF_OPT_HOSTNQN, "hostnqn=%s" },
+ { NVMF_OPT_ERR, NULL }
+};
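
A controller-creation request written to /dev/nvme-fabrics is simply a comma-separated list of these tokens, for example (hypothetical address and NQN):

	transport=rdma,traddr=192.168.1.100,trsvcid=4420,nqn=nqn.2014-08.org.example:subsys1,queue_size=128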
+
+static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
+ const char *buf)
+{
+ substring_t args[MAX_OPT_ARGS];
+ char *options, *o, *p;
+ int token, ret = 0;
+ size_t nqnlen = 0;
+
+ /* Set defaults */
+ opts->queue_size = NVMF_DEF_QUEUE_SIZE;
+ opts->nr_io_queues = num_online_cpus();
+ opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY;
+
+ options = o = kstrdup(buf, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ while ((p = strsep(&o, ",\n")) != NULL) {
+ if (!*p)
+ continue;
+
+ token = match_token(p, opt_tokens, args);
+ opts->mask |= token;
+ switch (token) {
+ case NVMF_OPT_TRANSPORT:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ opts->transport = p;
+ break;
+ case NVMF_OPT_NQN:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ opts->subsysnqn = p;
+ nqnlen = strlen(opts->subsysnqn);
+ if (nqnlen >= NVMF_NQN_SIZE) {
+ pr_err("%s needs to be < %d bytes\n",
+ opts->subsysnqn, NVMF_NQN_SIZE);
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->discovery_nqn =
+ !(strcmp(opts->subsysnqn,
+ NVME_DISC_SUBSYS_NAME));
+ if (opts->discovery_nqn)
+ opts->nr_io_queues = 0;
+ break;
+ case NVMF_OPT_TRADDR:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ opts->traddr = p;
+ break;
+ case NVMF_OPT_TRSVCID:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ opts->trsvcid = p;
+ break;
+ case NVMF_OPT_QUEUE_SIZE:
+ if (match_int(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (token < NVMF_MIN_QUEUE_SIZE ||
+ token > NVMF_MAX_QUEUE_SIZE) {
+ pr_err("Invalid queue_size %d\n", token);
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->queue_size = token;
+ break;
+ case NVMF_OPT_NR_IO_QUEUES:
+ if (match_int(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (token <= 0) {
+ pr_err("Invalid number of IOQs %d\n", token);
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->nr_io_queues = min_t(unsigned int,
+ num_online_cpus(), token);
+ break;
+ case NVMF_OPT_KATO:
+ if (match_int(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (opts->discovery_nqn) {
+ pr_err("Discovery controllers cannot accept keep_alive_tmo != 0\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (token < 0) {
+ pr_err("Invalid keep_alive_tmo %d\n", token);
+ ret = -EINVAL;
+ goto out;
+ } else if (token == 0) {
+ /* Allowed for debug */
+ pr_warn("keep_alive_tmo 0 won't execute keep alives!!!\n");
+ }
+ opts->kato = token;
+ break;
+ case NVMF_OPT_HOSTNQN:
+ if (opts->host) {
+ pr_err("hostnqn already user-assigned: %s\n",
+ opts->host->nqn);
+ ret = -EADDRINUSE;
+ goto out;
+ }
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ nqnlen = strlen(p);
+ if (nqnlen >= NVMF_NQN_SIZE) {
+ pr_err("%s needs to be < %d bytes\n",
+ p, NVMF_NQN_SIZE);
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->host = nvmf_host_add(p);
+ if (!opts->host) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ break;
+ case NVMF_OPT_RECONNECT_DELAY:
+ if (match_int(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (token <= 0) {
+ pr_err("Invalid reconnect_delay %d\n", token);
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->reconnect_delay = token;
+ break;
+ default:
+ pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
+ p);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (!opts->host) {
+ kref_get(&nvmf_default_host->ref);
+ opts->host = nvmf_default_host;
+ }
+
+out:
+ if (!opts->discovery_nqn && !opts->kato)
+ opts->kato = NVME_DEFAULT_KATO;
+ kfree(options);
+ return ret;
+}
+
+static int nvmf_check_required_opts(struct nvmf_ctrl_options *opts,
+ unsigned int required_opts)
+{
+ if ((opts->mask & required_opts) != required_opts) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
+ if ((opt_tokens[i].token & required_opts) &&
+ !(opt_tokens[i].token & opts->mask)) {
+ pr_warn("missing parameter '%s'\n",
+ opt_tokens[i].pattern);
+ }
+ }
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts,
+ unsigned int allowed_opts)
+{
+ if (opts->mask & ~allowed_opts) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
+ if (opt_tokens[i].token & ~allowed_opts) {
+ pr_warn("invalid parameter '%s'\n",
+ opt_tokens[i].pattern);
+ }
+ }
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void nvmf_free_options(struct nvmf_ctrl_options *opts)
+{
+ nvmf_host_put(opts->host);
+ kfree(opts->transport);
+ kfree(opts->traddr);
+ kfree(opts->trsvcid);
+ kfree(opts->subsysnqn);
+ kfree(opts);
+}
+EXPORT_SYMBOL_GPL(nvmf_free_options);
+
+#define NVMF_REQUIRED_OPTS (NVMF_OPT_TRANSPORT | NVMF_OPT_NQN)
+#define NVMF_ALLOWED_OPTS (NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \
+ NVMF_OPT_KATO | NVMF_OPT_HOSTNQN)
+
+static struct nvme_ctrl *
+nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
+{
+ struct nvmf_ctrl_options *opts;
+ struct nvmf_transport_ops *ops;
+ struct nvme_ctrl *ctrl;
+ int ret;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return ERR_PTR(-ENOMEM);
+
+ ret = nvmf_parse_options(opts, buf);
+ if (ret)
+ goto out_free_opts;
+
+ /*
+ * Check the generic options first as we need a valid transport for
+ * the lookup below. Then clear the generic flags so that transport
+ * drivers don't have to care about them.
+ */
+ ret = nvmf_check_required_opts(opts, NVMF_REQUIRED_OPTS);
+ if (ret)
+ goto out_free_opts;
+ opts->mask &= ~NVMF_REQUIRED_OPTS;
+
+ mutex_lock(&nvmf_transports_mutex);
+ ops = nvmf_lookup_transport(opts);
+ if (!ops) {
+ pr_info("no handler found for transport %s.\n",
+ opts->transport);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = nvmf_check_required_opts(opts, ops->required_opts);
+ if (ret)
+ goto out_unlock;
+ ret = nvmf_check_allowed_opts(opts, NVMF_ALLOWED_OPTS |
+ ops->allowed_opts | ops->required_opts);
+ if (ret)
+ goto out_unlock;
+
+ ctrl = ops->create_ctrl(dev, opts);
+ if (IS_ERR(ctrl)) {
+ ret = PTR_ERR(ctrl);
+ goto out_unlock;
+ }
+
+ mutex_unlock(&nvmf_transports_mutex);
+ return ctrl;
+
+out_unlock:
+ mutex_unlock(&nvmf_transports_mutex);
+out_free_opts:
+ nvmf_host_put(opts->host);
+ kfree(opts);
+ return ERR_PTR(ret);
+}
+
+static struct class *nvmf_class;
+static struct device *nvmf_device;
+static DEFINE_MUTEX(nvmf_dev_mutex);
+
+static ssize_t nvmf_dev_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *pos)
+{
+ struct seq_file *seq_file = file->private_data;
+ struct nvme_ctrl *ctrl;
+ const char *buf;
+ int ret = 0;
+
+ if (count > PAGE_SIZE)
+ return -ENOMEM;
+
+ buf = memdup_user_nul(ubuf, count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ mutex_lock(&nvmf_dev_mutex);
+ if (seq_file->private) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ctrl = nvmf_create_ctrl(nvmf_device, buf, count);
+ if (IS_ERR(ctrl)) {
+ ret = PTR_ERR(ctrl);
+ goto out_unlock;
+ }
+
+ seq_file->private = ctrl;
+
+out_unlock:
+ mutex_unlock(&nvmf_dev_mutex);
+ kfree(buf);
+ return ret ? ret : count;
+}
+
+static int nvmf_dev_show(struct seq_file *seq_file, void *private)
+{
+ struct nvme_ctrl *ctrl;
+ int ret = 0;
+
+ mutex_lock(&nvmf_dev_mutex);
+ ctrl = seq_file->private;
+ if (!ctrl) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ seq_printf(seq_file, "instance=%d,cntlid=%d\n",
+ ctrl->instance, ctrl->cntlid);
+
+out_unlock:
+ mutex_unlock(&nvmf_dev_mutex);
+ return ret;
+}
+
+static int nvmf_dev_open(struct inode *inode, struct file *file)
+{
+ /*
+ * The miscdevice code initializes file->private_data, but doesn't
+ * make use of it later.
+ */
+ file->private_data = NULL;
+ return single_open(file, nvmf_dev_show, NULL);
+}
+
+static int nvmf_dev_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq_file = file->private_data;
+ struct nvme_ctrl *ctrl = seq_file->private;
+
+ if (ctrl)
+ nvme_put_ctrl(ctrl);
+ return single_release(inode, file);
+}
+
+static const struct file_operations nvmf_dev_fops = {
+ .owner = THIS_MODULE,
+ .write = nvmf_dev_write,
+ .read = seq_read,
+ .open = nvmf_dev_open,
+ .release = nvmf_dev_release,
+};
+
+static struct miscdevice nvmf_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "nvme-fabrics",
+ .fops = &nvmf_dev_fops,
+};
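
From userspace, creating a controller is thus a plain open-and-write on the misc device; a minimal sketch (hypothetical address and NQN, error handling trimmed):

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		const char *opts = "transport=rdma,traddr=192.168.1.100,"
				   "trsvcid=4420,nqn=testnqn";
		int fd = open("/dev/nvme-fabrics", O_RDWR);

		if (fd < 0)
			return 1;
		if (write(fd, opts, strlen(opts)) < 0)
			return 1;	/* creation failed, see dmesg */
		close(fd);
		return 0;
	}

A subsequent read() on the same fd reports the instance and cntlid of the created controller (see nvmf_dev_show() above).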
+
+static int __init nvmf_init(void)
+{
+ int ret;
+
+ nvmf_default_host = nvmf_host_default();
+ if (!nvmf_default_host)
+ return -ENOMEM;
+
+ nvmf_class = class_create(THIS_MODULE, "nvme-fabrics");
+ if (IS_ERR(nvmf_class)) {
+ pr_err("couldn't register class nvme-fabrics\n");
+ ret = PTR_ERR(nvmf_class);
+ goto out_free_host;
+ }
+
+ nvmf_device =
+ device_create(nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl");
+ if (IS_ERR(nvmf_device)) {
+ pr_err("couldn't create nvme-fabris device!\n");
+ ret = PTR_ERR(nvmf_device);
+ goto out_destroy_class;
+ }
+
+ ret = misc_register(&nvmf_misc);
+ if (ret) {
+ pr_err("couldn't register misc device: %d\n", ret);
+ goto out_destroy_device;
+ }
+
+ return 0;
+
+out_destroy_device:
+ device_destroy(nvmf_class, MKDEV(0, 0));
+out_destroy_class:
+ class_destroy(nvmf_class);
+out_free_host:
+ nvmf_host_put(nvmf_default_host);
+ return ret;
+}
+
+static void __exit nvmf_exit(void)
+{
+ misc_deregister(&nvmf_misc);
+ device_destroy(nvmf_class, MKDEV(0, 0));
+ class_destroy(nvmf_class);
+ nvmf_host_put(nvmf_default_host);
+
+ BUILD_BUG_ON(sizeof(struct nvmf_connect_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvmf_property_get_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvmf_property_set_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvmf_connect_data) != 1024);
+}
+
+MODULE_LICENSE("GPL v2");
+
+module_init(nvmf_init);
+module_exit(nvmf_exit);
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
new file mode 100644
index 000000000..46e460aee
--- /dev/null
+++ b/drivers/nvme/host/fabrics.h
@@ -0,0 +1,132 @@
+/*
+ * NVMe over Fabrics common host code.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#ifndef _NVME_FABRICS_H
+#define _NVME_FABRICS_H 1
+
+#include <linux/in.h>
+#include <linux/inet.h>
+
+#define NVMF_MIN_QUEUE_SIZE 16
+#define NVMF_MAX_QUEUE_SIZE 1024
+#define NVMF_DEF_QUEUE_SIZE 128
+#define NVMF_DEF_RECONNECT_DELAY 10
+
+/*
+ * Define a host as seen by the target. We allocate one at boot, but also
+ * allow overriding it when creating controllers. This is both to provide
+ * persistence of the Host NQN over multiple boots, and to allow using
+ * multiple ones, for example in a container scenario. Because we must not
+ * use different Host NQNs with the same Host ID we generate a Host ID and
+ * use this structure to keep track of the relation between the two.
+ */
+struct nvmf_host {
+ struct kref ref;
+ struct list_head list;
+ char nqn[NVMF_NQN_SIZE];
+ uuid_be id;
+};
+
+/**
+ * enum nvmf_parsing_opts - used to define the sysfs option-parsing flags.
+ */
+enum {
+ NVMF_OPT_ERR = 0,
+ NVMF_OPT_TRANSPORT = 1 << 0,
+ NVMF_OPT_NQN = 1 << 1,
+ NVMF_OPT_TRADDR = 1 << 2,
+ NVMF_OPT_TRSVCID = 1 << 3,
+ NVMF_OPT_QUEUE_SIZE = 1 << 4,
+ NVMF_OPT_NR_IO_QUEUES = 1 << 5,
+ NVMF_OPT_TL_RETRY_COUNT = 1 << 6,
+ NVMF_OPT_KATO = 1 << 7,
+ NVMF_OPT_HOSTNQN = 1 << 8,
+ NVMF_OPT_RECONNECT_DELAY = 1 << 9,
+};
+
+/**
+ * struct nvmf_ctrl_options - Used to hold the options specified
+ * with the parsing opts enum.
+ * @mask: Used by the fabrics library to parse through sysfs options
+ * on adding an NVMe controller.
+ * @transport: Holds the fabric transport "technology name" (for lack of
+ * a better description) that will be used by an NVMe controller
+ * being added.
+ * @subsysnqn: Holds the fully qualified NQN subsystem name (format defined
+ * in the NVMe specification, "NVMe Qualified Names").
+ * @traddr: network address that will be used by the host to communicate
+ * to the added NVMe controller.
+ * @trsvcid: network port used for host-controller communication.
+ * @queue_size: Number of IO queue elements.
+ * @nr_io_queues: Number of controller IO queues that will be established.
+ * @reconnect_delay: Time between two consecutive reconnect attempts.
+ * @discovery_nqn: indicates if the subsysnqn is the well-known discovery NQN.
+ * @kato: Keep-alive timeout.
+ * @host: Virtual NVMe host, contains the NQN and Host ID.
+ */
+struct nvmf_ctrl_options {
+ unsigned mask;
+ char *transport;
+ char *subsysnqn;
+ char *traddr;
+ char *trsvcid;
+ size_t queue_size;
+ unsigned int nr_io_queues;
+ unsigned int reconnect_delay;
+ bool discovery_nqn;
+ unsigned int kato;
+ struct nvmf_host *host;
+};
+
+/*
+ * struct nvmf_transport_ops - used to register a specific
+ * fabric implementation of NVMe fabrics.
+ * @entry: Used by the fabrics library to add the new
+ * registration entry to its internal linked list.
+ * @name: Name of the NVMe fabric driver implementation.
+ * @required_opts: sysfs command-line options that must be specified
+ * when adding a new NVMe controller.
+ * @allowed_opts: sysfs command-line options that can be specified
+ * when adding a new NVMe controller.
+ * @create_ctrl(): function pointer that points to the transport's
+ * implementation-specific routine that starts up
+ * that fabric for the purpose of connecting to an
+ * NVMe controller using that fabric technology.
+ *
+ * Notes:
+ * 1. At minimum, 'required_opts' and 'allowed_opts' should
+ * be set to the same enum parsing options defined earlier.
+ * 2. create_ctrl() must be defined (even if it does nothing)
+ */
+struct nvmf_transport_ops {
+ struct list_head entry;
+ const char *name;
+ int required_opts;
+ int allowed_opts;
+ struct nvme_ctrl *(*create_ctrl)(struct device *dev,
+ struct nvmf_ctrl_options *opts);
+};
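
For reference, the RDMA transport added later in this patch fills this structure in roughly as follows (a sketch; names per the rdma.c file introduced below):

	static struct nvmf_transport_ops nvme_rdma_transport = {
		.name		= "rdma",
		.required_opts	= NVMF_OPT_TRADDR,
		.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY,
		.create_ctrl	= nvme_rdma_create_ctrl,
	};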
+
+int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
+int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
+int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
+int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl);
+int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid);
+void nvmf_register_transport(struct nvmf_transport_ops *ops);
+void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
+void nvmf_free_options(struct nvmf_ctrl_options *opts);
+const char *nvmf_get_subsysnqn(struct nvme_ctrl *ctrl);
+int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
+
+#endif /* _NVME_FABRICS_H */
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index a0af05583..63f483daf 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -156,7 +156,7 @@ struct nvme_nvm_completion {
#define NVME_NVM_LP_MLC_PAIRS 886
struct nvme_nvm_lp_mlc {
- __u16 num_pairs;
+ __le16 num_pairs;
__u8 pairs[NVME_NVM_LP_MLC_PAIRS];
};
@@ -500,7 +500,7 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
struct bio *bio = rqd->bio;
struct nvme_nvm_command *cmd;
- rq = blk_mq_alloc_request(q, bio_rw(bio), 0);
+ rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
if (IS_ERR(rq))
return -ENOMEM;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 1daa0482d..ab18b7810 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -38,6 +38,11 @@ extern unsigned char admin_timeout;
extern unsigned char shutdown_timeout;
#define SHUTDOWN_TIMEOUT (shutdown_timeout * HZ)
+#define NVME_DEFAULT_KATO 5
+#define NVME_KATO_GRACE 10
+
+extern unsigned int nvme_max_retries;
+
enum {
NVME_NS_LBA = 0,
NVME_NS_LIGHTNVM = 1,
@@ -65,12 +70,26 @@ enum nvme_quirks {
* logical blocks.
*/
NVME_QUIRK_DISCARD_ZEROES = (1 << 2),
+
+ /*
+ * The controller needs a delay before it starts checking device
+ * readiness, which is done by reading the NVME_CSTS_RDY bit.
+ */
+ NVME_QUIRK_DELAY_BEFORE_CHK_RDY = (1 << 3),
};
+/* The below value is the specific amount of delay needed before checking
+ * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
+ * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
+ * found empirically.
+ */
+#define NVME_QUIRK_DELAY_AMOUNT 2000
+
enum nvme_ctrl_state {
NVME_CTRL_NEW,
NVME_CTRL_LIVE,
NVME_CTRL_RESETTING,
+ NVME_CTRL_RECONNECTING,
NVME_CTRL_DELETING,
NVME_CTRL_DEAD,
};
@@ -80,6 +99,7 @@ struct nvme_ctrl {
spinlock_t lock;
const struct nvme_ctrl_ops *ops;
struct request_queue *admin_q;
+ struct request_queue *connect_q;
struct device *dev;
struct kref kref;
int instance;
@@ -107,10 +127,22 @@ struct nvme_ctrl {
u8 event_limit;
u8 vwc;
u32 vs;
+ u32 sgls;
+ u16 kas;
+ unsigned int kato;
bool subsystem;
unsigned long quirks;
struct work_struct scan_work;
struct work_struct async_event_work;
+ struct delayed_work ka_work;
+
+ /* Fabrics only */
+ u16 sqsize;
+ u32 ioccsz;
+ u32 iorcsz;
+ u16 icdoff;
+ u16 maxcmd;
+ struct nvmf_ctrl_options *opts;
};
/*
@@ -144,7 +176,9 @@ struct nvme_ns {
};
struct nvme_ctrl_ops {
+ const char *name;
struct module *module;
+ bool is_fabrics;
int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
@@ -152,6 +186,9 @@ struct nvme_ctrl_ops {
void (*free_ctrl)(struct nvme_ctrl *ctrl);
void (*post_scan)(struct nvme_ctrl *ctrl);
void (*submit_async_event)(struct nvme_ctrl *ctrl, int aer_idx);
+ int (*delete_ctrl)(struct nvme_ctrl *ctrl);
+ const char *(*get_subsysnqn)(struct nvme_ctrl *ctrl);
+ int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
};
static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
@@ -177,7 +214,7 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
static inline unsigned nvme_map_len(struct request *rq)
{
- if (rq->cmd_flags & REQ_DISCARD)
+ if (req_op(rq) == REQ_OP_DISCARD)
return sizeof(struct nvme_dsm_range);
else
return blk_rq_bytes(rq);
@@ -185,7 +222,7 @@ static inline unsigned nvme_map_len(struct request *rq)
static inline void nvme_cleanup_cmd(struct request *req)
{
- if (req->cmd_flags & REQ_DISCARD)
+ if (req_op(req) == REQ_OP_DISCARD)
kfree(req->completion_data);
}
@@ -204,9 +241,11 @@ static inline int nvme_error_status(u16 status)
static inline bool nvme_req_needs_retry(struct request *req, u16 status)
{
return !(status & NVME_SC_DNR || blk_noretry_request(req)) &&
- (jiffies - req->start_time) < req->timeout;
+ (jiffies - req->start_time) < req->timeout &&
+ req->retries < nvme_max_retries;
}
+void nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
@@ -230,8 +269,9 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
+#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
- struct nvme_command *cmd, unsigned int flags);
+ struct nvme_command *cmd, unsigned int flags, int qid);
void nvme_requeue_req(struct request *req);
int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmd);
@@ -239,7 +279,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
struct nvme_completion *cqe, void *buffer, unsigned bufflen,
- unsigned timeout);
+ unsigned timeout, int qid, int at_head, int flags);
int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
void __user *ubuffer, unsigned bufflen, u32 *result,
unsigned timeout);
@@ -256,6 +296,8 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
dma_addr_t dma_addr, u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
+void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
+void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
struct sg_io_hdr;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index befac5b19..60f7eab11 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -310,6 +310,11 @@ static int nvme_init_iod(struct request *rq, unsigned size,
iod->npages = -1;
iod->nents = 0;
iod->length = size;
+
+ if (!(rq->cmd_flags & REQ_DONTPREP)) {
+ rq->retries = 0;
+ rq->cmd_flags |= REQ_DONTPREP;
+ }
return 0;
}
@@ -520,8 +525,8 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
goto out_unmap;
}
- cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
- cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
+ cmnd->rw.dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ cmnd->rw.dptr.prp2 = cpu_to_le64(iod->first_dma);
if (blk_integrity_rq(req))
cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
return BLK_MQ_RQ_QUEUE_OK;
@@ -623,6 +628,7 @@ static void nvme_complete_rq(struct request *req)
if (unlikely(req->errors)) {
if (nvme_req_needs_retry(req, req->errors)) {
+ req->retries++;
nvme_requeue_req(req);
return;
}
@@ -901,7 +907,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
req->tag, nvmeq->qid);
abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
- BLK_MQ_REQ_NOWAIT);
+ BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
if (IS_ERR(abort_req)) {
atomic_inc(&dev->ctrl.abort_limit);
return BLK_EH_RESET_TIMER;
@@ -919,22 +925,6 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
return BLK_EH_RESET_TIMER;
}
-static void nvme_cancel_io(struct request *req, void *data, bool reserved)
-{
- int status;
-
- if (!blk_mq_request_started(req))
- return;
-
- dev_dbg_ratelimited(((struct nvme_dev *) data)->ctrl.device,
- "Cancelling I/O %d", req->tag);
-
- status = NVME_SC_ABORT_REQ;
- if (blk_queue_dying(req->q))
- status |= NVME_SC_DNR;
- blk_mq_complete_request(req, status);
-}
-
static void nvme_free_queue(struct nvme_queue *nvmeq)
{
dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
@@ -1399,16 +1389,8 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (result < 0)
return result;
- /*
- * Degraded controllers might return an error when setting the queue
- * count. We still want to be able to bring them online and offer
- * access to the admin queue, as that might be only way to fix them up.
- */
- if (result > 0) {
- dev_err(dev->ctrl.device,
- "Could not set queue count (%d)\n", result);
+ if (nr_io_queues == 0)
return 0;
- }
if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) {
result = nvme_cmb_qdepth(dev, nr_io_queues,
@@ -1536,7 +1518,7 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
cmd.delete_queue.opcode = opcode;
cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
- req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT);
+ req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -1561,15 +1543,10 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
reinit_completion(&dev->ioq_wait);
retry:
timeout = ADMIN_TIMEOUT;
- for (; i > 0; i--) {
- struct nvme_queue *nvmeq = dev->queues[i];
-
- if (!pass)
- nvme_suspend_queue(nvmeq);
- if (nvme_delete_queue(nvmeq, opcode))
+ for (; i > 0; i--, sent++)
+ if (nvme_delete_queue(dev->queues[i], opcode))
break;
- ++sent;
- }
+
while (sent--) {
timeout = wait_for_completion_io_timeout(&dev->ioq_wait, timeout);
if (timeout == 0)
@@ -1679,14 +1656,9 @@ static int nvme_pci_enable(struct nvme_dev *dev)
static void nvme_dev_unmap(struct nvme_dev *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- int bars;
-
if (dev->bar)
iounmap(dev->bar);
-
- bars = pci_select_bars(pdev, IORESOURCE_MEM);
- pci_release_selected_regions(pdev, bars);
+ pci_release_mem_regions(to_pci_dev(dev->dev));
}
static void nvme_pci_disable(struct nvme_dev *dev)
@@ -1716,19 +1688,25 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
nvme_stop_queues(&dev->ctrl);
csts = readl(dev->bar + NVME_REG_CSTS);
}
+
+ for (i = dev->queue_count - 1; i > 0; i--)
+ nvme_suspend_queue(dev->queues[i]);
+
if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
- for (i = dev->queue_count - 1; i >= 0; i--) {
- struct nvme_queue *nvmeq = dev->queues[i];
- nvme_suspend_queue(nvmeq);
- }
+ /* A device might become IO incapable very soon during
+ * probe, before the admin queue is configured. Thus,
+ * queue_count can be 0 here.
+ */
+ if (dev->queue_count)
+ nvme_suspend_queue(dev->queues[0]);
} else {
nvme_disable_io_queues(dev);
nvme_disable_admin_queue(dev, shutdown);
}
nvme_pci_disable(dev);
- blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_io, dev);
- blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_io, dev);
+ blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
+ blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl);
mutex_unlock(&dev->shutdown_lock);
}
@@ -1902,6 +1880,7 @@ static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
}
static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
+ .name = "pcie",
.module = THIS_MODULE,
.reg_read32 = nvme_pci_reg_read32,
.reg_write32 = nvme_pci_reg_write32,
@@ -1914,13 +1893,9 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
static int nvme_dev_map(struct nvme_dev *dev)
{
- int bars;
struct pci_dev *pdev = to_pci_dev(dev->dev);
- bars = pci_select_bars(pdev, IORESOURCE_MEM);
- if (!bars)
- return -ENODEV;
- if (pci_request_selected_regions(pdev, bars, "nvme"))
+ if (pci_request_mem_regions(pdev, "nvme"))
return -ENODEV;
dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
@@ -1929,7 +1904,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
return 0;
release:
- pci_release_selected_regions(pdev, bars);
+ pci_release_mem_regions(pdev);
return -ENODEV;
}
@@ -1940,7 +1915,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
node = dev_to_node(&pdev->dev);
if (node == NUMA_NO_NODE)
- set_dev_node(&pdev->dev, 0);
+ set_dev_node(&pdev->dev, first_memory_node);
dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
if (!dev)
@@ -2037,6 +2012,24 @@ static void nvme_remove(struct pci_dev *pdev)
nvme_put_ctrl(&dev->ctrl);
}
+static int nvme_pci_sriov_configure(struct pci_dev *pdev, int numvfs)
+{
+ int ret = 0;
+
+ if (numvfs == 0) {
+ if (pci_vfs_assigned(pdev)) {
+ dev_warn(&pdev->dev,
+ "Cannot disable SR-IOV VFs while assigned\n");
+ return -EPERM;
+ }
+ pci_disable_sriov(pdev);
+ return 0;
+ }
+
+ ret = pci_enable_sriov(pdev, numvfs);
+ return ret ? ret : numvfs;
+}
+
#ifdef CONFIG_PM_SLEEP
static int nvme_suspend(struct device *dev)
{
@@ -2122,6 +2115,10 @@ static const struct pci_device_id nvme_id_table[] = {
NVME_QUIRK_DISCARD_ZEROES, },
{ PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
+ { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
+ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+ { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */
+ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
{ 0, }
@@ -2137,6 +2134,7 @@ static struct pci_driver nvme_driver = {
.driver = {
.pm = &nvme_dev_pm_ops,
},
+ .sriov_configure = nvme_pci_sriov_configure,
.err_handler = &nvme_err_handler,
};
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
new file mode 100644
index 000000000..fbdb2267e
--- /dev/null
+++ b/drivers/nvme/host/rdma.c
@@ -0,0 +1,2033 @@
+/*
+ * NVMe over Fabrics RDMA host code.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/atomic.h>
+#include <linux/blk-mq.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/scatterlist.h>
+#include <linux/nvme.h>
+#include <asm/unaligned.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_cm.h>
+#include <rdma/ib_cm.h>
+#include <linux/nvme-rdma.h>
+
+#include "nvme.h"
+#include "fabrics.h"
+
+
+#define NVME_RDMA_CONNECT_TIMEOUT_MS 1000 /* 1 second */
+
+#define NVME_RDMA_MAX_SEGMENT_SIZE 0xffffff /* 24-bit SGL field */
+
+#define NVME_RDMA_MAX_SEGMENTS 256
+
+#define NVME_RDMA_MAX_INLINE_SEGMENTS 1
+
+/*
+ * We handle AEN commands ourselves and don't even let the
+ * block layer know about them.
+ */
+#define NVME_RDMA_NR_AEN_COMMANDS 1
+#define NVME_RDMA_AQ_BLKMQ_DEPTH \
+ (NVMF_AQ_DEPTH - NVME_RDMA_NR_AEN_COMMANDS)
+
+struct nvme_rdma_device {
+ struct ib_device *dev;
+ struct ib_pd *pd;
+ struct ib_mr *mr;
+ struct kref ref;
+ struct list_head entry;
+};
+
+struct nvme_rdma_qe {
+ struct ib_cqe cqe;
+ void *data;
+ u64 dma;
+};
+
+struct nvme_rdma_queue;
+struct nvme_rdma_request {
+ struct ib_mr *mr;
+ struct nvme_rdma_qe sqe;
+ struct ib_sge sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
+ u32 num_sge;
+ int nents;
+ bool inline_data;
+ struct ib_reg_wr reg_wr;
+ struct ib_cqe reg_cqe;
+ struct nvme_rdma_queue *queue;
+ struct sg_table sg_table;
+ struct scatterlist first_sgl[];
+};
+
+enum nvme_rdma_queue_flags {
+ NVME_RDMA_Q_CONNECTED = (1 << 0),
+ NVME_RDMA_IB_QUEUE_ALLOCATED = (1 << 1),
+ NVME_RDMA_Q_DELETING = (1 << 2),
+};
+
+struct nvme_rdma_queue {
+ struct nvme_rdma_qe *rsp_ring;
+ u8 sig_count;
+ int queue_size;
+ size_t cmnd_capsule_len;
+ struct nvme_rdma_ctrl *ctrl;
+ struct nvme_rdma_device *device;
+ struct ib_cq *ib_cq;
+ struct ib_qp *qp;
+
+ unsigned long flags;
+ struct rdma_cm_id *cm_id;
+ int cm_error;
+ struct completion cm_done;
+};
+
+struct nvme_rdma_ctrl {
+ /* read and written in the hot path */
+ spinlock_t lock;
+
+ /* read only in the hot path */
+ struct nvme_rdma_queue *queues;
+ u32 queue_count;
+
+ /* other member variables */
+ struct blk_mq_tag_set tag_set;
+ struct work_struct delete_work;
+ struct work_struct reset_work;
+ struct work_struct err_work;
+
+ struct nvme_rdma_qe async_event_sqe;
+
+ int reconnect_delay;
+ struct delayed_work reconnect_work;
+
+ struct list_head list;
+
+ struct blk_mq_tag_set admin_tag_set;
+ struct nvme_rdma_device *device;
+
+ u64 cap;
+ u32 max_fr_pages;
+
+ union {
+ struct sockaddr addr;
+ struct sockaddr_in addr_in;
+ };
+
+ struct nvme_ctrl ctrl;
+};
+
+static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
+{
+ return container_of(ctrl, struct nvme_rdma_ctrl, ctrl);
+}
+
+static LIST_HEAD(device_list);
+static DEFINE_MUTEX(device_list_mutex);
+
+static LIST_HEAD(nvme_rdma_ctrl_list);
+static DEFINE_MUTEX(nvme_rdma_ctrl_mutex);
+
+static struct workqueue_struct *nvme_rdma_wq;
+
+/*
+ * Disabling this option makes small I/O go faster, but is fundamentally
+ * unsafe. With it turned off we will have to register a global rkey that
+ * allows read and write access to all physical memory.
+ */
+static bool register_always = true;
+module_param(register_always, bool, 0444);
+MODULE_PARM_DESC(register_always,
+ "Use memory registration even for contiguous memory regions");
+
+static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event);
+static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+
+/* XXX: really should move to a generic header sooner or later.. */
+static inline void put_unaligned_le24(u32 val, u8 *p)
+{
+ *p++ = val;
+ *p++ = val >> 8;
+ *p++ = val >> 16;
+}
+
+static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
+{
+ return queue - queue->ctrl->queues;
+}
+
+static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)
+{
+ return queue->cmnd_capsule_len - sizeof(struct nvme_command);
+}
+
+static void nvme_rdma_free_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
+ size_t capsule_size, enum dma_data_direction dir)
+{
+ ib_dma_unmap_single(ibdev, qe->dma, capsule_size, dir);
+ kfree(qe->data);
+}
+
+static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
+ size_t capsule_size, enum dma_data_direction dir)
+{
+ qe->data = kzalloc(capsule_size, GFP_KERNEL);
+ if (!qe->data)
+ return -ENOMEM;
+
+ qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
+ if (ib_dma_mapping_error(ibdev, qe->dma)) {
+ kfree(qe->data);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void nvme_rdma_free_ring(struct ib_device *ibdev,
+ struct nvme_rdma_qe *ring, size_t ib_queue_size,
+ size_t capsule_size, enum dma_data_direction dir)
+{
+ int i;
+
+ for (i = 0; i < ib_queue_size; i++)
+ nvme_rdma_free_qe(ibdev, &ring[i], capsule_size, dir);
+ kfree(ring);
+}
+
+static struct nvme_rdma_qe *nvme_rdma_alloc_ring(struct ib_device *ibdev,
+ size_t ib_queue_size, size_t capsule_size,
+ enum dma_data_direction dir)
+{
+ struct nvme_rdma_qe *ring;
+ int i;
+
+ ring = kcalloc(ib_queue_size, sizeof(struct nvme_rdma_qe), GFP_KERNEL);
+ if (!ring)
+ return NULL;
+
+ for (i = 0; i < ib_queue_size; i++) {
+ if (nvme_rdma_alloc_qe(ibdev, &ring[i], capsule_size, dir))
+ goto out_free_ring;
+ }
+
+ return ring;
+
+out_free_ring:
+ nvme_rdma_free_ring(ibdev, ring, i, capsule_size, dir);
+ return NULL;
+}
+
+static void nvme_rdma_qp_event(struct ib_event *event, void *context)
+{
+ pr_debug("QP event %d\n", event->event);
+}
+
+static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue)
+{
+ wait_for_completion_interruptible_timeout(&queue->cm_done,
+ msecs_to_jiffies(NVME_RDMA_CONNECT_TIMEOUT_MS) + 1);
+ return queue->cm_error;
+}
+
+static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)
+{
+ struct nvme_rdma_device *dev = queue->device;
+ struct ib_qp_init_attr init_attr;
+ int ret;
+
+ memset(&init_attr, 0, sizeof(init_attr));
+ init_attr.event_handler = nvme_rdma_qp_event;
+ /* +1 for drain */
+ init_attr.cap.max_send_wr = factor * queue->queue_size + 1;
+ /* +1 for drain */
+ init_attr.cap.max_recv_wr = queue->queue_size + 1;
+ init_attr.cap.max_recv_sge = 1;
+ init_attr.cap.max_send_sge = 1 + NVME_RDMA_MAX_INLINE_SEGMENTS;
+ init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+ init_attr.qp_type = IB_QPT_RC;
+ init_attr.send_cq = queue->ib_cq;
+ init_attr.recv_cq = queue->ib_cq;
+
+ ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr);
+
+ queue->qp = queue->cm_id->qp;
+ return ret;
+}
+
+static int nvme_rdma_reinit_request(void *data, struct request *rq)
+{
+ struct nvme_rdma_ctrl *ctrl = data;
+ struct nvme_rdma_device *dev = ctrl->device;
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ int ret = 0;
+
+ if (!req->mr->need_inval)
+ goto out;
+
+ ib_dereg_mr(req->mr);
+
+ req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
+ ctrl->max_fr_pages);
+ if (IS_ERR(req->mr)) {
+ ret = PTR_ERR(req->mr);
+ req->mr = NULL;
+ goto out;
+ }
+
+ req->mr->need_inval = false;
+
+out:
+ return ret;
+}
+
+static void __nvme_rdma_exit_request(struct nvme_rdma_ctrl *ctrl,
+ struct request *rq, unsigned int queue_idx)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
+ struct nvme_rdma_device *dev = queue->device;
+
+ if (req->mr)
+ ib_dereg_mr(req->mr);
+
+ nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command),
+ DMA_TO_DEVICE);
+}
+
+static void nvme_rdma_exit_request(void *data, struct request *rq,
+ unsigned int hctx_idx, unsigned int rq_idx)
+{
+ return __nvme_rdma_exit_request(data, rq, hctx_idx + 1);
+}
+
+static void nvme_rdma_exit_admin_request(void *data, struct request *rq,
+ unsigned int hctx_idx, unsigned int rq_idx)
+{
+ return __nvme_rdma_exit_request(data, rq, 0);
+}
+
+static int __nvme_rdma_init_request(struct nvme_rdma_ctrl *ctrl,
+ struct request *rq, unsigned int queue_idx)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
+ struct nvme_rdma_device *dev = queue->device;
+ struct ib_device *ibdev = dev->dev;
+ int ret;
+
+ BUG_ON(queue_idx >= ctrl->queue_count);
+
+ ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command),
+ DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+
+ req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
+ ctrl->max_fr_pages);
+ if (IS_ERR(req->mr)) {
+ ret = PTR_ERR(req->mr);
+ goto out_free_qe;
+ }
+
+ req->queue = queue;
+
+ return 0;
+
+out_free_qe:
+ nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command),
+ DMA_TO_DEVICE);
+ return -ENOMEM;
+}
+
+static int nvme_rdma_init_request(void *data, struct request *rq,
+ unsigned int hctx_idx, unsigned int rq_idx,
+ unsigned int numa_node)
+{
+ return __nvme_rdma_init_request(data, rq, hctx_idx + 1);
+}
+
+static int nvme_rdma_init_admin_request(void *data, struct request *rq,
+ unsigned int hctx_idx, unsigned int rq_idx,
+ unsigned int numa_node)
+{
+ return __nvme_rdma_init_request(data, rq, 0);
+}
+
+static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ struct nvme_rdma_ctrl *ctrl = data;
+ struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1];
+
+ BUG_ON(hctx_idx >= ctrl->queue_count);
+
+ hctx->driver_data = queue;
+ return 0;
+}
+
+static int nvme_rdma_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ struct nvme_rdma_ctrl *ctrl = data;
+ struct nvme_rdma_queue *queue = &ctrl->queues[0];
+
+ BUG_ON(hctx_idx != 0);
+
+ hctx->driver_data = queue;
+ return 0;
+}
+
+static void nvme_rdma_free_dev(struct kref *ref)
+{
+ struct nvme_rdma_device *ndev =
+ container_of(ref, struct nvme_rdma_device, ref);
+
+ mutex_lock(&device_list_mutex);
+ list_del(&ndev->entry);
+ mutex_unlock(&device_list_mutex);
+
+ if (!register_always)
+ ib_dereg_mr(ndev->mr);
+ ib_dealloc_pd(ndev->pd);
+
+ kfree(ndev);
+}
+
+static void nvme_rdma_dev_put(struct nvme_rdma_device *dev)
+{
+ kref_put(&dev->ref, nvme_rdma_free_dev);
+}
+
+static int nvme_rdma_dev_get(struct nvme_rdma_device *dev)
+{
+ return kref_get_unless_zero(&dev->ref);
+}
+
+static struct nvme_rdma_device *
+nvme_rdma_find_get_device(struct rdma_cm_id *cm_id)
+{
+ struct nvme_rdma_device *ndev;
+
+ mutex_lock(&device_list_mutex);
+ list_for_each_entry(ndev, &device_list, entry) {
+ if (ndev->dev->node_guid == cm_id->device->node_guid &&
+ nvme_rdma_dev_get(ndev))
+ goto out_unlock;
+ }
+
+ ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
+ if (!ndev)
+ goto out_err;
+
+ ndev->dev = cm_id->device;
+ kref_init(&ndev->ref);
+
+ ndev->pd = ib_alloc_pd(ndev->dev);
+ if (IS_ERR(ndev->pd))
+ goto out_free_dev;
+
+ if (!register_always) {
+ ndev->mr = ib_get_dma_mr(ndev->pd,
+ IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE);
+ if (IS_ERR(ndev->mr))
+ goto out_free_pd;
+ }
+
+ if (!(ndev->dev->attrs.device_cap_flags &
+ IB_DEVICE_MEM_MGT_EXTENSIONS)) {
+ dev_err(&ndev->dev->dev,
+ "Memory registrations not supported.\n");
+ goto out_free_mr;
+ }
+
+ list_add(&ndev->entry, &device_list);
+out_unlock:
+ mutex_unlock(&device_list_mutex);
+ return ndev;
+
+out_free_mr:
+ if (!register_always)
+ ib_dereg_mr(ndev->mr);
+out_free_pd:
+ ib_dealloc_pd(ndev->pd);
+out_free_dev:
+ kfree(ndev);
+out_err:
+ mutex_unlock(&device_list_mutex);
+ return NULL;
+}
+
+static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
+{
+ struct nvme_rdma_device *dev;
+ struct ib_device *ibdev;
+
+ if (!test_and_clear_bit(NVME_RDMA_IB_QUEUE_ALLOCATED, &queue->flags))
+ return;
+
+ dev = queue->device;
+ ibdev = dev->dev;
+ rdma_destroy_qp(queue->cm_id);
+ ib_free_cq(queue->ib_cq);
+
+ nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
+ sizeof(struct nvme_completion), DMA_FROM_DEVICE);
+
+ nvme_rdma_dev_put(dev);
+}
+
+static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue,
+ struct nvme_rdma_device *dev)
+{
+ struct ib_device *ibdev = dev->dev;
+ const int send_wr_factor = 3; /* MR, SEND, INV */
+ const int cq_factor = send_wr_factor + 1; /* + RECV */
+ int comp_vector, idx = nvme_rdma_queue_idx(queue);
+
+ int ret;
+
+ queue->device = dev;
+
+ /*
+ * The admin queue is barely used once the controller is live, so don't
+ * bother to spread it out.
+ */
+ if (idx == 0)
+ comp_vector = 0;
+ else
+ comp_vector = idx % ibdev->num_comp_vectors;
+
+
+ /* +1 for ib_stop_cq */
+ queue->ib_cq = ib_alloc_cq(dev->dev, queue,
+ cq_factor * queue->queue_size + 1, comp_vector,
+ IB_POLL_SOFTIRQ);
+ if (IS_ERR(queue->ib_cq)) {
+ ret = PTR_ERR(queue->ib_cq);
+ goto out;
+ }
+
+ ret = nvme_rdma_create_qp(queue, send_wr_factor);
+ if (ret)
+ goto out_destroy_ib_cq;
+
+ queue->rsp_ring = nvme_rdma_alloc_ring(ibdev, queue->queue_size,
+ sizeof(struct nvme_completion), DMA_FROM_DEVICE);
+ if (!queue->rsp_ring) {
+ ret = -ENOMEM;
+ goto out_destroy_qp;
+ }
+ set_bit(NVME_RDMA_IB_QUEUE_ALLOCATED, &queue->flags);
+
+ return 0;
+
+out_destroy_qp:
+ ib_destroy_qp(queue->qp);
+out_destroy_ib_cq:
+ ib_free_cq(queue->ib_cq);
+out:
+ return ret;
+}
+
+static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
+ int idx, size_t queue_size)
+{
+ struct nvme_rdma_queue *queue;
+ int ret;
+
+ queue = &ctrl->queues[idx];
+ queue->ctrl = ctrl;
+ init_completion(&queue->cm_done);
+
+ if (idx > 0)
+ queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
+ else
+ queue->cmnd_capsule_len = sizeof(struct nvme_command);
+
+ queue->queue_size = queue_size;
+
+ queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue,
+ RDMA_PS_TCP, IB_QPT_RC);
+ if (IS_ERR(queue->cm_id)) {
+ dev_info(ctrl->ctrl.device,
+ "failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id));
+ return PTR_ERR(queue->cm_id);
+ }
+
+ queue->cm_error = -ETIMEDOUT;
+ ret = rdma_resolve_addr(queue->cm_id, NULL, &ctrl->addr,
+ NVME_RDMA_CONNECT_TIMEOUT_MS);
+ if (ret) {
+ dev_info(ctrl->ctrl.device,
+ "rdma_resolve_addr failed (%d).\n", ret);
+ goto out_destroy_cm_id;
+ }
+
+ ret = nvme_rdma_wait_for_cm(queue);
+ if (ret) {
+ dev_info(ctrl->ctrl.device,
+ "rdma_resolve_addr wait failed (%d).\n", ret);
+ goto out_destroy_cm_id;
+ }
+
+ clear_bit(NVME_RDMA_Q_DELETING, &queue->flags);
+ set_bit(NVME_RDMA_Q_CONNECTED, &queue->flags);
+
+ return 0;
+
+out_destroy_cm_id:
+ nvme_rdma_destroy_queue_ib(queue);
+ rdma_destroy_id(queue->cm_id);
+ return ret;
+}
+
+static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
+{
+ rdma_disconnect(queue->cm_id);
+ ib_drain_qp(queue->qp);
+}
+
+static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
+{
+ nvme_rdma_destroy_queue_ib(queue);
+ rdma_destroy_id(queue->cm_id);
+}
+
+static void nvme_rdma_stop_and_free_queue(struct nvme_rdma_queue *queue)
+{
+ if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags))
+ return;
+ nvme_rdma_stop_queue(queue);
+ nvme_rdma_free_queue(queue);
+}
+
+static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl)
+{
+ int i;
+
+ for (i = 1; i < ctrl->queue_count; i++)
+ nvme_rdma_stop_and_free_queue(&ctrl->queues[i]);
+}
+
+static int nvme_rdma_connect_io_queues(struct nvme_rdma_ctrl *ctrl)
+{
+ int i, ret = 0;
+
+ for (i = 1; i < ctrl->queue_count; i++) {
+ ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
+{
+ int i, ret;
+
+ for (i = 1; i < ctrl->queue_count; i++) {
+ ret = nvme_rdma_init_queue(ctrl, i,
+ ctrl->ctrl.opts->queue_size);
+ if (ret) {
+ dev_info(ctrl->ctrl.device,
+ "failed to initialize i/o queue: %d\n", ret);
+ goto out_free_queues;
+ }
+ }
+
+ return 0;
+
+out_free_queues:
+ for (i--; i >= 1; i--)
+ nvme_rdma_stop_and_free_queue(&ctrl->queues[i]);
+
+ return ret;
+}
+
+static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl)
+{
+ nvme_rdma_free_qe(ctrl->queues[0].device->dev, &ctrl->async_event_sqe,
+ sizeof(struct nvme_command), DMA_TO_DEVICE);
+ nvme_rdma_stop_and_free_queue(&ctrl->queues[0]);
+ blk_cleanup_queue(ctrl->ctrl.admin_q);
+ blk_mq_free_tag_set(&ctrl->admin_tag_set);
+ nvme_rdma_dev_put(ctrl->device);
+}
+
+static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
+{
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
+
+ if (list_empty(&ctrl->list))
+ goto free_ctrl;
+
+ mutex_lock(&nvme_rdma_ctrl_mutex);
+ list_del(&ctrl->list);
+ mutex_unlock(&nvme_rdma_ctrl_mutex);
+
+ kfree(ctrl->queues);
+ nvmf_free_options(nctrl->opts);
+free_ctrl:
+ kfree(ctrl);
+}
+
+static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
+{
+ struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work),
+ struct nvme_rdma_ctrl, reconnect_work);
+ bool changed;
+ int ret;
+
+ if (ctrl->queue_count > 1) {
+ nvme_rdma_free_io_queues(ctrl);
+
+ ret = blk_mq_reinit_tagset(&ctrl->tag_set);
+ if (ret)
+ goto requeue;
+ }
+
+ nvme_rdma_stop_and_free_queue(&ctrl->queues[0]);
+
+ ret = blk_mq_reinit_tagset(&ctrl->admin_tag_set);
+ if (ret)
+ goto requeue;
+
+ ret = nvme_rdma_init_queue(ctrl, 0, NVMF_AQ_DEPTH);
+ if (ret)
+ goto requeue;
+
+ blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true);
+
+ ret = nvmf_connect_admin_queue(&ctrl->ctrl);
+ if (ret)
+ goto stop_admin_q;
+
+ ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
+ if (ret)
+ goto stop_admin_q;
+
+ nvme_start_keep_alive(&ctrl->ctrl);
+
+ if (ctrl->queue_count > 1) {
+ ret = nvme_rdma_init_io_queues(ctrl);
+ if (ret)
+ goto stop_admin_q;
+
+ ret = nvme_rdma_connect_io_queues(ctrl);
+ if (ret)
+ goto stop_admin_q;
+ }
+
+ changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+ WARN_ON_ONCE(!changed);
+
+ if (ctrl->queue_count > 1) {
+ nvme_start_queues(&ctrl->ctrl);
+ nvme_queue_scan(&ctrl->ctrl);
+ nvme_queue_async_events(&ctrl->ctrl);
+ }
+
+ dev_info(ctrl->ctrl.device, "Successfully reconnected\n");
+
+ return;
+
+stop_admin_q:
+ blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
+requeue:
+ /* Make sure we are not resetting/deleting */
+ if (ctrl->ctrl.state == NVME_CTRL_RECONNECTING) {
+ dev_info(ctrl->ctrl.device,
+ "Failed reconnect attempt, requeueing...\n");
+ queue_delayed_work(nvme_rdma_wq, &ctrl->reconnect_work,
+ ctrl->reconnect_delay * HZ);
+ }
+}
+
+static void nvme_rdma_error_recovery_work(struct work_struct *work)
+{
+ struct nvme_rdma_ctrl *ctrl = container_of(work,
+ struct nvme_rdma_ctrl, err_work);
+ int i;
+
+ nvme_stop_keep_alive(&ctrl->ctrl);
+
+ for (i = 0; i < ctrl->queue_count; i++)
+ clear_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[i].flags);
+
+ if (ctrl->queue_count > 1)
+ nvme_stop_queues(&ctrl->ctrl);
+ blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
+
+ /* We must take care of fastfail/requeue all our inflight requests */
+ if (ctrl->queue_count > 1)
+ blk_mq_tagset_busy_iter(&ctrl->tag_set,
+ nvme_cancel_request, &ctrl->ctrl);
+ blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
+ nvme_cancel_request, &ctrl->ctrl);
+
+ dev_info(ctrl->ctrl.device, "reconnecting in %d seconds\n",
+ ctrl->reconnect_delay);
+
+ queue_delayed_work(nvme_rdma_wq, &ctrl->reconnect_work,
+ ctrl->reconnect_delay * HZ);
+}
+
+static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
+{
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING))
+ return;
+
+ queue_work(nvme_rdma_wq, &ctrl->err_work);
+}
+
+static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
+ const char *op)
+{
+ struct nvme_rdma_queue *queue = cq->cq_context;
+ struct nvme_rdma_ctrl *ctrl = queue->ctrl;
+
+ if (ctrl->ctrl.state == NVME_CTRL_LIVE)
+ dev_info(ctrl->ctrl.device,
+ "%s for CQE 0x%p failed with status %s (%d)\n",
+ op, wc->wr_cqe,
+ ib_wc_status_msg(wc->status), wc->status);
+ nvme_rdma_error_recovery(ctrl);
+}
+
+static void nvme_rdma_memreg_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ if (unlikely(wc->status != IB_WC_SUCCESS))
+ nvme_rdma_wr_error(cq, wc, "MEMREG");
+}
+
+static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ if (unlikely(wc->status != IB_WC_SUCCESS))
+ nvme_rdma_wr_error(cq, wc, "LOCAL_INV");
+}
+
+static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
+ struct nvme_rdma_request *req)
+{
+ struct ib_send_wr *bad_wr;
+ struct ib_send_wr wr = {
+ .opcode = IB_WR_LOCAL_INV,
+ .next = NULL,
+ .num_sge = 0,
+ .send_flags = 0,
+ .ex.invalidate_rkey = req->mr->rkey,
+ };
+
+ req->reg_cqe.done = nvme_rdma_inv_rkey_done;
+ wr.wr_cqe = &req->reg_cqe;
+
+ return ib_post_send(queue->qp, &wr, &bad_wr);
+}
+
+static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
+ struct request *rq)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_ctrl *ctrl = queue->ctrl;
+ struct nvme_rdma_device *dev = queue->device;
+ struct ib_device *ibdev = dev->dev;
+ int res;
+
+ if (!blk_rq_bytes(rq))
+ return;
+
+ if (req->mr->need_inval) {
+ res = nvme_rdma_inv_rkey(queue, req);
+ if (res < 0) {
+ dev_err(ctrl->ctrl.device,
+ "Queueing INV WR for rkey %#x failed (%d)\n",
+ req->mr->rkey, res);
+ nvme_rdma_error_recovery(queue->ctrl);
+ }
+ }
+
+ ib_dma_unmap_sg(ibdev, req->sg_table.sgl,
+ req->nents, rq_data_dir(rq) ==
+ WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ nvme_cleanup_cmd(rq);
+ sg_free_table_chained(&req->sg_table, true);
+}
+
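+/* Build a zero-length keyed SGL descriptor for commands that carry no data. */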
+static int nvme_rdma_set_sg_null(struct nvme_command *c)
+{
+ struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
+
+ sg->addr = 0;
+ put_unaligned_le24(0, sg->length);
+ put_unaligned_le32(0, sg->key);
+ sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
+ return 0;
+}
+
+static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
+ struct nvme_rdma_request *req, struct nvme_command *c)
+{
+ struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
+
+ req->sge[1].addr = sg_dma_address(req->sg_table.sgl);
+ req->sge[1].length = sg_dma_len(req->sg_table.sgl);
+ req->sge[1].lkey = queue->device->pd->local_dma_lkey;
+
+ sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
+ sg->length = cpu_to_le32(sg_dma_len(req->sg_table.sgl));
+ sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
+
+ req->inline_data = true;
+ req->num_sge++;
+ return 0;
+}
+
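+/*
+ * Describe a single DMA-mapped segment using the device's global MR
+ * rkey.  This avoids a per-command memory registration, which is why
+ * it is skipped when register_always is set.
+ */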
+static int nvme_rdma_map_sg_single(struct nvme_rdma_queue *queue,
+ struct nvme_rdma_request *req, struct nvme_command *c)
+{
+ struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
+
+ sg->addr = cpu_to_le64(sg_dma_address(req->sg_table.sgl));
+ put_unaligned_le24(sg_dma_len(req->sg_table.sgl), sg->length);
+ put_unaligned_le32(queue->device->mr->rkey, sg->key);
+ sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
+ return 0;
+}
+
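+/*
+ * Register the scatterlist through a fast registration MR and describe
+ * it with a single keyed SGL descriptor.  NVME_SGL_FMT_INVALIDATE asks
+ * the target to remotely invalidate the rkey once it is done with it.
+ */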
+static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
+ struct nvme_rdma_request *req, struct nvme_command *c,
+ int count)
+{
+ struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
+ int nr;
+
+ nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, PAGE_SIZE);
+ if (nr < count) {
+ if (nr < 0)
+ return nr;
+ return -EINVAL;
+ }
+
+ ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
+
+ req->reg_cqe.done = nvme_rdma_memreg_done;
+ memset(&req->reg_wr, 0, sizeof(req->reg_wr));
+ req->reg_wr.wr.opcode = IB_WR_REG_MR;
+ req->reg_wr.wr.wr_cqe = &req->reg_cqe;
+ req->reg_wr.wr.num_sge = 0;
+ req->reg_wr.mr = req->mr;
+ req->reg_wr.key = req->mr->rkey;
+ req->reg_wr.access = IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE;
+
+ req->mr->need_inval = true;
+
+ sg->addr = cpu_to_le64(req->mr->iova);
+ put_unaligned_le24(req->mr->length, sg->length);
+ put_unaligned_le32(req->mr->rkey, sg->key);
+ sg->type = (NVME_KEY_SGL_FMT_DATA_DESC << 4) |
+ NVME_SGL_FMT_INVALIDATE;
+
+ return 0;
+}
+
+static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
+ struct request *rq, unsigned int map_len,
+ struct nvme_command *c)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_device *dev = queue->device;
+ struct ib_device *ibdev = dev->dev;
+ int nents, count;
+ int ret;
+
+ req->num_sge = 1;
+ req->inline_data = false;
+ req->mr->need_inval = false;
+
+ c->common.flags |= NVME_CMD_SGL_METABUF;
+
+ if (!blk_rq_bytes(rq))
+ return nvme_rdma_set_sg_null(c);
+
+ req->sg_table.sgl = req->first_sgl;
+ ret = sg_alloc_table_chained(&req->sg_table, rq->nr_phys_segments,
+ req->sg_table.sgl);
+ if (ret)
+ return -ENOMEM;
+
+ nents = blk_rq_map_sg(rq->q, rq, req->sg_table.sgl);
+ BUG_ON(nents > rq->nr_phys_segments);
+ req->nents = nents;
+
+ count = ib_dma_map_sg(ibdev, req->sg_table.sgl, nents,
+ rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (unlikely(count <= 0)) {
+ sg_free_table_chained(&req->sg_table, true);
+ return -EIO;
+ }
+
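+	/*
+	 * Pick a mapping strategy: a single WRITE segment that fits the
+	 * inline data size is sent in the command capsule (I/O queues
+	 * only); a single segment may otherwise use the global rkey unless
+	 * register_always is set; everything else needs a fast registration
+	 * MR.
+	 */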
+ if (count == 1) {
+ if (rq_data_dir(rq) == WRITE &&
+ map_len <= nvme_rdma_inline_data_size(queue) &&
+ nvme_rdma_queue_idx(queue))
+ return nvme_rdma_map_sg_inline(queue, req, c);
+
+ if (!register_always)
+ return nvme_rdma_map_sg_single(queue, req, c);
+ }
+
+ return nvme_rdma_map_sg_fr(queue, req, c, count);
+}
+
+static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ if (unlikely(wc->status != IB_WC_SUCCESS))
+ nvme_rdma_wr_error(cq, wc, "SEND");
+}
+
+static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
+ struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
+ struct ib_send_wr *first, bool flush)
+{
+ struct ib_send_wr wr, *bad_wr;
+ int ret;
+
+ sge->addr = qe->dma;
+	sge->length = sizeof(struct nvme_command);
+ sge->lkey = queue->device->pd->local_dma_lkey;
+
+ qe->cqe.done = nvme_rdma_send_done;
+
+ wr.next = NULL;
+ wr.wr_cqe = &qe->cqe;
+ wr.sg_list = sge;
+ wr.num_sge = num_sge;
+ wr.opcode = IB_WR_SEND;
+ wr.send_flags = 0;
+
+ /*
+	 * Unsignalled send completions are another giant disaster in the
+	 * IB Verbs spec: if we don't regularly post signalled sends
+	 * the send queue will fill up and only a QP reset will rescue us.
+	 * It would have been way too obvious to handle this in hardware or
+	 * at least in the RDMA stack..
+	 *
+	 * This messy and racy code snippet is copied and pasted from the
+	 * iSER initiator, and the magic '32' comes from there as well.
+	 *
+	 * Always signal the flushes. The magic request used for the flush
+	 * sequencer is not allocated in our driver's tagset and it's
+	 * triggered to be freed by blk_cleanup_queue(). So we need to
+	 * always mark it as signaled to ensure that the "wr_cqe", which is
+	 * embedded in the request's payload, is not freed when
+	 * __ib_process_cq() calls wr_cqe->done().
+ */
+ if ((++queue->sig_count % 32) == 0 || flush)
+ wr.send_flags |= IB_SEND_SIGNALED;
+
+ if (first)
+ first->next = &wr;
+ else
+ first = &wr;
+
+ ret = ib_post_send(queue->qp, first, &bad_wr);
+ if (ret) {
+ dev_err(queue->ctrl->ctrl.device,
+ "%s failed with error code %d\n", __func__, ret);
+ }
+ return ret;
+}
+
+static int nvme_rdma_post_recv(struct nvme_rdma_queue *queue,
+ struct nvme_rdma_qe *qe)
+{
+ struct ib_recv_wr wr, *bad_wr;
+ struct ib_sge list;
+ int ret;
+
+ list.addr = qe->dma;
+ list.length = sizeof(struct nvme_completion);
+ list.lkey = queue->device->pd->local_dma_lkey;
+
+ qe->cqe.done = nvme_rdma_recv_done;
+
+ wr.next = NULL;
+ wr.wr_cqe = &qe->cqe;
+ wr.sg_list = &list;
+ wr.num_sge = 1;
+
+ ret = ib_post_recv(queue->qp, &wr, &bad_wr);
+ if (ret) {
+ dev_err(queue->ctrl->ctrl.device,
+ "%s failed with error code %d\n", __func__, ret);
+ }
+ return ret;
+}
+
+static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue)
+{
+ u32 queue_idx = nvme_rdma_queue_idx(queue);
+
+ if (queue_idx == 0)
+ return queue->ctrl->admin_tag_set.tags[queue_idx];
+ return queue->ctrl->tag_set.tags[queue_idx - 1];
+}
+
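+/*
+ * Async event commands are not allocated from the admin blk-mq tagset;
+ * they use command id NVME_RDMA_AQ_BLKMQ_DEPTH (one past the last valid
+ * tag), so the receive path can tell their completions apart from
+ * regular requests.
+ */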
+static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
+{
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
+ struct nvme_rdma_queue *queue = &ctrl->queues[0];
+ struct ib_device *dev = queue->device->dev;
+ struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe;
+ struct nvme_command *cmd = sqe->data;
+ struct ib_sge sge;
+ int ret;
+
+ if (WARN_ON_ONCE(aer_idx != 0))
+ return;
+
+ ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE);
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->common.opcode = nvme_admin_async_event;
+ cmd->common.command_id = NVME_RDMA_AQ_BLKMQ_DEPTH;
+ cmd->common.flags |= NVME_CMD_SGL_METABUF;
+ nvme_rdma_set_sg_null(cmd);
+
+ ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd),
+ DMA_TO_DEVICE);
+
+ ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL, false);
+ WARN_ON_ONCE(ret);
+}
+
+static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
+ struct nvme_completion *cqe, struct ib_wc *wc, int tag)
+{
+ u16 status = le16_to_cpu(cqe->status);
+ struct request *rq;
+ struct nvme_rdma_request *req;
+ int ret = 0;
+
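+	/* Bit 0 of the completion status field is the phase tag; drop it. */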
+ status >>= 1;
+
+ rq = blk_mq_tag_to_rq(nvme_rdma_tagset(queue), cqe->command_id);
+ if (!rq) {
+ dev_err(queue->ctrl->ctrl.device,
+ "tag 0x%x on QP %#x not found\n",
+ cqe->command_id, queue->qp->qp_num);
+ nvme_rdma_error_recovery(queue->ctrl);
+ return ret;
+ }
+ req = blk_mq_rq_to_pdu(rq);
+
+ if (rq->cmd_type == REQ_TYPE_DRV_PRIV && rq->special)
+ memcpy(rq->special, cqe, sizeof(*cqe));
+
+ if (rq->tag == tag)
+ ret = 1;
+
+ if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) &&
+ wc->ex.invalidate_rkey == req->mr->rkey)
+ req->mr->need_inval = false;
+
+ blk_mq_complete_request(rq, status);
+
+ return ret;
+}
+
+static int __nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc, int tag)
+{
+ struct nvme_rdma_qe *qe =
+ container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
+ struct nvme_rdma_queue *queue = cq->cq_context;
+ struct ib_device *ibdev = queue->device->dev;
+ struct nvme_completion *cqe = qe->data;
+ const size_t len = sizeof(struct nvme_completion);
+ int ret = 0;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ nvme_rdma_wr_error(cq, wc, "RECV");
+ return 0;
+ }
+
+ ib_dma_sync_single_for_cpu(ibdev, qe->dma, len, DMA_FROM_DEVICE);
+ /*
+ * AEN requests are special as they don't time out and can
+ * survive any kind of queue freeze and often don't respond to
+ * aborts. We don't even bother to allocate a struct request
+ * for them but rather special case them here.
+ */
+ if (unlikely(nvme_rdma_queue_idx(queue) == 0 &&
+ cqe->command_id >= NVME_RDMA_AQ_BLKMQ_DEPTH))
+ nvme_complete_async_event(&queue->ctrl->ctrl, cqe);
+ else
+ ret = nvme_rdma_process_nvme_rsp(queue, cqe, wc, tag);
+ ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE);
+
+ nvme_rdma_post_recv(queue, qe);
+ return ret;
+}
+
+static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ __nvme_rdma_recv_done(cq, wc, -1);
+}
+
+static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue)
+{
+ int ret, i;
+
+ for (i = 0; i < queue->queue_size; i++) {
+ ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]);
+ if (ret)
+ goto out_destroy_queue_ib;
+ }
+
+ return 0;
+
+out_destroy_queue_ib:
+ nvme_rdma_destroy_queue_ib(queue);
+ return ret;
+}
+
+static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
+ struct rdma_cm_event *ev)
+{
+ if (ev->param.conn.private_data_len) {
+ struct nvme_rdma_cm_rej *rej =
+ (struct nvme_rdma_cm_rej *)ev->param.conn.private_data;
+
+ dev_err(queue->ctrl->ctrl.device,
+			"Connect rejected, status %d\n", le16_to_cpu(rej->sts));
+ /* XXX: Think of something clever to do here... */
+ } else {
+ dev_err(queue->ctrl->ctrl.device,
+ "Connect rejected, no private data.\n");
+ }
+
+ return -ECONNRESET;
+}
+
+static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)
+{
+ struct nvme_rdma_device *dev;
+ int ret;
+
+ dev = nvme_rdma_find_get_device(queue->cm_id);
+ if (!dev) {
+ dev_err(queue->cm_id->device->dma_device,
+ "no client data found!\n");
+ return -ECONNREFUSED;
+ }
+
+ ret = nvme_rdma_create_queue_ib(queue, dev);
+ if (ret) {
+ nvme_rdma_dev_put(dev);
+ goto out;
+ }
+
+ ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CONNECT_TIMEOUT_MS);
+ if (ret) {
+ dev_err(queue->ctrl->ctrl.device,
+ "rdma_resolve_route failed (%d).\n",
+ queue->cm_error);
+ goto out_destroy_queue;
+ }
+
+ return 0;
+
+out_destroy_queue:
+ nvme_rdma_destroy_queue_ib(queue);
+out:
+ return ret;
+}
+
+static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
+{
+ struct nvme_rdma_ctrl *ctrl = queue->ctrl;
+ struct rdma_conn_param param = { };
+ struct nvme_rdma_cm_req priv = { };
+ int ret;
+
+ param.qp_num = queue->qp->qp_num;
+ param.flow_control = 1;
+
+ param.responder_resources = queue->device->dev->attrs.max_qp_rd_atom;
+ /* maximum retry count */
+ param.retry_count = 7;
+ param.rnr_retry_count = 7;
+ param.private_data = &priv;
+ param.private_data_len = sizeof(priv);
+
+ priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
+ priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue));
+ /*
+ * set the admin queue depth to the minimum size
+ * specified by the Fabrics standard.
+ */
+ if (priv.qid == 0) {
+ priv.hrqsize = cpu_to_le16(NVMF_AQ_DEPTH);
+ priv.hsqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
+ } else {
+ /*
+		 * The current interpretation of the fabrics spec is
+		 * that, at minimum, hrqsize is sqsize + 1, i.e. a
+		 * 1's based representation of sqsize.
+ */
+ priv.hrqsize = cpu_to_le16(queue->queue_size);
+ priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
+ }
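+	/*
+	 * Example (assuming queue->queue_size == opts->queue_size): with a
+	 * queue_size of 128 the host set sqsize to 127 (0's based), so an
+	 * I/O queue advertises hrqsize = 128 and hsqsize = 127 here.
+	 */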
+
+ ret = rdma_connect(queue->cm_id, &param);
+ if (ret) {
+ dev_err(ctrl->ctrl.device,
+ "rdma_connect failed (%d).\n", ret);
+ goto out_destroy_queue_ib;
+ }
+
+ return 0;
+
+out_destroy_queue_ib:
+ nvme_rdma_destroy_queue_ib(queue);
+ return ret;
+}
+
+static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *ev)
+{
+ struct nvme_rdma_queue *queue = cm_id->context;
+ int cm_error = 0;
+
+ dev_dbg(queue->ctrl->ctrl.device, "%s (%d): status %d id %p\n",
+ rdma_event_msg(ev->event), ev->event,
+ ev->status, cm_id);
+
+ switch (ev->event) {
+ case RDMA_CM_EVENT_ADDR_RESOLVED:
+ cm_error = nvme_rdma_addr_resolved(queue);
+ break;
+ case RDMA_CM_EVENT_ROUTE_RESOLVED:
+ cm_error = nvme_rdma_route_resolved(queue);
+ break;
+ case RDMA_CM_EVENT_ESTABLISHED:
+ queue->cm_error = nvme_rdma_conn_established(queue);
+ /* complete cm_done regardless of success/failure */
+ complete(&queue->cm_done);
+ return 0;
+ case RDMA_CM_EVENT_REJECTED:
+ cm_error = nvme_rdma_conn_rejected(queue, ev);
+ break;
+ case RDMA_CM_EVENT_ADDR_ERROR:
+ case RDMA_CM_EVENT_ROUTE_ERROR:
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ case RDMA_CM_EVENT_UNREACHABLE:
+ dev_dbg(queue->ctrl->ctrl.device,
+ "CM error event %d\n", ev->event);
+ cm_error = -ECONNRESET;
+ break;
+ case RDMA_CM_EVENT_DISCONNECTED:
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+ dev_dbg(queue->ctrl->ctrl.device,
+ "disconnect received - connection closed\n");
+ nvme_rdma_error_recovery(queue->ctrl);
+ break;
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ /* device removal is handled via the ib_client API */
+ break;
+ default:
+ dev_err(queue->ctrl->ctrl.device,
+ "Unexpected RDMA CM event (%d)\n", ev->event);
+ nvme_rdma_error_recovery(queue->ctrl);
+ break;
+ }
+
+ if (cm_error) {
+ queue->cm_error = cm_error;
+ complete(&queue->cm_done);
+ }
+
+ return 0;
+}
+
+static enum blk_eh_timer_return
+nvme_rdma_timeout(struct request *rq, bool reserved)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+
+ /* queue error recovery */
+ nvme_rdma_error_recovery(req->queue->ctrl);
+
+ /* fail with DNR on cmd timeout */
+ rq->errors = NVME_SC_ABORT_REQ | NVME_SC_DNR;
+
+ return BLK_EH_HANDLED;
+}
+
+static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct nvme_ns *ns = hctx->queue->queuedata;
+ struct nvme_rdma_queue *queue = hctx->driver_data;
+ struct request *rq = bd->rq;
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_qe *sqe = &req->sqe;
+ struct nvme_command *c = sqe->data;
+ bool flush = false;
+ struct ib_device *dev;
+ unsigned int map_len;
+ int ret;
+
+ WARN_ON_ONCE(rq->tag < 0);
+
+ dev = queue->device->dev;
+ ib_dma_sync_single_for_cpu(dev, sqe->dma,
+ sizeof(struct nvme_command), DMA_TO_DEVICE);
+
+ ret = nvme_setup_cmd(ns, rq, c);
+ if (ret)
+ return ret;
+
+ c->common.command_id = rq->tag;
+ blk_mq_start_request(rq);
+
+ map_len = nvme_map_len(rq);
+ ret = nvme_rdma_map_data(queue, rq, map_len, c);
+ if (ret < 0) {
+ dev_err(queue->ctrl->ctrl.device,
+ "Failed to map data (%d)\n", ret);
+ nvme_cleanup_cmd(rq);
+ goto err;
+ }
+
+ ib_dma_sync_single_for_device(dev, sqe->dma,
+ sizeof(struct nvme_command), DMA_TO_DEVICE);
+
+ if (rq->cmd_type == REQ_TYPE_FS && req_op(rq) == REQ_OP_FLUSH)
+ flush = true;
+ ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
+ req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
+ if (ret) {
+ nvme_rdma_unmap_data(queue, rq);
+ goto err;
+ }
+
+ return BLK_MQ_RQ_QUEUE_OK;
+err:
+ return (ret == -ENOMEM || ret == -EAGAIN) ?
+ BLK_MQ_RQ_QUEUE_BUSY : BLK_MQ_RQ_QUEUE_ERROR;
+}
+
+static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
+{
+ struct nvme_rdma_queue *queue = hctx->driver_data;
+ struct ib_cq *cq = queue->ib_cq;
+ struct ib_wc wc;
+ int found = 0;
+
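+	/*
+	 * Re-arm the CQ, then drain it inline: receive completions are
+	 * matched against the polled tag via __nvme_rdma_recv_done(),
+	 * everything else is dispatched to its normal done handler.
+	 */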
+ ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+ while (ib_poll_cq(cq, 1, &wc) > 0) {
+ struct ib_cqe *cqe = wc.wr_cqe;
+
+ if (cqe) {
+ if (cqe->done == nvme_rdma_recv_done)
+ found |= __nvme_rdma_recv_done(cq, &wc, tag);
+ else
+ cqe->done(cq, &wc);
+ }
+ }
+
+ return found;
+}
+
+static void nvme_rdma_complete_rq(struct request *rq)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_queue *queue = req->queue;
+ int error = 0;
+
+ nvme_rdma_unmap_data(queue, rq);
+
+ if (unlikely(rq->errors)) {
+ if (nvme_req_needs_retry(rq, rq->errors)) {
+ nvme_requeue_req(rq);
+ return;
+ }
+
+ if (rq->cmd_type == REQ_TYPE_DRV_PRIV)
+ error = rq->errors;
+ else
+ error = nvme_error_status(rq->errors);
+ }
+
+ blk_mq_end_request(rq, error);
+}
+
+static struct blk_mq_ops nvme_rdma_mq_ops = {
+ .queue_rq = nvme_rdma_queue_rq,
+ .complete = nvme_rdma_complete_rq,
+ .map_queue = blk_mq_map_queue,
+ .init_request = nvme_rdma_init_request,
+ .exit_request = nvme_rdma_exit_request,
+ .reinit_request = nvme_rdma_reinit_request,
+ .init_hctx = nvme_rdma_init_hctx,
+ .poll = nvme_rdma_poll,
+ .timeout = nvme_rdma_timeout,
+};
+
+static struct blk_mq_ops nvme_rdma_admin_mq_ops = {
+ .queue_rq = nvme_rdma_queue_rq,
+ .complete = nvme_rdma_complete_rq,
+ .map_queue = blk_mq_map_queue,
+ .init_request = nvme_rdma_init_admin_request,
+ .exit_request = nvme_rdma_exit_admin_request,
+ .reinit_request = nvme_rdma_reinit_request,
+ .init_hctx = nvme_rdma_init_admin_hctx,
+ .timeout = nvme_rdma_timeout,
+};
+
+static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
+{
+ int error;
+
+ error = nvme_rdma_init_queue(ctrl, 0, NVMF_AQ_DEPTH);
+ if (error)
+ return error;
+
+ ctrl->device = ctrl->queues[0].device;
+
+ /*
+ * We need a reference on the device as long as the tag_set is alive,
+ * as the MRs in the request structures need a valid ib_device.
+ */
+ error = -EINVAL;
+ if (!nvme_rdma_dev_get(ctrl->device))
+ goto out_free_queue;
+
+ ctrl->max_fr_pages = min_t(u32, NVME_RDMA_MAX_SEGMENTS,
+ ctrl->device->dev->attrs.max_fast_reg_page_list_len);
+
+ memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
+ ctrl->admin_tag_set.ops = &nvme_rdma_admin_mq_ops;
+ ctrl->admin_tag_set.queue_depth = NVME_RDMA_AQ_BLKMQ_DEPTH;
+ ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
+ ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
+ ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_rdma_request) +
+ SG_CHUNK_SIZE * sizeof(struct scatterlist);
+ ctrl->admin_tag_set.driver_data = ctrl;
+ ctrl->admin_tag_set.nr_hw_queues = 1;
+ ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
+
+ error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
+ if (error)
+ goto out_put_dev;
+
+ ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
+ if (IS_ERR(ctrl->ctrl.admin_q)) {
+ error = PTR_ERR(ctrl->ctrl.admin_q);
+ goto out_free_tagset;
+ }
+
+ error = nvmf_connect_admin_queue(&ctrl->ctrl);
+ if (error)
+ goto out_cleanup_queue;
+
+ error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
+ if (error) {
+ dev_err(ctrl->ctrl.device,
+ "prop_get NVME_REG_CAP failed\n");
+ goto out_cleanup_queue;
+ }
+
+ ctrl->ctrl.sqsize =
+ min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
+
+ error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
+ if (error)
+ goto out_cleanup_queue;
+
+ ctrl->ctrl.max_hw_sectors =
+ (ctrl->max_fr_pages - 1) << (PAGE_SHIFT - 9);
+
+ error = nvme_init_identify(&ctrl->ctrl);
+ if (error)
+ goto out_cleanup_queue;
+
+ error = nvme_rdma_alloc_qe(ctrl->queues[0].device->dev,
+ &ctrl->async_event_sqe, sizeof(struct nvme_command),
+ DMA_TO_DEVICE);
+ if (error)
+ goto out_cleanup_queue;
+
+ nvme_start_keep_alive(&ctrl->ctrl);
+
+ return 0;
+
+out_cleanup_queue:
+ blk_cleanup_queue(ctrl->ctrl.admin_q);
+out_free_tagset:
+ /* disconnect and drain the queue before freeing the tagset */
+ nvme_rdma_stop_queue(&ctrl->queues[0]);
+ blk_mq_free_tag_set(&ctrl->admin_tag_set);
+out_put_dev:
+ nvme_rdma_dev_put(ctrl->device);
+out_free_queue:
+ nvme_rdma_free_queue(&ctrl->queues[0]);
+ return error;
+}
+
+static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl)
+{
+ nvme_stop_keep_alive(&ctrl->ctrl);
+ cancel_work_sync(&ctrl->err_work);
+ cancel_delayed_work_sync(&ctrl->reconnect_work);
+
+ if (ctrl->queue_count > 1) {
+ nvme_stop_queues(&ctrl->ctrl);
+ blk_mq_tagset_busy_iter(&ctrl->tag_set,
+ nvme_cancel_request, &ctrl->ctrl);
+ nvme_rdma_free_io_queues(ctrl);
+ }
+
+ if (test_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[0].flags))
+ nvme_shutdown_ctrl(&ctrl->ctrl);
+
+ blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
+ blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
+ nvme_cancel_request, &ctrl->ctrl);
+ nvme_rdma_destroy_admin_queue(ctrl);
+}
+
+static void __nvme_rdma_remove_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
+{
+ nvme_uninit_ctrl(&ctrl->ctrl);
+ if (shutdown)
+ nvme_rdma_shutdown_ctrl(ctrl);
+
+ if (ctrl->ctrl.tagset) {
+ blk_cleanup_queue(ctrl->ctrl.connect_q);
+ blk_mq_free_tag_set(&ctrl->tag_set);
+ nvme_rdma_dev_put(ctrl->device);
+ }
+
+ nvme_put_ctrl(&ctrl->ctrl);
+}
+
+static void nvme_rdma_del_ctrl_work(struct work_struct *work)
+{
+ struct nvme_rdma_ctrl *ctrl = container_of(work,
+ struct nvme_rdma_ctrl, delete_work);
+
+ __nvme_rdma_remove_ctrl(ctrl, true);
+}
+
+static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl)
+{
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
+ return -EBUSY;
+
+ if (!queue_work(nvme_rdma_wq, &ctrl->delete_work))
+ return -EBUSY;
+
+ return 0;
+}
+
+static int nvme_rdma_del_ctrl(struct nvme_ctrl *nctrl)
+{
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
+ int ret = 0;
+
+ /*
+ * Keep a reference until all work is flushed since
+	 * __nvme_rdma_del_ctrl can free the ctrl memory.
+ */
+ if (!kref_get_unless_zero(&ctrl->ctrl.kref))
+ return -EBUSY;
+ ret = __nvme_rdma_del_ctrl(ctrl);
+ if (!ret)
+ flush_work(&ctrl->delete_work);
+ nvme_put_ctrl(&ctrl->ctrl);
+ return ret;
+}
+
+static void nvme_rdma_remove_ctrl_work(struct work_struct *work)
+{
+ struct nvme_rdma_ctrl *ctrl = container_of(work,
+ struct nvme_rdma_ctrl, delete_work);
+
+ __nvme_rdma_remove_ctrl(ctrl, false);
+}
+
+static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
+{
+ struct nvme_rdma_ctrl *ctrl = container_of(work,
+ struct nvme_rdma_ctrl, reset_work);
+ int ret;
+ bool changed;
+
+ nvme_rdma_shutdown_ctrl(ctrl);
+
+ ret = nvme_rdma_configure_admin_queue(ctrl);
+ if (ret) {
+ /* ctrl is already shutdown, just remove the ctrl */
+ INIT_WORK(&ctrl->delete_work, nvme_rdma_remove_ctrl_work);
+ goto del_dead_ctrl;
+ }
+
+ if (ctrl->queue_count > 1) {
+ ret = blk_mq_reinit_tagset(&ctrl->tag_set);
+ if (ret)
+ goto del_dead_ctrl;
+
+ ret = nvme_rdma_init_io_queues(ctrl);
+ if (ret)
+ goto del_dead_ctrl;
+
+ ret = nvme_rdma_connect_io_queues(ctrl);
+ if (ret)
+ goto del_dead_ctrl;
+ }
+
+ changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+ WARN_ON_ONCE(!changed);
+
+ if (ctrl->queue_count > 1) {
+ nvme_start_queues(&ctrl->ctrl);
+ nvme_queue_scan(&ctrl->ctrl);
+ nvme_queue_async_events(&ctrl->ctrl);
+ }
+
+ return;
+
+del_dead_ctrl:
+ /* Deleting this dead controller... */
+ dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
+ WARN_ON(!queue_work(nvme_rdma_wq, &ctrl->delete_work));
+}
+
+static int nvme_rdma_reset_ctrl(struct nvme_ctrl *nctrl)
+{
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
+
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
+ return -EBUSY;
+
+ if (!queue_work(nvme_rdma_wq, &ctrl->reset_work))
+ return -EBUSY;
+
+ flush_work(&ctrl->reset_work);
+
+ return 0;
+}
+
+static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
+ .name = "rdma",
+ .module = THIS_MODULE,
+ .is_fabrics = true,
+ .reg_read32 = nvmf_reg_read32,
+ .reg_read64 = nvmf_reg_read64,
+ .reg_write32 = nvmf_reg_write32,
+ .reset_ctrl = nvme_rdma_reset_ctrl,
+ .free_ctrl = nvme_rdma_free_ctrl,
+ .submit_async_event = nvme_rdma_submit_async_event,
+ .delete_ctrl = nvme_rdma_del_ctrl,
+ .get_subsysnqn = nvmf_get_subsysnqn,
+ .get_address = nvmf_get_address,
+};
+
+static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl)
+{
+ struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+ int ret;
+
+ ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
+ if (ret)
+ return ret;
+
+ ctrl->queue_count = opts->nr_io_queues + 1;
+ if (ctrl->queue_count < 2)
+ return 0;
+
+ dev_info(ctrl->ctrl.device,
+ "creating %d I/O queues.\n", opts->nr_io_queues);
+
+ ret = nvme_rdma_init_io_queues(ctrl);
+ if (ret)
+ return ret;
+
+ /*
+ * We need a reference on the device as long as the tag_set is alive,
+ * as the MRs in the request structures need a valid ib_device.
+ */
+ ret = -EINVAL;
+ if (!nvme_rdma_dev_get(ctrl->device))
+ goto out_free_io_queues;
+
+ memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
+ ctrl->tag_set.ops = &nvme_rdma_mq_ops;
+ ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
+ ctrl->tag_set.reserved_tags = 1; /* fabric connect */
+ ctrl->tag_set.numa_node = NUMA_NO_NODE;
+ ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ ctrl->tag_set.cmd_size = sizeof(struct nvme_rdma_request) +
+ SG_CHUNK_SIZE * sizeof(struct scatterlist);
+ ctrl->tag_set.driver_data = ctrl;
+ ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
+ ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
+
+ ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
+ if (ret)
+ goto out_put_dev;
+ ctrl->ctrl.tagset = &ctrl->tag_set;
+
+ ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
+ if (IS_ERR(ctrl->ctrl.connect_q)) {
+ ret = PTR_ERR(ctrl->ctrl.connect_q);
+ goto out_free_tag_set;
+ }
+
+ ret = nvme_rdma_connect_io_queues(ctrl);
+ if (ret)
+ goto out_cleanup_connect_q;
+
+ return 0;
+
+out_cleanup_connect_q:
+ blk_cleanup_queue(ctrl->ctrl.connect_q);
+out_free_tag_set:
+ blk_mq_free_tag_set(&ctrl->tag_set);
+out_put_dev:
+ nvme_rdma_dev_put(ctrl->device);
+out_free_io_queues:
+ nvme_rdma_free_io_queues(ctrl);
+ return ret;
+}
+
+static int nvme_rdma_parse_ipaddr(struct sockaddr_in *in_addr, char *p)
+{
+ u8 *addr = (u8 *)&in_addr->sin_addr.s_addr;
+ size_t buflen = strlen(p);
+
+ /* XXX: handle IPv6 addresses */
+
+ if (buflen > INET_ADDRSTRLEN)
+ return -EINVAL;
+ if (in4_pton(p, buflen, addr, '\0', NULL) == 0)
+ return -EINVAL;
+ in_addr->sin_family = AF_INET;
+ return 0;
+}
+
+static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
+ struct nvmf_ctrl_options *opts)
+{
+ struct nvme_rdma_ctrl *ctrl;
+ int ret;
+ bool changed;
+
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return ERR_PTR(-ENOMEM);
+ ctrl->ctrl.opts = opts;
+ INIT_LIST_HEAD(&ctrl->list);
+
+ ret = nvme_rdma_parse_ipaddr(&ctrl->addr_in, opts->traddr);
+ if (ret) {
+ pr_err("malformed IP address passed: %s\n", opts->traddr);
+ goto out_free_ctrl;
+ }
+
+ if (opts->mask & NVMF_OPT_TRSVCID) {
+ u16 port;
+
+ ret = kstrtou16(opts->trsvcid, 0, &port);
+ if (ret)
+ goto out_free_ctrl;
+
+ ctrl->addr_in.sin_port = cpu_to_be16(port);
+ } else {
+ ctrl->addr_in.sin_port = cpu_to_be16(NVME_RDMA_IP_PORT);
+ }
+
+ ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
+ 0 /* no quirks, we're perfect! */);
+ if (ret)
+ goto out_free_ctrl;
+
+ ctrl->reconnect_delay = opts->reconnect_delay;
+ INIT_DELAYED_WORK(&ctrl->reconnect_work,
+ nvme_rdma_reconnect_ctrl_work);
+ INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
+ INIT_WORK(&ctrl->delete_work, nvme_rdma_del_ctrl_work);
+ INIT_WORK(&ctrl->reset_work, nvme_rdma_reset_ctrl_work);
+ spin_lock_init(&ctrl->lock);
+
+ ctrl->queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
+ ctrl->ctrl.sqsize = opts->queue_size - 1;
+ ctrl->ctrl.kato = opts->kato;
+
+ ret = -ENOMEM;
+ ctrl->queues = kcalloc(ctrl->queue_count, sizeof(*ctrl->queues),
+ GFP_KERNEL);
+ if (!ctrl->queues)
+ goto out_uninit_ctrl;
+
+ ret = nvme_rdma_configure_admin_queue(ctrl);
+ if (ret)
+ goto out_kfree_queues;
+
+ /* sanity check icdoff */
+ if (ctrl->ctrl.icdoff) {
+ dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
+ goto out_remove_admin_queue;
+ }
+
+ /* sanity check keyed sgls */
+ if (!(ctrl->ctrl.sgls & (1 << 20))) {
+		dev_err(ctrl->ctrl.device, "Mandatory keyed sgls are not supported\n");
+ goto out_remove_admin_queue;
+ }
+
+ if (opts->queue_size > ctrl->ctrl.maxcmd) {
+ /* warn if maxcmd is lower than queue_size */
+ dev_warn(ctrl->ctrl.device,
+ "queue_size %zu > ctrl maxcmd %u, clamping down\n",
+ opts->queue_size, ctrl->ctrl.maxcmd);
+ opts->queue_size = ctrl->ctrl.maxcmd;
+ }
+
+ if (opts->nr_io_queues) {
+ ret = nvme_rdma_create_io_queues(ctrl);
+ if (ret)
+ goto out_remove_admin_queue;
+ }
+
+ changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+ WARN_ON_ONCE(!changed);
+
+ dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
+ ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
+
+ kref_get(&ctrl->ctrl.kref);
+
+ mutex_lock(&nvme_rdma_ctrl_mutex);
+ list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
+ mutex_unlock(&nvme_rdma_ctrl_mutex);
+
+ if (opts->nr_io_queues) {
+ nvme_queue_scan(&ctrl->ctrl);
+ nvme_queue_async_events(&ctrl->ctrl);
+ }
+
+ return &ctrl->ctrl;
+
+out_remove_admin_queue:
+ nvme_stop_keep_alive(&ctrl->ctrl);
+ nvme_rdma_destroy_admin_queue(ctrl);
+out_kfree_queues:
+ kfree(ctrl->queues);
+out_uninit_ctrl:
+ nvme_uninit_ctrl(&ctrl->ctrl);
+ nvme_put_ctrl(&ctrl->ctrl);
+ if (ret > 0)
+ ret = -EIO;
+ return ERR_PTR(ret);
+out_free_ctrl:
+ kfree(ctrl);
+ return ERR_PTR(ret);
+}
+
+static struct nvmf_transport_ops nvme_rdma_transport = {
+ .name = "rdma",
+ .required_opts = NVMF_OPT_TRADDR,
+ .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY,
+ .create_ctrl = nvme_rdma_create_ctrl,
+};
+
+static void nvme_rdma_add_one(struct ib_device *ib_device)
+{
+}
+
+static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
+{
+ struct nvme_rdma_ctrl *ctrl;
+
+ /* Delete all controllers using this device */
+ mutex_lock(&nvme_rdma_ctrl_mutex);
+ list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
+ if (ctrl->device->dev != ib_device)
+ continue;
+ dev_info(ctrl->ctrl.device,
+ "Removing ctrl: NQN \"%s\", addr %pISp\n",
+ ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
+ __nvme_rdma_del_ctrl(ctrl);
+ }
+ mutex_unlock(&nvme_rdma_ctrl_mutex);
+
+ flush_workqueue(nvme_rdma_wq);
+}
+
+static struct ib_client nvme_rdma_ib_client = {
+ .name = "nvme_rdma",
+ .add = nvme_rdma_add_one,
+ .remove = nvme_rdma_remove_one
+};
+
+static int __init nvme_rdma_init_module(void)
+{
+ int ret;
+
+ nvme_rdma_wq = create_workqueue("nvme_rdma_wq");
+ if (!nvme_rdma_wq)
+ return -ENOMEM;
+
+ ret = ib_register_client(&nvme_rdma_ib_client);
+ if (ret) {
+ destroy_workqueue(nvme_rdma_wq);
+ return ret;
+ }
+
+ nvmf_register_transport(&nvme_rdma_transport);
+ return 0;
+}
+
+static void __exit nvme_rdma_cleanup_module(void)
+{
+ nvmf_unregister_transport(&nvme_rdma_transport);
+ ib_unregister_client(&nvme_rdma_ib_client);
+ destroy_workqueue(nvme_rdma_wq);
+}
+
+module_init(nvme_rdma_init_module);
+module_exit(nvme_rdma_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
new file mode 100644
index 000000000..3a5b9d057
--- /dev/null
+++ b/drivers/nvme/target/Kconfig
@@ -0,0 +1,36 @@
+
+config NVME_TARGET
+ tristate "NVMe Target support"
+ depends on BLOCK
+ depends on CONFIGFS_FS
+ help
+	  This enables target-side support for the NVMe protocol, that is,
+ it allows the Linux kernel to implement NVMe subsystems and
+ controllers and export Linux block devices as NVMe namespaces.
+ You need to select at least one of the transports below to make this
+ functionality useful.
+
+ To configure the NVMe target you probably want to use the nvmetcli
+ tool from http://git.infradead.org/users/hch/nvmetcli.git.
+
+config NVME_TARGET_LOOP
+ tristate "NVMe loopback device support"
+ depends on NVME_TARGET
+ select NVME_CORE
+ select NVME_FABRICS
+ select SG_POOL
+ help
+ This enables the NVMe loopback device support, which can be useful
+ to test NVMe host and target side features.
+
+ If unsure, say N.
+
+config NVME_TARGET_RDMA
+ tristate "NVMe over Fabrics RDMA target support"
+ depends on INFINIBAND
+ depends on NVME_TARGET
+ help
+ This enables the NVMe RDMA target support, which allows exporting NVMe
+ devices over RDMA.
+
+ If unsure, say N.
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile
new file mode 100644
index 000000000..b7a06232c
--- /dev/null
+++ b/drivers/nvme/target/Makefile
@@ -0,0 +1,9 @@
+
+obj-$(CONFIG_NVME_TARGET) += nvmet.o
+obj-$(CONFIG_NVME_TARGET_LOOP) += nvme-loop.o
+obj-$(CONFIG_NVME_TARGET_RDMA) += nvmet-rdma.o
+
+nvmet-y += core.o configfs.o admin-cmd.o io-cmd.o fabrics-cmd.o \
+ discovery.o
+nvme-loop-y += loop.o
+nvmet-rdma-y += rdma.o
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
new file mode 100644
index 000000000..47c564b5a
--- /dev/null
+++ b/drivers/nvme/target/admin-cmd.c
@@ -0,0 +1,461 @@
+/*
+ * NVMe admin command implementation.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <generated/utsrelease.h>
+#include "nvmet.h"
+
+u32 nvmet_get_log_page_len(struct nvme_command *cmd)
+{
+ u32 len = le16_to_cpu(cmd->get_log_page.numdu);
+
+ len <<= 16;
+ len += le16_to_cpu(cmd->get_log_page.numdl);
+ /* NUMD is a 0's based value */
+ len += 1;
+ len *= sizeof(u32);
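+	/* e.g. numdu == 0, numdl == 0x3ff: 1024 dwords, i.e. 4096 bytes */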
+
+ return len;
+}
+
+static void nvmet_execute_get_log_page(struct nvmet_req *req)
+{
+ size_t data_len = nvmet_get_log_page_len(req->cmd);
+ void *buf;
+ u16 status = 0;
+
+ buf = kzalloc(data_len, GFP_KERNEL);
+ if (!buf) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ switch (req->cmd->get_log_page.lid) {
+ case 0x01:
+ /*
+ * We currently never set the More bit in the status field,
+ * so all error log entries are invalid and can be zeroed out.
+		 * This is called a minimum viable implementation (TM) of this
+ * mandatory log page.
+ */
+ break;
+ case 0x02:
+ /*
+ * XXX: fill out actual smart log
+ *
+ * We might have a hard time coming up with useful values for
+ * many of the fields, and even when we have useful data
+ * available (e.g. units or commands read/written) those aren't
+ * persistent over power loss.
+ */
+ break;
+ case 0x03:
+ /*
+ * We only support a single firmware slot which always is
+ * active, so we can zero out the whole firmware slot log and
+ * still claim to fully implement this mandatory log page.
+ */
+ break;
+ default:
+ BUG();
+ }
+
+ status = nvmet_copy_to_sgl(req, 0, buf, data_len);
+
+ kfree(buf);
+out:
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvme_id_ctrl *id;
+ u16 status = 0;
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+	/* XXX: figure out how to assign real vendor IDs. */
+ id->vid = 0;
+ id->ssvid = 0;
+
+ memset(id->sn, ' ', sizeof(id->sn));
+ snprintf(id->sn, sizeof(id->sn), "%llx", ctrl->serial);
+
+ memset(id->mn, ' ', sizeof(id->mn));
+ strncpy((char *)id->mn, "Linux", sizeof(id->mn));
+
+ memset(id->fr, ' ', sizeof(id->fr));
+ strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr));
+
+ id->rab = 6;
+
+ /*
+	 * XXX: figure out how we can assign an IEEE OUI, but until then
+ * the safest is to leave it as zeroes.
+ */
+
+	/* we support multiple ports and multiple hosts: */
+ id->mic = (1 << 0) | (1 << 1);
+
+ /* no limit on data transfer sizes for now */
+ id->mdts = 0;
+ id->cntlid = cpu_to_le16(ctrl->cntlid);
+ id->ver = cpu_to_le32(ctrl->subsys->ver);
+
+ /* XXX: figure out what to do about RTD3R/RTD3 */
+ id->oaes = cpu_to_le32(1 << 8);
+ id->ctratt = cpu_to_le32(1 << 0);
+
+ id->oacs = 0;
+
+ /*
+ * We don't really have a practical limit on the number of abort
+	 * commands. But we don't do anything useful for abort either, so
+ * no point in allowing more abort commands than the spec requires.
+ */
+ id->acl = 3;
+
+ id->aerl = NVMET_ASYNC_EVENTS - 1;
+
+ /* first slot is read-only, only one slot supported */
+ id->frmw = (1 << 0) | (1 << 1);
+ id->lpa = (1 << 0) | (1 << 2);
+ id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
+ id->npss = 0;
+
+	/* We support keep-alive timeouts at a granularity of seconds */
+ id->kas = cpu_to_le16(NVMET_KAS);
+
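+	/* required/maximum entry sizes, log2: 64-byte SQEs, 16-byte CQEs */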
+ id->sqes = (0x6 << 4) | 0x6;
+ id->cqes = (0x4 << 4) | 0x4;
+
+ /* no enforcement soft-limit for maxcmd - pick arbitrary high value */
+ id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
+
+ id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
+ id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM);
+
+ /* XXX: don't report vwc if the underlying device is write through */
+ id->vwc = NVME_CTRL_VWC_PRESENT;
+
+ /*
+ * We can't support atomic writes bigger than a LBA without support
+ * from the backend device.
+ */
+ id->awun = 0;
+ id->awupf = 0;
+
+ id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
+ if (ctrl->ops->has_keyed_sgls)
+ id->sgls |= cpu_to_le32(1 << 2);
+ if (ctrl->ops->sqe_inline_size)
+ id->sgls |= cpu_to_le32(1 << 20);
+
+ strcpy(id->subnqn, ctrl->subsys->subsysnqn);
+
+ /* Max command capsule size is sqe + single page of in-capsule data */
+ id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
+ ctrl->ops->sqe_inline_size) / 16);
+ /* Max response capsule size is cqe */
+ id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);
+
+ id->msdbd = ctrl->ops->msdbd;
+
+ /*
+ * Meh, we don't really support any power state. Fake up the same
+ * values that qemu does.
+ */
+ id->psd[0].max_power = cpu_to_le16(0x9c4);
+ id->psd[0].entry_lat = cpu_to_le32(0x10);
+ id->psd[0].exit_lat = cpu_to_le32(0x4);
+
+ status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+
+ kfree(id);
+out:
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_identify_ns(struct nvmet_req *req)
+{
+ struct nvmet_ns *ns;
+ struct nvme_id_ns *id;
+ u16 status = 0;
+
+ ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
+ if (!ns) {
+ status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+ goto out;
+ }
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id) {
+ status = NVME_SC_INTERNAL;
+ goto out_put_ns;
+ }
+
+ /*
+	 * nuse = ncap = nsze isn't always true, but we have no way to find
+ * that out from the underlying device.
+ */
+ id->ncap = id->nuse = id->nsze =
+ cpu_to_le64(ns->size >> ns->blksize_shift);
+
+ /*
+ * We just provide a single LBA format that matches what the
+ * underlying device reports.
+ */
+ id->nlbaf = 0;
+ id->flbas = 0;
+
+ /*
+ * Our namespace might always be shared. Not just with other
+ * controllers, but also with any other user of the block device.
+ */
+ id->nmic = (1 << 0);
+
+ memcpy(&id->nguid, &ns->nguid, sizeof(uuid_le));
+
+ id->lbaf[0].ds = ns->blksize_shift;
+
+ status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+
+ kfree(id);
+out_put_ns:
+ nvmet_put_namespace(ns);
+out:
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_identify_nslist(struct nvmet_req *req)
+{
+ static const int buf_size = 4096;
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_ns *ns;
+ u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
+ __le32 *list;
+ u16 status = 0;
+ int i = 0;
+
+ list = kzalloc(buf_size, GFP_KERNEL);
+ if (!list) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
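+	/*
+	 * Return namespace IDs strictly greater than the NSID in the
+	 * command, up to one 4k page (1024 entries); the rest of the
+	 * buffer stays zeroed.  This assumes the subsystem keeps its
+	 * namespace list sorted by nsid.
+	 */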
+ rcu_read_lock();
+ list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
+ if (ns->nsid <= min_nsid)
+ continue;
+ list[i++] = cpu_to_le32(ns->nsid);
+ if (i == buf_size / sizeof(__le32))
+ break;
+ }
+ rcu_read_unlock();
+
+ status = nvmet_copy_to_sgl(req, 0, list, buf_size);
+
+ kfree(list);
+out:
+ nvmet_req_complete(req, status);
+}
+
+/*
+ * A "minimum viable" abort implementation: the command is mandatory in the
+ * spec, but we are not required to do any useful work.  We couldn't really
+ * do a useful abort anyway, so we don't even bother to wait for the command
+ * to be executed; we return immediately, indicating that the command to
+ * abort wasn't found.
+ */
+static void nvmet_execute_abort(struct nvmet_req *req)
+{
+ nvmet_set_result(req, 1);
+ nvmet_req_complete(req, 0);
+}
+
+static void nvmet_execute_set_features(struct nvmet_req *req)
+{
+ struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
+ u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
+ u64 val;
+ u32 val32;
+ u16 status = 0;
+
+ switch (cdw10 & 0xf) {
+ case NVME_FEAT_NUM_QUEUES:
+ nvmet_set_result(req,
+ (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
+ break;
+ case NVME_FEAT_KATO:
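+		/*
+		 * The timeout arrives in milliseconds; kato is kept in
+		 * seconds, rounded up.
+		 */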
+ val = le64_to_cpu(req->cmd->prop_set.value);
+ val32 = val & 0xffff;
+ req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
+ nvmet_set_result(req, req->sq->ctrl->kato);
+ break;
+ default:
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ break;
+ }
+
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_get_features(struct nvmet_req *req)
+{
+ struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
+ u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
+ u16 status = 0;
+
+ switch (cdw10 & 0xf) {
+ /*
+ * These features are mandatory in the spec, but we don't
+ * have a useful way to implement them. We'll eventually
+ * need to come up with some fake values for these.
+ */
+#if 0
+ case NVME_FEAT_ARBITRATION:
+ break;
+ case NVME_FEAT_POWER_MGMT:
+ break;
+ case NVME_FEAT_TEMP_THRESH:
+ break;
+ case NVME_FEAT_ERR_RECOVERY:
+ break;
+ case NVME_FEAT_IRQ_COALESCE:
+ break;
+ case NVME_FEAT_IRQ_CONFIG:
+ break;
+ case NVME_FEAT_WRITE_ATOMIC:
+ break;
+ case NVME_FEAT_ASYNC_EVENT:
+ break;
+#endif
+ case NVME_FEAT_VOLATILE_WC:
+ nvmet_set_result(req, 1);
+ break;
+ case NVME_FEAT_NUM_QUEUES:
+ nvmet_set_result(req,
+ (subsys->max_qid-1) | ((subsys->max_qid-1) << 16));
+ break;
+ case NVME_FEAT_KATO:
+ nvmet_set_result(req, req->sq->ctrl->kato * 1000);
+ break;
+ default:
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ break;
+ }
+
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_async_event(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+
+ mutex_lock(&ctrl->lock);
+ if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
+ mutex_unlock(&ctrl->lock);
+ nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
+ return;
+ }
+ ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
+ mutex_unlock(&ctrl->lock);
+
+ schedule_work(&ctrl->async_event_work);
+}
+
+static void nvmet_execute_keep_alive(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+
+ pr_debug("ctrl %d update keep-alive timer for %d secs\n",
+ ctrl->cntlid, ctrl->kato);
+
+ mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
+ nvmet_req_complete(req, 0);
+}
+
+int nvmet_parse_admin_cmd(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ req->ns = NULL;
+
+ if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
+ pr_err("nvmet: got admin cmd %d while CC.EN == 0\n",
+ cmd->common.opcode);
+ return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+ }
+ if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
+ pr_err("nvmet: got admin cmd %d while CSTS.RDY == 0\n",
+ cmd->common.opcode);
+ return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+ }
+
+ switch (cmd->common.opcode) {
+ case nvme_admin_get_log_page:
+ req->data_len = nvmet_get_log_page_len(cmd);
+
+ switch (cmd->get_log_page.lid) {
+ case 0x01:
+ case 0x02:
+ case 0x03:
+ req->execute = nvmet_execute_get_log_page;
+ return 0;
+ }
+ break;
+ case nvme_admin_identify:
+ req->data_len = 4096;
+ switch (le32_to_cpu(cmd->identify.cns)) {
+ case 0x00:
+ req->execute = nvmet_execute_identify_ns;
+ return 0;
+ case 0x01:
+ req->execute = nvmet_execute_identify_ctrl;
+ return 0;
+ case 0x02:
+ req->execute = nvmet_execute_identify_nslist;
+ return 0;
+ }
+ break;
+ case nvme_admin_abort_cmd:
+ req->execute = nvmet_execute_abort;
+ req->data_len = 0;
+ return 0;
+ case nvme_admin_set_features:
+ req->execute = nvmet_execute_set_features;
+ req->data_len = 0;
+ return 0;
+ case nvme_admin_get_features:
+ req->execute = nvmet_execute_get_features;
+ req->data_len = 0;
+ return 0;
+ case nvme_admin_async_event:
+ req->execute = nvmet_execute_async_event;
+ req->data_len = 0;
+ return 0;
+ case nvme_admin_keep_alive:
+ req->execute = nvmet_execute_keep_alive;
+ req->data_len = 0;
+ return 0;
+ }
+
+ pr_err("nvmet: unhandled cmd %d\n", cmd->common.opcode);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+}
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
new file mode 100644
index 000000000..af5e2dc4a
--- /dev/null
+++ b/drivers/nvme/target/configfs.c
@@ -0,0 +1,917 @@
+/*
+ * Configfs interface for the NVMe target.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/ctype.h>
+
+#include "nvmet.h"
+
+static struct config_item_type nvmet_host_type;
+static struct config_item_type nvmet_subsys_type;
+
+/*
+ * nvmet_port Generic ConfigFS definitions.
+ * Used in any place in the ConfigFS tree that refers to an address.
+ */
+static ssize_t nvmet_addr_adrfam_show(struct config_item *item,
+ char *page)
+{
+ switch (to_nvmet_port(item)->disc_addr.adrfam) {
+ case NVMF_ADDR_FAMILY_IP4:
+ return sprintf(page, "ipv4\n");
+ case NVMF_ADDR_FAMILY_IP6:
+ return sprintf(page, "ipv6\n");
+ case NVMF_ADDR_FAMILY_IB:
+ return sprintf(page, "ib\n");
+ default:
+ return sprintf(page, "\n");
+ }
+}
+
+static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ if (port->enabled) {
+ pr_err("Cannot modify address while enabled\n");
+ pr_err("Disable the address before modifying\n");
+ return -EACCES;
+ }
+
+ if (sysfs_streq(page, "ipv4")) {
+ port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP4;
+ } else if (sysfs_streq(page, "ipv6")) {
+ port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP6;
+ } else if (sysfs_streq(page, "ib")) {
+ port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IB;
+ } else {
+ pr_err("Invalid value '%s' for adrfam\n", page);
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_, addr_adrfam);
+
+static ssize_t nvmet_addr_portid_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ return snprintf(page, PAGE_SIZE, "%d\n",
+ le16_to_cpu(port->disc_addr.portid));
+}
+
+static ssize_t nvmet_addr_portid_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+ u16 portid = 0;
+
+ if (kstrtou16(page, 0, &portid)) {
+ pr_err("Invalid value '%s' for portid\n", page);
+ return -EINVAL;
+ }
+
+ if (port->enabled) {
+ pr_err("Cannot modify address while enabled\n");
+ pr_err("Disable the address before modifying\n");
+ return -EACCES;
+ }
+ port->disc_addr.portid = cpu_to_le16(portid);
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_, addr_portid);
+
+static ssize_t nvmet_addr_traddr_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ return snprintf(page, PAGE_SIZE, "%s\n",
+ port->disc_addr.traddr);
+}
+
+static ssize_t nvmet_addr_traddr_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ if (count > NVMF_TRADDR_SIZE) {
+ pr_err("Invalid value '%s' for traddr\n", page);
+ return -EINVAL;
+ }
+
+ if (port->enabled) {
+ pr_err("Cannot modify address while enabled\n");
+ pr_err("Disable the address before modifying\n");
+ return -EACCES;
+ }
+ return snprintf(port->disc_addr.traddr,
+ sizeof(port->disc_addr.traddr), "%s", page);
+}
+
+CONFIGFS_ATTR(nvmet_, addr_traddr);
+
+static ssize_t nvmet_addr_treq_show(struct config_item *item,
+ char *page)
+{
+ switch (to_nvmet_port(item)->disc_addr.treq) {
+ case NVMF_TREQ_NOT_SPECIFIED:
+ return sprintf(page, "not specified\n");
+ case NVMF_TREQ_REQUIRED:
+ return sprintf(page, "required\n");
+ case NVMF_TREQ_NOT_REQUIRED:
+ return sprintf(page, "not required\n");
+ default:
+ return sprintf(page, "\n");
+ }
+}
+
+static ssize_t nvmet_addr_treq_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ if (port->enabled) {
+ pr_err("Cannot modify address while enabled\n");
+ pr_err("Disable the address before modifying\n");
+ return -EACCES;
+ }
+
+ if (sysfs_streq(page, "not specified")) {
+ port->disc_addr.treq = NVMF_TREQ_NOT_SPECIFIED;
+ } else if (sysfs_streq(page, "required")) {
+ port->disc_addr.treq = NVMF_TREQ_REQUIRED;
+ } else if (sysfs_streq(page, "not required")) {
+ port->disc_addr.treq = NVMF_TREQ_NOT_REQUIRED;
+ } else {
+ pr_err("Invalid value '%s' for treq\n", page);
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_, addr_treq);
+
+static ssize_t nvmet_addr_trsvcid_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ return snprintf(page, PAGE_SIZE, "%s\n",
+ port->disc_addr.trsvcid);
+}
+
+static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ if (count > NVMF_TRSVCID_SIZE) {
+ pr_err("Invalid value '%s' for trsvcid\n", page);
+ return -EINVAL;
+ }
+ if (port->enabled) {
+ pr_err("Cannot modify address while enabled\n");
+ pr_err("Disable the address before modifying\n");
+ return -EACCES;
+ }
+ return snprintf(port->disc_addr.trsvcid,
+ sizeof(port->disc_addr.trsvcid), "%s", page);
+}
+
+CONFIGFS_ATTR(nvmet_, addr_trsvcid);
+
+static ssize_t nvmet_addr_trtype_show(struct config_item *item,
+ char *page)
+{
+ switch (to_nvmet_port(item)->disc_addr.trtype) {
+ case NVMF_TRTYPE_RDMA:
+ return sprintf(page, "rdma\n");
+ case NVMF_TRTYPE_LOOP:
+ return sprintf(page, "loop\n");
+ default:
+ return sprintf(page, "\n");
+ }
+}
+
+static void nvmet_port_init_tsas_rdma(struct nvmet_port *port)
+{
+ port->disc_addr.trtype = NVMF_TRTYPE_RDMA;
+ memset(&port->disc_addr.tsas.rdma, 0, NVMF_TSAS_SIZE);
+ port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED;
+ port->disc_addr.tsas.rdma.prtype = NVMF_RDMA_PRTYPE_NOT_SPECIFIED;
+ port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM;
+}
+
+static void nvmet_port_init_tsas_loop(struct nvmet_port *port)
+{
+ port->disc_addr.trtype = NVMF_TRTYPE_LOOP;
+ memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
+}
+
+static ssize_t nvmet_addr_trtype_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ if (port->enabled) {
+ pr_err("Cannot modify address while enabled\n");
+ pr_err("Disable the address before modifying\n");
+ return -EACCES;
+ }
+
+ if (sysfs_streq(page, "rdma")) {
+ nvmet_port_init_tsas_rdma(port);
+ } else if (sysfs_streq(page, "loop")) {
+ nvmet_port_init_tsas_loop(port);
+ } else {
+ pr_err("Invalid value '%s' for trtype\n", page);
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_, addr_trtype);
+
+/*
+ * Namespace structures & file operation functions below
+ */
+static ssize_t nvmet_ns_device_path_show(struct config_item *item, char *page)
+{
+ return sprintf(page, "%s\n", to_nvmet_ns(item)->device_path);
+}
+
+static ssize_t nvmet_ns_device_path_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+ struct nvmet_subsys *subsys = ns->subsys;
+ int ret;
+
+ mutex_lock(&subsys->lock);
+ ret = -EBUSY;
+ if (nvmet_ns_enabled(ns))
+ goto out_unlock;
+
+ kfree(ns->device_path);
+
+ ret = -ENOMEM;
+ ns->device_path = kstrdup(page, GFP_KERNEL);
+ if (!ns->device_path)
+ goto out_unlock;
+
+ mutex_unlock(&subsys->lock);
+ return count;
+
+out_unlock:
+ mutex_unlock(&subsys->lock);
+ return ret;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, device_path);
+
+static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page)
+{
+ return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid);
+}
+
+static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+ struct nvmet_subsys *subsys = ns->subsys;
+ u8 nguid[16];
+ const char *p = page;
+ int i;
+ int ret = 0;
+
+ mutex_lock(&subsys->lock);
+ if (nvmet_ns_enabled(ns)) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ for (i = 0; i < 16; i++) {
+ if (p + 2 > page + count) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (!isxdigit(p[0]) || !isxdigit(p[1])) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ nguid[i] = (hex_to_bin(p[0]) << 4) | hex_to_bin(p[1]);
+ p += 2;
+
+ if (*p == '-' || *p == ':')
+ p++;
+ }
+
+ memcpy(&ns->nguid, nguid, sizeof(nguid));
+out_unlock:
+ mutex_unlock(&subsys->lock);
+ return ret ? ret : count;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, device_nguid);
+
+static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
+{
+ return sprintf(page, "%d\n", nvmet_ns_enabled(to_nvmet_ns(item)));
+}
+
+static ssize_t nvmet_ns_enable_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+ bool enable;
+ int ret = 0;
+
+ if (strtobool(page, &enable))
+ return -EINVAL;
+
+ if (enable)
+ ret = nvmet_ns_enable(ns);
+ else
+ nvmet_ns_disable(ns);
+
+ return ret ? ret : count;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, enable);
+
+static struct configfs_attribute *nvmet_ns_attrs[] = {
+ &nvmet_ns_attr_device_path,
+ &nvmet_ns_attr_device_nguid,
+ &nvmet_ns_attr_enable,
+ NULL,
+};
+
+static void nvmet_ns_release(struct config_item *item)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+
+ nvmet_ns_free(ns);
+}
+
+static struct configfs_item_operations nvmet_ns_item_ops = {
+ .release = nvmet_ns_release,
+};
+
+static struct config_item_type nvmet_ns_type = {
+ .ct_item_ops = &nvmet_ns_item_ops,
+ .ct_attrs = nvmet_ns_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *nvmet_ns_make(struct config_group *group,
+ const char *name)
+{
+ struct nvmet_subsys *subsys = namespaces_to_subsys(&group->cg_item);
+ struct nvmet_ns *ns;
+ int ret;
+ u32 nsid;
+
+ ret = kstrtou32(name, 0, &nsid);
+ if (ret)
+ goto out;
+
+ ret = -EINVAL;
+ if (nsid == 0 || nsid == 0xffffffff)
+ goto out;
+
+ ret = -ENOMEM;
+ ns = nvmet_ns_alloc(subsys, nsid);
+ if (!ns)
+ goto out;
+ config_group_init_type_name(&ns->group, name, &nvmet_ns_type);
+
+ pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn);
+
+ return &ns->group;
+out:
+ return ERR_PTR(ret);
+}
+
+static struct configfs_group_operations nvmet_namespaces_group_ops = {
+ .make_group = nvmet_ns_make,
+};
+
+static struct config_item_type nvmet_namespaces_type = {
+ .ct_group_ops = &nvmet_namespaces_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static int nvmet_port_subsys_allow_link(struct config_item *parent,
+ struct config_item *target)
+{
+ struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
+ struct nvmet_subsys *subsys;
+ struct nvmet_subsys_link *link, *p;
+ int ret;
+
+ if (target->ci_type != &nvmet_subsys_type) {
+		pr_err("can only link subsystems into the subsystems directory!\n");
+ return -EINVAL;
+ }
+ subsys = to_subsys(target);
+ link = kmalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return -ENOMEM;
+ link->subsys = subsys;
+
+ down_write(&nvmet_config_sem);
+ ret = -EEXIST;
+ list_for_each_entry(p, &port->subsystems, entry) {
+ if (p->subsys == subsys)
+ goto out_free_link;
+ }
+
+ if (list_empty(&port->subsystems)) {
+ ret = nvmet_enable_port(port);
+ if (ret)
+ goto out_free_link;
+ }
+
+ list_add_tail(&link->entry, &port->subsystems);
+ nvmet_genctr++;
+ up_write(&nvmet_config_sem);
+ return 0;
+
+out_free_link:
+ up_write(&nvmet_config_sem);
+ kfree(link);
+ return ret;
+}
+
+static int nvmet_port_subsys_drop_link(struct config_item *parent,
+ struct config_item *target)
+{
+ struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
+ struct nvmet_subsys *subsys = to_subsys(target);
+ struct nvmet_subsys_link *p;
+
+ down_write(&nvmet_config_sem);
+ list_for_each_entry(p, &port->subsystems, entry) {
+ if (p->subsys == subsys)
+ goto found;
+ }
+ up_write(&nvmet_config_sem);
+ return -EINVAL;
+
+found:
+ list_del(&p->entry);
+ nvmet_genctr++;
+ if (list_empty(&port->subsystems))
+ nvmet_disable_port(port);
+ up_write(&nvmet_config_sem);
+ kfree(p);
+ return 0;
+}
+
+static struct configfs_item_operations nvmet_port_subsys_item_ops = {
+ .allow_link = nvmet_port_subsys_allow_link,
+ .drop_link = nvmet_port_subsys_drop_link,
+};
+
+static struct config_item_type nvmet_port_subsys_type = {
+ .ct_item_ops = &nvmet_port_subsys_item_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static int nvmet_allowed_hosts_allow_link(struct config_item *parent,
+ struct config_item *target)
+{
+ struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
+ struct nvmet_host *host;
+ struct nvmet_host_link *link, *p;
+ int ret;
+
+ if (target->ci_type != &nvmet_host_type) {
+ pr_err("can only link hosts into the allowed_hosts directory!\n");
+ return -EINVAL;
+ }
+
+ host = to_host(target);
+ link = kmalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return -ENOMEM;
+ link->host = host;
+
+ down_write(&nvmet_config_sem);
+ ret = -EINVAL;
+ if (subsys->allow_any_host) {
+ pr_err("can't add hosts when allow_any_host is set!\n");
+ goto out_free_link;
+ }
+
+ ret = -EEXIST;
+ list_for_each_entry(p, &subsys->hosts, entry) {
+ if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
+ goto out_free_link;
+ }
+ list_add_tail(&link->entry, &subsys->hosts);
+ nvmet_genctr++;
+ up_write(&nvmet_config_sem);
+ return 0;
+out_free_link:
+ up_write(&nvmet_config_sem);
+ kfree(link);
+ return ret;
+}
+
+static int nvmet_allowed_hosts_drop_link(struct config_item *parent,
+ struct config_item *target)
+{
+ struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
+ struct nvmet_host *host = to_host(target);
+ struct nvmet_host_link *p;
+
+ down_write(&nvmet_config_sem);
+ list_for_each_entry(p, &subsys->hosts, entry) {
+ if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
+ goto found;
+ }
+ up_write(&nvmet_config_sem);
+ return -EINVAL;
+
+found:
+ list_del(&p->entry);
+ nvmet_genctr++;
+ up_write(&nvmet_config_sem);
+ kfree(p);
+ return 0;
+}
+
+static struct configfs_item_operations nvmet_allowed_hosts_item_ops = {
+ .allow_link = nvmet_allowed_hosts_allow_link,
+ .drop_link = nvmet_allowed_hosts_drop_link,
+};
+
+static struct config_item_type nvmet_allowed_hosts_type = {
+ .ct_item_ops = &nvmet_allowed_hosts_item_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static ssize_t nvmet_subsys_attr_allow_any_host_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%d\n",
+ to_subsys(item)->allow_any_host);
+}
+
+static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+ bool allow_any_host;
+ int ret = 0;
+
+ if (strtobool(page, &allow_any_host))
+ return -EINVAL;
+
+ down_write(&nvmet_config_sem);
+ if (allow_any_host && !list_empty(&subsys->hosts)) {
+ pr_err("Can't set allow_any_host when explicit hosts are set!\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ subsys->allow_any_host = allow_any_host;
+out_unlock:
+ up_write(&nvmet_config_sem);
+ return ret ? ret : count;
+}
+
+CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);
+
+static struct configfs_attribute *nvmet_subsys_attrs[] = {
+ &nvmet_subsys_attr_attr_allow_any_host,
+ NULL,
+};
+
+/*
+ * Subsystem structures & folder operation functions below
+ */
+static void nvmet_subsys_release(struct config_item *item)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+
+ nvmet_subsys_put(subsys);
+}
+
+static struct configfs_item_operations nvmet_subsys_item_ops = {
+ .release = nvmet_subsys_release,
+};
+
+static struct config_item_type nvmet_subsys_type = {
+ .ct_item_ops = &nvmet_subsys_item_ops,
+ .ct_attrs = nvmet_subsys_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *nvmet_subsys_make(struct config_group *group,
+ const char *name)
+{
+ struct nvmet_subsys *subsys;
+
+ if (sysfs_streq(name, NVME_DISC_SUBSYS_NAME)) {
+ pr_err("can't create discovery subsystem through configfs\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
+ if (!subsys)
+ return ERR_PTR(-ENOMEM);
+
+ config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type);
+
+ config_group_init_type_name(&subsys->namespaces_group,
+ "namespaces", &nvmet_namespaces_type);
+ configfs_add_default_group(&subsys->namespaces_group, &subsys->group);
+
+ config_group_init_type_name(&subsys->allowed_hosts_group,
+ "allowed_hosts", &nvmet_allowed_hosts_type);
+ configfs_add_default_group(&subsys->allowed_hosts_group,
+ &subsys->group);
+
+ return &subsys->group;
+}
+
+static struct configfs_group_operations nvmet_subsystems_group_ops = {
+ .make_group = nvmet_subsys_make,
+};
+
+static struct config_item_type nvmet_subsystems_type = {
+ .ct_group_ops = &nvmet_subsystems_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static ssize_t nvmet_referral_enable_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%d\n", to_nvmet_port(item)->enabled);
+}
+
+static ssize_t nvmet_referral_enable_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
+ struct nvmet_port *port = to_nvmet_port(item);
+ bool enable;
+
+ if (strtobool(page, &enable))
+ goto inval;
+
+ if (enable)
+ nvmet_referral_enable(parent, port);
+ else
+ nvmet_referral_disable(port);
+
+ return count;
+inval:
+ pr_err("Invalid value '%s' for enable\n", page);
+ return -EINVAL;
+}
+
+CONFIGFS_ATTR(nvmet_referral_, enable);
+
+/*
+ * Discovery Service subsystem definitions
+ */
+static struct configfs_attribute *nvmet_referral_attrs[] = {
+ &nvmet_attr_addr_adrfam,
+ &nvmet_attr_addr_portid,
+ &nvmet_attr_addr_treq,
+ &nvmet_attr_addr_traddr,
+ &nvmet_attr_addr_trsvcid,
+ &nvmet_attr_addr_trtype,
+ &nvmet_referral_attr_enable,
+ NULL,
+};
+
+static void nvmet_referral_release(struct config_item *item)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ nvmet_referral_disable(port);
+ kfree(port);
+}
+
+static struct configfs_item_operations nvmet_referral_item_ops = {
+ .release = nvmet_referral_release,
+};
+
+static struct config_item_type nvmet_referral_type = {
+ .ct_owner = THIS_MODULE,
+ .ct_attrs = nvmet_referral_attrs,
+ .ct_item_ops = &nvmet_referral_item_ops,
+};
+
+static struct config_group *nvmet_referral_make(
+ struct config_group *group, const char *name)
+{
+ struct nvmet_port *port;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&port->entry);
+ config_group_init_type_name(&port->group, name, &nvmet_referral_type);
+
+ return &port->group;
+}
+
+static struct configfs_group_operations nvmet_referral_group_ops = {
+ .make_group = nvmet_referral_make,
+};
+
+static struct config_item_type nvmet_referrals_type = {
+ .ct_owner = THIS_MODULE,
+ .ct_group_ops = &nvmet_referral_group_ops,
+};
+
+/*
+ * Ports definitions.
+ */
+static void nvmet_port_release(struct config_item *item)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ kfree(port);
+}
+
+static struct configfs_attribute *nvmet_port_attrs[] = {
+ &nvmet_attr_addr_adrfam,
+ &nvmet_attr_addr_treq,
+ &nvmet_attr_addr_traddr,
+ &nvmet_attr_addr_trsvcid,
+ &nvmet_attr_addr_trtype,
+ NULL,
+};
+
+static struct configfs_item_operations nvmet_port_item_ops = {
+ .release = nvmet_port_release,
+};
+
+static struct config_item_type nvmet_port_type = {
+ .ct_attrs = nvmet_port_attrs,
+ .ct_item_ops = &nvmet_port_item_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *nvmet_ports_make(struct config_group *group,
+ const char *name)
+{
+ struct nvmet_port *port;
+ u16 portid;
+
+ if (kstrtou16(name, 0, &portid))
+ return ERR_PTR(-EINVAL);
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&port->entry);
+ INIT_LIST_HEAD(&port->subsystems);
+ INIT_LIST_HEAD(&port->referrals);
+
+ port->disc_addr.portid = cpu_to_le16(portid);
+ config_group_init_type_name(&port->group, name, &nvmet_port_type);
+
+ config_group_init_type_name(&port->subsys_group,
+ "subsystems", &nvmet_port_subsys_type);
+ configfs_add_default_group(&port->subsys_group, &port->group);
+
+ config_group_init_type_name(&port->referrals_group,
+ "referrals", &nvmet_referrals_type);
+ configfs_add_default_group(&port->referrals_group, &port->group);
+
+ return &port->group;
+}
+
+static struct configfs_group_operations nvmet_ports_group_ops = {
+ .make_group = nvmet_ports_make,
+};
+
+static struct config_item_type nvmet_ports_type = {
+ .ct_group_ops = &nvmet_ports_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group nvmet_subsystems_group;
+static struct config_group nvmet_ports_group;
+
+static void nvmet_host_release(struct config_item *item)
+{
+ struct nvmet_host *host = to_host(item);
+
+ kfree(host);
+}
+
+static struct configfs_item_operations nvmet_host_item_ops = {
+ .release = nvmet_host_release,
+};
+
+static struct config_item_type nvmet_host_type = {
+ .ct_item_ops = &nvmet_host_item_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *nvmet_hosts_make_group(struct config_group *group,
+ const char *name)
+{
+ struct nvmet_host *host;
+
+ host = kzalloc(sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return ERR_PTR(-ENOMEM);
+
+ config_group_init_type_name(&host->group, name, &nvmet_host_type);
+
+ return &host->group;
+}
+
+static struct configfs_group_operations nvmet_hosts_group_ops = {
+ .make_group = nvmet_hosts_make_group,
+};
+
+static struct config_item_type nvmet_hosts_type = {
+ .ct_group_ops = &nvmet_hosts_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group nvmet_hosts_group;
+
+static struct config_item_type nvmet_root_type = {
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_subsystem nvmet_configfs_subsystem = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "nvmet",
+ .ci_type = &nvmet_root_type,
+ },
+ },
+};
+
+int __init nvmet_init_configfs(void)
+{
+ int ret;
+
+ config_group_init(&nvmet_configfs_subsystem.su_group);
+ mutex_init(&nvmet_configfs_subsystem.su_mutex);
+
+ config_group_init_type_name(&nvmet_subsystems_group,
+ "subsystems", &nvmet_subsystems_type);
+ configfs_add_default_group(&nvmet_subsystems_group,
+ &nvmet_configfs_subsystem.su_group);
+
+ config_group_init_type_name(&nvmet_ports_group,
+ "ports", &nvmet_ports_type);
+ configfs_add_default_group(&nvmet_ports_group,
+ &nvmet_configfs_subsystem.su_group);
+
+ config_group_init_type_name(&nvmet_hosts_group,
+ "hosts", &nvmet_hosts_type);
+ configfs_add_default_group(&nvmet_hosts_group,
+ &nvmet_configfs_subsystem.su_group);
+
+ ret = configfs_register_subsystem(&nvmet_configfs_subsystem);
+ if (ret) {
+ pr_err("configfs_register_subsystem: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void __exit nvmet_exit_configfs(void)
+{
+ configfs_unregister_subsystem(&nvmet_configfs_subsystem);
+}
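+
+/*
+ * The groups registered above produce the following tree under
+ * /sys/kernel/config once the module is loaded:
+ *
+ * nvmet/
+ * +- hosts/<hostnqn>/
+ * +- ports/<portid>/{addr_*, subsystems/, referrals/}
+ * +- subsystems/<NQN>/{attr_allow_any_host, allowed_hosts/,
+ *    namespaces/<nsid>/}
+ *
+ * A minimal, illustrative shell walkthrough (the NQN and device path
+ * are made up, and the port also needs its addr_* attributes set for a
+ * registered transport before the final link will succeed):
+ *
+ * cd /sys/kernel/config/nvmet
+ * mkdir subsystems/testnqn subsystems/testnqn/namespaces/1
+ * echo /dev/nvme0n1 > subsystems/testnqn/namespaces/1/device_path
+ * echo 1 > subsystems/testnqn/namespaces/1/enable
+ * mkdir ports/1
+ * ln -s /sys/kernel/config/nvmet/subsystems/testnqn \
+ *       ports/1/subsystems/testnqn
+ */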
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
new file mode 100644
index 000000000..6559d5afa
--- /dev/null
+++ b/drivers/nvme/target/core.c
@@ -0,0 +1,968 @@
+/*
+ * Common code for the NVMe target.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/random.h>
+#include "nvmet.h"
+
+static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
+
+/*
+ * This read/write semaphore is used to synchronize access to configuration
+ * information on a target system that will result in discovery log page
+ * information change for at least one host.
+ * The full list of resources protected by this semaphore is:
+ *
+ * - subsystems list
+ * - per-subsystem allowed hosts list
+ * - allow_any_host subsystem attribute
+ * - nvmet_genctr
+ * - the nvmet_transports array
+ *
+ * When updating any of those lists/structures the write lock should be
+ * obtained, while readers (populating the discovery log page or checking
+ * host-subsystem links) take the read lock to allow concurrent reads.
+ */
+DECLARE_RWSEM(nvmet_config_sem);
+
+static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
+ const char *subsysnqn);
+
+u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
+ size_t len)
+{
+ if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
+ return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+ return 0;
+}
+
+u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
+{
+ if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
+ return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+ return 0;
+}
+
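+/*
+ * Pack the AER completion result dword as the spec lays it out: event
+ * type in byte 0, event information in byte 1 and the associated log
+ * page identifier in byte 2.
+ */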
+static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
+{
+ return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
+}
+
+static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
+{
+ struct nvmet_req *req;
+
+ while (1) {
+ mutex_lock(&ctrl->lock);
+ if (!ctrl->nr_async_event_cmds) {
+ mutex_unlock(&ctrl->lock);
+ return;
+ }
+
+ req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
+ mutex_unlock(&ctrl->lock);
+ nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
+ }
+}
+
+static void nvmet_async_event_work(struct work_struct *work)
+{
+ struct nvmet_ctrl *ctrl =
+ container_of(work, struct nvmet_ctrl, async_event_work);
+ struct nvmet_async_event *aen;
+ struct nvmet_req *req;
+
+ while (1) {
+ mutex_lock(&ctrl->lock);
+ aen = list_first_entry_or_null(&ctrl->async_events,
+ struct nvmet_async_event, entry);
+ if (!aen || !ctrl->nr_async_event_cmds) {
+ mutex_unlock(&ctrl->lock);
+ return;
+ }
+
+ req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
+ nvmet_set_result(req, nvmet_async_event_result(aen));
+
+ list_del(&aen->entry);
+ kfree(aen);
+
+ mutex_unlock(&ctrl->lock);
+ nvmet_req_complete(req, 0);
+ }
+}
+
+static void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
+ u8 event_info, u8 log_page)
+{
+ struct nvmet_async_event *aen;
+
+ aen = kmalloc(sizeof(*aen), GFP_KERNEL);
+ if (!aen)
+ return;
+
+ aen->event_type = event_type;
+ aen->event_info = event_info;
+ aen->log_page = log_page;
+
+ mutex_lock(&ctrl->lock);
+ list_add_tail(&aen->entry, &ctrl->async_events);
+ mutex_unlock(&ctrl->lock);
+
+ schedule_work(&ctrl->async_event_work);
+}
+
+int nvmet_register_transport(struct nvmet_fabrics_ops *ops)
+{
+ int ret = 0;
+
+ down_write(&nvmet_config_sem);
+ if (nvmet_transports[ops->type])
+ ret = -EINVAL;
+ else
+ nvmet_transports[ops->type] = ops;
+ up_write(&nvmet_config_sem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvmet_register_transport);
+
+void nvmet_unregister_transport(struct nvmet_fabrics_ops *ops)
+{
+ down_write(&nvmet_config_sem);
+ nvmet_transports[ops->type] = NULL;
+ up_write(&nvmet_config_sem);
+}
+EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
+
+int nvmet_enable_port(struct nvmet_port *port)
+{
+ struct nvmet_fabrics_ops *ops;
+ int ret;
+
+ lockdep_assert_held(&nvmet_config_sem);
+
+ ops = nvmet_transports[port->disc_addr.trtype];
+ if (!ops) {
+ up_write(&nvmet_config_sem);
+ request_module("nvmet-transport-%d", port->disc_addr.trtype);
+ down_write(&nvmet_config_sem);
+ ops = nvmet_transports[port->disc_addr.trtype];
+ if (!ops) {
+ pr_err("transport type %d not supported\n",
+ port->disc_addr.trtype);
+ return -EINVAL;
+ }
+ }
+
+ if (!try_module_get(ops->owner))
+ return -EINVAL;
+
+ ret = ops->add_port(port);
+ if (ret) {
+ module_put(ops->owner);
+ return ret;
+ }
+
+ port->enabled = true;
+ return 0;
+}
+
+void nvmet_disable_port(struct nvmet_port *port)
+{
+ struct nvmet_fabrics_ops *ops;
+
+ lockdep_assert_held(&nvmet_config_sem);
+
+ port->enabled = false;
+
+ ops = nvmet_transports[port->disc_addr.trtype];
+ ops->remove_port(port);
+ module_put(ops->owner);
+}
+
+static void nvmet_keep_alive_timer(struct work_struct *work)
+{
+ struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
+ struct nvmet_ctrl, ka_work);
+
+ pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
+ ctrl->cntlid, ctrl->kato);
+
+ ctrl->ops->delete_ctrl(ctrl);
+}
+
+static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
+{
+ pr_debug("ctrl %d start keep-alive timer for %d secs\n",
+ ctrl->cntlid, ctrl->kato);
+
+ INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
+ schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+}
+
+static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
+{
+ pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);
+
+ cancel_delayed_work_sync(&ctrl->ka_work);
+}
+
+static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
+ __le32 nsid)
+{
+ struct nvmet_ns *ns;
+
+ list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
+ if (ns->nsid == le32_to_cpu(nsid))
+ return ns;
+ }
+
+ return NULL;
+}
+
+struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
+{
+ struct nvmet_ns *ns;
+
+ rcu_read_lock();
+ ns = __nvmet_find_namespace(ctrl, nsid);
+ if (ns)
+ percpu_ref_get(&ns->ref);
+ rcu_read_unlock();
+
+ return ns;
+}
+
+static void nvmet_destroy_namespace(struct percpu_ref *ref)
+{
+ struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);
+
+ complete(&ns->disable_done);
+}
+
+void nvmet_put_namespace(struct nvmet_ns *ns)
+{
+ percpu_ref_put(&ns->ref);
+}
+
+int nvmet_ns_enable(struct nvmet_ns *ns)
+{
+ struct nvmet_subsys *subsys = ns->subsys;
+ struct nvmet_ctrl *ctrl;
+ int ret = 0;
+
+ mutex_lock(&subsys->lock);
+ if (!list_empty(&ns->dev_link))
+ goto out_unlock;
+
+ ns->bdev = blkdev_get_by_path(ns->device_path, FMODE_READ | FMODE_WRITE,
+ NULL);
+ if (IS_ERR(ns->bdev)) {
+ pr_err("nvmet: failed to open block device %s: (%ld)\n",
+ ns->device_path, PTR_ERR(ns->bdev));
+ ret = PTR_ERR(ns->bdev);
+ ns->bdev = NULL;
+ goto out_unlock;
+ }
+
+ ns->size = i_size_read(ns->bdev->bd_inode);
+ ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
+
+ ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
+ 0, GFP_KERNEL);
+ if (ret)
+ goto out_blkdev_put;
+
+ if (ns->nsid > subsys->max_nsid)
+ subsys->max_nsid = ns->nsid;
+
+ /*
+ * The namespaces list needs to be sorted to simplify the implementation
+ * of the Identify Namespace List subcommand.
+ */
+ if (list_empty(&subsys->namespaces)) {
+ list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
+ } else {
+ struct nvmet_ns *old;
+
+ list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
+ BUG_ON(ns->nsid == old->nsid);
+ if (ns->nsid < old->nsid)
+ break;
+ }
+
+ list_add_tail_rcu(&ns->dev_link, &old->dev_link);
+ }
+
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);
+
+ ret = 0;
+out_unlock:
+ mutex_unlock(&subsys->lock);
+ return ret;
+out_blkdev_put:
+ blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ);
+ ns->bdev = NULL;
+ goto out_unlock;
+}
+
+void nvmet_ns_disable(struct nvmet_ns *ns)
+{
+ struct nvmet_subsys *subsys = ns->subsys;
+ struct nvmet_ctrl *ctrl;
+
+ mutex_lock(&subsys->lock);
+ if (list_empty(&ns->dev_link)) {
+ mutex_unlock(&subsys->lock);
+ return;
+ }
+ list_del_init(&ns->dev_link);
+ mutex_unlock(&subsys->lock);
+
+ /*
+ * Now that we removed the namespace from the lookup list, we
+ * can kill the percpu ref and wait for any remaining references
+ * to be dropped, as well as an RCU grace period for anyone only
+ * using the namespace under rcu_read_lock(). Note that we can't
+ * use call_rcu here as we need to ensure the namespaces have
+ * been fully destroyed before unloading the module.
+ */
+ percpu_ref_kill(&ns->ref);
+ synchronize_rcu();
+ wait_for_completion(&ns->disable_done);
+ percpu_ref_exit(&ns->ref);
+
+ mutex_lock(&subsys->lock);
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);
+
+ if (ns->bdev)
+ blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ);
+ mutex_unlock(&subsys->lock);
+}
+
+void nvmet_ns_free(struct nvmet_ns *ns)
+{
+ nvmet_ns_disable(ns);
+
+ kfree(ns->device_path);
+ kfree(ns);
+}
+
+struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
+{
+ struct nvmet_ns *ns;
+
+ ns = kzalloc(sizeof(*ns), GFP_KERNEL);
+ if (!ns)
+ return NULL;
+
+ INIT_LIST_HEAD(&ns->dev_link);
+ init_completion(&ns->disable_done);
+
+ ns->nsid = nsid;
+ ns->subsys = subsys;
+
+ return ns;
+}
+
+static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
+{
+ if (status)
+ nvmet_set_status(req, status);
+
+ /* XXX: need to fill in something useful for sq_head */
+ req->rsp->sq_head = 0;
+ if (likely(req->sq)) /* sq may be NULL during early failure */
+ req->rsp->sq_id = cpu_to_le16(req->sq->qid);
+ req->rsp->command_id = req->cmd->common.command_id;
+
+ if (req->ns)
+ nvmet_put_namespace(req->ns);
+ req->ops->queue_response(req);
+}
+
+void nvmet_req_complete(struct nvmet_req *req, u16 status)
+{
+ __nvmet_req_complete(req, status);
+ percpu_ref_put(&req->sq->ref);
+}
+EXPORT_SYMBOL_GPL(nvmet_req_complete);
+
+void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
+ u16 qid, u16 size)
+{
+ cq->qid = qid;
+ cq->size = size;
+
+ ctrl->cqs[qid] = cq;
+}
+
+void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
+ u16 qid, u16 size)
+{
+ sq->qid = qid;
+ sq->size = size;
+
+ ctrl->sqs[qid] = sq;
+}
+
+void nvmet_sq_destroy(struct nvmet_sq *sq)
+{
+ /*
+ * If this is the admin queue, complete all AERs so that our
+ * queue doesn't have outstanding requests on it.
+ */
+ if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
+ nvmet_async_events_free(sq->ctrl);
+ percpu_ref_kill(&sq->ref);
+ wait_for_completion(&sq->free_done);
+ percpu_ref_exit(&sq->ref);
+
+ if (sq->ctrl) {
+ nvmet_ctrl_put(sq->ctrl);
+ sq->ctrl = NULL; /* allows reusing the queue later */
+ }
+}
+EXPORT_SYMBOL_GPL(nvmet_sq_destroy);
+
+static void nvmet_sq_free(struct percpu_ref *ref)
+{
+ struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
+
+ complete(&sq->free_done);
+}
+
+int nvmet_sq_init(struct nvmet_sq *sq)
+{
+ int ret;
+
+ ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
+ if (ret) {
+ pr_err("percpu_ref init failed!\n");
+ return ret;
+ }
+ init_completion(&sq->free_done);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_sq_init);
+
+bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
+ struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops)
+{
+ u8 flags = req->cmd->common.flags;
+ u16 status;
+
+ req->cq = cq;
+ req->sq = sq;
+ req->ops = ops;
+ req->sg = NULL;
+ req->sg_cnt = 0;
+ req->rsp->status = 0;
+
+ /* no support for fused commands yet */
+ if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ goto fail;
+ }
+
+ /* either variant of SGLs is fine, as we don't support metadata */
+ if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF &&
+ (flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METASEG)) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ goto fail;
+ }
+
+ if (unlikely(!req->sq->ctrl))
+ /* will return an error for any non-connect command: */
+ status = nvmet_parse_connect_cmd(req);
+ else if (likely(req->sq->qid != 0))
+ status = nvmet_parse_io_cmd(req);
+ else if (req->cmd->common.opcode == nvme_fabrics_command)
+ status = nvmet_parse_fabrics_cmd(req);
+ else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
+ status = nvmet_parse_discovery_cmd(req);
+ else
+ status = nvmet_parse_admin_cmd(req);
+
+ if (status)
+ goto fail;
+
+ if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ goto fail;
+ }
+
+ return true;
+
+fail:
+ __nvmet_req_complete(req, status);
+ return false;
+}
+EXPORT_SYMBOL_GPL(nvmet_req_init);
+
+static inline bool nvmet_cc_en(u32 cc)
+{
+ return cc & 0x1;
+}
+
+static inline u8 nvmet_cc_css(u32 cc)
+{
+ return (cc >> 4) & 0x7;
+}
+
+static inline u8 nvmet_cc_mps(u32 cc)
+{
+ return (cc >> 7) & 0xf;
+}
+
+static inline u8 nvmet_cc_ams(u32 cc)
+{
+ return (cc >> 11) & 0x7;
+}
+
+static inline u8 nvmet_cc_shn(u32 cc)
+{
+ return (cc >> 14) & 0x3;
+}
+
+static inline u8 nvmet_cc_iosqes(u32 cc)
+{
+ return (cc >> 16) & 0xf;
+}
+
+static inline u8 nvmet_cc_iocqes(u32 cc)
+{
+ return (cc >> 20) & 0xf;
+}
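+
+/*
+ * These helpers follow the NVMe Controller Configuration (CC) register
+ * layout: EN is bit 0, CSS bits 6:4, MPS bits 10:7, AMS bits 13:11,
+ * SHN bits 15:14, IOSQES bits 19:16 and IOCQES bits 23:20. For
+ * example, a host enabling the controller with 64-byte SQ entries
+ * (IOSQES = 6) and 16-byte CQ entries (IOCQES = 4) writes
+ * CC = (4 << 20) | (6 << 16) | 1.
+ */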
+
+static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
+{
+ lockdep_assert_held(&ctrl->lock);
+
+ if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
+ nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES ||
+ nvmet_cc_mps(ctrl->cc) != 0 ||
+ nvmet_cc_ams(ctrl->cc) != 0 ||
+ nvmet_cc_css(ctrl->cc) != 0) {
+ ctrl->csts = NVME_CSTS_CFS;
+ return;
+ }
+
+ ctrl->csts = NVME_CSTS_RDY;
+}
+
+static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
+{
+ lockdep_assert_held(&ctrl->lock);
+
+ /* XXX: tear down queues? */
+ ctrl->csts &= ~NVME_CSTS_RDY;
+ ctrl->cc = 0;
+}
+
+void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
+{
+ u32 old;
+
+ mutex_lock(&ctrl->lock);
+ old = ctrl->cc;
+ ctrl->cc = new;
+
+ if (nvmet_cc_en(new) && !nvmet_cc_en(old))
+ nvmet_start_ctrl(ctrl);
+ if (!nvmet_cc_en(new) && nvmet_cc_en(old))
+ nvmet_clear_ctrl(ctrl);
+ if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
+ nvmet_clear_ctrl(ctrl);
+ ctrl->csts |= NVME_CSTS_SHST_CMPLT;
+ }
+ if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
+ ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
+ mutex_unlock(&ctrl->lock);
+}
+
+static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
+{
+ /* command sets supported: NVMe command set: */
+ ctrl->cap = (1ULL << 37);
+ /* CC.EN timeout in 500msec units: */
+ ctrl->cap |= (15ULL << 24);
+ /* maximum queue entries supported: */
+ ctrl->cap |= NVMET_QUEUE_SIZE - 1;
+}
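+
+/*
+ * For reference, the CAP fields set above: bit 37 is the NVM command
+ * set flag within CSS (bits 44:37), TO (bits 31:24) is the worst-case
+ * CC.EN transition time in 500ms units (15 here, i.e. 7.5 seconds),
+ * and MQES (bits 15:0) is the maximum queue size as a 0's based value.
+ */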
+
+u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
+ struct nvmet_req *req, struct nvmet_ctrl **ret)
+{
+ struct nvmet_subsys *subsys;
+ struct nvmet_ctrl *ctrl;
+ u16 status = 0;
+
+ subsys = nvmet_find_get_subsys(req->port, subsysnqn);
+ if (!subsys) {
+ pr_warn("connect request for invalid subsystem %s!\n",
+ subsysnqn);
+ req->rsp->result = IPO_IATTR_CONNECT_DATA(subsysnqn);
+ return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ }
+
+ mutex_lock(&subsys->lock);
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+ if (ctrl->cntlid == cntlid) {
+ if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
+ pr_warn("hostnqn mismatch.\n");
+ continue;
+ }
+ if (!kref_get_unless_zero(&ctrl->ref))
+ continue;
+
+ *ret = ctrl;
+ goto out;
+ }
+ }
+
+ pr_warn("could not find controller %d for subsys %s / host %s\n",
+ cntlid, subsysnqn, hostnqn);
+ req->rsp->result = IPO_IATTR_CONNECT_DATA(cntlid);
+ status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+
+out:
+ mutex_unlock(&subsys->lock);
+ nvmet_subsys_put(subsys);
+ return status;
+}
+
+static bool __nvmet_host_allowed(struct nvmet_subsys *subsys,
+ const char *hostnqn)
+{
+ struct nvmet_host_link *p;
+
+ if (subsys->allow_any_host)
+ return true;
+
+ list_for_each_entry(p, &subsys->hosts, entry) {
+ if (!strcmp(nvmet_host_name(p->host), hostnqn))
+ return true;
+ }
+
+ return false;
+}
+
+static bool nvmet_host_discovery_allowed(struct nvmet_req *req,
+ const char *hostnqn)
+{
+ struct nvmet_subsys_link *s;
+
+ list_for_each_entry(s, &req->port->subsystems, entry) {
+ if (__nvmet_host_allowed(s->subsys, hostnqn))
+ return true;
+ }
+
+ return false;
+}
+
+bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
+ const char *hostnqn)
+{
+ lockdep_assert_held(&nvmet_config_sem);
+
+ if (subsys->type == NVME_NQN_DISC)
+ return nvmet_host_discovery_allowed(req, hostnqn);
+ else
+ return __nvmet_host_allowed(subsys, hostnqn);
+}
+
+u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
+ struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
+{
+ struct nvmet_subsys *subsys;
+ struct nvmet_ctrl *ctrl;
+ int ret;
+ u16 status;
+
+ status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ subsys = nvmet_find_get_subsys(req->port, subsysnqn);
+ if (!subsys) {
+ pr_warn("connect request for invalid subsystem %s!\n",
+ subsysnqn);
+ req->rsp->result = IPO_IATTR_CONNECT_DATA(subsysnqn);
+ goto out;
+ }
+
+ status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ down_read(&nvmet_config_sem);
+ if (!nvmet_host_allowed(req, subsys, hostnqn)) {
+ pr_info("connect by host %s for subsystem %s not allowed\n",
+ hostnqn, subsysnqn);
+ req->rsp->result = IPO_IATTR_CONNECT_DATA(hostnqn);
+ up_read(&nvmet_config_sem);
+ goto out_put_subsystem;
+ }
+ up_read(&nvmet_config_sem);
+
+ status = NVME_SC_INTERNAL;
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ goto out_put_subsystem;
+ mutex_init(&ctrl->lock);
+
+ nvmet_init_cap(ctrl);
+
+ INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
+ INIT_LIST_HEAD(&ctrl->async_events);
+
+ memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
+ memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
+
+ /* generate a random serial number as our controllers are ephemeral: */
+ get_random_bytes(&ctrl->serial, sizeof(ctrl->serial));
+
+ kref_init(&ctrl->ref);
+ ctrl->subsys = subsys;
+
+ ctrl->cqs = kcalloc(subsys->max_qid + 1,
+ sizeof(struct nvmet_cq *),
+ GFP_KERNEL);
+ if (!ctrl->cqs)
+ goto out_free_ctrl;
+
+ ctrl->sqs = kcalloc(subsys->max_qid + 1,
+ sizeof(struct nvmet_sq *),
+ GFP_KERNEL);
+ if (!ctrl->sqs)
+ goto out_free_cqs;
+
+ ret = ida_simple_get(&subsys->cntlid_ida,
+ NVME_CNTLID_MIN, NVME_CNTLID_MAX,
+ GFP_KERNEL);
+ if (ret < 0) {
+ status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
+ goto out_free_sqs;
+ }
+ ctrl->cntlid = ret;
+
+ ctrl->ops = req->ops;
+ if (ctrl->subsys->type == NVME_NQN_DISC) {
+ /* Don't accept keep-alive timeout for discovery controllers */
+ if (kato) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ goto out_free_sqs;
+ }
+
+ /*
+ * Discovery controllers use some arbitrary high value in order
+ * to clean up stale discovery sessions.
+ *
+ * From the latest base diff RC:
+ * "The Keep Alive command is not supported by
+ * Discovery controllers. A transport may specify a
+ * fixed Discovery controller activity timeout value
+ * (e.g., 2 minutes). If no commands are received
+ * by a Discovery controller within that time
+ * period, the controller may perform the
+ * actions for Keep Alive Timer expiration".
+ */
+ ctrl->kato = NVMET_DISC_KATO;
+ } else {
+ /* keep-alive timeout in seconds */
+ ctrl->kato = DIV_ROUND_UP(kato, 1000);
+ }
+ nvmet_start_keep_alive_timer(ctrl);
+
+ mutex_lock(&subsys->lock);
+ list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
+ mutex_unlock(&subsys->lock);
+
+ *ctrlp = ctrl;
+ return 0;
+
+out_free_sqs:
+ kfree(ctrl->sqs);
+out_free_cqs:
+ kfree(ctrl->cqs);
+out_free_ctrl:
+ kfree(ctrl);
+out_put_subsystem:
+ nvmet_subsys_put(subsys);
+out:
+ return status;
+}
+
+static void nvmet_ctrl_free(struct kref *ref)
+{
+ struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
+ struct nvmet_subsys *subsys = ctrl->subsys;
+
+ nvmet_stop_keep_alive_timer(ctrl);
+
+ mutex_lock(&subsys->lock);
+ list_del(&ctrl->subsys_entry);
+ mutex_unlock(&subsys->lock);
+
+ ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid);
+ nvmet_subsys_put(subsys);
+
+ kfree(ctrl->sqs);
+ kfree(ctrl->cqs);
+ kfree(ctrl);
+}
+
+void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
+{
+ kref_put(&ctrl->ref, nvmet_ctrl_free);
+}
+
+static void nvmet_fatal_error_handler(struct work_struct *work)
+{
+ struct nvmet_ctrl *ctrl =
+ container_of(work, struct nvmet_ctrl, fatal_err_work);
+
+ pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
+ ctrl->ops->delete_ctrl(ctrl);
+}
+
+void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
+{
+ ctrl->csts |= NVME_CSTS_CFS;
+ INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
+ schedule_work(&ctrl->fatal_err_work);
+}
+EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
+
+static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
+ const char *subsysnqn)
+{
+ struct nvmet_subsys_link *p;
+
+ if (!port)
+ return NULL;
+
+ if (!strncmp(NVME_DISC_SUBSYS_NAME, subsysnqn,
+ NVMF_NQN_SIZE)) {
+ if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
+ return NULL;
+ return nvmet_disc_subsys;
+ }
+
+ down_read(&nvmet_config_sem);
+ list_for_each_entry(p, &port->subsystems, entry) {
+ if (!strncmp(p->subsys->subsysnqn, subsysnqn,
+ NVMF_NQN_SIZE)) {
+ if (!kref_get_unless_zero(&p->subsys->ref))
+ break;
+ up_read(&nvmet_config_sem);
+ return p->subsys;
+ }
+ }
+ up_read(&nvmet_config_sem);
+ return NULL;
+}
+
+struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
+ enum nvme_subsys_type type)
+{
+ struct nvmet_subsys *subsys;
+
+ subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
+ if (!subsys)
+ return NULL;
+
+ subsys->ver = (1 << 16) | (2 << 8) | 1; /* NVMe 1.2.1 */
+
+ switch (type) {
+ case NVME_NQN_NVME:
+ subsys->max_qid = NVMET_NR_QUEUES;
+ break;
+ case NVME_NQN_DISC:
+ subsys->max_qid = 0;
+ break;
+ default:
+ pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
+ kfree(subsys);
+ return NULL;
+ }
+ subsys->type = type;
+ subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
+ GFP_KERNEL);
+ if (!subsys->subsysnqn) {
+ kfree(subsys);
+ return NULL;
+ }
+
+ kref_init(&subsys->ref);
+
+ mutex_init(&subsys->lock);
+ INIT_LIST_HEAD(&subsys->namespaces);
+ INIT_LIST_HEAD(&subsys->ctrls);
+
+ ida_init(&subsys->cntlid_ida);
+
+ INIT_LIST_HEAD(&subsys->hosts);
+
+ return subsys;
+}
+
+static void nvmet_subsys_free(struct kref *ref)
+{
+ struct nvmet_subsys *subsys =
+ container_of(ref, struct nvmet_subsys, ref);
+
+ WARN_ON_ONCE(!list_empty(&subsys->namespaces));
+
+ ida_destroy(&subsys->cntlid_ida);
+ kfree(subsys->subsysnqn);
+ kfree(subsys);
+}
+
+void nvmet_subsys_put(struct nvmet_subsys *subsys)
+{
+ kref_put(&subsys->ref, nvmet_subsys_free);
+}
+
+static int __init nvmet_init(void)
+{
+ int error;
+
+ error = nvmet_init_discovery();
+ if (error)
+ goto out;
+
+ error = nvmet_init_configfs();
+ if (error)
+ goto out_exit_discovery;
+ return 0;
+
+out_exit_discovery:
+ nvmet_exit_discovery();
+out:
+ return error;
+}
+
+static void __exit nvmet_exit(void)
+{
+ nvmet_exit_configfs();
+ nvmet_exit_discovery();
+
+ BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
+ BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
+}
+
+module_init(nvmet_init);
+module_exit(nvmet_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
new file mode 100644
index 000000000..6f65646e8
--- /dev/null
+++ b/drivers/nvme/target/discovery.c
@@ -0,0 +1,221 @@
+/*
+ * Discovery service for the NVMe over Fabrics target.
+ * Copyright (C) 2016 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/slab.h>
+#include <generated/utsrelease.h>
+#include "nvmet.h"
+
+struct nvmet_subsys *nvmet_disc_subsys;
+
+u64 nvmet_genctr;
+
+void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port)
+{
+ down_write(&nvmet_config_sem);
+ if (list_empty(&port->entry)) {
+ list_add_tail(&port->entry, &parent->referrals);
+ port->enabled = true;
+ nvmet_genctr++;
+ }
+ up_write(&nvmet_config_sem);
+}
+
+void nvmet_referral_disable(struct nvmet_port *port)
+{
+ down_write(&nvmet_config_sem);
+ if (!list_empty(&port->entry)) {
+ port->enabled = false;
+ list_del_init(&port->entry);
+ nvmet_genctr++;
+ }
+ up_write(&nvmet_config_sem);
+}
+
+static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
+ struct nvmet_port *port, char *subsys_nqn, u8 type, u32 numrec)
+{
+ struct nvmf_disc_rsp_page_entry *e = &hdr->entries[numrec];
+
+ e->trtype = port->disc_addr.trtype;
+ e->adrfam = port->disc_addr.adrfam;
+ e->treq = port->disc_addr.treq;
+ e->portid = port->disc_addr.portid;
+ /* we support only dynamic controllers */
+ e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
+ e->asqsz = cpu_to_le16(NVMF_AQ_DEPTH);
+ e->nqntype = type;
+ memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
+ memcpy(e->traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
+ memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
+ memcpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE);
+}
+
+static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
+{
+ const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmf_disc_rsp_page_hdr *hdr;
+ size_t data_len = nvmet_get_log_page_len(req->cmd);
+ size_t alloc_len = max(data_len, sizeof(*hdr));
+ int residual_len = data_len - sizeof(*hdr);
+ struct nvmet_subsys_link *p;
+ struct nvmet_port *r;
+ u32 numrec = 0;
+ u16 status = 0;
+
+ /*
+ * Make sure we pass at least a buffer of response header size.
+ * If the host-provided data length is less than the header size,
+ * only the number of bytes requested by the host will be sent
+ * back.
+ */
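+ /*
+ * Worked example: the header and each entry are both 1024 bytes
+ * (see the BUILD_BUG_ONs in nvmet_exit()), so a host asking for
+ * 4096 bytes gets the header plus up to three entries, while
+ * numrec still counts every eligible entry so the host can retry
+ * with a large enough buffer.
+ */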
+ hdr = kzalloc(alloc_len, GFP_KERNEL);
+ if (!hdr) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ down_read(&nvmet_config_sem);
+ list_for_each_entry(p, &req->port->subsystems, entry) {
+ if (!nvmet_host_allowed(req, p->subsys, ctrl->hostnqn))
+ continue;
+ if (residual_len >= entry_size) {
+ nvmet_format_discovery_entry(hdr, req->port,
+ p->subsys->subsysnqn,
+ NVME_NQN_NVME, numrec);
+ residual_len -= entry_size;
+ }
+ numrec++;
+ }
+
+ list_for_each_entry(r, &req->port->referrals, entry) {
+ if (residual_len >= entry_size) {
+ nvmet_format_discovery_entry(hdr, r,
+ NVME_DISC_SUBSYS_NAME,
+ NVME_NQN_DISC, numrec);
+ residual_len -= entry_size;
+ }
+ numrec++;
+ }
+
+ hdr->genctr = cpu_to_le64(nvmet_genctr);
+ hdr->numrec = cpu_to_le64(numrec);
+ hdr->recfmt = cpu_to_le16(0);
+
+ up_read(&nvmet_config_sem);
+
+ status = nvmet_copy_to_sgl(req, 0, hdr, data_len);
+ kfree(hdr);
+out:
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_identify_disc_ctrl(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvme_id_ctrl *id;
+ u16 status = 0;
+
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id) {
+ status = NVME_SC_INTERNAL;
+ goto out;
+ }
+
+ memset(id->fr, ' ', sizeof(id->fr));
+ memcpy(id->fr, UTS_RELEASE, min(sizeof(id->fr), strlen(UTS_RELEASE)));
+
+ /* no limit on data transfer sizes for now */
+ id->mdts = 0;
+ id->cntlid = cpu_to_le16(ctrl->cntlid);
+ id->ver = cpu_to_le32(ctrl->subsys->ver);
+ id->lpa = (1 << 2);
+
+ /* no enforced soft limit for maxcmd - pick an arbitrary high value */
+ id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
+
+ id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
+ if (ctrl->ops->has_keyed_sgls)
+ id->sgls |= cpu_to_le32(1 << 2);
+ if (ctrl->ops->sqe_inline_size)
+ id->sgls |= cpu_to_le32(1 << 20);
+
+ strcpy(id->subnqn, ctrl->subsys->subsysnqn);
+
+ status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+
+ kfree(id);
+out:
+ nvmet_req_complete(req, status);
+}
+
+int nvmet_parse_discovery_cmd(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ req->ns = NULL;
+
+ if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
+ pr_err("nvmet: got cmd %d while not ready\n",
+ cmd->common.opcode);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+
+ switch (cmd->common.opcode) {
+ case nvme_admin_get_log_page:
+ req->data_len = nvmet_get_log_page_len(cmd);
+
+ switch (cmd->get_log_page.lid) {
+ case NVME_LOG_DISC:
+ req->execute = nvmet_execute_get_disc_log_page;
+ return 0;
+ default:
+ pr_err("nvmet: unsupported get_log_page lid %d\n",
+ cmd->get_log_page.lid);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+ case nvme_admin_identify:
+ req->data_len = 4096;
+ switch (le32_to_cpu(cmd->identify.cns)) {
+ case 0x01:
+ req->execute =
+ nvmet_execute_identify_disc_ctrl;
+ return 0;
+ default:
+ pr_err("nvmet: unsupported identify cns %d\n",
+ le32_to_cpu(cmd->identify.cns));
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+ default:
+ pr_err("nvmet: unsupported cmd %d\n",
+ cmd->common.opcode);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+}
+
+int __init nvmet_init_discovery(void)
+{
+ nvmet_disc_subsys =
+ nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_DISC);
+ if (!nvmet_disc_subsys)
+ return -ENOMEM;
+ return 0;
+}
+
+void nvmet_exit_discovery(void)
+{
+ nvmet_subsys_put(nvmet_disc_subsys);
+}
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
new file mode 100644
index 000000000..9a97ae67e
--- /dev/null
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -0,0 +1,240 @@
+/*
+ * NVMe Fabrics command implementation.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/blkdev.h>
+#include "nvmet.h"
+
+static void nvmet_execute_prop_set(struct nvmet_req *req)
+{
+ u16 status = 0;
+
+ if (!(req->cmd->prop_set.attrib & 1)) {
+ u64 val = le64_to_cpu(req->cmd->prop_set.value);
+
+ switch (le32_to_cpu(req->cmd->prop_set.offset)) {
+ case NVME_REG_CC:
+ nvmet_update_cc(req->sq->ctrl, val);
+ break;
+ default:
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ break;
+ }
+ } else {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_prop_get(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u16 status = 0;
+ u64 val = 0;
+
+ if (req->cmd->prop_get.attrib & 1) {
+ switch (le32_to_cpu(req->cmd->prop_get.offset)) {
+ case NVME_REG_CAP:
+ val = ctrl->cap;
+ break;
+ default:
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ break;
+ }
+ } else {
+ switch (le32_to_cpu(req->cmd->prop_get.offset)) {
+ case NVME_REG_VS:
+ val = ctrl->subsys->ver;
+ break;
+ case NVME_REG_CC:
+ val = ctrl->cc;
+ break;
+ case NVME_REG_CSTS:
+ val = ctrl->csts;
+ break;
+ default:
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ break;
+ }
+ }
+
+ req->rsp->result64 = cpu_to_le64(val);
+ nvmet_req_complete(req, status);
+}
+
+int nvmet_parse_fabrics_cmd(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ req->ns = NULL;
+
+ switch (cmd->fabrics.fctype) {
+ case nvme_fabrics_type_property_set:
+ req->data_len = 0;
+ req->execute = nvmet_execute_prop_set;
+ break;
+ case nvme_fabrics_type_property_get:
+ req->data_len = 0;
+ req->execute = nvmet_execute_prop_get;
+ break;
+ default:
+ pr_err("received unknown capsule type 0x%x\n",
+ cmd->fabrics.fctype);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+
+ return 0;
+}
+
+static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
+{
+ struct nvmf_connect_command *c = &req->cmd->connect;
+ u16 qid = le16_to_cpu(c->qid);
+ u16 sqsize = le16_to_cpu(c->sqsize);
+ struct nvmet_ctrl *old;
+
+ old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
+ if (old) {
+ pr_warn("queue already connected!\n");
+ return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
+ }
+
+ nvmet_cq_setup(ctrl, req->cq, qid, sqsize);
+ nvmet_sq_setup(ctrl, req->sq, qid, sqsize);
+ return 0;
+}
+
+static void nvmet_execute_admin_connect(struct nvmet_req *req)
+{
+ struct nvmf_connect_command *c = &req->cmd->connect;
+ struct nvmf_connect_data *d;
+ struct nvmet_ctrl *ctrl = NULL;
+ u16 status = 0;
+
+ d = kmap(sg_page(req->sg)) + req->sg->offset;
+
+ /* zero out initial completion result, assign values as needed */
+ req->rsp->result = 0;
+
+ if (c->recfmt != 0) {
+ pr_warn("invalid connect version (%d).\n",
+ le16_to_cpu(c->recfmt));
+ status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
+ goto out;
+ }
+
+ if (unlikely(d->cntlid != cpu_to_le16(0xffff))) {
+ pr_warn("connect attempt for invalid controller ID %#x\n",
+ d->cntlid);
+ status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ req->rsp->result = IPO_IATTR_CONNECT_DATA(cntlid);
+ goto out;
+ }
+
+ status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
+ le32_to_cpu(c->kato), &ctrl);
+ if (status)
+ goto out;
+
+ status = nvmet_install_queue(ctrl, req);
+ if (status) {
+ nvmet_ctrl_put(ctrl);
+ goto out;
+ }
+
+ pr_info("creating controller %d for NQN %s.\n",
+ ctrl->cntlid, ctrl->hostnqn);
+ req->rsp->result16 = cpu_to_le16(ctrl->cntlid);
+
+out:
+ kunmap(sg_page(req->sg));
+ nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_io_connect(struct nvmet_req *req)
+{
+ struct nvmf_connect_command *c = &req->cmd->connect;
+ struct nvmf_connect_data *d;
+ struct nvmet_ctrl *ctrl = NULL;
+ u16 qid = le16_to_cpu(c->qid);
+ u16 status = 0;
+
+ d = kmap(sg_page(req->sg)) + req->sg->offset;
+
+ /* zero out initial completion result, assign values as needed */
+ req->rsp->result = 0;
+
+ if (c->recfmt != 0) {
+ pr_warn("invalid connect version (%d).\n",
+ le16_to_cpu(c->recfmt));
+ status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
+ goto out;
+ }
+
+ status = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
+ le16_to_cpu(d->cntlid),
+ req, &ctrl);
+ if (status)
+ goto out;
+
+ if (unlikely(qid > ctrl->subsys->max_qid)) {
+ pr_warn("invalid queue id (%d)\n", qid);
+ status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ req->rsp->result = IPO_IATTR_CONNECT_SQE(qid);
+ goto out_ctrl_put;
+ }
+
+ status = nvmet_install_queue(ctrl, req);
+ if (status) {
+ /* pass back the cntlid for which queue installation failed */
+ req->rsp->result16 = cpu_to_le16(ctrl->cntlid);
+ goto out_ctrl_put;
+ }
+
+ pr_info("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
+
+out:
+ kunmap(sg_page(req->sg));
+ nvmet_req_complete(req, status);
+ return;
+
+out_ctrl_put:
+ nvmet_ctrl_put(ctrl);
+ goto out;
+}
+
+int nvmet_parse_connect_cmd(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ req->ns = NULL;
+
+ if (req->cmd->common.opcode != nvme_fabrics_command) {
+ pr_err("invalid command 0x%x on unconnected queue.\n",
+ cmd->fabrics.opcode);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+ if (cmd->fabrics.fctype != nvme_fabrics_type_connect) {
+ pr_err("invalid capsule type 0x%x on unconnected queue.\n",
+ cmd->fabrics.fctype);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+
+ req->data_len = sizeof(struct nvmf_connect_data);
+ if (cmd->connect.qid == 0)
+ req->execute = nvmet_execute_admin_connect;
+ else
+ req->execute = nvmet_execute_io_connect;
+ return 0;
+}
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c
new file mode 100644
index 000000000..2cd069b69
--- /dev/null
+++ b/drivers/nvme/target/io-cmd.c
@@ -0,0 +1,215 @@
+/*
+ * NVMe I/O command implementation.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/blkdev.h>
+#include <linux/module.h>
+#include "nvmet.h"
+
+static void nvmet_bio_done(struct bio *bio)
+{
+ struct nvmet_req *req = bio->bi_private;
+
+ nvmet_req_complete(req,
+ bio->bi_error ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
+
+ if (bio != &req->inline_bio)
+ bio_put(bio);
+}
+
+static inline u32 nvmet_rw_len(struct nvmet_req *req)
+{
+ return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
+ req->ns->blksize_shift;
+}
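+
+/*
+ * NVMe encodes the transfer length 0's based, hence the +1 above.
+ * Worked example: rw.length == 7 on a 512-byte-block namespace
+ * (blksize_shift == 9) means (7 + 1) << 9 == 4096 bytes.
+ */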
+
+static void nvmet_inline_bio_init(struct nvmet_req *req)
+{
+ struct bio *bio = &req->inline_bio;
+
+ bio_init(bio);
+ bio->bi_max_vecs = NVMET_MAX_INLINE_BIOVEC;
+ bio->bi_io_vec = req->inline_bvec;
+}
+
+static void nvmet_execute_rw(struct nvmet_req *req)
+{
+ int sg_cnt = req->sg_cnt;
+ struct scatterlist *sg;
+ struct bio *bio;
+ sector_t sector;
+ blk_qc_t cookie;
+ int op, op_flags = 0, i;
+
+ if (!req->sg_cnt) {
+ nvmet_req_complete(req, 0);
+ return;
+ }
+
+ if (req->cmd->rw.opcode == nvme_cmd_write) {
+ op = REQ_OP_WRITE;
+ if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
+ op_flags |= REQ_FUA;
+ } else {
+ op = REQ_OP_READ;
+ }
+
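+ /* scale the starting LBA from namespace block size to 512-byte sectors */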
+ sector = le64_to_cpu(req->cmd->rw.slba);
+ sector <<= (req->ns->blksize_shift - 9);
+
+ nvmet_inline_bio_init(req);
+ bio = &req->inline_bio;
+ bio->bi_bdev = req->ns->bdev;
+ bio->bi_iter.bi_sector = sector;
+ bio->bi_private = req;
+ bio->bi_end_io = nvmet_bio_done;
+ bio_set_op_attrs(bio, op, op_flags);
+
+ for_each_sg(req->sg, sg, req->sg_cnt, i) {
+ while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
+ != sg->length) {
+ struct bio *prev = bio;
+
+ bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
+ bio->bi_bdev = req->ns->bdev;
+ bio->bi_iter.bi_sector = sector;
+ bio_set_op_attrs(bio, op, op_flags);
+
+ bio_chain(bio, prev);
+ cookie = submit_bio(prev);
+ }
+
+ sector += sg->length >> 9;
+ sg_cnt--;
+ }
+
+ cookie = submit_bio(bio);
+
+ blk_poll(bdev_get_queue(req->ns->bdev), cookie);
+}
+
+static void nvmet_execute_flush(struct nvmet_req *req)
+{
+ struct bio *bio;
+
+ nvmet_inline_bio_init(req);
+ bio = &req->inline_bio;
+
+ bio->bi_bdev = req->ns->bdev;
+ bio->bi_private = req;
+ bio->bi_end_io = nvmet_bio_done;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+
+ submit_bio(bio);
+}
+
+static u16 nvmet_discard_range(struct nvmet_ns *ns,
+ struct nvme_dsm_range *range, struct bio **bio)
+{
+ if (__blkdev_issue_discard(ns->bdev,
+ le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
+ le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
+ GFP_KERNEL, 0, bio))
+ return NVME_SC_INTERNAL | NVME_SC_DNR;
+ return 0;
+}
+
+static void nvmet_execute_discard(struct nvmet_req *req)
+{
+ struct nvme_dsm_range range;
+ struct bio *bio = NULL;
+ int i;
+ u16 status;
+
+ for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
+ status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
+ sizeof(range));
+ if (status)
+ break;
+
+ status = nvmet_discard_range(req->ns, &range, &bio);
+ if (status)
+ break;
+ }
+
+ if (bio) {
+ bio->bi_private = req;
+ bio->bi_end_io = nvmet_bio_done;
+ if (status) {
+ bio->bi_error = -EIO;
+ bio_endio(bio);
+ } else {
+ submit_bio(bio);
+ }
+ } else {
+ nvmet_req_complete(req, status);
+ }
+}
+
+static void nvmet_execute_dsm(struct nvmet_req *req)
+{
+ switch (le32_to_cpu(req->cmd->dsm.attributes)) {
+ case NVME_DSMGMT_AD:
+ nvmet_execute_discard(req);
+ return;
+ case NVME_DSMGMT_IDR:
+ case NVME_DSMGMT_IDW:
+ default:
+ /* Not supported yet */
+ nvmet_req_complete(req, 0);
+ return;
+ }
+}
+
+int nvmet_parse_io_cmd(struct nvmet_req *req)
+{
+ struct nvme_command *cmd = req->cmd;
+
+ if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
+ pr_err("nvmet: got io cmd %d while CC.EN == 0\n",
+ cmd->common.opcode);
+ req->ns = NULL;
+ return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+ }
+
+ if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
+ pr_err("nvmet: got io cmd %d while CSTS.RDY == 0\n",
+ cmd->common.opcode);
+ req->ns = NULL;
+ return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+ }
+
+ req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
+ if (!req->ns)
+ return NVME_SC_INVALID_NS | NVME_SC_DNR;
+
+ switch (cmd->common.opcode) {
+ case nvme_cmd_read:
+ case nvme_cmd_write:
+ req->execute = nvmet_execute_rw;
+ req->data_len = nvmet_rw_len(req);
+ return 0;
+ case nvme_cmd_flush:
+ req->execute = nvmet_execute_flush;
+ req->data_len = 0;
+ return 0;
+ case nvme_cmd_dsm:
+ req->execute = nvmet_execute_dsm;
+ req->data_len = le32_to_cpu(cmd->dsm.nr) *
+ sizeof(struct nvme_dsm_range);
+ return 0;
+ default:
+ pr_err("nvmet: unhandled cmd %d\n", cmd->common.opcode);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+}
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
new file mode 100644
index 000000000..395e60dad
--- /dev/null
+++ b/drivers/nvme/target/loop.c
@@ -0,0 +1,752 @@
+/*
+ * NVMe over Fabrics loopback device.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/scatterlist.h>
+#include <linux/delay.h>
+#include <linux/blk-mq.h>
+#include <linux/nvme.h>
+#include <linux/module.h>
+#include <linux/parser.h>
+#include <linux/t10-pi.h>
+#include "nvmet.h"
+#include "../host/nvme.h"
+#include "../host/fabrics.h"
+
+#define NVME_LOOP_AQ_DEPTH 256
+
+#define NVME_LOOP_MAX_SEGMENTS 256
+
+/*
+ * We handle AEN commands ourselves and don't even let the
+ * block layer know about them.
+ */
+#define NVME_LOOP_NR_AEN_COMMANDS 1
+#define NVME_LOOP_AQ_BLKMQ_DEPTH \
+ (NVME_LOOP_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)
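+/*
+ * With the values above blk-mq owns tags 0..254 and command id 255 is
+ * reserved for the single AEN request (see the command_id check in
+ * nvme_loop_queue_response()).
+ */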
+
+struct nvme_loop_iod {
+ struct nvme_command cmd;
+ struct nvme_completion rsp;
+ struct nvmet_req req;
+ struct nvme_loop_queue *queue;
+ struct work_struct work;
+ struct sg_table sg_table;
+ struct scatterlist first_sgl[];
+};
+
+struct nvme_loop_ctrl {
+ spinlock_t lock;
+ struct nvme_loop_queue *queues;
+ u32 queue_count;
+
+ struct blk_mq_tag_set admin_tag_set;
+
+ struct list_head list;
+ u64 cap;
+ struct blk_mq_tag_set tag_set;
+ struct nvme_loop_iod async_event_iod;
+ struct nvme_ctrl ctrl;
+
+ struct nvmet_ctrl *target_ctrl;
+ struct work_struct delete_work;
+ struct work_struct reset_work;
+};
+
+static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
+{
+ return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
+}
+
+struct nvme_loop_queue {
+ struct nvmet_cq nvme_cq;
+ struct nvmet_sq nvme_sq;
+ struct nvme_loop_ctrl *ctrl;
+};
+
+static struct nvmet_port *nvmet_loop_port;
+
+static LIST_HEAD(nvme_loop_ctrl_list);
+static DEFINE_MUTEX(nvme_loop_ctrl_mutex);
+
+static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
+static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);
+
+static struct nvmet_fabrics_ops nvme_loop_ops;
+
+static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
+{
+ return queue - queue->ctrl->queues;
+}
+
+static void nvme_loop_complete_rq(struct request *req)
+{
+ struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
+ int error = 0;
+
+ nvme_cleanup_cmd(req);
+ sg_free_table_chained(&iod->sg_table, true);
+
+ if (unlikely(req->errors)) {
+ if (nvme_req_needs_retry(req, req->errors)) {
+ nvme_requeue_req(req);
+ return;
+ }
+
+ if (req->cmd_type == REQ_TYPE_DRV_PRIV)
+ error = req->errors;
+ else
+ error = nvme_error_status(req->errors);
+ }
+
+ blk_mq_end_request(req, error);
+}
+
+static void nvme_loop_queue_response(struct nvmet_req *nvme_req)
+{
+ struct nvme_loop_iod *iod =
+ container_of(nvme_req, struct nvme_loop_iod, req);
+ struct nvme_completion *cqe = &iod->rsp;
+
+ /*
+ * AEN requests are special as they don't time out and can
+ * survive any kind of queue freeze and often don't respond to
+ * aborts. We don't even bother to allocate a struct request
+ * for them but rather special case them here.
+ */
+ if (unlikely(nvme_loop_queue_idx(iod->queue) == 0 &&
+ cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
+ nvme_complete_async_event(&iod->queue->ctrl->ctrl, cqe);
+ } else {
+ struct request *req = blk_mq_rq_from_pdu(iod);
+
+ if (req->cmd_type == REQ_TYPE_DRV_PRIV && req->special)
+ memcpy(req->special, cqe, sizeof(*cqe));
+ blk_mq_complete_request(req, le16_to_cpu(cqe->status) >> 1);
+ }
+}
+
+static void nvme_loop_execute_work(struct work_struct *work)
+{
+ struct nvme_loop_iod *iod =
+ container_of(work, struct nvme_loop_iod, work);
+
+ iod->req.execute(&iod->req);
+}
+
+static enum blk_eh_timer_return
+nvme_loop_timeout(struct request *rq, bool reserved)
+{
+ struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);
+
+ /* queue error recovery */
+ schedule_work(&iod->queue->ctrl->reset_work);
+
+ /* fail with DNR on admin cmd timeout */
+ rq->errors = NVME_SC_ABORT_REQ | NVME_SC_DNR;
+
+ return BLK_EH_HANDLED;
+}
+
+static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct nvme_ns *ns = hctx->queue->queuedata;
+ struct nvme_loop_queue *queue = hctx->driver_data;
+ struct request *req = bd->rq;
+ struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
+ int ret;
+
+ ret = nvme_setup_cmd(ns, req, &iod->cmd);
+ if (ret)
+ return ret;
+
+ iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
+ iod->req.port = nvmet_loop_port;
+ if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
+ &queue->nvme_sq, &nvme_loop_ops)) {
+ nvme_cleanup_cmd(req);
+ blk_mq_start_request(req);
+ nvme_loop_queue_response(&iod->req);
+ return 0;
+ }
+
+ if (blk_rq_bytes(req)) {
+ iod->sg_table.sgl = iod->first_sgl;
+ ret = sg_alloc_table_chained(&iod->sg_table,
+ req->nr_phys_segments, iod->sg_table.sgl);
+ if (ret)
+ return BLK_MQ_RQ_QUEUE_BUSY;
+
+ iod->req.sg = iod->sg_table.sgl;
+ iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
+ BUG_ON(iod->req.sg_cnt > req->nr_phys_segments);
+ }
+
+ iod->cmd.common.command_id = req->tag;
+ blk_mq_start_request(req);
+
+ schedule_work(&iod->work);
+ return 0;
+}
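+
+/*
+ * For orientation, the round trip of a host command through the loop
+ * transport is roughly (a sketch; nvmet_req_complete() lives in the
+ * nvmet core):
+ *
+ *	nvme_loop_queue_rq()
+ *	  -> nvme_loop_execute_work()		(via iod->work)
+ *	    -> iod->req.execute()		(backend performs the I/O)
+ *	      -> nvmet_req_complete()
+ *	        -> nvme_loop_queue_response()
+ *	          -> blk_mq_complete_request()
+ *	            -> nvme_loop_complete_rq()
+ */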
+
+static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
+{
+ struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
+ struct nvme_loop_queue *queue = &ctrl->queues[0];
+ struct nvme_loop_iod *iod = &ctrl->async_event_iod;
+
+ memset(&iod->cmd, 0, sizeof(iod->cmd));
+ iod->cmd.common.opcode = nvme_admin_async_event;
+ iod->cmd.common.command_id = NVME_LOOP_AQ_BLKMQ_DEPTH;
+ iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
+
+ if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
+ &nvme_loop_ops)) {
+ dev_err(ctrl->ctrl.device, "failed async event work\n");
+ return;
+ }
+
+ schedule_work(&iod->work);
+}
+
+static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
+ struct nvme_loop_iod *iod, unsigned int queue_idx)
+{
+ BUG_ON(queue_idx >= ctrl->queue_count);
+
+ iod->req.cmd = &iod->cmd;
+ iod->req.rsp = &iod->rsp;
+ iod->queue = &ctrl->queues[queue_idx];
+ INIT_WORK(&iod->work, nvme_loop_execute_work);
+ return 0;
+}
+
+static int nvme_loop_init_request(void *data, struct request *req,
+ unsigned int hctx_idx, unsigned int rq_idx,
+ unsigned int numa_node)
+{
+ return nvme_loop_init_iod(data, blk_mq_rq_to_pdu(req), hctx_idx + 1);
+}
+
+static int nvme_loop_init_admin_request(void *data, struct request *req,
+ unsigned int hctx_idx, unsigned int rq_idx,
+ unsigned int numa_node)
+{
+ return nvme_loop_init_iod(data, blk_mq_rq_to_pdu(req), 0);
+}
+
+static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ struct nvme_loop_ctrl *ctrl = data;
+ struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];
+
+ BUG_ON(hctx_idx >= ctrl->queue_count);
+
+ hctx->driver_data = queue;
+ return 0;
+}
+
+static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ struct nvme_loop_ctrl *ctrl = data;
+ struct nvme_loop_queue *queue = &ctrl->queues[0];
+
+ BUG_ON(hctx_idx != 0);
+
+ hctx->driver_data = queue;
+ return 0;
+}
+
+static struct blk_mq_ops nvme_loop_mq_ops = {
+ .queue_rq = nvme_loop_queue_rq,
+ .complete = nvme_loop_complete_rq,
+ .map_queue = blk_mq_map_queue,
+ .init_request = nvme_loop_init_request,
+ .init_hctx = nvme_loop_init_hctx,
+ .timeout = nvme_loop_timeout,
+};
+
+static struct blk_mq_ops nvme_loop_admin_mq_ops = {
+ .queue_rq = nvme_loop_queue_rq,
+ .complete = nvme_loop_complete_rq,
+ .map_queue = blk_mq_map_queue,
+ .init_request = nvme_loop_init_admin_request,
+ .init_hctx = nvme_loop_init_admin_hctx,
+ .timeout = nvme_loop_timeout,
+};
+
+static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
+{
+ blk_cleanup_queue(ctrl->ctrl.admin_q);
+ blk_mq_free_tag_set(&ctrl->admin_tag_set);
+ nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
+}
+
+static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
+{
+ struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
+
+ if (list_empty(&ctrl->list))
+ goto free_ctrl;
+
+ mutex_lock(&nvme_loop_ctrl_mutex);
+ list_del(&ctrl->list);
+ mutex_unlock(&nvme_loop_ctrl_mutex);
+
+ if (nctrl->tagset) {
+ blk_cleanup_queue(ctrl->ctrl.connect_q);
+ blk_mq_free_tag_set(&ctrl->tag_set);
+ }
+ kfree(ctrl->queues);
+ nvmf_free_options(nctrl->opts);
+free_ctrl:
+ kfree(ctrl);
+}
+
+static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
+{
+ int error;
+
+ memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
+ ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
+ ctrl->admin_tag_set.queue_depth = NVME_LOOP_AQ_BLKMQ_DEPTH;
+ ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
+ ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
+ ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
+ SG_CHUNK_SIZE * sizeof(struct scatterlist);
+ ctrl->admin_tag_set.driver_data = ctrl;
+ ctrl->admin_tag_set.nr_hw_queues = 1;
+ ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
+
+ ctrl->queues[0].ctrl = ctrl;
+ error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
+ if (error)
+ return error;
+ ctrl->queue_count = 1;
+
+ error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
+ if (error)
+ goto out_free_sq;
+
+ ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
+ if (IS_ERR(ctrl->ctrl.admin_q)) {
+ error = PTR_ERR(ctrl->ctrl.admin_q);
+ goto out_free_tagset;
+ }
+
+ error = nvmf_connect_admin_queue(&ctrl->ctrl);
+ if (error)
+ goto out_cleanup_queue;
+
+ error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
+ if (error) {
+ dev_err(ctrl->ctrl.device,
+ "prop_get NVME_REG_CAP failed\n");
+ goto out_cleanup_queue;
+ }
+
+ ctrl->ctrl.sqsize =
+ min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
+
+ error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
+ if (error)
+ goto out_cleanup_queue;
+
+ ctrl->ctrl.max_hw_sectors =
+ (NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);
+
+ error = nvme_init_identify(&ctrl->ctrl);
+ if (error)
+ goto out_cleanup_queue;
+
+ nvme_start_keep_alive(&ctrl->ctrl);
+
+ return 0;
+
+out_cleanup_queue:
+ blk_cleanup_queue(ctrl->ctrl.admin_q);
+out_free_tagset:
+ blk_mq_free_tag_set(&ctrl->admin_tag_set);
+out_free_sq:
+ nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
+ return error;
+}
+
+static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
+{
+ int i;
+
+ nvme_stop_keep_alive(&ctrl->ctrl);
+
+ if (ctrl->queue_count > 1) {
+ nvme_stop_queues(&ctrl->ctrl);
+ blk_mq_tagset_busy_iter(&ctrl->tag_set,
+ nvme_cancel_request, &ctrl->ctrl);
+
+ for (i = 1; i < ctrl->queue_count; i++)
+ nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+ }
+
+ if (ctrl->ctrl.state == NVME_CTRL_LIVE)
+ nvme_shutdown_ctrl(&ctrl->ctrl);
+
+ blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
+ blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
+ nvme_cancel_request, &ctrl->ctrl);
+ nvme_loop_destroy_admin_queue(ctrl);
+}
+
+static void nvme_loop_del_ctrl_work(struct work_struct *work)
+{
+ struct nvme_loop_ctrl *ctrl = container_of(work,
+ struct nvme_loop_ctrl, delete_work);
+
+ nvme_uninit_ctrl(&ctrl->ctrl);
+ nvme_loop_shutdown_ctrl(ctrl);
+ nvme_put_ctrl(&ctrl->ctrl);
+}
+
+static int __nvme_loop_del_ctrl(struct nvme_loop_ctrl *ctrl)
+{
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
+ return -EBUSY;
+
+ if (!schedule_work(&ctrl->delete_work))
+ return -EBUSY;
+
+ return 0;
+}
+
+static int nvme_loop_del_ctrl(struct nvme_ctrl *nctrl)
+{
+ struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
+ int ret;
+
+ ret = __nvme_loop_del_ctrl(ctrl);
+ if (ret)
+ return ret;
+
+ flush_work(&ctrl->delete_work);
+
+ return 0;
+}
+
+static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
+{
+ struct nvme_loop_ctrl *ctrl;
+
+ mutex_lock(&nvme_loop_ctrl_mutex);
+ list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
+ if (ctrl->ctrl.cntlid == nctrl->cntlid)
+ __nvme_loop_del_ctrl(ctrl);
+ }
+ mutex_unlock(&nvme_loop_ctrl_mutex);
+}
+
+static void nvme_loop_reset_ctrl_work(struct work_struct *work)
+{
+ struct nvme_loop_ctrl *ctrl = container_of(work,
+ struct nvme_loop_ctrl, reset_work);
+ bool changed;
+ int i, ret;
+
+ nvme_loop_shutdown_ctrl(ctrl);
+
+ ret = nvme_loop_configure_admin_queue(ctrl);
+ if (ret)
+ goto out_disable;
+
+ for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
+ ctrl->queues[i].ctrl = ctrl;
+ ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
+ if (ret)
+ goto out_free_queues;
+
+ ctrl->queue_count++;
+ }
+
+ for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
+ ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
+ if (ret)
+ goto out_free_queues;
+ }
+
+ changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+ WARN_ON_ONCE(!changed);
+
+ nvme_queue_scan(&ctrl->ctrl);
+ nvme_queue_async_events(&ctrl->ctrl);
+
+ nvme_start_queues(&ctrl->ctrl);
+
+ return;
+
+out_free_queues:
+ for (i = 1; i < ctrl->queue_count; i++)
+ nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+ nvme_loop_destroy_admin_queue(ctrl);
+out_disable:
+ dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
+ nvme_uninit_ctrl(&ctrl->ctrl);
+ nvme_put_ctrl(&ctrl->ctrl);
+}
+
+static int nvme_loop_reset_ctrl(struct nvme_ctrl *nctrl)
+{
+ struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
+
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
+ return -EBUSY;
+
+ if (!schedule_work(&ctrl->reset_work))
+ return -EBUSY;
+
+ flush_work(&ctrl->reset_work);
+
+ return 0;
+}
+
+static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
+ .name = "loop",
+ .module = THIS_MODULE,
+ .is_fabrics = true,
+ .reg_read32 = nvmf_reg_read32,
+ .reg_read64 = nvmf_reg_read64,
+ .reg_write32 = nvmf_reg_write32,
+ .reset_ctrl = nvme_loop_reset_ctrl,
+ .free_ctrl = nvme_loop_free_ctrl,
+ .submit_async_event = nvme_loop_submit_async_event,
+ .delete_ctrl = nvme_loop_del_ctrl,
+ .get_subsysnqn = nvmf_get_subsysnqn,
+};
+
+static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
+{
+ struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+ int ret, i;
+
+ ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
+ if (ret || !opts->nr_io_queues)
+ return ret;
+
+ dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
+ opts->nr_io_queues);
+
+ for (i = 1; i <= opts->nr_io_queues; i++) {
+ ctrl->queues[i].ctrl = ctrl;
+ ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
+ if (ret)
+ goto out_destroy_queues;
+
+ ctrl->queue_count++;
+ }
+
+ memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
+ ctrl->tag_set.ops = &nvme_loop_mq_ops;
+ ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
+ ctrl->tag_set.reserved_tags = 1; /* fabric connect */
+ ctrl->tag_set.numa_node = NUMA_NO_NODE;
+ ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
+ SG_CHUNK_SIZE * sizeof(struct scatterlist);
+ ctrl->tag_set.driver_data = ctrl;
+ ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
+ ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
+ ctrl->ctrl.tagset = &ctrl->tag_set;
+
+ ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
+ if (ret)
+ goto out_destroy_queues;
+
+ ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
+ if (IS_ERR(ctrl->ctrl.connect_q)) {
+ ret = PTR_ERR(ctrl->ctrl.connect_q);
+ goto out_free_tagset;
+ }
+
+ for (i = 1; i <= opts->nr_io_queues; i++) {
+ ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
+ if (ret)
+ goto out_cleanup_connect_q;
+ }
+
+ return 0;
+
+out_cleanup_connect_q:
+ blk_cleanup_queue(ctrl->ctrl.connect_q);
+out_free_tagset:
+ blk_mq_free_tag_set(&ctrl->tag_set);
+out_destroy_queues:
+ for (i = 1; i < ctrl->queue_count; i++)
+ nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+ return ret;
+}
+
+static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
+ struct nvmf_ctrl_options *opts)
+{
+ struct nvme_loop_ctrl *ctrl;
+ bool changed;
+ int ret;
+
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return ERR_PTR(-ENOMEM);
+ ctrl->ctrl.opts = opts;
+ INIT_LIST_HEAD(&ctrl->list);
+
+ INIT_WORK(&ctrl->delete_work, nvme_loop_del_ctrl_work);
+ INIT_WORK(&ctrl->reset_work, nvme_loop_reset_ctrl_work);
+
+ ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
+ 0 /* no quirks, we're perfect! */);
+ if (ret)
+ goto out_put_ctrl;
+
+ spin_lock_init(&ctrl->lock);
+
+ ret = -ENOMEM;
+
+ ctrl->ctrl.sqsize = opts->queue_size - 1;
+ ctrl->ctrl.kato = opts->kato;
+
+ ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
+ GFP_KERNEL);
+ if (!ctrl->queues)
+ goto out_uninit_ctrl;
+
+ ret = nvme_loop_configure_admin_queue(ctrl);
+ if (ret)
+ goto out_free_queues;
+
+ if (opts->queue_size > ctrl->ctrl.maxcmd) {
+ /* warn if maxcmd is lower than queue_size */
+ dev_warn(ctrl->ctrl.device,
+ "queue_size %zu > ctrl maxcmd %u, clamping down\n",
+ opts->queue_size, ctrl->ctrl.maxcmd);
+ opts->queue_size = ctrl->ctrl.maxcmd;
+ }
+
+ if (opts->nr_io_queues) {
+ ret = nvme_loop_create_io_queues(ctrl);
+ if (ret)
+ goto out_remove_admin_queue;
+ }
+
+ nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);
+
+ dev_info(ctrl->ctrl.device,
+ "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);
+
+ kref_get(&ctrl->ctrl.kref);
+
+ changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+ WARN_ON_ONCE(!changed);
+
+ mutex_lock(&nvme_loop_ctrl_mutex);
+ list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
+ mutex_unlock(&nvme_loop_ctrl_mutex);
+
+ if (opts->nr_io_queues) {
+ nvme_queue_scan(&ctrl->ctrl);
+ nvme_queue_async_events(&ctrl->ctrl);
+ }
+
+ return &ctrl->ctrl;
+
+out_remove_admin_queue:
+ nvme_loop_destroy_admin_queue(ctrl);
+out_free_queues:
+ kfree(ctrl->queues);
+out_uninit_ctrl:
+ nvme_uninit_ctrl(&ctrl->ctrl);
+out_put_ctrl:
+ nvme_put_ctrl(&ctrl->ctrl);
+ if (ret > 0)
+ ret = -EIO;
+ return ERR_PTR(ret);
+}
+
+static int nvme_loop_add_port(struct nvmet_port *port)
+{
+	/*
+	 * XXX: disallow adding more than one port so
+	 * there are no connection rejections when a
+	 * subsystem is assigned to a port for which
+	 * loop doesn't have a pointer.
+	 * This scenario would be possible if we allowed
+	 * more than one port to be added and a subsystem
+	 * was assigned to a port other than nvmet_loop_port.
+	 */
+
+ if (nvmet_loop_port)
+ return -EPERM;
+
+ nvmet_loop_port = port;
+ return 0;
+}
+
+static void nvme_loop_remove_port(struct nvmet_port *port)
+{
+ if (port == nvmet_loop_port)
+ nvmet_loop_port = NULL;
+}
+
+static struct nvmet_fabrics_ops nvme_loop_ops = {
+ .owner = THIS_MODULE,
+ .type = NVMF_TRTYPE_LOOP,
+ .add_port = nvme_loop_add_port,
+ .remove_port = nvme_loop_remove_port,
+ .queue_response = nvme_loop_queue_response,
+ .delete_ctrl = nvme_loop_delete_ctrl,
+};
+
+static struct nvmf_transport_ops nvme_loop_transport = {
+ .name = "loop",
+ .create_ctrl = nvme_loop_create_ctrl,
+};
+
+static int __init nvme_loop_init_module(void)
+{
+ int ret;
+
+ ret = nvmet_register_transport(&nvme_loop_ops);
+ if (ret)
+ return ret;
+ nvmf_register_transport(&nvme_loop_transport);
+ return 0;
+}
+
+static void __exit nvme_loop_cleanup_module(void)
+{
+ struct nvme_loop_ctrl *ctrl, *next;
+
+ nvmf_unregister_transport(&nvme_loop_transport);
+ nvmet_unregister_transport(&nvme_loop_ops);
+
+ mutex_lock(&nvme_loop_ctrl_mutex);
+ list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
+ __nvme_loop_del_ctrl(ctrl);
+ mutex_unlock(&nvme_loop_ctrl_mutex);
+
+ flush_scheduled_work();
+}
+
+module_init(nvme_loop_init_module);
+module_exit(nvme_loop_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
new file mode 100644
index 000000000..76b6eedcc
--- /dev/null
+++ b/drivers/nvme/target/nvmet.h
@@ -0,0 +1,332 @@
+/*
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _NVMET_H
+#define _NVMET_H
+
+#include <linux/dma-mapping.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/kref.h>
+#include <linux/percpu-refcount.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/nvme.h>
+#include <linux/configfs.h>
+#include <linux/rcupdate.h>
+#include <linux/blkdev.h>
+
+#define NVMET_ASYNC_EVENTS 4
+#define NVMET_ERROR_LOG_SLOTS 128
+
+/*
+ * Helper macros for when the NVMe error is NVME_SC_CONNECT_INVALID_PARAM.
+ * The 16-bit shift sets the IATTR bit to 1, which means the offending
+ * offset starts in the data section of the connect command.
+ */
+#define IPO_IATTR_CONNECT_DATA(x) \
+ (cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
+#define IPO_IATTR_CONNECT_SQE(x) \
+ (cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
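+
+/*
+ * Example (a sketch): a connect rejected for a bad cntlid field would
+ * report the error location as
+ *
+ *	req->rsp->result = IPO_IATTR_CONNECT_DATA(cntlid);
+ *
+ * i.e. bit 16 set (the offset lies in the connect data) plus the
+ * little-endian byte offset of cntlid within struct nvmf_connect_data.
+ */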
+
+struct nvmet_ns {
+ struct list_head dev_link;
+ struct percpu_ref ref;
+ struct block_device *bdev;
+ u32 nsid;
+ u32 blksize_shift;
+ loff_t size;
+ u8 nguid[16];
+
+ struct nvmet_subsys *subsys;
+ const char *device_path;
+
+ struct config_group device_group;
+ struct config_group group;
+
+ struct completion disable_done;
+};
+
+static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct nvmet_ns, group);
+}
+
+static inline bool nvmet_ns_enabled(struct nvmet_ns *ns)
+{
+ return !list_empty_careful(&ns->dev_link);
+}
+
+struct nvmet_cq {
+ u16 qid;
+ u16 size;
+};
+
+struct nvmet_sq {
+ struct nvmet_ctrl *ctrl;
+ struct percpu_ref ref;
+ u16 qid;
+ u16 size;
+ struct completion free_done;
+};
+
+/**
+ * struct nvmet_port - Common structure to keep port
+ * information for the target.
+ * @entry: List head for holding a list of these elements.
+ * @disc_addr: Address information is stored in a format defined
+ * for a discovery log page entry.
+ * @group: ConfigFS group for this element's folder.
+ * @subsys_group: ConfigFS group for the subsystem links.
+ * @subsystems: List of subsystems exported through this port.
+ * @referrals_group: ConfigFS group for the referral links.
+ * @referrals: List of discovery referrals.
+ * @priv: Private data for the transport.
+ * @enabled: Whether the port is currently enabled.
+ */
+struct nvmet_port {
+ struct list_head entry;
+ struct nvmf_disc_rsp_page_entry disc_addr;
+ struct config_group group;
+ struct config_group subsys_group;
+ struct list_head subsystems;
+ struct config_group referrals_group;
+ struct list_head referrals;
+ void *priv;
+ bool enabled;
+};
+
+static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct nvmet_port,
+ group);
+}
+
+struct nvmet_ctrl {
+ struct nvmet_subsys *subsys;
+ struct nvmet_cq **cqs;
+ struct nvmet_sq **sqs;
+
+ struct mutex lock;
+ u64 cap;
+ u64 serial;
+ u32 cc;
+ u32 csts;
+
+ u16 cntlid;
+ u32 kato;
+
+ struct nvmet_req *async_event_cmds[NVMET_ASYNC_EVENTS];
+ unsigned int nr_async_event_cmds;
+ struct list_head async_events;
+ struct work_struct async_event_work;
+
+ struct list_head subsys_entry;
+ struct kref ref;
+ struct delayed_work ka_work;
+ struct work_struct fatal_err_work;
+
+ struct nvmet_fabrics_ops *ops;
+
+ char subsysnqn[NVMF_NQN_FIELD_LEN];
+ char hostnqn[NVMF_NQN_FIELD_LEN];
+};
+
+struct nvmet_subsys {
+ enum nvme_subsys_type type;
+
+ struct mutex lock;
+ struct kref ref;
+
+ struct list_head namespaces;
+ unsigned int max_nsid;
+
+ struct list_head ctrls;
+ struct ida cntlid_ida;
+
+ struct list_head hosts;
+ bool allow_any_host;
+
+ u16 max_qid;
+
+ u64 ver;
+ char *subsysnqn;
+
+ struct config_group group;
+
+ struct config_group namespaces_group;
+ struct config_group allowed_hosts_group;
+};
+
+static inline struct nvmet_subsys *to_subsys(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct nvmet_subsys, group);
+}
+
+static inline struct nvmet_subsys *namespaces_to_subsys(
+ struct config_item *item)
+{
+ return container_of(to_config_group(item), struct nvmet_subsys,
+ namespaces_group);
+}
+
+struct nvmet_host {
+ struct config_group group;
+};
+
+static inline struct nvmet_host *to_host(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct nvmet_host, group);
+}
+
+static inline char *nvmet_host_name(struct nvmet_host *host)
+{
+ return config_item_name(&host->group.cg_item);
+}
+
+struct nvmet_host_link {
+ struct list_head entry;
+ struct nvmet_host *host;
+};
+
+struct nvmet_subsys_link {
+ struct list_head entry;
+ struct nvmet_subsys *subsys;
+};
+
+struct nvmet_req;
+struct nvmet_fabrics_ops {
+ struct module *owner;
+ unsigned int type;
+ unsigned int sqe_inline_size;
+ unsigned int msdbd;
+ bool has_keyed_sgls : 1;
+ void (*queue_response)(struct nvmet_req *req);
+ int (*add_port)(struct nvmet_port *port);
+ void (*remove_port)(struct nvmet_port *port);
+ void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
+};
+
+#define NVMET_MAX_INLINE_BIOVEC 8
+
+struct nvmet_req {
+ struct nvme_command *cmd;
+ struct nvme_completion *rsp;
+ struct nvmet_sq *sq;
+ struct nvmet_cq *cq;
+ struct nvmet_ns *ns;
+ struct scatterlist *sg;
+ struct bio inline_bio;
+ struct bio_vec inline_bvec[NVMET_MAX_INLINE_BIOVEC];
+ int sg_cnt;
+ size_t data_len;
+
+ struct nvmet_port *port;
+
+ void (*execute)(struct nvmet_req *req);
+ struct nvmet_fabrics_ops *ops;
+};
+
+static inline void nvmet_set_status(struct nvmet_req *req, u16 status)
+{
+ req->rsp->status = cpu_to_le16(status << 1);
+}
+
+static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
+{
+ req->rsp->result = cpu_to_le32(result);
+}
+
+/*
+ * NVMe command writes actually are DMA reads for us on the target side.
+ */
+static inline enum dma_data_direction
+nvmet_data_dir(struct nvmet_req *req)
+{
+ return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+}
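+
+/*
+ * Concretely (sketch): for a host write the target must receive the
+ * payload, so its buffers are mapped DMA_FROM_DEVICE (the HCA writes
+ * into them); for a host read the buffers it responds from are mapped
+ * DMA_TO_DEVICE.
+ */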
+
+struct nvmet_async_event {
+ struct list_head entry;
+ u8 event_type;
+ u8 event_info;
+ u8 log_page;
+};
+
+int nvmet_parse_connect_cmd(struct nvmet_req *req);
+int nvmet_parse_io_cmd(struct nvmet_req *req);
+int nvmet_parse_admin_cmd(struct nvmet_req *req);
+int nvmet_parse_discovery_cmd(struct nvmet_req *req);
+int nvmet_parse_fabrics_cmd(struct nvmet_req *req);
+
+bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
+ struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops);
+void nvmet_req_complete(struct nvmet_req *req, u16 status);
+
+void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
+ u16 size);
+void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
+ u16 size);
+void nvmet_sq_destroy(struct nvmet_sq *sq);
+int nvmet_sq_init(struct nvmet_sq *sq);
+
+void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);
+
+void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
+u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
+ struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
+u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
+ struct nvmet_req *req, struct nvmet_ctrl **ret);
+void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
+
+struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
+ enum nvme_subsys_type type);
+void nvmet_subsys_put(struct nvmet_subsys *subsys);
+
+struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
+void nvmet_put_namespace(struct nvmet_ns *ns);
+int nvmet_ns_enable(struct nvmet_ns *ns);
+void nvmet_ns_disable(struct nvmet_ns *ns);
+struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
+void nvmet_ns_free(struct nvmet_ns *ns);
+
+int nvmet_register_transport(struct nvmet_fabrics_ops *ops);
+void nvmet_unregister_transport(struct nvmet_fabrics_ops *ops);
+
+int nvmet_enable_port(struct nvmet_port *port);
+void nvmet_disable_port(struct nvmet_port *port);
+
+void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
+void nvmet_referral_disable(struct nvmet_port *port);
+
+u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
+ size_t len);
+u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
+ size_t len);
+
+u32 nvmet_get_log_page_len(struct nvme_command *cmd);
+
+#define NVMET_QUEUE_SIZE 1024
+#define NVMET_NR_QUEUES 64
+#define NVMET_MAX_CMD NVMET_QUEUE_SIZE
+#define NVMET_KAS 10
+#define NVMET_DISC_KATO 120
+
+int __init nvmet_init_configfs(void);
+void __exit nvmet_exit_configfs(void);
+
+int __init nvmet_init_discovery(void);
+void nvmet_exit_discovery(void);
+
+extern struct nvmet_subsys *nvmet_disc_subsys;
+extern u64 nvmet_genctr;
+extern struct rw_semaphore nvmet_config_sem;
+
+bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
+ const char *hostnqn);
+
+#endif /* _NVMET_H */
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
new file mode 100644
index 000000000..1cbe6e053
--- /dev/null
+++ b/drivers/nvme/target/rdma.c
@@ -0,0 +1,1497 @@
+/*
+ * NVMe over Fabrics RDMA target.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/atomic.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/nvme.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/wait.h>
+#include <linux/inet.h>
+#include <asm/unaligned.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_cm.h>
+#include <rdma/rw.h>
+
+#include <linux/nvme-rdma.h>
+#include "nvmet.h"
+
+/*
+ * We allow up to a page of inline data to go with the SQE
+ */
+#define NVMET_RDMA_INLINE_DATA_SIZE PAGE_SIZE
+
+struct nvmet_rdma_cmd {
+ struct ib_sge sge[2];
+ struct ib_cqe cqe;
+ struct ib_recv_wr wr;
+ struct scatterlist inline_sg;
+ struct page *inline_page;
+ struct nvme_command *nvme_cmd;
+ struct nvmet_rdma_queue *queue;
+};
+
+enum {
+ NVMET_RDMA_REQ_INLINE_DATA = (1 << 0),
+ NVMET_RDMA_REQ_INVALIDATE_RKEY = (1 << 1),
+};
+
+struct nvmet_rdma_rsp {
+ struct ib_sge send_sge;
+ struct ib_cqe send_cqe;
+ struct ib_send_wr send_wr;
+
+ struct nvmet_rdma_cmd *cmd;
+ struct nvmet_rdma_queue *queue;
+
+ struct ib_cqe read_cqe;
+ struct rdma_rw_ctx rw;
+
+ struct nvmet_req req;
+
+ u8 n_rdma;
+ u32 flags;
+ u32 invalidate_rkey;
+
+ struct list_head wait_list;
+ struct list_head free_list;
+};
+
+enum nvmet_rdma_queue_state {
+ NVMET_RDMA_Q_CONNECTING,
+ NVMET_RDMA_Q_LIVE,
+ NVMET_RDMA_Q_DISCONNECTING,
+ NVMET_RDMA_IN_DEVICE_REMOVAL,
+};
+
+struct nvmet_rdma_queue {
+ struct rdma_cm_id *cm_id;
+ struct nvmet_port *port;
+ struct ib_cq *cq;
+ atomic_t sq_wr_avail;
+ struct nvmet_rdma_device *dev;
+ spinlock_t state_lock;
+ enum nvmet_rdma_queue_state state;
+ struct nvmet_cq nvme_cq;
+ struct nvmet_sq nvme_sq;
+
+ struct nvmet_rdma_rsp *rsps;
+ struct list_head free_rsps;
+ spinlock_t rsps_lock;
+ struct nvmet_rdma_cmd *cmds;
+
+ struct work_struct release_work;
+ struct list_head rsp_wait_list;
+ struct list_head rsp_wr_wait_list;
+ spinlock_t rsp_wr_wait_lock;
+
+ int idx;
+ int host_qid;
+ int recv_queue_size;
+ int send_queue_size;
+
+ struct list_head queue_list;
+};
+
+struct nvmet_rdma_device {
+ struct ib_device *device;
+ struct ib_pd *pd;
+ struct ib_srq *srq;
+ struct nvmet_rdma_cmd *srq_cmds;
+ size_t srq_size;
+ struct kref ref;
+ struct list_head entry;
+};
+
+static bool nvmet_rdma_use_srq;
+module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
+MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
+
+static DEFINE_IDA(nvmet_rdma_queue_ida);
+static LIST_HEAD(nvmet_rdma_queue_list);
+static DEFINE_MUTEX(nvmet_rdma_queue_mutex);
+
+static LIST_HEAD(device_list);
+static DEFINE_MUTEX(device_list_mutex);
+
+static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
+static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
+static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
+static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
+static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
+
+static struct nvmet_fabrics_ops nvmet_rdma_ops;
+
+/* XXX: really should move to a generic header sooner or later.. */
+static inline u32 get_unaligned_le24(const u8 *p)
+{
+ return (u32)p[0] | (u32)p[1] << 8 | (u32)p[2] << 16;
+}
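+
+/*
+ * Sketch: keyed SGL descriptors carry their length as three
+ * little-endian bytes, so e.g. the bytes { 0x00, 0x10, 0x00 } decode
+ * to 0x1000 (4096):
+ *
+ *	u32 len = get_unaligned_le24(sgl->length);
+ */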
+
+static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
+{
+ return nvme_is_write(rsp->req.cmd) &&
+ rsp->req.data_len &&
+ !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
+}
+
+static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
+{
+ return !nvme_is_write(rsp->req.cmd) &&
+ rsp->req.data_len &&
+ !rsp->req.rsp->status &&
+ !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
+}
+
+static inline struct nvmet_rdma_rsp *
+nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
+{
+ struct nvmet_rdma_rsp *rsp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->rsps_lock, flags);
+ rsp = list_first_entry(&queue->free_rsps,
+ struct nvmet_rdma_rsp, free_list);
+ list_del(&rsp->free_list);
+ spin_unlock_irqrestore(&queue->rsps_lock, flags);
+
+ return rsp;
+}
+
+static inline void
+nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
+ list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
+ spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
+}
+
+static void nvmet_rdma_free_sgl(struct scatterlist *sgl, unsigned int nents)
+{
+ struct scatterlist *sg;
+ int count;
+
+ if (!sgl || !nents)
+ return;
+
+ for_each_sg(sgl, sg, nents, count)
+ __free_page(sg_page(sg));
+ kfree(sgl);
+}
+
+static int nvmet_rdma_alloc_sgl(struct scatterlist **sgl, unsigned int *nents,
+ u32 length)
+{
+ struct scatterlist *sg;
+ struct page *page;
+ unsigned int nent;
+ int i = 0;
+
+ nent = DIV_ROUND_UP(length, PAGE_SIZE);
+ sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
+ if (!sg)
+ goto out;
+
+ sg_init_table(sg, nent);
+
+ while (length) {
+ u32 page_len = min_t(u32, length, PAGE_SIZE);
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ goto out_free_pages;
+
+ sg_set_page(&sg[i], page, page_len, 0);
+ length -= page_len;
+ i++;
+ }
+ *sgl = sg;
+ *nents = nent;
+ return 0;
+
+out_free_pages:
+ while (i > 0) {
+ i--;
+ __free_page(sg_page(&sg[i]));
+ }
+ kfree(sg);
+out:
+ return NVME_SC_INTERNAL;
+}
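+
+/*
+ * Worked example for the sizing above (assuming 4 KiB pages): a
+ * 10000-byte transfer needs DIV_ROUND_UP(10000, 4096) = 3 entries,
+ * filled with 4096, 4096 and 1808 bytes respectively.
+ */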
+
+static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
+ struct nvmet_rdma_cmd *c, bool admin)
+{
+ /* NVMe command / RDMA RECV */
+ c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL);
+ if (!c->nvme_cmd)
+ goto out;
+
+ c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd,
+ sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
+ if (ib_dma_mapping_error(ndev->device, c->sge[0].addr))
+ goto out_free_cmd;
+
+ c->sge[0].length = sizeof(*c->nvme_cmd);
+ c->sge[0].lkey = ndev->pd->local_dma_lkey;
+
+ if (!admin) {
+ c->inline_page = alloc_pages(GFP_KERNEL,
+ get_order(NVMET_RDMA_INLINE_DATA_SIZE));
+ if (!c->inline_page)
+ goto out_unmap_cmd;
+ c->sge[1].addr = ib_dma_map_page(ndev->device,
+ c->inline_page, 0, NVMET_RDMA_INLINE_DATA_SIZE,
+ DMA_FROM_DEVICE);
+ if (ib_dma_mapping_error(ndev->device, c->sge[1].addr))
+ goto out_free_inline_page;
+ c->sge[1].length = NVMET_RDMA_INLINE_DATA_SIZE;
+ c->sge[1].lkey = ndev->pd->local_dma_lkey;
+ }
+
+ c->cqe.done = nvmet_rdma_recv_done;
+
+ c->wr.wr_cqe = &c->cqe;
+ c->wr.sg_list = c->sge;
+ c->wr.num_sge = admin ? 1 : 2;
+
+ return 0;
+
+out_free_inline_page:
+ if (!admin) {
+ __free_pages(c->inline_page,
+ get_order(NVMET_RDMA_INLINE_DATA_SIZE));
+ }
+out_unmap_cmd:
+ ib_dma_unmap_single(ndev->device, c->sge[0].addr,
+ sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
+out_free_cmd:
+ kfree(c->nvme_cmd);
+
+out:
+ return -ENOMEM;
+}
+
+static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
+ struct nvmet_rdma_cmd *c, bool admin)
+{
+ if (!admin) {
+ ib_dma_unmap_page(ndev->device, c->sge[1].addr,
+ NVMET_RDMA_INLINE_DATA_SIZE, DMA_FROM_DEVICE);
+ __free_pages(c->inline_page,
+ get_order(NVMET_RDMA_INLINE_DATA_SIZE));
+ }
+ ib_dma_unmap_single(ndev->device, c->sge[0].addr,
+ sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
+ kfree(c->nvme_cmd);
+}
+
+static struct nvmet_rdma_cmd *
+nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
+ int nr_cmds, bool admin)
+{
+ struct nvmet_rdma_cmd *cmds;
+ int ret = -EINVAL, i;
+
+ cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
+ if (!cmds)
+ goto out;
+
+ for (i = 0; i < nr_cmds; i++) {
+ ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin);
+ if (ret)
+ goto out_free;
+ }
+
+ return cmds;
+
+out_free:
+ while (--i >= 0)
+ nvmet_rdma_free_cmd(ndev, cmds + i, admin);
+ kfree(cmds);
+out:
+ return ERR_PTR(ret);
+}
+
+static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
+ struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)
+{
+ int i;
+
+ for (i = 0; i < nr_cmds; i++)
+ nvmet_rdma_free_cmd(ndev, cmds + i, admin);
+ kfree(cmds);
+}
+
+static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
+ struct nvmet_rdma_rsp *r)
+{
+ /* NVMe CQE / RDMA SEND */
+ r->req.rsp = kmalloc(sizeof(*r->req.rsp), GFP_KERNEL);
+ if (!r->req.rsp)
+ goto out;
+
+ r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.rsp,
+ sizeof(*r->req.rsp), DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
+ goto out_free_rsp;
+
+ r->send_sge.length = sizeof(*r->req.rsp);
+ r->send_sge.lkey = ndev->pd->local_dma_lkey;
+
+ r->send_cqe.done = nvmet_rdma_send_done;
+
+ r->send_wr.wr_cqe = &r->send_cqe;
+ r->send_wr.sg_list = &r->send_sge;
+ r->send_wr.num_sge = 1;
+ r->send_wr.send_flags = IB_SEND_SIGNALED;
+
+ /* Data In / RDMA READ */
+ r->read_cqe.done = nvmet_rdma_read_data_done;
+ return 0;
+
+out_free_rsp:
+ kfree(r->req.rsp);
+out:
+ return -ENOMEM;
+}
+
+static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
+ struct nvmet_rdma_rsp *r)
+{
+ ib_dma_unmap_single(ndev->device, r->send_sge.addr,
+ sizeof(*r->req.rsp), DMA_TO_DEVICE);
+ kfree(r->req.rsp);
+}
+
+static int
+nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
+{
+ struct nvmet_rdma_device *ndev = queue->dev;
+ int nr_rsps = queue->recv_queue_size * 2;
+ int ret = -EINVAL, i;
+
+ queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
+ GFP_KERNEL);
+ if (!queue->rsps)
+ goto out;
+
+ for (i = 0; i < nr_rsps; i++) {
+ struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
+
+ ret = nvmet_rdma_alloc_rsp(ndev, rsp);
+ if (ret)
+ goto out_free;
+
+ list_add_tail(&rsp->free_list, &queue->free_rsps);
+ }
+
+ return 0;
+
+out_free:
+ while (--i >= 0) {
+ struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
+
+ list_del(&rsp->free_list);
+ nvmet_rdma_free_rsp(ndev, rsp);
+ }
+ kfree(queue->rsps);
+out:
+ return ret;
+}
+
+static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
+{
+ struct nvmet_rdma_device *ndev = queue->dev;
+ int i, nr_rsps = queue->recv_queue_size * 2;
+
+ for (i = 0; i < nr_rsps; i++) {
+ struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
+
+ list_del(&rsp->free_list);
+ nvmet_rdma_free_rsp(ndev, rsp);
+ }
+ kfree(queue->rsps);
+}
+
+static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
+ struct nvmet_rdma_cmd *cmd)
+{
+ struct ib_recv_wr *bad_wr;
+
+ if (ndev->srq)
+ return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
+ return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
+}
+
+static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
+{
+ spin_lock(&queue->rsp_wr_wait_lock);
+ while (!list_empty(&queue->rsp_wr_wait_list)) {
+ struct nvmet_rdma_rsp *rsp;
+ bool ret;
+
+ rsp = list_entry(queue->rsp_wr_wait_list.next,
+ struct nvmet_rdma_rsp, wait_list);
+ list_del(&rsp->wait_list);
+
+ spin_unlock(&queue->rsp_wr_wait_lock);
+ ret = nvmet_rdma_execute_command(rsp);
+ spin_lock(&queue->rsp_wr_wait_lock);
+
+ if (!ret) {
+ list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
+ break;
+ }
+ }
+ spin_unlock(&queue->rsp_wr_wait_lock);
+}
+
+
+static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
+{
+ struct nvmet_rdma_queue *queue = rsp->queue;
+
+ atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
+
+ if (rsp->n_rdma) {
+ rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
+ queue->cm_id->port_num, rsp->req.sg,
+ rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
+ }
+
+ if (rsp->req.sg != &rsp->cmd->inline_sg)
+ nvmet_rdma_free_sgl(rsp->req.sg, rsp->req.sg_cnt);
+
+ if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
+ nvmet_rdma_process_wr_wait_list(queue);
+
+ nvmet_rdma_put_rsp(rsp);
+}
+
+static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
+{
+ if (queue->nvme_sq.ctrl) {
+ nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
+ } else {
+		/*
+		 * We haven't set up the controller yet in case of an
+		 * admin connect error, so just disconnect and clean up
+		 * the queue.
+		 */
+ nvmet_rdma_queue_disconnect(queue);
+ }
+}
+
+static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct nvmet_rdma_rsp *rsp =
+ container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
+
+ nvmet_rdma_release_rsp(rsp);
+
+ if (unlikely(wc->status != IB_WC_SUCCESS &&
+ wc->status != IB_WC_WR_FLUSH_ERR)) {
+ pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
+ wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
+ nvmet_rdma_error_comp(rsp->queue);
+ }
+}
+
+static void nvmet_rdma_queue_response(struct nvmet_req *req)
+{
+ struct nvmet_rdma_rsp *rsp =
+ container_of(req, struct nvmet_rdma_rsp, req);
+ struct rdma_cm_id *cm_id = rsp->queue->cm_id;
+ struct ib_send_wr *first_wr, *bad_wr;
+
+ if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
+ rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
+ rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
+ } else {
+ rsp->send_wr.opcode = IB_WR_SEND;
+ }
+
+ if (nvmet_rdma_need_data_out(rsp))
+ first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
+ cm_id->port_num, NULL, &rsp->send_wr);
+ else
+ first_wr = &rsp->send_wr;
+
+ nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
+ if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
+ pr_err("sending cmd response failed\n");
+ nvmet_rdma_release_rsp(rsp);
+ }
+}
+
+static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct nvmet_rdma_rsp *rsp =
+ container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
+ struct nvmet_rdma_queue *queue = cq->cq_context;
+
+ WARN_ON(rsp->n_rdma <= 0);
+ atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
+ rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
+ queue->cm_id->port_num, rsp->req.sg,
+ rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
+ rsp->n_rdma = 0;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ nvmet_rdma_release_rsp(rsp);
+ if (wc->status != IB_WC_WR_FLUSH_ERR) {
+ pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
+ wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
+ nvmet_rdma_error_comp(queue);
+ }
+ return;
+ }
+
+ rsp->req.execute(&rsp->req);
+}
+
+static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
+ u64 off)
+{
+ sg_init_table(&rsp->cmd->inline_sg, 1);
+ sg_set_page(&rsp->cmd->inline_sg, rsp->cmd->inline_page, len, off);
+ rsp->req.sg = &rsp->cmd->inline_sg;
+ rsp->req.sg_cnt = 1;
+}
+
+static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
+{
+ struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
+ u64 off = le64_to_cpu(sgl->addr);
+ u32 len = le32_to_cpu(sgl->length);
+
+ if (!nvme_is_write(rsp->req.cmd))
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+
+ if (off + len > NVMET_RDMA_INLINE_DATA_SIZE) {
+ pr_err("invalid inline data offset!\n");
+ return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
+ }
+
+ /* no data command? */
+ if (!len)
+ return 0;
+
+ nvmet_rdma_use_inline_sg(rsp, len, off);
+ rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
+ return 0;
+}
+
+static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
+ struct nvme_keyed_sgl_desc *sgl, bool invalidate)
+{
+ struct rdma_cm_id *cm_id = rsp->queue->cm_id;
+ u64 addr = le64_to_cpu(sgl->addr);
+ u32 len = get_unaligned_le24(sgl->length);
+ u32 key = get_unaligned_le32(sgl->key);
+ int ret;
+ u16 status;
+
+ /* no data command? */
+ if (!len)
+ return 0;
+
+ status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
+ len);
+ if (status)
+ return status;
+
+ ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
+ rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
+ nvmet_data_dir(&rsp->req));
+ if (ret < 0)
+ return NVME_SC_INTERNAL;
+ rsp->n_rdma += ret;
+
+ if (invalidate) {
+ rsp->invalidate_rkey = key;
+ rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
+ }
+
+ return 0;
+}
+
+static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
+{
+ struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;
+
+ switch (sgl->type >> 4) {
+ case NVME_SGL_FMT_DATA_DESC:
+ switch (sgl->type & 0xf) {
+ case NVME_SGL_FMT_OFFSET:
+ return nvmet_rdma_map_sgl_inline(rsp);
+ default:
+ pr_err("invalid SGL subtype: %#x\n", sgl->type);
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+ case NVME_KEY_SGL_FMT_DATA_DESC:
+ switch (sgl->type & 0xf) {
+ case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE:
+ return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
+ case NVME_SGL_FMT_ADDRESS:
+ return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
+ default:
+ pr_err("invalid SGL subtype: %#x\n", sgl->type);
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+ default:
+ pr_err("invalid SGL type: %#x\n", sgl->type);
+ return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
+ }
+}
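+
+/*
+ * Sketch of the type byte decoded above (encodings per linux/nvme.h):
+ * the high nibble selects the descriptor type, the low nibble the
+ * subtype.  So 0x01 (data block, offset subtype) takes the inline
+ * path, 0x40 the keyed path, and 0x4f the keyed path with remote
+ * invalidate.
+ */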
+
+static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
+{
+ struct nvmet_rdma_queue *queue = rsp->queue;
+
+ if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
+ &queue->sq_wr_avail) < 0)) {
+ pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n",
+ 1 + rsp->n_rdma, queue->idx,
+ queue->nvme_sq.ctrl->cntlid);
+ atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
+ return false;
+ }
+
+ if (nvmet_rdma_need_data_in(rsp)) {
+ if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp,
+ queue->cm_id->port_num, &rsp->read_cqe, NULL))
+ nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
+ } else {
+ rsp->req.execute(&rsp->req);
+ }
+
+ return true;
+}
+
+static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
+ struct nvmet_rdma_rsp *cmd)
+{
+ u16 status;
+
+ cmd->queue = queue;
+ cmd->n_rdma = 0;
+ cmd->req.port = queue->port;
+
+ if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
+ &queue->nvme_sq, &nvmet_rdma_ops))
+ return;
+
+ status = nvmet_rdma_map_sgl(cmd);
+ if (status)
+ goto out_err;
+
+ if (unlikely(!nvmet_rdma_execute_command(cmd))) {
+ spin_lock(&queue->rsp_wr_wait_lock);
+ list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
+ spin_unlock(&queue->rsp_wr_wait_lock);
+ }
+
+ return;
+
+out_err:
+ nvmet_req_complete(&cmd->req, status);
+}
+
+static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct nvmet_rdma_cmd *cmd =
+ container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
+ struct nvmet_rdma_queue *queue = cq->cq_context;
+ struct nvmet_rdma_rsp *rsp;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (wc->status != IB_WC_WR_FLUSH_ERR) {
+ pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
+ wc->wr_cqe, ib_wc_status_msg(wc->status),
+ wc->status);
+ nvmet_rdma_error_comp(queue);
+ }
+ return;
+ }
+
+ if (unlikely(wc->byte_len < sizeof(struct nvme_command))) {
+ pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n");
+ nvmet_rdma_error_comp(queue);
+ return;
+ }
+
+ cmd->queue = queue;
+ rsp = nvmet_rdma_get_rsp(queue);
+ rsp->cmd = cmd;
+ rsp->flags = 0;
+ rsp->req.cmd = cmd->nvme_cmd;
+
+ if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->state_lock, flags);
+ if (queue->state == NVMET_RDMA_Q_CONNECTING)
+ list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
+ else
+ nvmet_rdma_put_rsp(rsp);
+ spin_unlock_irqrestore(&queue->state_lock, flags);
+ return;
+ }
+
+ nvmet_rdma_handle_command(queue, rsp);
+}
+
+static void nvmet_rdma_destroy_srq(struct nvmet_rdma_device *ndev)
+{
+ if (!ndev->srq)
+ return;
+
+ nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
+ ib_destroy_srq(ndev->srq);
+}
+
+static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
+{
+ struct ib_srq_init_attr srq_attr = { NULL, };
+ struct ib_srq *srq;
+ size_t srq_size;
+ int ret, i;
+
+ srq_size = 4095; /* XXX: tune */
+
+ srq_attr.attr.max_wr = srq_size;
+ srq_attr.attr.max_sge = 2;
+ srq_attr.attr.srq_limit = 0;
+ srq_attr.srq_type = IB_SRQT_BASIC;
+ srq = ib_create_srq(ndev->pd, &srq_attr);
+ if (IS_ERR(srq)) {
+ /*
+ * If SRQs aren't supported we just go ahead and use normal
+ * non-shared receive queues.
+ */
+ pr_info("SRQ requested but not supported.\n");
+ return 0;
+ }
+
+ ndev->srq_cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
+ if (IS_ERR(ndev->srq_cmds)) {
+ ret = PTR_ERR(ndev->srq_cmds);
+ goto out_destroy_srq;
+ }
+
+ ndev->srq = srq;
+ ndev->srq_size = srq_size;
+
+ for (i = 0; i < srq_size; i++)
+ nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);
+
+ return 0;
+
+out_destroy_srq:
+ ib_destroy_srq(srq);
+ return ret;
+}
+
+static void nvmet_rdma_free_dev(struct kref *ref)
+{
+ struct nvmet_rdma_device *ndev =
+ container_of(ref, struct nvmet_rdma_device, ref);
+
+ mutex_lock(&device_list_mutex);
+ list_del(&ndev->entry);
+ mutex_unlock(&device_list_mutex);
+
+ nvmet_rdma_destroy_srq(ndev);
+ ib_dealloc_pd(ndev->pd);
+
+ kfree(ndev);
+}
+
+static struct nvmet_rdma_device *
+nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
+{
+ struct nvmet_rdma_device *ndev;
+ int ret;
+
+ mutex_lock(&device_list_mutex);
+ list_for_each_entry(ndev, &device_list, entry) {
+ if (ndev->device->node_guid == cm_id->device->node_guid &&
+ kref_get_unless_zero(&ndev->ref))
+ goto out_unlock;
+ }
+
+ ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
+ if (!ndev)
+ goto out_err;
+
+ ndev->device = cm_id->device;
+ kref_init(&ndev->ref);
+
+ ndev->pd = ib_alloc_pd(ndev->device);
+ if (IS_ERR(ndev->pd))
+ goto out_free_dev;
+
+ if (nvmet_rdma_use_srq) {
+ ret = nvmet_rdma_init_srq(ndev);
+ if (ret)
+ goto out_free_pd;
+ }
+
+ list_add(&ndev->entry, &device_list);
+out_unlock:
+ mutex_unlock(&device_list_mutex);
+ pr_debug("added %s.\n", ndev->device->name);
+ return ndev;
+
+out_free_pd:
+ ib_dealloc_pd(ndev->pd);
+out_free_dev:
+ kfree(ndev);
+out_err:
+ mutex_unlock(&device_list_mutex);
+ return NULL;
+}
+
+static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
+{
+ struct ib_qp_init_attr qp_attr;
+ struct nvmet_rdma_device *ndev = queue->dev;
+ int comp_vector, nr_cqe, ret, i;
+
+ /*
+	 * Spread the I/O queues across completion vectors,
+ * but still keep all admin queues on vector 0.
+ */
+ comp_vector = !queue->host_qid ? 0 :
+ queue->idx % ndev->device->num_comp_vectors;
+
+ /*
+ * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
+ */
+ nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;
+
+ queue->cq = ib_alloc_cq(ndev->device, queue,
+ nr_cqe + 1, comp_vector,
+ IB_POLL_WORKQUEUE);
+ if (IS_ERR(queue->cq)) {
+ ret = PTR_ERR(queue->cq);
+ pr_err("failed to create CQ cqe= %d ret= %d\n",
+ nr_cqe + 1, ret);
+ goto out;
+ }
+
+ memset(&qp_attr, 0, sizeof(qp_attr));
+ qp_attr.qp_context = queue;
+ qp_attr.event_handler = nvmet_rdma_qp_event;
+ qp_attr.send_cq = queue->cq;
+ qp_attr.recv_cq = queue->cq;
+ qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+ qp_attr.qp_type = IB_QPT_RC;
+ /* +1 for drain */
+ qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
+ qp_attr.cap.max_rdma_ctxs = queue->send_queue_size;
+ qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
+ ndev->device->attrs.max_sge);
+
+ if (ndev->srq) {
+ qp_attr.srq = ndev->srq;
+ } else {
+ /* +1 for drain */
+ qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
+ qp_attr.cap.max_recv_sge = 2;
+ }
+
+ ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
+ if (ret) {
+ pr_err("failed to create_qp ret= %d\n", ret);
+ goto err_destroy_cq;
+ }
+
+ atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);
+
+ pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
+ __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
+ qp_attr.cap.max_send_wr, queue->cm_id);
+
+ if (!ndev->srq) {
+ for (i = 0; i < queue->recv_queue_size; i++) {
+ queue->cmds[i].queue = queue;
+ nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
+ }
+ }
+
+out:
+ return ret;
+
+err_destroy_cq:
+ ib_free_cq(queue->cq);
+ goto out;
+}
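+
+/*
+ * Worked example for the CQ sizing above (sketch): an I/O queue with
+ * recv_queue_size = 128 and send_queue_size = 128 reserves
+ * 128 + 2 * 128 = 384 CQEs, and ib_alloc_cq() is asked for 385, the
+ * extra entry mirroring the +1 drain slot on the send queue.
+ */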
+
+static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
+{
+ rdma_destroy_qp(queue->cm_id);
+ ib_free_cq(queue->cq);
+}
+
+static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
+{
+ pr_info("freeing queue %d\n", queue->idx);
+
+ nvmet_sq_destroy(&queue->nvme_sq);
+
+ nvmet_rdma_destroy_queue_ib(queue);
+ if (!queue->dev->srq) {
+ nvmet_rdma_free_cmds(queue->dev, queue->cmds,
+ queue->recv_queue_size,
+ !queue->host_qid);
+ }
+ nvmet_rdma_free_rsps(queue);
+ ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
+ kfree(queue);
+}
+
+static void nvmet_rdma_release_queue_work(struct work_struct *w)
+{
+ struct nvmet_rdma_queue *queue =
+ container_of(w, struct nvmet_rdma_queue, release_work);
+ struct rdma_cm_id *cm_id = queue->cm_id;
+ struct nvmet_rdma_device *dev = queue->dev;
+ enum nvmet_rdma_queue_state state = queue->state;
+
+ nvmet_rdma_free_queue(queue);
+
+ if (state != NVMET_RDMA_IN_DEVICE_REMOVAL)
+ rdma_destroy_id(cm_id);
+
+ kref_put(&dev->ref, nvmet_rdma_free_dev);
+}
+
+static int
+nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
+ struct nvmet_rdma_queue *queue)
+{
+ struct nvme_rdma_cm_req *req;
+
+ req = (struct nvme_rdma_cm_req *)conn->private_data;
+ if (!req || conn->private_data_len == 0)
+ return NVME_RDMA_CM_INVALID_LEN;
+
+ if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
+ return NVME_RDMA_CM_INVALID_RECFMT;
+
+ queue->host_qid = le16_to_cpu(req->qid);
+
+	/*
+	 * Our recv queue size is req->hsqsize + 1;
+	 * our send queue size is req->hrqsize.
+	 */
+ queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
+ queue->send_queue_size = le16_to_cpu(req->hrqsize);
+
+ if (!queue->host_qid && queue->recv_queue_size > NVMF_AQ_DEPTH)
+ return NVME_RDMA_CM_INVALID_HSQSIZE;
+
+ /* XXX: Should we enforce some kind of max for IO queues? */
+
+ return 0;
+}
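+
+/*
+ * Worked example (sketch): a host advertising hsqsize = 127 and
+ * hrqsize = 128 in its CM private data ends up with
+ * recv_queue_size = 128 and send_queue_size = 128 here.
+ */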
+
+static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
+ enum nvme_rdma_cm_status status)
+{
+ struct nvme_rdma_cm_rej rej;
+
+ rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
+ rej.sts = cpu_to_le16(status);
+
+ return rdma_reject(cm_id, (void *)&rej, sizeof(rej));
+}
+
+static struct nvmet_rdma_queue *
+nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
+ struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event)
+{
+ struct nvmet_rdma_queue *queue;
+ int ret;
+
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+ if (!queue) {
+ ret = NVME_RDMA_CM_NO_RSC;
+ goto out_reject;
+ }
+
+ ret = nvmet_sq_init(&queue->nvme_sq);
+ if (ret)
+ goto out_free_queue;
+
+ ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
+ if (ret)
+ goto out_destroy_sq;
+
+ /*
+ * Schedules the actual release because calling rdma_destroy_id from
+ * inside a CM callback would trigger a deadlock. (great API design..)
+ */
+ INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
+ queue->dev = ndev;
+ queue->cm_id = cm_id;
+
+ spin_lock_init(&queue->state_lock);
+ queue->state = NVMET_RDMA_Q_CONNECTING;
+ INIT_LIST_HEAD(&queue->rsp_wait_list);
+ INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
+ spin_lock_init(&queue->rsp_wr_wait_lock);
+ INIT_LIST_HEAD(&queue->free_rsps);
+ spin_lock_init(&queue->rsps_lock);
+
+ queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
+ if (queue->idx < 0) {
+ ret = NVME_RDMA_CM_NO_RSC;
+ goto out_free_queue;
+ }
+
+ ret = nvmet_rdma_alloc_rsps(queue);
+ if (ret) {
+ ret = NVME_RDMA_CM_NO_RSC;
+ goto out_ida_remove;
+ }
+
+ if (!ndev->srq) {
+ queue->cmds = nvmet_rdma_alloc_cmds(ndev,
+ queue->recv_queue_size,
+ !queue->host_qid);
+ if (IS_ERR(queue->cmds)) {
+ ret = NVME_RDMA_CM_NO_RSC;
+ goto out_free_responses;
+ }
+ }
+
+ ret = nvmet_rdma_create_queue_ib(queue);
+ if (ret) {
+ pr_err("%s: creating RDMA queue failed (%d).\n",
+ __func__, ret);
+ ret = NVME_RDMA_CM_NO_RSC;
+ goto out_free_cmds;
+ }
+
+ return queue;
+
+out_free_cmds:
+ if (!ndev->srq) {
+ nvmet_rdma_free_cmds(queue->dev, queue->cmds,
+ queue->recv_queue_size,
+ !queue->host_qid);
+ }
+out_free_responses:
+ nvmet_rdma_free_rsps(queue);
+out_ida_remove:
+ ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
+out_destroy_sq:
+ nvmet_sq_destroy(&queue->nvme_sq);
+out_free_queue:
+ kfree(queue);
+out_reject:
+ nvmet_rdma_cm_reject(cm_id, ret);
+ return NULL;
+}
+
+static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
+{
+ struct nvmet_rdma_queue *queue = priv;
+
+ switch (event->event) {
+ case IB_EVENT_COMM_EST:
+ rdma_notify(queue->cm_id, event->event);
+ break;
+ default:
+ pr_err("received unrecognized IB QP event %d\n", event->event);
+ break;
+ }
+}
+
+static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
+ struct nvmet_rdma_queue *queue,
+ struct rdma_conn_param *p)
+{
+ struct rdma_conn_param param = { };
+ struct nvme_rdma_cm_rep priv = { };
+ int ret = -ENOMEM;
+
+ param.rnr_retry_count = 7;
+ param.flow_control = 1;
+ param.initiator_depth = min_t(u8, p->initiator_depth,
+ queue->dev->device->attrs.max_qp_init_rd_atom);
+ param.private_data = &priv;
+ param.private_data_len = sizeof(priv);
+ priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
+ priv.crqsize = cpu_to_le16(queue->recv_queue_size);
+
+ ret = rdma_accept(cm_id, &param);
+ if (ret)
+ pr_err("rdma_accept failed (error code = %d)\n", ret);
+
+ return ret;
+}
+
+static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event)
+{
+ struct nvmet_rdma_device *ndev;
+ struct nvmet_rdma_queue *queue;
+ int ret = -EINVAL;
+
+ ndev = nvmet_rdma_find_get_device(cm_id);
+ if (!ndev) {
+ pr_err("no client data!\n");
+ nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
+ return -ECONNREFUSED;
+ }
+
+ queue = nvmet_rdma_alloc_queue(ndev, cm_id, event);
+ if (!queue) {
+ ret = -ENOMEM;
+ goto put_device;
+ }
+ queue->port = cm_id->context;
+
+ ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
+ if (ret)
+ goto release_queue;
+
+ mutex_lock(&nvmet_rdma_queue_mutex);
+ list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list);
+ mutex_unlock(&nvmet_rdma_queue_mutex);
+
+ return 0;
+
+release_queue:
+ nvmet_rdma_free_queue(queue);
+put_device:
+ kref_put(&ndev->ref, nvmet_rdma_free_dev);
+
+ return ret;
+}
+
+static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->state_lock, flags);
+ if (queue->state != NVMET_RDMA_Q_CONNECTING) {
+ pr_warn("trying to establish a connected queue\n");
+ goto out_unlock;
+ }
+ queue->state = NVMET_RDMA_Q_LIVE;
+
+ while (!list_empty(&queue->rsp_wait_list)) {
+ struct nvmet_rdma_rsp *cmd;
+
+ cmd = list_first_entry(&queue->rsp_wait_list,
+ struct nvmet_rdma_rsp, wait_list);
+ list_del(&cmd->wait_list);
+
+ spin_unlock_irqrestore(&queue->state_lock, flags);
+ nvmet_rdma_handle_command(queue, cmd);
+ spin_lock_irqsave(&queue->state_lock, flags);
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&queue->state_lock, flags);
+}
+
+static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
+{
+ bool disconnect = false;
+ unsigned long flags;
+
+ pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state);
+
+ spin_lock_irqsave(&queue->state_lock, flags);
+ switch (queue->state) {
+ case NVMET_RDMA_Q_CONNECTING:
+ case NVMET_RDMA_Q_LIVE:
+ queue->state = NVMET_RDMA_Q_DISCONNECTING;
+ case NVMET_RDMA_IN_DEVICE_REMOVAL:
+ disconnect = true;
+ break;
+ case NVMET_RDMA_Q_DISCONNECTING:
+ break;
+ }
+ spin_unlock_irqrestore(&queue->state_lock, flags);
+
+ if (disconnect) {
+ rdma_disconnect(queue->cm_id);
+ ib_drain_qp(queue->cm_id->qp);
+ schedule_work(&queue->release_work);
+ }
+}
+
+static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
+{
+ bool disconnect = false;
+
+ mutex_lock(&nvmet_rdma_queue_mutex);
+ if (!list_empty(&queue->queue_list)) {
+ list_del_init(&queue->queue_list);
+ disconnect = true;
+ }
+ mutex_unlock(&nvmet_rdma_queue_mutex);
+
+ if (disconnect)
+ __nvmet_rdma_queue_disconnect(queue);
+}
+
+static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
+ struct nvmet_rdma_queue *queue)
+{
+ WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);
+
+ pr_err("failed to connect queue\n");
+ schedule_work(&queue->release_work);
+}
+
+/**
+ * nvmet_rdma_device_removal() - Handle RDMA device removal
+ * @cm_id: rdma_cm id (for queues the qp_context is the queue; for
+ *	listeners the context is the nvmet port)
+ * @queue: nvmet rdma queue (NULL for listener cm_ids)
+ *
+ * DEVICE_REMOVAL event notifies us that the RDMA device is about
+ * to unplug, so we should take care of destroying our RDMA resources.
+ * This event will be generated for each allocated cm_id.
+ *
+ * Note that this event can be generated on a normal queue cm_id
+ * and/or a device bound listener cm_id (in which case queue will
+ * be NULL).
+ *
+ * We claim ownership of destroying the cm_id: for queues we move the
+ * queue state to NVMET_RDMA_IN_DEVICE_REMOVAL, and for the port we
+ * nullify priv to prevent a double cm_id destruction. In both cases
+ * the cm_id is destroyed implicitly by returning a non-zero rc to the
+ * callout.
+ */
+static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
+ struct nvmet_rdma_queue *queue)
+{
+ unsigned long flags;
+
+ if (!queue) {
+ struct nvmet_port *port = cm_id->context;
+
+ /*
+ * This is a listener cm_id. Make sure that
+ * future remove_port won't invoke a double
+ * cm_id destroy. use atomic xchg to make sure
+ * we don't compete with remove_port.
+ */
+ if (xchg(&port->priv, NULL) != cm_id)
+ return 0;
+ } else {
+ /*
+ * This is a queue cm_id. Make sure that
+ * release queue will not destroy the cm_id
+ * and schedule all ctrl queues removal (only
+ * if the queue is not disconnecting already).
+ */
+ spin_lock_irqsave(&queue->state_lock, flags);
+ if (queue->state != NVMET_RDMA_Q_DISCONNECTING)
+ queue->state = NVMET_RDMA_IN_DEVICE_REMOVAL;
+ spin_unlock_irqrestore(&queue->state_lock, flags);
+ nvmet_rdma_queue_disconnect(queue);
+ flush_scheduled_work();
+ }
+
+ /*
+ * We need to return 1 so that the core will destroy
+	 * its own ID. What a great API design..
+ */
+ return 1;
+}
+
+static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event)
+{
+ struct nvmet_rdma_queue *queue = NULL;
+ int ret = 0;
+
+ if (cm_id->qp)
+ queue = cm_id->qp->qp_context;
+
+ pr_debug("%s (%d): status %d id %p\n",
+ rdma_event_msg(event->event), event->event,
+ event->status, cm_id);
+
+ switch (event->event) {
+ case RDMA_CM_EVENT_CONNECT_REQUEST:
+ ret = nvmet_rdma_queue_connect(cm_id, event);
+ break;
+ case RDMA_CM_EVENT_ESTABLISHED:
+ nvmet_rdma_queue_established(queue);
+ break;
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ case RDMA_CM_EVENT_DISCONNECTED:
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+ nvmet_rdma_queue_disconnect(queue);
+ break;
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ ret = nvmet_rdma_device_removal(cm_id, queue);
+ break;
+ case RDMA_CM_EVENT_REJECTED:
+ case RDMA_CM_EVENT_UNREACHABLE:
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ nvmet_rdma_queue_connect_fail(cm_id, queue);
+ break;
+ default:
+ pr_err("received unrecognized RDMA CM event %d\n",
+ event->event);
+ break;
+ }
+
+ return ret;
+}
+
+static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl)
+{
+ struct nvmet_rdma_queue *queue;
+
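+	/*
+	 * The mutex is dropped to disconnect each matching queue, so the
+	 * list may change underneath us; restart the scan from the top.
+	 */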
+restart:
+ mutex_lock(&nvmet_rdma_queue_mutex);
+ list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
+ if (queue->nvme_sq.ctrl == ctrl) {
+ list_del_init(&queue->queue_list);
+ mutex_unlock(&nvmet_rdma_queue_mutex);
+
+ __nvmet_rdma_queue_disconnect(queue);
+ goto restart;
+ }
+ }
+ mutex_unlock(&nvmet_rdma_queue_mutex);
+}
+
+static int nvmet_rdma_add_port(struct nvmet_port *port)
+{
+ struct rdma_cm_id *cm_id;
+ struct sockaddr_in addr_in;
+ u16 port_in;
+ int ret;
+
+ switch (port->disc_addr.adrfam) {
+ case NVMF_ADDR_FAMILY_IP4:
+ break;
+ default:
+ pr_err("address family %d not supported\n",
+ port->disc_addr.adrfam);
+ return -EINVAL;
+ }
+
+ ret = kstrtou16(port->disc_addr.trsvcid, 0, &port_in);
+ if (ret)
+ return ret;
+
+ addr_in.sin_family = AF_INET;
+ addr_in.sin_addr.s_addr = in_aton(port->disc_addr.traddr);
+ addr_in.sin_port = htons(port_in);
+
+ cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
+ RDMA_PS_TCP, IB_QPT_RC);
+ if (IS_ERR(cm_id)) {
+ pr_err("CM ID creation failed\n");
+ return PTR_ERR(cm_id);
+ }
+
+ ret = rdma_bind_addr(cm_id, (struct sockaddr *)&addr_in);
+ if (ret) {
+ pr_err("binding CM ID to %pISpc failed (%d)\n", &addr_in, ret);
+ goto out_destroy_id;
+ }
+
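+	/* Accept up to 128 pending connect requests on this listener. */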
+ ret = rdma_listen(cm_id, 128);
+ if (ret) {
+ pr_err("listening to %pISpc failed (%d)\n", &addr_in, ret);
+ goto out_destroy_id;
+ }
+
+ pr_info("enabling port %d (%pISpc)\n",
+ le16_to_cpu(port->disc_addr.portid), &addr_in);
+ port->priv = cm_id;
+ return 0;
+
+out_destroy_id:
+ rdma_destroy_id(cm_id);
+ return ret;
+}
+
+static void nvmet_rdma_remove_port(struct nvmet_port *port)
+{
+ struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);
+
+ if (cm_id)
+ rdma_destroy_id(cm_id);
+}
+
+static struct nvmet_fabrics_ops nvmet_rdma_ops = {
+ .owner = THIS_MODULE,
+ .type = NVMF_TRTYPE_RDMA,
+ .sqe_inline_size = NVMET_RDMA_INLINE_DATA_SIZE,
+ .msdbd = 1,
+ .has_keyed_sgls = 1,
+ .add_port = nvmet_rdma_add_port,
+ .remove_port = nvmet_rdma_remove_port,
+ .queue_response = nvmet_rdma_queue_response,
+ .delete_ctrl = nvmet_rdma_delete_ctrl,
+};
+
+static int __init nvmet_rdma_init(void)
+{
+ return nvmet_register_transport(&nvmet_rdma_ops);
+}
+
+static void __exit nvmet_rdma_exit(void)
+{
+ struct nvmet_rdma_queue *queue;
+
+ nvmet_unregister_transport(&nvmet_rdma_ops);
+
+ flush_scheduled_work();
+
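+	/*
+	 * No new queues can show up once the transport is unregistered;
+	 * disconnect and release whatever queues are still alive.
+	 */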
+ mutex_lock(&nvmet_rdma_queue_mutex);
+ while ((queue = list_first_entry_or_null(&nvmet_rdma_queue_list,
+ struct nvmet_rdma_queue, queue_list))) {
+ list_del_init(&queue->queue_list);
+
+ mutex_unlock(&nvmet_rdma_queue_mutex);
+ __nvmet_rdma_queue_disconnect(queue);
+ mutex_lock(&nvmet_rdma_queue_mutex);
+ }
+ mutex_unlock(&nvmet_rdma_queue_mutex);
+
+ flush_scheduled_work();
+ ida_destroy(&nvmet_rdma_queue_ida);
+}
+
+module_init(nvmet_rdma_init);
+module_exit(nvmet_rdma_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 3041d48e7..f550c4596 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -15,7 +15,8 @@ if NVMEM
config NVMEM_IMX_OCOTP
tristate "i.MX6 On-Chip OTP Controller support"
- depends on SOC_IMX6
+ depends on SOC_IMX6 || COMPILE_TEST
+ depends on HAS_IOMEM
help
This is a driver for the On-Chip OTP Controller (OCOTP) available on
i.MX6 SoCs, providing access to 4 Kbits of one-time programmable
@@ -50,7 +51,6 @@ config MTK_EFUSE
tristate "Mediatek SoCs EFUSE support"
depends on ARCH_MEDIATEK || COMPILE_TEST
depends on HAS_IOMEM
- select REGMAP_MMIO
help
This is a driver to access hardware related data like sensor
calibration, HDMI impedance etc.
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index 75e66ef5b..ac27b9bac 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -15,6 +15,7 @@
* http://www.gnu.org/copyleft/gpl.html
*/
+#include <linux/clk.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -26,6 +27,7 @@
struct ocotp_priv {
struct device *dev;
+ struct clk *clk;
void __iomem *base;
unsigned int nregs;
};
@@ -36,7 +38,7 @@ static int imx_ocotp_read(void *context, unsigned int offset,
struct ocotp_priv *priv = context;
unsigned int count;
u32 *buf = val;
- int i;
+ int i, ret;
u32 index;
index = offset >> 2;
@@ -45,9 +47,16 @@ static int imx_ocotp_read(void *context, unsigned int offset,
if (count > (priv->nregs - index))
count = priv->nregs - index;
+ ret = clk_prepare_enable(priv->clk);
+ if (ret < 0) {
+ dev_err(priv->dev, "failed to prepare/enable ocotp clk\n");
+ return ret;
+ }
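+
+	/* The OCOTP registers are only readable while their clock is enabled. */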
for (i = index; i < (index + count); i++)
*buf++ = readl(priv->base + 0x400 + i * 0x10);
+ clk_disable_unprepare(priv->clk);
+
return 0;
}
@@ -85,8 +94,12 @@ static int imx_ocotp_probe(struct platform_device *pdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
of_id = of_match_device(imx_ocotp_dt_ids, dev);
- priv->nregs = (unsigned int)of_id->data;
+ priv->nregs = (unsigned long)of_id->data;
imx_ocotp_nvmem_config.size = 4 * priv->nregs;
imx_ocotp_nvmem_config.dev = dev;
imx_ocotp_nvmem_config.priv = priv;
diff --git a/drivers/nvmem/mtk-efuse.c b/drivers/nvmem/mtk-efuse.c
index 9c49369be..32fd572e1 100644
--- a/drivers/nvmem/mtk-efuse.c
+++ b/drivers/nvmem/mtk-efuse.c
@@ -14,15 +14,35 @@
#include <linux/device.h>
#include <linux/module.h>
+#include <linux/io.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
-#include <linux/regmap.h>
-static struct regmap_config mtk_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
-};
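+/*
+ * The eFuse window is a linear array of 32-bit words; access it with
+ * plain MMIO reads/writes at a 4-byte stride instead of a regmap.
+ */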
+static int mtk_reg_read(void *context,
+ unsigned int reg, void *_val, size_t bytes)
+{
+ void __iomem *base = context;
+ u32 *val = _val;
+ int i = 0, words = bytes / 4;
+
+ while (words--)
+ *val++ = readl(base + reg + (i++ * 4));
+
+ return 0;
+}
+
+static int mtk_reg_write(void *context,
+ unsigned int reg, void *_val, size_t bytes)
+{
+ void __iomem *base = context;
+ u32 *val = _val;
+ int i = 0, words = bytes / 4;
+
+ while (words--)
+ writel(*val++, base + reg + (i++ * 4));
+
+ return 0;
+}
static int mtk_efuse_probe(struct platform_device *pdev)
{
@@ -30,7 +50,6 @@ static int mtk_efuse_probe(struct platform_device *pdev)
struct resource *res;
struct nvmem_device *nvmem;
struct nvmem_config *econfig;
- struct regmap *regmap;
void __iomem *base;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -42,14 +61,12 @@ static int mtk_efuse_probe(struct platform_device *pdev)
if (!econfig)
return -ENOMEM;
- mtk_regmap_config.max_register = resource_size(res) - 1;
-
- regmap = devm_regmap_init_mmio(dev, base, &mtk_regmap_config);
- if (IS_ERR(regmap)) {
- dev_err(dev, "regmap init failed\n");
- return PTR_ERR(regmap);
- }
-
+ econfig->stride = 4;
+ econfig->word_size = 4;
+ econfig->reg_read = mtk_reg_read;
+ econfig->reg_write = mtk_reg_write;
+ econfig->size = resource_size(res);
+ econfig->priv = base;
econfig->dev = dev;
econfig->owner = THIS_MODULE;
nvmem = nvmem_register(econfig);
diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c
index 2bb3c5799..d26dd03ce 100644
--- a/drivers/nvmem/mxs-ocotp.c
+++ b/drivers/nvmem/mxs-ocotp.c
@@ -25,7 +25,6 @@
#include <linux/nvmem-provider.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
-#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/stmp_device.h>
@@ -66,11 +65,10 @@ static int mxs_ocotp_wait(struct mxs_ocotp *otp)
return 0;
}
-static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
- void *val, size_t val_size)
+static int mxs_ocotp_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
{
struct mxs_ocotp *otp = context;
- unsigned int offset = *(u32 *)reg;
u32 *buf = val;
int ret;
@@ -94,17 +92,16 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
if (ret)
goto close_banks;
- while (val_size >= reg_size) {
+ while (bytes) {
if ((offset < OCOTP_DATA_OFFSET) || (offset % 16)) {
/* fill up non-data register */
- *buf = 0;
+ *buf++ = 0;
} else {
- *buf = readl(otp->base + offset);
+ *buf++ = readl(otp->base + offset);
}
- buf++;
- val_size -= reg_size;
- offset += reg_size;
+ bytes -= 4;
+ offset += 4;
}
close_banks:
@@ -117,57 +114,29 @@ disable_clk:
return ret;
}
-static int mxs_ocotp_write(void *context, const void *data, size_t count)
-{
- /* We don't want to support writing */
- return 0;
-}
-
-static bool mxs_ocotp_writeable_reg(struct device *dev, unsigned int reg)
-{
- return false;
-}
-
static struct nvmem_config ocotp_config = {
.name = "mxs-ocotp",
+ .stride = 16,
+ .word_size = 4,
.owner = THIS_MODULE,
+ .reg_read = mxs_ocotp_read,
};
-static const struct regmap_range imx23_ranges[] = {
- regmap_reg_range(OCOTP_DATA_OFFSET, 0x210),
-};
-
-static const struct regmap_access_table imx23_access = {
- .yes_ranges = imx23_ranges,
- .n_yes_ranges = ARRAY_SIZE(imx23_ranges),
-};
-
-static const struct regmap_range imx28_ranges[] = {
- regmap_reg_range(OCOTP_DATA_OFFSET, 0x290),
-};
-
-static const struct regmap_access_table imx28_access = {
- .yes_ranges = imx28_ranges,
- .n_yes_ranges = ARRAY_SIZE(imx28_ranges),
+struct mxs_data {
+ int size;
};
-static struct regmap_bus mxs_ocotp_bus = {
- .read = mxs_ocotp_read,
- .write = mxs_ocotp_write, /* make regmap_init() happy */
- .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
- .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+static const struct mxs_data imx23_data = {
+ .size = 0x220,
};
-static struct regmap_config mxs_ocotp_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 16,
- .writeable_reg = mxs_ocotp_writeable_reg,
+static const struct mxs_data imx28_data = {
+ .size = 0x2a0,
};
static const struct of_device_id mxs_ocotp_match[] = {
- { .compatible = "fsl,imx23-ocotp", .data = &imx23_access },
- { .compatible = "fsl,imx28-ocotp", .data = &imx28_access },
+ { .compatible = "fsl,imx23-ocotp", .data = &imx23_data },
+ { .compatible = "fsl,imx28-ocotp", .data = &imx28_data },
{ /* sentinel */},
};
MODULE_DEVICE_TABLE(of, mxs_ocotp_match);
@@ -175,11 +144,10 @@ MODULE_DEVICE_TABLE(of, mxs_ocotp_match);
static int mxs_ocotp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ const struct mxs_data *data;
struct mxs_ocotp *otp;
struct resource *res;
const struct of_device_id *match;
- struct regmap *regmap;
- const struct regmap_access_table *access;
int ret;
match = of_match_device(dev->driver->of_match_table, dev);
@@ -205,17 +173,10 @@ static int mxs_ocotp_probe(struct platform_device *pdev)
return ret;
}
- access = match->data;
- mxs_ocotp_config.rd_table = access;
- mxs_ocotp_config.max_register = access->yes_ranges[0].range_max;
-
- regmap = devm_regmap_init(dev, &mxs_ocotp_bus, otp, &mxs_ocotp_config);
- if (IS_ERR(regmap)) {
- dev_err(dev, "regmap init failed\n");
- ret = PTR_ERR(regmap);
- goto err_clk;
- }
+ data = match->data;
+ ocotp_config.size = data->size;
+ ocotp_config.priv = otp;
ocotp_config.dev = dev;
otp->nvmem = nvmem_register(&ocotp_config);
if (IS_ERR(otp->nvmem)) {
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index b3bec3aaa..bc07ad30c 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -74,6 +74,7 @@ config OF_NET
config OF_MDIO
def_tristate PHYLIB
depends on PHYLIB
+ select FIXED_PHY
help
OpenFirmware MDIO bus (Ethernet PHY) accessors
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 0a553c084..02b2903fe 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -1,4 +1,6 @@
+#define pr_fmt(fmt) "OF: " fmt
+
#include <linux/device.h>
#include <linux/io.h>
#include <linux/ioport.h>
@@ -24,10 +26,10 @@ static int __of_address_to_resource(struct device_node *dev,
#ifdef DEBUG
static void of_dump_addr(const char *s, const __be32 *addr, int na)
{
- printk(KERN_DEBUG "%s", s);
+ pr_debug("%s", s);
while (na--)
- printk(" %08x", be32_to_cpu(*(addr++)));
- printk("\n");
+ pr_cont(" %08x", be32_to_cpu(*(addr++)));
+ pr_cont("\n");
}
#else
static void of_dump_addr(const char *s, const __be32 *addr, int na) { }
@@ -68,7 +70,7 @@ static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
s = of_read_number(range + na + pna, ns);
da = of_read_number(addr, na);
- pr_debug("OF: default map, cp=%llx, s=%llx, da=%llx\n",
+ pr_debug("default map, cp=%llx, s=%llx, da=%llx\n",
(unsigned long long)cp, (unsigned long long)s,
(unsigned long long)da);
@@ -156,7 +158,7 @@ static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns,
s = of_read_number(range + na + pna, ns);
da = of_read_number(addr + 1, na - 1);
- pr_debug("OF: PCI map, cp=%llx, s=%llx, da=%llx\n",
+ pr_debug("PCI map, cp=%llx, s=%llx, da=%llx\n",
(unsigned long long)cp, (unsigned long long)s,
(unsigned long long)da);
@@ -381,7 +383,7 @@ static u64 of_bus_isa_map(__be32 *addr, const __be32 *range, int na, int ns,
s = of_read_number(range + na + pna, ns);
da = of_read_number(addr + 1, na - 1);
- pr_debug("OF: ISA map, cp=%llx, s=%llx, da=%llx\n",
+ pr_debug("ISA map, cp=%llx, s=%llx, da=%llx\n",
(unsigned long long)cp, (unsigned long long)s,
(unsigned long long)da);
@@ -504,17 +506,17 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
*/
ranges = of_get_property(parent, rprop, &rlen);
if (ranges == NULL && !of_empty_ranges_quirk(parent)) {
- pr_debug("OF: no ranges; cannot translate\n");
+ pr_debug("no ranges; cannot translate\n");
return 1;
}
if (ranges == NULL || rlen == 0) {
offset = of_read_number(addr, na);
memset(addr, 0, pna * 4);
- pr_debug("OF: empty ranges; 1:1 translation\n");
+ pr_debug("empty ranges; 1:1 translation\n");
goto finish;
}
- pr_debug("OF: walking ranges...\n");
+ pr_debug("walking ranges...\n");
/* Now walk through the ranges */
rlen /= 4;
@@ -525,14 +527,14 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
break;
}
if (offset == OF_BAD_ADDR) {
- pr_debug("OF: not found !\n");
+ pr_debug("not found !\n");
return 1;
}
memcpy(addr, ranges + na, 4 * pna);
finish:
- of_dump_addr("OF: parent translation for:", addr, pna);
- pr_debug("OF: with offset: %llx\n", (unsigned long long)offset);
+ of_dump_addr("parent translation for:", addr, pna);
+ pr_debug("with offset: %llx\n", (unsigned long long)offset);
/* Translate it into parent bus space */
return pbus->translate(addr, offset, pna);
@@ -557,7 +559,7 @@ static u64 __of_translate_address(struct device_node *dev,
int na, ns, pna, pns;
u64 result = OF_BAD_ADDR;
- pr_debug("OF: ** translation for device %s **\n", of_node_full_name(dev));
+ pr_debug("** translation for device %s **\n", of_node_full_name(dev));
/* Increase refcount at current level */
of_node_get(dev);
@@ -571,14 +573,14 @@ static u64 __of_translate_address(struct device_node *dev,
/* Count address cells & copy address locally */
bus->count_cells(dev, &na, &ns);
if (!OF_CHECK_COUNTS(na, ns)) {
- pr_debug("OF: Bad cell count for %s\n", of_node_full_name(dev));
+ pr_debug("Bad cell count for %s\n", of_node_full_name(dev));
goto bail;
}
memcpy(addr, in_addr, na * 4);
- pr_debug("OF: bus is %s (na=%d, ns=%d) on %s\n",
+ pr_debug("bus is %s (na=%d, ns=%d) on %s\n",
bus->name, na, ns, of_node_full_name(parent));
- of_dump_addr("OF: translating address:", addr, na);
+ of_dump_addr("translating address:", addr, na);
/* Translate */
for (;;) {
@@ -589,7 +591,7 @@ static u64 __of_translate_address(struct device_node *dev,
/* If root, we have finished */
if (parent == NULL) {
- pr_debug("OF: reached root node\n");
+ pr_debug("reached root node\n");
result = of_read_number(addr, na);
break;
}
@@ -598,12 +600,12 @@ static u64 __of_translate_address(struct device_node *dev,
pbus = of_match_bus(parent);
pbus->count_cells(dev, &pna, &pns);
if (!OF_CHECK_COUNTS(pna, pns)) {
- pr_err("prom_parse: Bad cell count for %s\n",
+ pr_err("Bad cell count for %s\n",
of_node_full_name(dev));
break;
}
- pr_debug("OF: parent bus is %s (na=%d, ns=%d) on %s\n",
+ pr_debug("parent bus is %s (na=%d, ns=%d) on %s\n",
pbus->name, pna, pns, of_node_full_name(parent));
/* Apply bus translation */
@@ -615,7 +617,7 @@ static u64 __of_translate_address(struct device_node *dev,
ns = pns;
bus = pbus;
- of_dump_addr("OF: one level translation:", addr, na);
+ of_dump_addr("one level translation:", addr, na);
}
bail:
of_node_put(parent);
@@ -853,8 +855,7 @@ int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *siz
}
if (!ranges) {
- pr_debug("%s: no dma-ranges found for node(%s)\n",
- __func__, np->full_name);
+ pr_debug("no dma-ranges found for node(%s)\n", np->full_name);
ret = -ENODEV;
goto out;
}
@@ -871,8 +872,8 @@ int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *siz
dmaaddr = of_read_number(ranges, naddr);
*paddr = of_translate_dma_address(np, ranges);
if (*paddr == OF_BAD_ADDR) {
- pr_err("%s: translation of DMA address(%pad) to CPU address failed node(%s)\n",
- __func__, dma_addr, np->full_name);
+ pr_err("translation of DMA address(%pad) to CPU address failed node(%s)\n",
+ dma_addr, np->full_name);
ret = -EINVAL;
goto out;
}
diff --git a/drivers/of/base.c b/drivers/of/base.c
index c6a8f47f5..3ce69536a 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -17,6 +17,9 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+
+#define pr_fmt(fmt) "OF: " fmt
+
#include <linux/console.h>
#include <linux/ctype.h>
#include <linux/cpu.h>
@@ -130,7 +133,7 @@ static const char *safe_name(struct kobject *kobj, const char *orig_name)
if (name == orig_name) {
name = kstrdup(orig_name, GFP_KERNEL);
} else {
- pr_warn("device-tree: Duplicate name in %s, renamed to \"%s\"\n",
+ pr_warn("Duplicate name in %s, renamed to \"%s\"\n",
kobject_name(kobj), name);
}
return name;
@@ -204,7 +207,7 @@ void __init of_core_init(void)
of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj);
if (!of_kset) {
mutex_unlock(&of_mutex);
- pr_err("devicetree: failed to register existing nodes\n");
+ pr_err("failed to register existing nodes\n");
return;
}
for_each_of_allnodes(np)
@@ -499,6 +502,28 @@ int of_device_is_compatible(const struct device_node *device,
}
EXPORT_SYMBOL(of_device_is_compatible);
+/**
+ * of_device_compatible_match - Check if the device is compatible with any of
+ * the entries in a NULL-terminated array of strings. Returns the best match
+ * score, or 0 if there is no match.
+ */
+int of_device_compatible_match(struct device_node *device,
+ const char *const *compat)
+{
+ unsigned int tmp, score = 0;
+
+ if (!compat)
+ return 0;
+
+ while (*compat) {
+ tmp = of_device_is_compatible(device, *compat);
+ if (tmp > score)
+ score = tmp;
+ compat++;
+ }
+
+ return score;
+}
+
/**
* of_machine_is_compatible - Test root of device tree for a given compatible value
* @compat: compatible string to look for in root node's compatible property.
@@ -1606,8 +1631,7 @@ static int __of_parse_phandle_with_args(const struct device_node *np,
*/
err:
- if (it.node)
- of_node_put(it.node);
+ of_node_put(it.node);
return rc;
}
@@ -2269,8 +2293,8 @@ struct device_node *of_graph_get_next_endpoint(const struct device_node *parent,
of_node_put(node);
if (!port) {
- pr_err("%s(): no port node found in %s\n",
- __func__, parent->full_name);
+ pr_err("graph: no port node found in %s\n",
+ parent->full_name);
return NULL;
}
} else {
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index a2015599e..888fdbc09 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -6,6 +6,8 @@
* device tree nodes.
*/
+#define pr_fmt(fmt) "OF: " fmt
+
#include <linux/of.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
@@ -96,13 +98,13 @@ int of_reconfig_notify(unsigned long action, struct of_reconfig_data *p)
switch (action) {
case OF_RECONFIG_ATTACH_NODE:
case OF_RECONFIG_DETACH_NODE:
- pr_debug("of/notify %-15s %s\n", action_names[action],
+ pr_debug("notify %-15s %s\n", action_names[action],
pr->dn->full_name);
break;
case OF_RECONFIG_ADD_PROPERTY:
case OF_RECONFIG_REMOVE_PROPERTY:
case OF_RECONFIG_UPDATE_PROPERTY:
- pr_debug("of/notify %-15s %s:%s\n", action_names[action],
+ pr_debug("notify %-15s %s:%s\n", action_names[action],
pr->dn->full_name, pr->prop->name);
break;
@@ -460,12 +462,12 @@ static void __of_changeset_entry_dump(struct of_changeset_entry *ce)
case OF_RECONFIG_ADD_PROPERTY:
case OF_RECONFIG_REMOVE_PROPERTY:
case OF_RECONFIG_UPDATE_PROPERTY:
- pr_debug("of/cset<%p> %-15s %s/%s\n", ce, action_names[ce->action],
+ pr_debug("cset<%p> %-15s %s/%s\n", ce, action_names[ce->action],
ce->np->full_name, ce->prop->name);
break;
case OF_RECONFIG_ATTACH_NODE:
case OF_RECONFIG_DETACH_NODE:
- pr_debug("of/cset<%p> %-15s %s\n", ce, action_names[ce->action],
+ pr_debug("cset<%p> %-15s %s\n", ce, action_names[ce->action],
ce->np->full_name);
break;
}
@@ -531,13 +533,13 @@ static void __of_changeset_entry_notify(struct of_changeset_entry *ce, bool reve
ret = of_property_notify(ce->action, ce->np, ce->prop, ce->old_prop);
break;
default:
- pr_err("%s: invalid devicetree changeset action: %i\n", __func__,
+ pr_err("invalid devicetree changeset action: %i\n",
(int)ce->action);
return;
}
if (ret)
- pr_err("%s: notifier error @%s\n", __func__, ce->np->full_name);
+ pr_err("changeset notifier error @%s\n", ce->np->full_name);
}
static int __of_changeset_entry_apply(struct of_changeset_entry *ce)
@@ -568,8 +570,8 @@ static int __of_changeset_entry_apply(struct of_changeset_entry *ce)
ret = __of_add_property(ce->np, ce->prop);
if (ret) {
- pr_err("%s: add_property failed @%s/%s\n",
- __func__, ce->np->full_name,
+ pr_err("changeset: add_property failed @%s/%s\n",
+ ce->np->full_name,
ce->prop->name);
break;
}
@@ -577,8 +579,8 @@ static int __of_changeset_entry_apply(struct of_changeset_entry *ce)
case OF_RECONFIG_REMOVE_PROPERTY:
ret = __of_remove_property(ce->np, ce->prop);
if (ret) {
- pr_err("%s: remove_property failed @%s/%s\n",
- __func__, ce->np->full_name,
+ pr_err("changeset: remove_property failed @%s/%s\n",
+ ce->np->full_name,
ce->prop->name);
break;
}
@@ -596,8 +598,8 @@ static int __of_changeset_entry_apply(struct of_changeset_entry *ce)
ret = __of_update_property(ce->np, ce->prop, &old_prop);
if (ret) {
- pr_err("%s: update_property failed @%s/%s\n",
- __func__, ce->np->full_name,
+ pr_err("changeset: update_property failed @%s/%s\n",
+ ce->np->full_name,
ce->prop->name);
break;
}
@@ -677,24 +679,24 @@ int __of_changeset_apply(struct of_changeset *ocs)
int ret;
/* perform the rest of the work */
- pr_debug("of_changeset: applying...\n");
+ pr_debug("changeset: applying...\n");
list_for_each_entry(ce, &ocs->entries, node) {
ret = __of_changeset_entry_apply(ce);
if (ret) {
- pr_err("%s: Error applying changeset (%d)\n", __func__, ret);
+ pr_err("Error applying changeset (%d)\n", ret);
list_for_each_entry_continue_reverse(ce, &ocs->entries, node)
__of_changeset_entry_revert(ce);
return ret;
}
}
- pr_debug("of_changeset: applied, emitting notifiers.\n");
+ pr_debug("changeset: applied, emitting notifiers.\n");
/* drop the global lock while emitting notifiers */
mutex_unlock(&of_mutex);
list_for_each_entry(ce, &ocs->entries, node)
__of_changeset_entry_notify(ce, 0);
mutex_lock(&of_mutex);
- pr_debug("of_changeset: notifiers sent.\n");
+ pr_debug("changeset: notifiers sent.\n");
return 0;
}
@@ -728,24 +730,24 @@ int __of_changeset_revert(struct of_changeset *ocs)
struct of_changeset_entry *ce;
int ret;
- pr_debug("of_changeset: reverting...\n");
+ pr_debug("changeset: reverting...\n");
list_for_each_entry_reverse(ce, &ocs->entries, node) {
ret = __of_changeset_entry_revert(ce);
if (ret) {
- pr_err("%s: Error reverting changeset (%d)\n", __func__, ret);
+ pr_err("Error reverting changeset (%d)\n", ret);
list_for_each_entry_continue(ce, &ocs->entries, node)
__of_changeset_entry_apply(ce);
return ret;
}
}
- pr_debug("of_changeset: reverted, emitting notifiers.\n");
+ pr_debug("changeset: reverted, emitting notifiers.\n");
/* drop the global lock while emitting notifiers */
mutex_unlock(&of_mutex);
list_for_each_entry_reverse(ce, &ocs->entries, node)
__of_changeset_entry_notify(ce, 1);
mutex_lock(&of_mutex);
- pr_debug("of_changeset: notifiers sent.\n");
+ pr_debug("changeset: notifiers sent.\n");
return 0;
}
@@ -795,10 +797,9 @@ int of_changeset_action(struct of_changeset *ocs, unsigned long action,
struct of_changeset_entry *ce;
ce = kzalloc(sizeof(*ce), GFP_KERNEL);
- if (!ce) {
- pr_err("%s: Failed to allocate\n", __func__);
+ if (!ce)
return -ENOMEM;
- }
+
/* get a reference to the node */
ce->action = action;
ce->np = of_node_get(np);
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 33daffc43..085c6389a 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -9,6 +9,8 @@
* version 2 as published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) "OF: fdt:" fmt
+
#include <linux/crc32.h>
#include <linux/kernel.h>
#include <linux/initrd.h>
@@ -182,14 +184,12 @@ static void populate_properties(const void *blob,
val = fdt_getprop_by_offset(blob, cur, &pname, &sz);
if (!val) {
- pr_warn("%s: Cannot locate property at 0x%x\n",
- __func__, cur);
+ pr_warn("Cannot locate property at 0x%x\n", cur);
continue;
}
if (!pname) {
- pr_warn("%s: Cannot find property name at 0x%x\n",
- __func__, cur);
+ pr_warn("Cannot find property name at 0x%x\n", cur);
continue;
}
@@ -439,7 +439,7 @@ static int unflatten_dt_nodes(const void *blob,
}
if (offset < 0 && offset != -FDT_ERR_NOTFOUND) {
- pr_err("%s: Error %d processing FDT\n", __func__, offset);
+ pr_err("Error %d processing FDT\n", offset);
return -EINVAL;
}
@@ -472,7 +472,8 @@ static int unflatten_dt_nodes(const void *blob,
static void *__unflatten_device_tree(const void *blob,
struct device_node *dad,
struct device_node **mynodes,
- void *(*dt_alloc)(u64 size, u64 align))
+ void *(*dt_alloc)(u64 size, u64 align),
+ bool detached)
{
int size;
void *mem;
@@ -516,6 +517,11 @@ static void *__unflatten_device_tree(const void *blob,
pr_warning("End of tree marker overwritten: %08x\n",
be32_to_cpup(mem + size));
+ if (detached && mynodes) {
+ of_node_set_flag(*mynodes, OF_DETACHED);
+ pr_debug("unflattened tree is detached\n");
+ }
+
pr_debug(" <- unflatten_device_tree()\n");
return mem;
}
@@ -548,7 +554,8 @@ void *of_fdt_unflatten_tree(const unsigned long *blob,
void *mem;
mutex_lock(&of_fdt_unflatten_mutex);
- mem = __unflatten_device_tree(blob, dad, mynodes, &kernel_tree_alloc);
+ mem = __unflatten_device_tree(blob, dad, mynodes, &kernel_tree_alloc,
+ true);
mutex_unlock(&of_fdt_unflatten_mutex);
return mem;
@@ -744,6 +751,19 @@ int __init of_scan_flat_dt(int (*it)(unsigned long node,
}
/**
+ * of_get_flat_dt_subnode_by_name - get the subnode by given name
+ *
+ * @node: the parent node
+ * @uname: the name of subnode
+ *
+ * Return: offset of the subnode, or -FDT_ERR_NOTFOUND if there is none
+ */
+int of_get_flat_dt_subnode_by_name(unsigned long node, const char *uname)
+{
+ return fdt_subnode_offset(initial_boot_params, node, uname);
+}
+
+/**
* of_get_flat_dt_root - find the root node in the flat blob
*/
unsigned long __init of_get_flat_dt_root(void)
@@ -1224,7 +1244,7 @@ bool __init early_init_dt_scan(void *params)
void __init unflatten_device_tree(void)
{
__unflatten_device_tree(initial_boot_params, NULL, &of_root,
- early_init_dt_alloc_memory_arch);
+ early_init_dt_alloc_memory_arch, false);
/* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */
of_alias_scan(early_init_dt_alloc_memory_arch);
@@ -1281,7 +1301,7 @@ static int __init of_fdt_raw_init(void)
if (of_fdt_crc32 != crc32_be(~0, initial_boot_params,
fdt_totalsize(initial_boot_params))) {
- pr_warn("fdt: not creating '/sys/firmware/fdt': CRC check failed\n");
+ pr_warn("not creating '/sys/firmware/fdt': CRC check failed\n");
return 0;
}
of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
diff --git a/drivers/of/fdt_address.c b/drivers/of/fdt_address.c
index dca8f9b93..843a542da 100644
--- a/drivers/of/fdt_address.c
+++ b/drivers/of/fdt_address.c
@@ -12,6 +12,9 @@
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*/
+
+#define pr_fmt(fmt) "OF: fdt: " fmt
+
#include <linux/kernel.h>
#include <linux/libfdt.h>
#include <linux/of.h>
@@ -30,7 +33,7 @@ static void __init of_dump_addr(const char *s, const __be32 *addr, int na)
pr_debug("%s", s);
while(na--)
pr_cont(" %08x", *(addr++));
- pr_debug("\n");
+ pr_cont("\n");
}
#else
static void __init of_dump_addr(const char *s, const __be32 *addr, int na) { }
@@ -77,7 +80,7 @@ static u64 __init fdt_bus_default_map(__be32 *addr, const __be32 *range,
s = of_read_number(range + na + pna, ns);
da = of_read_number(addr, na);
- pr_debug("FDT: default map, cp=%llx, s=%llx, da=%llx\n",
+ pr_debug("default map, cp=%llx, s=%llx, da=%llx\n",
cp, s, da);
if (da < cp || da >= (cp + s))
@@ -123,11 +126,11 @@ static int __init fdt_translate_one(const void *blob, int parent,
if (rlen == 0) {
offset = of_read_number(addr, na);
memset(addr, 0, pna * 4);
- pr_debug("FDT: empty ranges, 1:1 translation\n");
+ pr_debug("empty ranges, 1:1 translation\n");
goto finish;
}
- pr_debug("FDT: walking ranges...\n");
+ pr_debug("walking ranges...\n");
/* Now walk through the ranges */
rlen /= 4;
@@ -138,14 +141,14 @@ static int __init fdt_translate_one(const void *blob, int parent,
break;
}
if (offset == OF_BAD_ADDR) {
- pr_debug("FDT: not found !\n");
+ pr_debug("not found !\n");
return 1;
}
memcpy(addr, ranges + na, 4 * pna);
finish:
- of_dump_addr("FDT: parent translation for:", addr, pna);
- pr_debug("FDT: with offset: %llx\n", offset);
+ of_dump_addr("parent translation for:", addr, pna);
+ pr_debug("with offset: %llx\n", offset);
/* Translate it into parent bus space */
return pbus->translate(addr, offset, pna);
@@ -170,12 +173,12 @@ static u64 __init fdt_translate_address(const void *blob, int node_offset)
int na, ns, pna, pns;
u64 result = OF_BAD_ADDR;
- pr_debug("FDT: ** translation for device %s **\n",
+ pr_debug("** translation for device %s **\n",
fdt_get_name(blob, node_offset, NULL));
reg = fdt_getprop(blob, node_offset, "reg", &len);
if (!reg) {
- pr_err("FDT: warning: device tree node '%s' has no address.\n",
+ pr_err("warning: device tree node '%s' has no address.\n",
fdt_get_name(blob, node_offset, NULL));
goto bail;
}
@@ -189,15 +192,15 @@ static u64 __init fdt_translate_address(const void *blob, int node_offset)
	/* Count address cells & copy address locally */
bus->count_cells(blob, parent, &na, &ns);
if (!OF_CHECK_COUNTS(na, ns)) {
- pr_err("FDT: Bad cell count for %s\n",
+ pr_err("Bad cell count for %s\n",
fdt_get_name(blob, node_offset, NULL));
goto bail;
}
memcpy(addr, reg, na * 4);
- pr_debug("FDT: bus (na=%d, ns=%d) on %s\n",
+ pr_debug("bus (na=%d, ns=%d) on %s\n",
na, ns, fdt_get_name(blob, parent, NULL));
- of_dump_addr("OF: translating address:", addr, na);
+ of_dump_addr("translating address:", addr, na);
/* Translate */
for (;;) {
@@ -207,7 +210,7 @@ static u64 __init fdt_translate_address(const void *blob, int node_offset)
/* If root, we have finished */
if (parent < 0) {
- pr_debug("FDT: reached root node\n");
+ pr_debug("reached root node\n");
result = of_read_number(addr, na);
break;
}
@@ -216,12 +219,12 @@ static u64 __init fdt_translate_address(const void *blob, int node_offset)
pbus = &of_busses[0];
pbus->count_cells(blob, parent, &pna, &pns);
if (!OF_CHECK_COUNTS(pna, pns)) {
- pr_err("FDT: Bad cell count for %s\n",
+ pr_err("Bad cell count for %s\n",
fdt_get_name(blob, node_offset, NULL));
break;
}
- pr_debug("FDT: parent bus (na=%d, ns=%d) on %s\n",
+ pr_debug("parent bus (na=%d, ns=%d) on %s\n",
pna, pns, fdt_get_name(blob, parent, NULL));
/* Apply bus translation */
@@ -234,7 +237,7 @@ static u64 __init fdt_translate_address(const void *blob, int node_offset)
ns = pns;
bus = pbus;
- of_dump_addr("FDT: one level translation:", addr, na);
+ of_dump_addr("one level translation:", addr, na);
}
bail:
return result;
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 6ec743faa..a2e68f740 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -18,6 +18,8 @@
* driver.
*/
+#define pr_fmt(fmt) "OF: " fmt
+
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/list.h>
@@ -542,12 +544,15 @@ void __init of_irq_init(const struct of_device_id *matches)
list_del(&desc->list);
+ of_node_set_flag(desc->dev, OF_POPULATED);
+
pr_debug("of_irq_init: init %s (%p), parent %p\n",
desc->dev->full_name,
desc->dev, desc->interrupt_parent);
ret = desc->irq_init_cb(desc->dev,
desc->interrupt_parent);
if (ret) {
+ of_node_clear_flag(desc->dev, OF_POPULATED);
kfree(desc);
continue;
}
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index e051e1b57..b470f7e35 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -19,6 +19,7 @@
#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
+#include <linux/of_net.h>
#include <linux/module.h>
MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
@@ -332,6 +333,41 @@ struct phy_device *of_phy_connect(struct net_device *dev,
EXPORT_SYMBOL(of_phy_connect);
/**
+ * of_phy_get_and_connect - Get phy node and connect to the phy described
+ *	in the device tree
+ * @dev: pointer to net_device claiming the phy
+ * @np: Pointer to device tree node for the net_device claiming the phy
+ * @hndlr: Link state callback for the network device
+ *
+ * If successful, returns a pointer to the phy_device with the embedded
+ * struct device refcount incremented by one, or NULL on failure. The
+ * refcount must be dropped by calling phy_disconnect() or phy_detach().
+ */
+struct phy_device *of_phy_get_and_connect(struct net_device *dev,
+ struct device_node *np,
+ void (*hndlr)(struct net_device *))
+{
+ phy_interface_t iface;
+ struct device_node *phy_np;
+ struct phy_device *phy;
+
+ iface = of_get_phy_mode(np);
+ if (iface < 0)
+ return NULL;
+
+ phy_np = of_parse_phandle(np, "phy-handle", 0);
+ if (!phy_np)
+ return NULL;
+
+ phy = of_phy_connect(dev, phy_np, hndlr, 0, iface);
+
+ of_node_put(phy_np);
+
+ return phy;
+}
+EXPORT_SYMBOL(of_phy_get_and_connect);
+
+/**
* of_phy_attach - Attach to a PHY without starting the state machine
* @dev: pointer to net_device claiming the phy
* @phy_np: Node pointer for the PHY
@@ -361,7 +397,6 @@ struct phy_device *of_phy_attach(struct net_device *dev,
}
EXPORT_SYMBOL(of_phy_attach);
-#if defined(CONFIG_FIXED_PHY)
/*
* of_phy_is_fixed_link() and of_phy_register_fixed_link() must
* support two DT bindings:
@@ -451,4 +486,3 @@ int of_phy_register_fixed_link(struct device_node *np)
return -ENODEV;
}
EXPORT_SYMBOL(of_phy_register_fixed_link);
-#endif
diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
index 0f2784bc1..ed5a097f0 100644
--- a/drivers/of/of_numa.c
+++ b/drivers/of/of_numa.c
@@ -91,8 +91,8 @@ static int __init of_numa_parse_memory_nodes(void)
pr_debug("NUMA: base = %llx len = %llx, node = %u\n",
rsrc.start, rsrc.end - rsrc.start + 1, nid);
- r = numa_add_memblk(nid, rsrc.start,
- rsrc.end - rsrc.start + 1);
+
+ r = numa_add_memblk(nid, rsrc.start, rsrc.end + 1);
if (r)
break;
}
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
index 13f4fed38..589b30c68 100644
--- a/drivers/of/of_pci.c
+++ b/drivers/of/of_pci.c
@@ -1,3 +1,5 @@
+#define pr_fmt(fmt) "OF: PCI: " fmt
+
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/of.h>
@@ -138,7 +140,7 @@ void of_pci_check_probe_only(void)
else
pci_clear_flags(PCI_PROBE_ONLY);
- pr_info("PCI: PROBE_ONLY %sabled\n", val ? "en" : "dis");
+ pr_info("PROBE_ONLY %sabled\n", val ? "en" : "dis");
}
EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
@@ -181,7 +183,7 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
if (!bus_range)
return -ENOMEM;
- pr_info("PCI host bridge %s ranges:\n", dev->full_name);
+ pr_info("host bridge %s ranges:\n", dev->full_name);
err = of_pci_parse_bus_range(dev, bus_range);
if (err) {
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 216648233..366d8c3c7 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -13,6 +13,8 @@
* License or (at your optional) any later version of the license.
*/
+#define pr_fmt(fmt) "OF: reserved mem: " fmt
+
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
@@ -21,6 +23,7 @@
#include <linux/sizes.h>
#include <linux/of_reserved_mem.h>
#include <linux/sort.h>
+#include <linux/slab.h>
#define MAX_RESERVED_REGIONS 16
static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
@@ -75,7 +78,7 @@ void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];
if (reserved_mem_count == ARRAY_SIZE(reserved_mem)) {
- pr_err("Reserved memory: not enough space all defined regions.\n");
+ pr_err("not enough space all defined regions.\n");
return;
}
@@ -108,8 +111,7 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
return -EINVAL;
if (len != dt_root_size_cells * sizeof(__be32)) {
- pr_err("Reserved memory: invalid size property in '%s' node.\n",
- uname);
+ pr_err("invalid size property in '%s' node.\n", uname);
return -EINVAL;
}
size = dt_mem_next_cell(dt_root_size_cells, &prop);
@@ -119,7 +121,7 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
prop = of_get_flat_dt_prop(node, "alignment", &len);
if (prop) {
if (len != dt_root_addr_cells * sizeof(__be32)) {
- pr_err("Reserved memory: invalid alignment property in '%s' node.\n",
+ pr_err("invalid alignment property in '%s' node.\n",
uname);
return -EINVAL;
}
@@ -141,7 +143,7 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
if (prop) {
if (len % t_len != 0) {
- pr_err("Reserved memory: invalid alloc-ranges property in '%s', skipping node.\n",
+ pr_err("invalid alloc-ranges property in '%s', skipping node.\n",
uname);
return -EINVAL;
}
@@ -156,7 +158,7 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
ret = early_init_dt_alloc_reserved_memory_arch(size,
align, start, end, nomap, &base);
if (ret == 0) {
- pr_debug("Reserved memory: allocated memory for '%s' node: base %pa, size %ld MiB\n",
+ pr_debug("allocated memory for '%s' node: base %pa, size %ld MiB\n",
uname, &base,
(unsigned long)size / SZ_1M);
break;
@@ -168,13 +170,12 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
ret = early_init_dt_alloc_reserved_memory_arch(size, align,
0, 0, nomap, &base);
if (ret == 0)
- pr_debug("Reserved memory: allocated memory for '%s' node: base %pa, size %ld MiB\n",
+ pr_debug("allocated memory for '%s' node: base %pa, size %ld MiB\n",
uname, &base, (unsigned long)size / SZ_1M);
}
if (base == 0) {
- pr_info("Reserved memory: failed to allocate memory for node '%s'\n",
- uname);
+ pr_info("failed to allocate memory for node '%s'\n", uname);
return -ENOMEM;
}
@@ -203,7 +204,7 @@ static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
continue;
if (initfn(rmem) == 0) {
- pr_info("Reserved memory: initialized node %s, compatible id %s\n",
+ pr_info("initialized node %s, compatible id %s\n",
rmem->name, compat);
return 0;
}
@@ -245,7 +246,7 @@ static void __init __rmem_check_for_overlap(void)
this_end = this->base + this->size;
next_end = next->base + next->size;
- pr_err("Reserved memory: OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n",
+ pr_err("OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n",
this->name, &this->base, &this_end,
next->name, &next->base, &next_end);
}
@@ -296,53 +297,95 @@ static inline struct reserved_mem *__find_rmem(struct device_node *node)
return NULL;
}
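+/*
+ * Track which reserved region was assigned to which device, so that
+ * of_reserved_mem_device_release() can undo the assignment without
+ * re-parsing the "memory-region" property.
+ */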
+struct rmem_assigned_device {
+ struct device *dev;
+ struct reserved_mem *rmem;
+ struct list_head list;
+};
+
+static LIST_HEAD(of_rmem_assigned_device_list);
+static DEFINE_MUTEX(of_rmem_assigned_device_mutex);
+
/**
- * of_reserved_mem_device_init() - assign reserved memory region to given device
+ * of_reserved_mem_device_init_by_idx() - assign reserved memory region to
+ * given device
+ * @dev: Pointer to the device to configure
+ * @np: Pointer to the device_node with 'reserved-memory' property
+ * @idx: Index of selected region
*
- * This function assign memory region pointed by "memory-region" device tree
- * property to the given device.
+ * This function assigns respective DMA-mapping operations based on reserved
+ * memory region specified by 'memory-region' property in @np node to the @dev
+ * device. When driver needs to use more than one reserved memory region, it
+ * should allocate child devices and initialize regions by name for each of
+ * child device.
+ *
+ * Returns error code or zero on success.
*/
-int of_reserved_mem_device_init(struct device *dev)
+int of_reserved_mem_device_init_by_idx(struct device *dev,
+ struct device_node *np, int idx)
{
+ struct rmem_assigned_device *rd;
+ struct device_node *target;
struct reserved_mem *rmem;
- struct device_node *np;
int ret;
- np = of_parse_phandle(dev->of_node, "memory-region", 0);
- if (!np)
+ if (!np || !dev)
+ return -EINVAL;
+
+ target = of_parse_phandle(np, "memory-region", idx);
+ if (!target)
return -ENODEV;
- rmem = __find_rmem(np);
- of_node_put(np);
+ rmem = __find_rmem(target);
+ of_node_put(target);
if (!rmem || !rmem->ops || !rmem->ops->device_init)
return -EINVAL;
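+	/* Record the assignment so of_reserved_mem_device_release() can undo it. */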
+ rd = kmalloc(sizeof(struct rmem_assigned_device), GFP_KERNEL);
+ if (!rd)
+ return -ENOMEM;
+
ret = rmem->ops->device_init(rmem, dev);
- if (ret == 0)
+ if (ret == 0) {
+ rd->dev = dev;
+ rd->rmem = rmem;
+
+ mutex_lock(&of_rmem_assigned_device_mutex);
+ list_add(&rd->list, &of_rmem_assigned_device_list);
+ mutex_unlock(&of_rmem_assigned_device_mutex);
+
dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
+ } else {
+ kfree(rd);
+ }
return ret;
}
-EXPORT_SYMBOL_GPL(of_reserved_mem_device_init);
+EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_idx);
/**
* of_reserved_mem_device_release() - release reserved memory device structures
+ * @dev: Pointer to the device to deconfigure
*
* This function releases structures allocated for memory region handling for
* the given device.
*/
void of_reserved_mem_device_release(struct device *dev)
{
- struct reserved_mem *rmem;
- struct device_node *np;
-
- np = of_parse_phandle(dev->of_node, "memory-region", 0);
- if (!np)
- return;
-
- rmem = __find_rmem(np);
- of_node_put(np);
+ struct rmem_assigned_device *rd;
+ struct reserved_mem *rmem = NULL;
+
+ mutex_lock(&of_rmem_assigned_device_mutex);
+ list_for_each_entry(rd, &of_rmem_assigned_device_list, list) {
+ if (rd->dev == dev) {
+ rmem = rd->rmem;
+ list_del(&rd->list);
+ kfree(rd);
+ break;
+ }
+ }
+ mutex_unlock(&of_rmem_assigned_device_mutex);
if (!rmem || !rmem->ops || !rmem->ops->device_release)
return;
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 82250815e..318dbb51e 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -8,7 +8,9 @@
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*/
-#undef DEBUG
+
+#define pr_fmt(fmt) "OF: overlay: " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -137,8 +139,8 @@ static int of_overlay_apply_one(struct of_overlay *ov,
for_each_property_of_node(overlay, prop) {
ret = of_overlay_apply_single_property(ov, target, prop);
if (ret) {
- pr_err("%s: Failed to apply prop @%s/%s\n",
- __func__, target->full_name, prop->name);
+ pr_err("Failed to apply prop @%s/%s\n",
+ target->full_name, prop->name);
return ret;
}
}
@@ -146,9 +148,8 @@ static int of_overlay_apply_one(struct of_overlay *ov,
for_each_child_of_node(overlay, child) {
ret = of_overlay_apply_single_device_node(ov, target, child);
if (ret != 0) {
- pr_err("%s: Failed to apply single node @%s/%s\n",
- __func__, target->full_name,
- child->name);
+ pr_err("Failed to apply single node @%s/%s\n",
+ target->full_name, child->name);
of_node_put(child);
return ret;
}
@@ -176,8 +177,7 @@ static int of_overlay_apply(struct of_overlay *ov)
err = of_overlay_apply_one(ov, ovinfo->target, ovinfo->overlay);
if (err != 0) {
- pr_err("%s: overlay failed '%s'\n",
- __func__, ovinfo->target->full_name);
+ pr_err("apply failed '%s'\n", ovinfo->target->full_name);
return err;
}
}
@@ -208,7 +208,7 @@ static struct device_node *find_target_node(struct device_node *info_node)
if (ret == 0)
return of_find_node_by_path(path);
- pr_err("%s: Failed to find target for node %p (%s)\n", __func__,
+ pr_err("Failed to find target for node %p (%s)\n",
info_node, info_node->name);
return NULL;
@@ -355,8 +355,6 @@ int of_overlay_create(struct device_node *tree)
id = idr_alloc(&ov_idr, ov, 0, 0, GFP_KERNEL);
if (id < 0) {
- pr_err("%s: idr_alloc() failed for tree@%s\n",
- __func__, tree->full_name);
err = id;
goto err_destroy_trans;
}
@@ -365,26 +363,21 @@ int of_overlay_create(struct device_node *tree)
/* build the overlay info structures */
err = of_build_overlay_info(ov, tree);
if (err) {
- pr_err("%s: of_build_overlay_info() failed for tree@%s\n",
- __func__, tree->full_name);
+ pr_err("of_build_overlay_info() failed for tree@%s\n",
+ tree->full_name);
goto err_free_idr;
}
/* apply the overlay */
err = of_overlay_apply(ov);
- if (err) {
- pr_err("%s: of_overlay_apply() failed for tree@%s\n",
- __func__, tree->full_name);
+ if (err)
goto err_abort_trans;
- }
/* apply the changeset */
err = __of_changeset_apply(&ov->cset);
- if (err) {
- pr_err("%s: __of_changeset_apply() failed for tree@%s\n",
- __func__, tree->full_name);
+ if (err)
goto err_revert_overlay;
- }
+
/* add to the tail of the overlay list */
list_add_tail(&ov->node, &ov_list);
@@ -469,8 +462,7 @@ static int overlay_removal_is_ok(struct of_overlay *ov)
list_for_each_entry(ce, &ov->cset.entries, node) {
if (!overlay_is_topmost(ov, ce->np)) {
- pr_err("%s: overlay #%d is not topmost\n",
- __func__, ov->id);
+ pr_err("overlay #%d is not topmost\n", ov->id);
return 0;
}
}
@@ -496,16 +488,13 @@ int of_overlay_destroy(int id)
ov = idr_find(&ov_idr, id);
if (ov == NULL) {
err = -ENODEV;
- pr_err("%s: Could not find overlay #%d\n",
- __func__, id);
+ pr_err("destroy: Could not find overlay #%d\n", id);
goto out;
}
/* check whether the overlay is safe to remove */
if (!overlay_removal_is_ok(ov)) {
err = -EBUSY;
- pr_err("%s: removal check failed for overlay #%d\n",
- __func__, id);
goto out;
}
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 16e8daffa..f39ccd5aa 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -11,6 +11,9 @@
* 2 of the License, or (at your option) any later version.
*
*/
+
+#define pr_fmt(fmt) "OF: " fmt
+
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/amba/bus.h>
@@ -31,7 +34,6 @@ const struct of_device_id of_default_bus_match_table[] = {
#endif /* CONFIG_ARM_AMBA */
{} /* Empty terminated list */
};
-EXPORT_SYMBOL(of_default_bus_match_table);
static int of_dev_node_match(struct device *dev, void *data)
{
@@ -234,11 +236,8 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
return NULL;
dev = amba_device_alloc(NULL, 0, 0);
- if (!dev) {
- pr_err("%s(): amba_device_alloc() failed for %s\n",
- __func__, node->full_name);
+ if (!dev)
goto err_clear_flag;
- }
/* setup generic device info */
dev->dev.of_node = of_node_get(node);
@@ -261,15 +260,15 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
ret = of_address_to_resource(node, 0, &dev->res);
if (ret) {
- pr_err("%s(): of_address_to_resource() failed (%d) for %s\n",
- __func__, ret, node->full_name);
+ pr_err("amba: of_address_to_resource() failed (%d) for %s\n",
+ ret, node->full_name);
goto err_free;
}
ret = amba_device_add(dev, &iomem_resource);
if (ret) {
- pr_err("%s(): amba_device_add() failed (%d) for %s\n",
- __func__, ret, node->full_name);
+ pr_err("amba_device_add() failed (%d) for %s\n",
+ ret, node->full_name);
goto err_free;
}
@@ -363,6 +362,12 @@ static int of_platform_bus_create(struct device_node *bus,
return 0;
}
+ if (of_node_check_flag(bus, OF_POPULATED_BUS)) {
+ pr_debug("%s() - skipping %s, already populated\n",
+ __func__, bus->full_name);
+ return 0;
+ }
+
auxdata = of_dev_lookup(lookup, bus);
if (auxdata) {
bus_id = auxdata->name;
@@ -414,7 +419,7 @@ int of_platform_bus_probe(struct device_node *root,
if (!root)
return -EINVAL;
- pr_debug("of_platform_bus_probe()\n");
+ pr_debug("%s()\n", __func__);
pr_debug(" starting at: %s\n", root->full_name);
/* Do a self check of bus type, if there's a match, create children */
@@ -466,6 +471,9 @@ int of_platform_populate(struct device_node *root,
if (!root)
return -EINVAL;
+ pr_debug("%s()\n", __func__);
+ pr_debug(" starting at: %s\n", root->full_name);
+
for_each_child_of_node(root, child) {
rc = of_platform_bus_create(child, matches, lookup, parent, true);
if (rc) {
@@ -489,6 +497,33 @@ int of_platform_default_populate(struct device_node *root,
}
EXPORT_SYMBOL_GPL(of_platform_default_populate);
+#ifndef CONFIG_PPC
+static int __init of_platform_default_populate_init(void)
+{
+ struct device_node *node;
+
+ if (!of_have_populated_dt())
+ return -ENODEV;
+
+ /*
+ * Handle ramoops explicitly, since it is inside /reserved-memory,
+ * which lacks a "compatible" property.
+ */
+ node = of_find_node_by_path("/reserved-memory");
+ if (node) {
+ node = of_find_compatible_node(node, NULL, "ramoops");
+ if (node)
+ of_platform_device_create(node, NULL, NULL);
+ }
+
+ /* Populate everything else. */
+ of_platform_default_populate(NULL, NULL, NULL);
+
+ return 0;
+}
+arch_initcall_sync(of_platform_default_populate_init);
+#endif
+
static int of_platform_device_destroy(struct device *dev, void *data)
{
/* Do not touch devices not populated from the device tree */
diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c
index d313d492f..46325d639 100644
--- a/drivers/of/resolver.c
+++ b/drivers/of/resolver.c
@@ -9,6 +9,8 @@
* version 2 as published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) "OF: resolver: " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -313,6 +315,11 @@ int of_resolve_phandles(struct device_node *resolve)
phandle phandle, phandle_delta;
int err;
+ if (!resolve)
+ pr_err("%s: null node\n", __func__);
+ if (resolve && !of_node_check_flag(resolve, OF_DETACHED))
+ pr_err("%s: node %s not detached\n", __func__,
+ resolve->full_name);
/* the resolve node must exist, and be detached */
if (!resolve || !of_node_check_flag(resolve, OF_DETACHED))
return -EINVAL;
@@ -369,6 +376,7 @@ int of_resolve_phandles(struct device_node *resolve)
/* we need to fixup, but no root symbols... */
if (!root_sym) {
+ pr_err("%s: no symbols in root of device tree.\n", __func__);
err = -EINVAL;
goto out;
}
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index f34ed9310..53c83d66e 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -771,7 +771,7 @@ static void __init of_unittest_platform_populate(void)
};
np = of_find_node_by_path("/testcase-data");
- of_platform_populate(np, of_default_bus_match_table, NULL, NULL);
+ of_platform_default_populate(np, NULL, NULL);
/* Test that a missing irq domain returns -EPROBE_DEFER */
np = of_find_node_by_path("/testcase-data/testcase-device1");
@@ -1871,8 +1871,7 @@ static void __init of_unittest_overlay(void)
goto out;
}
- ret = of_platform_populate(bus_np, of_default_bus_match_table,
- NULL, NULL);
+ ret = of_platform_default_populate(bus_np, NULL, NULL);
if (ret != 0) {
unittest(0, "could not populate bus @ \"%s\"\n", bus_path);
goto out;
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index e24b05996..3ed6238f8 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -790,7 +790,7 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
static dma_addr_t
ccio_map_page(struct device *dev, struct page *page, unsigned long offset,
size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return ccio_map_single(dev, page_address(page) + offset, size,
direction);
@@ -806,7 +806,7 @@ ccio_map_page(struct device *dev, struct page *page, unsigned long offset,
*/
static void
ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
- enum dma_data_direction direction, struct dma_attrs *attrs)
+ enum dma_data_direction direction, unsigned long attrs)
{
struct ioc *ioc;
unsigned long flags;
@@ -844,7 +844,7 @@ ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
*/
static void *
ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *ret;
#if 0
@@ -878,9 +878,9 @@ ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag,
*/
static void
ccio_free(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
- ccio_unmap_page(dev, dma_handle, size, 0, NULL);
+ ccio_unmap_page(dev, dma_handle, size, 0, 0);
free_pages((unsigned long)cpu_addr, get_order(size));
}
@@ -907,7 +907,7 @@ ccio_free(struct device *dev, size_t size, void *cpu_addr,
*/
static int
ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction direction, struct dma_attrs *attrs)
+ enum dma_data_direction direction, unsigned long attrs)
{
struct ioc *ioc;
int coalesced, filled = 0;
@@ -984,7 +984,7 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
*/
static void
ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction direction, struct dma_attrs *attrs)
+ enum dma_data_direction direction, unsigned long attrs)
{
struct ioc *ioc;
@@ -1004,7 +1004,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
#endif
ccio_unmap_page(dev, sg_dma_address(sglist),
- sg_dma_len(sglist), direction, NULL);
+ sg_dma_len(sglist), direction, 0);
++sglist;
}
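/*
 * Editorial aside, a minimal sketch of the DMA-attribute convention these
 * hunks adopt in 4.8: "struct dma_attrs *" becomes a plain bitmask, so
 * callers pass 0 instead of NULL and implementations test DMA_ATTR_* bits
 * with simple bitwise ANDs. The function below is hypothetical.
 */
#include <linux/dma-mapping.h>

static void example_sync(struct device *dev, dma_addr_t iova, size_t size,
			 enum dma_data_direction dir, unsigned long attrs)
{
	/* An attribute check is now a bit test, not a dma_get_attr() call */
	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
		return;
	/* ...perform CPU cache maintenance here... */
}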
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 42ec4600b..151b86b6d 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -783,7 +783,7 @@ sba_map_single(struct device *dev, void *addr, size_t size,
static dma_addr_t
sba_map_page(struct device *dev, struct page *page, unsigned long offset,
size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return sba_map_single(dev, page_address(page) + offset, size,
direction);
@@ -801,7 +801,7 @@ sba_map_page(struct device *dev, struct page *page, unsigned long offset,
*/
static void
sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
- enum dma_data_direction direction, struct dma_attrs *attrs)
+ enum dma_data_direction direction, unsigned long attrs)
{
struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
@@ -876,7 +876,7 @@ sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
* See Documentation/DMA-API-HOWTO.txt
*/
static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
- gfp_t gfp, struct dma_attrs *attrs)
+ gfp_t gfp, unsigned long attrs)
{
void *ret;
@@ -908,9 +908,9 @@ static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle
*/
static void
sba_free(struct device *hwdev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
- sba_unmap_page(hwdev, dma_handle, size, 0, NULL);
+ sba_unmap_page(hwdev, dma_handle, size, 0, 0);
free_pages((unsigned long) vaddr, get_order(size));
}
@@ -943,7 +943,7 @@ int dump_run_sg = 0;
*/
static int
sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction direction, struct dma_attrs *attrs)
+ enum dma_data_direction direction, unsigned long attrs)
{
struct ioc *ioc;
int coalesced, filled = 0;
@@ -1026,7 +1026,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
*/
static void
sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction direction, struct dma_attrs *attrs)
+ enum dma_data_direction direction, unsigned long attrs)
{
struct ioc *ioc;
#ifdef ASSERT_PDIR_SANITY
@@ -1051,7 +1051,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
while (sg_dma_len(sglist) && nents--) {
sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
- direction, NULL);
+ direction, 0);
#ifdef SBA_COLLECT_STATS
ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
ioc->usingle_calls--; /* kluge since call is unmap_sg() */
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 56389be5d..67f9916ff 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -25,7 +25,7 @@ config PCI_MSI
If you don't know what to do here, say Y.
config PCI_MSI_IRQ_DOMAIN
- bool
+ def_bool ARM || ARM64 || X86
depends on PCI_MSI
select GENERIC_MSI_IRQ_DOMAIN
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 1fa692573..8db5079f0 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -51,6 +51,9 @@ obj-$(CONFIG_ACPI) += pci-acpi.o
# SMBIOS provided firmware instance and labels
obj-$(CONFIG_PCI_LABEL) += pci-label.o
+# Intel MID platform PM support
+obj-$(CONFIG_X86_INTEL_MID) += pci-mid.o
+
obj-$(CONFIG_PCI_SYSCALL) += syscall.o
obj-$(CONFIG_PCI_STUB) += pci-stub.o
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index dd7cdbee8..c288e5a52 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -91,6 +91,35 @@ void pci_bus_remove_resources(struct pci_bus *bus)
}
}
+int devm_request_pci_bus_resources(struct device *dev,
+ struct list_head *resources)
+{
+ struct resource_entry *win;
+ struct resource *parent, *res;
+ int err;
+
+ resource_list_for_each_entry(win, resources) {
+ res = win->res;
+ switch (resource_type(res)) {
+ case IORESOURCE_IO:
+ parent = &ioport_resource;
+ break;
+ case IORESOURCE_MEM:
+ parent = &iomem_resource;
+ break;
+ default:
+ continue;
+ }
+
+ err = devm_request_resource(dev, parent, res);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_request_pci_bus_resources);
+
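/*
 * Editorial aside, a hedged sketch of how a host-bridge driver is expected
 * to use the new helper; the probe function name is hypothetical, but the
 * pattern matches the call sites converted later in this patch.
 */
static int example_bridge_probe(struct device *dev, struct list_head *resources)
{
	int err;

	/* Claim every I/O and MEM window against the root resources;
	 * the devm lifetime unwinds the requests if probing fails later. */
	err = devm_request_pci_bus_resources(dev, resources);
	if (err)
		return err;

	return 0;
}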
static struct pci_bus_region pci_32_bit = {0, 0xffffffffULL};
#ifdef CONFIG_PCI_BUS_ADDR_T_64BIT
static struct pci_bus_region pci_64_bit = {0,
@@ -291,6 +320,7 @@ void pci_bus_add_device(struct pci_dev *dev)
pci_fixup_device(pci_fixup_final, dev);
pci_create_sysfs_dev_files(dev);
pci_proc_attach_device(dev);
+ pci_bridge_d3_device_changed(dev);
dev->match_driver = true;
retval = device_attach(&dev->dev);
@@ -397,4 +427,3 @@ void pci_bus_put(struct pci_bus *bus)
put_device(&bus->dev);
}
EXPORT_SYMBOL(pci_bus_put);
-
diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c
index f9832ad8e..43ed08dd8 100644
--- a/drivers/pci/ecam.c
+++ b/drivers/pci/ecam.c
@@ -19,16 +19,15 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/pci-ecam.h>
#include <linux/slab.h>
-#include "ecam.h"
-
/*
* On 64-bit systems, we do a single ioremap for the whole config space
* since we have enough virtual address range available. On 32-bit, we
* ioremap the config space for each bus individually.
*/
-static const bool per_bus_mapping = !config_enabled(CONFIG_64BIT);
+static const bool per_bus_mapping = !IS_ENABLED(CONFIG_64BIT);
/*
* Create a PCI config space window
@@ -52,6 +51,7 @@ struct pci_config_window *pci_ecam_create(struct device *dev,
if (!cfg)
return ERR_PTR(-ENOMEM);
+ cfg->parent = dev;
cfg->ops = ops;
cfg->busr.start = busr->start;
cfg->busr.end = busr->end;
@@ -95,7 +95,7 @@ struct pci_config_window *pci_ecam_create(struct device *dev,
}
if (ops->init) {
- err = ops->init(dev, cfg);
+ err = ops->init(cfg);
if (err)
goto err_exit;
}
diff --git a/drivers/pci/host-bridge.c b/drivers/pci/host-bridge.c
index 5f4a2e04c..add662362 100644
--- a/drivers/pci/host-bridge.c
+++ b/drivers/pci/host-bridge.c
@@ -44,6 +44,7 @@ void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
bridge->release_fn = release_fn;
bridge->release_data = release_data;
}
+EXPORT_SYMBOL_GPL(pci_set_host_bridge_release);
void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
struct resource *res)
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 5d2374e4e..9b485d873 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -3,8 +3,9 @@ menu "PCI host controller drivers"
config PCI_DRA7XX
bool "TI DRA7xx PCIe controller"
- select PCIE_DW
depends on OF && HAS_IOMEM && TI_PIPE3
+ depends on PCI_MSI_IRQ_DOMAIN
+ select PCIE_DW
help
Enables support for the PCIe controller in the DRA7xx SoC. There
are two instances of PCIe controller in DRA7xx. This controller can
@@ -16,11 +17,20 @@ config PCI_MVEBU
depends on ARM
depends on OF
+config PCI_AARDVARK
+ bool "Aardvark PCIe controller"
+ depends on ARCH_MVEBU && ARM64
+ depends on OF
+ depends on PCI_MSI_IRQ_DOMAIN
+ help
+ Add support for Aardvark 64bit PCIe Host Controller. This
+ controller is part of the South Bridge of the Marvell Armada
+ 3700 SoC.
config PCIE_XILINX_NWL
bool "NWL PCIe Core"
depends on ARCH_ZYNQMP
- select PCI_MSI_IRQ_DOMAIN if PCI_MSI
+ depends on PCI_MSI_IRQ_DOMAIN
help
Say 'Y' here if you want kernel support for Xilinx
NWL PCIe controller. The controller can act as Root Port
@@ -29,6 +39,7 @@ config PCIE_XILINX_NWL
config PCIE_DW_PLAT
bool "Platform bus based DesignWare PCIe Controller"
+ depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW
---help---
This selects the DesignWare PCIe controller support. Select this if
@@ -40,16 +51,19 @@ config PCIE_DW_PLAT
config PCIE_DW
bool
+ depends on PCI_MSI_IRQ_DOMAIN
config PCI_EXYNOS
bool "Samsung Exynos PCIe controller"
depends on SOC_EXYNOS5440
+ depends on PCI_MSI_IRQ_DOMAIN
select PCIEPORTBUS
select PCIE_DW
config PCI_IMX6
bool "Freescale i.MX6 PCIe controller"
depends on SOC_IMX6Q
+ depends on PCI_MSI_IRQ_DOMAIN
select PCIEPORTBUS
select PCIE_DW
@@ -72,8 +86,7 @@ config PCI_RCAR_GEN2
config PCIE_RCAR
bool "Renesas R-Car PCIe controller"
depends on ARCH_RENESAS || (ARM && COMPILE_TEST)
- select PCI_MSI
- select PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI_IRQ_DOMAIN
help
Say Y here if you want PCIe controller support on R-Car SoCs.
@@ -85,6 +98,7 @@ config PCI_HOST_GENERIC
bool "Generic PCI host controller"
depends on (ARM || ARM64) && OF
select PCI_HOST_COMMON
+ select IRQ_DOMAIN
help
Say Y here if you want to support a simple generic PCI host
controller, such as the one emulated by kvmtool.
@@ -92,6 +106,7 @@ config PCI_HOST_GENERIC
config PCIE_SPEAR13XX
bool "STMicroelectronics SPEAr PCIe controller"
depends on ARCH_SPEAR13XX
+ depends on PCI_MSI_IRQ_DOMAIN
select PCIEPORTBUS
select PCIE_DW
help
@@ -100,6 +115,7 @@ config PCIE_SPEAR13XX
config PCI_KEYSTONE
bool "TI Keystone PCIe controller"
depends on ARCH_KEYSTONE
+ depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW
select PCIEPORTBUS
help
@@ -120,7 +136,6 @@ config PCI_XGENE
depends on ARCH_XGENE
depends on OF
select PCIEPORTBUS
- select PCI_MSI_IRQ_DOMAIN if PCI_MSI
help
Say Y here if you want internal PCI support on APM X-Gene SoC.
There are 5 internal PCIe ports available. Each port is GEN3 capable
@@ -128,7 +143,8 @@ config PCI_XGENE
config PCI_XGENE_MSI
bool "X-Gene v1 PCIe MSI feature"
- depends on PCI_XGENE && PCI_MSI
+ depends on PCI_XGENE
+ depends on PCI_MSI_IRQ_DOMAIN
default y
help
Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC.
@@ -137,6 +153,7 @@ config PCI_XGENE_MSI
config PCI_LAYERSCAPE
bool "Freescale Layerscape PCIe controller"
depends on OF && (ARM || ARCH_LAYERSCAPE)
+ depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW
select MFD_SYSCON
help
@@ -177,8 +194,7 @@ config PCIE_IPROC_BCMA
config PCIE_IPROC_MSI
bool "Broadcom iProc PCIe MSI support"
depends on PCIE_IPROC_PLATFORM || PCIE_IPROC_BCMA
- depends on PCI_MSI
- select PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI_IRQ_DOMAIN
default ARCH_BCM_IPROC
help
Say Y here if you want to enable MSI support for Broadcom's iProc
@@ -195,8 +211,8 @@ config PCIE_ALTERA
config PCIE_ALTERA_MSI
bool "Altera PCIe MSI feature"
- depends on PCIE_ALTERA && PCI_MSI
- select PCI_MSI_IRQ_DOMAIN
+ depends on PCIE_ALTERA
+ depends on PCI_MSI_IRQ_DOMAIN
help
Say Y here if you want PCIe MSI support for the Altera FPGA.
This MSI driver supports Altera MSI to GIC controller IP.
@@ -204,6 +220,7 @@ config PCIE_ALTERA_MSI
config PCI_HISI
depends on OF && ARM64
bool "HiSilicon Hip05 and Hip06 SoCs PCIe controllers"
+ depends on PCI_MSI_IRQ_DOMAIN
select PCIEPORTBUS
select PCIE_DW
help
@@ -213,6 +230,7 @@ config PCI_HISI
config PCIE_QCOM
bool "Qualcomm PCIe controller"
depends on ARCH_QCOM && OF
+ depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW
select PCIEPORTBUS
help
@@ -237,6 +255,7 @@ config PCI_HOST_THUNDER_ECAM
config PCIE_ARMADA_8K
bool "Marvell Armada-8K PCIe controller"
depends on ARCH_MVEBU
+ depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW
select PCIEPORTBUS
help
@@ -245,4 +264,14 @@ config PCIE_ARMADA_8K
Designware hardware and therefore the driver re-uses the
Designware core functions to implement the driver.
+config PCIE_ARTPEC6
+ bool "Axis ARTPEC-6 PCIe controller"
+ depends on MACH_ARTPEC6
+ depends on PCI_MSI_IRQ_DOMAIN
+ select PCIE_DW
+ select PCIEPORTBUS
+ help
+ Say Y here to enable PCIe controller support on Axis ARTPEC-6
+ SoCs. This PCIe controller uses the DesignWare core.
+
endmenu
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 9c8698e89..88434101e 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
obj-$(CONFIG_PCI_HYPERV) += pci-hyperv.o
obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
+obj-$(CONFIG_PCI_AARDVARK) += pci-aardvark.o
obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o
obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
obj-$(CONFIG_PCIE_RCAR) += pcie-rcar.o
@@ -29,3 +30,4 @@ obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
obj-$(CONFIG_PCI_HOST_THUNDER_ECAM) += pci-thunder-ecam.o
obj-$(CONFIG_PCI_HOST_THUNDER_PEM) += pci-thunder-pem.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
+obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
new file mode 100644
index 000000000..ef9893fa3
--- /dev/null
+++ b/drivers/pci/host/pci-aardvark.c
@@ -0,0 +1,1001 @@
+/*
+ * Driver for the Aardvark PCIe controller, used on Marvell Armada
+ * 3700.
+ *
+ * Copyright (C) 2016 Marvell
+ *
+ * Author: Hezi Shahmoon <hezi.shahmoon@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+
+/* PCIe core registers */
+#define PCIE_CORE_CMD_STATUS_REG 0x4
+#define PCIE_CORE_CMD_IO_ACCESS_EN BIT(0)
+#define PCIE_CORE_CMD_MEM_ACCESS_EN BIT(1)
+#define PCIE_CORE_CMD_MEM_IO_REQ_EN BIT(2)
+#define PCIE_CORE_DEV_CTRL_STATS_REG 0xc8
+#define PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE (0 << 4)
+#define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5
+#define PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE (0 << 11)
+#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT 12
+#define PCIE_CORE_LINK_CTRL_STAT_REG 0xd0
+#define PCIE_CORE_LINK_L0S_ENTRY BIT(0)
+#define PCIE_CORE_LINK_TRAINING BIT(5)
+#define PCIE_CORE_LINK_WIDTH_SHIFT 20
+#define PCIE_CORE_ERR_CAPCTL_REG 0x118
+#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5)
+#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6)
+#define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK BIT(7)
+#define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV BIT(8)
+
+/* PIO registers base address and register offsets */
+#define PIO_BASE_ADDR 0x4000
+#define PIO_CTRL (PIO_BASE_ADDR + 0x0)
+#define PIO_CTRL_TYPE_MASK GENMASK(3, 0)
+#define PIO_CTRL_ADDR_WIN_DISABLE BIT(24)
+#define PIO_STAT (PIO_BASE_ADDR + 0x4)
+#define PIO_COMPLETION_STATUS_SHIFT 7
+#define PIO_COMPLETION_STATUS_MASK GENMASK(9, 7)
+#define PIO_COMPLETION_STATUS_OK 0
+#define PIO_COMPLETION_STATUS_UR 1
+#define PIO_COMPLETION_STATUS_CRS 2
+#define PIO_COMPLETION_STATUS_CA 4
+#define PIO_NON_POSTED_REQ BIT(0)
+#define PIO_ADDR_LS (PIO_BASE_ADDR + 0x8)
+#define PIO_ADDR_MS (PIO_BASE_ADDR + 0xc)
+#define PIO_WR_DATA (PIO_BASE_ADDR + 0x10)
+#define PIO_WR_DATA_STRB (PIO_BASE_ADDR + 0x14)
+#define PIO_RD_DATA (PIO_BASE_ADDR + 0x18)
+#define PIO_START (PIO_BASE_ADDR + 0x1c)
+#define PIO_ISR (PIO_BASE_ADDR + 0x20)
+#define PIO_ISRM (PIO_BASE_ADDR + 0x24)
+
+/* Aardvark Control registers */
+#define CONTROL_BASE_ADDR 0x4800
+#define PCIE_CORE_CTRL0_REG (CONTROL_BASE_ADDR + 0x0)
+#define PCIE_GEN_SEL_MSK 0x3
+#define PCIE_GEN_SEL_SHIFT 0x0
+#define SPEED_GEN_1 0
+#define SPEED_GEN_2 1
+#define SPEED_GEN_3 2
+#define IS_RC_MSK 1
+#define IS_RC_SHIFT 2
+#define LANE_CNT_MSK 0x18
+#define LANE_CNT_SHIFT 0x3
+#define LANE_COUNT_1 (0 << LANE_CNT_SHIFT)
+#define LANE_COUNT_2 (1 << LANE_CNT_SHIFT)
+#define LANE_COUNT_4 (2 << LANE_CNT_SHIFT)
+#define LANE_COUNT_8 (3 << LANE_CNT_SHIFT)
+#define LINK_TRAINING_EN BIT(6)
+#define LEGACY_INTA BIT(28)
+#define LEGACY_INTB BIT(29)
+#define LEGACY_INTC BIT(30)
+#define LEGACY_INTD BIT(31)
+#define PCIE_CORE_CTRL1_REG (CONTROL_BASE_ADDR + 0x4)
+#define HOT_RESET_GEN BIT(0)
+#define PCIE_CORE_CTRL2_REG (CONTROL_BASE_ADDR + 0x8)
+#define PCIE_CORE_CTRL2_RESERVED 0x7
+#define PCIE_CORE_CTRL2_TD_ENABLE BIT(4)
+#define PCIE_CORE_CTRL2_STRICT_ORDER_ENABLE BIT(5)
+#define PCIE_CORE_CTRL2_OB_WIN_ENABLE BIT(6)
+#define PCIE_CORE_CTRL2_MSI_ENABLE BIT(10)
+#define PCIE_ISR0_REG (CONTROL_BASE_ADDR + 0x40)
+#define PCIE_ISR0_MASK_REG (CONTROL_BASE_ADDR + 0x44)
+#define PCIE_ISR0_MSI_INT_PENDING BIT(24)
+#define PCIE_ISR0_INTX_ASSERT(val) BIT(16 + (val))
+#define PCIE_ISR0_INTX_DEASSERT(val) BIT(20 + (val))
+#define PCIE_ISR0_ALL_MASK GENMASK(26, 0)
+#define PCIE_ISR1_REG (CONTROL_BASE_ADDR + 0x48)
+#define PCIE_ISR1_MASK_REG (CONTROL_BASE_ADDR + 0x4C)
+#define PCIE_ISR1_POWER_STATE_CHANGE BIT(4)
+#define PCIE_ISR1_FLUSH BIT(5)
+#define PCIE_ISR1_ALL_MASK GENMASK(5, 4)
+#define PCIE_MSI_ADDR_LOW_REG (CONTROL_BASE_ADDR + 0x50)
+#define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54)
+#define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58)
+#define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C)
+#define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C)
+
+/* PCIe window configuration */
+#define OB_WIN_BASE_ADDR 0x4c00
+#define OB_WIN_BLOCK_SIZE 0x20
+#define OB_WIN_REG_ADDR(win, offset) (OB_WIN_BASE_ADDR + \
+ OB_WIN_BLOCK_SIZE * (win) + \
+ (offset))
+#define OB_WIN_MATCH_LS(win) OB_WIN_REG_ADDR(win, 0x00)
+#define OB_WIN_MATCH_MS(win) OB_WIN_REG_ADDR(win, 0x04)
+#define OB_WIN_REMAP_LS(win) OB_WIN_REG_ADDR(win, 0x08)
+#define OB_WIN_REMAP_MS(win) OB_WIN_REG_ADDR(win, 0x0c)
+#define OB_WIN_MASK_LS(win) OB_WIN_REG_ADDR(win, 0x10)
+#define OB_WIN_MASK_MS(win) OB_WIN_REG_ADDR(win, 0x14)
+#define OB_WIN_ACTIONS(win) OB_WIN_REG_ADDR(win, 0x18)
+
+/* PCIe window types */
+#define OB_PCIE_MEM 0x0
+#define OB_PCIE_IO 0x4
+
+/* LMI registers base address and register offsets */
+#define LMI_BASE_ADDR 0x6000
+#define CFG_REG (LMI_BASE_ADDR + 0x0)
+#define LTSSM_SHIFT 24
+#define LTSSM_MASK 0x3f
+#define LTSSM_L0 0x10
+#define RC_BAR_CONFIG 0x300
+
+/* PCIe core controller registers */
+#define CTRL_CORE_BASE_ADDR 0x18000
+#define CTRL_CONFIG_REG (CTRL_CORE_BASE_ADDR + 0x0)
+#define CTRL_MODE_SHIFT 0x0
+#define CTRL_MODE_MASK 0x1
+#define PCIE_CORE_MODE_DIRECT 0x0
+#define PCIE_CORE_MODE_COMMAND 0x1
+
+/* PCIe Central Interrupts Registers */
+#define CENTRAL_INT_BASE_ADDR 0x1b000
+#define HOST_CTRL_INT_STATUS_REG (CENTRAL_INT_BASE_ADDR + 0x0)
+#define HOST_CTRL_INT_MASK_REG (CENTRAL_INT_BASE_ADDR + 0x4)
+#define PCIE_IRQ_CMDQ_INT BIT(0)
+#define PCIE_IRQ_MSI_STATUS_INT BIT(1)
+#define PCIE_IRQ_CMD_SENT_DONE BIT(3)
+#define PCIE_IRQ_DMA_INT BIT(4)
+#define PCIE_IRQ_IB_DXFERDONE BIT(5)
+#define PCIE_IRQ_OB_DXFERDONE BIT(6)
+#define PCIE_IRQ_OB_RXFERDONE BIT(7)
+#define PCIE_IRQ_COMPQ_INT BIT(12)
+#define PCIE_IRQ_DIR_RD_DDR_DET BIT(13)
+#define PCIE_IRQ_DIR_WR_DDR_DET BIT(14)
+#define PCIE_IRQ_CORE_INT BIT(16)
+#define PCIE_IRQ_CORE_INT_PIO BIT(17)
+#define PCIE_IRQ_DPMU_INT BIT(18)
+#define PCIE_IRQ_PCIE_MIS_INT BIT(19)
+#define PCIE_IRQ_MSI_INT1_DET BIT(20)
+#define PCIE_IRQ_MSI_INT2_DET BIT(21)
+#define PCIE_IRQ_RC_DBELL_DET BIT(22)
+#define PCIE_IRQ_EP_STATUS BIT(23)
+#define PCIE_IRQ_ALL_MASK 0xfff0fb
+#define PCIE_IRQ_ENABLE_INTS_MASK PCIE_IRQ_CORE_INT
+
+/* Transaction types */
+#define PCIE_CONFIG_RD_TYPE0 0x8
+#define PCIE_CONFIG_RD_TYPE1 0x9
+#define PCIE_CONFIG_WR_TYPE0 0xa
+#define PCIE_CONFIG_WR_TYPE1 0xb
+
+/* PCI_BDF shifts by 8 bits, so we need an extra 4-bit shift */
+#define PCIE_BDF(dev) (dev << 4)
+#define PCIE_CONF_BUS(bus) (((bus) & 0xff) << 20)
+#define PCIE_CONF_DEV(dev) (((dev) & 0x1f) << 15)
+#define PCIE_CONF_FUNC(fun) (((fun) & 0x7) << 12)
+#define PCIE_CONF_REG(reg) ((reg) & 0xffc)
+#define PCIE_CONF_ADDR(bus, devfn, where) \
+ (PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \
+ PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where))
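+/*
+ * Editorial worked example of the layout above: bus in bits [27:20],
+ * device [19:15], function [14:12], dword-aligned register [11:2].
+ * For bus 1, devfn PCI_DEVFN(2, 1) and where 0x44:
+ *
+ *   PCIE_CONF_ADDR(1, PCI_DEVFN(2, 1), 0x44)
+ *     = (1 << 20) | (2 << 15) | (1 << 12) | 0x44 = 0x111044
+ */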
+
+#define PIO_TIMEOUT_MS 1
+
+#define LINK_WAIT_MAX_RETRIES 10
+#define LINK_WAIT_USLEEP_MIN 90000
+#define LINK_WAIT_USLEEP_MAX 100000
+
+#define LEGACY_IRQ_NUM 4
+#define MSI_IRQ_NUM 32
+
+struct advk_pcie {
+ struct platform_device *pdev;
+ void __iomem *base;
+ struct list_head resources;
+ struct irq_domain *irq_domain;
+ struct irq_chip irq_chip;
+ struct msi_controller msi;
+ struct irq_domain *msi_domain;
+ struct irq_chip msi_irq_chip;
+ DECLARE_BITMAP(msi_irq_in_use, MSI_IRQ_NUM);
+ struct mutex msi_used_lock;
+ u16 msi_msg;
+ int root_bus_nr;
+};
+
+static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg)
+{
+ writel(val, pcie->base + reg);
+}
+
+static inline u32 advk_readl(struct advk_pcie *pcie, u64 reg)
+{
+ return readl(pcie->base + reg);
+}
+
+static int advk_pcie_link_up(struct advk_pcie *pcie)
+{
+ u32 val, ltssm_state;
+
+ val = advk_readl(pcie, CFG_REG);
+ ltssm_state = (val >> LTSSM_SHIFT) & LTSSM_MASK;
+ return ltssm_state >= LTSSM_L0;
+}
+
+static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
+{
+ int retries;
+
+ /* Check whether the link is up */
+ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+ if (advk_pcie_link_up(pcie)) {
+ dev_info(&pcie->pdev->dev, "link up\n");
+ return 0;
+ }
+
+ usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+ }
+
+ dev_err(&pcie->pdev->dev, "link never came up\n");
+
+ return -ETIMEDOUT;
+}
+
+/*
+ * Set PCIe address window register which could be used for memory
+ * mapping.
+ */
+static void advk_pcie_set_ob_win(struct advk_pcie *pcie,
+ u32 win_num, u32 match_ms,
+ u32 match_ls, u32 mask_ms,
+ u32 mask_ls, u32 remap_ms,
+ u32 remap_ls, u32 action)
+{
+ advk_writel(pcie, match_ls, OB_WIN_MATCH_LS(win_num));
+ advk_writel(pcie, match_ms, OB_WIN_MATCH_MS(win_num));
+ advk_writel(pcie, mask_ms, OB_WIN_MASK_MS(win_num));
+ advk_writel(pcie, mask_ls, OB_WIN_MASK_LS(win_num));
+ advk_writel(pcie, remap_ms, OB_WIN_REMAP_MS(win_num));
+ advk_writel(pcie, remap_ls, OB_WIN_REMAP_LS(win_num));
+ advk_writel(pcie, action, OB_WIN_ACTIONS(win_num));
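+ /* The low bit of the match LS word appears to act as the window enable */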
+ advk_writel(pcie, match_ls | BIT(0), OB_WIN_MATCH_LS(win_num));
+}
+
+static void advk_pcie_setup_hw(struct advk_pcie *pcie)
+{
+ u32 reg;
+ int i;
+
+ /* Point PCIe unit MBUS decode windows to DRAM space */
+ for (i = 0; i < 8; i++)
+ advk_pcie_set_ob_win(pcie, i, 0, 0, 0, 0, 0, 0, 0);
+
+ /* Set to Direct mode */
+ reg = advk_readl(pcie, CTRL_CONFIG_REG);
+ reg &= ~(CTRL_MODE_MASK << CTRL_MODE_SHIFT);
+ reg |= ((PCIE_CORE_MODE_DIRECT & CTRL_MODE_MASK) << CTRL_MODE_SHIFT);
+ advk_writel(pcie, reg, CTRL_CONFIG_REG);
+
+ /* Set PCI global control register to RC mode */
+ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+ reg |= (IS_RC_MSK << IS_RC_SHIFT);
+ advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+
+ /* Set Advanced Error Capabilities and Control PF0 register */
+ reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX |
+ PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN |
+ PCIE_CORE_ERR_CAPCTL_ECRC_CHCK |
+ PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV;
+ advk_writel(pcie, reg, PCIE_CORE_ERR_CAPCTL_REG);
+
+ /* Set PCIe Device Control and Status 1 PF0 register */
+ reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE |
+ (7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) |
+ PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE |
+ PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT;
+ advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG);
+
+ /* Program PCIe Control 2 to disable strict ordering */
+ reg = PCIE_CORE_CTRL2_RESERVED |
+ PCIE_CORE_CTRL2_TD_ENABLE;
+ advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
+
+ /* Set GEN2 */
+ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+ reg &= ~PCIE_GEN_SEL_MSK;
+ reg |= SPEED_GEN_2;
+ advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+
+ /* Set lane X1 */
+ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+ reg &= ~LANE_CNT_MSK;
+ reg |= LANE_COUNT_1;
+ advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+
+ /* Enable link training */
+ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+ reg |= LINK_TRAINING_EN;
+ advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+
+ /* Enable MSI */
+ reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
+ reg |= PCIE_CORE_CTRL2_MSI_ENABLE;
+ advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
+
+ /* Clear all interrupts */
+ advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG);
+ advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
+ advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
+
+ /* Disable all ISR0/1 sources except the MSI interrupt */
+ reg = PCIE_ISR0_ALL_MASK;
+ reg &= ~PCIE_ISR0_MSI_INT_PENDING;
+ advk_writel(pcie, reg, PCIE_ISR0_MASK_REG);
+
+ advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
+
+ /* Unmask all MSIs */
+ advk_writel(pcie, 0, PCIE_MSI_MASK_REG);
+
+ /* Enable summary interrupt for GIC SPI source */
+ reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
+ advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG);
+
+ reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
+ reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE;
+ advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
+
+ /* Bypass the address window mapping for PIO */
+ reg = advk_readl(pcie, PIO_CTRL);
+ reg |= PIO_CTRL_ADDR_WIN_DISABLE;
+ advk_writel(pcie, reg, PIO_CTRL);
+
+ /* Start link training */
+ reg = advk_readl(pcie, PCIE_CORE_LINK_CTRL_STAT_REG);
+ reg |= PCIE_CORE_LINK_TRAINING;
+ advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG);
+
+ advk_pcie_wait_for_link(pcie);
+
+ reg = PCIE_CORE_LINK_L0S_ENTRY |
+ (1 << PCIE_CORE_LINK_WIDTH_SHIFT);
+ advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG);
+
+ reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
+ reg |= PCIE_CORE_CMD_MEM_ACCESS_EN |
+ PCIE_CORE_CMD_IO_ACCESS_EN |
+ PCIE_CORE_CMD_MEM_IO_REQ_EN;
+ advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);
+}
+
+static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
+{
+ u32 reg;
+ unsigned int status;
+ char *strcomp_status, *str_posted;
+
+ reg = advk_readl(pcie, PIO_STAT);
+ status = (reg & PIO_COMPLETION_STATUS_MASK) >>
+ PIO_COMPLETION_STATUS_SHIFT;
+
+ if (!status)
+ return;
+
+ switch (status) {
+ case PIO_COMPLETION_STATUS_UR:
+ strcomp_status = "UR";
+ break;
+ case PIO_COMPLETION_STATUS_CRS:
+ strcomp_status = "CRS";
+ break;
+ case PIO_COMPLETION_STATUS_CA:
+ strcomp_status = "CA";
+ break;
+ default:
+ strcomp_status = "Unknown";
+ break;
+ }
+
+ if (reg & PIO_NON_POSTED_REQ)
+ str_posted = "Non-posted";
+ else
+ str_posted = "Posted";
+
+ dev_err(&pcie->pdev->dev, "%s PIO Response Status: %s, %#x @ %#x\n",
+ str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));
+}
+
+static int advk_pcie_wait_pio(struct advk_pcie *pcie)
+{
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(PIO_TIMEOUT_MS);
+
+ while (time_before(jiffies, timeout)) {
+ u32 start, isr;
+
+ start = advk_readl(pcie, PIO_START);
+ isr = advk_readl(pcie, PIO_ISR);
+ if (!start && isr)
+ return 0;
+ }
+
+ dev_err(&pcie->pdev->dev, "config read/write timed out\n");
+ return -ETIMEDOUT;
+}
+
+static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 *val)
+{
+ struct advk_pcie *pcie = bus->sysdata;
+ u32 reg;
+ int ret;
+
+ if (PCI_SLOT(devfn) != 0) {
+ *val = 0xffffffff;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ /* Start PIO */
+ advk_writel(pcie, 0, PIO_START);
+ advk_writel(pcie, 1, PIO_ISR);
+
+ /* Program the control register */
+ reg = advk_readl(pcie, PIO_CTRL);
+ reg &= ~PIO_CTRL_TYPE_MASK;
+ if (bus->number == pcie->root_bus_nr)
+ reg |= PCIE_CONFIG_RD_TYPE0;
+ else
+ reg |= PCIE_CONFIG_RD_TYPE1;
+ advk_writel(pcie, reg, PIO_CTRL);
+
+ /* Program the address registers */
+ reg = PCIE_BDF(devfn) | PCIE_CONF_REG(where);
+ advk_writel(pcie, reg, PIO_ADDR_LS);
+ advk_writel(pcie, 0, PIO_ADDR_MS);
+
+ /* Program the data strobe */
+ advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);
+
+ /* Start the transfer */
+ advk_writel(pcie, 1, PIO_START);
+
+ ret = advk_pcie_wait_pio(pcie);
+ if (ret < 0)
+ return PCIBIOS_SET_FAILED;
+
+ advk_pcie_check_pio_status(pcie);
+
+ /* Get the read result */
+ *val = advk_readl(pcie, PIO_RD_DATA);
+ if (size == 1)
+ *val = (*val >> (8 * (where & 3))) & 0xff;
+ else if (size == 2)
+ *val = (*val >> (8 * (where & 3))) & 0xffff;
+
+ return PCIBIOS_SUCCESSFUL;
+}
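+/*
+ * Editorial note on the sub-word handling above: the PIO engine always
+ * returns a full dword, so 1- and 2-byte reads shift the selected byte
+ * lane down before masking. E.g. size == 2, where == 0x06: the value is
+ * shifted right by 8 * (0x06 & 3) = 16 bits and masked with 0xffff.
+ */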
+
+static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 val)
+{
+ struct advk_pcie *pcie = bus->sysdata;
+ u32 reg;
+ u32 data_strobe = 0x0;
+ int offset;
+ int ret;
+
+ if (PCI_SLOT(devfn) != 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (where % size)
+ return PCIBIOS_SET_FAILED;
+
+ /* Start PIO */
+ advk_writel(pcie, 0, PIO_START);
+ advk_writel(pcie, 1, PIO_ISR);
+
+ /* Program the control register */
+ reg = advk_readl(pcie, PIO_CTRL);
+ reg &= ~PIO_CTRL_TYPE_MASK;
+ if (bus->number == pcie->root_bus_nr)
+ reg |= PCIE_CONFIG_WR_TYPE0;
+ else
+ reg |= PCIE_CONFIG_WR_TYPE1;
+ advk_writel(pcie, reg, PIO_CTRL);
+
+ /* Program the address registers */
+ reg = PCIE_CONF_ADDR(bus->number, devfn, where);
+ advk_writel(pcie, reg, PIO_ADDR_LS);
+ advk_writel(pcie, 0, PIO_ADDR_MS);
+
+ /* Calculate the write strobe */
+ offset = where & 0x3;
+ reg = val << (8 * offset);
+ data_strobe = GENMASK(size - 1, 0) << offset;
+
+ /* Program the data register */
+ advk_writel(pcie, reg, PIO_WR_DATA);
+
+ /* Program the data strobe */
+ advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);
+
+ /* Start the transfer */
+ advk_writel(pcie, 1, PIO_START);
+
+ ret = advk_pcie_wait_pio(pcie);
+ if (ret < 0)
+ return PCIBIOS_SET_FAILED;
+
+ advk_pcie_check_pio_status(pcie);
+
+ return PCIBIOS_SUCCESSFUL;
+}
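+/*
+ * Editorial note on the strobe arithmetic above: the byte-enable strobe
+ * is a size-wide mask shifted to the target byte lane. E.g. a 2-byte
+ * write at where == 0x42: offset = 2, data_strobe = GENMASK(1, 0) << 2
+ * = 0xc, and the data is shifted up by 8 * 2 = 16 bits.
+ */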
+
+static struct pci_ops advk_pcie_ops = {
+ .read = advk_pcie_rd_conf,
+ .write = advk_pcie_wr_conf,
+};
+
+static int advk_pcie_alloc_msi(struct advk_pcie *pcie)
+{
+ int hwirq;
+
+ mutex_lock(&pcie->msi_used_lock);
+ hwirq = find_first_zero_bit(pcie->msi_irq_in_use, MSI_IRQ_NUM);
+ if (hwirq >= MSI_IRQ_NUM)
+ hwirq = -ENOSPC;
+ else
+ set_bit(hwirq, pcie->msi_irq_in_use);
+ mutex_unlock(&pcie->msi_used_lock);
+
+ return hwirq;
+}
+
+static void advk_pcie_free_msi(struct advk_pcie *pcie, int hwirq)
+{
+ mutex_lock(&pcie->msi_used_lock);
+ if (!test_bit(hwirq, pcie->msi_irq_in_use))
+ dev_err(&pcie->pdev->dev, "trying to free unused MSI#%d\n",
+ hwirq);
+ else
+ clear_bit(hwirq, pcie->msi_irq_in_use);
+ mutex_unlock(&pcie->msi_used_lock);
+}
+
+static int advk_pcie_setup_msi_irq(struct msi_controller *chip,
+ struct pci_dev *pdev,
+ struct msi_desc *desc)
+{
+ struct advk_pcie *pcie = pdev->bus->sysdata;
+ struct msi_msg msg;
+ int virq, hwirq;
+ phys_addr_t msi_msg_phys;
+
+ /* We support MSI, but not MSI-X */
+ if (desc->msi_attrib.is_msix)
+ return -EINVAL;
+
+ hwirq = advk_pcie_alloc_msi(pcie);
+ if (hwirq < 0)
+ return hwirq;
+
+ virq = irq_create_mapping(pcie->msi_domain, hwirq);
+ if (!virq) {
+ advk_pcie_free_msi(pcie, hwirq);
+ return -EINVAL;
+ }
+
+ irq_set_msi_desc(virq, desc);
+
+ msi_msg_phys = virt_to_phys(&pcie->msi_msg);
+
+ msg.address_lo = lower_32_bits(msi_msg_phys);
+ msg.address_hi = upper_32_bits(msi_msg_phys);
+ msg.data = virq;
+
+ pci_write_msi_msg(virq, &msg);
+
+ return 0;
+}
+
+static void advk_pcie_teardown_msi_irq(struct msi_controller *chip,
+ unsigned int irq)
+{
+ struct irq_data *d = irq_get_irq_data(irq);
+ struct msi_desc *msi = irq_data_get_msi_desc(d);
+ struct advk_pcie *pcie = msi_desc_to_pci_sysdata(msi);
+ unsigned long hwirq = d->hwirq;
+
+ irq_dispose_mapping(irq);
+ advk_pcie_free_msi(pcie, hwirq);
+}
+
+static int advk_pcie_msi_map(struct irq_domain *domain,
+ unsigned int virq, irq_hw_number_t hw)
+{
+ struct advk_pcie *pcie = domain->host_data;
+
+ irq_set_chip_and_handler(virq, &pcie->msi_irq_chip,
+ handle_simple_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops advk_pcie_msi_irq_ops = {
+ .map = advk_pcie_msi_map,
+};
+
+static void advk_pcie_irq_mask(struct irq_data *d)
+{
+ struct advk_pcie *pcie = d->domain->host_data;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ u32 mask;
+
+ mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
+ mask |= PCIE_ISR0_INTX_ASSERT(hwirq);
+ advk_writel(pcie, mask, PCIE_ISR0_MASK_REG);
+}
+
+static void advk_pcie_irq_unmask(struct irq_data *d)
+{
+ struct advk_pcie *pcie = d->domain->host_data;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ u32 mask;
+
+ mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
+ mask &= ~PCIE_ISR0_INTX_ASSERT(hwirq);
+ advk_writel(pcie, mask, PCIE_ISR0_MASK_REG);
+}
+
+static int advk_pcie_irq_map(struct irq_domain *h,
+ unsigned int virq, irq_hw_number_t hwirq)
+{
+ struct advk_pcie *pcie = h->host_data;
+
+ advk_pcie_irq_mask(irq_get_irq_data(virq));
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &pcie->irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(virq, pcie);
+
+ return 0;
+}
+
+static const struct irq_domain_ops advk_pcie_irq_domain_ops = {
+ .map = advk_pcie_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct irq_chip *msi_irq_chip;
+ struct msi_controller *msi;
+ phys_addr_t msi_msg_phys;
+ int ret;
+
+ msi_irq_chip = &pcie->msi_irq_chip;
+
+ msi_irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-msi",
+ dev_name(dev));
+ if (!msi_irq_chip->name)
+ return -ENOMEM;
+
+ msi_irq_chip->irq_enable = pci_msi_unmask_irq;
+ msi_irq_chip->irq_disable = pci_msi_mask_irq;
+ msi_irq_chip->irq_mask = pci_msi_mask_irq;
+ msi_irq_chip->irq_unmask = pci_msi_unmask_irq;
+
+ msi = &pcie->msi;
+
+ msi->setup_irq = advk_pcie_setup_msi_irq;
+ msi->teardown_irq = advk_pcie_teardown_msi_irq;
+ msi->of_node = node;
+
+ mutex_init(&pcie->msi_used_lock);
+
+ msi_msg_phys = virt_to_phys(&pcie->msi_msg);
+
+ advk_writel(pcie, lower_32_bits(msi_msg_phys),
+ PCIE_MSI_ADDR_LOW_REG);
+ advk_writel(pcie, upper_32_bits(msi_msg_phys),
+ PCIE_MSI_ADDR_HIGH_REG);
+
+ pcie->msi_domain =
+ irq_domain_add_linear(NULL, MSI_IRQ_NUM,
+ &advk_pcie_msi_irq_ops, pcie);
+ if (!pcie->msi_domain)
+ return -ENOMEM;
+
+ ret = of_pci_msi_chip_add(msi);
+ if (ret < 0) {
+ irq_domain_remove(pcie->msi_domain);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie)
+{
+ of_pci_msi_chip_remove(&pcie->msi);
+ irq_domain_remove(pcie->msi_domain);
+}
+
+static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node;
+ struct irq_chip *irq_chip;
+
+ pcie_intc_node = of_get_next_child(node, NULL);
+ if (!pcie_intc_node) {
+ dev_err(dev, "No PCIe Intc node found\n");
+ return -ENODEV;
+ }
+
+ irq_chip = &pcie->irq_chip;
+
+ irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq",
+ dev_name(dev));
+ if (!irq_chip->name) {
+ of_node_put(pcie_intc_node);
+ return -ENOMEM;
+ }
+
+ irq_chip->irq_mask = advk_pcie_irq_mask;
+ irq_chip->irq_mask_ack = advk_pcie_irq_mask;
+ irq_chip->irq_unmask = advk_pcie_irq_unmask;
+
+ pcie->irq_domain =
+ irq_domain_add_linear(pcie_intc_node, LEGACY_IRQ_NUM,
+ &advk_pcie_irq_domain_ops, pcie);
+ if (!pcie->irq_domain) {
+ dev_err(dev, "Failed to get a INTx IRQ domain\n");
+ of_node_put(pcie_intc_node);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie)
+{
+ irq_domain_remove(pcie->irq_domain);
+}
+
+static void advk_pcie_handle_msi(struct advk_pcie *pcie)
+{
+ u32 msi_val, msi_mask, msi_status, msi_idx;
+ u16 msi_data;
+
+ msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
+ msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG);
+ msi_status = msi_val & ~msi_mask;
+
+ for (msi_idx = 0; msi_idx < MSI_IRQ_NUM; msi_idx++) {
+ if (!(BIT(msi_idx) & msi_status))
+ continue;
+
+ advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG);
+ msi_data = advk_readl(pcie, PCIE_MSI_PAYLOAD_REG) & 0xFF;
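+ /* The payload carries the virq programmed into msg.data at setup time */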
+ generic_handle_irq(msi_data);
+ }
+
+ advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING,
+ PCIE_ISR0_REG);
+}
+
+static void advk_pcie_handle_int(struct advk_pcie *pcie)
+{
+ u32 val, mask, status;
+ int i, virq;
+
+ val = advk_readl(pcie, PCIE_ISR0_REG);
+ mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
+ status = val & ((~mask) & PCIE_ISR0_ALL_MASK);
+
+ if (!status) {
+ advk_writel(pcie, val, PCIE_ISR0_REG);
+ return;
+ }
+
+ /* Process MSI interrupts */
+ if (status & PCIE_ISR0_MSI_INT_PENDING)
+ advk_pcie_handle_msi(pcie);
+
+ /* Process legacy interrupts */
+ for (i = 0; i < LEGACY_IRQ_NUM; i++) {
+ if (!(status & PCIE_ISR0_INTX_ASSERT(i)))
+ continue;
+
+ advk_writel(pcie, PCIE_ISR0_INTX_ASSERT(i),
+ PCIE_ISR0_REG);
+
+ virq = irq_find_mapping(pcie->irq_domain, i);
+ generic_handle_irq(virq);
+ }
+}
+
+static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
+{
+ struct advk_pcie *pcie = arg;
+ u32 status;
+
+ status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG);
+ if (!(status & PCIE_IRQ_CORE_INT))
+ return IRQ_NONE;
+
+ advk_pcie_handle_int(pcie);
+
+ /* Clear interrupt */
+ advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG);
+
+ return IRQ_HANDLED;
+}
+
+static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
+{
+ int err, res_valid = 0;
+ struct device *dev = &pcie->pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct resource_entry *win;
+ resource_size_t iobase;
+
+ INIT_LIST_HEAD(&pcie->resources);
+
+ err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pcie->resources,
+ &iobase);
+ if (err)
+ return err;
+
+ err = devm_request_pci_bus_resources(dev, &pcie->resources);
+ if (err)
+ goto out_release_res;
+
+ resource_list_for_each_entry(win, &pcie->resources) {
+ struct resource *res = win->res;
+
+ switch (resource_type(res)) {
+ case IORESOURCE_IO:
+ advk_pcie_set_ob_win(pcie, 1,
+ upper_32_bits(res->start),
+ lower_32_bits(res->start),
+ 0, 0xF8000000, 0,
+ lower_32_bits(res->start),
+ OB_PCIE_IO);
+ err = pci_remap_iospace(res, iobase);
+ if (err)
+ dev_warn(dev, "error %d: failed to map resource %pR\n",
+ err, res);
+ break;
+ case IORESOURCE_MEM:
+ advk_pcie_set_ob_win(pcie, 0,
+ upper_32_bits(res->start),
+ lower_32_bits(res->start),
+ 0x0, 0xF8000000, 0,
+ lower_32_bits(res->start),
+ (2 << 20) | OB_PCIE_MEM);
+ res_valid |= !(res->flags & IORESOURCE_PREFETCH);
+ break;
+ case IORESOURCE_BUS:
+ pcie->root_bus_nr = res->start;
+ break;
+ }
+ }
+
+ if (!res_valid) {
+ dev_err(dev, "non-prefetchable memory resource required\n");
+ err = -EINVAL;
+ goto out_release_res;
+ }
+
+ return 0;
+
+out_release_res:
+ pci_free_resource_list(&pcie->resources);
+ return err;
+}
+
+static int advk_pcie_probe(struct platform_device *pdev)
+{
+ struct advk_pcie *pcie;
+ struct resource *res;
+ struct pci_bus *bus, *child;
+ struct msi_controller *msi;
+ struct device_node *msi_node;
+ int ret, irq;
+
+ pcie = devm_kzalloc(&pdev->dev, sizeof(struct advk_pcie),
+ GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pcie->pdev = pdev;
+ platform_set_drvdata(pdev, pcie);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pcie->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pcie->base)) {
+ dev_err(&pdev->dev, "Failed to map registers\n");
+ return PTR_ERR(pcie->base);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(&pdev->dev, irq, advk_pcie_irq_handler,
+ IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
+ pcie);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register interrupt\n");
+ return ret;
+ }
+
+ ret = advk_pcie_parse_request_of_pci_ranges(pcie);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to parse resources\n");
+ return ret;
+ }
+
+ advk_pcie_setup_hw(pcie);
+
+ ret = advk_pcie_init_irq_domain(pcie);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize irq\n");
+ return ret;
+ }
+
+ ret = advk_pcie_init_msi_irq_domain(pcie);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize irq\n");
+ advk_pcie_remove_irq_domain(pcie);
+ return ret;
+ }
+
+ msi_node = of_parse_phandle(pdev->dev.of_node, "msi-parent", 0);
+ if (msi_node)
+ msi = of_pci_find_msi_chip_by_node(msi_node);
+ else
+ msi = NULL;
+
+ bus = pci_scan_root_bus_msi(&pdev->dev, 0, &advk_pcie_ops,
+ pcie, &pcie->resources, &pcie->msi);
+ if (!bus) {
+ advk_pcie_remove_msi_irq_domain(pcie);
+ advk_pcie_remove_irq_domain(pcie);
+ return -ENOMEM;
+ }
+
+ pci_bus_assign_resources(bus);
+
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+
+ pci_bus_add_devices(bus);
+
+ return 0;
+}
+
+static const struct of_device_id advk_pcie_of_match_table[] = {
+ { .compatible = "marvell,armada-3700-pcie", },
+ {},
+};
+
+static struct platform_driver advk_pcie_driver = {
+ .driver = {
+ .name = "advk-pcie",
+ .of_match_table = advk_pcie_of_match_table,
+ /* Driver unloading/unbinding currently not supported */
+ .suppress_bind_attrs = true,
+ },
+ .probe = advk_pcie_probe,
+};
+builtin_platform_driver(advk_pcie_driver);
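+/*
+ * Editorial aside, a hedged device-tree sketch for this binding. The
+ * compatible string and the "msi-parent" lookup match the code above;
+ * the unit address, register window and interrupt specifier below are
+ * hypothetical:
+ *
+ *	pcie0: pcie@d0070000 {
+ *		compatible = "marvell,armada-3700-pcie";
+ *		device_type = "pci";
+ *		reg = <0 0xd0070000 0 0x20000>;
+ *		interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+ *		msi-parent = <&pcie0>;
+ *		ranges = < ... mem and i/o windows ... >;
+ *	};
+ */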
diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c
index f44113040..81b3949a2 100644
--- a/drivers/pci/host/pci-dra7xx.c
+++ b/drivers/pci/host/pci-dra7xx.c
@@ -181,14 +181,14 @@ static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
if (!pcie_intc_node) {
dev_err(dev, "No PCIe Intc node found\n");
- return PTR_ERR(pcie_intc_node);
+ return -ENODEV;
}
pp->irq_domain = irq_domain_add_linear(pcie_intc_node, 4,
&intx_domain_ops, pp);
if (!pp->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
- return PTR_ERR(pp->irq_domain);
+ return -ENODEV;
}
return 0;
diff --git a/drivers/pci/host/pci-host-common.c b/drivers/pci/host/pci-host-common.c
index 8cba7ab73..9d9d34e95 100644
--- a/drivers/pci/host/pci-host-common.c
+++ b/drivers/pci/host/pci-host-common.c
@@ -20,10 +20,9 @@
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
+#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
-#include "../ecam.h"
-
static int gen_pci_parse_request_of_pci_ranges(struct device *dev,
struct list_head *resources, struct resource **bus_range)
{
@@ -36,44 +35,34 @@ static int gen_pci_parse_request_of_pci_ranges(struct device *dev,
if (err)
return err;
+ err = devm_request_pci_bus_resources(dev, resources);
+ if (err)
+ return err;
+
resource_list_for_each_entry(win, resources) {
- struct resource *parent, *res = win->res;
+ struct resource *res = win->res;
switch (resource_type(res)) {
case IORESOURCE_IO:
- parent = &ioport_resource;
err = pci_remap_iospace(res, iobase);
- if (err) {
+ if (err)
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, res);
- continue;
- }
break;
case IORESOURCE_MEM:
- parent = &iomem_resource;
res_valid |= !(res->flags & IORESOURCE_PREFETCH);
break;
case IORESOURCE_BUS:
*bus_range = res;
- default:
- continue;
+ break;
}
-
- err = devm_request_resource(dev, parent, res);
- if (err)
- goto out_release_res;
- }
-
- if (!res_valid) {
- dev_err(dev, "non-prefetchable memory resource required\n");
- err = -EINVAL;
- goto out_release_res;
}
- return 0;
+ if (res_valid)
+ return 0;
-out_release_res:
- return err;
+ dev_err(dev, "non-prefetchable memory resource required\n");
+ return -EINVAL;
}
static void gen_pci_unmap_cfg(void *ptr)
@@ -155,7 +144,14 @@ int pci_host_common_probe(struct platform_device *pdev,
pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
- if (!pci_has_flag(PCI_PROBE_ONLY)) {
+ /*
+ * We insert PCI resources into the iomem_resource and
+ * ioport_resource trees in either pci_bus_claim_resources()
+ * or pci_bus_assign_resources().
+ */
+ if (pci_has_flag(PCI_PROBE_ONLY)) {
+ pci_bus_claim_resources(bus);
+ } else {
pci_bus_size_bridges(bus);
pci_bus_assign_resources(bus);
diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
index 6eaceab1b..c05ea9d72 100644
--- a/drivers/pci/host/pci-host-generic.c
+++ b/drivers/pci/host/pci-host-generic.c
@@ -20,13 +20,12 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
+#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
-#include "../ecam.h"
-
static struct pci_ecam_ops gen_pci_cfg_cam_bus_ops = {
.bus_shift = 16,
.pci_ops = {
@@ -46,8 +45,6 @@ static const struct of_device_id gen_pci_of_match[] = {
{ },
};
-MODULE_DEVICE_TABLE(of, gen_pci_of_match);
-
static int gen_pci_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id;
@@ -66,8 +63,4 @@ static struct platform_driver gen_pci_driver = {
},
.probe = gen_pci_probe,
};
-module_platform_driver(gen_pci_driver);
-
-MODULE_DESCRIPTION("Generic PCI host driver");
-MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(gen_pci_driver);
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 7e9b2de2a..6955ffdb8 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -732,16 +732,18 @@ static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
pdev = msi_desc_to_pci_dev(msi);
hbus = info->data;
- hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
- if (!hpdev)
+ int_desc = irq_data_get_irq_chip_data(irq_data);
+ if (!int_desc)
return;
- int_desc = irq_data_get_irq_chip_data(irq_data);
- if (int_desc) {
- irq_data->chip_data = NULL;
- hv_int_desc_free(hpdev, int_desc);
+ irq_data->chip_data = NULL;
+ hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
+ if (!hpdev) {
+ kfree(int_desc);
+ return;
}
+ hv_int_desc_free(hpdev, int_desc);
put_pcichild(hpdev, hv_pcidev_ref_by_slot);
}
@@ -1657,14 +1659,16 @@ static void hv_pci_onchannelcallback(void *context)
continue;
}
+ /* Zero length indicates there are no more packets. */
+ if (ret || !bytes_recvd)
+ break;
+
/*
* All incoming packets must be at least as large as a
* response.
*/
- if (bytes_recvd <= sizeof(struct pci_response)) {
- kfree(buffer);
- return;
- }
+ if (bytes_recvd <= sizeof(struct pci_response))
+ continue;
desc = (struct vmpacket_descriptor *)buffer;
switch (desc->type) {
@@ -1679,8 +1683,7 @@ static void hv_pci_onchannelcallback(void *context)
comp_packet->completion_func(comp_packet->compl_ctxt,
response,
bytes_recvd);
- kfree(buffer);
- return;
+ break;
case VM_PKT_DATA_INBAND:
@@ -1727,8 +1730,9 @@ static void hv_pci_onchannelcallback(void *context)
desc->type, req_id, bytes_recvd);
break;
}
- break;
}
+
+ kfree(buffer);
}
/**
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
index 6b8301ef2..8ba28834d 100644
--- a/drivers/pci/host/pci-keystone.c
+++ b/drivers/pci/host/pci-keystone.c
@@ -17,7 +17,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
#include <linux/of.h>
@@ -360,7 +360,6 @@ static const struct of_device_id ks_pcie_of_match[] = {
},
{ },
};
-MODULE_DEVICE_TABLE(of, ks_pcie_of_match);
static int __exit ks_pcie_remove(struct platform_device *pdev)
{
@@ -439,9 +438,4 @@ static struct platform_driver ks_pcie_driver __refdata = {
.of_match_table = of_match_ptr(ks_pcie_of_match),
},
};
-
-module_platform_driver(ks_pcie_driver);
-
-MODULE_AUTHOR("Murali Karicheri <m-karicheri2@ti.com>");
-MODULE_DESCRIPTION("Keystone PCIe host controller driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(ks_pcie_driver);
diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c
index a21e229d9..114ba8192 100644
--- a/drivers/pci/host/pci-layerscape.c
+++ b/drivers/pci/host/pci-layerscape.c
@@ -12,7 +12,7 @@
#include <linux/kernel.h>
#include <linux/interrupt.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
@@ -211,7 +211,6 @@ static const struct of_device_id ls_pcie_of_match[] = {
{ .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata },
{ },
};
-MODULE_DEVICE_TABLE(of, ls_pcie_of_match);
static int __init ls_add_pcie_port(struct pcie_port *pp,
struct platform_device *pdev)
@@ -275,9 +274,4 @@ static struct platform_driver ls_pcie_driver = {
.of_match_table = ls_pcie_of_match,
},
};
-
-module_platform_driver_probe(ls_pcie_driver, ls_pcie_probe);
-
-MODULE_AUTHOR("Minghuan Lian <Minghuan.Lian@freescale.com>");
-MODULE_DESCRIPTION("Freescale Layerscape PCIe host controller driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver_probe(ls_pcie_driver, ls_pcie_probe);
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 6b451df65..307f81d6b 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -1,6 +1,8 @@
/*
* PCIe driver for Marvell Armada 370 and Armada XP SoCs
*
+ * Author: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
@@ -11,7 +13,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/mbus.h>
#include <linux/msi.h>
#include <linux/slab.h>
@@ -839,25 +841,22 @@ static struct pci_ops mvebu_pcie_ops = {
static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
{
struct mvebu_pcie *pcie = sys_to_pcie(sys);
- int i;
+ int err, i;
pcie->mem.name = "PCI MEM";
pcie->realio.name = "PCI I/O";
- if (request_resource(&iomem_resource, &pcie->mem))
- return 0;
-
- if (resource_size(&pcie->realio) != 0) {
- if (request_resource(&ioport_resource, &pcie->realio)) {
- release_resource(&pcie->mem);
- return 0;
- }
+ if (resource_size(&pcie->realio) != 0)
pci_add_resource_offset(&sys->resources, &pcie->realio,
sys->io_offset);
- }
+
pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
pci_add_resource(&sys->resources, &pcie->busn);
+ err = devm_request_pci_bus_resources(&pcie->pdev->dev, &sys->resources);
+ if (err)
+ return 0;
+
for (i = 0; i < pcie->nports; i++) {
struct mvebu_pcie_port *port = &pcie->ports[i];
@@ -1298,7 +1297,6 @@ static const struct of_device_id mvebu_pcie_of_match_table[] = {
{ .compatible = "marvell,kirkwood-pcie", },
{},
};
-MODULE_DEVICE_TABLE(of, mvebu_pcie_of_match_table);
static const struct dev_pm_ops mvebu_pcie_pm_ops = {
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
@@ -1314,8 +1312,4 @@ static struct platform_driver mvebu_pcie_driver = {
},
.probe = mvebu_pcie_probe,
};
-module_platform_driver(mvebu_pcie_driver);
-
-MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
-MODULE_DESCRIPTION("Marvell EBU PCIe driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(mvebu_pcie_driver);
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index 9980a4bda..597566f96 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -4,6 +4,8 @@
* Copyright (C) 2013 Renesas Solutions Corp.
* Copyright (C) 2013 Cogent Embedded, Inc.
*
+ * Author: Valentine Barshak <valentine.barshak@cogentembedded.com>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -14,7 +16,6 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
@@ -97,7 +98,6 @@
struct rcar_pci_priv {
struct device *dev;
void __iomem *reg;
- struct resource io_res;
struct resource mem_res;
struct resource *cfg_res;
unsigned busnr;
@@ -194,6 +194,7 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys)
struct rcar_pci_priv *priv = sys->private_data;
void __iomem *reg = priv->reg;
u32 val;
+ int ret;
pm_runtime_enable(priv->dev);
pm_runtime_get_sync(priv->dev);
@@ -273,8 +274,10 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys)
rcar_pci_setup_errirq(priv);
/* Add PCI resources */
- pci_add_resource(&sys->resources, &priv->io_res);
pci_add_resource(&sys->resources, &priv->mem_res);
+ ret = devm_request_pci_bus_resources(priv->dev, &sys->resources);
+ if (ret < 0)
+ return ret;
/* Setup bus number based on platform device id / of bus-range */
sys->busnr = priv->busnr;
@@ -371,14 +374,6 @@ static int rcar_pci_probe(struct platform_device *pdev)
return -ENOMEM;
priv->mem_res = *mem_res;
- /*
- * The controller does not support/use port I/O,
- * so setup a dummy port I/O region here.
- */
- priv->io_res.start = priv->mem_res.start;
- priv->io_res.end = priv->mem_res.end;
- priv->io_res.flags = IORESOURCE_IO;
-
priv->cfg_res = cfg_res;
priv->irq = platform_get_irq(pdev, 0);
@@ -421,6 +416,7 @@ static int rcar_pci_probe(struct platform_device *pdev)
hw_private[0] = priv;
memset(&hw, 0, sizeof(hw));
hw.nr_controllers = ARRAY_SIZE(hw_private);
+ hw.io_optional = 1;
hw.private_data = hw_private;
hw.map_irq = rcar_pci_map_irq;
hw.ops = &rcar_pci_ops;
@@ -437,8 +433,6 @@ static struct of_device_id rcar_pci_of_match[] = {
{ },
};
-MODULE_DEVICE_TABLE(of, rcar_pci_of_match);
-
static struct platform_driver rcar_pci_driver = {
.driver = {
.name = "pci-rcar-gen2",
@@ -447,9 +441,4 @@ static struct platform_driver rcar_pci_driver = {
},
.probe = rcar_pci_probe,
};
-
-module_platform_driver(rcar_pci_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Renesas R-Car Gen2 internal PCI");
-MODULE_AUTHOR("Valentine Barshak <valentine.barshak@cogentembedded.com>");
+builtin_platform_driver(rcar_pci_driver);
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index c388468c2..6de0757b1 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -9,6 +9,8 @@
*
* Bits taken from arch/arm/mach-dove/pcie.c
*
+ * Author: Thierry Reding <treding@nvidia.com>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -32,7 +34,7 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
@@ -183,26 +185,26 @@
#define AFI_PEXBIAS_CTRL_0 0x168
-#define RP_VEND_XP 0x00000F00
+#define RP_VEND_XP 0x00000f00
#define RP_VEND_XP_DL_UP (1 << 30)
-#define RP_PRIV_MISC 0x00000FE0
-#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xE << 0)
-#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xF << 0)
+#define RP_PRIV_MISC 0x00000fe0
+#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0)
+#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0)
#define RP_LINK_CONTROL_STATUS 0x00000090
#define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000
#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000
-#define PADS_CTL_SEL 0x0000009C
+#define PADS_CTL_SEL 0x0000009c
-#define PADS_CTL 0x000000A0
+#define PADS_CTL 0x000000a0
#define PADS_CTL_IDDQ_1L (1 << 0)
#define PADS_CTL_TX_DATA_EN_1L (1 << 6)
#define PADS_CTL_RX_DATA_EN_1L (1 << 10)
-#define PADS_PLL_CTL_TEGRA20 0x000000B8
-#define PADS_PLL_CTL_TEGRA30 0x000000B4
+#define PADS_PLL_CTL_TEGRA20 0x000000b8
+#define PADS_PLL_CTL_TEGRA30 0x000000b4
#define PADS_PLL_CTL_RST_B4SM (1 << 1)
#define PADS_PLL_CTL_LOCKDET (1 << 8)
#define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16)
@@ -214,9 +216,9 @@
#define PADS_PLL_CTL_TXCLKREF_DIV5 (1 << 20)
#define PADS_PLL_CTL_TXCLKREF_BUF_EN (1 << 22)
-#define PADS_REFCLK_CFG0 0x000000C8
-#define PADS_REFCLK_CFG1 0x000000CC
-#define PADS_REFCLK_BIAS 0x000000D0
+#define PADS_REFCLK_CFG0 0x000000c8
+#define PADS_REFCLK_CFG1 0x000000cc
+#define PADS_REFCLK_BIAS 0x000000d0
/*
* Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
@@ -228,15 +230,6 @@
#define PADS_REFCLK_CFG_PREDI_SHIFT 8 /* 11:8 */
#define PADS_REFCLK_CFG_DRVI_SHIFT 12 /* 15:12 */
-/* Default value provided by HW engineering is 0xfa5c */
-#define PADS_REFCLK_CFG_VALUE \
- ( \
- (0x17 << PADS_REFCLK_CFG_TERM_SHIFT) | \
- (0 << PADS_REFCLK_CFG_E_TERM_SHIFT) | \
- (0xa << PADS_REFCLK_CFG_PREDI_SHIFT) | \
- (0xf << PADS_REFCLK_CFG_DRVI_SHIFT) \
- )
-
struct tegra_msi {
struct msi_controller chip;
DECLARE_BITMAP(used, INT_PCI_MSI_NR);
@@ -252,6 +245,8 @@ struct tegra_pcie_soc_data {
unsigned int msi_base_shift;
u32 pads_pll_ctl;
u32 tx_ref_sel;
+ u32 pads_refclk_cfg0;
+ u32 pads_refclk_cfg1;
bool has_pex_clkreq_en;
bool has_pex_bias_ctrl;
bool has_intr_prsnt_sense;
@@ -274,7 +269,6 @@ struct tegra_pcie {
struct list_head buses;
struct resource *cs;
- struct resource all;
struct resource io;
struct resource pio;
struct resource mem;
@@ -623,30 +617,21 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
sys->mem_offset = pcie->offset.mem;
sys->io_offset = pcie->offset.io;
- err = devm_request_resource(pcie->dev, &pcie->all, &pcie->io);
- if (err < 0)
- return err;
-
- err = devm_request_resource(pcie->dev, &ioport_resource, &pcie->pio);
- if (err < 0)
- return err;
-
- err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem);
+ err = devm_request_resource(pcie->dev, &iomem_resource, &pcie->io);
if (err < 0)
return err;
- err = devm_request_resource(pcie->dev, &pcie->all, &pcie->prefetch);
- if (err)
- return err;
-
pci_add_resource_offset(&sys->resources, &pcie->pio, sys->io_offset);
pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
pci_add_resource_offset(&sys->resources, &pcie->prefetch,
sys->mem_offset);
pci_add_resource(&sys->resources, &pcie->busn);
- pci_ioremap_io(pcie->pio.start, pcie->io.start);
+ err = devm_request_pci_bus_resources(pcie->dev, &sys->resources);
+ if (err < 0)
+ return err;
+ pci_remap_iospace(&pcie->pio, pcie->io.start);
return 1;
}
@@ -838,12 +823,6 @@ static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
value |= PADS_PLL_CTL_RST_B4SM;
pads_writel(pcie, value, soc->pads_pll_ctl);
- /* Configure the reference clock driver */
- value = PADS_REFCLK_CFG_VALUE | (PADS_REFCLK_CFG_VALUE << 16);
- pads_writel(pcie, value, PADS_REFCLK_CFG0);
- if (soc->num_ports > 2)
- pads_writel(pcie, PADS_REFCLK_CFG_VALUE, PADS_REFCLK_CFG1);
-
/* wait for the PLL to lock */
err = tegra_pcie_pll_wait(pcie, 500);
if (err < 0) {
@@ -927,6 +906,7 @@ static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
{
+ const struct tegra_pcie_soc_data *soc = pcie->soc_data;
struct tegra_pcie_port *port;
int err;
@@ -952,6 +932,12 @@ static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
}
}
+ /* Configure the reference clock driver */
+ pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);
+
+ if (soc->num_ports > 2)
+ pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);
+
return 0;
}
@@ -1822,12 +1808,6 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
struct resource res;
int err;
- memset(&pcie->all, 0, sizeof(pcie->all));
- pcie->all.flags = IORESOURCE_MEM;
- pcie->all.name = np->full_name;
- pcie->all.start = ~0;
- pcie->all.end = 0;
-
if (of_pci_range_parser_init(&parser, np)) {
dev_err(pcie->dev, "missing \"ranges\" property\n");
return -EINVAL;
@@ -1880,18 +1860,8 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
}
break;
}
-
- if (res.start <= pcie->all.start)
- pcie->all.start = res.start;
-
- if (res.end >= pcie->all.end)
- pcie->all.end = res.end;
}
- err = devm_request_resource(pcie->dev, &iomem_resource, &pcie->all);
- if (err < 0)
- return err;
-
err = of_pci_parse_bus_range(np, &pcie->busn);
if (err < 0) {
dev_err(pcie->dev, "failed to parse ranges property: %d\n",
@@ -2078,6 +2048,7 @@ static const struct tegra_pcie_soc_data tegra20_pcie_data = {
.msi_base_shift = 0,
.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
+ .pads_refclk_cfg0 = 0xfa5cfa5c,
.has_pex_clkreq_en = false,
.has_pex_bias_ctrl = false,
.has_intr_prsnt_sense = false,
@@ -2090,6 +2061,8 @@ static const struct tegra_pcie_soc_data tegra30_pcie_data = {
.msi_base_shift = 8,
.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
+ .pads_refclk_cfg0 = 0xfa5cfa5c,
+ .pads_refclk_cfg1 = 0xfa5cfa5c,
.has_pex_clkreq_en = true,
.has_pex_bias_ctrl = true,
.has_intr_prsnt_sense = true,
@@ -2102,6 +2075,7 @@ static const struct tegra_pcie_soc_data tegra124_pcie_data = {
.msi_base_shift = 8,
.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
+ .pads_refclk_cfg0 = 0x44ac44ac,
.has_pex_clkreq_en = true,
.has_pex_bias_ctrl = true,
.has_intr_prsnt_sense = true,
@@ -2115,7 +2089,6 @@ static const struct of_device_id tegra_pcie_of_match[] = {
{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data },
{ },
};
-MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
{
@@ -2249,8 +2222,6 @@ static int tegra_pcie_probe(struct platform_device *pdev)
if (err < 0)
return err;
- pcibios_min_mem = 0;
-
err = tegra_pcie_get_resources(pcie);
if (err < 0) {
dev_err(&pdev->dev, "failed to request resources: %d\n", err);
@@ -2306,8 +2277,4 @@ static struct platform_driver tegra_pcie_driver = {
},
.probe = tegra_pcie_probe,
};
-module_platform_driver(tegra_pcie_driver);
-
-MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
-MODULE_DESCRIPTION("NVIDIA Tegra PCIe driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(tegra_pcie_driver);
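The reference-clock setup above no longer uses one shared PADS_REFCLK_CFG_VALUE; each SoC entry now carries the full 32-bit register value covering both 16-bit lane configurations (Tegra20/30 keep the old 0xfa5c per lane, Tegra124 uses 0x44ac). A minimal sketch of how the legacy value decomposes using the PADS_REFCLK_CFG_* shifts; the TERM (2) and E_TERM (7) shift values sit just above the quoted hunk, and the helper name is hypothetical:

static u32 tegra_refclk_word_sketch(void)
{
	u16 cfg = (0x17 << PADS_REFCLK_CFG_TERM_SHIFT) |	/* 0x005c */
		  (0x0 << PADS_REFCLK_CFG_E_TERM_SHIFT) |
		  (0xa << PADS_REFCLK_CFG_PREDI_SHIFT) |	/* 0x0a00 */
		  (0xf << PADS_REFCLK_CFG_DRVI_SHIFT);		/* 0xf000 */

	return cfg | ((u32)cfg << 16);	/* 0xfa5cfa5c, as in the soc data */
}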
diff --git a/drivers/pci/host/pci-thunder-ecam.c b/drivers/pci/host/pci-thunder-ecam.c
index 540d03061..d50a3dc2d 100644
--- a/drivers/pci/host/pci-thunder-ecam.c
+++ b/drivers/pci/host/pci-thunder-ecam.c
@@ -7,14 +7,13 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/of_pci.h>
#include <linux/of.h>
+#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
-#include "../ecam.h"
-
static void set_val(u32 v, int where, int size, u32 *val)
{
int shift = (where & 3) * 8;
@@ -360,7 +359,6 @@ static const struct of_device_id thunder_ecam_of_match[] = {
{ .compatible = "cavium,pci-host-thunder-ecam" },
{ },
};
-MODULE_DEVICE_TABLE(of, thunder_ecam_of_match);
static int thunder_ecam_probe(struct platform_device *pdev)
{
@@ -374,7 +372,4 @@ static struct platform_driver thunder_ecam_driver = {
},
.probe = thunder_ecam_probe,
};
-module_platform_driver(thunder_ecam_driver);
-
-MODULE_DESCRIPTION("Thunder ECAM PCI host driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(thunder_ecam_driver);
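As with the Tegra driver above, these host controllers can only be built in, so the module boilerplate is dropped in favor of builtin_platform_driver(). Roughly what that macro expands to (a sketch; see include/linux/platform_device.h for the real definition):

static int __init thunder_ecam_driver_init(void)
{
	return platform_driver_register(&thunder_ecam_driver);
}
device_initcall(thunder_ecam_driver_init);

There is deliberately no exit path: a built-in host bridge driver is never unbound.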
diff --git a/drivers/pci/host/pci-thunder-pem.c b/drivers/pci/host/pci-thunder-pem.c
index 9b8ab94f3..6abaf80ff 100644
--- a/drivers/pci/host/pci-thunder-pem.c
+++ b/drivers/pci/host/pci-thunder-pem.c
@@ -15,13 +15,12 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
+#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
-#include "../ecam.h"
-
#define PEM_CFG_WR 0x28
#define PEM_CFG_RD 0x30
@@ -285,8 +284,9 @@ static int thunder_pem_config_write(struct pci_bus *bus, unsigned int devfn,
return pci_generic_config_write(bus, devfn, where, size, val);
}
-static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg)
+static int thunder_pem_init(struct pci_config_window *cfg)
{
+ struct device *dev = cfg->parent;
resource_size_t bar4_start;
struct resource *res_pem;
struct thunder_pem_pci *pem_pci;
@@ -346,7 +346,6 @@ static const struct of_device_id thunder_pem_of_match[] = {
{ .compatible = "cavium,pci-host-thunder-pem" },
{ },
};
-MODULE_DEVICE_TABLE(of, thunder_pem_of_match);
static int thunder_pem_probe(struct platform_device *pdev)
{
@@ -360,7 +359,4 @@ static struct platform_driver thunder_pem_driver = {
},
.probe = thunder_pem_probe,
};
-module_platform_driver(thunder_pem_driver);
-
-MODULE_DESCRIPTION("Thunder PEM PCIe host driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(thunder_pem_driver);
diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c
index f843a72dc..f23440577 100644
--- a/drivers/pci/host/pci-versatile.c
+++ b/drivers/pci/host/pci-versatile.c
@@ -80,21 +80,21 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
if (err)
return err;
+ err = devm_request_pci_bus_resources(dev, res);
+ if (err)
+ goto out_release_res;
+
resource_list_for_each_entry(win, res) {
- struct resource *parent, *res = win->res;
+ struct resource *res = win->res;
switch (resource_type(res)) {
case IORESOURCE_IO:
- parent = &ioport_resource;
err = pci_remap_iospace(res, iobase);
- if (err) {
+ if (err)
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, res);
- continue;
- }
break;
case IORESOURCE_MEM:
- parent = &iomem_resource;
res_valid |= !(res->flags & IORESOURCE_PREFETCH);
writel(res->start >> 28, PCI_IMAP(mem));
@@ -102,23 +102,14 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
mem++;
break;
- case IORESOURCE_BUS:
- default:
- continue;
}
-
- err = devm_request_resource(dev, parent, res);
- if (err)
- goto out_release_res;
}
- if (!res_valid) {
- dev_err(dev, "non-prefetchable memory resource required\n");
- err = -EINVAL;
- goto out_release_res;
- }
+ if (res_valid)
+ return 0;
- return 0;
+ dev_err(dev, "non-prefetchable memory resource required\n");
+ err = -EINVAL;
out_release_res:
pci_free_resource_list(res);
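The open-coded devm_request_resource() calls against ioport_resource and iomem_resource are replaced throughout this series by a single devm_request_pci_bus_resources() over the parsed resource list. A sketch of the new idiom, under a hypothetical helper name:

static int claim_host_bridge_windows_sketch(struct device *dev,
					    struct list_head *resources)
{
	/* Claims every I/O and memory window in one pass */
	int err = devm_request_pci_bus_resources(dev, resources);

	if (err)
		pci_free_resource_list(resources);

	return err;
}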
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index ae00ce22d..a81273c23 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -21,7 +21,7 @@
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/memblock.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -540,14 +540,20 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
if (ret)
return ret;
+ ret = devm_request_pci_bus_resources(&pdev->dev, &res);
+ if (ret)
+ goto error;
+
ret = xgene_pcie_setup(port, &res, iobase);
if (ret)
- return ret;
+ goto error;
bus = pci_create_root_bus(&pdev->dev, 0,
&xgene_pcie_ops, port, &res);
- if (!bus)
- return -ENOMEM;
+ if (!bus) {
+ ret = -ENOMEM;
+ goto error;
+ }
pci_scan_child_bus(bus);
pci_assign_unassigned_bus_resources(bus);
@@ -555,6 +561,10 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
platform_set_drvdata(pdev, port);
return 0;
+
+error:
+ pci_free_resource_list(&res);
+ return ret;
}
static const struct of_device_id xgene_pcie_match_table[] = {
@@ -569,8 +579,4 @@ static struct platform_driver xgene_pcie_driver = {
},
.probe = xgene_pcie_probe_bridge,
};
-module_platform_driver(xgene_pcie_driver);
-
-MODULE_AUTHOR("Tanmay Inamdar <tinamdar@apm.com>");
-MODULE_DESCRIPTION("APM X-Gene PCIe driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(xgene_pcie_driver);
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
index dbac6fb3f..2b7837650 100644
--- a/drivers/pci/host/pcie-altera.c
+++ b/drivers/pci/host/pcie-altera.c
@@ -61,6 +61,8 @@
#define TLP_LOOP 500
#define RP_DEVFN 0
+#define LINK_UP_TIMEOUT 5000
+
#define INTX_NUM 4
#define DWORD_MASK 3
@@ -81,9 +83,30 @@ struct tlp_rp_regpair_t {
u32 reg1;
};
+static inline void cra_writel(struct altera_pcie *pcie, const u32 value,
+ const u32 reg)
+{
+ writel_relaxed(value, pcie->cra_base + reg);
+}
+
+static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg)
+{
+ return readl_relaxed(pcie->cra_base + reg);
+}
+
+static bool altera_pcie_link_is_up(struct altera_pcie *pcie)
+{
+ return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0);
+}
+
static void altera_pcie_retrain(struct pci_dev *dev)
{
u16 linkcap, linkstat;
+ struct altera_pcie *pcie = dev->bus->sysdata;
+ int timeout = 0;
+
+ if (!altera_pcie_link_is_up(pcie))
+ return;
/*
* Set the retrain bit if the PCIe rootport support > 2.5GB/s, but
@@ -95,9 +118,16 @@ static void altera_pcie_retrain(struct pci_dev *dev)
return;
pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &linkstat);
- if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB)
+ if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) {
pcie_capability_set_word(dev, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_RL);
+ while (!altera_pcie_link_is_up(pcie)) {
+ timeout++;
+ if (timeout > LINK_UP_TIMEOUT)
+ break;
+ udelay(5);
+ }
+ }
}
DECLARE_PCI_FIXUP_EARLY(0x1172, PCI_ANY_ID, altera_pcie_retrain);
@@ -120,17 +150,6 @@ static bool altera_pcie_hide_rc_bar(struct pci_bus *bus, unsigned int devfn,
return false;
}
-static inline void cra_writel(struct altera_pcie *pcie, const u32 value,
- const u32 reg)
-{
- writel_relaxed(value, pcie->cra_base + reg);
-}
-
-static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg)
-{
- return readl_relaxed(pcie->cra_base + reg);
-}
-
static void tlp_write_tx(struct altera_pcie *pcie,
struct tlp_rp_regpair_t *tlp_rp_regdata)
{
@@ -139,11 +158,6 @@ static void tlp_write_tx(struct altera_pcie *pcie,
cra_writel(pcie, tlp_rp_regdata->ctrl, RP_TX_CNTRL);
}
-static bool altera_pcie_link_is_up(struct altera_pcie *pcie)
-{
- return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0);
-}
-
static bool altera_pcie_valid_config(struct altera_pcie *pcie,
struct pci_bus *bus, int dev)
{
@@ -415,11 +429,6 @@ static void altera_pcie_isr(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static void altera_pcie_release_of_pci_ranges(struct altera_pcie *pcie)
-{
- pci_free_resource_list(&pcie->resources);
-}
-
static int altera_pcie_parse_request_of_pci_ranges(struct altera_pcie *pcie)
{
int err, res_valid = 0;
@@ -432,33 +441,25 @@ static int altera_pcie_parse_request_of_pci_ranges(struct altera_pcie *pcie)
if (err)
return err;
+ err = devm_request_pci_bus_resources(dev, &pcie->resources);
+ if (err)
+ goto out_release_res;
+
resource_list_for_each_entry(win, &pcie->resources) {
- struct resource *parent, *res = win->res;
+ struct resource *res = win->res;
- switch (resource_type(res)) {
- case IORESOURCE_MEM:
- parent = &iomem_resource;
+ if (resource_type(res) == IORESOURCE_MEM)
res_valid |= !(res->flags & IORESOURCE_PREFETCH);
- break;
- default:
- continue;
- }
-
- err = devm_request_resource(dev, parent, res);
- if (err)
- goto out_release_res;
}
- if (!res_valid) {
- dev_err(dev, "non-prefetchable memory resource required\n");
- err = -EINVAL;
- goto out_release_res;
- }
+ if (res_valid)
+ return 0;
- return 0;
+ dev_err(dev, "non-prefetchable memory resource required\n");
+ err = -EINVAL;
out_release_res:
- altera_pcie_release_of_pci_ranges(pcie);
+ pci_free_resource_list(&pcie->resources);
return err;
}
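The new retrain path bounds its wait: LINK_UP_TIMEOUT (5000) polls of 5 us each give roughly 25 ms before the loop gives up. The same loop factored into a helper, as a sketch (the helper itself is not part of the patch):

static bool altera_wait_link_retrain_sketch(struct altera_pcie *pcie)
{
	int timeout = 0;

	while (!altera_pcie_link_is_up(pcie)) {
		if (++timeout > LINK_UP_TIMEOUT)	/* ~25 ms budget */
			return false;
		udelay(5);
	}

	return true;
}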
diff --git a/drivers/pci/host/pcie-armada8k.c b/drivers/pci/host/pcie-armada8k.c
index 55723567b..0f4f57006 100644
--- a/drivers/pci/host/pcie-armada8k.c
+++ b/drivers/pci/host/pcie-armada8k.c
@@ -5,6 +5,9 @@
*
* Copyright (C) 2016 Marvell Technology Group Ltd.
*
+ * Author: Yehuda Yitshak <yehuday@marvell.com>
+ * Author: Shadi Ammouri <shadi@marvell.com>
+ *
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
@@ -14,7 +17,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
@@ -244,7 +247,6 @@ static const struct of_device_id armada8k_pcie_of_match[] = {
{ .compatible = "marvell,armada8k-pcie", },
{},
};
-MODULE_DEVICE_TABLE(of, armada8k_pcie_of_match);
static struct platform_driver armada8k_pcie_driver = {
.probe = armada8k_pcie_probe,
@@ -253,10 +255,4 @@ static struct platform_driver armada8k_pcie_driver = {
.of_match_table = of_match_ptr(armada8k_pcie_of_match),
},
};
-
-module_platform_driver(armada8k_pcie_driver);
-
-MODULE_DESCRIPTION("Armada 8k PCIe host controller driver");
-MODULE_AUTHOR("Yehuda Yitshak <yehuday@marvell.com>");
-MODULE_AUTHOR("Shadi Ammouri <shadi@marvell.com>");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(armada8k_pcie_driver);
diff --git a/drivers/pci/host/pcie-artpec6.c b/drivers/pci/host/pcie-artpec6.c
new file mode 100644
index 000000000..16ba70b7e
--- /dev/null
+++ b/drivers/pci/host/pcie-artpec6.c
@@ -0,0 +1,280 @@
+/*
+ * PCIe host controller driver for Axis ARTPEC-6 SoC
+ *
+ * Author: Niklas Cassel <niklas.cassel@axis.com>
+ *
+ * Based on work done by Phil Edworthy <phil@edworthys.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#include "pcie-designware.h"
+
+#define to_artpec6_pcie(x) container_of(x, struct artpec6_pcie, pp)
+
+struct artpec6_pcie {
+ struct pcie_port pp;
+ struct regmap *regmap;
+ void __iomem *phy_base;
+};
+
+/* PCIe Port Logic registers (memory-mapped) */
+#define PL_OFFSET 0x700
+#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
+#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
+
+#define MISC_CONTROL_1_OFF (PL_OFFSET + 0x1bc)
+#define DBI_RO_WR_EN 1
+
+/* ARTPEC-6 specific registers */
+#define PCIECFG 0x18
+#define PCIECFG_DBG_OEN (1 << 24)
+#define PCIECFG_CORE_RESET_REQ (1 << 21)
+#define PCIECFG_LTSSM_ENABLE (1 << 20)
+#define PCIECFG_CLKREQ_B (1 << 11)
+#define PCIECFG_REFCLK_ENABLE (1 << 10)
+#define PCIECFG_PLL_ENABLE (1 << 9)
+#define PCIECFG_PCLK_ENABLE (1 << 8)
+#define PCIECFG_RISRCREN (1 << 4)
+#define PCIECFG_MODE_TX_DRV_EN (1 << 3)
+#define PCIECFG_CISRREN (1 << 2)
+#define PCIECFG_MACRO_ENABLE (1 << 0)
+
+#define NOCCFG 0x40
+#define NOCCFG_ENABLE_CLK_PCIE (1 << 4)
+#define NOCCFG_POWER_PCIE_IDLEACK (1 << 3)
+#define NOCCFG_POWER_PCIE_IDLE (1 << 2)
+#define NOCCFG_POWER_PCIE_IDLEREQ (1 << 1)
+
+#define PHY_STATUS 0x118
+#define PHY_COSPLLLOCK (1 << 0)
+
+#define ARTPEC6_CPU_TO_BUS_ADDR 0x0fffffff
+
+static int artpec6_pcie_establish_link(struct pcie_port *pp)
+{
+ struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pp);
+ u32 val;
+ unsigned int retries;
+
+ /* Hold DW core in reset */
+ regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
+ val |= PCIECFG_CORE_RESET_REQ;
+ regmap_write(artpec6_pcie->regmap, PCIECFG, val);
+
+ regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
+ val |= PCIECFG_RISRCREN | /* Receiver term. 50 Ohm */
+ PCIECFG_MODE_TX_DRV_EN |
+ PCIECFG_CISRREN | /* Reference clock term. 100 Ohm */
+ PCIECFG_MACRO_ENABLE;
+ val |= PCIECFG_REFCLK_ENABLE;
+ val &= ~PCIECFG_DBG_OEN;
+ val &= ~PCIECFG_CLKREQ_B;
+ regmap_write(artpec6_pcie->regmap, PCIECFG, val);
+ usleep_range(5000, 6000);
+
+ regmap_read(artpec6_pcie->regmap, NOCCFG, &val);
+ val |= NOCCFG_ENABLE_CLK_PCIE;
+ regmap_write(artpec6_pcie->regmap, NOCCFG, val);
+ usleep_range(20, 30);
+
+ regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
+ val |= PCIECFG_PCLK_ENABLE | PCIECFG_PLL_ENABLE;
+ regmap_write(artpec6_pcie->regmap, PCIECFG, val);
+ usleep_range(6000, 7000);
+
+ regmap_read(artpec6_pcie->regmap, NOCCFG, &val);
+ val &= ~NOCCFG_POWER_PCIE_IDLEREQ;
+ regmap_write(artpec6_pcie->regmap, NOCCFG, val);
+
+ retries = 50;
+ do {
+ usleep_range(1000, 2000);
+ regmap_read(artpec6_pcie->regmap, NOCCFG, &val);
+ retries--;
+ } while (retries &&
+ (val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE)));
+
+ retries = 50;
+ do {
+ usleep_range(1000, 2000);
+ val = readl(artpec6_pcie->phy_base + PHY_STATUS);
+ retries--;
+ } while (retries && !(val & PHY_COSPLLLOCK));
+
+ /* Take DW core out of reset */
+ regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
+ val &= ~PCIECFG_CORE_RESET_REQ;
+ regmap_write(artpec6_pcie->regmap, PCIECFG, val);
+ usleep_range(100, 200);
+
+ /*
+ * Enable writing to config regs. This is required as the Synopsys
+ * driver changes the class code. That register needs DBI write enable.
+ */
+ writel(DBI_RO_WR_EN, pp->dbi_base + MISC_CONTROL_1_OFF);
+
+ pp->io_base &= ARTPEC6_CPU_TO_BUS_ADDR;
+ pp->mem_base &= ARTPEC6_CPU_TO_BUS_ADDR;
+ pp->cfg0_base &= ARTPEC6_CPU_TO_BUS_ADDR;
+ pp->cfg1_base &= ARTPEC6_CPU_TO_BUS_ADDR;
+
+ /* setup root complex */
+ dw_pcie_setup_rc(pp);
+
+ /* assert LTSSM enable */
+ regmap_read(artpec6_pcie->regmap, PCIECFG, &val);
+ val |= PCIECFG_LTSSM_ENABLE;
+ regmap_write(artpec6_pcie->regmap, PCIECFG, val);
+
+ /* check if the link is up or not */
+ if (!dw_pcie_wait_for_link(pp))
+ return 0;
+
+ dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
+ readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
+ readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
+
+ return -ETIMEDOUT;
+}
+
+static void artpec6_pcie_enable_interrupts(struct pcie_port *pp)
+{
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ dw_pcie_msi_init(pp);
+}
+
+static void artpec6_pcie_host_init(struct pcie_port *pp)
+{
+ artpec6_pcie_establish_link(pp);
+ artpec6_pcie_enable_interrupts(pp);
+}
+
+static int artpec6_pcie_link_up(struct pcie_port *pp)
+{
+ u32 rc;
+
+ /*
+ * Get the link status from the Synopsys IP:
+ * "link up" is debug bit 36; debug register 1 starts at bit 32
+ */
+ rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) & (0x1 << (36 - 32));
+ if (rc)
+ return 1;
+
+ return 0;
+}
+
+static struct pcie_host_ops artpec6_pcie_host_ops = {
+ .link_up = artpec6_pcie_link_up,
+ .host_init = artpec6_pcie_host_init,
+};
+
+static irqreturn_t artpec6_pcie_msi_handler(int irq, void *arg)
+{
+ struct pcie_port *pp = arg;
+
+ return dw_handle_msi_irq(pp);
+}
+
+static int __init artpec6_add_pcie_port(struct pcie_port *pp,
+ struct platform_device *pdev)
+{
+ int ret;
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ pp->msi_irq = platform_get_irq_byname(pdev, "msi");
+ if (pp->msi_irq <= 0) {
+ dev_err(&pdev->dev, "failed to get MSI irq\n");
+ return -ENODEV;
+ }
+
+ ret = devm_request_irq(&pdev->dev, pp->msi_irq,
+ artpec6_pcie_msi_handler,
+ IRQF_SHARED | IRQF_NO_THREAD,
+ "artpec6-pcie-msi", pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request MSI irq\n");
+ return ret;
+ }
+ }
+
+ pp->root_bus_nr = -1;
+ pp->ops = &artpec6_pcie_host_ops;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int artpec6_pcie_probe(struct platform_device *pdev)
+{
+ struct artpec6_pcie *artpec6_pcie;
+ struct pcie_port *pp;
+ struct resource *dbi_base;
+ struct resource *phy_base;
+ int ret;
+
+ artpec6_pcie = devm_kzalloc(&pdev->dev, sizeof(*artpec6_pcie),
+ GFP_KERNEL);
+ if (!artpec6_pcie)
+ return -ENOMEM;
+
+ pp = &artpec6_pcie->pp;
+ pp->dev = &pdev->dev;
+
+ dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
+ if (IS_ERR(pp->dbi_base))
+ return PTR_ERR(pp->dbi_base);
+
+ phy_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
+ artpec6_pcie->phy_base = devm_ioremap_resource(&pdev->dev, phy_base);
+ if (IS_ERR(artpec6_pcie->phy_base))
+ return PTR_ERR(artpec6_pcie->phy_base);
+
+ artpec6_pcie->regmap =
+ syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "axis,syscon-pcie");
+ if (IS_ERR(artpec6_pcie->regmap))
+ return PTR_ERR(artpec6_pcie->regmap);
+
+ ret = artpec6_add_pcie_port(pp, pdev);
+ if (ret < 0)
+ return ret;
+
+ platform_set_drvdata(pdev, artpec6_pcie);
+ return 0;
+}
+
+static const struct of_device_id artpec6_pcie_of_match[] = {
+ { .compatible = "axis,artpec6-pcie", },
+ {},
+};
+
+static struct platform_driver artpec6_pcie_driver = {
+ .probe = artpec6_pcie_probe,
+ .driver = {
+ .name = "artpec6-pcie",
+ .of_match_table = artpec6_pcie_of_match,
+ },
+};
+builtin_platform_driver(artpec6_pcie_driver);
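artpec6_pcie_link_up() samples the DesignWare debug registers: debug register 1 exposes debug bits 63:32, so "link up" (debug bit 36) lands at bit 4 of the 32-bit read. A sketch, with a hypothetical macro name:

#define DW_DEBUG_R1_LINK_UP	BIT(36 - 32)	/* bit 4 of DEBUG_R1 */

static bool artpec6_link_up_sketch(void __iomem *dbi_base)
{
	return readl(dbi_base + PCIE_PHY_DEBUG_R1) & DW_DEBUG_R1_LINK_UP;
}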
diff --git a/drivers/pci/host/pcie-designware-plat.c b/drivers/pci/host/pcie-designware-plat.c
index b3500994d..c8079dc81 100644
--- a/drivers/pci/host/pcie-designware-plat.c
+++ b/drivers/pci/host/pcie-designware-plat.c
@@ -14,7 +14,7 @@
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
@@ -121,7 +121,6 @@ static const struct of_device_id dw_plat_pcie_of_match[] = {
{ .compatible = "snps,dw-pcie", },
{},
};
-MODULE_DEVICE_TABLE(of, dw_plat_pcie_of_match);
static struct platform_driver dw_plat_pcie_driver = {
.driver = {
@@ -130,9 +129,4 @@ static struct platform_driver dw_plat_pcie_driver = {
},
.probe = dw_plat_pcie_probe,
};
-
-module_platform_driver(dw_plat_pcie_driver);
-
-MODULE_AUTHOR("Joao Pinto <Joao.Pinto@synopsys.com>");
-MODULE_DESCRIPTION("Synopsys PCIe host controller glue platform driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(dw_plat_pcie_driver);
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index aafd76654..12afce198 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -452,6 +452,10 @@ int dw_pcie_host_init(struct pcie_port *pp)
if (ret)
return ret;
+ ret = devm_request_pci_bus_resources(&pdev->dev, &res);
+ if (ret)
+ goto error;
+
/* Get the I/O and memory ranges from DT */
resource_list_for_each_entry(win, &res) {
switch (resource_type(win->res)) {
@@ -461,11 +465,9 @@ int dw_pcie_host_init(struct pcie_port *pp)
pp->io_size = resource_size(pp->io);
pp->io_bus_addr = pp->io->start - win->offset;
ret = pci_remap_iospace(pp->io, pp->io_base);
- if (ret) {
+ if (ret)
dev_warn(pp->dev, "error %d: failed to map resource %pR\n",
ret, pp->io);
- continue;
- }
break;
case IORESOURCE_MEM:
pp->mem = win->res;
@@ -483,8 +485,6 @@ int dw_pcie_host_init(struct pcie_port *pp)
case IORESOURCE_BUS:
pp->busn = win->res;
break;
- default:
- continue;
}
}
@@ -493,7 +493,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
resource_size(pp->cfg));
if (!pp->dbi_base) {
dev_err(pp->dev, "error with ioremap\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto error;
}
}
@@ -504,7 +505,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
pp->cfg0_size);
if (!pp->va_cfg0_base) {
dev_err(pp->dev, "error with ioremap in function\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto error;
}
}
@@ -513,7 +515,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
pp->cfg1_size);
if (!pp->va_cfg1_base) {
dev_err(pp->dev, "error with ioremap\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto error;
}
}
@@ -528,7 +531,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
&dw_pcie_msi_chip);
if (!pp->irq_domain) {
dev_err(pp->dev, "irq domain init failed\n");
- return -ENXIO;
+ ret = -ENXIO;
+ goto error;
}
for (i = 0; i < MAX_MSI_IRQS; i++)
@@ -536,7 +540,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
} else {
ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
if (ret < 0)
- return ret;
+ goto error;
}
}
@@ -552,8 +556,10 @@ int dw_pcie_host_init(struct pcie_port *pp)
} else
bus = pci_scan_root_bus(pp->dev, pp->root_bus_nr, &dw_pcie_ops,
pp, &res);
- if (!bus)
- return -ENOMEM;
+ if (!bus) {
+ ret = -ENOMEM;
+ goto error;
+ }
if (pp->ops->scan_bus)
pp->ops->scan_bus(pp);
@@ -571,6 +577,10 @@ int dw_pcie_host_init(struct pcie_port *pp)
pci_bus_add_devices(bus);
return 0;
+
+error:
+ pci_free_resource_list(&res);
+ return ret;
}
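Every failure in dw_pcie_host_init() now funnels through a single error label so the resource list parsed from DT is always released. The unwind shape in isolation, as a sketch (some_setup() is a hypothetical stand-in for any of the steps above):

static int probe_sketch(struct device *dev, struct list_head *res)
{
	int ret = some_setup(dev);

	if (ret)
		goto error;

	return 0;

error:
	pci_free_resource_list(res);	/* single cleanup point */
	return ret;
}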
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
diff --git a/drivers/pci/host/pcie-hisi.c b/drivers/pci/host/pcie-hisi.c
index 3e98d4eda..7ee9dfcc4 100644
--- a/drivers/pci/host/pcie-hisi.c
+++ b/drivers/pci/host/pcie-hisi.c
@@ -12,7 +12,7 @@
* published by the Free Software Foundation.
*/
#include <linux/interrupt.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/mfd/syscon.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
@@ -235,9 +235,6 @@ static const struct of_device_id hisi_pcie_of_match[] = {
{},
};
-
-MODULE_DEVICE_TABLE(of, hisi_pcie_of_match);
-
static struct platform_driver hisi_pcie_driver = {
.probe = hisi_pcie_probe,
.driver = {
@@ -245,10 +242,4 @@ static struct platform_driver hisi_pcie_driver = {
.of_match_table = hisi_pcie_of_match,
},
};
-
-module_platform_driver(hisi_pcie_driver);
-
-MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
-MODULE_AUTHOR("Dacai Zhu <zhudacai@hisilicon.com>");
-MODULE_AUTHOR("Gabriele Paoloni <gabriele.paoloni@huawei.com>");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(hisi_pcie_driver);
diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c
index a576aeeb2..e167b2f00 100644
--- a/drivers/pci/host/pcie-iproc.c
+++ b/drivers/pci/host/pcie-iproc.c
@@ -462,6 +462,10 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
if (!pcie || !pcie->dev || !pcie->base)
return -EINVAL;
+ ret = devm_request_pci_bus_resources(pcie->dev, res);
+ if (ret)
+ return ret;
+
ret = phy_init(pcie->phy);
if (ret) {
dev_err(pcie->dev, "unable to initialize PCIe PHY\n");
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 350921880..65db7a221 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -7,6 +7,8 @@
* arch/sh/drivers/pci/ops-sh7786.c
* Copyright (C) 2009 - 2011 Paul Mundt
*
+ * Author: Phil Edworthy <phil.edworthy@renesas.com>
+ *
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
@@ -18,7 +20,7 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -936,12 +938,6 @@ static const struct of_device_id rcar_pcie_of_match[] = {
{ .compatible = "renesas,pcie-r8a7795", .data = rcar_pcie_hw_init },
{},
};
-MODULE_DEVICE_TABLE(of, rcar_pcie_of_match);
-
-static void rcar_pcie_release_of_pci_ranges(struct rcar_pcie *pci)
-{
- pci_free_resource_list(&pci->resources);
-}
static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci)
{
@@ -955,37 +951,25 @@ static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci)
if (err)
return err;
+ err = devm_request_pci_bus_resources(dev, &pci->resources);
+ if (err)
+ goto out_release_res;
+
resource_list_for_each_entry(win, &pci->resources) {
- struct resource *parent, *res = win->res;
+ struct resource *res = win->res;
- switch (resource_type(res)) {
- case IORESOURCE_IO:
- parent = &ioport_resource;
+ if (resource_type(res) == IORESOURCE_IO) {
err = pci_remap_iospace(res, iobase);
- if (err) {
+ if (err)
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, res);
- continue;
- }
- break;
- case IORESOURCE_MEM:
- parent = &iomem_resource;
- break;
-
- case IORESOURCE_BUS:
- default:
- continue;
}
-
- err = devm_request_resource(dev, parent, res);
- if (err)
- goto out_release_res;
}
return 0;
out_release_res:
- rcar_pcie_release_of_pci_ranges(pci);
+ pci_free_resource_list(&pci->resources);
return err;
}
@@ -1073,8 +1057,4 @@ static struct platform_driver rcar_pcie_driver = {
},
.probe = rcar_pcie_probe,
};
-module_platform_driver(rcar_pcie_driver);
-
-MODULE_AUTHOR("Phil Edworthy <phil.edworthy@renesas.com>");
-MODULE_DESCRIPTION("Renesas R-Car PCIe driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(rcar_pcie_driver);
diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c
index 3479d30e2..0b597d919 100644
--- a/drivers/pci/host/pcie-xilinx-nwl.c
+++ b/drivers/pci/host/pcie-xilinx-nwl.c
@@ -825,27 +825,33 @@ static int nwl_pcie_probe(struct platform_device *pdev)
err = of_pci_get_host_bridge_resources(node, 0, 0xff, &res, &iobase);
if (err) {
- pr_err("Getting bridge resources failed\n");
+ dev_err(pcie->dev, "Getting bridge resources failed\n");
return err;
}
+ err = devm_request_pci_bus_resources(pcie->dev, &res);
+ if (err)
+ goto error;
+
err = nwl_pcie_init_irq_domain(pcie);
if (err) {
dev_err(pcie->dev, "Failed creating IRQ Domain\n");
- return err;
+ goto error;
}
bus = pci_create_root_bus(&pdev->dev, pcie->root_busno,
&nwl_pcie_ops, pcie, &res);
- if (!bus)
- return -ENOMEM;
+ if (!bus) {
+ err = -ENOMEM;
+ goto error;
+ }
if (IS_ENABLED(CONFIG_PCI_MSI)) {
err = nwl_pcie_enable_msi(pcie, bus);
if (err < 0) {
dev_err(&pdev->dev,
"failed to enable MSI support: %d\n", err);
- return err;
+ goto error;
}
}
pci_scan_child_bus(bus);
@@ -855,6 +861,10 @@ static int nwl_pcie_probe(struct platform_device *pdev)
pci_bus_add_devices(bus);
platform_set_drvdata(pdev, pcie);
return 0;
+
+error:
+ pci_free_resource_list(&res);
+ return err;
}
static int nwl_pcie_remove(struct platform_device *pdev)
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
index 65f0fe0c2..a30e01639 100644
--- a/drivers/pci/host/pcie-xilinx.c
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -550,7 +550,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
pcie_intc_node = of_get_next_child(node, NULL);
if (!pcie_intc_node) {
dev_err(dev, "No PCIe Intc node found\n");
- return PTR_ERR(pcie_intc_node);
+ return -ENODEV;
}
port->irq_domain = irq_domain_add_linear(pcie_intc_node, 4,
@@ -558,7 +558,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
port);
if (!port->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
- return PTR_ERR(port->irq_domain);
+ return -ENODEV;
}
/* Setup MSI */
@@ -569,7 +569,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
&xilinx_pcie_msi_chip);
if (!port->irq_domain) {
dev_err(dev, "Failed to get a MSI IRQ domain\n");
- return PTR_ERR(port->irq_domain);
+ return -ENODEV;
}
xilinx_pcie_enable_msi(port);
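These three fixes address the same bug: PTR_ERR() was being applied to a plain NULL return, and since PTR_ERR() is just a cast, NULL yields 0 and the caller would treat the failure as success. A sketch of the broken pattern:

static long ptr_err_on_null_sketch(void)
{
	void *p = NULL;		/* e.g. a failed irq_domain_add_linear() */

	return PTR_ERR(p);	/* evaluates to 0 -- looks like success */
}

Returning an explicit -ENODEV makes the failure visible to the caller.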
@@ -660,7 +660,6 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
struct xilinx_pcie_port *port;
struct device *dev = &pdev->dev;
struct pci_bus *bus;
-
int err;
resource_size_t iobase = 0;
LIST_HEAD(res);
@@ -694,10 +693,17 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
dev_err(dev, "Getting bridge resources failed\n");
return err;
}
+
+ err = devm_request_pci_bus_resources(dev, &res);
+ if (err)
+ goto error;
+
bus = pci_create_root_bus(&pdev->dev, 0,
&xilinx_pcie_ops, port, &res);
- if (!bus)
- return -ENOMEM;
+ if (!bus) {
+ err = -ENOMEM;
+ goto error;
+ }
#ifdef CONFIG_PCI_MSI
xilinx_pcie_msi_chip.dev = port->dev;
@@ -712,6 +718,10 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, port);
return 0;
+
+error:
+ pci_free_resource_list(&res);
+ return err;
}
/**
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index df8caec59..aadce45a9 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -113,6 +113,19 @@ config HOTPLUG_PCI_SHPC
When in doubt, say N.
+config HOTPLUG_PCI_POWERNV
+ tristate "PowerPC PowerNV PCI Hotplug driver"
+ depends on PPC_POWERNV && EEH
+ select OF_DYNAMIC
+ help
+ Say Y here if you run a PowerPC PowerNV platform that supports
+ PCI hotplug.
+
+ To compile this driver as a module, choose M here: the
+ module will be called pnv-php.
+
+ When in doubt, say N.
+
config HOTPLUG_PCI_RPA
tristate "RPA PCI Hotplug driver"
depends on PPC_PSERIES && EEH
diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile
index b616e7588..e33cdda45 100644
--- a/drivers/pci/hotplug/Makefile
+++ b/drivers/pci/hotplug/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_HOTPLUG_PCI_PCIE) += pciehp.o
obj-$(CONFIG_HOTPLUG_PCI_CPCI_ZT5550) += cpcihp_zt5550.o
obj-$(CONFIG_HOTPLUG_PCI_CPCI_GENERIC) += cpcihp_generic.o
obj-$(CONFIG_HOTPLUG_PCI_SHPC) += shpchp.o
+obj-$(CONFIG_HOTPLUG_PCI_POWERNV) += pnv-php.o
obj-$(CONFIG_HOTPLUG_PCI_RPA) += rpaphp.o
obj-$(CONFIG_HOTPLUG_PCI_RPA_DLPAR) += rpadlpar_io.o
obj-$(CONFIG_HOTPLUG_PCI_SGI) += sgi_hotplug.o
@@ -50,6 +51,8 @@ ibmphp-objs := ibmphp_core.o \
acpiphp-objs := acpiphp_core.o \
acpiphp_glue.o
+pnv-php-objs := pnv_php.o
+
rpaphp-objs := rpaphp_core.o \
rpaphp_pci.o \
rpaphp_slot.o
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index fa49f9143..a46b585fa 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -675,6 +675,9 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
if (bridge->is_going_away)
return;
+ if (bridge->pci_dev)
+ pm_runtime_get_sync(&bridge->pci_dev->dev);
+
list_for_each_entry(slot, &bridge->slots, node) {
struct pci_bus *bus = slot->bus;
struct pci_dev *dev, *tmp;
@@ -694,6 +697,9 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
disable_slot(slot);
}
}
+
+ if (bridge->pci_dev)
+ pm_runtime_put(&bridge->pci_dev->dev);
}
/*
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 5c24e9380..08e84d618 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -546,6 +546,10 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
u8 present;
bool link;
+ /* Interrupts cannot originate from a controller that's asleep */
+ if (pdev->current_state == PCI_D3cold)
+ return IRQ_NONE;
+
/*
* In order to guarantee that all interrupt events are
* serviced, we need to re-inspect Slot Status register after
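Both hotplug changes are about runtime power management: acpiphp pins the bridge so it cannot drop to D3cold while its slots are being re-checked, and pciehp's ISR returns IRQ_NONE for a port in D3cold, since a powered-down controller cannot have raised the interrupt. The bracketing idiom, as a sketch:

static void rescan_bridge_sketch(struct pci_dev *bridge)
{
	pm_runtime_get_sync(&bridge->dev);	/* wake and pin the bridge */
	/* ... inspect slots, add or remove devices ... */
	pm_runtime_put(&bridge->dev);		/* allow suspend again */
}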
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
new file mode 100644
index 000000000..e6245b03f
--- /dev/null
+++ b/drivers/pci/hotplug/pnv_php.c
@@ -0,0 +1,711 @@
+/*
+ * PCI Hotplug Driver for PowerPC PowerNV platform.
+ *
+ * Copyright Gavin Shan, IBM Corporation 2016.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/libfdt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+
+#include <asm/opal.h>
+#include <asm/pnv-pci.h>
+#include <asm/ppc-pci.h>
+
+#define DRIVER_VERSION "0.1"
+#define DRIVER_AUTHOR "Gavin Shan, IBM Corporation"
+#define DRIVER_DESC "PowerPC PowerNV PCI Hotplug Driver"
+
+static LIST_HEAD(pnv_php_slot_list);
+static DEFINE_SPINLOCK(pnv_php_lock);
+
+static void pnv_php_register(struct device_node *dn);
+static void pnv_php_unregister_one(struct device_node *dn);
+static void pnv_php_unregister(struct device_node *dn);
+
+static void pnv_php_free_slot(struct kref *kref)
+{
+ struct pnv_php_slot *php_slot = container_of(kref,
+ struct pnv_php_slot, kref);
+
+ WARN_ON(!list_empty(&php_slot->children));
+ kfree(php_slot->name);
+ kfree(php_slot);
+}
+
+static inline void pnv_php_put_slot(struct pnv_php_slot *php_slot)
+{
+ if (WARN_ON(!php_slot))
+ return;
+
+ kref_put(&php_slot->kref, pnv_php_free_slot);
+}
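Slot lifetime is reference-counted: pnv_php_match() (and therefore pnv_php_find_slot()) takes a reference on the slot it returns, and each caller must balance it with pnv_php_put_slot(), which frees the slot once the count drops to zero. Expected usage, as a sketch:

static void use_slot_sketch(struct device_node *dn)
{
	struct pnv_php_slot *slot = pnv_php_find_slot(dn);

	if (!slot)
		return;

	/* ... operate on the slot ... */

	pnv_php_put_slot(slot);	/* may end up in pnv_php_free_slot() */
}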
+
+static struct pnv_php_slot *pnv_php_match(struct device_node *dn,
+ struct pnv_php_slot *php_slot)
+{
+ struct pnv_php_slot *target, *tmp;
+
+ if (php_slot->dn == dn) {
+ kref_get(&php_slot->kref);
+ return php_slot;
+ }
+
+ list_for_each_entry(tmp, &php_slot->children, link) {
+ target = pnv_php_match(dn, tmp);
+ if (target)
+ return target;
+ }
+
+ return NULL;
+}
+
+struct pnv_php_slot *pnv_php_find_slot(struct device_node *dn)
+{
+ struct pnv_php_slot *php_slot, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pnv_php_lock, flags);
+ list_for_each_entry(tmp, &pnv_php_slot_list, link) {
+ php_slot = pnv_php_match(dn, tmp);
+ if (php_slot) {
+ spin_unlock_irqrestore(&pnv_php_lock, flags);
+ return php_slot;
+ }
+ }
+ spin_unlock_irqrestore(&pnv_php_lock, flags);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(pnv_php_find_slot);
+
+/*
+ * Remove pdn for all children of the indicated device node.
+ * The function should remove pdn in a depth-first manner.
+ */
+static void pnv_php_rmv_pdns(struct device_node *dn)
+{
+ struct device_node *child;
+
+ for_each_child_of_node(dn, child) {
+ pnv_php_rmv_pdns(child);
+
+ pci_remove_device_node_info(child);
+ }
+}
+
+/*
+ * Detach all child nodes of the indicated device node. The
+ * function handles device nodes in a depth-first manner.
+ *
+ * We must not invoke of_node_release() because the memory for an
+ * individual device node is part of a larger memory block. That
+ * block is allocated either from memblock (at system boot) or by
+ * kmalloc() when the device tree is unflattened through an OF
+ * changeset. A memblock allocation can never be freed; in the
+ * latter case the block should be released all at once.
+ */
+static void pnv_php_detach_device_nodes(struct device_node *parent)
+{
+ struct device_node *dn;
+ int refcount;
+
+ for_each_child_of_node(parent, dn) {
+ pnv_php_detach_device_nodes(dn);
+
+ of_node_put(dn);
+ refcount = atomic_read(&dn->kobj.kref.refcount);
+ if (unlikely(refcount != 1))
+ pr_warn("Invalid refcount %d on <%s>\n",
+ refcount, of_node_full_name(dn));
+
+ of_detach_node(dn);
+ }
+}
+
+static void pnv_php_rmv_devtree(struct pnv_php_slot *php_slot)
+{
+ pnv_php_rmv_pdns(php_slot->dn);
+
+ /*
+ * Decrease the refcount if the device nodes were created
+ * through OF changeset before detaching them.
+ */
+ if (php_slot->fdt)
+ of_changeset_destroy(&php_slot->ocs);
+ pnv_php_detach_device_nodes(php_slot->dn);
+
+ if (php_slot->fdt) {
+ kfree(php_slot->dt);
+ kfree(php_slot->fdt);
+ php_slot->dt = NULL;
+ php_slot->dn->child = NULL;
+ php_slot->fdt = NULL;
+ }
+}
+
+/*
+ * As the nodes in an OF changeset are applied in reverse order, we
+ * need to reverse the nodes in advance so that the node order is
+ * correct after the changeset is applied.
+ */
+static void pnv_php_reverse_nodes(struct device_node *parent)
+{
+ struct device_node *child, *next;
+
+ /* Depth first: recurse into the children before this level */
+ for_each_child_of_node(parent, child)
+ pnv_php_reverse_nodes(child);
+
+ /* Reverse the nodes in the child list */
+ child = parent->child;
+ parent->child = NULL;
+ while (child) {
+ next = child->sibling;
+
+ child->sibling = parent->child;
+ parent->child = child;
+ child = next;
+ }
+}
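A short worked example of why the pre-reversal is needed (illustration only, not part of the patch):

/*
 * With children linked A -> B -> C, pnv_php_reverse_nodes() leaves
 * C -> B -> A. of_changeset_apply() then attaches the nodes in
 * reverse order of their changeset entries, so the final sibling
 * order is A -> B -> C again, matching the unflattened tree.
 */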
+
+static int pnv_php_populate_changeset(struct of_changeset *ocs,
+ struct device_node *dn)
+{
+ struct device_node *child;
+ int ret = 0;
+
+ for_each_child_of_node(dn, child) {
+ ret = of_changeset_attach_node(ocs, child);
+ if (unlikely(ret))
+ break;
+
+ ret = pnv_php_populate_changeset(ocs, child);
+ if (unlikely(ret))
+ break;
+ }
+
+ return ret;
+}
+
+static void *pnv_php_add_one_pdn(struct device_node *dn, void *data)
+{
+ struct pci_controller *hose = (struct pci_controller *)data;
+ struct pci_dn *pdn;
+
+ pdn = pci_add_device_node_info(hose, dn);
+ if (unlikely(!pdn))
+ return ERR_PTR(-ENOMEM);
+
+ return NULL;
+}
+
+static void pnv_php_add_pdns(struct pnv_php_slot *slot)
+{
+ struct pci_controller *hose = pci_bus_to_host(slot->bus);
+
+ pci_traverse_device_nodes(slot->dn, pnv_php_add_one_pdn, hose);
+}
+
+static int pnv_php_add_devtree(struct pnv_php_slot *php_slot)
+{
+ void *fdt, *fdt1, *dt;
+ int ret;
+
+ /* We don't know the FDT blob size. Fetch it into a maximally
+ * sized chunk, then copy it into another chunk that fits the
+ * real size.
+ */
+ fdt1 = kzalloc(0x10000, GFP_KERNEL);
+ if (unlikely(!fdt1)) {
+ ret = -ENOMEM;
+ dev_warn(&php_slot->pdev->dev, "Cannot alloc FDT blob\n");
+ goto out;
+ }
+
+ ret = pnv_pci_get_device_tree(php_slot->dn->phandle, fdt1, 0x10000);
+ if (unlikely(ret)) {
+ dev_warn(&php_slot->pdev->dev, "Error %d getting FDT blob\n",
+ ret);
+ goto free_fdt1;
+ }
+
+ fdt = kzalloc(fdt_totalsize(fdt1), GFP_KERNEL);
+ if (unlikely(!fdt)) {
+ ret = -ENOMEM;
+ dev_warn(&php_slot->pdev->dev, "Cannot %d bytes memory\n",
+ fdt_totalsize(fdt1));
+ goto free_fdt1;
+ }
+
+ /* Unflatten device tree blob */
+ memcpy(fdt, fdt1, fdt_totalsize(fdt1));
+ dt = of_fdt_unflatten_tree(fdt, php_slot->dn, NULL);
+ if (unlikely(!dt)) {
+ ret = -EINVAL;
+ dev_warn(&php_slot->pdev->dev, "Cannot unflatten FDT\n");
+ goto free_fdt;
+ }
+
+ /* Initialize and apply the changeset */
+ of_changeset_init(&php_slot->ocs);
+ pnv_php_reverse_nodes(php_slot->dn);
+ ret = pnv_php_populate_changeset(&php_slot->ocs, php_slot->dn);
+ if (unlikely(ret)) {
+ pnv_php_reverse_nodes(php_slot->dn);
+ dev_warn(&php_slot->pdev->dev, "Error %d populating changeset\n",
+ ret);
+ goto free_dt;
+ }
+
+ php_slot->dn->child = NULL;
+ ret = of_changeset_apply(&php_slot->ocs);
+ if (unlikely(ret)) {
+ dev_warn(&php_slot->pdev->dev, "Error %d applying changeset\n",
+ ret);
+ goto destroy_changeset;
+ }
+
+ /* Add device node firmware data */
+ pnv_php_add_pdns(php_slot);
+ php_slot->fdt = fdt;
+ php_slot->dt = dt;
+ kfree(fdt1);
+ goto out;
+
+destroy_changeset:
+ of_changeset_destroy(&php_slot->ocs);
+free_dt:
+ kfree(dt);
+ php_slot->dn->child = NULL;
+free_fdt:
+ kfree(fdt);
+free_fdt1:
+ kfree(fdt1);
+out:
+ return ret;
+}
+
+int pnv_php_set_slot_power_state(struct hotplug_slot *slot,
+ uint8_t state)
+{
+ struct pnv_php_slot *php_slot = slot->private;
+ struct opal_msg msg;
+ int ret;
+
+ ret = pnv_pci_set_power_state(php_slot->id, state, &msg);
+ if (likely(ret > 0)) {
+ if (be64_to_cpu(msg.params[1]) != php_slot->dn->phandle ||
+ be64_to_cpu(msg.params[2]) != state ||
+ be64_to_cpu(msg.params[3]) != OPAL_SUCCESS) {
+ dev_warn(&php_slot->pdev->dev, "Wrong msg (%lld, %lld, %lld)\n",
+ be64_to_cpu(msg.params[1]),
+ be64_to_cpu(msg.params[2]),
+ be64_to_cpu(msg.params[3]));
+ return -ENOMSG;
+ }
+ } else if (unlikely(ret < 0)) {
+ dev_warn(&php_slot->pdev->dev, "Error %d powering %s\n",
+ ret, (state == OPAL_PCI_SLOT_POWER_ON) ? "on" : "off");
+ return ret;
+ }
+
+ if (state == OPAL_PCI_SLOT_POWER_OFF || state == OPAL_PCI_SLOT_OFFLINE)
+ pnv_php_rmv_devtree(php_slot);
+ else
+ ret = pnv_php_add_devtree(php_slot);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pnv_php_set_slot_power_state);
+
+static int pnv_php_get_power_state(struct hotplug_slot *slot, u8 *state)
+{
+ struct pnv_php_slot *php_slot = slot->private;
+ uint8_t power_state = OPAL_PCI_SLOT_POWER_ON;
+ int ret;
+
+ /*
+ * Retrieve the power status from firmware. If we fail to
+ * get it, the power status falls back to on.
+ */
+ ret = pnv_pci_get_power_state(php_slot->id, &power_state);
+ if (unlikely(ret)) {
+ dev_warn(&php_slot->pdev->dev, "Error %d getting power status\n",
+ ret);
+ } else {
+ *state = power_state;
+ slot->info->power_status = power_state;
+ }
+
+ return 0;
+}
+
+static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state)
+{
+ struct pnv_php_slot *php_slot = slot->private;
+ uint8_t presence = OPAL_PCI_SLOT_EMPTY;
+ int ret;
+
+ /*
+ * Retrieve the presence status from firmware. If we can't
+ * get it, it falls back to empty.
+ */
+ ret = pnv_pci_get_presence_state(php_slot->id, &presence);
+ if (likely(ret >= 0)) {
+ *state = presence;
+ slot->info->adapter_status = presence;
+ ret = 0;
+ } else {
+ dev_warn(&php_slot->pdev->dev, "Error %d getting presence\n",
+ ret);
+ }
+
+ return ret;
+}
+
+static int pnv_php_set_attention_state(struct hotplug_slot *slot, u8 state)
+{
+ /* FIXME: Make it real once firmware supports it */
+ slot->info->attention_status = state;
+
+ return 0;
+}
+
+static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan)
+{
+ struct hotplug_slot *slot = &php_slot->slot;
+ uint8_t presence = OPAL_PCI_SLOT_EMPTY;
+ uint8_t power_status = OPAL_PCI_SLOT_POWER_ON;
+ int ret;
+
+ /* Check if the slot has been configured */
+ if (php_slot->state != PNV_PHP_STATE_REGISTERED)
+ return 0;
+
+ /* Retrieve slot presence status */
+ ret = pnv_php_get_adapter_state(slot, &presence);
+ if (unlikely(ret))
+ return ret;
+
+ /* Proceed straight to scan if there is nothing behind the slot */
+ if (presence == OPAL_PCI_SLOT_EMPTY)
+ goto scan;
+
+ /*
+ * If the power supply to the slot is off, we can't detect the
+ * adapter presence state. That means we have to turn the
+ * slot on before probing the slot's presence state.
+ *
+ * On the first pass we don't change the power status, to speed
+ * up system boot, on the assumption that the firmware supplies
+ * a consistent slot power status: an empty slot always has its
+ * power off and a non-empty slot has its power on.
+ */
+ if (!php_slot->power_state_check) {
+ php_slot->power_state_check = true;
+
+ ret = pnv_php_get_power_state(slot, &power_status);
+ if (unlikely(ret))
+ return ret;
+
+ if (power_status != OPAL_PCI_SLOT_POWER_ON)
+ return 0;
+ }
+
+ /* Check the power status. Scan the slot if it is already on */
+ ret = pnv_php_get_power_state(slot, &power_status);
+ if (unlikely(ret))
+ return ret;
+
+ if (power_status == OPAL_PCI_SLOT_POWER_ON)
+ goto scan;
+
+ /* Power is off, turn it on and then scan the slot */
+ ret = pnv_php_set_slot_power_state(slot, OPAL_PCI_SLOT_POWER_ON);
+ if (unlikely(ret))
+ return ret;
+
+scan:
+ if (presence == OPAL_PCI_SLOT_PRESENT) {
+ if (rescan) {
+ pci_lock_rescan_remove();
+ pci_hp_add_devices(php_slot->bus);
+ pci_unlock_rescan_remove();
+ }
+
+ /* Rescan for child hotpluggable slots */
+ php_slot->state = PNV_PHP_STATE_POPULATED;
+ if (rescan)
+ pnv_php_register(php_slot->dn);
+ } else {
+ php_slot->state = PNV_PHP_STATE_POPULATED;
+ }
+
+ return 0;
+}
+
+static int pnv_php_enable_slot(struct hotplug_slot *slot)
+{
+ struct pnv_php_slot *php_slot = container_of(slot,
+ struct pnv_php_slot, slot);
+
+ return pnv_php_enable(php_slot, true);
+}
+
+static int pnv_php_disable_slot(struct hotplug_slot *slot)
+{
+ struct pnv_php_slot *php_slot = slot->private;
+ int ret;
+
+ if (php_slot->state != PNV_PHP_STATE_POPULATED)
+ return 0;
+
+ /* Remove all devices behind the slot */
+ pci_lock_rescan_remove();
+ pci_hp_remove_devices(php_slot->bus);
+ pci_unlock_rescan_remove();
+
+ /* Detach the child hotpluggable slots */
+ pnv_php_unregister(php_slot->dn);
+
+ /* Notify firmware and remove device nodes */
+ ret = pnv_php_set_slot_power_state(slot, OPAL_PCI_SLOT_POWER_OFF);
+
+ php_slot->state = PNV_PHP_STATE_REGISTERED;
+ return ret;
+}
+
+static struct hotplug_slot_ops php_slot_ops = {
+ .get_power_status = pnv_php_get_power_state,
+ .get_adapter_status = pnv_php_get_adapter_state,
+ .set_attention_status = pnv_php_set_attention_state,
+ .enable_slot = pnv_php_enable_slot,
+ .disable_slot = pnv_php_disable_slot,
+};
+
+static void pnv_php_release(struct hotplug_slot *slot)
+{
+ struct pnv_php_slot *php_slot = slot->private;
+ unsigned long flags;
+
+ /* Remove from global or child list */
+ spin_lock_irqsave(&pnv_php_lock, flags);
+ list_del(&php_slot->link);
+ spin_unlock_irqrestore(&pnv_php_lock, flags);
+
+ /* Detach from parent */
+ pnv_php_put_slot(php_slot);
+ pnv_php_put_slot(php_slot->parent);
+}
+
+static struct pnv_php_slot *pnv_php_alloc_slot(struct device_node *dn)
+{
+ struct pnv_php_slot *php_slot;
+ struct pci_bus *bus;
+ const char *label;
+ uint64_t id;
+
+ label = of_get_property(dn, "ibm,slot-label", NULL);
+ if (unlikely(!label))
+ return NULL;
+
+ if (pnv_pci_get_slot_id(dn, &id))
+ return NULL;
+
+ bus = pci_find_bus_by_node(dn);
+ if (unlikely(!bus))
+ return NULL;
+
+ php_slot = kzalloc(sizeof(*php_slot), GFP_KERNEL);
+ if (unlikely(!php_slot))
+ return NULL;
+
+ php_slot->name = kstrdup(label, GFP_KERNEL);
+ if (unlikely(!php_slot->name)) {
+ kfree(php_slot);
+ return NULL;
+ }
+
+ if (likely(dn->child && PCI_DN(dn->child)))
+ php_slot->slot_no = PCI_SLOT(PCI_DN(dn->child)->devfn);
+ else
+ php_slot->slot_no = -1; /* Placeholder slot */
+
+ kref_init(&php_slot->kref);
+ php_slot->state = PNV_PHP_STATE_INITIALIZED;
+ php_slot->dn = dn;
+ php_slot->pdev = bus->self;
+ php_slot->bus = bus;
+ php_slot->id = id;
+ php_slot->power_state_check = false;
+ php_slot->slot.ops = &php_slot_ops;
+ php_slot->slot.info = &php_slot->slot_info;
+ php_slot->slot.release = pnv_php_release;
+ php_slot->slot.private = php_slot;
+
+ INIT_LIST_HEAD(&php_slot->children);
+ INIT_LIST_HEAD(&php_slot->link);
+
+ return php_slot;
+}
+
+static int pnv_php_register_slot(struct pnv_php_slot *php_slot)
+{
+ struct pnv_php_slot *parent;
+ struct device_node *dn = php_slot->dn;
+ unsigned long flags;
+ int ret;
+
+ /* Check if the slot is registered or not */
+ parent = pnv_php_find_slot(php_slot->dn);
+ if (unlikely(parent)) {
+ pnv_php_put_slot(parent);
+ return -EEXIST;
+ }
+
+ /* Register PCI slot */
+ ret = pci_hp_register(&php_slot->slot, php_slot->bus,
+ php_slot->slot_no, php_slot->name);
+ if (unlikely(ret)) {
+ dev_warn(&php_slot->pdev->dev, "Error %d registering slot\n",
+ ret);
+ return ret;
+ }
+
+ /* Attach to the parent's child list or global list */
+ while ((dn = of_get_parent(dn))) {
+ if (!PCI_DN(dn)) {
+ of_node_put(dn);
+ break;
+ }
+
+ parent = pnv_php_find_slot(dn);
+ if (parent) {
+ of_node_put(dn);
+ break;
+ }
+
+ of_node_put(dn);
+ }
+
+ spin_lock_irqsave(&pnv_php_lock, flags);
+ php_slot->parent = parent;
+ if (parent)
+ list_add_tail(&php_slot->link, &parent->children);
+ else
+ list_add_tail(&php_slot->link, &pnv_php_slot_list);
+ spin_unlock_irqrestore(&pnv_php_lock, flags);
+
+ php_slot->state = PNV_PHP_STATE_REGISTERED;
+ return 0;
+}
+
+static int pnv_php_register_one(struct device_node *dn)
+{
+ struct pnv_php_slot *php_slot;
+ const __be32 *prop32;
+ int ret;
+
+ /* Check if it's a hotpluggable slot */
+ prop32 = of_get_property(dn, "ibm,slot-pluggable", NULL);
+ if (!prop32 || !of_read_number(prop32, 1))
+ return -ENXIO;
+
+ prop32 = of_get_property(dn, "ibm,reset-by-firmware", NULL);
+ if (!prop32 || !of_read_number(prop32, 1))
+ return -ENXIO;
+
+ php_slot = pnv_php_alloc_slot(dn);
+ if (unlikely(!php_slot))
+ return -ENODEV;
+
+ ret = pnv_php_register_slot(php_slot);
+ if (unlikely(ret))
+ goto free_slot;
+
+ ret = pnv_php_enable(php_slot, false);
+ if (unlikely(ret))
+ goto unregister_slot;
+
+ return 0;
+
+unregister_slot:
+ pnv_php_unregister_one(php_slot->dn);
+free_slot:
+ pnv_php_put_slot(php_slot);
+ return ret;
+}
+
+static void pnv_php_register(struct device_node *dn)
+{
+ struct device_node *child;
+
+ /*
+ * The parent slots should be registered before their
+ * child slots.
+ */
+ for_each_child_of_node(dn, child) {
+ pnv_php_register_one(child);
+ pnv_php_register(child);
+ }
+}
+
+static void pnv_php_unregister_one(struct device_node *dn)
+{
+ struct pnv_php_slot *php_slot;
+
+ php_slot = pnv_php_find_slot(dn);
+ if (!php_slot)
+ return;
+
+ php_slot->state = PNV_PHP_STATE_OFFLINE;
+ pnv_php_put_slot(php_slot);
+ pci_hp_deregister(&php_slot->slot);
+}
+
+static void pnv_php_unregister(struct device_node *dn)
+{
+ struct device_node *child;
+
+ /* The child slots should go before their parent slots */
+ for_each_child_of_node(dn, child) {
+ pnv_php_unregister(child);
+ pnv_php_unregister_one(child);
+ }
+}
+
+static int __init pnv_php_init(void)
+{
+ struct device_node *dn;
+
+ pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
+ for_each_compatible_node(dn, NULL, "ibm,ioda2-phb")
+ pnv_php_register(dn);
+
+ return 0;
+}
+
+static void __exit pnv_php_exit(void)
+{
+ struct device_node *dn;
+
+ for_each_compatible_node(dn, NULL, "ibm,ioda2-phb")
+ pnv_php_unregister(dn);
+}
+
+module_init(pnv_php_init);
+module_exit(pnv_php_exit);
+
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c
index 6937c725b..388c4d8fc 100644
--- a/drivers/pci/hotplug/rpaphp_slot.c
+++ b/drivers/pci/hotplug/rpaphp_slot.c
@@ -117,8 +117,10 @@ EXPORT_SYMBOL_GPL(rpaphp_deregister_slot);
int rpaphp_register_slot(struct slot *slot)
{
struct hotplug_slot *php_slot = slot->hotplug_slot;
+ struct device_node *child;
+ u32 my_index;
int retval;
- int slotno;
+ int slotno = -1;
dbg("%s registering slot:path[%s] index[%x], name[%s] pdomain[%x] type[%d]\n",
__func__, slot->dn->full_name, slot->index, slot->name,
@@ -130,10 +132,15 @@ int rpaphp_register_slot(struct slot *slot)
return -EAGAIN;
}
- if (slot->dn->child)
- slotno = PCI_SLOT(PCI_DN(slot->dn->child)->devfn);
- else
- slotno = -1;
+ for_each_child_of_node(slot->dn, child) {
+ retval = of_property_read_u32(child, "ibm,my-drc-index", &my_index);
+ if (!retval && my_index == slot->index) {
+ slotno = PCI_SLOT(PCI_DN(child)->devfn);
+ of_node_put(child);
+ break;
+ }
+ }
+
retval = pci_hp_register(php_slot, slot->bus, slotno, slot->name);
if (retval) {
err("pci_hp_register failed with error %d\n", retval);
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 565e2a4e6..98f12223c 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -4,6 +4,7 @@
*
* Copyright (C) 2003-2004 Intel
* Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
+ * Copyright (C) 2016 Christoph Hellwig.
*/
#include <linux/err.h>
@@ -207,6 +208,12 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag);
}
+static void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
+{
+ return desc->mask_base +
+ desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
+}
+
/*
* This internal function does not flush PCI writes to the device.
* All users must ensure that they read from the device before either
@@ -217,8 +224,6 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag)
{
u32 mask_bits = desc->masked;
- unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_VECTOR_CTRL;
if (pci_msi_ignore_mask)
return 0;
@@ -226,7 +231,7 @@ u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag)
mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
if (flag)
mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
- writel(mask_bits, desc->mask_base + offset);
+ writel(mask_bits, pci_msix_desc_addr(desc) + PCI_MSIX_ENTRY_VECTOR_CTRL);
return mask_bits;
}
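pci_msix_desc_addr() centralizes the MSI-X table arithmetic: each table entry is PCI_MSIX_ENTRY_SIZE (16) bytes, and callers add a per-field offset on top. A sketch of the equivalent open-coded computation:

static void __iomem *msix_field_sketch(struct msi_desc *desc,
				       unsigned int field_offset)
{
	return desc->mask_base +
	       desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
	       field_offset;	/* e.g. PCI_MSIX_ENTRY_VECTOR_CTRL */
}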
@@ -284,8 +289,7 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
BUG_ON(dev->current_state != PCI_D0);
if (entry->msi_attrib.is_msix) {
- void __iomem *base = entry->mask_base +
- entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
+ void __iomem *base = pci_msix_desc_addr(entry);
msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
@@ -315,9 +319,7 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
if (dev->current_state != PCI_D0) {
/* Don't touch the hardware now */
} else if (entry->msi_attrib.is_msix) {
- void __iomem *base;
- base = entry->mask_base +
- entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
+ void __iomem *base = pci_msix_desc_addr(entry);
writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
@@ -567,6 +569,7 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev, int nvec)
entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1;
entry->msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec));
entry->nvec_used = nvec;
+ entry->affinity = dev->irq_affinity;
if (control & PCI_MSI_FLAGS_64BIT)
entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
@@ -678,10 +681,18 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
struct msix_entry *entries, int nvec)
{
+ const struct cpumask *mask = NULL;
struct msi_desc *entry;
- int i;
+ int cpu = -1, i;
for (i = 0; i < nvec; i++) {
+ if (dev->irq_affinity) {
+ cpu = cpumask_next(cpu, dev->irq_affinity);
+ if (cpu >= nr_cpu_ids)
+ cpu = cpumask_first(dev->irq_affinity);
+ mask = cpumask_of(cpu);
+ }
+
entry = alloc_msi_entry(&dev->dev);
if (!entry) {
if (!i)
@@ -694,10 +705,14 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
entry->msi_attrib.is_msix = 1;
entry->msi_attrib.is_64 = 1;
- entry->msi_attrib.entry_nr = entries[i].entry;
+ if (entries)
+ entry->msi_attrib.entry_nr = entries[i].entry;
+ else
+ entry->msi_attrib.entry_nr = i;
entry->msi_attrib.default_irq = dev->irq;
entry->mask_base = base;
entry->nvec_used = 1;
+ entry->affinity = mask;
list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
}
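A worked example of the spreading loop above: with dev->irq_affinity covering CPUs 0-3 and nvec = 6, cpumask_next() yields CPUs 0, 1, 2 and 3 for the first four vectors, runs past nr_cpu_ids on the fifth, and cpumask_first() wraps the walk so vectors 4 and 5 land back on CPUs 0 and 1.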
@@ -712,13 +727,11 @@ static void msix_program_entries(struct pci_dev *dev,
int i = 0;
for_each_pci_msi_entry(entry, dev) {
- int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_VECTOR_CTRL;
-
- entries[i].vector = entry->irq;
- entry->masked = readl(entry->mask_base + offset);
+ if (entries)
+ entries[i++].vector = entry->irq;
+ entry->masked = readl(pci_msix_desc_addr(entry) +
+ PCI_MSIX_ENTRY_VECTOR_CTRL);
msix_mask_irq(entry, 1);
- i++;
}
}
@@ -931,7 +944,7 @@ EXPORT_SYMBOL(pci_msix_vec_count);
/**
* pci_enable_msix - configure device's MSI-X capability structure
* @dev: pointer to the pci_dev data structure of MSI-X device function
- * @entries: pointer to an array of MSI-X entries
+ * @entries: pointer to an array of MSI-X entries (optional)
* @nvec: number of MSI-X irqs requested for allocation by device driver
*
* Setup the MSI-X capability structure of device function with the number
@@ -951,22 +964,21 @@ int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
if (!pci_msi_supported(dev, nvec))
return -EINVAL;
- if (!entries)
- return -EINVAL;
-
nr_entries = pci_msix_vec_count(dev);
if (nr_entries < 0)
return nr_entries;
if (nvec > nr_entries)
return nr_entries;
- /* Check for any invalid entries */
- for (i = 0; i < nvec; i++) {
- if (entries[i].entry >= nr_entries)
- return -EINVAL; /* invalid entry */
- for (j = i + 1; j < nvec; j++) {
- if (entries[i].entry == entries[j].entry)
- return -EINVAL; /* duplicate entry */
+ if (entries) {
+ /* Check for any invalid entries */
+ for (i = 0; i < nvec; i++) {
+ if (entries[i].entry >= nr_entries)
+ return -EINVAL; /* invalid entry */
+ for (j = i + 1; j < nvec; j++) {
+ if (entries[i].entry == entries[j].entry)
+ return -EINVAL; /* duplicate entry */
+ }
}
}
WARN_ON(!!dev->msix_enabled);
@@ -1026,19 +1038,8 @@ int pci_msi_enabled(void)
}
EXPORT_SYMBOL(pci_msi_enabled);
-/**
- * pci_enable_msi_range - configure device's MSI capability structure
- * @dev: device to configure
- * @minvec: minimal number of interrupts to configure
- * @maxvec: maximum number of interrupts to configure
- *
- * This function tries to allocate a maximum possible number of interrupts in a
- * range between @minvec and @maxvec. It returns a negative errno if an error
- * occurs. If it succeeds, it returns the actual number of interrupts allocated
- * and updates the @dev's irq member to the lowest new interrupt number;
- * the other interrupt numbers allocated to this device are consecutive.
- **/
-int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
+static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
+ unsigned int flags)
{
int nvec;
int rc;
@@ -1061,25 +1062,85 @@ int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
nvec = pci_msi_vec_count(dev);
if (nvec < 0)
return nvec;
- else if (nvec < minvec)
+ if (nvec < minvec)
return -EINVAL;
- else if (nvec > maxvec)
+
+ if (nvec > maxvec)
nvec = maxvec;
- do {
+ for (;;) {
+ if (flags & PCI_IRQ_AFFINITY) {
+ dev->irq_affinity = irq_create_affinity_mask(&nvec);
+ if (nvec < minvec)
+ return -ENOSPC;
+ }
+
rc = msi_capability_init(dev, nvec);
- if (rc < 0) {
+ if (rc == 0)
+ return nvec;
+
+ kfree(dev->irq_affinity);
+ dev->irq_affinity = NULL;
+
+ if (rc < 0)
return rc;
- } else if (rc > 0) {
- if (rc < minvec)
+ if (rc < minvec)
+ return -ENOSPC;
+
+ nvec = rc;
+ }
+}
+
+/**
+ * pci_enable_msi_range - configure device's MSI capability structure
+ * @dev: device to configure
+ * @minvec: minimal number of interrupts to configure
+ * @maxvec: maximum number of interrupts to configure
+ *
+ * This function tries to allocate a maximum possible number of interrupts in a
+ * range between @minvec and @maxvec. It returns a negative errno if an error
+ * occurs. If it succeeds, it returns the actual number of interrupts allocated
+ * and updates the @dev's irq member to the lowest new interrupt number;
+ * the other interrupt numbers allocated to this device are consecutive.
+ **/
+int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
+{
+ return __pci_enable_msi_range(dev, minvec, maxvec, 0);
+}
+EXPORT_SYMBOL(pci_enable_msi_range);
+
+static int __pci_enable_msix_range(struct pci_dev *dev,
+ struct msix_entry *entries, int minvec, int maxvec,
+ unsigned int flags)
+{
+ int nvec = maxvec;
+ int rc;
+
+ if (maxvec < minvec)
+ return -ERANGE;
+
+ for (;;) {
+ if (flags & PCI_IRQ_AFFINITY) {
+ dev->irq_affinity = irq_create_affinity_mask(&nvec);
+ if (nvec < minvec)
return -ENOSPC;
- nvec = rc;
}
- } while (rc);
- return nvec;
+ rc = pci_enable_msix(dev, entries, nvec);
+ if (rc == 0)
+ return nvec;
+
+ kfree(dev->irq_affinity);
+ dev->irq_affinity = NULL;
+
+ if (rc < 0)
+ return rc;
+ if (rc < minvec)
+ return -ENOSPC;
+
+ nvec = rc;
+ }
}
-EXPORT_SYMBOL(pci_enable_msi_range);
/**
* pci_enable_msix_range - configure device's MSI-X capability structure
@@ -1097,28 +1158,103 @@ EXPORT_SYMBOL(pci_enable_msi_range);
* with new allocated MSI-X interrupts.
**/
int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
- int minvec, int maxvec)
+ int minvec, int maxvec)
{
- int nvec = maxvec;
- int rc;
+ return __pci_enable_msix_range(dev, entries, minvec, maxvec, 0);
+}
+EXPORT_SYMBOL(pci_enable_msix_range);
- if (maxvec < minvec)
- return -ERANGE;
+/**
+ * pci_alloc_irq_vectors - allocate multiple IRQs for a device
+ * @dev: PCI device to operate on
+ * @min_vecs: minimum number of vectors required (must be >= 1)
+ * @max_vecs: maximum (desired) number of vectors
+ * @flags: flags or quirks for the allocation
+ *
+ * Allocate up to @max_vecs interrupt vectors for @dev, using MSI-X or MSI
+ * vectors if available, and fall back to a single legacy vector
+ * if neither is available. Return the number of vectors allocated
+ * (which might be smaller than @max_vecs) if successful, or a negative
+ * error code on error. If fewer than @min_vecs interrupt vectors are
+ * available for @dev, the function fails with -ENOSPC.
+ *
+ * To get the Linux IRQ number used for a vector that can be passed to
+ * request_irq() use the pci_irq_vector() helper.
+ */
+int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
+ unsigned int max_vecs, unsigned int flags)
+{
+ int vecs = -ENOSPC;
- do {
- rc = pci_enable_msix(dev, entries, nvec);
- if (rc < 0) {
- return rc;
- } else if (rc > 0) {
- if (rc < minvec)
- return -ENOSPC;
- nvec = rc;
+ if (flags & PCI_IRQ_MSIX) {
+ vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs,
+ flags);
+ if (vecs > 0)
+ return vecs;
+ }
+
+ if (flags & PCI_IRQ_MSI) {
+ vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, flags);
+ if (vecs > 0)
+ return vecs;
+ }
+
+ /* use legacy irq if allowed */
+ if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1) {
+ pci_intx(dev, 1);
+ return 1;
+ }
+
+ return vecs;
+}
+EXPORT_SYMBOL(pci_alloc_irq_vectors);
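A hedged sketch of a caller of the new allocator (pdev, the vector counts and the surrounding driver are illustrative, not part of this patch):

    int nvec;

    /* ask for 1..8 vectors, allowing MSI-X, MSI or legacy INTx */
    nvec = pci_alloc_irq_vectors(pdev, 1, 8,
                                 PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
    if (nvec < 0)
        return nvec;    /* -ENOSPC if even one vector was unavailable */

On success nvec holds however many vectors were granted; see pci_irq_vector() below for turning a vector index into a Linux IRQ number.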
+
+/**
+ * pci_free_irq_vectors - free previously allocated IRQs for a device
+ * @dev: PCI device to operate on
+ *
+ * Undoes the allocations and enabling in pci_alloc_irq_vectors().
+ */
+void pci_free_irq_vectors(struct pci_dev *dev)
+{
+ pci_disable_msix(dev);
+ pci_disable_msi(dev);
+}
+EXPORT_SYMBOL(pci_free_irq_vectors);
+
+/**
+ * pci_irq_vector - return Linux IRQ number of a device vector
+ * @dev: PCI device to operate on
+ * @nr: device-relative interrupt vector index (0-based).
+ */
+int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
+{
+ if (dev->msix_enabled) {
+ struct msi_desc *entry;
+ int i = 0;
+
+ for_each_pci_msi_entry(entry, dev) {
+ if (i == nr)
+ return entry->irq;
+ i++;
}
- } while (rc);
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
- return nvec;
+ if (dev->msi_enabled) {
+ struct msi_desc *entry = first_pci_msi_entry(dev);
+
+ if (WARN_ON_ONCE(nr >= entry->nvec_used))
+ return -EINVAL;
+ } else {
+ if (WARN_ON_ONCE(nr > 0))
+ return -EINVAL;
+ }
+
+ return dev->irq + nr;
}
-EXPORT_SYMBOL(pci_enable_msix_range);
+EXPORT_SYMBOL(pci_irq_vector);
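Continuing the sketch: each vector index maps to a Linux IRQ through pci_irq_vector(); my_handler and mydev are hypothetical driver names:

    int i, err;

    for (i = 0; i < nvec; i++) {
        err = request_irq(pci_irq_vector(pdev, i), my_handler, 0,
                          "my-driver", mydev);
        if (err)
            goto undo;
    }
    return 0;

    undo:
    while (--i >= 0)
        free_irq(pci_irq_vector(pdev, i), mydev);
    pci_free_irq_vectors(pdev);   /* disables MSI-X/MSI again */
    return err;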
struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
{
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index d7ffd6681..e39a67c8e 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -777,7 +777,7 @@ static int pci_pm_suspend_noirq(struct device *dev)
if (!pci_dev->state_saved) {
pci_save_state(pci_dev);
- if (!pci_has_subordinate(pci_dev))
+ if (pci_power_manageable(pci_dev))
pci_prepare_to_sleep(pci_dev);
}
@@ -1144,7 +1144,6 @@ static int pci_pm_runtime_suspend(struct device *dev)
return -ENOSYS;
pci_dev->state_saved = false;
- pci_dev->no_d3cold = false;
error = pm->runtime_suspend(dev);
if (error) {
/*
@@ -1161,8 +1160,6 @@ static int pci_pm_runtime_suspend(struct device *dev)
return error;
}
- if (!pci_dev->d3cold_allowed)
- pci_dev->no_d3cold = true;
pci_fixup_device(pci_fixup_suspend, pci_dev);
diff --git a/drivers/pci/pci-mid.c b/drivers/pci/pci-mid.c
new file mode 100644
index 000000000..55f453de5
--- /dev/null
+++ b/drivers/pci/pci-mid.c
@@ -0,0 +1,82 @@
+/*
+ * Intel MID platform PM support
+ *
+ * Copyright (C) 2016, Intel Corporation
+ *
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/pci.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+#include <asm/intel-mid.h>
+
+#include "pci.h"
+
+static bool mid_pci_power_manageable(struct pci_dev *dev)
+{
+ return true;
+}
+
+static int mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
+{
+ return intel_mid_pci_set_power_state(pdev, state);
+}
+
+static pci_power_t mid_pci_choose_state(struct pci_dev *pdev)
+{
+ return PCI_D3hot;
+}
+
+static int mid_pci_sleep_wake(struct pci_dev *dev, bool enable)
+{
+ return 0;
+}
+
+static int mid_pci_run_wake(struct pci_dev *dev, bool enable)
+{
+ return 0;
+}
+
+static bool mid_pci_need_resume(struct pci_dev *dev)
+{
+ return false;
+}
+
+static struct pci_platform_pm_ops mid_pci_platform_pm = {
+ .is_manageable = mid_pci_power_manageable,
+ .set_state = mid_pci_set_power_state,
+ .choose_state = mid_pci_choose_state,
+ .sleep_wake = mid_pci_sleep_wake,
+ .run_wake = mid_pci_run_wake,
+ .need_resume = mid_pci_need_resume,
+};
+
+#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
+
+/*
+ * This table should be in sync with the one in
+ * arch/x86/platform/intel-mid/pwr.c.
+ */
+static const struct x86_cpu_id lpss_cpu_ids[] = {
+ ICPU(INTEL_FAM6_ATOM_PENWELL),
+ ICPU(INTEL_FAM6_ATOM_MERRIFIELD),
+ {}
+};
+
+static int __init mid_pci_init(void)
+{
+ const struct x86_cpu_id *id;
+
+ id = x86_match_cpu(lpss_cpu_ids);
+ if (id)
+ pci_set_platform_pm(&mid_pci_platform_pm);
+ return 0;
+}
+arch_initcall(mid_pci_init);
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index d319a9ca9..bcd10c795 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -406,6 +406,11 @@ static ssize_t d3cold_allowed_store(struct device *dev,
return -EINVAL;
pdev->d3cold_allowed = !!val;
+ if (pdev->d3cold_allowed)
+ pci_d3cold_enable(pdev);
+ else
+ pci_d3cold_disable(pdev);
+
pm_runtime_resume(dev);
return count;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index c8b4dbdd1..aab9d5115 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -7,8 +7,10 @@
* Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
*/
+#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
+#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_pci.h>
@@ -25,7 +27,9 @@
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
+#include <linux/vmalloc.h>
#include <asm/setup.h>
+#include <asm/dma.h>
#include <linux/aer.h>
#include "pci.h"
@@ -81,6 +85,9 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
+#define DEFAULT_HOTPLUG_BUS_SIZE 1
+unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
+
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
/*
@@ -101,6 +108,21 @@ unsigned int pcibios_max_latency = 255;
/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;
+/* Disable bridge_d3 for all PCIe ports */
+static bool pci_bridge_d3_disable;
+/* Force bridge_d3 for all PCIe ports */
+static bool pci_bridge_d3_force;
+
+static int __init pcie_port_pm_setup(char *str)
+{
+ if (!strcmp(str, "off"))
+ pci_bridge_d3_disable = true;
+ else if (!strcmp(str, "force"))
+ pci_bridge_d3_force = true;
+ return 1;
+}
+__setup("pcie_port_pm=", pcie_port_pm_setup);
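This yields a new boot parameter: pcie_port_pm=off keeps all PCIe ports out of D3, and pcie_port_pm=force allows D3 even for ports that would fail the checks in pci_bridge_d3_possible() below.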
+
/**
* pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
* @bus: pointer to PCI bus structure to search
@@ -530,8 +552,8 @@ static const struct pci_platform_pm_ops *pci_platform_pm;
int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
{
- if (!ops->is_manageable || !ops->set_state || !ops->choose_state
- || !ops->sleep_wake)
+ if (!ops->is_manageable || !ops->set_state || !ops->choose_state ||
+ !ops->sleep_wake || !ops->run_wake || !ops->need_resume)
return -EINVAL;
pci_platform_pm = ops;
return 0;
@@ -2156,6 +2178,164 @@ void pci_config_pm_runtime_put(struct pci_dev *pdev)
}
/**
+ * pci_bridge_d3_possible - Is it possible to put the bridge into D3
+ * @bridge: Bridge to check
+ *
+ * This function checks if it is possible to move the bridge to D3.
+ * Currently we only allow D3 for recent enough PCIe ports.
+ */
+static bool pci_bridge_d3_possible(struct pci_dev *bridge)
+{
+ unsigned int year;
+
+ if (!pci_is_pcie(bridge))
+ return false;
+
+ switch (pci_pcie_type(bridge)) {
+ case PCI_EXP_TYPE_ROOT_PORT:
+ case PCI_EXP_TYPE_UPSTREAM:
+ case PCI_EXP_TYPE_DOWNSTREAM:
+ if (pci_bridge_d3_disable)
+ return false;
+ if (pci_bridge_d3_force)
+ return true;
+
+ /*
+ * It should be safe to put PCIe ports from 2015 or newer
+ * into D3.
+ */
+ if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) &&
+ year >= 2015) {
+ return true;
+ }
+ break;
+ }
+
+ return false;
+}
+
+static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
+{
+ bool *d3cold_ok = data;
+ bool no_d3cold;
+
+ /*
+ * The device needs to be allowed to go to D3cold and, if it is
+ * wake capable, to signal wakeup from D3cold.
+ */
+ no_d3cold = dev->no_d3cold || !dev->d3cold_allowed ||
+ (device_may_wakeup(&dev->dev) && !pci_pme_capable(dev, PCI_D3cold)) ||
+ !pci_power_manageable(dev);
+
+ *d3cold_ok = !no_d3cold;
+
+ return no_d3cold;
+}
+
+/*
+ * pci_bridge_d3_update - Update bridge D3 capabilities
+ * @dev: PCI device which is changed
+ * @remove: Is the device being removed
+ *
+ * Update upstream bridge PM capabilities depending on whether the
+ * device's PM configuration was changed or the device is being removed.
+ * The change is also propagated upstream.
+ */
+static void pci_bridge_d3_update(struct pci_dev *dev, bool remove)
+{
+ struct pci_dev *bridge;
+ bool d3cold_ok = true;
+
+ bridge = pci_upstream_bridge(dev);
+ if (!bridge || !pci_bridge_d3_possible(bridge))
+ return;
+
+ pci_dev_get(bridge);
+ /*
+ * If the device is removed we do not care about its D3cold
+ * capabilities.
+ */
+ if (!remove)
+ pci_dev_check_d3cold(dev, &d3cold_ok);
+
+ if (d3cold_ok) {
+ /*
+ * We need to go through all children to find out if all of
+ * them can still go to D3cold.
+ */
+ pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
+ &d3cold_ok);
+ }
+
+ if (bridge->bridge_d3 != d3cold_ok) {
+ bridge->bridge_d3 = d3cold_ok;
+ /* Propagate change to upstream bridges */
+ pci_bridge_d3_update(bridge, false);
+ }
+
+ pci_dev_put(bridge);
+}
+
+/**
+ * pci_bridge_d3_device_changed - Update bridge D3 capabilities on change
+ * @dev: PCI device that was changed
+ *
+ * If a device is added or its PM configuration, such as whether it is
+ * allowed to enter D3cold, is changed, this function updates upstream
+ * bridge PM capabilities accordingly.
+ */
+void pci_bridge_d3_device_changed(struct pci_dev *dev)
+{
+ pci_bridge_d3_update(dev, false);
+}
+
+/**
+ * pci_bridge_d3_device_removed - Update bridge D3 capabilities on remove
+ * @dev: PCI device being removed
+ *
+ * This function updates upstream bridge PM capabilities based on the
+ * devices still left on the bus.
+ */
+void pci_bridge_d3_device_removed(struct pci_dev *dev)
+{
+ pci_bridge_d3_update(dev, true);
+}
+
+/**
+ * pci_d3cold_enable - Enable D3cold for device
+ * @dev: PCI device to handle
+ *
+ * This function can be used in drivers to enable D3cold for the device
+ * they handle. It also updates upstream PCI bridge PM capabilities
+ * accordingly.
+ */
+void pci_d3cold_enable(struct pci_dev *dev)
+{
+ if (dev->no_d3cold) {
+ dev->no_d3cold = false;
+ pci_bridge_d3_device_changed(dev);
+ }
+}
+EXPORT_SYMBOL_GPL(pci_d3cold_enable);
+
+/**
+ * pci_d3cold_disable - Disable D3cold for device
+ * @dev: PCI device to handle
+ *
+ * This function can be used in drivers to disable D3cold for the device
+ * they handle. It also updates upstream PCI bridge PM capabilities
+ * accordingly.
+ */
+void pci_d3cold_disable(struct pci_dev *dev)
+{
+ if (!dev->no_d3cold) {
+ dev->no_d3cold = true;
+ pci_bridge_d3_device_changed(dev);
+ }
+}
+EXPORT_SYMBOL_GPL(pci_d3cold_disable);
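A hedged sketch of the intended caller, a driver whose hardware cannot tolerate D3cold (names are illustrative):

    static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
        /* hypothetical device that loses critical state in D3cold */
        pci_d3cold_disable(pdev);
        /* ... normal probe work ... */
        return 0;
    }

The upstream bridges are re-evaluated immediately via pci_bridge_d3_device_changed().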
+
+/**
* pci_pm_init - Initialize PM functions of given PCI device
* @dev: PCI device to handle.
*/
@@ -2189,6 +2369,7 @@ void pci_pm_init(struct pci_dev *dev)
dev->pm_cap = pm;
dev->d3_delay = PCI_PM_D3_WAIT;
dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
+ dev->bridge_d3 = pci_bridge_d3_possible(dev);
dev->d3cold_allowed = true;
dev->d1_support = false;
@@ -3165,6 +3346,23 @@ int __weak pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
#endif
}
+/**
+ * pci_unmap_iospace - Unmap the memory mapped I/O space
+ * @res: resource to be unmapped
+ *
+ * Unmap the CPU virtual address @res from virtual address space.
+ * Only architectures that have memory mapped IO functions defined
+ * (and the PCI_IOBASE value defined) should call this function.
+ */
+void pci_unmap_iospace(struct resource *res)
+{
+#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
+ unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
+
+ unmap_kernel_range(vaddr, resource_size(res));
+#endif
+}
+
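A sketch of the pairing with pci_remap_iospace() in a host-bridge driver; io_res and io_phys are assumed locals, not from this patch:

    int err;

    /* map the bridge's I/O window at PCI_IOBASE + io_res.start */
    err = pci_remap_iospace(&io_res, io_phys);
    if (err)
        return err;
    /* ... bridge in service ... */
    pci_unmap_iospace(&io_res);   /* symmetric teardown on remove/error */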
static void __pci_set_master(struct pci_dev *dev, bool enable)
{
u16 old_cmd, cmd;
@@ -4755,6 +4953,7 @@ static DEFINE_SPINLOCK(resource_alignment_lock);
static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
{
int seg, bus, slot, func, align_order, count;
+ unsigned short vendor, device, subsystem_vendor, subsystem_device;
resource_size_t align = 0;
char *p;
@@ -4768,28 +4967,55 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
} else {
align_order = -1;
}
- if (sscanf(p, "%x:%x:%x.%x%n",
- &seg, &bus, &slot, &func, &count) != 4) {
- seg = 0;
- if (sscanf(p, "%x:%x.%x%n",
- &bus, &slot, &func, &count) != 3) {
- /* Invalid format */
- printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
- p);
+ if (strncmp(p, "pci:", 4) == 0) {
+ /* PCI vendor/device (subvendor/subdevice) ids are specified */
+ p += 4;
+ if (sscanf(p, "%hx:%hx:%hx:%hx%n",
+ &vendor, &device, &subsystem_vendor, &subsystem_device, &count) != 4) {
+ if (sscanf(p, "%hx:%hx%n", &vendor, &device, &count) != 2) {
+ printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: pci:%s\n",
+ p);
+ break;
+ }
+ subsystem_vendor = subsystem_device = 0;
+ }
+ p += count;
+ if ((!vendor || (vendor == dev->vendor)) &&
+ (!device || (device == dev->device)) &&
+ (!subsystem_vendor || (subsystem_vendor == dev->subsystem_vendor)) &&
+ (!subsystem_device || (subsystem_device == dev->subsystem_device))) {
+ if (align_order == -1)
+ align = PAGE_SIZE;
+ else
+ align = 1 << align_order;
+ /* Found */
break;
}
}
- p += count;
- if (seg == pci_domain_nr(dev->bus) &&
- bus == dev->bus->number &&
- slot == PCI_SLOT(dev->devfn) &&
- func == PCI_FUNC(dev->devfn)) {
- if (align_order == -1)
- align = PAGE_SIZE;
- else
- align = 1 << align_order;
- /* Found */
- break;
+ else {
+ if (sscanf(p, "%x:%x:%x.%x%n",
+ &seg, &bus, &slot, &func, &count) != 4) {
+ seg = 0;
+ if (sscanf(p, "%x:%x.%x%n",
+ &bus, &slot, &func, &count) != 3) {
+ /* Invalid format */
+ printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
+ p);
+ break;
+ }
+ }
+ p += count;
+ if (seg == pci_domain_nr(dev->bus) &&
+ bus == dev->bus->number &&
+ slot == PCI_SLOT(dev->devfn) &&
+ func == PCI_FUNC(dev->devfn)) {
+ if (align_order == -1)
+ align = PAGE_SIZE;
+ else
+ align = 1 << align_order;
+ /* Found */
+ break;
+ }
}
if (*p != ';' && *p != ',') {
/* End of param or invalid format */
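With the branch added above, resource_alignment= accepts device IDs in addition to geographic addresses; hypothetical command-line usage (IDs illustrative):

    pci=resource_alignment=pci:8086:1533       # PAGE_SIZE-align all BARs of vendor 8086, device 1533
    pci=resource_alignment=16@pci:8086:1533    # use a 2^16 = 64 KiB alignment instead

A zero in any of the vendor/device/subsystem fields matches everything, per the (!vendor || ...) tests above.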
@@ -4897,7 +5123,7 @@ static ssize_t pci_resource_alignment_store(struct bus_type *bus,
return pci_set_resource_alignment_param(buf, count);
}
-BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
+static BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
pci_resource_alignment_store);
static int __init pci_resource_alignment_sysfs_init(void)
@@ -4923,7 +5149,7 @@ int pci_get_new_domain_nr(void)
}
#ifdef CONFIG_PCI_DOMAINS_GENERIC
-void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
+static int of_pci_bus_find_domain_nr(struct device *parent)
{
static int use_dt_domains = -1;
int domain = -1;
@@ -4967,7 +5193,13 @@ void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
domain = -1;
}
- bus->domain_nr = domain;
+ return domain;
+}
+
+int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
+{
+ return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
+ acpi_pci_bus_find_domain_nr(bus);
}
#endif
#endif
@@ -5021,6 +5253,11 @@ static int __init pci_setup(char *str)
pci_hotplug_io_size = memparse(str + 9, &str);
} else if (!strncmp(str, "hpmemsize=", 10)) {
pci_hotplug_mem_size = memparse(str + 10, &str);
+ } else if (!strncmp(str, "hpbussize=", 10)) {
+ pci_hotplug_bus_size =
+ simple_strtoul(str + 10, &str, 0);
+ if (pci_hotplug_bus_size > 0xff)
+ pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
pcie_bus_config = PCIE_BUS_TUNE_OFF;
} else if (!strncmp(str, "pcie_bus_safe", 13)) {
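The matching boot-time usage, with an illustrative value: pci=hpbussize=8 reserves at least eight bus numbers behind every hotplug bridge (enforced by the pci_scan_child_bus() change in probe.c further down), which is useful for deep hot-plugged chains; values above 0xff fall back to the default of 1.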
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index a814bbb80..9730c474b 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -82,6 +82,8 @@ void pci_pm_init(struct pci_dev *dev);
void pci_ea_init(struct pci_dev *dev);
void pci_allocate_cap_save_buffers(struct pci_dev *dev);
void pci_free_cap_save_buffers(struct pci_dev *dev);
+void pci_bridge_d3_device_changed(struct pci_dev *dev);
+void pci_bridge_d3_device_removed(struct pci_dev *dev);
static inline void pci_wakeup_event(struct pci_dev *dev)
{
@@ -94,6 +96,15 @@ static inline bool pci_has_subordinate(struct pci_dev *pci_dev)
return !!(pci_dev->subordinate);
}
+static inline bool pci_power_manageable(struct pci_dev *pci_dev)
+{
+ /*
+ * Currently we allow normal PCI devices to transition into D3;
+ * PCI bridges may do so only if their bridge_d3 flag is set.
+ */
+ return !pci_has_subordinate(pci_dev) || pci_dev->bridge_d3;
+}
+
struct pci_vpd_ops {
ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 22ca6412b..7fcea75af 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -83,7 +83,7 @@ config PCIE_PME
depends on PCIEPORTBUS && PM
config PCIE_DPC
- tristate "PCIe Downstream Port Containment support"
+ bool "PCIe Downstream Port Containment support"
depends on PCIEPORTBUS
default n
help
@@ -92,6 +92,3 @@ config PCIE_DPC
will be handled by the DPC driver. If your system doesn't
have this capability or you do not want to use this feature,
it is safe to answer N.
-
- To compile this driver as a module, choose M here: the module
- will be called pcie-dpc.
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 2dfe7fdb7..0ec649d96 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -139,7 +139,7 @@ static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
{
/* Don't enable Clock PM if the link is not Clock PM capable */
- if (!link->clkpm_capable && enable)
+ if (!link->clkpm_capable)
enable = 0;
/* Need nothing if the specified equals to current state */
if (link->clkpm_enabled == enable)
diff --git a/drivers/pci/pcie/pcie-dpc.c b/drivers/pci/pcie/pcie-dpc.c
index ab552f1bc..250f87861 100644
--- a/drivers/pci/pcie/pcie-dpc.c
+++ b/drivers/pci/pcie/pcie-dpc.c
@@ -15,8 +15,8 @@
struct dpc_dev {
struct pcie_device *dev;
- struct work_struct work;
- int cap_pos;
+ struct work_struct work;
+ int cap_pos;
};
static void dpc_wait_link_inactive(struct pci_dev *pdev)
@@ -89,7 +89,7 @@ static int dpc_probe(struct pcie_device *dev)
int status;
u16 ctl, cap;
- dpc = kzalloc(sizeof(*dpc), GFP_KERNEL);
+ dpc = devm_kzalloc(&dev->device, sizeof(*dpc), GFP_KERNEL);
if (!dpc)
return -ENOMEM;
@@ -98,11 +98,12 @@ static int dpc_probe(struct pcie_device *dev)
INIT_WORK(&dpc->work, interrupt_event_handler);
set_service_data(dev, dpc);
- status = request_irq(dev->irq, dpc_irq, IRQF_SHARED, "pcie-dpc", dpc);
+ status = devm_request_irq(&dev->device, dev->irq, dpc_irq, IRQF_SHARED,
+ "pcie-dpc", dpc);
if (status) {
dev_warn(&dev->device, "request IRQ%d failed: %d\n", dev->irq,
status);
- goto out;
+ return status;
}
pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CAP, &cap);
@@ -117,9 +118,6 @@ static int dpc_probe(struct pcie_device *dev)
FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), (cap >> 8) & 0xf,
FLAG(cap, PCI_EXP_DPC_CAP_DL_ACTIVE));
return status;
- out:
- kfree(dpc);
- return status;
}
static void dpc_remove(struct pcie_device *dev)
@@ -131,14 +129,11 @@ static void dpc_remove(struct pcie_device *dev)
pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, &ctl);
ctl &= ~(PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN);
pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl);
-
- free_irq(dev->irq, dpc);
- kfree(dpc);
}
static struct pcie_port_service_driver dpcdriver = {
.name = "dpc",
- .port_type = PCI_EXP_TYPE_ROOT_PORT | PCI_EXP_TYPE_DOWNSTREAM,
+ .port_type = PCIE_ANY_PORT,
.service = PCIE_PORT_SERVICE_DPC,
.probe = dpc_probe,
.remove = dpc_remove,
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 32d4d0a3d..e9270b402 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/pcieport_if.h>
@@ -342,6 +343,8 @@ static int pcie_device_init(struct pci_dev *pdev, int service, int irq)
return retval;
}
+ pm_runtime_no_callbacks(device);
+
return 0;
}
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index be35da2e1..70d7ad8c6 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -93,6 +93,26 @@ static int pcie_port_resume_noirq(struct device *dev)
return 0;
}
+static int pcie_port_runtime_suspend(struct device *dev)
+{
+ return to_pci_dev(dev)->bridge_d3 ? 0 : -EBUSY;
+}
+
+static int pcie_port_runtime_resume(struct device *dev)
+{
+ return 0;
+}
+
+static int pcie_port_runtime_idle(struct device *dev)
+{
+ /*
+ * Assume the PCI core has set bridge_d3 whenever it thinks the port
+ * should be good to go to D3. Everything else, including moving
+ * the port to D3, is handled by the PCI core.
+ */
+ return to_pci_dev(dev)->bridge_d3 ? 0 : -EBUSY;
+}
+
static const struct dev_pm_ops pcie_portdrv_pm_ops = {
.suspend = pcie_port_device_suspend,
.resume = pcie_port_device_resume,
@@ -101,6 +121,9 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = {
.poweroff = pcie_port_device_suspend,
.restore = pcie_port_device_resume,
.resume_noirq = pcie_port_resume_noirq,
+ .runtime_suspend = pcie_port_runtime_suspend,
+ .runtime_resume = pcie_port_runtime_resume,
+ .runtime_idle = pcie_port_runtime_idle,
};
#define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops)
@@ -134,16 +157,39 @@ static int pcie_portdrv_probe(struct pci_dev *dev,
return status;
pci_save_state(dev);
+
/*
- * D3cold may not work properly on some PCIe port, so disable
- * it by default.
+ * Prevent runtime PM if the port is advertising support for PCIe
+ * hotplug. Otherwise the BIOS hotplug SMI code might not be able
+ * to enumerate devices behind this port properly (the port is
+ * powered down preventing all config space accesses to the
+ * subordinate devices). We can't be sure for native PCIe hotplug
+ * either so prevent that as well.
*/
- dev->d3cold_allowed = false;
+ if (!dev->is_hotplug_bridge) {
+ /*
+ * Keep the port resumed for 100 ms so that config space
+ * accesses from userspace (lspci) do not cause it to
+ * repeatedly suspend and resume.
+ */
+ pm_runtime_set_autosuspend_delay(&dev->dev, 100);
+ pm_runtime_use_autosuspend(&dev->dev);
+ pm_runtime_mark_last_busy(&dev->dev);
+ pm_runtime_put_autosuspend(&dev->dev);
+ pm_runtime_allow(&dev->dev);
+ }
+
return 0;
}
static void pcie_portdrv_remove(struct pci_dev *dev)
{
+ if (!dev->is_hotplug_bridge) {
+ pm_runtime_forbid(&dev->dev);
+ pm_runtime_get_noresume(&dev->dev);
+ pm_runtime_dont_use_autosuspend(&dev->dev);
+ }
+
pcie_port_device_remove(dev);
}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 8e3ef7209..93f280df3 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -16,6 +16,7 @@
#include <linux/aer.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
+#include <linux/pm_runtime.h>
#include "pci.h"
#define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
@@ -832,6 +833,12 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
u8 primary, secondary, subordinate;
int broken = 0;
+ /*
+ * Make sure the bridge is powered on to be able to access config
+ * space of devices below it.
+ */
+ pm_runtime_get_sync(&dev->dev);
+
pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
primary = buses & 0xFF;
secondary = (buses >> 8) & 0xFF;
@@ -1012,6 +1019,8 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
out:
pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
+ pm_runtime_put(&dev->dev);
+
return max;
}
EXPORT_SYMBOL(pci_scan_bridge);
@@ -2077,6 +2086,15 @@ unsigned int pci_scan_child_bus(struct pci_bus *bus)
}
/*
+ * Make sure a hotplug bridge has at least the minimum requested
+ * number of buses.
+ */
+ if (bus->self && bus->self->is_hotplug_bridge && pci_hotplug_bus_size) {
+ if (max - bus->busn_res.start < pci_hotplug_bus_size - 1)
+ max = bus->busn_res.start + pci_hotplug_bus_size - 1;
+ }
+
+ /*
* We've scanned the bus and so we know all about what's on
* the other side of any bridges that may be on this bus plus
* any devices.
@@ -2127,7 +2145,9 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
b->sysdata = sysdata;
b->ops = ops;
b->number = b->busn_res.start = bus;
- pci_bus_assign_domain_nr(b, parent);
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+ b->domain_nr = pci_bus_find_domain_nr(b, parent);
+#endif
b2 = pci_find_bus(pci_domain_nr(b), bus);
if (b2) {
/* If we already got to this bus through a different bridge, ignore it */
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 3f155e785..2408abe4e 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -231,7 +231,7 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
{
struct pci_dev *dev = PDE_DATA(file_inode(file));
struct pci_filp_private *fpriv = file->private_data;
- int i, ret;
+ int i, ret, write_combine;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
@@ -245,9 +245,12 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
if (i >= PCI_ROM_RESOURCE)
return -ENODEV;
+ if (fpriv->mmap_state == pci_mmap_mem)
+ write_combine = fpriv->write_combine;
+ else
+ write_combine = 0;
ret = pci_mmap_page_range(dev, vma,
- fpriv->mmap_state,
- fpriv->write_combine);
+ fpriv->mmap_state, write_combine);
if (ret < 0)
return ret;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 7902fbf47..44e0ff374 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3327,9 +3327,9 @@ static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev)
if (nhi->vendor != PCI_VENDOR_ID_INTEL
|| (nhi->device != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE &&
nhi->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C &&
+ nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI &&
nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI)
- || nhi->subsystem_vendor != 0x2222
- || nhi->subsystem_device != 0x1111)
+ || nhi->class != PCI_CLASS_SYSTEM_OTHER << 8)
goto out;
dev_info(&dev->dev, "quirk: waiting for thunderbolt to reestablish PCI tunnels...\n");
device_pm_wait_for_dev(&dev->dev, &nhi->dev);
@@ -3344,6 +3344,9 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
quirk_apple_wait_for_thunderbolt);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE,
+ quirk_apple_wait_for_thunderbolt);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE,
quirk_apple_wait_for_thunderbolt);
#endif
@@ -3713,6 +3716,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c59 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x917a,
quirk_dma_func1_alias);
+/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c78 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9182,
+ quirk_dma_func1_alias);
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
quirk_dma_func1_alias);
@@ -3749,6 +3755,9 @@ static const struct pci_device_id fixed_dma_alias_tbl[] = {
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285,
PCI_VENDOR_ID_ADAPTEC2, 0x02bb), /* Adaptec 3405 */
.driver_data = PCI_DEVFN(1, 0) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285,
+ PCI_VENDOR_ID_ADAPTEC2, 0x02bc), /* Adaptec 3805 */
+ .driver_data = PCI_DEVFN(1, 0) },
{ 0 }
};
@@ -4089,6 +4098,7 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_AMD, 0x7809, pci_quirk_amd_sb_acs },
{ PCI_VENDOR_ID_SOLARFLARE, 0x0903, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_SOLARFLARE, 0x0923, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_SOLARFLARE, 0x0A03, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x10C6, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x10DB, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x10DD, pci_quirk_mf_endpoint_acs },
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 898202663..f9357e09e 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -40,6 +40,7 @@ static void pci_destroy_dev(struct pci_dev *dev)
list_del(&dev->bus_list);
up_write(&pci_bus_sem);
+ pci_bridge_d3_device_removed(dev);
pci_free_resources(dev);
put_device(&dev->dev);
}
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 55641a39a..c74059e10 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -695,11 +695,16 @@ static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
}
+void __weak pcibios_setup_bridge(struct pci_bus *bus, unsigned long type)
+{
+}
+
void pci_setup_bridge(struct pci_bus *bus)
{
unsigned long type = IORESOURCE_IO | IORESOURCE_MEM |
IORESOURCE_PREFETCH;
+ pcibios_setup_bridge(bus, type);
__pci_setup_bridge(bus, type);
}
@@ -1423,6 +1428,74 @@ void pci_bus_assign_resources(const struct pci_bus *bus)
}
EXPORT_SYMBOL(pci_bus_assign_resources);
+static void pci_claim_device_resources(struct pci_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
+ struct resource *r = &dev->resource[i];
+
+ if (!r->flags || r->parent)
+ continue;
+
+ pci_claim_resource(dev, i);
+ }
+}
+
+static void pci_claim_bridge_resources(struct pci_dev *dev)
+{
+ int i;
+
+ for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
+ struct resource *r = &dev->resource[i];
+
+ if (!r->flags || r->parent)
+ continue;
+
+ pci_claim_bridge_resource(dev, i);
+ }
+}
+
+static void pci_bus_allocate_dev_resources(struct pci_bus *b)
+{
+ struct pci_dev *dev;
+ struct pci_bus *child;
+
+ list_for_each_entry(dev, &b->devices, bus_list) {
+ pci_claim_device_resources(dev);
+
+ child = dev->subordinate;
+ if (child)
+ pci_bus_allocate_dev_resources(child);
+ }
+}
+
+static void pci_bus_allocate_resources(struct pci_bus *b)
+{
+ struct pci_bus *child;
+
+ /*
+ * Carry out a depth-first search on the PCI bus
+ * tree to allocate bridge apertures. Read the
+ * programmed bridge bases and recursively claim
+ * the respective bridge resources.
+ */
+ if (b->self) {
+ pci_read_bridge_bases(b);
+ pci_claim_bridge_resources(b->self);
+ }
+
+ list_for_each_entry(child, &b->children, node)
+ pci_bus_allocate_resources(child);
+}
+
+void pci_bus_claim_resources(struct pci_bus *b)
+{
+ pci_bus_allocate_resources(b);
+ pci_bus_allocate_dev_resources(b);
+}
+EXPORT_SYMBOL(pci_bus_claim_resources);
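A hedged sketch of the expected caller, a host-controller driver that keeps the resource assignment done by firmware (my_pci_ops, sysdata and resources are illustrative):

    struct pci_bus *bus;

    bus = pci_scan_root_bus(dev, 0, &my_pci_ops, sysdata, &resources);
    if (!bus)
        return -ENODEV;

    /* claim what firmware programmed instead of reassigning from scratch */
    pci_bus_claim_resources(bus);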
+
static void __pci_bridge_assign_resources(const struct pci_dev *bridge,
struct list_head *add_head,
struct list_head *fail_head)
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 5f70fee59..d6ff5e823 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -1086,7 +1086,7 @@ out:
return err;
}
-static void __init_refok pcifront_backend_changed(struct xenbus_device *xdev,
+static void __ref pcifront_backend_changed(struct xenbus_device *xdev,
enum xenbus_state be_state)
{
struct pcifront_device *pdev = dev_get_drvdata(&xdev->dev);
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 489ea1098..69b5e811e 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -977,7 +977,7 @@ static int pcmcia_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
/************************ runtime PM support ***************************/
-static int pcmcia_dev_suspend(struct device *dev, pm_message_t state);
+static int pcmcia_dev_suspend(struct device *dev);
static int pcmcia_dev_resume(struct device *dev);
static int runtime_suspend(struct device *dev)
@@ -985,7 +985,7 @@ static int runtime_suspend(struct device *dev)
int rc;
device_lock(dev);
- rc = pcmcia_dev_suspend(dev, PMSG_SUSPEND);
+ rc = pcmcia_dev_suspend(dev);
device_unlock(dev);
return rc;
}
@@ -1135,7 +1135,7 @@ ATTRIBUTE_GROUPS(pcmcia_dev);
/* PM support, also needed for reset */
-static int pcmcia_dev_suspend(struct device *dev, pm_message_t state)
+static int pcmcia_dev_suspend(struct device *dev)
{
struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
struct pcmcia_driver *p_drv = NULL;
@@ -1410,6 +1410,9 @@ static struct class_interface pcmcia_bus_interface __refdata = {
.remove_dev = &pcmcia_bus_remove_socket,
};
+static const struct dev_pm_ops pcmcia_bus_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pcmcia_dev_suspend, pcmcia_dev_resume)
+};
struct bus_type pcmcia_bus_type = {
.name = "pcmcia",
@@ -1418,8 +1421,7 @@ struct bus_type pcmcia_bus_type = {
.dev_groups = pcmcia_dev_groups,
.probe = pcmcia_device_probe,
.remove = pcmcia_device_remove,
- .suspend = pcmcia_dev_suspend,
- .resume = pcmcia_dev_resume,
+ .pm = &pcmcia_bus_pm_ops,
};
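For reference, under CONFIG_PM_SLEEP the macro used above expands to roughly this (sketch of include/linux/pm.h), wiring the same pair of callbacks into all six system-sleep slots:

    .suspend  = pcmcia_dev_suspend, .resume  = pcmcia_dev_resume,
    .freeze   = pcmcia_dev_suspend, .thaw    = pcmcia_dev_resume,
    .poweroff = pcmcia_dev_suspend, .restore = pcmcia_dev_resume,

so dropping the legacy bus_type suspend/resume methods loses no coverage.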
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index 483f919e0..91b5f5724 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -214,9 +214,8 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
}
#endif
-void pxa2xx_configure_sockets(struct device *dev)
+void pxa2xx_configure_sockets(struct device *dev, struct pcmcia_low_level *ops)
{
- struct pcmcia_low_level *ops = dev->platform_data;
/*
* We have at least one socket, so set MECR:CIT
* (Card Is There)
@@ -322,7 +321,7 @@ static int pxa2xx_drv_pcmcia_probe(struct platform_device *dev)
goto err1;
}
- pxa2xx_configure_sockets(&dev->dev);
+ pxa2xx_configure_sockets(&dev->dev, ops);
dev_set_drvdata(&dev->dev, sinfo);
return 0;
@@ -348,7 +347,9 @@ static int pxa2xx_drv_pcmcia_remove(struct platform_device *dev)
static int pxa2xx_drv_pcmcia_resume(struct device *dev)
{
- pxa2xx_configure_sockets(dev);
+ struct pcmcia_low_level *ops = (struct pcmcia_low_level *)dev->platform_data;
+
+ pxa2xx_configure_sockets(dev, ops);
return 0;
}
diff --git a/drivers/pcmcia/pxa2xx_base.h b/drivers/pcmcia/pxa2xx_base.h
index b609b4546..e58c7a415 100644
--- a/drivers/pcmcia/pxa2xx_base.h
+++ b/drivers/pcmcia/pxa2xx_base.h
@@ -1,4 +1,4 @@
int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt);
void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops);
-void pxa2xx_configure_sockets(struct device *dev);
+void pxa2xx_configure_sockets(struct device *dev, struct pcmcia_low_level *ops);
diff --git a/drivers/pcmcia/sa1111_badge4.c b/drivers/pcmcia/sa1111_badge4.c
index 12f0dd091..2f4909304 100644
--- a/drivers/pcmcia/sa1111_badge4.c
+++ b/drivers/pcmcia/sa1111_badge4.c
@@ -134,20 +134,14 @@ static struct pcmcia_low_level badge4_pcmcia_ops = {
int pcmcia_badge4_init(struct sa1111_dev *dev)
{
- int ret = -ENODEV;
-
- if (machine_is_badge4()) {
- printk(KERN_INFO
- "%s: badge4_pcmvcc=%d, badge4_pcmvpp=%d, badge4_cfvcc=%d\n",
- __func__,
- badge4_pcmvcc, badge4_pcmvpp, badge4_cfvcc);
-
- sa11xx_drv_pcmcia_ops(&badge4_pcmcia_ops);
- ret = sa1111_pcmcia_add(dev, &badge4_pcmcia_ops,
- sa11xx_drv_pcmcia_add_one);
- }
-
- return ret;
+ printk(KERN_INFO
+ "%s: badge4_pcmvcc=%d, badge4_pcmvpp=%d, badge4_cfvcc=%d\n",
+ __func__,
+ badge4_pcmvcc, badge4_pcmvpp, badge4_cfvcc);
+
+ sa11xx_drv_pcmcia_ops(&badge4_pcmcia_ops);
+ return sa1111_pcmcia_add(dev, &badge4_pcmcia_ops,
+ sa11xx_drv_pcmcia_add_one);
}
static int __init pcmv_setup(char *s)
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c
index a1531feb8..3d95dffcf 100644
--- a/drivers/pcmcia/sa1111_generic.c
+++ b/drivers/pcmcia/sa1111_generic.c
@@ -18,6 +18,7 @@
#include <mach/hardware.h>
#include <asm/hardware/sa1111.h>
+#include <asm/mach-types.h>
#include <asm/irq.h>
#include "sa1111_generic.h"
@@ -203,19 +204,30 @@ static int pcmcia_probe(struct sa1111_dev *dev)
sa1111_writel(PCSSR_S0_SLEEP | PCSSR_S1_SLEEP, base + PCSSR);
sa1111_writel(PCCR_S0_FLT | PCCR_S1_FLT, base + PCCR);
+ ret = -ENODEV;
#ifdef CONFIG_SA1100_BADGE4
- pcmcia_badge4_init(dev);
+ if (machine_is_badge4())
+ ret = pcmcia_badge4_init(dev);
#endif
#ifdef CONFIG_SA1100_JORNADA720
- pcmcia_jornada720_init(dev);
+ if (machine_is_jornada720())
+ ret = pcmcia_jornada720_init(dev);
#endif
#ifdef CONFIG_ARCH_LUBBOCK
- pcmcia_lubbock_init(dev);
+ if (machine_is_lubbock())
+ ret = pcmcia_lubbock_init(dev);
#endif
#ifdef CONFIG_ASSABET_NEPONSET
- pcmcia_neponset_init(dev);
+ if (machine_is_assabet())
+ ret = pcmcia_neponset_init(dev);
#endif
- return 0;
+
+ if (ret) {
+ release_mem_region(dev->res.start, 512);
+ sa1111_disable_device(dev);
+ }
+
+ return ret;
}
static int pcmcia_remove(struct sa1111_dev *dev)
diff --git a/drivers/pcmcia/sa1111_jornada720.c b/drivers/pcmcia/sa1111_jornada720.c
index c2c30580c..480a3ede2 100644
--- a/drivers/pcmcia/sa1111_jornada720.c
+++ b/drivers/pcmcia/sa1111_jornada720.c
@@ -94,22 +94,17 @@ static struct pcmcia_low_level jornada720_pcmcia_ops = {
int pcmcia_jornada720_init(struct sa1111_dev *sadev)
{
- int ret = -ENODEV;
+ unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3;
- if (machine_is_jornada720()) {
- unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3;
+ /* FIXME: why are we messing around with SA11x0's GPIO1? */
+ GRER |= 0x00000002;
- GRER |= 0x00000002;
+ /* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */
+ sa1111_set_io_dir(sadev, pin, 0, 0);
+ sa1111_set_io(sadev, pin, 0);
+ sa1111_set_sleep_io(sadev, pin, 0);
- /* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */
- sa1111_set_io_dir(sadev, pin, 0, 0);
- sa1111_set_io(sadev, pin, 0);
- sa1111_set_sleep_io(sadev, pin, 0);
-
- sa11xx_drv_pcmcia_ops(&jornada720_pcmcia_ops);
- ret = sa1111_pcmcia_add(sadev, &jornada720_pcmcia_ops,
- sa11xx_drv_pcmcia_add_one);
- }
-
- return ret;
+ sa11xx_drv_pcmcia_ops(&jornada720_pcmcia_ops);
+ return sa1111_pcmcia_add(sadev, &jornada720_pcmcia_ops,
+ sa11xx_drv_pcmcia_add_one);
}
diff --git a/drivers/pcmcia/sa1111_lubbock.c b/drivers/pcmcia/sa1111_lubbock.c
index c5caf5790..e741f499c 100644
--- a/drivers/pcmcia/sa1111_lubbock.c
+++ b/drivers/pcmcia/sa1111_lubbock.c
@@ -210,27 +210,21 @@ static struct pcmcia_low_level lubbock_pcmcia_ops = {
int pcmcia_lubbock_init(struct sa1111_dev *sadev)
{
- int ret = -ENODEV;
-
- if (machine_is_lubbock()) {
- /*
- * Set GPIO_A<3:0> to be outputs for the MAX1600,
- * and switch to standby mode.
- */
- sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
- sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
- sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
-
- /* Set CF Socket 1 power to standby mode. */
- lubbock_set_misc_wr((1 << 15) | (1 << 14), 0);
+ /*
+ * Set GPIO_A<3:0> to be outputs for the MAX1600,
+ * and switch to standby mode.
+ */
+ sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
+ sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
+ sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
- pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops);
- pxa2xx_configure_sockets(&sadev->dev);
- ret = sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops,
- pxa2xx_drv_pcmcia_add_one);
- }
+ /* Set CF Socket 1 power to standby mode. */
+ lubbock_set_misc_wr((1 << 15) | (1 << 14), 0);
- return ret;
+ pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops);
+ pxa2xx_configure_sockets(&sadev->dev, &lubbock_pcmcia_ops);
+ return sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops,
+ pxa2xx_drv_pcmcia_add_one);
}
MODULE_LICENSE("GPL");
diff --git a/drivers/pcmcia/sa1111_neponset.c b/drivers/pcmcia/sa1111_neponset.c
index 1d78739c4..019c395eb 100644
--- a/drivers/pcmcia/sa1111_neponset.c
+++ b/drivers/pcmcia/sa1111_neponset.c
@@ -110,20 +110,14 @@ static struct pcmcia_low_level neponset_pcmcia_ops = {
int pcmcia_neponset_init(struct sa1111_dev *sadev)
{
- int ret = -ENODEV;
-
- if (machine_is_assabet()) {
- /*
- * Set GPIO_A<3:0> to be outputs for the MAX1600,
- * and switch to standby mode.
- */
- sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
- sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
- sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
- sa11xx_drv_pcmcia_ops(&neponset_pcmcia_ops);
- ret = sa1111_pcmcia_add(sadev, &neponset_pcmcia_ops,
- sa11xx_drv_pcmcia_add_one);
- }
-
- return ret;
+ /*
+ * Set GPIO_A<3:0> to be outputs for the MAX1600,
+ * and switch to standby mode.
+ */
+ sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
+ sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
+ sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
+ sa11xx_drv_pcmcia_ops(&neponset_pcmcia_ops);
+ return sa1111_pcmcia_add(sadev, &neponset_pcmcia_ops,
+ sa11xx_drv_pcmcia_add_one);
}
diff --git a/drivers/pcmcia/sa11xx_base.c b/drivers/pcmcia/sa11xx_base.c
index 9f6ec87b9..48140ac73 100644
--- a/drivers/pcmcia/sa11xx_base.c
+++ b/drivers/pcmcia/sa11xx_base.c
@@ -144,19 +144,19 @@ static int
sa1100_pcmcia_show_timing(struct soc_pcmcia_socket *skt, char *buf)
{
struct soc_pcmcia_timing timing;
- unsigned int clock = clk_get_rate(skt->clk);
+ unsigned int clock = clk_get_rate(skt->clk) / 1000;
unsigned long mecr = MECR;
char *p = buf;
soc_common_pcmcia_get_timing(skt, &timing);
- p+=sprintf(p, "I/O : %u (%u)\n", timing.io,
+ p+=sprintf(p, "I/O : %uns (%uns)\n", timing.io,
sa1100_pcmcia_cmd_time(clock, MECR_BSIO_GET(mecr, skt->nr)));
- p+=sprintf(p, "attribute: %u (%u)\n", timing.attr,
+ p+=sprintf(p, "attribute: %uns (%uns)\n", timing.attr,
sa1100_pcmcia_cmd_time(clock, MECR_BSA_GET(mecr, skt->nr)));
- p+=sprintf(p, "common : %u (%u)\n", timing.mem,
+ p+=sprintf(p, "common : %uns (%uns)\n", timing.mem,
sa1100_pcmcia_cmd_time(clock, MECR_BSM_GET(mecr, skt->nr)));
return p - buf;
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index eed5e9c05..d5ca760c4 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -235,7 +235,7 @@ static unsigned int soc_common_pcmcia_skt_state(struct soc_pcmcia_socket *skt)
stat |= skt->cs_state.Vcc ? SS_POWERON : 0;
if (skt->cs_state.flags & SS_IOCARD)
- stat |= state.bvd1 ? SS_STSCHG : 0;
+ stat |= state.bvd1 ? 0 : SS_STSCHG;
else {
if (state.bvd1 == 0)
stat |= SS_BATDEAD;
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 140436a04..f5e1008a2 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -603,7 +603,8 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
irq = platform_get_irq(pmu_device, 0);
if (irq >= 0 && irq_is_percpu(irq)) {
- on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
+ on_each_cpu_mask(&cpu_pmu->supported_cpus,
+ cpu_pmu_disable_percpu_irq, &irq, 1);
free_percpu_irq(irq, &hw_events->percpu_pmu);
} else {
for (i = 0; i < irqs; ++i) {
@@ -645,7 +646,9 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
irq);
return err;
}
- on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
+
+ on_each_cpu_mask(&cpu_pmu->supported_cpus,
+ cpu_pmu_enable_percpu_irq, &irq, 1);
} else {
for (i = 0; i < irqs; ++i) {
int cpu = i;
@@ -685,30 +688,29 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
return 0;
}
+static DEFINE_SPINLOCK(arm_pmu_lock);
+static LIST_HEAD(arm_pmu_list);
+
/*
* PMU hardware loses all context when a CPU goes offline.
* When a CPU is hotplugged back in, since some hardware registers are
* UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
* junk values out of them.
*/
-static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
- void *hcpu)
+static int arm_perf_starting_cpu(unsigned int cpu)
{
- int cpu = (unsigned long)hcpu;
- struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);
-
- if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
- return NOTIFY_DONE;
+ struct arm_pmu *pmu;
- if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
- return NOTIFY_DONE;
+ spin_lock(&arm_pmu_lock);
+ list_for_each_entry(pmu, &arm_pmu_list, entry) {
- if (pmu->reset)
- pmu->reset(pmu);
- else
- return NOTIFY_DONE;
-
- return NOTIFY_OK;
+ if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
+ continue;
+ if (pmu->reset)
+ pmu->reset(pmu);
+ }
+ spin_unlock(&arm_pmu_lock);
+ return 0;
}
#ifdef CONFIG_CPU_PM
@@ -819,10 +821,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
if (!cpu_hw_events)
return -ENOMEM;
- cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
- err = register_cpu_notifier(&cpu_pmu->hotplug_nb);
- if (err)
- goto out_hw_events;
+ spin_lock(&arm_pmu_lock);
+ list_add_tail(&cpu_pmu->entry, &arm_pmu_list);
+ spin_unlock(&arm_pmu_lock);
err = cpu_pm_pmu_register(cpu_pmu);
if (err)
@@ -858,8 +859,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
return 0;
out_unregister:
- unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
-out_hw_events:
+ spin_lock(&arm_pmu_lock);
+ list_del(&cpu_pmu->entry);
+ spin_unlock(&arm_pmu_lock);
free_percpu(cpu_hw_events);
return err;
}
@@ -867,7 +869,9 @@ out_hw_events:
static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
cpu_pm_pmu_unregister(cpu_pmu);
- unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
+ spin_lock(&arm_pmu_lock);
+ list_del(&cpu_pmu->entry);
+ spin_unlock(&arm_pmu_lock);
free_percpu(cpu_pmu->hw_events);
}
@@ -921,6 +925,7 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
if (i > 0 && spi != using_spi) {
pr_err("PPI/SPI IRQ type mismatch for %s!\n",
dn->name);
+ of_node_put(dn);
kfree(irqs);
return -EINVAL;
}
@@ -961,9 +966,24 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
i++;
} while (1);
- /* If we didn't manage to parse anything, claim to support all CPUs */
- if (cpumask_weight(&pmu->supported_cpus) == 0)
- cpumask_setall(&pmu->supported_cpus);
+ /* If we didn't manage to parse anything, try the interrupt affinity */
+ if (cpumask_weight(&pmu->supported_cpus) == 0) {
+ int irq = platform_get_irq(pdev, 0);
+
+ if (irq >= 0 && irq_is_percpu(irq)) {
+ /* If using PPIs, check the affinity of the partition */
+ int ret;
+
+ ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
+ if (ret) {
+ kfree(irqs);
+ return ret;
+ }
+ } else {
+ /* Otherwise default to all CPUs */
+ cpumask_setall(&pmu->supported_cpus);
+ }
+ }
/* If we matched up the IRQ affinities, use them to route the SPIs */
if (using_spi && i == pdev->num_resources)
@@ -1044,3 +1064,17 @@ out_free:
kfree(pmu);
return ret;
}
+
+static int arm_pmu_hp_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_STARTING,
+ "AP_PERF_ARM_STARTING",
+ arm_perf_starting_cpu, NULL);
+ if (ret)
+ pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
+ ret);
+ return ret;
+}
+subsys_initcall(arm_pmu_hp_init);
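Note on the conversion: cpuhp_setup_state_nocalls() registers the CPUHP_AP_PERF_ARM_STARTING callback without invoking it for CPUs that are already online (their PMUs need no reset), arm_perf_starting_cpu() then runs on every CPU brought online later, and the NULL teardown argument means no work is needed when a CPU goes down.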
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index b869b9883..19bff3a10 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -44,6 +44,16 @@ config ARMADA375_USBCLUSTER_PHY
depends on OF && HAS_IOMEM
select GENERIC_PHY
+config PHY_DA8XX_USB
+ tristate "TI DA8xx USB PHY Driver"
+ depends on ARCH_DAVINCI_DA8XX
+ select GENERIC_PHY
+ select MFD_SYSCON
+ help
+ Enable this to support the USB PHY on DA8xx SoCs.
+
+ This driver controls both the USB 1.1 PHY and the USB 2.0 PHY.
+
config PHY_DM816X_USB
tristate "TI dm816x USB PHY driver"
depends on ARCH_OMAP2PLUS
@@ -176,6 +186,7 @@ config TWL4030_USB
tristate "TWL4030 USB Transceiver Driver"
depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS
depends on USB_SUPPORT
+ depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
select GENERIC_PHY
select USB_PHY
help
@@ -434,4 +445,12 @@ config PHY_CYGNUS_PCIE
source "drivers/phy/tegra/Kconfig"
+config PHY_NS2_PCIE
+ tristate "Broadcom Northstar2 PCIe PHY driver"
+ depends on OF && MDIO_BUS_MUX_BCM_IPROC
+ select GENERIC_PHY
+ default ARCH_BCM_IPROC
+ help
+ Enable this to support the Broadcom Northstar2 PCIe PHY.
+ If unsure, say N.
endmenu
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 9c3e73cca..90ae19879 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_GENERIC_PHY) += phy-core.o
obj-$(CONFIG_PHY_BCM_NS_USB2) += phy-bcm-ns-usb2.o
obj-$(CONFIG_PHY_BERLIN_USB) += phy-berlin-usb.o
obj-$(CONFIG_PHY_BERLIN_SATA) += phy-berlin-sata.o
+obj-$(CONFIG_PHY_DA8XX_USB) += phy-da8xx-usb.o
obj-$(CONFIG_PHY_DM816X_USB) += phy-dm816x-usb.o
obj-$(CONFIG_ARMADA375_USBCLUSTER_PHY) += phy-armada375-usb2.o
obj-$(CONFIG_BCM_KONA_USB2_PHY) += phy-bcm-kona-usb2.o
@@ -53,5 +54,5 @@ obj-$(CONFIG_PHY_TUSB1210) += phy-tusb1210.o
obj-$(CONFIG_PHY_BRCM_SATA) += phy-brcm-sata.o
obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o
obj-$(CONFIG_PHY_CYGNUS_PCIE) += phy-bcm-cygnus-pcie.o
-
obj-$(CONFIG_ARCH_TEGRA) += tegra/
+obj-$(CONFIG_PHY_NS2_PCIE) += phy-bcm-ns2-pcie.o
diff --git a/drivers/phy/phy-bcm-ns2-pcie.c b/drivers/phy/phy-bcm-ns2-pcie.c
new file mode 100644
index 000000000..9513f7ab1
--- /dev/null
+++ b/drivers/phy/phy-bcm-ns2-pcie.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
+#include <linux/phy/phy.h>
+
+struct ns2_pci_phy {
+ struct mdio_device *mdiodev;
+ struct phy *phy;
+};
+
+#define BLK_ADDR_REG_OFFSET 0x1f
+#define PLL_AFE1_100MHZ_BLK 0x2100
+#define PLL_CLK_AMP_OFFSET 0x03
+#define PLL_CLK_AMP_2P05V 0x2b18
+
+static int ns2_pci_phy_init(struct phy *p)
+{
+ struct ns2_pci_phy *phy = phy_get_drvdata(p);
+ int rc;
+
+ /* select the AFE 100MHz block page */
+ rc = mdiobus_write(phy->mdiodev->bus, phy->mdiodev->addr,
+ BLK_ADDR_REG_OFFSET, PLL_AFE1_100MHZ_BLK);
+ if (rc)
+ goto err;
+
+ /* set the 100 MHz reference clock amplitude to 2.05 v */
+ rc = mdiobus_write(phy->mdiodev->bus, phy->mdiodev->addr,
+ PLL_CLK_AMP_OFFSET, PLL_CLK_AMP_2P05V);
+ if (rc)
+ goto err;
+
+ return 0;
+
+err:
+ dev_err(&phy->mdiodev->dev, "Error %d writing to phy\n", rc);
+ return rc;
+}
+
+static struct phy_ops ns2_pci_phy_ops = {
+ .init = ns2_pci_phy_init,
+};
+
+static int ns2_pci_phy_probe(struct mdio_device *mdiodev)
+{
+ struct device *dev = &mdiodev->dev;
+ struct phy_provider *provider;
+ struct ns2_pci_phy *p;
+ struct phy *phy;
+
+ phy = devm_phy_create(dev, dev->of_node, &ns2_pci_phy_ops);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "failed to create Phy\n");
+ return PTR_ERR(phy);
+ }
+
+ p = devm_kmalloc(dev, sizeof(struct ns2_pci_phy),
+ GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ p->mdiodev = mdiodev;
+ dev_set_drvdata(dev, p);
+
+ p->phy = phy;
+ phy_set_drvdata(phy, p);
+
+ provider = devm_of_phy_provider_register(&phy->dev,
+ of_phy_simple_xlate);
+ if (IS_ERR(provider)) {
+ dev_err(dev, "failed to register Phy provider\n");
+ return PTR_ERR(provider);
+ }
+
+ dev_info(dev, "%s PHY registered\n", dev_name(dev));
+
+ return 0;
+}
+
+static const struct of_device_id ns2_pci_phy_of_match[] = {
+ { .compatible = "brcm,ns2-pcie-phy", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ns2_pci_phy_of_match);
+
+static struct mdio_driver ns2_pci_phy_driver = {
+ .mdiodrv = {
+ .driver = {
+ .name = "phy-bcm-ns2-pci",
+ .of_match_table = ns2_pci_phy_of_match,
+ },
+ },
+ .probe = ns2_pci_phy_probe,
+};
+mdio_module_driver(ns2_pci_phy_driver);
+
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("Broadcom Northstar2 PCI Phy driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:phy-bcm-ns2-pci");
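
Consumer-side view of the new driver, as a hedged sketch: a PCIe host driver would obtain this PHY through the provider registered above and call phy_init(), which performs the two MDIO writes in ns2_pci_phy_init(). The "pcie-phy" con_id and the function name are assumptions for illustration, not taken from this patch.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/phy/phy.h>

static int consumer_attach_phy(struct device *dev)
{
	struct phy *phy;

	/* "pcie-phy" is a hypothetical phy-names entry */
	phy = devm_of_phy_get(dev, dev->of_node, "pcie-phy");
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	/* lands in ns2_pci_phy_init(): selects the AFE block, sets amplitude */
	return phy_init(phy);
}
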
diff --git a/drivers/phy/phy-brcm-sata.c b/drivers/phy/phy-brcm-sata.c
index 6c4c5cb79..8ffc44afd 100644
--- a/drivers/phy/phy-brcm-sata.c
+++ b/drivers/phy/phy-brcm-sata.c
@@ -45,6 +45,7 @@ enum brcm_sata_phy_version {
BRCM_SATA_PHY_STB_28NM,
BRCM_SATA_PHY_STB_40NM,
BRCM_SATA_PHY_IPROC_NS2,
+ BRCM_SATA_PHY_IPROC_NSP,
};
struct brcm_sata_port {
@@ -73,6 +74,13 @@ enum sata_phy_regs {
PLL_REG_BANK_0 = 0x050,
PLL_REG_BANK_0_PLLCONTROL_0 = 0x81,
+ PLLCONTROL_0_FREQ_DET_RESTART = BIT(13),
+ PLLCONTROL_0_FREQ_MONITOR = BIT(12),
+ PLLCONTROL_0_SEQ_START = BIT(15),
+ PLL_CAP_CONTROL = 0x85,
+ PLL_ACTRL2 = 0x8b,
+ PLL_ACTRL2_SELDIV_MASK = 0x1f,
+ PLL_ACTRL2_SELDIV_SHIFT = 9,
PLL1_REG_BANK = 0x060,
PLL1_ACTRL2 = 0x82,
@@ -80,6 +88,7 @@ enum sata_phy_regs {
PLL1_ACTRL4 = 0x84,
OOB_REG_BANK = 0x150,
+ OOB1_REG_BANK = 0x160,
OOB_CTRL1 = 0x80,
OOB_CTRL1_BURST_MAX_MASK = 0xf,
OOB_CTRL1_BURST_MAX_SHIFT = 12,
@@ -271,6 +280,73 @@ static int brcm_ns2_sata_init(struct brcm_sata_port *port)
return 0;
}
+static int brcm_nsp_sata_init(struct brcm_sata_port *port)
+{
+ struct brcm_sata_phy *priv = port->phy_priv;
+ struct device *dev = port->phy_priv->dev;
+ void __iomem *base = priv->phy_base;
+ unsigned int oob_bank;
+ unsigned int val, try;
+
+ /* Configure OOB control */
+ if (port->portnum == 0)
+ oob_bank = OOB_REG_BANK;
+ else if (port->portnum == 1)
+ oob_bank = OOB1_REG_BANK;
+ else
+ return -EINVAL;
+
+ val = 0x0;
+ val |= (0x0f << OOB_CTRL1_BURST_MAX_SHIFT);
+ val |= (0x06 << OOB_CTRL1_BURST_MIN_SHIFT);
+ val |= (0x0f << OOB_CTRL1_WAKE_IDLE_MAX_SHIFT);
+ val |= (0x06 << OOB_CTRL1_WAKE_IDLE_MIN_SHIFT);
+ brcm_sata_phy_wr(base, oob_bank, OOB_CTRL1, 0x0, val);
+
+ val = 0x0;
+ val |= (0x2e << OOB_CTRL2_RESET_IDLE_MAX_SHIFT);
+ val |= (0x02 << OOB_CTRL2_BURST_CNT_SHIFT);
+ val |= (0x16 << OOB_CTRL2_RESET_IDLE_MIN_SHIFT);
+ brcm_sata_phy_wr(base, oob_bank, OOB_CTRL2, 0x0, val);
+
+
+ brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_ACTRL2,
+ ~(PLL_ACTRL2_SELDIV_MASK << PLL_ACTRL2_SELDIV_SHIFT),
+ 0x0c << PLL_ACTRL2_SELDIV_SHIFT);
+
+ brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_CAP_CONTROL,
+ 0xff0, 0x4f0);
+
+ val = PLLCONTROL_0_FREQ_DET_RESTART | PLLCONTROL_0_FREQ_MONITOR;
+ brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0,
+ ~val, val);
+ val = PLLCONTROL_0_SEQ_START;
+ brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0,
+ ~val, 0);
+ mdelay(10);
+ brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0,
+ ~val, val);
+
+ /* Wait for pll_seq_done bit */
+ try = 50;
+ while (try--) {
+ val = brcm_sata_phy_rd(base, BLOCK0_REG_BANK,
+ BLOCK0_XGXSSTATUS);
+ if (val & BLOCK0_XGXSSTATUS_PLL_LOCK)
+ break;
+ msleep(20);
+ }
+ if (!(val & BLOCK0_XGXSSTATUS_PLL_LOCK)) {
+ /* PLL did not lock; give up */
+ dev_err(dev, "port%d PLL did not lock\n", port->portnum);
+ return -ETIMEDOUT;
+ }
+
+ dev_dbg(dev, "port%d initialized\n", port->portnum);
+
+ return 0;
+}
+
static int brcm_sata_phy_init(struct phy *phy)
{
int rc;
@@ -284,11 +360,14 @@ static int brcm_sata_phy_init(struct phy *phy)
case BRCM_SATA_PHY_IPROC_NS2:
rc = brcm_ns2_sata_init(port);
break;
+ case BRCM_SATA_PHY_IPROC_NSP:
+ rc = brcm_nsp_sata_init(port);
+ break;
default:
rc = -ENODEV;
};
- return 0;
+ return rc;
}
static const struct phy_ops phy_ops = {
@@ -303,6 +382,8 @@ static const struct of_device_id brcm_sata_phy_of_match[] = {
.data = (void *)BRCM_SATA_PHY_STB_40NM },
{ .compatible = "brcm,iproc-ns2-sata-phy",
.data = (void *)BRCM_SATA_PHY_IPROC_NS2 },
+ { .compatible = "brcm,iproc-nsp-sata-phy",
+ .data = (void *)BRCM_SATA_PHY_IPROC_NSP },
{},
};
MODULE_DEVICE_TABLE(of, brcm_sata_phy_of_match);
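
The repeated brcm_sata_phy_wr(base, bank, reg, mask, value) calls in brcm_nsp_sata_init() follow a keep-mask read-modify-write convention: the mask selects the bits to preserve (callers pass ~field, or 0x0 to replace the whole register) and value supplies the new bits. The helper's definition is not part of this hunk; the following is a sketch of the implied pattern only, with the bank/register addressing left out.

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative keep-mask RMW; not the driver's actual helper. */
static void rmw_sketch(void __iomem *addr, u32 keep_mask, u32 value)
{
	u32 tmp = readl(addr);

	tmp &= keep_mask;	/* e.g. ~(SELDIV_MASK << SELDIV_SHIFT) */
	tmp |= value;
	writel(tmp, addr);
}
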
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index b72e9a3b6..8eca906b6 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -342,6 +342,21 @@ int phy_power_off(struct phy *phy)
}
EXPORT_SYMBOL_GPL(phy_power_off);
+int phy_set_mode(struct phy *phy, enum phy_mode mode)
+{
+ int ret;
+
+ if (!phy || !phy->ops->set_mode)
+ return 0;
+
+ mutex_lock(&phy->mutex);
+ ret = phy->ops->set_mode(phy, mode);
+ mutex_unlock(&phy->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_set_mode);
+
/**
* _of_phy_get() - lookup and obtain a reference to a phy by phandle
* @np: device_node for which to get the phy
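
The new phy_set_mode() deliberately returns 0 when the PHY pointer is NULL or the provider has no .set_mode op, so consumers can request a mode unconditionally. A minimal caller sketch (PHY_MODE_USB_HOST is one of the enum phy_mode values this series introduces; the function name is illustrative):

#include <linux/phy/phy.h>
#include <linux/printk.h>

static int force_host_mode(struct phy *phy)
{
	int ret = phy_set_mode(phy, PHY_MODE_USB_HOST);

	if (ret)
		pr_warn("failed to switch PHY to host mode: %d\n", ret);
	return ret;
}
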
diff --git a/drivers/phy/phy-da8xx-usb.c b/drivers/phy/phy-da8xx-usb.c
new file mode 100644
index 000000000..b2e59b617
--- /dev/null
+++ b/drivers/phy/phy-da8xx-usb.c
@@ -0,0 +1,245 @@
+/*
+ * phy-da8xx-usb - TI DaVinci DA8xx USB PHY driver
+ *
+ * Copyright (C) 2016 David Lechner <david@lechnology.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/mfd/da8xx-cfgchip.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+struct da8xx_usb_phy {
+ struct phy_provider *phy_provider;
+ struct phy *usb11_phy;
+ struct phy *usb20_phy;
+ struct clk *usb11_clk;
+ struct clk *usb20_clk;
+ struct regmap *regmap;
+};
+
+static int da8xx_usb11_phy_power_on(struct phy *phy)
+{
+ struct da8xx_usb_phy *d_phy = phy_get_drvdata(phy);
+ int ret;
+
+ ret = clk_prepare_enable(d_phy->usb11_clk);
+ if (ret)
+ return ret;
+
+ regmap_write_bits(d_phy->regmap, CFGCHIP(2), CFGCHIP2_USB1SUSPENDM,
+ CFGCHIP2_USB1SUSPENDM);
+
+ return 0;
+}
+
+static int da8xx_usb11_phy_power_off(struct phy *phy)
+{
+ struct da8xx_usb_phy *d_phy = phy_get_drvdata(phy);
+
+ regmap_write_bits(d_phy->regmap, CFGCHIP(2), CFGCHIP2_USB1SUSPENDM, 0);
+
+ clk_disable_unprepare(d_phy->usb11_clk);
+
+ return 0;
+}
+
+static const struct phy_ops da8xx_usb11_phy_ops = {
+ .power_on = da8xx_usb11_phy_power_on,
+ .power_off = da8xx_usb11_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int da8xx_usb20_phy_power_on(struct phy *phy)
+{
+ struct da8xx_usb_phy *d_phy = phy_get_drvdata(phy);
+ int ret;
+
+ ret = clk_prepare_enable(d_phy->usb20_clk);
+ if (ret)
+ return ret;
+
+ regmap_write_bits(d_phy->regmap, CFGCHIP(2), CFGCHIP2_OTGPWRDN, 0);
+
+ return 0;
+}
+
+static int da8xx_usb20_phy_power_off(struct phy *phy)
+{
+ struct da8xx_usb_phy *d_phy = phy_get_drvdata(phy);
+
+ regmap_write_bits(d_phy->regmap, CFGCHIP(2), CFGCHIP2_OTGPWRDN,
+ CFGCHIP2_OTGPWRDN);
+
+ clk_disable_unprepare(d_phy->usb20_clk);
+
+ return 0;
+}
+
+static int da8xx_usb20_phy_set_mode(struct phy *phy, enum phy_mode mode)
+{
+ struct da8xx_usb_phy *d_phy = phy_get_drvdata(phy);
+ u32 val;
+
+ switch (mode) {
+ case PHY_MODE_USB_HOST: /* Force VBUS valid, ID = 0 */
+ val = CFGCHIP2_OTGMODE_FORCE_HOST;
+ break;
+ case PHY_MODE_USB_DEVICE: /* Force VBUS valid, ID = 1 */
+ val = CFGCHIP2_OTGMODE_FORCE_DEVICE;
+ break;
+ case PHY_MODE_USB_OTG: /* Don't override the VBUS/ID comparators */
+ val = CFGCHIP2_OTGMODE_NO_OVERRIDE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_write_bits(d_phy->regmap, CFGCHIP(2), CFGCHIP2_OTGMODE_MASK,
+ val);
+
+ return 0;
+}
+
+static const struct phy_ops da8xx_usb20_phy_ops = {
+ .power_on = da8xx_usb20_phy_power_on,
+ .power_off = da8xx_usb20_phy_power_off,
+ .set_mode = da8xx_usb20_phy_set_mode,
+ .owner = THIS_MODULE,
+};
+
+static struct phy *da8xx_usb_phy_of_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct da8xx_usb_phy *d_phy = dev_get_drvdata(dev);
+
+ if (!d_phy)
+ return ERR_PTR(-ENODEV);
+
+ switch (args->args[0]) {
+ case 0:
+ return d_phy->usb20_phy;
+ case 1:
+ return d_phy->usb11_phy;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+}
+
+static int da8xx_usb_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct da8xx_usb_phy *d_phy;
+
+ d_phy = devm_kzalloc(dev, sizeof(*d_phy), GFP_KERNEL);
+ if (!d_phy)
+ return -ENOMEM;
+
+ if (node)
+ d_phy->regmap = syscon_regmap_lookup_by_compatible(
+ "ti,da830-cfgchip");
+ else
+ d_phy->regmap = syscon_regmap_lookup_by_pdevname("syscon.0");
+ if (IS_ERR(d_phy->regmap)) {
+ dev_err(dev, "Failed to get syscon\n");
+ return PTR_ERR(d_phy->regmap);
+ }
+
+ d_phy->usb11_clk = devm_clk_get(dev, "usb11_phy");
+ if (IS_ERR(d_phy->usb11_clk)) {
+ dev_err(dev, "Failed to get usb11_phy clock\n");
+ return PTR_ERR(d_phy->usb11_clk);
+ }
+
+ d_phy->usb20_clk = devm_clk_get(dev, "usb20_phy");
+ if (IS_ERR(d_phy->usb20_clk)) {
+ dev_err(dev, "Failed to get usb20_phy clock\n");
+ return PTR_ERR(d_phy->usb20_clk);
+ }
+
+ d_phy->usb11_phy = devm_phy_create(dev, node, &da8xx_usb11_phy_ops);
+ if (IS_ERR(d_phy->usb11_phy)) {
+ dev_err(dev, "Failed to create usb11 phy\n");
+ return PTR_ERR(d_phy->usb11_phy);
+ }
+
+ d_phy->usb20_phy = devm_phy_create(dev, node, &da8xx_usb20_phy_ops);
+ if (IS_ERR(d_phy->usb20_phy)) {
+ dev_err(dev, "Failed to create usb20 phy\n");
+ return PTR_ERR(d_phy->usb20_phy);
+ }
+
+ platform_set_drvdata(pdev, d_phy);
+ phy_set_drvdata(d_phy->usb11_phy, d_phy);
+ phy_set_drvdata(d_phy->usb20_phy, d_phy);
+
+ if (node) {
+ d_phy->phy_provider = devm_of_phy_provider_register(dev,
+ da8xx_usb_phy_of_xlate);
+ if (IS_ERR(d_phy->phy_provider)) {
+ dev_err(dev, "Failed to create phy provider\n");
+ return PTR_ERR(d_phy->phy_provider);
+ }
+ } else {
+ int ret;
+
+ ret = phy_create_lookup(d_phy->usb11_phy, "usb-phy", "ohci.0");
+ if (ret)
+ dev_warn(dev, "Failed to create usb11 phy lookup\n");
+ ret = phy_create_lookup(d_phy->usb20_phy, "usb-phy",
+ "musb-da8xx");
+ if (ret)
+ dev_warn(dev, "Failed to create usb20 phy lookup\n");
+ }
+
+ return 0;
+}
+
+static int da8xx_usb_phy_remove(struct platform_device *pdev)
+{
+ struct da8xx_usb_phy *d_phy = platform_get_drvdata(pdev);
+
+ if (!pdev->dev.of_node) {
+ phy_remove_lookup(d_phy->usb20_phy, "usb-phy", "musb-da8xx");
+ phy_remove_lookup(d_phy->usb11_phy, "usb-phy", "ohci.0");
+ }
+
+ return 0;
+}
+
+static const struct of_device_id da8xx_usb_phy_ids[] = {
+ { .compatible = "ti,da830-usb-phy" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, da8xx_usb_phy_ids);
+
+static struct platform_driver da8xx_usb_phy_driver = {
+ .probe = da8xx_usb_phy_probe,
+ .remove = da8xx_usb_phy_remove,
+ .driver = {
+ .name = "da8xx-usb-phy",
+ .of_match_table = da8xx_usb_phy_ids,
+ },
+};
+
+module_platform_driver(da8xx_usb_phy_driver);
+
+MODULE_ALIAS("platform:da8xx-usb-phy");
+MODULE_AUTHOR("David Lechner <david@lechnology.com>");
+MODULE_DESCRIPTION("TI DA8xx USB PHY driver");
+MODULE_LICENSE("GPL v2");
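
On the legacy (non-DT) path, the probe above registers lookups with con_id "usb-phy" against the "ohci.0" and "musb-da8xx" device names, so a matching consumer only needs the con_id. A hypothetical consumer sketch, assuming dev is the musb-da8xx platform device:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/phy/phy.h>

static int musb_da8xx_get_phy(struct device *dev)
{
	struct phy *phy = devm_phy_get(dev, "usb-phy");

	if (IS_ERR(phy))
		return PTR_ERR(phy);

	/* runs da8xx_usb20_phy_power_on(): clock on, OTGPWRDN cleared */
	return phy_power_on(phy);
}
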
diff --git a/drivers/phy/phy-qcom-ufs-qmp-14nm.c b/drivers/phy/phy-qcom-ufs-qmp-14nm.c
index 56631e77c..6ee51490f 100644
--- a/drivers/phy/phy-qcom-ufs-qmp-14nm.c
+++ b/drivers/phy/phy-qcom-ufs-qmp-14nm.c
@@ -140,7 +140,6 @@ static int ufs_qcom_phy_qmp_14nm_probe(struct platform_device *pdev)
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy) {
- dev_err(dev, "%s: failed to allocate phy\n", __func__);
err = -ENOMEM;
goto out;
}
diff --git a/drivers/phy/phy-qcom-ufs-qmp-20nm.c b/drivers/phy/phy-qcom-ufs-qmp-20nm.c
index b16ea77d0..770087ab0 100644
--- a/drivers/phy/phy-qcom-ufs-qmp-20nm.c
+++ b/drivers/phy/phy-qcom-ufs-qmp-20nm.c
@@ -196,7 +196,6 @@ static int ufs_qcom_phy_qmp_20nm_probe(struct platform_device *pdev)
phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
if (!phy) {
- dev_err(dev, "%s: failed to allocate phy\n", __func__);
err = -ENOMEM;
goto out;
}
diff --git a/drivers/phy/phy-rockchip-emmc.c b/drivers/phy/phy-rockchip-emmc.c
index 6ebcf3e41..fd57345ff 100644
--- a/drivers/phy/phy-rockchip-emmc.c
+++ b/drivers/phy/phy-rockchip-emmc.c
@@ -14,6 +14,7 @@
* GNU General Public License for more details.
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
@@ -31,42 +32,64 @@
((val) << (shift) | (mask) << ((shift) + 16))
/* Register definition */
-#define GRF_EMMCPHY_CON0 0x0
-#define GRF_EMMCPHY_CON1 0x4
-#define GRF_EMMCPHY_CON2 0x8
-#define GRF_EMMCPHY_CON3 0xc
-#define GRF_EMMCPHY_CON4 0x10
-#define GRF_EMMCPHY_CON5 0x14
-#define GRF_EMMCPHY_CON6 0x18
-#define GRF_EMMCPHY_STATUS 0x20
-
-#define PHYCTRL_PDB_MASK 0x1
-#define PHYCTRL_PDB_SHIFT 0x0
-#define PHYCTRL_PDB_PWR_ON 0x1
-#define PHYCTRL_PDB_PWR_OFF 0x0
-#define PHYCTRL_ENDLL_MASK 0x1
-#define PHYCTRL_ENDLL_SHIFT 0x1
-#define PHYCTRL_ENDLL_ENABLE 0x1
-#define PHYCTRL_ENDLL_DISABLE 0x0
-#define PHYCTRL_CALDONE_MASK 0x1
-#define PHYCTRL_CALDONE_SHIFT 0x6
-#define PHYCTRL_CALDONE_DONE 0x1
-#define PHYCTRL_CALDONE_GOING 0x0
-#define PHYCTRL_DLLRDY_MASK 0x1
-#define PHYCTRL_DLLRDY_SHIFT 0x5
-#define PHYCTRL_DLLRDY_DONE 0x1
-#define PHYCTRL_DLLRDY_GOING 0x0
+#define GRF_EMMCPHY_CON0 0x0
+#define GRF_EMMCPHY_CON1 0x4
+#define GRF_EMMCPHY_CON2 0x8
+#define GRF_EMMCPHY_CON3 0xc
+#define GRF_EMMCPHY_CON4 0x10
+#define GRF_EMMCPHY_CON5 0x14
+#define GRF_EMMCPHY_CON6 0x18
+#define GRF_EMMCPHY_STATUS 0x20
+
+#define PHYCTRL_PDB_MASK 0x1
+#define PHYCTRL_PDB_SHIFT 0x0
+#define PHYCTRL_PDB_PWR_ON 0x1
+#define PHYCTRL_PDB_PWR_OFF 0x0
+#define PHYCTRL_ENDLL_MASK 0x1
+#define PHYCTRL_ENDLL_SHIFT 0x1
+#define PHYCTRL_ENDLL_ENABLE 0x1
+#define PHYCTRL_ENDLL_DISABLE 0x0
+#define PHYCTRL_CALDONE_MASK 0x1
+#define PHYCTRL_CALDONE_SHIFT 0x6
+#define PHYCTRL_CALDONE_DONE 0x1
+#define PHYCTRL_CALDONE_GOING 0x0
+#define PHYCTRL_DLLRDY_MASK 0x1
+#define PHYCTRL_DLLRDY_SHIFT 0x5
+#define PHYCTRL_DLLRDY_DONE 0x1
+#define PHYCTRL_DLLRDY_GOING 0x0
+#define PHYCTRL_FREQSEL_200M 0x0
+#define PHYCTRL_FREQSEL_50M 0x1
+#define PHYCTRL_FREQSEL_100M 0x2
+#define PHYCTRL_FREQSEL_150M 0x3
+#define PHYCTRL_FREQSEL_MASK 0x3
+#define PHYCTRL_FREQSEL_SHIFT 0xc
+#define PHYCTRL_DR_MASK 0x7
+#define PHYCTRL_DR_SHIFT 0x4
+#define PHYCTRL_DR_50OHM 0x0
+#define PHYCTRL_DR_33OHM 0x1
+#define PHYCTRL_DR_66OHM 0x2
+#define PHYCTRL_DR_100OHM 0x3
+#define PHYCTRL_DR_40OHM 0x4
+#define PHYCTRL_OTAPDLYENA 0x1
+#define PHYCTRL_OTAPDLYENA_MASK 0x1
+#define PHYCTRL_OTAPDLYENA_SHIFT 0xb
+#define PHYCTRL_OTAPDLYSEL_MASK 0xf
+#define PHYCTRL_OTAPDLYSEL_SHIFT 0x7
struct rockchip_emmc_phy {
unsigned int reg_offset;
struct regmap *reg_base;
+ struct clk *emmcclk;
};
-static int rockchip_emmc_phy_power(struct rockchip_emmc_phy *rk_phy,
- bool on_off)
+static int rockchip_emmc_phy_power(struct phy *phy, bool on_off)
{
+ struct rockchip_emmc_phy *rk_phy = phy_get_drvdata(phy);
unsigned int caldone;
unsigned int dllrdy;
+ unsigned int freqsel = PHYCTRL_FREQSEL_200M;
+ unsigned long rate;
+ unsigned long timeout;
/*
* Keep phyctrl_pdb and phyctrl_endll low to allow
@@ -87,6 +110,43 @@ static int rockchip_emmc_phy_power(struct rockchip_emmc_phy *rk_phy,
if (on_off == PHYCTRL_PDB_PWR_OFF)
return 0;
+ rate = clk_get_rate(rk_phy->emmcclk);
+
+ if (rate != 0) {
+ unsigned long ideal_rate;
+ unsigned long diff;
+
+ switch (rate) {
+ case 1 ... 74999999:
+ ideal_rate = 50000000;
+ freqsel = PHYCTRL_FREQSEL_50M;
+ break;
+ case 75000000 ... 124999999:
+ ideal_rate = 100000000;
+ freqsel = PHYCTRL_FREQSEL_100M;
+ break;
+ case 125000000 ... 174999999:
+ ideal_rate = 150000000;
+ freqsel = PHYCTRL_FREQSEL_150M;
+ break;
+ default:
+ ideal_rate = 200000000;
+ break;
+ }
+
+ diff = (rate > ideal_rate) ?
+ rate - ideal_rate : ideal_rate - rate;
+
+ /*
+ * In order for tuning delays to be accurate we need to be
+ * pretty spot on for the DLL range, so warn if we're too
+ * far off. Also warn if we're above the 200 MHz max. Don't
+ * warn for really slow rates since we won't be tuning then.
+ */
+ if ((rate > 50000000 && diff > 15000000) || (rate > 200000000))
+ dev_warn(&phy->dev, "Unsupported rate: %lu\n", rate);
+ }
+
/*
* According to the user manual, calpad calibration
* cycle takes more than 2us without the minimal recommended
@@ -113,20 +173,62 @@ static int rockchip_emmc_phy_power(struct rockchip_emmc_phy *rk_phy,
return -ETIMEDOUT;
}
+ /* Set the frequency of the DLL operation */
+ regmap_write(rk_phy->reg_base,
+ rk_phy->reg_offset + GRF_EMMCPHY_CON0,
+ HIWORD_UPDATE(freqsel, PHYCTRL_FREQSEL_MASK,
+ PHYCTRL_FREQSEL_SHIFT));
+
+ /* Turn on the DLL */
regmap_write(rk_phy->reg_base,
rk_phy->reg_offset + GRF_EMMCPHY_CON6,
HIWORD_UPDATE(PHYCTRL_ENDLL_ENABLE,
PHYCTRL_ENDLL_MASK,
PHYCTRL_ENDLL_SHIFT));
+
/*
- * After enable analog DLL circuits, we need extra 10.2us
- * for dll to be ready for work.
+ * We turned on the DLL even though the rate was 0 because the
+ * clock might be turned on later. ...but we can't wait for the DLL
+ * to lock when the rate is 0 because it will never lock with no
+ * input clock.
+ *
+ * Technically we should be checking the lock later when the clock
+ * is turned on, but for now we won't.
*/
- udelay(11);
- regmap_read(rk_phy->reg_base,
- rk_phy->reg_offset + GRF_EMMCPHY_STATUS,
- &dllrdy);
- dllrdy = (dllrdy >> PHYCTRL_DLLRDY_SHIFT) & PHYCTRL_DLLRDY_MASK;
+ if (rate == 0)
+ return 0;
+
+ /*
+ * After enabling the analog DLL circuits, the docs say we need 10.2 us if
+ * our source clock is at 50 MHz and that lock time scales linearly
+ * with clock speed. If we are powering on the PHY and the card clock
+ * is super slow (like 100 kHz) this could take as long as 5.1 ms as
+ * per the math: 10.2 us * (50000000 Hz / 100000 Hz) => 5.1 ms
+ * Hopefully we won't be running at 100 kHz, but we should still make
+ * sure we wait long enough.
+ *
+ * NOTE: There appear to be corner cases where the DLL seems to take
+ * extra long to lock for reasons that aren't understood. In some
+ * extreme cases we've seen it take over 10 ms (!). We'll be
+ * generous and give it 50ms. We still busy wait here because:
+ * - In most cases it should be super fast.
+ * - This is not called often during normal operation so it shouldn't
+ * be a power or performance problem to busy wait. We expect it
+ * only at boot / resume. In both cases, eMMC is probably on the
+ * critical path so busy waiting a little extra time should be OK.
+ */
+ timeout = jiffies + msecs_to_jiffies(50);
+ do {
+ udelay(1);
+
+ regmap_read(rk_phy->reg_base,
+ rk_phy->reg_offset + GRF_EMMCPHY_STATUS,
+ &dllrdy);
+ dllrdy = (dllrdy >> PHYCTRL_DLLRDY_SHIFT) & PHYCTRL_DLLRDY_MASK;
+ if (dllrdy == PHYCTRL_DLLRDY_DONE)
+ break;
+ } while (!time_after(jiffies, timeout));
+
if (dllrdy != PHYCTRL_DLLRDY_DONE) {
pr_err("rockchip_emmc_phy_power: dllrdy timeout.\n");
return -ETIMEDOUT;
@@ -135,33 +237,82 @@ static int rockchip_emmc_phy_power(struct rockchip_emmc_phy *rk_phy,
return 0;
}
-static int rockchip_emmc_phy_power_off(struct phy *phy)
+static int rockchip_emmc_phy_init(struct phy *phy)
{
struct rockchip_emmc_phy *rk_phy = phy_get_drvdata(phy);
int ret = 0;
- /* Power down emmc phy analog blocks */
- ret = rockchip_emmc_phy_power(rk_phy, PHYCTRL_PDB_PWR_OFF);
- if (ret)
- return ret;
+ /*
+ * We purposely get the clock here and not in probe to avoid the
+ * circular dependency problem. We expect:
+ * - PHY driver to probe
+ * - SDHCI driver to start probe
+ * - SDHCI driver to register its clock
+ * - SDHCI driver to get the PHY
+ * - SDHCI driver to init the PHY
+ *
+ * The clock is optional, so upon any error we just set to NULL.
+ *
+ * NOTE: we don't do anything special for EPROBE_DEFER here. Given the
+ * above expected use case, EPROBE_DEFER isn't sensible to expect, so
+ * it's just like any other error.
+ */
+ rk_phy->emmcclk = clk_get(&phy->dev, "emmcclk");
+ if (IS_ERR(rk_phy->emmcclk)) {
+ dev_dbg(&phy->dev, "Error getting emmcclk: %ld\n", PTR_ERR(rk_phy->emmcclk));
+ rk_phy->emmcclk = NULL;
+ }
+
+ return ret;
+}
+
+static int rockchip_emmc_phy_exit(struct phy *phy)
+{
+ struct rockchip_emmc_phy *rk_phy = phy_get_drvdata(phy);
+
+ clk_put(rk_phy->emmcclk);
return 0;
}
+static int rockchip_emmc_phy_power_off(struct phy *phy)
+{
+ /* Power down emmc phy analog blocks */
+ return rockchip_emmc_phy_power(phy, PHYCTRL_PDB_PWR_OFF);
+}
+
static int rockchip_emmc_phy_power_on(struct phy *phy)
{
struct rockchip_emmc_phy *rk_phy = phy_get_drvdata(phy);
- int ret = 0;
- /* Power up emmc phy analog blocks */
- ret = rockchip_emmc_phy_power(rk_phy, PHYCTRL_PDB_PWR_ON);
- if (ret)
- return ret;
+ /* Drive impedance: 50 Ohm */
+ regmap_write(rk_phy->reg_base,
+ rk_phy->reg_offset + GRF_EMMCPHY_CON6,
+ HIWORD_UPDATE(PHYCTRL_DR_50OHM,
+ PHYCTRL_DR_MASK,
+ PHYCTRL_DR_SHIFT));
- return 0;
+ /* Output tap delay: enable */
+ regmap_write(rk_phy->reg_base,
+ rk_phy->reg_offset + GRF_EMMCPHY_CON0,
+ HIWORD_UPDATE(PHYCTRL_OTAPDLYENA,
+ PHYCTRL_OTAPDLYENA_MASK,
+ PHYCTRL_OTAPDLYENA_SHIFT));
+
+ /* Output tap delay */
+ regmap_write(rk_phy->reg_base,
+ rk_phy->reg_offset + GRF_EMMCPHY_CON0,
+ HIWORD_UPDATE(4,
+ PHYCTRL_OTAPDLYSEL_MASK,
+ PHYCTRL_OTAPDLYSEL_SHIFT));
+
+ /* Power up emmc phy analog blocks */
+ return rockchip_emmc_phy_power(phy, PHYCTRL_PDB_PWR_ON);
}
static const struct phy_ops ops = {
+ .init = rockchip_emmc_phy_init,
+ .exit = rockchip_emmc_phy_exit,
.power_on = rockchip_emmc_phy_power_on,
.power_off = rockchip_emmc_phy_power_off,
.owner = THIS_MODULE,
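
The DLL comment above quotes a 10.2 us lock time at 50 MHz that grows as the card clock slows, i.e. lock_us ~= 10.2 us * (50 MHz / rate). A sketch of that arithmetic (illustrative only; the driver simply busy-waits with a flat 50 ms cap instead of computing a bound):

#include <linux/kernel.h>

static unsigned long dll_lock_time_us(unsigned long rate)
{
	if (!rate)
		return 0;	/* the DLL never locks without an input clock */

	/*
	 * 10.2 us * 50 MHz = 510,000,000. At 100 kHz this yields 5100 us
	 * (the 5.1 ms in the comment); at 50 MHz it rounds up to 11 us,
	 * matching the old fixed udelay(11).
	 */
	return DIV_ROUND_UP(510000000UL, rate);
}
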
diff --git a/drivers/phy/phy-rockchip-usb.c b/drivers/phy/phy-rockchip-usb.c
index d60b149cf..2a7381f4f 100644
--- a/drivers/phy/phy-rockchip-usb.c
+++ b/drivers/phy/phy-rockchip-usb.c
@@ -236,9 +236,10 @@ static int rockchip_usb_phy_init(struct rockchip_usb_phy_base *base,
goto err_clk_prov;
}
- err = devm_add_action(base->dev, rockchip_usb_phy_action, rk_phy);
+ err = devm_add_action_or_reset(base->dev, rockchip_usb_phy_action,
+ rk_phy);
if (err)
- goto err_devm_action;
+ return err;
rk_phy->phy = devm_phy_create(base->dev, child, &ops);
if (IS_ERR(rk_phy->phy)) {
@@ -256,9 +257,6 @@ static int rockchip_usb_phy_init(struct rockchip_usb_phy_base *base,
else
return rockchip_usb_phy_power(rk_phy, 1);
-err_devm_action:
- if (!rk_phy->uart_enabled)
- of_clk_del_provider(child);
err_clk_prov:
if (!rk_phy->uart_enabled)
clk_unregister(rk_phy->clk480m);
@@ -397,8 +395,13 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev)
phy_base->pdata = match->data;
phy_base->dev = dev;
- phy_base->reg_base = syscon_regmap_lookup_by_phandle(dev->of_node,
- "rockchip,grf");
+ phy_base->reg_base = ERR_PTR(-ENODEV);
+ if (dev->parent && dev->parent->of_node)
+ phy_base->reg_base = syscon_node_to_regmap(
+ dev->parent->of_node);
+ if (IS_ERR(phy_base->reg_base))
+ phy_base->reg_base = syscon_regmap_lookup_by_phandle(
+ dev->of_node, "rockchip,grf");
if (IS_ERR(phy_base->reg_base)) {
dev_err(&pdev->dev, "Missing rockchip,grf property\n");
return PTR_ERR(phy_base->reg_base);
@@ -463,7 +466,11 @@ static int __init rockchip_init_usb_uart(void)
return -ENOTSUPP;
}
- grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+ grf = ERR_PTR(-ENODEV);
+ if (np->parent)
+ grf = syscon_node_to_regmap(np->parent);
+ if (IS_ERR(grf))
+ grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
if (IS_ERR(grf)) {
pr_err("%s: Missing rockchip,grf property, %lu\n",
__func__, PTR_ERR(grf));
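
Both rockchip-usb hunks above switch to the same two-step GRF lookup: prefer the parent node's syscon regmap (for DTs where the PHY node sits under the GRF) and fall back to the legacy "rockchip,grf" phandle. A condensed sketch of that order (helper name illustrative):

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

static struct regmap *get_grf(struct device_node *np)
{
	struct regmap *grf = ERR_PTR(-ENODEV);

	if (np->parent)
		grf = syscon_node_to_regmap(np->parent);
	if (IS_ERR(grf))	/* old DTs: fall back to the phandle */
		grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");

	return grf;		/* caller must IS_ERR()-check */
}
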
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
index de3101fbb..4d74ca918 100644
--- a/drivers/phy/phy-sun4i-usb.c
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -40,6 +40,8 @@
#include <linux/power_supply.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
+#include <linux/spinlock.h>
+#include <linux/usb/of.h>
#include <linux/workqueue.h>
#define REG_ISCR 0x00
@@ -94,6 +96,7 @@
enum sun4i_usb_phy_type {
sun4i_a10_phy,
+ sun6i_a31_phy,
sun8i_a33_phy,
sun8i_h3_phy,
};
@@ -109,7 +112,8 @@ struct sun4i_usb_phy_cfg {
struct sun4i_usb_phy_data {
void __iomem *base;
const struct sun4i_usb_phy_cfg *cfg;
- struct mutex mutex;
+ enum usb_dr_mode dr_mode;
+ spinlock_t reg_lock; /* guard access to phyctl reg */
struct sun4i_usb_phy {
struct phy *phy;
void __iomem *pmu;
@@ -119,10 +123,10 @@ struct sun4i_usb_phy_data {
bool regulator_on;
int index;
} phys[MAX_PHYS];
+ int first_phy;
/* phy0 / otg related variables */
struct extcon_dev *extcon;
bool phy0_init;
- bool phy0_poll;
struct gpio_desc *id_det_gpio;
struct gpio_desc *vbus_det_gpio;
struct power_supply *vbus_power_supply;
@@ -176,9 +180,10 @@ static void sun4i_usb_phy_write(struct sun4i_usb_phy *phy, u32 addr, u32 data,
struct sun4i_usb_phy_data *phy_data = to_sun4i_usb_phy_data(phy);
u32 temp, usbc_bit = BIT(phy->index * 2);
void __iomem *phyctl = phy_data->base + phy_data->cfg->phyctl_offset;
+ unsigned long flags;
int i;
- mutex_lock(&phy_data->mutex);
+ spin_lock_irqsave(&phy_data->reg_lock, flags);
if (phy_data->cfg->type == sun8i_a33_phy) {
/* A33 needs us to set phyctl to 0 explicitly */
@@ -215,7 +220,8 @@ static void sun4i_usb_phy_write(struct sun4i_usb_phy *phy, u32 addr, u32 data,
data >>= 1;
}
- mutex_unlock(&phy_data->mutex);
+
+ spin_unlock_irqrestore(&phy_data->reg_lock, flags);
}
static void sun4i_usb_phy_passby(struct sun4i_usb_phy *phy, int enable)
@@ -285,16 +291,10 @@ static int sun4i_usb_phy_init(struct phy *_phy)
sun4i_usb_phy0_update_iscr(_phy, 0, ISCR_DPDM_PULLUP_EN);
sun4i_usb_phy0_update_iscr(_phy, 0, ISCR_ID_PULLUP_EN);
- if (data->id_det_gpio) {
- /* OTG mode, force ISCR and cable state updates */
- data->id_det = -1;
- data->vbus_det = -1;
- queue_delayed_work(system_wq, &data->detect, 0);
- } else {
- /* Host only mode */
- sun4i_usb_phy0_set_id_detect(_phy, 0);
- sun4i_usb_phy0_set_vbus_detect(_phy, 1);
- }
+ /* Force ISCR and cable state updates */
+ data->id_det = -1;
+ data->vbus_det = -1;
+ queue_delayed_work(system_wq, &data->detect, 0);
}
return 0;
@@ -319,6 +319,19 @@ static int sun4i_usb_phy_exit(struct phy *_phy)
return 0;
}
+static int sun4i_usb_phy0_get_id_det(struct sun4i_usb_phy_data *data)
+{
+ switch (data->dr_mode) {
+ case USB_DR_MODE_OTG:
+ return gpiod_get_value_cansleep(data->id_det_gpio);
+ case USB_DR_MODE_HOST:
+ return 0;
+ case USB_DR_MODE_PERIPHERAL:
+ default:
+ return 1;
+ }
+}
+
static int sun4i_usb_phy0_get_vbus_det(struct sun4i_usb_phy_data *data)
{
if (data->vbus_det_gpio)
@@ -343,6 +356,24 @@ static bool sun4i_usb_phy0_have_vbus_det(struct sun4i_usb_phy_data *data)
return data->vbus_det_gpio || data->vbus_power_supply;
}
+static bool sun4i_usb_phy0_poll(struct sun4i_usb_phy_data *data)
+{
+ if ((data->id_det_gpio && data->id_det_irq <= 0) ||
+ (data->vbus_det_gpio && data->vbus_det_irq <= 0))
+ return true;
+
+ /*
+ * The A31 companion pmic (axp221) does not generate vbus change
+ * interrupts when the board is driving vbus, so we must poll
+ * when using the pmic for vbus-det _and_ we're driving vbus.
+ */
+ if (data->cfg->type == sun6i_a31_phy &&
+ data->vbus_power_supply && data->phys[0].regulator_on)
+ return true;
+
+ return false;
+}
+
static int sun4i_usb_phy_power_on(struct phy *_phy)
{
struct sun4i_usb_phy *phy = phy_get_drvdata(_phy);
@@ -364,7 +395,7 @@ static int sun4i_usb_phy_power_on(struct phy *_phy)
phy->regulator_on = true;
/* We must report Vbus high within OTG_TIME_A_WAIT_VRISE msec. */
- if (phy->index == 0 && data->vbus_det_gpio && data->phy0_poll)
+ if (phy->index == 0 && sun4i_usb_phy0_poll(data))
mod_delayed_work(system_wq, &data->detect, DEBOUNCE_TIME);
return 0;
@@ -385,7 +416,7 @@ static int sun4i_usb_phy_power_off(struct phy *_phy)
* phy0 vbus typically slowly discharges, sometimes this causes the
* Vbus gpio to not trigger an edge irq on Vbus off, so force a rescan.
*/
- if (phy->index == 0 && data->vbus_det_gpio && !data->phy0_poll)
+ if (phy->index == 0 && !sun4i_usb_phy0_poll(data))
mod_delayed_work(system_wq, &data->detect, POLL_TIME);
return 0;
@@ -414,7 +445,10 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
struct phy *phy0 = data->phys[0].phy;
int id_det, vbus_det, id_notify = 0, vbus_notify = 0;
- id_det = gpiod_get_value_cansleep(data->id_det_gpio);
+ if (phy0 == NULL)
+ return;
+
+ id_det = sun4i_usb_phy0_get_id_det(data);
vbus_det = sun4i_usb_phy0_get_vbus_det(data);
mutex_lock(&phy0->mutex);
@@ -430,7 +464,8 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
* without vbus detection report vbus low for long enough for
* the musb-ip to end the current device session.
*/
- if (!sun4i_usb_phy0_have_vbus_det(data) && id_det == 0) {
+ if (data->dr_mode == USB_DR_MODE_OTG &&
+ !sun4i_usb_phy0_have_vbus_det(data) && id_det == 0) {
sun4i_usb_phy0_set_vbus_detect(phy0, 0);
msleep(200);
sun4i_usb_phy0_set_vbus_detect(phy0, 1);
@@ -456,7 +491,8 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
* without vbus detection report vbus low for long enough to
* the musb-ip to end the current host session.
*/
- if (!sun4i_usb_phy0_have_vbus_det(data) && id_det == 1) {
+ if (data->dr_mode == USB_DR_MODE_OTG &&
+ !sun4i_usb_phy0_have_vbus_det(data) && id_det == 1) {
mutex_lock(&phy0->mutex);
sun4i_usb_phy0_set_vbus_detect(phy0, 0);
msleep(1000);
@@ -468,7 +504,7 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
if (vbus_notify)
extcon_set_cable_state_(data->extcon, EXTCON_USB, vbus_det);
- if (data->phy0_poll)
+ if (sun4i_usb_phy0_poll(data))
queue_delayed_work(system_wq, &data->detect, POLL_TIME);
}
@@ -501,7 +537,8 @@ static struct phy *sun4i_usb_phy_xlate(struct device *dev,
{
struct sun4i_usb_phy_data *data = dev_get_drvdata(dev);
- if (args->args[0] >= data->cfg->num_phys)
+ if (args->args[0] < data->first_phy ||
+ args->args[0] >= data->cfg->num_phys)
return ERR_PTR(-ENODEV);
return data->phys[args->args[0]].phy;
@@ -543,7 +580,7 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
- mutex_init(&data->mutex);
+ spin_lock_init(&data->reg_lock);
INIT_DELAYED_WORK(&data->detect, sun4i_usb_phy0_id_vbus_det_scan);
dev_set_drvdata(dev, data);
data->cfg = of_device_get_match_data(dev);
@@ -575,13 +612,17 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
- /* vbus_det without id_det makes no sense, and is not supported */
- if (sun4i_usb_phy0_have_vbus_det(data) && !data->id_det_gpio) {
- dev_err(dev, "usb0_id_det missing or invalid\n");
- return -ENODEV;
- }
-
- if (data->id_det_gpio) {
+ data->dr_mode = of_usb_get_dr_mode_by_phy(np, 0);
+ switch (data->dr_mode) {
+ case USB_DR_MODE_OTG:
+ /* otg without id_det makes no sense, and is not supported */
+ if (!data->id_det_gpio) {
+ dev_err(dev, "usb0_id_det missing or invalid\n");
+ return -ENODEV;
+ }
+ /* fall through */
+ case USB_DR_MODE_HOST:
+ case USB_DR_MODE_PERIPHERAL:
data->extcon = devm_extcon_dev_allocate(dev,
sun4i_usb_phy0_cable);
if (IS_ERR(data->extcon))
@@ -592,9 +633,13 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
dev_err(dev, "failed to register extcon: %d\n", ret);
return ret;
}
+ break;
+ default:
+ dev_info(dev, "dr_mode unknown, not registering usb phy0\n");
+ data->first_phy = 1;
}
- for (i = 0; i < data->cfg->num_phys; i++) {
+ for (i = data->first_phy; i < data->cfg->num_phys; i++) {
struct sun4i_usb_phy *phy = data->phys + i;
char name[16];
@@ -644,11 +689,6 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
}
data->id_det_irq = gpiod_to_irq(data->id_det_gpio);
- data->vbus_det_irq = gpiod_to_irq(data->vbus_det_gpio);
- if ((data->id_det_gpio && data->id_det_irq <= 0) ||
- (data->vbus_det_gpio && data->vbus_det_irq <= 0))
- data->phy0_poll = true;
-
if (data->id_det_irq > 0) {
ret = devm_request_irq(dev, data->id_det_irq,
sun4i_usb_phy0_id_vbus_det_irq,
@@ -660,6 +700,7 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
}
}
+ data->vbus_det_irq = gpiod_to_irq(data->vbus_det_gpio);
if (data->vbus_det_irq > 0) {
ret = devm_request_irq(dev, data->vbus_det_irq,
sun4i_usb_phy0_id_vbus_det_irq,
@@ -711,7 +752,7 @@ static const struct sun4i_usb_phy_cfg sun5i_a13_cfg = {
static const struct sun4i_usb_phy_cfg sun6i_a31_cfg = {
.num_phys = 3,
- .type = sun4i_a10_phy,
+ .type = sun6i_a31_phy,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A10,
.dedicated_clocks = true,
diff --git a/drivers/phy/phy-sun9i-usb.c b/drivers/phy/phy-sun9i-usb.c
index ac4f31abe..28fce4bce 100644
--- a/drivers/phy/phy-sun9i-usb.c
+++ b/drivers/phy/phy-sun9i-usb.c
@@ -141,9 +141,9 @@ static int sun9i_usb_phy_probe(struct platform_device *pdev)
}
phy->hsic_clk = devm_clk_get(dev, "hsic_12M");
- if (IS_ERR(phy->clk)) {
+ if (IS_ERR(phy->hsic_clk)) {
dev_err(dev, "failed to get hsic_12M clock\n");
- return PTR_ERR(phy->clk);
+ return PTR_ERR(phy->hsic_clk);
}
phy->reset = devm_reset_control_get(dev, "hsic");
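
The sun9i hunk above fixes a copy-paste bug: the code fetched hsic_clk but tested and propagated phy->clk, silently masking a failed lookup. The idiom it restores, as a small sketch (function name illustrative; "hsic_12M" matches the clock name used above):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int get_hsic_clk(struct device *dev, struct clk **out)
{
	struct clk *clk = devm_clk_get(dev, "hsic_12M");

	/* always test the pointer that was just returned */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	*out = clk;
	return 0;
}
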
diff --git a/drivers/phy/phy-xgene.c b/drivers/phy/phy-xgene.c
index 385362e5b..ae266e0c8 100644
--- a/drivers/phy/phy-xgene.c
+++ b/drivers/phy/phy-xgene.c
@@ -518,7 +518,7 @@ enum clk_type_t {
CLK_INT_SING = 2, /* Internal single ended */
};
-enum phy_mode {
+enum xgene_phy_mode {
MODE_SATA = 0, /* List them for simple reference */
MODE_SGMII = 1,
MODE_PCIE = 2,
@@ -542,7 +542,7 @@ struct xgene_sata_override_param {
struct xgene_phy_ctx {
struct device *dev;
struct phy *phy;
- enum phy_mode mode; /* Mode of operation */
+ enum xgene_phy_mode mode; /* Mode of operation */
enum clk_type_t clk_type; /* Input clock selection */
void __iomem *sds_base; /* PHY CSR base addr */
struct clk *clk; /* Optional clock */
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index fb8200b8e..b3fe1d339 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -35,7 +35,7 @@ config PINCTRL_ADI2
machine and arch are selected to build.
config PINCTRL_AS3722
- bool "Pinctrl and GPIO driver for ams AS3722 PMIC"
+ tristate "Pinctrl and GPIO driver for ams AS3722 PMIC"
depends on MFD_AS3722 && GPIOLIB
select PINMUX
select GENERIC_PINCONF
@@ -129,6 +129,17 @@ config PINCTRL_MESON
select OF_GPIO
select REGMAP_MMIO
+config PINCTRL_OXNAS
+ bool
+ depends on OF
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select GPIOLIB
+ select OF_GPIO
+ select GPIOLIB_IRQCHIP
+ select MFD_SYSCON
+
config PINCTRL_ROCKCHIP
bool
select PINMUX
@@ -196,8 +207,19 @@ config PINCTRL_COH901
COH 901 335 and COH 901 571/3. They contain 3, 5 or 7
ports of 8 GPIO pins each.
+config PINCTRL_MAX77620
+ tristate "MAX77620/MAX20024 Pincontrol support"
+ depends on MFD_MAX77620
+ select PINMUX
+ select GENERIC_PINCONF
+ help
+ Say Yes here to enable pin control support for the Maxim MAX77620
+ PMIC. This PMIC has 8 GPIO pins that work as GPIOs as well as
+ special functions in alternate mode. This driver also configures
+ push-pull, open drain, FPS slots, etc.
+
config PINCTRL_PALMAS
- bool "Pinctrl driver for the PALMAS Series MFD devices"
+ tristate "Pinctrl driver for the PALMAS Series MFD devices"
depends on OF && MFD_PALMAS
select PINMUX
select GENERIC_PINCONF
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 42a5c1ddd..8ebd7b8e1 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -16,7 +16,9 @@ obj-$(CONFIG_PINCTRL_AT91PIO4) += pinctrl-at91-pio4.o
obj-$(CONFIG_PINCTRL_AMD) += pinctrl-amd.o
obj-$(CONFIG_PINCTRL_DIGICOLOR) += pinctrl-digicolor.o
obj-$(CONFIG_PINCTRL_FALCON) += pinctrl-falcon.o
+obj-$(CONFIG_PINCTRL_MAX77620) += pinctrl-max77620.o
obj-$(CONFIG_PINCTRL_MESON) += meson/
+obj-$(CONFIG_PINCTRL_OXNAS) += pinctrl-oxnas.o
obj-$(CONFIG_PINCTRL_PALMAS) += pinctrl-palmas.o
obj-$(CONFIG_PINCTRL_PIC32) += pinctrl-pic32.o
obj-$(CONFIG_PINCTRL_PISTACHIO) += pinctrl-pistachio.o
@@ -35,7 +37,7 @@ obj-$(CONFIG_PINCTRL_TB10X) += pinctrl-tb10x.o
obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o
obj-$(CONFIG_PINCTRL_ZYNQ) += pinctrl-zynq.o
-obj-$(CONFIG_ARCH_BCM) += bcm/
+obj-y += bcm/
obj-$(CONFIG_PINCTRL_BERLIN) += berlin/
obj-y += freescale/
obj-$(CONFIG_X86) += intel/
diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig
index c356223e1..63246770b 100644
--- a/drivers/pinctrl/bcm/Kconfig
+++ b/drivers/pinctrl/bcm/Kconfig
@@ -60,6 +60,7 @@ config PINCTRL_IPROC_GPIO
config PINCTRL_CYGNUS_MUX
bool "Broadcom Cygnus IOMUX driver"
depends on (ARCH_BCM_CYGNUS || COMPILE_TEST)
+ depends on OF
select PINMUX
select GENERIC_PINCONF
default ARCH_BCM_CYGNUS
@@ -99,3 +100,17 @@ config PINCTRL_NS2_MUX
The Broadcom Northstar2 IOMUX driver supports group based IOMUX
configuration.
+
+config PINCTRL_NSP_MUX
+ bool "Broadcom NSP IOMUX driver"
+ depends on (ARCH_BCM_NSP || COMPILE_TEST)
+ depends on OF
+ select PINMUX
+ select GENERIC_PINCONF
+ default ARCH_BCM_NSP
+ help
+ Say yes here to enable the Broadcom NSP SoC IOMUX driver.
+
+ The Broadcom Northstar Plus IOMUX driver supports pin-based IOMUX
+ configuration; certain individual pins can be overridden to the
+ GPIO function.
diff --git a/drivers/pinctrl/bcm/Makefile b/drivers/pinctrl/bcm/Makefile
index 3861a1c1f..2a65111f3 100644
--- a/drivers/pinctrl/bcm/Makefile
+++ b/drivers/pinctrl/bcm/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_PINCTRL_IPROC_GPIO) += pinctrl-iproc-gpio.o
obj-$(CONFIG_PINCTRL_CYGNUS_MUX) += pinctrl-cygnus-mux.o
obj-$(CONFIG_PINCTRL_NSP_GPIO) += pinctrl-nsp-gpio.o
obj-$(CONFIG_PINCTRL_NS2_MUX) += pinctrl-ns2-mux.o
+obj-$(CONFIG_PINCTRL_NSP_MUX) += pinctrl-nsp-mux.o
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
index 3670f5ea7..7f7700716 100644
--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
@@ -66,6 +66,14 @@
#define GPIO_DRV_STRENGTH_BITS 3
#define GPIO_DRV_STRENGTH_BIT_MASK ((1 << GPIO_DRV_STRENGTH_BITS) - 1)
+enum iproc_pinconf_param {
+ IPROC_PINCONF_DRIVE_STRENGTH = 0,
+ IPROC_PINCONF_BIAS_DISABLE,
+ IPROC_PINCONF_BIAS_PULL_UP,
+ IPROC_PINCONF_BIAS_PULL_DOWN,
+ IPROC_PINCON_MAX,
+};
+
/*
* Iproc GPIO core
*
@@ -78,6 +86,10 @@
* @num_banks: number of GPIO banks, each bank supports up to 32 GPIOs
* @pinmux_is_supported: flag to indicate this GPIO controller contains pins
* that can be individually muxed to GPIO
+ * @pinconf_disable: contains a list of PINCONF parameters that need to be
+ * disabled
+ * @nr_pinconf_disable: total number of PINCONF parameters that need to be
+ * disabled
* @pctl: pointer to pinctrl_dev
* @pctldesc: pinctrl descriptor
*/
@@ -94,6 +106,9 @@ struct iproc_gpio {
bool pinmux_is_supported;
+ enum pin_config_param *pinconf_disable;
+ unsigned int nr_pinconf_disable;
+
struct pinctrl_dev *pctl;
struct pinctrl_desc pctldesc;
};
@@ -360,6 +375,65 @@ static int iproc_gpio_get(struct gpio_chip *gc, unsigned gpio)
return !!(readl(chip->base + offset) & BIT(shift));
}
+/*
+ * Mapping of the iProc PINCONF parameters to the generic pin configuration
+ * parameters
+ */
+static const enum pin_config_param iproc_pinconf_disable_map[] = {
+ [IPROC_PINCONF_DRIVE_STRENGTH] = PIN_CONFIG_DRIVE_STRENGTH,
+ [IPROC_PINCONF_BIAS_DISABLE] = PIN_CONFIG_BIAS_DISABLE,
+ [IPROC_PINCONF_BIAS_PULL_UP] = PIN_CONFIG_BIAS_PULL_UP,
+ [IPROC_PINCONF_BIAS_PULL_DOWN] = PIN_CONFIG_BIAS_PULL_DOWN,
+};
+
+static bool iproc_pinconf_param_is_disabled(struct iproc_gpio *chip,
+ enum pin_config_param param)
+{
+ unsigned int i;
+
+ if (!chip->nr_pinconf_disable)
+ return false;
+
+ for (i = 0; i < chip->nr_pinconf_disable; i++)
+ if (chip->pinconf_disable[i] == param)
+ return true;
+
+ return false;
+}
+
+static int iproc_pinconf_disable_map_create(struct iproc_gpio *chip,
+ unsigned long disable_mask)
+{
+ unsigned int map_size = ARRAY_SIZE(iproc_pinconf_disable_map);
+ unsigned int bit, nbits = 0;
+
+ /* figure out total number of PINCONF parameters to disable */
+ for_each_set_bit(bit, &disable_mask, map_size)
+ nbits++;
+
+ if (!nbits)
+ return 0;
+
+ /*
+ * Allocate an array to store PINCONF parameters that need to be
+ * disabled
+ */
+ chip->pinconf_disable = devm_kcalloc(chip->dev, nbits,
+ sizeof(*chip->pinconf_disable),
+ GFP_KERNEL);
+ if (!chip->pinconf_disable)
+ return -ENOMEM;
+
+ chip->nr_pinconf_disable = nbits;
+
+ /* now store these parameters */
+ nbits = 0;
+ for_each_set_bit(bit, &disable_mask, map_size)
+ chip->pinconf_disable[nbits++] = iproc_pinconf_disable_map[bit];
+
+ return 0;
+}
+
static int iproc_get_groups_count(struct pinctrl_dev *pctldev)
{
return 1;
@@ -500,6 +574,9 @@ static int iproc_pin_config_get(struct pinctrl_dev *pctldev, unsigned pin,
bool disable, pull_up;
int ret;
+ if (iproc_pinconf_param_is_disabled(chip, param))
+ return -ENOTSUPP;
+
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
iproc_gpio_get_pull(chip, gpio, &disable, &pull_up);
@@ -548,6 +625,10 @@ static int iproc_pin_config_set(struct pinctrl_dev *pctldev, unsigned pin,
for (i = 0; i < num_configs; i++) {
param = pinconf_to_config_param(configs[i]);
+
+ if (iproc_pinconf_param_is_disabled(chip, param))
+ return -ENOTSUPP;
+
arg = pinconf_to_config_argument(configs[i]);
switch (param) {
@@ -633,11 +714,13 @@ static int iproc_gpio_register_pinconf(struct iproc_gpio *chip)
}
static const struct of_device_id iproc_gpio_of_match[] = {
+ { .compatible = "brcm,iproc-gpio" },
{ .compatible = "brcm,cygnus-ccm-gpio" },
{ .compatible = "brcm,cygnus-asiu-gpio" },
{ .compatible = "brcm,cygnus-crmu-gpio" },
- { .compatible = "brcm,iproc-gpio" },
- { }
+ { .compatible = "brcm,iproc-nsp-gpio" },
+ { .compatible = "brcm,iproc-stingray-gpio" },
+ { /* sentinel */ }
};
static int iproc_gpio_probe(struct platform_device *pdev)
@@ -646,8 +729,17 @@ static int iproc_gpio_probe(struct platform_device *pdev)
struct resource *res;
struct iproc_gpio *chip;
struct gpio_chip *gc;
- u32 ngpios;
+ u32 ngpios, pinconf_disable_mask = 0;
int irq, ret;
+ bool no_pinconf = false;
+
+ /* NSP does not support drive strength config */
+ if (of_device_is_compatible(dev->of_node, "brcm,iproc-nsp-gpio"))
+ pinconf_disable_mask = BIT(IPROC_PINCONF_DRIVE_STRENGTH);
+ /* Stingray does not support pinconf in this controller */
+ else if (of_device_is_compatible(dev->of_node,
+ "brcm,iproc-stingray-gpio"))
+ no_pinconf = true;
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
@@ -702,10 +794,22 @@ static int iproc_gpio_probe(struct platform_device *pdev)
return ret;
}
- ret = iproc_gpio_register_pinconf(chip);
- if (ret) {
- dev_err(dev, "unable to register pinconf\n");
- goto err_rm_gpiochip;
+ if (!no_pinconf) {
+ ret = iproc_gpio_register_pinconf(chip);
+ if (ret) {
+ dev_err(dev, "unable to register pinconf\n");
+ goto err_rm_gpiochip;
+ }
+
+ if (pinconf_disable_mask) {
+ ret = iproc_pinconf_disable_map_create(chip,
+ pinconf_disable_mask);
+ if (ret) {
+ dev_err(dev,
+ "unable to create pinconf disable map\n");
+ goto err_rm_gpiochip;
+ }
+ }
}
/* optional GPIO interrupt support */
diff --git a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
index 3fefd14ac..ca817896e 100644
--- a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
@@ -1044,10 +1044,8 @@ static int ns2_pinmux_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pinctrl->base0 = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(pinctrl->base0)) {
- dev_err(&pdev->dev, "unable to map I/O space\n");
+ if (IS_ERR(pinctrl->base0))
return PTR_ERR(pinctrl->base0);
- }
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
pinctrl->base1 = devm_ioremap_nocache(&pdev->dev, res->start,
@@ -1059,10 +1057,8 @@ static int ns2_pinmux_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
pinctrl->pinconf_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(pinctrl->pinconf_base)) {
- dev_err(&pdev->dev, "unable to map I/O space\n");
+ if (IS_ERR(pinctrl->pinconf_base))
return PTR_ERR(pinctrl->pinconf_base);
- }
ret = ns2_mux_log_init(pinctrl);
if (ret) {
@@ -1089,9 +1085,9 @@ static int ns2_pinmux_probe(struct platform_device *pdev)
pinctrl->pctl = pinctrl_register(&ns2_pinctrl_desc, &pdev->dev,
pinctrl);
- if (!pinctrl->pctl) {
+ if (IS_ERR(pinctrl->pctl)) {
dev_err(&pdev->dev, "unable to register IOMUX pinctrl\n");
- return -EINVAL;
+ return PTR_ERR(pinctrl->pctl);
}
return 0;
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
index a8b37a9a8..35783db1c 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
@@ -458,13 +458,15 @@ static int nsp_gpio_get_strength(struct nsp_gpio *chip, unsigned gpio,
return 0;
}
-int nsp_pin_config_group_get(struct pinctrl_dev *pctldev, unsigned selector,
+static int nsp_pin_config_group_get(struct pinctrl_dev *pctldev,
+ unsigned selector,
unsigned long *config)
{
return 0;
}
-int nsp_pin_config_group_set(struct pinctrl_dev *pctldev, unsigned selector,
+static int nsp_pin_config_group_set(struct pinctrl_dev *pctldev,
+ unsigned selector,
unsigned long *configs, unsigned num_configs)
{
return 0;
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
new file mode 100644
index 000000000..4149db309
--- /dev/null
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
@@ -0,0 +1,642 @@
+/* Copyright (C) 2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file contains the Northstar Plus (NSP) IOMUX driver, which supports
+ * group-based PINMUX configuration. The Northstar Plus IOMUX controller
+ * allows pins to be individually muxed to the GPIO function. NAND and MMC
+ * are group-based selections. The gpio_a pins 8 - 11 are muxed with gpio_b
+ * and pwm; to select PWM, one needs to enable the corresponding gpio_b as
+ * well.
+ *
+ * gpio_a (8 - 11)
+ * +----------
+ * |
+ * gpio_a (8-11) | gpio_b (0 - 3)
+ * ------------------------+-------+----------
+ * |
+ * | pwm (0 - 3)
+ * +----------
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "../core.h"
+#include "../pinctrl-utils.h"
+
+#define NSP_MUX_BASE0 0x00
+#define NSP_MUX_BASE1 0x01
+#define NSP_MUX_BASE2 0x02
+/*
+ * nsp IOMUX register description
+ *
+ * @base: base 0 or base 1
+ * @shift: bit shift for mux configuration of a group
+ * @mask: bit mask of the function
+ * @alt: alternate function to set to
+ */
+struct nsp_mux {
+ unsigned int base;
+ unsigned int shift;
+ unsigned int mask;
+ unsigned int alt;
+};
+
+/*
+ * Keep track of nsp IOMUX configuration and prevent double configuration
+ *
+ * @nsp_mux: nsp IOMUX register description
+ * @is_configured: flag to indicate whether a mux setting has already been
+ * configured
+ */
+struct nsp_mux_log {
+ struct nsp_mux mux;
+ bool is_configured;
+};
+
+/*
+ * Group based IOMUX configuration
+ *
+ * @name: name of the group
+ * @pins: array of pins used by this group
+ * @num_pins: total number of pins used by this group
+ * @mux: nsp group based IOMUX configuration
+ */
+struct nsp_pin_group {
+ const char *name;
+ const unsigned int *pins;
+ const unsigned int num_pins;
+ const struct nsp_mux mux;
+};
+
+/*
+ * nsp mux function and supported pin groups
+ *
+ * @name: name of the function
+ * @groups: array of groups that can be supported by this function
+ * @num_groups: total number of groups that can be supported by this function
+ */
+struct nsp_pin_function {
+ const char *name;
+ const char * const *groups;
+ const unsigned int num_groups;
+};
+
+/*
+ * nsp IOMUX pinctrl core
+ *
+ * @pctl: pointer to pinctrl_dev
+ * @dev: pointer to device
+ * @base0: first mux register
+ * @base1: second mux register
+ * @base2: third mux register
+ * @groups: pointer to array of groups
+ * @num_groups: total number of groups
+ * @functions: pointer to array of functions
+ * @num_functions: total number of functions
+ * @mux_log: pointer to the array of mux logs
+ * @lock: lock to protect register access
+ */
+struct nsp_pinctrl {
+ struct pinctrl_dev *pctl;
+ struct device *dev;
+ void __iomem *base0;
+ void __iomem *base1;
+ void __iomem *base2;
+ const struct nsp_pin_group *groups;
+ unsigned int num_groups;
+ const struct nsp_pin_function *functions;
+ unsigned int num_functions;
+ struct nsp_mux_log *mux_log;
+ spinlock_t lock;
+};
+
+/*
+ * Description of a pin in nsp
+ *
+ * @pin: pin number
+ * @name: pin name
+ * @gpio_select: reg data to select GPIO
+ */
+struct nsp_pin {
+ unsigned int pin;
+ char *name;
+ unsigned int gpio_select;
+};
+
+#define NSP_PIN_DESC(p, n, g) \
+{ \
+ .pin = p, \
+ .name = n, \
+ .gpio_select = g, \
+}
+
+/*
+ * List of muxable pins in nsp
+ */
+static struct nsp_pin nsp_pins[] = {
+ NSP_PIN_DESC(0, "spi_clk", 1),
+ NSP_PIN_DESC(1, "spi_ss", 1),
+ NSP_PIN_DESC(2, "spi_mosi", 1),
+ NSP_PIN_DESC(3, "spi_miso", 1),
+ NSP_PIN_DESC(4, "scl", 1),
+ NSP_PIN_DESC(5, "sda", 1),
+ NSP_PIN_DESC(6, "mdc", 1),
+ NSP_PIN_DESC(7, "mdio", 1),
+ NSP_PIN_DESC(8, "pwm0", 1),
+ NSP_PIN_DESC(9, "pwm1", 1),
+ NSP_PIN_DESC(10, "pwm2", 1),
+ NSP_PIN_DESC(11, "pwm3", 1),
+ NSP_PIN_DESC(12, "uart1_rx", 1),
+ NSP_PIN_DESC(13, "uart1_tx", 1),
+ NSP_PIN_DESC(14, "uart1_cts", 1),
+ NSP_PIN_DESC(15, "uart1_rts", 1),
+ NSP_PIN_DESC(16, "uart2_rx", 1),
+ NSP_PIN_DESC(17, "uart2_tx", 1),
+ NSP_PIN_DESC(18, "synce", 0),
+ NSP_PIN_DESC(19, "sata0_led", 0),
+ NSP_PIN_DESC(20, "sata1_led", 0),
+ NSP_PIN_DESC(21, "xtal_out", 1),
+ NSP_PIN_DESC(22, "sdio_pwr", 1),
+ NSP_PIN_DESC(23, "sdio_en_1p8v", 1),
+ NSP_PIN_DESC(24, "gpio_24", 1),
+ NSP_PIN_DESC(25, "gpio_25", 1),
+ NSP_PIN_DESC(26, "p5_led0", 0),
+ NSP_PIN_DESC(27, "p5_led1", 0),
+ NSP_PIN_DESC(28, "gpio_28", 1),
+ NSP_PIN_DESC(29, "gpio_29", 1),
+ NSP_PIN_DESC(30, "gpio_30", 1),
+ NSP_PIN_DESC(31, "gpio_31", 1),
+ NSP_PIN_DESC(32, "nand_ale", 0),
+ NSP_PIN_DESC(33, "nand_ce0", 0),
+ NSP_PIN_DESC(34, "nand_r/b", 0),
+ NSP_PIN_DESC(35, "nand_dq0", 0),
+ NSP_PIN_DESC(36, "nand_dq1", 0),
+ NSP_PIN_DESC(37, "nand_dq2", 0),
+ NSP_PIN_DESC(38, "nand_dq3", 0),
+ NSP_PIN_DESC(39, "nand_dq4", 0),
+ NSP_PIN_DESC(40, "nand_dq5", 0),
+ NSP_PIN_DESC(41, "nand_dq6", 0),
+ NSP_PIN_DESC(42, "nand_dq7", 0),
+};
+
+/*
+ * List of groups of pins
+ */
+
+static const unsigned int spi_pins[] = {0, 1, 2, 3};
+static const unsigned int i2c_pins[] = {4, 5};
+static const unsigned int mdio_pins[] = {6, 7};
+static const unsigned int pwm0_pins[] = {8};
+static const unsigned int gpio_b_0_pins[] = {8};
+static const unsigned int pwm1_pins[] = {9};
+static const unsigned int gpio_b_1_pins[] = {9};
+static const unsigned int pwm2_pins[] = {10};
+static const unsigned int gpio_b_2_pins[] = {10};
+static const unsigned int pwm3_pins[] = {11};
+static const unsigned int gpio_b_3_pins[] = {11};
+static const unsigned int uart1_pins[] = {12, 13, 14, 15};
+static const unsigned int uart2_pins[] = {16, 17};
+static const unsigned int synce_pins[] = {18};
+static const unsigned int sata0_led_pins[] = {19};
+static const unsigned int sata1_led_pins[] = {20};
+static const unsigned int xtal_out_pins[] = {21};
+static const unsigned int sdio_pwr_pins[] = {22};
+static const unsigned int sdio_1p8v_pins[] = {23};
+static const unsigned int switch_p05_led0_pins[] = {26};
+static const unsigned int switch_p05_led1_pins[] = {27};
+static const unsigned int nand_pins[] = {32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42};
+static const unsigned int emmc_pins[] = {32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42};
+
+#define NSP_PIN_GROUP(group_name, ba, sh, ma, al) \
+{ \
+ .name = __stringify(group_name) "_grp", \
+ .pins = group_name ## _pins, \
+ .num_pins = ARRAY_SIZE(group_name ## _pins), \
+ .mux = { \
+ .base = ba, \
+ .shift = sh, \
+ .mask = ma, \
+ .alt = al, \
+ } \
+}
+
+/*
+ * List of nsp pin groups
+ */
+static const struct nsp_pin_group nsp_pin_groups[] = {
+ NSP_PIN_GROUP(spi, NSP_MUX_BASE0, 0, 0x0f, 0x00),
+ NSP_PIN_GROUP(i2c, NSP_MUX_BASE0, 3, 0x03, 0x00),
+ NSP_PIN_GROUP(mdio, NSP_MUX_BASE0, 5, 0x03, 0x00),
+ NSP_PIN_GROUP(gpio_b_0, NSP_MUX_BASE0, 7, 0x01, 0x00),
+ NSP_PIN_GROUP(pwm0, NSP_MUX_BASE1, 0, 0x01, 0x01),
+ NSP_PIN_GROUP(gpio_b_1, NSP_MUX_BASE0, 8, 0x01, 0x00),
+ NSP_PIN_GROUP(pwm1, NSP_MUX_BASE1, 1, 0x01, 0x01),
+ NSP_PIN_GROUP(gpio_b_2, NSP_MUX_BASE0, 9, 0x01, 0x00),
+ NSP_PIN_GROUP(pwm2, NSP_MUX_BASE1, 2, 0x01, 0x01),
+ NSP_PIN_GROUP(gpio_b_3, NSP_MUX_BASE0, 10, 0x01, 0x00),
+ NSP_PIN_GROUP(pwm3, NSP_MUX_BASE1, 3, 0x01, 0x01),
+ NSP_PIN_GROUP(uart1, NSP_MUX_BASE0, 11, 0x0f, 0x00),
+ NSP_PIN_GROUP(uart2, NSP_MUX_BASE0, 15, 0x03, 0x00),
+ NSP_PIN_GROUP(synce, NSP_MUX_BASE0, 17, 0x01, 0x01),
+ NSP_PIN_GROUP(sata0_led, NSP_MUX_BASE0, 18, 0x01, 0x01),
+ NSP_PIN_GROUP(sata1_led, NSP_MUX_BASE0, 19, 0x01, 0x01),
+ NSP_PIN_GROUP(xtal_out, NSP_MUX_BASE0, 20, 0x01, 0x00),
+ NSP_PIN_GROUP(sdio_pwr, NSP_MUX_BASE0, 21, 0x01, 0x00),
+ NSP_PIN_GROUP(sdio_1p8v, NSP_MUX_BASE0, 22, 0x01, 0x00),
+ NSP_PIN_GROUP(switch_p05_led0, NSP_MUX_BASE0, 26, 0x01, 0x01),
+ NSP_PIN_GROUP(switch_p05_led1, NSP_MUX_BASE0, 27, 0x01, 0x01),
+ NSP_PIN_GROUP(nand, NSP_MUX_BASE2, 0, 0x01, 0x00),
+ NSP_PIN_GROUP(emmc, NSP_MUX_BASE2, 0, 0x01, 0x01)
+};
+
+/*
+ * List of groups supported by functions
+ */
+
+static const char * const spi_grps[] = {"spi_grp"};
+static const char * const i2c_grps[] = {"i2c_grp"};
+static const char * const mdio_grps[] = {"mdio_grp"};
+static const char * const pwm_grps[] = {"pwm0_grp", "pwm1_grp", "pwm2_grp",
+ "pwm3_grp"};
+static const char * const gpio_b_grps[] = {"gpio_b_0_grp", "gpio_b_1_grp",
+ "gpio_b_2_grp", "gpio_b_3_grp"};
+static const char * const uart1_grps[] = {"uart1_grp"};
+static const char * const uart2_grps[] = {"uart2_grp"};
+static const char * const synce_grps[] = {"synce_grp"};
+static const char * const sata_led_grps[] = {"sata0_led_grp", "sata1_led_grp"};
+static const char * const xtal_out_grps[] = {"xtal_out_grp"};
+static const char * const sdio_grps[] = {"sdio_pwr_grp", "sdio_1p8v_grp"};
+static const char * const switch_led_grps[] = {"switch_p05_led0_grp",
+ "switch_p05_led1_grp"};
+static const char * const nand_grps[] = {"nand_grp"};
+static const char * const emmc_grps[] = {"emmc_grp"};
+
+#define NSP_PIN_FUNCTION(func) \
+{ \
+ .name = #func, \
+ .groups = func ## _grps, \
+ .num_groups = ARRAY_SIZE(func ## _grps), \
+}
+
+/*
+ * List of supported functions in nsp
+ */
+static const struct nsp_pin_function nsp_pin_functions[] = {
+ NSP_PIN_FUNCTION(spi),
+ NSP_PIN_FUNCTION(i2c),
+ NSP_PIN_FUNCTION(mdio),
+ NSP_PIN_FUNCTION(pwm),
+ NSP_PIN_FUNCTION(gpio_b),
+ NSP_PIN_FUNCTION(uart1),
+ NSP_PIN_FUNCTION(uart2),
+ NSP_PIN_FUNCTION(synce),
+ NSP_PIN_FUNCTION(sata_led),
+ NSP_PIN_FUNCTION(xtal_out),
+ NSP_PIN_FUNCTION(sdio),
+ NSP_PIN_FUNCTION(switch_led),
+ NSP_PIN_FUNCTION(nand),
+ NSP_PIN_FUNCTION(emmc)
+};
+
+static int nsp_get_groups_count(struct pinctrl_dev *pctrl_dev)
+{
+ struct nsp_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+
+ return pinctrl->num_groups;
+}
+
+static const char *nsp_get_group_name(struct pinctrl_dev *pctrl_dev,
+ unsigned int selector)
+{
+ struct nsp_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+
+ return pinctrl->groups[selector].name;
+}
+
+static int nsp_get_group_pins(struct pinctrl_dev *pctrl_dev,
+ unsigned int selector, const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ struct nsp_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+
+ *pins = pinctrl->groups[selector].pins;
+ *num_pins = pinctrl->groups[selector].num_pins;
+
+ return 0;
+}
+
+static void nsp_pin_dbg_show(struct pinctrl_dev *pctrl_dev,
+ struct seq_file *s, unsigned int offset)
+{
+ seq_printf(s, " %s", dev_name(pctrl_dev->dev));
+}
+
+static struct pinctrl_ops nsp_pinctrl_ops = {
+ .get_groups_count = nsp_get_groups_count,
+ .get_group_name = nsp_get_group_name,
+ .get_group_pins = nsp_get_group_pins,
+ .pin_dbg_show = nsp_pin_dbg_show,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_group,
+ .dt_free_map = pinctrl_utils_free_map,
+};
+
+static int nsp_get_functions_count(struct pinctrl_dev *pctrl_dev)
+{
+ struct nsp_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+
+ return pinctrl->num_functions;
+}
+
+static const char *nsp_get_function_name(struct pinctrl_dev *pctrl_dev,
+ unsigned int selector)
+{
+ struct nsp_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+
+ return pinctrl->functions[selector].name;
+}
+
+static int nsp_get_function_groups(struct pinctrl_dev *pctrl_dev,
+ unsigned int selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct nsp_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+
+ *groups = pinctrl->functions[selector].groups;
+ *num_groups = pinctrl->functions[selector].num_groups;
+
+ return 0;
+}
+
+static int nsp_pinmux_set(struct nsp_pinctrl *pinctrl,
+ const struct nsp_pin_function *func,
+ const struct nsp_pin_group *grp,
+ struct nsp_mux_log *mux_log)
+{
+ const struct nsp_mux *mux = &grp->mux;
+ int i;
+ u32 val, mask;
+ unsigned long flags;
+ void __iomem *base_address;
+
+ for (i = 0; i < pinctrl->num_groups; i++) {
+ if ((mux->shift != mux_log[i].mux.shift) ||
+ (mux->base != mux_log[i].mux.base))
+ continue;
+
+ /* if this is a new configuration, just do it! */
+ if (!mux_log[i].is_configured)
+ break;
+
+ /*
+ * The IOMUX was configured previously and a different function
+ * is now being requested for it
+ */
+ if (mux_log[i].mux.alt != mux->alt) {
+ dev_err(pinctrl->dev,
+ "double configuration error detected!\n");
+ dev_err(pinctrl->dev, "func:%s grp:%s\n",
+ func->name, grp->name);
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+ if (i == pinctrl->num_groups)
+ return -EINVAL;
+
+ mask = mux->mask;
+ mux_log[i].mux.alt = mux->alt;
+ mux_log[i].is_configured = true;
+
+ switch (mux->base) {
+ case NSP_MUX_BASE0:
+ base_address = pinctrl->base0;
+ break;
+
+ case NSP_MUX_BASE1:
+ base_address = pinctrl->base1;
+ break;
+
+ case NSP_MUX_BASE2:
+ base_address = pinctrl->base2;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&pinctrl->lock, flags);
+ val = readl(base_address);
+ val &= ~(mask << grp->mux.shift);
+ val |= grp->mux.alt << grp->mux.shift;
+ writel(val, base_address);
+ spin_unlock_irqrestore(&pinctrl->lock, flags);
+
+ return 0;
+}
+
+static int nsp_pinmux_enable(struct pinctrl_dev *pctrl_dev,
+ unsigned int func_select, unsigned int grp_select)
+{
+ struct nsp_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+ const struct nsp_pin_function *func;
+ const struct nsp_pin_group *grp;
+
+ if (grp_select >= pinctrl->num_groups ||
+ func_select >= pinctrl->num_functions)
+ return -EINVAL;
+
+ func = &pinctrl->functions[func_select];
+ grp = &pinctrl->groups[grp_select];
+
+ dev_dbg(pctrl_dev->dev, "func:%u name:%s grp:%u name:%s\n",
+ func_select, func->name, grp_select, grp->name);
+
+ dev_dbg(pctrl_dev->dev, "shift:%u alt:%u\n", grp->mux.shift,
+ grp->mux.alt);
+
+ return nsp_pinmux_set(pinctrl, func, grp, pinctrl->mux_log);
+}
+
+
+static int nsp_gpio_request_enable(struct pinctrl_dev *pctrl_dev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin)
+{
+ struct nsp_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+ u32 *gpio_select = pctrl_dev->desc->pins[pin].drv_data;
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pinctrl->lock, flags);
+ val = readl(pinctrl->base0);
+ if ((val & BIT(pin)) != (*gpio_select << pin)) {
+ val &= ~BIT(pin);
+ val |= *gpio_select << pin;
+ writel(val, pinctrl->base0);
+ }
+ spin_unlock_irqrestore(&pinctrl->lock, flags);
+
+ return 0;
+}
+
+static void nsp_gpio_disable_free(struct pinctrl_dev *pctrl_dev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin)
+{
+ struct nsp_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+ u32 *gpio_select = pctrl_dev->desc->pins[pin].drv_data;
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pinctrl->lock, flags);
+ val = readl(pinctrl->base0);
+ if ((val & BIT(pin)) == (*gpio_select << pin)) {
+ val &= ~BIT(pin);
+ if (!(*gpio_select))
+ val |= BIT(pin);
+ writel(val, pinctrl->base0);
+ }
+ spin_unlock_irqrestore(&pinctrl->lock, flags);
+}
+
+static struct pinmux_ops nsp_pinmux_ops = {
+ .get_functions_count = nsp_get_functions_count,
+ .get_function_name = nsp_get_function_name,
+ .get_function_groups = nsp_get_function_groups,
+ .set_mux = nsp_pinmux_enable,
+ .gpio_request_enable = nsp_gpio_request_enable,
+ .gpio_disable_free = nsp_gpio_disable_free,
+};
+
+static struct pinctrl_desc nsp_pinctrl_desc = {
+ .name = "nsp-pinmux",
+ .pctlops = &nsp_pinctrl_ops,
+ .pmxops = &nsp_pinmux_ops,
+};
+
+static int nsp_mux_log_init(struct nsp_pinctrl *pinctrl)
+{
+ struct nsp_mux_log *log;
+ unsigned int i;
+ u32 no_of_groups = ARRAY_SIZE(nsp_pin_groups);
+
+ pinctrl->mux_log = devm_kcalloc(pinctrl->dev, no_of_groups,
+ sizeof(struct nsp_mux_log),
+ GFP_KERNEL);
+ if (!pinctrl->mux_log)
+ return -ENOMEM;
+
+ for (i = 0; i < no_of_groups; i++) {
+ log = &pinctrl->mux_log[i];
+ log->mux.base = nsp_pin_groups[i].mux.base;
+ log->mux.shift = nsp_pin_groups[i].mux.shift;
+ log->mux.alt = 0;
+ log->is_configured = false;
+ }
+
+ return 0;
+}
+
+static int nsp_pinmux_probe(struct platform_device *pdev)
+{
+ struct nsp_pinctrl *pinctrl;
+ struct resource *res;
+ int i, ret;
+ struct pinctrl_pin_desc *pins;
+ unsigned int num_pins = ARRAY_SIZE(nsp_pins);
+
+ pinctrl = devm_kzalloc(&pdev->dev, sizeof(*pinctrl), GFP_KERNEL);
+ if (!pinctrl)
+ return -ENOMEM;
+ pinctrl->dev = &pdev->dev;
+ platform_set_drvdata(pdev, pinctrl);
+ spin_lock_init(&pinctrl->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pinctrl->base0 = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pinctrl->base0))
+ return PTR_ERR(pinctrl->base0);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res)
+ return -EINVAL;
+ pinctrl->base1 = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (!pinctrl->base1) {
+ dev_err(&pdev->dev, "unable to map I/O space\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ pinctrl->base2 = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pinctrl->base2))
+ return PTR_ERR(pinctrl->base2);
+
+ ret = nsp_mux_log_init(pinctrl);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to initialize IOMUX log\n");
+ return ret;
+ }
+
+ pins = devm_kcalloc(&pdev->dev, num_pins, sizeof(*pins), GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ for (i = 0; i < num_pins; i++) {
+ pins[i].number = nsp_pins[i].pin;
+ pins[i].name = nsp_pins[i].name;
+ pins[i].drv_data = &nsp_pins[i].gpio_select;
+ }
+
+ pinctrl->groups = nsp_pin_groups;
+ pinctrl->num_groups = ARRAY_SIZE(nsp_pin_groups);
+ pinctrl->functions = nsp_pin_functions;
+ pinctrl->num_functions = ARRAY_SIZE(nsp_pin_functions);
+ nsp_pinctrl_desc.pins = pins;
+ nsp_pinctrl_desc.npins = num_pins;
+
+ pinctrl->pctl = devm_pinctrl_register(&pdev->dev, &nsp_pinctrl_desc,
+ pinctrl);
+ if (IS_ERR(pinctrl->pctl)) {
+ dev_err(&pdev->dev, "unable to register nsp IOMUX pinctrl\n");
+ return PTR_ERR(pinctrl->pctl);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id nsp_pinmux_of_match[] = {
+ { .compatible = "brcm,nsp-pinmux" },
+ { }
+};
+
+static struct platform_driver nsp_pinmux_driver = {
+ .driver = {
+ .name = "nsp-pinmux",
+ .of_match_table = nsp_pinmux_of_match,
+ },
+ .probe = nsp_pinmux_probe,
+};
+
+static int __init nsp_pinmux_init(void)
+{
+ return platform_driver_register(&nsp_pinmux_driver);
+}
+arch_initcall(nsp_pinmux_init);
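
At its heart, the IOMUX write in nsp_pinmux_set() above is a read-modify-write of one register field under the driver's spinlock. For reference, a minimal standalone sketch of just the bit manipulation (plain C with a u32 standing in for the MMIO register; the shift/mask/alt names mirror struct nsp_mux):

#include <stdint.h>
#include <stdio.h>

/* Clear the function field at 'shift' and install the new alternate
 * function, as nsp_pinmux_set() does between readl() and writel(). */
static uint32_t mux_update(uint32_t reg, unsigned int shift,
			   uint32_t mask, uint32_t alt)
{
	reg &= ~(mask << shift);	/* clear the old function bits */
	reg |= alt << shift;		/* select the new function */
	return reg;
}

int main(void)
{
	/* pwm0 from the group table above: shift 0, mask 0x01, alt 0x01 */
	uint32_t reg = mux_update(0x0, 0, 0x01, 0x01);

	printf("IOMUX register now 0x%08x\n", reg);	/* 0x00000001 */
	return 0;
}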
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 98d2a1bb4..fb38e208f 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -225,13 +225,14 @@ static void pinctrl_free_pindescs(struct pinctrl_dev *pctldev,
}
static int pinctrl_register_one_pin(struct pinctrl_dev *pctldev,
- unsigned number, const char *name)
+ const struct pinctrl_pin_desc *pin)
{
struct pin_desc *pindesc;
- pindesc = pin_desc_get(pctldev, number);
+ pindesc = pin_desc_get(pctldev, pin->number);
if (pindesc != NULL) {
- dev_err(pctldev->dev, "pin %d already registered\n", number);
+ dev_err(pctldev->dev, "pin %d already registered\n",
+ pin->number);
return -EINVAL;
}
@@ -245,10 +246,10 @@ static int pinctrl_register_one_pin(struct pinctrl_dev *pctldev,
pindesc->pctldev = pctldev;
/* Copy basic pin info */
- if (name) {
- pindesc->name = name;
+ if (pin->name) {
+ pindesc->name = pin->name;
} else {
- pindesc->name = kasprintf(GFP_KERNEL, "PIN%u", number);
+ pindesc->name = kasprintf(GFP_KERNEL, "PIN%u", pin->number);
if (pindesc->name == NULL) {
kfree(pindesc);
return -ENOMEM;
@@ -256,9 +257,11 @@ static int pinctrl_register_one_pin(struct pinctrl_dev *pctldev,
pindesc->dynamic_name = true;
}
- radix_tree_insert(&pctldev->pin_desc_tree, number, pindesc);
+ pindesc->drv_data = pin->drv_data;
+
+ radix_tree_insert(&pctldev->pin_desc_tree, pin->number, pindesc);
pr_debug("registered pin %d (%s) on %s\n",
- number, pindesc->name, pctldev->desc->name);
+ pin->number, pindesc->name, pctldev->desc->name);
return 0;
}
@@ -270,8 +273,7 @@ static int pinctrl_register_pins(struct pinctrl_dev *pctldev,
int ret = 0;
for (i = 0; i < num_descs; i++) {
- ret = pinctrl_register_one_pin(pctldev,
- pins[i].number, pins[i].name);
+ ret = pinctrl_register_one_pin(pctldev, &pins[i]);
if (ret)
return ret;
}
@@ -1367,8 +1369,7 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
if (desc == NULL)
continue;
- seq_printf(s, "pin %d (%s) ", pin,
- desc->name ? desc->name : "unnamed");
+ seq_printf(s, "pin %d (%s) ", pin, desc->name);
/* Driver-specific info per pin */
if (ops->pin_dbg_show)
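
The core.c hunk above adds a drv_data pointer that the core carries from struct pinctrl_pin_desc into its internal struct pin_desc without ever interpreting it; the nsp driver earlier in this diff uses it to attach a per-pin gpio_select value. A self-contained sketch of the pointer-threading pattern (the *_sketch types are illustrative stand-ins, not kernel structures):

#include <stdint.h>
#include <stdio.h>

struct pin_desc_sketch {		/* stands in for pinctrl_pin_desc */
	unsigned int number;
	const char *name;
	void *drv_data;			/* opaque to the core */
};

struct nsp_pin_sketch {			/* stands in for the nsp pin table */
	unsigned int pin;
	const char *name;
	uint32_t gpio_select;		/* per-pin driver state */
};

int main(void)
{
	struct nsp_pin_sketch nsp_pin = { 8, "gpio_b_0", 1 };
	struct pin_desc_sketch desc;

	/* Registration time: publish per-pin state through drv_data. */
	desc.number = nsp_pin.pin;
	desc.name = nsp_pin.name;
	desc.drv_data = &nsp_pin.gpio_select;

	/* Callback time: the driver gets the same pointer back. */
	uint32_t *gpio_select = desc.drv_data;

	printf("pin %u (%s): gpio_select=%u\n",
	       desc.number, desc.name, *gpio_select);
	return 0;
}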
diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
index ca08723b9..747c423c1 100644
--- a/drivers/pinctrl/core.h
+++ b/drivers/pinctrl/core.h
@@ -134,6 +134,7 @@ struct pinctrl_setting {
* @name: a name for the pin, e.g. the name of the pin/pad/finger on a
* datasheet or such
* @dynamic_name: if the name of this pin was dynamically allocated
+ * @drv_data: driver-defined per-pin data. pinctrl core does not touch this
* @mux_usecount: If zero, the pin is not claimed, and @owner should be NULL.
* If non-zero, this pin is claimed by @owner. This field is an integer
* rather than a boolean, since pinctrl_get() might process multiple
@@ -148,6 +149,7 @@ struct pin_desc {
struct pinctrl_dev *pctldev;
const char *name;
bool dynamic_name;
+ void *drv_data;
/* These fields only added when supporting pinmux drivers */
#ifdef CONFIG_PINMUX
unsigned mux_usecount;
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index fe04e748d..54dad89fc 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -195,8 +195,13 @@ int pinctrl_dt_to_map(struct pinctrl *p)
propname = kasprintf(GFP_KERNEL, "pinctrl-%d", state);
prop = of_find_property(np, propname, &size);
kfree(propname);
- if (!prop)
+ if (!prop) {
+ if (state == 0) {
+ of_node_put(np);
+ return -ENODEV;
+ }
break;
+ }
list = prop->value;
size /= sizeof(*list);
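
The devicetree.c change above distinguishes a device with no pinctrl-0 property at all (now a hard -ENODEV) from one whose numbered pinctrl-N list simply ends. Condensed control flow of the loop after this hunk, for reference:

for (state = 0; ; state++) {
	/* propname is "pinctrl-%d" for the current state */
	prop = of_find_property(np, propname, &size);
	if (!prop) {
		if (state == 0) {
			of_node_put(np);
			return -ENODEV;	/* no pinctrl-0 at all */
		}
		break;			/* a missing pinctrl-N just ends the list */
	}
	/* ... parse the phandle list for this state ... */
}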
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index eccb47480..713917579 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -16,7 +16,6 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
@@ -46,7 +45,7 @@ struct imx_pinctrl {
const struct imx_pinctrl_soc_info *info;
};
-static const inline struct imx_pin_group *imx_pinctrl_find_group_by_name(
+static inline const struct imx_pin_group *imx_pinctrl_find_group_by_name(
const struct imx_pinctrl_soc_info *info,
const char *name)
{
@@ -513,13 +512,6 @@ static const struct pinconf_ops imx_pinconf_ops = {
.pin_config_group_dbg_show = imx_pinconf_group_dbg_show,
};
-static struct pinctrl_desc imx_pinctrl_desc = {
- .pctlops = &imx_pctrl_ops,
- .pmxops = &imx_pmx_ops,
- .confops = &imx_pinconf_ops,
- .owner = THIS_MODULE,
-};
-
/*
* Each pin represented in fsl,pins consists of 5 u32 PIN_FUNC_ID and
* 1 u32 CONFIG, so 24 bytes in total for each pin.
@@ -722,6 +714,7 @@ int imx_pinctrl_probe(struct platform_device *pdev,
{
struct regmap_config config = { .name = "gpr" };
struct device_node *dev_np = pdev->dev.of_node;
+ struct pinctrl_desc *imx_pinctrl_desc;
struct device_node *np;
struct imx_pinctrl *ipctl;
struct resource *res;
@@ -776,9 +769,18 @@ int imx_pinctrl_probe(struct platform_device *pdev,
}
}
- imx_pinctrl_desc.name = dev_name(&pdev->dev);
- imx_pinctrl_desc.pins = info->pins;
- imx_pinctrl_desc.npins = info->npins;
+ imx_pinctrl_desc = devm_kzalloc(&pdev->dev, sizeof(*imx_pinctrl_desc),
+ GFP_KERNEL);
+ if (!imx_pinctrl_desc)
+ return -ENOMEM;
+
+ imx_pinctrl_desc->name = dev_name(&pdev->dev);
+ imx_pinctrl_desc->pins = info->pins;
+ imx_pinctrl_desc->npins = info->npins;
+ imx_pinctrl_desc->pctlops = &imx_pctrl_ops;
+ imx_pinctrl_desc->pmxops = &imx_pmx_ops;
+ imx_pinctrl_desc->confops = &imx_pinconf_ops;
+ imx_pinctrl_desc->owner = THIS_MODULE;
ret = imx_pinctrl_probe_dt(pdev, info);
if (ret) {
@@ -789,7 +791,8 @@ int imx_pinctrl_probe(struct platform_device *pdev,
ipctl->info = info;
ipctl->dev = info->dev;
platform_set_drvdata(pdev, ipctl);
- ipctl->pctl = devm_pinctrl_register(&pdev->dev, &imx_pinctrl_desc, ipctl);
+ ipctl->pctl = devm_pinctrl_register(&pdev->dev,
+ imx_pinctrl_desc, ipctl);
if (IS_ERR(ipctl->pctl)) {
dev_err(&pdev->dev, "could not register IMX pinctrl driver\n");
return PTR_ERR(ipctl->pctl);
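
The imx change above exists because a single static struct pinctrl_desc was shared by every IOMUXC instance, so probing a second controller would overwrite the .name and .pins installed by the first. A reduced sketch of the per-device pattern (the my_* names are hypothetical; the ops structs are assumed to exist elsewhere in the driver):

static int my_pinctrl_probe(struct platform_device *pdev)
{
	struct pinctrl_desc *desc;
	struct pinctrl_dev *pctl;

	/* One descriptor per device; devm ties its lifetime to the
	 * device, so no explicit free on remove or error paths. */
	desc = devm_kzalloc(&pdev->dev, sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	desc->name = dev_name(&pdev->dev);	/* unique per instance */
	desc->pctlops = &my_pctrl_ops;
	desc->pmxops = &my_pmx_ops;
	desc->owner = THIS_MODULE;

	pctl = devm_pinctrl_register(&pdev->dev, desc, NULL);
	return PTR_ERR_OR_ZERO(pctl);
}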
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
index b4400cb19..a4e9f430d 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
@@ -19,7 +19,6 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/machine.h>
@@ -157,7 +156,7 @@ static int imx1_read_bit(struct imx1_pinctrl *ipctl, unsigned int pin_id,
return !!(readl(reg) & BIT(offset));
}
-static const inline struct imx1_pin_group *imx1_pinctrl_find_group_by_name(
+static inline const struct imx1_pin_group *imx1_pinctrl_find_group_by_name(
const struct imx1_pinctrl_soc_info *info,
const char *name)
{
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1.c b/drivers/pinctrl/freescale/pinctrl-imx1.c
index 04723455d..fc8efc748 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1.c
@@ -9,7 +9,7 @@
* (at your option) any later version.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/pinctrl.h>
@@ -262,7 +262,6 @@ static const struct of_device_id imx1_pinctrl_of_match[] = {
{ .compatible = "fsl,imx1-iomuxc", },
{ }
};
-MODULE_DEVICE_TABLE(of, imx1_pinctrl_of_match);
static struct platform_driver imx1_pinctrl_driver = {
.driver = {
@@ -270,8 +269,4 @@ static struct platform_driver imx1_pinctrl_driver = {
.of_match_table = imx1_pinctrl_of_match,
},
};
-module_platform_driver_probe(imx1_pinctrl_driver, imx1_pinctrl_probe);
-
-MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
-MODULE_DESCRIPTION("Freescale i.MX1 pinctrl driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver_probe(imx1_pinctrl_driver, imx1_pinctrl_probe);
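
This module_platform_driver_probe() to builtin_platform_driver_probe() conversion (repeated for the other i.MX parts below) drops the module exit path for drivers that are now built-in only. Roughly, the builtin helper expands to a plain initcall with no unregister counterpart; an approximate expansion, from memory of include/linux/platform_device.h:

/* builtin_platform_driver_probe(imx1_pinctrl_driver, imx1_pinctrl_probe)
 * expands to approximately: */
static int __init imx1_pinctrl_driver_init(void)
{
	return platform_driver_probe(&imx1_pinctrl_driver,
				     imx1_pinctrl_probe);
}
device_initcall(imx1_pinctrl_driver_init);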
diff --git a/drivers/pinctrl/freescale/pinctrl-imx21.c b/drivers/pinctrl/freescale/pinctrl-imx21.c
index aa1221f4d..73e26bc12 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx21.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx21.c
@@ -9,7 +9,7 @@
* (at your option) any later version.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/pinctrl.h>
@@ -325,7 +325,6 @@ static const struct of_device_id imx21_pinctrl_of_match[] = {
{ .compatible = "fsl,imx21-iomuxc", },
{ }
};
-MODULE_DEVICE_TABLE(of, imx21_pinctrl_of_match);
static struct platform_driver imx21_pinctrl_driver = {
.driver = {
@@ -333,8 +332,4 @@ static struct platform_driver imx21_pinctrl_driver = {
.of_match_table = imx21_pinctrl_of_match,
},
};
-module_platform_driver_probe(imx21_pinctrl_driver, imx21_pinctrl_probe);
-
-MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
-MODULE_DESCRIPTION("Freescale i.MX21 pinctrl driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver_probe(imx21_pinctrl_driver, imx21_pinctrl_probe);
diff --git a/drivers/pinctrl/freescale/pinctrl-imx23.c b/drivers/pinctrl/freescale/pinctrl-imx23.c
index 955cbf4f0..89b4f1601 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx23.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx23.c
@@ -1,4 +1,7 @@
/*
+ * Freescale i.MX23 pinctrl driver
+ *
+ * Author: Shawn Guo <shawn.guo@linaro.org>
* Copyright 2012 Freescale Semiconductor, Inc.
*
* The code contained herein is licensed under the GNU General Public
@@ -10,7 +13,6 @@
*/
#include <linux/init.h>
-#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
#include "pinctrl-mxs.h"
@@ -276,15 +278,14 @@ static const struct of_device_id imx23_pinctrl_of_match[] = {
{ .compatible = "fsl,imx23-pinctrl", },
{ /* sentinel */ }
};
-MODULE_DEVICE_TABLE(of, imx23_pinctrl_of_match);
static struct platform_driver imx23_pinctrl_driver = {
.driver = {
.name = "imx23-pinctrl",
+ .suppress_bind_attrs = true,
.of_match_table = imx23_pinctrl_of_match,
},
.probe = imx23_pinctrl_probe,
- .remove = mxs_pinctrl_remove,
};
static int __init imx23_pinctrl_init(void)
@@ -292,13 +293,3 @@ static int __init imx23_pinctrl_init(void)
return platform_driver_register(&imx23_pinctrl_driver);
}
postcore_initcall(imx23_pinctrl_init);
-
-static void __exit imx23_pinctrl_exit(void)
-{
- platform_driver_unregister(&imx23_pinctrl_driver);
-}
-module_exit(imx23_pinctrl_exit);
-
-MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
-MODULE_DESCRIPTION("Freescale i.MX23 pinctrl driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/freescale/pinctrl-imx25.c b/drivers/pinctrl/freescale/pinctrl-imx25.c
index 81ad546d7..d7367fabe 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx25.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx25.c
@@ -18,7 +18,6 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
@@ -338,12 +337,3 @@ static int __init imx25_pinctrl_init(void)
return platform_driver_register(&imx25_pinctrl_driver);
}
arch_initcall(imx25_pinctrl_init);
-
-static void __exit imx25_pinctrl_exit(void)
-{
- platform_driver_unregister(&imx25_pinctrl_driver);
-}
-module_exit(imx25_pinctrl_exit);
-MODULE_AUTHOR("Denis Carikli <denis@eukrea.com>");
-MODULE_DESCRIPTION("Freescale IMX25 pinctrl driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/freescale/pinctrl-imx27.c b/drivers/pinctrl/freescale/pinctrl-imx27.c
index f828fbbba..e5992036f 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx27.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx27.c
@@ -14,7 +14,6 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
@@ -412,12 +411,3 @@ static int __init imx27_pinctrl_init(void)
return platform_driver_register(&imx27_pinctrl_driver);
}
arch_initcall(imx27_pinctrl_init);
-
-static void __exit imx27_pinctrl_exit(void)
-{
- platform_driver_unregister(&imx27_pinctrl_driver);
-}
-module_exit(imx27_pinctrl_exit);
-MODULE_AUTHOR("Markus Pargmann <mpa@pengutronix.de>");
-MODULE_DESCRIPTION("Freescale IMX27 pinctrl driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/freescale/pinctrl-imx28.c b/drivers/pinctrl/freescale/pinctrl-imx28.c
index 5082efec4..295236dfb 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx28.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx28.c
@@ -1,4 +1,7 @@
/*
+ * Freescale i.MX28 pinctrl driver
+ *
+ * Author: Shawn Guo <shawn.guo@linaro.org>
* Copyright 2012 Freescale Semiconductor, Inc.
*
* The code contained herein is licensed under the GNU General Public
@@ -10,7 +13,6 @@
*/
#include <linux/init.h>
-#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
#include "pinctrl-mxs.h"
@@ -392,15 +394,14 @@ static const struct of_device_id imx28_pinctrl_of_match[] = {
{ .compatible = "fsl,imx28-pinctrl", },
{ /* sentinel */ }
};
-MODULE_DEVICE_TABLE(of, imx28_pinctrl_of_match);
static struct platform_driver imx28_pinctrl_driver = {
.driver = {
.name = "imx28-pinctrl",
+ .suppress_bind_attrs = true,
.of_match_table = imx28_pinctrl_of_match,
},
.probe = imx28_pinctrl_probe,
- .remove = mxs_pinctrl_remove,
};
static int __init imx28_pinctrl_init(void)
@@ -408,13 +409,3 @@ static int __init imx28_pinctrl_init(void)
return platform_driver_register(&imx28_pinctrl_driver);
}
postcore_initcall(imx28_pinctrl_init);
-
-static void __exit imx28_pinctrl_exit(void)
-{
- platform_driver_unregister(&imx28_pinctrl_driver);
-}
-module_exit(imx28_pinctrl_exit);
-
-MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
-MODULE_DESCRIPTION("Freescale i.MX28 pinctrl driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/freescale/pinctrl-imx35.c b/drivers/pinctrl/freescale/pinctrl-imx35.c
index 13eb224a2..6315ba6af 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx35.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx35.c
@@ -16,7 +16,6 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
@@ -1028,12 +1027,3 @@ static int __init imx35_pinctrl_init(void)
return platform_driver_register(&imx35_pinctrl_driver);
}
arch_initcall(imx35_pinctrl_init);
-
-static void __exit imx35_pinctrl_exit(void)
-{
- platform_driver_unregister(&imx35_pinctrl_driver);
-}
-module_exit(imx35_pinctrl_exit);
-MODULE_AUTHOR("Dong Aisheng <dong.aisheng@linaro.org>");
-MODULE_DESCRIPTION("Freescale IMX35 pinctrl driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/freescale/pinctrl-imx50.c b/drivers/pinctrl/freescale/pinctrl-imx50.c
index 95a36c88b..8e3a17df5 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx50.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx50.c
@@ -14,7 +14,6 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
@@ -415,11 +414,3 @@ static int __init imx50_pinctrl_init(void)
return platform_driver_register(&imx50_pinctrl_driver);
}
arch_initcall(imx50_pinctrl_init);
-
-static void __exit imx50_pinctrl_exit(void)
-{
- platform_driver_unregister(&imx50_pinctrl_driver);
-}
-module_exit(imx50_pinctrl_exit);
-MODULE_DESCRIPTION("Freescale IMX50 pinctrl driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/freescale/pinctrl-imx51.c b/drivers/pinctrl/freescale/pinctrl-imx51.c
index 0863e5279..eeac64ba2 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx51.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx51.c
@@ -15,7 +15,6 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
@@ -791,12 +790,3 @@ static int __init imx51_pinctrl_init(void)
return platform_driver_register(&imx51_pinctrl_driver);
}
arch_initcall(imx51_pinctrl_init);
-
-static void __exit imx51_pinctrl_exit(void)
-{
- platform_driver_unregister(&imx51_pinctrl_driver);
-}
-module_exit(imx51_pinctrl_exit);
-MODULE_AUTHOR("Dong Aisheng <dong.aisheng@linaro.org>");
-MODULE_DESCRIPTION("Freescale IMX51 pinctrl driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/freescale/pinctrl-imx53.c b/drivers/pinctrl/freescale/pinctrl-imx53.c
index 64c9cbe2a..46a9572f3 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx53.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx53.c
@@ -15,7 +15,6 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
@@ -478,12 +477,3 @@ static int __init imx53_pinctrl_init(void)
return platform_driver_register(&imx53_pinctrl_driver);
}
arch_initcall(imx53_pinctrl_init);
-
-static void __exit imx53_pinctrl_exit(void)
-{
- platform_driver_unregister(&imx53_pinctrl_driver);
-}
-module_exit(imx53_pinctrl_exit);
-MODULE_AUTHOR("Dong Aisheng <dong.aisheng@linaro.org>");
-MODULE_DESCRIPTION("Freescale IMX53 pinctrl driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6dl.c b/drivers/pinctrl/freescale/pinctrl-imx6dl.c
index de17bac8a..3f25ca586 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6dl.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6dl.c
@@ -1,4 +1,7 @@
/*
+ * Freescale imx6dl pinctrl driver
+ *
+ * Author: Shawn Guo <shawn.guo@linaro.org>
* Copyright (C) 2013 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify
@@ -9,7 +12,6 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
@@ -484,13 +486,3 @@ static int __init imx6dl_pinctrl_init(void)
return platform_driver_register(&imx6dl_pinctrl_driver);
}
arch_initcall(imx6dl_pinctrl_init);
-
-static void __exit imx6dl_pinctrl_exit(void)
-{
- platform_driver_unregister(&imx6dl_pinctrl_driver);
-}
-module_exit(imx6dl_pinctrl_exit);
-
-MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
-MODULE_DESCRIPTION("Freescale imx6dl pinctrl driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6q.c b/drivers/pinctrl/freescale/pinctrl-imx6q.c
index 55cd8a0e3..d61651c40 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6q.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6q.c
@@ -15,7 +15,6 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
@@ -490,12 +489,3 @@ static int __init imx6q_pinctrl_init(void)
return platform_driver_register(&imx6q_pinctrl_driver);
}
arch_initcall(imx6q_pinctrl_init);
-
-static void __exit imx6q_pinctrl_exit(void)
-{
- platform_driver_unregister(&imx6q_pinctrl_driver);
-}
-module_exit(imx6q_pinctrl_exit);
-MODULE_AUTHOR("Dong Aisheng <dong.aisheng@linaro.org>");
-MODULE_DESCRIPTION("Freescale IMX6Q pinctrl driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6sl.c b/drivers/pinctrl/freescale/pinctrl-imx6sl.c
index bf455b8e7..d023f6b00 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6sl.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6sl.c
@@ -1,4 +1,7 @@
/*
+ * Freescale imx6sl pinctrl driver
+ *
+ * Author: Shawn Guo <shawn.guo@linaro.org>
* Copyright (C) 2013 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify
@@ -9,7 +12,6 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
@@ -371,7 +373,6 @@ static const struct of_device_id imx6sl_pinctrl_of_match[] = {
{ .compatible = "fsl,imx6sl-iomuxc", },
{ /* sentinel */ }
};
-MODULE_DEVICE_TABLE(of, imx6sl_pinctrl_of_match);
static int imx6sl_pinctrl_probe(struct platform_device *pdev)
{
@@ -391,13 +392,3 @@ static int __init imx6sl_pinctrl_init(void)
return platform_driver_register(&imx6sl_pinctrl_driver);
}
arch_initcall(imx6sl_pinctrl_init);
-
-static void __exit imx6sl_pinctrl_exit(void)
-{
- platform_driver_unregister(&imx6sl_pinctrl_driver);
-}
-module_exit(imx6sl_pinctrl_exit);
-
-MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
-MODULE_DESCRIPTION("Freescale imx6sl pinctrl driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6sx.c b/drivers/pinctrl/freescale/pinctrl-imx6sx.c
index 84118c388..898b78170 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6sx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6sx.c
@@ -1,4 +1,7 @@
/*
+ * Freescale imx6sx pinctrl driver
+ *
+ * Author: Anson Huang <Anson.Huang@freescale.com>
* Copyright (C) 2014 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify
@@ -9,7 +12,6 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
@@ -394,13 +396,3 @@ static int __init imx6sx_pinctrl_init(void)
return platform_driver_register(&imx6sx_pinctrl_driver);
}
arch_initcall(imx6sx_pinctrl_init);
-
-static void __exit imx6sx_pinctrl_exit(void)
-{
- platform_driver_unregister(&imx6sx_pinctrl_driver);
-}
-module_exit(imx6sx_pinctrl_exit);
-
-MODULE_AUTHOR("Anson Huang <Anson.Huang@freescale.com>");
-MODULE_DESCRIPTION("Freescale imx6sx pinctrl driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6ul.c b/drivers/pinctrl/freescale/pinctrl-imx6ul.c
index c707fdd93..1aeb840aa 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6ul.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6ul.c
@@ -1,4 +1,7 @@
/*
+ * Freescale imx6ul pinctrl driver
+ *
+ * Author: Anson Huang <Anson.Huang@freescale.com>
* Copyright (C) 2015 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify
@@ -9,7 +12,6 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
@@ -310,13 +312,3 @@ static int __init imx6ul_pinctrl_init(void)
return platform_driver_register(&imx6ul_pinctrl_driver);
}
arch_initcall(imx6ul_pinctrl_init);
-
-static void __exit imx6ul_pinctrl_exit(void)
-{
- platform_driver_unregister(&imx6ul_pinctrl_driver);
-}
-module_exit(imx6ul_pinctrl_exit);
-
-MODULE_AUTHOR("Anson Huang <Anson.Huang@freescale.com>");
-MODULE_DESCRIPTION("Freescale imx6ul pinctrl driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/freescale/pinctrl-imx7d.c b/drivers/pinctrl/freescale/pinctrl-imx7d.c
index d30d91f80..a465a66c3 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx7d.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx7d.c
@@ -1,4 +1,7 @@
/*
+ * Freescale imx7d pinctrl driver
+ *
+ * Author: Anson Huang <Anson.Huang@freescale.com>
* Copyright (C) 2014-2015 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify
@@ -9,7 +12,6 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
@@ -402,13 +404,3 @@ static int __init imx7d_pinctrl_init(void)
return platform_driver_register(&imx7d_pinctrl_driver);
}
arch_initcall(imx7d_pinctrl_init);
-
-static void __exit imx7d_pinctrl_exit(void)
-{
- platform_driver_unregister(&imx7d_pinctrl_driver);
-}
-module_exit(imx7d_pinctrl_exit);
-
-MODULE_AUTHOR("Anson Huang <Anson.Huang@freescale.com>");
-MODULE_DESCRIPTION("Freescale imx7d pinctrl driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/freescale/pinctrl-mxs.c b/drivers/pinctrl/freescale/pinctrl-mxs.c
index 6bbda6b4a..41b5b07d5 100644
--- a/drivers/pinctrl/freescale/pinctrl-mxs.c
+++ b/drivers/pinctrl/freescale/pinctrl-mxs.c
@@ -12,7 +12,6 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pinctrl/machine.h>
@@ -553,14 +552,3 @@ err:
return ret;
}
EXPORT_SYMBOL_GPL(mxs_pinctrl_probe);
-
-int mxs_pinctrl_remove(struct platform_device *pdev)
-{
- struct mxs_pinctrl_data *d = platform_get_drvdata(pdev);
-
- pinctrl_unregister(d->pctl);
- iounmap(d->base);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mxs_pinctrl_remove);
diff --git a/drivers/pinctrl/freescale/pinctrl-mxs.h b/drivers/pinctrl/freescale/pinctrl-mxs.h
index fdd88d0ba..34dbf7520 100644
--- a/drivers/pinctrl/freescale/pinctrl-mxs.h
+++ b/drivers/pinctrl/freescale/pinctrl-mxs.h
@@ -86,6 +86,5 @@ struct mxs_pinctrl_soc_data {
int mxs_pinctrl_probe(struct platform_device *pdev,
struct mxs_pinctrl_soc_data *soc);
-int mxs_pinctrl_remove(struct platform_device *pdev);
#endif /* __PINCTRL_MXS_H */
diff --git a/drivers/pinctrl/freescale/pinctrl-vf610.c b/drivers/pinctrl/freescale/pinctrl-vf610.c
index 6d81be096..2b1e198e3 100644
--- a/drivers/pinctrl/freescale/pinctrl-vf610.c
+++ b/drivers/pinctrl/freescale/pinctrl-vf610.c
@@ -12,7 +12,6 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
@@ -325,12 +324,3 @@ static int __init vf610_pinctrl_init(void)
return platform_driver_register(&vf610_pinctrl_driver);
}
arch_initcall(vf610_pinctrl_init);
-
-static void __exit vf610_pinctrl_exit(void)
-{
- platform_driver_unregister(&vf610_pinctrl_driver);
-}
-module_exit(vf610_pinctrl_exit);
-
-MODULE_DESCRIPTION("Freescale VF610 pinctrl driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
index 1c74e038b..00fb055a4 100644
--- a/drivers/pinctrl/intel/Kconfig
+++ b/drivers/pinctrl/intel/Kconfig
@@ -29,6 +29,17 @@ config PINCTRL_CHERRYVIEW
Cherryview/Braswell pinctrl driver provides an interface that
allows configuring of SoC pins and using them as GPIOs.
+config PINCTRL_MERRIFIELD
+ tristate "Intel Merrifield pinctrl driver"
+ depends on X86_INTEL_MID
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ help
+ Merrifield Family-Level Interface Shim (FLIS) driver provides an
+ interface that allows configuring of SoC pins and using them as
+ GPIOs.
+
config PINCTRL_INTEL
tristate
select PINMUX
diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile
index 03bc68e35..30803078f 100644
--- a/drivers/pinctrl/intel/Makefile
+++ b/drivers/pinctrl/intel/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_PINCTRL_BAYTRAIL) += pinctrl-baytrail.o
obj-$(CONFIG_PINCTRL_CHERRYVIEW) += pinctrl-cherryview.o
+obj-$(CONFIG_PINCTRL_MERRIFIELD) += pinctrl-merrifield.o
obj-$(CONFIG_PINCTRL_INTEL) += pinctrl-intel.o
obj-$(CONFIG_PINCTRL_BROXTON) += pinctrl-broxton.o
obj-$(CONFIG_PINCTRL_SUNRISEPOINT) += pinctrl-sunrisepoint.o
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 7abfd42e8..d22a9fe2e 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -15,7 +15,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/bitops.h>
@@ -1822,17 +1821,6 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
return 0;
}
-static int byt_pinctrl_remove(struct platform_device *pdev)
-{
- struct byt_gpio *vg = platform_get_drvdata(pdev);
-
- pm_runtime_disable(&pdev->dev);
- gpiochip_remove(&vg->chip);
- pinctrl_unregister(vg->pctl_dev);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int byt_gpio_suspend(struct device *dev)
{
@@ -1930,10 +1918,11 @@ static const struct dev_pm_ops byt_gpio_pm_ops = {
static struct platform_driver byt_gpio_driver = {
.probe = byt_pinctrl_probe,
- .remove = byt_pinctrl_remove,
.driver = {
- .name = "byt_gpio",
- .pm = &byt_gpio_pm_ops,
+ .name = "byt_gpio",
+ .pm = &byt_gpio_pm_ops,
+ .suppress_bind_attrs = true,
+
.acpi_match_table = ACPI_PTR(byt_gpio_acpi_match),
},
};
@@ -1943,9 +1932,3 @@ static int __init byt_gpio_init(void)
return platform_driver_register(&byt_gpio_driver);
}
subsys_initcall(byt_gpio_init);
-
-static void __exit byt_gpio_exit(void)
-{
- platform_driver_unregister(&byt_gpio_driver);
-}
-module_exit(byt_gpio_exit);
diff --git a/drivers/pinctrl/intel/pinctrl-broxton.c b/drivers/pinctrl/intel/pinctrl-broxton.c
index 5979d38c4..59cb7a6fc 100644
--- a/drivers/pinctrl/intel/pinctrl-broxton.c
+++ b/drivers/pinctrl/intel/pinctrl-broxton.c
@@ -1,7 +1,7 @@
/*
* Intel Broxton SoC pinctrl/GPIO driver
*
- * Copyright (C) 2015, Intel Corporation
+ * Copyright (C) 2015, 2016 Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -1003,29 +1003,46 @@ static const struct acpi_device_id bxt_pinctrl_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, bxt_pinctrl_acpi_match);
+static const struct platform_device_id bxt_pinctrl_platform_ids[] = {
+ { "apl-pinctrl", (kernel_ulong_t)&apl_pinctrl_soc_data },
+ { "broxton-pinctrl", (kernel_ulong_t)&bxt_pinctrl_soc_data },
+ { },
+};
+
static int bxt_pinctrl_probe(struct platform_device *pdev)
{
const struct intel_pinctrl_soc_data *soc_data = NULL;
const struct intel_pinctrl_soc_data **soc_table;
- const struct acpi_device_id *id;
struct acpi_device *adev;
int i;
adev = ACPI_COMPANION(&pdev->dev);
- if (!adev)
- return -ENODEV;
+ if (adev) {
+ const struct acpi_device_id *id;
- id = acpi_match_device(bxt_pinctrl_acpi_match, &pdev->dev);
- if (!id)
- return -ENODEV;
+ id = acpi_match_device(bxt_pinctrl_acpi_match, &pdev->dev);
+ if (!id)
+ return -ENODEV;
- soc_table = (const struct intel_pinctrl_soc_data **)id->driver_data;
+ soc_table = (const struct intel_pinctrl_soc_data **)
+ id->driver_data;
- for (i = 0; soc_table[i]; i++) {
- if (!strcmp(adev->pnp.unique_id, soc_table[i]->uid)) {
- soc_data = soc_table[i];
- break;
+ for (i = 0; soc_table[i]; i++) {
+ if (!strcmp(adev->pnp.unique_id, soc_table[i]->uid)) {
+ soc_data = soc_table[i];
+ break;
+ }
}
+ } else {
+ const struct platform_device_id *pid;
+
+ pid = platform_get_device_id(pdev);
+ if (!pid)
+ return -ENODEV;
+
+ soc_table = (const struct intel_pinctrl_soc_data **)
+ pid->driver_data;
+ soc_data = soc_table[pdev->id];
}
if (!soc_data)
@@ -1047,6 +1064,7 @@ static struct platform_driver bxt_pinctrl_driver = {
.acpi_match_table = bxt_pinctrl_acpi_match,
.pm = &bxt_pinctrl_pm_ops,
},
+ .id_table = bxt_pinctrl_platform_ids,
};
static int __init bxt_pinctrl_init(void)
@@ -1064,3 +1082,4 @@ module_exit(bxt_pinctrl_exit);
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_DESCRIPTION("Intel Broxton SoC pinctrl/GPIO driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:broxton-pinctrl");
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index bf65c948b..0fe8fad25 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1108,6 +1108,27 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin,
return 0;
}
+static int chv_config_set_oden(struct chv_pinctrl *pctrl, unsigned int pin,
+ bool enable)
+{
+ void __iomem *reg = chv_padreg(pctrl, pin, CHV_PADCTRL1);
+ unsigned long flags;
+ u32 ctrl1;
+
+ raw_spin_lock_irqsave(&chv_lock, flags);
+ ctrl1 = readl(reg);
+
+ if (enable)
+ ctrl1 |= CHV_PADCTRL1_ODEN;
+ else
+ ctrl1 &= ~CHV_PADCTRL1_ODEN;
+
+ chv_writel(ctrl1, reg);
+ raw_spin_unlock_irqrestore(&chv_lock, flags);
+
+ return 0;
+}
+
static int chv_config_set(struct pinctrl_dev *pctldev, unsigned pin,
unsigned long *configs, unsigned nconfigs)
{
@@ -1132,6 +1153,18 @@ static int chv_config_set(struct pinctrl_dev *pctldev, unsigned pin,
return ret;
break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ ret = chv_config_set_oden(pctrl, pin, false);
+ if (ret)
+ return ret;
+ break;
+
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ ret = chv_config_set_oden(pctrl, pin, true);
+ if (ret)
+ return ret;
+ break;
+
default:
return -ENOTSUPP;
}
@@ -1143,10 +1176,52 @@ static int chv_config_set(struct pinctrl_dev *pctldev, unsigned pin,
return 0;
}
+static int chv_config_group_get(struct pinctrl_dev *pctldev,
+ unsigned int group,
+ unsigned long *config)
+{
+ const unsigned int *pins;
+ unsigned int npins;
+ int ret;
+
+ ret = chv_get_group_pins(pctldev, group, &pins, &npins);
+ if (ret)
+ return ret;
+
+ ret = chv_config_get(pctldev, pins[0], config);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int chv_config_group_set(struct pinctrl_dev *pctldev,
+ unsigned int group, unsigned long *configs,
+ unsigned int num_configs)
+{
+ const unsigned int *pins;
+ unsigned int npins;
+ int i, ret;
+
+ ret = chv_get_group_pins(pctldev, group, &pins, &npins);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < npins; i++) {
+ ret = chv_config_set(pctldev, pins[i], configs, num_configs);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct pinconf_ops chv_pinconf_ops = {
.is_generic = true,
.pin_config_set = chv_config_set,
.pin_config_get = chv_config_get,
+ .pin_config_group_get = chv_config_group_get,
+ .pin_config_group_set = chv_config_group_set,
};
static struct pinctrl_desc chv_pinctrl_desc = {
@@ -1464,12 +1539,11 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
offset += range->npins;
}
- /* Mask and clear all interrupts */
- chv_writel(0, pctrl->regs + CHV_INTMASK);
+ /* Clear all interrupts */
chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0,
- handle_simple_irq, IRQ_TYPE_NONE);
+ handle_bad_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(pctrl->dev, "failed to add IRQ chip\n");
goto fail;
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 3584e50fa..257cab129 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -89,7 +89,7 @@ struct intel_pinctrl_context {
*/
struct intel_pinctrl {
struct device *dev;
- spinlock_t lock;
+ raw_spinlock_t lock;
struct pinctrl_desc pctldesc;
struct pinctrl_dev *pctldev;
struct gpio_chip chip;
@@ -318,7 +318,7 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
unsigned long flags;
int i;
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
/*
* All pins in the group need to be accessible and writable
@@ -326,7 +326,7 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
*/
for (i = 0; i < grp->npins; i++) {
if (!intel_pad_usable(pctrl, grp->pins[i])) {
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return -EBUSY;
}
}
@@ -345,7 +345,7 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
writel(value, padcfg0);
}
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
}
@@ -359,10 +359,10 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
unsigned long flags;
u32 value;
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
if (!intel_pad_usable(pctrl, pin)) {
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return -EBUSY;
}
@@ -377,7 +377,7 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
value |= PADCFG0_GPIOTXDIS;
writel(value, padcfg0);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
}
@@ -391,7 +391,7 @@ static int intel_gpio_set_direction(struct pinctrl_dev *pctldev,
unsigned long flags;
u32 value;
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
@@ -402,7 +402,7 @@ static int intel_gpio_set_direction(struct pinctrl_dev *pctldev,
value &= ~PADCFG0_GPIOTXDIS;
writel(value, padcfg0);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
}
@@ -490,7 +490,7 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned pin,
int ret = 0;
u32 value;
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
padcfg1 = intel_get_padcfg(pctrl, pin, PADCFG1);
value = readl(padcfg1);
@@ -544,7 +544,7 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned pin,
if (!ret)
writel(value, padcfg1);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return ret;
}
@@ -611,14 +611,14 @@ static void intel_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
unsigned long flags;
u32 padcfg0;
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
padcfg0 = readl(reg);
if (value)
padcfg0 |= PADCFG0_GPIOTXSTATE;
else
padcfg0 &= ~PADCFG0_GPIOTXSTATE;
writel(padcfg0, reg);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
}
@@ -651,7 +651,7 @@ static void intel_gpio_irq_ack(struct irq_data *d)
const struct intel_community *community;
unsigned pin = irqd_to_hwirq(d);
- spin_lock(&pctrl->lock);
+ raw_spin_lock(&pctrl->lock);
community = intel_get_community(pctrl, pin);
if (community) {
@@ -662,7 +662,7 @@ static void intel_gpio_irq_ack(struct irq_data *d)
writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4);
}
- spin_unlock(&pctrl->lock);
+ raw_spin_unlock(&pctrl->lock);
}
static void intel_gpio_irq_enable(struct irq_data *d)
@@ -673,7 +673,7 @@ static void intel_gpio_irq_enable(struct irq_data *d)
unsigned pin = irqd_to_hwirq(d);
unsigned long flags;
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
community = intel_get_community(pctrl, pin);
if (community) {
@@ -691,7 +691,7 @@ static void intel_gpio_irq_enable(struct irq_data *d)
writel(value, community->regs + community->ie_offset + gpp * 4);
}
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
@@ -702,7 +702,7 @@ static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
unsigned pin = irqd_to_hwirq(d);
unsigned long flags;
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
community = intel_get_community(pctrl, pin);
if (community) {
@@ -721,7 +721,7 @@ static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
writel(value, reg);
}
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
static void intel_gpio_irq_mask(struct irq_data *d)
@@ -757,7 +757,7 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned type)
return -EPERM;
}
- spin_lock_irqsave(&pctrl->lock, flags);
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
value = readl(reg);
@@ -784,7 +784,7 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned type)
else if (type & IRQ_TYPE_LEVEL_MASK)
irq_set_handler_locked(d, handle_level_irq);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
}
@@ -796,12 +796,15 @@ static int intel_gpio_irq_wake(struct irq_data *d, unsigned int on)
const struct intel_community *community;
unsigned pin = irqd_to_hwirq(d);
unsigned padno, gpp, gpp_offset;
+ unsigned long flags;
u32 gpe_en;
community = intel_get_community(pctrl, pin);
if (!community)
return -EINVAL;
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
+
padno = pin_to_padno(community, pin);
gpp = padno / community->gpp_size;
gpp_offset = padno % community->gpp_size;
@@ -821,6 +824,8 @@ static int intel_gpio_irq_wake(struct irq_data *d, unsigned int on)
gpe_en &= ~BIT(gpp_offset);
writel(gpe_en, community->regs + GPI_GPE_EN + gpp * 4);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+
dev_dbg(pctrl->dev, "%sable wake for pin %u\n", on ? "en" : "dis", pin);
return 0;
}
@@ -919,7 +924,8 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
* to the irq directly) because on some platforms several GPIO
* controllers share the same interrupt line.
*/
- ret = devm_request_irq(pctrl->dev, irq, intel_gpio_irq, IRQF_SHARED,
+ ret = devm_request_irq(pctrl->dev, irq, intel_gpio_irq,
+ IRQF_SHARED | IRQF_NO_THREAD,
dev_name(pctrl->dev), pctrl);
if (ret) {
dev_err(pctrl->dev, "failed to request interrupt\n");
@@ -995,7 +1001,7 @@ int intel_pinctrl_probe(struct platform_device *pdev,
pctrl->dev = &pdev->dev;
pctrl->soc = soc_data;
- spin_lock_init(&pctrl->lock);
+ raw_spin_lock_init(&pctrl->lock);
/*
* Make a copy of the communities which we can use to hold pointers
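
The spinlock_t to raw_spinlock_t conversion running through pinctrl-intel.c above matters on PREEMPT_RT: callbacks such as intel_gpio_irq_ack() run in hard-irq context, where an rt-mutex-backed spinlock may sleep, while a raw spinlock always remains a busy-waiting lock. The usage is unchanged apart from the raw_ prefix; a minimal sketch:

static DEFINE_RAW_SPINLOCK(pad_lock);	/* never becomes a sleeping lock on RT */

static void pad_set_bits(void __iomem *reg, u32 set)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pad_lock, flags);
	writel(readl(reg) | set, reg);	/* short, non-sleeping section */
	raw_spin_unlock_irqrestore(&pad_lock, flags);
}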
diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
new file mode 100644
index 000000000..7fb765642
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
@@ -0,0 +1,912 @@
+/*
+ * Intel Merrifield SoC pinctrl driver
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "pinctrl-intel.h"
+
+#define MRFLD_FAMILY_NR 64
+#define MRFLD_FAMILY_LEN 0x400
+
+#define SLEW_OFFSET 0x000
+#define BUFCFG_OFFSET 0x100
+#define MISC_OFFSET 0x300
+
+#define BUFCFG_PINMODE_SHIFT 0
+#define BUFCFG_PINMODE_MASK GENMASK(2, 0)
+#define BUFCFG_PINMODE_GPIO 0
+#define BUFCFG_PUPD_VAL_SHIFT 4
+#define BUFCFG_PUPD_VAL_MASK GENMASK(5, 4)
+#define BUFCFG_PUPD_VAL_2K 0
+#define BUFCFG_PUPD_VAL_20K 1
+#define BUFCFG_PUPD_VAL_50K 2
+#define BUFCFG_PUPD_VAL_910 3
+#define BUFCFG_PU_EN BIT(8)
+#define BUFCFG_PD_EN BIT(9)
+#define BUFCFG_Px_EN_MASK GENMASK(9, 8)
+#define BUFCFG_SLEWSEL BIT(10)
+#define BUFCFG_OVINEN BIT(12)
+#define BUFCFG_OVINEN_EN BIT(13)
+#define BUFCFG_OVINEN_MASK GENMASK(13, 12)
+#define BUFCFG_OVOUTEN BIT(14)
+#define BUFCFG_OVOUTEN_EN BIT(15)
+#define BUFCFG_OVOUTEN_MASK GENMASK(15, 14)
+#define BUFCFG_INDATAOV_VAL BIT(16)
+#define BUFCFG_INDATAOV_EN BIT(17)
+#define BUFCFG_INDATAOV_MASK GENMASK(17, 16)
+#define BUFCFG_OUTDATAOV_VAL BIT(18)
+#define BUFCFG_OUTDATAOV_EN BIT(19)
+#define BUFCFG_OUTDATAOV_MASK GENMASK(19, 18)
+#define BUFCFG_OD_EN BIT(21)
+
+/**
+ * struct mrfld_family - Intel pin family description
+ * @barno: MMIO BAR number where registers for this family reside
+ * @pin_base: Starting pin of pins in this family
+ * @npins: Number of pins in this family
+ * @protected: True if access to the family's registers is protected
+ * @regs: Family-specific common registers
+ */
+struct mrfld_family {
+ unsigned int barno;
+ unsigned int pin_base;
+ size_t npins;
+ bool protected;
+ void __iomem *regs;
+};
+
+#define MRFLD_FAMILY(b, s, e) \
+ { \
+ .barno = (b), \
+ .pin_base = (s), \
+ .npins = (e) - (s) + 1, \
+ }
+
+#define MRFLD_FAMILY_PROTECTED(b, s, e) \
+ { \
+ .barno = (b), \
+ .pin_base = (s), \
+ .npins = (e) - (s) + 1, \
+ .protected = true, \
+ }
+
+static const struct pinctrl_pin_desc mrfld_pins[] = {
+ /* Family 0: OCP2SSC (0 pins) */
+ /* Family 1: ULPI (13 pins) */
+ PINCTRL_PIN(0, "ULPI_CLK"),
+ PINCTRL_PIN(1, "ULPI_D0"),
+ PINCTRL_PIN(2, "ULPI_D1"),
+ PINCTRL_PIN(3, "ULPI_D2"),
+ PINCTRL_PIN(4, "ULPI_D3"),
+ PINCTRL_PIN(5, "ULPI_D4"),
+ PINCTRL_PIN(6, "ULPI_D5"),
+ PINCTRL_PIN(7, "ULPI_D6"),
+ PINCTRL_PIN(8, "ULPI_D7"),
+ PINCTRL_PIN(9, "ULPI_DIR"),
+ PINCTRL_PIN(10, "ULPI_NXT"),
+ PINCTRL_PIN(11, "ULPI_REFCLK"),
+ PINCTRL_PIN(12, "ULPI_STP"),
+ /* Family 2: eMMC (24 pins) */
+ PINCTRL_PIN(13, "EMMC_CLK"),
+ PINCTRL_PIN(14, "EMMC_CMD"),
+ PINCTRL_PIN(15, "EMMC_D0"),
+ PINCTRL_PIN(16, "EMMC_D1"),
+ PINCTRL_PIN(17, "EMMC_D2"),
+ PINCTRL_PIN(18, "EMMC_D3"),
+ PINCTRL_PIN(19, "EMMC_D4"),
+ PINCTRL_PIN(20, "EMMC_D5"),
+ PINCTRL_PIN(21, "EMMC_D6"),
+ PINCTRL_PIN(22, "EMMC_D7"),
+ PINCTRL_PIN(23, "EMMC_RST_N"),
+ PINCTRL_PIN(24, "GP154"),
+ PINCTRL_PIN(25, "GP155"),
+ PINCTRL_PIN(26, "GP156"),
+ PINCTRL_PIN(27, "GP157"),
+ PINCTRL_PIN(28, "GP158"),
+ PINCTRL_PIN(29, "GP159"),
+ PINCTRL_PIN(30, "GP160"),
+ PINCTRL_PIN(31, "GP161"),
+ PINCTRL_PIN(32, "GP162"),
+ PINCTRL_PIN(33, "GP163"),
+ PINCTRL_PIN(34, "GP97"),
+ PINCTRL_PIN(35, "GP14"),
+ PINCTRL_PIN(36, "GP15"),
+ /* Family 3: SDIO (20 pins) */
+ PINCTRL_PIN(37, "GP77_SD_CD"),
+ PINCTRL_PIN(38, "GP78_SD_CLK"),
+ PINCTRL_PIN(39, "GP79_SD_CMD"),
+ PINCTRL_PIN(40, "GP80_SD_D0"),
+ PINCTRL_PIN(41, "GP81_SD_D1"),
+ PINCTRL_PIN(42, "GP82_SD_D2"),
+ PINCTRL_PIN(43, "GP83_SD_D3"),
+ PINCTRL_PIN(44, "GP84_SD_LS_CLK_FB"),
+ PINCTRL_PIN(45, "GP85_SD_LS_CMD_DIR"),
+ PINCTRL_PIN(46, "GP86_SD_LVL_D_DIR"),
+ PINCTRL_PIN(47, "GP88_SD_LS_SEL"),
+ PINCTRL_PIN(48, "GP87_SD_PD"),
+ PINCTRL_PIN(49, "GP89_SD_WP"),
+ PINCTRL_PIN(50, "GP90_SDIO_CLK"),
+ PINCTRL_PIN(51, "GP91_SDIO_CMD"),
+ PINCTRL_PIN(52, "GP92_SDIO_D0"),
+ PINCTRL_PIN(53, "GP93_SDIO_D1"),
+ PINCTRL_PIN(54, "GP94_SDIO_D2"),
+ PINCTRL_PIN(55, "GP95_SDIO_D3"),
+ PINCTRL_PIN(56, "GP96_SDIO_PD"),
+ /* Family 4: HSI (8 pins) */
+ PINCTRL_PIN(57, "HSI_ACDATA"),
+ PINCTRL_PIN(58, "HSI_ACFLAG"),
+ PINCTRL_PIN(59, "HSI_ACREADY"),
+ PINCTRL_PIN(60, "HSI_ACWAKE"),
+ PINCTRL_PIN(61, "HSI_CADATA"),
+ PINCTRL_PIN(62, "HSI_CAFLAG"),
+ PINCTRL_PIN(63, "HSI_CAREADY"),
+ PINCTRL_PIN(64, "HSI_CAWAKE"),
+ /* Family 5: SSP Audio (14 pins) */
+ PINCTRL_PIN(65, "GP70"),
+ PINCTRL_PIN(66, "GP71"),
+ PINCTRL_PIN(67, "GP32_I2S_0_CLK"),
+ PINCTRL_PIN(68, "GP33_I2S_0_FS"),
+ PINCTRL_PIN(69, "GP34_I2S_0_RXD"),
+ PINCTRL_PIN(70, "GP35_I2S_0_TXD"),
+ PINCTRL_PIN(71, "GP36_I2S_1_CLK"),
+ PINCTRL_PIN(72, "GP37_I2S_1_FS"),
+ PINCTRL_PIN(73, "GP38_I2S_1_RXD"),
+ PINCTRL_PIN(74, "GP39_I2S_1_TXD"),
+ PINCTRL_PIN(75, "GP40_I2S_2_CLK"),
+ PINCTRL_PIN(76, "GP41_I2S_2_FS"),
+ PINCTRL_PIN(77, "GP42_I2S_2_RXD"),
+ PINCTRL_PIN(78, "GP43_I2S_2_TXD"),
+ /* Family 6: GP SSP (22 pins) */
+ PINCTRL_PIN(79, "GP120_SPI_3_CLK"),
+ PINCTRL_PIN(80, "GP121_SPI_3_SS"),
+ PINCTRL_PIN(81, "GP122_SPI_3_RXD"),
+ PINCTRL_PIN(82, "GP123_SPI_3_TXD"),
+ PINCTRL_PIN(83, "GP102_SPI_4_CLK"),
+ PINCTRL_PIN(84, "GP103_SPI_4_SS_0"),
+ PINCTRL_PIN(85, "GP104_SPI_4_SS_1"),
+ PINCTRL_PIN(86, "GP105_SPI_4_SS_2"),
+ PINCTRL_PIN(87, "GP106_SPI_4_SS_3"),
+ PINCTRL_PIN(88, "GP107_SPI_4_RXD"),
+ PINCTRL_PIN(89, "GP108_SPI_4_TXD"),
+ PINCTRL_PIN(90, "GP109_SPI_5_CLK"),
+ PINCTRL_PIN(91, "GP110_SPI_5_SS_0"),
+ PINCTRL_PIN(92, "GP111_SPI_5_SS_1"),
+ PINCTRL_PIN(93, "GP112_SPI_5_SS_2"),
+ PINCTRL_PIN(94, "GP113_SPI_5_SS_3"),
+ PINCTRL_PIN(95, "GP114_SPI_5_RXD"),
+ PINCTRL_PIN(96, "GP115_SPI_5_TXD"),
+ PINCTRL_PIN(97, "GP116_SPI_6_CLK"),
+ PINCTRL_PIN(98, "GP117_SPI_6_SS"),
+ PINCTRL_PIN(99, "GP118_SPI_6_RXD"),
+ PINCTRL_PIN(100, "GP119_SPI_6_TXD"),
+ /* Family 7: I2C (14 pins) */
+ PINCTRL_PIN(101, "GP19_I2C_1_SCL"),
+ PINCTRL_PIN(102, "GP20_I2C_1_SDA"),
+ PINCTRL_PIN(103, "GP21_I2C_2_SCL"),
+ PINCTRL_PIN(104, "GP22_I2C_2_SDA"),
+ PINCTRL_PIN(105, "GP17_I2C_3_SCL_HDMI"),
+ PINCTRL_PIN(106, "GP18_I2C_3_SDA_HDMI"),
+ PINCTRL_PIN(107, "GP23_I2C_4_SCL"),
+ PINCTRL_PIN(108, "GP24_I2C_4_SDA"),
+ PINCTRL_PIN(109, "GP25_I2C_5_SCL"),
+ PINCTRL_PIN(110, "GP26_I2C_5_SDA"),
+ PINCTRL_PIN(111, "GP27_I2C_6_SCL"),
+ PINCTRL_PIN(112, "GP28_I2C_6_SDA"),
+ PINCTRL_PIN(113, "GP29_I2C_7_SCL"),
+ PINCTRL_PIN(114, "GP30_I2C_7_SDA"),
+ /* Family 8: UART (12 pins) */
+ PINCTRL_PIN(115, "GP124_UART_0_CTS"),
+ PINCTRL_PIN(116, "GP125_UART_0_RTS"),
+ PINCTRL_PIN(117, "GP126_UART_0_RX"),
+ PINCTRL_PIN(118, "GP127_UART_0_TX"),
+ PINCTRL_PIN(119, "GP128_UART_1_CTS"),
+ PINCTRL_PIN(120, "GP129_UART_1_RTS"),
+ PINCTRL_PIN(121, "GP130_UART_1_RX"),
+ PINCTRL_PIN(122, "GP131_UART_1_TX"),
+ PINCTRL_PIN(123, "GP132_UART_2_CTS"),
+ PINCTRL_PIN(124, "GP133_UART_2_RTS"),
+ PINCTRL_PIN(125, "GP134_UART_2_RX"),
+ PINCTRL_PIN(126, "GP135_UART_2_TX"),
+ /* Family 9: GPIO South (19 pins) */
+ PINCTRL_PIN(127, "GP177"),
+ PINCTRL_PIN(128, "GP178"),
+ PINCTRL_PIN(129, "GP179"),
+ PINCTRL_PIN(130, "GP180"),
+ PINCTRL_PIN(131, "GP181"),
+ PINCTRL_PIN(132, "GP182_PWM2"),
+ PINCTRL_PIN(133, "GP183_PWM3"),
+ PINCTRL_PIN(134, "GP184"),
+ PINCTRL_PIN(135, "GP185"),
+ PINCTRL_PIN(136, "GP186"),
+ PINCTRL_PIN(137, "GP187"),
+ PINCTRL_PIN(138, "GP188"),
+ PINCTRL_PIN(139, "GP189"),
+ PINCTRL_PIN(140, "GP64_FAST_INT0"),
+ PINCTRL_PIN(141, "GP65_FAST_INT1"),
+ PINCTRL_PIN(142, "GP66_FAST_INT2"),
+ PINCTRL_PIN(143, "GP67_FAST_INT3"),
+ PINCTRL_PIN(144, "GP12_PWM0"),
+ PINCTRL_PIN(145, "GP13_PWM1"),
+ /* Family 10: Camera Sideband (12 pins) */
+ PINCTRL_PIN(146, "GP0"),
+ PINCTRL_PIN(147, "GP1"),
+ PINCTRL_PIN(148, "GP2"),
+ PINCTRL_PIN(149, "GP3"),
+ PINCTRL_PIN(150, "GP4"),
+ PINCTRL_PIN(151, "GP5"),
+ PINCTRL_PIN(152, "GP6"),
+ PINCTRL_PIN(153, "GP7"),
+ PINCTRL_PIN(154, "GP8"),
+ PINCTRL_PIN(155, "GP9"),
+ PINCTRL_PIN(156, "GP10"),
+ PINCTRL_PIN(157, "GP11"),
+ /* Family 11: Clock (22 pins) */
+ PINCTRL_PIN(158, "GP137"),
+ PINCTRL_PIN(159, "GP138"),
+ PINCTRL_PIN(160, "GP139"),
+ PINCTRL_PIN(161, "GP140"),
+ PINCTRL_PIN(162, "GP141"),
+ PINCTRL_PIN(163, "GP142"),
+ PINCTRL_PIN(164, "GP16_HDMI_HPD"),
+ PINCTRL_PIN(165, "GP68_DSI_A_TE"),
+ PINCTRL_PIN(166, "GP69_DSI_C_TE"),
+ PINCTRL_PIN(167, "OSC_CLK_CTRL0"),
+ PINCTRL_PIN(168, "OSC_CLK_CTRL1"),
+ PINCTRL_PIN(169, "OSC_CLK0"),
+ PINCTRL_PIN(170, "OSC_CLK1"),
+ PINCTRL_PIN(171, "OSC_CLK2"),
+ PINCTRL_PIN(172, "OSC_CLK3"),
+ PINCTRL_PIN(173, "OSC_CLK4"),
+ PINCTRL_PIN(174, "RESETOUT"),
+ PINCTRL_PIN(175, "PMODE"),
+ PINCTRL_PIN(176, "PRDY"),
+ PINCTRL_PIN(177, "PREQ"),
+ PINCTRL_PIN(178, "GP190"),
+ PINCTRL_PIN(179, "GP191"),
+ /* Family 12: MSIC (15 pins) */
+ PINCTRL_PIN(180, "I2C_0_SCL"),
+ PINCTRL_PIN(181, "I2C_0_SDA"),
+ PINCTRL_PIN(182, "IERR"),
+ PINCTRL_PIN(183, "JTAG_TCK"),
+ PINCTRL_PIN(184, "JTAG_TDI"),
+ PINCTRL_PIN(185, "JTAG_TDO"),
+ PINCTRL_PIN(186, "JTAG_TMS"),
+ PINCTRL_PIN(187, "JTAG_TRST"),
+ PINCTRL_PIN(188, "PROCHOT"),
+ PINCTRL_PIN(189, "RTC_CLK"),
+ PINCTRL_PIN(190, "SVID_ALERT"),
+ PINCTRL_PIN(191, "SVID_CLK"),
+ PINCTRL_PIN(192, "SVID_D"),
+ PINCTRL_PIN(193, "THERMTRIP"),
+ PINCTRL_PIN(194, "STANDBY"),
+ /* Family 13: Keyboard (20 pins) */
+ PINCTRL_PIN(195, "GP44"),
+ PINCTRL_PIN(196, "GP45"),
+ PINCTRL_PIN(197, "GP46"),
+ PINCTRL_PIN(198, "GP47"),
+ PINCTRL_PIN(199, "GP48"),
+ PINCTRL_PIN(200, "GP49"),
+ PINCTRL_PIN(201, "GP50"),
+ PINCTRL_PIN(202, "GP51"),
+ PINCTRL_PIN(203, "GP52"),
+ PINCTRL_PIN(204, "GP53"),
+ PINCTRL_PIN(205, "GP54"),
+ PINCTRL_PIN(206, "GP55"),
+ PINCTRL_PIN(207, "GP56"),
+ PINCTRL_PIN(208, "GP57"),
+ PINCTRL_PIN(209, "GP58"),
+ PINCTRL_PIN(210, "GP59"),
+ PINCTRL_PIN(211, "GP60"),
+ PINCTRL_PIN(212, "GP61"),
+ PINCTRL_PIN(213, "GP62"),
+ PINCTRL_PIN(214, "GP63"),
+ /* Family 14: GPIO North (13 pins) */
+ PINCTRL_PIN(215, "GP164"),
+ PINCTRL_PIN(216, "GP165"),
+ PINCTRL_PIN(217, "GP166"),
+ PINCTRL_PIN(218, "GP167"),
+ PINCTRL_PIN(219, "GP168_MJTAG_TCK"),
+ PINCTRL_PIN(220, "GP169_MJTAG_TDI"),
+ PINCTRL_PIN(221, "GP170_MJTAG_TDO"),
+ PINCTRL_PIN(222, "GP171_MJTAG_TMS"),
+ PINCTRL_PIN(223, "GP172_MJTAG_TRST"),
+ PINCTRL_PIN(224, "GP173"),
+ PINCTRL_PIN(225, "GP174"),
+ PINCTRL_PIN(226, "GP175"),
+ PINCTRL_PIN(227, "GP176"),
+ /* Family 15: PTI (5 pins) */
+ PINCTRL_PIN(228, "GP72_PTI_CLK"),
+ PINCTRL_PIN(229, "GP73_PTI_D0"),
+ PINCTRL_PIN(230, "GP74_PTI_D1"),
+ PINCTRL_PIN(231, "GP75_PTI_D2"),
+ PINCTRL_PIN(232, "GP76_PTI_D3"),
+ /* Family 16: USB3 (0 pins) */
+ /* Family 17: HSIC (0 pins) */
+ /* Family 18: Broadcast (0 pins) */
+};
+
+static const unsigned int mrfld_sdio_pins[] = { 50, 51, 52, 53, 54, 55, 56 };
+static const unsigned int mrfld_spi5_pins[] = { 90, 91, 92, 93, 94, 95, 96 };
+static const unsigned int mrfld_uart0_pins[] = { 115, 116, 117, 118 };
+static const unsigned int mrfld_uart1_pins[] = { 119, 120, 121, 122 };
+static const unsigned int mrfld_uart2_pins[] = { 123, 124, 125, 126 };
+static const unsigned int mrfld_pwm0_pins[] = { 144 };
+static const unsigned int mrfld_pwm1_pins[] = { 145 };
+static const unsigned int mrfld_pwm2_pins[] = { 132 };
+static const unsigned int mrfld_pwm3_pins[] = { 133 };
+
+static const struct intel_pingroup mrfld_groups[] = {
+ PIN_GROUP("sdio_grp", mrfld_sdio_pins, 1),
+ PIN_GROUP("spi5_grp", mrfld_spi5_pins, 1),
+ PIN_GROUP("uart0_grp", mrfld_uart0_pins, 1),
+ PIN_GROUP("uart1_grp", mrfld_uart1_pins, 1),
+ PIN_GROUP("uart2_grp", mrfld_uart2_pins, 1),
+ PIN_GROUP("pwm0_grp", mrfld_pwm0_pins, 1),
+ PIN_GROUP("pwm1_grp", mrfld_pwm1_pins, 1),
+ PIN_GROUP("pwm2_grp", mrfld_pwm2_pins, 1),
+ PIN_GROUP("pwm3_grp", mrfld_pwm3_pins, 1),
+};
+
+static const char * const mrfld_sdio_groups[] = { "sdio_grp" };
+static const char * const mrfld_spi5_groups[] = { "spi5_grp" };
+static const char * const mrfld_uart0_groups[] = { "uart0_grp" };
+static const char * const mrfld_uart1_groups[] = { "uart1_grp" };
+static const char * const mrfld_uart2_groups[] = { "uart2_grp" };
+static const char * const mrfld_pwm0_groups[] = { "pwm0_grp" };
+static const char * const mrfld_pwm1_groups[] = { "pwm1_grp" };
+static const char * const mrfld_pwm2_groups[] = { "pwm2_grp" };
+static const char * const mrfld_pwm3_groups[] = { "pwm3_grp" };
+
+static const struct intel_function mrfld_functions[] = {
+ FUNCTION("sdio", mrfld_sdio_groups),
+ FUNCTION("spi5", mrfld_spi5_groups),
+ FUNCTION("uart0", mrfld_uart0_groups),
+ FUNCTION("uart1", mrfld_uart1_groups),
+ FUNCTION("uart2", mrfld_uart2_groups),
+ FUNCTION("pwm0", mrfld_pwm0_groups),
+ FUNCTION("pwm1", mrfld_pwm1_groups),
+ FUNCTION("pwm2", mrfld_pwm2_groups),
+ FUNCTION("pwm3", mrfld_pwm3_groups),
+};
+
+static const struct mrfld_family mrfld_families[] = {
+ MRFLD_FAMILY(1, 0, 12),
+ MRFLD_FAMILY(2, 13, 36),
+ MRFLD_FAMILY(3, 37, 56),
+ MRFLD_FAMILY(4, 57, 64),
+ MRFLD_FAMILY(5, 65, 78),
+ MRFLD_FAMILY(6, 79, 100),
+ MRFLD_FAMILY_PROTECTED(7, 101, 114),
+ MRFLD_FAMILY(8, 115, 126),
+ MRFLD_FAMILY(9, 127, 145),
+ MRFLD_FAMILY(10, 146, 157),
+ MRFLD_FAMILY(11, 158, 179),
+ MRFLD_FAMILY_PROTECTED(12, 180, 194),
+ MRFLD_FAMILY(13, 195, 214),
+ MRFLD_FAMILY(14, 215, 227),
+ MRFLD_FAMILY(15, 228, 232),
+};
+
+/**
+ * struct mrfld_pinctrl - Intel Merrifield pinctrl private structure
+ * @dev: Pointer to the device structure
+ * @lock: Lock to serialize register access
+ * @pctldesc: Pin controller description
+ * @pctldev: Pointer to the pin controller device
+ * @families: Array of families this pinctrl handles
+ * @nfamilies: Number of families in the array
+ * @functions: Array of functions
+ * @nfunctions: Number of functions in the array
+ * @groups: Array of pin groups
+ * @ngroups: Number of groups in the array
+ * @pins: Array of pins this pinctrl controls
+ * @npins: Number of pins in the array
+ */
+struct mrfld_pinctrl {
+ struct device *dev;
+ raw_spinlock_t lock;
+ struct pinctrl_desc pctldesc;
+ struct pinctrl_dev *pctldev;
+
+ /* Pin controller configuration */
+ const struct mrfld_family *families;
+ size_t nfamilies;
+ const struct intel_function *functions;
+ size_t nfunctions;
+ const struct intel_pingroup *groups;
+ size_t ngroups;
+ const struct pinctrl_pin_desc *pins;
+ size_t npins;
+};
+
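+/*
+ * Convert a global pin number to the index of its buffer within the
+ * family: pins are numbered contiguously from the family's pin_base.
+ */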
+#define pin_to_bufno(f, p) ((p) - (f)->pin_base)
+
+static const struct mrfld_family *mrfld_get_family(struct mrfld_pinctrl *mp,
+ unsigned int pin)
+{
+ const struct mrfld_family *family;
+ unsigned int i;
+
+ for (i = 0; i < mp->nfamilies; i++) {
+ family = &mp->families[i];
+ if (pin >= family->pin_base &&
+ pin < family->pin_base + family->npins)
+ return family;
+ }
+
+ dev_warn(mp->dev, "failed to find family for pin %u\n", pin);
+ return NULL;
+}
+
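+/*
+ * Families created with MRFLD_FAMILY_PROTECTED (families 7 and 12 above)
+ * cannot be reconfigured by this driver, so their buffers are reported
+ * as unavailable.
+ */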
+static bool mrfld_buf_available(struct mrfld_pinctrl *mp, unsigned int pin)
+{
+ const struct mrfld_family *family;
+
+ family = mrfld_get_family(mp, pin);
+ if (!family)
+ return false;
+
+ return !family->protected;
+}
+
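+/* BUFCFG registers are 32-bit wide, hence the 4 byte stride per pin */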
+static void __iomem *mrfld_get_bufcfg(struct mrfld_pinctrl *mp, unsigned int pin)
+{
+ const struct mrfld_family *family;
+ unsigned int bufno;
+
+ family = mrfld_get_family(mp, pin);
+ if (!family)
+ return NULL;
+
+ bufno = pin_to_bufno(family, pin);
+ return family->regs + BUFCFG_OFFSET + bufno * 4;
+}
+
+static int mrfld_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev);
+
+ return mp->ngroups;
+}
+
+static const char *mrfld_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int group)
+{
+ struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev);
+
+ return mp->groups[group].name;
+}
+
+static int mrfld_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group,
+ const unsigned int **pins, unsigned int *npins)
+{
+ struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = mp->groups[group].pins;
+ *npins = mp->groups[group].npins;
+ return 0;
+}
+
+static void mrfld_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
+ unsigned int pin)
+{
+ struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev);
+ void __iomem *bufcfg;
+ u32 value, mode;
+
+ if (!mrfld_buf_available(mp, pin)) {
+ seq_puts(s, "not available");
+ return;
+ }
+
+ bufcfg = mrfld_get_bufcfg(mp, pin);
+ value = readl(bufcfg);
+
+ mode = (value & BUFCFG_PINMODE_MASK) >> BUFCFG_PINMODE_SHIFT;
+ if (!mode)
+ seq_puts(s, "GPIO ");
+ else
+ seq_printf(s, "mode %d ", mode);
+
+ seq_printf(s, "0x%08x", value);
+}
+
+static const struct pinctrl_ops mrfld_pinctrl_ops = {
+ .get_groups_count = mrfld_get_groups_count,
+ .get_group_name = mrfld_get_group_name,
+ .get_group_pins = mrfld_get_group_pins,
+ .pin_dbg_show = mrfld_pin_dbg_show,
+};
+
+static int mrfld_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev);
+
+ return mp->nfunctions;
+}
+
+static const char *mrfld_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned int function)
+{
+ struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev);
+
+ return mp->functions[function].name;
+}
+
+static int mrfld_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned int function,
+ const char * const **groups,
+ unsigned int * const ngroups)
+{
+ struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = mp->functions[function].groups;
+ *ngroups = mp->functions[function].ngroups;
+ return 0;
+}
+
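+/* Read-modify-write of a single BUFCFG register; callers hold mp->lock */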
+static void mrfld_update_bufcfg(struct mrfld_pinctrl *mp, unsigned int pin,
+ u32 bits, u32 mask)
+{
+ void __iomem *bufcfg;
+ u32 value;
+
+ bufcfg = mrfld_get_bufcfg(mp, pin);
+ value = readl(bufcfg);
+
+ value &= ~mask;
+ value |= bits & mask;
+
+ writel(value, bufcfg);
+}
+
+static int mrfld_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int function,
+ unsigned int group)
+{
+ struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev);
+ const struct intel_pingroup *grp = &mp->groups[group];
+ u32 bits = grp->mode << BUFCFG_PINMODE_SHIFT;
+ u32 mask = BUFCFG_PINMODE_MASK;
+ unsigned long flags;
+ unsigned int i;
+
+ /*
+ * All pins in the group need to be accessible and writable
+ * before we can enable the mux for this group.
+ */
+ for (i = 0; i < grp->npins; i++) {
+ if (!mrfld_buf_available(mp, grp->pins[i]))
+ return -EBUSY;
+ }
+
+ /* Now enable the mux setting for each pin in the group */
+ raw_spin_lock_irqsave(&mp->lock, flags);
+ for (i = 0; i < grp->npins; i++)
+ mrfld_update_bufcfg(mp, grp->pins[i], bits, mask);
+ raw_spin_unlock_irqrestore(&mp->lock, flags);
+
+ return 0;
+}
+
+static int mrfld_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin)
+{
+ struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev);
+ u32 bits = BUFCFG_PINMODE_GPIO << BUFCFG_PINMODE_SHIFT;
+ u32 mask = BUFCFG_PINMODE_MASK;
+ unsigned long flags;
+
+ if (!mrfld_buf_available(mp, pin))
+ return -EBUSY;
+
+ raw_spin_lock_irqsave(&mp->lock, flags);
+ mrfld_update_bufcfg(mp, pin, bits, mask);
+ raw_spin_unlock_irqrestore(&mp->lock, flags);
+
+ return 0;
+}
+
+static const struct pinmux_ops mrfld_pinmux_ops = {
+ .get_functions_count = mrfld_get_functions_count,
+ .get_function_name = mrfld_get_function_name,
+ .get_function_groups = mrfld_get_function_groups,
+ .set_mux = mrfld_pinmux_set_mux,
+ .gpio_request_enable = mrfld_gpio_request_enable,
+};
+
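+/*
+ * Bias strength is stored in the PUPD_VAL field and exposed to generic
+ * pinconf as the pull resistor value in ohms (910, 2k, 20k or 50k).
+ */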
+static int mrfld_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config)
+{
+ struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ u32 value, term;
+ u16 arg = 0;
+
+ if (!mrfld_buf_available(mp, pin))
+ return -ENOTSUPP;
+
+ value = readl(mrfld_get_bufcfg(mp, pin));
+ term = (value & BUFCFG_PUPD_VAL_MASK) >> BUFCFG_PUPD_VAL_SHIFT;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ if (value & BUFCFG_Px_EN_MASK)
+ return -EINVAL;
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if ((value & BUFCFG_Px_EN_MASK) != BUFCFG_PU_EN)
+ return -EINVAL;
+
+ switch (term) {
+ case BUFCFG_PUPD_VAL_910:
+ arg = 910;
+ break;
+ case BUFCFG_PUPD_VAL_2K:
+ arg = 2000;
+ break;
+ case BUFCFG_PUPD_VAL_20K:
+ arg = 20000;
+ break;
+ case BUFCFG_PUPD_VAL_50K:
+ arg = 50000;
+ break;
+ }
+
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if ((value & BUFCFG_Px_EN_MASK) != BUFCFG_PD_EN)
+ return -EINVAL;
+
+ switch (term) {
+ case BUFCFG_PUPD_VAL_910:
+ arg = 910;
+ break;
+ case BUFCFG_PUPD_VAL_2K:
+ arg = 2000;
+ break;
+ case BUFCFG_PUPD_VAL_20K:
+ arg = 20000;
+ break;
+ case BUFCFG_PUPD_VAL_50K:
+ arg = 50000;
+ break;
+ }
+
+ break;
+
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ if (!(value & BUFCFG_OD_EN))
+ return -EINVAL;
+ break;
+
+ case PIN_CONFIG_SLEW_RATE:
+ if (!(value & BUFCFG_SLEWSEL))
+ arg = 0;
+ else
+ arg = 1;
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+ return 0;
+}
+
+static int mrfld_config_set_pin(struct mrfld_pinctrl *mp, unsigned int pin,
+ unsigned long config)
+{
+ unsigned int param = pinconf_to_config_param(config);
+ unsigned int arg = pinconf_to_config_argument(config);
+ u32 bits = 0, mask = 0;
+ unsigned long flags;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ mask |= BUFCFG_Px_EN_MASK | BUFCFG_PUPD_VAL_MASK;
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ mask |= BUFCFG_Px_EN_MASK | BUFCFG_PUPD_VAL_MASK;
+ bits |= BUFCFG_PU_EN;
+
+ switch (arg) {
+ case 50000:
+ bits |= BUFCFG_PUPD_VAL_50K << BUFCFG_PUPD_VAL_SHIFT;
+ break;
+ case 20000:
+ bits |= BUFCFG_PUPD_VAL_20K << BUFCFG_PUPD_VAL_SHIFT;
+ break;
+ case 2000:
+ bits |= BUFCFG_PUPD_VAL_2K << BUFCFG_PUPD_VAL_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ mask |= BUFCFG_Px_EN_MASK | BUFCFG_PUPD_VAL_MASK;
+ bits |= BUFCFG_PD_EN;
+
+ switch (arg) {
+ case 50000:
+ bits |= BUFCFG_PUPD_VAL_50K << BUFCFG_PUPD_VAL_SHIFT;
+ break;
+ case 20000:
+ bits |= BUFCFG_PUPD_VAL_20K << BUFCFG_PUPD_VAL_SHIFT;
+ break;
+ case 2000:
+ bits |= BUFCFG_PUPD_VAL_2K << BUFCFG_PUPD_VAL_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ break;
+
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ mask |= BUFCFG_OD_EN;
+ if (arg)
+ bits |= BUFCFG_OD_EN;
+ break;
+
+ case PIN_CONFIG_SLEW_RATE:
+ mask |= BUFCFG_SLEWSEL;
+ if (arg)
+ bits |= BUFCFG_SLEWSEL;
+ break;
+ }
+
+ raw_spin_lock_irqsave(&mp->lock, flags);
+ mrfld_update_bufcfg(mp, pin, bits, mask);
+ raw_spin_unlock_irqrestore(&mp->lock, flags);
+
+ return 0;
+}
+
+static int mrfld_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int nconfigs)
+{
+ struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < nconfigs; i++) {
+ switch (pinconf_to_config_param(configs[i])) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ case PIN_CONFIG_SLEW_RATE:
+ ret = mrfld_config_set_pin(mp, pin, configs[i]);
+ if (ret)
+ return ret;
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static const struct pinconf_ops mrfld_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = mrfld_config_get,
+ .pin_config_set = mrfld_config_set,
+};
+
+static const struct pinctrl_desc mrfld_pinctrl_desc = {
+ .pctlops = &mrfld_pinctrl_ops,
+ .pmxops = &mrfld_pinmux_ops,
+ .confops = &mrfld_pinconf_ops,
+ .owner = THIS_MODULE,
+};
+
+static int mrfld_pinctrl_probe(struct platform_device *pdev)
+{
+ struct mrfld_family *families;
+ struct mrfld_pinctrl *mp;
+ struct resource *mem;
+ void __iomem *regs;
+ size_t nfamilies;
+ unsigned int i;
+
+ mp = devm_kzalloc(&pdev->dev, sizeof(*mp), GFP_KERNEL);
+ if (!mp)
+ return -ENOMEM;
+
+ mp->dev = &pdev->dev;
+ raw_spin_lock_init(&mp->lock);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ /*
+ * Make a writable copy of the families so that we can store the
+ * per-family register pointers in it.
+ */
+ nfamilies = ARRAY_SIZE(mrfld_families);
+ families = devm_kmemdup(&pdev->dev, mrfld_families,
+ sizeof(mrfld_families),
+ GFP_KERNEL);
+ if (!families)
+ return -ENOMEM;
+
+ /* Carve the MMIO resource into one register chunk per family */
+ for (i = 0; i < nfamilies; i++) {
+ struct mrfld_family *family = &families[i];
+
+ family->regs = regs + family->barno * MRFLD_FAMILY_LEN;
+ }
+
+ mp->families = families;
+ mp->nfamilies = nfamilies;
+ mp->functions = mrfld_functions;
+ mp->nfunctions = ARRAY_SIZE(mrfld_functions);
+ mp->groups = mrfld_groups;
+ mp->ngroups = ARRAY_SIZE(mrfld_groups);
+ mp->pctldesc = mrfld_pinctrl_desc;
+ mp->pctldesc.name = dev_name(&pdev->dev);
+ mp->pctldesc.pins = mrfld_pins;
+ mp->pctldesc.npins = ARRAY_SIZE(mrfld_pins);
+
+ mp->pctldev = devm_pinctrl_register(&pdev->dev, &mp->pctldesc, mp);
+ if (IS_ERR(mp->pctldev)) {
+ dev_err(&pdev->dev, "failed to register pinctrl driver\n");
+ return PTR_ERR(mp->pctldev);
+ }
+
+ platform_set_drvdata(pdev, mp);
+ return 0;
+}
+
+static struct platform_driver mrfld_pinctrl_driver = {
+ .probe = mrfld_pinctrl_probe,
+ .driver = {
+ .name = "pinctrl-merrifield",
+ },
+};
+
+static int __init mrfld_pinctrl_init(void)
+{
+ return platform_driver_register(&mrfld_pinctrl_driver);
+}
+subsys_initcall(mrfld_pinctrl_init);
+
+static void __exit mrfld_pinctrl_exit(void)
+{
+ platform_driver_unregister(&mrfld_pinctrl_driver);
+}
+module_exit(mrfld_pinctrl_exit);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Merrifield SoC pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:pinctrl-merrifield");
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index a607655d7..ce554e0d6 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -1183,8 +1183,8 @@ static int mtk_eint_resume(struct device *device)
}
const struct dev_pm_ops mtk_eint_pm_ops = {
- .suspend = mtk_eint_suspend,
- .resume = mtk_eint_resume,
+ .suspend_noirq = mtk_eint_suspend,
+ .resume_noirq = mtk_eint_resume,
};
static void mtk_eint_ack(struct irq_data *d)
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index eeabafbbf..cb4d6ad30 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -147,6 +147,52 @@ static const struct pinctrl_pin_desc meson_gxbb_periphs_pins[] = {
MESON_PIN(GPIO_TEST_N, EE_OFF),
};
+static const unsigned int emmc_nand_d07_pins[] = {
+ PIN(BOOT_0, EE_OFF), PIN(BOOT_1, EE_OFF), PIN(BOOT_2, EE_OFF),
+ PIN(BOOT_3, EE_OFF), PIN(BOOT_4, EE_OFF), PIN(BOOT_5, EE_OFF),
+ PIN(BOOT_6, EE_OFF), PIN(BOOT_7, EE_OFF),
+};
+static const unsigned int emmc_clk_pins[] = { PIN(BOOT_8, EE_OFF) };
+static const unsigned int emmc_cmd_pins[] = { PIN(BOOT_10, EE_OFF) };
+static const unsigned int emmc_ds_pins[] = { PIN(BOOT_15, EE_OFF) };
+
+static const unsigned int sdcard_d0_pins[] = { PIN(CARD_1, EE_OFF) };
+static const unsigned int sdcard_d1_pins[] = { PIN(CARD_0, EE_OFF) };
+static const unsigned int sdcard_d2_pins[] = { PIN(CARD_5, EE_OFF) };
+static const unsigned int sdcard_d3_pins[] = { PIN(CARD_4, EE_OFF) };
+static const unsigned int sdcard_cmd_pins[] = { PIN(CARD_3, EE_OFF) };
+static const unsigned int sdcard_clk_pins[] = { PIN(CARD_2, EE_OFF) };
+
+static const unsigned int uart_tx_a_pins[] = { PIN(GPIOX_12, EE_OFF) };
+static const unsigned int uart_rx_a_pins[] = { PIN(GPIOX_13, EE_OFF) };
+static const unsigned int uart_cts_a_pins[] = { PIN(GPIOX_14, EE_OFF) };
+static const unsigned int uart_rts_a_pins[] = { PIN(GPIOX_15, EE_OFF) };
+
+static const unsigned int uart_tx_b_pins[] = { PIN(GPIODV_24, EE_OFF) };
+static const unsigned int uart_rx_b_pins[] = { PIN(GPIODV_25, EE_OFF) };
+static const unsigned int uart_cts_b_pins[] = { PIN(GPIODV_26, EE_OFF) };
+static const unsigned int uart_rts_b_pins[] = { PIN(GPIODV_27, EE_OFF) };
+
+static const unsigned int uart_tx_c_pins[] = { PIN(GPIOY_13, EE_OFF) };
+static const unsigned int uart_rx_c_pins[] = { PIN(GPIOY_14, EE_OFF) };
+static const unsigned int uart_cts_c_pins[] = { PIN(GPIOX_11, EE_OFF) };
+static const unsigned int uart_rts_c_pins[] = { PIN(GPIOX_12, EE_OFF) };
+
+static const unsigned int eth_mdio_pins[] = { PIN(GPIOZ_0, EE_OFF) };
+static const unsigned int eth_mdc_pins[] = { PIN(GPIOZ_1, EE_OFF) };
+static const unsigned int eth_clk_rx_clk_pins[] = { PIN(GPIOZ_2, EE_OFF) };
+static const unsigned int eth_rx_dv_pins[] = { PIN(GPIOZ_3, EE_OFF) };
+static const unsigned int eth_rxd0_pins[] = { PIN(GPIOZ_4, EE_OFF) };
+static const unsigned int eth_rxd1_pins[] = { PIN(GPIOZ_5, EE_OFF) };
+static const unsigned int eth_rxd2_pins[] = { PIN(GPIOZ_6, EE_OFF) };
+static const unsigned int eth_rxd3_pins[] = { PIN(GPIOZ_7, EE_OFF) };
+static const unsigned int eth_rgmii_tx_clk_pins[] = { PIN(GPIOZ_8, EE_OFF) };
+static const unsigned int eth_tx_en_pins[] = { PIN(GPIOZ_9, EE_OFF) };
+static const unsigned int eth_txd0_pins[] = { PIN(GPIOZ_10, EE_OFF) };
+static const unsigned int eth_txd1_pins[] = { PIN(GPIOZ_11, EE_OFF) };
+static const unsigned int eth_txd2_pins[] = { PIN(GPIOZ_12, EE_OFF) };
+static const unsigned int eth_txd3_pins[] = { PIN(GPIOZ_13, EE_OFF) };
+
static const struct pinctrl_pin_desc meson_gxbb_aobus_pins[] = {
MESON_PIN(GPIOAO_0, 0),
MESON_PIN(GPIOAO_1, 0),
@@ -168,6 +214,16 @@ static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, 0) };
static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) };
static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) };
static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) };
+static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_0, 0) };
+static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_1, 0),
+ PIN(GPIOAO_5, 0) };
+static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) };
+static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) };
+
+static const unsigned int i2c_sck_ao_pins[] = { PIN(GPIOAO_4, 0) };
+static const unsigned int i2c_sda_ao_pins[] = { PIN(GPIOAO_5, 0) };
+static const unsigned int i2c_slave_sck_ao_pins[] = { PIN(GPIOAO_4, 0) };
+static const unsigned int i2c_slave_sda_ao_pins[] = { PIN(GPIOAO_5, 0) };
static struct meson_pmx_group meson_gxbb_periphs_groups[] = {
GPIO_GROUP(GPIOZ_0, EE_OFF),
@@ -297,6 +353,54 @@ static struct meson_pmx_group meson_gxbb_periphs_groups[] = {
GPIO_GROUP(GPIOCLK_3, EE_OFF),
GPIO_GROUP(GPIO_TEST_N, EE_OFF),
+
+ /* Bank X */
+ GROUP(uart_tx_a, 4, 13),
+ GROUP(uart_rx_a, 4, 12),
+ GROUP(uart_cts_a, 4, 11),
+ GROUP(uart_rts_a, 4, 10),
+
+ /* Bank Y */
+ GROUP(uart_cts_c, 1, 19),
+ GROUP(uart_rts_c, 1, 18),
+ GROUP(uart_tx_c, 1, 17),
+ GROUP(uart_rx_c, 1, 16),
+
+ /* Bank Z */
+ GROUP(eth_mdio, 6, 1),
+ GROUP(eth_mdc, 6, 0),
+ GROUP(eth_clk_rx_clk, 6, 13),
+ GROUP(eth_rx_dv, 6, 12),
+ GROUP(eth_rxd0, 6, 11),
+ GROUP(eth_rxd1, 6, 10),
+ GROUP(eth_rxd2, 6, 9),
+ GROUP(eth_rxd3, 6, 8),
+ GROUP(eth_rgmii_tx_clk, 6, 7),
+ GROUP(eth_tx_en, 6, 6),
+ GROUP(eth_txd0, 6, 5),
+ GROUP(eth_txd1, 6, 4),
+ GROUP(eth_txd2, 6, 3),
+ GROUP(eth_txd3, 6, 2),
+
+ /* Bank DV */
+ GROUP(uart_tx_b, 2, 29),
+ GROUP(uart_rx_b, 2, 28),
+ GROUP(uart_cts_b, 2, 27),
+ GROUP(uart_rts_b, 2, 26),
+
+ /* Bank BOOT */
+ GROUP(emmc_nand_d07, 4, 30),
+ GROUP(emmc_clk, 4, 18),
+ GROUP(emmc_cmd, 4, 19),
+ GROUP(emmc_ds, 4, 31),
+
+ /* Bank CARD */
+ GROUP(sdcard_d1, 2, 14),
+ GROUP(sdcard_d0, 2, 15),
+ GROUP(sdcard_d3, 2, 12),
+ GROUP(sdcard_d2, 2, 13),
+ GROUP(sdcard_cmd, 2, 10),
+ GROUP(sdcard_clk, 2, 11),
};
static struct meson_pmx_group meson_gxbb_aobus_groups[] = {
@@ -316,10 +420,18 @@ static struct meson_pmx_group meson_gxbb_aobus_groups[] = {
GPIO_GROUP(GPIOAO_13, 0),
/* bank AO */
+ GROUP(uart_tx_ao_b, 0, 26),
+ GROUP(uart_rx_ao_b, 0, 25),
GROUP(uart_tx_ao_a, 0, 12),
GROUP(uart_rx_ao_a, 0, 11),
GROUP(uart_cts_ao_a, 0, 10),
GROUP(uart_rts_ao_a, 0, 9),
+ GROUP(uart_cts_ao_b, 0, 8),
+ GROUP(uart_rts_ao_b, 0, 7),
+ GROUP(i2c_sck_ao, 0, 6),
+ GROUP(i2c_sda_ao, 0, 5),
+ GROUP(i2c_slave_sck_ao, 0, 2),
+ GROUP(i2c_slave_sda_ao, 0, 1),
};
static const char * const gpio_periphs_groups[] = {
@@ -359,6 +471,34 @@ static const char * const gpio_periphs_groups[] = {
"GPIO_TEST_N",
};
+static const char * const emmc_groups[] = {
+ "emmc_nand_d07", "emmc_clk", "emmc_cmd", "emmc_ds",
+};
+
+static const char * const sdcard_groups[] = {
+ "sdcard_d0", "sdcard_d1", "sdcard_d2", "sdcard_d3",
+ "sdcard_cmd", "sdcard_clk",
+};
+
+static const char * const uart_a_groups[] = {
+ "uart_tx_a", "uart_rx_a", "uart_cts_a", "uart_rts_a",
+};
+
+static const char * const uart_b_groups[] = {
+ "uart_tx_b", "uart_rx_b", "uart_cts_b", "uart_rts_b",
+};
+
+static const char * const uart_c_groups[] = {
+ "uart_tx_c", "uart_rx_c", "uart_cts_c", "uart_rts_c",
+};
+
+static const char * const eth_groups[] = {
+ "eth_mdio", "eth_mdc", "eth_clk_rx_clk", "eth_rx_dv",
+ "eth_rxd0", "eth_rxd1", "eth_rxd2", "eth_rxd3",
+ "eth_rgmii_tx_clk", "eth_tx_en",
+ "eth_txd0", "eth_txd1", "eth_txd2", "eth_txd3",
+};
+
static const char * const gpio_aobus_groups[] = {
"GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3", "GPIOAO_4",
"GPIOAO_5", "GPIOAO_6", "GPIOAO_7", "GPIOAO_8", "GPIOAO_9",
@@ -366,16 +506,37 @@ static const char * const gpio_aobus_groups[] = {
};
static const char * const uart_ao_groups[] = {
- "uart_tx_ao_a", "uart_rx_ao_a", "uart_cts_ao_a", "uart_rts_ao_a"
+ "uart_tx_ao_a", "uart_rx_ao_a", "uart_cts_ao_a", "uart_rts_ao_a",
+};
+
+static const char * const uart_ao_b_groups[] = {
+ "uart_tx_ao_b", "uart_rx_ao_b", "uart_cts_ao_b", "uart_rts_ao_b",
+};
+
+static const char * const i2c_ao_groups[] = {
+ "i2c_sdk_ao", "i2c_sda_ao",
+};
+
+static const char * const i2c_slave_ao_groups[] = {
+ "i2c_slave_sdk_ao", "i2c_slave_sda_ao",
};
static struct meson_pmx_func meson_gxbb_periphs_functions[] = {
FUNCTION(gpio_periphs),
+ FUNCTION(emmc),
+ FUNCTION(sdcard),
+ FUNCTION(uart_a),
+ FUNCTION(uart_b),
+ FUNCTION(uart_c),
+ FUNCTION(eth),
};
static struct meson_pmx_func meson_gxbb_aobus_functions[] = {
FUNCTION(gpio_aobus),
FUNCTION(uart_ao),
+ FUNCTION(uart_ao_b),
+ FUNCTION(i2c_ao),
+ FUNCTION(i2c_slave_ao),
};
static struct meson_bank meson_gxbb_periphs_banks[] = {
diff --git a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
index a78e9a499..5f89c26f3 100644
--- a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
+++ b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
@@ -168,87 +168,87 @@ static struct mvebu_mpp_mode mv88f6xxx_mpp_modes[] = {
MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1, 1)),
MPP_VAR_FUNCTION(0x1, "nand", "io1", V(1, 1, 1, 1, 1, 1))),
MPP_MODE(20,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x1, "ts", "mp0", V(0, 0, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x2, "tdm", "tx0ql", V(0, 0, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x3, "ge1", "txd0", V(0, 1, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x4, "audio", "spdifi", V(0, 0, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x5, "sata1", "act", V(0, 0, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0xb, "lcd", "d0", V(0, 0, 0, 0, 1, 0)),
- MPP_VAR_FUNCTION(0xc, "mii", "rxerr", V(1, 0, 0, 0, 0, 0))),
+ MPP_VAR_FUNCTION(0xc, "mii", "rxerr", V(0, 0, 0, 0, 0, 0))),
MPP_MODE(21,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x1, "ts", "mp1", V(0, 0, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x2, "tdm", "rx0ql", V(0, 0, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x3, "ge1", "txd1", V(0, 1, 1, 1, 1, 0)),
- MPP_VAR_FUNCTION(0x4, "audio", "spdifi", V(1, 0, 0, 0, 0, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "spdifi", V(0, 0, 0, 0, 0, 0)),
MPP_VAR_FUNCTION(0x4, "audio", "spdifo", V(0, 0, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x5, "sata0", "act", V(0, 1, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0xb, "lcd", "d1", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(22,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x1, "ts", "mp2", V(0, 0, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x2, "tdm", "tx2ql", V(0, 0, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x3, "ge1", "txd2", V(0, 1, 1, 1, 1, 0)),
- MPP_VAR_FUNCTION(0x4, "audio", "spdifo", V(1, 0, 0, 0, 0, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "spdifo", V(0, 0, 0, 0, 0, 0)),
MPP_VAR_FUNCTION(0x4, "audio", "rmclk", V(0, 0, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x5, "sata1", "prsnt", V(0, 0, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0xb, "lcd", "d2", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(23,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x1, "ts", "mp3", V(0, 0, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x2, "tdm", "rx2ql", V(0, 0, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x3, "ge1", "txd3", V(0, 1, 1, 1, 1, 0)),
- MPP_VAR_FUNCTION(0x4, "audio", "rmclk", V(1, 0, 0, 0, 0, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "rmclk", V(0, 0, 0, 0, 0, 0)),
MPP_VAR_FUNCTION(0x4, "audio", "bclk", V(0, 0, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x5, "sata0", "prsnt", V(0, 1, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0xb, "lcd", "d3", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(24,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x1, "ts", "mp4", V(0, 0, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x2, "tdm", "spi-cs0", V(0, 0, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x3, "ge1", "rxd0", V(0, 1, 1, 1, 1, 0)),
- MPP_VAR_FUNCTION(0x4, "audio", "bclk", V(1, 0, 0, 0, 0, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "bclk", V(0, 0, 0, 0, 0, 0)),
MPP_VAR_FUNCTION(0x4, "audio", "sdo", V(0, 0, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0xb, "lcd", "d4", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(25,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x1, "ts", "mp5", V(0, 0, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x2, "tdm", "spi-sck", V(0, 0, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x3, "ge1", "rxd1", V(0, 1, 1, 1, 1, 0)),
- MPP_VAR_FUNCTION(0x4, "audio", "sdo", V(1, 0, 0, 0, 0, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "sdo", V(0, 0, 0, 0, 0, 0)),
MPP_VAR_FUNCTION(0x4, "audio", "lrclk", V(0, 0, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0xb, "lcd", "d5", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(26,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x1, "ts", "mp6", V(0, 0, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x2, "tdm", "spi-miso", V(0, 0, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x3, "ge1", "rxd2", V(0, 1, 1, 1, 1, 0)),
- MPP_VAR_FUNCTION(0x4, "audio", "lrclk", V(1, 0, 0, 0, 0, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "lrclk", V(0, 0, 0, 0, 0, 0)),
MPP_VAR_FUNCTION(0x4, "audio", "mclk", V(0, 0, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0xb, "lcd", "d6", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(27,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x1, "ts", "mp7", V(0, 0, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x2, "tdm", "spi-mosi", V(0, 0, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x3, "ge1", "rxd3", V(0, 1, 1, 1, 1, 0)),
- MPP_VAR_FUNCTION(0x4, "audio", "mclk", V(1, 0, 0, 0, 0, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "mclk", V(0, 0, 0, 0, 0, 0)),
MPP_VAR_FUNCTION(0x4, "audio", "sdi", V(0, 0, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0xb, "lcd", "d7", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(28,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x1, "ts", "mp8", V(0, 0, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x2, "tdm", "int", V(0, 0, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x3, "ge1", "col", V(0, 1, 1, 1, 1, 0)),
- MPP_VAR_FUNCTION(0x4, "audio", "sdi", V(1, 0, 0, 0, 0, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "sdi", V(0, 0, 0, 0, 0, 0)),
MPP_VAR_FUNCTION(0x4, "audio", "extclk", V(0, 0, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0xb, "lcd", "d8", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(29,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x1, "ts", "mp9", V(0, 0, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x2, "tdm", "rst", V(0, 0, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x3, "ge1", "txclk", V(0, 1, 1, 1, 1, 0)),
- MPP_VAR_FUNCTION(0x4, "audio", "extclk", V(1, 0, 0, 0, 0, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "extclk", V(0, 0, 0, 0, 0, 0)),
MPP_VAR_FUNCTION(0xb, "lcd", "d9", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(30,
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1, 0)),
@@ -280,65 +280,65 @@ static struct mvebu_mpp_mode mv88f6xxx_mpp_modes[] = {
MPP_VAR_FUNCTION(0x5, "sata1", "act", V(0, 0, 0, 1, 1, 0)),
MPP_VAR_FUNCTION(0xb, "lcd", "d14", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(35,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1, 1)),
MPP_VAR_FUNCTION(0x2, "tdm", "tx0ql", V(0, 0, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x3, "ge1", "rxerr", V(0, 1, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0x5, "sata0", "act", V(0, 1, 1, 1, 1, 0)),
MPP_VAR_FUNCTION(0xb, "lcd", "d15", V(0, 0, 0, 0, 1, 0)),
- MPP_VAR_FUNCTION(0xc, "mii", "rxerr", V(0, 1, 1, 1, 1, 0))),
+ MPP_VAR_FUNCTION(0xc, "mii", "rxerr", V(1, 1, 1, 1, 1, 0))),
MPP_MODE(36,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 0, 0, 1, 1, 1)),
MPP_VAR_FUNCTION(0x1, "ts", "mp0", V(0, 0, 0, 1, 1, 0)),
MPP_VAR_FUNCTION(0x2, "tdm", "spi-cs1", V(0, 0, 0, 1, 1, 0)),
- MPP_VAR_FUNCTION(0x4, "audio", "spdifi", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "spdifi", V(1, 0, 0, 1, 1, 0)),
MPP_VAR_FUNCTION(0xb, "twsi1", "sda", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(37,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 0, 0, 1, 1, 1)),
MPP_VAR_FUNCTION(0x1, "ts", "mp1", V(0, 0, 0, 1, 1, 0)),
MPP_VAR_FUNCTION(0x2, "tdm", "tx2ql", V(0, 0, 0, 1, 1, 0)),
- MPP_VAR_FUNCTION(0x4, "audio", "spdifo", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "spdifo", V(1, 0, 0, 1, 1, 0)),
MPP_VAR_FUNCTION(0xb, "twsi1", "sck", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(38,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 0, 0, 1, 1, 1)),
MPP_VAR_FUNCTION(0x1, "ts", "mp2", V(0, 0, 0, 1, 1, 0)),
MPP_VAR_FUNCTION(0x2, "tdm", "rx2ql", V(0, 0, 0, 1, 1, 0)),
- MPP_VAR_FUNCTION(0x4, "audio", "rmclk", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "rmclk", V(1, 0, 0, 1, 1, 0)),
MPP_VAR_FUNCTION(0xb, "lcd", "d18", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(39,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 0, 0, 1, 1, 1)),
MPP_VAR_FUNCTION(0x1, "ts", "mp3", V(0, 0, 0, 1, 1, 0)),
MPP_VAR_FUNCTION(0x2, "tdm", "spi-cs0", V(0, 0, 0, 1, 1, 0)),
- MPP_VAR_FUNCTION(0x4, "audio", "bclk", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "bclk", V(1, 0, 0, 1, 1, 0)),
MPP_VAR_FUNCTION(0xb, "lcd", "d19", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(40,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 0, 0, 1, 1, 1)),
MPP_VAR_FUNCTION(0x1, "ts", "mp4", V(0, 0, 0, 1, 1, 0)),
MPP_VAR_FUNCTION(0x2, "tdm", "spi-sck", V(0, 0, 0, 1, 1, 0)),
- MPP_VAR_FUNCTION(0x4, "audio", "sdo", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "sdo", V(1, 0, 0, 1, 1, 0)),
MPP_VAR_FUNCTION(0xb, "lcd", "d20", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(41,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 0, 0, 1, 1, 1)),
MPP_VAR_FUNCTION(0x1, "ts", "mp5", V(0, 0, 0, 1, 1, 0)),
MPP_VAR_FUNCTION(0x2, "tdm", "spi-miso", V(0, 0, 0, 1, 1, 0)),
- MPP_VAR_FUNCTION(0x4, "audio", "lrclk", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "lrclk", V(1, 0, 0, 1, 1, 0)),
MPP_VAR_FUNCTION(0xb, "lcd", "d21", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(42,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 0, 0, 1, 1, 1)),
MPP_VAR_FUNCTION(0x1, "ts", "mp6", V(0, 0, 0, 1, 1, 0)),
MPP_VAR_FUNCTION(0x2, "tdm", "spi-mosi", V(0, 0, 0, 1, 1, 0)),
- MPP_VAR_FUNCTION(0x4, "audio", "mclk", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "mclk", V(1, 0, 0, 1, 1, 0)),
MPP_VAR_FUNCTION(0xb, "lcd", "d22", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(43,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 0, 0, 1, 1, 1)),
MPP_VAR_FUNCTION(0x1, "ts", "mp7", V(0, 0, 0, 1, 1, 0)),
MPP_VAR_FUNCTION(0x2, "tdm", "int", V(0, 0, 0, 1, 1, 0)),
- MPP_VAR_FUNCTION(0x4, "audio", "sdi", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "sdi", V(1, 0, 0, 1, 1, 0)),
MPP_VAR_FUNCTION(0xb, "lcd", "d23", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(44,
- MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 1)),
+ MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 0, 0, 1, 1, 1)),
MPP_VAR_FUNCTION(0x1, "ts", "mp8", V(0, 0, 0, 1, 1, 0)),
MPP_VAR_FUNCTION(0x2, "tdm", "rst", V(0, 0, 0, 1, 1, 0)),
- MPP_VAR_FUNCTION(0x4, "audio", "extclk", V(0, 0, 0, 1, 1, 0)),
+ MPP_VAR_FUNCTION(0x4, "audio", "extclk", V(1, 0, 0, 1, 1, 0)),
MPP_VAR_FUNCTION(0xb, "lcd", "clk", V(0, 0, 0, 0, 1, 0))),
MPP_MODE(45,
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1, 1)),
@@ -371,11 +371,12 @@ static struct mvebu_mpp_mode mv88f6xxx_mpp_modes[] = {
};
static struct mvebu_mpp_ctrl mv88f6180_mpp_controls[] = {
- MPP_FUNC_CTRL(0, 29, NULL, kirkwood_mpp_ctrl),
+ MPP_FUNC_CTRL(0, 44, NULL, kirkwood_mpp_ctrl),
};
static struct pinctrl_gpio_range mv88f6180_gpio_ranges[] = {
- MPP_GPIO_RANGE(0, 0, 0, 30),
+ MPP_GPIO_RANGE(0, 0, 0, 20),
+ MPP_GPIO_RANGE(1, 35, 35, 10),
};
static struct mvebu_mpp_ctrl mv88f619x_mpp_controls[] = {
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 38faceff2..35f62180d 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -1033,102 +1033,6 @@ static inline void nmk_gpio_dbg_show_one(struct seq_file *s,
#define nmk_gpio_dbg_show NULL
#endif
-void nmk_gpio_clocks_enable(void)
-{
- int i;
-
- for (i = 0; i < NUM_BANKS; i++) {
- struct nmk_gpio_chip *chip = nmk_gpio_chips[i];
-
- if (!chip)
- continue;
-
- clk_enable(chip->clk);
- }
-}
-
-void nmk_gpio_clocks_disable(void)
-{
- int i;
-
- for (i = 0; i < NUM_BANKS; i++) {
- struct nmk_gpio_chip *chip = nmk_gpio_chips[i];
-
- if (!chip)
- continue;
-
- clk_disable(chip->clk);
- }
-}
-
-/*
- * Called from the suspend/resume path to only keep the real wakeup interrupts
- * (those that have had set_irq_wake() called on them) as wakeup interrupts,
- * and not the rest of the interrupts which we needed to have as wakeups for
- * cpuidle.
- *
- * PM ops are not used since this needs to be done at the end, after all the
- * other drivers are done with their suspend callbacks.
- */
-void nmk_gpio_wakeups_suspend(void)
-{
- int i;
-
- for (i = 0; i < NUM_BANKS; i++) {
- struct nmk_gpio_chip *chip = nmk_gpio_chips[i];
-
- if (!chip)
- break;
-
- clk_enable(chip->clk);
-
- writel(chip->rwimsc & chip->real_wake,
- chip->addr + NMK_GPIO_RWIMSC);
- writel(chip->fwimsc & chip->real_wake,
- chip->addr + NMK_GPIO_FWIMSC);
-
- clk_disable(chip->clk);
- }
-}
-
-void nmk_gpio_wakeups_resume(void)
-{
- int i;
-
- for (i = 0; i < NUM_BANKS; i++) {
- struct nmk_gpio_chip *chip = nmk_gpio_chips[i];
-
- if (!chip)
- break;
-
- clk_enable(chip->clk);
-
- writel(chip->rwimsc, chip->addr + NMK_GPIO_RWIMSC);
- writel(chip->fwimsc, chip->addr + NMK_GPIO_FWIMSC);
-
- clk_disable(chip->clk);
- }
-}
-
-/*
- * Read the pull up/pull down status.
- * A bit set in 'pull_up' means that pull up
- * is selected if pull is enabled in PDIS register.
- * Note: only pull up/down set via this driver can
- * be detected due to HW limitations.
- */
-void nmk_gpio_read_pull(int gpio_bank, u32 *pull_up)
-{
- if (gpio_bank < NUM_BANKS) {
- struct nmk_gpio_chip *chip = nmk_gpio_chips[gpio_bank];
-
- if (!chip)
- return;
-
- *pull_up = chip->pull_up;
- }
-}
-
/*
* We will allocate memory for the state container using devm* allocators
* binding to the first device reaching this point, it doesn't matter if
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index d5bf9fae2..5020ae534 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -53,7 +53,7 @@ static void pinconf_generic_dump_one(struct pinctrl_dev *pctldev,
struct seq_file *s, const char *gname,
unsigned pin,
const struct pin_config_item *items,
- int nitems)
+ int nitems, int *print_sep)
{
int i;
@@ -75,8 +75,10 @@ static void pinconf_generic_dump_one(struct pinctrl_dev *pctldev,
seq_printf(s, "ERROR READING CONFIG SETTING %d ", i);
continue;
}
- /* Space between multiple configs */
- seq_puts(s, " ");
+ /* comma between multiple configs */
+ if (*print_sep)
+ seq_puts(s, ", ");
+ *print_sep = 1;
seq_puts(s, items[i].display);
/* Print unit if available */
if (items[i].has_arg) {
@@ -105,19 +107,21 @@ void pinconf_generic_dump_pins(struct pinctrl_dev *pctldev, struct seq_file *s,
const char *gname, unsigned pin)
{
const struct pinconf_ops *ops = pctldev->desc->confops;
+ int print_sep = 0;
if (!ops->is_generic)
return;
/* generic parameters */
pinconf_generic_dump_one(pctldev, s, gname, pin, conf_items,
- ARRAY_SIZE(conf_items));
+ ARRAY_SIZE(conf_items), &print_sep);
/* driver-specific parameters */
if (pctldev->desc->num_custom_params &&
pctldev->desc->custom_conf_items)
pinconf_generic_dump_one(pctldev, s, gname, pin,
pctldev->desc->custom_conf_items,
- pctldev->desc->num_custom_params);
+ pctldev->desc->num_custom_params,
+ &print_sep);
}
void pinconf_generic_dump_config(struct pinctrl_dev *pctldev,
@@ -391,4 +395,12 @@ exit:
}
EXPORT_SYMBOL_GPL(pinconf_generic_dt_node_to_map);
+void pinconf_generic_dt_free_map(struct pinctrl_dev *pctldev,
+ struct pinctrl_map *map,
+ unsigned num_maps)
+{
+ pinctrl_utils_free_map(pctldev, map, num_maps);
+}
+EXPORT_SYMBOL_GPL(pinconf_generic_dt_free_map);
+
#endif
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
index 4dd7722f9..799048f3c 100644
--- a/drivers/pinctrl/pinconf.c
+++ b/drivers/pinctrl/pinconf.c
@@ -258,8 +258,7 @@ void pinconf_show_setting(struct seq_file *s,
case PIN_MAP_TYPE_CONFIGS_PIN:
desc = pin_desc_get(setting->pctldev,
setting->data.configs.group_or_pin);
- seq_printf(s, "pin %s (%d)",
- desc->name ? desc->name : "unnamed",
+ seq_printf(s, "pin %s (%d)", desc->name,
setting->data.configs.group_or_pin);
break;
case PIN_MAP_TYPE_CONFIGS_GROUP:
@@ -311,8 +310,7 @@ static int pinconf_pins_show(struct seq_file *s, void *what)
if (desc == NULL)
continue;
- seq_printf(s, "pin %d (%s):", pin,
- desc->name ? desc->name : "unnamed");
+ seq_printf(s, "pin %d (%s): ", pin, desc->name);
pinconf_dump_pin(pctldev, s, pin);
@@ -349,7 +347,7 @@ static int pinconf_groups_show(struct seq_file *s, void *what)
while (selector < ngroups) {
const char *gname = pctlops->get_group_name(pctldev, selector);
- seq_printf(s, "%u (%s):", selector, gname);
+ seq_printf(s, "%u (%s): ", selector, gname);
pinconf_dump_group(pctldev, s, selector, gname);
seq_printf(s, "\n");
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index a025b40d2..28bbc1bb9 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -20,7 +20,7 @@
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/pinconf.h>
@@ -421,8 +421,8 @@ static int atmel_pctl_get_group_pins(struct pinctrl_dev *pctldev,
return 0;
}
-struct atmel_group *atmel_pctl_find_group_by_pin(struct pinctrl_dev *pctldev,
- unsigned pin)
+static struct atmel_group *
+atmel_pctl_find_group_by_pin(struct pinctrl_dev *pctldev, unsigned pin)
{
struct atmel_pioctrl *atmel_pioctrl = pinctrl_dev_get_drvdata(pctldev);
int i;
@@ -879,7 +879,6 @@ static const struct of_device_id atmel_pctrl_of_match[] = {
/* sentinel */
}
};
-MODULE_DEVICE_TABLE(of, atmel_pctrl_of_match);
static int atmel_pinctrl_probe(struct platform_device *pdev)
{
@@ -1074,28 +1073,13 @@ clk_prepare_enable_error:
return ret;
}
-int atmel_pinctrl_remove(struct platform_device *pdev)
-{
- struct atmel_pioctrl *atmel_pioctrl = platform_get_drvdata(pdev);
-
- irq_domain_remove(atmel_pioctrl->irq_domain);
- clk_disable_unprepare(atmel_pioctrl->clk);
- gpiochip_remove(atmel_pioctrl->gpio_chip);
-
- return 0;
-}
-
static struct platform_driver atmel_pinctrl_driver = {
.driver = {
.name = "pinctrl-at91-pio4",
.of_match_table = atmel_pctrl_of_match,
.pm = &atmel_pctrl_pm_ops,
+ .suppress_bind_attrs = true,
},
.probe = atmel_pinctrl_probe,
- .remove = atmel_pinctrl_remove,
};
-module_platform_driver(atmel_pinctrl_driver);
-
-MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
-MODULE_DESCRIPTION("Atmel PIO4 pinctrl driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(atmel_pinctrl_driver);
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index b7c0d6f7c..80daead3a 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -9,7 +9,6 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
@@ -189,7 +188,7 @@ struct at91_pinctrl {
struct at91_pinctrl_mux_ops *ops;
};
-static const inline struct at91_pin_group *at91_pinctrl_find_group_by_name(
+static inline const struct at91_pin_group *at91_pinctrl_find_group_by_name(
const struct at91_pinctrl *info,
const char *name)
{
@@ -1818,13 +1817,3 @@ static int __init at91_pinctrl_init(void)
return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}
arch_initcall(at91_pinctrl_init);
-
-static void __exit at91_pinctrl_exit(void)
-{
- platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
-}
-
-module_exit(at91_pinctrl_exit);
-MODULE_AUTHOR("Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>");
-MODULE_DESCRIPTION("Atmel AT91 pinctrl driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-digicolor.c b/drivers/pinctrl/pinctrl-digicolor.c
index 30ee56427..639a57ecc 100644
--- a/drivers/pinctrl/pinctrl-digicolor.c
+++ b/drivers/pinctrl/pinctrl-digicolor.c
@@ -15,7 +15,7 @@
* - Pin pad configuration (pull up/down, strength)
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -335,27 +335,17 @@ static int dc_pinctrl_probe(struct platform_device *pdev)
return dc_gpiochip_add(pmap, pdev->dev.of_node);
}
-static int dc_pinctrl_remove(struct platform_device *pdev)
-{
- struct dc_pinmap *pmap = platform_get_drvdata(pdev);
-
- gpiochip_remove(&pmap->chip);
-
- return 0;
-}
-
static const struct of_device_id dc_pinctrl_ids[] = {
{ .compatible = "cnxt,cx92755-pinctrl" },
{ /* sentinel */ }
};
-MODULE_DEVICE_TABLE(of, dc_pinctrl_ids);
static struct platform_driver dc_pinctrl_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = dc_pinctrl_ids,
+ .suppress_bind_attrs = true,
},
.probe = dc_pinctrl_probe,
- .remove = dc_pinctrl_remove,
};
-module_platform_driver(dc_pinctrl_driver);
+builtin_platform_driver(dc_pinctrl_driver);
diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c
index 8a931c7ba..e053f1fa5 100644
--- a/drivers/pinctrl/pinctrl-lpc18xx.c
+++ b/drivers/pinctrl/pinctrl-lpc18xx.c
@@ -11,7 +11,7 @@
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
@@ -1365,31 +1365,17 @@ static int lpc18xx_scu_probe(struct platform_device *pdev)
return 0;
}
-static int lpc18xx_scu_remove(struct platform_device *pdev)
-{
- struct lpc18xx_scu_data *scu = platform_get_drvdata(pdev);
-
- clk_disable_unprepare(scu->clk);
-
- return 0;
-}
-
static const struct of_device_id lpc18xx_scu_match[] = {
{ .compatible = "nxp,lpc1850-scu" },
{},
};
-MODULE_DEVICE_TABLE(of, lpc18xx_scu_match);
static struct platform_driver lpc18xx_scu_driver = {
.probe = lpc18xx_scu_probe,
- .remove = lpc18xx_scu_remove,
.driver = {
.name = "lpc18xx-scu",
.of_match_table = lpc18xx_scu_match,
+ .suppress_bind_attrs = true,
},
};
-module_platform_driver(lpc18xx_scu_driver);
-
-MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
-MODULE_DESCRIPTION("Pinctrl driver for NXP LPC18xx/43xx SCU");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(lpc18xx_scu_driver);
diff --git a/drivers/pinctrl/pinctrl-max77620.c b/drivers/pinctrl/pinctrl-max77620.c
new file mode 100644
index 000000000..d9ff53e8f
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-max77620.c
@@ -0,0 +1,673 @@
+/*
+ * MAX77620 pin control driver.
+ *
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author:
+ * Chaitanya Bandi <bandik@nvidia.com>
+ * Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/mfd/max77620.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "core.h"
+#include "pinconf.h"
+#include "pinctrl-utils.h"
+
+#define MAX77620_PIN_NUM 8
+
+enum max77620_pin_ppdrv {
+ MAX77620_PIN_UNCONFIG_DRV,
+ MAX77620_PIN_OD_DRV,
+ MAX77620_PIN_PP_DRV,
+};
+
+enum max77620_pinconf_param {
+ MAX77620_ACTIVE_FPS_SOURCE = PIN_CONFIG_END + 1,
+ MAX77620_ACTIVE_FPS_POWER_ON_SLOTS,
+ MAX77620_ACTIVE_FPS_POWER_DOWN_SLOTS,
+ MAX77620_SUSPEND_FPS_SOURCE,
+ MAX77620_SUSPEND_FPS_POWER_ON_SLOTS,
+ MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS,
+};
+
+struct max77620_pin_function {
+ const char *name;
+ const char * const *groups;
+ unsigned int ngroups;
+ int mux_option;
+};
+
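+/*
+ * Vendor specific pin configuration parameters, mapped from the maxim,*
+ * device tree properties to custom values past PIN_CONFIG_END.
+ */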
+static const struct pinconf_generic_params max77620_cfg_params[] = {
+ {
+ .property = "maxim,active-fps-source",
+ .param = MAX77620_ACTIVE_FPS_SOURCE,
+ }, {
+ .property = "maxim,active-fps-power-up-slot",
+ .param = MAX77620_ACTIVE_FPS_POWER_ON_SLOTS,
+ }, {
+ .property = "maxim,active-fps-power-down-slot",
+ .param = MAX77620_ACTIVE_FPS_POWER_DOWN_SLOTS,
+ }, {
+ .property = "maxim,suspend-fps-source",
+ .param = MAX77620_SUSPEND_FPS_SOURCE,
+ }, {
+ .property = "maxim,suspend-fps-power-up-slot",
+ .param = MAX77620_SUSPEND_FPS_POWER_ON_SLOTS,
+ }, {
+ .property = "maxim,suspend-fps-power-down-slot",
+ .param = MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS,
+ },
+};
+
+enum max77620_alternate_pinmux_option {
+ MAX77620_PINMUX_GPIO = 0,
+ MAX77620_PINMUX_LOW_POWER_MODE_CONTROL_IN = 1,
+ MAX77620_PINMUX_FLEXIBLE_POWER_SEQUENCER_OUT = 2,
+ MAX77620_PINMUX_32K_OUT1 = 3,
+ MAX77620_PINMUX_SD0_DYNAMIC_VOLTAGE_SCALING_IN = 4,
+ MAX77620_PINMUX_SD1_DYNAMIC_VOLTAGE_SCALING_IN = 5,
+ MAX77620_PINMUX_REFERENCE_OUT = 6,
+};
+
+struct max77620_pingroup {
+ const char *name;
+ const unsigned int pins[1];
+ unsigned int npins;
+ enum max77620_alternate_pinmux_option alt_option;
+};
+
+struct max77620_pin_info {
+ enum max77620_pin_ppdrv drv_type;
+ int pull_config;
+};
+
+struct max77620_fps_config {
+ int active_fps_src;
+ int active_power_up_slots;
+ int active_power_down_slots;
+ int suspend_fps_src;
+ int suspend_power_up_slots;
+ int suspend_power_down_slots;
+};
+
+struct max77620_pctrl_info {
+ struct device *dev;
+ struct pinctrl_dev *pctl;
+ struct regmap *rmap;
+ int pins_current_opt[MAX77620_GPIO_NR];
+ const struct max77620_pin_function *functions;
+ unsigned int num_functions;
+ const struct max77620_pingroup *pin_groups;
+ int num_pin_groups;
+ const struct pinctrl_pin_desc *pins;
+ unsigned int num_pins;
+ struct max77620_pin_info pin_info[MAX77620_PIN_NUM];
+ struct max77620_fps_config fps_config[MAX77620_PIN_NUM];
+};
+
+static const struct pinctrl_pin_desc max77620_pins_desc[] = {
+ PINCTRL_PIN(MAX77620_GPIO0, "gpio0"),
+ PINCTRL_PIN(MAX77620_GPIO1, "gpio1"),
+ PINCTRL_PIN(MAX77620_GPIO2, "gpio2"),
+ PINCTRL_PIN(MAX77620_GPIO3, "gpio3"),
+ PINCTRL_PIN(MAX77620_GPIO4, "gpio4"),
+ PINCTRL_PIN(MAX77620_GPIO5, "gpio5"),
+ PINCTRL_PIN(MAX77620_GPIO6, "gpio6"),
+ PINCTRL_PIN(MAX77620_GPIO7, "gpio7"),
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0",
+ "gpio1",
+ "gpio2",
+ "gpio3",
+ "gpio4",
+ "gpio5",
+ "gpio6",
+ "gpio7",
+};
+
+#define FUNCTION_GROUP(fname, mux) \
+ { \
+ .name = fname, \
+ .groups = gpio_groups, \
+ .ngroups = ARRAY_SIZE(gpio_groups), \
+ .mux_option = MAX77620_PINMUX_##mux, \
+ }
+
+static const struct max77620_pin_function max77620_pin_function[] = {
+ FUNCTION_GROUP("gpio", GPIO),
+ FUNCTION_GROUP("lpm-control-in", LOW_POWER_MODE_CONTROL_IN),
+ FUNCTION_GROUP("fps-out", FLEXIBLE_POWER_SEQUENCER_OUT),
+ FUNCTION_GROUP("32k-out1", 32K_OUT1),
+ FUNCTION_GROUP("sd0-dvs-in", SD0_DYNAMIC_VOLTAGE_SCALING_IN),
+ FUNCTION_GROUP("sd1-dvs-in", SD1_DYNAMIC_VOLTAGE_SCALING_IN),
+ FUNCTION_GROUP("reference-out", REFERENCE_OUT),
+};
+
+#define MAX77620_PINGROUP(pg_name, pin_id, option) \
+ { \
+ .name = #pg_name, \
+ .pins = {MAX77620_##pin_id}, \
+ .npins = 1, \
+ .alt_option = MAX77620_PINMUX_##option, \
+ }
+
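+/*
+ * Every pin forms its own group; each group has exactly one alternate
+ * function besides GPIO.
+ */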
+static const struct max77620_pingroup max77620_pingroups[] = {
+ MAX77620_PINGROUP(gpio0, GPIO0, LOW_POWER_MODE_CONTROL_IN),
+ MAX77620_PINGROUP(gpio1, GPIO1, FLEXIBLE_POWER_SEQUENCER_OUT),
+ MAX77620_PINGROUP(gpio2, GPIO2, FLEXIBLE_POWER_SEQUENCER_OUT),
+ MAX77620_PINGROUP(gpio3, GPIO3, FLEXIBLE_POWER_SEQUENCER_OUT),
+ MAX77620_PINGROUP(gpio4, GPIO4, 32K_OUT1),
+ MAX77620_PINGROUP(gpio5, GPIO5, SD0_DYNAMIC_VOLTAGE_SCALING_IN),
+ MAX77620_PINGROUP(gpio6, GPIO6, SD1_DYNAMIC_VOLTAGE_SCALING_IN),
+ MAX77620_PINGROUP(gpio7, GPIO7, REFERENCE_OUT),
+};
+
+static int max77620_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct max77620_pctrl_info *mpci = pinctrl_dev_get_drvdata(pctldev);
+
+ return mpci->num_pin_groups;
+}
+
+static const char *max77620_pinctrl_get_group_name(
+ struct pinctrl_dev *pctldev, unsigned int group)
+{
+ struct max77620_pctrl_info *mpci = pinctrl_dev_get_drvdata(pctldev);
+
+ return mpci->pin_groups[group].name;
+}
+
+static int max77620_pinctrl_get_group_pins(
+ struct pinctrl_dev *pctldev, unsigned int group,
+ const unsigned int **pins, unsigned int *num_pins)
+{
+ struct max77620_pctrl_info *mpci = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = mpci->pin_groups[group].pins;
+ *num_pins = mpci->pin_groups[group].npins;
+
+ return 0;
+}
+
+static const struct pinctrl_ops max77620_pinctrl_ops = {
+ .get_groups_count = max77620_pinctrl_get_groups_count,
+ .get_group_name = max77620_pinctrl_get_group_name,
+ .get_group_pins = max77620_pinctrl_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .dt_free_map = pinctrl_utils_free_map,
+};
+
+static int max77620_pinctrl_get_funcs_count(struct pinctrl_dev *pctldev)
+{
+ struct max77620_pctrl_info *mpci = pinctrl_dev_get_drvdata(pctldev);
+
+ return mpci->num_functions;
+}
+
+static const char *max77620_pinctrl_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned int function)
+{
+ struct max77620_pctrl_info *mpci = pinctrl_dev_get_drvdata(pctldev);
+
+ return mpci->functions[function].name;
+}
+
+static int max77620_pinctrl_get_func_groups(struct pinctrl_dev *pctldev,
+ unsigned int function,
+ const char * const **groups,
+ unsigned int * const num_groups)
+{
+ struct max77620_pctrl_info *mpci = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = mpci->functions[function].groups;
+ *num_groups = mpci->functions[function].ngroups;
+
+ return 0;
+}
+
+static int max77620_pinctrl_enable(struct pinctrl_dev *pctldev,
+ unsigned int function, unsigned int group)
+{
+ struct max77620_pctrl_info *mpci = pinctrl_dev_get_drvdata(pctldev);
+ u8 val;
+ int ret;
+
+ if (function == MAX77620_PINMUX_GPIO) {
+ val = 0;
+ } else if (function == mpci->pin_groups[group].alt_option) {
+ val = BIT(group);
+ } else {
+ dev_err(mpci->dev, "GPIO %u doesn't have function %u\n",
+ group, function);
+ return -EINVAL;
+ }
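+ /* One AME bit per GPIO: set selects the pin's single alternate function, clear keeps it as GPIO. */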
+ ret = regmap_update_bits(mpci->rmap, MAX77620_REG_AME_GPIO,
+ BIT(group), val);
+ if (ret < 0)
+ dev_err(mpci->dev, "REG AME GPIO update failed: %d\n", ret);
+
+ return ret;
+}
+
+static const struct pinmux_ops max77620_pinmux_ops = {
+ .get_functions_count = max77620_pinctrl_get_funcs_count,
+ .get_function_name = max77620_pinctrl_get_func_name,
+ .get_function_groups = max77620_pinctrl_get_func_groups,
+ .set_mux = max77620_pinctrl_enable,
+};
+
+static int max77620_pinconf_get(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *config)
+{
+ struct max77620_pctrl_info *mpci = pinctrl_dev_get_drvdata(pctldev);
+ struct device *dev = mpci->dev;
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ unsigned int val;
+ int arg = 0;
+ int ret;
+
+ switch (param) {
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ if (mpci->pin_info[pin].drv_type == MAX77620_PIN_OD_DRV)
+ arg = 1;
+ break;
+
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ if (mpci->pin_info[pin].drv_type == MAX77620_PIN_PP_DRV)
+ arg = 1;
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ret = regmap_read(mpci->rmap, MAX77620_REG_PUE_GPIO, &val);
+ if (ret < 0) {
+ dev_err(dev, "Reg PUE_GPIO read failed: %d\n", ret);
+ return ret;
+ }
+ if (val & BIT(pin))
+ arg = 1;
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ret = regmap_read(mpci->rmap, MAX77620_REG_PDE_GPIO, &val);
+ if (ret < 0) {
+ dev_err(dev, "Reg PDE_GPIO read failed: %d\n", ret);
+ return ret;
+ }
+ if (val & BIT(pin))
+ arg = 1;
+ break;
+
+ default:
+ dev_err(dev, "Properties not supported\n");
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, (u16)arg);
+
+ return 0;
+}
+
+static int max77620_get_default_fps(struct max77620_pctrl_info *mpci,
+ int addr, int *fps)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(mpci->rmap, addr, &val);
+ if (ret < 0) {
+ dev_err(mpci->dev, "Reg PUE_GPIO read failed: %d\n", ret);
+ return ret;
+ }
+ *fps = (val & MAX77620_FPS_SRC_MASK) >> MAX77620_FPS_SRC_SHIFT;
+
+ return 0;
+}
+
+static int max77620_set_fps_param(struct max77620_pctrl_info *mpci,
+ int pin, int param)
+{
+ struct max77620_fps_config *fps_config = &mpci->fps_config[pin];
+ int addr, ret;
+ int param_val;
+ int mask, shift;
+
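+ /* Flexible power sequencer (FPS) registers exist only for GPIO1..GPIO3. */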
+ if ((pin < MAX77620_GPIO1) || (pin > MAX77620_GPIO3))
+ return 0;
+
+ addr = MAX77620_REG_FPS_GPIO1 + pin - 1;
+ switch (param) {
+ case MAX77620_ACTIVE_FPS_SOURCE:
+ case MAX77620_SUSPEND_FPS_SOURCE:
+ mask = MAX77620_FPS_SRC_MASK;
+ shift = MAX77620_FPS_SRC_SHIFT;
+ param_val = fps_config->active_fps_src;
+ if (param == MAX77620_SUSPEND_FPS_SOURCE)
+ param_val = fps_config->suspend_fps_src;
+ break;
+
+ case MAX77620_ACTIVE_FPS_POWER_ON_SLOTS:
+ case MAX77620_SUSPEND_FPS_POWER_ON_SLOTS:
+ mask = MAX77620_FPS_PU_PERIOD_MASK;
+ shift = MAX77620_FPS_PU_PERIOD_SHIFT;
+ param_val = fps_config->active_power_up_slots;
+ if (param == MAX77620_SUSPEND_FPS_POWER_ON_SLOTS)
+ param_val = fps_config->suspend_power_up_slots;
+ break;
+
+ case MAX77620_ACTIVE_FPS_POWER_DOWN_SLOTS:
+ case MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS:
+ mask = MAX77620_FPS_PD_PERIOD_MASK;
+ shift = MAX77620_FPS_PD_PERIOD_SHIFT;
+ param_val = fps_config->active_power_down_slots;
+ if (param == MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS)
+ param_val = fps_config->suspend_power_down_slots;
+ break;
+
+ default:
+ dev_err(mpci->dev, "Invalid parameter %d for pin %d\n",
+ param, pin);
+ return -EINVAL;
+ }
+
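+ /* Probe initializes all FPS parameters to -1; skip anything left unconfigured. */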
+ if (param_val < 0)
+ return 0;
+
+ ret = regmap_update_bits(mpci->rmap, addr, mask, param_val << shift);
+ if (ret < 0)
+ dev_err(mpci->dev, "Reg 0x%02x update failed %d\n", addr, ret);
+
+ return ret;
+}
+
+static int max77620_pinconf_set(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *configs,
+ unsigned int num_configs)
+{
+ struct max77620_pctrl_info *mpci = pinctrl_dev_get_drvdata(pctldev);
+ struct device *dev = mpci->dev;
+ struct max77620_fps_config *fps_config;
+ int param;
+ u16 param_val;
+ unsigned int val;
+ unsigned int pu_val;
+ unsigned int pd_val;
+ int addr, ret;
+ int i;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ param_val = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
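+ /* CNFG_GPIOx DRV field: 0 selects open-drain, 1 selects push-pull. */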
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ val = param_val ? 0 : 1;
+ ret = regmap_update_bits(mpci->rmap,
+ MAX77620_REG_GPIO0 + pin,
+ MAX77620_CNFG_GPIO_DRV_MASK,
+ val);
+ if (ret < 0) {
+ dev_err(dev, "Reg 0x%02x update failed %d\n",
+ MAX77620_REG_GPIO0 + pin, ret);
+ return ret;
+ }
+ mpci->pin_info[pin].drv_type = val ?
+ MAX77620_PIN_PP_DRV : MAX77620_PIN_OD_DRV;
+ break;
+
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ val = param_val ? 1 : 0;
+ ret = regmap_update_bits(mpci->rmap,
+ MAX77620_REG_GPIO0 + pin,
+ MAX77620_CNFG_GPIO_DRV_MASK,
+ val);
+ if (ret < 0) {
+ dev_err(dev, "Reg 0x%02x update failed %d\n",
+ MAX77620_REG_GPIO0 + pin, ret);
+ return ret;
+ }
+ mpci->pin_info[pin].drv_type = val ?
+ MAX77620_PIN_PP_DRV : MAX77620_PIN_OD_DRV;
+ break;
+
+ case MAX77620_ACTIVE_FPS_SOURCE:
+ case MAX77620_ACTIVE_FPS_POWER_ON_SLOTS:
+ case MAX77620_ACTIVE_FPS_POWER_DOWN_SLOTS:
+ if ((pin < MAX77620_GPIO1) || (pin > MAX77620_GPIO3))
+ return -EINVAL;
+
+ fps_config = &mpci->fps_config[pin];
+
+ if ((param == MAX77620_ACTIVE_FPS_SOURCE) &&
+ (param_val == MAX77620_FPS_SRC_DEF)) {
+ addr = MAX77620_REG_FPS_GPIO1 + pin - 1;
+ ret = max77620_get_default_fps(
+ mpci, addr,
+ &fps_config->active_fps_src);
+ if (ret < 0)
+ return ret;
+ break;
+ }
+
+ if (param == MAX77620_ACTIVE_FPS_SOURCE)
+ fps_config->active_fps_src = param_val;
+ else if (param == MAX77620_ACTIVE_FPS_POWER_ON_SLOTS)
+ fps_config->active_power_up_slots = param_val;
+ else
+ fps_config->active_power_down_slots = param_val;
+
+ ret = max77620_set_fps_param(mpci, pin, param);
+ if (ret < 0)
+ return ret;
+ break;
+
+ case MAX77620_SUSPEND_FPS_SOURCE:
+ case MAX77620_SUSPEND_FPS_POWER_ON_SLOTS:
+ case MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS:
+ if ((pin < MAX77620_GPIO1) || (pin > MAX77620_GPIO3))
+ return -EINVAL;
+
+ fps_config = &mpci->fps_config[pin];
+
+ if ((param == MAX77620_SUSPEND_FPS_SOURCE) &&
+ (param_val == MAX77620_FPS_SRC_DEF)) {
+ addr = MAX77620_REG_FPS_GPIO1 + pin - 1;
+ ret = max77620_get_default_fps(
+ mpci, addr,
+ &fps_config->suspend_fps_src);
+ if (ret < 0)
+ return ret;
+ break;
+ }
+
+ if (param == MAX77620_SUSPEND_FPS_SOURCE)
+ fps_config->suspend_fps_src = param_val;
+ else if (param == MAX77620_SUSPEND_FPS_POWER_ON_SLOTS)
+ fps_config->suspend_power_up_slots = param_val;
+ else
+ fps_config->suspend_power_down_slots =
+ param_val;
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ pu_val = (param == PIN_CONFIG_BIAS_PULL_UP) ?
+ BIT(pin) : 0;
+ pd_val = (param == PIN_CONFIG_BIAS_PULL_DOWN) ?
+ BIT(pin) : 0;
+
+ ret = regmap_update_bits(mpci->rmap,
+ MAX77620_REG_PUE_GPIO,
+ BIT(pin), pu_val);
+ if (ret < 0) {
+ dev_err(dev, "PUE_GPIO update failed: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(mpci->rmap,
+ MAX77620_REG_PDE_GPIO,
+ BIT(pin), pd_val);
+ if (ret < 0) {
+ dev_err(dev, "PDE_GPIO update failed: %d\n",
+ ret);
+ return ret;
+ }
+ break;
+
+ default:
+ dev_err(dev, "Properties not supported\n");
+ return -ENOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static const struct pinconf_ops max77620_pinconf_ops = {
+ .pin_config_get = max77620_pinconf_get,
+ .pin_config_set = max77620_pinconf_set,
+};
+
+static struct pinctrl_desc max77620_pinctrl_desc = {
+ .pctlops = &max77620_pinctrl_ops,
+ .pmxops = &max77620_pinmux_ops,
+ .confops = &max77620_pinconf_ops,
+};
+
+static int max77620_pinctrl_probe(struct platform_device *pdev)
+{
+ struct max77620_chip *max77620 = dev_get_drvdata(pdev->dev.parent);
+ struct max77620_pctrl_info *mpci;
+ int i;
+
+ mpci = devm_kzalloc(&pdev->dev, sizeof(*mpci), GFP_KERNEL);
+ if (!mpci)
+ return -ENOMEM;
+
+ mpci->dev = &pdev->dev;
+ mpci->dev->of_node = pdev->dev.parent->of_node;
+ mpci->rmap = max77620->rmap;
+
+ mpci->pins = max77620_pins_desc;
+ mpci->num_pins = ARRAY_SIZE(max77620_pins_desc);
+ mpci->functions = max77620_pin_function;
+ mpci->num_functions = ARRAY_SIZE(max77620_pin_function);
+ mpci->pin_groups = max77620_pingroups;
+ mpci->num_pin_groups = ARRAY_SIZE(max77620_pingroups);
+ platform_set_drvdata(pdev, mpci);
+
+ max77620_pinctrl_desc.name = dev_name(&pdev->dev);
+ max77620_pinctrl_desc.pins = max77620_pins_desc;
+ max77620_pinctrl_desc.npins = ARRAY_SIZE(max77620_pins_desc);
+ max77620_pinctrl_desc.num_custom_params =
+ ARRAY_SIZE(max77620_cfg_params);
+ max77620_pinctrl_desc.custom_params = max77620_cfg_params;
+
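+ /* -1 marks an FPS parameter as not configured (see max77620_set_fps_param()). */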
+ for (i = 0; i < MAX77620_PIN_NUM; ++i) {
+ mpci->fps_config[i].active_fps_src = -1;
+ mpci->fps_config[i].active_power_up_slots = -1;
+ mpci->fps_config[i].active_power_down_slots = -1;
+ mpci->fps_config[i].suspend_fps_src = -1;
+ mpci->fps_config[i].suspend_power_up_slots = -1;
+ mpci->fps_config[i].suspend_power_down_slots = -1;
+ }
+
+ mpci->pctl = devm_pinctrl_register(&pdev->dev, &max77620_pinctrl_desc,
+ mpci);
+ if (IS_ERR(mpci->pctl)) {
+ dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
+ return PTR_ERR(mpci->pctl);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int max77620_suspend_fps_param[] = {
+ MAX77620_SUSPEND_FPS_SOURCE,
+ MAX77620_SUSPEND_FPS_POWER_ON_SLOTS,
+ MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS,
+};
+
+static int max77620_active_fps_param[] = {
+ MAX77620_ACTIVE_FPS_SOURCE,
+ MAX77620_ACTIVE_FPS_POWER_ON_SLOTS,
+ MAX77620_ACTIVE_FPS_POWER_DOWN_SLOTS,
+};
+
+static int max77620_pinctrl_suspend(struct device *dev)
+{
+ struct max77620_pctrl_info *mpci = dev_get_drvdata(dev);
+ int pin, p;
+
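+ /* Switch the GPIO1..GPIO3 FPS registers to their suspend-time configuration. */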
+ for (pin = 0; pin < MAX77620_PIN_NUM; ++pin) {
+ if ((pin < MAX77620_GPIO1) || (pin > MAX77620_GPIO3))
+ continue;
+ for (p = 0; p < 3; ++p)
+ max77620_set_fps_param(
+ mpci, pin, max77620_suspend_fps_param[p]);
+ }
+
+ return 0;
+}
+
+static int max77620_pinctrl_resume(struct device *dev)
+{
+ struct max77620_pctrl_info *mpci = dev_get_drvdata(dev);
+ int pin, p;
+
+ for (pin = 0; pin < MAX77620_PIN_NUM; ++pin) {
+ if ((pin < MAX77620_GPIO1) || (pin > MAX77620_GPIO3))
+ continue;
+ for (p = 0; p < 3; ++p)
+ max77620_set_fps_param(
+ mpci, pin, max77620_active_fps_param[p]);
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops max77620_pinctrl_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(
+ max77620_pinctrl_suspend, max77620_pinctrl_resume)
+};
+
+static const struct platform_device_id max77620_pinctrl_devtype[] = {
+ { .name = "max77620-pinctrl", },
+ { .name = "max20024-pinctrl", },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, max77620_pinctrl_devtype);
+
+static struct platform_driver max77620_pinctrl_driver = {
+ .driver = {
+ .name = "max77620-pinctrl",
+ .pm = &max77620_pinctrl_pm_ops,
+ },
+ .probe = max77620_pinctrl_probe,
+ .id_table = max77620_pinctrl_devtype,
+};
+
+module_platform_driver(max77620_pinctrl_driver);
+
+MODULE_DESCRIPTION("MAX77620/MAX20024 pin control driver");
+MODULE_AUTHOR("Chaitanya Bandi<bandik@nvidia.com>");
+MODULE_AUTHOR("Laxman Dewangan<ldewangan@nvidia.com>");
+MODULE_ALIAS("platform:max77620-pinctrl");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-oxnas.c b/drivers/pinctrl/pinctrl-oxnas.c
new file mode 100644
index 000000000..917a7d253
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-oxnas.c
@@ -0,0 +1,846 @@
+/*
+ * Oxford Semiconductor OXNAS SoC Family pinctrl driver
+ *
+ * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * Based on pinctrl-pic32.c
+ * Joshua Henderson, <joshua.henderson@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#include "pinctrl-utils.h"
+
+#define PINS_PER_BANK 32
+
+#define GPIO_BANK_START(bank) ((bank) * PINS_PER_BANK)
+
+/* Regmap Offsets */
+#define PINMUX_PRIMARY_SEL0 0x0c
+#define PINMUX_SECONDARY_SEL0 0x14
+#define PINMUX_TERTIARY_SEL0 0x8c
+#define PINMUX_PRIMARY_SEL1 0x10
+#define PINMUX_SECONDARY_SEL1 0x18
+#define PINMUX_TERTIARY_SEL1 0x90
+#define PINMUX_PULLUP_CTRL0 0xac
+#define PINMUX_PULLUP_CTRL1 0xb0
+
+/* GPIO Registers */
+#define INPUT_VALUE 0x00
+#define OUTPUT_EN 0x04
+#define IRQ_PENDING 0x0c
+#define OUTPUT_SET 0x14
+#define OUTPUT_CLEAR 0x18
+#define OUTPUT_EN_SET 0x1c
+#define OUTPUT_EN_CLEAR 0x20
+#define RE_IRQ_ENABLE 0x28
+#define FE_IRQ_ENABLE 0x2c
+
+struct oxnas_function {
+ const char *name;
+ const char * const *groups;
+ unsigned int ngroups;
+};
+
+struct oxnas_pin_group {
+ const char *name;
+ unsigned int pin;
+ unsigned int bank;
+ struct oxnas_desc_function *functions;
+};
+
+struct oxnas_desc_function {
+ const char *name;
+ unsigned int fct;
+};
+
+struct oxnas_gpio_bank {
+ void __iomem *reg_base;
+ struct gpio_chip gpio_chip;
+ struct irq_chip irq_chip;
+ unsigned int id;
+};
+
+struct oxnas_pinctrl {
+ struct regmap *regmap;
+ struct device *dev;
+ struct pinctrl_dev *pctldev;
+ const struct pinctrl_pin_desc *pins;
+ unsigned int npins;
+ const struct oxnas_function *functions;
+ unsigned int nfunctions;
+ const struct oxnas_pin_group *groups;
+ unsigned int ngroups;
+ struct oxnas_gpio_bank *gpio_banks;
+ unsigned int nbanks;
+};
+
+static const struct pinctrl_pin_desc oxnas_pins[] = {
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ PINCTRL_PIN(2, "gpio2"),
+ PINCTRL_PIN(3, "gpio3"),
+ PINCTRL_PIN(4, "gpio4"),
+ PINCTRL_PIN(5, "gpio5"),
+ PINCTRL_PIN(6, "gpio6"),
+ PINCTRL_PIN(7, "gpio7"),
+ PINCTRL_PIN(8, "gpio8"),
+ PINCTRL_PIN(9, "gpio9"),
+ PINCTRL_PIN(10, "gpio10"),
+ PINCTRL_PIN(11, "gpio11"),
+ PINCTRL_PIN(12, "gpio12"),
+ PINCTRL_PIN(13, "gpio13"),
+ PINCTRL_PIN(14, "gpio14"),
+ PINCTRL_PIN(15, "gpio15"),
+ PINCTRL_PIN(16, "gpio16"),
+ PINCTRL_PIN(17, "gpio17"),
+ PINCTRL_PIN(18, "gpio18"),
+ PINCTRL_PIN(19, "gpio19"),
+ PINCTRL_PIN(20, "gpio20"),
+ PINCTRL_PIN(21, "gpio21"),
+ PINCTRL_PIN(22, "gpio22"),
+ PINCTRL_PIN(23, "gpio23"),
+ PINCTRL_PIN(24, "gpio24"),
+ PINCTRL_PIN(25, "gpio25"),
+ PINCTRL_PIN(26, "gpio26"),
+ PINCTRL_PIN(27, "gpio27"),
+ PINCTRL_PIN(28, "gpio28"),
+ PINCTRL_PIN(29, "gpio29"),
+ PINCTRL_PIN(30, "gpio30"),
+ PINCTRL_PIN(31, "gpio31"),
+ PINCTRL_PIN(32, "gpio32"),
+ PINCTRL_PIN(33, "gpio33"),
+ PINCTRL_PIN(34, "gpio34"),
+};
+
+static const char * const oxnas_fct0_group[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+ "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11",
+ "gpio12", "gpio13", "gpio14", "gpio15",
+ "gpio16", "gpio17", "gpio18", "gpio19",
+ "gpio20", "gpio21", "gpio22", "gpio23",
+ "gpio24", "gpio25", "gpio26", "gpio27",
+ "gpio28", "gpio29", "gpio30", "gpio31",
+ "gpio32", "gpio33", "gpio34"
+};
+
+static const char * const oxnas_fct3_group[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+ "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9",
+ "gpio20",
+ "gpio22", "gpio23", "gpio24", "gpio25",
+ "gpio26", "gpio27", "gpio28", "gpio29",
+ "gpio30", "gpio31", "gpio32", "gpio33",
+ "gpio34"
+};
+
+#define FUNCTION(_name, _gr) \
+ { \
+ .name = #_name, \
+ .groups = oxnas_##_gr##_group, \
+ .ngroups = ARRAY_SIZE(oxnas_##_gr##_group), \
+ }
+
+static const struct oxnas_function oxnas_functions[] = {
+ FUNCTION(gpio, fct0),
+ FUNCTION(fct3, fct3),
+};
+
+#define OXNAS_PINCTRL_GROUP(_pin, _name, ...) \
+ { \
+ .name = #_name, \
+ .pin = _pin, \
+ .bank = _pin / PINS_PER_BANK, \
+ .functions = (struct oxnas_desc_function[]){ \
+ __VA_ARGS__, { } }, \
+ }
+
+#define OXNAS_PINCTRL_FUNCTION(_name, _fct) \
+ { \
+ .name = #_name, \
+ .fct = _fct, \
+ }
+
+static const struct oxnas_pin_group oxnas_groups[] = {
+ OXNAS_PINCTRL_GROUP(0, gpio0,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct3, 3)),
+ OXNAS_PINCTRL_GROUP(1, gpio1,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct3, 3)),
+ OXNAS_PINCTRL_GROUP(2, gpio2,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct3, 3)),
+ OXNAS_PINCTRL_GROUP(3, gpio3,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct3, 3)),
+ OXNAS_PINCTRL_GROUP(4, gpio4,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct3, 3)),
+ OXNAS_PINCTRL_GROUP(5, gpio5,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct3, 3)),
+ OXNAS_PINCTRL_GROUP(6, gpio6,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct3, 3)),
+ OXNAS_PINCTRL_GROUP(7, gpio7,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct3, 3)),
+ OXNAS_PINCTRL_GROUP(8, gpio8,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct3, 3)),
+ OXNAS_PINCTRL_GROUP(9, gpio9,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct3, 3)),
+ OXNAS_PINCTRL_GROUP(10, gpio10,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0)),
+ OXNAS_PINCTRL_GROUP(11, gpio11,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0)),
+ OXNAS_PINCTRL_GROUP(12, gpio12,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0)),
+ OXNAS_PINCTRL_GROUP(13, gpio13,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0)),
+ OXNAS_PINCTRL_GROUP(14, gpio14,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0)),
+ OXNAS_PINCTRL_GROUP(15, gpio15,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0)),
+ OXNAS_PINCTRL_GROUP(16, gpio16,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0)),
+ OXNAS_PINCTRL_GROUP(17, gpio17,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0)),
+ OXNAS_PINCTRL_GROUP(18, gpio18,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0)),
+ OXNAS_PINCTRL_GROUP(19, gpio19,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0)),
+ OXNAS_PINCTRL_GROUP(20, gpio20,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct3, 3)),
+ OXNAS_PINCTRL_GROUP(21, gpio21,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0)),
+ OXNAS_PINCTRL_GROUP(22, gpio22,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct3, 3)),
+ OXNAS_PINCTRL_GROUP(23, gpio23,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct3, 3)),
+ OXNAS_PINCTRL_GROUP(24, gpio24,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct3, 3)),
+ OXNAS_PINCTRL_GROUP(25, gpio25,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct3, 3)),
+ OXNAS_PINCTRL_GROUP(26, gpio26,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct3, 3)),
+ OXNAS_PINCTRL_GROUP(27, gpio27,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct3, 3)),
+ OXNAS_PINCTRL_GROUP(28, gpio28,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct3, 3)),
+ OXNAS_PINCTRL_GROUP(29, gpio29,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct3, 3)),
+ OXNAS_PINCTRL_GROUP(30, gpio30,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct3, 3)),
+ OXNAS_PINCTRL_GROUP(31, gpio31,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct3, 3)),
+ OXNAS_PINCTRL_GROUP(32, gpio32,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct3, 3)),
+ OXNAS_PINCTRL_GROUP(33, gpio33,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct3, 3)),
+ OXNAS_PINCTRL_GROUP(34, gpio34,
+ OXNAS_PINCTRL_FUNCTION(gpio, 0),
+ OXNAS_PINCTRL_FUNCTION(fct3, 3)),
+};
+
+static inline struct oxnas_gpio_bank *pctl_to_bank(struct oxnas_pinctrl *pctl,
+ unsigned int pin)
+{
+ return &pctl->gpio_banks[pin / PINS_PER_BANK];
+}
+
+static int oxnas_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctl->ngroups;
+}
+
+static const char *oxnas_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int group)
+{
+ struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctl->groups[group].name;
+}
+
+static int oxnas_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int group,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = &pctl->groups[group].pin;
+ *num_pins = 1;
+
+ return 0;
+}
+
+static const struct pinctrl_ops oxnas_pinctrl_ops = {
+ .get_groups_count = oxnas_pinctrl_get_groups_count,
+ .get_group_name = oxnas_pinctrl_get_group_name,
+ .get_group_pins = oxnas_pinctrl_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .dt_free_map = pinctrl_utils_free_map,
+};
+
+static int oxnas_pinmux_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctl->nfunctions;
+}
+
+static const char *
+oxnas_pinmux_get_function_name(struct pinctrl_dev *pctldev, unsigned int func)
+{
+ struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctl->functions[func].name;
+}
+
+static int oxnas_pinmux_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned int func,
+ const char * const **groups,
+ unsigned int * const num_groups)
+{
+ struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pctl->functions[func].groups;
+ *num_groups = pctl->functions[func].ngroups;
+
+ return 0;
+}
+
+static int oxnas_pinmux_enable(struct pinctrl_dev *pctldev,
+ unsigned int func, unsigned int group)
+{
+ struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ const struct oxnas_pin_group *pg = &pctl->groups[group];
+ const struct oxnas_function *pf = &pctl->functions[func];
+ const char *fname = pf->name;
+ struct oxnas_desc_function *functions = pg->functions;
+ u32 mask = BIT(pg->pin);
+
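+ /* Walk the group's function table; exactly one of the primary/secondary/tertiary select bits is set for the requested function, all three are cleared for GPIO. */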
+ while (functions->name) {
+ if (!strcmp(functions->name, fname)) {
+ dev_dbg(pctl->dev,
+ "setting function %s bank %d pin %d fct %d mask %x\n",
+ fname, pg->bank, pg->pin,
+ functions->fct, mask);
+
+ regmap_write_bits(pctl->regmap,
+ (pg->bank ?
+ PINMUX_PRIMARY_SEL1 :
+ PINMUX_PRIMARY_SEL0),
+ mask,
+ (functions->fct == 1 ?
+ mask : 0));
+ regmap_write_bits(pctl->regmap,
+ (pg->bank ?
+ PINMUX_SECONDARY_SEL1 :
+ PINMUX_SECONDARY_SEL0),
+ mask,
+ (functions->fct == 2 ?
+ mask : 0));
+ regmap_write_bits(pctl->regmap,
+ (pg->bank ?
+ PINMUX_TERTIARY_SEL1 :
+ PINMUX_TERTIARY_SEL0),
+ mask,
+ (functions->fct == 3 ?
+ mask : 0));
+
+ return 0;
+ }
+
+ functions++;
+ }
+
+ dev_err(pctl->dev, "cannot mux pin %u to function %u\n", group, func);
+
+ return -EINVAL;
+}
+
+static int oxnas_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset)
+{
+ struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct oxnas_gpio_bank *bank = gpiochip_get_data(range->gc);
+ u32 mask = BIT(offset - bank->gpio_chip.base);
+
+ dev_dbg(pctl->dev, "requesting gpio %d in bank %d (id %d) with mask 0x%x\n",
+ offset, bank->gpio_chip.base, bank->id, mask);
+
+ regmap_write_bits(pctl->regmap,
+ (bank->id ?
+ PINMUX_PRIMARY_SEL1 :
+ PINMUX_PRIMARY_SEL0),
+ mask, 0);
+ regmap_write_bits(pctl->regmap,
+ (bank->id ?
+ PINMUX_SECONDARY_SEL1 :
+ PINMUX_SECONDARY_SEL0),
+ mask, 0);
+ regmap_write_bits(pctl->regmap,
+ (bank->id ?
+ PINMUX_TERTIARY_SEL1 :
+ PINMUX_TERTIARY_SEL0),
+ mask, 0);
+
+ return 0;
+}
+
+static int oxnas_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct oxnas_gpio_bank *bank = gpiochip_get_data(chip);
+ u32 mask = BIT(offset);
+
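+ /* A set OUTPUT_EN bit means the pin drives; gpiolib expects 1 for input. */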
+ return !(readl_relaxed(bank->reg_base + OUTPUT_EN) & mask);
+}
+
+static int oxnas_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct oxnas_gpio_bank *bank = gpiochip_get_data(chip);
+ u32 mask = BIT(offset);
+
+ writel_relaxed(mask, bank->reg_base + OUTPUT_EN_CLEAR);
+
+ return 0;
+}
+
+static int oxnas_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct oxnas_gpio_bank *bank = gpiochip_get_data(chip);
+ u32 mask = BIT(offset);
+
+ return (readl_relaxed(bank->reg_base + INPUT_VALUE) & mask) != 0;
+}
+
+static void oxnas_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct oxnas_gpio_bank *bank = gpiochip_get_data(chip);
+ u32 mask = BIT(offset);
+
+ if (value)
+ writel_relaxed(mask, bank->reg_base + OUTPUT_SET);
+ else
+ writel_relaxed(mask, bank->reg_base + OUTPUT_CLEAR);
+}
+
+static int oxnas_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct oxnas_gpio_bank *bank = gpiochip_get_data(chip);
+ u32 mask = BIT(offset);
+
+ oxnas_gpio_set(chip, offset, value);
+ writel_relaxed(mask, bank->reg_base + OUTPUT_EN_SET);
+
+ return 0;
+}
+
+static int oxnas_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset, bool input)
+{
+ struct gpio_chip *chip = range->gc;
+
+ if (input)
+ oxnas_gpio_direction_input(chip, offset);
+ else
+ oxnas_gpio_direction_output(chip, offset, 0);
+
+ return 0;
+}
+
+static const struct pinmux_ops oxnas_pinmux_ops = {
+ .get_functions_count = oxnas_pinmux_get_functions_count,
+ .get_function_name = oxnas_pinmux_get_function_name,
+ .get_function_groups = oxnas_pinmux_get_function_groups,
+ .set_mux = oxnas_pinmux_enable,
+ .gpio_request_enable = oxnas_gpio_request_enable,
+ .gpio_set_direction = oxnas_gpio_set_direction,
+};
+
+static int oxnas_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config)
+{
+ struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct oxnas_gpio_bank *bank = pctl_to_bank(pctl, pin);
+ unsigned int param = pinconf_to_config_param(*config);
+ u32 mask = BIT(pin - bank->gpio_chip.base);
+ int ret;
+ u32 arg;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ret = regmap_read(pctl->regmap,
+ (bank->id ?
+ PINMUX_PULLUP_CTRL1 :
+ PINMUX_PULLUP_CTRL0),
+ &arg);
+ if (ret)
+ return ret;
+
+ arg = !!(arg & mask);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return 0;
+}
+
+static int oxnas_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct oxnas_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct oxnas_gpio_bank *bank = pctl_to_bank(pctl, pin);
+ unsigned int param;
+ u32 arg;
+ unsigned int i;
+ u32 offset = pin - bank->gpio_chip.base;
+ u32 mask = BIT(offset);
+
+ dev_dbg(pctl->dev, "setting pin %d bank %d mask 0x%x\n",
+ pin, bank->gpio_chip.base, mask);
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ dev_dbg(pctl->dev, " pullup\n");
+ regmap_write_bits(pctl->regmap,
+ (bank->id ?
+ PINMUX_PULLUP_CTRL1 :
+ PINMUX_PULLUP_CTRL0),
+ mask, mask);
+ break;
+ default:
+ dev_err(pctl->dev, "Property %u not supported\n",
+ param);
+ return -ENOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static const struct pinconf_ops oxnas_pinconf_ops = {
+ .pin_config_get = oxnas_pinconf_get,
+ .pin_config_set = oxnas_pinconf_set,
+ .is_generic = true,
+};
+
+static struct pinctrl_desc oxnas_pinctrl_desc = {
+ .name = "oxnas-pinctrl",
+ .pctlops = &oxnas_pinctrl_ops,
+ .pmxops = &oxnas_pinmux_ops,
+ .confops = &oxnas_pinconf_ops,
+ .owner = THIS_MODULE,
+};
+
+static void oxnas_gpio_irq_ack(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct oxnas_gpio_bank *bank = gpiochip_get_data(chip);
+ u32 mask = BIT(data->hwirq);
+
+ writel(mask, bank->reg_base + IRQ_PENDING);
+}
+
+static void oxnas_gpio_irq_mask(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct oxnas_gpio_bank *bank = gpiochip_get_data(chip);
+ unsigned int type = irqd_get_trigger_type(data);
+ u32 mask = BIT(data->hwirq);
+
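+ /* Rising- and falling-edge interrupt enables live in separate registers. */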
+ if (type & IRQ_TYPE_EDGE_RISING)
+ writel(readl(bank->reg_base + RE_IRQ_ENABLE) & ~mask,
+ bank->reg_base + RE_IRQ_ENABLE);
+
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ writel(readl(bank->reg_base + FE_IRQ_ENABLE) & ~mask,
+ bank->reg_base + FE_IRQ_ENABLE);
+}
+
+static void oxnas_gpio_irq_unmask(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct oxnas_gpio_bank *bank = gpiochip_get_data(chip);
+ unsigned int type = irqd_get_trigger_type(data);
+ u32 mask = BIT(data->hwirq);
+
+ if (type & IRQ_TYPE_EDGE_RISING)
+ writel(readl(bank->reg_base + RE_IRQ_ENABLE) | mask,
+ bank->reg_base + RE_IRQ_ENABLE);
+
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ writel(readl(bank->reg_base + FE_IRQ_ENABLE) | mask,
+ bank->reg_base + FE_IRQ_ENABLE);
+}
+
+static unsigned int oxnas_gpio_irq_startup(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+
+ oxnas_gpio_direction_input(chip, data->hwirq);
+ oxnas_gpio_irq_unmask(data);
+
+ return 0;
+}
+
+static int oxnas_gpio_irq_set_type(struct irq_data *data, unsigned int type)
+{
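+ /* Only edge triggers are supported; anything else is rejected. */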
+ if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) == 0)
+ return -EINVAL;
+
+ irq_set_handler_locked(data, handle_edge_irq);
+
+ return 0;
+}
+
+static void oxnas_gpio_irq_handler(struct irq_desc *desc)
+{
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct oxnas_gpio_bank *bank = gpiochip_get_data(gc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned long stat;
+ unsigned int pin;
+
+ chained_irq_enter(chip, desc);
+
+ stat = readl(bank->reg_base + IRQ_PENDING);
+
+ for_each_set_bit(pin, &stat, BITS_PER_LONG)
+ generic_handle_irq(irq_linear_revmap(gc->irqdomain, pin));
+
+ chained_irq_exit(chip, desc);
+}
+
+#define GPIO_BANK(_bank) \
+ { \
+ .gpio_chip = { \
+ .label = "GPIO" #_bank, \
+ .request = gpiochip_generic_request, \
+ .free = gpiochip_generic_free, \
+ .get_direction = oxnas_gpio_get_direction, \
+ .direction_input = oxnas_gpio_direction_input, \
+ .direction_output = oxnas_gpio_direction_output, \
+ .get = oxnas_gpio_get, \
+ .set = oxnas_gpio_set, \
+ .ngpio = PINS_PER_BANK, \
+ .base = GPIO_BANK_START(_bank), \
+ .owner = THIS_MODULE, \
+ .can_sleep = 0, \
+ }, \
+ .irq_chip = { \
+ .name = "GPIO" #_bank, \
+ .irq_startup = oxnas_gpio_irq_startup, \
+ .irq_ack = oxnas_gpio_irq_ack, \
+ .irq_mask = oxnas_gpio_irq_mask, \
+ .irq_unmask = oxnas_gpio_irq_unmask, \
+ .irq_set_type = oxnas_gpio_irq_set_type, \
+ }, \
+ }
+
+static struct oxnas_gpio_bank oxnas_gpio_banks[] = {
+ GPIO_BANK(0),
+ GPIO_BANK(1),
+};
+
+static int oxnas_pinctrl_probe(struct platform_device *pdev)
+{
+ struct oxnas_pinctrl *pctl;
+
+ pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
+ if (!pctl)
+ return -ENOMEM;
+ pctl->dev = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, pctl);
+
+ pctl->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "oxsemi,sys-ctrl");
+ if (IS_ERR(pctl->regmap)) {
+ dev_err(&pdev->dev, "failed to get sys ctrl regmap\n");
+ return -ENODEV;
+ }
+
+ pctl->pins = oxnas_pins;
+ pctl->npins = ARRAY_SIZE(oxnas_pins);
+ pctl->functions = oxnas_functions;
+ pctl->nfunctions = ARRAY_SIZE(oxnas_functions);
+ pctl->groups = oxnas_groups;
+ pctl->ngroups = ARRAY_SIZE(oxnas_groups);
+ pctl->gpio_banks = oxnas_gpio_banks;
+ pctl->nbanks = ARRAY_SIZE(oxnas_gpio_banks);
+
+ oxnas_pinctrl_desc.pins = pctl->pins;
+ oxnas_pinctrl_desc.npins = pctl->npins;
+
+ pctl->pctldev = pinctrl_register(&oxnas_pinctrl_desc,
+ &pdev->dev, pctl);
+ if (IS_ERR(pctl->pctldev)) {
+ dev_err(&pdev->dev, "Failed to register pinctrl device\n");
+ return PTR_ERR(pctl->pctldev);
+ }
+
+ return 0;
+}
+
+static int oxnas_gpio_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct of_phandle_args pinspec;
+ struct oxnas_gpio_bank *bank;
+ unsigned int id, ngpios;
+ int irq, ret;
+ struct resource *res;
+
+ if (of_parse_phandle_with_fixed_args(np, "gpio-ranges",
+ 3, 0, &pinspec)) {
+ dev_err(&pdev->dev, "gpio-ranges property not found\n");
+ return -EINVAL;
+ }
+
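+ /* The bank index and GPIO count are derived from the gpio-ranges property. */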
+ id = pinspec.args[1] / PINS_PER_BANK;
+ ngpios = pinspec.args[2];
+
+ if (id >= ARRAY_SIZE(oxnas_gpio_banks)) {
+ dev_err(&pdev->dev, "invalid gpio-ranges base arg\n");
+ return -EINVAL;
+ }
+
+ if (ngpios > PINS_PER_BANK) {
+ dev_err(&pdev->dev, "invalid gpio-ranges count arg\n");
+ return -EINVAL;
+ }
+
+ bank = &oxnas_gpio_banks[id];
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ bank->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(bank->reg_base))
+ return PTR_ERR(bank->reg_base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "irq get failed\n");
+ return irq;
+ }
+
+ bank->id = id;
+ bank->gpio_chip.parent = &pdev->dev;
+ bank->gpio_chip.of_node = np;
+ bank->gpio_chip.ngpio = ngpios;
+ ret = gpiochip_add_data(&bank->gpio_chip, bank);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to add GPIO chip %u: %d\n",
+ id, ret);
+ return ret;
+ }
+
+ ret = gpiochip_irqchip_add(&bank->gpio_chip, &bank->irq_chip,
+ 0, handle_level_irq, IRQ_TYPE_NONE);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to add IRQ chip %u: %d\n",
+ id, ret);
+ gpiochip_remove(&bank->gpio_chip);
+ return ret;
+ }
+
+ gpiochip_set_chained_irqchip(&bank->gpio_chip, &bank->irq_chip,
+ irq, oxnas_gpio_irq_handler);
+
+ return 0;
+}
+
+static const struct of_device_id oxnas_pinctrl_of_match[] = {
+ { .compatible = "oxsemi,ox810se-pinctrl", },
+ { },
+};
+
+static struct platform_driver oxnas_pinctrl_driver = {
+ .driver = {
+ .name = "oxnas-pinctrl",
+ .of_match_table = oxnas_pinctrl_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = oxnas_pinctrl_probe,
+};
+
+static const struct of_device_id oxnas_gpio_of_match[] = {
+ { .compatible = "oxsemi,ox810se-gpio", },
+ { },
+};
+
+static struct platform_driver oxnas_gpio_driver = {
+ .driver = {
+ .name = "oxnas-gpio",
+ .of_match_table = oxnas_gpio_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = oxnas_gpio_probe,
+};
+
+static int __init oxnas_gpio_register(void)
+{
+ return platform_driver_register(&oxnas_gpio_driver);
+}
+arch_initcall(oxnas_gpio_register);
+
+static int __init oxnas_pinctrl_register(void)
+{
+ return platform_driver_register(&oxnas_pinctrl_driver);
+}
+arch_initcall(oxnas_pinctrl_register);
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 5bf8e78e0..55375b1b3 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -1432,7 +1432,6 @@ static int pistachio_pinctrl_probe(struct platform_device *pdev)
{
struct pistachio_pinctrl *pctl;
struct resource *res;
- int ret;
pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
if (!pctl)
@@ -1464,13 +1463,7 @@ static int pistachio_pinctrl_probe(struct platform_device *pdev)
return PTR_ERR(pctl->pctldev);
}
- ret = pistachio_gpio_register(pctl);
- if (ret < 0) {
- pinctrl_unregister(pctl->pctldev);
- return ret;
- }
-
- return 0;
+ return pistachio_gpio_register(pctl);
}
static struct platform_driver pistachio_pinctrl_driver = {
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index a91026e8c..44902c63f 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -360,7 +360,7 @@ static struct regmap_config rockchip_regmap_config = {
.reg_stride = 4,
};
-static const inline struct rockchip_pin_group *pinctrl_name_to_group(
+static inline const struct rockchip_pin_group *pinctrl_name_to_group(
const struct rockchip_pinctrl *info,
const char *name)
{
@@ -2007,7 +2007,7 @@ static void rockchip_irq_gc_mask_clr_bit(struct irq_data *d)
irq_gc_mask_clr_bit(d);
}
-void rockchip_irq_gc_mask_set_bit(struct irq_data *d)
+static void rockchip_irq_gc_mask_set_bit(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct rockchip_pin_bank *bank = gc->private;
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index d0ba968af..0de1c67df 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -844,7 +844,7 @@ static int st_pctl_get_group_pins(struct pinctrl_dev *pctldev,
return 0;
}
-static const inline struct st_pctl_group *st_pctl_find_group_by_name(
+static inline const struct st_pctl_group *st_pctl_find_group_by_name(
const struct st_pinctrl *info, const char *name)
{
int i;
diff --git a/drivers/pinctrl/pinctrl-u300.c b/drivers/pinctrl/pinctrl-u300.c
index d1af908a7..9cc80a500 100644
--- a/drivers/pinctrl/pinctrl-u300.c
+++ b/drivers/pinctrl/pinctrl-u300.c
@@ -670,7 +670,7 @@ struct u300_pmx {
* u300_pmx_registers - the array of registers read/written for each pinmux
* shunt setting
*/
-const u32 u300_pmx_registers[] = {
+static const u32 u300_pmx_registers[] = {
U300_SYSCON_PMC1LR,
U300_SYSCON_PMC1HR,
U300_SYSCON_PMC2R,
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index a13f2b6f6..dd85ad180 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -1616,50 +1616,74 @@ struct pinctrl_xway_soc {
/* xway xr9 series (DEPRECATED: Use XWAY xRX100/xRX200 Family) */
static struct pinctrl_xway_soc xr9_pinctrl = {
- XR9_MAX_PIN, xway_mfp,
- xway_grps, ARRAY_SIZE(xway_grps),
- xrx_funcs, ARRAY_SIZE(xrx_funcs),
- xway_exin_pin_map, 6
+ .pin_count = XR9_MAX_PIN,
+ .mfp = xway_mfp,
+ .grps = xway_grps,
+ .num_grps = ARRAY_SIZE(xway_grps),
+ .funcs = xrx_funcs,
+ .num_funcs = ARRAY_SIZE(xrx_funcs),
+ .exin = xway_exin_pin_map,
+ .num_exin = 6
};
/* XWAY AMAZON Family */
static struct pinctrl_xway_soc ase_pinctrl = {
- ASE_MAX_PIN, ase_mfp,
- ase_grps, ARRAY_SIZE(ase_grps),
- ase_funcs, ARRAY_SIZE(ase_funcs),
- ase_exin_pin_map, 3
+ .pin_count = ASE_MAX_PIN,
+ .mfp = ase_mfp,
+ .grps = ase_grps,
+ .num_grps = ARRAY_SIZE(ase_grps),
+ .funcs = ase_funcs,
+ .num_funcs = ARRAY_SIZE(ase_funcs),
+ .exin = ase_exin_pin_map,
+ .num_exin = 3
};
/* XWAY DANUBE Family */
static struct pinctrl_xway_soc danube_pinctrl = {
- DANUBE_MAX_PIN, danube_mfp,
- danube_grps, ARRAY_SIZE(danube_grps),
- danube_funcs, ARRAY_SIZE(danube_funcs),
- danube_exin_pin_map, 3
+ .pin_count = DANUBE_MAX_PIN,
+ .mfp = danube_mfp,
+ .grps = danube_grps,
+ .num_grps = ARRAY_SIZE(danube_grps),
+ .funcs = danube_funcs,
+ .num_funcs = ARRAY_SIZE(danube_funcs),
+ .exin = danube_exin_pin_map,
+ .num_exin = 3
};
/* XWAY xRX100 Family */
static struct pinctrl_xway_soc xrx100_pinctrl = {
- XRX100_MAX_PIN, xrx100_mfp,
- xrx100_grps, ARRAY_SIZE(xrx100_grps),
- xrx100_funcs, ARRAY_SIZE(xrx100_funcs),
- xrx100_exin_pin_map, 6
+ .pin_count = XRX100_MAX_PIN,
+ .mfp = xrx100_mfp,
+ .grps = xrx100_grps,
+ .num_grps = ARRAY_SIZE(xrx100_grps),
+ .funcs = xrx100_funcs,
+ .num_funcs = ARRAY_SIZE(xrx100_funcs),
+ .exin = xrx100_exin_pin_map,
+ .num_exin = 6
};
/* XWAY xRX200 Family */
static struct pinctrl_xway_soc xrx200_pinctrl = {
- XRX200_MAX_PIN, xrx200_mfp,
- xrx200_grps, ARRAY_SIZE(xrx200_grps),
- xrx200_funcs, ARRAY_SIZE(xrx200_funcs),
- xrx200_exin_pin_map, 6
+ .pin_count = XRX200_MAX_PIN,
+ .mfp = xrx200_mfp,
+ .grps = xrx200_grps,
+ .num_grps = ARRAY_SIZE(xrx200_grps),
+ .funcs = xrx200_funcs,
+ .num_funcs = ARRAY_SIZE(xrx200_funcs),
+ .exin = xrx200_exin_pin_map,
+ .num_exin = 6
};
/* XWAY xRX300 Family */
static struct pinctrl_xway_soc xrx300_pinctrl = {
- XRX300_MAX_PIN, xrx300_mfp,
- xrx300_grps, ARRAY_SIZE(xrx300_grps),
- xrx300_funcs, ARRAY_SIZE(xrx300_funcs),
- xrx300_exin_pin_map, 5
+ .pin_count = XRX300_MAX_PIN,
+ .mfp = xrx300_mfp,
+ .grps = xrx300_grps,
+ .num_grps = ARRAY_SIZE(xrx300_grps),
+ .funcs = xrx300_funcs,
+ .num_funcs = ARRAY_SIZE(xrx300_funcs),
+ .exin = xrx300_exin_pin_map,
+ .num_exin = 5
};
static struct pinctrl_gpio_range xway_gpio_range = {
@@ -1724,9 +1748,9 @@ static int pinmux_xway_probe(struct platform_device *pdev)
}
xway_pctrl_desc.pins = xway_info.pads;
- /* load the gpio chip */
+ /* register the gpio chip */
xway_chip.parent = &pdev->dev;
- ret = gpiochip_add(&xway_chip);
+ ret = devm_gpiochip_add_data(&pdev->dev, &xway_chip, NULL);
if (ret) {
dev_err(&pdev->dev, "Failed to register gpio chip\n");
return ret;
@@ -1749,7 +1773,6 @@ static int pinmux_xway_probe(struct platform_device *pdev)
/* register with the generic lantiq layer */
ret = ltq_pinctrl_register(pdev, &xway_info);
if (ret) {
- gpiochip_remove(&xway_chip);
dev_err(&pdev->dev, "Failed to register pinctrl driver\n");
return ret;
}
diff --git a/drivers/pinctrl/pinctrl-zynq.c b/drivers/pinctrl/pinctrl-zynq.c
index 8fdc60c5a..7afdbede6 100644
--- a/drivers/pinctrl/pinctrl-zynq.c
+++ b/drivers/pinctrl/pinctrl-zynq.c
@@ -20,7 +20,7 @@
*/
#include <linux/io.h>
#include <linux/mfd/syscon.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/pinctrl.h>
@@ -1210,7 +1210,6 @@ static const struct of_device_id zynq_pinctrl_of_match[] = {
{ .compatible = "xlnx,pinctrl-zynq" },
{ }
};
-MODULE_DEVICE_TABLE(of, zynq_pinctrl_of_match);
static struct platform_driver zynq_pinctrl_driver = {
.driver = {
@@ -1225,13 +1224,3 @@ static int __init zynq_pinctrl_init(void)
return platform_driver_register(&zynq_pinctrl_driver);
}
arch_initcall(zynq_pinctrl_init);
-
-static void __exit zynq_pinctrl_exit(void)
-{
- platform_driver_unregister(&zynq_pinctrl_driver);
-}
-module_exit(zynq_pinctrl_exit);
-
-MODULE_AUTHOR("Sören Brinkmann <soren.brinkmann@xilinx.com>");
-MODULE_DESCRIPTION("Xilinx Zynq pinctrl driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index c223a9ef1..ece702881 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -256,7 +256,7 @@ int pinmux_request_gpio(struct pinctrl_dev *pctldev,
/* Conjure some name stating what chip and pin this is taken by */
owner = kasprintf(GFP_KERNEL, "%s:%d", range->name, gpio);
if (!owner)
- return -EINVAL;
+ return -ENOMEM;
ret = pin_request(pctldev, pin, owner, range);
if (ret < 0)
@@ -606,23 +606,17 @@ static int pinmux_pins_show(struct seq_file *s, void *what)
if (pmxops->strict) {
if (desc->mux_owner)
seq_printf(s, "pin %d (%s): device %s%s",
- pin,
- desc->name ? desc->name : "unnamed",
- desc->mux_owner,
+ pin, desc->name, desc->mux_owner,
is_hog ? " (HOG)" : "");
else if (desc->gpio_owner)
seq_printf(s, "pin %d (%s): GPIO %s",
- pin,
- desc->name ? desc->name : "unnamed",
- desc->gpio_owner);
+ pin, desc->name, desc->gpio_owner);
else
seq_printf(s, "pin %d (%s): UNCLAIMED",
- pin,
- desc->name ? desc->name : "unnamed");
+ pin, desc->name);
} else {
/* For non-strict controllers */
- seq_printf(s, "pin %d (%s): %s %s%s", pin,
- desc->name ? desc->name : "unnamed",
+ seq_printf(s, "pin %d (%s): %s %s%s", pin, desc->name,
desc->mux_owner ? desc->mux_owner
: "(MUX UNCLAIMED)",
desc->gpio_owner ? desc->gpio_owner
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 67bc70dcd..93ef268d5 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -55,6 +55,14 @@ config PINCTRL_MSM8960
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found in the Qualcomm 8960 platform.
+config PINCTRL_MDM9615
+ tristate "Qualcomm 9615 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm TLMM block found in the Qualcomm 9615 platform.
+
config PINCTRL_MSM8X74
tristate "Qualcomm 8x74 pin controller driver"
depends on GPIOLIB && OF
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index c964a2c4b..8319e11ce 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_PINCTRL_MSM8X74) += pinctrl-msm8x74.o
obj-$(CONFIG_PINCTRL_MSM8916) += pinctrl-msm8916.o
obj-$(CONFIG_PINCTRL_MSM8996) += pinctrl-msm8996.o
obj-$(CONFIG_PINCTRL_QDF2XXX) += pinctrl-qdf2xxx.o
+obj-$(CONFIG_PINCTRL_MDM9615) += pinctrl-mdm9615.o
obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-gpio.o
obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-mpp.o
obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-gpio.o
diff --git a/drivers/pinctrl/qcom/pinctrl-mdm9615.c b/drivers/pinctrl/qcom/pinctrl-mdm9615.c
new file mode 100644
index 000000000..2b8f45216
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-mdm9615.c
@@ -0,0 +1,483 @@
+/*
+ * Copyright (c) 2014, Sony Mobile Communications AB.
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author : Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "pinctrl-msm.h"
+
+static const struct pinctrl_pin_desc mdm9615_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+
+#define FUNCTION(fname) \
+ [MSM_MUX_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
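+/* TLMM registers for GPIO <id> sit at a 0x10-byte stride starting at 0x1000. */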
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ MSM_MUX_gpio, \
+ MSM_MUX_##f1, \
+ MSM_MUX_##f2, \
+ MSM_MUX_##f3, \
+ MSM_MUX_##f4, \
+ MSM_MUX_##f5, \
+ MSM_MUX_##f6, \
+ MSM_MUX_##f7, \
+ MSM_MUX_##f8, \
+ MSM_MUX_##f9, \
+ MSM_MUX_##f10, \
+ MSM_MUX_##f11 \
+ }, \
+ .nfuncs = 12, \
+ .ctl_reg = 0x1000 + 0x10 * id, \
+ .io_reg = 0x1004 + 0x10 * id, \
+ .intr_cfg_reg = 0x1008 + 0x10 * id, \
+ .intr_status_reg = 0x100c + 0x10 * id, \
+ .intr_target_reg = 0x400 + 0x4 * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_ack_high = 1, \
+ .intr_target_bit = 0, \
+ .intr_target_kpss_val = 4, \
+ .intr_raw_status_bit = 3, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 1, \
+ }
+
+enum mdm9615_functions {
+ MSM_MUX_gpio,
+ MSM_MUX_gsbi2_i2c,
+ MSM_MUX_gsbi3,
+ MSM_MUX_gsbi4,
+ MSM_MUX_gsbi5_i2c,
+ MSM_MUX_gsbi5_uart,
+ MSM_MUX_sdc2,
+ MSM_MUX_ebi2_lcdc,
+ MSM_MUX_ps_hold,
+ MSM_MUX_prim_audio,
+ MSM_MUX_sec_audio,
+ MSM_MUX_cdc_mclk,
+ MSM_MUX_NA,
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87"
+};
+
+static const char * const gsbi2_i2c_groups[] = {
+ "gpio4", "gpio5"
+};
+
+static const char * const gsbi3_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11"
+};
+
+static const char * const gsbi4_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15"
+};
+
+static const char * const gsbi5_i2c_groups[] = {
+ "gpio16", "gpio17"
+};
+
+static const char * const gsbi5_uart_groups[] = {
+ "gpio18", "gpio19"
+};
+
+static const char * const sdc2_groups[] = {
+ "gpio25", "gpio26", "gpio27", "gpio28", "gpio29", "gpio30",
+};
+
+static const char * const ebi2_lcdc_groups[] = {
+ "gpio21", "gpio22", "gpio24",
+};
+
+static const char * const ps_hold_groups[] = {
+ "gpio83",
+};
+
+static const char * const prim_audio_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23",
+};
+
+static const char * const sec_audio_groups[] = {
+ "gpio25", "gpio26", "gpio27", "gpio28",
+};
+
+static const char * const cdc_mclk_groups[] = {
+ "gpio24",
+};
+
+static const struct msm_function mdm9615_functions[] = {
+ FUNCTION(gpio),
+ FUNCTION(gsbi2_i2c),
+ FUNCTION(gsbi3),
+ FUNCTION(gsbi4),
+ FUNCTION(gsbi5_i2c),
+ FUNCTION(gsbi5_uart),
+ FUNCTION(sdc2),
+ FUNCTION(ebi2_lcdc),
+ FUNCTION(ps_hold),
+ FUNCTION(prim_audio),
+ FUNCTION(sec_audio),
+ FUNCTION(cdc_mclk),
+};
+
+static const struct msm_pingroup mdm9615_groups[] = {
+ PINGROUP(0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(3, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(4, gsbi2_i2c, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(5, gsbi2_i2c, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(6, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(7, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(8, gsbi3, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(9, gsbi3, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(10, gsbi3, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(11, gsbi3, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(12, gsbi4, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(13, gsbi4, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(14, gsbi4, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(15, gsbi4, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(16, gsbi5_i2c, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(17, gsbi5_i2c, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(18, gsbi5_uart, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(19, gsbi5_uart, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(20, prim_audio, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(21, prim_audio, ebi2_lcdc, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(22, prim_audio, ebi2_lcdc, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(23, prim_audio, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(24, cdc_mclk, NA, ebi2_lcdc, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(25, sdc2, sec_audio, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(26, sdc2, sec_audio, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(27, sdc2, sec_audio, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(28, sdc2, sec_audio, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(29, sdc2, sec_audio, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(30, sdc2, sec_audio, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(31, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(32, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(33, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(34, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(35, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(36, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(37, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(38, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(39, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(40, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(41, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(42, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(43, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(44, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(45, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(46, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(47, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(48, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(49, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(50, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(51, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(52, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(53, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(54, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(55, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(56, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(57, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(58, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(59, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(60, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(61, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(62, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(63, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(64, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(65, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(66, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(67, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(68, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(69, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(70, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(71, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(72, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(73, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(74, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(75, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(76, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(77, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(78, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(79, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(80, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(81, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(82, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(83, ps_hold, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(84, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(85, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(86, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(87, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+};
+
+#define NUM_GPIO_PINGROUPS 88
+
+static const struct msm_pinctrl_soc_data mdm9615_pinctrl = {
+ .pins = mdm9615_pins,
+ .npins = ARRAY_SIZE(mdm9615_pins),
+ .functions = mdm9615_functions,
+ .nfunctions = ARRAY_SIZE(mdm9615_functions),
+ .groups = mdm9615_groups,
+ .ngroups = ARRAY_SIZE(mdm9615_groups),
+ .ngpios = NUM_GPIO_PINGROUPS,
+};
+
+static int mdm9615_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &mdm9615_pinctrl);
+}
+
+static const struct of_device_id mdm9615_pinctrl_of_match[] = {
+ { .compatible = "qcom,mdm9615-pinctrl", },
+ { },
+};
+
+static struct platform_driver mdm9615_pinctrl_driver = {
+ .driver = {
+ .name = "mdm9615-pinctrl",
+ .of_match_table = mdm9615_pinctrl_of_match,
+ },
+ .probe = mdm9615_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
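+/*
+ * Register at arch_initcall time so the pin controller is available
+ * before dependent drivers probe.
+ */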
+static int __init mdm9615_pinctrl_init(void)
+{
+ return platform_driver_register(&mdm9615_pinctrl_driver);
+}
+arch_initcall(mdm9615_pinctrl_init);
+
+static void __exit mdm9615_pinctrl_exit(void)
+{
+ platform_driver_unregister(&mdm9615_pinctrl_driver);
+}
+module_exit(mdm9615_pinctrl_exit);
+
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_DESCRIPTION("Qualcomm MDM9615 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, mdm9615_pinctrl_of_match);
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 1a44e1d03..51c42d746 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -29,6 +29,7 @@
#include <linux/spinlock.h>
#include <linux/reboot.h>
#include <linux/pm.h>
+#include <linux/log2.h>
#include "../core.h"
#include "../pinconf.h"
@@ -138,10 +139,11 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
const struct msm_pingroup *g;
unsigned long flags;
- u32 val;
+ u32 val, mask;
int i;
g = &pctrl->soc->groups[group];
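+ /* Mask all function-select bits; the field is ceil(log2(nfuncs)) bits wide */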
+ mask = GENMASK(g->mux_bit + order_base_2(g->nfuncs) - 1, g->mux_bit);
for (i = 0; i < g->nfuncs; i++) {
if (g->funcs[i] == function)
@@ -154,7 +156,7 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->ctl_reg);
- val &= ~(0x7 << g->mux_bit);
+ val &= ~mask;
val |= i << g->mux_bit;
writel(val, pctrl->regs + g->ctl_reg);
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8660.c b/drivers/pinctrl/qcom/pinctrl-msm8660.c
index 3e8f7ac2a..5591d093b 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8660.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8660.c
@@ -506,6 +506,8 @@ enum msm8660_functions {
MSM_MUX_usb_fs2_oe_n,
MSM_MUX_vfe,
MSM_MUX_vsens_alarm,
+ MSM_MUX_ebi2cs,
+ MSM_MUX_ebi2,
MSM_MUX__,
};
@@ -696,6 +698,36 @@ static const char * const vfe_groups[] = {
static const char * const vsens_alarm_groups[] = {
"gpio127"
};
+static const char * const ebi2cs_groups[] = {
+ "gpio39", /* CS1A */
+ "gpio40", /* CS2A */
+ "gpio123", /* CS1B */
+ "gpio124", /* CS2B */
+ "gpio131", /* CS5 */
+ "gpio132", /* CS4 */
+ "gpio133", /* CS3 */
+ "gpio134", /* CS0 */
+};
+static const char * const ebi2_groups[] = {
+ /* ADDR9 & ADDR8 */
+ "gpio37", "gpio38",
+ /* ADDR7 - ADDR 0 */
+ "gpio123", "gpio124", "gpio125", "gpio126",
+ "gpio127", "gpio128", "gpio129", "gpio130",
+ /* (muxed address+data) AD15 - AD0 */
+ "gpio135", "gpio136", "gpio137", "gpio138", "gpio139",
+ "gpio140", "gpio141", "gpio142", "gpio143", "gpio144",
+ "gpio145", "gpio146", "gpio147", "gpio148", "gpio149",
+ "gpio150",
+ "gpio151", /* OE output enable */
+ "gpio152", /* clock */
+ "gpio153", /* ADV */
+ "gpio154", /* WAIT (input) */
+ "gpio155", /* UB Upper Byte Enable */
+ "gpio156", /* LB Lower Byte Enable */
+ "gpio157", /* WE Write Enable */
+ "gpio158", /* busy */
+};
static const struct msm_function msm8660_functions[] = {
FUNCTION(gpio),
@@ -749,6 +781,8 @@ static const struct msm_function msm8660_functions[] = {
FUNCTION(usb_fs2_oe_n),
FUNCTION(vfe),
FUNCTION(vsens_alarm),
+ FUNCTION(ebi2cs), /* for EBI2 chip selects */
+ FUNCTION(ebi2), /* for general EBI2 pins */
};
static const struct msm_pingroup msm8660_groups[] = {
@@ -789,10 +823,10 @@ static const struct msm_pingroup msm8660_groups[] = {
PINGROUP(34, gsbi1, _, _, _, _, _, _),
PINGROUP(35, gsbi1, _, _, _, _, _, _),
PINGROUP(36, gsbi1, _, _, _, _, _, _),
- PINGROUP(37, gsbi2, _, _, _, _, _, _),
- PINGROUP(38, gsbi2, _, _, _, _, _, _),
- PINGROUP(39, gsbi2, _, mdp_vsync, _, _, _, _),
- PINGROUP(40, gsbi2, _, _, _, _, _, _),
+ PINGROUP(37, gsbi2, ebi2, _, _, _, _, _),
+ PINGROUP(38, gsbi2, ebi2, _, _, _, _, _),
+ PINGROUP(39, gsbi2, ebi2cs, mdp_vsync, _, _, _, _),
+ PINGROUP(40, gsbi2, ebi2cs, _, _, _, _, _),
PINGROUP(41, gsbi3, mdp_vsync, _, _, _, _, _),
PINGROUP(42, gsbi3, vfe, _, _, _, _, _),
PINGROUP(43, gsbi3, _, _, _, _, _, _),
@@ -875,42 +909,42 @@ static const struct msm_pingroup msm8660_groups[] = {
PINGROUP(120, i2s, _, _, _, _, _, _),
PINGROUP(121, i2s, _, _, _, _, _, _),
PINGROUP(122, i2s, gp_clk_1b, _, _, _, _, _),
- PINGROUP(123, _, gsbi2_spi_cs1_n, _, _, _, _, _),
- PINGROUP(124, _, gsbi2_spi_cs2_n, _, _, _, _, _),
- PINGROUP(125, _, gsbi2_spi_cs3_n, _, _, _, _, _),
- PINGROUP(126, _, _, _, _, _, _, _),
- PINGROUP(127, _, vsens_alarm, _, _, _, _, _),
- PINGROUP(128, _, _, _, _, _, _, _),
- PINGROUP(129, _, _, _, _, _, _, _),
- PINGROUP(130, _, _, _, _, _, _, _),
- PINGROUP(131, _, _, _, _, _, _, _),
- PINGROUP(132, _, _, _, _, _, _, _),
- PINGROUP(133, _, _, _, _, _, _, _),
- PINGROUP(134, _, _, _, _, _, _, _),
- PINGROUP(135, _, _, _, _, _, _, _),
- PINGROUP(136, _, _, _, _, _, _, _),
- PINGROUP(137, _, _, _, _, _, _, _),
- PINGROUP(138, _, _, _, _, _, _, _),
- PINGROUP(139, _, _, _, _, _, _, _),
- PINGROUP(140, _, _, _, _, _, _, _),
- PINGROUP(141, _, _, _, _, _, _, _),
- PINGROUP(142, _, _, _, _, _, _, _),
- PINGROUP(143, _, sdc2, _, _, _, _, _),
- PINGROUP(144, _, sdc2, _, _, _, _, _),
- PINGROUP(145, _, sdc2, _, _, _, _, _),
- PINGROUP(146, _, sdc2, _, _, _, _, _),
- PINGROUP(147, _, sdc2, _, _, _, _, _),
- PINGROUP(148, _, sdc2, _, _, _, _, _),
- PINGROUP(149, _, sdc2, _, _, _, _, _),
- PINGROUP(150, _, sdc2, _, _, _, _, _),
- PINGROUP(151, _, sdc2, _, _, _, _, _),
- PINGROUP(152, _, sdc2, _, _, _, _, _),
- PINGROUP(153, _, _, _, _, _, _, _),
- PINGROUP(154, _, _, _, _, _, _, _),
- PINGROUP(155, _, _, _, _, _, _, _),
- PINGROUP(156, _, _, _, _, _, _, _),
- PINGROUP(157, _, _, _, _, _, _, _),
- PINGROUP(158, _, _, _, _, _, _, _),
+ PINGROUP(123, ebi2, gsbi2_spi_cs1_n, ebi2cs, _, _, _, _),
+ PINGROUP(124, ebi2, gsbi2_spi_cs2_n, ebi2cs, _, _, _, _),
+ PINGROUP(125, ebi2, gsbi2_spi_cs3_n, _, _, _, _, _),
+ PINGROUP(126, ebi2, _, _, _, _, _, _),
+ PINGROUP(127, ebi2, vsens_alarm, _, _, _, _, _),
+ PINGROUP(128, ebi2, _, _, _, _, _, _),
+ PINGROUP(129, ebi2, _, _, _, _, _, _),
+ PINGROUP(130, ebi2, _, _, _, _, _, _),
+ PINGROUP(131, ebi2cs, _, _, _, _, _, _),
+ PINGROUP(132, ebi2cs, _, _, _, _, _, _),
+ PINGROUP(133, ebi2cs, _, _, _, _, _, _),
+ PINGROUP(134, ebi2cs, _, _, _, _, _, _),
+ PINGROUP(135, ebi2, _, _, _, _, _, _),
+ PINGROUP(136, ebi2, _, _, _, _, _, _),
+ PINGROUP(137, ebi2, _, _, _, _, _, _),
+ PINGROUP(138, ebi2, _, _, _, _, _, _),
+ PINGROUP(139, ebi2, _, _, _, _, _, _),
+ PINGROUP(140, ebi2, _, _, _, _, _, _),
+ PINGROUP(141, ebi2, _, _, _, _, _, _),
+ PINGROUP(142, ebi2, _, _, _, _, _, _),
+ PINGROUP(143, ebi2, sdc2, _, _, _, _, _),
+ PINGROUP(144, ebi2, sdc2, _, _, _, _, _),
+ PINGROUP(145, ebi2, sdc2, _, _, _, _, _),
+ PINGROUP(146, ebi2, sdc2, _, _, _, _, _),
+ PINGROUP(147, ebi2, sdc2, _, _, _, _, _),
+ PINGROUP(148, ebi2, sdc2, _, _, _, _, _),
+ PINGROUP(149, ebi2, sdc2, _, _, _, _, _),
+ PINGROUP(150, ebi2, sdc2, _, _, _, _, _),
+ PINGROUP(151, ebi2, sdc2, _, _, _, _, _),
+ PINGROUP(152, ebi2, sdc2, _, _, _, _, _),
+ PINGROUP(153, ebi2, _, _, _, _, _, _),
+ PINGROUP(154, ebi2, _, _, _, _, _, _),
+ PINGROUP(155, ebi2, _, _, _, _, _, _),
+ PINGROUP(156, ebi2, _, _, _, _, _, _),
+ PINGROUP(157, ebi2, _, _, _, _, _, _),
+ PINGROUP(158, ebi2, _, _, _, _, _, _),
PINGROUP(159, sdc1, _, _, _, _, _, _),
PINGROUP(160, sdc1, _, _, _, _, _, _),
PINGROUP(161, sdc1, _, _, _, _, _, _),
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8x74.c b/drivers/pinctrl/qcom/pinctrl-msm8x74.c
index 46fe6ad5f..9eb63d340 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8x74.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8x74.c
@@ -172,6 +172,8 @@ static const struct pinctrl_pin_desc msm8x74_pins[] = {
PINCTRL_PIN(149, "SDC2_CLK"),
PINCTRL_PIN(150, "SDC2_CMD"),
PINCTRL_PIN(151, "SDC2_DATA"),
+ PINCTRL_PIN(152, "HSIC_STROBE"),
+ PINCTRL_PIN(153, "HSIC_DATA"),
};
#define DECLARE_MSM_GPIO_PINS(pin) static const unsigned int gpio##pin##_pins[] = { pin }
@@ -328,6 +330,8 @@ static const unsigned int sdc1_data_pins[] = { 148 };
static const unsigned int sdc2_clk_pins[] = { 149 };
static const unsigned int sdc2_cmd_pins[] = { 150 };
static const unsigned int sdc2_data_pins[] = { 151 };
+static const unsigned int hsic_strobe_pins[] = { 152 };
+static const unsigned int hsic_data_pins[] = { 153 };
#define FUNCTION(fname) \
[MSM_MUX_##fname] = { \
@@ -399,6 +403,37 @@ static const unsigned int sdc2_data_pins[] = { 151 };
.intr_detection_width = -1, \
}
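+/*
+ * The HSIC strobe/data pads only carry a function-select field; all other
+ * controls are flagged unsupported with a -1 sentinel.
+ */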
+#define HSIC_PINGROUP(pg_name, ctl) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = ARRAY_SIZE(pg_name##_pins), \
+ .funcs = (int[]){ \
+ MSM_MUX_gpio, \
+ MSM_MUX_hsic_ctl, \
+ }, \
+ .nfuncs = 2, \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = 25, \
+ .pull_bit = -1, \
+ .drv_bit = -1, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_target_kpss_val = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
/*
* TODO: Add the rest of the possible functions and fill out
* the pingroup table below.
@@ -509,6 +544,7 @@ enum msm8x74_functions {
MSM_MUX_fm,
MSM_MUX_wlan,
MSM_MUX_slimbus,
+ MSM_MUX_hsic_ctl,
MSM_MUX_NA,
};
@@ -534,7 +570,8 @@ static const char * const gpio_groups[] = {
"gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
"gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134",
"gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140",
- "gpio141", "gpio142", "gpio143", "gpio144", "gpio145"
+ "gpio141", "gpio142", "gpio143", "gpio144", "gpio145", "hsic_data",
+ "hsic_strobe",
};
static const char * const blsp_uart1_groups[] = {
@@ -754,6 +791,7 @@ static const char * const wlan_groups[] = {
};
static const char * const slimbus_groups[] = { "gpio70", "gpio71" };
+static const char * const hsic_ctl_groups[] = { "hsic_strobe", "hsic_data" };
static const struct msm_function msm8x74_functions[] = {
FUNCTION(gpio),
@@ -861,6 +899,7 @@ static const struct msm_function msm8x74_functions[] = {
FUNCTION(fm),
FUNCTION(wlan),
FUNCTION(slimbus),
+ FUNCTION(hsic_ctl),
};
static const struct msm_pingroup msm8x74_groups[] = {
@@ -1016,6 +1055,8 @@ static const struct msm_pingroup msm8x74_groups[] = {
SDC_PINGROUP(sdc2_clk, 0x2048, 14, 6),
SDC_PINGROUP(sdc2_cmd, 0x2048, 11, 3),
SDC_PINGROUP(sdc2_data, 0x2048, 9, 0),
+ HSIC_PINGROUP(hsic_strobe, 0x2050),
+ HSIC_PINGROUP(hsic_data, 0x2054),
};
#define NUM_GPIO_PINGROUPS 146
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
index 9191727af..0d1392fc3 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -744,6 +744,7 @@ static int pm8xxx_pin_populate(struct pm8xxx_mpp *pctrl,
static const struct of_device_id pm8xxx_mpp_of_match[] = {
{ .compatible = "qcom,pm8018-mpp" },
{ .compatible = "qcom,pm8038-mpp" },
+ { .compatible = "qcom,pm8058-mpp" },
{ .compatible = "qcom,pm8917-mpp" },
{ .compatible = "qcom,pm8821-mpp" },
{ .compatible = "qcom,pm8921-mpp" },
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos5440.c b/drivers/pinctrl/samsung/pinctrl-exynos5440.c
index fb71fc3e5..3000df807 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos5440.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos5440.c
@@ -998,6 +998,7 @@ static struct platform_driver exynos5440_pinctrl_driver = {
.driver = {
.name = "exynos5440-pinctrl",
.of_match_table = exynos5440_pinctrl_dt_match,
+ .suppress_bind_attrs = true,
},
};
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index ed0b70881..513fe6b23 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -1274,6 +1274,7 @@ static struct platform_driver samsung_pinctrl_driver = {
.driver = {
.name = "samsung-pinctrl",
.of_match_table = samsung_pinctrl_dt_match,
+ .suppress_bind_attrs = true,
},
};
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index 9b9cee06e..a3b82041b 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -598,15 +598,6 @@ static int sh_pfc_probe(struct platform_device *pdev)
return 0;
}
-static int sh_pfc_remove(struct platform_device *pdev)
-{
-#ifdef CONFIG_PINCTRL_SH_PFC_GPIO
- sh_pfc_unregister_gpiochip(platform_get_drvdata(pdev));
-#endif
-
- return 0;
-}
-
static const struct platform_device_id sh_pfc_id_table[] = {
#ifdef CONFIG_PINCTRL_PFC_SH7203
{ "pfc-sh7203", (kernel_ulong_t)&sh7203_pinmux_info },
@@ -650,7 +641,6 @@ static const struct platform_device_id sh_pfc_id_table[] = {
static struct platform_driver sh_pfc_driver = {
.probe = sh_pfc_probe,
- .remove = sh_pfc_remove,
.id_table = sh_pfc_id_table,
.driver = {
.name = DRV_NAME,
diff --git a/drivers/pinctrl/sh-pfc/core.h b/drivers/pinctrl/sh-pfc/core.h
index dc1b2adb2..0bbdea584 100644
--- a/drivers/pinctrl/sh-pfc/core.h
+++ b/drivers/pinctrl/sh-pfc/core.h
@@ -10,50 +10,16 @@
#ifndef __SH_PFC_CORE_H__
#define __SH_PFC_CORE_H__
-#include <linux/compiler.h>
-#include <linux/spinlock.h>
#include <linux/types.h>
#include "sh_pfc.h"
-struct sh_pfc_window {
- phys_addr_t phys;
- void __iomem *virt;
- unsigned long size;
-};
-
-struct sh_pfc_chip;
-struct sh_pfc_pinctrl;
-
struct sh_pfc_pin_range {
u16 start;
u16 end;
};
-struct sh_pfc {
- struct device *dev;
- const struct sh_pfc_soc_info *info;
- spinlock_t lock;
-
- unsigned int num_windows;
- struct sh_pfc_window *windows;
- unsigned int num_irqs;
- unsigned int *irqs;
-
- struct sh_pfc_pin_range *ranges;
- unsigned int nr_ranges;
-
- unsigned int nr_gpio_pins;
-
- struct sh_pfc_chip *gpio;
-#ifdef CONFIG_SUPERH
- struct sh_pfc_chip *func;
-#endif
-
-};
-
int sh_pfc_register_gpiochip(struct sh_pfc *pfc);
-int sh_pfc_unregister_gpiochip(struct sh_pfc *pfc);
int sh_pfc_register_pinctrl(struct sh_pfc *pfc);
@@ -67,28 +33,4 @@ void sh_pfc_write_reg(struct sh_pfc *pfc, u32 reg, unsigned int width,
int sh_pfc_get_pin_index(struct sh_pfc *pfc, unsigned int pin);
int sh_pfc_config_mux(struct sh_pfc *pfc, unsigned mark, int pinmux_type);
-extern const struct sh_pfc_soc_info emev2_pinmux_info;
-extern const struct sh_pfc_soc_info r8a73a4_pinmux_info;
-extern const struct sh_pfc_soc_info r8a7740_pinmux_info;
-extern const struct sh_pfc_soc_info r8a7778_pinmux_info;
-extern const struct sh_pfc_soc_info r8a7779_pinmux_info;
-extern const struct sh_pfc_soc_info r8a7790_pinmux_info;
-extern const struct sh_pfc_soc_info r8a7791_pinmux_info;
-extern const struct sh_pfc_soc_info r8a7793_pinmux_info;
-extern const struct sh_pfc_soc_info r8a7794_pinmux_info;
-extern const struct sh_pfc_soc_info r8a7795_pinmux_info;
-extern const struct sh_pfc_soc_info sh7203_pinmux_info;
-extern const struct sh_pfc_soc_info sh7264_pinmux_info;
-extern const struct sh_pfc_soc_info sh7269_pinmux_info;
-extern const struct sh_pfc_soc_info sh73a0_pinmux_info;
-extern const struct sh_pfc_soc_info sh7720_pinmux_info;
-extern const struct sh_pfc_soc_info sh7722_pinmux_info;
-extern const struct sh_pfc_soc_info sh7723_pinmux_info;
-extern const struct sh_pfc_soc_info sh7724_pinmux_info;
-extern const struct sh_pfc_soc_info sh7734_pinmux_info;
-extern const struct sh_pfc_soc_info sh7757_pinmux_info;
-extern const struct sh_pfc_soc_info sh7785_pinmux_info;
-extern const struct sh_pfc_soc_info sh7786_pinmux_info;
-extern const struct sh_pfc_soc_info shx3_pinmux_info;
-
#endif /* __SH_PFC_CORE_H__ */
diff --git a/drivers/pinctrl/sh-pfc/gpio.c b/drivers/pinctrl/sh-pfc/gpio.c
index 97dff6a09..6b5422766 100644
--- a/drivers/pinctrl/sh-pfc/gpio.c
+++ b/drivers/pinctrl/sh-pfc/gpio.c
@@ -318,7 +318,7 @@ sh_pfc_add_gpiochip(struct sh_pfc *pfc, int(*setup)(struct sh_pfc_chip *),
if (ret < 0)
return ERR_PTR(ret);
- ret = gpiochip_add_data(&chip->gpio_chip, chip);
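+ /* devm unregisters the chip on driver detach, so no explicit remove */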
+ ret = devm_gpiochip_add_data(pfc->dev, &chip->gpio_chip, chip);
if (unlikely(ret < 0))
return ERR_PTR(ret);
@@ -399,18 +399,7 @@ int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
chip = sh_pfc_add_gpiochip(pfc, gpio_function_setup, NULL);
if (IS_ERR(chip))
return PTR_ERR(chip);
-
- pfc->func = chip;
#endif /* CONFIG_SUPERH */
return 0;
}
-
-int sh_pfc_unregister_gpiochip(struct sh_pfc *pfc)
-{
- gpiochip_remove(&pfc->gpio->gpio_chip);
-#ifdef CONFIG_SUPERH
- gpiochip_remove(&pfc->func->gpio_chip);
-#endif
- return 0;
-}
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c b/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c
index d9d9228b1..ff5655dee 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c
@@ -21,7 +21,6 @@
#include <linux/kernel.h>
#include <linux/pinctrl/pinconf-generic.h>
-#include "core.h"
#include "sh_pfc.h"
#define CPU_ALL_PORT(fn, pfx, sfx) \
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
index 7f7c8a6e7..35f436bcb 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
@@ -22,7 +22,6 @@
#include <linux/kernel.h>
#include <linux/pinctrl/pinconf-generic.h>
-#include "core.h"
#include "sh_pfc.h"
#define CPU_ALL_PORT(fn, pfx, sfx) \
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
index 411d0887b..18ef7042b 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
@@ -23,7 +23,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/pinctrl/pinconf-generic.h>
-#include "core.h"
+
#include "sh_pfc.h"
#define PORT_GP_PUP_1(bank, pin, fn, sfx) \
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
index eed8daa46..b769c0548 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
@@ -24,7 +24,6 @@
#include <linux/io.h>
#include <linux/kernel.h>
-#include "core.h"
#include "sh_pfc.h"
/*
@@ -4696,47 +4695,6 @@ static const char * const vin3_groups[] = {
"vin3_clk",
};
-#define IOCTRL6 0x8c
-
-static int r8a7790_get_io_voltage(struct sh_pfc *pfc, unsigned int pin)
-{
- u32 data, mask;
-
- if (WARN(pin < RCAR_GP_PIN(3, 0) || pin > RCAR_GP_PIN(3, 31), "invalid pin %#x", pin))
- return -EINVAL;
-
- data = ioread32(pfc->windows->virt + IOCTRL6),
- /* Bits in IOCTRL6 are numbered in opposite order to pins */
- mask = 0x80000000 >> (pin & 0x1f);
-
- return (data & mask) ? 3300 : 1800;
-}
-
-static int r8a7790_set_io_voltage(struct sh_pfc *pfc, unsigned int pin, u16 mV)
-{
- u32 data, mask;
-
- if (WARN(pin < RCAR_GP_PIN(3, 0) || pin > RCAR_GP_PIN(3, 31), "invalid pin %#x", pin))
- return -EINVAL;
-
- if (mV != 1800 && mV != 3300)
- return -EINVAL;
-
- data = ioread32(pfc->windows->virt + IOCTRL6);
- /* Bits in IOCTRL6 are numbered in opposite order to pins */
- mask = 0x80000000 >> (pin & 0x1f);
-
- if (mV == 3300)
- data |= mask;
- else
- data &= ~mask;
-
- iowrite32(~data, pfc->windows->virt); /* unlock reg */
- iowrite32(data, pfc->windows->virt + IOCTRL6);
-
- return 0;
-}
-
static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(audio_clk),
SH_PFC_FUNCTION(avb),
@@ -5736,14 +5694,23 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ },
};
-static const struct sh_pfc_soc_operations pinmux_ops = {
- .get_io_voltage = r8a7790_get_io_voltage,
- .set_io_voltage = r8a7790_set_io_voltage,
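+/*
+ * Map a GP3 pin to its bit in the IOCTRL6 register at 0xe606008c; bits
+ * are numbered in the opposite order to pins.
+ */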
+static int r8a7790_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *pocctrl)
+{
+ if (pin < RCAR_GP_PIN(3, 0) || pin > RCAR_GP_PIN(3, 31))
+ return -EINVAL;
+
+ *pocctrl = 0xe606008c;
+
+ return 31 - (pin & 0x1f);
+}
+
+static const struct sh_pfc_soc_operations r8a7790_pinmux_ops = {
+ .pin_to_pocctrl = r8a7790_pin_to_pocctrl,
};
const struct sh_pfc_soc_info r8a7790_pinmux_info = {
.name = "r8a77900_pfc",
- .ops = &pinmux_ops,
+ .ops = &r8a7790_pinmux_ops,
.unlock_reg = 0xe6060000, /* PMMR */
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index 01abbd5b4..0c1a60c9a 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
-#include "core.h"
#include "sh_pfc.h"
#define CPU_ALL_PORT(fn, sfx) \
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
index 44632b1a5..b74cdd310 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
@@ -17,8 +17,12 @@
PORT_GP_CFG_16(0, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
PORT_GP_CFG_28(1, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
PORT_GP_CFG_15(2, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
- PORT_GP_CFG_16(3, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
- PORT_GP_CFG_18(4, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
+ PORT_GP_CFG_12(3, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH | SH_PFC_PIN_CFG_IO_VOLTAGE), \
+ PORT_GP_CFG_1(3, 12, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
+ PORT_GP_CFG_1(3, 13, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
+ PORT_GP_CFG_1(3, 14, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
+ PORT_GP_CFG_1(3, 15, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
+ PORT_GP_CFG_18(4, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH | SH_PFC_PIN_CFG_IO_VOLTAGE), \
PORT_GP_CFG_26(5, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
PORT_GP_CFG_32(6, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
PORT_GP_CFG_4(7, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH)
@@ -552,6 +556,9 @@ static const u16 pinmux_data[] = {
PINMUX_SINGLE(AVS2),
PINMUX_SINGLE(HDMI0_CEC),
PINMUX_SINGLE(HDMI1_CEC),
+ PINMUX_SINGLE(I2C_SEL_0_1),
+ PINMUX_SINGLE(I2C_SEL_3_1),
+ PINMUX_SINGLE(I2C_SEL_5_1),
PINMUX_SINGLE(MSIOF0_RXD),
PINMUX_SINGLE(MSIOF0_SCK),
PINMUX_SINGLE(MSIOF0_TXD),
@@ -1401,11 +1408,6 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP17_7_4, STP_ISSYNC_0_E, SEL_SSP1_0_4),
PINMUX_IPSR_MSEL(IP17_7_4, RIF2_D1_B, SEL_DRIF2_1),
PINMUX_IPSR_GPSR(IP17_7_4, TPU0TO3),
-
- /* I2C */
- PINMUX_IPSR_NOGP(0, I2C_SEL_0_1),
- PINMUX_IPSR_NOGP(0, I2C_SEL_3_1),
- PINMUX_IPSR_NOGP(0, I2C_SEL_5_1),
};
static const struct sh_pfc_pin pinmux_pins[] = {
@@ -1654,6 +1656,221 @@ static const unsigned int canfd1_data_mux[] = {
CANFD1_TX_MARK, CANFD1_RX_MARK,
};
+/* - DRIF0 --------------------------------------------------------------- */
+static const unsigned int drif0_ctrl_a_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+static const unsigned int drif0_ctrl_a_mux[] = {
+ RIF0_CLK_A_MARK, RIF0_SYNC_A_MARK,
+};
+static const unsigned int drif0_data0_a_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 10),
+};
+static const unsigned int drif0_data0_a_mux[] = {
+ RIF0_D0_A_MARK,
+};
+static const unsigned int drif0_data1_a_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 7),
+};
+static const unsigned int drif0_data1_a_mux[] = {
+ RIF0_D1_A_MARK,
+};
+static const unsigned int drif0_ctrl_b_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 4),
+};
+static const unsigned int drif0_ctrl_b_mux[] = {
+ RIF0_CLK_B_MARK, RIF0_SYNC_B_MARK,
+};
+static const unsigned int drif0_data0_b_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(5, 1),
+};
+static const unsigned int drif0_data0_b_mux[] = {
+ RIF0_D0_B_MARK,
+};
+static const unsigned int drif0_data1_b_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(5, 2),
+};
+static const unsigned int drif0_data1_b_mux[] = {
+ RIF0_D1_B_MARK,
+};
+static const unsigned int drif0_ctrl_c_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(5, 12), RCAR_GP_PIN(5, 15),
+};
+static const unsigned int drif0_ctrl_c_mux[] = {
+ RIF0_CLK_C_MARK, RIF0_SYNC_C_MARK,
+};
+static const unsigned int drif0_data0_c_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(5, 13),
+};
+static const unsigned int drif0_data0_c_mux[] = {
+ RIF0_D0_C_MARK,
+};
+static const unsigned int drif0_data1_c_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(5, 14),
+};
+static const unsigned int drif0_data1_c_mux[] = {
+ RIF0_D1_C_MARK,
+};
+/* - DRIF1 --------------------------------------------------------------- */
+static const unsigned int drif1_ctrl_a_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
+};
+static const unsigned int drif1_ctrl_a_mux[] = {
+ RIF1_CLK_A_MARK, RIF1_SYNC_A_MARK,
+};
+static const unsigned int drif1_data0_a_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 19),
+};
+static const unsigned int drif1_data0_a_mux[] = {
+ RIF1_D0_A_MARK,
+};
+static const unsigned int drif1_data1_a_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 20),
+};
+static const unsigned int drif1_data1_a_mux[] = {
+ RIF1_D1_A_MARK,
+};
+static const unsigned int drif1_ctrl_b_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(5, 9), RCAR_GP_PIN(5, 3),
+};
+static const unsigned int drif1_ctrl_b_mux[] = {
+ RIF1_CLK_B_MARK, RIF1_SYNC_B_MARK,
+};
+static const unsigned int drif1_data0_b_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(5, 7),
+};
+static const unsigned int drif1_data0_b_mux[] = {
+ RIF1_D0_B_MARK,
+};
+static const unsigned int drif1_data1_b_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(5, 8),
+};
+static const unsigned int drif1_data1_b_mux[] = {
+ RIF1_D1_B_MARK,
+};
+static const unsigned int drif1_ctrl_c_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 11),
+};
+static const unsigned int drif1_ctrl_c_mux[] = {
+ RIF1_CLK_C_MARK, RIF1_SYNC_C_MARK,
+};
+static const unsigned int drif1_data0_c_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(5, 6),
+};
+static const unsigned int drif1_data0_c_mux[] = {
+ RIF1_D0_C_MARK,
+};
+static const unsigned int drif1_data1_c_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(5, 10),
+};
+static const unsigned int drif1_data1_c_mux[] = {
+ RIF1_D1_C_MARK,
+};
+/* - DRIF2 --------------------------------------------------------------- */
+static const unsigned int drif2_ctrl_a_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+static const unsigned int drif2_ctrl_a_mux[] = {
+ RIF2_CLK_A_MARK, RIF2_SYNC_A_MARK,
+};
+static const unsigned int drif2_data0_a_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 7),
+};
+static const unsigned int drif2_data0_a_mux[] = {
+ RIF2_D0_A_MARK,
+};
+static const unsigned int drif2_data1_a_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 10),
+};
+static const unsigned int drif2_data1_a_mux[] = {
+ RIF2_D1_A_MARK,
+};
+static const unsigned int drif2_ctrl_b_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 26), RCAR_GP_PIN(6, 27),
+};
+static const unsigned int drif2_ctrl_b_mux[] = {
+ RIF2_CLK_B_MARK, RIF2_SYNC_B_MARK,
+};
+static const unsigned int drif2_data0_b_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 30),
+};
+static const unsigned int drif2_data0_b_mux[] = {
+ RIF2_D0_B_MARK,
+};
+static const unsigned int drif2_data1_b_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 31),
+};
+static const unsigned int drif2_data1_b_mux[] = {
+ RIF2_D1_B_MARK,
+};
+/* - DRIF3 --------------------------------------------------------------- */
+static const unsigned int drif3_ctrl_a_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
+};
+static const unsigned int drif3_ctrl_a_mux[] = {
+ RIF3_CLK_A_MARK, RIF3_SYNC_A_MARK,
+};
+static const unsigned int drif3_data0_a_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 19),
+};
+static const unsigned int drif3_data0_a_mux[] = {
+ RIF3_D0_A_MARK,
+};
+static const unsigned int drif3_data1_a_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 20),
+};
+static const unsigned int drif3_data1_a_mux[] = {
+ RIF3_D1_A_MARK,
+};
+static const unsigned int drif3_ctrl_b_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 24), RCAR_GP_PIN(6, 25),
+};
+static const unsigned int drif3_ctrl_b_mux[] = {
+ RIF3_CLK_B_MARK, RIF3_SYNC_B_MARK,
+};
+static const unsigned int drif3_data0_b_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 28),
+};
+static const unsigned int drif3_data0_b_mux[] = {
+ RIF3_D0_B_MARK,
+};
+static const unsigned int drif3_data1_b_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 29),
+};
+static const unsigned int drif3_data1_b_mux[] = {
+ RIF3_D1_B_MARK,
+};
+
/* - HSCIF0 ----------------------------------------------------------------- */
static const unsigned int hscif0_data_pins[] = {
/* RX, TX */
@@ -3346,6 +3563,36 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(canfd0_data_a),
SH_PFC_PIN_GROUP(canfd0_data_b),
SH_PFC_PIN_GROUP(canfd1_data),
+ SH_PFC_PIN_GROUP(drif0_ctrl_a),
+ SH_PFC_PIN_GROUP(drif0_data0_a),
+ SH_PFC_PIN_GROUP(drif0_data1_a),
+ SH_PFC_PIN_GROUP(drif0_ctrl_b),
+ SH_PFC_PIN_GROUP(drif0_data0_b),
+ SH_PFC_PIN_GROUP(drif0_data1_b),
+ SH_PFC_PIN_GROUP(drif0_ctrl_c),
+ SH_PFC_PIN_GROUP(drif0_data0_c),
+ SH_PFC_PIN_GROUP(drif0_data1_c),
+ SH_PFC_PIN_GROUP(drif1_ctrl_a),
+ SH_PFC_PIN_GROUP(drif1_data0_a),
+ SH_PFC_PIN_GROUP(drif1_data1_a),
+ SH_PFC_PIN_GROUP(drif1_ctrl_b),
+ SH_PFC_PIN_GROUP(drif1_data0_b),
+ SH_PFC_PIN_GROUP(drif1_data1_b),
+ SH_PFC_PIN_GROUP(drif1_ctrl_c),
+ SH_PFC_PIN_GROUP(drif1_data0_c),
+ SH_PFC_PIN_GROUP(drif1_data1_c),
+ SH_PFC_PIN_GROUP(drif2_ctrl_a),
+ SH_PFC_PIN_GROUP(drif2_data0_a),
+ SH_PFC_PIN_GROUP(drif2_data1_a),
+ SH_PFC_PIN_GROUP(drif2_ctrl_b),
+ SH_PFC_PIN_GROUP(drif2_data0_b),
+ SH_PFC_PIN_GROUP(drif2_data1_b),
+ SH_PFC_PIN_GROUP(drif3_ctrl_a),
+ SH_PFC_PIN_GROUP(drif3_data0_a),
+ SH_PFC_PIN_GROUP(drif3_data1_a),
+ SH_PFC_PIN_GROUP(drif3_ctrl_b),
+ SH_PFC_PIN_GROUP(drif3_data0_b),
+ SH_PFC_PIN_GROUP(drif3_data1_b),
SH_PFC_PIN_GROUP(hscif0_data),
SH_PFC_PIN_GROUP(hscif0_clk),
SH_PFC_PIN_GROUP(hscif0_ctrl),
@@ -3629,6 +3876,48 @@ static const char * const canfd1_groups[] = {
"canfd1_data",
};
+static const char * const drif0_groups[] = {
+ "drif0_ctrl_a",
+ "drif0_data0_a",
+ "drif0_data1_a",
+ "drif0_ctrl_b",
+ "drif0_data0_b",
+ "drif0_data1_b",
+ "drif0_ctrl_c",
+ "drif0_data0_c",
+ "drif0_data1_c",
+};
+
+static const char * const drif1_groups[] = {
+ "drif1_ctrl_a",
+ "drif1_data0_a",
+ "drif1_data1_a",
+ "drif1_ctrl_b",
+ "drif1_data0_b",
+ "drif1_data1_b",
+ "drif1_ctrl_c",
+ "drif1_data0_c",
+ "drif1_data1_c",
+};
+
+static const char * const drif2_groups[] = {
+ "drif2_ctrl_a",
+ "drif2_data0_a",
+ "drif2_data1_a",
+ "drif2_ctrl_b",
+ "drif2_data0_b",
+ "drif2_data1_b",
+};
+
+static const char * const drif3_groups[] = {
+ "drif3_ctrl_a",
+ "drif3_data0_a",
+ "drif3_data1_a",
+ "drif3_ctrl_b",
+ "drif3_data0_b",
+ "drif3_data1_b",
+};
+
static const char * const hscif0_groups[] = {
"hscif0_data",
"hscif0_clk",
@@ -3972,6 +4261,10 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(can_clk),
SH_PFC_FUNCTION(canfd0),
SH_PFC_FUNCTION(canfd1),
+ SH_PFC_FUNCTION(drif0),
+ SH_PFC_FUNCTION(drif1),
+ SH_PFC_FUNCTION(drif2),
+ SH_PFC_FUNCTION(drif3),
SH_PFC_FUNCTION(hscif0),
SH_PFC_FUNCTION(hscif1),
SH_PFC_FUNCTION(hscif2),
@@ -4765,8 +5058,28 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
{ },
};
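+/*
+ * GP3_00-GP3_11 map to bits 0-11 and GP4_00-GP4_17 to bits 12-29 of the
+ * POC control register at 0xe6060380.
+ */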
+static int r8a7795_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *pocctrl)
+{
+ int bit = -EINVAL;
+
+ *pocctrl = 0xe6060380;
+
+ if (pin >= RCAR_GP_PIN(3, 0) && pin <= RCAR_GP_PIN(3, 11))
+ bit = pin & 0x1f;
+
+ if (pin >= RCAR_GP_PIN(4, 0) && pin <= RCAR_GP_PIN(4, 17))
+ bit = (pin & 0x1f) + 12;
+
+ return bit;
+}
+
+static const struct sh_pfc_soc_operations r8a7795_pinmux_ops = {
+ .pin_to_pocctrl = r8a7795_pin_to_pocctrl,
+};
+
const struct sh_pfc_soc_info r8a7795_pinmux_info = {
.name = "r8a77950_pfc",
+ .ops = &r8a7795_pinmux_ops,
.unlock_reg = 0xe6060000, /* PMMR */
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7757.c b/drivers/pinctrl/sh-pfc/pfc-sh7757.c
index 0555a1fe0..6d8c31cae 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7757.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7757.c
@@ -1625,7 +1625,6 @@ static const struct pinmux_func pinmux_func_gpios[] = {
GPIO_FN(VBIOS_CS),
/* PTW (module: LBSC, EVC, SCIF) */
- GPIO_FN(A16),
GPIO_FN(A15),
GPIO_FN(A14),
GPIO_FN(A13),
diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
index fdb445d68..e208ee04a 100644
--- a/drivers/pinctrl/sh-pfc/pinctrl.c
+++ b/drivers/pinctrl/sh-pfc/pinctrl.c
@@ -632,19 +632,21 @@ static int sh_pfc_pinconf_get(struct pinctrl_dev *pctldev, unsigned _pin,
}
case PIN_CONFIG_POWER_SOURCE: {
- int ret;
+ u32 pocctrl, val;
+ int bit;
- if (!pfc->info->ops || !pfc->info->ops->get_io_voltage)
+ if (!pfc->info->ops || !pfc->info->ops->pin_to_pocctrl)
return -ENOTSUPP;
+ bit = pfc->info->ops->pin_to_pocctrl(pfc, _pin, &pocctrl);
+ if (WARN(bit < 0, "invalid pin %#x", _pin))
+ return bit;
+
spin_lock_irqsave(&pfc->lock, flags);
- ret = pfc->info->ops->get_io_voltage(pfc, _pin);
+ val = sh_pfc_read_reg(pfc, pocctrl, 32);
spin_unlock_irqrestore(&pfc->lock, flags);
- if (ret < 0)
- return ret;
-
- *config = ret;
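+ /* A set POC bit selects 3.3V I/O, a cleared bit 1.8V */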
+ *config = (val & BIT(bit)) ? 3300 : 1800;
break;
}
@@ -696,20 +698,29 @@ static int sh_pfc_pinconf_set(struct pinctrl_dev *pctldev, unsigned _pin,
}
case PIN_CONFIG_POWER_SOURCE: {
- unsigned int arg =
- pinconf_to_config_argument(configs[i]);
- int ret;
+ unsigned int mV = pinconf_to_config_argument(configs[i]);
+ u32 pocctrl, val;
+ int bit;
- if (!pfc->info->ops || !pfc->info->ops->set_io_voltage)
+ if (!pfc->info->ops || !pfc->info->ops->pin_to_pocctrl)
return -ENOTSUPP;
+ bit = pfc->info->ops->pin_to_pocctrl(pfc, _pin, &pocctrl);
+ if (WARN(bit < 0, "invalid pin %#x", _pin))
+ return bit;
+
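+ /* Only 1.8V and 3.3V I/O voltages are supported */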
+ if (mV != 1800 && mV != 3300)
+ return -EINVAL;
+
spin_lock_irqsave(&pfc->lock, flags);
- ret = pfc->info->ops->set_io_voltage(pfc, _pin, arg);
+ val = sh_pfc_read_reg(pfc, pocctrl, 32);
+ if (mV == 3300)
+ val |= BIT(bit);
+ else
+ val &= ~BIT(bit);
+ sh_pfc_write_reg(pfc, pocctrl, 32, val);
spin_unlock_irqrestore(&pfc->lock, flags);
- if (ret)
- return ret;
-
break;
}
@@ -803,8 +814,5 @@ int sh_pfc_register_pinctrl(struct sh_pfc *pfc)
pmx->pctl_desc.npins = pfc->info->nr_pins;
pmx->pctl = devm_pinctrl_register(pfc->dev, &pmx->pctl_desc, pmx);
- if (IS_ERR(pmx->pctl))
- return PTR_ERR(pmx->pctl);
-
- return 0;
+ return PTR_ERR_OR_ZERO(pmx->pctl);
}
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 656ea32f7..5e966c094 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -13,6 +13,7 @@
#include <linux/bug.h>
#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/spinlock.h>
#include <linux/stringify.h>
enum {
@@ -182,16 +183,38 @@ struct pinmux_range {
u16 force;
};
-struct sh_pfc;
+struct sh_pfc_window {
+ phys_addr_t phys;
+ void __iomem *virt;
+ unsigned long size;
+};
+
+struct sh_pfc_pin_range;
+
+struct sh_pfc {
+ struct device *dev;
+ const struct sh_pfc_soc_info *info;
+ spinlock_t lock;
+
+ unsigned int num_windows;
+ struct sh_pfc_window *windows;
+ unsigned int num_irqs;
+ unsigned int *irqs;
+
+ struct sh_pfc_pin_range *ranges;
+ unsigned int nr_ranges;
+
+ unsigned int nr_gpio_pins;
+
+ struct sh_pfc_chip *gpio;
+};
struct sh_pfc_soc_operations {
int (*init)(struct sh_pfc *pfc);
unsigned int (*get_bias)(struct sh_pfc *pfc, unsigned int pin);
void (*set_bias)(struct sh_pfc *pfc, unsigned int pin,
unsigned int bias);
- int (*get_io_voltage)(struct sh_pfc *pfc, unsigned int pin);
- int (*set_io_voltage)(struct sh_pfc *pfc, unsigned int pin,
- u16 voltage_mV);
+ int (*pin_to_pocctrl)(struct sh_pfc *pfc, unsigned int pin, u32 *pocctrl);
};
struct sh_pfc_soc_info {
@@ -227,6 +250,30 @@ struct sh_pfc_soc_info {
u32 unlock_reg;
};
+extern const struct sh_pfc_soc_info emev2_pinmux_info;
+extern const struct sh_pfc_soc_info r8a73a4_pinmux_info;
+extern const struct sh_pfc_soc_info r8a7740_pinmux_info;
+extern const struct sh_pfc_soc_info r8a7778_pinmux_info;
+extern const struct sh_pfc_soc_info r8a7779_pinmux_info;
+extern const struct sh_pfc_soc_info r8a7790_pinmux_info;
+extern const struct sh_pfc_soc_info r8a7791_pinmux_info;
+extern const struct sh_pfc_soc_info r8a7793_pinmux_info;
+extern const struct sh_pfc_soc_info r8a7794_pinmux_info;
+extern const struct sh_pfc_soc_info r8a7795_pinmux_info;
+extern const struct sh_pfc_soc_info sh7203_pinmux_info;
+extern const struct sh_pfc_soc_info sh7264_pinmux_info;
+extern const struct sh_pfc_soc_info sh7269_pinmux_info;
+extern const struct sh_pfc_soc_info sh73a0_pinmux_info;
+extern const struct sh_pfc_soc_info sh7720_pinmux_info;
+extern const struct sh_pfc_soc_info sh7722_pinmux_info;
+extern const struct sh_pfc_soc_info sh7723_pinmux_info;
+extern const struct sh_pfc_soc_info sh7724_pinmux_info;
+extern const struct sh_pfc_soc_info sh7734_pinmux_info;
+extern const struct sh_pfc_soc_info sh7757_pinmux_info;
+extern const struct sh_pfc_soc_info sh7785_pinmux_info;
+extern const struct sh_pfc_soc_info sh7786_pinmux_info;
+extern const struct sh_pfc_soc_info shx3_pinmux_info;
+
/* -----------------------------------------------------------------------------
* Helper macros to create pin and port lists
*/
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c
index 168c0f5d4..19952f73f 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas7.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c
@@ -5424,8 +5424,10 @@ static int atlas7_pinmux_probe(struct platform_device *pdev)
if (ret)
return ret;
pmx->sys2pci_base = devm_ioremap_resource(&pdev->dev, &res);
- if (IS_ERR(pmx->sys2pci_base))
+ if (IS_ERR(pmx->sys2pci_base)) {
+ of_node_put(sys2pci_np);
return -ENOMEM;
+ }
pmx->dev = &pdev->dev;
diff --git a/drivers/pinctrl/stm32/Kconfig b/drivers/pinctrl/stm32/Kconfig
index 0f28841b2..4c40dae38 100644
--- a/drivers/pinctrl/stm32/Kconfig
+++ b/drivers/pinctrl/stm32/Kconfig
@@ -13,4 +13,10 @@ config PINCTRL_STM32F429
default MACH_STM32F429
select PINCTRL_STM32
+config PINCTRL_STM32F746
+ bool "STMicroelectronics STM32F746 pin control" if COMPILE_TEST && !MACH_STM32F746
+ depends on OF
+ default MACH_STM32F746
+ select PINCTRL_STM32
+
endif
diff --git a/drivers/pinctrl/stm32/Makefile b/drivers/pinctrl/stm32/Makefile
index fc17d4238..4a1ee7484 100644
--- a/drivers/pinctrl/stm32/Makefile
+++ b/drivers/pinctrl/stm32/Makefile
@@ -3,3 +3,4 @@ obj-$(CONFIG_PINCTRL_STM32) += pinctrl-stm32.o
# SoC Drivers
obj-$(CONFIG_PINCTRL_STM32F429) += pinctrl-stm32f429.o
+obj-$(CONFIG_PINCTRL_STM32F746) += pinctrl-stm32f746.o
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index ae9fab82a..4ae596bf1 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -638,8 +638,8 @@ static u32 stm32_pconf_get_bias(struct stm32_gpio_bank *bank,
return (val >> (offset * 2));
}
-static bool stm32_pconf_input_get(struct stm32_gpio_bank *bank,
- unsigned int offset)
+static bool stm32_pconf_get(struct stm32_gpio_bank *bank,
+ unsigned int offset, bool dir)
{
unsigned long flags;
u32 val;
@@ -647,23 +647,12 @@ static bool stm32_pconf_input_get(struct stm32_gpio_bank *bank,
clk_enable(bank->clk);
spin_lock_irqsave(&bank->lock, flags);
- val = !!(readl_relaxed(bank->base + STM32_GPIO_IDR) & BIT(offset));
-
- spin_unlock_irqrestore(&bank->lock, flags);
- clk_disable(bank->clk);
-
- return val;
-}
-
-static bool stm32_pconf_output_get(struct stm32_gpio_bank *bank,
- unsigned int offset)
-{
- unsigned long flags;
- u32 val;
-
- clk_enable(bank->clk);
- spin_lock_irqsave(&bank->lock, flags);
- val = !!(readl_relaxed(bank->base + STM32_GPIO_ODR) & BIT(offset));
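+ /* dir selects the register to sample: input (IDR) or output (ODR) */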
+ if (dir)
+ val = !!(readl_relaxed(bank->base + STM32_GPIO_IDR) &
+ BIT(offset));
+ else
+ val = !!(readl_relaxed(bank->base + STM32_GPIO_ODR) &
+ BIT(offset));
spin_unlock_irqrestore(&bank->lock, flags);
clk_disable(bank->clk);
@@ -772,7 +761,7 @@ static void stm32_pconf_dbg_show(struct pinctrl_dev *pctldev,
switch (mode) {
/* input */
case 0:
- val = stm32_pconf_input_get(bank, offset);
+ val = stm32_pconf_get(bank, offset, true);
seq_printf(s, "- %s - %s",
val ? "high" : "low",
biasing[bias]);
@@ -782,7 +771,7 @@ static void stm32_pconf_dbg_show(struct pinctrl_dev *pctldev,
case 1:
drive = stm32_pconf_get_driving(bank, offset);
speed = stm32_pconf_get_speed(bank, offset);
- val = stm32_pconf_output_get(bank, offset);
+ val = stm32_pconf_get(bank, offset, false);
seq_printf(s, "- %s - %s - %s - %s %s",
val ? "high" : "low",
drive ? "open drain" : "push pull",
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32f746.c b/drivers/pinctrl/stm32/pinctrl-stm32f746.c
new file mode 100644
index 000000000..c0b4462ce
--- /dev/null
+++ b/drivers/pinctrl/stm32/pinctrl-stm32f746.c
@@ -0,0 +1,1681 @@
+/*
+ * Copyright (C) Maxime Coquelin 2015
+ * Author: Maxime Coquelin <mcoquelin.stm32@gmail.com>
+ * License terms: GNU General Public License (GPL), version 2
+ */
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-stm32.h"
+
+static const struct stm32_desc_pin stm32f746_pins[] = {
+ STM32_PIN(
+ PINCTRL_PIN(0, "PA0"),
+ STM32_FUNCTION(0, "GPIOA0"),
+ STM32_FUNCTION(2, "TIM2_CH1 TIM2_ETR"),
+ STM32_FUNCTION(3, "TIM5_CH1"),
+ STM32_FUNCTION(4, "TIM8_ETR"),
+ STM32_FUNCTION(8, "USART2_CTS"),
+ STM32_FUNCTION(9, "UART4_TX"),
+ STM32_FUNCTION(11, "SAI2_SD_B"),
+ STM32_FUNCTION(12, "ETH_MII_CRS"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(1, "PA1"),
+ STM32_FUNCTION(0, "GPIOA1"),
+ STM32_FUNCTION(2, "TIM2_CH2"),
+ STM32_FUNCTION(3, "TIM5_CH2"),
+ STM32_FUNCTION(8, "USART2_RTS"),
+ STM32_FUNCTION(9, "UART4_RX"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_IO3"),
+ STM32_FUNCTION(11, "SAI2_MCLK_B"),
+ STM32_FUNCTION(12, "ETH_MII_RX_CLK ETH_RMII_REF_CLK"),
+ STM32_FUNCTION(15, "LCD_R2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(2, "PA2"),
+ STM32_FUNCTION(0, "GPIOA2"),
+ STM32_FUNCTION(2, "TIM2_CH3"),
+ STM32_FUNCTION(3, "TIM5_CH3"),
+ STM32_FUNCTION(4, "TIM9_CH1"),
+ STM32_FUNCTION(8, "USART2_TX"),
+ STM32_FUNCTION(9, "SAI2_SCK_B"),
+ STM32_FUNCTION(12, "ETH_MDIO"),
+ STM32_FUNCTION(15, "LCD_R1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(3, "PA3"),
+ STM32_FUNCTION(0, "GPIOA3"),
+ STM32_FUNCTION(2, "TIM2_CH4"),
+ STM32_FUNCTION(3, "TIM5_CH4"),
+ STM32_FUNCTION(4, "TIM9_CH2"),
+ STM32_FUNCTION(8, "USART2_RX"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_D0"),
+ STM32_FUNCTION(12, "ETH_MII_COL"),
+ STM32_FUNCTION(15, "LCD_B5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(4, "PA4"),
+ STM32_FUNCTION(0, "GPIOA4"),
+ STM32_FUNCTION(6, "SPI1_NSS I2S1_WS"),
+ STM32_FUNCTION(7, "SPI3_NSS I2S3_WS"),
+ STM32_FUNCTION(8, "USART2_CK"),
+ STM32_FUNCTION(13, "OTG_HS_SOF"),
+ STM32_FUNCTION(14, "DCMI_HSYNC"),
+ STM32_FUNCTION(15, "LCD_VSYNC"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(5, "PA5"),
+ STM32_FUNCTION(0, "GPIOA5"),
+ STM32_FUNCTION(2, "TIM2_CH1 TIM2_ETR"),
+ STM32_FUNCTION(4, "TIM8_CH1N"),
+ STM32_FUNCTION(6, "SPI1_SCK I2S1_CK"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_CK"),
+ STM32_FUNCTION(15, "LCD_R4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(6, "PA6"),
+ STM32_FUNCTION(0, "GPIOA6"),
+ STM32_FUNCTION(2, "TIM1_BKIN"),
+ STM32_FUNCTION(3, "TIM3_CH1"),
+ STM32_FUNCTION(4, "TIM8_BKIN"),
+ STM32_FUNCTION(6, "SPI1_MISO"),
+ STM32_FUNCTION(10, "TIM13_CH1"),
+ STM32_FUNCTION(14, "DCMI_PIXCLK"),
+ STM32_FUNCTION(15, "LCD_G2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(7, "PA7"),
+ STM32_FUNCTION(0, "GPIOA7"),
+ STM32_FUNCTION(2, "TIM1_CH1N"),
+ STM32_FUNCTION(3, "TIM3_CH2"),
+ STM32_FUNCTION(4, "TIM8_CH1N"),
+ STM32_FUNCTION(6, "SPI1_MOSI I2S1_SD"),
+ STM32_FUNCTION(10, "TIM14_CH1"),
+ STM32_FUNCTION(12, "ETH_MII_RX_DV ETH_RMII_CRS_DV"),
+ STM32_FUNCTION(13, "FMC_SDNWE"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(8, "PA8"),
+ STM32_FUNCTION(0, "GPIOA8"),
+ STM32_FUNCTION(1, "MCO1"),
+ STM32_FUNCTION(2, "TIM1_CH1"),
+ STM32_FUNCTION(4, "TIM8_BKIN2"),
+ STM32_FUNCTION(5, "I2C3_SCL"),
+ STM32_FUNCTION(8, "USART1_CK"),
+ STM32_FUNCTION(11, "OTG_FS_SOF"),
+ STM32_FUNCTION(15, "LCD_R6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(9, "PA9"),
+ STM32_FUNCTION(0, "GPIOA9"),
+ STM32_FUNCTION(2, "TIM1_CH2"),
+ STM32_FUNCTION(5, "I2C3_SMBA"),
+ STM32_FUNCTION(6, "SPI2_SCK I2S2_CK"),
+ STM32_FUNCTION(8, "USART1_TX"),
+ STM32_FUNCTION(14, "DCMI_D0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(10, "PA10"),
+ STM32_FUNCTION(0, "GPIOA10"),
+ STM32_FUNCTION(2, "TIM1_CH3"),
+ STM32_FUNCTION(8, "USART1_RX"),
+ STM32_FUNCTION(11, "OTG_FS_ID"),
+ STM32_FUNCTION(14, "DCMI_D1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(11, "PA11"),
+ STM32_FUNCTION(0, "GPIOA11"),
+ STM32_FUNCTION(2, "TIM1_CH4"),
+ STM32_FUNCTION(8, "USART1_CTS"),
+ STM32_FUNCTION(10, "CAN1_RX"),
+ STM32_FUNCTION(11, "OTG_FS_DM"),
+ STM32_FUNCTION(15, "LCD_R4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(12, "PA12"),
+ STM32_FUNCTION(0, "GPIOA12"),
+ STM32_FUNCTION(2, "TIM1_ETR"),
+ STM32_FUNCTION(8, "USART1_RTS"),
+ STM32_FUNCTION(9, "SAI2_FS_B"),
+ STM32_FUNCTION(10, "CAN1_TX"),
+ STM32_FUNCTION(11, "OTG_FS_DP"),
+ STM32_FUNCTION(15, "LCD_R5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(13, "PA13"),
+ STM32_FUNCTION(0, "GPIOA13"),
+ STM32_FUNCTION(1, "JTMS SWDIO"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(14, "PA14"),
+ STM32_FUNCTION(0, "GPIOA14"),
+ STM32_FUNCTION(1, "JTCK SWCLK"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(15, "PA15"),
+ STM32_FUNCTION(0, "GPIOA15"),
+ STM32_FUNCTION(1, "JTDI"),
+ STM32_FUNCTION(2, "TIM2_CH1 TIM2_ETR"),
+ STM32_FUNCTION(5, "HDMI_CEC"),
+ STM32_FUNCTION(6, "SPI1_NSS I2S1_WS"),
+ STM32_FUNCTION(7, "SPI3_NSS I2S3_WS"),
+ STM32_FUNCTION(9, "UART4_RTS"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(16, "PB0"),
+ STM32_FUNCTION(0, "GPIOB0"),
+ STM32_FUNCTION(2, "TIM1_CH2N"),
+ STM32_FUNCTION(3, "TIM3_CH3"),
+ STM32_FUNCTION(4, "TIM8_CH2N"),
+ STM32_FUNCTION(9, "UART4_CTS"),
+ STM32_FUNCTION(10, "LCD_R3"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_D1"),
+ STM32_FUNCTION(12, "ETH_MII_RXD2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(17, "PB1"),
+ STM32_FUNCTION(0, "GPIOB1"),
+ STM32_FUNCTION(2, "TIM1_CH3N"),
+ STM32_FUNCTION(3, "TIM3_CH4"),
+ STM32_FUNCTION(4, "TIM8_CH3N"),
+ STM32_FUNCTION(10, "LCD_R6"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_D2"),
+ STM32_FUNCTION(12, "ETH_MII_RXD3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(18, "PB2"),
+ STM32_FUNCTION(0, "GPIOB2"),
+ STM32_FUNCTION(7, "SAI1_SD_A"),
+ STM32_FUNCTION(8, "SPI3_MOSI I2S3_SD"),
+ STM32_FUNCTION(10, "QUADSPI_CLK"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(19, "PB3"),
+ STM32_FUNCTION(0, "GPIOB3"),
+ STM32_FUNCTION(1, "JTDO TRACESWO"),
+ STM32_FUNCTION(2, "TIM2_CH2"),
+ STM32_FUNCTION(6, "SPI1_SCK I2S1_CK"),
+ STM32_FUNCTION(7, "SPI3_SCK I2S3_CK"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(20, "PB4"),
+ STM32_FUNCTION(0, "GPIOB4"),
+ STM32_FUNCTION(1, "NJTRST"),
+ STM32_FUNCTION(3, "TIM3_CH1"),
+ STM32_FUNCTION(6, "SPI1_MISO"),
+ STM32_FUNCTION(7, "SPI3_MISO"),
+ STM32_FUNCTION(8, "SPI2_NSS I2S2_WS"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(21, "PB5"),
+ STM32_FUNCTION(0, "GPIOB5"),
+ STM32_FUNCTION(3, "TIM3_CH2"),
+ STM32_FUNCTION(5, "I2C1_SMBA"),
+ STM32_FUNCTION(6, "SPI1_MOSI I2S1_SD"),
+ STM32_FUNCTION(7, "SPI3_MOSI I2S3_SD"),
+ STM32_FUNCTION(10, "CAN2_RX"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_D7"),
+ STM32_FUNCTION(12, "ETH_PPS_OUT"),
+ STM32_FUNCTION(13, "FMC_SDCKE1"),
+ STM32_FUNCTION(14, "DCMI_D10"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(22, "PB6"),
+ STM32_FUNCTION(0, "GPIOB6"),
+ STM32_FUNCTION(3, "TIM4_CH1"),
+ STM32_FUNCTION(4, "HDMI_CEC"),
+ STM32_FUNCTION(5, "I2C1_SCL"),
+ STM32_FUNCTION(8, "USART1_TX"),
+ STM32_FUNCTION(10, "CAN2_TX"),
+ STM32_FUNCTION(11, "QUADSPI_BK1_NCS"),
+ STM32_FUNCTION(13, "FMC_SDNE1"),
+ STM32_FUNCTION(14, "DCMI_D5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(23, "PB7"),
+ STM32_FUNCTION(0, "GPIOB7"),
+ STM32_FUNCTION(3, "TIM4_CH2"),
+ STM32_FUNCTION(5, "I2C1_SDA"),
+ STM32_FUNCTION(8, "USART1_RX"),
+ STM32_FUNCTION(13, "FMC_NL"),
+ STM32_FUNCTION(14, "DCMI_VSYNC"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(24, "PB8"),
+ STM32_FUNCTION(0, "GPIOB8"),
+ STM32_FUNCTION(3, "TIM4_CH3"),
+ STM32_FUNCTION(4, "TIM10_CH1"),
+ STM32_FUNCTION(5, "I2C1_SCL"),
+ STM32_FUNCTION(10, "CAN1_RX"),
+ STM32_FUNCTION(12, "ETH_MII_TXD3"),
+ STM32_FUNCTION(13, "SDMMC1_D4"),
+ STM32_FUNCTION(14, "DCMI_D6"),
+ STM32_FUNCTION(15, "LCD_B6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(25, "PB9"),
+ STM32_FUNCTION(0, "GPIOB9"),
+ STM32_FUNCTION(3, "TIM4_CH4"),
+ STM32_FUNCTION(4, "TIM11_CH1"),
+ STM32_FUNCTION(5, "I2C1_SDA"),
+ STM32_FUNCTION(6, "SPI2_NSS I2S2_WS"),
+ STM32_FUNCTION(10, "CAN1_TX"),
+ STM32_FUNCTION(13, "SDMMC1_D5"),
+ STM32_FUNCTION(14, "DCMI_D7"),
+ STM32_FUNCTION(15, "LCD_B7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(26, "PB10"),
+ STM32_FUNCTION(0, "GPIOB10"),
+ STM32_FUNCTION(2, "TIM2_CH3"),
+ STM32_FUNCTION(5, "I2C2_SCL"),
+ STM32_FUNCTION(6, "SPI2_SCK I2S2_CK"),
+ STM32_FUNCTION(8, "USART3_TX"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_D3"),
+ STM32_FUNCTION(12, "ETH_MII_RX_ER"),
+ STM32_FUNCTION(15, "LCD_G4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(27, "PB11"),
+ STM32_FUNCTION(0, "GPIOB11"),
+ STM32_FUNCTION(2, "TIM2_CH4"),
+ STM32_FUNCTION(5, "I2C2_SDA"),
+ STM32_FUNCTION(8, "USART3_RX"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_D4"),
+ STM32_FUNCTION(12, "ETH_MII_TX_EN ETH_RMII_TX_EN"),
+ STM32_FUNCTION(15, "LCD_G5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(28, "PB12"),
+ STM32_FUNCTION(0, "GPIOB12"),
+ STM32_FUNCTION(2, "TIM1_BKIN"),
+ STM32_FUNCTION(5, "I2C2_SMBA"),
+ STM32_FUNCTION(6, "SPI2_NSS I2S2_WS"),
+ STM32_FUNCTION(8, "USART3_CK"),
+ STM32_FUNCTION(10, "CAN2_RX"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_D5"),
+ STM32_FUNCTION(12, "ETH_MII_TXD0 ETH_RMII_TXD0"),
+ STM32_FUNCTION(13, "OTG_HS_ID"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(29, "PB13"),
+ STM32_FUNCTION(0, "GPIOB13"),
+ STM32_FUNCTION(2, "TIM1_CH1N"),
+ STM32_FUNCTION(6, "SPI2_SCK I2S2_CK"),
+ STM32_FUNCTION(8, "USART3_CTS"),
+ STM32_FUNCTION(10, "CAN2_TX"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_D6"),
+ STM32_FUNCTION(12, "ETH_MII_TXD1 ETH_RMII_TXD1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(30, "PB14"),
+ STM32_FUNCTION(0, "GPIOB14"),
+ STM32_FUNCTION(2, "TIM1_CH2N"),
+ STM32_FUNCTION(4, "TIM8_CH2N"),
+ STM32_FUNCTION(6, "SPI2_MISO"),
+ STM32_FUNCTION(8, "USART3_RTS"),
+ STM32_FUNCTION(10, "TIM12_CH1"),
+ STM32_FUNCTION(13, "OTG_HS_DM"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(31, "PB15"),
+ STM32_FUNCTION(0, "GPIOB15"),
+ STM32_FUNCTION(1, "RTC_REFIN"),
+ STM32_FUNCTION(2, "TIM1_CH3N"),
+ STM32_FUNCTION(4, "TIM8_CH3N"),
+ STM32_FUNCTION(6, "SPI2_MOSI I2S2_SD"),
+ STM32_FUNCTION(10, "TIM12_CH2"),
+ STM32_FUNCTION(13, "OTG_HS_DP"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(32, "PC0"),
+ STM32_FUNCTION(0, "GPIOC0"),
+ STM32_FUNCTION(9, "SAI2_FS_B"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_STP"),
+ STM32_FUNCTION(13, "FMC_SDNWE"),
+ STM32_FUNCTION(15, "LCD_R5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(33, "PC1"),
+ STM32_FUNCTION(0, "GPIOC1"),
+ STM32_FUNCTION(1, "TRACED0"),
+ STM32_FUNCTION(6, "SPI2_MOSI I2S2_SD"),
+ STM32_FUNCTION(7, "SAI1_SD_A"),
+ STM32_FUNCTION(12, "ETH_MDC"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(34, "PC2"),
+ STM32_FUNCTION(0, "GPIOC2"),
+ STM32_FUNCTION(6, "SPI2_MISO"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_DIR"),
+ STM32_FUNCTION(12, "ETH_MII_TXD2"),
+ STM32_FUNCTION(13, "FMC_SDNE0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(35, "PC3"),
+ STM32_FUNCTION(0, "GPIOC3"),
+ STM32_FUNCTION(6, "SPI2_MOSI I2S2_SD"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_NXT"),
+ STM32_FUNCTION(12, "ETH_MII_TX_CLK"),
+ STM32_FUNCTION(13, "FMC_SDCKE0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(36, "PC4"),
+ STM32_FUNCTION(0, "GPIOC4"),
+ STM32_FUNCTION(6, "I2S1_MCK"),
+ STM32_FUNCTION(9, "SPDIFRX_IN2"),
+ STM32_FUNCTION(12, "ETH_MII_RXD0 ETH_RMII_RXD0"),
+ STM32_FUNCTION(13, "FMC_SDNE0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(37, "PC5"),
+ STM32_FUNCTION(0, "GPIOC5"),
+ STM32_FUNCTION(9, "SPDIFRX_IN3"),
+ STM32_FUNCTION(12, "ETH_MII_RXD1 ETH_RMII_RXD1"),
+ STM32_FUNCTION(13, "FMC_SDCKE0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(38, "PC6"),
+ STM32_FUNCTION(0, "GPIOC6"),
+ STM32_FUNCTION(3, "TIM3_CH1"),
+ STM32_FUNCTION(4, "TIM8_CH1"),
+ STM32_FUNCTION(6, "I2S2_MCK"),
+ STM32_FUNCTION(9, "USART6_TX"),
+ STM32_FUNCTION(13, "SDMMC1_D6"),
+ STM32_FUNCTION(14, "DCMI_D0"),
+ STM32_FUNCTION(15, "LCD_HSYNC"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(39, "PC7"),
+ STM32_FUNCTION(0, "GPIOC7"),
+ STM32_FUNCTION(3, "TIM3_CH2"),
+ STM32_FUNCTION(4, "TIM8_CH2"),
+ STM32_FUNCTION(7, "I2S3_MCK"),
+ STM32_FUNCTION(9, "USART6_RX"),
+ STM32_FUNCTION(13, "SDMMC1_D7"),
+ STM32_FUNCTION(14, "DCMI_D1"),
+ STM32_FUNCTION(15, "LCD_G6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(40, "PC8"),
+ STM32_FUNCTION(0, "GPIOC8"),
+ STM32_FUNCTION(1, "TRACED1"),
+ STM32_FUNCTION(3, "TIM3_CH3"),
+ STM32_FUNCTION(4, "TIM8_CH3"),
+ STM32_FUNCTION(8, "UART5_RTS"),
+ STM32_FUNCTION(9, "USART6_CK"),
+ STM32_FUNCTION(13, "SDMMC1_D0"),
+ STM32_FUNCTION(14, "DCMI_D2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(41, "PC9"),
+ STM32_FUNCTION(0, "GPIOC9"),
+ STM32_FUNCTION(1, "MCO2"),
+ STM32_FUNCTION(3, "TIM3_CH4"),
+ STM32_FUNCTION(4, "TIM8_CH4"),
+ STM32_FUNCTION(5, "I2C3_SDA"),
+ STM32_FUNCTION(6, "I2S_CKIN"),
+ STM32_FUNCTION(8, "UART5_CTS"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_IO0"),
+ STM32_FUNCTION(13, "SDMMC1_D1"),
+ STM32_FUNCTION(14, "DCMI_D3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(42, "PC10"),
+ STM32_FUNCTION(0, "GPIOC10"),
+ STM32_FUNCTION(7, "SPI3_SCK I2S3_CK"),
+ STM32_FUNCTION(8, "USART3_TX"),
+ STM32_FUNCTION(9, "UART4_TX"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_IO1"),
+ STM32_FUNCTION(13, "SDMMC1_D2"),
+ STM32_FUNCTION(14, "DCMI_D8"),
+ STM32_FUNCTION(15, "LCD_R2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(43, "PC11"),
+ STM32_FUNCTION(0, "GPIOC11"),
+ STM32_FUNCTION(7, "SPI3_MISO"),
+ STM32_FUNCTION(8, "USART3_RX"),
+ STM32_FUNCTION(9, "UART4_RX"),
+ STM32_FUNCTION(10, "QUADSPI_BK2_NCS"),
+ STM32_FUNCTION(13, "SDMMC1_D3"),
+ STM32_FUNCTION(14, "DCMI_D4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(44, "PC12"),
+ STM32_FUNCTION(0, "GPIOC12"),
+ STM32_FUNCTION(1, "TRACED3"),
+ STM32_FUNCTION(7, "SPI3_MOSI I2S3_SD"),
+ STM32_FUNCTION(8, "USART3_CK"),
+ STM32_FUNCTION(9, "UART5_TX"),
+ STM32_FUNCTION(13, "SDMMC1_CK"),
+ STM32_FUNCTION(14, "DCMI_D9"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(45, "PC13"),
+ STM32_FUNCTION(0, "GPIOC13"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(46, "PC14"),
+ STM32_FUNCTION(0, "GPIOC14"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(47, "PC15"),
+ STM32_FUNCTION(0, "GPIOC15"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(48, "PD0"),
+ STM32_FUNCTION(0, "GPIOD0"),
+ STM32_FUNCTION(10, "CAN1_RX"),
+ STM32_FUNCTION(13, "FMC_D2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(49, "PD1"),
+ STM32_FUNCTION(0, "GPIOD1"),
+ STM32_FUNCTION(10, "CAN1_TX"),
+ STM32_FUNCTION(13, "FMC_D3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(50, "PD2"),
+ STM32_FUNCTION(0, "GPIOD2"),
+ STM32_FUNCTION(1, "TRACED2"),
+ STM32_FUNCTION(3, "TIM3_ETR"),
+ STM32_FUNCTION(9, "UART5_RX"),
+ STM32_FUNCTION(13, "SDMMC1_CMD"),
+ STM32_FUNCTION(14, "DCMI_D11"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(51, "PD3"),
+ STM32_FUNCTION(0, "GPIOD3"),
+ STM32_FUNCTION(6, "SPI2_SCK I2S2_CK"),
+ STM32_FUNCTION(8, "USART2_CTS"),
+ STM32_FUNCTION(13, "FMC_CLK"),
+ STM32_FUNCTION(14, "DCMI_D5"),
+ STM32_FUNCTION(15, "LCD_G7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(52, "PD4"),
+ STM32_FUNCTION(0, "GPIOD4"),
+ STM32_FUNCTION(8, "USART2_RTS"),
+ STM32_FUNCTION(13, "FMC_NOE"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(53, "PD5"),
+ STM32_FUNCTION(0, "GPIOD5"),
+ STM32_FUNCTION(8, "USART2_TX"),
+ STM32_FUNCTION(13, "FMC_NWE"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(54, "PD6"),
+ STM32_FUNCTION(0, "GPIOD6"),
+ STM32_FUNCTION(6, "SPI3_MOSI I2S3_SD"),
+ STM32_FUNCTION(7, "SAI1_SD_A"),
+ STM32_FUNCTION(8, "USART2_RX"),
+ STM32_FUNCTION(13, "FMC_NWAIT"),
+ STM32_FUNCTION(14, "DCMI_D10"),
+ STM32_FUNCTION(15, "LCD_B2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(55, "PD7"),
+ STM32_FUNCTION(0, "GPIOD7"),
+ STM32_FUNCTION(8, "USART2_CK"),
+ STM32_FUNCTION(9, "SPDIFRX_IN0"),
+ STM32_FUNCTION(13, "FMC_NE1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(56, "PD8"),
+ STM32_FUNCTION(0, "GPIOD8"),
+ STM32_FUNCTION(8, "USART3_TX"),
+ STM32_FUNCTION(9, "SPDIFRX_IN1"),
+ STM32_FUNCTION(13, "FMC_D13"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(57, "PD9"),
+ STM32_FUNCTION(0, "GPIOD9"),
+ STM32_FUNCTION(8, "USART3_RX"),
+ STM32_FUNCTION(13, "FMC_D14"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(58, "PD10"),
+ STM32_FUNCTION(0, "GPIOD10"),
+ STM32_FUNCTION(8, "USART3_CK"),
+ STM32_FUNCTION(13, "FMC_D15"),
+ STM32_FUNCTION(15, "LCD_B3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(59, "PD11"),
+ STM32_FUNCTION(0, "GPIOD11"),
+ STM32_FUNCTION(5, "I2C4_SMBA"),
+ STM32_FUNCTION(8, "USART3_CTS"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_IO0"),
+ STM32_FUNCTION(11, "SAI2_SD_A"),
+ STM32_FUNCTION(13, "FMC_A16 FMC_CLE"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(60, "PD12"),
+ STM32_FUNCTION(0, "GPIOD12"),
+ STM32_FUNCTION(3, "TIM4_CH1"),
+ STM32_FUNCTION(4, "LPTIM1_IN1"),
+ STM32_FUNCTION(5, "I2C4_SCL"),
+ STM32_FUNCTION(8, "USART3_RTS"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_IO1"),
+ STM32_FUNCTION(11, "SAI2_FS_A"),
+ STM32_FUNCTION(13, "FMC_A17 FMC_ALE"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(61, "PD13"),
+ STM32_FUNCTION(0, "GPIOD13"),
+ STM32_FUNCTION(3, "TIM4_CH2"),
+ STM32_FUNCTION(4, "LPTIM1_OUT"),
+ STM32_FUNCTION(5, "I2C4_SDA"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_IO3"),
+ STM32_FUNCTION(11, "SAI2_SCK_A"),
+ STM32_FUNCTION(13, "FMC_A18"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(62, "PD14"),
+ STM32_FUNCTION(0, "GPIOD14"),
+ STM32_FUNCTION(3, "TIM4_CH3"),
+ STM32_FUNCTION(9, "UART8_CTS"),
+ STM32_FUNCTION(13, "FMC_D0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(63, "PD15"),
+ STM32_FUNCTION(0, "GPIOD15"),
+ STM32_FUNCTION(3, "TIM4_CH4"),
+ STM32_FUNCTION(9, "UART8_RTS"),
+ STM32_FUNCTION(13, "FMC_D1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(64, "PE0"),
+ STM32_FUNCTION(0, "GPIOE0"),
+ STM32_FUNCTION(3, "TIM4_ETR"),
+ STM32_FUNCTION(4, "LPTIM1_ETR"),
+ STM32_FUNCTION(9, "UART8_RX"),
+ STM32_FUNCTION(11, "SAI2_MCLK_A"),
+ STM32_FUNCTION(13, "FMC_NBL0"),
+ STM32_FUNCTION(14, "DCMI_D2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(65, "PE1"),
+ STM32_FUNCTION(0, "GPIOE1"),
+ STM32_FUNCTION(4, "LPTIM1_IN2"),
+ STM32_FUNCTION(9, "UART8_TX"),
+ STM32_FUNCTION(13, "FMC_NBL1"),
+ STM32_FUNCTION(14, "DCMI_D3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(66, "PE2"),
+ STM32_FUNCTION(0, "GPIOE2"),
+ STM32_FUNCTION(1, "TRACECLK"),
+ STM32_FUNCTION(6, "SPI4_SCK"),
+ STM32_FUNCTION(7, "SAI1_MCLK_A"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_IO2"),
+ STM32_FUNCTION(12, "ETH_MII_TXD3"),
+ STM32_FUNCTION(13, "FMC_A23"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(67, "PE3"),
+ STM32_FUNCTION(0, "GPIOE3"),
+ STM32_FUNCTION(1, "TRACED0"),
+ STM32_FUNCTION(7, "SAI1_SD_B"),
+ STM32_FUNCTION(13, "FMC_A19"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(68, "PE4"),
+ STM32_FUNCTION(0, "GPIOE4"),
+ STM32_FUNCTION(1, "TRACED1"),
+ STM32_FUNCTION(6, "SPI4_NSS"),
+ STM32_FUNCTION(7, "SAI1_FS_A"),
+ STM32_FUNCTION(13, "FMC_A20"),
+ STM32_FUNCTION(14, "DCMI_D4"),
+ STM32_FUNCTION(15, "LCD_B0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(69, "PE5"),
+ STM32_FUNCTION(0, "GPIOE5"),
+ STM32_FUNCTION(1, "TRACED2"),
+ STM32_FUNCTION(4, "TIM9_CH1"),
+ STM32_FUNCTION(6, "SPI4_MISO"),
+ STM32_FUNCTION(7, "SAI1_SCK_A"),
+ STM32_FUNCTION(13, "FMC_A21"),
+ STM32_FUNCTION(14, "DCMI_D6"),
+ STM32_FUNCTION(15, "LCD_G0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(70, "PE6"),
+ STM32_FUNCTION(0, "GPIOE6"),
+ STM32_FUNCTION(1, "TRACED3"),
+ STM32_FUNCTION(2, "TIM1_BKIN2"),
+ STM32_FUNCTION(4, "TIM9_CH2"),
+ STM32_FUNCTION(6, "SPI4_MOSI"),
+ STM32_FUNCTION(7, "SAI1_SD_A"),
+ STM32_FUNCTION(11, "SAI2_MCLK_B"),
+ STM32_FUNCTION(13, "FMC_A22"),
+ STM32_FUNCTION(14, "DCMI_D7"),
+ STM32_FUNCTION(15, "LCD_G1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(71, "PE7"),
+ STM32_FUNCTION(0, "GPIOE7"),
+ STM32_FUNCTION(2, "TIM1_ETR"),
+ STM32_FUNCTION(9, "UART7_RX"),
+ STM32_FUNCTION(11, "QUADSPI_BK2_IO0"),
+ STM32_FUNCTION(13, "FMC_D4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(72, "PE8"),
+ STM32_FUNCTION(0, "GPIOE8"),
+ STM32_FUNCTION(2, "TIM1_CH1N"),
+ STM32_FUNCTION(9, "UART7_TX"),
+ STM32_FUNCTION(11, "QUADSPI_BK2_IO1"),
+ STM32_FUNCTION(13, "FMC_D5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(73, "PE9"),
+ STM32_FUNCTION(0, "GPIOE9"),
+ STM32_FUNCTION(2, "TIM1_CH1"),
+ STM32_FUNCTION(9, "UART7_RTS"),
+ STM32_FUNCTION(11, "QUADSPI_BK2_IO2"),
+ STM32_FUNCTION(13, "FMC_D6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(74, "PE10"),
+ STM32_FUNCTION(0, "GPIOE10"),
+ STM32_FUNCTION(2, "TIM1_CH2N"),
+ STM32_FUNCTION(9, "UART7_CTS"),
+ STM32_FUNCTION(11, "QUADSPI_BK2_IO3"),
+ STM32_FUNCTION(13, "FMC_D7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(75, "PE11"),
+ STM32_FUNCTION(0, "GPIOE11"),
+ STM32_FUNCTION(2, "TIM1_CH2"),
+ STM32_FUNCTION(6, "SPI4_NSS"),
+ STM32_FUNCTION(11, "SAI2_SD_B"),
+ STM32_FUNCTION(13, "FMC_D8"),
+ STM32_FUNCTION(15, "LCD_G3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(76, "PE12"),
+ STM32_FUNCTION(0, "GPIOE12"),
+ STM32_FUNCTION(2, "TIM1_CH3N"),
+ STM32_FUNCTION(6, "SPI4_SCK"),
+ STM32_FUNCTION(11, "SAI2_SCK_B"),
+ STM32_FUNCTION(13, "FMC_D9"),
+ STM32_FUNCTION(15, "LCD_B4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(77, "PE13"),
+ STM32_FUNCTION(0, "GPIOE13"),
+ STM32_FUNCTION(2, "TIM1_CH3"),
+ STM32_FUNCTION(6, "SPI4_MISO"),
+ STM32_FUNCTION(11, "SAI2_FS_B"),
+ STM32_FUNCTION(13, "FMC_D10"),
+ STM32_FUNCTION(15, "LCD_DE"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(78, "PE14"),
+ STM32_FUNCTION(0, "GPIOE14"),
+ STM32_FUNCTION(2, "TIM1_CH4"),
+ STM32_FUNCTION(6, "SPI4_MOSI"),
+ STM32_FUNCTION(11, "SAI2_MCLK_B"),
+ STM32_FUNCTION(13, "FMC_D11"),
+ STM32_FUNCTION(15, "LCD_CLK"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(79, "PE15"),
+ STM32_FUNCTION(0, "GPIOE15"),
+ STM32_FUNCTION(2, "TIM1_BKIN"),
+ STM32_FUNCTION(13, "FMC_D12"),
+ STM32_FUNCTION(15, "LCD_R7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(80, "PF0"),
+ STM32_FUNCTION(0, "GPIOF0"),
+ STM32_FUNCTION(5, "I2C2_SDA"),
+ STM32_FUNCTION(13, "FMC_A0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(81, "PF1"),
+ STM32_FUNCTION(0, "GPIOF1"),
+ STM32_FUNCTION(5, "I2C2_SCL"),
+ STM32_FUNCTION(13, "FMC_A1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(82, "PF2"),
+ STM32_FUNCTION(0, "GPIOF2"),
+ STM32_FUNCTION(5, "I2C2_SMBA"),
+ STM32_FUNCTION(13, "FMC_A2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(83, "PF3"),
+ STM32_FUNCTION(0, "GPIOF3"),
+ STM32_FUNCTION(13, "FMC_A3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(84, "PF4"),
+ STM32_FUNCTION(0, "GPIOF4"),
+ STM32_FUNCTION(13, "FMC_A4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(85, "PF5"),
+ STM32_FUNCTION(0, "GPIOF5"),
+ STM32_FUNCTION(13, "FMC_A5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(86, "PF6"),
+ STM32_FUNCTION(0, "GPIOF6"),
+ STM32_FUNCTION(4, "TIM10_CH1"),
+ STM32_FUNCTION(6, "SPI5_NSS"),
+ STM32_FUNCTION(7, "SAI1_SD_B"),
+ STM32_FUNCTION(9, "UART7_RX"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_IO3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(87, "PF7"),
+ STM32_FUNCTION(0, "GPIOF7"),
+ STM32_FUNCTION(4, "TIM11_CH1"),
+ STM32_FUNCTION(6, "SPI5_SCK"),
+ STM32_FUNCTION(7, "SAI1_MCLK_B"),
+ STM32_FUNCTION(9, "UART7_TX"),
+ STM32_FUNCTION(10, "QUADSPI_BK1_IO2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(88, "PF8"),
+ STM32_FUNCTION(0, "GPIOF8"),
+ STM32_FUNCTION(6, "SPI5_MISO"),
+ STM32_FUNCTION(7, "SAI1_SCK_B"),
+ STM32_FUNCTION(9, "UART7_RTS"),
+ STM32_FUNCTION(10, "TIM13_CH1"),
+ STM32_FUNCTION(11, "QUADSPI_BK1_IO0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(89, "PF9"),
+ STM32_FUNCTION(0, "GPIOF9"),
+ STM32_FUNCTION(6, "SPI5_MOSI"),
+ STM32_FUNCTION(7, "SAI1_FS_B"),
+ STM32_FUNCTION(9, "UART7_CTS"),
+ STM32_FUNCTION(10, "TIM14_CH1"),
+ STM32_FUNCTION(11, "QUADSPI_BK1_IO1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(90, "PF10"),
+ STM32_FUNCTION(0, "GPIOF10"),
+ STM32_FUNCTION(14, "DCMI_D11"),
+ STM32_FUNCTION(15, "LCD_DE"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(91, "PF11"),
+ STM32_FUNCTION(0, "GPIOF11"),
+ STM32_FUNCTION(6, "SPI5_MOSI"),
+ STM32_FUNCTION(11, "SAI2_SD_B"),
+ STM32_FUNCTION(13, "FMC_SDNRAS"),
+ STM32_FUNCTION(14, "DCMI_D12"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(92, "PF12"),
+ STM32_FUNCTION(0, "GPIOF12"),
+ STM32_FUNCTION(13, "FMC_A6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(93, "PF13"),
+ STM32_FUNCTION(0, "GPIOF13"),
+ STM32_FUNCTION(5, "I2C4_SMBA"),
+ STM32_FUNCTION(13, "FMC_A7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(94, "PF14"),
+ STM32_FUNCTION(0, "GPIOF14"),
+ STM32_FUNCTION(5, "I2C4_SCL"),
+ STM32_FUNCTION(13, "FMC_A8"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(95, "PF15"),
+ STM32_FUNCTION(0, "GPIOF15"),
+ STM32_FUNCTION(5, "I2C4_SDA"),
+ STM32_FUNCTION(13, "FMC_A9"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(96, "PG0"),
+ STM32_FUNCTION(0, "GPIOG0"),
+ STM32_FUNCTION(13, "FMC_A10"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(97, "PG1"),
+ STM32_FUNCTION(0, "GPIOG1"),
+ STM32_FUNCTION(13, "FMC_A11"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(98, "PG2"),
+ STM32_FUNCTION(0, "GPIOG2"),
+ STM32_FUNCTION(13, "FMC_A12"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(99, "PG3"),
+ STM32_FUNCTION(0, "GPIOG3"),
+ STM32_FUNCTION(13, "FMC_A13"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(100, "PG4"),
+ STM32_FUNCTION(0, "GPIOG4"),
+ STM32_FUNCTION(13, "FMC_A14 FMC_BA0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(101, "PG5"),
+ STM32_FUNCTION(0, "GPIOG5"),
+ STM32_FUNCTION(13, "FMC_A15 FMC_BA1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(102, "PG6"),
+ STM32_FUNCTION(0, "GPIOG6"),
+ STM32_FUNCTION(14, "DCMI_D12"),
+ STM32_FUNCTION(15, "LCD_R7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(103, "PG7"),
+ STM32_FUNCTION(0, "GPIOG7"),
+ STM32_FUNCTION(9, "USART6_CK"),
+ STM32_FUNCTION(13, "FMC_INT"),
+ STM32_FUNCTION(14, "DCMI_D13"),
+ STM32_FUNCTION(15, "LCD_CLK"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(104, "PG8"),
+ STM32_FUNCTION(0, "GPIOG8"),
+ STM32_FUNCTION(6, "SPI6_NSS"),
+ STM32_FUNCTION(8, "SPDIFRX_IN2"),
+ STM32_FUNCTION(9, "USART6_RTS"),
+ STM32_FUNCTION(12, "ETH_PPS_OUT"),
+ STM32_FUNCTION(13, "FMC_SDCLK"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(105, "PG9"),
+ STM32_FUNCTION(0, "GPIOG9"),
+ STM32_FUNCTION(8, "SPDIFRX_IN3"),
+ STM32_FUNCTION(9, "USART6_RX"),
+ STM32_FUNCTION(10, "QUADSPI_BK2_IO2"),
+ STM32_FUNCTION(11, "SAI2_FS_B"),
+ STM32_FUNCTION(13, "FMC_NE2 FMC_NCE"),
+ STM32_FUNCTION(14, "DCMI_VSYNC"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(106, "PG10"),
+ STM32_FUNCTION(0, "GPIOG10"),
+ STM32_FUNCTION(10, "LCD_G3"),
+ STM32_FUNCTION(11, "SAI2_SD_B"),
+ STM32_FUNCTION(13, "FMC_NE3"),
+ STM32_FUNCTION(14, "DCMI_D2"),
+ STM32_FUNCTION(15, "LCD_B2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(107, "PG11"),
+ STM32_FUNCTION(0, "GPIOG11"),
+ STM32_FUNCTION(8, "SPDIFRX_IN0"),
+ STM32_FUNCTION(12, "ETH_MII_TX_EN ETH_RMII_TX_EN"),
+ STM32_FUNCTION(14, "DCMI_D3"),
+ STM32_FUNCTION(15, "LCD_B3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(108, "PG12"),
+ STM32_FUNCTION(0, "GPIOG12"),
+ STM32_FUNCTION(4, "LPTIM1_IN1"),
+ STM32_FUNCTION(6, "SPI6_MISO"),
+ STM32_FUNCTION(8, "SPDIFRX_IN1"),
+ STM32_FUNCTION(9, "USART6_RTS"),
+ STM32_FUNCTION(10, "LCD_B4"),
+ STM32_FUNCTION(13, "FMC_NE4"),
+ STM32_FUNCTION(15, "LCD_B1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(109, "PG13"),
+ STM32_FUNCTION(0, "GPIOG13"),
+ STM32_FUNCTION(1, "TRACED0"),
+ STM32_FUNCTION(4, "LPTIM1_OUT"),
+ STM32_FUNCTION(6, "SPI6_SCK"),
+ STM32_FUNCTION(9, "USART6_CTS"),
+ STM32_FUNCTION(12, "ETH_MII_TXD0 ETH_RMII_TXD0"),
+ STM32_FUNCTION(13, "FMC_A24"),
+ STM32_FUNCTION(15, "LCD_R0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(110, "PG14"),
+ STM32_FUNCTION(0, "GPIOG14"),
+ STM32_FUNCTION(1, "TRACED1"),
+ STM32_FUNCTION(4, "LPTIM1_ETR"),
+ STM32_FUNCTION(6, "SPI6_MOSI"),
+ STM32_FUNCTION(9, "USART6_TX"),
+ STM32_FUNCTION(10, "QUADSPI_BK2_IO3"),
+ STM32_FUNCTION(12, "ETH_MII_TXD1 ETH_RMII_TXD1"),
+ STM32_FUNCTION(13, "FMC_A25"),
+ STM32_FUNCTION(15, "LCD_B0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(111, "PG15"),
+ STM32_FUNCTION(0, "GPIOG15"),
+ STM32_FUNCTION(9, "USART6_CTS"),
+ STM32_FUNCTION(13, "FMC_SDNCAS"),
+ STM32_FUNCTION(14, "DCMI_D13"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(112, "PH0"),
+ STM32_FUNCTION(0, "GPIOH0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(113, "PH1"),
+ STM32_FUNCTION(0, "GPIOH1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(114, "PH2"),
+ STM32_FUNCTION(0, "GPIOH2"),
+ STM32_FUNCTION(4, "LPTIM1_IN2"),
+ STM32_FUNCTION(10, "QUADSPI_BK2_IO0"),
+ STM32_FUNCTION(11, "SAI2_SCK_B"),
+ STM32_FUNCTION(12, "ETH_MII_CRS"),
+ STM32_FUNCTION(13, "FMC_SDCKE0"),
+ STM32_FUNCTION(15, "LCD_R0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(115, "PH3"),
+ STM32_FUNCTION(0, "GPIOH3"),
+ STM32_FUNCTION(10, "QUADSPI_BK2_IO1"),
+ STM32_FUNCTION(11, "SAI2_MCLK_B"),
+ STM32_FUNCTION(12, "ETH_MII_COL"),
+ STM32_FUNCTION(13, "FMC_SDNE0"),
+ STM32_FUNCTION(15, "LCD_R1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(116, "PH4"),
+ STM32_FUNCTION(0, "GPIOH4"),
+ STM32_FUNCTION(5, "I2C2_SCL"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_NXT"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(117, "PH5"),
+ STM32_FUNCTION(0, "GPIOH5"),
+ STM32_FUNCTION(5, "I2C2_SDA"),
+ STM32_FUNCTION(6, "SPI5_NSS"),
+ STM32_FUNCTION(13, "FMC_SDNWE"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(118, "PH6"),
+ STM32_FUNCTION(0, "GPIOH6"),
+ STM32_FUNCTION(5, "I2C2_SMBA"),
+ STM32_FUNCTION(6, "SPI5_SCK"),
+ STM32_FUNCTION(10, "TIM12_CH1"),
+ STM32_FUNCTION(12, "ETH_MII_RXD2"),
+ STM32_FUNCTION(13, "FMC_SDNE1"),
+ STM32_FUNCTION(14, "DCMI_D8"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(119, "PH7"),
+ STM32_FUNCTION(0, "GPIOH7"),
+ STM32_FUNCTION(5, "I2C3_SCL"),
+ STM32_FUNCTION(6, "SPI5_MISO"),
+ STM32_FUNCTION(12, "ETH_MII_RXD3"),
+ STM32_FUNCTION(13, "FMC_SDCKE1"),
+ STM32_FUNCTION(14, "DCMI_D9"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(120, "PH8"),
+ STM32_FUNCTION(0, "GPIOH8"),
+ STM32_FUNCTION(5, "I2C3_SDA"),
+ STM32_FUNCTION(13, "FMC_D16"),
+ STM32_FUNCTION(14, "DCMI_HSYNC"),
+ STM32_FUNCTION(15, "LCD_R2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(121, "PH9"),
+ STM32_FUNCTION(0, "GPIOH9"),
+ STM32_FUNCTION(5, "I2C3_SMBA"),
+ STM32_FUNCTION(10, "TIM12_CH2"),
+ STM32_FUNCTION(13, "FMC_D17"),
+ STM32_FUNCTION(14, "DCMI_D0"),
+ STM32_FUNCTION(15, "LCD_R3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(122, "PH10"),
+ STM32_FUNCTION(0, "GPIOH10"),
+ STM32_FUNCTION(3, "TIM5_CH1"),
+ STM32_FUNCTION(5, "I2C4_SMBA"),
+ STM32_FUNCTION(13, "FMC_D18"),
+ STM32_FUNCTION(14, "DCMI_D1"),
+ STM32_FUNCTION(15, "LCD_R4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(123, "PH11"),
+ STM32_FUNCTION(0, "GPIOH11"),
+ STM32_FUNCTION(3, "TIM5_CH2"),
+ STM32_FUNCTION(5, "I2C4_SCL"),
+ STM32_FUNCTION(13, "FMC_D19"),
+ STM32_FUNCTION(14, "DCMI_D2"),
+ STM32_FUNCTION(15, "LCD_R5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(124, "PH12"),
+ STM32_FUNCTION(0, "GPIOH12"),
+ STM32_FUNCTION(3, "TIM5_CH3"),
+ STM32_FUNCTION(5, "I2C4_SDA"),
+ STM32_FUNCTION(13, "FMC_D20"),
+ STM32_FUNCTION(14, "DCMI_D3"),
+ STM32_FUNCTION(15, "LCD_R6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(125, "PH13"),
+ STM32_FUNCTION(0, "GPIOH13"),
+ STM32_FUNCTION(4, "TIM8_CH1N"),
+ STM32_FUNCTION(10, "CAN1_TX"),
+ STM32_FUNCTION(13, "FMC_D21"),
+ STM32_FUNCTION(15, "LCD_G2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(126, "PH14"),
+ STM32_FUNCTION(0, "GPIOH14"),
+ STM32_FUNCTION(4, "TIM8_CH2N"),
+ STM32_FUNCTION(13, "FMC_D22"),
+ STM32_FUNCTION(14, "DCMI_D4"),
+ STM32_FUNCTION(15, "LCD_G3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(127, "PH15"),
+ STM32_FUNCTION(0, "GPIOH15"),
+ STM32_FUNCTION(4, "TIM8_CH3N"),
+ STM32_FUNCTION(13, "FMC_D23"),
+ STM32_FUNCTION(14, "DCMI_D11"),
+ STM32_FUNCTION(15, "LCD_G4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(128, "PI0"),
+ STM32_FUNCTION(0, "GPIOI0"),
+ STM32_FUNCTION(3, "TIM5_CH4"),
+ STM32_FUNCTION(6, "SPI2_NSS I2S2_WS"),
+ STM32_FUNCTION(13, "FMC_D24"),
+ STM32_FUNCTION(14, "DCMI_D13"),
+ STM32_FUNCTION(15, "LCD_G5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(129, "PI1"),
+ STM32_FUNCTION(0, "GPIOI1"),
+ STM32_FUNCTION(4, "TIM8_BKIN2"),
+ STM32_FUNCTION(6, "SPI2_SCK I2S2_CK"),
+ STM32_FUNCTION(13, "FMC_D25"),
+ STM32_FUNCTION(14, "DCMI_D8"),
+ STM32_FUNCTION(15, "LCD_G6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(130, "PI2"),
+ STM32_FUNCTION(0, "GPIOI2"),
+ STM32_FUNCTION(4, "TIM8_CH4"),
+ STM32_FUNCTION(6, "SPI2_MISO"),
+ STM32_FUNCTION(13, "FMC_D26"),
+ STM32_FUNCTION(14, "DCMI_D9"),
+ STM32_FUNCTION(15, "LCD_G7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(131, "PI3"),
+ STM32_FUNCTION(0, "GPIOI3"),
+ STM32_FUNCTION(4, "TIM8_ETR"),
+ STM32_FUNCTION(6, "SPI2_MOSI I2S2_SD"),
+ STM32_FUNCTION(13, "FMC_D27"),
+ STM32_FUNCTION(14, "DCMI_D10"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(132, "PI4"),
+ STM32_FUNCTION(0, "GPIOI4"),
+ STM32_FUNCTION(4, "TIM8_BKIN"),
+ STM32_FUNCTION(11, "SAI2_MCLK_A"),
+ STM32_FUNCTION(13, "FMC_NBL2"),
+ STM32_FUNCTION(14, "DCMI_D5"),
+ STM32_FUNCTION(15, "LCD_B4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(133, "PI5"),
+ STM32_FUNCTION(0, "GPIOI5"),
+ STM32_FUNCTION(4, "TIM8_CH1"),
+ STM32_FUNCTION(11, "SAI2_SCK_A"),
+ STM32_FUNCTION(13, "FMC_NBL3"),
+ STM32_FUNCTION(14, "DCMI_VSYNC"),
+ STM32_FUNCTION(15, "LCD_B5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(134, "PI6"),
+ STM32_FUNCTION(0, "GPIOI6"),
+ STM32_FUNCTION(4, "TIM8_CH2"),
+ STM32_FUNCTION(11, "SAI2_SD_A"),
+ STM32_FUNCTION(13, "FMC_D28"),
+ STM32_FUNCTION(14, "DCMI_D6"),
+ STM32_FUNCTION(15, "LCD_B6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(135, "PI7"),
+ STM32_FUNCTION(0, "GPIOI7"),
+ STM32_FUNCTION(4, "TIM8_CH3"),
+ STM32_FUNCTION(11, "SAI2_FS_A"),
+ STM32_FUNCTION(13, "FMC_D29"),
+ STM32_FUNCTION(14, "DCMI_D7"),
+ STM32_FUNCTION(15, "LCD_B7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(136, "PI8"),
+ STM32_FUNCTION(0, "GPIOI8"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(137, "PI9"),
+ STM32_FUNCTION(0, "GPIOI9"),
+ STM32_FUNCTION(10, "CAN1_RX"),
+ STM32_FUNCTION(13, "FMC_D30"),
+ STM32_FUNCTION(15, "LCD_VSYNC"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(138, "PI10"),
+ STM32_FUNCTION(0, "GPIOI10"),
+ STM32_FUNCTION(12, "ETH_MII_RX_ER"),
+ STM32_FUNCTION(13, "FMC_D31"),
+ STM32_FUNCTION(15, "LCD_HSYNC"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(139, "PI11"),
+ STM32_FUNCTION(0, "GPIOI11"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_DIR"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(140, "PI12"),
+ STM32_FUNCTION(0, "GPIOI12"),
+ STM32_FUNCTION(15, "LCD_HSYNC"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(141, "PI13"),
+ STM32_FUNCTION(0, "GPIOI13"),
+ STM32_FUNCTION(15, "LCD_VSYNC"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(142, "PI14"),
+ STM32_FUNCTION(0, "GPIOI14"),
+ STM32_FUNCTION(15, "LCD_CLK"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(143, "PI15"),
+ STM32_FUNCTION(0, "GPIOI15"),
+ STM32_FUNCTION(15, "LCD_R0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(144, "PJ0"),
+ STM32_FUNCTION(0, "GPIOJ0"),
+ STM32_FUNCTION(15, "LCD_R1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(145, "PJ1"),
+ STM32_FUNCTION(0, "GPIOJ1"),
+ STM32_FUNCTION(15, "LCD_R2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(146, "PJ2"),
+ STM32_FUNCTION(0, "GPIOJ2"),
+ STM32_FUNCTION(15, "LCD_R3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(147, "PJ3"),
+ STM32_FUNCTION(0, "GPIOJ3"),
+ STM32_FUNCTION(15, "LCD_R4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(148, "PJ4"),
+ STM32_FUNCTION(0, "GPIOJ4"),
+ STM32_FUNCTION(15, "LCD_R5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(149, "PJ5"),
+ STM32_FUNCTION(0, "GPIOJ5"),
+ STM32_FUNCTION(15, "LCD_R6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(150, "PJ6"),
+ STM32_FUNCTION(0, "GPIOJ6"),
+ STM32_FUNCTION(15, "LCD_R7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(151, "PJ7"),
+ STM32_FUNCTION(0, "GPIOJ7"),
+ STM32_FUNCTION(15, "LCD_G0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(152, "PJ8"),
+ STM32_FUNCTION(0, "GPIOJ8"),
+ STM32_FUNCTION(15, "LCD_G1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(153, "PJ9"),
+ STM32_FUNCTION(0, "GPIOJ9"),
+ STM32_FUNCTION(15, "LCD_G2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(154, "PJ10"),
+ STM32_FUNCTION(0, "GPIOJ10"),
+ STM32_FUNCTION(15, "LCD_G3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(155, "PJ11"),
+ STM32_FUNCTION(0, "GPIOJ11"),
+ STM32_FUNCTION(15, "LCD_G4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(156, "PJ12"),
+ STM32_FUNCTION(0, "GPIOJ12"),
+ STM32_FUNCTION(15, "LCD_B0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(157, "PJ13"),
+ STM32_FUNCTION(0, "GPIOJ13"),
+ STM32_FUNCTION(15, "LCD_B1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(158, "PJ14"),
+ STM32_FUNCTION(0, "GPIOJ14"),
+ STM32_FUNCTION(15, "LCD_B2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(159, "PJ15"),
+ STM32_FUNCTION(0, "GPIOJ15"),
+ STM32_FUNCTION(15, "LCD_B3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(160, "PK0"),
+ STM32_FUNCTION(0, "GPIOK0"),
+ STM32_FUNCTION(15, "LCD_G5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(161, "PK1"),
+ STM32_FUNCTION(0, "GPIOK1"),
+ STM32_FUNCTION(15, "LCD_G6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(162, "PK2"),
+ STM32_FUNCTION(0, "GPIOK2"),
+ STM32_FUNCTION(15, "LCD_G7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(163, "PK3"),
+ STM32_FUNCTION(0, "GPIOK3"),
+ STM32_FUNCTION(15, "LCD_B4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(164, "PK4"),
+ STM32_FUNCTION(0, "GPIOK4"),
+ STM32_FUNCTION(15, "LCD_B5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(165, "PK5"),
+ STM32_FUNCTION(0, "GPIOK5"),
+ STM32_FUNCTION(15, "LCD_B6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(166, "PK6"),
+ STM32_FUNCTION(0, "GPIOK6"),
+ STM32_FUNCTION(15, "LCD_B7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(167, "PK7"),
+ STM32_FUNCTION(0, "GPIOK7"),
+ STM32_FUNCTION(15, "LCD_DE"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+};
+
+static struct stm32_pinctrl_match_data stm32f746_match_data = {
+ .pins = stm32f746_pins,
+ .npins = ARRAY_SIZE(stm32f746_pins),
+};
+
+static const struct of_device_id stm32f746_pctrl_match[] = {
+ {
+ .compatible = "st,stm32f746-pinctrl",
+ .data = &stm32f746_match_data,
+ },
+ { }
+};
+
+static struct platform_driver stm32f746_pinctrl_driver = {
+ .probe = stm32_pctl_probe,
+ .driver = {
+ .name = "stm32f746-pinctrl",
+ .of_match_table = stm32f746_pctrl_match,
+ },
+};
+builtin_platform_driver(stm32f746_pinctrl_driver);
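For reference, a minimal sketch of how a probe routine can recover the per-SoC data registered through the of_device_id table above. stm32_pctl_probe() itself is not part of this diff, so the body below is only an illustration of the of_device_get_match_data() pattern; the function name is hypothetical.

#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Hypothetical probe body; shows the match-data lookup only. */
static int example_pctl_probe(struct platform_device *pdev)
{
	const struct stm32_pinctrl_match_data *match_data;

	/* Returns the .data of the of_device_id entry that matched. */
	match_data = of_device_get_match_data(&pdev->dev);
	if (!match_data)
		return -EINVAL;

	/* match_data->pins / match_data->npins describe the pin space. */
	return 0;
}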
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
index 51fbf8530..f9d661e5c 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
@@ -180,17 +180,17 @@ static const struct sunxi_desc_pin sun8i_a23_pins[] = {
SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 14),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand"), /* DQ6 */
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ6 */
SUNXI_FUNCTION(0x3, "mmc2")), /* D6 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 15),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand"), /* DQ7 */
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ7 */
SUNXI_FUNCTION(0x3, "mmc2")), /* D7 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 16),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand"), /* DQS */
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQS */
SUNXI_FUNCTION(0x3, "mmc2")), /* RST */
SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 17),
SUNXI_FUNCTION(0x0, "gpio_in"),
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
index 584cdedea..3131cac2b 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
@@ -140,17 +140,17 @@ static const struct sunxi_desc_pin sun8i_a33_pins[] = {
SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 14),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand"), /* DQ6 */
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ6 */
SUNXI_FUNCTION(0x3, "mmc2")), /* D6 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 15),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand"), /* DQ7 */
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ7 */
SUNXI_FUNCTION(0x3, "mmc2")), /* D7 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 16),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand"), /* DQS */
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQS */
SUNXI_FUNCTION(0x3, "mmc2")), /* RST */
/* Hole */
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 2),
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
index 11760bbe9..26a2ad3b6 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
@@ -219,17 +219,17 @@ static const struct sunxi_desc_pin sun8i_h3_pins[] = {
SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 14),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand"), /* DQ6 */
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ6 */
SUNXI_FUNCTION(0x3, "mmc2")), /* D6 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 15),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand"), /* DQ7 */
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ7 */
SUNXI_FUNCTION(0x3, "mmc2")), /* D7 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 16),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand"), /* DQS */
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQS */
SUNXI_FUNCTION(0x3, "mmc2")), /* RST */
/* Hole */
SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 0),
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
index 6e82b290c..277622b4b 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -632,11 +632,11 @@ static void tegra_pinctrl_clear_parked_bits(struct tegra_pmx *pmx)
u32 val;
for (i = 0; i < pmx->soc->ngroups; ++i) {
- if (pmx->soc->groups[i].parked_reg >= 0) {
- g = &pmx->soc->groups[i];
- val = pmx_readl(pmx, g->parked_bank, g->parked_reg);
+ g = &pmx->soc->groups[i];
+ if (g->parked_bit >= 0) {
+ val = pmx_readl(pmx, g->mux_bank, g->mux_reg);
val &= ~(1 << g->parked_bit);
- pmx_writel(pmx, val, g->parked_bank, g->parked_reg);
+ pmx_writel(pmx, val, g->mux_bank, g->mux_reg);
}
}
}
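The rewrite above folds the parked flag into the mux register: instead of a dedicated parked_reg/parked_bank pair, each group now carries a signed parked_bit inside mux_reg, with -1 marking groups that have no parking support (the header and per-SoC tables below are updated to match). A minimal standalone sketch of that convention, using illustrative names:

#include <stdio.h>

struct example_group {
	int mux_reg;    /* existing mux register offset */
	int parked_bit; /* bit within mux_reg; -1 if unsupported */
};

/* Clear the parked bit, if the group has one, preserving other bits. */
static unsigned int clear_parked(unsigned int regval,
				 const struct example_group *g)
{
	if (g->parked_bit < 0)
		return regval;
	return regval & ~(1u << g->parked_bit);
}

int main(void)
{
	const struct example_group g = { .mux_reg = 0x3000, .parked_bit = 5 };

	printf("%#x\n", clear_parked(0x7f, &g)); /* bit 5 cleared: 0x5f */
	return 0;
}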
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.h b/drivers/pinctrl/tegra/pinctrl-tegra.h
index d2ced1738..33b17cb14 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.h
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.h
@@ -93,9 +93,7 @@ struct tegra_function {
* @tri_reg: Tri-state register offset.
* @tri_bank: Tri-state register bank.
* @tri_bit: Tri-state register bit.
- * @parked_reg: Parked register offset. -1 if unsupported.
- * @parked_bank: Parked register bank. 0 if unsupported.
- * @parked_bit: Parked register bit. 0 if unsupported.
+ * @parked_bit: Parked register bit. -1 if unsupported.
* @einput_bit: Enable-input register bit.
* @odrain_bit: Open-drain register bit.
* @lock_bit: Lock register bit.
@@ -138,12 +136,10 @@ struct tegra_pingroup {
s16 pupd_reg;
s16 tri_reg;
s16 drv_reg;
- s16 parked_reg;
u32 mux_bank:2;
u32 pupd_bank:2;
u32 tri_bank:2;
u32 drv_bank:2;
- u32 parked_bank:2;
s32 mux_bit:6;
s32 pupd_bit:6;
s32 tri_bit:6;
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra114.c b/drivers/pinctrl/tegra/pinctrl-tegra114.c
index 4851d169f..952132ce5 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra114.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra114.c
@@ -1578,7 +1578,7 @@ static struct tegra_function tegra114_functions[] = {
.lock_bit = 7, \
.ioreset_bit = PINGROUP_BIT_##ior(8), \
.rcv_sel_bit = PINGROUP_BIT_##rcv_sel(9), \
- .parked_reg = -1, \
+ .parked_bit = -1, \
.drv_reg = -1, \
}
@@ -1599,7 +1599,7 @@ static struct tegra_function tegra114_functions[] = {
.rcv_sel_bit = -1, \
.drv_reg = DRV_PINGROUP_REG(r), \
.drv_bank = 0, \
- .parked_reg = -1, \
+ .parked_bit = -1, \
.hsm_bit = hsm_b, \
.schmitt_bit = schmitt_b, \
.lpmd_bit = lpmd_b, \
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra124.c b/drivers/pinctrl/tegra/pinctrl-tegra124.c
index a0ce723a9..bca239e3a 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra124.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra124.c
@@ -1747,7 +1747,7 @@ static struct tegra_function tegra124_functions[] = {
.lock_bit = 7, \
.ioreset_bit = PINGROUP_BIT_##ior(8), \
.rcv_sel_bit = PINGROUP_BIT_##rcv_sel(9), \
- .parked_reg = -1, \
+ .parked_bit = -1, \
.drv_reg = -1, \
}
@@ -1768,7 +1768,7 @@ static struct tegra_function tegra124_functions[] = {
.rcv_sel_bit = -1, \
.drv_reg = DRV_PINGROUP_REG(r), \
.drv_bank = 0, \
- .parked_reg = -1, \
+ .parked_bit = -1, \
.hsm_bit = hsm_b, \
.schmitt_bit = schmitt_b, \
.lpmd_bit = lpmd_b, \
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra20.c b/drivers/pinctrl/tegra/pinctrl-tegra20.c
index 09bad6980..ad62451a5 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra20.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra20.c
@@ -1994,7 +1994,7 @@ static struct tegra_function tegra20_functions[] = {
.tri_reg = ((tri_r) - TRISTATE_REG_A), \
.tri_bank = 0, \
.tri_bit = tri_b, \
- .parked_reg = -1, \
+ .parked_bit = -1, \
.einput_bit = -1, \
.odrain_bit = -1, \
.lock_bit = -1, \
@@ -2014,7 +2014,7 @@ static struct tegra_function tegra20_functions[] = {
.pupd_bank = 2, \
.pupd_bit = pupd_b, \
.drv_reg = -1, \
- .parked_reg = -1, \
+ .parked_bit = -1, \
}
/* Pin groups for drive strength registers (configurable version) */
@@ -2030,7 +2030,7 @@ static struct tegra_function tegra20_functions[] = {
.tri_reg = -1, \
.drv_reg = ((r) - PINGROUP_REG_A), \
.drv_bank = 3, \
- .parked_reg = -1, \
+ .parked_bit = -1, \
.hsm_bit = hsm_b, \
.schmitt_bit = schmitt_b, \
.lpmd_bit = lpmd_b, \
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra210.c b/drivers/pinctrl/tegra/pinctrl-tegra210.c
index 2d856af38..2b70e93da 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra210.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra210.c
@@ -1310,8 +1310,6 @@ static struct tegra_function tegra210_functions[] = {
.lock_bit = 7, \
.ioreset_bit = -1, \
.rcv_sel_bit = PINGROUP_BIT_##e_io_hv(10), \
- .parked_reg = PINGROUP_REG(r), \
- .parked_bank = 1, \
.parked_bit = 5, \
.hsm_bit = PINGROUP_BIT_##hsm(9), \
.schmitt_bit = 12, \
@@ -1345,7 +1343,7 @@ static struct tegra_function tegra210_functions[] = {
.rcv_sel_bit = -1, \
.drv_reg = DRV_PINGROUP_REG(r), \
.drv_bank = 0, \
- .parked_reg = -1, \
+ .parked_bit = -1, \
.hsm_bit = -1, \
.schmitt_bit = -1, \
.lpmd_bit = -1, \
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra30.c b/drivers/pinctrl/tegra/pinctrl-tegra30.c
index fb7817fea..474ac6daf 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra30.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra30.c
@@ -2139,7 +2139,7 @@ static struct tegra_function tegra30_functions[] = {
.lock_bit = 7, \
.ioreset_bit = PINGROUP_BIT_##ior(8), \
.rcv_sel_bit = -1, \
- .parked_reg = -1, \
+ .parked_bit = -1, \
.drv_reg = -1, \
}
@@ -2160,7 +2160,7 @@ static struct tegra_function tegra30_functions[] = {
.rcv_sel_bit = -1, \
.drv_reg = DRV_PINGROUP_REG(r), \
.drv_bank = 0, \
- .parked_reg = -1, \
+ .parked_bit = -1, \
.hsm_bit = hsm_b, \
.schmitt_bit = schmitt_b, \
.lpmd_bit = lpmd_b, \
diff --git a/drivers/pinctrl/uniphier/Kconfig b/drivers/pinctrl/uniphier/Kconfig
index 0b40ded57..e077a9ec2 100644
--- a/drivers/pinctrl/uniphier/Kconfig
+++ b/drivers/pinctrl/uniphier/Kconfig
@@ -10,26 +10,34 @@ if PINCTRL_UNIPHIER
config PINCTRL_UNIPHIER_LD4
tristate "UniPhier PH1-LD4 SoC pinctrl driver"
- default y
+ default ARM
config PINCTRL_UNIPHIER_PRO4
tristate "UniPhier PH1-Pro4 SoC pinctrl driver"
- default y
+ default ARM
config PINCTRL_UNIPHIER_SLD8
tristate "UniPhier PH1-sLD8 SoC pinctrl driver"
- default y
+ default ARM
config PINCTRL_UNIPHIER_PRO5
tristate "UniPhier PH1-Pro5 SoC pinctrl driver"
- default y
+ default ARM
config PINCTRL_UNIPHIER_PXS2
tristate "UniPhier ProXstream2 SoC pinctrl driver"
- default y
+ default ARM
config PINCTRL_UNIPHIER_LD6B
tristate "UniPhier PH1-LD6b SoC pinctrl driver"
- default y
+ default ARM
+
+config PINCTRL_UNIPHIER_LD11
+ tristate "UniPhier PH1-LD11 SoC pinctrl driver"
+ default ARM64
+
+config PINCTRL_UNIPHIER_LD20
+ tristate "UniPhier PH1-LD20 SoC pinctrl driver"
+ default ARM64
endif
diff --git a/drivers/pinctrl/uniphier/Makefile b/drivers/pinctrl/uniphier/Makefile
index 3b8f9ee0b..9f4bc8aa6 100644
--- a/drivers/pinctrl/uniphier/Makefile
+++ b/drivers/pinctrl/uniphier/Makefile
@@ -6,3 +6,5 @@ obj-$(CONFIG_PINCTRL_UNIPHIER_SLD8) += pinctrl-uniphier-sld8.o
obj-$(CONFIG_PINCTRL_UNIPHIER_PRO5) += pinctrl-uniphier-pro5.o
obj-$(CONFIG_PINCTRL_UNIPHIER_PXS2) += pinctrl-uniphier-pxs2.o
obj-$(CONFIG_PINCTRL_UNIPHIER_LD6B) += pinctrl-uniphier-ld6b.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_LD11) += pinctrl-uniphier-ld11.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_LD20) += pinctrl-uniphier-ld20.o
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
index 967400971..9b2ee717b 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
@@ -14,6 +14,7 @@
#include <linux/export.h>
#include <linux/mfd/syscon.h>
+#include <linux/of.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinctrl.h>
@@ -26,8 +27,10 @@
#include "pinctrl-uniphier.h"
struct uniphier_pinctrl_priv {
+ struct pinctrl_desc pctldesc;
struct pinctrl_dev *pctldev;
struct regmap *regmap;
+ unsigned int regbase;
struct uniphier_pinctrl_socdata *socdata;
};
@@ -63,16 +66,22 @@ static int uniphier_pctl_get_group_pins(struct pinctrl_dev *pctldev,
static void uniphier_pctl_pin_dbg_show(struct pinctrl_dev *pctldev,
struct seq_file *s, unsigned offset)
{
- const struct pinctrl_pin_desc *pin = &pctldev->desc->pins[offset];
- const char *pull_dir, *drv_str;
+ const struct pin_desc *desc = pin_desc_get(pctldev, offset);
+ const char *pull_dir, *drv_type;
- switch (uniphier_pin_get_pull_dir(pin->drv_data)) {
+ switch (uniphier_pin_get_pull_dir(desc->drv_data)) {
case UNIPHIER_PIN_PULL_UP:
pull_dir = "UP";
break;
case UNIPHIER_PIN_PULL_DOWN:
pull_dir = "DOWN";
break;
+ case UNIPHIER_PIN_PULL_UP_FIXED:
+ pull_dir = "UP(FIXED)";
+ break;
+ case UNIPHIER_PIN_PULL_DOWN_FIXED:
+ pull_dir = "DOWN(FIXED)";
+ break;
case UNIPHIER_PIN_PULL_NONE:
pull_dir = "NONE";
break;
@@ -80,30 +89,33 @@ static void uniphier_pctl_pin_dbg_show(struct pinctrl_dev *pctldev,
BUG();
}
- switch (uniphier_pin_get_drv_str(pin->drv_data)) {
- case UNIPHIER_PIN_DRV_4_8:
- drv_str = "4/8(mA)";
+ switch (uniphier_pin_get_drv_type(desc->drv_data)) {
+ case UNIPHIER_PIN_DRV_1BIT:
+ drv_type = "4/8(mA)";
+ break;
+ case UNIPHIER_PIN_DRV_2BIT:
+ drv_type = "8/12/16/20(mA)";
break;
- case UNIPHIER_PIN_DRV_8_12_16_20:
- drv_str = "8/12/16/20(mA)";
+ case UNIPHIER_PIN_DRV_3BIT:
+ drv_type = "4/5/7/9/11/12/14/16(mA)";
break;
- case UNIPHIER_PIN_DRV_FIXED_4:
- drv_str = "4(mA)";
+ case UNIPHIER_PIN_DRV_FIXED4:
+ drv_type = "4(mA)";
break;
- case UNIPHIER_PIN_DRV_FIXED_5:
- drv_str = "5(mA)";
+ case UNIPHIER_PIN_DRV_FIXED5:
+ drv_type = "5(mA)";
break;
- case UNIPHIER_PIN_DRV_FIXED_8:
- drv_str = "8(mA)";
+ case UNIPHIER_PIN_DRV_FIXED8:
+ drv_type = "8(mA)";
break;
case UNIPHIER_PIN_DRV_NONE:
- drv_str = "NONE";
+ drv_type = "NONE";
break;
default:
BUG();
}
- seq_printf(s, " PULL_DIR=%s DRV_STR=%s", pull_dir, drv_str);
+ seq_printf(s, " PULL_DIR=%s DRV_TYPE=%s", pull_dir, drv_type);
}
#endif
@@ -119,12 +131,12 @@ static const struct pinctrl_ops uniphier_pctlops = {
};
static int uniphier_conf_pin_bias_get(struct pinctrl_dev *pctldev,
- const struct pinctrl_pin_desc *pin,
+ const struct pin_desc *desc,
enum pin_config_param param)
{
struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
enum uniphier_pin_pull_dir pull_dir =
- uniphier_pin_get_pull_dir(pin->drv_data);
+ uniphier_pin_get_pull_dir(desc->drv_data);
unsigned int pupdctrl, reg, shift, val;
unsigned int expected = 1;
int ret;
@@ -154,12 +166,12 @@ static int uniphier_conf_pin_bias_get(struct pinctrl_dev *pctldev,
BUG();
}
- pupdctrl = uniphier_pin_get_pupdctrl(pin->drv_data);
+ pupdctrl = uniphier_pin_get_pupdctrl(desc->drv_data);
reg = UNIPHIER_PINCTRL_PUPDCTRL_BASE + pupdctrl / 32 * 4;
shift = pupdctrl % 32;
- ret = regmap_read(priv->regmap, reg, &val);
+ ret = regmap_read(priv->regmap, priv->regbase + reg, &val);
if (ret)
return ret;
@@ -169,34 +181,42 @@ static int uniphier_conf_pin_bias_get(struct pinctrl_dev *pctldev,
}
static int uniphier_conf_pin_drive_get(struct pinctrl_dev *pctldev,
- const struct pinctrl_pin_desc *pin,
+ const struct pin_desc *desc,
u16 *strength)
{
struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
- enum uniphier_pin_drv_str drv_str =
- uniphier_pin_get_drv_str(pin->drv_data);
- const unsigned int strength_4_8[] = {4, 8};
- const unsigned int strength_8_12_16_20[] = {8, 12, 16, 20};
+ enum uniphier_pin_drv_type type =
+ uniphier_pin_get_drv_type(desc->drv_data);
+ const unsigned int strength_1bit[] = {4, 8};
+ const unsigned int strength_2bit[] = {8, 12, 16, 20};
+ const unsigned int strength_3bit[] = {4, 5, 7, 9, 11, 12, 14, 16};
const unsigned int *supported_strength;
unsigned int drvctrl, reg, shift, mask, width, val;
int ret;
- switch (drv_str) {
- case UNIPHIER_PIN_DRV_4_8:
- supported_strength = strength_4_8;
+ switch (type) {
+ case UNIPHIER_PIN_DRV_1BIT:
+ supported_strength = strength_1bit;
+ reg = UNIPHIER_PINCTRL_DRVCTRL_BASE;
width = 1;
break;
- case UNIPHIER_PIN_DRV_8_12_16_20:
- supported_strength = strength_8_12_16_20;
+ case UNIPHIER_PIN_DRV_2BIT:
+ supported_strength = strength_2bit;
+ reg = UNIPHIER_PINCTRL_DRV2CTRL_BASE;
width = 2;
break;
- case UNIPHIER_PIN_DRV_FIXED_4:
+ case UNIPHIER_PIN_DRV_3BIT:
+ supported_strength = strength_3bit;
+ reg = UNIPHIER_PINCTRL_DRV3CTRL_BASE;
+ width = 4;
+ break;
+ case UNIPHIER_PIN_DRV_FIXED4:
*strength = 4;
return 0;
- case UNIPHIER_PIN_DRV_FIXED_5:
+ case UNIPHIER_PIN_DRV_FIXED5:
*strength = 5;
return 0;
- case UNIPHIER_PIN_DRV_FIXED_8:
+ case UNIPHIER_PIN_DRV_FIXED8:
*strength = 8;
return 0;
default:
@@ -204,17 +224,14 @@ static int uniphier_conf_pin_drive_get(struct pinctrl_dev *pctldev,
return -EINVAL;
}
- drvctrl = uniphier_pin_get_drvctrl(pin->drv_data);
+ drvctrl = uniphier_pin_get_drvctrl(desc->drv_data);
drvctrl *= width;
- reg = (width == 2) ? UNIPHIER_PINCTRL_DRV2CTRL_BASE :
- UNIPHIER_PINCTRL_DRVCTRL_BASE;
-
reg += drvctrl / 32 * 4;
shift = drvctrl % 32;
mask = (1U << width) - 1;
- ret = regmap_read(priv->regmap, reg, &val);
+ ret = regmap_read(priv->regmap, priv->regbase + reg, &val);
if (ret)
return ret;
@@ -224,10 +241,10 @@ static int uniphier_conf_pin_drive_get(struct pinctrl_dev *pctldev,
}
static int uniphier_conf_pin_input_enable_get(struct pinctrl_dev *pctldev,
- const struct pinctrl_pin_desc *pin)
+ const struct pin_desc *desc)
{
struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
- unsigned int iectrl = uniphier_pin_get_iectrl(pin->drv_data);
+ unsigned int iectrl = uniphier_pin_get_iectrl(desc->drv_data);
unsigned int val;
int ret;
@@ -235,7 +252,8 @@ static int uniphier_conf_pin_input_enable_get(struct pinctrl_dev *pctldev,
/* This pin is always input-enabled. */
return 0;
- ret = regmap_read(priv->regmap, UNIPHIER_PINCTRL_IECTRL, &val);
+ ret = regmap_read(priv->regmap,
+ priv->regbase + UNIPHIER_PINCTRL_IECTRL, &val);
if (ret)
return ret;
@@ -246,7 +264,7 @@ static int uniphier_conf_pin_config_get(struct pinctrl_dev *pctldev,
unsigned pin,
unsigned long *configs)
{
- const struct pinctrl_pin_desc *pin_desc = &pctldev->desc->pins[pin];
+ const struct pin_desc *desc = pin_desc_get(pctldev, pin);
enum pin_config_param param = pinconf_to_config_param(*configs);
bool has_arg = false;
u16 arg;
@@ -256,14 +274,14 @@ static int uniphier_conf_pin_config_get(struct pinctrl_dev *pctldev,
case PIN_CONFIG_BIAS_DISABLE:
case PIN_CONFIG_BIAS_PULL_UP:
case PIN_CONFIG_BIAS_PULL_DOWN:
- ret = uniphier_conf_pin_bias_get(pctldev, pin_desc, param);
+ ret = uniphier_conf_pin_bias_get(pctldev, desc, param);
break;
case PIN_CONFIG_DRIVE_STRENGTH:
- ret = uniphier_conf_pin_drive_get(pctldev, pin_desc, &arg);
+ ret = uniphier_conf_pin_drive_get(pctldev, desc, &arg);
has_arg = true;
break;
case PIN_CONFIG_INPUT_ENABLE:
- ret = uniphier_conf_pin_input_enable_get(pctldev, pin_desc);
+ ret = uniphier_conf_pin_input_enable_get(pctldev, desc);
break;
default:
/* unsupported parameter */
@@ -278,13 +296,12 @@ static int uniphier_conf_pin_config_get(struct pinctrl_dev *pctldev,
}
static int uniphier_conf_pin_bias_set(struct pinctrl_dev *pctldev,
- const struct pinctrl_pin_desc *pin,
- enum pin_config_param param,
- u16 arg)
+ const struct pin_desc *desc,
+ enum pin_config_param param, u16 arg)
{
struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
enum uniphier_pin_pull_dir pull_dir =
- uniphier_pin_get_pull_dir(pin->drv_data);
+ uniphier_pin_get_pull_dir(desc->drv_data);
unsigned int pupdctrl, reg, shift;
unsigned int val = 1;
@@ -295,8 +312,8 @@ static int uniphier_conf_pin_bias_set(struct pinctrl_dev *pctldev,
if (pull_dir == UNIPHIER_PIN_PULL_UP_FIXED ||
pull_dir == UNIPHIER_PIN_PULL_DOWN_FIXED) {
dev_err(pctldev->dev,
- "can not disable pull register for pin %u (%s)\n",
- pin->number, pin->name);
+ "can not disable pull register for pin %s\n",
+ desc->name);
return -EINVAL;
}
val = 0;
@@ -306,8 +323,8 @@ static int uniphier_conf_pin_bias_set(struct pinctrl_dev *pctldev,
return 0;
if (pull_dir != UNIPHIER_PIN_PULL_UP) {
dev_err(pctldev->dev,
- "pull-up is unsupported for pin %u (%s)\n",
- pin->number, pin->name);
+ "pull-up is unsupported for pin %s\n",
+ desc->name);
return -EINVAL;
}
if (arg == 0) {
@@ -320,8 +337,8 @@ static int uniphier_conf_pin_bias_set(struct pinctrl_dev *pctldev,
return 0;
if (pull_dir != UNIPHIER_PIN_PULL_DOWN) {
dev_err(pctldev->dev,
- "pull-down is unsupported for pin %u (%s)\n",
- pin->number, pin->name);
+ "pull-down is unsupported for pin %s\n",
+ desc->name);
return -EINVAL;
}
if (arg == 0) {
@@ -332,8 +349,8 @@ static int uniphier_conf_pin_bias_set(struct pinctrl_dev *pctldev,
case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
if (pull_dir == UNIPHIER_PIN_PULL_NONE) {
dev_err(pctldev->dev,
- "pull-up/down is unsupported for pin %u (%s)\n",
- pin->number, pin->name);
+ "pull-up/down is unsupported for pin %s\n",
+ desc->name);
return -EINVAL;
}
@@ -344,39 +361,48 @@ static int uniphier_conf_pin_bias_set(struct pinctrl_dev *pctldev,
BUG();
}
- pupdctrl = uniphier_pin_get_pupdctrl(pin->drv_data);
+ pupdctrl = uniphier_pin_get_pupdctrl(desc->drv_data);
reg = UNIPHIER_PINCTRL_PUPDCTRL_BASE + pupdctrl / 32 * 4;
shift = pupdctrl % 32;
- return regmap_update_bits(priv->regmap, reg, 1 << shift, val << shift);
+ return regmap_update_bits(priv->regmap, priv->regbase + reg,
+ 1 << shift, val << shift);
}
static int uniphier_conf_pin_drive_set(struct pinctrl_dev *pctldev,
- const struct pinctrl_pin_desc *pin,
+ const struct pin_desc *desc,
u16 strength)
{
struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
- enum uniphier_pin_drv_str drv_str =
- uniphier_pin_get_drv_str(pin->drv_data);
- const unsigned int strength_4_8[] = {4, 8, -1};
- const unsigned int strength_8_12_16_20[] = {8, 12, 16, 20, -1};
+ enum uniphier_pin_drv_type type =
+ uniphier_pin_get_drv_type(desc->drv_data);
+ const unsigned int strength_1bit[] = {4, 8, -1};
+ const unsigned int strength_2bit[] = {8, 12, 16, 20, -1};
+ const unsigned int strength_3bit[] = {4, 5, 7, 9, 11, 12, 14, 16, -1};
const unsigned int *supported_strength;
unsigned int drvctrl, reg, shift, mask, width, val;
- switch (drv_str) {
- case UNIPHIER_PIN_DRV_4_8:
- supported_strength = strength_4_8;
+ switch (type) {
+ case UNIPHIER_PIN_DRV_1BIT:
+ supported_strength = strength_1bit;
+ reg = UNIPHIER_PINCTRL_DRVCTRL_BASE;
width = 1;
break;
- case UNIPHIER_PIN_DRV_8_12_16_20:
- supported_strength = strength_8_12_16_20;
+ case UNIPHIER_PIN_DRV_2BIT:
+ supported_strength = strength_2bit;
+ reg = UNIPHIER_PINCTRL_DRV2CTRL_BASE;
width = 2;
break;
+ case UNIPHIER_PIN_DRV_3BIT:
+ supported_strength = strength_3bit;
+ reg = UNIPHIER_PINCTRL_DRV3CTRL_BASE;
+ width = 4;
+ break;
default:
dev_err(pctldev->dev,
- "cannot change drive strength for pin %u (%s)\n",
- pin->number, pin->name);
+ "cannot change drive strength for pin %s\n",
+ desc->name);
return -EINVAL;
}
@@ -387,49 +413,48 @@ static int uniphier_conf_pin_drive_set(struct pinctrl_dev *pctldev,
if (val == 0) {
dev_err(pctldev->dev,
- "unsupported drive strength %u mA for pin %u (%s)\n",
- strength, pin->number, pin->name);
+ "unsupported drive strength %u mA for pin %s\n",
+ strength, desc->name);
return -EINVAL;
}
val--;
- drvctrl = uniphier_pin_get_drvctrl(pin->drv_data);
+ drvctrl = uniphier_pin_get_drvctrl(desc->drv_data);
drvctrl *= width;
- reg = (width == 2) ? UNIPHIER_PINCTRL_DRV2CTRL_BASE :
- UNIPHIER_PINCTRL_DRVCTRL_BASE;
-
reg += drvctrl / 32 * 4;
shift = drvctrl % 32;
mask = (1U << width) - 1;
- return regmap_update_bits(priv->regmap, reg,
+ return regmap_update_bits(priv->regmap, priv->regbase + reg,
mask << shift, val << shift);
}
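
The strength-to-field-value loop is elided by the hunk above, but the val == 0 check followed by val-- suggests it rounds the requested current down to the nearest supported value. A standalone C sketch under that assumption (tables terminated by -1 as in the driver; the numbers are the 2-bit example values):

#include <stdio.h>

static int encode_strength(const unsigned int *table, unsigned int strength)
{
	int val;

	/* count entries <= strength; -1 (UINT_MAX) terminates the table */
	for (val = 0; table[val] != (unsigned int)-1; val++)
		if (table[val] > strength)
			break;
	return val - 1;	/* -1: below the smallest supported value */
}

int main(void)
{
	const unsigned int strength_2bit[] = {8, 12, 16, 20, (unsigned int)-1};
	unsigned int drvctrl = 7, width = 2;
	int val = encode_strength(strength_2bit, 16);	/* -> 2 (16 mA) */
	unsigned int bitpos = drvctrl * width;

	printf("val=%d reg_off=+0x%x shift=%u\n",
	       val, bitpos / 32 * 4, bitpos % 32);
	return 0;
}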
static int uniphier_conf_pin_input_enable(struct pinctrl_dev *pctldev,
- const struct pinctrl_pin_desc *pin,
+ const struct pin_desc *desc,
u16 enable)
{
struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
- unsigned int iectrl = uniphier_pin_get_iectrl(pin->drv_data);
+ unsigned int iectrl = uniphier_pin_get_iectrl(desc->drv_data);
+ unsigned int reg, mask;
- if (enable == 0) {
- /*
- * Multiple pins share one input enable, so per-pin disabling
- * is impossible.
- */
- dev_err(pctldev->dev, "unable to disable input\n");
+ /*
+ * Multiple pins share one input enable, so per-pin disabling is
+ * impossible.
+ */
+ if (!(priv->socdata->caps & UNIPHIER_PINCTRL_CAPS_PERPIN_IECTRL) &&
+ !enable)
return -EINVAL;
- }
+ /* UNIPHIER_PIN_IECTRL_NONE means the pin is always input-enabled */
if (iectrl == UNIPHIER_PIN_IECTRL_NONE)
- /* This pin is always input-enabled. nothing to do. */
- return 0;
+ return enable ? 0 : -EINVAL;
+
+ reg = priv->regbase + UNIPHIER_PINCTRL_IECTRL + iectrl / 32 * 4;
+ mask = BIT(iectrl % 32);
- return regmap_update_bits(priv->regmap, UNIPHIER_PINCTRL_IECTRL,
- BIT(iectrl), BIT(iectrl));
+ return regmap_update_bits(priv->regmap, reg, mask, enable ? mask : 0);
}
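
The rewritten input-enable path above makes the behaviour depend on a per-SoC capability bit. A userspace sketch of that decision (the flag value here is illustrative; the real one is defined in pinctrl-uniphier.h):

#include <stdbool.h>
#include <stdio.h>

#define CAPS_PERPIN_IECTRL	(1 << 0)	/* illustrative flag value */

static int iectrl_update(unsigned int caps, unsigned int iectrl, bool enable)
{
	/* shared input enable: a single pin cannot be disabled */
	if (!(caps & CAPS_PERPIN_IECTRL) && !enable)
		return -1;

	/* per-pin case: bit (iectrl % 32) of 32-bit word (iectrl / 32) */
	printf("word=%u mask=0x%08x value=0x%08x\n",
	       iectrl / 32, 1U << (iectrl % 32),
	       enable ? 1U << (iectrl % 32) : 0);
	return 0;
}

int main(void)
{
	iectrl_update(CAPS_PERPIN_IECTRL, 45, true);	/* set the bit */
	if (iectrl_update(0, 45, false) < 0)		/* refused */
		printf("disable refused: input enable is shared\n");
	return 0;
}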
static int uniphier_conf_pin_config_set(struct pinctrl_dev *pctldev,
@@ -437,7 +462,7 @@ static int uniphier_conf_pin_config_set(struct pinctrl_dev *pctldev,
unsigned long *configs,
unsigned num_configs)
{
- const struct pinctrl_pin_desc *pin_desc = &pctldev->desc->pins[pin];
+ const struct pin_desc *desc = pin_desc_get(pctldev, pin);
int i, ret;
for (i = 0; i < num_configs; i++) {
@@ -450,16 +475,15 @@ static int uniphier_conf_pin_config_set(struct pinctrl_dev *pctldev,
case PIN_CONFIG_BIAS_PULL_UP:
case PIN_CONFIG_BIAS_PULL_DOWN:
case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
- ret = uniphier_conf_pin_bias_set(pctldev, pin_desc,
+ ret = uniphier_conf_pin_bias_set(pctldev, desc,
param, arg);
break;
case PIN_CONFIG_DRIVE_STRENGTH:
- ret = uniphier_conf_pin_drive_set(pctldev, pin_desc,
- arg);
+ ret = uniphier_conf_pin_drive_set(pctldev, desc, arg);
break;
case PIN_CONFIG_INPUT_ENABLE:
- ret = uniphier_conf_pin_input_enable(pctldev,
- pin_desc, arg);
+ ret = uniphier_conf_pin_input_enable(pctldev, desc,
+ arg);
break;
default:
dev_err(pctldev->dev,
@@ -531,20 +555,42 @@ static int uniphier_pmx_get_function_groups(struct pinctrl_dev *pctldev,
}
static int uniphier_pmx_set_one_mux(struct pinctrl_dev *pctldev, unsigned pin,
- unsigned muxval)
+ int muxval)
{
struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
- unsigned mux_bits = priv->socdata->mux_bits;
- unsigned reg_stride = priv->socdata->reg_stride;
- unsigned reg, reg_end, shift, mask;
+ unsigned int mux_bits, reg_stride, reg, reg_end, shift, mask;
+ bool load_pinctrl;
int ret;
/* some pins need input-enabling */
ret = uniphier_conf_pin_input_enable(pctldev,
- &pctldev->desc->pins[pin], 1);
+ pin_desc_get(pctldev, pin), 1);
if (ret)
return ret;
+ if (muxval < 0)
+ return 0; /* dedicated pin; nothing to do for pin-mux */
+
+ if (priv->socdata->caps & UNIPHIER_PINCTRL_CAPS_DBGMUX_SEPARATE) {
+ /*
+ * Mode reg_offset bit_position
+ * Normal 4 * n shift+3:shift
+ * Debug 4 * n shift+7:shift+4
+ */
+ mux_bits = 4;
+ reg_stride = 8;
+ load_pinctrl = true;
+ } else {
+ /*
+ * Mode reg_offset bit_position
+ * Normal 8 * n shift+3:shift
+ * Debug 8 * n + 4 shift+3:shift
+ */
+ mux_bits = 8;
+ reg_stride = 4;
+ load_pinctrl = false;
+ }
+
reg = UNIPHIER_PINCTRL_PINMUX_BASE + pin * mux_bits / 32 * reg_stride;
reg_end = reg + reg_stride;
shift = pin * mux_bits % 32;
@@ -555,16 +601,17 @@ static int uniphier_pmx_set_one_mux(struct pinctrl_dev *pctldev, unsigned pin,
* stored in the offset+4.
*/
for (; reg < reg_end; reg += 4) {
- ret = regmap_update_bits(priv->regmap, reg,
+ ret = regmap_update_bits(priv->regmap, priv->regbase + reg,
mask << shift, muxval << shift);
if (ret)
return ret;
muxval >>= mux_bits;
}
- if (priv->socdata->load_pinctrl) {
+ if (load_pinctrl) {
ret = regmap_write(priv->regmap,
- UNIPHIER_PINCTRL_LOAD_PINMUX, 1);
+ priv->regbase + UNIPHIER_PINCTRL_LOAD_PINMUX,
+ 1);
if (ret)
return ret;
}
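
The two register layouts described in the comments above reduce to one offset formula with different mux_bits/stride values. A standalone sketch of that arithmetic (the base address is illustrative):

#include <stdbool.h>
#include <stdio.h>

#define PINMUX_BASE	0x0	/* illustrative */

static void mux_position(unsigned int pin, bool dbgmux_separate)
{
	unsigned int mux_bits = dbgmux_separate ? 4 : 8;
	unsigned int stride = dbgmux_separate ? 8 : 4;
	unsigned int reg = PINMUX_BASE + pin * mux_bits / 32 * stride;
	unsigned int shift = pin * mux_bits % 32;

	printf("pin %u: reg=0x%x shift=%u width=%u\n",
	       pin, reg, shift, mux_bits);
}

int main(void)
{
	mux_position(54, true);		/* DBGMUX_SEPARATE layout */
	mux_position(54, false);	/* combined normal/debug layout */
	return 0;
}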
@@ -633,19 +680,16 @@ static const struct pinmux_ops uniphier_pmxops = {
};
int uniphier_pinctrl_probe(struct platform_device *pdev,
- struct pinctrl_desc *desc,
struct uniphier_pinctrl_socdata *socdata)
{
struct device *dev = &pdev->dev;
struct uniphier_pinctrl_priv *priv;
+ struct device_node *parent;
if (!socdata ||
- !socdata->groups ||
- !socdata->groups_count ||
- !socdata->functions ||
- !socdata->functions_count ||
- !socdata->mux_bits ||
- !socdata->reg_stride) {
+ !socdata->pins || !socdata->npins ||
+ !socdata->groups || !socdata->groups_count ||
+ !socdata->functions || !socdata->functions_count) {
dev_err(dev, "pinctrl socdata lacks necessary members\n");
return -EINVAL;
}
@@ -654,18 +698,36 @@ int uniphier_pinctrl_probe(struct platform_device *pdev,
if (!priv)
return -ENOMEM;
- priv->regmap = syscon_node_to_regmap(dev->of_node);
+ if (of_device_is_compatible(dev->of_node, "socionext,ph1-ld4-pinctrl") ||
+ of_device_is_compatible(dev->of_node, "socionext,ph1-pro4-pinctrl") ||
+ of_device_is_compatible(dev->of_node, "socionext,ph1-sld8-pinctrl") ||
+ of_device_is_compatible(dev->of_node, "socionext,ph1-pro5-pinctrl") ||
+ of_device_is_compatible(dev->of_node, "socionext,proxstream2-pinctrl") ||
+ of_device_is_compatible(dev->of_node, "socionext,ph1-ld6b-pinctrl")) {
+ /* old binding */
+ priv->regmap = syscon_node_to_regmap(dev->of_node);
+ } else {
+ priv->regbase = 0x1000;
+ parent = of_get_parent(dev->of_node);
+ priv->regmap = syscon_node_to_regmap(parent);
+ of_node_put(parent);
+ }
+
if (IS_ERR(priv->regmap)) {
dev_err(dev, "failed to get regmap\n");
return PTR_ERR(priv->regmap);
}
priv->socdata = socdata;
- desc->pctlops = &uniphier_pctlops;
- desc->pmxops = &uniphier_pmxops;
- desc->confops = &uniphier_confops;
-
- priv->pctldev = devm_pinctrl_register(dev, desc, priv);
+ priv->pctldesc.name = dev->driver->name;
+ priv->pctldesc.pins = socdata->pins;
+ priv->pctldesc.npins = socdata->npins;
+ priv->pctldesc.pctlops = &uniphier_pctlops;
+ priv->pctldesc.pmxops = &uniphier_pmxops;
+ priv->pctldesc.confops = &uniphier_confops;
+ priv->pctldesc.owner = dev->driver->owner;
+
+ priv->pctldev = devm_pinctrl_register(dev, &priv->pctldesc, priv);
if (IS_ERR(priv->pctldev)) {
dev_err(dev, "failed to register UniPhier pinctrl driver\n");
return PTR_ERR(priv->pctldev);
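
The compatible-string checks above keep the old binding (the pinctrl node is itself the syscon) apart from the new one (the pinctrl node is a child of the syscon, with registers at offset 0x1000). The same test could be table-driven; a kernel-style sketch of that alternative, for illustration only and not the in-tree code:

#include <linux/kernel.h>
#include <linux/of.h>

static const char * const uniphier_old_bindings[] = {
	"socionext,ph1-ld4-pinctrl",
	"socionext,ph1-pro4-pinctrl",
	"socionext,ph1-sld8-pinctrl",
	"socionext,ph1-pro5-pinctrl",
	"socionext,proxstream2-pinctrl",
	"socionext,ph1-ld6b-pinctrl",
};

/* true if the node uses the old (syscon-is-pinctrl) binding */
static bool uniphier_is_old_binding(struct device_node *np)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(uniphier_old_bindings); i++)
		if (of_device_is_compatible(np, uniphier_old_bindings[i]))
			return true;
	return false;
}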
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
new file mode 100644
index 000000000..77a0236ee
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
@@ -0,0 +1,952 @@
+/*
+ * Copyright (C) 2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-uniphier.h"
+
+static const struct pinctrl_pin_desc uniphier_ld11_pins[] = {
+ UNIPHIER_PINCTRL_PIN(0, "XECS1", 0,
+ 0, UNIPHIER_PIN_DRV_1BIT,
+ 0, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(1, "ERXW", UNIPHIER_PIN_IECTRL_NONE,
+ 1, UNIPHIER_PIN_DRV_1BIT,
+ 1, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(2, "XERWE1", UNIPHIER_PIN_IECTRL_NONE,
+ 2, UNIPHIER_PIN_DRV_1BIT,
+ 2, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(3, "XNFWP", 3,
+ 3, UNIPHIER_PIN_DRV_1BIT,
+ 3, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(4, "XNFCE0", 4,
+ 4, UNIPHIER_PIN_DRV_1BIT,
+ 4, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(5, "NFRYBY0", 5,
+ 5, UNIPHIER_PIN_DRV_1BIT,
+ 5, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(6, "XNFRE", UNIPHIER_PIN_IECTRL_NONE,
+ 6, UNIPHIER_PIN_DRV_1BIT,
+ 6, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(7, "XNFWE", UNIPHIER_PIN_IECTRL_NONE,
+ 7, UNIPHIER_PIN_DRV_1BIT,
+ 7, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(8, "NFALE", UNIPHIER_PIN_IECTRL_NONE,
+ 8, UNIPHIER_PIN_DRV_1BIT,
+ 8, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(9, "NFCLE", UNIPHIER_PIN_IECTRL_NONE,
+ 9, UNIPHIER_PIN_DRV_1BIT,
+ 9, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(10, "NFD0", 10,
+ 10, UNIPHIER_PIN_DRV_1BIT,
+ 10, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(11, "NFD1", 11,
+ 11, UNIPHIER_PIN_DRV_1BIT,
+ 11, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(12, "NFD2", 12,
+ 12, UNIPHIER_PIN_DRV_1BIT,
+ 12, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(13, "NFD3", 13,
+ 13, UNIPHIER_PIN_DRV_1BIT,
+ 13, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(14, "NFD4", 14,
+ 14, UNIPHIER_PIN_DRV_1BIT,
+ 14, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(15, "NFD5", 15,
+ 15, UNIPHIER_PIN_DRV_1BIT,
+ 15, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(16, "NFD6", 16,
+ 16, UNIPHIER_PIN_DRV_1BIT,
+ 16, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(17, "NFD7", 17,
+ 17, UNIPHIER_PIN_DRV_1BIT,
+ 17, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(18, "XERST", 18,
+ 0, UNIPHIER_PIN_DRV_2BIT,
+ 18, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(19, "MMCCLK", 19,
+ 1, UNIPHIER_PIN_DRV_2BIT,
+ 19, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(20, "MMCCMD", 20,
+ 2, UNIPHIER_PIN_DRV_2BIT,
+ 20, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(21, "MMCDS", 21,
+ 3, UNIPHIER_PIN_DRV_2BIT,
+ 21, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(22, "MMCDAT0", 22,
+ 4, UNIPHIER_PIN_DRV_2BIT,
+ 22, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(23, "MMCDAT1", 23,
+ 5, UNIPHIER_PIN_DRV_2BIT,
+ 23, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(24, "MMCDAT2", 24,
+ 6, UNIPHIER_PIN_DRV_2BIT,
+ 24, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(25, "MMCDAT3", 25,
+ 7, UNIPHIER_PIN_DRV_2BIT,
+ 25, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(26, "MMCDAT4", 26,
+ 8, UNIPHIER_PIN_DRV_2BIT,
+ 26, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(27, "MMCDAT5", 27,
+ 9, UNIPHIER_PIN_DRV_2BIT,
+ 27, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(28, "MMCDAT6", 28,
+ 10, UNIPHIER_PIN_DRV_2BIT,
+ 28, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(29, "MMCDAT7", 29,
+ 11, UNIPHIER_PIN_DRV_2BIT,
+ 29, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(46, "USB0VBUS", 46,
+ 46, UNIPHIER_PIN_DRV_1BIT,
+ 46, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(47, "USB0OD", UNIPHIER_PIN_IECTRL_NONE,
+ 47, UNIPHIER_PIN_DRV_1BIT,
+ 47, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(48, "USB1VBUS", 48,
+ 48, UNIPHIER_PIN_DRV_1BIT,
+ 48, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(49, "USB1OD", 49,
+ 49, UNIPHIER_PIN_DRV_1BIT,
+ 49, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(50, "USB2VBUS", 50,
+ 50, UNIPHIER_PIN_DRV_1BIT,
+ 50, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(51, "USB2OD", 51,
+ 51, UNIPHIER_PIN_DRV_1BIT,
+ 51, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(54, "TXD0", 54,
+ 54, UNIPHIER_PIN_DRV_1BIT,
+ 54, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(55, "RXD0", 55,
+ 55, UNIPHIER_PIN_DRV_1BIT,
+ 55, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(56, "SPISYNC0", 56,
+ 56, UNIPHIER_PIN_DRV_1BIT,
+ 56, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(57, "SPISCLK0", 57,
+ 57, UNIPHIER_PIN_DRV_1BIT,
+ 57, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(58, "SPITXD0", 58,
+ 58, UNIPHIER_PIN_DRV_1BIT,
+ 58, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(59, "SPIRXD0", 59,
+ 59, UNIPHIER_PIN_DRV_1BIT,
+ 59, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(60, "AGCI", 60,
+ 60, UNIPHIER_PIN_DRV_1BIT,
+ 60, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(61, "DMDSDA0", 61,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(62, "DMDSCL0", 62,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(63, "SDA0", 63,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(64, "SCL0", 64,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(65, "SDA1", 65,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(66, "SCL1", 66,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(67, "HIN", 67,
+ -1, UNIPHIER_PIN_DRV_FIXED5,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(68, "VIN", 68,
+ -1, UNIPHIER_PIN_DRV_FIXED5,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(69, "PCA00", 69,
+ 69, UNIPHIER_PIN_DRV_1BIT,
+ 69, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(70, "PCA01", 70,
+ 70, UNIPHIER_PIN_DRV_1BIT,
+ 70, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(71, "PCA02", 71,
+ 71, UNIPHIER_PIN_DRV_1BIT,
+ 71, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(72, "PCA03", 72,
+ 72, UNIPHIER_PIN_DRV_1BIT,
+ 72, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(73, "PCA04", 73,
+ 73, UNIPHIER_PIN_DRV_1BIT,
+ 73, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(74, "PCA05", 74,
+ 74, UNIPHIER_PIN_DRV_1BIT,
+ 74, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(75, "PCA06", 75,
+ 75, UNIPHIER_PIN_DRV_1BIT,
+ 75, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(76, "PCA07", 76,
+ 76, UNIPHIER_PIN_DRV_1BIT,
+ 76, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(77, "PCA08", 77,
+ 77, UNIPHIER_PIN_DRV_1BIT,
+ 77, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(78, "PCA09", 78,
+ 78, UNIPHIER_PIN_DRV_1BIT,
+ 78, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(79, "PCA10", 79,
+ 79, UNIPHIER_PIN_DRV_1BIT,
+ 79, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(80, "PCA11", 80,
+ 80, UNIPHIER_PIN_DRV_1BIT,
+ 80, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(81, "PCA12", 81,
+ 81, UNIPHIER_PIN_DRV_1BIT,
+ 81, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(82, "PCA13", 82,
+ 82, UNIPHIER_PIN_DRV_1BIT,
+ 82, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(83, "PCA14", 83,
+ 83, UNIPHIER_PIN_DRV_1BIT,
+ 83, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(84, "PC0READY", 84,
+ 84, UNIPHIER_PIN_DRV_1BIT,
+ 84, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(85, "PC0CD1", 85,
+ 85, UNIPHIER_PIN_DRV_1BIT,
+ 85, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(86, "PC0CD2", 86,
+ 86, UNIPHIER_PIN_DRV_1BIT,
+ 86, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(87, "PC0WAIT", 87,
+ 87, UNIPHIER_PIN_DRV_1BIT,
+ 87, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(88, "PC0RESET", 88,
+ 88, UNIPHIER_PIN_DRV_1BIT,
+ 88, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(89, "PC0CE1", 89,
+ 89, UNIPHIER_PIN_DRV_1BIT,
+ 89, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(90, "PC0WE", 90,
+ 90, UNIPHIER_PIN_DRV_1BIT,
+ 90, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(91, "PC0OE", 91,
+ 91, UNIPHIER_PIN_DRV_1BIT,
+ 91, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(92, "PC0IOWR", 92,
+ 92, UNIPHIER_PIN_DRV_1BIT,
+ 92, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(93, "PC0IORD", 93,
+ 93, UNIPHIER_PIN_DRV_1BIT,
+ 93, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(94, "PCD00", 94,
+ 94, UNIPHIER_PIN_DRV_1BIT,
+ 94, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(95, "PCD01", 95,
+ 95, UNIPHIER_PIN_DRV_1BIT,
+ 95, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(96, "PCD02", 96,
+ 96, UNIPHIER_PIN_DRV_1BIT,
+ 96, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(97, "PCD03", 97,
+ 97, UNIPHIER_PIN_DRV_1BIT,
+ 97, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(98, "PCD04", 98,
+ 98, UNIPHIER_PIN_DRV_1BIT,
+ 98, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(99, "PCD05", 99,
+ 99, UNIPHIER_PIN_DRV_1BIT,
+ 99, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(100, "PCD06", 100,
+ 100, UNIPHIER_PIN_DRV_1BIT,
+ 100, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(101, "PCD07", 101,
+ 101, UNIPHIER_PIN_DRV_1BIT,
+ 101, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(102, "HS0BCLKIN", 102,
+ 102, UNIPHIER_PIN_DRV_1BIT,
+ 102, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(103, "HS0SYNCIN", 103,
+ 103, UNIPHIER_PIN_DRV_1BIT,
+ 103, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(104, "HS0VALIN", 104,
+ 104, UNIPHIER_PIN_DRV_1BIT,
+ 104, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(105, "HS0DIN0", 105,
+ 105, UNIPHIER_PIN_DRV_1BIT,
+ 105, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(106, "HS0DIN1", 106,
+ 106, UNIPHIER_PIN_DRV_1BIT,
+ 106, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(107, "HS0DIN2", 107,
+ 107, UNIPHIER_PIN_DRV_1BIT,
+ 107, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(108, "HS0DIN3", 108,
+ 108, UNIPHIER_PIN_DRV_1BIT,
+ 108, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(109, "HS0DIN4", 109,
+ 109, UNIPHIER_PIN_DRV_1BIT,
+ 109, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(110, "HS0DIN5", 110,
+ 110, UNIPHIER_PIN_DRV_1BIT,
+ 110, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(111, "HS0DIN6", 111,
+ 111, UNIPHIER_PIN_DRV_1BIT,
+ 111, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(112, "HS0DIN7", 112,
+ 112, UNIPHIER_PIN_DRV_1BIT,
+ 112, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(113, "HS0BCLKOUT", 113,
+ 113, UNIPHIER_PIN_DRV_1BIT,
+ 113, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(114, "HS0SYNCOUT", 114,
+ 114, UNIPHIER_PIN_DRV_1BIT,
+ 114, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(115, "HS0VALOUT", 115,
+ 115, UNIPHIER_PIN_DRV_1BIT,
+ 115, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(116, "HS0DOUT0", 116,
+ 116, UNIPHIER_PIN_DRV_1BIT,
+ 116, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(117, "HS0DOUT1", 117,
+ 117, UNIPHIER_PIN_DRV_1BIT,
+ 117, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(118, "HS0DOUT2", 118,
+ 118, UNIPHIER_PIN_DRV_1BIT,
+ 118, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(119, "HS0DOUT3", 119,
+ 119, UNIPHIER_PIN_DRV_1BIT,
+ 119, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(120, "HS0DOUT4", 120,
+ 120, UNIPHIER_PIN_DRV_1BIT,
+ 120, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(121, "HS0DOUT5", 121,
+ 121, UNIPHIER_PIN_DRV_1BIT,
+ 121, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(122, "HS0DOUT6", 122,
+ 122, UNIPHIER_PIN_DRV_1BIT,
+ 122, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(123, "HS0DOUT7", 123,
+ 123, UNIPHIER_PIN_DRV_1BIT,
+ 123, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(124, "HS1BCLKIN", 124,
+ 124, UNIPHIER_PIN_DRV_1BIT,
+ 124, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(125, "HS1SYNCIN", 125,
+ 125, UNIPHIER_PIN_DRV_1BIT,
+ 125, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(126, "HS1VALIN", 126,
+ 126, UNIPHIER_PIN_DRV_1BIT,
+ 126, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(127, "HS1DIN0", 127,
+ 127, UNIPHIER_PIN_DRV_1BIT,
+ 127, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(128, "HS1DIN1", 128,
+ 128, UNIPHIER_PIN_DRV_1BIT,
+ 128, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(129, "HS1DIN2", 129,
+ 129, UNIPHIER_PIN_DRV_1BIT,
+ 129, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(130, "HS1DIN3", 130,
+ 130, UNIPHIER_PIN_DRV_1BIT,
+ 130, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(131, "HS1DIN4", 131,
+ 131, UNIPHIER_PIN_DRV_1BIT,
+ 131, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(132, "HS1DIN5", 132,
+ 132, UNIPHIER_PIN_DRV_1BIT,
+ 132, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(133, "HS1DIN6", 133,
+ 133, UNIPHIER_PIN_DRV_1BIT,
+ 133, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(134, "HS1DIN7", 134,
+ 134, UNIPHIER_PIN_DRV_1BIT,
+ 134, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(135, "AO1IEC", 135,
+ 135, UNIPHIER_PIN_DRV_1BIT,
+ 135, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(136, "AO1ARC", 136,
+ 136, UNIPHIER_PIN_DRV_1BIT,
+ 136, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(137, "AO1DACCK", 137,
+ 137, UNIPHIER_PIN_DRV_1BIT,
+ 137, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(138, "AO1BCK", 138,
+ 138, UNIPHIER_PIN_DRV_1BIT,
+ 138, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(139, "AO1LRCK", 139,
+ 139, UNIPHIER_PIN_DRV_1BIT,
+ 139, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(140, "AO1D0", 140,
+ 140, UNIPHIER_PIN_DRV_1BIT,
+ 140, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(141, "TCON0", 141,
+ 141, UNIPHIER_PIN_DRV_1BIT,
+ 141, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(142, "TCON1", 142,
+ 142, UNIPHIER_PIN_DRV_1BIT,
+ 142, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(143, "TCON2", 143,
+ 143, UNIPHIER_PIN_DRV_1BIT,
+ 143, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(144, "TCON3", 144,
+ 144, UNIPHIER_PIN_DRV_1BIT,
+ 144, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(145, "TCON4", 145,
+ 145, UNIPHIER_PIN_DRV_1BIT,
+ 145, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(146, "TCON5", 146,
+ 146, UNIPHIER_PIN_DRV_1BIT,
+ 146, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(147, "PWMA", 147,
+ 147, UNIPHIER_PIN_DRV_1BIT,
+ 147, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(148, "LR_GOUT", 148,
+ 148, UNIPHIER_PIN_DRV_1BIT,
+ 148, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(149, "XIRQ0", 149,
+ 149, UNIPHIER_PIN_DRV_1BIT,
+ 149, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(150, "XIRQ1", 150,
+ 150, UNIPHIER_PIN_DRV_1BIT,
+ 150, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(151, "XIRQ2", 151,
+ 151, UNIPHIER_PIN_DRV_1BIT,
+ 151, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(152, "XIRQ3", 152,
+ 152, UNIPHIER_PIN_DRV_1BIT,
+ 152, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(153, "XIRQ4", 153,
+ 153, UNIPHIER_PIN_DRV_1BIT,
+ 153, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(154, "XIRQ5", 154,
+ 154, UNIPHIER_PIN_DRV_1BIT,
+ 154, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(155, "XIRQ6", 155,
+ 155, UNIPHIER_PIN_DRV_1BIT,
+ 155, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(156, "XIRQ7", 156,
+ 156, UNIPHIER_PIN_DRV_1BIT,
+ 156, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(157, "XIRQ8", 157,
+ 157, UNIPHIER_PIN_DRV_1BIT,
+ 157, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(158, "AGCBS", 158,
+ 158, UNIPHIER_PIN_DRV_1BIT,
+ 158, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(159, "XIRQ21", 159,
+ 159, UNIPHIER_PIN_DRV_1BIT,
+ 159, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(160, "XIRQ22", 160,
+ 160, UNIPHIER_PIN_DRV_1BIT,
+ 160, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(161, "XIRQ23", 161,
+ 161, UNIPHIER_PIN_DRV_1BIT,
+ 161, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(162, "CH2CLK", 162,
+ 162, UNIPHIER_PIN_DRV_1BIT,
+ 162, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(163, "CH2PSYNC", 163,
+ 163, UNIPHIER_PIN_DRV_1BIT,
+ 163, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(164, "CH2VAL", 164,
+ 164, UNIPHIER_PIN_DRV_1BIT,
+ 164, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(165, "CH2DATA", 165,
+ 165, UNIPHIER_PIN_DRV_1BIT,
+ 165, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(166, "CK25O", 166,
+ 166, UNIPHIER_PIN_DRV_1BIT,
+ 166, UNIPHIER_PIN_PULL_DOWN),
+};
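
How to read the table above: each UNIPHIER_PINCTRL_PIN() row lists (pin number, name, input-enable index, drive-control index, drive type, pull-control index, pull direction), with -1 marking attributes that have no controllable register bit. The packing below is purely hypothetical -- the real encoding lives in pinctrl-uniphier.h -- and only illustrates that the per-pin attributes can be folded into the single drv_data word carried by the pinctrl core.

#include <stdio.h>

/* hypothetical field layout, for illustration only */
#define ATTR_IECTRL(x)		(((unsigned long)(x) & 0xff) << 0)
#define ATTR_DRVCTRL(x)		(((unsigned long)(x) & 0x1ff) << 8)
#define ATTR_DRV_TYPE(x)	(((unsigned long)(x) & 0x7) << 17)
#define ATTR_PUPDCTRL(x)	(((unsigned long)(x) & 0x1ff) << 20)
#define ATTR_PULL_DIR(x)	(((unsigned long)(x) & 0x7) << 29)

int main(void)
{
	/* pin 0 ("XECS1") from the LD11 table; 1 stands in for DRV_1BIT */
	unsigned long drv_data = ATTR_IECTRL(0) | ATTR_DRVCTRL(0) |
				 ATTR_DRV_TYPE(1) | ATTR_PUPDCTRL(0) |
				 ATTR_PULL_DIR(0);

	printf("drv_data=0x%lx drvctrl=%lu\n",
	       drv_data, (drv_data >> 8) & 0x1ff);
	return 0;
}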
+
+static const unsigned emmc_pins[] = {18, 19, 20, 21, 22, 23, 24, 25};
+static const int emmc_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned emmc_dat8_pins[] = {26, 27, 28, 29};
+static const int emmc_dat8_muxvals[] = {0, 0, 0, 0};
+static const unsigned ether_rmii_pins[] = {6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17};
+static const int ether_rmii_muxvals[] = {4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4};
+static const unsigned i2c0_pins[] = {63, 64};
+static const int i2c0_muxvals[] = {0, 0};
+static const unsigned i2c1_pins[] = {65, 66};
+static const int i2c1_muxvals[] = {0, 0};
+static const unsigned i2c3_pins[] = {67, 68};
+static const int i2c3_muxvals[] = {1, 1};
+static const unsigned i2c4_pins[] = {61, 62};
+static const int i2c4_muxvals[] = {1, 1};
+static const unsigned nand_pins[] = {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17};
+static const int nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned system_bus_pins[] = {1, 2, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17};
+static const int system_bus_muxvals[] = {0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2};
+static const unsigned system_bus_cs1_pins[] = {0};
+static const int system_bus_cs1_muxvals[] = {0};
+static const unsigned uart0_pins[] = {54, 55};
+static const int uart0_muxvals[] = {0, 0};
+static const unsigned uart1_pins[] = {58, 59};
+static const int uart1_muxvals[] = {1, 1};
+static const unsigned uart2_pins[] = {90, 91};
+static const int uart2_muxvals[] = {1, 1};
+static const unsigned uart3_pins[] = {94, 95};
+static const int uart3_muxvals[] = {1, 1};
+static const unsigned usb0_pins[] = {46, 47};
+static const int usb0_muxvals[] = {0, 0};
+static const unsigned usb1_pins[] = {48, 49};
+static const int usb1_muxvals[] = {0, 0};
+static const unsigned usb2_pins[] = {50, 51};
+static const int usb2_muxvals[] = {0, 0};
+static const unsigned port_range_pins[] = {
+ 159, 160, 161, 162, 163, 164, 165, 166, /* PORT0x */
+ 0, 1, 2, 3, 4, 5, 6, 7, /* PORT1x */
+ 8, 9, 10, 11, 12, 13, 14, 15, /* PORT2x */
+ 16, 17, 18, -1, -1, -1, -1, -1, /* PORT3x */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* PORT4x */
+ -1, -1, -1, 46, 47, 48, 49, 50, /* PORT5x */
+ 51, -1, -1, 54, 55, 56, 57, 58, /* PORT6x */
+ 59, 60, 69, 70, 71, 72, 73, 74, /* PORT7x */
+ 75, 76, 77, 78, 79, 80, 81, 82, /* PORT8x */
+ 83, 84, 85, 86, 87, 88, 89, 90, /* PORT9x */
+ 91, 92, 93, 94, 95, 96, 97, 98, /* PORT10x */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* PORT11x */
+ 99, 100, 101, 102, 103, 104, 105, 106, /* PORT12x */
+ 107, 108, 109, 110, 111, 112, 113, 114, /* PORT13x */
+ 115, 116, 117, 118, 119, 120, 121, 122, /* PORT14x */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* PORT15x */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* PORT16x */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* PORT17x */
+ 61, 62, 63, 64, 65, 66, 67, 68, /* PORT18x */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* PORT19x */
+ 123, 124, 125, 126, 127, 128, 129, 130, /* PORT20x */
+ 131, 132, 133, 134, 135, 136, 137, 138, /* PORT21x */
+ 139, 140, 141, 142, -1, -1, -1, -1, /* PORT22x */
+ 147, 148, 149, 150, 151, 152, 153, 154, /* PORT23x */
+ 155, 156, 157, 143, 144, 145, 146, 158, /* PORT24x */
+};
+static const int port_range_muxvals[] = {
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT0x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT1x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT2x */
+ 15, 15, 15, -1, -1, -1, -1, -1, /* PORT3x */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* PORT4x */
+ -1, -1, -1, 15, 15, 15, 15, 15, /* PORT5x */
+ 15, -1, -1, 15, 15, 15, 15, 15, /* PORT6x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT7x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT8x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT9x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT10x */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* PORT11x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT12x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT13x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT14x */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* PORT15x */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* PORT16x */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* PORT17x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT18x */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* PORT19x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT20x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT21x */
+ 15, 15, 15, 15, -1, -1, -1, -1, /* PORT22x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT23x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT24x */
+};
+static const unsigned xirq_pins[] = {
+ 149, 150, 151, 152, 153, 154, 155, 156, /* XIRQ0-7 */
+ 157, 143, 144, 145, 85, 146, 158, 84, /* XIRQ8-15 */
+ 141, 142, 148, 50, 51, 159, 160, 161, /* XIRQ16-23 */
+};
+static const int xirq_muxvals[] = {
+ 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ0-7 */
+ 14, 14, 14, 14, 13, 14, 14, 13, /* XIRQ8-15 */
+ 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ16-23 */
+};
+static const unsigned xirq_alternatives_pins[] = {
+ 94, 95, 96, 97, 98, 99, 100, 101, /* XIRQ0-7 */
+ 102, 103, 104, 105, 106, 107, /* XIRQ8-11,13,14 */
+ 108, 109, 110, 111, 112, 113, 114, 115, /* XIRQ16-23 */
+ 9, 10, 11, 12, 13, 14, 15, 16, /* XIRQ4-11 */
+ 17, 0, 1, 2, 3, 4, 5, 6, 7, 8, /* XIRQ13,14,16-23 */
+ 139, 140, 135, 147, /* XIRQ17,18,21,22 */
+};
+static const int xirq_alternatives_muxvals[] = {
+ 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ0-7 */
+ 14, 14, 14, 14, 14, 14, /* XIRQ8-11,13,14 */
+ 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ16-23 */
+ 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ4-11 */
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ13,14,16-23 */
+ 14, 14, 14, 14, /* XIRQ17,18,21,22 */
+};
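
A small sketch of how the port tables above are indexed: entry (bank * 8 + line) gives the backing pin for PORTxy, with -1 marking ports absent on this SoC. The excerpt below reuses the first four PORT banks of port_range_pins.

#include <stdio.h>

static const int port_pins_excerpt[] = {
	159, 160, 161, 162, 163, 164, 165, 166,	/* PORT0x */
	0, 1, 2, 3, 4, 5, 6, 7,			/* PORT1x */
	8, 9, 10, 11, 12, 13, 14, 15,		/* PORT2x */
	16, 17, 18, -1, -1, -1, -1, -1,		/* PORT3x */
};

int main(void)
{
	unsigned int bank = 3, line = 2;	/* PORT32 */
	int pin = port_pins_excerpt[bank * 8 + line];

	if (pin < 0)
		printf("PORT%u%u: not available\n", bank, line);
	else
		printf("PORT%u%u -> pin %d\n", bank, line, pin);
	return 0;
}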
+
+static const struct uniphier_pinctrl_group uniphier_ld11_groups[] = {
+ UNIPHIER_PINCTRL_GROUP(emmc),
+ UNIPHIER_PINCTRL_GROUP(emmc_dat8),
+ UNIPHIER_PINCTRL_GROUP(ether_rmii),
+ UNIPHIER_PINCTRL_GROUP(i2c0),
+ UNIPHIER_PINCTRL_GROUP(i2c1),
+ UNIPHIER_PINCTRL_GROUP(i2c3),
+ UNIPHIER_PINCTRL_GROUP(i2c4),
+ UNIPHIER_PINCTRL_GROUP(nand),
+ UNIPHIER_PINCTRL_GROUP(system_bus),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs1),
+ UNIPHIER_PINCTRL_GROUP(uart0),
+ UNIPHIER_PINCTRL_GROUP(uart1),
+ UNIPHIER_PINCTRL_GROUP(uart2),
+ UNIPHIER_PINCTRL_GROUP(uart3),
+ UNIPHIER_PINCTRL_GROUP(usb0),
+ UNIPHIER_PINCTRL_GROUP(usb1),
+ UNIPHIER_PINCTRL_GROUP(usb2),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_alternatives),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range, 8),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range, 9),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range, 10),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range, 11),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range, 12),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range, 13),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range, 14),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range, 15),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range, 16),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range, 17),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range, 18),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range, 19),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range, 20),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range, 21),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range, 22),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range, 23),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range, 24),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range, 25),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range, 26),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range, 43),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range, 44),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range, 45),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range, 46),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range, 47),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range, 48),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range, 51),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range, 52),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range, 53),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range, 54),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range, 55),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range, 56),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range, 57),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range, 58),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range, 59),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range, 60),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range, 61),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range, 62),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range, 63),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range, 64),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range, 65),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range, 66),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range, 67),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range, 68),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range, 69),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range, 70),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range, 71),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range, 72),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range, 73),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range, 74),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range, 75),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range, 76),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range, 77),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range, 78),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range, 79),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range, 80),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range, 81),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range, 82),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range, 83),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range, 84),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range, 85),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range, 86),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range, 87),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range, 96),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range, 97),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range, 98),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range, 99),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range, 100),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range, 101),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range, 102),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range, 103),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range, 104),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range, 105),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range, 106),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range, 107),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range, 108),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range, 109),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range, 110),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range, 111),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range, 112),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range, 113),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range, 114),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range, 115),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range, 116),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range, 117),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range, 118),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range, 119),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range, 144),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range, 145),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range, 146),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range, 147),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range, 148),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range, 149),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range, 150),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range, 151),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range, 160),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range, 161),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range, 162),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range, 163),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range, 164),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range, 165),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range, 166),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range, 167),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range, 168),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range, 169),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range, 170),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range, 171),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range, 172),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range, 173),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range, 174),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range, 175),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range, 176),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range, 177),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range, 178),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range, 179),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range, 184),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range, 185),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range, 186),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range, 187),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range, 188),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range, 189),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range, 190),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range, 191),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range, 192),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range, 193),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range, 194),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range, 195),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range, 196),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range, 197),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range, 198),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range, 199),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3, xirq, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4, xirq, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5, xirq, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6, xirq, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7, xirq, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8, xirq, 8),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9, xirq, 9),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10, xirq, 10),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11, xirq, 11),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq12, xirq, 12),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq13, xirq, 13),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14, xirq, 14),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq15, xirq, 15),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq16, xirq, 16),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17, xirq, 17),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18, xirq, 18),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq19, xirq, 19),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq20, xirq, 20),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq21, xirq, 21),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq22, xirq, 22),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq23, xirq, 23),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0b, xirq_alternatives, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1b, xirq_alternatives, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2b, xirq_alternatives, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3b, xirq_alternatives, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4b, xirq_alternatives, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5b, xirq_alternatives, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6b, xirq_alternatives, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7b, xirq_alternatives, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8b, xirq_alternatives, 8),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9b, xirq_alternatives, 9),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10b, xirq_alternatives, 10),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11b, xirq_alternatives, 11),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq13b, xirq_alternatives, 12),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14b, xirq_alternatives, 13),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq16b, xirq_alternatives, 14),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17b, xirq_alternatives, 15),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18b, xirq_alternatives, 16),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq19b, xirq_alternatives, 17),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq20b, xirq_alternatives, 18),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq21b, xirq_alternatives, 19),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq22b, xirq_alternatives, 20),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq23b, xirq_alternatives, 21),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4c, xirq_alternatives, 22),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5c, xirq_alternatives, 23),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6c, xirq_alternatives, 24),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7c, xirq_alternatives, 25),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8c, xirq_alternatives, 26),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9c, xirq_alternatives, 27),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10c, xirq_alternatives, 28),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11c, xirq_alternatives, 29),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq13c, xirq_alternatives, 30),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14c, xirq_alternatives, 31),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq16c, xirq_alternatives, 32),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17c, xirq_alternatives, 33),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18c, xirq_alternatives, 34),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq19c, xirq_alternatives, 35),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq20c, xirq_alternatives, 36),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq21c, xirq_alternatives, 37),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq22c, xirq_alternatives, 38),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq23c, xirq_alternatives, 39),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17d, xirq_alternatives, 40),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18d, xirq_alternatives, 41),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq21d, xirq_alternatives, 42),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq22d, xirq_alternatives, 43),
+};
+
+static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
+static const char * const ether_rmii_groups[] = {"ether_rmii"};
+static const char * const i2c0_groups[] = {"i2c0"};
+static const char * const i2c1_groups[] = {"i2c1"};
+static const char * const i2c3_groups[] = {"i2c3"};
+static const char * const i2c4_groups[] = {"i2c4"};
+static const char * const nand_groups[] = {"nand"};
+static const char * const system_bus_groups[] = {"system_bus",
+ "system_bus_cs1"};
+static const char * const uart0_groups[] = {"uart0"};
+static const char * const uart1_groups[] = {"uart1"};
+static const char * const uart2_groups[] = {"uart2"};
+static const char * const uart3_groups[] = {"uart3"};
+static const char * const usb0_groups[] = {"usb0"};
+static const char * const usb1_groups[] = {"usb1"};
+static const char * const usb2_groups[] = {"usb2"};
+static const char * const port_groups[] = {
+ "port00", "port01", "port02", "port03",
+ "port04", "port05", "port06", "port07",
+ "port10", "port11", "port12", "port13",
+ "port14", "port15", "port16", "port17",
+ "port20", "port21", "port22", "port23",
+ "port24", "port25", "port26", "port27",
+ "port30", "port31", "port32",
+ /* port33-52 missing */ "port53",
+ "port54", "port55", "port56", "port57",
+ "port60", /* port61-62 missing*/ "port63",
+ "port64", "port65", "port66", "port67",
+ "port70", "port71", "port72", "port73",
+ "port74", "port75", "port76", "port77",
+ "port80", "port81", "port82", "port83",
+ "port84", "port85", "port86", "port87",
+ "port90", "port91", "port92", "port93",
+ "port94", "port95", "port96", "port97",
+ "port100", "port101", "port102", "port103",
+ "port104", "port105", "port106", "port107",
+ /* port110-117 missing */
+ "port120", "port121", "port122", "port123",
+ "port124", "port125", "port126", "port127",
+ "port130", "port131", "port132", "port133",
+ "port134", "port135", "port136", "port137",
+ "port140", "port141", "port142", "port143",
+ "port144", "port145", "port146", "port147",
+ /* port150-177 missing */
+ "port180", "port181", "port182", "port183",
+ "port184", "port185", "port186", "port187",
+ /* port190-197 missing */
+ "port200", "port201", "port202", "port203",
+ "port204", "port205", "port206", "port207",
+ "port210", "port211", "port212", "port213",
+ "port214", "port215", "port216", "port217",
+ "port220", "port221", "port222", "port223",
+ /* port224-227 missing */
+ "port230", "port231", "port232", "port233",
+ "port234", "port235", "port236", "port237",
+ "port240", "port241", "port242", "port243",
+ "port244", "port245", "port246", "port247",
+};
+static const char * const xirq_groups[] = {
+ "xirq0", "xirq1", "xirq2", "xirq3",
+ "xirq4", "xirq5", "xirq6", "xirq7",
+ "xirq8", "xirq9", "xirq10", "xirq11",
+ "xirq12", "xirq13", "xirq14", "xirq15",
+ "xirq16", "xirq17", "xirq18", "xirq19",
+ "xirq20", "xirq21", "xirq22", "xirq23",
+ "xirq0b", "xirq1b", "xirq2b", "xirq3b",
+ "xirq4b", "xirq5b", "xirq6b", "xirq7b",
+ "xirq8b", "xirq9b", "xirq10b", "xirq11b",
+ /* none */ "xirq13b", "xirq14b", /* none */
+ "xirq16b", "xirq17b", "xirq18b", "xirq19b",
+ "xirq20b", "xirq21b", "xirq22b", "xirq23b",
+ "xirq4c", "xirq5c", "xirq6c", "xirq7c",
+ "xirq8c", "xirq9c", "xirq10c", "xirq11c",
+ /* none */ "xirq13c", "xirq14c", /* none */
+ "xirq16c", "xirq17c", "xirq18c", "xirq19c",
+ "xirq20c", "xirq21c", "xirq22c", "xirq23c",
+ "xirq17d", "xirq18d", "xirq21d", "xirq22d",
+};
+
+static const struct uniphier_pinmux_function uniphier_ld11_functions[] = {
+ UNIPHIER_PINMUX_FUNCTION(emmc),
+ UNIPHIER_PINMUX_FUNCTION(ether_rmii),
+ UNIPHIER_PINMUX_FUNCTION(i2c0),
+ UNIPHIER_PINMUX_FUNCTION(i2c1),
+ UNIPHIER_PINMUX_FUNCTION(i2c3),
+ UNIPHIER_PINMUX_FUNCTION(i2c4),
+ UNIPHIER_PINMUX_FUNCTION(nand),
+ UNIPHIER_PINMUX_FUNCTION(system_bus),
+ UNIPHIER_PINMUX_FUNCTION(uart0),
+ UNIPHIER_PINMUX_FUNCTION(uart1),
+ UNIPHIER_PINMUX_FUNCTION(uart2),
+ UNIPHIER_PINMUX_FUNCTION(uart3),
+ UNIPHIER_PINMUX_FUNCTION(usb0),
+ UNIPHIER_PINMUX_FUNCTION(usb1),
+ UNIPHIER_PINMUX_FUNCTION(usb2),
+ UNIPHIER_PINMUX_FUNCTION(port),
+ UNIPHIER_PINMUX_FUNCTION(xirq),
+};
+
+static struct uniphier_pinctrl_socdata uniphier_ld11_pindata = {
+ .pins = uniphier_ld11_pins,
+ .npins = ARRAY_SIZE(uniphier_ld11_pins),
+ .groups = uniphier_ld11_groups,
+ .groups_count = ARRAY_SIZE(uniphier_ld11_groups),
+ .functions = uniphier_ld11_functions,
+ .functions_count = ARRAY_SIZE(uniphier_ld11_functions),
+ .caps = UNIPHIER_PINCTRL_CAPS_PERPIN_IECTRL,
+};
+
+static int uniphier_ld11_pinctrl_probe(struct platform_device *pdev)
+{
+ return uniphier_pinctrl_probe(pdev, &uniphier_ld11_pindata);
+}
+
+static const struct of_device_id uniphier_ld11_pinctrl_match[] = {
+ { .compatible = "socionext,uniphier-ld11-pinctrl" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, uniphier_ld11_pinctrl_match);
+
+static struct platform_driver uniphier_ld11_pinctrl_driver = {
+ .probe = uniphier_ld11_pinctrl_probe,
+ .driver = {
+ .name = "uniphier-ld11-pinctrl",
+ .of_match_table = uniphier_ld11_pinctrl_match,
+ },
+};
+module_platform_driver(uniphier_ld11_pinctrl_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier PH1-LD11 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
new file mode 100644
index 000000000..aa8bd9794
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
@@ -0,0 +1,1050 @@
+/*
+ * Copyright (C) 2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-uniphier.h"
+
+static const struct pinctrl_pin_desc uniphier_ld20_pins[] = {
+ UNIPHIER_PINCTRL_PIN(0, "XECS1", 0,
+ 0, UNIPHIER_PIN_DRV_3BIT,
+ 0, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(1, "ERXW", 1,
+ 1, UNIPHIER_PIN_DRV_3BIT,
+ 1, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(2, "XERWE1", 2,
+ 2, UNIPHIER_PIN_DRV_3BIT,
+ 2, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(3, "XNFWP", 3,
+ 3, UNIPHIER_PIN_DRV_3BIT,
+ 3, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(4, "XNFCE0", 4,
+ 4, UNIPHIER_PIN_DRV_3BIT,
+ 4, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(5, "NFRYBY0", 5,
+ 5, UNIPHIER_PIN_DRV_3BIT,
+ 5, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(6, "XNFRE", 6,
+ 6, UNIPHIER_PIN_DRV_3BIT,
+ 6, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(7, "XNFWE", 7,
+ 7, UNIPHIER_PIN_DRV_3BIT,
+ 7, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(8, "NFALE", 8,
+ 8, UNIPHIER_PIN_DRV_3BIT,
+ 8, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(9, "NFCLE", 9,
+ 9, UNIPHIER_PIN_DRV_3BIT,
+ 9, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(10, "NFD0", 10,
+ 10, UNIPHIER_PIN_DRV_3BIT,
+ 10, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(11, "NFD1", 11,
+ 11, UNIPHIER_PIN_DRV_3BIT,
+ 11, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(12, "NFD2", 12,
+ 12, UNIPHIER_PIN_DRV_3BIT,
+ 12, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(13, "NFD3", 13,
+ 13, UNIPHIER_PIN_DRV_3BIT,
+ 13, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(14, "NFD4", 14,
+ 14, UNIPHIER_PIN_DRV_3BIT,
+ 14, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(15, "NFD5", 15,
+ 15, UNIPHIER_PIN_DRV_3BIT,
+ 15, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(16, "NFD6", 16,
+ 16, UNIPHIER_PIN_DRV_3BIT,
+ 16, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(17, "NFD7", 17,
+ 17, UNIPHIER_PIN_DRV_3BIT,
+ 17, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(18, "XERST", 18,
+ 0, UNIPHIER_PIN_DRV_2BIT,
+ 18, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(19, "MMCCLK", 19,
+ 1, UNIPHIER_PIN_DRV_2BIT,
+ 19, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(20, "MMCCMD", 20,
+ 2, UNIPHIER_PIN_DRV_2BIT,
+ 20, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(21, "MMCDS", 21,
+ 3, UNIPHIER_PIN_DRV_2BIT,
+ 21, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(22, "MMCDAT0", 22,
+ 4, UNIPHIER_PIN_DRV_2BIT,
+ 22, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(23, "MMCDAT1", 23,
+ 5, UNIPHIER_PIN_DRV_2BIT,
+ 23, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(24, "MMCDAT2", 24,
+ 6, UNIPHIER_PIN_DRV_2BIT,
+ 24, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(25, "MMCDAT3", 25,
+ 7, UNIPHIER_PIN_DRV_2BIT,
+ 25, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(26, "MMCDAT4", 26,
+ 8, UNIPHIER_PIN_DRV_2BIT,
+ 26, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(27, "MMCDAT5", 27,
+ 9, UNIPHIER_PIN_DRV_2BIT,
+ 27, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(28, "MMCDAT6", 28,
+ 10, UNIPHIER_PIN_DRV_2BIT,
+ 28, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(29, "MMCDAT7", 29,
+ 11, UNIPHIER_PIN_DRV_2BIT,
+ 29, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(30, "MDC", 30,
+ 18, UNIPHIER_PIN_DRV_3BIT,
+ 30, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(31, "MDIO", 31,
+ 19, UNIPHIER_PIN_DRV_3BIT,
+ 31, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(32, "MDIO_INTL", 32,
+ 20, UNIPHIER_PIN_DRV_3BIT,
+ 32, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(33, "PHYRSTL", 33,
+ 21, UNIPHIER_PIN_DRV_3BIT,
+ 33, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(34, "RGMII_RXCLK", 34,
+ 22, UNIPHIER_PIN_DRV_3BIT,
+ 34, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(35, "RGMII_RXD0", 35,
+ 23, UNIPHIER_PIN_DRV_3BIT,
+ 35, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(36, "RGMII_RXD1", 36,
+ 24, UNIPHIER_PIN_DRV_3BIT,
+ 36, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(37, "RGMII_RXD2", 37,
+ 25, UNIPHIER_PIN_DRV_3BIT,
+ 37, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(38, "RGMII_RXD3", 38,
+ 26, UNIPHIER_PIN_DRV_3BIT,
+ 38, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(39, "RGMII_RXCTL", 39,
+ 27, UNIPHIER_PIN_DRV_3BIT,
+ 39, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(40, "RGMII_TXCLK", 40,
+ 28, UNIPHIER_PIN_DRV_3BIT,
+ 40, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(41, "RGMII_TXD0", 41,
+ 29, UNIPHIER_PIN_DRV_3BIT,
+ 41, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(42, "RGMII_TXD1", 42,
+ 30, UNIPHIER_PIN_DRV_3BIT,
+ 42, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(43, "RGMII_TXD2", 43,
+ 31, UNIPHIER_PIN_DRV_3BIT,
+ 43, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(44, "RGMII_TXD3", 44,
+ 32, UNIPHIER_PIN_DRV_3BIT,
+ 44, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(45, "RGMII_TXCTL", 45,
+ 33, UNIPHIER_PIN_DRV_3BIT,
+ 45, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(46, "USB0VBUS", 46,
+ 34, UNIPHIER_PIN_DRV_3BIT,
+ 46, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(47, "USB0OD", 47,
+ 35, UNIPHIER_PIN_DRV_3BIT,
+ 47, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(48, "USB1VBUS", 48,
+ 36, UNIPHIER_PIN_DRV_3BIT,
+ 48, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(49, "USB1OD", 49,
+ 37, UNIPHIER_PIN_DRV_3BIT,
+ 49, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(50, "USB2VBUS", 50,
+ 38, UNIPHIER_PIN_DRV_3BIT,
+ 50, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(51, "USB2OD", 51,
+ 39, UNIPHIER_PIN_DRV_3BIT,
+ 51, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(52, "USB3VBUS", 52,
+ 40, UNIPHIER_PIN_DRV_3BIT,
+ 52, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(53, "USB3OD", 53,
+ 41, UNIPHIER_PIN_DRV_3BIT,
+ 53, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(54, "TXD0", 54,
+ 42, UNIPHIER_PIN_DRV_3BIT,
+ 54, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(55, "RXD0", 55,
+ 43, UNIPHIER_PIN_DRV_3BIT,
+ 55, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(56, "SPISYNC0", 56,
+ 44, UNIPHIER_PIN_DRV_3BIT,
+ 56, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(57, "SPISCLK0", 57,
+ 45, UNIPHIER_PIN_DRV_3BIT,
+ 57, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(58, "SPITXD0", 58,
+ 46, UNIPHIER_PIN_DRV_3BIT,
+ 58, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(59, "SPIRXD0", 59,
+ 47, UNIPHIER_PIN_DRV_3BIT,
+ 59, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(60, "AGCI", 60,
+ 48, UNIPHIER_PIN_DRV_3BIT,
+ 60, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(61, "DMDSDA0", 61,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(62, "DMDSCL0", 62,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(63, "SDA0", 63,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(64, "SCL0", 64,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(65, "SDA1", 65,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(66, "SCL1", 66,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(67, "HIN", 67,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(68, "VIN", 68,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
+ -1, UNIPHIER_PIN_PULL_NONE),
+ UNIPHIER_PINCTRL_PIN(69, "PCA00", 69,
+ 49, UNIPHIER_PIN_DRV_3BIT,
+ 69, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(70, "PCA01", 70,
+ 50, UNIPHIER_PIN_DRV_3BIT,
+ 70, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(71, "PCA02", 71,
+ 51, UNIPHIER_PIN_DRV_3BIT,
+ 71, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(72, "PCA03", 72,
+ 52, UNIPHIER_PIN_DRV_3BIT,
+ 72, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(73, "PCA04", 73,
+ 53, UNIPHIER_PIN_DRV_3BIT,
+ 73, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(74, "PCA05", 74,
+ 54, UNIPHIER_PIN_DRV_3BIT,
+ 74, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(75, "PCA06", 75,
+ 55, UNIPHIER_PIN_DRV_3BIT,
+ 75, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(76, "PCA07", 76,
+ 56, UNIPHIER_PIN_DRV_3BIT,
+ 76, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(77, "PCA08", 77,
+ 57, UNIPHIER_PIN_DRV_3BIT,
+ 77, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(78, "PCA09", 78,
+ 58, UNIPHIER_PIN_DRV_3BIT,
+ 78, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(79, "PCA10", 79,
+ 59, UNIPHIER_PIN_DRV_3BIT,
+ 79, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(80, "PCA11", 80,
+ 60, UNIPHIER_PIN_DRV_3BIT,
+ 80, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(81, "PCA12", 81,
+ 61, UNIPHIER_PIN_DRV_3BIT,
+ 81, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(82, "PCA13", 82,
+ 62, UNIPHIER_PIN_DRV_3BIT,
+ 82, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(83, "PCA14", 83,
+ 63, UNIPHIER_PIN_DRV_3BIT,
+ 83, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(84, "PC0READY", 84,
+ 0, UNIPHIER_PIN_DRV_1BIT,
+ 84, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(85, "PC0CD1", 85,
+ 1, UNIPHIER_PIN_DRV_1BIT,
+ 85, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(86, "PC0CD2", 86,
+ 2, UNIPHIER_PIN_DRV_1BIT,
+ 86, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(87, "PC0WAIT", 87,
+ 3, UNIPHIER_PIN_DRV_1BIT,
+ 87, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(88, "PC0RESET", 88,
+ 4, UNIPHIER_PIN_DRV_1BIT,
+ 88, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(89, "PC0CE1", 89,
+ 5, UNIPHIER_PIN_DRV_1BIT,
+ 89, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(90, "PC0WE", 90,
+ 6, UNIPHIER_PIN_DRV_1BIT,
+ 90, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(91, "PC0OE", 91,
+ 7, UNIPHIER_PIN_DRV_1BIT,
+ 91, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(92, "PC0IOWR", 92,
+ 8, UNIPHIER_PIN_DRV_1BIT,
+ 92, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(93, "PC0IORD", 93,
+ 9, UNIPHIER_PIN_DRV_1BIT,
+ 93, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(94, "PCD00", 94,
+ 10, UNIPHIER_PIN_DRV_1BIT,
+ 94, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(95, "PCD01", 95,
+ 11, UNIPHIER_PIN_DRV_1BIT,
+ 95, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(96, "PCD02", 96,
+ 12, UNIPHIER_PIN_DRV_1BIT,
+ 96, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(97, "PCD03", 97,
+ 13, UNIPHIER_PIN_DRV_1BIT,
+ 97, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(98, "PCD04", 98,
+ 14, UNIPHIER_PIN_DRV_1BIT,
+ 98, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(99, "PCD05", 99,
+ 15, UNIPHIER_PIN_DRV_1BIT,
+ 99, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(100, "PCD06", 100,
+ 16, UNIPHIER_PIN_DRV_1BIT,
+ 100, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(101, "PCD07", 101,
+ 17, UNIPHIER_PIN_DRV_1BIT,
+ 101, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(102, "HS0BCLKIN", 102,
+ 18, UNIPHIER_PIN_DRV_1BIT,
+ 102, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(103, "HS0SYNCIN", 103,
+ 19, UNIPHIER_PIN_DRV_1BIT,
+ 103, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(104, "HS0VALIN", 104,
+ 20, UNIPHIER_PIN_DRV_1BIT,
+ 104, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(105, "HS0DIN0", 105,
+ 21, UNIPHIER_PIN_DRV_1BIT,
+ 105, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(106, "HS0DIN1", 106,
+ 22, UNIPHIER_PIN_DRV_1BIT,
+ 106, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(107, "HS0DIN2", 107,
+ 23, UNIPHIER_PIN_DRV_1BIT,
+ 107, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(108, "HS0DIN3", 108,
+ 24, UNIPHIER_PIN_DRV_1BIT,
+ 108, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(109, "HS0DIN4", 109,
+ 25, UNIPHIER_PIN_DRV_1BIT,
+ 109, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(110, "HS0DIN5", 110,
+ 26, UNIPHIER_PIN_DRV_1BIT,
+ 110, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(111, "HS0DIN6", 111,
+ 27, UNIPHIER_PIN_DRV_1BIT,
+ 111, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(112, "HS0DIN7", 112,
+ 28, UNIPHIER_PIN_DRV_1BIT,
+ 112, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(113, "HS0BCLKOUT", 113,
+ 64, UNIPHIER_PIN_DRV_3BIT,
+ 113, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(114, "HS0SYNCOUT", 114,
+ 65, UNIPHIER_PIN_DRV_3BIT,
+ 114, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(115, "HS0VALOUT", 115,
+ 66, UNIPHIER_PIN_DRV_3BIT,
+ 115, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(116, "HS0DOUT0", 116,
+ 67, UNIPHIER_PIN_DRV_3BIT,
+ 116, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(117, "HS0DOUT1", 117,
+ 68, UNIPHIER_PIN_DRV_3BIT,
+ 117, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(118, "HS0DOUT2", 118,
+ 69, UNIPHIER_PIN_DRV_3BIT,
+ 118, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(119, "HS0DOUT3", 119,
+ 70, UNIPHIER_PIN_DRV_3BIT,
+ 119, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(120, "HS0DOUT4", 120,
+ 71, UNIPHIER_PIN_DRV_3BIT,
+ 120, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(121, "HS0DOUT5", 121,
+ 72, UNIPHIER_PIN_DRV_3BIT,
+ 121, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(122, "HS0DOUT6", 122,
+ 73, UNIPHIER_PIN_DRV_3BIT,
+ 122, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(123, "HS0DOUT7", 123,
+ 74, UNIPHIER_PIN_DRV_3BIT,
+ 123, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(124, "HS1BCLKIN", 124,
+ 75, UNIPHIER_PIN_DRV_3BIT,
+ 124, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(125, "HS1SYNCIN", 125,
+ 76, UNIPHIER_PIN_DRV_3BIT,
+ 125, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(126, "HS1VALIN", 126,
+ 77, UNIPHIER_PIN_DRV_3BIT,
+ 126, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(127, "HS1DIN0", 127,
+ 78, UNIPHIER_PIN_DRV_3BIT,
+ 127, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(128, "HS1DIN1", 128,
+ 79, UNIPHIER_PIN_DRV_3BIT,
+ 128, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(129, "HS1DIN2", 129,
+ 80, UNIPHIER_PIN_DRV_3BIT,
+ 129, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(130, "HS1DIN3", 130,
+ 81, UNIPHIER_PIN_DRV_3BIT,
+ 130, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(131, "HS1DIN4", 131,
+ 82, UNIPHIER_PIN_DRV_3BIT,
+ 131, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(132, "HS1DIN5", 132,
+ 83, UNIPHIER_PIN_DRV_3BIT,
+ 132, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(133, "HS1DIN6", 133,
+ 84, UNIPHIER_PIN_DRV_3BIT,
+ 133, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(134, "HS1DIN7", 134,
+ 85, UNIPHIER_PIN_DRV_3BIT,
+ 134, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(135, "AO1IEC", 135,
+ 86, UNIPHIER_PIN_DRV_3BIT,
+ 135, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(136, "AO1ARC", 136,
+ 87, UNIPHIER_PIN_DRV_3BIT,
+ 136, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(137, "AO1DACCK", 137,
+ 88, UNIPHIER_PIN_DRV_3BIT,
+ 137, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(138, "AO1BCK", 138,
+ 89, UNIPHIER_PIN_DRV_3BIT,
+ 138, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(139, "AO1LRCK", 139,
+ 90, UNIPHIER_PIN_DRV_3BIT,
+ 139, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(140, "AO1D0", 140,
+ 91, UNIPHIER_PIN_DRV_3BIT,
+ 140, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(141, "AO1D1", 141,
+ 92, UNIPHIER_PIN_DRV_3BIT,
+ 141, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(142, "AO1D2", 142,
+ 93, UNIPHIER_PIN_DRV_3BIT,
+ 142, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(143, "HTPDN0", 143,
+ 94, UNIPHIER_PIN_DRV_3BIT,
+ 143, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(144, "LOCKN0", 144,
+ 95, UNIPHIER_PIN_DRV_3BIT,
+ 144, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(145, "HTPDN1", 145,
+ 96, UNIPHIER_PIN_DRV_3BIT,
+ 145, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(146, "LOCKN1", 146,
+ 97, UNIPHIER_PIN_DRV_3BIT,
+ 146, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(147, "PWMA", 147,
+ 98, UNIPHIER_PIN_DRV_3BIT,
+ 147, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(148, "LR_GOUT", 148,
+ 99, UNIPHIER_PIN_DRV_3BIT,
+ 148, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(149, "XIRQ0", 149,
+ 100, UNIPHIER_PIN_DRV_3BIT,
+ 149, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(150, "XIRQ1", 150,
+ 101, UNIPHIER_PIN_DRV_3BIT,
+ 150, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(151, "XIRQ2", 151,
+ 102, UNIPHIER_PIN_DRV_3BIT,
+ 151, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(152, "XIRQ3", 152,
+ 103, UNIPHIER_PIN_DRV_3BIT,
+ 152, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(153, "XIRQ4", 153,
+ 104, UNIPHIER_PIN_DRV_3BIT,
+ 153, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(154, "XIRQ5", 154,
+ 105, UNIPHIER_PIN_DRV_3BIT,
+ 154, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(155, "XIRQ6", 155,
+ 106, UNIPHIER_PIN_DRV_3BIT,
+ 155, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(156, "XIRQ7", 156,
+ 107, UNIPHIER_PIN_DRV_3BIT,
+ 156, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(157, "XIRQ8", 157,
+ 108, UNIPHIER_PIN_DRV_3BIT,
+ 157, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(158, "XIRQ9", 158,
+ 109, UNIPHIER_PIN_DRV_3BIT,
+ 158, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(159, "XIRQ10", 159,
+ 110, UNIPHIER_PIN_DRV_3BIT,
+ 159, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(160, "XIRQ11", 160,
+ 111, UNIPHIER_PIN_DRV_3BIT,
+ 160, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(161, "XIRQ13", 161,
+ 112, UNIPHIER_PIN_DRV_3BIT,
+ 161, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(162, "XIRQ14", 162,
+ 113, UNIPHIER_PIN_DRV_3BIT,
+ 162, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(163, "XIRQ16", 163,
+ 114, UNIPHIER_PIN_DRV_3BIT,
+ 163, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(164, "XIRQ17", 164,
+ 115, UNIPHIER_PIN_DRV_3BIT,
+ 164, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(165, "XIRQ18", 165,
+ 116, UNIPHIER_PIN_DRV_3BIT,
+ 165, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(166, "XIRQ19", 166,
+ 117, UNIPHIER_PIN_DRV_3BIT,
+ 166, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(167, "XIRQ20", 167,
+ 118, UNIPHIER_PIN_DRV_3BIT,
+ 167, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(168, "PORT00", 168,
+ 119, UNIPHIER_PIN_DRV_3BIT,
+ 168, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(169, "PORT01", 169,
+ 120, UNIPHIER_PIN_DRV_3BIT,
+ 169, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(170, "PORT02", 170,
+ 121, UNIPHIER_PIN_DRV_3BIT,
+ 170, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(171, "PORT03", 171,
+ 122, UNIPHIER_PIN_DRV_3BIT,
+ 171, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(172, "PORT04", 172,
+ 123, UNIPHIER_PIN_DRV_3BIT,
+ 172, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(173, "CK27FO", 173,
+ 124, UNIPHIER_PIN_DRV_3BIT,
+ 173, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(174, "PHSYNCO", 174,
+ 125, UNIPHIER_PIN_DRV_3BIT,
+ 174, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(175, "PVSYNCO", 175,
+ 126, UNIPHIER_PIN_DRV_3BIT,
+ 175, UNIPHIER_PIN_PULL_DOWN),
+};
+
+static const unsigned emmc_pins[] = {18, 19, 20, 21, 22, 23, 24, 25};
+static const int emmc_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned emmc_dat8_pins[] = {26, 27, 28, 29};
+static const int emmc_dat8_muxvals[] = {0, 0, 0, 0};
+static const unsigned ether_rgmii_pins[] = {30, 31, 32, 33, 34, 35, 36, 37, 38,
+ 39, 40, 41, 42, 43, 44, 45};
+static const int ether_rgmii_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0};
+static const unsigned ether_rmii_pins[] = {30, 31, 32, 33, 34, 35, 36, 37, 39,
+ 41, 42, 45};
+static const int ether_rmii_muxvals[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
+static const unsigned i2c0_pins[] = {63, 64};
+static const int i2c0_muxvals[] = {0, 0};
+static const unsigned i2c1_pins[] = {65, 66};
+static const int i2c1_muxvals[] = {0, 0};
+static const unsigned i2c3_pins[] = {67, 68};
+static const int i2c3_muxvals[] = {1, 1};
+static const unsigned i2c4_pins[] = {61, 62};
+static const int i2c4_muxvals[] = {1, 1};
+static const unsigned nand_pins[] = {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17};
+static const int nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned sd_pins[] = {10, 11, 12, 13, 14, 15, 16, 17};
+static const int sd_muxvals[] = {3, 3, 3, 3, 3, 3, 3, 3}; /* No SDVOLC */
+static const unsigned system_bus_pins[] = {1, 2, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17};
+static const int system_bus_muxvals[] = {0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2};
+static const unsigned system_bus_cs1_pins[] = {0};
+static const int system_bus_cs1_muxvals[] = {0};
+static const unsigned uart0_pins[] = {54, 55};
+static const int uart0_muxvals[] = {0, 0};
+static const unsigned uart1_pins[] = {58, 59};
+static const int uart1_muxvals[] = {1, 1};
+static const unsigned uart2_pins[] = {90, 91};
+static const int uart2_muxvals[] = {1, 1};
+static const unsigned uart3_pins[] = {94, 95};
+static const int uart3_muxvals[] = {1, 1};
+static const unsigned usb0_pins[] = {46, 47};
+static const int usb0_muxvals[] = {0, 0};
+static const unsigned usb1_pins[] = {48, 49};
+static const int usb1_muxvals[] = {0, 0};
+static const unsigned usb2_pins[] = {50, 51};
+static const int usb2_muxvals[] = {0, 0};
+static const unsigned usb3_pins[] = {52, 53};
+static const int usb3_muxvals[] = {0, 0};
+static const unsigned port_range_pins[] = {
+ 168, 169, 170, 171, 172, 173, 174, 175, /* PORT0x */
+ 0, 1, 2, 3, 4, 5, 6, 7, /* PORT1x */
+ 8, 9, 10, 11, 12, 13, 14, 15, /* PORT2x */
+ 16, 17, 18, 30, 31, 32, 33, 34, /* PORT3x */
+ 35, 36, 37, 38, 39, 40, 41, 42, /* PORT4x */
+ 43, 44, 45, 46, 47, 48, 49, 50, /* PORT5x */
+ 51, 52, 53, 54, 55, 56, 57, 58, /* PORT6x */
+ 59, 60, 69, 70, 71, 72, 73, 74, /* PORT7x */
+ 75, 76, 77, 78, 79, 80, 81, 82, /* PORT8x */
+ 83, 84, 85, 86, 87, 88, 89, 90, /* PORT9x */
+ 91, 92, 93, 94, 95, 96, 97, 98, /* PORT10x */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* PORT11x */
+ 99, 100, 101, 102, 103, 104, 105, 106, /* PORT12x */
+ 107, 108, 109, 110, 111, 112, 113, 114, /* PORT13x */
+ 115, 116, 117, 118, 119, 120, 121, 122, /* PORT14x */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* PORT15x */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* PORT16x */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* PORT17x */
+ 61, 62, 63, 64, 65, 66, 67, 68, /* PORT18x */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* PORT19x */
+ 123, 124, 125, 126, 127, 128, 129, 130, /* PORT20x */
+ 131, 132, 133, 134, 135, 136, 137, 138, /* PORT21x */
+ 139, 140, 141, 142, 143, 144, 145, 146, /* PORT22x */
+ 147, 148, 149, 150, 151, 152, 153, 154, /* PORT23x */
+ 155, 156, 157, 158, 159, 160, 161, 162, /* PORT24x */
+ 163, 164, 165, 166, 167, /* PORT25x */
+};
+static const int port_range_muxvals[] = {
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT0x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT1x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT2x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT3x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT4x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT5x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT6x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT7x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT8x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT9x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT10x */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* PORT11x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT12x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT13x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT14x */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* PORT15x */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* PORT16x */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* PORT17x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT18x */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* PORT19x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT20x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT21x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT22x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT23x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT24x */
+ 15, 15, 15, 15, 15, /* PORT25x */
+};
+static const unsigned xirq_pins[] = {
+ 149, 150, 151, 152, 153, 154, 155, 156, /* XIRQ0-7 */
+ 157, 158, 159, 160, 85, 161, 162, 84, /* XIRQ8-15 */
+ 163, 164, 165, 166, 167, 146, 52, 53, /* XIRQ16-23 */
+};
+static const int xirq_muxvals[] = {
+ 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ0-7 */
+ 14, 14, 14, 14, 13, 14, 14, 13, /* XIRQ8-15 */
+ 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ16-23 */
+};
+static const unsigned xirq_alternatives_pins[] = {
+ 94, 95, 96, 97, 98, 99, 100, 101, /* XIRQ0-7 */
+ 102, 103, 104, 105, 106, 107, /* XIRQ8-11,13,14 */
+ 108, 109, 110, 111, 112, 147, 141, 142, /* XIRQ16-23 */
+};
+static const int xirq_alternatives_muxvals[] = {
+ 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ0-7 */
+ 14, 14, 14, 14, 14, 14, /* XIRQ8-11,13,14 */
+ 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ16-23 */
+};
+
+static const struct uniphier_pinctrl_group uniphier_ld20_groups[] = {
+ UNIPHIER_PINCTRL_GROUP(emmc),
+ UNIPHIER_PINCTRL_GROUP(emmc_dat8),
+ UNIPHIER_PINCTRL_GROUP(ether_rgmii),
+ UNIPHIER_PINCTRL_GROUP(ether_rmii),
+ UNIPHIER_PINCTRL_GROUP(i2c0),
+ UNIPHIER_PINCTRL_GROUP(i2c1),
+ UNIPHIER_PINCTRL_GROUP(i2c3),
+ UNIPHIER_PINCTRL_GROUP(i2c4),
+ UNIPHIER_PINCTRL_GROUP(nand),
+ UNIPHIER_PINCTRL_GROUP(sd),
+ UNIPHIER_PINCTRL_GROUP(system_bus),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs1),
+ UNIPHIER_PINCTRL_GROUP(uart0),
+ UNIPHIER_PINCTRL_GROUP(uart1),
+ UNIPHIER_PINCTRL_GROUP(uart2),
+ UNIPHIER_PINCTRL_GROUP(uart3),
+ UNIPHIER_PINCTRL_GROUP(usb0),
+ UNIPHIER_PINCTRL_GROUP(usb1),
+ UNIPHIER_PINCTRL_GROUP(usb2),
+ UNIPHIER_PINCTRL_GROUP(usb3),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_alternatives),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range, 8),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range, 9),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range, 10),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range, 11),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range, 12),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range, 13),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range, 14),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range, 15),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range, 16),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range, 17),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range, 18),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range, 19),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range, 20),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range, 21),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range, 22),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range, 23),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range, 24),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range, 25),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range, 26),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range, 27),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range, 28),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range, 29),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range, 30),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range, 31),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range, 32),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range, 33),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range, 34),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range, 35),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range, 36),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range, 37),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range, 38),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range, 39),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range, 40),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range, 41),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range, 42),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range, 43),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range, 44),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range, 45),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range, 46),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range, 47),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range, 48),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range, 49),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range, 50),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range, 51),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range, 52),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range, 53),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range, 54),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range, 55),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range, 56),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range, 57),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range, 58),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range, 59),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range, 60),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range, 61),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range, 62),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range, 63),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range, 64),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range, 65),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range, 66),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range, 67),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range, 68),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range, 69),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range, 70),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range, 71),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range, 72),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range, 73),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range, 74),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range, 75),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range, 76),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range, 77),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range, 78),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range, 79),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range, 80),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range, 81),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range, 82),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range, 83),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range, 84),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range, 85),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range, 86),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range, 87),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range, 96),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range, 97),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range, 98),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range, 99),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range, 100),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range, 101),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range, 102),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range, 103),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range, 104),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range, 105),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range, 106),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range, 107),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range, 108),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range, 109),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range, 110),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range, 111),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range, 112),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range, 113),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range, 114),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range, 115),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range, 116),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range, 117),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range, 118),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range, 119),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range, 144),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range, 145),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range, 146),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range, 147),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range, 148),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range, 149),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range, 150),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range, 151),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range, 160),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range, 161),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range, 162),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range, 163),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range, 164),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range, 165),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range, 166),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range, 167),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range, 168),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range, 169),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range, 170),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range, 171),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range, 172),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range, 173),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range, 174),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range, 175),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range, 176),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range, 177),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range, 178),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range, 179),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port224, port_range, 180),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port225, port_range, 181),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port226, port_range, 182),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port227, port_range, 183),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range, 184),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range, 185),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range, 186),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range, 187),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range, 188),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range, 189),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range, 190),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range, 191),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range, 192),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range, 193),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range, 194),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range, 195),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range, 196),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range, 197),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range, 198),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range, 199),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port250, port_range, 200),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port251, port_range, 201),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port252, port_range, 202),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port253, port_range, 203),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port254, port_range, 204),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3, xirq, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4, xirq, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5, xirq, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6, xirq, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7, xirq, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8, xirq, 8),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9, xirq, 9),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10, xirq, 10),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11, xirq, 11),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq12, xirq, 12),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq13, xirq, 13),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14, xirq, 14),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq15, xirq, 15),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq16, xirq, 16),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17, xirq, 17),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18, xirq, 18),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq19, xirq, 19),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq20, xirq, 20),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq21, xirq, 21),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq22, xirq, 22),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq23, xirq, 23),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0b, xirq_alternatives, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1b, xirq_alternatives, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2b, xirq_alternatives, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3b, xirq_alternatives, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4b, xirq_alternatives, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5b, xirq_alternatives, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6b, xirq_alternatives, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7b, xirq_alternatives, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8b, xirq_alternatives, 8),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9b, xirq_alternatives, 9),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10b, xirq_alternatives, 10),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11b, xirq_alternatives, 11),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq13b, xirq_alternatives, 12),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14b, xirq_alternatives, 13),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq16b, xirq_alternatives, 14),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17b, xirq_alternatives, 15),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18b, xirq_alternatives, 16),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq19b, xirq_alternatives, 17),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq20b, xirq_alternatives, 18),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq21b, xirq_alternatives, 19),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq22b, xirq_alternatives, 20),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(xirq23b, xirq_alternatives, 21),
+};
+
+static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
+static const char * const ether_rgmii_groups[] = {"ether_rgmii"};
+static const char * const ether_rmii_groups[] = {"ether_rmii"};
+static const char * const i2c0_groups[] = {"i2c0"};
+static const char * const i2c1_groups[] = {"i2c1"};
+static const char * const i2c3_groups[] = {"i2c3"};
+static const char * const i2c4_groups[] = {"i2c4"};
+static const char * const nand_groups[] = {"nand"};
+static const char * const sd_groups[] = {"sd"};
+static const char * const system_bus_groups[] = {"system_bus",
+ "system_bus_cs1"};
+static const char * const uart0_groups[] = {"uart0"};
+static const char * const uart1_groups[] = {"uart1"};
+static const char * const uart2_groups[] = {"uart2"};
+static const char * const uart3_groups[] = {"uart3"};
+static const char * const usb0_groups[] = {"usb0"};
+static const char * const usb1_groups[] = {"usb1"};
+static const char * const usb2_groups[] = {"usb2"};
+static const char * const usb3_groups[] = {"usb3"};
+static const char * const port_groups[] = {
+ "port00", "port01", "port02", "port03",
+ "port04", "port05", "port06", "port07",
+ "port10", "port11", "port12", "port13",
+ "port14", "port15", "port16", "port17",
+ "port20", "port21", "port22", "port23",
+ "port24", "port25", "port26", "port27",
+ "port30", "port31", "port32", "port33",
+ "port34", "port35", "port36", "port37",
+ "port40", "port41", "port42", "port43",
+ "port44", "port45", "port46", "port47",
+ "port50", "port51", "port52", "port53",
+ "port54", "port55", "port56", "port57",
+ "port60", "port61", "port62", "port63",
+ "port64", "port65", "port66", "port67",
+ "port70", "port71", "port72", "port73",
+ "port74", "port75", "port76", "port77",
+ "port80", "port81", "port82", "port83",
+ "port84", "port85", "port86", "port87",
+ "port90", "port91", "port92", "port93",
+ "port94", "port95", "port96", "port97",
+ "port100", "port101", "port102", "port103",
+ "port104", "port105", "port106", "port107",
+ /* port110-117 missing */
+ "port120", "port121", "port122", "port123",
+ "port124", "port125", "port126", "port127",
+ "port130", "port131", "port132", "port133",
+ "port134", "port135", "port136", "port137",
+ "port140", "port141", "port142", "port143",
+ "port144", "port145", "port146", "port147",
+ /* port150-177 missing */
+ "port180", "port181", "port182", "port183",
+ "port184", "port185", "port186", "port187",
+ /* port190-197 missing */
+ "port200", "port201", "port202", "port203",
+ "port204", "port205", "port206", "port207",
+ "port210", "port211", "port212", "port213",
+ "port214", "port215", "port216", "port217",
+ "port220", "port221", "port222", "port223",
+ "port224", "port225", "port226", "port227",
+ "port230", "port231", "port232", "port233",
+ "port234", "port235", "port236", "port237",
+ "port240", "port241", "port242", "port243",
+ "port244", "port245", "port246", "port247",
+ "port250", "port251", "port252", "port253",
+ "port254",
+};
+static const char * const xirq_groups[] = {
+ "xirq0", "xirq1", "xirq2", "xirq3",
+ "xirq4", "xirq5", "xirq6", "xirq7",
+ "xirq8", "xirq9", "xirq10", "xirq11",
+ "xirq12", "xirq13", "xirq14", "xirq15",
+ "xirq16", "xirq17", "xirq18", "xirq19",
+ "xirq20", "xirq21", "xirq22", "xirq23",
+ "xirq0b", "xirq1b", "xirq2b", "xirq3b",
+ "xirq4b", "xirq5b", "xirq6b", "xirq7b",
+ "xirq8b", "xirq9b", "xirq10b", "xirq11b",
+ /* none */ "xirq13b", "xirq14b", /* none */
+ "xirq16b", "xirq17b", "xirq18b", "xirq19b",
+ "xirq20b", "xirq21b", "xirq22b", "xirq23b",
+};
+
+static const struct uniphier_pinmux_function uniphier_ld20_functions[] = {
+ UNIPHIER_PINMUX_FUNCTION(emmc),
+ UNIPHIER_PINMUX_FUNCTION(ether_rgmii),
+ UNIPHIER_PINMUX_FUNCTION(ether_rmii),
+ UNIPHIER_PINMUX_FUNCTION(i2c0),
+ UNIPHIER_PINMUX_FUNCTION(i2c1),
+ UNIPHIER_PINMUX_FUNCTION(i2c3),
+ UNIPHIER_PINMUX_FUNCTION(i2c4),
+ UNIPHIER_PINMUX_FUNCTION(nand),
+ UNIPHIER_PINMUX_FUNCTION(sd),
+ UNIPHIER_PINMUX_FUNCTION(system_bus),
+ UNIPHIER_PINMUX_FUNCTION(uart0),
+ UNIPHIER_PINMUX_FUNCTION(uart1),
+ UNIPHIER_PINMUX_FUNCTION(uart2),
+ UNIPHIER_PINMUX_FUNCTION(uart3),
+ UNIPHIER_PINMUX_FUNCTION(usb0),
+ UNIPHIER_PINMUX_FUNCTION(usb1),
+ UNIPHIER_PINMUX_FUNCTION(usb2),
+ UNIPHIER_PINMUX_FUNCTION(usb3),
+ UNIPHIER_PINMUX_FUNCTION(port),
+ UNIPHIER_PINMUX_FUNCTION(xirq),
+};
+
+static struct uniphier_pinctrl_socdata uniphier_ld20_pindata = {
+ .pins = uniphier_ld20_pins,
+ .npins = ARRAY_SIZE(uniphier_ld20_pins),
+ .groups = uniphier_ld20_groups,
+ .groups_count = ARRAY_SIZE(uniphier_ld20_groups),
+ .functions = uniphier_ld20_functions,
+ .functions_count = ARRAY_SIZE(uniphier_ld20_functions),
+ .caps = UNIPHIER_PINCTRL_CAPS_PERPIN_IECTRL,
+};
+
+static int uniphier_ld20_pinctrl_probe(struct platform_device *pdev)
+{
+ return uniphier_pinctrl_probe(pdev, &uniphier_ld20_pindata);
+}
+
+static const struct of_device_id uniphier_ld20_pinctrl_match[] = {
+ { .compatible = "socionext,uniphier-ld20-pinctrl" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, uniphier_ld20_pinctrl_match);
+
+static struct platform_driver uniphier_ld20_pinctrl_driver = {
+ .probe = uniphier_ld20_pinctrl_probe,
+ .driver = {
+ .name = "uniphier-ld20-pinctrl",
+ .of_match_table = uniphier_ld20_pinctrl_match,
+ },
+};
+module_platform_driver(uniphier_ld20_pinctrl_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier PH1-LD20 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c
index 4a0439c80..3edfb6f9d 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c
@@ -19,544 +19,592 @@
#include "pinctrl-uniphier.h"
-#define DRIVER_NAME "ph1-ld4-pinctrl"
-
-static const struct pinctrl_pin_desc ph1_ld4_pins[] = {
+static const struct pinctrl_pin_desc uniphier_ld4_pins[] = {
UNIPHIER_PINCTRL_PIN(0, "EA1", UNIPHIER_PIN_IECTRL_NONE,
- 8, UNIPHIER_PIN_DRV_4_8,
+ 8, UNIPHIER_PIN_DRV_1BIT,
8, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(1, "EA2", UNIPHIER_PIN_IECTRL_NONE,
- 9, UNIPHIER_PIN_DRV_4_8,
+ 9, UNIPHIER_PIN_DRV_1BIT,
9, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(2, "EA3", UNIPHIER_PIN_IECTRL_NONE,
- 10, UNIPHIER_PIN_DRV_4_8,
+ 10, UNIPHIER_PIN_DRV_1BIT,
10, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(3, "EA4", UNIPHIER_PIN_IECTRL_NONE,
- 11, UNIPHIER_PIN_DRV_4_8,
+ 11, UNIPHIER_PIN_DRV_1BIT,
11, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(4, "EA5", UNIPHIER_PIN_IECTRL_NONE,
- 12, UNIPHIER_PIN_DRV_4_8,
+ 12, UNIPHIER_PIN_DRV_1BIT,
12, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(5, "EA6", UNIPHIER_PIN_IECTRL_NONE,
- 13, UNIPHIER_PIN_DRV_4_8,
+ 13, UNIPHIER_PIN_DRV_1BIT,
13, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(6, "EA7", UNIPHIER_PIN_IECTRL_NONE,
- 14, UNIPHIER_PIN_DRV_4_8,
+ 14, UNIPHIER_PIN_DRV_1BIT,
14, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(7, "EA8", 0,
- 15, UNIPHIER_PIN_DRV_4_8,
+ 15, UNIPHIER_PIN_DRV_1BIT,
15, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(8, "EA9", 0,
- 16, UNIPHIER_PIN_DRV_4_8,
+ 16, UNIPHIER_PIN_DRV_1BIT,
16, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(9, "EA10", 0,
- 17, UNIPHIER_PIN_DRV_4_8,
+ 17, UNIPHIER_PIN_DRV_1BIT,
17, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(10, "EA11", 0,
- 18, UNIPHIER_PIN_DRV_4_8,
+ 18, UNIPHIER_PIN_DRV_1BIT,
18, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(11, "EA12", 0,
- 19, UNIPHIER_PIN_DRV_4_8,
+ 19, UNIPHIER_PIN_DRV_1BIT,
19, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(12, "EA13", 0,
- 20, UNIPHIER_PIN_DRV_4_8,
+ 20, UNIPHIER_PIN_DRV_1BIT,
20, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(13, "EA14", 0,
- 21, UNIPHIER_PIN_DRV_4_8,
+ 21, UNIPHIER_PIN_DRV_1BIT,
21, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(14, "EA15", 0,
- 22, UNIPHIER_PIN_DRV_4_8,
+ 22, UNIPHIER_PIN_DRV_1BIT,
22, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(15, "ECLK", UNIPHIER_PIN_IECTRL_NONE,
- 23, UNIPHIER_PIN_DRV_4_8,
+ 23, UNIPHIER_PIN_DRV_1BIT,
23, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(16, "XERWE0", UNIPHIER_PIN_IECTRL_NONE,
- 24, UNIPHIER_PIN_DRV_4_8,
+ 24, UNIPHIER_PIN_DRV_1BIT,
24, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(17, "XERWE1", UNIPHIER_PIN_IECTRL_NONE,
- 25, UNIPHIER_PIN_DRV_4_8,
+ 25, UNIPHIER_PIN_DRV_1BIT,
25, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(18, "ES0", UNIPHIER_PIN_IECTRL_NONE,
- 27, UNIPHIER_PIN_DRV_4_8,
+ 27, UNIPHIER_PIN_DRV_1BIT,
27, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(19, "ES1", UNIPHIER_PIN_IECTRL_NONE,
- 28, UNIPHIER_PIN_DRV_4_8,
+ 28, UNIPHIER_PIN_DRV_1BIT,
28, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(20, "ES2", UNIPHIER_PIN_IECTRL_NONE,
- 29, UNIPHIER_PIN_DRV_4_8,
+ 29, UNIPHIER_PIN_DRV_1BIT,
29, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(21, "XERST", UNIPHIER_PIN_IECTRL_NONE,
- 38, UNIPHIER_PIN_DRV_4_8,
+ 38, UNIPHIER_PIN_DRV_1BIT,
38, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(22, "MMCCLK", UNIPHIER_PIN_IECTRL_NONE,
- 0, UNIPHIER_PIN_DRV_8_12_16_20,
+ 0, UNIPHIER_PIN_DRV_2BIT,
146, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(23, "MMCCMD", UNIPHIER_PIN_IECTRL_NONE,
- 4, UNIPHIER_PIN_DRV_8_12_16_20,
+ 1, UNIPHIER_PIN_DRV_2BIT,
147, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(24, "MMCDAT0", UNIPHIER_PIN_IECTRL_NONE,
- 8, UNIPHIER_PIN_DRV_8_12_16_20,
+ 2, UNIPHIER_PIN_DRV_2BIT,
148, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(25, "MMCDAT1", UNIPHIER_PIN_IECTRL_NONE,
- 12, UNIPHIER_PIN_DRV_8_12_16_20,
+ 3, UNIPHIER_PIN_DRV_2BIT,
149, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(26, "MMCDAT2", UNIPHIER_PIN_IECTRL_NONE,
- 16, UNIPHIER_PIN_DRV_8_12_16_20,
+ 4, UNIPHIER_PIN_DRV_2BIT,
150, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(27, "MMCDAT3", UNIPHIER_PIN_IECTRL_NONE,
- 20, UNIPHIER_PIN_DRV_8_12_16_20,
+ 5, UNIPHIER_PIN_DRV_2BIT,
151, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(28, "MMCDAT4", UNIPHIER_PIN_IECTRL_NONE,
- 24, UNIPHIER_PIN_DRV_8_12_16_20,
+ 6, UNIPHIER_PIN_DRV_2BIT,
152, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(29, "MMCDAT5", UNIPHIER_PIN_IECTRL_NONE,
- 28, UNIPHIER_PIN_DRV_8_12_16_20,
+ 7, UNIPHIER_PIN_DRV_2BIT,
153, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(30, "MMCDAT6", UNIPHIER_PIN_IECTRL_NONE,
- 32, UNIPHIER_PIN_DRV_8_12_16_20,
+ 8, UNIPHIER_PIN_DRV_2BIT,
154, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(31, "MMCDAT7", UNIPHIER_PIN_IECTRL_NONE,
- 36, UNIPHIER_PIN_DRV_8_12_16_20,
+ 9, UNIPHIER_PIN_DRV_2BIT,
155, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(32, "RMII_RXD0", 6,
- 39, UNIPHIER_PIN_DRV_4_8,
+ 39, UNIPHIER_PIN_DRV_1BIT,
39, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(33, "RMII_RXD1", 6,
- 40, UNIPHIER_PIN_DRV_4_8,
+ 40, UNIPHIER_PIN_DRV_1BIT,
40, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(34, "RMII_CRS_DV", 6,
- 41, UNIPHIER_PIN_DRV_4_8,
+ 41, UNIPHIER_PIN_DRV_1BIT,
41, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(35, "RMII_RXER", 6,
- 42, UNIPHIER_PIN_DRV_4_8,
+ 42, UNIPHIER_PIN_DRV_1BIT,
42, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(36, "RMII_REFCLK", 6,
- 43, UNIPHIER_PIN_DRV_4_8,
+ 43, UNIPHIER_PIN_DRV_1BIT,
43, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(37, "RMII_TXD0", 6,
- 44, UNIPHIER_PIN_DRV_4_8,
+ 44, UNIPHIER_PIN_DRV_1BIT,
44, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(38, "RMII_TXD1", 6,
- 45, UNIPHIER_PIN_DRV_4_8,
+ 45, UNIPHIER_PIN_DRV_1BIT,
45, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(39, "RMII_TXEN", 6,
- 46, UNIPHIER_PIN_DRV_4_8,
+ 46, UNIPHIER_PIN_DRV_1BIT,
46, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(40, "MDC", 6,
- 47, UNIPHIER_PIN_DRV_4_8,
+ 47, UNIPHIER_PIN_DRV_1BIT,
47, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(41, "MDIO", 6,
- 48, UNIPHIER_PIN_DRV_4_8,
+ 48, UNIPHIER_PIN_DRV_1BIT,
48, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(42, "MDIO_INTL", 6,
- 49, UNIPHIER_PIN_DRV_4_8,
+ 49, UNIPHIER_PIN_DRV_1BIT,
49, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(43, "PHYRSTL", 6,
- 50, UNIPHIER_PIN_DRV_4_8,
+ 50, UNIPHIER_PIN_DRV_1BIT,
50, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(44, "SDCLK", UNIPHIER_PIN_IECTRL_NONE,
- 40, UNIPHIER_PIN_DRV_8_12_16_20,
+ 10, UNIPHIER_PIN_DRV_2BIT,
156, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(45, "SDCMD", UNIPHIER_PIN_IECTRL_NONE,
- 44, UNIPHIER_PIN_DRV_8_12_16_20,
+ 11, UNIPHIER_PIN_DRV_2BIT,
157, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(46, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE,
- 48, UNIPHIER_PIN_DRV_8_12_16_20,
+ 12, UNIPHIER_PIN_DRV_2BIT,
158, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(47, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE,
- 52, UNIPHIER_PIN_DRV_8_12_16_20,
+ 13, UNIPHIER_PIN_DRV_2BIT,
159, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(48, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE,
- 56, UNIPHIER_PIN_DRV_8_12_16_20,
+ 14, UNIPHIER_PIN_DRV_2BIT,
160, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(49, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE,
- 60, UNIPHIER_PIN_DRV_8_12_16_20,
+ 15, UNIPHIER_PIN_DRV_2BIT,
161, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(50, "SDCD", UNIPHIER_PIN_IECTRL_NONE,
- 51, UNIPHIER_PIN_DRV_4_8,
+ 51, UNIPHIER_PIN_DRV_1BIT,
51, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(51, "SDWP", UNIPHIER_PIN_IECTRL_NONE,
- 52, UNIPHIER_PIN_DRV_4_8,
+ 52, UNIPHIER_PIN_DRV_1BIT,
52, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(52, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE,
- 53, UNIPHIER_PIN_DRV_4_8,
+ 53, UNIPHIER_PIN_DRV_1BIT,
53, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(53, "USB0VBUS", 0,
- 54, UNIPHIER_PIN_DRV_4_8,
+ 54, UNIPHIER_PIN_DRV_1BIT,
54, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(54, "USB0OD", 0,
- 55, UNIPHIER_PIN_DRV_4_8,
+ 55, UNIPHIER_PIN_DRV_1BIT,
55, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(55, "USB1VBUS", 0,
- 56, UNIPHIER_PIN_DRV_4_8,
+ 56, UNIPHIER_PIN_DRV_1BIT,
56, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(56, "USB1OD", 0,
- 57, UNIPHIER_PIN_DRV_4_8,
+ 57, UNIPHIER_PIN_DRV_1BIT,
57, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(57, "PCRESET", 0,
- 58, UNIPHIER_PIN_DRV_4_8,
+ 58, UNIPHIER_PIN_DRV_1BIT,
58, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(58, "PCREG", 0,
- 59, UNIPHIER_PIN_DRV_4_8,
+ 59, UNIPHIER_PIN_DRV_1BIT,
59, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(59, "PCCE2", 0,
- 60, UNIPHIER_PIN_DRV_4_8,
+ 60, UNIPHIER_PIN_DRV_1BIT,
60, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(60, "PCVS1", 0,
- 61, UNIPHIER_PIN_DRV_4_8,
+ 61, UNIPHIER_PIN_DRV_1BIT,
61, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(61, "PCCD2", 0,
- 62, UNIPHIER_PIN_DRV_4_8,
+ 62, UNIPHIER_PIN_DRV_1BIT,
62, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(62, "PCCD1", 0,
- 63, UNIPHIER_PIN_DRV_4_8,
+ 63, UNIPHIER_PIN_DRV_1BIT,
63, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(63, "PCREADY", 0,
- 64, UNIPHIER_PIN_DRV_4_8,
+ 64, UNIPHIER_PIN_DRV_1BIT,
64, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(64, "PCDOE", 0,
- 65, UNIPHIER_PIN_DRV_4_8,
+ 65, UNIPHIER_PIN_DRV_1BIT,
65, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(65, "PCCE1", 0,
- 66, UNIPHIER_PIN_DRV_4_8,
+ 66, UNIPHIER_PIN_DRV_1BIT,
66, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(66, "PCWE", 0,
- 67, UNIPHIER_PIN_DRV_4_8,
+ 67, UNIPHIER_PIN_DRV_1BIT,
67, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(67, "PCOE", 0,
- 68, UNIPHIER_PIN_DRV_4_8,
+ 68, UNIPHIER_PIN_DRV_1BIT,
68, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(68, "PCWAIT", 0,
- 69, UNIPHIER_PIN_DRV_4_8,
+ 69, UNIPHIER_PIN_DRV_1BIT,
69, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(69, "PCIOWR", 0,
- 70, UNIPHIER_PIN_DRV_4_8,
+ 70, UNIPHIER_PIN_DRV_1BIT,
70, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(70, "PCIORD", 0,
- 71, UNIPHIER_PIN_DRV_4_8,
+ 71, UNIPHIER_PIN_DRV_1BIT,
71, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(71, "HS0DIN0", 0,
- 72, UNIPHIER_PIN_DRV_4_8,
+ 72, UNIPHIER_PIN_DRV_1BIT,
72, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(72, "HS0DIN1", 0,
- 73, UNIPHIER_PIN_DRV_4_8,
+ 73, UNIPHIER_PIN_DRV_1BIT,
73, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(73, "HS0DIN2", 0,
- 74, UNIPHIER_PIN_DRV_4_8,
+ 74, UNIPHIER_PIN_DRV_1BIT,
74, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(74, "HS0DIN3", 0,
- 75, UNIPHIER_PIN_DRV_4_8,
+ 75, UNIPHIER_PIN_DRV_1BIT,
75, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(75, "HS0DIN4", 0,
- 76, UNIPHIER_PIN_DRV_4_8,
+ 76, UNIPHIER_PIN_DRV_1BIT,
76, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(76, "HS0DIN5", 0,
- 77, UNIPHIER_PIN_DRV_4_8,
+ 77, UNIPHIER_PIN_DRV_1BIT,
77, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(77, "HS0DIN6", 0,
- 78, UNIPHIER_PIN_DRV_4_8,
+ 78, UNIPHIER_PIN_DRV_1BIT,
78, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(78, "HS0DIN7", 0,
- 79, UNIPHIER_PIN_DRV_4_8,
+ 79, UNIPHIER_PIN_DRV_1BIT,
79, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(79, "HS0BCLKIN", 0,
- 80, UNIPHIER_PIN_DRV_4_8,
+ 80, UNIPHIER_PIN_DRV_1BIT,
80, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(80, "HS0VALIN", 0,
- 81, UNIPHIER_PIN_DRV_4_8,
+ 81, UNIPHIER_PIN_DRV_1BIT,
81, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(81, "HS0SYNCIN", 0,
- 82, UNIPHIER_PIN_DRV_4_8,
+ 82, UNIPHIER_PIN_DRV_1BIT,
82, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(82, "HSDOUT0", 0,
- 83, UNIPHIER_PIN_DRV_4_8,
+ 83, UNIPHIER_PIN_DRV_1BIT,
83, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(83, "HSDOUT1", 0,
- 84, UNIPHIER_PIN_DRV_4_8,
+ 84, UNIPHIER_PIN_DRV_1BIT,
84, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(84, "HSDOUT2", 0,
- 85, UNIPHIER_PIN_DRV_4_8,
+ 85, UNIPHIER_PIN_DRV_1BIT,
85, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(85, "HSDOUT3", 0,
- 86, UNIPHIER_PIN_DRV_4_8,
+ 86, UNIPHIER_PIN_DRV_1BIT,
86, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(86, "HSDOUT4", 0,
- 87, UNIPHIER_PIN_DRV_4_8,
+ 87, UNIPHIER_PIN_DRV_1BIT,
87, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(87, "HSDOUT5", 0,
- 88, UNIPHIER_PIN_DRV_4_8,
+ 88, UNIPHIER_PIN_DRV_1BIT,
88, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(88, "HSDOUT6", 0,
- 89, UNIPHIER_PIN_DRV_4_8,
+ 89, UNIPHIER_PIN_DRV_1BIT,
89, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(89, "HSDOUT7", 0,
- 90, UNIPHIER_PIN_DRV_4_8,
+ 90, UNIPHIER_PIN_DRV_1BIT,
90, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(90, "HSBCLKOUT", 0,
- 91, UNIPHIER_PIN_DRV_4_8,
+ 91, UNIPHIER_PIN_DRV_1BIT,
91, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(91, "HSVALOUT", 0,
- 92, UNIPHIER_PIN_DRV_4_8,
+ 92, UNIPHIER_PIN_DRV_1BIT,
92, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(92, "HSSYNCOUT", 0,
- 93, UNIPHIER_PIN_DRV_4_8,
+ 93, UNIPHIER_PIN_DRV_1BIT,
93, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(93, "AGCI", 3,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
162, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(94, "AGCR", 4,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
163, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(95, "AGCBS", 5,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
164, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(96, "IECOUT", 0,
- 94, UNIPHIER_PIN_DRV_4_8,
+ 94, UNIPHIER_PIN_DRV_1BIT,
94, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(97, "ASMCK", 0,
- 95, UNIPHIER_PIN_DRV_4_8,
+ 95, UNIPHIER_PIN_DRV_1BIT,
95, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(98, "ABCKO", UNIPHIER_PIN_IECTRL_NONE,
- 96, UNIPHIER_PIN_DRV_4_8,
+ 96, UNIPHIER_PIN_DRV_1BIT,
96, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(99, "ALRCKO", UNIPHIER_PIN_IECTRL_NONE,
- 97, UNIPHIER_PIN_DRV_4_8,
+ 97, UNIPHIER_PIN_DRV_1BIT,
97, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(100, "ASDOUT0", UNIPHIER_PIN_IECTRL_NONE,
- 98, UNIPHIER_PIN_DRV_4_8,
+ 98, UNIPHIER_PIN_DRV_1BIT,
98, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(101, "ARCOUT", 0,
- 99, UNIPHIER_PIN_DRV_4_8,
+ 99, UNIPHIER_PIN_DRV_1BIT,
99, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(102, "SDA0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(103, "SCL0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(104, "SDA1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(105, "SCL1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(106, "DMDSDA0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(107, "DMDSCL0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(108, "DMDSDA1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(109, "DMDSCL1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(110, "SBO0", UNIPHIER_PIN_IECTRL_NONE,
- 100, UNIPHIER_PIN_DRV_4_8,
+ 100, UNIPHIER_PIN_DRV_1BIT,
100, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(111, "SBI0", UNIPHIER_PIN_IECTRL_NONE,
- 101, UNIPHIER_PIN_DRV_4_8,
+ 101, UNIPHIER_PIN_DRV_1BIT,
101, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(112, "HIN", 1,
- -1, UNIPHIER_PIN_DRV_FIXED_5,
+ -1, UNIPHIER_PIN_DRV_FIXED5,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(113, "VIN", 2,
- -1, UNIPHIER_PIN_DRV_FIXED_5,
+ -1, UNIPHIER_PIN_DRV_FIXED5,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(114, "TCON0", UNIPHIER_PIN_IECTRL_NONE,
- 102, UNIPHIER_PIN_DRV_4_8,
+ 102, UNIPHIER_PIN_DRV_1BIT,
102, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(115, "TCON1", UNIPHIER_PIN_IECTRL_NONE,
- 103, UNIPHIER_PIN_DRV_4_8,
+ 103, UNIPHIER_PIN_DRV_1BIT,
103, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(116, "TCON2", UNIPHIER_PIN_IECTRL_NONE,
- 104, UNIPHIER_PIN_DRV_4_8,
+ 104, UNIPHIER_PIN_DRV_1BIT,
104, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(117, "TCON3", UNIPHIER_PIN_IECTRL_NONE,
- 105, UNIPHIER_PIN_DRV_4_8,
+ 105, UNIPHIER_PIN_DRV_1BIT,
105, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(118, "TCON4", UNIPHIER_PIN_IECTRL_NONE,
- 106, UNIPHIER_PIN_DRV_4_8,
+ 106, UNIPHIER_PIN_DRV_1BIT,
106, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(119, "TCON5", UNIPHIER_PIN_IECTRL_NONE,
- 107, UNIPHIER_PIN_DRV_4_8,
+ 107, UNIPHIER_PIN_DRV_1BIT,
107, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(120, "TCON6", 0,
- 108, UNIPHIER_PIN_DRV_4_8,
+ 108, UNIPHIER_PIN_DRV_1BIT,
108, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(121, "TCON7", 0,
- 109, UNIPHIER_PIN_DRV_4_8,
+ 109, UNIPHIER_PIN_DRV_1BIT,
109, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(122, "PWMA", 0,
- 110, UNIPHIER_PIN_DRV_4_8,
+ 110, UNIPHIER_PIN_DRV_1BIT,
110, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(123, "XIRQ1", 0,
- 111, UNIPHIER_PIN_DRV_4_8,
+ 111, UNIPHIER_PIN_DRV_1BIT,
111, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(124, "XIRQ2", 0,
- 112, UNIPHIER_PIN_DRV_4_8,
+ 112, UNIPHIER_PIN_DRV_1BIT,
112, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(125, "XIRQ3", 0,
- 113, UNIPHIER_PIN_DRV_4_8,
+ 113, UNIPHIER_PIN_DRV_1BIT,
113, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(126, "XIRQ4", 0,
- 114, UNIPHIER_PIN_DRV_4_8,
+ 114, UNIPHIER_PIN_DRV_1BIT,
114, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(127, "XIRQ5", 0,
- 115, UNIPHIER_PIN_DRV_4_8,
+ 115, UNIPHIER_PIN_DRV_1BIT,
115, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(128, "XIRQ6", 0,
- 116, UNIPHIER_PIN_DRV_4_8,
+ 116, UNIPHIER_PIN_DRV_1BIT,
116, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(129, "XIRQ7", 0,
- 117, UNIPHIER_PIN_DRV_4_8,
+ 117, UNIPHIER_PIN_DRV_1BIT,
117, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(130, "XIRQ8", 0,
- 118, UNIPHIER_PIN_DRV_4_8,
+ 118, UNIPHIER_PIN_DRV_1BIT,
118, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(131, "XIRQ9", 0,
- 119, UNIPHIER_PIN_DRV_4_8,
+ 119, UNIPHIER_PIN_DRV_1BIT,
119, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(132, "XIRQ10", 0,
- 120, UNIPHIER_PIN_DRV_4_8,
+ 120, UNIPHIER_PIN_DRV_1BIT,
120, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(133, "XIRQ11", 0,
- 121, UNIPHIER_PIN_DRV_4_8,
+ 121, UNIPHIER_PIN_DRV_1BIT,
121, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(134, "XIRQ14", 0,
- 122, UNIPHIER_PIN_DRV_4_8,
+ 122, UNIPHIER_PIN_DRV_1BIT,
122, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(135, "PORT00", 0,
- 123, UNIPHIER_PIN_DRV_4_8,
+ 123, UNIPHIER_PIN_DRV_1BIT,
123, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(136, "PORT01", 0,
- 124, UNIPHIER_PIN_DRV_4_8,
+ 124, UNIPHIER_PIN_DRV_1BIT,
124, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(137, "PORT02", 0,
- 125, UNIPHIER_PIN_DRV_4_8,
+ 125, UNIPHIER_PIN_DRV_1BIT,
125, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(138, "PORT03", 0,
- 126, UNIPHIER_PIN_DRV_4_8,
+ 126, UNIPHIER_PIN_DRV_1BIT,
126, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(139, "PORT04", 0,
- 127, UNIPHIER_PIN_DRV_4_8,
+ 127, UNIPHIER_PIN_DRV_1BIT,
127, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(140, "PORT05", 0,
- 128, UNIPHIER_PIN_DRV_4_8,
+ 128, UNIPHIER_PIN_DRV_1BIT,
128, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(141, "PORT06", 0,
- 129, UNIPHIER_PIN_DRV_4_8,
+ 129, UNIPHIER_PIN_DRV_1BIT,
129, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(142, "PORT07", 0,
- 130, UNIPHIER_PIN_DRV_4_8,
+ 130, UNIPHIER_PIN_DRV_1BIT,
130, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(143, "PORT10", 0,
- 131, UNIPHIER_PIN_DRV_4_8,
+ 131, UNIPHIER_PIN_DRV_1BIT,
131, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(144, "PORT11", 0,
- 132, UNIPHIER_PIN_DRV_4_8,
+ 132, UNIPHIER_PIN_DRV_1BIT,
132, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(145, "PORT12", 0,
- 133, UNIPHIER_PIN_DRV_4_8,
+ 133, UNIPHIER_PIN_DRV_1BIT,
133, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(146, "PORT13", 0,
- 134, UNIPHIER_PIN_DRV_4_8,
+ 134, UNIPHIER_PIN_DRV_1BIT,
134, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(147, "PORT14", 0,
- 135, UNIPHIER_PIN_DRV_4_8,
+ 135, UNIPHIER_PIN_DRV_1BIT,
135, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(148, "PORT15", 0,
- 136, UNIPHIER_PIN_DRV_4_8,
+ 136, UNIPHIER_PIN_DRV_1BIT,
136, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(149, "PORT16", 0,
- 137, UNIPHIER_PIN_DRV_4_8,
+ 137, UNIPHIER_PIN_DRV_1BIT,
137, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(150, "PORT17", UNIPHIER_PIN_IECTRL_NONE,
- 138, UNIPHIER_PIN_DRV_4_8,
+ 138, UNIPHIER_PIN_DRV_1BIT,
138, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(151, "PORT20", 0,
- 139, UNIPHIER_PIN_DRV_4_8,
+ 139, UNIPHIER_PIN_DRV_1BIT,
139, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(152, "PORT21", 0,
- 140, UNIPHIER_PIN_DRV_4_8,
+ 140, UNIPHIER_PIN_DRV_1BIT,
140, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(153, "PORT22", 0,
- 141, UNIPHIER_PIN_DRV_4_8,
+ 141, UNIPHIER_PIN_DRV_1BIT,
141, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(154, "PORT23", 0,
- 142, UNIPHIER_PIN_DRV_4_8,
+ 142, UNIPHIER_PIN_DRV_1BIT,
142, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(155, "PORT24", UNIPHIER_PIN_IECTRL_NONE,
- 143, UNIPHIER_PIN_DRV_4_8,
+ 143, UNIPHIER_PIN_DRV_1BIT,
143, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(156, "PORT25", 0,
- 144, UNIPHIER_PIN_DRV_4_8,
+ 144, UNIPHIER_PIN_DRV_1BIT,
144, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(157, "PORT26", 0,
- 145, UNIPHIER_PIN_DRV_4_8,
+ 145, UNIPHIER_PIN_DRV_1BIT,
145, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(158, "XNFRE", UNIPHIER_PIN_IECTRL_NONE,
- 31, UNIPHIER_PIN_DRV_4_8,
+ 31, UNIPHIER_PIN_DRV_1BIT,
31, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(159, "XNFWE", UNIPHIER_PIN_IECTRL_NONE,
- 32, UNIPHIER_PIN_DRV_4_8,
+ 32, UNIPHIER_PIN_DRV_1BIT,
32, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(160, "NFALE", UNIPHIER_PIN_IECTRL_NONE,
- 33, UNIPHIER_PIN_DRV_4_8,
+ 33, UNIPHIER_PIN_DRV_1BIT,
33, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(161, "NFCLE", UNIPHIER_PIN_IECTRL_NONE,
- 34, UNIPHIER_PIN_DRV_4_8,
+ 34, UNIPHIER_PIN_DRV_1BIT,
34, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(162, "XNFWP", UNIPHIER_PIN_IECTRL_NONE,
- 35, UNIPHIER_PIN_DRV_4_8,
+ 35, UNIPHIER_PIN_DRV_1BIT,
35, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(163, "XNFCE0", UNIPHIER_PIN_IECTRL_NONE,
- 36, UNIPHIER_PIN_DRV_4_8,
+ 36, UNIPHIER_PIN_DRV_1BIT,
36, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(164, "NANDRYBY0", UNIPHIER_PIN_IECTRL_NONE,
- 37, UNIPHIER_PIN_DRV_4_8,
+ 37, UNIPHIER_PIN_DRV_1BIT,
37, UNIPHIER_PIN_PULL_UP),
+ /* dedicated pins */
+ UNIPHIER_PINCTRL_PIN(165, "ED0", -1,
+ 0, UNIPHIER_PIN_DRV_1BIT,
+ 0, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(166, "ED1", -1,
+ 1, UNIPHIER_PIN_DRV_1BIT,
+ 1, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(167, "ED2", -1,
+ 2, UNIPHIER_PIN_DRV_1BIT,
+ 2, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(168, "ED3", -1,
+ 3, UNIPHIER_PIN_DRV_1BIT,
+ 3, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(169, "ED4", -1,
+ 4, UNIPHIER_PIN_DRV_1BIT,
+ 4, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(170, "ED5", -1,
+ 5, UNIPHIER_PIN_DRV_1BIT,
+ 5, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(171, "ED6", -1,
+ 6, UNIPHIER_PIN_DRV_1BIT,
+ 6, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(172, "ED7", -1,
+ 7, UNIPHIER_PIN_DRV_1BIT,
+ 7, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(173, "ERXW", -1,
+ 26, UNIPHIER_PIN_DRV_1BIT,
+ 26, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(174, "XECS1", -1,
+ 30, UNIPHIER_PIN_DRV_1BIT,
+ 30, UNIPHIER_PIN_PULL_UP),
};
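A quick aside on the drive-strength rename running through the table above: the old macros named the selectable currents (UNIPHIER_PIN_DRV_4_8, UNIPHIER_PIN_DRV_8_12_16_20, UNIPHIER_PIN_DRV_FIXED_8), while the new ones name the register field width or the fixed value (UNIPHIER_PIN_DRV_1BIT, UNIPHIER_PIN_DRV_2BIT, UNIPHIER_PIN_DRV_FIXED8). The C sketch below shows one plausible mapping from field type and value back to milliamps; the current tables are an assumption inferred from the old macro names, not taken from this patch, and every sketch_* identifier is hypothetical.

/*
 * Sketch only: milliamp values are assumed from the old macro names
 * (DRV_4_8, DRV_8_12_16_20); this is not the driver's actual API.
 */
enum sketch_drv_type {
	SKETCH_DRV_1BIT,	/* 1-bit register field */
	SKETCH_DRV_2BIT,	/* 2-bit register field */
	SKETCH_DRV_FIXED4,	/* hard-wired 4 mA */
	SKETCH_DRV_FIXED8,	/* hard-wired 8 mA */
};

static int sketch_drive_strength_ma(enum sketch_drv_type type, unsigned int val)
{
	static const int one_bit_ma[] = { 4, 8 };
	static const int two_bit_ma[] = { 8, 12, 16, 20 };

	switch (type) {
	case SKETCH_DRV_1BIT:
		return one_bit_ma[val & 0x1];	/* two choices */
	case SKETCH_DRV_2BIT:
		return two_bit_ma[val & 0x3];	/* four choices */
	case SKETCH_DRV_FIXED4:
		return 4;			/* no register field */
	case SKETCH_DRV_FIXED8:
		return 8;			/* no register field */
	}
	return -1;				/* unreachable */
}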
static const unsigned emmc_pins[] = {21, 22, 23, 24, 25, 26, 27};
-static const unsigned emmc_muxvals[] = {0, 1, 1, 1, 1, 1, 1};
+static const int emmc_muxvals[] = {0, 1, 1, 1, 1, 1, 1};
static const unsigned emmc_dat8_pins[] = {28, 29, 30, 31};
-static const unsigned emmc_dat8_muxvals[] = {1, 1, 1, 1};
+static const int emmc_dat8_muxvals[] = {1, 1, 1, 1};
+static const unsigned ether_mii_pins[] = {32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 136, 137, 138, 139, 140,
+ 141, 142};
+static const int ether_mii_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 4, 4, 4, 4, 4, 4, 4};
+static const unsigned ether_rmii_pins[] = {32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43};
+static const int ether_rmii_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
static const unsigned i2c0_pins[] = {102, 103};
-static const unsigned i2c0_muxvals[] = {0, 0};
+static const int i2c0_muxvals[] = {0, 0};
static const unsigned i2c1_pins[] = {104, 105};
-static const unsigned i2c1_muxvals[] = {0, 0};
+static const int i2c1_muxvals[] = {0, 0};
static const unsigned i2c2_pins[] = {108, 109};
-static const unsigned i2c2_muxvals[] = {2, 2};
+static const int i2c2_muxvals[] = {2, 2};
static const unsigned i2c3_pins[] = {108, 109};
-static const unsigned i2c3_muxvals[] = {3, 3};
+static const int i2c3_muxvals[] = {3, 3};
static const unsigned nand_pins[] = {24, 25, 26, 27, 28, 29, 30, 31, 158, 159,
160, 161, 162, 163, 164};
-static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0};
+static const int nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
static const unsigned nand_cs1_pins[] = {22, 23};
-static const unsigned nand_cs1_muxvals[] = {0, 0};
+static const int nand_cs1_muxvals[] = {0, 0};
static const unsigned sd_pins[] = {44, 45, 46, 47, 48, 49, 50, 51, 52};
-static const unsigned sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const int sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned system_bus_pins[] = {16, 17, 18, 19, 20, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173};
+static const int system_bus_muxvals[] = {0, 0, 0, 0, 0, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1};
+static const unsigned system_bus_cs0_pins[] = {155};
+static const int system_bus_cs0_muxvals[] = {1};
+static const unsigned system_bus_cs1_pins[] = {174};
+static const int system_bus_cs1_muxvals[] = {-1};
+static const unsigned system_bus_cs2_pins[] = {64};
+static const int system_bus_cs2_muxvals[] = {1};
+static const unsigned system_bus_cs3_pins[] = {156};
+static const int system_bus_cs3_muxvals[] = {1};
static const unsigned uart0_pins[] = {85, 88};
-static const unsigned uart0_muxvals[] = {1, 1};
+static const int uart0_muxvals[] = {1, 1};
static const unsigned uart1_pins[] = {155, 156};
-static const unsigned uart1_muxvals[] = {13, 13};
+static const int uart1_muxvals[] = {13, 13};
static const unsigned uart1b_pins[] = {69, 70};
-static const unsigned uart1b_muxvals[] = {23, 23};
+static const int uart1b_muxvals[] = {23, 23};
static const unsigned uart2_pins[] = {128, 129};
-static const unsigned uart2_muxvals[] = {13, 13};
+static const int uart2_muxvals[] = {13, 13};
static const unsigned uart3_pins[] = {110, 111};
-static const unsigned uart3_muxvals[] = {1, 1};
+static const int uart3_muxvals[] = {1, 1};
static const unsigned usb0_pins[] = {53, 54};
-static const unsigned usb0_muxvals[] = {0, 0};
+static const int usb0_muxvals[] = {0, 0};
static const unsigned usb1_pins[] = {55, 56};
-static const unsigned usb1_muxvals[] = {0, 0};
+static const int usb1_muxvals[] = {0, 0};
static const unsigned usb2_pins[] = {155, 156};
-static const unsigned usb2_muxvals[] = {4, 4};
+static const int usb2_muxvals[] = {4, 4};
static const unsigned usb2b_pins[] = {67, 68};
-static const unsigned usb2b_muxvals[] = {23, 23};
+static const int usb2b_muxvals[] = {23, 23};
static const unsigned port_range0_pins[] = {
135, 136, 137, 138, 139, 140, 141, 142, /* PORT0x */
143, 144, 145, 146, 147, 148, 149, 150, /* PORT1x */
@@ -574,7 +622,7 @@ static const unsigned port_range0_pins[] = {
98, 99, 100, 6, 101, 114, 115, 116, /* PORT13x */
103, 108, 21, 22, 23, 117, 118, 119, /* PORT14x */
};
-static const unsigned port_range0_muxvals[] = {
+static const int port_range0_muxvals[] = {
0, 0, 0, 0, 0, 0, 0, 0, /* PORT0x */
0, 0, 0, 0, 0, 0, 0, 0, /* PORT1x */
0, 0, 0, 0, 0, 0, 0, 15, /* PORT2x */
@@ -594,27 +642,29 @@ static const unsigned port_range0_muxvals[] = {
static const unsigned port_range1_pins[] = {
7, /* PORT166 */
};
-static const unsigned port_range1_muxvals[] = {
+static const int port_range1_muxvals[] = {
15, /* PORT166 */
};
static const unsigned xirq_range0_pins[] = {
151, 123, 124, 125, 126, 127, 128, 129, /* XIRQ0-7 */
130, 131, 132, 133, 62, /* XIRQ8-12 */
};
-static const unsigned xirq_range0_muxvals[] = {
+static const int xirq_range0_muxvals[] = {
14, 0, 0, 0, 0, 0, 0, 0, /* XIRQ0-7 */
0, 0, 0, 0, 14, /* XIRQ8-12 */
};
static const unsigned xirq_range1_pins[] = {
134, 63, /* XIRQ14-15 */
};
-static const unsigned xirq_range1_muxvals[] = {
+static const int xirq_range1_muxvals[] = {
0, 14, /* XIRQ14-15 */
};
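The type change from unsigned to int in every *_muxvals[] array is what makes the new -1 entries representable: system_bus_muxvals[] above pairs the dedicated pins 165-174 with -1, and those pins have no mux register to program. A minimal sketch of how a consumer might honour that convention follows; sketch_apply_group_mux() and its parameters are hypothetical, and the skip-negative behaviour is itself an inference from the data in this diff, not something the patch states.

/* Sketch only: illustrates treating a negative muxval as "nothing to set". */
static int sketch_apply_group_mux(const unsigned int *pins, const int *muxvals,
				  unsigned int num_pins)
{
	unsigned int i;

	for (i = 0; i < num_pins; i++) {
		if (muxvals[i] < 0)
			continue;	/* dedicated pin: no mux register */
		/* program muxvals[i] into the mux field for pins[i] here */
	}
	return 0;
}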
-static const struct uniphier_pinctrl_group ph1_ld4_groups[] = {
+static const struct uniphier_pinctrl_group uniphier_ld4_groups[] = {
UNIPHIER_PINCTRL_GROUP(emmc),
UNIPHIER_PINCTRL_GROUP(emmc_dat8),
+ UNIPHIER_PINCTRL_GROUP(ether_mii),
+ UNIPHIER_PINCTRL_GROUP(ether_rmii),
UNIPHIER_PINCTRL_GROUP(i2c0),
UNIPHIER_PINCTRL_GROUP(i2c1),
UNIPHIER_PINCTRL_GROUP(i2c2),
@@ -622,6 +672,11 @@ static const struct uniphier_pinctrl_group ph1_ld4_groups[] = {
UNIPHIER_PINCTRL_GROUP(nand),
UNIPHIER_PINCTRL_GROUP(nand_cs1),
UNIPHIER_PINCTRL_GROUP(sd),
+ UNIPHIER_PINCTRL_GROUP(system_bus),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs0),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs1),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs2),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs3),
UNIPHIER_PINCTRL_GROUP(uart0),
UNIPHIER_PINCTRL_GROUP(uart1),
UNIPHIER_PINCTRL_GROUP(uart1b),
@@ -774,12 +829,19 @@ static const struct uniphier_pinctrl_group ph1_ld4_groups[] = {
};
static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
+static const char * const ether_mii_groups[] = {"ether_mii"};
+static const char * const ether_rmii_groups[] = {"ether_rmii"};
static const char * const i2c0_groups[] = {"i2c0"};
static const char * const i2c1_groups[] = {"i2c1"};
static const char * const i2c2_groups[] = {"i2c2"};
static const char * const i2c3_groups[] = {"i2c3"};
static const char * const nand_groups[] = {"nand", "nand_cs1"};
static const char * const sd_groups[] = {"sd"};
+static const char * const system_bus_groups[] = {"system_bus",
+ "system_bus_cs0",
+ "system_bus_cs1",
+ "system_bus_cs2",
+ "system_bus_cs3"};
static const char * const uart0_groups[] = {"uart0"};
static const char * const uart1_groups[] = {"uart1", "uart1b"};
static const char * const uart2_groups[] = {"uart2"};
@@ -828,14 +890,17 @@ static const char * const xirq_groups[] = {
"xirq12", /* none*/ "xirq14", "xirq15",
};
-static const struct uniphier_pinmux_function ph1_ld4_functions[] = {
+static const struct uniphier_pinmux_function uniphier_ld4_functions[] = {
UNIPHIER_PINMUX_FUNCTION(emmc),
+ UNIPHIER_PINMUX_FUNCTION(ether_mii),
+ UNIPHIER_PINMUX_FUNCTION(ether_rmii),
UNIPHIER_PINMUX_FUNCTION(i2c0),
UNIPHIER_PINMUX_FUNCTION(i2c1),
UNIPHIER_PINMUX_FUNCTION(i2c2),
UNIPHIER_PINMUX_FUNCTION(i2c3),
UNIPHIER_PINMUX_FUNCTION(nand),
UNIPHIER_PINMUX_FUNCTION(sd),
+ UNIPHIER_PINMUX_FUNCTION(system_bus),
UNIPHIER_PINMUX_FUNCTION(uart0),
UNIPHIER_PINMUX_FUNCTION(uart1),
UNIPHIER_PINMUX_FUNCTION(uart2),
@@ -847,43 +912,36 @@ static const struct uniphier_pinmux_function ph1_ld4_functions[] = {
UNIPHIER_PINMUX_FUNCTION(xirq),
};
-static struct uniphier_pinctrl_socdata ph1_ld4_pindata = {
- .groups = ph1_ld4_groups,
- .groups_count = ARRAY_SIZE(ph1_ld4_groups),
- .functions = ph1_ld4_functions,
- .functions_count = ARRAY_SIZE(ph1_ld4_functions),
- .mux_bits = 8,
- .reg_stride = 4,
- .load_pinctrl = false,
-};
-
-static struct pinctrl_desc ph1_ld4_pinctrl_desc = {
- .name = DRIVER_NAME,
- .pins = ph1_ld4_pins,
- .npins = ARRAY_SIZE(ph1_ld4_pins),
- .owner = THIS_MODULE,
+static struct uniphier_pinctrl_socdata uniphier_ld4_pindata = {
+ .pins = uniphier_ld4_pins,
+ .npins = ARRAY_SIZE(uniphier_ld4_pins),
+ .groups = uniphier_ld4_groups,
+ .groups_count = ARRAY_SIZE(uniphier_ld4_groups),
+ .functions = uniphier_ld4_functions,
+ .functions_count = ARRAY_SIZE(uniphier_ld4_functions),
+ .caps = 0,
};
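The replaced ph1_ld4_pindata/ph1_ld4_pinctrl_desc pair shows the per-SoC description collapsing into one structure: pins and npins move into the socdata, the per-driver mux_bits/reg_stride/load_pinctrl knobs give way to a caps bitfield, and uniphier_pinctrl_probe() now takes only the socdata, leaving the common code to build the pinctrl_desc itself. Reconstructed from the initializers in this diff alone, the structure plausibly looks like the sketch below; the real definition lives in pinctrl-uniphier.h and may contain more fields.

/* Sketch only: field list inferred from the initializers above. */
struct pinctrl_pin_desc;		/* forward declarations keep */
struct uniphier_pinctrl_group;		/* this sketch self-contained */
struct uniphier_pinmux_function;

struct sketch_uniphier_socdata {
	const struct pinctrl_pin_desc *pins;
	unsigned int npins;
	const struct uniphier_pinctrl_group *groups;
	int groups_count;
	const struct uniphier_pinmux_function *functions;
	int functions_count;
	unsigned int caps;		/* SoC quirk flags; 0 = none */
};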
-static int ph1_ld4_pinctrl_probe(struct platform_device *pdev)
+static int uniphier_ld4_pinctrl_probe(struct platform_device *pdev)
{
- return uniphier_pinctrl_probe(pdev, &ph1_ld4_pinctrl_desc,
- &ph1_ld4_pindata);
+ return uniphier_pinctrl_probe(pdev, &uniphier_ld4_pindata);
}
-static const struct of_device_id ph1_ld4_pinctrl_match[] = {
+static const struct of_device_id uniphier_ld4_pinctrl_match[] = {
+ { .compatible = "socionext,uniphier-ld4-pinctrl" },
{ .compatible = "socionext,ph1-ld4-pinctrl" },
{ /* sentinel */ }
};
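Note that the match table keeps the old "socionext,ph1-ld4-pinctrl" string alongside the new "socionext,uniphier-ld4-pinctrl" one, so device trees written before the rename continue to bind. Conceptually the OF core just string-matches against this table; the standalone sketch below illustrates that lookup, with all sketch_* names hypothetical rather than kernel API.

#include <string.h>

struct sketch_of_id {
	const char *compatible;
};

static const struct sketch_of_id sketch_ld4_match[] = {
	{ "socionext,uniphier-ld4-pinctrl" },	/* new name */
	{ "socionext,ph1-ld4-pinctrl" },	/* old name, kept for existing DTs */
	{ NULL }
};

/* Returns 1 if a device tree compatible string binds to this driver. */
static int sketch_of_matches(const char *dt_compatible)
{
	const struct sketch_of_id *id;

	for (id = sketch_ld4_match; id->compatible; id++)
		if (strcmp(id->compatible, dt_compatible) == 0)
			return 1;
	return 0;
}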
-MODULE_DEVICE_TABLE(of, ph1_ld4_pinctrl_match);
+MODULE_DEVICE_TABLE(of, uniphier_ld4_pinctrl_match);
-static struct platform_driver ph1_ld4_pinctrl_driver = {
- .probe = ph1_ld4_pinctrl_probe,
+static struct platform_driver uniphier_ld4_pinctrl_driver = {
+ .probe = uniphier_ld4_pinctrl_probe,
.driver = {
- .name = DRIVER_NAME,
- .of_match_table = ph1_ld4_pinctrl_match,
+ .name = "uniphier-ld4-pinctrl",
+ .of_match_table = uniphier_ld4_pinctrl_match,
},
};
-module_platform_driver(ph1_ld4_pinctrl_driver);
+module_platform_driver(uniphier_ld4_pinctrl_driver);
MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
MODULE_DESCRIPTION("UniPhier PH1-LD4 pinctrl driver");
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c
index 150d33928..708e5100c 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c
@@ -19,713 +19,711 @@
#include "pinctrl-uniphier.h"
-#define DRIVER_NAME "ph1-ld6b-pinctrl"
-
-static const struct pinctrl_pin_desc ph1_ld6b_pins[] = {
+static const struct pinctrl_pin_desc uniphier_ld6b_pins[] = {
UNIPHIER_PINCTRL_PIN(0, "ED0", UNIPHIER_PIN_IECTRL_NONE,
- 0, UNIPHIER_PIN_DRV_4_8,
+ 0, UNIPHIER_PIN_DRV_1BIT,
0, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(1, "ED1", UNIPHIER_PIN_IECTRL_NONE,
- 1, UNIPHIER_PIN_DRV_4_8,
+ 1, UNIPHIER_PIN_DRV_1BIT,
1, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(2, "ED2", UNIPHIER_PIN_IECTRL_NONE,
- 2, UNIPHIER_PIN_DRV_4_8,
+ 2, UNIPHIER_PIN_DRV_1BIT,
2, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(3, "ED3", UNIPHIER_PIN_IECTRL_NONE,
- 3, UNIPHIER_PIN_DRV_4_8,
+ 3, UNIPHIER_PIN_DRV_1BIT,
3, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(4, "ED4", UNIPHIER_PIN_IECTRL_NONE,
- 4, UNIPHIER_PIN_DRV_4_8,
+ 4, UNIPHIER_PIN_DRV_1BIT,
4, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(5, "ED5", UNIPHIER_PIN_IECTRL_NONE,
- 5, UNIPHIER_PIN_DRV_4_8,
+ 5, UNIPHIER_PIN_DRV_1BIT,
5, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(6, "ED6", UNIPHIER_PIN_IECTRL_NONE,
- 6, UNIPHIER_PIN_DRV_4_8,
+ 6, UNIPHIER_PIN_DRV_1BIT,
6, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(7, "ED7", UNIPHIER_PIN_IECTRL_NONE,
- 7, UNIPHIER_PIN_DRV_4_8,
+ 7, UNIPHIER_PIN_DRV_1BIT,
7, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(8, "XERWE0", UNIPHIER_PIN_IECTRL_NONE,
- 8, UNIPHIER_PIN_DRV_4_8,
+ 8, UNIPHIER_PIN_DRV_1BIT,
8, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(9, "XERWE1", UNIPHIER_PIN_IECTRL_NONE,
- 9, UNIPHIER_PIN_DRV_4_8,
+ 9, UNIPHIER_PIN_DRV_1BIT,
9, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(10, "ERXW", UNIPHIER_PIN_IECTRL_NONE,
- 10, UNIPHIER_PIN_DRV_4_8,
+ 10, UNIPHIER_PIN_DRV_1BIT,
10, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(11, "ES0", UNIPHIER_PIN_IECTRL_NONE,
- 11, UNIPHIER_PIN_DRV_4_8,
+ 11, UNIPHIER_PIN_DRV_1BIT,
11, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(12, "ES1", UNIPHIER_PIN_IECTRL_NONE,
- 12, UNIPHIER_PIN_DRV_4_8,
+ 12, UNIPHIER_PIN_DRV_1BIT,
12, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(13, "ES2", UNIPHIER_PIN_IECTRL_NONE,
- 13, UNIPHIER_PIN_DRV_4_8,
+ 13, UNIPHIER_PIN_DRV_1BIT,
13, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(14, "XECS1", UNIPHIER_PIN_IECTRL_NONE,
- 14, UNIPHIER_PIN_DRV_4_8,
+ 14, UNIPHIER_PIN_DRV_1BIT,
14, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(15, "PCA00", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
15, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(16, "PCA01", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
16, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(17, "PCA02", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
17, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(18, "PCA03", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
18, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(19, "PCA04", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
19, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(20, "PCA05", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
20, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(21, "PCA06", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
21, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(22, "PCA07", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
22, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(23, "PCA08", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
23, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(24, "PCA09", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
24, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(25, "PCA10", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
25, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(26, "PCA11", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
26, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(27, "PCA12", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
27, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(28, "PCA13", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
28, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(29, "PCA14", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
29, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(30, "XNFRE", UNIPHIER_PIN_IECTRL_NONE,
- 30, UNIPHIER_PIN_DRV_4_8,
+ 30, UNIPHIER_PIN_DRV_1BIT,
30, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(31, "XNFWE", UNIPHIER_PIN_IECTRL_NONE,
- 31, UNIPHIER_PIN_DRV_4_8,
+ 31, UNIPHIER_PIN_DRV_1BIT,
31, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(32, "NFALE", UNIPHIER_PIN_IECTRL_NONE,
- 32, UNIPHIER_PIN_DRV_4_8,
+ 32, UNIPHIER_PIN_DRV_1BIT,
32, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(33, "NFCLE", UNIPHIER_PIN_IECTRL_NONE,
- 33, UNIPHIER_PIN_DRV_4_8,
+ 33, UNIPHIER_PIN_DRV_1BIT,
33, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(34, "XNFWP", UNIPHIER_PIN_IECTRL_NONE,
- 34, UNIPHIER_PIN_DRV_4_8,
+ 34, UNIPHIER_PIN_DRV_1BIT,
34, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(35, "XNFCE0", UNIPHIER_PIN_IECTRL_NONE,
- 35, UNIPHIER_PIN_DRV_4_8,
+ 35, UNIPHIER_PIN_DRV_1BIT,
35, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(36, "NFRYBY0", UNIPHIER_PIN_IECTRL_NONE,
- 36, UNIPHIER_PIN_DRV_4_8,
+ 36, UNIPHIER_PIN_DRV_1BIT,
36, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(37, "XNFCE1", UNIPHIER_PIN_IECTRL_NONE,
- 37, UNIPHIER_PIN_DRV_4_8,
+ 37, UNIPHIER_PIN_DRV_1BIT,
37, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(38, "NFRYBY1", UNIPHIER_PIN_IECTRL_NONE,
- 38, UNIPHIER_PIN_DRV_4_8,
+ 38, UNIPHIER_PIN_DRV_1BIT,
38, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(39, "NFD0", UNIPHIER_PIN_IECTRL_NONE,
- 39, UNIPHIER_PIN_DRV_4_8,
+ 39, UNIPHIER_PIN_DRV_1BIT,
39, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(40, "NFD1", UNIPHIER_PIN_IECTRL_NONE,
- 40, UNIPHIER_PIN_DRV_4_8,
+ 40, UNIPHIER_PIN_DRV_1BIT,
40, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(41, "NFD2", UNIPHIER_PIN_IECTRL_NONE,
- 41, UNIPHIER_PIN_DRV_4_8,
+ 41, UNIPHIER_PIN_DRV_1BIT,
41, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(42, "NFD3", UNIPHIER_PIN_IECTRL_NONE,
- 42, UNIPHIER_PIN_DRV_4_8,
+ 42, UNIPHIER_PIN_DRV_1BIT,
42, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(43, "NFD4", UNIPHIER_PIN_IECTRL_NONE,
- 43, UNIPHIER_PIN_DRV_4_8,
+ 43, UNIPHIER_PIN_DRV_1BIT,
43, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(44, "NFD5", UNIPHIER_PIN_IECTRL_NONE,
- 44, UNIPHIER_PIN_DRV_4_8,
+ 44, UNIPHIER_PIN_DRV_1BIT,
44, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(45, "NFD6", UNIPHIER_PIN_IECTRL_NONE,
- 45, UNIPHIER_PIN_DRV_4_8,
+ 45, UNIPHIER_PIN_DRV_1BIT,
45, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(46, "NFD7", UNIPHIER_PIN_IECTRL_NONE,
- 46, UNIPHIER_PIN_DRV_4_8,
+ 46, UNIPHIER_PIN_DRV_1BIT,
46, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(47, "SDCLK", UNIPHIER_PIN_IECTRL_NONE,
- 0, UNIPHIER_PIN_DRV_8_12_16_20,
+ 0, UNIPHIER_PIN_DRV_2BIT,
-1, UNIPHIER_PIN_PULL_UP_FIXED),
UNIPHIER_PINCTRL_PIN(48, "SDCMD", UNIPHIER_PIN_IECTRL_NONE,
- 4, UNIPHIER_PIN_DRV_8_12_16_20,
+ 1, UNIPHIER_PIN_DRV_2BIT,
-1, UNIPHIER_PIN_PULL_UP_FIXED),
UNIPHIER_PINCTRL_PIN(49, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE,
- 8, UNIPHIER_PIN_DRV_8_12_16_20,
+ 2, UNIPHIER_PIN_DRV_2BIT,
-1, UNIPHIER_PIN_PULL_UP_FIXED),
UNIPHIER_PINCTRL_PIN(50, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE,
- 12, UNIPHIER_PIN_DRV_8_12_16_20,
+ 3, UNIPHIER_PIN_DRV_2BIT,
-1, UNIPHIER_PIN_PULL_UP_FIXED),
UNIPHIER_PINCTRL_PIN(51, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE,
- 16, UNIPHIER_PIN_DRV_8_12_16_20,
+ 4, UNIPHIER_PIN_DRV_2BIT,
-1, UNIPHIER_PIN_PULL_UP_FIXED),
UNIPHIER_PINCTRL_PIN(52, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE,
- 20, UNIPHIER_PIN_DRV_8_12_16_20,
+ 5, UNIPHIER_PIN_DRV_2BIT,
-1, UNIPHIER_PIN_PULL_UP_FIXED),
UNIPHIER_PINCTRL_PIN(53, "SDCD", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
53, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(54, "SDWP", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
54, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(55, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
55, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(56, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
56, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(57, "USB0OD", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
57, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(58, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
58, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(59, "USB1OD", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
59, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(60, "USB2VBUS", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
60, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(61, "USB2OD", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
61, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(62, "USB3VBUS", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
62, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(63, "USB3OD", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
63, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(64, "HS0BCLKOUT", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
64, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(65, "HS0SYNCOUT", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
65, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(66, "HS0VALOUT", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
66, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(67, "HS0DOUT0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
67, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(68, "HS0DOUT1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
68, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(69, "HS0DOUT2", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
69, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(70, "HS0DOUT3", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
70, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(71, "HS0DOUT4", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
71, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(72, "HS0DOUT5", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
72, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(73, "HS0DOUT6", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
73, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(74, "HS0DOUT7", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
74, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(75, "HS1BCLKIN", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
75, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(76, "HS1SYNCIN", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
76, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(77, "HS1VALIN", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
77, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(78, "HS1DIN0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
78, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(79, "HS1DIN1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
79, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(80, "HS1DIN2", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
80, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(81, "HS1DIN3", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
81, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(82, "HS1DIN4", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
82, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(83, "HS1DIN5", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
83, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(84, "HS1DIN6", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
84, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(85, "HS1DIN7", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
85, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(86, "HS2BCLKIN", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
86, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(87, "HS2SYNCIN", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
87, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(88, "HS2VALIN", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
88, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(89, "HS2DIN0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
89, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(90, "HS2DIN1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
90, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(91, "HS2DIN2", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
91, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(92, "HS2DIN3", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
92, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(93, "HS2DIN4", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
93, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(94, "HS2DIN5", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
94, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(95, "HS2DIN6", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
95, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(96, "HS2DIN7", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
96, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(97, "AO1IEC", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
97, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(98, "AO1DACCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
98, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(99, "AO1BCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
99, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(100, "AO1LRCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
100, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(101, "AO1D0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
101, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(102, "AO1D1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
102, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(103, "AO1D2", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
103, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(104, "AO1D3", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
104, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(105, "AO2DACCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
105, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(106, "AO2BCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
106, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(107, "AO2LRCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
107, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(108, "AO2D0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
108, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(109, "SDA0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
109, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(110, "SCL0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
110, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(111, "SDA1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
111, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(112, "SCL1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
112, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(113, "SBO0", 0,
- 113, UNIPHIER_PIN_DRV_4_8,
+ 113, UNIPHIER_PIN_DRV_1BIT,
113, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(114, "SBI0", 0,
- 114, UNIPHIER_PIN_DRV_4_8,
+ 114, UNIPHIER_PIN_DRV_1BIT,
114, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(115, "TXD1", 0,
- 115, UNIPHIER_PIN_DRV_4_8,
+ 115, UNIPHIER_PIN_DRV_1BIT,
115, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(116, "RXD1", 0,
- 116, UNIPHIER_PIN_DRV_4_8,
+ 116, UNIPHIER_PIN_DRV_1BIT,
116, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(117, "PWSRA", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
117, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(118, "XIRQ0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
118, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(119, "XIRQ1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
119, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(120, "XIRQ2", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
120, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(121, "XIRQ3", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
121, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(122, "XIRQ4", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
122, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(123, "XIRQ5", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
123, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(124, "XIRQ6", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
124, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(125, "XIRQ7", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
125, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(126, "XIRQ8", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
126, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(127, "PORT00", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
127, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(128, "PORT01", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
128, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(129, "PORT02", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
129, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(130, "PORT03", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
130, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(131, "PORT04", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
131, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(132, "PORT05", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
132, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(133, "PORT06", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
133, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(134, "PORT07", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
134, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(135, "PORT10", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
135, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(136, "PORT11", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
136, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(137, "PORT12", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
137, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(138, "PORT13", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
138, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(139, "PORT14", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
139, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(140, "PORT15", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
140, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(141, "PORT16", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
141, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(142, "LPST", UNIPHIER_PIN_IECTRL_NONE,
- 142, UNIPHIER_PIN_DRV_4_8,
+ 142, UNIPHIER_PIN_DRV_1BIT,
142, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(143, "MDC", 0,
- 143, UNIPHIER_PIN_DRV_4_8,
+ 143, UNIPHIER_PIN_DRV_1BIT,
143, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(144, "MDIO", 0,
- 144, UNIPHIER_PIN_DRV_4_8,
+ 144, UNIPHIER_PIN_DRV_1BIT,
144, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(145, "MDIO_INTL", 0,
- 145, UNIPHIER_PIN_DRV_4_8,
+ 145, UNIPHIER_PIN_DRV_1BIT,
145, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(146, "PHYRSTL", 0,
- 146, UNIPHIER_PIN_DRV_4_8,
+ 146, UNIPHIER_PIN_DRV_1BIT,
146, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(147, "RGMII_RXCLK", 0,
- 147, UNIPHIER_PIN_DRV_4_8,
+ 147, UNIPHIER_PIN_DRV_1BIT,
147, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(148, "RGMII_RXD0", 0,
- 148, UNIPHIER_PIN_DRV_4_8,
+ 148, UNIPHIER_PIN_DRV_1BIT,
148, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(149, "RGMII_RXD1", 0,
- 149, UNIPHIER_PIN_DRV_4_8,
+ 149, UNIPHIER_PIN_DRV_1BIT,
149, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(150, "RGMII_RXD2", 0,
- 150, UNIPHIER_PIN_DRV_4_8,
+ 150, UNIPHIER_PIN_DRV_1BIT,
150, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(151, "RGMII_RXD3", 0,
- 151, UNIPHIER_PIN_DRV_4_8,
+ 151, UNIPHIER_PIN_DRV_1BIT,
151, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(152, "RGMII_RXCTL", 0,
- 152, UNIPHIER_PIN_DRV_4_8,
+ 152, UNIPHIER_PIN_DRV_1BIT,
152, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(153, "RGMII_TXCLK", 0,
- 153, UNIPHIER_PIN_DRV_4_8,
+ 153, UNIPHIER_PIN_DRV_1BIT,
153, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(154, "RGMII_TXD0", 0,
- 154, UNIPHIER_PIN_DRV_4_8,
+ 154, UNIPHIER_PIN_DRV_1BIT,
154, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(155, "RGMII_TXD1", 0,
- 155, UNIPHIER_PIN_DRV_4_8,
+ 155, UNIPHIER_PIN_DRV_1BIT,
155, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(156, "RGMII_TXD2", 0,
- 156, UNIPHIER_PIN_DRV_4_8,
+ 156, UNIPHIER_PIN_DRV_1BIT,
156, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(157, "RGMII_TXD3", 0,
- 157, UNIPHIER_PIN_DRV_4_8,
+ 157, UNIPHIER_PIN_DRV_1BIT,
157, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(158, "RGMII_TXCTL", 0,
- 158, UNIPHIER_PIN_DRV_4_8,
+ 158, UNIPHIER_PIN_DRV_1BIT,
158, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(159, "A_D_PCD00OUT", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
159, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(160, "A_D_PCD01OUT", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
160, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(161, "A_D_PCD02OUT", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
161, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(162, "A_D_PCD03OUT", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
162, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(163, "A_D_PCD04OUT", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
163, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(164, "A_D_PCD05OUT", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
164, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(165, "A_D_PCD06OUT", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
165, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(166, "A_D_PCD07OUT", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
166, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(167, "A_D_PCD00IN", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
167, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(168, "A_D_PCD01IN", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
168, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(169, "A_D_PCD02IN", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
169, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(170, "A_D_PCD03IN", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
170, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(171, "A_D_PCD04IN", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
171, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(172, "A_D_PCD05IN", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
172, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(173, "A_D_PCD06IN", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
173, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(174, "A_D_PCD07IN", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
174, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(175, "A_D_PCDNOE", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
175, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(176, "A_D_PC0READY", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
176, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(177, "A_D_PC0CD1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
177, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(178, "A_D_PC0CD2", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
178, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(179, "A_D_PC0WAIT", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
179, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(180, "A_D_PC0RESET", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
180, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(181, "A_D_PC0CE1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
181, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(182, "A_D_PC0WE", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
182, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(183, "A_D_PC0OE", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
183, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(184, "A_D_PC0IOWR", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
184, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(185, "A_D_PC0IORD", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
185, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(186, "A_D_PC0NOE", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
186, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(187, "A_D_HS0BCLKIN", 0,
- 187, UNIPHIER_PIN_DRV_4_8,
+ 187, UNIPHIER_PIN_DRV_1BIT,
187, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(188, "A_D_HS0SYNCIN", 0,
- 188, UNIPHIER_PIN_DRV_4_8,
+ 188, UNIPHIER_PIN_DRV_1BIT,
188, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(189, "A_D_HS0VALIN", 0,
- 189, UNIPHIER_PIN_DRV_4_8,
+ 189, UNIPHIER_PIN_DRV_1BIT,
189, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(190, "A_D_HS0DIN0", 0,
- 190, UNIPHIER_PIN_DRV_4_8,
+ 190, UNIPHIER_PIN_DRV_1BIT,
190, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(191, "A_D_HS0DIN1", 0,
- 191, UNIPHIER_PIN_DRV_4_8,
+ 191, UNIPHIER_PIN_DRV_1BIT,
191, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(192, "A_D_HS0DIN2", 0,
- 192, UNIPHIER_PIN_DRV_4_8,
+ 192, UNIPHIER_PIN_DRV_1BIT,
192, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(193, "A_D_HS0DIN3", 0,
- 193, UNIPHIER_PIN_DRV_4_8,
+ 193, UNIPHIER_PIN_DRV_1BIT,
193, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(194, "A_D_HS0DIN4", 0,
- 194, UNIPHIER_PIN_DRV_4_8,
+ 194, UNIPHIER_PIN_DRV_1BIT,
194, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(195, "A_D_HS0DIN5", 0,
- 195, UNIPHIER_PIN_DRV_4_8,
+ 195, UNIPHIER_PIN_DRV_1BIT,
195, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(196, "A_D_HS0DIN6", 0,
- 196, UNIPHIER_PIN_DRV_4_8,
+ 196, UNIPHIER_PIN_DRV_1BIT,
196, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(197, "A_D_HS0DIN7", 0,
- 197, UNIPHIER_PIN_DRV_4_8,
+ 197, UNIPHIER_PIN_DRV_1BIT,
197, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(198, "A_D_AO1ARC", 0,
- 198, UNIPHIER_PIN_DRV_4_8,
+ 198, UNIPHIER_PIN_DRV_1BIT,
198, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(199, "A_D_SPIXRST", UNIPHIER_PIN_IECTRL_NONE,
- 199, UNIPHIER_PIN_DRV_4_8,
+ 199, UNIPHIER_PIN_DRV_1BIT,
199, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(200, "A_D_SPISCLK0", UNIPHIER_PIN_IECTRL_NONE,
- 200, UNIPHIER_PIN_DRV_4_8,
+ 200, UNIPHIER_PIN_DRV_1BIT,
200, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(201, "A_D_SPITXD0", UNIPHIER_PIN_IECTRL_NONE,
- 201, UNIPHIER_PIN_DRV_4_8,
+ 201, UNIPHIER_PIN_DRV_1BIT,
201, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(202, "A_D_SPIRXD0", UNIPHIER_PIN_IECTRL_NONE,
- 202, UNIPHIER_PIN_DRV_4_8,
+ 202, UNIPHIER_PIN_DRV_1BIT,
202, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(203, "A_D_DMDCLK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
203, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(204, "A_D_DMDPSYNC", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
204, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(205, "A_D_DMDVAL", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
205, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(206, "A_D_DMDDATA", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
206, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(207, "A_D_HDMIRXXIRQ", 0,
- 207, UNIPHIER_PIN_DRV_4_8,
+ 207, UNIPHIER_PIN_DRV_1BIT,
207, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(208, "A_D_VBIXIRQ", 0,
- 208, UNIPHIER_PIN_DRV_4_8,
+ 208, UNIPHIER_PIN_DRV_1BIT,
208, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(209, "A_D_HDMITXXIRQ", 0,
- 209, UNIPHIER_PIN_DRV_4_8,
+ 209, UNIPHIER_PIN_DRV_1BIT,
209, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(210, "A_D_DMDIRQ", UNIPHIER_PIN_IECTRL_NONE,
- 210, UNIPHIER_PIN_DRV_4_8,
+ 210, UNIPHIER_PIN_DRV_1BIT,
210, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(211, "A_D_SPICIRQ", UNIPHIER_PIN_IECTRL_NONE,
- 211, UNIPHIER_PIN_DRV_4_8,
+ 211, UNIPHIER_PIN_DRV_1BIT,
211, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(212, "A_D_SPIBIRQ", UNIPHIER_PIN_IECTRL_NONE,
- 212, UNIPHIER_PIN_DRV_4_8,
+ 212, UNIPHIER_PIN_DRV_1BIT,
212, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(213, "A_D_BESDAOUT", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
213, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(214, "A_D_BESDAIN", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
214, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(215, "A_D_BESCLOUT", UNIPHIER_PIN_IECTRL_NONE,
- 215, UNIPHIER_PIN_DRV_4_8,
+ 215, UNIPHIER_PIN_DRV_1BIT,
215, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(216, "A_D_VDACCLKOUT", 0,
- 216, UNIPHIER_PIN_DRV_4_8,
+ 216, UNIPHIER_PIN_DRV_1BIT,
216, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(217, "A_D_VDACDOUT5", 0,
- 217, UNIPHIER_PIN_DRV_4_8,
+ 217, UNIPHIER_PIN_DRV_1BIT,
217, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(218, "A_D_VDACDOUT6", 0,
- 218, UNIPHIER_PIN_DRV_4_8,
+ 218, UNIPHIER_PIN_DRV_1BIT,
218, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(219, "A_D_VDACDOUT7", 0,
- 219, UNIPHIER_PIN_DRV_4_8,
+ 219, UNIPHIER_PIN_DRV_1BIT,
219, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(220, "A_D_VDACDOUT8", 0,
- 220, UNIPHIER_PIN_DRV_4_8,
+ 220, UNIPHIER_PIN_DRV_1BIT,
220, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(221, "A_D_VDACDOUT9", 0,
- 221, UNIPHIER_PIN_DRV_4_8,
+ 221, UNIPHIER_PIN_DRV_1BIT,
221, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(222, "A_D_SIFBCKIN", 0,
- 222, UNIPHIER_PIN_DRV_4_8,
+ 222, UNIPHIER_PIN_DRV_1BIT,
222, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(223, "A_D_SIFLRCKIN", 0,
- 223, UNIPHIER_PIN_DRV_4_8,
+ 223, UNIPHIER_PIN_DRV_1BIT,
223, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(224, "A_D_SIFDIN", 0,
- 224, UNIPHIER_PIN_DRV_4_8,
+ 224, UNIPHIER_PIN_DRV_1BIT,
224, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(225, "A_D_LIBCKOUT", 0,
- 225, UNIPHIER_PIN_DRV_4_8,
+ 225, UNIPHIER_PIN_DRV_1BIT,
225, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(226, "A_D_LILRCKOUT", 0,
- 226, UNIPHIER_PIN_DRV_4_8,
+ 226, UNIPHIER_PIN_DRV_1BIT,
226, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(227, "A_D_LIDIN", 0,
- 227, UNIPHIER_PIN_DRV_4_8,
+ 227, UNIPHIER_PIN_DRV_1BIT,
227, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(228, "A_D_LODOUT", 0,
- 228, UNIPHIER_PIN_DRV_4_8,
+ 228, UNIPHIER_PIN_DRV_1BIT,
228, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(229, "A_D_HPDOUT", 0,
- 229, UNIPHIER_PIN_DRV_4_8,
+ 229, UNIPHIER_PIN_DRV_1BIT,
229, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(230, "A_D_MCLK", 0,
- 230, UNIPHIER_PIN_DRV_4_8,
+ 230, UNIPHIER_PIN_DRV_1BIT,
230, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(231, "A_D_A2PLLREFOUT", 0,
- 231, UNIPHIER_PIN_DRV_4_8,
+ 231, UNIPHIER_PIN_DRV_1BIT,
231, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(232, "A_D_HDMI3DSDAOUT", 0,
- 232, UNIPHIER_PIN_DRV_4_8,
+ 232, UNIPHIER_PIN_DRV_1BIT,
232, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(233, "A_D_HDMI3DSDAIN", 0,
- 233, UNIPHIER_PIN_DRV_4_8,
+ 233, UNIPHIER_PIN_DRV_1BIT,
233, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(234, "A_D_HDMI3DSCLIN", 0,
- 234, UNIPHIER_PIN_DRV_4_8,
+ 234, UNIPHIER_PIN_DRV_1BIT,
234, UNIPHIER_PIN_PULL_DOWN),
};
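Worth noticing in the SDCLK..SDDAT3 entries above: the drive-control number changes from a raw register offset (0, 4, 8, 12, 16, 20) to a plain field index (0, 1, 2, 3, 4, 5) at the same moment DRV_8_12_16_20 becomes DRV_2BIT. The implication, inferred from the renumbering alone, is that the common code now derives the register position from the index and the field width. The sketch below shows that computation under an assumed packing of sixteen 2-bit fields per 32-bit register; the function name and layout are illustrative, not taken from the patch.

/* Sketch only: locate a 2-bit drive field from its index (assumed packing). */
static void sketch_2bit_field_pos(unsigned int field_index,
				  unsigned int *reg_offset,
				  unsigned int *bit_shift)
{
	*reg_offset = field_index * 2 / 32 * 4;	/* byte offset of the register */
	*bit_shift  = field_index * 2 % 32;	/* position within the register */
}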
@@ -737,52 +735,73 @@ static const unsigned adinter_pins[] = {
215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228,
229, 230, 231, 232, 233, 234,
};
-static const unsigned adinter_muxvals[] = {
+static const int adinter_muxvals[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
};
static const unsigned emmc_pins[] = {36, 37, 38, 39, 40, 41, 42};
-static const unsigned emmc_muxvals[] = {1, 1, 1, 1, 1, 1, 1};
+static const int emmc_muxvals[] = {1, 1, 1, 1, 1, 1, 1};
static const unsigned emmc_dat8_pins[] = {43, 44, 45, 46};
-static const unsigned emmc_dat8_muxvals[] = {1, 1, 1, 1};
+static const int emmc_dat8_muxvals[] = {1, 1, 1, 1};
+static const unsigned ether_rgmii_pins[] = {143, 144, 145, 146, 147, 148, 149,
+ 150, 151, 152, 153, 154, 155, 156,
+ 157, 158};
+static const int ether_rgmii_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0};
+static const unsigned ether_rmii_pins[] = {143, 144, 145, 146, 147, 148, 149,
+ 150, 152, 154, 155, 158};
+static const int ether_rmii_muxvals[] = {0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1};
static const unsigned i2c0_pins[] = {109, 110};
-static const unsigned i2c0_muxvals[] = {0, 0};
+static const int i2c0_muxvals[] = {0, 0};
static const unsigned i2c1_pins[] = {111, 112};
-static const unsigned i2c1_muxvals[] = {0, 0};
+static const int i2c1_muxvals[] = {0, 0};
static const unsigned i2c2_pins[] = {115, 116};
-static const unsigned i2c2_muxvals[] = {1, 1};
+static const int i2c2_muxvals[] = {1, 1};
static const unsigned i2c3_pins[] = {118, 119};
-static const unsigned i2c3_muxvals[] = {1, 1};
+static const int i2c3_muxvals[] = {1, 1};
static const unsigned nand_pins[] = {30, 31, 32, 33, 34, 35, 36, 39, 40, 41,
42, 43, 44, 45, 46};
-static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0};
+static const int nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
static const unsigned nand_cs1_pins[] = {37, 38};
-static const unsigned nand_cs1_muxvals[] = {0, 0};
+static const int nand_cs1_muxvals[] = {0, 0};
static const unsigned sd_pins[] = {47, 48, 49, 50, 51, 52, 53, 54, 55};
-static const unsigned sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const int sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned system_bus_pins[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13};
+static const int system_bus_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0};
+static const unsigned system_bus_cs1_pins[] = {14};
+static const int system_bus_cs1_muxvals[] = {0};
+static const unsigned system_bus_cs2_pins[] = {37};
+static const int system_bus_cs2_muxvals[] = {6};
+static const unsigned system_bus_cs3_pins[] = {38};
+static const int system_bus_cs3_muxvals[] = {6};
+static const unsigned system_bus_cs4_pins[] = {115};
+static const int system_bus_cs4_muxvals[] = {6};
+static const unsigned system_bus_cs5_pins[] = {55};
+static const int system_bus_cs5_muxvals[] = {6};
static const unsigned uart0_pins[] = {135, 136};
-static const unsigned uart0_muxvals[] = {3, 3};
+static const int uart0_muxvals[] = {3, 3};
static const unsigned uart0b_pins[] = {11, 12};
-static const unsigned uart0b_muxvals[] = {2, 2};
+static const int uart0b_muxvals[] = {2, 2};
static const unsigned uart1_pins[] = {115, 116};
-static const unsigned uart1_muxvals[] = {0, 0};
+static const int uart1_muxvals[] = {0, 0};
static const unsigned uart1b_pins[] = {113, 114};
-static const unsigned uart1b_muxvals[] = {1, 1};
+static const int uart1b_muxvals[] = {1, 1};
static const unsigned uart2_pins[] = {113, 114};
-static const unsigned uart2_muxvals[] = {2, 2};
+static const int uart2_muxvals[] = {2, 2};
static const unsigned uart2b_pins[] = {86, 87};
-static const unsigned uart2b_muxvals[] = {1, 1};
+static const int uart2b_muxvals[] = {1, 1};
static const unsigned usb0_pins[] = {56, 57};
-static const unsigned usb0_muxvals[] = {0, 0};
+static const int usb0_muxvals[] = {0, 0};
static const unsigned usb1_pins[] = {58, 59};
-static const unsigned usb1_muxvals[] = {0, 0};
+static const int usb1_muxvals[] = {0, 0};
static const unsigned usb2_pins[] = {60, 61};
-static const unsigned usb2_muxvals[] = {0, 0};
+static const int usb2_muxvals[] = {0, 0};
static const unsigned usb3_pins[] = {62, 63};
-static const unsigned usb3_muxvals[] = {0, 0};
+static const int usb3_muxvals[] = {0, 0};
static const unsigned port_range0_pins[] = {
127, 128, 129, 130, 131, 132, 133, 134, /* PORT0x */
135, 136, 137, 138, 139, 140, 141, 142, /* PORT1x */
@@ -796,7 +815,7 @@ static const unsigned port_range0_pins[] = {
61, 62, 63, 64, 65, 66, 67, 68, /* PORT9x */
69, 70, 71, 76, 77, 78, 79, 80, /* PORT10x */
};
-static const unsigned port_range0_muxvals[] = {
+static const int port_range0_muxvals[] = {
15, 15, 15, 15, 15, 15, 15, 15, /* PORT0x */
15, 15, 15, 15, 15, 15, 15, 15, /* PORT1x */
15, 15, 15, 15, 15, 15, 15, 15, /* PORT2x */
@@ -828,7 +847,7 @@ static const unsigned port_range1_pins[] = {
218, 219, 220, 221, 223, 224, 225, 226, /* PORT27x */
227, 228, 229, 230, 231, 232, 233, 234, /* PORT28x */
};
-static const unsigned port_range1_muxvals[] = {
+static const int port_range1_muxvals[] = {
15, 15, 15, 15, 15, 15, 15, 15, /* PORT12x */
15, 15, 15, 15, 15, 15, 15, 15, /* PORT13x */
15, 15, 15, 15, 15, 15, 15, 15, /* PORT14x */
@@ -852,16 +871,18 @@ static const unsigned xirq_pins[] = {
126, 72, 73, 92, 177, 93, 94, 176, /* XIRQ8-15 */
74, 91, 27, 28, 29, 75, 20, 26, /* XIRQ16-23 */
};
-static const unsigned xirq_muxvals[] = {
+static const int xirq_muxvals[] = {
14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ0-7 */
14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ8-15 */
14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ16-23 */
};
-static const struct uniphier_pinctrl_group ph1_ld6b_groups[] = {
+static const struct uniphier_pinctrl_group uniphier_ld6b_groups[] = {
UNIPHIER_PINCTRL_GROUP(adinter),
UNIPHIER_PINCTRL_GROUP(emmc),
UNIPHIER_PINCTRL_GROUP(emmc_dat8),
+ UNIPHIER_PINCTRL_GROUP(ether_rgmii),
+ UNIPHIER_PINCTRL_GROUP(ether_rmii),
UNIPHIER_PINCTRL_GROUP(i2c0),
UNIPHIER_PINCTRL_GROUP(i2c1),
UNIPHIER_PINCTRL_GROUP(i2c2),
@@ -869,6 +890,12 @@ static const struct uniphier_pinctrl_group ph1_ld6b_groups[] = {
UNIPHIER_PINCTRL_GROUP(nand),
UNIPHIER_PINCTRL_GROUP(nand_cs1),
UNIPHIER_PINCTRL_GROUP(sd),
+ UNIPHIER_PINCTRL_GROUP(system_bus),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs1),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs2),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs3),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs4),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs5),
UNIPHIER_PINCTRL_GROUP(uart0),
UNIPHIER_PINCTRL_GROUP(uart0b),
UNIPHIER_PINCTRL_GROUP(uart1),
@@ -1134,12 +1161,20 @@ static const struct uniphier_pinctrl_group ph1_ld6b_groups[] = {
static const char * const adinter_groups[] = {"adinter"};
static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
+static const char * const ether_rgmii_groups[] = {"ether_rgmii"};
+static const char * const ether_rmii_groups[] = {"ether_rmii"};
static const char * const i2c0_groups[] = {"i2c0"};
static const char * const i2c1_groups[] = {"i2c1"};
static const char * const i2c2_groups[] = {"i2c2"};
static const char * const i2c3_groups[] = {"i2c3"};
static const char * const nand_groups[] = {"nand", "nand_cs1"};
static const char * const sd_groups[] = {"sd"};
+static const char * const system_bus_groups[] = {"system_bus",
+ "system_bus_cs1",
+ "system_bus_cs2",
+ "system_bus_cs3",
+ "system_bus_cs4",
+ "system_bus_cs5"};
static const char * const uart0_groups[] = {"uart0", "uart0b"};
static const char * const uart1_groups[] = {"uart1", "uart1b"};
static const char * const uart2_groups[] = {"uart2", "uart2b"};
@@ -1215,15 +1250,18 @@ static const char * const xirq_groups[] = {
"xirq20", "xirq21", "xirq22", "xirq23",
};
-static const struct uniphier_pinmux_function ph1_ld6b_functions[] = {
+static const struct uniphier_pinmux_function uniphier_ld6b_functions[] = {
UNIPHIER_PINMUX_FUNCTION(adinter), /* Achip-Dchip interconnect */
UNIPHIER_PINMUX_FUNCTION(emmc),
+ UNIPHIER_PINMUX_FUNCTION(ether_rgmii),
+ UNIPHIER_PINMUX_FUNCTION(ether_rmii),
UNIPHIER_PINMUX_FUNCTION(i2c0),
UNIPHIER_PINMUX_FUNCTION(i2c1),
UNIPHIER_PINMUX_FUNCTION(i2c2),
UNIPHIER_PINMUX_FUNCTION(i2c3),
UNIPHIER_PINMUX_FUNCTION(nand),
UNIPHIER_PINMUX_FUNCTION(sd),
+ UNIPHIER_PINMUX_FUNCTION(system_bus),
UNIPHIER_PINMUX_FUNCTION(uart0),
UNIPHIER_PINMUX_FUNCTION(uart1),
UNIPHIER_PINMUX_FUNCTION(uart2),
@@ -1235,43 +1273,36 @@ static const struct uniphier_pinmux_function ph1_ld6b_functions[] = {
UNIPHIER_PINMUX_FUNCTION(xirq),
};
-static struct uniphier_pinctrl_socdata ph1_ld6b_pindata = {
- .groups = ph1_ld6b_groups,
- .groups_count = ARRAY_SIZE(ph1_ld6b_groups),
- .functions = ph1_ld6b_functions,
- .functions_count = ARRAY_SIZE(ph1_ld6b_functions),
- .mux_bits = 8,
- .reg_stride = 4,
- .load_pinctrl = false,
-};
-
-static struct pinctrl_desc ph1_ld6b_pinctrl_desc = {
- .name = DRIVER_NAME,
- .pins = ph1_ld6b_pins,
- .npins = ARRAY_SIZE(ph1_ld6b_pins),
- .owner = THIS_MODULE,
+static struct uniphier_pinctrl_socdata uniphier_ld6b_pindata = {
+ .pins = uniphier_ld6b_pins,
+ .npins = ARRAY_SIZE(uniphier_ld6b_pins),
+ .groups = uniphier_ld6b_groups,
+ .groups_count = ARRAY_SIZE(uniphier_ld6b_groups),
+ .functions = uniphier_ld6b_functions,
+ .functions_count = ARRAY_SIZE(uniphier_ld6b_functions),
+ .caps = 0,
};
-static int ph1_ld6b_pinctrl_probe(struct platform_device *pdev)
+static int uniphier_ld6b_pinctrl_probe(struct platform_device *pdev)
{
- return uniphier_pinctrl_probe(pdev, &ph1_ld6b_pinctrl_desc,
- &ph1_ld6b_pindata);
+ return uniphier_pinctrl_probe(pdev, &uniphier_ld6b_pindata);
}
-static const struct of_device_id ph1_ld6b_pinctrl_match[] = {
+static const struct of_device_id uniphier_ld6b_pinctrl_match[] = {
+ { .compatible = "socionext,uniphier-ld6b-pinctrl" },
{ .compatible = "socionext,ph1-ld6b-pinctrl" },
{ /* sentinel */ }
};
-MODULE_DEVICE_TABLE(of, ph1_ld6b_pinctrl_match);
+MODULE_DEVICE_TABLE(of, uniphier_ld6b_pinctrl_match);
-static struct platform_driver ph1_ld6b_pinctrl_driver = {
- .probe = ph1_ld6b_pinctrl_probe,
+static struct platform_driver uniphier_ld6b_pinctrl_driver = {
+ .probe = uniphier_ld6b_pinctrl_probe,
.driver = {
- .name = DRIVER_NAME,
- .of_match_table = ph1_ld6b_pinctrl_match,
+ .name = "uniphier-ld6b-pinctrl",
+ .of_match_table = uniphier_ld6b_pinctrl_match,
},
};
-module_platform_driver(ph1_ld6b_pinctrl_driver);
+module_platform_driver(uniphier_ld6b_pinctrl_driver);
MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
MODULE_DESCRIPTION("UniPhier PH1-LD6b pinctrl driver");
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
index b1f09e68f..c306e844f 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
@@ -19,1039 +19,1072 @@
#include "pinctrl-uniphier.h"
-#define DRIVER_NAME "ph1-pro4-pinctrl"
-
-static const struct pinctrl_pin_desc ph1_pro4_pins[] = {
+static const struct pinctrl_pin_desc uniphier_pro4_pins[] = {
UNIPHIER_PINCTRL_PIN(0, "CK24O", UNIPHIER_PIN_IECTRL_NONE,
- 0, UNIPHIER_PIN_DRV_4_8,
+ 0, UNIPHIER_PIN_DRV_1BIT,
0, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(1, "VC27A", UNIPHIER_PIN_IECTRL_NONE,
- 1, UNIPHIER_PIN_DRV_4_8,
+ 1, UNIPHIER_PIN_DRV_1BIT,
1, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(2, "CK27AI", UNIPHIER_PIN_IECTRL_NONE,
- 2, UNIPHIER_PIN_DRV_4_8,
+ 2, UNIPHIER_PIN_DRV_1BIT,
2, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(3, "CK27AO", UNIPHIER_PIN_IECTRL_NONE,
- 3, UNIPHIER_PIN_DRV_4_8,
+ 3, UNIPHIER_PIN_DRV_1BIT,
3, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(4, "CKSEL", UNIPHIER_PIN_IECTRL_NONE,
- 4, UNIPHIER_PIN_DRV_4_8,
+ 4, UNIPHIER_PIN_DRV_1BIT,
4, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(5, "CK27AV", UNIPHIER_PIN_IECTRL_NONE,
- 5, UNIPHIER_PIN_DRV_4_8,
+ 5, UNIPHIER_PIN_DRV_1BIT,
5, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(6, "AEXCKA", UNIPHIER_PIN_IECTRL_NONE,
- 6, UNIPHIER_PIN_DRV_4_8,
+ 6, UNIPHIER_PIN_DRV_1BIT,
6, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(7, "ASEL", UNIPHIER_PIN_IECTRL_NONE,
- 7, UNIPHIER_PIN_DRV_4_8,
+ 7, UNIPHIER_PIN_DRV_1BIT,
7, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(8, "ARCRESET", UNIPHIER_PIN_IECTRL_NONE,
- 8, UNIPHIER_PIN_DRV_4_8,
+ 8, UNIPHIER_PIN_DRV_1BIT,
8, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(9, "ARCUNLOCK", UNIPHIER_PIN_IECTRL_NONE,
- 9, UNIPHIER_PIN_DRV_4_8,
+ 9, UNIPHIER_PIN_DRV_1BIT,
9, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(10, "XSRST", UNIPHIER_PIN_IECTRL_NONE,
- 10, UNIPHIER_PIN_DRV_4_8,
+ 10, UNIPHIER_PIN_DRV_1BIT,
10, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(11, "XNMIRQ", UNIPHIER_PIN_IECTRL_NONE,
- 11, UNIPHIER_PIN_DRV_4_8,
+ 11, UNIPHIER_PIN_DRV_1BIT,
11, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(12, "XSCIRQ", UNIPHIER_PIN_IECTRL_NONE,
- 12, UNIPHIER_PIN_DRV_4_8,
+ 12, UNIPHIER_PIN_DRV_1BIT,
12, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(13, "EXTRG", UNIPHIER_PIN_IECTRL_NONE,
- 13, UNIPHIER_PIN_DRV_4_8,
+ 13, UNIPHIER_PIN_DRV_1BIT,
13, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(14, "TRCCLK", UNIPHIER_PIN_IECTRL_NONE,
- 14, UNIPHIER_PIN_DRV_4_8,
+ 14, UNIPHIER_PIN_DRV_1BIT,
14, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(15, "TRCCTL", UNIPHIER_PIN_IECTRL_NONE,
- 15, UNIPHIER_PIN_DRV_4_8,
+ 15, UNIPHIER_PIN_DRV_1BIT,
15, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(16, "TRCD0", UNIPHIER_PIN_IECTRL_NONE,
- 16, UNIPHIER_PIN_DRV_4_8,
+ 16, UNIPHIER_PIN_DRV_1BIT,
16, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(17, "TRCD1", UNIPHIER_PIN_IECTRL_NONE,
- 17, UNIPHIER_PIN_DRV_4_8,
+ 17, UNIPHIER_PIN_DRV_1BIT,
17, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(18, "TRCD2", UNIPHIER_PIN_IECTRL_NONE,
- 18, UNIPHIER_PIN_DRV_4_8,
+ 18, UNIPHIER_PIN_DRV_1BIT,
18, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(19, "TRCD3", UNIPHIER_PIN_IECTRL_NONE,
- 19, UNIPHIER_PIN_DRV_4_8,
+ 19, UNIPHIER_PIN_DRV_1BIT,
19, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(20, "TRCD4", UNIPHIER_PIN_IECTRL_NONE,
- 20, UNIPHIER_PIN_DRV_4_8,
+ 20, UNIPHIER_PIN_DRV_1BIT,
20, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(21, "TRCD5", UNIPHIER_PIN_IECTRL_NONE,
- 21, UNIPHIER_PIN_DRV_4_8,
+ 21, UNIPHIER_PIN_DRV_1BIT,
21, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(22, "TRCD6", UNIPHIER_PIN_IECTRL_NONE,
- 22, UNIPHIER_PIN_DRV_4_8,
+ 22, UNIPHIER_PIN_DRV_1BIT,
22, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(23, "TRCD7", UNIPHIER_PIN_IECTRL_NONE,
- 23, UNIPHIER_PIN_DRV_4_8,
+ 23, UNIPHIER_PIN_DRV_1BIT,
23, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(24, "XECS1", UNIPHIER_PIN_IECTRL_NONE,
- 24, UNIPHIER_PIN_DRV_4_8,
+ 24, UNIPHIER_PIN_DRV_1BIT,
24, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(25, "ERXW", UNIPHIER_PIN_IECTRL_NONE,
- 25, UNIPHIER_PIN_DRV_4_8,
+ 25, UNIPHIER_PIN_DRV_1BIT,
25, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(26, "XERWE0", UNIPHIER_PIN_IECTRL_NONE,
- 26, UNIPHIER_PIN_DRV_4_8,
+ 26, UNIPHIER_PIN_DRV_1BIT,
26, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(27, "XERWE1", UNIPHIER_PIN_IECTRL_NONE,
- 27, UNIPHIER_PIN_DRV_4_8,
+ 27, UNIPHIER_PIN_DRV_1BIT,
27, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(28, "ES0", UNIPHIER_PIN_IECTRL_NONE,
- 28, UNIPHIER_PIN_DRV_4_8,
+ 28, UNIPHIER_PIN_DRV_1BIT,
28, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(29, "ES1", UNIPHIER_PIN_IECTRL_NONE,
- 29, UNIPHIER_PIN_DRV_4_8,
+ 29, UNIPHIER_PIN_DRV_1BIT,
29, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(30, "ES2", UNIPHIER_PIN_IECTRL_NONE,
- 30, UNIPHIER_PIN_DRV_4_8,
+ 30, UNIPHIER_PIN_DRV_1BIT,
30, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(31, "ED0", UNIPHIER_PIN_IECTRL_NONE,
- 31, UNIPHIER_PIN_DRV_4_8,
+ 31, UNIPHIER_PIN_DRV_1BIT,
31, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(32, "ED1", UNIPHIER_PIN_IECTRL_NONE,
- 32, UNIPHIER_PIN_DRV_4_8,
+ 32, UNIPHIER_PIN_DRV_1BIT,
32, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(33, "ED2", UNIPHIER_PIN_IECTRL_NONE,
- 33, UNIPHIER_PIN_DRV_4_8,
+ 33, UNIPHIER_PIN_DRV_1BIT,
33, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(34, "ED3", UNIPHIER_PIN_IECTRL_NONE,
- 34, UNIPHIER_PIN_DRV_4_8,
+ 34, UNIPHIER_PIN_DRV_1BIT,
34, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(35, "ED4", UNIPHIER_PIN_IECTRL_NONE,
- 35, UNIPHIER_PIN_DRV_4_8,
+ 35, UNIPHIER_PIN_DRV_1BIT,
35, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(36, "ED5", UNIPHIER_PIN_IECTRL_NONE,
- 36, UNIPHIER_PIN_DRV_4_8,
+ 36, UNIPHIER_PIN_DRV_1BIT,
36, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(37, "ED6", UNIPHIER_PIN_IECTRL_NONE,
- 37, UNIPHIER_PIN_DRV_4_8,
+ 37, UNIPHIER_PIN_DRV_1BIT,
37, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(38, "ED7", UNIPHIER_PIN_IECTRL_NONE,
- 38, UNIPHIER_PIN_DRV_4_8,
+ 38, UNIPHIER_PIN_DRV_1BIT,
38, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(39, "BOOTSWAP", UNIPHIER_PIN_IECTRL_NONE,
- 39, UNIPHIER_PIN_DRV_NONE,
+ -1, UNIPHIER_PIN_DRV_NONE,
39, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(40, "NFD0", UNIPHIER_PIN_IECTRL_NONE,
- 2, UNIPHIER_PIN_DRV_8_12_16_20,
+ 2, UNIPHIER_PIN_DRV_2BIT,
40, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(41, "NFD1", UNIPHIER_PIN_IECTRL_NONE,
- 3, UNIPHIER_PIN_DRV_8_12_16_20,
+ 3, UNIPHIER_PIN_DRV_2BIT,
41, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(42, "NFD2", UNIPHIER_PIN_IECTRL_NONE,
- 4, UNIPHIER_PIN_DRV_8_12_16_20,
+ 4, UNIPHIER_PIN_DRV_2BIT,
42, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(43, "NFD3", UNIPHIER_PIN_IECTRL_NONE,
- 5, UNIPHIER_PIN_DRV_8_12_16_20,
+ 5, UNIPHIER_PIN_DRV_2BIT,
43, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(44, "NFD4", UNIPHIER_PIN_IECTRL_NONE,
- 6, UNIPHIER_PIN_DRV_8_12_16_20,
+ 6, UNIPHIER_PIN_DRV_2BIT,
44, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(45, "NFD5", UNIPHIER_PIN_IECTRL_NONE,
- 7, UNIPHIER_PIN_DRV_8_12_16_20,
+ 7, UNIPHIER_PIN_DRV_2BIT,
45, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(46, "NFD6", UNIPHIER_PIN_IECTRL_NONE,
- 8, UNIPHIER_PIN_DRV_8_12_16_20,
+ 8, UNIPHIER_PIN_DRV_2BIT,
46, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(47, "NFD7", UNIPHIER_PIN_IECTRL_NONE,
- 9, UNIPHIER_PIN_DRV_8_12_16_20,
+ 9, UNIPHIER_PIN_DRV_2BIT,
47, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(48, "NFALE", UNIPHIER_PIN_IECTRL_NONE,
- 48, UNIPHIER_PIN_DRV_4_8,
+ 48, UNIPHIER_PIN_DRV_1BIT,
48, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(49, "NFCLE", UNIPHIER_PIN_IECTRL_NONE,
- 49, UNIPHIER_PIN_DRV_4_8,
+ 49, UNIPHIER_PIN_DRV_1BIT,
49, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(50, "XNFRE", UNIPHIER_PIN_IECTRL_NONE,
- 50, UNIPHIER_PIN_DRV_4_8,
+ 50, UNIPHIER_PIN_DRV_1BIT,
50, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(51, "XNFWE", UNIPHIER_PIN_IECTRL_NONE,
- 0, UNIPHIER_PIN_DRV_8_12_16_20,
+ 0, UNIPHIER_PIN_DRV_2BIT,
51, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(52, "XNFWP", UNIPHIER_PIN_IECTRL_NONE,
- 52, UNIPHIER_PIN_DRV_4_8,
+ 52, UNIPHIER_PIN_DRV_1BIT,
52, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(53, "XNFCE0", UNIPHIER_PIN_IECTRL_NONE,
- 1, UNIPHIER_PIN_DRV_8_12_16_20,
+ 1, UNIPHIER_PIN_DRV_2BIT,
53, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(54, "NRYBY0", UNIPHIER_PIN_IECTRL_NONE,
- 54, UNIPHIER_PIN_DRV_4_8,
+ 54, UNIPHIER_PIN_DRV_1BIT,
54, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(55, "DMDSCLTST", UNIPHIER_PIN_IECTRL_NONE,
-1, UNIPHIER_PIN_DRV_NONE,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(56, "DMDSDATST", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(57, "AGCI0", 3,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
55, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(58, "DMDSCL0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(59, "DMDSDA0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(60, "AGCBS0", 5,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
56, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(61, "DMDSCL1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(62, "DMDSDA1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(63, "ANTSHORT", UNIPHIER_PIN_IECTRL_NONE,
- 57, UNIPHIER_PIN_DRV_4_8,
+ 57, UNIPHIER_PIN_DRV_1BIT,
57, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(64, "CH0CLK", UNIPHIER_PIN_IECTRL_NONE,
- 58, UNIPHIER_PIN_DRV_4_8,
+ 58, UNIPHIER_PIN_DRV_1BIT,
58, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(65, "CH0VAL", UNIPHIER_PIN_IECTRL_NONE,
- 59, UNIPHIER_PIN_DRV_4_8,
+ 59, UNIPHIER_PIN_DRV_1BIT,
59, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(66, "CH0PSYNC", UNIPHIER_PIN_IECTRL_NONE,
- 60, UNIPHIER_PIN_DRV_4_8,
+ 60, UNIPHIER_PIN_DRV_1BIT,
60, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(67, "CH0DATA", UNIPHIER_PIN_IECTRL_NONE,
- 61, UNIPHIER_PIN_DRV_4_8,
+ 61, UNIPHIER_PIN_DRV_1BIT,
61, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(68, "CH1CLK", UNIPHIER_PIN_IECTRL_NONE,
- 62, UNIPHIER_PIN_DRV_4_8,
+ 62, UNIPHIER_PIN_DRV_1BIT,
62, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(69, "CH1VAL", UNIPHIER_PIN_IECTRL_NONE,
- 63, UNIPHIER_PIN_DRV_4_8,
+ 63, UNIPHIER_PIN_DRV_1BIT,
63, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(70, "CH1PSYNC", UNIPHIER_PIN_IECTRL_NONE,
- 64, UNIPHIER_PIN_DRV_4_8,
+ 64, UNIPHIER_PIN_DRV_1BIT,
64, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(71, "CH1DATA", UNIPHIER_PIN_IECTRL_NONE,
- 65, UNIPHIER_PIN_DRV_4_8,
+ 65, UNIPHIER_PIN_DRV_1BIT,
65, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(72, "CH2CLK", UNIPHIER_PIN_IECTRL_NONE,
- 66, UNIPHIER_PIN_DRV_4_8,
+ 66, UNIPHIER_PIN_DRV_1BIT,
66, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(73, "CH2VAL", UNIPHIER_PIN_IECTRL_NONE,
- 67, UNIPHIER_PIN_DRV_4_8,
+ 67, UNIPHIER_PIN_DRV_1BIT,
67, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(74, "CH2PSYNC", UNIPHIER_PIN_IECTRL_NONE,
- 68, UNIPHIER_PIN_DRV_4_8,
+ 68, UNIPHIER_PIN_DRV_1BIT,
68, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(75, "CH2DATA", UNIPHIER_PIN_IECTRL_NONE,
- 69, UNIPHIER_PIN_DRV_4_8,
+ 69, UNIPHIER_PIN_DRV_1BIT,
69, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(76, "CH3CLK", UNIPHIER_PIN_IECTRL_NONE,
- 70, UNIPHIER_PIN_DRV_4_8,
+ 70, UNIPHIER_PIN_DRV_1BIT,
70, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(77, "CH3VAL", UNIPHIER_PIN_IECTRL_NONE,
- 71, UNIPHIER_PIN_DRV_4_8,
+ 71, UNIPHIER_PIN_DRV_1BIT,
71, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(78, "CH3PSYNC", UNIPHIER_PIN_IECTRL_NONE,
- 72, UNIPHIER_PIN_DRV_4_8,
+ 72, UNIPHIER_PIN_DRV_1BIT,
72, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(79, "CH3DATA", UNIPHIER_PIN_IECTRL_NONE,
- 73, UNIPHIER_PIN_DRV_4_8,
+ 73, UNIPHIER_PIN_DRV_1BIT,
73, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(80, "CH4CLK", UNIPHIER_PIN_IECTRL_NONE,
- 74, UNIPHIER_PIN_DRV_4_8,
+ 74, UNIPHIER_PIN_DRV_1BIT,
74, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(81, "CH4VAL", UNIPHIER_PIN_IECTRL_NONE,
- 75, UNIPHIER_PIN_DRV_4_8,
+ 75, UNIPHIER_PIN_DRV_1BIT,
75, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(82, "CH4PSYNC", UNIPHIER_PIN_IECTRL_NONE,
- 76, UNIPHIER_PIN_DRV_4_8,
+ 76, UNIPHIER_PIN_DRV_1BIT,
76, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(83, "CH4DATA", UNIPHIER_PIN_IECTRL_NONE,
- 77, UNIPHIER_PIN_DRV_4_8,
+ 77, UNIPHIER_PIN_DRV_1BIT,
77, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(84, "CH5CLK", UNIPHIER_PIN_IECTRL_NONE,
- 78, UNIPHIER_PIN_DRV_4_8,
+ 78, UNIPHIER_PIN_DRV_1BIT,
78, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(85, "CH5VAL", UNIPHIER_PIN_IECTRL_NONE,
- 79, UNIPHIER_PIN_DRV_4_8,
+ 79, UNIPHIER_PIN_DRV_1BIT,
79, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(86, "CH5PSYNC", UNIPHIER_PIN_IECTRL_NONE,
- 80, UNIPHIER_PIN_DRV_4_8,
+ 80, UNIPHIER_PIN_DRV_1BIT,
80, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(87, "CH5DATA", UNIPHIER_PIN_IECTRL_NONE,
- 81, UNIPHIER_PIN_DRV_4_8,
+ 81, UNIPHIER_PIN_DRV_1BIT,
81, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(88, "CH6CLK", UNIPHIER_PIN_IECTRL_NONE,
- 82, UNIPHIER_PIN_DRV_4_8,
+ 82, UNIPHIER_PIN_DRV_1BIT,
82, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(89, "CH6VAL", UNIPHIER_PIN_IECTRL_NONE,
- 83, UNIPHIER_PIN_DRV_4_8,
+ 83, UNIPHIER_PIN_DRV_1BIT,
83, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(90, "CH6PSYNC", UNIPHIER_PIN_IECTRL_NONE,
- 84, UNIPHIER_PIN_DRV_4_8,
+ 84, UNIPHIER_PIN_DRV_1BIT,
84, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(91, "CH6DATA", UNIPHIER_PIN_IECTRL_NONE,
- 85, UNIPHIER_PIN_DRV_4_8,
+ 85, UNIPHIER_PIN_DRV_1BIT,
85, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(92, "CKFEO", UNIPHIER_PIN_IECTRL_NONE,
- 86, UNIPHIER_PIN_DRV_4_8,
+ 86, UNIPHIER_PIN_DRV_1BIT,
86, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(93, "XFERST", UNIPHIER_PIN_IECTRL_NONE,
- 87, UNIPHIER_PIN_DRV_4_8,
+ 87, UNIPHIER_PIN_DRV_1BIT,
87, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(94, "P_FE_ON", UNIPHIER_PIN_IECTRL_NONE,
- 88, UNIPHIER_PIN_DRV_4_8,
+ 88, UNIPHIER_PIN_DRV_1BIT,
88, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(95, "P_TU0_ON", UNIPHIER_PIN_IECTRL_NONE,
- 89, UNIPHIER_PIN_DRV_4_8,
+ 89, UNIPHIER_PIN_DRV_1BIT,
89, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(96, "XFEIRQ0", UNIPHIER_PIN_IECTRL_NONE,
- 90, UNIPHIER_PIN_DRV_4_8,
+ 90, UNIPHIER_PIN_DRV_1BIT,
90, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(97, "XFEIRQ1", UNIPHIER_PIN_IECTRL_NONE,
- 91, UNIPHIER_PIN_DRV_4_8,
+ 91, UNIPHIER_PIN_DRV_1BIT,
91, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(98, "XFEIRQ2", UNIPHIER_PIN_IECTRL_NONE,
- 92, UNIPHIER_PIN_DRV_4_8,
+ 92, UNIPHIER_PIN_DRV_1BIT,
92, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(99, "XFEIRQ3", UNIPHIER_PIN_IECTRL_NONE,
- 93, UNIPHIER_PIN_DRV_4_8,
+ 93, UNIPHIER_PIN_DRV_1BIT,
93, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(100, "XFEIRQ4", UNIPHIER_PIN_IECTRL_NONE,
- 94, UNIPHIER_PIN_DRV_4_8,
+ 94, UNIPHIER_PIN_DRV_1BIT,
94, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(101, "XFEIRQ5", UNIPHIER_PIN_IECTRL_NONE,
- 95, UNIPHIER_PIN_DRV_4_8,
+ 95, UNIPHIER_PIN_DRV_1BIT,
95, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(102, "XFEIRQ6", UNIPHIER_PIN_IECTRL_NONE,
- 96, UNIPHIER_PIN_DRV_4_8,
+ 96, UNIPHIER_PIN_DRV_1BIT,
96, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(103, "SMTCLK0", UNIPHIER_PIN_IECTRL_NONE,
- 97, UNIPHIER_PIN_DRV_4_8,
+ 97, UNIPHIER_PIN_DRV_1BIT,
97, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(104, "SMTRST0", UNIPHIER_PIN_IECTRL_NONE,
- 98, UNIPHIER_PIN_DRV_4_8,
+ 98, UNIPHIER_PIN_DRV_1BIT,
98, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(105, "SMTCMD0", UNIPHIER_PIN_IECTRL_NONE,
- 99, UNIPHIER_PIN_DRV_4_8,
+ 99, UNIPHIER_PIN_DRV_1BIT,
99, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(106, "SMTD0", UNIPHIER_PIN_IECTRL_NONE,
- 100, UNIPHIER_PIN_DRV_4_8,
+ 100, UNIPHIER_PIN_DRV_1BIT,
100, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(107, "SMTSEL0", UNIPHIER_PIN_IECTRL_NONE,
- 101, UNIPHIER_PIN_DRV_4_8,
+ 101, UNIPHIER_PIN_DRV_1BIT,
101, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(108, "SMTDET0", UNIPHIER_PIN_IECTRL_NONE,
- 102, UNIPHIER_PIN_DRV_4_8,
+ 102, UNIPHIER_PIN_DRV_1BIT,
102, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(109, "SMTCLK1", UNIPHIER_PIN_IECTRL_NONE,
- 103, UNIPHIER_PIN_DRV_4_8,
+ 103, UNIPHIER_PIN_DRV_1BIT,
103, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(110, "SMTRST1", UNIPHIER_PIN_IECTRL_NONE,
- 104, UNIPHIER_PIN_DRV_4_8,
+ 104, UNIPHIER_PIN_DRV_1BIT,
104, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(111, "SMTCMD1", UNIPHIER_PIN_IECTRL_NONE,
- 105, UNIPHIER_PIN_DRV_4_8,
+ 105, UNIPHIER_PIN_DRV_1BIT,
105, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(112, "SMTD1", UNIPHIER_PIN_IECTRL_NONE,
- 106, UNIPHIER_PIN_DRV_4_8,
+ 106, UNIPHIER_PIN_DRV_1BIT,
106, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(113, "SMTSEL1", UNIPHIER_PIN_IECTRL_NONE,
- 107, UNIPHIER_PIN_DRV_4_8,
+ 107, UNIPHIER_PIN_DRV_1BIT,
107, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(114, "SMTDET1", UNIPHIER_PIN_IECTRL_NONE,
- 108, UNIPHIER_PIN_DRV_4_8,
+ 108, UNIPHIER_PIN_DRV_1BIT,
108, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(115, "XINTM", UNIPHIER_PIN_IECTRL_NONE,
- 109, UNIPHIER_PIN_DRV_4_8,
+ 109, UNIPHIER_PIN_DRV_1BIT,
109, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(116, "SCLKM", UNIPHIER_PIN_IECTRL_NONE,
- 110, UNIPHIER_PIN_DRV_4_8,
+ 110, UNIPHIER_PIN_DRV_1BIT,
110, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(117, "SBMTP", UNIPHIER_PIN_IECTRL_NONE,
- 111, UNIPHIER_PIN_DRV_4_8,
+ 111, UNIPHIER_PIN_DRV_1BIT,
111, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(118, "SBPTM", UNIPHIER_PIN_IECTRL_NONE,
- 112, UNIPHIER_PIN_DRV_4_8,
+ 112, UNIPHIER_PIN_DRV_1BIT,
112, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(119, "XMPREQ", UNIPHIER_PIN_IECTRL_NONE,
- 113, UNIPHIER_PIN_DRV_4_8,
+ 113, UNIPHIER_PIN_DRV_1BIT,
113, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(120, "XINTP", UNIPHIER_PIN_IECTRL_NONE,
- 114, UNIPHIER_PIN_DRV_4_8,
+ 114, UNIPHIER_PIN_DRV_1BIT,
114, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(121, "LPST", UNIPHIER_PIN_IECTRL_NONE,
- 115, UNIPHIER_PIN_DRV_4_8,
+ 115, UNIPHIER_PIN_DRV_1BIT,
115, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(122, "SDBOOT", UNIPHIER_PIN_IECTRL_NONE,
- 116, UNIPHIER_PIN_DRV_4_8,
+ 116, UNIPHIER_PIN_DRV_1BIT,
116, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(123, "BFAIL", UNIPHIER_PIN_IECTRL_NONE,
- 117, UNIPHIER_PIN_DRV_4_8,
+ 117, UNIPHIER_PIN_DRV_1BIT,
117, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(124, "XFWE", UNIPHIER_PIN_IECTRL_NONE,
- 118, UNIPHIER_PIN_DRV_4_8,
+ 118, UNIPHIER_PIN_DRV_1BIT,
118, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(125, "RF_COM_RDY", UNIPHIER_PIN_IECTRL_NONE,
- 119, UNIPHIER_PIN_DRV_4_8,
+ 119, UNIPHIER_PIN_DRV_1BIT,
119, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(126, "XDIAG0", UNIPHIER_PIN_IECTRL_NONE,
- 120, UNIPHIER_PIN_DRV_4_8,
+ 120, UNIPHIER_PIN_DRV_1BIT,
120, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(127, "RXD0", UNIPHIER_PIN_IECTRL_NONE,
- 121, UNIPHIER_PIN_DRV_4_8,
+ 121, UNIPHIER_PIN_DRV_1BIT,
121, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(128, "TXD0", UNIPHIER_PIN_IECTRL_NONE,
- 122, UNIPHIER_PIN_DRV_4_8,
+ 122, UNIPHIER_PIN_DRV_1BIT,
122, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(129, "RXD1", UNIPHIER_PIN_IECTRL_NONE,
- 123, UNIPHIER_PIN_DRV_4_8,
+ 123, UNIPHIER_PIN_DRV_1BIT,
123, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(130, "TXD1", UNIPHIER_PIN_IECTRL_NONE,
- 124, UNIPHIER_PIN_DRV_4_8,
+ 124, UNIPHIER_PIN_DRV_1BIT,
124, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(131, "RXD2", UNIPHIER_PIN_IECTRL_NONE,
- 125, UNIPHIER_PIN_DRV_4_8,
+ 125, UNIPHIER_PIN_DRV_1BIT,
125, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(132, "TXD2", UNIPHIER_PIN_IECTRL_NONE,
- 126, UNIPHIER_PIN_DRV_4_8,
+ 126, UNIPHIER_PIN_DRV_1BIT,
126, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(133, "SS0CS", UNIPHIER_PIN_IECTRL_NONE,
- 127, UNIPHIER_PIN_DRV_4_8,
+ 127, UNIPHIER_PIN_DRV_1BIT,
127, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(134, "SS0CLK", UNIPHIER_PIN_IECTRL_NONE,
- 128, UNIPHIER_PIN_DRV_4_8,
+ 128, UNIPHIER_PIN_DRV_1BIT,
128, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(135, "SS0DO", UNIPHIER_PIN_IECTRL_NONE,
- 129, UNIPHIER_PIN_DRV_4_8,
+ 129, UNIPHIER_PIN_DRV_1BIT,
129, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(136, "SS0DI", UNIPHIER_PIN_IECTRL_NONE,
- 130, UNIPHIER_PIN_DRV_4_8,
+ 130, UNIPHIER_PIN_DRV_1BIT,
130, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(137, "MS0CS0", UNIPHIER_PIN_IECTRL_NONE,
- 131, UNIPHIER_PIN_DRV_4_8,
+ 131, UNIPHIER_PIN_DRV_1BIT,
131, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(138, "MS0CLK", UNIPHIER_PIN_IECTRL_NONE,
- 132, UNIPHIER_PIN_DRV_4_8,
+ 132, UNIPHIER_PIN_DRV_1BIT,
132, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(139, "MS0DI", UNIPHIER_PIN_IECTRL_NONE,
- 133, UNIPHIER_PIN_DRV_4_8,
+ 133, UNIPHIER_PIN_DRV_1BIT,
133, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(140, "MS0DO", UNIPHIER_PIN_IECTRL_NONE,
- 134, UNIPHIER_PIN_DRV_4_8,
+ 134, UNIPHIER_PIN_DRV_1BIT,
134, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(141, "XMDMRST", UNIPHIER_PIN_IECTRL_NONE,
- 135, UNIPHIER_PIN_DRV_4_8,
+ 135, UNIPHIER_PIN_DRV_1BIT,
135, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(142, "SCL0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(143, "SDA0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(144, "SCL1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(145, "SDA1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(146, "SCL2", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(147, "SDA2", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(148, "SCL3", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(149, "SDA3", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(150, "SD0DAT0", UNIPHIER_PIN_IECTRL_NONE,
- 12, UNIPHIER_PIN_DRV_8_12_16_20,
+ 12, UNIPHIER_PIN_DRV_2BIT,
136, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(151, "SD0DAT1", UNIPHIER_PIN_IECTRL_NONE,
- 13, UNIPHIER_PIN_DRV_8_12_16_20,
+ 13, UNIPHIER_PIN_DRV_2BIT,
137, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(152, "SD0DAT2", UNIPHIER_PIN_IECTRL_NONE,
- 14, UNIPHIER_PIN_DRV_8_12_16_20,
+ 14, UNIPHIER_PIN_DRV_2BIT,
138, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(153, "SD0DAT3", UNIPHIER_PIN_IECTRL_NONE,
- 15, UNIPHIER_PIN_DRV_8_12_16_20,
+ 15, UNIPHIER_PIN_DRV_2BIT,
139, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(154, "SD0CMD", UNIPHIER_PIN_IECTRL_NONE,
- 11, UNIPHIER_PIN_DRV_8_12_16_20,
+ 11, UNIPHIER_PIN_DRV_2BIT,
141, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(155, "SD0CLK", UNIPHIER_PIN_IECTRL_NONE,
- 10, UNIPHIER_PIN_DRV_8_12_16_20,
+ 10, UNIPHIER_PIN_DRV_2BIT,
140, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(156, "SD0CD", UNIPHIER_PIN_IECTRL_NONE,
- 142, UNIPHIER_PIN_DRV_4_8,
+ 142, UNIPHIER_PIN_DRV_1BIT,
142, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(157, "SD0WP", UNIPHIER_PIN_IECTRL_NONE,
- 143, UNIPHIER_PIN_DRV_4_8,
+ 143, UNIPHIER_PIN_DRV_1BIT,
143, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(158, "SD0VTCG", UNIPHIER_PIN_IECTRL_NONE,
- 144, UNIPHIER_PIN_DRV_4_8,
+ 144, UNIPHIER_PIN_DRV_1BIT,
144, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(159, "CK25O", UNIPHIER_PIN_IECTRL_NONE,
- 145, UNIPHIER_PIN_DRV_4_8,
+ 145, UNIPHIER_PIN_DRV_1BIT,
145, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(160, "RGMII_TXCLK", 6,
- 146, UNIPHIER_PIN_DRV_4_8,
+ 146, UNIPHIER_PIN_DRV_1BIT,
146, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(161, "RGMII_TXD0", 6,
- 147, UNIPHIER_PIN_DRV_4_8,
+ 147, UNIPHIER_PIN_DRV_1BIT,
147, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(162, "RGMII_TXD1", 6,
- 148, UNIPHIER_PIN_DRV_4_8,
+ 148, UNIPHIER_PIN_DRV_1BIT,
148, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(163, "RGMII_TXD2", 6,
- 149, UNIPHIER_PIN_DRV_4_8,
+ 149, UNIPHIER_PIN_DRV_1BIT,
149, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(164, "RGMII_TXD3", 6,
- 150, UNIPHIER_PIN_DRV_4_8,
+ 150, UNIPHIER_PIN_DRV_1BIT,
150, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(165, "RGMII_TXCTL", 6,
- 151, UNIPHIER_PIN_DRV_4_8,
+ 151, UNIPHIER_PIN_DRV_1BIT,
151, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(166, "MII_TXER", UNIPHIER_PIN_IECTRL_NONE,
- 152, UNIPHIER_PIN_DRV_4_8,
+ 152, UNIPHIER_PIN_DRV_1BIT,
152, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(167, "RGMII_RXCLK", 6,
- 153, UNIPHIER_PIN_DRV_4_8,
+ 153, UNIPHIER_PIN_DRV_1BIT,
153, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(168, "RGMII_RXD0", 6,
- 154, UNIPHIER_PIN_DRV_4_8,
+ 154, UNIPHIER_PIN_DRV_1BIT,
154, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(169, "RGMII_RXD1", 6,
- 155, UNIPHIER_PIN_DRV_4_8,
+ 155, UNIPHIER_PIN_DRV_1BIT,
155, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(170, "RGMII_RXD2", 6,
- 156, UNIPHIER_PIN_DRV_4_8,
+ 156, UNIPHIER_PIN_DRV_1BIT,
156, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(171, "RGMII_RXD3", 6,
- 157, UNIPHIER_PIN_DRV_4_8,
+ 157, UNIPHIER_PIN_DRV_1BIT,
157, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(172, "RGMII_RXCTL", 6,
- 158, UNIPHIER_PIN_DRV_4_8,
+ 158, UNIPHIER_PIN_DRV_1BIT,
158, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(173, "MII_RXER", 6,
- 159, UNIPHIER_PIN_DRV_4_8,
+ 159, UNIPHIER_PIN_DRV_1BIT,
159, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(174, "MII_CRS", 6,
- 160, UNIPHIER_PIN_DRV_4_8,
+ 160, UNIPHIER_PIN_DRV_1BIT,
160, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(175, "MII_COL", 6,
- 161, UNIPHIER_PIN_DRV_4_8,
+ 161, UNIPHIER_PIN_DRV_1BIT,
161, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(176, "MDC", 6,
- 162, UNIPHIER_PIN_DRV_4_8,
+ 162, UNIPHIER_PIN_DRV_1BIT,
162, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(177, "MDIO", 6,
- 163, UNIPHIER_PIN_DRV_4_8,
+ 163, UNIPHIER_PIN_DRV_1BIT,
163, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(178, "MDIO_INTL", 6,
- 164, UNIPHIER_PIN_DRV_4_8,
+ 164, UNIPHIER_PIN_DRV_1BIT,
164, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(179, "XETH_RST", 6,
- 165, UNIPHIER_PIN_DRV_4_8,
+ 165, UNIPHIER_PIN_DRV_1BIT,
165, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(180, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE,
- 166, UNIPHIER_PIN_DRV_4_8,
+ 166, UNIPHIER_PIN_DRV_1BIT,
166, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(181, "USB0OD", UNIPHIER_PIN_IECTRL_NONE,
- 167, UNIPHIER_PIN_DRV_4_8,
+ 167, UNIPHIER_PIN_DRV_1BIT,
167, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(182, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE,
- 168, UNIPHIER_PIN_DRV_4_8,
+ 168, UNIPHIER_PIN_DRV_1BIT,
168, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(183, "USB1OD", UNIPHIER_PIN_IECTRL_NONE,
- 169, UNIPHIER_PIN_DRV_4_8,
+ 169, UNIPHIER_PIN_DRV_1BIT,
169, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(184, "USB2VBUS", UNIPHIER_PIN_IECTRL_NONE,
- 170, UNIPHIER_PIN_DRV_4_8,
+ 170, UNIPHIER_PIN_DRV_1BIT,
170, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(185, "USB2OD", UNIPHIER_PIN_IECTRL_NONE,
- 171, UNIPHIER_PIN_DRV_4_8,
+ 171, UNIPHIER_PIN_DRV_1BIT,
171, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(186, "USB2ID", UNIPHIER_PIN_IECTRL_NONE,
- 172, UNIPHIER_PIN_DRV_4_8,
+ 172, UNIPHIER_PIN_DRV_1BIT,
172, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(187, "USB3VBUS", UNIPHIER_PIN_IECTRL_NONE,
- 173, UNIPHIER_PIN_DRV_4_8,
+ 173, UNIPHIER_PIN_DRV_1BIT,
173, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(188, "USB3OD", UNIPHIER_PIN_IECTRL_NONE,
- 174, UNIPHIER_PIN_DRV_4_8,
+ 174, UNIPHIER_PIN_DRV_1BIT,
174, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(189, "LINKCLK", UNIPHIER_PIN_IECTRL_NONE,
- 175, UNIPHIER_PIN_DRV_4_8,
+ 175, UNIPHIER_PIN_DRV_1BIT,
175, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(190, "LINKREQ", UNIPHIER_PIN_IECTRL_NONE,
- 176, UNIPHIER_PIN_DRV_4_8,
+ 176, UNIPHIER_PIN_DRV_1BIT,
176, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(191, "LINKCTL0", UNIPHIER_PIN_IECTRL_NONE,
- 177, UNIPHIER_PIN_DRV_4_8,
+ 177, UNIPHIER_PIN_DRV_1BIT,
177, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(192, "LINKCTL1", UNIPHIER_PIN_IECTRL_NONE,
- 178, UNIPHIER_PIN_DRV_4_8,
+ 178, UNIPHIER_PIN_DRV_1BIT,
178, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(193, "LINKDT0", UNIPHIER_PIN_IECTRL_NONE,
- 179, UNIPHIER_PIN_DRV_4_8,
+ 179, UNIPHIER_PIN_DRV_1BIT,
179, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(194, "LINKDT1", UNIPHIER_PIN_IECTRL_NONE,
- 180, UNIPHIER_PIN_DRV_4_8,
+ 180, UNIPHIER_PIN_DRV_1BIT,
180, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(195, "LINKDT2", UNIPHIER_PIN_IECTRL_NONE,
- 181, UNIPHIER_PIN_DRV_4_8,
+ 181, UNIPHIER_PIN_DRV_1BIT,
181, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(196, "LINKDT3", UNIPHIER_PIN_IECTRL_NONE,
- 182, UNIPHIER_PIN_DRV_4_8,
+ 182, UNIPHIER_PIN_DRV_1BIT,
182, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(197, "LINKDT4", UNIPHIER_PIN_IECTRL_NONE,
- 183, UNIPHIER_PIN_DRV_4_8,
+ 183, UNIPHIER_PIN_DRV_1BIT,
183, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(198, "LINKDT5", UNIPHIER_PIN_IECTRL_NONE,
- 184, UNIPHIER_PIN_DRV_4_8,
+ 184, UNIPHIER_PIN_DRV_1BIT,
184, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(199, "LINKDT6", UNIPHIER_PIN_IECTRL_NONE,
- 185, UNIPHIER_PIN_DRV_4_8,
+ 185, UNIPHIER_PIN_DRV_1BIT,
185, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(200, "LINKDT7", UNIPHIER_PIN_IECTRL_NONE,
- 186, UNIPHIER_PIN_DRV_4_8,
+ 186, UNIPHIER_PIN_DRV_1BIT,
186, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(201, "CKDVO", UNIPHIER_PIN_IECTRL_NONE,
- 187, UNIPHIER_PIN_DRV_4_8,
+ 187, UNIPHIER_PIN_DRV_1BIT,
187, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(202, "PHY_PD", UNIPHIER_PIN_IECTRL_NONE,
- 188, UNIPHIER_PIN_DRV_4_8,
+ 188, UNIPHIER_PIN_DRV_1BIT,
188, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(203, "X1394_RST", UNIPHIER_PIN_IECTRL_NONE,
- 189, UNIPHIER_PIN_DRV_4_8,
+ 189, UNIPHIER_PIN_DRV_1BIT,
189, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(204, "VOUT_MUTE_L", UNIPHIER_PIN_IECTRL_NONE,
- 190, UNIPHIER_PIN_DRV_4_8,
+ 190, UNIPHIER_PIN_DRV_1BIT,
190, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(205, "CLK54O", UNIPHIER_PIN_IECTRL_NONE,
- 191, UNIPHIER_PIN_DRV_4_8,
+ 191, UNIPHIER_PIN_DRV_1BIT,
191, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(206, "CLK54I", UNIPHIER_PIN_IECTRL_NONE,
- 192, UNIPHIER_PIN_DRV_NONE,
+ -1, UNIPHIER_PIN_DRV_NONE,
192, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(207, "YIN0", UNIPHIER_PIN_IECTRL_NONE,
- 193, UNIPHIER_PIN_DRV_4_8,
+ 193, UNIPHIER_PIN_DRV_1BIT,
193, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(208, "YIN1", UNIPHIER_PIN_IECTRL_NONE,
- 194, UNIPHIER_PIN_DRV_4_8,
+ 194, UNIPHIER_PIN_DRV_1BIT,
194, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(209, "YIN2", UNIPHIER_PIN_IECTRL_NONE,
- 195, UNIPHIER_PIN_DRV_4_8,
+ 195, UNIPHIER_PIN_DRV_1BIT,
195, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(210, "YIN3", UNIPHIER_PIN_IECTRL_NONE,
- 196, UNIPHIER_PIN_DRV_4_8,
+ 196, UNIPHIER_PIN_DRV_1BIT,
196, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(211, "YIN4", UNIPHIER_PIN_IECTRL_NONE,
- 197, UNIPHIER_PIN_DRV_4_8,
+ 197, UNIPHIER_PIN_DRV_1BIT,
197, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(212, "YIN5", UNIPHIER_PIN_IECTRL_NONE,
- 198, UNIPHIER_PIN_DRV_4_8,
+ 198, UNIPHIER_PIN_DRV_1BIT,
198, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(213, "CIN0", UNIPHIER_PIN_IECTRL_NONE,
- 199, UNIPHIER_PIN_DRV_4_8,
+ 199, UNIPHIER_PIN_DRV_1BIT,
199, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(214, "CIN1", UNIPHIER_PIN_IECTRL_NONE,
- 200, UNIPHIER_PIN_DRV_4_8,
+ 200, UNIPHIER_PIN_DRV_1BIT,
200, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(215, "CIN2", UNIPHIER_PIN_IECTRL_NONE,
- 201, UNIPHIER_PIN_DRV_4_8,
+ 201, UNIPHIER_PIN_DRV_1BIT,
201, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(216, "CIN3", UNIPHIER_PIN_IECTRL_NONE,
- 202, UNIPHIER_PIN_DRV_4_8,
+ 202, UNIPHIER_PIN_DRV_1BIT,
202, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(217, "CIN4", UNIPHIER_PIN_IECTRL_NONE,
- 203, UNIPHIER_PIN_DRV_4_8,
+ 203, UNIPHIER_PIN_DRV_1BIT,
203, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(218, "CIN5", UNIPHIER_PIN_IECTRL_NONE,
- 204, UNIPHIER_PIN_DRV_4_8,
+ 204, UNIPHIER_PIN_DRV_1BIT,
204, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(219, "GCP", UNIPHIER_PIN_IECTRL_NONE,
- 205, UNIPHIER_PIN_DRV_4_8,
+ 205, UNIPHIER_PIN_DRV_1BIT,
205, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(220, "ADFLG", UNIPHIER_PIN_IECTRL_NONE,
- 206, UNIPHIER_PIN_DRV_4_8,
+ 206, UNIPHIER_PIN_DRV_1BIT,
206, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(221, "CK27AIOF", UNIPHIER_PIN_IECTRL_NONE,
- 207, UNIPHIER_PIN_DRV_4_8,
+ 207, UNIPHIER_PIN_DRV_1BIT,
207, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(222, "DACOUT", UNIPHIER_PIN_IECTRL_NONE,
- 208, UNIPHIER_PIN_DRV_4_8,
+ 208, UNIPHIER_PIN_DRV_1BIT,
208, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(223, "DAFLG", UNIPHIER_PIN_IECTRL_NONE,
- 209, UNIPHIER_PIN_DRV_4_8,
+ 209, UNIPHIER_PIN_DRV_1BIT,
209, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(224, "VBIH", UNIPHIER_PIN_IECTRL_NONE,
- 210, UNIPHIER_PIN_DRV_4_8,
+ 210, UNIPHIER_PIN_DRV_1BIT,
210, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(225, "VBIL", UNIPHIER_PIN_IECTRL_NONE,
- 211, UNIPHIER_PIN_DRV_4_8,
+ 211, UNIPHIER_PIN_DRV_1BIT,
211, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(226, "XSUB_RST", UNIPHIER_PIN_IECTRL_NONE,
- 212, UNIPHIER_PIN_DRV_4_8,
+ 212, UNIPHIER_PIN_DRV_1BIT,
212, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(227, "XADC_PD", UNIPHIER_PIN_IECTRL_NONE,
- 213, UNIPHIER_PIN_DRV_4_8,
+ 213, UNIPHIER_PIN_DRV_1BIT,
213, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(228, "AI1ADCCK", UNIPHIER_PIN_IECTRL_NONE,
- 214, UNIPHIER_PIN_DRV_4_8,
+ 214, UNIPHIER_PIN_DRV_1BIT,
214, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(229, "AI1BCK", UNIPHIER_PIN_IECTRL_NONE,
- 215, UNIPHIER_PIN_DRV_4_8,
+ 215, UNIPHIER_PIN_DRV_1BIT,
215, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(230, "AI1LRCK", UNIPHIER_PIN_IECTRL_NONE,
- 216, UNIPHIER_PIN_DRV_4_8,
+ 216, UNIPHIER_PIN_DRV_1BIT,
216, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(231, "AI1DMIX", UNIPHIER_PIN_IECTRL_NONE,
- 217, UNIPHIER_PIN_DRV_4_8,
+ 217, UNIPHIER_PIN_DRV_1BIT,
217, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(232, "CK27HD", UNIPHIER_PIN_IECTRL_NONE,
- 218, UNIPHIER_PIN_DRV_4_8,
+ 218, UNIPHIER_PIN_DRV_1BIT,
218, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(233, "XHD_RST", UNIPHIER_PIN_IECTRL_NONE,
- 219, UNIPHIER_PIN_DRV_4_8,
+ 219, UNIPHIER_PIN_DRV_1BIT,
219, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(234, "INTHD", UNIPHIER_PIN_IECTRL_NONE,
- 220, UNIPHIER_PIN_DRV_4_8,
+ 220, UNIPHIER_PIN_DRV_1BIT,
220, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(235, "VO1HDCK", UNIPHIER_PIN_IECTRL_NONE,
- 221, UNIPHIER_PIN_DRV_4_8,
+ 221, UNIPHIER_PIN_DRV_1BIT,
221, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(236, "VO1HSYNC", UNIPHIER_PIN_IECTRL_NONE,
- 222, UNIPHIER_PIN_DRV_4_8,
+ 222, UNIPHIER_PIN_DRV_1BIT,
222, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(237, "VO1VSYNC", UNIPHIER_PIN_IECTRL_NONE,
- 223, UNIPHIER_PIN_DRV_4_8,
+ 223, UNIPHIER_PIN_DRV_1BIT,
223, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(238, "VO1DE", UNIPHIER_PIN_IECTRL_NONE,
- 224, UNIPHIER_PIN_DRV_4_8,
+ 224, UNIPHIER_PIN_DRV_1BIT,
224, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(239, "VO1Y0", UNIPHIER_PIN_IECTRL_NONE,
- 225, UNIPHIER_PIN_DRV_4_8,
+ 225, UNIPHIER_PIN_DRV_1BIT,
225, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(240, "VO1Y1", UNIPHIER_PIN_IECTRL_NONE,
- 226, UNIPHIER_PIN_DRV_4_8,
+ 226, UNIPHIER_PIN_DRV_1BIT,
226, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(241, "VO1Y2", UNIPHIER_PIN_IECTRL_NONE,
- 227, UNIPHIER_PIN_DRV_4_8,
+ 227, UNIPHIER_PIN_DRV_1BIT,
227, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(242, "VO1Y3", UNIPHIER_PIN_IECTRL_NONE,
- 228, UNIPHIER_PIN_DRV_4_8,
+ 228, UNIPHIER_PIN_DRV_1BIT,
228, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(243, "VO1Y4", UNIPHIER_PIN_IECTRL_NONE,
- 229, UNIPHIER_PIN_DRV_4_8,
+ 229, UNIPHIER_PIN_DRV_1BIT,
229, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(244, "VO1Y5", UNIPHIER_PIN_IECTRL_NONE,
- 230, UNIPHIER_PIN_DRV_4_8,
+ 230, UNIPHIER_PIN_DRV_1BIT,
230, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(245, "VO1Y6", UNIPHIER_PIN_IECTRL_NONE,
- 231, UNIPHIER_PIN_DRV_4_8,
+ 231, UNIPHIER_PIN_DRV_1BIT,
231, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(246, "VO1Y7", UNIPHIER_PIN_IECTRL_NONE,
- 232, UNIPHIER_PIN_DRV_4_8,
+ 232, UNIPHIER_PIN_DRV_1BIT,
232, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(247, "VO1Y8", UNIPHIER_PIN_IECTRL_NONE,
- 233, UNIPHIER_PIN_DRV_4_8,
+ 233, UNIPHIER_PIN_DRV_1BIT,
233, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(248, "VO1Y9", UNIPHIER_PIN_IECTRL_NONE,
- 234, UNIPHIER_PIN_DRV_4_8,
+ 234, UNIPHIER_PIN_DRV_1BIT,
234, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(249, "VO1Y10", UNIPHIER_PIN_IECTRL_NONE,
- 235, UNIPHIER_PIN_DRV_4_8,
+ 235, UNIPHIER_PIN_DRV_1BIT,
235, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(250, "VO1Y11", UNIPHIER_PIN_IECTRL_NONE,
- 236, UNIPHIER_PIN_DRV_4_8,
+ 236, UNIPHIER_PIN_DRV_1BIT,
236, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(251, "VO1CB0", UNIPHIER_PIN_IECTRL_NONE,
- 237, UNIPHIER_PIN_DRV_4_8,
+ 237, UNIPHIER_PIN_DRV_1BIT,
237, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(252, "VO1CB1", UNIPHIER_PIN_IECTRL_NONE,
- 238, UNIPHIER_PIN_DRV_4_8,
+ 238, UNIPHIER_PIN_DRV_1BIT,
238, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(253, "VO1CB2", UNIPHIER_PIN_IECTRL_NONE,
- 239, UNIPHIER_PIN_DRV_4_8,
+ 239, UNIPHIER_PIN_DRV_1BIT,
239, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(254, "VO1CB3", UNIPHIER_PIN_IECTRL_NONE,
- 240, UNIPHIER_PIN_DRV_4_8,
+ 240, UNIPHIER_PIN_DRV_1BIT,
240, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(255, "VO1CB4", UNIPHIER_PIN_IECTRL_NONE,
- 241, UNIPHIER_PIN_DRV_4_8,
+ 241, UNIPHIER_PIN_DRV_1BIT,
241, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(256, "VO1CB5", UNIPHIER_PIN_IECTRL_NONE,
- 242, UNIPHIER_PIN_DRV_4_8,
+ 242, UNIPHIER_PIN_DRV_1BIT,
242, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(257, "VO1CB6", UNIPHIER_PIN_IECTRL_NONE,
- 243, UNIPHIER_PIN_DRV_4_8,
+ 243, UNIPHIER_PIN_DRV_1BIT,
243, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(258, "VO1CB7", UNIPHIER_PIN_IECTRL_NONE,
- 244, UNIPHIER_PIN_DRV_4_8,
+ 244, UNIPHIER_PIN_DRV_1BIT,
244, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(259, "VO1CB8", UNIPHIER_PIN_IECTRL_NONE,
- 245, UNIPHIER_PIN_DRV_4_8,
+ 245, UNIPHIER_PIN_DRV_1BIT,
245, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(260, "VO1CB9", UNIPHIER_PIN_IECTRL_NONE,
- 246, UNIPHIER_PIN_DRV_4_8,
+ 246, UNIPHIER_PIN_DRV_1BIT,
246, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(261, "VO1CB10", UNIPHIER_PIN_IECTRL_NONE,
- 247, UNIPHIER_PIN_DRV_4_8,
+ 247, UNIPHIER_PIN_DRV_1BIT,
247, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(262, "VO1CB11", UNIPHIER_PIN_IECTRL_NONE,
- 248, UNIPHIER_PIN_DRV_4_8,
+ 248, UNIPHIER_PIN_DRV_1BIT,
248, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(263, "VO1CR0", UNIPHIER_PIN_IECTRL_NONE,
- 249, UNIPHIER_PIN_DRV_4_8,
+ 249, UNIPHIER_PIN_DRV_1BIT,
249, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(264, "VO1CR1", UNIPHIER_PIN_IECTRL_NONE,
- 250, UNIPHIER_PIN_DRV_4_8,
+ 250, UNIPHIER_PIN_DRV_1BIT,
250, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(265, "VO1CR2", UNIPHIER_PIN_IECTRL_NONE,
- 251, UNIPHIER_PIN_DRV_4_8,
+ 251, UNIPHIER_PIN_DRV_1BIT,
251, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(266, "VO1CR3", UNIPHIER_PIN_IECTRL_NONE,
- 252, UNIPHIER_PIN_DRV_4_8,
+ 252, UNIPHIER_PIN_DRV_1BIT,
252, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(267, "VO1CR4", UNIPHIER_PIN_IECTRL_NONE,
- 253, UNIPHIER_PIN_DRV_4_8,
+ 253, UNIPHIER_PIN_DRV_1BIT,
253, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(268, "VO1CR5", UNIPHIER_PIN_IECTRL_NONE,
- 254, UNIPHIER_PIN_DRV_4_8,
+ 254, UNIPHIER_PIN_DRV_1BIT,
254, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(269, "VO1CR6", UNIPHIER_PIN_IECTRL_NONE,
- 255, UNIPHIER_PIN_DRV_4_8,
+ 255, UNIPHIER_PIN_DRV_1BIT,
255, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(270, "VO1CR7", UNIPHIER_PIN_IECTRL_NONE,
- 256, UNIPHIER_PIN_DRV_4_8,
+ 256, UNIPHIER_PIN_DRV_1BIT,
256, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(271, "VO1CR8", UNIPHIER_PIN_IECTRL_NONE,
- 257, UNIPHIER_PIN_DRV_4_8,
+ 257, UNIPHIER_PIN_DRV_1BIT,
257, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(272, "VO1CR9", UNIPHIER_PIN_IECTRL_NONE,
- 258, UNIPHIER_PIN_DRV_4_8,
+ 258, UNIPHIER_PIN_DRV_1BIT,
258, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(273, "VO1CR10", UNIPHIER_PIN_IECTRL_NONE,
- 259, UNIPHIER_PIN_DRV_4_8,
+ 259, UNIPHIER_PIN_DRV_1BIT,
259, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(274, "VO1CR11", UNIPHIER_PIN_IECTRL_NONE,
- 260, UNIPHIER_PIN_DRV_4_8,
+ 260, UNIPHIER_PIN_DRV_1BIT,
260, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(275, "VO1EX0", UNIPHIER_PIN_IECTRL_NONE,
- 261, UNIPHIER_PIN_DRV_4_8,
+ 261, UNIPHIER_PIN_DRV_1BIT,
261, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(276, "VO1EX1", UNIPHIER_PIN_IECTRL_NONE,
- 262, UNIPHIER_PIN_DRV_4_8,
+ 262, UNIPHIER_PIN_DRV_1BIT,
262, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(277, "VO1EX2", UNIPHIER_PIN_IECTRL_NONE,
- 263, UNIPHIER_PIN_DRV_4_8,
+ 263, UNIPHIER_PIN_DRV_1BIT,
263, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(278, "VO1EX3", UNIPHIER_PIN_IECTRL_NONE,
- 264, UNIPHIER_PIN_DRV_4_8,
+ 264, UNIPHIER_PIN_DRV_1BIT,
264, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(279, "VEXCKA", UNIPHIER_PIN_IECTRL_NONE,
- 265, UNIPHIER_PIN_DRV_4_8,
+ 265, UNIPHIER_PIN_DRV_1BIT,
265, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(280, "VSEL0", UNIPHIER_PIN_IECTRL_NONE,
- 266, UNIPHIER_PIN_DRV_4_8,
+ 266, UNIPHIER_PIN_DRV_1BIT,
266, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(281, "VSEL1", UNIPHIER_PIN_IECTRL_NONE,
- 267, UNIPHIER_PIN_DRV_4_8,
+ 267, UNIPHIER_PIN_DRV_1BIT,
267, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(282, "AO1DACCK", UNIPHIER_PIN_IECTRL_NONE,
- 268, UNIPHIER_PIN_DRV_4_8,
+ 268, UNIPHIER_PIN_DRV_1BIT,
268, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(283, "AO1BCK", UNIPHIER_PIN_IECTRL_NONE,
- 269, UNIPHIER_PIN_DRV_4_8,
+ 269, UNIPHIER_PIN_DRV_1BIT,
269, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(284, "AO1LRCK", UNIPHIER_PIN_IECTRL_NONE,
- 270, UNIPHIER_PIN_DRV_4_8,
+ 270, UNIPHIER_PIN_DRV_1BIT,
270, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(285, "AO1D0", UNIPHIER_PIN_IECTRL_NONE,
- 271, UNIPHIER_PIN_DRV_4_8,
+ 271, UNIPHIER_PIN_DRV_1BIT,
271, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(286, "AO1D1", UNIPHIER_PIN_IECTRL_NONE,
- 272, UNIPHIER_PIN_DRV_4_8,
+ 272, UNIPHIER_PIN_DRV_1BIT,
272, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(287, "AO1D2", UNIPHIER_PIN_IECTRL_NONE,
- 273, UNIPHIER_PIN_DRV_4_8,
+ 273, UNIPHIER_PIN_DRV_1BIT,
273, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(288, "AO1D3", UNIPHIER_PIN_IECTRL_NONE,
- 274, UNIPHIER_PIN_DRV_4_8,
+ 274, UNIPHIER_PIN_DRV_1BIT,
274, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(289, "AO1IEC", UNIPHIER_PIN_IECTRL_NONE,
- 275, UNIPHIER_PIN_DRV_4_8,
+ 275, UNIPHIER_PIN_DRV_1BIT,
275, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(290, "XDAC_PD", UNIPHIER_PIN_IECTRL_NONE,
- 276, UNIPHIER_PIN_DRV_4_8,
+ 276, UNIPHIER_PIN_DRV_1BIT,
276, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(291, "EX_A_MUTE", UNIPHIER_PIN_IECTRL_NONE,
- 277, UNIPHIER_PIN_DRV_4_8,
+ 277, UNIPHIER_PIN_DRV_1BIT,
277, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(292, "AO2DACCK", UNIPHIER_PIN_IECTRL_NONE,
- 278, UNIPHIER_PIN_DRV_4_8,
+ 278, UNIPHIER_PIN_DRV_1BIT,
278, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(293, "AO2BCK", UNIPHIER_PIN_IECTRL_NONE,
- 279, UNIPHIER_PIN_DRV_4_8,
+ 279, UNIPHIER_PIN_DRV_1BIT,
279, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(294, "AO2LRCK", UNIPHIER_PIN_IECTRL_NONE,
- 280, UNIPHIER_PIN_DRV_4_8,
+ 280, UNIPHIER_PIN_DRV_1BIT,
280, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(295, "AO2DMIX", UNIPHIER_PIN_IECTRL_NONE,
- 281, UNIPHIER_PIN_DRV_4_8,
+ 281, UNIPHIER_PIN_DRV_1BIT,
281, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(296, "AO2IEC", UNIPHIER_PIN_IECTRL_NONE,
- 282, UNIPHIER_PIN_DRV_4_8,
+ 282, UNIPHIER_PIN_DRV_1BIT,
282, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(297, "HTHPD", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_5,
+ -1, UNIPHIER_PIN_DRV_FIXED5,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(298, "HTSCL", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_5,
+ -1, UNIPHIER_PIN_DRV_FIXED5,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(299, "HTSDA", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_5,
+ -1, UNIPHIER_PIN_DRV_FIXED5,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(300, "PORT00", UNIPHIER_PIN_IECTRL_NONE,
- 284, UNIPHIER_PIN_DRV_4_8,
+ 284, UNIPHIER_PIN_DRV_1BIT,
284, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(301, "PORT01", UNIPHIER_PIN_IECTRL_NONE,
- 285, UNIPHIER_PIN_DRV_4_8,
+ 285, UNIPHIER_PIN_DRV_1BIT,
285, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(302, "PORT02", UNIPHIER_PIN_IECTRL_NONE,
- 286, UNIPHIER_PIN_DRV_4_8,
+ 286, UNIPHIER_PIN_DRV_1BIT,
286, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(303, "PORT03", UNIPHIER_PIN_IECTRL_NONE,
- 287, UNIPHIER_PIN_DRV_4_8,
+ 287, UNIPHIER_PIN_DRV_1BIT,
287, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(304, "PORT04", UNIPHIER_PIN_IECTRL_NONE,
- 288, UNIPHIER_PIN_DRV_4_8,
+ 288, UNIPHIER_PIN_DRV_1BIT,
288, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(305, "PORT05", UNIPHIER_PIN_IECTRL_NONE,
- 289, UNIPHIER_PIN_DRV_4_8,
+ 289, UNIPHIER_PIN_DRV_1BIT,
289, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(306, "PORT06", UNIPHIER_PIN_IECTRL_NONE,
- 290, UNIPHIER_PIN_DRV_4_8,
+ 290, UNIPHIER_PIN_DRV_1BIT,
290, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(307, "PORT07", UNIPHIER_PIN_IECTRL_NONE,
- 291, UNIPHIER_PIN_DRV_4_8,
+ 291, UNIPHIER_PIN_DRV_1BIT,
291, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(308, "PORT10", UNIPHIER_PIN_IECTRL_NONE,
- 292, UNIPHIER_PIN_DRV_4_8,
+ 292, UNIPHIER_PIN_DRV_1BIT,
292, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(309, "PORT11", UNIPHIER_PIN_IECTRL_NONE,
- 293, UNIPHIER_PIN_DRV_4_8,
+ 293, UNIPHIER_PIN_DRV_1BIT,
293, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(310, "PORT12", UNIPHIER_PIN_IECTRL_NONE,
- 294, UNIPHIER_PIN_DRV_4_8,
+ 294, UNIPHIER_PIN_DRV_1BIT,
294, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(311, "PORT13", UNIPHIER_PIN_IECTRL_NONE,
- 295, UNIPHIER_PIN_DRV_4_8,
+ 295, UNIPHIER_PIN_DRV_1BIT,
295, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(312, "PORT14", UNIPHIER_PIN_IECTRL_NONE,
- 296, UNIPHIER_PIN_DRV_4_8,
+ 296, UNIPHIER_PIN_DRV_1BIT,
296, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(313, "PORT15", UNIPHIER_PIN_IECTRL_NONE,
- 297, UNIPHIER_PIN_DRV_4_8,
+ 297, UNIPHIER_PIN_DRV_1BIT,
297, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(314, "PORT16", UNIPHIER_PIN_IECTRL_NONE,
- 298, UNIPHIER_PIN_DRV_4_8,
+ 298, UNIPHIER_PIN_DRV_1BIT,
298, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(315, "PORT17", UNIPHIER_PIN_IECTRL_NONE,
- 299, UNIPHIER_PIN_DRV_4_8,
+ 299, UNIPHIER_PIN_DRV_1BIT,
299, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(316, "PORT20", UNIPHIER_PIN_IECTRL_NONE,
- 300, UNIPHIER_PIN_DRV_4_8,
+ 300, UNIPHIER_PIN_DRV_1BIT,
300, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(317, "PORT21", UNIPHIER_PIN_IECTRL_NONE,
- 301, UNIPHIER_PIN_DRV_4_8,
+ 301, UNIPHIER_PIN_DRV_1BIT,
301, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(318, "PORT22", UNIPHIER_PIN_IECTRL_NONE,
- 302, UNIPHIER_PIN_DRV_4_8,
+ 302, UNIPHIER_PIN_DRV_1BIT,
302, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(319, "SD1DAT0", UNIPHIER_PIN_IECTRL_NONE,
- 303, UNIPHIER_PIN_DRV_4_8,
+ 303, UNIPHIER_PIN_DRV_1BIT,
303, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(320, "SD1DAT1", UNIPHIER_PIN_IECTRL_NONE,
- 304, UNIPHIER_PIN_DRV_4_8,
+ 304, UNIPHIER_PIN_DRV_1BIT,
304, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(321, "SD1DAT2", UNIPHIER_PIN_IECTRL_NONE,
- 305, UNIPHIER_PIN_DRV_4_8,
+ 305, UNIPHIER_PIN_DRV_1BIT,
305, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(322, "SD1DAT3", UNIPHIER_PIN_IECTRL_NONE,
- 306, UNIPHIER_PIN_DRV_4_8,
+ 306, UNIPHIER_PIN_DRV_1BIT,
306, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(323, "SD1CMD", UNIPHIER_PIN_IECTRL_NONE,
- 307, UNIPHIER_PIN_DRV_4_8,
+ 307, UNIPHIER_PIN_DRV_1BIT,
307, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(324, "SD1CLK", UNIPHIER_PIN_IECTRL_NONE,
- 308, UNIPHIER_PIN_DRV_4_8,
+ 308, UNIPHIER_PIN_DRV_1BIT,
308, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(325, "SD1CD", UNIPHIER_PIN_IECTRL_NONE,
- 309, UNIPHIER_PIN_DRV_4_8,
+ 309, UNIPHIER_PIN_DRV_1BIT,
309, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(326, "SD1WP", UNIPHIER_PIN_IECTRL_NONE,
- 310, UNIPHIER_PIN_DRV_4_8,
+ 310, UNIPHIER_PIN_DRV_1BIT,
310, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(327, "SD1VTCG", UNIPHIER_PIN_IECTRL_NONE,
- 311, UNIPHIER_PIN_DRV_4_8,
+ 311, UNIPHIER_PIN_DRV_1BIT,
311, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(328, "DMDISO", UNIPHIER_PIN_IECTRL_NONE,
- 312, UNIPHIER_PIN_DRV_NONE,
+ -1, UNIPHIER_PIN_DRV_NONE,
312, UNIPHIER_PIN_PULL_DOWN),
};
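+/*
+ * Each <group>_pins[] array pairs element-wise with <group>_muxvals[]:
+ * muxvals[i] is the mux value that selects the group's function on
+ * pins[i].  The muxvals are signed ints, leaving negative values
+ * available as a "no mux setting" sentinel.
+ */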
static const unsigned emmc_pins[] = {40, 41, 42, 43, 51, 52, 53};
-static const unsigned emmc_muxvals[] = {1, 1, 1, 1, 1, 1, 1};
+static const int emmc_muxvals[] = {1, 1, 1, 1, 1, 1, 1};
static const unsigned emmc_dat8_pins[] = {44, 45, 46, 47};
-static const unsigned emmc_dat8_muxvals[] = {1, 1, 1, 1};
+static const int emmc_dat8_muxvals[] = {1, 1, 1, 1};
+static const unsigned ether_mii_pins[] = {160, 161, 162, 163, 164, 165, 166,
+ 167, 168, 169, 170, 171, 172, 173,
+ 174, 175, 176, 177, 178, 179};
+static const int ether_mii_muxvals[] = {1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0};
+static const unsigned ether_rgmii_pins[] = {160, 161, 162, 163, 164, 165, 167,
+ 168, 169, 170, 171, 172, 176, 177,
+ 178, 179};
+static const int ether_rgmii_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0};
+static const unsigned ether_rmii_pins[] = {160, 161, 162, 165, 168, 169, 172,
+ 173, 176, 177, 178, 179};
+static const int ether_rmii_muxvals[] = {1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned ether_rmiib_pins[] = {161, 162, 165, 167, 168, 169, 172,
+ 173, 176, 177, 178, 179};
+static const int ether_rmiib_muxvals[] = {0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0};
static const unsigned i2c0_pins[] = {142, 143};
-static const unsigned i2c0_muxvals[] = {0, 0};
+static const int i2c0_muxvals[] = {0, 0};
static const unsigned i2c1_pins[] = {144, 145};
-static const unsigned i2c1_muxvals[] = {0, 0};
+static const int i2c1_muxvals[] = {0, 0};
static const unsigned i2c2_pins[] = {146, 147};
-static const unsigned i2c2_muxvals[] = {0, 0};
+static const int i2c2_muxvals[] = {0, 0};
static const unsigned i2c3_pins[] = {148, 149};
-static const unsigned i2c3_muxvals[] = {0, 0};
+static const int i2c3_muxvals[] = {0, 0};
static const unsigned i2c6_pins[] = {308, 309};
-static const unsigned i2c6_muxvals[] = {6, 6};
+static const int i2c6_muxvals[] = {6, 6};
static const unsigned nand_pins[] = {40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54};
-static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0};
+static const int nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
static const unsigned nand_cs1_pins[] = {131, 132};
-static const unsigned nand_cs1_muxvals[] = {1, 1};
+static const int nand_cs1_muxvals[] = {1, 1};
static const unsigned sd_pins[] = {150, 151, 152, 153, 154, 155, 156, 157, 158};
-static const unsigned sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const int sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
static const unsigned sd1_pins[] = {319, 320, 321, 322, 323, 324, 325, 326,
327};
-static const unsigned sd1_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const int sd1_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned system_bus_pins[] = {25, 26, 27, 28, 29, 30, 31, 32, 33,
+ 34, 35, 36, 37, 38};
+static const int system_bus_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0};
+static const unsigned system_bus_cs0_pins[] = {318};
+static const int system_bus_cs0_muxvals[] = {5};
+static const unsigned system_bus_cs1_pins[] = {24};
+static const int system_bus_cs1_muxvals[] = {0};
+static const unsigned system_bus_cs2_pins[] = {315};
+static const int system_bus_cs2_muxvals[] = {5};
+static const unsigned system_bus_cs3_pins[] = {313};
+static const int system_bus_cs3_muxvals[] = {5};
+static const unsigned system_bus_cs4_pins[] = {305};
+static const int system_bus_cs4_muxvals[] = {5};
+static const unsigned system_bus_cs5_pins[] = {303};
+static const int system_bus_cs5_muxvals[] = {6};
+static const unsigned system_bus_cs6_pins[] = {307};
+static const int system_bus_cs6_muxvals[] = {6};
+static const unsigned system_bus_cs7_pins[] = {312};
+static const int system_bus_cs7_muxvals[] = {6};
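+/*
+ * system_bus carries the shared external-bus data and control lines;
+ * each system_bus_csN group is a single chip-select pin that can be
+ * muxed independently, hence one pin and one muxval per group.
+ */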
static const unsigned uart0_pins[] = {127, 128};
-static const unsigned uart0_muxvals[] = {0, 0};
+static const int uart0_muxvals[] = {0, 0};
static const unsigned uart1_pins[] = {129, 130};
-static const unsigned uart1_muxvals[] = {0, 0};
+static const int uart1_muxvals[] = {0, 0};
static const unsigned uart2_pins[] = {131, 132};
-static const unsigned uart2_muxvals[] = {0, 0};
+static const int uart2_muxvals[] = {0, 0};
static const unsigned uart3_pins[] = {88, 89};
-static const unsigned uart3_muxvals[] = {2, 2};
+static const int uart3_muxvals[] = {2, 2};
static const unsigned usb0_pins[] = {180, 181};
-static const unsigned usb0_muxvals[] = {0, 0};
+static const int usb0_muxvals[] = {0, 0};
static const unsigned usb1_pins[] = {182, 183};
-static const unsigned usb1_muxvals[] = {0, 0};
+static const int usb1_muxvals[] = {0, 0};
static const unsigned usb2_pins[] = {184, 185};
-static const unsigned usb2_muxvals[] = {0, 0};
+static const int usb2_muxvals[] = {0, 0};
static const unsigned usb3_pins[] = {186, 187};
-static const unsigned usb3_muxvals[] = {0, 0};
+static const int usb3_muxvals[] = {0, 0};
static const unsigned port_range0_pins[] = {
300, 301, 302, 303, 304, 305, 306, 307, /* PORT0x */
308, 309, 310, 311, 312, 313, 314, 315, /* PORT1x */
@@ -1069,7 +1102,7 @@ static const unsigned port_range0_pins[] = {
76, 77, 78, 79, 80, 81, 82, 83, /* PORT13x */
84, 85, 86, 87, 88, 89, 90, 91, /* PORT14x */
};
-static const unsigned port_range0_muxvals[] = {
+static const int port_range0_muxvals[] = {
7, 7, 7, 7, 7, 7, 7, 7, /* PORT0x */
7, 7, 7, 7, 7, 7, 7, 7, /* PORT1x */
7, 7, 7, 7, 7, 7, 7, 7, /* PORT2x */
@@ -1102,7 +1135,7 @@ static const unsigned port_range1_pins[] = {
251, 252, 261, 262, 263, 264, 273, 274, /* PORT29x */
31, 32, 33, 34, 35, 36, 37, 38, /* PORT30x */
};
-static const unsigned port_range1_muxvals[] = {
+static const int port_range1_muxvals[] = {
7, 7, 7, /* PORT175-177 */
7, 7, 7, 7, 7, 7, 7, 7, /* PORT18x */
7, 7, 7, 7, 7, 7, 7, 7, /* PORT19x */
@@ -1123,7 +1156,7 @@ static const unsigned xirq_pins[] = {
234, 186, 99, 100, 101, 102, 184, 301, /* XIRQ8-15 */
302, 303, 304, 305, 306, /* XIRQ16-20 */
};
-static const unsigned xirq_muxvals[] = {
+static const int xirq_muxvals[] = {
7, 7, 7, 7, 7, 7, 7, 7, /* XIRQ0-7 */
7, 7, 7, 7, 7, 7, 2, 2, /* XIRQ8-15 */
2, 2, 2, 2, 2, /* XIRQ16-20 */
@@ -1131,13 +1164,17 @@ static const unsigned xirq_muxvals[] = {
static const unsigned xirq_alternatives_pins[] = {
184, 310, 316,
};
-static const unsigned xirq_alternatives_muxvals[] = {
+static const int xirq_alternatives_muxvals[] = {
2, 2, 2,
};
-static const struct uniphier_pinctrl_group ph1_pro4_groups[] = {
+static const struct uniphier_pinctrl_group uniphier_pro4_groups[] = {
UNIPHIER_PINCTRL_GROUP(emmc),
UNIPHIER_PINCTRL_GROUP(emmc_dat8),
+ UNIPHIER_PINCTRL_GROUP(ether_mii),
+ UNIPHIER_PINCTRL_GROUP(ether_rgmii),
+ UNIPHIER_PINCTRL_GROUP(ether_rmii),
+ UNIPHIER_PINCTRL_GROUP(ether_rmiib),
UNIPHIER_PINCTRL_GROUP(i2c0),
UNIPHIER_PINCTRL_GROUP(i2c1),
UNIPHIER_PINCTRL_GROUP(i2c2),
@@ -1147,6 +1184,15 @@ static const struct uniphier_pinctrl_group ph1_pro4_groups[] = {
UNIPHIER_PINCTRL_GROUP(nand_cs1),
UNIPHIER_PINCTRL_GROUP(sd),
UNIPHIER_PINCTRL_GROUP(sd1),
+ UNIPHIER_PINCTRL_GROUP(system_bus),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs0),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs1),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs2),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs3),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs4),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs5),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs6),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs7),
UNIPHIER_PINCTRL_GROUP(uart0),
UNIPHIER_PINCTRL_GROUP(uart1),
UNIPHIER_PINCTRL_GROUP(uart2),
@@ -1413,6 +1459,9 @@ static const struct uniphier_pinctrl_group ph1_pro4_groups[] = {
};
static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
+static const char * const ether_mii_groups[] = {"ether_mii"};
+static const char * const ether_rgmii_groups[] = {"ether_rgmii"};
+static const char * const ether_rmii_groups[] = {"ether_rmii", "ether_rmiib"};
static const char * const i2c0_groups[] = {"i2c0"};
static const char * const i2c1_groups[] = {"i2c1"};
static const char * const i2c2_groups[] = {"i2c2"};
@@ -1421,6 +1470,15 @@ static const char * const i2c6_groups[] = {"i2c6"};
static const char * const nand_groups[] = {"nand", "nand_cs1"};
static const char * const sd_groups[] = {"sd"};
static const char * const sd1_groups[] = {"sd1"};
+static const char * const system_bus_groups[] = {"system_bus",
+ "system_bus_cs0",
+ "system_bus_cs1",
+ "system_bus_cs2",
+ "system_bus_cs3",
+ "system_bus_cs4",
+ "system_bus_cs5",
+ "system_bus_cs6",
+ "system_bus_cs7"};
static const char * const uart0_groups[] = {"uart0"};
static const char * const uart1_groups[] = {"uart1"};
static const char * const uart2_groups[] = {"uart2"};
@@ -1499,8 +1557,11 @@ static const char * const xirq_groups[] = {
"xirq14b", "xirq17b", "xirq18b",
};
-static const struct uniphier_pinmux_function ph1_pro4_functions[] = {
+static const struct uniphier_pinmux_function uniphier_pro4_functions[] = {
UNIPHIER_PINMUX_FUNCTION(emmc),
+ UNIPHIER_PINMUX_FUNCTION(ether_mii),
+ UNIPHIER_PINMUX_FUNCTION(ether_rgmii),
+ UNIPHIER_PINMUX_FUNCTION(ether_rmii),
UNIPHIER_PINMUX_FUNCTION(i2c0),
UNIPHIER_PINMUX_FUNCTION(i2c1),
UNIPHIER_PINMUX_FUNCTION(i2c2),
@@ -1509,6 +1570,7 @@ static const struct uniphier_pinmux_function ph1_pro4_functions[] = {
UNIPHIER_PINMUX_FUNCTION(nand),
UNIPHIER_PINMUX_FUNCTION(sd),
UNIPHIER_PINMUX_FUNCTION(sd1),
+ UNIPHIER_PINMUX_FUNCTION(system_bus),
UNIPHIER_PINMUX_FUNCTION(uart0),
UNIPHIER_PINMUX_FUNCTION(uart1),
UNIPHIER_PINMUX_FUNCTION(uart2),
@@ -1521,43 +1583,36 @@ static const struct uniphier_pinmux_function ph1_pro4_functions[] = {
UNIPHIER_PINMUX_FUNCTION(xirq),
};
-static struct uniphier_pinctrl_socdata ph1_pro4_pindata = {
- .groups = ph1_pro4_groups,
- .groups_count = ARRAY_SIZE(ph1_pro4_groups),
- .functions = ph1_pro4_functions,
- .functions_count = ARRAY_SIZE(ph1_pro4_functions),
- .mux_bits = 4,
- .reg_stride = 8,
- .load_pinctrl = true,
-};
-
-static struct pinctrl_desc ph1_pro4_pinctrl_desc = {
- .name = DRIVER_NAME,
- .pins = ph1_pro4_pins,
- .npins = ARRAY_SIZE(ph1_pro4_pins),
- .owner = THIS_MODULE,
+static struct uniphier_pinctrl_socdata uniphier_pro4_pindata = {
+ .pins = uniphier_pro4_pins,
+ .npins = ARRAY_SIZE(uniphier_pro4_pins),
+ .groups = uniphier_pro4_groups,
+ .groups_count = ARRAY_SIZE(uniphier_pro4_groups),
+ .functions = uniphier_pro4_functions,
+ .functions_count = ARRAY_SIZE(uniphier_pro4_functions),
+ .caps = UNIPHIER_PINCTRL_CAPS_DBGMUX_SEPARATE,
};
-static int ph1_pro4_pinctrl_probe(struct platform_device *pdev)
+static int uniphier_pro4_pinctrl_probe(struct platform_device *pdev)
{
- return uniphier_pinctrl_probe(pdev, &ph1_pro4_pinctrl_desc,
- &ph1_pro4_pindata);
+ return uniphier_pinctrl_probe(pdev, &uniphier_pro4_pindata);
}
-static const struct of_device_id ph1_pro4_pinctrl_match[] = {
+static const struct of_device_id uniphier_pro4_pinctrl_match[] = {
+ { .compatible = "socionext,uniphier-pro4-pinctrl" },
{ .compatible = "socionext,ph1-pro4-pinctrl" },
{ /* sentinel */ }
};
-MODULE_DEVICE_TABLE(of, ph1_pro4_pinctrl_match);
+MODULE_DEVICE_TABLE(of, uniphier_pro4_pinctrl_match);
-static struct platform_driver ph1_pro4_pinctrl_driver = {
- .probe = ph1_pro4_pinctrl_probe,
+static struct platform_driver uniphier_pro4_pinctrl_driver = {
+ .probe = uniphier_pro4_pinctrl_probe,
.driver = {
- .name = DRIVER_NAME,
- .of_match_table = ph1_pro4_pinctrl_match,
+ .name = "uniphier-pro4-pinctrl",
+ .of_match_table = uniphier_pro4_pinctrl_match,
},
};
-module_platform_driver(ph1_pro4_pinctrl_driver);
+module_platform_driver(uniphier_pro4_pinctrl_driver);
MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
MODULE_DESCRIPTION("UniPhier PH1-Pro4 pinctrl driver");
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c
index 3087f7675..55d4a1228 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c
@@ -19,823 +19,840 @@
#include "pinctrl-uniphier.h"
-#define DRIVER_NAME "ph1-pro5-pinctrl"
-
-static const struct pinctrl_pin_desc ph1_pro5_pins[] = {
+static const struct pinctrl_pin_desc uniphier_pro5_pins[] = {
UNIPHIER_PINCTRL_PIN(0, "AEXCKA1", 0,
- 0, UNIPHIER_PIN_DRV_4_8,
+ 0, UNIPHIER_PIN_DRV_1BIT,
0, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(1, "AEXCKA2", 0,
- 1, UNIPHIER_PIN_DRV_4_8,
+ 1, UNIPHIER_PIN_DRV_1BIT,
1, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(2, "CK27EXI", 0,
- 2, UNIPHIER_PIN_DRV_4_8,
+ 2, UNIPHIER_PIN_DRV_1BIT,
2, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(3, "CK54EXI", 0,
- 3, UNIPHIER_PIN_DRV_4_8,
+ 3, UNIPHIER_PIN_DRV_1BIT,
3, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(4, "ED0", UNIPHIER_PIN_IECTRL_NONE,
- 4, UNIPHIER_PIN_DRV_4_8,
+ 4, UNIPHIER_PIN_DRV_1BIT,
4, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(5, "ED1", UNIPHIER_PIN_IECTRL_NONE,
- 5, UNIPHIER_PIN_DRV_4_8,
+ 5, UNIPHIER_PIN_DRV_1BIT,
5, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(6, "ED2", UNIPHIER_PIN_IECTRL_NONE,
- 6, UNIPHIER_PIN_DRV_4_8,
+ 6, UNIPHIER_PIN_DRV_1BIT,
6, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(7, "ED3", UNIPHIER_PIN_IECTRL_NONE,
- 7, UNIPHIER_PIN_DRV_4_8,
+ 7, UNIPHIER_PIN_DRV_1BIT,
7, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(8, "ED4", UNIPHIER_PIN_IECTRL_NONE,
- 8, UNIPHIER_PIN_DRV_4_8,
+ 8, UNIPHIER_PIN_DRV_1BIT,
8, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(9, "ED5", UNIPHIER_PIN_IECTRL_NONE,
- 9, UNIPHIER_PIN_DRV_4_8,
+ 9, UNIPHIER_PIN_DRV_1BIT,
9, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(10, "ED6", UNIPHIER_PIN_IECTRL_NONE,
- 10, UNIPHIER_PIN_DRV_4_8,
+ 10, UNIPHIER_PIN_DRV_1BIT,
10, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(11, "ED7", UNIPHIER_PIN_IECTRL_NONE,
- 11, UNIPHIER_PIN_DRV_4_8,
+ 11, UNIPHIER_PIN_DRV_1BIT,
11, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(12, "XERWE0", UNIPHIER_PIN_IECTRL_NONE,
- 12, UNIPHIER_PIN_DRV_4_8,
+ 12, UNIPHIER_PIN_DRV_1BIT,
12, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(13, "XERWE1", UNIPHIER_PIN_IECTRL_NONE,
- 13, UNIPHIER_PIN_DRV_4_8,
+ 13, UNIPHIER_PIN_DRV_1BIT,
13, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(14, "ERXW", UNIPHIER_PIN_IECTRL_NONE,
- 14, UNIPHIER_PIN_DRV_4_8,
+ 14, UNIPHIER_PIN_DRV_1BIT,
14, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(15, "ES0", UNIPHIER_PIN_IECTRL_NONE,
- 15, UNIPHIER_PIN_DRV_4_8,
+ 15, UNIPHIER_PIN_DRV_1BIT,
15, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(16, "ES1", UNIPHIER_PIN_IECTRL_NONE,
- 16, UNIPHIER_PIN_DRV_4_8,
+ 16, UNIPHIER_PIN_DRV_1BIT,
16, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(17, "ES2", UNIPHIER_PIN_IECTRL_NONE,
- 17, UNIPHIER_PIN_DRV_4_8,
+ 17, UNIPHIER_PIN_DRV_1BIT,
17, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(18, "XECS1", UNIPHIER_PIN_IECTRL_NONE,
- 18, UNIPHIER_PIN_DRV_4_8,
+ 18, UNIPHIER_PIN_DRV_1BIT,
18, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(19, "XNFRE", UNIPHIER_PIN_IECTRL_NONE,
- 19, UNIPHIER_PIN_DRV_4_8,
+ 19, UNIPHIER_PIN_DRV_1BIT,
19, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(20, "XNFWE", UNIPHIER_PIN_IECTRL_NONE,
- 20, UNIPHIER_PIN_DRV_4_8,
+ 20, UNIPHIER_PIN_DRV_1BIT,
20, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(21, "NFALE", UNIPHIER_PIN_IECTRL_NONE,
- 21, UNIPHIER_PIN_DRV_4_8,
+ 21, UNIPHIER_PIN_DRV_1BIT,
21, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(22, "NFCLE", UNIPHIER_PIN_IECTRL_NONE,
- 22, UNIPHIER_PIN_DRV_4_8,
+ 22, UNIPHIER_PIN_DRV_1BIT,
22, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(23, "XNFWP", UNIPHIER_PIN_IECTRL_NONE,
- 23, UNIPHIER_PIN_DRV_4_8,
+ 23, UNIPHIER_PIN_DRV_1BIT,
23, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(24, "XNFCE0", UNIPHIER_PIN_IECTRL_NONE,
- 24, UNIPHIER_PIN_DRV_4_8,
+ 24, UNIPHIER_PIN_DRV_1BIT,
24, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(25, "NFRYBY0", UNIPHIER_PIN_IECTRL_NONE,
- 25, UNIPHIER_PIN_DRV_4_8,
+ 25, UNIPHIER_PIN_DRV_1BIT,
25, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(26, "XNFCE1", UNIPHIER_PIN_IECTRL_NONE,
- 26, UNIPHIER_PIN_DRV_4_8,
+ 26, UNIPHIER_PIN_DRV_1BIT,
26, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(27, "NFRYBY1", UNIPHIER_PIN_IECTRL_NONE,
- 27, UNIPHIER_PIN_DRV_4_8,
+ 27, UNIPHIER_PIN_DRV_1BIT,
27, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(28, "NFD0", UNIPHIER_PIN_IECTRL_NONE,
- 28, UNIPHIER_PIN_DRV_4_8,
+ 28, UNIPHIER_PIN_DRV_1BIT,
28, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(29, "NFD1", UNIPHIER_PIN_IECTRL_NONE,
- 29, UNIPHIER_PIN_DRV_4_8,
+ 29, UNIPHIER_PIN_DRV_1BIT,
29, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(30, "NFD2", UNIPHIER_PIN_IECTRL_NONE,
- 30, UNIPHIER_PIN_DRV_4_8,
+ 30, UNIPHIER_PIN_DRV_1BIT,
30, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(31, "NFD3", UNIPHIER_PIN_IECTRL_NONE,
- 31, UNIPHIER_PIN_DRV_4_8,
+ 31, UNIPHIER_PIN_DRV_1BIT,
31, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(32, "NFD4", UNIPHIER_PIN_IECTRL_NONE,
- 32, UNIPHIER_PIN_DRV_4_8,
+ 32, UNIPHIER_PIN_DRV_1BIT,
32, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(33, "NFD5", UNIPHIER_PIN_IECTRL_NONE,
- 33, UNIPHIER_PIN_DRV_4_8,
+ 33, UNIPHIER_PIN_DRV_1BIT,
33, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(34, "NFD6", UNIPHIER_PIN_IECTRL_NONE,
- 34, UNIPHIER_PIN_DRV_4_8,
+ 34, UNIPHIER_PIN_DRV_1BIT,
34, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(35, "NFD7", UNIPHIER_PIN_IECTRL_NONE,
- 35, UNIPHIER_PIN_DRV_4_8,
+ 35, UNIPHIER_PIN_DRV_1BIT,
35, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(36, "XERST", UNIPHIER_PIN_IECTRL_NONE,
- 36, UNIPHIER_PIN_DRV_4_8,
+ 36, UNIPHIER_PIN_DRV_1BIT,
36, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(37, "MMCCLK", UNIPHIER_PIN_IECTRL_NONE,
- 37, UNIPHIER_PIN_DRV_4_8,
+ 37, UNIPHIER_PIN_DRV_1BIT,
37, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(38, "MMCCMD", UNIPHIER_PIN_IECTRL_NONE,
- 38, UNIPHIER_PIN_DRV_4_8,
+ 38, UNIPHIER_PIN_DRV_1BIT,
38, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(39, "MMCDAT0", UNIPHIER_PIN_IECTRL_NONE,
- 39, UNIPHIER_PIN_DRV_4_8,
+ 39, UNIPHIER_PIN_DRV_1BIT,
39, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(40, "MMCDAT1", UNIPHIER_PIN_IECTRL_NONE,
- 40, UNIPHIER_PIN_DRV_4_8,
+ 40, UNIPHIER_PIN_DRV_1BIT,
40, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(41, "MMCDAT2", UNIPHIER_PIN_IECTRL_NONE,
- 41, UNIPHIER_PIN_DRV_4_8,
+ 41, UNIPHIER_PIN_DRV_1BIT,
41, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(42, "MMCDAT3", UNIPHIER_PIN_IECTRL_NONE,
- 42, UNIPHIER_PIN_DRV_4_8,
+ 42, UNIPHIER_PIN_DRV_1BIT,
42, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(43, "MMCDAT4", UNIPHIER_PIN_IECTRL_NONE,
- 43, UNIPHIER_PIN_DRV_4_8,
+ 43, UNIPHIER_PIN_DRV_1BIT,
43, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(44, "MMCDAT5", UNIPHIER_PIN_IECTRL_NONE,
- 44, UNIPHIER_PIN_DRV_4_8,
+ 44, UNIPHIER_PIN_DRV_1BIT,
44, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(45, "MMCDAT6", UNIPHIER_PIN_IECTRL_NONE,
- 45, UNIPHIER_PIN_DRV_4_8,
+ 45, UNIPHIER_PIN_DRV_1BIT,
45, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(46, "MMCDAT7", UNIPHIER_PIN_IECTRL_NONE,
- 46, UNIPHIER_PIN_DRV_4_8,
+ 46, UNIPHIER_PIN_DRV_1BIT,
46, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(47, "TXD0", 0,
- 47, UNIPHIER_PIN_DRV_4_8,
+ 47, UNIPHIER_PIN_DRV_1BIT,
47, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(48, "RXD0", 0,
- 48, UNIPHIER_PIN_DRV_4_8,
+ 48, UNIPHIER_PIN_DRV_1BIT,
48, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(49, "TXD1", 0,
- 49, UNIPHIER_PIN_DRV_4_8,
+ 49, UNIPHIER_PIN_DRV_1BIT,
49, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(50, "RXD1", 0,
- 50, UNIPHIER_PIN_DRV_4_8,
+ 50, UNIPHIER_PIN_DRV_1BIT,
50, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(51, "TXD2", UNIPHIER_PIN_IECTRL_NONE,
- 51, UNIPHIER_PIN_DRV_4_8,
+ 51, UNIPHIER_PIN_DRV_1BIT,
51, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(52, "RXD2", UNIPHIER_PIN_IECTRL_NONE,
- 52, UNIPHIER_PIN_DRV_4_8,
+ 52, UNIPHIER_PIN_DRV_1BIT,
52, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(53, "TXD3", 0,
- 53, UNIPHIER_PIN_DRV_4_8,
+ 53, UNIPHIER_PIN_DRV_1BIT,
53, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(54, "RXD3", 0,
- 54, UNIPHIER_PIN_DRV_4_8,
+ 54, UNIPHIER_PIN_DRV_1BIT,
54, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(55, "MS0CS0", 0,
- 55, UNIPHIER_PIN_DRV_4_8,
+ 55, UNIPHIER_PIN_DRV_1BIT,
55, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(56, "MS0DO", 0,
- 56, UNIPHIER_PIN_DRV_4_8,
+ 56, UNIPHIER_PIN_DRV_1BIT,
56, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(57, "MS0DI", 0,
- 57, UNIPHIER_PIN_DRV_4_8,
+ 57, UNIPHIER_PIN_DRV_1BIT,
57, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(58, "MS0CLK", 0,
- 58, UNIPHIER_PIN_DRV_4_8,
+ 58, UNIPHIER_PIN_DRV_1BIT,
58, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(59, "CSCLK", 0,
- 59, UNIPHIER_PIN_DRV_4_8,
+ 59, UNIPHIER_PIN_DRV_1BIT,
59, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(60, "CSBPTM", 0,
- 60, UNIPHIER_PIN_DRV_4_8,
+ 60, UNIPHIER_PIN_DRV_1BIT,
60, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(61, "CSBMTP", 0,
- 61, UNIPHIER_PIN_DRV_4_8,
+ 61, UNIPHIER_PIN_DRV_1BIT,
61, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(62, "XCINTP", 0,
- 62, UNIPHIER_PIN_DRV_4_8,
+ 62, UNIPHIER_PIN_DRV_1BIT,
62, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(63, "XCINTM", 0,
- 63, UNIPHIER_PIN_DRV_4_8,
+ 63, UNIPHIER_PIN_DRV_1BIT,
63, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(64, "XCMPREQ", 0,
- 64, UNIPHIER_PIN_DRV_4_8,
+ 64, UNIPHIER_PIN_DRV_1BIT,
64, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(65, "XSRST", 0,
- 65, UNIPHIER_PIN_DRV_4_8,
+ 65, UNIPHIER_PIN_DRV_1BIT,
65, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(66, "LPST", UNIPHIER_PIN_IECTRL_NONE,
- 66, UNIPHIER_PIN_DRV_4_8,
+ 66, UNIPHIER_PIN_DRV_1BIT,
66, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(67, "PWMA", 0,
- 67, UNIPHIER_PIN_DRV_4_8,
+ 67, UNIPHIER_PIN_DRV_1BIT,
67, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(68, "XIRQ0", 0,
- 68, UNIPHIER_PIN_DRV_4_8,
+ 68, UNIPHIER_PIN_DRV_1BIT,
68, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(69, "XIRQ1", 0,
- 69, UNIPHIER_PIN_DRV_4_8,
+ 69, UNIPHIER_PIN_DRV_1BIT,
69, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(70, "XIRQ2", 0,
- 70, UNIPHIER_PIN_DRV_4_8,
+ 70, UNIPHIER_PIN_DRV_1BIT,
70, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(71, "XIRQ3", 0,
- 71, UNIPHIER_PIN_DRV_4_8,
+ 71, UNIPHIER_PIN_DRV_1BIT,
71, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(72, "XIRQ4", 0,
- 72, UNIPHIER_PIN_DRV_4_8,
+ 72, UNIPHIER_PIN_DRV_1BIT,
72, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(73, "XIRQ5", 0,
- 73, UNIPHIER_PIN_DRV_4_8,
+ 73, UNIPHIER_PIN_DRV_1BIT,
73, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(74, "XIRQ6", 0,
- 74, UNIPHIER_PIN_DRV_4_8,
+ 74, UNIPHIER_PIN_DRV_1BIT,
74, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(75, "XIRQ7", 0,
- 75, UNIPHIER_PIN_DRV_4_8,
+ 75, UNIPHIER_PIN_DRV_1BIT,
75, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(76, "XIRQ8", 0,
- 76, UNIPHIER_PIN_DRV_4_8,
+ 76, UNIPHIER_PIN_DRV_1BIT,
76, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(77, "XIRQ9", 0,
- 77, UNIPHIER_PIN_DRV_4_8,
+ 77, UNIPHIER_PIN_DRV_1BIT,
77, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(78, "XIRQ10", 0,
- 78, UNIPHIER_PIN_DRV_4_8,
+ 78, UNIPHIER_PIN_DRV_1BIT,
78, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(79, "XIRQ11", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
79, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(80, "XIRQ12", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
80, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(81, "XIRQ13", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
81, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(82, "XIRQ14", 0,
- 82, UNIPHIER_PIN_DRV_4_8,
+ 82, UNIPHIER_PIN_DRV_1BIT,
82, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(83, "XIRQ15", 0,
- 83, UNIPHIER_PIN_DRV_4_8,
+ 83, UNIPHIER_PIN_DRV_1BIT,
83, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(84, "XIRQ16", 0,
- 84, UNIPHIER_PIN_DRV_4_8,
+ 84, UNIPHIER_PIN_DRV_1BIT,
84, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(85, "XIRQ17", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
85, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(86, "XIRQ18", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
86, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(87, "XIRQ19", 0,
- 87, UNIPHIER_PIN_DRV_4_8,
+ 87, UNIPHIER_PIN_DRV_1BIT,
87, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(88, "XIRQ20", 0,
- 88, UNIPHIER_PIN_DRV_4_8,
+ 88, UNIPHIER_PIN_DRV_1BIT,
88, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(89, "PORT00", 0,
- 89, UNIPHIER_PIN_DRV_4_8,
+ 89, UNIPHIER_PIN_DRV_1BIT,
89, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(90, "PORT01", 0,
- 90, UNIPHIER_PIN_DRV_4_8,
+ 90, UNIPHIER_PIN_DRV_1BIT,
90, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(91, "PORT02", 0,
- 91, UNIPHIER_PIN_DRV_4_8,
+ 91, UNIPHIER_PIN_DRV_1BIT,
91, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(92, "PORT03", 0,
- 92, UNIPHIER_PIN_DRV_4_8,
+ 92, UNIPHIER_PIN_DRV_1BIT,
92, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(93, "PORT04", 0,
- 93, UNIPHIER_PIN_DRV_4_8,
+ 93, UNIPHIER_PIN_DRV_1BIT,
93, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(94, "PORT05", 0,
- 94, UNIPHIER_PIN_DRV_4_8,
+ 94, UNIPHIER_PIN_DRV_1BIT,
94, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(95, "PORT06", 0,
- 95, UNIPHIER_PIN_DRV_4_8,
+ 95, UNIPHIER_PIN_DRV_1BIT,
95, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(96, "PORT07", 0,
- 96, UNIPHIER_PIN_DRV_4_8,
+ 96, UNIPHIER_PIN_DRV_1BIT,
96, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(97, "PORT10", 0,
- 97, UNIPHIER_PIN_DRV_4_8,
+ 97, UNIPHIER_PIN_DRV_1BIT,
97, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(98, "PORT11", 0,
- 98, UNIPHIER_PIN_DRV_4_8,
+ 98, UNIPHIER_PIN_DRV_1BIT,
98, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(99, "PORT12", 0,
- 99, UNIPHIER_PIN_DRV_4_8,
+ 99, UNIPHIER_PIN_DRV_1BIT,
99, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(100, "PORT13", 0,
- 100, UNIPHIER_PIN_DRV_4_8,
+ 100, UNIPHIER_PIN_DRV_1BIT,
100, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(101, "PORT14", 0,
- 101, UNIPHIER_PIN_DRV_4_8,
+ 101, UNIPHIER_PIN_DRV_1BIT,
101, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(102, "PORT15", 0,
- 102, UNIPHIER_PIN_DRV_4_8,
+ 102, UNIPHIER_PIN_DRV_1BIT,
102, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(103, "PORT16", 0,
- 103, UNIPHIER_PIN_DRV_4_8,
+ 103, UNIPHIER_PIN_DRV_1BIT,
103, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(104, "PORT17", 0,
- 104, UNIPHIER_PIN_DRV_4_8,
+ 104, UNIPHIER_PIN_DRV_1BIT,
104, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(105, "T0HPD", 0,
- 105, UNIPHIER_PIN_DRV_4_8,
+ 105, UNIPHIER_PIN_DRV_1BIT,
105, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(106, "T1HPD", 0,
- 106, UNIPHIER_PIN_DRV_4_8,
+ 106, UNIPHIER_PIN_DRV_1BIT,
106, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(107, "R0HPD", 0,
- 107, UNIPHIER_PIN_DRV_4_8,
+ 107, UNIPHIER_PIN_DRV_1BIT,
107, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(108, "R1HPD", 0,
- 108, UNIPHIER_PIN_DRV_4_8,
+ 108, UNIPHIER_PIN_DRV_1BIT,
108, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(109, "XPERST", 0,
- 109, UNIPHIER_PIN_DRV_4_8,
+ 109, UNIPHIER_PIN_DRV_1BIT,
109, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(110, "XPEWAKE", 0,
- 110, UNIPHIER_PIN_DRV_4_8,
+ 110, UNIPHIER_PIN_DRV_1BIT,
110, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(111, "XPECLKRQ", 0,
- 111, UNIPHIER_PIN_DRV_4_8,
+ 111, UNIPHIER_PIN_DRV_1BIT,
111, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(112, "SDA0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
112, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(113, "SCL0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
113, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(114, "SDA1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
114, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(115, "SCL1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
115, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(116, "SDA2", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
116, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(117, "SCL2", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
117, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(118, "SDA3", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
118, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(119, "SCL3", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
119, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(120, "SPISYNC", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
120, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(121, "SPISCLK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
121, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(122, "SPITXD", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
122, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(123, "SPIRXD", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
123, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(124, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
124, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(125, "USB0OD", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
125, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(126, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
126, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(127, "USB1OD", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
127, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(128, "USB2VBUS", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
128, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(129, "USB2OD", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
129, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(130, "SMTRST0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
130, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(131, "SMTCMD0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
131, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(132, "SMTD0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
132, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(133, "SMTSEL0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
133, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(134, "SMTCLK0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
134, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(135, "SMTRST1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
135, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(136, "SMTCMD1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
136, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(137, "SMTD1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
137, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(138, "SMTSEL1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
138, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(139, "SMTCLK1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
139, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(140, "CH0CLK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
140, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(141, "CH0PSYNC", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
141, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(142, "CH0VAL", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
142, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(143, "CH0DATA", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
143, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(144, "CH1CLK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
144, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(145, "CH1PSYNC", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
145, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(146, "CH1VAL", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
146, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(147, "CH1DATA", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
147, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(148, "CH2CLK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
148, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(149, "CH2PSYNC", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
149, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(150, "CH2VAL", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
150, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(151, "CH2DATA", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
151, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(152, "CH3CLK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
152, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(153, "CH3PSYNC", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
153, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(154, "CH3VAL", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
154, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(155, "CH3DATA", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
155, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(156, "CH4CLK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
156, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(157, "CH4PSYNC", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
157, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(158, "CH4VAL", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
158, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(159, "CH4DATA", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
159, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(160, "CH5CLK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
160, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(161, "CH5PSYNC", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
161, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(162, "CH5VAL", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
162, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(163, "CH5DATA", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
163, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(164, "CH6CLK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
164, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(165, "CH6PSYNC", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
165, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(166, "CH6VAL", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
166, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(167, "CH6DATA", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
167, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(168, "CH7CLK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
168, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(169, "CH7PSYNC", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
169, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(170, "CH7VAL", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
170, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(171, "CH7DATA", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
171, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(172, "AI1ADCCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
172, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(173, "AI1BCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
173, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(174, "AI1LRCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
174, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(175, "AI1D0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
175, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(176, "AI1D1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
176, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(177, "AI1D2", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
177, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(178, "AI1D3", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
178, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(179, "AI2ADCCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
179, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(180, "AI2BCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
180, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(181, "AI2LRCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
181, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(182, "AI2D0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
182, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(183, "AI2D1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
183, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(184, "AI2D2", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
184, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(185, "AI2D3", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
185, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(186, "AI3ADCCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
186, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(187, "AI3BCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
187, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(188, "AI3LRCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
188, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(189, "AI3D0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
189, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(190, "AO1IEC", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
190, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(191, "AO1DACCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
191, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(192, "AO1BCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
192, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(193, "AO1LRCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
193, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(194, "AO1D0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
194, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(195, "AO1D1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
195, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(196, "AO1D2", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
196, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(197, "AO1D3", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
197, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(198, "AO2IEC", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
198, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(199, "AO2DACCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
199, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(200, "AO2BCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
200, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(201, "AO2LRCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
201, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(202, "AO2D0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
202, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(203, "AO2D1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
203, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(204, "AO2D2", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
204, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(205, "AO2D3", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
205, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(206, "AO3DACCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
206, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(207, "AO3BCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
207, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(208, "AO3LRCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
208, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(209, "AO3DMIX", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
209, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(210, "AO4DACCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
210, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(211, "AO4BCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
211, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(212, "AO4LRCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
212, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(213, "AO4DMIX", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
213, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(214, "VI1CLK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
214, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(215, "VI1C0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
215, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(216, "VI1C1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
216, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(217, "VI1C2", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
217, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(218, "VI1C3", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
218, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(219, "VI1C4", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
219, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(220, "VI1C5", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
220, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(221, "VI1C6", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
221, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(222, "VI1C7", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
222, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(223, "VI1C8", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
223, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(224, "VI1C9", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
224, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(225, "VI1Y0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
225, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(226, "VI1Y1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
226, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(227, "VI1Y2", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
227, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(228, "VI1Y3", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
228, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(229, "VI1Y4", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
229, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(230, "VI1Y5", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
230, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(231, "VI1Y6", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
231, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(232, "VI1Y7", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
232, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(233, "VI1Y8", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
233, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(234, "VI1Y9", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
234, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(235, "VI1DE", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
235, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(236, "VI1HSYNC", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
236, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(237, "VI1VSYNC", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
237, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(238, "VO1CLK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
238, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(239, "VO1D0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
239, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(240, "VO1D1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
240, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(241, "VO1D2", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
241, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(242, "VO1D3", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
242, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(243, "VO1D4", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
243, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(244, "VO1D5", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
244, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(245, "VO1D6", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
245, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(246, "VO1D7", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
246, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(247, "SDCD", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
247, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(248, "SDWP", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
248, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(249, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
249, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(250, "SDCLK", UNIPHIER_PIN_IECTRL_NONE,
- 40, UNIPHIER_PIN_DRV_8_12_16_20,
+ 10, UNIPHIER_PIN_DRV_2BIT,
-1, UNIPHIER_PIN_PULL_UP_FIXED),
UNIPHIER_PINCTRL_PIN(251, "SDCMD", UNIPHIER_PIN_IECTRL_NONE,
- 44, UNIPHIER_PIN_DRV_8_12_16_20,
+ 11, UNIPHIER_PIN_DRV_2BIT,
-1, UNIPHIER_PIN_PULL_UP_FIXED),
UNIPHIER_PINCTRL_PIN(252, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE,
- 48, UNIPHIER_PIN_DRV_8_12_16_20,
+ 12, UNIPHIER_PIN_DRV_2BIT,
-1, UNIPHIER_PIN_PULL_UP_FIXED),
UNIPHIER_PINCTRL_PIN(253, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE,
- 52, UNIPHIER_PIN_DRV_8_12_16_20,
+ 13, UNIPHIER_PIN_DRV_2BIT,
-1, UNIPHIER_PIN_PULL_UP_FIXED),
UNIPHIER_PINCTRL_PIN(254, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE,
- 56, UNIPHIER_PIN_DRV_8_12_16_20,
+ 14, UNIPHIER_PIN_DRV_2BIT,
-1, UNIPHIER_PIN_PULL_UP_FIXED),
UNIPHIER_PINCTRL_PIN(255, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE,
- 60, UNIPHIER_PIN_DRV_8_12_16_20,
+ 15, UNIPHIER_PIN_DRV_2BIT,
-1, UNIPHIER_PIN_PULL_UP_FIXED),
};
static const unsigned emmc_pins[] = {36, 37, 38, 39, 40, 41, 42};
-static const unsigned emmc_muxvals[] = {0, 0, 0, 0, 0, 0, 0};
+static const int emmc_muxvals[] = {0, 0, 0, 0, 0, 0, 0};
static const unsigned emmc_dat8_pins[] = {43, 44, 45, 46};
-static const unsigned emmc_dat8_muxvals[] = {0, 0, 0, 0};
+static const int emmc_dat8_muxvals[] = {0, 0, 0, 0};
static const unsigned i2c0_pins[] = {112, 113};
-static const unsigned i2c0_muxvals[] = {0, 0};
+static const int i2c0_muxvals[] = {0, 0};
static const unsigned i2c1_pins[] = {114, 115};
-static const unsigned i2c1_muxvals[] = {0, 0};
+static const int i2c1_muxvals[] = {0, 0};
static const unsigned i2c2_pins[] = {116, 117};
-static const unsigned i2c2_muxvals[] = {0, 0};
+static const int i2c2_muxvals[] = {0, 0};
static const unsigned i2c3_pins[] = {118, 119};
-static const unsigned i2c3_muxvals[] = {0, 0};
+static const int i2c3_muxvals[] = {0, 0};
static const unsigned i2c5_pins[] = {87, 88};
-static const unsigned i2c5_muxvals[] = {2, 2};
+static const int i2c5_muxvals[] = {2, 2};
static const unsigned i2c5b_pins[] = {196, 197};
-static const unsigned i2c5b_muxvals[] = {2, 2};
+static const int i2c5b_muxvals[] = {2, 2};
static const unsigned i2c5c_pins[] = {215, 216};
-static const unsigned i2c5c_muxvals[] = {2, 2};
+static const int i2c5c_muxvals[] = {2, 2};
static const unsigned i2c6_pins[] = {101, 102};
-static const unsigned i2c6_muxvals[] = {2, 2};
+static const int i2c6_muxvals[] = {2, 2};
static const unsigned nand_pins[] = {19, 20, 21, 22, 23, 24, 25, 28, 29, 30,
31, 32, 33, 34, 35};
-static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0};
+static const int nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
static const unsigned nand_cs1_pins[] = {26, 27};
-static const unsigned nand_cs1_muxvals[] = {0, 0};
+static const int nand_cs1_muxvals[] = {0, 0};
static const unsigned sd_pins[] = {247, 248, 249, 250, 251, 252, 253, 254, 255};
-static const unsigned sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const int sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned system_bus_pins[] = {4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17};
+static const int system_bus_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0};
+static const unsigned system_bus_cs0_pins[] = {105};
+static const int system_bus_cs0_muxvals[] = {1};
+static const unsigned system_bus_cs1_pins[] = {18};
+static const int system_bus_cs1_muxvals[] = {0};
+static const unsigned system_bus_cs2_pins[] = {106};
+static const int system_bus_cs2_muxvals[] = {1};
+static const unsigned system_bus_cs3_pins[] = {100};
+static const int system_bus_cs3_muxvals[] = {1};
+static const unsigned system_bus_cs4_pins[] = {101};
+static const int system_bus_cs4_muxvals[] = {1};
+static const unsigned system_bus_cs5_pins[] = {102};
+static const int system_bus_cs5_muxvals[] = {1};
+static const unsigned system_bus_cs6_pins[] = {69};
+static const int system_bus_cs6_muxvals[] = {5};
+static const unsigned system_bus_cs7_pins[] = {70};
+static const int system_bus_cs7_muxvals[] = {5};
static const unsigned uart0_pins[] = {47, 48};
-static const unsigned uart0_muxvals[] = {0, 0};
+static const int uart0_muxvals[] = {0, 0};
static const unsigned uart0b_pins[] = {227, 228};
-static const unsigned uart0b_muxvals[] = {3, 3};
+static const int uart0b_muxvals[] = {3, 3};
static const unsigned uart1_pins[] = {49, 50};
-static const unsigned uart1_muxvals[] = {0, 0};
+static const int uart1_muxvals[] = {0, 0};
static const unsigned uart2_pins[] = {51, 52};
-static const unsigned uart2_muxvals[] = {0, 0};
+static const int uart2_muxvals[] = {0, 0};
static const unsigned uart3_pins[] = {53, 54};
-static const unsigned uart3_muxvals[] = {0, 0};
+static const int uart3_muxvals[] = {0, 0};
static const unsigned usb0_pins[] = {124, 125};
-static const unsigned usb0_muxvals[] = {0, 0};
+static const int usb0_muxvals[] = {0, 0};
static const unsigned usb1_pins[] = {126, 127};
-static const unsigned usb1_muxvals[] = {0, 0};
+static const int usb1_muxvals[] = {0, 0};
static const unsigned usb2_pins[] = {128, 129};
-static const unsigned usb2_muxvals[] = {0, 0};
+static const int usb2_muxvals[] = {0, 0};
static const unsigned port_range0_pins[] = {
89, 90, 91, 92, 93, 94, 95, 96, /* PORT0x */
97, 98, 99, 100, 101, 102, 103, 104, /* PORT1x */
@@ -853,7 +870,7 @@ static const unsigned port_range0_pins[] = {
179, 180, 181, 182, 186, 187, 188, 189, /* PORT13x */
4, 5, 6, 7, 8, 9, 10, 11, /* PORT14x */
};
-static const unsigned port_range0_muxvals[] = {
+static const int port_range0_muxvals[] = {
15, 15, 15, 15, 15, 15, 15, 15, /* PORT0x */
15, 15, 15, 15, 15, 15, 15, 15, /* PORT1x */
15, 15, 15, 15, 15, 15, 15, 15, /* PORT2x */
@@ -886,7 +903,7 @@ static const unsigned port_range1_pins[] = {
105, 106, 18, 27, 36, 128, 132, 137, /* PORT29x */
183, 184, 185, 84, 47, 48, 51, 52, /* PORT30x */
};
-static const unsigned port_range1_muxvals[] = {
+static const int port_range1_muxvals[] = {
15, 15, 15, /* PORT175-177 */
15, 15, 15, 15, 15, 15, 15, 15, /* PORT18x */
15, 15, 15, 15, 15, 15, 15, 15, /* PORT19x */
@@ -907,7 +924,7 @@ static const unsigned xirq_pins[] = {
76, 77, 78, 79, 80, 81, 82, 83, /* XIRQ8-15 */
84, 85, 86, 87, 88, /* XIRQ16-20 */
};
-static const unsigned xirq_muxvals[] = {
+static const int xirq_muxvals[] = {
14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ0-7 */
14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ8-15 */
14, 14, 14, 14, 14, /* XIRQ16-20 */
@@ -915,11 +932,11 @@ static const unsigned xirq_muxvals[] = {
static const unsigned xirq_alternatives_pins[] = {
91, 92, 239, 144, 240, 156, 241, 106, 128,
};
-static const unsigned xirq_alternatives_muxvals[] = {
+static const int xirq_alternatives_muxvals[] = {
14, 14, 14, 14, 14, 14, 14, 14, 14,
};
-static const struct uniphier_pinctrl_group ph1_pro5_groups[] = {
+static const struct uniphier_pinctrl_group uniphier_pro5_groups[] = {
UNIPHIER_PINCTRL_GROUP(nand),
UNIPHIER_PINCTRL_GROUP(nand_cs1),
UNIPHIER_PINCTRL_GROUP(emmc),
@@ -933,6 +950,15 @@ static const struct uniphier_pinctrl_group ph1_pro5_groups[] = {
UNIPHIER_PINCTRL_GROUP(i2c5c),
UNIPHIER_PINCTRL_GROUP(i2c6),
UNIPHIER_PINCTRL_GROUP(sd),
+ UNIPHIER_PINCTRL_GROUP(system_bus),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs0),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs1),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs2),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs3),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs4),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs5),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs6),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs7),
UNIPHIER_PINCTRL_GROUP(uart0),
UNIPHIER_PINCTRL_GROUP(uart0b),
UNIPHIER_PINCTRL_GROUP(uart1),
@@ -1213,6 +1239,15 @@ static const char * const i2c5_groups[] = {"i2c5", "i2c5b", "i2c5c"};
static const char * const i2c6_groups[] = {"i2c6"};
static const char * const nand_groups[] = {"nand", "nand_cs1"};
static const char * const sd_groups[] = {"sd"};
+static const char * const system_bus_groups[] = {"system_bus",
+ "system_bus_cs0",
+ "system_bus_cs1",
+ "system_bus_cs2",
+ "system_bus_cs3",
+ "system_bus_cs4",
+ "system_bus_cs5",
+ "system_bus_cs6",
+ "system_bus_cs7"};
static const char * const uart0_groups[] = {"uart0", "uart0b"};
static const char * const uart1_groups[] = {"uart1"};
static const char * const uart2_groups[] = {"uart2"};
@@ -1291,7 +1326,7 @@ static const char * const xirq_groups[] = {
"xirq18b", "xirq18c", "xirq19b", "xirq20b",
};
-static const struct uniphier_pinmux_function ph1_pro5_functions[] = {
+static const struct uniphier_pinmux_function uniphier_pro5_functions[] = {
UNIPHIER_PINMUX_FUNCTION(emmc),
UNIPHIER_PINMUX_FUNCTION(i2c0),
UNIPHIER_PINMUX_FUNCTION(i2c1),
@@ -1301,6 +1336,7 @@ static const struct uniphier_pinmux_function ph1_pro5_functions[] = {
UNIPHIER_PINMUX_FUNCTION(i2c6),
UNIPHIER_PINMUX_FUNCTION(nand),
UNIPHIER_PINMUX_FUNCTION(sd),
+ UNIPHIER_PINMUX_FUNCTION(system_bus),
UNIPHIER_PINMUX_FUNCTION(uart0),
UNIPHIER_PINMUX_FUNCTION(uart1),
UNIPHIER_PINMUX_FUNCTION(uart2),
@@ -1312,43 +1348,36 @@ static const struct uniphier_pinmux_function ph1_pro5_functions[] = {
UNIPHIER_PINMUX_FUNCTION(xirq),
};
-static struct uniphier_pinctrl_socdata ph1_pro5_pindata = {
- .groups = ph1_pro5_groups,
- .groups_count = ARRAY_SIZE(ph1_pro5_groups),
- .functions = ph1_pro5_functions,
- .functions_count = ARRAY_SIZE(ph1_pro5_functions),
- .mux_bits = 4,
- .reg_stride = 8,
- .load_pinctrl = true,
-};
-
-static struct pinctrl_desc ph1_pro5_pinctrl_desc = {
- .name = DRIVER_NAME,
- .pins = ph1_pro5_pins,
- .npins = ARRAY_SIZE(ph1_pro5_pins),
- .owner = THIS_MODULE,
+static struct uniphier_pinctrl_socdata uniphier_pro5_pindata = {
+ .pins = uniphier_pro5_pins,
+ .npins = ARRAY_SIZE(uniphier_pro5_pins),
+ .groups = uniphier_pro5_groups,
+ .groups_count = ARRAY_SIZE(uniphier_pro5_groups),
+ .functions = uniphier_pro5_functions,
+ .functions_count = ARRAY_SIZE(uniphier_pro5_functions),
+ .caps = UNIPHIER_PINCTRL_CAPS_DBGMUX_SEPARATE,
};
-static int ph1_pro5_pinctrl_probe(struct platform_device *pdev)
+static int uniphier_pro5_pinctrl_probe(struct platform_device *pdev)
{
- return uniphier_pinctrl_probe(pdev, &ph1_pro5_pinctrl_desc,
- &ph1_pro5_pindata);
+ return uniphier_pinctrl_probe(pdev, &uniphier_pro5_pindata);
}
-static const struct of_device_id ph1_pro5_pinctrl_match[] = {
+static const struct of_device_id uniphier_pro5_pinctrl_match[] = {
+ { .compatible = "socionext,uniphier-pro5-pinctrl" },
{ .compatible = "socionext,ph1-pro5-pinctrl" },
{ /* sentinel */ }
};
-MODULE_DEVICE_TABLE(of, ph1_pro5_pinctrl_match);
+MODULE_DEVICE_TABLE(of, uniphier_pro5_pinctrl_match);
-static struct platform_driver ph1_pro5_pinctrl_driver = {
- .probe = ph1_pro5_pinctrl_probe,
+static struct platform_driver uniphier_pro5_pinctrl_driver = {
+ .probe = uniphier_pro5_pinctrl_probe,
.driver = {
- .name = DRIVER_NAME,
- .of_match_table = ph1_pro5_pinctrl_match,
+ .name = "uniphier-pro5-pinctrl",
+ .of_match_table = uniphier_pro5_pinctrl_match,
},
};
-module_platform_driver(ph1_pro5_pinctrl_driver);
+module_platform_driver(uniphier_pro5_pinctrl_driver);
MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
MODULE_DESCRIPTION("UniPhier PH1-Pro5 pinctrl driver");
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c
index e868030ff..85ca5e2d8 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c
@@ -19,760 +19,776 @@
#include "pinctrl-uniphier.h"
-#define DRIVER_NAME "proxstream2-pinctrl"
-
-static const struct pinctrl_pin_desc proxstream2_pins[] = {
+static const struct pinctrl_pin_desc uniphier_pxs2_pins[] = {
UNIPHIER_PINCTRL_PIN(0, "ED0", UNIPHIER_PIN_IECTRL_NONE,
- 0, UNIPHIER_PIN_DRV_4_8,
+ 0, UNIPHIER_PIN_DRV_1BIT,
0, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(1, "ED1", UNIPHIER_PIN_IECTRL_NONE,
- 1, UNIPHIER_PIN_DRV_4_8,
+ 1, UNIPHIER_PIN_DRV_1BIT,
1, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(2, "ED2", UNIPHIER_PIN_IECTRL_NONE,
- 2, UNIPHIER_PIN_DRV_4_8,
+ 2, UNIPHIER_PIN_DRV_1BIT,
2, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(3, "ED3", UNIPHIER_PIN_IECTRL_NONE,
- 3, UNIPHIER_PIN_DRV_4_8,
+ 3, UNIPHIER_PIN_DRV_1BIT,
3, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(4, "ED4", UNIPHIER_PIN_IECTRL_NONE,
- 4, UNIPHIER_PIN_DRV_4_8,
+ 4, UNIPHIER_PIN_DRV_1BIT,
4, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(5, "ED5", UNIPHIER_PIN_IECTRL_NONE,
- 5, UNIPHIER_PIN_DRV_4_8,
+ 5, UNIPHIER_PIN_DRV_1BIT,
5, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(6, "ED6", UNIPHIER_PIN_IECTRL_NONE,
- 6, UNIPHIER_PIN_DRV_4_8,
+ 6, UNIPHIER_PIN_DRV_1BIT,
6, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(7, "ED7", UNIPHIER_PIN_IECTRL_NONE,
- 7, UNIPHIER_PIN_DRV_4_8,
+ 7, UNIPHIER_PIN_DRV_1BIT,
7, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(8, "XERWE0", UNIPHIER_PIN_IECTRL_NONE,
- 8, UNIPHIER_PIN_DRV_4_8,
+ 8, UNIPHIER_PIN_DRV_1BIT,
8, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(9, "XERWE1", UNIPHIER_PIN_IECTRL_NONE,
- 9, UNIPHIER_PIN_DRV_4_8,
+ 9, UNIPHIER_PIN_DRV_1BIT,
9, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(10, "ERXW", UNIPHIER_PIN_IECTRL_NONE,
- 10, UNIPHIER_PIN_DRV_4_8,
+ 10, UNIPHIER_PIN_DRV_1BIT,
10, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(11, "ES0", UNIPHIER_PIN_IECTRL_NONE,
- 11, UNIPHIER_PIN_DRV_4_8,
+ 11, UNIPHIER_PIN_DRV_1BIT,
11, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(12, "ES1", UNIPHIER_PIN_IECTRL_NONE,
- 12, UNIPHIER_PIN_DRV_4_8,
+ 12, UNIPHIER_PIN_DRV_1BIT,
12, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(13, "ES2", UNIPHIER_PIN_IECTRL_NONE,
- 13, UNIPHIER_PIN_DRV_4_8,
+ 13, UNIPHIER_PIN_DRV_1BIT,
13, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(14, "XECS1", UNIPHIER_PIN_IECTRL_NONE,
- 14, UNIPHIER_PIN_DRV_4_8,
+ 14, UNIPHIER_PIN_DRV_1BIT,
14, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(15, "SMTRST0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
15, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(16, "SMTCMD0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
16, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(17, "SMTD0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
17, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(18, "SMTSEL0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
18, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(19, "SMTCLK0CG", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
19, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(20, "SMTDET0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
20, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(21, "SMTRST1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
21, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(22, "SMTCMD1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
22, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(23, "SMTD1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
23, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(24, "SMTSEL1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
24, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(25, "SMTCLK1CG", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
25, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(26, "SMTDET1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
26, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(27, "XIRQ18", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
27, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(28, "XIRQ19", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
28, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(29, "XIRQ20", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
29, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(30, "XNFRE", UNIPHIER_PIN_IECTRL_NONE,
- 30, UNIPHIER_PIN_DRV_4_8,
+ 30, UNIPHIER_PIN_DRV_1BIT,
30, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(31, "XNFWE", UNIPHIER_PIN_IECTRL_NONE,
- 31, UNIPHIER_PIN_DRV_4_8,
+ 31, UNIPHIER_PIN_DRV_1BIT,
31, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(32, "NFALE", UNIPHIER_PIN_IECTRL_NONE,
- 32, UNIPHIER_PIN_DRV_4_8,
+ 32, UNIPHIER_PIN_DRV_1BIT,
32, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(33, "NFCLE", UNIPHIER_PIN_IECTRL_NONE,
- 33, UNIPHIER_PIN_DRV_4_8,
+ 33, UNIPHIER_PIN_DRV_1BIT,
33, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(34, "XNFWP", UNIPHIER_PIN_IECTRL_NONE,
- 34, UNIPHIER_PIN_DRV_4_8,
+ 34, UNIPHIER_PIN_DRV_1BIT,
34, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(35, "XNFCE0", UNIPHIER_PIN_IECTRL_NONE,
- 35, UNIPHIER_PIN_DRV_4_8,
+ 35, UNIPHIER_PIN_DRV_1BIT,
35, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(36, "NFRYBY0", UNIPHIER_PIN_IECTRL_NONE,
- 36, UNIPHIER_PIN_DRV_4_8,
+ 36, UNIPHIER_PIN_DRV_1BIT,
36, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(37, "XNFCE1", UNIPHIER_PIN_IECTRL_NONE,
- 37, UNIPHIER_PIN_DRV_4_8,
+ 37, UNIPHIER_PIN_DRV_1BIT,
37, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(38, "NFRYBY1", UNIPHIER_PIN_IECTRL_NONE,
- 38, UNIPHIER_PIN_DRV_4_8,
+ 38, UNIPHIER_PIN_DRV_1BIT,
38, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(39, "NFD0", UNIPHIER_PIN_IECTRL_NONE,
- 39, UNIPHIER_PIN_DRV_4_8,
+ 39, UNIPHIER_PIN_DRV_1BIT,
39, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(40, "NFD1", UNIPHIER_PIN_IECTRL_NONE,
- 40, UNIPHIER_PIN_DRV_4_8,
+ 40, UNIPHIER_PIN_DRV_1BIT,
40, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(41, "NFD2", UNIPHIER_PIN_IECTRL_NONE,
- 41, UNIPHIER_PIN_DRV_4_8,
+ 41, UNIPHIER_PIN_DRV_1BIT,
41, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(42, "NFD3", UNIPHIER_PIN_IECTRL_NONE,
- 42, UNIPHIER_PIN_DRV_4_8,
+ 42, UNIPHIER_PIN_DRV_1BIT,
42, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(43, "NFD4", UNIPHIER_PIN_IECTRL_NONE,
- 43, UNIPHIER_PIN_DRV_4_8,
+ 43, UNIPHIER_PIN_DRV_1BIT,
43, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(44, "NFD5", UNIPHIER_PIN_IECTRL_NONE,
- 44, UNIPHIER_PIN_DRV_4_8,
+ 44, UNIPHIER_PIN_DRV_1BIT,
44, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(45, "NFD6", UNIPHIER_PIN_IECTRL_NONE,
- 45, UNIPHIER_PIN_DRV_4_8,
+ 45, UNIPHIER_PIN_DRV_1BIT,
45, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(46, "NFD7", UNIPHIER_PIN_IECTRL_NONE,
- 46, UNIPHIER_PIN_DRV_4_8,
+ 46, UNIPHIER_PIN_DRV_1BIT,
46, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(47, "SDCLK", UNIPHIER_PIN_IECTRL_NONE,
- 0, UNIPHIER_PIN_DRV_8_12_16_20,
+ 0, UNIPHIER_PIN_DRV_2BIT,
-1, UNIPHIER_PIN_PULL_UP_FIXED),
UNIPHIER_PINCTRL_PIN(48, "SDCMD", UNIPHIER_PIN_IECTRL_NONE,
- 4, UNIPHIER_PIN_DRV_8_12_16_20,
+ 1, UNIPHIER_PIN_DRV_2BIT,
-1, UNIPHIER_PIN_PULL_UP_FIXED),
UNIPHIER_PINCTRL_PIN(49, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE,
- 8, UNIPHIER_PIN_DRV_8_12_16_20,
+ 2, UNIPHIER_PIN_DRV_2BIT,
-1, UNIPHIER_PIN_PULL_UP_FIXED),
UNIPHIER_PINCTRL_PIN(50, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE,
- 12, UNIPHIER_PIN_DRV_8_12_16_20,
+ 3, UNIPHIER_PIN_DRV_2BIT,
-1, UNIPHIER_PIN_PULL_UP_FIXED),
UNIPHIER_PINCTRL_PIN(51, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE,
- 16, UNIPHIER_PIN_DRV_8_12_16_20,
+ 4, UNIPHIER_PIN_DRV_2BIT,
-1, UNIPHIER_PIN_PULL_UP_FIXED),
UNIPHIER_PINCTRL_PIN(52, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE,
- 20, UNIPHIER_PIN_DRV_8_12_16_20,
+ 5, UNIPHIER_PIN_DRV_2BIT,
-1, UNIPHIER_PIN_PULL_UP_FIXED),
UNIPHIER_PINCTRL_PIN(53, "SDCD", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
53, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(54, "SDWP", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
54, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(55, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
55, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(56, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
56, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(57, "USB0OD", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
57, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(58, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
58, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(59, "USB1OD", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
59, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(60, "USB2VBUS", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
60, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(61, "USB2OD", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
61, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(62, "USB3VBUS", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
62, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(63, "USB3OD", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
63, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(64, "CH0CLK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
64, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(65, "CH0PSYNC", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
65, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(66, "CH0VAL", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
66, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(67, "CH0DATA", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
67, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(68, "CH1CLK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
68, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(69, "CH1PSYNC", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
69, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(70, "CH1VAL", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
70, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(71, "CH1DATA", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
71, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(72, "XIRQ9", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
72, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(73, "XIRQ10", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
73, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(74, "XIRQ16", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
74, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(75, "CH4CLK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
75, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(76, "CH4PSYNC", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
76, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(77, "CH4VAL", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
77, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(78, "CH4DATA", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
78, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(79, "CH5CLK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
79, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(80, "CH5PSYNC", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
80, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(81, "CH5VAL", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
81, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(82, "CH5DATA", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
82, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(83, "CH6CLK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
83, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(84, "CH6PSYNC", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
84, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(85, "CH6VAL", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
85, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(86, "CH6DATA", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
86, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(87, "STS0CLKO", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
87, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(88, "STS0SYNCO", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
88, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(89, "STS0VALO", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
89, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(90, "STS0DATAO", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
90, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(91, "XIRQ17", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
91, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(92, "PORT163", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
92, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(93, "PORT165", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
93, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(94, "PORT166", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
94, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(95, "PORT132", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
95, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(96, "PORT133", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
96, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(97, "AO2IEC", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
97, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(98, "AI2ADCCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
98, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(99, "AI2BCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
99, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(100, "AI2LRCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
100, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(101, "AI2D0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
101, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(102, "AI2D1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
102, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(103, "AI2D2", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
103, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(104, "AI2D3", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
104, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(105, "AO3DACCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
105, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(106, "AO3BCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
106, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(107, "AO3LRCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
107, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(108, "AO3DMIX", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
108, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(109, "SDA0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
109, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(110, "SCL0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
110, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(111, "SDA1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
111, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(112, "SCL1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
112, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(113, "TXD2", 0,
- 113, UNIPHIER_PIN_DRV_4_8,
+ 113, UNIPHIER_PIN_DRV_1BIT,
113, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(114, "RXD2", 0,
- 114, UNIPHIER_PIN_DRV_4_8,
+ 114, UNIPHIER_PIN_DRV_1BIT,
114, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(115, "TXD1", 0,
- 115, UNIPHIER_PIN_DRV_4_8,
+ 115, UNIPHIER_PIN_DRV_1BIT,
115, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(116, "RXD1", 0,
- 116, UNIPHIER_PIN_DRV_4_8,
+ 116, UNIPHIER_PIN_DRV_1BIT,
116, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(117, "PORT190", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
117, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(118, "VI1HSYNC", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
118, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(119, "VI1VSYNC", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
119, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(120, "VI1DE", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
120, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(121, "XIRQ3", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
121, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(122, "XIRQ4", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
122, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(123, "VI1G2", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
123, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(124, "VI1G3", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
124, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(125, "VI1G4", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
125, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(126, "VI1G5", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
126, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(127, "VI1G6", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
127, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(128, "VI1G7", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
128, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(129, "VI1G8", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
129, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(130, "VI1G9", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
130, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(131, "VI1CLK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
131, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(132, "PORT05", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
132, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(133, "PORT06", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
133, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(134, "VI1R2", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
134, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(135, "VI1R3", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
135, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(136, "VI1R4", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
136, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(137, "VI1R5", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
137, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(138, "VI1R6", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
138, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(139, "VI1R7", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
139, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(140, "VI1R8", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
140, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(141, "VI1R9", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
141, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(142, "LPST", UNIPHIER_PIN_IECTRL_NONE,
- 142, UNIPHIER_PIN_DRV_4_8,
+ 142, UNIPHIER_PIN_DRV_1BIT,
142, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(143, "MDC", 0,
- 143, UNIPHIER_PIN_DRV_4_8,
+ 143, UNIPHIER_PIN_DRV_1BIT,
143, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(144, "MDIO", 0,
- 144, UNIPHIER_PIN_DRV_4_8,
+ 144, UNIPHIER_PIN_DRV_1BIT,
144, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(145, "MDIO_INTL", 0,
- 145, UNIPHIER_PIN_DRV_4_8,
+ 145, UNIPHIER_PIN_DRV_1BIT,
145, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(146, "PHYRSTL", 0,
- 146, UNIPHIER_PIN_DRV_4_8,
+ 146, UNIPHIER_PIN_DRV_1BIT,
146, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(147, "RGMII_RXCLK", 0,
- 147, UNIPHIER_PIN_DRV_4_8,
+ 147, UNIPHIER_PIN_DRV_1BIT,
147, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(148, "RGMII_RXD0", 0,
- 148, UNIPHIER_PIN_DRV_4_8,
+ 148, UNIPHIER_PIN_DRV_1BIT,
148, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(149, "RGMII_RXD1", 0,
- 149, UNIPHIER_PIN_DRV_4_8,
+ 149, UNIPHIER_PIN_DRV_1BIT,
149, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(150, "RGMII_RXD2", 0,
- 150, UNIPHIER_PIN_DRV_4_8,
+ 150, UNIPHIER_PIN_DRV_1BIT,
150, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(151, "RGMII_RXD3", 0,
- 151, UNIPHIER_PIN_DRV_4_8,
+ 151, UNIPHIER_PIN_DRV_1BIT,
151, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(152, "RGMII_RXCTL", 0,
- 152, UNIPHIER_PIN_DRV_4_8,
+ 152, UNIPHIER_PIN_DRV_1BIT,
152, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(153, "RGMII_TXCLK", 0,
- 153, UNIPHIER_PIN_DRV_4_8,
+ 153, UNIPHIER_PIN_DRV_1BIT,
153, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(154, "RGMII_TXD0", 0,
- 154, UNIPHIER_PIN_DRV_4_8,
+ 154, UNIPHIER_PIN_DRV_1BIT,
154, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(155, "RGMII_TXD1", 0,
- 155, UNIPHIER_PIN_DRV_4_8,
+ 155, UNIPHIER_PIN_DRV_1BIT,
155, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(156, "RGMII_TXD2", 0,
- 156, UNIPHIER_PIN_DRV_4_8,
+ 156, UNIPHIER_PIN_DRV_1BIT,
156, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(157, "RGMII_TXD3", 0,
- 157, UNIPHIER_PIN_DRV_4_8,
+ 157, UNIPHIER_PIN_DRV_1BIT,
157, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(158, "RGMII_TXCTL", 0,
- 158, UNIPHIER_PIN_DRV_4_8,
+ 158, UNIPHIER_PIN_DRV_1BIT,
158, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(159, "SDA3", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
159, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(160, "SCL3", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
160, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(161, "AI1ADCCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
161, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(162, "AI1BCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
162, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(163, "CH2CLK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
163, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(164, "CH2PSYNC", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
164, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(165, "CH2VAL", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
165, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(166, "CH2DATA", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
166, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(167, "CH3CLK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
167, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(168, "CH3PSYNC", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
168, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(169, "CH3VAL", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
169, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(170, "CH3DATA", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
170, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(171, "SDA2", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
171, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(172, "SCL2", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
172, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(173, "AI1LRCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
173, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(174, "AI1D0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
174, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(175, "AO2LRCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
175, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(176, "AO2D0", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
176, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(177, "AO2DACCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
177, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(178, "AO2BCK", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
178, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(179, "PORT222", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
179, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(180, "PORT223", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
180, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(181, "PORT224", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
181, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(182, "PORT225", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
182, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(183, "PORT226", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
183, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(184, "PORT227", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
184, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(185, "PORT230", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
185, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(186, "FANPWM", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
186, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(187, "HRDDCSDA0", 0,
- 187, UNIPHIER_PIN_DRV_4_8,
+ 187, UNIPHIER_PIN_DRV_1BIT,
187, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(188, "HRDDCSCL0", 0,
- 188, UNIPHIER_PIN_DRV_4_8,
+ 188, UNIPHIER_PIN_DRV_1BIT,
188, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(189, "HRDDCSDA1", 0,
- 189, UNIPHIER_PIN_DRV_4_8,
+ 189, UNIPHIER_PIN_DRV_1BIT,
189, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(190, "HRDDCSCL1", 0,
- 190, UNIPHIER_PIN_DRV_4_8,
+ 190, UNIPHIER_PIN_DRV_1BIT,
190, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(191, "HTDDCSDA0", 0,
- 191, UNIPHIER_PIN_DRV_4_8,
+ 191, UNIPHIER_PIN_DRV_1BIT,
191, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(192, "HTDDCSCL0", 0,
- 192, UNIPHIER_PIN_DRV_4_8,
+ 192, UNIPHIER_PIN_DRV_1BIT,
192, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(193, "HTDDCSDA1", 0,
- 193, UNIPHIER_PIN_DRV_4_8,
+ 193, UNIPHIER_PIN_DRV_1BIT,
193, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(194, "HTDDCSCL1", 0,
- 194, UNIPHIER_PIN_DRV_4_8,
+ 194, UNIPHIER_PIN_DRV_1BIT,
194, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(195, "PORT241", 0,
- 195, UNIPHIER_PIN_DRV_4_8,
+ 195, UNIPHIER_PIN_DRV_1BIT,
195, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(196, "PORT242", 0,
- 196, UNIPHIER_PIN_DRV_4_8,
+ 196, UNIPHIER_PIN_DRV_1BIT,
196, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(197, "PORT243", 0,
- 197, UNIPHIER_PIN_DRV_4_8,
+ 197, UNIPHIER_PIN_DRV_1BIT,
197, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(198, "MVSYNC", 0,
- 198, UNIPHIER_PIN_DRV_4_8,
+ 198, UNIPHIER_PIN_DRV_1BIT,
198, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(199, "SPISYNC0", UNIPHIER_PIN_IECTRL_NONE,
- 199, UNIPHIER_PIN_DRV_4_8,
+ 199, UNIPHIER_PIN_DRV_1BIT,
199, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(200, "SPISCLK0", UNIPHIER_PIN_IECTRL_NONE,
- 200, UNIPHIER_PIN_DRV_4_8,
+ 200, UNIPHIER_PIN_DRV_1BIT,
200, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(201, "SPITXD0", UNIPHIER_PIN_IECTRL_NONE,
- 201, UNIPHIER_PIN_DRV_4_8,
+ 201, UNIPHIER_PIN_DRV_1BIT,
201, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(202, "SPIRXD0", UNIPHIER_PIN_IECTRL_NONE,
- 202, UNIPHIER_PIN_DRV_4_8,
+ 202, UNIPHIER_PIN_DRV_1BIT,
202, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(203, "CK54EXI", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
203, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(204, "AEXCKA1", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
204, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(205, "AEXCKA2", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
205, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(206, "CK27EXI", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_8,
+ -1, UNIPHIER_PIN_DRV_FIXED8,
206, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(207, "STCDIN", 0,
- 207, UNIPHIER_PIN_DRV_4_8,
+ 207, UNIPHIER_PIN_DRV_1BIT,
207, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(208, "PHSYNI", 0,
- 208, UNIPHIER_PIN_DRV_4_8,
+ 208, UNIPHIER_PIN_DRV_1BIT,
208, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(209, "PVSYNI", 0,
- 209, UNIPHIER_PIN_DRV_4_8,
+ 209, UNIPHIER_PIN_DRV_1BIT,
209, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(210, "MVSYN", UNIPHIER_PIN_IECTRL_NONE,
- 210, UNIPHIER_PIN_DRV_4_8,
+ 210, UNIPHIER_PIN_DRV_1BIT,
210, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(211, "STCV", UNIPHIER_PIN_IECTRL_NONE,
- 211, UNIPHIER_PIN_DRV_4_8,
+ 211, UNIPHIER_PIN_DRV_1BIT,
211, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(212, "PORT262", UNIPHIER_PIN_IECTRL_NONE,
- 212, UNIPHIER_PIN_DRV_4_8,
+ 212, UNIPHIER_PIN_DRV_1BIT,
212, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(213, "USB0VBUS_IRQ", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
213, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(214, "USB1VBUS_IRQ", UNIPHIER_PIN_IECTRL_NONE,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
214, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(215, "PORT265", UNIPHIER_PIN_IECTRL_NONE,
- 215, UNIPHIER_PIN_DRV_4_8,
+ 215, UNIPHIER_PIN_DRV_1BIT,
215, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(216, "CK25O", 0,
- 216, UNIPHIER_PIN_DRV_4_8,
+ 216, UNIPHIER_PIN_DRV_1BIT,
216, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(217, "TXD0", 0,
- 217, UNIPHIER_PIN_DRV_4_8,
+ 217, UNIPHIER_PIN_DRV_1BIT,
217, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(218, "RXD0", 0,
- 218, UNIPHIER_PIN_DRV_4_8,
+ 218, UNIPHIER_PIN_DRV_1BIT,
218, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(219, "TXD3", 0,
- 219, UNIPHIER_PIN_DRV_4_8,
+ 219, UNIPHIER_PIN_DRV_1BIT,
219, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(220, "RXD3", 0,
- 220, UNIPHIER_PIN_DRV_4_8,
+ 220, UNIPHIER_PIN_DRV_1BIT,
220, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(221, "PORT273", 0,
- 221, UNIPHIER_PIN_DRV_4_8,
+ 221, UNIPHIER_PIN_DRV_1BIT,
221, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(222, "STCDOUTC", 0,
- 222, UNIPHIER_PIN_DRV_4_8,
+ 222, UNIPHIER_PIN_DRV_1BIT,
222, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(223, "PORT274", 0,
- 223, UNIPHIER_PIN_DRV_4_8,
+ 223, UNIPHIER_PIN_DRV_1BIT,
223, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(224, "PORT275", 0,
- 224, UNIPHIER_PIN_DRV_4_8,
+ 224, UNIPHIER_PIN_DRV_1BIT,
224, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(225, "PORT276", 0,
- 225, UNIPHIER_PIN_DRV_4_8,
+ 225, UNIPHIER_PIN_DRV_1BIT,
225, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(226, "PORT277", 0,
- 226, UNIPHIER_PIN_DRV_4_8,
+ 226, UNIPHIER_PIN_DRV_1BIT,
226, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(227, "PORT280", 0,
- 227, UNIPHIER_PIN_DRV_4_8,
+ 227, UNIPHIER_PIN_DRV_1BIT,
227, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(228, "PORT281", 0,
- 228, UNIPHIER_PIN_DRV_4_8,
+ 228, UNIPHIER_PIN_DRV_1BIT,
228, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(229, "PORT282", 0,
- 229, UNIPHIER_PIN_DRV_4_8,
+ 229, UNIPHIER_PIN_DRV_1BIT,
229, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(230, "PORT283", 0,
- 230, UNIPHIER_PIN_DRV_4_8,
+ 230, UNIPHIER_PIN_DRV_1BIT,
230, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(231, "PORT284", 0,
- 231, UNIPHIER_PIN_DRV_4_8,
+ 231, UNIPHIER_PIN_DRV_1BIT,
231, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(232, "PORT285", 0,
- 232, UNIPHIER_PIN_DRV_4_8,
+ 232, UNIPHIER_PIN_DRV_1BIT,
232, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(233, "T0HPD", 0,
- 233, UNIPHIER_PIN_DRV_4_8,
+ 233, UNIPHIER_PIN_DRV_1BIT,
233, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(234, "T1HPD", 0,
- 234, UNIPHIER_PIN_DRV_4_8,
+ 234, UNIPHIER_PIN_DRV_1BIT,
234, UNIPHIER_PIN_PULL_DOWN),
};
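Two mechanical renames dominate the pin table above: UNIPHIER_PIN_DRV_4_8 becomes UNIPHIER_PIN_DRV_1BIT and UNIPHIER_PIN_DRV_8_12_16_20 becomes UNIPHIER_PIN_DRV_2BIT (with UNIPHIER_PIN_DRV_FIXED_n collapsing to UNIPHIER_PIN_DRV_FIXEDn), so drive-strength capabilities are now named by the width of their register field rather than by the selectable mA values. Note that the 2-bit entries also switch from pre-multiplied bit offsets (0, 4, 8, ...) to plain field indices (0, 1, 2, ...). A minimal sketch of what that indexing implies, assuming 2-bit fields packed into 32-bit drive-control registers (the helper name and layout are assumptions, not the driver's literal code):

/*
 * Sketch: locate the 2-bit drive-strength field for a pin whose
 * descriptor stores a field index (e.g. 5 for SDDAT3 above).
 */
static void uniphier_drv2bit_locate_sketch(unsigned int drvctrl,
					   unsigned int *reg_offset,
					   unsigned int *shift)
{
	*reg_offset = drvctrl / 16 * 4;	/* 16 fields per 32-bit register */
	*shift = drvctrl % 16 * 2;	/* 2 bits per field */
}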
static const unsigned emmc_pins[] = {36, 37, 38, 39, 40, 41, 42};
-static const unsigned emmc_muxvals[] = {9, 9, 9, 9, 9, 9, 9};
+static const int emmc_muxvals[] = {9, 9, 9, 9, 9, 9, 9};
static const unsigned emmc_dat8_pins[] = {43, 44, 45, 46};
-static const unsigned emmc_dat8_muxvals[] = {9, 9, 9, 9};
+static const int emmc_dat8_muxvals[] = {9, 9, 9, 9};
+static const unsigned ether_mii_pins[] = {143, 144, 145, 146, 147, 148, 149,
+ 150, 151, 152, 153, 154, 155, 156,
+ 158, 159, 199, 200, 201, 202};
+static const int ether_mii_muxvals[] = {8, 8, 8, 8, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 12, 12, 12, 12};
+static const unsigned ether_rgmii_pins[] = {143, 144, 145, 146, 147, 148, 149,
+ 150, 151, 152, 153, 154, 155, 156,
+ 157, 158};
+static const int ether_rgmii_muxvals[] = {8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8};
+static const unsigned ether_rmii_pins[] = {143, 144, 145, 146, 147, 148, 149,
+ 150, 152, 154, 155, 158};
+static const int ether_rmii_muxvals[] = {8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9};
static const unsigned i2c0_pins[] = {109, 110};
-static const unsigned i2c0_muxvals[] = {8, 8};
+static const int i2c0_muxvals[] = {8, 8};
static const unsigned i2c1_pins[] = {111, 112};
-static const unsigned i2c1_muxvals[] = {8, 8};
+static const int i2c1_muxvals[] = {8, 8};
static const unsigned i2c2_pins[] = {171, 172};
-static const unsigned i2c2_muxvals[] = {8, 8};
+static const int i2c2_muxvals[] = {8, 8};
static const unsigned i2c3_pins[] = {159, 160};
-static const unsigned i2c3_muxvals[] = {8, 8};
+static const int i2c3_muxvals[] = {8, 8};
static const unsigned i2c5_pins[] = {183, 184};
-static const unsigned i2c5_muxvals[] = {11, 11};
+static const int i2c5_muxvals[] = {11, 11};
static const unsigned i2c6_pins[] = {185, 186};
-static const unsigned i2c6_muxvals[] = {11, 11};
+static const int i2c6_muxvals[] = {11, 11};
static const unsigned nand_pins[] = {30, 31, 32, 33, 34, 35, 36, 39, 40, 41,
42, 43, 44, 45, 46};
-static const unsigned nand_muxvals[] = {8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
- 8, 8};
+static const int nand_muxvals[] = {8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8};
static const unsigned nand_cs1_pins[] = {37, 38};
-static const unsigned nand_cs1_muxvals[] = {8, 8};
+static const int nand_cs1_muxvals[] = {8, 8};
static const unsigned sd_pins[] = {47, 48, 49, 50, 51, 52, 53, 54, 55};
-static const unsigned sd_muxvals[] = {8, 8, 8, 8, 8, 8, 8, 8, 8};
+static const int sd_muxvals[] = {8, 8, 8, 8, 8, 8, 8, 8, 8};
+static const unsigned system_bus_pins[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13};
+static const int system_bus_muxvals[] = {8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 8};
+static const unsigned system_bus_cs1_pins[] = {14};
+static const int system_bus_cs1_muxvals[] = {8};
static const unsigned uart0_pins[] = {217, 218};
-static const unsigned uart0_muxvals[] = {8, 8};
+static const int uart0_muxvals[] = {8, 8};
static const unsigned uart0b_pins[] = {179, 180};
-static const unsigned uart0b_muxvals[] = {10, 10};
+static const int uart0b_muxvals[] = {10, 10};
static const unsigned uart1_pins[] = {115, 116};
-static const unsigned uart1_muxvals[] = {8, 8};
+static const int uart1_muxvals[] = {8, 8};
static const unsigned uart2_pins[] = {113, 114};
-static const unsigned uart2_muxvals[] = {8, 8};
+static const int uart2_muxvals[] = {8, 8};
static const unsigned uart3_pins[] = {219, 220};
-static const unsigned uart3_muxvals[] = {8, 8};
+static const int uart3_muxvals[] = {8, 8};
static const unsigned uart3b_pins[] = {181, 182};
-static const unsigned uart3b_muxvals[] = {10, 10};
+static const int uart3b_muxvals[] = {10, 10};
static const unsigned usb0_pins[] = {56, 57};
-static const unsigned usb0_muxvals[] = {8, 8};
+static const int usb0_muxvals[] = {8, 8};
static const unsigned usb1_pins[] = {58, 59};
-static const unsigned usb1_muxvals[] = {8, 8};
+static const int usb1_muxvals[] = {8, 8};
static const unsigned usb2_pins[] = {60, 61};
-static const unsigned usb2_muxvals[] = {8, 8};
+static const int usb2_muxvals[] = {8, 8};
static const unsigned usb3_pins[] = {62, 63};
-static const unsigned usb3_muxvals[] = {8, 8};
+static const int usb3_muxvals[] = {8, 8};
static const unsigned port_range0_pins[] = {
127, 128, 129, 130, 131, 132, 133, 134, /* PORT0x */
135, 136, 137, 138, 139, 140, 141, 142, /* PORT1x */
@@ -786,7 +802,7 @@ static const unsigned port_range0_pins[] = {
61, 62, 63, 64, 65, 66, 67, 68, /* PORT9x */
69, 70, 71, 76, 77, 78, 79, 80, /* PORT10x */
};
-static const unsigned port_range0_muxvals[] = {
+static const int port_range0_muxvals[] = {
15, 15, 15, 15, 15, 15, 15, 15, /* PORT0x */
15, 15, 15, 15, 15, 15, 15, 15, /* PORT1x */
15, 15, 15, 15, 15, 15, 15, 15, /* PORT2x */
@@ -818,7 +834,7 @@ static const unsigned port_range1_pins[] = {
218, 219, 220, 221, 223, 224, 225, 226, /* PORT27x */
227, 228, 229, 230, 231, 232, 233, 234, /* PORT28x */
};
-static const unsigned port_range1_muxvals[] = {
+static const int port_range1_muxvals[] = {
15, 15, 15, 15, 15, 15, 15, 15, /* PORT12x */
15, 15, 15, 15, 15, 15, 15, 15, /* PORT13x */
15, 15, 15, 15, 15, 15, 15, 15, /* PORT14x */
@@ -842,15 +858,18 @@ static const unsigned xirq_pins[] = {
126, 72, 73, 92, 177, 93, 94, 176, /* XIRQ8-15 */
74, 91, 27, 28, 29, 75, 20, 26, /* XIRQ16-23 */
};
-static const unsigned xirq_muxvals[] = {
+static const int xirq_muxvals[] = {
14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ0-7 */
14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ8-15 */
14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ16-23 */
};
-static const struct uniphier_pinctrl_group proxstream2_groups[] = {
+static const struct uniphier_pinctrl_group uniphier_pxs2_groups[] = {
UNIPHIER_PINCTRL_GROUP(emmc),
UNIPHIER_PINCTRL_GROUP(emmc_dat8),
+ UNIPHIER_PINCTRL_GROUP(ether_mii),
+ UNIPHIER_PINCTRL_GROUP(ether_rgmii),
+ UNIPHIER_PINCTRL_GROUP(ether_rmii),
UNIPHIER_PINCTRL_GROUP(i2c0),
UNIPHIER_PINCTRL_GROUP(i2c1),
UNIPHIER_PINCTRL_GROUP(i2c2),
@@ -860,6 +879,8 @@ static const struct uniphier_pinctrl_group proxstream2_groups[] = {
UNIPHIER_PINCTRL_GROUP(nand),
UNIPHIER_PINCTRL_GROUP(nand_cs1),
UNIPHIER_PINCTRL_GROUP(sd),
+ UNIPHIER_PINCTRL_GROUP(system_bus),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs1),
UNIPHIER_PINCTRL_GROUP(uart0),
UNIPHIER_PINCTRL_GROUP(uart0b),
UNIPHIER_PINCTRL_GROUP(uart1),
@@ -1124,6 +1145,9 @@ static const struct uniphier_pinctrl_group proxstream2_groups[] = {
};
static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
+static const char * const ether_mii_groups[] = {"ether_mii"};
+static const char * const ether_rgmii_groups[] = {"ether_rgmii"};
+static const char * const ether_rmii_groups[] = {"ether_rmii"};
static const char * const i2c0_groups[] = {"i2c0"};
static const char * const i2c1_groups[] = {"i2c1"};
static const char * const i2c2_groups[] = {"i2c2"};
@@ -1132,6 +1156,8 @@ static const char * const i2c5_groups[] = {"i2c5"};
static const char * const i2c6_groups[] = {"i2c6"};
static const char * const nand_groups[] = {"nand", "nand_cs1"};
static const char * const sd_groups[] = {"sd"};
+static const char * const system_bus_groups[] = {"system_bus",
+ "system_bus_cs1"};
static const char * const uart0_groups[] = {"uart0", "uart0b"};
static const char * const uart1_groups[] = {"uart1"};
static const char * const uart2_groups[] = {"uart2"};
@@ -1208,8 +1234,11 @@ static const char * const xirq_groups[] = {
"xirq20", "xirq21", "xirq22", "xirq23",
};
-static const struct uniphier_pinmux_function proxstream2_functions[] = {
+static const struct uniphier_pinmux_function uniphier_pxs2_functions[] = {
UNIPHIER_PINMUX_FUNCTION(emmc),
+ UNIPHIER_PINMUX_FUNCTION(ether_mii),
+ UNIPHIER_PINMUX_FUNCTION(ether_rgmii),
+ UNIPHIER_PINMUX_FUNCTION(ether_rmii),
UNIPHIER_PINMUX_FUNCTION(i2c0),
UNIPHIER_PINMUX_FUNCTION(i2c1),
UNIPHIER_PINMUX_FUNCTION(i2c2),
@@ -1218,6 +1247,7 @@ static const struct uniphier_pinmux_function proxstream2_functions[] = {
UNIPHIER_PINMUX_FUNCTION(i2c6),
UNIPHIER_PINMUX_FUNCTION(nand),
UNIPHIER_PINMUX_FUNCTION(sd),
+ UNIPHIER_PINMUX_FUNCTION(system_bus),
UNIPHIER_PINMUX_FUNCTION(uart0),
UNIPHIER_PINMUX_FUNCTION(uart1),
UNIPHIER_PINMUX_FUNCTION(uart2),
@@ -1230,43 +1260,36 @@ static const struct uniphier_pinmux_function proxstream2_functions[] = {
UNIPHIER_PINMUX_FUNCTION(xirq),
};
-static struct uniphier_pinctrl_socdata proxstream2_pindata = {
- .groups = proxstream2_groups,
- .groups_count = ARRAY_SIZE(proxstream2_groups),
- .functions = proxstream2_functions,
- .functions_count = ARRAY_SIZE(proxstream2_functions),
- .mux_bits = 8,
- .reg_stride = 4,
- .load_pinctrl = false,
-};
-
-static struct pinctrl_desc proxstream2_pinctrl_desc = {
- .name = DRIVER_NAME,
- .pins = proxstream2_pins,
- .npins = ARRAY_SIZE(proxstream2_pins),
- .owner = THIS_MODULE,
+static struct uniphier_pinctrl_socdata uniphier_pxs2_pindata = {
+ .pins = uniphier_pxs2_pins,
+ .npins = ARRAY_SIZE(uniphier_pxs2_pins),
+ .groups = uniphier_pxs2_groups,
+ .groups_count = ARRAY_SIZE(uniphier_pxs2_groups),
+ .functions = uniphier_pxs2_functions,
+ .functions_count = ARRAY_SIZE(uniphier_pxs2_functions),
+ .caps = 0,
};
-static int proxstream2_pinctrl_probe(struct platform_device *pdev)
+static int uniphier_pxs2_pinctrl_probe(struct platform_device *pdev)
{
- return uniphier_pinctrl_probe(pdev, &proxstream2_pinctrl_desc,
- &proxstream2_pindata);
+ return uniphier_pinctrl_probe(pdev, &uniphier_pxs2_pindata);
}
-static const struct of_device_id proxstream2_pinctrl_match[] = {
+static const struct of_device_id uniphier_pxs2_pinctrl_match[] = {
+ { .compatible = "socionext,uniphier-pxs2-pinctrl" },
{ .compatible = "socionext,proxstream2-pinctrl" },
{ /* sentinel */ }
};
-MODULE_DEVICE_TABLE(of, proxstream2_pinctrl_match);
+MODULE_DEVICE_TABLE(of, uniphier_pxs2_pinctrl_match);
-static struct platform_driver proxstream2_pinctrl_driver = {
- .probe = proxstream2_pinctrl_probe,
+static struct platform_driver uniphier_pxs2_pinctrl_driver = {
+ .probe = uniphier_pxs2_pinctrl_probe,
.driver = {
- .name = DRIVER_NAME,
- .of_match_table = proxstream2_pinctrl_match,
+ .name = "uniphier-pxs2-pinctrl",
+ .of_match_table = uniphier_pxs2_pinctrl_match,
},
};
-module_platform_driver(proxstream2_pinctrl_driver);
+module_platform_driver(uniphier_pxs2_pinctrl_driver);
MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
MODULE_DESCRIPTION("UniPhier ProXstream2 pinctrl driver");
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c
index ceb7a9899..da689d880 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c
@@ -19,453 +19,518 @@
#include "pinctrl-uniphier.h"
-#define DRIVER_NAME "ph1-sld8-pinctrl"
-
-static const struct pinctrl_pin_desc ph1_sld8_pins[] = {
+static const struct pinctrl_pin_desc uniphier_sld8_pins[] = {
UNIPHIER_PINCTRL_PIN(0, "PCA00", 0,
- 15, UNIPHIER_PIN_DRV_4_8,
+ 15, UNIPHIER_PIN_DRV_1BIT,
15, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(1, "PCA01", 0,
- 16, UNIPHIER_PIN_DRV_4_8,
+ 16, UNIPHIER_PIN_DRV_1BIT,
16, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(2, "PCA02", 0,
- 17, UNIPHIER_PIN_DRV_4_8,
+ 17, UNIPHIER_PIN_DRV_1BIT,
17, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(3, "PCA03", 0,
- 18, UNIPHIER_PIN_DRV_4_8,
+ 18, UNIPHIER_PIN_DRV_1BIT,
18, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(4, "PCA04", 0,
- 19, UNIPHIER_PIN_DRV_4_8,
+ 19, UNIPHIER_PIN_DRV_1BIT,
19, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(5, "PCA05", 0,
- 20, UNIPHIER_PIN_DRV_4_8,
+ 20, UNIPHIER_PIN_DRV_1BIT,
20, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(6, "PCA06", 0,
- 21, UNIPHIER_PIN_DRV_4_8,
+ 21, UNIPHIER_PIN_DRV_1BIT,
21, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(7, "PCA07", 0,
- 22, UNIPHIER_PIN_DRV_4_8,
+ 22, UNIPHIER_PIN_DRV_1BIT,
22, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(8, "PCA08", 0,
- 23, UNIPHIER_PIN_DRV_4_8,
+ 23, UNIPHIER_PIN_DRV_1BIT,
23, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(9, "PCA09", 0,
- 24, UNIPHIER_PIN_DRV_4_8,
+ 24, UNIPHIER_PIN_DRV_1BIT,
24, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(10, "PCA10", 0,
- 25, UNIPHIER_PIN_DRV_4_8,
+ 25, UNIPHIER_PIN_DRV_1BIT,
25, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(11, "PCA11", 0,
- 26, UNIPHIER_PIN_DRV_4_8,
+ 26, UNIPHIER_PIN_DRV_1BIT,
26, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(12, "PCA12", 0,
- 27, UNIPHIER_PIN_DRV_4_8,
+ 27, UNIPHIER_PIN_DRV_1BIT,
27, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(13, "PCA13", 0,
- 28, UNIPHIER_PIN_DRV_4_8,
+ 28, UNIPHIER_PIN_DRV_1BIT,
28, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(14, "PCA14", 0,
- 29, UNIPHIER_PIN_DRV_4_8,
+ 29, UNIPHIER_PIN_DRV_1BIT,
29, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(15, "XNFRE_GB", UNIPHIER_PIN_IECTRL_NONE,
- 30, UNIPHIER_PIN_DRV_4_8,
+ 30, UNIPHIER_PIN_DRV_1BIT,
30, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(16, "XNFWE_GB", UNIPHIER_PIN_IECTRL_NONE,
- 31, UNIPHIER_PIN_DRV_4_8,
+ 31, UNIPHIER_PIN_DRV_1BIT,
31, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(17, "NFALE_GB", UNIPHIER_PIN_IECTRL_NONE,
- 32, UNIPHIER_PIN_DRV_4_8,
+ 32, UNIPHIER_PIN_DRV_1BIT,
32, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(18, "NFCLE_GB", UNIPHIER_PIN_IECTRL_NONE,
- 33, UNIPHIER_PIN_DRV_4_8,
+ 33, UNIPHIER_PIN_DRV_1BIT,
33, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(19, "XNFWP_GB", UNIPHIER_PIN_IECTRL_NONE,
- 34, UNIPHIER_PIN_DRV_4_8,
+ 34, UNIPHIER_PIN_DRV_1BIT,
34, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(20, "XNFCE0_GB", UNIPHIER_PIN_IECTRL_NONE,
- 35, UNIPHIER_PIN_DRV_4_8,
+ 35, UNIPHIER_PIN_DRV_1BIT,
35, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(21, "NANDRYBY0_GB", UNIPHIER_PIN_IECTRL_NONE,
- 36, UNIPHIER_PIN_DRV_4_8,
+ 36, UNIPHIER_PIN_DRV_1BIT,
36, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(22, "XNFCE1_GB", UNIPHIER_PIN_IECTRL_NONE,
- 0, UNIPHIER_PIN_DRV_8_12_16_20,
+ 0, UNIPHIER_PIN_DRV_2BIT,
119, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(23, "NANDRYBY1_GB", UNIPHIER_PIN_IECTRL_NONE,
- 4, UNIPHIER_PIN_DRV_8_12_16_20,
+ 1, UNIPHIER_PIN_DRV_2BIT,
120, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(24, "NFD0_GB", UNIPHIER_PIN_IECTRL_NONE,
- 8, UNIPHIER_PIN_DRV_8_12_16_20,
+ 2, UNIPHIER_PIN_DRV_2BIT,
121, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(25, "NFD1_GB", UNIPHIER_PIN_IECTRL_NONE,
- 12, UNIPHIER_PIN_DRV_8_12_16_20,
+ 3, UNIPHIER_PIN_DRV_2BIT,
122, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(26, "NFD2_GB", UNIPHIER_PIN_IECTRL_NONE,
- 16, UNIPHIER_PIN_DRV_8_12_16_20,
+ 4, UNIPHIER_PIN_DRV_2BIT,
123, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(27, "NFD3_GB", UNIPHIER_PIN_IECTRL_NONE,
- 20, UNIPHIER_PIN_DRV_8_12_16_20,
+ 5, UNIPHIER_PIN_DRV_2BIT,
124, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(28, "NFD4_GB", UNIPHIER_PIN_IECTRL_NONE,
- 24, UNIPHIER_PIN_DRV_8_12_16_20,
+ 6, UNIPHIER_PIN_DRV_2BIT,
125, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(29, "NFD5_GB", UNIPHIER_PIN_IECTRL_NONE,
- 28, UNIPHIER_PIN_DRV_8_12_16_20,
+ 7, UNIPHIER_PIN_DRV_2BIT,
126, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(30, "NFD6_GB", UNIPHIER_PIN_IECTRL_NONE,
- 32, UNIPHIER_PIN_DRV_8_12_16_20,
+ 8, UNIPHIER_PIN_DRV_2BIT,
127, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(31, "NFD7_GB", UNIPHIER_PIN_IECTRL_NONE,
- 36, UNIPHIER_PIN_DRV_8_12_16_20,
+ 9, UNIPHIER_PIN_DRV_2BIT,
128, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(32, "SDCLK", 8,
- 40, UNIPHIER_PIN_DRV_8_12_16_20,
+ 10, UNIPHIER_PIN_DRV_2BIT,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(33, "SDCMD", 8,
- 44, UNIPHIER_PIN_DRV_8_12_16_20,
+ 11, UNIPHIER_PIN_DRV_2BIT,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(34, "SDDAT0", 8,
- 48, UNIPHIER_PIN_DRV_8_12_16_20,
+ 12, UNIPHIER_PIN_DRV_2BIT,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(35, "SDDAT1", 8,
- 52, UNIPHIER_PIN_DRV_8_12_16_20,
+ 13, UNIPHIER_PIN_DRV_2BIT,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(36, "SDDAT2", 8,
- 56, UNIPHIER_PIN_DRV_8_12_16_20,
+ 14, UNIPHIER_PIN_DRV_2BIT,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(37, "SDDAT3", 8,
- 60, UNIPHIER_PIN_DRV_8_12_16_20,
+ 15, UNIPHIER_PIN_DRV_2BIT,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(38, "SDCD", 8,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
129, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(39, "SDWP", 8,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
130, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(40, "SDVOLC", 9,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
131, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(41, "USB0VBUS", 0,
- 37, UNIPHIER_PIN_DRV_4_8,
+ 37, UNIPHIER_PIN_DRV_1BIT,
37, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(42, "USB0OD", 0,
- 38, UNIPHIER_PIN_DRV_4_8,
+ 38, UNIPHIER_PIN_DRV_1BIT,
38, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(43, "USB1VBUS", 0,
- 39, UNIPHIER_PIN_DRV_4_8,
+ 39, UNIPHIER_PIN_DRV_1BIT,
39, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(44, "USB1OD", 0,
- 40, UNIPHIER_PIN_DRV_4_8,
+ 40, UNIPHIER_PIN_DRV_1BIT,
40, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(45, "PCRESET", 0,
- 41, UNIPHIER_PIN_DRV_4_8,
+ 41, UNIPHIER_PIN_DRV_1BIT,
41, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(46, "PCREG", 0,
- 42, UNIPHIER_PIN_DRV_4_8,
+ 42, UNIPHIER_PIN_DRV_1BIT,
42, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(47, "PCCE2", 0,
- 43, UNIPHIER_PIN_DRV_4_8,
+ 43, UNIPHIER_PIN_DRV_1BIT,
43, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(48, "PCVS1", 0,
- 44, UNIPHIER_PIN_DRV_4_8,
+ 44, UNIPHIER_PIN_DRV_1BIT,
44, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(49, "PCCD2", 0,
- 45, UNIPHIER_PIN_DRV_4_8,
+ 45, UNIPHIER_PIN_DRV_1BIT,
45, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(50, "PCCD1", 0,
- 46, UNIPHIER_PIN_DRV_4_8,
+ 46, UNIPHIER_PIN_DRV_1BIT,
46, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(51, "PCREADY", 0,
- 47, UNIPHIER_PIN_DRV_4_8,
+ 47, UNIPHIER_PIN_DRV_1BIT,
47, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(52, "PCDOE", 0,
- 48, UNIPHIER_PIN_DRV_4_8,
+ 48, UNIPHIER_PIN_DRV_1BIT,
48, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(53, "PCCE1", 0,
- 49, UNIPHIER_PIN_DRV_4_8,
+ 49, UNIPHIER_PIN_DRV_1BIT,
49, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(54, "PCWE", 0,
- 50, UNIPHIER_PIN_DRV_4_8,
+ 50, UNIPHIER_PIN_DRV_1BIT,
50, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(55, "PCOE", 0,
- 51, UNIPHIER_PIN_DRV_4_8,
+ 51, UNIPHIER_PIN_DRV_1BIT,
51, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(56, "PCWAIT", 0,
- 52, UNIPHIER_PIN_DRV_4_8,
+ 52, UNIPHIER_PIN_DRV_1BIT,
52, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(57, "PCIOWR", 0,
- 53, UNIPHIER_PIN_DRV_4_8,
+ 53, UNIPHIER_PIN_DRV_1BIT,
53, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(58, "PCIORD", 0,
- 54, UNIPHIER_PIN_DRV_4_8,
+ 54, UNIPHIER_PIN_DRV_1BIT,
54, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(59, "HS0DIN0", 0,
- 55, UNIPHIER_PIN_DRV_4_8,
+ 55, UNIPHIER_PIN_DRV_1BIT,
55, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(60, "HS0DIN1", 0,
- 56, UNIPHIER_PIN_DRV_4_8,
+ 56, UNIPHIER_PIN_DRV_1BIT,
56, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(61, "HS0DIN2", 0,
- 57, UNIPHIER_PIN_DRV_4_8,
+ 57, UNIPHIER_PIN_DRV_1BIT,
57, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(62, "HS0DIN3", 0,
- 58, UNIPHIER_PIN_DRV_4_8,
+ 58, UNIPHIER_PIN_DRV_1BIT,
58, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(63, "HS0DIN4", 0,
- 59, UNIPHIER_PIN_DRV_4_8,
+ 59, UNIPHIER_PIN_DRV_1BIT,
59, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(64, "HS0DIN5", 0,
- 60, UNIPHIER_PIN_DRV_4_8,
+ 60, UNIPHIER_PIN_DRV_1BIT,
60, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(65, "HS0DIN6", 0,
- 61, UNIPHIER_PIN_DRV_4_8,
+ 61, UNIPHIER_PIN_DRV_1BIT,
61, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(66, "HS0DIN7", 0,
- 62, UNIPHIER_PIN_DRV_4_8,
+ 62, UNIPHIER_PIN_DRV_1BIT,
62, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(67, "HS0BCLKIN", 0,
- 63, UNIPHIER_PIN_DRV_4_8,
+ 63, UNIPHIER_PIN_DRV_1BIT,
63, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(68, "HS0VALIN", 0,
- 64, UNIPHIER_PIN_DRV_4_8,
+ 64, UNIPHIER_PIN_DRV_1BIT,
64, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(69, "HS0SYNCIN", 0,
- 65, UNIPHIER_PIN_DRV_4_8,
+ 65, UNIPHIER_PIN_DRV_1BIT,
65, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(70, "HSDOUT0", 0,
- 66, UNIPHIER_PIN_DRV_4_8,
+ 66, UNIPHIER_PIN_DRV_1BIT,
66, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(71, "HSDOUT1", 0,
- 67, UNIPHIER_PIN_DRV_4_8,
+ 67, UNIPHIER_PIN_DRV_1BIT,
67, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(72, "HSDOUT2", 0,
- 68, UNIPHIER_PIN_DRV_4_8,
+ 68, UNIPHIER_PIN_DRV_1BIT,
68, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(73, "HSDOUT3", 0,
- 69, UNIPHIER_PIN_DRV_4_8,
+ 69, UNIPHIER_PIN_DRV_1BIT,
69, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(74, "HSDOUT4", 0,
- 70, UNIPHIER_PIN_DRV_4_8,
+ 70, UNIPHIER_PIN_DRV_1BIT,
70, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(75, "HSDOUT5", 0,
- 71, UNIPHIER_PIN_DRV_4_8,
+ 71, UNIPHIER_PIN_DRV_1BIT,
71, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(76, "HSDOUT6", 0,
- 72, UNIPHIER_PIN_DRV_4_8,
+ 72, UNIPHIER_PIN_DRV_1BIT,
72, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(77, "HSDOUT7", 0,
- 73, UNIPHIER_PIN_DRV_4_8,
+ 73, UNIPHIER_PIN_DRV_1BIT,
73, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(78, "HSBCLKOUT", 0,
- 74, UNIPHIER_PIN_DRV_4_8,
+ 74, UNIPHIER_PIN_DRV_1BIT,
74, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(79, "HSVALOUT", 0,
- 75, UNIPHIER_PIN_DRV_4_8,
+ 75, UNIPHIER_PIN_DRV_1BIT,
75, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(80, "HSSYNCOUT", 0,
- 76, UNIPHIER_PIN_DRV_4_8,
+ 76, UNIPHIER_PIN_DRV_1BIT,
76, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(81, "HS1DIN0", 0,
- 77, UNIPHIER_PIN_DRV_4_8,
+ 77, UNIPHIER_PIN_DRV_1BIT,
77, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(82, "HS1DIN1", 0,
- 78, UNIPHIER_PIN_DRV_4_8,
+ 78, UNIPHIER_PIN_DRV_1BIT,
78, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(83, "HS1DIN2", 0,
- 79, UNIPHIER_PIN_DRV_4_8,
+ 79, UNIPHIER_PIN_DRV_1BIT,
79, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(84, "HS1DIN3", 0,
- 80, UNIPHIER_PIN_DRV_4_8,
+ 80, UNIPHIER_PIN_DRV_1BIT,
80, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(85, "HS1DIN4", 0,
- 81, UNIPHIER_PIN_DRV_4_8,
+ 81, UNIPHIER_PIN_DRV_1BIT,
81, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(86, "HS1DIN5", 0,
- 82, UNIPHIER_PIN_DRV_4_8,
+ 82, UNIPHIER_PIN_DRV_1BIT,
82, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(87, "HS1DIN6", 0,
- 83, UNIPHIER_PIN_DRV_4_8,
+ 83, UNIPHIER_PIN_DRV_1BIT,
83, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(88, "HS1DIN7", 0,
- 84, UNIPHIER_PIN_DRV_4_8,
+ 84, UNIPHIER_PIN_DRV_1BIT,
84, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(89, "HS1BCLKIN", 0,
- 85, UNIPHIER_PIN_DRV_4_8,
+ 85, UNIPHIER_PIN_DRV_1BIT,
85, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(90, "HS1VALIN", 0,
- 86, UNIPHIER_PIN_DRV_4_8,
+ 86, UNIPHIER_PIN_DRV_1BIT,
86, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(91, "HS1SYNCIN", 0,
- 87, UNIPHIER_PIN_DRV_4_8,
+ 87, UNIPHIER_PIN_DRV_1BIT,
87, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(92, "AGCI", 3,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
132, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(93, "AGCR", 4,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
133, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(94, "AGCBS", 5,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
134, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(95, "IECOUT", 0,
- 88, UNIPHIER_PIN_DRV_4_8,
+ 88, UNIPHIER_PIN_DRV_1BIT,
88, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(96, "ASMCK", 0,
- 89, UNIPHIER_PIN_DRV_4_8,
+ 89, UNIPHIER_PIN_DRV_1BIT,
89, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(97, "ABCKO", UNIPHIER_PIN_IECTRL_NONE,
- 90, UNIPHIER_PIN_DRV_4_8,
+ 90, UNIPHIER_PIN_DRV_1BIT,
90, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(98, "ALRCKO", UNIPHIER_PIN_IECTRL_NONE,
- 91, UNIPHIER_PIN_DRV_4_8,
+ 91, UNIPHIER_PIN_DRV_1BIT,
91, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(99, "ASDOUT0", UNIPHIER_PIN_IECTRL_NONE,
- 92, UNIPHIER_PIN_DRV_4_8,
+ 92, UNIPHIER_PIN_DRV_1BIT,
92, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(100, "ASDOUT1", UNIPHIER_PIN_IECTRL_NONE,
- 93, UNIPHIER_PIN_DRV_4_8,
+ 93, UNIPHIER_PIN_DRV_1BIT,
93, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(101, "ARCOUT", 0,
- 94, UNIPHIER_PIN_DRV_4_8,
+ 94, UNIPHIER_PIN_DRV_1BIT,
94, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(102, "SDA0", 10,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(103, "SCL0", 10,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(104, "SDA1", 11,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(105, "SCL1", 11,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(106, "DMDSDA0", 12,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(107, "DMDSCL0", 12,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(108, "DMDSDA1", 13,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(109, "DMDSCL1", 13,
- -1, UNIPHIER_PIN_DRV_FIXED_4,
+ -1, UNIPHIER_PIN_DRV_FIXED4,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(110, "SBO0", UNIPHIER_PIN_IECTRL_NONE,
- 95, UNIPHIER_PIN_DRV_4_8,
+ 95, UNIPHIER_PIN_DRV_1BIT,
95, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(111, "SBI0", UNIPHIER_PIN_IECTRL_NONE,
- 96, UNIPHIER_PIN_DRV_4_8,
+ 96, UNIPHIER_PIN_DRV_1BIT,
96, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(112, "SBO1", 0,
- 97, UNIPHIER_PIN_DRV_4_8,
+ 97, UNIPHIER_PIN_DRV_1BIT,
97, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(113, "SBI1", 0,
- 98, UNIPHIER_PIN_DRV_4_8,
+ 98, UNIPHIER_PIN_DRV_1BIT,
98, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(114, "TXD1", 0,
- 99, UNIPHIER_PIN_DRV_4_8,
+ 99, UNIPHIER_PIN_DRV_1BIT,
99, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(115, "RXD1", 0,
- 100, UNIPHIER_PIN_DRV_4_8,
+ 100, UNIPHIER_PIN_DRV_1BIT,
100, UNIPHIER_PIN_PULL_UP),
UNIPHIER_PINCTRL_PIN(116, "HIN", 1,
- -1, UNIPHIER_PIN_DRV_FIXED_5,
+ -1, UNIPHIER_PIN_DRV_FIXED5,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(117, "VIN", 2,
- -1, UNIPHIER_PIN_DRV_FIXED_5,
+ -1, UNIPHIER_PIN_DRV_FIXED5,
-1, UNIPHIER_PIN_PULL_NONE),
UNIPHIER_PINCTRL_PIN(118, "TCON0", 0,
- 101, UNIPHIER_PIN_DRV_4_8,
+ 101, UNIPHIER_PIN_DRV_1BIT,
101, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(119, "TCON1", 0,
- 102, UNIPHIER_PIN_DRV_4_8,
+ 102, UNIPHIER_PIN_DRV_1BIT,
102, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(120, "TCON2", 0,
- 103, UNIPHIER_PIN_DRV_4_8,
+ 103, UNIPHIER_PIN_DRV_1BIT,
103, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(121, "TCON3", 0,
- 104, UNIPHIER_PIN_DRV_4_8,
+ 104, UNIPHIER_PIN_DRV_1BIT,
104, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(122, "TCON4", 0,
- 105, UNIPHIER_PIN_DRV_4_8,
+ 105, UNIPHIER_PIN_DRV_1BIT,
105, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(123, "TCON5", 0,
- 106, UNIPHIER_PIN_DRV_4_8,
+ 106, UNIPHIER_PIN_DRV_1BIT,
106, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(124, "TCON6", 0,
- 107, UNIPHIER_PIN_DRV_4_8,
+ 107, UNIPHIER_PIN_DRV_1BIT,
107, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(125, "TCON7", 0,
- 108, UNIPHIER_PIN_DRV_4_8,
+ 108, UNIPHIER_PIN_DRV_1BIT,
108, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(126, "TCON8", 0,
- 109, UNIPHIER_PIN_DRV_4_8,
+ 109, UNIPHIER_PIN_DRV_1BIT,
109, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(127, "PWMA", 0,
- 110, UNIPHIER_PIN_DRV_4_8,
+ 110, UNIPHIER_PIN_DRV_1BIT,
110, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(128, "XIRQ0", 0,
- 111, UNIPHIER_PIN_DRV_4_8,
+ 111, UNIPHIER_PIN_DRV_1BIT,
111, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(129, "XIRQ1", 0,
- 112, UNIPHIER_PIN_DRV_4_8,
+ 112, UNIPHIER_PIN_DRV_1BIT,
112, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(130, "XIRQ2", 0,
- 113, UNIPHIER_PIN_DRV_4_8,
+ 113, UNIPHIER_PIN_DRV_1BIT,
113, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(131, "XIRQ3", 0,
- 114, UNIPHIER_PIN_DRV_4_8,
+ 114, UNIPHIER_PIN_DRV_1BIT,
114, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(132, "XIRQ4", 0,
- 115, UNIPHIER_PIN_DRV_4_8,
+ 115, UNIPHIER_PIN_DRV_1BIT,
115, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(133, "XIRQ5", 0,
- 116, UNIPHIER_PIN_DRV_4_8,
+ 116, UNIPHIER_PIN_DRV_1BIT,
116, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(134, "XIRQ6", 0,
- 117, UNIPHIER_PIN_DRV_4_8,
+ 117, UNIPHIER_PIN_DRV_1BIT,
117, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(135, "XIRQ7", 0,
- 118, UNIPHIER_PIN_DRV_4_8,
+ 118, UNIPHIER_PIN_DRV_1BIT,
118, UNIPHIER_PIN_PULL_DOWN),
+ /* dedicated pins */
+ UNIPHIER_PINCTRL_PIN(136, "ED0", -1,
+ 0, UNIPHIER_PIN_DRV_1BIT,
+ 0, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(137, "ED1", -1,
+ 1, UNIPHIER_PIN_DRV_1BIT,
+ 1, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(138, "ED2", -1,
+ 2, UNIPHIER_PIN_DRV_1BIT,
+ 2, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(139, "ED3", -1,
+ 3, UNIPHIER_PIN_DRV_1BIT,
+ 3, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(140, "ED4", -1,
+ 4, UNIPHIER_PIN_DRV_1BIT,
+ 4, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(141, "ED5", -1,
+ 5, UNIPHIER_PIN_DRV_1BIT,
+ 5, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(142, "ED6", -1,
+ 6, UNIPHIER_PIN_DRV_1BIT,
+ 6, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(143, "ED7", -1,
+ 7, UNIPHIER_PIN_DRV_1BIT,
+ 7, UNIPHIER_PIN_PULL_DOWN),
+ UNIPHIER_PINCTRL_PIN(144, "XERWE0", -1,
+ 8, UNIPHIER_PIN_DRV_1BIT,
+ 8, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(145, "XERWE1", -1,
+ 9, UNIPHIER_PIN_DRV_1BIT,
+ 9, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(146, "ERXW", -1,
+ 10, UNIPHIER_PIN_DRV_1BIT,
+ 10, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(147, "ES0", -1,
+ 11, UNIPHIER_PIN_DRV_1BIT,
+ 11, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(148, "ES1", -1,
+ 12, UNIPHIER_PIN_DRV_1BIT,
+ 12, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(149, "ES2", -1,
+ 13, UNIPHIER_PIN_DRV_1BIT,
+ 13, UNIPHIER_PIN_PULL_UP),
+ UNIPHIER_PINCTRL_PIN(150, "XECS1", -1,
+ 14, UNIPHIER_PIN_DRV_1BIT,
+ 14, UNIPHIER_PIN_PULL_DOWN),
};
static const unsigned emmc_pins[] = {21, 22, 23, 24, 25, 26, 27};
-static const unsigned emmc_muxvals[] = {1, 1, 1, 1, 1, 1, 1};
+static const int emmc_muxvals[] = {1, 1, 1, 1, 1, 1, 1};
static const unsigned emmc_dat8_pins[] = {28, 29, 30, 31};
-static const unsigned emmc_dat8_muxvals[] = {1, 1, 1, 1};
+static const int emmc_dat8_muxvals[] = {1, 1, 1, 1};
+static const unsigned ether_mii_pins[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 13, 14,
+ 61, 63, 64, 65, 66, 67, 68};
+static const int ether_mii_muxvals[] = {13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 27, 27, 27, 27, 27, 27, 27};
+static const unsigned ether_rmii_pins[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 13,
+ 14};
+static const int ether_rmii_muxvals[] = {13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13};
static const unsigned i2c0_pins[] = {102, 103};
-static const unsigned i2c0_muxvals[] = {0, 0};
+static const int i2c0_muxvals[] = {0, 0};
static const unsigned i2c1_pins[] = {104, 105};
-static const unsigned i2c1_muxvals[] = {0, 0};
+static const int i2c1_muxvals[] = {0, 0};
static const unsigned i2c2_pins[] = {108, 109};
-static const unsigned i2c2_muxvals[] = {2, 2};
+static const int i2c2_muxvals[] = {2, 2};
static const unsigned i2c3_pins[] = {108, 109};
-static const unsigned i2c3_muxvals[] = {3, 3};
+static const int i2c3_muxvals[] = {3, 3};
static const unsigned nand_pins[] = {15, 16, 17, 18, 19, 20, 21, 24, 25, 26,
27, 28, 29, 30, 31};
-static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0};
+static const int nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
static const unsigned nand_cs1_pins[] = {22, 23};
-static const unsigned nand_cs1_muxvals[] = {0, 0};
+static const int nand_cs1_muxvals[] = {0, 0};
static const unsigned sd_pins[] = {32, 33, 34, 35, 36, 37, 38, 39, 40};
-static const unsigned sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const int sd_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned system_bus_pins[] = {136, 137, 138, 139, 140, 141, 142,
+ 143, 144, 145, 146, 147, 148, 149};
+static const int system_bus_muxvals[] = {-1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1};
+static const unsigned system_bus_cs1_pins[] = {150};
+static const int system_bus_cs1_muxvals[] = {-1};
+static const unsigned system_bus_cs2_pins[] = {10};
+static const int system_bus_cs2_muxvals[] = {1};
+static const unsigned system_bus_cs3_pins[] = {11};
+static const int system_bus_cs3_muxvals[] = {1};
+static const unsigned system_bus_cs4_pins[] = {12};
+static const int system_bus_cs4_muxvals[] = {1};
+static const unsigned system_bus_cs5_pins[] = {13};
+static const int system_bus_cs5_muxvals[] = {1};
static const unsigned uart0_pins[] = {70, 71};
-static const unsigned uart0_muxvals[] = {3, 3};
+static const int uart0_muxvals[] = {3, 3};
static const unsigned uart1_pins[] = {114, 115};
-static const unsigned uart1_muxvals[] = {0, 0};
+static const int uart1_muxvals[] = {0, 0};
static const unsigned uart2_pins[] = {112, 113};
-static const unsigned uart2_muxvals[] = {1, 1};
+static const int uart2_muxvals[] = {1, 1};
static const unsigned uart3_pins[] = {110, 111};
-static const unsigned uart3_muxvals[] = {1, 1};
+static const int uart3_muxvals[] = {1, 1};
static const unsigned usb0_pins[] = {41, 42};
-static const unsigned usb0_muxvals[] = {0, 0};
+static const int usb0_muxvals[] = {0, 0};
static const unsigned usb1_pins[] = {43, 44};
-static const unsigned usb1_muxvals[] = {0, 0};
+static const int usb1_muxvals[] = {0, 0};
static const unsigned usb2_pins[] = {114, 115};
-static const unsigned usb2_muxvals[] = {1, 1};
+static const int usb2_muxvals[] = {1, 1};
static const unsigned port_range0_pins[] = {
0, 1, 2, 3, 4, 5, 6, 7, /* PORT0x */
8, 9, 10, 11, 12, 13, 14, 15, /* PORT1x */
@@ -481,7 +546,7 @@ static const unsigned port_range0_pins[] = {
48, 49, 46, 45, 123, 124, 125, 126, /* PORT11x */
47, 127, 20, 56, 22, /* PORT120-124 */
};
-static const unsigned port_range0_muxvals[] = {
+static const int port_range0_muxvals[] = {
15, 15, 15, 15, 15, 15, 15, 15, /* PORT0x */
15, 15, 15, 15, 15, 15, 15, 15, /* PORT1x */
15, 15, 15, 15, 15, 15, 15, 15, /* PORT2x */
@@ -499,39 +564,41 @@ static const unsigned port_range0_muxvals[] = {
static const unsigned port_range1_pins[] = {
116, 117, /* PORT130-131 */
};
-static const unsigned port_range1_muxvals[] = {
+static const int port_range1_muxvals[] = {
15, 15, /* PORT130-131 */
};
static const unsigned port_range2_pins[] = {
102, 103, 104, 105, 106, 107, 108, 109, /* PORT14x */
};
-static const unsigned port_range2_muxvals[] = {
+static const int port_range2_muxvals[] = {
15, 15, 15, 15, 15, 15, 15, 15, /* PORT14x */
};
static const unsigned port_range3_pins[] = {
23, /* PORT166 */
};
-static const unsigned port_range3_muxvals[] = {
+static const int port_range3_muxvals[] = {
15, /* PORT166 */
};
static const unsigned xirq_range0_pins[] = {
128, 129, 130, 131, 132, 133, 134, 135, /* XIRQ0-7 */
82, 87, 88, 50, 51, /* XIRQ8-12 */
};
-static const unsigned xirq_range0_muxvals[] = {
+static const int xirq_range0_muxvals[] = {
0, 0, 0, 0, 0, 0, 0, 0, /* XIRQ0-7 */
14, 14, 14, 14, 14, /* XIRQ8-12 */
};
static const unsigned xirq_range1_pins[] = {
52, 58, /* XIRQ14-15 */
};
-static const unsigned xirq_range1_muxvals[] = {
+static const int xirq_range1_muxvals[] = {
14, 14, /* XIRQ14-15 */
};
-static const struct uniphier_pinctrl_group ph1_sld8_groups[] = {
+static const struct uniphier_pinctrl_group uniphier_sld8_groups[] = {
UNIPHIER_PINCTRL_GROUP(emmc),
UNIPHIER_PINCTRL_GROUP(emmc_dat8),
+ UNIPHIER_PINCTRL_GROUP(ether_mii),
+ UNIPHIER_PINCTRL_GROUP(ether_rmii),
UNIPHIER_PINCTRL_GROUP(i2c0),
UNIPHIER_PINCTRL_GROUP(i2c1),
UNIPHIER_PINCTRL_GROUP(i2c2),
@@ -539,6 +606,12 @@ static const struct uniphier_pinctrl_group ph1_sld8_groups[] = {
UNIPHIER_PINCTRL_GROUP(nand),
UNIPHIER_PINCTRL_GROUP(nand_cs1),
UNIPHIER_PINCTRL_GROUP(sd),
+ UNIPHIER_PINCTRL_GROUP(system_bus),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs1),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs2),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs3),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs4),
+ UNIPHIER_PINCTRL_GROUP(system_bus_cs5),
UNIPHIER_PINCTRL_GROUP(uart0),
UNIPHIER_PINCTRL_GROUP(uart1),
UNIPHIER_PINCTRL_GROUP(uart2),
@@ -682,12 +755,20 @@ static const struct uniphier_pinctrl_group ph1_sld8_groups[] = {
};
static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
+static const char * const ether_mii_groups[] = {"ether_mii"};
+static const char * const ether_rmii_groups[] = {"ether_rmii"};
static const char * const i2c0_groups[] = {"i2c0"};
static const char * const i2c1_groups[] = {"i2c1"};
static const char * const i2c2_groups[] = {"i2c2"};
static const char * const i2c3_groups[] = {"i2c3"};
static const char * const nand_groups[] = {"nand", "nand_cs1"};
static const char * const sd_groups[] = {"sd"};
+static const char * const system_bus_groups[] = {"system_bus",
+ "system_bus_cs1",
+ "system_bus_cs2",
+ "system_bus_cs3",
+ "system_bus_cs4",
+ "system_bus_cs5"};
static const char * const uart0_groups[] = {"uart0"};
static const char * const uart1_groups[] = {"uart1"};
static const char * const uart2_groups[] = {"uart2"};
@@ -736,14 +817,17 @@ static const char * const xirq_groups[] = {
"xirq12", /* none*/ "xirq14", "xirq15",
};
-static const struct uniphier_pinmux_function ph1_sld8_functions[] = {
+static const struct uniphier_pinmux_function uniphier_sld8_functions[] = {
UNIPHIER_PINMUX_FUNCTION(emmc),
+ UNIPHIER_PINMUX_FUNCTION(ether_mii),
+ UNIPHIER_PINMUX_FUNCTION(ether_rmii),
UNIPHIER_PINMUX_FUNCTION(i2c0),
UNIPHIER_PINMUX_FUNCTION(i2c1),
UNIPHIER_PINMUX_FUNCTION(i2c2),
UNIPHIER_PINMUX_FUNCTION(i2c3),
UNIPHIER_PINMUX_FUNCTION(nand),
UNIPHIER_PINMUX_FUNCTION(sd),
+ UNIPHIER_PINMUX_FUNCTION(system_bus),
UNIPHIER_PINMUX_FUNCTION(uart0),
UNIPHIER_PINMUX_FUNCTION(uart1),
UNIPHIER_PINMUX_FUNCTION(uart2),
@@ -755,43 +839,36 @@ static const struct uniphier_pinmux_function ph1_sld8_functions[] = {
UNIPHIER_PINMUX_FUNCTION(xirq),
};
-static struct uniphier_pinctrl_socdata ph1_sld8_pindata = {
- .groups = ph1_sld8_groups,
- .groups_count = ARRAY_SIZE(ph1_sld8_groups),
- .functions = ph1_sld8_functions,
- .functions_count = ARRAY_SIZE(ph1_sld8_functions),
- .mux_bits = 8,
- .reg_stride = 4,
- .load_pinctrl = false,
-};
-
-static struct pinctrl_desc ph1_sld8_pinctrl_desc = {
- .name = DRIVER_NAME,
- .pins = ph1_sld8_pins,
- .npins = ARRAY_SIZE(ph1_sld8_pins),
- .owner = THIS_MODULE,
+static struct uniphier_pinctrl_socdata uniphier_sld8_pindata = {
+ .pins = uniphier_sld8_pins,
+ .npins = ARRAY_SIZE(uniphier_sld8_pins),
+ .groups = uniphier_sld8_groups,
+ .groups_count = ARRAY_SIZE(uniphier_sld8_groups),
+ .functions = uniphier_sld8_functions,
+ .functions_count = ARRAY_SIZE(uniphier_sld8_functions),
+ .caps = 0,
};
-static int ph1_sld8_pinctrl_probe(struct platform_device *pdev)
+static int uniphier_sld8_pinctrl_probe(struct platform_device *pdev)
{
- return uniphier_pinctrl_probe(pdev, &ph1_sld8_pinctrl_desc,
- &ph1_sld8_pindata);
+ return uniphier_pinctrl_probe(pdev, &uniphier_sld8_pindata);
}
-static const struct of_device_id ph1_sld8_pinctrl_match[] = {
+static const struct of_device_id uniphier_sld8_pinctrl_match[] = {
+ { .compatible = "socionext,uniphier-sld8-pinctrl" },
{ .compatible = "socionext,ph1-sld8-pinctrl" },
{ /* sentinel */ }
};
-MODULE_DEVICE_TABLE(of, ph1_sld8_pinctrl_match);
+MODULE_DEVICE_TABLE(of, uniphier_sld8_pinctrl_match);
-static struct platform_driver ph1_sld8_pinctrl_driver = {
- .probe = ph1_sld8_pinctrl_probe,
+static struct platform_driver uniphier_sld8_pinctrl_driver = {
+ .probe = uniphier_sld8_pinctrl_probe,
.driver = {
- .name = DRIVER_NAME,
- .of_match_table = ph1_sld8_pinctrl_match,
+ .name = "uniphier-sld8-pinctrl",
+ .of_match_table = uniphier_sld8_pinctrl_match,
},
};
-module_platform_driver(ph1_sld8_pinctrl_driver);
+module_platform_driver(uniphier_sld8_pinctrl_driver);
MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
MODULE_DESCRIPTION("UniPhier PH1-sLD8 pinctrl driver");
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier.h b/drivers/pinctrl/uniphier/pinctrl-uniphier.h
index a21154f4b..923f36cb2 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier.h
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier.h
@@ -15,14 +15,18 @@
#ifndef __PINCTRL_UNIPHIER_H__
#define __PINCTRL_UNIPHIER_H__
+#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/types.h>
+struct platform_device;
+
#define UNIPHIER_PINCTRL_PINMUX_BASE 0x0
#define UNIPHIER_PINCTRL_LOAD_PINMUX 0x700
#define UNIPHIER_PINCTRL_DRVCTRL_BASE 0x800
#define UNIPHIER_PINCTRL_DRV2CTRL_BASE 0x900
+#define UNIPHIER_PINCTRL_DRV3CTRL_BASE 0x980
#define UNIPHIER_PINCTRL_PUPDCTRL_BASE 0xa00
#define UNIPHIER_PINCTRL_IECTRL 0xd00
@@ -39,16 +43,16 @@
#define UNIPHIER_PIN_DRVCTRL_MASK ((1UL << (UNIPHIER_PIN_DRVCTRL_BITS)) \
- 1)
-/* supported drive strength (mA) */
-#define UNIPHIER_PIN_DRV_STR_SHIFT ((UNIPHIER_PIN_DRVCTRL_SHIFT) + \
+/* drive control type */
+#define UNIPHIER_PIN_DRV_TYPE_SHIFT ((UNIPHIER_PIN_DRVCTRL_SHIFT) + \
(UNIPHIER_PIN_DRVCTRL_BITS))
-#define UNIPHIER_PIN_DRV_STR_BITS 3
-#define UNIPHIER_PIN_DRV_STR_MASK ((1UL << (UNIPHIER_PIN_DRV_STR_BITS)) \
+#define UNIPHIER_PIN_DRV_TYPE_BITS 3
+#define UNIPHIER_PIN_DRV_TYPE_MASK ((1UL << (UNIPHIER_PIN_DRV_TYPE_BITS)) \
- 1)
/* pull-up / pull-down register number */
-#define UNIPHIER_PIN_PUPDCTRL_SHIFT ((UNIPHIER_PIN_DRV_STR_SHIFT) + \
- (UNIPHIER_PIN_DRV_STR_BITS))
+#define UNIPHIER_PIN_PUPDCTRL_SHIFT ((UNIPHIER_PIN_DRV_TYPE_SHIFT) + \
+ (UNIPHIER_PIN_DRV_TYPE_BITS))
#define UNIPHIER_PIN_PUPDCTRL_BITS 9
#define UNIPHIER_PIN_PUPDCTRL_MASK ((1UL << (UNIPHIER_PIN_PUPDCTRL_BITS))\
- 1)
@@ -66,13 +70,14 @@
#define UNIPHIER_PIN_IECTRL_NONE (UNIPHIER_PIN_IECTRL_MASK)
-/* selectable drive strength */
-enum uniphier_pin_drv_str {
- UNIPHIER_PIN_DRV_4_8, /* 2 level control: 4/8 mA */
- UNIPHIER_PIN_DRV_8_12_16_20, /* 4 level control: 8/12/16/20 mA */
- UNIPHIER_PIN_DRV_FIXED_4, /* fixed to 4mA */
- UNIPHIER_PIN_DRV_FIXED_5, /* fixed to 5mA */
- UNIPHIER_PIN_DRV_FIXED_8, /* fixed to 8mA */
+/* drive control type */
+enum uniphier_pin_drv_type {
+ UNIPHIER_PIN_DRV_1BIT, /* 2 level control: 4/8 mA */
+ UNIPHIER_PIN_DRV_2BIT, /* 4 level control: 8/12/16/20 mA */
+ UNIPHIER_PIN_DRV_3BIT, /* 8 level control: 4/5/7/9/11/12/14/16 mA */
+ UNIPHIER_PIN_DRV_FIXED4, /* fixed to 4mA */
+ UNIPHIER_PIN_DRV_FIXED5, /* fixed to 5mA */
+ UNIPHIER_PIN_DRV_FIXED8, /* fixed to 8mA */
UNIPHIER_PIN_DRV_NONE, /* no support (input only pin) */
};
@@ -89,17 +94,17 @@ enum uniphier_pin_pull_dir {
(((x) & (UNIPHIER_PIN_IECTRL_MASK)) << (UNIPHIER_PIN_IECTRL_SHIFT))
#define UNIPHIER_PIN_DRVCTRL(x) \
(((x) & (UNIPHIER_PIN_DRVCTRL_MASK)) << (UNIPHIER_PIN_DRVCTRL_SHIFT))
-#define UNIPHIER_PIN_DRV_STR(x) \
- (((x) & (UNIPHIER_PIN_DRV_STR_MASK)) << (UNIPHIER_PIN_DRV_STR_SHIFT))
+#define UNIPHIER_PIN_DRV_TYPE(x) \
+ (((x) & (UNIPHIER_PIN_DRV_TYPE_MASK)) << (UNIPHIER_PIN_DRV_TYPE_SHIFT))
#define UNIPHIER_PIN_PUPDCTRL(x) \
(((x) & (UNIPHIER_PIN_PUPDCTRL_MASK)) << (UNIPHIER_PIN_PUPDCTRL_SHIFT))
#define UNIPHIER_PIN_PULL_DIR(x) \
(((x) & (UNIPHIER_PIN_PULL_DIR_MASK)) << (UNIPHIER_PIN_PULL_DIR_SHIFT))
-#define UNIPHIER_PIN_ATTR_PACKED(iectrl, drvctrl, drv_str, pupdctrl, pull_dir)\
+#define UNIPHIER_PIN_ATTR_PACKED(iectrl, drvctrl, drv_type, pupdctrl, pull_dir)\
(UNIPHIER_PIN_IECTRL(iectrl) | \
UNIPHIER_PIN_DRVCTRL(drvctrl) | \
- UNIPHIER_PIN_DRV_STR(drv_str) | \
+ UNIPHIER_PIN_DRV_TYPE(drv_type) | \
UNIPHIER_PIN_PUPDCTRL(pupdctrl) | \
UNIPHIER_PIN_PULL_DIR(pull_dir))
@@ -115,10 +120,10 @@ static inline unsigned int uniphier_pin_get_drvctrl(void *drv_data)
UNIPHIER_PIN_DRVCTRL_MASK;
}
-static inline unsigned int uniphier_pin_get_drv_str(void *drv_data)
+static inline unsigned int uniphier_pin_get_drv_type(void *drv_data)
{
- return ((unsigned long)drv_data >> UNIPHIER_PIN_DRV_STR_SHIFT) &
- UNIPHIER_PIN_DRV_STR_MASK;
+ return ((unsigned long)drv_data >> UNIPHIER_PIN_DRV_TYPE_SHIFT) &
+ UNIPHIER_PIN_DRV_TYPE_MASK;
}
static inline unsigned int uniphier_pin_get_pupdctrl(void *drv_data)
@@ -143,7 +148,7 @@ struct uniphier_pinctrl_group {
const char *name;
const unsigned *pins;
unsigned num_pins;
- const unsigned *muxvals;
+ const int *muxvals;
enum uniphier_pinmux_gpio_range_type range_type;
};
@@ -154,13 +159,15 @@ struct uniphier_pinmux_function {
};
struct uniphier_pinctrl_socdata {
+ const struct pinctrl_pin_desc *pins;
+ unsigned int npins;
const struct uniphier_pinctrl_group *groups;
int groups_count;
const struct uniphier_pinmux_function *functions;
int functions_count;
- unsigned mux_bits;
- unsigned reg_stride;
- bool load_pinctrl;
+ unsigned int caps;
+#define UNIPHIER_PINCTRL_CAPS_PERPIN_IECTRL BIT(1)
+#define UNIPHIER_PINCTRL_CAPS_DBGMUX_SEPARATE BIT(0)
};
#define UNIPHIER_PINCTRL_PIN(a, b, c, d, e, f, g) \
@@ -205,11 +212,7 @@ struct uniphier_pinctrl_socdata {
.num_groups = ARRAY_SIZE(func##_groups), \
}
-struct platform_device;
-struct pinctrl_desc;
-
int uniphier_pinctrl_probe(struct platform_device *pdev,
- struct pinctrl_desc *desc,
struct uniphier_pinctrl_socdata *socdata);
#endif /* __PINCTRL_UNIPHIER_H__ */
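A round-trip sketch of the renamed packing macros (usage assumed from the definitions above, not taken from the patch): UNIPHIER_PINCTRL_PIN() stores the packed attributes as the pin's void *drv_data, and the inline accessors extract the fields again:

    /* pack: iectrl 0, drvctrl reg 95, 1-bit drive type, pupd reg 95, pull-up */
    void *drv_data = (void *)UNIPHIER_PIN_ATTR_PACKED(0, 95,
                                                      UNIPHIER_PIN_DRV_1BIT,
                                                      95, UNIPHIER_PIN_PULL_UP);

    /* unpack with the accessors defined above */
    unsigned int drv_type = uniphier_pin_get_drv_type(drv_data); /* DRV_1BIT */
    unsigned int pupd     = uniphier_pin_get_pupdctrl(drv_data); /* 95 */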
diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c
index f99b183d5..374a8028f 100644
--- a/drivers/platform/olpc/olpc-ec.c
+++ b/drivers/platform/olpc/olpc-ec.c
@@ -1,6 +1,8 @@
/*
* Generic driver for the OLPC Embedded Controller.
*
+ * Author: Andres Salomon <dilinger@queued.net>
+ *
* Copyright (C) 2011-2012 One Laptop per Child Foundation.
*
* Licensed under the GPL v2 or later.
@@ -12,7 +14,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/list.h>
#include <linux/olpc-ec.h>
#include <asm/olpc.h>
@@ -326,8 +328,4 @@ static int __init olpc_ec_init_module(void)
{
return platform_driver_register(&olpc_ec_plat_driver);
}
-
arch_initcall(olpc_ec_init_module);
-
-MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index fc7c29c93..7435fa5eb 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -622,6 +622,8 @@ config ASUS_WIRELESS
tristate "Asus Wireless Radio Control Driver"
depends on ACPI
depends on INPUT
+ select NEW_LEDS
+ select LEDS_CLASS
---help---
The Asus Wireless Radio Control handles the airplane mode hotkey
present on some Asus laptops.
@@ -687,6 +689,7 @@ config ACPI_TOSHIBA
depends on SERIO_I8042 || SERIO_I8042 = n
depends on ACPI_VIDEO || ACPI_VIDEO = n
depends on RFKILL || RFKILL = n
+ depends on IIO
select INPUT_POLLDEV
select INPUT_SPARSEKMAP
---help---
@@ -789,6 +792,18 @@ config INTEL_HID_EVENT
To compile this driver as a module, choose M here: the module will
be called intel_hid.
+config INTEL_VBTN
+ tristate "INTEL VIRTUAL BUTTON"
+ depends on ACPI
+ depends on INPUT
+ select INPUT_SPARSEKMAP
+ help
+ This driver provides support for the Intel Virtual Button interface.
+ Some laptops require this driver for power button support.
+
+ To compile this driver as a module, choose M here: the module will
+ be called intel_vbtn.
+
config INTEL_SCU_IPC
bool "Intel SCU IPC Support"
depends on X86_INTEL_MID
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 829f52154..ae14f8e21 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_TOSHIBA_BT_RFKILL) += toshiba_bluetooth.o
obj-$(CONFIG_TOSHIBA_HAPS) += toshiba_haps.o
obj-$(CONFIG_TOSHIBA_WMI) += toshiba-wmi.o
obj-$(CONFIG_INTEL_HID_EVENT) += intel-hid.o
+obj-$(CONFIG_INTEL_VBTN) += intel-vbtn.o
obj-$(CONFIG_INTEL_SCU_IPC) += intel_scu_ipc.o
obj-$(CONFIG_INTEL_SCU_IPC_UTIL) += intel_scu_ipcutil.o
obj-$(CONFIG_INTEL_MFLD_THERMAL) += intel_mid_thermal.o
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index 4034d2d4c..a66be1373 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -31,19 +31,21 @@
/**
* DOC: Overview
*
- * :1: http://www.latticesemi.com/en/Products/FPGAandCPLD/LatticeXP2.aspx
- * :2: http://www.renesas.com/products/mpumcu/h8s/h8s2100/h8s2113/index.jsp
- *
* gmux is a microcontroller built into the MacBook Pro to support dual GPUs:
- * A {1}[Lattice XP2] on pre-retinas, a {2}[Renesas R4F2113] on retinas.
+ * A `Lattice XP2`_ on pre-retinas, a `Renesas R4F2113`_ on retinas.
*
* (The MacPro6,1 2013 also has a gmux; however, it is unclear why, since it
* has dual GPUs but no built-in display.)
*
* gmux is connected to the LPC bus of the southbridge. Its I/O ports are
* accessed differently depending on the microcontroller: Driver functions
- * to access a pre-retina gmux are infixed `_pio_`, those for a retina gmux
- * are infixed `_index_`.
+ * to access a pre-retina gmux are infixed ``_pio_``, those for a retina gmux
+ * are infixed ``_index_``.
+ *
+ * .. _Lattice XP2:
+ * http://www.latticesemi.com/en/Products/FPGAandCPLD/LatticeXP2.aspx
+ * .. _Renesas R4F2113:
+ * http://www.renesas.com/products/mpumcu/h8s/h8s2100/h8s2113/index.jsp
*/
struct apple_gmux_data {
@@ -272,15 +274,15 @@ static bool gmux_is_indexed(struct apple_gmux_data *gmux_data)
/**
* DOC: Backlight control
*
- * :3: http://www.ti.com/lit/ds/symlink/lp8543.pdf
- * :4: http://www.ti.com/lit/ds/symlink/lp8545.pdf
- *
* On single GPU MacBooks, the PWM signal for the backlight is generated by
* the GPU. On dual GPU MacBook Pros by contrast, either GPU may be suspended
* to conserve energy. Hence the PWM signal needs to be generated by a separate
* backlight driver which is controlled by gmux. The earliest generation
- * MBP5 2008/09 uses a {3}[TI LP8543] backlight driver. All newer models
- * use a {4}[TI LP8545].
+ * MBP5 2008/09 uses a `TI LP8543`_ backlight driver. All newer models
+ * use a `TI LP8545`_.
+ *
+ * .. _TI LP8543: http://www.ti.com/lit/ds/symlink/lp8543.pdf
+ * .. _TI LP8545: http://www.ti.com/lit/ds/symlink/lp8545.pdf
*/
static int gmux_get_brightness(struct backlight_device *bd)
@@ -312,28 +314,20 @@ static const struct backlight_ops gmux_bl_ops = {
/**
* DOC: Graphics mux
*
- * :5: http://pimg-fpiw.uspto.gov/fdd/07/870/086/0.pdf
- * :6: http://www.nxp.com/documents/data_sheet/CBTL06141.pdf
- * :7: http://www.ti.com/lit/ds/symlink/hd3ss212.pdf
- * :8: https://www.pericom.com/assets/Datasheets/PI3VDP12412.pdf
- * :9: http://www.ti.com/lit/ds/symlink/sn74lv4066a.pdf
- * :10: http://pdf.datasheetarchive.com/indexerfiles/Datasheets-SW16/DSASW00308511.pdf
- * :11: http://www.ti.com/lit/ds/symlink/ts3ds10224.pdf
- *
* On pre-retinas, the LVDS outputs of both GPUs feed into gmux which muxes
* either of them to the panel. One of the tricks gmux has up its sleeve is
* to lengthen the blanking interval of its output during a switch to
* synchronize it with the GPU switched to. This allows for a flicker-free
- * switch that is imperceptible by the user ({5}[US 8,687,007 B2]).
+ * switch that is imperceptible to the user (`US 8,687,007 B2`_).
*
* On retinas, muxing is no longer done by gmux itself, but by a separate
* chip which is controlled by gmux. The chip is triple sourced, it is
- * either an {6}[NXP CBTL06142], {7}[TI HD3SS212] or {8}[Pericom PI3VDP12412].
+ * either an `NXP CBTL06142`_, `TI HD3SS212`_ or `Pericom PI3VDP12412`_.
* The panel is driven with eDP instead of LVDS since the pixel clock
* required for retina resolution exceeds LVDS' limits.
*
* Pre-retinas are able to switch the panel's DDC pins separately.
- * This is handled by a {9}[TI SN74LV4066A] which is controlled by gmux.
+ * This is handled by a `TI SN74LV4066A`_ which is controlled by gmux.
* The inactive GPU can thus probe the panel's EDID without switching over
* the entire panel. Retinas lack this functionality as the chips used for
* eDP muxing are incapable of switching the AUX channel separately (see
@@ -344,15 +338,15 @@ static const struct backlight_ops gmux_bl_ops = {
*
* The external DP port is only fully switchable on the first two unibody
* MacBook Pro generations, MBP5 2008/09 and MBP6 2010. This is done by an
- * {6}[NXP CBTL06141] which is controlled by gmux. It's the predecessor of the
+ * `NXP CBTL06141`_ which is controlled by gmux. It's the predecessor of the
* eDP mux on retinas, the difference being support for 2.7 versus 5.4 Gbit/s.
*
* The following MacBook Pro generations replaced the external DP port with a
* combined DP/Thunderbolt port and lost the ability to switch it between GPUs,
* connecting it either to the discrete GPU or the Thunderbolt controller.
* Oddly enough, while the full port is no longer switchable, AUX and HPD
- * are still switchable by way of an {10}[NXP CBTL03062] (on pre-retinas
- * MBP8 2011 and MBP9 2012) or two {11}[TI TS3DS10224] (on retinas) under the
+ * are still switchable by way of an `NXP CBTL03062`_ (on pre-retinas
+ * MBP8 2011 and MBP9 2012) or two `TI TS3DS10224`_ (on retinas) under the
* control of gmux. Since the integrated GPU is missing the main link,
* external displays appear to it as phantoms which fail to link-train.
*
@@ -365,10 +359,19 @@ static const struct backlight_ops gmux_bl_ops = {
* of this feature.
*
* gmux' initial switch state on bootup is user configurable via the EFI
- * variable `gpu-power-prefs-fa4ce28d-b62f-4c99-9cc3-6815686e30f9` (5th byte,
+ * variable ``gpu-power-prefs-fa4ce28d-b62f-4c99-9cc3-6815686e30f9`` (5th byte,
* 1 = IGD, 0 = DIS). Based on this setting, the EFI firmware tells gmux to
* switch the panel and the external DP connector and allocates a framebuffer
* for the selected GPU.
+ *
+ * .. _US 8,687,007 B2: http://pimg-fpiw.uspto.gov/fdd/07/870/086/0.pdf
+ * .. _NXP CBTL06141: http://www.nxp.com/documents/data_sheet/CBTL06141.pdf
+ * .. _NXP CBTL06142: http://www.nxp.com/documents/data_sheet/CBTL06141.pdf
+ * .. _TI HD3SS212: http://www.ti.com/lit/ds/symlink/hd3ss212.pdf
+ * .. _Pericom PI3VDP12412: https://www.pericom.com/assets/Datasheets/PI3VDP12412.pdf
+ * .. _TI SN74LV4066A: http://www.ti.com/lit/ds/symlink/sn74lv4066a.pdf
+ * .. _NXP CBTL03062: http://pdf.datasheetarchive.com/indexerfiles/Datasheets-SW16/DSASW00308511.pdf
+ * .. _TI TS3DS10224: http://www.ti.com/lit/ds/symlink/ts3ds10224.pdf
*/
static void gmux_read_switch_state(struct apple_gmux_data *gmux_data)
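A hedged userspace sketch of reading the EFI variable described above (editorial; it assumes the "5th byte" counts efivarfs' 4-byte attribute prefix, so the switch state is the first data byte):

    #include <fcntl.h>
    #include <unistd.h>

    static int gpu_power_pref(void)
    {
            unsigned char buf[5];  /* 4 attribute bytes + 1 data byte */
            int fd, n;

            fd = open("/sys/firmware/efi/efivars/"
                      "gpu-power-prefs-fa4ce28d-b62f-4c99-9cc3-6815686e30f9",
                      O_RDONLY);
            if (fd < 0)
                    return -1;
            n = read(fd, buf, sizeof(buf));
            close(fd);
            return n == sizeof(buf) ? buf[4] : -1;  /* 1 = IGD, 0 = DIS */
    }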
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 091ca7ada..adecc1c55 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -78,6 +78,15 @@ static struct quirk_entry quirk_asus_x200ca = {
.wapf = 2,
};
+static struct quirk_entry quirk_no_rfkill = {
+ .no_rfkill = true,
+};
+
+static struct quirk_entry quirk_no_rfkill_wapf4 = {
+ .wapf = 4,
+ .no_rfkill = true,
+};
+
static int dmi_matched(const struct dmi_system_id *dmi)
{
quirks = dmi->driver_data;
@@ -133,7 +142,7 @@ static const struct dmi_system_id asus_quirks[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "X456UA"),
},
- .driver_data = &quirk_asus_wapf4,
+ .driver_data = &quirk_no_rfkill_wapf4,
},
{
.callback = dmi_matched,
@@ -142,7 +151,7 @@ static const struct dmi_system_id asus_quirks[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "X456UF"),
},
- .driver_data = &quirk_asus_wapf4,
+ .driver_data = &quirk_no_rfkill_wapf4,
},
{
.callback = dmi_matched,
@@ -306,6 +315,42 @@ static const struct dmi_system_id asus_quirks[] = {
},
.driver_data = &quirk_asus_x200ca,
},
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X555UB",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X555UB"),
+ },
+ .driver_data = &quirk_no_rfkill,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. N552VW",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N552VW"),
+ },
+ .driver_data = &quirk_no_rfkill,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. U303LB",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "U303LB"),
+ },
+ .driver_data = &quirk_no_rfkill,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. Z550MA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Z550MA"),
+ },
+ .driver_data = &quirk_no_rfkill,
+ },
{},
};
@@ -356,6 +401,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0x67, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV */
{ KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } },
{ KE_IGNORE, 0x6E, }, /* Low Battery notification */
+ { KE_KEY, 0x7a, { KEY_ALS_TOGGLE } }, /* Ambient Light Sensor Toggle */
{ KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */
{ KE_KEY, 0x7E, { KEY_BLUETOOTH } }, /* Bluetooth Disable */
{ KE_KEY, 0x82, { KEY_CAMERA } },
diff --git a/drivers/platform/x86/asus-wireless.c b/drivers/platform/x86/asus-wireless.c
index 9ec721e26..9f31bc1a4 100644
--- a/drivers/platform/x86/asus-wireless.c
+++ b/drivers/platform/x86/asus-wireless.c
@@ -15,11 +15,78 @@
#include <linux/acpi.h>
#include <linux/input.h>
#include <linux/pci_ids.h>
+#include <linux/leds.h>
+
+#define ASUS_WIRELESS_LED_STATUS 0x2
+#define ASUS_WIRELESS_LED_OFF 0x4
+#define ASUS_WIRELESS_LED_ON 0x5
struct asus_wireless_data {
struct input_dev *idev;
+ struct acpi_device *adev;
+ struct workqueue_struct *wq;
+ struct work_struct led_work;
+ struct led_classdev led;
+ int led_state;
};
+static u64 asus_wireless_method(acpi_handle handle, const char *method,
+ int param)
+{
+ struct acpi_object_list p;
+ union acpi_object obj;
+ acpi_status s;
+ u64 ret;
+
+ acpi_handle_debug(handle, "Evaluating method %s, parameter %#x\n",
+ method, param);
+ obj.type = ACPI_TYPE_INTEGER;
+ obj.integer.value = param;
+ p.count = 1;
+ p.pointer = &obj;
+
+ s = acpi_evaluate_integer(handle, (acpi_string) method, &p, &ret);
+ if (ACPI_FAILURE(s))
+ acpi_handle_err(handle,
+ "Failed to eval method %s, param %#x (%d)\n",
+ method, param, s);
+ acpi_handle_debug(handle, "%s returned %#x\n", method, (uint) ret);
+ return ret;
+}
+
+static enum led_brightness led_state_get(struct led_classdev *led)
+{
+ struct asus_wireless_data *data;
+ int s;
+
+ data = container_of(led, struct asus_wireless_data, led);
+ s = asus_wireless_method(acpi_device_handle(data->adev), "HSWC",
+ ASUS_WIRELESS_LED_STATUS);
+ if (s == ASUS_WIRELESS_LED_ON)
+ return LED_FULL;
+ return LED_OFF;
+}
+
+static void led_state_update(struct work_struct *work)
+{
+ struct asus_wireless_data *data;
+
+ data = container_of(work, struct asus_wireless_data, led_work);
+ asus_wireless_method(acpi_device_handle(data->adev), "HSWC",
+ data->led_state);
+}
+
+static void led_state_set(struct led_classdev *led,
+ enum led_brightness value)
+{
+ struct asus_wireless_data *data;
+
+ data = container_of(led, struct asus_wireless_data, led);
+ data->led_state = value == LED_OFF ? ASUS_WIRELESS_LED_OFF :
+ ASUS_WIRELESS_LED_ON;
+ queue_work(data->wq, &data->led_work);
+}
+
static void asus_wireless_notify(struct acpi_device *adev, u32 event)
{
struct asus_wireless_data *data = acpi_driver_data(adev);
@@ -37,6 +104,7 @@ static void asus_wireless_notify(struct acpi_device *adev, u32 event)
static int asus_wireless_add(struct acpi_device *adev)
{
struct asus_wireless_data *data;
+ int err;
data = devm_kzalloc(&adev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -52,11 +120,32 @@ static int asus_wireless_add(struct acpi_device *adev)
data->idev->id.vendor = PCI_VENDOR_ID_ASUSTEK;
set_bit(EV_KEY, data->idev->evbit);
set_bit(KEY_RFKILL, data->idev->keybit);
- return input_register_device(data->idev);
+ err = input_register_device(data->idev);
+ if (err)
+ return err;
+
+ data->adev = adev;
+ data->wq = create_singlethread_workqueue("asus_wireless_workqueue");
+ if (!data->wq)
+ return -ENOMEM;
+ INIT_WORK(&data->led_work, led_state_update);
+ data->led.name = "asus-wireless::airplane";
+ data->led.brightness_set = led_state_set;
+ data->led.brightness_get = led_state_get;
+ data->led.flags = LED_CORE_SUSPENDRESUME;
+ data->led.max_brightness = 1;
+ err = devm_led_classdev_register(&adev->dev, &data->led);
+ if (err)
+ destroy_workqueue(data->wq);
+ return err;
}
static int asus_wireless_remove(struct acpi_device *adev)
{
+ struct asus_wireless_data *data = acpi_driver_data(adev);
+
+ if (data->wq)
+ destroy_workqueue(data->wq);
return 0;
}
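A short aside on the pattern above (a sketch of the rationale, not new driver code): led_classdev's brightness_set() callback must not sleep, while evaluating the HSWC ACPI method can, so the driver caches the requested state and defers the ACPI call to a workqueue:

    /* fast path: may be called from atomic context */
    static void example_set(struct led_classdev *led, enum led_brightness b)
    {
            struct asus_wireless_data *d =
                    container_of(led, struct asus_wireless_data, led);

            d->led_state = b == LED_OFF ? ASUS_WIRELESS_LED_OFF
                                        : ASUS_WIRELESS_LED_ON;
            queue_work(d->wq, &d->led_work); /* HSWC runs later, in process context */
    }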
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index a26dca364..7c093a0b7 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -2069,9 +2069,11 @@ static int asus_wmi_add(struct platform_device *pdev)
if (err)
goto fail_leds;
- err = asus_wmi_rfkill_init(asus);
- if (err)
- goto fail_rfkill;
+ if (!asus->driver->quirks->no_rfkill) {
+ err = asus_wmi_rfkill_init(asus);
+ if (err)
+ goto fail_rfkill;
+ }
/* Some Asus desktop boards export an acpi-video backlight interface;
stop this from showing up */
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index 4da4c8baf..5de1df510 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -38,6 +38,7 @@ struct key_entry;
struct asus_wmi;
struct quirk_entry {
+ bool no_rfkill;
bool hotplug_wireless;
bool scalar_panel_brightness;
bool store_backlight_power;
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 15c6f1191..da2fe1816 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -80,66 +80,115 @@ static const struct dmi_system_id dell_wmi_smbios_list[] __initconst = {
};
/*
+ * Keymap for WMI events of type 0x0000
+ *
* Certain keys are flagged as KE_IGNORE. All of these are either
* notifications (rather than requests for change) or are also sent
* via the keyboard controller so should not be sent again.
*/
-
-static const struct key_entry dell_wmi_legacy_keymap[] __initconst = {
+static const struct key_entry dell_wmi_keymap_type_0000[] __initconst = {
{ KE_IGNORE, 0x003a, { KEY_CAPSLOCK } },
- { KE_KEY, 0xe045, { KEY_PROG1 } },
- { KE_KEY, 0xe009, { KEY_EJECTCD } },
-
- /* These also contain the brightness level at offset 6 */
- { KE_KEY, 0xe006, { KEY_BRIGHTNESSUP } },
- { KE_KEY, 0xe005, { KEY_BRIGHTNESSDOWN } },
+ /* Key code is followed by brightness level */
+ { KE_KEY, 0xe005, { KEY_BRIGHTNESSDOWN } },
+ { KE_KEY, 0xe006, { KEY_BRIGHTNESSUP } },
/* Battery health status button */
- { KE_KEY, 0xe007, { KEY_BATTERY } },
+ { KE_KEY, 0xe007, { KEY_BATTERY } },
- /* Radio devices state change */
+ /* Radio devices state change, key code is followed by other values */
{ KE_IGNORE, 0xe008, { KEY_RFKILL } },
- /* The next device is at offset 6, the active devices are at
- offset 8 and the attached devices at offset 10 */
- { KE_KEY, 0xe00b, { KEY_SWITCHVIDEOMODE } },
+ { KE_KEY, 0xe009, { KEY_EJECTCD } },
+ /* Key code is followed by: next, active and attached devices */
+ { KE_KEY, 0xe00b, { KEY_SWITCHVIDEOMODE } },
+
+ /* Key code is followed by keyboard illumination level */
{ KE_IGNORE, 0xe00c, { KEY_KBDILLUMTOGGLE } },
/* BIOS error detected */
{ KE_IGNORE, 0xe00d, { KEY_RESERVED } },
+ /* Battery was removed or inserted */
+ { KE_IGNORE, 0xe00e, { KEY_RESERVED } },
+
/* Wifi Catcher */
- { KE_KEY, 0xe011, {KEY_PROG2 } },
+ { KE_KEY, 0xe011, { KEY_PROG2 } },
/* Ambient light sensor toggle */
{ KE_IGNORE, 0xe013, { KEY_RESERVED } },
{ KE_IGNORE, 0xe020, { KEY_MUTE } },
+ /* Unknown, defined in ACPI DSDT */
+ /* { KE_IGNORE, 0xe023, { KEY_RESERVED } }, */
+
+ /* Untested, Dell Instant Launch key on Inspiron 7520 */
+ /* { KE_IGNORE, 0xe024, { KEY_RESERVED } }, */
+
/* Dell Instant Launch key */
- { KE_KEY, 0xe025, { KEY_PROG4 } },
- { KE_KEY, 0xe029, { KEY_PROG4 } },
+ { KE_KEY, 0xe025, { KEY_PROG4 } },
/* Audio panel key */
{ KE_IGNORE, 0xe026, { KEY_RESERVED } },
+ /* LCD Display On/Off Control key */
+ { KE_KEY, 0xe027, { KEY_DISPLAYTOGGLE } },
+
+ /* Untested, Multimedia key on Dell Vostro 3560 */
+ /* { KE_IGNORE, 0xe028, { KEY_RESERVED } }, */
+
+ /* Dell Instant Launch key */
+ { KE_KEY, 0xe029, { KEY_PROG4 } },
+
+ /* Untested, Windows Mobility Center button on Inspiron 7520 */
+ /* { KE_IGNORE, 0xe02a, { KEY_RESERVED } }, */
+
+ /* Unknown, defined in ACPI DSDT */
+ /* { KE_IGNORE, 0xe02b, { KEY_RESERVED } }, */
+
+ /* Untested, Dell Audio With Preset Switch button on Inspiron 7520 */
+ /* { KE_IGNORE, 0xe02c, { KEY_RESERVED } }, */
+
{ KE_IGNORE, 0xe02e, { KEY_VOLUMEDOWN } },
{ KE_IGNORE, 0xe030, { KEY_VOLUMEUP } },
{ KE_IGNORE, 0xe033, { KEY_KBDILLUMUP } },
{ KE_IGNORE, 0xe034, { KEY_KBDILLUMDOWN } },
{ KE_IGNORE, 0xe03a, { KEY_CAPSLOCK } },
+
+ /* NIC Link is Up */
+ { KE_IGNORE, 0xe043, { KEY_RESERVED } },
+
+ /* NIC Link is Down */
+ { KE_IGNORE, 0xe044, { KEY_RESERVED } },
+
+ /*
+ * This entry is very suspicious!
+ * Originally Matthew Garrett created this dell-wmi driver specially for
+ * "button with a picture of a battery" which has event code 0xe045.
+ * Later Mario Limonciello from Dell told us that event code 0xe045 is
+ * reported by Num Lock and should be ignored because key is send also
+ * by keyboard controller.
+ * So for now we will ignore this event to prevent potential double
+ * Num Lock key press.
+ */
{ KE_IGNORE, 0xe045, { KEY_NUMLOCK } },
+
+ /* Scroll lock and also going to tablet mode on portable devices */
{ KE_IGNORE, 0xe046, { KEY_SCROLLLOCK } },
+
+ /* Untested, going from tablet mode on portable devices */
+ /* { KE_IGNORE, 0xe047, { KEY_RESERVED } }, */
+
+ /* Dell Support Center key */
+ { KE_IGNORE, 0xe06e, { KEY_RESERVED } },
+
{ KE_IGNORE, 0xe0f7, { KEY_MUTE } },
{ KE_IGNORE, 0xe0f8, { KEY_VOLUMEDOWN } },
{ KE_IGNORE, 0xe0f9, { KEY_VOLUMEUP } },
- { KE_END, 0 }
};
-static bool dell_new_hk_type;
-
struct dell_bios_keymap_entry {
u16 scancode;
u16 keycode;
@@ -153,6 +202,7 @@ struct dell_bios_hotkey_table {
struct dell_dmi_results {
int err;
+ int keymap_size;
struct key_entry *keymap;
};
@@ -201,10 +251,12 @@ static const u16 bios_to_linux_keycode[256] __initconst = {
};
/*
+ * Keymap for WMI events of type 0x0010
+ *
* These are applied if the 0xB2 DMI hotkey table is present and doesn't
* override them.
*/
-static const struct key_entry dell_wmi_extra_keymap[] __initconst = {
+static const struct key_entry dell_wmi_keymap_type_0010[] __initconst = {
/* Fn-lock */
{ KE_IGNORE, 0x151, { KEY_RESERVED } },
@@ -224,21 +276,39 @@ static const struct key_entry dell_wmi_extra_keymap[] __initconst = {
{ KE_IGNORE, 0x155, { KEY_RESERVED } },
};
+/*
+ * Keymap for WMI events of type 0x0011
+ */
+static const struct key_entry dell_wmi_keymap_type_0011[] __initconst = {
+ /* Battery unplugged */
+ { KE_IGNORE, 0xfff0, { KEY_RESERVED } },
+
+ /* Battery inserted */
+ { KE_IGNORE, 0xfff1, { KEY_RESERVED } },
+
+ /* Keyboard backlight level changed */
+ { KE_IGNORE, 0x01e1, { KEY_RESERVED } },
+ { KE_IGNORE, 0x02ea, { KEY_RESERVED } },
+ { KE_IGNORE, 0x02eb, { KEY_RESERVED } },
+ { KE_IGNORE, 0x02ec, { KEY_RESERVED } },
+ { KE_IGNORE, 0x02f6, { KEY_RESERVED } },
+};
+
static struct input_dev *dell_wmi_input_dev;
-static void dell_wmi_process_key(int reported_key)
+static void dell_wmi_process_key(int type, int code)
{
const struct key_entry *key;
key = sparse_keymap_entry_from_scancode(dell_wmi_input_dev,
- reported_key);
+ (type << 16) | code);
if (!key) {
- pr_info("Unknown key with scancode 0x%x pressed\n",
- reported_key);
+ pr_info("Unknown key with type 0x%04x and code 0x%04x pressed\n",
+ type, code);
return;
}
- pr_debug("Key %x pressed\n", reported_key);
+ pr_debug("Key with type 0x%04x and code 0x%04x pressed\n", type, code);
/* Don't report brightness notifications that will also come via ACPI */
if ((key->keycode == KEY_BRIGHTNESSUP ||
@@ -246,7 +316,7 @@ static void dell_wmi_process_key(int reported_key)
acpi_video_handles_brightness_key_presses())
return;
- if (reported_key == 0xe025 && !wmi_requires_smbios_request)
+ if (type == 0x0000 && code == 0xe025 && !wmi_requires_smbios_request)
return;
sparse_keymap_report_entry(dell_wmi_input_dev, key, 1, true);
@@ -284,18 +354,6 @@ static void dell_wmi_notify(u32 value, void *context)
buffer_entry = (u16 *)obj->buffer.pointer;
buffer_size = obj->buffer.length/2;
-
- if (!dell_new_hk_type) {
- if (buffer_size >= 3 && buffer_entry[1] == 0x0)
- dell_wmi_process_key(buffer_entry[2]);
- else if (buffer_size >= 2)
- dell_wmi_process_key(buffer_entry[1]);
- else
- pr_info("Received unknown WMI event\n");
- kfree(obj);
- return;
- }
-
buffer_end = buffer_entry + buffer_size;
/*
@@ -330,62 +388,18 @@ static void dell_wmi_notify(u32 value, void *context)
pr_debug("Process buffer (%*ph)\n", len*2, buffer_entry);
switch (buffer_entry[1]) {
- case 0x00:
- for (i = 2; i < len; ++i) {
- switch (buffer_entry[i]) {
- case 0xe043:
- /* NIC Link is Up */
- pr_debug("NIC Link is Up\n");
- break;
- case 0xe044:
- /* NIC Link is Down */
- pr_debug("NIC Link is Down\n");
- break;
- case 0xe045:
- /* Unknown event but defined in DSDT */
- default:
- /* Unknown event */
- pr_info("Unknown WMI event type 0x00: "
- "0x%x\n", (int)buffer_entry[i]);
- break;
- }
- }
+ case 0x0000: /* One key pressed or event occurred */
+ if (len > 2)
+ dell_wmi_process_key(0x0000, buffer_entry[2]);
+ /* Other entries could contain additional information */
break;
- case 0x10:
- /* Keys pressed */
+ case 0x0010: /* Sequence of keys pressed */
+ case 0x0011: /* Sequence of events occurred */
for (i = 2; i < len; ++i)
- dell_wmi_process_key(buffer_entry[i]);
- break;
- case 0x11:
- for (i = 2; i < len; ++i) {
- switch (buffer_entry[i]) {
- case 0xfff0:
- /* Battery unplugged */
- pr_debug("Battery unplugged\n");
- break;
- case 0xfff1:
- /* Battery inserted */
- pr_debug("Battery inserted\n");
- break;
- case 0x01e1:
- case 0x02ea:
- case 0x02eb:
- case 0x02ec:
- case 0x02f6:
- /* Keyboard backlight level changed */
- pr_debug("Keyboard backlight level "
- "changed\n");
- break;
- default:
- /* Unknown event */
- pr_info("Unknown WMI event type 0x11: "
- "0x%x\n", (int)buffer_entry[i]);
- break;
- }
- }
+ dell_wmi_process_key(buffer_entry[1],
+ buffer_entry[i]);
break;
- default:
- /* Unknown event */
+ default: /* Unknown event */
pr_info("Unknown WMI event type 0x%x\n",
(int)buffer_entry[1]);
break;
@@ -410,7 +424,6 @@ static bool have_scancode(u32 scancode, const struct key_entry *keymap, int len)
}
static void __init handle_dmi_entry(const struct dmi_header *dm,
-
void *opaque)
{
@@ -418,7 +431,6 @@ static void __init handle_dmi_entry(const struct dmi_header *dm,
struct dell_bios_hotkey_table *table;
int hotkey_num, i, pos = 0;
struct key_entry *keymap;
- int num_bios_keys;
if (results->err || results->keymap)
return; /* We already found the hotkey table. */
@@ -442,8 +454,7 @@ static void __init handle_dmi_entry(const struct dmi_header *dm,
return;
}
- keymap = kcalloc(hotkey_num + ARRAY_SIZE(dell_wmi_extra_keymap) + 1,
- sizeof(struct key_entry), GFP_KERNEL);
+ keymap = kcalloc(hotkey_num, sizeof(struct key_entry), GFP_KERNEL);
if (!keymap) {
results->err = -ENOMEM;
return;
@@ -480,31 +491,15 @@ static void __init handle_dmi_entry(const struct dmi_header *dm,
pos++;
}
- num_bios_keys = pos;
-
- for (i = 0; i < ARRAY_SIZE(dell_wmi_extra_keymap); i++) {
- const struct key_entry *entry = &dell_wmi_extra_keymap[i];
-
- /*
- * Check if we've already found this scancode. This takes
- * quadratic time, but it doesn't matter unless the list
- * of extra keys gets very long.
- */
- if (!have_scancode(entry->code, keymap, num_bios_keys)) {
- keymap[pos] = *entry;
- pos++;
- }
- }
-
- keymap[pos].type = KE_END;
-
results->keymap = keymap;
+ results->keymap_size = pos;
}
static int __init dell_wmi_input_setup(void)
{
struct dell_dmi_results dmi_results = {};
- int err;
+ struct key_entry *keymap;
+ int err, i, pos = 0;
dell_wmi_input_dev = input_allocate_device();
if (!dell_wmi_input_dev)
@@ -528,21 +523,71 @@ static int __init dell_wmi_input_setup(void)
goto err_free_dev;
}
- if (dmi_results.keymap) {
- dell_new_hk_type = true;
+ keymap = kcalloc(dmi_results.keymap_size +
+ ARRAY_SIZE(dell_wmi_keymap_type_0000) +
+ ARRAY_SIZE(dell_wmi_keymap_type_0010) +
+ ARRAY_SIZE(dell_wmi_keymap_type_0011) +
+ 1,
+ sizeof(struct key_entry), GFP_KERNEL);
+ if (!keymap) {
+ kfree(dmi_results.keymap);
+ err = -ENOMEM;
+ goto err_free_dev;
+ }
+
+ /* Append the table with events of type 0x0010, which come from DMI */
+ for (i = 0; i < dmi_results.keymap_size; i++) {
+ keymap[pos] = dmi_results.keymap[i];
+ keymap[pos].code |= (0x0010 << 16);
+ pos++;
+ }
+
+ kfree(dmi_results.keymap);
- err = sparse_keymap_setup(dell_wmi_input_dev,
- dmi_results.keymap, NULL);
+ /* Append table with extra events of type 0x0010 which are not in DMI */
+ for (i = 0; i < ARRAY_SIZE(dell_wmi_keymap_type_0010); i++) {
+ const struct key_entry *entry = &dell_wmi_keymap_type_0010[i];
/*
- * Sparse keymap library makes a copy of keymap so we
- * don't need the original one that was allocated.
+ * Check if we've already found this scancode. This takes
+ * quadratic time, but it doesn't matter unless the list
+ * of extra keys gets very long.
*/
- kfree(dmi_results.keymap);
- } else {
- err = sparse_keymap_setup(dell_wmi_input_dev,
- dell_wmi_legacy_keymap, NULL);
+ if (dmi_results.keymap_size &&
+ have_scancode(entry->code | (0x0010 << 16),
+ keymap, dmi_results.keymap_size)
+ )
+ continue;
+
+ keymap[pos] = *entry;
+ keymap[pos].code |= (0x0010 << 16);
+ pos++;
+ }
+
+ /* Append table with events of type 0x0011 */
+ for (i = 0; i < ARRAY_SIZE(dell_wmi_keymap_type_0011); i++) {
+ keymap[pos] = dell_wmi_keymap_type_0011[i];
+ keymap[pos].code |= (0x0011 << 16);
+ pos++;
}
+
+ /*
+ * Now also append the table with "legacy" events of type 0x0000. Some
+ * of them are also reported on laptops that have scancodes in DMI.
+ */
+ for (i = 0; i < ARRAY_SIZE(dell_wmi_keymap_type_0000); i++) {
+ keymap[pos] = dell_wmi_keymap_type_0000[i];
+ pos++;
+ }
+
+ keymap[pos].type = KE_END;
+
+ err = sparse_keymap_setup(dell_wmi_input_dev, keymap, NULL);
+ /*
+ * Sparse keymap library makes a copy of keymap so we don't need the
+ * original one that was allocated.
+ */
+ kfree(keymap);
if (err)
goto err_free_dev;
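To make the new scancode scheme explicit (a sketch; the helper below is illustrative and not in the patch): sparse-keymap scancodes now carry the WMI event type in the upper 16 bits and the key code in the lower 16, which is why the DMI, type-0x0010 and type-0x0011 tables are OR-ed with (type << 16) before sparse_keymap_setup():

    static u32 dell_wmi_scancode(u16 type, u16 code)
    {
            return ((u32)type << 16) | code; /* e.g. type 0x0010, code 0x0151 */
    }

    /* lookup side, mirroring dell_wmi_process_key(): */
    key = sparse_keymap_entry_from_scancode(dell_wmi_input_dev,
                                            dell_wmi_scancode(type, code));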
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index ce41bc342..61f39abf5 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -88,9 +88,6 @@
#define ACPI_FUJITSU_NOTIFY_CODE1 0x80
-#define ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS 0x86
-#define ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS 0x87
-
/* FUNC interface - command values */
#define FUNC_RFKILL 0x1000
#define FUNC_LEDS 0x1001
@@ -108,6 +105,8 @@
#define LOGOLAMP_POWERON 0x2000
#define LOGOLAMP_ALWAYS 0x4000
#define RADIO_LED_ON 0x20
+#define ECO_LED 0x10000
+#define ECO_LED_ON 0x80000
#endif
/* Hotkey details */
@@ -121,13 +120,6 @@
#define RINGBUFFERSIZE 40
/* Debugging */
-#define FUJLAPTOP_LOG ACPI_FUJITSU_HID ": "
-#define FUJLAPTOP_ERR KERN_ERR FUJLAPTOP_LOG
-#define FUJLAPTOP_NOTICE KERN_NOTICE FUJLAPTOP_LOG
-#define FUJLAPTOP_INFO KERN_INFO FUJLAPTOP_LOG
-#define FUJLAPTOP_DEBUG KERN_DEBUG FUJLAPTOP_LOG
-
-#define FUJLAPTOP_DBG_ALL 0xffff
#define FUJLAPTOP_DBG_ERROR 0x0001
#define FUJLAPTOP_DBG_WARN 0x0002
#define FUJLAPTOP_DBG_INFO 0x0004
@@ -136,7 +128,7 @@
#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG
#define vdbg_printk(a_dbg_level, format, arg...) \
do { if (dbg_level & a_dbg_level) \
- printk(FUJLAPTOP_DEBUG "%s: " format, __func__ , ## arg); \
+ printk(KERN_DEBUG pr_fmt("%s: " format), __func__, ## arg); \
} while (0)
#else
#define vdbg_printk(a_dbg_level, format, arg...) \
@@ -176,6 +168,7 @@ struct fujitsu_hotkey_t {
int logolamp_registered;
int kblamps_registered;
int radio_led_registered;
+ int eco_led_registered;
};
static struct fujitsu_hotkey_t *fujitsu_hotkey;
@@ -212,6 +205,16 @@ static struct led_classdev radio_led = {
.brightness_get = radio_led_get,
.brightness_set = radio_led_set
};
+
+static enum led_brightness eco_led_get(struct led_classdev *cdev);
+static void eco_led_set(struct led_classdev *cdev,
+ enum led_brightness brightness);
+
+static struct led_classdev eco_led = {
+ .name = "fujitsu::eco_led",
+ .brightness_get = eco_led_get,
+ .brightness_set = eco_led_set
+};
#endif
#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG
@@ -296,6 +299,18 @@ static void radio_led_set(struct led_classdev *cdev,
call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, 0x0);
}
+static void eco_led_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ int curr;
+
+ curr = call_fext_func(FUNC_LEDS, 0x2, ECO_LED, 0x0);
+ if (brightness >= LED_FULL)
+ call_fext_func(FUNC_LEDS, 0x1, ECO_LED, curr | ECO_LED_ON);
+ else
+ call_fext_func(FUNC_LEDS, 0x1, ECO_LED, curr & ~ECO_LED_ON);
+}
+
static enum led_brightness logolamp_get(struct led_classdev *cdev)
{
enum led_brightness brightness = LED_OFF;
@@ -330,6 +345,16 @@ static enum led_brightness radio_led_get(struct led_classdev *cdev)
return brightness;
}
+
+static enum led_brightness eco_led_get(struct led_classdev *cdev)
+{
+ enum led_brightness brightness = LED_OFF;
+
+ if (call_fext_func(FUNC_LEDS, 0x2, ECO_LED, 0x0) & ECO_LED_ON)
+ brightness = LED_FULL;
+
+ return brightness;
+}
#endif
/* Hardware access for LCD brightness control */
@@ -856,6 +881,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
set_bit(fujitsu->keycode3, input->keybit);
set_bit(fujitsu->keycode4, input->keybit);
set_bit(fujitsu->keycode5, input->keybit);
+ set_bit(KEY_TOUCHPAD_TOGGLE, input->keybit);
set_bit(KEY_UNKNOWN, input->keybit);
error = input_register_device(input);
@@ -943,6 +969,23 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
result);
}
}
+
+ /* Support for the eco LED is not always signaled by the same bit
+ * that controls the LED. According to the DSDT table, bit 14 also
+ * seems to indicate the presence of this LED, so confirm it by
+ * testing the status.
+ */
+ if ((call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & BIT(14)) &&
+ (call_fext_func(FUNC_LEDS, 0x2, ECO_LED, 0x0) != UNSUPPORTED_CMD)) {
+ result = led_classdev_register(&fujitsu->pf_device->dev,
+ &eco_led);
+ if (result == 0) {
+ fujitsu_hotkey->eco_led_registered = 1;
+ } else {
+ pr_err("Could not register LED handler for eco LED, error %i\n",
+ result);
+ }
+ }
#endif
return result;
@@ -972,6 +1015,9 @@ static int acpi_fujitsu_hotkey_remove(struct acpi_device *device)
if (fujitsu_hotkey->radio_led_registered)
led_classdev_unregister(&radio_led);
+
+ if (fujitsu_hotkey->eco_led_registered)
+ led_classdev_unregister(&eco_led);
#endif
input_unregister_device(input);
@@ -1060,6 +1106,19 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
}
}
+ /* On some models (first seen on the Skylake-based Lifebook
+ * E736/E746/E756), the touchpad toggle hotkey (Fn+F4) is
+ * handled in software; its state is queried using FUNC_RFKILL.
+ */
+ if ((fujitsu_hotkey->rfkill_supported & BIT(26)) &&
+ (call_fext_func(FUNC_RFKILL, 0x1, 0x0, 0x0) & BIT(26))) {
+ keycode = KEY_TOUCHPAD_TOGGLE;
+ input_report_key(input, keycode, 1);
+ input_sync(input);
+ input_report_key(input, keycode, 0);
+ input_sync(input);
+ }
+
break;
default:
keycode = KEY_UNKNOWN;
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index a818db6aa..ed5874217 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -122,8 +122,8 @@ static int intel_hid_input_setup(struct platform_device *device)
return 0;
err_free_device:
- input_free_device(priv->input_dev);
- return ret;
+ input_free_device(priv->input_dev);
+ return ret;
}
static void intel_hid_input_destroy(struct platform_device *device)
@@ -224,7 +224,6 @@ static int intel_hid_remove(struct platform_device *device)
acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler);
intel_hid_input_destroy(device);
intel_hid_set_enable(&device->dev, 0);
- acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler);
/*
* Even if we failed to shut off the event stream, we can still
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
new file mode 100644
index 000000000..146d02f8c
--- /dev/null
+++ b/drivers/platform/x86/intel-vbtn.c
@@ -0,0 +1,188 @@
+/*
+ * Intel Virtual Button driver for Windows 8.1+
+ *
+ * Copyright (C) 2016 AceLan Kao <acelan.kao@canonical.com>
+ * Copyright (C) 2016 Alex Hung <alex.hung@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("AceLan Kao");
+
+static const struct acpi_device_id intel_vbtn_ids[] = {
+ {"INT33D6", 0},
+ {"", 0},
+};
+
+/* In theory, these are HID usages. */
+static const struct key_entry intel_vbtn_keymap[] = {
+ { KE_IGNORE, 0xC0, { KEY_POWER } }, /* power key press */
+ { KE_KEY, 0xC1, { KEY_POWER } }, /* power key release */
+ { KE_END },
+};
+
+struct intel_vbtn_priv {
+ struct input_dev *input_dev;
+};
+
+static int intel_vbtn_input_setup(struct platform_device *device)
+{
+ struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
+ int ret;
+
+ priv->input_dev = input_allocate_device();
+ if (!priv->input_dev)
+ return -ENOMEM;
+
+ ret = sparse_keymap_setup(priv->input_dev, intel_vbtn_keymap, NULL);
+ if (ret)
+ goto err_free_device;
+
+ priv->input_dev->dev.parent = &device->dev;
+ priv->input_dev->name = "Intel Virtual Button driver";
+ priv->input_dev->id.bustype = BUS_HOST;
+
+ ret = input_register_device(priv->input_dev);
+ if (ret)
+ goto err_free_device;
+
+ return 0;
+
+err_free_device:
+ input_free_device(priv->input_dev);
+ return ret;
+}
+
+static void intel_vbtn_input_destroy(struct platform_device *device)
+{
+ struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
+
+ input_unregister_device(priv->input_dev);
+}
+
+static void notify_handler(acpi_handle handle, u32 event, void *context)
+{
+ struct platform_device *device = context;
+ struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
+
+ if (!sparse_keymap_report_event(priv->input_dev, event, 1, true))
+ dev_info(&device->dev, "unknown event index 0x%x\n",
+ event);
+}
+
+static int intel_vbtn_probe(struct platform_device *device)
+{
+ acpi_handle handle = ACPI_HANDLE(&device->dev);
+ struct intel_vbtn_priv *priv;
+ acpi_status status;
+ int err;
+
+ status = acpi_evaluate_object(handle, "VBDL", NULL, NULL);
+ if (!ACPI_SUCCESS(status)) {
+ dev_warn(&device->dev, "failed to read Intel Virtual Button driver\n");
+ return -ENODEV;
+ }
+
+ priv = devm_kzalloc(&device->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ dev_set_drvdata(&device->dev, priv);
+
+ err = intel_vbtn_input_setup(device);
+ if (err) {
+ pr_err("Failed to setup Intel Virtual Button\n");
+ return err;
+ }
+
+ status = acpi_install_notify_handler(handle,
+ ACPI_DEVICE_NOTIFY,
+ notify_handler,
+ device);
+ if (ACPI_FAILURE(status)) {
+ err = -EBUSY;
+ goto err_remove_input;
+ }
+
+ return 0;
+
+err_remove_input:
+ intel_vbtn_input_destroy(device);
+
+ return err;
+}
+
+static int intel_vbtn_remove(struct platform_device *device)
+{
+ acpi_handle handle = ACPI_HANDLE(&device->dev);
+
+ intel_vbtn_input_destroy(device);
+ acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler);
+
+ /*
+ * Even if we failed to shut off the event stream, we can still
+ * safely detach from the device.
+ */
+ return 0;
+}
+
+static struct platform_driver intel_vbtn_pl_driver = {
+ .driver = {
+ .name = "intel-vbtn",
+ .acpi_match_table = intel_vbtn_ids,
+ },
+ .probe = intel_vbtn_probe,
+ .remove = intel_vbtn_remove,
+};
+MODULE_DEVICE_TABLE(acpi, intel_vbtn_ids);
+
+static acpi_status __init
+check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv)
+{
+ const struct acpi_device_id *ids = context;
+ struct acpi_device *dev;
+
+ if (acpi_bus_get_device(handle, &dev) != 0)
+ return AE_OK;
+
+ if (acpi_match_device_ids(dev, ids) == 0)
+ if (acpi_create_platform_device(dev))
+ dev_info(&dev->dev,
+ "intel-vbtn: created platform device\n");
+
+ return AE_OK;
+}
+
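+/*
+ * The INT33D6 device may not have been enumerated as a platform device
+ * by the time this module loads, so walk the ACPI namespace and create
+ * it explicitly before registering the driver.
+ */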
+static int __init intel_vbtn_init(void)
+{
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, check_acpi_dev, NULL,
+ (void *)intel_vbtn_ids, NULL);
+
+ return platform_driver_register(&intel_vbtn_pl_driver);
+}
+module_init(intel_vbtn_init);
+
+static void __exit intel_vbtn_exit(void)
+{
+ platform_driver_unregister(&intel_vbtn_pl_driver);
+}
+module_exit(intel_vbtn_exit);
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
index 2776bec89..520b58a04 100644
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -23,9 +23,9 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/pci.h>
-#include <linux/seq_file.h>
#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
#include <asm/pmc_core.h>
#include "intel_pmc_core.h"
@@ -77,30 +77,18 @@ int intel_pmc_slp_s0_counter_read(u32 *data)
}
EXPORT_SYMBOL_GPL(intel_pmc_slp_s0_counter_read);
-#if IS_ENABLED(CONFIG_DEBUG_FS)
-static int pmc_core_dev_state_show(struct seq_file *s, void *unused)
+static int pmc_core_dev_state_get(void *data, u64 *val)
{
- struct pmc_dev *pmcdev = s->private;
- u32 counter_val;
+ struct pmc_dev *pmcdev = data;
+ u32 value;
- counter_val = pmc_core_reg_read(pmcdev,
- SPT_PMC_SLP_S0_RES_COUNTER_OFFSET);
- seq_printf(s, "%u\n", pmc_core_adjust_slp_s0_step(counter_val));
+ value = pmc_core_reg_read(pmcdev, SPT_PMC_SLP_S0_RES_COUNTER_OFFSET);
+ *val = pmc_core_adjust_slp_s0_step(value);
return 0;
}
-static int pmc_core_dev_state_open(struct inode *inode, struct file *file)
-{
- return single_open(file, pmc_core_dev_state_show, inode->i_private);
-}
-
-static const struct file_operations pmc_core_dev_state_ops = {
- .open = pmc_core_dev_state_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_dev_state, pmc_core_dev_state_get, NULL, "%llu\n");
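+/*
+ * Generates the "pmc_core_dev_state" file_operations around the simple
+ * get() callback above; no open/read/release boilerplate is needed.
+ */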
static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
{
@@ -112,12 +100,12 @@ static int pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
struct dentry *dir, *file;
dir = debugfs_create_dir("pmc_core", NULL);
- if (!dir)
+ if (IS_ERR_OR_NULL(dir))
return -ENOMEM;
pmcdev->dbgfs_dir = dir;
file = debugfs_create_file("slp_s0_residency_usec", S_IFREG | S_IRUGO,
- dir, pmcdev, &pmc_core_dev_state_ops);
+ dir, pmcdev, &pmc_core_dev_state);
if (!file) {
pmc_core_dbgfs_unregister(pmcdev);
@@ -126,22 +114,12 @@ static int pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
return 0;
}
-#else
-static inline int pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
-{
- return 0;
-}
-
-static inline void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
-{
-}
-#endif /* CONFIG_DEBUG_FS */
static const struct x86_cpu_id intel_pmc_core_ids[] = {
- { X86_VENDOR_INTEL, 6, 0x4e, X86_FEATURE_MWAIT,
- (kernel_ulong_t)NULL}, /* Skylake CPUID Signature */
- { X86_VENDOR_INTEL, 6, 0x5e, X86_FEATURE_MWAIT,
- (kernel_ulong_t)NULL}, /* Skylake CPUID Signature */
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_MOBILE, X86_FEATURE_MWAIT,
+ (kernel_ulong_t)NULL},
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_DESKTOP, X86_FEATURE_MWAIT,
+ (kernel_ulong_t)NULL},
{}
};
@@ -182,10 +160,8 @@ static int pmc_core_probe(struct pci_dev *dev, const struct pci_device_id *id)
}
err = pmc_core_dbgfs_register(pmcdev);
- if (err < 0) {
- dev_err(&dev->dev, "PMC Core: debugfs register failed.\n");
- return err;
- }
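+ /* debugfs is best-effort; the PMC driver is usable without it. */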
+ if (err < 0)
+ dev_warn(&dev->dev, "PMC Core: debugfs register failed.\n");
pmc.has_slp_s0_res = true;
return 0;
diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h
index a9dadaf78..e3f671f4d 100644
--- a/drivers/platform/x86/intel_pmc_core.h
+++ b/drivers/platform/x86/intel_pmc_core.h
@@ -23,6 +23,7 @@
/* Sunrise Point Power Management Controller PCI Device ID */
#define SPT_PMC_PCI_DEVICE_ID 0x9d21
+
#define SPT_PMC_BASE_ADDR_OFFSET 0x48
#define SPT_PMC_SLP_S0_RES_COUNTER_OFFSET 0x13c
#define SPT_PMC_MMIO_REG_LEN 0x100
@@ -42,9 +43,7 @@
struct pmc_dev {
u32 base_addr;
void __iomem *regbase;
-#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *dbgfs_dir;
-#endif /* CONFIG_DEBUG_FS */
bool has_slp_s0_res;
};
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index 6f497e80c..b86e1bcaa 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -85,7 +85,7 @@
* platform device and to export resources for those functions.
*/
#define TCO_DEVICE_NAME "iTCO_wdt"
-#define SMI_EN_OFFSET 0x30
+#define SMI_EN_OFFSET 0x40
#define SMI_EN_SIZE 4
#define TCO_BASE_OFFSET 0x60
#define TCO_REGS_SIZE 16
@@ -94,6 +94,8 @@
#define TELEM_SSRAM_SIZE 240
#define TELEM_PMC_SSRAM_OFFSET 0x1B00
#define TELEM_PUNIT_SSRAM_OFFSET 0x1A00
+#define TCO_PMC_OFFSET 0x8
+#define TCO_PMC_SIZE 0x4
static const int iTCO_version = 3;
@@ -502,7 +504,7 @@ static struct resource tco_res[] = {
static struct itco_wdt_platform_data tco_info = {
.name = "Apollo Lake SoC",
- .version = 3,
+ .version = 5,
};
#define TELEMETRY_RESOURCE_PUNIT_SSRAM 0
@@ -572,8 +574,8 @@ static int ipc_create_tco_device(void)
res->end = res->start + SMI_EN_SIZE - 1;
res = tco_res + TCO_RESOURCE_GCR_MEM;
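+ /*
+ * Presumably only this small window (the TCO/no-reboot control bits)
+ * inside the GCR space is needed by the version 5 iTCO_wdt.
+ */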
- res->start = ipcdev.gcr_base;
- res->end = res->start + ipcdev.gcr_size - 1;
+ res->start = ipcdev.gcr_base + TCO_PMC_OFFSET;
+ res->end = res->start + TCO_PMC_SIZE - 1;
ret = platform_device_add_resources(pdev, tco_res, ARRAY_SIZE(tco_res));
if (ret) {
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c
index 63b371d6e..91ae58510 100644
--- a/drivers/platform/x86/intel_pmic_gpio.c
+++ b/drivers/platform/x86/intel_pmic_gpio.c
@@ -1,6 +1,8 @@
/* Moorestown PMIC GPIO (access through IPC) driver
* Copyright (c) 2008 - 2009, Intel Corporation.
*
+ * Author: Alek Du <alek.du@intel.com>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -21,7 +23,6 @@
#define pr_fmt(fmt) "%s: " fmt, __func__
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
@@ -322,9 +323,4 @@ static int __init platform_pmic_gpio_init(void)
{
return platform_driver_register(&platform_pmic_gpio_driver);
}
-
subsys_initcall(platform_pmic_gpio_init);
-
-MODULE_AUTHOR("Alek Du <alek.du@intel.com>");
-MODULE_DESCRIPTION("Intel Moorestown PMIC GPIO driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c
index f5134acd6..ef29f18b1 100644
--- a/drivers/platform/x86/intel_telemetry_debugfs.c
+++ b/drivers/platform/x86/intel_telemetry_debugfs.c
@@ -32,6 +32,7 @@
#include <linux/suspend.h>
#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
#include <asm/intel_pmc_ipc.h>
#include <asm/intel_punit_ipc.h>
#include <asm/intel_telemetry.h>
@@ -78,7 +79,7 @@
#define TELEM_EVT_LEN(x) (sizeof(x)/sizeof((x)[0]))
#define TELEM_DEBUGFS_CPU(model, data) \
- { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&data}
+ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&data}
#define TELEM_CHECK_AND_PARSE_EVTS(EVTID, EVTNUM, BUF, EVTLOG, EVTDAT, MASK) { \
if (evtlog[index].telem_evtid == (EVTID)) { \
@@ -331,7 +332,7 @@ static struct telemetry_debugfs_conf telem_apl_debugfs_conf = {
};
static const struct x86_cpu_id telemetry_debugfs_cpu_ids[] = {
- TELEM_DEBUGFS_CPU(0x5c, telem_apl_debugfs_conf),
+ TELEM_DEBUGFS_CPU(INTEL_FAM6_ATOM_GOLDMONT, telem_apl_debugfs_conf),
{}
};
diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c
index 09c84a2b1..6ebdbd2b0 100644
--- a/drivers/platform/x86/intel_telemetry_pltdrv.c
+++ b/drivers/platform/x86/intel_telemetry_pltdrv.c
@@ -28,6 +28,7 @@
#include <linux/platform_device.h>
#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
#include <asm/intel_pmc_ipc.h>
#include <asm/intel_punit_ipc.h>
#include <asm/intel_telemetry.h>
@@ -82,7 +83,7 @@
#define TELEM_SET_VERBOSITY_BITS(x, y) ((x) |= ((y) << 27))
#define TELEM_CPU(model, data) \
- { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&data }
+ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&data }
enum telemetry_action {
TELEM_UPDATE = 0,
@@ -163,7 +164,7 @@ static struct telemetry_plt_config telem_apl_config = {
};
static const struct x86_cpu_id telemetry_cpu_ids[] = {
- TELEM_CPU(0x5c, telem_apl_config),
+ TELEM_CPU(INTEL_FAM6_ATOM_GOLDMONT, telem_apl_config),
{}
};
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 01e12d221..9d60a40d8 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -4,7 +4,7 @@
* Copyright (C) 2002-2004 John Belmonte
* Copyright (C) 2008 Philip Langdale
* Copyright (C) 2010 Pierre Ducroquet
- * Copyright (C) 2014-2015 Azael Avalos
+ * Copyright (C) 2014-2016 Azael Avalos
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -31,7 +31,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define TOSHIBA_ACPI_VERSION "0.23"
+#define TOSHIBA_ACPI_VERSION "0.24"
#define PROC_INTERFACE_VERSION 1
#include <linux/kernel.h>
@@ -53,6 +53,7 @@
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/rfkill.h>
+#include <linux/iio/iio.h>
#include <linux/toshiba.h>
#include <acpi/video.h>
@@ -134,6 +135,7 @@ MODULE_LICENSE("GPL");
/* Field definitions */
#define HCI_ACCEL_MASK 0x7fff
+#define HCI_ACCEL_DIRECTION_MASK 0x8000
#define HCI_HOTKEY_DISABLE 0x0b
#define HCI_HOTKEY_ENABLE 0x09
#define HCI_HOTKEY_SPECIAL_FUNCTIONS 0x10
@@ -178,6 +180,7 @@ struct toshiba_acpi_dev {
struct led_classdev eco_led;
struct miscdevice miscdev;
struct rfkill *wwan_rfk;
+ struct iio_dev *indio_dev;
int force_fan;
int last_key_event;
@@ -1958,28 +1961,6 @@ static ssize_t touchpad_show(struct device *dev,
}
static DEVICE_ATTR_RW(touchpad);
-static ssize_t position_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
- u32 xyval, zval, tmp;
- u16 x, y, z;
- int ret;
-
- xyval = zval = 0;
- ret = toshiba_accelerometer_get(toshiba, &xyval, &zval);
- if (ret < 0)
- return ret;
-
- x = xyval & HCI_ACCEL_MASK;
- tmp = xyval >> HCI_MISC_SHIFT;
- y = tmp & HCI_ACCEL_MASK;
- z = zval & HCI_ACCEL_MASK;
-
- return sprintf(buf, "%d %d %d\n", x, y, z);
-}
-static DEVICE_ATTR_RO(position);
-
static ssize_t usb_sleep_charge_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -2350,7 +2331,6 @@ static struct attribute *toshiba_attributes[] = {
&dev_attr_available_kbd_modes.attr,
&dev_attr_kbd_backlight_timeout.attr,
&dev_attr_touchpad.attr,
- &dev_attr_position.attr,
&dev_attr_usb_sleep_charge.attr,
&dev_attr_sleep_functions_on_battery.attr,
&dev_attr_usb_rapid_charge.attr,
@@ -2377,8 +2357,6 @@ static umode_t toshiba_sysfs_is_visible(struct kobject *kobj,
exists = (drv->kbd_mode == SCI_KBD_MODE_AUTO) ? true : false;
else if (attr == &dev_attr_touchpad.attr)
exists = (drv->touchpad_supported) ? true : false;
- else if (attr == &dev_attr_position.attr)
- exists = (drv->accelerometer_supported) ? true : false;
else if (attr == &dev_attr_usb_sleep_charge.attr)
exists = (drv->usb_sleep_charge_supported) ? true : false;
else if (attr == &dev_attr_sleep_functions_on_battery.attr)
@@ -2420,6 +2398,81 @@ static void toshiba_acpi_kbd_bl_work(struct work_struct *work)
}
/*
+ * IIO device
+ */
+
+enum toshiba_iio_accel_chan {
+ AXIS_X,
+ AXIS_Y,
+ AXIS_Z
+};
+
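+/*
+ * Readings are sign-magnitude: bit 15 (HCI_ACCEL_DIRECTION_MASK) holds
+ * the direction and the low 15 bits (HCI_ACCEL_MASK) the magnitude,
+ * hence the conditional negation below.
+ */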
+static int toshiba_iio_accel_get_axis(enum toshiba_iio_accel_chan chan)
+{
+ u32 xyval, zval;
+ int ret;
+
+ ret = toshiba_accelerometer_get(toshiba_acpi, &xyval, &zval);
+ if (ret < 0)
+ return ret;
+
+ switch (chan) {
+ case AXIS_X:
+ return xyval & HCI_ACCEL_DIRECTION_MASK ?
+ -(xyval & HCI_ACCEL_MASK) : xyval & HCI_ACCEL_MASK;
+ case AXIS_Y:
+ return (xyval >> HCI_MISC_SHIFT) & HCI_ACCEL_DIRECTION_MASK ?
+ -((xyval >> HCI_MISC_SHIFT) & HCI_ACCEL_MASK) :
+ (xyval >> HCI_MISC_SHIFT) & HCI_ACCEL_MASK;
+ case AXIS_Z:
+ return zval & HCI_ACCEL_DIRECTION_MASK ?
+ -(zval & HCI_ACCEL_MASK) : zval & HCI_ACCEL_MASK;
+ }
+
+ return ret;
+}
+
+static int toshiba_iio_accel_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = toshiba_iio_accel_get_axis(chan->channel);
+ if (ret == -EIO || ret == -ENODEV)
+ return ret;
+
+ *val = ret;
+
+ return IIO_VAL_INT;
+ }
+
+ return -EINVAL;
+}
+
+#define TOSHIBA_IIO_ACCEL_CHANNEL(axis, chan) { \
+ .type = IIO_ACCEL, \
+ .modified = 1, \
+ .channel = chan, \
+ .channel2 = IIO_MOD_##axis, \
+ .output = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+}
+
+static const struct iio_chan_spec toshiba_iio_accel_channels[] = {
+ TOSHIBA_IIO_ACCEL_CHANNEL(X, AXIS_X),
+ TOSHIBA_IIO_ACCEL_CHANNEL(Y, AXIS_Y),
+ TOSHIBA_IIO_ACCEL_CHANNEL(Z, AXIS_Z),
+};
+
+static const struct iio_info toshiba_iio_accel_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &toshiba_iio_accel_read_raw,
+};
+
+/*
* Misc device
*/
static int toshiba_acpi_smm_bridge(SMMRegisters *regs)
@@ -2904,6 +2957,11 @@ static int toshiba_acpi_remove(struct acpi_device *acpi_dev)
remove_toshiba_proc_entries(dev);
+ if (dev->accelerometer_supported && dev->indio_dev) {
+ iio_device_unregister(dev->indio_dev);
+ iio_device_free(dev->indio_dev);
+ }
+
if (dev->sysfs_created)
sysfs_remove_group(&dev->acpi_dev->dev.kobj,
&toshiba_attr_group);
@@ -3051,6 +3109,30 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
dev->touchpad_supported = !ret;
toshiba_accelerometer_available(dev);
+ if (dev->accelerometer_supported) {
+ dev->indio_dev = iio_device_alloc(sizeof(*dev));
+ if (!dev->indio_dev) {
+ pr_err("Unable to allocate iio device\n");
+ goto iio_error;
+ }
+
+ pr_info("Registering Toshiba accelerometer iio device\n");
+
+ dev->indio_dev->info = &toshiba_iio_accel_info;
+ dev->indio_dev->name = "Toshiba accelerometer";
+ dev->indio_dev->dev.parent = &acpi_dev->dev;
+ dev->indio_dev->modes = INDIO_DIRECT_MODE;
+ dev->indio_dev->channels = toshiba_iio_accel_channels;
+ dev->indio_dev->num_channels =
+ ARRAY_SIZE(toshiba_iio_accel_channels);
+
+ ret = iio_device_register(dev->indio_dev);
+ if (ret < 0) {
+ pr_err("Unable to register iio device\n");
+ iio_device_free(dev->indio_dev);
+ }
+ }
+iio_error:
toshiba_usb_sleep_charge_available(dev);
diff --git a/drivers/pnp/isapnp/proc.c b/drivers/pnp/isapnp/proc.c
index 5edee645d..262285e48 100644
--- a/drivers/pnp/isapnp/proc.c
+++ b/drivers/pnp/isapnp/proc.c
@@ -21,7 +21,7 @@
#include <linux/isapnp.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
extern struct pnp_protocol isapnp_protocol;
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index 81603d990..c38a5b973 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -46,7 +46,6 @@
*/
#include <linux/types.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/kernel.h>
@@ -61,6 +60,7 @@
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/freezer.h>
+#include <linux/kmod.h>
#include <linux/kthread.h>
#include <asm/page.h>
@@ -587,6 +587,6 @@ static int __init pnpbios_thread_init(void)
}
/* Start the kernel thread later: */
-module_init(pnpbios_thread_init);
+device_initcall(pnpbios_thread_init);
EXPORT_SYMBOL(pnpbios_protocol);
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 421770dda..acd4a1524 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -309,6 +309,7 @@ config BATTERY_RX51
config CHARGER_ISP1704
tristate "ISP1704 USB Charger Detection"
depends on USB_PHY
+ depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
help
Say Y to enable support for USB Charger Detection with
ISP1707/ISP1704 USB transceivers.
@@ -393,6 +394,7 @@ config CHARGER_QCOM_SMBB
tristate "Qualcomm Switch-Mode Battery Charger and Boost"
depends on MFD_SPMI_PMIC || COMPILE_TEST
depends on OF
+ depends on EXTCON
help
Say Y to include support for the Switch-Mode Battery Charger and
Boost (SMBB) hardware found in Qualcomm PM8941 PMICs. The charger
diff --git a/drivers/power/axp20x_usb_power.c b/drivers/power/axp20x_usb_power.c
index 421a90b83..6af6feb70 100644
--- a/drivers/power/axp20x_usb_power.c
+++ b/drivers/power/axp20x_usb_power.c
@@ -42,6 +42,7 @@
#define AXP20X_VBUS_MON_VBUS_VALID BIT(3)
struct axp20x_usb_power {
+ struct device_node *np;
struct regmap *regmap;
struct power_supply *supply;
};
@@ -85,7 +86,12 @@ static int axp20x_usb_power_get_property(struct power_supply *psy,
switch (v & AXP20X_VBUS_CLIMIT_MASK) {
case AXP20X_VBUC_CLIMIT_100mA:
- val->intval = 100000;
+ if (of_device_is_compatible(power->np,
+ "x-powers,axp202-usb-power-supply")) {
+ val->intval = 100000;
+ } else {
+ val->intval = -1; /* No 100mA limit */
+ }
break;
case AXP20X_VBUC_CLIMIT_500mA:
val->intval = 500000;
@@ -122,16 +128,19 @@ static int axp20x_usb_power_get_property(struct power_supply *psy,
break;
}
- ret = regmap_read(power->regmap, AXP20X_USB_OTG_STATUS, &v);
- if (ret)
- return ret;
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
- if (!(v & AXP20X_USB_STATUS_VBUS_VALID)) {
- val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
- break;
- }
+ if (of_device_is_compatible(power->np,
+ "x-powers,axp202-usb-power-supply")) {
+ ret = regmap_read(power->regmap,
+ AXP20X_USB_OTG_STATUS, &v);
+ if (ret)
+ return ret;
- val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ if (!(v & AXP20X_USB_STATUS_VBUS_VALID))
+ val->intval =
+ POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ }
break;
case POWER_SUPPLY_PROP_PRESENT:
val->intval = !!(input & AXP20X_PWR_STATUS_VBUS_PRESENT);
@@ -156,6 +165,14 @@ static enum power_supply_property axp20x_usb_power_properties[] = {
POWER_SUPPLY_PROP_CURRENT_NOW,
};
+static enum power_supply_property axp22x_usb_power_properties[] = {
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+};
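+/*
+ * The AXP22x has no VBUS ADC, which is presumably why VOLTAGE_NOW and
+ * CURRENT_NOW are omitted from this set.
+ */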
+
static const struct power_supply_desc axp20x_usb_power_desc = {
.name = "axp20x-usb",
.type = POWER_SUPPLY_TYPE_USB,
@@ -164,13 +181,25 @@ static const struct power_supply_desc axp20x_usb_power_desc = {
.get_property = axp20x_usb_power_get_property,
};
+static const struct power_supply_desc axp22x_usb_power_desc = {
+ .name = "axp20x-usb",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .properties = axp22x_usb_power_properties,
+ .num_properties = ARRAY_SIZE(axp22x_usb_power_properties),
+ .get_property = axp20x_usb_power_get_property,
+};
+
static int axp20x_usb_power_probe(struct platform_device *pdev)
{
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
struct power_supply_config psy_cfg = {};
struct axp20x_usb_power *power;
- static const char * const irq_names[] = { "VBUS_PLUGIN",
- "VBUS_REMOVAL", "VBUS_VALID", "VBUS_NOT_VALID" };
+ static const char * const axp20x_irq_names[] = { "VBUS_PLUGIN",
+ "VBUS_REMOVAL", "VBUS_VALID", "VBUS_NOT_VALID", NULL };
+ static const char * const axp22x_irq_names[] = {
+ "VBUS_PLUGIN", "VBUS_REMOVAL", NULL };
+ static const char * const *irq_names;
+ const struct power_supply_desc *usb_power_desc;
int i, irq, ret;
if (!of_device_is_available(pdev->dev.of_node))
@@ -185,31 +214,47 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
if (!power)
return -ENOMEM;
+ power->np = pdev->dev.of_node;
power->regmap = axp20x->regmap;
- /* Enable vbus valid checking */
- ret = regmap_update_bits(power->regmap, AXP20X_VBUS_MON,
- AXP20X_VBUS_MON_VBUS_VALID, AXP20X_VBUS_MON_VBUS_VALID);
- if (ret)
- return ret;
+ if (of_device_is_compatible(power->np,
+ "x-powers,axp202-usb-power-supply")) {
+ /* Enable vbus valid checking */
+ ret = regmap_update_bits(power->regmap, AXP20X_VBUS_MON,
+ AXP20X_VBUS_MON_VBUS_VALID,
+ AXP20X_VBUS_MON_VBUS_VALID);
+ if (ret)
+ return ret;
- /* Enable vbus voltage and current measurement */
- ret = regmap_update_bits(power->regmap, AXP20X_ADC_EN1,
+ /* Enable vbus voltage and current measurement */
+ ret = regmap_update_bits(power->regmap, AXP20X_ADC_EN1,
AXP20X_ADC_EN1_VBUS_CURR | AXP20X_ADC_EN1_VBUS_VOLT,
AXP20X_ADC_EN1_VBUS_CURR | AXP20X_ADC_EN1_VBUS_VOLT);
- if (ret)
- return ret;
+ if (ret)
+ return ret;
+
+ usb_power_desc = &axp20x_usb_power_desc;
+ irq_names = axp20x_irq_names;
+ } else if (of_device_is_compatible(power->np,
+ "x-powers,axp221-usb-power-supply")) {
+ usb_power_desc = &axp22x_usb_power_desc;
+ irq_names = axp22x_irq_names;
+ } else {
+ dev_err(&pdev->dev, "Unsupported AXP variant: %ld\n",
+ axp20x->variant);
+ return -EINVAL;
+ }
psy_cfg.of_node = pdev->dev.of_node;
psy_cfg.drv_data = power;
- power->supply = devm_power_supply_register(&pdev->dev,
- &axp20x_usb_power_desc, &psy_cfg);
+ power->supply = devm_power_supply_register(&pdev->dev, usb_power_desc,
+ &psy_cfg);
if (IS_ERR(power->supply))
return PTR_ERR(power->supply);
/* Request irqs after registering, as irqs may trigger immediately */
- for (i = 0; i < ARRAY_SIZE(irq_names); i++) {
+ for (i = 0; irq_names[i]; i++) {
irq = platform_get_irq_byname(pdev, irq_names[i]);
if (irq < 0) {
dev_warn(&pdev->dev, "No IRQ for %s: %d\n",
@@ -229,6 +274,7 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
static const struct of_device_id axp20x_usb_power_match[] = {
{ .compatible = "x-powers,axp202-usb-power-supply" },
+ { .compatible = "x-powers,axp221-usb-power-supply" },
{ }
};
MODULE_DEVICE_TABLE(of, axp20x_usb_power_match);
diff --git a/drivers/power/axp288_charger.c b/drivers/power/axp288_charger.c
index e4d569f57..4030eeb7c 100644
--- a/drivers/power/axp288_charger.c
+++ b/drivers/power/axp288_charger.c
@@ -129,10 +129,6 @@
#define AXP288_EXTCON_DEV_NAME "axp288_extcon"
-#define AXP288_EXTCON_SLOW_CHARGER "SLOW-CHARGER"
-#define AXP288_EXTCON_DOWNSTREAM_CHARGER "CHARGE-DOWNSTREAM"
-#define AXP288_EXTCON_FAST_CHARGER "FAST-CHARGER"
-
enum {
VBUS_OV_IRQ = 0,
CHARGE_DONE_IRQ,
@@ -158,7 +154,7 @@ struct axp288_chrg_info {
/* OTG/Host mode */
struct {
struct work_struct work;
- struct extcon_specific_cable_nb cable;
+ struct extcon_dev *cable;
struct notifier_block id_nb;
bool id_short;
} otg;
@@ -586,17 +582,15 @@ static void axp288_charger_extcon_evt_worker(struct work_struct *work)
bool old_connected = info->cable.connected;
/* Determine cable/charger type */
- if (extcon_get_cable_state(edev, AXP288_EXTCON_SLOW_CHARGER) > 0) {
+ if (extcon_get_cable_state_(edev, EXTCON_CHG_USB_SDP) > 0) {
dev_dbg(&info->pdev->dev, "USB SDP charger is connected");
info->cable.connected = true;
info->cable.chg_type = POWER_SUPPLY_TYPE_USB;
- } else if (extcon_get_cable_state(edev,
- AXP288_EXTCON_DOWNSTREAM_CHARGER) > 0) {
+ } else if (extcon_get_cable_state_(edev, EXTCON_CHG_USB_CDP) > 0) {
dev_dbg(&info->pdev->dev, "USB CDP charger is connected");
info->cable.connected = true;
info->cable.chg_type = POWER_SUPPLY_TYPE_USB_CDP;
- } else if (extcon_get_cable_state(edev,
- AXP288_EXTCON_FAST_CHARGER) > 0) {
+ } else if (extcon_get_cable_state_(edev, EXTCON_CHG_USB_DCP) > 0) {
dev_dbg(&info->pdev->dev, "USB DCP charger is connected");
info->cable.connected = true;
info->cable.chg_type = POWER_SUPPLY_TYPE_USB_DCP;
@@ -692,8 +686,8 @@ static int axp288_charger_handle_otg_evt(struct notifier_block *nb,
{
struct axp288_chrg_info *info =
container_of(nb, struct axp288_chrg_info, otg.id_nb);
- struct extcon_dev *edev = param;
- int usb_host = extcon_get_cable_state(edev, "USB-Host");
+ struct extcon_dev *edev = info->otg.cable;
+ int usb_host = extcon_get_cable_state_(edev, EXTCON_USB_HOST);
dev_dbg(&info->pdev->dev, "external connector USB-Host is %s\n",
usb_host ? "attached" : "detached");
@@ -848,10 +842,33 @@ static int axp288_charger_probe(struct platform_device *pdev)
/* Register for extcon notification */
INIT_WORK(&info->cable.work, axp288_charger_extcon_evt_worker);
info->cable.nb.notifier_call = axp288_charger_handle_cable_evt;
- ret = extcon_register_notifier(info->cable.edev, EXTCON_NONE, &info->cable.nb);
+ ret = extcon_register_notifier(info->cable.edev, EXTCON_CHG_USB_SDP,
+ &info->cable.nb);
+ if (ret) {
+ dev_err(&info->pdev->dev,
+ "failed to register extcon notifier for SDP %d\n", ret);
+ return ret;
+ }
+
+ ret = extcon_register_notifier(info->cable.edev, EXTCON_CHG_USB_CDP,
+ &info->cable.nb);
+ if (ret) {
+ dev_err(&info->pdev->dev,
+ "failed to register extcon notifier for CDP %d\n", ret);
+ extcon_unregister_notifier(info->cable.edev,
+ EXTCON_CHG_USB_SDP, &info->cable.nb);
+ return ret;
+ }
+
+ ret = extcon_register_notifier(info->cable.edev, EXTCON_CHG_USB_DCP,
+ &info->cable.nb);
if (ret) {
dev_err(&info->pdev->dev,
- "failed to register extcon notifier %d\n", ret);
+ "failed to register extcon notifier for DCP %d\n", ret);
+ extcon_unregister_notifier(info->cable.edev,
+ EXTCON_CHG_USB_SDP, &info->cable.nb);
+ extcon_unregister_notifier(info->cable.edev,
+ EXTCON_CHG_USB_CDP, &info->cable.nb);
return ret;
}
@@ -871,14 +888,14 @@ static int axp288_charger_probe(struct platform_device *pdev)
/* Register for OTG notification */
INIT_WORK(&info->otg.work, axp288_charger_otg_evt_worker);
info->otg.id_nb.notifier_call = axp288_charger_handle_otg_evt;
- ret = extcon_register_interest(&info->otg.cable, NULL, "USB-Host",
+ ret = extcon_register_notifier(info->otg.cable, EXTCON_USB_HOST,
&info->otg.id_nb);
if (ret)
dev_warn(&pdev->dev, "failed to register otg notifier\n");
- if (info->otg.cable.edev)
- info->otg.id_short = extcon_get_cable_state(
- info->otg.cable.edev, "USB-Host");
+ if (info->otg.cable)
+ info->otg.id_short = extcon_get_cable_state_(
+ info->otg.cable, EXTCON_USB_HOST);
/* Register charger interrupts */
for (i = 0; i < CHRG_INTR_END; i++) {
@@ -905,11 +922,17 @@ static int axp288_charger_probe(struct platform_device *pdev)
return 0;
intr_reg_failed:
- if (info->otg.cable.edev)
- extcon_unregister_interest(&info->otg.cable);
+ if (info->otg.cable)
+ extcon_unregister_notifier(info->otg.cable, EXTCON_USB_HOST,
+ &info->otg.id_nb);
power_supply_unregister(info->psy_usb);
psy_reg_failed:
- extcon_unregister_notifier(info->cable.edev, EXTCON_NONE, &info->cable.nb);
+ extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_SDP,
+ &info->cable.nb);
+ extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_CDP,
+ &info->cable.nb);
+ extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_DCP,
+ &info->cable.nb);
return ret;
}
@@ -917,10 +940,16 @@ static int axp288_charger_remove(struct platform_device *pdev)
{
struct axp288_chrg_info *info = dev_get_drvdata(&pdev->dev);
- if (info->otg.cable.edev)
- extcon_unregister_interest(&info->otg.cable);
+ if (info->otg.cable)
+ extcon_unregister_notifier(info->otg.cable, EXTCON_USB_HOST,
+ &info->otg.id_nb);
- extcon_unregister_notifier(info->cable.edev, EXTCON_NONE, &info->cable.nb);
+ extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_SDP,
+ &info->cable.nb);
+ extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_CDP,
+ &info->cable.nb);
+ extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_DCP,
+ &info->cable.nb);
power_supply_unregister(info->psy_usb);
return 0;
diff --git a/drivers/power/bq27xxx_battery.c b/drivers/power/bq27xxx_battery.c
index 45f6ebf88..323d05a12 100644
--- a/drivers/power/bq27xxx_battery.c
+++ b/drivers/power/bq27xxx_battery.c
@@ -82,6 +82,7 @@
*
* These are indexes into a device's register mapping array.
*/
+
enum bq27xxx_reg_index {
BQ27XXX_REG_CTRL = 0, /* Control */
BQ27XXX_REG_TEMP, /* Temperature */
@@ -100,157 +101,144 @@ enum bq27xxx_reg_index {
BQ27XXX_REG_SOC, /* State-of-Charge */
BQ27XXX_REG_DCAP, /* Design Capacity */
BQ27XXX_REG_AP, /* Average Power */
+ BQ27XXX_REG_MAX, /* sentinel */
};
/* Register mappings */
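+/*
+ * Indexed by chip type, then by bq27xxx_reg_index; INVALID_REG_ADDR
+ * marks registers a given chip does not implement.
+ */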
-static u8 bq27000_regs[] = {
- 0x00, /* CONTROL */
- 0x06, /* TEMP */
- INVALID_REG_ADDR, /* INT TEMP - NA*/
- 0x08, /* VOLT */
- 0x14, /* AVG CURR */
- 0x0a, /* FLAGS */
- 0x16, /* TTE */
- 0x18, /* TTF */
- 0x1c, /* TTES */
- 0x26, /* TTECP */
- 0x0c, /* NAC */
- 0x12, /* LMD(FCC) */
- 0x2a, /* CYCT */
- 0x22, /* AE */
- 0x0b, /* SOC(RSOC) */
- 0x76, /* DCAP(ILMD) */
- 0x24, /* AP */
-};
-
-static u8 bq27010_regs[] = {
- 0x00, /* CONTROL */
- 0x06, /* TEMP */
- INVALID_REG_ADDR, /* INT TEMP - NA*/
- 0x08, /* VOLT */
- 0x14, /* AVG CURR */
- 0x0a, /* FLAGS */
- 0x16, /* TTE */
- 0x18, /* TTF */
- 0x1c, /* TTES */
- 0x26, /* TTECP */
- 0x0c, /* NAC */
- 0x12, /* LMD(FCC) */
- 0x2a, /* CYCT */
- INVALID_REG_ADDR, /* AE - NA */
- 0x0b, /* SOC(RSOC) */
- 0x76, /* DCAP(ILMD) */
- INVALID_REG_ADDR, /* AP - NA */
-};
-
-static u8 bq27500_regs[] = {
- 0x00, /* CONTROL */
- 0x06, /* TEMP */
- 0x28, /* INT TEMP */
- 0x08, /* VOLT */
- 0x14, /* AVG CURR */
- 0x0a, /* FLAGS */
- 0x16, /* TTE */
- INVALID_REG_ADDR, /* TTF - NA */
- 0x1a, /* TTES */
- INVALID_REG_ADDR, /* TTECP - NA */
- 0x0c, /* NAC */
- 0x12, /* LMD(FCC) */
- 0x2a, /* CYCT */
- INVALID_REG_ADDR, /* AE - NA */
- 0x2c, /* SOC(RSOC) */
- 0x3c, /* DCAP(ILMD) */
- INVALID_REG_ADDR, /* AP - NA */
-};
-
-static u8 bq27530_regs[] = {
- 0x00, /* CONTROL */
- 0x06, /* TEMP */
- 0x32, /* INT TEMP */
- 0x08, /* VOLT */
- 0x14, /* AVG CURR */
- 0x0a, /* FLAGS */
- 0x16, /* TTE */
- INVALID_REG_ADDR, /* TTF - NA */
- INVALID_REG_ADDR, /* TTES - NA */
- INVALID_REG_ADDR, /* TTECP - NA */
- 0x0c, /* NAC */
- 0x12, /* LMD(FCC) */
- 0x2a, /* CYCT */
- INVALID_REG_ADDR, /* AE - NA */
- 0x2c, /* SOC(RSOC) */
- INVALID_REG_ADDR, /* DCAP - NA */
- 0x24, /* AP */
-};
-
-static u8 bq27541_regs[] = {
- 0x00, /* CONTROL */
- 0x06, /* TEMP */
- 0x28, /* INT TEMP */
- 0x08, /* VOLT */
- 0x14, /* AVG CURR */
- 0x0a, /* FLAGS */
- 0x16, /* TTE */
- INVALID_REG_ADDR, /* TTF - NA */
- INVALID_REG_ADDR, /* TTES - NA */
- INVALID_REG_ADDR, /* TTECP - NA */
- 0x0c, /* NAC */
- 0x12, /* LMD(FCC) */
- 0x2a, /* CYCT */
- INVALID_REG_ADDR, /* AE - NA */
- 0x2c, /* SOC(RSOC) */
- 0x3c, /* DCAP */
- 0x24, /* AP */
-};
-
-static u8 bq27545_regs[] = {
- 0x00, /* CONTROL */
- 0x06, /* TEMP */
- 0x28, /* INT TEMP */
- 0x08, /* VOLT */
- 0x14, /* AVG CURR */
- 0x0a, /* FLAGS */
- 0x16, /* TTE */
- INVALID_REG_ADDR, /* TTF - NA */
- INVALID_REG_ADDR, /* TTES - NA */
- INVALID_REG_ADDR, /* TTECP - NA */
- 0x0c, /* NAC */
- 0x12, /* LMD(FCC) */
- 0x2a, /* CYCT */
- INVALID_REG_ADDR, /* AE - NA */
- 0x2c, /* SOC(RSOC) */
- INVALID_REG_ADDR, /* DCAP - NA */
- 0x24, /* AP */
-};
-
-static u8 bq27421_regs[] = {
- 0x00, /* CONTROL */
- 0x02, /* TEMP */
- 0x1e, /* INT TEMP */
- 0x04, /* VOLT */
- 0x10, /* AVG CURR */
- 0x06, /* FLAGS */
- INVALID_REG_ADDR, /* TTE - NA */
- INVALID_REG_ADDR, /* TTF - NA */
- INVALID_REG_ADDR, /* TTES - NA */
- INVALID_REG_ADDR, /* TTECP - NA */
- 0x08, /* NAC */
- 0x0e, /* FCC */
- INVALID_REG_ADDR, /* CYCT - NA */
- INVALID_REG_ADDR, /* AE - NA */
- 0x1c, /* SOC */
- 0x3c, /* DCAP */
- 0x18, /* AP */
-};
-
-static u8 *bq27xxx_regs[] = {
- [BQ27000] = bq27000_regs,
- [BQ27010] = bq27010_regs,
- [BQ27500] = bq27500_regs,
- [BQ27530] = bq27530_regs,
- [BQ27541] = bq27541_regs,
- [BQ27545] = bq27545_regs,
- [BQ27421] = bq27421_regs,
+static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = {
+ [BQ27000] = {
+ [BQ27XXX_REG_CTRL] = 0x00,
+ [BQ27XXX_REG_TEMP] = 0x06,
+ [BQ27XXX_REG_INT_TEMP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_VOLT] = 0x08,
+ [BQ27XXX_REG_AI] = 0x14,
+ [BQ27XXX_REG_FLAGS] = 0x0a,
+ [BQ27XXX_REG_TTE] = 0x16,
+ [BQ27XXX_REG_TTF] = 0x18,
+ [BQ27XXX_REG_TTES] = 0x1c,
+ [BQ27XXX_REG_TTECP] = 0x26,
+ [BQ27XXX_REG_NAC] = 0x0c,
+ [BQ27XXX_REG_FCC] = 0x12,
+ [BQ27XXX_REG_CYCT] = 0x2a,
+ [BQ27XXX_REG_AE] = 0x22,
+ [BQ27XXX_REG_SOC] = 0x0b,
+ [BQ27XXX_REG_DCAP] = 0x76,
+ [BQ27XXX_REG_AP] = 0x24,
+ },
+ [BQ27010] = {
+ [BQ27XXX_REG_CTRL] = 0x00,
+ [BQ27XXX_REG_TEMP] = 0x06,
+ [BQ27XXX_REG_INT_TEMP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_VOLT] = 0x08,
+ [BQ27XXX_REG_AI] = 0x14,
+ [BQ27XXX_REG_FLAGS] = 0x0a,
+ [BQ27XXX_REG_TTE] = 0x16,
+ [BQ27XXX_REG_TTF] = 0x18,
+ [BQ27XXX_REG_TTES] = 0x1c,
+ [BQ27XXX_REG_TTECP] = 0x26,
+ [BQ27XXX_REG_NAC] = 0x0c,
+ [BQ27XXX_REG_FCC] = 0x12,
+ [BQ27XXX_REG_CYCT] = 0x2a,
+ [BQ27XXX_REG_AE] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_SOC] = 0x0b,
+ [BQ27XXX_REG_DCAP] = 0x76,
+ [BQ27XXX_REG_AP] = INVALID_REG_ADDR,
+ },
+ [BQ27500] = {
+ [BQ27XXX_REG_CTRL] = 0x00,
+ [BQ27XXX_REG_TEMP] = 0x06,
+ [BQ27XXX_REG_INT_TEMP] = 0x28,
+ [BQ27XXX_REG_VOLT] = 0x08,
+ [BQ27XXX_REG_AI] = 0x14,
+ [BQ27XXX_REG_FLAGS] = 0x0a,
+ [BQ27XXX_REG_TTE] = 0x16,
+ [BQ27XXX_REG_TTF] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_TTES] = 0x1a,
+ [BQ27XXX_REG_TTECP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_NAC] = 0x0c,
+ [BQ27XXX_REG_FCC] = 0x12,
+ [BQ27XXX_REG_CYCT] = 0x2a,
+ [BQ27XXX_REG_AE] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_SOC] = 0x2c,
+ [BQ27XXX_REG_DCAP] = 0x3c,
+ [BQ27XXX_REG_AP] = INVALID_REG_ADDR,
+ },
+ [BQ27530] = {
+ [BQ27XXX_REG_CTRL] = 0x00,
+ [BQ27XXX_REG_TEMP] = 0x06,
+ [BQ27XXX_REG_INT_TEMP] = 0x32,
+ [BQ27XXX_REG_VOLT] = 0x08,
+ [BQ27XXX_REG_AI] = 0x14,
+ [BQ27XXX_REG_FLAGS] = 0x0a,
+ [BQ27XXX_REG_TTE] = 0x16,
+ [BQ27XXX_REG_TTF] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_TTES] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_TTECP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_NAC] = 0x0c,
+ [BQ27XXX_REG_FCC] = 0x12,
+ [BQ27XXX_REG_CYCT] = 0x2a,
+ [BQ27XXX_REG_AE] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_SOC] = 0x2c,
+ [BQ27XXX_REG_DCAP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_AP] = 0x24,
+ },
+ [BQ27541] = {
+ [BQ27XXX_REG_CTRL] = 0x00,
+ [BQ27XXX_REG_TEMP] = 0x06,
+ [BQ27XXX_REG_INT_TEMP] = 0x28,
+ [BQ27XXX_REG_VOLT] = 0x08,
+ [BQ27XXX_REG_AI] = 0x14,
+ [BQ27XXX_REG_FLAGS] = 0x0a,
+ [BQ27XXX_REG_TTE] = 0x16,
+ [BQ27XXX_REG_TTF] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_TTES] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_TTECP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_NAC] = 0x0c,
+ [BQ27XXX_REG_FCC] = 0x12,
+ [BQ27XXX_REG_CYCT] = 0x2a,
+ [BQ27XXX_REG_AE] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_SOC] = 0x2c,
+ [BQ27XXX_REG_DCAP] = 0x3c,
+ [BQ27XXX_REG_AP] = 0x24,
+ },
+ [BQ27545] = {
+ [BQ27XXX_REG_CTRL] = 0x00,
+ [BQ27XXX_REG_TEMP] = 0x06,
+ [BQ27XXX_REG_INT_TEMP] = 0x28,
+ [BQ27XXX_REG_VOLT] = 0x08,
+ [BQ27XXX_REG_AI] = 0x14,
+ [BQ27XXX_REG_FLAGS] = 0x0a,
+ [BQ27XXX_REG_TTE] = 0x16,
+ [BQ27XXX_REG_TTF] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_TTES] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_TTECP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_NAC] = 0x0c,
+ [BQ27XXX_REG_FCC] = 0x12,
+ [BQ27XXX_REG_CYCT] = 0x2a,
+ [BQ27XXX_REG_AE] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_SOC] = 0x2c,
+ [BQ27XXX_REG_DCAP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_AP] = 0x24,
+ },
+ [BQ27421] = {
+ [BQ27XXX_REG_CTRL] = 0x00,
+ [BQ27XXX_REG_TEMP] = 0x02,
+ [BQ27XXX_REG_INT_TEMP] = 0x1e,
+ [BQ27XXX_REG_VOLT] = 0x04,
+ [BQ27XXX_REG_AI] = 0x10,
+ [BQ27XXX_REG_FLAGS] = 0x06,
+ [BQ27XXX_REG_TTE] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_TTF] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_TTES] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_TTECP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_NAC] = 0x08,
+ [BQ27XXX_REG_FCC] = 0x0e,
+ [BQ27XXX_REG_CYCT] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_AE] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_SOC] = 0x1c,
+ [BQ27XXX_REG_DCAP] = 0x3c,
+ [BQ27XXX_REG_AP] = 0x18,
+ },
};
static enum power_supply_property bq27000_battery_props[] = {
@@ -735,11 +723,8 @@ static void bq27xxx_battery_poll(struct work_struct *work)
bq27xxx_battery_update(di);
- if (poll_interval > 0) {
- /* The timer does not have to be accurate. */
- set_timer_slack(&di->work.timer, poll_interval * HZ / 4);
+ if (poll_interval > 0)
schedule_delayed_work(&di->work, poll_interval * HZ);
- }
}
/*
diff --git a/drivers/power/bq27xxx_battery_i2c.c b/drivers/power/bq27xxx_battery_i2c.c
index b8f8d3ade..85d4ea2a9 100644
--- a/drivers/power/bq27xxx_battery_i2c.c
+++ b/drivers/power/bq27xxx_battery_i2c.c
@@ -1,5 +1,5 @@
/*
- * SCI Reset driver for Keystone based devices
+ * BQ27xxx battery monitor I2C driver
*
* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
* Andrew F. Davis <afd@ti.com>
diff --git a/drivers/power/max8903_charger.c b/drivers/power/max8903_charger.c
index 17876caf3..fdc73d686 100644
--- a/drivers/power/max8903_charger.c
+++ b/drivers/power/max8903_charger.c
@@ -23,13 +23,16 @@
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
#include <linux/slab.h>
#include <linux/power_supply.h>
#include <linux/platform_device.h>
#include <linux/power/max8903_charger.h>
struct max8903_data {
- struct max8903_pdata pdata;
+ struct max8903_pdata *pdata;
struct device *dev;
struct power_supply *psy;
struct power_supply_desc psy_desc;
@@ -53,8 +56,8 @@ static int max8903_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
- if (data->pdata.chg) {
- if (gpio_get_value(data->pdata.chg) == 0)
+ if (gpio_is_valid(data->pdata->chg)) {
+ if (gpio_get_value(data->pdata->chg) == 0)
val->intval = POWER_SUPPLY_STATUS_CHARGING;
else if (data->usb_in || data->ta_in)
val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
@@ -75,13 +78,14 @@ static int max8903_get_property(struct power_supply *psy,
default:
return -EINVAL;
}
+
return 0;
}
static irqreturn_t max8903_dcin(int irq, void *_data)
{
struct max8903_data *data = _data;
- struct max8903_pdata *pdata = &data->pdata;
+ struct max8903_pdata *pdata = data->pdata;
bool ta_in;
enum power_supply_type old_type;
@@ -93,11 +97,11 @@ static irqreturn_t max8903_dcin(int irq, void *_data)
data->ta_in = ta_in;
/* Set Current-Limit-Mode 1:DC 0:USB */
- if (pdata->dcm)
+ if (gpio_is_valid(pdata->dcm))
gpio_set_value(pdata->dcm, ta_in ? 1 : 0);
/* Charger Enable / Disable (cen is negated) */
- if (pdata->cen)
+ if (gpio_is_valid(pdata->cen))
gpio_set_value(pdata->cen, ta_in ? 0 :
(data->usb_in ? 0 : 1));
@@ -122,7 +126,7 @@ static irqreturn_t max8903_dcin(int irq, void *_data)
static irqreturn_t max8903_usbin(int irq, void *_data)
{
struct max8903_data *data = _data;
- struct max8903_pdata *pdata = &data->pdata;
+ struct max8903_pdata *pdata = data->pdata;
bool usb_in;
enum power_supply_type old_type;
@@ -136,7 +140,7 @@ static irqreturn_t max8903_usbin(int irq, void *_data)
/* Do not touch Current-Limit-Mode */
/* Charger Enable / Disable (cen is negated) */
- if (pdata->cen)
+ if (gpio_is_valid(pdata->cen))
gpio_set_value(pdata->cen, usb_in ? 0 :
(data->ta_in ? 0 : 1));
@@ -161,7 +165,7 @@ static irqreturn_t max8903_usbin(int irq, void *_data)
static irqreturn_t max8903_fault(int irq, void *_data)
{
struct max8903_data *data = _data;
- struct max8903_pdata *pdata = &data->pdata;
+ struct max8903_pdata *pdata = data->pdata;
bool fault;
fault = gpio_get_value(pdata->flt) ? false : true;
@@ -179,57 +183,109 @@ static irqreturn_t max8903_fault(int irq, void *_data)
return IRQ_HANDLED;
}
-static int max8903_probe(struct platform_device *pdev)
+static struct max8903_pdata *max8903_parse_dt_data(struct device *dev)
{
- struct max8903_data *data;
+ struct device_node *np = dev->of_node;
+ struct max8903_pdata *pdata = NULL;
+
+ if (!np)
+ return NULL;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ pdata->dc_valid = false;
+ pdata->usb_valid = false;
+
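+ /* dc_valid/usb_valid are only set below if the dok/uok GPIOs exist. */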
+ pdata->cen = of_get_named_gpio(np, "cen-gpios", 0);
+ if (!gpio_is_valid(pdata->cen))
+ pdata->cen = -EINVAL;
+
+ pdata->chg = of_get_named_gpio(np, "chg-gpios", 0);
+ if (!gpio_is_valid(pdata->chg))
+ pdata->chg = -EINVAL;
+
+ pdata->flt = of_get_named_gpio(np, "flt-gpios", 0);
+ if (!gpio_is_valid(pdata->flt))
+ pdata->flt = -EINVAL;
+
+ pdata->usus = of_get_named_gpio(np, "usus-gpios", 0);
+ if (!gpio_is_valid(pdata->usus))
+ pdata->usus = -EINVAL;
+
+ pdata->dcm = of_get_named_gpio(np, "dcm-gpios", 0);
+ if (!gpio_is_valid(pdata->dcm))
+ pdata->dcm = -EINVAL;
+
+ pdata->dok = of_get_named_gpio(np, "dok-gpios", 0);
+ if (!gpio_is_valid(pdata->dok))
+ pdata->dok = -EINVAL;
+ else
+ pdata->dc_valid = true;
+
+ pdata->uok = of_get_named_gpio(np, "uok-gpios", 0);
+ if (!gpio_is_valid(pdata->uok))
+ pdata->uok = -EINVAL;
+ else
+ pdata->usb_valid = true;
+
+ return pdata;
+}
+
+static int max8903_setup_gpios(struct platform_device *pdev)
+{
+ struct max8903_data *data = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
struct max8903_pdata *pdata = pdev->dev.platform_data;
- struct power_supply_config psy_cfg = {};
int ret = 0;
int gpio;
int ta_in = 0;
int usb_in = 0;
- data = devm_kzalloc(dev, sizeof(struct max8903_data), GFP_KERNEL);
- if (data == NULL) {
- dev_err(dev, "Cannot allocate memory.\n");
- return -ENOMEM;
- }
- memcpy(&data->pdata, pdata, sizeof(struct max8903_pdata));
- data->dev = dev;
- platform_set_drvdata(pdev, data);
-
- if (pdata->dc_valid == false && pdata->usb_valid == false) {
- dev_err(dev, "No valid power sources.\n");
- return -EINVAL;
- }
-
if (pdata->dc_valid) {
- if (pdata->dok && gpio_is_valid(pdata->dok) &&
- pdata->dcm && gpio_is_valid(pdata->dcm)) {
+ if (gpio_is_valid(pdata->dok)) {
+ ret = devm_gpio_request(dev, pdata->dok,
+ data->psy_desc.name);
+ if (ret) {
+ dev_err(dev,
+ "Failed GPIO request for dok: %d err %d\n",
+ pdata->dok, ret);
+ return ret;
+ }
+
gpio = pdata->dok; /* PULL_UPed Interrupt */
ta_in = gpio_get_value(gpio) ? 0 : 1;
-
- gpio = pdata->dcm; /* Output */
- gpio_set_value(gpio, ta_in);
} else {
- dev_err(dev, "When DC is wired, DOK and DCM should"
- " be wired as well.\n");
+ dev_err(dev, "When DC is wired, DOK should be wired as well.\n");
return -EINVAL;
}
- } else {
- if (pdata->dcm) {
- if (gpio_is_valid(pdata->dcm))
- gpio_set_value(pdata->dcm, 0);
- else {
- dev_err(dev, "Invalid pin: dcm.\n");
- return -EINVAL;
- }
+ }
+
+ if (gpio_is_valid(pdata->dcm)) {
+ ret = devm_gpio_request(dev, pdata->dcm, data->psy_desc.name);
+ if (ret) {
+ dev_err(dev,
+ "Failed GPIO request for dcm: %d err %d\n",
+ pdata->dcm, ret);
+ return ret;
}
+
+ gpio = pdata->dcm; /* Output */
+ gpio_set_value(gpio, ta_in);
}
if (pdata->usb_valid) {
- if (pdata->uok && gpio_is_valid(pdata->uok)) {
+ if (gpio_is_valid(pdata->uok)) {
+ ret = devm_gpio_request(dev, pdata->uok,
+ data->psy_desc.name);
+ if (ret) {
+ dev_err(dev,
+ "Failed GPIO request for uok: %d err %d\n",
+ pdata->uok, ret);
+ return ret;
+ }
+
gpio = pdata->uok;
usb_in = gpio_get_value(gpio) ? 0 : 1;
} else {
@@ -239,33 +295,45 @@ static int max8903_probe(struct platform_device *pdev)
}
}
- if (pdata->cen) {
- if (gpio_is_valid(pdata->cen)) {
- gpio_set_value(pdata->cen, (ta_in || usb_in) ? 0 : 1);
- } else {
- dev_err(dev, "Invalid pin: cen.\n");
- return -EINVAL;
+ if (gpio_is_valid(pdata->cen)) {
+ ret = devm_gpio_request(dev, pdata->cen, data->psy_desc.name);
+ if (ret) {
+ dev_err(dev,
+ "Failed GPIO request for cen: %d err %d\n",
+ pdata->cen, ret);
+ return ret;
}
+
+ gpio_set_value(pdata->cen, (ta_in || usb_in) ? 0 : 1);
}
- if (pdata->chg) {
- if (!gpio_is_valid(pdata->chg)) {
- dev_err(dev, "Invalid pin: chg.\n");
- return -EINVAL;
+ if (gpio_is_valid(pdata->chg)) {
+ ret = devm_gpio_request(dev, pdata->chg, data->psy_desc.name);
+ if (ret) {
+ dev_err(dev,
+ "Failed GPIO request for chg: %d err %d\n",
+ pdata->chg, ret);
+ return ret;
}
}
- if (pdata->flt) {
- if (!gpio_is_valid(pdata->flt)) {
- dev_err(dev, "Invalid pin: flt.\n");
- return -EINVAL;
+ if (gpio_is_valid(pdata->flt)) {
+ ret = devm_gpio_request(dev, pdata->flt, data->psy_desc.name);
+ if (ret) {
+ dev_err(dev,
+ "Failed GPIO request for flt: %d err %d\n",
+ pdata->flt, ret);
+ return ret;
}
}
- if (pdata->usus) {
- if (!gpio_is_valid(pdata->usus)) {
- dev_err(dev, "Invalid pin: usus.\n");
- return -EINVAL;
+ if (gpio_is_valid(pdata->usus)) {
+ ret = devm_gpio_request(dev, pdata->usus, data->psy_desc.name);
+ if (ret) {
+ dev_err(dev,
+ "Failed GPIO request for usus: %d err %d\n",
+ pdata->usus, ret);
+ return ret;
}
}
@@ -273,14 +341,52 @@ static int max8903_probe(struct platform_device *pdev)
data->ta_in = ta_in;
data->usb_in = usb_in;
+ return 0;
+}
+
+static int max8903_probe(struct platform_device *pdev)
+{
+ struct max8903_data *data;
+ struct device *dev = &pdev->dev;
+ struct max8903_pdata *pdata = pdev->dev.platform_data;
+ struct power_supply_config psy_cfg = {};
+ int ret = 0;
+
+ data = devm_kzalloc(dev, sizeof(struct max8903_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ if (IS_ENABLED(CONFIG_OF) && !pdata && dev->of_node)
+ pdata = max8903_parse_dt_data(dev);
+
+ if (!pdata) {
+ dev_err(dev, "No platform data.\n");
+ return -EINVAL;
+ }
+
+ pdev->dev.platform_data = pdata;
+ data->pdata = pdata;
+ data->dev = dev;
+ platform_set_drvdata(pdev, data);
+
+ if (pdata->dc_valid == false && pdata->usb_valid == false) {
+ dev_err(dev, "No valid power sources.\n");
+ return -EINVAL;
+ }
+
+ ret = max8903_setup_gpios(pdev);
+ if (ret)
+ return ret;
+
data->psy_desc.name = "max8903_charger";
- data->psy_desc.type = (ta_in) ? POWER_SUPPLY_TYPE_MAINS :
- ((usb_in) ? POWER_SUPPLY_TYPE_USB :
+ data->psy_desc.type = (data->ta_in) ? POWER_SUPPLY_TYPE_MAINS :
+ ((data->usb_in) ? POWER_SUPPLY_TYPE_USB :
POWER_SUPPLY_TYPE_BATTERY);
data->psy_desc.get_property = max8903_get_property;
data->psy_desc.properties = max8903_charger_props;
data->psy_desc.num_properties = ARRAY_SIZE(max8903_charger_props);
+ psy_cfg.of_node = dev->of_node;
psy_cfg.drv_data = data;
data->psy = devm_power_supply_register(dev, &data->psy_desc, &psy_cfg);
@@ -315,7 +421,7 @@ static int max8903_probe(struct platform_device *pdev)
}
}
- if (pdata->flt) {
+ if (gpio_is_valid(pdata->flt)) {
ret = devm_request_threaded_irq(dev, gpio_to_irq(pdata->flt),
NULL, max8903_fault,
IRQF_TRIGGER_FALLING |
@@ -331,10 +437,17 @@ static int max8903_probe(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id max8903_match_ids[] = {
+ { .compatible = "maxim,max8903", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, max8903_match_ids);
+
static struct platform_driver max8903_driver = {
.probe = max8903_probe,
.driver = {
.name = "max8903-charger",
+ .of_match_table = max8903_match_ids
},
};
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index b13cd074c..a74d8ca38 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -491,8 +491,11 @@ int power_supply_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
- if (atomic_read(&psy->use_cnt) <= 0)
+ if (atomic_read(&psy->use_cnt) <= 0) {
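+ /*
+ * Distinguish a supply that is still registering (-EAGAIN) from one
+ * that is gone (-ENODEV).
+ */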
+ if (!psy->initialized)
+ return -EAGAIN;
return -ENODEV;
+ }
return psy->desc->get_property(psy, psp, val);
}
@@ -785,6 +788,7 @@ __power_supply_register(struct device *parent,
* after calling power_supply_register()).
*/
atomic_inc(&psy->use_cnt);
+ psy->initialized = true;
queue_delayed_work(system_power_efficient_wq,
&psy->deferred_register_work,
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 80fed9883..bcde8d134 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -83,7 +83,7 @@ static ssize_t power_supply_show_property(struct device *dev,
if (ret == -ENODATA)
dev_dbg(dev, "driver has no data for `%s' property\n",
attr->attr.name);
- else if (ret != -ENODEV)
+ else if (ret != -ENODEV && ret != -EAGAIN)
dev_err(dev, "driver failed to report `%s' property: %zd\n",
attr->attr.name, ret);
return ret;
diff --git a/drivers/power/qcom_smbb.c b/drivers/power/qcom_smbb.c
index 5eb1e9e54..b5896ba2a 100644
--- a/drivers/power/qcom_smbb.c
+++ b/drivers/power/qcom_smbb.c
@@ -34,6 +34,7 @@
#include <linux/power_supply.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/extcon.h>
#define SMBB_CHG_VMAX 0x040
#define SMBB_CHG_VSAFE 0x041
@@ -111,6 +112,7 @@ struct smbb_charger {
unsigned int revision;
unsigned int addr;
struct device *dev;
+ struct extcon_dev *edev;
bool dc_disabled;
bool jeita_ext_temp;
@@ -125,6 +127,11 @@ struct smbb_charger {
struct regmap *regmap;
};
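+/* Cables reported through extcon; VBUS validity toggles EXTCON_USB below. */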
+static const unsigned int smbb_usb_extcon_cable[] = {
+ EXTCON_USB,
+ EXTCON_NONE,
+};
+
static int smbb_vbat_weak_fn(unsigned int index)
{
return 2100000 + index * 100000;
@@ -371,6 +378,8 @@ static irqreturn_t smbb_usb_valid_handler(int irq, void *_data)
struct smbb_charger *chg = _data;
smbb_set_line_flag(chg, irq, STATUS_USBIN_VALID);
+ extcon_set_cable_state_(chg->edev, EXTCON_USB,
+ chg->status & STATUS_USBIN_VALID);
power_supply_changed(chg->usb_psy);
return IRQ_HANDLED;
@@ -849,6 +858,18 @@ static int smbb_charger_probe(struct platform_device *pdev)
return PTR_ERR(chg->usb_psy);
}
+ chg->edev = devm_extcon_dev_allocate(&pdev->dev, smbb_usb_extcon_cable);
+ if (IS_ERR(chg->edev)) {
+ dev_err(&pdev->dev, "failed to allocate extcon device\n");
+ return -ENOMEM;
+ }
+
+ rc = devm_extcon_dev_register(&pdev->dev, chg->edev);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "failed to register extcon device\n");
+ return rc;
+ }
+
if (!chg->dc_disabled) {
dc_cfg.drv_data = chg;
dc_cfg.supplied_to = smbb_bif;
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 9bb2622c2..c74c3f67b 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -46,6 +46,16 @@ config POWER_RESET_AXXIA
Say Y if you have an Axxia family SoC.
+config POWER_RESET_BRCMKONA
+ bool "Broadcom Kona reset driver"
+ depends on ARM || COMPILE_TEST
+ default ARCH_BCM_MOBILE
+ help
+ This driver provides restart support for Broadcom Kona chips.
+
+ Say Y here if you have a Broadcom Kona-based board and you wish
+ to have restart support.
+
config POWER_RESET_BRCMSTB
bool "Broadcom STB reset driver"
depends on ARM || MIPS || COMPILE_TEST
@@ -148,7 +158,8 @@ config POWER_RESET_XGENE
config POWER_RESET_KEYSTONE
bool "Keystone reset driver"
- depends on ARCH_KEYSTONE
+ depends on ARCH_KEYSTONE || COMPILE_TEST
+ depends on HAS_IOMEM
select MFD_SYSCON
help
Reboot support for the KEYSTONE SoCs.
@@ -183,5 +194,19 @@ config POWER_RESET_ZX
help
Reboot support for ZTE SoCs.
+config REBOOT_MODE
+ tristate
+
+config SYSCON_REBOOT_MODE
+ tristate "Generic SYSCON regmap reboot mode driver"
+ depends on OF
+ depends on MFD_SYSCON
+ select REBOOT_MODE
+ help
+ Say Y here to enable the reboot mode driver. It stores the requested
+ reboot mode in a SYSCON mapped register, which the bootloader can then
+ read to take a different action according to the mode.
+
endif
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index ab7aa8614..1be307c7f 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_POWER_RESET_AT91_POWEROFF) += at91-poweroff.o
obj-$(CONFIG_POWER_RESET_AT91_RESET) += at91-reset.o
obj-$(CONFIG_POWER_RESET_AT91_SAMA5D2_SHDWC) += at91-sama5d2_shdwc.o
obj-$(CONFIG_POWER_RESET_AXXIA) += axxia-reset.o
+obj-$(CONFIG_POWER_RESET_BRCMKONA) += brcm-kona-reset.o
obj-$(CONFIG_POWER_RESET_BRCMSTB) += brcmstb-reboot.o
obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
obj-$(CONFIG_POWER_RESET_GPIO_RESTART) += gpio-restart.o
@@ -21,3 +22,5 @@ obj-$(CONFIG_POWER_RESET_SYSCON) += syscon-reboot.o
obj-$(CONFIG_POWER_RESET_SYSCON_POWEROFF) += syscon-poweroff.o
obj-$(CONFIG_POWER_RESET_RMOBILE) += rmobile-reset.o
obj-$(CONFIG_POWER_RESET_ZX) += zx-reboot.o
+obj-$(CONFIG_REBOOT_MODE) += reboot-mode.o
+obj-$(CONFIG_SYSCON_REBOOT_MODE) += syscon-reboot-mode.o
diff --git a/drivers/power/reset/brcm-kona-reset.c b/drivers/power/reset/brcm-kona-reset.c
new file mode 100644
index 000000000..8eaa959d8
--- /dev/null
+++ b/drivers/power/reset/brcm-kona-reset.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/reboot.h>
+
+#define RSTMGR_REG_WR_ACCESS_OFFSET 0
+#define RSTMGR_REG_CHIP_SOFT_RST_OFFSET 4
+
+#define RSTMGR_WR_PASSWORD 0xa5a5
+#define RSTMGR_WR_PASSWORD_SHIFT 8
+#define RSTMGR_WR_ACCESS_ENABLE 1
+
+static void __iomem *kona_reset_base;
+
+static int kona_reset_handler(struct notifier_block *this,
+ unsigned long mode, void *cmd)
+{
+ /*
+ * A soft reset is triggered by writing a 0 to bit 0 of the soft reset
+ * register. To write to that register we must first write the password
+ * and the enable bit in the write access enable register.
+ */
+ writel((RSTMGR_WR_PASSWORD << RSTMGR_WR_PASSWORD_SHIFT) |
+ RSTMGR_WR_ACCESS_ENABLE,
+ kona_reset_base + RSTMGR_REG_WR_ACCESS_OFFSET);
+ writel(0, kona_reset_base + RSTMGR_REG_CHIP_SOFT_RST_OFFSET);
+
+ return NOTIFY_DONE;
+}
+
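+/* Priority 128 is the default priority for restart handlers. */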
+static struct notifier_block kona_reset_nb = {
+ .notifier_call = kona_reset_handler,
+ .priority = 128,
+};
+
+static int kona_reset_probe(struct platform_device *pdev)
+{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ kona_reset_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(kona_reset_base))
+ return PTR_ERR(kona_reset_base);
+
+ return register_restart_handler(&kona_reset_nb);
+}
+
+static const struct of_device_id of_match[] = {
+ { .compatible = "brcm,bcm21664-resetmgr" },
+ {},
+};
+
+static struct platform_driver bcm_kona_reset_driver = {
+ .probe = kona_reset_probe,
+ .driver = {
+ .name = "brcm-kona-reset",
+ .of_match_table = of_match,
+ },
+};
+
+builtin_platform_driver(bcm_kona_reset_driver);
diff --git a/drivers/power/reset/reboot-mode.c b/drivers/power/reset/reboot-mode.c
new file mode 100644
index 000000000..2dfbbce0f
--- /dev/null
+++ b/drivers/power/reset/reboot-mode.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/reboot.h>
+#include "reboot-mode.h"
+
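+/* Reboot modes come from DT properties of the form "mode-<name> = <magic>;". */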
+#define PREFIX "mode-"
+
+struct mode_info {
+ const char *mode;
+ u32 magic;
+ struct list_head list;
+};
+
+static unsigned int get_reboot_mode_magic(struct reboot_mode_driver *reboot,
+ const char *cmd)
+{
+ const char *normal = "normal";
+ int magic = 0;
+ struct mode_info *info;
+
+ if (!cmd)
+ cmd = normal;
+
+ list_for_each_entry(info, &reboot->head, list) {
+ if (!strcmp(info->mode, cmd)) {
+ magic = info->magic;
+ break;
+ }
+ }
+
+ return magic;
+}
+
+static int reboot_mode_notify(struct notifier_block *this,
+ unsigned long mode, void *cmd)
+{
+ struct reboot_mode_driver *reboot;
+ unsigned int magic;
+
+ reboot = container_of(this, struct reboot_mode_driver, reboot_notifier);
+ magic = get_reboot_mode_magic(reboot, cmd);
+ if (magic)
+ reboot->write(reboot, magic);
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * reboot_mode_register - register a reboot mode driver
+ * @reboot: reboot mode driver
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int reboot_mode_register(struct reboot_mode_driver *reboot)
+{
+ struct mode_info *info;
+ struct property *prop;
+ struct device_node *np = reboot->dev->of_node;
+ size_t len = strlen(PREFIX);
+ int ret;
+
+ INIT_LIST_HEAD(&reboot->head);
+
+ for_each_property_of_node(np, prop) {
+ if (strncmp(prop->name, PREFIX, len))
+ continue;
+
+ info = devm_kzalloc(reboot->dev, sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ if (of_property_read_u32(np, prop->name, &info->magic)) {
+ dev_err(reboot->dev, "reboot mode %s without magic number\n",
+ info->mode);
+ devm_kfree(reboot->dev, info);
+ continue;
+ }
+
+ info->mode = kstrdup_const(prop->name + len, GFP_KERNEL);
+ if (!info->mode) {
+ ret = -ENOMEM;
+ goto error;
+ } else if (info->mode[0] == '\0') {
+ kfree_const(info->mode);
+ ret = -EINVAL;
+ dev_err(reboot->dev, "invalid mode name(%s): too short!\n",
+ prop->name);
+ goto error;
+ }
+
+ list_add_tail(&info->list, &reboot->head);
+ }
+
+ reboot->reboot_notifier.notifier_call = reboot_mode_notify;
+ register_reboot_notifier(&reboot->reboot_notifier);
+
+ return 0;
+
+error:
+ list_for_each_entry(info, &reboot->head, list)
+ kfree_const(info->mode);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(reboot_mode_register);
+
+/**
+ * reboot_mode_unregister - unregister a reboot mode driver
+ * @reboot: reboot mode driver
+ */
+int reboot_mode_unregister(struct reboot_mode_driver *reboot)
+{
+ struct mode_info *info;
+
+ unregister_reboot_notifier(&reboot->reboot_notifier);
+
+ list_for_each_entry(info, &reboot->head, list)
+ kfree_const(info->mode);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(reboot_mode_unregister);
+
+MODULE_AUTHOR("Andy Yan <andy.yan@rock-chips.com>");
+MODULE_DESCRIPTION("System reboot mode core library");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/reset/reboot-mode.h b/drivers/power/reset/reboot-mode.h
new file mode 100644
index 000000000..2491bb71f
--- /dev/null
+++ b/drivers/power/reset/reboot-mode.h
@@ -0,0 +1,14 @@
+#ifndef __REBOOT_MODE_H__
+#define __REBOOT_MODE_H__
+
+struct reboot_mode_driver {
+ struct device *dev;
+ struct list_head head;
+ int (*write)(struct reboot_mode_driver *reboot, unsigned int magic);
+ struct notifier_block reboot_notifier;
+};
+
+int reboot_mode_register(struct reboot_mode_driver *reboot);
+int reboot_mode_unregister(struct reboot_mode_driver *reboot);
+
+#endif
diff --git a/drivers/power/reset/syscon-poweroff.c b/drivers/power/reset/syscon-poweroff.c
index 5560b0dbc..b68338399 100644
--- a/drivers/power/reset/syscon-poweroff.c
+++ b/drivers/power/reset/syscon-poweroff.c
@@ -30,7 +30,7 @@ static struct regmap *map;
static u32 offset;
static u32 mask;
-void syscon_poweroff(void)
+static void syscon_poweroff(void)
{
/* Issue the poweroff */
regmap_write(map, offset, mask);
diff --git a/drivers/power/reset/syscon-reboot-mode.c b/drivers/power/reset/syscon-reboot-mode.c
new file mode 100644
index 000000000..9e1cba5dd
--- /dev/null
+++ b/drivers/power/reset/syscon-reboot-mode.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include "reboot-mode.h"
+
+struct syscon_reboot_mode {
+ struct regmap *map;
+ struct reboot_mode_driver reboot;
+ u32 offset;
+ u32 mask;
+};
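+
+/*
+ * A minimal, purely illustrative device tree node for this driver,
+ * placed under a syscon parent ("offset" is required; "mask" is
+ * optional and defaults to 0xffffffff; the mode magics are made up):
+ *
+ * reboot-mode {
+ * compatible = "syscon-reboot-mode";
+ * offset = <0x40>;
+ * mode-normal = <0x0>;
+ * mode-recovery = <0x1>;
+ * };
+ */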
+
+static int syscon_reboot_mode_write(struct reboot_mode_driver *reboot,
+ unsigned int magic)
+{
+ struct syscon_reboot_mode *syscon_rbm;
+ int ret;
+
+ syscon_rbm = container_of(reboot, struct syscon_reboot_mode, reboot);
+
+ ret = regmap_update_bits(syscon_rbm->map, syscon_rbm->offset,
+ syscon_rbm->mask, magic);
+ if (ret < 0)
+ dev_err(reboot->dev, "update reboot mode bits failed\n");
+
+ return ret;
+}
+
+static int syscon_reboot_mode_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct syscon_reboot_mode *syscon_rbm;
+
+ syscon_rbm = devm_kzalloc(&pdev->dev, sizeof(*syscon_rbm), GFP_KERNEL);
+ if (!syscon_rbm)
+ return -ENOMEM;
+
+ syscon_rbm->reboot.dev = &pdev->dev;
+ syscon_rbm->reboot.write = syscon_reboot_mode_write;
+ syscon_rbm->mask = 0xffffffff;
+
+ dev_set_drvdata(&pdev->dev, syscon_rbm);
+
+ syscon_rbm->map = syscon_node_to_regmap(pdev->dev.parent->of_node);
+ if (IS_ERR(syscon_rbm->map))
+ return PTR_ERR(syscon_rbm->map);
+
+ if (of_property_read_u32(pdev->dev.of_node, "offset",
+ &syscon_rbm->offset))
+ return -EINVAL;
+
+ of_property_read_u32(pdev->dev.of_node, "mask", &syscon_rbm->mask);
+
+ ret = reboot_mode_register(&syscon_rbm->reboot);
+ if (ret)
+ dev_err(&pdev->dev, "can't register reboot mode\n");
+
+ return ret;
+}
+
+static int syscon_reboot_mode_remove(struct platform_device *pdev)
+{
+ struct syscon_reboot_mode *syscon_rbm = dev_get_drvdata(&pdev->dev);
+
+ return reboot_mode_unregister(&syscon_rbm->reboot);
+}
+
+static const struct of_device_id syscon_reboot_mode_of_match[] = {
+ { .compatible = "syscon-reboot-mode" },
+ {}
+};
+
+static struct platform_driver syscon_reboot_mode_driver = {
+ .probe = syscon_reboot_mode_probe,
+ .remove = syscon_reboot_mode_remove,
+ .driver = {
+ .name = "syscon-reboot-mode",
+ .of_match_table = syscon_reboot_mode_of_match,
+ },
+};
+module_platform_driver(syscon_reboot_mode_driver);
+
+MODULE_AUTHOR("Andy Yan <andy.yan@rock-chips.com>");
+MODULE_DESCRIPTION("SYSCON reboot mode driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/reset/vexpress-poweroff.c b/drivers/power/reset/vexpress-poweroff.c
index 6a9bf7089..102f95a09 100644
--- a/drivers/power/reset/vexpress-poweroff.c
+++ b/drivers/power/reset/vexpress-poweroff.c
@@ -74,8 +74,8 @@ static ssize_t vexpress_reset_active_store(struct device *dev,
return err ? err : count;
}
-DEVICE_ATTR(active, S_IRUGO | S_IWUSR, vexpress_reset_active_show,
- vexpress_reset_active_store);
+static DEVICE_ATTR(active, S_IRUGO | S_IWUSR, vexpress_reset_active_show,
+ vexpress_reset_active_store);
enum vexpress_reset_func { FUNC_RESET, FUNC_SHUTDOWN, FUNC_REBOOT };
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index b2766b867..243b233ff 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -33,6 +33,7 @@
#include <asm/processor.h>
#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
/* Local defines */
#define MSR_PLATFORM_POWER_LIMIT 0x0000065C
@@ -335,14 +336,14 @@ static int release_zone(struct powercap_zone *power_zone)
static int find_nr_power_limit(struct rapl_domain *rd)
{
- int i;
+ int i, nr_pl = 0;
for (i = 0; i < NR_POWER_LIMITS; i++) {
- if (rd->rpl[i].name == NULL)
- break;
+ if (rd->rpl[i].name)
+ nr_pl++;
}
- return i;
+ return nr_pl;
}
static int set_domain_enable(struct powercap_zone *power_zone, bool mode)
@@ -425,15 +426,38 @@ static const struct powercap_zone_ops zone_ops[] = {
},
};
-static int set_power_limit(struct powercap_zone *power_zone, int id,
+
+/*
+ * The constraint index used by powercap can differ from the power limit
+ * (PL) index because some PLs may be missing due to non-existent MSRs.
+ * So we need to convert here by finding only the valid PLs (those with a
+ * populated name).
+ */
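+/*
+ * For example (illustrative): if a domain exposes only PL1 and PL3,
+ * rpl[0].name and rpl[2].name are set, so constraint id 0 maps to PL
+ * index 0 and constraint id 1 maps to PL index 2.
+ */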
+static int constraint_to_pl(struct rapl_domain *rd, int cid)
+{
+ int i, j;
+
+ for (i = 0, j = 0; i < NR_POWER_LIMITS; i++) {
+ if ((rd->rpl[i].name) && j++ == cid) {
+ pr_debug("%s: index %d\n", __func__, i);
+ return i;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int set_power_limit(struct powercap_zone *power_zone, int cid,
u64 power_limit)
{
struct rapl_domain *rd;
struct rapl_package *rp;
int ret = 0;
+ int id;
get_online_cpus();
rd = power_zone_to_rapl_domain(power_zone);
+ id = constraint_to_pl(rd, cid);
+ if (id < 0) {
+ ret = id;
+ goto set_exit;
+ }
+
rp = rd->rp;
if (rd->state & DOMAIN_STATE_BIOS_LOCKED) {
@@ -460,16 +484,18 @@ set_exit:
return ret;
}
-static int get_current_power_limit(struct powercap_zone *power_zone, int id,
+static int get_current_power_limit(struct powercap_zone *power_zone, int cid,
u64 *data)
{
struct rapl_domain *rd;
u64 val;
int prim;
int ret = 0;
+ int id;
get_online_cpus();
rd = power_zone_to_rapl_domain(power_zone);
+ id = constraint_to_pl(rd, cid);
+ if (id < 0) {
+ put_online_cpus();
+ return id;
+ }
+
switch (rd->rpl[id].prim_id) {
case PL1_ENABLE:
prim = POWER_LIMIT1;
@@ -491,14 +517,17 @@ static int get_current_power_limit(struct powercap_zone *power_zone, int id,
return ret;
}
-static int set_time_window(struct powercap_zone *power_zone, int id,
+static int set_time_window(struct powercap_zone *power_zone, int cid,
u64 window)
{
struct rapl_domain *rd;
int ret = 0;
+ int id;
get_online_cpus();
rd = power_zone_to_rapl_domain(power_zone);
+ id = constraint_to_pl(rd, cid);
+ if (id < 0) {
+ put_online_cpus();
+ return id;
+ }
+
switch (rd->rpl[id].prim_id) {
case PL1_ENABLE:
rapl_write_data_raw(rd, TIME_WINDOW1, window);
@@ -513,14 +542,17 @@ static int set_time_window(struct powercap_zone *power_zone, int id,
return ret;
}
-static int get_time_window(struct powercap_zone *power_zone, int id, u64 *data)
+static int get_time_window(struct powercap_zone *power_zone, int cid, u64 *data)
{
struct rapl_domain *rd;
u64 val;
int ret = 0;
+ int id;
get_online_cpus();
rd = power_zone_to_rapl_domain(power_zone);
+ id = constraint_to_pl(rd, cid);
+ if (id < 0) {
+ put_online_cpus();
+ return id;
+ }
+
switch (rd->rpl[id].prim_id) {
case PL1_ENABLE:
ret = rapl_read_data_raw(rd, TIME_WINDOW1, true, &val);
@@ -539,15 +571,17 @@ static int get_time_window(struct powercap_zone *power_zone, int id, u64 *data)
return ret;
}
-static const char *get_constraint_name(struct powercap_zone *power_zone, int id)
+static const char *get_constraint_name(struct powercap_zone *power_zone, int cid)
{
- struct rapl_power_limit *rpl;
struct rapl_domain *rd;
+ int id;
rd = power_zone_to_rapl_domain(power_zone);
- rpl = (struct rapl_power_limit *) &rd->rpl[id];
+ id = constraint_to_pl(rd, cid);
+ if (id >= 0)
+ return rd->rpl[id].name;
- return rpl->name;
+ return NULL;
}
@@ -1096,27 +1130,36 @@ static const struct rapl_defaults rapl_defaults_cht = {
}
static const struct x86_cpu_id rapl_ids[] __initconst = {
- RAPL_CPU(0x2a, rapl_defaults_core),/* Sandy Bridge */
- RAPL_CPU(0x2d, rapl_defaults_core),/* Sandy Bridge EP */
- RAPL_CPU(0x37, rapl_defaults_byt),/* Valleyview */
- RAPL_CPU(0x3a, rapl_defaults_core),/* Ivy Bridge */
- RAPL_CPU(0x3c, rapl_defaults_core),/* Haswell */
- RAPL_CPU(0x3d, rapl_defaults_core),/* Broadwell */
- RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */
- RAPL_CPU(0x4f, rapl_defaults_hsw_server),/* Broadwell servers */
- RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
- RAPL_CPU(0x46, rapl_defaults_core),/* Haswell */
- RAPL_CPU(0x47, rapl_defaults_core),/* Broadwell-H */
- RAPL_CPU(0x4E, rapl_defaults_core),/* Skylake */
- RAPL_CPU(0x4C, rapl_defaults_cht),/* Braswell/Cherryview */
- RAPL_CPU(0x4A, rapl_defaults_tng),/* Tangier */
- RAPL_CPU(0x56, rapl_defaults_core),/* Future Xeon */
- RAPL_CPU(0x5A, rapl_defaults_ann),/* Annidale */
- RAPL_CPU(0X5C, rapl_defaults_core),/* Broxton */
- RAPL_CPU(0x5E, rapl_defaults_core),/* Skylake-H/S */
- RAPL_CPU(0x57, rapl_defaults_hsw_server),/* Knights Landing */
- RAPL_CPU(0x8E, rapl_defaults_core),/* Kabylake */
- RAPL_CPU(0x9E, rapl_defaults_core),/* Kabylake */
+ RAPL_CPU(INTEL_FAM6_SANDYBRIDGE, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_SANDYBRIDGE_X, rapl_defaults_core),
+
+ RAPL_CPU(INTEL_FAM6_IVYBRIDGE, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_IVYBRIDGE_X, rapl_defaults_core),
+
+ RAPL_CPU(INTEL_FAM6_HASWELL_CORE, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_HASWELL_ULT, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_HASWELL_GT3E, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_HASWELL_X, rapl_defaults_hsw_server),
+
+ RAPL_CPU(INTEL_FAM6_BROADWELL_CORE, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_BROADWELL_GT3E, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_BROADWELL_XEON_D, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_BROADWELL_X, rapl_defaults_hsw_server),
+
+ RAPL_CPU(INTEL_FAM6_SKYLAKE_DESKTOP, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_SKYLAKE_MOBILE, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_SKYLAKE_X, rapl_defaults_hsw_server),
+ RAPL_CPU(INTEL_FAM6_KABYLAKE_MOBILE, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_KABYLAKE_DESKTOP, rapl_defaults_core),
+
+ RAPL_CPU(INTEL_FAM6_ATOM_SILVERMONT1, rapl_defaults_byt),
+ RAPL_CPU(INTEL_FAM6_ATOM_AIRMONT, rapl_defaults_cht),
+ RAPL_CPU(INTEL_FAM6_ATOM_MERRIFIELD, rapl_defaults_tng),
+ RAPL_CPU(INTEL_FAM6_ATOM_MOOREFIELD, rapl_defaults_ann),
+ RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_ATOM_DENVERTON, rapl_defaults_core),
+
+ RAPL_CPU(INTEL_FAM6_XEON_PHI_KNL, rapl_defaults_hsw_server),
{}
};
MODULE_DEVICE_TABLE(x86cpu, rapl_ids);
@@ -1373,6 +1416,37 @@ static int rapl_check_domain(int cpu, int domain)
return 0;
}
+
+/*
+ * Check if power limits are available. Two cases when they are not available:
+ * 1. Locked by BIOS, in this case we still provide read-only access so that
+ * users can see what limit is set by the BIOS.
+ * 2. Some CPUs make some domains monitoring-only, which means the PLx
+ * MSRs may not exist at all. In this case, we do not show those
+ * constraints in powercap.
+ *
+ * Called after domains are detected and initialized.
+ */
+static void rapl_detect_powerlimit(struct rapl_domain *rd)
+{
+ u64 val64;
+ int i;
+
+ /* check if the domain is locked by BIOS, ignore if MSR doesn't exist */
+ if (!rapl_read_data_raw(rd, FW_LOCK, false, &val64)) {
+ if (val64) {
+ pr_info("RAPL package %d domain %s locked by BIOS\n",
+ rd->rp->id, rd->name);
+ rd->state |= DOMAIN_STATE_BIOS_LOCKED;
+ }
+ }
+ /* check if the power limit MSRs exist; otherwise the domain is monitoring-only */
+ for (i = 0; i < NR_POWER_LIMITS; i++) {
+ int prim = rd->rpl[i].prim_id;
+ if (rapl_read_data_raw(rd, prim, false, &val64))
+ rd->rpl[i].name = NULL;
+ }
+}
+
/* Detect active and valid domains for the given CPU, caller must
* ensure the CPU belongs to the targeted package and CPU hotplug is disabled.
*/
@@ -1381,7 +1455,6 @@ static int rapl_detect_domains(struct rapl_package *rp, int cpu)
int i;
int ret = 0;
struct rapl_domain *rd;
- u64 locked;
for (i = 0; i < RAPL_DOMAIN_MAX; i++) {
/* use physical package id to read counters */
@@ -1392,7 +1465,7 @@ static int rapl_detect_domains(struct rapl_package *rp, int cpu)
}
rp->nr_domains = bitmap_weight(&rp->domain_map, RAPL_DOMAIN_MAX);
if (!rp->nr_domains) {
- pr_err("no valid rapl domains found in package %d\n", rp->id);
+ pr_debug("no valid rapl domains found in package %d\n", rp->id);
ret = -ENODEV;
goto done;
}
@@ -1406,17 +1479,9 @@ static int rapl_detect_domains(struct rapl_package *rp, int cpu)
}
rapl_init_domains(rp);
- for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
- /* check if the domain is locked by BIOS */
- ret = rapl_read_data_raw(rd, FW_LOCK, false, &locked);
- if (ret)
- return ret;
- if (locked) {
- pr_info("RAPL package %d domain %s locked by BIOS\n",
- rp->id, rd->name);
- rd->state |= DOMAIN_STATE_BIOS_LOCKED;
- }
- }
+ for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++)
+ rapl_detect_powerlimit(rd);
+
done:
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index c182efc62..80a566a00 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -74,6 +74,16 @@ config PWM_ATMEL_TCB
To compile this driver as a module, choose M here: the module
will be called pwm-atmel-tcb.
+config PWM_BCM_IPROC
+ tristate "iProc PWM support"
+ depends on ARCH_BCM_IPROC
+ help
+ Generic PWM framework driver for the Broadcom iProc PWM block. This
+ block is used in Broadcom iProc SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-bcm-iproc.
+
config PWM_BCM_KONA
tristate "Kona PWM support"
depends on ARCH_BCM_MOBILE
@@ -137,6 +147,13 @@ config PWM_CRC
Generic PWM framework driver for Crystalcove (CRC) PMIC based PWM
control.
+config PWM_CROS_EC
+ tristate "ChromeOS EC PWM driver"
+ depends on MFD_CROS_EC
+ help
+ PWM driver for exposing a PWM attached to the ChromeOS Embedded
+ Controller.
+
config PWM_EP93XX
tristate "Cirrus Logic EP93xx PWM support"
depends on ARCH_EP93XX
@@ -305,7 +322,7 @@ config PWM_PXA
config PWM_RCAR
tristate "Renesas R-Car PWM support"
- depends on ARCH_RCAR_GEN1 || ARCH_RCAR_GEN2 || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
depends on HAS_IOMEM
help
This driver exposes the PWM Timer controller found in Renesas
@@ -362,6 +379,13 @@ config PWM_STI
To compile this driver as a module, choose M here: the module
will be called pwm-sti.
+config PWM_STMPE
+ bool "STMPE expander PWM export"
+ depends on MFD_STMPE
+ help
+ This enables support for the PWMs found in the STMPE I/O
+ expanders.
+
config PWM_SUN4I
tristate "Allwinner PWM support"
depends on ARCH_SUNXI || COMPILE_TEST
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index dd35bc121..feef1dd29 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_PWM_AB8500) += pwm-ab8500.o
obj-$(CONFIG_PWM_ATMEL) += pwm-atmel.o
obj-$(CONFIG_PWM_ATMEL_HLCDC_PWM) += pwm-atmel-hlcdc.o
obj-$(CONFIG_PWM_ATMEL_TCB) += pwm-atmel-tcb.o
+obj-$(CONFIG_PWM_BCM_IPROC) += pwm-bcm-iproc.o
obj-$(CONFIG_PWM_BCM_KONA) += pwm-bcm-kona.o
obj-$(CONFIG_PWM_BCM2835) += pwm-bcm2835.o
obj-$(CONFIG_PWM_BERLIN) += pwm-berlin.o
@@ -11,6 +12,7 @@ obj-$(CONFIG_PWM_BFIN) += pwm-bfin.o
obj-$(CONFIG_PWM_BRCMSTB) += pwm-brcmstb.o
obj-$(CONFIG_PWM_CLPS711X) += pwm-clps711x.o
obj-$(CONFIG_PWM_CRC) += pwm-crc.o
+obj-$(CONFIG_PWM_CROS_EC) += pwm-cros-ec.o
obj-$(CONFIG_PWM_EP93XX) += pwm-ep93xx.o
obj-$(CONFIG_PWM_FSL_FTM) += pwm-fsl-ftm.o
obj-$(CONFIG_PWM_IMG) += pwm-img.o
@@ -34,6 +36,7 @@ obj-$(CONFIG_PWM_ROCKCHIP) += pwm-rockchip.o
obj-$(CONFIG_PWM_SAMSUNG) += pwm-samsung.o
obj-$(CONFIG_PWM_SPEAR) += pwm-spear.o
obj-$(CONFIG_PWM_STI) += pwm-sti.o
+obj-$(CONFIG_PWM_STMPE) += pwm-stmpe.o
obj-$(CONFIG_PWM_SUN4I) += pwm-sun4i.o
obj-$(CONFIG_PWM_TEGRA) += pwm-tegra.o
obj-$(CONFIG_PWM_TIECAP) += pwm-tiecap.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index ed337a8c3..0dbd29e28 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -526,6 +526,33 @@ int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state)
EXPORT_SYMBOL_GPL(pwm_apply_state);
/**
+ * pwm_capture() - capture and report a PWM signal
+ * @pwm: PWM device
+ * @result: structure to fill with capture result
+ * @timeout: time to wait, in milliseconds, before giving up on capture
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result,
+ unsigned long timeout)
+{
+ int err;
+
+ if (!pwm || !pwm->chip->ops)
+ return -EINVAL;
+
+ if (!pwm->chip->ops->capture)
+ return -ENOSYS;
+
+ mutex_lock(&pwm_lock);
+ err = pwm->chip->ops->capture(pwm->chip, pwm, result, timeout);
+ mutex_unlock(&pwm_lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(pwm_capture);
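+
+/*
+ * A minimal, illustrative consumer of pwm_capture() (the 100 ms
+ * timeout is an arbitrary example; period and duty_cycle are
+ * reported in nanoseconds):
+ *
+ * struct pwm_capture cap;
+ *
+ * if (!pwm_capture(pwm, &cap, 100))
+ * pr_info("period %u ns, duty %u ns\n", cap.period, cap.duty_cycle);
+ */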
+
+/**
* pwm_adjust_config() - adjust the current PWM config to the PWM arguments
* @pwm: PWM device
*
diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
index 0e4bd4e8e..e6b8b1b7e 100644
--- a/drivers/pwm/pwm-atmel.c
+++ b/drivers/pwm/pwm-atmel.c
@@ -64,7 +64,8 @@ struct atmel_pwm_chip {
void __iomem *base;
unsigned int updated_pwms;
- struct mutex isr_lock; /* ISR is cleared when read, ensure only one thread does that */
+ /* ISR is cleared when read, ensure only one thread does that */
+ struct mutex isr_lock;
void (*config)(struct pwm_chip *chip, struct pwm_device *pwm,
unsigned long dty, unsigned long prd);
@@ -271,6 +272,16 @@ static void atmel_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
mutex_unlock(&atmel_pwm->isr_lock);
atmel_pwm_writel(atmel_pwm, PWM_DIS, 1 << pwm->hwpwm);
+ /*
+ * Wait for the PWM channel disable operation to be effective before
+ * stopping the clock.
+ */
+ timeout = jiffies + 2 * HZ;
+
+ while ((atmel_pwm_readl(atmel_pwm, PWM_SR) & (1 << pwm->hwpwm)) &&
+ time_before(jiffies, timeout))
+ usleep_range(10, 100);
+
clk_disable(atmel_pwm->clk);
}
@@ -324,21 +335,14 @@ MODULE_DEVICE_TABLE(of, atmel_pwm_dt_ids);
static inline const struct atmel_pwm_data *
atmel_pwm_get_driver_data(struct platform_device *pdev)
{
- if (pdev->dev.of_node) {
- const struct of_device_id *match;
-
- match = of_match_device(atmel_pwm_dt_ids, &pdev->dev);
- if (!match)
- return NULL;
+ const struct platform_device_id *id;
- return match->data;
- } else {
- const struct platform_device_id *id;
+ if (pdev->dev.of_node)
+ return of_device_get_match_data(&pdev->dev);
- id = platform_get_device_id(pdev);
+ id = platform_get_device_id(pdev);
- return (struct atmel_pwm_data *)id->driver_data;
- }
+ return (struct atmel_pwm_data *)id->driver_data;
}
static int atmel_pwm_probe(struct platform_device *pdev)
diff --git a/drivers/pwm/pwm-bcm-iproc.c b/drivers/pwm/pwm-bcm-iproc.c
new file mode 100644
index 000000000..d961a8207
--- /dev/null
+++ b/drivers/pwm/pwm-bcm-iproc.c
@@ -0,0 +1,277 @@
+/*
+ * Copyright (C) 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+
+#define IPROC_PWM_CTRL_OFFSET 0x00
+#define IPROC_PWM_CTRL_TYPE_SHIFT(ch) (15 + (ch))
+#define IPROC_PWM_CTRL_POLARITY_SHIFT(ch) (8 + (ch))
+#define IPROC_PWM_CTRL_EN_SHIFT(ch) (ch)
+
+#define IPROC_PWM_PERIOD_OFFSET(ch) (0x04 + ((ch) << 3))
+#define IPROC_PWM_PERIOD_MIN 0x02
+#define IPROC_PWM_PERIOD_MAX 0xffff
+
+#define IPROC_PWM_DUTY_CYCLE_OFFSET(ch) (0x08 + ((ch) << 3))
+#define IPROC_PWM_DUTY_CYCLE_MIN 0x00
+#define IPROC_PWM_DUTY_CYCLE_MAX 0xffff
+
+#define IPROC_PWM_PRESCALE_OFFSET 0x24
+#define IPROC_PWM_PRESCALE_BITS 0x06
+#define IPROC_PWM_PRESCALE_SHIFT(ch) ((3 - (ch)) * \
+ IPROC_PWM_PRESCALE_BITS)
+#define IPROC_PWM_PRESCALE_MASK(ch) (IPROC_PWM_PRESCALE_MAX << \
+ IPROC_PWM_PRESCALE_SHIFT(ch))
+#define IPROC_PWM_PRESCALE_MIN 0x00
+#define IPROC_PWM_PRESCALE_MAX 0x3f
+
+struct iproc_pwmc {
+ struct pwm_chip chip;
+ void __iomem *base;
+ struct clk *clk;
+};
+
+static inline struct iproc_pwmc *to_iproc_pwmc(struct pwm_chip *chip)
+{
+ return container_of(chip, struct iproc_pwmc, chip);
+}
+
+static void iproc_pwmc_enable(struct iproc_pwmc *ip, unsigned int channel)
+{
+ u32 value;
+
+ value = readl(ip->base + IPROC_PWM_CTRL_OFFSET);
+ value |= 1 << IPROC_PWM_CTRL_EN_SHIFT(channel);
+ writel(value, ip->base + IPROC_PWM_CTRL_OFFSET);
+
+ /* must be a 400 ns delay between clearing and setting enable bit */
+ ndelay(400);
+}
+
+static void iproc_pwmc_disable(struct iproc_pwmc *ip, unsigned int channel)
+{
+ u32 value;
+
+ value = readl(ip->base + IPROC_PWM_CTRL_OFFSET);
+ value &= ~(1 << IPROC_PWM_CTRL_EN_SHIFT(channel));
+ writel(value, ip->base + IPROC_PWM_CTRL_OFFSET);
+
+ /* must be a 400 ns delay between clearing and setting enable bit */
+ ndelay(400);
+}
+
+static void iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct iproc_pwmc *ip = to_iproc_pwmc(chip);
+ u64 tmp, multi, rate;
+ u32 value, prescale;
+
+ rate = clk_get_rate(ip->clk);
+
+ value = readl(ip->base + IPROC_PWM_CTRL_OFFSET);
+
+ if (value & BIT(IPROC_PWM_CTRL_EN_SHIFT(pwm->hwpwm)))
+ state->enabled = true;
+ else
+ state->enabled = false;
+
+ if (value & BIT(IPROC_PWM_CTRL_POLARITY_SHIFT(pwm->hwpwm)))
+ state->polarity = PWM_POLARITY_NORMAL;
+ else
+ state->polarity = PWM_POLARITY_INVERSED;
+
+ value = readl(ip->base + IPROC_PWM_PRESCALE_OFFSET);
+ prescale = value >> IPROC_PWM_PRESCALE_SHIFT(pwm->hwpwm);
+ prescale &= IPROC_PWM_PRESCALE_MAX;
+
+ multi = (u64)NSEC_PER_SEC * (prescale + 1);
+
+ value = readl(ip->base + IPROC_PWM_PERIOD_OFFSET(pwm->hwpwm));
+ tmp = (value & IPROC_PWM_PERIOD_MAX) * multi;
+ state->period = div64_u64(tmp, rate);
+
+ value = readl(ip->base + IPROC_PWM_DUTY_CYCLE_OFFSET(pwm->hwpwm));
+ tmp = (value & IPROC_PWM_PERIOD_MAX) * multi;
+ state->duty_cycle = div64_u64(tmp, rate);
+}
+
+static int iproc_pwmc_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ unsigned long prescale = IPROC_PWM_PRESCALE_MIN;
+ struct iproc_pwmc *ip = to_iproc_pwmc(chip);
+ u32 value, period, duty;
+ u64 rate;
+
+ rate = clk_get_rate(ip->clk);
+
+ /*
+ * Find period count, duty count and prescale to suit duty_cycle and
+ * period. This is done according to formulas described below:
+ *
+ * period_ns = 10^9 * (PRESCALE + 1) * PC / PWM_CLK_RATE
+ * duty_ns = 10^9 * (PRESCALE + 1) * DC / PWM_CLK_RATE
+ *
+ * PC = (PWM_CLK_RATE * period_ns) / (10^9 * (PRESCALE + 1))
+ * DC = (PWM_CLK_RATE * duty_ns) / (10^9 * (PRESCALE + 1))
+ */
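+ /*
+ * Worked example (illustrative numbers): with a 25 MHz PWM clock
+ * and period_ns = 10^6 (a 1 kHz signal), PRESCALE = 0 gives
+ * PC = 25000000 * 10^6 / 10^9 = 25000, which fits the 16-bit
+ * period register, so no prescaling is needed.
+ */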
+ while (1) {
+ u64 value, div;
+
+ div = NSEC_PER_SEC * (prescale + 1);
+ value = rate * state->period;
+ period = div64_u64(value, div);
+ value = rate * state->duty_cycle;
+ duty = div64_u64(value, div);
+
+ if (period < IPROC_PWM_PERIOD_MIN ||
+ duty < IPROC_PWM_DUTY_CYCLE_MIN)
+ return -EINVAL;
+
+ if (period <= IPROC_PWM_PERIOD_MAX &&
+ duty <= IPROC_PWM_DUTY_CYCLE_MAX)
+ break;
+
+ /* Otherwise, increase prescale and recalculate counts */
+ if (++prescale > IPROC_PWM_PRESCALE_MAX)
+ return -EINVAL;
+ }
+
+ iproc_pwmc_disable(ip, pwm->hwpwm);
+
+ /* Set prescale */
+ value = readl(ip->base + IPROC_PWM_PRESCALE_OFFSET);
+ value &= ~IPROC_PWM_PRESCALE_MASK(pwm->hwpwm);
+ value |= prescale << IPROC_PWM_PRESCALE_SHIFT(pwm->hwpwm);
+ writel(value, ip->base + IPROC_PWM_PRESCALE_OFFSET);
+
+ /* set period and duty cycle */
+ writel(period, ip->base + IPROC_PWM_PERIOD_OFFSET(pwm->hwpwm));
+ writel(duty, ip->base + IPROC_PWM_DUTY_CYCLE_OFFSET(pwm->hwpwm));
+
+ /* set polarity */
+ value = readl(ip->base + IPROC_PWM_CTRL_OFFSET);
+
+ if (state->polarity == PWM_POLARITY_NORMAL)
+ value |= 1 << IPROC_PWM_CTRL_POLARITY_SHIFT(pwm->hwpwm);
+ else
+ value &= ~(1 << IPROC_PWM_CTRL_POLARITY_SHIFT(pwm->hwpwm));
+
+ writel(value, ip->base + IPROC_PWM_CTRL_OFFSET);
+
+ if (state->enabled)
+ iproc_pwmc_enable(ip, pwm->hwpwm);
+
+ return 0;
+}
+
+static const struct pwm_ops iproc_pwm_ops = {
+ .apply = iproc_pwmc_apply,
+ .get_state = iproc_pwmc_get_state,
+};
+
+static int iproc_pwmc_probe(struct platform_device *pdev)
+{
+ struct iproc_pwmc *ip;
+ struct resource *res;
+ unsigned int i;
+ u32 value;
+ int ret;
+
+ ip = devm_kzalloc(&pdev->dev, sizeof(*ip), GFP_KERNEL);
+ if (!ip)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ip);
+
+ ip->chip.dev = &pdev->dev;
+ ip->chip.ops = &iproc_pwm_ops;
+ ip->chip.base = -1;
+ ip->chip.npwm = 4;
+ ip->chip.of_xlate = of_pwm_xlate_with_flags;
+ ip->chip.of_pwm_n_cells = 3;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ip->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ip->base))
+ return PTR_ERR(ip->base);
+
+ ip->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ip->clk)) {
+ dev_err(&pdev->dev, "failed to get clock: %ld\n",
+ PTR_ERR(ip->clk));
+ return PTR_ERR(ip->clk);
+ }
+
+ ret = clk_prepare_enable(ip->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to enable clock: %d\n", ret);
+ return ret;
+ }
+
+ /* Set full drive and normal polarity for all channels */
+ value = readl(ip->base + IPROC_PWM_CTRL_OFFSET);
+
+ for (i = 0; i < ip->chip.npwm; i++) {
+ value &= ~(1 << IPROC_PWM_CTRL_TYPE_SHIFT(i));
+ value |= 1 << IPROC_PWM_CTRL_POLARITY_SHIFT(i);
+ }
+
+ writel(value, ip->base + IPROC_PWM_CTRL_OFFSET);
+
+ ret = pwmchip_add(&ip->chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
+ clk_disable_unprepare(ip->clk);
+ }
+
+ return ret;
+}
+
+static int iproc_pwmc_remove(struct platform_device *pdev)
+{
+ struct iproc_pwmc *ip = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(ip->clk);
+
+ return pwmchip_remove(&ip->chip);
+}
+
+static const struct of_device_id bcm_iproc_pwmc_dt[] = {
+ { .compatible = "brcm,iproc-pwm" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, bcm_iproc_pwmc_dt);
+
+static struct platform_driver iproc_pwmc_driver = {
+ .driver = {
+ .name = "bcm-iproc-pwm",
+ .of_match_table = bcm_iproc_pwmc_dt,
+ },
+ .probe = iproc_pwmc_probe,
+ .remove = iproc_pwmc_remove,
+};
+module_platform_driver(iproc_pwmc_driver);
+
+MODULE_AUTHOR("Yendapally Reddy Dhananjaya Reddy <yendapally.reddy@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom iProc PWM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pwm/pwm-clps711x.c b/drivers/pwm/pwm-clps711x.c
index 7d335422c..26ec24e45 100644
--- a/drivers/pwm/pwm-clps711x.c
+++ b/drivers/pwm/pwm-clps711x.c
@@ -155,7 +155,7 @@ static int clps711x_pwm_remove(struct platform_device *pdev)
}
static const struct of_device_id __maybe_unused clps711x_pwm_dt_ids[] = {
- { .compatible = "cirrus,clps711x-pwm", },
+ { .compatible = "cirrus,ep7209-pwm", },
{ }
};
MODULE_DEVICE_TABLE(of, clps711x_pwm_dt_ids);
diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c
new file mode 100644
index 000000000..99b9acc1a
--- /dev/null
+++ b/drivers/pwm/pwm-cros-ec.c
@@ -0,0 +1,260 @@
+/*
+ * Copyright (C) 2016 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2, as published by
+ * the Free Software Foundation.
+ *
+ * Expose a PWM controlled by the ChromeOS EC to the host processor.
+ */
+
+#include <linux/module.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+
+/**
+ * struct cros_ec_pwm_device - Driver data for EC PWM
+ *
+ * @dev: Device node
+ * @ec: Pointer to EC device
+ * @chip: PWM controller chip
+ */
+struct cros_ec_pwm_device {
+ struct device *dev;
+ struct cros_ec_device *ec;
+ struct pwm_chip chip;
+};
+
+static inline struct cros_ec_pwm_device *pwm_to_cros_ec_pwm(struct pwm_chip *c)
+{
+ return container_of(c, struct cros_ec_pwm_device, chip);
+}
+
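+/*
+ * struct cros_ec_command ends in a flexible data array, so a command's
+ * parameters (and response) must sit directly after the header in
+ * memory; the anonymous wrapper structs below provide that contiguous
+ * layout on the stack.
+ */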
+static int cros_ec_pwm_set_duty(struct cros_ec_device *ec, u8 index, u16 duty)
+{
+ struct {
+ struct cros_ec_command msg;
+ struct ec_params_pwm_set_duty params;
+ } buf;
+ struct ec_params_pwm_set_duty *params = &buf.params;
+ struct cros_ec_command *msg = &buf.msg;
+
+ memset(&buf, 0, sizeof(buf));
+
+ msg->version = 0;
+ msg->command = EC_CMD_PWM_SET_DUTY;
+ msg->insize = 0;
+ msg->outsize = sizeof(*params);
+
+ params->duty = duty;
+ params->pwm_type = EC_PWM_TYPE_GENERIC;
+ params->index = index;
+
+ return cros_ec_cmd_xfer_status(ec, msg);
+}
+
+static int __cros_ec_pwm_get_duty(struct cros_ec_device *ec, u8 index,
+ u32 *result)
+{
+ struct {
+ struct cros_ec_command msg;
+ union {
+ struct ec_params_pwm_get_duty params;
+ struct ec_response_pwm_get_duty resp;
+ };
+ } buf;
+ struct ec_params_pwm_get_duty *params = &buf.params;
+ struct ec_response_pwm_get_duty *resp = &buf.resp;
+ struct cros_ec_command *msg = &buf.msg;
+ int ret;
+
+ memset(&buf, 0, sizeof(buf));
+
+ msg->version = 0;
+ msg->command = EC_CMD_PWM_GET_DUTY;
+ msg->insize = sizeof(*resp);
+ msg->outsize = sizeof(*params);
+
+ params->pwm_type = EC_PWM_TYPE_GENERIC;
+ params->index = index;
+
+ ret = cros_ec_cmd_xfer_status(ec, msg);
+ if (result)
+ *result = msg->result;
+ if (ret < 0)
+ return ret;
+
+ return resp->duty;
+}
+
+static int cros_ec_pwm_get_duty(struct cros_ec_device *ec, u8 index)
+{
+ return __cros_ec_pwm_get_duty(ec, index, NULL);
+}
+
+static int cros_ec_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct cros_ec_pwm_device *ec_pwm = pwm_to_cros_ec_pwm(chip);
+ int duty_cycle;
+
+ /* The EC won't let us change the period */
+ if (state->period != EC_PWM_MAX_DUTY)
+ return -EINVAL;
+
+ /*
+ * The EC doesn't separate the concepts of duty cycle and enabled, but
+ * the kernel does. Translate.
+ */
+ duty_cycle = state->enabled ? state->duty_cycle : 0;
+
+ return cros_ec_pwm_set_duty(ec_pwm->ec, pwm->hwpwm, duty_cycle);
+}
+
+static void cros_ec_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct cros_ec_pwm_device *ec_pwm = pwm_to_cros_ec_pwm(chip);
+ int ret;
+
+ ret = cros_ec_pwm_get_duty(ec_pwm->ec, pwm->hwpwm);
+ if (ret < 0) {
+ dev_err(chip->dev, "error getting initial duty: %d\n", ret);
+ return;
+ }
+
+ state->enabled = (ret > 0);
+ state->period = EC_PWM_MAX_DUTY;
+
+ /* Note that "disabled" and "duty cycle == 0" are treated the same */
+ state->duty_cycle = ret;
+}
+
+static struct pwm_device *
+cros_ec_pwm_xlate(struct pwm_chip *pc, const struct of_phandle_args *args)
+{
+ struct pwm_device *pwm;
+
+ if (args->args[0] >= pc->npwm)
+ return ERR_PTR(-EINVAL);
+
+ pwm = pwm_request_from_chip(pc, args->args[0], NULL);
+ if (IS_ERR(pwm))
+ return pwm;
+
+ /* The EC won't let us change the period */
+ pwm->args.period = EC_PWM_MAX_DUTY;
+
+ return pwm;
+}
+
+static const struct pwm_ops cros_ec_pwm_ops = {
+ .get_state = cros_ec_pwm_get_state,
+ .apply = cros_ec_pwm_apply,
+ .owner = THIS_MODULE,
+};
+
+static int cros_ec_num_pwms(struct cros_ec_device *ec)
+{
+ int i, ret;
+
+ /* The index field is only 8 bits */
+ for (i = 0; i <= U8_MAX; i++) {
+ u32 result = 0;
+
+ ret = __cros_ec_pwm_get_duty(ec, i, &result);
+ /* We want to parse EC protocol errors */
+ if (ret < 0 && !(ret == -EPROTO && result))
+ return ret;
+
+ /*
+ * We look for SUCCESS, INVALID_COMMAND, or INVALID_PARAM
+ * responses; everything else is treated as an error.
+ */
+ if (result == EC_RES_INVALID_COMMAND)
+ return -ENODEV;
+ else if (result == EC_RES_INVALID_PARAM)
+ return i;
+ else if (result)
+ return -EPROTO;
+ }
+
+ return U8_MAX;
+}
+
+static int cros_ec_pwm_probe(struct platform_device *pdev)
+{
+ struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct cros_ec_pwm_device *ec_pwm;
+ struct pwm_chip *chip;
+ int ret;
+
+ if (!ec) {
+ dev_err(dev, "no parent EC device\n");
+ return -EINVAL;
+ }
+
+ ec_pwm = devm_kzalloc(dev, sizeof(*ec_pwm), GFP_KERNEL);
+ if (!ec_pwm)
+ return -ENOMEM;
+ chip = &ec_pwm->chip;
+ ec_pwm->ec = ec;
+
+ /* PWM chip */
+ chip->dev = dev;
+ chip->ops = &cros_ec_pwm_ops;
+ chip->of_xlate = cros_ec_pwm_xlate;
+ chip->of_pwm_n_cells = 1;
+ chip->base = -1;
+ ret = cros_ec_num_pwms(ec);
+ if (ret < 0) {
+ dev_err(dev, "Couldn't find PWMs: %d\n", ret);
+ return ret;
+ }
+ chip->npwm = ret;
+ dev_dbg(dev, "Probed %u PWMs\n", chip->npwm);
+
+ ret = pwmchip_add(chip);
+ if (ret < 0) {
+ dev_err(dev, "cannot register PWM: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, ec_pwm);
+
+ return ret;
+}
+
+static int cros_ec_pwm_remove(struct platform_device *dev)
+{
+ struct cros_ec_pwm_device *ec_pwm = platform_get_drvdata(dev);
+ struct pwm_chip *chip = &ec_pwm->chip;
+
+ return pwmchip_remove(chip);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id cros_ec_pwm_of_match[] = {
+ { .compatible = "google,cros-ec-pwm" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cros_ec_pwm_of_match);
+#endif
+
+static struct platform_driver cros_ec_pwm_driver = {
+ .probe = cros_ec_pwm_probe,
+ .remove = cros_ec_pwm_remove,
+ .driver = {
+ .name = "cros-ec-pwm",
+ .of_match_table = of_match_ptr(cros_ec_pwm_of_match),
+ },
+};
+module_platform_driver(cros_ec_pwm_driver);
+
+MODULE_ALIAS("platform:cros-ec-pwm");
+MODULE_DESCRIPTION("ChromeOS EC PWM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
index 4d470c1a4..a9b3cff96 100644
--- a/drivers/pwm/pwm-lpc32xx.c
+++ b/drivers/pwm/pwm-lpc32xx.c
@@ -25,6 +25,7 @@ struct lpc32xx_pwm_chip {
};
#define PWM_ENABLE BIT(31)
+#define PWM_PIN_LEVEL BIT(30)
#define to_lpc32xx_pwm_chip(_chip) \
container_of(_chip, struct lpc32xx_pwm_chip, chip)
@@ -103,6 +104,7 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
struct lpc32xx_pwm_chip *lpc32xx;
struct resource *res;
int ret;
+ u32 val;
lpc32xx = devm_kzalloc(&pdev->dev, sizeof(*lpc32xx), GFP_KERNEL);
if (!lpc32xx)
@@ -128,6 +130,11 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
return ret;
}
+ /* When the PWM is disabled, configure the output to the default value */
+ val = readl(lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2));
+ val &= ~PWM_PIN_LEVEL;
+ writel(val, lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2));
+
platform_set_drvdata(pdev, lpc32xx);
return 0;
diff --git a/drivers/pwm/pwm-lpss-pci.c b/drivers/pwm/pwm-lpss-pci.c
index 7160e8ab3..3622f0934 100644
--- a/drivers/pwm/pwm-lpss-pci.c
+++ b/drivers/pwm/pwm-lpss-pci.c
@@ -76,6 +76,7 @@ static const struct pci_device_id pwm_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x0ac8), (unsigned long)&pwm_lpss_bxt_info},
{ PCI_VDEVICE(INTEL, 0x0f08), (unsigned long)&pwm_lpss_byt_info},
{ PCI_VDEVICE(INTEL, 0x0f09), (unsigned long)&pwm_lpss_byt_info},
+ { PCI_VDEVICE(INTEL, 0x11a5), (unsigned long)&pwm_lpss_bxt_info},
{ PCI_VDEVICE(INTEL, 0x1ac8), (unsigned long)&pwm_lpss_bxt_info},
{ PCI_VDEVICE(INTEL, 0x2288), (unsigned long)&pwm_lpss_bsw_info},
{ PCI_VDEVICE(INTEL, 0x2289), (unsigned long)&pwm_lpss_bsw_info},
diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
index 295b963db..72c0bce5a 100644
--- a/drivers/pwm/pwm-lpss.c
+++ b/drivers/pwm/pwm-lpss.c
@@ -27,7 +27,6 @@
#define PWM_SW_UPDATE BIT(30)
#define PWM_BASE_UNIT_SHIFT 8
#define PWM_ON_TIME_DIV_MASK 0x000000ff
-#define PWM_DIVISION_CORRECTION 0x2
/* Size of each PWM register space if multiple */
#define PWM_SIZE 0x400
@@ -92,8 +91,8 @@ static int pwm_lpss_config(struct pwm_chip *chip, struct pwm_device *pwm,
int duty_ns, int period_ns)
{
struct pwm_lpss_chip *lpwm = to_lpwm(chip);
- u8 on_time_div;
- unsigned long c, base_unit_range;
+ unsigned long long on_time_div;
+ unsigned long c = lpwm->info->clk_rate, base_unit_range;
unsigned long long base_unit, freq = NSEC_PER_SEC;
u32 ctrl;
@@ -101,21 +100,18 @@ static int pwm_lpss_config(struct pwm_chip *chip, struct pwm_device *pwm,
/*
* The equation is:
- * base_unit = ((freq / c) * base_unit_range) + correction
+ * base_unit = round(base_unit_range * freq / c)
*/
base_unit_range = BIT(lpwm->info->base_unit_bits);
- base_unit = freq * base_unit_range;
+ freq *= base_unit_range;
- c = lpwm->info->clk_rate;
- if (!c)
- return -EINVAL;
-
- do_div(base_unit, c);
- base_unit += PWM_DIVISION_CORRECTION;
+ base_unit = DIV_ROUND_CLOSEST_ULL(freq, c);
if (duty_ns <= 0)
duty_ns = 1;
- on_time_div = 255 - (255 * duty_ns / period_ns);
+ on_time_div = 255ULL * duty_ns;
+ do_div(on_time_div, period_ns);
+ on_time_div = 255ULL - on_time_div;
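+
+ /*
+ * The on_time_div just computed is the inverted duty cycle scaled
+ * to 0..255; e.g. (illustrative) duty_ns = period_ns / 4 yields
+ * on_time_div = 255 - 63 = 192.
+ */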
pm_runtime_get_sync(chip->dev);
@@ -169,6 +165,7 @@ struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
const struct pwm_lpss_boardinfo *info)
{
struct pwm_lpss_chip *lpwm;
+ unsigned long c;
int ret;
lpwm = devm_kzalloc(dev, sizeof(*lpwm), GFP_KERNEL);
@@ -180,6 +177,11 @@ struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
return ERR_CAST(lpwm->regs);
lpwm->info = info;
+
+ c = lpwm->info->clk_rate;
+ if (!c)
+ return ERR_PTR(-EINVAL);
+
lpwm->chip.dev = dev;
lpwm->chip.ops = &pwm_lpss_ops;
lpwm->chip.base = -1;
diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
index 3e95090cd..5ad42f33e 100644
--- a/drivers/pwm/pwm-omap-dmtimer.c
+++ b/drivers/pwm/pwm-omap-dmtimer.c
@@ -245,7 +245,7 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
struct pwm_omap_dmtimer_chip *omap;
struct pwm_omap_dmtimer_pdata *pdata;
pwm_omap_dmtimer *dm_timer;
- u32 prescaler;
+ u32 v;
int status;
pdata = dev_get_platdata(&pdev->dev);
@@ -306,10 +306,12 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
if (pm_runtime_active(&omap->dm_timer_pdev->dev))
omap->pdata->stop(omap->dm_timer);
- /* setup dmtimer prescaler */
- if (!of_property_read_u32(pdev->dev.of_node, "ti,prescaler",
- &prescaler))
- omap->pdata->set_prescaler(omap->dm_timer, prescaler);
+ if (!of_property_read_u32(pdev->dev.of_node, "ti,prescaler", &v))
+ omap->pdata->set_prescaler(omap->dm_timer, v);
+
+ /* setup dmtimer clock source */
+ if (!of_property_read_u32(pdev->dev.of_node, "ti,clock-source", &v))
+ omap->pdata->set_source(omap->dm_timer, v);
omap->chip.dev = &pdev->dev;
omap->chip.ops = &pwm_omap_dmtimer_ops;
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index 7d9cc9049..ef89df1f7 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -47,10 +47,14 @@ struct rockchip_pwm_regs {
struct rockchip_pwm_data {
struct rockchip_pwm_regs regs;
unsigned int prescaler;
+ bool supports_polarity;
const struct pwm_ops *ops;
void (*set_enable)(struct pwm_chip *chip,
- struct pwm_device *pwm, bool enable);
+ struct pwm_device *pwm, bool enable,
+ enum pwm_polarity polarity);
+ void (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state);
};
static inline struct rockchip_pwm_chip *to_rockchip_pwm_chip(struct pwm_chip *c)
@@ -59,7 +63,8 @@ static inline struct rockchip_pwm_chip *to_rockchip_pwm_chip(struct pwm_chip *c)
}
static void rockchip_pwm_set_enable_v1(struct pwm_chip *chip,
- struct pwm_device *pwm, bool enable)
+ struct pwm_device *pwm, bool enable,
+ enum pwm_polarity polarity)
{
struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
u32 enable_conf = PWM_CTRL_OUTPUT_EN | PWM_CTRL_TIMER_EN;
@@ -75,15 +80,29 @@ static void rockchip_pwm_set_enable_v1(struct pwm_chip *chip,
writel_relaxed(val, pc->base + pc->data->regs.ctrl);
}
+static void rockchip_pwm_get_state_v1(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
+ u32 enable_conf = PWM_CTRL_OUTPUT_EN | PWM_CTRL_TIMER_EN;
+ u32 val;
+
+ val = readl_relaxed(pc->base + pc->data->regs.ctrl);
+ if ((val & enable_conf) == enable_conf)
+ state->enabled = true;
+}
+
static void rockchip_pwm_set_enable_v2(struct pwm_chip *chip,
- struct pwm_device *pwm, bool enable)
+ struct pwm_device *pwm, bool enable,
+ enum pwm_polarity polarity)
{
struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
u32 enable_conf = PWM_OUTPUT_LEFT | PWM_LP_DISABLE | PWM_ENABLE |
PWM_CONTINUOUS;
u32 val;
- if (pwm_get_polarity(pwm) == PWM_POLARITY_INVERSED)
+ if (polarity == PWM_POLARITY_INVERSED)
enable_conf |= PWM_DUTY_NEGATIVE | PWM_INACTIVE_POSITIVE;
else
enable_conf |= PWM_DUTY_POSITIVE | PWM_INACTIVE_NEGATIVE;
@@ -98,13 +117,59 @@ static void rockchip_pwm_set_enable_v2(struct pwm_chip *chip,
writel_relaxed(val, pc->base + pc->data->regs.ctrl);
}
+static void rockchip_pwm_get_state_v2(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
+ u32 enable_conf = PWM_OUTPUT_LEFT | PWM_LP_DISABLE | PWM_ENABLE |
+ PWM_CONTINUOUS;
+ u32 val;
+
+ val = readl_relaxed(pc->base + pc->data->regs.ctrl);
+ if ((val & enable_conf) != enable_conf)
+ return;
+
+ state->enabled = true;
+
+ if (!(val & PWM_DUTY_POSITIVE))
+ state->polarity = PWM_POLARITY_INVERSED;
+}
+
+static void rockchip_pwm_get_state(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
+ unsigned long clk_rate;
+ u64 tmp;
+ int ret;
+
+ ret = clk_enable(pc->clk);
+ if (ret)
+ return;
+
+ clk_rate = clk_get_rate(pc->clk);
+
+ tmp = readl_relaxed(pc->base + pc->data->regs.period);
+ tmp *= pc->data->prescaler * NSEC_PER_SEC;
+ state->period = DIV_ROUND_CLOSEST_ULL(tmp, clk_rate);
+
+ tmp = readl_relaxed(pc->base + pc->data->regs.duty);
+ tmp *= pc->data->prescaler * NSEC_PER_SEC;
+ state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, clk_rate);
+
+ pc->data->get_state(chip, pwm, state);
+
+ clk_disable(pc->clk);
+}
+
static int rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
int duty_ns, int period_ns)
{
struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
unsigned long period, duty;
u64 clk_rate, div;
- int ret;
clk_rate = clk_get_rate(pc->clk);
@@ -114,74 +179,72 @@ static int rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
* default prescaler value for all practical clock rate values.
*/
div = clk_rate * period_ns;
- do_div(div, pc->data->prescaler * NSEC_PER_SEC);
- period = div;
+ period = DIV_ROUND_CLOSEST_ULL(div,
+ pc->data->prescaler * NSEC_PER_SEC);
div = clk_rate * duty_ns;
- do_div(div, pc->data->prescaler * NSEC_PER_SEC);
- duty = div;
-
- ret = clk_enable(pc->clk);
- if (ret)
- return ret;
+ duty = DIV_ROUND_CLOSEST_ULL(div, pc->data->prescaler * NSEC_PER_SEC);
writel(period, pc->base + pc->data->regs.period);
writel(duty, pc->base + pc->data->regs.duty);
- writel(0, pc->base + pc->data->regs.cntr);
-
- clk_disable(pc->clk);
-
- return 0;
-}
-
-static int rockchip_pwm_set_polarity(struct pwm_chip *chip,
- struct pwm_device *pwm,
- enum pwm_polarity polarity)
-{
- /*
- * No action needed here because pwm->polarity will be set by the core
- * and the core will only change polarity when the PWM is not enabled.
- * We'll handle things in set_enable().
- */
return 0;
}
-static int rockchip_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
{
struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
+ struct pwm_state curstate;
+ bool enabled;
int ret;
+ pwm_get_state(pwm, &curstate);
+ enabled = curstate.enabled;
+
ret = clk_enable(pc->clk);
if (ret)
return ret;
- pc->data->set_enable(chip, pwm, true);
+ if (state->polarity != curstate.polarity && enabled) {
+ pc->data->set_enable(chip, pwm, false, state->polarity);
+ enabled = false;
+ }
- return 0;
-}
+ ret = rockchip_pwm_config(chip, pwm, state->duty_cycle, state->period);
+ if (ret) {
+ if (enabled != curstate.enabled)
+ pc->data->set_enable(chip, pwm, !enabled,
+ state->polarity);
-static void rockchip_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
+ goto out;
+ }
+
+ if (state->enabled != enabled)
+ pc->data->set_enable(chip, pwm, state->enabled,
+ state->polarity);
- pc->data->set_enable(chip, pwm, false);
+ /*
+ * Update the state to match the real hardware, which can differ a bit
+ * because of the period/duty_cycle approximation.
+ */
+ rockchip_pwm_get_state(chip, pwm, state);
+out:
clk_disable(pc->clk);
+
+ return ret;
}
static const struct pwm_ops rockchip_pwm_ops_v1 = {
- .config = rockchip_pwm_config,
- .enable = rockchip_pwm_enable,
- .disable = rockchip_pwm_disable,
+ .get_state = rockchip_pwm_get_state,
+ .apply = rockchip_pwm_apply,
.owner = THIS_MODULE,
};
static const struct pwm_ops rockchip_pwm_ops_v2 = {
- .config = rockchip_pwm_config,
- .set_polarity = rockchip_pwm_set_polarity,
- .enable = rockchip_pwm_enable,
- .disable = rockchip_pwm_disable,
+ .get_state = rockchip_pwm_get_state,
+ .apply = rockchip_pwm_apply,
.owner = THIS_MODULE,
};
@@ -195,6 +258,7 @@ static const struct rockchip_pwm_data pwm_data_v1 = {
.prescaler = 2,
.ops = &rockchip_pwm_ops_v1,
.set_enable = rockchip_pwm_set_enable_v1,
+ .get_state = rockchip_pwm_get_state_v1,
};
static const struct rockchip_pwm_data pwm_data_v2 = {
@@ -205,8 +269,10 @@ static const struct rockchip_pwm_data pwm_data_v2 = {
.ctrl = 0x0c,
},
.prescaler = 1,
+ .supports_polarity = true,
.ops = &rockchip_pwm_ops_v2,
.set_enable = rockchip_pwm_set_enable_v2,
+ .get_state = rockchip_pwm_get_state_v2,
};
static const struct rockchip_pwm_data pwm_data_vop = {
@@ -217,8 +283,10 @@ static const struct rockchip_pwm_data pwm_data_vop = {
.ctrl = 0x00,
},
.prescaler = 1,
+ .supports_polarity = true,
.ops = &rockchip_pwm_ops_v2,
.set_enable = rockchip_pwm_set_enable_v2,
+ .get_state = rockchip_pwm_get_state_v2,
};
static const struct of_device_id rockchip_pwm_dt_ids[] = {
@@ -253,7 +321,7 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
if (IS_ERR(pc->clk))
return PTR_ERR(pc->clk);
- ret = clk_prepare(pc->clk);
+ ret = clk_prepare_enable(pc->clk);
if (ret)
return ret;
@@ -265,7 +333,7 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
pc->chip.base = -1;
pc->chip.npwm = 1;
- if (pc->data->ops->set_polarity) {
+ if (pc->data->supports_polarity) {
pc->chip.of_xlate = of_pwm_xlate_with_flags;
pc->chip.of_pwm_n_cells = 3;
}
@@ -276,6 +344,10 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
}
+ /* Keep the PWM clk enabled if the PWM appears to be up and running. */
+ if (!pwm_is_enabled(pc->chip.pwms))
+ clk_disable(pc->clk);
+
return ret;
}
@@ -283,6 +355,20 @@ static int rockchip_pwm_remove(struct platform_device *pdev)
{
struct rockchip_pwm_chip *pc = platform_get_drvdata(pdev);
+ /*
+ * Disable the PWM clk before unpreparing it if the PWM device is still
+ * running. This should only happen when the last PWM user left it
+ * enabled, or when nobody requested a PWM that was previously enabled
+ * by the bootloader.
+ *
+ * FIXME: Maybe the core should disable all PWM devices in
+ * pwmchip_remove(). In this case we'd only have to call
+ * clk_unprepare() after pwmchip_remove().
+ */
+ if (pwm_is_enabled(pc->chip.pwms))
+ clk_disable(pc->clk);
+
clk_unprepare(pc->clk);
return pwmchip_remove(&pc->chip);
diff --git a/drivers/pwm/pwm-stmpe.c b/drivers/pwm/pwm-stmpe.c
new file mode 100644
index 000000000..e464582a3
--- /dev/null
+++ b/drivers/pwm/pwm-stmpe.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright (C) 2016 Linaro Ltd.
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/mfd/stmpe.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+
+#define STMPE24XX_PWMCS 0x30
+#define PWMCS_EN_PWM0 BIT(0)
+#define PWMCS_EN_PWM1 BIT(1)
+#define PWMCS_EN_PWM2 BIT(2)
+#define STMPE24XX_PWMIC0 0x38
+#define STMPE24XX_PWMIC1 0x39
+#define STMPE24XX_PWMIC2 0x3a
+
+#define STMPE_PWM_24XX_PINBASE 21
+
+struct stmpe_pwm {
+ struct stmpe *stmpe;
+ struct pwm_chip chip;
+ u8 last_duty;
+};
+
+static inline struct stmpe_pwm *to_stmpe_pwm(struct pwm_chip *chip)
+{
+ return container_of(chip, struct stmpe_pwm, chip);
+}
+
+static int stmpe_24xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct stmpe_pwm *stmpe_pwm = to_stmpe_pwm(chip);
+ u8 value;
+ int ret;
+
+ ret = stmpe_reg_read(stmpe_pwm->stmpe, STMPE24XX_PWMCS);
+ if (ret < 0) {
+ dev_err(chip->dev, "error reading PWM#%u control\n",
+ pwm->hwpwm);
+ return ret;
+ }
+
+ value = ret | BIT(pwm->hwpwm);
+
+ ret = stmpe_reg_write(stmpe_pwm->stmpe, STMPE24XX_PWMCS, value);
+ if (ret) {
+ dev_err(chip->dev, "error writing PWM#%u control\n",
+ pwm->hwpwm);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void stmpe_24xx_pwm_disable(struct pwm_chip *chip,
+ struct pwm_device *pwm)
+{
+ struct stmpe_pwm *stmpe_pwm = to_stmpe_pwm(chip);
+ u8 value;
+ int ret;
+
+ ret = stmpe_reg_read(stmpe_pwm->stmpe, STMPE24XX_PWMCS);
+ if (ret < 0) {
+ dev_err(chip->dev, "error reading PWM#%u control\n",
+ pwm->hwpwm);
+ return;
+ }
+
+ value = ret & ~BIT(pwm->hwpwm);
+
+ ret = stmpe_reg_write(stmpe_pwm->stmpe, STMPE24XX_PWMCS, value);
+ if (ret) {
+ dev_err(chip->dev, "error writing PWM#%u control\n",
+ pwm->hwpwm);
+ return;
+ }
+}
+
+/* STMPE 24xx PWM instructions */
+#define SMAX 0x007f
+#define SMIN 0x00ff
+#define GTS 0x0000
+#define LOAD BIT(14) /* Only available on 2403 */
+#define RAMPUP 0x0000
+#define RAMPDOWN BIT(7)
+#define PRESCALE_512 BIT(14)
+#define STEPTIME_1 BIT(8)
+#define BRANCH (BIT(15) | BIT(13))
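+
+/*
+ * Each PWMICx register below is fed a tiny three-word program for the
+ * PWM sequencer; e.g. the default { SMAX, GTS, GTS } used in
+ * stmpe_24xx_pwm_config() corresponds to "off all the time" on the
+ * STMPE2401.
+ */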
+
+static int stmpe_24xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct stmpe_pwm *stmpe_pwm = to_stmpe_pwm(chip);
+ unsigned int i, pin;
+ u16 program[3] = {
+ SMAX,
+ GTS,
+ GTS,
+ };
+ u8 offset;
+ int ret;
+
+ /* Make sure we are disabled */
+ if (pwm_is_enabled(pwm)) {
+ stmpe_24xx_pwm_disable(chip, pwm);
+ } else {
+ /* Connect the PWM to the pin */
+ pin = pwm->hwpwm;
+
+ /* On STMPE2401 and 2403 pins 21,22,23 are used */
+ if (stmpe_pwm->stmpe->partnum == STMPE2401 ||
+ stmpe_pwm->stmpe->partnum == STMPE2403)
+ pin += STMPE_PWM_24XX_PINBASE;
+
+ ret = stmpe_set_altfunc(stmpe_pwm->stmpe, BIT(pin),
+ STMPE_BLOCK_PWM);
+ if (ret) {
+ dev_err(chip->dev, "unable to connect PWM#%u to pin\n",
+ pwm->hwpwm);
+ return ret;
+ }
+ }
+
+ /* STMPE24XX */
+ switch (pwm->hwpwm) {
+ case 0:
+ offset = STMPE24XX_PWMIC0;
+ break;
+
+ case 1:
+ offset = STMPE24XX_PWMIC1;
+ break;
+
+ case 2:
+ offset = STMPE24XX_PWMIC2;
+ break;
+
+ default:
+ /* Should not happen as npwm is 3 */
+ return -ENODEV;
+ }
+
+ dev_dbg(chip->dev, "PWM#%u: config duty %d ns, period %d ns\n",
+ pwm->hwpwm, duty_ns, period_ns);
+
+ if (duty_ns == 0) {
+ if (stmpe_pwm->stmpe->partnum == STMPE2401)
+ program[0] = SMAX; /* off all the time */
+
+ if (stmpe_pwm->stmpe->partnum == STMPE2403)
+ program[0] = LOAD | 0xff; /* LOAD 0xff */
+
+ stmpe_pwm->last_duty = 0x00;
+ } else if (duty_ns == period_ns) {
+ if (stmpe_pwm->stmpe->partnum == STMPE2401)
+ program[0] = SMIN; /* on all the time */
+
+ if (stmpe_pwm->stmpe->partnum == STMPE2403)
+ program[0] = LOAD | 0x00; /* LOAD 0x00 */
+
+ stmpe_pwm->last_duty = 0xff;
+ } else {
+ u8 value, last = stmpe_pwm->last_duty;
+ unsigned long duty;
+
+ /*
+ * The counter runs from 0x00 to 0xff repeatedly at 32768 Hz (i.e.
+ * one tick every 30517 ns) and is compared against the ramp
+ * counter: whenever the ramp counter is >= the PWM counter, the
+ * output is high. With LOAD we can define how much of the cycle
+ * the output is on.
+ *
+ * Prescale = 0 -> f = 2048 Hz -> T = 1/f = 488281.25 ns
+ */
+
+ /* Scale to 0..0xff */
+ duty = duty_ns * 256;
+ duty = DIV_ROUND_CLOSEST(duty, period_ns);
+ value = duty;
+
+ if (value == last) {
+ /* Run the old program */
+ if (pwm_is_enabled(pwm))
+ stmpe_24xx_pwm_enable(chip, pwm);
+
+ return 0;
+ } else if (stmpe_pwm->stmpe->partnum == STMPE2403) {
+ /* STMPE2403 can simply set the right PWM value */
+ program[0] = LOAD | value;
+ program[1] = 0x0000;
+ } else if (stmpe_pwm->stmpe->partnum == STMPE2401) {
+ /* The STMPE2401 needs a more complex program */
+ u16 incdec = 0x0000;
+
+ if (last < value)
+ /* Count up */
+ incdec = RAMPUP | (value - last);
+ else
+ /* Count down */
+ incdec = RAMPDOWN | (last - value);
+
+ /* Step to desired value, smoothly */
+ program[0] = PRESCALE_512 | STEPTIME_1 | incdec;
+
+ /* Loop eternally to 0x00 */
+ program[1] = BRANCH;
+ }
+
+ dev_dbg(chip->dev,
+ "PWM#%u: value = %02x, last_duty = %02x, program=%04x,%04x,%04x\n",
+ pwm->hwpwm, value, last, program[0], program[1],
+ program[2]);
+ stmpe_pwm->last_duty = value;
+ }
+
+ /*
+ * We can write programs of up to 64 16-bit words into this channel.
+ */
+ for (i = 0; i < ARRAY_SIZE(program); i++) {
+ u8 value;
+
+ value = (program[i] >> 8) & 0xff;
+
+ ret = stmpe_reg_write(stmpe_pwm->stmpe, offset, value);
+ if (ret) {
+ dev_err(chip->dev, "error writing register %02x: %d\n",
+ offset, ret);
+ return ret;
+ }
+
+ value = program[i] & 0xff;
+
+ ret = stmpe_reg_write(stmpe_pwm->stmpe, offset, value);
+ if (ret) {
+ dev_err(chip->dev, "error writing register %02x: %d\n",
+ offset, ret);
+ return ret;
+ }
+ }
+
+ /* If we were enabled, re-enable this PWM */
+ if (pwm_is_enabled(pwm))
+ stmpe_24xx_pwm_enable(chip, pwm);
+
+ /* Sleep for 200ms so we're sure it will take effect */
+ msleep(200);
+
+ dev_dbg(chip->dev, "programmed PWM#%u, %u bytes\n", pwm->hwpwm, i);
+
+ return 0;
+}
+
+static const struct pwm_ops stmpe_24xx_pwm_ops = {
+ .config = stmpe_24xx_pwm_config,
+ .enable = stmpe_24xx_pwm_enable,
+ .disable = stmpe_24xx_pwm_disable,
+ .owner = THIS_MODULE,
+};
+
+static int __init stmpe_pwm_probe(struct platform_device *pdev)
+{
+ struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
+ struct stmpe_pwm *pwm;
+ int ret;
+
+ pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
+ if (!pwm)
+ return -ENOMEM;
+
+ pwm->stmpe = stmpe;
+ pwm->chip.dev = &pdev->dev;
+ pwm->chip.base = -1;
+
+ if (stmpe->partnum == STMPE2401 || stmpe->partnum == STMPE2403) {
+ pwm->chip.ops = &stmpe_24xx_pwm_ops;
+ pwm->chip.npwm = 3;
+ } else {
+ if (stmpe->partnum == STMPE1601)
+ dev_err(&pdev->dev, "STMPE1601 not yet supported\n");
+ else
+ dev_err(&pdev->dev, "Unknown STMPE PWM\n");
+
+ return -ENODEV;
+ }
+
+ ret = stmpe_enable(stmpe, STMPE_BLOCK_PWM);
+ if (ret)
+ return ret;
+
+ ret = pwmchip_add(&pwm->chip);
+ if (ret) {
+ stmpe_disable(stmpe, STMPE_BLOCK_PWM);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, pwm);
+
+ return 0;
+}
+
+static struct platform_driver stmpe_pwm_driver = {
+ .driver = {
+ .name = "stmpe-pwm",
+ },
+};
+builtin_platform_driver_probe(stmpe_pwm_driver, stmpe_pwm_probe);
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index d4de0607b..e4647840c 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -26,9 +26,11 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/pwm.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/reset.h>
#define PWM_ENABLE (1 << 31)
#define PWM_DUTY_WIDTH 8
@@ -36,15 +38,20 @@
#define PWM_SCALE_WIDTH 13
#define PWM_SCALE_SHIFT 0
-#define NUM_PWM 4
+struct tegra_pwm_soc {
+ unsigned int num_channels;
+};
struct tegra_pwm_chip {
- struct pwm_chip chip;
- struct device *dev;
+ struct pwm_chip chip;
+ struct device *dev;
+
+ struct clk *clk;
+ struct reset_control *rst;
- struct clk *clk;
+ void __iomem *regs;
- void __iomem *mmio_base;
+ const struct tegra_pwm_soc *soc;
};
static inline struct tegra_pwm_chip *to_tegra_pwm_chip(struct pwm_chip *chip)
@@ -54,20 +61,20 @@ static inline struct tegra_pwm_chip *to_tegra_pwm_chip(struct pwm_chip *chip)
static inline u32 pwm_readl(struct tegra_pwm_chip *chip, unsigned int num)
{
- return readl(chip->mmio_base + (num << 4));
+ return readl(chip->regs + (num << 4));
}
static inline void pwm_writel(struct tegra_pwm_chip *chip, unsigned int num,
unsigned long val)
{
- writel(val, chip->mmio_base + (num << 4));
+ writel(val, chip->regs + (num << 4));
}
static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
int duty_ns, int period_ns)
{
struct tegra_pwm_chip *pc = to_tegra_pwm_chip(chip);
- unsigned long long c;
+ unsigned long long c = duty_ns;
unsigned long rate, hz;
u32 val = 0;
int err;
@@ -77,7 +84,8 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
* per (1 << PWM_DUTY_WIDTH) cycles and make sure to round to the
* nearest integer during division.
*/
- c = duty_ns * ((1 << PWM_DUTY_WIDTH) - 1) + period_ns / 2;
+ c *= (1 << PWM_DUTY_WIDTH);
+ c += period_ns / 2;
do_div(c, period_ns);
val = (u32)c << PWM_DUTY_SHIFT;
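
The rewritten computation first widens c to 64 bits, so duty_ns * (1 << PWM_DUTY_WIDTH) can no longer overflow a 32-bit intermediate, and it adds period_ns / 2 before the division so the result rounds to nearest rather than truncating. A standalone userspace model (assuming PWM_DUTY_WIDTH == 8, as in this driver):

    #include <stdint.h>

    static uint32_t duty_to_reg(uint32_t duty_ns, uint32_t period_ns)
    {
            uint64_t c = duty_ns;       /* widen before multiplying */

            c *= 1 << 8;                /* (1 << PWM_DUTY_WIDTH) */
            c += period_ns / 2;         /* round to nearest on divide */
            c /= period_ns;             /* do_div(c, period_ns) in-kernel */
            return (uint32_t)c;
    }
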
@@ -176,12 +184,13 @@ static int tegra_pwm_probe(struct platform_device *pdev)
if (!pwm)
return -ENOMEM;
+ pwm->soc = of_device_get_match_data(&pdev->dev);
pwm->dev = &pdev->dev;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pwm->mmio_base = devm_ioremap_resource(&pdev->dev, r);
- if (IS_ERR(pwm->mmio_base))
- return PTR_ERR(pwm->mmio_base);
+ pwm->regs = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(pwm->regs))
+ return PTR_ERR(pwm->regs);
platform_set_drvdata(pdev, pwm);
@@ -189,14 +198,24 @@ static int tegra_pwm_probe(struct platform_device *pdev)
if (IS_ERR(pwm->clk))
return PTR_ERR(pwm->clk);
+ pwm->rst = devm_reset_control_get(&pdev->dev, "pwm");
+ if (IS_ERR(pwm->rst)) {
+ ret = PTR_ERR(pwm->rst);
+ dev_err(&pdev->dev, "Reset control is not found: %d\n", ret);
+ return ret;
+ }
+
+ reset_control_deassert(pwm->rst);
+
pwm->chip.dev = &pdev->dev;
pwm->chip.ops = &tegra_pwm_ops;
pwm->chip.base = -1;
- pwm->chip.npwm = NUM_PWM;
+ pwm->chip.npwm = pwm->soc->num_channels;
ret = pwmchip_add(&pwm->chip);
if (ret < 0) {
dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
+ reset_control_assert(pwm->rst);
return ret;
}
@@ -206,12 +225,17 @@ static int tegra_pwm_probe(struct platform_device *pdev)
static int tegra_pwm_remove(struct platform_device *pdev)
{
struct tegra_pwm_chip *pc = platform_get_drvdata(pdev);
- int i;
+ unsigned int i;
+ int err;
if (WARN_ON(!pc))
return -ENODEV;
- for (i = 0; i < NUM_PWM; i++) {
+ err = clk_prepare_enable(pc->clk);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < pc->chip.npwm; i++) {
struct pwm_device *pwm = &pc->chip.pwms[i];
if (!pwm_is_enabled(pwm))
@@ -223,12 +247,23 @@ static int tegra_pwm_remove(struct platform_device *pdev)
clk_disable_unprepare(pc->clk);
}
+ reset_control_assert(pc->rst);
+ clk_disable_unprepare(pc->clk);
+
return pwmchip_remove(&pc->chip);
}
+static const struct tegra_pwm_soc tegra20_pwm_soc = {
+ .num_channels = 4,
+};
+
+static const struct tegra_pwm_soc tegra186_pwm_soc = {
+ .num_channels = 1,
+};
+
static const struct of_device_id tegra_pwm_of_match[] = {
- { .compatible = "nvidia,tegra20-pwm" },
- { .compatible = "nvidia,tegra30-pwm" },
+ { .compatible = "nvidia,tegra20-pwm", .data = &tegra20_pwm_soc },
+ { .compatible = "nvidia,tegra186-pwm", .data = &tegra186_pwm_soc },
{ }
};
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index 616af764a..6ec342dd3 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -27,8 +27,6 @@
#include <linux/pwm.h>
#include <linux/of_device.h>
-#include "pwm-tipwmss.h"
-
/* ECAP registers and bits definitions */
#define CAP1 0x08
#define CAP2 0x0C
@@ -195,6 +193,7 @@ static const struct pwm_ops ecap_pwm_ops = {
};
static const struct of_device_id ecap_of_match[] = {
+ { .compatible = "ti,am3352-ecap" },
{ .compatible = "ti,am33xx-ecap" },
{},
};
@@ -202,11 +201,11 @@ MODULE_DEVICE_TABLE(of, ecap_of_match);
static int ecap_pwm_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
int ret;
struct resource *r;
struct clk *clk;
struct ecap_pwm_chip *pc;
- u16 status;
pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
if (!pc)
@@ -214,6 +213,13 @@ static int ecap_pwm_probe(struct platform_device *pdev)
clk = devm_clk_get(&pdev->dev, "fck");
if (IS_ERR(clk)) {
+ if (of_device_is_compatible(np, "ti,am33xx-ecap")) {
+ dev_warn(&pdev->dev, "Binding is obsolete.\n");
+ clk = devm_clk_get(pdev->dev.parent, "fck");
+ }
+ }
+
+ if (IS_ERR(clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
return PTR_ERR(clk);
}
@@ -243,40 +249,15 @@ static int ecap_pwm_probe(struct platform_device *pdev)
}
pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
-
- status = pwmss_submodule_state_change(pdev->dev.parent,
- PWMSS_ECAPCLK_EN);
- if (!(status & PWMSS_ECAPCLK_EN_ACK)) {
- dev_err(&pdev->dev, "PWMSS config space clock enable failed\n");
- ret = -EINVAL;
- goto pwmss_clk_failure;
- }
-
- pm_runtime_put_sync(&pdev->dev);
platform_set_drvdata(pdev, pc);
return 0;
-
-pwmss_clk_failure:
- pm_runtime_put_sync(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
- pwmchip_remove(&pc->chip);
- return ret;
}
static int ecap_pwm_remove(struct platform_device *pdev)
{
struct ecap_pwm_chip *pc = platform_get_drvdata(pdev);
- pm_runtime_get_sync(&pdev->dev);
- /*
- * Due to hardware misbehaviour, acknowledge of the stop_req
- * is missing. Hence checking of the status bit skipped.
- */
- pwmss_submodule_state_change(pdev->dev.parent, PWMSS_ECAPCLK_STOP_REQ);
- pm_runtime_put_sync(&pdev->dev);
-
pm_runtime_disable(&pdev->dev);
return pwmchip_remove(&pc->chip);
}
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index 6a41e6601..b5c6b0636 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -27,8 +27,6 @@
#include <linux/pm_runtime.h>
#include <linux/of_device.h>
-#include "pwm-tipwmss.h"
-
/* EHRPWM registers and bits definitions */
/* Time base module registers */
@@ -426,6 +424,7 @@ static const struct pwm_ops ehrpwm_pwm_ops = {
};
static const struct of_device_id ehrpwm_of_match[] = {
+ { .compatible = "ti,am3352-ehrpwm" },
{ .compatible = "ti,am33xx-ehrpwm" },
{},
};
@@ -433,11 +432,11 @@ MODULE_DEVICE_TABLE(of, ehrpwm_of_match);
static int ehrpwm_pwm_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
int ret;
struct resource *r;
struct clk *clk;
struct ehrpwm_pwm_chip *pc;
- u16 status;
pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
if (!pc)
@@ -445,6 +444,13 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
clk = devm_clk_get(&pdev->dev, "fck");
if (IS_ERR(clk)) {
+ if (of_device_is_compatible(np, "ti,am33xx-ehrpwm")) {
+ dev_warn(&pdev->dev, "Binding is obsolete.\n");
+ clk = devm_clk_get(pdev->dev.parent, "fck");
+ }
+ }
+
+ if (IS_ERR(clk)) {
dev_err(&pdev->dev, "failed to get clock\n");
return PTR_ERR(clk);
}
@@ -487,27 +493,9 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
}
pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
-
- status = pwmss_submodule_state_change(pdev->dev.parent,
- PWMSS_EPWMCLK_EN);
- if (!(status & PWMSS_EPWMCLK_EN_ACK)) {
- dev_err(&pdev->dev, "PWMSS config space clock enable failed\n");
- ret = -EINVAL;
- goto pwmss_clk_failure;
- }
-
- pm_runtime_put_sync(&pdev->dev);
platform_set_drvdata(pdev, pc);
return 0;
-
-pwmss_clk_failure:
- pm_runtime_put_sync(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
- pwmchip_remove(&pc->chip);
- clk_unprepare(pc->tbclk);
- return ret;
}
static int ehrpwm_pwm_remove(struct platform_device *pdev)
@@ -516,14 +504,6 @@ static int ehrpwm_pwm_remove(struct platform_device *pdev)
clk_unprepare(pc->tbclk);
- pm_runtime_get_sync(&pdev->dev);
- /*
- * Due to hardware misbehaviour, acknowledge of the stop_req
- * is missing. Hence checking of the status bit skipped.
- */
- pwmss_submodule_state_change(pdev->dev.parent, PWMSS_EPWMCLK_STOP_REQ);
- pm_runtime_put_sync(&pdev->dev);
-
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return pwmchip_remove(&pc->chip);
diff --git a/drivers/pwm/pwm-tipwmss.c b/drivers/pwm/pwm-tipwmss.c
index 5cf65a15d..829f4991c 100644
--- a/drivers/pwm/pwm-tipwmss.c
+++ b/drivers/pwm/pwm-tipwmss.c
@@ -22,32 +22,6 @@
#include <linux/pm_runtime.h>
#include <linux/of_device.h>
-#include "pwm-tipwmss.h"
-
-#define PWMSS_CLKCONFIG 0x8 /* Clock gating reg */
-#define PWMSS_CLKSTATUS 0xc /* Clock gating status reg */
-
-struct pwmss_info {
- void __iomem *mmio_base;
- struct mutex pwmss_lock;
- u16 pwmss_clkconfig;
-};
-
-u16 pwmss_submodule_state_change(struct device *dev, int set)
-{
- struct pwmss_info *info = dev_get_drvdata(dev);
- u16 val;
-
- mutex_lock(&info->pwmss_lock);
- val = readw(info->mmio_base + PWMSS_CLKCONFIG);
- val |= set;
- writew(val , info->mmio_base + PWMSS_CLKCONFIG);
- mutex_unlock(&info->pwmss_lock);
-
- return readw(info->mmio_base + PWMSS_CLKSTATUS);
-}
-EXPORT_SYMBOL(pwmss_submodule_state_change);
-
static const struct of_device_id pwmss_of_match[] = {
{ .compatible = "ti,am33xx-pwmss" },
{},
@@ -57,24 +31,10 @@ MODULE_DEVICE_TABLE(of, pwmss_of_match);
static int pwmss_probe(struct platform_device *pdev)
{
int ret;
- struct resource *r;
- struct pwmss_info *info;
struct device_node *node = pdev->dev.of_node;
- info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- mutex_init(&info->pwmss_lock);
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
- if (IS_ERR(info->mmio_base))
- return PTR_ERR(info->mmio_base);
-
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
- platform_set_drvdata(pdev, info);
/* Populate all the child nodes here... */
ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
@@ -86,30 +46,21 @@ static int pwmss_probe(struct platform_device *pdev)
static int pwmss_remove(struct platform_device *pdev)
{
- struct pwmss_info *info = platform_get_drvdata(pdev);
-
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- mutex_destroy(&info->pwmss_lock);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int pwmss_suspend(struct device *dev)
{
- struct pwmss_info *info = dev_get_drvdata(dev);
-
- info->pwmss_clkconfig = readw(info->mmio_base + PWMSS_CLKCONFIG);
pm_runtime_put_sync(dev);
return 0;
}
static int pwmss_resume(struct device *dev)
{
- struct pwmss_info *info = dev_get_drvdata(dev);
-
pm_runtime_get_sync(dev);
- writew(info->pwmss_clkconfig, info->mmio_base + PWMSS_CLKCONFIG);
return 0;
}
#endif
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index 01695d48d..18ed72559 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -208,16 +208,33 @@ static ssize_t polarity_store(struct device *child,
return ret ? : size;
}
+static ssize_t capture_show(struct device *child,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pwm_device *pwm = child_to_pwm_device(child);
+ struct pwm_capture result;
+ int ret;
+
+ ret = pwm_capture(pwm, &result, jiffies_to_msecs(HZ));
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%u %u\n", result.period, result.duty_cycle);
+}
+
static DEVICE_ATTR_RW(period);
static DEVICE_ATTR_RW(duty_cycle);
static DEVICE_ATTR_RW(enable);
static DEVICE_ATTR_RW(polarity);
+static DEVICE_ATTR_RO(capture);
static struct attribute *pwm_attrs[] = {
&dev_attr_period.attr,
&dev_attr_duty_cycle.attr,
&dev_attr_enable.attr,
&dev_attr_polarity.attr,
+ &dev_attr_capture.attr,
NULL
};
ATTRIBUTE_GROUPS(pwm);
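
capture_show() above blocks in pwm_capture() for up to one second (jiffies_to_msecs(HZ)) and then reports the measured period and duty cycle as two unsigned integers. A hypothetical userspace reader, assuming an exported channel at pwmchip0/pwm0:

    #include <stdio.h>

    int main(void)
    {
            unsigned int period, duty;
            FILE *f = fopen("/sys/class/pwm/pwmchip0/pwm0/capture", "r");

            if (!f)
                    return 1;
            if (fscanf(f, "%u %u", &period, &duty) == 2)
                    printf("period=%u duty_cycle=%u\n", period, duty);
            fclose(f);
            return 0;
    }
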
diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig
index b5a10d3c9..d6d2f20c4 100644
--- a/drivers/rapidio/Kconfig
+++ b/drivers/rapidio/Kconfig
@@ -67,6 +67,15 @@ config RAPIDIO_ENUM_BASIC
endchoice
+config RAPIDIO_CHMAN
+ tristate "RapidIO Channelized Messaging driver"
+ depends on RAPIDIO
+ help
+ This option includes the RapidIO channelized messaging driver,
+ which provides a socket-like interface that lets multiple
+ user-space applications share a single RapidIO messaging mailbox.
+ See "Documentation/rapidio/rio_cm.txt" for a description of the
+ driver.
+
config RAPIDIO_MPORT_CDEV
tristate "RapidIO /dev mport device driver"
depends on RAPIDIO
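
For the new RAPIDIO_CHMAN option above: the rio_cm driver (added below) exposes its socket-like interface through a /dev/rio_cm character device. A minimal hypothetical userspace sketch, assuming the RIO_CM_CHAN_CREATE ioctl from <linux/rio_cm_cdev.h> (channel number 0 requests dynamic allocation, per RIOCM_CHNUM_AUTO in the driver):

    #include <stdio.h>
    #include <stdint.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/rio_cm_cdev.h>

    int main(void)
    {
            uint16_t ch = 0;        /* 0 = allocate a channel dynamically */
            int fd = open("/dev/rio_cm", O_RDWR);

            if (fd < 0)
                    return 1;
            if (ioctl(fd, RIO_CM_CHAN_CREATE, &ch) == 0)
                    printf("created channel %u\n", ch);
            close(fd);
            return 0;
    }
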
diff --git a/drivers/rapidio/Makefile b/drivers/rapidio/Makefile
index 6271ada69..74dcea45a 100644
--- a/drivers/rapidio/Makefile
+++ b/drivers/rapidio/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_RAPIDIO) += rapidio.o
rapidio-y := rio.o rio-access.o rio-driver.o rio-sysfs.o
obj-$(CONFIG_RAPIDIO_ENUM_BASIC) += rio-scan.o
+obj-$(CONFIG_RAPIDIO_CHMAN) += rio_cm.o
obj-$(CONFIG_RAPIDIO) += switches/
obj-$(CONFIG_RAPIDIO) += devices/
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index e165b7ce2..436dfe871 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -1813,7 +1813,7 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
if (rdev->pef & RIO_PEF_EXT_FEATURES) {
rdev->efptr = rval & 0xffff;
rdev->phys_efptr = rio_mport_get_physefb(mport, 0, destid,
- hopcount);
+ hopcount, &rdev->phys_rmap);
rdev->em_efptr = rio_mport_get_feature(mport, 0, destid,
hopcount, RIO_EFB_ERR_MGMNT);
@@ -2242,7 +2242,7 @@ static void mport_mm_open(struct vm_area_struct *vma)
{
struct rio_mport_mapping *map = vma->vm_private_data;
-rmcd_debug(MMAP, "0x%pad", &map->phys_addr);
+ rmcd_debug(MMAP, "%pad", &map->phys_addr);
kref_get(&map->ref);
}
@@ -2250,7 +2250,7 @@ static void mport_mm_close(struct vm_area_struct *vma)
{
struct rio_mport_mapping *map = vma->vm_private_data;
-rmcd_debug(MMAP, "0x%pad", &map->phys_addr);
+ rmcd_debug(MMAP, "%pad", &map->phys_addr);
mutex_lock(&map->md->buf_mutex);
kref_put(&map->ref, mport_release_mapping);
mutex_unlock(&map->md->buf_mutex);
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 68d2bae00..9d19b9a62 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -37,11 +37,20 @@
#include "tsi721.h"
#ifdef DEBUG
-u32 dbg_level = DBG_INIT | DBG_EXIT;
+u32 dbg_level;
module_param(dbg_level, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
#endif
+static int pcie_mrrs = -1;
+module_param(pcie_mrrs, int, S_IRUGO);
+MODULE_PARM_DESC(pcie_mrrs, "PCIe MRRS override value (0...5)");
+
+static u8 mbox_sel = 0x0f;
+module_param(mbox_sel, byte, S_IRUGO);
+MODULE_PARM_DESC(mbox_sel,
+ "RIO Messaging MBOX Selection Mask (default: 0x0f = all)");
+
static void tsi721_omsg_handler(struct tsi721_device *priv, int ch);
static void tsi721_imsg_handler(struct tsi721_device *priv, int ch);
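
mbox_sel above (like dma_sel in tsi721_dma.c further down) is a simple bitmask: bit N set means resource N may be used. A standalone model of the convention:

    #include <stdbool.h>

    /* Bit N of the selection mask enables mailbox/channel N. */
    static bool selected(unsigned char sel_mask, unsigned int n)
    {
            return (sel_mask & (1u << n)) != 0;
    }

    /* e.g. mbox_sel=0x05 enables mailboxes 0 and 2; the open_*_mbox()
     * hunks below return -ENODEV for any mailbox with its bit clear. */
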
@@ -1081,7 +1090,7 @@ static void tsi721_init_pc2sr_mapping(struct tsi721_device *priv)
* from rstart to lstart.
*/
static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
- u64 rstart, u32 size, u32 flags)
+ u64 rstart, u64 size, u32 flags)
{
struct tsi721_device *priv = mport->priv;
int i, avail = -1;
@@ -1094,6 +1103,10 @@ static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
struct tsi721_ib_win_mapping *map = NULL;
int ret = -EBUSY;
+ /* Max IBW size supported by HW is 16GB */
+ if (size > 0x400000000UL)
+ return -EINVAL;
+
if (direct) {
/* Calculate minimal acceptable window size and base address */
@@ -1101,15 +1114,15 @@ static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
ibw_start = lstart & ~(ibw_size - 1);
tsi_debug(IBW, &priv->pdev->dev,
- "Direct (RIO_0x%llx -> PCIe_0x%pad), size=0x%x, ibw_start = 0x%llx",
+ "Direct (RIO_0x%llx -> PCIe_%pad), size=0x%llx, ibw_start = 0x%llx",
rstart, &lstart, size, ibw_start);
while ((lstart + size) > (ibw_start + ibw_size)) {
ibw_size *= 2;
ibw_start = lstart & ~(ibw_size - 1);
- if (ibw_size > 0x80000000) { /* Limit max size to 2GB */
+ /* Check for crossing IBW max size 16GB */
+ if (ibw_size > 0x400000000UL)
return -EBUSY;
- }
}
loc_start = ibw_start;
@@ -1120,7 +1133,7 @@ static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
} else {
tsi_debug(IBW, &priv->pdev->dev,
- "Translated (RIO_0x%llx -> PCIe_0x%pad), size=0x%x",
+ "Translated (RIO_0x%llx -> PCIe_%pad), size=0x%llx",
rstart, &lstart, size);
if (!is_power_of_2(size) || size < 0x1000 ||
@@ -1215,7 +1228,7 @@ static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
priv->ibwin_cnt--;
tsi_debug(IBW, &priv->pdev->dev,
- "Configured IBWIN%d (RIO_0x%llx -> PCIe_0x%pad), size=0x%llx",
+ "Configured IBWIN%d (RIO_0x%llx -> PCIe_%pad), size=0x%llx",
i, ibw_start, &loc_start, ibw_size);
return 0;
@@ -1237,7 +1250,7 @@ static void tsi721_rio_unmap_inb_mem(struct rio_mport *mport,
int i;
tsi_debug(IBW, &priv->pdev->dev,
- "Unmap IBW mapped to PCIe_0x%pad", &lstart);
+ "Unmap IBW mapped to PCIe_%pad", &lstart);
/* Search for matching active inbound translation window */
for (i = 0; i < TSI721_IBWIN_NUM; i++) {
@@ -1877,6 +1890,11 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
goto out;
}
+ if ((mbox_sel & (1 << mbox)) == 0) {
+ rc = -ENODEV;
+ goto out;
+ }
+
priv->omsg_ring[mbox].dev_id = dev_id;
priv->omsg_ring[mbox].size = entries;
priv->omsg_ring[mbox].sts_rdptr = 0;
@@ -2161,6 +2179,11 @@ static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id,
goto out;
}
+ if ((mbox_sel & (1 << mbox)) == 0) {
+ rc = -ENODEV;
+ goto out;
+ }
+
/* Initialize IB Messaging Ring */
priv->imsg_ring[mbox].dev_id = dev_id;
priv->imsg_ring[mbox].size = entries;
@@ -2532,11 +2555,11 @@ static int tsi721_query_mport(struct rio_mport *mport,
struct tsi721_device *priv = mport->priv;
u32 rval;
- rval = ioread32(priv->regs + (0x100 + RIO_PORT_N_ERR_STS_CSR(0)));
+ rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_ERR_STS_CSR(0, 0));
if (rval & RIO_PORT_N_ERR_STS_PORT_OK) {
- rval = ioread32(priv->regs + (0x100 + RIO_PORT_N_CTL2_CSR(0)));
+ rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_CTL2_CSR(0, 0));
attr->link_speed = (rval & RIO_PORT_N_CTL2_SEL_BAUD) >> 28;
- rval = ioread32(priv->regs + (0x100 + RIO_PORT_N_CTL_CSR(0)));
+ rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_CTL_CSR(0, 0));
attr->link_width = (rval & RIO_PORT_N_CTL_IPW) >> 27;
} else
attr->link_speed = RIO_LINK_DOWN;
@@ -2650,9 +2673,9 @@ static int tsi721_setup_mport(struct tsi721_device *priv)
mport->ops = &tsi721_rio_ops;
mport->index = 0;
mport->sys_size = 0; /* small system */
- mport->phy_type = RIO_PHY_SERIAL;
mport->priv = (void *)priv;
mport->phys_efptr = 0x100;
+ mport->phys_rmap = 1;
mport->dev.parent = &pdev->dev;
mport->dev.release = tsi721_mport_release;
@@ -2840,6 +2863,16 @@ static int tsi721_probe(struct pci_dev *pdev,
pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
+ /* Override PCIe Maximum Read Request Size setting if requested */
+ if (pcie_mrrs >= 0) {
+ if (pcie_mrrs <= 5)
+ pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_READRQ, pcie_mrrs << 12);
+ else
+ tsi_info(&pdev->dev,
+ "Invalid MRRS override value %d", pcie_mrrs);
+ }
+
/* Adjust PCIe completion timeout. */
pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2, 0xf, 0x2);
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index 5456dbddc..5941437cb 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -661,7 +661,7 @@ enum dma_rtype {
*/
#define TSI721_DMA_CHNUM TSI721_DMA_MAXCH
-#define TSI721_DMACH_MAINT 0 /* DMA channel for maint requests */
+#define TSI721_DMACH_MAINT 7 /* DMA channel for maint requests */
#define TSI721_DMACH_MAINT_NBD 32 /* Number of BDs for maint requests */
#define TSI721_DMACH_DMA 1 /* DMA channel for data transfers */
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
index 155cae1e6..e2a418598 100644
--- a/drivers/rapidio/devices/tsi721_dma.c
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -36,18 +36,26 @@
#include "tsi721.h"
-#define TSI721_DMA_TX_QUEUE_SZ 16 /* number of transaction descriptors */
-
#ifdef CONFIG_PCI_MSI
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr);
#endif
static int tsi721_submit_sg(struct tsi721_tx_desc *desc);
static unsigned int dma_desc_per_channel = 128;
-module_param(dma_desc_per_channel, uint, S_IWUSR | S_IRUGO);
+module_param(dma_desc_per_channel, uint, S_IRUGO);
MODULE_PARM_DESC(dma_desc_per_channel,
"Number of DMA descriptors per channel (default: 128)");
+static unsigned int dma_txqueue_sz = 16;
+module_param(dma_txqueue_sz, uint, S_IRUGO);
+MODULE_PARM_DESC(dma_txqueue_sz,
+ "DMA Transactions Queue Size (default: 16)");
+
+static u8 dma_sel = 0x7f;
+module_param(dma_sel, byte, S_IRUGO);
+MODULE_PARM_DESC(dma_sel,
+ "DMA Channel Selection Mask (default: 0x7f = all)");
+
static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
{
return container_of(chan, struct tsi721_bdma_chan, dchan);
@@ -718,6 +726,7 @@ static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
cookie = dma_cookie_assign(txd);
desc->status = DMA_IN_PROGRESS;
list_add_tail(&desc->desc_node, &bdma_chan->queue);
+ tsi721_advance_work(bdma_chan, NULL);
spin_unlock_bh(&bdma_chan->lock);
return cookie;
@@ -732,7 +741,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);
if (bdma_chan->bd_base)
- return TSI721_DMA_TX_QUEUE_SZ;
+ return dma_txqueue_sz;
/* Initialize BDMA channel */
if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) {
@@ -742,7 +751,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
}
/* Allocate queue of transaction descriptors */
- desc = kcalloc(TSI721_DMA_TX_QUEUE_SZ, sizeof(struct tsi721_tx_desc),
+ desc = kcalloc(dma_txqueue_sz, sizeof(struct tsi721_tx_desc),
GFP_ATOMIC);
if (!desc) {
tsi_err(&dchan->dev->device,
@@ -754,7 +763,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
bdma_chan->tx_desc = desc;
- for (i = 0; i < TSI721_DMA_TX_QUEUE_SZ; i++) {
+ for (i = 0; i < dma_txqueue_sz; i++) {
dma_async_tx_descriptor_init(&desc[i].txd, dchan);
desc[i].txd.tx_submit = tsi721_tx_submit;
desc[i].txd.flags = DMA_CTRL_ACK;
@@ -766,7 +775,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
bdma_chan->active = true;
tsi721_bdma_interrupt_enable(bdma_chan, 1);
- return TSI721_DMA_TX_QUEUE_SZ;
+ return dma_txqueue_sz;
}
static void tsi721_sync_dma_irq(struct tsi721_bdma_chan *bdma_chan)
@@ -962,7 +971,7 @@ void tsi721_dma_stop_all(struct tsi721_device *priv)
int i;
for (i = 0; i < TSI721_DMA_MAXCH; i++) {
- if (i != TSI721_DMACH_MAINT)
+ if ((i != TSI721_DMACH_MAINT) && (dma_sel & (1 << i)))
tsi721_dma_stop(&priv->bdma[i]);
}
}
@@ -979,7 +988,7 @@ int tsi721_register_dma(struct tsi721_device *priv)
for (i = 0; i < TSI721_DMA_MAXCH; i++) {
struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];
- if (i == TSI721_DMACH_MAINT)
+ if ((i == TSI721_DMACH_MAINT) || (dma_sel & (1 << i)) == 0)
continue;
bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index a63a38080..23429bdac 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -49,15 +49,6 @@ struct rio_id_table {
static int next_destid = 0;
static int next_comptag = 1;
-static int rio_mport_phys_table[] = {
- RIO_EFB_PAR_EP_ID,
- RIO_EFB_PAR_EP_REC_ID,
- RIO_EFB_SER_EP_ID,
- RIO_EFB_SER_EP_REC_ID,
- -1,
-};
-
-
/**
* rio_destid_alloc - Allocate next available destID for given network
* @net: RIO network
@@ -380,10 +371,15 @@ static struct rio_dev *rio_setup_device(struct rio_net *net,
if (rdev->pef & RIO_PEF_EXT_FEATURES) {
rdev->efptr = result & 0xffff;
rdev->phys_efptr = rio_mport_get_physefb(port, 0, destid,
- hopcount);
+ hopcount, &rdev->phys_rmap);
+ pr_debug("RIO: %s Register Map %d device\n",
+ __func__, rdev->phys_rmap);
rdev->em_efptr = rio_mport_get_feature(port, 0, destid,
hopcount, RIO_EFB_ERR_MGMNT);
+ if (!rdev->em_efptr)
+ rdev->em_efptr = rio_mport_get_feature(port, 0, destid,
+ hopcount, RIO_EFB_ERR_MGMNT_HS);
}
rio_mport_read_config_32(port, destid, hopcount, RIO_SRC_OPS_CAR,
@@ -445,7 +441,7 @@ static struct rio_dev *rio_setup_device(struct rio_net *net,
rio_route_clr_table(rdev, RIO_GLOBAL_TABLE, 0);
} else {
if (do_enum)
- /*Enable Input Output Port (transmitter reviever)*/
+ /* Enable Input Output Port (transmitter receiver) */
rio_enable_rx_tx_port(port, 0, destid, hopcount, 0);
dev_set_name(&rdev->dev, "%02x:e:%04x", rdev->net->id,
@@ -481,10 +477,8 @@ cleanup:
/**
* rio_sport_is_active- Tests if a switch port has an active connection.
- * @port: Master port to send transaction
- * @destid: Associated destination ID for switch
- * @hopcount: Hopcount to reach switch
- * @sport: Switch port number
+ * @rdev: RapidIO device object
+ * @sp: Switch port number
*
* Reads the port error status CSR for a particular switch port to
* determine if the port has an active link. Returns
@@ -492,31 +486,12 @@ cleanup:
* inactive.
*/
static int
-rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport)
+rio_sport_is_active(struct rio_dev *rdev, int sp)
{
u32 result = 0;
- u32 ext_ftr_ptr;
- ext_ftr_ptr = rio_mport_get_efb(port, 0, destid, hopcount, 0);
-
- while (ext_ftr_ptr) {
- rio_mport_read_config_32(port, destid, hopcount,
- ext_ftr_ptr, &result);
- result = RIO_GET_BLOCK_ID(result);
- if ((result == RIO_EFB_SER_EP_FREE_ID) ||
- (result == RIO_EFB_SER_EP_FREE_ID_V13P) ||
- (result == RIO_EFB_SER_EP_FREC_ID))
- break;
-
- ext_ftr_ptr = rio_mport_get_efb(port, 0, destid, hopcount,
- ext_ftr_ptr);
- }
-
- if (ext_ftr_ptr)
- rio_mport_read_config_32(port, destid, hopcount,
- ext_ftr_ptr +
- RIO_PORT_N_ERR_STS_CSR(sport),
- &result);
+ rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, sp),
+ &result);
return result & RIO_PORT_N_ERR_STS_PORT_OK;
}
@@ -655,9 +630,7 @@ static int rio_enum_peer(struct rio_net *net, struct rio_mport *port,
cur_destid = next_destid;
- if (rio_sport_is_active
- (port, RIO_ANY_DESTID(port->sys_size), hopcount,
- port_num)) {
+ if (rio_sport_is_active(rdev, port_num)) {
pr_debug(
"RIO: scanning device on port %d\n",
port_num);
@@ -785,8 +758,7 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
if (RIO_GET_PORT_NUM(rdev->swpinfo) == port_num)
continue;
- if (rio_sport_is_active
- (port, destid, hopcount, port_num)) {
+ if (rio_sport_is_active(rdev, port_num)) {
pr_debug(
"RIO: scanning device on port %d\n",
port_num);
@@ -831,21 +803,11 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
static int rio_mport_is_active(struct rio_mport *port)
{
u32 result = 0;
- u32 ext_ftr_ptr;
- int *entry = rio_mport_phys_table;
-
- do {
- if ((ext_ftr_ptr =
- rio_mport_get_feature(port, 1, 0, 0, *entry)))
- break;
- } while (*++entry >= 0);
-
- if (ext_ftr_ptr)
- rio_local_read_config_32(port,
- ext_ftr_ptr +
- RIO_PORT_N_ERR_STS_CSR(port->index),
- &result);
+ rio_local_read_config_32(port,
+ port->phys_efptr +
+ RIO_PORT_N_ERR_STS_CSR(port->index, port->phys_rmap),
+ &result);
return result & RIO_PORT_N_ERR_STS_PORT_OK;
}
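
Throughout these hunks the per-port CSR macros gain an rmap argument because RapidIO rev. 3 endpoints may implement either of two per-port register maps, with different block layouts. A purely illustrative sketch of the idea (offsets and strides are assumed for illustration; the real definitions live in <linux/rio_regs.h>):

    /* Illustrative only: map I and map II place per-port blocks differently. */
    static unsigned int port_n_err_sts(unsigned int port, unsigned int rmap)
    {
            unsigned int stride = (rmap == 1) ? 0x20 : 0x40;  /* assumed */

            return 0x58 + port * stride;    /* 0x58 base: assumed, not actual */
    }
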
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 0dcaa660c..37042858c 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -268,6 +268,12 @@ int rio_request_inb_mbox(struct rio_mport *mport,
mport->inb_msg[mbox].mcback = minb;
rc = mport->ops->open_inb_mbox(mport, dev_id, mbox, entries);
+ if (rc) {
+ mport->inb_msg[mbox].mcback = NULL;
+ mport->inb_msg[mbox].res = NULL;
+ release_resource(res);
+ kfree(res);
+ }
} else
rc = -ENOMEM;
@@ -285,13 +291,22 @@ int rio_request_inb_mbox(struct rio_mport *mport,
*/
int rio_release_inb_mbox(struct rio_mport *mport, int mbox)
{
- if (mport->ops->close_inb_mbox) {
- mport->ops->close_inb_mbox(mport, mbox);
+ int rc;
- /* Release the mailbox resource */
- return release_resource(mport->inb_msg[mbox].res);
- } else
- return -ENOSYS;
+ if (!mport->ops->close_inb_mbox || !mport->inb_msg[mbox].res)
+ return -EINVAL;
+
+ mport->ops->close_inb_mbox(mport, mbox);
+ mport->inb_msg[mbox].mcback = NULL;
+
+ rc = release_resource(mport->inb_msg[mbox].res);
+ if (rc)
+ return rc;
+
+ kfree(mport->inb_msg[mbox].res);
+ mport->inb_msg[mbox].res = NULL;
+
+ return 0;
}
/**
@@ -336,6 +351,12 @@ int rio_request_outb_mbox(struct rio_mport *mport,
mport->outb_msg[mbox].mcback = moutb;
rc = mport->ops->open_outb_mbox(mport, dev_id, mbox, entries);
+ if (rc) {
+ mport->outb_msg[mbox].mcback = NULL;
+ mport->outb_msg[mbox].res = NULL;
+ release_resource(res);
+ kfree(res);
+ }
} else
rc = -ENOMEM;
@@ -353,13 +374,22 @@ int rio_request_outb_mbox(struct rio_mport *mport,
*/
int rio_release_outb_mbox(struct rio_mport *mport, int mbox)
{
- if (mport->ops->close_outb_mbox) {
- mport->ops->close_outb_mbox(mport, mbox);
+ int rc;
- /* Release the mailbox resource */
- return release_resource(mport->outb_msg[mbox].res);
- } else
- return -ENOSYS;
+ if (!mport->ops->close_outb_mbox || !mport->outb_msg[mbox].res)
+ return -EINVAL;
+
+ mport->ops->close_outb_mbox(mport, mbox);
+ mport->outb_msg[mbox].mcback = NULL;
+
+ rc = release_resource(mport->outb_msg[mbox].res);
+ if (rc)
+ return rc;
+
+ kfree(mport->outb_msg[mbox].res);
+ mport->outb_msg[mbox].res = NULL;
+
+ return 0;
}
/**
@@ -756,10 +786,11 @@ EXPORT_SYMBOL_GPL(rio_unmap_outb_region);
* @local: Indicate a local master port or remote device access
* @destid: Destination ID of the device
* @hopcount: Number of switch hops to the device
+ * @rmap: pointer to location to store register map type info
*/
u32
rio_mport_get_physefb(struct rio_mport *port, int local,
- u16 destid, u8 hopcount)
+ u16 destid, u8 hopcount, u32 *rmap)
{
u32 ext_ftr_ptr;
u32 ftr_header;
@@ -777,14 +808,21 @@ rio_mport_get_physefb(struct rio_mport *port, int local,
ftr_header = RIO_GET_BLOCK_ID(ftr_header);
switch (ftr_header) {
- case RIO_EFB_SER_EP_ID_V13P:
- case RIO_EFB_SER_EP_REC_ID_V13P:
- case RIO_EFB_SER_EP_FREE_ID_V13P:
case RIO_EFB_SER_EP_ID:
case RIO_EFB_SER_EP_REC_ID:
case RIO_EFB_SER_EP_FREE_ID:
- case RIO_EFB_SER_EP_FREC_ID:
+ case RIO_EFB_SER_EP_M1_ID:
+ case RIO_EFB_SER_EP_SW_M1_ID:
+ case RIO_EFB_SER_EPF_M1_ID:
+ case RIO_EFB_SER_EPF_SW_M1_ID:
+ *rmap = 1;
+ return ext_ftr_ptr;
+ case RIO_EFB_SER_EP_M2_ID:
+ case RIO_EFB_SER_EP_SW_M2_ID:
+ case RIO_EFB_SER_EPF_M2_ID:
+ case RIO_EFB_SER_EPF_SW_M2_ID:
+ *rmap = 2;
return ext_ftr_ptr;
default:
@@ -843,16 +881,16 @@ int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock)
u32 regval;
rio_read_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
- &regval);
+ RIO_DEV_PORT_N_CTL_CSR(rdev, pnum),
+ &regval);
if (lock)
regval |= RIO_PORT_N_CTL_LOCKOUT;
else
regval &= ~RIO_PORT_N_CTL_LOCKOUT;
rio_write_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
- regval);
+ RIO_DEV_PORT_N_CTL_CSR(rdev, pnum),
+ regval);
return 0;
}
EXPORT_SYMBOL_GPL(rio_set_port_lockout);
@@ -876,6 +914,7 @@ int rio_enable_rx_tx_port(struct rio_mport *port,
#ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS
u32 regval;
u32 ext_ftr_ptr;
+ u32 rmap;
/*
* enable rx input tx output port
@@ -883,34 +922,29 @@ int rio_enable_rx_tx_port(struct rio_mport *port,
pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = "
"%d, port_num = %d)\n", local, destid, hopcount, port_num);
- ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, hopcount);
+ ext_ftr_ptr = rio_mport_get_physefb(port, local, destid,
+ hopcount, &rmap);
if (local) {
- rio_local_read_config_32(port, ext_ftr_ptr +
- RIO_PORT_N_CTL_CSR(0),
+ rio_local_read_config_32(port,
+ ext_ftr_ptr + RIO_PORT_N_CTL_CSR(0, rmap),
&regval);
} else {
if (rio_mport_read_config_32(port, destid, hopcount,
- ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), &regval) < 0)
+ ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num, rmap),
+ &regval) < 0)
return -EIO;
}
- if (regval & RIO_PORT_N_CTL_P_TYP_SER) {
- /* serial */
- regval = regval | RIO_PORT_N_CTL_EN_RX_SER
- | RIO_PORT_N_CTL_EN_TX_SER;
- } else {
- /* parallel */
- regval = regval | RIO_PORT_N_CTL_EN_RX_PAR
- | RIO_PORT_N_CTL_EN_TX_PAR;
- }
+ regval = regval | RIO_PORT_N_CTL_EN_RX | RIO_PORT_N_CTL_EN_TX;
if (local) {
- rio_local_write_config_32(port, ext_ftr_ptr +
- RIO_PORT_N_CTL_CSR(0), regval);
+ rio_local_write_config_32(port,
+ ext_ftr_ptr + RIO_PORT_N_CTL_CSR(0, rmap), regval);
} else {
if (rio_mport_write_config_32(port, destid, hopcount,
- ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), regval) < 0)
+ ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num, rmap),
+ regval) < 0)
return -EIO;
}
#endif
@@ -1012,14 +1046,14 @@ rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp)
/* Read from link maintenance response register
* to clear valid bit */
rio_read_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum),
+ RIO_DEV_PORT_N_MNT_RSP_CSR(rdev, pnum),
&regval);
udelay(50);
}
/* Issue Input-status command */
rio_write_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_MNT_REQ_CSR(pnum),
+ RIO_DEV_PORT_N_MNT_REQ_CSR(rdev, pnum),
RIO_MNT_REQ_CMD_IS);
/* Exit if the response is not expected */
@@ -1030,7 +1064,7 @@ rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp)
while (checkcount--) {
udelay(50);
rio_read_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum),
+ RIO_DEV_PORT_N_MNT_RSP_CSR(rdev, pnum),
&regval);
if (regval & RIO_PORT_N_MNT_RSP_RVAL) {
*lnkresp = regval;
@@ -1046,6 +1080,13 @@ rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp)
* @rdev: Pointer to RIO device control structure
* @pnum: Switch port number to clear errors
* @err_status: port error status (if 0 reads register from device)
+ *
+ * TODO: Currently this routine is not compatible with the recovery process
+ * specified for idt_gen3 RapidIO switch devices. It has to be reviewed
+ * to implement a universal recovery process that is compatible with the
+ * full range of available devices.
+ * The IDT gen3 switch driver now implements a HW-specific error handler
+ * that issues a soft port reset to the port to reset ERR_STOP bits and
+ * ackIDs.
*/
static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)
{
@@ -1055,10 +1096,10 @@ static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)
if (err_status == 0)
rio_read_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
+ RIO_DEV_PORT_N_ERR_STS_CSR(rdev, pnum),
&err_status);
- if (err_status & RIO_PORT_N_ERR_STS_PW_OUT_ES) {
+ if (err_status & RIO_PORT_N_ERR_STS_OUT_ES) {
pr_debug("RIO_EM: servicing Output Error-Stopped state\n");
/*
* Send a Link-Request/Input-Status control symbol
@@ -1073,7 +1114,7 @@ static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)
far_ackid = (regval & RIO_PORT_N_MNT_RSP_ASTAT) >> 5;
far_linkstat = regval & RIO_PORT_N_MNT_RSP_LSTAT;
rio_read_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum),
+ RIO_DEV_PORT_N_ACK_STS_CSR(rdev, pnum),
&regval);
pr_debug("RIO_EM: SP%d_ACK_STS_CSR=0x%08x\n", pnum, regval);
near_ackid = (regval & RIO_PORT_N_ACK_INBOUND) >> 24;
@@ -1091,43 +1132,43 @@ static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)
* far inbound.
*/
rio_write_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum),
+ RIO_DEV_PORT_N_ACK_STS_CSR(rdev, pnum),
(near_ackid << 24) |
(far_ackid << 8) | far_ackid);
/* Align far outstanding/outbound ackIDs with
* near inbound.
*/
far_ackid++;
- if (nextdev)
- rio_write_config_32(nextdev,
- nextdev->phys_efptr +
- RIO_PORT_N_ACK_STS_CSR(RIO_GET_PORT_NUM(nextdev->swpinfo)),
- (far_ackid << 24) |
- (near_ackid << 8) | near_ackid);
- else
- pr_debug("RIO_EM: Invalid nextdev pointer (NULL)\n");
+ if (!nextdev) {
+ pr_debug("RIO_EM: nextdev pointer == NULL\n");
+ goto rd_err;
+ }
+
+ rio_write_config_32(nextdev,
+ RIO_DEV_PORT_N_ACK_STS_CSR(nextdev,
+ RIO_GET_PORT_NUM(nextdev->swpinfo)),
+ (far_ackid << 24) |
+ (near_ackid << 8) | near_ackid);
}
rd_err:
- rio_read_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
- &err_status);
+ rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, pnum),
+ &err_status);
pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status);
}
- if ((err_status & RIO_PORT_N_ERR_STS_PW_INP_ES) && nextdev) {
+ if ((err_status & RIO_PORT_N_ERR_STS_INP_ES) && nextdev) {
pr_debug("RIO_EM: servicing Input Error-Stopped state\n");
rio_get_input_status(nextdev,
RIO_GET_PORT_NUM(nextdev->swpinfo), NULL);
udelay(50);
- rio_read_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
- &err_status);
+ rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, pnum),
+ &err_status);
pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status);
}
- return (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
- RIO_PORT_N_ERR_STS_PW_INP_ES)) ? 1 : 0;
+ return (err_status & (RIO_PORT_N_ERR_STS_OUT_ES |
+ RIO_PORT_N_ERR_STS_INP_ES)) ? 1 : 0;
}
/**
@@ -1227,9 +1268,8 @@ int rio_inb_pwrite_handler(struct rio_mport *mport, union rio_pw_msg *pw_msg)
if (rdev->rswitch->ops && rdev->rswitch->ops->em_handle)
rdev->rswitch->ops->em_handle(rdev, portnum);
- rio_read_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
- &err_status);
+ rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum),
+ &err_status);
pr_debug("RIO_PW: SP%d_ERR_STS_CSR=0x%08x\n", portnum, err_status);
if (err_status & RIO_PORT_N_ERR_STS_PORT_OK) {
@@ -1246,8 +1286,8 @@ int rio_inb_pwrite_handler(struct rio_mport *mport, union rio_pw_msg *pw_msg)
* Depending on the link partner state, two attempts
* may be needed for successful recovery.
*/
- if (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
- RIO_PORT_N_ERR_STS_PW_INP_ES)) {
+ if (err_status & (RIO_PORT_N_ERR_STS_OUT_ES |
+ RIO_PORT_N_ERR_STS_INP_ES)) {
if (rio_clr_err_stopped(rdev, portnum, err_status))
rio_clr_err_stopped(rdev, portnum, 0);
}
@@ -1257,10 +1297,18 @@ int rio_inb_pwrite_handler(struct rio_mport *mport, union rio_pw_msg *pw_msg)
rdev->rswitch->port_ok &= ~(1 << portnum);
rio_set_port_lockout(rdev, portnum, 1);
+ if (rdev->phys_rmap == 1) {
rio_write_config_32(rdev,
- rdev->phys_efptr +
- RIO_PORT_N_ACK_STS_CSR(portnum),
+ RIO_DEV_PORT_N_ACK_STS_CSR(rdev, portnum),
RIO_PORT_N_ACK_CLEAR);
+ } else {
+ rio_write_config_32(rdev,
+ RIO_DEV_PORT_N_OB_ACK_CSR(rdev, portnum),
+ RIO_PORT_N_OB_ACK_CLEAR);
+ rio_write_config_32(rdev,
+ RIO_DEV_PORT_N_IB_ACK_CSR(rdev, portnum),
+ 0);
+ }
/* Schedule Extraction Service */
pr_debug("RIO_PW: Device Extraction on [%s]-P%d\n",
@@ -1289,9 +1337,8 @@ int rio_inb_pwrite_handler(struct rio_mport *mport, union rio_pw_msg *pw_msg)
}
/* Clear remaining error bits and Port-Write Pending bit */
- rio_write_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
- err_status);
+ rio_write_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum),
+ err_status);
return 0;
}
@@ -1342,20 +1389,7 @@ EXPORT_SYMBOL_GPL(rio_mport_get_efb);
* Tell if a device supports a given RapidIO capability.
* Returns the offset of the requested extended feature
* block within the device's RIO configuration space or
- * 0 in case the device does not support it. Possible
- * values for @ftr:
- *
- * %RIO_EFB_PAR_EP_ID LP/LVDS EP Devices
- *
- * %RIO_EFB_PAR_EP_REC_ID LP/LVDS EP Recovery Devices
- *
- * %RIO_EFB_PAR_EP_FREE_ID LP/LVDS EP Free Devices
- *
- * %RIO_EFB_SER_EP_ID LP/Serial EP Devices
- *
- * %RIO_EFB_SER_EP_REC_ID LP/Serial EP Recovery Devices
- *
- * %RIO_EFB_SER_EP_FREE_ID LP/Serial EP Free Devices
+ * 0 in case the device does not support it.
*/
u32
rio_mport_get_feature(struct rio_mport * port, int local, u16 destid,
@@ -1848,7 +1882,9 @@ EXPORT_SYMBOL_GPL(rio_release_dma);
* Initializes RapidIO capable DMA channel for the specified data transfer.
* Uses DMA channel private extension to pass information related to remote
* target RIO device.
- * Returns pointer to DMA transaction descriptor or NULL if failed.
+ *
+ * Returns: pointer to DMA transaction descriptor if successful,
+ * error-valued pointer or NULL if failed.
*/
struct dma_async_tx_descriptor *rio_dma_prep_xfer(struct dma_chan *dchan,
u16 destid, struct rio_dma_data *data,
@@ -1883,7 +1919,9 @@ EXPORT_SYMBOL_GPL(rio_dma_prep_xfer);
* Initializes RapidIO capable DMA channel for the specified data transfer.
* Uses DMA channel private extension to pass information related to remote
* target RIO device.
- * Returns pointer to DMA transaction descriptor or NULL if failed.
+ *
+ * Returns: pointer to DMA transaction descriptor if successful,
+ * error-valued pointer or NULL if failed.
*/
struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev,
struct dma_chan *dchan, struct rio_dma_data *data,
diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h
index 625d09add..9796b3fee 100644
--- a/drivers/rapidio/rio.h
+++ b/drivers/rapidio/rio.h
@@ -22,7 +22,7 @@
extern u32 rio_mport_get_feature(struct rio_mport *mport, int local, u16 destid,
u8 hopcount, int ftr);
extern u32 rio_mport_get_physefb(struct rio_mport *port, int local,
- u16 destid, u8 hopcount);
+ u16 destid, u8 hopcount, u32 *rmap);
extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
u8 hopcount, u32 from);
extern int rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid,
diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c
new file mode 100644
index 000000000..cebc29646
--- /dev/null
+++ b/drivers/rapidio/rio_cm.c
@@ -0,0 +1,2381 @@
+/*
+ * rio_cm - RapidIO Channelized Messaging Driver
+ *
+ * Copyright 2013-2016 Integrated Device Technology, Inc.
+ * Copyright (c) 2015, Prodrive Technologies
+ * Copyright (c) 2015, RapidIO Trade Association
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS PROGRAM IS DISTRIBUTED IN THE HOPE THAT IT WILL BE USEFUL,
+ * BUT WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED WARRANTY OF
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. SEE THE
+ * GNU GENERAL PUBLIC LICENSE FOR MORE DETAILS.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/reboot.h>
+#include <linux/bitops.h>
+#include <linux/printk.h>
+#include <linux/rio_cm_cdev.h>
+
+#define DRV_NAME "rio_cm"
+#define DRV_VERSION "1.0.0"
+#define DRV_AUTHOR "Alexandre Bounine <alexandre.bounine@idt.com>"
+#define DRV_DESC "RapidIO Channelized Messaging Driver"
+#define DEV_NAME "rio_cm"
+
+/* Debug output filtering masks */
+enum {
+ DBG_NONE = 0,
+ DBG_INIT = BIT(0), /* driver init */
+ DBG_EXIT = BIT(1), /* driver exit */
+ DBG_MPORT = BIT(2), /* mport add/remove */
+ DBG_RDEV = BIT(3), /* RapidIO device add/remove */
+ DBG_CHOP = BIT(4), /* channel operations */
+ DBG_WAIT = BIT(5), /* waiting for events */
+ DBG_TX = BIT(6), /* message TX */
+ DBG_TX_EVENT = BIT(7), /* message TX event */
+ DBG_RX_DATA = BIT(8), /* inbound data messages */
+ DBG_RX_CMD = BIT(9), /* inbound REQ/ACK/NACK messages */
+ DBG_ALL = ~0,
+};
+
+#ifdef DEBUG
+#define riocm_debug(level, fmt, arg...) \
+ do { \
+ if (DBG_##level & dbg_level) \
+ pr_debug(DRV_NAME ": %s " fmt "\n", \
+ __func__, ##arg); \
+ } while (0)
+#else
+#define riocm_debug(level, fmt, arg...) \
+ no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg)
+#endif
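
The level argument of riocm_debug() is token-pasted onto DBG_, so callers name a filter bit without the DBG_ prefix and the message is suppressed whenever that bit is clear in dbg_level. A standalone model (GNU C, matching the kernel's variadic-macro style):

    #include <stdio.h>

    enum { DBG_NONE = 0, DBG_CHOP = 1 << 4, DBG_TX = 1 << 6 };
    static unsigned int dbg_level = DBG_CHOP;

    #define my_debug(level, fmt, arg...) \
            do { \
                    if (DBG_##level & dbg_level) \
                            printf("rio_cm: %s " fmt "\n", __func__, ##arg); \
            } while (0)

    int main(void)
    {
            my_debug(CHOP, "channel %d created", 42);  /* printed */
            my_debug(TX, "suppressed");                /* DBG_TX not set */
            return 0;
    }
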
+
+#define riocm_warn(fmt, arg...) \
+ pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg)
+
+#define riocm_error(fmt, arg...) \
+ pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg)
+
+
+static int cmbox = 1;
+module_param(cmbox, int, S_IRUGO);
+MODULE_PARM_DESC(cmbox, "RapidIO Mailbox number (default 1)");
+
+static int chstart = 256;
+module_param(chstart, int, S_IRUGO);
+MODULE_PARM_DESC(chstart,
+ "Start channel number for dynamic allocation (default 256)");
+
+#ifdef DEBUG
+static u32 dbg_level = DBG_NONE;
+module_param(dbg_level, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
+#endif
+
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+#define RIOCM_TX_RING_SIZE 128
+#define RIOCM_RX_RING_SIZE 128
+#define RIOCM_CONNECT_TO 3 /* connect response timeout (in sec) */
+
+#define RIOCM_MAX_CHNUM 0xffff /* Use full range of u16 field */
+#define RIOCM_CHNUM_AUTO 0
+#define RIOCM_MAX_EP_COUNT 0x10000 /* Max number of endpoints */
+
+enum rio_cm_state {
+ RIO_CM_IDLE,
+ RIO_CM_CONNECT,
+ RIO_CM_CONNECTED,
+ RIO_CM_DISCONNECT,
+ RIO_CM_CHAN_BOUND,
+ RIO_CM_LISTEN,
+ RIO_CM_DESTROYING,
+};
+
+enum rio_cm_pkt_type {
+ RIO_CM_SYS = 0xaa,
+ RIO_CM_CHAN = 0x55,
+};
+
+enum rio_cm_chop {
+ CM_CONN_REQ,
+ CM_CONN_ACK,
+ CM_CONN_CLOSE,
+ CM_DATA_MSG,
+};
+
+struct rio_ch_base_bhdr {
+ u32 src_id;
+ u32 dst_id;
+#define RIO_HDR_LETTER_MASK 0xffff0000
+#define RIO_HDR_MBOX_MASK 0x0000ffff
+ u8 src_mbox;
+ u8 dst_mbox;
+ u8 type;
+} __attribute__((__packed__));
+
+struct rio_ch_chan_hdr {
+ struct rio_ch_base_bhdr bhdr;
+ u8 ch_op;
+ u16 dst_ch;
+ u16 src_ch;
+ u16 msg_len;
+ u16 rsrvd;
+} __attribute__((__packed__));
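
The __packed__ attributes keep both headers at their on-wire sizes; without them the compiler would insert alignment padding after the u8 fields. A compile-time check one could add (a sketch, not present in the driver):

    _Static_assert(sizeof(struct rio_ch_base_bhdr) == 11,
                   "4 + 4 + 1 + 1 + 1 bytes on the wire");
    _Static_assert(sizeof(struct rio_ch_chan_hdr) == 20,
                   "11-byte base header + 1 + 2 + 2 + 2 + 2 bytes");
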
+
+struct tx_req {
+ struct list_head node;
+ struct rio_dev *rdev;
+ void *buffer;
+ size_t len;
+};
+
+struct cm_dev {
+ struct list_head list;
+ struct rio_mport *mport;
+ void *rx_buf[RIOCM_RX_RING_SIZE];
+ int rx_slots;
+ struct mutex rx_lock;
+
+ void *tx_buf[RIOCM_TX_RING_SIZE];
+ int tx_slot;
+ int tx_cnt;
+ int tx_ack_slot;
+ struct list_head tx_reqs;
+ spinlock_t tx_lock;
+
+ struct list_head peers;
+ u32 npeers;
+ struct workqueue_struct *rx_wq;
+ struct work_struct rx_work;
+};
+
+struct chan_rx_ring {
+ void *buf[RIOCM_RX_RING_SIZE];
+ int head;
+ int tail;
+ int count;
+
+ /* Tracking RX buffers reported to upper level */
+ void *inuse[RIOCM_RX_RING_SIZE];
+ int inuse_cnt;
+};
+
+struct rio_channel {
+ u16 id; /* local channel ID */
+ struct kref ref; /* channel refcount */
+ struct file *filp;
+ struct cm_dev *cmdev; /* associated CM device object */
+ struct rio_dev *rdev; /* remote RapidIO device */
+ enum rio_cm_state state;
+ int error;
+ spinlock_t lock;
+ void *context;
+ u32 loc_destid; /* local destID */
+ u32 rem_destid; /* remote destID */
+ u16 rem_channel; /* remote channel ID */
+ struct list_head accept_queue;
+ struct list_head ch_node;
+ struct completion comp;
+ struct completion comp_close;
+ struct chan_rx_ring rx_ring;
+};
+
+struct cm_peer {
+ struct list_head node;
+ struct rio_dev *rdev;
+};
+
+struct rio_cm_work {
+ struct work_struct work;
+ struct cm_dev *cm;
+ void *data;
+};
+
+struct conn_req {
+ struct list_head node;
+ u32 destid; /* requester destID */
+ u16 chan; /* requester channel ID */
+ struct cm_dev *cmdev;
+};
+
+/*
+ * A channel_dev structure represents a CM_CDEV
+ * @cdev Character device
+ * @dev Associated device object
+ */
+struct channel_dev {
+ struct cdev cdev;
+ struct device *dev;
+};
+
+static struct rio_channel *riocm_ch_alloc(u16 ch_num);
+static void riocm_ch_free(struct kref *ref);
+static int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev,
+ void *buffer, size_t len);
+static int riocm_ch_close(struct rio_channel *ch);
+
+static DEFINE_SPINLOCK(idr_lock);
+static DEFINE_IDR(ch_idr);
+
+static LIST_HEAD(cm_dev_list);
+static DECLARE_RWSEM(rdev_sem);
+
+static struct class *dev_class;
+static unsigned int dev_major;
+static unsigned int dev_minor_base;
+static dev_t dev_number;
+static struct channel_dev riocm_cdev;
+
+#define is_msg_capable(src_ops, dst_ops) \
+ ((src_ops & RIO_SRC_OPS_DATA_MSG) && \
+ (dst_ops & RIO_DST_OPS_DATA_MSG))
+#define dev_cm_capable(dev) \
+ is_msg_capable(dev->src_ops, dev->dst_ops)
+
+static int riocm_cmp(struct rio_channel *ch, enum rio_cm_state cmp)
+{
+ int ret;
+
+ spin_lock_bh(&ch->lock);
+ ret = (ch->state == cmp);
+ spin_unlock_bh(&ch->lock);
+ return ret;
+}
+
+static int riocm_cmp_exch(struct rio_channel *ch,
+ enum rio_cm_state cmp, enum rio_cm_state exch)
+{
+ int ret;
+
+ spin_lock_bh(&ch->lock);
+ ret = (ch->state == cmp);
+ if (ret)
+ ch->state = exch;
+ spin_unlock_bh(&ch->lock);
+ return ret;
+}
+
+static enum rio_cm_state riocm_exch(struct rio_channel *ch,
+ enum rio_cm_state exch)
+{
+ enum rio_cm_state old;
+
+ spin_lock_bh(&ch->lock);
+ old = ch->state;
+ ch->state = exch;
+ spin_unlock_bh(&ch->lock);
+ return old;
+}
+
+static struct rio_channel *riocm_get_channel(u16 nr)
+{
+ struct rio_channel *ch;
+
+ spin_lock_bh(&idr_lock);
+ ch = idr_find(&ch_idr, nr);
+ if (ch)
+ kref_get(&ch->ref);
+ spin_unlock_bh(&idr_lock);
+ return ch;
+}
+
+static void riocm_put_channel(struct rio_channel *ch)
+{
+ kref_put(&ch->ref, riocm_ch_free);
+}
+
+static void *riocm_rx_get_msg(struct cm_dev *cm)
+{
+ void *msg;
+ int i;
+
+ msg = rio_get_inb_message(cm->mport, cmbox);
+ if (msg) {
+ for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
+ if (cm->rx_buf[i] == msg) {
+ cm->rx_buf[i] = NULL;
+ cm->rx_slots++;
+ break;
+ }
+ }
+
+ if (i == RIOCM_RX_RING_SIZE)
+ riocm_warn("no record for buffer 0x%p", msg);
+ }
+
+ return msg;
+}
+
+/*
+ * riocm_rx_fill - fills a ring of receive buffers for a given cm device
+ * @cm: cm_dev object
+ * @nent: max number of entries to fill
+ *
+ * Returns: none
+ */
+static void riocm_rx_fill(struct cm_dev *cm, int nent)
+{
+ int i;
+
+ if (cm->rx_slots == 0)
+ return;
+
+ for (i = 0; i < RIOCM_RX_RING_SIZE && cm->rx_slots && nent; i++) {
+ if (cm->rx_buf[i] == NULL) {
+ cm->rx_buf[i] = kmalloc(RIO_MAX_MSG_SIZE, GFP_KERNEL);
+ if (cm->rx_buf[i] == NULL)
+ break;
+ rio_add_inb_buffer(cm->mport, cmbox, cm->rx_buf[i]);
+ cm->rx_slots--;
+ nent--;
+ }
+ }
+}
+
+/*
+ * riocm_rx_free - frees all receive buffers associated with a given cm device
+ * @cm: cm_dev object
+ *
+ * Returns: none
+ */
+static void riocm_rx_free(struct cm_dev *cm)
+{
+ int i;
+
+ for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
+ if (cm->rx_buf[i] != NULL) {
+ kfree(cm->rx_buf[i]);
+ cm->rx_buf[i] = NULL;
+ }
+ }
+}
+
+/*
+ * riocm_req_handler - connection request handler
+ * @cm: cm_dev object
+ * @req_data: pointer to the request packet
+ *
+ * Returns: 0 if success, or
+ * -EINVAL if channel is not in correct state,
+ * -ENODEV if cannot find a channel with specified ID,
+ * -ENOMEM if unable to allocate memory to store the request
+ */
+static int riocm_req_handler(struct cm_dev *cm, void *req_data)
+{
+ struct rio_channel *ch;
+ struct conn_req *req;
+ struct rio_ch_chan_hdr *hh = req_data;
+ u16 chnum;
+
+ chnum = ntohs(hh->dst_ch);
+
+ ch = riocm_get_channel(chnum);
+
+ if (!ch)
+ return -ENODEV;
+
+ if (ch->state != RIO_CM_LISTEN) {
+ riocm_debug(RX_CMD, "channel %d is not in listen state", chnum);
+ riocm_put_channel(ch);
+ return -EINVAL;
+ }
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req) {
+ riocm_put_channel(ch);
+ return -ENOMEM;
+ }
+
+ req->destid = ntohl(hh->bhdr.src_id);
+ req->chan = ntohs(hh->src_ch);
+ req->cmdev = cm;
+
+ spin_lock_bh(&ch->lock);
+ list_add_tail(&req->node, &ch->accept_queue);
+ spin_unlock_bh(&ch->lock);
+ complete(&ch->comp);
+ riocm_put_channel(ch);
+
+ return 0;
+}
+
+/*
+ * riocm_resp_handler - response to connection request handler
+ * @resp_data: pointer to the response packet
+ *
+ * Returns: 0 if success, or
+ * -EINVAL if channel is not in correct state,
+ * -ENODEV if cannot find a channel with specified ID.
+ */
+static int riocm_resp_handler(void *resp_data)
+{
+ struct rio_channel *ch;
+ struct rio_ch_chan_hdr *hh = resp_data;
+ u16 chnum;
+
+ chnum = ntohs(hh->dst_ch);
+ ch = riocm_get_channel(chnum);
+ if (!ch)
+ return -ENODEV;
+
+ if (ch->state != RIO_CM_CONNECT) {
+ riocm_put_channel(ch);
+ return -EINVAL;
+ }
+
+ riocm_exch(ch, RIO_CM_CONNECTED);
+ ch->rem_channel = ntohs(hh->src_ch);
+ complete(&ch->comp);
+ riocm_put_channel(ch);
+
+ return 0;
+}
+
+/*
+ * riocm_close_handler - channel close request handler
+ * @req_data: pointer to the request packet
+ *
+ * Returns: 0 if success, or
+ * -ENODEV if cannot find a channel with specified ID,
+ * + error codes returned by riocm_ch_close.
+ */
+static int riocm_close_handler(void *data)
+{
+ struct rio_channel *ch;
+ struct rio_ch_chan_hdr *hh = data;
+ int ret;
+
+ riocm_debug(RX_CMD, "for ch=%d", ntohs(hh->dst_ch));
+
+ spin_lock_bh(&idr_lock);
+ ch = idr_find(&ch_idr, ntohs(hh->dst_ch));
+ if (!ch) {
+ spin_unlock_bh(&idr_lock);
+ return -ENODEV;
+ }
+ idr_remove(&ch_idr, ch->id);
+ spin_unlock_bh(&idr_lock);
+
+ riocm_exch(ch, RIO_CM_DISCONNECT);
+
+ ret = riocm_ch_close(ch);
+ if (ret)
+ riocm_debug(RX_CMD, "riocm_ch_close() returned %d", ret);
+
+ return 0;
+}
+
+/*
+ * rio_cm_handler - function that services request (non-data) packets
+ * @cm: cm_dev object
+ * @data: pointer to the packet
+ */
+static void rio_cm_handler(struct cm_dev *cm, void *data)
+{
+ struct rio_ch_chan_hdr *hdr;
+
+ if (!rio_mport_is_running(cm->mport))
+ goto out;
+
+ hdr = data;
+
+ riocm_debug(RX_CMD, "OP=%x for ch=%d from %d",
+ hdr->ch_op, ntohs(hdr->dst_ch), ntohs(hdr->src_ch));
+
+ switch (hdr->ch_op) {
+ case CM_CONN_REQ:
+ riocm_req_handler(cm, data);
+ break;
+ case CM_CONN_ACK:
+ riocm_resp_handler(data);
+ break;
+ case CM_CONN_CLOSE:
+ riocm_close_handler(data);
+ break;
+ default:
+ riocm_error("Invalid packet header");
+ break;
+ }
+out:
+ kfree(data);
+}
+
+/*
+ * rio_rx_data_handler - received data packet handler
+ * @cm: cm_dev object
+ * @buf: data packet
+ *
+ * Returns: 0 if success, or
+ * -ENODEV if cannot find a channel with specified ID,
+ * -EIO if channel is not in CONNECTED state,
+ * -ENOMEM if channel RX queue is full (packet discarded)
+ */
+static int rio_rx_data_handler(struct cm_dev *cm, void *buf)
+{
+ struct rio_ch_chan_hdr *hdr;
+ struct rio_channel *ch;
+
+ hdr = buf;
+
+ riocm_debug(RX_DATA, "for ch=%d", ntohs(hdr->dst_ch));
+
+ ch = riocm_get_channel(ntohs(hdr->dst_ch));
+ if (!ch) {
+ /* Discard data message for non-existing channel */
+ kfree(buf);
+ return -ENODEV;
+ }
+
+ /* Place pointer to the buffer into channel's RX queue */
+ spin_lock(&ch->lock);
+
+ if (ch->state != RIO_CM_CONNECTED) {
+ /* Channel is not ready to receive data, discard a packet */
+ riocm_debug(RX_DATA, "ch=%d is in wrong state=%d",
+ ch->id, ch->state);
+ spin_unlock(&ch->lock);
+ kfree(buf);
+ riocm_put_channel(ch);
+ return -EIO;
+ }
+
+ if (ch->rx_ring.count == RIOCM_RX_RING_SIZE) {
+ /* If RX ring is full, discard a packet */
+ riocm_debug(RX_DATA, "ch=%d is full", ch->id);
+ spin_unlock(&ch->lock);
+ kfree(buf);
+ riocm_put_channel(ch);
+ return -ENOMEM;
+ }
+
+ ch->rx_ring.buf[ch->rx_ring.head] = buf;
+ ch->rx_ring.head++;
+ ch->rx_ring.count++;
+ ch->rx_ring.head %= RIOCM_RX_RING_SIZE;
+
+ complete(&ch->comp);
+
+ spin_unlock(&ch->lock);
+ riocm_put_channel(ch);
+
+ return 0;
+}
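
The channel RX queue above is a fixed-size ring: head advances on enqueue, tail advances on dequeue in the receive path, and count tracks occupancy so full and empty states are unambiguous. A standalone userspace model of the same scheme:

    #include <stddef.h>

    #define RING_SIZE 128   /* RIOCM_RX_RING_SIZE in the driver */

    struct ring {
            void *buf[RING_SIZE];
            int head, tail, count;
    };

    static int ring_push(struct ring *r, void *item)
    {
            if (r->count == RING_SIZE)
                    return -1;      /* full: the driver discards the packet */
            r->buf[r->head] = item;
            r->head = (r->head + 1) % RING_SIZE;
            r->count++;
            return 0;
    }

    static void *ring_pop(struct ring *r)
    {
            void *item;

            if (r->count == 0)
                    return NULL;
            item = r->buf[r->tail];
            r->tail = (r->tail + 1) % RING_SIZE;
            r->count--;
            return item;
    }
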
+
+/*
+ * rio_ibmsg_handler - inbound message packet handler
+ */
+static void rio_ibmsg_handler(struct work_struct *work)
+{
+ struct cm_dev *cm = container_of(work, struct cm_dev, rx_work);
+ void *data;
+ struct rio_ch_chan_hdr *hdr;
+
+ if (!rio_mport_is_running(cm->mport))
+ return;
+
+ while (1) {
+ mutex_lock(&cm->rx_lock);
+ data = riocm_rx_get_msg(cm);
+ if (data)
+ riocm_rx_fill(cm, 1);
+ mutex_unlock(&cm->rx_lock);
+
+ if (data == NULL)
+ break;
+
+ hdr = data;
+
+ if (hdr->bhdr.type != RIO_CM_CHAN) {
+ /* For now simply discard packets other than channel */
+ riocm_error("Unsupported TYPE code (0x%x). Msg dropped",
+ hdr->bhdr.type);
+ kfree(data);
+ continue;
+ }
+
+ /* Process a channel message */
+ if (hdr->ch_op == CM_DATA_MSG)
+ rio_rx_data_handler(cm, data);
+ else
+ rio_cm_handler(cm, data);
+ }
+}
+
+static void riocm_inb_msg_event(struct rio_mport *mport, void *dev_id,
+ int mbox, int slot)
+{
+ struct cm_dev *cm = dev_id;
+
+ if (rio_mport_is_running(cm->mport) && !work_pending(&cm->rx_work))
+ queue_work(cm->rx_wq, &cm->rx_work);
+}
+
+/*
+ * rio_txcq_handler - TX completion handler
+ * @cm: cm_dev object
+ * @slot: TX queue slot
+ *
+ * The TX completion handler also ensures that pending request packets are
+ * placed into the transmit queue as soon as a free slot becomes available.
+ * This is done to give higher priority to request packets during
+ * high-intensity data flow.
+ */
+static void rio_txcq_handler(struct cm_dev *cm, int slot)
+{
+ int ack_slot;
+
+ /* ATTN: Add TX completion notification if/when direct buffer
+ * transfer is implemented. At this moment only correct tracking
+ * of tx_cnt is important.
+ */
+ riocm_debug(TX_EVENT, "for mport_%d slot %d tx_cnt %d",
+ cm->mport->id, slot, cm->tx_cnt);
+
+ spin_lock(&cm->tx_lock);
+ ack_slot = cm->tx_ack_slot;
+
+ if (ack_slot == slot)
+ riocm_debug(TX_EVENT, "slot == ack_slot");
+
+ while (cm->tx_cnt && ((ack_slot != slot) ||
+ (cm->tx_cnt == RIOCM_TX_RING_SIZE))) {
+
+ cm->tx_buf[ack_slot] = NULL;
+ ++ack_slot;
+ ack_slot &= (RIOCM_TX_RING_SIZE - 1);
+ cm->tx_cnt--;
+ }
+
+ if (cm->tx_cnt < 0 || cm->tx_cnt > RIOCM_TX_RING_SIZE)
+ riocm_error("tx_cnt %d out of sync", cm->tx_cnt);
+
+ WARN_ON((cm->tx_cnt < 0) || (cm->tx_cnt > RIOCM_TX_RING_SIZE));
+
+ cm->tx_ack_slot = ack_slot;
+
+ /*
+ * If there are pending requests, insert them into transmit queue
+ */
+ if (!list_empty(&cm->tx_reqs) && (cm->tx_cnt < RIOCM_TX_RING_SIZE)) {
+ struct tx_req *req, *_req;
+ int rc;
+
+ list_for_each_entry_safe(req, _req, &cm->tx_reqs, node) {
+ list_del(&req->node);
+ cm->tx_buf[cm->tx_slot] = req->buffer;
+ rc = rio_add_outb_message(cm->mport, req->rdev, cmbox,
+ req->buffer, req->len);
+ kfree(req->buffer);
+ kfree(req);
+
+ ++cm->tx_cnt;
+ ++cm->tx_slot;
+ cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1);
+ if (cm->tx_cnt == RIOCM_TX_RING_SIZE)
+ break;
+ }
+ }
+
+ spin_unlock(&cm->tx_lock);
+}
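+
+/*
+ * Illustrative note (not part of the driver): the slot arithmetic above
+ * assumes RIOCM_TX_RING_SIZE is a power of two, so a ring index can wrap
+ * with a mask instead of a modulo operation, e.g.:
+ *
+ *	next_slot = (slot + 1) & (RIOCM_TX_RING_SIZE - 1); // same as % SIZE
+ */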
+
+static void riocm_outb_msg_event(struct rio_mport *mport, void *dev_id,
+ int mbox, int slot)
+{
+ struct cm_dev *cm = dev_id;
+
+ if (cm && rio_mport_is_running(cm->mport))
+ rio_txcq_handler(cm, slot);
+}
+
+static int riocm_queue_req(struct cm_dev *cm, struct rio_dev *rdev,
+ void *buffer, size_t len)
+{
+ unsigned long flags;
+ struct tx_req *treq;
+
+ treq = kzalloc(sizeof(*treq), GFP_KERNEL);
+ if (treq == NULL)
+ return -ENOMEM;
+
+ treq->rdev = rdev;
+ treq->buffer = buffer;
+ treq->len = len;
+
+ spin_lock_irqsave(&cm->tx_lock, flags);
+ list_add_tail(&treq->node, &cm->tx_reqs);
+ spin_unlock_irqrestore(&cm->tx_lock, flags);
+ return 0;
+}
+
+/*
+ * riocm_post_send - helper function that places packet into msg TX queue
+ * @cm: cm_dev object
+ * @rdev: target RapidIO device object (required by outbound msg interface)
+ * @buffer: pointer to a packet buffer to send
+ * @len: length of data to transfer
+ *
+ * Returns: 0 on success, or error code otherwise.
+ */
+static int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev,
+ void *buffer, size_t len)
+{
+ int rc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cm->tx_lock, flags);
+
+ if (cm->mport == NULL) {
+ rc = -ENODEV;
+ goto err_out;
+ }
+
+ if (cm->tx_cnt == RIOCM_TX_RING_SIZE) {
+ riocm_debug(TX, "Tx Queue is full");
+ rc = -EBUSY;
+ goto err_out;
+ }
+
+ cm->tx_buf[cm->tx_slot] = buffer;
+ rc = rio_add_outb_message(cm->mport, rdev, cmbox, buffer, len);
+
+ riocm_debug(TX, "Add buf@%p destid=%x tx_slot=%d tx_cnt=%d",
+ buffer, rdev->destid, cm->tx_slot, cm->tx_cnt);
+
+ ++cm->tx_cnt;
+ ++cm->tx_slot;
+ cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1);
+
+err_out:
+ spin_unlock_irqrestore(&cm->tx_lock, flags);
+ return rc;
+}
+
+/*
+ * riocm_ch_send - sends a data packet to a remote device
+ * @ch_id: local channel ID
+ * @buf: pointer to a data buffer to send (including CM header)
+ * @len: length of data to transfer (including CM header)
+ *
+ * ATTN: ASSUMES THAT HEADER SPACE IS RESERVED AS PART OF THE DATA PACKET
+ *
+ * Returns: 0 on success, or
+ * -EINVAL if one or more input parameters is/are not valid,
+ * -ENODEV if cannot find a channel with specified ID,
+ * -EAGAIN if a channel is not in CONNECTED state,
+ * or an error code returned by the HW send routine.
+ */
+static int riocm_ch_send(u16 ch_id, void *buf, int len)
+{
+ struct rio_channel *ch;
+ struct rio_ch_chan_hdr *hdr;
+ int ret;
+
+ if (buf == NULL || ch_id == 0 || len == 0 || len > RIO_MAX_MSG_SIZE)
+ return -EINVAL;
+
+ ch = riocm_get_channel(ch_id);
+ if (!ch) {
+ riocm_error("%s(%d) ch_%d not found", current->comm,
+ task_pid_nr(current), ch_id);
+ return -ENODEV;
+ }
+
+ if (!riocm_cmp(ch, RIO_CM_CONNECTED)) {
+ ret = -EAGAIN;
+ goto err_out;
+ }
+
+ /*
+ * Fill buffer header section with corresponding channel data
+ */
+ hdr = buf;
+
+ hdr->bhdr.src_id = htonl(ch->loc_destid);
+ hdr->bhdr.dst_id = htonl(ch->rem_destid);
+ hdr->bhdr.src_mbox = cmbox;
+ hdr->bhdr.dst_mbox = cmbox;
+ hdr->bhdr.type = RIO_CM_CHAN;
+ hdr->ch_op = CM_DATA_MSG;
+ hdr->dst_ch = htons(ch->rem_channel);
+ hdr->src_ch = htons(ch->id);
+ hdr->msg_len = htons((u16)len);
+
+ /* ATTN: the function call below relies on the fact that underlying
+ * HW-specific add_outb_message() routine copies TX data into its own
+ * internal transfer buffer (true for all RIONET compatible mport
+ * drivers). Must be reviewed if mport driver uses the buffer directly.
+ */
+
+ ret = riocm_post_send(ch->cmdev, ch->rdev, buf, len);
+ if (ret)
+ riocm_debug(TX, "ch %d send_err=%d", ch->id, ret);
+err_out:
+ riocm_put_channel(ch);
+ return ret;
+}
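+
+/*
+ * Caller-side sketch (illustrative only, not part of the driver): since
+ * riocm_ch_send() writes a struct rio_ch_chan_hdr at the start of the
+ * buffer, a caller is expected to reserve that space, e.g.:
+ *
+ *	size_t hsz = sizeof(struct rio_ch_chan_hdr);
+ *	void *buf = kmalloc(hsz + payload_len, GFP_KERNEL);
+ *
+ *	memcpy(buf + hsz, payload, payload_len); // header filled by send
+ *	riocm_ch_send(ch_id, buf, hsz + payload_len);
+ */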
+
+static int riocm_ch_free_rxbuf(struct rio_channel *ch, void *buf)
+{
+ int i, ret = -EINVAL;
+
+ spin_lock_bh(&ch->lock);
+
+ for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
+ if (ch->rx_ring.inuse[i] == buf) {
+ ch->rx_ring.inuse[i] = NULL;
+ ch->rx_ring.inuse_cnt--;
+ ret = 0;
+ break;
+ }
+ }
+
+ spin_unlock_bh(&ch->lock);
+
+ if (!ret)
+ kfree(buf);
+
+ return ret;
+}
+
+/*
+ * riocm_ch_receive - fetch a data packet received for the specified channel
+ * @ch: local channel object
+ * @buf: pointer to a packet buffer
+ * @timeout: timeout to wait for incoming packet (in jiffies)
+ *
+ * Returns: 0 and a valid buffer pointer on success, or a NULL buffer pointer
+ * and one of:
+ * -EAGAIN if a channel is not in CONNECTED state,
+ * -ENOMEM if in-use tracking queue is full,
+ * -ETIME if wait timeout expired,
+ * -EINTR if wait was interrupted,
+ * -ECONNRESET if the channel left CONNECTED state while waiting.
+ */
+static int riocm_ch_receive(struct rio_channel *ch, void **buf, long timeout)
+{
+ void *rxmsg = NULL;
+ int i, ret = 0;
+ long wret;
+
+ if (!riocm_cmp(ch, RIO_CM_CONNECTED)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ if (ch->rx_ring.inuse_cnt == RIOCM_RX_RING_SIZE) {
+ /* If we do not have entries to track buffers given to upper
+ * layer, reject request.
+ */
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ wret = wait_for_completion_interruptible_timeout(&ch->comp, timeout);
+
+ riocm_debug(WAIT, "wait on %d returned %ld", ch->id, wret);
+
+ if (!wret)
+ ret = -ETIME;
+ else if (wret == -ERESTARTSYS)
+ ret = -EINTR;
+ else
+ ret = riocm_cmp(ch, RIO_CM_CONNECTED) ? 0 : -ECONNRESET;
+
+ if (ret)
+ goto out;
+
+ spin_lock_bh(&ch->lock);
+
+ rxmsg = ch->rx_ring.buf[ch->rx_ring.tail];
+ ch->rx_ring.buf[ch->rx_ring.tail] = NULL;
+ ch->rx_ring.count--;
+ ch->rx_ring.tail++;
+ ch->rx_ring.tail %= RIOCM_RX_RING_SIZE;
+ ret = -ENOMEM;
+
+ for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
+ if (ch->rx_ring.inuse[i] == NULL) {
+ ch->rx_ring.inuse[i] = rxmsg;
+ ch->rx_ring.inuse_cnt++;
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret) {
+ /* We have no entry to store pending message: drop it */
+ kfree(rxmsg);
+ rxmsg = NULL;
+ }
+
+ spin_unlock_bh(&ch->lock);
+out:
+ *buf = rxmsg;
+ return ret;
+}
+
+/*
+ * riocm_ch_connect - sends a connect request to a remote device
+ * @loc_ch: local channel ID
+ * @cm: CM device to send connect request
+ * @peer: target RapidIO device
+ * @rem_ch: remote channel ID
+ *
+ * Returns: 0 on success, or
+ * -ENODEV if cannot find a channel with specified ID,
+ * -EINVAL if the channel is not in IDLE state,
+ * -ENOMEM if unable to allocate a request packet,
+ * -ETIME if ACK response timeout expired,
+ * -EINTR if wait for response was interrupted.
+ */
+static int riocm_ch_connect(u16 loc_ch, struct cm_dev *cm,
+ struct cm_peer *peer, u16 rem_ch)
+{
+ struct rio_channel *ch = NULL;
+ struct rio_ch_chan_hdr *hdr;
+ int ret;
+ long wret;
+
+ ch = riocm_get_channel(loc_ch);
+ if (!ch)
+ return -ENODEV;
+
+ if (!riocm_cmp_exch(ch, RIO_CM_IDLE, RIO_CM_CONNECT)) {
+ ret = -EINVAL;
+ goto conn_done;
+ }
+
+ ch->cmdev = cm;
+ ch->rdev = peer->rdev;
+ ch->context = NULL;
+ ch->loc_destid = cm->mport->host_deviceid;
+ ch->rem_channel = rem_ch;
+
+ /*
+ * Send connect request to the remote RapidIO device
+ */
+
+ hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);
+ if (hdr == NULL) {
+ ret = -ENOMEM;
+ goto conn_done;
+ }
+
+ hdr->bhdr.src_id = htonl(ch->loc_destid);
+ hdr->bhdr.dst_id = htonl(peer->rdev->destid);
+ hdr->bhdr.src_mbox = cmbox;
+ hdr->bhdr.dst_mbox = cmbox;
+ hdr->bhdr.type = RIO_CM_CHAN;
+ hdr->ch_op = CM_CONN_REQ;
+ hdr->dst_ch = htons(rem_ch);
+ hdr->src_ch = htons(loc_ch);
+
+ /* ATTN: the function call below relies on the fact that underlying
+ * HW-specific add_outb_message() routine copies TX data into its
+ * internal transfer buffer. Must be reviewed if mport driver uses
+ * this buffer directly.
+ */
+ ret = riocm_post_send(cm, peer->rdev, hdr, sizeof(*hdr));
+
+ if (ret != -EBUSY) {
+ kfree(hdr);
+ } else {
+ ret = riocm_queue_req(cm, peer->rdev, hdr, sizeof(*hdr));
+ if (ret)
+ kfree(hdr);
+ }
+
+ if (ret) {
+ riocm_cmp_exch(ch, RIO_CM_CONNECT, RIO_CM_IDLE);
+ goto conn_done;
+ }
+
+ /* Wait for connect response from the remote device */
+ wret = wait_for_completion_interruptible_timeout(&ch->comp,
+ RIOCM_CONNECT_TO * HZ);
+ riocm_debug(WAIT, "wait on %d returns %ld", ch->id, wret);
+
+ if (!wret)
+ ret = -ETIME;
+ else if (wret == -ERESTARTSYS)
+ ret = -EINTR;
+ else
+ ret = riocm_cmp(ch, RIO_CM_CONNECTED) ? 0 : -1;
+
+conn_done:
+ riocm_put_channel(ch);
+ return ret;
+}
+
+static int riocm_send_ack(struct rio_channel *ch)
+{
+ struct rio_ch_chan_hdr *hdr;
+ int ret;
+
+ hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);
+ if (hdr == NULL)
+ return -ENOMEM;
+
+ hdr->bhdr.src_id = htonl(ch->loc_destid);
+ hdr->bhdr.dst_id = htonl(ch->rem_destid);
+ hdr->dst_ch = htons(ch->rem_channel);
+ hdr->src_ch = htons(ch->id);
+ hdr->bhdr.src_mbox = cmbox;
+ hdr->bhdr.dst_mbox = cmbox;
+ hdr->bhdr.type = RIO_CM_CHAN;
+ hdr->ch_op = CM_CONN_ACK;
+
+ /* ATTN: the function call below relies on the fact that underlying
+ * add_outb_message() routine copies TX data into its internal transfer
+ * buffer. Review if switching to direct buffer version.
+ */
+ ret = riocm_post_send(ch->cmdev, ch->rdev, hdr, sizeof(*hdr));
+
+ if (ret == -EBUSY && !riocm_queue_req(ch->cmdev,
+ ch->rdev, hdr, sizeof(*hdr)))
+ return 0;
+ kfree(hdr);
+
+ if (ret)
+ riocm_error("send ACK to ch_%d on %s failed (ret=%d)",
+ ch->id, rio_name(ch->rdev), ret);
+ return ret;
+}
+
+/*
+ * riocm_ch_accept - accept incoming connection request
+ * @ch_id: listening channel ID
+ * @new_ch_id: returned ID assigned to the new channel
+ * @timeout: wait timeout (if 0 non-blocking call, do not wait if connection
+ * request is not available).
+ *
+ * Returns: pointer to new channel struct on success, or error-valued pointer:
+ * -EINVAL - cannot find the specified channel or it is not in LISTEN state,
+ * -EAGAIN - no connection request available immediately (timeout=0),
+ * -ETIME - wait timeout expired,
+ * -EINTR - wait was interrupted,
+ * -ECANCELED - the channel left LISTEN state while waiting,
+ * -EIO - the accept queue was empty on wakeup,
+ * -ENOMEM - unable to allocate new channel,
+ * -ENODEV - cannot find the requesting peer device object.
+ */
+static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
+ long timeout)
+{
+ struct rio_channel *ch;
+ struct rio_channel *new_ch;
+ struct conn_req *req;
+ struct cm_peer *peer;
+ int found = 0;
+ int err = 0;
+ long wret;
+
+ ch = riocm_get_channel(ch_id);
+ if (!ch)
+ return ERR_PTR(-EINVAL);
+
+ if (!riocm_cmp(ch, RIO_CM_LISTEN)) {
+ err = -EINVAL;
+ goto err_put;
+ }
+
+ /* Don't sleep if this is a non blocking call */
+ if (!timeout) {
+ if (!try_wait_for_completion(&ch->comp)) {
+ err = -EAGAIN;
+ goto err_put;
+ }
+ } else {
+ riocm_debug(WAIT, "on %d", ch->id);
+
+ wret = wait_for_completion_interruptible_timeout(&ch->comp,
+ timeout);
+ if (!wret) {
+ err = -ETIME;
+ goto err_put;
+ } else if (wret == -ERESTARTSYS) {
+ err = -EINTR;
+ goto err_put;
+ }
+ }
+
+ spin_lock_bh(&ch->lock);
+
+ if (ch->state != RIO_CM_LISTEN) {
+ err = -ECANCELED;
+ } else if (list_empty(&ch->accept_queue)) {
+ riocm_debug(WAIT, "on %d accept_queue is empty on completion",
+ ch->id);
+ err = -EIO;
+ }
+
+ spin_unlock_bh(&ch->lock);
+
+ if (err) {
+ riocm_debug(WAIT, "on %d returns %d", ch->id, err);
+ goto err_put;
+ }
+
+ /* Create new channel for this connection */
+ new_ch = riocm_ch_alloc(RIOCM_CHNUM_AUTO);
+
+ if (IS_ERR(new_ch)) {
+ riocm_error("failed to get channel for new req (%ld)",
+ PTR_ERR(new_ch));
+ err = -ENOMEM;
+ goto err_put;
+ }
+
+ spin_lock_bh(&ch->lock);
+
+ req = list_first_entry(&ch->accept_queue, struct conn_req, node);
+ list_del(&req->node);
+ new_ch->cmdev = ch->cmdev;
+ new_ch->loc_destid = ch->loc_destid;
+ new_ch->rem_destid = req->destid;
+ new_ch->rem_channel = req->chan;
+
+ spin_unlock_bh(&ch->lock);
+ riocm_put_channel(ch);
+ ch = NULL;
+ kfree(req);
+
+ down_read(&rdev_sem);
+ /* Find requester's device object */
+ list_for_each_entry(peer, &new_ch->cmdev->peers, node) {
+ if (peer->rdev->destid == new_ch->rem_destid) {
+ riocm_debug(RX_CMD, "found matching device(%s)",
+ rio_name(peer->rdev));
+ found = 1;
+ break;
+ }
+ }
+ up_read(&rdev_sem);
+
+ if (!found) {
+ /* If peer device object not found, simply ignore the request */
+ err = -ENODEV;
+ goto err_put_new_ch;
+ }
+
+ new_ch->rdev = peer->rdev;
+ new_ch->state = RIO_CM_CONNECTED;
+ spin_lock_init(&new_ch->lock);
+
+ /* Acknowledge the connection request. */
+ riocm_send_ack(new_ch);
+
+ *new_ch_id = new_ch->id;
+ return new_ch;
+
+err_put_new_ch:
+ spin_lock_bh(&idr_lock);
+ idr_remove(&ch_idr, new_ch->id);
+ spin_unlock_bh(&idr_lock);
+ riocm_put_channel(new_ch);
+
+err_put:
+ if (ch)
+ riocm_put_channel(ch);
+ *new_ch_id = 0;
+ return ERR_PTR(err);
+}
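+
+/*
+ * User-space view (illustrative sketch; assumes the UAPI header that
+ * defines struct rio_cm_accept and the RIO_CM_* ioctl codes):
+ *
+ *	struct rio_cm_accept param = { .ch_num = listen_ch, .wait_to = 1000 };
+ *
+ *	if (!ioctl(fd, RIO_CM_CHAN_ACCEPT, &param))
+ *		printf("accepted on new channel %u\n", param.ch_num);
+ */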
+
+/*
+ * riocm_ch_listen - puts a channel into LISTEN state
+ * @ch_id: channel ID
+ *
+ * Returns: 0 on success, or
+ * -EINVAL if the specified channel does not exist or
+ *         is not in CHAN_BOUND state.
+ */
+static int riocm_ch_listen(u16 ch_id)
+{
+ struct rio_channel *ch = NULL;
+ int ret = 0;
+
+ riocm_debug(CHOP, "(ch_%d)", ch_id);
+
+ ch = riocm_get_channel(ch_id);
+ if (!ch || !riocm_cmp_exch(ch, RIO_CM_CHAN_BOUND, RIO_CM_LISTEN))
+ ret = -EINVAL;
+ riocm_put_channel(ch);
+ return ret;
+}
+
+/*
+ * riocm_ch_bind - associate a channel object and an mport device
+ * @ch_id: channel ID
+ * @mport_id: local mport device ID
+ * @context: pointer to the additional caller's context
+ *
+ * Returns: 0 on success, or
+ * -ENODEV if cannot find specified mport,
+ * -EINVAL if the specified channel does not exist or
+ * is not in IDLE state.
+ */
+static int riocm_ch_bind(u16 ch_id, u8 mport_id, void *context)
+{
+ struct rio_channel *ch = NULL;
+ struct cm_dev *cm;
+ int rc = -ENODEV;
+
+ riocm_debug(CHOP, "ch_%d to mport_%d", ch_id, mport_id);
+
+ /* Find matching cm_dev object */
+ down_read(&rdev_sem);
+ list_for_each_entry(cm, &cm_dev_list, list) {
+ if ((cm->mport->id == mport_id) &&
+ rio_mport_is_running(cm->mport)) {
+ rc = 0;
+ break;
+ }
+ }
+
+ if (rc)
+ goto exit;
+
+ ch = riocm_get_channel(ch_id);
+ if (!ch) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ spin_lock_bh(&ch->lock);
+ if (ch->state != RIO_CM_IDLE) {
+ spin_unlock_bh(&ch->lock);
+ rc = -EINVAL;
+ goto err_put;
+ }
+
+ ch->cmdev = cm;
+ ch->loc_destid = cm->mport->host_deviceid;
+ ch->context = context;
+ ch->state = RIO_CM_CHAN_BOUND;
+ spin_unlock_bh(&ch->lock);
+err_put:
+ riocm_put_channel(ch);
+exit:
+ up_read(&rdev_sem);
+ return rc;
+}
+
+/*
+ * riocm_ch_alloc - channel object allocation helper routine
+ * @ch_num: channel ID (1 ... RIOCM_MAX_CHNUM, 0 = automatic)
+ *
+ * Return value: pointer to newly created channel object,
+ * or error-valued pointer
+ */
+static struct rio_channel *riocm_ch_alloc(u16 ch_num)
+{
+ int id;
+ int start, end;
+ struct rio_channel *ch;
+
+ ch = kzalloc(sizeof(*ch), GFP_KERNEL);
+ if (!ch)
+ return ERR_PTR(-ENOMEM);
+
+ if (ch_num) {
+ /* If requested, try to obtain the specified channel ID */
+ start = ch_num;
+ end = ch_num + 1;
+ } else {
+ /* Obtain channel ID from the dynamic allocation range */
+ start = chstart;
+ end = RIOCM_MAX_CHNUM + 1;
+ }
+
+ idr_preload(GFP_KERNEL);
+ spin_lock_bh(&idr_lock);
+ id = idr_alloc_cyclic(&ch_idr, ch, start, end, GFP_NOWAIT);
+ spin_unlock_bh(&idr_lock);
+ idr_preload_end();
+
+ if (id < 0) {
+ kfree(ch);
+ return ERR_PTR(id == -ENOSPC ? -EBUSY : id);
+ }
+
+ ch->id = (u16)id;
+ ch->state = RIO_CM_IDLE;
+ spin_lock_init(&ch->lock);
+ INIT_LIST_HEAD(&ch->accept_queue);
+ INIT_LIST_HEAD(&ch->ch_node);
+ init_completion(&ch->comp);
+ init_completion(&ch->comp_close);
+ kref_init(&ch->ref);
+ ch->rx_ring.head = 0;
+ ch->rx_ring.tail = 0;
+ ch->rx_ring.count = 0;
+ ch->rx_ring.inuse_cnt = 0;
+
+ return ch;
+}
+
+/*
+ * riocm_ch_create - creates a new channel object and allocates ID for it
+ * @ch_num: channel ID (1 ... RIOCM_MAX_CHNUM, 0 = automatic)
+ *
+ * Allocates and initializes a new channel object. If the parameter ch_num > 0
+ * and is within the valid range, riocm_ch_create tries to allocate the
+ * specified ID for the new channel. If ch_num = 0, the channel ID is assigned
+ * automatically from the range (chstart ... RIOCM_MAX_CHNUM).
+ * The module parameter 'chstart' defines the start of the ID range available
+ * for dynamic allocation; the range below 'chstart' is reserved for
+ * pre-defined ID numbers. Available channel numbers are limited by the 16-bit
+ * size of the channel number field in the packet header.
+ *
+ * Return value: pointer to rio_channel structure if successful (with the
+ * channel number updated via the pointer), or error-valued pointer otherwise.
+ */
+static struct rio_channel *riocm_ch_create(u16 *ch_num)
+{
+ struct rio_channel *ch = NULL;
+
+ ch = riocm_ch_alloc(*ch_num);
+
+ if (IS_ERR(ch))
+ riocm_debug(CHOP, "Failed to allocate channel %d (err=%ld)",
+ *ch_num, PTR_ERR(ch));
+ else
+ *ch_num = ch->id;
+
+ return ch;
+}
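+
+/*
+ * Usage sketch (illustrative only): requesting an automatically assigned
+ * channel ID from the dynamic range,
+ *
+ *	u16 ch_num = 0;		// 0 = pick any ID in (chstart ... MAX)
+ *	struct rio_channel *ch = riocm_ch_create(&ch_num);
+ *
+ *	if (!IS_ERR(ch))
+ *		riocm_debug(CHOP, "allocated ch_%d", ch_num);
+ */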
+
+/*
+ * riocm_ch_free - channel object release routine
+ * @ref: pointer to a channel's kref structure
+ */
+static void riocm_ch_free(struct kref *ref)
+{
+ struct rio_channel *ch = container_of(ref, struct rio_channel, ref);
+ int i;
+
+ riocm_debug(CHOP, "(ch_%d)", ch->id);
+
+ if (ch->rx_ring.inuse_cnt) {
+ for (i = 0;
+ i < RIOCM_RX_RING_SIZE && ch->rx_ring.inuse_cnt; i++) {
+ if (ch->rx_ring.inuse[i] != NULL) {
+ kfree(ch->rx_ring.inuse[i]);
+ ch->rx_ring.inuse_cnt--;
+ }
+ }
+ }
+
+ if (ch->rx_ring.count)
+ for (i = 0; i < RIOCM_RX_RING_SIZE && ch->rx_ring.count; i++) {
+ if (ch->rx_ring.buf[i] != NULL) {
+ kfree(ch->rx_ring.buf[i]);
+ ch->rx_ring.count--;
+ }
+ }
+
+ complete(&ch->comp_close);
+}
+
+static int riocm_send_close(struct rio_channel *ch)
+{
+ struct rio_ch_chan_hdr *hdr;
+ int ret;
+
+ /*
+ * Send CH_CLOSE notification to the remote RapidIO device
+ */
+
+ hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);
+ if (hdr == NULL)
+ return -ENOMEM;
+
+ hdr->bhdr.src_id = htonl(ch->loc_destid);
+ hdr->bhdr.dst_id = htonl(ch->rem_destid);
+ hdr->bhdr.src_mbox = cmbox;
+ hdr->bhdr.dst_mbox = cmbox;
+ hdr->bhdr.type = RIO_CM_CHAN;
+ hdr->ch_op = CM_CONN_CLOSE;
+ hdr->dst_ch = htons(ch->rem_channel);
+ hdr->src_ch = htons(ch->id);
+
+ /* ATTN: the function call below relies on the fact that underlying
+ * add_outb_message() routine copies TX data into its internal transfer
+ * buffer. Needs to be reviewed if switched to direct buffer mode.
+ */
+ ret = riocm_post_send(ch->cmdev, ch->rdev, hdr, sizeof(*hdr));
+
+ if (ret == -EBUSY && !riocm_queue_req(ch->cmdev, ch->rdev,
+ hdr, sizeof(*hdr)))
+ return 0;
+ kfree(hdr);
+
+ if (ret)
+ riocm_error("ch(%d) send CLOSE failed (ret=%d)", ch->id, ret);
+
+ return ret;
+}
+
+/*
+ * riocm_ch_close - closes a channel object with specified ID (by local request)
+ * @ch: channel to be closed
+ */
+static int riocm_ch_close(struct rio_channel *ch)
+{
+ unsigned long tmo = msecs_to_jiffies(3000);
+ enum rio_cm_state state;
+ long wret;
+ int ret = 0;
+
+ riocm_debug(CHOP, "ch_%d by %s(%d)",
+ ch->id, current->comm, task_pid_nr(current));
+
+ state = riocm_exch(ch, RIO_CM_DESTROYING);
+ if (state == RIO_CM_CONNECTED)
+ riocm_send_close(ch);
+
+ complete_all(&ch->comp);
+
+ riocm_put_channel(ch);
+ wret = wait_for_completion_interruptible_timeout(&ch->comp_close, tmo);
+
+ riocm_debug(WAIT, "wait on %d returns %ld", ch->id, wret);
+
+ if (wret == 0) {
+ /* Timeout on wait occurred */
+ riocm_debug(CHOP, "%s(%d) timed out waiting for ch %d",
+ current->comm, task_pid_nr(current), ch->id);
+ ret = -ETIMEDOUT;
+ } else if (wret == -ERESTARTSYS) {
+ /* wait_for_completion was interrupted by a signal */
+ riocm_debug(CHOP, "%s(%d) wait for ch %d was interrupted",
+ current->comm, task_pid_nr(current), ch->id);
+ ret = -EINTR;
+ }
+
+ if (!ret) {
+ riocm_debug(CHOP, "ch_%d resources released", ch->id);
+ kfree(ch);
+ } else {
+ riocm_debug(CHOP, "failed to release ch_%d resources", ch->id);
+ }
+
+ return ret;
+}
+
+/*
+ * riocm_cdev_open() - Open character device
+ */
+static int riocm_cdev_open(struct inode *inode, struct file *filp)
+{
+ riocm_debug(INIT, "by %s(%d) filp=%p ",
+ current->comm, task_pid_nr(current), filp);
+
+ if (list_empty(&cm_dev_list))
+ return -ENODEV;
+
+ return 0;
+}
+
+/*
+ * riocm_cdev_release() - Release character device
+ */
+static int riocm_cdev_release(struct inode *inode, struct file *filp)
+{
+ struct rio_channel *ch, *_c;
+ unsigned int i;
+ LIST_HEAD(list);
+
+ riocm_debug(EXIT, "by %s(%d) filp=%p",
+ current->comm, task_pid_nr(current), filp);
+
+ /* Check if there are channels associated with this file descriptor */
+ spin_lock_bh(&idr_lock);
+ idr_for_each_entry(&ch_idr, ch, i) {
+ if (ch && ch->filp == filp) {
+ riocm_debug(EXIT, "ch_%d not released by %s(%d)",
+ ch->id, current->comm,
+ task_pid_nr(current));
+ idr_remove(&ch_idr, ch->id);
+ list_add(&ch->ch_node, &list);
+ }
+ }
+ spin_unlock_bh(&idr_lock);
+
+ if (!list_empty(&list)) {
+ list_for_each_entry_safe(ch, _c, &list, ch_node) {
+ list_del(&ch->ch_node);
+ riocm_ch_close(ch);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * cm_ep_get_list_size() - Reports number of endpoints in the network
+ */
+static int cm_ep_get_list_size(void __user *arg)
+{
+ u32 __user *p = arg;
+ u32 mport_id;
+ u32 count = 0;
+ struct cm_dev *cm;
+
+ if (get_user(mport_id, p))
+ return -EFAULT;
+ if (mport_id >= RIO_MAX_MPORTS)
+ return -EINVAL;
+
+ /* Find a matching cm_dev object */
+ down_read(&rdev_sem);
+ list_for_each_entry(cm, &cm_dev_list, list) {
+ if (cm->mport->id == mport_id) {
+ count = cm->npeers;
+ up_read(&rdev_sem);
+ if (copy_to_user(arg, &count, sizeof(u32)))
+ return -EFAULT;
+ return 0;
+ }
+ }
+ up_read(&rdev_sem);
+
+ return -ENODEV;
+}
+
+/*
+ * cm_ep_get_list() - Returns list of attached endpoints
+ */
+static int cm_ep_get_list(void __user *arg)
+{
+ struct cm_dev *cm;
+ struct cm_peer *peer;
+ u32 info[2];
+ void *buf;
+ u32 nent;
+ u32 *entry_ptr;
+ u32 i = 0;
+ int ret = 0;
+
+ if (copy_from_user(&info, arg, sizeof(info)))
+ return -EFAULT;
+
+ if (info[1] >= RIO_MAX_MPORTS || info[0] > RIOCM_MAX_EP_COUNT)
+ return -EINVAL;
+
+ /* Find a matching cm_dev object */
+ down_read(&rdev_sem);
+ list_for_each_entry(cm, &cm_dev_list, list)
+ if (cm->mport->id == (u8)info[1])
+ goto found;
+
+ up_read(&rdev_sem);
+ return -ENODEV;
+
+found:
+ nent = min(info[0], cm->npeers);
+ buf = kcalloc(nent + 2, sizeof(u32), GFP_KERNEL);
+ if (!buf) {
+ up_read(&rdev_sem);
+ return -ENOMEM;
+ }
+
+ entry_ptr = (u32 *)((uintptr_t)buf + 2*sizeof(u32));
+
+ list_for_each_entry(peer, &cm->peers, node) {
+ *entry_ptr = (u32)peer->rdev->destid;
+ entry_ptr++;
+ if (++i == nent)
+ break;
+ }
+ up_read(&rdev_sem);
+
+ ((u32 *)buf)[0] = i; /* report an updated number of entries */
+ ((u32 *)buf)[1] = info[1]; /* put back an mport ID */
+ /* copy only what was allocated: nent entries plus 2 header words */
+ if (copy_to_user(arg, buf, sizeof(u32) * (nent + 2)))
+ ret = -EFAULT;
+
+ kfree(buf);
+ return ret;
+}
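+
+/*
+ * Layout of the buffer returned by cm_ep_get_list() (illustrative):
+ *
+ *	u32[0]  - number of endpoint entries actually reported
+ *	u32[1]  - mport ID (echoed back to the caller)
+ *	u32[2+] - destination IDs of the attached endpoints
+ */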
+
+/*
+ * cm_mport_get_list() - Returns list of available local mport devices
+ */
+static int cm_mport_get_list(void __user *arg)
+{
+ int ret = 0;
+ u32 entries;
+ void *buf;
+ struct cm_dev *cm;
+ u32 *entry_ptr;
+ int count = 0;
+
+ if (copy_from_user(&entries, arg, sizeof(entries)))
+ return -EFAULT;
+ if (entries == 0 || entries > RIO_MAX_MPORTS)
+ return -EINVAL;
+ buf = kcalloc(entries + 1, sizeof(u32), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Scan all registered cm_dev objects */
+ entry_ptr = (u32 *)((uintptr_t)buf + sizeof(u32));
+ down_read(&rdev_sem);
+ list_for_each_entry(cm, &cm_dev_list, list) {
+ if (count++ < entries) {
+ *entry_ptr = (cm->mport->id << 16) |
+ cm->mport->host_deviceid;
+ entry_ptr++;
+ }
+ }
+ up_read(&rdev_sem);
+
+ *((u32 *)buf) = count; /* report the real number of entries */
+ if (count > entries)
+ count = entries; /* buf holds at most 'entries' IDs */
+ if (copy_to_user(arg, buf, sizeof(u32) * (count + 1)))
+ ret = -EFAULT;
+
+ kfree(buf);
+ return ret;
+}
+
+/*
+ * cm_chan_create() - Create a message exchange channel
+ */
+static int cm_chan_create(struct file *filp, void __user *arg)
+{
+ u16 __user *p = arg;
+ u16 ch_num;
+ struct rio_channel *ch;
+
+ if (get_user(ch_num, p))
+ return -EFAULT;
+
+ riocm_debug(CHOP, "ch_%d requested by %s(%d)",
+ ch_num, current->comm, task_pid_nr(current));
+ ch = riocm_ch_create(&ch_num);
+ if (IS_ERR(ch))
+ return PTR_ERR(ch);
+
+ ch->filp = filp;
+ riocm_debug(CHOP, "ch_%d created by %s(%d)",
+ ch_num, current->comm, task_pid_nr(current));
+ return put_user(ch_num, p);
+}
+
+/*
+ * cm_chan_close() - Close channel
+ * @filp: Pointer to file object
+ * @arg: Channel to close
+ */
+static int cm_chan_close(struct file *filp, void __user *arg)
+{
+ u16 __user *p = arg;
+ u16 ch_num;
+ struct rio_channel *ch;
+
+ if (get_user(ch_num, p))
+ return -EFAULT;
+
+ riocm_debug(CHOP, "ch_%d by %s(%d)",
+ ch_num, current->comm, task_pid_nr(current));
+
+ spin_lock_bh(&idr_lock);
+ ch = idr_find(&ch_idr, ch_num);
+ if (!ch) {
+ spin_unlock_bh(&idr_lock);
+ return 0;
+ }
+ if (ch->filp != filp) {
+ spin_unlock_bh(&idr_lock);
+ return -EINVAL;
+ }
+ idr_remove(&ch_idr, ch->id);
+ spin_unlock_bh(&idr_lock);
+
+ return riocm_ch_close(ch);
+}
+
+/*
+ * cm_chan_bind() - Bind channel
+ * @arg: Channel number
+ */
+static int cm_chan_bind(void __user *arg)
+{
+ struct rio_cm_channel chan;
+
+ if (copy_from_user(&chan, arg, sizeof(chan)))
+ return -EFAULT;
+ if (chan.mport_id >= RIO_MAX_MPORTS)
+ return -EINVAL;
+
+ return riocm_ch_bind(chan.id, chan.mport_id, NULL);
+}
+
+/*
+ * cm_chan_listen() - Listen on channel
+ * @arg: Channel number
+ */
+static int cm_chan_listen(void __user *arg)
+{
+ u16 __user *p = arg;
+ u16 ch_num;
+
+ if (get_user(ch_num, p))
+ return -EFAULT;
+
+ return riocm_ch_listen(ch_num);
+}
+
+/*
+ * cm_chan_accept() - Accept incoming connection
+ * @filp: Pointer to file object
+ * @arg: Channel number
+ */
+static int cm_chan_accept(struct file *filp, void __user *arg)
+{
+ struct rio_cm_accept param;
+ long accept_to;
+ struct rio_channel *ch;
+
+ if (copy_from_user(&param, arg, sizeof(param)))
+ return -EFAULT;
+
+ riocm_debug(CHOP, "on ch_%d by %s(%d)",
+ param.ch_num, current->comm, task_pid_nr(current));
+
+ accept_to = param.wait_to ?
+ msecs_to_jiffies(param.wait_to) : 0;
+
+ ch = riocm_ch_accept(param.ch_num, &param.ch_num, accept_to);
+ if (IS_ERR(ch))
+ return PTR_ERR(ch);
+ ch->filp = filp;
+
+ riocm_debug(CHOP, "new ch_%d for %s(%d)",
+ ch->id, current->comm, task_pid_nr(current));
+
+ if (copy_to_user(arg, &param, sizeof(param)))
+ return -EFAULT;
+ return 0;
+}
+
+/*
+ * cm_chan_connect() - Connect on channel
+ * @arg: Channel information
+ */
+static int cm_chan_connect(void __user *arg)
+{
+ struct rio_cm_channel chan;
+ struct cm_dev *cm;
+ struct cm_peer *peer;
+ int ret = -ENODEV;
+
+ if (copy_from_user(&chan, arg, sizeof(chan)))
+ return -EFAULT;
+ if (chan.mport_id >= RIO_MAX_MPORTS)
+ return -EINVAL;
+
+ down_read(&rdev_sem);
+
+ /* Find matching cm_dev object */
+ list_for_each_entry(cm, &cm_dev_list, list) {
+ if (cm->mport->id == chan.mport_id) {
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret)
+ goto err_out;
+
+ if (chan.remote_destid >= RIO_ANY_DESTID(cm->mport->sys_size)) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ /* Find corresponding RapidIO endpoint device object */
+ ret = -ENODEV;
+
+ list_for_each_entry(peer, &cm->peers, node) {
+ if (peer->rdev->destid == chan.remote_destid) {
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret)
+ goto err_out;
+
+ up_read(&rdev_sem);
+
+ return riocm_ch_connect(chan.id, cm, peer, chan.remote_channel);
+err_out:
+ up_read(&rdev_sem);
+ return ret;
+}
+
+/*
+ * cm_chan_msg_send() - Send a message through channel
+ * @arg: Outbound message information
+ */
+static int cm_chan_msg_send(void __user *arg)
+{
+ struct rio_cm_msg msg;
+ void *buf;
+ int ret = 0;
+
+ if (copy_from_user(&msg, arg, sizeof(msg)))
+ return -EFAULT;
+ if (msg.size > RIO_MAX_MSG_SIZE ||
+ msg.size < sizeof(struct rio_ch_chan_hdr))
+ return -EINVAL; /* riocm_ch_send() writes a full CM header */
+
+ buf = kmalloc(msg.size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, (void __user *)(uintptr_t)msg.msg, msg.size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = riocm_ch_send(msg.ch_num, buf, msg.size);
+out:
+ kfree(buf);
+ return ret;
+}
+
+/*
+ * cm_chan_msg_rcv() - Receive a message through channel
+ * @arg: Inbound message information
+ */
+static int cm_chan_msg_rcv(void __user *arg)
+{
+ struct rio_cm_msg msg;
+ struct rio_channel *ch;
+ void *buf;
+ long rxto;
+ int ret = 0, msg_size;
+
+ if (copy_from_user(&msg, arg, sizeof(msg)))
+ return -EFAULT;
+
+ if (msg.ch_num == 0 || msg.size == 0)
+ return -EINVAL;
+
+ ch = riocm_get_channel(msg.ch_num);
+ if (!ch)
+ return -ENODEV;
+
+ rxto = msg.rxto ? msecs_to_jiffies(msg.rxto) : MAX_SCHEDULE_TIMEOUT;
+
+ ret = riocm_ch_receive(ch, &buf, rxto);
+ if (ret)
+ goto out;
+
+ msg_size = min(msg.size, (u16)(RIO_MAX_MSG_SIZE));
+
+ if (copy_to_user((void __user *)(uintptr_t)msg.msg, buf, msg_size))
+ ret = -EFAULT;
+
+ riocm_ch_free_rxbuf(ch, buf);
+out:
+ riocm_put_channel(ch);
+ return ret;
+}
+
+/*
+ * riocm_cdev_ioctl() - IOCTL requests handler
+ */
+static long
+riocm_cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case RIO_CM_EP_GET_LIST_SIZE:
+ return cm_ep_get_list_size((void __user *)arg);
+ case RIO_CM_EP_GET_LIST:
+ return cm_ep_get_list((void __user *)arg);
+ case RIO_CM_CHAN_CREATE:
+ return cm_chan_create(filp, (void __user *)arg);
+ case RIO_CM_CHAN_CLOSE:
+ return cm_chan_close(filp, (void __user *)arg);
+ case RIO_CM_CHAN_BIND:
+ return cm_chan_bind((void __user *)arg);
+ case RIO_CM_CHAN_LISTEN:
+ return cm_chan_listen((void __user *)arg);
+ case RIO_CM_CHAN_ACCEPT:
+ return cm_chan_accept(filp, (void __user *)arg);
+ case RIO_CM_CHAN_CONNECT:
+ return cm_chan_connect((void __user *)arg);
+ case RIO_CM_CHAN_SEND:
+ return cm_chan_msg_send((void __user *)arg);
+ case RIO_CM_CHAN_RECEIVE:
+ return cm_chan_msg_rcv((void __user *)arg);
+ case RIO_CM_MPORT_GET_LIST:
+ return cm_mport_get_list((void __user *)arg);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
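+
+/*
+ * User-space usage sketch (illustrative only; assumes the UAPI definitions
+ * of the RIO_CM_* ioctl codes and struct rio_cm_channel, and that the
+ * character device is created as /dev/rio_cm):
+ *
+ *	int fd = open("/dev/rio_cm", O_RDWR);
+ *	uint16_t ch = 0;			// 0 = auto-assigned ID
+ *	struct rio_cm_channel chan;
+ *
+ *	ioctl(fd, RIO_CM_CHAN_CREATE, &ch);	// returns assigned ID in ch
+ *	chan.id = ch;
+ *	chan.mport_id = 0;			// bind to mport 0
+ *	ioctl(fd, RIO_CM_CHAN_BIND, &chan);
+ *	ioctl(fd, RIO_CM_CHAN_LISTEN, &ch);	// ready for CHAN_ACCEPT
+ */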
+
+static const struct file_operations riocm_cdev_fops = {
+ .owner = THIS_MODULE,
+ .open = riocm_cdev_open,
+ .release = riocm_cdev_release,
+ .unlocked_ioctl = riocm_cdev_ioctl,
+};
+
+/*
+ * riocm_add_dev - add new remote RapidIO device into channel management core
+ * @dev: device object associated with RapidIO device
+ * @sif: subsystem interface
+ *
+ * Adds the specified RapidIO device (if applicable) into peers list of
+ * the corresponding channel management device (cm_dev).
+ */
+static int riocm_add_dev(struct device *dev, struct subsys_interface *sif)
+{
+ struct cm_peer *peer;
+ struct rio_dev *rdev = to_rio_dev(dev);
+ struct cm_dev *cm;
+
+ /* Check if the remote device has capabilities required to support CM */
+ if (!dev_cm_capable(rdev))
+ return 0;
+
+ riocm_debug(RDEV, "(%s)", rio_name(rdev));
+
+ peer = kmalloc(sizeof(*peer), GFP_KERNEL);
+ if (!peer)
+ return -ENOMEM;
+
+ /* Find a corresponding cm_dev object */
+ down_write(&rdev_sem);
+ list_for_each_entry(cm, &cm_dev_list, list) {
+ if (cm->mport == rdev->net->hport)
+ goto found;
+ }
+
+ up_write(&rdev_sem);
+ kfree(peer);
+ return -ENODEV;
+
+found:
+ peer->rdev = rdev;
+ list_add_tail(&peer->node, &cm->peers);
+ cm->npeers++;
+
+ up_write(&rdev_sem);
+ return 0;
+}
+
+/*
+ * riocm_remove_dev - remove remote RapidIO device from channel management core
+ * @dev: device object associated with RapidIO device
+ * @sif: subsystem interface
+ *
+ * Removes the specified RapidIO device (if applicable) from peers list of
+ * the corresponding channel management device (cm_dev).
+ */
+static void riocm_remove_dev(struct device *dev, struct subsys_interface *sif)
+{
+ struct rio_dev *rdev = to_rio_dev(dev);
+ struct cm_dev *cm;
+ struct cm_peer *peer;
+ struct rio_channel *ch, *_c;
+ unsigned int i;
+ bool found = false;
+ LIST_HEAD(list);
+
+ /* Check if the remote device has capabilities required to support CM */
+ if (!dev_cm_capable(rdev))
+ return;
+
+ riocm_debug(RDEV, "(%s)", rio_name(rdev));
+
+ /* Find matching cm_dev object */
+ down_write(&rdev_sem);
+ list_for_each_entry(cm, &cm_dev_list, list) {
+ if (cm->mport == rdev->net->hport) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ up_write(&rdev_sem);
+ return;
+ }
+
+ /* Remove remote device from the list of peers */
+ found = false;
+ list_for_each_entry(peer, &cm->peers, node) {
+ if (peer->rdev == rdev) {
+ riocm_debug(RDEV, "removing peer %s", rio_name(rdev));
+ found = true;
+ list_del(&peer->node);
+ cm->npeers--;
+ kfree(peer);
+ break;
+ }
+ }
+
+ up_write(&rdev_sem);
+
+ if (!found)
+ return;
+
+ /*
+ * Release channels associated with this peer
+ */
+
+ spin_lock_bh(&idr_lock);
+ idr_for_each_entry(&ch_idr, ch, i) {
+ if (ch && ch->rdev == rdev) {
+ if (atomic_read(&rdev->state) != RIO_DEVICE_SHUTDOWN)
+ riocm_exch(ch, RIO_CM_DISCONNECT);
+ idr_remove(&ch_idr, ch->id);
+ list_add(&ch->ch_node, &list);
+ }
+ }
+ spin_unlock_bh(&idr_lock);
+
+ if (!list_empty(&list)) {
+ list_for_each_entry_safe(ch, _c, &list, ch_node) {
+ list_del(&ch->ch_node);
+ riocm_ch_close(ch);
+ }
+ }
+}
+
+/*
+ * riocm_cdev_add() - Create rio_cm char device
+ * @devno: device number assigned to device (MAJ + MIN)
+ */
+static int riocm_cdev_add(dev_t devno)
+{
+ int ret;
+
+ cdev_init(&riocm_cdev.cdev, &riocm_cdev_fops);
+ riocm_cdev.cdev.owner = THIS_MODULE;
+ ret = cdev_add(&riocm_cdev.cdev, devno, 1);
+ if (ret < 0) {
+ riocm_error("Cannot register a device with error %d", ret);
+ return ret;
+ }
+
+ riocm_cdev.dev = device_create(dev_class, NULL, devno, NULL, DEV_NAME);
+ if (IS_ERR(riocm_cdev.dev)) {
+ cdev_del(&riocm_cdev.cdev);
+ return PTR_ERR(riocm_cdev.dev);
+ }
+
+ riocm_debug(MPORT, "Added %s cdev(%d:%d)",
+ DEV_NAME, MAJOR(devno), MINOR(devno));
+
+ return 0;
+}
+
+/*
+ * riocm_add_mport - add new local mport device into channel management core
+ * @dev: device object associated with mport
+ * @class_intf: class interface
+ *
+ * When a new mport device is added, CM immediately reserves the inbound and
+ * outbound RapidIO mailboxes that it will use for messaging.
+ */
+static int riocm_add_mport(struct device *dev,
+ struct class_interface *class_intf)
+{
+ int rc;
+ int i;
+ struct cm_dev *cm;
+ struct rio_mport *mport = to_rio_mport(dev);
+
+ riocm_debug(MPORT, "add mport %s", mport->name);
+
+ cm = kzalloc(sizeof(*cm), GFP_KERNEL);
+ if (!cm)
+ return -ENOMEM;
+
+ cm->mport = mport;
+
+ rc = rio_request_outb_mbox(mport, cm, cmbox,
+ RIOCM_TX_RING_SIZE, riocm_outb_msg_event);
+ if (rc) {
+ riocm_error("failed to allocate OBMBOX_%d on %s",
+ cmbox, mport->name);
+ kfree(cm);
+ return -ENODEV;
+ }
+
+ rc = rio_request_inb_mbox(mport, cm, cmbox,
+ RIOCM_RX_RING_SIZE, riocm_inb_msg_event);
+ if (rc) {
+ riocm_error("failed to allocate IBMBOX_%d on %s",
+ cmbox, mport->name);
+ rio_release_outb_mbox(mport, cmbox);
+ kfree(cm);
+ return -ENODEV;
+ }
+
+ /*
+ * Allocate and register inbound messaging buffers to be ready
+ * to receive channel and system management requests
+ */
+ for (i = 0; i < RIOCM_RX_RING_SIZE; i++)
+ cm->rx_buf[i] = NULL;
+
+ cm->rx_slots = RIOCM_RX_RING_SIZE;
+ mutex_init(&cm->rx_lock);
+ riocm_rx_fill(cm, RIOCM_RX_RING_SIZE);
+ cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
+ if (!cm->rx_wq) {
+ riocm_error("failed to allocate RX work queue for %s",
+ mport->name);
+ rio_release_inb_mbox(mport, cmbox);
+ rio_release_outb_mbox(mport, cmbox);
+ riocm_rx_free(cm);
+ kfree(cm);
+ return -ENOMEM;
+ }
+ INIT_WORK(&cm->rx_work, rio_ibmsg_handler);
+
+ cm->tx_slot = 0;
+ cm->tx_cnt = 0;
+ cm->tx_ack_slot = 0;
+ spin_lock_init(&cm->tx_lock);
+
+ INIT_LIST_HEAD(&cm->peers);
+ cm->npeers = 0;
+ INIT_LIST_HEAD(&cm->tx_reqs);
+
+ down_write(&rdev_sem);
+ list_add_tail(&cm->list, &cm_dev_list);
+ up_write(&rdev_sem);
+
+ return 0;
+}
+
+/*
+ * riocm_remove_mport - remove local mport device from channel management core
+ * @dev: device object associated with mport
+ * @class_intf: class interface
+ *
+ * Removes a local mport device from the list of registered devices that
+ * provide channel management services. Does nothing if the specified mport
+ * is not registered with the CM core.
+ */
+static void riocm_remove_mport(struct device *dev,
+ struct class_interface *class_intf)
+{
+ struct rio_mport *mport = to_rio_mport(dev);
+ struct cm_dev *cm;
+ struct cm_peer *peer, *temp;
+ struct rio_channel *ch, *_c;
+ unsigned int i;
+ bool found = false;
+ LIST_HEAD(list);
+
+ riocm_debug(MPORT, "%s", mport->name);
+
+ /* Find a matching cm_dev object */
+ down_write(&rdev_sem);
+ list_for_each_entry(cm, &cm_dev_list, list) {
+ if (cm->mport == mport) {
+ list_del(&cm->list);
+ found = true;
+ break;
+ }
+ }
+ up_write(&rdev_sem);
+ if (!found)
+ return;
+
+ flush_workqueue(cm->rx_wq);
+ destroy_workqueue(cm->rx_wq);
+
+ /* Release channels bound to this mport */
+ spin_lock_bh(&idr_lock);
+ idr_for_each_entry(&ch_idr, ch, i) {
+ if (ch->cmdev == cm) {
+ riocm_debug(RDEV, "%s drop ch_%d",
+ mport->name, ch->id);
+ idr_remove(&ch_idr, ch->id);
+ list_add(&ch->ch_node, &list);
+ }
+ }
+ spin_unlock_bh(&idr_lock);
+
+ if (!list_empty(&list)) {
+ list_for_each_entry_safe(ch, _c, &list, ch_node) {
+ list_del(&ch->ch_node);
+ riocm_ch_close(ch);
+ }
+ }
+
+ rio_release_inb_mbox(mport, cmbox);
+ rio_release_outb_mbox(mport, cmbox);
+
+ /* Remove and free peer entries */
+ if (!list_empty(&cm->peers))
+ riocm_debug(RDEV, "ATTN: peer list not empty");
+ list_for_each_entry_safe(peer, temp, &cm->peers, node) {
+ riocm_debug(RDEV, "removing peer %s", rio_name(peer->rdev));
+ list_del(&peer->node);
+ kfree(peer);
+ }
+
+ riocm_rx_free(cm);
+ kfree(cm);
+ riocm_debug(MPORT, "%s done", mport->name);
+}
+
+static int rio_cm_shutdown(struct notifier_block *nb, unsigned long code,
+ void *unused)
+{
+ struct rio_channel *ch;
+ unsigned int i;
+ LIST_HEAD(list);
+
+ riocm_debug(EXIT, ".");
+
+ /*
+ * If there are any channels left in connected state send
+ * close notification to the connection partner.
+ * First build a list of channels that require a closing
+ * notification because function riocm_send_close() should
+ * be called outside of spinlock protected code.
+ */
+ spin_lock_bh(&idr_lock);
+ idr_for_each_entry(&ch_idr, ch, i) {
+ if (ch->state == RIO_CM_CONNECTED) {
+ riocm_debug(EXIT, "close ch %d", ch->id);
+ idr_remove(&ch_idr, ch->id);
+ list_add(&ch->ch_node, &list);
+ }
+ }
+ spin_unlock_bh(&idr_lock);
+
+ list_for_each_entry(ch, &list, ch_node)
+ riocm_send_close(ch);
+
+ return NOTIFY_DONE;
+}
+
+/*
+ * riocm_interface handles addition/removal of remote RapidIO devices
+ */
+static struct subsys_interface riocm_interface = {
+ .name = "rio_cm",
+ .subsys = &rio_bus_type,
+ .add_dev = riocm_add_dev,
+ .remove_dev = riocm_remove_dev,
+};
+
+/*
+ * rio_mport_interface handles addition/removal of local mport devices
+ */
+static struct class_interface rio_mport_interface __refdata = {
+ .class = &rio_mport_class,
+ .add_dev = riocm_add_mport,
+ .remove_dev = riocm_remove_mport,
+};
+
+static struct notifier_block rio_cm_notifier = {
+ .notifier_call = rio_cm_shutdown,
+};
+
+static int __init riocm_init(void)
+{
+ int ret;
+
+ /* Create device class needed by udev */
+ dev_class = class_create(THIS_MODULE, DRV_NAME);
+ if (IS_ERR(dev_class)) {
+ riocm_error("Cannot create " DRV_NAME " class");
+ return PTR_ERR(dev_class);
+ }
+
+ ret = alloc_chrdev_region(&dev_number, 0, 1, DRV_NAME);
+ if (ret) {
+ class_destroy(dev_class);
+ return ret;
+ }
+
+ dev_major = MAJOR(dev_number);
+ dev_minor_base = MINOR(dev_number);
+ riocm_debug(INIT, "Registered class with %d major", dev_major);
+
+ /*
+ * Register as rapidio_port class interface to get notifications about
+ * mport additions and removals.
+ */
+ ret = class_interface_register(&rio_mport_interface);
+ if (ret) {
+ riocm_error("class_interface_register error: %d", ret);
+ goto err_reg;
+ }
+
+ /*
+ * Register as RapidIO bus interface to get notifications about
+ * addition/removal of remote RapidIO devices.
+ */
+ ret = subsys_interface_register(&riocm_interface);
+ if (ret) {
+ riocm_error("subsys_interface_register error: %d", ret);
+ goto err_cl;
+ }
+
+ ret = register_reboot_notifier(&rio_cm_notifier);
+ if (ret) {
+ riocm_error("failed to register reboot notifier (err=%d)", ret);
+ goto err_sif;
+ }
+
+ ret = riocm_cdev_add(dev_number);
+ if (ret) {
+ unregister_reboot_notifier(&rio_cm_notifier);
+ ret = -ENODEV;
+ goto err_sif;
+ }
+
+ return 0;
+err_sif:
+ subsys_interface_unregister(&riocm_interface);
+err_cl:
+ class_interface_unregister(&rio_mport_interface);
+err_reg:
+ unregister_chrdev_region(dev_number, 1);
+ class_destroy(dev_class);
+ return ret;
+}
+
+static void __exit riocm_exit(void)
+{
+ riocm_debug(EXIT, "enter");
+ unregister_reboot_notifier(&rio_cm_notifier);
+ subsys_interface_unregister(&riocm_interface);
+ class_interface_unregister(&rio_mport_interface);
+ idr_destroy(&ch_idr);
+
+ device_unregister(riocm_cdev.dev);
+ cdev_del(&(riocm_cdev.cdev));
+
+ class_destroy(dev_class);
+ unregister_chrdev_region(dev_number, 1);
+}
+
+late_initcall(riocm_init);
+module_exit(riocm_exit);
diff --git a/drivers/rapidio/switches/Kconfig b/drivers/rapidio/switches/Kconfig
index 345841562..92767fd3b 100644
--- a/drivers/rapidio/switches/Kconfig
+++ b/drivers/rapidio/switches/Kconfig
@@ -22,3 +22,9 @@ config RAPIDIO_CPS_GEN2
default n
---help---
Includes support for ITD CPS Gen.2 serial RapidIO switches.
+
+config RAPIDIO_RXS_GEN3
+ tristate "IDT RXS Gen.3 SRIO switch support"
+ default n
+ ---help---
+ Includes support for IDT RXS Gen.3 serial RapidIO switches.
diff --git a/drivers/rapidio/switches/Makefile b/drivers/rapidio/switches/Makefile
index 051cc6b38..6bdd54c4e 100644
--- a/drivers/rapidio/switches/Makefile
+++ b/drivers/rapidio/switches/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_RAPIDIO_TSI57X) += tsi57x.o
obj-$(CONFIG_RAPIDIO_CPS_XX) += idtcps.o
obj-$(CONFIG_RAPIDIO_TSI568) += tsi568.o
obj-$(CONFIG_RAPIDIO_CPS_GEN2) += idt_gen2.o
+obj-$(CONFIG_RAPIDIO_RXS_GEN3) += idt_gen3.o
diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c
index 9f7fe2158..e67b923b1 100644
--- a/drivers/rapidio/switches/idt_gen2.c
+++ b/drivers/rapidio/switches/idt_gen2.c
@@ -436,10 +436,11 @@ static int idtg2_probe(struct rio_dev *rdev, const struct rio_device_id *id)
RIO_STD_RTE_DEFAULT_PORT, IDT_NO_ROUTE);
}
+ spin_unlock(&rdev->rswitch->lock);
+
/* Create device-specific sysfs attributes */
idtg2_sysfs(rdev, true);
- spin_unlock(&rdev->rswitch->lock);
return 0;
}
@@ -452,11 +453,9 @@ static void idtg2_remove(struct rio_dev *rdev)
return;
}
rdev->rswitch->ops = NULL;
-
+ spin_unlock(&rdev->rswitch->lock);
/* Remove device-specific sysfs attributes */
idtg2_sysfs(rdev, false);
-
- spin_unlock(&rdev->rswitch->lock);
}
static struct rio_device_id idtg2_id_table[] = {
diff --git a/drivers/rapidio/switches/idt_gen3.c b/drivers/rapidio/switches/idt_gen3.c
new file mode 100644
index 000000000..c5923a547
--- /dev/null
+++ b/drivers/rapidio/switches/idt_gen3.c
@@ -0,0 +1,382 @@
+/*
+ * IDT RXS Gen.3 Serial RapidIO switch family support
+ *
+ * Copyright 2016 Integrated Device Technology, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/stat.h>
+#include <linux/module.h>
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/rio_ids.h>
+#include <linux/delay.h>
+
+#include <asm/page.h>
+#include "../rio.h"
+
+#define RIO_EM_PW_STAT 0x40020
+#define RIO_PW_CTL 0x40204
+#define RIO_PW_CTL_PW_TMR 0xffffff00
+#define RIO_PW_ROUTE 0x40208
+
+#define RIO_EM_DEV_INT_EN 0x40030
+
+#define RIO_PLM_SPx_IMP_SPEC_CTL(x) (0x10100 + (x)*0x100)
+#define RIO_PLM_SPx_IMP_SPEC_CTL_SOFT_RST 0x02000000
+
+#define RIO_PLM_SPx_PW_EN(x) (0x10118 + (x)*0x100)
+#define RIO_PLM_SPx_PW_EN_OK2U 0x40000000
+#define RIO_PLM_SPx_PW_EN_LINIT 0x10000000
+
+#define RIO_BC_L2_Gn_ENTRYx_CSR(n, x) (0x31000 + (n)*0x400 + (x)*0x4)
+#define RIO_SPx_L2_Gn_ENTRYy_CSR(x, n, y) \
+ (0x51000 + (x)*0x2000 + (n)*0x400 + (y)*0x4)
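+
+/*
+ * Example (illustrative): entry 5 of level-2 group 0 in the routing table
+ * of port 2 is at RIO_SPx_L2_Gn_ENTRYy_CSR(2, 0, 5) =
+ * 0x51000 + 2*0x2000 + 0*0x400 + 5*0x4 = 0x55014.
+ */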
+
+static int
+idtg3_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table, u16 route_destid, u8 route_port)
+{
+ u32 rval;
+ u32 entry = route_port;
+ int err = 0;
+
+ pr_debug("RIO: %s t=0x%x did_%x to p_%x\n",
+ __func__, table, route_destid, entry);
+
+ if (route_destid > 0xFF)
+ return -EINVAL;
+
+ if (route_port == RIO_INVALID_ROUTE)
+ entry = RIO_RT_ENTRY_DROP_PKT;
+
+ if (table == RIO_GLOBAL_TABLE) {
+ /* Use broadcast register to update all per-port tables */
+ err = rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_BC_L2_Gn_ENTRYx_CSR(0, route_destid),
+ entry);
+ return err;
+ }
+
+ /*
+ * Verify that specified port/table number is valid
+ */
+ err = rio_mport_read_config_32(mport, destid, hopcount,
+ RIO_SWP_INFO_CAR, &rval);
+ if (err)
+ return err;
+
+ if (table >= RIO_GET_TOTAL_PORTS(rval))
+ return -EINVAL;
+
+ err = rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_SPx_L2_Gn_ENTRYy_CSR(table, 0, route_destid),
+ entry);
+ return err;
+}
+
+static int
+idtg3_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table, u16 route_destid, u8 *route_port)
+{
+ u32 rval;
+ int err;
+
+ if (route_destid > 0xFF)
+ return -EINVAL;
+
+ err = rio_mport_read_config_32(mport, destid, hopcount,
+ RIO_SWP_INFO_CAR, &rval);
+ if (err)
+ return err;
+
+ /*
+ * This switch device does not have a dedicated global routing table.
+ * Requests for the global table are served by reading the routing
+ * table of the ingress port of the maintenance read request.
+ */
+ if (table == RIO_GLOBAL_TABLE)
+ table = RIO_GET_PORT_NUM(rval);
+ else if (table >= RIO_GET_TOTAL_PORTS(rval))
+ return -EINVAL;
+
+ err = rio_mport_read_config_32(mport, destid, hopcount,
+ RIO_SPx_L2_Gn_ENTRYy_CSR(table, 0, route_destid),
+ &rval);
+ if (err)
+ return err;
+
+ if (rval == RIO_RT_ENTRY_DROP_PKT)
+ *route_port = RIO_INVALID_ROUTE;
+ else
+ *route_port = (u8)rval;
+
+ return 0;
+}
+
+static int
+idtg3_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table)
+{
+ u32 i;
+ u32 rval;
+ int err;
+
+ if (table == RIO_GLOBAL_TABLE) {
+ for (i = 0; i <= 0xff; i++) {
+ err = rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_BC_L2_Gn_ENTRYx_CSR(0, i),
+ RIO_RT_ENTRY_DROP_PKT);
+ if (err)
+ break;
+ }
+
+ return err;
+ }
+
+ err = rio_mport_read_config_32(mport, destid, hopcount,
+ RIO_SWP_INFO_CAR, &rval);
+ if (err)
+ return err;
+
+ if (table >= RIO_GET_TOTAL_PORTS(rval))
+ return -EINVAL;
+
+ for (i = 0; i <= 0xff; i++) {
+ err = rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_SPx_L2_Gn_ENTRYy_CSR(table, 0, i),
+ RIO_RT_ENTRY_DROP_PKT);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+/*
+ * This routine performs device-specific initialization only.
+ * All standard EM configuration should be performed at the upper level.
+ */
+static int
+idtg3_em_init(struct rio_dev *rdev)
+{
+ int i, tmp;
+ u32 rval;
+
+ pr_debug("RIO: %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount);
+
+ /* Disable assertion of interrupt signal */
+ rio_write_config_32(rdev, RIO_EM_DEV_INT_EN, 0);
+
+ /* Disable port-write event notifications during initialization */
+ rio_write_config_32(rdev, rdev->em_efptr + RIO_EM_PW_TX_CTRL,
+ RIO_EM_PW_TX_CTRL_PW_DIS);
+
+ /* Configure Port-Write notifications for hot-swap events */
+ tmp = RIO_GET_TOTAL_PORTS(rdev->swpinfo);
+ for (i = 0; i < tmp; i++) {
+ rio_read_config_32(rdev,
+ RIO_DEV_PORT_N_ERR_STS_CSR(rdev, i),
+ &rval);
+ if (rval & RIO_PORT_N_ERR_STS_PORT_UA)
+ continue;
+
+ /* Clear events signaled before enabling notification */
+ rio_write_config_32(rdev,
+ rdev->em_efptr + RIO_EM_PN_ERR_DETECT(i), 0);
+
+ /* Enable event notifications */
+ rio_write_config_32(rdev,
+ rdev->em_efptr + RIO_EM_PN_ERRRATE_EN(i),
+ RIO_EM_PN_ERRRATE_EN_OK2U | RIO_EM_PN_ERRRATE_EN_U2OK);
+ /* Enable port-write generation on events */
+ rio_write_config_32(rdev, RIO_PLM_SPx_PW_EN(i),
+ RIO_PLM_SPx_PW_EN_OK2U | RIO_PLM_SPx_PW_EN_LINIT);
+ }
+
+ /* Set Port-Write destination port */
+ tmp = RIO_GET_PORT_NUM(rdev->swpinfo);
+ rio_write_config_32(rdev, RIO_PW_ROUTE, 1 << tmp);
+
+ /* Enable sending port-write event notifications */
+ rio_write_config_32(rdev, rdev->em_efptr + RIO_EM_PW_TX_CTRL, 0);
+
+ /* set TVAL = ~50us */
+ rio_write_config_32(rdev,
+ rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8);
+ return 0;
+}
+
+/*
+ * idtg3_em_handler - device-specific error handler
+ *
+ * If the link is down (PORT_UNINIT), the handler does nothing - this is
+ * treated as removal of the link partner from the port.
+ *
+ * If the link is up (PORT_OK), the situation is handled as *new* device
+ * insertion. In this case the ERR_STOP bits are cleared by issuing a soft
+ * reset command to the reporting port. Inbound and outbound ackIDs are
+ * cleared by the reset as well. This way the port is synchronized with the
+ * freshly inserted device (assuming it was reset/powered-up on insertion).
+ *
+ * TODO: This is not sufficient in a situation when a link between two devices
+ * was down and up again (e.g. cable disconnect). For that situation full ackID
+ * realignment process has to be implemented.
+ */
+static int
+idtg3_em_handler(struct rio_dev *rdev, u8 pnum)
+{
+ u32 err_status;
+ u32 rval;
+
+ rio_read_config_32(rdev,
+ RIO_DEV_PORT_N_ERR_STS_CSR(rdev, pnum),
+ &err_status);
+
+ /* Do nothing for device/link removal */
+ if (err_status & RIO_PORT_N_ERR_STS_PORT_UNINIT)
+ return 0;
+
+ /* When the link is OK we have a device insertion.
+ * Request a port soft reset to clear errors if any are present.
+ * Inbound and outbound ackIDs will be 0 after reset.
+ */
+ if (err_status & (RIO_PORT_N_ERR_STS_OUT_ES |
+ RIO_PORT_N_ERR_STS_INP_ES)) {
+ rio_read_config_32(rdev, RIO_PLM_SPx_IMP_SPEC_CTL(pnum), &rval);
+ rio_write_config_32(rdev, RIO_PLM_SPx_IMP_SPEC_CTL(pnum),
+ rval | RIO_PLM_SPx_IMP_SPEC_CTL_SOFT_RST);
+ udelay(10);
+ rio_write_config_32(rdev, RIO_PLM_SPx_IMP_SPEC_CTL(pnum), rval);
+ msleep(500);
+ }
+
+ return 0;
+}
+
+static struct rio_switch_ops idtg3_switch_ops = {
+ .owner = THIS_MODULE,
+ .add_entry = idtg3_route_add_entry,
+ .get_entry = idtg3_route_get_entry,
+ .clr_table = idtg3_route_clr_table,
+ .em_init = idtg3_em_init,
+ .em_handle = idtg3_em_handler,
+};
+
+static int idtg3_probe(struct rio_dev *rdev, const struct rio_device_id *id)
+{
+ pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
+
+ spin_lock(&rdev->rswitch->lock);
+
+ if (rdev->rswitch->ops) {
+ spin_unlock(&rdev->rswitch->lock);
+ return -EINVAL;
+ }
+
+ rdev->rswitch->ops = &idtg3_switch_ops;
+
+ if (rdev->do_enum) {
+ /* Disable hierarchical routing support: Existing fabric
+ * enumeration/discovery process (see rio-scan.c) uses 8-bit
+ * flat destination ID routing only.
+ */
+ rio_write_config_32(rdev, 0x5000 + RIO_BC_RT_CTL_CSR, 0);
+ }
+
+ spin_unlock(&rdev->rswitch->lock);
+
+ return 0;
+}
+
+static void idtg3_remove(struct rio_dev *rdev)
+{
+ pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
+ spin_lock(&rdev->rswitch->lock);
+ if (rdev->rswitch->ops == &idtg3_switch_ops)
+ rdev->rswitch->ops = NULL;
+ spin_unlock(&rdev->rswitch->lock);
+}
+
+/*
+ * Gen3 switches repeat sending PW messages until a corresponding event flag
+ * is cleared. Use shutdown notification to disable generation of port-write
+ * messages if their destination node is shut down.
+ */
+static void idtg3_shutdown(struct rio_dev *rdev)
+{
+ int i;
+ u32 rval;
+ u16 destid;
+
+ /* Currently the enumerator node also acts as the PW handler */
+ if (!rdev->do_enum)
+ return;
+
+ pr_debug("RIO: %s(%s)\n", __func__, rio_name(rdev));
+
+ rio_read_config_32(rdev, RIO_PW_ROUTE, &rval);
+ i = RIO_GET_PORT_NUM(rdev->swpinfo);
+
+ /* Check port-write destination port */
+ if (!((1 << i) & rval))
+ return;
+
+ /* Disable sending port-write event notifications if the PW destID
+ * matches that of the enumerator node
+ */
+ rio_read_config_32(rdev, rdev->em_efptr + RIO_EM_PW_TGT_DEVID, &rval);
+
+ if (rval & RIO_EM_PW_TGT_DEVID_DEV16)
+ destid = rval >> 16;
+ else
+ destid = ((rval & RIO_EM_PW_TGT_DEVID_D8) >> 16);
+
+ if (rdev->net->hport->host_deviceid == destid) {
+ rio_write_config_32(rdev,
+ rdev->em_efptr + RIO_EM_PW_TX_CTRL, 0);
+ pr_debug("RIO: %s(%s) PW transmission disabled\n",
+ __func__, rio_name(rdev));
+ }
+}
+
+static struct rio_device_id idtg3_id_table[] = {
+ {RIO_DEVICE(RIO_DID_IDTRXS1632, RIO_VID_IDT)},
+ {RIO_DEVICE(RIO_DID_IDTRXS2448, RIO_VID_IDT)},
+ { 0, } /* terminate list */
+};
+
+static struct rio_driver idtg3_driver = {
+ .name = "idt_gen3",
+ .id_table = idtg3_id_table,
+ .probe = idtg3_probe,
+ .remove = idtg3_remove,
+ .shutdown = idtg3_shutdown,
+};
+
+static int __init idtg3_init(void)
+{
+ return rio_register_driver(&idtg3_driver);
+}
+
+static void __exit idtg3_exit(void)
+{
+ pr_debug("RIO: %s\n", __func__);
+ rio_unregister_driver(&idtg3_driver);
+ pr_debug("RIO: %s done\n", __func__);
+}
+
+device_initcall(idtg3_init);
+module_exit(idtg3_exit);
+
+MODULE_DESCRIPTION("IDT RXS Gen.3 Serial RapidIO switch family driver");
+MODULE_AUTHOR("Integrated Device Technology, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c
index 42c8b014f..2700d15f7 100644
--- a/drivers/rapidio/switches/tsi57x.c
+++ b/drivers/rapidio/switches/tsi57x.c
@@ -175,12 +175,10 @@ tsi57x_em_init(struct rio_dev *rdev)
/* Clear all pending interrupts */
rio_read_config_32(rdev,
- rdev->phys_efptr +
- RIO_PORT_N_ERR_STS_CSR(portnum),
+ RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum),
&regval);
rio_write_config_32(rdev,
- rdev->phys_efptr +
- RIO_PORT_N_ERR_STS_CSR(portnum),
+ RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum),
regval & 0x07120214);
rio_read_config_32(rdev,
@@ -198,7 +196,7 @@ tsi57x_em_init(struct rio_dev *rdev)
/* Skip next (odd) port if the current port is in x4 mode */
rio_read_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
+ RIO_DEV_PORT_N_CTL_CSR(rdev, portnum),
&regval);
if ((regval & RIO_PORT_N_CTL_PWIDTH) == RIO_PORT_N_CTL_PWIDTH_4)
portnum++;
@@ -221,23 +219,23 @@ tsi57x_em_handler(struct rio_dev *rdev, u8 portnum)
u32 regval;
rio_read_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
+ RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum),
&err_status);
if ((err_status & RIO_PORT_N_ERR_STS_PORT_OK) &&
- (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
- RIO_PORT_N_ERR_STS_PW_INP_ES))) {
+ (err_status & (RIO_PORT_N_ERR_STS_OUT_ES |
+ RIO_PORT_N_ERR_STS_INP_ES))) {
/* Remove any queued packets by locking/unlocking port */
rio_read_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
+ RIO_DEV_PORT_N_CTL_CSR(rdev, portnum),
&regval);
if (!(regval & RIO_PORT_N_CTL_LOCKOUT)) {
rio_write_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
+ RIO_DEV_PORT_N_CTL_CSR(rdev, portnum),
regval | RIO_PORT_N_CTL_LOCKOUT);
udelay(50);
rio_write_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
+ RIO_DEV_PORT_N_CTL_CSR(rdev, portnum),
regval);
}
@@ -245,7 +243,7 @@ tsi57x_em_handler(struct rio_dev *rdev, u8 portnum)
* valid bit
*/
rio_read_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(portnum),
+ RIO_DEV_PORT_N_MNT_RSP_CSR(rdev, portnum),
&regval);
/* Send a Packet-Not-Accepted/Link-Request-Input-Status control
@@ -259,8 +257,8 @@ tsi57x_em_handler(struct rio_dev *rdev, u8 portnum)
while (checkcount--) {
udelay(50);
rio_read_config_32(rdev,
- rdev->phys_efptr +
- RIO_PORT_N_MNT_RSP_CSR(portnum),
+ RIO_DEV_PORT_N_MNT_RSP_CSR(rdev,
+ portnum),
&regval);
if (regval & RIO_PORT_N_MNT_RSP_RVAL)
goto exit_es;
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 144cbf5b3..6c88e31c0 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -498,6 +498,15 @@ config REGULATOR_MT6311
This driver supports the control of different power rails of device
through regulator interface.
+config REGULATOR_MT6323
+ tristate "MediaTek MT6323 PMIC"
+ depends on MFD_MT6397
+ help
+ Say y here to enable support for the power regulator of the
+ MediaTek MT6323 PMIC.
+ This driver supports the control of different power rails of device
+ through regulator interface.
+
config REGULATOR_MT6397
tristate "MediaTek MT6397 PMIC"
depends on MFD_MT6397
@@ -543,12 +552,12 @@ config REGULATOR_PCF50633
on PCF50633
config REGULATOR_PFUZE100
- tristate "Freescale PFUZE100/PFUZE200 regulator driver"
+ tristate "Freescale PFUZE100/200/3000 regulator driver"
depends on I2C
select REGMAP_I2C
help
Say y here to support the regulators found on the Freescale
- PFUZE100/PFUZE200 PMIC.
+ PFUZE100/200/3000 PMIC.
config REGULATOR_PV88060
tristate "Powerventure Semiconductor PV88060 regulator"
@@ -636,10 +645,11 @@ config REGULATOR_RK808
outputs which can be controlled by i2c communication.
config REGULATOR_RN5T618
- tristate "Ricoh RN5T618 voltage regulators"
+ tristate "Ricoh RN5T567/618 voltage regulators"
depends on MFD_RN5T618
help
- Say y here to support the regulators found on Ricoh RN5T618 PMIC.
+ Say y here to support the regulators found on Ricoh RN5T567 or
+ RN5T618 PMIC.
config REGULATOR_RT5033
tristate "Richtek RT5033 Regulators"
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 85a1d44a3..f3da9eea9 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -65,6 +65,7 @@ obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o
obj-$(CONFIG_REGULATOR_MT6311) += mt6311-regulator.o
+obj-$(CONFIG_REGULATOR_MT6323) += mt6323-regulator.o
obj-$(CONFIG_REGULATOR_MT6397) += mt6397-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_SMD_RPM) += qcom_smd-regulator.o
diff --git a/drivers/regulator/ab8500-ext.c b/drivers/regulator/ab8500-ext.c
index 84c1ee39d..2ca00045e 100644
--- a/drivers/regulator/ab8500-ext.c
+++ b/drivers/regulator/ab8500-ext.c
@@ -25,6 +25,456 @@
#include <linux/mfd/abx500/ab8500.h>
#include <linux/regulator/ab8500.h>
+static struct regulator_consumer_supply ab8500_vaux1_consumers[] = {
+ /* Main display, u8500 R3 uib */
+ REGULATOR_SUPPLY("vddi", "mcde_disp_sony_acx424akp.0"),
+ /* Main display, u8500 uib and ST uib */
+ REGULATOR_SUPPLY("vdd1", "samsung_s6d16d0.0"),
+ /* Secondary display, ST uib */
+ REGULATOR_SUPPLY("vdd1", "samsung_s6d16d0.1"),
+ /* SFH7741 proximity sensor */
+ REGULATOR_SUPPLY("vcc", "gpio-keys.0"),
+ /* BH1780GLS ambient light sensor */
+ REGULATOR_SUPPLY("vcc", "2-0029"),
+ /* lsm303dlh accelerometer */
+ REGULATOR_SUPPLY("vdd", "2-0018"),
+ /* lsm303dlhc accelerometer */
+ REGULATOR_SUPPLY("vdd", "2-0019"),
+ /* lsm303dlh magnetometer */
+ REGULATOR_SUPPLY("vdd", "2-001e"),
+ /* Rohm BU21013 Touchscreen devices */
+ REGULATOR_SUPPLY("avdd", "3-005c"),
+ REGULATOR_SUPPLY("avdd", "3-005d"),
+ /* Synaptics RMI4 Touchscreen device */
+ REGULATOR_SUPPLY("vdd", "3-004b"),
+ /* L3G4200D Gyroscope device */
+ REGULATOR_SUPPLY("vdd", "2-0068"),
+ /* Ambient light sensor device */
+ REGULATOR_SUPPLY("vdd", "3-0029"),
+ /* Pressure sensor device */
+ REGULATOR_SUPPLY("vdd", "2-005c"),
+ /* Cypress TrueTouch Touchscreen device */
+ REGULATOR_SUPPLY("vcpin", "spi8.0"),
+ /* Camera device */
+ REGULATOR_SUPPLY("vaux12v5", "mmio_camera"),
+};
+
+static struct regulator_consumer_supply ab8500_vaux2_consumers[] = {
+ /* On-board eMMC power */
+ REGULATOR_SUPPLY("vmmc", "sdi4"),
+ /* AB8500 audio codec */
+ REGULATOR_SUPPLY("vcc-N2158", "ab8500-codec.0"),
+ /* AB8500 accessory detect 1 */
+ REGULATOR_SUPPLY("vcc-N2158", "ab8500-acc-det.0"),
+ /* AB8500 Tv-out device */
+ REGULATOR_SUPPLY("vcc-N2158", "mcde_tv_ab8500.4"),
+ /* AV8100 HDMI device */
+ REGULATOR_SUPPLY("vcc-N2158", "av8100_hdmi.3"),
+};
+
+static struct regulator_consumer_supply ab8500_vaux3_consumers[] = {
+ REGULATOR_SUPPLY("v-SD-STM", "stm"),
+ /* External MMC slot power */
+ REGULATOR_SUPPLY("vmmc", "sdi0"),
+};
+
+static struct regulator_consumer_supply ab8500_vtvout_consumers[] = {
+ /* TV-out DENC supply */
+ REGULATOR_SUPPLY("vtvout", "ab8500-denc.0"),
+ /* Internal general-purpose ADC */
+ REGULATOR_SUPPLY("vddadc", "ab8500-gpadc.0"),
+ /* ADC for charger */
+ REGULATOR_SUPPLY("vddadc", "ab8500-charger.0"),
+ /* AB8500 Tv-out device */
+ REGULATOR_SUPPLY("vtvout", "mcde_tv_ab8500.4"),
+};
+
+static struct regulator_consumer_supply ab8500_vaud_consumers[] = {
+ /* AB8500 audio-codec main supply */
+ REGULATOR_SUPPLY("vaud", "ab8500-codec.0"),
+};
+
+static struct regulator_consumer_supply ab8500_vamic1_consumers[] = {
+ /* AB8500 audio-codec Mic1 supply */
+ REGULATOR_SUPPLY("vamic1", "ab8500-codec.0"),
+};
+
+static struct regulator_consumer_supply ab8500_vamic2_consumers[] = {
+ /* AB8500 audio-codec Mic2 supply */
+ REGULATOR_SUPPLY("vamic2", "ab8500-codec.0"),
+};
+
+static struct regulator_consumer_supply ab8500_vdmic_consumers[] = {
+ /* AB8500 audio-codec DMic supply */
+ REGULATOR_SUPPLY("vdmic", "ab8500-codec.0"),
+};
+
+static struct regulator_consumer_supply ab8500_vintcore_consumers[] = {
+ /* SoC core supply, no device */
+ REGULATOR_SUPPLY("v-intcore", NULL),
+ /* USB Transceiver */
+ REGULATOR_SUPPLY("vddulpivio18", "ab8500-usb.0"),
+ /* Handled by abx500 clk driver */
+ REGULATOR_SUPPLY("v-intcore", "abx500-clk.0"),
+};
+
+static struct regulator_consumer_supply ab8500_vana_consumers[] = {
+ /* DB8500 DSI */
+ REGULATOR_SUPPLY("vdddsi1v2", "mcde"),
+ REGULATOR_SUPPLY("vdddsi1v2", "b2r2_core"),
+ REGULATOR_SUPPLY("vdddsi1v2", "b2r2_1_core"),
+ REGULATOR_SUPPLY("vdddsi1v2", "dsilink.0"),
+ REGULATOR_SUPPLY("vdddsi1v2", "dsilink.1"),
+ REGULATOR_SUPPLY("vdddsi1v2", "dsilink.2"),
+ /* DB8500 CSI */
+ REGULATOR_SUPPLY("vddcsi1v2", "mmio_camera"),
+};
+
+/* ab8500 regulator register initialization */
+static struct ab8500_regulator_reg_init ab8500_reg_init[] = {
+ /*
+ * VanaRequestCtrl = HP/LP depending on VxRequest
+ * VextSupply1RequestCtrl = HP/LP depending on VxRequest
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL2, 0xf0, 0x00),
+ /*
+ * VextSupply2RequestCtrl = HP/LP depending on VxRequest
+ * VextSupply3RequestCtrl = HP/LP depending on VxRequest
+ * Vaux1RequestCtrl = HP/LP depending on VxRequest
+ * Vaux2RequestCtrl = HP/LP depending on VxRequest
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL3, 0xff, 0x00),
+ /*
+ * Vaux3RequestCtrl = HP/LP depending on VxRequest
+ * SwHPReq = Control through SWValid disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUREQUESTCTRL4, 0x07, 0x00),
+ /*
+ * VanaSysClkReq1HPValid = disabled
+ * Vaux1SysClkReq1HPValid = disabled
+ * Vaux2SysClkReq1HPValid = disabled
+ * Vaux3SysClkReq1HPValid = disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID1, 0xe8, 0x00),
+ /*
+ * VextSupply1SysClkReq1HPValid = disabled
+ * VextSupply2SysClkReq1HPValid = disabled
+ * VextSupply3SysClkReq1HPValid = SysClkReq1 controlled
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQ1HPVALID2, 0x70, 0x40),
+ /*
+ * VanaHwHPReq1Valid = disabled
+ * Vaux1HwHPreq1Valid = disabled
+ * Vaux2HwHPReq1Valid = disabled
+ * Vaux3HwHPReqValid = disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID1, 0xe8, 0x00),
+ /*
+ * VextSupply1HwHPReq1Valid = disabled
+ * VextSupply2HwHPReq1Valid = disabled
+ * VextSupply3HwHPReq1Valid = disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ1VALID2, 0x07, 0x00),
+ /*
+ * VanaHwHPReq2Valid = disabled
+ * Vaux1HwHPReq2Valid = disabled
+ * Vaux2HwHPReq2Valid = disabled
+ * Vaux3HwHPReq2Valid = disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID1, 0xe8, 0x00),
+ /*
+ * VextSupply1HwHPReq2Valid = disabled
+ * VextSupply2HwHPReq2Valid = disabled
+ * VextSupply3HwHPReq2Valid = HWReq2 controlled
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUHWHPREQ2VALID2, 0x07, 0x04),
+ /*
+ * VanaSwHPReqValid = disabled
+ * Vaux1SwHPReqValid = disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID1, 0xa0, 0x00),
+ /*
+ * Vaux2SwHPReqValid = disabled
+ * Vaux3SwHPReqValid = disabled
+ * VextSupply1SwHPReqValid = disabled
+ * VextSupply2SwHPReqValid = disabled
+ * VextSupply3SwHPReqValid = disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUSWHPREQVALID2, 0x1f, 0x00),
+ /*
+ * SysClkReq2Valid1 = SysClkReq2 controlled
+ * SysClkReq3Valid1 = disabled
+ * SysClkReq4Valid1 = SysClkReq4 controlled
+ * SysClkReq5Valid1 = disabled
+ * SysClkReq6Valid1 = SysClkReq6 controlled
+ * SysClkReq7Valid1 = disabled
+ * SysClkReq8Valid1 = disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID1, 0xfe, 0x2a),
+ /*
+ * SysClkReq2Valid2 = disabled
+ * SysClkReq3Valid2 = disabled
+ * SysClkReq4Valid2 = disabled
+ * SysClkReq5Valid2 = disabled
+ * SysClkReq6Valid2 = SysClkReq6 controlled
+ * SysClkReq7Valid2 = disabled
+ * SysClkReq8Valid2 = disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUSYSCLKREQVALID2, 0xfe, 0x20),
+ /*
+ * VTVoutEna = disabled
+ * Vintcore12Ena = disabled
+ * Vintcore12Sel = 1.25 V
+ * Vintcore12LP = inactive (HP)
+ * VTVoutLP = inactive (HP)
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUMISC1, 0xfe, 0x10),
+ /*
+ * VaudioEna = disabled
+ * VdmicEna = disabled
+ * Vamic1Ena = disabled
+ * Vamic2Ena = disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8500_VAUDIOSUPPLY, 0x1e, 0x00),
+ /*
+ * Vamic1_dzout = high-Z when Vamic1 is disabled
+ * Vamic2_dzout = high-Z when Vamic2 is disabled
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUCTRL1VAMIC, 0x03, 0x00),
+ /*
+ * VPll = Hw controlled (NOTE! PRCMU bits)
+ * VanaRegu = force off
+ */
+ INIT_REGULATOR_REGISTER(AB8500_VPLLVANAREGU, 0x0f, 0x02),
+ /*
+ * VrefDDREna = disabled
+ * VrefDDRSleepMode = inactive (no pulldown)
+ */
+ INIT_REGULATOR_REGISTER(AB8500_VREFDDR, 0x03, 0x00),
+ /*
+ * VextSupply1Regu = force LP
+ * VextSupply2Regu = force OFF
+ * VextSupply3Regu = force HP (-> STBB2=LP and TPS=LP)
+ * ExtSupply2Bypass = ExtSupply12LPn ball is 0 when Ena is 0
+ * ExtSupply3Bypass = ExtSupply3LPn ball is 0 when Ena is 0
+ */
+ INIT_REGULATOR_REGISTER(AB8500_EXTSUPPLYREGU, 0xff, 0x13),
+ /*
+ * Vaux1Regu = force HP
+ * Vaux2Regu = force off
+ */
+ INIT_REGULATOR_REGISTER(AB8500_VAUX12REGU, 0x0f, 0x01),
+ /*
+ * Vaux3Regu = force off
+ */
+ INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3REGU, 0x03, 0x00),
+ /*
+ * Vaux1Sel = 2.8 V
+ */
+ INIT_REGULATOR_REGISTER(AB8500_VAUX1SEL, 0x0f, 0x0C),
+ /*
+ * Vaux2Sel = 2.9 V
+ */
+ INIT_REGULATOR_REGISTER(AB8500_VAUX2SEL, 0x0f, 0x0d),
+ /*
+ * Vaux3Sel = 2.91 V
+ */
+ INIT_REGULATOR_REGISTER(AB8500_VRF1VAUX3SEL, 0x07, 0x07),
+ /*
+ * VextSupply12LP = disabled (no LP)
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUCTRL2SPARE, 0x01, 0x00),
+ /*
+ * Vaux1Disch = short discharge time
+ * Vaux2Disch = short discharge time
+ * Vaux3Disch = short discharge time
+ * Vintcore12Disch = short discharge time
+ * VTVoutDisch = short discharge time
+ * VaudioDisch = short discharge time
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH, 0xfc, 0x00),
+ /*
+ * VanaDisch = short discharge time
+ * VdmicPullDownEna = pulldown disabled when Vdmic is disabled
+ * VdmicDisch = short discharge time
+ */
+ INIT_REGULATOR_REGISTER(AB8500_REGUCTRLDISCH2, 0x16, 0x00),
+};
+
+/* AB8500 regulators */
+static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
+ /* supplies to the display/camera */
+ [AB8500_LDO_AUX1] = {
+ .supply_regulator = "ab8500-ext-supply3",
+ .constraints = {
+ .name = "V-DISPLAY",
+ .min_uV = 2800000,
+ .max_uV = 3300000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS,
+ .boot_on = 1, /* display is on at boot */
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux1_consumers),
+ .consumer_supplies = ab8500_vaux1_consumers,
+ },
+ /* supplies to the on-board eMMC */
+ [AB8500_LDO_AUX2] = {
+ .supply_regulator = "ab8500-ext-supply3",
+ .constraints = {
+ .name = "V-eMMC1",
+ .min_uV = 1100000,
+ .max_uV = 3300000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux2_consumers),
+ .consumer_supplies = ab8500_vaux2_consumers,
+ },
+ /* supply for VAUX3, supplies to SDcard slots */
+ [AB8500_LDO_AUX3] = {
+ .supply_regulator = "ab8500-ext-supply3",
+ .constraints = {
+ .name = "V-MMC-SD",
+ .min_uV = 1100000,
+ .max_uV = 3300000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vaux3_consumers),
+ .consumer_supplies = ab8500_vaux3_consumers,
+ },
+ /* supply for tvout, gpadc, TVOUT LDO */
+ [AB8500_LDO_TVOUT] = {
+ .constraints = {
+ .name = "V-TVOUT",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vtvout_consumers),
+ .consumer_supplies = ab8500_vtvout_consumers,
+ },
+ /* supply for ab8500-vaudio, VAUDIO LDO */
+ [AB8500_LDO_AUDIO] = {
+ .constraints = {
+ .name = "V-AUD",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vaud_consumers),
+ .consumer_supplies = ab8500_vaud_consumers,
+ },
+ /* supply for v-anamic1 VAMic1-LDO */
+ [AB8500_LDO_ANAMIC1] = {
+ .constraints = {
+ .name = "V-AMIC1",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vamic1_consumers),
+ .consumer_supplies = ab8500_vamic1_consumers,
+ },
+ /* supply for v-amic2, VAMIC2 LDO, reuse constants for AMIC1 */
+ [AB8500_LDO_ANAMIC2] = {
+ .constraints = {
+ .name = "V-AMIC2",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vamic2_consumers),
+ .consumer_supplies = ab8500_vamic2_consumers,
+ },
+ /* supply for v-dmic, VDMIC LDO */
+ [AB8500_LDO_DMIC] = {
+ .constraints = {
+ .name = "V-DMIC",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vdmic_consumers),
+ .consumer_supplies = ab8500_vdmic_consumers,
+ },
+ /* supply for v-intcore12, VINTCORE12 LDO */
+ [AB8500_LDO_INTCORE] = {
+ .constraints = {
+ .name = "V-INTCORE",
+ .min_uV = 1250000,
+ .max_uV = 1350000,
+ .input_uV = 1800000,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_MODE |
+ REGULATOR_CHANGE_DRMS,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL |
+ REGULATOR_MODE_IDLE,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vintcore_consumers),
+ .consumer_supplies = ab8500_vintcore_consumers,
+ },
+ /* supply for U8500 CSI-DSI, VANA LDO */
+ [AB8500_LDO_ANA] = {
+ .constraints = {
+ .name = "V-CSI-DSI",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(ab8500_vana_consumers),
+ .consumer_supplies = ab8500_vana_consumers,
+ },
+};
+
+/* supply for VextSupply3 */
+static struct regulator_consumer_supply ab8500_ext_supply3_consumers[] = {
+ /* SIM supply for 3 V SIM cards */
+ REGULATOR_SUPPLY("vinvsim", "sim-detect.0"),
+};
+
+/*
+ * AB8500 external regulators
+ */
+static struct regulator_init_data ab8500_ext_regulators[] = {
+ /* fixed Vbat supplies VSMPS1_EXT_1V8 */
+ [AB8500_EXT_SUPPLY1] = {
+ .constraints = {
+ .name = "ab8500-ext-supply1",
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .initial_mode = REGULATOR_MODE_IDLE,
+ .boot_on = 1,
+ .always_on = 1,
+ },
+ },
+ /* fixed Vbat supplies VSMPS2_EXT_1V36 and VSMPS5_EXT_1V15 */
+ [AB8500_EXT_SUPPLY2] = {
+ .constraints = {
+ .name = "ab8500-ext-supply2",
+ .min_uV = 1360000,
+ .max_uV = 1360000,
+ },
+ },
+ /* fixed Vbat supplies VSMPS3_EXT_3V4 and VSMPS4_EXT_3V4 */
+ [AB8500_EXT_SUPPLY3] = {
+ .constraints = {
+ .name = "ab8500-ext-supply3",
+ .min_uV = 3400000,
+ .max_uV = 3400000,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ .boot_on = 1,
+ },
+ .num_consumer_supplies =
+ ARRAY_SIZE(ab8500_ext_supply3_consumers),
+ .consumer_supplies = ab8500_ext_supply3_consumers,
+ },
+};
+
+static struct ab8500_regulator_platform_data ab8500_regulator_plat_data = {
+ .reg_init = ab8500_reg_init,
+ .num_reg_init = ARRAY_SIZE(ab8500_reg_init),
+ .regulator = ab8500_regulators,
+ .num_regulator = ARRAY_SIZE(ab8500_regulators),
+ .ext_regulator = ab8500_ext_regulators,
+ .num_ext_regulator = ARRAY_SIZE(ab8500_ext_regulators),
+};
+
/**
* struct ab8500_ext_regulator_info - ab8500 regulator information
* @dev: device pointer
@@ -344,8 +794,7 @@ static struct of_regulator_match ab8500_ext_regulator_match[] = {
static int ab8500_ext_regulator_probe(struct platform_device *pdev)
{
struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
- struct ab8500_platform_data *ppdata;
- struct ab8500_regulator_platform_data *pdata;
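+	/* Use the driver's built-in platform data rather than parent pdata */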
+ struct ab8500_regulator_platform_data *pdata = &ab8500_regulator_plat_data;
struct device_node *np = pdev->dev.of_node;
struct regulator_config config = { };
int i, err;
@@ -366,18 +815,6 @@ static int ab8500_ext_regulator_probe(struct platform_device *pdev)
return -EINVAL;
}
- ppdata = dev_get_platdata(ab8500->dev);
- if (!ppdata) {
- dev_err(&pdev->dev, "null parent pdata\n");
- return -EINVAL;
- }
-
- pdata = ppdata->regulator;
- if (!pdata) {
- dev_err(&pdev->dev, "null pdata\n");
- return -EINVAL;
- }
-
/* make sure the platform data has the correct size */
if (pdata->num_ext_regulator != ARRAY_SIZE(ab8500_ext_regulator_info)) {
dev_err(&pdev->dev, "Configuration error: size mismatch.\n");
diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c
index a1cd0d4f8..7652477e6 100644
--- a/drivers/regulator/act8865-regulator.c
+++ b/drivers/regulator/act8865-regulator.c
@@ -395,12 +395,6 @@ static int act8865_pdata_from_dt(struct device *dev,
struct act8865_regulator_data *regulator;
struct of_regulator_match *matches;
- np = of_get_child_by_name(dev->of_node, "regulators");
- if (!np) {
- dev_err(dev, "missing 'regulators' subnode in DT\n");
- return -EINVAL;
- }
-
switch (type) {
case ACT8600:
matches = act8600_matches;
@@ -419,6 +413,12 @@ static int act8865_pdata_from_dt(struct device *dev,
return -EINVAL;
}
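+	/* Look up the regulators subnode only after the chip type is known */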
+ np = of_get_child_by_name(dev->of_node, "regulators");
+ if (!np) {
+ dev_err(dev, "missing 'regulators' subnode in DT\n");
+ return -EINVAL;
+ }
+
matched = of_regulator_match(dev, np, matches, num_matches);
of_node_put(np);
if (matched <= 0)
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 514a5e8fd..6d9ac76a7 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -36,6 +36,8 @@
#define AXP20X_FREQ_DCDC_MASK 0x0f
+#define AXP22X_MISC_N_VBUSEN_FUNC BIT(4)
+
#define AXP_DESC_IO(_family, _id, _match, _supply, _min, _max, _step, _vreg, \
_vmask, _ereg, _emask, _enable_val, _disable_val) \
[_family##_##_id] = { \
@@ -230,6 +232,73 @@ static const struct regulator_desc axp22x_regulators[] = {
AXP_DESC_FIXED(AXP22X, RTC_LDO, "rtc_ldo", "ips", 3000),
};
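+/* The N_VBUSEN pin can be reused as an output that drives VBUS */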
+static const struct regulator_desc axp22x_drivevbus_regulator = {
+ .name = "drivevbus",
+ .supply_name = "drivevbus",
+ .of_match = of_match_ptr("drivevbus"),
+ .regulators_node = of_match_ptr("regulators"),
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = AXP20X_VBUS_IPSOUT_MGMT,
+ .enable_mask = BIT(2),
+ .ops = &axp20x_ops_sw,
+};
+
+static const struct regulator_linear_range axp809_dcdc4_ranges[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0x0, 0x2f, 20000),
+ REGULATOR_LINEAR_RANGE(1800000, 0x30, 0x38, 100000),
+};
+
+static const struct regulator_linear_range axp809_dldo1_ranges[] = {
+ REGULATOR_LINEAR_RANGE(700000, 0x0, 0x1a, 100000),
+ REGULATOR_LINEAR_RANGE(3400000, 0x1b, 0x1f, 200000),
+};
+
+static const struct regulator_desc axp809_regulators[] = {
+ AXP_DESC(AXP809, DCDC1, "dcdc1", "vin1", 1600, 3400, 100,
+ AXP22X_DCDC1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(1)),
+ AXP_DESC(AXP809, DCDC2, "dcdc2", "vin2", 600, 1540, 20,
+ AXP22X_DCDC2_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(2)),
+ AXP_DESC(AXP809, DCDC3, "dcdc3", "vin3", 600, 1860, 20,
+ AXP22X_DCDC3_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)),
+ AXP_DESC_RANGES(AXP809, DCDC4, "dcdc4", "vin4", axp809_dcdc4_ranges,
+ 57, AXP22X_DCDC4_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1,
+ BIT(4)),
+ AXP_DESC(AXP809, DCDC5, "dcdc5", "vin5", 1000, 2550, 50,
+ AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(5)),
+ /* secondary switchable output of DCDC1 */
+ AXP_DESC_SW(AXP809, DC1SW, "dc1sw", NULL, AXP22X_PWR_OUT_CTRL2,
+ BIT(7)),
+ /* LDO regulator internally chained to DCDC5 */
+ AXP_DESC(AXP809, DC5LDO, "dc5ldo", NULL, 700, 1400, 100,
+ AXP22X_DC5LDO_V_OUT, 0x7, AXP22X_PWR_OUT_CTRL1, BIT(0)),
+ AXP_DESC(AXP809, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
+ AXP22X_ALDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(6)),
+ AXP_DESC(AXP809, ALDO2, "aldo2", "aldoin", 700, 3300, 100,
+ AXP22X_ALDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(7)),
+ AXP_DESC(AXP809, ALDO3, "aldo3", "aldoin", 700, 3300, 100,
+ AXP22X_ALDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(5)),
+ AXP_DESC_RANGES(AXP809, DLDO1, "dldo1", "dldoin", axp809_dldo1_ranges,
+ 32, AXP22X_DLDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2,
+ BIT(3)),
+ AXP_DESC(AXP809, DLDO2, "dldo2", "dldoin", 700, 3300, 100,
+ AXP22X_DLDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(4)),
+ AXP_DESC(AXP809, ELDO1, "eldo1", "eldoin", 700, 3300, 100,
+ AXP22X_ELDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(0)),
+ AXP_DESC(AXP809, ELDO2, "eldo2", "eldoin", 700, 3300, 100,
+ AXP22X_ELDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(1)),
+ AXP_DESC(AXP809, ELDO3, "eldo3", "eldoin", 700, 3300, 100,
+ AXP22X_ELDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(2)),
+ AXP_DESC_IO(AXP809, LDO_IO0, "ldo_io0", "ips", 700, 3300, 100,
+ AXP22X_LDO_IO0_V_OUT, 0x1f, AXP20X_GPIO0_CTRL, 0x07,
+ AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
+ AXP_DESC_IO(AXP809, LDO_IO1, "ldo_io1", "ips", 700, 3300, 100,
+ AXP22X_LDO_IO1_V_OUT, 0x1f, AXP20X_GPIO1_CTRL, 0x07,
+ AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
+ AXP_DESC_FIXED(AXP809, RTC_LDO, "rtc_ldo", "ips", 1800),
+ AXP_DESC_SW(AXP809, SW, "sw", "swin", AXP22X_PWR_OUT_CTRL2, BIT(6)),
+};
+
static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
{
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
@@ -245,6 +314,7 @@ static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
break;
case AXP221_ID:
case AXP223_ID:
+ case AXP809_ID:
min = 1800;
max = 4050;
def = 3000;
@@ -324,6 +394,7 @@ static int axp20x_set_dcdc_workmode(struct regulator_dev *rdev, int id, u32 work
case AXP221_ID:
case AXP223_ID:
+ case AXP809_ID:
if (id < AXP22X_DCDC1 || id > AXP22X_DCDC5)
return -EINVAL;
@@ -352,8 +423,9 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
};
int ret, i, nregulators;
u32 workmode;
- const char *axp22x_dc1_name = axp22x_regulators[AXP22X_DCDC1].name;
- const char *axp22x_dc5_name = axp22x_regulators[AXP22X_DCDC5].name;
+ const char *dcdc1_name = axp22x_regulators[AXP22X_DCDC1].name;
+ const char *dcdc5_name = axp22x_regulators[AXP22X_DCDC5].name;
+ bool drivevbus = false;
switch (axp20x->variant) {
case AXP202_ID:
@@ -365,6 +437,12 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
case AXP223_ID:
regulators = axp22x_regulators;
nregulators = AXP22X_REG_ID_MAX;
+ drivevbus = of_property_read_bool(pdev->dev.parent->of_node,
+ "x-powers,drive-vbus-en");
+ break;
+ case AXP809_ID:
+ regulators = axp809_regulators;
+ nregulators = AXP809_REG_ID_MAX;
break;
default:
dev_err(&pdev->dev, "Unsupported AXP variant: %ld\n",
@@ -388,22 +466,22 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
* part of this loop to see where we save the DT defined
* name.
*/
- if (regulators == axp22x_regulators) {
- if (i == AXP22X_DC1SW) {
- new_desc = devm_kzalloc(&pdev->dev,
- sizeof(*desc),
- GFP_KERNEL);
- *new_desc = regulators[i];
- new_desc->supply_name = axp22x_dc1_name;
- desc = new_desc;
- } else if (i == AXP22X_DC5LDO) {
- new_desc = devm_kzalloc(&pdev->dev,
- sizeof(*desc),
- GFP_KERNEL);
- *new_desc = regulators[i];
- new_desc->supply_name = axp22x_dc5_name;
- desc = new_desc;
- }
+ if ((regulators == axp22x_regulators && i == AXP22X_DC1SW) ||
+ (regulators == axp809_regulators && i == AXP809_DC1SW)) {
+ new_desc = devm_kzalloc(&pdev->dev, sizeof(*desc),
+ GFP_KERNEL);
+ *new_desc = regulators[i];
+ new_desc->supply_name = dcdc1_name;
+ desc = new_desc;
+ }
+
+ if ((regulators == axp22x_regulators && i == AXP22X_DC5LDO) ||
+ (regulators == axp809_regulators && i == AXP809_DC5LDO)) {
+ new_desc = devm_kzalloc(&pdev->dev, sizeof(*desc),
+ GFP_KERNEL);
+ *new_desc = regulators[i];
+ new_desc->supply_name = dcdc5_name;
+ desc = new_desc;
}
rdev = devm_regulator_register(&pdev->dev, desc, &config);
@@ -426,16 +504,29 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
/*
* Save AXP22X DCDC1 / DCDC5 regulator names for later.
*/
- if (regulators == axp22x_regulators) {
- /* Can we use rdev->constraints->name instead? */
- if (i == AXP22X_DCDC1)
- of_property_read_string(rdev->dev.of_node,
- "regulator-name",
- &axp22x_dc1_name);
- else if (i == AXP22X_DCDC5)
- of_property_read_string(rdev->dev.of_node,
- "regulator-name",
- &axp22x_dc5_name);
+ if ((regulators == axp22x_regulators && i == AXP22X_DCDC1) ||
+ (regulators == axp809_regulators && i == AXP809_DCDC1))
+ of_property_read_string(rdev->dev.of_node,
+ "regulator-name",
+ &dcdc1_name);
+
+ if ((regulators == axp22x_regulators && i == AXP22X_DCDC5) ||
+ (regulators == axp809_regulators && i == AXP809_DCDC5))
+ of_property_read_string(rdev->dev.of_node,
+ "regulator-name",
+ &dcdc5_name);
+ }
+
+ if (drivevbus) {
+ /* Change N_VBUSEN sense pin to DRIVEVBUS output pin */
+ regmap_update_bits(axp20x->regmap, AXP20X_OVER_TMP,
+ AXP22X_MISC_N_VBUSEN_FUNC, 0);
+ rdev = devm_regulator_register(&pdev->dev,
+ &axp22x_drivevbus_regulator,
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "Failed to register drivevbus\n");
+ return PTR_ERR(rdev);
}
}
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index ec8184d53..db320e8fa 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2509,33 +2509,6 @@ int regulator_is_enabled(struct regulator *regulator)
EXPORT_SYMBOL_GPL(regulator_is_enabled);
/**
- * regulator_can_change_voltage - check if regulator can change voltage
- * @regulator: regulator source
- *
- * Returns positive if the regulator driver backing the source/client
- * can change its voltage, false otherwise. Useful for detecting fixed
- * or dummy regulators and disabling voltage change logic in the client
- * driver.
- */
-int regulator_can_change_voltage(struct regulator *regulator)
-{
- struct regulator_dev *rdev = regulator->rdev;
-
- if (regulator_ops_is_valid(rdev, REGULATOR_CHANGE_VOLTAGE)) {
- if (rdev->desc->n_voltages - rdev->desc->linear_min_sel > 1)
- return 1;
-
- if (rdev->desc->continuous_voltage_range &&
- rdev->constraints->min_uV && rdev->constraints->max_uV &&
- rdev->constraints->min_uV != rdev->constraints->max_uV)
- return 1;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(regulator_can_change_voltage);
-
-/**
* regulator_count_voltages - count regulator_list_voltage() selectors
* @regulator: regulator source
*
diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
index 1050cb775..9ececfef4 100644
--- a/drivers/regulator/da9052-regulator.c
+++ b/drivers/regulator/da9052-regulator.c
@@ -333,7 +333,7 @@ static const struct regulator_ops da9052_ldo_ops = {
static struct da9052_regulator_info da9052_regulator_info[] = {
DA9052_DCDC(BUCK1, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBCOREGO),
DA9052_DCDC(BUCK2, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO),
- DA9052_DCDC(BUCK3, 25, 925, 2500, 6, 6, DA9052_SUPPLY_VBMEMGO),
+ DA9052_DCDC(BUCK3, 25, 950, 2525, 6, 6, DA9052_SUPPLY_VBMEMGO),
DA9052_DCDC(BUCK4, 50, 1800, 3600, 5, 6, 0),
DA9052_LDO(LDO1, 50, 600, 1800, 5, 6, 0),
DA9052_LDO(LDO2, 25, 600, 1800, 6, 6, DA9052_SUPPLY_VLDO2GO),
@@ -350,8 +350,8 @@ static struct da9052_regulator_info da9052_regulator_info[] = {
static struct da9052_regulator_info da9053_regulator_info[] = {
DA9052_DCDC(BUCK1, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBCOREGO),
DA9052_DCDC(BUCK2, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO),
- DA9052_DCDC(BUCK3, 25, 925, 2500, 6, 6, DA9052_SUPPLY_VBMEMGO),
- DA9052_DCDC(BUCK4, 25, 925, 2500, 6, 6, 0),
+ DA9052_DCDC(BUCK3, 25, 950, 2525, 6, 6, DA9052_SUPPLY_VBMEMGO),
+ DA9052_DCDC(BUCK4, 25, 950, 2525, 6, 6, 0),
DA9052_LDO(LDO1, 50, 600, 1800, 5, 6, 0),
DA9052_LDO(LDO2, 25, 600, 1800, 6, 6, DA9052_SUPPLY_VLDO2GO),
DA9052_LDO(LDO3, 25, 1725, 3300, 6, 6, DA9052_SUPPLY_VLDO3GO),
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c
index 01c0e3709..d0496d6b0 100644
--- a/drivers/regulator/da9210-regulator.c
+++ b/drivers/regulator/da9210-regulator.c
@@ -21,12 +21,11 @@
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/slab.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
+#include <linux/of_device.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regmap.h>
@@ -179,6 +178,13 @@ error_i2c:
/*
* I2C driver interface functions
*/
+
+static const struct of_device_id da9210_dt_ids[] = {
+ { .compatible = "dlg,da9210", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, da9210_dt_ids);
+
static int da9210_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
@@ -188,6 +194,16 @@ static int da9210_i2c_probe(struct i2c_client *i2c,
struct regulator_dev *rdev = NULL;
struct regulator_config config = { };
int error;
+ const struct of_device_id *match;
+
+ if (i2c->dev.of_node && !pdata) {
+ match = of_match_device(of_match_ptr(da9210_dt_ids),
+ &i2c->dev);
+ if (!match) {
+ dev_err(&i2c->dev, "Error: No device match found\n");
+ return -ENODEV;
+ }
+ }
chip = devm_kzalloc(&i2c->dev, sizeof(struct da9210), GFP_KERNEL);
if (!chip)
@@ -264,6 +280,7 @@ MODULE_DEVICE_TABLE(i2c, da9210_i2c_id);
static struct i2c_driver da9210_regulator_driver = {
.driver = {
.name = "da9210",
+ .of_match_table = of_match_ptr(da9210_dt_ids),
},
.probe = da9210_i2c_probe,
.id_table = da9210_i2c_id,
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
index 236abf473..aa47280ef 100644
--- a/drivers/regulator/da9211-regulator.c
+++ b/drivers/regulator/da9211-regulator.c
@@ -1,5 +1,6 @@
/*
- * da9211-regulator.c - Regulator device driver for DA9211/DA9213/DA9215
+ * da9211-regulator.c - Regulator device driver for DA9211/DA9212
+ * /DA9213/DA9214/DA9215
* Copyright (C) 2015 Dialog Semiconductor Ltd.
*
* This library is free software; you can redistribute it and/or
@@ -493,7 +494,9 @@ static int da9211_i2c_probe(struct i2c_client *i2c,
static const struct i2c_device_id da9211_i2c_id[] = {
{"da9211", DA9211},
+ {"da9212", DA9212},
{"da9213", DA9213},
+ {"da9214", DA9214},
{"da9215", DA9215},
{},
};
@@ -502,8 +505,10 @@ MODULE_DEVICE_TABLE(i2c, da9211_i2c_id);
#ifdef CONFIG_OF
static const struct of_device_id da9211_dt_ids[] = {
{ .compatible = "dlg,da9211", .data = &da9211_i2c_id[0] },
- { .compatible = "dlg,da9213", .data = &da9211_i2c_id[1] },
- { .compatible = "dlg,da9215", .data = &da9211_i2c_id[2] },
+ { .compatible = "dlg,da9212", .data = &da9211_i2c_id[1] },
+ { .compatible = "dlg,da9213", .data = &da9211_i2c_id[2] },
+ { .compatible = "dlg,da9214", .data = &da9211_i2c_id[3] },
+ { .compatible = "dlg,da9215", .data = &da9211_i2c_id[4] },
{},
};
MODULE_DEVICE_TABLE(of, da9211_dt_ids);
@@ -521,5 +526,5 @@ static struct i2c_driver da9211_regulator_driver = {
module_i2c_driver(da9211_regulator_driver);
MODULE_AUTHOR("James Ban <James.Ban.opensource@diasemi.com>");
-MODULE_DESCRIPTION("Regulator device driver for Dialog DA9211/DA9213/DA9215");
+MODULE_DESCRIPTION("DA9211/DA9212/DA9213/DA9214/DA9215 regulator driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/da9211-regulator.h b/drivers/regulator/da9211-regulator.h
index d6ad96fc6..b841bbf33 100644
--- a/drivers/regulator/da9211-regulator.h
+++ b/drivers/regulator/da9211-regulator.h
@@ -1,5 +1,6 @@
/*
- * da9211-regulator.h - Regulator definitions for DA9211/DA9213/DA9215
+ * da9211-regulator.h - Regulator definitions for DA9211/DA9212
+ * /DA9213/DA9214/DA9215
* Copyright (C) 2015 Dialog Semiconductor Ltd.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index ff62d69ba..988a7472c 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -79,18 +79,8 @@ of_get_fixed_voltage_config(struct device *dev,
config->enabled_at_boot = true;
config->gpio = of_get_named_gpio(np, "gpio", 0);
- /*
- * of_get_named_gpio() currently returns ENODEV rather than
- * EPROBE_DEFER. This code attempts to be compatible with both
- * for now; the ENODEV check can be removed once the API is fixed.
- * of_get_named_gpio() doesn't differentiate between a missing
- * property (which would be fine here, since the GPIO is optional)
- * and some other error. Patches have been posted for both issues.
- * Once they are check in, we should replace this with:
- * if (config->gpio < 0 && config->gpio != -ENOENT)
- */
- if ((config->gpio == -ENODEV) || (config->gpio == -EPROBE_DEFER))
- return ERR_PTR(-EPROBE_DEFER);
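+	/*
+	 * The GPIO is optional: a missing property (-ENOENT) is fine, any
+	 * other error code, including -EPROBE_DEFER, is propagated.
+	 */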
+ if ((config->gpio < 0) && (config->gpio != -ENOENT))
+ return ERR_PTR(config->gpio);
of_property_read_u32(np, "startup-delay-us", &config->startup_delay);
diff --git a/drivers/regulator/lp873x-regulator.c b/drivers/regulator/lp873x-regulator.c
index b4ffd113b..e504b9148 100644
--- a/drivers/regulator/lp873x-regulator.c
+++ b/drivers/regulator/lp873x-regulator.c
@@ -20,7 +20,7 @@
#include <linux/mfd/lp873x.h>
#define LP873X_REGULATOR(_name, _id, _of, _ops, _n, _vr, _vm, _er, _em, \
- _delay, _lr, _nlr, _cr) \
+ _delay, _lr, _cr) \
[_id] = { \
.desc = { \
.name = _name, \
@@ -37,7 +37,7 @@
.enable_mask = _em, \
.ramp_delay = _delay, \
.linear_ranges = _lr, \
- .n_linear_ranges = _nlr, \
+ .n_linear_ranges = ARRAY_SIZE(_lr), \
}, \
.ctrl2_reg = _cr, \
}
@@ -175,22 +175,20 @@ static const struct lp873x_regulator regulators[] = {
256, LP873X_REG_BUCK0_VOUT,
LP873X_BUCK0_VOUT_BUCK0_VSET, LP873X_REG_BUCK0_CTRL_1,
LP873X_BUCK0_CTRL_1_BUCK0_EN, 10000,
- buck0_buck1_ranges, 4, LP873X_REG_BUCK0_CTRL_2),
+ buck0_buck1_ranges, LP873X_REG_BUCK0_CTRL_2),
LP873X_REGULATOR("BUCK1", LP873X_BUCK_1, "buck1", lp873x_buck01_ops,
256, LP873X_REG_BUCK1_VOUT,
LP873X_BUCK1_VOUT_BUCK1_VSET, LP873X_REG_BUCK1_CTRL_1,
LP873X_BUCK1_CTRL_1_BUCK1_EN, 10000,
- buck0_buck1_ranges, 4, LP873X_REG_BUCK1_CTRL_2),
+ buck0_buck1_ranges, LP873X_REG_BUCK1_CTRL_2),
LP873X_REGULATOR("LDO0", LP873X_LDO_0, "ldo0", lp873x_ldo01_ops, 26,
LP873X_REG_LDO0_VOUT, LP873X_LDO0_VOUT_LDO0_VSET,
LP873X_REG_LDO0_CTRL,
- LP873X_LDO0_CTRL_LDO0_EN, 0, ldo0_ldo1_ranges, 1,
- 0xFF),
+ LP873X_LDO0_CTRL_LDO0_EN, 0, ldo0_ldo1_ranges, 0xFF),
LP873X_REGULATOR("LDO1", LP873X_LDO_1, "ldo1", lp873x_ldo01_ops, 26,
LP873X_REG_LDO1_VOUT, LP873X_LDO1_VOUT_LDO1_VSET,
LP873X_REG_LDO1_CTRL,
- LP873X_LDO1_CTRL_LDO1_EN, 0, ldo0_ldo1_ranges, 1,
- 0xFF),
+ LP873X_LDO1_CTRL_LDO1_EN, 0, ldo0_ldo1_ranges, 0xFF),
};
static int lp873x_regulator_probe(struct platform_device *pdev)
diff --git a/drivers/regulator/max14577-regulator.c b/drivers/regulator/max14577-regulator.c
index b2daa6641..c9ff26199 100644
--- a/drivers/regulator/max14577-regulator.c
+++ b/drivers/regulator/max14577-regulator.c
@@ -2,7 +2,7 @@
* max14577.c - Regulator driver for the Maxim 14577/77836
*
* Copyright (C) 2013,2014 Samsung Electronics
- * Krzysztof Kozlowski <k.kozlowski@samsung.com>
+ * Krzysztof Kozlowski <krzk@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -331,7 +331,7 @@ static void __exit max14577_regulator_exit(void)
}
module_exit(max14577_regulator_exit);
-MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski@samsung.com>");
+MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
MODULE_DESCRIPTION("Maxim 14577/77836 regulator driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:max14577-regulator");
diff --git a/drivers/regulator/max77693-regulator.c b/drivers/regulator/max77693-regulator.c
index de730fd3f..cfbb9512e 100644
--- a/drivers/regulator/max77693-regulator.c
+++ b/drivers/regulator/max77693-regulator.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2013-2015 Samsung Electronics
* Jonghwa Lee <jonghwa3.lee@samsung.com>
- * Krzysztof Kozlowski <k.kozlowski.k@gmail.com>
+ * Krzysztof Kozlowski <krzk@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -314,5 +314,5 @@ module_exit(max77693_pmic_cleanup);
MODULE_DESCRIPTION("MAXIM 77693/77843 regulator driver");
MODULE_AUTHOR("Jonghwa Lee <jonghwa3.lee@samsung.com>");
-MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski.k@gmail.com>");
+MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index 08d2f13ec..3958f50c5 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -271,22 +271,18 @@ static int max8973_set_ramp_delay(struct regulator_dev *rdev,
struct max8973_chip *max = rdev_get_drvdata(rdev);
unsigned int control;
int ret;
- int ret_val;
/* Set ramp delay */
- if (ramp_delay < 25000) {
+ if (ramp_delay <= 12000)
control = MAX8973_RAMP_12mV_PER_US;
- ret_val = 12000;
- } else if (ramp_delay < 50000) {
+ else if (ramp_delay <= 25000)
control = MAX8973_RAMP_25mV_PER_US;
- ret_val = 25000;
- } else if (ramp_delay < 200000) {
+ else if (ramp_delay <= 50000)
control = MAX8973_RAMP_50mV_PER_US;
- ret_val = 50000;
- } else {
+ else if (ramp_delay <= 200000)
control = MAX8973_RAMP_200mV_PER_US;
- ret_val = 200000;
- }
+ else
+ return -EINVAL;
ret = regmap_update_bits(max->regmap, MAX8973_CONTROL1,
MAX8973_RAMP_MASK, control);
diff --git a/drivers/regulator/mt6323-regulator.c b/drivers/regulator/mt6323-regulator.c
new file mode 100644
index 000000000..b7b9670f0
--- /dev/null
+++ b/drivers/regulator/mt6323-regulator.c
@@ -0,0 +1,425 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Chen Zhong <chen.zhong@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/mt6397/core.h>
+#include <linux/mfd/mt6323/registers.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/mt6323-regulator.h>
+#include <linux/regulator/of_regulator.h>
+
+#define MT6323_LDO_MODE_NORMAL 0
+#define MT6323_LDO_MODE_LP 1
+
+/*
+ * MT6323 regulators' information
+ *
+ * @desc: standard fields of the regulator description.
+ * @qi: Mask for querying the enable status of the regulator.
+ * @vselon_reg: Voltage selection register used in hardware control mode.
+ * @vselctrl_reg: Register that selects the buck's voltage control mode.
+ * @vselctrl_mask: Mask for querying the buck's voltage control mode.
+ * @modeset_reg: Register that controls the LDO's low-power mode.
+ * @modeset_mask: Mask for the LDO's low-power mode control bit.
+ */
+struct mt6323_regulator_info {
+ struct regulator_desc desc;
+ u32 qi;
+ u32 vselon_reg;
+ u32 vselctrl_reg;
+ u32 vselctrl_mask;
+ u32 modeset_reg;
+ u32 modeset_mask;
+};
+
+#define MT6323_BUCK(match, vreg, min, max, step, volt_ranges, enreg, \
+ vosel, vosel_mask, voselon, vosel_ctrl) \
+[MT6323_ID_##vreg] = { \
+ .desc = { \
+ .name = #vreg, \
+ .of_match = of_match_ptr(match), \
+ .ops = &mt6323_volt_range_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6323_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = (max - min)/step + 1, \
+ .linear_ranges = volt_ranges, \
+ .n_linear_ranges = ARRAY_SIZE(volt_ranges), \
+ .vsel_reg = vosel, \
+ .vsel_mask = vosel_mask, \
+ .enable_reg = enreg, \
+ .enable_mask = BIT(0), \
+ }, \
+ .qi = BIT(13), \
+ .vselon_reg = voselon, \
+ .vselctrl_reg = vosel_ctrl, \
+ .vselctrl_mask = BIT(1), \
+}
+
+#define MT6323_LDO(match, vreg, ldo_volt_table, enreg, enbit, vosel, \
+ vosel_mask, _modeset_reg, _modeset_mask) \
+[MT6323_ID_##vreg] = { \
+ .desc = { \
+ .name = #vreg, \
+ .of_match = of_match_ptr(match), \
+ .ops = &mt6323_volt_table_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6323_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = ARRAY_SIZE(ldo_volt_table), \
+ .volt_table = ldo_volt_table, \
+ .vsel_reg = vosel, \
+ .vsel_mask = vosel_mask, \
+ .enable_reg = enreg, \
+ .enable_mask = BIT(enbit), \
+ }, \
+ .qi = BIT(15), \
+ .modeset_reg = _modeset_reg, \
+ .modeset_mask = _modeset_mask, \
+}
+
+#define MT6323_REG_FIXED(match, vreg, enreg, enbit, volt, \
+ _modeset_reg, _modeset_mask) \
+[MT6323_ID_##vreg] = { \
+ .desc = { \
+ .name = #vreg, \
+ .of_match = of_match_ptr(match), \
+ .ops = &mt6323_volt_fixed_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6323_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = 1, \
+ .enable_reg = enreg, \
+ .enable_mask = BIT(enbit), \
+ .min_uV = volt, \
+ }, \
+ .qi = BIT(15), \
+ .modeset_reg = _modeset_reg, \
+ .modeset_mask = _modeset_mask, \
+}
+
+static const struct regulator_linear_range buck_volt_range1[] = {
+ REGULATOR_LINEAR_RANGE(700000, 0, 0x7f, 6250),
+};
+
+static const struct regulator_linear_range buck_volt_range2[] = {
+ REGULATOR_LINEAR_RANGE(1400000, 0, 0x7f, 12500),
+};
+
+static const struct regulator_linear_range buck_volt_range3[] = {
+ REGULATOR_LINEAR_RANGE(500000, 0, 0x3f, 50000),
+};
+
+static const u32 ldo_volt_table1[] = {
+ 3300000, 3400000, 3500000, 3600000,
+};
+
+static const u32 ldo_volt_table2[] = {
+ 1500000, 1800000, 2500000, 2800000,
+};
+
+static const u32 ldo_volt_table3[] = {
+ 1800000, 3300000,
+};
+
+static const u32 ldo_volt_table4[] = {
+ 3000000, 3300000,
+};
+
+static const u32 ldo_volt_table5[] = {
+ 1200000, 1300000, 1500000, 1800000, 2000000, 2800000, 3000000, 3300000,
+};
+
+static const u32 ldo_volt_table6[] = {
+ 1200000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 2000000,
+};
+
+static const u32 ldo_volt_table7[] = {
+ 1200000, 1300000, 1500000, 1800000,
+};
+
+static const u32 ldo_volt_table8[] = {
+ 1800000, 3000000,
+};
+
+static const u32 ldo_volt_table9[] = {
+ 1200000, 1350000, 1500000, 1800000,
+};
+
+static const u32 ldo_volt_table10[] = {
+ 1200000, 1300000, 1500000, 1800000,
+};
+
+static int mt6323_get_status(struct regulator_dev *rdev)
+{
+ int ret;
+ u32 regval;
+ struct mt6323_regulator_info *info = rdev_get_drvdata(rdev);
+
+ ret = regmap_read(rdev->regmap, info->desc.enable_reg, &regval);
+ if (ret != 0) {
+ dev_err(&rdev->dev, "Failed to get enable reg: %d\n", ret);
+ return ret;
+ }
+
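+	/* The qi bit reports the regulator's actual enable status */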
+ return (regval & info->qi) ? REGULATOR_STATUS_ON : REGULATOR_STATUS_OFF;
+}
+
+static int mt6323_ldo_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ int ret, val = 0;
+ struct mt6323_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (!info->modeset_mask) {
+ dev_err(&rdev->dev, "regulator %s doesn't support set_mode\n",
+ info->desc.name);
+ return -EINVAL;
+ }
+
+ switch (mode) {
+ case REGULATOR_MODE_STANDBY:
+ val = MT6323_LDO_MODE_LP;
+ break;
+ case REGULATOR_MODE_NORMAL:
+ val = MT6323_LDO_MODE_NORMAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
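+	/* Shift the mode value into the bit position of the modeset mask */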
+ val <<= ffs(info->modeset_mask) - 1;
+
+ ret = regmap_update_bits(rdev->regmap, info->modeset_reg,
+ info->modeset_mask, val);
+
+ return ret;
+}
+
+static unsigned int mt6323_ldo_get_mode(struct regulator_dev *rdev)
+{
+ unsigned int val;
+ unsigned int mode;
+ int ret;
+ struct mt6323_regulator_info *info = rdev_get_drvdata(rdev);
+
+ if (!info->modeset_mask) {
+ dev_err(&rdev->dev, "regulator %s doesn't support get_mode\n",
+ info->desc.name);
+ return -EINVAL;
+ }
+
+ ret = regmap_read(rdev->regmap, info->modeset_reg, &val);
+ if (ret < 0)
+ return ret;
+
+ val &= info->modeset_mask;
+ val >>= ffs(info->modeset_mask) - 1;
+
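+	/* A set bit means the LDO is in its low-power mode */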
+ if (val & 0x1)
+ mode = REGULATOR_MODE_STANDBY;
+ else
+ mode = REGULATOR_MODE_NORMAL;
+
+ return mode;
+}
+
+static const struct regulator_ops mt6323_volt_range_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6323_get_status,
+};
+
+static const struct regulator_ops mt6323_volt_table_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6323_get_status,
+ .set_mode = mt6323_ldo_set_mode,
+ .get_mode = mt6323_ldo_get_mode,
+};
+
+static const struct regulator_ops mt6323_volt_fixed_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6323_get_status,
+ .set_mode = mt6323_ldo_set_mode,
+ .get_mode = mt6323_ldo_get_mode,
+};
+
+/* The array is indexed by id (MT6323_ID_XXX) */
+static struct mt6323_regulator_info mt6323_regulators[] = {
+ MT6323_BUCK("buck_vproc", VPROC, 700000, 1493750, 6250,
+ buck_volt_range1, MT6323_VPROC_CON7, MT6323_VPROC_CON9, 0x7f,
+ MT6323_VPROC_CON10, MT6323_VPROC_CON5),
+ MT6323_BUCK("buck_vsys", VSYS, 1400000, 2987500, 12500,
+ buck_volt_range2, MT6323_VSYS_CON7, MT6323_VSYS_CON9, 0x7f,
+ MT6323_VSYS_CON10, MT6323_VSYS_CON5),
+ MT6323_BUCK("buck_vpa", VPA, 500000, 3650000, 50000,
+ buck_volt_range3, MT6323_VPA_CON7, MT6323_VPA_CON9,
+ 0x3f, MT6323_VPA_CON10, MT6323_VPA_CON5),
+ MT6323_REG_FIXED("ldo_vtcxo", VTCXO, MT6323_ANALDO_CON1, 10, 2800000,
+ MT6323_ANALDO_CON1, 0x2),
+ MT6323_REG_FIXED("ldo_vcn28", VCN28, MT6323_ANALDO_CON19, 12, 2800000,
+ MT6323_ANALDO_CON20, 0x2),
+ MT6323_LDO("ldo_vcn33_bt", VCN33_BT, ldo_volt_table1,
+ MT6323_ANALDO_CON16, 7, MT6323_ANALDO_CON16, 0xC,
+ MT6323_ANALDO_CON21, 0x2),
+ MT6323_LDO("ldo_vcn33_wifi", VCN33_WIFI, ldo_volt_table1,
+ MT6323_ANALDO_CON17, 12, MT6323_ANALDO_CON16, 0xC,
+ MT6323_ANALDO_CON21, 0x2),
+ MT6323_REG_FIXED("ldo_va", VA, MT6323_ANALDO_CON2, 14, 2800000,
+ MT6323_ANALDO_CON2, 0x2),
+ MT6323_LDO("ldo_vcama", VCAMA, ldo_volt_table2,
+ MT6323_ANALDO_CON4, 15, MT6323_ANALDO_CON10, 0x60, -1, 0),
+ MT6323_REG_FIXED("ldo_vio28", VIO28, MT6323_DIGLDO_CON0, 14, 2800000,
+ MT6323_DIGLDO_CON0, 0x2),
+ MT6323_REG_FIXED("ldo_vusb", VUSB, MT6323_DIGLDO_CON2, 14, 3300000,
+ MT6323_DIGLDO_CON2, 0x2),
+ MT6323_LDO("ldo_vmc", VMC, ldo_volt_table3,
+ MT6323_DIGLDO_CON3, 12, MT6323_DIGLDO_CON24, 0x10,
+ MT6323_DIGLDO_CON3, 0x2),
+ MT6323_LDO("ldo_vmch", VMCH, ldo_volt_table4,
+ MT6323_DIGLDO_CON5, 14, MT6323_DIGLDO_CON26, 0x80,
+ MT6323_DIGLDO_CON5, 0x2),
+ MT6323_LDO("ldo_vemc3v3", VEMC3V3, ldo_volt_table4,
+ MT6323_DIGLDO_CON6, 14, MT6323_DIGLDO_CON27, 0x80,
+ MT6323_DIGLDO_CON6, 0x2),
+ MT6323_LDO("ldo_vgp1", VGP1, ldo_volt_table5,
+ MT6323_DIGLDO_CON7, 15, MT6323_DIGLDO_CON28, 0xE0,
+ MT6323_DIGLDO_CON7, 0x2),
+ MT6323_LDO("ldo_vgp2", VGP2, ldo_volt_table6,
+ MT6323_DIGLDO_CON8, 15, MT6323_DIGLDO_CON29, 0xE0,
+ MT6323_DIGLDO_CON8, 0x2),
+ MT6323_LDO("ldo_vgp3", VGP3, ldo_volt_table7,
+ MT6323_DIGLDO_CON9, 15, MT6323_DIGLDO_CON30, 0x60,
+ MT6323_DIGLDO_CON9, 0x2),
+ MT6323_REG_FIXED("ldo_vcn18", VCN18, MT6323_DIGLDO_CON11, 14, 1800000,
+ MT6323_DIGLDO_CON11, 0x2),
+ MT6323_LDO("ldo_vsim1", VSIM1, ldo_volt_table8,
+ MT6323_DIGLDO_CON13, 15, MT6323_DIGLDO_CON34, 0x20,
+ MT6323_DIGLDO_CON13, 0x2),
+ MT6323_LDO("ldo_vsim2", VSIM2, ldo_volt_table8,
+ MT6323_DIGLDO_CON14, 15, MT6323_DIGLDO_CON35, 0x20,
+ MT6323_DIGLDO_CON14, 0x2),
+ MT6323_REG_FIXED("ldo_vrtc", VRTC, MT6323_DIGLDO_CON15, 8, 2800000,
+ -1, 0),
+ MT6323_LDO("ldo_vcamaf", VCAMAF, ldo_volt_table5,
+ MT6323_DIGLDO_CON31, 15, MT6323_DIGLDO_CON32, 0xE0,
+ MT6323_DIGLDO_CON31, 0x2),
+ MT6323_LDO("ldo_vibr", VIBR, ldo_volt_table5,
+ MT6323_DIGLDO_CON39, 15, MT6323_DIGLDO_CON40, 0xE0,
+ MT6323_DIGLDO_CON39, 0x2),
+ MT6323_REG_FIXED("ldo_vrf18", VRF18, MT6323_DIGLDO_CON45, 15, 1825000,
+ MT6323_DIGLDO_CON45, 0x2),
+ MT6323_LDO("ldo_vm", VM, ldo_volt_table9,
+ MT6323_DIGLDO_CON47, 14, MT6323_DIGLDO_CON48, 0x30,
+ MT6323_DIGLDO_CON47, 0x2),
+ MT6323_REG_FIXED("ldo_vio18", VIO18, MT6323_DIGLDO_CON49, 14, 1800000,
+ MT6323_DIGLDO_CON49, 0x2),
+ MT6323_LDO("ldo_vcamd", VCAMD, ldo_volt_table10,
+ MT6323_DIGLDO_CON51, 14, MT6323_DIGLDO_CON52, 0x60,
+ MT6323_DIGLDO_CON51, 0x2),
+ MT6323_REG_FIXED("ldo_vcamio", VCAMIO, MT6323_DIGLDO_CON53, 14, 1800000,
+ MT6323_DIGLDO_CON53, 0x2),
+};
+
+static int mt6323_set_buck_vosel_reg(struct platform_device *pdev)
+{
+ struct mt6397_chip *mt6323 = dev_get_drvdata(pdev->dev.parent);
+ int i;
+ u32 regval;
+
+ for (i = 0; i < MT6323_MAX_REGULATOR; i++) {
+ if (mt6323_regulators[i].vselctrl_reg) {
+ if (regmap_read(mt6323->regmap,
+ mt6323_regulators[i].vselctrl_reg,
+ &regval) < 0) {
+ dev_err(&pdev->dev,
+ "Failed to read buck ctrl\n");
+ return -EIO;
+ }
+
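+			/* Hardware-controlled buck: use the ON-state voltage register */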
+ if (regval & mt6323_regulators[i].vselctrl_mask) {
+ mt6323_regulators[i].desc.vsel_reg =
+ mt6323_regulators[i].vselon_reg;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int mt6323_regulator_probe(struct platform_device *pdev)
+{
+ struct mt6397_chip *mt6323 = dev_get_drvdata(pdev->dev.parent);
+ struct regulator_config config = {};
+ struct regulator_dev *rdev;
+ int i;
+ u32 reg_value;
+
+	/* Query the buck control mode to select the active voltage register */
+ if (mt6323_set_buck_vosel_reg(pdev))
+ return -EIO;
+
+ /* Read PMIC chip revision to update constraints and voltage table */
+ if (regmap_read(mt6323->regmap, MT6323_CID, &reg_value) < 0) {
+ dev_err(&pdev->dev, "Failed to read Chip ID\n");
+ return -EIO;
+ }
+ dev_info(&pdev->dev, "Chip ID = 0x%x\n", reg_value);
+
+ for (i = 0; i < MT6323_MAX_REGULATOR; i++) {
+ config.dev = &pdev->dev;
+ config.driver_data = &mt6323_regulators[i];
+ config.regmap = mt6323->regmap;
+ rdev = devm_regulator_register(&pdev->dev,
+ &mt6323_regulators[i].desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to register %s\n",
+ mt6323_regulators[i].desc.name);
+ return PTR_ERR(rdev);
+ }
+ }
+ return 0;
+}
+
+static const struct platform_device_id mt6323_platform_ids[] = {
+ {"mt6323-regulator", 0},
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, mt6323_platform_ids);
+
+static struct platform_driver mt6323_regulator_driver = {
+ .driver = {
+ .name = "mt6323-regulator",
+ },
+ .probe = mt6323_regulator_probe,
+ .id_table = mt6323_platform_ids,
+};
+
+module_platform_driver(mt6323_regulator_driver);
+
+MODULE_AUTHOR("Chen Zhong <chen.zhong@mediatek.com>");
+MODULE_DESCRIPTION("Regulator Driver for MediaTek MT6323 PMIC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/mt6397-regulator.c b/drivers/regulator/mt6397-regulator.c
index 17a5b6c2d..c6c6aa85e 100644
--- a/drivers/regulator/mt6397-regulator.c
+++ b/drivers/regulator/mt6397-regulator.c
@@ -23,6 +23,9 @@
#include <linux/regulator/mt6397-regulator.h>
#include <linux/regulator/of_regulator.h>
+#define MT6397_BUCK_MODE_AUTO 0
+#define MT6397_BUCK_MODE_FORCE_PWM 1
+
/*
* MT6397 regulators' information
*
@@ -38,10 +41,14 @@ struct mt6397_regulator_info {
u32 vselon_reg;
u32 vselctrl_reg;
u32 vselctrl_mask;
+ u32 modeset_reg;
+ u32 modeset_mask;
+ u32 modeset_shift;
};
#define MT6397_BUCK(match, vreg, min, max, step, volt_ranges, enreg, \
- vosel, vosel_mask, voselon, vosel_ctrl) \
+ vosel, vosel_mask, voselon, vosel_ctrl, _modeset_reg, \
+ _modeset_shift) \
[MT6397_ID_##vreg] = { \
.desc = { \
.name = #vreg, \
@@ -62,6 +69,9 @@ struct mt6397_regulator_info {
.vselon_reg = voselon, \
.vselctrl_reg = vosel_ctrl, \
.vselctrl_mask = BIT(1), \
+ .modeset_reg = _modeset_reg, \
+ .modeset_mask = BIT(_modeset_shift), \
+ .modeset_shift = _modeset_shift \
}
#define MT6397_LDO(match, vreg, ldo_volt_table, enreg, enbit, vosel, \
@@ -145,6 +155,63 @@ static const u32 ldo_volt_table7[] = {
1300000, 1500000, 1800000, 2000000, 2500000, 2800000, 3000000, 3300000,
};
+static int mt6397_regulator_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ struct mt6397_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret, val;
+
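+	/* Map the framework mode onto the buck's auto / forced-PWM setting */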
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ val = MT6397_BUCK_MODE_FORCE_PWM;
+ break;
+ case REGULATOR_MODE_NORMAL:
+ val = MT6397_BUCK_MODE_AUTO;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_mode;
+ }
+
+ dev_dbg(&rdev->dev, "mt6397 buck set_mode %#x, %#x, %#x, %#x\n",
+ info->modeset_reg, info->modeset_mask,
+ info->modeset_shift, val);
+
+ val <<= info->modeset_shift;
+ ret = regmap_update_bits(rdev->regmap, info->modeset_reg,
+ info->modeset_mask, val);
+err_mode:
+ if (ret != 0) {
+ dev_err(&rdev->dev,
+ "Failed to set mt6397 buck mode: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static unsigned int mt6397_regulator_get_mode(struct regulator_dev *rdev)
+{
+ struct mt6397_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret, regval;
+
+ ret = regmap_read(rdev->regmap, info->modeset_reg, &regval);
+ if (ret != 0) {
+ dev_err(&rdev->dev,
+ "Failed to get mt6397 buck mode: %d\n", ret);
+ return ret;
+ }
+
+ switch ((regval & info->modeset_mask) >> info->modeset_shift) {
+ case MT6397_BUCK_MODE_AUTO:
+ return REGULATOR_MODE_NORMAL;
+ case MT6397_BUCK_MODE_FORCE_PWM:
+ return REGULATOR_MODE_FAST;
+ default:
+ return -EINVAL;
+ }
+}
+
static int mt6397_get_status(struct regulator_dev *rdev)
{
int ret;
@@ -160,7 +227,7 @@ static int mt6397_get_status(struct regulator_dev *rdev)
return (regval & info->qi) ? REGULATOR_STATUS_ON : REGULATOR_STATUS_OFF;
}
-static struct regulator_ops mt6397_volt_range_ops = {
+static const struct regulator_ops mt6397_volt_range_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
@@ -170,9 +237,11 @@ static struct regulator_ops mt6397_volt_range_ops = {
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.get_status = mt6397_get_status,
+ .set_mode = mt6397_regulator_set_mode,
+ .get_mode = mt6397_regulator_get_mode,
};
-static struct regulator_ops mt6397_volt_table_ops = {
+static const struct regulator_ops mt6397_volt_table_ops = {
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_iterate,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
@@ -184,7 +253,7 @@ static struct regulator_ops mt6397_volt_table_ops = {
.get_status = mt6397_get_status,
};
-static struct regulator_ops mt6397_volt_fixed_ops = {
+static const struct regulator_ops mt6397_volt_fixed_ops = {
.list_voltage = regulator_list_voltage_linear,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -196,28 +265,30 @@ static struct regulator_ops mt6397_volt_fixed_ops = {
static struct mt6397_regulator_info mt6397_regulators[] = {
MT6397_BUCK("buck_vpca15", VPCA15, 700000, 1493750, 6250,
buck_volt_range1, MT6397_VCA15_CON7, MT6397_VCA15_CON9, 0x7f,
- MT6397_VCA15_CON10, MT6397_VCA15_CON5),
+ MT6397_VCA15_CON10, MT6397_VCA15_CON5, MT6397_VCA15_CON2, 11),
MT6397_BUCK("buck_vpca7", VPCA7, 700000, 1493750, 6250,
buck_volt_range1, MT6397_VPCA7_CON7, MT6397_VPCA7_CON9, 0x7f,
- MT6397_VPCA7_CON10, MT6397_VPCA7_CON5),
+ MT6397_VPCA7_CON10, MT6397_VPCA7_CON5, MT6397_VPCA7_CON2, 8),
MT6397_BUCK("buck_vsramca15", VSRAMCA15, 700000, 1493750, 6250,
buck_volt_range1, MT6397_VSRMCA15_CON7, MT6397_VSRMCA15_CON9,
- 0x7f, MT6397_VSRMCA15_CON10, MT6397_VSRMCA15_CON5),
+ 0x7f, MT6397_VSRMCA15_CON10, MT6397_VSRMCA15_CON5,
+ MT6397_VSRMCA15_CON2, 8),
MT6397_BUCK("buck_vsramca7", VSRAMCA7, 700000, 1493750, 6250,
buck_volt_range1, MT6397_VSRMCA7_CON7, MT6397_VSRMCA7_CON9,
- 0x7f, MT6397_VSRMCA7_CON10, MT6397_VSRMCA7_CON5),
+ 0x7f, MT6397_VSRMCA7_CON10, MT6397_VSRMCA7_CON5,
+ MT6397_VSRMCA7_CON2, 8),
MT6397_BUCK("buck_vcore", VCORE, 700000, 1493750, 6250,
buck_volt_range1, MT6397_VCORE_CON7, MT6397_VCORE_CON9, 0x7f,
- MT6397_VCORE_CON10, MT6397_VCORE_CON5),
+ MT6397_VCORE_CON10, MT6397_VCORE_CON5, MT6397_VCORE_CON2, 8),
MT6397_BUCK("buck_vgpu", VGPU, 700000, 1493750, 6250, buck_volt_range1,
MT6397_VGPU_CON7, MT6397_VGPU_CON9, 0x7f,
- MT6397_VGPU_CON10, MT6397_VGPU_CON5),
+ MT6397_VGPU_CON10, MT6397_VGPU_CON5, MT6397_VGPU_CON2, 8),
MT6397_BUCK("buck_vdrm", VDRM, 800000, 1593750, 6250, buck_volt_range2,
MT6397_VDRM_CON7, MT6397_VDRM_CON9, 0x7f,
- MT6397_VDRM_CON10, MT6397_VDRM_CON5),
+ MT6397_VDRM_CON10, MT6397_VDRM_CON5, MT6397_VDRM_CON2, 8),
MT6397_BUCK("buck_vio18", VIO18, 1500000, 2120000, 20000,
buck_volt_range3, MT6397_VIO18_CON7, MT6397_VIO18_CON9, 0x1f,
- MT6397_VIO18_CON10, MT6397_VIO18_CON5),
+ MT6397_VIO18_CON10, MT6397_VIO18_CON5, MT6397_VIO18_CON2, 8),
MT6397_REG_FIXED("ldo_vtcxo", VTCXO, MT6397_ANALDO_CON0, 10, 2800000),
MT6397_REG_FIXED("ldo_va28", VA28, MT6397_ANALDO_CON1, 14, 2800000),
MT6397_LDO("ldo_vcama", VCAMA, ldo_volt_table1,
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index cd828dbf9..4f613ec99 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -163,6 +163,9 @@ static void of_get_regulation_constraints(struct device_node *np,
"regulator-suspend-microvolt", &pval))
suspend_state->uV = pval;
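+		/* A described suspend-to-mem state becomes the initial state */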
+ if (i == PM_SUSPEND_MEM)
+ constraints->initial_state = PM_SUSPEND_MEM;
+
of_node_put(suspend_np);
suspend_state = NULL;
suspend_np = NULL;
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index 2a44e5dd9..cb18b5c4f 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -70,6 +70,7 @@ struct pfuze_chip {
struct device *dev;
struct pfuze_regulator regulator_descs[PFUZE100_MAX_REGULATOR];
struct regulator_dev *regulators[PFUZE100_MAX_REGULATOR];
+ struct pfuze_regulator *pfuze_regulators;
};
static const int pfuze100_swbst[] = {
@@ -334,8 +335,6 @@ static struct pfuze_regulator pfuze3000_regulators[] = {
PFUZE100_VGEN_REG(PFUZE3000, VLDO4, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
};
-static struct pfuze_regulator *pfuze_regulators;
-
#ifdef CONFIG_OF
/* PFUZE100 */
static struct of_regulator_match pfuze100_matches[] = {
@@ -563,21 +562,21 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
	/* use the right regulators after identifying the right device */
switch (pfuze_chip->chip_id) {
case PFUZE3000:
- pfuze_regulators = pfuze3000_regulators;
+ pfuze_chip->pfuze_regulators = pfuze3000_regulators;
regulator_num = ARRAY_SIZE(pfuze3000_regulators);
sw_check_start = PFUZE3000_SW2;
sw_check_end = PFUZE3000_SW2;
sw_hi = 1 << 3;
break;
case PFUZE200:
- pfuze_regulators = pfuze200_regulators;
+ pfuze_chip->pfuze_regulators = pfuze200_regulators;
regulator_num = ARRAY_SIZE(pfuze200_regulators);
sw_check_start = PFUZE200_SW2;
sw_check_end = PFUZE200_SW3B;
break;
case PFUZE100:
default:
- pfuze_regulators = pfuze100_regulators;
+ pfuze_chip->pfuze_regulators = pfuze100_regulators;
regulator_num = ARRAY_SIZE(pfuze100_regulators);
sw_check_start = PFUZE100_SW2;
sw_check_end = PFUZE100_SW4;
@@ -587,7 +586,7 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
(pfuze_chip->chip_id == PFUZE100) ? "100" :
((pfuze_chip->chip_id == PFUZE200) ? "200" : "3000"));
- memcpy(pfuze_chip->regulator_descs, pfuze_regulators,
+ memcpy(pfuze_chip->regulator_descs, pfuze_chip->pfuze_regulators,
sizeof(pfuze_chip->regulator_descs));
ret = pfuze_parse_regulators_dt(pfuze_chip);
@@ -631,7 +630,7 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
devm_regulator_register(&client->dev, desc, &config);
if (IS_ERR(pfuze_chip->regulators[i])) {
dev_err(&client->dev, "register regulator%s failed\n",
- pfuze_regulators[i].desc.name);
+ pfuze_chip->pfuze_regulators[i].desc.name);
return PTR_ERR(pfuze_chip->regulators[i]);
}
}
@@ -650,5 +649,5 @@ static struct i2c_driver pfuze_driver = {
module_i2c_driver(pfuze_driver);
MODULE_AUTHOR("Robin Gong <b38343@freescale.com>");
-MODULE_DESCRIPTION("Regulator Driver for Freescale PFUZE100/PFUZE200 PMIC");
+MODULE_DESCRIPTION("Regulator Driver for Freescale PFUZE100/200/3000 PMIC");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/pv88060-regulator.c b/drivers/regulator/pv88060-regulator.c
index c448b727f..6c4afc73e 100644
--- a/drivers/regulator/pv88060-regulator.c
+++ b/drivers/regulator/pv88060-regulator.c
@@ -14,7 +14,6 @@
*/
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -25,8 +24,6 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/regulator/of_regulator.h>
-#include <linux/proc_fs.h>
-#include <linux/uaccess.h>
#include "pv88060-regulator.h"
#define PV88060_MAX_REGULATORS 14
diff --git a/drivers/regulator/pv88080-regulator.c b/drivers/regulator/pv88080-regulator.c
index d7107566c..81950bdb1 100644
--- a/drivers/regulator/pv88080-regulator.c
+++ b/drivers/regulator/pv88080-regulator.c
@@ -14,7 +14,6 @@
*/
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -25,8 +24,6 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/regulator/of_regulator.h>
-#include <linux/proc_fs.h>
-#include <linux/uaccess.h>
#include "pv88080-regulator.h"
#define PV88080_MAX_REGULATORS 3
diff --git a/drivers/regulator/pv88090-regulator.c b/drivers/regulator/pv88090-regulator.c
index 0057c6740..421641175 100644
--- a/drivers/regulator/pv88090-regulator.c
+++ b/drivers/regulator/pv88090-regulator.c
@@ -14,7 +14,6 @@
*/
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -25,8 +24,6 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/regulator/of_regulator.h>
-#include <linux/proc_fs.h>
-#include <linux/uaccess.h>
#include "pv88090-regulator.h"
#define PV88090_MAX_REGULATORS 5
diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
index fafa3488e..c24524242 100644
--- a/drivers/regulator/pwm-regulator.c
+++ b/drivers/regulator/pwm-regulator.c
@@ -20,6 +20,13 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pwm.h>
+#include <linux/gpio/consumer.h>
+
+struct pwm_continuous_reg_data {
+ unsigned int min_uV_dutycycle;
+ unsigned int max_uV_dutycycle;
+ unsigned int dutycycle_unit;
+};
struct pwm_regulator_data {
/* Shared */
@@ -28,6 +35,9 @@ struct pwm_regulator_data {
/* Voltage table */
struct pwm_voltages *duty_cycle_table;
+ /* Continuous mode info */
+ struct pwm_continuous_reg_data continuous;
+
/* regulator descriptor */
struct regulator_desc desc;
@@ -36,8 +46,8 @@ struct pwm_regulator_data {
int state;
- /* Continuous voltage */
- int volt_uV;
+ /* Enable GPIO */
+ struct gpio_desc *enb_gpio;
};
struct pwm_voltages {
@@ -48,10 +58,31 @@ struct pwm_voltages {
/**
* Voltage table call-backs
*/
+static void pwm_regulator_init_state(struct regulator_dev *rdev)
+{
+ struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);
+ struct pwm_state pwm_state;
+ unsigned int dutycycle;
+ int i;
+
+ pwm_get_state(drvdata->pwm, &pwm_state);
+ dutycycle = pwm_get_relative_duty_cycle(&pwm_state, 100);
+
+ for (i = 0; i < rdev->desc->n_voltages; i++) {
+ if (dutycycle == drvdata->duty_cycle_table[i].dutycycle) {
+ drvdata->state = i;
+ return;
+ }
+ }
+}
+
static int pwm_regulator_get_voltage_sel(struct regulator_dev *rdev)
{
struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);
+ if (drvdata->state < 0)
+ pwm_regulator_init_state(rdev);
+
return drvdata->state;
}
@@ -59,16 +90,14 @@ static int pwm_regulator_set_voltage_sel(struct regulator_dev *rdev,
unsigned selector)
{
struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);
- struct pwm_args pargs;
- int dutycycle;
+ struct pwm_state pstate;
int ret;
- pwm_get_args(drvdata->pwm, &pargs);
-
- dutycycle = (pargs.period *
- drvdata->duty_cycle_table[selector].dutycycle) / 100;
+ pwm_init_state(drvdata->pwm, &pstate);
+ pwm_set_relative_duty_cycle(&pstate,
+ drvdata->duty_cycle_table[selector].dutycycle, 100);
- ret = pwm_config(drvdata->pwm, dutycycle, pargs.period);
+ ret = pwm_apply_state(drvdata->pwm, &pstate);
if (ret) {
dev_err(&rdev->dev, "Failed to configure PWM: %d\n", ret);
return ret;
@@ -94,6 +123,9 @@ static int pwm_regulator_enable(struct regulator_dev *dev)
{
struct pwm_regulator_data *drvdata = rdev_get_drvdata(dev);
+ if (drvdata->enb_gpio)
+ gpiod_set_value_cansleep(drvdata->enb_gpio, 1);
+
return pwm_enable(drvdata->pwm);
}
@@ -103,6 +135,9 @@ static int pwm_regulator_disable(struct regulator_dev *dev)
pwm_disable(drvdata->pwm);
+ if (drvdata->enb_gpio)
+ gpiod_set_value_cansleep(drvdata->enb_gpio, 0);
+
return 0;
}
@@ -110,64 +145,100 @@ static int pwm_regulator_is_enabled(struct regulator_dev *dev)
{
struct pwm_regulator_data *drvdata = rdev_get_drvdata(dev);
+ if (drvdata->enb_gpio && !gpiod_get_value_cansleep(drvdata->enb_gpio))
+ return false;
+
return pwm_is_enabled(drvdata->pwm);
}
static int pwm_regulator_get_voltage(struct regulator_dev *rdev)
{
struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);
+ unsigned int min_uV_duty = drvdata->continuous.min_uV_dutycycle;
+ unsigned int max_uV_duty = drvdata->continuous.max_uV_dutycycle;
+ unsigned int duty_unit = drvdata->continuous.dutycycle_unit;
+ int min_uV = rdev->constraints->min_uV;
+ int max_uV = rdev->constraints->max_uV;
+ int diff_uV = max_uV - min_uV;
+ struct pwm_state pstate;
+ unsigned int diff_duty;
+ unsigned int voltage;
+
+ pwm_get_state(drvdata->pwm, &pstate);
+
+ voltage = pwm_get_relative_duty_cycle(&pstate, duty_unit);
+
+ /*
+ * The dutycycle for min_uV might be greater than the one for max_uV.
+	 * This happens when the user needs an inverted polarity, but the
+	 * PWM device does not support inverting it in hardware.
+ */
+ if (max_uV_duty < min_uV_duty) {
+ voltage = min_uV_duty - voltage;
+ diff_duty = min_uV_duty - max_uV_duty;
+ } else {
+ voltage = voltage - min_uV_duty;
+ diff_duty = max_uV_duty - min_uV_duty;
+ }
+
+ voltage = DIV_ROUND_CLOSEST_ULL((u64)voltage * diff_uV, diff_duty);
- return drvdata->volt_uV;
+ return voltage + min_uV;
}
static int pwm_regulator_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV,
- unsigned *selector)
+ int req_min_uV, int req_max_uV,
+ unsigned int *selector)
{
struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);
+ unsigned int min_uV_duty = drvdata->continuous.min_uV_dutycycle;
+ unsigned int max_uV_duty = drvdata->continuous.max_uV_dutycycle;
+ unsigned int duty_unit = drvdata->continuous.dutycycle_unit;
unsigned int ramp_delay = rdev->constraints->ramp_delay;
- struct pwm_args pargs;
- unsigned int req_diff = min_uV - rdev->constraints->min_uV;
- unsigned int diff;
- unsigned int duty_pulse;
- u64 req_period;
- u32 rem;
+ int min_uV = rdev->constraints->min_uV;
+ int max_uV = rdev->constraints->max_uV;
+ int diff_uV = max_uV - min_uV;
+ struct pwm_state pstate;
+ int old_uV = pwm_regulator_get_voltage(rdev);
+ unsigned int diff_duty;
+ unsigned int dutycycle;
int ret;
- pwm_get_args(drvdata->pwm, &pargs);
- diff = rdev->constraints->max_uV - rdev->constraints->min_uV;
+ pwm_init_state(drvdata->pwm, &pstate);
- /* First try to find out if we get the iduty cycle time which is
- * factor of PWM period time. If (request_diff_to_min * pwm_period)
- * is perfect divided by voltage_range_diff then it is possible to
- * get duty cycle time which is factor of PWM period. This will help
- * to get output voltage nearer to requested value as there is no
- * calculation loss.
+ /*
+ * The dutycycle for min_uV might be greater than the one for max_uV.
+	 * This happens when the user needs an inverted polarity, but the
+	 * PWM device does not support inverting it in hardware.
*/
- req_period = req_diff * pargs.period;
- div_u64_rem(req_period, diff, &rem);
- if (!rem) {
- do_div(req_period, diff);
- duty_pulse = (unsigned int)req_period;
- } else {
- duty_pulse = (pargs.period / 100) * ((req_diff * 100) / diff);
- }
+ if (max_uV_duty < min_uV_duty)
+ diff_duty = min_uV_duty - max_uV_duty;
+ else
+ diff_duty = max_uV_duty - min_uV_duty;
+
+ dutycycle = DIV_ROUND_CLOSEST_ULL((u64)(req_min_uV - min_uV) *
+ diff_duty,
+ diff_uV);
+
+ if (max_uV_duty < min_uV_duty)
+ dutycycle = min_uV_duty - dutycycle;
+ else
+ dutycycle = min_uV_duty + dutycycle;
+
+ pwm_set_relative_duty_cycle(&pstate, dutycycle, duty_unit);
- ret = pwm_config(drvdata->pwm, duty_pulse, pargs.period);
+ ret = pwm_apply_state(drvdata->pwm, &pstate);
if (ret) {
dev_err(&rdev->dev, "Failed to configure PWM: %d\n", ret);
return ret;
}
- ret = pwm_enable(drvdata->pwm);
- if (ret) {
- dev_err(&rdev->dev, "Failed to enable PWM: %d\n", ret);
- return ret;
- }
- drvdata->volt_uV = min_uV;
+ if ((ramp_delay == 0) || !pwm_regulator_is_enabled(rdev))
+ return 0;
- /* Delay required by PWM regulator to settle to the new voltage */
- usleep_range(ramp_delay, ramp_delay + 1000);
+	/* Ramp delay is in uV/us. Convert the voltage delta to us and delay */
+ ramp_delay = DIV_ROUND_UP(abs(req_min_uV - old_uV), ramp_delay);
+ usleep_range(ramp_delay, ramp_delay + DIV_ROUND_UP(ramp_delay, 10));
return 0;
}
@@ -226,6 +297,7 @@ static int pwm_regulator_init_table(struct platform_device *pdev,
return ret;
}
+ drvdata->state = -EINVAL;
drvdata->duty_cycle_table = duty_cycle_table;
memcpy(&drvdata->ops, &pwm_regulator_voltage_table_ops,
sizeof(drvdata->ops));
@@ -238,11 +310,28 @@ static int pwm_regulator_init_table(struct platform_device *pdev,
static int pwm_regulator_init_continuous(struct platform_device *pdev,
struct pwm_regulator_data *drvdata)
{
+ u32 dutycycle_range[2] = { 0, 100 };
+ u32 dutycycle_unit = 100;
+
memcpy(&drvdata->ops, &pwm_regulator_voltage_continuous_ops,
sizeof(drvdata->ops));
drvdata->desc.ops = &drvdata->ops;
drvdata->desc.continuous_voltage_range = true;
+ of_property_read_u32_array(pdev->dev.of_node,
+ "pwm-dutycycle-range",
+ dutycycle_range, 2);
+ of_property_read_u32(pdev->dev.of_node, "pwm-dutycycle-unit",
+ &dutycycle_unit);
+
+ if (dutycycle_range[0] > dutycycle_unit ||
+ dutycycle_range[1] > dutycycle_unit)
+ return -EINVAL;
+
+ drvdata->continuous.dutycycle_unit = dutycycle_unit;
+ drvdata->continuous.min_uV_dutycycle = dutycycle_range[0];
+ drvdata->continuous.max_uV_dutycycle = dutycycle_range[1];
+
return 0;
}
@@ -253,6 +342,7 @@ static int pwm_regulator_probe(struct platform_device *pdev)
struct regulator_dev *regulator;
struct regulator_config config = { };
struct device_node *np = pdev->dev.of_node;
+ enum gpiod_flags gpio_flags;
int ret;
if (!np) {
@@ -290,11 +380,21 @@ static int pwm_regulator_probe(struct platform_device *pdev)
return ret;
}
- /*
- * FIXME: pwm_apply_args() should be removed when switching to the
- * atomic PWM API.
- */
- pwm_apply_args(drvdata->pwm);
+ if (init_data->constraints.boot_on || init_data->constraints.always_on)
+ gpio_flags = GPIOD_OUT_HIGH;
+ else
+ gpio_flags = GPIOD_OUT_LOW;
+ drvdata->enb_gpio = devm_gpiod_get_optional(&pdev->dev, "enable",
+ gpio_flags);
+ if (IS_ERR(drvdata->enb_gpio)) {
+ ret = PTR_ERR(drvdata->enb_gpio);
+ dev_err(&pdev->dev, "Failed to get enable GPIO: %d\n", ret);
+ return ret;
+ }
+
+ ret = pwm_adjust_config(drvdata->pwm);
+ if (ret)
+ return ret;
regulator = devm_regulator_register(&pdev->dev,
&drvdata->desc, &config);
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index 6c7fe4778..8ed46a9a5 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -178,20 +178,21 @@ static const struct regulator_desc pma8084_hfsmps = {
static const struct regulator_desc pma8084_ftsmps = {
.linear_ranges = (struct regulator_linear_range[]) {
REGULATOR_LINEAR_RANGE(350000, 0, 184, 5000),
- REGULATOR_LINEAR_RANGE(700000, 185, 339, 10000),
+ REGULATOR_LINEAR_RANGE(1280000, 185, 261, 10000),
},
.n_linear_ranges = 2,
- .n_voltages = 340,
+ .n_voltages = 262,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pma8084_pldo = {
.linear_ranges = (struct regulator_linear_range[]) {
- REGULATOR_LINEAR_RANGE(750000, 0, 30, 25000),
- REGULATOR_LINEAR_RANGE(1500000, 31, 99, 50000),
+ REGULATOR_LINEAR_RANGE( 750000, 0, 63, 12500),
+ REGULATOR_LINEAR_RANGE(1550000, 64, 126, 25000),
+ REGULATOR_LINEAR_RANGE(3100000, 127, 163, 50000),
},
- .n_linear_ranges = 2,
- .n_voltages = 100,
+ .n_linear_ranges = 3,
+ .n_voltages = 164,
.ops = &rpm_smps_ldo_ops,
};
@@ -211,7 +212,7 @@ static const struct regulator_desc pma8084_switch = {
static const struct regulator_desc pm8x41_hfsmps = {
.linear_ranges = (struct regulator_linear_range[]) {
REGULATOR_LINEAR_RANGE( 375000, 0, 95, 12500),
- REGULATOR_LINEAR_RANGE(1550000, 96, 158, 25000),
+ REGULATOR_LINEAR_RANGE(1575000, 96, 158, 25000),
},
.n_linear_ranges = 2,
.n_voltages = 159,
@@ -221,29 +222,30 @@ static const struct regulator_desc pm8x41_hfsmps = {
static const struct regulator_desc pm8841_ftsmps = {
.linear_ranges = (struct regulator_linear_range[]) {
REGULATOR_LINEAR_RANGE(350000, 0, 184, 5000),
- REGULATOR_LINEAR_RANGE(700000, 185, 339, 10000),
+ REGULATOR_LINEAR_RANGE(1280000, 185, 261, 10000),
},
.n_linear_ranges = 2,
- .n_voltages = 340,
+ .n_voltages = 262,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm8941_boost = {
.linear_ranges = (struct regulator_linear_range[]) {
- REGULATOR_LINEAR_RANGE(4000000, 0, 15, 100000),
+ REGULATOR_LINEAR_RANGE(4000000, 0, 30, 50000),
},
.n_linear_ranges = 1,
- .n_voltages = 16,
+ .n_voltages = 31,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm8941_pldo = {
.linear_ranges = (struct regulator_linear_range[]) {
- REGULATOR_LINEAR_RANGE( 750000, 0, 30, 25000),
- REGULATOR_LINEAR_RANGE(1500000, 31, 99, 50000),
+ REGULATOR_LINEAR_RANGE( 750000, 0, 63, 12500),
+ REGULATOR_LINEAR_RANGE(1550000, 64, 126, 25000),
+ REGULATOR_LINEAR_RANGE(3100000, 127, 163, 50000),
},
- .n_linear_ranges = 2,
- .n_voltages = 100,
+ .n_linear_ranges = 3,
+ .n_voltages = 164,
.ops = &rpm_smps_ldo_ops,
};
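These REGULATOR_LINEAR_RANGE() fixes matter because the core resolves a selector to a voltage as min_uV + (sel - min_sel) * uV_step within whichever range contains the selector. A hedged sketch of that lookup, using the corrected pma8084/pm8841 ftsmps table to show the two ranges are now contiguous (selector 184 gives 1270000 uV, selector 185 gives 1280000 uV):

/* Sketch of the selector -> uV lookup the regulator core performs;
 * struct lin_range mirrors REGULATOR_LINEAR_RANGE(min_uV, min_sel,
 * max_sel, uV_step). */
struct lin_range { int min_uV; int min_sel; int max_sel; int uV_step; };

static const struct lin_range ftsmps[] = {
	{  350000,   0, 184,  5000 },	/* 350000 + 184 * 5000 = 1270000 */
	{ 1280000, 185, 261, 10000 },	/* resumes exactly one step later */
};

static int sel_to_uV(const struct lin_range *r, int nranges, int sel)
{
	int i;

	for (i = 0; i < nranges; i++)
		if (sel >= r[i].min_sel && sel <= r[i].max_sel)
			return r[i].min_uV +
			       (sel - r[i].min_sel) * r[i].uV_step;
	return -1;	/* selector out of range */
}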
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index 84cce21e9..16c5f84e0 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -1085,6 +1085,8 @@ static struct regulator_ops spmi_vs_ops = {
.set_pull_down = spmi_regulator_common_set_pull_down,
.set_soft_start = spmi_regulator_common_set_soft_start,
.set_over_current_protection = spmi_regulator_vs_ocp,
+ .set_mode = spmi_regulator_common_set_mode,
+ .get_mode = spmi_regulator_common_get_mode,
};
static struct regulator_ops spmi_boost_ops = {
@@ -1496,6 +1498,7 @@ static const struct spmi_regulator_data pm8941_regulators[] = {
{ "s1", 0x1400, "vdd_s1", },
{ "s2", 0x1700, "vdd_s2", },
{ "s3", 0x1a00, "vdd_s3", },
+ { "s4", 0xa000, },
{ "l1", 0x4000, "vdd_l1_l3", },
{ "l2", 0x4100, "vdd_l2_lvs_1_2_3", },
{ "l3", 0x4200, "vdd_l1_l3", },
@@ -1523,8 +1526,8 @@ static const struct spmi_regulator_data pm8941_regulators[] = {
{ "lvs1", 0x8000, "vdd_l2_lvs_1_2_3", },
{ "lvs2", 0x8100, "vdd_l2_lvs_1_2_3", },
{ "lvs3", 0x8200, "vdd_l2_lvs_1_2_3", },
- { "mvs1", 0x8300, "vin_5vs", },
- { "mvs2", 0x8400, "vin_5vs", },
+ { "5vs1", 0x8300, "vin_5vs", "ocp-5vs1", },
+ { "5vs2", 0x8400, "vin_5vs", "ocp-5vs2", },
{ }
};
diff --git a/drivers/regulator/rn5t618-regulator.c b/drivers/regulator/rn5t618-regulator.c
index b85ceb8ff..9c930eb68 100644
--- a/drivers/regulator/rn5t618-regulator.c
+++ b/drivers/regulator/rn5t618-regulator.c
@@ -46,6 +46,23 @@ static struct regulator_ops rn5t618_reg_ops = {
.vsel_mask = (vmask), \
}
+static struct regulator_desc rn5t567_regulators[] = {
+ /* DCDC */
+ REG(DCDC1, DC1CTL, BIT(0), DC1DAC, 0xff, 600000, 3500000, 12500),
+ REG(DCDC2, DC2CTL, BIT(0), DC2DAC, 0xff, 600000, 3500000, 12500),
+ REG(DCDC3, DC3CTL, BIT(0), DC3DAC, 0xff, 600000, 3500000, 12500),
+ REG(DCDC4, DC4CTL, BIT(0), DC4DAC, 0xff, 600000, 3500000, 12500),
+ /* LDO */
+ REG(LDO1, LDOEN1, BIT(0), LDO1DAC, 0x7f, 900000, 3500000, 25000),
+ REG(LDO2, LDOEN1, BIT(1), LDO2DAC, 0x7f, 900000, 3500000, 25000),
+ REG(LDO3, LDOEN1, BIT(2), LDO3DAC, 0x7f, 600000, 3500000, 25000),
+ REG(LDO4, LDOEN1, BIT(3), LDO4DAC, 0x7f, 900000, 3500000, 25000),
+ REG(LDO5, LDOEN1, BIT(4), LDO5DAC, 0x7f, 900000, 3500000, 25000),
+ /* LDO RTC */
+ REG(LDORTC1, LDOEN2, BIT(4), LDORTCDAC, 0x7f, 1200000, 3500000, 25000),
+ REG(LDORTC2, LDOEN2, BIT(5), LDORTC2DAC, 0x7f, 900000, 3500000, 25000),
+};
+
static struct regulator_desc rn5t618_regulators[] = {
/* DCDC */
REG(DCDC1, DC1CTL, BIT(0), DC1DAC, 0xff, 600000, 3500000, 12500),
@@ -67,18 +84,33 @@ static int rn5t618_regulator_probe(struct platform_device *pdev)
struct rn5t618 *rn5t618 = dev_get_drvdata(pdev->dev.parent);
struct regulator_config config = { };
struct regulator_dev *rdev;
+ struct regulator_desc *regulators;
int i;
+ switch (rn5t618->variant) {
+ case RN5T567:
+ regulators = rn5t567_regulators;
+ break;
+ case RN5T618:
+ regulators = rn5t618_regulators;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ config.dev = pdev->dev.parent;
+ config.regmap = rn5t618->regmap;
+
for (i = 0; i < RN5T618_REG_NUM; i++) {
- config.dev = pdev->dev.parent;
- config.regmap = rn5t618->regmap;
+ if (!regulators[i].name)
+ continue;
rdev = devm_regulator_register(&pdev->dev,
- &rn5t618_regulators[i],
+ &regulators[i],
&config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register %s regulator\n",
- rn5t618_regulators[i].name);
+ regulators[i].name);
return PTR_ERR(rdev);
}
}
diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c
index adbe4fc5c..2d12b9af3 100644
--- a/drivers/regulator/tps65217-regulator.c
+++ b/drivers/regulator/tps65217-regulator.c
@@ -28,7 +28,7 @@
#include <linux/mfd/tps65217.h>
#define TPS65217_REGULATOR(_name, _id, _of_match, _ops, _n, _vr, _vm, _em, \
- _t, _lr, _nlr) \
+ _t, _lr, _nlr, _sr, _sm) \
{ \
.name = _name, \
.id = _id, \
@@ -45,6 +45,8 @@
.volt_table = _t, \
.linear_ranges = _lr, \
.n_linear_ranges = _nlr, \
+ .bypass_reg = _sr, \
+ .bypass_mask = _sm, \
} \
static const unsigned int LDO1_VSEL_table[] = {
@@ -118,6 +120,35 @@ static int tps65217_pmic_set_voltage_sel(struct regulator_dev *dev,
return ret;
}
+static int tps65217_pmic_set_suspend_enable(struct regulator_dev *dev)
+{
+ struct tps65217 *tps = rdev_get_drvdata(dev);
+ unsigned int rid = rdev_get_id(dev);
+
+ if (rid < TPS65217_DCDC_1 || rid > TPS65217_LDO_4)
+ return -EINVAL;
+
+ return tps65217_clear_bits(tps, dev->desc->bypass_reg,
+ dev->desc->bypass_mask,
+ TPS65217_PROTECT_L1);
+}
+
+static int tps65217_pmic_set_suspend_disable(struct regulator_dev *dev)
+{
+ struct tps65217 *tps = rdev_get_drvdata(dev);
+ unsigned int rid = rdev_get_id(dev);
+
+ if (rid < TPS65217_DCDC_1 || rid > TPS65217_LDO_4)
+ return -EINVAL;
+
+ if (!tps->strobes[rid])
+ return -EINVAL;
+
+ return tps65217_set_bits(tps, dev->desc->bypass_reg,
+ dev->desc->bypass_mask,
+ tps->strobes[rid], TPS65217_PROTECT_L1);
+}
+
/* Operations permitted on DCDCx, LDO2, LDO3 and LDO4 */
static struct regulator_ops tps65217_pmic_ops = {
.is_enabled = regulator_is_enabled_regmap,
@@ -127,6 +158,8 @@ static struct regulator_ops tps65217_pmic_ops = {
.set_voltage_sel = tps65217_pmic_set_voltage_sel,
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
+ .set_suspend_enable = tps65217_pmic_set_suspend_enable,
+ .set_suspend_disable = tps65217_pmic_set_suspend_disable,
};
/* Operations permitted on LDO1 */
@@ -138,41 +171,50 @@ static struct regulator_ops tps65217_pmic_ldo1_ops = {
.set_voltage_sel = tps65217_pmic_set_voltage_sel,
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_ascend,
+ .set_suspend_enable = tps65217_pmic_set_suspend_enable,
+ .set_suspend_disable = tps65217_pmic_set_suspend_disable,
};
static const struct regulator_desc regulators[] = {
TPS65217_REGULATOR("DCDC1", TPS65217_DCDC_1, "dcdc1",
tps65217_pmic_ops, 64, TPS65217_REG_DEFDCDC1,
TPS65217_DEFDCDCX_DCDC_MASK, TPS65217_ENABLE_DC1_EN,
- NULL, tps65217_uv1_ranges, 2),
+ NULL, tps65217_uv1_ranges, 2, TPS65217_REG_SEQ1,
+ TPS65217_SEQ1_DC1_SEQ_MASK),
TPS65217_REGULATOR("DCDC2", TPS65217_DCDC_2, "dcdc2",
tps65217_pmic_ops, 64, TPS65217_REG_DEFDCDC2,
TPS65217_DEFDCDCX_DCDC_MASK, TPS65217_ENABLE_DC2_EN,
NULL, tps65217_uv1_ranges,
- ARRAY_SIZE(tps65217_uv1_ranges)),
+ ARRAY_SIZE(tps65217_uv1_ranges), TPS65217_REG_SEQ1,
+ TPS65217_SEQ1_DC2_SEQ_MASK),
TPS65217_REGULATOR("DCDC3", TPS65217_DCDC_3, "dcdc3",
tps65217_pmic_ops, 64, TPS65217_REG_DEFDCDC3,
TPS65217_DEFDCDCX_DCDC_MASK, TPS65217_ENABLE_DC3_EN,
- NULL, tps65217_uv1_ranges, 1),
+ NULL, tps65217_uv1_ranges, 1, TPS65217_REG_SEQ2,
+ TPS65217_SEQ2_DC3_SEQ_MASK),
TPS65217_REGULATOR("LDO1", TPS65217_LDO_1, "ldo1",
tps65217_pmic_ldo1_ops, 16, TPS65217_REG_DEFLDO1,
TPS65217_DEFLDO1_LDO1_MASK, TPS65217_ENABLE_LDO1_EN,
- LDO1_VSEL_table, NULL, 0),
+ LDO1_VSEL_table, NULL, 0, TPS65217_REG_SEQ2,
+ TPS65217_SEQ2_LDO1_SEQ_MASK),
TPS65217_REGULATOR("LDO2", TPS65217_LDO_2, "ldo2", tps65217_pmic_ops,
64, TPS65217_REG_DEFLDO2,
TPS65217_DEFLDO2_LDO2_MASK, TPS65217_ENABLE_LDO2_EN,
NULL, tps65217_uv1_ranges,
- ARRAY_SIZE(tps65217_uv1_ranges)),
+ ARRAY_SIZE(tps65217_uv1_ranges), TPS65217_REG_SEQ3,
+ TPS65217_SEQ3_LDO2_SEQ_MASK),
TPS65217_REGULATOR("LDO3", TPS65217_LDO_3, "ldo3", tps65217_pmic_ops,
32, TPS65217_REG_DEFLS1, TPS65217_DEFLDO3_LDO3_MASK,
TPS65217_ENABLE_LS1_EN | TPS65217_DEFLDO3_LDO3_EN,
NULL, tps65217_uv2_ranges,
- ARRAY_SIZE(tps65217_uv2_ranges)),
+ ARRAY_SIZE(tps65217_uv2_ranges), TPS65217_REG_SEQ3,
+ TPS65217_SEQ3_LDO3_SEQ_MASK),
TPS65217_REGULATOR("LDO4", TPS65217_LDO_4, "ldo4", tps65217_pmic_ops,
32, TPS65217_REG_DEFLS2, TPS65217_DEFLDO4_LDO4_MASK,
TPS65217_ENABLE_LS2_EN | TPS65217_DEFLDO4_LDO4_EN,
NULL, tps65217_uv2_ranges,
- ARRAY_SIZE(tps65217_uv2_ranges)),
+ ARRAY_SIZE(tps65217_uv2_ranges), TPS65217_REG_SEQ4,
+ TPS65217_SEQ4_LDO4_SEQ_MASK),
};
static int tps65217_regulator_probe(struct platform_device *pdev)
@@ -181,13 +223,18 @@ static int tps65217_regulator_probe(struct platform_device *pdev)
struct tps65217_board *pdata = dev_get_platdata(tps->dev);
struct regulator_dev *rdev;
struct regulator_config config = { };
- int i;
+ int i, ret;
+ unsigned int val;
if (tps65217_chip_id(tps) != TPS65217) {
dev_err(&pdev->dev, "Invalid tps chip version\n");
return -ENODEV;
}
+ /* Allocate memory for strobes */
+	tps->strobes = devm_kzalloc(&pdev->dev, sizeof(u8) *
+				    TPS65217_NUM_REGULATOR, GFP_KERNEL);
+	if (!tps->strobes)
+		return -ENOMEM;
+
platform_set_drvdata(pdev, tps);
for (i = 0; i < TPS65217_NUM_REGULATOR; i++) {
@@ -205,6 +252,10 @@ static int tps65217_regulator_probe(struct platform_device *pdev)
pdev->name);
return PTR_ERR(rdev);
}
+
+ /* Store default strobe info */
+		ret = tps65217_reg_read(tps, regulators[i].bypass_reg, &val);
+		if (ret)
+			return ret;
+
+		tps->strobes[i] = val & regulators[i].bypass_mask;
}
return 0;
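The suspend support added above hinges on one value cached per rail: at probe the default sequencer strobe is read out of bypass_reg, set_suspend_enable() then clears that field (no power-down sequence, so the rail stays up in suspend), and set_suspend_disable() writes the saved strobe back (the rail joins the power-down sequence). A rough sketch of that cache-then-restore pattern; reg_read()/reg_update() are stand-ins for the TPS65217 register helpers:

/* Sketch: cache a register field at probe, restore it on demand. */
extern int reg_read(unsigned int reg, unsigned int *val);
extern int reg_update(unsigned int reg, unsigned int mask, unsigned int val);

struct rail { unsigned int seq_reg, seq_mask, strobe; };

static int rail_probe(struct rail *r)
{
	unsigned int val;
	int ret = reg_read(r->seq_reg, &val);

	if (ret)
		return ret;
	r->strobe = val & r->seq_mask;	/* remember the default strobe */
	return 0;
}

static int rail_suspend_enable(struct rail *r)
{
	return reg_update(r->seq_reg, r->seq_mask, 0);	/* keep rail on */
}

static int rail_suspend_disable(struct rail *r)
{
	if (!r->strobe)
		return -1;	/* no default sequence recorded */
	return reg_update(r->seq_reg, r->seq_mask, r->strobe);
}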
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
index a5e5634ee..d1e631d64 100644
--- a/drivers/regulator/tps65218-regulator.c
+++ b/drivers/regulator/tps65218-regulator.c
@@ -31,7 +31,7 @@ enum tps65218_regulators { DCDC1, DCDC2, DCDC3, DCDC4,
DCDC5, DCDC6, LDO1, LS3 };
#define TPS65218_REGULATOR(_name, _id, _type, _ops, _n, _vr, _vm, _er, _em, \
- _cr, _cm, _lr, _nlr, _delay, _fuv) \
+ _cr, _cm, _lr, _nlr, _delay, _fuv, _sr, _sm) \
{ \
.name = _name, \
.id = _id, \
@@ -49,7 +49,9 @@ enum tps65218_regulators { DCDC1, DCDC2, DCDC3, DCDC4,
.linear_ranges = _lr, \
.n_linear_ranges = _nlr, \
.ramp_delay = _delay, \
- .fixed_uV = _fuv \
+ .fixed_uV = _fuv, \
+ .bypass_reg = _sr, \
+ .bypass_mask = _sm, \
} \
#define TPS65218_INFO(_id, _nm, _min, _max) \
@@ -157,6 +159,40 @@ static int tps65218_pmic_disable(struct regulator_dev *dev)
dev->desc->enable_mask, TPS65218_PROTECT_L1);
}
+static int tps65218_pmic_set_suspend_enable(struct regulator_dev *dev)
+{
+ struct tps65218 *tps = rdev_get_drvdata(dev);
+ unsigned int rid = rdev_get_id(dev);
+
+ if (rid < TPS65218_DCDC_1 || rid > TPS65218_LDO_1)
+ return -EINVAL;
+
+ return tps65218_clear_bits(tps, dev->desc->bypass_reg,
+ dev->desc->bypass_mask,
+ TPS65218_PROTECT_L1);
+}
+
+static int tps65218_pmic_set_suspend_disable(struct regulator_dev *dev)
+{
+ struct tps65218 *tps = rdev_get_drvdata(dev);
+ unsigned int rid = rdev_get_id(dev);
+
+ if (rid < TPS65218_DCDC_1 || rid > TPS65218_LDO_1)
+ return -EINVAL;
+
+ if (!tps->info[rid]->strobe) {
+ if (rid == TPS65218_DCDC_3)
+ tps->info[rid]->strobe = 3;
+ else
+ return -EINVAL;
+ }
+
+ return tps65218_set_bits(tps, dev->desc->bypass_reg,
+ dev->desc->bypass_mask,
+ tps->info[rid]->strobe,
+ TPS65218_PROTECT_L1);
+}
+
/* Operations permitted on DCDC1, DCDC2 */
static struct regulator_ops tps65218_dcdc12_ops = {
.is_enabled = regulator_is_enabled_regmap,
@@ -167,6 +203,8 @@ static struct regulator_ops tps65218_dcdc12_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_suspend_enable = tps65218_pmic_set_suspend_enable,
+ .set_suspend_disable = tps65218_pmic_set_suspend_disable,
};
/* Operations permitted on DCDC3, DCDC4 and LDO1 */
@@ -178,6 +216,8 @@ static struct regulator_ops tps65218_ldo1_dcdc34_ops = {
.set_voltage_sel = tps65218_pmic_set_voltage_sel,
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
+ .set_suspend_enable = tps65218_pmic_set_suspend_enable,
+ .set_suspend_disable = tps65218_pmic_set_suspend_disable,
};
static const int ls3_currents[] = { 100, 200, 500, 1000 };
@@ -247,6 +287,8 @@ static struct regulator_ops tps65218_dcdc56_pmic_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = tps65218_pmic_enable,
.disable = tps65218_pmic_disable,
+ .set_suspend_enable = tps65218_pmic_set_suspend_enable,
+ .set_suspend_disable = tps65218_pmic_set_suspend_disable,
};
static const struct regulator_desc regulators[] = {
@@ -254,42 +296,47 @@ static const struct regulator_desc regulators[] = {
tps65218_dcdc12_ops, 64, TPS65218_REG_CONTROL_DCDC1,
TPS65218_CONTROL_DCDC1_MASK, TPS65218_REG_ENABLE1,
TPS65218_ENABLE1_DC1_EN, 0, 0, dcdc1_dcdc2_ranges,
- 2, 4000, 0),
+ 2, 4000, 0, TPS65218_REG_SEQ3,
+ TPS65218_SEQ3_DC1_SEQ_MASK),
TPS65218_REGULATOR("DCDC2", TPS65218_DCDC_2, REGULATOR_VOLTAGE,
tps65218_dcdc12_ops, 64, TPS65218_REG_CONTROL_DCDC2,
TPS65218_CONTROL_DCDC2_MASK, TPS65218_REG_ENABLE1,
TPS65218_ENABLE1_DC2_EN, 0, 0, dcdc1_dcdc2_ranges,
- 2, 4000, 0),
+ 2, 4000, 0, TPS65218_REG_SEQ3,
+ TPS65218_SEQ3_DC2_SEQ_MASK),
TPS65218_REGULATOR("DCDC3", TPS65218_DCDC_3, REGULATOR_VOLTAGE,
tps65218_ldo1_dcdc34_ops, 64,
TPS65218_REG_CONTROL_DCDC3,
TPS65218_CONTROL_DCDC3_MASK, TPS65218_REG_ENABLE1,
TPS65218_ENABLE1_DC3_EN, 0, 0, ldo1_dcdc3_ranges, 2,
- 0, 0),
+ 0, 0, TPS65218_REG_SEQ4, TPS65218_SEQ4_DC3_SEQ_MASK),
TPS65218_REGULATOR("DCDC4", TPS65218_DCDC_4, REGULATOR_VOLTAGE,
tps65218_ldo1_dcdc34_ops, 53,
TPS65218_REG_CONTROL_DCDC4,
TPS65218_CONTROL_DCDC4_MASK, TPS65218_REG_ENABLE1,
TPS65218_ENABLE1_DC4_EN, 0, 0, dcdc4_ranges, 2,
- 0, 0),
+ 0, 0, TPS65218_REG_SEQ4, TPS65218_SEQ4_DC4_SEQ_MASK),
TPS65218_REGULATOR("DCDC5", TPS65218_DCDC_5, REGULATOR_VOLTAGE,
tps65218_dcdc56_pmic_ops, 1, -1, -1,
TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC5_EN, 0, 0,
- NULL, 0, 0, 1000000),
+ NULL, 0, 0, 1000000, TPS65218_REG_SEQ5,
+ TPS65218_SEQ5_DC5_SEQ_MASK),
TPS65218_REGULATOR("DCDC6", TPS65218_DCDC_6, REGULATOR_VOLTAGE,
tps65218_dcdc56_pmic_ops, 1, -1, -1,
TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC6_EN, 0, 0,
- NULL, 0, 0, 1800000),
+ NULL, 0, 0, 1800000, TPS65218_REG_SEQ5,
+ TPS65218_SEQ5_DC6_SEQ_MASK),
TPS65218_REGULATOR("LDO1", TPS65218_LDO_1, REGULATOR_VOLTAGE,
tps65218_ldo1_dcdc34_ops, 64,
TPS65218_REG_CONTROL_LDO1,
TPS65218_CONTROL_LDO1_MASK, TPS65218_REG_ENABLE2,
TPS65218_ENABLE2_LDO1_EN, 0, 0, ldo1_dcdc3_ranges,
- 2, 0, 0),
+ 2, 0, 0, TPS65218_REG_SEQ6,
+ TPS65218_SEQ6_LDO1_SEQ_MASK),
TPS65218_REGULATOR("LS3", TPS65218_LS_3, REGULATOR_CURRENT,
tps65218_ls3_ops, 0, 0, 0, TPS65218_REG_ENABLE2,
TPS65218_ENABLE2_LS3_EN, TPS65218_REG_CONFIG2,
- TPS65218_CONFIG2_LS3ILIM_MASK, NULL, 0, 0, 0),
+ TPS65218_CONFIG2_LS3ILIM_MASK, NULL, 0, 0, 0, 0, 0),
};
static int tps65218_regulator_probe(struct platform_device *pdev)
@@ -300,7 +347,8 @@ static int tps65218_regulator_probe(struct platform_device *pdev)
struct regulator_dev *rdev;
const struct of_device_id *match;
struct regulator_config config = { };
- int id;
+ int id, ret;
+ unsigned int val;
match = of_match_device(tps65218_of_match, &pdev->dev);
if (!match)
@@ -327,6 +375,12 @@ static int tps65218_regulator_probe(struct platform_device *pdev)
return PTR_ERR(rdev);
}
+ ret = tps65218_reg_read(tps, regulators[id].bypass_reg, &val);
+ if (ret)
+ return ret;
+
+ tps->info[id]->strobe = val & regulators[id].bypass_mask;
+
return 0;
}
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index faeb5ee92..210681d6b 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -905,7 +905,7 @@ static struct regulator_ops twlsmps_ops = {
twl4030reg_map_mode)
#define TWL6030_FIXED_LDO(label, offset, mVolts, turnon_delay) \
TWL_FIXED_LDO(label, offset, mVolts, 0x0, turnon_delay, \
- 0x0, TWL6030, twl6030fixed_ops, 0x0)
+ 0x0, TWL6030, twl6030fixed_ops, NULL)
#define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) \
static const struct twlreg_info TWL4030_INFO_##label = { \
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index 72e97d7a5..1a8bf76a9 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -77,6 +77,20 @@ config DA8XX_REMOTEPROC
It's safe to say n here if you're not interested in multimedia
offloading.
+config QCOM_MDT_LOADER
+ tristate
+
+config QCOM_Q6V5_PIL
+ tristate "Qualcomm Hexagon V5 Peripherial Image Loader"
+ depends on OF && ARCH_QCOM
+ depends on QCOM_SMEM
+ select MFD_SYSCON
+ select QCOM_MDT_LOADER
+ select REMOTEPROC
+ help
+	  Say y here to support the Qualcomm Peripheral Image Loader for the
+	  Hexagon V5 based remote processors.
+
config ST_REMOTEPROC
tristate "ST remoteproc support"
depends on ARCH_STI
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index 279cb2edc..92d3758bd 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -11,4 +11,6 @@ obj-$(CONFIG_OMAP_REMOTEPROC) += omap_remoteproc.o
obj-$(CONFIG_STE_MODEM_RPROC) += ste_modem_rproc.o
obj-$(CONFIG_WKUP_M3_RPROC) += wkup_m3_rproc.o
obj-$(CONFIG_DA8XX_REMOTEPROC) += da8xx_remoteproc.o
+obj-$(CONFIG_QCOM_MDT_LOADER) += qcom_mdt_loader.o
+obj-$(CONFIG_QCOM_Q6V5_PIL) += qcom_q6v5_pil.o
obj-$(CONFIG_ST_REMOTEPROC) += st_remoteproc.o
diff --git a/drivers/remoteproc/qcom_mdt_loader.c b/drivers/remoteproc/qcom_mdt_loader.c
new file mode 100644
index 000000000..513e7f7c1
--- /dev/null
+++ b/drivers/remoteproc/qcom_mdt_loader.c
@@ -0,0 +1,179 @@
+/*
+ * Qualcomm Peripheral Image Loader
+ *
+ * Copyright (C) 2016 Linaro Ltd
+ * Copyright (C) 2015 Sony Mobile Communications Inc
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/elf.h>
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/remoteproc.h>
+#include <linux/slab.h>
+
+#include "remoteproc_internal.h"
+#include "qcom_mdt_loader.h"
+
+/**
+ * qcom_mdt_find_rsc_table() - provide dummy resource table for remoteproc
+ * @rproc: remoteproc handle
+ * @fw: firmware header
+ * @tablesz: place to store the size of the returned table
+ *
+ * Returns a dummy table.
+ */
+struct resource_table *qcom_mdt_find_rsc_table(struct rproc *rproc,
+ const struct firmware *fw,
+ int *tablesz)
+{
+ static struct resource_table table = { .ver = 1, };
+
+ *tablesz = sizeof(table);
+ return &table;
+}
+EXPORT_SYMBOL_GPL(qcom_mdt_find_rsc_table);
+
+/**
+ * qcom_mdt_parse() - extract useful parameters from the mdt header
+ * @fw: firmware handle
+ * @fw_addr: optional reference for base of the firmware's memory region
+ * @fw_size: optional reference for size of the firmware's memory region
+ * @fw_relocate: optional reference flagging whether the firmware is relocatable
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int qcom_mdt_parse(const struct firmware *fw, phys_addr_t *fw_addr,
+ size_t *fw_size, bool *fw_relocate)
+{
+ const struct elf32_phdr *phdrs;
+ const struct elf32_phdr *phdr;
+ const struct elf32_hdr *ehdr;
+ phys_addr_t min_addr = (phys_addr_t)ULLONG_MAX;
+ phys_addr_t max_addr = 0;
+ bool relocate = false;
+ int i;
+
+ ehdr = (struct elf32_hdr *)fw->data;
+ phdrs = (struct elf32_phdr *)(ehdr + 1);
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &phdrs[i];
+
+ if (phdr->p_type != PT_LOAD)
+ continue;
+
+ if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
+ continue;
+
+ if (!phdr->p_memsz)
+ continue;
+
+ if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
+ relocate = true;
+
+ if (phdr->p_paddr < min_addr)
+ min_addr = phdr->p_paddr;
+
+ if (phdr->p_paddr + phdr->p_memsz > max_addr)
+ max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
+ }
+
+ if (fw_addr)
+ *fw_addr = min_addr;
+ if (fw_size)
+ *fw_size = max_addr - min_addr;
+ if (fw_relocate)
+ *fw_relocate = relocate;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_mdt_parse);
+
+/**
+ * qcom_mdt_load() - load the firmware whose header is described by @fw
+ * @rproc: rproc handle
+ * @fw: firmware object for the header
+ * @firmware: filename of the firmware, for building .bXX names
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int qcom_mdt_load(struct rproc *rproc,
+ const struct firmware *fw,
+ const char *firmware)
+{
+ const struct elf32_phdr *phdrs;
+ const struct elf32_phdr *phdr;
+ const struct elf32_hdr *ehdr;
+ size_t fw_name_len;
+ char *fw_name;
+ void *ptr;
+	int ret = 0;
+ int i;
+
+ ehdr = (struct elf32_hdr *)fw->data;
+ phdrs = (struct elf32_phdr *)(ehdr + 1);
+
+ fw_name_len = strlen(firmware);
+ if (fw_name_len <= 4)
+ return -EINVAL;
+
+ fw_name = kstrdup(firmware, GFP_KERNEL);
+ if (!fw_name)
+ return -ENOMEM;
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &phdrs[i];
+
+ if (phdr->p_type != PT_LOAD)
+ continue;
+
+ if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
+ continue;
+
+ if (!phdr->p_memsz)
+ continue;
+
+ ptr = rproc_da_to_va(rproc, phdr->p_paddr, phdr->p_memsz);
+ if (!ptr) {
+ dev_err(&rproc->dev, "segment outside memory range\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ if (phdr->p_filesz) {
+ sprintf(fw_name + fw_name_len - 3, "b%02d", i);
+ ret = reject_firmware(&fw, fw_name, &rproc->dev);
+ if (ret) {
+ dev_err(&rproc->dev, "failed to load %s\n",
+ fw_name);
+ break;
+ }
+
+ memcpy(ptr, fw->data, fw->size);
+
+ release_firmware(fw);
+ }
+
+ if (phdr->p_memsz > phdr->p_filesz)
+ memset(ptr + phdr->p_filesz, 0, phdr->p_memsz - phdr->p_filesz);
+ }
+
+ kfree(fw_name);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_mdt_load);
+
+MODULE_DESCRIPTION("Firmware parser for Qualcomm MDT format");
+MODULE_LICENSE("GPL v2");
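For a caller, the intended flow of this API is: parse the .mdt header to learn the load region, then hand the same firmware object back to qcom_mdt_load() so each PT_LOAD segment's companion .bNN blob is copied into place. A hedged usage sketch (error paths trimmed; "image.mdt" is a hypothetical name, chosen because qcom_mdt_load() rewrites the last three characters to build .b00, .b01, ...):

/* Sketch of a consumer of the loader API above; not a complete driver. */
static int example_boot(struct rproc *rproc, const struct firmware *mdt)
{
	phys_addr_t fw_addr;
	size_t fw_size;
	bool relocate;
	int ret;

	ret = qcom_mdt_parse(mdt, &fw_addr, &fw_size, &relocate);
	if (ret)
		return ret;

	/* fw_addr/fw_size bound all PT_LOAD segments (4K-aligned end);
	 * relocate reports whether the image may be placed elsewhere. */
	return qcom_mdt_load(rproc, mdt, "image.mdt");
}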
diff --git a/drivers/remoteproc/qcom_mdt_loader.h b/drivers/remoteproc/qcom_mdt_loader.h
new file mode 100644
index 000000000..c5d712275
--- /dev/null
+++ b/drivers/remoteproc/qcom_mdt_loader.h
@@ -0,0 +1,13 @@
+#ifndef __QCOM_MDT_LOADER_H__
+#define __QCOM_MDT_LOADER_H__
+
+#define QCOM_MDT_TYPE_MASK (7 << 24)
+#define QCOM_MDT_TYPE_HASH (2 << 24)
+#define QCOM_MDT_RELOCATABLE BIT(27)
+
+struct resource_table *qcom_mdt_find_rsc_table(struct rproc *rproc, const struct firmware *fw, int *tablesz);
+int qcom_mdt_load(struct rproc *rproc, const struct firmware *fw, const char *fw_name);
+
+int qcom_mdt_parse(const struct firmware *fw, phys_addr_t *fw_addr, size_t *fw_size, bool *fw_relocate);
+
+#endif
diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c
new file mode 100644
index 000000000..5e41b11ba
--- /dev/null
+++ b/drivers/remoteproc/qcom_q6v5_pil.c
@@ -0,0 +1,907 @@
+/*
+ * Qualcomm Peripheral Image Loader
+ *
+ * Copyright (C) 2016 Linaro Ltd.
+ * Copyright (C) 2014 Sony Mobile Communications AB
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/remoteproc.h>
+#include <linux/reset.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
+
+#include "remoteproc_internal.h"
+#include "qcom_mdt_loader.h"
+
+#include <linux/qcom_scm.h>
+
+#define MBA_FIRMWARE_NAME "/*(DEBLOBBED)*/"
+#define MPSS_FIRMWARE_NAME "/*(DEBLOBBED)*/"
+
+#define MPSS_CRASH_REASON_SMEM 421
+
+/* RMB Status Register Values */
+#define RMB_PBL_SUCCESS 0x1
+
+#define RMB_MBA_XPU_UNLOCKED 0x1
+#define RMB_MBA_XPU_UNLOCKED_SCRIBBLED 0x2
+#define RMB_MBA_META_DATA_AUTH_SUCCESS 0x3
+#define RMB_MBA_AUTH_COMPLETE 0x4
+
+/* PBL/MBA interface registers */
+#define RMB_MBA_IMAGE_REG 0x00
+#define RMB_PBL_STATUS_REG 0x04
+#define RMB_MBA_COMMAND_REG 0x08
+#define RMB_MBA_STATUS_REG 0x0C
+#define RMB_PMI_META_DATA_REG 0x10
+#define RMB_PMI_CODE_START_REG 0x14
+#define RMB_PMI_CODE_LENGTH_REG 0x18
+
+#define RMB_CMD_META_DATA_READY 0x1
+#define RMB_CMD_LOAD_READY 0x2
+
+/* QDSP6SS Register Offsets */
+#define QDSP6SS_RESET_REG 0x014
+#define QDSP6SS_GFMUX_CTL_REG 0x020
+#define QDSP6SS_PWR_CTL_REG 0x030
+
+/* AXI Halt Register Offsets */
+#define AXI_HALTREQ_REG 0x0
+#define AXI_HALTACK_REG 0x4
+#define AXI_IDLE_REG 0x8
+
+#define HALT_ACK_TIMEOUT_MS 100
+
+/* QDSP6SS_RESET */
+#define Q6SS_STOP_CORE BIT(0)
+#define Q6SS_CORE_ARES BIT(1)
+#define Q6SS_BUS_ARES_ENABLE BIT(2)
+
+/* QDSP6SS_GFMUX_CTL */
+#define Q6SS_CLK_ENABLE BIT(1)
+
+/* QDSP6SS_PWR_CTL */
+#define Q6SS_L2DATA_SLP_NRET_N_0 BIT(0)
+#define Q6SS_L2DATA_SLP_NRET_N_1 BIT(1)
+#define Q6SS_L2DATA_SLP_NRET_N_2 BIT(2)
+#define Q6SS_L2TAG_SLP_NRET_N BIT(16)
+#define Q6SS_ETB_SLP_NRET_N BIT(17)
+#define Q6SS_L2DATA_STBY_N BIT(18)
+#define Q6SS_SLP_RET_N BIT(19)
+#define Q6SS_CLAMP_IO BIT(20)
+#define QDSS_BHS_ON BIT(21)
+#define QDSS_LDO_BYP BIT(22)
+
+struct q6v5 {
+ struct device *dev;
+ struct rproc *rproc;
+
+ void __iomem *reg_base;
+ void __iomem *rmb_base;
+
+ struct regmap *halt_map;
+ u32 halt_q6;
+ u32 halt_modem;
+ u32 halt_nc;
+
+ struct reset_control *mss_restart;
+
+ struct qcom_smem_state *state;
+ unsigned stop_bit;
+
+ struct regulator_bulk_data supply[4];
+
+ struct clk *ahb_clk;
+ struct clk *axi_clk;
+ struct clk *rom_clk;
+
+ struct completion start_done;
+ struct completion stop_done;
+ bool running;
+
+ phys_addr_t mba_phys;
+ void *mba_region;
+ size_t mba_size;
+
+ phys_addr_t mpss_phys;
+ phys_addr_t mpss_reloc;
+ void *mpss_region;
+ size_t mpss_size;
+};
+
+enum {
+ Q6V5_SUPPLY_CX,
+ Q6V5_SUPPLY_MX,
+ Q6V5_SUPPLY_MSS,
+ Q6V5_SUPPLY_PLL,
+};
+
+static int q6v5_regulator_init(struct q6v5 *qproc)
+{
+ int ret;
+
+ qproc->supply[Q6V5_SUPPLY_CX].supply = "cx";
+ qproc->supply[Q6V5_SUPPLY_MX].supply = "mx";
+ qproc->supply[Q6V5_SUPPLY_MSS].supply = "mss";
+ qproc->supply[Q6V5_SUPPLY_PLL].supply = "pll";
+
+ ret = devm_regulator_bulk_get(qproc->dev,
+ ARRAY_SIZE(qproc->supply), qproc->supply);
+ if (ret < 0) {
+ dev_err(qproc->dev, "failed to get supplies\n");
+ return ret;
+ }
+
+ regulator_set_load(qproc->supply[Q6V5_SUPPLY_CX].consumer, 100000);
+ regulator_set_load(qproc->supply[Q6V5_SUPPLY_MSS].consumer, 100000);
+ regulator_set_load(qproc->supply[Q6V5_SUPPLY_PLL].consumer, 10000);
+
+ return 0;
+}
+
+static int q6v5_regulator_enable(struct q6v5 *qproc)
+{
+ struct regulator *mss = qproc->supply[Q6V5_SUPPLY_MSS].consumer;
+ struct regulator *mx = qproc->supply[Q6V5_SUPPLY_MX].consumer;
+ int ret;
+
+ /* TODO: Q6V5_SUPPLY_CX is supposed to be set to super-turbo here */
+
+ ret = regulator_set_voltage(mx, 1050000, INT_MAX);
+ if (ret)
+ return ret;
+
+ regulator_set_voltage(mss, 1000000, 1150000);
+
+ return regulator_bulk_enable(ARRAY_SIZE(qproc->supply), qproc->supply);
+}
+
+static void q6v5_regulator_disable(struct q6v5 *qproc)
+{
+ struct regulator *mss = qproc->supply[Q6V5_SUPPLY_MSS].consumer;
+ struct regulator *mx = qproc->supply[Q6V5_SUPPLY_MX].consumer;
+
+ /* TODO: Q6V5_SUPPLY_CX corner votes should be released */
+
+ regulator_bulk_disable(ARRAY_SIZE(qproc->supply), qproc->supply);
+ regulator_set_voltage(mx, 0, INT_MAX);
+ regulator_set_voltage(mss, 0, 1150000);
+}
+
+static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
+{
+ struct q6v5 *qproc = rproc->priv;
+
+ memcpy(qproc->mba_region, fw->data, fw->size);
+
+ return 0;
+}
+
+static const struct rproc_fw_ops q6v5_fw_ops = {
+ .find_rsc_table = qcom_mdt_find_rsc_table,
+ .load = q6v5_load,
+};
+
+static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
+{
+ unsigned long timeout;
+ s32 val;
+
+ timeout = jiffies + msecs_to_jiffies(ms);
+ for (;;) {
+ val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
+ if (val)
+ break;
+
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ msleep(1);
+ }
+
+ return val;
+}
+
+static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
+{
+ unsigned long timeout;
+ s32 val;
+
+ timeout = jiffies + msecs_to_jiffies(ms);
+ for (;;) {
+ val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
+ if (val < 0)
+ break;
+
+ if (!status && val)
+ break;
+ else if (status && val == status)
+ break;
+
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ msleep(1);
+ }
+
+ return val;
+}
+
+static int q6v5proc_reset(struct q6v5 *qproc)
+{
+ u32 val;
+ int ret;
+
+ /* Assert resets, stop core */
+ val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
+ val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE);
+ writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
+
+ /* Enable power block headswitch, and wait for it to stabilize */
+ val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= QDSS_BHS_ON | QDSS_LDO_BYP;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ udelay(1);
+
+ /*
+ * Turn on memories. L2 banks should be done individually
+ * to minimize inrush current.
+ */
+ val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
+ Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= Q6SS_L2DATA_SLP_NRET_N_2;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= Q6SS_L2DATA_SLP_NRET_N_1;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= Q6SS_L2DATA_SLP_NRET_N_0;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+
+ /* Remove IO clamp */
+ val &= ~Q6SS_CLAMP_IO;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+
+ /* Bring core out of reset */
+ val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
+ val &= ~Q6SS_CORE_ARES;
+ writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
+
+ /* Turn on core clock */
+ val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
+ val |= Q6SS_CLK_ENABLE;
+ writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
+
+ /* Start core execution */
+ val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
+ val &= ~Q6SS_STOP_CORE;
+ writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
+
+ /* Wait for PBL status */
+ ret = q6v5_rmb_pbl_wait(qproc, 1000);
+ if (ret == -ETIMEDOUT) {
+ dev_err(qproc->dev, "PBL boot timed out\n");
+ } else if (ret != RMB_PBL_SUCCESS) {
+ dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
+ ret = -EINVAL;
+ } else {
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
+ struct regmap *halt_map,
+ u32 offset)
+{
+ unsigned long timeout;
+ unsigned int val;
+ int ret;
+
+ /* Check if we're already idle */
+ ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
+ if (!ret && val)
+ return;
+
+ /* Assert halt request */
+ regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);
+
+ /* Wait for halt */
+ timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS);
+ for (;;) {
+ ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val);
+ if (ret || val || time_after(jiffies, timeout))
+ break;
+
+ msleep(1);
+ }
+
+ ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
+ if (ret || !val)
+ dev_err(qproc->dev, "port failed halt\n");
+
+ /* Clear halt request (port will remain halted until reset) */
+ regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
+}
+
+static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
+{
+ unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
+ dma_addr_t phys;
+ void *ptr;
+ int ret;
+
+ ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, dma_attrs);
+ if (!ptr) {
+ dev_err(qproc->dev, "failed to allocate mdt buffer\n");
+ return -ENOMEM;
+ }
+
+ memcpy(ptr, fw->data, fw->size);
+
+ writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
+ writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
+
+ ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
+ if (ret == -ETIMEDOUT)
+ dev_err(qproc->dev, "MPSS header authentication timed out\n");
+ else if (ret < 0)
+ dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);
+
+ dma_free_attrs(qproc->dev, fw->size, ptr, phys, dma_attrs);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int q6v5_mpss_validate(struct q6v5 *qproc, const struct firmware *fw)
+{
+ const struct elf32_phdr *phdrs;
+ const struct elf32_phdr *phdr;
+ struct elf32_hdr *ehdr;
+ phys_addr_t boot_addr;
+ phys_addr_t fw_addr;
+ bool relocate;
+ size_t size;
+ int ret;
+ int i;
+
+ ret = qcom_mdt_parse(fw, &fw_addr, NULL, &relocate);
+ if (ret) {
+ dev_err(qproc->dev, "failed to parse mdt header\n");
+ return ret;
+ }
+
+ if (relocate)
+ boot_addr = qproc->mpss_phys;
+ else
+ boot_addr = fw_addr;
+
+ ehdr = (struct elf32_hdr *)fw->data;
+ phdrs = (struct elf32_phdr *)(ehdr + 1);
+	for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &phdrs[i];
+
+ if (phdr->p_type != PT_LOAD)
+ continue;
+
+ if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
+ continue;
+
+ if (!phdr->p_memsz)
+ continue;
+
+ size = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
+ if (!size) {
+ writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
+ writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
+ }
+
+ size += phdr->p_memsz;
+ writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
+ }
+
+ ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
+ if (ret == -ETIMEDOUT)
+ dev_err(qproc->dev, "MPSS authentication timed out\n");
+ else if (ret < 0)
+ dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int q6v5_mpss_load(struct q6v5 *qproc)
+{
+ const struct firmware *fw;
+ phys_addr_t fw_addr;
+ bool relocate;
+ int ret;
+
+ ret = reject_firmware(&fw, MPSS_FIRMWARE_NAME, qproc->dev);
+ if (ret < 0) {
+ dev_err(qproc->dev, "unable to load " MPSS_FIRMWARE_NAME "\n");
+ return ret;
+ }
+
+ ret = qcom_mdt_parse(fw, &fw_addr, NULL, &relocate);
+ if (ret) {
+ dev_err(qproc->dev, "failed to parse mdt header\n");
+ goto release_firmware;
+ }
+
+ if (relocate)
+ qproc->mpss_reloc = fw_addr;
+
+ /* Initialize the RMB validator */
+ writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
+
+ ret = q6v5_mpss_init_image(qproc, fw);
+ if (ret)
+ goto release_firmware;
+
+ ret = qcom_mdt_load(qproc->rproc, fw, MPSS_FIRMWARE_NAME);
+ if (ret)
+ goto release_firmware;
+
+ ret = q6v5_mpss_validate(qproc, fw);
+
+release_firmware:
+ release_firmware(fw);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int q6v5_start(struct rproc *rproc)
+{
+ struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
+ int ret;
+
+ ret = q6v5_regulator_enable(qproc);
+ if (ret) {
+ dev_err(qproc->dev, "failed to enable supplies\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(qproc->mss_restart);
+ if (ret) {
+ dev_err(qproc->dev, "failed to deassert mss restart\n");
+ goto disable_vdd;
+ }
+
+ ret = clk_prepare_enable(qproc->ahb_clk);
+ if (ret)
+ goto assert_reset;
+
+ ret = clk_prepare_enable(qproc->axi_clk);
+ if (ret)
+ goto disable_ahb_clk;
+
+ ret = clk_prepare_enable(qproc->rom_clk);
+ if (ret)
+ goto disable_axi_clk;
+
+ writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
+
+ ret = q6v5proc_reset(qproc);
+ if (ret)
+ goto halt_axi_ports;
+
+ ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
+ if (ret == -ETIMEDOUT) {
+ dev_err(qproc->dev, "MBA boot timed out\n");
+ goto halt_axi_ports;
+ } else if (ret != RMB_MBA_XPU_UNLOCKED &&
+ ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
+ dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
+ ret = -EINVAL;
+ goto halt_axi_ports;
+ }
+
+ dev_info(qproc->dev, "MBA booted, loading mpss\n");
+
+ ret = q6v5_mpss_load(qproc);
+ if (ret)
+ goto halt_axi_ports;
+
+ ret = wait_for_completion_timeout(&qproc->start_done,
+ msecs_to_jiffies(5000));
+ if (ret == 0) {
+ dev_err(qproc->dev, "start timed out\n");
+ ret = -ETIMEDOUT;
+ goto halt_axi_ports;
+ }
+
+ qproc->running = true;
+
+ /* TODO: All done, release the handover resources */
+
+ return 0;
+
+halt_axi_ports:
+ q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
+ q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
+ q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
+
+ clk_disable_unprepare(qproc->rom_clk);
+disable_axi_clk:
+ clk_disable_unprepare(qproc->axi_clk);
+disable_ahb_clk:
+ clk_disable_unprepare(qproc->ahb_clk);
+assert_reset:
+ reset_control_assert(qproc->mss_restart);
+disable_vdd:
+ q6v5_regulator_disable(qproc);
+
+ return ret;
+}
+
+static int q6v5_stop(struct rproc *rproc)
+{
+ struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
+ int ret;
+
+ qproc->running = false;
+
+ qcom_smem_state_update_bits(qproc->state,
+ BIT(qproc->stop_bit), BIT(qproc->stop_bit));
+
+ ret = wait_for_completion_timeout(&qproc->stop_done,
+ msecs_to_jiffies(5000));
+ if (ret == 0)
+ dev_err(qproc->dev, "timed out on wait\n");
+
+ qcom_smem_state_update_bits(qproc->state, BIT(qproc->stop_bit), 0);
+
+ q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
+ q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
+ q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
+
+ reset_control_assert(qproc->mss_restart);
+ clk_disable_unprepare(qproc->rom_clk);
+ clk_disable_unprepare(qproc->axi_clk);
+ clk_disable_unprepare(qproc->ahb_clk);
+ q6v5_regulator_disable(qproc);
+
+ return 0;
+}
+
+static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len)
+{
+ struct q6v5 *qproc = rproc->priv;
+ int offset;
+
+ offset = da - qproc->mpss_reloc;
+ if (offset < 0 || offset + len > qproc->mpss_size)
+ return NULL;
+
+ return qproc->mpss_region + offset;
+}
+
+static const struct rproc_ops q6v5_ops = {
+ .start = q6v5_start,
+ .stop = q6v5_stop,
+ .da_to_va = q6v5_da_to_va,
+};
+
+static irqreturn_t q6v5_wdog_interrupt(int irq, void *dev)
+{
+ struct q6v5 *qproc = dev;
+ size_t len;
+ char *msg;
+
+ /* Sometimes the stop triggers a watchdog rather than a stop-ack */
+ if (!qproc->running) {
+ complete(&qproc->stop_done);
+ return IRQ_HANDLED;
+ }
+
+ msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, MPSS_CRASH_REASON_SMEM, &len);
+ if (!IS_ERR(msg) && len > 0 && msg[0])
+ dev_err(qproc->dev, "watchdog received: %s\n", msg);
+ else
+ dev_err(qproc->dev, "watchdog without message\n");
+
+ rproc_report_crash(qproc->rproc, RPROC_WATCHDOG);
+
+ if (!IS_ERR(msg))
+ msg[0] = '\0';
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t q6v5_fatal_interrupt(int irq, void *dev)
+{
+ struct q6v5 *qproc = dev;
+ size_t len;
+ char *msg;
+
+ msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, MPSS_CRASH_REASON_SMEM, &len);
+ if (!IS_ERR(msg) && len > 0 && msg[0])
+ dev_err(qproc->dev, "fatal error received: %s\n", msg);
+ else
+ dev_err(qproc->dev, "fatal error without message\n");
+
+ rproc_report_crash(qproc->rproc, RPROC_FATAL_ERROR);
+
+ if (!IS_ERR(msg))
+ msg[0] = '\0';
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t q6v5_handover_interrupt(int irq, void *dev)
+{
+ struct q6v5 *qproc = dev;
+
+ complete(&qproc->start_done);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t q6v5_stop_ack_interrupt(int irq, void *dev)
+{
+ struct q6v5 *qproc = dev;
+
+ complete(&qproc->stop_done);
+ return IRQ_HANDLED;
+}
+
+static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
+{
+ struct of_phandle_args args;
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
+ qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(qproc->reg_base))
+ return PTR_ERR(qproc->reg_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
+ qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(qproc->rmb_base))
+ return PTR_ERR(qproc->rmb_base);
+
+ ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
+ "qcom,halt-regs", 3, 0, &args);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
+ return -EINVAL;
+ }
+
+ qproc->halt_map = syscon_node_to_regmap(args.np);
+ of_node_put(args.np);
+ if (IS_ERR(qproc->halt_map))
+ return PTR_ERR(qproc->halt_map);
+
+ qproc->halt_q6 = args.args[0];
+ qproc->halt_modem = args.args[1];
+ qproc->halt_nc = args.args[2];
+
+ return 0;
+}
+
+static int q6v5_init_clocks(struct q6v5 *qproc)
+{
+ qproc->ahb_clk = devm_clk_get(qproc->dev, "iface");
+ if (IS_ERR(qproc->ahb_clk)) {
+ dev_err(qproc->dev, "failed to get iface clock\n");
+ return PTR_ERR(qproc->ahb_clk);
+ }
+
+ qproc->axi_clk = devm_clk_get(qproc->dev, "bus");
+ if (IS_ERR(qproc->axi_clk)) {
+ dev_err(qproc->dev, "failed to get bus clock\n");
+ return PTR_ERR(qproc->axi_clk);
+ }
+
+ qproc->rom_clk = devm_clk_get(qproc->dev, "mem");
+ if (IS_ERR(qproc->rom_clk)) {
+ dev_err(qproc->dev, "failed to get mem clock\n");
+ return PTR_ERR(qproc->rom_clk);
+ }
+
+ return 0;
+}
+
+static int q6v5_init_reset(struct q6v5 *qproc)
+{
+ qproc->mss_restart = devm_reset_control_get(qproc->dev, NULL);
+ if (IS_ERR(qproc->mss_restart)) {
+ dev_err(qproc->dev, "failed to acquire mss restart\n");
+ return PTR_ERR(qproc->mss_restart);
+ }
+
+ return 0;
+}
+
+static int q6v5_request_irq(struct q6v5 *qproc,
+ struct platform_device *pdev,
+ const char *name,
+ irq_handler_t thread_fn)
+{
+ int ret;
+
+ ret = platform_get_irq_byname(pdev, name);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no %s IRQ defined\n", name);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, ret,
+ NULL, thread_fn,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "q6v5", qproc);
+ if (ret)
+ dev_err(&pdev->dev, "request %s IRQ failed\n", name);
+
+ return ret;
+}
+
+static int q6v5_alloc_memory_region(struct q6v5 *qproc)
+{
+ struct device_node *child;
+ struct device_node *node;
+ struct resource r;
+ int ret;
+
+ child = of_get_child_by_name(qproc->dev->of_node, "mba");
+ node = of_parse_phandle(child, "memory-region", 0);
+ ret = of_address_to_resource(node, 0, &r);
+ if (ret) {
+ dev_err(qproc->dev, "unable to resolve mba region\n");
+ return ret;
+ }
+
+ qproc->mba_phys = r.start;
+ qproc->mba_size = resource_size(&r);
+ qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size);
+ if (!qproc->mba_region) {
+ dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
+ &r.start, qproc->mba_size);
+ return -EBUSY;
+ }
+
+ child = of_get_child_by_name(qproc->dev->of_node, "mpss");
+ node = of_parse_phandle(child, "memory-region", 0);
+ ret = of_address_to_resource(node, 0, &r);
+ if (ret) {
+ dev_err(qproc->dev, "unable to resolve mpss region\n");
+ return ret;
+ }
+
+ qproc->mpss_phys = qproc->mpss_reloc = r.start;
+ qproc->mpss_size = resource_size(&r);
+ qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size);
+ if (!qproc->mpss_region) {
+ dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
+ &r.start, qproc->mpss_size);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int q6v5_probe(struct platform_device *pdev)
+{
+ struct q6v5 *qproc;
+ struct rproc *rproc;
+ int ret;
+
+ rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
+ MBA_FIRMWARE_NAME, sizeof(*qproc));
+ if (!rproc) {
+ dev_err(&pdev->dev, "failed to allocate rproc\n");
+ return -ENOMEM;
+ }
+
+ rproc->fw_ops = &q6v5_fw_ops;
+
+ qproc = (struct q6v5 *)rproc->priv;
+ qproc->dev = &pdev->dev;
+ qproc->rproc = rproc;
+ platform_set_drvdata(pdev, qproc);
+
+ init_completion(&qproc->start_done);
+ init_completion(&qproc->stop_done);
+
+ ret = q6v5_init_mem(qproc, pdev);
+ if (ret)
+ goto free_rproc;
+
+ ret = q6v5_alloc_memory_region(qproc);
+ if (ret)
+ goto free_rproc;
+
+ ret = q6v5_init_clocks(qproc);
+ if (ret)
+ goto free_rproc;
+
+ ret = q6v5_regulator_init(qproc);
+ if (ret)
+ goto free_rproc;
+
+ ret = q6v5_init_reset(qproc);
+ if (ret)
+ goto free_rproc;
+
+ ret = q6v5_request_irq(qproc, pdev, "wdog", q6v5_wdog_interrupt);
+ if (ret < 0)
+ goto free_rproc;
+
+ ret = q6v5_request_irq(qproc, pdev, "fatal", q6v5_fatal_interrupt);
+ if (ret < 0)
+ goto free_rproc;
+
+ ret = q6v5_request_irq(qproc, pdev, "handover", q6v5_handover_interrupt);
+ if (ret < 0)
+ goto free_rproc;
+
+ ret = q6v5_request_irq(qproc, pdev, "stop-ack", q6v5_stop_ack_interrupt);
+ if (ret < 0)
+ goto free_rproc;
+
+ qproc->state = qcom_smem_state_get(&pdev->dev, "stop", &qproc->stop_bit);
+ if (IS_ERR(qproc->state)) {
+ ret = PTR_ERR(qproc->state);
+ goto free_rproc;
+ }
+
+ ret = rproc_add(rproc);
+ if (ret)
+ goto free_rproc;
+
+ return 0;
+
+free_rproc:
+ rproc_put(rproc);
+
+ return ret;
+}
+
+static int q6v5_remove(struct platform_device *pdev)
+{
+ struct q6v5 *qproc = platform_get_drvdata(pdev);
+
+ rproc_del(qproc->rproc);
+ rproc_put(qproc->rproc);
+
+ return 0;
+}
+
+static const struct of_device_id q6v5_of_match[] = {
+ { .compatible = "qcom,q6v5-pil", },
+ { },
+};
+
+static struct platform_driver q6v5_driver = {
+ .probe = q6v5_probe,
+ .remove = q6v5_remove,
+ .driver = {
+ .name = "qcom-q6v5-pil",
+ .of_match_table = q6v5_of_match,
+ },
+};
+module_platform_driver(q6v5_driver);
+
+MODULE_DESCRIPTION("Peripheral Image Loader for Hexagon");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 0b2733db0..4be1b8c21 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -12,8 +12,22 @@ menuconfig RESET_CONTROLLER
If unsure, say no.
+if RESET_CONTROLLER
+
config RESET_OXNAS
bool
+config TI_SYSCON_RESET
+ tristate "TI SYSCON Reset Driver"
+ depends on HAS_IOMEM
+ select MFD_SYSCON
+ help
+ This enables the reset driver support for TI devices with
+ memory-mapped reset registers as part of a syscon device node. If
+ you wish to use the reset framework for such memory-mapped devices,
+ say Y here. Otherwise, say N.
+
source "drivers/reset/sti/Kconfig"
source "drivers/reset/hisilicon/Kconfig"
+
+endif
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index f173fc384..5d65a93d3 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -3,9 +3,11 @@ obj-$(CONFIG_ARCH_LPC18XX) += reset-lpc18xx.o
obj-$(CONFIG_ARCH_SOCFPGA) += reset-socfpga.o
obj-$(CONFIG_ARCH_BERLIN) += reset-berlin.o
obj-$(CONFIG_MACH_PISTACHIO) += reset-pistachio.o
+obj-$(CONFIG_ARCH_MESON) += reset-meson.o
obj-$(CONFIG_ARCH_SUNXI) += reset-sunxi.o
obj-$(CONFIG_ARCH_STI) += sti/
obj-$(CONFIG_ARCH_HISI) += hisilicon/
obj-$(CONFIG_ARCH_ZYNQ) += reset-zynq.o
obj-$(CONFIG_ATH79) += reset-ath79.o
obj-$(CONFIG_RESET_OXNAS) += reset-oxnas.o
+obj-$(CONFIG_TI_SYSCON_RESET) += reset-ti-syscon.o
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index 72b32bd15..395dc9ce4 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -93,6 +93,43 @@ void reset_controller_unregister(struct reset_controller_dev *rcdev)
}
EXPORT_SYMBOL_GPL(reset_controller_unregister);
+static void devm_reset_controller_release(struct device *dev, void *res)
+{
+ reset_controller_unregister(*(struct reset_controller_dev **)res);
+}
+
+/**
+ * devm_reset_controller_register - resource managed reset_controller_register()
+ * @dev: device that is registering this reset controller
+ * @rcdev: a pointer to the initialized reset controller device
+ *
+ * Managed reset_controller_register(). For reset controllers registered by
+ * this function, reset_controller_unregister() is automatically called on
+ * driver detach. See reset_controller_register() for more information.
+ */
+int devm_reset_controller_register(struct device *dev,
+ struct reset_controller_dev *rcdev)
+{
+ struct reset_controller_dev **rcdevp;
+ int ret;
+
+ rcdevp = devres_alloc(devm_reset_controller_release, sizeof(*rcdevp),
+ GFP_KERNEL);
+ if (!rcdevp)
+ return -ENOMEM;
+
+ ret = reset_controller_register(rcdev);
+ if (!ret) {
+ *rcdevp = rcdev;
+ devres_add(dev, rcdevp);
+ } else {
+ devres_free(rcdevp);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_reset_controller_register);
+
/**
* reset_control_reset - reset the controlled device
* @rstc: reset controller
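As a usage sketch of the helper added above (the foo_ names and ops table are invented), a reset-controller driver registered this way no longer needs a .remove callback; the devres core unregisters the controller on driver detach:

struct foo_reset_data {
        struct reset_controller_dev rcdev;
};

static int foo_reset_probe(struct platform_device *pdev)
{
        struct foo_reset_data *data;

        data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        data->rcdev.owner = THIS_MODULE;
        data->rcdev.ops = &foo_reset_ops;       /* assumed ops table */
        data->rcdev.of_node = pdev->dev.of_node;
        data->rcdev.nr_resets = 8;

        /* unregistered automatically when the driver detaches */
        return devm_reset_controller_register(&pdev->dev, &data->rcdev);
}

Several drivers later in this patch (ath79, oxnas, pistachio, socfpga, sunxi, zynq) are converted to exactly this pattern.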
diff --git a/drivers/reset/hisilicon/hi6220_reset.c b/drivers/reset/hisilicon/hi6220_reset.c
index 8f55fd4a2..35ce53eda 100644
--- a/drivers/reset/hisilicon/hi6220_reset.c
+++ b/drivers/reset/hisilicon/hi6220_reset.c
@@ -1,7 +1,8 @@
/*
* Hisilicon Hi6220 reset controller driver
*
- * Copyright (c) 2015 Hisilicon Limited.
+ * Copyright (c) 2016 Linaro Limited.
+ * Copyright (c) 2015-2016 Hisilicon Limited.
*
* Author: Feng Chen <puck.chen@hisilicon.com>
*
@@ -15,81 +16,130 @@
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
#include <linux/reset-controller.h>
#include <linux/reset.h>
#include <linux/platform_device.h>
-#define ASSERT_OFFSET 0x300
-#define DEASSERT_OFFSET 0x304
-#define MAX_INDEX 0x509
+#define PERIPH_ASSERT_OFFSET 0x300
+#define PERIPH_DEASSERT_OFFSET 0x304
+#define PERIPH_MAX_INDEX 0x509
+
+#define SC_MEDIA_RSTEN 0x052C
+#define SC_MEDIA_RSTDIS 0x0530
+#define MEDIA_MAX_INDEX 8
#define to_reset_data(x) container_of(x, struct hi6220_reset_data, rc_dev)
+enum hi6220_reset_ctrl_type {
+ PERIPHERAL,
+ MEDIA,
+};
+
struct hi6220_reset_data {
- void __iomem *assert_base;
- void __iomem *deassert_base;
- struct reset_controller_dev rc_dev;
+ struct reset_controller_dev rc_dev;
+ struct regmap *regmap;
};
-static int hi6220_reset_assert(struct reset_controller_dev *rc_dev,
- unsigned long idx)
+static int hi6220_peripheral_assert(struct reset_controller_dev *rc_dev,
+ unsigned long idx)
{
struct hi6220_reset_data *data = to_reset_data(rc_dev);
+ struct regmap *regmap = data->regmap;
+ u32 bank = idx >> 8;
+ u32 offset = idx & 0xff;
+ u32 reg = PERIPH_ASSERT_OFFSET + bank * 0x10;
- int bank = idx >> 8;
- int offset = idx & 0xff;
+ return regmap_write(regmap, reg, BIT(offset));
+}
- writel(BIT(offset), data->assert_base + (bank * 0x10));
+static int hi6220_peripheral_deassert(struct reset_controller_dev *rc_dev,
+ unsigned long idx)
+{
+ struct hi6220_reset_data *data = to_reset_data(rc_dev);
+ struct regmap *regmap = data->regmap;
+ u32 bank = idx >> 8;
+ u32 offset = idx & 0xff;
+ u32 reg = PERIPH_DEASSERT_OFFSET + bank * 0x10;
- return 0;
+ return regmap_write(regmap, reg, BIT(offset));
}
-static int hi6220_reset_deassert(struct reset_controller_dev *rc_dev,
- unsigned long idx)
+static const struct reset_control_ops hi6220_peripheral_reset_ops = {
+ .assert = hi6220_peripheral_assert,
+ .deassert = hi6220_peripheral_deassert,
+};
+
+static int hi6220_media_assert(struct reset_controller_dev *rc_dev,
+ unsigned long idx)
{
struct hi6220_reset_data *data = to_reset_data(rc_dev);
+ struct regmap *regmap = data->regmap;
- int bank = idx >> 8;
- int offset = idx & 0xff;
+ return regmap_write(regmap, SC_MEDIA_RSTEN, BIT(idx));
+}
- writel(BIT(offset), data->deassert_base + (bank * 0x10));
+static int hi6220_media_deassert(struct reset_controller_dev *rc_dev,
+ unsigned long idx)
+{
+ struct hi6220_reset_data *data = to_reset_data(rc_dev);
+ struct regmap *regmap = data->regmap;
- return 0;
+ return regmap_write(regmap, SC_MEDIA_RSTDIS, BIT(idx));
}
-static const struct reset_control_ops hi6220_reset_ops = {
- .assert = hi6220_reset_assert,
- .deassert = hi6220_reset_deassert,
+static const struct reset_control_ops hi6220_media_reset_ops = {
+ .assert = hi6220_media_assert,
+ .deassert = hi6220_media_deassert,
};
static int hi6220_reset_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ enum hi6220_reset_ctrl_type type;
struct hi6220_reset_data *data;
- struct resource *res;
- void __iomem *src_base;
+ struct regmap *regmap;
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- src_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(src_base))
- return PTR_ERR(src_base);
+ type = (enum hi6220_reset_ctrl_type)of_device_get_match_data(dev);
+
+ regmap = syscon_node_to_regmap(np);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "failed to get reset controller regmap\n");
+ return PTR_ERR(regmap);
+ }
- data->assert_base = src_base + ASSERT_OFFSET;
- data->deassert_base = src_base + DEASSERT_OFFSET;
- data->rc_dev.nr_resets = MAX_INDEX;
- data->rc_dev.ops = &hi6220_reset_ops;
- data->rc_dev.of_node = pdev->dev.of_node;
+ data->regmap = regmap;
+ data->rc_dev.of_node = np;
+ if (type == MEDIA) {
+ data->rc_dev.ops = &hi6220_media_reset_ops;
+ data->rc_dev.nr_resets = MEDIA_MAX_INDEX;
+ } else {
+ data->rc_dev.ops = &hi6220_peripheral_reset_ops;
+ data->rc_dev.nr_resets = PERIPH_MAX_INDEX;
+ }
return reset_controller_register(&data->rc_dev);
}
static const struct of_device_id hi6220_reset_match[] = {
- { .compatible = "hisilicon,hi6220-sysctrl" },
- { },
+ {
+ .compatible = "hisilicon,hi6220-sysctrl",
+ .data = (void *)PERIPHERAL,
+ },
+ {
+ .compatible = "hisilicon,hi6220-mediactrl",
+ .data = (void *)MEDIA,
+ },
+ { /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, hi6220_reset_match);
static struct platform_driver hi6220_reset_driver = {
.probe = hi6220_reset_probe,
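A worked example of the index encoding used by the peripheral ops above (arithmetic only, not part of the patch): the register bank sits in bits [15:8] of the index and the bit position in bits [7:0].

/* Sketch: decoding peripheral reset index 0x203. */
unsigned long idx = 0x203;
u32 bank   = idx >> 8;          /* 2 */
u32 offset = idx & 0xff;        /* 3 */
u32 reg    = PERIPH_ASSERT_OFFSET + bank * 0x10;  /* 0x300 + 0x20 = 0x320 */

/* hi6220_peripheral_assert() writes BIT(3) = 0x8 to 0x320; the
 * deassert path writes the same bit to 0x324 (0x304 + 0x20).
 */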
diff --git a/drivers/reset/reset-ath79.c b/drivers/reset/reset-ath79.c
index ccb940a8d..16d410cd6 100644
--- a/drivers/reset/reset-ath79.c
+++ b/drivers/reset/reset-ath79.c
@@ -112,7 +112,7 @@ static int ath79_reset_probe(struct platform_device *pdev)
ath79_reset->rcdev.of_reset_n_cells = 1;
ath79_reset->rcdev.nr_resets = 32;
- err = reset_controller_register(&ath79_reset->rcdev);
+ err = devm_reset_controller_register(&pdev->dev, &ath79_reset->rcdev);
if (err)
return err;
@@ -131,7 +131,6 @@ static int ath79_reset_remove(struct platform_device *pdev)
struct ath79_reset *ath79_reset = platform_get_drvdata(pdev);
unregister_restart_handler(&ath79_reset->restart_nb);
- reset_controller_unregister(&ath79_reset->rcdev);
return 0;
}
diff --git a/drivers/reset/reset-meson.c b/drivers/reset/reset-meson.c
new file mode 100644
index 000000000..c32f11a30
--- /dev/null
+++ b/drivers/reset/reset-meson.c
@@ -0,0 +1,136 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#define REG_COUNT 8
+#define BITS_PER_REG 32
+
+struct meson_reset {
+ void __iomem *reg_base;
+ struct reset_controller_dev rcdev;
+};
+
+static int meson_reset_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct meson_reset *data =
+ container_of(rcdev, struct meson_reset, rcdev);
+ unsigned int bank = id / BITS_PER_REG;
+ unsigned int offset = id % BITS_PER_REG;
+ void __iomem *reg_addr = data->reg_base + (bank << 2);
+
+ if (bank >= REG_COUNT)
+ return -EINVAL;
+
+ writel(BIT(offset), reg_addr);
+
+ return 0;
+}
+
+static const struct reset_control_ops meson_reset_ops = {
+ .reset = meson_reset_reset,
+};
+
+static const struct of_device_id meson_reset_dt_ids[] = {
+ { .compatible = "amlogic,meson8b-reset", },
+ { .compatible = "amlogic,meson-gxbb-reset", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, meson_reset_dt_ids);
+
+static int meson_reset_probe(struct platform_device *pdev)
+{
+ struct meson_reset *data;
+ struct resource *res;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->reg_base))
+ return PTR_ERR(data->reg_base);
+
+ platform_set_drvdata(pdev, data);
+
+ data->rcdev.owner = THIS_MODULE;
+ data->rcdev.nr_resets = REG_COUNT * BITS_PER_REG;
+ data->rcdev.ops = &meson_reset_ops;
+ data->rcdev.of_node = pdev->dev.of_node;
+
+ return devm_reset_controller_register(&pdev->dev, &data->rcdev);
+}
+
+static struct platform_driver meson_reset_driver = {
+ .probe = meson_reset_probe,
+ .driver = {
+ .name = "meson_reset",
+ .of_match_table = meson_reset_dt_ids,
+ },
+};
+
+module_platform_driver(meson_reset_driver);
+
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_DESCRIPTION("Amlogic Meson Reset Controller driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/reset/reset-oxnas.c b/drivers/reset/reset-oxnas.c
index c60fb2dac..944980572 100644
--- a/drivers/reset/reset-oxnas.c
+++ b/drivers/reset/reset-oxnas.c
@@ -112,21 +112,11 @@ static int oxnas_reset_probe(struct platform_device *pdev)
data->rcdev.ops = &oxnas_reset_ops;
data->rcdev.of_node = pdev->dev.of_node;
- return reset_controller_register(&data->rcdev);
-}
-
-static int oxnas_reset_remove(struct platform_device *pdev)
-{
- struct oxnas_reset *data = platform_get_drvdata(pdev);
-
- reset_controller_unregister(&data->rcdev);
-
- return 0;
+ return devm_reset_controller_register(&pdev->dev, &data->rcdev);
}
static struct platform_driver oxnas_reset_driver = {
.probe = oxnas_reset_probe,
- .remove = oxnas_reset_remove,
.driver = {
.name = "oxnas-reset",
.of_match_table = oxnas_reset_dt_ids,
diff --git a/drivers/reset/reset-pistachio.c b/drivers/reset/reset-pistachio.c
index 72a97a15a..bbc4c06dd 100644
--- a/drivers/reset/reset-pistachio.c
+++ b/drivers/reset/reset-pistachio.c
@@ -121,16 +121,7 @@ static int pistachio_reset_probe(struct platform_device *pdev)
rd->rcdev.ops = &pistachio_reset_ops;
rd->rcdev.of_node = np;
- return reset_controller_register(&rd->rcdev);
-}
-
-static int pistachio_reset_remove(struct platform_device *pdev)
-{
- struct pistachio_reset_data *data = platform_get_drvdata(pdev);
-
- reset_controller_unregister(&data->rcdev);
-
- return 0;
+ return devm_reset_controller_register(dev, &rd->rcdev);
}
static const struct of_device_id pistachio_reset_dt_ids[] = {
@@ -141,7 +132,6 @@ MODULE_DEVICE_TABLE(of, pistachio_reset_dt_ids);
static struct platform_driver pistachio_reset_driver = {
.probe = pistachio_reset_probe,
- .remove = pistachio_reset_remove,
.driver = {
.name = "pistachio-reset",
.of_match_table = pistachio_reset_dt_ids,
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
index cd05a7032..12add9b0f 100644
--- a/drivers/reset/reset-socfpga.c
+++ b/drivers/reset/reset-socfpga.c
@@ -134,16 +134,7 @@ static int socfpga_reset_probe(struct platform_device *pdev)
data->rcdev.ops = &socfpga_reset_ops;
data->rcdev.of_node = pdev->dev.of_node;
- return reset_controller_register(&data->rcdev);
-}
-
-static int socfpga_reset_remove(struct platform_device *pdev)
-{
- struct socfpga_reset_data *data = platform_get_drvdata(pdev);
-
- reset_controller_unregister(&data->rcdev);
-
- return 0;
+ return devm_reset_controller_register(dev, &data->rcdev);
}
static const struct of_device_id socfpga_reset_dt_ids[] = {
@@ -153,7 +144,6 @@ static const struct of_device_id socfpga_reset_dt_ids[] = {
static struct platform_driver socfpga_reset_driver = {
.probe = socfpga_reset_probe,
- .remove = socfpga_reset_remove,
.driver = {
.name = "socfpga-reset",
.of_match_table = socfpga_reset_dt_ids,
diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c
index 677f86555..3080190b3 100644
--- a/drivers/reset/reset-sunxi.c
+++ b/drivers/reset/reset-sunxi.c
@@ -165,21 +165,11 @@ static int sunxi_reset_probe(struct platform_device *pdev)
data->rcdev.ops = &sunxi_reset_ops;
data->rcdev.of_node = pdev->dev.of_node;
- return reset_controller_register(&data->rcdev);
-}
-
-static int sunxi_reset_remove(struct platform_device *pdev)
-{
- struct sunxi_reset_data *data = platform_get_drvdata(pdev);
-
- reset_controller_unregister(&data->rcdev);
-
- return 0;
+ return devm_reset_controller_register(&pdev->dev, &data->rcdev);
}
static struct platform_driver sunxi_reset_driver = {
.probe = sunxi_reset_probe,
- .remove = sunxi_reset_remove,
.driver = {
.name = "sunxi-reset",
.of_match_table = sunxi_reset_dt_ids,
diff --git a/drivers/reset/reset-ti-syscon.c b/drivers/reset/reset-ti-syscon.c
new file mode 100644
index 000000000..47f0ffd3b
--- /dev/null
+++ b/drivers/reset/reset-ti-syscon.c
@@ -0,0 +1,237 @@
+/*
+ * TI SYSCON regmap reset driver
+ *
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
+ * Suman Anna <s-anna@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/reset/ti-syscon.h>
+
+/**
+ * struct ti_syscon_reset_control - reset control structure
+ * @assert_offset: reset assert control register offset from syscon base
+ * @assert_bit: reset assert bit in the reset assert control register
+ * @deassert_offset: reset deassert control register offset from syscon base
+ * @deassert_bit: reset deassert bit in the reset deassert control register
+ * @status_offset: reset status register offset from syscon base
+ * @status_bit: reset status bit in the reset status register
+ * @flags: reset flag indicating how the (de)assert and status are handled
+ */
+struct ti_syscon_reset_control {
+ unsigned int assert_offset;
+ unsigned int assert_bit;
+ unsigned int deassert_offset;
+ unsigned int deassert_bit;
+ unsigned int status_offset;
+ unsigned int status_bit;
+ u32 flags;
+};
+
+/**
+ * struct ti_syscon_reset_data - reset controller information structure
+ * @rcdev: reset controller entity
+ * @regmap: regmap handle containing the memory-mapped reset registers
+ * @controls: array of reset controls
+ * @nr_controls: number of controls in control array
+ */
+struct ti_syscon_reset_data {
+ struct reset_controller_dev rcdev;
+ struct regmap *regmap;
+ struct ti_syscon_reset_control *controls;
+ unsigned int nr_controls;
+};
+
+#define to_ti_syscon_reset_data(rcdev) \
+ container_of(rcdev, struct ti_syscon_reset_data, rcdev)
+
+/**
+ * ti_syscon_reset_assert() - assert device reset
+ * @rcdev: reset controller entity
+ * @id: ID of the reset to be asserted
+ *
+ * This function implements the reset driver op to assert a device's reset.
+ * This asserts the reset in a manner prescribed by the reset flags.
+ *
+ * Return: 0 for successful request, else a corresponding error value
+ */
+static int ti_syscon_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct ti_syscon_reset_data *data = to_ti_syscon_reset_data(rcdev);
+ struct ti_syscon_reset_control *control;
+ unsigned int mask, value;
+
+ if (id >= data->nr_controls)
+ return -EINVAL;
+
+ control = &data->controls[id];
+
+ if (control->flags & ASSERT_NONE)
+ return -ENOTSUPP; /* assert not supported for this reset */
+
+ mask = BIT(control->assert_bit);
+ value = (control->flags & ASSERT_SET) ? mask : 0x0;
+
+ return regmap_update_bits(data->regmap, control->assert_offset, mask, value);
+}
+
+/**
+ * ti_syscon_reset_deassert() - deassert device reset
+ * @rcdev: reset controller entity
+ * @id: ID of reset to be deasserted
+ *
+ * This function implements the reset driver op to deassert a device's reset.
+ * This deasserts the reset in a manner prescribed by the reset flags.
+ *
+ * Return: 0 for successful request, else a corresponding error value
+ */
+static int ti_syscon_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct ti_syscon_reset_data *data = to_ti_syscon_reset_data(rcdev);
+ struct ti_syscon_reset_control *control;
+ unsigned int mask, value;
+
+ if (id >= data->nr_controls)
+ return -EINVAL;
+
+ control = &data->controls[id];
+
+ if (control->flags & DEASSERT_NONE)
+ return -ENOTSUPP; /* deassert not supported for this reset */
+
+ mask = BIT(control->deassert_bit);
+ value = (control->flags & DEASSERT_SET) ? mask : 0x0;
+
+ return regmap_update_bits(data->regmap, control->deassert_offset, mask, value);
+}
+
+/**
+ * ti_syscon_reset_status() - check device reset status
+ * @rcdev: reset controller entity
+ * @id: ID of the reset for which the status is being requested
+ *
+ * This function implements the reset driver op to return the status of a
+ * device's reset.
+ *
+ * Return: 0 if reset is deasserted, true if reset is asserted, else a
+ * corresponding error value
+ */
+static int ti_syscon_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct ti_syscon_reset_data *data = to_ti_syscon_reset_data(rcdev);
+ struct ti_syscon_reset_control *control;
+ unsigned int reset_state;
+ int ret;
+
+ if (id >= data->nr_controls)
+ return -EINVAL;
+
+ control = &data->controls[id];
+
+ if (control->flags & STATUS_NONE)
+ return -ENOTSUPP; /* status not supported for this reset */
+
+ ret = regmap_read(data->regmap, control->status_offset, &reset_state);
+ if (ret)
+ return ret;
+
+ return (reset_state & BIT(control->status_bit)) &&
+ (control->flags & STATUS_SET);
+}
+
+static const struct reset_control_ops ti_syscon_reset_ops = {
+ .assert = ti_syscon_reset_assert,
+ .deassert = ti_syscon_reset_deassert,
+ .status = ti_syscon_reset_status,
+};
+
+static int ti_syscon_reset_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct ti_syscon_reset_data *data;
+ struct regmap *regmap;
+ const __be32 *list;
+ struct ti_syscon_reset_control *controls;
+ int size, nr_controls, i;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ regmap = syscon_node_to_regmap(np->parent);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ list = of_get_property(np, "ti,reset-bits", &size);
+ if (!list || (size / sizeof(*list)) % 7 != 0) {
+ dev_err(dev, "invalid DT reset description\n");
+ return -EINVAL;
+ }
+
+ nr_controls = (size / sizeof(*list)) / 7;
+ controls = devm_kzalloc(dev, nr_controls * sizeof(*controls), GFP_KERNEL);
+ if (!controls)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_controls; i++) {
+ controls[i].assert_offset = be32_to_cpup(list++);
+ controls[i].assert_bit = be32_to_cpup(list++);
+ controls[i].deassert_offset = be32_to_cpup(list++);
+ controls[i].deassert_bit = be32_to_cpup(list++);
+ controls[i].status_offset = be32_to_cpup(list++);
+ controls[i].status_bit = be32_to_cpup(list++);
+ controls[i].flags = be32_to_cpup(list++);
+ }
+
+ data->rcdev.ops = &ti_syscon_reset_ops;
+ data->rcdev.owner = THIS_MODULE;
+ data->rcdev.of_node = np;
+ data->rcdev.nr_resets = nr_controls;
+ data->regmap = regmap;
+ data->controls = controls;
+ data->nr_controls = nr_controls;
+
+ platform_set_drvdata(pdev, data);
+
+ return devm_reset_controller_register(dev, &data->rcdev);
+}
+
+static const struct of_device_id ti_syscon_reset_of_match[] = {
+ { .compatible = "ti,syscon-reset", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ti_syscon_reset_of_match);
+
+static struct platform_driver ti_syscon_reset_driver = {
+ .probe = ti_syscon_reset_probe,
+ .driver = {
+ .name = "ti-syscon-reset",
+ .of_match_table = ti_syscon_reset_of_match,
+ },
+};
+module_platform_driver(ti_syscon_reset_driver);
+
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");
+MODULE_DESCRIPTION("TI SYSCON Regmap Reset Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/reset/reset-zynq.c b/drivers/reset/reset-zynq.c
index a7e87bc45..138f2f205 100644
--- a/drivers/reset/reset-zynq.c
+++ b/drivers/reset/reset-zynq.c
@@ -122,16 +122,7 @@ static int zynq_reset_probe(struct platform_device *pdev)
priv->rcdev.ops = &zynq_reset_ops;
priv->rcdev.of_node = pdev->dev.of_node;
- return reset_controller_register(&priv->rcdev);
-}
-
-static int zynq_reset_remove(struct platform_device *pdev)
-{
- struct zynq_reset_data *priv = platform_get_drvdata(pdev);
-
- reset_controller_unregister(&priv->rcdev);
-
- return 0;
+ return devm_reset_controller_register(&pdev->dev, &priv->rcdev);
}
static const struct of_device_id zynq_reset_dt_ids[] = {
@@ -141,7 +132,6 @@ static const struct of_device_id zynq_reset_dt_ids[] = {
static struct platform_driver zynq_reset_driver = {
.probe = zynq_reset_probe,
- .remove = zynq_reset_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = zynq_reset_dt_ids,
diff --git a/drivers/reset/sti/Kconfig b/drivers/reset/sti/Kconfig
index f8c15a37f..613178553 100644
--- a/drivers/reset/sti/Kconfig
+++ b/drivers/reset/sti/Kconfig
@@ -2,7 +2,6 @@ if ARCH_STI
config STI_RESET_SYSCFG
bool
- select RESET_CONTROLLER
config STIH415_RESET
bool
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 18639e0cb..e215f5079 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -5,6 +5,10 @@
config RTC_LIB
bool
+config RTC_MC146818_LIB
+ bool
+ select RTC_LIB
+
menuconfig RTC_CLASS
bool "Real Time Clock"
default n
@@ -574,10 +578,10 @@ config RTC_DRV_EM3027
will be called rtc-em3027.
config RTC_DRV_RV8803
- tristate "Micro Crystal RV8803"
+ tristate "Micro Crystal RV8803, Epson RX8900"
help
- If you say yes here you get support for the Micro Crystal
- RV8803 RTC chips.
+ If you say yes here you get support for the Micro Crystal RV8803 and
+ Epson RX8900 RTC chips.
This driver can also be built as a module. If so, the module
will be called rtc-rv8803.
@@ -670,6 +674,18 @@ config RTC_DRV_DS1390
This driver can also be built as a module. If so, the module
will be called rtc-ds1390.
+config RTC_DRV_MAX6916
+ tristate "Maxim MAX6916"
+ help
+ If you say yes here you will get support for the
+ Maxim MAX6916 SPI RTC chip.
+
+ This driver only supports the RTC feature, and not other chip
+ features such as alarms.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-max6916.
+
config RTC_DRV_R9701
tristate "Epson RTC-9701JE"
help
@@ -795,8 +811,9 @@ comment "Platform RTC drivers"
config RTC_DRV_CMOS
tristate "PC-style 'CMOS'"
- depends on X86 || ARM || M32R || PPC || MIPS || SPARC64
+ depends on X86 || ARM || M32R || PPC || MIPS || SPARC64 || MN10300
default y if X86
+ select RTC_MC146818_LIB
help
Say "yes" here to get direct support for the real time clock
found in every PC or ACPI-based system, and some other boards.
@@ -815,6 +832,7 @@ config RTC_DRV_CMOS
config RTC_DRV_ALPHA
bool "Alpha PC-style CMOS"
depends on ALPHA
+ select RTC_MC146818_LIB
default y
help
Direct support for the real-time clock found on every Alpha
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index ea2833723..7cf7ad559 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_RTC_LIB) += rtc-lib.o
obj-$(CONFIG_RTC_HCTOSYS) += hctosys.o
obj-$(CONFIG_RTC_SYSTOHC) += systohc.o
obj-$(CONFIG_RTC_CLASS) += rtc-core.o
+obj-$(CONFIG_RTC_MC146818_LIB) += rtc-mc146818-lib.o
rtc-core-y := class.o interface.o
ifdef CONFIG_RTC_DRV_EFI
@@ -85,6 +86,7 @@ obj-$(CONFIG_RTC_DRV_M48T59) += rtc-m48t59.o
obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o
obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o
obj-$(CONFIG_RTC_DRV_MAX6902) += rtc-max6902.o
+obj-$(CONFIG_RTC_DRV_MAX6916) += rtc-max6916.o
obj-$(CONFIG_RTC_DRV_MAX77686) += rtc-max77686.o
obj-$(CONFIG_RTC_DRV_MAX8907) += rtc-max8907.o
obj-$(CONFIG_RTC_DRV_MAX8925) += rtc-max8925.o
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 9ef5f6f89..84a52db9b 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -104,7 +104,17 @@ static int rtc_read_alarm_internal(struct rtc_device *rtc, struct rtc_wkalrm *al
else if (!rtc->ops->read_alarm)
err = -EINVAL;
else {
- memset(alarm, 0, sizeof(struct rtc_wkalrm));
+ alarm->enabled = 0;
+ alarm->pending = 0;
+ alarm->time.tm_sec = -1;
+ alarm->time.tm_min = -1;
+ alarm->time.tm_hour = -1;
+ alarm->time.tm_mday = -1;
+ alarm->time.tm_mon = -1;
+ alarm->time.tm_year = -1;
+ alarm->time.tm_wday = -1;
+ alarm->time.tm_yday = -1;
+ alarm->time.tm_isdst = -1;
err = rtc->ops->read_alarm(rtc->dev.parent, alarm);
}
@@ -383,7 +393,7 @@ int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
rtc->aie_timer.period = ktime_set(0, 0);
- /* Alarm has to be enabled & in the futrure for us to enqueue it */
+ /* Alarm has to be enabled & in the future for us to enqueue it */
if (alarm->enabled && (rtc_tm_to_ktime(now).tv64 <
rtc->aie_timer.node.expires.tv64)) {
@@ -395,8 +405,6 @@ int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
}
EXPORT_SYMBOL_GPL(rtc_initialize_alarm);
-
-
int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
{
int err = mutex_lock_interruptible(&rtc->ops_lock);
@@ -748,9 +756,23 @@ EXPORT_SYMBOL_GPL(rtc_irq_set_freq);
*/
static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
{
+ struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
+ struct rtc_time tm;
+ ktime_t now;
+
timer->enabled = 1;
+ __rtc_read_time(rtc, &tm);
+ now = rtc_tm_to_ktime(tm);
+
+ /* Skip over expired timers */
+ while (next) {
+ if (next->expires.tv64 >= now.tv64)
+ break;
+ next = timerqueue_iterate_next(next);
+ }
+
timerqueue_add(&rtc->timerqueue, &timer->node);
- if (&timer->node == timerqueue_getnext(&rtc->timerqueue)) {
+ if (!next) {
struct rtc_wkalrm alarm;
int err;
alarm.time = rtc_ktime_to_tm(timer->node.expires);
diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c
index ba0d61934..fea9a60b0 100644
--- a/drivers/rtc/rtc-abx80x.c
+++ b/drivers/rtc/rtc-abx80x.c
@@ -643,17 +643,15 @@ static int abx80x_probe(struct i2c_client *client,
return err;
}
- err = devm_add_action(&client->dev, rtc_calib_remove_sysfs_group,
- &client->dev);
- if (err) {
- rtc_calib_remove_sysfs_group(&client->dev);
+ err = devm_add_action_or_reset(&client->dev,
+ rtc_calib_remove_sysfs_group,
+ &client->dev);
+ if (err)
dev_err(&client->dev,
"Failed to add sysfs cleanup action: %d\n",
err);
- return err;
- }
- return 0;
+ return err;
}
static int abx80x_remove(struct i2c_client *client)
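The helper adopted here differs from plain devm_add_action() in one way worth noting: when the devres allocation fails, devm_add_action_or_reset() invokes the cleanup action itself before returning the error, which is why the manual unwind branch can be dropped. A generic sketch with invented names:

static void foo_remove_group(void *data)
{
        struct device *dev = data;

        sysfs_remove_group(&dev->kobj, &foo_attr_group); /* assumed group */
}

static int foo_register_sysfs(struct device *dev)
{
        int err = sysfs_create_group(&dev->kobj, &foo_attr_group);

        if (err)
                return err;

        /* on failure this has already called foo_remove_group(dev) */
        return devm_add_action_or_reset(dev, foo_remove_group, dev);
}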
diff --git a/drivers/rtc/rtc-asm9260.c b/drivers/rtc/rtc-asm9260.c
index 355fdb97a..5219916ce 100644
--- a/drivers/rtc/rtc-asm9260.c
+++ b/drivers/rtc/rtc-asm9260.c
@@ -343,7 +343,6 @@ static struct platform_driver asm9260_rtc_driver = {
.remove = asm9260_rtc_remove,
.driver = {
.name = "asm9260-rtc",
- .owner = THIS_MODULE,
.of_match_table = asm9260_dt_ids,
},
};
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index 99732e6f8..7418a763c 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -375,6 +375,7 @@ static int at91_rtc_probe(struct platform_device *pdev)
if (!rtc)
return -ENOMEM;
+ spin_lock_init(&rtc->lock);
rtc->irq = irq;
/* platform setup code should have handled this; sigh */
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index fbe9c7243..43745cac0 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -43,7 +43,7 @@
#include <linux/of_platform.h>
/* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
-#include <asm-generic/rtc.h>
+#include <linux/mc146818rtc.h>
struct cmos_rtc {
struct rtc_device *rtc;
@@ -190,10 +190,10 @@ static inline void cmos_write_bank2(unsigned char val, unsigned char addr)
static int cmos_read_time(struct device *dev, struct rtc_time *t)
{
/* REVISIT: if the clock has a "century" register, use
- * that instead of the heuristic in get_rtc_time().
+ * that instead of the heuristic in mc146818_get_time().
* That'll make Y3K compatibility (year > 2070) easy!
*/
- get_rtc_time(t);
+ mc146818_get_time(t);
return 0;
}
@@ -205,7 +205,7 @@ static int cmos_set_time(struct device *dev, struct rtc_time *t)
* takes effect exactly 500ms after we write the register.
* (Also queueing and other delays before we get this far.)
*/
- return set_rtc_time(t);
+ return mc146818_set_time(t);
}
static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
@@ -220,8 +220,6 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
* Some also support day and month, for alarms up to a year in
* the future.
*/
- t->time.tm_mday = -1;
- t->time.tm_mon = -1;
spin_lock_irq(&rtc_lock);
t->time.tm_sec = CMOS_READ(RTC_SECONDS_ALARM);
@@ -272,7 +270,6 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
}
}
}
- t->time.tm_year = -1;
t->enabled = !!(rtc_control & RTC_AIE);
t->pending = 0;
@@ -630,7 +627,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
address_space = 64;
#elif defined(__i386__) || defined(__x86_64__) || defined(__arm__) \
|| defined(__sparc__) || defined(__mips__) \
- || defined(__powerpc__)
+ || defined(__powerpc__) || defined(CONFIG_MN10300)
address_space = 128;
#else
#warning Assuming 128 bytes of RTC+NVRAM address space, not 64 bytes.
@@ -1142,14 +1139,14 @@ static __init void cmos_of_init(struct platform_device *pdev)
if (val)
CMOS_WRITE(be32_to_cpup(val), RTC_FREQ_SELECT);
- get_rtc_time(&time);
+ cmos_read_time(&pdev->dev, &time);
ret = rtc_valid_tm(&time);
if (ret) {
struct rtc_time def_time = {
.tm_year = 1,
.tm_mday = 1,
};
- set_rtc_time(&def_time);
+ cmos_set_time(&pdev->dev, &def_time);
}
}
#else
diff --git a/drivers/rtc/rtc-da9052.c b/drivers/rtc/rtc-da9052.c
index a20bcf0e3..427337756 100644
--- a/drivers/rtc/rtc-da9052.c
+++ b/drivers/rtc/rtc-da9052.c
@@ -85,6 +85,7 @@ static int da9052_read_alarm(struct da9052_rtc *rtc, struct rtc_time *rtc_tm)
rtc_tm->tm_mday = v[0][2] & DA9052_RTC_DAY;
rtc_tm->tm_hour = v[0][1] & DA9052_RTC_HOUR;
rtc_tm->tm_min = v[0][0] & DA9052_RTC_MIN;
+ rtc_tm->tm_sec = 0;
ret = rtc_valid_tm(rtc_tm);
return ret;
diff --git a/drivers/rtc/rtc-da9055.c b/drivers/rtc/rtc-da9055.c
index 7ec0872d5..678af8648 100644
--- a/drivers/rtc/rtc-da9055.c
+++ b/drivers/rtc/rtc-da9055.c
@@ -74,6 +74,7 @@ static int da9055_read_alarm(struct da9055 *da9055, struct rtc_time *rtc_tm)
rtc_tm->tm_mday = v[2] & DA9055_RTC_ALM_DAY;
rtc_tm->tm_hour = v[1] & DA9055_RTC_ALM_HOUR;
rtc_tm->tm_min = v[0] & DA9055_RTC_ALM_MIN;
+ rtc_tm->tm_sec = 0;
return rtc_valid_tm(rtc_tm);
}
diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c
index c5432bf64..dba60c1df 100644
--- a/drivers/rtc/rtc-davinci.c
+++ b/drivers/rtc/rtc-davinci.c
@@ -388,6 +388,8 @@ static int davinci_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
u8 day0, day1;
unsigned long flags;
+ alm->time.tm_sec = 0;
+
spin_lock_irqsave(&davinci_rtc_lock, flags);
davinci_rtcss_calendar_wait(davinci_rtc);
diff --git a/drivers/rtc/rtc-ds1286.c b/drivers/rtc/rtc-ds1286.c
index 756e509f6..ef75c349d 100644
--- a/drivers/rtc/rtc-ds1286.c
+++ b/drivers/rtc/rtc-ds1286.c
@@ -16,7 +16,7 @@
#include <linux/rtc.h>
#include <linux/platform_device.h>
#include <linux/bcd.h>
-#include <linux/ds1286.h>
+#include <linux/rtc/ds1286.h>
#include <linux/io.h>
#include <linux/slab.h>
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 8e41c4613..72b22935e 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -313,13 +313,6 @@ static int ds1305_get_alarm(struct device *dev, struct rtc_wkalrm *alm)
alm->time.tm_sec = bcd2bin(buf[DS1305_SEC]);
alm->time.tm_min = bcd2bin(buf[DS1305_MIN]);
alm->time.tm_hour = bcd2hour(buf[DS1305_HOUR]);
- alm->time.tm_mday = -1;
- alm->time.tm_mon = -1;
- alm->time.tm_year = -1;
- /* next three fields are unused by Linux */
- alm->time.tm_wday = -1;
- alm->time.tm_mday = -1;
- alm->time.tm_isdst = -1;
return 0;
}
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index f25f7dce6..8e1c5cb6e 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -482,11 +482,6 @@ static int ds1337_read_alarm(struct device *dev, struct rtc_wkalrm *t)
t->time.tm_min = bcd2bin(ds1307->regs[1] & 0x7f);
t->time.tm_hour = bcd2bin(ds1307->regs[2] & 0x3f);
t->time.tm_mday = bcd2bin(ds1307->regs[3] & 0x3f);
- t->time.tm_mon = -1;
- t->time.tm_year = -1;
- t->time.tm_wday = -1;
- t->time.tm_yday = -1;
- t->time.tm_isdst = -1;
/* ... and status */
t->enabled = !!(ds1307->regs[7] & DS1337_BIT_A1IE);
diff --git a/drivers/rtc/rtc-ds1343.c b/drivers/rtc/rtc-ds1343.c
index 23fa9f0cb..895fbeeb4 100644
--- a/drivers/rtc/rtc-ds1343.c
+++ b/drivers/rtc/rtc-ds1343.c
@@ -504,12 +504,6 @@ static int ds1343_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
alarm->time.tm_hour = priv->alarm_hour < 0 ? 0 : priv->alarm_hour;
alarm->time.tm_mday = priv->alarm_mday < 0 ? 0 : priv->alarm_mday;
- alarm->time.tm_mon = -1;
- alarm->time.tm_year = -1;
- alarm->time.tm_wday = -1;
- alarm->time.tm_yday = -1;
- alarm->time.tm_isdst = -1;
-
out:
mutex_unlock(&priv->mutex);
return res;
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
index b3ce3c652..ed43b4311 100644
--- a/drivers/rtc/rtc-ds1685.c
+++ b/drivers/rtc/rtc-ds1685.c
@@ -103,6 +103,26 @@ ds1685_rtc_bin2bcd(struct ds1685_priv *rtc, u8 val, u8 bin_mask, u8 bcd_mask)
}
/**
* ds1685_rtc_check_mday - check validity of the day of month.
+ * @rtc: pointer to the ds1685 rtc structure.
+ * @mday: day of month.
+ *
+ * Returns -EDOM if the day of month is not within 1..31 range.
+ */
+static inline int
+ds1685_rtc_check_mday(struct ds1685_priv *rtc, u8 mday)
+{
+ if (rtc->bcd_mode) {
+ if (mday < 0x01 || mday > 0x31 || (mday & 0x0f) > 0x09)
+ return -EDOM;
+ } else {
+ if (mday < 1 || mday > 31)
+ return -EDOM;
+ }
+ return 0;
+}
+
+/**
* ds1685_rtc_switch_to_bank0 - switch the rtc to bank 0.
* @rtc: pointer to the ds1685 rtc structure.
*/
@@ -377,6 +397,7 @@ ds1685_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
struct platform_device *pdev = to_platform_device(dev);
struct ds1685_priv *rtc = platform_get_drvdata(pdev);
u8 seconds, minutes, hours, mday, ctrlb, ctrlc;
+ int ret;
/* Fetch the alarm info from the RTC alarm registers. */
ds1685_rtc_begin_data_access(rtc);
@@ -388,34 +409,29 @@ ds1685_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
ctrlc = rtc->read(rtc, RTC_CTRL_C);
ds1685_rtc_end_data_access(rtc);
- /* Check month date. */
- if (!(mday >= 1) && (mday <= 31))
- return -EDOM;
+ /* Check the month date for validity. */
+ ret = ds1685_rtc_check_mday(rtc, mday);
+ if (ret)
+ return ret;
/*
* Check the three alarm bytes.
*
* The Linux RTC system doesn't support the "don't care" capability
* of this RTC chip. We check for it anyway in case support is
- * added in the future.
+ * added in the future and only assign when we care.
*/
- if (unlikely(seconds >= 0xc0))
- alrm->time.tm_sec = -1;
- else
+ if (likely(seconds < 0xc0))
alrm->time.tm_sec = ds1685_rtc_bcd2bin(rtc, seconds,
RTC_SECS_BCD_MASK,
RTC_SECS_BIN_MASK);
- if (unlikely(minutes >= 0xc0))
- alrm->time.tm_min = -1;
- else
+ if (likely(minutes < 0xc0))
alrm->time.tm_min = ds1685_rtc_bcd2bin(rtc, minutes,
RTC_MINS_BCD_MASK,
RTC_MINS_BIN_MASK);
- if (unlikely(hours >= 0xc0))
- alrm->time.tm_hour = -1;
- else
+ if (likely(hours < 0xc0))
alrm->time.tm_hour = ds1685_rtc_bcd2bin(rtc, hours,
RTC_HRS_24_BCD_MASK,
RTC_HRS_24_BIN_MASK);
@@ -423,11 +439,6 @@ ds1685_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
/* Write the data to rtc_wkalrm. */
alrm->time.tm_mday = ds1685_rtc_bcd2bin(rtc, mday, RTC_MDAY_BCD_MASK,
RTC_MDAY_BIN_MASK);
- alrm->time.tm_mon = -1;
- alrm->time.tm_year = -1;
- alrm->time.tm_wday = -1;
- alrm->time.tm_yday = -1;
- alrm->time.tm_isdst = -1;
alrm->enabled = !!(ctrlb & RTC_CTRL_B_AIE);
alrm->pending = !!(ctrlc & RTC_CTRL_C_AF);
@@ -445,6 +456,7 @@ ds1685_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
struct platform_device *pdev = to_platform_device(dev);
struct ds1685_priv *rtc = platform_get_drvdata(pdev);
u8 ctrlb, seconds, minutes, hours, mday;
+ int ret;
/* Fetch the alarm info and convert to BCD. */
seconds = ds1685_rtc_bin2bcd(rtc, alrm->time.tm_sec,
@@ -461,8 +473,9 @@ ds1685_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
RTC_MDAY_BCD_MASK);
/* Check the month date for validity. */
- if (!(mday >= 1) && (mday <= 31))
- return -EDOM;
+ ret = ds1685_rtc_check_mday(rtc, mday);
+ if (ret)
+ return ret;
/*
* Check the three alarm bytes.
diff --git a/drivers/rtc/rtc-ds2404.c b/drivers/rtc/rtc-ds2404.c
index 16310fe79..9a1582ed7 100644
--- a/drivers/rtc/rtc-ds2404.c
+++ b/drivers/rtc/rtc-ds2404.c
@@ -13,7 +13,7 @@
#include <linux/rtc.h>
#include <linux/types.h>
#include <linux/bcd.h>
-#include <linux/rtc-ds2404.h>
+#include <linux/platform_data/rtc-ds2404.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/slab.h>
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 04fbd7fff..b1f20d8c3 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -197,12 +197,6 @@ static int ds3232_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
alarm->time.tm_hour = bcd2bin(buf[2] & 0x7F);
alarm->time.tm_mday = bcd2bin(buf[3] & 0x7F);
- alarm->time.tm_mon = -1;
- alarm->time.tm_year = -1;
- alarm->time.tm_wday = -1;
- alarm->time.tm_yday = -1;
- alarm->time.tm_isdst = -1;
-
alarm->enabled = !!(control & DS3232_REG_CR_A1IE);
alarm->pending = !!(stat & DS3232_REG_SR_A1F);
diff --git a/drivers/rtc/rtc-efi.c b/drivers/rtc/rtc-efi.c
index 96d38609d..0130afd7f 100644
--- a/drivers/rtc/rtc-efi.c
+++ b/drivers/rtc/rtc-efi.c
@@ -259,6 +259,12 @@ static const struct rtc_class_ops efi_rtc_ops = {
static int __init efi_rtc_probe(struct platform_device *dev)
{
struct rtc_device *rtc;
+ efi_time_t eft;
+ efi_time_cap_t cap;
+
+ /* First check if the RTC is usable */
+ if (efi.get_time(&eft, &cap) != EFI_SUCCESS)
+ return -ENODEV;
rtc = devm_rtc_device_register(&dev->dev, "rtc-efi", &efi_rtc_ops,
THIS_MODULE);
diff --git a/drivers/rtc/rtc-generic.c b/drivers/rtc/rtc-generic.c
index d726c6aa9..1bf5d2347 100644
--- a/drivers/rtc/rtc-generic.c
+++ b/drivers/rtc/rtc-generic.c
@@ -9,44 +9,10 @@
#include <linux/platform_device.h>
#include <linux/rtc.h>
-#if defined(CONFIG_M68K) || defined(CONFIG_PARISC) || \
- defined(CONFIG_PPC) || defined(CONFIG_SUPERH32)
-#include <asm/rtc.h>
-
-static int generic_get_time(struct device *dev, struct rtc_time *tm)
-{
- unsigned int ret = get_rtc_time(tm);
-
- if (ret & RTC_BATT_BAD)
- return -EOPNOTSUPP;
-
- return rtc_valid_tm(tm);
-}
-
-static int generic_set_time(struct device *dev, struct rtc_time *tm)
-{
- if (set_rtc_time(tm) < 0)
- return -EOPNOTSUPP;
-
- return 0;
-}
-
-static const struct rtc_class_ops generic_rtc_ops = {
- .read_time = generic_get_time,
- .set_time = generic_set_time,
-};
-#else
-#define generic_rtc_ops *(struct rtc_class_ops*)NULL
-#endif
-
static int __init generic_rtc_probe(struct platform_device *dev)
{
struct rtc_device *rtc;
- const struct rtc_class_ops *ops;
-
- ops = dev_get_platdata(&dev->dev);
- if (!ops)
- ops = &generic_rtc_ops;
+ const struct rtc_class_ops *ops = dev_get_platdata(&dev->dev);
rtc = devm_rtc_device_register(&dev->dev, "rtc-generic",
ops, THIS_MODULE);
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
index 207270376..e5ad527cb 100644
--- a/drivers/rtc/rtc-hym8563.c
+++ b/drivers/rtc/rtc-hym8563.c
@@ -198,7 +198,7 @@ static int hym8563_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
return ret;
/* The alarm only has a minute accuracy */
- alm_tm->tm_sec = -1;
+ alm_tm->tm_sec = 0;
alm_tm->tm_min = (buf[0] & HYM8563_ALM_BIT_DISABLE) ?
-1 :
@@ -213,9 +213,6 @@ static int hym8563_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
-1 :
bcd2bin(buf[3] & HYM8563_WEEKDAY_MASK);
- alm_tm->tm_mon = -1;
- alm_tm->tm_year = -1;
-
ret = i2c_smbus_read_byte_data(client, HYM8563_CTL2);
if (ret < 0)
return ret;
diff --git a/drivers/rtc/rtc-isl12057.c b/drivers/rtc/rtc-isl12057.c
index 54328d4ac..0e7f0f52b 100644
--- a/drivers/rtc/rtc-isl12057.c
+++ b/drivers/rtc/rtc-isl12057.c
@@ -245,8 +245,7 @@ static int isl12057_rtc_update_alarm(struct device *dev, int enable)
static int isl12057_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct isl12057_rtc_data *data = dev_get_drvdata(dev);
- struct rtc_time rtc_tm, *alarm_tm = &alarm->time;
- unsigned long rtc_secs, alarm_secs;
+ struct rtc_time *alarm_tm = &alarm->time;
u8 regs[ISL12057_A1_SEC_LEN];
unsigned int ir;
int ret;
@@ -264,36 +263,6 @@ static int isl12057_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
alarm_tm->tm_min = bcd2bin(regs[1] & 0x7f);
alarm_tm->tm_hour = bcd2bin(regs[2] & 0x3f);
alarm_tm->tm_mday = bcd2bin(regs[3] & 0x3f);
- alarm_tm->tm_wday = -1;
-
- /*
- * The alarm section does not store year/month. We use the ones in rtc
- * section as a basis and increment month and then year if needed to get
- * alarm after current time.
- */
- ret = _isl12057_rtc_read_time(dev, &rtc_tm);
- if (ret)
- goto err_unlock;
-
- alarm_tm->tm_year = rtc_tm.tm_year;
- alarm_tm->tm_mon = rtc_tm.tm_mon;
-
- ret = rtc_tm_to_time(&rtc_tm, &rtc_secs);
- if (ret)
- goto err_unlock;
-
- ret = rtc_tm_to_time(alarm_tm, &alarm_secs);
- if (ret)
- goto err_unlock;
-
- if (alarm_secs < rtc_secs) {
- if (alarm_tm->tm_mon == 11) {
- alarm_tm->tm_mon = 0;
- alarm_tm->tm_year += 1;
- } else {
- alarm_tm->tm_mon += 1;
- }
- }
ret = regmap_read(data->regmap, ISL12057_REG_INT, &ir);
if (ret) {
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index d1bf93a87..58698d21c 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -244,7 +244,7 @@ static int m41t80_alarm_irq_enable(struct device *dev, unsigned int enabled)
retval = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, flags);
if (retval < 0) {
- dev_info(dev, "Unable to enable alarm IRQ %d\n", retval);
+ dev_err(dev, "Unable to enable alarm IRQ %d\n", retval);
return retval;
}
return 0;
@@ -320,10 +320,8 @@ static int m41t80_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
alrm->time.tm_sec = bcd2bin(alarmvals[4] & 0x7f);
alrm->time.tm_min = bcd2bin(alarmvals[3] & 0x7f);
alrm->time.tm_hour = bcd2bin(alarmvals[2] & 0x3f);
- alrm->time.tm_wday = -1;
alrm->time.tm_mday = bcd2bin(alarmvals[1] & 0x3f);
alrm->time.tm_mon = bcd2bin(alarmvals[0] & 0x3f);
- alrm->time.tm_year = -1;
alrm->enabled = !!(alarmvals[0] & M41T80_ALMON_AFE);
alrm->pending = (flags & M41T80_FLAGS_AF) && alrm->enabled;
@@ -337,6 +335,30 @@ static struct rtc_class_ops m41t80_rtc_ops = {
.proc = m41t80_rtc_proc,
};
+#ifdef CONFIG_PM_SLEEP
+static int m41t80_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (client->irq >= 0 && device_may_wakeup(dev))
+ enable_irq_wake(client->irq);
+
+ return 0;
+}
+
+static int m41t80_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (client->irq >= 0 && device_may_wakeup(dev))
+ disable_irq_wake(client->irq);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(m41t80_pm, m41t80_suspend, m41t80_resume);
+
static ssize_t flags_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -831,10 +853,9 @@ static int m41t80_probe(struct i2c_client *client,
return rc;
}
- rc = devm_add_action(&client->dev, m41t80_remove_sysfs_group,
- &client->dev);
+ rc = devm_add_action_or_reset(&client->dev, m41t80_remove_sysfs_group,
+ &client->dev);
if (rc) {
- m41t80_remove_sysfs_group(&client->dev);
dev_err(&client->dev,
"Failed to add sysfs cleanup action: %d\n", rc);
return rc;
@@ -873,6 +894,7 @@ static int m41t80_remove(struct i2c_client *client)
static struct i2c_driver m41t80_driver = {
.driver = {
.name = "rtc-m41t80",
+ .pm = &m41t80_pm,
},
.probe = m41t80_probe,
.remove = m41t80_remove,
diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c
index f72b91f25..0eeb5714c 100644
--- a/drivers/rtc/rtc-m48t86.c
+++ b/drivers/rtc/rtc-m48t86.c
@@ -16,7 +16,7 @@
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/platform_device.h>
-#include <linux/m48t86.h>
+#include <linux/platform_data/rtc-m48t86.h>
#include <linux/bcd.h>
#define M48T86_REG_SEC 0x00
diff --git a/drivers/rtc/rtc-max6916.c b/drivers/rtc/rtc-max6916.c
new file mode 100644
index 000000000..623ab27b2
--- /dev/null
+++ b/drivers/rtc/rtc-max6916.c
@@ -0,0 +1,164 @@
+/* rtc-max6916.c
+ *
+ * Driver for MAXIM max6916 Low Current, SPI Compatible
+ * Real Time Clock
+ *
+ * Author : Venkat Prashanth B U <venkat.prashanth2498@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/spi/spi.h>
+#include <linux/bcd.h>
+
+/* Registers in max6916 rtc */
+
+#define MAX6916_SECONDS_REG 0x01
+#define MAX6916_MINUTES_REG 0x02
+#define MAX6916_HOURS_REG 0x03
+#define MAX6916_DATE_REG 0x04
+#define MAX6916_MONTH_REG 0x05
+#define MAX6916_DAY_REG 0x06
+#define MAX6916_YEAR_REG 0x07
+#define MAX6916_CONTROL_REG 0x08
+#define MAX6916_STATUS_REG 0x0C
+#define MAX6916_CLOCK_BURST 0x3F
+
+static int max6916_read_reg(struct device *dev, unsigned char address,
+ unsigned char *data)
+{
+ struct spi_device *spi = to_spi_device(dev);
+
+ *data = address | 0x80;
+
+ return spi_write_then_read(spi, data, 1, data, 1);
+}
+
+static int max6916_write_reg(struct device *dev, unsigned char address,
+ unsigned char data)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ unsigned char buf[2];
+
+ buf[0] = address & 0x7F;
+ buf[1] = data;
+
+ return spi_write_then_read(spi, buf, 2, NULL, 0);
+}
+
+static int max6916_read_time(struct device *dev, struct rtc_time *dt)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ int err;
+ unsigned char buf[8];
+
+ buf[0] = MAX6916_CLOCK_BURST | 0x80;
+
+ err = spi_write_then_read(spi, buf, 1, buf, 8);
+
+ if (err)
+ return err;
+
+ dt->tm_sec = bcd2bin(buf[0]);
+ dt->tm_min = bcd2bin(buf[1]);
+ dt->tm_hour = bcd2bin(buf[2] & 0x3F);
+ dt->tm_mday = bcd2bin(buf[3]);
+ dt->tm_mon = bcd2bin(buf[4]) - 1;
+ dt->tm_wday = bcd2bin(buf[5]) - 1;
+ dt->tm_year = bcd2bin(buf[6]) + 100;
+
+ return rtc_valid_tm(dt);
+}
+
+static int max6916_set_time(struct device *dev, struct rtc_time *dt)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ unsigned char buf[9];
+
+ if (dt->tm_year < 100 || dt->tm_year > 199) {
+ dev_err(&spi->dev, "Year must be between 2000 and 2099. It's %d.\n",
+ dt->tm_year + 1900);
+ return -EINVAL;
+ }
+
+ buf[0] = MAX6916_CLOCK_BURST & 0x7F;
+ buf[1] = bin2bcd(dt->tm_sec);
+ buf[2] = bin2bcd(dt->tm_min);
+ buf[3] = (bin2bcd(dt->tm_hour) & 0X3F);
+ buf[4] = bin2bcd(dt->tm_mday);
+ buf[5] = bin2bcd(dt->tm_mon + 1);
+ buf[6] = bin2bcd(dt->tm_wday + 1);
+ buf[7] = bin2bcd(dt->tm_year % 100);
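+	/* the last burst byte lands in the control register; keep it cleared */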
+ buf[8] = bin2bcd(0x00);
+
+ /* write the rtc settings */
+ return spi_write_then_read(spi, buf, 9, NULL, 0);
+}
+
+static const struct rtc_class_ops max6916_rtc_ops = {
+ .read_time = max6916_read_time,
+ .set_time = max6916_set_time,
+};
+
+static int max6916_probe(struct spi_device *spi)
+{
+ struct rtc_device *rtc;
+ unsigned char data;
+ int res;
+
+	/* set up SPI: the max6916 uses mode 3 and 8 bits per word */
+ spi->mode = SPI_MODE_3;
+ spi->bits_per_word = 8;
+ spi_setup(spi);
+
+	/* verify the chip responds by reading the seconds register */
+ res = max6916_read_reg(&spi->dev, MAX6916_SECONDS_REG, &data);
+ if (res)
+ return res;
+
+	/* disable the RTC's write protection */
+ max6916_read_reg(&spi->dev, MAX6916_CONTROL_REG, &data);
+ data = data & ~(1 << 7);
+ max6916_write_reg(&spi->dev, MAX6916_CONTROL_REG, data);
+
+	/* Enable oscillator, disable oscillator stop flag and glitch filter */
+ max6916_read_reg(&spi->dev, MAX6916_STATUS_REG, &data);
+ data = data & 0x1B;
+ max6916_write_reg(&spi->dev, MAX6916_STATUS_REG, data);
+
+ /* display the settings */
+ max6916_read_reg(&spi->dev, MAX6916_CONTROL_REG, &data);
+ dev_info(&spi->dev, "MAX6916 RTC CTRL Reg = 0x%02x\n", data);
+
+ max6916_read_reg(&spi->dev, MAX6916_STATUS_REG, &data);
+ dev_info(&spi->dev, "MAX6916 RTC Status Reg = 0x%02x\n", data);
+
+ rtc = devm_rtc_device_register(&spi->dev, "max6916",
+ &max6916_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
+
+ spi_set_drvdata(spi, rtc);
+
+ return 0;
+}
+
+static struct spi_driver max6916_driver = {
+ .driver = {
+ .name = "max6916",
+ },
+ .probe = max6916_probe,
+};
+module_spi_driver(max6916_driver);
+
+MODULE_DESCRIPTION("MAX6916 SPI RTC DRIVER");
+MODULE_AUTHOR("Venkat Prashanth B U <venkat.prashanth2498@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c
new file mode 100644
index 000000000..2f1772a35
--- /dev/null
+++ b/drivers/rtc/rtc-mc146818-lib.c
@@ -0,0 +1,198 @@
+#include <linux/bcd.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/mc146818rtc.h>
+
+#ifdef CONFIG_ACPI
+#include <linux/acpi.h>
+#endif
+
+/*
+ * Returns true if a clock update is in progress
+ */
+static inline unsigned char mc146818_is_updating(void)
+{
+ unsigned char uip;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ return uip;
+}
+
+unsigned int mc146818_get_time(struct rtc_time *time)
+{
+ unsigned char ctrl;
+ unsigned long flags;
+ unsigned char century = 0;
+
+#ifdef CONFIG_MACH_DECSTATION
+ unsigned int real_year;
+#endif
+
+ /*
+ * read RTC once any update in progress is done. The update
+	 * can take just over 2ms. We wait 20ms. There is no need to
+	 * poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
+ * If you need to know *exactly* when a second has started, enable
+ * periodic update complete interrupts, (via ioctl) and then
+ * immediately read /dev/rtc which will block until you get the IRQ.
+ * Once the read clears, read the RTC time (again via ioctl). Easy.
+ */
+ if (mc146818_is_updating())
+ mdelay(20);
+
+ /*
+ * Only the values that we read from the RTC are set. We leave
+ * tm_wday, tm_yday and tm_isdst untouched. Even though the
+ * RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated
+ * by the RTC when initially set to a non-zero value.
+ */
+ spin_lock_irqsave(&rtc_lock, flags);
+ time->tm_sec = CMOS_READ(RTC_SECONDS);
+ time->tm_min = CMOS_READ(RTC_MINUTES);
+ time->tm_hour = CMOS_READ(RTC_HOURS);
+ time->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH);
+ time->tm_mon = CMOS_READ(RTC_MONTH);
+ time->tm_year = CMOS_READ(RTC_YEAR);
+#ifdef CONFIG_MACH_DECSTATION
+ real_year = CMOS_READ(RTC_DEC_YEAR);
+#endif
+#ifdef CONFIG_ACPI
+ if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
+ acpi_gbl_FADT.century)
+ century = CMOS_READ(acpi_gbl_FADT.century);
+#endif
+ ctrl = CMOS_READ(RTC_CONTROL);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+
+	if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+ time->tm_sec = bcd2bin(time->tm_sec);
+ time->tm_min = bcd2bin(time->tm_min);
+ time->tm_hour = bcd2bin(time->tm_hour);
+ time->tm_mday = bcd2bin(time->tm_mday);
+ time->tm_mon = bcd2bin(time->tm_mon);
+ time->tm_year = bcd2bin(time->tm_year);
+ century = bcd2bin(century);
+ }
+
+#ifdef CONFIG_MACH_DECSTATION
+ time->tm_year += real_year - 72;
+#endif
+
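+	/* a valid century byte (e.g. 20 for 20xx) extends the two-digit year */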
+ if (century)
+ time->tm_year += (century - 19) * 100;
+
+ /*
+ * Account for differences between how the RTC uses the values
+ * and how they are defined in a struct rtc_time;
+	 * and how they are defined in a struct rtc_time.
+ if (time->tm_year <= 69)
+ time->tm_year += 100;
+
+ time->tm_mon--;
+
+ return RTC_24H;
+}
+EXPORT_SYMBOL_GPL(mc146818_get_time);
+
+/* Set the current date and time in the real time clock. */
+int mc146818_set_time(struct rtc_time *time)
+{
+ unsigned long flags;
+ unsigned char mon, day, hrs, min, sec;
+ unsigned char save_control, save_freq_select;
+ unsigned int yrs;
+#ifdef CONFIG_MACH_DECSTATION
+ unsigned int real_yrs, leap_yr;
+#endif
+ unsigned char century = 0;
+
+ yrs = time->tm_year;
+ mon = time->tm_mon + 1; /* tm_mon starts at zero */
+ day = time->tm_mday;
+ hrs = time->tm_hour;
+ min = time->tm_min;
+ sec = time->tm_sec;
+
+ if (yrs > 255) /* They are unsigned */
+ return -EINVAL;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+#ifdef CONFIG_MACH_DECSTATION
+ real_yrs = yrs;
+ leap_yr = ((!((yrs + 1900) % 4) && ((yrs + 1900) % 100)) ||
+ !((yrs + 1900) % 400));
+ yrs = 72;
+
+ /*
+ * We want to keep the year set to 73 until March
+ * for non-leap years, so that Feb, 29th is handled
+ * correctly.
+ */
+ if (!leap_yr && mon < 3) {
+ real_yrs--;
+ yrs = 73;
+ }
+#endif
+
+#ifdef CONFIG_ACPI
+ if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
+ acpi_gbl_FADT.century) {
+ century = (yrs + 1900) / 100;
+ yrs %= 100;
+ }
+#endif
+
+ /* These limits and adjustments are independent of
+ * whether the chip is in binary mode or not.
+ */
+ if (yrs > 169) {
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ return -EINVAL;
+ }
+
+ if (yrs >= 100)
+ yrs -= 100;
+
+ if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)
+ || RTC_ALWAYS_BCD) {
+ sec = bin2bcd(sec);
+ min = bin2bcd(min);
+ hrs = bin2bcd(hrs);
+ day = bin2bcd(day);
+ mon = bin2bcd(mon);
+ yrs = bin2bcd(yrs);
+ century = bin2bcd(century);
+ }
+
+ save_control = CMOS_READ(RTC_CONTROL);
+ CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
+ save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
+ CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
+
+#ifdef CONFIG_MACH_DECSTATION
+ CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
+#endif
+ CMOS_WRITE(yrs, RTC_YEAR);
+ CMOS_WRITE(mon, RTC_MONTH);
+ CMOS_WRITE(day, RTC_DAY_OF_MONTH);
+ CMOS_WRITE(hrs, RTC_HOURS);
+ CMOS_WRITE(min, RTC_MINUTES);
+ CMOS_WRITE(sec, RTC_SECONDS);
+#ifdef CONFIG_ACPI
+ if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
+ acpi_gbl_FADT.century)
+ CMOS_WRITE(century, acpi_gbl_FADT.century);
+#endif
+
+ CMOS_WRITE(save_control, RTC_CONTROL);
+ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
+
+ spin_unlock_irqrestore(&rtc_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mc146818_set_time);
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index 0094d9bdd..7334c44fa 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -32,11 +32,11 @@
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
+#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sfi.h>
-#include <asm-generic/rtc.h>
#include <asm/intel_scu_ipc.h>
#include <asm/intel-mid.h>
#include <asm/intel_mid_vrtc.h>
@@ -149,14 +149,6 @@ static int mrst_read_alarm(struct device *dev, struct rtc_wkalrm *t)
if (mrst->irq <= 0)
return -EIO;
- /* Basic alarms only support hour, minute, and seconds fields.
- * Some also support day and month, for alarms up to a year in
- * the future.
- */
- t->time.tm_mday = -1;
- t->time.tm_mon = -1;
- t->time.tm_year = -1;
-
/* vRTC only supports binary mode */
spin_lock_irq(&rtc_lock);
t->time.tm_sec = vrtc_cmos_read(RTC_SECONDS_ALARM);
diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
index 9c18d6fd8..ea20f627d 100644
--- a/drivers/rtc/rtc-opal.c
+++ b/drivers/rtc/rtc-opal.c
@@ -134,7 +134,7 @@ static int opal_get_tpo_time(struct device *dev, struct rtc_wkalrm *alarm)
goto exit;
}
- rc = be64_to_cpu(msg.params[1]);
+ rc = opal_get_async_rc(msg);
if (rc != OPAL_SUCCESS) {
rc = -EIO;
goto exit;
@@ -181,7 +181,7 @@ static int opal_set_tpo_time(struct device *dev, struct rtc_wkalrm *alarm)
goto exit;
}
- rc = be64_to_cpu(msg.params[1]);
+ rc = opal_get_async_rc(msg);
if (rc != OPAL_SUCCESS)
rc = -EIO;
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
index f22e06070..b4478cc92 100644
--- a/drivers/rtc/rtc-pcf2123.c
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -96,7 +96,7 @@
#define CD_TMR_TE BIT(3) /* Countdown timer enable */
/* PCF2123_REG_OFFSET BITS */
-#define OFFSET_SIGN_BIT BIT(6) /* 2's complement sign bit */
+#define OFFSET_SIGN_BIT 6 /* 2's complement sign bit */
#define OFFSET_COARSE BIT(7) /* Coarse mode offset */
#define OFFSET_STEP (2170) /* Offset step in parts per billion */
@@ -217,7 +217,7 @@ static int pcf2123_read_offset(struct device *dev, long *offset)
if (reg & OFFSET_COARSE)
reg <<= 1; /* multiply by 2 and sign extend */
else
- reg |= (reg & OFFSET_SIGN_BIT) << 1; /* sign extend only */
+ reg = sign_extend32(reg, OFFSET_SIGN_BIT);
*offset = ((long)reg) * OFFSET_STEP;
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index e8ddbb359..efb0a08ac 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -16,6 +16,16 @@
#include <linux/rtc.h>
#include <linux/module.h>
+/*
+ * Information for this driver was pulled from the following datasheets.
+ *
+ * http://www.nxp.com/documents/data_sheet/PCF85063A.pdf
+ * http://www.nxp.com/documents/data_sheet/PCF85063TP.pdf
+ *
+ * PCF85063A -- Rev. 6, 18 November 2015
+ * PCF85063TP -- Rev. 4, 6 May 2015
+ */
+
#define PCF85063_REG_CTRL1 0x00 /* status */
#define PCF85063_REG_CTRL1_STOP BIT(5)
#define PCF85063_REG_CTRL2 0x01
@@ -55,10 +65,22 @@ static int pcf85063_stop_clock(struct i2c_client *client, u8 *ctrl1)
return 0;
}
-/*
- * In the routines that deal directly with the pcf85063 hardware, we use
- * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch.
- */
+static int pcf85063_start_clock(struct i2c_client *client, u8 ctrl1)
+{
+ s32 ret;
+
+	/* start the clock by clearing the STOP bit */
+	ctrl1 &= ~PCF85063_REG_CTRL1_STOP;
+
+ ret = i2c_smbus_write_byte_data(client, PCF85063_REG_CTRL1, ctrl1);
+ if (ret < 0) {
+		dev_err(&client->dev, "Failed to start the clock\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
static int pcf85063_get_datetime(struct i2c_client *client, struct rtc_time *tm)
{
int rc;
@@ -90,8 +112,7 @@ static int pcf85063_get_datetime(struct i2c_client *client, struct rtc_time *tm)
tm->tm_wday = regs[4] & 0x07;
tm->tm_mon = bcd2bin(regs[5] & 0x1F) - 1; /* rtc mn 1-12 */
tm->tm_year = bcd2bin(regs[6]);
- if (tm->tm_year < 70)
- tm->tm_year += 100; /* assume we are in 1970...2069 */
+ tm->tm_year += 100;
return rtc_valid_tm(tm);
}
@@ -99,13 +120,17 @@ static int pcf85063_get_datetime(struct i2c_client *client, struct rtc_time *tm)
static int pcf85063_set_datetime(struct i2c_client *client, struct rtc_time *tm)
{
int rc;
- u8 regs[8];
+ u8 regs[7];
+ u8 ctrl1;
+
+ if ((tm->tm_year < 100) || (tm->tm_year > 199))
+ return -EINVAL;
/*
* to accurately set the time, reset the divider chain and keep it in
* reset state until all time/date registers are written
*/
- rc = pcf85063_stop_clock(client, &regs[7]);
+ rc = pcf85063_stop_clock(client, &ctrl1);
if (rc != 0)
return rc;
@@ -125,14 +150,7 @@ static int pcf85063_set_datetime(struct i2c_client *client, struct rtc_time *tm)
regs[5] = bin2bcd(tm->tm_mon + 1);
/* year and century */
- regs[6] = bin2bcd(tm->tm_year % 100);
-
- /*
- * after all time/date registers are written, let the 'address auto
- * increment' feature wrap around and write register CTRL1 to re-enable
- * the clock divider chain again
- */
- regs[7] &= ~PCF85063_REG_CTRL1_STOP;
+ regs[6] = bin2bcd(tm->tm_year - 100);
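+	/* the chip stores a two-digit year; this driver maps it to 2000..2099 */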
/* write all registers at once */
rc = i2c_smbus_write_i2c_block_data(client, PCF85063_REG_SC,
@@ -142,6 +160,15 @@ static int pcf85063_set_datetime(struct i2c_client *client, struct rtc_time *tm)
return rc;
}
+ /*
+ * Write the control register as a separate action since the size of
+ * the register space is different between the PCF85063TP and
+	 * PCF85063A devices. The rollover point cannot be used.
+ */
+ rc = pcf85063_start_clock(client, ctrl1);
+ if (rc != 0)
+ return rc;
+
return 0;
}
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index b9ddbb001..1227ceab6 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -341,14 +341,11 @@ static int pcf8563_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *tm)
"%s: raw data is min=%02x, hr=%02x, mday=%02x, wday=%02x\n",
__func__, buf[0], buf[1], buf[2], buf[3]);
+ tm->time.tm_sec = 0;
tm->time.tm_min = bcd2bin(buf[0] & 0x7F);
tm->time.tm_hour = bcd2bin(buf[1] & 0x3F);
tm->time.tm_mday = bcd2bin(buf[2] & 0x3F);
tm->time.tm_wday = bcd2bin(buf[3] & 0x7);
- tm->time.tm_mon = -1;
- tm->time.tm_year = -1;
- tm->time.tm_yday = -1;
- tm->time.tm_isdst = -1;
err = pcf8563_get_alarm_mode(client, &tm->enabled, &tm->pending);
if (err < 0)
diff --git a/drivers/rtc/rtc-rc5t583.c b/drivers/rtc/rtc-rc5t583.c
index f28d57788..68ce77414 100644
--- a/drivers/rtc/rtc-rc5t583.c
+++ b/drivers/rtc/rtc-rc5t583.c
@@ -128,6 +128,7 @@ static int rc5t583_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
return ret;
}
+ alm->time.tm_sec = 0;
alm->time.tm_min = bcd2bin(alarm_data[0]);
alm->time.tm_hour = bcd2bin(alarm_data[1]);
alm->time.tm_mday = bcd2bin(alarm_data[2]);
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index ef8622942..c8c757466 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -341,12 +341,6 @@ static int rs5c_read_alarm(struct device *dev, struct rtc_wkalrm *t)
t->time.tm_sec = 0;
t->time.tm_min = bcd2bin(rs5c->regs[RS5C_REG_ALARM_A_MIN] & 0x7f);
t->time.tm_hour = rs5c_reg2hr(rs5c, rs5c->regs[RS5C_REG_ALARM_A_HOURS]);
- t->time.tm_mday = -1;
- t->time.tm_mon = -1;
- t->time.tm_year = -1;
- t->time.tm_wday = -1;
- t->time.tm_yday = -1;
- t->time.tm_isdst = -1;
/* ... and status */
t->enabled = !!(rs5c->regs[RS5C_REG_CTRL1] & RS5C_CTRL1_AALE);
diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c
index f623038e5..9a2f6a95d 100644
--- a/drivers/rtc/rtc-rv8803.c
+++ b/drivers/rtc/rtc-rv8803.c
@@ -13,12 +13,15 @@
#include <linux/bcd.h>
#include <linux/bitops.h>
+#include <linux/log2.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rtc.h>
+#define RV8803_I2C_TRY_COUNT 4
+
#define RV8803_SEC 0x00
#define RV8803_MIN 0x01
#define RV8803_HOUR 0x02
@@ -56,19 +59,85 @@ struct rv8803_data {
u8 ctrl;
};
+static int rv8803_read_reg(const struct i2c_client *client, u8 reg)
+{
+ int try = RV8803_I2C_TRY_COUNT;
+ s32 ret;
+
+ /*
+ * There is a 61µs window during which the RTC does not acknowledge I2C
+ * transfers. In that case, ensure that there are multiple attempts.
+ */
+ do
+ ret = i2c_smbus_read_byte_data(client, reg);
+ while ((ret == -ENXIO || ret == -EIO) && --try);
+ if (ret < 0)
+ dev_err(&client->dev, "Unable to read register 0x%02x\n", reg);
+
+ return ret;
+}
+
+static int rv8803_read_regs(const struct i2c_client *client,
+ u8 reg, u8 count, u8 *values)
+{
+ int try = RV8803_I2C_TRY_COUNT;
+ s32 ret;
+
+ do
+ ret = i2c_smbus_read_i2c_block_data(client, reg, count, values);
+ while ((ret == -ENXIO || ret == -EIO) && --try);
+ if (ret != count) {
+ dev_err(&client->dev,
+ "Unable to read registers 0x%02x..0x%02x\n",
+ reg, reg + count - 1);
+ return ret < 0 ? ret : -EIO;
+ }
+
+ return 0;
+}
+
+static int rv8803_write_reg(const struct i2c_client *client, u8 reg, u8 value)
+{
+ int try = RV8803_I2C_TRY_COUNT;
+ s32 ret;
+
+ do
+ ret = i2c_smbus_write_byte_data(client, reg, value);
+ while ((ret == -ENXIO || ret == -EIO) && --try);
+ if (ret)
+ dev_err(&client->dev, "Unable to write register 0x%02x\n", reg);
+
+ return ret;
+}
+
+static int rv8803_write_regs(const struct i2c_client *client,
+ u8 reg, u8 count, const u8 *values)
+{
+ int try = RV8803_I2C_TRY_COUNT;
+ s32 ret;
+
+ do
+ ret = i2c_smbus_write_i2c_block_data(client, reg, count,
+ values);
+ while ((ret == -ENXIO || ret == -EIO) && --try);
+ if (ret)
+ dev_err(&client->dev,
+ "Unable to write registers 0x%02x..0x%02x\n",
+ reg, reg + count - 1);
+
+ return ret;
+}
+
static irqreturn_t rv8803_handle_irq(int irq, void *dev_id)
{
struct i2c_client *client = dev_id;
struct rv8803_data *rv8803 = i2c_get_clientdata(client);
unsigned long events = 0;
- int flags, try = 0;
+ int flags;
mutex_lock(&rv8803->flags_lock);
- do {
- flags = i2c_smbus_read_byte_data(client, RV8803_FLAG);
- try++;
- } while ((flags == -ENXIO) && (try < 3));
+ flags = rv8803_read_reg(client, RV8803_FLAG);
if (flags <= 0) {
mutex_unlock(&rv8803->flags_lock);
return IRQ_NONE;
@@ -100,9 +169,8 @@ static irqreturn_t rv8803_handle_irq(int irq, void *dev_id)
if (events) {
rtc_update_irq(rv8803->rtc, 1, events);
- i2c_smbus_write_byte_data(client, RV8803_FLAG, flags);
- i2c_smbus_write_byte_data(rv8803->client, RV8803_CTRL,
- rv8803->ctrl);
+ rv8803_write_reg(client, RV8803_FLAG, flags);
+ rv8803_write_reg(rv8803->client, RV8803_CTRL, rv8803->ctrl);
}
mutex_unlock(&rv8803->flags_lock);
@@ -118,7 +186,7 @@ static int rv8803_get_time(struct device *dev, struct rtc_time *tm)
u8 *date = date1;
int ret, flags;
- flags = i2c_smbus_read_byte_data(rv8803->client, RV8803_FLAG);
+ flags = rv8803_read_reg(rv8803->client, RV8803_FLAG);
if (flags < 0)
return flags;
@@ -127,16 +195,14 @@ static int rv8803_get_time(struct device *dev, struct rtc_time *tm)
return -EINVAL;
}
- ret = i2c_smbus_read_i2c_block_data(rv8803->client, RV8803_SEC,
- 7, date);
- if (ret != 7)
- return ret < 0 ? ret : -EIO;
+ ret = rv8803_read_regs(rv8803->client, RV8803_SEC, 7, date);
+ if (ret)
+ return ret;
if ((date1[RV8803_SEC] & 0x7f) == bin2bcd(59)) {
- ret = i2c_smbus_read_i2c_block_data(rv8803->client, RV8803_SEC,
- 7, date2);
- if (ret != 7)
- return ret < 0 ? ret : -EIO;
+ ret = rv8803_read_regs(rv8803->client, RV8803_SEC, 7, date2);
+ if (ret)
+ return ret;
if ((date2[RV8803_SEC] & 0x7f) != bin2bcd(59))
date = date2;
@@ -145,23 +211,33 @@ static int rv8803_get_time(struct device *dev, struct rtc_time *tm)
tm->tm_sec = bcd2bin(date[RV8803_SEC] & 0x7f);
tm->tm_min = bcd2bin(date[RV8803_MIN] & 0x7f);
tm->tm_hour = bcd2bin(date[RV8803_HOUR] & 0x3f);
- tm->tm_wday = ffs(date[RV8803_WEEK] & 0x7f);
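+	/* the WEEK register holds a single set bit; ilog2 maps it to 0..6 */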
+ tm->tm_wday = ilog2(date[RV8803_WEEK] & 0x7f);
tm->tm_mday = bcd2bin(date[RV8803_DAY] & 0x3f);
tm->tm_mon = bcd2bin(date[RV8803_MONTH] & 0x1f) - 1;
tm->tm_year = bcd2bin(date[RV8803_YEAR]) + 100;
- return rtc_valid_tm(tm);
+ return 0;
}
static int rv8803_set_time(struct device *dev, struct rtc_time *tm)
{
struct rv8803_data *rv8803 = dev_get_drvdata(dev);
u8 date[7];
- int flags, ret;
+ int ctrl, flags, ret;
if ((tm->tm_year < 100) || (tm->tm_year > 199))
return -EINVAL;
+ ctrl = rv8803_read_reg(rv8803->client, RV8803_CTRL);
+ if (ctrl < 0)
+ return ctrl;
+
+ /* Stop the clock */
+ ret = rv8803_write_reg(rv8803->client, RV8803_CTRL,
+ ctrl | RV8803_CTRL_RESET);
+ if (ret)
+ return ret;
+
date[RV8803_SEC] = bin2bcd(tm->tm_sec);
date[RV8803_MIN] = bin2bcd(tm->tm_min);
date[RV8803_HOUR] = bin2bcd(tm->tm_hour);
@@ -170,21 +246,26 @@ static int rv8803_set_time(struct device *dev, struct rtc_time *tm)
date[RV8803_MONTH] = bin2bcd(tm->tm_mon + 1);
date[RV8803_YEAR] = bin2bcd(tm->tm_year - 100);
- ret = i2c_smbus_write_i2c_block_data(rv8803->client, RV8803_SEC,
- 7, date);
- if (ret < 0)
+ ret = rv8803_write_regs(rv8803->client, RV8803_SEC, 7, date);
+ if (ret)
+ return ret;
+
+ /* Restart the clock */
+ ret = rv8803_write_reg(rv8803->client, RV8803_CTRL,
+ ctrl & ~RV8803_CTRL_RESET);
+ if (ret)
return ret;
mutex_lock(&rv8803->flags_lock);
- flags = i2c_smbus_read_byte_data(rv8803->client, RV8803_FLAG);
+ flags = rv8803_read_reg(rv8803->client, RV8803_FLAG);
if (flags < 0) {
mutex_unlock(&rv8803->flags_lock);
return flags;
}
- ret = i2c_smbus_write_byte_data(rv8803->client, RV8803_FLAG,
- flags & ~RV8803_FLAG_V2F);
+ ret = rv8803_write_reg(rv8803->client, RV8803_FLAG,
+ flags & ~(RV8803_FLAG_V1F | RV8803_FLAG_V2F));
mutex_unlock(&rv8803->flags_lock);
@@ -198,22 +279,18 @@ static int rv8803_get_alarm(struct device *dev, struct rtc_wkalrm *alrm)
u8 alarmvals[3];
int flags, ret;
- ret = i2c_smbus_read_i2c_block_data(client, RV8803_ALARM_MIN,
- 3, alarmvals);
- if (ret != 3)
- return ret < 0 ? ret : -EIO;
+ ret = rv8803_read_regs(client, RV8803_ALARM_MIN, 3, alarmvals);
+ if (ret)
+ return ret;
- flags = i2c_smbus_read_byte_data(client, RV8803_FLAG);
+ flags = rv8803_read_reg(client, RV8803_FLAG);
if (flags < 0)
return flags;
alrm->time.tm_sec = 0;
alrm->time.tm_min = bcd2bin(alarmvals[0] & 0x7f);
alrm->time.tm_hour = bcd2bin(alarmvals[1] & 0x3f);
- alrm->time.tm_wday = -1;
alrm->time.tm_mday = bcd2bin(alarmvals[2] & 0x3f);
- alrm->time.tm_mon = -1;
- alrm->time.tm_year = -1;
alrm->enabled = !!(rv8803->ctrl & RV8803_CTRL_AIE);
alrm->pending = (flags & RV8803_FLAG_AF) && alrm->enabled;
@@ -239,10 +316,10 @@ static int rv8803_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
mutex_lock(&rv8803->flags_lock);
- ret = i2c_smbus_read_i2c_block_data(client, RV8803_FLAG, 2, ctrl);
- if (ret != 2) {
+ ret = rv8803_read_regs(client, RV8803_FLAG, 2, ctrl);
+ if (ret) {
mutex_unlock(&rv8803->flags_lock);
- return ret < 0 ? ret : -EIO;
+ return ret;
}
alarmvals[0] = bin2bcd(alrm->time.tm_min);
@@ -251,8 +328,8 @@ static int rv8803_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
if (rv8803->ctrl & (RV8803_CTRL_AIE | RV8803_CTRL_UIE)) {
rv8803->ctrl &= ~(RV8803_CTRL_AIE | RV8803_CTRL_UIE);
- err = i2c_smbus_write_byte_data(rv8803->client, RV8803_CTRL,
- rv8803->ctrl);
+ err = rv8803_write_reg(rv8803->client, RV8803_CTRL,
+ rv8803->ctrl);
if (err) {
mutex_unlock(&rv8803->flags_lock);
return err;
@@ -260,13 +337,12 @@ static int rv8803_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
}
ctrl[1] &= ~RV8803_FLAG_AF;
- err = i2c_smbus_write_byte_data(rv8803->client, RV8803_FLAG, ctrl[1]);
+ err = rv8803_write_reg(rv8803->client, RV8803_FLAG, ctrl[1]);
mutex_unlock(&rv8803->flags_lock);
if (err)
return err;
- err = i2c_smbus_write_i2c_block_data(rv8803->client, RV8803_ALARM_MIN,
- 3, alarmvals);
+ err = rv8803_write_regs(rv8803->client, RV8803_ALARM_MIN, 3, alarmvals);
if (err)
return err;
@@ -276,8 +352,8 @@ static int rv8803_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
if (rv8803->rtc->aie_timer.enabled)
rv8803->ctrl |= RV8803_CTRL_AIE;
- err = i2c_smbus_write_byte_data(rv8803->client, RV8803_CTRL,
- rv8803->ctrl);
+ err = rv8803_write_reg(rv8803->client, RV8803_CTRL,
+ rv8803->ctrl);
if (err)
return err;
}
@@ -306,21 +382,20 @@ static int rv8803_alarm_irq_enable(struct device *dev, unsigned int enabled)
}
mutex_lock(&rv8803->flags_lock);
- flags = i2c_smbus_read_byte_data(client, RV8803_FLAG);
+ flags = rv8803_read_reg(client, RV8803_FLAG);
if (flags < 0) {
mutex_unlock(&rv8803->flags_lock);
return flags;
}
flags &= ~(RV8803_FLAG_AF | RV8803_FLAG_UF);
- err = i2c_smbus_write_byte_data(client, RV8803_FLAG, flags);
+ err = rv8803_write_reg(client, RV8803_FLAG, flags);
mutex_unlock(&rv8803->flags_lock);
if (err)
return err;
if (ctrl != rv8803->ctrl) {
rv8803->ctrl = ctrl;
- err = i2c_smbus_write_byte_data(client, RV8803_CTRL,
- rv8803->ctrl);
+ err = rv8803_write_reg(client, RV8803_CTRL, rv8803->ctrl);
if (err)
return err;
}
@@ -336,7 +411,7 @@ static int rv8803_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
switch (cmd) {
case RTC_VL_READ:
- flags = i2c_smbus_read_byte_data(client, RV8803_FLAG);
+ flags = rv8803_read_reg(client, RV8803_FLAG);
if (flags < 0)
return flags;
@@ -355,16 +430,16 @@ static int rv8803_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
case RTC_VL_CLR:
mutex_lock(&rv8803->flags_lock);
- flags = i2c_smbus_read_byte_data(client, RV8803_FLAG);
+ flags = rv8803_read_reg(client, RV8803_FLAG);
if (flags < 0) {
mutex_unlock(&rv8803->flags_lock);
return flags;
}
flags &= ~(RV8803_FLAG_V1F | RV8803_FLAG_V2F);
- ret = i2c_smbus_write_byte_data(client, RV8803_FLAG, flags);
+ ret = rv8803_write_reg(client, RV8803_FLAG, flags);
mutex_unlock(&rv8803->flags_lock);
- if (ret < 0)
+ if (ret)
return ret;
return 0;
@@ -382,8 +457,8 @@ static ssize_t rv8803_nvram_write(struct file *filp, struct kobject *kobj,
struct i2c_client *client = to_i2c_client(dev);
int ret;
- ret = i2c_smbus_write_byte_data(client, RV8803_RAM, buf[0]);
- if (ret < 0)
+ ret = rv8803_write_reg(client, RV8803_RAM, buf[0]);
+ if (ret)
return ret;
return 1;
@@ -397,7 +472,7 @@ static ssize_t rv8803_nvram_read(struct file *filp, struct kobject *kobj,
struct i2c_client *client = to_i2c_client(dev);
int ret;
- ret = i2c_smbus_read_byte_data(client, RV8803_RAM);
+ ret = rv8803_read_reg(client, RV8803_RAM);
if (ret < 0)
return ret;
@@ -427,7 +502,7 @@ static int rv8803_probe(struct i2c_client *client,
{
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
struct rv8803_data *rv8803;
- int err, flags, try = 0;
+ int err, flags;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
I2C_FUNC_SMBUS_I2C_BLOCK)) {
@@ -444,16 +519,7 @@ static int rv8803_probe(struct i2c_client *client,
rv8803->client = client;
i2c_set_clientdata(client, rv8803);
- /*
- * There is a 60µs window where the RTC may not reply on the i2c bus in
- * that case, the transfer is not ACKed. In that case, ensure there are
- * multiple attempts.
- */
- do {
- flags = i2c_smbus_read_byte_data(client, RV8803_FLAG);
- try++;
- } while ((flags == -ENXIO) && (try < 3));
-
+ flags = rv8803_read_reg(client, RV8803_FLAG);
if (flags < 0)
return flags;
@@ -488,12 +554,7 @@ static int rv8803_probe(struct i2c_client *client,
return PTR_ERR(rv8803->rtc);
}
- try = 0;
- do {
- err = i2c_smbus_write_byte_data(rv8803->client, RV8803_EXT,
- RV8803_EXT_WADA);
- try++;
- } while ((err == -ENXIO) && (try < 3));
+ err = rv8803_write_reg(rv8803->client, RV8803_EXT, RV8803_EXT_WADA);
if (err)
return err;
diff --git a/drivers/rtc/rtc-rx8010.c b/drivers/rtc/rtc-rx8010.c
index 772d221ec..7163b91bb 100644
--- a/drivers/rtc/rtc-rx8010.c
+++ b/drivers/rtc/rtc-rx8010.c
@@ -272,15 +272,9 @@ static int rx8010_read_alarm(struct device *dev, struct rtc_wkalrm *t)
t->time.tm_min = bcd2bin(alarmvals[0] & 0x7f);
t->time.tm_hour = bcd2bin(alarmvals[1] & 0x3f);
- if (alarmvals[2] & RX8010_ALARM_AE)
- t->time.tm_mday = -1;
- else
+ if (!(alarmvals[2] & RX8010_ALARM_AE))
t->time.tm_mday = bcd2bin(alarmvals[2] & 0x7f);
- t->time.tm_wday = -1;
- t->time.tm_mon = -1;
- t->time.tm_year = -1;
-
t->enabled = !!(rx8010->ctrlreg & RX8010_CTRL_AIE);
t->pending = (flagreg & RX8010_FLAG_AF) && t->enabled;
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index 9f105efbc..2b85cc7a2 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -319,11 +319,6 @@ static int rx8025_read_alarm(struct device *dev, struct rtc_wkalrm *t)
t->time.tm_hour = bcd2bin(ald[1] & 0x1f) % 12
+ (ald[1] & 0x20 ? 12 : 0);
- t->time.tm_wday = -1;
- t->time.tm_mday = -1;
- t->time.tm_mon = -1;
- t->time.tm_year = -1;
-
dev_dbg(dev, "%s: date: %ds %dm %dh %dmd %dm %dy\n",
__func__,
t->time.tm_sec, t->time.tm_min, t->time.tm_hour,
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
index f40afdd0e..5dab4665c 100644
--- a/drivers/rtc/rtc-s35390a.c
+++ b/drivers/rtc/rtc-s35390a.c
@@ -15,6 +15,7 @@
#include <linux/bitrev.h>
#include <linux/bcd.h>
#include <linux/slab.h>
+#include <linux/delay.h>
#define S35390A_CMD_STATUS1 0
#define S35390A_CMD_STATUS2 1
@@ -34,10 +35,14 @@
#define S35390A_ALRM_BYTE_HOURS 1
#define S35390A_ALRM_BYTE_MINS 2
+/* flags for STATUS1 */
#define S35390A_FLAG_POC 0x01
#define S35390A_FLAG_BLD 0x02
+#define S35390A_FLAG_INT2 0x04
#define S35390A_FLAG_24H 0x40
#define S35390A_FLAG_RESET 0x80
+
+/* flag for STATUS2 */
#define S35390A_FLAG_TEST 0x01
#define S35390A_INT2_MODE_MASK 0xF0
@@ -94,19 +99,63 @@ static int s35390a_get_reg(struct s35390a *s35390a, int reg, char *buf, int len)
return 0;
}
-static int s35390a_reset(struct s35390a *s35390a)
+/*
+ * Returns <0 on error, 0 if the rtc is set up fine and 1 if the chip was reset.
+ * To preserve the information whether an irq is pending, the value read from
+ * STATUS1 is passed to the caller.
+ */
+static int s35390a_reset(struct s35390a *s35390a, char *status1)
{
- char buf[1];
-
- if (s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)) < 0)
- return -EIO;
-
- if (!(buf[0] & (S35390A_FLAG_POC | S35390A_FLAG_BLD)))
+ char buf;
+ int ret;
+ unsigned initcount = 0;
+
+ ret = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, status1, 1);
+ if (ret < 0)
+ return ret;
+
+ if (*status1 & S35390A_FLAG_POC)
+ /*
+ * Do not communicate for 0.5 seconds since the power-on
+ * detection circuit is in operation.
+ */
+ msleep(500);
+ else if (!(*status1 & S35390A_FLAG_BLD))
+ /*
+ * If both POC and BLD are unset everything is fine.
+ */
return 0;
- buf[0] |= (S35390A_FLAG_RESET | S35390A_FLAG_24H);
- buf[0] &= 0xf0;
- return s35390a_set_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf));
+ /*
+	 * At least one of POC and BLD is set, so reinitialise the chip. Keeping
+ * this information in the hardware to know later that the time isn't
+ * valid is unfortunately not possible because POC and BLD are cleared
+ * on read. So the reset is best done now.
+ *
+ * The 24H bit is kept over reset, so set it already here.
+ */
+initialize:
+ *status1 = S35390A_FLAG_24H;
+ buf = S35390A_FLAG_RESET | S35390A_FLAG_24H;
+ ret = s35390a_set_reg(s35390a, S35390A_CMD_STATUS1, &buf, 1);
+
+ if (ret < 0)
+ return ret;
+
+ ret = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, &buf, 1);
+ if (ret < 0)
+ return ret;
+
+ if (buf & (S35390A_FLAG_POC | S35390A_FLAG_BLD)) {
+ /* Try up to five times to reset the chip */
+ if (initcount < 5) {
+ ++initcount;
+ goto initialize;
+ } else
+ return -EIO;
+ }
+
+ return 1;
}
static int s35390a_disable_test_mode(struct s35390a *s35390a)
@@ -217,12 +266,12 @@ static int s35390a_set_alarm(struct i2c_client *client, struct rtc_wkalrm *alm)
alm->time.tm_min, alm->time.tm_hour, alm->time.tm_mday,
alm->time.tm_mon, alm->time.tm_year, alm->time.tm_wday);
- /* disable interrupt */
+ /* disable interrupt (which deasserts the irq line) */
err = s35390a_set_reg(s35390a, S35390A_CMD_STATUS2, &sts, sizeof(sts));
if (err < 0)
return err;
- /* clear pending interrupt, if any */
+ /* clear pending interrupt (in STATUS1 only), if any */
err = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, &sts, sizeof(sts));
if (err < 0)
return err;
@@ -242,6 +291,8 @@ static int s35390a_set_alarm(struct i2c_client *client, struct rtc_wkalrm *alm)
if (alm->time.tm_wday != -1)
buf[S35390A_ALRM_BYTE_WDAY] = bin2bcd(alm->time.tm_wday) | 0x80;
+ else
+ buf[S35390A_ALRM_BYTE_WDAY] = 0;
buf[S35390A_ALRM_BYTE_HOURS] = s35390a_hr2reg(s35390a,
alm->time.tm_hour) | 0x80;
@@ -269,23 +320,43 @@ static int s35390a_read_alarm(struct i2c_client *client, struct rtc_wkalrm *alm)
if (err < 0)
return err;
- if (bitrev8(sts) != S35390A_INT2_MODE_ALARM)
- return -EINVAL;
+ if ((bitrev8(sts) & S35390A_INT2_MODE_MASK) != S35390A_INT2_MODE_ALARM) {
+ /*
+ * When the alarm isn't enabled, the register to configure
+ * the alarm time isn't accessible.
+ */
+ alm->enabled = 0;
+ return 0;
+ } else {
+ alm->enabled = 1;
+ }
err = s35390a_get_reg(s35390a, S35390A_CMD_INT2_REG1, buf, sizeof(buf));
if (err < 0)
return err;
/* This chip returns the bits of each byte in reverse order */
- for (i = 0; i < 3; ++i) {
+ for (i = 0; i < 3; ++i)
buf[i] = bitrev8(buf[i]);
- buf[i] &= ~0x80;
- }
- alm->time.tm_wday = bcd2bin(buf[S35390A_ALRM_BYTE_WDAY]);
- alm->time.tm_hour = s35390a_reg2hr(s35390a,
- buf[S35390A_ALRM_BYTE_HOURS]);
- alm->time.tm_min = bcd2bin(buf[S35390A_ALRM_BYTE_MINS]);
+ /*
+	 * B0 of the three matching registers is an enable flag. Iff it is set,
+	 * the configured value is used for matching.
+ */
+ if (buf[S35390A_ALRM_BYTE_WDAY] & 0x80)
+ alm->time.tm_wday =
+ bcd2bin(buf[S35390A_ALRM_BYTE_WDAY] & ~0x80);
+
+ if (buf[S35390A_ALRM_BYTE_HOURS] & 0x80)
+ alm->time.tm_hour =
+ s35390a_reg2hr(s35390a,
+ buf[S35390A_ALRM_BYTE_HOURS] & ~0x80);
+
+ if (buf[S35390A_ALRM_BYTE_MINS] & 0x80)
+ alm->time.tm_min = bcd2bin(buf[S35390A_ALRM_BYTE_MINS] & ~0x80);
+
+ /* alarm triggers always at s=0 */
+ alm->time.tm_sec = 0;
dev_dbg(&client->dev, "%s: alm is mins=%d, hours=%d, wday=%d\n",
__func__, alm->time.tm_min, alm->time.tm_hour,
@@ -327,11 +398,11 @@ static struct i2c_driver s35390a_driver;
static int s35390a_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- int err;
+ int err, err_reset;
unsigned int i;
struct s35390a *s35390a;
struct rtc_time tm;
- char buf[1];
+ char buf, status1;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
err = -ENODEV;
@@ -360,29 +431,35 @@ static int s35390a_probe(struct i2c_client *client,
}
}
- err = s35390a_reset(s35390a);
- if (err < 0) {
+ err_reset = s35390a_reset(s35390a, &status1);
+ if (err_reset < 0) {
+ err = err_reset;
dev_err(&client->dev, "error resetting chip\n");
goto exit_dummy;
}
- err = s35390a_disable_test_mode(s35390a);
- if (err < 0) {
- dev_err(&client->dev, "error disabling test mode\n");
- goto exit_dummy;
- }
-
- err = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf));
- if (err < 0) {
- dev_err(&client->dev, "error checking 12/24 hour mode\n");
- goto exit_dummy;
- }
- if (buf[0] & S35390A_FLAG_24H)
+ if (status1 & S35390A_FLAG_24H)
s35390a->twentyfourhour = 1;
else
s35390a->twentyfourhour = 0;
- if (s35390a_get_datetime(client, &tm) < 0)
+ if (status1 & S35390A_FLAG_INT2) {
+ /* disable alarm (and maybe test mode) */
+ buf = 0;
+ err = s35390a_set_reg(s35390a, S35390A_CMD_STATUS2, &buf, 1);
+ if (err < 0) {
+			dev_err(&client->dev, "error disabling alarm\n");
+ goto exit_dummy;
+ }
+ } else {
+ err = s35390a_disable_test_mode(s35390a);
+ if (err < 0) {
+ dev_err(&client->dev, "error disabling test mode\n");
+ goto exit_dummy;
+ }
+ }
+
+ if (err_reset > 0 || s35390a_get_datetime(client, &tm) < 0)
dev_warn(&client->dev, "clock needs to be set\n");
device_set_wakeup_capable(&client->dev, 1);
@@ -395,6 +472,10 @@ static int s35390a_probe(struct i2c_client *client,
err = PTR_ERR(s35390a->rtc);
goto exit_dummy;
}
+
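+	/* forward an alarm interrupt that was pending at probe time */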
+ if (status1 & S35390A_FLAG_INT2)
+ rtc_update_irq(s35390a->rtc, 1, RTC_AF);
+
return 0;
exit_dummy:
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 4e823c4b7..d44fb34df 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -266,35 +266,23 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
/* decode the alarm enable field */
if (alm_en & S3C2410_RTCALM_SECEN)
alm_tm->tm_sec = bcd2bin(alm_tm->tm_sec);
- else
- alm_tm->tm_sec = -1;
if (alm_en & S3C2410_RTCALM_MINEN)
alm_tm->tm_min = bcd2bin(alm_tm->tm_min);
- else
- alm_tm->tm_min = -1;
if (alm_en & S3C2410_RTCALM_HOUREN)
alm_tm->tm_hour = bcd2bin(alm_tm->tm_hour);
- else
- alm_tm->tm_hour = -1;
if (alm_en & S3C2410_RTCALM_DAYEN)
alm_tm->tm_mday = bcd2bin(alm_tm->tm_mday);
- else
- alm_tm->tm_mday = -1;
if (alm_en & S3C2410_RTCALM_MONEN) {
alm_tm->tm_mon = bcd2bin(alm_tm->tm_mon);
alm_tm->tm_mon -= 1;
- } else {
- alm_tm->tm_mon = -1;
}
if (alm_en & S3C2410_RTCALM_YEAREN)
alm_tm->tm_year = bcd2bin(alm_tm->tm_year);
- else
- alm_tm->tm_year = -1;
return 0;
}
@@ -579,8 +567,6 @@ static int s3c_rtc_probe(struct platform_device *pdev)
s3c_rtc_setfreq(info, 1);
- s3c_rtc_disable_clk(info);
-
return 0;
err_nortc:
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index a45845a57..17b6235d6 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -481,7 +481,6 @@ static int sh_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
tm->tm_mon = sh_rtc_read_alarm_value(rtc, RMONAR);
if (tm->tm_mon > 0)
tm->tm_mon -= 1; /* RTC is 1-12, tm_mon is 0-11 */
- tm->tm_year = 0xffff;
wkalrm->enabled = (readb(rtc->regbase + RCR1) & RCR1_AIE) ? 1 : 0;
@@ -500,52 +499,13 @@ static inline void sh_rtc_write_alarm_value(struct sh_rtc *rtc,
writeb(bin2bcd(value) | AR_ENB, rtc->regbase + reg_off);
}
-static int sh_rtc_check_alarm(struct rtc_time *tm)
-{
- /*
- * The original rtc says anything > 0xc0 is "don't care" or "match
- * all" - most users use 0xff but rtc-dev uses -1 for the same thing.
- * The original rtc doesn't support years - some things use -1 and
- * some 0xffff. We use -1 to make out tests easier.
- */
- if (tm->tm_year == 0xffff)
- tm->tm_year = -1;
- if (tm->tm_mon >= 0xff)
- tm->tm_mon = -1;
- if (tm->tm_mday >= 0xff)
- tm->tm_mday = -1;
- if (tm->tm_wday >= 0xff)
- tm->tm_wday = -1;
- if (tm->tm_hour >= 0xff)
- tm->tm_hour = -1;
- if (tm->tm_min >= 0xff)
- tm->tm_min = -1;
- if (tm->tm_sec >= 0xff)
- tm->tm_sec = -1;
-
- if (tm->tm_year > 9999 ||
- tm->tm_mon >= 12 ||
- tm->tm_mday == 0 || tm->tm_mday >= 32 ||
- tm->tm_wday >= 7 ||
- tm->tm_hour >= 24 ||
- tm->tm_min >= 60 ||
- tm->tm_sec >= 60)
- return -EINVAL;
-
- return 0;
-}
-
static int sh_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
{
struct platform_device *pdev = to_platform_device(dev);
struct sh_rtc *rtc = platform_get_drvdata(pdev);
unsigned int rcr1;
struct rtc_time *tm = &wkalrm->time;
- int mon, err;
-
- err = sh_rtc_check_alarm(tm);
- if (unlikely(err < 0))
- return err;
+ int mon;
spin_lock_irq(&rtc->lock);
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index 60232bd36..15ac597d5 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -179,12 +179,6 @@ static int tegra_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
if (sec == 0) {
/* alarm is disabled. */
alarm->enabled = 0;
- alarm->time.tm_mon = -1;
- alarm->time.tm_mday = -1;
- alarm->time.tm_year = -1;
- alarm->time.tm_hour = -1;
- alarm->time.tm_min = -1;
- alarm->time.tm_sec = -1;
} else {
/* alarm is enabled. */
alarm->enabled = 1;
diff --git a/drivers/rtc/rtc-v3020.c b/drivers/rtc/rtc-v3020.c
index 7a0436329..1f3117b5a 100644
--- a/drivers/rtc/rtc-v3020.c
+++ b/drivers/rtc/rtc-v3020.c
@@ -25,7 +25,7 @@
#include <linux/rtc.h>
#include <linux/types.h>
#include <linux/bcd.h>
-#include <linux/rtc-v3020.h>
+#include <linux/platform_data/rtc-v3020.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/slab.h>
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 42b34cd1f..98bbec44b 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -228,7 +228,7 @@ check_XRC (struct ccw1 *de_ccw,
data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */
data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
- rc = get_sync_clock(&data->ep_sys_time);
+ rc = get_phys_clock(&data->ep_sys_time);
/* Ignore return code if sync clock is switched off. */
if (rc == -EOPNOTSUPP || rc == -EACCES)
rc = 0;
@@ -339,7 +339,7 @@ static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata,
pfxdata->define_extent.ga_extended |= 0x02; /* 'Extended Parameter' */
pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid' */
- rc = get_sync_clock(&pfxdata->define_extent.ep_sys_time);
+ rc = get_phys_clock(&pfxdata->define_extent.ep_sys_time);
/* Ignore return code if sync clock is switched off. */
if (rc == -EOPNOTSUPP || rc == -EACCES)
rc = 0;
@@ -5078,6 +5078,8 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device,
return PTR_ERR(cqr);
}
+ cqr->lpm = lpum;
+retry:
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
@@ -5122,6 +5124,14 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device,
(prssdp + 1);
memcpy(messages, message_buf,
sizeof(struct dasd_rssd_messages));
+ } else if (cqr->lpm) {
+ /*
+		 * On z/VM we might not be able to do I/O on the requested path,
+		 * but we can get the required information on any path, so retry
+		 * with an open path mask.
+ */
+ cqr->lpm = 0;
+ goto retry;
} else
DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
"Reading messages failed with rc=%d\n"
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 31d544a87..e2fa759bf 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -45,7 +45,6 @@ int dasd_gendisk_alloc(struct dasd_block *block)
gdp->major = DASD_MAJOR;
gdp->first_minor = base->devindex << DASD_PARTN_BITS;
gdp->fops = &dasd_device_operations;
- gdp->driverfs_dev = &base->cdev->dev;
/*
* Set device name.
@@ -76,7 +75,7 @@ int dasd_gendisk_alloc(struct dasd_block *block)
gdp->queue = block->request_queue;
block->gdp = gdp;
set_capacity(block->gdp, 0);
- add_disk(block->gdp);
+ device_add_disk(&base->cdev->dev, block->gdp);
return 0;
}
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index bed53c46d..9d66b4fb1 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -31,7 +31,7 @@ static void dcssblk_release(struct gendisk *disk, fmode_t mode);
static blk_qc_t dcssblk_make_request(struct request_queue *q,
struct bio *bio);
static long dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
- void __pmem **kaddr, pfn_t *pfn, long size);
+ void **kaddr, pfn_t *pfn, long size);
static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";
@@ -615,9 +615,9 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
dev_info->dcssblk_queue = blk_alloc_queue(GFP_KERNEL);
dev_info->gd->queue = dev_info->dcssblk_queue;
dev_info->gd->private_data = dev_info;
- dev_info->gd->driverfs_dev = &dev_info->dev;
blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096);
+ queue_flag_set_unlocked(QUEUE_FLAG_DAX, dev_info->dcssblk_queue);
seg_byte_size = (dev_info->end - dev_info->start + 1);
set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
@@ -655,7 +655,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
goto put_dev;
get_device(&dev_info->dev);
- add_disk(dev_info->gd);
+ device_add_disk(&dev_info->dev, dev_info->gd);
switch (dev_info->segment_type) {
case SEG_TYPE_SR:
@@ -884,7 +884,7 @@ fail:
static long
dcssblk_direct_access (struct block_device *bdev, sector_t secnum,
- void __pmem **kaddr, pfn_t *pfn, long size)
+ void **kaddr, pfn_t *pfn, long size)
{
struct dcssblk_dev_info *dev_info;
unsigned long offset, dev_sz;
@@ -894,7 +894,7 @@ dcssblk_direct_access (struct block_device *bdev, sector_t secnum,
return -ENODEV;
dev_sz = dev_info->end - dev_info->start;
offset = secnum * 512;
- *kaddr = (void __pmem *) (dev_info->start + offset);
+ *kaddr = (void *) dev_info->start + offset;
*pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset), PFN_DEV);
return dev_sz - offset;
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index e6f54d3b8..9f16ea696 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -512,7 +512,6 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
goto out_queue;
rq->queuedata = scmdev;
- bdev->gendisk->driverfs_dev = &scmdev->dev;
bdev->gendisk->private_data = scmdev;
bdev->gendisk->fops = &scm_blk_devops;
bdev->gendisk->queue = rq;
@@ -531,7 +530,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
/* 512 byte sectors */
set_capacity(bdev->gendisk, scmdev->size >> 9);
- add_disk(bdev->gendisk);
+ device_add_disk(&scmdev->dev, bdev->gendisk);
return 0;
out_queue:
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index ef04a9f7a..7b9c50aa4 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -438,18 +438,9 @@ do_kdgkb_ioctl(struct kbd_data *kbd, struct kbsentry __user *u_kbs,
return -EFAULT;
if (len > sizeof(u_kbs->kb_string))
return -EINVAL;
- p = kmalloc(len, GFP_KERNEL);
- if (!p)
- return -ENOMEM;
- if (copy_from_user(p, u_kbs->kb_string, len)) {
- kfree(p);
- return -EFAULT;
- }
- /*
- * Make sure the string is terminated by 0. User could have
- * modified it between us running strnlen_user() and copying it.
- */
- p[len - 1] = 0;
+ p = memdup_user_nul(u_kbs->kb_string, len);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
kfree(kbd->func_table[kb_func]);
kbd->func_table[kb_func] = p;
break;
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
index 5880def98..6037bc87e 100644
--- a/drivers/s390/char/sclp_con.c
+++ b/drivers/s390/char/sclp_con.c
@@ -319,7 +319,8 @@ sclp_console_init(void)
int i;
int rc;
- if (!CONSOLE_IS_SCLP)
+ /* SCLP consoles are handled together */
+ if (!(CONSOLE_IS_SCLP || CONSOLE_IS_VT220))
return 0;
rc = sclp_rw_init();
if (rc)
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index 2ced50ccc..1406fb688 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -47,7 +47,7 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
int cpu;
struct device *dev;
- s390_adjust_jiffies();
+ s390_update_cpu_mhz();
pr_info("CPU capability may have changed\n");
get_online_cpus();
for_each_online_cpu(cpu) {
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index 0ac520dd1..c71df0c7d 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -46,7 +46,8 @@ struct read_info_sccb {
u64 rnmax2; /* 104-111 */
u8 _pad_112[116 - 112]; /* 112-115 */
u8 fac116; /* 116 */
- u8 _pad_117[119 - 117]; /* 117-118 */
+ u8 fac117; /* 117 */
+ u8 _pad_118; /* 118 */
u8 fac119; /* 119 */
u16 hcpua; /* 120-121 */
u8 _pad_122[124 - 122]; /* 122-123 */
@@ -114,7 +115,12 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
sclp.facilities = sccb->facilities;
sclp.has_sprp = !!(sccb->fac84 & 0x02);
sclp.has_core_type = !!(sccb->fac84 & 0x01);
+ sclp.has_gsls = !!(sccb->fac85 & 0x80);
+ sclp.has_64bscao = !!(sccb->fac116 & 0x80);
+ sclp.has_cmma = !!(sccb->fac116 & 0x40);
sclp.has_esca = !!(sccb->fac116 & 0x08);
+ sclp.has_pfmfi = !!(sccb->fac117 & 0x40);
+ sclp.has_ibs = !!(sccb->fac117 & 0x20);
sclp.has_hvs = !!(sccb->fac119 & 0x80);
if (sccb->fac85 & 0x02)
S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
@@ -145,6 +151,10 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
sclp.has_siif = cpue->siif;
sclp.has_sigpif = cpue->sigpif;
sclp.has_sief2 = cpue->sief2;
+ sclp.has_gpere = cpue->gpere;
+ sclp.has_ib = cpue->ib;
+ sclp.has_cei = cpue->cei;
+ sclp.has_skey = cpue->skey;
break;
}
diff --git a/drivers/s390/char/sclp_ocf.c b/drivers/s390/char/sclp_ocf.c
index 2553db0fd..f59b71776 100644
--- a/drivers/s390/char/sclp_ocf.c
+++ b/drivers/s390/char/sclp_ocf.c
@@ -26,7 +26,7 @@
#define OCF_LENGTH_CPC_NAME 8UL
static char hmc_network[OCF_LENGTH_HMC_NETWORK + 1];
-static char cpc_name[OCF_LENGTH_CPC_NAME + 1];
+static char cpc_name[OCF_LENGTH_CPC_NAME]; /* in EBCDIC */
static DEFINE_SPINLOCK(sclp_ocf_lock);
static struct work_struct sclp_ocf_change_work;
@@ -72,9 +72,8 @@ static void sclp_ocf_handler(struct evbuf_header *evbuf)
}
if (cpc) {
size = min(OCF_LENGTH_CPC_NAME, (size_t) cpc->length);
+ memset(cpc_name, 0, OCF_LENGTH_CPC_NAME);
memcpy(cpc_name, cpc + 1, size);
- EBCASC(cpc_name, size);
- cpc_name[size] = 0;
}
spin_unlock(&sclp_ocf_lock);
schedule_work(&sclp_ocf_change_work);
@@ -85,15 +84,23 @@ static struct sclp_register sclp_ocf_event = {
.receiver_fn = sclp_ocf_handler,
};
+void sclp_ocf_cpc_name_copy(char *dst)
+{
+ spin_lock_irq(&sclp_ocf_lock);
+ memcpy(dst, cpc_name, OCF_LENGTH_CPC_NAME);
+ spin_unlock_irq(&sclp_ocf_lock);
+}
+EXPORT_SYMBOL(sclp_ocf_cpc_name_copy);
+
static ssize_t cpc_name_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
- int rc;
+ char name[OCF_LENGTH_CPC_NAME + 1];
- spin_lock_irq(&sclp_ocf_lock);
- rc = snprintf(page, PAGE_SIZE, "%s\n", cpc_name);
- spin_unlock_irq(&sclp_ocf_lock);
- return rc;
+ sclp_ocf_cpc_name_copy(name);
+ name[OCF_LENGTH_CPC_NAME] = 0;
+ EBCASC(name, OCF_LENGTH_CPC_NAME);
+ return snprintf(page, PAGE_SIZE, "%s\n", name);
}
static struct kobj_attribute cpc_name_attr =
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 5043ecfa1..16992e2a4 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -185,7 +185,7 @@ static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
{
if (ipl_block) {
diag308(DIAG308_SET, ipl_block);
- diag308(DIAG308_IPL, NULL);
+ diag308(DIAG308_LOAD_CLEAR, NULL);
}
return count;
}
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 50597f952..46be25c74 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -37,8 +37,7 @@ enum cfg_task_t {
/* Map for pending configure tasks. */
static enum cfg_task_t chp_cfg_task[__MAX_CSSID + 1][__MAX_CHPID + 1];
-static DEFINE_MUTEX(cfg_lock);
-static int cfg_busy;
+static DEFINE_SPINLOCK(cfg_lock);
/* Map for channel-path status. */
static struct sclp_chp_info chp_info;
@@ -47,8 +46,6 @@ static DEFINE_MUTEX(info_lock);
/* Time after which channel-path status may be outdated. */
static unsigned long chp_info_expires;
-/* Workqueue to perform pending configure tasks. */
-static struct workqueue_struct *chp_wq;
static struct work_struct cfg_work;
/* Wait queue for configure completion events. */
@@ -428,11 +425,14 @@ int chp_update_desc(struct channel_path *chp)
if (rc)
return rc;
- rc = chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1);
- if (rc)
- return rc;
+ /*
+ * Fetching the following data is optional. Not all machines or
+ * hypervisors implement the required chsc commands.
+ */
+ chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1);
+ chsc_get_channel_measurement_chars(chp);
- return chsc_get_channel_measurement_chars(chp);
+ return 0;
}
/**
@@ -665,6 +665,20 @@ static void cfg_set_task(struct chp_id chpid, enum cfg_task_t cfg)
chp_cfg_task[chpid.cssid][chpid.id] = cfg;
}
+/* Fetch the first configure task. Set chpid accordingly. */
+static enum cfg_task_t chp_cfg_fetch_task(struct chp_id *chpid)
+{
+ enum cfg_task_t t = cfg_none;
+
+ chp_id_for_each(chpid) {
+ t = cfg_get_task(*chpid);
+ if (t != cfg_none)
+ break;
+ }
+
+ return t;
+}
+
/* Perform one configure/deconfigure request. Reschedule work function until
* last request. */
static void cfg_func(struct work_struct *work)
@@ -673,16 +687,9 @@ static void cfg_func(struct work_struct *work)
enum cfg_task_t t;
int rc;
- mutex_lock(&cfg_lock);
- t = cfg_none;
- chp_id_for_each(&chpid) {
- t = cfg_get_task(chpid);
- if (t != cfg_none) {
- cfg_set_task(chpid, cfg_none);
- break;
- }
- }
- mutex_unlock(&cfg_lock);
+ spin_lock(&cfg_lock);
+ t = chp_cfg_fetch_task(&chpid);
+ spin_unlock(&cfg_lock);
switch (t) {
case cfg_configure:
@@ -708,13 +715,14 @@ static void cfg_func(struct work_struct *work)
case cfg_none:
/* Get updated information after last change. */
info_update();
- mutex_lock(&cfg_lock);
- cfg_busy = 0;
- mutex_unlock(&cfg_lock);
wake_up_interruptible(&cfg_wait_queue);
return;
}
- queue_work(chp_wq, &cfg_work);
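+	/* clear the task unless it was rescheduled while being processed */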
+ spin_lock(&cfg_lock);
+ if (t == cfg_get_task(chpid))
+ cfg_set_task(chpid, cfg_none);
+ spin_unlock(&cfg_lock);
+ schedule_work(&cfg_work);
}
/**
@@ -728,11 +736,10 @@ void chp_cfg_schedule(struct chp_id chpid, int configure)
{
CIO_MSG_EVENT(2, "chp_cfg_sched%x.%02x=%d\n", chpid.cssid, chpid.id,
configure);
- mutex_lock(&cfg_lock);
+ spin_lock(&cfg_lock);
cfg_set_task(chpid, configure ? cfg_configure : cfg_deconfigure);
- cfg_busy = 1;
- mutex_unlock(&cfg_lock);
- queue_work(chp_wq, &cfg_work);
+ spin_unlock(&cfg_lock);
+ schedule_work(&cfg_work);
}
/**
@@ -745,15 +752,27 @@ void chp_cfg_schedule(struct chp_id chpid, int configure)
void chp_cfg_cancel_deconfigure(struct chp_id chpid)
{
CIO_MSG_EVENT(2, "chp_cfg_cancel:%x.%02x\n", chpid.cssid, chpid.id);
- mutex_lock(&cfg_lock);
+ spin_lock(&cfg_lock);
if (cfg_get_task(chpid) == cfg_deconfigure)
cfg_set_task(chpid, cfg_none);
- mutex_unlock(&cfg_lock);
+ spin_unlock(&cfg_lock);
+}
+
+static bool cfg_idle(void)
+{
+ struct chp_id chpid;
+ enum cfg_task_t t;
+
+ spin_lock(&cfg_lock);
+ t = chp_cfg_fetch_task(&chpid);
+ spin_unlock(&cfg_lock);
+
+ return t == cfg_none;
}
static int cfg_wait_idle(void)
{
- if (wait_event_interruptible(cfg_wait_queue, !cfg_busy))
+ if (wait_event_interruptible(cfg_wait_queue, cfg_idle()))
return -ERESTARTSYS;
return 0;
}
@@ -766,11 +785,6 @@ static int __init chp_init(void)
ret = crw_register_handler(CRW_RSC_CPATH, chp_process_crw);
if (ret)
return ret;
- chp_wq = create_singlethread_workqueue("cio_chp");
- if (!chp_wq) {
- crw_unregister_handler(CRW_RSC_CPATH);
- return -ENOMEM;
- }
INIT_WORK(&cfg_work, cfg_func);
init_waitqueue_head(&cfg_wait_queue);
if (info_update())
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index af0232290..bb5a68226 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -4,7 +4,7 @@
*/
#ifndef S390_CHP_H
-#define S390_CHP_H S390_CHP_H
+#define S390_CHP_H
#include <linux/types.h>
#include <linux/device.h>
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index c424c0c73..940e725bd 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -907,7 +907,8 @@ int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
struct chsc_scpd *scpd_area;
int ccode, ret;
- if ((rfmt == 1) && !css_general_characteristics.fcs)
+ if ((rfmt == 1 || rfmt == 0) && c == 1 &&
+ !css_general_characteristics.fcs)
return -EINVAL;
if ((rfmt == 2) && !css_general_characteristics.cib)
return -EINVAL;
@@ -939,7 +940,6 @@ EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
int chsc_determine_base_channel_path_desc(struct chp_id chpid,
struct channel_path_desc *desc)
{
- struct chsc_response_struct *chsc_resp;
struct chsc_scpd *scpd_area;
unsigned long flags;
int ret;
@@ -949,8 +949,8 @@ int chsc_determine_base_channel_path_desc(struct chp_id chpid,
ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
if (ret)
goto out;
- chsc_resp = (void *)&scpd_area->response;
- memcpy(desc, &chsc_resp->data, sizeof(*desc));
+
+ memcpy(desc, scpd_area->data, sizeof(*desc));
out:
spin_unlock_irqrestore(&chsc_page_lock, flags);
return ret;
@@ -959,18 +959,17 @@ out:
int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
struct channel_path_desc_fmt1 *desc)
{
- struct chsc_response_struct *chsc_resp;
struct chsc_scpd *scpd_area;
unsigned long flags;
int ret;
spin_lock_irqsave(&chsc_page_lock, flags);
scpd_area = chsc_page;
- ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
+ ret = chsc_determine_channel_path_desc(chpid, 0, 1, 1, 0, scpd_area);
if (ret)
goto out;
- chsc_resp = (void *)&scpd_area->response;
- memcpy(desc, &chsc_resp->data, sizeof(*desc));
+
+ memcpy(desc, scpd_area->data, sizeof(*desc));
out:
spin_unlock_irqrestore(&chsc_page_lock, flags);
return ret;
@@ -1020,7 +1019,7 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
chp->cmg = -1;
if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm)
- return 0;
+ return -EINVAL;
spin_lock_irq(&chsc_page_lock);
memset(chsc_page, 0, PAGE_SIZE);
@@ -1176,7 +1175,7 @@ exit:
EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);
-int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
+int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta)
{
struct {
struct chsc_header request;
@@ -1186,7 +1185,9 @@ int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
unsigned int ctrl : 16;
unsigned int rsvd2[5];
struct chsc_header response;
- unsigned int rsvd3[7];
+ unsigned int rsvd3[3];
+ u64 clock_delta;
+ unsigned int rsvd4[2];
} __attribute__ ((packed)) *rr;
int rc;
@@ -1200,6 +1201,8 @@ int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
if (rc)
return -EIO;
rc = (rr->response.code == 0x0001) ? 0 : -EIO;
+ if (clock_delta)
+ *clock_delta = rr->clock_delta;
return rc;
}
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 0de134c3a..67c87b6e6 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -112,8 +112,9 @@ struct chsc_scpd {
u32 last_chpid:8;
u32 zeroes1;
struct chsc_header response;
- u8 data[PAGE_SIZE - 20];
-} __attribute__ ((packed));
+ u32:32;
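+	/* start of the variable-size response data */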
+ u8 data[0];
+} __packed;
struct chsc_sda_area {
struct chsc_header request;
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index b6f12c2bb..735052ecd 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -552,7 +552,7 @@ static int chsc_ioctl_info_cu(void __user *user_cd)
goto out_free;
}
scucd_area->request.length = 0x0010;
- scucd_area->request.code = 0x0028;
+ scucd_area->request.code = 0x0026;
scucd_area->m = cd->m;
scucd_area->fmt1 = cd->fmt;
scucd_area->cssid = cd->cssid;
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 2a34eb5f6..268aa23af 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -164,6 +164,9 @@ static inline u64 time_to_avg_nsec(u32 value, u32 count)
return ret;
}
+#define CMF_OFF 0
+#define CMF_ON 2
+
/*
* Activate or deactivate the channel monitor. When area is NULL,
* the monitor is deactivated. The channel monitor needs to
@@ -176,7 +179,7 @@ static inline void cmf_activate(void *area, unsigned int onoff)
register long __gpr1 asm("1");
__gpr2 = area;
- __gpr1 = onoff ? 2 : 0;
+ __gpr1 = onoff;
/* activate channel measurement */
asm("schm" : : "d" (__gpr2), "d" (__gpr1) );
}
@@ -587,7 +590,7 @@ static int alloc_cmb(struct ccw_device *cdev)
/* everything ok */
memset(mem, 0, size);
cmb_area.mem = mem;
- cmf_activate(cmb_area.mem, 1);
+ cmf_activate(cmb_area.mem, CMF_ON);
}
}
@@ -621,7 +624,7 @@ static void free_cmb(struct ccw_device *cdev)
if (list_empty(&cmb_area.list)) {
ssize_t size;
size = sizeof(struct cmb) * cmb_area.num_channels;
- cmf_activate(NULL, 0);
+ cmf_activate(NULL, CMF_OFF);
free_pages((unsigned long)cmb_area.mem, get_order(size));
cmb_area.mem = NULL;
}
@@ -841,7 +844,7 @@ static int alloc_cmbe(struct ccw_device *cdev)
/* activate global measurement if this is the first channel */
if (list_empty(&cmb_area.list))
- cmf_activate(NULL, 1);
+ cmf_activate(NULL, CMF_ON);
list_add_tail(&cdev->private->cmb_list, &cmb_area.list);
spin_unlock_irq(cdev->ccwlock);
@@ -878,7 +881,7 @@ static void free_cmbe(struct ccw_device *cdev)
/* deactivate global measurement if this is the last channel */
list_del_init(&cdev->private->cmb_list);
if (list_empty(&cmb_area.list))
- cmf_activate(NULL, 0);
+ cmf_activate(NULL, CMF_OFF);
spin_unlock_irq(cdev->ccwlock);
spin_unlock(&cmb_area.lock);
}
@@ -1332,7 +1335,7 @@ void cmf_reactivate(void)
{
spin_lock(&cmb_area.lock);
if (!list_empty(&cmb_area.list))
- cmf_activate(cmb_area.mem, 1);
+ cmf_activate(cmb_area.mem, CMF_ON);
spin_unlock(&cmb_area.lock);
}
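CMF_ON (2) and CMF_OFF (0) name the value that cmf_activate() loads into GPR 1 for the schm instruction; with the onoff ? 2 : 0 mapping removed from the helper, every call site now states the operand explicitly. A sketch of the resulting convention, mirroring cmf_reactivate() above (error handling omitted):

/* Sketch: the second argument goes to GPR 1 of schm unmodified,
 * so only the named constants are meaningful values. */
static void example_toggle_measurement(int enable)
{
        spin_lock(&cmb_area.lock);
        if (enable && !list_empty(&cmb_area.list))
                cmf_activate(cmb_area.mem, CMF_ON);
        else
                cmf_activate(NULL, CMF_OFF);
        spin_unlock(&cmb_area.lock);
}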
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 7ada078ff..6a58bc8f4 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -762,7 +762,6 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
priv->state = DEV_STATE_NOT_OPER;
priv->dev_id.devno = sch->schib.pmcw.dev;
priv->dev_id.ssid = sch->schid.ssid;
- priv->schid = sch->schid;
INIT_WORK(&priv->todo_work, ccw_device_todo);
INIT_LIST_HEAD(&priv->cmb_list);
@@ -1000,7 +999,6 @@ static int ccw_device_move_to_sch(struct ccw_device *cdev,
put_device(&old_sch->dev);
/* Initialize new subchannel. */
spin_lock_irq(sch->lock);
- cdev->private->schid = sch->schid;
cdev->ccwlock = sch->lock;
if (!sch_is_pseudo_sch(sch))
sch_set_cdev(sch, cdev);
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index a69f702a2..877d9f601 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -97,7 +97,7 @@ void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags)
}
/**
- * ccw_device_is_pathgroup - determine if paths to this device are grouped
+ * ccw_device_is_pathgroup() - determine if paths to this device are grouped
* @cdev: ccw device
*
* Return non-zero if there is a path group, zero otherwise.
@@ -109,7 +109,7 @@ int ccw_device_is_pathgroup(struct ccw_device *cdev)
EXPORT_SYMBOL(ccw_device_is_pathgroup);
/**
- * ccw_device_is_multipath - determine if device is operating in multipath mode
+ * ccw_device_is_multipath() - determine if device is operating in multipath mode
* @cdev: ccw device
*
* Return non-zero if device is operating in multipath mode, zero otherwise.
@@ -457,7 +457,7 @@ __u8 ccw_device_get_path_mask(struct ccw_device *cdev)
}
/**
- * chp_get_chp_desc - return newly allocated channel-path descriptor
+ * ccw_device_get_chp_desc() - return newly allocated channel-path descriptor
* @cdev: device to obtain the descriptor for
* @chp_idx: index of the channel path
*
@@ -477,7 +477,7 @@ struct channel_path_desc *ccw_device_get_chp_desc(struct ccw_device *cdev,
}
/**
- * ccw_device_get_id - obtain a ccw device id
+ * ccw_device_get_id() - obtain a ccw device id
* @cdev: device to obtain the id for
* @dev_id: where to fill in the values
*/
@@ -488,7 +488,7 @@ void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id)
EXPORT_SYMBOL(ccw_device_get_id);
/**
- * ccw_device_tm_start_key - perform start function
+ * ccw_device_tm_start_key() - perform start function
* @cdev: ccw device on which to perform the start function
* @tcw: transport-command word to be started
* @intparm: user defined parameter to be passed to the interrupt handler
@@ -533,7 +533,7 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
EXPORT_SYMBOL(ccw_device_tm_start_key);
/**
- * ccw_device_tm_start_timeout_key - perform start function
+ * ccw_device_tm_start_timeout_key() - perform start function
* @cdev: ccw device on which to perform the start function
* @tcw: transport-command word to be started
* @intparm: user defined parameter to be passed to the interrupt handler
@@ -559,7 +559,7 @@ int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
EXPORT_SYMBOL(ccw_device_tm_start_timeout_key);
/**
- * ccw_device_tm_start - perform start function
+ * ccw_device_tm_start() - perform start function
* @cdev: ccw device on which to perform the start function
* @tcw: transport-command word to be started
* @intparm: user defined parameter to be passed to the interrupt handler
@@ -577,7 +577,7 @@ int ccw_device_tm_start(struct ccw_device *cdev, struct tcw *tcw,
EXPORT_SYMBOL(ccw_device_tm_start);
/**
- * ccw_device_tm_start_timeout - perform start function
+ * ccw_device_tm_start_timeout() - perform start function
* @cdev: ccw device on which to perform the start function
* @tcw: transport-command word to be started
* @intparm: user defined parameter to be passed to the interrupt handler
@@ -596,7 +596,7 @@ int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw,
EXPORT_SYMBOL(ccw_device_tm_start_timeout);
/**
- * ccw_device_get_mdc - accumulate max data count
+ * ccw_device_get_mdc() - accumulate max data count
* @cdev: ccw device for which the max data count is accumulated
* @mask: mask of paths to use
*
@@ -642,7 +642,7 @@ int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
EXPORT_SYMBOL(ccw_device_get_mdc);
/**
- * ccw_device_tm_intrg - perform interrogate function
+ * ccw_device_tm_intrg() - perform interrogate function
* @cdev: ccw device on which to perform the interrogate function
*
* Perform an interrogate function on the given ccw device. Return zero on
@@ -664,7 +664,7 @@ int ccw_device_tm_intrg(struct ccw_device *cdev)
EXPORT_SYMBOL(ccw_device_tm_intrg);
/**
- * ccw_device_get_schid - obtain a subchannel id
+ * ccw_device_get_schid() - obtain a subchannel id
* @cdev: device to obtain the id for
* @schid: where to fill in the values
*/
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 15b56a15d..9bc351237 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -26,6 +26,7 @@
static void
ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
char dbf_text[15];
if (!scsw_is_valid_cstat(&irb->scsw) ||
@@ -36,10 +37,10 @@ ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
"received"
" ... device %04x on subchannel 0.%x.%04x, dev_stat "
": %02X sch_stat : %02X\n",
- cdev->private->dev_id.devno, cdev->private->schid.ssid,
- cdev->private->schid.sch_no,
+ cdev->private->dev_id.devno, sch->schid.ssid,
+ sch->schid.sch_no,
scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw));
- sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no);
+ sprintf(dbf_text, "chk%x", sch->schid.sch_no);
CIO_TRACE_EVENT(0, dbf_text);
CIO_HEX_EVENT(0, irb, sizeof(struct irb));
}
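With the cached schid field removed from ccw_device_private (see the io_sch.h hunk below), code that needs the subchannel id derives it from the device-model parent, which stays correct even after the device moves to another subchannel. The idiom, sketched:

/* Sketch: to_subchannel() is the usual container_of() accessor on the
 * embedded struct device; a ccw_device's parent is its subchannel. */
static struct subchannel_id example_schid(struct ccw_device *cdev)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);

        return sch->schid;
}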
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h
index 22b581046..89a787790 100644
--- a/drivers/s390/cio/idset.h
+++ b/drivers/s390/cio/idset.h
@@ -4,7 +4,7 @@
*/
#ifndef S390_IDSET_H
-#define S390_IDSET_H S390_IDSET_H
+#define S390_IDSET_H
#include <asm/schid.h>
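The guard fix drops a self-referential macro body; only the fact that the macro is defined matters for the #ifndef test, so the conventional empty definition is used. The canonical shape, for reference:

/* Standard include guard; the body of the #define is irrelevant
 * to the guard and is therefore left empty. */
#ifndef S390_IDSET_H
#define S390_IDSET_H

/* declarations ... */

#endif /* S390_IDSET_H */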
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 8975060af..220f49145 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -120,7 +120,6 @@ struct ccw_device_private {
int state; /* device state */
atomic_t onoff;
struct ccw_dev_id dev_id; /* device id */
- struct subchannel_id schid; /* subchannel number */
struct ccw_request req; /* internal I/O request */
int iretry;
u8 pgid_valid_mask; /* mask of valid PGIDs */
diff --git a/drivers/s390/cio/ioasm.c b/drivers/s390/cio/ioasm.c
index 989848186..8225da619 100644
--- a/drivers/s390/cio/ioasm.c
+++ b/drivers/s390/cio/ioasm.c
@@ -12,7 +12,7 @@
#include "orb.h"
#include "cio.h"
-int stsch(struct subchannel_id schid, struct schib *addr)
+static inline int __stsch(struct subchannel_id schid, struct schib *addr)
{
register struct subchannel_id reg1 asm ("1") = schid;
int ccode = -EIO;
@@ -26,13 +26,21 @@ int stsch(struct subchannel_id schid, struct schib *addr)
: "+d" (ccode), "=m" (*addr)
: "d" (reg1), "a" (addr)
: "cc");
+ return ccode;
+}
+
+int stsch(struct subchannel_id schid, struct schib *addr)
+{
+ int ccode;
+
+ ccode = __stsch(schid, addr);
trace_s390_cio_stsch(schid, addr, ccode);
return ccode;
}
EXPORT_SYMBOL(stsch);
-int msch(struct subchannel_id schid, struct schib *addr)
+static inline int __msch(struct subchannel_id schid, struct schib *addr)
{
register struct subchannel_id reg1 asm ("1") = schid;
int ccode = -EIO;
@@ -46,12 +54,20 @@ int msch(struct subchannel_id schid, struct schib *addr)
: "+d" (ccode)
: "d" (reg1), "a" (addr), "m" (*addr)
: "cc");
+ return ccode;
+}
+
+int msch(struct subchannel_id schid, struct schib *addr)
+{
+ int ccode;
+
+ ccode = __msch(schid, addr);
trace_s390_cio_msch(schid, addr, ccode);
return ccode;
}
-int tsch(struct subchannel_id schid, struct irb *addr)
+static inline int __tsch(struct subchannel_id schid, struct irb *addr)
{
register struct subchannel_id reg1 asm ("1") = schid;
int ccode;
@@ -63,12 +79,20 @@ int tsch(struct subchannel_id schid, struct irb *addr)
: "=d" (ccode), "=m" (*addr)
: "d" (reg1), "a" (addr)
: "cc");
+ return ccode;
+}
+
+int tsch(struct subchannel_id schid, struct irb *addr)
+{
+ int ccode;
+
+ ccode = __tsch(schid, addr);
trace_s390_cio_tsch(schid, addr, ccode);
return ccode;
}
-int ssch(struct subchannel_id schid, union orb *addr)
+static inline int __ssch(struct subchannel_id schid, union orb *addr)
{
register struct subchannel_id reg1 asm("1") = schid;
int ccode = -EIO;
@@ -82,13 +106,21 @@ int ssch(struct subchannel_id schid, union orb *addr)
: "+d" (ccode)
: "d" (reg1), "a" (addr), "m" (*addr)
: "cc", "memory");
+ return ccode;
+}
+
+int ssch(struct subchannel_id schid, union orb *addr)
+{
+ int ccode;
+
+ ccode = __ssch(schid, addr);
trace_s390_cio_ssch(schid, addr, ccode);
return ccode;
}
EXPORT_SYMBOL(ssch);
-int csch(struct subchannel_id schid)
+static inline int __csch(struct subchannel_id schid)
{
register struct subchannel_id reg1 asm("1") = schid;
int ccode;
@@ -100,6 +132,14 @@ int csch(struct subchannel_id schid)
: "=d" (ccode)
: "d" (reg1)
: "cc");
+ return ccode;
+}
+
+int csch(struct subchannel_id schid)
+{
+ int ccode;
+
+ ccode = __csch(schid);
trace_s390_cio_csch(schid, ccode);
return ccode;
@@ -140,7 +180,7 @@ int chsc(void *chsc_area)
}
EXPORT_SYMBOL(chsc);
-int rchp(struct chp_id chpid)
+static inline int __rchp(struct chp_id chpid)
{
register struct chp_id reg1 asm ("1") = chpid;
int ccode;
@@ -151,12 +191,20 @@ int rchp(struct chp_id chpid)
" ipm %0\n"
" srl %0,28"
: "=d" (ccode) : "d" (reg1) : "cc");
+ return ccode;
+}
+
+int rchp(struct chp_id chpid)
+{
+ int ccode;
+
+ ccode = __rchp(chpid);
trace_s390_cio_rchp(chpid, ccode);
return ccode;
}
-int rsch(struct subchannel_id schid)
+static inline int __rsch(struct subchannel_id schid)
{
register struct subchannel_id reg1 asm("1") = schid;
int ccode;
@@ -168,12 +216,21 @@ int rsch(struct subchannel_id schid)
: "=d" (ccode)
: "d" (reg1)
: "cc", "memory");
+
+ return ccode;
+}
+
+int rsch(struct subchannel_id schid)
+{
+ int ccode;
+
+ ccode = __rsch(schid);
trace_s390_cio_rsch(schid, ccode);
return ccode;
}
-int hsch(struct subchannel_id schid)
+static inline int __hsch(struct subchannel_id schid)
{
register struct subchannel_id reg1 asm("1") = schid;
int ccode;
@@ -185,12 +242,20 @@ int hsch(struct subchannel_id schid)
: "=d" (ccode)
: "d" (reg1)
: "cc");
+ return ccode;
+}
+
+int hsch(struct subchannel_id schid)
+{
+ int ccode;
+
+ ccode = __hsch(schid);
trace_s390_cio_hsch(schid, ccode);
return ccode;
}
-int xsch(struct subchannel_id schid)
+static inline int __xsch(struct subchannel_id schid)
{
register struct subchannel_id reg1 asm("1") = schid;
int ccode;
@@ -202,6 +267,14 @@ int xsch(struct subchannel_id schid)
: "=d" (ccode)
: "d" (reg1)
: "cc");
+ return ccode;
+}
+
+int xsch(struct subchannel_id schid)
+{
+ int ccode;
+
+ ccode = __xsch(schid);
trace_s390_cio_xsch(schid, ccode);
return ccode;
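Every channel instruction in ioasm.c now follows the same two-layer shape: a static inline __name() that contains only the register setup and the inline assembly, and an exported name() that calls it and hands the condition code to the matching tracepoint. The pattern in the abstract ("xyz" is a placeholder mnemonic, and trace_s390_cio_xyz() is assumed to exist for it):

static inline int __xyz(struct subchannel_id schid)
{
        register struct subchannel_id reg1 asm("1") = schid;
        int ccode;

        asm volatile(
                "       xyz\n"          /* placeholder instruction */
                "       ipm %0\n"
                "       srl %0,28"
                : "=d" (ccode) : "d" (reg1) : "cc");
        return ccode;                   /* raw condition code, no tracing */
}

int xyz(struct subchannel_id schid)
{
        int ccode = __xyz(schid);

        trace_s390_cio_xyz(schid, ccode);  /* tracing stays in the wrapper */
        return ccode;
}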
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 4bb5262f7..71bf9bded 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -686,6 +686,15 @@ static void qdio_kick_handler(struct qdio_q *q)
q->qdio_error = 0;
}
+static inline int qdio_tasklet_schedule(struct qdio_q *q)
+{
+ if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
+ tasklet_schedule(&q->tasklet);
+ return 0;
+ }
+ return -EPERM;
+}
+
static void __qdio_inbound_processing(struct qdio_q *q)
{
qperf_inc(q, tasklet_inbound);
@@ -698,10 +707,8 @@ static void __qdio_inbound_processing(struct qdio_q *q)
if (!qdio_inbound_q_done(q)) {
/* means poll time is not yet over */
qperf_inc(q, tasklet_inbound_resched);
- if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
- tasklet_schedule(&q->tasklet);
+ if (!qdio_tasklet_schedule(q))
return;
- }
}
qdio_stop_polling(q);
@@ -711,8 +718,7 @@ static void __qdio_inbound_processing(struct qdio_q *q)
*/
if (!qdio_inbound_q_done(q)) {
qperf_inc(q, tasklet_inbound_resched2);
- if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
- tasklet_schedule(&q->tasklet);
+ qdio_tasklet_schedule(q);
}
}
@@ -869,16 +875,15 @@ static void __qdio_outbound_processing(struct qdio_q *q)
* is noticed and outbound_handler is called after some time.
*/
if (qdio_outbound_q_done(q))
- del_timer(&q->u.out.timer);
+ del_timer_sync(&q->u.out.timer);
else
- if (!timer_pending(&q->u.out.timer))
+ if (!timer_pending(&q->u.out.timer) &&
+ likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
return;
sched:
- if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
- return;
- tasklet_schedule(&q->tasklet);
+ qdio_tasklet_schedule(q);
}
/* outbound tasklet */
@@ -892,9 +897,7 @@ void qdio_outbound_timer(unsigned long data)
{
struct qdio_q *q = (struct qdio_q *)data;
- if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
- return;
- tasklet_schedule(&q->tasklet);
+ qdio_tasklet_schedule(q);
}
static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
@@ -907,7 +910,7 @@ static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
for_each_output_queue(q->irq_ptr, out, i)
if (!qdio_outbound_q_done(out))
- tasklet_schedule(&out->tasklet);
+ qdio_tasklet_schedule(out);
}
static void __tiqdio_inbound_processing(struct qdio_q *q)
@@ -929,10 +932,8 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
if (!qdio_inbound_q_done(q)) {
qperf_inc(q, tasklet_inbound_resched);
- if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
- tasklet_schedule(&q->tasklet);
+ if (!qdio_tasklet_schedule(q))
return;
- }
}
qdio_stop_polling(q);
@@ -942,8 +943,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
*/
if (!qdio_inbound_q_done(q)) {
qperf_inc(q, tasklet_inbound_resched2);
- if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
- tasklet_schedule(&q->tasklet);
+ qdio_tasklet_schedule(q);
}
}
@@ -977,7 +977,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
int i;
struct qdio_q *q;
- if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
+ if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
return;
for_each_input_queue(irq_ptr, q, i) {
@@ -1003,7 +1003,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
continue;
if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
qdio_siga_sync_q(q);
- tasklet_schedule(&q->tasklet);
+ qdio_tasklet_schedule(q);
}
}
@@ -1066,10 +1066,12 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
struct irb *irb)
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ struct subchannel_id schid;
int cstat, dstat;
if (!intparm || !irq_ptr) {
- DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
+ ccw_device_get_schid(cdev, &schid);
+ DBF_ERROR("qint:%4x", schid.sch_no);
return;
}
@@ -1122,12 +1124,14 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
int qdio_get_ssqd_desc(struct ccw_device *cdev,
struct qdio_ssqd_desc *data)
{
+ struct subchannel_id schid;
if (!cdev || !cdev->private)
return -EINVAL;
- DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
- return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
+ ccw_device_get_schid(cdev, &schid);
+ DBF_EVENT("get ssqd:%4x", schid.sch_no);
+ return qdio_setup_get_ssqd(NULL, &schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
@@ -1141,7 +1145,7 @@ static void qdio_shutdown_queues(struct ccw_device *cdev)
tasklet_kill(&q->tasklet);
for_each_output_queue(irq_ptr, q, i) {
- del_timer(&q->u.out.timer);
+ del_timer_sync(&q->u.out.timer);
tasklet_kill(&q->tasklet);
}
}
@@ -1154,14 +1158,15 @@ static void qdio_shutdown_queues(struct ccw_device *cdev)
int qdio_shutdown(struct ccw_device *cdev, int how)
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ struct subchannel_id schid;
int rc;
- unsigned long flags;
if (!irq_ptr)
return -ENODEV;
WARN_ON_ONCE(irqs_disabled());
- DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);
+ ccw_device_get_schid(cdev, &schid);
+ DBF_EVENT("qshutdown:%4x", schid.sch_no);
mutex_lock(&irq_ptr->setup_mutex);
/*
@@ -1184,7 +1189,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
qdio_shutdown_debug_entries(irq_ptr);
/* cleanup subchannel */
- spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ spin_lock_irq(get_ccwdev_lock(cdev));
if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
@@ -1198,12 +1203,12 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
}
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
- spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ spin_unlock_irq(get_ccwdev_lock(cdev));
wait_event_interruptible_timeout(cdev->private->wait_q,
irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
irq_ptr->state == QDIO_IRQ_STATE_ERR,
10 * HZ);
- spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ spin_lock_irq(get_ccwdev_lock(cdev));
no_cleanup:
qdio_shutdown_thinint(irq_ptr);
@@ -1211,7 +1216,7 @@ no_cleanup:
/* restore interrupt handler */
if ((void *)cdev->handler == (void *)qdio_int_handler)
cdev->handler = irq_ptr->orig_handler;
- spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+ spin_unlock_irq(get_ccwdev_lock(cdev));
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
mutex_unlock(&irq_ptr->setup_mutex);
@@ -1228,11 +1233,13 @@ EXPORT_SYMBOL_GPL(qdio_shutdown);
int qdio_free(struct ccw_device *cdev)
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ struct subchannel_id schid;
if (!irq_ptr)
return -ENODEV;
- DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
+ ccw_device_get_schid(cdev, &schid);
+ DBF_EVENT("qfree:%4x", schid.sch_no);
DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned");
mutex_lock(&irq_ptr->setup_mutex);
@@ -1251,9 +1258,11 @@ EXPORT_SYMBOL_GPL(qdio_free);
*/
int qdio_allocate(struct qdio_initialize *init_data)
{
+ struct subchannel_id schid;
struct qdio_irq *irq_ptr;
- DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);
+ ccw_device_get_schid(init_data->cdev, &schid);
+ DBF_EVENT("qallocate:%4x", schid.sch_no);
if ((init_data->no_input_qs && !init_data->input_handler) ||
(init_data->no_output_qs && !init_data->output_handler))
@@ -1331,20 +1340,18 @@ static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
*/
int qdio_establish(struct qdio_initialize *init_data)
{
- struct qdio_irq *irq_ptr;
struct ccw_device *cdev = init_data->cdev;
- unsigned long saveflags;
+ struct subchannel_id schid;
+ struct qdio_irq *irq_ptr;
int rc;
- DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);
+ ccw_device_get_schid(cdev, &schid);
+ DBF_EVENT("qestablish:%4x", schid.sch_no);
irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
- if (cdev->private->state != DEV_STATE_ONLINE)
- return -EINVAL;
-
mutex_lock(&irq_ptr->setup_mutex);
qdio_setup_irq(init_data);
@@ -1361,17 +1368,14 @@ int qdio_establish(struct qdio_initialize *init_data)
irq_ptr->ccw.count = irq_ptr->equeue.count;
irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);
- spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
+ spin_lock_irq(get_ccwdev_lock(cdev));
ccw_device_set_options_mask(cdev, 0);
rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
+ spin_unlock_irq(get_ccwdev_lock(cdev));
if (rc) {
DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
DBF_ERROR("rc:%4x", rc);
- }
- spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
-
- if (rc) {
mutex_unlock(&irq_ptr->setup_mutex);
qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
return rc;
@@ -1407,19 +1411,17 @@ EXPORT_SYMBOL_GPL(qdio_establish);
*/
int qdio_activate(struct ccw_device *cdev)
{
+ struct subchannel_id schid;
struct qdio_irq *irq_ptr;
int rc;
- unsigned long saveflags;
- DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);
+ ccw_device_get_schid(cdev, &schid);
+ DBF_EVENT("qactivate:%4x", schid.sch_no);
irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
- if (cdev->private->state != DEV_STATE_ONLINE)
- return -EINVAL;
-
mutex_lock(&irq_ptr->setup_mutex);
if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
rc = -EBUSY;
@@ -1431,19 +1433,17 @@ int qdio_activate(struct ccw_device *cdev)
irq_ptr->ccw.count = irq_ptr->aqueue.count;
irq_ptr->ccw.cda = 0;
- spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
+ spin_lock_irq(get_ccwdev_lock(cdev));
ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
0, DOIO_DENY_PREFETCH);
+ spin_unlock_irq(get_ccwdev_lock(cdev));
if (rc) {
DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
DBF_ERROR("rc:%4x", rc);
- }
- spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
-
- if (rc)
goto out;
+ }
if (is_thinint_irq(irq_ptr))
tiqdio_add_input_queues(irq_ptr);
@@ -1585,10 +1585,11 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
/* in case of SIGA errors we must process the error immediately */
if (used >= q->u.out.scan_threshold || rc)
- tasklet_schedule(&q->tasklet);
+ qdio_tasklet_schedule(q);
else
/* free the SBALs in case of no further traffic */
- if (!timer_pending(&q->u.out.timer))
+ if (!timer_pending(&q->u.out.timer) &&
+ likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
mod_timer(&q->u.out.timer, jiffies + HZ);
return rc;
}
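qdio_tasklet_schedule() folds the per-call-site state test into one helper (0 when the tasklet was scheduled, -EPERM when the queue is no longer active) and thereby lets inbound processing tell "rescheduled, stop here" apart from "shutting down, fall through to qdio_stop_polling()". The control flow reduced to a sketch (inbound_done() stands in for qdio_inbound_q_done()):

static void example_inbound(struct qdio_q *q)
{
        if (!inbound_done(q)) {         /* poll time not over yet */
                if (!qdio_tasklet_schedule(q))
                        return;         /* rescheduled; keep polling */
                /* queue no longer active: fall through, stop polling */
        }
        qdio_stop_polling(q);
}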
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 327255da1..ed92fb09f 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -169,6 +169,19 @@ static int ap_configuration_available(void)
return test_facility(12);
}
+static inline struct ap_queue_status
+__pqap_tapq(ap_qid_t qid, unsigned long *info)
+{
+ register unsigned long reg0 asm ("0") = qid;
+ register struct ap_queue_status reg1 asm ("1");
+ register unsigned long reg2 asm ("2") = 0UL;
+
+ asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */
+ : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
+ *info = reg2;
+ return reg1;
+}
+
/**
* ap_test_queue(): Test adjunct processor queue.
* @qid: The AP queue number
@@ -179,17 +192,15 @@ static int ap_configuration_available(void)
static inline struct ap_queue_status
ap_test_queue(ap_qid_t qid, unsigned long *info)
{
- register unsigned long reg0 asm ("0") = qid;
- register struct ap_queue_status reg1 asm ("1");
- register unsigned long reg2 asm ("2") = 0UL;
+ struct ap_queue_status aqs;
+ unsigned long _info;
if (test_facility(15))
- reg0 |= 1UL << 23; /* set APFT T bit*/
- asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */
- : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
+ qid |= 1UL << 23; /* set APFT T bit */
+ aqs = __pqap_tapq(qid, &_info);
if (info)
- *info = reg2;
- return reg1;
+ *info = _info;
+ return aqs;
}
/**
@@ -237,14 +248,12 @@ ap_queue_interruption_control(ap_qid_t qid, void *ind)
*
* Returns 0 on success, or -EOPNOTSUPP.
*/
-static inline int ap_query_configuration(void)
+static inline int __ap_query_configuration(void)
{
register unsigned long reg0 asm ("0") = 0x04000000UL;
register unsigned long reg1 asm ("1") = -EINVAL;
register void *reg2 asm ("2") = (void *) ap_configuration;
- if (!ap_configuration)
- return -EOPNOTSUPP;
asm volatile(
".long 0xb2af0000\n" /* PQAP(QCI) */
"0: la %1,0\n"
@@ -257,6 +266,13 @@ static inline int ap_query_configuration(void)
return reg1;
}
+static inline int ap_query_configuration(void)
+{
+ if (!ap_configuration)
+ return -EOPNOTSUPP;
+ return __ap_query_configuration();
+}
+
/**
* ap_init_configuration(): Allocate and query configuration array.
*/
@@ -346,6 +362,26 @@ static int ap_queue_enable_interruption(struct ap_device *ap_dev, void *ind)
}
}
+static inline struct ap_queue_status
+__nqap(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
+{
+ typedef struct { char _[length]; } msgblock;
+ register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
+ register struct ap_queue_status reg1 asm ("1");
+ register unsigned long reg2 asm ("2") = (unsigned long) msg;
+ register unsigned long reg3 asm ("3") = (unsigned long) length;
+ register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
+ register unsigned long reg5 asm ("5") = psmid & 0xffffffff;
+
+ asm volatile (
+ "0: .long 0xb2ad0042\n" /* NQAP */
+ " brc 2,0b"
+ : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
+ : "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
+ : "cc");
+ return reg1;
+}
+
/**
* __ap_send(): Send message to adjunct processor queue.
* @qid: The AP queue number
@@ -363,24 +399,9 @@ static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
unsigned int special)
{
- typedef struct { char _[length]; } msgblock;
- register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
- register struct ap_queue_status reg1 asm ("1");
- register unsigned long reg2 asm ("2") = (unsigned long) msg;
- register unsigned long reg3 asm ("3") = (unsigned long) length;
- register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
- register unsigned long reg5 asm ("5") = psmid & 0xffffffff;
-
if (special == 1)
- reg0 |= 0x400000UL;
-
- asm volatile (
- "0: .long 0xb2ad0042\n" /* NQAP */
- " brc 2,0b"
- : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
- : "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
- : "cc" );
- return reg1;
+ qid |= 0x400000UL;
+ return __nqap(qid, psmid, msg, length);
}
int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
@@ -447,6 +468,8 @@ int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
struct ap_queue_status status;
+ if (msg == NULL)
+ return -EINVAL;
status = __ap_recv(qid, psmid, msg, length);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
@@ -596,6 +619,8 @@ static enum ap_wait ap_sm_read(struct ap_device *ap_dev)
{
struct ap_queue_status status;
+ if (!ap_dev->reply)
+ return AP_WAIT_NONE;
status = ap_sm_recv(ap_dev);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
@@ -617,6 +642,31 @@ static enum ap_wait ap_sm_read(struct ap_device *ap_dev)
}
/**
+ * ap_sm_suspend_read(): Receive pending reply messages from an AP device
+ * without changing the device state in between. In suspend mode we don't
+ * allow sending new requests, therefore just fetch pending replies.
+ * @ap_dev: pointer to the AP device
+ *
+ * Returns AP_WAIT_NONE or AP_WAIT_AGAIN
+ */
+static enum ap_wait ap_sm_suspend_read(struct ap_device *ap_dev)
+{
+ struct ap_queue_status status;
+
+ if (!ap_dev->reply)
+ return AP_WAIT_NONE;
+ status = ap_sm_recv(ap_dev);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ if (ap_dev->queue_count > 0)
+ return AP_WAIT_AGAIN;
+ /* fall through */
+ default:
+ return AP_WAIT_NONE;
+ }
+}
+
+/**
* ap_sm_write(): Send messages from the request queue to an AP device.
* @ap_dev: pointer to the AP device
*
@@ -717,7 +767,7 @@ static enum ap_wait ap_sm_reset_wait(struct ap_device *ap_dev)
struct ap_queue_status status;
unsigned long info;
- if (ap_dev->queue_count > 0)
+ if (ap_dev->queue_count > 0 && ap_dev->reply)
/* Try to read a completed message and get the status */
status = ap_sm_recv(ap_dev);
else
@@ -757,7 +807,7 @@ static enum ap_wait ap_sm_setirq_wait(struct ap_device *ap_dev)
struct ap_queue_status status;
unsigned long info;
- if (ap_dev->queue_count > 0)
+ if (ap_dev->queue_count > 0 && ap_dev->reply)
/* Try to read a completed message and get the status */
status = ap_sm_recv(ap_dev);
else
@@ -813,7 +863,7 @@ static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
[AP_EVENT_TIMEOUT] = ap_sm_reset,
},
[AP_STATE_SUSPEND_WAIT] = {
- [AP_EVENT_POLL] = ap_sm_read,
+ [AP_EVENT_POLL] = ap_sm_suspend_read,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
},
[AP_STATE_BORKED] = {
@@ -1314,6 +1364,17 @@ static struct bus_type ap_bus_type = {
.resume = ap_dev_resume,
};
+void ap_device_init_reply(struct ap_device *ap_dev,
+ struct ap_message *reply)
+{
+ ap_dev->reply = reply;
+
+ spin_lock_bh(&ap_dev->lock);
+ ap_sm_wait(ap_sm_event(ap_dev, AP_EVENT_POLL));
+ spin_unlock_bh(&ap_dev->lock);
+}
+EXPORT_SYMBOL(ap_device_init_reply);
+
static int ap_device_probe(struct device *dev)
{
struct ap_device *ap_dev = to_ap_dev(dev);
@@ -1758,7 +1819,8 @@ int __init ap_module_init(void)
if (ap_domain_index < -1 || ap_domain_index > max_domain_id) {
pr_warn("%d is not a valid cryptographic domain\n",
ap_domain_index);
- return -EINVAL;
+ rc = -EINVAL;
+ goto out_free;
}
/* In resume callback we need to know if the user had set the domain.
* If so, we can not just reset it.
@@ -1831,6 +1893,7 @@ out:
unregister_reset_call(&ap_reset_call);
if (ap_using_interrupts())
unregister_adapter_interrupt(&ap_airq);
+out_free:
kfree(ap_configuration);
return rc;
}
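ap_device_init_reply() replaces the bare ap_dev->reply assignment in the card drivers: besides storing the buffer it immediately feeds an AP_EVENT_POLL to the state machine under the device lock, so replies that completed before the driver attached are fetched right away (the new ap_dev->reply checks in ap_sm_read() and friends cover the window before this call). A probe-time sketch using the zdev fields seen in the zcrypt hunks below:

/* Sketch: register the reply buffer through the helper rather than by
 * direct assignment, so the queued POLL can drain early completions. */
static int example_probe(struct ap_device *ap_dev, struct zcrypt_device *zdev)
{
        zdev->ap_dev = ap_dev;
        zdev->online = 1;
        ap_device_init_reply(ap_dev, &zdev->reply);  /* store + poll once */
        ap_dev->private = zdev;
        return zcrypt_device_register(zdev);
}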
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 6adcbdf22..d7fdf5c02 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -262,6 +262,7 @@ void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg);
void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg);
void ap_flush_queue(struct ap_device *ap_dev);
void ap_bus_force_rescan(void);
+void ap_device_init_reply(struct ap_device *ap_dev, struct ap_message *ap_msg);
int ap_module_init(void);
void ap_module_exit(void);
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 1e849d6e1..15104aaa0 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -126,7 +126,7 @@ static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
MSGTYPE50_VARIANT_DEFAULT);
zdev->ap_dev = ap_dev;
zdev->online = 1;
- ap_dev->reply = &zdev->reply;
+ ap_device_init_reply(ap_dev, &zdev->reply);
ap_dev->private = zdev;
rc = zcrypt_device_register(zdev);
if (rc) {
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index bb3908818..ccb2e78eb 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -147,7 +147,7 @@ static int zcrypt_cex4_probe(struct ap_device *ap_dev)
return -ENODEV;
zdev->ap_dev = ap_dev;
zdev->online = 1;
- ap_dev->reply = &zdev->reply;
+ ap_device_init_reply(ap_dev, &zdev->reply);
ap_dev->private = zdev;
rc = zcrypt_device_register(zdev);
if (rc) {
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index f41852768..df8f0c4da 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -327,7 +327,7 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
else
zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
MSGTYPE06_VARIANT_NORNG);
- ap_dev->reply = &zdev->reply;
+ ap_device_init_reply(ap_dev, &zdev->reply);
ap_dev->private = zdev;
rc = zcrypt_device_register(zdev);
if (rc)
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index ec2e014e8..6d4b68c48 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -19,6 +19,7 @@
#include <linux/seq_file.h>
#include <linux/ethtool.h>
#include <linux/hashtable.h>
+#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/if_inet6.h>
@@ -144,6 +145,7 @@ struct qeth_perf_stats {
unsigned int sg_alloc_page_rx;
unsigned int tx_csum;
unsigned int tx_lin;
+ unsigned int tx_linfail;
};
/* Routing stuff */
@@ -559,7 +561,6 @@ enum qeth_ip_types {
QETH_IP_TYPE_NORMAL,
QETH_IP_TYPE_VIPA,
QETH_IP_TYPE_RXIP,
- QETH_IP_TYPE_DEL_ALL_MC,
};
enum qeth_cmd_buffer_state {
@@ -740,17 +741,10 @@ struct qeth_vlan_vid {
unsigned short vid;
};
-enum qeth_mac_disposition {
- QETH_DISP_MAC_DELETE = 0,
- QETH_DISP_MAC_DO_NOTHING = 1,
- QETH_DISP_MAC_ADD = 2,
-};
-
-struct qeth_mac {
- u8 mac_addr[OSA_ADDR_LEN];
- u8 is_uc:1;
- u8 disp_flag:2;
- struct hlist_node hnode;
+enum qeth_addr_disposition {
+ QETH_DISP_ADDR_DELETE = 0,
+ QETH_DISP_ADDR_DO_NOTHING = 1,
+ QETH_DISP_ADDR_ADD = 2,
};
struct qeth_rx {
@@ -798,6 +792,8 @@ struct qeth_card {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct list_head vid_list;
DECLARE_HASHTABLE(mac_htable, 4);
+ DECLARE_HASHTABLE(ip_htable, 4);
+ DECLARE_HASHTABLE(ip_mc_htable, 4);
struct work_struct kernel_thread_starter;
spinlock_t thread_mask_lock;
unsigned long thread_start_mask;
@@ -805,8 +801,6 @@ struct qeth_card {
unsigned long thread_running_mask;
struct task_struct *recovery_task;
spinlock_t ip_lock;
- struct list_head ip_list;
- struct list_head *ip_tbd_list;
struct qeth_ipato ipato;
struct list_head cmd_waiter_list;
/* QDIO buffer handling */
@@ -844,6 +838,19 @@ struct qeth_trap_id {
/*some helper functions*/
#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
+/**
+ * qeth_get_elements_for_range() - find number of SBALEs to cover range.
+ * @start: Start of the address range.
+ * @end: Address after the end of the range.
+ *
+ * Returns the number of pages, and thus QDIO buffer elements, needed to cover
+ * the specified address range.
+ */
+static inline int qeth_get_elements_for_range(addr_t start, addr_t end)
+{
+ return PFN_UP(end - 1) - PFN_DOWN(start);
+}
+
static inline int qeth_get_micros(void)
{
return (int) (get_tod_clock() >> 12);
@@ -865,6 +872,11 @@ static inline int qeth_get_ip_version(struct sk_buff *skb)
}
}
+static inline int qeth_get_ip_protocol(struct sk_buff *skb)
+{
+ return ip_hdr(skb)->protocol;
+}
+
static inline void qeth_put_buffer_pool_entry(struct qeth_card *card,
struct qeth_buffer_pool_entry *entry)
{
@@ -981,12 +993,14 @@ int qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *, __u16,
int (*reply_cb)(struct qeth_card *,
struct qeth_reply *, unsigned long),
void *);
+int qeth_setassparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long);
struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
enum qeth_ipa_funcs,
__u16, __u16,
enum qeth_prot_versions);
-int qeth_start_ipa_tx_checksum(struct qeth_card *);
-int qeth_set_rx_csum(struct qeth_card *, int);
+int qeth_set_features(struct net_device *, netdev_features_t);
+int qeth_recover_features(struct net_device *);
+netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
/* exports for OSN */
int qeth_osn_assist(struct net_device *, void *, int);
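qeth_get_elements_for_range() centralizes the page arithmetic that was previously open-coded at each caller: PFN_DOWN(start) is the frame of the first byte, PFN_UP(end - 1) rounds the last byte up to a frame boundary, and the difference is the number of pages touched. A self-contained check of the arithmetic (the macros are userspace re-definitions of the kernel ones, for illustration only):

#include <assert.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

typedef unsigned long addr_t;

/* Same formula as the new helper: pages touched by [start, end). */
static int elements_for_range(addr_t start, addr_t end)
{
        return PFN_UP(end - 1) - PFN_DOWN(start);
}

int main(void)
{
        /* 0x1f00..0x30ff touches pages 1, 2 and 3: three elements */
        assert(elements_for_range(0x1f00, 0x3100) == 3);
        /* a buffer within a single page needs one element */
        assert(elements_for_range(0x2010, 0x2800) == 1);
        return 0;
}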
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index b7b74776e..20cf29613 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1464,8 +1464,6 @@ static int qeth_setup_card(struct qeth_card *card)
card->thread_allowed_mask = 0;
card->thread_running_mask = 0;
INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
- INIT_LIST_HEAD(&card->ip_list);
- INIT_LIST_HEAD(card->ip_tbd_list);
INIT_LIST_HEAD(&card->cmd_waiter_list);
init_waitqueue_head(&card->wait_q);
/* initial options */
@@ -1500,11 +1498,6 @@ static struct qeth_card *qeth_alloc_card(void)
if (!card)
goto out;
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
- card->ip_tbd_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
- if (!card->ip_tbd_list) {
- QETH_DBF_TEXT(SETUP, 0, "iptbdnom");
- goto out_card;
- }
if (qeth_setup_channel(&card->read))
goto out_ip;
if (qeth_setup_channel(&card->write))
@@ -1517,8 +1510,6 @@ static struct qeth_card *qeth_alloc_card(void)
out_channel:
qeth_clean_channel(&card->read);
out_ip:
- kfree(card->ip_tbd_list);
-out_card:
kfree(card);
out:
return NULL;
@@ -3628,7 +3619,8 @@ static void qeth_qdio_cq_handler(struct qeth_card *card,
int e;
e = 0;
- while (buffer->element[e].addr) {
+ while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
+ buffer->element[e].addr) {
unsigned long phys_aob_addr;
phys_aob_addr = (unsigned long) buffer->element[e].addr;
@@ -3757,6 +3749,14 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
}
EXPORT_SYMBOL_GPL(qeth_qdio_output_handler);
+/* We cannot use outbound queue 3 for unicast packets on HiperSockets */
+static inline int qeth_cut_iqd_prio(struct qeth_card *card, int queue_num)
+{
+ if ((card->info.type == QETH_CARD_TYPE_IQD) && (queue_num == 3))
+ return 2;
+ return queue_num;
+}
+
/**
* Note: Function assumes that we have 4 outbound queues.
*/
@@ -3784,9 +3784,9 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
return card->qdio.default_out_queue;
}
if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
- return ~tos >> 6 & 3;
+ return qeth_cut_iqd_prio(card, ~tos >> 6 & 3);
if (tos & IPTOS_MINCOST)
- return 3;
+ return qeth_cut_iqd_prio(card, 3);
if (tos & IPTOS_RELIABILITY)
return 2;
if (tos & IPTOS_THROUGHPUT)
@@ -3797,11 +3797,12 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
case QETH_PRIO_Q_ING_SKB:
if (skb->priority > 5)
return 0;
- return ~skb->priority >> 1 & 3;
+ return qeth_cut_iqd_prio(card, ~skb->priority >> 1 & 3);
case QETH_PRIO_Q_ING_VLAN:
tci = &((struct ethhdr *)skb->data)->h_proto;
if (*tci == ETH_P_8021Q)
- return ~*(tci + 1) >> (VLAN_PRIO_SHIFT + 1) & 3;
+ return qeth_cut_iqd_prio(card, ~*(tci + 1) >>
+ (VLAN_PRIO_SHIFT + 1) & 3);
break;
default:
break;
@@ -3810,41 +3811,54 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
+/**
+ * qeth_get_elements_for_frags() - find number of SBALEs for skb frags.
+ * @skb: SKB address
+ *
+ * Returns the number of pages, and thus QDIO buffer elements, needed to cover
+ * the fragmented part of the SKB. Returns zero for a linear SKB.
+ */
int qeth_get_elements_for_frags(struct sk_buff *skb)
{
- int cnt, length, e, elements = 0;
- struct skb_frag_struct *frag;
- char *data;
+ int cnt, elements = 0;
for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
- frag = &skb_shinfo(skb)->frags[cnt];
- data = (char *)page_to_phys(skb_frag_page(frag)) +
- frag->page_offset;
- length = frag->size;
- e = PFN_UP((unsigned long)data + length - 1) -
- PFN_DOWN((unsigned long)data);
- elements += e;
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[cnt];
+
+ elements += qeth_get_elements_for_range(
+ (addr_t)skb_frag_address(frag),
+ (addr_t)skb_frag_address(frag) + skb_frag_size(frag));
}
return elements;
}
EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
+/**
+ * qeth_get_elements_no() - find number of SBALEs for skb data, inc. frags.
+ * @card: qeth card structure, to check max. elems.
+ * @skb: SKB address
+ * @extra_elems: extra elems needed, to check against max.
+ *
+ * Returns the number of pages, and thus QDIO buffer elements, needed to cover
+ * skb data, including linear part and fragments. Checks if the result plus
+ * extra_elems fits under the limit for the card. Returns 0 if it does not.
+ * Note: extra_elems is not included in the returned result.
+ */
int qeth_get_elements_no(struct qeth_card *card,
- struct sk_buff *skb, int elems)
+ struct sk_buff *skb, int extra_elems)
{
- int dlen = skb->len - skb->data_len;
- int elements_needed = PFN_UP((unsigned long)skb->data + dlen - 1) -
- PFN_DOWN((unsigned long)skb->data);
-
- elements_needed += qeth_get_elements_for_frags(skb);
+ int elements = qeth_get_elements_for_range(
+ (addr_t)skb->data,
+ (addr_t)skb->data + skb_headlen(skb)) +
+ qeth_get_elements_for_frags(skb);
- if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
+ if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
"(Number=%d / Length=%d). Discarded.\n",
- (elements_needed+elems), skb->len);
+ elements + extra_elems, skb->len);
return 0;
}
- return elements_needed;
+ return elements;
}
EXPORT_SYMBOL_GPL(qeth_get_elements_no);
@@ -3859,7 +3873,7 @@ int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len)
rest = len - inpage;
if (rest > hroom)
return 1;
- memmove(skb->data - rest, skb->data, skb->len - skb->data_len);
+ memmove(skb->data - rest, skb->data, skb_headlen(skb));
skb->data -= rest;
skb->tail -= rest;
*hdr = (struct qeth_hdr *)skb->data;
@@ -3873,7 +3887,7 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill,
int offset)
{
- int length = skb->len - skb->data_len;
+ int length = skb_headlen(skb);
int length_here;
int element;
char *data;
@@ -4967,7 +4981,6 @@ static void qeth_core_free_card(struct qeth_card *card)
qeth_clean_channel(&card->write);
if (card->dev)
free_netdev(card->dev);
- kfree(card->ip_tbd_list);
qeth_free_qdio_buffers(card);
unregister_service_level(&card->qeth_service_level);
kfree(card);
@@ -5265,8 +5278,8 @@ no_mem:
}
EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
-static int qeth_setassparms_cb(struct qeth_card *card,
- struct qeth_reply *reply, unsigned long data)
+int qeth_setassparms_cb(struct qeth_card *card,
+ struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd;
@@ -5294,6 +5307,7 @@ static int qeth_setassparms_cb(struct qeth_card *card,
return 0;
}
+EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
enum qeth_ipa_funcs ipa_func,
@@ -5788,6 +5802,7 @@ static struct {
{"tx do_QDIO count"},
{"tx csum"},
{"tx lin"},
+ {"tx linfail"},
{"cq handler count"},
{"cq handler time"}
};
@@ -5848,8 +5863,9 @@ void qeth_core_get_ethtool_stats(struct net_device *dev,
data[32] = card->perf_stats.outbound_do_qdio_cnt;
data[33] = card->perf_stats.tx_csum;
data[34] = card->perf_stats.tx_lin;
- data[35] = card->perf_stats.cq_cnt;
- data[36] = card->perf_stats.cq_time;
+ data[35] = card->perf_stats.tx_linfail;
+ data[36] = card->perf_stats.cq_cnt;
+ data[37] = card->perf_stats.cq_time;
}
EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);
@@ -6048,74 +6064,165 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev,
}
EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_settings);
-static int qeth_send_checksum_command(struct qeth_card *card)
+static int qeth_send_checksum_on(struct qeth_card *card, int cstype)
{
+ long rxtx_arg;
int rc;
- rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
- IPA_CMD_ASS_START, 0);
+ rc = qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_START, 0);
if (rc) {
- dev_warn(&card->gdev->dev, "Starting HW checksumming for %s "
- "failed, using SW checksumming\n",
- QETH_CARD_IFNAME(card));
+ dev_warn(&card->gdev->dev,
+ "Starting HW checksumming for %s failed, using SW checksumming\n",
+ QETH_CARD_IFNAME(card));
return rc;
}
- rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM,
- IPA_CMD_ASS_ENABLE,
- card->info.csum_mask);
+ rxtx_arg = (cstype == IPA_OUTBOUND_CHECKSUM) ? card->info.tx_csum_mask
+ : card->info.csum_mask;
+ rc = qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_ENABLE,
+ rxtx_arg);
if (rc) {
- dev_warn(&card->gdev->dev, "Enabling HW checksumming for %s "
- "failed, using SW checksumming\n",
- QETH_CARD_IFNAME(card));
+ dev_warn(&card->gdev->dev,
+ "Enabling HW checksumming for %s failed, using SW checksumming\n",
+ QETH_CARD_IFNAME(card));
return rc;
}
+
+ dev_info(&card->gdev->dev, "HW Checksumming (%sbound) enabled\n",
+ cstype == IPA_INBOUND_CHECKSUM ? "in" : "out");
return 0;
}
-int qeth_set_rx_csum(struct qeth_card *card, int on)
+static int qeth_set_ipa_csum(struct qeth_card *card, int on, int cstype)
{
int rc;
if (on) {
- rc = qeth_send_checksum_command(card);
+ rc = qeth_send_checksum_on(card, cstype);
if (rc)
return -EIO;
- dev_info(&card->gdev->dev,
- "HW Checksumming (inbound) enabled\n");
} else {
- rc = qeth_send_simple_setassparms(card,
- IPA_INBOUND_CHECKSUM, IPA_CMD_ASS_STOP, 0);
+ rc = qeth_send_simple_setassparms(card, cstype,
+ IPA_CMD_ASS_STOP, 0);
if (rc)
return -EIO;
}
return 0;
}
-EXPORT_SYMBOL_GPL(qeth_set_rx_csum);
-int qeth_start_ipa_tx_checksum(struct qeth_card *card)
+static int qeth_set_ipa_tso(struct qeth_card *card, int on)
{
- int rc = 0;
+ int rc;
- if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
- return rc;
- rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_CHECKSUM,
- IPA_CMD_ASS_START, 0);
- if (rc)
- goto err_out;
- rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_CHECKSUM,
- IPA_CMD_ASS_ENABLE,
- card->info.tx_csum_mask);
- if (rc)
- goto err_out;
+ QETH_CARD_TEXT(card, 3, "sttso");
- dev_info(&card->gdev->dev, "HW TX Checksumming enabled\n");
- return rc;
-err_out:
- dev_warn(&card->gdev->dev, "Enabling HW TX checksumming for %s "
- "failed, using SW TX checksumming\n", QETH_CARD_IFNAME(card));
+ if (on) {
+ rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
+ IPA_CMD_ASS_START, 0);
+ if (rc) {
+ dev_warn(&card->gdev->dev,
+ "Starting outbound TCP segmentation offload for %s failed\n",
+ QETH_CARD_IFNAME(card));
+ return -EIO;
+ }
+ dev_info(&card->gdev->dev, "Outbound TSO enabled\n");
+ } else {
+ rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
+ IPA_CMD_ASS_STOP, 0);
+ }
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_start_ipa_tx_checksum);
+
+/* try to restore device features on a device after recovery */
+int qeth_recover_features(struct net_device *dev)
+{
+ struct qeth_card *card = dev->ml_priv;
+ netdev_features_t recover = dev->features;
+
+ if (recover & NETIF_F_IP_CSUM) {
+ if (qeth_set_ipa_csum(card, 1, IPA_OUTBOUND_CHECKSUM))
+ recover ^= NETIF_F_IP_CSUM;
+ }
+ if (recover & NETIF_F_RXCSUM) {
+ if (qeth_set_ipa_csum(card, 1, IPA_INBOUND_CHECKSUM))
+ recover ^= NETIF_F_RXCSUM;
+ }
+ if (recover & NETIF_F_TSO) {
+ if (qeth_set_ipa_tso(card, 1))
+ recover ^= NETIF_F_TSO;
+ }
+
+ if (recover == dev->features)
+ return 0;
+
+ dev_warn(&card->gdev->dev,
+ "Device recovery failed to restore all offload features\n");
+ dev->features = recover;
+ return -EIO;
+}
+EXPORT_SYMBOL_GPL(qeth_recover_features);
+
+int qeth_set_features(struct net_device *dev, netdev_features_t features)
+{
+ struct qeth_card *card = dev->ml_priv;
+ netdev_features_t changed = dev->features ^ features;
+ int rc = 0;
+
+ QETH_DBF_TEXT(SETUP, 2, "setfeat");
+ QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
+
+ if ((changed & NETIF_F_IP_CSUM)) {
+ rc = qeth_set_ipa_csum(card,
+ features & NETIF_F_IP_CSUM ? 1 : 0,
+ IPA_OUTBOUND_CHECKSUM);
+ if (rc)
+ changed ^= NETIF_F_IP_CSUM;
+ }
+ if ((changed & NETIF_F_RXCSUM)) {
+ rc = qeth_set_ipa_csum(card,
+ features & NETIF_F_RXCSUM ? 1 : 0,
+ IPA_INBOUND_CHECKSUM);
+ if (rc)
+ changed ^= NETIF_F_RXCSUM;
+ }
+ if ((changed & NETIF_F_TSO)) {
+ rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO ? 1 : 0);
+ if (rc)
+ changed ^= NETIF_F_TSO;
+ }
+
+ /* everything changed successfully? */
+ if ((dev->features ^ features) == changed)
+ return 0;
+ /* something went wrong. save changed features and return error */
+ dev->features ^= changed;
+ return -EIO;
+}
+EXPORT_SYMBOL_GPL(qeth_set_features);
+
+netdev_features_t qeth_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ QETH_DBF_TEXT(SETUP, 2, "fixfeat");
+ if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
+ features &= ~NETIF_F_IP_CSUM;
+ if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
+ features &= ~NETIF_F_RXCSUM;
+ if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
+ features &= ~NETIF_F_TSO;
+ dev_info(&card->gdev->dev, "Outbound TSO not supported on %s\n",
+ QETH_CARD_IFNAME(card));
+ }
+ /* if the card isn't up, remove features that require hw changes */
+ if (card->state == CARD_STATE_DOWN ||
+ card->state == CARD_STATE_RECOVER)
+ features = features & ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_TSO);
+ QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
+ return features;
+}
+EXPORT_SYMBOL_GPL(qeth_fix_features);
static int __init qeth_core_init(void)
{
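qeth_set_features() tracks partial failure with XOR bookkeeping: changed starts as the bits that differ between dev->features and the requested set, every toggle that fails in hardware is cleared out of changed again, and dev->features ^= changed finally commits exactly the toggles that succeeded. A reduced, self-contained model of that arithmetic (not the driver code):

#include <assert.h>

typedef unsigned long features_t;

#define F_IP_CSUM (1UL << 0)
#define F_RXCSUM  (1UL << 1)
#define F_TSO     (1UL << 2)

/* Mirrors the bookkeeping: bits that failed to toggle are dropped from
 * `changed`; XOR-applying the rest moves only the successful bits. */
static features_t apply(features_t old, features_t wanted, features_t failed)
{
        features_t changed = old ^ wanted;

        changed &= ~failed;     /* same effect as the per-bit `changed ^= BIT` */
        return old ^ changed;
}

int main(void)
{
        /* enabling TSO fails: IP_CSUM flips on, TSO stays off */
        assert(apply(0, F_IP_CSUM | F_TSO, F_TSO) == F_IP_CSUM);
        /* nothing fails: the device lands exactly on the requested set */
        assert(apply(F_RXCSUM, F_IP_CSUM, 0) == F_IP_CSUM);
        return 0;
}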
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index e6e5b9671..75b29fd2f 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -243,6 +243,10 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
card->qdio.default_out_queue = 2;
} else if (sysfs_streq(buf, "no_prio_queueing:3")) {
+ if (card->info.type == QETH_CARD_TYPE_IQD) {
+ rc = -EPERM;
+ goto out;
+ }
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
card->qdio.default_out_queue = 3;
} else if (sysfs_streq(buf, "no_prio_queueing")) {
diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h
index 076755640..29d9fb389 100644
--- a/drivers/s390/net/qeth_l2.h
+++ b/drivers/s390/net/qeth_l2.h
@@ -12,4 +12,11 @@ int qeth_l2_create_device_attributes(struct device *);
void qeth_l2_remove_device_attributes(struct device *);
void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card);
+struct qeth_mac {
+ u8 mac_addr[OSA_ADDR_LEN];
+ u8 is_uc:1;
+ u8 disp_flag:2;
+ struct hlist_node hnode;
+};
+
#endif /* __QETH_L2_H__ */
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index df036b872..bb27058fa 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -404,38 +404,6 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
return rc;
}
-static netdev_features_t qeth_l2_fix_features(struct net_device *dev,
- netdev_features_t features)
-{
- struct qeth_card *card = dev->ml_priv;
-
- QETH_DBF_TEXT(SETUP, 2, "fixfeat");
- if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
- features &= ~NETIF_F_IP_CSUM;
- if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
- features &= ~NETIF_F_RXCSUM;
- QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
- return features;
-}
-
-static int qeth_l2_set_features(struct net_device *dev,
- netdev_features_t features)
-{
- struct qeth_card *card = dev->ml_priv;
- netdev_features_t changed = dev->features ^ features;
-
- QETH_DBF_TEXT(SETUP, 2, "setfeat");
- QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
-
- if (card->state == CARD_STATE_DOWN ||
- card->state == CARD_STATE_RECOVER)
- return 0;
-
- if (!(changed & NETIF_F_RXCSUM))
- return 0;
- return qeth_set_rx_csum(card, features & NETIF_F_RXCSUM ? 1 : 0);
-}
-
static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
{
QETH_DBF_TEXT(SETUP , 2, "stopcard");
@@ -780,7 +748,7 @@ qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha, u8 is_uc)
qeth_l2_mac_hash(ha->addr)) {
if (is_uc == mac->is_uc &&
!memcmp(ha->addr, mac->mac_addr, OSA_ADDR_LEN)) {
- mac->disp_flag = QETH_DISP_MAC_DO_NOTHING;
+ mac->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
return;
}
}
@@ -792,7 +760,7 @@ qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha, u8 is_uc)
memcpy(mac->mac_addr, ha->addr, OSA_ADDR_LEN);
mac->is_uc = is_uc;
- mac->disp_flag = QETH_DISP_MAC_ADD;
+ mac->disp_flag = QETH_DISP_ADDR_ADD;
hash_add(card->mac_htable, &mac->hnode,
qeth_l2_mac_hash(mac->mac_addr));
@@ -825,7 +793,7 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
qeth_l2_add_mac(card, ha, 1);
hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
- if (mac->disp_flag == QETH_DISP_MAC_DELETE) {
+ if (mac->disp_flag == QETH_DISP_ADDR_DELETE) {
if (!mac->is_uc)
rc = qeth_l2_send_delgroupmac(card,
mac->mac_addr);
@@ -837,15 +805,15 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
hash_del(&mac->hnode);
kfree(mac);
- } else if (mac->disp_flag == QETH_DISP_MAC_ADD) {
+ } else if (mac->disp_flag == QETH_DISP_ADDR_ADD) {
rc = qeth_l2_write_mac(card, mac);
if (rc) {
hash_del(&mac->hnode);
kfree(mac);
} else
- mac->disp_flag = QETH_DISP_MAC_DELETE;
+ mac->disp_flag = QETH_DISP_ADDR_DELETE;
} else
- mac->disp_flag = QETH_DISP_MAC_DELETE;
+ mac->disp_flag = QETH_DISP_ADDR_DELETE;
}
spin_unlock_bh(&card->mclock);
@@ -869,6 +837,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
int data_offset = -1;
int elements_needed = 0;
int hd_len = 0;
+ int nr_frags;
if (card->qdio.do_prio_queueing || (cast_type &&
card->info.is_multicast_different))
@@ -892,6 +861,23 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
netif_stop_queue(dev);
+ /* fix hardware limitation: as long as we do not have sbal
+ * chaining we cannot send long frag lists
+ */
+ if ((card->info.type != QETH_CARD_TYPE_IQD) &&
+ !qeth_get_elements_no(card, new_skb, 0)) {
+ int lin_rc = skb_linearize(new_skb);
+
+ if (card->options.performance_stats) {
+ if (lin_rc)
+ card->perf_stats.tx_linfail++;
+ else
+ card->perf_stats.tx_lin++;
+ }
+ if (lin_rc)
+ goto tx_drop;
+ }
+
if (card->info.type == QETH_CARD_TYPE_OSN)
hdr = (struct qeth_hdr *)skb->data;
else {
@@ -943,6 +929,14 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!rc) {
card->stats.tx_packets++;
card->stats.tx_bytes += tx_bytes;
+ if (card->options.performance_stats) {
+ nr_frags = skb_shinfo(new_skb)->nr_frags;
+ if (nr_frags) {
+ card->perf_stats.sg_skbs_sent++;
+ /* nr_frags + skb->data */
+ card->perf_stats.sg_frags_sent += nr_frags + 1;
+ }
+ }
if (new_skb != skb)
dev_kfree_skb_any(skb);
rc = NETDEV_TX_OK;
@@ -1087,8 +1081,8 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid,
.ndo_tx_timeout = qeth_tx_timeout,
- .ndo_fix_features = qeth_l2_fix_features,
- .ndo_set_features = qeth_l2_set_features
+ .ndo_fix_features = qeth_fix_features,
+ .ndo_set_features = qeth_set_features
};
static int qeth_l2_setup_netdev(struct qeth_card *card)
@@ -1119,12 +1113,22 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
&qeth_l2_ethtool_ops : &qeth_l2_osn_ops;
card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
- card->dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
- /* Turn on RX offloading per default */
- card->dev->features |= NETIF_F_RXCSUM;
+ card->dev->hw_features = NETIF_F_SG;
+ card->dev->vlan_features = NETIF_F_SG;
+ /* OSA 3S and earlier has no RX/TX support */
+ if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) {
+ card->dev->hw_features |= NETIF_F_IP_CSUM;
+ card->dev->vlan_features |= NETIF_F_IP_CSUM;
+ }
+ if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) {
+ card->dev->hw_features |= NETIF_F_RXCSUM;
+ card->dev->vlan_features |= NETIF_F_RXCSUM;
+ }
}
card->info.broadcast_capable = 1;
qeth_l2_request_initial_mac(card);
+ card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
+ PAGE_SIZE;
SET_NETDEV_DEV(card->dev, &card->gdev->dev);
netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT);
netif_carrier_off(card->dev);
@@ -1136,9 +1140,6 @@ static int qeth_l2_start_ipassists(struct qeth_card *card)
/* configure isolation level */
if (qeth_set_access_ctrl_online(card, 0))
return -ENODEV;
- if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
- qeth_set_rx_csum(card, 1);
- qeth_start_ipa_tx_checksum(card);
return 0;
}
@@ -1207,7 +1208,8 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
contin:
if ((card->info.type == QETH_CARD_TYPE_OSD) ||
(card->info.type == QETH_CARD_TYPE_OSX)) {
- if (qeth_l2_start_ipassists(card))
+ rc = qeth_l2_start_ipassists(card);
+ if (rc)
goto out_remove;
}
@@ -1241,6 +1243,9 @@ contin:
}
/* this also sets saved unicast addresses */
qeth_l2_set_rx_mode(card->dev);
+ rtnl_lock();
+ qeth_recover_features(card->dev);
+ rtnl_unlock();
}
/* let user_space know that device is online */
kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
@@ -1801,6 +1806,12 @@ static int qeth_bridgeport_makerc(struct qeth_card *card,
dev_err(&card->gdev->dev,
"The device is not configured as a Bridge Port\n");
break;
+ case 0x2B10:
+ case 0x0010: /* OS mismatch */
+ rc = -EPERM;
+ dev_err(&card->gdev->dev,
+ "A Bridge Port is already configured by a different operating system\n");
+ break;
case 0x2B14:
case 0x0014: /* Another device is Primary */
switch (setcmd) {
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 551a4b4c0..26f79533e 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -10,16 +10,23 @@
#define __QETH_L3_H__
#include "qeth_core.h"
+#include <linux/hashtable.h>
#define QETH_SNIFF_AVAIL 0x0008
struct qeth_ipaddr {
- struct list_head entry;
+ struct hlist_node hnode;
enum qeth_ip_types type;
enum qeth_ipa_setdelip_flags set_flags;
enum qeth_ipa_setdelip_flags del_flags;
- int is_multicast;
- int users;
+ u8 is_multicast:1;
+ u8 in_progress:1;
+ u8 disp_flag:2;
+
+ /* is changed only for normal IP addresses;
+ * for non-normal addresses it is always 1
+ */
+ int ref_counter;
enum qeth_prot_versions proto;
unsigned char mac[OSA_ADDR_LEN];
union {
@@ -32,7 +39,24 @@ struct qeth_ipaddr {
unsigned int pfxlen;
} a6;
} u;
+
};
+static inline u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr)
+{
+ u64 ret = 0;
+ u8 *point;
+
+ if (addr->proto == QETH_PROT_IPV6) {
+ point = (u8 *) &addr->u.a6.addr;
+ ret = get_unaligned((u64 *)point) ^
+ get_unaligned((u64 *) (point + 8));
+ }
+ if (addr->proto == QETH_PROT_IPV4) {
+ point = (u8 *) &addr->u.a4.addr;
+ ret = get_unaligned((u32 *) point);
+ }
+ return ret;
+}
struct qeth_ipato_entry {
struct list_head entry;
@@ -60,6 +84,5 @@ int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions);
int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *);
int qeth_l3_delete_ip(struct qeth_card *, struct qeth_ipaddr *);
-void qeth_l3_set_ip_addr_list(struct qeth_card *);
#endif /* __QETH_L3_H__ */
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 709b52339..272d9e741 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -30,6 +30,7 @@
#include <net/ip6_fib.h>
#include <net/ip6_checksum.h>
#include <net/iucv/af_iucv.h>
+#include <linux/hashtable.h>
#include "qeth_l3.h"
@@ -57,7 +58,7 @@ static int qeth_l3_isxdigit(char *buf)
static void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf)
{
- sprintf(buf, "%i.%i.%i.%i", addr[0], addr[1], addr[2], addr[3]);
+ sprintf(buf, "%pI4", addr);
}
static int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr)
@@ -204,104 +205,139 @@ int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
return rc;
}
-/*
- * Add IP to be added to todo list. If there is already an "add todo"
- * in this list we just incremenent the reference count.
- * Returns 0 if we just incremented reference count.
- */
-static int __qeth_l3_insert_ip_todo(struct qeth_card *card,
- struct qeth_ipaddr *addr, int add)
+inline int
+qeth_l3_ipaddrs_is_equal(struct qeth_ipaddr *addr1, struct qeth_ipaddr *addr2)
{
- struct qeth_ipaddr *tmp, *t;
- int found = 0;
+ return addr1->proto == addr2->proto &&
+ !memcmp(&addr1->u, &addr2->u, sizeof(addr1->u)) &&
+ !memcmp(&addr1->mac, &addr2->mac, sizeof(addr1->mac));
+}
- if (card->options.sniffer)
- return 0;
- list_for_each_entry_safe(tmp, t, card->ip_tbd_list, entry) {
- if ((addr->type == QETH_IP_TYPE_DEL_ALL_MC) &&
- (tmp->type == QETH_IP_TYPE_DEL_ALL_MC))
- return 0;
- if ((tmp->proto == QETH_PROT_IPV4) &&
- (addr->proto == QETH_PROT_IPV4) &&
- (tmp->type == addr->type) &&
- (tmp->is_multicast == addr->is_multicast) &&
- (tmp->u.a4.addr == addr->u.a4.addr) &&
- (tmp->u.a4.mask == addr->u.a4.mask)) {
- found = 1;
- break;
- }
- if ((tmp->proto == QETH_PROT_IPV6) &&
- (addr->proto == QETH_PROT_IPV6) &&
- (tmp->type == addr->type) &&
- (tmp->is_multicast == addr->is_multicast) &&
- (tmp->u.a6.pfxlen == addr->u.a6.pfxlen) &&
- (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
- sizeof(struct in6_addr)) == 0)) {
- found = 1;
- break;
- }
- }
- if (found) {
- if (addr->users != 0)
- tmp->users += addr->users;
- else
- tmp->users += add ? 1 : -1;
- if (tmp->users == 0) {
- list_del(&tmp->entry);
- kfree(tmp);
- }
- return 0;
+static struct qeth_ipaddr *
+qeth_l3_ip_from_hash(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
+{
+ struct qeth_ipaddr *addr;
+
+ if (tmp_addr->is_multicast) {
+ hash_for_each_possible(card->ip_mc_htable, addr,
+ hnode, qeth_l3_ipaddr_hash(tmp_addr))
+ if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr))
+ return addr;
} else {
- if (addr->type == QETH_IP_TYPE_DEL_ALL_MC)
- list_add(&addr->entry, card->ip_tbd_list);
- else {
- if (addr->users == 0)
- addr->users += add ? 1 : -1;
- if (add && (addr->type == QETH_IP_TYPE_NORMAL) &&
- qeth_l3_is_addr_covered_by_ipato(card, addr)) {
- QETH_CARD_TEXT(card, 2, "tkovaddr");
- addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
- }
- list_add_tail(&addr->entry, card->ip_tbd_list);
- }
- return 1;
+ hash_for_each_possible(card->ip_htable, addr,
+ hnode, qeth_l3_ipaddr_hash(tmp_addr))
+ if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr))
+ return addr;
}
+
+ return NULL;
}
-int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
+int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
{
- unsigned long flags;
int rc = 0;
+ struct qeth_ipaddr *addr;
QETH_CARD_TEXT(card, 4, "delip");
- if (addr->proto == QETH_PROT_IPV4)
- QETH_CARD_HEX(card, 4, &addr->u.a4.addr, 4);
+ if (tmp_addr->proto == QETH_PROT_IPV4)
+ QETH_CARD_HEX(card, 4, &tmp_addr->u.a4.addr, 4);
else {
- QETH_CARD_HEX(card, 4, &addr->u.a6.addr, 8);
- QETH_CARD_HEX(card, 4, ((char *)&addr->u.a6.addr) + 8, 8);
+ QETH_CARD_HEX(card, 4, &tmp_addr->u.a6.addr, 8);
+ QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
+ }
+
+ addr = qeth_l3_ip_from_hash(card, tmp_addr);
+ if (!addr)
+ return -ENOENT;
+
+ addr->ref_counter--;
+ if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0)
+ return rc;
+ if (addr->in_progress)
+ return -EINPROGRESS;
+
+ if (!qeth_card_hw_is_reachable(card)) {
+ addr->disp_flag = QETH_DISP_ADDR_DELETE;
+ return 0;
}
- spin_lock_irqsave(&card->ip_lock, flags);
- rc = __qeth_l3_insert_ip_todo(card, addr, 0);
- spin_unlock_irqrestore(&card->ip_lock, flags);
+
+ rc = qeth_l3_deregister_addr_entry(card, addr);
+
+ hash_del(&addr->hnode);
+ kfree(addr);
+
return rc;
}
-int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
+int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
{
- unsigned long flags;
int rc = 0;
+ struct qeth_ipaddr *addr;
QETH_CARD_TEXT(card, 4, "addip");
- if (addr->proto == QETH_PROT_IPV4)
- QETH_CARD_HEX(card, 4, &addr->u.a4.addr, 4);
+
+ if (tmp_addr->proto == QETH_PROT_IPV4)
+ QETH_CARD_HEX(card, 4, &tmp_addr->u.a4.addr, 4);
else {
- QETH_CARD_HEX(card, 4, &addr->u.a6.addr, 8);
- QETH_CARD_HEX(card, 4, ((char *)&addr->u.a6.addr) + 8, 8);
+ QETH_CARD_HEX(card, 4, &tmp_addr->u.a6.addr, 8);
+ QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
+ }
+
+ addr = qeth_l3_ip_from_hash(card, tmp_addr);
+ if (!addr) {
+ addr = qeth_l3_get_addr_buffer(tmp_addr->proto);
+ if (!addr)
+ return -ENOMEM;
+
+ memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr));
+ addr->ref_counter = 1;
+
+ if (addr->type == QETH_IP_TYPE_NORMAL &&
+ qeth_l3_is_addr_covered_by_ipato(card, addr)) {
+ QETH_CARD_TEXT(card, 2, "tkovaddr");
+ addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
+ }
+ hash_add(card->ip_htable, &addr->hnode,
+ qeth_l3_ipaddr_hash(addr));
+
+ if (!qeth_card_hw_is_reachable(card)) {
+ addr->disp_flag = QETH_DISP_ADDR_ADD;
+ return 0;
+ }
+
+ /* qeth_l3_register_addr_entry can sleep
+ * when we add an IPv4 addr: the SETIP ipa cmd
+ * starts ARP processing for the IPv4 addr.
+ * Thus we must unlock the spinlock, and protect the
+ * address with the in_progress flag to indicate that
+ * a hardware operation on this IPv4 address is pending
+ */
+ if (addr->proto == QETH_PROT_IPV4) {
+ addr->in_progress = 1;
+ spin_unlock_bh(&card->ip_lock);
+ rc = qeth_l3_register_addr_entry(card, addr);
+ spin_lock_bh(&card->ip_lock);
+ addr->in_progress = 0;
+ } else
+ rc = qeth_l3_register_addr_entry(card, addr);
+
+ if (!rc || (rc == IPA_RC_DUPLICATE_IP_ADDRESS) ||
+ (rc == IPA_RC_LAN_OFFLINE)) {
+ addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
+ if (addr->ref_counter < 1) {
+ qeth_l3_delete_ip(card, addr);
+ kfree(addr);
+ }
+ } else {
+ hash_del(&addr->hnode);
+ kfree(addr);
+ }
+ } else {
+ if (addr->type == QETH_IP_TYPE_NORMAL)
+ addr->ref_counter++;
}
- spin_lock_irqsave(&card->ip_lock, flags);
- rc = __qeth_l3_insert_ip_todo(card, addr, 1);
- spin_unlock_irqrestore(&card->ip_lock, flags);
+
return rc;
}
@@ -312,229 +348,90 @@ struct qeth_ipaddr *qeth_l3_get_addr_buffer(
struct qeth_ipaddr *addr;
addr = kzalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC);
- if (addr == NULL) {
+ if (!addr)
return NULL;
- }
+
addr->type = QETH_IP_TYPE_NORMAL;
+ addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
addr->proto = prot;
+
return addr;
}
-static void qeth_l3_delete_mc_addresses(struct qeth_card *card)
+static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
{
- struct qeth_ipaddr *iptodo;
- unsigned long flags;
+ struct qeth_ipaddr *addr;
+ struct hlist_node *tmp;
+ int i;
- QETH_CARD_TEXT(card, 4, "delmc");
- iptodo = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
- if (!iptodo) {
- QETH_CARD_TEXT(card, 2, "dmcnomem");
+ QETH_CARD_TEXT(card, 4, "clearip");
+
+ if (recover && card->options.sniffer)
return;
- }
- iptodo->type = QETH_IP_TYPE_DEL_ALL_MC;
- spin_lock_irqsave(&card->ip_lock, flags);
- if (!__qeth_l3_insert_ip_todo(card, iptodo, 0))
- kfree(iptodo);
- spin_unlock_irqrestore(&card->ip_lock, flags);
-}
-/*
- * Add/remove address to/from card's ip list, i.e. try to add or remove
- * reference to/from an IP address that is already registered on the card.
- * Returns:
- * 0 address was on card and its reference count has been adjusted,
- * but is still > 0, so nothing has to be done
- * also returns 0 if card was not on card and the todo was to delete
- * the address -> there is also nothing to be done
- * 1 address was not on card and the todo is to add it to the card's ip
- * list
- * -1 address was on card and its reference count has been decremented
- * to <= 0 by the todo -> address must be removed from card
- */
-static int __qeth_l3_ref_ip_on_card(struct qeth_card *card,
- struct qeth_ipaddr *todo, struct qeth_ipaddr **__addr)
-{
- struct qeth_ipaddr *addr;
- int found = 0;
-
- list_for_each_entry(addr, &card->ip_list, entry) {
- if ((addr->proto == QETH_PROT_IPV4) &&
- (todo->proto == QETH_PROT_IPV4) &&
- (addr->type == todo->type) &&
- (addr->u.a4.addr == todo->u.a4.addr) &&
- (addr->u.a4.mask == todo->u.a4.mask)) {
- found = 1;
- break;
- }
- if ((addr->proto == QETH_PROT_IPV6) &&
- (todo->proto == QETH_PROT_IPV6) &&
- (addr->type == todo->type) &&
- (addr->u.a6.pfxlen == todo->u.a6.pfxlen) &&
- (memcmp(&addr->u.a6.addr, &todo->u.a6.addr,
- sizeof(struct in6_addr)) == 0)) {
- found = 1;
- break;
- }
- }
- if (found) {
- addr->users += todo->users;
- if (addr->users <= 0) {
- *__addr = addr;
- return -1;
- } else {
- /* for VIPA and RXIP limit refcount to 1 */
- if (addr->type != QETH_IP_TYPE_NORMAL)
- addr->users = 1;
- return 0;
+ spin_lock_bh(&card->ip_lock);
+
+ hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
+ if (!recover) {
+ hash_del(&addr->hnode);
+ kfree(addr);
+ continue;
}
+ addr->disp_flag = QETH_DISP_ADDR_ADD;
}
- if (todo->users > 0) {
- /* for VIPA and RXIP limit refcount to 1 */
- if (todo->type != QETH_IP_TYPE_NORMAL)
- todo->users = 1;
- return 1;
- } else
- return 0;
-}
-static void __qeth_l3_delete_all_mc(struct qeth_card *card,
- unsigned long *flags)
-{
- struct list_head fail_list;
- struct qeth_ipaddr *addr, *tmp;
- int rc;
+ spin_unlock_bh(&card->ip_lock);
- INIT_LIST_HEAD(&fail_list);
-again:
- list_for_each_entry_safe(addr, tmp, &card->ip_list, entry) {
- if (addr->is_multicast) {
- list_del(&addr->entry);
- spin_unlock_irqrestore(&card->ip_lock, *flags);
- rc = qeth_l3_deregister_addr_entry(card, addr);
- spin_lock_irqsave(&card->ip_lock, *flags);
- if (!rc || (rc == IPA_RC_MC_ADDR_NOT_FOUND))
- kfree(addr);
- else
- list_add_tail(&addr->entry, &fail_list);
- goto again;
- }
- }
- list_splice(&fail_list, &card->ip_list);
-}
+ spin_lock_bh(&card->mclock);
-void qeth_l3_set_ip_addr_list(struct qeth_card *card)
-{
- struct list_head *tbd_list;
- struct qeth_ipaddr *todo, *addr;
- unsigned long flags;
- int rc;
+ hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
+ hash_del(&addr->hnode);
+ kfree(addr);
+ }
- QETH_CARD_TEXT(card, 2, "sdiplist");
- QETH_CARD_HEX(card, 2, &card, sizeof(void *));
+ spin_unlock_bh(&card->mclock);
- if (!qeth_card_hw_is_reachable(card) || card->options.sniffer)
- return;
- spin_lock_irqsave(&card->ip_lock, flags);
- tbd_list = card->ip_tbd_list;
- card->ip_tbd_list = kzalloc(sizeof(struct list_head), GFP_ATOMIC);
- if (!card->ip_tbd_list) {
- QETH_CARD_TEXT(card, 0, "silnomem");
- card->ip_tbd_list = tbd_list;
- spin_unlock_irqrestore(&card->ip_lock, flags);
- return;
- } else
- INIT_LIST_HEAD(card->ip_tbd_list);
-
- while (!list_empty(tbd_list)) {
- todo = list_entry(tbd_list->next, struct qeth_ipaddr, entry);
- list_del(&todo->entry);
- if (todo->type == QETH_IP_TYPE_DEL_ALL_MC) {
- __qeth_l3_delete_all_mc(card, &flags);
- kfree(todo);
- continue;
- }
- rc = __qeth_l3_ref_ip_on_card(card, todo, &addr);
- if (rc == 0) {
- /* nothing to be done; only adjusted refcount */
- kfree(todo);
- } else if (rc == 1) {
- /* new entry to be added to on-card list */
- spin_unlock_irqrestore(&card->ip_lock, flags);
- rc = qeth_l3_register_addr_entry(card, todo);
- spin_lock_irqsave(&card->ip_lock, flags);
- if (!rc || (rc == IPA_RC_LAN_OFFLINE))
- list_add_tail(&todo->entry, &card->ip_list);
- else
- kfree(todo);
- } else if (rc == -1) {
- /* on-card entry to be removed */
- list_del_init(&addr->entry);
- spin_unlock_irqrestore(&card->ip_lock, flags);
- rc = qeth_l3_deregister_addr_entry(card, addr);
- spin_lock_irqsave(&card->ip_lock, flags);
- if (!rc || (rc == IPA_RC_IP_ADDRESS_NOT_DEFINED))
- kfree(addr);
- else
- list_add_tail(&addr->entry, &card->ip_list);
- kfree(todo);
- }
- }
- spin_unlock_irqrestore(&card->ip_lock, flags);
- kfree(tbd_list);
}
-
-static void qeth_l3_clear_ip_list(struct qeth_card *card, int recover)
+static void qeth_l3_recover_ip(struct qeth_card *card)
{
- struct qeth_ipaddr *addr, *tmp;
- unsigned long flags;
+ struct qeth_ipaddr *addr;
+ struct hlist_node *tmp;
+ int i;
+ int rc;
- QETH_CARD_TEXT(card, 4, "clearip");
- if (recover && card->options.sniffer)
- return;
- spin_lock_irqsave(&card->ip_lock, flags);
- /* clear todo list */
- list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry) {
- list_del(&addr->entry);
- kfree(addr);
- }
+ QETH_CARD_TEXT(card, 4, "recovrip");
+
+ spin_lock_bh(&card->ip_lock);
- while (!list_empty(&card->ip_list)) {
- addr = list_entry(card->ip_list.next,
- struct qeth_ipaddr, entry);
- list_del_init(&addr->entry);
- if (!recover || addr->is_multicast) {
+ hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
+ if (addr->disp_flag == QETH_DISP_ADDR_DELETE) {
+ qeth_l3_deregister_addr_entry(card, addr);
+ hash_del(&addr->hnode);
kfree(addr);
- continue;
+ } else if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
+ if (addr->proto == QETH_PROT_IPV4) {
+ addr->in_progress = 1;
+ spin_unlock_bh(&card->ip_lock);
+ rc = qeth_l3_register_addr_entry(card, addr);
+ spin_lock_bh(&card->ip_lock);
+ addr->in_progress = 0;
+ } else
+ rc = qeth_l3_register_addr_entry(card, addr);
+
+ if (!rc) {
+ addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
+ if (addr->ref_counter < 1)
+ qeth_l3_delete_ip(card, addr);
+ } else {
+ hash_del(&addr->hnode);
+ kfree(addr);
+ }
}
- list_add_tail(&addr->entry, card->ip_tbd_list);
}
- spin_unlock_irqrestore(&card->ip_lock, flags);
-}
-
-static int qeth_l3_address_exists_in_list(struct list_head *list,
- struct qeth_ipaddr *addr, int same_type)
-{
- struct qeth_ipaddr *tmp;
- list_for_each_entry(tmp, list, entry) {
- if ((tmp->proto == QETH_PROT_IPV4) &&
- (addr->proto == QETH_PROT_IPV4) &&
- ((same_type && (tmp->type == addr->type)) ||
- (!same_type && (tmp->type != addr->type))) &&
- (tmp->u.a4.addr == addr->u.a4.addr))
- return 1;
+ spin_unlock_bh(&card->ip_lock);
- if ((tmp->proto == QETH_PROT_IPV6) &&
- (addr->proto == QETH_PROT_IPV6) &&
- ((same_type && (tmp->type == addr->type)) ||
- (!same_type && (tmp->type != addr->type))) &&
- (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
- sizeof(struct in6_addr)) == 0))
- return 1;
-
- }
- return 0;
}
static int qeth_l3_send_setdelmc(struct qeth_card *card,
@@ -712,27 +609,28 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
*/
static void qeth_l3_clear_ipato_list(struct qeth_card *card)
{
-
struct qeth_ipato_entry *ipatoe, *tmp;
- unsigned long flags;
- spin_lock_irqsave(&card->ip_lock, flags);
+ spin_lock_bh(&card->ip_lock);
+
list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
list_del(&ipatoe->entry);
kfree(ipatoe);
}
- spin_unlock_irqrestore(&card->ip_lock, flags);
+
+ spin_unlock_bh(&card->ip_lock);
}
int qeth_l3_add_ipato_entry(struct qeth_card *card,
struct qeth_ipato_entry *new)
{
struct qeth_ipato_entry *ipatoe;
- unsigned long flags;
int rc = 0;
QETH_CARD_TEXT(card, 2, "addipato");
- spin_lock_irqsave(&card->ip_lock, flags);
+
+ spin_lock_bh(&card->ip_lock);
+
list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
if (ipatoe->proto != new->proto)
continue;
@@ -743,10 +641,12 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
break;
}
}
+
if (!rc)
list_add_tail(&new->entry, &card->ipato.entries);
- spin_unlock_irqrestore(&card->ip_lock, flags);
+ spin_unlock_bh(&card->ip_lock);
+
return rc;
}
@@ -754,10 +654,11 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card,
enum qeth_prot_versions proto, u8 *addr, int mask_bits)
{
struct qeth_ipato_entry *ipatoe, *tmp;
- unsigned long flags;
QETH_CARD_TEXT(card, 2, "delipato");
- spin_lock_irqsave(&card->ip_lock, flags);
+
+ spin_lock_bh(&card->ip_lock);
+
list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
if (ipatoe->proto != proto)
continue;
@@ -768,7 +669,8 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card,
kfree(ipatoe);
}
}
- spin_unlock_irqrestore(&card->ip_lock, flags);
+
+ spin_unlock_bh(&card->ip_lock);
}
/*
@@ -778,7 +680,6 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
const u8 *addr)
{
struct qeth_ipaddr *ipaddr;
- unsigned long flags;
int rc = 0;
ipaddr = qeth_l3_get_addr_buffer(proto);
@@ -797,18 +698,18 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
ipaddr->del_flags = QETH_IPA_DELIP_VIPA_FLAG;
} else
return -ENOMEM;
- spin_lock_irqsave(&card->ip_lock, flags);
- if (qeth_l3_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
- qeth_l3_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
+
+ spin_lock_bh(&card->ip_lock);
+
+ if (qeth_l3_ip_from_hash(card, ipaddr))
rc = -EEXIST;
- spin_unlock_irqrestore(&card->ip_lock, flags);
- if (rc) {
- kfree(ipaddr);
- return rc;
- }
- if (!qeth_l3_add_ip(card, ipaddr))
- kfree(ipaddr);
- qeth_l3_set_ip_addr_list(card);
+ else
+ qeth_l3_add_ip(card, ipaddr);
+
+ spin_unlock_bh(&card->ip_lock);
+
+ kfree(ipaddr);
+
return rc;
}
@@ -831,9 +732,12 @@ void qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
ipaddr->type = QETH_IP_TYPE_VIPA;
} else
return;
- if (!qeth_l3_delete_ip(card, ipaddr))
- kfree(ipaddr);
- qeth_l3_set_ip_addr_list(card);
+
+ spin_lock_bh(&card->ip_lock);
+ qeth_l3_delete_ip(card, ipaddr);
+ spin_unlock_bh(&card->ip_lock);
+
+ kfree(ipaddr);
}
/*
@@ -843,7 +747,6 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
const u8 *addr)
{
struct qeth_ipaddr *ipaddr;
- unsigned long flags;
int rc = 0;
ipaddr = qeth_l3_get_addr_buffer(proto);
@@ -857,24 +760,25 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
memcpy(&ipaddr->u.a6.addr, addr, 16);
ipaddr->u.a6.pfxlen = 0;
}
+
ipaddr->type = QETH_IP_TYPE_RXIP;
ipaddr->set_flags = QETH_IPA_SETIP_TAKEOVER_FLAG;
ipaddr->del_flags = 0;
} else
return -ENOMEM;
- spin_lock_irqsave(&card->ip_lock, flags);
- if (qeth_l3_address_exists_in_list(&card->ip_list, ipaddr, 0) ||
- qeth_l3_address_exists_in_list(card->ip_tbd_list, ipaddr, 0))
+
+ spin_lock_bh(&card->ip_lock);
+
+ if (qeth_l3_ip_from_hash(card, ipaddr))
rc = -EEXIST;
- spin_unlock_irqrestore(&card->ip_lock, flags);
- if (rc) {
- kfree(ipaddr);
- return rc;
- }
- if (!qeth_l3_add_ip(card, ipaddr))
- kfree(ipaddr);
- qeth_l3_set_ip_addr_list(card);
- return 0;
+ else
+ qeth_l3_add_ip(card, ipaddr);
+
+ spin_unlock_bh(&card->ip_lock);
+
+ kfree(ipaddr);
+
+ return rc;
}
void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
@@ -896,9 +800,12 @@ void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
ipaddr->type = QETH_IP_TYPE_RXIP;
} else
return;
- if (!qeth_l3_delete_ip(card, ipaddr))
- kfree(ipaddr);
- qeth_l3_set_ip_addr_list(card);
+
+ spin_lock_bh(&card->ip_lock);
+ qeth_l3_delete_ip(card, ipaddr);
+ spin_unlock_bh(&card->ip_lock);
+
+ kfree(ipaddr);
}
static int qeth_l3_register_addr_entry(struct qeth_card *card,
@@ -908,6 +815,7 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card,
int rc = 0;
int cnt = 3;
+
if (addr->proto == QETH_PROT_IPV4) {
QETH_CARD_TEXT(card, 2, "setaddr4");
QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
@@ -1013,36 +921,6 @@ static int qeth_l3_setadapter_parms(struct qeth_card *card)
return rc;
}
-static int qeth_l3_default_setassparms_cb(struct qeth_card *card,
- struct qeth_reply *reply, unsigned long data)
-{
- struct qeth_ipa_cmd *cmd;
-
- QETH_CARD_TEXT(card, 4, "defadpcb");
-
- cmd = (struct qeth_ipa_cmd *) data;
- if (cmd->hdr.return_code == 0) {
- cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
- if (cmd->hdr.prot_version == QETH_PROT_IPV4)
- card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
- if (cmd->hdr.prot_version == QETH_PROT_IPV6)
- card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
- }
- if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM &&
- cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
- card->info.csum_mask = cmd->data.setassparms.data.flags_32bit;
- QETH_CARD_TEXT_(card, 3, "csum:%d", card->info.csum_mask);
- }
- if (cmd->data.setassparms.hdr.assist_no == IPA_OUTBOUND_CHECKSUM &&
- cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
- card->info.tx_csum_mask =
- cmd->data.setassparms.data.flags_32bit;
- QETH_CARD_TEXT_(card, 3, "tcsu:%d", card->info.tx_csum_mask);
- }
-
- return 0;
-}
-
#ifdef CONFIG_QETH_IPV6
static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
enum qeth_ipa_funcs ipa_func, __u16 cmd_code)
@@ -1056,7 +934,7 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
if (!iob)
return -ENOMEM;
rc = qeth_send_setassparms(card, iob, 0, 0,
- qeth_l3_default_setassparms_cb, NULL);
+ qeth_setassparms_cb, NULL);
return rc;
}
#endif
@@ -1291,47 +1169,6 @@ out:
return rc;
}
-static void qeth_l3_start_ipa_checksum(struct qeth_card *card)
-{
- QETH_CARD_TEXT(card, 3, "strtcsum");
- if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM)
- && (card->dev->features & NETIF_F_RXCSUM))
- qeth_set_rx_csum(card, 1);
-}
-
-static void qeth_l3_start_ipa_tx_checksum(struct qeth_card *card)
-{
- QETH_CARD_TEXT(card, 3, "strttxcs");
- qeth_start_ipa_tx_checksum(card);
-}
-
-static int qeth_l3_start_ipa_tso(struct qeth_card *card)
-{
- int rc;
-
- QETH_CARD_TEXT(card, 3, "sttso");
-
- if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
- dev_info(&card->gdev->dev,
- "Outbound TSO not supported on %s\n",
- QETH_CARD_IFNAME(card));
- rc = -EOPNOTSUPP;
- } else {
- rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO,
- IPA_CMD_ASS_START, 0);
- if (rc)
- dev_warn(&card->gdev->dev, "Starting outbound TCP "
- "segmentation offload for %s failed\n",
- QETH_CARD_IFNAME(card));
- else
- dev_info(&card->gdev->dev,
- "Outbound TSO enabled\n");
- }
- if (rc)
- card->dev->features &= ~NETIF_F_TSO;
- return rc;
-}
-
static int qeth_l3_start_ipassists(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 3, "strtipas");
@@ -1345,9 +1182,6 @@ static int qeth_l3_start_ipassists(struct qeth_card *card)
qeth_l3_start_ipa_multicast(card); /* go on*/
qeth_l3_start_ipa_ipv6(card); /* go on*/
qeth_l3_start_ipa_broadcast(card); /* go on*/
- qeth_l3_start_ipa_checksum(card); /* go on*/
- qeth_l3_start_ipa_tx_checksum(card);
- qeth_l3_start_ipa_tso(card); /* go on*/
return 0;
}
@@ -1507,31 +1341,99 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL);
}
-static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac,
- struct net_device *dev)
+static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac)
{
ip_eth_mc_map(ipm, mac);
}
-static void qeth_l3_add_mc(struct qeth_card *card, struct in_device *in4_dev)
+static void qeth_l3_mark_all_mc_to_be_deleted(struct qeth_card *card)
+{
+ struct qeth_ipaddr *addr;
+ int i;
+
+ hash_for_each(card->ip_mc_htable, i, addr, hnode)
+ addr->disp_flag = QETH_DISP_ADDR_DELETE;
+
+}
+
+static void qeth_l3_add_all_new_mc(struct qeth_card *card)
+{
+ struct qeth_ipaddr *addr;
+ struct hlist_node *tmp;
+ int i;
+ int rc;
+
+ hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
+ if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
+ rc = qeth_l3_register_addr_entry(card, addr);
+ if (!rc || (rc == IPA_RC_LAN_OFFLINE))
+ addr->ref_counter = 1;
+ else {
+ hash_del(&addr->hnode);
+ kfree(addr);
+ }
+ }
+ }
+
+}
+
+static void qeth_l3_delete_nonused_mc(struct qeth_card *card)
+{
+ struct qeth_ipaddr *addr;
+ struct hlist_node *tmp;
+ int i;
+ int rc;
+
+ hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
+ if (addr->disp_flag == QETH_DISP_ADDR_DELETE) {
+ rc = qeth_l3_deregister_addr_entry(card, addr);
+ if (!rc || (rc == IPA_RC_MC_ADDR_NOT_FOUND)) {
+ hash_del(&addr->hnode);
+ kfree(addr);
+ }
+ }
+ }
+
+}
+
+
+static void
+qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
{
- struct qeth_ipaddr *ipm;
struct ip_mc_list *im4;
+ struct qeth_ipaddr *tmp, *ipm;
char buf[MAX_ADDR_LEN];
QETH_CARD_TEXT(card, 4, "addmc");
+
+ tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
+ if (!tmp)
+ return;
+
for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
im4 = rcu_dereference(im4->next_rcu)) {
- qeth_l3_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev);
- ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
- if (!ipm)
- continue;
- ipm->u.a4.addr = im4->multiaddr;
- memcpy(ipm->mac, buf, OSA_ADDR_LEN);
- ipm->is_multicast = 1;
- if (!qeth_l3_add_ip(card, ipm))
- kfree(ipm);
+ qeth_l3_get_mac_for_ipm(im4->multiaddr, buf);
+
+ tmp->u.a4.addr = im4->multiaddr;
+ memcpy(tmp->mac, buf, sizeof(tmp->mac));
+
+ ipm = qeth_l3_ip_from_hash(card, tmp);
+ if (ipm) {
+ ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
+ } else {
+ ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
+ if (!ipm)
+ continue;
+ memcpy(ipm->mac, buf, sizeof(tmp->mac));
+ ipm->u.a4.addr = im4->multiaddr;
+ ipm->is_multicast = 1;
+ ipm->disp_flag = QETH_DISP_ADDR_ADD;
+ hash_add(card->ip_mc_htable,
+ &ipm->hnode, qeth_l3_ipaddr_hash(ipm));
+ }
}
+
+ kfree(tmp);
}
/* called with rcu_read_lock */
@@ -1541,6 +1443,7 @@ static void qeth_l3_add_vlan_mc(struct qeth_card *card)
u16 vid;
QETH_CARD_TEXT(card, 4, "addmcvl");
+
if (!qeth_is_supported(card, IPA_FULL_VLAN))
return;
@@ -1555,7 +1458,7 @@ static void qeth_l3_add_vlan_mc(struct qeth_card *card)
in_dev = __in_dev_get_rcu(netdev);
if (!in_dev)
continue;
- qeth_l3_add_mc(card, in_dev);
+ qeth_l3_add_mc_to_hash(card, in_dev);
}
}
@@ -1564,36 +1467,60 @@ static void qeth_l3_add_multicast_ipv4(struct qeth_card *card)
struct in_device *in4_dev;
QETH_CARD_TEXT(card, 4, "chkmcv4");
+
rcu_read_lock();
in4_dev = __in_dev_get_rcu(card->dev);
if (in4_dev == NULL)
goto unlock;
- qeth_l3_add_mc(card, in4_dev);
+ qeth_l3_add_mc_to_hash(card, in4_dev);
qeth_l3_add_vlan_mc(card);
unlock:
rcu_read_unlock();
}
#ifdef CONFIG_QETH_IPV6
-static void qeth_l3_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev)
+static void
+qeth_l3_add_mc6_to_hash(struct qeth_card *card, struct inet6_dev *in6_dev)
{
struct qeth_ipaddr *ipm;
struct ifmcaddr6 *im6;
+ struct qeth_ipaddr *tmp;
char buf[MAX_ADDR_LEN];
QETH_CARD_TEXT(card, 4, "addmc6");
+
+ tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
+ if (!tmp)
+ return;
+
for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0);
+
+ memcpy(tmp->mac, buf, sizeof(tmp->mac));
+ memcpy(&tmp->u.a6.addr, &im6->mca_addr.s6_addr,
+ sizeof(struct in6_addr));
+ tmp->is_multicast = 1;
+
+ ipm = qeth_l3_ip_from_hash(card, tmp);
+ if (ipm) {
+ ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
+ continue;
+ }
+
ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
if (!ipm)
continue;
- ipm->is_multicast = 1;
+
memcpy(ipm->mac, buf, OSA_ADDR_LEN);
memcpy(&ipm->u.a6.addr, &im6->mca_addr.s6_addr,
sizeof(struct in6_addr));
- if (!qeth_l3_add_ip(card, ipm))
- kfree(ipm);
+ ipm->is_multicast = 1;
+ ipm->disp_flag = QETH_DISP_ADDR_ADD;
+ hash_add(card->ip_mc_htable,
+ &ipm->hnode, qeth_l3_ipaddr_hash(ipm));
+
}
+ kfree(tmp);
}
/* called with rcu_read_lock */
@@ -1603,6 +1530,7 @@ static void qeth_l3_add_vlan_mc6(struct qeth_card *card)
u16 vid;
QETH_CARD_TEXT(card, 4, "admc6vl");
+
if (!qeth_is_supported(card, IPA_FULL_VLAN))
return;
@@ -1618,7 +1546,7 @@ static void qeth_l3_add_vlan_mc6(struct qeth_card *card)
if (!in_dev)
continue;
read_lock_bh(&in_dev->lock);
- qeth_l3_add_mc6(card, in_dev);
+ qeth_l3_add_mc6_to_hash(card, in_dev);
read_unlock_bh(&in_dev->lock);
in6_dev_put(in_dev);
}
@@ -1629,14 +1557,16 @@ static void qeth_l3_add_multicast_ipv6(struct qeth_card *card)
struct inet6_dev *in6_dev;
QETH_CARD_TEXT(card, 4, "chkmcv6");
+
if (!qeth_is_supported(card, IPA_IPV6))
return ;
in6_dev = in6_dev_get(card->dev);
- if (in6_dev == NULL)
+ if (!in6_dev)
return;
+
rcu_read_lock();
read_lock_bh(&in6_dev->lock);
- qeth_l3_add_mc6(card, in6_dev);
+ qeth_l3_add_mc6_to_hash(card, in6_dev);
qeth_l3_add_vlan_mc6(card);
read_unlock_bh(&in6_dev->lock);
rcu_read_unlock();
@@ -1660,16 +1590,23 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
in_dev = in_dev_get(netdev);
if (!in_dev)
return;
+
+ addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
+ if (!addr)
+ return;
+
+ spin_lock_bh(&card->ip_lock);
+
for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
- addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
- if (addr) {
- addr->u.a4.addr = ifa->ifa_address;
- addr->u.a4.mask = ifa->ifa_mask;
- addr->type = QETH_IP_TYPE_NORMAL;
- if (!qeth_l3_delete_ip(card, addr))
- kfree(addr);
- }
+ addr->u.a4.addr = ifa->ifa_address;
+ addr->u.a4.mask = ifa->ifa_mask;
+ addr->type = QETH_IP_TYPE_NORMAL;
+ qeth_l3_delete_ip(card, addr);
}
+
+ spin_unlock_bh(&card->ip_lock);
+
+ kfree(addr);
in_dev_put(in_dev);
}
@@ -1687,20 +1624,28 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), vid);
if (!netdev)
return;
+
in6_dev = in6_dev_get(netdev);
if (!in6_dev)
return;
+
+ addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
+ if (!addr)
+ return;
+
+ spin_lock_bh(&card->ip_lock);
+
list_for_each_entry(ifa, &in6_dev->addr_list, if_list) {
- addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
- if (addr) {
- memcpy(&addr->u.a6.addr, &ifa->addr,
- sizeof(struct in6_addr));
- addr->u.a6.pfxlen = ifa->prefix_len;
- addr->type = QETH_IP_TYPE_NORMAL;
- if (!qeth_l3_delete_ip(card, addr))
- kfree(addr);
- }
+ memcpy(&addr->u.a6.addr, &ifa->addr,
+ sizeof(struct in6_addr));
+ addr->u.a6.pfxlen = ifa->prefix_len;
+ addr->type = QETH_IP_TYPE_NORMAL;
+ qeth_l3_delete_ip(card, addr);
}
+
+ spin_unlock_bh(&card->ip_lock);
+
+ kfree(addr);
in6_dev_put(in6_dev);
#endif /* CONFIG_QETH_IPV6 */
}
@@ -1727,18 +1672,16 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
__be16 proto, u16 vid)
{
struct qeth_card *card = dev->ml_priv;
- unsigned long flags;
QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
+
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
QETH_CARD_TEXT(card, 3, "kidREC");
return 0;
}
- spin_lock_irqsave(&card->vlanlock, flags);
/* unregister IP addresses of vlan device */
qeth_l3_free_vlan_addresses(card, vid);
clear_bit(vid, card->active_vlans);
- spin_unlock_irqrestore(&card->vlanlock, flags);
qeth_l3_set_multicast_list(card->dev);
return 0;
}
@@ -1994,8 +1937,8 @@ static int qeth_l3_verify_vlan_dev(struct net_device *dev,
static int qeth_l3_verify_dev(struct net_device *dev)
{
struct qeth_card *card;
- unsigned long flags;
int rc = 0;
+ unsigned long flags;
read_lock_irqsave(&qeth_core_card_list.rwlock, flags);
list_for_each_entry(card, &qeth_core_card_list.list, list) {
@@ -2051,7 +1994,7 @@ static void qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
card->state = CARD_STATE_SOFTSETUP;
}
if (card->state == CARD_STATE_SOFTSETUP) {
- qeth_l3_clear_ip_list(card, 1);
+ qeth_l3_clear_ip_htable(card, 1);
qeth_clear_ipacmd_list(card);
card->state = CARD_STATE_HARDSETUP;
}
@@ -2106,12 +2049,20 @@ static void qeth_l3_set_multicast_list(struct net_device *dev)
(card->state != CARD_STATE_UP))
return;
if (!card->options.sniffer) {
- qeth_l3_delete_mc_addresses(card);
+
+ spin_lock_bh(&card->mclock);
+
+ qeth_l3_mark_all_mc_to_be_deleted(card);
+
qeth_l3_add_multicast_ipv4(card);
#ifdef CONFIG_QETH_IPV6
qeth_l3_add_multicast_ipv6(card);
#endif
- qeth_l3_set_ip_addr_list(card);
+ qeth_l3_delete_nonused_mc(card);
+ qeth_l3_add_all_new_mc(card);
+
+ spin_unlock_bh(&card->mclock);
+
if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
return;
}
@@ -2375,22 +2326,21 @@ static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
if (rc) {
if (copy_to_user(udata, qinfo.udata, 4))
rc = -EFAULT;
- goto free_and_out;
- } else {
+ goto free_and_out;
+ }
#ifdef CONFIG_QETH_IPV6
- if (qinfo.mask_bits & QETH_QARP_WITH_IPV6) {
- /* fails in case of GuestLAN QDIO mode */
- qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV6,
- &qinfo);
- }
+ if (qinfo.mask_bits & QETH_QARP_WITH_IPV6) {
+ /* fails in case of GuestLAN QDIO mode */
+ qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV6, &qinfo);
+ }
#endif
- if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) {
- QETH_CARD_TEXT(card, 4, "qactf");
- rc = -EFAULT;
- goto free_and_out;
- }
- QETH_CARD_TEXT(card, 4, "qacts");
+ if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) {
+ QETH_CARD_TEXT(card, 4, "qactf");
+ rc = -EFAULT;
+ goto free_and_out;
}
+ QETH_CARD_TEXT(card, 4, "qacts");
+
free_and_out:
kfree(qinfo.udata);
out:
@@ -2427,7 +2377,7 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card,
rc = qeth_send_setassparms(card, iob,
sizeof(struct qeth_arp_cache_entry),
(unsigned long) entry,
- qeth_l3_default_setassparms_cb, NULL);
+ qeth_setassparms_cb, NULL);
if (rc) {
tmp = rc;
qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
@@ -2467,7 +2417,7 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card,
return -ENOMEM;
rc = qeth_send_setassparms(card, iob,
12, (unsigned long)buf,
- qeth_l3_default_setassparms_cb, NULL);
+ qeth_setassparms_cb, NULL);
if (rc) {
tmp = rc;
memset(buf, 0, 16);
@@ -2793,15 +2743,34 @@ static void qeth_tso_fill_header(struct qeth_card *card,
}
}
-static inline int qeth_l3_tso_elements(struct sk_buff *skb)
+/**
+ * qeth_l3_get_elements_no_tso() - find number of SBALEs for skb data for TSO
+ * @card: qeth card structure, to check max. elems.
+ * @skb: SKB address
+ * @extra_elems: extra elems needed, to check against max.
+ *
+ * Returns the number of pages, and thus QDIO buffer elements, needed to cover
+ * skb data, including linear part and fragments, but excluding TCP header.
+ * (Exclusion of TCP header distinguishes it from qeth_get_elements_no().)
+ * Checks if the result plus extra_elems fits under the limit for the card.
+ * Returns 0 if it does not.
+ * Note: extra_elems is not included in the returned result.
+ */
+static int qeth_l3_get_elements_no_tso(struct qeth_card *card,
+ struct sk_buff *skb, int extra_elems)
{
- unsigned long tcpd = (unsigned long)tcp_hdr(skb) +
- tcp_hdr(skb)->doff * 4;
- int tcpd_len = skb_headlen(skb) - (tcpd - (unsigned long)skb->data);
- int elements = PFN_UP(tcpd + tcpd_len - 1) - PFN_DOWN(tcpd);
-
- elements += qeth_get_elements_for_frags(skb);
+ addr_t tcpdptr = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
+ int elements = qeth_get_elements_for_range(
+ tcpdptr,
+ (addr_t)skb->data + skb_headlen(skb)) +
+ qeth_get_elements_for_frags(skb);
+ if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
+ QETH_DBF_MESSAGE(2,
+ "Invalid size of TSO IP packet (Number=%d / Length=%d). Discarded.\n",
+ elements + extra_elems, skb->len);
+ return 0;
+ }
return elements;
}
@@ -2810,8 +2779,8 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
int rc;
u16 *tag;
struct qeth_hdr *hdr = NULL;
- int elements_needed = 0;
- int elems;
+ int hdr_elements = 0;
+ int elements;
struct qeth_card *card = dev->ml_priv;
struct sk_buff *new_skb = NULL;
int ipv = qeth_get_ip_version(skb);
@@ -2822,7 +2791,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
qeth_get_priority_queue(card, skb, ipv, cast_type) :
card->qdio.default_out_queue];
int tx_bytes = skb->len;
- bool large_send;
+ bool use_tso;
int data_offset = -1;
int nr_frags;
@@ -2847,10 +2816,12 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
card->perf_stats.outbound_start_time = qeth_get_micros();
}
- large_send = skb_is_gso(skb);
+ /* Ignore segment size from skb_is_gso(), 1 page is always used. */
+ use_tso = skb_is_gso(skb) &&
+ (qeth_get_ip_protocol(skb) == IPPROTO_TCP) && (ipv == 4);
- if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) &&
- (skb_shinfo(skb)->nr_frags == 0)) {
+ if ((card->info.type == QETH_CARD_TYPE_IQD) &&
+ !skb_is_nonlinear(skb)) {
new_skb = skb;
if (new_skb->protocol == ETH_P_AF_IUCV)
data_offset = 0;
@@ -2859,7 +2830,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
if (!hdr)
goto tx_drop;
- elements_needed++;
+ hdr_elements++;
} else {
/* create a clone with writeable headroom */
new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso)
@@ -2894,22 +2865,28 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* fix hardware limitation: as long as we do not have sbal
* chaining we can not send long frag lists
*/
- if (large_send) {
- if (qeth_l3_tso_elements(new_skb) + 1 > 16) {
- if (skb_linearize(new_skb))
- goto tx_drop;
- if (card->options.performance_stats)
+ if ((card->info.type != QETH_CARD_TYPE_IQD) &&
+ ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) ||
+ (!use_tso && !qeth_get_elements_no(card, new_skb, 0)))) {
+ int lin_rc = skb_linearize(new_skb);
+
+ if (card->options.performance_stats) {
+ if (lin_rc)
+ card->perf_stats.tx_linfail++;
+ else
card->perf_stats.tx_lin++;
}
+ if (lin_rc)
+ goto tx_drop;
}
- if (large_send && (cast_type == RTN_UNSPEC)) {
+ if (use_tso) {
hdr = (struct qeth_hdr *)skb_push(new_skb,
sizeof(struct qeth_hdr_tso));
memset(hdr, 0, sizeof(struct qeth_hdr_tso));
qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type);
qeth_tso_fill_header(card, hdr, new_skb);
- elements_needed++;
+ hdr_elements++;
} else {
if (data_offset < 0) {
hdr = (struct qeth_hdr *)skb_push(new_skb,
@@ -2930,31 +2907,31 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
qeth_l3_hdr_csum(card, hdr, new_skb);
}
- elems = qeth_get_elements_no(card, new_skb, elements_needed);
- if (!elems) {
+ elements = use_tso ?
+ qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) :
+ qeth_get_elements_no(card, new_skb, hdr_elements);
+ if (!elements) {
if (data_offset >= 0)
kmem_cache_free(qeth_core_header_cache, hdr);
goto tx_drop;
}
- elements_needed += elems;
- nr_frags = skb_shinfo(new_skb)->nr_frags;
+ elements += hdr_elements;
if (card->info.type != QETH_CARD_TYPE_IQD) {
int len;
- if (large_send)
+ if (use_tso)
len = ((unsigned long)tcp_hdr(new_skb) +
- tcp_hdr(new_skb)->doff * 4) -
+ tcp_hdrlen(new_skb)) -
(unsigned long)new_skb->data;
else
len = sizeof(struct qeth_hdr_layer3);
if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len))
goto tx_drop;
- rc = qeth_do_send_packet(card, queue, new_skb, hdr,
- elements_needed);
+ rc = qeth_do_send_packet(card, queue, new_skb, hdr, elements);
} else
rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
- elements_needed, data_offset, 0);
+ elements, data_offset, 0);
if (!rc) {
card->stats.tx_packets++;
@@ -2962,7 +2939,8 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (new_skb != skb)
dev_kfree_skb_any(skb);
if (card->options.performance_stats) {
- if (large_send) {
+ nr_frags = skb_shinfo(new_skb)->nr_frags;
+ if (use_tso) {
card->perf_stats.large_send_bytes += tx_bytes;
card->perf_stats.large_send_cnt++;
}
@@ -3048,36 +3026,6 @@ static int qeth_l3_stop(struct net_device *dev)
return 0;
}
-static netdev_features_t qeth_l3_fix_features(struct net_device *dev,
- netdev_features_t features)
-{
- struct qeth_card *card = dev->ml_priv;
-
- if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
- features &= ~NETIF_F_IP_CSUM;
- if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
- features &= ~NETIF_F_TSO;
- if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
- features &= ~NETIF_F_RXCSUM;
- return features;
-}
-
-static int qeth_l3_set_features(struct net_device *dev,
- netdev_features_t features)
-{
- struct qeth_card *card = dev->ml_priv;
- netdev_features_t changed = dev->features ^ features;
-
- if (!(changed & NETIF_F_RXCSUM))
- return 0;
-
- if (card->state == CARD_STATE_DOWN ||
- card->state == CARD_STATE_RECOVER)
- return 0;
-
- return qeth_set_rx_csum(card, features & NETIF_F_RXCSUM ? 1 : 0);
-}
-
static const struct ethtool_ops qeth_l3_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_strings = qeth_core_get_strings,
@@ -3120,8 +3068,8 @@ static const struct net_device_ops qeth_l3_netdev_ops = {
.ndo_set_rx_mode = qeth_l3_set_multicast_list,
.ndo_do_ioctl = qeth_l3_do_ioctl,
.ndo_change_mtu = qeth_change_mtu,
- .ndo_fix_features = qeth_l3_fix_features,
- .ndo_set_features = qeth_l3_set_features,
+ .ndo_fix_features = qeth_fix_features,
+ .ndo_set_features = qeth_set_features,
.ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid,
.ndo_tx_timeout = qeth_tx_timeout,
@@ -3136,8 +3084,8 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
.ndo_set_rx_mode = qeth_l3_set_multicast_list,
.ndo_do_ioctl = qeth_l3_do_ioctl,
.ndo_change_mtu = qeth_change_mtu,
- .ndo_fix_features = qeth_l3_fix_features,
- .ndo_set_features = qeth_l3_set_features,
+ .ndo_fix_features = qeth_fix_features,
+ .ndo_set_features = qeth_set_features,
.ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid,
.ndo_tx_timeout = qeth_tx_timeout,
@@ -3169,7 +3117,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
card->dev->hw_features = NETIF_F_SG |
NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
NETIF_F_TSO;
- card->dev->features = NETIF_F_RXCSUM;
+ card->dev->vlan_features = NETIF_F_SG |
+ NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
+ NETIF_F_TSO;
}
}
} else if (card->info.type == QETH_CARD_TYPE_IQD) {
@@ -3195,7 +3145,8 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
netif_keep_dst(card->dev);
- card->dev->gso_max_size = 15 * PAGE_SIZE;
+ card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
+ PAGE_SIZE;
SET_NETDEV_DEV(card->dev, &card->gdev->dev);
netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT);
@@ -3231,7 +3182,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
card->dev = NULL;
}
- qeth_l3_clear_ip_list(card, 0);
+ qeth_l3_clear_ip_htable(card, 0);
qeth_l3_clear_ipato_list(card);
return;
}
@@ -3316,7 +3267,7 @@ contin:
card->state = CARD_STATE_SOFTSETUP;
qeth_set_allowed_threads(card, 0xffffffff, 0);
- qeth_l3_set_ip_addr_list(card);
+ qeth_l3_recover_ip(card);
if (card->lan_online)
netif_carrier_on(card->dev);
else
@@ -3328,6 +3279,7 @@ contin:
else
dev_open(card->dev);
qeth_l3_set_multicast_list(card->dev);
+ qeth_recover_features(card->dev);
rtnl_unlock();
}
qeth_trace_features(card);
@@ -3517,6 +3469,7 @@ EXPORT_SYMBOL_GPL(qeth_l3_discipline);
static int qeth_l3_ip_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
+
struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
struct net_device *dev = (struct net_device *)ifa->ifa_dev->dev;
struct qeth_ipaddr *addr;
@@ -3531,27 +3484,27 @@ static int qeth_l3_ip_event(struct notifier_block *this,
QETH_CARD_TEXT(card, 3, "ipevent");
addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
- if (addr != NULL) {
+ if (addr) {
addr->u.a4.addr = ifa->ifa_address;
addr->u.a4.mask = ifa->ifa_mask;
addr->type = QETH_IP_TYPE_NORMAL;
} else
- goto out;
+ return NOTIFY_DONE;
switch (event) {
case NETDEV_UP:
- if (!qeth_l3_add_ip(card, addr))
- kfree(addr);
+ spin_lock_bh(&card->ip_lock);
+ qeth_l3_add_ip(card, addr);
+ spin_unlock_bh(&card->ip_lock);
break;
case NETDEV_DOWN:
- if (!qeth_l3_delete_ip(card, addr))
- kfree(addr);
- break;
- default:
+ spin_lock_bh(&card->ip_lock);
+ qeth_l3_delete_ip(card, addr);
+ spin_unlock_bh(&card->ip_lock);
break;
}
- qeth_l3_set_ip_addr_list(card);
-out:
+
+ kfree(addr);
return NOTIFY_DONE;
}
@@ -3580,27 +3533,27 @@ static int qeth_l3_ip6_event(struct notifier_block *this,
return NOTIFY_DONE;
addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
- if (addr != NULL) {
+ if (addr) {
memcpy(&addr->u.a6.addr, &ifa->addr, sizeof(struct in6_addr));
addr->u.a6.pfxlen = ifa->prefix_len;
addr->type = QETH_IP_TYPE_NORMAL;
} else
- goto out;
+ return NOTIFY_DONE;
switch (event) {
case NETDEV_UP:
- if (!qeth_l3_add_ip(card, addr))
- kfree(addr);
+ spin_lock_bh(&card->ip_lock);
+ qeth_l3_add_ip(card, addr);
+ spin_unlock_bh(&card->ip_lock);
break;
case NETDEV_DOWN:
- if (!qeth_l3_delete_ip(card, addr))
- kfree(addr);
- break;
- default:
+ spin_lock_bh(&card->ip_lock);
+ qeth_l3_delete_ip(card, addr);
+ spin_unlock_bh(&card->ip_lock);
break;
}
- qeth_l3_set_ip_addr_list(card);
-out:
+
+ kfree(addr);
return NOTIFY_DONE;
}
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 386eb7b89..0e00a5ce0 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -8,6 +8,7 @@
#include <linux/slab.h>
#include <asm/ebcdic.h>
+#include <linux/hashtable.h>
#include "qeth_l3.h"
#define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \
@@ -285,19 +286,21 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
if (card->options.hsuid[0]) {
/* delete old ip address */
addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
- if (addr != NULL) {
- addr->u.a6.addr.s6_addr32[0] = 0xfe800000;
- addr->u.a6.addr.s6_addr32[1] = 0x00000000;
- for (i = 8; i < 16; i++)
- addr->u.a6.addr.s6_addr[i] =
- card->options.hsuid[i - 8];
- addr->u.a6.pfxlen = 0;
- addr->type = QETH_IP_TYPE_NORMAL;
- } else
+ if (!addr)
return -ENOMEM;
- if (!qeth_l3_delete_ip(card, addr))
- kfree(addr);
- qeth_l3_set_ip_addr_list(card);
+
+ addr->u.a6.addr.s6_addr32[0] = 0xfe800000;
+ addr->u.a6.addr.s6_addr32[1] = 0x00000000;
+ for (i = 8; i < 16; i++)
+ addr->u.a6.addr.s6_addr[i] =
+ card->options.hsuid[i - 8];
+ addr->u.a6.pfxlen = 0;
+ addr->type = QETH_IP_TYPE_NORMAL;
+
+ spin_lock_bh(&card->ip_lock);
+ qeth_l3_delete_ip(card, addr);
+ spin_unlock_bh(&card->ip_lock);
+ kfree(addr);
}
if (strlen(tmp) == 0) {
@@ -328,9 +331,11 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
addr->type = QETH_IP_TYPE_NORMAL;
} else
return -ENOMEM;
- if (!qeth_l3_add_ip(card, addr))
- kfree(addr);
- qeth_l3_set_ip_addr_list(card);
+
+ spin_lock_bh(&card->ip_lock);
+ qeth_l3_add_ip(card, addr);
+ spin_unlock_bh(&card->ip_lock);
+ kfree(addr);
return count;
}
@@ -367,8 +372,8 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
- struct qeth_ipaddr *tmpipa, *t;
- int rc = 0;
+ struct qeth_ipaddr *addr;
+ int i, rc = 0;
if (!card)
return -EINVAL;
@@ -384,21 +389,20 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
card->ipato.enabled = (card->ipato.enabled)? 0 : 1;
} else if (sysfs_streq(buf, "1")) {
card->ipato.enabled = 1;
- list_for_each_entry_safe(tmpipa, t, card->ip_tbd_list, entry) {
- if ((tmpipa->type == QETH_IP_TYPE_NORMAL) &&
- qeth_l3_is_addr_covered_by_ipato(card, tmpipa))
- tmpipa->set_flags |=
+ hash_for_each(card->ip_htable, i, addr, hnode) {
+ if ((addr->type == QETH_IP_TYPE_NORMAL) &&
+ qeth_l3_is_addr_covered_by_ipato(card, addr))
+ addr->set_flags |=
QETH_IPA_SETIP_TAKEOVER_FLAG;
- }
-
+ }
} else if (sysfs_streq(buf, "0")) {
card->ipato.enabled = 0;
- list_for_each_entry_safe(tmpipa, t, card->ip_tbd_list, entry) {
- if (tmpipa->set_flags &
- QETH_IPA_SETIP_TAKEOVER_FLAG)
- tmpipa->set_flags &=
- ~QETH_IPA_SETIP_TAKEOVER_FLAG;
- }
+ hash_for_each(card->ip_htable, i, addr, hnode) {
+ if (addr->set_flags &
+ QETH_IPA_SETIP_TAKEOVER_FLAG)
+ addr->set_flags &=
+ ~QETH_IPA_SETIP_TAKEOVER_FLAG;
+ }
} else
rc = -EINVAL;
out:
@@ -452,7 +456,6 @@ static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card,
enum qeth_prot_versions proto)
{
struct qeth_ipato_entry *ipatoe;
- unsigned long flags;
char addr_str[40];
int entry_len; /* length of 1 entry string, differs between v4 and v6 */
int i = 0;
@@ -460,7 +463,7 @@ static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card,
entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
/* add strlen for "/<mask>\n" */
entry_len += (proto == QETH_PROT_IPV4)? 5 : 6;
- spin_lock_irqsave(&card->ip_lock, flags);
+ spin_lock_bh(&card->ip_lock);
list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
if (ipatoe->proto != proto)
continue;
@@ -473,7 +476,7 @@ static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card,
i += snprintf(buf + i, PAGE_SIZE - i,
"%s/%i\n", addr_str, ipatoe->mask_bits);
}
- spin_unlock_irqrestore(&card->ip_lock, flags);
+ spin_unlock_bh(&card->ip_lock);
i += snprintf(buf + i, PAGE_SIZE - i, "\n");
return i;
@@ -689,15 +692,15 @@ static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card,
enum qeth_prot_versions proto)
{
struct qeth_ipaddr *ipaddr;
+ struct hlist_node *tmp;
char addr_str[40];
int entry_len; /* length of 1 entry string, differs between v4 and v6 */
- unsigned long flags;
int i = 0;
entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
entry_len += 2; /* \n + terminator */
- spin_lock_irqsave(&card->ip_lock, flags);
- list_for_each_entry(ipaddr, &card->ip_list, entry) {
+ spin_lock_bh(&card->ip_lock);
+ hash_for_each_safe(card->ip_htable, i, tmp, ipaddr, hnode) {
if (ipaddr->proto != proto)
continue;
if (ipaddr->type != QETH_IP_TYPE_VIPA)
@@ -711,7 +714,7 @@ static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card,
addr_str);
i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
}
- spin_unlock_irqrestore(&card->ip_lock, flags);
+ spin_unlock_bh(&card->ip_lock);
i += snprintf(buf + i, PAGE_SIZE - i, "\n");
return i;
@@ -851,15 +854,15 @@ static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card,
enum qeth_prot_versions proto)
{
struct qeth_ipaddr *ipaddr;
+ struct hlist_node *tmp;
char addr_str[40];
int entry_len; /* length of 1 entry string, differs between v4 and v6 */
- unsigned long flags;
int i = 0;
entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
entry_len += 2; /* \n + terminator */
- spin_lock_irqsave(&card->ip_lock, flags);
- list_for_each_entry(ipaddr, &card->ip_list, entry) {
+ spin_lock_bh(&card->ip_lock);
+ hash_for_each_safe(card->ip_htable, i, tmp, ipaddr, hnode) {
if (ipaddr->proto != proto)
continue;
if (ipaddr->type != QETH_IP_TYPE_RXIP)
@@ -873,7 +876,7 @@ static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card,
addr_str);
i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
}
- spin_unlock_irqrestore(&card->ip_lock, flags);
+ spin_unlock_bh(&card->ip_lock);
i += snprintf(buf + i, PAGE_SIZE - i, "\n");
return i;
diff --git a/drivers/s390/virtio/Makefile b/drivers/s390/virtio/Makefile
index 241891a57..df40692a9 100644
--- a/drivers/s390/virtio/Makefile
+++ b/drivers/s390/virtio/Makefile
@@ -6,4 +6,8 @@
# it under the terms of the GNU General Public License (version 2 only)
# as published by the Free Software Foundation.
-obj-$(CONFIG_S390_GUEST) += kvm_virtio.o virtio_ccw.o
+s390-virtio-objs := virtio_ccw.o
+ifdef CONFIG_S390_GUEST_OLD_TRANSPORT
+s390-virtio-objs += kvm_virtio.o
+endif
+obj-$(CONFIG_S390_GUEST) += $(s390-virtio-objs)
diff --git a/drivers/s390/virtio/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c
index 1d060fd29..5e5c11f37 100644
--- a/drivers/s390/virtio/kvm_virtio.c
+++ b/drivers/s390/virtio/kvm_virtio.c
@@ -458,6 +458,8 @@ static int __init kvm_devices_init(void)
if (test_devices_support(total_memory_size) < 0)
return -ENODEV;
+ pr_warn("The s390-virtio transport is deprecated. Please switch to a modern host providing virtio-ccw.\n");
+
rc = vmem_add_mapping(total_memory_size, PAGE_SIZE);
if (rc)
return rc;
@@ -482,7 +484,7 @@ static int __init kvm_devices_init(void)
}
/* code for early console output with virtio_console */
-static __init int early_put_chars(u32 vtermno, const char *buf, int count)
+static int early_put_chars(u32 vtermno, const char *buf, int count)
{
char scratch[17];
unsigned int len = count;
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 3ddc85e6e..95e32a47f 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -1120,9 +1120,9 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
"reselection is tag %d, slot %p(%d)\n",
hostdata->msgin[2], slot, slot->tag);
} else {
- struct scsi_cmnd *SCp;
+ struct NCR_700_Device_Parameters *p = SDp->hostdata;
+ struct scsi_cmnd *SCp = p->current_cmnd;
- SCp = SDp->current_cmnd;
if(unlikely(SCp == NULL)) {
sdev_printk(KERN_ERR, SDp,
"no saved request for untagged cmd\n");
@@ -1825,9 +1825,11 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
slot->tag, slot);
} else {
+ struct NCR_700_Device_Parameters *p = SCp->device->hostdata;
+
slot->tag = SCSI_NO_TAG;
/* save current command for reselection */
- SCp->device->current_cmnd = SCp;
+ p->current_cmnd = SCp;
}
/* sanity check: some of the commands generated by the mid-layer
* have an eccentric idea of their sc_data_direction */
@@ -1892,7 +1894,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
slot->SG[i].pAddr = 0;
dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
- DEBUG((" SETTING %08lx to %x\n",
+ DEBUG((" SETTING %p to %x\n",
(&slot->pSG[i].ins),
slot->SG[i].ins));
}
diff --git a/drivers/scsi/53c700.h b/drivers/scsi/53c700.h
index e06bdfeab..f34c916b9 100644
--- a/drivers/scsi/53c700.h
+++ b/drivers/scsi/53c700.h
@@ -82,6 +82,7 @@ struct NCR_700_Device_Parameters {
* cmnd[1], this could be in static storage */
unsigned char cmnd[MAX_COMMAND_SIZE];
__u8 depth;
+ struct scsi_cmnd *current_cmnd; /* currently active command */
};
@@ -423,23 +424,25 @@ struct NCR_700_Host_Parameters {
#define script_patch_32(dev, script, symbol, value) \
{ \
int i; \
+ dma_addr_t da = value; \
for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
- __u32 val = bS_to_cpu((script)[A_##symbol##_used[i]]) + value; \
+ __u32 val = bS_to_cpu((script)[A_##symbol##_used[i]]) + da; \
(script)[A_##symbol##_used[i]] = bS_to_host(val); \
dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
- DEBUG((" script, patching %s at %d to 0x%lx\n", \
- #symbol, A_##symbol##_used[i], (value))); \
+ DEBUG((" script, patching %s at %d to %pad\n", \
+ #symbol, A_##symbol##_used[i], &da)); \
} \
}
#define script_patch_32_abs(dev, script, symbol, value) \
{ \
int i; \
+ dma_addr_t da = value; \
for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
- (script)[A_##symbol##_used[i]] = bS_to_host(value); \
+ (script)[A_##symbol##_used[i]] = bS_to_host(da); \
dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
- DEBUG((" script, patching %s at %d to 0x%lx\n", \
- #symbol, A_##symbol##_used[i], (value))); \
+ DEBUG((" script, patching %s at %d to %pad\n", \
+ #symbol, A_##symbol##_used[i], &da)); \
} \
}
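The macro rewrite also fixes the debug format: dma_addr_t is 32 bits on some configurations and 64 on others, so printing it with 0x%lx is wrong part of the time. The kernel's %pad specifier takes a pointer to a dma_addr_t and prints it at the correct width, which is why the patch introduces the local da variable and passes &da. A small sketch:

    #include <linux/printk.h>
    #include <linux/types.h>

    static void show_patch_target(dma_addr_t da)
    {
            /* %pad dereferences &da, so the output is correct no matter
             * how wide dma_addr_t is on this configuration. */
            pr_debug("patching to %pad\n", &da);
    }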
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 98e5d51a3..7d1b4317e 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -838,6 +838,23 @@ config SCSI_IBMVSCSI
To compile this driver as a module, choose M here: the
module will be called ibmvscsi.
+config SCSI_IBMVSCSIS
+ tristate "IBM Virtual SCSI Server support"
+ depends on PPC_PSERIES && TARGET_CORE && SCSI && PCI
+ help
+ This is the IBM POWER Virtual SCSI Target Server.
+ This driver uses the SRP protocol for communication between the
+ guest and/or the host that run on the same server.
+ More information on the VSCSI protocol can be found at www.power.org
+
+ The userspace configuration needed to initialize the driver can
+ be found here:
+
+ https://github.com/powervm/ibmvscsis/wiki/Configuration
+
+ To compile this driver as a module, choose M here: the
+ module will be called ibmvscsis.
+
config SCSI_IBMVFC
tristate "IBM Virtual FC support"
depends on PPC_PSERIES && SCSI
@@ -1433,7 +1450,7 @@ config SCSI_U14_34F_MAX_TAGS
config SCSI_ULTRASTOR
tristate "UltraStor SCSI support"
- depends on X86 && ISA && SCSI
+ depends on X86 && ISA && SCSI && ISA_DMA_API
---help---
This is support for the UltraStor 14F, 24F and 34F SCSI-2 host
adapter family. This driver is explained in section 3.12 of the
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 862ab4efa..d5397987e 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -128,6 +128,7 @@ obj-$(CONFIG_SCSI_SNI_53C710) += 53c700.o sni_53c710.o
obj-$(CONFIG_SCSI_NSP32) += nsp32.o
obj-$(CONFIG_SCSI_IPR) += ipr.o
obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi/
+obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi_tgt/
obj-$(CONFIG_SCSI_IBMVFC) += ibmvscsi/
obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
obj-$(CONFIG_SCSI_STEX) += stex.o
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index c424e8bc2..5648b715f 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -644,15 +644,14 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
}
} else {
struct user_sgmap* usg;
- usg = kmalloc(actual_fibsize - sizeof(struct aac_srb)
- + sizeof(struct sgmap), GFP_KERNEL);
+ usg = kmemdup(upsg,
+ actual_fibsize - sizeof(struct aac_srb)
+ + sizeof(struct sgmap), GFP_KERNEL);
if (!usg) {
dprintk((KERN_DEBUG"aacraid: Allocation error in Raw SRB command\n"));
rcode = -ENOMEM;
goto cleanup;
}
- memcpy (usg, upsg, actual_fibsize - sizeof(struct aac_srb)
- + sizeof(struct sgmap));
actual_fibsize = actual_fibsize64;
for (i = 0; i < usg->count; i++) {
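The aacraid hunk is a plain kmalloc()+memcpy() to kmemdup() conversion: one call allocates and copies, returning NULL on failure. Sketch with a hypothetical helper:

    #include <linux/slab.h>
    #include <linux/string.h>

    /* Invented helper: duplicate a caller-supplied blob in one step. */
    static void *dup_blob(const void *src, size_t len)
    {
            return kmemdup(src, len, GFP_KERNEL);  /* NULL on failure */
    }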
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index a18819939..a5052dd8d 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -57,7 +57,7 @@ static struct scsi_host_template bnx2fc_shost_template;
static struct fc_function_template bnx2fc_transport_function;
static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ;
static struct fc_function_template bnx2fc_vport_xport_function;
-static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode);
+static int bnx2fc_create(struct net_device *netdev, enum fip_mode fip_mode);
static void __bnx2fc_destroy(struct bnx2fc_interface *interface);
static int bnx2fc_destroy(struct net_device *net_device);
static int bnx2fc_enable(struct net_device *netdev);
@@ -486,7 +486,7 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
__skb_queue_tail(&bg->fcoe_rx_list, skb);
if (bg->fcoe_rx_list.qlen == 1)
- wake_up_process(bg->thread);
+ wake_up_process(bg->kthread);
spin_unlock(&bg->fcoe_rx_list.lock);
@@ -2260,7 +2260,7 @@ enum bnx2fc_create_link_state {
* Returns: 0 for success
*/
static int _bnx2fc_create(struct net_device *netdev,
- enum fip_state fip_mode,
+ enum fip_mode fip_mode,
enum bnx2fc_create_link_state link_state)
{
struct fcoe_ctlr_device *cdev;
@@ -2412,7 +2412,7 @@ mod_err:
*
* Returns: 0 for success
*/
-static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
+static int bnx2fc_create(struct net_device *netdev, enum fip_mode fip_mode)
{
return _bnx2fc_create(netdev, fip_mode, BNX2FC_CREATE_LINK_UP);
}
@@ -2715,7 +2715,7 @@ static int __init bnx2fc_mod_init(void)
}
wake_up_process(l2_thread);
spin_lock_bh(&bg->fcoe_rx_list.lock);
- bg->thread = l2_thread;
+ bg->kthread = l2_thread;
spin_unlock_bh(&bg->fcoe_rx_list.lock);
for_each_possible_cpu(cpu) {
@@ -2788,8 +2788,8 @@ static void __exit bnx2fc_mod_exit(void)
/* Destroy global thread */
bg = &bnx2fc_global;
spin_lock_bh(&bg->fcoe_rx_list.lock);
- l2_thread = bg->thread;
- bg->thread = NULL;
+ l2_thread = bg->kthread;
+ bg->kthread = NULL;
while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL)
kfree_skb(skb);
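Beyond the mechanical bg->thread to bg->kthread rename, the exit path above shows a teardown idiom worth keeping in mind: detach the worker pointer under the queue lock so the receive path can no longer wake it, drain the queue, and only then deal with the detached thread outside the lock. Sketch (declarations omitted; names follow the hunk):

    spin_lock_bh(&bg->fcoe_rx_list.lock);
    l2_thread = bg->kthread;        /* steal the pointer under the lock */
    bg->kthread = NULL;             /* rx path now has nothing to wake */
    while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL)
            kfree_skb(skb);         /* drop anything already queued */
    spin_unlock_bh(&bg->fcoe_rx_list.lock);

    if (l2_thread)
            kthread_stop(l2_thread); /* safe: no one else holds it now */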
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 026f394a3..8f24d60f0 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1758,7 +1758,7 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
/* Only for task management function */
io_req->fcp_rsp_code = rq_data[3];
- printk(KERN_ERR PFX "fcp_rsp_code = %d\n",
+ BNX2FC_IO_DBG(io_req, "fcp_rsp_code = %d\n",
io_req->fcp_rsp_code);
}
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index fb072cc5e..42921dbba 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -2417,7 +2417,7 @@ static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba,
ep = bnx2i_find_ep_in_destroy_list(hba, conn_destroy->iscsi_conn_id);
if (!ep) {
printk(KERN_ALERT "bnx2i_conn_destroy_cmpl: no pending "
- "offload request, unexpected complection\n");
+ "offload request, unexpected completion\n");
return;
}
diff --git a/drivers/scsi/cxgbi/Makefile b/drivers/scsi/cxgbi/Makefile
index 86007e344..a73781ac1 100644
--- a/drivers/scsi/cxgbi/Makefile
+++ b/drivers/scsi/cxgbi/Makefile
@@ -1,2 +1,4 @@
+ccflags-y += -Idrivers/net/ethernet/chelsio/libcxgb
+
obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libcxgbi.o cxgb3i/
obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libcxgbi.o cxgb4i/
diff --git a/drivers/scsi/cxgbi/cxgb3i/Kbuild b/drivers/scsi/cxgbi/cxgb3i/Kbuild
index 961a12f6d..663c52e05 100644
--- a/drivers/scsi/cxgbi/cxgb3i/Kbuild
+++ b/drivers/scsi/cxgbi/cxgb3i/Kbuild
@@ -1,3 +1,4 @@
ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb3
+ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/libcxgb
obj-$(CONFIG_SCSI_CXGB3_ISCSI) += cxgb3i.o
diff --git a/drivers/scsi/cxgbi/cxgb3i/Kconfig b/drivers/scsi/cxgbi/cxgb3i/Kconfig
index e4603985d..f68c871b1 100644
--- a/drivers/scsi/cxgbi/cxgb3i/Kconfig
+++ b/drivers/scsi/cxgbi/cxgb3i/Kconfig
@@ -5,6 +5,7 @@ config SCSI_CXGB3_ISCSI
select ETHERNET
select NET_VENDOR_CHELSIO
select CHELSIO_T3
+ select CHELSIO_LIB
select SCSI_ISCSI_ATTRS
---help---
This driver supports iSCSI offload for the Chelsio T3 devices.
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index e22a268fd..33e83464e 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -1028,7 +1028,7 @@ cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS] = {
* cxgb3i_ofld_init - allocate and initialize resources for each adapter found
* @cdev: cxgbi adapter
*/
-int cxgb3i_ofld_init(struct cxgbi_device *cdev)
+static int cxgb3i_ofld_init(struct cxgbi_device *cdev)
{
struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev;
struct adap_ports port;
@@ -1076,64 +1076,69 @@ static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr)
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS));
req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) |
V_ULPTX_CMD(ULP_MEM_WRITE));
- req->len = htonl(V_ULP_MEMIO_DATA_LEN(PPOD_SIZE >> 5) |
- V_ULPTX_NFLITS((PPOD_SIZE >> 3) + 1));
+ req->len = htonl(V_ULP_MEMIO_DATA_LEN(IPPOD_SIZE >> 5) |
+ V_ULPTX_NFLITS((IPPOD_SIZE >> 3) + 1));
}
-static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr,
- unsigned int idx, unsigned int npods,
- struct cxgbi_gather_list *gl)
+static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
{
- struct cxgbi_device *cdev = csk->cdev;
- struct cxgbi_ddp_info *ddp = cdev->ddp;
- unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
- int i;
+ return ((struct t3cdev *)cdev->lldev)->ulp_iscsi;
+}
- log_debug(1 << CXGBI_DBG_DDP,
- "csk 0x%p, idx %u, npods %u, gl 0x%p.\n",
- csk, idx, npods, gl);
+static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
+ struct cxgbi_task_tag_info *ttinfo)
+{
+ unsigned int idx = ttinfo->idx;
+ unsigned int npods = ttinfo->npods;
+ struct scatterlist *sg = ttinfo->sgl;
+ struct cxgbi_pagepod *ppod;
+ struct ulp_mem_io *req;
+ unsigned int sg_off;
+ unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
+ int i;
- for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
+ for (i = 0; i < npods; i++, idx++, pm_addr += IPPOD_SIZE) {
struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
- PPOD_SIZE, 0, GFP_ATOMIC);
+ IPPOD_SIZE, 0, GFP_ATOMIC);
if (!skb)
return -ENOMEM;
-
ulp_mem_io_set_hdr(skb, pm_addr);
- cxgbi_ddp_ppod_set((struct cxgbi_pagepod *)(skb->head +
- sizeof(struct ulp_mem_io)),
- hdr, gl, i * PPOD_PAGES_MAX);
+ req = (struct ulp_mem_io *)skb->head;
+ ppod = (struct cxgbi_pagepod *)(req + 1);
+ sg_off = i * PPOD_PAGES_MAX;
+ cxgbi_ddp_set_one_ppod(ppod, ttinfo, &sg,
+ &sg_off);
skb->priority = CPL_PRIORITY_CONTROL;
- cxgb3_ofld_send(cdev->lldev, skb);
+ cxgb3_ofld_send(ppm->lldev, skb);
}
return 0;
}
-static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag,
- unsigned int idx, unsigned int npods)
+static void ddp_clear_map(struct cxgbi_device *cdev, struct cxgbi_ppm *ppm,
+ struct cxgbi_task_tag_info *ttinfo)
{
- struct cxgbi_device *cdev = chba->cdev;
- struct cxgbi_ddp_info *ddp = cdev->ddp;
- unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
+ unsigned int idx = ttinfo->idx;
+ unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
+ unsigned int npods = ttinfo->npods;
int i;
log_debug(1 << CXGBI_DBG_DDP,
- "cdev 0x%p, idx %u, npods %u, tag 0x%x.\n",
- cdev, idx, npods, tag);
+ "cdev 0x%p, clear idx %u, npods %u.\n",
+ cdev, idx, npods);
- for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
+ for (i = 0; i < npods; i++, idx++, pm_addr += IPPOD_SIZE) {
struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
- PPOD_SIZE, 0, GFP_ATOMIC);
+ IPPOD_SIZE, 0, GFP_ATOMIC);
if (!skb) {
- pr_err("tag 0x%x, 0x%x, %d/%u, skb OOM.\n",
- tag, idx, i, npods);
+ pr_err("cdev 0x%p, clear ddp, %u,%d/%u, skb OOM.\n",
+ cdev, idx, i, npods);
continue;
}
ulp_mem_io_set_hdr(skb, pm_addr);
skb->priority = CPL_PRIORITY_CONTROL;
- cxgb3_ofld_send(cdev->lldev, skb);
+ cxgb3_ofld_send(ppm->lldev, skb);
}
}
@@ -1203,82 +1208,68 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
}
/**
- * t3_ddp_cleanup - release the cxgb3 adapter's ddp resource
- * @cdev: cxgb3i adapter
- * release all the resource held by the ddp pagepod manager for a given
- * adapter if needed
- */
-
-static void t3_ddp_cleanup(struct cxgbi_device *cdev)
-{
- struct t3cdev *tdev = (struct t3cdev *)cdev->lldev;
-
- if (cxgbi_ddp_cleanup(cdev)) {
- pr_info("t3dev 0x%p, ulp_iscsi no more user.\n", tdev);
- tdev->ulp_iscsi = NULL;
- }
-}
-
-/**
- * ddp_init - initialize the cxgb3 adapter's ddp resource
+ * cxgb3i_ddp_init - initialize the cxgb3 adapter's ddp resource
* @cdev: cxgb3i adapter
* initialize the ddp pagepod manager for a given adapter
*/
static int cxgb3i_ddp_init(struct cxgbi_device *cdev)
{
struct t3cdev *tdev = (struct t3cdev *)cdev->lldev;
- struct cxgbi_ddp_info *ddp = tdev->ulp_iscsi;
+ struct net_device *ndev = cdev->ports[0];
+ struct cxgbi_tag_format tformat;
+ unsigned int ppmax, tagmask = 0;
struct ulp_iscsi_info uinfo;
- unsigned int pgsz_factor[4];
int i, err;
- if (ddp) {
- kref_get(&ddp->refcnt);
- pr_warn("t3dev 0x%p, ddp 0x%p already set up.\n",
- tdev, tdev->ulp_iscsi);
- cdev->ddp = ddp;
- return -EALREADY;
- }
-
err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo);
if (err < 0) {
- pr_err("%s, failed to get iscsi param err=%d.\n",
- tdev->name, err);
+ pr_err("%s, failed to get iscsi param %d.\n",
+ ndev->name, err);
return err;
}
+ if (uinfo.llimit >= uinfo.ulimit) {
+ pr_warn("T3 %s, iscsi NOT enabled %u ~ %u!\n",
+ ndev->name, uinfo.llimit, uinfo.ulimit);
+ return -EACCES;
+ }
- err = cxgbi_ddp_init(cdev, uinfo.llimit, uinfo.ulimit,
- uinfo.max_txsz, uinfo.max_rxsz);
- if (err < 0)
- return err;
+ ppmax = (uinfo.ulimit - uinfo.llimit + 1) >> PPOD_SIZE_SHIFT;
+ tagmask = cxgbi_tagmask_set(ppmax);
- ddp = cdev->ddp;
+ pr_info("T3 %s: 0x%x~0x%x, 0x%x, tagmask 0x%x -> 0x%x.\n",
+ ndev->name, uinfo.llimit, uinfo.ulimit, ppmax, uinfo.tagmask,
+ tagmask);
- uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
- cxgbi_ddp_page_size_factor(pgsz_factor);
+ memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
for (i = 0; i < 4; i++)
- uinfo.pgsz_factor[i] = pgsz_factor[i];
- uinfo.ulimit = uinfo.llimit + (ddp->nppods << PPOD_SIZE_SHIFT);
+ tformat.pgsz_order[i] = uinfo.pgsz_factor[i];
+ cxgbi_tagmask_check(tagmask, &tformat);
- err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo);
- if (err < 0) {
- pr_warn("%s unable to set iscsi param err=%d, ddp disabled.\n",
- tdev->name, err);
- cxgbi_ddp_cleanup(cdev);
- return err;
+ cxgbi_ddp_ppm_setup(&tdev->ulp_iscsi, cdev, &tformat, ppmax,
+ uinfo.llimit, uinfo.llimit, 0);
+ if (!(cdev->flags & CXGBI_FLAG_DDP_OFF)) {
+ uinfo.tagmask = tagmask;
+ uinfo.ulimit = uinfo.llimit + (ppmax << PPOD_SIZE_SHIFT);
+
+ err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo);
+ if (err < 0) {
+ pr_err("T3 %s fail to set iscsi param %d.\n",
+ ndev->name, err);
+ cdev->flags |= CXGBI_FLAG_DDP_OFF;
+ }
+ err = 0;
}
- tdev->ulp_iscsi = ddp;
cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
- cdev->csk_ddp_set = ddp_set_map;
- cdev->csk_ddp_clear = ddp_clear_map;
-
- pr_info("tdev 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, "
- "%u/%u.\n",
- tdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask,
- ddp->rsvd_tag_mask, ddp->max_txsz, uinfo.max_txsz,
- ddp->max_rxsz, uinfo.max_rxsz);
+ cdev->csk_ddp_set_map = ddp_set_map;
+ cdev->csk_ddp_clear_map = ddp_clear_map;
+ cdev->cdev2ppm = cdev2ppm;
+ cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
+ uinfo.max_txsz - ISCSI_PDU_NONPAYLOAD_LEN);
+ cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
+ uinfo.max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN);
+
return 0;
}
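The new cxgb3i_ddp_init() sizes the pagepod pool straight from the adapter's ULP memory window: (ulimit - llimit + 1) >> PPOD_SIZE_SHIFT is the window size in bytes divided by the 64-byte pod size. A standalone arithmetic check (window values invented, not real adapter limits):

    #include <assert.h>
    #include <stdio.h>

    #define PPOD_SIZE_SHIFT 6   /* each pagepod entry is 64 bytes */

    int main(void)
    {
            unsigned int llimit = 0x10000, ulimit = 0x4ffff;
            unsigned int ppmax = (ulimit - llimit + 1) >> PPOD_SIZE_SHIFT;

            assert(ppmax == 4096);  /* 256 KiB window / 64-byte pods */
            printf("%u pagepods fit in the window\n", ppmax);
            return 0;
    }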
@@ -1325,7 +1316,6 @@ static void cxgb3i_dev_open(struct t3cdev *t3dev)
cdev->rx_credit_thres = cxgb3i_rx_credit_thres;
cdev->skb_tx_rsvd = CXGB3I_TX_HEADER_LEN;
cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr_norss);
- cdev->dev_ddp_cleanup = t3_ddp_cleanup;
cdev->itp = &cxgb3i_iscsi_transport;
err = cxgb3i_ddp_init(cdev);
diff --git a/drivers/scsi/cxgbi/cxgb4i/Kbuild b/drivers/scsi/cxgbi/cxgb4i/Kbuild
index 374586437..38e03c280 100644
--- a/drivers/scsi/cxgbi/cxgb4i/Kbuild
+++ b/drivers/scsi/cxgbi/cxgb4i/Kbuild
@@ -1,3 +1,4 @@
ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb4
+ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/libcxgb
obj-$(CONFIG_SCSI_CXGB4_ISCSI) += cxgb4i.o
diff --git a/drivers/scsi/cxgbi/cxgb4i/Kconfig b/drivers/scsi/cxgbi/cxgb4i/Kconfig
index 8c4e42303..594f593c8 100644
--- a/drivers/scsi/cxgbi/cxgb4i/Kconfig
+++ b/drivers/scsi/cxgbi/cxgb4i/Kconfig
@@ -5,6 +5,7 @@ config SCSI_CXGB4_ISCSI
select ETHERNET
select NET_VENDOR_CHELSIO
select CHELSIO_T4
+ select CHELSIO_LIB
select SCSI_ISCSI_ATTRS
---help---
This driver supports iSCSI offload for the Chelsio T4 devices.
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 339f6b7f4..e4ba2d261 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1503,7 +1503,7 @@ rel_resource_without_clip:
return -EINVAL;
}
-cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
+static cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
[CPL_ACT_ESTABLISH] = do_act_establish,
[CPL_ACT_OPEN_RPL] = do_act_open_rpl,
[CPL_PEER_CLOSE] = do_peer_close,
@@ -1519,7 +1519,7 @@ cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
[CPL_RX_DATA] = do_rx_data,
};
-int cxgb4i_ofld_init(struct cxgbi_device *cdev)
+static int cxgb4i_ofld_init(struct cxgbi_device *cdev)
{
int rc;
@@ -1543,24 +1543,22 @@ int cxgb4i_ofld_init(struct cxgbi_device *cdev)
return 0;
}
-/*
- * functions to program the pagepod in h/w
- */
-#define ULPMEM_IDATA_MAX_NPPODS 4 /* 256/PPOD_SIZE */
-static inline void ulp_mem_io_set_hdr(struct cxgb4_lld_info *lldi,
- struct ulp_mem_io *req,
- unsigned int wr_len, unsigned int dlen,
- unsigned int pm_addr)
+static inline void
+ulp_mem_io_set_hdr(struct cxgbi_device *cdev,
+ struct ulp_mem_io *req,
+ unsigned int wr_len, unsigned int dlen,
+ unsigned int pm_addr,
+ int tid)
{
+ struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);
- INIT_ULPTX_WR(req, wr_len, 0, 0);
- if (is_t4(lldi->adapter_type))
- req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
- (ULP_MEMIO_ORDER_F));
- else
- req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
- (T5_ULP_MEMIO_IMM_F));
+ INIT_ULPTX_WR(req, wr_len, 0, tid);
+ req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
+ FW_WR_ATOMIC_V(0));
+ req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
+ ULP_MEMIO_ORDER_V(is_t4(lldi->adapter_type)) |
+ T5_ULP_MEMIO_IMM_V(!is_t4(lldi->adapter_type)));
req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
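The rewritten header setup collapses the T4/T5 if/else into a single expression: ULP_MEMIO_ORDER_V(is_t4(...)) | T5_ULP_MEMIO_IMM_V(!is_t4(...)) sets exactly one of two mutually exclusive flags from a boolean. A standalone sketch of the idiom (bit positions invented):

    #include <assert.h>
    #include <stdint.h>

    #define ORDER_V(x) ((uint32_t)(x) << 3)  /* invented positions */
    #define IMM_V(x)   ((uint32_t)(x) << 4)

    int main(void)
    {
            int is_t4 = 0;
            /* one flag or the other, no branch */
            uint32_t cmd = ORDER_V(is_t4) | IMM_V(!is_t4);

            assert(cmd == IMM_V(1));
            return 0;
    }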
@@ -1569,82 +1567,89 @@ static inline void ulp_mem_io_set_hdr(struct cxgb4_lld_info *lldi,
idata->len = htonl(dlen);
}
-static int ddp_ppod_write_idata(struct cxgbi_device *cdev, unsigned int port_id,
- struct cxgbi_pagepod_hdr *hdr, unsigned int idx,
- unsigned int npods,
- struct cxgbi_gather_list *gl,
- unsigned int gl_pidx)
+static struct sk_buff *
+ddp_ppod_init_idata(struct cxgbi_device *cdev,
+ struct cxgbi_ppm *ppm,
+ unsigned int idx, unsigned int npods,
+ unsigned int tid)
{
- struct cxgbi_ddp_info *ddp = cdev->ddp;
- struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
- struct sk_buff *skb;
- struct ulp_mem_io *req;
- struct ulptx_idata *idata;
- struct cxgbi_pagepod *ppod;
- unsigned int pm_addr = idx * PPOD_SIZE + ddp->llimit;
- unsigned int dlen = PPOD_SIZE * npods;
+ unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
+ unsigned int dlen = npods << PPOD_SIZE_SHIFT;
unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
sizeof(struct ulptx_idata) + dlen, 16);
- unsigned int i;
+ struct sk_buff *skb = alloc_wr(wr_len, 0, GFP_ATOMIC);
- skb = alloc_wr(wr_len, 0, GFP_ATOMIC);
if (!skb) {
- pr_err("cdev 0x%p, idx %u, npods %u, OOM.\n",
- cdev, idx, npods);
- return -ENOMEM;
+ pr_err("%s: %s idx %u, npods %u, OOM.\n",
+ __func__, ppm->ndev->name, idx, npods);
+ return NULL;
}
- req = (struct ulp_mem_io *)skb->head;
- set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
- ulp_mem_io_set_hdr(lldi, req, wr_len, dlen, pm_addr);
+ ulp_mem_io_set_hdr(cdev, (struct ulp_mem_io *)skb->head, wr_len, dlen,
+ pm_addr, tid);
+
+ return skb;
+}
+
+static int ddp_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
+ struct cxgbi_task_tag_info *ttinfo,
+ unsigned int idx, unsigned int npods,
+ struct scatterlist **sg_pp,
+ unsigned int *sg_off)
+{
+ struct cxgbi_device *cdev = csk->cdev;
+ struct sk_buff *skb = ddp_ppod_init_idata(cdev, ppm, idx, npods,
+ csk->tid);
+ struct ulp_mem_io *req;
+ struct ulptx_idata *idata;
+ struct cxgbi_pagepod *ppod;
+ int i;
+
+ if (!skb)
+ return -ENOMEM;
+
+ req = (struct ulp_mem_io *)skb->head;
idata = (struct ulptx_idata *)(req + 1);
ppod = (struct cxgbi_pagepod *)(idata + 1);
- for (i = 0; i < npods; i++, ppod++, gl_pidx += PPOD_PAGES_MAX) {
- if (!hdr && !gl)
- cxgbi_ddp_ppod_clear(ppod);
- else
- cxgbi_ddp_ppod_set(ppod, hdr, gl, gl_pidx);
- }
+ for (i = 0; i < npods; i++, ppod++)
+ cxgbi_ddp_set_one_ppod(ppod, ttinfo, sg_pp, sg_off);
+
+ cxgbi_skcb_set_flag(skb, SKCBF_TX_MEM_WRITE);
+ cxgbi_skcb_set_flag(skb, SKCBF_TX_FLAG_COMPL);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
+
+ spin_lock_bh(&csk->lock);
+ cxgbi_sock_skb_entail(csk, skb);
+ spin_unlock_bh(&csk->lock);
- cxgb4_ofld_send(cdev->ports[port_id], skb);
return 0;
}
-static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr,
- unsigned int idx, unsigned int npods,
- struct cxgbi_gather_list *gl)
+static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
+ struct cxgbi_task_tag_info *ttinfo)
{
+ unsigned int pidx = ttinfo->idx;
+ unsigned int npods = ttinfo->npods;
unsigned int i, cnt;
int err = 0;
+ struct scatterlist *sg = ttinfo->sgl;
+ unsigned int offset = 0;
- for (i = 0; i < npods; i += cnt, idx += cnt) {
- cnt = npods - i;
- if (cnt > ULPMEM_IDATA_MAX_NPPODS)
- cnt = ULPMEM_IDATA_MAX_NPPODS;
- err = ddp_ppod_write_idata(csk->cdev, csk->port_id, hdr,
- idx, cnt, gl, 4 * i);
- if (err < 0)
- break;
- }
- return err;
-}
-
-static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag,
- unsigned int idx, unsigned int npods)
-{
- unsigned int i, cnt;
- int err;
+ ttinfo->cid = csk->port_id;
- for (i = 0; i < npods; i += cnt, idx += cnt) {
+ for (i = 0; i < npods; i += cnt, pidx += cnt) {
cnt = npods - i;
+
if (cnt > ULPMEM_IDATA_MAX_NPPODS)
cnt = ULPMEM_IDATA_MAX_NPPODS;
- err = ddp_ppod_write_idata(chba->cdev, chba->port_id, NULL,
- idx, cnt, NULL, 0);
+ err = ddp_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt,
+ &sg, &offset);
if (err < 0)
break;
}
+
+ return err;
}
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
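The reworked ddp_set_map() walks the pod range in bounded batches, clamping each work request to ULPMEM_IDATA_MAX_NPPODS pods (4, per the header removed later in this patch). The loop shape, runnable standalone:

    #include <stdio.h>

    #define ULPMEM_IDATA_MAX_NPPODS 4  /* 256 / 64-byte pods */

    int main(void)
    {
            unsigned int npods = 10, pidx = 100, i, cnt;

            /* at most 4 pods per request, advancing both counters */
            for (i = 0; i < npods; i += cnt, pidx += cnt) {
                    cnt = npods - i;
                    if (cnt > ULPMEM_IDATA_MAX_NPPODS)
                            cnt = ULPMEM_IDATA_MAX_NPPODS;
                    printf("write pods [%u, %u)\n", pidx, pidx + cnt);
            }
            return 0;
    }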
@@ -1710,48 +1715,46 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
return 0;
}
+static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
+{
+ return (struct cxgbi_ppm *)(*((struct cxgb4_lld_info *)
+ (cxgbi_cdev_priv(cdev)))->iscsi_ppm);
+}
+
static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
{
struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
- struct cxgbi_ddp_info *ddp = cdev->ddp;
- unsigned int tagmask, pgsz_factor[4];
- int err;
-
- if (ddp) {
- kref_get(&ddp->refcnt);
- pr_warn("cdev 0x%p, ddp 0x%p already set up.\n",
- cdev, cdev->ddp);
- return -EALREADY;
+ struct net_device *ndev = cdev->ports[0];
+ struct cxgbi_tag_format tformat;
+ unsigned int ppmax;
+ int i;
+
+ if (!lldi->vr->iscsi.size) {
+ pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name);
+ return -EACCES;
}
- err = cxgbi_ddp_init(cdev, lldi->vr->iscsi.start,
- lldi->vr->iscsi.start + lldi->vr->iscsi.size - 1,
- lldi->iscsi_iolen, lldi->iscsi_iolen);
- if (err < 0)
- return err;
+ cdev->flags |= CXGBI_FLAG_USE_PPOD_OFLDQ;
+ ppmax = lldi->vr->iscsi.size >> PPOD_SIZE_SHIFT;
- ddp = cdev->ddp;
+ memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
+ for (i = 0; i < 4; i++)
+ tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
+ & 0xF;
+ cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat);
- tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
- cxgbi_ddp_page_size_factor(pgsz_factor);
- cxgb4_iscsi_init(lldi->ports[0], tagmask, pgsz_factor);
+ cxgbi_ddp_ppm_setup(lldi->iscsi_ppm, cdev, &tformat, ppmax,
+ lldi->iscsi_llimit, lldi->vr->iscsi.start, 2);
cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
- cdev->csk_ddp_set = ddp_set_map;
- cdev->csk_ddp_clear = ddp_clear_map;
-
- pr_info("cxgb4i 0x%p tag: sw %u, rsvd %u,%u, mask 0x%x.\n",
- cdev, cdev->tag_format.sw_bits, cdev->tag_format.rsvd_bits,
- cdev->tag_format.rsvd_shift, cdev->tag_format.rsvd_mask);
- pr_info("cxgb4i 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, "
- " %u/%u.\n",
- cdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask,
- ddp->rsvd_tag_mask, ddp->max_txsz, lldi->iscsi_iolen,
- ddp->max_rxsz, lldi->iscsi_iolen);
- pr_info("cxgb4i 0x%p max payload size: %u/%u, %u/%u.\n",
- cdev, cdev->tx_max_size, ddp->max_txsz, cdev->rx_max_size,
- ddp->max_rxsz);
+ cdev->csk_ddp_set_map = ddp_set_map;
+ cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
+ lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN);
+ cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
+ lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN);
+ cdev->cdev2ppm = cdev2ppm;
+
return 0;
}
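Both ddp_init() paths now clamp the PDU payload with min_t(), the kernel's min() variant that casts both operands to a named type before comparing, avoiding signed/unsigned surprises when mixing constants with hardware-reported sizes. A userspace stand-in to show the arithmetic (values invented):

    #include <assert.h>

    /* simplified stand-in for the kernel's min_t() */
    #define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

    int main(void)
    {
            unsigned int iolen = 65536, nonpayload = 312, ulp_max = 16224;

            /* payload bounded by both the ULP cap and the adapter size */
            assert(min_t(unsigned int, ulp_max, iolen - nonpayload) == ulp_max);
            return 0;
    }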
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index ead83a24b..d1421139e 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -64,6 +64,14 @@ static DEFINE_MUTEX(cdev_mutex);
static LIST_HEAD(cdev_rcu_list);
static DEFINE_SPINLOCK(cdev_rcu_lock);
+static inline void cxgbi_decode_sw_tag(u32 sw_tag, int *idx, int *age)
+{
+ if (age)
+ *age = sw_tag & 0x7FFF;
+ if (idx)
+ *idx = (sw_tag >> 16) & 0x7FFF;
+}
+
int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base,
unsigned int max_conn)
{
@@ -113,12 +121,7 @@ static inline void cxgbi_device_destroy(struct cxgbi_device *cdev)
"cdev 0x%p, p# %u.\n", cdev, cdev->nports);
cxgbi_hbas_remove(cdev);
cxgbi_device_portmap_cleanup(cdev);
- if (cdev->dev_ddp_cleanup)
- cdev->dev_ddp_cleanup(cdev);
- else
- cxgbi_ddp_cleanup(cdev);
- if (cdev->ddp)
- cxgbi_ddp_cleanup(cdev);
+ cxgbi_ppm_release(cdev->cdev2ppm(cdev));
if (cdev->pmap.max_connect)
cxgbi_free_big_mem(cdev->pmap.port_csk);
kfree(cdev);
@@ -1182,525 +1185,245 @@ out_err:
goto done;
}
-/*
- * Direct Data Placement -
- * Directly place the iSCSI Data-In or Data-Out PDU's payload into pre-posted
- * final destination host-memory buffers based on the Initiator Task Tag (ITT)
- * in Data-In or Target Task Tag (TTT) in Data-Out PDUs.
- * The host memory address is programmed into h/w in the format of pagepod
- * entries.
- * The location of the pagepod entry is encoded into ddp tag which is used as
- * the base for ITT/TTT.
- */
-
-static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4};
-static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16};
-static unsigned char page_idx = DDP_PGIDX_MAX;
-
-static unsigned char sw_tag_idx_bits;
-static unsigned char sw_tag_age_bits;
-
-/*
- * Direct-Data Placement page size adjustment
- */
-static int ddp_adjust_page_table(void)
+static inline void
+scmd_get_params(struct scsi_cmnd *sc, struct scatterlist **sgl,
+ unsigned int *sgcnt, unsigned int *dlen,
+ unsigned int prot)
{
- int i;
- unsigned int base_order, order;
-
- if (PAGE_SIZE < (1UL << ddp_page_shift[0])) {
- pr_info("PAGE_SIZE 0x%lx too small, min 0x%lx\n",
- PAGE_SIZE, 1UL << ddp_page_shift[0]);
- return -EINVAL;
- }
-
- base_order = get_order(1UL << ddp_page_shift[0]);
- order = get_order(1UL << PAGE_SHIFT);
+ struct scsi_data_buffer *sdb = prot ? scsi_prot(sc) : scsi_out(sc);
- for (i = 0; i < DDP_PGIDX_MAX; i++) {
- /* first is the kernel page size, then just doubling */
- ddp_page_order[i] = order - base_order + i;
- ddp_page_shift[i] = PAGE_SHIFT + i;
- }
- return 0;
+ *sgl = sdb->table.sgl;
+ *sgcnt = sdb->table.nents;
+ *dlen = sdb->length;
+ /* Caution: for protection sdb, sdb->length is invalid */
}
-static int ddp_find_page_index(unsigned long pgsz)
+void cxgbi_ddp_set_one_ppod(struct cxgbi_pagepod *ppod,
+ struct cxgbi_task_tag_info *ttinfo,
+ struct scatterlist **sg_pp, unsigned int *sg_off)
{
+ struct scatterlist *sg = sg_pp ? *sg_pp : NULL;
+ unsigned int offset = sg_off ? *sg_off : 0;
+ dma_addr_t addr = 0UL;
+ unsigned int len = 0;
int i;
- for (i = 0; i < DDP_PGIDX_MAX; i++) {
- if (pgsz == (1UL << ddp_page_shift[i]))
- return i;
- }
- pr_info("ddp page size %lu not supported.\n", pgsz);
- return DDP_PGIDX_MAX;
-}
+ memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr));
-static void ddp_setup_host_page_size(void)
-{
- if (page_idx == DDP_PGIDX_MAX) {
- page_idx = ddp_find_page_index(PAGE_SIZE);
+ if (sg) {
+ addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ }
- if (page_idx == DDP_PGIDX_MAX) {
- pr_info("system PAGE %lu, update hw.\n", PAGE_SIZE);
- if (ddp_adjust_page_table() < 0) {
- pr_info("PAGE %lu, disable ddp.\n", PAGE_SIZE);
- return;
+ for (i = 0; i < PPOD_PAGES_MAX; i++) {
+ if (sg) {
+ ppod->addr[i] = cpu_to_be64(addr + offset);
+ offset += PAGE_SIZE;
+ if (offset == (len + sg->offset)) {
+ offset = 0;
+ sg = sg_next(sg);
+ if (sg) {
+ addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ }
}
- page_idx = ddp_find_page_index(PAGE_SIZE);
+ } else {
+ ppod->addr[i] = 0ULL;
}
- pr_info("system PAGE %lu, ddp idx %u.\n", PAGE_SIZE, page_idx);
}
-}
-
-void cxgbi_ddp_page_size_factor(int *pgsz_factor)
-{
- int i;
-
- for (i = 0; i < DDP_PGIDX_MAX; i++)
- pgsz_factor[i] = ddp_page_order[i];
-}
-EXPORT_SYMBOL_GPL(cxgbi_ddp_page_size_factor);
-
-/*
- * DDP setup & teardown
- */
-
-void cxgbi_ddp_ppod_set(struct cxgbi_pagepod *ppod,
- struct cxgbi_pagepod_hdr *hdr,
- struct cxgbi_gather_list *gl, unsigned int gidx)
-{
- int i;
-
- memcpy(ppod, hdr, sizeof(*hdr));
- for (i = 0; i < (PPOD_PAGES_MAX + 1); i++, gidx++) {
- ppod->addr[i] = gidx < gl->nelem ?
- cpu_to_be64(gl->phys_addr[gidx]) : 0ULL;
- }
-}
-EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_set);
-
-void cxgbi_ddp_ppod_clear(struct cxgbi_pagepod *ppod)
-{
- memset(ppod, 0, sizeof(*ppod));
-}
-EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_clear);
-
-static inline int ddp_find_unused_entries(struct cxgbi_ddp_info *ddp,
- unsigned int start, unsigned int max,
- unsigned int count,
- struct cxgbi_gather_list *gl)
-{
- unsigned int i, j, k;
- /* not enough entries */
- if ((max - start) < count) {
- log_debug(1 << CXGBI_DBG_DDP,
- "NOT enough entries %u+%u < %u.\n", start, count, max);
- return -EBUSY;
+ /*
+ * the fifth address needs to be repeated in the next ppod, so do
+ * not move sg
+ */
+ if (sg_pp) {
+ *sg_pp = sg;
+ *sg_off = offset;
}
- max -= count;
- spin_lock(&ddp->map_lock);
- for (i = start; i < max;) {
- for (j = 0, k = i; j < count; j++, k++) {
- if (ddp->gl_map[k])
- break;
- }
- if (j == count) {
- for (j = 0, k = i; j < count; j++, k++)
- ddp->gl_map[k] = gl;
- spin_unlock(&ddp->map_lock);
- return i;
+ if (offset == len) {
+ offset = 0;
+ sg = sg_next(sg);
+ if (sg) {
+ addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
}
- i += j + 1;
}
- spin_unlock(&ddp->map_lock);
- log_debug(1 << CXGBI_DBG_DDP,
- "NO suitable entries %u available.\n", count);
- return -EBUSY;
-}
-
-static inline void ddp_unmark_entries(struct cxgbi_ddp_info *ddp,
- int start, int count)
-{
- spin_lock(&ddp->map_lock);
- memset(&ddp->gl_map[start], 0,
- count * sizeof(struct cxgbi_gather_list *));
- spin_unlock(&ddp->map_lock);
+ ppod->addr[i] = sg ? cpu_to_be64(addr + offset) : 0ULL;
}
+EXPORT_SYMBOL_GPL(cxgbi_ddp_set_one_ppod);
-static inline void ddp_gl_unmap(struct pci_dev *pdev,
- struct cxgbi_gather_list *gl)
-{
- int i;
+/*
+ * APIs interacting with open-iscsi libraries
+ */
- for (i = 0; i < gl->nelem; i++)
- dma_unmap_page(&pdev->dev, gl->phys_addr[i], PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
-}
+static unsigned char padding[4];
-static inline int ddp_gl_map(struct pci_dev *pdev,
- struct cxgbi_gather_list *gl)
+void cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *cdev,
+ struct cxgbi_tag_format *tformat, unsigned int ppmax,
+ unsigned int llimit, unsigned int start,
+ unsigned int rsvd_factor)
{
- int i;
+ int err = cxgbi_ppm_init(ppm_pp, cdev->ports[0], cdev->pdev,
+ cdev->lldev, tformat, ppmax, llimit, start,
+ rsvd_factor);
- for (i = 0; i < gl->nelem; i++) {
- gl->phys_addr[i] = dma_map_page(&pdev->dev, gl->pages[i], 0,
- PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- if (unlikely(dma_mapping_error(&pdev->dev, gl->phys_addr[i]))) {
- log_debug(1 << CXGBI_DBG_DDP,
- "page %d 0x%p, 0x%p dma mapping err.\n",
- i, gl->pages[i], pdev);
- goto unmap;
- }
- }
- return i;
-unmap:
- if (i) {
- unsigned int nelem = gl->nelem;
+ if (err >= 0) {
+ struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp);
- gl->nelem = i;
- ddp_gl_unmap(pdev, gl);
- gl->nelem = nelem;
+ if (ppm->ppmax < 1024 ||
+ ppm->tformat.pgsz_idx_dflt >= DDP_PGIDX_MAX)
+ cdev->flags |= CXGBI_FLAG_DDP_OFF;
+ err = 0;
+ } else {
+ cdev->flags |= CXGBI_FLAG_DDP_OFF;
}
- return -EINVAL;
-}
-
-static void ddp_release_gl(struct cxgbi_gather_list *gl,
- struct pci_dev *pdev)
-{
- ddp_gl_unmap(pdev, gl);
- kfree(gl);
}
+EXPORT_SYMBOL_GPL(cxgbi_ddp_ppm_setup);
-static struct cxgbi_gather_list *ddp_make_gl(unsigned int xferlen,
- struct scatterlist *sgl,
- unsigned int sgcnt,
- struct pci_dev *pdev,
- gfp_t gfp)
+static int cxgbi_ddp_sgl_check(struct scatterlist *sgl, int nents)
{
- struct cxgbi_gather_list *gl;
+ int i;
+ int last_sgidx = nents - 1;
struct scatterlist *sg = sgl;
- struct page *sgpage = sg_page(sg);
- unsigned int sglen = sg->length;
- unsigned int sgoffset = sg->offset;
- unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >>
- PAGE_SHIFT;
- int i = 1, j = 0;
-
- if (xferlen < DDP_THRESHOLD) {
- log_debug(1 << CXGBI_DBG_DDP,
- "xfer %u < threshold %u, no ddp.\n",
- xferlen, DDP_THRESHOLD);
- return NULL;
- }
-
- gl = kzalloc(sizeof(struct cxgbi_gather_list) +
- npages * (sizeof(dma_addr_t) +
- sizeof(struct page *)), gfp);
- if (!gl) {
- log_debug(1 << CXGBI_DBG_DDP,
- "xfer %u, %u pages, OOM.\n", xferlen, npages);
- return NULL;
- }
- log_debug(1 << CXGBI_DBG_DDP,
- "xfer %u, sgl %u, gl max %u.\n", xferlen, sgcnt, npages);
-
- gl->pages = (struct page **)&gl->phys_addr[npages];
- gl->nelem = npages;
- gl->length = xferlen;
- gl->offset = sgoffset;
- gl->pages[0] = sgpage;
-
- for (i = 1, sg = sg_next(sgl), j = 0; i < sgcnt;
- i++, sg = sg_next(sg)) {
- struct page *page = sg_page(sg);
-
- if (sgpage == page && sg->offset == sgoffset + sglen)
- sglen += sg->length;
- else {
- /* make sure the sgl is fit for ddp:
- * each has the same page size, and
- * all of the middle pages are used completely
- */
- if ((j && sgoffset) || ((i != sgcnt - 1) &&
- ((sglen + sgoffset) & ~PAGE_MASK))) {
- log_debug(1 << CXGBI_DBG_DDP,
- "page %d/%u, %u + %u.\n",
- i, sgcnt, sgoffset, sglen);
- goto error_out;
- }
+ for (i = 0; i < nents; i++, sg = sg_next(sg)) {
+ unsigned int len = sg->length + sg->offset;
- j++;
- if (j == gl->nelem || sg->offset) {
- log_debug(1 << CXGBI_DBG_DDP,
- "page %d/%u, offset %u.\n",
- j, gl->nelem, sg->offset);
- goto error_out;
- }
- gl->pages[j] = page;
- sglen = sg->length;
- sgoffset = sg->offset;
- sgpage = page;
- }
- }
- gl->nelem = ++j;
-
- if (ddp_gl_map(pdev, gl) < 0)
- goto error_out;
-
- return gl;
-
-error_out:
- kfree(gl);
- return NULL;
-}
-
-static void ddp_tag_release(struct cxgbi_hba *chba, u32 tag)
-{
- struct cxgbi_device *cdev = chba->cdev;
- struct cxgbi_ddp_info *ddp = cdev->ddp;
- u32 idx;
-
- idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask;
- if (idx < ddp->nppods) {
- struct cxgbi_gather_list *gl = ddp->gl_map[idx];
- unsigned int npods;
-
- if (!gl || !gl->nelem) {
- pr_warn("tag 0x%x, idx %u, gl 0x%p, %u.\n",
- tag, idx, gl, gl ? gl->nelem : 0);
- return;
- }
- npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
- log_debug(1 << CXGBI_DBG_DDP,
- "tag 0x%x, release idx %u, npods %u.\n",
- tag, idx, npods);
- cdev->csk_ddp_clear(chba, tag, idx, npods);
- ddp_unmark_entries(ddp, idx, npods);
- ddp_release_gl(gl, ddp->pdev);
- } else
- pr_warn("tag 0x%x, idx %u > max %u.\n", tag, idx, ddp->nppods);
-}
-
-static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid,
- u32 sw_tag, u32 *tagp, struct cxgbi_gather_list *gl,
- gfp_t gfp)
-{
- struct cxgbi_device *cdev = csk->cdev;
- struct cxgbi_ddp_info *ddp = cdev->ddp;
- struct cxgbi_tag_format *tformat = &cdev->tag_format;
- struct cxgbi_pagepod_hdr hdr;
- unsigned int npods;
- int idx = -1;
- int err = -ENOMEM;
- u32 tag;
-
- npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
- if (ddp->idx_last == ddp->nppods)
- idx = ddp_find_unused_entries(ddp, 0, ddp->nppods,
- npods, gl);
- else {
- idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1,
- ddp->nppods, npods,
- gl);
- if (idx < 0 && ddp->idx_last >= npods) {
- idx = ddp_find_unused_entries(ddp, 0,
- min(ddp->idx_last + npods, ddp->nppods),
- npods, gl);
+ if ((sg->offset & 0x3) || (i && sg->offset) ||
+ ((i != last_sgidx) && len != PAGE_SIZE)) {
+ log_debug(1 << CXGBI_DBG_DDP,
+ "sg %u/%u, %u,%u, not aligned.\n",
+ i, nents, sg->offset, sg->length);
+ goto err_out;
}
}
- if (idx < 0) {
- log_debug(1 << CXGBI_DBG_DDP,
- "xferlen %u, gl %u, npods %u NO DDP.\n",
- gl->length, gl->nelem, npods);
- return idx;
- }
-
- tag = cxgbi_ddp_tag_base(tformat, sw_tag);
- tag |= idx << PPOD_IDX_SHIFT;
-
- hdr.rsvd = 0;
- hdr.vld_tid = htonl(PPOD_VALID_FLAG | PPOD_TID(tid));
- hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask);
- hdr.max_offset = htonl(gl->length);
- hdr.page_offset = htonl(gl->offset);
-
- err = cdev->csk_ddp_set(csk, &hdr, idx, npods, gl);
- if (err < 0)
- goto unmark_entries;
-
- ddp->idx_last = idx;
- log_debug(1 << CXGBI_DBG_DDP,
- "xfer %u, gl %u,%u, tid 0x%x, tag 0x%x->0x%x(%u,%u).\n",
- gl->length, gl->nelem, gl->offset, tid, sw_tag, tag, idx,
- npods);
- *tagp = tag;
return 0;
-
-unmark_entries:
- ddp_unmark_entries(ddp, idx, npods);
- return err;
+err_out:
+ return -EINVAL;
}
-int cxgbi_ddp_reserve(struct cxgbi_sock *csk, unsigned int *tagp,
- unsigned int sw_tag, unsigned int xferlen,
- struct scatterlist *sgl, unsigned int sgcnt, gfp_t gfp)
+static int cxgbi_ddp_reserve(struct cxgbi_conn *cconn,
+ struct cxgbi_task_data *tdata, u32 sw_tag,
+ unsigned int xferlen)
{
+ struct cxgbi_sock *csk = cconn->cep->csk;
struct cxgbi_device *cdev = csk->cdev;
- struct cxgbi_tag_format *tformat = &cdev->tag_format;
- struct cxgbi_gather_list *gl;
+ struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev);
+ struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;
+ struct scatterlist *sgl = ttinfo->sgl;
+ unsigned int sgcnt = ttinfo->nents;
+ unsigned int sg_offset = sgl->offset;
int err;
- if (page_idx >= DDP_PGIDX_MAX || !cdev->ddp ||
- xferlen < DDP_THRESHOLD) {
+ if (cdev->flags & CXGBI_FLAG_DDP_OFF) {
log_debug(1 << CXGBI_DBG_DDP,
- "pgidx %u, xfer %u, NO ddp.\n", page_idx, xferlen);
+ "cdev 0x%p DDP off.\n", cdev);
return -EINVAL;
}
- if (!cxgbi_sw_tag_usable(tformat, sw_tag)) {
+ if (!ppm || xferlen < DDP_THRESHOLD || !sgcnt ||
+ ppm->tformat.pgsz_idx_dflt >= DDP_PGIDX_MAX) {
log_debug(1 << CXGBI_DBG_DDP,
- "sw_tag 0x%x NOT usable.\n", sw_tag);
+ "ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n",
+ ppm, ppm ? ppm->tformat.pgsz_idx_dflt : DDP_PGIDX_MAX,
+ xferlen, ttinfo->nents);
return -EINVAL;
}
- gl = ddp_make_gl(xferlen, sgl, sgcnt, cdev->pdev, gfp);
- if (!gl)
- return -ENOMEM;
+ /* make sure the buffer is suitable for ddp */
+ if (cxgbi_ddp_sgl_check(sgl, sgcnt) < 0)
+ return -EINVAL;
- err = ddp_tag_reserve(csk, csk->tid, sw_tag, tagp, gl, gfp);
- if (err < 0)
- ddp_release_gl(gl, cdev->pdev);
+ ttinfo->nr_pages = (xferlen + sgl->offset + (1 << PAGE_SHIFT) - 1) >>
+ PAGE_SHIFT;
- return err;
-}
+ /*
+ * the ddp tag will be used for the itt in the outgoing pdu,
+ * the itt generated by libiscsi is saved in the ppm and can be
+ * retrieved via the ddp tag
+ */
+ err = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx,
+ &ttinfo->tag, (unsigned long)sw_tag);
+ if (err < 0) {
+ cconn->ddp_full++;
+ return err;
+ }
+ ttinfo->npods = err;
-static void ddp_destroy(struct kref *kref)
-{
- struct cxgbi_ddp_info *ddp = container_of(kref,
- struct cxgbi_ddp_info,
- refcnt);
- struct cxgbi_device *cdev = ddp->cdev;
- int i = 0;
+ /* setup dma from scsi command sgl */
+ sgl->offset = 0;
+ err = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
+ sgl->offset = sg_offset;
+ if (err == 0) {
+ pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n",
+ __func__, sw_tag, xferlen, sgcnt);
+ goto rel_ppods;
+ }
+ if (err != ttinfo->nr_pages) {
+ log_debug(1 << CXGBI_DBG_DDP,
+ "%s: sw tag 0x%x, xfer %u, sgl %u, dma count %d.\n",
+ __func__, sw_tag, xferlen, sgcnt, err);
+ }
- pr_info("kref 0, destroy ddp 0x%p, cdev 0x%p.\n", ddp, cdev);
+ ttinfo->flags |= CXGBI_PPOD_INFO_FLAG_MAPPED;
+ ttinfo->cid = csk->port_id;
- while (i < ddp->nppods) {
- struct cxgbi_gather_list *gl = ddp->gl_map[i];
+ cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset,
+ xferlen, &ttinfo->hdr);
- if (gl) {
- int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
- >> PPOD_PAGES_SHIFT;
- pr_info("cdev 0x%p, ddp %d + %d.\n", cdev, i, npods);
- kfree(gl);
- i += npods;
- } else
- i++;
+ if (cdev->flags & CXGBI_FLAG_USE_PPOD_OFLDQ) {
+ /* write ppod from xmit_pdu (of iscsi_scsi_command pdu) */
+ ttinfo->flags |= CXGBI_PPOD_INFO_FLAG_VALID;
+ } else {
+ /* write ppod from control queue now */
+ err = cdev->csk_ddp_set_map(ppm, csk, ttinfo);
+ if (err < 0)
+ goto rel_ppods;
}
- cxgbi_free_big_mem(ddp);
-}
-
-int cxgbi_ddp_cleanup(struct cxgbi_device *cdev)
-{
- struct cxgbi_ddp_info *ddp = cdev->ddp;
- log_debug(1 << CXGBI_DBG_DDP,
- "cdev 0x%p, release ddp 0x%p.\n", cdev, ddp);
- cdev->ddp = NULL;
- if (ddp)
- return kref_put(&ddp->refcnt, ddp_destroy);
return 0;
-}
-EXPORT_SYMBOL_GPL(cxgbi_ddp_cleanup);
-int cxgbi_ddp_init(struct cxgbi_device *cdev,
- unsigned int llimit, unsigned int ulimit,
- unsigned int max_txsz, unsigned int max_rxsz)
-{
- struct cxgbi_ddp_info *ddp;
- unsigned int ppmax, bits;
+rel_ppods:
+ cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
- ppmax = (ulimit - llimit + 1) >> PPOD_SIZE_SHIFT;
- bits = __ilog2_u32(ppmax) + 1;
- if (bits > PPOD_IDX_MAX_SIZE)
- bits = PPOD_IDX_MAX_SIZE;
- ppmax = (1 << (bits - 1)) - 1;
-
- ddp = cxgbi_alloc_big_mem(sizeof(struct cxgbi_ddp_info) +
- ppmax * (sizeof(struct cxgbi_gather_list *) +
- sizeof(struct sk_buff *)),
- GFP_KERNEL);
- if (!ddp) {
- pr_warn("cdev 0x%p, ddp ppmax %u OOM.\n", cdev, ppmax);
- return -ENOMEM;
+ if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_MAPPED) {
+ ttinfo->flags &= ~CXGBI_PPOD_INFO_FLAG_MAPPED;
+ dma_unmap_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
}
- ddp->gl_map = (struct cxgbi_gather_list **)(ddp + 1);
- cdev->ddp = ddp;
-
- spin_lock_init(&ddp->map_lock);
- kref_init(&ddp->refcnt);
-
- ddp->cdev = cdev;
- ddp->pdev = cdev->pdev;
- ddp->llimit = llimit;
- ddp->ulimit = ulimit;
- ddp->max_txsz = min_t(unsigned int, max_txsz, ULP2_MAX_PKT_SIZE);
- ddp->max_rxsz = min_t(unsigned int, max_rxsz, ULP2_MAX_PKT_SIZE);
- ddp->nppods = ppmax;
- ddp->idx_last = ppmax;
- ddp->idx_bits = bits;
- ddp->idx_mask = (1 << bits) - 1;
- ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1;
-
- cdev->tag_format.sw_bits = sw_tag_idx_bits + sw_tag_age_bits;
- cdev->tag_format.rsvd_bits = ddp->idx_bits;
- cdev->tag_format.rsvd_shift = PPOD_IDX_SHIFT;
- cdev->tag_format.rsvd_mask = (1 << cdev->tag_format.rsvd_bits) - 1;
-
- pr_info("%s tag format, sw %u, rsvd %u,%u, mask 0x%x.\n",
- cdev->ports[0]->name, cdev->tag_format.sw_bits,
- cdev->tag_format.rsvd_bits, cdev->tag_format.rsvd_shift,
- cdev->tag_format.rsvd_mask);
-
- cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
- ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN);
- cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
- ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN);
-
- log_debug(1 << CXGBI_DBG_DDP,
- "%s max payload size: %u/%u, %u/%u.\n",
- cdev->ports[0]->name, cdev->tx_max_size, ddp->max_txsz,
- cdev->rx_max_size, ddp->max_rxsz);
- return 0;
+ return -EINVAL;
}
-EXPORT_SYMBOL_GPL(cxgbi_ddp_init);
-
-/*
- * APIs interacting with open-iscsi libraries
- */
-
-static unsigned char padding[4];
static void task_release_itt(struct iscsi_task *task, itt_t hdr_itt)
{
struct scsi_cmnd *sc = task->sc;
struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
struct cxgbi_conn *cconn = tcp_conn->dd_data;
- struct cxgbi_hba *chba = cconn->chba;
- struct cxgbi_tag_format *tformat = &chba->cdev->tag_format;
+ struct cxgbi_device *cdev = cconn->chba->cdev;
+ struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev);
u32 tag = ntohl((__force u32)hdr_itt);
log_debug(1 << CXGBI_DBG_DDP,
- "cdev 0x%p, release tag 0x%x.\n", chba->cdev, tag);
+ "cdev 0x%p, task 0x%p, release tag 0x%x.\n",
+ cdev, task, tag);
if (sc &&
(scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) &&
- cxgbi_is_ddp_tag(tformat, tag))
- ddp_tag_release(chba, tag);
+ cxgbi_ppm_is_ddp_tag(ppm, tag)) {
+ struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
+ struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;
+
+ if (!(cdev->flags & CXGBI_FLAG_USE_PPOD_OFLDQ))
+ cdev->csk_ddp_clear_map(cdev, ppm, ttinfo);
+ cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
+ dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl, ttinfo->nents,
+ DMA_FROM_DEVICE);
+ }
+}
+
+static inline u32 cxgbi_build_sw_tag(u32 idx, u32 age)
+{
+ /* assume idx and age both are < 0x7FFF (32767) */
+ return (idx << 16) | age;
}
static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
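With the pagepod manager owning the reserved bits, the software tag is now a simple fixed packing: cxgbi_build_sw_tag() puts the task itt in the upper half and the session age in the lower half, and cxgbi_decode_sw_tag() reverses it, both halves assumed under 0x7FFF. A standalone round-trip check:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t build_sw_tag(uint32_t idx, uint32_t age)
    {
            return (idx << 16) | age;       /* both assumed < 0x7FFF */
    }

    static void decode_sw_tag(uint32_t sw_tag, int *idx, int *age)
    {
            *age = sw_tag & 0x7FFF;
            *idx = (sw_tag >> 16) & 0x7FFF;
    }

    int main(void)
    {
            int idx, age;

            decode_sw_tag(build_sw_tag(0x1abc, 0x2d), &idx, &age);
            assert(idx == 0x1abc && age == 0x2d);
            return 0;
    }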
@@ -1710,34 +1433,41 @@ static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
struct iscsi_session *sess = conn->session;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct cxgbi_conn *cconn = tcp_conn->dd_data;
- struct cxgbi_hba *chba = cconn->chba;
- struct cxgbi_tag_format *tformat = &chba->cdev->tag_format;
- u32 sw_tag = (sess->age << cconn->task_idx_bits) | task->itt;
+ struct cxgbi_device *cdev = cconn->chba->cdev;
+ struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev);
+ u32 sw_tag = cxgbi_build_sw_tag(task->itt, sess->age);
u32 tag = 0;
int err = -EINVAL;
if (sc &&
- (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE)) {
- err = cxgbi_ddp_reserve(cconn->cep->csk, &tag, sw_tag,
- scsi_in(sc)->length,
- scsi_in(sc)->table.sgl,
- scsi_in(sc)->table.nents,
- GFP_ATOMIC);
- if (err < 0)
- log_debug(1 << CXGBI_DBG_DDP,
- "csk 0x%p, R task 0x%p, %u,%u, no ddp.\n",
- cconn->cep->csk, task, scsi_in(sc)->length,
- scsi_in(sc)->table.nents);
+ (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE)
+ ) {
+ struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
+ struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;
+
+ scmd_get_params(sc, &ttinfo->sgl, &ttinfo->nents,
+ &tdata->dlen, 0);
+ err = cxgbi_ddp_reserve(cconn, tdata, sw_tag, tdata->dlen);
+ if (!err)
+ tag = ttinfo->tag;
+ else
+ log_debug(1 << CXGBI_DBG_DDP,
+ "csk 0x%p, R task 0x%p, %u,%u, no ddp.\n",
+ cconn->cep->csk, task, tdata->dlen,
+ ttinfo->nents);
}
- if (err < 0)
- tag = cxgbi_set_non_ddp_tag(tformat, sw_tag);
+ if (err < 0) {
+ err = cxgbi_ppm_make_non_ddp_tag(ppm, sw_tag, &tag);
+ if (err < 0)
+ return err;
+ }
/* the itt need to sent in big-endian order */
*hdr_itt = (__force itt_t)htonl(tag);
log_debug(1 << CXGBI_DBG_DDP,
- "cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n",
- chba->cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt);
+ "cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n",
+ cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt);
return 0;
}
@@ -1746,19 +1476,24 @@ void cxgbi_parse_pdu_itt(struct iscsi_conn *conn, itt_t itt, int *idx, int *age)
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct cxgbi_conn *cconn = tcp_conn->dd_data;
struct cxgbi_device *cdev = cconn->chba->cdev;
- u32 tag = ntohl((__force u32) itt);
+ struct cxgbi_ppm *ppm = cdev->cdev2ppm(cdev);
+ u32 tag = ntohl((__force u32)itt);
u32 sw_bits;
- sw_bits = cxgbi_tag_nonrsvd_bits(&cdev->tag_format, tag);
- if (idx)
- *idx = sw_bits & ((1 << cconn->task_idx_bits) - 1);
- if (age)
- *age = (sw_bits >> cconn->task_idx_bits) & ISCSI_AGE_MASK;
+ if (ppm) {
+ if (cxgbi_ppm_is_ddp_tag(ppm, tag))
+ sw_bits = cxgbi_ppm_get_tag_caller_data(ppm, tag);
+ else
+ sw_bits = cxgbi_ppm_decode_non_ddp_tag(ppm, tag);
+ } else {
+ sw_bits = tag;
+ }
+ cxgbi_decode_sw_tag(sw_bits, idx, age);
log_debug(1 << CXGBI_DBG_DDP,
- "cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n",
- cdev, tag, itt, sw_bits, idx ? *idx : 0xFFFFF,
- age ? *age : 0xFF);
+ "cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n",
+ cdev, tag, itt, sw_bits, idx ? *idx : 0xFFFFF,
+ age ? *age : 0xFF);
}
EXPORT_SYMBOL_GPL(cxgbi_parse_pdu_itt);
@@ -2260,7 +1995,9 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
struct cxgbi_conn *cconn = tcp_conn->dd_data;
struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
+ struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo;
struct sk_buff *skb = tdata->skb;
+ struct cxgbi_sock *csk = NULL;
unsigned int datalen;
int err;
@@ -2270,8 +2007,28 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
return 0;
}
+ if (cconn && cconn->cep)
+ csk = cconn->cep->csk;
+ if (!csk) {
+ log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
+ "task 0x%p, csk gone.\n", task);
+ return -EPIPE;
+ }
+
datalen = skb->data_len;
tdata->skb = NULL;
+
+ /* write ppod first if using ofldq to write ppod */
+ if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_VALID) {
+ struct cxgbi_ppm *ppm = csk->cdev->cdev2ppm(csk->cdev);
+
+ ttinfo->flags &= ~CXGBI_PPOD_INFO_FLAG_VALID;
+ if (csk->cdev->csk_ddp_set_map(ppm, csk, ttinfo) < 0)
+ pr_err("task 0x%p, ppod writing using ofldq failed.\n",
+ task);
+ /* continue. Let fl get the data */
+ }
+
err = cxgbi_sock_send_pdus(cconn->cep->csk, skb);
if (err > 0) {
int pdulen = err;
@@ -2313,12 +2070,14 @@ EXPORT_SYMBOL_GPL(cxgbi_conn_xmit_pdu);
void cxgbi_cleanup_task(struct iscsi_task *task)
{
+ struct iscsi_tcp_task *tcp_task = task->dd_data;
struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
log_debug(1 << CXGBI_DBG_ISCSI,
"task 0x%p, skb 0x%p, itt 0x%x.\n",
task, tdata->skb, task->hdr_itt);
+ tcp_task->dd_data = NULL;
/* never reached the xmit task callout */
if (tdata->skb)
__kfree_skb(tdata->skb);
@@ -2528,6 +2287,7 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct cxgbi_conn *cconn = tcp_conn->dd_data;
+ struct cxgbi_ppm *ppm;
struct iscsi_endpoint *ep;
struct cxgbi_endpoint *cep;
struct cxgbi_sock *csk;
@@ -2540,7 +2300,10 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
/* setup ddp pagesize */
cep = ep->dd_data;
csk = cep->csk;
- err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, page_idx, 0);
+
+ ppm = csk->cdev->cdev2ppm(csk->cdev);
+ err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
+ ppm->tformat.pgsz_idx_dflt, 0);
if (err < 0)
return err;
@@ -2915,16 +2678,7 @@ EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible);
static int __init libcxgbi_init_module(void)
{
- sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1;
- sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1;
-
pr_info("%s", version);
-
- pr_info("tag itt 0x%x, %u bits, age 0x%x, %u bits.\n",
- ISCSI_ITT_MASK, sw_tag_idx_bits,
- ISCSI_AGE_MASK, sw_tag_age_bits);
-
- ddp_setup_host_page_size();
return 0;
}
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 9842301f7..e7802738f 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -24,9 +24,12 @@
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
+#include <linux/version.h>
#include <scsi/scsi_device.h>
#include <scsi/libiscsi_tcp.h>
+#include <libcxgb_ppm.h>
+
enum cxgbi_dbg_flag {
CXGBI_DBG_ISCSI,
CXGBI_DBG_DDP,
@@ -84,92 +87,11 @@ static inline unsigned int cxgbi_ulp_extra_len(int submode)
return ulp2_extra_len[submode & 3];
}
-/*
- * struct pagepod_hdr, pagepod - pagepod format
- */
-
#define CPL_RX_DDP_STATUS_DDP_SHIFT 16 /* ddp'able */
#define CPL_RX_DDP_STATUS_PAD_SHIFT 19 /* pad error */
#define CPL_RX_DDP_STATUS_HCRC_SHIFT 20 /* hcrc error */
#define CPL_RX_DDP_STATUS_DCRC_SHIFT 21 /* dcrc error */
-struct cxgbi_pagepod_hdr {
- u32 vld_tid;
- u32 pgsz_tag_clr;
- u32 max_offset;
- u32 page_offset;
- u64 rsvd;
-};
-
-#define PPOD_PAGES_MAX 4
-struct cxgbi_pagepod {
- struct cxgbi_pagepod_hdr hdr;
- u64 addr[PPOD_PAGES_MAX + 1];
-};
-
-struct cxgbi_tag_format {
- unsigned char sw_bits;
- unsigned char rsvd_bits;
- unsigned char rsvd_shift;
- unsigned char filler[1];
- u32 rsvd_mask;
-};
-
-struct cxgbi_gather_list {
- unsigned int tag;
- unsigned int length;
- unsigned int offset;
- unsigned int nelem;
- struct page **pages;
- dma_addr_t phys_addr[0];
-};
-
-struct cxgbi_ddp_info {
- struct kref refcnt;
- struct cxgbi_device *cdev;
- struct pci_dev *pdev;
- unsigned int max_txsz;
- unsigned int max_rxsz;
- unsigned int llimit;
- unsigned int ulimit;
- unsigned int nppods;
- unsigned int idx_last;
- unsigned char idx_bits;
- unsigned char filler[3];
- unsigned int idx_mask;
- unsigned int rsvd_tag_mask;
- spinlock_t map_lock;
- struct cxgbi_gather_list **gl_map;
-};
-
-#define DDP_PGIDX_MAX 4
-#define DDP_THRESHOLD 2048
-
-#define PPOD_PAGES_SHIFT 2 /* 4 pages per pod */
-
-#define PPOD_SIZE sizeof(struct cxgbi_pagepod) /* 64 */
-#define PPOD_SIZE_SHIFT 6
-
-#define ULPMEM_DSGL_MAX_NPPODS 16 /* 1024/PPOD_SIZE */
-#define ULPMEM_IDATA_MAX_NPPODS 4 /* 256/PPOD_SIZE */
-#define PCIE_MEMWIN_MAX_NPPODS 16 /* 1024/PPOD_SIZE */
-
-#define PPOD_COLOR_SHIFT 0
-#define PPOD_COLOR(x) ((x) << PPOD_COLOR_SHIFT)
-
-#define PPOD_IDX_SHIFT 6
-#define PPOD_IDX_MAX_SIZE 24
-
-#define PPOD_TID_SHIFT 0
-#define PPOD_TID(x) ((x) << PPOD_TID_SHIFT)
-
-#define PPOD_TAG_SHIFT 6
-#define PPOD_TAG(x) ((x) << PPOD_TAG_SHIFT)
-
-#define PPOD_VALID_SHIFT 24
-#define PPOD_VALID(x) ((x) << PPOD_VALID_SHIFT)
-#define PPOD_VALID_FLAG PPOD_VALID(1U)
-
/*
* sge_opaque_hdr -
* Opaque version of structure the SGE stores at skb->head of TX_DATA packets
@@ -279,6 +201,8 @@ struct cxgbi_skb_tx_cb {
enum cxgbi_skcb_flags {
SKCBF_TX_NEED_HDR, /* packet needs a header */
+ SKCBF_TX_MEM_WRITE, /* memory write */
+ SKCBF_TX_FLAG_COMPL, /* wr completion flag */
SKCBF_RX_COALESCED, /* received whole pdu */
SKCBF_RX_HDR, /* received pdu header */
SKCBF_RX_DATA, /* received pdu payload */
@@ -527,6 +451,9 @@ struct cxgbi_ports_map {
#define CXGBI_FLAG_DEV_T4 0x2
#define CXGBI_FLAG_ADAPTER_RESET 0x4
#define CXGBI_FLAG_IPV4_SET 0x10
+#define CXGBI_FLAG_USE_PPOD_OFLDQ 0x40
+#define CXGBI_FLAG_DDP_OFF 0x100
+
struct cxgbi_device {
struct list_head list_head;
struct list_head rcu_node;
@@ -548,15 +475,14 @@ struct cxgbi_device {
unsigned int tx_max_size;
unsigned int rx_max_size;
struct cxgbi_ports_map pmap;
- struct cxgbi_tag_format tag_format;
- struct cxgbi_ddp_info *ddp;
void (*dev_ddp_cleanup)(struct cxgbi_device *);
- int (*csk_ddp_set)(struct cxgbi_sock *, struct cxgbi_pagepod_hdr *,
- unsigned int, unsigned int,
- struct cxgbi_gather_list *);
- void (*csk_ddp_clear)(struct cxgbi_hba *,
- unsigned int, unsigned int, unsigned int);
+ struct cxgbi_ppm* (*cdev2ppm)(struct cxgbi_device *);
+ int (*csk_ddp_set_map)(struct cxgbi_ppm *, struct cxgbi_sock *,
+ struct cxgbi_task_tag_info *);
+ void (*csk_ddp_clear_map)(struct cxgbi_device *cdev,
+ struct cxgbi_ppm *,
+ struct cxgbi_task_tag_info *);
int (*csk_ddp_setup_digest)(struct cxgbi_sock *,
unsigned int, int, int, int);
int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *,
@@ -580,6 +506,8 @@ struct cxgbi_conn {
struct iscsi_conn *iconn;
struct cxgbi_hba *chba;
u32 task_idx_bits;
+ unsigned int ddp_full;
+ unsigned int ddp_tag_full;
};
struct cxgbi_endpoint {
@@ -593,85 +521,15 @@ struct cxgbi_task_data {
unsigned short nr_frags;
struct page_frag frags[MAX_PDU_FRAGS];
struct sk_buff *skb;
+ unsigned int dlen;
unsigned int offset;
unsigned int count;
unsigned int sgoffset;
+ struct cxgbi_task_tag_info ttinfo;
};
#define iscsi_task_cxgbi_data(task) \
((task)->dd_data + sizeof(struct iscsi_tcp_task))
-static inline int cxgbi_is_ddp_tag(struct cxgbi_tag_format *tformat, u32 tag)
-{
- return !(tag & (1 << (tformat->rsvd_bits + tformat->rsvd_shift - 1)));
-}
-
-static inline int cxgbi_sw_tag_usable(struct cxgbi_tag_format *tformat,
- u32 sw_tag)
-{
- sw_tag >>= (32 - tformat->rsvd_bits);
- return !sw_tag;
-}
-
-static inline u32 cxgbi_set_non_ddp_tag(struct cxgbi_tag_format *tformat,
- u32 sw_tag)
-{
- unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1;
- u32 mask = (1 << shift) - 1;
-
- if (sw_tag && (sw_tag & ~mask)) {
- u32 v1 = sw_tag & ((1 << shift) - 1);
- u32 v2 = (sw_tag >> (shift - 1)) << shift;
-
- return v2 | v1 | 1 << shift;
- }
-
- return sw_tag | 1 << shift;
-}
-
-static inline u32 cxgbi_ddp_tag_base(struct cxgbi_tag_format *tformat,
- u32 sw_tag)
-{
- u32 mask = (1 << tformat->rsvd_shift) - 1;
-
- if (sw_tag && (sw_tag & ~mask)) {
- u32 v1 = sw_tag & mask;
- u32 v2 = sw_tag >> tformat->rsvd_shift;
-
- v2 <<= tformat->rsvd_bits + tformat->rsvd_shift;
-
- return v2 | v1;
- }
-
- return sw_tag;
-}
-
-static inline u32 cxgbi_tag_rsvd_bits(struct cxgbi_tag_format *tformat,
- u32 tag)
-{
- if (cxgbi_is_ddp_tag(tformat, tag))
- return (tag >> tformat->rsvd_shift) & tformat->rsvd_mask;
-
- return 0;
-}
-
-static inline u32 cxgbi_tag_nonrsvd_bits(struct cxgbi_tag_format *tformat,
- u32 tag)
-{
- unsigned char shift = tformat->rsvd_bits + tformat->rsvd_shift - 1;
- u32 v1, v2;
-
- if (cxgbi_is_ddp_tag(tformat, tag)) {
- v1 = tag & ((1 << tformat->rsvd_shift) - 1);
- v2 = (tag >> (shift + 1)) << tformat->rsvd_shift;
- } else {
- u32 mask = (1 << shift) - 1;
- tag &= ~(1 << shift);
- v1 = tag & mask;
- v2 = (tag >> 1) & ~mask;
- }
- return v1 | v2;
-}
-
static inline void *cxgbi_alloc_big_mem(unsigned int size,
gfp_t gfp)
{
@@ -749,7 +607,11 @@ int cxgbi_ddp_init(struct cxgbi_device *, unsigned int, unsigned int,
unsigned int, unsigned int);
int cxgbi_ddp_cleanup(struct cxgbi_device *);
void cxgbi_ddp_page_size_factor(int *);
-void cxgbi_ddp_ppod_clear(struct cxgbi_pagepod *);
-void cxgbi_ddp_ppod_set(struct cxgbi_pagepod *, struct cxgbi_pagepod_hdr *,
- struct cxgbi_gather_list *, unsigned int);
+void cxgbi_ddp_set_one_ppod(struct cxgbi_pagepod *,
+ struct cxgbi_task_tag_info *,
+ struct scatterlist **sg_pp, unsigned int *sg_off);
+void cxgbi_ddp_ppm_setup(void **ppm_pp, struct cxgbi_device *,
+ struct cxgbi_tag_format *, unsigned int ppmax,
+ unsigned int llimit, unsigned int start,
+ unsigned int rsvd_factor);
#endif /*__LIBCXGBI_H__*/
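
For context on the interface change above: the deleted inline helpers encoded a split tag layout in which a window of rsvd_bits reserved bits starts at bit rsvd_shift, and the top bit of that window is set only for software (non-DDP) tags. A minimal standalone sketch of that check, mirroring the removed cxgbi_is_ddp_tag(); the struct below is a simplified stand-in for struct cxgbi_tag_format, not the driver's definition:

	#include <stdint.h>

	/* Simplified stand-in for struct cxgbi_tag_format. */
	struct tag_format {
		unsigned char rsvd_bits;	/* width of the reserved window */
		unsigned char rsvd_shift;	/* start bit of the reserved window */
	};

	/* Mirrors the removed cxgbi_is_ddp_tag(): the top bit of the
	 * reserved window is set only for software (non-DDP) tags. */
	static inline int is_ddp_tag(const struct tag_format *tf, uint32_t tag)
	{
		return !(tag & (1u << (tf->rsvd_bits + tf->rsvd_shift - 1)));
	}
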
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 8fb9643fe..661bb94e2 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -765,6 +765,75 @@ static void term_afu(struct cxlflash_cfg *cfg)
}
/**
+ * notify_shutdown() - notifies device of pending shutdown
+ * @cfg: Internal structure associated with the host.
+ * @wait: Whether to wait for shutdown processing to complete.
+ *
+ * This function will notify the AFU that the adapter is being shut down
+ * and, if @wait is true, will wait for shutdown processing to complete.
+ * This notification should flush pending I/Os to the device and halt
+ * further I/Os until the next AFU reset is issued and the device restarted.
+ */
+static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
+{
+ struct afu *afu = cfg->afu;
+ struct device *dev = &cfg->dev->dev;
+ struct sisl_global_map __iomem *global;
+ struct dev_dependent_vals *ddv;
+ u64 reg, status;
+ int i, retry_cnt = 0;
+
+ ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
+ if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
+ return;
+
+ if (!afu || !afu->afu_map) {
+ dev_dbg(dev, "%s: The problem state area is not mapped\n",
+ __func__);
+ return;
+ }
+
+ global = &afu->afu_map->global;
+
+ /* Notify AFU */
+ for (i = 0; i < NUM_FC_PORTS; i++) {
+ reg = readq_be(&global->fc_regs[i][FC_CONFIG2 / 8]);
+ reg |= SISL_FC_SHUTDOWN_NORMAL;
+ writeq_be(reg, &global->fc_regs[i][FC_CONFIG2 / 8]);
+ }
+
+ if (!wait)
+ return;
+
+ /* Wait up to 1.5 seconds for shutdown processing to complete */
+ for (i = 0; i < NUM_FC_PORTS; i++) {
+ retry_cnt = 0;
+ while (true) {
+ status = readq_be(&global->fc_regs[i][FC_STATUS / 8]);
+ if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
+ break;
+ if (++retry_cnt >= MC_RETRY_CNT) {
+ dev_dbg(dev, "%s: port %d shutdown processing "
+ "not yet completed\n", __func__, i);
+ break;
+ }
+ msleep(100 * retry_cnt);
+ }
+ }
+}
+
+/**
+ * cxlflash_shutdown() - shutdown handler
+ * @pdev: PCI device associated with the host.
+ */
+static void cxlflash_shutdown(struct pci_dev *pdev)
+{
+ struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
+
+ notify_shutdown(cfg, false);
+}
+
+/**
* cxlflash_remove() - PCI entry point to tear down host
* @pdev: PCI device associated with the host.
*
@@ -785,6 +854,9 @@ static void cxlflash_remove(struct pci_dev *pdev)
cfg->tmf_slock);
spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
+ /* Notify AFU and wait for shutdown processing to complete */
+ notify_shutdown(cfg, true);
+
cfg->state = STATE_FAILTERM;
cxlflash_stop_term_user_contexts(cfg);
@@ -1916,6 +1988,19 @@ static int afu_reset(struct cxlflash_cfg *cfg)
}
/**
+ * drain_ioctls() - wait until all currently executing ioctls have completed
+ * @cfg: Internal structure associated with the host.
+ *
+ * Obtain write access to read/write semaphore that wraps ioctl
+ * handling to 'drain' ioctls currently executing.
+ */
+static void drain_ioctls(struct cxlflash_cfg *cfg)
+{
+ down_write(&cfg->ioctl_rwsem);
+ up_write(&cfg->ioctl_rwsem);
+}
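
The drain works because rw_semaphores are fair: a writer blocks until every reader that entered earlier has left, so an empty down_write()/up_write() pair acts as a barrier against in-flight ioctls. A hedged sketch of the reader side this pairs with; example_ioctl is a hypothetical wrapper, not the driver's actual ioctl entry point:

	/* Hypothetical ioctl wrapper showing the reader side of the drain:
	 * each in-flight ioctl holds cfg->ioctl_rwsem for reading, so
	 * drain_ioctls() returns only after all of them have finished. */
	static int example_ioctl(struct cxlflash_cfg *cfg, unsigned int cmd)
	{
		int rc;

		down_read(&cfg->ioctl_rwsem);	/* mark this ioctl in flight */
		rc = 0;				/* ... real ioctl work here ... */
		up_read(&cfg->ioctl_rwsem);
		return rc;
	}
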
+
+/**
* cxlflash_eh_device_reset_handler() - reset a single LUN
* @scp: SCSI command to send.
*
@@ -1986,6 +2071,7 @@ static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
switch (cfg->state) {
case STATE_NORMAL:
cfg->state = STATE_RESET;
+ drain_ioctls(cfg);
cxlflash_mark_contexts_error(cfg);
rcr = afu_reset(cfg);
if (rcr) {
@@ -2319,8 +2405,10 @@ static struct scsi_host_template driver_template = {
/*
* Device dependent values
*/
-static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS };
-static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS };
+static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
+ 0ULL };
+static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
+ CXLFLASH_NOTIFY_SHUTDOWN };
/*
* PCI device binding table
@@ -2504,19 +2592,6 @@ out_remove:
}
/**
- * drain_ioctls() - wait until all currently executing ioctls have completed
- * @cfg: Internal structure associated with the host.
- *
- * Obtain write access to read/write semaphore that wraps ioctl
- * handling to 'drain' ioctls currently executing.
- */
-static void drain_ioctls(struct cxlflash_cfg *cfg)
-{
- down_write(&cfg->ioctl_rwsem);
- up_write(&cfg->ioctl_rwsem);
-}
-
-/**
* cxlflash_pci_error_detected() - called when a PCI error is detected
* @pdev: PCI device struct.
* @state: PCI channel state.
@@ -2610,6 +2685,7 @@ static struct pci_driver cxlflash_driver = {
.id_table = cxlflash_pci_table,
.probe = cxlflash_probe,
.remove = cxlflash_remove,
+ .shutdown = cxlflash_shutdown,
.err_handler = &cxlflash_err_handler,
};
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
index eb9d8f730..e43545c86 100644
--- a/drivers/scsi/cxlflash/main.h
+++ b/drivers/scsi/cxlflash/main.h
@@ -88,6 +88,8 @@ enum undo_level {
struct dev_dependent_vals {
u64 max_sectors;
+ u64 flags;
+#define CXLFLASH_NOTIFY_SHUTDOWN 0x0000000000000001ULL
};
struct asyc_intr_info {
@@ -100,8 +102,4 @@ struct asyc_intr_info {
#define SCAN_HOST 0x04
};
-#ifndef CONFIG_CXL_EEH
-#define cxl_perst_reloads_same_image(_a, _b) do { } while (0)
-#endif
-
#endif /* _CXLFLASH_MAIN_H */
diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h
index 0b3366f5e..347fc1671 100644
--- a/drivers/scsi/cxlflash/sislite.h
+++ b/drivers/scsi/cxlflash/sislite.h
@@ -311,6 +311,12 @@ struct sisl_global_regs {
#define SISL_FC_INTERNAL_MASK ~(SISL_FC_INTERNAL_UNMASK)
#define SISL_FC_INTERNAL_SHIFT 32
+#define SISL_FC_SHUTDOWN_NORMAL 0x0000000000000010ULL
+#define SISL_FC_SHUTDOWN_ABRUPT 0x0000000000000020ULL
+
+#define SISL_STATUS_SHUTDOWN_ACTIVE 0x0000000000000010ULL
+#define SISL_STATUS_SHUTDOWN_COMPLETE 0x0000000000000020ULL
+
#define SISL_ASTATUS_UNMASK 0xFFFFULL /* 1 means unmasked */
#define SISL_ASTATUS_MASK ~(SISL_ASTATUS_UNMASK) /* 1 means masked */
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 0efe7112f..9bd41a35a 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -67,9 +67,6 @@ static DEFINE_MUTEX(fcoe_config_mutex);
static struct workqueue_struct *fcoe_wq;
-/* fcoe_percpu_clean completion. Waiter protected by fcoe_create_mutex */
-static DECLARE_COMPLETION(fcoe_flush_completion);
-
/* fcoe host list */
/* must only by accessed under the RTNL mutex */
static LIST_HEAD(fcoe_hostlist);
@@ -80,7 +77,6 @@ static int fcoe_reset(struct Scsi_Host *);
static int fcoe_xmit(struct fc_lport *, struct fc_frame *);
static int fcoe_rcv(struct sk_buff *, struct net_device *,
struct packet_type *, struct net_device *);
-static int fcoe_percpu_receive_thread(void *);
static void fcoe_percpu_clean(struct fc_lport *);
static int fcoe_link_ok(struct fc_lport *);
@@ -96,6 +92,8 @@ static struct fcoe_interface
static int fcoe_fip_recv(struct sk_buff *, struct net_device *,
struct packet_type *, struct net_device *);
+static int fcoe_fip_vlan_recv(struct sk_buff *, struct net_device *,
+ struct packet_type *, struct net_device *);
static void fcoe_fip_send(struct fcoe_ctlr *, struct sk_buff *);
static void fcoe_update_src_mac(struct fc_lport *, u8 *);
@@ -107,12 +105,11 @@ static int fcoe_ddp_setup(struct fc_lport *, u16, struct scatterlist *,
static int fcoe_ddp_done(struct fc_lport *, u16);
static int fcoe_ddp_target(struct fc_lport *, u16, struct scatterlist *,
unsigned int);
-static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
static int fcoe_dcb_app_notification(struct notifier_block *notifier,
ulong event, void *ptr);
static bool fcoe_match(struct net_device *netdev);
-static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode);
+static int fcoe_create(struct net_device *netdev, enum fip_mode fip_mode);
static int fcoe_destroy(struct net_device *netdev);
static int fcoe_enable(struct net_device *netdev);
static int fcoe_disable(struct net_device *netdev);
@@ -120,7 +117,7 @@ static int fcoe_disable(struct net_device *netdev);
/* fcoe_syfs control interface handlers */
static int fcoe_ctlr_alloc(struct net_device *netdev);
static int fcoe_ctlr_enabled(struct fcoe_ctlr_device *cdev);
-
+static void fcoe_ctlr_mode(struct fcoe_ctlr_device *ctlr_dev);
static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
u32 did, struct fc_frame *,
@@ -136,11 +133,6 @@ static struct notifier_block fcoe_notifier = {
.notifier_call = fcoe_device_notification,
};
-/* notification function for CPU hotplug events */
-static struct notifier_block fcoe_cpu_notifier = {
- .notifier_call = fcoe_cpu_callback,
-};
-
/* notification function for DCB events */
static struct notifier_block dcb_notifier = {
.notifier_call = fcoe_dcb_app_notification,
@@ -156,8 +148,9 @@ static void fcoe_set_vport_symbolic_name(struct fc_vport *);
static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *);
+
static struct fcoe_sysfs_function_template fcoe_sysfs_templ = {
- .set_fcoe_ctlr_mode = fcoe_ctlr_set_fip_mode,
+ .set_fcoe_ctlr_mode = fcoe_ctlr_mode,
.set_fcoe_ctlr_enabled = fcoe_ctlr_enabled,
.get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb,
.get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb,
@@ -372,6 +365,12 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
fcoe->fip_packet_type.dev = netdev;
dev_add_pack(&fcoe->fip_packet_type);
+ if (netdev != real_dev) {
+ fcoe->fip_vlan_packet_type.func = fcoe_fip_vlan_recv;
+ fcoe->fip_vlan_packet_type.type = htons(ETH_P_FIP);
+ fcoe->fip_vlan_packet_type.dev = real_dev;
+ dev_add_pack(&fcoe->fip_vlan_packet_type);
+ }
return 0;
}
@@ -459,6 +458,8 @@ static void fcoe_interface_remove(struct fcoe_interface *fcoe)
*/
__dev_remove_pack(&fcoe->fcoe_packet_type);
__dev_remove_pack(&fcoe->fip_packet_type);
+ if (netdev != fcoe->realdev)
+ __dev_remove_pack(&fcoe->fip_vlan_packet_type);
synchronize_net();
/* Delete secondary MAC addresses */
@@ -529,6 +530,29 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
}
/**
+ * fcoe_fip_vlan_recv() - Handler for received FIP VLAN discovery frames
+ * @skb: The receive skb
+ * @netdev: The associated net device
+ * @ptype: The packet_type structure which was used to register this handler
+ * @orig_dev: The original net_device that the skb was received on.
+ * (in case dev is a bond)
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_fip_vlan_recv(struct sk_buff *skb, struct net_device *netdev,
+ struct packet_type *ptype,
+ struct net_device *orig_dev)
+{
+ struct fcoe_interface *fcoe;
+ struct fcoe_ctlr *ctlr;
+
+ fcoe = container_of(ptype, struct fcoe_interface, fip_vlan_packet_type);
+ ctlr = fcoe_to_ctlr(fcoe);
+ fcoe_ctlr_recv(ctlr, skb);
+ return 0;
+}
+
+/**
* fcoe_port_send() - Send an Ethernet-encapsulated FIP/FCoE frame
* @port: The FCoE port
* @skb: The FIP/FCoE packet to be sent
@@ -548,7 +572,21 @@ static void fcoe_port_send(struct fcoe_port *port, struct sk_buff *skb)
*/
static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
- skb->dev = fcoe_from_ctlr(fip)->netdev;
+ struct fcoe_interface *fcoe = fcoe_from_ctlr(fip);
+ struct fip_frame {
+ struct ethhdr eth;
+ struct fip_header fip;
+ } __packed *frame;
+
+ /*
+ * Use default VLAN for FIP VLAN discovery protocol
+ */
+ frame = (struct fip_frame *)skb->data;
+ if (frame->fip.fip_op == ntohs(FIP_OP_VLAN) &&
+ fcoe->realdev != fcoe->netdev)
+ skb->dev = fcoe->realdev;
+ else
+ skb->dev = fcoe->netdev;
fcoe_port_send(lport_priv(fip->lp), skb);
}
@@ -682,6 +720,12 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
fcoe = port->priv;
ctlr = fcoe_to_ctlr(fcoe);
+ /* Figure out the VLAN ID, if any */
+ if (netdev->priv_flags & IFF_802_1Q_VLAN)
+ lport->vlan = vlan_dev_vlan_id(netdev);
+ else
+ lport->vlan = 0;
+
/*
* Determine max frame size based on underlying device and optional
* user-configured limit. If the MFS is too low, fcoe_link_ok()
@@ -780,9 +824,6 @@ static void fcoe_fdmi_info(struct fc_lport *lport, struct net_device *netdev)
fcoe = port->priv;
realdev = fcoe->realdev;
- if (!realdev)
- return;
-
/* No FDMI state m/c for NPIV ports */
if (lport->vport)
return;
@@ -1245,152 +1286,21 @@ static int __exit fcoe_if_exit(void)
return 0;
}
-/**
- * fcoe_percpu_thread_create() - Create a receive thread for an online CPU
- * @cpu: The CPU index of the CPU to create a receive thread for
- */
-static void fcoe_percpu_thread_create(unsigned int cpu)
+static void fcoe_thread_cleanup_local(unsigned int cpu)
{
- struct fcoe_percpu_s *p;
- struct task_struct *thread;
-
- p = &per_cpu(fcoe_percpu, cpu);
-
- thread = kthread_create_on_node(fcoe_percpu_receive_thread,
- (void *)p, cpu_to_node(cpu),
- "fcoethread/%d", cpu);
-
- if (likely(!IS_ERR(thread))) {
- kthread_bind(thread, cpu);
- wake_up_process(thread);
-
- spin_lock_bh(&p->fcoe_rx_list.lock);
- p->thread = thread;
- spin_unlock_bh(&p->fcoe_rx_list.lock);
- }
-}
-
-/**
- * fcoe_percpu_thread_destroy() - Remove the receive thread of a CPU
- * @cpu: The CPU index of the CPU whose receive thread is to be destroyed
- *
- * Destroys a per-CPU Rx thread. Any pending skbs are moved to the
- * current CPU's Rx thread. If the thread being destroyed is bound to
- * the CPU processing this context the skbs will be freed.
- */
-static void fcoe_percpu_thread_destroy(unsigned int cpu)
-{
- struct fcoe_percpu_s *p;
- struct task_struct *thread;
struct page *crc_eof;
- struct sk_buff *skb;
-#ifdef CONFIG_SMP
- struct fcoe_percpu_s *p0;
- unsigned targ_cpu = get_cpu();
-#endif /* CONFIG_SMP */
-
- FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
+ struct fcoe_percpu_s *p;
- /* Prevent any new skbs from being queued for this CPU. */
- p = &per_cpu(fcoe_percpu, cpu);
+ p = per_cpu_ptr(&fcoe_percpu, cpu);
spin_lock_bh(&p->fcoe_rx_list.lock);
- thread = p->thread;
- p->thread = NULL;
crc_eof = p->crc_eof_page;
p->crc_eof_page = NULL;
p->crc_eof_offset = 0;
spin_unlock_bh(&p->fcoe_rx_list.lock);
-#ifdef CONFIG_SMP
- /*
- * Don't bother moving the skb's if this context is running
- * on the same CPU that is having its thread destroyed. This
- * can easily happen when the module is removed.
- */
- if (cpu != targ_cpu) {
- p0 = &per_cpu(fcoe_percpu, targ_cpu);
- spin_lock_bh(&p0->fcoe_rx_list.lock);
- if (p0->thread) {
- FCOE_DBG("Moving frames from CPU %d to CPU %d\n",
- cpu, targ_cpu);
-
- while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
- __skb_queue_tail(&p0->fcoe_rx_list, skb);
- spin_unlock_bh(&p0->fcoe_rx_list.lock);
- } else {
- /*
- * The targeted CPU is not initialized and cannot accept
- * new skbs. Unlock the targeted CPU and drop the skbs
- * on the CPU that is going offline.
- */
- while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
- kfree_skb(skb);
- spin_unlock_bh(&p0->fcoe_rx_list.lock);
- }
- } else {
- /*
- * This scenario occurs when the module is being removed
- * and all threads are being destroyed. skbs will continue
- * to be shifted from the CPU thread that is being removed
- * to the CPU thread associated with the CPU that is processing
- * the module removal. Once there is only one CPU Rx thread it
- * will reach this case and we will drop all skbs and later
- * stop the thread.
- */
- spin_lock_bh(&p->fcoe_rx_list.lock);
- while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
- kfree_skb(skb);
- spin_unlock_bh(&p->fcoe_rx_list.lock);
- }
- put_cpu();
-#else
- /*
- * This a non-SMP scenario where the singular Rx thread is
- * being removed. Free all skbs and stop the thread.
- */
- spin_lock_bh(&p->fcoe_rx_list.lock);
- while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
- kfree_skb(skb);
- spin_unlock_bh(&p->fcoe_rx_list.lock);
-#endif
-
- if (thread)
- kthread_stop(thread);
-
if (crc_eof)
put_page(crc_eof);
-}
-
-/**
- * fcoe_cpu_callback() - Handler for CPU hotplug events
- * @nfb: The callback data block
- * @action: The event triggering the callback
- * @hcpu: The index of the CPU that the event is for
- *
- * This creates or destroys per-CPU data for fcoe
- *
- * Returns NOTIFY_OK always.
- */
-static int fcoe_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned cpu = (unsigned long)hcpu;
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- FCOE_DBG("CPU %x online: Create Rx thread\n", cpu);
- fcoe_percpu_thread_create(cpu);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- FCOE_DBG("CPU %x offline: Remove Rx thread\n", cpu);
- fcoe_percpu_thread_destroy(cpu);
- break;
- default:
- break;
- }
- return NOTIFY_OK;
+ flush_work(&p->work);
}
/**
@@ -1509,26 +1419,6 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
fps = &per_cpu(fcoe_percpu, cpu);
spin_lock(&fps->fcoe_rx_list.lock);
- if (unlikely(!fps->thread)) {
- /*
- * The targeted CPU is not ready, let's target
- * the first CPU now. For non-SMP systems this
- * will check the same CPU twice.
- */
- FCOE_NETDEV_DBG(netdev, "CPU is online, but no receive thread "
- "ready for incoming skb- using first online "
- "CPU.\n");
-
- spin_unlock(&fps->fcoe_rx_list.lock);
- cpu = cpumask_first(cpu_online_mask);
- fps = &per_cpu(fcoe_percpu, cpu);
- spin_lock(&fps->fcoe_rx_list.lock);
- if (!fps->thread) {
- spin_unlock(&fps->fcoe_rx_list.lock);
- goto err;
- }
- }
-
/*
* We now have a valid CPU that we're targeting for
* this skb. We also have this receive thread locked,
@@ -1543,8 +1433,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
* in softirq context.
*/
__skb_queue_tail(&fps->fcoe_rx_list, skb);
- if (fps->thread->state == TASK_INTERRUPTIBLE)
- wake_up_process(fps->thread);
+ schedule_work_on(cpu, &fps->work);
spin_unlock(&fps->fcoe_rx_list.lock);
return NET_RX_SUCCESS;
@@ -1713,15 +1602,6 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
}
/**
- * fcoe_percpu_flush_done() - Indicate per-CPU queue flush completion
- * @skb: The completed skb (argument required by destructor)
- */
-static void fcoe_percpu_flush_done(struct sk_buff *skb)
-{
- complete(&fcoe_flush_completion);
-}
-
-/**
* fcoe_filter_frames() - filter out bad fcoe frames, i.e. bad CRC
* @lport: The local port the frame was received on
* @fp: The received frame
@@ -1792,8 +1672,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
fr = fcoe_dev_from_skb(skb);
lport = fr->fr_dev;
if (unlikely(!lport)) {
- if (skb->destructor != fcoe_percpu_flush_done)
- FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb\n");
+ FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb\n");
kfree_skb(skb);
return;
}
@@ -1857,40 +1736,28 @@ drop:
}
/**
- * fcoe_percpu_receive_thread() - The per-CPU packet receive thread
- * @arg: The per-CPU context
+ * fcoe_receive_work() - The per-CPU worker
+ * @work: The work struct
*
- * Return: 0 for success
*/
-static int fcoe_percpu_receive_thread(void *arg)
+static void fcoe_receive_work(struct work_struct *work)
{
- struct fcoe_percpu_s *p = arg;
+ struct fcoe_percpu_s *p;
struct sk_buff *skb;
struct sk_buff_head tmp;
+ p = container_of(work, struct fcoe_percpu_s, work);
skb_queue_head_init(&tmp);
- set_user_nice(current, MIN_NICE);
-
- while (!kthread_should_stop()) {
-
- spin_lock_bh(&p->fcoe_rx_list.lock);
- skb_queue_splice_init(&p->fcoe_rx_list, &tmp);
-
- if (!skb_queue_len(&tmp)) {
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_bh(&p->fcoe_rx_list.lock);
- schedule();
- continue;
- }
-
- spin_unlock_bh(&p->fcoe_rx_list.lock);
+ spin_lock_bh(&p->fcoe_rx_list.lock);
+ skb_queue_splice_init(&p->fcoe_rx_list, &tmp);
+ spin_unlock_bh(&p->fcoe_rx_list.lock);
- while ((skb = __skb_dequeue(&tmp)) != NULL)
- fcoe_recv_frame(skb);
+ if (!skb_queue_len(&tmp))
+ return;
- }
- return 0;
+ while ((skb = __skb_dequeue(&tmp)))
+ fcoe_recv_frame(skb);
}
/**
@@ -2163,6 +2030,32 @@ static int fcoe_ctlr_enabled(struct fcoe_ctlr_device *cdev)
}
/**
+ * fcoe_ctlr_mode() - Switch FIP mode
+ * @ctlr_dev: The FCoE controller that is being modified
+ *
+ * When the FIP mode has been changed, we need to update
+ * the multicast addresses to ensure we get the correct
+ * frames.
+ */
+static void fcoe_ctlr_mode(struct fcoe_ctlr_device *ctlr_dev)
+{
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+ struct fcoe_interface *fcoe = fcoe_ctlr_priv(ctlr);
+
+ if (ctlr_dev->mode == FIP_CONN_TYPE_VN2VN &&
+ ctlr->mode != FIP_MODE_VN2VN) {
+ dev_mc_del(fcoe->netdev, FIP_ALL_ENODE_MACS);
+ dev_mc_add(fcoe->netdev, FIP_ALL_VN2VN_MACS);
+ dev_mc_add(fcoe->netdev, FIP_ALL_P2P_MACS);
+ } else if (ctlr->mode != FIP_MODE_FABRIC) {
+ dev_mc_del(fcoe->netdev, FIP_ALL_VN2VN_MACS);
+ dev_mc_del(fcoe->netdev, FIP_ALL_P2P_MACS);
+ dev_mc_add(fcoe->netdev, FIP_ALL_ENODE_MACS);
+ }
+ fcoe_ctlr_set_fip_mode(ctlr_dev);
+}
+
+/**
* fcoe_destroy() - Destroy a FCoE interface
* @netdev : The net_device object the Ethernet interface to create on
*
@@ -2317,7 +2210,7 @@ enum fcoe_create_link_state {
* consolidation of code can be done when that interface is
* removed.
*/
-static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode,
+static int _fcoe_create(struct net_device *netdev, enum fip_mode fip_mode,
enum fcoe_create_link_state link_state)
{
int rc = 0;
@@ -2406,7 +2299,7 @@ out:
*
* Returns: 0 for success
*/
-static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
+static int fcoe_create(struct net_device *netdev, enum fip_mode fip_mode)
{
return _fcoe_create(netdev, fip_mode, FCOE_CREATE_LINK_UP);
}
@@ -2450,36 +2343,19 @@ static int fcoe_link_ok(struct fc_lport *lport)
*
* Must be called with fcoe_create_mutex held to single-thread completion.
*
- * This flushes the pending skbs by adding a new skb to each queue and
- * waiting until they are all freed. This assures us that not only are
- * there no packets that will be handled by the lport, but also that any
- * threads already handling packet have returned.
+ * This flushes the pending skbs by flushing the work item for each CPU. The work
+ * item on each possible CPU is flushed because we may have used the per-CPU
+ * struct of an offline CPU.
*/
static void fcoe_percpu_clean(struct fc_lport *lport)
{
struct fcoe_percpu_s *pp;
- struct sk_buff *skb;
unsigned int cpu;
for_each_possible_cpu(cpu) {
pp = &per_cpu(fcoe_percpu, cpu);
- if (!pp->thread || !cpu_online(cpu))
- continue;
-
- skb = dev_alloc_skb(0);
- if (!skb)
- continue;
-
- skb->destructor = fcoe_percpu_flush_done;
-
- spin_lock_bh(&pp->fcoe_rx_list.lock);
- __skb_queue_tail(&pp->fcoe_rx_list, skb);
- if (pp->fcoe_rx_list.qlen == 1)
- wake_up_process(pp->thread);
- spin_unlock_bh(&pp->fcoe_rx_list.lock);
-
- wait_for_completion(&fcoe_flush_completion);
+ flush_work(&pp->work);
}
}
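
The conversion in this file replaces per-CPU receive kthreads with per-CPU work items: skbs are queued under the list lock, the work is kicked with schedule_work_on(), and teardown is a flush_work() on every possible CPU. A compact sketch of the pattern; the names mirror fcoe's but this is illustrative rather than the driver code itself, and it assumes rx_list/work are initialized at module init (skb_queue_head_init()/INIT_WORK()) as fcoe_init() now does:

	struct pcpu_rx {
		struct sk_buff_head	rx_list;
		struct work_struct	work;
	};
	static DEFINE_PER_CPU(struct pcpu_rx, pcpu_rx);

	static void rx_enqueue(unsigned int cpu, struct sk_buff *skb)
	{
		struct pcpu_rx *p = per_cpu_ptr(&pcpu_rx, cpu);

		spin_lock(&p->rx_list.lock);
		__skb_queue_tail(&p->rx_list, skb);
		schedule_work_on(cpu, &p->work);	/* run the worker on that CPU */
		spin_unlock(&p->rx_list.lock);
	}

	static void rx_flush_all(void)
	{
		unsigned int cpu;

		/* Flush on every possible CPU: an offline CPU's per-CPU
		 * struct may still hold queued skbs, as noted above. */
		for_each_possible_cpu(cpu)
			flush_work(&per_cpu_ptr(&pcpu_rx, cpu)->work);
	}
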
@@ -2619,28 +2495,17 @@ static int __init fcoe_init(void)
if (rc) {
printk(KERN_ERR "failed to register an fcoe transport, check "
"if libfcoe is loaded\n");
- return rc;
+ goto out_destroy;
}
mutex_lock(&fcoe_config_mutex);
for_each_possible_cpu(cpu) {
- p = &per_cpu(fcoe_percpu, cpu);
+ p = per_cpu_ptr(&fcoe_percpu, cpu);
+ INIT_WORK(&p->work, fcoe_receive_work);
skb_queue_head_init(&p->fcoe_rx_list);
}
- cpu_notifier_register_begin();
-
- for_each_online_cpu(cpu)
- fcoe_percpu_thread_create(cpu);
-
- /* Initialize per CPU interrupt thread */
- rc = __register_hotcpu_notifier(&fcoe_cpu_notifier);
- if (rc)
- goto out_free;
-
- cpu_notifier_register_done();
-
/* Setup link change notification */
fcoe_dev_setup();
@@ -2652,13 +2517,8 @@ static int __init fcoe_init(void)
return 0;
out_free:
- for_each_online_cpu(cpu) {
- fcoe_percpu_thread_destroy(cpu);
- }
-
- cpu_notifier_register_done();
-
mutex_unlock(&fcoe_config_mutex);
+out_destroy:
destroy_workqueue(fcoe_wq);
return rc;
}
@@ -2690,14 +2550,8 @@ static void __exit fcoe_exit(void)
}
rtnl_unlock();
- cpu_notifier_register_begin();
-
- for_each_online_cpu(cpu)
- fcoe_percpu_thread_destroy(cpu);
-
- __unregister_hotcpu_notifier(&fcoe_cpu_notifier);
-
- cpu_notifier_register_done();
+ for_each_possible_cpu(cpu)
+ fcoe_thread_cleanup_local(cpu);
mutex_unlock(&fcoe_config_mutex);
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 2b53672bf..6aa4820f6 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -80,6 +80,7 @@ struct fcoe_interface {
struct net_device *realdev;
struct packet_type fcoe_packet_type;
struct packet_type fip_packet_type;
+ struct packet_type fip_vlan_packet_type;
struct fc_exch_mgr *oem;
u8 removed;
u8 priority;
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 3e83d485f..dcf36537a 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -59,6 +59,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *, struct sk_buff *);
static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *);
static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *, u32, u8 *);
+static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *, struct sk_buff *);
+
static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
static u8 fcoe_all_enode[ETH_ALEN] = FIP_ALL_ENODE_MACS;
static u8 fcoe_all_vn2vn[ETH_ALEN] = FIP_ALL_VN2VN_MACS;
@@ -149,6 +151,7 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode)
{
fcoe_ctlr_set_state(fip, FIP_ST_LINK_WAIT);
fip->mode = mode;
+ fip->fip_resp = false;
INIT_LIST_HEAD(&fip->fcfs);
mutex_init(&fip->ctlr_mutex);
spin_lock_init(&fip->ctlr_lock);
@@ -991,7 +994,7 @@ static int fcoe_ctlr_parse_adv(struct fcoe_ctlr *fip,
LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x "
"in FIP adv\n", desc->fip_dtype);
/* standard says ignore unknown descriptors >= 128 */
- if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
+ if (desc->fip_dtype < FIP_DT_NON_CRITICAL)
return -EINVAL;
break;
}
@@ -1232,7 +1235,7 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x "
"in FIP adv\n", desc->fip_dtype);
/* standard says ignore unknown descriptors >= 128 */
- if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
+ if (desc->fip_dtype < FIP_DT_NON_CRITICAL)
goto drop;
if (desc_cnt <= 2) {
LIBFCOE_FIP_DBG(fip, "FIP descriptors "
@@ -1410,7 +1413,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
break;
default:
/* standard says ignore unknown descriptors >= 128 */
- if (desc->fip_dtype < FIP_DT_VENDOR_BASE)
+ if (desc->fip_dtype < FIP_DT_NON_CRITICAL)
goto err;
break;
}
@@ -1513,6 +1516,7 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb)
struct fip_header *fiph;
struct ethhdr *eh;
enum fip_state state;
+ bool fip_vlan_resp = false;
u16 op;
u8 sub;
@@ -1546,11 +1550,17 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb)
state = FIP_ST_ENABLED;
LIBFCOE_FIP_DBG(fip, "Using FIP mode\n");
}
+ fip_vlan_resp = fip->fip_resp;
mutex_unlock(&fip->ctlr_mutex);
if (fip->mode == FIP_MODE_VN2VN && op == FIP_OP_VN2VN)
return fcoe_ctlr_vn_recv(fip, skb);
+ if (fip_vlan_resp && op == FIP_OP_VLAN) {
+ LIBFCOE_FIP_DBG(fip, "fip vlan discovery\n");
+ return fcoe_ctlr_vlan_recv(fip, skb);
+ }
+
if (state != FIP_ST_ENABLED && state != FIP_ST_VNMP_UP &&
state != FIP_ST_VNMP_CLAIM)
goto drop;
@@ -1989,7 +1999,7 @@ static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip,
const u8 *dest, size_t min_len)
{
struct sk_buff *skb;
- struct fip_frame {
+ struct fip_vn2vn_probe_frame {
struct ethhdr eth;
struct fip_header fip;
struct fip_mac_desc mac;
@@ -2016,7 +2026,7 @@ static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip,
if (!skb)
return;
- frame = (struct fip_frame *)skb->data;
+ frame = (struct fip_vn2vn_probe_frame *)skb->data;
memset(frame, 0, len);
memcpy(frame->eth.h_dest, dest, ETH_ALEN);
@@ -2338,7 +2348,7 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x "
"in FIP probe\n", dtype);
/* standard says ignore unknown descriptors >= 128 */
- if (dtype < FIP_DT_VENDOR_BASE)
+ if (dtype < FIP_DT_NON_CRITICAL)
return -EINVAL;
break;
}
@@ -2496,14 +2506,13 @@ static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *fip, u32 port_id, u8 *mac)
struct fcoe_rport *frport;
int ret = -1;
- rcu_read_lock();
rdata = lport->tt.rport_lookup(lport, port_id);
if (rdata) {
frport = fcoe_ctlr_rport(rdata);
memcpy(mac, frport->enode_mac, ETH_ALEN);
ret = 0;
+ kref_put(&rdata->kref, lport->tt.rport_destroy);
}
- rcu_read_unlock();
return ret;
}
@@ -2585,11 +2594,7 @@ static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip,
fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
return;
}
- mutex_lock(&lport->disc.disc_mutex);
rdata = lport->tt.rport_lookup(lport, new->ids.port_id);
- if (rdata)
- kref_get(&rdata->kref);
- mutex_unlock(&lport->disc.disc_mutex);
if (rdata) {
if (rdata->ids.node_name == new->ids.node_name &&
rdata->ids.port_name == new->ids.port_name) {
@@ -2709,6 +2714,220 @@ drop:
}
/**
+ * fcoe_ctlr_vlan_parse - parse vlan discovery request or response
+ * @fip: The FCoE controller
+ * @skb: incoming packet
+ * @rdata: buffer for resulting parsed VLAN entry plus fcoe_rport
+ *
+ * Returns non-zero error number on error.
+ * Does not consume the packet.
+ */
+static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip,
+ struct sk_buff *skb,
+ struct fc_rport_priv *rdata)
+{
+ struct fip_header *fiph;
+ struct fip_desc *desc = NULL;
+ struct fip_mac_desc *macd = NULL;
+ struct fip_wwn_desc *wwn = NULL;
+ struct fcoe_rport *frport;
+ size_t rlen;
+ size_t dlen;
+ u32 desc_mask = 0;
+ u32 dtype;
+ u8 sub;
+
+ memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
+ frport = fcoe_ctlr_rport(rdata);
+
+ fiph = (struct fip_header *)skb->data;
+ frport->flags = ntohs(fiph->fip_flags);
+
+ sub = fiph->fip_subcode;
+ switch (sub) {
+ case FIP_SC_VL_REQ:
+ desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME);
+ break;
+ default:
+		LIBFCOE_FIP_DBG(fip, "vlan_parse unknown subcode %u\n", sub);
+ return -EINVAL;
+ }
+
+ rlen = ntohs(fiph->fip_dl_len) * 4;
+ if (rlen + sizeof(*fiph) > skb->len)
+ return -EINVAL;
+
+ desc = (struct fip_desc *)(fiph + 1);
+ while (rlen > 0) {
+ dlen = desc->fip_dlen * FIP_BPW;
+ if (dlen < sizeof(*desc) || dlen > rlen)
+ return -EINVAL;
+
+ dtype = desc->fip_dtype;
+ if (dtype < 32) {
+ if (!(desc_mask & BIT(dtype))) {
+				LIBFCOE_FIP_DBG(fip,
+						"unexpected or duplicated "
+						"descriptor type %u in "
+						"FIP VLAN subtype %u\n",
+						dtype, sub);
+ return -EINVAL;
+ }
+ desc_mask &= ~BIT(dtype);
+ }
+
+ switch (dtype) {
+ case FIP_DT_MAC:
+ if (dlen != sizeof(struct fip_mac_desc))
+ goto len_err;
+ macd = (struct fip_mac_desc *)desc;
+ if (!is_valid_ether_addr(macd->fd_mac)) {
+ LIBFCOE_FIP_DBG(fip,
+					"Invalid MAC addr %pM in FIP VLAN request\n",
+ macd->fd_mac);
+ return -EINVAL;
+ }
+ memcpy(frport->enode_mac, macd->fd_mac, ETH_ALEN);
+ break;
+ case FIP_DT_NAME:
+ if (dlen != sizeof(struct fip_wwn_desc))
+ goto len_err;
+ wwn = (struct fip_wwn_desc *)desc;
+ rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn);
+ break;
+ default:
+ LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x "
+					"in FIP VLAN request\n", dtype);
+ /* standard says ignore unknown descriptors >= 128 */
+ if (dtype < FIP_DT_NON_CRITICAL)
+ return -EINVAL;
+ break;
+ }
+ desc = (struct fip_desc *)((char *)desc + dlen);
+ rlen -= dlen;
+ }
+ return 0;
+
+len_err:
+ LIBFCOE_FIP_DBG(fip, "FIP length error in descriptor type %x len %zu\n",
+ dtype, dlen);
+ return -EINVAL;
+}
+
+/**
+ * fcoe_ctlr_vlan_send() - Send a FIP VLAN Notification
+ * @fip: The FCoE controller
+ * @sub: sub-opcode for a VLAN or VN2VN VLAN notification
+ * @dest: The destination Ethernet MAC address
+ */
+static void fcoe_ctlr_vlan_send(struct fcoe_ctlr *fip,
+ enum fip_vlan_subcode sub,
+ const u8 *dest)
+{
+ struct sk_buff *skb;
+ struct fip_vlan_notify_frame {
+ struct ethhdr eth;
+ struct fip_header fip;
+ struct fip_mac_desc mac;
+ struct fip_vlan_desc vlan;
+	} __packed *frame;
+ size_t len;
+ size_t dlen;
+
+ len = sizeof(*frame);
+ dlen = sizeof(frame->mac) + sizeof(frame->vlan);
+ len = max(len, sizeof(struct ethhdr));
+
+ skb = dev_alloc_skb(len);
+ if (!skb)
+ return;
+
+ LIBFCOE_FIP_DBG(fip, "fip %s vlan notification, vlan %d\n",
+ fip->mode == FIP_MODE_VN2VN ? "vn2vn" : "fcf",
+ fip->lp->vlan);
+
+ frame = (struct fip_vlan_notify_frame *)skb->data;
+ memset(frame, 0, len);
+ memcpy(frame->eth.h_dest, dest, ETH_ALEN);
+
+ memcpy(frame->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
+ frame->eth.h_proto = htons(ETH_P_FIP);
+
+ frame->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
+ frame->fip.fip_op = htons(FIP_OP_VLAN);
+ frame->fip.fip_subcode = sub;
+ frame->fip.fip_dl_len = htons(dlen / FIP_BPW);
+
+ frame->mac.fd_desc.fip_dtype = FIP_DT_MAC;
+ frame->mac.fd_desc.fip_dlen = sizeof(frame->mac) / FIP_BPW;
+ memcpy(frame->mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);
+
+ frame->vlan.fd_desc.fip_dtype = FIP_DT_VLAN;
+ frame->vlan.fd_desc.fip_dlen = sizeof(frame->vlan) / FIP_BPW;
+ put_unaligned_be16(fip->lp->vlan, &frame->vlan.fd_vlan);
+
+ skb_put(skb, len);
+ skb->protocol = htons(ETH_P_FIP);
+ skb->priority = fip->priority;
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+
+ fip->send(fip, skb);
+}
+
+/**
+ * fcoe_ctlr_vlan_disc_reply() - send FIP VLAN Discovery Notification.
+ * @fip: The FCoE controller
+ * @rdata: The parsed remote-port entry carrying the requester's MAC
+ *
+ * Called with ctlr_mutex held.
+ */
+static void fcoe_ctlr_vlan_disc_reply(struct fcoe_ctlr *fip,
+ struct fc_rport_priv *rdata)
+{
+ struct fcoe_rport *frport = fcoe_ctlr_rport(rdata);
+ enum fip_vlan_subcode sub = FIP_SC_VL_NOTE;
+
+ if (fip->mode == FIP_MODE_VN2VN)
+ sub = FIP_SC_VL_VN2VN_NOTE;
+
+ fcoe_ctlr_vlan_send(fip, sub, frport->enode_mac);
+}
+
+/**
+ * fcoe_ctlr_vlan_recv - FIP VLAN discovery request receive handler.
+ * @fip: The FCoE controller
+ * @skb: The received FIP VLAN discovery request
+ *
+ */
+static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
+{
+ struct fip_header *fiph;
+ enum fip_vlan_subcode sub;
+ struct {
+ struct fc_rport_priv rdata;
+ struct fcoe_rport frport;
+ } buf;
+ int rc;
+
+ fiph = (struct fip_header *)skb->data;
+ sub = fiph->fip_subcode;
+ rc = fcoe_ctlr_vlan_parse(fip, skb, &buf.rdata);
+ if (rc) {
+ LIBFCOE_FIP_DBG(fip, "vlan_recv vlan_parse error %d\n", rc);
+ goto drop;
+ }
+ mutex_lock(&fip->ctlr_mutex);
+ if (sub == FIP_SC_VL_REQ)
+ fcoe_ctlr_vlan_disc_reply(fip, &buf.rdata);
+ mutex_unlock(&fip->ctlr_mutex);
+
+drop:
+ kfree_skb(skb);
+ return rc;
+}
+
+/**
* fcoe_ctlr_disc_recv - discovery receive handler for VN2VN mode.
* @lport: The local port
* @fp: The received frame
@@ -2869,7 +3088,7 @@ unlock:
* when nothing is happening.
*/
static void fcoe_ctlr_mode_set(struct fc_lport *lport, struct fcoe_ctlr *fip,
- enum fip_state fip_mode)
+ enum fip_mode fip_mode)
{
void *priv;
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
index 045c4e11e..0675fd128 100644
--- a/drivers/scsi/fcoe/fcoe_sysfs.c
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -385,6 +385,44 @@ static FCOE_DEVICE_ATTR(ctlr, enabled, S_IRUGO | S_IWUSR,
show_ctlr_enabled_state,
store_ctlr_enabled);
+static ssize_t store_ctlr_fip_resp(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr);
+
+ mutex_lock(&fip->ctlr_mutex);
+ if ((buf[1] == '\0') || ((buf[1] == '\n') && (buf[2] == '\0'))) {
+ if (buf[0] == '1') {
+ fip->fip_resp = 1;
+ mutex_unlock(&fip->ctlr_mutex);
+ return count;
+ }
+ if (buf[0] == '0') {
+ fip->fip_resp = 0;
+ mutex_unlock(&fip->ctlr_mutex);
+ return count;
+ }
+ }
+ mutex_unlock(&fip->ctlr_mutex);
+ return -EINVAL;
+}
+
+static ssize_t show_ctlr_fip_resp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr);
+
+ return sprintf(buf, "%d\n", fip->fip_resp ? 1 : 0);
+}
+
+static FCOE_DEVICE_ATTR(ctlr, fip_vlan_responder, S_IRUGO | S_IWUSR,
+ show_ctlr_fip_resp,
+ store_ctlr_fip_resp);
+
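
The store handler above hand-parses "0"/"1" with an optional trailing newline. A hedged alternative sketch using kstrtobool(), which exists in this kernel generation; note that kstrtobool() also accepts "y"/"n" and similar spellings, so this is a looser parse than the code above, not a drop-in equivalent:

	static ssize_t store_ctlr_fip_resp_alt(struct device *dev,
					       struct device_attribute *attr,
					       const char *buf, size_t count)
	{
		struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
		struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr);
		bool enable;

		if (kstrtobool(buf, &enable))
			return -EINVAL;

		mutex_lock(&fip->ctlr_mutex);
		fip->fip_resp = enable;
		mutex_unlock(&fip->ctlr_mutex);
		return count;
	}
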
static ssize_t
store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev,
struct device_attribute *attr,
@@ -467,6 +505,7 @@ static struct attribute_group fcoe_ctlr_lesb_attr_group = {
};
static struct attribute *fcoe_ctlr_attrs[] = {
+ &device_attr_fcoe_ctlr_fip_vlan_responder.attr,
&device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr,
&device_attr_fcoe_ctlr_enabled.attr,
&device_attr_fcoe_ctlr_mode.attr,
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index 641c60e8f..7028dd37e 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -133,10 +133,10 @@ int fcoe_link_speed_update(struct fc_lport *lport)
case SPEED_10000:
lport->link_speed = FC_PORTSPEED_10GBIT;
break;
- case 20000:
+ case SPEED_20000:
lport->link_speed = FC_PORTSPEED_20GBIT;
break;
- case 40000:
+ case SPEED_40000:
lport->link_speed = FC_PORTSPEED_40GBIT;
break;
default:
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 67669a9e7..3b7da66e2 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -359,7 +359,7 @@ static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
vlan->fip.fip_op = htons(FIP_OP_VLAN);
- vlan->fip.fip_subcode = FIP_SC_VL_REQ;
+ vlan->fip.fip_subcode = FIP_SC_VL_NOTE;
vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);
vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
@@ -551,7 +551,7 @@ static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
goto drop;
/* pass it on to fcoe */
ret = 1;
- } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_REP) {
+ } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) {
/* set the vlan as used */
fnic_fcoe_process_vlan_resp(fnic, skb);
ret = 0;
@@ -954,8 +954,8 @@ int fnic_alloc_rq_frame(struct vnic_rq *rq)
skb_put(skb, len);
pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
- r = pci_dma_mapping_error(fnic->pdev, pa);
- if (r) {
+ if (pci_dma_mapping_error(fnic->pdev, pa)) {
+ r = -ENOMEM;
printk(KERN_ERR "PCI mapping failed with error %d\n", r);
goto free_skb;
}
@@ -1093,8 +1093,8 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);
- ret = pci_dma_mapping_error(fnic->pdev, pa);
- if (ret) {
+ if (pci_dma_mapping_error(fnic->pdev, pa)) {
+ ret = -ENOMEM;
printk(KERN_ERR "DMA map failed with error %d\n", ret);
goto free_skb_on_err;
}
@@ -1308,7 +1308,7 @@ void fnic_handle_fip_timer(struct fnic *fnic)
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- if (fnic->ctlr.mode == FIP_ST_NON_FIP)
+ if (fnic->ctlr.mode == FIP_MODE_NON_FIP)
return;
spin_lock_irqsave(&fnic->vlans_lock, flags);
diff --git a/drivers/scsi/fnic/fnic_fip.h b/drivers/scsi/fnic/fnic_fip.h
index 87e74c2ab..7761f33ab 100644
--- a/drivers/scsi/fnic/fnic_fip.h
+++ b/drivers/scsi/fnic/fnic_fip.h
@@ -26,14 +26,6 @@
#define FINC_MAX_FLOGI_REJECTS 8
-/*
- * FIP_DT_VLAN descriptor.
- */
-struct fip_vlan_desc {
- struct fip_desc fd_desc;
- __be16 fd_vlan;
-} __attribute__((packed));
-
struct vlan {
__be16 vid;
__be16 type;
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index d7cab724f..4731d3241 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -23,7 +23,7 @@
#include <scsi/sas_ata.h>
#include <scsi/libsas.h>
-#define DRV_VERSION "v1.4"
+#define DRV_VERSION "v1.5"
#define HISI_SAS_MAX_PHYS 9
#define HISI_SAS_MAX_QUEUES 32
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index bd20c5488..f96560431 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -721,30 +721,41 @@ static int reset_hw_v2_hw(struct hisi_hba *hisi_hba)
return -EIO;
}
- /* reset and disable clock*/
- regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg,
- reset_val);
- regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg + 4,
- reset_val);
- msleep(1);
- regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val);
- if (reset_val != (val & reset_val)) {
- dev_err(dev, "SAS reset fail.\n");
- return -EIO;
- }
+ if (ACPI_HANDLE(dev)) {
+ acpi_status s;
- /* De-reset and enable clock*/
- regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg + 4,
- reset_val);
- regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg,
- reset_val);
- msleep(1);
- regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg,
- &val);
- if (val & reset_val) {
- dev_err(dev, "SAS de-reset fail.\n");
- return -EIO;
- }
+ s = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST", NULL, NULL);
+ if (ACPI_FAILURE(s)) {
+ dev_err(dev, "Reset failed\n");
+ return -EIO;
+ }
+ } else if (hisi_hba->ctrl) {
+ /* reset and disable clock*/
+ regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg,
+ reset_val);
+ regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg + 4,
+ reset_val);
+ msleep(1);
+ regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val);
+ if (reset_val != (val & reset_val)) {
+ dev_err(dev, "SAS reset fail.\n");
+ return -EIO;
+ }
+
+ /* De-reset and enable clock*/
+ regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg + 4,
+ reset_val);
+ regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg,
+ reset_val);
+ msleep(1);
+ regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg,
+ &val);
+ if (val & reset_val) {
+ dev_err(dev, "SAS de-reset fail.\n");
+ return -EIO;
+ }
+ } else
+ dev_warn(dev, "no reset method\n");
return 0;
}
@@ -752,13 +763,12 @@ static int reset_hw_v2_hw(struct hisi_hba *hisi_hba)
static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
{
struct device *dev = &hisi_hba->pdev->dev;
- struct device_node *np = dev->of_node;
int i;
/* Global registers init */
/* Deal with am-max-transmissions quirk */
- if (of_get_property(np, "hip06-sas-v2-quirk-amt", NULL)) {
+ if (device_property_present(dev, "hip06-sas-v2-quirk-amt")) {
hisi_sas_write32(hisi_hba, AM_CFG_MAX_TRANS, 0x2020);
hisi_sas_write32(hisi_hba, AM_CFG_SINGLE_PORT_MAX_TRANS,
0x2020);
@@ -1902,14 +1912,9 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct sas_ha_struct *sas_ha = &hisi_hba->sha;
- unsigned long flags;
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
-
- spin_lock_irqsave(&hisi_hba->lock, flags);
sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
-
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
CHL_INT0_SL_RX_BCST_ACK_MSK);
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
@@ -2260,12 +2265,20 @@ static const struct of_device_id sas_v2_of_match[] = {
};
MODULE_DEVICE_TABLE(of, sas_v2_of_match);
+static const struct acpi_device_id sas_v2_acpi_match[] = {
+ { "HISI0162", 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(acpi, sas_v2_acpi_match);
+
static struct platform_driver hisi_sas_v2_driver = {
.probe = hisi_sas_v2_probe,
.remove = hisi_sas_v2_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = sas_v2_of_match,
+ .acpi_match_table = ACPI_PTR(sas_v2_acpi_match),
},
};
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 1547bd93c..ec6381e57 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -486,7 +486,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
else
shost->dma_boundary = 0xffffffff;
- shost->use_blk_mq = scsi_use_blk_mq && !shost->hostt->disable_blk_mq;
+ shost->use_blk_mq = scsi_use_blk_mq;
device_initialize(&shost->shost_gendev);
dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index ff8dcd5b0..030d0023e 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -4105,6 +4105,70 @@ static int hpsa_set_local_logical_count(struct ctlr_info *h,
return rc;
}
+static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes)
+{
+ struct bmic_identify_physical_device *id_phys;
+ bool is_spare = false;
+ int rc;
+
+ id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
+ if (!id_phys)
+ return false;
+
+ rc = hpsa_bmic_id_physical_device(h,
+ lunaddrbytes,
+ GET_BMIC_DRIVE_NUMBER(lunaddrbytes),
+ id_phys, sizeof(*id_phys));
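+	/* BMIC identify-physical-device reports a spare via bit 6 of more_flags */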
+ if (rc == 0)
+ is_spare = (id_phys->more_flags >> 6) & 0x01;
+
+ kfree(id_phys);
+ return is_spare;
+}
+
+#define RPL_DEV_FLAG_NON_DISK 0x1
+#define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED 0x2
+#define RPL_DEV_FLAG_UNCONFIG_DISK 0x4
+
+#define BMIC_DEVICE_TYPE_ENCLOSURE 6
+
+static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes,
+ struct ext_report_lun_entry *rle)
+{
+ u8 device_flags;
+ u8 device_type;
+
+ if (!MASKED_DEVICE(lunaddrbytes))
+ return false;
+
+ device_flags = rle->device_flags;
+ device_type = rle->device_type;
+
+ if (device_flags & RPL_DEV_FLAG_NON_DISK) {
+ if (device_type == BMIC_DEVICE_TYPE_ENCLOSURE)
+ return false;
+ return true;
+ }
+
+ if (!(device_flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED))
+ return false;
+
+ if (device_flags & RPL_DEV_FLAG_UNCONFIG_DISK)
+ return false;
+
+ /*
+	 * Spares may be spun down. We do not want to send an
+	 * Inquiry to a RAID-set spare drive, as that would spin
+	 * it up; that is a performance hit because I/O to the
+	 * RAID device stops while the spin-up occurs, which can
+	 * take over 50 seconds.
+ */
+ if (hpsa_is_disk_spare(h, lunaddrbytes))
+ return true;
+
+ return false;
+}
static void hpsa_update_scsi_devices(struct ctlr_info *h)
{
@@ -4198,6 +4262,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
u8 *lunaddrbytes, is_OBDR = 0;
int rc = 0;
int phys_dev_index = i - (raid_ctlr_position == 0);
+ bool skip_device = false;
physical_device = i < nphysicals + (raid_ctlr_position == 0);
@@ -4205,11 +4270,15 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
i, nphysicals, nlogicals, physdev_list, logdev_list);
- /* skip masked non-disk devices */
- if (MASKED_DEVICE(lunaddrbytes) && physical_device &&
- (physdev_list->LUN[phys_dev_index].device_type != 0x06) &&
- (physdev_list->LUN[phys_dev_index].device_flags & 0x01))
- continue;
+ /*
+ * Skip over some devices such as a spare.
+ */
+ if (!tmpdevice->external && physical_device) {
+ skip_device = hpsa_skip_device(h, lunaddrbytes,
+ &physdev_list->LUN[phys_dev_index]);
+ if (skip_device)
+ continue;
+ }
/* Get device type, vendor, model, device id */
rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
@@ -6455,7 +6524,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
}
rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
- DEFAULT_TIMEOUT);
+ NO_TIMEOUT);
if (iocommand.buf_size > 0)
hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
check_ioctl_unit_attention(h, c);
@@ -6588,7 +6657,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
}
status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
- DEFAULT_TIMEOUT);
+ NO_TIMEOUT);
if (sg_used)
hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
check_ioctl_unit_attention(h, c);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index fc523c3e5..ab67ec4b6 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -4722,6 +4722,8 @@ static void ibmvfc_rport_add_thread(struct work_struct *work)
tgt_dbg(tgt, "Setting rport roles\n");
fc_remote_port_rolechg(rport, tgt->ids.roles);
put_device(&rport->dev);
+ } else {
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
}
kref_put(&tgt->kref, ibmvfc_release_tgt);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 8fae03215..5c70a52ad 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -26,7 +26,7 @@
#include <linux/list.h>
#include <linux/types.h>
-#include "viosrp.h"
+#include <scsi/viosrp.h>
#define IBMVFC_NAME "ibmvfc"
#define IBMVFC_DRIVER_VERSION "1.0.11"
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 106736739..e0f6c3aeb 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -33,7 +33,7 @@
#include <linux/list.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
-#include "viosrp.h"
+#include <scsi/viosrp.h>
struct scsi_cmnd;
struct Scsi_Host;
diff --git a/drivers/scsi/ibmvscsi_tgt/Makefile b/drivers/scsi/ibmvscsi_tgt/Makefile
new file mode 100644
index 000000000..0c060ce64
--- /dev/null
+++ b/drivers/scsi/ibmvscsi_tgt/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsis.o
+
+ibmvscsis-y := libsrp.o ibmvscsi_tgt.o
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
new file mode 100644
index 000000000..b29fef9d0
--- /dev/null
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -0,0 +1,4087 @@
+/*******************************************************************************
+ * IBM Virtual SCSI Target Driver
+ * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
+ * Santiago Leon (santil@us.ibm.com) IBM Corp.
+ * Linda Xie (lxie@us.ibm.com) IBM Corp.
+ *
+ * Copyright (C) 2005-2011 FUJITA Tomonori <tomof@acm.org>
+ * Copyright (C) 2010 Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * Authors: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
+ * Authors: Michael Cyr <mikecyr@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ ****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/string.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include <asm/hvcall.h>
+#include <asm/vio.h>
+
+#include <scsi/viosrp.h>
+
+#include "ibmvscsi_tgt.h"
+
+#define IBMVSCSIS_VERSION "v0.2"
+
+#define INITIAL_SRP_LIMIT 800
+#define DEFAULT_MAX_SECTORS 256
+
+static uint max_vdma_size = MAX_H_COPY_RDMA;
+
+static char system_id[SYS_ID_NAME_LEN] = "";
+static char partition_name[PARTITION_NAMELEN] = "UNKNOWN";
+static uint partition_number = -1;
+
+/* Adapter list and lock to control it */
+static DEFINE_SPINLOCK(ibmvscsis_dev_lock);
+static LIST_HEAD(ibmvscsis_dev_list);
+
+static long ibmvscsis_parse_command(struct scsi_info *vscsi,
+ struct viosrp_crq *crq);
+
+static void ibmvscsis_adapter_idle(struct scsi_info *vscsi);
+
+static void ibmvscsis_determine_resid(struct se_cmd *se_cmd,
+ struct srp_rsp *rsp)
+{
+ u32 residual_count = se_cmd->residual_count;
+
+ if (!residual_count)
+ return;
+
+ if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+ if (se_cmd->data_direction == DMA_TO_DEVICE) {
+ /* residual data from an underflow write */
+ rsp->flags = SRP_RSP_FLAG_DOUNDER;
+ rsp->data_out_res_cnt = cpu_to_be32(residual_count);
+ } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
+ /* residual data from an underflow read */
+ rsp->flags = SRP_RSP_FLAG_DIUNDER;
+ rsp->data_in_res_cnt = cpu_to_be32(residual_count);
+ }
+ } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+ if (se_cmd->data_direction == DMA_TO_DEVICE) {
+ /* residual data from an overflow write */
+ rsp->flags = SRP_RSP_FLAG_DOOVER;
+ rsp->data_out_res_cnt = cpu_to_be32(residual_count);
+ } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
+ /* residual data from an overflow read */
+ rsp->flags = SRP_RSP_FLAG_DIOVER;
+ rsp->data_in_res_cnt = cpu_to_be32(residual_count);
+ }
+ }
+}
+
+/**
+ * connection_broken() - Determine if the connection to the client is good
+ * @vscsi: Pointer to our adapter structure
+ *
+ * This function attempts to send a ping MAD to the client. If the call to
+ * queue the request returns H_CLOSED then the connection has been broken
+ * and the function returns TRUE.
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt or Process environment
+ */
+static bool connection_broken(struct scsi_info *vscsi)
+{
+ struct viosrp_crq *crq;
+ u64 buffer[2] = { 0, 0 };
+ long h_return_code;
+ bool rc = false;
+
+ /* create a PING crq */
+ crq = (struct viosrp_crq *)&buffer;
+ crq->valid = VALID_CMD_RESP_EL;
+ crq->format = MESSAGE_IN_CRQ;
+ crq->status = PING;
+
+ h_return_code = h_send_crq(vscsi->dds.unit_id,
+ cpu_to_be64(buffer[MSG_HI]),
+ cpu_to_be64(buffer[MSG_LOW]));
+
+ pr_debug("connection_broken: rc %ld\n", h_return_code);
+
+ if (h_return_code == H_CLOSED)
+ rc = true;
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_unregister_command_q() - Helper Function-Unregister Command Queue
+ * @vscsi: Pointer to our adapter structure
+ *
+ * This function calls h_free_crq, then frees the interrupt bit, etc.
+ * It must release the lock before doing so because of the time it can take
+ * for h_free_crq in PHYP.
+ * NOTE: the caller must make sure that state and/or flags will prevent the
+ * interrupt handler from scheduling work.
+ * NOTE: anyone calling this function may need to set the CRQ_CLOSED flag;
+ * we can't do it here, because we don't have the lock.
+ *
+ * EXECUTION ENVIRONMENT:
+ * Process level
+ */
+static long ibmvscsis_unregister_command_q(struct scsi_info *vscsi)
+{
+ long qrc;
+ long rc = ADAPT_SUCCESS;
+ int ticks = 0;
+
+ do {
+ qrc = h_free_crq(vscsi->dds.unit_id);
+ switch (qrc) {
+ case H_SUCCESS:
+ break;
+
+ case H_HARDWARE:
+ case H_PARAMETER:
+ dev_err(&vscsi->dev, "unregister_command_q: error from h_free_crq %ld\n",
+ qrc);
+ rc = ERROR;
+ break;
+
+ case H_BUSY:
+ case H_LONG_BUSY_ORDER_1_MSEC:
+ /* msleep not good for small values */
+ usleep_range(1000, 2000);
+ ticks += 1;
+ break;
+ case H_LONG_BUSY_ORDER_10_MSEC:
+ usleep_range(10000, 20000);
+ ticks += 10;
+ break;
+ case H_LONG_BUSY_ORDER_100_MSEC:
+ msleep(100);
+ ticks += 100;
+ break;
+ case H_LONG_BUSY_ORDER_1_SEC:
+ ssleep(1);
+ ticks += 1000;
+ break;
+ case H_LONG_BUSY_ORDER_10_SEC:
+ ssleep(10);
+ ticks += 10000;
+ break;
+ case H_LONG_BUSY_ORDER_100_SEC:
+ ssleep(100);
+ ticks += 100000;
+ break;
+ default:
+ dev_err(&vscsi->dev, "unregister_command_q: unknown error %ld from h_free_crq\n",
+ qrc);
+ rc = ERROR;
+ break;
+ }
+
+ /*
+		 * Don't wait more than 300 seconds.
+		 * Ticks are in milliseconds, more or less.
+ */
+ if (ticks > 300000 && qrc != H_SUCCESS) {
+ rc = ERROR;
+ dev_err(&vscsi->dev, "Excessive wait for h_free_crq\n");
+ }
+ } while (qrc != H_SUCCESS && rc == ADAPT_SUCCESS);
+
+ pr_debug("Freeing CRQ: phyp rc %ld, rc %ld\n", qrc, rc);
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_delete_client_info() - Helper function to Delete Client Info
+ * @vscsi: Pointer to our adapter structure
+ * @client_closed: True if client closed its queue
+ *
+ * Deletes information specific to the client when the client goes away
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt or Process
+ */
+static void ibmvscsis_delete_client_info(struct scsi_info *vscsi,
+ bool client_closed)
+{
+ vscsi->client_cap = 0;
+
+ /*
+ * Some things we don't want to clear if we're closing the queue,
+ * because some clients don't resend the host handshake when they
+ * get a transport event.
+ */
+ if (client_closed)
+ vscsi->client_data.os_type = 0;
+}
+
+/**
+ * ibmvscsis_free_command_q() - Free Command Queue
+ * @vscsi: Pointer to our adapter structure
+ *
+ * This function calls unregister_command_q, then clears interrupts and
+ * any pending interrupt acknowledgments associated with the command q.
+ * It also clears memory if there is no error.
+ *
+ * PHYP does not meet the PAPR architecture, so we must give up the
+ * lock. This opens a timing hole regarding state change. To close
+ * the hole, this routine does accounting on any change that occurred
+ * while the lock was not held.
+ * NOTE: this routine must give up and then reacquire the interrupt
+ * lock; the caller must make sure that state and/or flags will
+ * prevent the interrupt handler from scheduling work.
+ *
+ * EXECUTION ENVIRONMENT:
+ * Process level, interrupt lock is held
+ */
+static long ibmvscsis_free_command_q(struct scsi_info *vscsi)
+{
+ int bytes;
+ u32 flags_under_lock;
+ u16 state_under_lock;
+ long rc = ADAPT_SUCCESS;
+
+ if (!(vscsi->flags & CRQ_CLOSED)) {
+ vio_disable_interrupts(vscsi->dma_dev);
+
+ state_under_lock = vscsi->new_state;
+ flags_under_lock = vscsi->flags;
+ vscsi->phyp_acr_state = 0;
+ vscsi->phyp_acr_flags = 0;
+
+ spin_unlock_bh(&vscsi->intr_lock);
+ rc = ibmvscsis_unregister_command_q(vscsi);
+ spin_lock_bh(&vscsi->intr_lock);
+
+ if (state_under_lock != vscsi->new_state)
+ vscsi->phyp_acr_state = vscsi->new_state;
+
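+ /* record any flag bits that were set while the lock was dropped */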
+ vscsi->phyp_acr_flags = ((~flags_under_lock) & vscsi->flags);
+
+ if (rc == ADAPT_SUCCESS) {
+ bytes = vscsi->cmd_q.size * PAGE_SIZE;
+ memset(vscsi->cmd_q.base_addr, 0, bytes);
+ vscsi->cmd_q.index = 0;
+ vscsi->flags |= CRQ_CLOSED;
+
+ ibmvscsis_delete_client_info(vscsi, false);
+ }
+
+ pr_debug("free_command_q: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
+ vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
+ vscsi->phyp_acr_state);
+ }
+ return rc;
+}
+
+/**
+ * ibmvscsis_cmd_q_dequeue() - Get valid Command element
+ * @mask: Mask to use in case index wraps
+ * @current_index: Current index into command queue
+ * @base_addr: Pointer to start of command queue
+ *
+ * Returns a pointer to a valid command element or NULL, if the command
+ * queue is empty
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt environment, interrupt lock held
+ */
+static struct viosrp_crq *ibmvscsis_cmd_q_dequeue(uint mask,
+ uint *current_index,
+ struct viosrp_crq *base_addr)
+{
+ struct viosrp_crq *ptr;
+
+ ptr = base_addr + *current_index;
+
+ if (ptr->valid) {
+ *current_index = (*current_index + 1) & mask;
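+ /* ensure the element contents are read only after the valid bit */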
+ dma_rmb();
+ } else {
+ ptr = NULL;
+ }
+
+ return ptr;
+}
+
+/**
+ * ibmvscsis_send_init_message() - send initialize message to the client
+ * @vscsi: Pointer to our adapter structure
+ * @format: Which Init Message format to send
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt environment, interrupt lock held
+ */
+static long ibmvscsis_send_init_message(struct scsi_info *vscsi, u8 format)
+{
+ struct viosrp_crq *crq;
+ u64 buffer[2] = { 0, 0 };
+ long rc;
+
+ crq = (struct viosrp_crq *)&buffer;
+ crq->valid = VALID_INIT_MSG;
+ crq->format = format;
+ rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]),
+ cpu_to_be64(buffer[MSG_LOW]));
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_check_init_msg() - Check init message valid
+ * @vscsi: Pointer to our adapter structure
+ * @format: Pointer to return format of Init Message, if any.
+ * Set to UNUSED_FORMAT if no Init Message in queue.
+ *
+ * Checks if an initialize message was queued by the initiator
+ * after the queue was created and before the interrupt was enabled.
+ *
+ * EXECUTION ENVIRONMENT:
+ * Process level only, interrupt lock held
+ */
+static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format)
+{
+ struct viosrp_crq *crq;
+ long rc = ADAPT_SUCCESS;
+
+ crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask, &vscsi->cmd_q.index,
+ vscsi->cmd_q.base_addr);
+ if (!crq) {
+ *format = (uint)UNUSED_FORMAT;
+ } else if (crq->valid == VALID_INIT_MSG && crq->format == INIT_MSG) {
+ *format = (uint)INIT_MSG;
+ crq->valid = INVALIDATE_CMD_RESP_EL;
+ dma_rmb();
+
+ /*
+ * the caller has ensured no initialize message was sent after
+ * the queue was created, so there should be no other message
+ * on the queue.
+ */
+ crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask,
+ &vscsi->cmd_q.index,
+ vscsi->cmd_q.base_addr);
+ if (crq) {
+ *format = (uint)(crq->format);
+ rc = ERROR;
+ crq->valid = INVALIDATE_CMD_RESP_EL;
+ dma_rmb();
+ }
+ } else {
+ *format = (uint)(crq->format);
+ rc = ERROR;
+ crq->valid = INVALIDATE_CMD_RESP_EL;
+ dma_rmb();
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_establish_new_q() - Establish new CRQ queue
+ * @vscsi: Pointer to our adapter structure
+ * @new_state: New state being established after resetting the queue
+ *
+ * Must be called with interrupt lock held.
+ */
+static long ibmvscsis_establish_new_q(struct scsi_info *vscsi, uint new_state)
+{
+ long rc = ADAPT_SUCCESS;
+ uint format;
+
+ vscsi->flags &= PRESERVE_FLAG_FIELDS;
+ vscsi->rsp_q_timer.timer_pops = 0;
+ vscsi->debit = 0;
+ vscsi->credit = 0;
+
+ rc = vio_enable_interrupts(vscsi->dma_dev);
+ if (rc) {
+ pr_warn("reset_queue: failed to enable interrupts, rc %ld\n",
+ rc);
+ return rc;
+ }
+
+ rc = ibmvscsis_check_init_msg(vscsi, &format);
+ if (rc) {
+ dev_err(&vscsi->dev, "reset_queue: check_init_msg failed, rc %ld\n",
+ rc);
+ return rc;
+ }
+
+ if (format == UNUSED_FORMAT && new_state == WAIT_CONNECTION) {
+ rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
+ switch (rc) {
+ case H_SUCCESS:
+ case H_DROPPED:
+ case H_CLOSED:
+ rc = ADAPT_SUCCESS;
+ break;
+
+ case H_PARAMETER:
+ case H_HARDWARE:
+ break;
+
+ default:
+ vscsi->state = UNDEFINED;
+ rc = H_HARDWARE;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_reset_queue() - Reset CRQ Queue
+ * @vscsi: Pointer to our adapter structure
+ * @new_state: New state to establish after resetting the queue
+ *
+ * This function calls h_free_q and then calls h_reg_q and does all
+ * of the bookkeeping to get us back to where we can communicate.
+ *
+ * Actually, we don't always call h_free_crq. A problem was discovered
+ * where one partition would close and reopen its queue, which would
+ * cause its partner to get a transport event, which would cause it to
+ * close and reopen its queue, which would cause the original partition
+ * to get a transport event, and so on. To prevent this, we don't
+ * actually close our queue if the client initiated the reset (i.e.
+ * either we got a transport event or we have detected that the client's
+ * queue is gone).
+ *
+ * EXECUTION ENVIRONMENT:
+ * Process environment, called with interrupt lock held
+ */
+static void ibmvscsis_reset_queue(struct scsi_info *vscsi, uint new_state)
+{
+ int bytes;
+ long rc = ADAPT_SUCCESS;
+
+ pr_debug("reset_queue: flags 0x%x\n", vscsi->flags);
+
+ /* don't reset, the client did it for us */
+ if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
+ vscsi->flags &= PRESERVE_FLAG_FIELDS;
+ vscsi->rsp_q_timer.timer_pops = 0;
+ vscsi->debit = 0;
+ vscsi->credit = 0;
+ vscsi->state = new_state;
+ vio_enable_interrupts(vscsi->dma_dev);
+ } else {
+ rc = ibmvscsis_free_command_q(vscsi);
+ if (rc == ADAPT_SUCCESS) {
+ vscsi->state = new_state;
+
+ bytes = vscsi->cmd_q.size * PAGE_SIZE;
+ rc = h_reg_crq(vscsi->dds.unit_id,
+ vscsi->cmd_q.crq_token, bytes);
+ if (rc == H_CLOSED || rc == H_SUCCESS) {
+ rc = ibmvscsis_establish_new_q(vscsi,
+ new_state);
+ }
+
+ if (rc != ADAPT_SUCCESS) {
+ pr_debug("reset_queue: reg_crq rc %ld\n", rc);
+
+ vscsi->state = ERR_DISCONNECTED;
+ vscsi->flags |= RESPONSE_Q_DOWN;
+ ibmvscsis_free_command_q(vscsi);
+ }
+ } else {
+ vscsi->state = ERR_DISCONNECTED;
+ vscsi->flags |= RESPONSE_Q_DOWN;
+ }
+ }
+}
+
+/**
+ * ibmvscsis_free_cmd_resources() - Free command resources
+ * @vscsi: Pointer to our adapter structure
+ * @cmd: Command which is no longer in use
+ *
+ * Must be called with interrupt lock held.
+ */
+static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
+ struct ibmvscsis_cmd *cmd)
+{
+ struct iu_entry *iue = cmd->iue;
+
+ switch (cmd->type) {
+ case TASK_MANAGEMENT:
+ case SCSI_CDB:
+ /*
+ * When the queue goes down this value is cleared, so it
+ * cannot be cleared in this general purpose function.
+ */
+ if (vscsi->debit)
+ vscsi->debit -= 1;
+ break;
+ case ADAPTER_MAD:
+ vscsi->flags &= ~PROCESSING_MAD;
+ break;
+ case UNSET_TYPE:
+ break;
+ default:
+ dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n",
+ cmd->type);
+ break;
+ }
+
+ cmd->iue = NULL;
+ list_add_tail(&cmd->list, &vscsi->free_cmd);
+ srp_iu_put(iue);
+
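+ /* if the adapter has gone idle, wake anyone waiting for it */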
+ if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
+ list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
+ vscsi->flags &= ~WAIT_FOR_IDLE;
+ complete(&vscsi->wait_idle);
+ }
+}
+
+/**
+ * ibmvscsis_disconnect() - Helper function to disconnect
+ * @work: Pointer to work_struct, gives access to our adapter structure
+ *
+ * An error has occurred or the driver received a Transport event,
+ * and the driver is requesting that the command queue be de-registered
+ * in a safe manner. If there is no outstanding I/O then we can stop the
+ * queue. If we are restarting the queue, it will be reflected in
+ * the state of the adapter.
+ *
+ * EXECUTION ENVIRONMENT:
+ * Process environment
+ */
+static void ibmvscsis_disconnect(struct work_struct *work)
+{
+ struct scsi_info *vscsi = container_of(work, struct scsi_info,
+ proc_work);
+ u16 new_state;
+ bool wait_idle = false;
+ long rc = ADAPT_SUCCESS;
+
+ spin_lock_bh(&vscsi->intr_lock);
+ new_state = vscsi->new_state;
+ vscsi->new_state = 0;
+
+ pr_debug("disconnect: flags 0x%x, state 0x%hx\n", vscsi->flags,
+ vscsi->state);
+
+ /*
+ * check which state we are in and see if we
+ * should transition to the new state
+ */
+ switch (vscsi->state) {
+ /* Should never be called while in this state. */
+ case NO_QUEUE:
+ /*
+ * Can never transition from this state;
+ * ignore errors and logout.
+ */
+ case UNCONFIGURING:
+ break;
+
+ /* can transition from this state to UNCONFIGURING */
+ case ERR_DISCONNECT:
+ if (new_state == UNCONFIGURING)
+ vscsi->state = new_state;
+ break;
+
+ /*
+ * Can transition from this state to unconfiguring
+ * or err disconnect.
+ */
+ case ERR_DISCONNECT_RECONNECT:
+ switch (new_state) {
+ case UNCONFIGURING:
+ case ERR_DISCONNECT:
+ vscsi->state = new_state;
+ break;
+
+ case WAIT_IDLE:
+ break;
+ default:
+ break;
+ }
+ break;
+
+ /* can transition from this state to UNCONFIGURING */
+ case ERR_DISCONNECTED:
+ if (new_state == UNCONFIGURING)
+ vscsi->state = new_state;
+ break;
+
+ /*
+ * If this is a transition into an error state, a client is
+ * attempting to establish a connection and has violated the
+ * RPA protocol. There can be nothing pending on the adapter,
+ * although there can be requests in the command queue.
+ */
+ case WAIT_ENABLED:
+ case PART_UP_WAIT_ENAB:
+ switch (new_state) {
+ case ERR_DISCONNECT:
+ vscsi->flags |= RESPONSE_Q_DOWN;
+ vscsi->state = new_state;
+ vscsi->flags &= ~(SCHEDULE_DISCONNECT |
+ DISCONNECT_SCHEDULED);
+ ibmvscsis_free_command_q(vscsi);
+ break;
+ case ERR_DISCONNECT_RECONNECT:
+ ibmvscsis_reset_queue(vscsi, WAIT_ENABLED);
+ break;
+
+ /* should never happen */
+ case WAIT_IDLE:
+ rc = ERROR;
+ dev_err(&vscsi->dev, "disconnect: invalid state %d for WAIT_IDLE\n",
+ vscsi->state);
+ break;
+ }
+ break;
+
+ case WAIT_IDLE:
+ switch (new_state) {
+ case ERR_DISCONNECT:
+ case ERR_DISCONNECT_RECONNECT:
+ vscsi->state = new_state;
+ break;
+ }
+ break;
+
+ /*
+ * The initiator has not done a successful SRP login, or has done
+ * a successful SRP logout (the adapter was not busy). In the first
+ * case there can be responses queued, waiting for space on the
+ * initiator's response queue (MAD); in the second case the adapter
+ * is idle. Assume the worse case, i.e. the second case.
+ */
+ case WAIT_CONNECTION:
+ case CONNECTED:
+ case SRP_PROCESSING:
+ wait_idle = true;
+ vscsi->state = new_state;
+ break;
+
+ /* can transition from this state to UNCONFIGURING */
+ case UNDEFINED:
+ if (new_state == UNCONFIGURING)
+ vscsi->state = new_state;
+ break;
+ default:
+ break;
+ }
+
+ if (wait_idle) {
+ pr_debug("disconnect start wait, active %d, sched %d\n",
+ (int)list_empty(&vscsi->active_q),
+ (int)list_empty(&vscsi->schedule_q));
+ if (!list_empty(&vscsi->active_q) ||
+ !list_empty(&vscsi->schedule_q)) {
+ vscsi->flags |= WAIT_FOR_IDLE;
+ pr_debug("disconnect flags 0x%x\n", vscsi->flags);
+ /*
+ * wait_for_completion() cannot be called with the
+ * interrupt lock held.
+ */
+ spin_unlock_bh(&vscsi->intr_lock);
+ wait_for_completion(&vscsi->wait_idle);
+ spin_lock_bh(&vscsi->intr_lock);
+ }
+ pr_debug("disconnect stop wait\n");
+
+ ibmvscsis_adapter_idle(vscsi);
+ }
+
+ spin_unlock_bh(&vscsi->intr_lock);
+}
+
+/**
+ * ibmvscsis_post_disconnect() - Schedule the disconnect
+ * @vscsi: Pointer to our adapter structure
+ * @new_state: State to move to after disconnecting
+ * @flag_bits: Flags to turn on in adapter structure
+ *
+ * If it's already been scheduled, then see if we need to "upgrade"
+ * the new state (if the one passed in is more "severe" than the
+ * previous one).
+ *
+ * PRECONDITION:
+ * interrupt lock is held
+ */
+static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
+ uint flag_bits)
+{
+ uint state;
+
+ /* check the validity of the new state */
+ switch (new_state) {
+ case UNCONFIGURING:
+ case ERR_DISCONNECT:
+ case ERR_DISCONNECT_RECONNECT:
+ case WAIT_IDLE:
+ break;
+
+ default:
+ dev_err(&vscsi->dev, "post_disconnect: Invalid new state %d\n",
+ new_state);
+ return;
+ }
+
+ vscsi->flags |= flag_bits;
+
+ pr_debug("post_disconnect: new_state 0x%x, flag_bits 0x%x, vscsi->flags 0x%x, state %hx\n",
+ new_state, flag_bits, vscsi->flags, vscsi->state);
+
+ if (!(vscsi->flags & (DISCONNECT_SCHEDULED | SCHEDULE_DISCONNECT))) {
+ vscsi->flags |= SCHEDULE_DISCONNECT;
+ vscsi->new_state = new_state;
+
+ INIT_WORK(&vscsi->proc_work, ibmvscsis_disconnect);
+ (void)queue_work(vscsi->work_q, &vscsi->proc_work);
+ } else {
+ if (vscsi->new_state)
+ state = vscsi->new_state;
+ else
+ state = vscsi->state;
+
+ switch (state) {
+ case NO_QUEUE:
+ case UNCONFIGURING:
+ break;
+
+ case ERR_DISCONNECTED:
+ case ERR_DISCONNECT:
+ case UNDEFINED:
+ if (new_state == UNCONFIGURING)
+ vscsi->new_state = new_state;
+ break;
+
+ case ERR_DISCONNECT_RECONNECT:
+ switch (new_state) {
+ case UNCONFIGURING:
+ case ERR_DISCONNECT:
+ vscsi->new_state = new_state;
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case WAIT_ENABLED:
+ case PART_UP_WAIT_ENAB:
+ case WAIT_IDLE:
+ case WAIT_CONNECTION:
+ case CONNECTED:
+ case SRP_PROCESSING:
+ vscsi->new_state = new_state;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ pr_debug("Leaving post_disconnect: flags 0x%x, new_state 0x%x\n",
+ vscsi->flags, vscsi->new_state);
+}
+
+/**
+ * ibmvscsis_trans_event() - Handle a Transport Event
+ * @vscsi: Pointer to our adapter structure
+ * @crq: Pointer to CRQ entry containing the Transport Event
+ *
+ * Do the logic to close the I_T nexus. This function may not
+ * behave according to the specification.
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt, interrupt lock held
+ */
+static long ibmvscsis_trans_event(struct scsi_info *vscsi,
+ struct viosrp_crq *crq)
+{
+ long rc = ADAPT_SUCCESS;
+
+ pr_debug("trans_event: format %d, flags 0x%x, state 0x%hx\n",
+ (int)crq->format, vscsi->flags, vscsi->state);
+
+ switch (crq->format) {
+ case MIGRATED:
+ case PARTNER_FAILED:
+ case PARTNER_DEREGISTER:
+ ibmvscsis_delete_client_info(vscsi, true);
+ break;
+
+ default:
+ rc = ERROR;
+ dev_err(&vscsi->dev, "trans_event: invalid format %d\n",
+ (uint)crq->format);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT,
+ RESPONSE_Q_DOWN);
+ break;
+ }
+
+ if (rc == ADAPT_SUCCESS) {
+ switch (vscsi->state) {
+ case NO_QUEUE:
+ case ERR_DISCONNECTED:
+ case UNDEFINED:
+ break;
+
+ case UNCONFIGURING:
+ vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT);
+ break;
+
+ case WAIT_ENABLED:
+ break;
+
+ case WAIT_CONNECTION:
+ break;
+
+ case CONNECTED:
+ ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
+ (RESPONSE_Q_DOWN |
+ TRANS_EVENT));
+ break;
+
+ case PART_UP_WAIT_ENAB:
+ vscsi->state = WAIT_ENABLED;
+ break;
+
+ case SRP_PROCESSING:
+ if ((vscsi->debit > 0) ||
+ !list_empty(&vscsi->schedule_q) ||
+ !list_empty(&vscsi->waiting_rsp) ||
+ !list_empty(&vscsi->active_q)) {
+ pr_debug("debit %d, sched %d, wait %d, active %d\n",
+ vscsi->debit,
+ (int)list_empty(&vscsi->schedule_q),
+ (int)list_empty(&vscsi->waiting_rsp),
+ (int)list_empty(&vscsi->active_q));
+ pr_warn("connection lost with outstanding work\n");
+ } else {
+ pr_debug("trans_event: SRP Processing, but no outstanding work\n");
+ }
+
+ ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
+ (RESPONSE_Q_DOWN |
+ TRANS_EVENT));
+ break;
+
+ case ERR_DISCONNECT:
+ case ERR_DISCONNECT_RECONNECT:
+ case WAIT_IDLE:
+ vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT);
+ break;
+ }
+ }
+
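+ /* tell the interrupt handler whether a disconnect was scheduled */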
+ rc = vscsi->flags & SCHEDULE_DISCONNECT;
+
+ pr_debug("Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
+ vscsi->flags, vscsi->state, rc);
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_poll_cmd_q() - Poll Command Queue
+ * @vscsi: Pointer to our adapter structure
+ *
+ * Called to handle command elements that may have arrived while
+ * interrupts were disabled.
+ *
+ * EXECUTION ENVIRONMENT:
+ * intr_lock must be held
+ */
+static void ibmvscsis_poll_cmd_q(struct scsi_info *vscsi)
+{
+ struct viosrp_crq *crq;
+ long rc;
+ bool ack = true;
+ volatile u8 valid;
+
+ pr_debug("poll_cmd_q: flags 0x%x, state 0x%hx, q index %u\n",
+ vscsi->flags, vscsi->state, vscsi->cmd_q.index);
+
+ rc = vscsi->flags & SCHEDULE_DISCONNECT;
+ crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
+ valid = crq->valid;
+ dma_rmb();
+
+ while (valid) {
+poll_work:
+ vscsi->cmd_q.index =
+ (vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask;
+
+ if (!rc) {
+ rc = ibmvscsis_parse_command(vscsi, crq);
+ } else {
+ if ((uint)crq->valid == VALID_TRANS_EVENT) {
+ /*
+ * must service the transport layer events even
+ * in an error state; don't break out until all
+ * the consecutive transport events have been
+ * processed
+ */
+ rc = ibmvscsis_trans_event(vscsi, crq);
+ } else if (vscsi->flags & TRANS_EVENT) {
+ /*
+ * if a transport event has occurred, leave
+ * everything but transport events on the queue
+ */
+ pr_debug("poll_cmd_q, ignoring\n");
+
+ /*
+ * need to decrement the queue index so we can
+ * look at the element again
+ */
+ if (vscsi->cmd_q.index)
+ vscsi->cmd_q.index -= 1;
+ else
+ /*
+ * the index is at 0, so it just wrapped;
+ * point it at the last element in the queue
+ */
+ vscsi->cmd_q.index = vscsi->cmd_q.mask;
+ break;
+ }
+ }
+
+ crq->valid = INVALIDATE_CMD_RESP_EL;
+
+ crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
+ valid = crq->valid;
+ dma_rmb();
+ }
+
+ if (!rc) {
+ if (ack) {
+ vio_enable_interrupts(vscsi->dma_dev);
+ ack = false;
+ pr_debug("poll_cmd_q, reenabling interrupts\n");
+ }
+ valid = crq->valid;
+ dma_rmb();
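+ /*
+ * an element may have arrived while interrupts were
+ * disabled; if so, go back and process it
+ */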
+ if (valid)
+ goto poll_work;
+ }
+
+ pr_debug("Leaving poll_cmd_q: rc %ld\n", rc);
+}
+
+/**
+ * ibmvscsis_free_cmd_qs() - Free elements in queue
+ * @vscsi: Pointer to our adapter structure
+ *
+ * Free all of the elements on all queues that are waiting for
+ * whatever reason.
+ *
+ * PRECONDITION:
+ * Called with interrupt lock held
+ */
+static void ibmvscsis_free_cmd_qs(struct scsi_info *vscsi)
+{
+ struct ibmvscsis_cmd *cmd, *nxt;
+
+ pr_debug("free_cmd_qs: waiting_rsp empty %d, timer started %d\n",
+ (int)list_empty(&vscsi->waiting_rsp),
+ vscsi->rsp_q_timer.started);
+
+ list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
+ list_del(&cmd->list);
+ ibmvscsis_free_cmd_resources(vscsi, cmd);
+ }
+}
+
+/**
+ * ibmvscsis_get_free_cmd() - Get free command from list
+ * @vscsi: Pointer to our adapter structure
+ *
+ * Must be called with interrupt lock held.
+ */
+static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi)
+{
+ struct ibmvscsis_cmd *cmd = NULL;
+ struct iu_entry *iue;
+
+ iue = srp_iu_get(&vscsi->target);
+ if (iue) {
+ cmd = list_first_entry_or_null(&vscsi->free_cmd,
+ struct ibmvscsis_cmd, list);
+ if (cmd) {
+ list_del(&cmd->list);
+ cmd->iue = iue;
+ cmd->type = UNSET_TYPE;
+ memset(&cmd->se_cmd, 0, sizeof(cmd->se_cmd));
+ } else {
+ srp_iu_put(iue);
+ }
+ }
+
+ return cmd;
+}
+
+/**
+ * ibmvscsis_adapter_idle() - Helper function to handle idle adapter
+ * @vscsi: Pointer to our adapter structure
+ *
+ * This function is called when the adapter is idle and the driver
+ * is attempting to clear an error condition.
+ * The adapter is considered busy if any of its cmd queues
+ * are non-empty. This function can be invoked
+ * from the off-level disconnect function.
+ *
+ * EXECUTION ENVIRONMENT:
+ * Process environment, called with interrupt lock held
+ */
+static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
+{
+ bool free_qs = false;
+
+ pr_debug("adapter_idle: flags 0x%x, state 0x%hx\n", vscsi->flags,
+ vscsi->state);
+
+ /* Only need to free qs if we're disconnecting from client */
+ if (vscsi->state != WAIT_CONNECTION || vscsi->flags & TRANS_EVENT)
+ free_qs = true;
+
+ switch (vscsi->state) {
+ case ERR_DISCONNECT_RECONNECT:
+ ibmvscsis_reset_queue(vscsi, WAIT_CONNECTION);
+ pr_debug("adapter_idle, disc_rec: flags 0x%x\n", vscsi->flags);
+ break;
+
+ case ERR_DISCONNECT:
+ ibmvscsis_free_command_q(vscsi);
+ vscsi->flags &= ~DISCONNECT_SCHEDULED;
+ vscsi->flags |= RESPONSE_Q_DOWN;
+ vscsi->state = ERR_DISCONNECTED;
+ pr_debug("adapter_idle, disc: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
+ break;
+
+ case WAIT_IDLE:
+ vscsi->rsp_q_timer.timer_pops = 0;
+ vscsi->debit = 0;
+ vscsi->credit = 0;
+ if (vscsi->flags & TRANS_EVENT) {
+ vscsi->state = WAIT_CONNECTION;
+ vscsi->flags &= PRESERVE_FLAG_FIELDS;
+ } else {
+ vscsi->state = CONNECTED;
+ vscsi->flags &= ~DISCONNECT_SCHEDULED;
+ }
+
+ pr_debug("adapter_idle, wait: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
+ ibmvscsis_poll_cmd_q(vscsi);
+ break;
+
+ case ERR_DISCONNECTED:
+ vscsi->flags &= ~DISCONNECT_SCHEDULED;
+ pr_debug("adapter_idle, disconnected: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
+ break;
+
+ default:
+ dev_err(&vscsi->dev, "adapter_idle: in invalid state %d\n",
+ vscsi->state);
+ break;
+ }
+
+ if (free_qs)
+ ibmvscsis_free_cmd_qs(vscsi);
+
+ /*
+ * There is a timing window where we could lose a disconnect request.
+ * The known path to this window occurs during the DISCONNECT_RECONNECT
+ * case above: reset_queue calls free_command_q, which will release the
+ * interrupt lock. During that time, a new post_disconnect call can be
+ * made with a "more severe" state (DISCONNECT or UNCONFIGURING).
+ * Because the DISCONNECT_SCHEDULED flag is already set, post_disconnect
+ * will only set the new_state. Now free_command_q reacquires the intr
+ * lock and clears the DISCONNECT_SCHEDULED flag (using PRESERVE_FLAG_
+ * FIELDS), and the disconnect is lost. This is particularly bad when
+ * the new disconnect was for UNCONFIGURING, since the unconfigure hangs
+ * forever.
+ * The fix is that free_command_q records the acr state and acr
+ * flags if there was a change while the lock was not held.
+ * Note that free_command_q writes to this state and clears it
+ * before releasing the lock; different callers invoke it at
+ * different times, so don't initialize these fields above.
+ */
+ if (vscsi->phyp_acr_state != 0) {
+ /*
+ * set any bits in flags that may have been cleared by
+ * a call to free command queue in switch statement
+ * or reset queue
+ */
+ vscsi->flags |= vscsi->phyp_acr_flags;
+ ibmvscsis_post_disconnect(vscsi, vscsi->phyp_acr_state, 0);
+ vscsi->phyp_acr_state = 0;
+ vscsi->phyp_acr_flags = 0;
+
+ pr_debug("adapter_idle: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
+ vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
+ vscsi->phyp_acr_state);
+ }
+
+ pr_debug("Leaving adapter_idle: flags 0x%x, state 0x%hx, new_state 0x%x\n",
+ vscsi->flags, vscsi->state, vscsi->new_state);
+}
+
+/**
+ * ibmvscsis_copy_crq_packet() - Copy CRQ Packet
+ * @vscsi: Pointer to our adapter structure
+ * @cmd: Pointer to command element to use to process the request
+ * @crq: Pointer to CRQ entry containing the request
+ *
+ * Copy the srp information unit from the hosted
+ * partition using remote dma
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt, interrupt lock held
+ */
+static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi,
+ struct ibmvscsis_cmd *cmd,
+ struct viosrp_crq *crq)
+{
+ struct iu_entry *iue = cmd->iue;
+ long rc = 0;
+ u16 len;
+
+ len = be16_to_cpu(crq->IU_length);
+ if ((len > SRP_MAX_IU_LEN) || (len == 0)) {
+ dev_err(&vscsi->dev, "copy_crq: Invalid len %d passed", len);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ return SRP_VIOLATION;
+ }
+
+ rc = h_copy_rdma(len, vscsi->dds.window[REMOTE].liobn,
+ be64_to_cpu(crq->IU_data_ptr),
+ vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma);
+
+ switch (rc) {
+ case H_SUCCESS:
+ cmd->init_time = mftb();
+ iue->remote_token = crq->IU_data_ptr;
+ iue->iu_len = len;
+ pr_debug("copy_crq: ioba 0x%llx, init_time 0x%llx\n",
+ be64_to_cpu(crq->IU_data_ptr), cmd->init_time);
+ break;
+ case H_PERMISSION:
+ if (connection_broken(vscsi))
+ ibmvscsis_post_disconnect(vscsi,
+ ERR_DISCONNECT_RECONNECT,
+ (RESPONSE_Q_DOWN |
+ CLIENT_FAILED));
+ else
+ ibmvscsis_post_disconnect(vscsi,
+ ERR_DISCONNECT_RECONNECT, 0);
+
+ dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n",
+ rc);
+ break;
+ case H_DEST_PARM:
+ case H_SOURCE_PARM:
+ default:
+ dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n",
+ rc);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_adapter_info() - Service an Adapter Info MAnagement Data gram
+ * @vscsi: Pointer to our adapter structure
+ * @iue: Information Unit containing the Adapter Info MAD request
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt, adapter lock is held
+ */
+static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
+ struct iu_entry *iue)
+{
+ struct viosrp_adapter_info *mad = &vio_iu(iue)->mad.adapter_info;
+ struct mad_adapter_info_data *info;
+ uint flag_bits = 0;
+ dma_addr_t token;
+ long rc;
+
+ mad->common.status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
+
+ if (be16_to_cpu(mad->common.length) > sizeof(*info)) {
+ mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
+ return 0;
+ }
+
+ info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token,
+ GFP_KERNEL);
+ if (!info) {
+ dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
+ iue->target);
+ mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
+ return 0;
+ }
+
+ /* Get remote info */
+ rc = h_copy_rdma(be16_to_cpu(mad->common.length),
+ vscsi->dds.window[REMOTE].liobn,
+ be64_to_cpu(mad->buffer),
+ vscsi->dds.window[LOCAL].liobn, token);
+
+ if (rc != H_SUCCESS) {
+ if (rc == H_PERMISSION) {
+ if (connection_broken(vscsi))
+ flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
+ }
+ pr_warn("adapter_info: h_copy_rdma from client failed, rc %ld\n",
+ rc);
+ pr_debug("adapter_info: ioba 0x%llx, flags 0x%x, flag_bits 0x%x\n",
+ be64_to_cpu(mad->buffer), vscsi->flags, flag_bits);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
+ flag_bits);
+ goto free_dma;
+ }
+
+ /*
+ * Copy client info, but ignore partition number, which we
+ * already got from phyp - unless we failed to get it from
+ * phyp (e.g. if we're running on a p5 system).
+ */
+ if (vscsi->client_data.partition_number == 0)
+ vscsi->client_data.partition_number =
+ be32_to_cpu(info->partition_number);
+ strncpy(vscsi->client_data.srp_version, info->srp_version,
+ sizeof(vscsi->client_data.srp_version));
+ strncpy(vscsi->client_data.partition_name, info->partition_name,
+ sizeof(vscsi->client_data.partition_name));
+ vscsi->client_data.mad_version = be32_to_cpu(info->mad_version);
+ vscsi->client_data.os_type = be32_to_cpu(info->os_type);
+
+ /* Copy our info */
+ strncpy(info->srp_version, SRP_VERSION,
+ sizeof(info->srp_version));
+ strncpy(info->partition_name, vscsi->dds.partition_name,
+ sizeof(info->partition_name));
+ info->partition_number = cpu_to_be32(vscsi->dds.partition_num);
+ info->mad_version = cpu_to_be32(MAD_VERSION_1);
+ info->os_type = cpu_to_be32(LINUX);
+ memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu));
+ info->port_max_txu[0] = cpu_to_be32(128 * PAGE_SIZE);
+
+ dma_wmb();
+ rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn,
+ token, vscsi->dds.window[REMOTE].liobn,
+ be64_to_cpu(mad->buffer));
+ switch (rc) {
+ case H_SUCCESS:
+ break;
+
+ case H_SOURCE_PARM:
+ case H_DEST_PARM:
+ case H_PERMISSION:
+ if (connection_broken(vscsi))
+ flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
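+ /* fall through */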
+ default:
+ dev_err(&vscsi->dev, "adapter_info: h_copy_rdma to client failed, rc %ld\n",
+ rc);
+ ibmvscsis_post_disconnect(vscsi,
+ ERR_DISCONNECT_RECONNECT,
+ flag_bits);
+ break;
+ }
+
+free_dma:
+ dma_free_coherent(&vscsi->dma_dev->dev, sizeof(*info), info, token);
+ pr_debug("Leaving adapter_info, rc %ld\n", rc);
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_cap_mad() - Service a Capabilities MAnagement Data gram
+ * @vscsi: Pointer to our adapter structure
+ * @iue: Information Unit containing the Capabilities MAD request
+ *
+ * NOTE: if you return an error from this routine you must be
+ * disconnecting or you will cause a hang
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt, called with adapter lock held
+ */
+static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
+{
+ struct viosrp_capabilities *mad = &vio_iu(iue)->mad.capabilities;
+ struct capabilities *cap;
+ struct mad_capability_common *common;
+ dma_addr_t token;
+ u16 olen, len, status, min_len, cap_len;
+ u32 flag;
+ uint flag_bits = 0;
+ long rc = 0;
+
+ olen = be16_to_cpu(mad->common.length);
+ /*
+ * struct capabilities hardcodes a couple capabilities after the
+ * header, but the capabilities can actually be in any order.
+ */
+ min_len = offsetof(struct capabilities, migration);
+ if ((olen < min_len) || (olen > PAGE_SIZE)) {
+ pr_warn("cap_mad: invalid len %d\n", olen);
+ mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
+ return 0;
+ }
+
+ cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token,
+ GFP_KERNEL);
+ if (!cap) {
+ dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
+ iue->target);
+ mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
+ return 0;
+ }
+ rc = h_copy_rdma(olen, vscsi->dds.window[REMOTE].liobn,
+ be64_to_cpu(mad->buffer),
+ vscsi->dds.window[LOCAL].liobn, token);
+ if (rc == H_SUCCESS) {
+ strncpy(cap->name, dev_name(&vscsi->dma_dev->dev),
+ SRP_MAX_LOC_LEN);
+
+ len = olen - min_len;
+ status = VIOSRP_MAD_SUCCESS;
+ common = (struct mad_capability_common *)&cap->migration;
+
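+ /* walk the capability list one variable-length entry at a time */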
+ while ((len > 0) && (status == VIOSRP_MAD_SUCCESS) && !rc) {
+ pr_debug("cap_mad: len left %hd, cap type %d, cap len %hd\n",
+ len, be32_to_cpu(common->cap_type),
+ be16_to_cpu(common->length));
+
+ cap_len = be16_to_cpu(common->length);
+ if (cap_len > len) {
+ dev_err(&vscsi->dev, "cap_mad: cap len mismatch with total len\n");
+ status = VIOSRP_MAD_FAILED;
+ break;
+ }
+
+ if (cap_len == 0) {
+ dev_err(&vscsi->dev, "cap_mad: cap len is 0\n");
+ status = VIOSRP_MAD_FAILED;
+ break;
+ }
+
+ switch (common->cap_type) {
+ default:
+ pr_debug("cap_mad: unsupported capability\n");
+ common->server_support = 0;
+ flag = cpu_to_be32((u32)CAP_LIST_SUPPORTED);
+ cap->flags &= ~flag;
+ break;
+ }
+
+ len = len - cap_len;
+ common = (struct mad_capability_common *)
+ ((char *)common + cap_len);
+ }
+
+ mad->common.status = cpu_to_be16(status);
+
+ dma_wmb();
+ rc = h_copy_rdma(olen, vscsi->dds.window[LOCAL].liobn, token,
+ vscsi->dds.window[REMOTE].liobn,
+ be64_to_cpu(mad->buffer));
+
+ if (rc != H_SUCCESS) {
+ pr_debug("cap_mad: failed to copy to client, rc %ld\n",
+ rc);
+
+ if (rc == H_PERMISSION) {
+ if (connection_broken(vscsi))
+ flag_bits = (RESPONSE_Q_DOWN |
+ CLIENT_FAILED);
+ }
+
+ pr_warn("cap_mad: error copying data to client, rc %ld\n",
+ rc);
+ ibmvscsis_post_disconnect(vscsi,
+ ERR_DISCONNECT_RECONNECT,
+ flag_bits);
+ }
+ }
+
+ dma_free_coherent(&vscsi->dma_dev->dev, olen, cap, token);
+
+ pr_debug("Leaving cap_mad, rc %ld, client_cap 0x%x\n",
+ rc, vscsi->client_cap);
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_process_mad() - Service a MAnagement Data gram
+ * @vscsi: Pointer to our adapter structure
+ * @iue: Information Unit containing the MAD request
+ *
+ * Must be called with interrupt lock held.
+ */
+static long ibmvscsis_process_mad(struct scsi_info *vscsi, struct iu_entry *iue)
+{
+ struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad;
+ struct viosrp_empty_iu *empty;
+ long rc = ADAPT_SUCCESS;
+
+ switch (be32_to_cpu(mad->type)) {
+ case VIOSRP_EMPTY_IU_TYPE:
+ empty = &vio_iu(iue)->mad.empty_iu;
+ vscsi->empty_iu_id = be64_to_cpu(empty->buffer);
+ vscsi->empty_iu_tag = be64_to_cpu(empty->common.tag);
+ mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
+ break;
+ case VIOSRP_ADAPTER_INFO_TYPE:
+ rc = ibmvscsis_adapter_info(vscsi, iue);
+ break;
+ case VIOSRP_CAPABILITIES_TYPE:
+ rc = ibmvscsis_cap_mad(vscsi, iue);
+ break;
+ case VIOSRP_ENABLE_FAST_FAIL:
+ if (vscsi->state == CONNECTED) {
+ vscsi->fast_fail = true;
+ mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
+ } else {
+ pr_warn("fast fail mad sent after login\n");
+ mad->status = cpu_to_be16(VIOSRP_MAD_FAILED);
+ }
+ break;
+ default:
+ mad->status = cpu_to_be16(VIOSRP_MAD_NOT_SUPPORTED);
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * srp_snd_msg_failed() - Handle an error when sending a response
+ * @vscsi: Pointer to our adapter structure
+ * @rc: The return code from the h_send_crq command
+ *
+ * Must be called with interrupt lock held.
+ */
+static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc)
+{
+ ktime_t kt;
+
+ if (rc != H_DROPPED) {
+ ibmvscsis_free_cmd_qs(vscsi);
+
+ if (rc == H_CLOSED)
+ vscsi->flags |= CLIENT_FAILED;
+
+ /* don't flag the same problem multiple times */
+ if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
+ vscsi->flags |= RESPONSE_Q_DOWN;
+ if (!(vscsi->state & (ERR_DISCONNECT |
+ ERR_DISCONNECT_RECONNECT |
+ ERR_DISCONNECTED | UNDEFINED))) {
+ dev_err(&vscsi->dev, "snd_msg_failed: setting RESPONSE_Q_DOWN, state 0x%hx, flags 0x%x, rc %ld\n",
+ vscsi->state, vscsi->flags, rc);
+ }
+ ibmvscsis_post_disconnect(vscsi,
+ ERR_DISCONNECT_RECONNECT, 0);
+ }
+ return;
+ }
+
+ /*
+ * The response queue is full. If the server is processing SRP
+ * requests, i.e. the client has successfully done an SRP_LOGIN,
+ * then it will wait forever for room in the queue. However, if
+ * the system admin is attempting to unconfigure the server, then
+ * one or more children will be in a state where they are being
+ * removed. So if there is even one child being removed, then the
+ * driver assumes the system admin is attempting to break the
+ * connection with the client, and MAX_TIMER_POPS is honored.
+ */
+ if ((vscsi->rsp_q_timer.timer_pops < MAX_TIMER_POPS) ||
+ (vscsi->state == SRP_PROCESSING)) {
+ pr_debug("snd_msg_failed: response queue full, flags 0x%x, timer started %d, pops %d\n",
+ vscsi->flags, (int)vscsi->rsp_q_timer.started,
+ vscsi->rsp_q_timer.timer_pops);
+
+ /*
+ * Check if the timer is running; if it
+ * is not then start it up.
+ */
+ if (!vscsi->rsp_q_timer.started) {
+ if (vscsi->rsp_q_timer.timer_pops <
+ MAX_TIMER_POPS) {
+ kt = ktime_set(0, WAIT_NANO_SECONDS);
+ } else {
+ /*
+ * slide the timeslice if the maximum
+ * timer pops have already happened
+ */
+ kt = ktime_set(WAIT_SECONDS, 0);
+ }
+
+ vscsi->rsp_q_timer.started = true;
+ hrtimer_start(&vscsi->rsp_q_timer.timer, kt,
+ HRTIMER_MODE_REL);
+ }
+ } else {
+ /*
+ * TBD: Do we need to worry about this? Need to get
+ * remove working.
+ */
+ /*
+ * we waited a long time and it appears the system admin
+ * is bringing this driver down
+ */
+ vscsi->flags |= RESPONSE_Q_DOWN;
+ ibmvscsis_free_cmd_qs(vscsi);
+ /*
+ * if the driver is already attempting to disconnect
+ * from the client and has already logged an error
+ * trace this event but don't put it in the error log
+ */
+ if (!(vscsi->state & (ERR_DISCONNECT |
+ ERR_DISCONNECT_RECONNECT |
+ ERR_DISCONNECTED | UNDEFINED))) {
+ dev_err(&vscsi->dev, "client crq full too long\n");
+ ibmvscsis_post_disconnect(vscsi,
+ ERR_DISCONNECT_RECONNECT,
+ 0);
+ }
+ }
+}
+
+/**
+ * ibmvscsis_send_messages() - Send a Response
+ * @vscsi: Pointer to our adapter structure
+ *
+ * Send a response, first checking the waiting queue. Responses are
+ * sent in order they are received. If the response cannot be sent,
+ * because the client queue is full, it stays on the waiting queue.
+ *
+ * PRECONDITION:
+ * Called with interrupt lock held
+ */
+static void ibmvscsis_send_messages(struct scsi_info *vscsi)
+{
+ u64 msg_hi = 0;
+ /* note: do not attempt to access the IU_data_ptr with this pointer;
+ * it is not valid
+ */
+ struct viosrp_crq *crq = (struct viosrp_crq *)&msg_hi;
+ struct ibmvscsis_cmd *cmd, *nxt;
+ struct iu_entry *iue;
+ long rc = ADAPT_SUCCESS;
+
+ if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
+ list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
+ pr_debug("send_messages cmd %p\n", cmd);
+
+ iue = cmd->iue;
+
+ crq->valid = VALID_CMD_RESP_EL;
+ crq->format = cmd->rsp.format;
+
+ if (cmd->flags & CMD_FAST_FAIL)
+ crq->status = VIOSRP_ADAPTER_FAIL;
+
+ crq->IU_length = cpu_to_be16(cmd->rsp.len);
+
+ rc = h_send_crq(vscsi->dma_dev->unit_address,
+ be64_to_cpu(msg_hi),
+ be64_to_cpu(cmd->rsp.tag));
+
+ pr_debug("send_messages: tag 0x%llx, rc %ld\n",
+ be64_to_cpu(cmd->rsp.tag), rc);
+
+ /* if all ok free up the command element resources */
+ if (rc == H_SUCCESS) {
+ /* some movement has occurred */
+ vscsi->rsp_q_timer.timer_pops = 0;
+ list_del(&cmd->list);
+
+ ibmvscsis_free_cmd_resources(vscsi, cmd);
+ } else {
+ srp_snd_msg_failed(vscsi, rc);
+ break;
+ }
+ }
+
+ if (!rc) {
+ /*
+ * The timer could pop with the queue empty. If
+ * this happens, rc will always indicate a
+ * success; clear the pop count.
+ */
+ vscsi->rsp_q_timer.timer_pops = 0;
+ }
+ } else {
+ ibmvscsis_free_cmd_qs(vscsi);
+ }
+}
+
+/* Called with intr lock held */
+static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi,
+ struct ibmvscsis_cmd *cmd,
+ struct viosrp_crq *crq)
+{
+ struct iu_entry *iue = cmd->iue;
+ struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad;
+ uint flag_bits = 0;
+ long rc;
+
+ dma_wmb();
+ rc = h_copy_rdma(sizeof(struct mad_common),
+ vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma,
+ vscsi->dds.window[REMOTE].liobn,
+ be64_to_cpu(crq->IU_data_ptr));
+ if (!rc) {
+ cmd->rsp.format = VIOSRP_MAD_FORMAT;
+ cmd->rsp.len = sizeof(struct mad_common);
+ cmd->rsp.tag = mad->tag;
+ list_add_tail(&cmd->list, &vscsi->waiting_rsp);
+ ibmvscsis_send_messages(vscsi);
+ } else {
+ pr_debug("Error sending mad response, rc %ld\n", rc);
+ if (rc == H_PERMISSION) {
+ if (connection_broken(vscsi))
+ flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
+ }
+ dev_err(&vscsi->dev, "mad: failed to copy to client, rc %ld\n",
+ rc);
+
+ ibmvscsis_free_cmd_resources(vscsi, cmd);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
+ flag_bits);
+ }
+}
+
+/**
+ * ibmvscsis_mad() - Service a MAnagement Data gram.
+ * @vscsi: Pointer to our adapter structure
+ * @crq: Pointer to the CRQ entry containing the MAD request
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt, called with adapter lock held
+ */
+static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
+{
+ struct iu_entry *iue;
+ struct ibmvscsis_cmd *cmd;
+ struct mad_common *mad;
+ long rc = ADAPT_SUCCESS;
+
+ switch (vscsi->state) {
+ /*
+ * We have not exchanged Init Msgs yet, so this MAD was sent
+ * before the last Transport Event; client will not be
+ * expecting a response.
+ */
+ case WAIT_CONNECTION:
+ pr_debug("mad: in Wait Connection state, ignoring MAD, flags %d\n",
+ vscsi->flags);
+ return ADAPT_SUCCESS;
+
+ case SRP_PROCESSING:
+ case CONNECTED:
+ break;
+
+ /*
+ * We should never get here while we're in these states.
+ * Just log an error and get out.
+ */
+ case UNCONFIGURING:
+ case WAIT_IDLE:
+ case ERR_DISCONNECT:
+ case ERR_DISCONNECT_RECONNECT:
+ default:
+ dev_err(&vscsi->dev, "mad: invalid adapter state %d for mad\n",
+ vscsi->state);
+ return ADAPT_SUCCESS;
+ }
+
+ cmd = ibmvscsis_get_free_cmd(vscsi);
+ if (!cmd) {
+ dev_err(&vscsi->dev, "mad: failed to get cmd, debit %d\n",
+ vscsi->debit);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ return ERROR;
+ }
+ iue = cmd->iue;
+ cmd->type = ADAPTER_MAD;
+
+ rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq);
+ if (!rc) {
+ mad = (struct mad_common *)&vio_iu(iue)->mad;
+
+ pr_debug("mad: type %d\n", be32_to_cpu(mad->type));
+
+ /* cast to s16: a plain u16 comparison with 0 is never true */
+ if ((s16)be16_to_cpu(mad->length) < 0) {
+ dev_err(&vscsi->dev, "mad: length is < 0\n");
+ ibmvscsis_post_disconnect(vscsi,
+ ERR_DISCONNECT_RECONNECT, 0);
+ rc = SRP_VIOLATION;
+ } else {
+ rc = ibmvscsis_process_mad(vscsi, iue);
+ }
+
+ pr_debug("mad: status %hd, rc %ld\n", be16_to_cpu(mad->status),
+ rc);
+
+ if (!rc)
+ ibmvscsis_send_mad_resp(vscsi, cmd, crq);
+ } else {
+ ibmvscsis_free_cmd_resources(vscsi, cmd);
+ }
+
+ pr_debug("Leaving mad, rc %ld\n", rc);
+ return rc;
+}
+
+/**
+ * ibmvscsis_login_rsp() - Create/copy a login response notice to the client
+ * @vscsi: Pointer to our adapter structure
+ * @cmd: Pointer to the command for the SRP Login request
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt, interrupt lock held
+ */
+static long ibmvscsis_login_rsp(struct scsi_info *vscsi,
+ struct ibmvscsis_cmd *cmd)
+{
+ struct iu_entry *iue = cmd->iue;
+ struct srp_login_rsp *rsp = &vio_iu(iue)->srp.login_rsp;
+ struct format_code *fmt;
+ uint flag_bits = 0;
+ long rc = ADAPT_SUCCESS;
+
+ memset(rsp, 0, sizeof(struct srp_login_rsp));
+
+ rsp->opcode = SRP_LOGIN_RSP;
+ rsp->req_lim_delta = cpu_to_be32(vscsi->request_limit);
+ rsp->tag = cmd->rsp.tag;
+ rsp->max_it_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
+ rsp->max_ti_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
+ fmt = (struct format_code *)&rsp->buf_fmt;
+ fmt->buffers = SUPPORTED_FORMATS;
+ vscsi->credit = 0;
+
+ cmd->rsp.len = sizeof(struct srp_login_rsp);
+
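+ /* make sure the response is in memory before PHYP copies it */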
+ dma_wmb();
+ rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn,
+ iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn,
+ be64_to_cpu(iue->remote_token));
+
+ switch (rc) {
+ case H_SUCCESS:
+ break;
+
+ case H_PERMISSION:
+ if (connection_broken(vscsi))
+ flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
+ dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n",
+ rc);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
+ flag_bits);
+ break;
+ case H_SOURCE_PARM:
+ case H_DEST_PARM:
+ default:
+ dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n",
+ rc);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_srp_login_rej() - Create/copy a login rejection notice to client
+ * @vscsi: Pointer to our adapter structure
+ * @cmd: Pointer to the command for the SRP Login request
+ * @reason: The reason the SRP Login is being rejected, per SRP protocol
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt, interrupt lock held
+ */
+static long ibmvscsis_srp_login_rej(struct scsi_info *vscsi,
+ struct ibmvscsis_cmd *cmd, u32 reason)
+{
+ struct iu_entry *iue = cmd->iue;
+ struct srp_login_rej *rej = &vio_iu(iue)->srp.login_rej;
+ struct format_code *fmt;
+ uint flag_bits = 0;
+ long rc = ADAPT_SUCCESS;
+
+ memset(rej, 0, sizeof(*rej));
+
+ rej->opcode = SRP_LOGIN_REJ;
+ rej->reason = cpu_to_be32(reason);
+ rej->tag = cmd->rsp.tag;
+ fmt = (struct format_code *)&rej->buf_fmt;
+ fmt->buffers = SUPPORTED_FORMATS;
+
+ cmd->rsp.len = sizeof(*rej);
+
+ dma_wmb();
+ rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn,
+ iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn,
+ be64_to_cpu(iue->remote_token));
+
+ switch (rc) {
+ case H_SUCCESS:
+ break;
+ case H_PERMISSION:
+ if (connection_broken(vscsi))
+ flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
+ dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
+ rc);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
+ flag_bits);
+ break;
+ case H_SOURCE_PARM:
+ case H_DEST_PARM:
+ default:
+ dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
+ rc);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ break;
+ }
+
+ return rc;
+}
+
+static int ibmvscsis_make_nexus(struct ibmvscsis_tport *tport)
+{
+ char *name = tport->tport_name;
+ struct ibmvscsis_nexus *nexus;
+ int rc;
+
+ if (tport->ibmv_nexus) {
+ pr_debug("tport->ibmv_nexus already exists\n");
+ return 0;
+ }
+
+ nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
+ if (!nexus) {
+ pr_err("Unable to allocate struct ibmvscsis_nexus\n");
+ return -ENOMEM;
+ }
+
+ nexus->se_sess = target_alloc_session(&tport->se_tpg, 0, 0,
+ TARGET_PROT_NORMAL, name, nexus,
+ NULL);
+ if (IS_ERR(nexus->se_sess)) {
+ rc = PTR_ERR(nexus->se_sess);
+ goto transport_init_fail;
+ }
+
+ tport->ibmv_nexus = nexus;
+
+ return 0;
+
+transport_init_fail:
+ kfree(nexus);
+ return rc;
+}
+
+static int ibmvscsis_drop_nexus(struct ibmvscsis_tport *tport)
+{
+ struct se_session *se_sess;
+ struct ibmvscsis_nexus *nexus;
+
+ nexus = tport->ibmv_nexus;
+ if (!nexus)
+ return -ENODEV;
+
+ se_sess = nexus->se_sess;
+ if (!se_sess)
+ return -ENODEV;
+
+ /*
+ * Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port
+ */
+ transport_deregister_session(se_sess);
+ tport->ibmv_nexus = NULL;
+ kfree(nexus);
+
+ return 0;
+}
+
+/**
+ * ibmvscsis_srp_login() - Process an SRP Login Request
+ * @vscsi: Pointer to our adapter structure
+ * @cmd: Command element to use to process the SRP Login request
+ * @crq: Pointer to CRQ entry containing the SRP Login request
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt, called with interrupt lock held
+ */
+static long ibmvscsis_srp_login(struct scsi_info *vscsi,
+ struct ibmvscsis_cmd *cmd,
+ struct viosrp_crq *crq)
+{
+ struct iu_entry *iue = cmd->iue;
+ struct srp_login_req *req = &vio_iu(iue)->srp.login_req;
+ struct port_id {
+ __be64 id_extension;
+ __be64 io_guid;
+ } *iport, *tport;
+ struct format_code *fmt;
+ u32 reason = 0x0;
+ long rc = ADAPT_SUCCESS;
+
+ iport = (struct port_id *)req->initiator_port_id;
+ tport = (struct port_id *)req->target_port_id;
+ fmt = (struct format_code *)&req->req_buf_fmt;
+ if (be32_to_cpu(req->req_it_iu_len) > SRP_MAX_IU_LEN)
+ reason = SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE;
+ else if (be32_to_cpu(req->req_it_iu_len) < 64)
+ reason = SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL;
+ else if ((be64_to_cpu(iport->id_extension) > (MAX_NUM_PORTS - 1)) ||
+ (be64_to_cpu(tport->id_extension) > (MAX_NUM_PORTS - 1)))
+ reason = SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL;
+ else if (req->req_flags & SRP_MULTICHAN_MULTI)
+ reason = SRP_LOGIN_REJ_MULTI_CHANNEL_UNSUPPORTED;
+ else if (fmt->buffers & (~SUPPORTED_FORMATS))
+ reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;
+ else if ((fmt->buffers & SUPPORTED_FORMATS) == 0)
+ reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;
+
+ if (vscsi->state == SRP_PROCESSING)
+ reason = SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED;
+
+ rc = ibmvscsis_make_nexus(&vscsi->tport);
+ if (rc)
+ reason = SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL;
+
+ cmd->rsp.format = VIOSRP_SRP_FORMAT;
+ cmd->rsp.tag = req->tag;
+
+ pr_debug("srp_login: reason 0x%x\n", reason);
+
+ if (reason)
+ rc = ibmvscsis_srp_login_rej(vscsi, cmd, reason);
+ else
+ rc = ibmvscsis_login_rsp(vscsi, cmd);
+
+ if (!rc) {
+ if (!reason)
+ vscsi->state = SRP_PROCESSING;
+
+ list_add_tail(&cmd->list, &vscsi->waiting_rsp);
+ ibmvscsis_send_messages(vscsi);
+ } else {
+ ibmvscsis_free_cmd_resources(vscsi, cmd);
+ }
+
+ pr_debug("Leaving srp_login, rc %ld\n", rc);
+ return rc;
+}
+
+/**
+ * ibmvscsis_srp_i_logout() - Helper Function to close I_T Nexus
+ * @vscsi: Pointer to our adapter structure
+ * @cmd: Command element to use to process the Implicit Logout request
+ * @crq: Pointer to CRQ entry containing the Implicit Logout request
+ *
+ * Do the logic to close the I_T nexus. This function may not
+ * behave according to the specification.
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt, interrupt lock held
+ */
+static long ibmvscsis_srp_i_logout(struct scsi_info *vscsi,
+ struct ibmvscsis_cmd *cmd,
+ struct viosrp_crq *crq)
+{
+ struct iu_entry *iue = cmd->iue;
+ struct srp_i_logout *log_out = &vio_iu(iue)->srp.i_logout;
+ long rc = ADAPT_SUCCESS;
+
+ if ((vscsi->debit > 0) || !list_empty(&vscsi->schedule_q) ||
+ !list_empty(&vscsi->waiting_rsp)) {
+ dev_err(&vscsi->dev, "i_logout: outstanding work\n");
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
+ } else {
+ cmd->rsp.format = SRP_FORMAT;
+ cmd->rsp.tag = log_out->tag;
+ cmd->rsp.len = sizeof(struct mad_common);
+ list_add_tail(&cmd->list, &vscsi->waiting_rsp);
+ ibmvscsis_send_messages(vscsi);
+
+ ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0);
+ }
+
+ return rc;
+}
+
+/* Called with intr lock held */
+static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq)
+{
+ struct ibmvscsis_cmd *cmd;
+ struct iu_entry *iue;
+ struct srp_cmd *srp;
+ struct srp_tsk_mgmt *tsk;
+ long rc;
+
+ if (vscsi->request_limit - vscsi->debit <= 0) {
+ /* Client has exceeded request limit */
+ dev_err(&vscsi->dev, "Client exceeded the request limit (%d), debit %d\n",
+ vscsi->request_limit, vscsi->debit);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ return;
+ }
+
+ cmd = ibmvscsis_get_free_cmd(vscsi);
+ if (!cmd) {
+ dev_err(&vscsi->dev, "srp_cmd failed to get cmd, debit %d\n",
+ vscsi->debit);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ return;
+ }
+ iue = cmd->iue;
+ srp = &vio_iu(iue)->srp.cmd;
+
+ rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq);
+ if (rc) {
+ ibmvscsis_free_cmd_resources(vscsi, cmd);
+ return;
+ }
+
+ if (vscsi->state == SRP_PROCESSING) {
+ switch (srp->opcode) {
+ case SRP_LOGIN_REQ:
+ rc = ibmvscsis_srp_login(vscsi, cmd, crq);
+ break;
+
+ case SRP_TSK_MGMT:
+ tsk = &vio_iu(iue)->srp.tsk_mgmt;
+ pr_debug("tsk_mgmt tag: %llu (0x%llx)\n", tsk->tag,
+ tsk->tag);
+ cmd->rsp.tag = tsk->tag;
+ vscsi->debit += 1;
+ cmd->type = TASK_MANAGEMENT;
+ list_add_tail(&cmd->list, &vscsi->schedule_q);
+ queue_work(vscsi->work_q, &cmd->work);
+ break;
+
+ case SRP_CMD:
+ pr_debug("srp_cmd tag: %llu (0x%llx)\n", srp->tag,
+ srp->tag);
+ cmd->rsp.tag = srp->tag;
+ vscsi->debit += 1;
+ cmd->type = SCSI_CDB;
+ /*
+ * We want to keep track of work waiting for
+ * the workqueue.
+ */
+ list_add_tail(&cmd->list, &vscsi->schedule_q);
+ queue_work(vscsi->work_q, &cmd->work);
+ break;
+
+ case SRP_I_LOGOUT:
+ rc = ibmvscsis_srp_i_logout(vscsi, cmd, crq);
+ break;
+
+ case SRP_CRED_RSP:
+ case SRP_AER_RSP:
+ default:
+ ibmvscsis_free_cmd_resources(vscsi, cmd);
+ dev_err(&vscsi->dev, "invalid srp cmd, opcode %d\n",
+ (uint)srp->opcode);
+ ibmvscsis_post_disconnect(vscsi,
+ ERR_DISCONNECT_RECONNECT, 0);
+ break;
+ }
+ } else if (srp->opcode == SRP_LOGIN_REQ && vscsi->state == CONNECTED) {
+ rc = ibmvscsis_srp_login(vscsi, cmd, crq);
+ } else {
+ ibmvscsis_free_cmd_resources(vscsi, cmd);
+ dev_err(&vscsi->dev, "Invalid state %d to handle srp cmd\n",
+ vscsi->state);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ }
+}
+
+/**
+ * ibmvscsis_ping_response() - Respond to a ping request
+ * @vscsi: Pointer to our adapter structure
+ *
+ * Let the client know that the server is alive and waiting on
+ * its native I/O stack.
+ * If any type of error occurs from the call to queue a ping
+ * response, then the client is either not accepting or not receiving
+ * interrupts. Disconnect with an error.
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt, interrupt lock held
+ */
+static long ibmvscsis_ping_response(struct scsi_info *vscsi)
+{
+ struct viosrp_crq *crq;
+ u64 buffer[2] = { 0, 0 };
+ long rc;
+
+ crq = (struct viosrp_crq *)&buffer;
+ crq->valid = VALID_CMD_RESP_EL;
+ crq->format = (u8)MESSAGE_IN_CRQ;
+ crq->status = PING_RESPONSE;
+
+ rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]),
+ cpu_to_be64(buffer[MSG_LOW]));
+
+ switch (rc) {
+ case H_SUCCESS:
+ break;
+ case H_CLOSED:
+ vscsi->flags |= CLIENT_FAILED;
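+ /* fall through */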
+ case H_DROPPED:
+ vscsi->flags |= RESPONSE_Q_DOWN;
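+ /* fall through */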
+ case H_REMOTE_PARM:
+ dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n",
+ rc);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ break;
+ default:
+ dev_err(&vscsi->dev, "ping_response: h_send_crq returned unknown rc %ld\n",
+ rc);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
+ * @vscsi: Pointer to our adapter structure
+ *
+ * Must be called with interrupt lock held.
+ */
+static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi)
+{
+ long rc = ADAPT_SUCCESS;
+
+ switch (vscsi->state) {
+ case NO_QUEUE:
+ case ERR_DISCONNECT:
+ case ERR_DISCONNECT_RECONNECT:
+ case ERR_DISCONNECTED:
+ case UNCONFIGURING:
+ case UNDEFINED:
+ rc = ERROR;
+ break;
+
+ case WAIT_CONNECTION:
+ vscsi->state = CONNECTED;
+ break;
+
+ case WAIT_IDLE:
+ case SRP_PROCESSING:
+ case CONNECTED:
+ case WAIT_ENABLED:
+ case PART_UP_WAIT_ENAB:
+ default:
+ rc = ERROR;
+ dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n",
+ vscsi->state);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_handle_init_msg() - Respond to an Init Message
+ * @vscsi: Pointer to our adapter structure
+ *
+ * Must be called with interrupt lock held.
+ */
+static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
+{
+ long rc = ADAPT_SUCCESS;
+
+ switch (vscsi->state) {
+ case WAIT_ENABLED:
+ vscsi->state = PART_UP_WAIT_ENAB;
+ break;
+
+ case WAIT_CONNECTION:
+ rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
+ switch (rc) {
+ case H_SUCCESS:
+ vscsi->state = CONNECTED;
+ break;
+
+ case H_PARAMETER:
+ dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
+ rc);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
+ break;
+
+ case H_DROPPED:
+ dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
+ rc);
+ rc = ERROR;
+ ibmvscsis_post_disconnect(vscsi,
+ ERR_DISCONNECT_RECONNECT, 0);
+ break;
+
+ case H_CLOSED:
+ pr_warn("init_msg: failed to send, rc %ld\n", rc);
+ rc = 0;
+ break;
+ }
+ break;
+
+ case UNDEFINED:
+ rc = ERROR;
+ break;
+
+ case UNCONFIGURING:
+ break;
+
+ case PART_UP_WAIT_ENAB:
+ case CONNECTED:
+ case SRP_PROCESSING:
+ case WAIT_IDLE:
+ case NO_QUEUE:
+ case ERR_DISCONNECT:
+ case ERR_DISCONNECT_RECONNECT:
+ case ERR_DISCONNECTED:
+ default:
+ rc = ERROR;
+ dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n",
+ vscsi->state);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_init_msg() - Respond to an init message
+ * @vscsi: Pointer to our adapter structure
+ * @crq: Pointer to CRQ element containing the Init Message
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt, interrupt lock held
+ */
+static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
+{
+ long rc = ADAPT_SUCCESS;
+
+ pr_debug("init_msg: state 0x%hx\n", vscsi->state);
+
+ rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
+ (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
+ 0);
+ if (rc == H_SUCCESS) {
+ vscsi->client_data.partition_number =
+ be64_to_cpu(*(u64 *)vscsi->map_buf);
+ pr_debug("init_msg, part num %d\n",
+ vscsi->client_data.partition_number);
+ } else {
+ pr_debug("init_msg h_vioctl rc %ld\n", rc);
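+ /* not fatal: carry on without the client partition number */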
+ rc = ADAPT_SUCCESS;
+ }
+
+ if (crq->format == INIT_MSG) {
+ rc = ibmvscsis_handle_init_msg(vscsi);
+ } else if (crq->format == INIT_COMPLETE_MSG) {
+ rc = ibmvscsis_handle_init_compl_msg(vscsi);
+ } else {
+ rc = ERROR;
+ dev_err(&vscsi->dev, "init_msg: invalid format %d\n",
+ (uint)crq->format);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_parse_command() - Parse an element taken from the cmd rsp queue.
+ * @vscsi: Pointer to our adapter structure
+ * @crq: Pointer to CRQ element containing the SRP request
+ *
+ * This function will return success if the command queue element is valid
+ * and the srp iu or MAD request it pointed to was also valid. That does
+ * not mean that an error was not returned to the client.
+ *
+ * EXECUTION ENVIRONMENT:
+ * Interrupt, intr lock held
+ */
+static long ibmvscsis_parse_command(struct scsi_info *vscsi,
+ struct viosrp_crq *crq)
+{
+ long rc = ADAPT_SUCCESS;
+
+ switch (crq->valid) {
+ case VALID_CMD_RESP_EL:
+ switch (crq->format) {
+ case OS400_FORMAT:
+ case AIX_FORMAT:
+ case LINUX_FORMAT:
+ case MAD_FORMAT:
+ if (vscsi->flags & PROCESSING_MAD) {
+ rc = ERROR;
+ dev_err(&vscsi->dev, "parse_command: already processing mad\n");
+ ibmvscsis_post_disconnect(vscsi,
+ ERR_DISCONNECT_RECONNECT,
+ 0);
+ } else {
+ vscsi->flags |= PROCESSING_MAD;
+ rc = ibmvscsis_mad(vscsi, crq);
+ }
+ break;
+
+ case SRP_FORMAT:
+ ibmvscsis_srp_cmd(vscsi, crq);
+ break;
+
+ case MESSAGE_IN_CRQ:
+ if (crq->status == PING)
+ ibmvscsis_ping_response(vscsi);
+ break;
+
+ default:
+ dev_err(&vscsi->dev, "parse_command: invalid format %d\n",
+ (uint)crq->format);
+ ibmvscsis_post_disconnect(vscsi,
+ ERR_DISCONNECT_RECONNECT, 0);
+ break;
+ }
+ break;
+
+ case VALID_TRANS_EVENT:
+ rc = ibmvscsis_trans_event(vscsi, crq);
+ break;
+
+ case VALID_INIT_MSG:
+ rc = ibmvscsis_init_msg(vscsi, crq);
+ break;
+
+ default:
+ dev_err(&vscsi->dev, "parse_command: invalid valid field %d\n",
+ (uint)crq->valid);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ break;
+ }
+
+ /*
+ * Return only what the interrupt handler cares
+ * about. Most errors we keep right on trucking.
+ */
+ rc = vscsi->flags & SCHEDULE_DISCONNECT;
+
+ return rc;
+}
+
+static int read_dma_window(struct scsi_info *vscsi)
+{
+ struct vio_dev *vdev = vscsi->dma_dev;
+ const __be32 *dma_window;
+ const __be32 *prop;
+
+ /* TODO Using of_parse_dma_window would be better, but it doesn't give
+ * a way to read multiple windows without already knowing the size of
+ * a window or the number of windows.
+ */
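+ /*
+ * Layout sketch (inferred from the parsing below): the property holds
+ * a list of windows, each a <liobn> cell followed by
+ * #dma-address-cells of address and #dma-size-cells of size.
+ */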
+ dma_window = (const __be32 *)vio_get_attribute(vdev,
+ "ibm,my-dma-window",
+ NULL);
+ if (!dma_window) {
+ pr_err("Couldn't find ibm,my-dma-window property\n");
+ return -1;
+ }
+
+ vscsi->dds.window[LOCAL].liobn = be32_to_cpu(*dma_window);
+ dma_window++;
+
+ prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
+ NULL);
+ if (!prop) {
+ pr_warn("Couldn't find ibm,#dma-address-cells property\n");
+ dma_window++;
+ } else {
+ dma_window += be32_to_cpu(*prop);
+ }
+
+ prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
+ NULL);
+ if (!prop) {
+ pr_warn("Couldn't find ibm,#dma-size-cells property\n");
+ dma_window++;
+ } else {
+ dma_window += be32_to_cpu(*prop);
+ }
+
+ /* dma_window should point to the second window now */
+ vscsi->dds.window[REMOTE].liobn = be32_to_cpu(*dma_window);
+
+ return 0;
+}
+
+static struct ibmvscsis_tport *ibmvscsis_lookup_port(const char *name)
+{
+ struct ibmvscsis_tport *tport = NULL;
+ struct vio_dev *vdev;
+ struct scsi_info *vscsi;
+
+ spin_lock_bh(&ibmvscsis_dev_lock);
+ list_for_each_entry(vscsi, &ibmvscsis_dev_list, list) {
+ vdev = vscsi->dma_dev;
+ if (!strcmp(dev_name(&vdev->dev), name)) {
+ tport = &vscsi->tport;
+ break;
+ }
+ }
+ spin_unlock_bh(&ibmvscsis_dev_lock);
+
+ return tport;
+}
+
+/**
+ * ibmvscsis_parse_cmd() - Parse SRP Command
+ * @vscsi: Pointer to our adapter structure
+ * @cmd: Pointer to command element with SRP command
+ *
+ * Parse the SRP command; if it is valid, submit it to TCM.
+ * Note: The return code does not reflect the status of the SCSI CDB.
+ *
+ * EXECUTION ENVIRONMENT:
+ * Process level
+ */
+static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
+ struct ibmvscsis_cmd *cmd)
+{
+ struct iu_entry *iue = cmd->iue;
+ struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
+ struct ibmvscsis_nexus *nexus;
+ u64 data_len = 0;
+ enum dma_data_direction dir;
+ int attr = 0;
+ int rc = 0;
+
+ nexus = vscsi->tport.ibmv_nexus;
+ /*
+ * additional length in bytes. Note that the SRP spec says that
+ * additional length is in 4-byte words, but technically the
+ * additional length field is only the upper 6 bits of the byte.
+ * The lower 2 bits are reserved. If the lower 2 bits are 0 (as
+ * all reserved fields should be), then interpreting the byte as
+ * an int will yield the length in bytes.
+ */
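+ /*
+ * Worked example (illustrative): add_cdb_len = 0x08 encodes 2
+ * additional 4-byte words in its upper 6 bits, i.e. 8 bytes, which
+ * matches the value of the raw byte itself.
+ */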
+ if (srp->add_cdb_len & 0x03) {
+ dev_err(&vscsi->dev, "parse_cmd: reserved bits set in IU\n");
+ spin_lock_bh(&vscsi->intr_lock);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ ibmvscsis_free_cmd_resources(vscsi, cmd);
+ spin_unlock_bh(&vscsi->intr_lock);
+ return;
+ }
+
+ if (srp_get_desc_table(srp, &dir, &data_len)) {
+ dev_err(&vscsi->dev, "0x%llx: parsing SRP descriptor table failed.\n",
+ srp->tag);
+ goto fail;
+ }
+
+ cmd->rsp.sol_not = srp->sol_not;
+
+ switch (srp->task_attr) {
+ case SRP_SIMPLE_TASK:
+ attr = TCM_SIMPLE_TAG;
+ break;
+ case SRP_ORDERED_TASK:
+ attr = TCM_ORDERED_TAG;
+ break;
+ case SRP_HEAD_TASK:
+ attr = TCM_HEAD_TAG;
+ break;
+ case SRP_ACA_TASK:
+ attr = TCM_ACA_TAG;
+ break;
+ default:
+ dev_err(&vscsi->dev, "Invalid task attribute %d\n",
+ srp->task_attr);
+ goto fail;
+ }
+
+ cmd->se_cmd.tag = be64_to_cpu(srp->tag);
+
+ spin_lock_bh(&vscsi->intr_lock);
+ list_add_tail(&cmd->list, &vscsi->active_q);
+ spin_unlock_bh(&vscsi->intr_lock);
+
+ srp->lun.scsi_lun[0] &= 0x3f;
+
+ pr_debug("calling submit_cmd, se_cmd %p, lun 0x%llx, cdb 0x%x, attr:%d\n",
+ &cmd->se_cmd, scsilun_to_int(&srp->lun), (int)srp->cdb[0],
+ attr);
+
+ rc = target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb,
+ cmd->sense_buf, scsilun_to_int(&srp->lun),
+ data_len, attr, dir, 0);
+ if (rc) {
+ dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc);
+ goto fail;
+ }
+ return;
+
+fail:
+ spin_lock_bh(&vscsi->intr_lock);
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ spin_unlock_bh(&vscsi->intr_lock);
+}
+
+/**
+ * ibmvscsis_parse_task() - Parse SRP Task Management Request
+ * @vscsi: Pointer to our adapter structure
+ * @cmd: Pointer to command element with SRP task management request
+ *
+ * Parse the SRP task management request; if it is valid, submit it to TCM.
+ * Note: The return code does not reflect the status of the task management
+ * request.
+ *
+ * EXECUTION ENVIRONMENT:
+ * Process level
+ */
+static void ibmvscsis_parse_task(struct scsi_info *vscsi,
+ struct ibmvscsis_cmd *cmd)
+{
+ struct iu_entry *iue = cmd->iue;
+ struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt;
+ int tcm_type;
+ u64 tag_to_abort = 0;
+ int rc = 0;
+ struct ibmvscsis_nexus *nexus;
+
+ nexus = vscsi->tport.ibmv_nexus;
+
+ cmd->rsp.sol_not = srp_tsk->sol_not;
+
+ switch (srp_tsk->tsk_mgmt_func) {
+ case SRP_TSK_ABORT_TASK:
+ tcm_type = TMR_ABORT_TASK;
+ tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
+ break;
+ case SRP_TSK_ABORT_TASK_SET:
+ tcm_type = TMR_ABORT_TASK_SET;
+ break;
+ case SRP_TSK_CLEAR_TASK_SET:
+ tcm_type = TMR_CLEAR_TASK_SET;
+ break;
+ case SRP_TSK_LUN_RESET:
+ tcm_type = TMR_LUN_RESET;
+ break;
+ case SRP_TSK_CLEAR_ACA:
+ tcm_type = TMR_CLEAR_ACA;
+ break;
+ default:
+ dev_err(&vscsi->dev, "unknown task mgmt func %d\n",
+ srp_tsk->tsk_mgmt_func);
+ cmd->se_cmd.se_tmr_req->response =
+ TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
+ rc = -1;
+ break;
+ }
+
+ if (!rc) {
+ cmd->se_cmd.tag = be64_to_cpu(srp_tsk->tag);
+
+ spin_lock_bh(&vscsi->intr_lock);
+ list_add_tail(&cmd->list, &vscsi->active_q);
+ spin_unlock_bh(&vscsi->intr_lock);
+
+ srp_tsk->lun.scsi_lun[0] &= 0x3f;
+
+ pr_debug("calling submit_tmr, func %d\n",
+ srp_tsk->tsk_mgmt_func);
+ rc = target_submit_tmr(&cmd->se_cmd, nexus->se_sess, NULL,
+ scsilun_to_int(&srp_tsk->lun), srp_tsk,
+ tcm_type, GFP_KERNEL, tag_to_abort, 0);
+ if (rc) {
+ dev_err(&vscsi->dev, "target_submit_tmr failed, rc %d\n",
+ rc);
+ cmd->se_cmd.se_tmr_req->response =
+ TMR_FUNCTION_REJECTED;
+ }
+ }
+
+ if (rc)
+ transport_send_check_condition_and_sense(&cmd->se_cmd, 0, 0);
+}
+
+static void ibmvscsis_scheduler(struct work_struct *work)
+{
+ struct ibmvscsis_cmd *cmd = container_of(work, struct ibmvscsis_cmd,
+ work);
+ struct scsi_info *vscsi = cmd->adapter;
+
+ spin_lock_bh(&vscsi->intr_lock);
+
+ /* Remove from schedule_q */
+ list_del(&cmd->list);
+
+ /* Don't submit cmd if we're disconnecting */
+ if (vscsi->flags & (SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED)) {
+ ibmvscsis_free_cmd_resources(vscsi, cmd);
+
+ /* ibmvscsis_disconnect might be waiting for us */
+ if (list_empty(&vscsi->active_q) &&
+ list_empty(&vscsi->schedule_q) &&
+ (vscsi->flags & WAIT_FOR_IDLE)) {
+ vscsi->flags &= ~WAIT_FOR_IDLE;
+ complete(&vscsi->wait_idle);
+ }
+
+ spin_unlock_bh(&vscsi->intr_lock);
+ return;
+ }
+
+ spin_unlock_bh(&vscsi->intr_lock);
+
+ switch (cmd->type) {
+ case SCSI_CDB:
+ ibmvscsis_parse_cmd(vscsi, cmd);
+ break;
+ case TASK_MANAGEMENT:
+ ibmvscsis_parse_task(vscsi, cmd);
+ break;
+ default:
+ dev_err(&vscsi->dev, "scheduler, invalid cmd type %d\n",
+ cmd->type);
+ spin_lock_bh(&vscsi->intr_lock);
+ ibmvscsis_free_cmd_resources(vscsi, cmd);
+ spin_unlock_bh(&vscsi->intr_lock);
+ break;
+ }
+}
+
+static int ibmvscsis_alloc_cmds(struct scsi_info *vscsi, int num)
+{
+ struct ibmvscsis_cmd *cmd;
+ int i;
+
+ INIT_LIST_HEAD(&vscsi->free_cmd);
+ vscsi->cmd_pool = kcalloc(num, sizeof(struct ibmvscsis_cmd),
+ GFP_KERNEL);
+ if (!vscsi->cmd_pool)
+ return -ENOMEM;
+
+ for (i = 0, cmd = (struct ibmvscsis_cmd *)vscsi->cmd_pool; i < num;
+ i++, cmd++) {
+ cmd->adapter = vscsi;
+ INIT_WORK(&cmd->work, ibmvscsis_scheduler);
+ list_add_tail(&cmd->list, &vscsi->free_cmd);
+ }
+
+ return 0;
+}
+
+static void ibmvscsis_free_cmds(struct scsi_info *vscsi)
+{
+ kfree(vscsi->cmd_pool);
+ vscsi->cmd_pool = NULL;
+ INIT_LIST_HEAD(&vscsi->free_cmd);
+}
+
+/**
+ * ibmvscsis_service_wait_q() - Service Waiting Queue
+ * @timer: Pointer to timer which has expired
+ *
+ * This routine is called when the timer pops to service the waiting
+ * queue. Elements on the queue have completed, their responses have been
+ * copied to the client, but the client's response queue was full so
+ * the queue message could not be sent. The routine takes the interrupt
+ * lock and calls ibmvscsis_send_messages().
+ *
+ * EXECUTION ENVIRONMENT:
+ * called at interrupt level
+ */
+static enum hrtimer_restart ibmvscsis_service_wait_q(struct hrtimer *timer)
+{
+ struct timer_cb *p_timer = container_of(timer, struct timer_cb, timer);
+ struct scsi_info *vscsi = container_of(p_timer, struct scsi_info,
+ rsp_q_timer);
+
+ spin_lock_bh(&vscsi->intr_lock);
+ p_timer->timer_pops += 1;
+ p_timer->started = false;
+ ibmvscsis_send_messages(vscsi);
+ spin_unlock_bh(&vscsi->intr_lock);
+
+ return HRTIMER_NORESTART;
+}
+
+static long ibmvscsis_alloctimer(struct scsi_info *vscsi)
+{
+ struct timer_cb *p_timer;
+
+ p_timer = &vscsi->rsp_q_timer;
+ hrtimer_init(&p_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
+ p_timer->timer.function = ibmvscsis_service_wait_q;
+ p_timer->started = false;
+ p_timer->timer_pops = 0;
+
+ return ADAPT_SUCCESS;
+}
+
+static void ibmvscsis_freetimer(struct scsi_info *vscsi)
+{
+ struct timer_cb *p_timer;
+
+ p_timer = &vscsi->rsp_q_timer;
+
+ (void)hrtimer_cancel(&p_timer->timer);
+
+ p_timer->started = false;
+ p_timer->timer_pops = 0;
+}
+
+static irqreturn_t ibmvscsis_interrupt(int dummy, void *data)
+{
+ struct scsi_info *vscsi = data;
+
+ vio_disable_interrupts(vscsi->dma_dev);
+ tasklet_schedule(&vscsi->work_task);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ibmvscsis_check_q() - Check for a Valid Queued Init Message
+ * @vscsi: Pointer to our adapter structure
+ *
+ * Checks whether an initialize message was queued by the initiator
+ * while the timing window was open. This function is called from
+ * probe after the CRQ is created and interrupts are enabled.
+ * It is only needed by adapters that wait for some event before
+ * completing the init handshake with the client. For ibmvscsis, that
+ * event is the port being enabled.
+ *
+ * EXECUTION ENVIRONMENT:
+ * Process level only, interrupt lock held
+ */
+static long ibmvscsis_check_q(struct scsi_info *vscsi)
+{
+ uint format;
+ long rc;
+
+ rc = ibmvscsis_check_init_msg(vscsi, &format);
+ if (rc)
+ ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+ else if (format == UNUSED_FORMAT)
+ vscsi->state = WAIT_ENABLED;
+ else
+ vscsi->state = PART_UP_WAIT_ENAB;
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_enable_change_state() - Set new state based on enabled status
+ * @vscsi: Pointer to our adapter structure
+ *
+ * This function determines our new state now that we are enabled. This
+ * may involve sending an Init Complete message to the client.
+ *
+ * Must be called with interrupt lock held.
+ */
+static long ibmvscsis_enable_change_state(struct scsi_info *vscsi)
+{
+ long rc = ADAPT_SUCCESS;
+
+handle_state_change:
+ switch (vscsi->state) {
+ case WAIT_ENABLED:
+ rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
+ switch (rc) {
+ case H_SUCCESS:
+ case H_DROPPED:
+ case H_CLOSED:
+ vscsi->state = WAIT_CONNECTION;
+ rc = ADAPT_SUCCESS;
+ break;
+
+ case H_PARAMETER:
+ break;
+
+ case H_HARDWARE:
+ break;
+
+ default:
+ vscsi->state = UNDEFINED;
+ rc = H_HARDWARE;
+ break;
+ }
+ break;
+ case PART_UP_WAIT_ENAB:
+ rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
+ switch (rc) {
+ case H_SUCCESS:
+ vscsi->state = CONNECTED;
+ rc = ADAPT_SUCCESS;
+ break;
+
+ case H_DROPPED:
+ case H_CLOSED:
+ vscsi->state = WAIT_ENABLED;
+ goto handle_state_change;
+
+ case H_PARAMETER:
+ break;
+
+ case H_HARDWARE:
+ break;
+
+ default:
+ rc = H_HARDWARE;
+ break;
+ }
+ break;
+
+ case WAIT_CONNECTION:
+ case WAIT_IDLE:
+ case SRP_PROCESSING:
+ case CONNECTED:
+ rc = ADAPT_SUCCESS;
+ break;
+ /* should not be able to get here */
+ case UNCONFIGURING:
+ rc = ERROR;
+ vscsi->state = UNDEFINED;
+ break;
+
+ /* driver should never allow this to happen */
+ case ERR_DISCONNECT:
+ case ERR_DISCONNECT_RECONNECT:
+ default:
+ dev_err(&vscsi->dev, "in invalid state %d during enable_change_state\n",
+ vscsi->state);
+ rc = ADAPT_SUCCESS;
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_create_command_q() - Create Command Queue
+ * @vscsi: Pointer to our adapter structure
+ * @num_cmds: Currently unused. In the future, may be used to determine
+ * the size of the CRQ.
+ *
+ * Allocates memory for the command queue, maps it into an ioba, and
+ * registers the command/response queue with the hypervisor.
+ *
+ * EXECUTION ENVIRONMENT:
+ * Process level only
+ */
+static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
+{
+ long rc = 0;
+ int pages;
+ struct vio_dev *vdev = vscsi->dma_dev;
+
+ /* We might support multiple pages in the future, but just 1 for now */
+ pages = 1;
+
+ vscsi->cmd_q.size = pages;
+
+ vscsi->cmd_q.base_addr =
+ (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
+ if (!vscsi->cmd_q.base_addr)
+ return -ENOMEM;
+
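+ /*
+ * CRQ_PER_PAGE is a power of two, so the mask makes the queue index
+ * wrap cheaply: next = (index + 1) & mask (see handle_crq).
+ */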
+ vscsi->cmd_q.mask = ((uint)pages * CRQ_PER_PAGE) - 1;
+
+ vscsi->cmd_q.crq_token = dma_map_single(&vdev->dev,
+ vscsi->cmd_q.base_addr,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&vdev->dev, vscsi->cmd_q.crq_token)) {
+ free_page((unsigned long)vscsi->cmd_q.base_addr);
+ return -ENOMEM;
+ }
+
+ rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, PAGE_SIZE);
+ if (rc) {
+ if (rc == H_CLOSED) {
+ vscsi->state = WAIT_ENABLED;
+ rc = 0;
+ } else {
+ dma_unmap_single(&vdev->dev, vscsi->cmd_q.crq_token,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ free_page((unsigned long)vscsi->cmd_q.base_addr);
+ rc = -ENODEV;
+ }
+ } else {
+ vscsi->state = WAIT_ENABLED;
+ }
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_destroy_command_q() - Destroy Command Queue
+ * @vscsi: Pointer to our adapter structure
+ *
+ * Releases memory for command queue and unmaps mapped remote memory.
+ *
+ * EXECUTION ENVIRONMENT:
+ * Process level only
+ */
+static void ibmvscsis_destroy_command_q(struct scsi_info *vscsi)
+{
+ dma_unmap_single(&vscsi->dma_dev->dev, vscsi->cmd_q.crq_token,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ free_page((unsigned long)vscsi->cmd_q.base_addr);
+ vscsi->cmd_q.base_addr = NULL;
+ vscsi->state = NO_QUEUE;
+}
+
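+/*
+ * When fast_fail is set and a READ/WRITE completes with a HARDWARE_ERROR
+ * sense key while either all or none of the data was transferred, the
+ * status is reported as NO_SENSE and the command is flagged
+ * CMD_FAST_FAIL so the response carries an unsolicited notification
+ * (see srp_build_response()).
+ */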
+static u8 ibmvscsis_fast_fail(struct scsi_info *vscsi,
+ struct ibmvscsis_cmd *cmd)
+{
+ struct iu_entry *iue = cmd->iue;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
+ struct scsi_sense_hdr sshdr;
+ u8 rc = se_cmd->scsi_status;
+
+ if (vscsi->fast_fail && (READ_CMD(srp->cdb) || WRITE_CMD(srp->cdb)))
+ if (scsi_normalize_sense(se_cmd->sense_buffer,
+ se_cmd->scsi_sense_length, &sshdr))
+ if (sshdr.sense_key == HARDWARE_ERROR &&
+ (se_cmd->residual_count == 0 ||
+ se_cmd->residual_count == se_cmd->data_length)) {
+ rc = NO_SENSE;
+ cmd->flags |= CMD_FAST_FAIL;
+ }
+
+ return rc;
+}
+
+/**
+ * srp_build_response() - Build an SRP response buffer
+ * @vscsi: Pointer to our adapter structure
+ * @cmd: Pointer to command for which to send the response
+ * @len_p: Where to return the length of the IU response sent. This
+ * is needed to construct the CRQ response.
+ *
+ * Build the SRP response buffer and copy it to the client's memory space.
+ */
+static long srp_build_response(struct scsi_info *vscsi,
+ struct ibmvscsis_cmd *cmd, uint *len_p)
+{
+ struct iu_entry *iue = cmd->iue;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct srp_rsp *rsp;
+ uint len;
+ u32 rsp_code;
+ char *data;
+ u32 *tsk_status;
+ long rc = ADAPT_SUCCESS;
+
+ spin_lock_bh(&vscsi->intr_lock);
+
+ rsp = &vio_iu(iue)->srp.rsp;
+ len = sizeof(*rsp);
+ memset(rsp, 0, len);
+ data = rsp->data;
+
+ rsp->opcode = SRP_RSP;
+
+ if (vscsi->credit > 0 && vscsi->state == SRP_PROCESSING)
+ rsp->req_lim_delta = cpu_to_be32(vscsi->credit);
+ else
+ rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
+ rsp->tag = cmd->rsp.tag;
+ rsp->flags = 0;
+
+ if (cmd->type == SCSI_CDB) {
+ rsp->status = ibmvscsis_fast_fail(vscsi, cmd);
+ if (rsp->status) {
+ pr_debug("build_resp: cmd %p, scsi status %d\n", cmd,
+ (int)rsp->status);
+ ibmvscsis_determine_resid(se_cmd, rsp);
+ if (se_cmd->scsi_sense_length && se_cmd->sense_buffer) {
+ rsp->sense_data_len =
+ cpu_to_be32(se_cmd->scsi_sense_length);
+ rsp->flags |= SRP_RSP_FLAG_SNSVALID;
+ len += se_cmd->scsi_sense_length;
+ memcpy(data, se_cmd->sense_buffer,
+ se_cmd->scsi_sense_length);
+ }
+ rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
+ UCSOLNT_RESP_SHIFT;
+ } else if (cmd->flags & CMD_FAST_FAIL) {
+ pr_debug("build_resp: cmd %p, fast fail\n", cmd);
+ rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
+ UCSOLNT_RESP_SHIFT;
+ } else {
+ rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >>
+ SCSOLNT_RESP_SHIFT;
+ }
+ } else {
+ /* this is task management */
+ rsp->status = 0;
+ rsp->resp_data_len = cpu_to_be32(4);
+ rsp->flags |= SRP_RSP_FLAG_RSPVALID;
+
+ switch (se_cmd->se_tmr_req->response) {
+ case TMR_FUNCTION_COMPLETE:
+ case TMR_TASK_DOES_NOT_EXIST:
+ rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_COMPLETE;
+ rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >>
+ SCSOLNT_RESP_SHIFT;
+ break;
+ case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
+ case TMR_LUN_DOES_NOT_EXIST:
+ rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_NOT_SUPPORTED;
+ rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
+ UCSOLNT_RESP_SHIFT;
+ break;
+ case TMR_FUNCTION_FAILED:
+ case TMR_FUNCTION_REJECTED:
+ default:
+ rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_FAILED;
+ rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
+ UCSOLNT_RESP_SHIFT;
+ break;
+ }
+
+ tsk_status = (u32 *)data;
+ *tsk_status = cpu_to_be32(rsp_code);
+ data = (char *)(tsk_status + 1);
+ len += 4;
+ }
+
+ dma_wmb();
+ rc = h_copy_rdma(len, vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma,
+ vscsi->dds.window[REMOTE].liobn,
+ be64_to_cpu(iue->remote_token));
+
+ switch (rc) {
+ case H_SUCCESS:
+ vscsi->credit = 0;
+ *len_p = len;
+ break;
+ case H_PERMISSION:
+ if (connection_broken(vscsi))
+ vscsi->flags |= RESPONSE_Q_DOWN | CLIENT_FAILED;
+
+ dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld, flags 0x%x, state 0x%hx\n",
+ rc, vscsi->flags, vscsi->state);
+ break;
+ case H_SOURCE_PARM:
+ case H_DEST_PARM:
+ default:
+ dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld\n",
+ rc);
+ break;
+ }
+
+ spin_unlock_bh(&vscsi->intr_lock);
+
+ return rc;
+}
+
+static int ibmvscsis_rdma(struct ibmvscsis_cmd *cmd, struct scatterlist *sg,
+ int nsg, struct srp_direct_buf *md, int nmd,
+ enum dma_data_direction dir, unsigned int bytes)
+{
+ struct iu_entry *iue = cmd->iue;
+ struct srp_target *target = iue->target;
+ struct scsi_info *vscsi = target->ldata;
+ struct scatterlist *sgp;
+ dma_addr_t client_ioba, server_ioba;
+ ulong buf_len;
+ ulong client_len, server_len;
+ int md_idx;
+ long tx_len;
+ long rc = 0;
+
+ pr_debug("rdma: dir %d, bytes 0x%x\n", dir, bytes);
+
+ if (bytes == 0)
+ return 0;
+
+ sgp = sg;
+ client_len = 0;
+ server_len = 0;
+ md_idx = 0;
+ tx_len = bytes;
+
+ do {
+ if (client_len == 0) {
+ if (md_idx >= nmd) {
+ dev_err(&vscsi->dev, "rdma: ran out of client memory descriptors\n");
+ rc = -EIO;
+ break;
+ }
+ client_ioba = be64_to_cpu(md[md_idx].va);
+ client_len = be32_to_cpu(md[md_idx].len);
+ }
+ if (server_len == 0) {
+ if (!sgp) {
+ dev_err(&vscsi->dev, "rdma: ran out of scatter/gather list\n");
+ rc = -EIO;
+ break;
+ }
+ server_ioba = sg_dma_address(sgp);
+ server_len = sg_dma_len(sgp);
+ }
+
+ buf_len = tx_len;
+
+ if (buf_len > client_len)
+ buf_len = client_len;
+
+ if (buf_len > server_len)
+ buf_len = server_len;
+
+ if (buf_len > max_vdma_size)
+ buf_len = max_vdma_size;
+
+ if (dir == DMA_TO_DEVICE) {
+ /* read from client */
+ rc = h_copy_rdma(buf_len,
+ vscsi->dds.window[REMOTE].liobn,
+ client_ioba,
+ vscsi->dds.window[LOCAL].liobn,
+ server_ioba);
+ } else {
+ /* write to client */
+ struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
+
+ if (!READ_CMD(srp->cdb))
+ print_hex_dump_bytes(" data:", DUMP_PREFIX_NONE,
+ sg_virt(sgp), buf_len);
+ /* The h_copy_rdma will cause phyp, running in another
+ * partition, to read memory, so we need to make sure
+ * the data has been written out, hence these syncs.
+ */
+ /* ensure that everything is in memory */
+ isync();
+ /* ensure that memory has been made visible */
+ dma_wmb();
+ rc = h_copy_rdma(buf_len,
+ vscsi->dds.window[LOCAL].liobn,
+ server_ioba,
+ vscsi->dds.window[REMOTE].liobn,
+ client_ioba);
+ }
+ switch (rc) {
+ case H_SUCCESS:
+ break;
+ case H_PERMISSION:
+ case H_SOURCE_PARM:
+ case H_DEST_PARM:
+ if (connection_broken(vscsi)) {
+ spin_lock_bh(&vscsi->intr_lock);
+ vscsi->flags |=
+ (RESPONSE_Q_DOWN | CLIENT_FAILED);
+ spin_unlock_bh(&vscsi->intr_lock);
+ }
+ dev_err(&vscsi->dev, "rdma: h_copy_rdma failed, rc %ld\n",
+ rc);
+ break;
+
+ default:
+ dev_err(&vscsi->dev, "rdma: unknown error %ld from h_copy_rdma\n",
+ rc);
+ break;
+ }
+
+ if (!rc) {
+ tx_len -= buf_len;
+ if (tx_len) {
+ client_len -= buf_len;
+ if (client_len == 0)
+ md_idx++;
+ else
+ client_ioba += buf_len;
+
+ server_len -= buf_len;
+ if (server_len == 0)
+ sgp = sg_next(sgp);
+ else
+ server_ioba += buf_len;
+ } else {
+ break;
+ }
+ }
+ } while (!rc);
+
+ return rc;
+}
+
+/**
+ * ibmvscsis_handle_crq() - Handle CRQ
+ * @data: Pointer to our adapter structure
+ *
+ * Read the command elements from the command queue and copy the payloads
+ * associated with the command elements to local memory and execute the
+ * SRP requests.
+ *
+ * Note: this is an edge-triggered interrupt; it cannot be shared.
+ */
+static void ibmvscsis_handle_crq(unsigned long data)
+{
+ struct scsi_info *vscsi = (struct scsi_info *)data;
+ struct viosrp_crq *crq;
+ long rc;
+ bool ack = true;
+ volatile u8 valid;
+
+ spin_lock_bh(&vscsi->intr_lock);
+
+ pr_debug("got interrupt\n");
+
+ /*
+ * if we received a transport event and are waiting for all pending
+ * commands to complete, anything on the command queue belongs to a
+ * new connection, so do nothing
+ */
+ if (TARGET_STOP(vscsi)) {
+ vio_enable_interrupts(vscsi->dma_dev);
+
+ pr_debug("handle_crq, don't process: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
+ spin_unlock_bh(&vscsi->intr_lock);
+ return;
+ }
+
+ rc = vscsi->flags & SCHEDULE_DISCONNECT;
+ crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
+ valid = crq->valid;
+ dma_rmb();
+
+ while (valid) {
+ /*
+ * These are edge-triggered interrupts. After dropping out of
+ * the while loop, the code must check for work since an
+ * interrupt could be lost and an element left on the queue,
+ * hence the label.
+ */
+cmd_work:
+ vscsi->cmd_q.index =
+ (vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask;
+
+ if (!rc) {
+ rc = ibmvscsis_parse_command(vscsi, crq);
+ } else {
+ if ((uint)crq->valid == VALID_TRANS_EVENT) {
+ /*
+ * must service the transport layer events even
+ * in an error state; don't break out until all
+ * the consecutive transport events have been
+ * processed
+ */
+ rc = ibmvscsis_trans_event(vscsi, crq);
+ } else if (vscsi->flags & TRANS_EVENT) {
+ /*
+ * if a transport event has occurred, leave
+ * everything but transport events on the queue
+ */
+ pr_debug("handle_crq, ignoring\n");
+
+ /*
+ * need to decrement the queue index so we can
+ * look at the element again
+ */
+ if (vscsi->cmd_q.index)
+ vscsi->cmd_q.index -= 1;
+ else
+ /*
+ * index is at 0, so it just wrapped;
+ * point it at the last element in the queue
+ */
+ vscsi->cmd_q.index = vscsi->cmd_q.mask;
+ break;
+ }
+ }
+
+ crq->valid = INVALIDATE_CMD_RESP_EL;
+
+ crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
+ valid = crq->valid;
+ dma_rmb();
+ }
+
+ if (!rc) {
+ if (ack) {
+ vio_enable_interrupts(vscsi->dma_dev);
+ ack = false;
+ pr_debug("handle_crq, reenabling interrupts\n");
+ }
+ valid = crq->valid;
+ dma_rmb();
+ if (valid)
+ goto cmd_work;
+ } else {
+ pr_debug("handle_crq, error: flags 0x%x, state 0x%hx, crq index 0x%x\n",
+ vscsi->flags, vscsi->state, vscsi->cmd_q.index);
+ }
+
+ pr_debug("Leaving handle_crq: schedule_q empty %d, flags 0x%x, state 0x%hx\n",
+ (int)list_empty(&vscsi->schedule_q), vscsi->flags,
+ vscsi->state);
+
+ spin_unlock_bh(&vscsi->intr_lock);
+}
+
+static int ibmvscsis_probe(struct vio_dev *vdev,
+ const struct vio_device_id *id)
+{
+ struct scsi_info *vscsi;
+ int rc = 0;
+ long hrc = 0;
+ char wq_name[24];
+
+ vscsi = kzalloc(sizeof(*vscsi), GFP_KERNEL);
+ if (!vscsi) {
+ rc = -ENOMEM;
+ pr_err("probe: allocation of adapter failed\n");
+ return rc;
+ }
+
+ vscsi->dma_dev = vdev;
+ vscsi->dev = vdev->dev;
+ INIT_LIST_HEAD(&vscsi->schedule_q);
+ INIT_LIST_HEAD(&vscsi->waiting_rsp);
+ INIT_LIST_HEAD(&vscsi->active_q);
+
+ snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s",
+ dev_name(&vdev->dev));
+
+ pr_debug("probe tport_name: %s\n", vscsi->tport.tport_name);
+
+ rc = read_dma_window(vscsi);
+ if (rc)
+ goto free_adapter;
+ pr_debug("Probe: liobn 0x%x, riobn 0x%x\n",
+ vscsi->dds.window[LOCAL].liobn,
+ vscsi->dds.window[REMOTE].liobn);
+
+ strcpy(vscsi->eye, "VSCSI ");
+ strncat(vscsi->eye, vdev->name, MAX_EYE);
+
+ vscsi->dds.unit_id = vdev->unit_address;
+
+ spin_lock_bh(&ibmvscsis_dev_lock);
+ list_add_tail(&vscsi->list, &ibmvscsis_dev_list);
+ spin_unlock_bh(&ibmvscsis_dev_lock);
+
+ /*
+ * TBD: How do we determine # of cmds to request? Do we know how
+ * many "children" we have?
+ */
+ vscsi->request_limit = INITIAL_SRP_LIMIT;
+ rc = srp_target_alloc(&vscsi->target, &vdev->dev, vscsi->request_limit,
+ SRP_MAX_IU_LEN);
+ if (rc)
+ goto rem_list;
+
+ vscsi->target.ldata = vscsi;
+
+ rc = ibmvscsis_alloc_cmds(vscsi, vscsi->request_limit);
+ if (rc) {
+ dev_err(&vscsi->dev, "alloc_cmds failed, rc %d, num %d\n",
+ rc, vscsi->request_limit);
+ goto free_target;
+ }
+
+ /*
+ * Note: the lock is used in freeing timers, so must initialize
+ * first so that ordering in case of error is correct.
+ */
+ spin_lock_init(&vscsi->intr_lock);
+
+ rc = ibmvscsis_alloctimer(vscsi);
+ if (rc) {
+ dev_err(&vscsi->dev, "probe: alloctimer failed, rc %d\n", rc);
+ goto free_cmds;
+ }
+
+ rc = ibmvscsis_create_command_q(vscsi, 256);
+ if (rc) {
+ dev_err(&vscsi->dev, "probe: create_command_q failed, rc %d\n",
+ rc);
+ goto free_timer;
+ }
+
+ vscsi->map_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!vscsi->map_buf) {
+ rc = -ENOMEM;
+ dev_err(&vscsi->dev, "probe: allocating cmd buffer failed\n");
+ goto destroy_queue;
+ }
+
+ vscsi->map_ioba = dma_map_single(&vdev->dev, vscsi->map_buf, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&vdev->dev, vscsi->map_ioba)) {
+ dev_err(&vscsi->dev, "probe: error mapping command buffer\n");
+ goto free_buf;
+ }
+
+ hrc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
+ (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
+ 0);
+ if (hrc == H_SUCCESS)
+ vscsi->client_data.partition_number =
+ be64_to_cpu(*(u64 *)vscsi->map_buf);
+ /*
+ * We expect the VIOCTL to fail if we're configured as "any
+ * client can connect" and the client isn't activated yet.
+ * We'll make the call again when the client sends an init message.
+ */
+ pr_debug("probe hrc %ld, client partition num %d\n",
+ hrc, vscsi->client_data.partition_number);
+
+ tasklet_init(&vscsi->work_task, ibmvscsis_handle_crq,
+ (unsigned long)vscsi);
+
+ init_completion(&vscsi->wait_idle);
+
+ snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev));
+ vscsi->work_q = create_workqueue(wq_name);
+ if (!vscsi->work_q) {
+ rc = -ENOMEM;
+ dev_err(&vscsi->dev, "create_workqueue failed\n");
+ goto unmap_buf;
+ }
+
+ rc = request_irq(vdev->irq, ibmvscsis_interrupt, 0, "ibmvscsis", vscsi);
+ if (rc) {
+ rc = -EPERM;
+ dev_err(&vscsi->dev, "probe: request_irq failed, rc %d\n", rc);
+ goto destroy_WQ;
+ }
+
+ spin_lock_bh(&vscsi->intr_lock);
+ rc = vio_enable_interrupts(vdev);
+ if (rc) {
+ dev_err(&vscsi->dev, "enabling interrupts failed, rc %d\n", rc);
+ rc = -ENODEV;
+ spin_unlock_bh(&vscsi->intr_lock);
+ goto free_irq;
+ }
+
+ if (ibmvscsis_check_q(vscsi)) {
+ rc = ERROR;
+ dev_err(&vscsi->dev, "probe: check_q failed, rc %d\n", rc);
+ spin_unlock_bh(&vscsi->intr_lock);
+ goto disable_interrupt;
+ }
+ spin_unlock_bh(&vscsi->intr_lock);
+
+ dev_set_drvdata(&vdev->dev, vscsi);
+
+ return 0;
+
+disable_interrupt:
+ vio_disable_interrupts(vdev);
+free_irq:
+ free_irq(vdev->irq, vscsi);
+destroy_WQ:
+ destroy_workqueue(vscsi->work_q);
+unmap_buf:
+ dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+free_buf:
+ kfree(vscsi->map_buf);
+destroy_queue:
+ tasklet_kill(&vscsi->work_task);
+ ibmvscsis_unregister_command_q(vscsi);
+ ibmvscsis_destroy_command_q(vscsi);
+free_timer:
+ ibmvscsis_freetimer(vscsi);
+free_cmds:
+ ibmvscsis_free_cmds(vscsi);
+free_target:
+ srp_target_free(&vscsi->target);
+rem_list:
+ spin_lock_bh(&ibmvscsis_dev_lock);
+ list_del(&vscsi->list);
+ spin_unlock_bh(&ibmvscsis_dev_lock);
+free_adapter:
+ kfree(vscsi);
+
+ return rc;
+}
+
+static int ibmvscsis_remove(struct vio_dev *vdev)
+{
+ struct scsi_info *vscsi = dev_get_drvdata(&vdev->dev);
+
+ pr_debug("remove (%s)\n", dev_name(&vscsi->dma_dev->dev));
+
+ /*
+ * TBD: Need to handle if there are commands on the waiting_rsp q
+ * Actually, can there still be cmds outstanding to tcm?
+ */
+
+ vio_disable_interrupts(vdev);
+ free_irq(vdev->irq, vscsi);
+ destroy_workqueue(vscsi->work_q);
+ dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ kfree(vscsi->map_buf);
+ tasklet_kill(&vscsi->work_task);
+ ibmvscsis_unregister_command_q(vscsi);
+ ibmvscsis_destroy_command_q(vscsi);
+ ibmvscsis_freetimer(vscsi);
+ ibmvscsis_free_cmds(vscsi);
+ srp_target_free(&vscsi->target);
+ spin_lock_bh(&ibmvscsis_dev_lock);
+ list_del(&vscsi->list);
+ spin_unlock_bh(&ibmvscsis_dev_lock);
+ kfree(vscsi);
+
+ return 0;
+}
+
+static ssize_t system_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", system_id);
+}
+
+static ssize_t partition_number_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%x\n", partition_number);
+}
+
+static ssize_t unit_address_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_info *vscsi = container_of(dev, struct scsi_info, dev);
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", vscsi->dma_dev->unit_address);
+}
+
+static int ibmvscsis_get_system_info(void)
+{
+ struct device_node *rootdn, *vdevdn;
+ const char *id, *model, *name;
+ const uint *num;
+
+ rootdn = of_find_node_by_path("/");
+ if (!rootdn)
+ return -ENOENT;
+
+ model = of_get_property(rootdn, "model", NULL);
+ id = of_get_property(rootdn, "system-id", NULL);
+ if (model && id)
+ snprintf(system_id, sizeof(system_id), "%s-%s", model, id);
+
+ name = of_get_property(rootdn, "ibm,partition-name", NULL);
+ if (name)
+ strncpy(partition_name, name, sizeof(partition_name));
+
+ num = of_get_property(rootdn, "ibm,partition-no", NULL);
+ if (num)
+ partition_number = *num;
+
+ of_node_put(rootdn);
+
+ vdevdn = of_find_node_by_path("/vdevice");
+ if (vdevdn) {
+ const uint *mvds;
+
+ mvds = of_get_property(vdevdn, "ibm,max-virtual-dma-size",
+ NULL);
+ if (mvds)
+ max_vdma_size = *mvds;
+ of_node_put(vdevdn);
+ }
+
+ return 0;
+}
+
+static char *ibmvscsis_get_fabric_name(void)
+{
+ return "ibmvscsis";
+}
+
+static char *ibmvscsis_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+ struct ibmvscsis_tport *tport =
+ container_of(se_tpg, struct ibmvscsis_tport, se_tpg);
+
+ return tport->tport_name;
+}
+
+static u16 ibmvscsis_get_tag(struct se_portal_group *se_tpg)
+{
+ struct ibmvscsis_tport *tport =
+ container_of(se_tpg, struct ibmvscsis_tport, se_tpg);
+
+ return tport->tport_tpgt;
+}
+
+static u32 ibmvscsis_get_default_depth(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int ibmvscsis_check_true(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int ibmvscsis_check_false(struct se_portal_group *se_tpg)
+{
+ return 0;
+}
+
+static u32 ibmvscsis_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int ibmvscsis_check_stop_free(struct se_cmd *se_cmd)
+{
+ return target_put_sess_cmd(se_cmd);
+}
+
+static void ibmvscsis_release_cmd(struct se_cmd *se_cmd)
+{
+ struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
+ se_cmd);
+ struct scsi_info *vscsi = cmd->adapter;
+
+ pr_debug("release_cmd %p, flags %d\n", se_cmd, cmd->flags);
+
+ spin_lock_bh(&vscsi->intr_lock);
+ /* Remove from active_q */
+ list_del(&cmd->list);
+ list_add_tail(&cmd->list, &vscsi->waiting_rsp);
+ ibmvscsis_send_messages(vscsi);
+ spin_unlock_bh(&vscsi->intr_lock);
+}
+
+static u32 ibmvscsis_sess_get_index(struct se_session *se_sess)
+{
+ return 0;
+}
+
+static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
+{
+ struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
+ se_cmd);
+ struct iu_entry *iue = cmd->iue;
+ int rc;
+
+ pr_debug("write_pending, se_cmd %p, length 0x%x\n",
+ se_cmd, se_cmd->data_length);
+
+ rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
+ 1, 1);
+ if (rc) {
+ pr_err("srp_transfer_data() failed: %d\n", rc);
+ return -EAGAIN;
+ }
+ /*
+ * We now tell TCM to add this WRITE CDB directly into the TCM storage
+ * object execution queue.
+ */
+ target_execute_cmd(se_cmd);
+ return 0;
+}
+
+static int ibmvscsis_write_pending_status(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static void ibmvscsis_set_default_node_attrs(struct se_node_acl *nacl)
+{
+}
+
+static int ibmvscsis_get_cmd_state(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
+{
+ struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
+ se_cmd);
+ struct iu_entry *iue = cmd->iue;
+ struct scsi_info *vscsi = cmd->adapter;
+ char *sd;
+ uint len = 0;
+ int rc;
+
+ pr_debug("queue_data_in, se_cmd %p, length 0x%x\n",
+ se_cmd, se_cmd->data_length);
+
+ rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 1,
+ 1);
+ if (rc) {
+ pr_err("srp_transfer_data failed: %d\n", rc);
+ sd = se_cmd->sense_buffer;
+ se_cmd->scsi_sense_length = 18;
+ memset(se_cmd->sense_buffer, 0, se_cmd->scsi_sense_length);
+ /* Logical Unit Communication Time-out asc/ascq = 0x0801 */
+ scsi_build_sense_buffer(0, se_cmd->sense_buffer, MEDIUM_ERROR,
+ 0x08, 0x01);
+ }
+
+ srp_build_response(vscsi, cmd, &len);
+ cmd->rsp.format = SRP_FORMAT;
+ cmd->rsp.len = len;
+
+ return 0;
+}
+
+static int ibmvscsis_queue_status(struct se_cmd *se_cmd)
+{
+ struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
+ se_cmd);
+ struct scsi_info *vscsi = cmd->adapter;
+ uint len;
+
+ pr_debug("queue_status %p\n", se_cmd);
+
+ srp_build_response(vscsi, cmd, &len);
+ cmd->rsp.format = SRP_FORMAT;
+ cmd->rsp.len = len;
+
+ return 0;
+}
+
+static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+ struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
+ se_cmd);
+ struct scsi_info *vscsi = cmd->adapter;
+ uint len;
+
+ pr_debug("queue_tm_rsp %p, status %d\n",
+ se_cmd, (int)se_cmd->se_tmr_req->response);
+
+ srp_build_response(vscsi, cmd, &len);
+ cmd->rsp.format = SRP_FORMAT;
+ cmd->rsp.len = len;
+}
+
+static void ibmvscsis_aborted_task(struct se_cmd *se_cmd)
+{
+ /* TBD: What (if anything) should we do here? */
+ pr_debug("ibmvscsis_aborted_task %p\n", se_cmd);
+}
+
+static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
+ struct config_group *group,
+ const char *name)
+{
+ struct ibmvscsis_tport *tport;
+
+ tport = ibmvscsis_lookup_port(name);
+ if (tport) {
+ tport->tport_proto_id = SCSI_PROTOCOL_SRP;
+ pr_debug("make_tport(%s), pointer:%p, tport_id:%x\n",
+ name, tport, tport->tport_proto_id);
+ return &tport->tport_wwn;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static void ibmvscsis_drop_tport(struct se_wwn *wwn)
+{
+ struct ibmvscsis_tport *tport = container_of(wwn,
+ struct ibmvscsis_tport,
+ tport_wwn);
+
+ pr_debug("drop_tport(%s)\n",
+ config_item_name(&tport->tport_wwn.wwn_group.cg_item));
+}
+
+static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn,
+ struct config_group *group,
+ const char *name)
+{
+ struct ibmvscsis_tport *tport =
+ container_of(wwn, struct ibmvscsis_tport, tport_wwn);
+ int rc;
+
+ tport->releasing = false;
+
+ rc = core_tpg_register(&tport->tport_wwn, &tport->se_tpg,
+ tport->tport_proto_id);
+ if (rc)
+ return ERR_PTR(rc);
+
+ return &tport->se_tpg;
+}
+
+static void ibmvscsis_drop_tpg(struct se_portal_group *se_tpg)
+{
+ struct ibmvscsis_tport *tport = container_of(se_tpg,
+ struct ibmvscsis_tport,
+ se_tpg);
+
+ tport->releasing = true;
+ tport->enabled = false;
+
+ /*
+ * Release the virtual I_T Nexus for this ibmvscsis TPG
+ */
+ ibmvscsis_drop_nexus(tport);
+ /*
+ * Deregister the se_tpg from TCM..
+ */
+ core_tpg_deregister(se_tpg);
+}
+
+static ssize_t ibmvscsis_wwn_version_show(struct config_item *item,
+ char *page)
+{
+ return scnprintf(page, PAGE_SIZE, "%s\n", IBMVSCSIS_VERSION);
+}
+CONFIGFS_ATTR_RO(ibmvscsis_wwn_, version);
+
+static struct configfs_attribute *ibmvscsis_wwn_attrs[] = {
+ &ibmvscsis_wwn_attr_version,
+ NULL,
+};
+
+static ssize_t ibmvscsis_tpg_enable_show(struct config_item *item,
+ char *page)
+{
+ struct se_portal_group *se_tpg = to_tpg(item);
+ struct ibmvscsis_tport *tport = container_of(se_tpg,
+ struct ibmvscsis_tport,
+ se_tpg);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", (tport->enabled) ? 1 : 0);
+}
+
+static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_portal_group *se_tpg = to_tpg(item);
+ struct ibmvscsis_tport *tport = container_of(se_tpg,
+ struct ibmvscsis_tport,
+ se_tpg);
+ struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
+ unsigned long tmp;
+ int rc;
+ long lrc;
+
+ rc = kstrtoul(page, 0, &tmp);
+ if (rc < 0) {
+ pr_err("Unable to extract srpt_tpg_store_enable\n");
+ return -EINVAL;
+ }
+
+ if ((tmp != 0) && (tmp != 1)) {
+ pr_err("Illegal value for srpt_tpg_store_enable\n");
+ return -EINVAL;
+ }
+
+ if (tmp) {
+ tport->enabled = true;
+ spin_lock_bh(&vscsi->intr_lock);
+ lrc = ibmvscsis_enable_change_state(vscsi);
+ if (lrc)
+ pr_err("enable_change_state failed, rc %ld state %d\n",
+ lrc, vscsi->state);
+ spin_unlock_bh(&vscsi->intr_lock);
+ } else {
+ tport->enabled = false;
+ }
+
+ pr_debug("tpg_enable_store, state %d\n", vscsi->state);
+
+ return count;
+}
+CONFIGFS_ATTR(ibmvscsis_tpg_, enable);
+
+static struct configfs_attribute *ibmvscsis_tpg_attrs[] = {
+ &ibmvscsis_tpg_attr_enable,
+ NULL,
+};
+
+static const struct target_core_fabric_ops ibmvscsis_ops = {
+ .module = THIS_MODULE,
+ .name = "ibmvscsis",
+ .get_fabric_name = ibmvscsis_get_fabric_name,
+ .tpg_get_wwn = ibmvscsis_get_fabric_wwn,
+ .tpg_get_tag = ibmvscsis_get_tag,
+ .tpg_get_default_depth = ibmvscsis_get_default_depth,
+ .tpg_check_demo_mode = ibmvscsis_check_true,
+ .tpg_check_demo_mode_cache = ibmvscsis_check_true,
+ .tpg_check_demo_mode_write_protect = ibmvscsis_check_false,
+ .tpg_check_prod_mode_write_protect = ibmvscsis_check_false,
+ .tpg_get_inst_index = ibmvscsis_tpg_get_inst_index,
+ .check_stop_free = ibmvscsis_check_stop_free,
+ .release_cmd = ibmvscsis_release_cmd,
+ .sess_get_index = ibmvscsis_sess_get_index,
+ .write_pending = ibmvscsis_write_pending,
+ .write_pending_status = ibmvscsis_write_pending_status,
+ .set_default_node_attributes = ibmvscsis_set_default_node_attrs,
+ .get_cmd_state = ibmvscsis_get_cmd_state,
+ .queue_data_in = ibmvscsis_queue_data_in,
+ .queue_status = ibmvscsis_queue_status,
+ .queue_tm_rsp = ibmvscsis_queue_tm_rsp,
+ .aborted_task = ibmvscsis_aborted_task,
+ /*
+ * Setup function pointers for logic in target_core_fabric_configfs.c
+ */
+ .fabric_make_wwn = ibmvscsis_make_tport,
+ .fabric_drop_wwn = ibmvscsis_drop_tport,
+ .fabric_make_tpg = ibmvscsis_make_tpg,
+ .fabric_drop_tpg = ibmvscsis_drop_tpg,
+
+ .tfc_wwn_attrs = ibmvscsis_wwn_attrs,
+ .tfc_tpg_base_attrs = ibmvscsis_tpg_attrs,
+};
+
+static void ibmvscsis_dev_release(struct device *dev) { }
+
+static struct class_attribute ibmvscsis_class_attrs[] = {
+ __ATTR_NULL,
+};
+
+static struct device_attribute dev_attr_system_id =
+ __ATTR(system_id, S_IRUGO, system_id_show, NULL);
+
+static struct device_attribute dev_attr_partition_number =
+ __ATTR(partition_number, S_IRUGO, partition_number_show, NULL);
+
+static struct device_attribute dev_attr_unit_address =
+ __ATTR(unit_address, S_IRUGO, unit_address_show, NULL);
+
+static struct attribute *ibmvscsis_dev_attrs[] = {
+ &dev_attr_system_id.attr,
+ &dev_attr_partition_number.attr,
+ &dev_attr_unit_address.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ibmvscsis_dev);
+
+static struct class ibmvscsis_class = {
+ .name = "ibmvscsis",
+ .dev_release = ibmvscsis_dev_release,
+ .class_attrs = ibmvscsis_class_attrs,
+ .dev_groups = ibmvscsis_dev_groups,
+};
+
+static struct vio_device_id ibmvscsis_device_table[] = {
+ { "v-scsi-host", "IBM,v-scsi-host" },
+ { "", "" }
+};
+MODULE_DEVICE_TABLE(vio, ibmvscsis_device_table);
+
+static struct vio_driver ibmvscsis_driver = {
+ .name = "ibmvscsis",
+ .id_table = ibmvscsis_device_table,
+ .probe = ibmvscsis_probe,
+ .remove = ibmvscsis_remove,
+};
+
+/*
+ * ibmvscsis_init() - Kernel Module initialization
+ *
+ * Note: vio_register_driver() registers callback functions, and at least one
+ * of those callback functions calls TCM - Linux IO Target Subsystem, thus
+ * the SCSI Target template must be registered before vio_register_driver()
+ * is called.
+ */
+static int __init ibmvscsis_init(void)
+{
+ int rc = 0;
+
+ rc = ibmvscsis_get_system_info();
+ if (rc) {
+ pr_err("rc %d from get_system_info\n", rc);
+ goto out;
+ }
+
+ rc = class_register(&ibmvscsis_class);
+ if (rc) {
+ pr_err("failed class register\n");
+ goto out;
+ }
+
+ rc = target_register_template(&ibmvscsis_ops);
+ if (rc) {
+ pr_err("rc %d from target_register_template\n", rc);
+ goto unregister_class;
+ }
+
+ rc = vio_register_driver(&ibmvscsis_driver);
+ if (rc) {
+ pr_err("rc %d from vio_register_driver\n", rc);
+ goto unregister_target;
+ }
+
+ return 0;
+
+unregister_target:
+ target_unregister_template(&ibmvscsis_ops);
+unregister_class:
+ class_unregister(&ibmvscsis_class);
+out:
+ return rc;
+}
+
+static void __exit ibmvscsis_exit(void)
+{
+ pr_info("Unregister IBM virtual SCSI host driver\n");
+ vio_unregister_driver(&ibmvscsis_driver);
+ target_unregister_template(&ibmvscsis_ops);
+ class_unregister(&ibmvscsis_class);
+}
+
+MODULE_DESCRIPTION("IBMVSCSIS fabric driver");
+MODULE_AUTHOR("Bryant G. Ly and Michael Cyr");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(IBMVSCSIS_VERSION);
+module_init(ibmvscsis_init);
+module_exit(ibmvscsis_exit);
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
new file mode 100644
index 000000000..981a0c992
--- /dev/null
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
@@ -0,0 +1,346 @@
+/*******************************************************************************
+ * IBM Virtual SCSI Target Driver
+ * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
+ * Santiago Leon (santil@us.ibm.com) IBM Corp.
+ * Linda Xie (lxie@us.ibm.com) IBM Corp.
+ *
+ * Copyright (C) 2005-2011 FUJITA Tomonori <tomof@acm.org>
+ * Copyright (C) 2010 Nicholas A. Bellinger <nab@kernel.org>
+ * Copyright (C) 2016 Bryant G. Ly <bryantly@linux.vnet.ibm.com> IBM Corp.
+ *
+ * Authors: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
+ * Authors: Michael Cyr <mikecyr@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ ****************************************************************************/
+
+#ifndef __H_IBMVSCSI_TGT
+#define __H_IBMVSCSI_TGT
+
+#include "libsrp.h"
+
+#define SYS_ID_NAME_LEN 64
+#define PARTITION_NAMELEN 96
+#define IBMVSCSIS_NAMELEN 32
+
+#define MSG_HI 0
+#define MSG_LOW 1
+
+#define MAX_CMD_Q_PAGES 4
+#define CRQ_PER_PAGE (PAGE_SIZE / sizeof(struct viosrp_crq))
+/* in terms of number of elements */
+#define DEFAULT_CMD_Q_SIZE CRQ_PER_PAGE
+#define MAX_CMD_Q_SIZE (DEFAULT_CMD_Q_SIZE * MAX_CMD_Q_PAGES)
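+/*
+ * Assuming 4 KiB pages and 16-byte viosrp_crq elements, CRQ_PER_PAGE
+ * works out to 256 elements and MAX_CMD_Q_SIZE to 1024.
+ */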
+
+#define SRP_VIOLATION 0x102 /* general error code */
+
+/*
+ * SRP buffer formats, as of SRP version 16.a, supported by this driver.
+ */
+#define SUPPORTED_FORMATS ((SRP_DATA_DESC_DIRECT << 1) | \
+ (SRP_DATA_DESC_INDIRECT << 1))
+
+#define SCSI_LUN_ADDR_METHOD_FLAT 1
+
+struct dma_window {
+ u32 liobn; /* Unique per vdevice */
+ u64 tce_base; /* Physical location of the TCE table */
+ u64 tce_size; /* Size of the TCE table in bytes */
+};
+
+struct target_dds {
+ u64 unit_id; /* 64 bit will force alignment */
+#define NUM_DMA_WINDOWS 2
+#define LOCAL 0
+#define REMOTE 1
+ struct dma_window window[NUM_DMA_WINDOWS];
+
+ /* root node property "ibm,partition-no" */
+ uint partition_num;
+ char partition_name[PARTITION_NAMELEN];
+};
+
+#define MAX_NUM_PORTS 1
+#define MAX_H_COPY_RDMA (128 * 1024)
+
+#define MAX_EYE 64
+
+/* Return codes */
+#define ADAPT_SUCCESS 0L
+/* choose error codes that do not conflict with PHYP */
+#define ERROR -40L
+
+struct format_code {
+ u8 reserved;
+ u8 buffers;
+};
+
+struct client_info {
+#define SRP_VERSION "16.a"
+ char srp_version[8];
+ /* root node property ibm,partition-name */
+ char partition_name[PARTITION_NAMELEN];
+ /* root node property ibm,partition-no */
+ u32 partition_number;
+ /* initially 1 */
+ u32 mad_version;
+ u32 os_type;
+};
+
+/*
+ * Changing this constant changes the number of seconds to wait before
+ * considering the client will never service its queue again.
+ */
+#define SECONDS_TO_CONSIDER_FAILED 30
+/*
+ * These constants set the polling period used to determine if the client
+ * has freed at least one element in the response queue.
+ */
+#define WAIT_SECONDS 1
+#define WAIT_NANO_SECONDS 5000
+#define MAX_TIMER_POPS ((1000000 / WAIT_NANO_SECONDS) * \
+ SECONDS_TO_CONSIDER_FAILED)
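+/*
+ * With the values above, MAX_TIMER_POPS evaluates to
+ * (1000000 / 5000) * 30 = 6000 pops.
+ */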
+/*
+ * general purpose timer control block
+ * which can be used for multiple functions
+ */
+struct timer_cb {
+ struct hrtimer timer;
+ /*
+ * how long it has been since the client
+ * serviced the queue. The variable is incremented
+ * in the service_wait_q routine and cleared
+ * in send messages
+ */
+ int timer_pops;
+ /* the timer is started */
+ bool started;
+};
+
+struct cmd_queue {
+ /* kva */
+ struct viosrp_crq *base_addr;
+ dma_addr_t crq_token;
+ /* used to maintain index */
+ uint mask;
+ /* current element */
+ uint index;
+ int size;
+};
+
+#define SCSOLNT_RESP_SHIFT 1
+#define UCSOLNT_RESP_SHIFT 2
+
+#define SCSOLNT BIT(SCSOLNT_RESP_SHIFT)
+#define UCSOLNT BIT(UCSOLNT_RESP_SHIFT)
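+/*
+ * The initiator's SCSOLNT (solicit on success) and UCSOLNT (solicit on
+ * unsuccessful completion) request bits are shifted down into bit 0 of
+ * the SRP response's sol_not field when the response is built; see
+ * srp_build_response().
+ */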
+
+enum cmd_type {
+ SCSI_CDB = 0x01,
+ TASK_MANAGEMENT = 0x02,
+ /* MAD or addressed to port 0 */
+ ADAPTER_MAD = 0x04,
+ UNSET_TYPE = 0x08,
+};
+
+struct iu_rsp {
+ u8 format;
+ u8 sol_not;
+ u16 len;
+ /* tag is just to help client identify cmd, so don't translate be/le */
+ u64 tag;
+};
+
+struct ibmvscsis_cmd {
+ struct list_head list;
+ /* Used for TCM Core operations */
+ struct se_cmd se_cmd;
+ struct iu_entry *iue;
+ struct iu_rsp rsp;
+ struct work_struct work;
+ struct scsi_info *adapter;
+ /* Sense buffer that will be mapped into outgoing status */
+ unsigned char sense_buf[TRANSPORT_SENSE_BUFFER];
+ u64 init_time;
+#define CMD_FAST_FAIL BIT(0)
+ u32 flags;
+ char type;
+};
+
+struct ibmvscsis_nexus {
+ struct se_session *se_sess;
+};
+
+struct ibmvscsis_tport {
+ /* SCSI protocol the tport is providing */
+ u8 tport_proto_id;
+ /* ASCII formatted WWPN for SRP Target port */
+ char tport_name[IBMVSCSIS_NAMELEN];
+ /* Returned by ibmvscsis_make_tport() */
+ struct se_wwn tport_wwn;
+ /* Returned by ibmvscsis_make_tpg() */
+ struct se_portal_group se_tpg;
+ /* ibmvscsis port target portal group tag for TCM */
+ u16 tport_tpgt;
+ /* Pointer to TCM session for I_T Nexus */
+ struct ibmvscsis_nexus *ibmv_nexus;
+ bool enabled;
+ bool releasing;
+};
+
+struct scsi_info {
+ struct list_head list;
+ char eye[MAX_EYE];
+
+ /* commands waiting for space on response queue */
+ struct list_head waiting_rsp;
+#define NO_QUEUE 0x00
+#define WAIT_ENABLED 0x01
+ /* driver has received an initialize command */
+#define PART_UP_WAIT_ENAB 0x02
+#define WAIT_CONNECTION 0x04
+ /* have established a connection */
+#define CONNECTED 0x08
+ /* at least one port is processing SRP IU */
+#define SRP_PROCESSING 0x10
+ /* remove request received */
+#define UNCONFIGURING 0x20
+ /* disconnect by letting adapter go idle, no error */
+#define WAIT_IDLE 0x40
+ /* disconnecting to clear an error */
+#define ERR_DISCONNECT 0x80
+ /* disconnect to clear error state, then come back up */
+#define ERR_DISCONNECT_RECONNECT 0x100
+ /* disconnected after clearing an error */
+#define ERR_DISCONNECTED 0x200
+ /* A series of errors caused unexpected errors */
+#define UNDEFINED 0x400
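+ /*
+ * Typical progression (illustrative): NO_QUEUE -> WAIT_ENABLED ->
+ * WAIT_CONNECTION -> CONNECTED -> SRP_PROCESSING; the ERR_* and
+ * UNDEFINED states are entered only on failures.
+ */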
+ u16 state;
+ int fast_fail;
+ struct target_dds dds;
+ char *cmd_pool;
+ /* list of free commands */
+ struct list_head free_cmd;
+ /* command elements ready for scheduler */
+ struct list_head schedule_q;
+ /* commands sent to TCM */
+ struct list_head active_q;
+ caddr_t *map_buf;
+ /* ioba of map buffer */
+ dma_addr_t map_ioba;
+ /* allowable number of outstanding SRP requests */
+ int request_limit;
+ /* extra credit */
+ int credit;
+ /* outstanding transactions against credit limit */
+ int debit;
+
+ /* allow only one outstanding mad request */
+#define PROCESSING_MAD 0x00002
+ /* Waiting to go idle */
+#define WAIT_FOR_IDLE 0x00004
+ /* H_REG_CRQ called */
+#define CRQ_CLOSED 0x00010
+ /* detected that client has failed */
+#define CLIENT_FAILED 0x00040
+ /* detected that transport event occurred */
+#define TRANS_EVENT 0x00080
+ /* don't attempt to send anything to the client */
+#define RESPONSE_Q_DOWN 0x00100
+ /* request made to schedule disconnect handler */
+#define SCHEDULE_DISCONNECT 0x00400
+ /* disconnect handler is scheduled */
+#define DISCONNECT_SCHEDULED 0x00800
+ u32 flags;
+ /* adapter lock */
+ spinlock_t intr_lock;
+ /* information needed to manage command queue */
+ struct cmd_queue cmd_q;
+ /* used in hcall to copy response back into srp buffer */
+ u64 empty_iu_id;
+ /* used in crq, to tag what iu the response is for */
+ u64 empty_iu_tag;
+ uint new_state;
+ /* control block for the response queue timer */
+ struct timer_cb rsp_q_timer;
+ /* keep last client to enable proper accounting */
+ struct client_info client_data;
+ /* what can this client do */
+ u32 client_cap;
+ /*
+ * The following two fields capture state and flag changes that
+ * can occur when the lock is given up. In the original design,
+ * the lock was held during calls into phyp; however, phyp did
+ * not meet the PAPR architecture. This is a workaround.
+ */
+ u16 phyp_acr_state;
+ u32 phyp_acr_flags;
+
+ struct workqueue_struct *work_q;
+ struct completion wait_idle;
+ struct device dev;
+ struct vio_dev *dma_dev;
+ struct srp_target target;
+ struct ibmvscsis_tport tport;
+ struct tasklet_struct work_task;
+ struct work_struct proc_work;
+};
+
+/*
+ * Provide a constant that allows software to detect the adapter is
+ * disconnecting from the client from one of several states.
+ */
+#define IS_DISCONNECTING (UNCONFIGURING | ERR_DISCONNECT_RECONNECT | \
+ ERR_DISCONNECT)
+
+/*
+ * Provide a constant that can be used with interrupt handling that
+ * essentially lets the interrupt handler know that all requests should
+ * be thrown out.
+ */
+#define DONT_PROCESS_STATE (IS_DISCONNECTING | UNDEFINED | \
+ ERR_DISCONNECTED | WAIT_IDLE)
+
+/*
+ * If any of these flag bits are set then do not allow the interrupt
+ * handler to schedule the off level handler.
+ */
+#define BLOCK (DISCONNECT_SCHEDULED)
+
+/* State and transition events that stop the interrupt handler */
+#define TARGET_STOP(VSCSI) (long)(((VSCSI)->state & DONT_PROCESS_STATE) | \
+ ((VSCSI)->flags & BLOCK))
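+/*
+ * TARGET_STOP() is non-zero when the adapter is in one of the
+ * DONT_PROCESS_STATE states or a BLOCK flag is set; the interrupt
+ * handler checks it before touching the command queue.
+ */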
+
+/* flag bits that are not reset during disconnect */
+#define PRESERVE_FLAG_FIELDS 0
+
+#define vio_iu(IUE) ((union viosrp_iu *)((IUE)->sbuf->buf))
+
+#define READ_CMD(cdb) (((cdb)[0] & 0x1F) == 8)
+#define WRITE_CMD(cdb) (((cdb)[0] & 0x1F) == 0xA)
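+/*
+ * The masks above match the opcode group bits shared by, e.g., READ_6
+ * (0x08), READ_10 (0x28) and READ_16 (0x88), or WRITE_6 (0x0A),
+ * WRITE_10 (0x2A) and WRITE_16 (0x8A).
+ */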
+
+#ifndef H_GET_PARTNER_INFO
+#define H_GET_PARTNER_INFO 0x0000000000000008LL
+#endif
+
+#define h_copy_rdma(l, sa, sb, da, db) \
+ plpar_hcall_norets(H_COPY_RDMA, l, sa, sb, da, db)
+#define h_vioctl(u, o, a, u1, u2, u3, u4) \
+ plpar_hcall_norets(H_VIOCTL, u, o, a, u1, u2)
+#define h_reg_crq(ua, tok, sz) \
+ plpar_hcall_norets(H_REG_CRQ, ua, tok, sz)
+#define h_free_crq(ua) \
+ plpar_hcall_norets(H_FREE_CRQ, ua)
+#define h_send_crq(ua, d1, d2) \
+ plpar_hcall_norets(H_SEND_CRQ, ua, d1, d2)
+
+#endif
diff --git a/drivers/scsi/ibmvscsi_tgt/libsrp.c b/drivers/scsi/ibmvscsi_tgt/libsrp.c
new file mode 100644
index 000000000..5a4cc28ca
--- /dev/null
+++ b/drivers/scsi/ibmvscsi_tgt/libsrp.c
@@ -0,0 +1,427 @@
+/*******************************************************************************
+ * SCSI RDMA Protocol lib functions
+ *
+ * Copyright (C) 2006 FUJITA Tomonori <tomof@acm.org>
+ * Copyright (C) 2016 Bryant G. Ly <bryantly@linux.vnet.ibm.com> IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ ***********************************************************************/
+
+#define pr_fmt(fmt) "libsrp: " fmt
+
+#include <linux/printk.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/kfifo.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <scsi/srp.h>
+#include <target/target_core_base.h>
+#include "libsrp.h"
+#include "ibmvscsi_tgt.h"
+
+static int srp_iu_pool_alloc(struct srp_queue *q, size_t max,
+ struct srp_buf **ring)
+{
+ struct iu_entry *iue;
+ int i;
+
+ q->pool = kcalloc(max, sizeof(struct iu_entry *), GFP_KERNEL);
+ if (!q->pool)
+ return -ENOMEM;
+ q->items = kcalloc(max, sizeof(struct iu_entry), GFP_KERNEL);
+ if (!q->items)
+ goto free_pool;
+
+ spin_lock_init(&q->lock);
+ kfifo_init(&q->queue, (void *)q->pool, max * sizeof(void *));
+
+ for (i = 0, iue = q->items; i < max; i++) {
+ kfifo_in(&q->queue, (void *)&iue, sizeof(void *));
+ iue->sbuf = ring[i];
+ iue++;
+ }
+ return 0;
+
+free_pool:
+ kfree(q->pool);
+ return -ENOMEM;
+}
+
+static void srp_iu_pool_free(struct srp_queue *q)
+{
+ kfree(q->items);
+ kfree(q->pool);
+}
+
+static struct srp_buf **srp_ring_alloc(struct device *dev,
+ size_t max, size_t size)
+{
+ struct srp_buf **ring;
+ int i;
+
+ ring = kcalloc(max, sizeof(struct srp_buf *), GFP_KERNEL);
+ if (!ring)
+ return NULL;
+
+ for (i = 0; i < max; i++) {
+ ring[i] = kzalloc(sizeof(*ring[i]), GFP_KERNEL);
+ if (!ring[i])
+ goto out;
+ ring[i]->buf = dma_alloc_coherent(dev, size, &ring[i]->dma,
+ GFP_KERNEL);
+ if (!ring[i]->buf)
+ goto out;
+ }
+ return ring;
+
+out:
+ for (i = 0; i < max && ring[i]; i++) {
+ if (ring[i]->buf) {
+ dma_free_coherent(dev, size, ring[i]->buf,
+ ring[i]->dma);
+ }
+ kfree(ring[i]);
+ }
+ kfree(ring);
+
+ return NULL;
+}
+
+static void srp_ring_free(struct device *dev, struct srp_buf **ring,
+ size_t max, size_t size)
+{
+ int i;
+
+ for (i = 0; i < max; i++) {
+ dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
+ kfree(ring[i]);
+ }
+ kfree(ring);
+}
+
+int srp_target_alloc(struct srp_target *target, struct device *dev,
+ size_t nr, size_t iu_size)
+{
+ int err;
+
+ spin_lock_init(&target->lock);
+
+ target->dev = dev;
+
+ target->srp_iu_size = iu_size;
+ target->rx_ring_size = nr;
+ target->rx_ring = srp_ring_alloc(target->dev, nr, iu_size);
+ if (!target->rx_ring)
+ return -ENOMEM;
+ err = srp_iu_pool_alloc(&target->iu_queue, nr, target->rx_ring);
+ if (err)
+ goto free_ring;
+
+ dev_set_drvdata(target->dev, target);
+ return 0;
+
+free_ring:
+ srp_ring_free(target->dev, target->rx_ring, nr, iu_size);
+ return -ENOMEM;
+}
+
+void srp_target_free(struct srp_target *target)
+{
+ dev_set_drvdata(target->dev, NULL);
+ srp_ring_free(target->dev, target->rx_ring, target->rx_ring_size,
+ target->srp_iu_size);
+ srp_iu_pool_free(&target->iu_queue);
+}
+
+struct iu_entry *srp_iu_get(struct srp_target *target)
+{
+ struct iu_entry *iue = NULL;
+
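+ /* Pop a free IU pointer off the fifo; anything short of a full
+  * pointer read is unexpected and yields NULL.
+  */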
+ if (kfifo_out_locked(&target->iu_queue.queue, (void *)&iue,
+ sizeof(void *),
+ &target->iu_queue.lock) != sizeof(void *)) {
+ WARN_ONCE(1, "unexpected fifo state");
+ return NULL;
+ }
+ if (!iue)
+ return iue;
+ iue->target = target;
+ iue->flags = 0;
+ return iue;
+}
+
+void srp_iu_put(struct iu_entry *iue)
+{
+ kfifo_in_locked(&iue->target->iu_queue.queue, (void *)&iue,
+ sizeof(void *), &iue->target->iu_queue.lock);
+}
+
+static int srp_direct_data(struct ibmvscsis_cmd *cmd, struct srp_direct_buf *md,
+ enum dma_data_direction dir, srp_rdma_t rdma_io,
+ int dma_map, int ext_desc)
+{
+ struct iu_entry *iue = NULL;
+ struct scatterlist *sg = NULL;
+ int err, nsg = 0, len;
+
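+ /* With dma_map set, map the target-core scatterlist and RDMA
+  * directly to or from it; otherwise only the descriptor length
+  * is honoured.
+  */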
+ if (dma_map) {
+ iue = cmd->iue;
+ sg = cmd->se_cmd.t_data_sg;
+ nsg = dma_map_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents,
+ DMA_BIDIRECTIONAL);
+ if (!nsg) {
+ pr_err("fail to map %p %d\n", iue,
+ cmd->se_cmd.t_data_nents);
+ return 0;
+ }
+ len = min(cmd->se_cmd.data_length, be32_to_cpu(md->len));
+ } else {
+ len = be32_to_cpu(md->len);
+ }
+
+ err = rdma_io(cmd, sg, nsg, md, 1, dir, len);
+
+ if (dma_map)
+ dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);
+
+ return err;
+}
+
+static int srp_indirect_data(struct ibmvscsis_cmd *cmd, struct srp_cmd *srp_cmd,
+ struct srp_indirect_buf *id,
+ enum dma_data_direction dir, srp_rdma_t rdma_io,
+ int dma_map, int ext_desc)
+{
+ struct iu_entry *iue = NULL;
+ struct srp_direct_buf *md = NULL;
+ struct scatterlist dummy, *sg = NULL;
+ dma_addr_t token = 0;
+ int err = 0;
+ int nmd, nsg = 0, len;
+
+ if (dma_map || ext_desc) {
+ iue = cmd->iue;
+ sg = cmd->se_cmd.t_data_sg;
+ }
+
+ nmd = be32_to_cpu(id->table_desc.len) / sizeof(struct srp_direct_buf);
+
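+ /* Fast path: every descriptor is embedded in the SRP command, so
+  * the indirect table need not be fetched via RDMA.
+  */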
+ if ((dir == DMA_FROM_DEVICE && nmd == srp_cmd->data_in_desc_cnt) ||
+ (dir == DMA_TO_DEVICE && nmd == srp_cmd->data_out_desc_cnt)) {
+ md = &id->desc_list[0];
+ goto rdma;
+ }
+
+ if (ext_desc && dma_map) {
+ md = dma_alloc_coherent(iue->target->dev,
+ be32_to_cpu(id->table_desc.len),
+ &token, GFP_KERNEL);
+ if (!md) {
+ pr_err("Can't get dma memory %u\n",
+ be32_to_cpu(id->table_desc.len));
+ return -ENOMEM;
+ }
+
+ sg_init_one(&dummy, md, be32_to_cpu(id->table_desc.len));
+ sg_dma_address(&dummy) = token;
+ sg_dma_len(&dummy) = be32_to_cpu(id->table_desc.len);
+ err = rdma_io(cmd, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
+ be32_to_cpu(id->table_desc.len));
+ if (err) {
+ pr_err("Error copying indirect table %d\n", err);
+ goto free_mem;
+ }
+ } else {
+ pr_err("This command uses external indirect buffer\n");
+ return -EINVAL;
+ }
+
+rdma:
+ if (dma_map) {
+ nsg = dma_map_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents,
+ DMA_BIDIRECTIONAL);
+ if (!nsg) {
+ pr_err("fail to map %p %d\n", iue,
+ cmd->se_cmd.t_data_nents);
+ err = -EIO;
+ goto free_mem;
+ }
+ len = min(cmd->se_cmd.data_length, be32_to_cpu(id->len));
+ } else {
+ len = be32_to_cpu(id->len);
+ }
+
+ err = rdma_io(cmd, sg, nsg, md, nmd, dir, len);
+
+ if (dma_map)
+ dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);
+
+free_mem:
+ if (token && dma_map) {
+ dma_free_coherent(iue->target->dev,
+ be32_to_cpu(id->table_desc.len), md, token);
+ }
+ return err;
+}
+
+static int data_out_desc_size(struct srp_cmd *cmd)
+{
+ int size = 0;
+ u8 fmt = cmd->buf_fmt >> 4;
+
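+ /* The high nibble of buf_fmt selects the data-out descriptor
+  * format and hence how many bytes precede any data-in descriptors.
+  */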
+ switch (fmt) {
+ case SRP_NO_DATA_DESC:
+ break;
+ case SRP_DATA_DESC_DIRECT:
+ size = sizeof(struct srp_direct_buf);
+ break;
+ case SRP_DATA_DESC_INDIRECT:
+ size = sizeof(struct srp_indirect_buf) +
+ sizeof(struct srp_direct_buf) * cmd->data_out_desc_cnt;
+ break;
+ default:
+ pr_err("client error. Invalid data_out_format %x\n", fmt);
+ break;
+ }
+ return size;
+}
+
+/*
+ * TODO: this can be called multiple times for a single command if it
+ * has very long data.
+ */
+int srp_transfer_data(struct ibmvscsis_cmd *cmd, struct srp_cmd *srp_cmd,
+ srp_rdma_t rdma_io, int dma_map, int ext_desc)
+{
+ struct srp_direct_buf *md;
+ struct srp_indirect_buf *id;
+ enum dma_data_direction dir;
+ int offset, err = 0;
+ u8 format;
+
+ if (!cmd->se_cmd.t_data_nents)
+ return 0;
+
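+ /* Per the SRP spec the additional CDB length is a multiple of four
+  * bytes and the two low bits of add_cdb_len are reserved; mask them
+  * off to find the data descriptors past the additional CDB.
+  */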
+ offset = srp_cmd->add_cdb_len & ~3;
+
+ dir = srp_cmd_direction(srp_cmd);
+ if (dir == DMA_FROM_DEVICE)
+ offset += data_out_desc_size(srp_cmd);
+
+ if (dir == DMA_TO_DEVICE)
+ format = srp_cmd->buf_fmt >> 4;
+ else
+ format = srp_cmd->buf_fmt & ((1U << 4) - 1);
+
+ switch (format) {
+ case SRP_NO_DATA_DESC:
+ break;
+ case SRP_DATA_DESC_DIRECT:
+ md = (struct srp_direct_buf *)(srp_cmd->add_data + offset);
+ err = srp_direct_data(cmd, md, dir, rdma_io, dma_map, ext_desc);
+ break;
+ case SRP_DATA_DESC_INDIRECT:
+ id = (struct srp_indirect_buf *)(srp_cmd->add_data + offset);
+ err = srp_indirect_data(cmd, srp_cmd, id, dir, rdma_io, dma_map,
+ ext_desc);
+ break;
+ default:
+ pr_err("Unknown format %d %x\n", dir, format);
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+u64 srp_data_length(struct srp_cmd *cmd, enum dma_data_direction dir)
+{
+ struct srp_direct_buf *md;
+ struct srp_indirect_buf *id;
+ u64 len = 0;
+ uint offset = cmd->add_cdb_len & ~3;
+ u8 fmt;
+
+ if (dir == DMA_TO_DEVICE) {
+ fmt = cmd->buf_fmt >> 4;
+ } else {
+ fmt = cmd->buf_fmt & ((1U << 4) - 1);
+ offset += data_out_desc_size(cmd);
+ }
+
+ switch (fmt) {
+ case SRP_NO_DATA_DESC:
+ break;
+ case SRP_DATA_DESC_DIRECT:
+ md = (struct srp_direct_buf *)(cmd->add_data + offset);
+ len = be32_to_cpu(md->len);
+ break;
+ case SRP_DATA_DESC_INDIRECT:
+ id = (struct srp_indirect_buf *)(cmd->add_data + offset);
+ len = be32_to_cpu(id->len);
+ break;
+ default:
+ pr_err("invalid data format %x\n", fmt);
+ break;
+ }
+ return len;
+}
+
+int srp_get_desc_table(struct srp_cmd *srp_cmd, enum dma_data_direction *dir,
+ u64 *data_len)
+{
+ struct srp_indirect_buf *idb;
+ struct srp_direct_buf *db;
+ uint add_cdb_offset;
+ int rc;
+
+ /*
+ * The pointer computations below will only be compiled correctly
+ * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
+ * whether srp_cmd::add_data has been declared as a byte pointer.
+ */
+ BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
+ && !__same_type(srp_cmd->add_data[0], (u8)0));
+
+ BUG_ON(!dir);
+ BUG_ON(!data_len);
+
+ rc = 0;
+ *data_len = 0;
+
+ *dir = DMA_NONE;
+
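+ /* A data-in descriptor (low nibble of buf_fmt) maps to
+  * DMA_FROM_DEVICE, a data-out descriptor (high nibble) to
+  * DMA_TO_DEVICE.
+  */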
+ if (srp_cmd->buf_fmt & 0xf)
+ *dir = DMA_FROM_DEVICE;
+ else if (srp_cmd->buf_fmt >> 4)
+ *dir = DMA_TO_DEVICE;
+
+ add_cdb_offset = srp_cmd->add_cdb_len & ~3;
+ if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
+ ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
+ db = (struct srp_direct_buf *)(srp_cmd->add_data
+ + add_cdb_offset);
+ *data_len = be32_to_cpu(db->len);
+ } else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
+ ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
+ idb = (struct srp_indirect_buf *)(srp_cmd->add_data
+ + add_cdb_offset);
+
+ *data_len = be32_to_cpu(idb->len);
+ }
+ return rc;
+}
+
+MODULE_DESCRIPTION("SCSI RDMA Protocol lib functions");
+MODULE_AUTHOR("FUJITA Tomonori");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/ibmvscsi_tgt/libsrp.h b/drivers/scsi/ibmvscsi_tgt/libsrp.h
new file mode 100644
index 000000000..4696f3314
--- /dev/null
+++ b/drivers/scsi/ibmvscsi_tgt/libsrp.h
@@ -0,0 +1,123 @@
+#ifndef __LIBSRP_H__
+#define __LIBSRP_H__
+
+#include <linux/list.h>
+#include <linux/kfifo.h>
+#include <scsi/srp.h>
+
+enum srp_valid {
+ INVALIDATE_CMD_RESP_EL = 0,
+ VALID_CMD_RESP_EL = 0x80,
+ VALID_INIT_MSG = 0xC0,
+ VALID_TRANS_EVENT = 0xFF
+};
+
+enum srp_format {
+ SRP_FORMAT = 1,
+ MAD_FORMAT = 2,
+ OS400_FORMAT = 3,
+ AIX_FORMAT = 4,
+ LINUX_FORMAT = 5,
+ MESSAGE_IN_CRQ = 6
+};
+
+enum srp_init_msg {
+ INIT_MSG = 1,
+ INIT_COMPLETE_MSG = 2
+};
+
+enum srp_trans_event {
+ UNUSED_FORMAT = 0,
+ PARTNER_FAILED = 1,
+ PARTNER_DEREGISTER = 2,
+ MIGRATED = 6
+};
+
+enum srp_status {
+ HEADER_DESCRIPTOR = 0xF1,
+ PING = 0xF5,
+ PING_RESPONSE = 0xF6
+};
+
+enum srp_mad_version {
+ MAD_VERSION_1 = 1
+};
+
+enum srp_os_type {
+ OS400 = 1,
+ LINUX = 2,
+ AIX = 3,
+ OFW = 4
+};
+
+enum srp_task_attributes {
+ SRP_SIMPLE_TASK = 0,
+ SRP_HEAD_TASK = 1,
+ SRP_ORDERED_TASK = 2,
+ SRP_ACA_TASK = 4
+};
+
+enum {
+ SRP_TASK_MANAGEMENT_FUNCTION_COMPLETE = 0,
+ SRP_REQUEST_FIELDS_INVALID = 2,
+ SRP_TASK_MANAGEMENT_FUNCTION_NOT_SUPPORTED = 4,
+ SRP_TASK_MANAGEMENT_FUNCTION_FAILED = 5
+};
+
+struct srp_buf {
+ dma_addr_t dma;
+ void *buf;
+};
+
+struct srp_queue {
+ void *pool;
+ void *items;
+ struct kfifo queue;
+ spinlock_t lock;
+};
+
+struct srp_target {
+ struct device *dev;
+
+ spinlock_t lock;
+ struct list_head cmd_queue;
+
+ size_t srp_iu_size;
+ struct srp_queue iu_queue;
+ size_t rx_ring_size;
+ struct srp_buf **rx_ring;
+
+ void *ldata;
+};
+
+struct iu_entry {
+ struct srp_target *target;
+
+ struct list_head ilist;
+ dma_addr_t remote_token;
+ unsigned long flags;
+
+ struct srp_buf *sbuf;
+ u16 iu_len;
+};
+
+struct ibmvscsis_cmd;
+
+typedef int (srp_rdma_t)(struct ibmvscsis_cmd *, struct scatterlist *, int,
+ struct srp_direct_buf *, int,
+ enum dma_data_direction, unsigned int);
+int srp_target_alloc(struct srp_target *, struct device *, size_t, size_t);
+void srp_target_free(struct srp_target *);
+struct iu_entry *srp_iu_get(struct srp_target *);
+void srp_iu_put(struct iu_entry *);
+int srp_transfer_data(struct ibmvscsis_cmd *, struct srp_cmd *,
+ srp_rdma_t, int, int);
+u64 srp_data_length(struct srp_cmd *cmd, enum dma_data_direction dir);
+int srp_get_desc_table(struct srp_cmd *srp_cmd, enum dma_data_direction *dir,
+ u64 *data_len);
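+/* A non-zero high nibble in buf_fmt means the initiator supplied
+ * data-out descriptors, i.e. a write as seen by the target.
+ */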
+static inline int srp_cmd_direction(struct srp_cmd *cmd)
+{
+ return (cmd->buf_fmt >> 4) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+}
+
+#endif
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index d6803a9e5..17d04c702 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -98,7 +98,7 @@ static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
-static unsigned int ipr_number_of_msix = 2;
+static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);
@@ -194,7 +194,8 @@ static const struct ipr_chip_t ipr_chip[] = {
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
- { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};
static int ipr_max_bus_speeds[] = {
@@ -221,7 +222,7 @@ module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
"[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
-MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:2)");
+MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
@@ -3287,6 +3288,11 @@ static void ipr_worker_thread(struct work_struct *work)
return;
}
+ if (!ioa_cfg->scan_enabled) {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return;
+ }
+
restart:
do {
did_work = 0;
@@ -10213,6 +10219,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
if (!ioa_cfg->reset_work_q) {
dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
+ rc = -ENOMEM;
goto out_free_irq;
}
} else
@@ -10361,6 +10368,7 @@ static void ipr_remove(struct pci_dev *pdev)
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
struct ipr_ioa_cfg *ioa_cfg;
+ unsigned long flags;
int rc, i;
rc = ipr_probe_ioa(pdev, dev_id);
@@ -10402,8 +10410,11 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
__ipr_remove(pdev);
return rc;
}
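+ /* Let the worker run device scans only once the host is fully set
+  * up; until scan_enabled is set it bails out early.
+  */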
+ spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+ ioa_cfg->scan_enabled = 1;
+ schedule_work(&ioa_cfg->work_q);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
- scsi_scan_host(ioa_cfg->host);
ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
@@ -10413,7 +10424,8 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
}
}
- schedule_work(&ioa_cfg->work_q);
+ scsi_scan_host(ioa_cfg->host);
+
return 0;
}
@@ -10565,6 +10577,10 @@ static struct pci_device_id ipr_pci_table[] = {
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 56c570683..cdb51960b 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -60,6 +60,7 @@
#define PCI_DEVICE_ID_IBM_CROC_FPGA_E2 0x033D
#define PCI_DEVICE_ID_IBM_CROCODILE 0x034A
+#define PCI_DEVICE_ID_IBM_RATTLESNAKE 0x04DA
#define IPR_SUBS_DEV_ID_2780 0x0264
#define IPR_SUBS_DEV_ID_5702 0x0266
@@ -111,6 +112,8 @@
#define IPR_SUBS_DEV_ID_2CCA 0x04C7
#define IPR_SUBS_DEV_ID_2CD2 0x04C8
#define IPR_SUBS_DEV_ID_2CCD 0x04C9
+#define IPR_SUBS_DEV_ID_580A 0x04FC
+#define IPR_SUBS_DEV_ID_580B 0x04FB
#define IPR_NAME "ipr"
/*
@@ -1475,6 +1478,7 @@ struct ipr_ioa_cfg {
u8 in_ioa_bringdown:1;
u8 ioa_unit_checked:1;
u8 dump_taken:1;
+ u8 scan_enabled:1;
u8 scan_done:1;
u8 needs_hard_reset:1;
u8 dual_raid:1;
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 30f9ef0c0..e72673b0a 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -908,9 +908,17 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
{
struct fc_exch_pool *pool;
struct fc_exch *ep = NULL;
+ u16 cpu = xid & fc_cpu_mask;
+
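+ /* The low bits of the XID encode the owning CPU; bail out if they
+  * name a CPU outside the per-cpu exchange pool array.
+  */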
+ if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
+ printk_ratelimited(KERN_ERR
+ "libfc: lookup request for XID = %d, "
+ "indicates invalid CPU %d\n", xid, cpu);
+ return NULL;
+ }
if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
- pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask);
+ pool = per_cpu_ptr(mp->pool, cpu);
spin_lock_bh(&pool->lock);
ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
if (ep) {
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index e01a29863..04ce7cfb6 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -301,7 +301,6 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
{
struct fc_host_statistics *fc_stats;
struct fc_lport *lport = shost_priv(shost);
- struct timespec v0, v1;
unsigned int cpu;
u64 fcp_in_bytes = 0;
u64 fcp_out_bytes = 0;
@@ -309,9 +308,7 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
fc_stats = &lport->host_stats;
memset(fc_stats, 0, sizeof(struct fc_host_statistics));
- jiffies_to_timespec(jiffies, &v0);
- jiffies_to_timespec(lport->boot_time, &v1);
- fc_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);
+ fc_stats->seconds_since_last_reset = (jiffies - lport->boot_time) / HZ;
for_each_possible_cpu(cpu) {
struct fc_stats *stats;
@@ -2090,7 +2087,7 @@ int fc_lport_bsg_request(struct fc_bsg_job *job)
struct fc_rport *rport;
struct fc_rport_priv *rdata;
int rc = -EINVAL;
- u32 did;
+ u32 did, tov;
job->reply->reply_payload_rcv_len = 0;
if (rsp)
@@ -2121,15 +2118,20 @@ int fc_lport_bsg_request(struct fc_bsg_job *job)
case FC_BSG_HST_CT:
did = ntoh24(job->request->rqst_data.h_ct.port_id);
- if (did == FC_FID_DIR_SERV)
+ if (did == FC_FID_DIR_SERV) {
rdata = lport->dns_rdata;
- else
+ if (!rdata)
+ break;
+ tov = rdata->e_d_tov;
+ } else {
rdata = lport->tt.rport_lookup(lport, did);
+ if (!rdata)
+ break;
+ tov = rdata->e_d_tov;
+ kref_put(&rdata->kref, lport->tt.rport_destroy);
+ }
- if (!rdata)
- break;
-
- rc = fc_lport_ct_request(job, lport, did, rdata->e_d_tov);
+ rc = fc_lport_ct_request(job, lport, did, tov);
break;
case FC_BSG_HST_ELS_NOLOGIN:
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 589ff9aed..93f596182 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -95,17 +95,23 @@ static const char *fc_rport_state_names[] = {
* @lport: The local port on which to look up the remote port
* @port_id: The remote port ID to look up
*
- * The caller must hold either disc_mutex or rcu_read_lock().
+ * The reference count of the fc_rport_priv structure is
+ * increased by one.
*/
static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
u32 port_id)
{
- struct fc_rport_priv *rdata;
+ struct fc_rport_priv *rdata = NULL, *tmp_rdata;
- list_for_each_entry_rcu(rdata, &lport->disc.rports, peers)
- if (rdata->ids.port_id == port_id)
- return rdata;
- return NULL;
+ rcu_read_lock();
+ list_for_each_entry_rcu(tmp_rdata, &lport->disc.rports, peers)
+ if (tmp_rdata->ids.port_id == port_id &&
+ kref_get_unless_zero(&tmp_rdata->kref)) {
+ rdata = tmp_rdata;
+ break;
+ }
+ rcu_read_unlock();
+ return rdata;
}
/**
@@ -340,7 +346,6 @@ static void fc_rport_work(struct work_struct *work)
fc_remote_port_delete(rport);
}
- mutex_lock(&lport->disc.disc_mutex);
mutex_lock(&rdata->rp_mutex);
if (rdata->rp_state == RPORT_ST_DELETE) {
if (port_id == FC_FID_DIR_SERV) {
@@ -370,7 +375,6 @@ static void fc_rport_work(struct work_struct *work)
fc_rport_enter_ready(rdata);
mutex_unlock(&rdata->rp_mutex);
}
- mutex_unlock(&lport->disc.disc_mutex);
break;
default:
@@ -702,7 +706,7 @@ out:
err:
mutex_unlock(&rdata->rp_mutex);
put:
- kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+ kref_put(&rdata->kref, lport->tt.rport_destroy);
return;
bad:
FC_RPORT_DBG(rdata, "Bad FLOGI response\n");
@@ -762,8 +766,6 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport,
FC_RPORT_ID_DBG(lport, sid, "Received FLOGI request\n");
disc = &lport->disc;
- mutex_lock(&disc->disc_mutex);
-
if (!lport->point_to_multipoint) {
rjt_data.reason = ELS_RJT_UNSUP;
rjt_data.explan = ELS_EXPL_NONE;
@@ -808,7 +810,7 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport,
mutex_unlock(&rdata->rp_mutex);
rjt_data.reason = ELS_RJT_FIP;
rjt_data.explan = ELS_EXPL_NOT_NEIGHBOR;
- goto reject;
+ goto reject_put;
case RPORT_ST_FLOGI:
case RPORT_ST_PLOGI_WAIT:
case RPORT_ST_PLOGI:
@@ -825,13 +827,13 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport,
mutex_unlock(&rdata->rp_mutex);
rjt_data.reason = ELS_RJT_BUSY;
rjt_data.explan = ELS_EXPL_NONE;
- goto reject;
+ goto reject_put;
}
if (fc_rport_login_complete(rdata, fp)) {
mutex_unlock(&rdata->rp_mutex);
rjt_data.reason = ELS_RJT_LOGIC;
rjt_data.explan = ELS_EXPL_NONE;
- goto reject;
+ goto reject_put;
}
fp = fc_frame_alloc(lport, sizeof(*flp));
@@ -851,12 +853,13 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport,
fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
out:
mutex_unlock(&rdata->rp_mutex);
- mutex_unlock(&disc->disc_mutex);
+ kref_put(&rdata->kref, lport->tt.rport_destroy);
fc_frame_free(rx_fp);
return;
+reject_put:
+ kref_put(&rdata->kref, lport->tt.rport_destroy);
reject:
- mutex_unlock(&disc->disc_mutex);
lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
fc_frame_free(rx_fp);
}
@@ -923,7 +926,7 @@ out:
fc_frame_free(fp);
err:
mutex_unlock(&rdata->rp_mutex);
- kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+ kref_put(&rdata->kref, lport->tt.rport_destroy);
}
static bool
@@ -1477,14 +1480,11 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
struct fc_rport_priv *rdata;
struct fc_seq_els_data els_data;
- mutex_lock(&lport->disc.disc_mutex);
rdata = lport->tt.rport_lookup(lport, fc_frame_sid(fp));
- if (!rdata) {
- mutex_unlock(&lport->disc.disc_mutex);
+ if (!rdata)
goto reject;
- }
+
mutex_lock(&rdata->rp_mutex);
- mutex_unlock(&lport->disc.disc_mutex);
switch (rdata->rp_state) {
case RPORT_ST_PRLI:
@@ -1494,6 +1494,7 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
break;
default:
mutex_unlock(&rdata->rp_mutex);
+ kref_put(&rdata->kref, lport->tt.rport_destroy);
goto reject;
}
@@ -1524,6 +1525,7 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
}
mutex_unlock(&rdata->rp_mutex);
+ kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
return;
reject:
@@ -1907,7 +1909,6 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
sid = fc_frame_sid(fp);
- mutex_lock(&lport->disc.disc_mutex);
rdata = lport->tt.rport_lookup(lport, sid);
if (rdata) {
mutex_lock(&rdata->rp_mutex);
@@ -1916,10 +1917,10 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
mutex_unlock(&rdata->rp_mutex);
+ kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
} else
FC_RPORT_ID_DBG(lport, sid,
"Received LOGO from non-logged-in port\n");
- mutex_unlock(&lport->disc.disc_mutex);
fc_frame_free(fp);
}
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 935c43095..763f012fd 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -233,15 +233,8 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
task->task_state_flags = SAS_TASK_STATE_PENDING;
qc->lldd_task = task;
- switch (qc->tf.protocol) {
- case ATA_PROT_NCQ:
- task->ata_task.use_ncq = 1;
- /* fall through */
- case ATAPI_PROT_DMA:
- case ATA_PROT_DMA:
- task->ata_task.dma_xfer = 1;
- break;
- }
+ task->ata_task.use_ncq = ata_is_ncq(qc->tf.protocol);
+ task->ata_task.dma_xfer = ata_is_dma(qc->tf.protocol);
if (qc->scsicmd)
ASSIGN_SAS_TASK(qc->scsicmd, task);
@@ -253,6 +246,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
if (qc->scsicmd)
ASSIGN_SAS_TASK(qc->scsicmd, NULL);
sas_free_task(task);
+ qc->lldd_task = NULL;
ret = AC_ERR_SYSTEM;
}
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index d5bd42059..b48485946 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -647,6 +647,7 @@ struct lpfc_hba {
#define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */
#define HBA_FCP_IOQ_FLUSH 0x8000 /* FCP I/O queues being flushed */
#define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */
+#define HBA_RECOVERABLE_UE 0x20000 /* Firmware supports recoverable UE */
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
@@ -694,7 +695,8 @@ struct lpfc_hba {
uint8_t wwnn[8];
uint8_t wwpn[8];
uint32_t RandomData[7];
- uint32_t fcp_embed_io;
+ uint8_t fcp_embed_io;
+ uint8_t mds_diags_support;
/* HBA Config Parameters */
uint32_t cfg_ack0;
@@ -741,6 +743,7 @@ struct lpfc_hba {
#define OAS_FIND_ANY_VPORT 0x01
#define OAS_FIND_ANY_TARGET 0x02
#define OAS_LUN_VALID 0x04
+ uint32_t cfg_oas_priority;
uint32_t cfg_XLanePriority;
uint32_t cfg_enable_bg;
uint32_t cfg_hostmem_hgp;
@@ -751,6 +754,8 @@ struct lpfc_hba {
uint32_t cfg_iocb_cnt;
uint32_t cfg_suppress_link_up;
uint32_t cfg_rrq_xri_bitmap_sz;
+ uint32_t cfg_delay_discovery;
+ uint32_t cfg_sli_mode;
#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
#define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */
#define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */
@@ -759,6 +764,7 @@ struct lpfc_hba {
#define LPFC_FDMI_NO_SUPPORT 0 /* FDMI not supported */
#define LPFC_FDMI_SUPPORT 1 /* FDMI supported? */
uint32_t cfg_enable_SmartSAN;
+ uint32_t cfg_enable_mds_diags;
lpfc_vpd_t vpd; /* vital product data */
struct pci_dev *pcidev;
@@ -779,9 +785,9 @@ struct lpfc_hba {
atomic_t fcp_qidx; /* next work queue to post work to */
- unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */
- unsigned long pci_bar1_map; /* Physical address for PCI BAR1 */
- unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */
+ phys_addr_t pci_bar0_map; /* Physical address for PCI BAR0 */
+ phys_addr_t pci_bar1_map; /* Physical address for PCI BAR1 */
+ phys_addr_t pci_bar2_map; /* Physical address for PCI BAR2 */
void __iomem *slim_memmap_p; /* Kernel memory mapped address for
PCI BAR0 */
void __iomem *ctrl_regs_memmap_p;/* Kernel memory mapped address for
@@ -827,6 +833,7 @@ struct lpfc_hba {
struct timer_list fcp_poll_timer;
struct timer_list eratt_poll;
+ uint32_t eratt_poll_interval;
/*
* stat counters
@@ -999,6 +1006,18 @@ struct lpfc_hba {
spinlock_t devicelock; /* lock for luns list */
mempool_t *device_data_mem_pool;
struct list_head luns;
+#define LPFC_TRANSGRESSION_HIGH_TEMPERATURE 0x0080
+#define LPFC_TRANSGRESSION_LOW_TEMPERATURE 0x0040
+#define LPFC_TRANSGRESSION_HIGH_VOLTAGE 0x0020
+#define LPFC_TRANSGRESSION_LOW_VOLTAGE 0x0010
+#define LPFC_TRANSGRESSION_HIGH_TXBIAS 0x0008
+#define LPFC_TRANSGRESSION_LOW_TXBIAS 0x0004
+#define LPFC_TRANSGRESSION_HIGH_TXPOWER 0x0002
+#define LPFC_TRANSGRESSION_LOW_TXPOWER 0x0001
+#define LPFC_TRANSGRESSION_HIGH_RXPOWER 0x8000
+#define LPFC_TRANSGRESSION_LOW_RXPOWER 0x4000
+ uint16_t sfp_alarm;
+ uint16_t sfp_warning;
};
static inline struct Scsi_Host *
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index cfec2eca4..f10199088 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -48,6 +48,7 @@
#include "lpfc_compat.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
+#include "lpfc_attr.h"
#define LPFC_DEF_DEVLOSS_TMO 30
#define LPFC_MIN_DEVLOSS_TMO 1
@@ -1620,6 +1621,11 @@ lpfc_sriov_hw_max_virtfn_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
}
+static inline bool lpfc_rangecheck(uint val, uint min, uint max)
+{
+ return val >= min && val <= max;
+}
+
/**
* lpfc_param_show - Return a cfg attribute value in decimal
*
@@ -1697,7 +1703,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
static int \
lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \
{ \
- if (val >= minval && val <= maxval) {\
+ if (lpfc_rangecheck(val, minval, maxval)) {\
phba->cfg_##attr = val;\
return 0;\
}\
@@ -1732,7 +1738,7 @@ lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \
static int \
lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \
{ \
- if (val >= minval && val <= maxval) {\
+ if (lpfc_rangecheck(val, minval, maxval)) {\
lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
"3052 lpfc_" #attr " changed from %d to %d\n", \
phba->cfg_##attr, val); \
@@ -1856,7 +1862,7 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
static int \
lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \
{ \
- if (val >= minval && val <= maxval) {\
+ if (lpfc_rangecheck(val, minval, maxval)) {\
vport->cfg_##attr = val;\
return 0;\
}\
@@ -1888,7 +1894,7 @@ lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \
static int \
lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
{ \
- if (val >= minval && val <= maxval) {\
+ if (lpfc_rangecheck(val, minval, maxval)) {\
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
"3053 lpfc_" #attr \
" changed from %d (x%x) to %d (x%x)\n", \
@@ -1939,102 +1945,6 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
}
-#define LPFC_ATTR(name, defval, minval, maxval, desc) \
-static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, S_IRUGO);\
-MODULE_PARM_DESC(lpfc_##name, desc);\
-lpfc_param_init(name, defval, minval, maxval)
-
-#define LPFC_ATTR_R(name, defval, minval, maxval, desc) \
-static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, S_IRUGO);\
-MODULE_PARM_DESC(lpfc_##name, desc);\
-lpfc_param_show(name)\
-lpfc_param_init(name, defval, minval, maxval)\
-static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
-
-#define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \
-static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, S_IRUGO);\
-MODULE_PARM_DESC(lpfc_##name, desc);\
-lpfc_param_show(name)\
-lpfc_param_init(name, defval, minval, maxval)\
-lpfc_param_set(name, defval, minval, maxval)\
-lpfc_param_store(name)\
-static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
- lpfc_##name##_show, lpfc_##name##_store)
-
-#define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \
-static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, S_IRUGO);\
-MODULE_PARM_DESC(lpfc_##name, desc);\
-lpfc_param_hex_show(name)\
-lpfc_param_init(name, defval, minval, maxval)\
-static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
-
-#define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
-static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, S_IRUGO);\
-MODULE_PARM_DESC(lpfc_##name, desc);\
-lpfc_param_hex_show(name)\
-lpfc_param_init(name, defval, minval, maxval)\
-lpfc_param_set(name, defval, minval, maxval)\
-lpfc_param_store(name)\
-static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
- lpfc_##name##_show, lpfc_##name##_store)
-
-#define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \
-static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, S_IRUGO);\
-MODULE_PARM_DESC(lpfc_##name, desc);\
-lpfc_vport_param_init(name, defval, minval, maxval)
-
-#define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \
-static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, S_IRUGO);\
-MODULE_PARM_DESC(lpfc_##name, desc);\
-lpfc_vport_param_show(name)\
-lpfc_vport_param_init(name, defval, minval, maxval)\
-static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
-
-#define LPFC_VPORT_ULL_ATTR_R(name, defval, minval, maxval, desc) \
-static uint64_t lpfc_##name = defval;\
-module_param(lpfc_##name, ullong, S_IRUGO);\
-MODULE_PARM_DESC(lpfc_##name, desc);\
-lpfc_vport_param_show(name)\
-lpfc_vport_param_init(name, defval, minval, maxval)\
-static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
-
-#define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \
-static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, S_IRUGO);\
-MODULE_PARM_DESC(lpfc_##name, desc);\
-lpfc_vport_param_show(name)\
-lpfc_vport_param_init(name, defval, minval, maxval)\
-lpfc_vport_param_set(name, defval, minval, maxval)\
-lpfc_vport_param_store(name)\
-static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
- lpfc_##name##_show, lpfc_##name##_store)
-
-#define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \
-static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, S_IRUGO);\
-MODULE_PARM_DESC(lpfc_##name, desc);\
-lpfc_vport_param_hex_show(name)\
-lpfc_vport_param_init(name, defval, minval, maxval)\
-static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
-
-#define LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
-static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, S_IRUGO);\
-MODULE_PARM_DESC(lpfc_##name, desc);\
-lpfc_vport_param_hex_show(name)\
-lpfc_vport_param_init(name, defval, minval, maxval)\
-lpfc_vport_param_set(name, defval, minval, maxval)\
-lpfc_vport_param_store(name)\
-static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
- lpfc_##name##_show, lpfc_##name##_store)
-
static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL);
static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL);
static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL);
@@ -2401,6 +2311,69 @@ static DEVICE_ATTR(lpfc_xlane_tgt, S_IRUGO | S_IWUSR,
lpfc_oas_tgt_show, lpfc_oas_tgt_store);
/**
+ * lpfc_oas_priority_show - Return the priority used for LUNs that may be
+ * enabled for Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * size of the formatted string containing the priority.
+ **/
+static ssize_t
+lpfc_oas_priority_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_priority);
+}
+
+/**
+ * lpfc_oas_priority_store - Store the priority used for LUNs that may be
+ * enabled for Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: Size of the data buffer.
+ *
+ * Returns:
+ * -EINVAL count is invalid or the priority value is out of range
+ * -EPERM OAS is not supported by the HBA
+ * value of count on success
+ **/
+static ssize_t
+lpfc_oas_priority_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ unsigned int cnt = count;
+ unsigned long val;
+ int ret;
+
+ if (!phba->cfg_fof)
+ return -EPERM;
+
+ /* count may include a LF at end of string */
+ if (buf[cnt-1] == '\n')
+ cnt--;
+
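+ /* Priority is a 7-bit value; writing 0 reverts to the default
+  * XLane priority.
+  */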
+ ret = kstrtoul(buf, 0, &val);
+ if (ret || (val > 0x7f))
+ return -EINVAL;
+
+ if (val)
+ phba->cfg_oas_priority = (uint8_t)val;
+ else
+ phba->cfg_oas_priority = phba->cfg_XLanePriority;
+ return count;
+}
+static DEVICE_ATTR(lpfc_xlane_priority, S_IRUGO | S_IWUSR,
+ lpfc_oas_priority_show, lpfc_oas_priority_store);
+
+/**
* lpfc_oas_vpt_show - Return wwpn of vport whose targets may be enabled
* for Optimized Access Storage (OAS) operations.
* @dev: class device that is converted into a Scsi_host.
@@ -2462,6 +2435,7 @@ lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr,
else
phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT;
phba->cfg_oas_flags &= ~OAS_LUN_VALID;
+ phba->cfg_oas_priority = phba->cfg_XLanePriority;
phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
return count;
}
@@ -2524,7 +2498,6 @@ lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
phba->cfg_oas_lun_state = val;
-
return strlen(buf);
}
static DEVICE_ATTR(lpfc_xlane_lun_state, S_IRUGO | S_IWUSR,
@@ -2572,7 +2545,8 @@ static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO,
*/
static size_t
lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
- uint8_t tgt_wwpn[], uint64_t lun, uint32_t oas_state)
+ uint8_t tgt_wwpn[], uint64_t lun,
+ uint32_t oas_state, uint8_t pri)
{
int rc = 0;
@@ -2582,7 +2556,8 @@ lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
if (oas_state) {
if (!lpfc_enable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
- (struct lpfc_name *)tgt_wwpn, lun))
+ (struct lpfc_name *)tgt_wwpn,
+ lun, pri))
rc = -ENOMEM;
} else {
lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
@@ -2648,13 +2623,13 @@ lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
static ssize_t
lpfc_oas_lun_state_change(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
uint8_t tgt_wwpn[], uint64_t lun,
- uint32_t oas_state)
+ uint32_t oas_state, uint8_t pri)
{
int rc;
rc = lpfc_oas_lun_state_set(phba, vpt_wwpn, tgt_wwpn, lun,
- oas_state);
+ oas_state, pri);
return rc;
}
@@ -2744,16 +2719,16 @@ lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3372 Try to set vport 0x%llx target 0x%llx lun:%lld "
- "with oas set to %d\n",
+ "3372 Try to set vport 0x%llx target 0x%llx lun:0x%llx "
+ "priority 0x%x with oas state %d\n",
wwn_to_u64(phba->cfg_oas_vpt_wwpn),
wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun,
- phba->cfg_oas_lun_state);
+ phba->cfg_oas_priority, phba->cfg_oas_lun_state);
rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn,
- phba->cfg_oas_tgt_wwpn, scsi_lun,
- phba->cfg_oas_lun_state);
-
+ phba->cfg_oas_tgt_wwpn, scsi_lun,
+ phba->cfg_oas_lun_state,
+ phba->cfg_oas_priority);
if (rc)
return rc;
@@ -2772,19 +2747,14 @@ MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
lpfc_poll_show, lpfc_poll_store);
-int lpfc_sli_mode = 0;
-module_param(lpfc_sli_mode, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:"
- " 0 - auto (SLI-3 if supported),"
- " 2 - select SLI-2 even on SLI-3 capable HBAs,"
- " 3 - select SLI-3");
+LPFC_ATTR(sli_mode, 0, 0, 3,
+ "SLI mode selector:"
+ " 0 - auto (SLI-3 if supported),"
+ " 2 - select SLI-2 even on SLI-3 capable HBAs,"
+ " 3 - select SLI-3");
-int lpfc_enable_npiv = 1;
-module_param(lpfc_enable_npiv, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_enable_npiv, "Enable NPIV functionality");
-lpfc_param_show(enable_npiv);
-lpfc_param_init(enable_npiv, 1, 0, 1);
-static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL);
+LPFC_ATTR_R(enable_npiv, 1, 0, 1,
+ "Enable NPIV functionality");
LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
"FCF Fast failover=1 Priority failover=2");
@@ -4754,11 +4724,8 @@ MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type");
* accept and FCID/Fabric name/Fabric portname is changed.
* Default value is 0.
*/
-int lpfc_delay_discovery;
-module_param(lpfc_delay_discovery, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_delay_discovery,
- "Delay NPort discovery when Clean Address bit is cleared. "
- "Allowed values: 0,1.");
+LPFC_ATTR(delay_discovery, 0, 0, 1,
+ "Delay NPort discovery when Clean Address bit is cleared.");
/*
* lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
@@ -4780,6 +4747,14 @@ LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT,
LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT,
"Max Protection Scatter Gather Segment Count");
+/*
+ * lpfc_enable_mds_diags: Enable MDS Diagnostics
+ * 0 = MDS Diagnostics disabled (default)
+ * 1 = MDS Diagnostics enabled
+ * Value range is [0,1]. Default value is 0.
+ */
+LPFC_ATTR_R(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
+
struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_bg_info,
&dev_attr_bg_guard_err,
@@ -4857,6 +4832,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_xlane_vpt,
&dev_attr_lpfc_xlane_lun_state,
&dev_attr_lpfc_xlane_lun_status,
+ &dev_attr_lpfc_xlane_priority,
&dev_attr_lpfc_sg_seg_cnt,
&dev_attr_lpfc_max_scsicmpl_time,
&dev_attr_lpfc_stat_data_ctrl,
@@ -4876,6 +4852,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_sriov_hw_max_virtfn,
&dev_attr_protocol,
&dev_attr_lpfc_xlane_supported,
+ &dev_attr_lpfc_enable_mds_diags,
NULL,
};
@@ -5849,6 +5826,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
phba->cfg_oas_lun_state = 0;
phba->cfg_oas_lun_status = 0;
phba->cfg_oas_flags = 0;
+ phba->cfg_oas_priority = 0;
lpfc_enable_bg_init(phba, lpfc_enable_bg);
if (phba->sli_rev == LPFC_SLI_REV4)
phba->cfg_poll = 0;
@@ -5866,7 +5844,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade);
lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
+ lpfc_delay_discovery_init(phba, lpfc_delay_discovery);
+ lpfc_sli_mode_init(phba, lpfc_sli_mode);
phba->cfg_enable_dss = 1;
+ lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags);
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_attr.h b/drivers/scsi/lpfc/lpfc_attr.h
new file mode 100644
index 000000000..b2bd28e96
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_attr.h
@@ -0,0 +1,116 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2004-2016 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#define LPFC_ATTR(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_param_init(name, defval, minval, maxval)
+
+#define LPFC_ATTR_R(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_param_show(name)\
+lpfc_param_init(name, defval, minval, maxval)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO, lpfc_##name##_show, NULL)
+
+#define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_param_show(name)\
+lpfc_param_init(name, defval, minval, maxval)\
+lpfc_param_set(name, defval, minval, maxval)\
+lpfc_param_store(name)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
+ lpfc_##name##_show, lpfc_##name##_store)
+
+#define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_param_hex_show(name)\
+lpfc_param_init(name, defval, minval, maxval)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO, lpfc_##name##_show, NULL)
+
+#define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_param_hex_show(name)\
+lpfc_param_init(name, defval, minval, maxval)\
+lpfc_param_set(name, defval, minval, maxval)\
+lpfc_param_store(name)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
+ lpfc_##name##_show, lpfc_##name##_store)
+
+#define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_vport_param_init(name, defval, minval, maxval)
+
+#define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_vport_param_show(name)\
+lpfc_vport_param_init(name, defval, minval, maxval)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO, lpfc_##name##_show, NULL)
+
+#define LPFC_VPORT_ULL_ATTR_R(name, defval, minval, maxval, desc) \
+static uint64_t lpfc_##name = defval;\
+module_param(lpfc_##name, ullong, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_vport_param_show(name)\
+lpfc_vport_param_init(name, defval, minval, maxval)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO, lpfc_##name##_show, NULL)
+
+#define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_vport_param_show(name)\
+lpfc_vport_param_init(name, defval, minval, maxval)\
+lpfc_vport_param_set(name, defval, minval, maxval)\
+lpfc_vport_param_store(name)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
+ lpfc_##name##_show, lpfc_##name##_store)
+
+#define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_vport_param_hex_show(name)\
+lpfc_vport_param_init(name, defval, minval, maxval)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO, lpfc_##name##_show, NULL)
+
+#define LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
+static uint lpfc_##name = defval;\
+module_param(lpfc_##name, uint, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_vport_param_hex_show(name)\
+lpfc_vport_param_init(name, defval, minval, maxval)\
+lpfc_vport_param_set(name, defval, minval, maxval)\
+lpfc_vport_param_store(name)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
+ lpfc_##name##_show, lpfc_##name##_store)
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 4e55b3518..bd7576d45 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2015 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -359,9 +359,6 @@ extern struct scsi_host_template lpfc_template_s3;
extern struct scsi_host_template lpfc_vport_template;
extern struct fc_function_template lpfc_transport_functions;
extern struct fc_function_template lpfc_vport_transport_functions;
-extern int lpfc_sli_mode;
-extern int lpfc_enable_npiv;
-extern int lpfc_delay_discovery;
int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t);
int lpfc_vport_symbolic_port_name(struct lpfc_vport *, char *, size_t);
@@ -492,7 +489,7 @@ struct lpfc_device_data *__lpfc_get_device_data(struct lpfc_hba *,
struct lpfc_name *,
struct lpfc_name *, uint64_t);
bool lpfc_enable_oas_lun(struct lpfc_hba *, struct lpfc_name *,
- struct lpfc_name *, uint64_t);
+ struct lpfc_name *, uint64_t, uint8_t);
bool lpfc_disable_oas_lun(struct lpfc_hba *, struct lpfc_name *,
struct lpfc_name *, uint64_t);
bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *,
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index a38816e96..63e48d427 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1510,6 +1510,10 @@ lpfc_fdmi_num_disc_check(struct lpfc_vport *vport)
if (!lpfc_is_link_up(phba))
return;
+ /* Must be connected to a Fabric */
+ if (!(vport->fc_flag & FC_FABRIC))
+ return;
+
if (!(vport->fdmi_port_mask & LPFC_FDMI_PORT_ATTR_num_disc))
return;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 0498f5760..c0af32f24 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -594,6 +594,7 @@ static uint8_t
lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
struct serv_parm *sp)
{
+ struct lpfc_hba *phba = vport->phba;
uint8_t fabric_param_changed = 0;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
@@ -615,7 +616,7 @@ lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
* - lpfc_delay_discovery module parameter is set.
*/
if (fabric_param_changed && !sp->cmn.clean_address_bit &&
- (vport->fc_prevDID || lpfc_delay_discovery)) {
+ (vport->fc_prevDID || phba->cfg_delay_discovery)) {
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_DISC_DELAYED;
spin_unlock_irq(shost->host_lock);
@@ -3299,6 +3300,12 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
FC_VPORT_FABRIC_REJ_WWN);
}
break;
+ case LSRJT_VENDOR_UNIQUE:
+ if ((stat.un.b.vendorUnique == 0x45) &&
+ (cmd == ELS_CMD_FLOGI)) {
+ goto out_retry;
+ }
+ break;
}
break;
@@ -3344,6 +3351,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if ((vport->load_flag & FC_UNLOADING) != 0)
retry = 0;
+out_retry:
if (retry) {
if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) {
/* Stop retrying PLOGI and FDISC if in FCF discovery */
@@ -4609,7 +4617,7 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport)
return sentplogi;
}
-void
+uint32_t
lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc,
uint32_t word0)
{
@@ -4617,9 +4625,11 @@ lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc,
desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG);
desc->payload.els_req = word0;
desc->length = cpu_to_be32(sizeof(desc->payload));
+
+ return sizeof(struct fc_rdp_link_service_desc);
}
-void
+uint32_t
lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc,
uint8_t *page_a0, uint8_t *page_a2)
{
@@ -4680,9 +4690,11 @@ lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc,
desc->sfp_info.flags = cpu_to_be16(flag);
desc->length = cpu_to_be32(sizeof(desc->sfp_info));
+
+ return sizeof(struct fc_rdp_sfp_desc);
}
-void
+uint32_t
lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc,
READ_LNK_VAR *stat)
{
@@ -4707,134 +4719,181 @@ lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc,
desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt);
desc->length = cpu_to_be32(sizeof(desc->info));
+
+ return sizeof(struct fc_rdp_link_error_status_desc);
}
-void
+uint32_t
lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat,
struct lpfc_vport *vport)
{
+ uint32_t bbCredit;
+
desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG);
- desc->bbc_info.port_bbc = cpu_to_be32(
- vport->fc_sparam.cmn.bbCreditMsb |
- vport->fc_sparam.cmn.bbCreditlsb << 8);
- if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP)
- desc->bbc_info.attached_port_bbc = cpu_to_be32(
- vport->phba->fc_fabparam.cmn.bbCreditMsb |
- vport->phba->fc_fabparam.cmn.bbCreditlsb << 8);
- else
+ bbCredit = vport->fc_sparam.cmn.bbCreditLsb |
+ (vport->fc_sparam.cmn.bbCreditMsb << 8);
+ desc->bbc_info.port_bbc = cpu_to_be32(bbCredit);
+ if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
+ bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb |
+ (vport->phba->fc_fabparam.cmn.bbCreditMsb << 8);
+ desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit);
+ } else {
desc->bbc_info.attached_port_bbc = 0;
+ }
desc->bbc_info.rtt = 0;
desc->length = cpu_to_be32(sizeof(desc->bbc_info));
+
+ return sizeof(struct fc_rdp_bbc_desc);
}
-void
-lpfc_rdp_res_oed_temp_desc(struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2)
+uint32_t
+lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba,
+ struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2)
{
- uint32_t flags;
+ uint32_t flags = 0;
desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
- desc->oed_info.hi_alarm =
- cpu_to_be16(page_a2[SSF_TEMP_HIGH_ALARM]);
- desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_TEMP_LOW_ALARM]);
- desc->oed_info.hi_warning =
- cpu_to_be16(page_a2[SSF_TEMP_HIGH_WARNING]);
- desc->oed_info.lo_warning =
- cpu_to_be16(page_a2[SSF_TEMP_LOW_WARNING]);
- flags = 0xf; /* All four are valid */
+ desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM];
+ desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM];
+ desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING];
+ desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING];
+
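+ /* Report only the alarm/warning bits the adapter actually latched
+  * instead of unconditionally flagging all four thresholds.
+  */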
+ if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE)
+ flags |= RDP_OET_HIGH_ALARM;
+ if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE)
+ flags |= RDP_OET_LOW_ALARM;
+ if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE)
+ flags |= RDP_OET_HIGH_WARNING;
+ if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE)
+ flags |= RDP_OET_LOW_WARNING;
+
flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT);
desc->oed_info.function_flags = cpu_to_be32(flags);
desc->length = cpu_to_be32(sizeof(desc->oed_info));
+ return sizeof(struct fc_rdp_oed_sfp_desc);
}
-void
-lpfc_rdp_res_oed_voltage_desc(struct fc_rdp_oed_sfp_desc *desc,
+uint32_t
+lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba,
+ struct fc_rdp_oed_sfp_desc *desc,
uint8_t *page_a2)
{
- uint32_t flags;
+ uint32_t flags = 0;
desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
- desc->oed_info.hi_alarm =
- cpu_to_be16(page_a2[SSF_VOLTAGE_HIGH_ALARM]);
- desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_VOLTAGE_LOW_ALARM]);
- desc->oed_info.hi_warning =
- cpu_to_be16(page_a2[SSF_VOLTAGE_HIGH_WARNING]);
- desc->oed_info.lo_warning =
- cpu_to_be16(page_a2[SSF_VOLTAGE_LOW_WARNING]);
- flags = 0xf; /* All four are valid */
+ desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM];
+ desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM];
+ desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING];
+ desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING];
+
+ if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE)
+ flags |= RDP_OET_HIGH_ALARM;
+ if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE)
+ flags |= RDP_OET_LOW_ALARM;
+ if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE)
+ flags |= RDP_OET_HIGH_WARNING;
+ if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE)
+ flags |= RDP_OET_LOW_WARNING;
+
flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT);
desc->oed_info.function_flags = cpu_to_be32(flags);
desc->length = cpu_to_be32(sizeof(desc->oed_info));
+ return sizeof(struct fc_rdp_oed_sfp_desc);
}
-void
-lpfc_rdp_res_oed_txbias_desc(struct fc_rdp_oed_sfp_desc *desc,
+uint32_t
+lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba,
+ struct fc_rdp_oed_sfp_desc *desc,
uint8_t *page_a2)
{
- uint32_t flags;
+ uint32_t flags = 0;
desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
- desc->oed_info.hi_alarm =
- cpu_to_be16(page_a2[SSF_BIAS_HIGH_ALARM]);
- desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_BIAS_LOW_ALARM]);
- desc->oed_info.hi_warning =
- cpu_to_be16(page_a2[SSF_BIAS_HIGH_WARNING]);
- desc->oed_info.lo_warning =
- cpu_to_be16(page_a2[SSF_BIAS_LOW_WARNING]);
- flags = 0xf; /* All four are valid */
+ desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM];
+ desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM];
+ desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING];
+ desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING];
+
+ if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS)
+ flags |= RDP_OET_HIGH_ALARM;
+ if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS)
+ flags |= RDP_OET_LOW_ALARM;
+ if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS)
+ flags |= RDP_OET_HIGH_WARNING;
+ if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS)
+ flags |= RDP_OET_LOW_WARNING;
+
flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT);
desc->oed_info.function_flags = cpu_to_be32(flags);
desc->length = cpu_to_be32(sizeof(desc->oed_info));
+ return sizeof(struct fc_rdp_oed_sfp_desc);
}
-void
-lpfc_rdp_res_oed_txpower_desc(struct fc_rdp_oed_sfp_desc *desc,
+uint32_t
+lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba,
+ struct fc_rdp_oed_sfp_desc *desc,
uint8_t *page_a2)
{
- uint32_t flags;
+ uint32_t flags = 0;
desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
- desc->oed_info.hi_alarm =
- cpu_to_be16(page_a2[SSF_TXPOWER_HIGH_ALARM]);
- desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_TXPOWER_LOW_ALARM]);
- desc->oed_info.hi_warning =
- cpu_to_be16(page_a2[SSF_TXPOWER_HIGH_WARNING]);
- desc->oed_info.lo_warning =
- cpu_to_be16(page_a2[SSF_TXPOWER_LOW_WARNING]);
- flags = 0xf; /* All four are valid */
+ desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM];
+ desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM];
+ desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING];
+ desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING];
+
+ if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER)
+ flags |= RDP_OET_HIGH_ALARM;
+ if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER)
+ flags |= RDP_OET_LOW_ALARM;
+ if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER)
+ flags |= RDP_OET_HIGH_WARNING;
+ if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER)
+ flags |= RDP_OET_LOW_WARNING;
+
flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT);
desc->oed_info.function_flags = cpu_to_be32(flags);
desc->length = cpu_to_be32(sizeof(desc->oed_info));
+ return sizeof(struct fc_rdp_oed_sfp_desc);
}
-void
-lpfc_rdp_res_oed_rxpower_desc(struct fc_rdp_oed_sfp_desc *desc,
+uint32_t
+lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba,
+ struct fc_rdp_oed_sfp_desc *desc,
uint8_t *page_a2)
{
- uint32_t flags;
+ uint32_t flags = 0;
desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
- desc->oed_info.hi_alarm =
- cpu_to_be16(page_a2[SSF_RXPOWER_HIGH_ALARM]);
- desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_RXPOWER_LOW_ALARM]);
- desc->oed_info.hi_warning =
- cpu_to_be16(page_a2[SSF_RXPOWER_HIGH_WARNING]);
- desc->oed_info.lo_warning =
- cpu_to_be16(page_a2[SSF_RXPOWER_LOW_WARNING]);
- flags = 0xf; /* All four are valid */
+ desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM];
+ desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM];
+ desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING];
+ desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING];
+
+ if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER)
+ flags |= RDP_OET_HIGH_ALARM;
+ if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER)
+ flags |= RDP_OET_LOW_ALARM;
+ if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER)
+ flags |= RDP_OET_HIGH_WARNING;
+ if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER)
+ flags |= RDP_OET_LOW_WARNING;
+
flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT);
desc->oed_info.function_flags = cpu_to_be32(flags);
desc->length = cpu_to_be32(sizeof(desc->oed_info));
+ return sizeof(struct fc_rdp_oed_sfp_desc);
}
-void
+uint32_t
lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc,
uint8_t *page_a0, struct lpfc_vport *vport)
{
@@ -4845,9 +4904,10 @@ lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc,
memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 2);
memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8);
desc->length = cpu_to_be32(sizeof(desc->opd_info));
+ return sizeof(struct fc_rdp_opd_sfp_desc);
}
-int
+uint32_t
lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat)
{
if (bf_get(lpfc_read_link_stat_gec2, stat) == 0)
@@ -4864,7 +4924,7 @@ lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat)
return sizeof(struct fc_fec_rdp_desc);
}
-void
+uint32_t
lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
{
uint16_t rdp_cap = 0;
@@ -4923,9 +4983,10 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap);
desc->length = cpu_to_be32(sizeof(desc->info));
+ return sizeof(struct fc_rdp_port_speed_desc);
}
-void
+uint32_t
lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc,
struct lpfc_hba *phba)
{
@@ -4939,9 +5000,10 @@ lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc,
sizeof(desc->port_names.wwpn));
desc->length = cpu_to_be32(sizeof(desc->port_names));
+ return sizeof(struct fc_rdp_port_name_desc);
}
-void
+uint32_t
lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc,
struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
@@ -4962,6 +5024,7 @@ lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc,
}
desc->length = cpu_to_be32(sizeof(desc->port_names));
+ return sizeof(struct fc_rdp_port_name_desc);
}
void
@@ -4976,8 +5039,9 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
uint8_t *pcmd;
struct ls_rjt *stat;
struct fc_rdp_res_frame *rdp_res;
- uint32_t cmdsize;
- int rc, fec_size;
+ uint32_t cmdsize, len;
+ uint16_t *flag_ptr;
+ int rc;
if (status != SUCCESS)
goto error;
@@ -5008,39 +5072,61 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
memset(pcmd, 0, sizeof(struct fc_rdp_res_frame));
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+ /* Update Alarm and Warning */
+ flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS);
+ phba->sfp_alarm |= *flag_ptr;
+ flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS);
+ phba->sfp_warning |= *flag_ptr;
+
/* For RDP payload */
- lpfc_rdp_res_link_service(&rdp_res->link_service_desc, ELS_CMD_RDP);
+ len = 8;
+ len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *)
+ (len + pcmd), ELS_CMD_RDP);
- lpfc_rdp_res_sfp_desc(&rdp_res->sfp_desc,
+ len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd),
rdp_context->page_a0, rdp_context->page_a2);
- lpfc_rdp_res_speed(&rdp_res->portspeed_desc, phba);
- lpfc_rdp_res_link_error(&rdp_res->link_error_desc,
- &rdp_context->link_stat);
- lpfc_rdp_res_diag_port_names(&rdp_res->diag_port_names_desc, phba);
- lpfc_rdp_res_attach_port_names(&rdp_res->attached_port_names_desc,
- vport, ndlp);
- lpfc_rdp_res_bbc_desc(&rdp_res->bbc_desc, &rdp_context->link_stat,
- vport);
- lpfc_rdp_res_oed_temp_desc(&rdp_res->oed_temp_desc,
- rdp_context->page_a2);
- lpfc_rdp_res_oed_voltage_desc(&rdp_res->oed_voltage_desc,
- rdp_context->page_a2);
- lpfc_rdp_res_oed_txbias_desc(&rdp_res->oed_txbias_desc,
- rdp_context->page_a2);
- lpfc_rdp_res_oed_txpower_desc(&rdp_res->oed_txpower_desc,
- rdp_context->page_a2);
- lpfc_rdp_res_oed_rxpower_desc(&rdp_res->oed_rxpower_desc,
- rdp_context->page_a2);
- lpfc_rdp_res_opd_desc(&rdp_res->opd_desc, rdp_context->page_a0, vport);
- fec_size = lpfc_rdp_res_fec_desc(&rdp_res->fec_desc,
+ len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd),
+ phba);
+ len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *)
+ (len + pcmd), &rdp_context->link_stat);
+ len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *)
+ (len + pcmd), phba);
+ len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *)
+ (len + pcmd), vport, ndlp);
+ len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd),
&rdp_context->link_stat);
- rdp_res->length = cpu_to_be32(fec_size + RDP_DESC_PAYLOAD_SIZE);
+ /* Check if the nport is logged in, BZ190632 */
+ if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED))
+ goto lpfc_skip_descriptor;
+
+ len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd),
+ &rdp_context->link_stat, vport);
+ len += lpfc_rdp_res_oed_temp_desc(phba,
+ (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
+ rdp_context->page_a2);
+ len += lpfc_rdp_res_oed_voltage_desc(phba,
+ (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
+ rdp_context->page_a2);
+ len += lpfc_rdp_res_oed_txbias_desc(phba,
+ (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
+ rdp_context->page_a2);
+ len += lpfc_rdp_res_oed_txpower_desc(phba,
+ (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
+ rdp_context->page_a2);
+ len += lpfc_rdp_res_oed_rxpower_desc(phba,
+ (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
+ rdp_context->page_a2);
+ len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd),
+ rdp_context->page_a0, vport);
+
+lpfc_skip_descriptor:
+ rdp_res->length = cpu_to_be32(len - 8);
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
/* Now that we know the true size of the payload, update the BPL */
bpl = (struct ulp_bde64 *)
(((struct lpfc_dmabuf *)(elsiocb->context3))->virt);
- bpl->tus.f.bdeSize = (fec_size + RDP_DESC_PAYLOAD_SIZE + 8);
+ bpl->tus.f.bdeSize = len;
bpl->tus.f.bdeFlags = 0;
bpl->tus.w = le32_to_cpu(bpl->tus.w);
@@ -5165,6 +5251,12 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
be32_to_cpu(rdp_req->nport_id_desc.nport_id),
be32_to_cpu(rdp_req->nport_id_desc.length));
+ if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED) &&
+ !phba->cfg_enable_SmartSAN) {
+ rjt_err = LSRJT_UNABLE_TPC;
+ rjt_expl = LSEXP_PORT_LOGIN_REQ;
+ goto error;
+ }
if (sizeof(struct fc_rdp_nport_desc) !=
be32_to_cpu(rdp_req->rdp_des_length))
goto rjt_logerr;
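
The lpfc_els.c rewrite above drops the fixed fc_rdp_res_frame layout in favor of writing each descriptor at a running byte offset and summing the sizes the helpers now return. A minimal sketch of that accumulate-and-cast pattern, with hypothetical helper names standing in for the lpfc_rdp_res_* functions:

	/* Sketch only: each writer fills its descriptor at the current
	 * offset and returns the bytes it consumed; the write_desc_*
	 * names and ctx argument are illustrative, not the driver API.
	 */
	uint32_t len = 8;	/* reserve the 8-byte ELS ACC header */

	len += write_desc_a((struct desc_a *)(pcmd + len), ctx);
	len += write_desc_b((struct desc_b *)(pcmd + len), ctx);

	rdp_res->length = cpu_to_be32(len - 8);	/* payload excludes header */
	bpl->tus.f.bdeSize = len;		/* BPL covers header + payload */

This keeps the reported frame length correct even when trailing descriptors are skipped, as with the unregistered-RPI early exit at lpfc_skip_descriptor.
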
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 39f0fd000..822654322 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -346,7 +346,7 @@ struct csp {
uint8_t fcphHigh; /* FC Word 0, byte 0 */
uint8_t fcphLow;
uint8_t bbCreditMsb;
- uint8_t bbCreditlsb; /* FC Word 0, byte 3 */
+ uint8_t bbCreditLsb; /* FC Word 0, byte 3 */
/*
* Word 1 Bit 31 in common service parameter is overloaded.
@@ -1206,6 +1206,12 @@ struct fc_rdp_bbc_desc {
struct fc_rdp_bbc_info bbc_info;
};
+/* Optical Element Type Transgression Flags */
+#define RDP_OET_LOW_WARNING 0x1
+#define RDP_OET_HIGH_WARNING 0x2
+#define RDP_OET_LOW_ALARM 0x4
+#define RDP_OET_HIGH_ALARM 0x8
+
#define RDP_OED_TEMPERATURE 0x1
#define RDP_OED_VOLTAGE 0x2
#define RDP_OED_TXBIAS 0x3
@@ -1233,8 +1239,8 @@ struct fc_rdp_opd_sfp_info {
uint8_t vendor_name[16];
uint8_t model_number[16];
uint8_t serial_number[16];
- uint8_t reserved[2];
uint8_t revision[2];
+ uint8_t reserved[2];
uint8_t date[8];
};
@@ -1261,27 +1267,17 @@ struct fc_rdp_res_frame {
struct fc_rdp_link_error_status_desc link_error_desc; /* Word 13-21 */
struct fc_rdp_port_name_desc diag_port_names_desc; /* Word 22-27 */
struct fc_rdp_port_name_desc attached_port_names_desc;/* Word 28-33 */
- struct fc_rdp_bbc_desc bbc_desc; /* FC Word 34-38*/
- struct fc_rdp_oed_sfp_desc oed_temp_desc; /* FC Word 39-43*/
- struct fc_rdp_oed_sfp_desc oed_voltage_desc; /* FC word 44-48*/
- struct fc_rdp_oed_sfp_desc oed_txbias_desc; /* FC word 49-53*/
- struct fc_rdp_oed_sfp_desc oed_txpower_desc; /* FC word 54-58*/
- struct fc_rdp_oed_sfp_desc oed_rxpower_desc; /* FC word 59-63*/
- struct fc_rdp_opd_sfp_desc opd_desc; /* FC word 64-80*/
- struct fc_fec_rdp_desc fec_desc; /* FC word 81-84*/
+ struct fc_fec_rdp_desc fec_desc; /* FC word 34-37*/
+ struct fc_rdp_bbc_desc bbc_desc; /* FC Word 38-42*/
+ struct fc_rdp_oed_sfp_desc oed_temp_desc; /* FC Word 43-47*/
+ struct fc_rdp_oed_sfp_desc oed_voltage_desc; /* FC word 48-52*/
+ struct fc_rdp_oed_sfp_desc oed_txbias_desc; /* FC word 53-57*/
+ struct fc_rdp_oed_sfp_desc oed_txpower_desc; /* FC word 58-62*/
+ struct fc_rdp_oed_sfp_desc oed_rxpower_desc; /* FC word 63-67*/
+ struct fc_rdp_opd_sfp_desc opd_desc; /* FC word 68-84*/
};
-#define RDP_DESC_PAYLOAD_SIZE (sizeof(struct fc_rdp_link_service_desc) \
- + sizeof(struct fc_rdp_sfp_desc) \
- + sizeof(struct fc_rdp_port_speed_desc) \
- + sizeof(struct fc_rdp_link_error_status_desc) \
- + (sizeof(struct fc_rdp_port_name_desc) * 2) \
- + sizeof(struct fc_rdp_bbc_desc) \
- + (sizeof(struct fc_rdp_oed_sfp_desc) * 5) \
- + sizeof(struct fc_rdp_opd_sfp_desc))
-
-
/******** FDMI ********/
/* lpfc_sli_ct_request defines the CT_IU preamble for FDMI commands */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 0c7070bf2..ee8022737 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -544,6 +544,8 @@ struct lpfc_register {
uint32_t word0;
};
+#define LPFC_PORT_SEM_UE_RECOVERABLE 0xE000
+#define LPFC_PORT_SEM_MASK 0xF000
/* The following BAR0 Registers apply to SLI4 if_type 0 UCNAs. */
#define LPFC_UERR_STATUS_HI 0x00A4
#define LPFC_UERR_STATUS_LO 0x00A0
@@ -937,6 +939,7 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_READ_OBJECT_LIST 0xAD
#define LPFC_MBOX_OPCODE_DELETE_OBJECT 0xAE
#define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS 0xB5
+#define LPFC_MBOX_OPCODE_SET_FEATURES 0xBF
/* FCoE Opcodes */
#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01
@@ -2590,10 +2593,8 @@ struct lpfc_mbx_memory_dump_type3 {
#define SFF_RXPOWER_B1 104
#define SFF_RXPOWER_B0 105
#define SSF_STATUS_CONTROL 110
-#define SSF_ALARM_FLAGS_B1 112
-#define SSF_ALARM_FLAGS_B0 113
-#define SSF_WARNING_FLAGS_B1 116
-#define SSF_WARNING_FLAGS_B0 117
+#define SSF_ALARM_FLAGS 112
+#define SSF_WARNING_FLAGS 116
#define SSF_EXT_TATUS_CONTROL_B1 118
#define SSF_EXT_TATUS_CONTROL_B0 119
#define SSF_A2_VENDOR_SPECIFIC 120
@@ -2887,8 +2888,37 @@ struct lpfc_sli4_parameters {
#define cfg_ext_embed_cb_SHIFT 0
#define cfg_ext_embed_cb_MASK 0x00000001
#define cfg_ext_embed_cb_WORD word19
+#define cfg_mds_diags_SHIFT 1
+#define cfg_mds_diags_MASK 0x00000001
+#define cfg_mds_diags_WORD word19
};
+#define LPFC_SET_UE_RECOVERY 0x10
+#define LPFC_SET_MDS_DIAGS 0x11
+struct lpfc_mbx_set_feature {
+ struct mbox_header header;
+ uint32_t feature;
+ uint32_t param_len;
+ uint32_t word6;
+#define lpfc_mbx_set_feature_UER_SHIFT 0
+#define lpfc_mbx_set_feature_UER_MASK 0x00000001
+#define lpfc_mbx_set_feature_UER_WORD word6
+#define lpfc_mbx_set_feature_mds_SHIFT 0
+#define lpfc_mbx_set_feature_mds_MASK 0x00000001
+#define lpfc_mbx_set_feature_mds_WORD word6
+#define lpfc_mbx_set_feature_mds_deep_loopbk_SHIFT 1
+#define lpfc_mbx_set_feature_mds_deep_loopbk_MASK 0x00000001
+#define lpfc_mbx_set_feature_mds_deep_loopbk_WORD word6
+ uint32_t word7;
+#define lpfc_mbx_set_feature_UERP_SHIFT 0
+#define lpfc_mbx_set_feature_UERP_MASK 0x0000ffff
+#define lpfc_mbx_set_feature_UERP_WORD word7
+#define lpfc_mbx_set_feature_UESR_SHIFT 16
+#define lpfc_mbx_set_feature_UESR_MASK 0x0000ffff
+#define lpfc_mbx_set_feature_UESR_WORD word7
+};
+
+
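
The SHIFT/MASK/WORD triplets above follow the driver's existing bf_set()/bf_get() accessor convention, so callers compose the feature words without open-coded shifting. A sketch of how the UE-recovery request is built (it mirrors lpfc_set_features later in this patch):

	struct lpfc_mbx_set_feature *feat = &mbox->u.mqe.un.set_feature;

	bf_set(lpfc_mbx_set_feature_UER, feat, 1);	/* bit 0 of word6 */
	feat->feature   = LPFC_SET_UE_RECOVERY;
	feat->param_len = 8;				/* word6 + word7 */

On completion the firmware returns the UE timeout values, which the setup path reads back with bf_get(lpfc_mbx_set_feature_UESR, ...) and bf_get(lpfc_mbx_set_feature_UERP, ...).
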
struct lpfc_mbx_get_sli4_parameters {
struct mbox_header header;
struct lpfc_sli4_parameters sli4_parameters;
@@ -3281,6 +3311,7 @@ struct lpfc_mqe {
struct lpfc_mbx_get_prof_cfg get_prof_cfg;
struct lpfc_mbx_wr_object wr_object;
struct lpfc_mbx_get_port_name get_port_name;
+ struct lpfc_mbx_set_feature set_feature;
struct lpfc_mbx_memory_dump_type3 mem_dump_type3;
struct lpfc_mbx_nop nop;
} un;
@@ -3443,6 +3474,8 @@ struct lpfc_acqe_fc_la {
#define LPFC_FC_LA_TYPE_LINK_UP 0x1
#define LPFC_FC_LA_TYPE_LINK_DOWN 0x2
#define LPFC_FC_LA_TYPE_NO_HARD_ALPA 0x3
+#define LPFC_FC_LA_TYPE_MDS_LINK_DOWN 0x4
+#define LPFC_FC_LA_TYPE_MDS_LOOPBACK 0x5
#define lpfc_acqe_fc_la_port_type_SHIFT 6
#define lpfc_acqe_fc_la_port_type_MASK 0x00000003
#define lpfc_acqe_fc_la_port_type_WORD word0
diff --git a/drivers/scsi/lpfc/lpfc_ids.h b/drivers/scsi/lpfc/lpfc_ids.h
new file mode 100644
index 000000000..5733feafe
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_ids.h
@@ -0,0 +1,122 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2004-2016 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.emulex.com *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#include <linux/pci.h>
+
+const struct pci_device_id lpfc_id_table[] = {
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G6_FC,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { 0 }
+};
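
Moving the ID table into its own header keeps lpfc_init.c shorter; the consumer wiring stays the standard PCI pattern. A hedged sketch (the probe/remove callback names here are illustrative, not necessarily the driver's):

	#include "lpfc_ids.h"

	MODULE_DEVICE_TABLE(pci, lpfc_id_table);

	static struct pci_driver lpfc_driver = {
		.name		= LPFC_DRIVER_NAME,
		.id_table	= lpfc_id_table,
		.probe		= lpfc_pci_probe_one,	/* illustrative */
		.remove		= lpfc_pci_remove_one,	/* illustrative */
	};

pci_register_driver() then matches enumerated devices against the table at probe time.
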
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 6029c4839..b100a22b3 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -52,6 +52,7 @@
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
+#include "lpfc_ids.h"
char *_dump_buf_data;
unsigned long _dump_buf_data_order;
@@ -568,7 +569,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
phba->last_completion_time = jiffies;
/* Set up error attention (ERATT) polling timer */
mod_timer(&phba->eratt_poll,
- jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
+ jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
if (phba->hba_flag & LINK_DISABLED) {
lpfc_printf_log(phba,
@@ -1587,35 +1588,39 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
int rc;
uint32_t intr_mode;
- /*
- * On error status condition, driver need to wait for port
- * ready before performing reset.
- */
- rc = lpfc_sli4_pdev_status_reg_wait(phba);
- if (!rc) {
- /* need reset: attempt for port recovery */
- if (en_rn_msg)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2887 Reset Needed: Attempting Port "
- "Recovery...\n");
- lpfc_offline_prep(phba, mbx_action);
- lpfc_offline(phba);
- /* release interrupt for possible resource change */
- lpfc_sli4_disable_intr(phba);
- lpfc_sli_brdrestart(phba);
- /* request and enable interrupt */
- intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
- if (intr_mode == LPFC_INTR_ERROR) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3175 Failed to enable interrupt\n");
- return -EIO;
- } else {
- phba->intr_mode = intr_mode;
- }
- rc = lpfc_online(phba);
- if (rc == 0)
- lpfc_unblock_mgmt_io(phba);
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_IF_TYPE_2) {
+ /*
+ * On an error status condition, the driver needs to wait for the
+ * port to become ready before performing the reset.
+ */
+ rc = lpfc_sli4_pdev_status_reg_wait(phba);
+ if (rc)
+ return rc;
}
+
+ /* need reset: attempt for port recovery */
+ if (en_rn_msg)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2887 Reset Needed: Attempting Port "
+ "Recovery...\n");
+ lpfc_offline_prep(phba, mbx_action);
+ lpfc_offline(phba);
+ /* release interrupt for possible resource change */
+ lpfc_sli4_disable_intr(phba);
+ lpfc_sli_brdrestart(phba);
+ /* request and enable interrupt */
+ intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
+ if (intr_mode == LPFC_INTR_ERROR) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3175 Failed to enable interrupt\n");
+ return -EIO;
+ }
+ phba->intr_mode = intr_mode;
+ rc = lpfc_online(phba);
+ if (rc == 0)
+ lpfc_unblock_mgmt_io(phba);
+
return rc;
}
@@ -1636,10 +1641,11 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
struct lpfc_register portstat_reg = {0};
uint32_t reg_err1, reg_err2;
uint32_t uerrlo_reg, uemasklo_reg;
- uint32_t pci_rd_rc1, pci_rd_rc2;
+ uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
bool en_rn_msg = true;
struct temp_event temp_event_data;
- int rc;
+ struct lpfc_register portsmphr_reg;
+ int rc, i;
/* If the pci channel is offline, ignore possible errors, since
* we cannot communicate with the pci card anyway.
@@ -1647,6 +1653,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
if (pci_channel_offline(phba->pcidev))
return;
+ memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
switch (if_type) {
case LPFC_SLI_INTF_IF_TYPE_0:
@@ -1659,6 +1666,55 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
/* consider PCI bus read error as pci_channel_offline */
if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
return;
+ if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
+ lpfc_sli4_offline_eratt(phba);
+ return;
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "7623 Checking UE recoverable");
+
+ for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
+ if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
+ &portsmphr_reg.word0))
+ continue;
+
+ smphr_port_status = bf_get(lpfc_port_smphr_port_status,
+ &portsmphr_reg);
+ if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
+ LPFC_PORT_SEM_UE_RECOVERABLE)
+ break;
+ /* Sleep for 1 sec before checking the semaphore */
+ msleep(1000);
+ }
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "4827 smphr_port_status x%x : Waited %dSec",
+ smphr_port_status, i);
+
+ /* Recoverable UE, reset the HBA device */
+ if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
+ LPFC_PORT_SEM_UE_RECOVERABLE) {
+ for (i = 0; i < 20; i++) {
+ msleep(1000);
+ if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
+ &portsmphr_reg.word0) &&
+ (LPFC_POST_STAGE_PORT_READY ==
+ bf_get(lpfc_port_smphr_port_status,
+ &portsmphr_reg))) {
+ rc = lpfc_sli4_port_sta_fn_reset(phba,
+ LPFC_MBX_NO_WAIT, en_rn_msg);
+ if (rc == 0)
+ return;
+ lpfc_printf_log(phba,
+ KERN_ERR, LOG_INIT,
+ "4215 Failed to recover UE");
+ break;
+ }
+ }
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "7624 Firmware not ready: Failing UE recovery,"
+ " waited %dSec", i);
lpfc_sli4_offline_eratt(phba);
break;
@@ -1681,6 +1737,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
"taking port offline Data: x%x x%x\n",
reg_err1, reg_err2);
+ phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
temp_event_data.event_code = LPFC_CRIT_TEMP;
temp_event_data.data = 0xFFFFFFFF;
@@ -3985,6 +4042,8 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
{
struct lpfc_dmabuf *mp;
LPFC_MBOXQ_t *pmb;
+ MAILBOX_t *mb;
+ struct lpfc_mbx_read_top *la;
int rc;
if (bf_get(lpfc_trailer_type, acqe_fc) !=
@@ -4055,6 +4114,24 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
pmb->vport = phba->pport;
+ if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
+ /* Parse and translate status field */
+ mb = &pmb->u.mb;
+ mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba,
+ (void *)acqe_fc);
+
+ /* Parse and translate link attention fields */
+ la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
+ la->eventTag = acqe_fc->event_tag;
+ bf_set(lpfc_mbx_read_top_att_type, la,
+ LPFC_FC_LA_TYPE_LINK_DOWN);
+
+ /* Invoke the mailbox command callback function */
+ lpfc_mbx_cmpl_read_topology(phba, pmb);
+
+ return;
+ }
+
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED)
goto out_free_dmabuf;
@@ -4107,6 +4184,7 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
"3190 Over Temperature:%d Celsius- Port Name %c\n",
acqe_sli->event_data1, port_name);
+ phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
shost = lpfc_shost_from_vport(phba->pport);
fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(temp_event_data),
@@ -4408,7 +4486,8 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
* the corresponding FCF bit in the roundrobin bitmap.
*/
spin_lock_irq(&phba->hbalock);
- if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+ if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
+ (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
spin_unlock_irq(&phba->hbalock);
/* Update FLOGI FCF failover eligible FCF bmask */
lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
@@ -4775,20 +4854,17 @@ static int
lpfc_enable_pci_dev(struct lpfc_hba *phba)
{
struct pci_dev *pdev;
- int bars = 0;
/* Obtain PCI device reference */
if (!phba->pcidev)
goto out_error;
else
pdev = phba->pcidev;
- /* Select PCI BARs */
- bars = pci_select_bars(pdev, IORESOURCE_MEM);
/* Enable PCI device */
if (pci_enable_device_mem(pdev))
goto out_error;
/* Request PCI resource for the device */
- if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
+ if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
goto out_disable_device;
/* Set up device as PCI master and save state for EEH */
pci_set_master(pdev);
@@ -4805,7 +4881,7 @@ out_disable_device:
pci_disable_device(pdev);
out_error:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "1401 Failed to enable pci device, bars:x%x\n", bars);
+ "1401 Failed to enable pci device\n");
return -ENODEV;
}
@@ -4820,17 +4896,14 @@ static void
lpfc_disable_pci_dev(struct lpfc_hba *phba)
{
struct pci_dev *pdev;
- int bars;
/* Obtain PCI device reference */
if (!phba->pcidev)
return;
else
pdev = phba->pcidev;
- /* Select PCI BARs */
- bars = pci_select_bars(pdev, IORESOURCE_MEM);
/* Release PCI resource and disable PCI device */
- pci_release_selected_regions(pdev, bars);
+ pci_release_mem_regions(pdev);
pci_disable_device(pdev);
return;
@@ -5363,6 +5436,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
goto out_free_bsmbx;
}
}
+
/*
* Get sli4 parameters that override parameters from Port capabilities.
* If this call fails, it isn't critical unless the SLI4 parameters come
@@ -6091,6 +6165,7 @@ lpfc_hba_alloc(struct pci_dev *pdev)
kfree(phba);
return NULL;
}
+ phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
spin_lock_init(&phba->ct_ev_lock);
INIT_LIST_HEAD(&phba->ct_ev_waiters);
@@ -9527,6 +9602,14 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
phba->fcp_embed_io = 1;
else
phba->fcp_embed_io = 0;
+
+ /*
+ * Check if the SLI port supports MDS Diagnostics
+ */
+ if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
+ phba->mds_diags_support = 1;
+ else
+ phba->mds_diags_support = 0;
return 0;
}
@@ -9722,7 +9805,6 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
struct lpfc_vport **vports;
struct lpfc_hba *phba = vport->phba;
int i;
- int bars = pci_select_bars(pdev, IORESOURCE_MEM);
spin_lock_irq(&phba->hbalock);
vport->load_flag |= FC_UNLOADING;
@@ -9797,7 +9879,7 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
lpfc_hba_free(phba);
- pci_release_selected_regions(pdev, bars);
+ pci_release_mem_regions(pdev);
pci_disable_device(pdev);
}
@@ -11298,106 +11380,6 @@ lpfc_fof_queue_destroy(struct lpfc_hba *phba)
return 0;
}
-static struct pci_device_id lpfc_id_table[] = {
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G6_FC,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK,
- PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF,
- PCI_ANY_ID, PCI_ANY_ID, },
- { 0 }
-};
-
MODULE_DEVICE_TABLE(pci, lpfc_id_table);
static const struct pci_error_handlers lpfc_err_handler = {
@@ -11452,21 +11434,17 @@ lpfc_init(void)
printk(KERN_ERR "Could not register lpfcmgmt device, "
"misc_register returned with status %d", error);
- if (lpfc_enable_npiv) {
- lpfc_transport_functions.vport_create = lpfc_vport_create;
- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
- }
+ lpfc_transport_functions.vport_create = lpfc_vport_create;
+ lpfc_transport_functions.vport_delete = lpfc_vport_delete;
lpfc_transport_template =
fc_attach_transport(&lpfc_transport_functions);
if (lpfc_transport_template == NULL)
return -ENOMEM;
- if (lpfc_enable_npiv) {
- lpfc_vport_transport_template =
- fc_attach_transport(&lpfc_vport_transport_functions);
- if (lpfc_vport_transport_template == NULL) {
- fc_release_transport(lpfc_transport_template);
- return -ENOMEM;
- }
+ lpfc_vport_transport_template =
+ fc_attach_transport(&lpfc_vport_transport_functions);
+ if (lpfc_vport_transport_template == NULL) {
+ fc_release_transport(lpfc_transport_template);
+ return -ENOMEM;
}
/* Initialize in case vector mapping is needed */
@@ -11478,8 +11456,7 @@ lpfc_init(void)
error = pci_register_driver(&lpfc_driver);
if (error) {
fc_release_transport(lpfc_transport_template);
- if (lpfc_enable_npiv)
- fc_release_transport(lpfc_vport_transport_template);
+ fc_release_transport(lpfc_vport_transport_template);
}
return error;
@@ -11498,8 +11475,7 @@ lpfc_exit(void)
misc_deregister(&lpfc_mgmt_dev);
pci_unregister_driver(&lpfc_driver);
fc_release_transport(lpfc_transport_template);
- if (lpfc_enable_npiv)
- fc_release_transport(lpfc_vport_transport_template);
+ fc_release_transport(lpfc_vport_transport_template);
if (_dump_buf_data) {
printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
"_dump_buf_data at 0x%p\n",
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index c7e5695da..d197aa176 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2015 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -3335,8 +3335,11 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
* OAS, set the oas iocb related flags.
*/
if ((phba->cfg_fof) && ((struct lpfc_device_data *)
- scsi_cmnd->device->hostdata)->oas_enabled)
+ scsi_cmnd->device->hostdata)->oas_enabled) {
lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
+ lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
+ scsi_cmnd->device->hostdata)->priority;
+ }
return 0;
}
@@ -5607,6 +5610,7 @@ lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
sizeof(struct lpfc_name));
lun_info->device_id.lun = lun;
lun_info->oas_enabled = false;
+ lun_info->priority = phba->cfg_XLanePriority;
lun_info->available = false;
return lun_info;
}
@@ -5798,7 +5802,7 @@ lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
**/
bool
lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
- struct lpfc_name *target_wwpn, uint64_t lun)
+ struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
{
struct lpfc_device_data *lun_info;
@@ -5825,6 +5829,7 @@ lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
false);
if (lun_info) {
lun_info->oas_enabled = true;
+ lun_info->priority = pri;
lun_info->available = false;
list_add_tail(&lun_info->listentry, &phba->luns);
spin_unlock_irqrestore(&phba->devicelock, flags);
@@ -5886,6 +5891,7 @@ lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
struct scsi_host_template lpfc_template_s3 = {
.module = THIS_MODULE,
.name = LPFC_DRIVER_NAME,
+ .proc_name = LPFC_DRIVER_NAME,
.info = lpfc_info,
.queuecommand = lpfc_queuecommand,
.eh_abort_handler = lpfc_abort_handler,
@@ -5910,6 +5916,7 @@ struct scsi_host_template lpfc_template_s3 = {
struct scsi_host_template lpfc_template = {
.module = THIS_MODULE,
.name = LPFC_DRIVER_NAME,
+ .proc_name = LPFC_DRIVER_NAME,
.info = lpfc_info,
.queuecommand = lpfc_queuecommand,
.eh_abort_handler = lpfc_abort_handler,
@@ -5935,6 +5942,7 @@ struct scsi_host_template lpfc_template = {
struct scsi_host_template lpfc_vport_template = {
.module = THIS_MODULE,
.name = LPFC_DRIVER_NAME,
+ .proc_name = LPFC_DRIVER_NAME,
.info = lpfc_info,
.queuecommand = lpfc_queuecommand,
.eh_abort_handler = lpfc_abort_handler,
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 18b9260cc..8cb80daba 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2015 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -51,6 +51,7 @@ struct lpfc_device_data {
struct list_head listentry;
struct lpfc_rport_data *rport_data;
struct lpfc_device_id device_id;
+ uint8_t priority;
bool oas_enabled;
bool available;
};
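
Taken together, the lpfc_scsi.c and lpfc_scsi.h hunks above and the lpfc_sli.c hunks below plumb a per-LUN OAS priority from the device data into each I/O and finally into the WQE's CCP field, falling back to the global cfg_XLanePriority. In outline (condensed from the hunks, not new code):

	lun_info->priority = pri;			/* set at OAS enable */
	lpfc_cmd->cur_iocbq.priority = lun_info->priority;  /* per command */
	...
	bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
	bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
	       (iocbq->priority ? iocbq->priority
				: phba->cfg_XLanePriority) << 1);
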
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 70edf21ae..7080ce292 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1323,21 +1323,18 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
{
lockdep_assert_held(&phba->hbalock);
+ BUG_ON(!piocb || !piocb->vport);
+
list_add_tail(&piocb->list, &pring->txcmplq);
piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
(piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
(piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN) &&
- (!(piocb->vport->load_flag & FC_UNLOADING))) {
- if (!piocb->vport)
- BUG();
- else
- mod_timer(&piocb->vport->els_tmofunc,
- jiffies +
- msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
- }
-
+ (!(piocb->vport->load_flag & FC_UNLOADING)))
+ mod_timer(&piocb->vport->els_tmofunc,
+ jiffies +
+ msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
return 0;
}
@@ -2947,8 +2944,8 @@ void lpfc_poll_eratt(unsigned long ptr)
else
cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
- /* 64-bit integer division not supporte on 32-bit x86 - use do_div */
- do_div(cnt, LPFC_ERATT_POLL_INTERVAL);
+ /* 64-bit integer division not supported on 32-bit x86 - use do_div */
+ do_div(cnt, phba->eratt_poll_interval);
phba->sli.slistat.sli_ips = cnt;
phba->sli.slistat.sli_prev_intr = sli_intr;
@@ -2963,7 +2960,7 @@ void lpfc_poll_eratt(unsigned long ptr)
/* Restart the timer for next eratt poll */
mod_timer(&phba->eratt_poll,
jiffies +
- msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
+ msecs_to_jiffies(1000 * phba->eratt_poll_interval));
return;
}
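
As the do_div() comment above notes, a plain 64-bit division would not link on 32-bit x86; do_div() is the portable idiom. It divides its first argument in place and returns the 32-bit remainder:

	u64 cnt = sli_intr_delta;		/* 64-bit dividend */
	u32 rem;

	rem = do_div(cnt, interval_secs);	/* cnt becomes the quotient */
	/* cnt == interrupts per polling interval; rem is discarded here */
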
@@ -4665,13 +4662,13 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
int mode = 3, i;
int longs;
- switch (lpfc_sli_mode) {
+ switch (phba->cfg_sli_mode) {
case 2:
if (phba->cfg_enable_npiv) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
- "1824 NPIV enabled: Override lpfc_sli_mode "
+ "1824 NPIV enabled: Override sli_mode "
"parameter (%d) to auto (0).\n",
- lpfc_sli_mode);
+ phba->cfg_sli_mode);
break;
}
mode = 2;
@@ -4681,8 +4678,8 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
- "1819 Unrecognized lpfc_sli_mode "
- "parameter: %d.\n", lpfc_sli_mode);
+ "1819 Unrecognized sli_mode parameter: %d.\n",
+ phba->cfg_sli_mode);
break;
}
@@ -4690,12 +4687,14 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
rc = lpfc_sli_config_port(phba, mode);
- if (rc && lpfc_sli_mode == 3)
+ if (rc && phba->cfg_sli_mode == 3)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
"1820 Unable to select SLI-3. "
"Not supported by adapter.\n");
if (rc && mode != 2)
rc = lpfc_sli_config_port(phba, 2);
+ else if (rc && mode == 2)
+ rc = lpfc_sli_config_port(phba, 3);
if (rc)
goto lpfc_sli_hba_setup_error;
@@ -5690,6 +5689,38 @@ lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
return rc;
}
+void
+lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
+ uint32_t feature)
+{
+ uint32_t len;
+
+ len = sizeof(struct lpfc_mbx_set_feature) -
+ sizeof(struct lpfc_sli4_cfg_mhdr);
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_SET_FEATURES, len,
+ LPFC_SLI4_MBX_EMBED);
+
+ switch (feature) {
+ case LPFC_SET_UE_RECOVERY:
+ bf_set(lpfc_mbx_set_feature_UER,
+ &mbox->u.mqe.un.set_feature, 1);
+ mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
+ mbox->u.mqe.un.set_feature.param_len = 8;
+ break;
+ case LPFC_SET_MDS_DIAGS:
+ bf_set(lpfc_mbx_set_feature_mds,
+ &mbox->u.mqe.un.set_feature, 1);
+ bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
+ &mbox->u.mqe.un.set_feature, 0);
+ mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
+ mbox->u.mqe.un.set_feature.param_len = 8;
+ break;
+ }
+
+ return;
+}
+
/**
* lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
* @phba: Pointer to HBA context object.
@@ -6414,6 +6445,30 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
phba->pport->cfg_lun_queue_depth = rc;
}
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_IF_TYPE_0) {
+ lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (rc == MBX_SUCCESS) {
+ phba->hba_flag |= HBA_RECOVERABLE_UE;
+ /* Set 1Sec interval to detect UE */
+ phba->eratt_poll_interval = 1;
+ phba->sli4_hba.ue_to_sr = bf_get(
+ lpfc_mbx_set_feature_UESR,
+ &mboxq->u.mqe.un.set_feature);
+ phba->sli4_hba.ue_to_rp = bf_get(
+ lpfc_mbx_set_feature_UERP,
+ &mboxq->u.mqe.un.set_feature);
+ }
+ }
+
+ if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
+ /* Enable MDS Diagnostics only if the SLI Port supports it */
+ lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (rc != MBX_SUCCESS)
+ phba->mds_diags_support = 0;
+ }
/*
* Discover the port's supported feature set and match it against the
@@ -6612,7 +6667,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* Start error attention (ERATT) polling timer */
mod_timer(&phba->eratt_poll,
- jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
+ jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
/* Enable PCIe device Advanced Error Reporting (AER) if configured */
if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
@@ -8383,8 +8438,11 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
if (iocbq->iocb_flag & LPFC_IO_OAS) {
bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
- if (phba->cfg_XLanePriority) {
- bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
+ bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
+ if (iocbq->priority) {
+ bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
+ (iocbq->priority << 1));
+ } else {
bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
(phba->cfg_XLanePriority << 1));
}
@@ -8439,8 +8497,11 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
if (iocbq->iocb_flag & LPFC_IO_OAS) {
bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
- if (phba->cfg_XLanePriority) {
- bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
+ bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
+ if (iocbq->priority) {
+ bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
+ (iocbq->priority << 1));
+ } else {
bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
(phba->cfg_XLanePriority << 1));
}
@@ -8494,8 +8555,11 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
iocbq->iocb.ulpFCP2Rcvy);
if (iocbq->iocb_flag & LPFC_IO_OAS) {
bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
- if (phba->cfg_XLanePriority) {
- bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
+ bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
+ if (iocbq->priority) {
+ bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
+ (iocbq->priority << 1));
+ } else {
bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
(phba->cfg_XLanePriority << 1));
}
@@ -10136,6 +10200,7 @@ lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
struct lpfc_iocbq *iocbq;
int sum, i;
+ spin_lock_irq(&phba->hbalock);
for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
iocbq = phba->sli.iocbq_lookup[i];
@@ -10143,6 +10208,7 @@ lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
ctx_cmd) == 0)
sum++;
}
+ spin_unlock_irq(&phba->hbalock);
return sum;
}
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 7fe99ff80..74227a28b 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2015 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -57,6 +57,7 @@ struct lpfc_iocbq {
struct lpfc_cq_event cq_event;
IOCB_t iocb; /* IOCB cmd */
+ uint8_t priority; /* OAS priority */
uint8_t retry; /* retry counter for IOCB cmd - if needed */
uint32_t iocb_flag;
#define LPFC_IO_LIBDFC 1 /* libdfc iocb */
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index cd780c294..0b88b5703 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2009-2015 Emulex. All rights reserved. *
+ * Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -511,6 +511,8 @@ struct lpfc_sli4_hba {
uint32_t ue_mask_lo;
uint32_t ue_mask_hi;
+ uint32_t ue_to_sr;
+ uint32_t ue_to_rp;
struct lpfc_register sli_intf;
struct lpfc_pc_sli4_params pc_sli4_params;
struct msix_entry *msix_entries;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index fa0d531bf..c9bf20eb7 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "11.1.0.0."
+#define LPFC_DRIVER_VERSION "11.2.0.0."
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 6bff13e7a..cd91a684c 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -4903,13 +4903,22 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
u16 ioc_status;
u16 sz;
u8 device_missing_delay;
+ u8 num_phys;
- mpt3sas_config_get_number_hba_phys(ioc, &ioc->sas_hba.num_phys);
- if (!ioc->sas_hba.num_phys) {
+ mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
+ if (!num_phys) {
pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
return;
}
+ ioc->sas_hba.phy = kcalloc(num_phys,
+ sizeof(struct _sas_phy), GFP_KERNEL);
+ if (!ioc->sas_hba.phy) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ ioc->sas_hba.num_phys = num_phys;
/* sas_iounit page 0 */
sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
@@ -4969,13 +4978,6 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
- ioc->sas_hba.phy = kcalloc(ioc->sas_hba.num_phys,
- sizeof(struct _sas_phy), GFP_KERNEL);
- if (!ioc->sas_hba.phy) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- goto out;
- }
for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
i))) {
@@ -9033,8 +9035,11 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev)
/* TODO - dump whatever for debugging purposes */
- /* Request a slot reset. */
- return PCI_ERS_RESULT_NEED_RESET;
+ /* This is called only if scsih_pci_error_detected returns
+ * PCI_ERS_RESULT_CAN_RECOVER. Reads and writes to the device
+ * still work, so there is no need to reset the slot.
+ */
+ return PCI_ERS_RESULT_RECOVERED;
}
/*
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index 6a84b82d7..ff93286bc 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -705,6 +705,11 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
goto out_fail;
}
+ if (!sas_node->parent_dev) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
port = sas_port_alloc_num(sas_node->parent_dev);
if ((sas_port_add(port))) {
pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 3b11aad03..2f2a9910e 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -726,7 +726,7 @@ static int _osd_req_list_objects(struct osd_request *or,
return PTR_ERR(bio);
}
- bio->bi_rw &= ~REQ_WRITE;
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
or->in.bio = bio;
or->in.total_bytes = bio->bi_iter.bi_size;
return 0;
@@ -824,7 +824,7 @@ void osd_req_write(struct osd_request *or,
{
_osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len);
WARN_ON(or->out.bio || or->out.total_bytes);
- WARN_ON(0 == (bio->bi_rw & REQ_WRITE));
+ WARN_ON(!op_is_write(bio_op(bio)));
or->out.bio = bio;
or->out.total_bytes = len;
}
@@ -839,7 +839,7 @@ int osd_req_write_kern(struct osd_request *or,
if (IS_ERR(bio))
return PTR_ERR(bio);
- bio->bi_rw |= REQ_WRITE; /* FIXME: bio_set_dir() */
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
osd_req_write(or, obj, offset, bio, len);
return 0;
}
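
These osd hunks track the 4.8 block-layer switch from flag-twiddling on bio->bi_rw to explicit op accessors. The idiom, in brief:

	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);	/* was: bio->bi_rw |= REQ_WRITE */
	WARN_ON(!op_is_write(bio_op(bio)));	/* was: WARN_ON(!(bio->bi_rw & REQ_WRITE)) */

bio_op() extracts the operation and op_is_write() classifies it, so callers no longer test REQ_WRITE directly.
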
@@ -875,7 +875,7 @@ void osd_req_read(struct osd_request *or,
{
_osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
WARN_ON(or->in.bio || or->in.total_bytes);
- WARN_ON(bio->bi_rw & REQ_WRITE);
+ WARN_ON(op_is_write(bio_op(bio)));
or->in.bio = bio;
or->in.total_bytes = len;
}
@@ -956,7 +956,7 @@ static int _osd_req_finalize_cdb_cont(struct osd_request *or, const u8 *cap_key)
if (IS_ERR(bio))
return PTR_ERR(bio);
- bio->bi_rw |= REQ_WRITE;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
/* integrity check the continuation before the bio is linked
* with the other data segments since the continuation
@@ -1077,7 +1077,7 @@ int osd_req_write_sg_kern(struct osd_request *or,
if (IS_ERR(bio))
return PTR_ERR(bio);
- bio->bi_rw |= REQ_WRITE;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
osd_req_write_sg(or, obj, bio, sglist, numentries);
return 0;
@@ -1558,18 +1558,25 @@ static int _osd_req_finalize_data_integrity(struct osd_request *or,
static struct request *_make_request(struct request_queue *q, bool has_write,
struct _osd_io_info *oii, gfp_t flags)
{
- if (oii->bio)
- return blk_make_request(q, oii->bio, flags);
- else {
- struct request *req;
-
- req = blk_get_request(q, has_write ? WRITE : READ, flags);
- if (IS_ERR(req))
- return req;
+ struct request *req;
+ struct bio *bio = oii->bio;
+ int ret;
- blk_rq_set_block_pc(req);
+ req = blk_get_request(q, has_write ? WRITE : READ, flags);
+ if (IS_ERR(req))
return req;
+ blk_rq_set_block_pc(req);
+
+ for_each_bio(bio) {
+ struct bio *bounce_bio = bio;
+
+ blk_queue_bounce(req->q, &bounce_bio);
+ ret = blk_rq_append_bio(req, bounce_bio);
+ if (ret)
+ return ERR_PTR(ret);
}
+
+ return req;
}
static int _init_blk_request(struct osd_request *or,
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 6bd7bf4f4..9fc675f57 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -1249,7 +1249,7 @@ static int pm8001_pci_resume(struct pci_dev *pdev)
/* Chip documentation for the 8070 and 8072 SPCv */
/* states that a 500ms minimum delay is required */
- /* before issuing commands. Otherwise, the firmare */
+ /* before issuing commands. Otherwise, the firmware */
/* will enter an unrecoverable state. */
if (pm8001_ha->chip_id == chip_8070 ||
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 4dc06a13c..fe7469c90 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -147,92 +147,6 @@ static struct bin_attribute sysfs_fw_dump_attr = {
};
static ssize_t
-qla2x00_sysfs_read_fw_dump_template(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
-{
- struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
- struct device, kobj)));
- struct qla_hw_data *ha = vha->hw;
-
- if (!ha->fw_dump_template || !ha->fw_dump_template_len)
- return 0;
-
- ql_dbg(ql_dbg_user, vha, 0x70e2,
- "chunk <- off=%llx count=%zx\n", off, count);
- return memory_read_from_buffer(buf, count, &off,
- ha->fw_dump_template, ha->fw_dump_template_len);
-}
-
-static ssize_t
-qla2x00_sysfs_write_fw_dump_template(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
-{
- struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
- struct device, kobj)));
- struct qla_hw_data *ha = vha->hw;
- uint32_t size;
-
- if (off == 0) {
- if (ha->fw_dump)
- vfree(ha->fw_dump);
- if (ha->fw_dump_template)
- vfree(ha->fw_dump_template);
-
- ha->fw_dump = NULL;
- ha->fw_dump_len = 0;
- ha->fw_dump_template = NULL;
- ha->fw_dump_template_len = 0;
-
- size = qla27xx_fwdt_template_size(buf);
- ql_dbg(ql_dbg_user, vha, 0x70d1,
- "-> allocating fwdt (%x bytes)...\n", size);
- ha->fw_dump_template = vmalloc(size);
- if (!ha->fw_dump_template) {
- ql_log(ql_log_warn, vha, 0x70d2,
- "Failed allocate fwdt (%x bytes).\n", size);
- return -ENOMEM;
- }
- ha->fw_dump_template_len = size;
- }
-
- if (off + count > ha->fw_dump_template_len) {
- count = ha->fw_dump_template_len - off;
- ql_dbg(ql_dbg_user, vha, 0x70d3,
- "chunk -> truncating to %zx bytes.\n", count);
- }
-
- ql_dbg(ql_dbg_user, vha, 0x70d4,
- "chunk -> off=%llx count=%zx\n", off, count);
- memcpy(ha->fw_dump_template + off, buf, count);
-
- if (off + count == ha->fw_dump_template_len) {
- size = qla27xx_fwdt_calculate_dump_size(vha);
- ql_dbg(ql_dbg_user, vha, 0x70d5,
- "-> allocating fwdump (%x bytes)...\n", size);
- ha->fw_dump = vmalloc(size);
- if (!ha->fw_dump) {
- ql_log(ql_log_warn, vha, 0x70d6,
- "Failed allocate fwdump (%x bytes).\n", size);
- return -ENOMEM;
- }
- ha->fw_dump_len = size;
- }
-
- return count;
-}
-static struct bin_attribute sysfs_fw_dump_template_attr = {
- .attr = {
- .name = "fw_dump_template",
- .mode = S_IRUSR | S_IWUSR,
- },
- .size = 0,
- .read = qla2x00_sysfs_read_fw_dump_template,
- .write = qla2x00_sysfs_write_fw_dump_template,
-};
-
-static ssize_t
qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
@@ -973,7 +887,6 @@ static struct sysfs_entry {
int is4GBp_only;
} bin_file_entries[] = {
{ "fw_dump", &sysfs_fw_dump_attr, },
- { "fw_dump_template", &sysfs_fw_dump_template_attr, 0x27 },
{ "nvram", &sysfs_nvram_attr, },
{ "optrom", &sysfs_optrom_attr, },
{ "optrom_ctl", &sysfs_optrom_ctl_attr, },
@@ -1000,8 +913,6 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
continue;
if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
continue;
- if (iter->is4GBp_only == 0x27 && !IS_QLA27XX(vha->hw))
- continue;
ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
iter->attr);
@@ -1858,6 +1769,9 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
if (!fcport)
return;
+ if (test_bit(UNLOADING, &fcport->vha->dpc_flags))
+ return;
+
if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
return;
@@ -1900,10 +1814,9 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
int rval;
struct link_statistics *stats;
dma_addr_t stats_dma;
- struct fc_host_statistics *pfc_host_stat;
+ struct fc_host_statistics *p = &vha->fc_host_stat;
- pfc_host_stat = &vha->fc_host_stat;
- memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
+ memset(p, -1, sizeof(*p));
if (IS_QLAFX00(vha->hw))
goto done;
@@ -1918,17 +1831,17 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
goto done;
stats = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(struct link_statistics), &stats_dma, GFP_KERNEL);
- if (stats == NULL) {
+ sizeof(*stats), &stats_dma, GFP_KERNEL);
+ if (!stats) {
ql_log(ql_log_warn, vha, 0x707d,
"Failed to allocate memory for stats.\n");
goto done;
}
- memset(stats, 0, DMA_POOL_SIZE);
+ memset(stats, 0, sizeof(*stats));
rval = QLA_FUNCTION_FAILED;
if (IS_FWI2_CAPABLE(ha)) {
- rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma);
+ rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, 0);
} else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
!ha->dpc_active) {
/* Must be in a 'READY' state for statistics retrieval. */
@@ -1939,47 +1852,68 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
if (rval != QLA_SUCCESS)
goto done_free;
- pfc_host_stat->link_failure_count = stats->link_fail_cnt;
- pfc_host_stat->loss_of_sync_count = stats->loss_sync_cnt;
- pfc_host_stat->loss_of_signal_count = stats->loss_sig_cnt;
- pfc_host_stat->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
- pfc_host_stat->invalid_tx_word_count = stats->inval_xmit_word_cnt;
- pfc_host_stat->invalid_crc_count = stats->inval_crc_cnt;
+ p->link_failure_count = stats->link_fail_cnt;
+ p->loss_of_sync_count = stats->loss_sync_cnt;
+ p->loss_of_signal_count = stats->loss_sig_cnt;
+ p->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
+ p->invalid_tx_word_count = stats->inval_xmit_word_cnt;
+ p->invalid_crc_count = stats->inval_crc_cnt;
if (IS_FWI2_CAPABLE(ha)) {
- pfc_host_stat->lip_count = stats->lip_cnt;
- pfc_host_stat->tx_frames = stats->tx_frames;
- pfc_host_stat->rx_frames = stats->rx_frames;
- pfc_host_stat->dumped_frames = stats->discarded_frames;
- pfc_host_stat->nos_count = stats->nos_rcvd;
- pfc_host_stat->error_frames =
+ p->lip_count = stats->lip_cnt;
+ p->tx_frames = stats->tx_frames;
+ p->rx_frames = stats->rx_frames;
+ p->dumped_frames = stats->discarded_frames;
+ p->nos_count = stats->nos_rcvd;
+ p->error_frames =
stats->dropped_frames + stats->discarded_frames;
- pfc_host_stat->rx_words = vha->qla_stats.input_bytes;
- pfc_host_stat->tx_words = vha->qla_stats.output_bytes;
+ p->rx_words = vha->qla_stats.input_bytes;
+ p->tx_words = vha->qla_stats.output_bytes;
}
- pfc_host_stat->fcp_control_requests = vha->qla_stats.control_requests;
- pfc_host_stat->fcp_input_requests = vha->qla_stats.input_requests;
- pfc_host_stat->fcp_output_requests = vha->qla_stats.output_requests;
- pfc_host_stat->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
- pfc_host_stat->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
- pfc_host_stat->seconds_since_last_reset =
+ p->fcp_control_requests = vha->qla_stats.control_requests;
+ p->fcp_input_requests = vha->qla_stats.input_requests;
+ p->fcp_output_requests = vha->qla_stats.output_requests;
+ p->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
+ p->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
+ p->seconds_since_last_reset =
get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
- do_div(pfc_host_stat->seconds_since_last_reset, HZ);
+ do_div(p->seconds_since_last_reset, HZ);
done_free:
dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics),
stats, stats_dma);
done:
- return pfc_host_stat;
+ return p;
}
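
The seconds_since_last_reset conversion above leans on do_div(), which divides a 64-bit value in place and evaluates to the remainder; a minimal sketch of the idiom:

#include <linux/jiffies.h>
#include <asm/div64.h>

static u64 jiffies_to_whole_seconds(u64 start)
{
	u64 delta = get_jiffies_64() - start;

	/* do_div() modifies 'delta' in place; the remainder it
	 * returns is simply discarded here. */
	do_div(delta, HZ);
	return delta;
}
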
static void
qla2x00_reset_host_stats(struct Scsi_Host *shost)
{
scsi_qla_host_t *vha = shost_priv(shost);
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ struct link_statistics *stats;
+ dma_addr_t stats_dma;
+ memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
+
+ if (IS_FWI2_CAPABLE(ha)) {
+ stats = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(*stats), &stats_dma, GFP_KERNEL);
+ if (!stats) {
+ ql_log(ql_log_warn, vha, 0x70d7,
+ "Failed to allocate memory for stats.\n");
+ return;
+ }
+
+ /* reset firmware statistics */
+ qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
+
+ dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
+ stats, stats_dma);
+ }
}
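
The reset path reuses the driver's coherent-DMA idiom: one allocation visible to both CPU and device, with no explicit sync calls. A generic sketch of that round trip (device pointer and length are placeholders):

#include <linux/dma-mapping.h>

static int coherent_roundtrip(struct device *dev, size_t len)
{
	dma_addr_t handle;
	void *cpu_addr;

	cpu_addr = dma_alloc_coherent(dev, len, &handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... pass 'handle' to the device, read results via 'cpu_addr' ... */

	dma_free_coherent(dev, len, cpu_addr, handle);
	return 0;
}
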
static void
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 392c147d5..643014f82 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -2246,53 +2246,94 @@ qla2x00_get_priv_stats(struct fc_bsg_job *bsg_job)
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
struct link_statistics *stats = NULL;
dma_addr_t stats_dma;
- int rval = QLA_FUNCTION_FAILED;
+ int rval;
+ uint32_t *cmd = bsg_job->request->rqst_data.h_vendor.vendor_cmd;
+ uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;
if (test_bit(UNLOADING, &vha->dpc_flags))
- goto done;
+ return -ENODEV;
if (unlikely(pci_channel_offline(ha->pdev)))
- goto done;
+ return -ENODEV;
if (qla2x00_reset_active(vha))
- goto done;
+ return -EBUSY;
if (!IS_FWI2_CAPABLE(ha))
- goto done;
+ return -EPERM;
stats = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(struct link_statistics), &stats_dma, GFP_KERNEL);
+ sizeof(*stats), &stats_dma, GFP_KERNEL);
if (!stats) {
ql_log(ql_log_warn, vha, 0x70e2,
- "Failed to allocate memory for stats.\n");
- goto done;
+ "Failed to allocate memory for stats.\n");
+ return -ENOMEM;
}
- memset(stats, 0, sizeof(struct link_statistics));
+ memset(stats, 0, sizeof(*stats));
- rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma);
+ rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);
- if (rval != QLA_SUCCESS)
- goto done_free;
+ if (rval == QLA_SUCCESS) {
+ ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e3,
+ (uint8_t *)stats, sizeof(*stats));
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
+ }
- ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e3,
- (uint8_t *)stats, sizeof(struct link_statistics));
+ bsg_job->reply->reply_payload_rcv_len = sizeof(*stats);
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
- sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
- bsg_job->reply_payload.sg_cnt, stats, sizeof(struct link_statistics));
- bsg_job->reply->reply_payload_rcv_len = sizeof(struct link_statistics);
+ bsg_job->reply_len = sizeof(*bsg_job->reply);
+ bsg_job->reply->result = DID_OK << 16;
+ bsg_job->job_done(bsg_job);
- bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
+ dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
+ stats, stats_dma);
- bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ return 0;
+}
+
+static int
+qla2x00_do_dport_diagnostics(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ int rval;
+ struct qla_dport_diag *dd;
+
+ if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
+ return -EPERM;
+
+ dd = kmalloc(sizeof(*dd), GFP_KERNEL);
+ if (!dd) {
+ ql_log(ql_log_warn, vha, 0x70db,
+ "Failed to allocate memory for dport.\n");
+ return -ENOMEM;
+ }
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));
+
+ rval = qla26xx_dport_diagnostics(
+ vha, dd->buf, sizeof(dd->buf), dd->options);
+ if (rval == QLA_SUCCESS) {
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
+ }
+
+ bsg_job->reply->reply_payload_rcv_len = sizeof(*dd);
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
+
+ bsg_job->reply_len = sizeof(*bsg_job->reply);
bsg_job->reply->result = DID_OK << 16;
bsg_job->job_done(bsg_job);
-done_free:
- dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics),
- stats, stats_dma);
-done:
- return rval;
+ kfree(dd);
+
+ return 0;
}
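
The new handler follows the usual fc_bsg payload convention: bounce the scatter/gather request into a linear buffer, run the command, then copy the result back into the reply scatterlist. A reduced sketch of just the data movement (command issue and error handling elided):

#include <linux/scatterlist.h>
#include <scsi/scsi_transport_fc.h>

static void bsg_bounce_payloads(struct fc_bsg_job *job, void *buf, size_t len)
{
	/* request payload -> linear kernel buffer */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, buf, len);

	/* ... issue the command against 'buf' ... */

	/* linear kernel buffer -> reply payload */
	sg_copy_from_buffer(job->reply_payload.sg_list,
			    job->reply_payload.sg_cnt, buf, len);
}
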
static int
@@ -2360,8 +2401,12 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
return qla27xx_get_bbcr_data(bsg_job);
case QL_VND_GET_PRIV_STATS:
+ case QL_VND_GET_PRIV_STATS_EX:
return qla2x00_get_priv_stats(bsg_job);
+ case QL_VND_DPORT_DIAGNOSTICS:
+ return qla2x00_do_dport_diagnostics(bsg_job);
+
default:
return -ENOSYS;
}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index c80192d45..d97dfd521 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -29,6 +29,8 @@
#define QL_VND_SET_FLASH_UPDATE_CAPS 0x16
#define QL_VND_GET_BBCR_DATA 0x17
#define QL_VND_GET_PRIV_STATS 0x18
+#define QL_VND_DPORT_DIAGNOSTICS 0x19
+#define QL_VND_GET_PRIV_STATS_EX 0x1A
/* BSG Vendor specific subcode returns */
#define EXT_STATUS_OK 0
@@ -266,4 +268,15 @@ struct qla_bbcr_data {
uint16_t mbx1; /* Port state */
uint8_t reserved[9];
} __packed;
+
+struct qla_dport_diag {
+ uint16_t options;
+ uint32_t buf[16];
+ uint8_t unused[62];
+} __packed;
+
+/* D_Port options */
+#define QLA_DPORT_RESULT 0x0
+#define QLA_DPORT_START 0x2
+
#endif
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index b64c504ff..45af34ddc 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,12 +11,11 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x018f | 0x0146 |
+ * | Module Init and Probe | 0x0191 | 0x0146 |
* | | | 0x015b-0x0160 |
- * | | | 0x016e-0x0170 |
- * | Mailbox commands | 0x1192 | |
- * | | | |
- * | Device Discovery | 0x2016 | 0x2020-0x2022, |
+ * | | | 0x016e |
+ * | Mailbox commands | 0x1199 | 0x1193 |
+ * | Device Discovery | 0x2004 | 0x2016 |
* | | | 0x2011-0x2012, |
* | | | 0x2099-0x20a4 |
* | Queue Command and IO tracing | 0x3074 | 0x300b |
@@ -26,11 +25,11 @@
* | | | 0x3036,0x3038 |
* | | | 0x303a |
* | DPC Thread | 0x4023 | 0x4002,0x4013 |
- * | Async Events | 0x5089 | 0x502b-0x502f |
- * | | | 0x505e |
+ * | Async Events | 0x5090 | 0x502b-0x502f |
+ * | | | 0x5047 |
* | | | 0x5084,0x5075 |
* | | | 0x503d,0x5044 |
- * | | | 0x507b,0x505f |
+ * | | | 0x505f |
* | Timer Routines | 0x6012 | |
* | User Space Interactions | 0x70e3 | 0x7018,0x702e |
* | | | 0x7020,0x7024 |
@@ -39,9 +38,9 @@
* | | | 0x70a5-0x70a6 |
* | | | 0x70a8,0x70ab |
* | | | 0x70ad-0x70ae |
+ * | | | 0x70d0-0x70d6 |
* | | | 0x70d7-0x70db |
- * | | | 0x70de-0x70df |
- * | Task Management | 0x803d | 0x8000,0x800b |
+ * | Task Management | 0x8042 | 0x8000,0x800b |
* | | | 0x8019 |
* | | | 0x8025,0x8026 |
* | | | 0x8031,0x8032 |
@@ -2697,29 +2696,24 @@ ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
void
ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
- uint8_t *b, uint32_t size)
+ uint8_t *buf, uint size)
{
- uint32_t cnt;
- uint8_t c;
+ uint cnt;
if (!ql_mask_match(level))
return;
- ql_dbg(level, vha, id, " 0 1 2 3 4 5 6 7 8 "
- "9 Ah Bh Ch Dh Eh Fh\n");
- ql_dbg(level, vha, id, "----------------------------------"
- "----------------------------\n");
-
- ql_dbg(level, vha, id, " ");
- for (cnt = 0; cnt < size;) {
- c = *b++;
- printk("%02x", (uint32_t) c);
- cnt++;
- if (!(cnt % 16))
+ ql_dbg(level, vha, id,
+ "%-+5d 0 1 2 3 4 5 6 7 8 9 A B C D E F\n", size);
+ ql_dbg(level, vha, id,
+ "----- -----------------------------------------------\n");
+ for (cnt = 0; cnt < size; cnt++, buf++) {
+ if (cnt % 16 == 0)
+ ql_dbg(level, vha, id, "%04x:", cnt & ~0xFU);
+ printk(" %02x", *buf);
+ if (cnt % 16 == 15)
printk("\n");
- else
- printk(" ");
}
- if (cnt % 16)
- ql_dbg(level, vha, id, "\n");
+ if (cnt % 16 != 0)
+ printk("\n");
}
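
The rewritten loop emits an offset-prefixed, 16-bytes-per-row dump; where routing through ql_dbg() is not required, the stock kernel helper produces equivalent output, as this sketch shows:

#include <linux/printk.h>

/* DUMP_PREFIX_OFFSET prints a running offset column; 16 bytes
 * per row, byte-sized groups, no trailing ASCII column. */
print_hex_dump(KERN_DEBUG, "qla2xxx: ", DUMP_PREFIX_OFFSET,
	       16, 1, buf, size, false);
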
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 47f8b9b49..ae4a74756 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -829,6 +829,7 @@ struct mbx_cmd_32 {
#define MBA_FW_RESTART_CMPLT 0x8060 /* Firmware restart complete */
#define MBA_INIT_REQUIRED 0x8061 /* Initialization required */
#define MBA_SHUTDOWN_REQUESTED 0x8062 /* Shutdown Requested */
+#define MBA_TEMPERATURE_ALERT 0x8070 /* Temperature Alert */
#define MBA_DPORT_DIAGNOSTICS 0x8080 /* D-port Diagnostics */
#define MBA_FW_INIT_FAILURE 0x8401 /* Firmware initialization failure */
#define MBA_MIRROR_LUN_CHANGE 0x8402 /* Mirror LUN State Change
@@ -3028,6 +3029,7 @@ struct qla_hw_data {
uint32_t mr_reset_hdlr_active:1;
uint32_t mr_intr_valid:1;
+ uint32_t dport_enabled:1;
uint32_t fawwpn_enabled:1;
uint32_t exlogins_enabled:1;
uint32_t exchoffld_enabled:1;
@@ -3128,7 +3130,7 @@ struct qla_hw_data {
#define PCI_DEVICE_ID_QLOGIC_ISP2271 0x2271
#define PCI_DEVICE_ID_QLOGIC_ISP2261 0x2261
- uint32_t device_type;
+ uint32_t isp_type;
#define DT_ISP2100 BIT_0
#define DT_ISP2200 BIT_1
#define DT_ISP2300 BIT_2
@@ -3153,6 +3155,7 @@ struct qla_hw_data {
#define DT_ISP2261 BIT_21
#define DT_ISP_LAST (DT_ISP2261 << 1)
+ uint32_t device_type;
#define DT_T10_PI BIT_25
#define DT_IIDMA BIT_26
#define DT_FWI2 BIT_27
@@ -3160,7 +3163,8 @@ struct qla_hw_data {
#define DT_OEM_001 BIT_29
#define DT_ISP2200A BIT_30
#define DT_EXTENDED_IDS BIT_31
-#define DT_MASK(ha) ((ha)->device_type & (DT_ISP_LAST - 1))
+
+#define DT_MASK(ha) ((ha)->isp_type & (DT_ISP_LAST - 1))
#define IS_QLA2100(ha) (DT_MASK(ha) & DT_ISP2100)
#define IS_QLA2200(ha) (DT_MASK(ha) & DT_ISP2200)
#define IS_QLA2300(ha) (DT_MASK(ha) & DT_ISP2300)
@@ -3370,6 +3374,8 @@ struct qla_hw_data {
uint32_t fw_shared_ram_start;
uint32_t fw_shared_ram_end;
+ uint32_t fw_ddr_ram_start;
+ uint32_t fw_ddr_ram_end;
uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */
uint8_t fw_seriallink_options[4];
@@ -3505,7 +3511,6 @@ struct qla_hw_data {
int cur_vport_count;
struct qla_chip_state_84xx *cs84xx;
- struct qla_statistics qla_stats;
struct isp_operations *isp_ops;
struct workqueue_struct *wq;
struct qlfc_fw fw_buf;
@@ -3656,6 +3661,7 @@ typedef struct scsi_qla_host {
#define PFLG_DISCONNECTED 0 /* PCI device removed */
#define PFLG_DRIVER_REMOVING 1 /* PCI driver .remove */
#define PFLG_DRIVER_PROBING 2 /* PCI driver .probe */
+#define PCI_ERR 30
uint32_t device_flags;
#define SWITCH_FOUND BIT_0
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 4c0f3a774..8a2368b32 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1288,7 +1288,7 @@ struct vp_rpt_id_entry_24xx {
uint8_t vp_idx_map[16];
- uint8_t reserved_4[28];
+ uint8_t reserved_4[24];
uint16_t bbcr;
uint8_t reserved_5[6];
};
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index fe943772f..6ca00813c 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -344,7 +344,7 @@ qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, struct link_statistics *,
extern int
qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,
- dma_addr_t);
+ dma_addr_t, uint);
extern int qla24xx_abort_command(srb_t *);
extern int qla24xx_async_abort_command(srb_t *);
@@ -445,6 +445,9 @@ qla2x00_port_logout(scsi_qla_host_t *, struct fc_port *);
extern int
qla2x00_dump_mctp_data(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
+extern int
+qla26xx_dport_diagnostics(scsi_qla_host_t *, void *, uint, uint);
+
/*
* Global Function Prototypes in qla_isr.c source file.
*/
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index c56cdb35f..5b09296b4 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -624,6 +624,9 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
+ memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
+ memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
+
/* Clear adapter flags. */
vha->flags.online = 0;
ha->flags.chip_reset_done = 0;
@@ -2053,6 +2056,14 @@ qla2x00_update_fw_options(scsi_qla_host_t *vha)
if (IS_QLA6312(ha))
ha->fw_options[2] |= BIT_13;
+ /* Set Retry FLOGI in case of P2P connection */
+ if (ha->operating_mode == P2P) {
+ ha->fw_options[2] |= BIT_3;
+ ql_dbg(ql_dbg_disc, vha, 0x2100,
+ "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
+ __func__, ha->fw_options[2]);
+ }
+
/* Update firmware options. */
qla2x00_set_fw_options(vha, ha->fw_options);
}
@@ -2070,6 +2081,14 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
if (ql2xfwholdabts)
ha->fw_options[3] |= BIT_12;
+ /* Set Retry FLOGI in case of P2P connection */
+ if (ha->operating_mode == P2P) {
+ ha->fw_options[2] |= BIT_3;
+ ql_dbg(ql_dbg_disc, vha, 0x2101,
+ "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
+ __func__, ha->fw_options[2]);
+ }
+
/* Update Serial Link options. */
if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
return;
@@ -2269,13 +2288,13 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
mid_init_cb->options = cpu_to_le16(BIT_1);
mid_init_cb->init_cb.execution_throttle =
cpu_to_le16(ha->cur_fw_xcb_count);
- /* D-Port Status */
- if (IS_DPORT_CAPABLE(ha))
- mid_init_cb->init_cb.firmware_options_1 |=
- cpu_to_le16(BIT_7);
- /* Enable FA-WWPN */
+ ha->flags.dport_enabled =
+ (mid_init_cb->init_cb.firmware_options_1 & BIT_7) != 0;
+ ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
+ (ha->flags.dport_enabled) ? "enabled" : "disabled");
+ /* FA-WWPN Status */
ha->flags.fawwpn_enabled =
- (mid_init_cb->init_cb.firmware_options_1 & BIT_6) ? 1 : 0;
+ (mid_init_cb->init_cb.firmware_options_1 & BIT_6) != 0;
ql_dbg(ql_dbg_init, vha, 0x0141, "FA-WWPN Support: %s.\n",
(ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
}
@@ -6513,6 +6532,14 @@ qla81xx_update_fw_options(scsi_qla_host_t *vha)
if (ql2xfwholdabts)
ha->fw_options[3] |= BIT_12;
+ /* Set Retry FLOGI in case of P2P connection */
+ if (ha->operating_mode == P2P) {
+ ha->fw_options[2] |= BIT_3;
+ ql_dbg(ql_dbg_disc, vha, 0x2103,
+ "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
+ __func__, ha->fw_options[2]);
+ }
+
if (!ql2xetsenable)
goto out;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index a92a62dea..987f1c729 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -710,16 +710,23 @@ skip_rio:
case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
ql_log(ql_log_warn, vha, 0x5007,
- "ISP Response Transfer Error.\n");
+ "ISP Response Transfer Error (%x).\n", mb[1]);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
ql_dbg(ql_dbg_async, vha, 0x5008,
- "Asynchronous WAKEUP_THRES.\n");
+ "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
+ break;
+ case MBA_LOOP_INIT_ERR:
+ ql_log(ql_log_warn, vha, 0x5090,
+ "LOOP INIT ERROR (%x).\n", mb[1]);
+ ha->isp_ops->fw_dump(vha, 1);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
+
case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
ql_dbg(ql_dbg_async, vha, 0x5009,
"LIP occurred (%x).\n", mb[1]);
@@ -1152,12 +1159,20 @@ global_port_update:
case MBA_DPORT_DIAGNOSTICS:
ql_dbg(ql_dbg_async, vha, 0x5052,
- "D-Port Diagnostics: %04x %04x=%s\n", mb[0], mb[1],
+ "D-Port Diagnostics: %04x result=%s\n",
+ mb[0],
mb[1] == 0 ? "start" :
- mb[1] == 1 ? "done (ok)" :
+ mb[1] == 1 ? "done (pass)" :
mb[1] == 2 ? "done (error)" : "other");
break;
+ case MBA_TEMPERATURE_ALERT:
+ ql_dbg(ql_dbg_async, vha, 0x505e,
+ "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
+ if (mb[1] == 0x12)
+ schedule_work(&ha->board_disable);
+ break;
+
default:
ql_dbg(ql_dbg_async, vha, 0x5057,
"Unknown AEN:%04x %04x %04x %04x\n",
@@ -3086,6 +3101,8 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
/* Enable MSI-X vectors for the base queue */
for (i = 0; i < 2; i++) {
qentry = &ha->msix_entries[i];
+ qentry->rsp = rsp;
+ rsp->msix = qentry;
if (IS_P3P_TYPE(ha))
ret = request_irq(qentry->vector,
qla82xx_msix_entries[i].handler,
@@ -3097,8 +3114,6 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
if (ret)
goto msix_register_fail;
qentry->have_irq = 1;
- qentry->rsp = rsp;
- rsp->msix = qentry;
/* Register for CPU affinity notification. */
irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify);
@@ -3119,12 +3134,12 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
*/
if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
qentry = &ha->msix_entries[ATIO_VECTOR];
+ qentry->rsp = rsp;
+ rsp->msix = qentry;
ret = request_irq(qentry->vector,
qla83xx_msix_entries[ATIO_VECTOR].handler,
0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp);
qentry->have_irq = 1;
- qentry->rsp = rsp;
- rsp->msix = qentry;
}
msix_register_fail:
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 968b84613..23698c998 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -64,6 +64,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
return QLA_FUNCTION_TIMEOUT;
}
+ /* If a PCI error has occurred, avoid mailbox processing. */
+ if (test_bit(PCI_ERR, &base_vha->dpc_flags)) {
+ ql_log(ql_log_warn, vha, 0x1191,
+ "PCI error, exiting.\n");
+ return QLA_FUNCTION_TIMEOUT;
+ }
+
reg = ha->iobase;
io_lock_on = base_vha->flags.init_done;
@@ -266,6 +273,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
uint16_t mb0;
uint32_t ictrl;
+ uint16_t w;
if (IS_FWI2_CAPABLE(ha)) {
mb0 = RD_REG_WORD(&reg->isp24.mailbox0);
@@ -279,15 +287,32 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
"mb[0]=0x%x\n", command, ictrl, jiffies, mb0);
ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
- /*
- * Attempt to capture a firmware dump for further analysis
- * of the current firmware state. We do not need to do this
- * if we are intentionally generating a dump.
- */
- if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
- ha->isp_ops->fw_dump(vha, 0);
+ /* Capture the FW dump only if the PCI device is active */
+ if (!pci_channel_offline(vha->hw->pdev)) {
+ pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
+ if (w == 0xffff || ictrl == 0xffffffff) {
+ /* Special case: if a driver unload is in
+ * progress and the PCI device has gone into
+ * a bad state due to a PCI error, only the
+ * PCI_ERR flag will be set; do a premature
+ * exit in that case.
+ */
+ if (test_bit(UNLOADING, &base_vha->dpc_flags))
+ set_bit(PCI_ERR, &base_vha->dpc_flags);
+ ha->flags.mbox_busy = 0;
+ rval = QLA_FUNCTION_TIMEOUT;
+ goto premature_exit;
+ }
- rval = QLA_FUNCTION_TIMEOUT;
+ /* Attempt to capture a firmware dump for further
+ * analysis of the current firmware state. We do not
+ * need to do this if we are intentionally generating
+ * a dump.
+ */
+ if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
+ ha->isp_ops->fw_dump(vha, 0);
+ rval = QLA_FUNCTION_TIMEOUT;
+ }
}
ha->flags.mbox_busy = 0;
@@ -379,7 +404,7 @@ mbx_done:
"**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n",
mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
- ql_dbg(ql_dbg_disc, vha, 0x1115,
+ ql_dbg(ql_dbg_mbx, vha, 0x1198,
"host status: 0x%x, flags:0x%lx, intr ctrl reg:0x%x, intr status:0x%x\n",
RD_REG_DWORD(&reg->isp24.host_status),
ha->fw_dump_cap_flags,
@@ -388,7 +413,7 @@ mbx_done:
mbx_reg = &reg->isp24.mailbox0;
for (i = 0; i < 6; i++)
- ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x1116,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1199,
"mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
} else {
ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
@@ -782,8 +807,9 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
if (IS_FWI2_CAPABLE(ha))
mcp->in_mb |= MBX_17|MBX_16|MBX_15;
if (IS_QLA27XX(ha))
- mcp->in_mb |= MBX_23 | MBX_22 | MBX_21 | MBX_20 | MBX_19 |
- MBX_18 | MBX_14 | MBX_13 | MBX_11 | MBX_10 | MBX_9 | MBX_8;
+ mcp->in_mb |=
+ MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
+ MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8;
mcp->flags = 0;
mcp->tov = MBX_TOV_SECONDS;
@@ -842,6 +868,8 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
ha->pep_version[2] = mcp->mb[14] & 0xff;
ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
+ ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
+ ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
}
failed:
@@ -1844,7 +1872,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
states[0] = mcp->mb[1];
if (IS_FWI2_CAPABLE(vha->hw)) {
states[1] = mcp->mb[2];
- states[2] = mcp->mb[3];
+ states[2] = mcp->mb[3]; /* SFP info */
states[3] = mcp->mb[4];
states[4] = mcp->mb[5];
states[5] = mcp->mb[6]; /* DPORT status */
@@ -2759,15 +2787,16 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- uint32_t *iter, dwords;
+ uint32_t *iter = (void *)stats;
+ ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
struct qla_hw_data *ha = vha->hw;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
"Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_LINK_STATUS;
- mcp->mb[2] = MSW(stats_dma);
- mcp->mb[3] = LSW(stats_dma);
+ mcp->mb[2] = MSW(LSD(stats_dma));
+ mcp->mb[3] = LSW(LSD(stats_dma));
mcp->mb[6] = MSW(MSD(stats_dma));
mcp->mb[7] = LSW(MSD(stats_dma));
mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
@@ -2796,12 +2825,9 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
rval = QLA_FUNCTION_FAILED;
} else {
- /* Copy over data -- firmware data is LE. */
+ /* Re-endianize - firmware data is le32. */
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
"Done %s.\n", __func__);
- dwords = offsetof(struct link_statistics,
- link_up_cnt) / 4;
- iter = &stats->link_fail_cnt;
for ( ; dwords--; iter++)
le32_to_cpus(iter);
}
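
le32_to_cpus() converts a little-endian word in place: a byte swap on big-endian hosts and a no-op on little-endian ones, which is why the loop can run unconditionally. A minimal sketch over an array:

#include <linux/kernel.h>

static void fixup_le32_array(u32 *words, unsigned int n)
{
	/* In-place conversion; compiles away entirely on
	 * little-endian builds. */
	while (n--)
		le32_to_cpus(words++);
}
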
@@ -2815,7 +2841,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
int
qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
- dma_addr_t stats_dma)
+ dma_addr_t stats_dma, uint options)
{
int rval;
mbx_cmd_t mc;
@@ -2832,7 +2858,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
mcp->mb[7] = LSW(MSD(stats_dma));
mcp->mb[8] = sizeof(struct link_statistics) / 4;
mcp->mb[9] = vha->vp_idx;
- mcp->mb[10] = 0;
+ mcp->mb[10] = options;
mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
mcp->in_mb = MBX_2|MBX_1|MBX_0;
mcp->tov = MBX_TOV_SECONDS;
@@ -2847,7 +2873,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
} else {
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
"Done %s.\n", __func__);
- /* Copy over data -- firmware data is LE. */
+ /* Re-endianize - firmware data is le32. */
dwords = sizeof(struct link_statistics) / 4;
iter = &stats->link_fail_cnt;
for ( ; dwords--; iter++)
@@ -5722,3 +5748,54 @@ qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
return rval;
}
+
+int
+qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
+ void *dd_buf, uint size, uint options)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ dma_addr_t dd_dma;
+
+ if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
+ "Entered %s.\n", __func__);
+
+ dd_dma = dma_map_single(&vha->hw->pdev->dev,
+ dd_buf, size, DMA_FROM_DEVICE);
+ if (!dd_dma) {
+ ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+
+ memset(dd_buf, 0, size);
+
+ mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
+ mcp->mb[1] = options;
+ mcp->mb[2] = MSW(LSD(dd_dma));
+ mcp->mb[3] = LSW(LSD(dd_dma));
+ mcp->mb[6] = MSW(MSD(dd_dma));
+ mcp->mb[7] = LSW(MSD(dd_dma));
+ mcp->mb[8] = size;
+ mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->buf_size = size;
+ mcp->flags = MBX_DMA_IN;
+ mcp->tov = MBX_TOV_SECONDS * 4;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
+ "Done %s.\n", __func__);
+ }
+
+ dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
+ size, DMA_FROM_DEVICE);
+
+ return rval;
+}
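
One caveat: the function above tests the streaming mapping against zero, but a zero handle is not a reliable failure indicator on every platform; the portable check is dma_mapping_error(). A sketch of the recommended pattern:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int map_for_device(struct pci_dev *pdev, void *buf, size_t len,
			  dma_addr_t *handle)
{
	*handle = dma_map_single(&pdev->dev, buf, len, DMA_FROM_DEVICE);

	/* dma_mapping_error() is the only portable way to detect
	 * a failed streaming mapping. */
	if (dma_mapping_error(&pdev->dev, *handle))
		return -ENOMEM;
	return 0;
}
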
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 59c477883..6201dce35 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -1183,7 +1183,6 @@ static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
#define qla82xx_get_temp_val(x) ((x) >> 16)
-#define qla82xx_get_temp_val1(x) ((x) && 0x0000FFFF)
#define qla82xx_get_temp_state(x) ((x) & 0xffff)
#define qla82xx_encode_temp(val, state) (((val) << 16) | (state))
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index e007785db..a44c7c9bd 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -80,6 +80,7 @@ MODULE_PARM_DESC(ql2xallocfwdump,
int ql2xextended_error_logging;
module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
+module_param_named(logging, ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xextended_error_logging,
"Option to enable extended error logging,\n"
"\t\tDefault is 0 - no logging. 0x40000000 - Module Init & Probe.\n"
@@ -106,6 +107,7 @@ MODULE_PARM_DESC(ql2xshiftctondsd,
int ql2xfdmienable=1;
module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
+module_param_named(fdmi, ql2xfdmienable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfdmienable,
"Enables FDMI registrations. "
"0 - no FDMI. Default is 1 - perform FDMI.");
@@ -157,6 +159,7 @@ MODULE_PARM_DESC(ql2xmultique_tag,
int ql2xfwloadbin;
module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
+module_param_named(fwload, ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfwloadbin,
"Option to specify location from which to load ISP firmware:.\n"
" 2 -- load firmware via the reject_firmware() (hotplug).\n"
@@ -894,12 +897,16 @@ static void
qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
while (((qla2x00_reset_active(vha)) || ha->dpc_active ||
ha->flags.mbox_busy) ||
test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) ||
- test_bit(FX00_TARGET_SCAN, &vha->dpc_flags))
+ test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) {
+ if (test_bit(UNLOADING, &base_vha->dpc_flags))
+ break;
msleep(1000);
+ }
}
int
@@ -936,6 +943,30 @@ sp_get(struct srb *sp)
atomic_inc(&sp->ref_count);
}
+#define ISP_REG_DISCONNECT 0xffffffffU
+/**************************************************************************
+* qla2x00_isp_reg_stat
+*
+* Description:
+* Read the host status register of the ISP before aborting a command.
+*
+* Input:
+* ha = pointer to host adapter structure.
+*
+*
+* Returns:
+* true if there is a register disconnect (the host status
+* register reads back as all ones), false otherwise.
+**************************************************************************/
+static inline
+uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
+{
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+ return ((RD_REG_DWORD(&reg->host_status)) == ISP_REG_DISCONNECT);
+}
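
The all-ones test works because PCIe reads to a removed or disconnected function complete with ~0 data; a generic form of the same heuristic:

#include <linux/io.h>

/* Any 32-bit register that cannot legitimately read back as
 * 0xffffffff doubles as a device-presence check. */
static bool mmio_dev_gone(void __iomem *reg)
{
	return readl(reg) == 0xffffffffU;
}
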
+
/**************************************************************************
* qla2xxx_eh_abort
*
@@ -963,6 +994,11 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
int rval, wait = 0;
struct qla_hw_data *ha = vha->hw;
+ if (qla2x00_isp_reg_stat(ha)) {
+ ql_log(ql_log_info, vha, 0x8042,
+ "PCI/Register disconnect, exiting.\n");
+ return FAILED;
+ }
if (!CMD_SP(cmd))
return SUCCESS;
@@ -1146,6 +1182,12 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
struct qla_hw_data *ha = vha->hw;
+ if (qla2x00_isp_reg_stat(ha)) {
+ ql_log(ql_log_info, vha, 0x803e,
+ "PCI/Register disconnect, exiting.\n");
+ return FAILED;
+ }
+
return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd,
ha->isp_ops->lun_reset);
}
@@ -1156,6 +1198,12 @@ qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
struct qla_hw_data *ha = vha->hw;
+ if (qla2x00_isp_reg_stat(ha)) {
+ ql_log(ql_log_info, vha, 0x803f,
+ "PCI/Register disconnect, exiting.\n");
+ return FAILED;
+ }
+
return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd,
ha->isp_ops->target_reset);
}
@@ -1183,6 +1231,13 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
int ret = FAILED;
unsigned int id;
uint64_t lun;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (qla2x00_isp_reg_stat(ha)) {
+ ql_log(ql_log_info, vha, 0x8040,
+ "PCI/Register disconnect, exiting.\n");
+ return FAILED;
+ }
id = cmd->device->id;
lun = cmd->device->lun;
@@ -1252,6 +1307,13 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
uint64_t lun;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+ if (qla2x00_isp_reg_stat(ha)) {
+ ql_log(ql_log_info, vha, 0x8041,
+ "PCI/Register disconnect, exiting.\n");
+ schedule_work(&ha->board_disable);
+ return SUCCESS;
+ }
+
id = cmd->device->id;
lun = cmd->device->lun;
@@ -2103,27 +2165,27 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
ha->device_type = DT_EXTENDED_IDS;
switch (ha->pdev->device) {
case PCI_DEVICE_ID_QLOGIC_ISP2100:
- ha->device_type |= DT_ISP2100;
+ ha->isp_type |= DT_ISP2100;
ha->device_type &= ~DT_EXTENDED_IDS;
ha->fw_srisc_address = RISC_START_ADDRESS_2100;
break;
case PCI_DEVICE_ID_QLOGIC_ISP2200:
- ha->device_type |= DT_ISP2200;
+ ha->isp_type |= DT_ISP2200;
ha->device_type &= ~DT_EXTENDED_IDS;
ha->fw_srisc_address = RISC_START_ADDRESS_2100;
break;
case PCI_DEVICE_ID_QLOGIC_ISP2300:
- ha->device_type |= DT_ISP2300;
+ ha->isp_type |= DT_ISP2300;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->fw_srisc_address = RISC_START_ADDRESS_2300;
break;
case PCI_DEVICE_ID_QLOGIC_ISP2312:
- ha->device_type |= DT_ISP2312;
+ ha->isp_type |= DT_ISP2312;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->fw_srisc_address = RISC_START_ADDRESS_2300;
break;
case PCI_DEVICE_ID_QLOGIC_ISP2322:
- ha->device_type |= DT_ISP2322;
+ ha->isp_type |= DT_ISP2322;
ha->device_type |= DT_ZIO_SUPPORTED;
if (ha->pdev->subsystem_vendor == 0x1028 &&
ha->pdev->subsystem_device == 0x0170)
@@ -2131,60 +2193,60 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
ha->fw_srisc_address = RISC_START_ADDRESS_2300;
break;
case PCI_DEVICE_ID_QLOGIC_ISP6312:
- ha->device_type |= DT_ISP6312;
+ ha->isp_type |= DT_ISP6312;
ha->fw_srisc_address = RISC_START_ADDRESS_2300;
break;
case PCI_DEVICE_ID_QLOGIC_ISP6322:
- ha->device_type |= DT_ISP6322;
+ ha->isp_type |= DT_ISP6322;
ha->fw_srisc_address = RISC_START_ADDRESS_2300;
break;
case PCI_DEVICE_ID_QLOGIC_ISP2422:
- ha->device_type |= DT_ISP2422;
+ ha->isp_type |= DT_ISP2422;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->device_type |= DT_IIDMA;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP2432:
- ha->device_type |= DT_ISP2432;
+ ha->isp_type |= DT_ISP2432;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->device_type |= DT_IIDMA;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP8432:
- ha->device_type |= DT_ISP8432;
+ ha->isp_type |= DT_ISP8432;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->device_type |= DT_IIDMA;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP5422:
- ha->device_type |= DT_ISP5422;
+ ha->isp_type |= DT_ISP5422;
ha->device_type |= DT_FWI2;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP5432:
- ha->device_type |= DT_ISP5432;
+ ha->isp_type |= DT_ISP5432;
ha->device_type |= DT_FWI2;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP2532:
- ha->device_type |= DT_ISP2532;
+ ha->isp_type |= DT_ISP2532;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->device_type |= DT_IIDMA;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP8001:
- ha->device_type |= DT_ISP8001;
+ ha->isp_type |= DT_ISP8001;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->device_type |= DT_IIDMA;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP8021:
- ha->device_type |= DT_ISP8021;
+ ha->isp_type |= DT_ISP8021;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
@@ -2192,7 +2254,7 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
qla82xx_init_flags(ha);
break;
case PCI_DEVICE_ID_QLOGIC_ISP8044:
- ha->device_type |= DT_ISP8044;
+ ha->isp_type |= DT_ISP8044;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
@@ -2200,7 +2262,7 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
qla82xx_init_flags(ha);
break;
case PCI_DEVICE_ID_QLOGIC_ISP2031:
- ha->device_type |= DT_ISP2031;
+ ha->isp_type |= DT_ISP2031;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->device_type |= DT_IIDMA;
@@ -2208,7 +2270,7 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP8031:
- ha->device_type |= DT_ISP8031;
+ ha->isp_type |= DT_ISP8031;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->device_type |= DT_IIDMA;
@@ -2216,10 +2278,10 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISPF001:
- ha->device_type |= DT_ISPFX00;
+ ha->isp_type |= DT_ISPFX00;
break;
case PCI_DEVICE_ID_QLOGIC_ISP2071:
- ha->device_type |= DT_ISP2071;
+ ha->isp_type |= DT_ISP2071;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->device_type |= DT_IIDMA;
@@ -2227,7 +2289,7 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP2271:
- ha->device_type |= DT_ISP2271;
+ ha->isp_type |= DT_ISP2271;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->device_type |= DT_IIDMA;
@@ -2235,7 +2297,7 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP2261:
- ha->device_type |= DT_ISP2261;
+ ha->isp_type |= DT_ISP2261;
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->device_type |= DT_IIDMA;
@@ -2901,6 +2963,10 @@ skip_dpc:
qlt_add_target(ha, base_vha);
clear_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags);
+
+ if (test_bit(UNLOADING, &base_vha->dpc_flags))
+ return -ENODEV;
+
return 0;
probe_init_failed:
@@ -3128,6 +3194,12 @@ qla2x00_remove_one(struct pci_dev *pdev)
qla2x00_wait_for_hba_ready(base_vha);
+ /* if UNLOAD flag is already set, then continue unload,
+ * where it was set first.
+ */
+ if (test_bit(UNLOADING, &base_vha->dpc_flags))
+ return;
+
set_bit(UNLOADING, &base_vha->dpc_flags);
if (IS_QLAFX00(ha))
@@ -4907,6 +4979,12 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
struct pci_dev *pdev = ha->pdev;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+ /* if UNLOAD flag is already set, then continue unload,
+ * where it was set first.
+ */
+ if (test_bit(UNLOADING, &base_vha->dpc_flags))
+ return;
+
ql_log(ql_log_warn, base_vha, 0x015b,
"Disabling adapter.\n");
@@ -5002,6 +5080,9 @@ qla2x00_do_dpc(void *data)
"DPC handler waking up, dpc_flags=0x%lx.\n",
base_vha->dpc_flags);
+ if (test_bit(UNLOADING, &base_vha->dpc_flags))
+ break;
+
qla2x00_do_work(base_vha);
if (IS_P3P_TYPE(ha)) {
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index ca39deb4f..bff9689f5 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -215,8 +215,8 @@ static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
vha->hw->tgt.num_pend_cmds++;
- if (vha->hw->tgt.num_pend_cmds > vha->hw->qla_stats.stat_max_pend_cmds)
- vha->hw->qla_stats.stat_max_pend_cmds =
+ if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds)
+ vha->qla_stats.stat_max_pend_cmds =
vha->hw->tgt.num_pend_cmds;
spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}
@@ -5231,8 +5231,8 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
vha->hw->tgt.num_qfull_cmds_dropped++;
if (vha->hw->tgt.num_qfull_cmds_dropped >
- vha->hw->qla_stats.stat_max_qfull_cmds_dropped)
- vha->hw->qla_stats.stat_max_qfull_cmds_dropped =
+ vha->qla_stats.stat_max_qfull_cmds_dropped)
+ vha->qla_stats.stat_max_qfull_cmds_dropped =
vha->hw->tgt.num_qfull_cmds_dropped;
ql_dbg(ql_dbg_io, vha, 0x3068,
@@ -5263,8 +5263,8 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
vha->hw->tgt.num_qfull_cmds_dropped++;
if (vha->hw->tgt.num_qfull_cmds_dropped >
- vha->hw->qla_stats.stat_max_qfull_cmds_dropped)
- vha->hw->qla_stats.stat_max_qfull_cmds_dropped =
+ vha->qla_stats.stat_max_qfull_cmds_dropped)
+ vha->qla_stats.stat_max_qfull_cmds_dropped =
vha->hw->tgt.num_qfull_cmds_dropped;
qlt_chk_exch_leak_thresh_hold(vha);
@@ -5293,8 +5293,8 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
vha->hw->tgt.num_qfull_cmds_alloc++;
if (vha->hw->tgt.num_qfull_cmds_alloc >
- vha->hw->qla_stats.stat_max_qfull_cmds_alloc)
- vha->hw->qla_stats.stat_max_qfull_cmds_alloc =
+ vha->qla_stats.stat_max_qfull_cmds_alloc)
+ vha->qla_stats.stat_max_qfull_cmds_alloc =
vha->hw->tgt.num_qfull_cmds_alloc;
}
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index c3e622524..36935c9ed 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -357,6 +357,13 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
ent->t262.start_addr = start;
ent->t262.end_addr = end;
}
+ } else if (ent->t262.ram_area == T262_RAM_AREA_DDR_RAM) {
+ start = vha->hw->fw_ddr_ram_start;
+ end = vha->hw->fw_ddr_ram_end;
+ if (buf) {
+ ent->t262.start_addr = start;
+ ent->t262.end_addr = end;
+ }
} else {
ql_dbg(ql_dbg_misc, vha, 0xd022,
"%s: unknown area %x\n", __func__, ent->t262.ram_area);
@@ -364,7 +371,7 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
goto done;
}
- if (end < start || end == 0) {
+ if (end <= start || start == 0 || end == 0) {
ql_dbg(ql_dbg_misc, vha, 0xd023,
"%s: unusable range (start=%x end=%x)\n", __func__,
ent->t262.end_addr, ent->t262.start_addr);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 0bc93fa46..3cb1964b7 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.07.00.33-k"
+#define QLA2XXX_VERSION "8.07.00.38-k"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 7
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 0f9ba41e2..6a219a084 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -890,7 +890,7 @@ static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
return 0;
}
-/* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
+/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
int arr_len)
{
@@ -909,7 +909,35 @@ static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
return 0;
}
-/* Returns number of bytes fetched into 'arr' or -1 if error. */
+/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
+ * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
+ * calls, not required to write in ascending offset order. Assumes resid
+ * set to scsi_bufflen() prior to any calls.
+ */
+static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
+ int arr_len, unsigned int off_dst)
+{
+ int act_len, n;
+ struct scsi_data_buffer *sdb = scsi_in(scp);
+ off_t skip = off_dst;
+
+ if (sdb->length <= off_dst)
+ return 0;
+ if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
+ return DID_ERROR << 16;
+
+ act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
+ arr, arr_len, skip);
+ pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
+ __func__, off_dst, scsi_bufflen(scp), act_len, sdb->resid);
+ n = (int)scsi_bufflen(scp) - ((int)off_dst + act_len);
+ sdb->resid = min(sdb->resid, n);
+ return 0;
+}
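
The helper is built on sg_pcopy_from_buffer(), which copies into a scatterlist starting at a byte offset and returns the number of bytes actually placed; a self-contained sketch with a one-entry table (names and sizes are illustrative):

#include <linux/scatterlist.h>

static size_t copy_at_offset(void *dst, size_t dst_len,
			     void *src, size_t src_len, off_t skip)
{
	struct scatterlist sg;

	sg_init_one(&sg, dst, dst_len);
	/* Copies up to src_len bytes starting at byte 'skip' of
	 * the scatterlist; returns the bytes actually copied. */
	return sg_pcopy_from_buffer(&sg, 1, src, src_len, skip);
}
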
+
+/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
+ * 'arr' or -1 if error.
+ */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
int arr_len)
{
@@ -3269,6 +3297,8 @@ static int resp_get_lba_status(struct scsi_cmnd *scp,
return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
}
+#define RL_BUCKET_ELEMS 8
+
/* Even though each pseudo target has a REPORT LUNS "well known logical unit"
* (W-LUN), the normal Linux scanning logic does not associate it with a
* device (e.g. /dev/sg7). The following magic will make that association:
@@ -3285,12 +3315,14 @@ static int resp_report_luns(struct scsi_cmnd *scp,
unsigned char select_report;
u64 lun;
struct scsi_lun *lun_p;
- u8 *arr;
+ u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
unsigned int lun_cnt; /* normal LUN count (max: 256) */
unsigned int wlun_cnt; /* report luns W-LUN count */
unsigned int tlun_cnt; /* total LUN count */
unsigned int rlen; /* response length (in bytes) */
- int i, res;
+ int k, j, n, res;
+ unsigned int off_rsp = 0;
+ const int sz_lun = sizeof(struct scsi_lun);
clear_luns_changed_on_target(devip);
@@ -3329,33 +3361,40 @@ static int resp_report_luns(struct scsi_cmnd *scp,
--lun_cnt;
tlun_cnt = lun_cnt + wlun_cnt;
-
- rlen = (tlun_cnt * sizeof(struct scsi_lun)) + 8;
- arr = vmalloc(rlen);
- if (!arr) {
- mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
- INSUFF_RES_ASCQ);
- return check_condition_result;
- }
- memset(arr, 0, rlen);
+ rlen = tlun_cnt * sz_lun; /* excluding 8 byte header */
+ scsi_set_resid(scp, scsi_bufflen(scp));
pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
- /* luns start at byte 8 in response following the header */
- lun_p = (struct scsi_lun *)&arr[8];
-
- /* LUNs use single level peripheral device addressing method */
+ /* loops rely on the response header being the same size as a LUN entry (both 8 bytes) */
lun = sdebug_no_lun_0 ? 1 : 0;
- for (i = 0; i < lun_cnt; i++)
- int_to_scsilun(lun++, lun_p++);
-
- if (wlun_cnt)
- int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p++);
-
- put_unaligned_be32(rlen - 8, &arr[0]);
-
- res = fill_from_dev_buffer(scp, arr, rlen);
- vfree(arr);
+ for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
+ memset(arr, 0, sizeof(arr));
+ lun_p = (struct scsi_lun *)&arr[0];
+ if (k == 0) {
+ put_unaligned_be32(rlen, &arr[0]);
+ ++lun_p;
+ j = 1;
+ }
+ for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
+ if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
+ break;
+ int_to_scsilun(lun++, lun_p);
+ }
+ if (j < RL_BUCKET_ELEMS)
+ break;
+ n = j * sz_lun;
+ res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
+ if (res)
+ return res;
+ off_rsp += n;
+ }
+ if (wlun_cnt) {
+ int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
+ ++j;
+ }
+ if (j > 0)
+ res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
return res;
}
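
A worked example of the bucket layout above, assuming (as the loop does) that the header and a LUN entry are both 8 bytes:

/* With lun_cnt = 10 and no W-LUN:
 *   bucket 0: [header][LUN 0] ... [LUN 6]  -> full, flushed inside the loop
 *   bucket 1: [LUN 7][LUN 8][LUN 9]        -> partial (j == 3), flushed by
 *                                             the final p_fill_from_dev_buffer()
 * The header reports rlen = 10 * 8 = 80 bytes, excluding itself.
 */
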
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index eaccd651c..246456925 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -246,6 +246,10 @@ static struct {
{"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+ {"STK", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+ {"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+ {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+ {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
{"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
{"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 57a4b9973..85c8a51bc 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -29,6 +29,7 @@ extern int scsi_init_hosts(void);
extern void scsi_exit_hosts(void);
/* scsi.c */
+extern bool scsi_use_blk_mq;
extern int scsi_setup_command_freelist(struct Scsi_Host *shost);
extern void scsi_destroy_command_freelist(struct Scsi_Host *shost);
#ifdef CONFIG_SCSI_LOGGING
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 3f0ff0721..60b651bfa 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -341,22 +341,6 @@ static int do_sas_phy_delete(struct device *dev, void *data)
}
/**
- * is_sas_attached - check if device is SAS attached
- * @sdev: scsi device to check
- *
- * returns true if the device is SAS attached
- */
-int is_sas_attached(struct scsi_device *sdev)
-{
- struct Scsi_Host *shost = sdev->host;
-
- return shost->transportt->host_attrs.ac.class ==
- &sas_host_class.class;
-}
-EXPORT_SYMBOL(is_sas_attached);
-
-
-/**
* sas_remove_children - tear down a devices SAS data structures
* @dev: device belonging to the sas object
*
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 60bff78e9..d3e852ad5 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1012,7 +1012,8 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
} else if (rq_data_dir(rq) == READ) {
SCpnt->cmnd[0] = READ_6;
} else {
- scmd_printk(KERN_ERR, SCpnt, "Unknown command %llx\n", (unsigned long long) rq->cmd_flags);
+ scmd_printk(KERN_ERR, SCpnt, "Unknown command %llu,%llx\n",
+ req_op(rq), (unsigned long long) rq->cmd_flags);
goto out;
}
@@ -1137,21 +1138,26 @@ static int sd_init_command(struct scsi_cmnd *cmd)
{
struct request *rq = cmd->request;
- if (rq->cmd_flags & REQ_DISCARD)
+ switch (req_op(rq)) {
+ case REQ_OP_DISCARD:
return sd_setup_discard_cmnd(cmd);
- else if (rq->cmd_flags & REQ_WRITE_SAME)
+ case REQ_OP_WRITE_SAME:
return sd_setup_write_same_cmnd(cmd);
- else if (rq->cmd_flags & REQ_FLUSH)
+ case REQ_OP_FLUSH:
return sd_setup_flush_cmnd(cmd);
- else
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
return sd_setup_read_write_cmnd(cmd);
+ default:
+ BUG();
+ }
}
static void sd_uninit_command(struct scsi_cmnd *SCpnt)
{
struct request *rq = SCpnt->request;
- if (rq->cmd_flags & REQ_DISCARD)
+ if (req_op(rq) == REQ_OP_DISCARD)
__free_page(rq->completion_data);
if (SCpnt->cmnd != rq->cmd) {
@@ -1613,8 +1619,7 @@ static int sd_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
return -EOPNOTSUPP;
return sd_pr_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00,
old_key, new_key, 0,
- (1 << 0) /* APTPL */ |
- (1 << 2) /* ALL_TG_PT */);
+ (1 << 0) /* APTPL */);
}
static int sd_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
@@ -1774,7 +1779,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
unsigned char op = SCpnt->cmnd[0];
unsigned char unmap = SCpnt->cmnd[1] & 8;
- if (req->cmd_flags & REQ_DISCARD || req->cmd_flags & REQ_WRITE_SAME) {
+ if (req_op(req) == REQ_OP_DISCARD || req_op(req) == REQ_OP_WRITE_SAME) {
if (!result) {
good_bytes = blk_rq_bytes(req);
scsi_set_resid(SCpnt, 0);
@@ -2988,7 +2993,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
sd_revalidate_disk(gd);
- gd->driverfs_dev = &sdp->sdev_gendev;
gd->flags = GENHD_FL_EXT_DEVT;
if (sdp->removable) {
gd->flags |= GENHD_FL_REMOVABLE;
@@ -2996,7 +3000,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
}
blk_pm_runtime_init(sdp->request_queue, dev);
- add_disk(gd);
+ device_add_disk(dev, gd);
if (sdkp->capacity)
sd_dif_config_host(sdkp);
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 53ef1cb64..8c9a35c91 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -587,7 +587,7 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
- if (is_sas_attached(sdev))
+ if (scsi_is_sas_rphy(&sdev->sdev_gendev))
efd.addr = sas_get_address(sdev);
if (efd.addr) {
@@ -778,6 +778,8 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev)
if (!edev)
return;
+ enclosure_unregister(edev);
+
ses_dev = edev->scratch;
edev->scratch = NULL;
@@ -789,7 +791,6 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev)
kfree(edev->component[0].scratch);
put_device(&edev->edev);
- enclosure_unregister(edev);
}
static void ses_intf_remove(struct device *cdev,
diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c
index b0fefd67c..b106596cc 100644
--- a/drivers/scsi/snic/snic_disc.c
+++ b/drivers/scsi/snic/snic_disc.c
@@ -113,11 +113,11 @@ snic_queue_report_tgt_req(struct snic *snic)
pa = pci_map_single(snic->pdev, buf, buf_len, PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(snic->pdev, pa)) {
- kfree(buf);
- snic_req_free(snic, rqi);
SNIC_HOST_ERR(snic->shost,
"Rpt-tgt rspbuf %p: PCI DMA Mapping Failed\n",
buf);
+ kfree(buf);
+ snic_req_free(snic, rqi);
ret = -EINVAL;
goto error;
diff --git a/drivers/scsi/snic/snic_fwint.h b/drivers/scsi/snic/snic_fwint.h
index c5f9e1917..2a045a57e 100644
--- a/drivers/scsi/snic/snic_fwint.h
+++ b/drivers/scsi/snic/snic_fwint.h
@@ -92,7 +92,7 @@ enum snic_io_status {
}; /* end of enum snic_io_status */
/*
- * snic_io_hdr : host <--> firmare
+ * snic_io_hdr : host <--> firmware
*
* for any other message that will be queued to firmware should
* have the following request header
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 64c867405..ed179348d 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -713,7 +713,6 @@ static int sr_probe(struct device *dev)
get_capabilities(cd);
sr_vendor_init(cd);
- disk->driverfs_dev = &sdev->sdev_gendev;
set_capacity(disk, cd->capacity);
disk->private_data = &cd->driver;
disk->queue = sdev->request_queue;
@@ -730,7 +729,7 @@ static int sr_probe(struct device *dev)
dev_set_drvdata(dev, cd);
disk->flags |= GENHD_FL_REMOVABLE;
- add_disk(disk);
+ device_add_disk(&sdev->sdev_gendev, disk);
sdev_printk(KERN_DEBUG, sdev,
"Attached scsi CD-ROM %s\n", cd->cdi.name);
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 3ddcabb79..8ccfc9ea8 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -966,6 +966,8 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request,
if (scmnd->result) {
if (scsi_normalize_sense(scmnd->sense_buffer,
SCSI_SENSE_BUFFERSIZE, &sense_hdr) &&
+ !(sense_hdr.sense_key == NOT_READY &&
+ sense_hdr.asc == 0x03A) &&
do_logging(STORVSC_LOGGING_ERROR))
scsi_print_sense_hdr(scmnd->device, "storvsc",
&sense_hdr);
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 097894a1f..479669092 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -61,6 +61,14 @@ config SCSI_UFSHCD_PCI
If unsure, say N.
+config SCSI_UFS_DWC_TC_PCI
+ tristate "DesignWare pci support using a G210 Test Chip"
+ depends on SCSI_UFSHCD && PCI
+ ---help---
+ Synopsys Test Chip is a PHY for prototyping purposes.
+
+ If unsure, say N.
+
config SCSI_UFSHCD_PLATFORM
tristate "Platform bus based UFS Controller support"
depends on SCSI_UFSHCD
@@ -72,6 +80,14 @@ config SCSI_UFSHCD_PLATFORM
If unsure, say N.
+config SCSI_UFS_DWC_TC_PLATFORM
+ tristate "DesignWare platform support using a G210 Test Chip"
+ depends on SCSI_UFSHCD_PLATFORM
+ ---help---
+ Synopsys Test Chip is a PHY for prototyping purposes.
+
+ If unsure, say N.
+
config SCSI_UFS_QCOM
tristate "QCOM specific hooks to UFS controller platform driver"
depends on SCSI_UFSHCD_PLATFORM && ARCH_QCOM
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index 8303bcce7..6e77cb0bf 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -1,4 +1,6 @@
# UFSHCD makefile
+obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o
+obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o
obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o
obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o
obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
diff --git a/drivers/scsi/ufs/tc-dwc-g210-pci.c b/drivers/scsi/ufs/tc-dwc-g210-pci.c
new file mode 100644
index 000000000..c09a0fef0
--- /dev/null
+++ b/drivers/scsi/ufs/tc-dwc-g210-pci.c
@@ -0,0 +1,181 @@
+/*
+ * Synopsys G210 Test Chip driver
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors: Joao Pinto <jpinto@synopsys.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "ufshcd.h"
+#include "ufshcd-dwc.h"
+#include "tc-dwc-g210.h"
+
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+
+/* Test Chip type expected values */
+#define TC_G210_20BIT 20
+#define TC_G210_40BIT 40
+#define TC_G210_INV 0
+
+static int tc_type = TC_G210_INV;
+module_param(tc_type, int, 0);
+MODULE_PARM_DESC(tc_type, "Test Chip Type (20 = 20-bit, 40 = 40-bit)");
+
+static int tc_dwc_g210_pci_suspend(struct device *dev)
+{
+ return ufshcd_system_suspend(dev_get_drvdata(dev));
+}
+
+static int tc_dwc_g210_pci_resume(struct device *dev)
+{
+ return ufshcd_system_resume(dev_get_drvdata(dev));
+}
+
+static int tc_dwc_g210_pci_runtime_suspend(struct device *dev)
+{
+ return ufshcd_runtime_suspend(dev_get_drvdata(dev));
+}
+
+static int tc_dwc_g210_pci_runtime_resume(struct device *dev)
+{
+ return ufshcd_runtime_resume(dev_get_drvdata(dev));
+}
+
+static int tc_dwc_g210_pci_runtime_idle(struct device *dev)
+{
+ return ufshcd_runtime_idle(dev_get_drvdata(dev));
+}
+
+/**
+ * tc_dwc_g210_pci_hba_vops - UFS DWC specific variant operations
+ */
+static struct ufs_hba_variant_ops tc_dwc_g210_pci_hba_vops = {
+ .name = "tc-dwc-g210-pci",
+ .link_startup_notify = ufshcd_dwc_link_startup_notify,
+};
+
+/**
+ * tc_dwc_g210_pci_shutdown - main function to put the controller in reset state
+ * @pdev: pointer to PCI device handle
+ */
+static void tc_dwc_g210_pci_shutdown(struct pci_dev *pdev)
+{
+ ufshcd_shutdown((struct ufs_hba *)pci_get_drvdata(pdev));
+}
+
+/**
+ * tc_dwc_g210_pci_remove - de-allocate the PCI/SCSI host and the host memory
+ * space data structures
+ * @pdev: pointer to PCI device handle
+ */
+static void tc_dwc_g210_pci_remove(struct pci_dev *pdev)
+{
+ struct ufs_hba *hba = pci_get_drvdata(pdev);
+
+ pm_runtime_forbid(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+ ufshcd_remove(hba);
+}
+
+/**
+ * tc_dwc_g210_pci_probe - probe routine of the driver
+ * @pdev: pointer to PCI device handle
+ * @id: PCI device id
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int
+tc_dwc_g210_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct ufs_hba *hba;
+ void __iomem *mmio_base;
+ int err;
+
+ /* Check Test Chip type and set the specific setup routine */
+ if (tc_type == TC_G210_20BIT) {
+ tc_dwc_g210_pci_hba_vops.phy_initialization =
+ tc_dwc_g210_config_20_bit;
+ } else if (tc_type == TC_G210_40BIT) {
+ tc_dwc_g210_pci_hba_vops.phy_initialization =
+ tc_dwc_g210_config_40_bit;
+ } else {
+ dev_err(&pdev->dev, "test chip version not specified\n");
+ return -EPERM;
+ }
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pcim_enable_device failed\n");
+ return err;
+ }
+
+ pci_set_master(pdev);
+
+ err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD);
+ if (err < 0) {
+ dev_err(&pdev->dev, "request and iomap failed\n");
+ return err;
+ }
+
+ mmio_base = pcim_iomap_table(pdev)[0];
+
+ err = ufshcd_alloc_host(&pdev->dev, &hba);
+ if (err) {
+ dev_err(&pdev->dev, "Allocation failed\n");
+ return err;
+ }
+
+ INIT_LIST_HEAD(&hba->clk_list_head);
+
+ hba->vops = &tc_dwc_g210_pci_hba_vops;
+
+ err = ufshcd_init(hba, mmio_base, pdev->irq);
+ if (err) {
+ dev_err(&pdev->dev, "Initialization failed\n");
+ return err;
+ }
+
+ pci_set_drvdata(pdev, hba);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_allow(&pdev->dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops tc_dwc_g210_pci_pm_ops = {
+ .suspend = tc_dwc_g210_pci_suspend,
+ .resume = tc_dwc_g210_pci_resume,
+ .runtime_suspend = tc_dwc_g210_pci_runtime_suspend,
+ .runtime_resume = tc_dwc_g210_pci_runtime_resume,
+ .runtime_idle = tc_dwc_g210_pci_runtime_idle,
+};
+
+static const struct pci_device_id tc_dwc_g210_pci_tbl[] = {
+ { PCI_VENDOR_ID_SYNOPSYS, 0xB101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_SYNOPSYS, 0xB102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { } /* terminate list */
+};
+
+MODULE_DEVICE_TABLE(pci, tc_dwc_g210_pci_tbl);
+
+static struct pci_driver tc_dwc_g210_pci_driver = {
+ .name = "tc-dwc-g210-pci",
+ .id_table = tc_dwc_g210_pci_tbl,
+ .probe = tc_dwc_g210_pci_probe,
+ .remove = tc_dwc_g210_pci_remove,
+ .shutdown = tc_dwc_g210_pci_shutdown,
+ .driver = {
+ .pm = &tc_dwc_g210_pci_pm_ops
+ },
+};
+
+module_pci_driver(tc_dwc_g210_pci_driver);
+
+MODULE_AUTHOR("Joao Pinto <Joao.Pinto@synopsys.com>");
+MODULE_DESCRIPTION("Synopsys Test Chip G210 PCI glue driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c b/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c
new file mode 100644
index 000000000..2d3f5270f
--- /dev/null
+++ b/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c
@@ -0,0 +1,113 @@
+/*
+ * Synopsys G210 Test Chip driver
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors: Joao Pinto <jpinto@synopsys.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/delay.h>
+
+#include "ufshcd-pltfrm.h"
+#include "ufshcd-dwc.h"
+#include "tc-dwc-g210.h"
+
+/**
+ * UFS DWC specific variant operations
+ */
+static struct ufs_hba_variant_ops tc_dwc_g210_20bit_pltfm_hba_vops = {
+ .name = "tc-dwc-g210-pltfm",
+ .link_startup_notify = ufshcd_dwc_link_startup_notify,
+ .phy_initialization = tc_dwc_g210_config_20_bit,
+};
+
+static struct ufs_hba_variant_ops tc_dwc_g210_40bit_pltfm_hba_vops = {
+ .name = "tc-dwc-g210-pltfm",
+ .link_startup_notify = ufshcd_dwc_link_startup_notify,
+ .phy_initialization = tc_dwc_g210_config_40_bit,
+};
+
+static const struct of_device_id tc_dwc_g210_pltfm_match[] = {
+ {
+ .compatible = "snps,g210-tc-6.00-20bit",
+ .data = &tc_dwc_g210_20bit_pltfm_hba_vops,
+ },
+ {
+ .compatible = "snps,g210-tc-6.00-40bit",
+ .data = &tc_dwc_g210_40bit_pltfm_hba_vops,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tc_dwc_g210_pltfm_match);
+
+/**
+ * tc_dwc_g210_pltfm_probe()
+ * @pdev: pointer to platform device structure
+ *
+ */
+static int tc_dwc_g210_pltfm_probe(struct platform_device *pdev)
+{
+ int err;
+ const struct of_device_id *of_id;
+ struct ufs_hba_variant_ops *vops;
+ struct device *dev = &pdev->dev;
+
+ of_id = of_match_node(tc_dwc_g210_pltfm_match, dev->of_node);
+ vops = (struct ufs_hba_variant_ops *)of_id->data;
+
+ /* Perform generic probe */
+ err = ufshcd_pltfrm_init(pdev, vops);
+ if (err)
+ dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
+
+ return err;
+}
+
+/**
+ * tc_dwc_g210_pltfm_remove()
+ * @pdev: pointer to platform device structure
+ *
+ */
+static int tc_dwc_g210_pltfm_remove(struct platform_device *pdev)
+{
+ struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(&(pdev)->dev);
+ ufshcd_remove(hba);
+
+ return 0;
+}
+
+static const struct dev_pm_ops tc_dwc_g210_pltfm_pm_ops = {
+ .suspend = ufshcd_pltfrm_suspend,
+ .resume = ufshcd_pltfrm_resume,
+ .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
+ .runtime_resume = ufshcd_pltfrm_runtime_resume,
+ .runtime_idle = ufshcd_pltfrm_runtime_idle,
+};
+
+static struct platform_driver tc_dwc_g210_pltfm_driver = {
+ .probe = tc_dwc_g210_pltfm_probe,
+ .remove = tc_dwc_g210_pltfm_remove,
+ .shutdown = ufshcd_pltfrm_shutdown,
+ .driver = {
+ .name = "tc-dwc-g210-pltfm",
+ .pm = &tc_dwc_g210_pltfm_pm_ops,
+ .of_match_table = of_match_ptr(tc_dwc_g210_pltfm_match),
+ },
+};
+
+module_platform_driver(tc_dwc_g210_pltfm_driver);
+
+MODULE_ALIAS("platform:tc-dwc-g210-pltfm");
+MODULE_DESCRIPTION("Synopsys Test Chip G210 platform glue driver");
+MODULE_AUTHOR("Joao Pinto <Joao.Pinto@synopsys.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/scsi/ufs/tc-dwc-g210.c b/drivers/scsi/ufs/tc-dwc-g210.c
new file mode 100644
index 000000000..70db6d999
--- /dev/null
+++ b/drivers/scsi/ufs/tc-dwc-g210.c
@@ -0,0 +1,319 @@
+/*
+ * Synopsys G210 Test Chip driver
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors: Joao Pinto <jpinto@synopsys.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "ufshcd.h"
+#include "unipro.h"
+
+#include "ufshcd-dwc.h"
+#include "ufshci-dwc.h"
+
+/**
+ * tc_dwc_g210_setup_40bit_rmmi()
+ * This function configures Synopsys TC specific attributes (40-bit RMMI)
+ * @hba: Pointer to drivers structure
+ *
+ * Returns 0 on success or non-zero value on failure
+ */
+static int tc_dwc_g210_setup_40bit_rmmi(struct ufs_hba *hba)
+{
+ const struct ufshcd_dme_attr_val setup_attrs[] = {
+ { UIC_ARG_MIB(TX_GLOBALHIBERNATE), 0x00, DME_LOCAL },
+ { UIC_ARG_MIB(REFCLKMODE), 0x01, DME_LOCAL },
+ { UIC_ARG_MIB(CDIRECTCTRL6), 0x80, DME_LOCAL },
+ { UIC_ARG_MIB(CBDIVFACTOR), 0x08, DME_LOCAL },
+ { UIC_ARG_MIB(CBDCOCTRL5), 0x64, DME_LOCAL },
+ { UIC_ARG_MIB(CBPRGTUNING), 0x09, DME_LOCAL },
+ { UIC_ARG_MIB(RTOBSERVESELECT), 0x00, DME_LOCAL },
+ { UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN0_TX), 0x01,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN0_TX), 0x19,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGEXTRATTR, SELIND_LN0_TX), 0x14,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(DITHERCTRL2, SELIND_LN0_TX), 0xd6,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(RX_REFCLKFREQ, SELIND_LN0_RX), 0x01,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(RX_CFGCLKFREQVAL, SELIND_LN0_RX), 0x19,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGWIDEINLN, SELIND_LN0_RX), 4,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN0_RX), 0x80,
+ DME_LOCAL },
+ { UIC_ARG_MIB(DIRECTCTRL10), 0x04, DME_LOCAL },
+ { UIC_ARG_MIB(DIRECTCTRL19), 0x02, DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN0_RX), 0x80,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(ENARXDIRECTCFG4, SELIND_LN0_RX), 0x03,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGRXOVR8, SELIND_LN0_RX), 0x16,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(RXDIRECTCTRL2, SELIND_LN0_RX), 0x42,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(ENARXDIRECTCFG3, SELIND_LN0_RX), 0xa4,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(RXCALCTRL, SELIND_LN0_RX), 0x01,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(ENARXDIRECTCFG2, SELIND_LN0_RX), 0x01,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGRXOVR4, SELIND_LN0_RX), 0x28,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(RXSQCTRL, SELIND_LN0_RX), 0x1E,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN0_RX), 0x2f,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN0_RX), 0x2f,
+ DME_LOCAL },
+ { UIC_ARG_MIB(CBPRGPLL2), 0x00, DME_LOCAL },
+ };
+
+ return ufshcd_dwc_dme_set_attrs(hba, setup_attrs,
+ ARRAY_SIZE(setup_attrs));
+}
+
+/**
+ * tc_dwc_g210_setup_20bit_rmmi_lane0()
+ * This function configures Synopsys TC 20-bit RMMI Lane 0
+ * @hba: Pointer to drivers structure
+ *
+ * Returns 0 on success or non-zero value on failure
+ */
+static int tc_dwc_g210_setup_20bit_rmmi_lane0(struct ufs_hba *hba)
+{
+ const struct ufshcd_dme_attr_val setup_attrs[] = {
+ { UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN0_TX), 0x01,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN0_TX), 0x19,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(RX_CFGCLKFREQVAL, SELIND_LN0_RX), 0x19,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGEXTRATTR, SELIND_LN0_TX), 0x12,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(DITHERCTRL2, SELIND_LN0_TX), 0xd6,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(RX_REFCLKFREQ, SELIND_LN0_RX), 0x01,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGWIDEINLN, SELIND_LN0_RX), 2,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN0_RX), 0x80,
+ DME_LOCAL },
+ { UIC_ARG_MIB(DIRECTCTRL10), 0x04, DME_LOCAL },
+ { UIC_ARG_MIB(DIRECTCTRL19), 0x02, DME_LOCAL },
+ { UIC_ARG_MIB_SEL(ENARXDIRECTCFG4, SELIND_LN0_RX), 0x03,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGRXOVR8, SELIND_LN0_RX), 0x16,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(RXDIRECTCTRL2, SELIND_LN0_RX), 0x42,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(ENARXDIRECTCFG3, SELIND_LN0_RX), 0xa4,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(RXCALCTRL, SELIND_LN0_RX), 0x01,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(ENARXDIRECTCFG2, SELIND_LN0_RX), 0x01,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGRXOVR4, SELIND_LN0_RX), 0x28,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(RXSQCTRL, SELIND_LN0_RX), 0x1E,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN0_RX), 0x2f,
+ DME_LOCAL },
+ { UIC_ARG_MIB(CBPRGPLL2), 0x00, DME_LOCAL },
+ };
+
+ return ufshcd_dwc_dme_set_attrs(hba, setup_attrs,
+ ARRAY_SIZE(setup_attrs));
+}
+
+/**
+ * tc_dwc_g210_setup_20bit_rmmi_lane1()
+ * This function configures Synopsys TC 20-bit RMMI Lane 1
+ * @hba: Pointer to drivers structure
+ *
+ * Returns 0 on success or non-zero value on failure
+ */
+static int tc_dwc_g210_setup_20bit_rmmi_lane1(struct ufs_hba *hba)
+{
+ int connected_rx_lanes = 0;
+ int connected_tx_lanes = 0;
+ int ret = 0;
+
+ const struct ufshcd_dme_attr_val setup_tx_attrs[] = {
+ { UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN1_TX), 0x0d,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN1_TX), 0x19,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGEXTRATTR, SELIND_LN1_TX), 0x12,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(DITHERCTRL2, SELIND_LN0_TX), 0xd6,
+ DME_LOCAL },
+ };
+
+ const struct ufshcd_dme_attr_val setup_rx_attrs[] = {
+ { UIC_ARG_MIB_SEL(RX_REFCLKFREQ, SELIND_LN1_RX), 0x01,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(RX_CFGCLKFREQVAL, SELIND_LN1_RX), 0x19,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGWIDEINLN, SELIND_LN1_RX), 2,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN1_RX), 0x80,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(ENARXDIRECTCFG4, SELIND_LN1_RX), 0x03,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGRXOVR8, SELIND_LN1_RX), 0x16,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(RXDIRECTCTRL2, SELIND_LN1_RX), 0x42,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(ENARXDIRECTCFG3, SELIND_LN1_RX), 0xa4,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(RXCALCTRL, SELIND_LN1_RX), 0x01,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(ENARXDIRECTCFG2, SELIND_LN1_RX), 0x01,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGRXOVR4, SELIND_LN1_RX), 0x28,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(RXSQCTRL, SELIND_LN1_RX), 0x1E,
+ DME_LOCAL },
+ { UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN1_RX), 0x2f,
+ DME_LOCAL },
+ };
+
+ /* Get the available lane count */
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILRXDATALANES),
+ &connected_rx_lanes);
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILTXDATALANES),
+ &connected_tx_lanes);
+
+ if (connected_tx_lanes == 2) {
+
+ ret = ufshcd_dwc_dme_set_attrs(hba, setup_tx_attrs,
+ ARRAY_SIZE(setup_tx_attrs));
+
+ if (ret)
+ goto out;
+ }
+
+ if (connected_rx_lanes == 2) {
+ ret = ufshcd_dwc_dme_set_attrs(hba, setup_rx_attrs,
+ ARRAY_SIZE(setup_rx_attrs));
+ }
+
+out:
+ return ret;
+}
+
+/**
+ * tc_dwc_g210_setup_20bit_rmmi()
+ * This function configures Synopsys TC specific attributes (20-bit RMMI)
+ * @hba: Pointer to drivers structure
+ *
+ * Returns 0 on success or non-zero value on failure
+ */
+static int tc_dwc_g210_setup_20bit_rmmi(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ const struct ufshcd_dme_attr_val setup_attrs[] = {
+ { UIC_ARG_MIB(TX_GLOBALHIBERNATE), 0x00, DME_LOCAL },
+ { UIC_ARG_MIB(REFCLKMODE), 0x01, DME_LOCAL },
+ { UIC_ARG_MIB(CDIRECTCTRL6), 0xc0, DME_LOCAL },
+ { UIC_ARG_MIB(CBDIVFACTOR), 0x44, DME_LOCAL },
+ { UIC_ARG_MIB(CBDCOCTRL5), 0x64, DME_LOCAL },
+ { UIC_ARG_MIB(CBPRGTUNING), 0x09, DME_LOCAL },
+ { UIC_ARG_MIB(RTOBSERVESELECT), 0x00, DME_LOCAL },
+ };
+
+ ret = ufshcd_dwc_dme_set_attrs(hba, setup_attrs,
+ ARRAY_SIZE(setup_attrs));
+ if (ret)
+ goto out;
+
+	/* Lane 0 configuration */
+ ret = tc_dwc_g210_setup_20bit_rmmi_lane0(hba);
+ if (ret)
+ goto out;
+
+	/* Lane 1 configuration */
+	ret = tc_dwc_g210_setup_20bit_rmmi_lane1(hba);
+
+out:
+ return ret;
+}
+
+/**
+ * tc_dwc_g210_config_40_bit()
+ * This function configures Local (host) Synopsys 40-bit TC specific attributes
+ *
+ * @hba: Pointer to drivers structure
+ *
+ * Returns 0 on success non-zero value on failure
+ */
+int tc_dwc_g210_config_40_bit(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ dev_info(hba->dev, "Configuring Test Chip 40-bit RMMI\n");
+ ret = tc_dwc_g210_setup_40bit_rmmi(hba);
+ if (ret) {
+ dev_err(hba->dev, "Configuration failed\n");
+ goto out;
+ }
+
+ /* To write Shadow register bank to effective configuration block */
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 0x01);
+ if (ret)
+ goto out;
+
+ /* To configure Debug OMC */
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), 0x01);
+
+out:
+ return ret;
+}
+EXPORT_SYMBOL(tc_dwc_g210_config_40_bit);
+
+/**
+ * tc_dwc_g210_config_20_bit()
+ * This function configures Local (host) Synopsys 20-bit TC specific attributes
+ *
+ * @hba: Pointer to drivers structure
+ *
+ * Returns 0 on success non-zero value on failure
+ */
+int tc_dwc_g210_config_20_bit(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ dev_info(hba->dev, "Configuring Test Chip 20-bit RMMI\n");
+ ret = tc_dwc_g210_setup_20bit_rmmi(hba);
+ if (ret) {
+ dev_err(hba->dev, "Configuration failed\n");
+ goto out;
+ }
+
+ /* To write Shadow register bank to effective configuration block */
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 0x01);
+ if (ret)
+ goto out;
+
+ /* To configure Debug OMC */
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), 0x01);
+
+out:
+ return ret;
+}
+EXPORT_SYMBOL(tc_dwc_g210_config_20_bit);
+
+MODULE_AUTHOR("Joao Pinto <Joao.Pinto@synopsys.com>");
+MODULE_DESCRIPTION("Synopsys G210 Test Chip driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/scsi/ufs/tc-dwc-g210.h b/drivers/scsi/ufs/tc-dwc-g210.h
new file mode 100644
index 000000000..fb177db12
--- /dev/null
+++ b/drivers/scsi/ufs/tc-dwc-g210.h
@@ -0,0 +1,19 @@
+/*
+ * Synopsys G210 Test Chip driver
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors: Joao Pinto <jpinto@synopsys.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _TC_DWC_G210_H
+#define _TC_DWC_G210_H
+
+int tc_dwc_g210_config_40_bit(struct ufs_hba *hba);
+int tc_dwc_g210_config_20_bit(struct ufs_hba *hba);
+
+#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshcd-dwc.c b/drivers/scsi/ufs/ufshcd-dwc.c
new file mode 100644
index 000000000..5fd16c722
--- /dev/null
+++ b/drivers/scsi/ufs/ufshcd-dwc.c
@@ -0,0 +1,154 @@
+/*
+ * UFS Host driver for Synopsys Designware Core
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors: Joao Pinto <jpinto@synopsys.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "ufshcd.h"
+#include "unipro.h"
+
+#include "ufshcd-dwc.h"
+#include "ufshci-dwc.h"
+
+int ufshcd_dwc_dme_set_attrs(struct ufs_hba *hba,
+ const struct ufshcd_dme_attr_val *v, int n)
+{
+ int ret = 0;
+ int attr_node = 0;
+
+ for (attr_node = 0; attr_node < n; attr_node++) {
+ ret = ufshcd_dme_set_attr(hba, v[attr_node].attr_sel,
+ ATTR_SET_NOR, v[attr_node].mib_val, v[attr_node].peer);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ufshcd_dwc_dme_set_attrs);
+
+/**
+ * ufshcd_dwc_program_clk_div()
+ * This function programs the clock divider value, which is needed to
+ * provide a 1 microsecond tick to the UniPro layer.
+ * @hba: Private Structure pointer
+ * @divider_val: clock divider value to be programmed
+ *
+ */
+static void ufshcd_dwc_program_clk_div(struct ufs_hba *hba, u32 divider_val)
+{
+ ufshcd_writel(hba, divider_val, DWC_UFS_REG_HCLKDIV);
+}
+
+/**
+ * ufshcd_dwc_link_is_up()
+ * Check if link is up
+ * @hba: private structure pointer
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_dwc_link_is_up(struct ufs_hba *hba)
+{
+ int dme_result = 0;
+
+ ufshcd_dme_get(hba, UIC_ARG_MIB(VS_POWERSTATE), &dme_result);
+
+ if (dme_result == UFSHCD_LINK_IS_UP) {
+ ufshcd_set_link_active(hba);
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
+ * ufshcd_dwc_connection_setup()
+ * This function configures both the local side (host) and the peer side
+ * (device) UniPro attributes to establish the connection to the
+ * application/CPort.
+ * This function is not required if the hardware is properly configured to
+ * have this connection set up on reset, but invoking it does no harm and
+ * should work with any UFS device.
+ *
+ * @hba: pointer to drivers private data
+ *
+ * Returns 0 on success non-zero value on failure
+ */
+static int ufshcd_dwc_connection_setup(struct ufs_hba *hba)
+{
+ const struct ufshcd_dme_attr_val setup_attrs[] = {
+ { UIC_ARG_MIB(T_CONNECTIONSTATE), 0, DME_LOCAL },
+ { UIC_ARG_MIB(N_DEVICEID), 0, DME_LOCAL },
+ { UIC_ARG_MIB(N_DEVICEID_VALID), 0, DME_LOCAL },
+ { UIC_ARG_MIB(T_PEERDEVICEID), 1, DME_LOCAL },
+ { UIC_ARG_MIB(T_PEERCPORTID), 0, DME_LOCAL },
+ { UIC_ARG_MIB(T_TRAFFICCLASS), 0, DME_LOCAL },
+ { UIC_ARG_MIB(T_CPORTFLAGS), 0x6, DME_LOCAL },
+ { UIC_ARG_MIB(T_CPORTMODE), 1, DME_LOCAL },
+ { UIC_ARG_MIB(T_CONNECTIONSTATE), 1, DME_LOCAL },
+ { UIC_ARG_MIB(T_CONNECTIONSTATE), 0, DME_PEER },
+ { UIC_ARG_MIB(N_DEVICEID), 1, DME_PEER },
+ { UIC_ARG_MIB(N_DEVICEID_VALID), 1, DME_PEER },
+ { UIC_ARG_MIB(T_PEERDEVICEID), 1, DME_PEER },
+ { UIC_ARG_MIB(T_PEERCPORTID), 0, DME_PEER },
+ { UIC_ARG_MIB(T_TRAFFICCLASS), 0, DME_PEER },
+ { UIC_ARG_MIB(T_CPORTFLAGS), 0x6, DME_PEER },
+ { UIC_ARG_MIB(T_CPORTMODE), 1, DME_PEER },
+ { UIC_ARG_MIB(T_CONNECTIONSTATE), 1, DME_PEER }
+ };
+
+ return ufshcd_dwc_dme_set_attrs(hba, setup_attrs, ARRAY_SIZE(setup_attrs));
+}
+
+/**
+ * ufshcd_dwc_link_startup_notify()
+ * UFS Host DWC specific link startup sequence
+ * @hba: private structure pointer
+ * @status: Callback notify status
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_dwc_link_startup_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ int err = 0;
+
+ if (status == PRE_CHANGE) {
+ ufshcd_dwc_program_clk_div(hba, DWC_UFS_REG_HCLKDIV_DIV_125);
+
+ if (hba->vops->phy_initialization) {
+ err = hba->vops->phy_initialization(hba);
+ if (err) {
+ dev_err(hba->dev, "Phy setup failed (%d)\n",
+ err);
+ goto out;
+ }
+ }
+ } else { /* POST_CHANGE */
+ err = ufshcd_dwc_link_is_up(hba);
+ if (err) {
+ dev_err(hba->dev, "Link is not up\n");
+ goto out;
+ }
+
+ err = ufshcd_dwc_connection_setup(hba);
+ if (err)
+ dev_err(hba->dev, "Connection setup failed (%d)\n",
+ err);
+ }
+
+out:
+ return err;
+}
+EXPORT_SYMBOL(ufshcd_dwc_link_startup_notify);
+
+MODULE_AUTHOR("Joao Pinto <Joao.Pinto@synopsys.com>");
+MODULE_DESCRIPTION("UFS Host driver for Synopsys Designware Core");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/scsi/ufs/ufshcd-dwc.h b/drivers/scsi/ufs/ufshcd-dwc.h
new file mode 100644
index 000000000..c8be295e0
--- /dev/null
+++ b/drivers/scsi/ufs/ufshcd-dwc.h
@@ -0,0 +1,26 @@
+/*
+ * UFS Host driver for Synopsys Designware Core
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors: Joao Pinto <jpinto@synopsys.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _UFSHCD_DWC_H
+#define _UFSHCD_DWC_H
+
+struct ufshcd_dme_attr_val {
+ u32 attr_sel;
+ u32 mib_val;
+ u8 peer;
+};
+
+int ufshcd_dwc_link_startup_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status);
+int ufshcd_dwc_dme_set_attrs(struct ufs_hba *hba,
+ const struct ufshcd_dme_attr_val *v, int n);
+#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 718f12e09..db53f38da 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -372,6 +372,6 @@ EXPORT_SYMBOL_GPL(ufshcd_pltfrm_init);
MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
-MODULE_DESCRIPTION("UFS host controller Pltform bus based glue driver");
+MODULE_DESCRIPTION("UFS host controller Platform bus based glue driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index f8fa72c31..f08d41a2d 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1173,7 +1173,7 @@ static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
* @cmd_dir: requests data direction
*/
static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
- u32 *upiu_flags, enum dma_data_direction cmd_dir)
+ u32 *upiu_flags, enum dma_data_direction cmd_dir)
{
struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
u32 data_direction;
@@ -1299,47 +1299,55 @@ static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
}
/**
- * ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU)
+ * ufshcd_comp_devman_upiu - UFS Protocol Information Unit (UPIU)
+ * for Device Management Purposes
* @hba - per adapter instance
* @lrb - pointer to local reference block
*/
-static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
u32 upiu_flags;
int ret = 0;
- switch (lrbp->command_type) {
- case UTP_CMD_TYPE_SCSI:
- if (likely(lrbp->cmd)) {
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
- lrbp->cmd->sc_data_direction);
- ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
- } else {
- ret = -EINVAL;
- }
- break;
- case UTP_CMD_TYPE_DEV_MANAGE:
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
- if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
- ufshcd_prepare_utp_query_req_upiu(
- hba, lrbp, upiu_flags);
- else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
- ufshcd_prepare_utp_nop_upiu(lrbp);
- else
- ret = -EINVAL;
- break;
- case UTP_CMD_TYPE_UFS:
- /* For UFS native command implementation */
- ret = -ENOTSUPP;
- dev_err(hba->dev, "%s: UFS native command are not supported\n",
- __func__);
- break;
- default:
- ret = -ENOTSUPP;
- dev_err(hba->dev, "%s: unknown command type: 0x%x\n",
- __func__, lrbp->command_type);
- break;
- } /* end of switch */
+ if (hba->ufs_version == UFSHCI_VERSION_20)
+ lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+ else
+ lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
+
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
+ if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
+ ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
+ else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
+ ufshcd_prepare_utp_nop_upiu(lrbp);
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+/**
+ * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit (UPIU)
+ * for SCSI Purposes
+ * @hba - per adapter instance
+ * @lrb - pointer to local reference block
+ */
+static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ u32 upiu_flags;
+ int ret = 0;
+
+ if (hba->ufs_version == UFSHCI_VERSION_20)
+ lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+ else
+ lrbp->command_type = UTP_CMD_TYPE_SCSI;
+
+ if (likely(lrbp->cmd)) {
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
+ lrbp->cmd->sc_data_direction);
+ ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
+ } else {
+ ret = -EINVAL;
+ }
return ret;
}
@@ -1451,10 +1459,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
lrbp->task_tag = tag;
lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
- lrbp->command_type = UTP_CMD_TYPE_SCSI;
- /* form UPIU before issuing the command */
- ufshcd_compose_upiu(hba, lrbp);
+ ufshcd_comp_scsi_upiu(hba, lrbp);
+
err = ufshcd_map_sg(lrbp);
if (err) {
lrbp->cmd = NULL;
@@ -1479,11 +1486,10 @@ static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
lrbp->sense_buffer = NULL;
lrbp->task_tag = tag;
lrbp->lun = 0; /* device management cmd is not specific to any LUN */
- lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
lrbp->intr_cmd = true; /* No interrupt aggregation */
hba->dev_cmd.type = cmd_type;
- return ufshcd_compose_upiu(hba, lrbp);
+ return ufshcd_comp_devman_upiu(hba, lrbp);
}
static int
@@ -2131,7 +2137,7 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
if (!buff_ascii) {
err = -ENOMEM;
- goto out_free_buff;
+ goto out;
}
/*
@@ -2150,7 +2156,6 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
size - QUERY_DESC_HDR_SIZE);
memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
-out_free_buff:
kfree(buff_ascii);
}
out:
@@ -3539,7 +3544,8 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
/* Do not touch lrbp after scsi done */
cmd->scsi_done(cmd);
__ufshcd_release(hba);
- } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
+ } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
+ lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
if (hba->dev_cmd.complete)
complete(hba->dev_cmd.complete);
}
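Both replacement helpers share the same version gate; condensed, using only names from the hunks above:

    /* UFSHCI 2.0 hosts collapse SCSI and device-management requests
     * into the single UFS_STORAGE command type. */
    if (hba->ufs_version == UFSHCI_VERSION_20)
            lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
    else
            lrbp->command_type = UTP_CMD_TYPE_SCSI; /* or ..._DEV_MANAGE */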
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 4bb65669f..430bef111 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -264,6 +264,7 @@ struct ufs_pwr_mode_info {
* @suspend: called during host controller PM callback
* @resume: called during host controller PM callback
* @dbg_register_dump: used to dump controller debug information
+ * @phy_initialization: used to initialize PHYs
*/
struct ufs_hba_variant_ops {
const char *name;
@@ -285,6 +286,7 @@ struct ufs_hba_variant_ops {
int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
int (*resume)(struct ufs_hba *, enum ufs_pm_op);
void (*dbg_register_dump)(struct ufs_hba *hba);
+ int (*phy_initialization)(struct ufs_hba *);
};
/* clock gating state */
@@ -567,11 +569,16 @@ static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
{
+/* The DWC UFS core has the interrupt aggregation feature, but it is not detectable */
+#ifndef CONFIG_SCSI_UFS_DWC
if ((hba->caps & UFSHCD_CAP_INTR_AGGR) &&
!(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR))
return true;
else
return false;
+#else
+	return true;
+#endif
}
#define ufshcd_writel(hba, val, reg) \
diff --git a/drivers/scsi/ufs/ufshci-dwc.h b/drivers/scsi/ufs/ufshci-dwc.h
new file mode 100644
index 000000000..ca341fece
--- /dev/null
+++ b/drivers/scsi/ufs/ufshci-dwc.h
@@ -0,0 +1,36 @@
+/*
+ * UFS Host driver for Synopsys Designware Core
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors: Joao Pinto <jpinto@synopsys.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _UFSHCI_DWC_H
+#define _UFSHCI_DWC_H
+
+/* DWC HC UFSHCI specific Registers */
+enum dwc_specific_registers {
+ DWC_UFS_REG_HCLKDIV = 0xFC,
+};
+
+/* Clock Divider Values: Hex equivalent of frequency in MHz */
+enum clk_div_values {
+ DWC_UFS_REG_HCLKDIV_DIV_62_5 = 0x3e,
+ DWC_UFS_REG_HCLKDIV_DIV_125 = 0x7d,
+ DWC_UFS_REG_HCLKDIV_DIV_200 = 0xc8,
+};
+
+/* Selector Index */
+enum selector_index {
+ SELIND_LN0_TX = 0x00,
+ SELIND_LN1_TX = 0x01,
+ SELIND_LN0_RX = 0x04,
+ SELIND_LN1_RX = 0x05,
+};
+
+#endif /* End of Header */
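The HCLKDIV register takes the bus clock frequency in MHz as a plain binary value, which is why each divider constant above equals its frequency in hex (125 == 0x7d). A one-line sketch, mirroring the write done in ufshcd-dwc.c:

    /* 125 MHz bus clock, written verbatim to the divider register */
    ufshcd_writel(hba, DWC_UFS_REG_HCLKDIV_DIV_125, DWC_UFS_REG_HCLKDIV);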
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 4cb1cc63f..9599741ff 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -220,6 +220,12 @@ enum {
#define UIC_ARG_ATTR_TYPE(t) (((t) & 0xFF) << 16)
#define UIC_GET_ATTR_ID(v) (((v) >> 16) & 0xFFFF)
+/* Link Status */
+enum link_status {
+ UFSHCD_LINK_IS_DOWN = 1,
+ UFSHCD_LINK_IS_UP = 2,
+};
+
/* UIC Commands */
enum uic_cmd_dme {
UIC_CMD_DME_GET = 0x01,
@@ -279,6 +285,11 @@ enum {
UTP_CMD_TYPE_DEV_MANAGE = 0x2,
};
+/* Command type required by UFS 2.0 hosts */
+enum {
+ UTP_CMD_TYPE_UFS_STORAGE = 0x1,
+};
+
enum {
UTP_SCSI_COMMAND = 0x00000000,
UTP_NATIVE_UFS_COMMAND = 0x10000000,
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index e2854e45f..eff8b5675 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -36,6 +36,10 @@
#define TX_LCC_SEQUENCER 0x0032
#define TX_MIN_ACTIVATETIME 0x0033
#define TX_PWM_G6_G7_SYNC_LENGTH 0x0034
+#define TX_REFCLKFREQ 0x00EB
+#define TX_CFGCLKFREQVAL 0x00EC
+#define CFGEXTRATTR 0x00F0
+#define DITHERCTRL2 0x00F1
/*
* M-RX Configuration Attributes
@@ -51,10 +55,40 @@
#define RX_TERMINATION_FORCE_ENABLE 0x0089
#define RX_MIN_ACTIVATETIME_CAPABILITY 0x008F
#define RX_HIBERN8TIME_CAPABILITY 0x0092
+#define RX_REFCLKFREQ 0x00EB
+#define RX_CFGCLKFREQVAL 0x00EC
+#define CFGWIDEINLN 0x00F0
+#define CFGRXCDR8 0x00BA
+#define ENARXDIRECTCFG4 0x00F2
+#define CFGRXOVR8 0x00BD
+#define RXDIRECTCTRL2 0x00C7
+#define ENARXDIRECTCFG3 0x00F3
+#define RXCALCTRL 0x00B4
+#define ENARXDIRECTCFG2 0x00F4
+#define CFGRXOVR4 0x00E9
+#define RXSQCTRL 0x00B5
+#define CFGRXOVR6 0x00BF
#define is_mphy_tx_attr(attr) (attr < RX_MODE)
#define RX_MIN_ACTIVATETIME_UNIT_US 100
#define HIBERN8TIME_UNIT_US 100
+
+/*
+ * Common Block Attributes
+ */
+#define TX_GLOBALHIBERNATE UNIPRO_CB_OFFSET(0x002B)
+#define REFCLKMODE UNIPRO_CB_OFFSET(0x00BF)
+#define DIRECTCTRL19 UNIPRO_CB_OFFSET(0x00CD)
+#define DIRECTCTRL10 UNIPRO_CB_OFFSET(0x00E6)
+#define CDIRECTCTRL6 UNIPRO_CB_OFFSET(0x00EA)
+#define RTOBSERVESELECT UNIPRO_CB_OFFSET(0x00F0)
+#define CBDIVFACTOR UNIPRO_CB_OFFSET(0x00F1)
+#define CBDCOCTRL5 UNIPRO_CB_OFFSET(0x00F3)
+#define CBPRGPLL2 UNIPRO_CB_OFFSET(0x00F8)
+#define CBPRGTUNING UNIPRO_CB_OFFSET(0x00FB)
+
+#define UNIPRO_CB_OFFSET(x)	(0x8000 | (x))
+
/*
* PHY Adapter attributes
*/
@@ -119,6 +153,11 @@
#define PA_TACTIVATE_TIME_UNIT_US 10
#define PA_HIBERN8_TIME_UNIT_US 100
+/* Other attributes */
+#define VS_MPHYCFGUPDT 0xD085
+#define VS_DEBUGOMC 0xD09E
+#define VS_POWERSTATE 0xD083
+
/* PHY Adapter Protocol Constants */
#define PA_MAXDATALANES 4
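The common-block attributes above are ordinary attribute addresses with bit 15 set by UNIPRO_CB_OFFSET(). A worked example:

    /* TX_GLOBALHIBERNATE == UNIPRO_CB_OFFSET(0x002B)
     *                    == (0x8000 | 0x002B) == 0x802B */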
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 6164634af..4a0d3cdc6 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -17,7 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
- * Maintained by: Arvind Kumar <arvindkumar@vmware.com>
+ * Maintained by: Jim Gill <jgill@vmware.com>
*
*/
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
index 12712c92f..c097d2ccb 100644
--- a/drivers/scsi/vmw_pvscsi.h
+++ b/drivers/scsi/vmw_pvscsi.h
@@ -17,7 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
- * Maintained by: Arvind Kumar <arvindkumar@vmware.com>
+ * Maintained by: Jim Gill <jgill@vmware.com>
*
*/
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index 0c0f17b9a..409f95984 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -192,7 +192,7 @@
#ifdef WD7000_DEBUG
#define dprintk printk
#else
-#define dprintk(format,args...)
+#define dprintk no_printk
#endif
/*
@@ -1591,8 +1591,8 @@ static int wd7000_biosparam(struct scsi_device *sdev,
{
char b[BDEVNAME_SIZE];
- dprintk("wd7000_biosparam: dev=%s, size=%d, ",
- bdevname(bdev, b), capacity);
+ dprintk("wd7000_biosparam: dev=%s, size=%llu, ",
+ bdevname(bdev, b), (u64)capacity);
(void)b; /* unused var warning? */
/*
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
index eb51e612c..04573a56c 100644
--- a/drivers/scsi/wd719x.c
+++ b/drivers/scsi/wd719x.c
@@ -962,7 +962,7 @@ static void wd719x_pci_remove(struct pci_dev *pdev)
scsi_host_put(sh);
}
-static DEFINE_PCI_DEVICE_TABLE(wd719x_pci_table) = {
+static const struct pci_device_id wd719x_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_WD, 0x3296) },
{}
};
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index a9bac3bf2..c887ecdaf 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -34,15 +34,6 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
static int __init sh_pm_runtime_init(void)
{
- if (IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_ARCH_SHMOBILE)) {
- if (!of_find_compatible_node(NULL, NULL,
- "renesas,cpg-mstp-clocks"))
- return 0;
- if (IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS_OF) &&
- of_find_node_with_property(NULL, "#power-domain-cells"))
- return 0;
- }
-
pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
return 0;
}
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index cb58ef0d9..fe42a2fdf 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -1,7 +1,6 @@
menu "SOC (System On Chip) specific Drivers"
source "drivers/soc/bcm/Kconfig"
-source "drivers/soc/brcmstb/Kconfig"
source "drivers/soc/fsl/qe/Kconfig"
source "drivers/soc/mediatek/Kconfig"
source "drivers/soc/qcom/Kconfig"
@@ -10,6 +9,7 @@ source "drivers/soc/samsung/Kconfig"
source "drivers/soc/sunxi/Kconfig"
source "drivers/soc/tegra/Kconfig"
source "drivers/soc/ti/Kconfig"
+source "drivers/soc/ux500/Kconfig"
source "drivers/soc/versatile/Kconfig"
endmenu
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 380230f03..50c23d0bd 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -3,7 +3,6 @@
#
obj-y += bcm/
-obj-$(CONFIG_SOC_BRCMSTB) += brcmstb/
obj-$(CONFIG_ARCH_DOVE) += dove/
obj-$(CONFIG_MACH_DOVE) += dove/
obj-y += fsl/
@@ -15,4 +14,5 @@ obj-$(CONFIG_SOC_SAMSUNG) += samsung/
obj-$(CONFIG_ARCH_SUNXI) += sunxi/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-$(CONFIG_SOC_TI) += ti/
+obj-$(CONFIG_ARCH_U8500) += ux500/
obj-$(CONFIG_PLAT_VERSATILE) += versatile/
diff --git a/drivers/soc/bcm/Kconfig b/drivers/soc/bcm/Kconfig
index 3066edea1..a39b0d58d 100644
--- a/drivers/soc/bcm/Kconfig
+++ b/drivers/soc/bcm/Kconfig
@@ -1,9 +1,23 @@
+menu "Broadcom SoC drivers"
+
config RASPBERRYPI_POWER
bool "Raspberry Pi power domain driver"
- depends on ARCH_BCM2835 || COMPILE_TEST
+ depends on ARCH_BCM2835 || (COMPILE_TEST && OF)
depends on RASPBERRYPI_FIRMWARE=y
select PM_GENERIC_DOMAINS if PM
- select PM_GENERIC_DOMAINS_OF if PM
help
This enables support for the RPi power domains which can be enabled
or disabled via the RPi firmware.
+
+config SOC_BRCMSTB
+ bool "Broadcom STB SoC drivers"
+ depends on ARM
+ select SOC_BUS
+ help
+ Enables drivers for the Broadcom Set-Top Box (STB) series of chips.
+ This option alone enables only some support code, while the drivers
+ can be enabled individually within this menu.
+
+ If unsure, say N.
+
+endmenu
diff --git a/drivers/soc/bcm/Makefile b/drivers/soc/bcm/Makefile
index 63aa3eb23..dc4fced72 100644
--- a/drivers/soc/bcm/Makefile
+++ b/drivers/soc/bcm/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_RASPBERRYPI_POWER) += raspberrypi-power.o
+obj-$(CONFIG_SOC_BRCMSTB) += brcmstb/
diff --git a/drivers/soc/bcm/brcmstb/Makefile b/drivers/soc/bcm/brcmstb/Makefile
new file mode 100644
index 000000000..9120b2715
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/Makefile
@@ -0,0 +1 @@
+obj-y += common.o biuctrl.o
diff --git a/drivers/soc/bcm/brcmstb/biuctrl.c b/drivers/soc/bcm/brcmstb/biuctrl.c
new file mode 100644
index 000000000..3c39415d4
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/biuctrl.c
@@ -0,0 +1,117 @@
+/*
+ * Broadcom STB SoCs Bus Unit Interface controls
+ *
+ * Copyright (C) 2015, Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "brcmstb: " KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/syscore_ops.h>
+#include <linux/soc/brcmstb/brcmstb.h>
+
+#define CPU_CREDIT_REG_OFFSET 0x184
+#define CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK 0x70000000
+
+static void __iomem *cpubiuctrl_base;
+static bool mcp_wr_pairing_en;
+
+static int __init mcp_write_pairing_set(void)
+{
+ u32 creds = 0;
+
+ if (!cpubiuctrl_base)
+		return -ENODEV;
+
+ creds = readl_relaxed(cpubiuctrl_base + CPU_CREDIT_REG_OFFSET);
+ if (mcp_wr_pairing_en) {
+ pr_info("MCP: Enabling write pairing\n");
+ writel_relaxed(creds | CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK,
+ cpubiuctrl_base + CPU_CREDIT_REG_OFFSET);
+ } else if (creds & CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK) {
+ pr_info("MCP: Disabling write pairing\n");
+ writel_relaxed(creds & ~CPU_CREDIT_REG_MCPx_WR_PAIRING_EN_MASK,
+ cpubiuctrl_base + CPU_CREDIT_REG_OFFSET);
+ } else {
+ pr_info("MCP: Write pairing already disabled\n");
+ }
+
+ return 0;
+}
+
+static int __init setup_hifcpubiuctrl_regs(void)
+{
+ struct device_node *np;
+ int ret = 0;
+
+ np = of_find_compatible_node(NULL, NULL, "brcm,brcmstb-cpu-biu-ctrl");
+ if (!np) {
+ pr_err("missing BIU control node\n");
+ return -ENODEV;
+ }
+
+ cpubiuctrl_base = of_iomap(np, 0);
+ if (!cpubiuctrl_base) {
+ pr_err("failed to remap BIU control base\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mcp_wr_pairing_en = of_property_read_bool(np, "brcm,write-pairing");
+out:
+ of_node_put(np);
+ return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static u32 cpu_credit_reg_dump; /* for save/restore */
+
+static int brcmstb_cpu_credit_reg_suspend(void)
+{
+ if (cpubiuctrl_base)
+ cpu_credit_reg_dump =
+ readl_relaxed(cpubiuctrl_base + CPU_CREDIT_REG_OFFSET);
+ return 0;
+}
+
+static void brcmstb_cpu_credit_reg_resume(void)
+{
+ if (cpubiuctrl_base)
+ writel_relaxed(cpu_credit_reg_dump,
+ cpubiuctrl_base + CPU_CREDIT_REG_OFFSET);
+}
+
+static struct syscore_ops brcmstb_cpu_credit_syscore_ops = {
+ .suspend = brcmstb_cpu_credit_reg_suspend,
+ .resume = brcmstb_cpu_credit_reg_resume,
+};
+#endif
+
+
+void __init brcmstb_biuctrl_init(void)
+{
+ int ret;
+
+ setup_hifcpubiuctrl_regs();
+
+ ret = mcp_write_pairing_set();
+ if (ret) {
+ pr_err("MCP: Unable to disable write pairing!\n");
+ return;
+ }
+
+#ifdef CONFIG_PM_SLEEP
+ register_syscore_ops(&brcmstb_cpu_credit_syscore_ops);
+#endif
+}
diff --git a/drivers/soc/bcm/brcmstb/common.c b/drivers/soc/bcm/brcmstb/common.c
new file mode 100644
index 000000000..94e733555
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/common.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright © 2014 NVIDIA Corporation
+ * Copyright © 2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/soc/brcmstb/brcmstb.h>
+#include <linux/sys_soc.h>
+
+#include <soc/brcmstb/common.h>
+
+static u32 family_id;
+static u32 product_id;
+
+static const struct of_device_id brcmstb_machine_match[] = {
+ { .compatible = "brcm,brcmstb", },
+ { }
+};
+
+bool soc_is_brcmstb(void)
+{
+	struct device_node *root;
+	bool ret;
+
+	root = of_find_node_by_path("/");
+	if (!root)
+		return false;
+
+	ret = of_match_node(brcmstb_machine_match, root) != NULL;
+	of_node_put(root);
+
+	return ret;
+}
+
+static const struct of_device_id sun_top_ctrl_match[] = {
+ { .compatible = "brcm,brcmstb-sun-top-ctrl", },
+ { }
+};
+
+static int __init brcmstb_soc_device_init(void)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ struct soc_device *soc_dev;
+ struct device_node *sun_top_ctrl;
+ void __iomem *sun_top_ctrl_base;
+ int ret = 0;
+
+ sun_top_ctrl = of_find_matching_node(NULL, sun_top_ctrl_match);
+ if (!sun_top_ctrl)
+ return -ENODEV;
+
+ sun_top_ctrl_base = of_iomap(sun_top_ctrl, 0);
+ if (!sun_top_ctrl_base)
+ return -ENODEV;
+
+ family_id = readl(sun_top_ctrl_base);
+ product_id = readl(sun_top_ctrl_base + 0x4);
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ soc_dev_attr->family = kasprintf(GFP_KERNEL, "%x",
+ family_id >> 28 ?
+ family_id >> 16 : family_id >> 8);
+ soc_dev_attr->soc_id = kasprintf(GFP_KERNEL, "%x",
+ product_id >> 28 ?
+ product_id >> 16 : product_id >> 8);
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%c%d",
+ ((product_id & 0xf0) >> 4) + 'A',
+ product_id & 0xf);
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr->family);
+ kfree(soc_dev_attr->soc_id);
+ kfree(soc_dev_attr->revision);
+ kfree(soc_dev_attr);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ return 0;
+
+out:
+ iounmap(sun_top_ctrl_base);
+ return ret;
+}
+arch_initcall(brcmstb_soc_device_init);
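The revision string is decoded from the product ID's low byte: the high nibble selects a letter and the low nibble a digit. A worked example, assuming a hypothetical low byte of 0x21:

    /* (0x21 & 0xf0) >> 4 == 2, 2 + 'A' == 'C';
     * 0x21 & 0x0f == 1         -> revision "C1" */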
diff --git a/drivers/soc/fsl/qe/Kconfig b/drivers/soc/fsl/qe/Kconfig
index 20978f205..73a2e08b4 100644
--- a/drivers/soc/fsl/qe/Kconfig
+++ b/drivers/soc/fsl/qe/Kconfig
@@ -22,7 +22,7 @@ config UCC_SLOW
config UCC_FAST
bool
- default y if UCC_GETH
+ default y if UCC_GETH || QE_TDM
help
This option provides qe_lib support to UCC fast
protocols: HDLC, Ethernet, ATM, transparent
@@ -31,6 +31,10 @@ config UCC
bool
default y if UCC_FAST || UCC_SLOW
+config QE_TDM
+ bool
+ default y if FSL_UCC_HDLC
+
config QE_USB
bool
default y if USB_FSL_QE
diff --git a/drivers/soc/fsl/qe/Makefile b/drivers/soc/fsl/qe/Makefile
index ffac5410c..2031d385b 100644
--- a/drivers/soc/fsl/qe/Makefile
+++ b/drivers/soc/fsl/qe/Makefile
@@ -6,5 +6,6 @@ obj-$(CONFIG_CPM) += qe_common.o
obj-$(CONFIG_UCC) += ucc.o
obj-$(CONFIG_UCC_SLOW) += ucc_slow.o
obj-$(CONFIG_UCC_FAST) += ucc_fast.o
+obj-$(CONFIG_QE_TDM) += qe_tdm.o
obj-$(CONFIG_QE_USB) += usb.o
obj-$(CONFIG_QE_GPIO) += gpio.o
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
index 709fc6380..7026507e6 100644
--- a/drivers/soc/fsl/qe/qe.c
+++ b/drivers/soc/fsl/qe/qe.c
@@ -239,6 +239,12 @@ enum qe_clock qe_clock_source(const char *source)
if (strcasecmp(source, "none") == 0)
return QE_CLK_NONE;
+ if (strcmp(source, "tsync_pin") == 0)
+ return QE_TSYNC_PIN;
+
+ if (strcmp(source, "rsync_pin") == 0)
+ return QE_RSYNC_PIN;
+
if (strncasecmp(source, "brg", 3) == 0) {
i = simple_strtoul(source + 3, NULL, 10);
if ((i >= 1) && (i <= 16))
diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c
new file mode 100644
index 000000000..5e48b1470
--- /dev/null
+++ b/drivers/soc/fsl/qe/qe_tdm.c
@@ -0,0 +1,276 @@
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Authors: Zhao Qiang <qiang.zhao@nxp.com>
+ *
+ * Description:
+ * QE TDM API Set - TDM specific routines implementations.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <soc/fsl/qe/qe_tdm.h>
+
+static int set_tdm_framer(const char *tdm_framer_type)
+{
+ if (strcmp(tdm_framer_type, "e1") == 0)
+ return TDM_FRAMER_E1;
+ else if (strcmp(tdm_framer_type, "t1") == 0)
+ return TDM_FRAMER_T1;
+ else
+ return -EINVAL;
+}
+
+static void set_si_param(struct ucc_tdm *utdm, struct ucc_tdm_info *ut_info)
+{
+ struct si_mode_info *si_info = &ut_info->si_info;
+
+ if (utdm->tdm_mode == TDM_INTERNAL_LOOPBACK) {
+ si_info->simr_crt = 1;
+ si_info->simr_rfsd = 0;
+ }
+}
+
+int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
+ struct ucc_tdm_info *ut_info)
+{
+ const char *sprop;
+ int ret = 0;
+ u32 val;
+ struct resource *res;
+ struct device_node *np2;
+ static int siram_init_flag;
+ struct platform_device *pdev;
+
+ sprop = of_get_property(np, "fsl,rx-sync-clock", NULL);
+ if (sprop) {
+ ut_info->uf_info.rx_sync = qe_clock_source(sprop);
+ if ((ut_info->uf_info.rx_sync < QE_CLK_NONE) ||
+ (ut_info->uf_info.rx_sync > QE_RSYNC_PIN)) {
+ pr_err("QE-TDM: Invalid rx-sync-clock property\n");
+ return -EINVAL;
+ }
+ } else {
+ pr_err("QE-TDM: Invalid rx-sync-clock property\n");
+ return -EINVAL;
+ }
+
+ sprop = of_get_property(np, "fsl,tx-sync-clock", NULL);
+ if (sprop) {
+ ut_info->uf_info.tx_sync = qe_clock_source(sprop);
+ if ((ut_info->uf_info.tx_sync < QE_CLK_NONE) ||
+ (ut_info->uf_info.tx_sync > QE_TSYNC_PIN)) {
+ pr_err("QE-TDM: Invalid tx-sync-clock property\n");
+ return -EINVAL;
+ }
+ } else {
+ pr_err("QE-TDM: Invalid tx-sync-clock property\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_index(np, "fsl,tx-timeslot-mask", 0, &val);
+ if (ret) {
+ pr_err("QE-TDM: Invalid tx-timeslot-mask property\n");
+ return -EINVAL;
+ }
+ utdm->tx_ts_mask = val;
+
+ ret = of_property_read_u32_index(np, "fsl,rx-timeslot-mask", 0, &val);
+ if (ret) {
+ ret = -EINVAL;
+ pr_err("QE-TDM: Invalid rx-timeslot-mask property\n");
+ return ret;
+ }
+ utdm->rx_ts_mask = val;
+
+ ret = of_property_read_u32_index(np, "fsl,tdm-id", 0, &val);
+ if (ret) {
+ ret = -EINVAL;
+ pr_err("QE-TDM: No fsl,tdm-id property for this UCC\n");
+ return ret;
+ }
+ utdm->tdm_port = val;
+ ut_info->uf_info.tdm_num = utdm->tdm_port;
+
+ if (of_get_property(np, "fsl,tdm-internal-loopback", NULL))
+ utdm->tdm_mode = TDM_INTERNAL_LOOPBACK;
+ else
+ utdm->tdm_mode = TDM_NORMAL;
+
+ sprop = of_get_property(np, "fsl,tdm-framer-type", NULL);
+ if (!sprop) {
+ ret = -EINVAL;
+ pr_err("QE-TDM: No tdm-framer-type property for UCC\n");
+ return ret;
+ }
+ ret = set_tdm_framer(sprop);
+ if (ret < 0)
+ return -EINVAL;
+ utdm->tdm_framer_type = ret;
+
+ ret = of_property_read_u32_index(np, "fsl,siram-entry-id", 0, &val);
+ if (ret) {
+ ret = -EINVAL;
+ pr_err("QE-TDM: No siram entry id for UCC\n");
+ return ret;
+ }
+ utdm->siram_entry_id = val;
+
+ set_si_param(utdm, ut_info);
+
+ np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-si");
+ if (!np2)
+ return -EINVAL;
+
+ pdev = of_find_device_by_node(np2);
+ if (!pdev) {
+ pr_err("%s: failed to lookup pdev\n", np2->name);
+ of_node_put(np2);
+ return -EINVAL;
+ }
+
+ of_node_put(np2);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ utdm->si_regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(utdm->si_regs)) {
+ ret = PTR_ERR(utdm->si_regs);
+ goto err_miss_siram_property;
+ }
+
+ np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-siram");
+ if (!np2) {
+ ret = -EINVAL;
+ goto err_miss_siram_property;
+ }
+
+ pdev = of_find_device_by_node(np2);
+ if (!pdev) {
+ ret = -EINVAL;
+ pr_err("%s: failed to lookup pdev\n", np2->name);
+ of_node_put(np2);
+ goto err_miss_siram_property;
+ }
+
+ of_node_put(np2);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ utdm->siram = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(utdm->siram)) {
+ ret = PTR_ERR(utdm->siram);
+ goto err_miss_siram_property;
+ }
+
+ if (siram_init_flag == 0) {
+		memset_io(utdm->siram, 0, resource_size(res));
+ siram_init_flag = 1;
+ }
+
+ return ret;
+
+err_miss_siram_property:
+ devm_iounmap(&pdev->dev, utdm->si_regs);
+ return ret;
+}
+
+void ucc_tdm_init(struct ucc_tdm *utdm, struct ucc_tdm_info *ut_info)
+{
+ struct si1 __iomem *si_regs;
+ u16 __iomem *siram;
+ u16 siram_entry_valid;
+ u16 siram_entry_closed;
+ u16 ucc_num;
+ u8 csel;
+ u16 sixmr;
+ u16 tdm_port;
+ u32 siram_entry_id;
+ u32 mask;
+ int i;
+
+ si_regs = utdm->si_regs;
+ siram = utdm->siram;
+ ucc_num = ut_info->uf_info.ucc_num;
+ tdm_port = utdm->tdm_port;
+ siram_entry_id = utdm->siram_entry_id;
+
+ if (utdm->tdm_framer_type == TDM_FRAMER_T1)
+ utdm->num_of_ts = 24;
+ if (utdm->tdm_framer_type == TDM_FRAMER_E1)
+ utdm->num_of_ts = 32;
+
+ /* set siram table */
+ csel = (ucc_num < 4) ? ucc_num + 9 : ucc_num - 3;
+
+ siram_entry_valid = SIR_CSEL(csel) | SIR_BYTE | SIR_CNT(0);
+ siram_entry_closed = SIR_IDLE | SIR_BYTE | SIR_CNT(0);
+
+ for (i = 0; i < utdm->num_of_ts; i++) {
+ mask = 0x01 << i;
+
+ if (utdm->tx_ts_mask & mask)
+ iowrite16be(siram_entry_valid,
+ &siram[siram_entry_id * 32 + i]);
+ else
+ iowrite16be(siram_entry_closed,
+ &siram[siram_entry_id * 32 + i]);
+
+ if (utdm->rx_ts_mask & mask)
+ iowrite16be(siram_entry_valid,
+ &siram[siram_entry_id * 32 + 0x200 + i]);
+ else
+ iowrite16be(siram_entry_closed,
+ &siram[siram_entry_id * 32 + 0x200 + i]);
+ }
+
+ setbits16(&siram[(siram_entry_id * 32) + (utdm->num_of_ts - 1)],
+ SIR_LAST);
+ setbits16(&siram[(siram_entry_id * 32) + 0x200 + (utdm->num_of_ts - 1)],
+ SIR_LAST);
+
+ /* Set SIxMR register */
+ sixmr = SIMR_SAD(siram_entry_id);
+
+ sixmr &= ~SIMR_SDM_MASK;
+
+ if (utdm->tdm_mode == TDM_INTERNAL_LOOPBACK)
+ sixmr |= SIMR_SDM_INTERNAL_LOOPBACK;
+ else
+ sixmr |= SIMR_SDM_NORMAL;
+
+ sixmr |= SIMR_RFSD(ut_info->si_info.simr_rfsd) |
+ SIMR_TFSD(ut_info->si_info.simr_tfsd);
+
+ if (ut_info->si_info.simr_crt)
+ sixmr |= SIMR_CRT;
+ if (ut_info->si_info.simr_sl)
+ sixmr |= SIMR_SL;
+ if (ut_info->si_info.simr_ce)
+ sixmr |= SIMR_CE;
+ if (ut_info->si_info.simr_fe)
+ sixmr |= SIMR_FE;
+ if (ut_info->si_info.simr_gm)
+ sixmr |= SIMR_GM;
+
+ switch (tdm_port) {
+ case 0:
+ iowrite16be(sixmr, &si_regs->sixmr1[0]);
+ break;
+ case 1:
+ iowrite16be(sixmr, &si_regs->sixmr1[1]);
+ break;
+ case 2:
+ iowrite16be(sixmr, &si_regs->sixmr1[2]);
+ break;
+ case 3:
+ iowrite16be(sixmr, &si_regs->sixmr1[3]);
+ break;
+ default:
+ pr_err("QE-TDM: can not find tdm sixmr reg\n");
+ break;
+ }
+}
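The SI RAM addressing above gives each entry ID a 32-slot TX block, with the matching RX block offset by 0x200 halfwords. A worked example for siram_entry_id == 2:

    /* TX timeslot i -> siram[2 * 32 + i]         == siram[64 + i]
     * RX timeslot i -> siram[2 * 32 + 0x200 + i] == siram[576 + i] */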
diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c
index b59d3358f..c646d8713 100644
--- a/drivers/soc/fsl/qe/ucc.c
+++ b/drivers/soc/fsl/qe/ucc.c
@@ -25,6 +25,12 @@
#include <soc/fsl/qe/qe.h>
#include <soc/fsl/qe/ucc.h>
+#define UCC_TDM_NUM 8
+#define RX_SYNC_SHIFT_BASE 30
+#define TX_SYNC_SHIFT_BASE 14
+#define RX_CLK_SHIFT_BASE 28
+#define TX_CLK_SHIFT_BASE 12
+
int ucc_set_qe_mux_mii_mng(unsigned int ucc_num)
{
unsigned long flags;
@@ -210,3 +216,447 @@ int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock,
return 0;
}
+
+static int ucc_get_tdm_common_clk(u32 tdm_num, enum qe_clock clock)
+{
+ int clock_bits = -EINVAL;
+
+ /*
+	 * For TDM[0, 1, 2, 3], TX and RX use the common
+	 * clock sources BRG3/BRG4 and CLK1/CLK2;
+	 * for TDM[4, 5, 6, 7], TX and RX use the common
+	 * clock sources BRG12/BRG13 and CLK23/CLK24.
+ */
+ switch (tdm_num) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ switch (clock) {
+ case QE_BRG3:
+ clock_bits = 1;
+ break;
+ case QE_BRG4:
+ clock_bits = 2;
+ break;
+ case QE_CLK1:
+ clock_bits = 4;
+ break;
+ case QE_CLK2:
+ clock_bits = 5;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ switch (clock) {
+ case QE_BRG12:
+ clock_bits = 1;
+ break;
+ case QE_BRG13:
+ clock_bits = 2;
+ break;
+ case QE_CLK23:
+ clock_bits = 4;
+ break;
+ case QE_CLK24:
+ clock_bits = 5;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return clock_bits;
+}
+
+static int ucc_get_tdm_rx_clk(u32 tdm_num, enum qe_clock clock)
+{
+ int clock_bits = -EINVAL;
+
+ switch (tdm_num) {
+ case 0:
+ switch (clock) {
+ case QE_CLK3:
+ clock_bits = 6;
+ break;
+ case QE_CLK8:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 1:
+ switch (clock) {
+ case QE_CLK5:
+ clock_bits = 6;
+ break;
+ case QE_CLK10:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 2:
+ switch (clock) {
+ case QE_CLK7:
+ clock_bits = 6;
+ break;
+ case QE_CLK12:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 3:
+ switch (clock) {
+ case QE_CLK9:
+ clock_bits = 6;
+ break;
+ case QE_CLK14:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 4:
+ switch (clock) {
+ case QE_CLK11:
+ clock_bits = 6;
+ break;
+ case QE_CLK16:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 5:
+ switch (clock) {
+ case QE_CLK13:
+ clock_bits = 6;
+ break;
+ case QE_CLK18:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 6:
+ switch (clock) {
+ case QE_CLK15:
+ clock_bits = 6;
+ break;
+ case QE_CLK20:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 7:
+ switch (clock) {
+ case QE_CLK17:
+ clock_bits = 6;
+ break;
+ case QE_CLK22:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+
+ return clock_bits;
+}
+
+static int ucc_get_tdm_tx_clk(u32 tdm_num, enum qe_clock clock)
+{
+ int clock_bits = -EINVAL;
+
+ switch (tdm_num) {
+ case 0:
+ switch (clock) {
+ case QE_CLK4:
+ clock_bits = 6;
+ break;
+ case QE_CLK9:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 1:
+ switch (clock) {
+ case QE_CLK6:
+ clock_bits = 6;
+ break;
+ case QE_CLK11:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 2:
+ switch (clock) {
+ case QE_CLK8:
+ clock_bits = 6;
+ break;
+ case QE_CLK13:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 3:
+ switch (clock) {
+ case QE_CLK10:
+ clock_bits = 6;
+ break;
+ case QE_CLK15:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 4:
+ switch (clock) {
+ case QE_CLK12:
+ clock_bits = 6;
+ break;
+ case QE_CLK17:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 5:
+ switch (clock) {
+ case QE_CLK14:
+ clock_bits = 6;
+ break;
+ case QE_CLK19:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 6:
+ switch (clock) {
+ case QE_CLK16:
+ clock_bits = 6;
+ break;
+ case QE_CLK21:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 7:
+ switch (clock) {
+ case QE_CLK18:
+ clock_bits = 6;
+ break;
+ case QE_CLK3:
+ clock_bits = 7;
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+
+ return clock_bits;
+}
+
+/* tdm_num: TDM port number; ports A-H map to 0-7 */
+static int ucc_get_tdm_rxtx_clk(enum comm_dir mode, u32 tdm_num,
+ enum qe_clock clock)
+{
+ int clock_bits;
+
+ clock_bits = ucc_get_tdm_common_clk(tdm_num, clock);
+ if (clock_bits > 0)
+ return clock_bits;
+ if (mode == COMM_DIR_RX)
+ clock_bits = ucc_get_tdm_rx_clk(tdm_num, clock);
+ if (mode == COMM_DIR_TX)
+ clock_bits = ucc_get_tdm_tx_clk(tdm_num, clock);
+ return clock_bits;
+}
+
+static u32 ucc_get_tdm_clk_shift(enum comm_dir mode, u32 tdm_num)
+{
+ u32 shift;
+
+ shift = (mode == COMM_DIR_RX) ? RX_CLK_SHIFT_BASE : TX_CLK_SHIFT_BASE;
+ if (tdm_num < 4)
+ shift -= tdm_num * 4;
+ else
+ shift -= (tdm_num - 4) * 4;
+
+ return shift;
+}
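
The shift helper above places one 4-bit clock-source field per TDM in each half of the SI1 clock-route register; a worked sketch of the values it yields, derived from the RX_CLK_SHIFT_BASE/TX_CLK_SHIFT_BASE defines at the top of this file:

/* Shifts produced by ucc_get_tdm_clk_shift():
 *   RX: tdm 0..3 -> 28, 24, 20, 16   (fields in cmxsi1cr_l)
 *       tdm 4..7 -> 28, 24, 20, 16   (same offsets, in cmxsi1cr_h)
 *   TX: tdm 0..3 -> 12,  8,  4,  0   (cmxsi1cr_l)
 *       tdm 4..7 -> 12,  8,  4,  0   (cmxsi1cr_h)
 */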
+
+int ucc_set_tdm_rxtx_clk(u32 tdm_num, enum qe_clock clock,
+ enum comm_dir mode)
+{
+ int clock_bits;
+ u32 shift;
+ struct qe_mux __iomem *qe_mux_reg;
+ __be32 __iomem *cmxs1cr;
+
+ qe_mux_reg = &qe_immr->qmx;
+
+ if (tdm_num >= UCC_TDM_NUM)
+ return -EINVAL;
+
+ /* The communications direction must be RX or TX */
+ if (mode != COMM_DIR_RX && mode != COMM_DIR_TX)
+ return -EINVAL;
+
+ clock_bits = ucc_get_tdm_rxtx_clk(mode, tdm_num, clock);
+ if (clock_bits < 0)
+ return -EINVAL;
+
+ shift = ucc_get_tdm_clk_shift(mode, tdm_num);
+
+ cmxs1cr = (tdm_num < 4) ? &qe_mux_reg->cmxsi1cr_l :
+ &qe_mux_reg->cmxsi1cr_h;
+
+ qe_clrsetbits32(cmxs1cr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+ clock_bits << shift);
+
+ return 0;
+}
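
A minimal usage sketch (hypothetical caller, per the per-port tables above): route CLK3 to the TDM port 0 receiver and CLK4 to its transmitter.

static int example_route_tdm0_clocks(void)
{
	int ret;

	/* TDM0 RX accepts QE_CLK3 (clock_bits 6) */
	ret = ucc_set_tdm_rxtx_clk(0, QE_CLK3, COMM_DIR_RX);
	if (ret)
		return ret;

	/* TDM0 TX accepts QE_CLK4 (clock_bits 6) */
	return ucc_set_tdm_rxtx_clk(0, QE_CLK4, COMM_DIR_TX);
}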
+
+static int ucc_get_tdm_sync_source(u32 tdm_num, enum qe_clock clock,
+ enum comm_dir mode)
+{
+ int source = -EINVAL;
+
+ if ((mode == COMM_DIR_RX && clock == QE_RSYNC_PIN) ||
+     (mode == COMM_DIR_TX && clock == QE_TSYNC_PIN))
+ return 0;
+
+ switch (tdm_num) {
+ case 0:
+ case 1:
+ switch (clock) {
+ case QE_BRG9:
+ source = 1;
+ break;
+ case QE_BRG10:
+ source = 2;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 2:
+ case 3:
+ switch (clock) {
+ case QE_BRG9:
+ source = 1;
+ break;
+ case QE_BRG11:
+ source = 2;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 4:
+ case 5:
+ switch (clock) {
+ case QE_BRG13:
+ source = 1;
+ break;
+ case QE_BRG14:
+ source = 2;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 6:
+ case 7:
+ switch (clock) {
+ case QE_BRG13:
+ source = 1;
+ break;
+ case QE_BRG15:
+ source = 2;
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+
+ return source;
+}
+
+static u32 ucc_get_tdm_sync_shift(enum comm_dir mode, u32 tdm_num)
+{
+ u32 shift;
+
+ shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : TX_SYNC_SHIFT_BASE;
+ shift -= tdm_num * 2;
+
+ return shift;
+}
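
Each sync-source field in CMXSI1SYR is two bits wide, so the helper above yields the following shifts (derived from the RX/TX sync base defines):

/* Shifts produced by ucc_get_tdm_sync_shift():
 *   RX: tdm 0..7 -> 30, 28, 26, 24, 22, 20, 18, 16
 *   TX: tdm 0..7 -> 14, 12, 10,  8,  6,  4,  2,  0
 */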
+
+int ucc_set_tdm_rxtx_sync(u32 tdm_num, enum qe_clock clock,
+ enum comm_dir mode)
+{
+ int source;
+ u32 shift;
+ struct qe_mux __iomem *qe_mux_reg;
+
+ qe_mux_reg = &qe_immr->qmx;
+
+ if (tdm_num >= UCC_TDM_NUM)
+ return -EINVAL;
+
+ /* The communications direction must be RX or TX */
+ if (mode != COMM_DIR_RX && mode != COMM_DIR_TX)
+ return -EINVAL;
+
+ source = ucc_get_tdm_sync_source(tdm_num, clock, mode);
+ if (source < 0)
+ return -EINVAL;
+
+ shift = ucc_get_tdm_sync_shift(mode, tdm_num);
+
+ qe_clrsetbits32(&qe_mux_reg->cmxsi1syr,
+ QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+ source << shift);
+
+ return 0;
+}
diff --git a/drivers/soc/fsl/qe/ucc_fast.c b/drivers/soc/fsl/qe/ucc_fast.c
index a7689310f..83d8d16e3 100644
--- a/drivers/soc/fsl/qe/ucc_fast.c
+++ b/drivers/soc/fsl/qe/ucc_fast.c
@@ -327,6 +327,42 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
ucc_fast_free(uccf);
return -EINVAL;
}
+ } else {
+ /* tdm Rx clock routing */
+ if ((uf_info->rx_clock != QE_CLK_NONE) &&
+ ucc_set_tdm_rxtx_clk(uf_info->tdm_num, uf_info->rx_clock,
+ COMM_DIR_RX)) {
+ pr_err("%s: illegal value for RX clock", __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
+ }
+
+ /* tdm Tx clock routing */
+ if ((uf_info->tx_clock != QE_CLK_NONE) &&
+ ucc_set_tdm_rxtx_clk(uf_info->tdm_num, uf_info->tx_clock,
+ COMM_DIR_TX)) {
+ pr_err("%s: illegal value for TX clock", __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
+ }
+
+ /* tdm Rx sync clock routing */
+ if ((uf_info->rx_sync != QE_CLK_NONE) &&
+ ucc_set_tdm_rxtx_sync(uf_info->tdm_num, uf_info->rx_sync,
+ COMM_DIR_RX)) {
+ pr_err("%s: illegal value for RX clock", __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
+ }
+
+ /* tdm Tx sync clock routing */
+ if ((uf_info->tx_sync != QE_CLK_NONE) &&
+ ucc_set_tdm_rxtx_sync(uf_info->tdm_num, uf_info->tx_sync,
+ COMM_DIR_TX)) {
+ pr_err("%s: illegal value for TX clock", __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
+ }
}
/* Set interrupt mask register at UCC level. */
diff --git a/drivers/soc/qcom/smem_state.c b/drivers/soc/qcom/smem_state.c
index 54261decb..d5437ca76 100644
--- a/drivers/soc/qcom/smem_state.c
+++ b/drivers/soc/qcom/smem_state.c
@@ -104,26 +104,26 @@ struct qcom_smem_state *qcom_smem_state_get(struct device *dev,
if (con_id) {
index = of_property_match_string(dev->of_node,
- "qcom,state-names",
+ "qcom,smem-state-names",
con_id);
if (index < 0) {
- dev_err(dev, "missing qcom,state-names\n");
+ dev_err(dev, "missing qcom,smem-state-names\n");
return ERR_PTR(index);
}
}
ret = of_parse_phandle_with_args(dev->of_node,
- "qcom,state",
- "#qcom,state-cells",
+ "qcom,smem-states",
+ "#qcom,smem-state-cells",
index,
&args);
if (ret) {
- dev_err(dev, "failed to parse qcom,state property\n");
+ dev_err(dev, "failed to parse qcom,smem-states property\n");
return ERR_PTR(ret);
}
if (args.args_count != 1) {
- dev_err(dev, "invalid #qcom,state-cells\n");
+ dev_err(dev, "invalid #qcom,smem-state-cells\n");
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index 9c2788b8f..f51fb2ea7 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -196,7 +196,7 @@ static irqreturn_t qcom_smp2p_intr(int irq, void *data)
/* Match newly created entries */
for (i = smp2p->valid_entries; i < in->valid_entries; i++) {
list_for_each_entry(entry, &smp2p->inbound, node) {
- memcpy_fromio(buf, in->entries[i].name, sizeof(buf));
+ memcpy(buf, in->entries[i].name, sizeof(buf));
if (!strcmp(buf, entry->name)) {
entry->value = &in->entries[i].value;
break;
@@ -343,7 +343,7 @@ static int qcom_smp2p_outbound_entry(struct qcom_smp2p *smp2p,
/* Allocate an entry from the smem item */
strlcpy(buf, entry->name, SMP2P_MAX_ENTRY_NAME);
- memcpy_toio(out->entries[out->valid_entries].name, buf, SMP2P_MAX_ENTRY_NAME);
+ memcpy(out->entries[out->valid_entries].name, buf, SMP2P_MAX_ENTRY_NAME);
/* Make the logical entry reference the physical value */
entry->value = &out->entries[out->valid_entries].value;
diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c
index 6b777af1b..d0337b2a7 100644
--- a/drivers/soc/qcom/smsm.c
+++ b/drivers/soc/qcom/smsm.c
@@ -495,7 +495,7 @@ static int qcom_smsm_probe(struct platform_device *pdev)
if (!smsm->hosts)
return -ENOMEM;
- local_node = of_find_node_with_property(pdev->dev.of_node, "#qcom,state-cells");
+ local_node = of_find_node_with_property(pdev->dev.of_node, "#qcom,smem-state-cells");
if (!local_node) {
dev_err(&pdev->dev, "no state entry\n");
return -EINVAL;
diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c
index f633dcc96..859445eb9 100644
--- a/drivers/soc/qcom/wcnss_ctrl.c
+++ b/drivers/soc/qcom/wcnss_ctrl.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2016, Linaro Ltd.
* Copyright (c) 2015, Sony Mobile Communications Inc.
*
* This program is free software; you can redistribute it and/or modify
@@ -14,8 +15,16 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smd.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/soc/qcom/wcnss_ctrl.h>
#define WCNSS_REQUEST_TIMEOUT (5 * HZ)
+#define WCNSS_CBC_TIMEOUT (10 * HZ)
+
+#define WCNSS_ACK_DONE_BOOTING 1
+#define WCNSS_ACK_COLD_BOOTING 2
#define NV_FRAGMENT_SIZE 3072
#define NVBIN_FILE "/*(DEBLOBBED)*/"
@@ -25,17 +34,19 @@
* @dev: device handle
* @channel: SMD channel handle
* @ack: completion for outstanding requests
+ * @cbc: completion for cbc complete indication
* @ack_status: status of the outstanding request
- * @download_nv_work: worker for uploading nv binary
+ * @probe_work: worker performing the asynchronous probe (version request and nv download)
*/
struct wcnss_ctrl {
struct device *dev;
struct qcom_smd_channel *channel;
struct completion ack;
+ struct completion cbc;
int ack_status;
- struct work_struct download_nv_work;
+ struct work_struct probe_work;
};
/* message types */
@@ -48,6 +59,11 @@ enum {
WCNSS_UPLOAD_CAL_RESP,
WCNSS_DOWNLOAD_CAL_REQ,
WCNSS_DOWNLOAD_CAL_RESP,
+ WCNSS_VBAT_LEVEL_IND,
+ WCNSS_BUILD_VERSION_REQ,
+ WCNSS_BUILD_VERSION_RESP,
+ WCNSS_PM_CONFIG_REQ,
+ WCNSS_CBC_COMPLETE_IND,
};
/**
@@ -128,7 +144,7 @@ static int wcnss_ctrl_smd_callback(struct qcom_smd_channel *channel,
version->major, version->minor,
version->version, version->revision);
- schedule_work(&wcnss->download_nv_work);
+ complete(&wcnss->ack);
break;
case WCNSS_DOWNLOAD_NV_RESP:
if (count != sizeof(*nvresp)) {
@@ -141,6 +157,10 @@ static int wcnss_ctrl_smd_callback(struct qcom_smd_channel *channel,
wcnss->ack_status = nvresp->status;
complete(&wcnss->ack);
break;
+ case WCNSS_CBC_COMPLETE_IND:
+ dev_dbg(wcnss->dev, "cold boot complete\n");
+ complete(&wcnss->cbc);
+ break;
default:
dev_info(wcnss->dev, "unknown message type %d\n", hdr->type);
break;
@@ -156,20 +176,32 @@ static int wcnss_ctrl_smd_callback(struct qcom_smd_channel *channel,
static int wcnss_request_version(struct wcnss_ctrl *wcnss)
{
struct wcnss_msg_hdr msg;
+ int ret;
msg.type = WCNSS_VERSION_REQ;
msg.len = sizeof(msg);
+ ret = qcom_smd_send(wcnss->channel, &msg, sizeof(msg));
+ if (ret < 0)
+ return ret;
+
+ ret = wait_for_completion_timeout(&wcnss->ack, WCNSS_CBC_TIMEOUT);
+ if (!ret) {
+ dev_err(wcnss->dev, "timeout waiting for version response\n");
+ return -ETIMEDOUT;
+ }
- return qcom_smd_send(wcnss->channel, &msg, sizeof(msg));
+ return 0;
}
/**
* wcnss_download_nv() - send nv binary to WCNSS
- * @work: work struct to acquire wcnss context
+ * @wcnss: wcnss_ctrl state handle
+ * @expect_cbc: indicator to the caller that a cold boot complete (CBC) indication is expected
+ *
+ * Returns 0 on success. Negative errno on failure.
*/
-static void wcnss_download_nv(struct work_struct *work)
+static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc)
{
- struct wcnss_ctrl *wcnss = container_of(work, struct wcnss_ctrl, download_nv_work);
struct wcnss_download_nv_req *req;
const struct firmware *fw;
const void *data;
@@ -178,10 +210,10 @@ static void wcnss_download_nv(struct work_struct *work)
req = kzalloc(sizeof(*req) + NV_FRAGMENT_SIZE, GFP_KERNEL);
if (!req)
- return;
+ return -ENOMEM;
ret = reject_firmware(&fw, NVBIN_FILE, wcnss->dev);
- if (ret) {
+ if (ret < 0) {
dev_err(wcnss->dev, "Failed to load nv file %s: %d\n",
NVBIN_FILE, ret);
goto free_req;
@@ -207,7 +239,7 @@ static void wcnss_download_nv(struct work_struct *work)
memcpy(req->fragment, data, req->frag_size);
ret = qcom_smd_send(wcnss->channel, req, req->hdr.len);
- if (ret) {
+ if (ret < 0) {
dev_err(wcnss->dev, "failed to send smd packet\n");
goto release_fw;
}
@@ -220,16 +252,58 @@ static void wcnss_download_nv(struct work_struct *work)
} while (left > 0);
ret = wait_for_completion_timeout(&wcnss->ack, WCNSS_REQUEST_TIMEOUT);
- if (!ret)
+ if (!ret) {
dev_err(wcnss->dev, "timeout waiting for nv upload ack\n");
- else if (wcnss->ack_status != 1)
- dev_err(wcnss->dev, "nv upload response failed err: %d\n",
- wcnss->ack_status);
+ ret = -ETIMEDOUT;
+ } else {
+ *expect_cbc = wcnss->ack_status == WCNSS_ACK_COLD_BOOTING;
+ ret = 0;
+ }
release_fw:
release_firmware(fw);
free_req:
kfree(req);
+
+ return ret;
+}
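
For illustration, the fragmentation performed by the downloader above, assuming a hypothetical 7000-byte nv blob:

/*
 * 7000 bytes go out as two full NV_FRAGMENT_SIZE (3072-byte)
 * fragments followed by one 856-byte tail fragment; once the final
 * fragment is received, the firmware acks the upload with
 * WCNSS_DOWNLOAD_NV_RESP, which completes &wcnss->ack above.
 */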
+
+/**
+ * qcom_wcnss_open_channel() - open additional SMD channel to WCNSS
+ * @wcnss: wcnss handle, retrieved from drvdata
+ * @name: SMD channel name
+ * @cb: callback to handle incoming data on the channel
+ */
+struct qcom_smd_channel *qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb)
+{
+ struct wcnss_ctrl *_wcnss = wcnss;
+
+ return qcom_smd_open_channel(_wcnss->channel, name, cb);
+}
+EXPORT_SYMBOL(qcom_wcnss_open_channel);
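
A hypothetical child-driver call of the exported helper (channel name and callback are made up for illustration):

	struct qcom_smd_channel *ch;

	ch = qcom_wcnss_open_channel(wcnss, "WLAN_CTRL", wifi_smd_callback);
	if (IS_ERR(ch))
		return PTR_ERR(ch);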
+
+static void wcnss_async_probe(struct work_struct *work)
+{
+ struct wcnss_ctrl *wcnss = container_of(work, struct wcnss_ctrl, probe_work);
+ bool expect_cbc;
+ int ret;
+
+ ret = wcnss_request_version(wcnss);
+ if (ret < 0)
+ return;
+
+ ret = wcnss_download_nv(wcnss, &expect_cbc);
+ if (ret < 0)
+ return;
+
+ /* Wait for pending cold boot completion if indicated by the nv downloader */
+ if (expect_cbc) {
+ ret = wait_for_completion_timeout(&wcnss->cbc, WCNSS_REQUEST_TIMEOUT);
+ if (!ret)
+ dev_err(wcnss->dev, "expected cold boot completion\n");
+ }
+
+ of_platform_populate(wcnss->dev->of_node, NULL, NULL, wcnss->dev);
}
static int wcnss_ctrl_probe(struct qcom_smd_device *sdev)
@@ -244,25 +318,38 @@ static int wcnss_ctrl_probe(struct qcom_smd_device *sdev)
wcnss->channel = sdev->channel;
init_completion(&wcnss->ack);
- INIT_WORK(&wcnss->download_nv_work, wcnss_download_nv);
+ init_completion(&wcnss->cbc);
+ INIT_WORK(&wcnss->probe_work, wcnss_async_probe);
qcom_smd_set_drvdata(sdev->channel, wcnss);
+ dev_set_drvdata(&sdev->dev, wcnss);
+
+ schedule_work(&wcnss->probe_work);
+
+ return 0;
+}
+
+static void wcnss_ctrl_remove(struct qcom_smd_device *sdev)
+{
+ struct wcnss_ctrl *wcnss = qcom_smd_get_drvdata(sdev->channel);
- return wcnss_request_version(wcnss);
+ cancel_work_sync(&wcnss->probe_work);
+ of_platform_depopulate(&sdev->dev);
}
-static const struct qcom_smd_id wcnss_ctrl_smd_match[] = {
- { .name = "WCNSS_CTRL" },
+static const struct of_device_id wcnss_ctrl_of_match[] = {
+ { .compatible = "qcom,wcnss", },
{}
};
static struct qcom_smd_driver wcnss_ctrl_driver = {
.probe = wcnss_ctrl_probe,
+ .remove = wcnss_ctrl_remove,
.callback = wcnss_ctrl_smd_callback,
- .smd_match_table = wcnss_ctrl_smd_match,
.driver = {
.name = "qcom_wcnss_ctrl",
.owner = THIS_MODULE,
+ .of_match_table = wcnss_ctrl_of_match,
},
};
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
index 151fcd3f0..623039c35 100644
--- a/drivers/soc/renesas/Makefile
+++ b/drivers/soc/renesas/Makefile
@@ -1,7 +1,9 @@
obj-$(CONFIG_ARCH_R8A7779) += rcar-sysc.o r8a7779-sysc.o
obj-$(CONFIG_ARCH_R8A7790) += rcar-sysc.o r8a7790-sysc.o
obj-$(CONFIG_ARCH_R8A7791) += rcar-sysc.o r8a7791-sysc.o
+obj-$(CONFIG_ARCH_R8A7792) += rcar-sysc.o r8a7792-sysc.o
# R-Car M2-N is identical to R-Car M2-W w.r.t. power domains.
obj-$(CONFIG_ARCH_R8A7793) += rcar-sysc.o r8a7791-sysc.o
obj-$(CONFIG_ARCH_R8A7794) += rcar-sysc.o r8a7794-sysc.o
obj-$(CONFIG_ARCH_R8A7795) += rcar-sysc.o r8a7795-sysc.o
+obj-$(CONFIG_ARCH_R8A7796) += rcar-sysc.o r8a7796-sysc.o
diff --git a/drivers/soc/renesas/r8a7792-sysc.c b/drivers/soc/renesas/r8a7792-sysc.c
new file mode 100644
index 000000000..ca7467d7b
--- /dev/null
+++ b/drivers/soc/renesas/r8a7792-sysc.c
@@ -0,0 +1,34 @@
+/*
+ * Renesas R-Car V2H (R8A7792) System Controller
+ *
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7792-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7792_areas[] __initconst = {
+ { "always-on", 0, 0, R8A7792_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca15-scu", 0x180, 0, R8A7792_PD_CA15_SCU, R8A7792_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca15-cpu0", 0x40, 0, R8A7792_PD_CA15_CPU0, R8A7792_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "ca15-cpu1", 0x40, 1, R8A7792_PD_CA15_CPU1, R8A7792_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "sgx", 0xc0, 0, R8A7792_PD_SGX, R8A7792_PD_ALWAYS_ON },
+ { "imp", 0x140, 0, R8A7792_PD_IMP, R8A7792_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7792_sysc_info __initconst = {
+ .areas = r8a7792_areas,
+ .num_areas = ARRAY_SIZE(r8a7792_areas),
+};
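
For readability, the positional initializers above map onto struct rcar_sysc_area as in this sketch of the "ca15-scu" entry (field names assumed from rcar-sysc.h):

	{
		.name      = "ca15-scu",
		.chan_offs = 0x180,		/* PWRSR register offset */
		.chan_bit  = 0,
		.isr_bit   = R8A7792_PD_CA15_SCU,
		.parent    = R8A7792_PD_ALWAYS_ON,
		.flags     = PD_SCU,
	}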
diff --git a/drivers/soc/renesas/r8a7796-sysc.c b/drivers/soc/renesas/r8a7796-sysc.c
new file mode 100644
index 000000000..f700c842b
--- /dev/null
+++ b/drivers/soc/renesas/r8a7796-sysc.c
@@ -0,0 +1,48 @@
+/*
+ * Renesas R-Car M3-W System Controller
+ *
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7796-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7796_areas[] __initconst = {
+ { "always-on", 0, 0, R8A7796_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca57-scu", 0x1c0, 0, R8A7796_PD_CA57_SCU, R8A7796_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca57-cpu0", 0x80, 0, R8A7796_PD_CA57_CPU0, R8A7796_PD_CA57_SCU,
+ PD_CPU_NOCR },
+ { "ca57-cpu1", 0x80, 1, R8A7796_PD_CA57_CPU1, R8A7796_PD_CA57_SCU,
+ PD_CPU_NOCR },
+ { "ca53-scu", 0x140, 0, R8A7796_PD_CA53_SCU, R8A7796_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca53-cpu0", 0x200, 0, R8A7796_PD_CA53_CPU0, R8A7796_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "ca53-cpu1", 0x200, 1, R8A7796_PD_CA53_CPU1, R8A7796_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "ca53-cpu2", 0x200, 2, R8A7796_PD_CA53_CPU2, R8A7796_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "ca53-cpu3", 0x200, 3, R8A7796_PD_CA53_CPU3, R8A7796_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "cr7", 0x240, 0, R8A7796_PD_CR7, R8A7796_PD_ALWAYS_ON },
+ { "a3vc", 0x380, 0, R8A7796_PD_A3VC, R8A7796_PD_ALWAYS_ON },
+ { "a2vc0", 0x3c0, 0, R8A7796_PD_A2VC0, R8A7796_PD_A3VC },
+ { "a2vc1", 0x3c0, 1, R8A7796_PD_A2VC1, R8A7796_PD_A3VC },
+ { "3dg-a", 0x100, 0, R8A7796_PD_3DG_A, R8A7796_PD_ALWAYS_ON },
+ { "3dg-b", 0x100, 1, R8A7796_PD_3DG_B, R8A7796_PD_3DG_A },
+ { "a3ir", 0x180, 0, R8A7796_PD_A3IR, R8A7796_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7796_sysc_info __initconst = {
+ .areas = r8a7796_areas,
+ .num_areas = ARRAY_SIZE(r8a7796_areas),
+};
diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c
index 79dbc7708..65c8e1eb9 100644
--- a/drivers/soc/renesas/rcar-sysc.c
+++ b/drivers/soc/renesas/rcar-sysc.c
@@ -164,15 +164,6 @@ static bool rcar_sysc_power_is_off(const struct rcar_sysc_ch *sysc_ch)
return false;
}
-void __iomem *rcar_sysc_init(phys_addr_t base)
-{
- rcar_sysc_base = ioremap_nocache(base, PAGE_SIZE);
- if (!rcar_sysc_base)
- panic("unable to ioremap R-Car SYSC hardware block\n");
-
- return rcar_sysc_base;
-}
-
struct rcar_sysc_pd {
struct generic_pm_domain genpd;
struct rcar_sysc_ch ch;
@@ -293,6 +284,9 @@ static const struct of_device_id rcar_sysc_matches[] = {
#ifdef CONFIG_ARCH_R8A7791
{ .compatible = "renesas,r8a7791-sysc", .data = &r8a7791_sysc_info },
#endif
+#ifdef CONFIG_ARCH_R8A7792
+ { .compatible = "renesas,r8a7792-sysc", .data = &r8a7792_sysc_info },
+#endif
#ifdef CONFIG_ARCH_R8A7793
/* R-Car M2-N is identical to R-Car M2-W w.r.t. power domains. */
{ .compatible = "renesas,r8a7793-sysc", .data = &r8a7791_sysc_info },
@@ -303,6 +297,9 @@ static const struct of_device_id rcar_sysc_matches[] = {
#ifdef CONFIG_ARCH_R8A7795
{ .compatible = "renesas,r8a7795-sysc", .data = &r8a7795_sysc_info },
#endif
+#ifdef CONFIG_ARCH_R8A7796
+ { .compatible = "renesas,r8a7796-sysc", .data = &r8a7796_sysc_info },
+#endif
{ /* sentinel */ }
};
@@ -322,6 +319,9 @@ static int __init rcar_sysc_pd_init(void)
unsigned int i;
int error;
+ if (rcar_sysc_base)
+ return 0;
+
np = of_find_matching_node_and_match(NULL, rcar_sysc_matches, &match);
if (!np)
return -ENODEV;
@@ -392,10 +392,35 @@ static int __init rcar_sysc_pd_init(void)
domains->domains[area->isr_bit] = &pd->genpd;
}
- of_genpd_add_provider_onecell(np, &domains->onecell_data);
+ error = of_genpd_add_provider_onecell(np, &domains->onecell_data);
out_put:
of_node_put(np);
return error;
}
early_initcall(rcar_sysc_pd_init);
+
+void __init rcar_sysc_init(phys_addr_t base, u32 syscier)
+{
+ u32 syscimr;
+
+ if (!rcar_sysc_pd_init())
+ return;
+
+ rcar_sysc_base = ioremap_nocache(base, PAGE_SIZE);
+
+ /*
+ * Mask all interrupt sources to prevent the CPU from receiving them.
+ * Make sure not to clear reserved bits that were set before.
+ */
+ syscimr = ioread32(rcar_sysc_base + SYSCIMR);
+ syscimr |= syscier;
+ pr_debug("%s: syscimr = 0x%08x\n", __func__, syscimr);
+ iowrite32(syscimr, rcar_sysc_base + SYSCIMR);
+
+ /*
+ * SYSC needs all interrupt sources enabled to control power.
+ */
+ pr_debug("%s: syscier = 0x%08x\n", __func__, syscier);
+ iowrite32(syscier, rcar_sysc_base + SYSCIER);
+}
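
A worked example of the mask update above, with hypothetical register values:

/*
 * If SYSCIMR reads back 0x40000000 (a reserved bit already set) and
 * syscier is 0x0000013F, the value written is
 * 0x40000000 | 0x0000013F = 0x4000013F: the reserved bit is kept and
 * every SYSC interrupt source is masked from the CPU, while SYSCIER
 * is then set to 0x0000013F so the SYSC itself can use those sources.
 */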
diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h
index 5e766174c..77dbe8614 100644
--- a/drivers/soc/renesas/rcar-sysc.h
+++ b/drivers/soc/renesas/rcar-sysc.h
@@ -53,6 +53,8 @@ struct rcar_sysc_info {
extern const struct rcar_sysc_info r8a7779_sysc_info;
extern const struct rcar_sysc_info r8a7790_sysc_info;
extern const struct rcar_sysc_info r8a7791_sysc_info;
+extern const struct rcar_sysc_info r8a7792_sysc_info;
extern const struct rcar_sysc_info r8a7794_sysc_info;
extern const struct rcar_sysc_info r8a7795_sysc_info;
+extern const struct rcar_sysc_info r8a7796_sysc_info;
#endif /* __SOC_RENESAS_RCAR_SYSC_H__ */
diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig
index d7fc12300..245533907 100644
--- a/drivers/soc/samsung/Kconfig
+++ b/drivers/soc/samsung/Kconfig
@@ -10,4 +10,8 @@ config EXYNOS_PMU
bool "Exynos PMU controller driver" if COMPILE_TEST
depends on (ARM && ARCH_EXYNOS) || ((ARM || ARM64) && COMPILE_TEST)
+config EXYNOS_PM_DOMAINS
+ bool "Exynos PM domains" if COMPILE_TEST
+ depends on PM_GENERIC_DOMAINS || COMPILE_TEST
+
endif
diff --git a/drivers/soc/samsung/Makefile b/drivers/soc/samsung/Makefile
index f64ac4d80..3619f2ecd 100644
--- a/drivers/soc/samsung/Makefile
+++ b/drivers/soc/samsung/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_EXYNOS_PMU) += exynos-pmu.o exynos3250-pmu.o exynos4-pmu.o \
exynos5250-pmu.o exynos5420-pmu.o
+obj-$(CONFIG_EXYNOS_PM_DOMAINS) += pm_domains.o
diff --git a/drivers/soc/samsung/exynos3250-pmu.c b/drivers/soc/samsung/exynos3250-pmu.c
index 20b3ab8aa..af2f54e14 100644
--- a/drivers/soc/samsung/exynos3250-pmu.c
+++ b/drivers/soc/samsung/exynos3250-pmu.c
@@ -14,7 +14,7 @@
#include "exynos-pmu.h"
-static struct exynos_pmu_conf exynos3250_pmu_config[] = {
+static const struct exynos_pmu_conf exynos3250_pmu_config[] = {
/* { .offset = offset, .val = { AFTR, W-AFTR, SLEEP } */
{ EXYNOS3_ARM_CORE0_SYS_PWR_REG, { 0x0, 0x0, 0x2} },
{ EXYNOS3_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
diff --git a/drivers/soc/samsung/exynos5420-pmu.c b/drivers/soc/samsung/exynos5420-pmu.c
index b962fb6a5..3f2c64180 100644
--- a/drivers/soc/samsung/exynos5420-pmu.c
+++ b/drivers/soc/samsung/exynos5420-pmu.c
@@ -17,7 +17,7 @@
#include "exynos-pmu.h"
-static struct exynos_pmu_conf exynos5420_pmu_config[] = {
+static const struct exynos_pmu_conf exynos5420_pmu_config[] = {
/* { .offset = offset, .val = { AFTR, LPA, SLEEP } */
{ EXYNOS5_ARM_CORE0_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
{ EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
diff --git a/drivers/soc/samsung/pm_domains.c b/drivers/soc/samsung/pm_domains.c
new file mode 100644
index 000000000..4822346aa
--- /dev/null
+++ b/drivers/soc/samsung/pm_domains.c
@@ -0,0 +1,245 @@
+/*
+ * Exynos Generic power domain support.
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Implementation of Exynos-specific power domain control, used in
+ * conjunction with runtime PM. Both device-tree and non-device-tree
+ * based power domains are supported.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/pm_domain.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/sched.h>
+
+#define MAX_CLK_PER_DOMAIN 4
+
+struct exynos_pm_domain_config {
+ /* Value for LOCAL_PWR_CFG and STATUS fields for each domain */
+ u32 local_pwr_cfg;
+};
+
+/*
+ * Exynos specific wrapper around the generic power domain
+ */
+struct exynos_pm_domain {
+ void __iomem *base;
+ char const *name;
+ bool is_off;
+ struct generic_pm_domain pd;
+ struct clk *oscclk;
+ struct clk *clk[MAX_CLK_PER_DOMAIN];
+ struct clk *pclk[MAX_CLK_PER_DOMAIN];
+ struct clk *asb_clk[MAX_CLK_PER_DOMAIN];
+ u32 local_pwr_cfg;
+};
+
+static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
+{
+ struct exynos_pm_domain *pd;
+ void __iomem *base;
+ u32 timeout, pwr;
+ char *op;
+ int i;
+
+ pd = container_of(domain, struct exynos_pm_domain, pd);
+ base = pd->base;
+
+ for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
+ if (IS_ERR(pd->asb_clk[i]))
+ break;
+ clk_prepare_enable(pd->asb_clk[i]);
+ }
+
+ /* Set oscclk before powering off a domain */
+ if (!power_on) {
+ for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
+ if (IS_ERR(pd->clk[i]))
+ break;
+ pd->pclk[i] = clk_get_parent(pd->clk[i]);
+ if (clk_set_parent(pd->clk[i], pd->oscclk))
+ pr_err("%s: error setting oscclk as parent to clock %d\n",
+ pd->name, i);
+ }
+ }
+
+ pwr = power_on ? pd->local_pwr_cfg : 0;
+ writel_relaxed(pwr, base);
+
+ /* Wait max 1ms */
+ timeout = 10;
+
+ while ((readl_relaxed(base + 0x4) & pd->local_pwr_cfg) != pwr) {
+ if (!timeout) {
+ op = (power_on) ? "enable" : "disable";
+ pr_err("Power domain %s %s failed\n", domain->name, op);
+ return -ETIMEDOUT;
+ }
+ timeout--;
+ cpu_relax();
+ usleep_range(80, 100);
+ }
+
+ /* Restore clocks after powering on a domain */
+ if (power_on) {
+ for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
+ if (IS_ERR(pd->clk[i]))
+ break;
+
+ if (IS_ERR(pd->pclk[i]))
+ continue; /* Skip on first power up */
+ if (clk_set_parent(pd->clk[i], pd->pclk[i]))
+ pr_err("%s: error setting parent to clock%d\n",
+ pd->name, i);
+ }
+ }
+
+ for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
+ if (IS_ERR(pd->asb_clk[i]))
+ break;
+ clk_disable_unprepare(pd->asb_clk[i]);
+ }
+
+ return 0;
+}
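
The open-coded wait above allows roughly 1 ms (10 polls of 80-100 µs); a behaviour-equivalent sketch using the generic helper from <linux/iopoll.h> (not a drop-in, error reporting omitted):

	u32 status;
	int ret;

	/* poll every ~100 us, 1 ms total budget */
	ret = readl_poll_timeout(base + 0x4, status,
				 (status & pd->local_pwr_cfg) == pwr,
				 100, 1000);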
+
+static int exynos_pd_power_on(struct generic_pm_domain *domain)
+{
+ return exynos_pd_power(domain, true);
+}
+
+static int exynos_pd_power_off(struct generic_pm_domain *domain)
+{
+ return exynos_pd_power(domain, false);
+}
+
+static const struct exynos_pm_domain_config exynos4210_cfg __initconst = {
+ .local_pwr_cfg = 0x7,
+};
+
+static const struct of_device_id exynos_pm_domain_of_match[] __initconst = {
+ {
+ .compatible = "samsung,exynos4210-pd",
+ .data = &exynos4210_cfg,
+ },
+ { },
+};
+
+static __init int exynos4_pm_init_power_domain(void)
+{
+ struct device_node *np;
+ const struct of_device_id *match;
+
+ for_each_matching_node_and_match(np, exynos_pm_domain_of_match, &match) {
+ const struct exynos_pm_domain_config *pm_domain_cfg;
+ struct exynos_pm_domain *pd;
+ int on, i;
+
+ pm_domain_cfg = match->data;
+
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd) {
+ pr_err("%s: failed to allocate memory for domain\n",
+ __func__);
+ of_node_put(np);
+ return -ENOMEM;
+ }
+ pd->pd.name = kstrdup_const(strrchr(np->full_name, '/') + 1,
+ GFP_KERNEL);
+ if (!pd->pd.name) {
+ kfree(pd);
+ of_node_put(np);
+ return -ENOMEM;
+ }
+
+ pd->name = pd->pd.name;
+ pd->base = of_iomap(np, 0);
+ if (!pd->base) {
+ pr_warn("%s: failed to map memory\n", __func__);
+ kfree_const(pd->pd.name);
+ kfree(pd);
+ continue;
+ }
+
+ pd->pd.power_off = exynos_pd_power_off;
+ pd->pd.power_on = exynos_pd_power_on;
+ pd->local_pwr_cfg = pm_domain_cfg->local_pwr_cfg;
+
+ for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
+ char clk_name[8];
+
+ snprintf(clk_name, sizeof(clk_name), "asb%d", i);
+ pd->asb_clk[i] = of_clk_get_by_name(np, clk_name);
+ if (IS_ERR(pd->asb_clk[i]))
+ break;
+ }
+
+ pd->oscclk = of_clk_get_by_name(np, "oscclk");
+ if (IS_ERR(pd->oscclk))
+ goto no_clk;
+
+ for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
+ char clk_name[8];
+
+ snprintf(clk_name, sizeof(clk_name), "clk%d", i);
+ pd->clk[i] = of_clk_get_by_name(np, clk_name);
+ if (IS_ERR(pd->clk[i]))
+ break;
+ /*
+ * Skip setting parent on first power up.
+ * The parent at this time may not be useful at all.
+ */
+ pd->pclk[i] = ERR_PTR(-EINVAL);
+ }
+
+ if (IS_ERR(pd->clk[0]))
+ clk_put(pd->oscclk);
+
+no_clk:
+ on = readl_relaxed(pd->base + 0x4) & pd->local_pwr_cfg;
+
+ pm_genpd_init(&pd->pd, NULL, !on);
+ of_genpd_add_provider_simple(np, &pd->pd);
+ }
+
+ /* Assign the child power domains to their parents */
+ for_each_matching_node(np, exynos_pm_domain_of_match) {
+ struct generic_pm_domain *child_domain, *parent_domain;
+ struct of_phandle_args args;
+
+ args.np = np;
+ args.args_count = 0;
+ child_domain = of_genpd_get_from_provider(&args);
+ if (IS_ERR(child_domain))
+ continue;
+
+ if (of_parse_phandle_with_args(np, "power-domains",
+ "#power-domain-cells", 0, &args) != 0)
+ continue;
+
+ parent_domain = of_genpd_get_from_provider(&args);
+ if (IS_ERR(parent_domain))
+ continue;
+
+ if (pm_genpd_add_subdomain(parent_domain, child_domain))
+ pr_warn("%s failed to add subdomain: %s\n",
+ parent_domain->name, child_domain->name);
+ else
+ pr_info("%s has as child subdomain: %s.\n",
+ parent_domain->name, child_domain->name);
+ }
+
+ return 0;
+}
+core_initcall(exynos4_pm_init_power_domain);
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 5031c89b0..71c834f38 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -51,6 +51,7 @@
#define PMC_CNTRL_CPU_PWRREQ_POLARITY (1 << 15) /* CPU pwr req polarity */
#define PMC_CNTRL_CPU_PWRREQ_OE (1 << 16) /* CPU pwr req enable */
#define PMC_CNTRL_INTR_POLARITY (1 << 17) /* inverts INTR polarity */
+#define PMC_CNTRL_MAIN_RST (1 << 4)
#define DPD_SAMPLE 0x020
#define DPD_SAMPLE_ENABLE (1 << 0)
@@ -80,6 +81,14 @@
#define PMC_SENSOR_CTRL_SCRATCH_WRITE (1 << 2)
#define PMC_SENSOR_CTRL_ENABLE_RST (1 << 1)
+#define PMC_RST_STATUS 0x1b4
+#define PMC_RST_STATUS_POR 0
+#define PMC_RST_STATUS_WATCHDOG 1
+#define PMC_RST_STATUS_SENSOR 2
+#define PMC_RST_STATUS_SW_MAIN 3
+#define PMC_RST_STATUS_LP0 4
+#define PMC_RST_STATUS_AOTAG 5
+
#define IO_DPD_REQ 0x1b8
#define IO_DPD_REQ_CODE_IDLE (0 << 30)
#define IO_DPD_REQ_CODE_OFF (1 << 30)
@@ -399,6 +408,7 @@ static int tegra_powergate_power_up(struct tegra_powergate *pg,
disable_clks:
tegra_powergate_disable_clocks(pg);
usleep_range(10, 20);
+
powergate_off:
tegra_powergate_set(pg->id, false);
@@ -436,6 +446,7 @@ assert_resets:
usleep_range(10, 20);
tegra_powergate_reset_deassert(pg);
usleep_range(10, 20);
+
disable_clks:
tegra_powergate_disable_clocks(pg);
@@ -540,6 +551,9 @@ int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk,
struct tegra_powergate pg;
int err;
+ if (!tegra_powergate_is_available(id))
+ return -EINVAL;
+
pg.id = id;
pg.clks = &clk;
pg.num_clks = 1;
@@ -638,9 +652,10 @@ static int tegra_pmc_restart_notify(struct notifier_block *this,
tegra_pmc_writel(value, PMC_SCRATCH0);
- value = tegra_pmc_readl(0);
- value |= 0x10;
- tegra_pmc_writel(value, 0);
+ /* reset everything but PMC_SCRATCH0 and PMC_RST_STATUS */
+ value = tegra_pmc_readl(PMC_CNTRL);
+ value |= PMC_CNTRL_MAIN_RST;
+ tegra_pmc_writel(value, PMC_CNTRL);
return NOTIFY_DONE;
}
@@ -722,13 +737,14 @@ static int tegra_powergate_of_get_clks(struct tegra_powergate *pg,
err:
while (i--)
clk_put(pg->clks[i]);
+
kfree(pg->clks);
return err;
}
static int tegra_powergate_of_get_resets(struct tegra_powergate *pg,
- struct device_node *np)
+ struct device_node *np, bool off)
{
struct reset_control *rst;
unsigned int i, count;
@@ -748,6 +764,16 @@ static int tegra_powergate_of_get_resets(struct tegra_powergate *pg,
err = PTR_ERR(pg->resets[i]);
goto error;
}
+
+ if (off)
+ err = reset_control_assert(pg->resets[i]);
+ else
+ err = reset_control_deassert(pg->resets[i]);
+
+ if (err) {
+ reset_control_put(pg->resets[i]);
+ goto error;
+ }
}
pg->num_resets = count;
@@ -757,6 +783,7 @@ static int tegra_powergate_of_get_resets(struct tegra_powergate *pg,
error:
while (i--)
reset_control_put(pg->resets[i]);
+
kfree(pg->resets);
return err;
@@ -765,16 +792,19 @@ error:
static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
{
struct tegra_powergate *pg;
+ int id, err;
bool off;
- int id;
pg = kzalloc(sizeof(*pg), GFP_KERNEL);
if (!pg)
- goto error;
+ return;
id = tegra_powergate_lookup(pmc, np->name);
- if (id < 0)
+ if (id < 0) {
+ dev_err(pmc->dev, "powergate lookup failed for %s: %d\n",
+ np->name, id);
goto free_mem;
+ }
/*
* Clear the bit for this powergate so it cannot be managed
@@ -788,31 +818,64 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
pg->genpd.power_on = tegra_genpd_power_on;
pg->pmc = pmc;
- if (tegra_powergate_of_get_clks(pg, np))
+ off = !tegra_powergate_is_powered(pg->id);
+
+ err = tegra_powergate_of_get_clks(pg, np);
+ if (err < 0) {
+ dev_err(pmc->dev, "failed to get clocks for %s: %d\n",
+ np->name, err);
goto set_available;
+ }
- if (tegra_powergate_of_get_resets(pg, np))
+ err = tegra_powergate_of_get_resets(pg, np, off);
+ if (err < 0) {
+ dev_err(pmc->dev, "failed to get resets for %s: %d\n",
+ np->name, err);
goto remove_clks;
+ }
- off = !tegra_powergate_is_powered(pg->id);
+ if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS))
+ goto power_on_cleanup;
+
+ /*
+ * FIXME: If XHCI is enabled for Tegra, then power-up the XUSB
+ * host and super-speed partitions. Once the XHCI driver
+ * manages the partitions itself this code can be removed. Note
+ * that we don't register these partitions with the genpd core
+ * to avoid it from powering down the partitions as they appear
+ * to be unused.
+ */
+ if (IS_ENABLED(CONFIG_USB_XHCI_TEGRA) &&
+ (id == TEGRA_POWERGATE_XUSBA || id == TEGRA_POWERGATE_XUSBC))
+ goto power_on_cleanup;
pm_genpd_init(&pg->genpd, NULL, off);
- if (of_genpd_add_provider_simple(np, &pg->genpd))
+ err = of_genpd_add_provider_simple(np, &pg->genpd);
+ if (err < 0) {
+ dev_err(pmc->dev, "failed to add genpd provider for %s: %d\n",
+ np->name, err);
goto remove_resets;
+ }
dev_dbg(pmc->dev, "added power domain %s\n", pg->genpd.name);
return;
+power_on_cleanup:
+ if (off)
+ WARN_ON(tegra_powergate_power_up(pg, true));
+
remove_resets:
while (pg->num_resets--)
reset_control_put(pg->resets[pg->num_resets]);
+
kfree(pg->resets);
remove_clks:
while (pg->num_clks--)
clk_put(pg->clks[pg->num_clks]);
+
kfree(pg->clks);
set_available:
@@ -820,16 +883,20 @@ set_available:
free_mem:
kfree(pg);
-
-error:
- dev_err(pmc->dev, "failed to create power domain for %s\n", np->name);
}
-static void tegra_powergate_init(struct tegra_pmc *pmc)
+static void tegra_powergate_init(struct tegra_pmc *pmc,
+ struct device_node *parent)
{
struct device_node *np, *child;
+ unsigned int i;
- np = of_get_child_by_name(pmc->dev->of_node, "powergates");
+ /* Create a bitmap of the available and valid partitions */
+ for (i = 0; i < pmc->soc->num_powergates; i++)
+ if (pmc->soc->powergates[i])
+ set_bit(i, pmc->powergates_available);
+
+ np = of_get_child_by_name(parent, "powergates");
if (!np)
return;
@@ -1250,8 +1317,6 @@ static int tegra_pmc_probe(struct platform_device *pdev)
return err;
}
- tegra_powergate_init(pmc);
-
mutex_lock(&pmc->powergates_lock);
iounmap(pmc->base);
pmc->base = base;
@@ -1485,10 +1550,11 @@ static int __init tegra_pmc_early_init(void)
const struct of_device_id *match;
struct device_node *np;
struct resource regs;
- unsigned int i;
bool invert;
u32 value;
+ mutex_init(&pmc->powergates_lock);
+
np = of_find_matching_node_and_match(NULL, tegra_pmc_match, &match);
if (!np) {
/*
@@ -1523,39 +1589,40 @@ static int __init tegra_pmc_early_init(void)
*/
if (of_address_to_resource(np, 0, &regs) < 0) {
pr_err("failed to get PMC registers\n");
+ of_node_put(np);
return -ENXIO;
}
-
- pmc->soc = match->data;
}
pmc->base = ioremap_nocache(regs.start, resource_size(&regs));
if (!pmc->base) {
pr_err("failed to map PMC registers\n");
+ of_node_put(np);
return -ENXIO;
}
- /* Create a bit-map of the available and valid partitions */
- for (i = 0; i < pmc->soc->num_powergates; i++)
- if (pmc->soc->powergates[i])
- set_bit(i, pmc->powergates_available);
+ if (np) {
+ pmc->soc = match->data;
- mutex_init(&pmc->powergates_lock);
+ tegra_powergate_init(pmc, np);
- /*
- * Invert the interrupt polarity if a PMC device tree node exists and
- * contains the nvidia,invert-interrupt property.
- */
- invert = of_property_read_bool(np, "nvidia,invert-interrupt");
+ /*
+ * Invert the interrupt polarity if a PMC device tree node
+ * exists and contains the nvidia,invert-interrupt property.
+ */
+ invert = of_property_read_bool(np, "nvidia,invert-interrupt");
- value = tegra_pmc_readl(PMC_CNTRL);
+ value = tegra_pmc_readl(PMC_CNTRL);
- if (invert)
- value |= PMC_CNTRL_INTR_POLARITY;
- else
- value &= ~PMC_CNTRL_INTR_POLARITY;
+ if (invert)
+ value |= PMC_CNTRL_INTR_POLARITY;
+ else
+ value &= ~PMC_CNTRL_INTR_POLARITY;
- tegra_pmc_writel(value, PMC_CNTRL);
+ tegra_pmc_writel(value, PMC_CNTRL);
+
+ of_node_put(np);
+ }
return 0;
}
diff --git a/drivers/soc/ux500/Kconfig b/drivers/soc/ux500/Kconfig
new file mode 100644
index 000000000..025a44aef
--- /dev/null
+++ b/drivers/soc/ux500/Kconfig
@@ -0,0 +1,7 @@
+config UX500_SOC_ID
+ bool "SoC bus for ST-Ericsson ux500"
+ depends on ARCH_U8500 || COMPILE_TEST
+ default ARCH_U8500
+ help
+ Include support for the SoC bus on ST-Ericsson ux500 platforms,
+ providing some sysfs information about the ASIC variant.
diff --git a/drivers/soc/ux500/Makefile b/drivers/soc/ux500/Makefile
new file mode 100644
index 000000000..0b87ad04b
--- /dev/null
+++ b/drivers/soc/ux500/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_UX500_SOC_ID) += ux500-soc-id.o
diff --git a/drivers/soc/ux500/ux500-soc-id.c b/drivers/soc/ux500/ux500-soc-id.c
new file mode 100644
index 000000000..6c1be74e5
--- /dev/null
+++ b/drivers/soc/ux500/ux500-soc-id.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/sys_soc.h>
+
+#include <asm/cputype.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+#include <asm/mach/map.h>
+
+/**
+ * struct dbx500_asic_id - fields of the ASIC ID
+ * @partnumber: hitherto 0x8500 for DB8500
+ * @revision: version code in the series
+ * @process: the manufacturing process, 0x40 is 40 nm, 0x00 is "standard"
+ */
+struct dbx500_asic_id {
+ u16 partnumber;
+ u8 revision;
+ u8 process;
+};
+
+static struct dbx500_asic_id dbx500_id;
+
+static unsigned int __init ux500_read_asicid(phys_addr_t addr)
+{
+ void __iomem *virt = ioremap(addr, 4);
+ unsigned int asicid;
+
+ if (!virt)
+ return 0;
+
+ asicid = readl(virt);
+ iounmap(virt);
+
+ return asicid;
+}
+
+static void ux500_print_soc_info(unsigned int asicid)
+{
+ unsigned int rev = dbx500_id.revision;
+
+ pr_info("DB%4x ", dbx500_id.partnumber);
+
+ if (rev == 0x01)
+ pr_cont("Early Drop");
+ else if (rev >= 0xA0)
+ pr_cont("v%d.%d" , (rev >> 4) - 0xA + 1, rev & 0xf);
+ else
+ pr_cont("Unknown");
+
+ pr_cont(" [%#010x]\n", asicid);
+}
+
+static unsigned int partnumber(unsigned int asicid)
+{
+ return (asicid >> 8) & 0xffff;
+}
+
+/*
+ * SOC MIDR ASICID ADDRESS ASICID VALUE
+ * DB8500ed 0x410fc090 0x9001FFF4 0x00850001
+ * DB8500v1 0x411fc091 0x9001FFF4 0x008500A0
+ * DB8500v1.1 0x411fc091 0x9001FFF4 0x008500A1
+ * DB8500v2 0x412fc091 0x9001DBF4 0x008500B0
+ * DB8520v2.2 0x412fc091 0x9001DBF4 0x008500B2
+ * DB5500v1 0x412fc091 0x9001FFF4 0x005500A0
+ * DB9540 0x413fc090 0xFFFFDBF4 0x009540xx
+ */
+
+static void __init ux500_setup_id(void)
+{
+ unsigned int cpuid = read_cpuid_id();
+ unsigned int asicid = 0;
+ phys_addr_t addr = 0;
+
+ switch (cpuid) {
+ case 0x410fc090: /* DB8500ed */
+ case 0x411fc091: /* DB8500v1 */
+ addr = 0x9001FFF4;
+ break;
+
+ case 0x412fc091: /* DB8520 / DB8500v2 / DB5500v1 */
+ asicid = ux500_read_asicid(0x9001DBF4);
+ if (partnumber(asicid) == 0x8500 ||
+ partnumber(asicid) == 0x8520)
+ /* DB8500v2 */
+ break;
+
+ /* DB5500v1 */
+ addr = 0x9001FFF4;
+ break;
+
+ case 0x413fc090: /* DB9540 */
+ addr = 0xFFFFDBF4;
+ break;
+ }
+
+ if (addr)
+ asicid = ux500_read_asicid(addr);
+
+ if (!asicid) {
+ pr_err("Unable to identify SoC\n");
+ BUG();
+ }
+
+ dbx500_id.process = asicid >> 24;
+ dbx500_id.partnumber = partnumber(asicid);
+ dbx500_id.revision = asicid & 0xff;
+
+ ux500_print_soc_info(asicid);
+}
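
A worked decode of one known value from the table above:

/*
 * ASICID 0x008500B0 (DB8500v2): process = 0x008500B0 >> 24 = 0x00
 * ("standard"), partnumber = (0x008500B0 >> 8) & 0xffff = 0x8500,
 * revision = 0xB0, which ux500_print_soc_info() reports as
 * "DB8500 v2.0".
 */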
+
+static const char * __init ux500_get_machine(void)
+{
+ return kasprintf(GFP_KERNEL, "DB%4x", dbx500_id.partnumber);
+}
+
+static const char * __init ux500_get_family(void)
+{
+ return kasprintf(GFP_KERNEL, "ux500");
+}
+
+static const char * __init ux500_get_revision(void)
+{
+ unsigned int rev = dbx500_id.revision;
+
+ if (rev == 0x01)
+ return kasprintf(GFP_KERNEL, "%s", "ED");
+ else if (rev >= 0xA0)
+ return kasprintf(GFP_KERNEL, "%d.%d",
+ (rev >> 4) - 0xA + 1, rev & 0xf);
+
+ return kasprintf(GFP_KERNEL, "%s", "Unknown");
+}
+
+static ssize_t ux500_get_process(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ if (dbx500_id.process == 0x00)
+ return sprintf(buf, "Standard\n");
+
+ return sprintf(buf, "%02xnm\n", dbx500_id.process);
+}
+
+static const char *db8500_read_soc_id(struct device_node *backupram)
+{
+ void __iomem *base;
+ void __iomem *uid;
+ const char *retstr;
+
+ base = of_iomap(backupram, 0);
+ if (!base)
+ return NULL;
+ uid = base + 0x1fc0;
+
+ /* Throw these device-specific numbers into the entropy pool */
+ add_device_randomness(uid, 0x14);
+ retstr = kasprintf(GFP_KERNEL, "%08x%08x%08x%08x%08x",
+ readl((u32 *)uid+0),
+ readl((u32 *)uid+1), readl((u32 *)uid+2),
+ readl((u32 *)uid+3), readl((u32 *)uid+4));
+ iounmap(base);
+ return retstr;
+}
+
+static void __init soc_info_populate(struct soc_device_attribute *soc_dev_attr,
+ struct device_node *backupram)
+{
+ soc_dev_attr->soc_id = db8500_read_soc_id(backupram);
+ soc_dev_attr->machine = ux500_get_machine();
+ soc_dev_attr->family = ux500_get_family();
+ soc_dev_attr->revision = ux500_get_revision();
+}
+
+static const struct device_attribute ux500_soc_attr =
+ __ATTR(process, S_IRUGO, ux500_get_process, NULL);
+
+static int __init ux500_soc_device_init(void)
+{
+ struct device *parent;
+ struct soc_device *soc_dev;
+ struct soc_device_attribute *soc_dev_attr;
+ struct device_node *backupram;
+
+ backupram = of_find_compatible_node(NULL, NULL, "ste,dbx500-backupram");
+ if (!backupram)
+ return 0;
+
+ ux500_setup_id();
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ soc_info_populate(soc_dev_attr, backupram);
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr);
+ return PTR_ERR(soc_dev);
+ }
+
+ parent = soc_device_to_device(soc_dev);
+ device_create_file(parent, &ux500_soc_attr);
+
+ return 0;
+}
+subsys_initcall(ux500_soc_device_init);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 4b931ec8d..d6fb8d4b7 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -411,6 +411,7 @@ config SPI_OMAP24XX
tristate "McSPI driver for OMAP"
depends on HAS_DMA
depends on ARCH_OMAP2PLUS || COMPILE_TEST
+ select SG_SPLIT
help
SPI master controller for OMAP24XX and later Multichannel SPI
(McSPI) modules.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 3c74d0035..185367ef6 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_SPI_MT65XX) += spi-mt65xx.o
obj-$(CONFIG_SPI_MXS) += spi-mxs.o
obj-$(CONFIG_SPI_NUC900) += spi-nuc900.o
obj-$(CONFIG_SPI_OC_TINY) += spi-oc-tiny.o
+spi-octeon-objs := spi-cavium.o spi-cavium-octeon.o
obj-$(CONFIG_SPI_OCTEON) += spi-octeon.o
obj-$(CONFIG_SPI_OMAP_UWIRE) += spi-omap-uwire.o
obj-$(CONFIG_SPI_OMAP_100K) += spi-omap-100k.o
diff --git a/drivers/spi/spi-bfin-sport.c b/drivers/spi/spi-bfin-sport.c
index 6c967555a..01d0ba9c5 100644
--- a/drivers/spi/spi-bfin-sport.c
+++ b/drivers/spi/spi-bfin-sport.c
@@ -64,8 +64,6 @@ struct bfin_sport_spi_master_data {
/* Pin request list */
u16 *pin_req;
- /* Driver message queue */
- struct workqueue_struct *workqueue;
struct work_struct pump_messages;
spinlock_t lock;
struct list_head queue;
@@ -300,7 +298,7 @@ bfin_sport_spi_giveback(struct bfin_sport_spi_master_data *drv_data)
drv_data->cur_msg = NULL;
drv_data->cur_transfer = NULL;
drv_data->cur_chip = NULL;
- queue_work(drv_data->workqueue, &drv_data->pump_messages);
+ schedule_work(&drv_data->pump_messages);
spin_unlock_irqrestore(&drv_data->lock, flags);
if (!drv_data->cs_change)
@@ -556,7 +554,7 @@ bfin_sport_spi_transfer(struct spi_device *spi, struct spi_message *msg)
list_add_tail(&msg->queue, &drv_data->queue);
if (drv_data->run && !drv_data->busy)
- queue_work(drv_data->workqueue, &drv_data->pump_messages);
+ schedule_work(&drv_data->pump_messages);
spin_unlock_irqrestore(&drv_data->lock, flags);
@@ -666,12 +664,7 @@ bfin_sport_spi_init_queue(struct bfin_sport_spi_master_data *drv_data)
tasklet_init(&drv_data->pump_transfers,
bfin_sport_spi_pump_transfers, (unsigned long)drv_data);
- /* init messages workqueue */
INIT_WORK(&drv_data->pump_messages, bfin_sport_spi_pump_messages);
- drv_data->workqueue =
- create_singlethread_workqueue(dev_name(drv_data->master->dev.parent));
- if (drv_data->workqueue == NULL)
- return -EBUSY;
return 0;
}
@@ -694,7 +687,7 @@ bfin_sport_spi_start_queue(struct bfin_sport_spi_master_data *drv_data)
drv_data->cur_chip = NULL;
spin_unlock_irqrestore(&drv_data->lock, flags);
- queue_work(drv_data->workqueue, &drv_data->pump_messages);
+ schedule_work(&drv_data->pump_messages);
return 0;
}
@@ -738,7 +731,7 @@ bfin_sport_spi_destroy_queue(struct bfin_sport_spi_master_data *drv_data)
if (status)
return status;
- destroy_workqueue(drv_data->workqueue);
+ flush_work(&drv_data->pump_messages);
return 0;
}
diff --git a/drivers/spi/spi-bfin5xx.c b/drivers/spi/spi-bfin5xx.c
index 1e91325bf..249c7a367 100644
--- a/drivers/spi/spi-bfin5xx.c
+++ b/drivers/spi/spi-bfin5xx.c
@@ -67,8 +67,6 @@ struct bfin_spi_master_data {
/* BFIN hookup */
struct bfin5xx_spi_master *master_info;
- /* Driver message queue */
- struct workqueue_struct *workqueue;
struct work_struct pump_messages;
spinlock_t lock;
struct list_head queue;
@@ -359,7 +357,7 @@ static void bfin_spi_giveback(struct bfin_spi_master_data *drv_data)
drv_data->cur_msg = NULL;
drv_data->cur_transfer = NULL;
drv_data->cur_chip = NULL;
- queue_work(drv_data->workqueue, &drv_data->pump_messages);
+ schedule_work(&drv_data->pump_messages);
spin_unlock_irqrestore(&drv_data->lock, flags);
msg->state = NULL;
@@ -946,7 +944,7 @@ static int bfin_spi_transfer(struct spi_device *spi, struct spi_message *msg)
list_add_tail(&msg->queue, &drv_data->queue);
if (drv_data->running && !drv_data->busy)
- queue_work(drv_data->workqueue, &drv_data->pump_messages);
+ schedule_work(&drv_data->pump_messages);
spin_unlock_irqrestore(&drv_data->lock, flags);
@@ -1177,12 +1175,7 @@ static int bfin_spi_init_queue(struct bfin_spi_master_data *drv_data)
tasklet_init(&drv_data->pump_transfers,
bfin_spi_pump_transfers, (unsigned long)drv_data);
- /* init messages workqueue */
INIT_WORK(&drv_data->pump_messages, bfin_spi_pump_messages);
- drv_data->workqueue = create_singlethread_workqueue(
- dev_name(drv_data->master->dev.parent));
- if (drv_data->workqueue == NULL)
- return -EBUSY;
return 0;
}
@@ -1204,7 +1197,7 @@ static int bfin_spi_start_queue(struct bfin_spi_master_data *drv_data)
drv_data->cur_chip = NULL;
spin_unlock_irqrestore(&drv_data->lock, flags);
- queue_work(drv_data->workqueue, &drv_data->pump_messages);
+ schedule_work(&drv_data->pump_messages);
return 0;
}
@@ -1246,7 +1239,7 @@ static int bfin_spi_destroy_queue(struct bfin_spi_master_data *drv_data)
if (status != 0)
return status;
- destroy_workqueue(drv_data->workqueue);
+ flush_work(&drv_data->pump_messages);
return 0;
}
diff --git a/drivers/spi/spi-cavium-octeon.c b/drivers/spi/spi-cavium-octeon.c
new file mode 100644
index 000000000..ee4703e84
--- /dev/null
+++ b/drivers/spi/spi-cavium-octeon.c
@@ -0,0 +1,104 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011, 2012 Cavium, Inc.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/of.h>
+
+#include <asm/octeon/octeon.h>
+
+#include "spi-cavium.h"
+
+static int octeon_spi_probe(struct platform_device *pdev)
+{
+ struct resource *res_mem;
+ void __iomem *reg_base;
+ struct spi_master *master;
+ struct octeon_spi *p;
+ int err = -ENOENT;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct octeon_spi));
+ if (!master)
+ return -ENOMEM;
+ p = spi_master_get_devdata(master);
+ platform_set_drvdata(pdev, master);
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg_base = devm_ioremap_resource(&pdev->dev, res_mem);
+ if (IS_ERR(reg_base)) {
+ err = PTR_ERR(reg_base);
+ goto fail;
+ }
+
+ p->register_base = reg_base;
+ p->sys_freq = octeon_get_io_clock_rate();
+
+ p->regs.config = 0;
+ p->regs.status = 0x08;
+ p->regs.tx = 0x10;
+ p->regs.data = 0x80;
+
+ master->num_chipselect = 4;
+ master->mode_bits = SPI_CPHA |
+ SPI_CPOL |
+ SPI_CS_HIGH |
+ SPI_LSB_FIRST |
+ SPI_3WIRE;
+
+ master->transfer_one_message = octeon_spi_transfer_one_message;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->max_speed_hz = OCTEON_SPI_MAX_CLOCK_HZ;
+
+ master->dev.of_node = pdev->dev.of_node;
+ err = devm_spi_register_master(&pdev->dev, master);
+ if (err) {
+ dev_err(&pdev->dev, "register master failed: %d\n", err);
+ goto fail;
+ }
+
+ dev_info(&pdev->dev, "OCTEON SPI bus driver\n");
+
+ return 0;
+fail:
+ spi_master_put(master);
+ return err;
+}
+
+static int octeon_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct octeon_spi *p = spi_master_get_devdata(master);
+
+ /* Clear the CSENA* and put everything in a known state. */
+ writeq(0, p->register_base + OCTEON_SPI_CFG(p));
+
+ return 0;
+}
+
+static const struct of_device_id octeon_spi_match[] = {
+ { .compatible = "cavium,octeon-3010-spi", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, octeon_spi_match);
+
+static struct platform_driver octeon_spi_driver = {
+ .driver = {
+ .name = "spi-octeon",
+ .of_match_table = octeon_spi_match,
+ },
+ .probe = octeon_spi_probe,
+ .remove = octeon_spi_remove,
+};
+
+module_platform_driver(octeon_spi_driver);
+
+MODULE_DESCRIPTION("Cavium, Inc. OCTEON SPI bus driver");
+MODULE_AUTHOR("David Daney");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-cavium.c b/drivers/spi/spi-cavium.c
new file mode 100644
index 000000000..5aaf21582
--- /dev/null
+++ b/drivers/spi/spi-cavium.c
@@ -0,0 +1,151 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011, 2012 Cavium, Inc.
+ */
+
+#include <linux/spi/spi.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include "spi-cavium.h"
+
+static void octeon_spi_wait_ready(struct octeon_spi *p)
+{
+ union cvmx_mpi_sts mpi_sts;
+ unsigned int loops = 0;
+
+ do {
+ if (loops++)
+ __delay(500);
+ mpi_sts.u64 = readq(p->register_base + OCTEON_SPI_STS(p));
+ } while (mpi_sts.s.busy);
+}
+
+static int octeon_spi_do_transfer(struct octeon_spi *p,
+ struct spi_message *msg,
+ struct spi_transfer *xfer,
+ bool last_xfer)
+{
+ struct spi_device *spi = msg->spi;
+ union cvmx_mpi_cfg mpi_cfg;
+ union cvmx_mpi_tx mpi_tx;
+ unsigned int clkdiv;
+ int mode;
+ bool cpha, cpol;
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ int len;
+ int i;
+
+ mode = spi->mode;
+ cpha = mode & SPI_CPHA;
+ cpol = mode & SPI_CPOL;
+
+ clkdiv = p->sys_freq / (2 * xfer->speed_hz);
+
+ mpi_cfg.u64 = 0;
+
+ mpi_cfg.s.clkdiv = clkdiv;
+ mpi_cfg.s.cshi = (mode & SPI_CS_HIGH) ? 1 : 0;
+ mpi_cfg.s.lsbfirst = (mode & SPI_LSB_FIRST) ? 1 : 0;
+ mpi_cfg.s.wireor = (mode & SPI_3WIRE) ? 1 : 0;
+ mpi_cfg.s.idlelo = cpha != cpol;
+ mpi_cfg.s.cslate = cpha ? 1 : 0;
+ mpi_cfg.s.enable = 1;
+
+ if (spi->chip_select < 4)
+ p->cs_enax |= 1ull << (12 + spi->chip_select);
+ mpi_cfg.u64 |= p->cs_enax;
+
+ if (mpi_cfg.u64 != p->last_cfg) {
+ p->last_cfg = mpi_cfg.u64;
+ writeq(mpi_cfg.u64, p->register_base + OCTEON_SPI_CFG(p));
+ }
+ tx_buf = xfer->tx_buf;
+ rx_buf = xfer->rx_buf;
+ len = xfer->len;
+ while (len > OCTEON_SPI_MAX_BYTES) {
+ for (i = 0; i < OCTEON_SPI_MAX_BYTES; i++) {
+ u8 d;
+ if (tx_buf)
+ d = *tx_buf++;
+ else
+ d = 0;
+ writeq(d, p->register_base + OCTEON_SPI_DAT0(p) + (8 * i));
+ }
+ mpi_tx.u64 = 0;
+ mpi_tx.s.csid = spi->chip_select;
+ mpi_tx.s.leavecs = 1;
+ mpi_tx.s.txnum = tx_buf ? OCTEON_SPI_MAX_BYTES : 0;
+ mpi_tx.s.totnum = OCTEON_SPI_MAX_BYTES;
+ writeq(mpi_tx.u64, p->register_base + OCTEON_SPI_TX(p));
+
+ octeon_spi_wait_ready(p);
+ if (rx_buf)
+ for (i = 0; i < OCTEON_SPI_MAX_BYTES; i++) {
+ u64 v = readq(p->register_base + OCTEON_SPI_DAT0(p) + (8 * i));
+ *rx_buf++ = (u8)v;
+ }
+ len -= OCTEON_SPI_MAX_BYTES;
+ }
+
+ for (i = 0; i < len; i++) {
+ u8 d;
+ if (tx_buf)
+ d = *tx_buf++;
+ else
+ d = 0;
+ writeq(d, p->register_base + OCTEON_SPI_DAT0(p) + (8 * i));
+ }
+
+ mpi_tx.u64 = 0;
+ mpi_tx.s.csid = spi->chip_select;
+ if (last_xfer)
+ mpi_tx.s.leavecs = xfer->cs_change;
+ else
+ mpi_tx.s.leavecs = !xfer->cs_change;
+ mpi_tx.s.txnum = tx_buf ? len : 0;
+ mpi_tx.s.totnum = len;
+ writeq(mpi_tx.u64, p->register_base + OCTEON_SPI_TX(p));
+
+ octeon_spi_wait_ready(p);
+ if (rx_buf)
+ for (i = 0; i < len; i++) {
+ u64 v = readq(p->register_base + OCTEON_SPI_DAT0(p) + (8 * i));
+ *rx_buf++ = (u8)v;
+ }
+
+ if (xfer->delay_usecs)
+ udelay(xfer->delay_usecs);
+
+ return xfer->len;
+}
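
A worked example of the clkdiv computation above, assuming a hypothetical 800 MHz I/O clock:

/*
 * At the 16 MHz ceiling (OCTEON_SPI_MAX_CLOCK_HZ):
 *	clkdiv = 800000000 / (2 * 16000000) = 25
 * so MPI_CFG[CLKDIV] = 25 yields an SPI clock of
 * 800 MHz / (2 * 25) = 16 MHz.
 */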
+
+int octeon_spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct octeon_spi *p = spi_master_get_devdata(master);
+ unsigned int total_len = 0;
+ int status = 0;
+ struct spi_transfer *xfer;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ bool last_xfer = list_is_last(&xfer->transfer_list,
+ &msg->transfers);
+ int r = octeon_spi_do_transfer(p, msg, xfer, last_xfer);
+ if (r < 0) {
+ status = r;
+ goto err;
+ }
+ total_len += r;
+ }
+err:
+ msg->status = status;
+ msg->actual_length = total_len;
+ spi_finalize_current_message(master);
+ return status;
+}
diff --git a/drivers/spi/spi-cavium.h b/drivers/spi/spi-cavium.h
new file mode 100644
index 000000000..88c5f36e7
--- /dev/null
+++ b/drivers/spi/spi-cavium.h
@@ -0,0 +1,329 @@
+#ifndef __SPI_CAVIUM_H
+#define __SPI_CAVIUM_H
+
+#define OCTEON_SPI_MAX_BYTES 9
+#define OCTEON_SPI_MAX_CLOCK_HZ 16000000
+
+struct octeon_spi_regs {
+ int config;
+ int status;
+ int tx;
+ int data;
+};
+
+struct octeon_spi {
+ void __iomem *register_base;
+ u64 last_cfg;
+ u64 cs_enax;
+ int sys_freq;
+ struct octeon_spi_regs regs;
+};
+
+#define OCTEON_SPI_CFG(x) (x->regs.config)
+#define OCTEON_SPI_STS(x) (x->regs.status)
+#define OCTEON_SPI_TX(x) (x->regs.tx)
+#define OCTEON_SPI_DAT0(x) (x->regs.data)
+
+int octeon_spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg);
+
+/* MPI register descriptions */
+
+#define CVMX_MPI_CFG (CVMX_ADD_IO_SEG(0x0001070000001000ull))
+#define CVMX_MPI_DATX(offset) (CVMX_ADD_IO_SEG(0x0001070000001080ull) + ((offset) & 15) * 8)
+#define CVMX_MPI_STS (CVMX_ADD_IO_SEG(0x0001070000001008ull))
+#define CVMX_MPI_TX (CVMX_ADD_IO_SEG(0x0001070000001010ull))
+
+union cvmx_mpi_cfg {
+ uint64_t u64;
+ struct cvmx_mpi_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t clkdiv:13;
+ uint64_t csena3:1;
+ uint64_t csena2:1;
+ uint64_t csena1:1;
+ uint64_t csena0:1;
+ uint64_t cslate:1;
+ uint64_t tritx:1;
+ uint64_t idleclks:2;
+ uint64_t cshi:1;
+ uint64_t csena:1;
+ uint64_t int_ena:1;
+ uint64_t lsbfirst:1;
+ uint64_t wireor:1;
+ uint64_t clk_cont:1;
+ uint64_t idlelo:1;
+ uint64_t enable:1;
+#else
+ uint64_t enable:1;
+ uint64_t idlelo:1;
+ uint64_t clk_cont:1;
+ uint64_t wireor:1;
+ uint64_t lsbfirst:1;
+ uint64_t int_ena:1;
+ uint64_t csena:1;
+ uint64_t cshi:1;
+ uint64_t idleclks:2;
+ uint64_t tritx:1;
+ uint64_t cslate:1;
+ uint64_t csena0:1;
+ uint64_t csena1:1;
+ uint64_t csena2:1;
+ uint64_t csena3:1;
+ uint64_t clkdiv:13;
+ uint64_t reserved_29_63:35;
+#endif
+ } s;
+ struct cvmx_mpi_cfg_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t clkdiv:13;
+ uint64_t reserved_12_15:4;
+ uint64_t cslate:1;
+ uint64_t tritx:1;
+ uint64_t idleclks:2;
+ uint64_t cshi:1;
+ uint64_t csena:1;
+ uint64_t int_ena:1;
+ uint64_t lsbfirst:1;
+ uint64_t wireor:1;
+ uint64_t clk_cont:1;
+ uint64_t idlelo:1;
+ uint64_t enable:1;
+#else
+ uint64_t enable:1;
+ uint64_t idlelo:1;
+ uint64_t clk_cont:1;
+ uint64_t wireor:1;
+ uint64_t lsbfirst:1;
+ uint64_t int_ena:1;
+ uint64_t csena:1;
+ uint64_t cshi:1;
+ uint64_t idleclks:2;
+ uint64_t tritx:1;
+ uint64_t cslate:1;
+ uint64_t reserved_12_15:4;
+ uint64_t clkdiv:13;
+ uint64_t reserved_29_63:35;
+#endif
+ } cn30xx;
+ struct cvmx_mpi_cfg_cn31xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t clkdiv:13;
+ uint64_t reserved_11_15:5;
+ uint64_t tritx:1;
+ uint64_t idleclks:2;
+ uint64_t cshi:1;
+ uint64_t csena:1;
+ uint64_t int_ena:1;
+ uint64_t lsbfirst:1;
+ uint64_t wireor:1;
+ uint64_t clk_cont:1;
+ uint64_t idlelo:1;
+ uint64_t enable:1;
+#else
+ uint64_t enable:1;
+ uint64_t idlelo:1;
+ uint64_t clk_cont:1;
+ uint64_t wireor:1;
+ uint64_t lsbfirst:1;
+ uint64_t int_ena:1;
+ uint64_t csena:1;
+ uint64_t cshi:1;
+ uint64_t idleclks:2;
+ uint64_t tritx:1;
+ uint64_t reserved_11_15:5;
+ uint64_t clkdiv:13;
+ uint64_t reserved_29_63:35;
+#endif
+ } cn31xx;
+ struct cvmx_mpi_cfg_cn30xx cn50xx;
+ struct cvmx_mpi_cfg_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t clkdiv:13;
+ uint64_t reserved_14_15:2;
+ uint64_t csena1:1;
+ uint64_t csena0:1;
+ uint64_t cslate:1;
+ uint64_t tritx:1;
+ uint64_t idleclks:2;
+ uint64_t cshi:1;
+ uint64_t reserved_6_6:1;
+ uint64_t int_ena:1;
+ uint64_t lsbfirst:1;
+ uint64_t wireor:1;
+ uint64_t clk_cont:1;
+ uint64_t idlelo:1;
+ uint64_t enable:1;
+#else
+ uint64_t enable:1;
+ uint64_t idlelo:1;
+ uint64_t clk_cont:1;
+ uint64_t wireor:1;
+ uint64_t lsbfirst:1;
+ uint64_t int_ena:1;
+ uint64_t reserved_6_6:1;
+ uint64_t cshi:1;
+ uint64_t idleclks:2;
+ uint64_t tritx:1;
+ uint64_t cslate:1;
+ uint64_t csena0:1;
+ uint64_t csena1:1;
+ uint64_t reserved_14_15:2;
+ uint64_t clkdiv:13;
+ uint64_t reserved_29_63:35;
+#endif
+ } cn61xx;
+ struct cvmx_mpi_cfg_cn66xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_29_63:35;
+ uint64_t clkdiv:13;
+ uint64_t csena3:1;
+ uint64_t csena2:1;
+ uint64_t reserved_12_13:2;
+ uint64_t cslate:1;
+ uint64_t tritx:1;
+ uint64_t idleclks:2;
+ uint64_t cshi:1;
+ uint64_t reserved_6_6:1;
+ uint64_t int_ena:1;
+ uint64_t lsbfirst:1;
+ uint64_t wireor:1;
+ uint64_t clk_cont:1;
+ uint64_t idlelo:1;
+ uint64_t enable:1;
+#else
+ uint64_t enable:1;
+ uint64_t idlelo:1;
+ uint64_t clk_cont:1;
+ uint64_t wireor:1;
+ uint64_t lsbfirst:1;
+ uint64_t int_ena:1;
+ uint64_t reserved_6_6:1;
+ uint64_t cshi:1;
+ uint64_t idleclks:2;
+ uint64_t tritx:1;
+ uint64_t cslate:1;
+ uint64_t reserved_12_13:2;
+ uint64_t csena2:1;
+ uint64_t csena3:1;
+ uint64_t clkdiv:13;
+ uint64_t reserved_29_63:35;
+#endif
+ } cn66xx;
+ struct cvmx_mpi_cfg_cn61xx cnf71xx;
+};
+
+union cvmx_mpi_datx {
+ uint64_t u64;
+ struct cvmx_mpi_datx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_8_63:56;
+ uint64_t data:8;
+#else
+ uint64_t data:8;
+ uint64_t reserved_8_63:56;
+#endif
+ } s;
+ struct cvmx_mpi_datx_s cn30xx;
+ struct cvmx_mpi_datx_s cn31xx;
+ struct cvmx_mpi_datx_s cn50xx;
+ struct cvmx_mpi_datx_s cn61xx;
+ struct cvmx_mpi_datx_s cn66xx;
+ struct cvmx_mpi_datx_s cnf71xx;
+};
+
+union cvmx_mpi_sts {
+ uint64_t u64;
+ struct cvmx_mpi_sts_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_13_63:51;
+ uint64_t rxnum:5;
+ uint64_t reserved_1_7:7;
+ uint64_t busy:1;
+#else
+ uint64_t busy:1;
+ uint64_t reserved_1_7:7;
+ uint64_t rxnum:5;
+ uint64_t reserved_13_63:51;
+#endif
+ } s;
+ struct cvmx_mpi_sts_s cn30xx;
+ struct cvmx_mpi_sts_s cn31xx;
+ struct cvmx_mpi_sts_s cn50xx;
+ struct cvmx_mpi_sts_s cn61xx;
+ struct cvmx_mpi_sts_s cn66xx;
+ struct cvmx_mpi_sts_s cnf71xx;
+};
+
+union cvmx_mpi_tx {
+ uint64_t u64;
+ struct cvmx_mpi_tx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_22_63:42;
+ uint64_t csid:2;
+ uint64_t reserved_17_19:3;
+ uint64_t leavecs:1;
+ uint64_t reserved_13_15:3;
+ uint64_t txnum:5;
+ uint64_t reserved_5_7:3;
+ uint64_t totnum:5;
+#else
+ uint64_t totnum:5;
+ uint64_t reserved_5_7:3;
+ uint64_t txnum:5;
+ uint64_t reserved_13_15:3;
+ uint64_t leavecs:1;
+ uint64_t reserved_17_19:3;
+ uint64_t csid:2;
+ uint64_t reserved_22_63:42;
+#endif
+ } s;
+ struct cvmx_mpi_tx_cn30xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_17_63:47;
+ uint64_t leavecs:1;
+ uint64_t reserved_13_15:3;
+ uint64_t txnum:5;
+ uint64_t reserved_5_7:3;
+ uint64_t totnum:5;
+#else
+ uint64_t totnum:5;
+ uint64_t reserved_5_7:3;
+ uint64_t txnum:5;
+ uint64_t reserved_13_15:3;
+ uint64_t leavecs:1;
+ uint64_t reserved_17_63:47;
+#endif
+ } cn30xx;
+ struct cvmx_mpi_tx_cn30xx cn31xx;
+ struct cvmx_mpi_tx_cn30xx cn50xx;
+ struct cvmx_mpi_tx_cn61xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_21_63:43;
+ uint64_t csid:1;
+ uint64_t reserved_17_19:3;
+ uint64_t leavecs:1;
+ uint64_t reserved_13_15:3;
+ uint64_t txnum:5;
+ uint64_t reserved_5_7:3;
+ uint64_t totnum:5;
+#else
+ uint64_t totnum:5;
+ uint64_t reserved_5_7:3;
+ uint64_t txnum:5;
+ uint64_t reserved_13_15:3;
+ uint64_t leavecs:1;
+ uint64_t reserved_17_19:3;
+ uint64_t csid:1;
+ uint64_t reserved_21_63:43;
+#endif
+ } cn61xx;
+ struct cvmx_mpi_tx_s cn66xx;
+ struct cvmx_mpi_tx_cn61xx cnf71xx;
+};
+
+#endif /* __SPI_CAVIUM_H */
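
These unions follow the usual Octeon register idiom: a raw u64 image overlaid with per-field bitfields, declared in both orders so the field names work under either __BIG_ENDIAN_BITFIELD setting. A hedged sketch of composing a TX command this way (field values are illustrative only):

	union cvmx_mpi_tx tx;

	tx.u64 = 0;		/* start from a clean register image */
	tx.s.csid = 1;		/* drive chip select 1 */
	tx.s.leavecs = 1;	/* keep CS asserted after this chunk */
	tx.s.txnum = 9;		/* bytes to shift out of DAT0..DAT8 */
	tx.s.totnum = 9;	/* total bytes clocked on the wire */
	writeq(tx.u64, p->register_base + OCTEON_SPI_TX(p));
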
diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c
index 8c30de031..18193df2e 100644
--- a/drivers/spi/spi-clps711x.c
+++ b/drivers/spi/spi-clps711x.c
@@ -1,7 +1,7 @@
/*
* CLPS711X SPI bus driver
*
- * Copyright (C) 2012-2014 Alexander Shiyan <shc_work@mail.ru>
+ * Copyright (C) 2012-2016 Alexander Shiyan <shc_work@mail.ru>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -12,7 +12,6 @@
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/gpio.h>
-#include <linux/delay.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
@@ -20,9 +19,8 @@
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/clps711x.h>
#include <linux/spi/spi.h>
-#include <linux/platform_data/spi-clps711x.h>
-#define DRIVER_NAME "spi-clps711x"
+#define DRIVER_NAME "clps711x-spi"
#define SYNCIO_FRMLEN(x) ((x) << 8)
#define SYNCIO_TXFRMEN (1 << 14)
@@ -40,6 +38,17 @@ struct spi_clps711x_data {
static int spi_clps711x_setup(struct spi_device *spi)
{
+ if (!spi->controller_state) {
+ int ret;
+
+ ret = devm_gpio_request(&spi->master->dev, spi->cs_gpio,
+ dev_name(&spi->master->dev));
+ if (ret)
+ return ret;
+
+ spi->controller_state = spi;
+ }
+
 /* We expect that the SPI device is not selected */
gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
@@ -104,20 +113,9 @@ static irqreturn_t spi_clps711x_isr(int irq, void *dev_id)
static int spi_clps711x_probe(struct platform_device *pdev)
{
struct spi_clps711x_data *hw;
- struct spi_clps711x_pdata *pdata = dev_get_platdata(&pdev->dev);
struct spi_master *master;
struct resource *res;
- int i, irq, ret;
-
- if (!pdata) {
- dev_err(&pdev->dev, "No platform data supplied\n");
- return -EINVAL;
- }
-
- if (pdata->num_chipselect < 1) {
- dev_err(&pdev->dev, "At least one CS must be defined\n");
- return -EINVAL;
- }
+ int irq, ret;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -127,40 +125,24 @@ static int spi_clps711x_probe(struct platform_device *pdev)
if (!master)
return -ENOMEM;
- master->cs_gpios = devm_kzalloc(&pdev->dev, sizeof(int) *
- pdata->num_chipselect, GFP_KERNEL);
- if (!master->cs_gpios) {
- ret = -ENOMEM;
- goto err_out;
- }
-
- master->bus_num = pdev->id;
+ master->bus_num = -1;
master->mode_bits = SPI_CPHA | SPI_CS_HIGH;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 8);
- master->num_chipselect = pdata->num_chipselect;
+ master->dev.of_node = pdev->dev.of_node;
master->setup = spi_clps711x_setup;
master->prepare_message = spi_clps711x_prepare_message;
master->transfer_one = spi_clps711x_transfer_one;
hw = spi_master_get_devdata(master);
- for (i = 0; i < master->num_chipselect; i++) {
- master->cs_gpios[i] = pdata->chipselect[i];
- ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i],
- DRIVER_NAME);
- if (ret) {
- dev_err(&pdev->dev, "Can't get CS GPIO %i\n", i);
- goto err_out;
- }
- }
-
hw->spi_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(hw->spi_clk)) {
ret = PTR_ERR(hw->spi_clk);
goto err_out;
}
- hw->syscon = syscon_regmap_lookup_by_pdevname("syscon.3");
+ hw->syscon =
+ syscon_regmap_lookup_by_compatible("cirrus,ep7209-syscon3");
if (IS_ERR(hw->syscon)) {
ret = PTR_ERR(hw->syscon);
goto err_out;
@@ -185,14 +167,8 @@ static int spi_clps711x_probe(struct platform_device *pdev)
goto err_out;
ret = devm_spi_register_master(&pdev->dev, master);
- if (!ret) {
- dev_info(&pdev->dev,
- "SPI bus driver initialized. Master clock %u Hz\n",
- master->max_speed_hz);
+ if (!ret)
return 0;
- }
-
- dev_err(&pdev->dev, "Failed to register master\n");
err_out:
spi_master_put(master);
@@ -200,9 +176,16 @@ err_out:
return ret;
}
+static const struct of_device_id clps711x_spi_dt_ids[] = {
+ { .compatible = "cirrus,ep7209-spi", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, clps711x_spi_dt_ids);
+
static struct platform_driver clps711x_spi_driver = {
.driver = {
.name = DRIVER_NAME,
+ .of_match_table = clps711x_spi_dt_ids,
},
.probe = spi_clps711x_probe,
};
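
With the platform data gone, chip-select GPIOs now arrive through the SPI core's cs_gpio plumbing, and setup() requests each one lazily on its first call, using controller_state as the "already initialized" marker. The guard in isolation, as a sketch with assumed names:

	static int demo_setup(struct spi_device *spi)
	{
		if (!spi->controller_state) {
			int ret = devm_gpio_request(&spi->master->dev,
						    spi->cs_gpio,
						    dev_name(&spi->master->dev));
			if (ret)
				return ret;
			/* any non-NULL cookie marks the device as set up */
			spi->controller_state = spi;
		}
		return 0;
	}
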
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index 823cbc92d..7a37090da 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -720,8 +720,6 @@ static int img_spfi_remove(struct platform_device *pdev)
clk_disable_unprepare(spfi->sys_clk);
}
- spi_master_put(master);
-
return 0;
}
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 50769078e..f63cb30f9 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -59,8 +59,6 @@
struct spi_imx_config {
unsigned int speed_hz;
unsigned int bpw;
- unsigned int mode;
- u8 cs;
};
enum spi_imx_devtype {
@@ -76,7 +74,7 @@ struct spi_imx_data;
struct spi_imx_devtype_data {
void (*intctrl)(struct spi_imx_data *, int);
- int (*config)(struct spi_imx_data *, struct spi_imx_config *);
+ int (*config)(struct spi_device *, struct spi_imx_config *);
void (*trigger)(struct spi_imx_data *);
int (*rx_available)(struct spi_imx_data *);
void (*reset)(struct spi_imx_data *);
@@ -112,7 +110,6 @@ struct spi_imx_data {
struct completion dma_tx_completion;
const struct spi_imx_devtype_data *devtype_data;
- int chipselect[0];
};
static inline int is_imx27_cspi(struct spi_imx_data *d)
@@ -312,7 +309,7 @@ static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx,
(post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
}
-static void __maybe_unused mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
+static void mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
{
unsigned val = 0;
@@ -325,7 +322,7 @@ static void __maybe_unused mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int
writel(val, spi_imx->base + MX51_ECSPI_INT);
}
-static void __maybe_unused mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
+static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
{
u32 reg;
@@ -334,9 +331,10 @@ static void __maybe_unused mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
}
-static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
- struct spi_imx_config *config)
+static int mx51_ecspi_config(struct spi_device *spi,
+ struct spi_imx_config *config)
{
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
u32 clk = config->speed_hz, delay, reg;
u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
@@ -355,28 +353,28 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
spi_imx->spi_bus_clk = clk;
/* set chip select to use */
- ctrl |= MX51_ECSPI_CTRL_CS(config->cs);
+ ctrl |= MX51_ECSPI_CTRL_CS(spi->chip_select);
ctrl |= (config->bpw - 1) << MX51_ECSPI_CTRL_BL_OFFSET;
- cfg |= MX51_ECSPI_CONFIG_SBBCTRL(config->cs);
+ cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
- if (config->mode & SPI_CPHA)
- cfg |= MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
+ if (spi->mode & SPI_CPHA)
+ cfg |= MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
else
- cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
+ cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
- if (config->mode & SPI_CPOL) {
- cfg |= MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
- cfg |= MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
+ if (spi->mode & SPI_CPOL) {
+ cfg |= MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
+ cfg |= MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
} else {
- cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
- cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
+ cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
+ cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
}
- if (config->mode & SPI_CS_HIGH)
- cfg |= MX51_ECSPI_CONFIG_SSBPOL(config->cs);
+ if (spi->mode & SPI_CS_HIGH)
+ cfg |= MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);
else
- cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(config->cs);
+ cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);
if (spi_imx->usedma)
ctrl |= MX51_ECSPI_CTRL_SMC;
@@ -385,7 +383,7 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
reg = readl(spi_imx->base + MX51_ECSPI_TESTREG);
- if (config->mode & SPI_LOOP)
+ if (spi->mode & SPI_LOOP)
reg |= MX51_ECSPI_TESTREG_LBC;
else
reg &= ~MX51_ECSPI_TESTREG_LBC;
@@ -424,12 +422,12 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
return 0;
}
-static int __maybe_unused mx51_ecspi_rx_available(struct spi_imx_data *spi_imx)
+static int mx51_ecspi_rx_available(struct spi_imx_data *spi_imx)
{
return readl(spi_imx->base + MX51_ECSPI_STAT) & MX51_ECSPI_STAT_RR;
}
-static void __maybe_unused mx51_ecspi_reset(struct spi_imx_data *spi_imx)
+static void mx51_ecspi_reset(struct spi_imx_data *spi_imx)
{
/* drain receive buffer */
while (mx51_ecspi_rx_available(spi_imx))
@@ -459,7 +457,7 @@ static void __maybe_unused mx51_ecspi_reset(struct spi_imx_data *spi_imx)
* the i.MX35 has a slightly different register layout for bits
* we do not use here.
*/
-static void __maybe_unused mx31_intctrl(struct spi_imx_data *spi_imx, int enable)
+static void mx31_intctrl(struct spi_imx_data *spi_imx, int enable)
{
unsigned int val = 0;
@@ -471,7 +469,7 @@ static void __maybe_unused mx31_intctrl(struct spi_imx_data *spi_imx, int enable
writel(val, spi_imx->base + MXC_CSPIINT);
}
-static void __maybe_unused mx31_trigger(struct spi_imx_data *spi_imx)
+static void mx31_trigger(struct spi_imx_data *spi_imx)
{
unsigned int reg;
@@ -480,11 +478,10 @@ static void __maybe_unused mx31_trigger(struct spi_imx_data *spi_imx)
writel(reg, spi_imx->base + MXC_CSPICTRL);
}
-static int __maybe_unused mx31_config(struct spi_imx_data *spi_imx,
- struct spi_imx_config *config)
+static int mx31_config(struct spi_device *spi, struct spi_imx_config *config)
{
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
- int cs = spi_imx->chipselect[config->cs];
reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
MX31_CSPICTRL_DR_SHIFT;
@@ -496,14 +493,14 @@ static int __maybe_unused mx31_config(struct spi_imx_data *spi_imx,
reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT;
}
- if (config->mode & SPI_CPHA)
+ if (spi->mode & SPI_CPHA)
reg |= MX31_CSPICTRL_PHA;
- if (config->mode & SPI_CPOL)
+ if (spi->mode & SPI_CPOL)
reg |= MX31_CSPICTRL_POL;
- if (config->mode & SPI_CS_HIGH)
+ if (spi->mode & SPI_CS_HIGH)
reg |= MX31_CSPICTRL_SSPOL;
- if (cs < 0)
- reg |= (cs + 32) <<
+ if (spi->cs_gpio < 0)
+ reg |= (spi->cs_gpio + 32) <<
(is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
MX31_CSPICTRL_CS_SHIFT);
@@ -512,12 +509,12 @@ static int __maybe_unused mx31_config(struct spi_imx_data *spi_imx,
return 0;
}
-static int __maybe_unused mx31_rx_available(struct spi_imx_data *spi_imx)
+static int mx31_rx_available(struct spi_imx_data *spi_imx)
{
return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
}
-static void __maybe_unused mx31_reset(struct spi_imx_data *spi_imx)
+static void mx31_reset(struct spi_imx_data *spi_imx)
{
/* drain receive buffer */
while (readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR)
@@ -537,7 +534,7 @@ static void __maybe_unused mx31_reset(struct spi_imx_data *spi_imx)
#define MX21_CSPICTRL_DR_SHIFT 14
#define MX21_CSPICTRL_CS_SHIFT 19
-static void __maybe_unused mx21_intctrl(struct spi_imx_data *spi_imx, int enable)
+static void mx21_intctrl(struct spi_imx_data *spi_imx, int enable)
{
unsigned int val = 0;
@@ -549,7 +546,7 @@ static void __maybe_unused mx21_intctrl(struct spi_imx_data *spi_imx, int enable
writel(val, spi_imx->base + MXC_CSPIINT);
}
-static void __maybe_unused mx21_trigger(struct spi_imx_data *spi_imx)
+static void mx21_trigger(struct spi_imx_data *spi_imx)
{
unsigned int reg;
@@ -558,37 +555,36 @@ static void __maybe_unused mx21_trigger(struct spi_imx_data *spi_imx)
writel(reg, spi_imx->base + MXC_CSPICTRL);
}
-static int __maybe_unused mx21_config(struct spi_imx_data *spi_imx,
- struct spi_imx_config *config)
+static int mx21_config(struct spi_device *spi, struct spi_imx_config *config)
{
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
- int cs = spi_imx->chipselect[config->cs];
unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;
reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz, max) <<
MX21_CSPICTRL_DR_SHIFT;
reg |= config->bpw - 1;
- if (config->mode & SPI_CPHA)
+ if (spi->mode & SPI_CPHA)
reg |= MX21_CSPICTRL_PHA;
- if (config->mode & SPI_CPOL)
+ if (spi->mode & SPI_CPOL)
reg |= MX21_CSPICTRL_POL;
- if (config->mode & SPI_CS_HIGH)
+ if (spi->mode & SPI_CS_HIGH)
reg |= MX21_CSPICTRL_SSPOL;
- if (cs < 0)
- reg |= (cs + 32) << MX21_CSPICTRL_CS_SHIFT;
+ if (spi->cs_gpio < 0)
+ reg |= (spi->cs_gpio + 32) << MX21_CSPICTRL_CS_SHIFT;
writel(reg, spi_imx->base + MXC_CSPICTRL);
return 0;
}
-static int __maybe_unused mx21_rx_available(struct spi_imx_data *spi_imx)
+static int mx21_rx_available(struct spi_imx_data *spi_imx)
{
return readl(spi_imx->base + MXC_CSPIINT) & MX21_INTREG_RR;
}
-static void __maybe_unused mx21_reset(struct spi_imx_data *spi_imx)
+static void mx21_reset(struct spi_imx_data *spi_imx)
{
writel(1, spi_imx->base + MXC_RESET);
}
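
In the mx21/mx31 config paths, a negative spi->cs_gpio is not a GPIO at all: by what appears to be the driver's encoding, native chip select n is stored as n - 32, so adding 32 back recovers the hardware CS field. A small worked example under that assumption:

	/* Assumed encoding: native CS n is stored as cs_gpio = n - 32. */
	int cs_gpio = -32;			/* native CS0, not a valid GPIO */
	if (cs_gpio < 0) {
		int native_cs = cs_gpio + 32;	/* -32 + 32 == 0 -> CS0 */
		reg |= native_cs << MX21_CSPICTRL_CS_SHIFT;
	}
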
@@ -604,7 +600,7 @@ static void __maybe_unused mx21_reset(struct spi_imx_data *spi_imx)
#define MX1_CSPICTRL_MASTER (1 << 10)
#define MX1_CSPICTRL_DR_SHIFT 13
-static void __maybe_unused mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
+static void mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
{
unsigned int val = 0;
@@ -616,7 +612,7 @@ static void __maybe_unused mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
writel(val, spi_imx->base + MXC_CSPIINT);
}
-static void __maybe_unused mx1_trigger(struct spi_imx_data *spi_imx)
+static void mx1_trigger(struct spi_imx_data *spi_imx)
{
unsigned int reg;
@@ -625,18 +621,18 @@ static void __maybe_unused mx1_trigger(struct spi_imx_data *spi_imx)
writel(reg, spi_imx->base + MXC_CSPICTRL);
}
-static int __maybe_unused mx1_config(struct spi_imx_data *spi_imx,
- struct spi_imx_config *config)
+static int mx1_config(struct spi_device *spi, struct spi_imx_config *config)
{
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;
reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
MX1_CSPICTRL_DR_SHIFT;
reg |= config->bpw - 1;
- if (config->mode & SPI_CPHA)
+ if (spi->mode & SPI_CPHA)
reg |= MX1_CSPICTRL_PHA;
- if (config->mode & SPI_CPOL)
+ if (spi->mode & SPI_CPOL)
reg |= MX1_CSPICTRL_POL;
writel(reg, spi_imx->base + MXC_CSPICTRL);
@@ -644,12 +640,12 @@ static int __maybe_unused mx1_config(struct spi_imx_data *spi_imx,
return 0;
}
-static int __maybe_unused mx1_rx_available(struct spi_imx_data *spi_imx)
+static int mx1_rx_available(struct spi_imx_data *spi_imx)
{
return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR;
}
-static void __maybe_unused mx1_reset(struct spi_imx_data *spi_imx)
+static void mx1_reset(struct spi_imx_data *spi_imx)
{
writel(1, spi_imx->base + MXC_RESET);
}
@@ -747,15 +743,13 @@ MODULE_DEVICE_TABLE(of, spi_imx_dt_ids);
static void spi_imx_chipselect(struct spi_device *spi, int is_active)
{
- struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
- int gpio = spi_imx->chipselect[spi->chip_select];
int active = is_active != BITBANG_CS_INACTIVE;
int dev_is_lowactive = !(spi->mode & SPI_CS_HIGH);
- if (!gpio_is_valid(gpio))
+ if (!gpio_is_valid(spi->cs_gpio))
return;
- gpio_set_value(gpio, dev_is_lowactive ^ active);
+ gpio_set_value(spi->cs_gpio, dev_is_lowactive ^ active);
}
static void spi_imx_push(struct spi_imx_data *spi_imx)
@@ -859,8 +853,6 @@ static int spi_imx_setupxfer(struct spi_device *spi,
config.bpw = t ? t->bits_per_word : spi->bits_per_word;
config.speed_hz = t ? t->speed_hz : spi->max_speed_hz;
- config.mode = spi->mode;
- config.cs = spi->chip_select;
if (!config.speed_hz)
config.speed_hz = spi->max_speed_hz;
@@ -891,7 +883,7 @@ static int spi_imx_setupxfer(struct spi_device *spi,
return ret;
}
- spi_imx->devtype_data->config(spi_imx, &config);
+ spi_imx->devtype_data->config(spi, &config);
return 0;
}
@@ -1050,6 +1042,8 @@ static int spi_imx_pio_transfer(struct spi_device *spi,
struct spi_transfer *transfer)
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
+ unsigned long transfer_timeout;
+ unsigned long timeout;
spi_imx->tx_buf = transfer->tx_buf;
spi_imx->rx_buf = transfer->rx_buf;
@@ -1062,7 +1056,15 @@ static int spi_imx_pio_transfer(struct spi_device *spi,
spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE);
- wait_for_completion(&spi_imx->xfer_done);
+ transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);
+
+ timeout = wait_for_completion_timeout(&spi_imx->xfer_done,
+ transfer_timeout);
+ if (!timeout) {
+ dev_err(&spi->dev, "I/O Error in PIO\n");
+ spi_imx->devtype_data->reset(spi_imx);
+ return -ETIMEDOUT;
+ }
return transfer->len;
}
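
The PIO path now bounds its wait with spi_imx_calculate_timeout() and resets the controller on expiry instead of blocking forever. That helper is not part of this hunk; a plausible sketch, assuming the timeout scales with transfer size and bus clock plus a generous margin:

	/* Sketch only: derive a jiffies timeout from the transfer length. */
	static unsigned long spi_imx_calculate_timeout(struct spi_imx_data *spi_imx,
						       int size)
	{
		unsigned long secs;

		/* ~12 bus clocks per byte: 8 data bits plus overhead */
		secs = (8 + 4) * size / spi_imx->spi_bus_clk;
		/* triple the nominal time and add a second of slack */
		secs = 1 + 3 * secs;

		return msecs_to_jiffies(secs * MSEC_PER_SEC);
	}
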
@@ -1080,14 +1082,12 @@ static int spi_imx_transfer(struct spi_device *spi,
static int spi_imx_setup(struct spi_device *spi)
{
- struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
- int gpio = spi_imx->chipselect[spi->chip_select];
-
dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__,
spi->mode, spi->bits_per_word, spi->max_speed_hz);
- if (gpio_is_valid(gpio))
- gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);
+ if (gpio_is_valid(spi->cs_gpio))
+ gpio_direction_output(spi->cs_gpio,
+ spi->mode & SPI_CS_HIGH ? 0 : 1);
spi_imx_chipselect(spi, BITBANG_CS_INACTIVE);
@@ -1137,31 +1137,21 @@ static int spi_imx_probe(struct platform_device *pdev)
struct spi_master *master;
struct spi_imx_data *spi_imx;
struct resource *res;
- int i, ret, num_cs, irq;
+ int i, ret, irq;
if (!np && !mxc_platform_info) {
dev_err(&pdev->dev, "can't get the platform data\n");
return -EINVAL;
}
- ret = of_property_read_u32(np, "fsl,spi-num-chipselects", &num_cs);
- if (ret < 0) {
- if (mxc_platform_info)
- num_cs = mxc_platform_info->num_chipselect;
- else
- return ret;
- }
-
- master = spi_alloc_master(&pdev->dev,
- sizeof(struct spi_imx_data) + sizeof(int) * num_cs);
+ master = spi_alloc_master(&pdev->dev, sizeof(struct spi_imx_data));
if (!master)
return -ENOMEM;
platform_set_drvdata(pdev, master);
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
- master->bus_num = pdev->id;
- master->num_chipselect = num_cs;
+ master->bus_num = np ? -1 : pdev->id;
spi_imx = spi_master_get_devdata(master);
spi_imx->bitbang.master = master;
@@ -1170,22 +1160,16 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_imx->devtype_data = of_id ? of_id->data :
(struct spi_imx_devtype_data *)pdev->id_entry->driver_data;
- for (i = 0; i < master->num_chipselect; i++) {
- int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
- if (!gpio_is_valid(cs_gpio) && mxc_platform_info)
- cs_gpio = mxc_platform_info->chipselect[i];
+ if (mxc_platform_info) {
+ master->num_chipselect = mxc_platform_info->num_chipselect;
+ master->cs_gpios = devm_kzalloc(&master->dev,
+ sizeof(int) * master->num_chipselect, GFP_KERNEL);
+ if (!master->cs_gpios)
+ return -ENOMEM;
- spi_imx->chipselect[i] = cs_gpio;
- if (!gpio_is_valid(cs_gpio))
- continue;
-
- ret = devm_gpio_request(&pdev->dev, spi_imx->chipselect[i],
- DRIVER_NAME);
- if (ret) {
- dev_err(&pdev->dev, "can't get cs gpios\n");
- goto out_master_put;
- }
- }
+ for (i = 0; i < master->num_chipselect; i++)
+ master->cs_gpios[i] = mxc_platform_info->chipselect[i];
+ }
spi_imx->bitbang.chipselect = spi_imx_chipselect;
spi_imx->bitbang.setup_transfer = spi_imx_setupxfer;
@@ -1267,6 +1251,19 @@ static int spi_imx_probe(struct platform_device *pdev)
goto out_clk_put;
}
+ for (i = 0; i < master->num_chipselect; i++) {
+ if (!gpio_is_valid(master->cs_gpios[i]))
+ continue;
+
+ ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i],
+ DRIVER_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't get CS GPIO %i\n",
+ master->cs_gpios[i]);
+ goto out_clk_put;
+ }
+ }
+
dev_info(&pdev->dev, "probed\n");
clk_disable(spi_imx->clk_ipg);
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
index cf4bb36be..faa1b133b 100644
--- a/drivers/spi/spi-loopback-test.c
+++ b/drivers/spi/spi-loopback-test.c
@@ -536,7 +536,7 @@ static int spi_test_check_loopback_result(struct spi_device *spi,
mismatch_error:
dev_err(&spi->dev,
- "loopback strangeness - transfer missmatch on byte %04zx - expected 0x%02x, but got 0x%02x\n",
+ "loopback strangeness - transfer mismatch on byte %04zx - expected 0x%02x, but got 0x%02x\n",
i, txb, rxb);
return -EINVAL;
diff --git a/drivers/spi/spi-mpc52xx-psc.c b/drivers/spi/spi-mpc52xx-psc.c
index 72d11ebef..42a8b8521 100644
--- a/drivers/spi/spi-mpc52xx-psc.c
+++ b/drivers/spi/spi-mpc52xx-psc.c
@@ -42,7 +42,6 @@ struct mpc52xx_psc_spi {
u8 bits_per_word;
u8 busy;
- struct workqueue_struct *workqueue;
struct work_struct work;
struct list_head queue;
@@ -299,7 +298,7 @@ static int mpc52xx_psc_spi_transfer(struct spi_device *spi,
spin_lock_irqsave(&mps->lock, flags);
list_add_tail(&m->queue, &mps->queue);
- queue_work(mps->workqueue, &mps->work);
+ schedule_work(&mps->work);
spin_unlock_irqrestore(&mps->lock, flags);
return 0;
@@ -425,21 +424,12 @@ static int mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr,
INIT_WORK(&mps->work, mpc52xx_psc_spi_work);
INIT_LIST_HEAD(&mps->queue);
- mps->workqueue = create_singlethread_workqueue(
- dev_name(master->dev.parent));
- if (mps->workqueue == NULL) {
- ret = -EBUSY;
- goto free_irq;
- }
-
ret = spi_register_master(master);
if (ret < 0)
- goto unreg_master;
+ goto free_irq;
return ret;
-unreg_master:
- destroy_workqueue(mps->workqueue);
free_irq:
free_irq(mps->irq, mps);
free_master:
@@ -484,8 +474,7 @@ static int mpc52xx_psc_spi_of_remove(struct platform_device *op)
struct spi_master *master = spi_master_get(platform_get_drvdata(op));
struct mpc52xx_psc_spi *mps = spi_master_get_devdata(master);
- flush_workqueue(mps->workqueue);
- destroy_workqueue(mps->workqueue);
+ flush_work(&mps->work);
spi_unregister_master(master);
free_irq(mps->irq, mps);
if (mps->psc)
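
Replacing the private single-threaded workqueue with schedule_work()/flush_work() moves the work onto the shared system workqueue: submission stays the same, and teardown only has to wait for the one work item rather than destroy a whole queue. The pattern in isolation (names assumed):

	#include <linux/workqueue.h>

	static void demo_work_fn(struct work_struct *work)
	{
		/* drain the driver's message queue here */
	}
	static DECLARE_WORK(demo_work, demo_work_fn);

	static void demo_submit(void)
	{
		schedule_work(&demo_work);	/* shared system workqueue */
	}

	static void demo_teardown(void)
	{
		flush_work(&demo_work);		/* wait for in-flight work */
	}
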
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 0be89e052..899d7a8f0 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -685,7 +685,6 @@ static int mtk_spi_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
mtk_spi_reset(mdata);
- spi_master_put(master);
return 0;
}
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 1d237e93a..d5157b222 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -419,16 +419,13 @@ static void omap2_mcspi_tx_dma(struct spi_device *spi,
if (mcspi_dma->dma_tx) {
struct dma_async_tx_descriptor *tx;
- struct scatterlist sg;
dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
- sg_init_table(&sg, 1);
- sg_dma_address(&sg) = xfer->tx_dma;
- sg_dma_len(&sg) = xfer->len;
-
- tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
- DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, xfer->tx_sg.sgl,
+ xfer->tx_sg.nents,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (tx) {
tx->callback = omap2_mcspi_tx_callback;
tx->callback_param = spi;
@@ -449,7 +446,10 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
{
struct omap2_mcspi *mcspi;
struct omap2_mcspi_dma *mcspi_dma;
- unsigned int count, dma_count;
+ unsigned int count, transfer_reduction = 0;
+ struct scatterlist *sg_out[2];
+ int nb_sizes = 0, out_mapped_nents[2], ret, x;
+ size_t sizes[2];
u32 l;
int elements = 0;
int word_len, element_count;
@@ -457,10 +457,14 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
mcspi = spi_master_get_devdata(spi->master);
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
count = xfer->len;
- dma_count = xfer->len;
+ /*
+ * In the "End-of-Transfer Procedure" section for DMA RX in OMAP35x TRM
+ * it mentions reducing DMA transfer length by one element in master
+ * normal mode.
+ */
if (mcspi->fifo_depth == 0)
- dma_count -= es;
+ transfer_reduction = es;
word_len = cs->word_len;
l = mcspi_cached_chconf0(spi);
@@ -474,20 +478,46 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
if (mcspi_dma->dma_rx) {
struct dma_async_tx_descriptor *tx;
- struct scatterlist sg;
dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
+ /*
+ * Reduce DMA transfer length by one more if McSPI is
+ * configured in turbo mode.
+ */
if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0)
- dma_count -= es;
+ transfer_reduction += es;
+
+ if (transfer_reduction) {
+ /* Split sgl into two. The second sgl won't be used. */
+ sizes[0] = count - transfer_reduction;
+ sizes[1] = transfer_reduction;
+ nb_sizes = 2;
+ } else {
+ /*
+ * Don't bother splitting the sgl. This essentially
+ * clones the original sgl.
+ */
+ sizes[0] = count;
+ nb_sizes = 1;
+ }
+
+ ret = sg_split(xfer->rx_sg.sgl, xfer->rx_sg.nents,
+ 0, nb_sizes,
+ sizes,
+ sg_out, out_mapped_nents,
+ GFP_KERNEL);
- sg_init_table(&sg, 1);
- sg_dma_address(&sg) = xfer->rx_dma;
- sg_dma_len(&sg) = dma_count;
+ if (ret < 0) {
+ dev_err(&spi->dev, "sg_split failed\n");
+ return 0;
+ }
- tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1,
- DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT |
- DMA_CTRL_ACK);
+ tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx,
+ sg_out[0],
+ out_mapped_nents[0],
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (tx) {
tx->callback = omap2_mcspi_rx_callback;
tx->callback_param = spi;
@@ -501,12 +531,17 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
omap2_mcspi_set_dma_req(spi, 1, 1);
wait_for_completion(&mcspi_dma->dma_rx_completion);
- dma_unmap_single(mcspi->dev, xfer->rx_dma, count,
- DMA_FROM_DEVICE);
+
+ for (x = 0; x < nb_sizes; x++)
+ kfree(sg_out[x]);
if (mcspi->fifo_depth > 0)
return count;
+ /*
+	 * Due to the DMA transfer length reduction, the missing bytes must
+ * be read manually to receive all of the expected data.
+ */
omap2_mcspi_set_enable(spi, 0);
elements = element_count - 1;
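
sg_split() from lib/sg_split.c does the heavy lifting for this length reduction: it carves one mapped scatterlist into consecutive sub-lists by byte count, so DMA can cover everything except the trailing element(s) the driver drains by hand. A minimal sketch of the same two-way split:

	size_t sizes[2] = { count - transfer_reduction, transfer_reduction };
	struct scatterlist *sg_out[2];
	int out_nents[2], ret;

	ret = sg_split(xfer->rx_sg.sgl, xfer->rx_sg.nents,
		       0,		/* skip no leading bytes */
		       2, sizes,	/* two output lists, by size */
		       sg_out, out_nents, GFP_KERNEL);
	if (ret < 0)
		return ret;
	/* DMA uses sg_out[0]/out_nents[0]; kfree() both lists when done */
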
@@ -615,8 +650,6 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
if (tx != NULL) {
wait_for_completion(&mcspi_dma->dma_tx_completion);
- dma_unmap_single(mcspi->dev, xfer->tx_dma, xfer->len,
- DMA_TO_DEVICE);
if (mcspi->fifo_depth > 0) {
irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS;
@@ -1074,8 +1107,9 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
gpio_free(spi->cs_gpio);
}
-static int omap2_mcspi_work_one(struct omap2_mcspi *mcspi,
- struct spi_device *spi, struct spi_transfer *t)
+static int omap2_mcspi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
{
/* We only enable one channel at a time -- the one whose message is
@@ -1085,7 +1119,7 @@ static int omap2_mcspi_work_one(struct omap2_mcspi *mcspi,
* chipselect with the FORCE bit ... CS != channel enable.
*/
- struct spi_master *master;
+ struct omap2_mcspi *mcspi;
struct omap2_mcspi_dma *mcspi_dma;
struct omap2_mcspi_cs *cs;
struct omap2_mcspi_device_config *cd;
@@ -1093,7 +1127,7 @@ static int omap2_mcspi_work_one(struct omap2_mcspi *mcspi,
int status = 0;
u32 chconf;
- master = spi->master;
+ mcspi = spi_master_get_devdata(master);
mcspi_dma = mcspi->dma_channels + spi->chip_select;
cs = spi->controller_state;
cd = spi->controller_data;
@@ -1153,7 +1187,8 @@ static int omap2_mcspi_work_one(struct omap2_mcspi *mcspi,
unsigned count;
if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
- (t->len >= DMA_MIN_BYTES))
+ master->cur_msg_mapped &&
+ master->can_dma(master, spi, t))
omap2_mcspi_set_fifo(spi, t, 1);
omap2_mcspi_set_enable(spi, 1);
@@ -1164,7 +1199,8 @@ static int omap2_mcspi_work_one(struct omap2_mcspi *mcspi,
+ OMAP2_MCSPI_TX0);
if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
- (t->len >= DMA_MIN_BYTES))
+ master->cur_msg_mapped &&
+ master->can_dma(master, spi, t))
count = omap2_mcspi_txrx_dma(spi, t);
else
count = omap2_mcspi_txrx_pio(spi, t);
@@ -1233,55 +1269,11 @@ static int omap2_mcspi_prepare_message(struct spi_master *master,
return 0;
}
-static int omap2_mcspi_transfer_one(struct spi_master *master,
- struct spi_device *spi, struct spi_transfer *t)
+static bool omap2_mcspi_can_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
{
- struct omap2_mcspi *mcspi;
- struct omap2_mcspi_dma *mcspi_dma;
- const void *tx_buf = t->tx_buf;
- void *rx_buf = t->rx_buf;
- unsigned len = t->len;
-
- mcspi = spi_master_get_devdata(master);
- mcspi_dma = mcspi->dma_channels + spi->chip_select;
-
- if ((len && !(rx_buf || tx_buf))) {
- dev_dbg(mcspi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
- t->speed_hz,
- len,
- tx_buf ? "tx" : "",
- rx_buf ? "rx" : "",
- t->bits_per_word);
- return -EINVAL;
- }
-
- if (len < DMA_MIN_BYTES)
- goto skip_dma_map;
-
- if (mcspi_dma->dma_tx && tx_buf != NULL) {
- t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf,
- len, DMA_TO_DEVICE);
- if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
- dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
- 'T', len);
- return -EINVAL;
- }
- }
- if (mcspi_dma->dma_rx && rx_buf != NULL) {
- t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(mcspi->dev, t->rx_dma)) {
- dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
- 'R', len);
- if (tx_buf != NULL)
- dma_unmap_single(mcspi->dev, t->tx_dma,
- len, DMA_TO_DEVICE);
- return -EINVAL;
- }
- }
-
-skip_dma_map:
- return omap2_mcspi_work_one(mcspi, spi, t);
+ return (xfer->len >= DMA_MIN_BYTES);
}
static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
@@ -1361,6 +1353,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
master->setup = omap2_mcspi_setup;
master->auto_runtime_pm = true;
master->prepare_message = omap2_mcspi_prepare_message;
+ master->can_dma = omap2_mcspi_can_dma;
master->transfer_one = omap2_mcspi_transfer_one;
master->set_cs = omap2_mcspi_set_cs;
master->cleanup = omap2_mcspi_cleanup;
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index a87cfd4ba..ded37025b 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/sizes.h>
@@ -43,6 +44,9 @@
#define ORION_SPI_INT_CAUSE_REG 0x10
#define ORION_SPI_TIMING_PARAMS_REG 0x18
+/* Register for the "Direct Mode" */
+#define SPI_DIRECT_WRITE_CONFIG_REG 0x20
+
#define ORION_SPI_TMISO_SAMPLE_MASK (0x3 << 6)
#define ORION_SPI_TMISO_SAMPLE_1 (1 << 6)
#define ORION_SPI_TMISO_SAMPLE_2 (2 << 6)
@@ -78,11 +82,18 @@ struct orion_spi_dev {
bool is_errata_50mhz_ac;
};
+struct orion_direct_acc {
+ void __iomem *vaddr;
+ u32 size;
+};
+
struct orion_spi {
struct spi_master *master;
void __iomem *base;
struct clk *clk;
const struct orion_spi_dev *devdata;
+
+ struct orion_direct_acc direct_access[ORION_NUM_CHIPSELECTS];
};
static inline void __iomem *spi_reg(struct orion_spi *orion_spi, u32 reg)
@@ -372,10 +383,39 @@ orion_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer)
{
unsigned int count;
int word_len;
+ struct orion_spi *orion_spi;
+ int cs = spi->chip_select;
word_len = spi->bits_per_word;
count = xfer->len;
+ orion_spi = spi_master_get_devdata(spi->master);
+
+ /*
+ * Use SPI direct write mode if base address is available. Otherwise
+ * fall back to PIO mode for this transfer.
+ */
+ if ((orion_spi->direct_access[cs].vaddr) && (xfer->tx_buf) &&
+ (word_len == 8)) {
+ unsigned int cnt = count / 4;
+ unsigned int rem = count % 4;
+
+ /*
+	 * Send the TX data to the SPI device via the directly
+	 * mapped address window.
+ */
+ iowrite32_rep(orion_spi->direct_access[cs].vaddr,
+ xfer->tx_buf, cnt);
+ if (rem) {
+ u32 *buf = (u32 *)xfer->tx_buf;
+
+ iowrite8_rep(orion_spi->direct_access[cs].vaddr,
+ &buf[cnt], rem);
+ }
+
+ return count;
+ }
+
if (word_len == 8) {
const u8 *tx = xfer->tx_buf;
u8 *rx = xfer->rx_buf;
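
When a chip select has an MBus window mapped, the transfer above bypasses the FIFO registers and streams the TX buffer straight through that window, whole 32-bit words first and then the byte remainder. The split as a sketch, assuming vaddr points at the mapped window:

	/* Sketch: push count bytes through a write-only mapped window. */
	const u8 *src = xfer->tx_buf;
	unsigned int words = count / 4;
	unsigned int rem = count % 4;

	iowrite32_rep(vaddr, src, words);		/* whole words */
	iowrite8_rep(vaddr, src + 4 * words, rem);	/* trailing 1..3 bytes */
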
@@ -425,6 +465,10 @@ static int orion_spi_reset(struct orion_spi *orion_spi)
{
/* Verify that the CS is deasserted */
orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
+
+ /* Don't deassert CS between the direct mapped SPI transfers */
+ writel(0, spi_reg(orion_spi, SPI_DIRECT_WRITE_CONFIG_REG));
+
return 0;
}
@@ -504,6 +548,7 @@ static int orion_spi_probe(struct platform_device *pdev)
struct resource *r;
unsigned long tclk_hz;
int status = 0;
+ struct device_node *np;
master = spi_alloc_master(&pdev->dev, sizeof(*spi));
if (master == NULL) {
@@ -576,6 +621,49 @@ static int orion_spi_probe(struct platform_device *pdev)
goto out_rel_clk;
}
+ /* Scan all SPI devices of this controller for direct mapped devices */
+ for_each_available_child_of_node(pdev->dev.of_node, np) {
+ u32 cs;
+
+ /* Get chip-select number from the "reg" property */
+ status = of_property_read_u32(np, "reg", &cs);
+ if (status) {
+ dev_err(&pdev->dev,
+ "%s has no valid 'reg' property (%d)\n",
+ np->full_name, status);
+ status = 0;
+ continue;
+ }
+
+ /*
+ * Check if an address is configured for this SPI device. If
+ * not, the MBus mapping via the 'ranges' property in the 'soc'
+ * node is not configured and this device should not use the
+ * direct mode. In this case, just continue with the next
+ * device.
+ */
+ status = of_address_to_resource(pdev->dev.of_node, cs + 1, r);
+ if (status)
+ continue;
+
+ /*
+ * Only map one page for direct access. This is enough for the
+ * simple TX transfer which only writes to the first word.
+ * This needs to get extended for the direct SPI-NOR / SPI-NAND
+ * support, once this gets implemented.
+ */
+ spi->direct_access[cs].vaddr = devm_ioremap(&pdev->dev,
+ r->start,
+ PAGE_SIZE);
+ if (!spi->direct_access[cs].vaddr) {
+ status = -ENOMEM;
+ goto out_rel_clk;
+ }
+ spi->direct_access[cs].size = PAGE_SIZE;
+
+ dev_info(&pdev->dev, "CS%d configured for direct access\n", cs);
+ }
+
pm_runtime_set_active(&pdev->dev);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
diff --git a/drivers/spi/spi-pic32-sqi.c b/drivers/spi/spi-pic32-sqi.c
index ca3c8d94b..c41abddab 100644
--- a/drivers/spi/spi-pic32-sqi.c
+++ b/drivers/spi/spi-pic32-sqi.c
@@ -354,6 +354,7 @@ static int pic32_sqi_one_message(struct spi_master *master,
struct spi_transfer *xfer;
struct pic32_sqi *sqi;
int ret = 0, mode;
+ unsigned long timeout;
u32 val;
sqi = spi_master_get_devdata(master);
@@ -419,10 +420,10 @@ static int pic32_sqi_one_message(struct spi_master *master,
writel(val, sqi->regs + PESQI_BD_CTRL_REG);
/* wait for xfer completion */
- ret = wait_for_completion_timeout(&sqi->xfer_done, 5 * HZ);
- if (ret <= 0) {
+ timeout = wait_for_completion_timeout(&sqi->xfer_done, 5 * HZ);
+ if (timeout == 0) {
dev_err(&sqi->master->dev, "wait timedout/interrupted\n");
- ret = -EIO;
+ ret = -ETIMEDOUT;
msg->status = ret;
} else {
/* success */
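
Both PIC32 fixes here are the same cleanup: wait_for_completion_timeout() returns an unsigned long, 0 on timeout and otherwise the jiffies remaining, so keeping the result in a signed int and testing for <= 0 was misleading, and -ETIMEDOUT describes the failure more precisely than -EIO. The canonical shape, with 'done' standing in for the driver's completion:

	unsigned long timeout;

	timeout = wait_for_completion_timeout(&done, 5 * HZ);
	if (!timeout)
		return -ETIMEDOUT;	/* zero means the wait expired */
	/* nonzero: at least 'timeout' jiffies were left on the clock */
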
diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c
index 73db87f80..fefb688a3 100644
--- a/drivers/spi/spi-pic32.c
+++ b/drivers/spi/spi-pic32.c
@@ -507,6 +507,7 @@ static int pic32_spi_one_transfer(struct spi_master *master,
{
struct pic32_spi *pic32s;
bool dma_issued = false;
+ unsigned long timeout;
int ret;
pic32s = spi_master_get_devdata(master);
@@ -553,8 +554,8 @@ static int pic32_spi_one_transfer(struct spi_master *master,
}
/* wait for completion */
- ret = wait_for_completion_timeout(&pic32s->xfer_done, 2 * HZ);
- if (ret <= 0) {
+ timeout = wait_for_completion_timeout(&pic32s->xfer_done, 2 * HZ);
+ if (timeout == 0) {
dev_err(&spi->dev, "wait error/timedout\n");
if (dma_issued) {
dmaengine_terminate_all(master->dma_rx);
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index a18a03d0a..db3ae1dd8 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -20,79 +20,6 @@
#include "spi-pxa2xx.h"
-static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data,
- enum dma_data_direction dir)
-{
- int i, nents, len = drv_data->len;
- struct scatterlist *sg;
- struct device *dmadev;
- struct sg_table *sgt;
- void *buf, *pbuf;
-
- if (dir == DMA_TO_DEVICE) {
- dmadev = drv_data->tx_chan->device->dev;
- sgt = &drv_data->tx_sgt;
- buf = drv_data->tx;
- } else {
- dmadev = drv_data->rx_chan->device->dev;
- sgt = &drv_data->rx_sgt;
- buf = drv_data->rx;
- }
-
- nents = DIV_ROUND_UP(len, SZ_2K);
- if (nents != sgt->nents) {
- int ret;
-
- sg_free_table(sgt);
- ret = sg_alloc_table(sgt, nents, GFP_ATOMIC);
- if (ret)
- return ret;
- }
-
- pbuf = buf;
- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
- size_t bytes = min_t(size_t, len, SZ_2K);
-
- sg_set_buf(sg, pbuf, bytes);
- pbuf += bytes;
- len -= bytes;
- }
-
- nents = dma_map_sg(dmadev, sgt->sgl, sgt->nents, dir);
- if (!nents)
- return -ENOMEM;
-
- return nents;
-}
-
-static void pxa2xx_spi_unmap_dma_buffer(struct driver_data *drv_data,
- enum dma_data_direction dir)
-{
- struct device *dmadev;
- struct sg_table *sgt;
-
- if (dir == DMA_TO_DEVICE) {
- dmadev = drv_data->tx_chan->device->dev;
- sgt = &drv_data->tx_sgt;
- } else {
- dmadev = drv_data->rx_chan->device->dev;
- sgt = &drv_data->rx_sgt;
- }
-
- dma_unmap_sg(dmadev, sgt->sgl, sgt->nents, dir);
-}
-
-static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data)
-{
- if (!drv_data->dma_mapped)
- return;
-
- pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_FROM_DEVICE);
- pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_TO_DEVICE);
-
- drv_data->dma_mapped = 0;
-}
-
static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
bool error)
{
@@ -125,8 +52,6 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
pxa2xx_spi_write(drv_data, SSTO, 0);
if (!error) {
- pxa2xx_spi_unmap_dma_buffers(drv_data);
-
msg->actual_length += drv_data->len;
msg->state = pxa2xx_spi_next_transfer(drv_data);
} else {
@@ -152,11 +77,12 @@ pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
enum dma_transfer_direction dir)
{
struct chip_data *chip = drv_data->cur_chip;
+ struct spi_transfer *xfer = drv_data->cur_transfer;
enum dma_slave_buswidth width;
struct dma_slave_config cfg;
struct dma_chan *chan;
struct sg_table *sgt;
- int nents, ret;
+ int ret;
switch (drv_data->n_bytes) {
case 1:
@@ -178,17 +104,15 @@ pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
cfg.dst_addr_width = width;
cfg.dst_maxburst = chip->dma_burst_size;
- sgt = &drv_data->tx_sgt;
- nents = drv_data->tx_nents;
- chan = drv_data->tx_chan;
+ sgt = &xfer->tx_sg;
+ chan = drv_data->master->dma_tx;
} else {
cfg.src_addr = drv_data->ssdr_physical;
cfg.src_addr_width = width;
cfg.src_maxburst = chip->dma_burst_size;
- sgt = &drv_data->rx_sgt;
- nents = drv_data->rx_nents;
- chan = drv_data->rx_chan;
+ sgt = &xfer->rx_sg;
+ chan = drv_data->master->dma_rx;
}
ret = dmaengine_slave_config(chan, &cfg);
@@ -197,46 +121,10 @@ pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
return NULL;
}
- return dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir,
+ return dmaengine_prep_slave_sg(chan, sgt->sgl, sgt->nents, dir,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
-bool pxa2xx_spi_dma_is_possible(size_t len)
-{
- return len <= MAX_DMA_LEN;
-}
-
-int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
-{
- const struct chip_data *chip = drv_data->cur_chip;
- int ret;
-
- if (!chip->enable_dma)
- return 0;
-
- /* Don't bother with DMA if we can't do even a single burst */
- if (drv_data->len < chip->dma_burst_size)
- return 0;
-
- ret = pxa2xx_spi_map_dma_buffer(drv_data, DMA_TO_DEVICE);
- if (ret <= 0) {
- dev_warn(&drv_data->pdev->dev, "failed to DMA map TX\n");
- return 0;
- }
-
- drv_data->tx_nents = ret;
-
- ret = pxa2xx_spi_map_dma_buffer(drv_data, DMA_FROM_DEVICE);
- if (ret <= 0) {
- pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_TO_DEVICE);
- dev_warn(&drv_data->pdev->dev, "failed to DMA map RX\n");
- return 0;
- }
-
- drv_data->rx_nents = ret;
- return 1;
-}
-
irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
{
u32 status;
@@ -245,8 +133,8 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
if (status & SSSR_ROR) {
dev_err(&drv_data->pdev->dev, "FIFO overrun\n");
- dmaengine_terminate_async(drv_data->rx_chan);
- dmaengine_terminate_async(drv_data->tx_chan);
+ dmaengine_terminate_async(drv_data->master->dma_rx);
+ dmaengine_terminate_async(drv_data->master->dma_tx);
pxa2xx_spi_dma_transfer_complete(drv_data, true);
return IRQ_HANDLED;
@@ -285,16 +173,15 @@ int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst)
return 0;
err_rx:
- dmaengine_terminate_async(drv_data->tx_chan);
+ dmaengine_terminate_async(drv_data->master->dma_tx);
err_tx:
- pxa2xx_spi_unmap_dma_buffers(drv_data);
return err;
}
void pxa2xx_spi_dma_start(struct driver_data *drv_data)
{
- dma_async_issue_pending(drv_data->rx_chan);
- dma_async_issue_pending(drv_data->tx_chan);
+ dma_async_issue_pending(drv_data->master->dma_rx);
+ dma_async_issue_pending(drv_data->master->dma_tx);
atomic_set(&drv_data->dma_running, 1);
}
@@ -303,21 +190,22 @@ int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
{
struct pxa2xx_spi_master *pdata = drv_data->master_info;
struct device *dev = &drv_data->pdev->dev;
+ struct spi_master *master = drv_data->master;
dma_cap_mask_t mask;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- drv_data->tx_chan = dma_request_slave_channel_compat(mask,
+ master->dma_tx = dma_request_slave_channel_compat(mask,
pdata->dma_filter, pdata->tx_param, dev, "tx");
- if (!drv_data->tx_chan)
+ if (!master->dma_tx)
return -ENODEV;
- drv_data->rx_chan = dma_request_slave_channel_compat(mask,
+ master->dma_rx = dma_request_slave_channel_compat(mask,
pdata->dma_filter, pdata->rx_param, dev, "rx");
- if (!drv_data->rx_chan) {
- dma_release_channel(drv_data->tx_chan);
- drv_data->tx_chan = NULL;
+ if (!master->dma_rx) {
+ dma_release_channel(master->dma_tx);
+ master->dma_tx = NULL;
return -ENODEV;
}
@@ -326,17 +214,17 @@ int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
void pxa2xx_spi_dma_release(struct driver_data *drv_data)
{
- if (drv_data->rx_chan) {
- dmaengine_terminate_sync(drv_data->rx_chan);
- dma_release_channel(drv_data->rx_chan);
- sg_free_table(&drv_data->rx_sgt);
- drv_data->rx_chan = NULL;
+ struct spi_master *master = drv_data->master;
+
+ if (master->dma_rx) {
+ dmaengine_terminate_sync(master->dma_rx);
+ dma_release_channel(master->dma_rx);
+ master->dma_rx = NULL;
}
- if (drv_data->tx_chan) {
- dmaengine_terminate_sync(drv_data->tx_chan);
- dma_release_channel(drv_data->tx_chan);
- sg_free_table(&drv_data->tx_sgt);
- drv_data->tx_chan = NULL;
+ if (master->dma_tx) {
+ dmaengine_terminate_sync(master->dma_tx);
+ dma_release_channel(master->dma_tx);
+ master->dma_tx = NULL;
}
}
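
The net effect of this refactor is that pxa2xx stops mapping DMA buffers itself: once the driver publishes master->can_dma() and uses master->dma_tx/dma_rx, the SPI core maps each transfer into xfer->tx_sg/rx_sg before the transfer hook runs and unmaps afterwards. What remains driver-side is roughly this sketch:

	/* Sketch: with core-managed mapping, TX prep reduces to this. */
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_slave_sg(master->dma_tx,
				       xfer->tx_sg.sgl, xfer->tx_sg.nents,
				       DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;		/* fall back to PIO or fail the transfer */
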
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 5202de94f..58d2d48e1 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -1,24 +1,26 @@
/*
* CE4100's SPI device is more or less the same one as found on PXA
*
+ * Copyright (C) 2016, Intel Corporation
*/
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
-#include <linux/of_device.h>
-#include <linux/module.h>
#include <linux/spi/pxa2xx_spi.h>
-#include <linux/clk-provider.h>
#include <linux/dmaengine.h>
#include <linux/platform_data/dma-dw.h>
enum {
- PORT_CE4100,
+ PORT_QUARK_X1000,
PORT_BYT,
+ PORT_MRFLD,
PORT_BSW0,
PORT_BSW1,
PORT_BSW2,
- PORT_QUARK_X1000,
+ PORT_CE4100,
PORT_LPT,
};
@@ -29,8 +31,11 @@ struct pxa_spi_info {
unsigned long max_clk_rate;
/* DMA channel request parameters */
+ bool (*dma_filter)(struct dma_chan *chan, void *param);
void *tx_param;
void *rx_param;
+
+ int (*setup)(struct pci_dev *pdev, struct pxa_spi_info *c);
};
static struct dw_dma_slave byt_tx_param = { .dst_id = 0 };
@@ -57,6 +62,56 @@ static bool lpss_dma_filter(struct dma_chan *chan, void *param)
return true;
}
+static int lpss_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c)
+{
+ struct pci_dev *dma_dev;
+
+ c->num_chipselect = 1;
+ c->max_clk_rate = 50000000;
+
+ dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+
+ if (c->tx_param) {
+ struct dw_dma_slave *slave = c->tx_param;
+
+ slave->dma_dev = &dma_dev->dev;
+ slave->m_master = 0;
+ slave->p_master = 1;
+ }
+
+ if (c->rx_param) {
+ struct dw_dma_slave *slave = c->rx_param;
+
+ slave->dma_dev = &dma_dev->dev;
+ slave->m_master = 0;
+ slave->p_master = 1;
+ }
+
+ c->dma_filter = lpss_dma_filter;
+ return 0;
+}
+
+static int mrfld_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c)
+{
+ switch (PCI_FUNC(dev->devfn)) {
+ case 0:
+ c->port_id = 3;
+ c->num_chipselect = 1;
+ break;
+ case 1:
+ c->port_id = 5;
+ c->num_chipselect = 4;
+ break;
+ case 2:
+ c->port_id = 6;
+ c->num_chipselect = 1;
+ break;
+ default:
+ return -ENODEV;
+ }
+ return 0;
+}
+
static struct pxa_spi_info spi_info_configs[] = {
[PORT_CE4100] = {
.type = PXA25x_SSP,
@@ -67,35 +122,36 @@ static struct pxa_spi_info spi_info_configs[] = {
[PORT_BYT] = {
.type = LPSS_BYT_SSP,
.port_id = 0,
- .num_chipselect = 1,
- .max_clk_rate = 50000000,
+ .setup = lpss_spi_setup,
.tx_param = &byt_tx_param,
.rx_param = &byt_rx_param,
},
[PORT_BSW0] = {
- .type = LPSS_BYT_SSP,
+ .type = LPSS_BSW_SSP,
.port_id = 0,
- .num_chipselect = 1,
- .max_clk_rate = 50000000,
+ .setup = lpss_spi_setup,
.tx_param = &bsw0_tx_param,
.rx_param = &bsw0_rx_param,
},
[PORT_BSW1] = {
- .type = LPSS_BYT_SSP,
+ .type = LPSS_BSW_SSP,
.port_id = 1,
- .num_chipselect = 1,
- .max_clk_rate = 50000000,
+ .setup = lpss_spi_setup,
.tx_param = &bsw1_tx_param,
.rx_param = &bsw1_rx_param,
},
[PORT_BSW2] = {
- .type = LPSS_BYT_SSP,
+ .type = LPSS_BSW_SSP,
.port_id = 2,
- .num_chipselect = 1,
- .max_clk_rate = 50000000,
+ .setup = lpss_spi_setup,
.tx_param = &bsw2_tx_param,
.rx_param = &bsw2_rx_param,
},
+ [PORT_MRFLD] = {
+ .type = PXA27x_SSP,
+ .max_clk_rate = 25000000,
+ .setup = mrfld_spi_setup,
+ },
[PORT_QUARK_X1000] = {
.type = QUARK_X1000_SSP,
.port_id = -1,
@@ -105,8 +161,7 @@ static struct pxa_spi_info spi_info_configs[] = {
[PORT_LPT] = {
.type = LPSS_LPT_SSP,
.port_id = 0,
- .num_chipselect = 1,
- .max_clk_rate = 50000000,
+ .setup = lpss_spi_setup,
.tx_param = &lpt_tx_param,
.rx_param = &lpt_rx_param,
},
@@ -122,7 +177,6 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
struct ssp_device *ssp;
struct pxa_spi_info *c;
char buf[40];
- struct pci_dev *dma_dev;
ret = pcim_enable_device(dev);
if (ret)
@@ -133,30 +187,15 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
return ret;
c = &spi_info_configs[ent->driver_data];
-
- memset(&spi_pdata, 0, sizeof(spi_pdata));
- spi_pdata.num_chipselect = (c->num_chipselect > 0) ?
- c->num_chipselect : dev->devfn;
-
- dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
-
- if (c->tx_param) {
- struct dw_dma_slave *slave = c->tx_param;
-
- slave->dma_dev = &dma_dev->dev;
- slave->m_master = 0;
- slave->p_master = 1;
+ if (c->setup) {
+ ret = c->setup(dev, c);
+ if (ret)
+ return ret;
}
- if (c->rx_param) {
- struct dw_dma_slave *slave = c->rx_param;
-
- slave->dma_dev = &dma_dev->dev;
- slave->m_master = 0;
- slave->p_master = 1;
- }
-
- spi_pdata.dma_filter = lpss_dma_filter;
+ memset(&spi_pdata, 0, sizeof(spi_pdata));
+ spi_pdata.num_chipselect = (c->num_chipselect > 0) ? c->num_chipselect : dev->devfn;
+ spi_pdata.dma_filter = c->dma_filter;
spi_pdata.tx_param = c->tx_param;
spi_pdata.rx_param = c->rx_param;
spi_pdata.enable_dma = c->rx_param && c->tx_param;
@@ -164,10 +203,6 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
ssp = &spi_pdata.ssp;
ssp->phys_base = pci_resource_start(dev, 0);
ssp->mmio_base = pcim_iomap_table(dev)[0];
- if (!ssp->mmio_base) {
- dev_err(&dev->dev, "failed to ioremap() registers\n");
- return -EIO;
- }
ssp->irq = dev->irq;
ssp->port_id = (c->port_id >= 0) ? c->port_id : dev->devfn;
ssp->type = c->type;
@@ -179,6 +214,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
return PTR_ERR(ssp->clk);
memset(&pi, 0, sizeof(pi));
+ pi.fwnode = dev->dev.fwnode;
pi.parent = &dev->dev;
pi.name = "pxa2xx-spi";
pi.id = ssp->port_id;
@@ -208,12 +244,13 @@ static void pxa2xx_spi_pci_remove(struct pci_dev *dev)
}
static const struct pci_device_id pxa2xx_spi_pci_devices[] = {
- { PCI_VDEVICE(INTEL, 0x2e6a), PORT_CE4100 },
{ PCI_VDEVICE(INTEL, 0x0935), PORT_QUARK_X1000 },
{ PCI_VDEVICE(INTEL, 0x0f0e), PORT_BYT },
+ { PCI_VDEVICE(INTEL, 0x1194), PORT_MRFLD },
{ PCI_VDEVICE(INTEL, 0x228e), PORT_BSW0 },
{ PCI_VDEVICE(INTEL, 0x2290), PORT_BSW1 },
{ PCI_VDEVICE(INTEL, 0x22ac), PORT_BSW2 },
+ { PCI_VDEVICE(INTEL, 0x2e6a), PORT_CE4100 },
{ PCI_VDEVICE(INTEL, 0x9ce6), PORT_LPT },
{ },
};
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index daf28443b..87150a104 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -919,9 +919,21 @@ static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
return clk_div << 8;
}
+static bool pxa2xx_spi_can_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct chip_data *chip = spi_get_ctldata(spi);
+
+ return chip->enable_dma &&
+ xfer->len <= MAX_DMA_LEN &&
+ xfer->len >= chip->dma_burst_size;
+}
+
static void pump_transfers(unsigned long data)
{
struct driver_data *drv_data = (struct driver_data *)data;
+ struct spi_master *master = drv_data->master;
struct spi_message *message = NULL;
struct spi_transfer *transfer = NULL;
struct spi_transfer *previous = NULL;
@@ -935,6 +947,7 @@ static void pump_transfers(unsigned long data)
u32 dma_burst = drv_data->cur_chip->dma_burst_size;
u32 change_mask = pxa2xx_spi_get_ssrc1_change_mask(drv_data);
int err;
+ int dma_mapped;
/* Get current state information */
message = drv_data->cur_msg;
@@ -969,7 +982,7 @@ static void pump_transfers(unsigned long data)
}
/* Check if we can DMA this transfer */
- if (!pxa2xx_spi_dma_is_possible(transfer->len) && chip->enable_dma) {
+ if (transfer->len > MAX_DMA_LEN && chip->enable_dma) {
/* reject already-mapped transfers; PIO won't always work */
if (message->is_dma_mapped
@@ -1046,10 +1059,10 @@ static void pump_transfers(unsigned long data)
message->state = RUNNING_STATE;
- drv_data->dma_mapped = 0;
- if (pxa2xx_spi_dma_is_possible(drv_data->len))
- drv_data->dma_mapped = pxa2xx_spi_map_dma_buffers(drv_data);
- if (drv_data->dma_mapped) {
+ dma_mapped = master->can_dma &&
+ master->can_dma(master, message->spi, transfer) &&
+ master->cur_msg_mapped;
+ if (dma_mapped) {
/* Ensure we have the correct interrupt handler */
drv_data->transfer_handler = pxa2xx_spi_dma_transfer;
@@ -1079,14 +1092,14 @@ static void pump_transfers(unsigned long data)
cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits);
if (!pxa25x_ssp_comp(drv_data))
dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
- drv_data->master->max_speed_hz
+ master->max_speed_hz
/ (1 + ((cr0 & SSCR0_SCR(0xfff)) >> 8)),
- drv_data->dma_mapped ? "DMA" : "PIO");
+ dma_mapped ? "DMA" : "PIO");
else
dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
- drv_data->master->max_speed_hz / 2
+ master->max_speed_hz / 2
/ (1 + ((cr0 & SSCR0_SCR(0x0ff)) >> 8)),
- drv_data->dma_mapped ? "DMA" : "PIO");
+ dma_mapped ? "DMA" : "PIO");
if (is_lpss_ssp(drv_data)) {
if ((pxa2xx_spi_read(drv_data, SSIRF) & 0xff)
@@ -1247,7 +1260,7 @@ static int setup(struct spi_device *spi)
chip->frm = spi->chip_select;
} else
chip->gpio_cs = -1;
- chip->enable_dma = 0;
+ chip->enable_dma = drv_data->master_info->enable_dma;
chip->timeout = TIMOUT_DFLT;
}
@@ -1266,17 +1279,9 @@ static int setup(struct spi_device *spi)
tx_hi_thres = chip_info->tx_hi_threshold;
if (chip_info->rx_threshold)
rx_thres = chip_info->rx_threshold;
- chip->enable_dma = drv_data->master_info->enable_dma;
chip->dma_threshold = 0;
if (chip_info->enable_loopback)
chip->cr1 = SSCR1_LBM;
- } else if (ACPI_HANDLE(&spi->dev)) {
- /*
- * Slave devices enumerated from ACPI namespace don't
- * usually have chip_info but we still might want to use
- * DMA with them.
- */
- chip->enable_dma = drv_data->master_info->enable_dma;
}
chip->lpss_rx_threshold = SSIRF_RxThresh(rx_thres);
@@ -1396,6 +1401,9 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
/* SPT-H */
{ PCI_VDEVICE(INTEL, 0xa129), LPSS_SPT_SSP },
{ PCI_VDEVICE(INTEL, 0xa12a), LPSS_SPT_SSP },
+ /* KBL-H */
+ { PCI_VDEVICE(INTEL, 0xa2a9), LPSS_SPT_SSP },
+ { PCI_VDEVICE(INTEL, 0xa2aa), LPSS_SPT_SSP },
/* BXT A-Step */
{ PCI_VDEVICE(INTEL, 0x0ac2), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x0ac4), LPSS_BXT_SSP },
@@ -1608,6 +1616,8 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
if (status) {
dev_dbg(dev, "no DMA channels available, using PIO\n");
platform_info->enable_dma = false;
+ } else {
+ master->can_dma = pxa2xx_spi_can_dma;
}
}
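
The spi-pxa2xx.c hunks above move the driver from hand-rolled DMA mapping onto the SPI core's can_dma contract: once probe installs the callback, the core maps each transfer's buffers into xfer->tx_sg/rx_sg before the transfer path runs and records this in master->cur_msg_mapped, which is exactly what the new dma_mapped test in pump_transfers reads. A minimal sketch of the callback side, with my_spi_can_dma and the MY_DMA_* thresholds as hypothetical stand-ins rather than names from this patch:

#include <linux/sizes.h>
#include <linux/spi/spi.h>

#define MY_DMA_MIN 16			/* hypothetical minimum burst */
#define MY_DMA_MAX SZ_64K		/* hypothetical controller limit */

/*
 * Consulted by the SPI core for every transfer; returning true makes
 * the core DMA-map xfer->tx_buf/rx_buf into xfer->tx_sg/rx_sg and set
 * master->cur_msg_mapped before calling into the driver.
 */
static bool my_spi_can_dma(struct spi_master *master,
			   struct spi_device *spi,
			   struct spi_transfer *xfer)
{
	return xfer->len >= MY_DMA_MIN && xfer->len <= MY_DMA_MAX;
}

/* In probe, once DMA channels exist: master->can_dma = my_spi_can_dma; */
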
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index e6b09000f..d217ad55c 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -50,12 +50,6 @@ struct driver_data {
struct tasklet_struct pump_transfers;
/* DMA engine support */
- struct dma_chan *rx_chan;
- struct dma_chan *tx_chan;
- struct sg_table rx_sgt;
- struct sg_table tx_sgt;
- int rx_nents;
- int tx_nents;
atomic_t dma_running;
/* Current message transfer state info */
@@ -67,7 +61,6 @@ struct driver_data {
void *tx_end;
void *rx;
void *rx_end;
- int dma_mapped;
u8 n_bytes;
int (*write)(struct driver_data *drv_data);
int (*read)(struct driver_data *drv_data);
@@ -145,8 +138,6 @@ extern void *pxa2xx_spi_next_transfer(struct driver_data *drv_data);
#define MAX_DMA_LEN SZ_64K
#define DEFAULT_DMA_CR1 (SSCR1_TSRE | SSCR1_RSRE | SSCR1_TRAIL)
-extern bool pxa2xx_spi_dma_is_possible(size_t len);
-extern int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data);
extern irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data);
extern int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst);
extern void pxa2xx_spi_dma_start(struct driver_data *drv_data);
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index c338ef113..7f1555621 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -1030,7 +1030,6 @@ static int spi_qup_remove(struct platform_device *pdev)
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- spi_master_put(master);
return 0;
}
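
The spi_master_put() dropped above was an unbalanced reference drop: spi-qup registers its master with devm_spi_register_master(), and remove() obtains the pointer via platform_get_drvdata() without ever taking a reference of its own. A sketch of the balanced pattern, with my_probe/my_remove as illustrative names rather than this driver's functions:

#include <linux/platform_device.h>
#include <linux/spi/spi.h>

static int my_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	int ret;

	/* spi_alloc_master() hands back exactly one reference */
	master = spi_alloc_master(&pdev->dev, 0);
	if (!master)
		return -ENOMEM;

	platform_set_drvdata(pdev, master);

	/* devm ties unregistration to the device's lifetime */
	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret)
		spi_master_put(master);	/* drop it only on failure */
	return ret;
}

static int my_remove(struct platform_device *pdev)
{
	/* no spi_master_put() here: we never took an extra reference */
	return 0;
}
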
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 1026e180e..0f89c2169 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -142,6 +142,12 @@
/* sclk_out: spi master internal logic in rk3x can support 50MHz */
#define MAX_SCLK_OUT 50000000
+/*
+ * SPI_CTRLR1 is 16-bits, so we should support lengths of 0xffff + 1. However,
+ * the controller seems to hang when given 0x10000, so stick with this for now.
+ */
+#define ROCKCHIP_SPI_MAX_TRANLEN 0xffff
+
enum rockchip_ssi_type {
SSI_MOTO_SPI = 0,
SSI_TI_SSP,
@@ -573,6 +579,11 @@ static void rockchip_spi_config(struct rockchip_spi *rs)
dev_dbg(rs->dev, "cr0 0x%x, div %d\n", cr0, div);
}
+static size_t rockchip_spi_max_transfer_size(struct spi_device *spi)
+{
+ return ROCKCHIP_SPI_MAX_TRANLEN;
+}
+
static int rockchip_spi_transfer_one(
struct spi_master *master,
struct spi_device *spi,
@@ -589,6 +600,11 @@ static int rockchip_spi_transfer_one(
return -EINVAL;
}
+ if (xfer->len > ROCKCHIP_SPI_MAX_TRANLEN) {
+ dev_err(rs->dev, "Transfer is too long (%d)\n", xfer->len);
+ return -EINVAL;
+ }
+
rs->speed = xfer->speed_hz;
rs->bpw = xfer->bits_per_word;
rs->n_bytes = rs->bpw >> 3;
@@ -730,6 +746,7 @@ static int rockchip_spi_probe(struct platform_device *pdev)
master->prepare_message = rockchip_spi_prepare_message;
master->unprepare_message = rockchip_spi_unprepare_message;
master->transfer_one = rockchip_spi_transfer_one;
+ master->max_transfer_size = rockchip_spi_max_transfer_size;
master->handle_err = rockchip_spi_handle_err;
rs->dma_tx.ch = dma_request_chan(rs->dev, "tx");
@@ -894,9 +911,12 @@ static const struct dev_pm_ops rockchip_spi_pm = {
};
static const struct of_device_id rockchip_spi_dt_match[] = {
+ { .compatible = "rockchip,rk3036-spi", },
{ .compatible = "rockchip,rk3066-spi", },
{ .compatible = "rockchip,rk3188-spi", },
+ { .compatible = "rockchip,rk3228-spi", },
{ .compatible = "rockchip,rk3288-spi", },
+ { .compatible = "rockchip,rk3368-spi", },
{ .compatible = "rockchip,rk3399-spi", },
{ },
};
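
With the max_transfer_size hook above, the CTRLR1 limit becomes discoverable through spi_max_transfer_size(), so protocol drivers can split long buffers instead of tripping the new -EINVAL check in transfer_one. A hypothetical client doing exactly that (my_write_chunked is an illustrative name):

#include <linux/kernel.h>
#include <linux/spi/spi.h>

static int my_write_chunked(struct spi_device *spi, const void *buf,
			    size_t len)
{
	size_t max = spi_max_transfer_size(spi);	/* 0xffff on rk3x */
	int ret;

	while (len) {
		size_t chunk = min(len, max);

		ret = spi_write(spi, buf, chunk);	/* one bounded xfer */
		if (ret)
			return ret;
		buf += chunk;
		len -= chunk;
	}
	return 0;
}
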
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 5a76a5006..3c09e94cf 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -156,12 +156,14 @@ struct s3c64xx_spi_port_config {
int quirks;
bool high_speed;
bool clk_from_cmu;
+ bool clk_ioclk;
};
/**
* struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
* @clk: Pointer to the spi clock.
* @src_clk: Pointer to the clock used to generate SPI signals.
+ * @ioclk: Pointer to the i/o clock between master and slave
* @master: Pointer to the SPI Protocol master.
* @cntrlr_info: Platform specific data for the controller this driver manages.
* @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint.
@@ -181,6 +183,7 @@ struct s3c64xx_spi_driver_data {
void __iomem *regs;
struct clk *clk;
struct clk *src_clk;
+ struct clk *ioclk;
struct platform_device *pdev;
struct spi_master *master;
struct s3c64xx_spi_info *cntrlr_info;
@@ -310,44 +313,63 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
dma_async_issue_pending(dma->ch);
}
+static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct s3c64xx_spi_driver_data *sdd =
+ spi_master_get_devdata(spi->master);
+
+ if (sdd->cntrlr_info->no_cs)
+ return;
+
+ if (enable) {
+ if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO)) {
+ writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+ } else {
+ u32 ssel = readl(sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+
+ ssel |= (S3C64XX_SPI_SLAVE_AUTO |
+ S3C64XX_SPI_SLAVE_NSC_CNT_2);
+ writel(ssel, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+ }
+ } else {
+ if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
+ writel(S3C64XX_SPI_SLAVE_SIG_INACT,
+ sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+ }
+}
+
static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
{
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
dma_filter_fn filter = sdd->cntrlr_info->filter;
struct device *dev = &sdd->pdev->dev;
dma_cap_mask_t mask;
- int ret;
- if (!is_polling(sdd)) {
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- /* Acquire DMA channels */
- sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter,
- sdd->cntrlr_info->dma_rx, dev, "rx");
- if (!sdd->rx_dma.ch) {
- dev_err(dev, "Failed to get RX DMA channel\n");
- ret = -EBUSY;
- goto out;
- }
- spi->dma_rx = sdd->rx_dma.ch;
-
- sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter,
- sdd->cntrlr_info->dma_tx, dev, "tx");
- if (!sdd->tx_dma.ch) {
- dev_err(dev, "Failed to get TX DMA channel\n");
- ret = -EBUSY;
- goto out_rx;
- }
- spi->dma_tx = sdd->tx_dma.ch;
+ if (is_polling(sdd))
+ return 0;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* Acquire DMA channels */
+ sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter,
+ sdd->cntrlr_info->dma_rx, dev, "rx");
+ if (!sdd->rx_dma.ch) {
+ dev_err(dev, "Failed to get RX DMA channel\n");
+ return -EBUSY;
}
+ spi->dma_rx = sdd->rx_dma.ch;
- return 0;
+ sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter,
+ sdd->cntrlr_info->dma_tx, dev, "tx");
+ if (!sdd->tx_dma.ch) {
+ dev_err(dev, "Failed to get TX DMA channel\n");
+ dma_release_channel(sdd->rx_dma.ch);
+ return -EBUSY;
+ }
+ spi->dma_tx = sdd->tx_dma.ch;
-out_rx:
- dma_release_channel(sdd->rx_dma.ch);
-out:
- return ret;
+ return 0;
}
static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
@@ -577,9 +599,7 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
u32 val;
/* Disable Clock */
- if (sdd->port_conf->clk_from_cmu) {
- clk_disable_unprepare(sdd->src_clk);
- } else {
+ if (!sdd->port_conf->clk_from_cmu) {
val = readl(regs + S3C64XX_SPI_CLK_CFG);
val &= ~S3C64XX_SPI_ENCLK_ENABLE;
writel(val, regs + S3C64XX_SPI_CLK_CFG);
@@ -622,11 +642,8 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
writel(val, regs + S3C64XX_SPI_MODE_CFG);
if (sdd->port_conf->clk_from_cmu) {
- /* Configure Clock */
- /* There is half-multiplier before the SPI */
+ /* The src_clk clock is divided internally by 2 */
clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
- /* Enable Clock */
- clk_prepare_enable(sdd->src_clk);
} else {
/* Configure Clock */
val = readl(regs + S3C64XX_SPI_CLK_CFG);
@@ -651,16 +668,6 @@ static int s3c64xx_spi_prepare_message(struct spi_master *master,
struct spi_device *spi = msg->spi;
struct s3c64xx_spi_csinfo *cs = spi->controller_data;
- /* If Master's(controller) state differs from that needed by Slave */
- if (sdd->cur_speed != spi->max_speed_hz
- || sdd->cur_mode != spi->mode
- || sdd->cur_bpw != spi->bits_per_word) {
- sdd->cur_bpw = spi->bits_per_word;
- sdd->cur_speed = spi->max_speed_hz;
- sdd->cur_mode = spi->mode;
- s3c64xx_spi_config(sdd);
- }
-
/* Configure feedback delay */
writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
@@ -687,6 +694,7 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
sdd->cur_bpw = bpw;
sdd->cur_speed = speed;
+ sdd->cur_mode = spi->mode;
s3c64xx_spi_config(sdd);
}
@@ -706,12 +714,7 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
enable_datapath(sdd, spi, xfer, use_dma);
/* Start the signals */
- if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
- writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
- else
- writel(readl(sdd->regs + S3C64XX_SPI_SLAVE_SEL)
- | S3C64XX_SPI_SLAVE_AUTO | S3C64XX_SPI_SLAVE_NSC_CNT_2,
- sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+ s3c64xx_spi_set_cs(spi, true);
spin_unlock_irqrestore(&sdd->lock, flags);
@@ -861,16 +864,15 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
pm_runtime_mark_last_busy(&sdd->pdev->dev);
pm_runtime_put_autosuspend(&sdd->pdev->dev);
- if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
- writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+ s3c64xx_spi_set_cs(spi, false);
+
return 0;
setup_exit:
pm_runtime_mark_last_busy(&sdd->pdev->dev);
pm_runtime_put_autosuspend(&sdd->pdev->dev);
/* setup() returns with device de-selected */
- if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
- writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+ s3c64xx_spi_set_cs(spi, false);
if (gpio_is_valid(spi->cs_gpio))
gpio_free(spi->cs_gpio);
@@ -944,7 +946,9 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
sdd->cur_speed = 0;
- if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
+ if (sci->no_cs)
+ writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+ else if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
/* Disable Interrupts - we use Polling if not DMA mode */
@@ -999,6 +1003,8 @@ static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
sci->num_cs = temp;
}
+ sci->no_cs = of_property_read_bool(dev->of_node, "broken-cs");
+
return sci;
}
#else
@@ -1076,7 +1082,7 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev, "failed to get alias id, errno %d\n",
ret);
- goto err0;
+ goto err_deref_master;
}
sdd->port_id = ret;
} else {
@@ -1114,13 +1120,13 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res);
if (IS_ERR(sdd->regs)) {
ret = PTR_ERR(sdd->regs);
- goto err0;
+ goto err_deref_master;
}
if (sci->cfg_gpio && sci->cfg_gpio()) {
dev_err(&pdev->dev, "Unable to config gpio\n");
ret = -EBUSY;
- goto err0;
+ goto err_deref_master;
}
/* Setup clocks */
@@ -1128,13 +1134,13 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
if (IS_ERR(sdd->clk)) {
dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
ret = PTR_ERR(sdd->clk);
- goto err0;
+ goto err_deref_master;
}
- if (clk_prepare_enable(sdd->clk)) {
+ ret = clk_prepare_enable(sdd->clk);
+ if (ret) {
dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
- ret = -EBUSY;
- goto err0;
+ goto err_deref_master;
}
sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr);
@@ -1143,13 +1149,28 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"Unable to acquire clock '%s'\n", clk_name);
ret = PTR_ERR(sdd->src_clk);
- goto err2;
+ goto err_disable_clk;
}
- if (clk_prepare_enable(sdd->src_clk)) {
+ ret = clk_prepare_enable(sdd->src_clk);
+ if (ret) {
dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name);
- ret = -EBUSY;
- goto err2;
+ goto err_disable_clk;
+ }
+
+ if (sdd->port_conf->clk_ioclk) {
+ sdd->ioclk = devm_clk_get(&pdev->dev, "spi_ioclk");
+ if (IS_ERR(sdd->ioclk)) {
+ dev_err(&pdev->dev, "Unable to acquire 'ioclk'\n");
+ ret = PTR_ERR(sdd->ioclk);
+ goto err_disable_src_clk;
+ }
+
+ ret = clk_prepare_enable(sdd->ioclk);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't enable clock 'ioclk'\n");
+ goto err_disable_src_clk;
+ }
}
pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT);
@@ -1169,7 +1190,7 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n",
irq, ret);
- goto err3;
+ goto err_pm_put;
}
writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
@@ -1179,7 +1200,7 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
ret = devm_spi_register_master(&pdev->dev, master);
if (ret != 0) {
dev_err(&pdev->dev, "cannot register SPI master: %d\n", ret);
- goto err3;
+ goto err_pm_put;
}
dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
@@ -1193,15 +1214,17 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
return 0;
-err3:
+err_pm_put:
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
+ clk_disable_unprepare(sdd->ioclk);
+err_disable_src_clk:
clk_disable_unprepare(sdd->src_clk);
-err2:
+err_disable_clk:
clk_disable_unprepare(sdd->clk);
-err0:
+err_deref_master:
spi_master_put(master);
return ret;
@@ -1209,13 +1232,15 @@ err0:
static int s3c64xx_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
+ struct spi_master *master = platform_get_drvdata(pdev);
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
pm_runtime_get_sync(&pdev->dev);
writel(0, sdd->regs + S3C64XX_SPI_INT_EN);
+ clk_disable_unprepare(sdd->ioclk);
+
clk_disable_unprepare(sdd->src_clk);
clk_disable_unprepare(sdd->clk);
@@ -1274,6 +1299,7 @@ static int s3c64xx_spi_runtime_suspend(struct device *dev)
clk_disable_unprepare(sdd->clk);
clk_disable_unprepare(sdd->src_clk);
+ clk_disable_unprepare(sdd->ioclk);
return 0;
}
@@ -1284,17 +1310,28 @@ static int s3c64xx_spi_runtime_resume(struct device *dev)
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
int ret;
+ if (sdd->port_conf->clk_ioclk) {
+ ret = clk_prepare_enable(sdd->ioclk);
+ if (ret != 0)
+ return ret;
+ }
+
ret = clk_prepare_enable(sdd->src_clk);
if (ret != 0)
- return ret;
+ goto err_disable_ioclk;
ret = clk_prepare_enable(sdd->clk);
- if (ret != 0) {
- clk_disable_unprepare(sdd->src_clk);
- return ret;
- }
+ if (ret != 0)
+ goto err_disable_src_clk;
return 0;
+
+err_disable_src_clk:
+ clk_disable_unprepare(sdd->src_clk);
+err_disable_ioclk:
+ clk_disable_unprepare(sdd->ioclk);
+
+ return ret;
}
#endif /* CONFIG_PM */
@@ -1350,6 +1387,16 @@ static struct s3c64xx_spi_port_config exynos7_spi_port_config = {
.quirks = S3C64XX_SPI_QUIRK_CS_AUTO,
};
+static struct s3c64xx_spi_port_config exynos5433_spi_port_config = {
+ .fifo_lvl_mask = { 0x1ff, 0x7f, 0x7f, 0x7f, 0x7f, 0x1ff},
+ .rx_lvl_offset = 15,
+ .tx_st_done = 25,
+ .high_speed = true,
+ .clk_from_cmu = true,
+ .clk_ioclk = true,
+ .quirks = S3C64XX_SPI_QUIRK_CS_AUTO,
+};
+
static const struct platform_device_id s3c64xx_spi_driver_ids[] = {
{
.name = "s3c2443-spi",
@@ -1380,6 +1427,9 @@ static const struct of_device_id s3c64xx_spi_dt_match[] = {
{ .compatible = "samsung,exynos7-spi",
.data = (void *)&exynos7_spi_port_config,
},
+ { .compatible = "samsung,exynos5433-spi",
+ .data = (void *)&exynos5433_spi_port_config,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);
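
The relabelled error path above follows the standard unwind idiom: each label undoes only the steps that succeeded before the jump, in reverse order, and because the clk API ignores NULL clocks the unconditional clk_disable_unprepare(sdd->ioclk) in err_pm_put remains safe on ports that never acquired an ioclk. A compact sketch of the same shape, with struct my_priv and its clocks as hypothetical names:

#include <linux/clk.h>

struct my_priv {
	struct clk *bus_clk, *src_clk, *ioclk;
	bool has_ioclk;
};

static int my_enable_clocks(struct my_priv *p)
{
	int ret;

	ret = clk_prepare_enable(p->bus_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(p->src_clk);
	if (ret)
		goto err_disable_bus;

	if (p->has_ioclk) {
		ret = clk_prepare_enable(p->ioclk);
		if (ret)
			goto err_disable_src;
	}

	return 0;

err_disable_src:
	clk_disable_unprepare(p->src_clk);	/* reverse-order unwind */
err_disable_bus:
	clk_disable_unprepare(p->bus_clk);
	return ret;
}
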
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index a7934ab00..1de3a772e 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -45,7 +45,6 @@ struct sh_msiof_spi_priv {
void __iomem *mapbase;
struct clk *clk;
struct platform_device *pdev;
- const struct sh_msiof_chipdata *chipdata;
struct sh_msiof_spi_info *info;
struct completion done;
unsigned int tx_fifo_size;
@@ -263,6 +262,9 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_div_table); k++) {
brps = DIV_ROUND_UP(div, sh_msiof_spi_div_table[k].div);
+ /* SCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */
+ if (sh_msiof_spi_div_table[k].div == 1 && brps > 2)
+ continue;
if (brps <= 32) /* max of brdv is 32 */
break;
}
@@ -271,7 +273,7 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
scr = sh_msiof_spi_div_table[k].brdv | SCR_BRPS(brps);
sh_msiof_write(p, TSCR, scr);
- if (!(p->chipdata->master_flags & SPI_MASTER_MUST_TX))
+ if (!(p->master->flags & SPI_MASTER_MUST_TX))
sh_msiof_write(p, RSCR, scr);
}
@@ -336,7 +338,7 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
tmp |= lsb_first << MDR1_BITLSB_SHIFT;
tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
sh_msiof_write(p, TMDR1, tmp | MDR1_TRMD | TMDR1_PCON);
- if (p->chipdata->master_flags & SPI_MASTER_MUST_TX) {
+ if (p->master->flags & SPI_MASTER_MUST_TX) {
/* These bits are reserved if RX needs TX */
tmp &= ~0x0000ffff;
}
@@ -360,7 +362,7 @@ static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
{
u32 dr2 = MDR2_BITLEN1(bits) | MDR2_WDLEN1(words);
- if (tx_buf || (p->chipdata->master_flags & SPI_MASTER_MUST_TX))
+ if (tx_buf || (p->master->flags & SPI_MASTER_MUST_TX))
sh_msiof_write(p, TMDR2, dr2);
else
sh_msiof_write(p, TMDR2, dr2 | MDR2_GRPMASK1);
@@ -1152,6 +1154,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
{
struct resource *r;
struct spi_master *master;
+ const struct sh_msiof_chipdata *chipdata;
const struct of_device_id *of_id;
struct sh_msiof_spi_priv *p;
int i;
@@ -1170,10 +1173,10 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
of_id = of_match_device(sh_msiof_match, &pdev->dev);
if (of_id) {
- p->chipdata = of_id->data;
+ chipdata = of_id->data;
p->info = sh_msiof_spi_parse_dt(&pdev->dev);
} else {
- p->chipdata = (const void *)pdev->id_entry->driver_data;
+ chipdata = (const void *)pdev->id_entry->driver_data;
p->info = dev_get_platdata(&pdev->dev);
}
@@ -1217,8 +1220,8 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
/* Platform data may override FIFO sizes */
- p->tx_fifo_size = p->chipdata->tx_fifo_size;
- p->rx_fifo_size = p->chipdata->rx_fifo_size;
+ p->tx_fifo_size = chipdata->tx_fifo_size;
+ p->rx_fifo_size = chipdata->rx_fifo_size;
if (p->info->tx_fifo_override)
p->tx_fifo_size = p->info->tx_fifo_override;
if (p->info->rx_fifo_override)
@@ -1227,7 +1230,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
/* init master code */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
- master->flags = p->chipdata->master_flags;
+ master->flags = chipdata->master_flags;
master->bus_num = pdev->id;
master->dev.of_node = pdev->dev.of_node;
master->num_chipselect = p->info->num_chipselect;
diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c
index 502501187..2bf53f0e2 100644
--- a/drivers/spi/spi-sh.c
+++ b/drivers/spi/spi-sh.c
@@ -82,7 +82,6 @@ struct spi_sh_data {
int irq;
struct spi_master *master;
struct list_head queue;
- struct workqueue_struct *workqueue;
struct work_struct ws;
unsigned long cr1;
wait_queue_head_t wait;
@@ -380,7 +379,7 @@ static int spi_sh_transfer(struct spi_device *spi, struct spi_message *mesg)
spi_sh_clear_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
list_add_tail(&mesg->queue, &ss->queue);
- queue_work(ss->workqueue, &ss->ws);
+ schedule_work(&ss->ws);
spin_unlock_irqrestore(&ss->lock, flags);
@@ -425,7 +424,7 @@ static int spi_sh_remove(struct platform_device *pdev)
struct spi_sh_data *ss = platform_get_drvdata(pdev);
spi_unregister_master(ss->master);
- destroy_workqueue(ss->workqueue);
+ flush_work(&ss->ws);
free_irq(ss->irq, ss);
return 0;
@@ -484,18 +483,11 @@ static int spi_sh_probe(struct platform_device *pdev)
spin_lock_init(&ss->lock);
INIT_WORK(&ss->ws, spi_sh_work);
init_waitqueue_head(&ss->wait);
- ss->workqueue = create_singlethread_workqueue(
- dev_name(master->dev.parent));
- if (ss->workqueue == NULL) {
- dev_err(&pdev->dev, "create workqueue error\n");
- ret = -EBUSY;
- goto error1;
- }
ret = request_irq(irq, spi_sh_irq, 0, "spi_sh", ss);
if (ret < 0) {
dev_err(&pdev->dev, "request_irq error\n");
- goto error2;
+ goto error1;
}
master->num_chipselect = 2;
@@ -514,8 +506,6 @@ static int spi_sh_probe(struct platform_device *pdev)
error3:
free_irq(irq, ss);
- error2:
- destroy_workqueue(ss->workqueue);
error1:
spi_master_put(master);
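
The spi-sh change above, and the matching conversions for spi-topcliff-pch and spi-txx9 further below, all retire a dedicated single-threaded workqueue: a single work item per device never runs concurrently with itself, so the shared system workqueue suffices, with flush_work() at teardown replacing destroy_workqueue(). The pattern in isolation, with my_dev and friends as illustrative names:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_dev {
	struct work_struct ws;
};

static void my_work_fn(struct work_struct *work)
{
	struct my_dev *md = container_of(work, struct my_dev, ws);

	/* drain md's message queue here */
	(void)md;
}

static void my_init(struct my_dev *md)
{
	INIT_WORK(&md->ws, my_work_fn);
}

static void my_submit(struct my_dev *md)
{
	schedule_work(&md->ws);		/* shared system workqueue */
}

static void my_teardown(struct my_dev *md)
{
	flush_work(&md->ws);		/* wait out a queued/running instance */
}
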
diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c
index d5adf9f31..a56eca0e9 100644
--- a/drivers/spi/spi-st-ssc4.c
+++ b/drivers/spi/spi-st-ssc4.c
@@ -68,32 +68,6 @@ struct spi_st {
struct completion done;
};
-static int spi_st_clk_enable(struct spi_st *spi_st)
-{
- /*
- * Current platforms use one of the core clocks for SPI and I2C.
- * If we attempt to disable the clock, the system will hang.
- *
- * TODO: Remove this when platform supports power domains.
- */
- return 0;
-
- return clk_prepare_enable(spi_st->clk);
-}
-
-static void spi_st_clk_disable(struct spi_st *spi_st)
-{
- /*
- * Current platforms use one of the core clocks for SPI and I2C.
- * If we attempt to disable the clock, the system will hang.
- *
- * TODO: Remove this when platform supports power domains.
- */
- return;
-
- clk_disable_unprepare(spi_st->clk);
-}
-
/* Load the TX FIFO */
static void ssc_write_tx_fifo(struct spi_st *spi_st)
{
@@ -349,7 +323,7 @@ static int spi_st_probe(struct platform_device *pdev)
goto put_master;
}
- ret = spi_st_clk_enable(spi_st);
+ ret = clk_prepare_enable(spi_st->clk);
if (ret)
goto put_master;
@@ -408,7 +382,7 @@ static int spi_st_probe(struct platform_device *pdev)
return 0;
clk_disable:
- spi_st_clk_disable(spi_st);
+ clk_disable_unprepare(spi_st->clk);
put_master:
spi_master_put(master);
return ret;
@@ -419,7 +393,7 @@ static int spi_st_remove(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct spi_st *spi_st = spi_master_get_devdata(master);
- spi_st_clk_disable(spi_st);
+ clk_disable_unprepare(spi_st->clk);
pinctrl_pm_select_sleep_state(&pdev->dev);
@@ -435,7 +409,7 @@ static int spi_st_runtime_suspend(struct device *dev)
writel_relaxed(0, spi_st->base + SSC_IEN);
pinctrl_pm_select_sleep_state(dev);
- spi_st_clk_disable(spi_st);
+ clk_disable_unprepare(spi_st->clk);
return 0;
}
@@ -446,7 +420,7 @@ static int spi_st_runtime_resume(struct device *dev)
struct spi_st *spi_st = spi_master_get_devdata(master);
int ret;
- ret = spi_st_clk_enable(spi_st);
+ ret = clk_prepare_enable(spi_st->clk);
pinctrl_pm_select_default_state(dev);
return ret;
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
index cf007f3b8..4969dc106 100644
--- a/drivers/spi/spi-sun4i.c
+++ b/drivers/spi/spi-sun4i.c
@@ -167,6 +167,11 @@ static void sun4i_spi_set_cs(struct spi_device *spi, bool enable)
sun4i_spi_write(sspi, SUN4I_CTL_REG, reg);
}
+static size_t sun4i_spi_max_transfer_size(struct spi_device *spi)
+{
+ return SUN4I_FIFO_DEPTH - 1;
+}
+
static int sun4i_spi_transfer_one(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *tfr)
@@ -402,6 +407,8 @@ static int sun4i_spi_probe(struct platform_device *pdev)
}
sspi->master = master;
+ master->max_speed_hz = 100 * 1000 * 1000;
+ master->min_speed_hz = 3 * 1000;
master->set_cs = sun4i_spi_set_cs;
master->transfer_one = sun4i_spi_transfer_one;
master->num_chipselect = 4;
@@ -409,6 +416,7 @@ static int sun4i_spi_probe(struct platform_device *pdev)
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->dev.of_node = pdev->dev.of_node;
master->auto_runtime_pm = true;
+ master->max_transfer_size = sun4i_spi_max_transfer_size;
sspi->hclk = devm_clk_get(&pdev->dev, "ahb");
if (IS_ERR(sspi->hclk)) {
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index 7fce79a60..9918a57a6 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -153,6 +153,10 @@ static void sun6i_spi_set_cs(struct spi_device *spi, bool enable)
sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg);
}
+static size_t sun6i_spi_max_transfer_size(struct spi_device *spi)
+{
+ return SUN6I_FIFO_DEPTH - 1;
+}
static int sun6i_spi_transfer_one(struct spi_master *master,
struct spi_device *spi,
@@ -394,6 +398,8 @@ static int sun6i_spi_probe(struct platform_device *pdev)
}
sspi->master = master;
+ master->max_speed_hz = 100 * 1000 * 1000;
+ master->min_speed_hz = 3 * 1000;
master->set_cs = sun6i_spi_set_cs;
master->transfer_one = sun6i_spi_transfer_one;
master->num_chipselect = 4;
@@ -401,6 +407,7 @@ static int sun6i_spi_probe(struct platform_device *pdev)
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->dev.of_node = pdev->dev.of_node;
master->auto_runtime_pm = true;
+ master->max_transfer_size = sun6i_spi_max_transfer_size;
sspi->hclk = devm_clk_get(&pdev->dev, "ahb");
if (IS_ERR(sspi->hclk)) {
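
The sun4i and sun6i hunks above publish the controllers' rate envelope (3 kHz to 100 MHz) and FIFO-bounded transfer ceiling to the core. For the upper bound, core validation defaults an unset per-transfer rate and clamps it to the controller maximum; min_speed_hz advertises the slowest rate the divider can still generate, though whether the lower bound is enforced centrally is version-dependent. A paraphrase of the clamping, illustrative rather than the literal core source:

#include <linux/spi/spi.h>

/* Paraphrase of the core's per-transfer rate handling (illustrative). */
static void my_clamp_speed(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct spi_master *master = spi->master;

	if (!xfer->speed_hz)
		xfer->speed_hz = spi->max_speed_hz;	/* inherit device rate */

	if (master->max_speed_hz && xfer->speed_hz > master->max_speed_hz)
		xfer->speed_hz = master->max_speed_hz;	/* clamp to controller */
}
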
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 29ea8d2f9..ac0b07281 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -141,7 +141,7 @@ static int ti_qspi_setup(struct spi_device *spi)
u32 clk_ctrl_reg, clk_rate, clk_mask;
if (spi->master->busy) {
- dev_dbg(qspi->dev, "master busy doing other trasnfers\n");
+ dev_dbg(qspi->dev, "master busy doing other transfers\n");
return -EBUSY;
}
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 93dfcee0f..c54ee6674 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -133,8 +133,6 @@ struct pch_spi_dma_ctrl {
* @io_remap_addr: The remapped PCI base address
* @master: Pointer to the SPI master structure
* @work: Reference to work queue handler
- * @wk: Workqueue for carrying out execution of the
- * requests
* @wait: Wait queue for waking up upon receiving an
* interrupt.
* @transfer_complete: Status of SPI Transfer
@@ -169,7 +167,6 @@ struct pch_spi_data {
unsigned long io_base_addr;
struct spi_master *master;
struct work_struct work;
- struct workqueue_struct *wk;
wait_queue_head_t wait;
u8 transfer_complete;
u8 bcurrent_msg_processing;
@@ -517,8 +514,7 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
dev_dbg(&pspi->dev, "%s - Invoked list_add_tail\n", __func__);
- /* schedule work queue to run */
- queue_work(data->wk, &data->work);
+ schedule_work(&data->work);
dev_dbg(&pspi->dev, "%s - Invoked queue work\n", __func__);
retval = 0;
@@ -674,7 +670,7 @@ static void pch_spi_nomore_transfer(struct pch_spi_data *data)
*more messages)
*/
dev_dbg(&data->master->dev, "%s:Invoke queue_work\n", __func__);
- queue_work(data->wk, &data->work);
+ schedule_work(&data->work);
} else if (data->board_dat->suspend_sts ||
data->status == STATUS_EXITING) {
dev_dbg(&data->master->dev,
@@ -1266,14 +1262,7 @@ static void pch_spi_free_resources(struct pch_spi_board_data *board_dat,
{
dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);
- /* free workqueue */
- if (data->wk != NULL) {
- destroy_workqueue(data->wk);
- data->wk = NULL;
- dev_dbg(&board_dat->pdev->dev,
- "%s destroy_workqueue invoked successfully\n",
- __func__);
- }
+ flush_work(&data->work);
}
static int pch_spi_get_resources(struct pch_spi_board_data *board_dat,
@@ -1283,14 +1272,6 @@ static int pch_spi_get_resources(struct pch_spi_board_data *board_dat,
dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);
- /* create workqueue */
- data->wk = create_singlethread_workqueue(KBUILD_MODNAME);
- if (!data->wk) {
- dev_err(&board_dat->pdev->dev,
- "%s create_singlet hread_workqueue failed\n", __func__);
- retval = -EBUSY;
- goto err_return;
- }
/* reset PCH SPI h/w */
pch_spi_reset(data->master);
@@ -1299,7 +1280,6 @@ static int pch_spi_get_resources(struct pch_spi_board_data *board_dat,
dev_dbg(&board_dat->pdev->dev, "%s data->irq_reg_sts=true\n", __func__);
-err_return:
if (retval != 0) {
dev_err(&board_dat->pdev->dev,
"%s FAIL:invoking pch_spi_free_resources\n", __func__);
diff --git a/drivers/spi/spi-txx9.c b/drivers/spi/spi-txx9.c
index d69f8f8f3..7492ea346 100644
--- a/drivers/spi/spi-txx9.c
+++ b/drivers/spi/spi-txx9.c
@@ -72,7 +72,6 @@
struct txx9spi {
- struct workqueue_struct *workqueue;
struct work_struct work;
spinlock_t lock; /* protect 'queue' */
struct list_head queue;
@@ -315,7 +314,7 @@ static int txx9spi_transfer(struct spi_device *spi, struct spi_message *m)
spin_lock_irqsave(&c->lock, flags);
list_add_tail(&m->queue, &c->queue);
- queue_work(c->workqueue, &c->work);
+ schedule_work(&c->work);
spin_unlock_irqrestore(&c->lock, flags);
return 0;
@@ -374,10 +373,6 @@ static int txx9spi_probe(struct platform_device *dev)
if (ret)
goto exit;
- c->workqueue = create_singlethread_workqueue(
- dev_name(master->dev.parent));
- if (!c->workqueue)
- goto exit_busy;
c->last_chipselect = -1;
dev_info(&dev->dev, "at %#llx, irq %d, %dMHz\n",
@@ -400,8 +395,6 @@ static int txx9spi_probe(struct platform_device *dev)
exit_busy:
ret = -EBUSY;
exit:
- if (c->workqueue)
- destroy_workqueue(c->workqueue);
clk_disable(c->clk);
spi_master_put(master);
return ret;
@@ -412,7 +405,7 @@ static int txx9spi_remove(struct platform_device *dev)
struct spi_master *master = platform_get_drvdata(dev);
struct txx9spi *c = spi_master_get_devdata(master);
- destroy_workqueue(c->workqueue);
+ flush_work(&c->work);
clk_disable(c->clk);
return 0;
}
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index 300912117..bc7100b93 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -341,9 +341,10 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
if (ipif_isr & XSPI_INTR_TX_EMPTY) { /* Transmission completed */
complete(&xspi->done);
+ return IRQ_HANDLED;
}
- return IRQ_HANDLED;
+ return IRQ_NONE;
}
static int xilinx_spi_find_buffer_size(struct xilinx_spi *xspi)
@@ -455,7 +456,10 @@ static int xilinx_spi_probe(struct platform_device *pdev)
xspi->buffer_size = xilinx_spi_find_buffer_size(xspi);
xspi->irq = platform_get_irq(pdev, 0);
- if (xspi->irq >= 0) {
+ if (xspi->irq < 0 && xspi->irq != -ENXIO) {
+ ret = xspi->irq;
+ goto put_master;
+ } else if (xspi->irq >= 0) {
/* Register for SPI Interrupt */
ret = devm_request_irq(&pdev->dev, xspi->irq, xilinx_spi_irq, 0,
dev_name(&pdev->dev), xspi);
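
The xilinx_spi_irq fix above matters on shared interrupt lines: returning IRQ_HANDLED unconditionally defeats the kernel's spurious-interrupt accounting, so the handler now claims ownership only for the one event it actually services. The general shape, with the register names invented for this sketch:

#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/io.h>

#define MY_ISR			0x20	/* hypothetical status register */
#define MY_INTR_TX_EMPTY	BIT(2)	/* hypothetical TX-empty bit */

struct my_spi {
	void __iomem *regs;
	struct completion done;
};

static irqreturn_t my_spi_irq(int irq, void *dev_id)
{
	struct my_spi *xs = dev_id;
	u32 stat = readl(xs->regs + MY_ISR);

	writel(stat, xs->regs + MY_ISR);	/* ack what we observed */

	if (stat & MY_INTR_TX_EMPTY) {		/* the one event we handle */
		complete(&xs->done);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;	/* not ours: lets the core spot spurious IRQs */
}
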
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 77e6e4595..200ca228d 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -622,6 +622,8 @@ void spi_unregister_device(struct spi_device *spi)
if (spi->dev.of_node)
of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
+ if (ACPI_COMPANION(&spi->dev))
+ acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
device_unregister(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);
@@ -849,6 +851,20 @@ static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
return 0;
}
#else /* !CONFIG_HAS_DMA */
+static inline int spi_map_buf(struct spi_master *master,
+ struct device *dev, struct sg_table *sgt,
+ void *buf, size_t len,
+ enum dma_data_direction dir)
+{
+ return -EINVAL;
+}
+
+static inline void spi_unmap_buf(struct spi_master *master,
+ struct device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+}
+
static inline int __spi_map_msg(struct spi_master *master,
struct spi_message *msg)
{
@@ -944,7 +960,7 @@ static int spi_transfer_one_message(struct spi_master *master,
struct spi_transfer *xfer;
bool keep_cs = false;
int ret = 0;
- unsigned long ms = 1;
+ unsigned long long ms = 1;
struct spi_statistics *statm = &master->statistics;
struct spi_statistics *stats = &msg->spi->statistics;
@@ -975,9 +991,13 @@ static int spi_transfer_one_message(struct spi_master *master,
if (ret > 0) {
ret = 0;
- ms = xfer->len * 8 * 1000 / xfer->speed_hz;
+ ms = 8LL * 1000LL * xfer->len;
+ do_div(ms, xfer->speed_hz);
ms += ms + 100; /* some tolerance */
+ if (ms > UINT_MAX)
+ ms = UINT_MAX;
+
ms = wait_for_completion_timeout(&master->xfer_completion,
msecs_to_jiffies(ms));
}
@@ -1055,7 +1075,6 @@ EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
* __spi_pump_messages - function which processes spi message queue
* @master: master to process queue for
* @in_kthread: true if we are in the context of the message pump thread
- * @bus_locked: true if the bus mutex is held when calling this function
*
* This function checks if there is any spi message in the queue that
* needs processing and if so call out to the driver to initialize hardware
@@ -1065,8 +1084,7 @@ EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
* inside spi_sync(); the queue extraction handling at the top of the
* function should deal with this safely.
*/
-static void __spi_pump_messages(struct spi_master *master, bool in_kthread,
- bool bus_locked)
+static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
{
unsigned long flags;
bool was_busy = false;
@@ -1138,11 +1156,14 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread,
master->busy = true;
spin_unlock_irqrestore(&master->queue_lock, flags);
+ mutex_lock(&master->io_mutex);
+
if (!was_busy && master->auto_runtime_pm) {
ret = pm_runtime_get_sync(master->dev.parent);
if (ret < 0) {
dev_err(&master->dev, "Failed to power device: %d\n",
ret);
+ mutex_unlock(&master->io_mutex);
return;
}
}
@@ -1158,13 +1179,11 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread,
if (master->auto_runtime_pm)
pm_runtime_put(master->dev.parent);
+ mutex_unlock(&master->io_mutex);
return;
}
}
- if (!bus_locked)
- mutex_lock(&master->bus_lock_mutex);
-
trace_spi_message_start(master->cur_msg);
if (master->prepare_message) {
@@ -1194,8 +1213,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread,
}
out:
- if (!bus_locked)
- mutex_unlock(&master->bus_lock_mutex);
+ mutex_unlock(&master->io_mutex);
/* Prod the scheduler in case transfer_one() was busy waiting */
if (!ret)
@@ -1211,7 +1229,7 @@ static void spi_pump_messages(struct kthread_work *work)
struct spi_master *master =
container_of(work, struct spi_master, pump_messages);
- __spi_pump_messages(master, true, master->bus_lock_flag);
+ __spi_pump_messages(master, true);
}
static int spi_init_queue(struct spi_master *master)
@@ -1646,18 +1664,15 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
return 1;
}
-static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
- void *data, void **return_value)
+static acpi_status acpi_register_spi_device(struct spi_master *master,
+ struct acpi_device *adev)
{
- struct spi_master *master = data;
struct list_head resource_list;
- struct acpi_device *adev;
struct spi_device *spi;
int ret;
- if (acpi_bus_get_device(handle, &adev))
- return AE_OK;
- if (acpi_bus_get_status(adev) || !adev->status.present)
+ if (acpi_bus_get_status(adev) || !adev->status.present ||
+ acpi_device_enumerated(adev))
return AE_OK;
spi = spi_alloc_device(master);
@@ -1683,6 +1698,8 @@ static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
if (spi->irq < 0)
spi->irq = acpi_dev_gpio_irq_get(adev, 0);
+ acpi_device_set_enumerated(adev);
+
adev->power.flags.ignore_parent = true;
strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
if (spi_add_device(spi)) {
@@ -1695,6 +1712,18 @@ static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
return AE_OK;
}
+static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
+ void *data, void **return_value)
+{
+ struct spi_master *master = data;
+ struct acpi_device *adev;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+
+ return acpi_register_spi_device(master, adev);
+}
+
static void acpi_register_spi_devices(struct spi_master *master)
{
acpi_status status;
@@ -1873,6 +1902,7 @@ int spi_register_master(struct spi_master *master)
spin_lock_init(&master->queue_lock);
spin_lock_init(&master->bus_lock_spinlock);
mutex_init(&master->bus_lock_mutex);
+ mutex_init(&master->io_mutex);
master->bus_lock_flag = 0;
init_completion(&master->xfer_completion);
if (!master->max_dma_len)
@@ -2725,6 +2755,7 @@ int spi_flash_read(struct spi_device *spi,
{
struct spi_master *master = spi->master;
+ struct device *rx_dev = NULL;
int ret;
if ((msg->opcode_nbits == SPI_NBITS_DUAL ||
@@ -2750,9 +2781,24 @@ int spi_flash_read(struct spi_device *spi,
return ret;
}
}
+
mutex_lock(&master->bus_lock_mutex);
+ mutex_lock(&master->io_mutex);
+ if (master->dma_rx) {
+ rx_dev = master->dma_rx->device->dev;
+ ret = spi_map_buf(master, rx_dev, &msg->rx_sg,
+ msg->buf, msg->len,
+ DMA_FROM_DEVICE);
+ if (!ret)
+ msg->cur_msg_mapped = true;
+ }
ret = master->spi_flash_read(spi, msg);
+ if (msg->cur_msg_mapped)
+ spi_unmap_buf(master, rx_dev, &msg->rx_sg,
+ DMA_FROM_DEVICE);
+ mutex_unlock(&master->io_mutex);
mutex_unlock(&master->bus_lock_mutex);
+
if (master->auto_runtime_pm)
pm_runtime_put(master->dev.parent);
@@ -2772,8 +2818,7 @@ static void spi_complete(void *arg)
complete(arg);
}
-static int __spi_sync(struct spi_device *spi, struct spi_message *message,
- int bus_locked)
+static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
DECLARE_COMPLETION_ONSTACK(done);
int status;
@@ -2791,9 +2836,6 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message,
SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync);
SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
- if (!bus_locked)
- mutex_lock(&master->bus_lock_mutex);
-
/* If we're not using the legacy transfer method then we will
* try to transfer in the calling context so special case.
* This code would be less tricky if we could remove the
@@ -2811,9 +2853,6 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message,
status = spi_async_locked(spi, message);
}
- if (!bus_locked)
- mutex_unlock(&master->bus_lock_mutex);
-
if (status == 0) {
/* Push out the messages in the calling context if we
* can.
@@ -2823,7 +2862,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message,
spi_sync_immediate);
SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
spi_sync_immediate);
- __spi_pump_messages(master, false, bus_locked);
+ __spi_pump_messages(master, false);
}
wait_for_completion(&done);
@@ -2856,7 +2895,13 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message,
*/
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
- return __spi_sync(spi, message, spi->master->bus_lock_flag);
+ int ret;
+
+ mutex_lock(&spi->master->bus_lock_mutex);
+ ret = __spi_sync(spi, message);
+ mutex_unlock(&spi->master->bus_lock_mutex);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(spi_sync);
@@ -2878,7 +2923,7 @@ EXPORT_SYMBOL_GPL(spi_sync);
*/
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
- return __spi_sync(spi, message, 1);
+ return __spi_sync(spi, message);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);
@@ -3107,6 +3152,77 @@ static struct notifier_block spi_of_notifier = {
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
+#if IS_ENABLED(CONFIG_ACPI)
+static int spi_acpi_master_match(struct device *dev, const void *data)
+{
+ return ACPI_COMPANION(dev->parent) == data;
+}
+
+static int spi_acpi_device_match(struct device *dev, void *data)
+{
+ return ACPI_COMPANION(dev) == data;
+}
+
+static struct spi_master *acpi_spi_find_master_by_adev(struct acpi_device *adev)
+{
+ struct device *dev;
+
+ dev = class_find_device(&spi_master_class, NULL, adev,
+ spi_acpi_master_match);
+ if (!dev)
+ return NULL;
+
+ return container_of(dev, struct spi_master, dev);
+}
+
+static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match);
+
+ return dev ? to_spi_device(dev) : NULL;
+}
+
+static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
+ void *arg)
+{
+ struct acpi_device *adev = arg;
+ struct spi_master *master;
+ struct spi_device *spi;
+
+ switch (value) {
+ case ACPI_RECONFIG_DEVICE_ADD:
+ master = acpi_spi_find_master_by_adev(adev->parent);
+ if (!master)
+ break;
+
+ acpi_register_spi_device(master, adev);
+ put_device(&master->dev);
+ break;
+ case ACPI_RECONFIG_DEVICE_REMOVE:
+ if (!acpi_device_enumerated(adev))
+ break;
+
+ spi = acpi_spi_find_device_by_adev(adev);
+ if (!spi)
+ break;
+
+ spi_unregister_device(spi);
+ put_device(&spi->dev);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block spi_acpi_notifier = {
+ .notifier_call = acpi_spi_notify,
+};
+#else
+extern struct notifier_block spi_acpi_notifier;
+#endif
+
static int __init spi_init(void)
{
int status;
@@ -3127,6 +3243,8 @@ static int __init spi_init(void)
if (IS_ENABLED(CONFIG_OF_DYNAMIC))
WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
+ if (IS_ENABLED(CONFIG_ACPI))
+ WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
return 0;
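
The timeout hunk in spi_transfer_one_message above fixes a plain 32-bit overflow: for a 16 MiB transfer, xfer->len * 8 * 1000 is already about 1.3e11, far past what a 32-bit unsigned long holds, so large transfers at low clock rates could compute a near-zero timeout and fail spuriously. A hypothetical helper mirroring the corrected math (the function name is mine, not the core's):

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/math64.h>

static unsigned long my_xfer_timeout(size_t len, u32 speed_hz)
{
	unsigned long long ms;

	ms = 8LL * 1000LL * len;	/* bits on the wire, scaled to ms */
	do_div(ms, speed_hz);		/* 64-bit divide, no overflow */
	ms += ms + 100;			/* double it plus 100 ms tolerance */

	if (ms > UINT_MAX)		/* msecs_to_jiffies() takes a uint */
		ms = UINT_MAX;

	return msecs_to_jiffies(ms);
}
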
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index e3c19f30f..2e05046f8 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -29,6 +29,7 @@
#include <linux/compat.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/acpi.h>
#include <linux/spi/spi.h>
#include <linux/spi/spidev.h>
@@ -700,6 +701,43 @@ static const struct of_device_id spidev_dt_ids[] = {
MODULE_DEVICE_TABLE(of, spidev_dt_ids);
#endif
+#ifdef CONFIG_ACPI
+
+/* Dummy SPI devices not to be used in production systems */
+#define SPIDEV_ACPI_DUMMY 1
+
+static const struct acpi_device_id spidev_acpi_ids[] = {
+ /*
+ * The ACPI SPT000* devices are only meant for development and
+ * testing. Systems used in production should have a proper ACPI
+ * description of the connected peripheral and they should also use
+ * a proper driver instead of poking directly to the SPI bus.
+ */
+ { "SPT0001", SPIDEV_ACPI_DUMMY },
+ { "SPT0002", SPIDEV_ACPI_DUMMY },
+ { "SPT0003", SPIDEV_ACPI_DUMMY },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, spidev_acpi_ids);
+
+static void spidev_probe_acpi(struct spi_device *spi)
+{
+ const struct acpi_device_id *id;
+
+ if (!has_acpi_companion(&spi->dev))
+ return;
+
+ id = acpi_match_device(spidev_acpi_ids, &spi->dev);
+ if (WARN_ON(!id))
+ return;
+
+ if (id->driver_data == SPIDEV_ACPI_DUMMY)
+ dev_warn(&spi->dev, "do not use this driver in production systems!\n");
+}
+#else
+static inline void spidev_probe_acpi(struct spi_device *spi) {}
+#endif
+
/*-------------------------------------------------------------------------*/
static int spidev_probe(struct spi_device *spi)
@@ -719,6 +757,8 @@ static int spidev_probe(struct spi_device *spi)
!of_match_device(spidev_dt_ids, &spi->dev));
}
+ spidev_probe_acpi(spi);
+
/* Allocate driver data */
spidev = kzalloc(sizeof(*spidev), GFP_KERNEL);
if (!spidev)
@@ -789,6 +829,7 @@ static struct spi_driver spidev_spi_driver = {
.driver = {
.name = "spidev",
.of_match_table = of_match_ptr(spidev_dt_ids),
+ .acpi_match_table = ACPI_PTR(spidev_acpi_ids),
},
.probe = spidev_probe,
.remove = spidev_remove,
diff --git a/drivers/ssb/driver_gpio.c b/drivers/ssb/driver_gpio.c
index 180e027b1..796e22037 100644
--- a/drivers/ssb/driver_gpio.c
+++ b/drivers/ssb/driver_gpio.c
@@ -23,7 +23,7 @@
**************************************************/
#if IS_ENABLED(CONFIG_SSB_EMBEDDED)
-static int ssb_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
+static int ssb_gpio_to_irq(struct gpio_chip *chip, unsigned int gpio)
{
struct ssb_bus *bus = gpiochip_get_data(chip);
@@ -38,14 +38,14 @@ static int ssb_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
* ChipCommon
**************************************************/
-static int ssb_gpio_chipco_get_value(struct gpio_chip *chip, unsigned gpio)
+static int ssb_gpio_chipco_get_value(struct gpio_chip *chip, unsigned int gpio)
{
struct ssb_bus *bus = gpiochip_get_data(chip);
return !!ssb_chipco_gpio_in(&bus->chipco, 1 << gpio);
}
-static void ssb_gpio_chipco_set_value(struct gpio_chip *chip, unsigned gpio,
+static void ssb_gpio_chipco_set_value(struct gpio_chip *chip, unsigned int gpio,
int value)
{
struct ssb_bus *bus = gpiochip_get_data(chip);
@@ -54,7 +54,7 @@ static void ssb_gpio_chipco_set_value(struct gpio_chip *chip, unsigned gpio,
}
static int ssb_gpio_chipco_direction_input(struct gpio_chip *chip,
- unsigned gpio)
+ unsigned int gpio)
{
struct ssb_bus *bus = gpiochip_get_data(chip);
@@ -63,7 +63,7 @@ static int ssb_gpio_chipco_direction_input(struct gpio_chip *chip,
}
static int ssb_gpio_chipco_direction_output(struct gpio_chip *chip,
- unsigned gpio, int value)
+ unsigned int gpio, int value)
{
struct ssb_bus *bus = gpiochip_get_data(chip);
@@ -72,7 +72,7 @@ static int ssb_gpio_chipco_direction_output(struct gpio_chip *chip,
return 0;
}
-static int ssb_gpio_chipco_request(struct gpio_chip *chip, unsigned gpio)
+static int ssb_gpio_chipco_request(struct gpio_chip *chip, unsigned int gpio)
{
struct ssb_bus *bus = gpiochip_get_data(chip);
@@ -85,7 +85,7 @@ static int ssb_gpio_chipco_request(struct gpio_chip *chip, unsigned gpio)
return 0;
}
-static void ssb_gpio_chipco_free(struct gpio_chip *chip, unsigned gpio)
+static void ssb_gpio_chipco_free(struct gpio_chip *chip, unsigned int gpio)
{
struct ssb_bus *bus = gpiochip_get_data(chip);
@@ -256,14 +256,14 @@ static int ssb_gpio_chipco_init(struct ssb_bus *bus)
#ifdef CONFIG_SSB_DRIVER_EXTIF
-static int ssb_gpio_extif_get_value(struct gpio_chip *chip, unsigned gpio)
+static int ssb_gpio_extif_get_value(struct gpio_chip *chip, unsigned int gpio)
{
struct ssb_bus *bus = gpiochip_get_data(chip);
return !!ssb_extif_gpio_in(&bus->extif, 1 << gpio);
}
-static void ssb_gpio_extif_set_value(struct gpio_chip *chip, unsigned gpio,
+static void ssb_gpio_extif_set_value(struct gpio_chip *chip, unsigned int gpio,
int value)
{
struct ssb_bus *bus = gpiochip_get_data(chip);
@@ -272,7 +272,7 @@ static void ssb_gpio_extif_set_value(struct gpio_chip *chip, unsigned gpio,
}
static int ssb_gpio_extif_direction_input(struct gpio_chip *chip,
- unsigned gpio)
+ unsigned int gpio)
{
struct ssb_bus *bus = gpiochip_get_data(chip);
@@ -281,7 +281,7 @@ static int ssb_gpio_extif_direction_input(struct gpio_chip *chip,
}
static int ssb_gpio_extif_direction_output(struct gpio_chip *chip,
- unsigned gpio, int value)
+ unsigned int gpio, int value)
{
struct ssb_bus *bus = gpiochip_get_data(chip);
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index a81bdb894..c49844498 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -102,6 +102,8 @@ source "drivers/staging/most/Kconfig"
source "drivers/staging/i4l/Kconfig"
+source "drivers/staging/ks7010/Kconfig"
+
source "drivers/staging/vhba/Kconfig"
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index d112e0819..ab0b46b1c 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -41,3 +41,4 @@ obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/
obj-$(CONFIG_WILC1000) += wilc1000/
obj-$(CONFIG_MOST) += most/
obj-$(CONFIG_ISDN_I4L) += i4l/
+obj-$(CONFIG_KS7010) += ks7010/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 6480f60eb..06e41d24e 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -24,26 +24,19 @@ config ANDROID_LOW_MEMORY_KILLER
scripts (/init.rc), and it defines priority values with minimum free memory size
for each priority.
-config SYNC
- bool "Synchronization framework"
- default n
- select ANON_INODES
- select DMA_SHARED_BUFFER
- ---help---
- This option enables the framework for synchronization between multiple
- drivers. Sync implementations can take advantage of hardware
- synchronization built into devices like GPUs.
-
config SW_SYNC
- bool "Software synchronization objects"
+ bool "Software synchronization framework"
default n
- depends on SYNC
depends on SYNC_FILE
+ depends on DEBUG_FS
---help---
A sync object driver that uses a 32bit counter to coordinate
synchronization. Useful when there is no hardware primitive backing
the synchronization.
+ WARNING: improper use of this can result in deadlocking kernel
+ drivers from userspace. Intended for test and debug only.
+
source "drivers/staging/android/ion/Kconfig"
endif # if ANDROID
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index 980d6dc4b..7ca61b77a 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -4,5 +4,4 @@ obj-y += ion/
obj-$(CONFIG_ASHMEM) += ashmem.o
obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o
-obj-$(CONFIG_SYNC) += sync.o sync_debug.o
-obj-$(CONFIG_SW_SYNC) += sw_sync.o
+obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 2650202d7..ca9a53c03 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -400,7 +400,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
name = asma->name;
/* ... and allocate the backing shmem file */
- vmfile = shmem_file_setup(name, asma->size, vma->vm_flags, 0);
+ vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
if (IS_ERR(vmfile)) {
ret = PTR_ERR(vmfile);
goto out;
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 24d2745e9..45a1b4ec4 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -72,10 +72,10 @@ static unsigned long lowmem_deathpending_timeout;
static unsigned long lowmem_count(struct shrinker *s,
struct shrink_control *sc)
{
- return global_page_state(NR_ACTIVE_ANON) +
- global_page_state(NR_ACTIVE_FILE) +
- global_page_state(NR_INACTIVE_ANON) +
- global_page_state(NR_INACTIVE_FILE);
+ return global_node_page_state(NR_ACTIVE_ANON) +
+ global_node_page_state(NR_ACTIVE_FILE) +
+ global_node_page_state(NR_INACTIVE_ANON) +
+ global_node_page_state(NR_INACTIVE_FILE);
}
static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
@@ -91,8 +91,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
short selected_oom_score_adj;
int array_size = ARRAY_SIZE(lowmem_adj);
int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
- int other_file = global_page_state(NR_FILE_PAGES) -
- global_page_state(NR_SHMEM) -
+ int other_file = global_node_page_state(NR_FILE_PAGES) -
+ global_node_page_state(NR_SHMEM) -
total_swapcache_pages();
if (lowmem_adj_size < array_size)
diff --git a/drivers/staging/android/sw_sync.c b/drivers/staging/android/sw_sync.c
index af39ff58f..115c91747 100644
--- a/drivers/staging/android/sw_sync.c
+++ b/drivers/staging/android/sw_sync.c
@@ -1,5 +1,5 @@
/*
- * drivers/base/sw_sync.c
+ * drivers/dma-buf/sw_sync.c
*
* Copyright (C) 2012 Google, Inc.
*
@@ -14,76 +14,331 @@
*
*/
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/syscalls.h>
#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/sync_file.h>
-#include "sw_sync.h"
+#include "sync_debug.h"
-struct fence *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value)
+#define CREATE_TRACE_POINTS
+#include "trace/sync.h"
+
+struct sw_sync_create_fence_data {
+ __u32 value;
+ char name[32];
+ __s32 fence; /* fd of new fence */
+};
+
+#define SW_SYNC_IOC_MAGIC 'W'
+
+#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\
+ struct sw_sync_create_fence_data)
+#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
+
+static const struct fence_ops timeline_fence_ops;
+
+static inline struct sync_pt *fence_to_sync_pt(struct fence *fence)
+{
+ if (fence->ops != &timeline_fence_ops)
+ return NULL;
+ return container_of(fence, struct sync_pt, base);
+}
+
+/**
+ * sync_timeline_create() - creates a sync object
+ * @name: sync_timeline name
+ *
+ * Creates a new sync_timeline. Returns the sync_timeline object or NULL in
+ * case of error.
+ */
+struct sync_timeline *sync_timeline_create(const char *name)
+{
+ struct sync_timeline *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return NULL;
+
+ kref_init(&obj->kref);
+ obj->context = fence_context_alloc(1);
+ strlcpy(obj->name, name, sizeof(obj->name));
+
+ INIT_LIST_HEAD(&obj->child_list_head);
+ INIT_LIST_HEAD(&obj->active_list_head);
+ spin_lock_init(&obj->child_list_lock);
+
+ sync_timeline_debug_add(obj);
+
+ return obj;
+}
+
+static void sync_timeline_free(struct kref *kref)
+{
+ struct sync_timeline *obj =
+ container_of(kref, struct sync_timeline, kref);
+
+ sync_timeline_debug_remove(obj);
+
+ kfree(obj);
+}
+
+static void sync_timeline_get(struct sync_timeline *obj)
+{
+ kref_get(&obj->kref);
+}
+
+static void sync_timeline_put(struct sync_timeline *obj)
+{
+ kref_put(&obj->kref, sync_timeline_free);
+}
+
+/**
+ * sync_timeline_signal() - signal a status change on a sync_timeline
+ * @obj: sync_timeline to signal
+ * @inc: num to increment on timeline->value
+ *
+ * A sync implementation should call this any time one of its fences
+ * has signaled or has an error condition.
+ */
+static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
+{
+ unsigned long flags;
+ struct sync_pt *pt, *next;
+
+ trace_sync_timeline(obj);
+
+ spin_lock_irqsave(&obj->child_list_lock, flags);
+
+ obj->value += inc;
+
+ list_for_each_entry_safe(pt, next, &obj->active_list_head,
+ active_list) {
+ if (fence_is_signaled_locked(&pt->base))
+ list_del_init(&pt->active_list);
+ }
+
+ spin_unlock_irqrestore(&obj->child_list_lock, flags);
+}
+
+/**
+ * sync_pt_create() - creates a sync pt
+ * @obj: fence's parent sync_timeline
+ * @size: size to allocate for this pt
+ * @value: value of the fence
+ *
+ * Creates a new sync_pt as a child of @obj. @size bytes will be
+ * allocated allowing for implementation-specific data to be kept after
+ * the generic sync_pt struct. Returns the sync_pt object or
+ * NULL in case of error.
+ */
+static struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size,
+ unsigned int value)
+{
+ unsigned long flags;
+ struct sync_pt *pt;
+
+ if (size < sizeof(*pt))
+ return NULL;
+
+ pt = kzalloc(size, GFP_KERNEL);
+ if (!pt)
+ return NULL;
+
+ spin_lock_irqsave(&obj->child_list_lock, flags);
+ sync_timeline_get(obj);
+ fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock,
+ obj->context, value);
+ list_add_tail(&pt->child_list, &obj->child_list_head);
+ INIT_LIST_HEAD(&pt->active_list);
+ spin_unlock_irqrestore(&obj->child_list_lock, flags);
+ return pt;
+}
+
+static const char *timeline_fence_get_driver_name(struct fence *fence)
+{
+ return "sw_sync";
+}
+
+static const char *timeline_fence_get_timeline_name(struct fence *fence)
+{
+ struct sync_timeline *parent = fence_parent(fence);
+
+ return parent->name;
+}
+
+static void timeline_fence_release(struct fence *fence)
{
- struct sw_sync_pt *pt;
+ struct sync_pt *pt = fence_to_sync_pt(fence);
+ struct sync_timeline *parent = fence_parent(fence);
+ unsigned long flags;
- pt = (struct sw_sync_pt *)
- sync_pt_create(&obj->obj, sizeof(struct sw_sync_pt));
+ spin_lock_irqsave(fence->lock, flags);
+ list_del(&pt->child_list);
+ if (WARN_ON_ONCE(!list_empty(&pt->active_list)))
+ list_del(&pt->active_list);
+ spin_unlock_irqrestore(fence->lock, flags);
- pt->value = value;
+ sync_timeline_put(parent);
+ fence_free(fence);
+}
+
+static bool timeline_fence_signaled(struct fence *fence)
+{
+ struct sync_timeline *parent = fence_parent(fence);
- return (struct fence *)pt;
+ return (fence->seqno > parent->value) ? false : true;
}
-EXPORT_SYMBOL(sw_sync_pt_create);
-static int sw_sync_fence_has_signaled(struct fence *fence)
+static bool timeline_fence_enable_signaling(struct fence *fence)
{
- struct sw_sync_pt *pt = (struct sw_sync_pt *)fence;
- struct sw_sync_timeline *obj =
- (struct sw_sync_timeline *)fence_parent(fence);
+ struct sync_pt *pt = fence_to_sync_pt(fence);
+ struct sync_timeline *parent = fence_parent(fence);
- return (pt->value > obj->value) ? 0 : 1;
+ if (timeline_fence_signaled(fence))
+ return false;
+
+ list_add_tail(&pt->active_list, &parent->active_list_head);
+ return true;
}
-static void sw_sync_timeline_value_str(struct sync_timeline *sync_timeline,
- char *str, int size)
+static void timeline_fence_value_str(struct fence *fence,
+ char *str, int size)
{
- struct sw_sync_timeline *timeline =
- (struct sw_sync_timeline *)sync_timeline;
- snprintf(str, size, "%d", timeline->value);
+ snprintf(str, size, "%d", fence->seqno);
}
-static void sw_sync_fence_value_str(struct fence *fence, char *str, int size)
+static void timeline_fence_timeline_value_str(struct fence *fence,
+ char *str, int size)
{
- struct sw_sync_pt *pt = (struct sw_sync_pt *)fence;
+ struct sync_timeline *parent = fence_parent(fence);
- snprintf(str, size, "%d", pt->value);
+ snprintf(str, size, "%d", parent->value);
}
-static struct sync_timeline_ops sw_sync_timeline_ops = {
- .driver_name = "sw_sync",
- .has_signaled = sw_sync_fence_has_signaled,
- .timeline_value_str = sw_sync_timeline_value_str,
- .fence_value_str = sw_sync_fence_value_str,
+static const struct fence_ops timeline_fence_ops = {
+ .get_driver_name = timeline_fence_get_driver_name,
+ .get_timeline_name = timeline_fence_get_timeline_name,
+ .enable_signaling = timeline_fence_enable_signaling,
+ .signaled = timeline_fence_signaled,
+ .wait = fence_default_wait,
+ .release = timeline_fence_release,
+ .fence_value_str = timeline_fence_value_str,
+ .timeline_value_str = timeline_fence_timeline_value_str,
};
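This table plugs the timeline model into the core fence machinery; roughly, the core's fence_is_signaled() consults .signaled and latches the result (a paraphrase of the 4.8 fence core, not part of this patch):

	if (fence->ops->signaled && fence->ops->signaled(fence)) {
		fence_signal(fence);
		return true;
	}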
-struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
+/*
+ * *WARNING*
+ *
+ * Improper use of this can result in deadlocking kernel drivers from userspace.
+ */
+
+/* opening sw_sync creates a new sync obj */
+static int sw_sync_debugfs_open(struct inode *inode, struct file *file)
{
- struct sw_sync_timeline *obj = (struct sw_sync_timeline *)
- sync_timeline_create(&sw_sync_timeline_ops,
- sizeof(struct sw_sync_timeline),
- name);
+ struct sync_timeline *obj;
+ char task_comm[TASK_COMM_LEN];
- return obj;
+ get_task_comm(task_comm, current);
+
+ obj = sync_timeline_create(task_comm);
+ if (!obj)
+ return -ENOMEM;
+
+ file->private_data = obj;
+
+ return 0;
}
-EXPORT_SYMBOL(sw_sync_timeline_create);
-void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
+static int sw_sync_debugfs_release(struct inode *inode, struct file *file)
{
- obj->value += inc;
+ struct sync_timeline *obj = file->private_data;
+
+ smp_wmb();
+
+ sync_timeline_put(obj);
+ return 0;
+}
+
+static long sw_sync_ioctl_create_fence(struct sync_timeline *obj,
+ unsigned long arg)
+{
+ int fd = get_unused_fd_flags(O_CLOEXEC);
+ int err;
+ struct sync_pt *pt;
+ struct sync_file *sync_file;
+ struct sw_sync_create_fence_data data;
+
+ if (fd < 0)
+ return fd;
+
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
+ err = -EFAULT;
+ goto err;
+ }
+
+ pt = sync_pt_create(obj, sizeof(*pt), data.value);
+ if (!pt) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ sync_file = sync_file_create(&pt->base);
+ if (!sync_file) {
+ fence_put(&pt->base);
+ err = -ENOMEM;
+ goto err;
+ }
+
+ data.fence = fd;
+ if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+ fput(sync_file->file);
+ err = -EFAULT;
+ goto err;
+ }
+
+ fd_install(fd, sync_file->file);
+
+ return 0;
+
+err:
+ put_unused_fd(fd);
+ return err;
+}
- sync_timeline_signal(&obj->obj);
+static long sw_sync_ioctl_inc(struct sync_timeline *obj, unsigned long arg)
+{
+ u32 value;
+
+ if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
+ return -EFAULT;
+
+ sync_timeline_signal(obj, value);
+
+ return 0;
+}
+
+static long sw_sync_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct sync_timeline *obj = file->private_data;
+
+ switch (cmd) {
+ case SW_SYNC_IOC_CREATE_FENCE:
+ return sw_sync_ioctl_create_fence(obj, arg);
+
+ case SW_SYNC_IOC_INC:
+ return sw_sync_ioctl_inc(obj, arg);
+
+ default:
+ return -ENOTTY;
+ }
}
-EXPORT_SYMBOL(sw_sync_timeline_inc);
+
+const struct file_operations sw_sync_debugfs_fops = {
+ .open = sw_sync_debugfs_open,
+ .release = sw_sync_debugfs_release,
+ .unlocked_ioctl = sw_sync_ioctl,
+ .compat_ioctl = sw_sync_ioctl,
+};
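Taken together, these fops expose a complete software-fence API to userspace through debugfs. A hedged usage sketch, assuming debugfs is mounted at /sys/kernel/debug and the uapi definitions from sw_sync.h:

	int tl = open("/sys/kernel/debug/sync/sw_sync", O_RDWR);
	struct sw_sync_create_fence_data data = { .value = 1 };
	u32 inc = 1;

	ioctl(tl, SW_SYNC_IOC_CREATE_FENCE, &data);	/* data.fence = fence fd */
	ioctl(tl, SW_SYNC_IOC_INC, &inc);	/* signals fences with value <= 1 */
	close(data.fence);
	close(tl);				/* releases the timeline */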
diff --git a/drivers/staging/android/sync_debug.c b/drivers/staging/android/sync_debug.c
index 5f57499c9..4c5a85595 100644
--- a/drivers/staging/android/sync_debug.c
+++ b/drivers/staging/android/sync_debug.c
@@ -15,21 +15,7 @@
*/
#include <linux/debugfs.h>
-#include <linux/export.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/poll.h>
-#include <linux/sched.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/anon_inodes.h>
-#include <linux/time64.h>
-#include <linux/sync_file.h>
-#include "sw_sync.h"
-
-#ifdef CONFIG_DEBUG_FS
+#include "sync_debug.h"
static struct dentry *dbgfs;
@@ -105,7 +91,7 @@ static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show)
seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
}
- if ((!fence || fence->ops->timeline_value_str) &&
+ if (fence->ops->timeline_value_str &&
fence->ops->fence_value_str) {
char value[64];
bool success;
@@ -113,10 +99,9 @@ static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show)
fence->ops->fence_value_str(fence, value, sizeof(value));
success = strlen(value);
- if (success)
+ if (success) {
seq_printf(s, ": %s", value);
- if (success && fence) {
fence->ops->timeline_value_str(fence, value,
sizeof(value));
@@ -133,22 +118,13 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
struct list_head *pos;
unsigned long flags;
- seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);
-
- if (obj->ops->timeline_value_str) {
- char value[64];
-
- obj->ops->timeline_value_str(obj, value, sizeof(value));
- seq_printf(s, ": %s", value);
- }
-
- seq_puts(s, "\n");
+ seq_printf(s, "%s: %d\n", obj->name, obj->value);
spin_lock_irqsave(&obj->child_list_lock, flags);
list_for_each(pos, &obj->child_list_head) {
- struct fence *fence =
- container_of(pos, struct fence, child_list);
- sync_print_fence(s, fence, false);
+ struct sync_pt *pt =
+ container_of(pos, struct sync_pt, child_list);
+ sync_print_fence(s, &pt->base, false);
}
spin_unlock_irqrestore(&obj->child_list_lock, flags);
}
@@ -209,126 +185,19 @@ static const struct file_operations sync_info_debugfs_fops = {
.release = single_release,
};
-/*
- * *WARNING*
- *
- * improper use of this can result in deadlocking kernel drivers from userspace.
- */
-
-/* opening sw_sync create a new sync obj */
-static int sw_sync_debugfs_open(struct inode *inode, struct file *file)
-{
- struct sw_sync_timeline *obj;
- char task_comm[TASK_COMM_LEN];
-
- get_task_comm(task_comm, current);
-
- obj = sw_sync_timeline_create(task_comm);
- if (!obj)
- return -ENOMEM;
-
- file->private_data = obj;
-
- return 0;
-}
-
-static int sw_sync_debugfs_release(struct inode *inode, struct file *file)
-{
- struct sw_sync_timeline *obj = file->private_data;
-
- sync_timeline_destroy(&obj->obj);
- return 0;
-}
-
-static long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj,
- unsigned long arg)
-{
- int fd = get_unused_fd_flags(O_CLOEXEC);
- int err;
- struct fence *fence;
- struct sync_file *sync_file;
- struct sw_sync_create_fence_data data;
-
- if (fd < 0)
- return fd;
-
- if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
- err = -EFAULT;
- goto err;
- }
-
- fence = sw_sync_pt_create(obj, data.value);
- if (!fence) {
- err = -ENOMEM;
- goto err;
- }
-
- sync_file = sync_file_create(fence);
- if (!sync_file) {
- fence_put(fence);
- err = -ENOMEM;
- goto err;
- }
-
- data.fence = fd;
- if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
- fput(sync_file->file);
- err = -EFAULT;
- goto err;
- }
-
- fd_install(fd, sync_file->file);
-
- return 0;
-
-err:
- put_unused_fd(fd);
- return err;
-}
-
-static long sw_sync_ioctl_inc(struct sw_sync_timeline *obj, unsigned long arg)
-{
- u32 value;
-
- if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
- return -EFAULT;
-
- sw_sync_timeline_inc(obj, value);
-
- return 0;
-}
-
-static long sw_sync_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct sw_sync_timeline *obj = file->private_data;
-
- switch (cmd) {
- case SW_SYNC_IOC_CREATE_FENCE:
- return sw_sync_ioctl_create_fence(obj, arg);
-
- case SW_SYNC_IOC_INC:
- return sw_sync_ioctl_inc(obj, arg);
-
- default:
- return -ENOTTY;
- }
-}
-
-static const struct file_operations sw_sync_debugfs_fops = {
- .open = sw_sync_debugfs_open,
- .release = sw_sync_debugfs_release,
- .unlocked_ioctl = sw_sync_ioctl,
- .compat_ioctl = sw_sync_ioctl,
-};
-
static __init int sync_debugfs_init(void)
{
dbgfs = debugfs_create_dir("sync", NULL);
- debugfs_create_file("info", 0444, dbgfs, NULL, &sync_info_debugfs_fops);
- debugfs_create_file("sw_sync", 0644, dbgfs, NULL,
- &sw_sync_debugfs_fops);
+ /*
+ * The debugfs files won't ever get removed and thus, there is
+ * no need to protect it against removal races. The use of
+ * debugfs_create_file_unsafe() is actually safe here.
+ */
+ debugfs_create_file_unsafe("info", 0444, dbgfs, NULL,
+ &sync_info_debugfs_fops);
+ debugfs_create_file_unsafe("sw_sync", 0644, dbgfs, NULL,
+ &sw_sync_debugfs_fops);
return 0;
}
@@ -359,5 +228,3 @@ void sync_dump(void)
}
}
}
-
-#endif
diff --git a/drivers/staging/android/sync_debug.h b/drivers/staging/android/sync_debug.h
new file mode 100644
index 000000000..fab66396d
--- /dev/null
+++ b/drivers/staging/android/sync_debug.h
@@ -0,0 +1,84 @@
+/*
+ * drivers/staging/android/sync_debug.h
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_SYNC_H
+#define _LINUX_SYNC_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/fence.h>
+
+#include <linux/sync_file.h>
+#include <uapi/linux/sync_file.h>
+
+/**
+ * struct sync_timeline - sync object
+ * @kref:		reference count of the sync_timeline
+ * @name:		name of the sync_timeline. Useful for debugging
+ * @context:		fence context used by fences created on this timeline
+ * @value:		current value of the timeline; a fence is signaled
+ *			once its seqno is <= @value
+ * @child_list_head: list of children sync_pts for this sync_timeline
+ * @child_list_lock: lock protecting @child_list_head and fence.status
+ * @active_list_head: list of active (unsignaled/errored) sync_pts
+ * @sync_timeline_list: membership in global sync_timeline_list
+ */
+struct sync_timeline {
+ struct kref kref;
+ char name[32];
+
+ /* protected by child_list_lock */
+ u64 context;
+ int value;
+
+ struct list_head child_list_head;
+ spinlock_t child_list_lock;
+
+ struct list_head active_list_head;
+
+ struct list_head sync_timeline_list;
+};
+
+static inline struct sync_timeline *fence_parent(struct fence *fence)
+{
+ return container_of(fence->lock, struct sync_timeline,
+ child_list_lock);
+}
+
+/**
+ * struct sync_pt - sync_pt object
+ * @base: base fence object
+ * @child_list: sync timeline child's list
+ * @active_list: sync timeline active child's list
+ */
+struct sync_pt {
+ struct fence base;
+ struct list_head child_list;
+ struct list_head active_list;
+};
+
+#ifdef CONFIG_SW_SYNC
+
+extern const struct file_operations sw_sync_debugfs_fops;
+
+void sync_timeline_debug_add(struct sync_timeline *obj);
+void sync_timeline_debug_remove(struct sync_timeline *obj);
+void sync_file_debug_add(struct sync_file *fence);
+void sync_file_debug_remove(struct sync_file *fence);
+void sync_dump(void);
+
+#else
+# define sync_timeline_debug_add(obj)
+# define sync_timeline_debug_remove(obj)
+# define sync_file_debug_add(fence)
+# define sync_file_debug_remove(fence)
+# define sync_dump()
+#endif
+
+#endif /* _LINUX_SYNC_H */
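fence_parent() deserves a note: it recovers the owning timeline purely from the fence's lock pointer, which works only because sync_pt_create() in sw_sync.c passes &obj->child_list_lock to fence_init(). As an invariant sketch:

	/* invariant, given pt = sync_pt_create(tl, sizeof(*pt), v): */
	WARN_ON(fence_parent(&pt->base) != tl);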
diff --git a/drivers/staging/android/trace/sync.h b/drivers/staging/android/trace/sync.h
index a0f80f416..6b5ce9640 100644
--- a/drivers/staging/android/trace/sync.h
+++ b/drivers/staging/android/trace/sync.h
@@ -5,7 +5,7 @@
#if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SYNC_H
-#include "../sync.h"
+#include "../sync_debug.h"
#include <linux/tracepoint.h>
TRACE_EVENT(sync_timeline,
@@ -15,21 +15,15 @@ TRACE_EVENT(sync_timeline,
TP_STRUCT__entry(
__string(name, timeline->name)
- __array(char, value, 32)
+ __field(u32, value)
),
TP_fast_assign(
__assign_str(name, timeline->name);
- if (timeline->ops->timeline_value_str) {
- timeline->ops->timeline_value_str(timeline,
- __entry->value,
- sizeof(__entry->value));
- } else {
- __entry->value[0] = '\0';
- }
+ __entry->value = timeline->value;
),
- TP_printk("name=%s value=%s", __get_str(name), __entry->value)
+ TP_printk("name=%s value=%d", __get_str(name), __entry->value)
);
#endif /* if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ) */
diff --git a/drivers/staging/comedi/comedi.h b/drivers/staging/comedi/comedi.h
index ad5297f6d..08fb26b51 100644
--- a/drivers/staging/comedi/comedi.h
+++ b/drivers/staging/comedi/comedi.h
@@ -779,7 +779,7 @@ struct comedi_subdinfo {
unsigned int flags;
unsigned int range_type;
unsigned int settling_time_0;
- unsigned insn_bits_support;
+ unsigned int insn_bits_support;
unsigned int unused[8];
};
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 629080f39..1999eed4f 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -312,8 +312,8 @@ static void comedi_file_reset(struct file *file)
}
cfp->last_attached = dev->attached;
cfp->last_detach_count = dev->detach_count;
- ACCESS_ONCE(cfp->read_subdev) = read_s;
- ACCESS_ONCE(cfp->write_subdev) = write_s;
+ WRITE_ONCE(cfp->read_subdev, read_s);
+ WRITE_ONCE(cfp->write_subdev, write_s);
}
static void comedi_file_check(struct file *file)
@@ -331,7 +331,7 @@ static struct comedi_subdevice *comedi_file_read_subdevice(struct file *file)
struct comedi_file *cfp = file->private_data;
comedi_file_check(file);
- return ACCESS_ONCE(cfp->read_subdev);
+ return READ_ONCE(cfp->read_subdev);
}
static struct comedi_subdevice *comedi_file_write_subdevice(struct file *file)
@@ -339,7 +339,7 @@ static struct comedi_subdevice *comedi_file_write_subdevice(struct file *file)
struct comedi_file *cfp = file->private_data;
comedi_file_check(file);
- return ACCESS_ONCE(cfp->write_subdev);
+ return READ_ONCE(cfp->write_subdev);
}
static int resize_async_buffer(struct comedi_device *dev,
@@ -1256,16 +1256,17 @@ static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn,
switch (insn->insn) {
case INSN_GTOD:
{
- struct timeval tv;
+ struct timespec64 tv;
if (insn->n != 2) {
ret = -EINVAL;
break;
}
- do_gettimeofday(&tv);
- data[0] = tv.tv_sec;
- data[1] = tv.tv_usec;
+ ktime_get_real_ts64(&tv);
+ /* unsigned data safe until 2106 */
+ data[0] = (unsigned int)tv.tv_sec;
+ data[1] = tv.tv_nsec / NSEC_PER_USEC;
ret = 2;
break;
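			/* For reference: 2^32 s is about 136 years past the
			 * 1970 epoch, so the unsigned 32-bit seconds value
			 * first wraps in 2106.
			 */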
@@ -1992,7 +1993,7 @@ static int do_setrsubd_ioctl(struct comedi_device *dev, unsigned long arg,
!(s_old->async->cmd.flags & CMDF_WRITE))
return -EBUSY;
- ACCESS_ONCE(cfp->read_subdev) = s_new;
+ WRITE_ONCE(cfp->read_subdev, s_new);
return 0;
}
@@ -2034,7 +2035,7 @@ static int do_setwsubd_ioctl(struct comedi_device *dev, unsigned long arg,
(s_old->async->cmd.flags & CMDF_WRITE))
return -EBUSY;
- ACCESS_ONCE(cfp->write_subdev) = s_new;
+ WRITE_ONCE(cfp->write_subdev, s_new);
return 0;
}
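The ACCESS_ONCE() conversions in these hunks follow the kernel-wide migration to READ_ONCE()/WRITE_ONCE() (ACCESS_ONCE is being phased out because it silently misbehaves on non-scalar types). The pattern, in brief:

	/* writer */
	WRITE_ONCE(cfp->read_subdev, s_new);

	/* lockless reader: sees the old or new pointer, never a torn value */
	struct comedi_subdevice *s = READ_ONCE(cfp->read_subdev);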
diff --git a/drivers/staging/comedi/drivers/addi_apci_1564.c b/drivers/staging/comedi/drivers/addi_apci_1564.c
index f1ccfbd4c..9bfb79c2e 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1564.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1564.c
@@ -21,9 +21,62 @@
* details.
*/
+/*
+ * Driver: addi_apci_1564
+ * Description: ADDI-DATA APCI-1564 Digital I/O board
+ * Devices: [ADDI-DATA] APCI-1564 (addi_apci_1564)
+ * Author: H Hartley Sweeten <hsweeten@visionengravers.com>
+ * Updated: Thu, 02 Jun 2016 13:12:46 -0700
+ * Status: untested
+ *
+ * Configuration Options: not applicable, uses comedi PCI auto config
+ *
+ * This board has the following features:
+ * - 32 optically isolated digital inputs (24V), 16 of which can
+ * generate change-of-state (COS) interrupts (channels 4 to 19)
+ * - 32 optically isolated digital outputs (10V to 36V)
+ * - 1 8-bit watchdog for resetting the outputs
+ * - 1 12-bit timer
+ * - 3 32-bit counters
+ * - 2 diagnostic inputs
+ *
+ * The COS, timer, and counter subdevices all use the dev->read_subdev to
+ * return the interrupt status. The sample data is updated and returned when
+ * any of these subdevices generate an interrupt. The sample data format is:
+ *
+ * Bit Description
+ * ----- ------------------------------------------
+ * 31 COS interrupt
+ * 30 timer interrupt
+ * 29 counter 2 interrupt
+ * 28 counter 1 interrupt
+ * 27 counter 0 interrupt
+ * 26:20 not used
+ * 19:4 COS digital input state (channels 19 to 4)
+ * 3:0 not used
+ *
+ * The COS interrupts must be configured using an INSN_CONFIG_DIGITAL_TRIG
+ * instruction before they can be enabled by an async command. The COS
+ * interrupts will stay active until canceled.
+ *
+ * The timer subdevice does not use an async command. All control is handled
+ * by the (*insn_config).
+ *
+ * FIXME: The format of the ADDI_TCW_TIMEBASE_REG is not described in the
+ * datasheet I have. The INSN_CONFIG_SET_CLOCK_SRC currently just writes
+ * the raw data[1] to this register along with the raw data[2] value to the
+ * ADDI_TCW_RELOAD_REG. If anyone tests this and can determine the actual
+ * timebase/reload operation please let me know.
+ *
+ * The counter subdevice also does not use an async command. All control is
+ * handled by the (*insn_config).
+ *
+ * FIXME: The operation of the counters is not really described in the
+ * datasheet I have. The (*insn_config) needs more work.
+ */
+
#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/sched.h>
#include "../comedi_pci.h"
#include "addi_tcw.h"
@@ -77,6 +130,7 @@
#define APCI1564_DI_REG 0x00
#define APCI1564_DI_INT_MODE1_REG 0x04
#define APCI1564_DI_INT_MODE2_REG 0x08
+#define APCI1564_DI_INT_MODE_MASK 0x000ffff0 /* chans [19:4] */
#define APCI1564_DI_INT_STATUS_REG 0x0c
#define APCI1564_DI_IRQ_REG 0x10
#define APCI1564_DI_IRQ_ENA BIT(2)
@@ -90,14 +144,7 @@
#define APCI1564_DO_INT_STATUS_VCC BIT(0)
#define APCI1564_DO_IRQ_REG 0x20
#define APCI1564_DO_IRQ_INTR BIT(0)
-#define APCI1564_WDOG_REG 0x24
-#define APCI1564_WDOG_RELOAD_REG 0x28
-#define APCI1564_WDOG_TIMEBASE_REG 0x2c
-#define APCI1564_WDOG_CTRL_REG 0x30
-#define APCI1564_WDOG_STATUS_REG 0x34
-#define APCI1564_WDOG_IRQ_REG 0x38
-#define APCI1564_WDOG_WARN_TIMEVAL_REG 0x3c
-#define APCI1564_WDOG_WARN_TIMEBASE_REG 0x40
+#define APCI1564_WDOG_IOBASE 0x24
/*
* devpriv->timer Register Map (see addi_tcw.h for register/bit defines)
@@ -111,18 +158,24 @@
*/
#define APCI1564_COUNTER(x) ((x) * 0x20)
+/*
+ * The dev->read_subdev is used to return the interrupt events along with
+ * the state of the interrupt capable inputs.
+ */
+#define APCI1564_EVENT_COS BIT(31)
+#define APCI1564_EVENT_TIMER BIT(30)
+#define APCI1564_EVENT_COUNTER(x) BIT(27 + (x)) /* counter 0-2 */
+#define APCI1564_EVENT_MASK 0xfff0000f /* all but [19:4] */
+
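A hedged sketch of decoding one sample from the read_subdev, per the layout documented at the top of the file (decode_sample is an illustrative helper, not part of this patch):

	static void decode_sample(unsigned int sample)
	{
		/* bits [19:4] hold the COS digital input state */
		unsigned int di_state = (sample >> 4) & 0xffff;

		if (sample & APCI1564_EVENT_COS)
			pr_info("COS: inputs 0x%04x\n", di_state);
		if (sample & APCI1564_EVENT_TIMER)
			pr_info("timer fired\n");
		if (sample & APCI1564_EVENT_COUNTER(0))
			pr_info("counter 0 fired\n");
	}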
struct apci1564_private {
unsigned long eeprom; /* base address of EEPROM register */
unsigned long timer; /* base address of 12-bit timer */
unsigned long counters; /* base address of 32-bit counters */
- unsigned int mode1; /* riding-edge/high level channels */
+ unsigned int mode1; /* rising-edge/high level channels */
unsigned int mode2; /* falling-edge/low level channels */
unsigned int ctrl; /* interrupt mode OR (edge) . AND (level) */
- struct task_struct *tsk_current;
};
-#include "addi-data/hwdrv_apci1564.c"
-
static int apci1564_reset(struct comedi_device *dev)
{
struct apci1564_private *devpriv = dev->private;
@@ -138,7 +191,7 @@ static int apci1564_reset(struct comedi_device *dev)
outl(0x0, dev->iobase + APCI1564_DO_INT_CTRL_REG);
/* Reset the watchdog registers */
- addi_watchdog_reset(dev->iobase + APCI1564_WDOG_REG);
+ addi_watchdog_reset(dev->iobase + APCI1564_WDOG_IOBASE);
/* Reset the timer registers */
outl(0x0, devpriv->timer + ADDI_TCW_CTRL_REG);
@@ -165,55 +218,54 @@ static irqreturn_t apci1564_interrupt(int irq, void *d)
unsigned int ctrl;
unsigned int chan;
+ s->state &= ~APCI1564_EVENT_MASK;
+
status = inl(dev->iobase + APCI1564_DI_IRQ_REG);
if (status & APCI1564_DI_IRQ_ENA) {
- /* disable the interrupt */
+ /* get the COS interrupt state and set the event flag */
+ s->state = inl(dev->iobase + APCI1564_DI_INT_STATUS_REG);
+ s->state &= APCI1564_DI_INT_MODE_MASK;
+ s->state |= APCI1564_EVENT_COS;
+
+ /* clear the interrupt */
outl(status & ~APCI1564_DI_IRQ_ENA,
dev->iobase + APCI1564_DI_IRQ_REG);
-
- s->state = inl(dev->iobase + APCI1564_DI_INT_STATUS_REG) &
- 0xffff;
- comedi_buf_write_samples(s, &s->state, 1);
- comedi_handle_events(dev, s);
-
- /* enable the interrupt */
outl(status, dev->iobase + APCI1564_DI_IRQ_REG);
}
status = inl(devpriv->timer + ADDI_TCW_IRQ_REG);
- if (status & 0x01) {
- /* Disable Timer Interrupt */
+ if (status & ADDI_TCW_IRQ) {
+ s->state |= APCI1564_EVENT_TIMER;
+
+ /* clear the interrupt */
ctrl = inl(devpriv->timer + ADDI_TCW_CTRL_REG);
outl(0x0, devpriv->timer + ADDI_TCW_CTRL_REG);
-
- /* Send a signal to from kernel to user space */
- send_sig(SIGIO, devpriv->tsk_current, 0);
-
- /* Enable Timer Interrupt */
outl(ctrl, devpriv->timer + ADDI_TCW_CTRL_REG);
}
if (devpriv->counters) {
- for (chan = 0; chan < 4; chan++) {
+ for (chan = 0; chan < 3; chan++) {
unsigned long iobase;
iobase = devpriv->counters + APCI1564_COUNTER(chan);
status = inl(iobase + ADDI_TCW_IRQ_REG);
- if (status & 0x01) {
- /* Disable Counter Interrupt */
+ if (status & ADDI_TCW_IRQ) {
+ s->state |= APCI1564_EVENT_COUNTER(chan);
+
+ /* clear the interrupt */
ctrl = inl(iobase + ADDI_TCW_CTRL_REG);
outl(0x0, iobase + ADDI_TCW_CTRL_REG);
-
- /* Send a signal to from kernel to user space */
- send_sig(SIGIO, devpriv->tsk_current, 0);
-
- /* Enable Counter Interrupt */
outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
}
}
}
+ if (s->state & APCI1564_EVENT_MASK) {
+ comedi_buf_write_samples(s, &s->state, 1);
+ comedi_handle_events(dev, s);
+ }
+
return IRQ_HANDLED;
}
@@ -255,7 +307,7 @@ static int apci1564_diag_insn_bits(struct comedi_device *dev,
/*
* Change-Of-State (COS) interrupt configuration
*
- * Channels 0 to 15 are interruptible. These channels can be configured
+ * Channels 4 to 19 are interruptible. These channels can be configured
* to generate interrupts based on AND/OR logic for the desired channels.
*
* OR logic
@@ -343,6 +395,10 @@ static int apci1564_cos_insn_config(struct comedi_device *dev,
default:
return -EINVAL;
}
+
+ /* ensure the mode bits are in-range for channels [19:4] */
+ devpriv->mode1 &= APCI1564_DI_INT_MODE_MASK;
+ devpriv->mode2 &= APCI1564_DI_INT_MODE_MASK;
break;
default:
return -EINVAL;
@@ -409,7 +465,7 @@ static int apci1564_cos_cmd(struct comedi_device *dev,
{
struct apci1564_private *devpriv = dev->private;
- if (!devpriv->ctrl) {
+ if (!devpriv->ctrl && !(devpriv->mode1 || devpriv->mode2)) {
dev_warn(dev->class_dev,
"Interrupts disabled due to mode configuration!\n");
return -EINVAL;
@@ -433,6 +489,173 @@ static int apci1564_cos_cancel(struct comedi_device *dev,
return 0;
}
+static int apci1564_timer_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct apci1564_private *devpriv = dev->private;
+ unsigned int val;
+
+ switch (data[0]) {
+ case INSN_CONFIG_ARM:
+ if (data[1] > s->maxdata)
+ return -EINVAL;
+ outl(data[1], devpriv->timer + ADDI_TCW_RELOAD_REG);
+ outl(ADDI_TCW_CTRL_IRQ_ENA | ADDI_TCW_CTRL_TIMER_ENA,
+ devpriv->timer + ADDI_TCW_CTRL_REG);
+ break;
+ case INSN_CONFIG_DISARM:
+ outl(0x0, devpriv->timer + ADDI_TCW_CTRL_REG);
+ break;
+ case INSN_CONFIG_GET_COUNTER_STATUS:
+ data[1] = 0;
+ val = inl(devpriv->timer + ADDI_TCW_CTRL_REG);
+ if (val & ADDI_TCW_CTRL_IRQ_ENA)
+ data[1] |= COMEDI_COUNTER_ARMED;
+ if (val & ADDI_TCW_CTRL_TIMER_ENA)
+ data[1] |= COMEDI_COUNTER_COUNTING;
+ val = inl(devpriv->timer + ADDI_TCW_STATUS_REG);
+ if (val & ADDI_TCW_STATUS_OVERFLOW)
+ data[1] |= COMEDI_COUNTER_TERMINAL_COUNT;
+ data[2] = COMEDI_COUNTER_ARMED | COMEDI_COUNTER_COUNTING |
+ COMEDI_COUNTER_TERMINAL_COUNT;
+ break;
+ case INSN_CONFIG_SET_CLOCK_SRC:
+ if (data[2] > s->maxdata)
+ return -EINVAL;
+ outl(data[1], devpriv->timer + ADDI_TCW_TIMEBASE_REG);
+ outl(data[2], devpriv->timer + ADDI_TCW_RELOAD_REG);
+ break;
+ case INSN_CONFIG_GET_CLOCK_SRC:
+ data[1] = inl(devpriv->timer + ADDI_TCW_TIMEBASE_REG);
+ data[2] = inl(devpriv->timer + ADDI_TCW_RELOAD_REG);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return insn->n;
+}
+
+static int apci1564_timer_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct apci1564_private *devpriv = dev->private;
+
+	/* just write the last value to the reload register */
+ if (insn->n) {
+ unsigned int val = data[insn->n - 1];
+
+ outl(val, devpriv->timer + ADDI_TCW_RELOAD_REG);
+ }
+
+ return insn->n;
+}
+
+static int apci1564_timer_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct apci1564_private *devpriv = dev->private;
+ int i;
+
+ /* return the actual value of the timer */
+ for (i = 0; i < insn->n; i++)
+ data[i] = inl(devpriv->timer + ADDI_TCW_VAL_REG);
+
+ return insn->n;
+}
+
+static int apci1564_counter_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct apci1564_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned long iobase = devpriv->counters + APCI1564_COUNTER(chan);
+ unsigned int val;
+
+ switch (data[0]) {
+ case INSN_CONFIG_ARM:
+ val = inl(iobase + ADDI_TCW_CTRL_REG);
+ val |= ADDI_TCW_CTRL_IRQ_ENA | ADDI_TCW_CTRL_CNTR_ENA;
+ outl(data[1], iobase + ADDI_TCW_RELOAD_REG);
+ outl(val, iobase + ADDI_TCW_CTRL_REG);
+ break;
+ case INSN_CONFIG_DISARM:
+ val = inl(iobase + ADDI_TCW_CTRL_REG);
+ val &= ~(ADDI_TCW_CTRL_IRQ_ENA | ADDI_TCW_CTRL_CNTR_ENA);
+ outl(val, iobase + ADDI_TCW_CTRL_REG);
+ break;
+ case INSN_CONFIG_SET_COUNTER_MODE:
+ /*
+ * FIXME: The counter operation is not described in the
+ * datasheet. For now just write the raw data[1] value to
+ * the control register.
+ */
+ outl(data[1], iobase + ADDI_TCW_CTRL_REG);
+ break;
+ case INSN_CONFIG_GET_COUNTER_STATUS:
+ data[1] = 0;
+ val = inl(iobase + ADDI_TCW_CTRL_REG);
+ if (val & ADDI_TCW_CTRL_IRQ_ENA)
+ data[1] |= COMEDI_COUNTER_ARMED;
+ if (val & ADDI_TCW_CTRL_CNTR_ENA)
+ data[1] |= COMEDI_COUNTER_COUNTING;
+ val = inl(iobase + ADDI_TCW_STATUS_REG);
+ if (val & ADDI_TCW_STATUS_OVERFLOW)
+ data[1] |= COMEDI_COUNTER_TERMINAL_COUNT;
+ data[2] = COMEDI_COUNTER_ARMED | COMEDI_COUNTER_COUNTING |
+ COMEDI_COUNTER_TERMINAL_COUNT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return insn->n;
+}
+
+static int apci1564_counter_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct apci1564_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned long iobase = devpriv->counters + APCI1564_COUNTER(chan);
+
+	/* just write the last value to the reload register */
+ if (insn->n) {
+ unsigned int val = data[insn->n - 1];
+
+ outl(val, iobase + ADDI_TCW_RELOAD_REG);
+ }
+
+ return insn->n;
+}
+
+static int apci1564_counter_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct apci1564_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned long iobase = devpriv->counters + APCI1564_COUNTER(chan);
+ int i;
+
+ /* return the actual value of the counter */
+ for (i = 0; i < insn->n; i++)
+ data[i] = inl(iobase + ADDI_TCW_VAL_REG);
+
+ return insn->n;
+}
+
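A hypothetical userspace sketch driving the counter instructions above via comedilib (device path, subdevice index, and reload value are assumptions, not part of this patch):

	comedi_t *dev = comedi_open("/dev/comedi0");
	lsampl_t cfg[2] = { INSN_CONFIG_ARM, 1000 };	/* arm, reload = 1000 */
	comedi_insn insn = {
		.insn	  = INSN_CONFIG,
		.n	  = 2,
		.data	  = cfg,
		.subdev	  = 4,			/* counter subdevice (assumed) */
		.chanspec = CR_PACK(0, 0, 0),	/* counter 0 */
	};

	comedi_do_insn(dev, &insn);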
static int apci1564_auto_attach(struct comedi_device *dev,
unsigned long context_unused)
{
@@ -501,7 +724,7 @@ static int apci1564_auto_attach(struct comedi_device *dev,
if (dev->irq) {
dev->read_subdev = s;
s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
+ s->subdev_flags = SDF_READABLE | SDF_CMD_READ | SDF_LSAMPL;
s->n_chan = 1;
s->maxdata = 1;
s->range_table = &range_digital;
@@ -543,7 +766,7 @@ static int apci1564_auto_attach(struct comedi_device *dev,
/* Initialize the watchdog subdevice */
s = &dev->subdevices[5];
- ret = addi_watchdog_init(s, dev->iobase + APCI1564_WDOG_REG);
+ ret = addi_watchdog_init(s, dev->iobase + APCI1564_WDOG_IOBASE);
if (ret)
return ret;
diff --git a/drivers/staging/comedi/drivers/adl_pci9118.c b/drivers/staging/comedi/drivers/adl_pci9118.c
index 4437ea3ab..be70bd333 100644
--- a/drivers/staging/comedi/drivers/adl_pci9118.c
+++ b/drivers/staging/comedi/drivers/adl_pci9118.c
@@ -570,7 +570,7 @@ static int pci9118_ai_cancel(struct comedi_device *dev,
/* set default config (disable burst and triggers) */
devpriv->ai_cfg = PCI9118_AI_CFG_PDTRG | PCI9118_AI_CFG_PETRG;
outl(devpriv->ai_cfg, dev->iobase + PCI9118_AI_CFG_REG);
- /* reset acqusition control */
+ /* reset acquisition control */
devpriv->ai_ctrl = 0;
outl(devpriv->ai_ctrl, dev->iobase + PCI9118_AI_CTRL_REG);
outl(0, dev->iobase + PCI9118_AI_BURST_NUM_REG);
@@ -1022,12 +1022,12 @@ static int pci9118_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
/*
* Configure analog input and load the chanlist.
- * The acqusition control bits are enabled later.
+ * The acquisition control bits are enabled later.
*/
pci9118_set_chanlist(dev, s, cmd->chanlist_len, cmd->chanlist,
devpriv->ai_add_front, devpriv->ai_add_back);
- /* Determine acqusition mode and calculate timing */
+ /* Determine acquisition mode and calculate timing */
devpriv->ai_do = 0;
if (cmd->scan_begin_src != TRIG_TIMER &&
cmd->convert_src == TRIG_TIMER) {
@@ -1097,7 +1097,7 @@ static int pci9118_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (devpriv->ai_do == 0) {
dev_err(dev->class_dev,
- "Unable to determine acqusition mode! BUG in (*do_cmdtest)?\n");
+ "Unable to determine acquisition mode! BUG in (*do_cmdtest)?\n");
return -EINVAL;
}
diff --git a/drivers/staging/comedi/drivers/adv_pci1760.c b/drivers/staging/comedi/drivers/adv_pci1760.c
index d7dd1e55e..9f525ff72 100644
--- a/drivers/staging/comedi/drivers/adv_pci1760.c
+++ b/drivers/staging/comedi/drivers/adv_pci1760.c
@@ -196,6 +196,7 @@ static int pci1760_pwm_ns_to_div(unsigned int flags, unsigned int ns)
break;
case CMDF_ROUND_DOWN:
divisor = ns / PCI1760_PWM_TIMEBASE;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
index c773b8ca6..1f9c08a84 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
@@ -1238,7 +1238,7 @@ static void disable_plx_interrupts(struct comedi_device *dev)
devpriv->plx_intcsr_bits = 0;
writel(devpriv->plx_intcsr_bits,
- devpriv->plx9080_iobase + PLX_INTRCS_REG);
+ devpriv->plx9080_iobase + PLX_REG_INTCSR);
}
static void disable_ai_interrupts(struct comedi_device *dev)
@@ -1291,14 +1291,14 @@ static void init_plx9080(struct comedi_device *dev)
void __iomem *plx_iobase = devpriv->plx9080_iobase;
devpriv->plx_control_bits =
- readl(devpriv->plx9080_iobase + PLX_CONTROL_REG);
+ readl(devpriv->plx9080_iobase + PLX_REG_CNTRL);
#ifdef __BIG_ENDIAN
- bits = BIGEND_DMA0 | BIGEND_DMA1;
+ bits = PLX_BIGEND_DMA0 | PLX_BIGEND_DMA1;
#else
bits = 0;
#endif
- writel(bits, devpriv->plx9080_iobase + PLX_BIGEND_REG);
+ writel(bits, devpriv->plx9080_iobase + PLX_REG_BIGEND);
disable_plx_interrupts(dev);
@@ -1308,38 +1308,39 @@ static void init_plx9080(struct comedi_device *dev)
/* configure dma0 mode */
bits = 0;
/* enable ready input, not sure if this is necessary */
- bits |= PLX_DMA_EN_READYIN_BIT;
+ bits |= PLX_DMAMODE_READYIEN;
/* enable bterm, not sure if this is necessary */
- bits |= PLX_EN_BTERM_BIT;
+ bits |= PLX_DMAMODE_BTERMIEN;
/* enable dma chaining */
- bits |= PLX_EN_CHAIN_BIT;
+ bits |= PLX_DMAMODE_CHAINEN;
/* enable interrupt on dma done
* (probably don't need this, since chain never finishes) */
- bits |= PLX_EN_DMA_DONE_INTR_BIT;
+ bits |= PLX_DMAMODE_DONEIEN;
/* don't increment local address during transfers
* (we are transferring from a fixed fifo register) */
- bits |= PLX_LOCAL_ADDR_CONST_BIT;
+ bits |= PLX_DMAMODE_LACONST;
/* route dma interrupt to pci bus */
- bits |= PLX_DMA_INTR_PCI_BIT;
+ bits |= PLX_DMAMODE_INTRPCI;
/* enable demand mode */
- bits |= PLX_DEMAND_MODE_BIT;
+ bits |= PLX_DMAMODE_DEMAND;
/* enable local burst mode */
- bits |= PLX_DMA_LOCAL_BURST_EN_BIT;
+ bits |= PLX_DMAMODE_BURSTEN;
/* 4020 uses 32 bit dma */
if (board->layout == LAYOUT_4020)
- bits |= PLX_LOCAL_BUS_32_WIDE_BITS;
+ bits |= PLX_DMAMODE_WIDTH32;
else /* localspace0 bus is 16 bits wide */
- bits |= PLX_LOCAL_BUS_16_WIDE_BITS;
- writel(bits, plx_iobase + PLX_DMA1_MODE_REG);
+ bits |= PLX_DMAMODE_WIDTH16;
+ writel(bits, plx_iobase + PLX_REG_DMAMODE1);
if (ao_cmd_is_supported(board))
- writel(bits, plx_iobase + PLX_DMA0_MODE_REG);
+ writel(bits, plx_iobase + PLX_REG_DMAMODE0);
/* enable interrupts on plx 9080 */
devpriv->plx_intcsr_bits |=
- ICS_AERR | ICS_PERR | ICS_PIE | ICS_PLIE | ICS_PAIE | ICS_LIE |
- ICS_DMA0_E | ICS_DMA1_E;
+ PLX_INTCSR_LSEABORTEN | PLX_INTCSR_LSEPARITYEN | PLX_INTCSR_PIEN |
+ PLX_INTCSR_PLIEN | PLX_INTCSR_PABORTIEN | PLX_INTCSR_LIOEN |
+ PLX_INTCSR_DMA0IEN | PLX_INTCSR_DMA1IEN;
writel(devpriv->plx_intcsr_bits,
- devpriv->plx9080_iobase + PLX_INTRCS_REG);
+ devpriv->plx9080_iobase + PLX_REG_INTCSR);
}
static void disable_ai_pacing(struct comedi_device *dev)
@@ -1533,8 +1534,8 @@ static int alloc_and_init_dma_members(struct comedi_device *dev)
cpu_to_le32((devpriv->ai_dma_desc_bus_addr +
((i + 1) % ai_dma_ring_count(board)) *
sizeof(devpriv->ai_dma_desc[0])) |
- PLX_DESC_IN_PCI_BIT | PLX_INTR_TERM_COUNT |
- PLX_XFER_LOCAL_TO_PCI);
+ PLX_DMADPR_DESCPCI | PLX_DMADPR_TCINTR |
+ PLX_DMADPR_XFERL2P);
}
if (ao_cmd_is_supported(board)) {
for (i = 0; i < AO_DMA_RING_COUNT; i++) {
@@ -1548,8 +1549,8 @@ static int alloc_and_init_dma_members(struct comedi_device *dev)
cpu_to_le32((devpriv->ao_dma_desc_bus_addr +
((i + 1) % (AO_DMA_RING_COUNT)) *
sizeof(devpriv->ao_dma_desc[0])) |
- PLX_DESC_IN_PCI_BIT |
- PLX_INTR_TERM_COUNT);
+ PLX_DMADPR_DESCPCI |
+ PLX_DMADPR_TCINTR);
}
}
return 0;
@@ -1613,9 +1614,9 @@ static const int i2c_low_udelay = 10;
static void i2c_set_sda(struct comedi_device *dev, int state)
{
struct pcidas64_private *devpriv = dev->private;
- static const int data_bit = CTL_EE_W;
+ static const int data_bit = PLX_CNTRL_EEWB;
void __iomem *plx_control_addr = devpriv->plx9080_iobase +
- PLX_CONTROL_REG;
+ PLX_REG_CNTRL;
if (state) {
/* set data line high */
@@ -1634,9 +1635,9 @@ static void i2c_set_sda(struct comedi_device *dev, int state)
static void i2c_set_scl(struct comedi_device *dev, int state)
{
struct pcidas64_private *devpriv = dev->private;
- static const int clock_bit = CTL_USERO;
+ static const int clock_bit = PLX_CNTRL_USERO;
void __iomem *plx_control_addr = devpriv->plx9080_iobase +
- PLX_CONTROL_REG;
+ PLX_REG_CNTRL;
if (state) {
/* set clock line high */
@@ -1707,7 +1708,7 @@ static void i2c_write(struct comedi_device *dev, unsigned int address,
*/
/* make sure we dont send anything to eeprom */
- devpriv->plx_control_bits &= ~CTL_EE_CS;
+ devpriv->plx_control_bits &= ~PLX_CNTRL_EECS;
i2c_stop(dev);
i2c_start(dev);
@@ -2367,14 +2368,8 @@ static inline void dma_start_sync(struct comedi_device *dev,
/* spinlock for plx dma control/status reg */
spin_lock_irqsave(&dev->spinlock, flags);
- if (channel)
- writeb(PLX_DMA_EN_BIT | PLX_DMA_START_BIT |
- PLX_CLEAR_DMA_INTR_BIT,
- devpriv->plx9080_iobase + PLX_DMA1_CS_REG);
- else
- writeb(PLX_DMA_EN_BIT | PLX_DMA_START_BIT |
- PLX_CLEAR_DMA_INTR_BIT,
- devpriv->plx9080_iobase + PLX_DMA0_CS_REG);
+ writeb(PLX_DMACSR_ENABLE | PLX_DMACSR_START | PLX_DMACSR_CLEARINTR,
+ devpriv->plx9080_iobase + PLX_REG_DMACSR(channel));
spin_unlock_irqrestore(&dev->spinlock, flags);
}
@@ -2552,21 +2547,17 @@ static inline void load_first_dma_descriptor(struct comedi_device *dev,
* block. Initializing them to zero seems to fix the problem.
*/
if (dma_channel) {
- writel(0,
- devpriv->plx9080_iobase + PLX_DMA1_TRANSFER_SIZE_REG);
- writel(0, devpriv->plx9080_iobase + PLX_DMA1_PCI_ADDRESS_REG);
- writel(0,
- devpriv->plx9080_iobase + PLX_DMA1_LOCAL_ADDRESS_REG);
+ writel(0, devpriv->plx9080_iobase + PLX_REG_DMASIZ1);
+ writel(0, devpriv->plx9080_iobase + PLX_REG_DMAPADR1);
+ writel(0, devpriv->plx9080_iobase + PLX_REG_DMALADR1);
writel(descriptor_bits,
- devpriv->plx9080_iobase + PLX_DMA1_DESCRIPTOR_REG);
+ devpriv->plx9080_iobase + PLX_REG_DMADPR1);
} else {
- writel(0,
- devpriv->plx9080_iobase + PLX_DMA0_TRANSFER_SIZE_REG);
- writel(0, devpriv->plx9080_iobase + PLX_DMA0_PCI_ADDRESS_REG);
- writel(0,
- devpriv->plx9080_iobase + PLX_DMA0_LOCAL_ADDRESS_REG);
+ writel(0, devpriv->plx9080_iobase + PLX_REG_DMASIZ0);
+ writel(0, devpriv->plx9080_iobase + PLX_REG_DMAPADR0);
+ writel(0, devpriv->plx9080_iobase + PLX_REG_DMALADR0);
writel(descriptor_bits,
- devpriv->plx9080_iobase + PLX_DMA0_DESCRIPTOR_REG);
+ devpriv->plx9080_iobase + PLX_REG_DMADPR0);
}
}
@@ -2643,9 +2634,9 @@ static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
/* give location of first dma descriptor */
load_first_dma_descriptor(dev, 1,
devpriv->ai_dma_desc_bus_addr |
- PLX_DESC_IN_PCI_BIT |
- PLX_INTR_TERM_COUNT |
- PLX_XFER_LOCAL_TO_PCI);
+ PLX_DMADPR_DESCPCI |
+ PLX_DMADPR_TCINTR |
+ PLX_DMADPR_XFERL2P);
dma_start_sync(dev, 1);
}
@@ -2803,12 +2794,7 @@ static void drain_dma_buffers(struct comedi_device *dev, unsigned int channel)
int num_samples = 0;
void __iomem *pci_addr_reg;
- if (channel)
- pci_addr_reg =
- devpriv->plx9080_iobase + PLX_DMA1_PCI_ADDRESS_REG;
- else
- pci_addr_reg =
- devpriv->plx9080_iobase + PLX_DMA0_PCI_ADDRESS_REG;
+ pci_addr_reg = devpriv->plx9080_iobase + PLX_REG_DMAPADR(channel);
/* loop until we have read all the full buffers */
for (j = 0, next_transfer_addr = readl(pci_addr_reg);
@@ -2850,12 +2836,12 @@ static void handle_ai_interrupt(struct comedi_device *dev,
}
/* spin lock makes sure no one else changes plx dma control reg */
spin_lock_irqsave(&dev->spinlock, flags);
- dma1_status = readb(devpriv->plx9080_iobase + PLX_DMA1_CS_REG);
- if (plx_status & ICS_DMA1_A) { /* dma chan 1 interrupt */
- writeb((dma1_status & PLX_DMA_EN_BIT) | PLX_CLEAR_DMA_INTR_BIT,
- devpriv->plx9080_iobase + PLX_DMA1_CS_REG);
+ dma1_status = readb(devpriv->plx9080_iobase + PLX_REG_DMACSR1);
+ if (plx_status & PLX_INTCSR_DMA1IA) { /* dma chan 1 interrupt */
+ writeb((dma1_status & PLX_DMACSR_ENABLE) | PLX_DMACSR_CLEARINTR,
+ devpriv->plx9080_iobase + PLX_REG_DMACSR1);
- if (dma1_status & PLX_DMA_EN_BIT)
+ if (dma1_status & PLX_DMACSR_ENABLE)
drain_dma_buffers(dev, 1);
}
spin_unlock_irqrestore(&dev->spinlock, flags);
@@ -2902,12 +2888,12 @@ static int last_ao_dma_load_completed(struct comedi_device *dev)
unsigned short dma_status;
buffer_index = prev_ao_dma_index(dev);
- dma_status = readb(devpriv->plx9080_iobase + PLX_DMA0_CS_REG);
- if ((dma_status & PLX_DMA_DONE_BIT) == 0)
+ dma_status = readb(devpriv->plx9080_iobase + PLX_REG_DMACSR0);
+ if ((dma_status & PLX_DMACSR_DONE) == 0)
return 0;
transfer_address =
- readl(devpriv->plx9080_iobase + PLX_DMA0_PCI_ADDRESS_REG);
+ readl(devpriv->plx9080_iobase + PLX_REG_DMAPADR0);
if (transfer_address != devpriv->ao_buffer_bus_addr[buffer_index])
return 0;
@@ -2917,8 +2903,8 @@ static int last_ao_dma_load_completed(struct comedi_device *dev)
static inline int ao_dma_needs_restart(struct comedi_device *dev,
unsigned short dma_status)
{
- if ((dma_status & PLX_DMA_DONE_BIT) == 0 ||
- (dma_status & PLX_DMA_EN_BIT) == 0)
+ if ((dma_status & PLX_DMACSR_DONE) == 0 ||
+ (dma_status & PLX_DMACSR_ENABLE) == 0)
return 0;
if (last_ao_dma_load_completed(dev))
return 0;
@@ -2931,9 +2917,8 @@ static void restart_ao_dma(struct comedi_device *dev)
struct pcidas64_private *devpriv = dev->private;
unsigned int dma_desc_bits;
- dma_desc_bits =
- readl(devpriv->plx9080_iobase + PLX_DMA0_DESCRIPTOR_REG);
- dma_desc_bits &= ~PLX_END_OF_CHAIN_BIT;
+ dma_desc_bits = readl(devpriv->plx9080_iobase + PLX_REG_DMADPR0);
+ dma_desc_bits &= ~PLX_DMADPR_CHAINEND;
load_first_dma_descriptor(dev, 0, dma_desc_bits);
dma_start_sync(dev, 0);
@@ -2974,14 +2959,14 @@ static unsigned int load_ao_dma_buffer(struct comedi_device *dev,
devpriv->ao_dma_desc[buffer_index].transfer_size = cpu_to_le32(nbytes);
/* set end of chain bit so we catch underruns */
next_bits = le32_to_cpu(devpriv->ao_dma_desc[buffer_index].next);
- next_bits |= PLX_END_OF_CHAIN_BIT;
+ next_bits |= PLX_DMADPR_CHAINEND;
devpriv->ao_dma_desc[buffer_index].next = cpu_to_le32(next_bits);
/*
* clear end of chain bit on previous buffer now that we have set it
* for the last buffer
*/
next_bits = le32_to_cpu(devpriv->ao_dma_desc[prev_buffer_index].next);
- next_bits &= ~PLX_END_OF_CHAIN_BIT;
+ next_bits &= ~PLX_DMADPR_CHAINEND;
devpriv->ao_dma_desc[prev_buffer_index].next = cpu_to_le32(next_bits);
devpriv->ao_dma_index = (buffer_index + 1) % AO_DMA_RING_COUNT;
@@ -2994,8 +2979,7 @@ static void load_ao_dma(struct comedi_device *dev, const struct comedi_cmd *cmd)
struct pcidas64_private *devpriv = dev->private;
unsigned int num_bytes;
unsigned int next_transfer_addr;
- void __iomem *pci_addr_reg =
- devpriv->plx9080_iobase + PLX_DMA0_PCI_ADDRESS_REG;
+ void __iomem *pci_addr_reg = devpriv->plx9080_iobase + PLX_REG_DMAPADR0;
unsigned int buffer_index;
do {
@@ -3030,17 +3014,18 @@ static void handle_ao_interrupt(struct comedi_device *dev,
/* spin lock makes sure no one else changes plx dma control reg */
spin_lock_irqsave(&dev->spinlock, flags);
- dma0_status = readb(devpriv->plx9080_iobase + PLX_DMA0_CS_REG);
- if (plx_status & ICS_DMA0_A) { /* dma chan 0 interrupt */
- if ((dma0_status & PLX_DMA_EN_BIT) &&
- !(dma0_status & PLX_DMA_DONE_BIT))
- writeb(PLX_DMA_EN_BIT | PLX_CLEAR_DMA_INTR_BIT,
- devpriv->plx9080_iobase + PLX_DMA0_CS_REG);
- else
- writeb(PLX_CLEAR_DMA_INTR_BIT,
- devpriv->plx9080_iobase + PLX_DMA0_CS_REG);
+ dma0_status = readb(devpriv->plx9080_iobase + PLX_REG_DMACSR0);
+ if (plx_status & PLX_INTCSR_DMA0IA) { /* dma chan 0 interrupt */
+ if ((dma0_status & PLX_DMACSR_ENABLE) &&
+ !(dma0_status & PLX_DMACSR_DONE)) {
+ writeb(PLX_DMACSR_ENABLE | PLX_DMACSR_CLEARINTR,
+ devpriv->plx9080_iobase + PLX_REG_DMACSR0);
+ } else {
+ writeb(PLX_DMACSR_CLEARINTR,
+ devpriv->plx9080_iobase + PLX_REG_DMACSR0);
+ }
spin_unlock_irqrestore(&dev->spinlock, flags);
- if (dma0_status & PLX_DMA_EN_BIT) {
+ if (dma0_status & PLX_DMACSR_ENABLE) {
load_ao_dma(dev, cmd);
/* try to recover from dma end-of-chain event */
if (ao_dma_needs_restart(dev, dma0_status))
@@ -3069,7 +3054,7 @@ static irqreturn_t handle_interrupt(int irq, void *d)
uint32_t plx_status;
uint32_t plx_bits;
- plx_status = readl(devpriv->plx9080_iobase + PLX_INTRCS_REG);
+ plx_status = readl(devpriv->plx9080_iobase + PLX_REG_INTCSR);
status = readw(devpriv->main_iobase + HW_STATUS_REG);
/*
@@ -3083,10 +3068,11 @@ static irqreturn_t handle_interrupt(int irq, void *d)
handle_ai_interrupt(dev, status, plx_status);
handle_ao_interrupt(dev, status, plx_status);
- /* clear possible plx9080 interrupt sources */
- if (plx_status & ICS_LDIA) { /* clear local doorbell interrupt */
- plx_bits = readl(devpriv->plx9080_iobase + PLX_DBR_OUT_REG);
- writel(plx_bits, devpriv->plx9080_iobase + PLX_DBR_OUT_REG);
+ /* clear possible plx9080 interrupt sources */
+ if (plx_status & PLX_INTCSR_LDBIA) {
+ /* clear local doorbell interrupt */
+ plx_bits = readl(devpriv->plx9080_iobase + PLX_REG_L2PDBELL);
+ writel(plx_bits, devpriv->plx9080_iobase + PLX_REG_L2PDBELL);
}
return IRQ_HANDLED;
@@ -3324,7 +3310,7 @@ static int ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
set_dac_select_reg(dev, cmd);
set_dac_interval_regs(dev, cmd);
load_first_dma_descriptor(dev, 0, devpriv->ao_dma_desc_bus_addr |
- PLX_DESC_IN_PCI_BIT | PLX_INTR_TERM_COUNT);
+ PLX_DMADPR_DESCPCI | PLX_DMADPR_TCINTR);
set_dac_control1_reg(dev, cmd);
s->async->inttrig = ao_inttrig;
@@ -3725,19 +3711,19 @@ static uint16_t read_eeprom(struct comedi_device *dev, uint8_t address)
unsigned int bitstream = (read_command << 8) | address;
unsigned int bit;
void __iomem * const plx_control_addr =
- devpriv->plx9080_iobase + PLX_CONTROL_REG;
+ devpriv->plx9080_iobase + PLX_REG_CNTRL;
uint16_t value;
static const int value_length = 16;
static const int eeprom_udelay = 1;
udelay(eeprom_udelay);
- devpriv->plx_control_bits &= ~CTL_EE_CLK & ~CTL_EE_CS;
+ devpriv->plx_control_bits &= ~PLX_CNTRL_EESK & ~PLX_CNTRL_EECS;
/* make sure we don't send anything to the i2c bus on 4020 */
- devpriv->plx_control_bits |= CTL_USERO;
+ devpriv->plx_control_bits |= PLX_CNTRL_USERO;
writel(devpriv->plx_control_bits, plx_control_addr);
/* activate serial eeprom */
udelay(eeprom_udelay);
- devpriv->plx_control_bits |= CTL_EE_CS;
+ devpriv->plx_control_bits |= PLX_CNTRL_EECS;
writel(devpriv->plx_control_bits, plx_control_addr);
/* write read command and desired memory address */
@@ -3745,16 +3731,16 @@ static uint16_t read_eeprom(struct comedi_device *dev, uint8_t address)
/* set bit to be written */
udelay(eeprom_udelay);
if (bitstream & bit)
- devpriv->plx_control_bits |= CTL_EE_W;
+ devpriv->plx_control_bits |= PLX_CNTRL_EEWB;
else
- devpriv->plx_control_bits &= ~CTL_EE_W;
+ devpriv->plx_control_bits &= ~PLX_CNTRL_EEWB;
writel(devpriv->plx_control_bits, plx_control_addr);
/* clock in bit */
udelay(eeprom_udelay);
- devpriv->plx_control_bits |= CTL_EE_CLK;
+ devpriv->plx_control_bits |= PLX_CNTRL_EESK;
writel(devpriv->plx_control_bits, plx_control_addr);
udelay(eeprom_udelay);
- devpriv->plx_control_bits &= ~CTL_EE_CLK;
+ devpriv->plx_control_bits &= ~PLX_CNTRL_EESK;
writel(devpriv->plx_control_bits, plx_control_addr);
}
/* read back value from eeprom memory location */
@@ -3762,19 +3748,19 @@ static uint16_t read_eeprom(struct comedi_device *dev, uint8_t address)
for (bit = 1 << (value_length - 1); bit; bit >>= 1) {
/* clock out bit */
udelay(eeprom_udelay);
- devpriv->plx_control_bits |= CTL_EE_CLK;
+ devpriv->plx_control_bits |= PLX_CNTRL_EESK;
writel(devpriv->plx_control_bits, plx_control_addr);
udelay(eeprom_udelay);
- devpriv->plx_control_bits &= ~CTL_EE_CLK;
+ devpriv->plx_control_bits &= ~PLX_CNTRL_EESK;
writel(devpriv->plx_control_bits, plx_control_addr);
udelay(eeprom_udelay);
- if (readl(plx_control_addr) & CTL_EE_R)
+ if (readl(plx_control_addr) & PLX_CNTRL_EERB)
value |= bit;
}
/* deactivate eeprom serial input */
udelay(eeprom_udelay);
- devpriv->plx_control_bits &= ~CTL_EE_CS;
+ devpriv->plx_control_bits &= ~PLX_CNTRL_EECS;
writel(devpriv->plx_control_bits, plx_control_addr);
return value;
@@ -3962,7 +3948,8 @@ static int setup_subdevices(struct comedi_device *dev)
/* serial EEPROM, if present */
s = &dev->subdevices[8];
- if (readl(devpriv->plx9080_iobase + PLX_CONTROL_REG) & CTL_EECHK) {
+ if (readl(devpriv->plx9080_iobase + PLX_REG_CNTRL) &
+ PLX_CNTRL_EEPRESENT) {
s->type = COMEDI_SUBD_MEMORY;
s->subdev_flags = SDF_READABLE | SDF_INTERNAL;
s->n_chan = 128;
@@ -4019,16 +4006,16 @@ static int auto_attach(struct comedi_device *dev,
}
/* figure out what local addresses are */
- local_range = readl(devpriv->plx9080_iobase + PLX_LAS0RNG_REG) &
- LRNG_MEM_MASK;
- local_decode = readl(devpriv->plx9080_iobase + PLX_LAS0MAP_REG) &
- local_range & LMAP_MEM_MASK;
+ local_range = readl(devpriv->plx9080_iobase + PLX_REG_LAS0RR) &
+ PLX_LASRR_MEM_MASK;
+ local_decode = readl(devpriv->plx9080_iobase + PLX_REG_LAS0BA) &
+ local_range & PLX_LASBA_MEM_MASK;
devpriv->local0_iobase = ((uint32_t)devpriv->main_phys_iobase &
~local_range) | local_decode;
- local_range = readl(devpriv->plx9080_iobase + PLX_LAS1RNG_REG) &
- LRNG_MEM_MASK;
- local_decode = readl(devpriv->plx9080_iobase + PLX_LAS1MAP_REG) &
- local_range & LMAP_MEM_MASK;
+ local_range = readl(devpriv->plx9080_iobase + PLX_REG_LAS1RR) &
+ PLX_LASRR_MEM_MASK;
+ local_decode = readl(devpriv->plx9080_iobase + PLX_REG_LAS1BA) &
+ local_range & PLX_LASBA_MEM_MASK;
devpriv->local1_iobase = ((uint32_t)devpriv->dio_counter_phys_iobase &
~local_range) | local_decode;
diff --git a/drivers/staging/comedi/drivers/comedi_bond.c b/drivers/staging/comedi/drivers/comedi_bond.c
index 50b76eccb..64a5ea381 100644
--- a/drivers/staging/comedi/drivers/comedi_bond.c
+++ b/drivers/staging/comedi/drivers/comedi_bond.c
@@ -55,16 +55,16 @@
struct bonded_device {
struct comedi_device *dev;
- unsigned minor;
- unsigned subdev;
- unsigned nchans;
+ unsigned int minor;
+ unsigned int subdev;
+ unsigned int nchans;
};
struct comedi_bond_private {
char name[256];
struct bonded_device **devs;
- unsigned ndevs;
- unsigned nchans;
+ unsigned int ndevs;
+ unsigned int nchans;
};
static int bonding_dio_insn_bits(struct comedi_device *dev,
diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c
index 73a46e3a6..b00f24ad9 100644
--- a/drivers/staging/comedi/drivers/daqboard2000.c
+++ b/drivers/staging/comedi/drivers/daqboard2000.c
@@ -116,12 +116,12 @@
#define DAQBOARD2000_SUBSYSTEM_IDS4 0x0004 /* Daqboard/2000 - 4 Dacs */
/* Initialization bits for the Serial EEPROM Control Register */
-#define DAQBOARD2000_SECRProgPinHi 0x8001767e
-#define DAQBOARD2000_SECRProgPinLo 0x8000767e
-#define DAQBOARD2000_SECRLocalBusHi 0xc000767e
-#define DAQBOARD2000_SECRLocalBusLo 0x8000767e
-#define DAQBOARD2000_SECRReloadHi 0xa000767e
-#define DAQBOARD2000_SECRReloadLo 0x8000767e
+#define DB2K_SECR_PROG_PIN_HI 0x8001767e
+#define DB2K_SECR_PROG_PIN_LO 0x8000767e
+#define DB2K_SECR_LOCAL_BUS_HI 0xc000767e
+#define DB2K_SECR_LOCAL_BUS_LO 0x8000767e
+#define DB2K_SECR_RELOAD_HI 0xa000767e
+#define DB2K_SECR_RELOAD_LO 0x8000767e
/* SECR status bits */
#define DAQBOARD2000_EEPROM_PRESENT 0x10000000
@@ -151,119 +151,108 @@ static const struct comedi_lrange range_daqboard2000_ai = {
/*
* Register Memory Map
*/
-#define acqControl 0x00 /* u16 */
-#define acqScanListFIFO 0x02 /* u16 */
-#define acqPacerClockDivLow 0x04 /* u32 */
-#define acqScanCounter 0x08 /* u16 */
-#define acqPacerClockDivHigh 0x0a /* u16 */
-#define acqTriggerCount 0x0c /* u16 */
-#define acqResultsFIFO 0x10 /* u16 */
-#define acqResultsShadow 0x14 /* u16 */
-#define acqAdcResult 0x18 /* u16 */
-#define dacScanCounter 0x1c /* u16 */
-#define dacControl 0x20 /* u16 */
-#define dacFIFO 0x24 /* s16 */
-#define dacPacerClockDiv 0x2a /* u16 */
-#define refDacs 0x2c /* u16 */
-#define dioControl 0x30 /* u16 */
-#define dioP3hsioData 0x32 /* s16 */
-#define dioP3Control 0x34 /* u16 */
-#define calEepromControl 0x36 /* u16 */
-#define dacSetting(x) (0x38 + (x)*2) /* s16 */
-#define dioP2ExpansionIO8Bit 0x40 /* s16 */
-#define ctrTmrControl 0x80 /* u16 */
-#define ctrInput(x) (0x88 + (x)*2) /* s16 */
-#define timerDivisor(x) (0xa0 + (x)*2) /* u16 */
-#define dmaControl 0xb0 /* u16 */
-#define trigControl 0xb2 /* u16 */
-#define calEeprom 0xb8 /* u16 */
-#define acqDigitalMark 0xba /* u16 */
-#define trigDacs 0xbc /* u16 */
-#define dioP2ExpansionIO16Bit(x) (0xc0 + (x)*2) /* s16 */
+#define DB2K_REG_ACQ_CONTROL 0x00 /* u16 (w) */
+#define DB2K_REG_ACQ_STATUS 0x00 /* u16 (r) */
+#define DB2K_REG_ACQ_SCAN_LIST_FIFO 0x02 /* u16 */
+#define DB2K_REG_ACQ_PACER_CLOCK_DIV_LOW 0x04 /* u32 */
+#define DB2K_REG_ACQ_SCAN_COUNTER 0x08 /* u16 */
+#define DB2K_REG_ACQ_PACER_CLOCK_DIV_HIGH 0x0a /* u16 */
+#define DB2K_REG_ACQ_TRIGGER_COUNT 0x0c /* u16 */
+#define DB2K_REG_ACQ_RESULTS_FIFO 0x10 /* u16 */
+#define DB2K_REG_ACQ_RESULTS_SHADOW 0x14 /* u16 */
+#define DB2K_REG_ACQ_ADC_RESULT 0x18 /* u16 */
+#define DB2K_REG_DAC_SCAN_COUNTER 0x1c /* u16 */
+#define DB2K_REG_DAC_CONTROL 0x20 /* u16 (w) */
+#define DB2K_REG_DAC_STATUS 0x20 /* u16 (r) */
+#define DB2K_REG_DAC_FIFO 0x24 /* s16 */
+#define DB2K_REG_DAC_PACER_CLOCK_DIV 0x2a /* u16 */
+#define DB2K_REG_REF_DACS 0x2c /* u16 */
+#define DB2K_REG_DIO_CONTROL 0x30 /* u16 */
+#define DB2K_REG_P3_HSIO_DATA 0x32 /* s16 */
+#define DB2K_REG_P3_CONTROL 0x34 /* u16 */
+#define DB2K_REG_CAL_EEPROM_CONTROL 0x36 /* u16 */
+#define DB2K_REG_DAC_SETTING(x) (0x38 + (x) * 2) /* s16 */
+#define DB2K_REG_DIO_P2_EXP_IO_8_BIT 0x40 /* s16 */
+#define DB2K_REG_COUNTER_TIMER_CONTROL 0x80 /* u16 */
+#define DB2K_REG_COUNTER_INPUT(x) (0x88 + (x) * 2) /* s16 */
+#define DB2K_REG_TIMER_DIV(x) (0xa0 + (x) * 2) /* u16 */
+#define DB2K_REG_DMA_CONTROL 0xb0 /* u16 */
+#define DB2K_REG_TRIG_CONTROL 0xb2 /* u16 */
+#define DB2K_REG_CAL_EEPROM 0xb8 /* u16 */
+#define DB2K_REG_ACQ_DIGITAL_MARK 0xba /* u16 */
+#define DB2K_REG_TRIG_DACS 0xbc /* u16 */
+#define DB2K_REG_DIO_P2_EXP_IO_16_BIT(x) (0xc0 + (x) * 2) /* s16 */
/* Scan Sequencer programming */
-#define DAQBOARD2000_SeqStartScanList 0x0011
-#define DAQBOARD2000_SeqStopScanList 0x0010
+#define DB2K_ACQ_CONTROL_SEQ_START_SCAN_LIST 0x0011
+#define DB2K_ACQ_CONTROL_SEQ_STOP_SCAN_LIST 0x0010
/* Prepare for acquisition */
-#define DAQBOARD2000_AcqResetScanListFifo 0x0004
-#define DAQBOARD2000_AcqResetResultsFifo 0x0002
-#define DAQBOARD2000_AcqResetConfigPipe 0x0001
-
-/* Acqusition status bits */
-#define DAQBOARD2000_AcqResultsFIFOMore1Sample 0x0001
-#define DAQBOARD2000_AcqResultsFIFOHasValidData 0x0002
-#define DAQBOARD2000_AcqResultsFIFOOverrun 0x0004
-#define DAQBOARD2000_AcqLogicScanning 0x0008
-#define DAQBOARD2000_AcqConfigPipeFull 0x0010
-#define DAQBOARD2000_AcqScanListFIFOEmpty 0x0020
-#define DAQBOARD2000_AcqAdcNotReady 0x0040
-#define DAQBOARD2000_ArbitrationFailure 0x0080
-#define DAQBOARD2000_AcqPacerOverrun 0x0100
-#define DAQBOARD2000_DacPacerOverrun 0x0200
-#define DAQBOARD2000_AcqHardwareError 0x01c0
-
-/* Scan Sequencer programming */
-#define DAQBOARD2000_SeqStartScanList 0x0011
-#define DAQBOARD2000_SeqStopScanList 0x0010
+#define DB2K_ACQ_CONTROL_RESET_SCAN_LIST_FIFO 0x0004
+#define DB2K_ACQ_CONTROL_RESET_RESULTS_FIFO 0x0002
+#define DB2K_ACQ_CONTROL_RESET_CONFIG_PIPE 0x0001
/* Pacer Clock Control */
-#define DAQBOARD2000_AdcPacerInternal 0x0030
-#define DAQBOARD2000_AdcPacerExternal 0x0032
-#define DAQBOARD2000_AdcPacerEnable 0x0031
-#define DAQBOARD2000_AdcPacerEnableDacPacer 0x0034
-#define DAQBOARD2000_AdcPacerDisable 0x0030
-#define DAQBOARD2000_AdcPacerNormalMode 0x0060
-#define DAQBOARD2000_AdcPacerCompatibilityMode 0x0061
-#define DAQBOARD2000_AdcPacerInternalOutEnable 0x0008
-#define DAQBOARD2000_AdcPacerExternalRising 0x0100
+#define DB2K_ACQ_CONTROL_ADC_PACER_INTERNAL 0x0030
+#define DB2K_ACQ_CONTROL_ADC_PACER_EXTERNAL 0x0032
+#define DB2K_ACQ_CONTROL_ADC_PACER_ENABLE 0x0031
+#define DB2K_ACQ_CONTROL_ADC_PACER_ENABLE_DAC_PACER 0x0034
+#define DB2K_ACQ_CONTROL_ADC_PACER_DISABLE 0x0030
+#define DB2K_ACQ_CONTROL_ADC_PACER_NORMAL_MODE 0x0060
+#define DB2K_ACQ_CONTROL_ADC_PACER_COMPATIBILITY_MODE 0x0061
+#define DB2K_ACQ_CONTROL_ADC_PACER_INTERNAL_OUT_ENABLE 0x0008
+#define DB2K_ACQ_CONTROL_ADC_PACER_EXTERNAL_RISING 0x0100
+
+/* Acquisition status bits */
+#define DB2K_ACQ_STATUS_RESULTS_FIFO_MORE_1_SAMPLE 0x0001
+#define DB2K_ACQ_STATUS_RESULTS_FIFO_HAS_DATA 0x0002
+#define DB2K_ACQ_STATUS_RESULTS_FIFO_OVERRUN 0x0004
+#define DB2K_ACQ_STATUS_LOGIC_SCANNING 0x0008
+#define DB2K_ACQ_STATUS_CONFIG_PIPE_FULL 0x0010
+#define DB2K_ACQ_STATUS_SCAN_LIST_FIFO_EMPTY 0x0020
+#define DB2K_ACQ_STATUS_ADC_NOT_READY 0x0040
+#define DB2K_ACQ_STATUS_ARBITRATION_FAILURE 0x0080
+#define DB2K_ACQ_STATUS_ADC_PACER_OVERRUN 0x0100
+#define DB2K_ACQ_STATUS_DAC_PACER_OVERRUN 0x0200
/* DAC status */
-#define DAQBOARD2000_DacFull 0x0001
-#define DAQBOARD2000_RefBusy 0x0002
-#define DAQBOARD2000_TrgBusy 0x0004
-#define DAQBOARD2000_CalBusy 0x0008
-#define DAQBOARD2000_Dac0Busy 0x0010
-#define DAQBOARD2000_Dac1Busy 0x0020
-#define DAQBOARD2000_Dac2Busy 0x0040
-#define DAQBOARD2000_Dac3Busy 0x0080
+#define DB2K_DAC_STATUS_DAC_FULL 0x0001
+#define DB2K_DAC_STATUS_REF_BUSY 0x0002
+#define DB2K_DAC_STATUS_TRIG_BUSY 0x0004
+#define DB2K_DAC_STATUS_CAL_BUSY 0x0008
+#define DB2K_DAC_STATUS_DAC_BUSY(x) (0x0010 << (x))
/* DAC control */
-#define DAQBOARD2000_Dac0Enable 0x0021
-#define DAQBOARD2000_Dac1Enable 0x0031
-#define DAQBOARD2000_Dac2Enable 0x0041
-#define DAQBOARD2000_Dac3Enable 0x0051
-#define DAQBOARD2000_DacEnableBit 0x0001
-#define DAQBOARD2000_Dac0Disable 0x0020
-#define DAQBOARD2000_Dac1Disable 0x0030
-#define DAQBOARD2000_Dac2Disable 0x0040
-#define DAQBOARD2000_Dac3Disable 0x0050
-#define DAQBOARD2000_DacResetFifo 0x0004
-#define DAQBOARD2000_DacPatternDisable 0x0060
-#define DAQBOARD2000_DacPatternEnable 0x0061
-#define DAQBOARD2000_DacSelectSignedData 0x0002
-#define DAQBOARD2000_DacSelectUnsignedData 0x0000
+#define DB2K_DAC_CONTROL_ENABLE_BIT 0x0001
+#define DB2K_DAC_CONTROL_DATA_IS_SIGNED 0x0002
+#define DB2K_DAC_CONTROL_RESET_FIFO 0x0004
+#define DB2K_DAC_CONTROL_DAC_DISABLE(x) (0x0020 + ((x) << 4))
+#define DB2K_DAC_CONTROL_DAC_ENABLE(x) (0x0021 + ((x) << 4))
+#define DB2K_DAC_CONTROL_PATTERN_DISABLE 0x0060
+#define DB2K_DAC_CONTROL_PATTERN_ENABLE 0x0061
/* Trigger Control */
-#define DAQBOARD2000_TrigAnalog 0x0000
-#define DAQBOARD2000_TrigTTL 0x0010
-#define DAQBOARD2000_TrigTransHiLo 0x0004
-#define DAQBOARD2000_TrigTransLoHi 0x0000
-#define DAQBOARD2000_TrigAbove 0x0000
-#define DAQBOARD2000_TrigBelow 0x0004
-#define DAQBOARD2000_TrigLevelSense 0x0002
-#define DAQBOARD2000_TrigEdgeSense 0x0000
-#define DAQBOARD2000_TrigEnable 0x0001
-#define DAQBOARD2000_TrigDisable 0x0000
+#define DB2K_TRIG_CONTROL_TYPE_ANALOG 0x0000
+#define DB2K_TRIG_CONTROL_TYPE_TTL 0x0010
+#define DB2K_TRIG_CONTROL_EDGE_HI_LO 0x0004
+#define DB2K_TRIG_CONTROL_EDGE_LO_HI 0x0000
+#define DB2K_TRIG_CONTROL_LEVEL_ABOVE 0x0000
+#define DB2K_TRIG_CONTROL_LEVEL_BELOW 0x0004
+#define DB2K_TRIG_CONTROL_SENSE_LEVEL 0x0002
+#define DB2K_TRIG_CONTROL_SENSE_EDGE 0x0000
+#define DB2K_TRIG_CONTROL_ENABLE 0x0001
+#define DB2K_TRIG_CONTROL_DISABLE 0x0000
/* Reference Dac Selection */
-#define DAQBOARD2000_PosRefDacSelect 0x0100
-#define DAQBOARD2000_NegRefDacSelect 0x0000
+#define DB2K_REF_DACS_SET 0x0080
+#define DB2K_REF_DACS_SELECT_POS_REF 0x0100
+#define DB2K_REF_DACS_SELECT_NEG_REF 0x0000
struct daq200_boardtype {
const char *name;
int id;
};
+
static const struct daq200_boardtype boardtypes[] = {
{"ids2", DAQBOARD2000_SUBSYSTEM_IDS2},
{"ids4", DAQBOARD2000_SUBSYSTEM_IDS4},
@@ -276,15 +265,16 @@ struct daqboard2000_private {
void __iomem *plx;
};
-static void writeAcqScanListEntry(struct comedi_device *dev, u16 entry)
+static void daqboard2000_write_acq_scan_list_entry(struct comedi_device *dev,
+ u16 entry)
{
- /* udelay(4); */
- writew(entry & 0x00ff, dev->mmio + acqScanListFIFO);
- /* udelay(4); */
- writew((entry >> 8) & 0x00ff, dev->mmio + acqScanListFIFO);
+ writew(entry & 0x00ff, dev->mmio + DB2K_REG_ACQ_SCAN_LIST_FIFO);
+ writew((entry >> 8) & 0x00ff,
+ dev->mmio + DB2K_REG_ACQ_SCAN_LIST_FIFO);
}
-static void setup_sampling(struct comedi_device *dev, int chan, int gain)
+static void daqboard2000_setup_sampling(struct comedi_device *dev, int chan,
+ int gain)
{
u16 word0, word1, word2, word3;
@@ -315,17 +305,13 @@ static void setup_sampling(struct comedi_device *dev, int chan, int gain)
word3 = 0;
break;
}
-/*
- dev->eeprom.correctionDACSE[i][j][k].offset = 0x800;
- dev->eeprom.correctionDACSE[i][j][k].gain = 0xc00;
-*/
/* These should be read from EEPROM */
- word2 |= 0x0800;
- word3 |= 0xc000;
- writeAcqScanListEntry(dev, word0);
- writeAcqScanListEntry(dev, word1);
- writeAcqScanListEntry(dev, word2);
- writeAcqScanListEntry(dev, word3);
+ word2 |= 0x0800; /* offset */
+ word3 |= 0xc000; /* gain */
+ daqboard2000_write_acq_scan_list_entry(dev, word0);
+ daqboard2000_write_acq_scan_list_entry(dev, word1);
+ daqboard2000_write_acq_scan_list_entry(dev, word2);
+ daqboard2000_write_acq_scan_list_entry(dev, word3);
}
static int daqboard2000_ai_status(struct comedi_device *dev,
@@ -335,7 +321,7 @@ static int daqboard2000_ai_status(struct comedi_device *dev,
{
unsigned int status;
- status = readw(dev->mmio + acqControl);
+ status = readw(dev->mmio + DB2K_REG_ACQ_STATUS);
if (status & context)
return 0;
return -EBUSY;
@@ -350,50 +336,58 @@ static int daqboard2000_ai_insn_read(struct comedi_device *dev,
int ret;
int i;
- writew(DAQBOARD2000_AcqResetScanListFifo |
- DAQBOARD2000_AcqResetResultsFifo |
- DAQBOARD2000_AcqResetConfigPipe, dev->mmio + acqControl);
+ writew(DB2K_ACQ_CONTROL_RESET_SCAN_LIST_FIFO |
+ DB2K_ACQ_CONTROL_RESET_RESULTS_FIFO |
+ DB2K_ACQ_CONTROL_RESET_CONFIG_PIPE,
+ dev->mmio + DB2K_REG_ACQ_CONTROL);
/*
* If pacer clock is not set to some high value (> 10 us), we
* risk multiple samples to be put into the result FIFO.
*/
/* 1 second, should be long enough */
- writel(1000000, dev->mmio + acqPacerClockDivLow);
- writew(0, dev->mmio + acqPacerClockDivHigh);
+ writel(1000000, dev->mmio + DB2K_REG_ACQ_PACER_CLOCK_DIV_LOW);
+ writew(0, dev->mmio + DB2K_REG_ACQ_PACER_CLOCK_DIV_HIGH);
gain = CR_RANGE(insn->chanspec);
chan = CR_CHAN(insn->chanspec);
- /* This doesn't look efficient. I decided to take the conservative
+ /*
+ * This doesn't look efficient. I decided to take the conservative
* approach when I did the insn conversion. Perhaps it would be
* better to have broken it completely, then someone would have been
- * forced to fix it. --ds */
+ * forced to fix it. --ds
+ */
for (i = 0; i < insn->n; i++) {
- setup_sampling(dev, chan, gain);
+ daqboard2000_setup_sampling(dev, chan, gain);
/* Enable reading from the scanlist FIFO */
- writew(DAQBOARD2000_SeqStartScanList, dev->mmio + acqControl);
+ writew(DB2K_ACQ_CONTROL_SEQ_START_SCAN_LIST,
+ dev->mmio + DB2K_REG_ACQ_CONTROL);
ret = comedi_timeout(dev, s, insn, daqboard2000_ai_status,
- DAQBOARD2000_AcqConfigPipeFull);
+ DB2K_ACQ_STATUS_CONFIG_PIPE_FULL);
if (ret)
return ret;
- writew(DAQBOARD2000_AdcPacerEnable, dev->mmio + acqControl);
+ writew(DB2K_ACQ_CONTROL_ADC_PACER_ENABLE,
+ dev->mmio + DB2K_REG_ACQ_CONTROL);
ret = comedi_timeout(dev, s, insn, daqboard2000_ai_status,
- DAQBOARD2000_AcqLogicScanning);
+ DB2K_ACQ_STATUS_LOGIC_SCANNING);
if (ret)
return ret;
- ret = comedi_timeout(dev, s, insn, daqboard2000_ai_status,
- DAQBOARD2000_AcqResultsFIFOHasValidData);
+ ret = comedi_timeout(dev, s, insn, daqboard2000_ai_status,
+ DB2K_ACQ_STATUS_RESULTS_FIFO_HAS_DATA);
if (ret)
return ret;
- data[i] = readw(dev->mmio + acqResultsFIFO);
- writew(DAQBOARD2000_AdcPacerDisable, dev->mmio + acqControl);
- writew(DAQBOARD2000_SeqStopScanList, dev->mmio + acqControl);
+ data[i] = readw(dev->mmio + DB2K_REG_ACQ_RESULTS_FIFO);
+ writew(DB2K_ACQ_CONTROL_ADC_PACER_DISABLE,
+ dev->mmio + DB2K_REG_ACQ_CONTROL);
+ writew(DB2K_ACQ_CONTROL_SEQ_STOP_SCAN_LIST,
+ dev->mmio + DB2K_REG_ACQ_CONTROL);
}
return i;
@@ -407,8 +401,8 @@ static int daqboard2000_ao_eoc(struct comedi_device *dev,
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int status;
- status = readw(dev->mmio + dacControl);
- if ((status & ((chan + 1) * 0x0010)) == 0)
+ status = readw(dev->mmio + DB2K_REG_DAC_STATUS);
+ if ((status & DB2K_DAC_STATUS_DAC_BUSY(chan)) == 0)
return 0;
return -EBUSY;
}
@@ -425,7 +419,7 @@ static int daqboard2000_ao_insn_write(struct comedi_device *dev,
unsigned int val = data[i];
int ret;
- writew(val, dev->mmio + dacSetting(chan));
+ writew(val, dev->mmio + DB2K_REG_DAC_SETTING(chan));
ret = comedi_timeout(dev, s, insn, daqboard2000_ao_eoc, 0);
if (ret)
@@ -437,39 +431,39 @@ static int daqboard2000_ao_insn_write(struct comedi_device *dev,
return insn->n;
}
-static void daqboard2000_resetLocalBus(struct comedi_device *dev)
+static void daqboard2000_reset_local_bus(struct comedi_device *dev)
{
struct daqboard2000_private *devpriv = dev->private;
- writel(DAQBOARD2000_SECRLocalBusHi, devpriv->plx + 0x6c);
+ writel(DB2K_SECR_LOCAL_BUS_HI, devpriv->plx + 0x6c);
mdelay(10);
- writel(DAQBOARD2000_SECRLocalBusLo, devpriv->plx + 0x6c);
+ writel(DB2K_SECR_LOCAL_BUS_LO, devpriv->plx + 0x6c);
mdelay(10);
}
-static void daqboard2000_reloadPLX(struct comedi_device *dev)
+static void daqboard2000_reload_plx(struct comedi_device *dev)
{
struct daqboard2000_private *devpriv = dev->private;
- writel(DAQBOARD2000_SECRReloadLo, devpriv->plx + 0x6c);
+ writel(DB2K_SECR_RELOAD_LO, devpriv->plx + 0x6c);
mdelay(10);
- writel(DAQBOARD2000_SECRReloadHi, devpriv->plx + 0x6c);
+ writel(DB2K_SECR_RELOAD_HI, devpriv->plx + 0x6c);
mdelay(10);
- writel(DAQBOARD2000_SECRReloadLo, devpriv->plx + 0x6c);
+ writel(DB2K_SECR_RELOAD_LO, devpriv->plx + 0x6c);
mdelay(10);
}
-static void daqboard2000_pulseProgPin(struct comedi_device *dev)
+static void daqboard2000_pulse_prog_pin(struct comedi_device *dev)
{
struct daqboard2000_private *devpriv = dev->private;
- writel(DAQBOARD2000_SECRProgPinHi, devpriv->plx + 0x6c);
+ writel(DB2K_SECR_PROG_PIN_HI, devpriv->plx + 0x6c);
mdelay(10);
- writel(DAQBOARD2000_SECRProgPinLo, devpriv->plx + 0x6c);
+ writel(DB2K_SECR_PROG_PIN_LO, devpriv->plx + 0x6c);
mdelay(10); /* Not in the original code, but I like symmetry... */
}
-static int daqboard2000_pollCPLD(struct comedi_device *dev, int mask)
+static int daqboard2000_poll_cpld(struct comedi_device *dev, int mask)
{
int result = 0;
int i;
@@ -482,17 +476,17 @@ static int daqboard2000_pollCPLD(struct comedi_device *dev, int mask)
result = 1;
break;
}
- udelay(100);
+ usleep_range(100, 1000);
}
udelay(5);
return result;
}
-static int daqboard2000_writeCPLD(struct comedi_device *dev, int data)
+static int daqboard2000_write_cpld(struct comedi_device *dev, int data)
{
int result = 0;
- udelay(10);
+ usleep_range(10, 20);
writew(data, dev->mmio + 0x1000);
if ((readw(dev->mmio + 0x1000) & DAQBOARD2000_CPLD_INIT) ==
DAQBOARD2000_CPLD_INIT) {
@@ -501,9 +495,9 @@ static int daqboard2000_writeCPLD(struct comedi_device *dev, int data)
return result;
}
-static int initialize_daqboard2000(struct comedi_device *dev,
- const u8 *cpld_array, size_t len,
- unsigned long context)
+static int daqboard2000_load_firmware(struct comedi_device *dev,
+ const u8 *cpld_array, size_t len,
+ unsigned long context)
{
struct daqboard2000_private *devpriv = dev->private;
int result = -EIO;
@@ -518,10 +512,10 @@ static int initialize_daqboard2000(struct comedi_device *dev,
return -EIO;
for (retry = 0; retry < 3; retry++) {
- daqboard2000_resetLocalBus(dev);
- daqboard2000_reloadPLX(dev);
- daqboard2000_pulseProgPin(dev);
- if (daqboard2000_pollCPLD(dev, DAQBOARD2000_CPLD_INIT)) {
+ daqboard2000_reset_local_bus(dev);
+ daqboard2000_reload_plx(dev);
+ daqboard2000_pulse_prog_pin(dev);
+ if (daqboard2000_poll_cpld(dev, DAQBOARD2000_CPLD_INIT)) {
for (i = 0; i < len; i++) {
if (cpld_array[i] == 0xff &&
cpld_array[i + 1] == 0x20)
@@ -530,12 +524,12 @@ static int initialize_daqboard2000(struct comedi_device *dev,
for (; i < len; i += 2) {
int data =
(cpld_array[i] << 8) + cpld_array[i + 1];
- if (!daqboard2000_writeCPLD(dev, data))
+ if (!daqboard2000_write_cpld(dev, data))
break;
}
if (i >= len) {
- daqboard2000_resetLocalBus(dev);
- daqboard2000_reloadPLX(dev);
+ daqboard2000_reset_local_bus(dev);
+ daqboard2000_reload_plx(dev);
result = 0;
break;
}
@@ -544,79 +538,83 @@ static int initialize_daqboard2000(struct comedi_device *dev,
return result;
}
-static void daqboard2000_adcStopDmaTransfer(struct comedi_device *dev)
+static void daqboard2000_adc_stop_dma_transfer(struct comedi_device *dev)
{
}
-static void daqboard2000_adcDisarm(struct comedi_device *dev)
+static void daqboard2000_adc_disarm(struct comedi_device *dev)
{
/* Disable hardware triggers */
udelay(2);
- writew(DAQBOARD2000_TrigAnalog | DAQBOARD2000_TrigDisable,
- dev->mmio + trigControl);
+ writew(DB2K_TRIG_CONTROL_TYPE_ANALOG | DB2K_TRIG_CONTROL_DISABLE,
+ dev->mmio + DB2K_REG_TRIG_CONTROL);
udelay(2);
- writew(DAQBOARD2000_TrigTTL | DAQBOARD2000_TrigDisable,
- dev->mmio + trigControl);
+ writew(DB2K_TRIG_CONTROL_TYPE_TTL | DB2K_TRIG_CONTROL_DISABLE,
+ dev->mmio + DB2K_REG_TRIG_CONTROL);
/* Stop the scan list FIFO from loading the configuration pipe */
udelay(2);
- writew(DAQBOARD2000_SeqStopScanList, dev->mmio + acqControl);
+ writew(DB2K_ACQ_CONTROL_SEQ_STOP_SCAN_LIST,
+ dev->mmio + DB2K_REG_ACQ_CONTROL);
/* Stop the pacer clock */
udelay(2);
- writew(DAQBOARD2000_AdcPacerDisable, dev->mmio + acqControl);
+ writew(DB2K_ACQ_CONTROL_ADC_PACER_DISABLE,
+ dev->mmio + DB2K_REG_ACQ_CONTROL);
/* Stop the input dma (abort channel 1) */
- daqboard2000_adcStopDmaTransfer(dev);
+ daqboard2000_adc_stop_dma_transfer(dev);
}
-static void daqboard2000_activateReferenceDacs(struct comedi_device *dev)
+static void daqboard2000_activate_reference_dacs(struct comedi_device *dev)
{
unsigned int val;
int timeout;
/* Set the + reference dac value in the FPGA */
- writew(0x80 | DAQBOARD2000_PosRefDacSelect, dev->mmio + refDacs);
+ writew(DB2K_REF_DACS_SET | DB2K_REF_DACS_SELECT_POS_REF,
+ dev->mmio + DB2K_REG_REF_DACS);
for (timeout = 0; timeout < 20; timeout++) {
- val = readw(dev->mmio + dacControl);
- if ((val & DAQBOARD2000_RefBusy) == 0)
+ val = readw(dev->mmio + DB2K_REG_DAC_STATUS);
+ if ((val & DB2K_DAC_STATUS_REF_BUSY) == 0)
break;
udelay(2);
}
/* Set the - reference dac value in the FPGA */
- writew(0x80 | DAQBOARD2000_NegRefDacSelect, dev->mmio + refDacs);
+ writew(DB2K_REF_DACS_SET | DB2K_REF_DACS_SELECT_NEG_REF,
+ dev->mmio + DB2K_REG_REF_DACS);
for (timeout = 0; timeout < 20; timeout++) {
- val = readw(dev->mmio + dacControl);
- if ((val & DAQBOARD2000_RefBusy) == 0)
+ val = readw(dev->mmio + DB2K_REG_DAC_STATUS);
+ if ((val & DB2K_DAC_STATUS_REF_BUSY) == 0)
break;
udelay(2);
}
}
-static void daqboard2000_initializeCtrs(struct comedi_device *dev)
+static void daqboard2000_initialize_ctrs(struct comedi_device *dev)
{
}
-static void daqboard2000_initializeTmrs(struct comedi_device *dev)
+static void daqboard2000_initialize_tmrs(struct comedi_device *dev)
{
}
-static void daqboard2000_dacDisarm(struct comedi_device *dev)
+static void daqboard2000_dac_disarm(struct comedi_device *dev)
{
}
-static void daqboard2000_initializeAdc(struct comedi_device *dev)
+static void daqboard2000_initialize_adc(struct comedi_device *dev)
{
- daqboard2000_adcDisarm(dev);
- daqboard2000_activateReferenceDacs(dev);
- daqboard2000_initializeCtrs(dev);
- daqboard2000_initializeTmrs(dev);
+ daqboard2000_adc_disarm(dev);
+ daqboard2000_activate_reference_dacs(dev);
+ daqboard2000_initialize_ctrs(dev);
+ daqboard2000_initialize_tmrs(dev);
}
-static void daqboard2000_initializeDac(struct comedi_device *dev)
+static void daqboard2000_initialize_dac(struct comedi_device *dev)
{
- daqboard2000_dacDisarm(dev);
+ daqboard2000_dac_disarm(dev);
}
static int daqboard2000_8255_cb(struct comedi_device *dev,
@@ -683,12 +681,12 @@ static int daqboard2000_auto_attach(struct comedi_device *dev,
result = comedi_load_firmware(dev, &comedi_to_pci_dev(dev)->dev,
DAQBOARD2000_FIRMWARE,
- initialize_daqboard2000, 0);
+ daqboard2000_load_firmware, 0);
if (result < 0)
return result;
- daqboard2000_initializeAdc(dev);
- daqboard2000_initializeDac(dev);
+ daqboard2000_initialize_adc(dev);
+ daqboard2000_initialize_dac(dev);
s = &dev->subdevices[0];
/* ai subdevice */
@@ -714,7 +712,7 @@ static int daqboard2000_auto_attach(struct comedi_device *dev,
s = &dev->subdevices[2];
return subdev_8255_init(dev, s, daqboard2000_8255_cb,
- dioP2ExpansionIO8Bit);
+ DB2K_REG_DIO_P2_EXP_IO_8_BIT);
}
static void daqboard2000_detach(struct comedi_device *dev)
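Note on the daqboard2000 hunks above: the per-channel DAC defines collapse into
parameterized macros. A minimal userspace sketch, assuming only the macro
definitions from this patch, checks that the expansions reproduce the old
constants; it also shows that DB2K_DAC_STATUS_DAC_BUSY() corrects the old
"(chan + 1) * 0x0010" busy test in daqboard2000_ao_eoc(), which yielded 0x30
rather than 0x40 for channel 2.

	/* Illustrative userspace check, not driver code. */
	#include <assert.h>
	#include <stdio.h>

	#define DB2K_DAC_STATUS_DAC_BUSY(x)	(0x0010 << (x))
	#define DB2K_DAC_CONTROL_DAC_DISABLE(x)	(0x0020 + ((x) << 4))
	#define DB2K_DAC_CONTROL_DAC_ENABLE(x)	(0x0021 + ((x) << 4))

	int main(void)
	{
		assert(DB2K_DAC_STATUS_DAC_BUSY(0) == 0x0010);	   /* Dac0Busy */
		assert(DB2K_DAC_STATUS_DAC_BUSY(3) == 0x0080);	   /* Dac3Busy */
		assert(DB2K_DAC_CONTROL_DAC_ENABLE(0) == 0x0021);  /* Dac0Enable */
		assert(DB2K_DAC_CONTROL_DAC_ENABLE(3) == 0x0051);  /* Dac3Enable */
		assert(DB2K_DAC_CONTROL_DAC_DISABLE(2) == 0x0040); /* Dac2Disable */
		printf("parameterized macros match the old defines\n");
		return 0;
	}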
diff --git a/drivers/staging/comedi/drivers/das16.c b/drivers/staging/comedi/drivers/das16.c
index fd8e0b76f..5d157951f 100644
--- a/drivers/staging/comedi/drivers/das16.c
+++ b/drivers/staging/comedi/drivers/das16.c
@@ -92,37 +92,37 @@
#define DAS16_AO_LSB_REG(x) ((x) ? 0x06 : 0x04)
#define DAS16_AO_MSB_REG(x) ((x) ? 0x07 : 0x05)
#define DAS16_STATUS_REG 0x08
-#define DAS16_STATUS_BUSY (1 << 7)
-#define DAS16_STATUS_UNIPOLAR (1 << 6)
-#define DAS16_STATUS_MUXBIT (1 << 5)
-#define DAS16_STATUS_INT (1 << 4)
+#define DAS16_STATUS_BUSY BIT(7)
+#define DAS16_STATUS_UNIPOLAR BIT(6)
+#define DAS16_STATUS_MUXBIT BIT(5)
+#define DAS16_STATUS_INT BIT(4)
#define DAS16_CTRL_REG 0x09
-#define DAS16_CTRL_INTE (1 << 7)
+#define DAS16_CTRL_INTE BIT(7)
#define DAS16_CTRL_IRQ(x) (((x) & 0x7) << 4)
-#define DAS16_CTRL_DMAE (1 << 2)
+#define DAS16_CTRL_DMAE BIT(2)
#define DAS16_CTRL_PACING_MASK (3 << 0)
#define DAS16_CTRL_INT_PACER (3 << 0)
#define DAS16_CTRL_EXT_PACER (2 << 0)
#define DAS16_CTRL_SOFT_PACER (0 << 0)
#define DAS16_PACER_REG 0x0a
#define DAS16_PACER_BURST_LEN(x) (((x) & 0xf) << 4)
-#define DAS16_PACER_CTR0 (1 << 1)
-#define DAS16_PACER_TRIG0 (1 << 0)
+#define DAS16_PACER_CTR0 BIT(1)
+#define DAS16_PACER_TRIG0 BIT(0)
#define DAS16_GAIN_REG 0x0b
#define DAS16_TIMER_BASE_REG 0x0c /* to 0x0f */
#define DAS1600_CONV_REG 0x404
-#define DAS1600_CONV_DISABLE (1 << 6)
+#define DAS1600_CONV_DISABLE BIT(6)
#define DAS1600_BURST_REG 0x405
-#define DAS1600_BURST_VAL (1 << 6)
+#define DAS1600_BURST_VAL BIT(6)
#define DAS1600_ENABLE_REG 0x406
-#define DAS1600_ENABLE_VAL (1 << 6)
+#define DAS1600_ENABLE_VAL BIT(6)
#define DAS1600_STATUS_REG 0x407
-#define DAS1600_STATUS_BME (1 << 6)
-#define DAS1600_STATUS_ME (1 << 5)
-#define DAS1600_STATUS_CD (1 << 4)
-#define DAS1600_STATUS_WS (1 << 1)
-#define DAS1600_STATUS_CLK_10MHZ (1 << 0)
+#define DAS1600_STATUS_BME BIT(6)
+#define DAS1600_STATUS_ME BIT(5)
+#define DAS1600_STATUS_CD BIT(4)
+#define DAS1600_STATUS_WS BIT(1)
+#define DAS1600_STATUS_CLK_10MHZ BIT(0)
static const struct comedi_lrange range_das1x01_bip = {
4, {
@@ -198,6 +198,7 @@ enum {
das16_pg_1601,
das16_pg_1602,
};
+
static const int *const das16_gainlists[] = {
NULL,
das16jr_gainlist,
@@ -428,8 +429,10 @@ static const struct das16_board das16_boards[] = {
},
};
-/* Period for timer interrupt in jiffies. It's a function
- * to deal with possibility of dynamic HZ patches */
+/*
+ * Period for the timer interrupt in jiffies. It's a function
+ * to deal with the possibility of dynamic HZ patches.
+ */
static inline int timer_period(void)
{
return HZ / 20;
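The timer_period() helper above computes the poll interval from HZ at run
time, so the wall-clock period stays at 50 ms whatever tick rate the kernel
was built with. A standalone sketch, with assumed HZ values for the demo:

	/* Illustrative only; the HZ values are assumptions, not driver code. */
	#include <stdio.h>

	static int timer_period(int hz)
	{
		return hz / 20;	/* jiffies per 50 ms */
	}

	int main(void)
	{
		const int hz_values[] = { 100, 250, 1000 };
		int i;

		for (i = 0; i < 3; i++) {
			int hz = hz_values[i];

			printf("HZ=%4d -> %3d jiffies = %d ms\n", hz,
			       timer_period(hz), timer_period(hz) * 1000 / hz);
		}
		return 0;
	}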
diff --git a/drivers/staging/comedi/drivers/das16m1.c b/drivers/staging/comedi/drivers/das16m1.c
index 3a37373fb..bb8d6ec06 100644
--- a/drivers/staging/comedi/drivers/das16m1.c
+++ b/drivers/staging/comedi/drivers/das16m1.c
@@ -1,56 +1,52 @@
/*
- comedi/drivers/das16m1.c
- CIO-DAS16/M1 driver
- Author: Frank Mori Hess, based on code from the das16
- driver.
- Copyright (C) 2001 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * Comedi driver for CIO-DAS16/M1
+ * Author: Frank Mori Hess, based on code from the das16 driver.
+ * Copyright (C) 2001 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
/*
-Driver: das16m1
-Description: CIO-DAS16/M1
-Author: Frank Mori Hess <fmhess@users.sourceforge.net>
-Devices: [Measurement Computing] CIO-DAS16/M1 (das16m1)
-Status: works
-
-This driver supports a single board - the CIO-DAS16/M1.
-As far as I know, there are no other boards that have
-the same register layout. Even the CIO-DAS16/M1/16 is
-significantly different.
-
-I was _barely_ able to reach the full 1 MHz capability
-of this board, using a hard real-time interrupt
-(set the TRIG_RT flag in your struct comedi_cmd and use
-rtlinux or RTAI). The board can't do dma, so the bottleneck is
-pulling the data across the ISA bus. I timed the interrupt
-handler, and it took my computer ~470 microseconds to pull 512
-samples from the board. So at 1 Mhz sampling rate,
-expect your CPU to be spending almost all of its
-time in the interrupt handler.
-
-This board has some unusual restrictions for its channel/gain list. If the
-list has 2 or more channels in it, then two conditions must be satisfied:
-(1) - even/odd channels must appear at even/odd indices in the list
-(2) - the list must have an even number of entries.
-
-Options:
- [0] - base io address
- [1] - irq (optional, but you probably want it)
-
-irq can be omitted, although the cmd interface will not work without it.
-*/
+ * Driver: das16m1
+ * Description: CIO-DAS16/M1
+ * Author: Frank Mori Hess <fmhess@users.sourceforge.net>
+ * Devices: [Measurement Computing] CIO-DAS16/M1 (das16m1)
+ * Status: works
+ *
+ * This driver supports a single board - the CIO-DAS16/M1. As far as I know,
+ * there are no other boards that have the same register layout. Even the
+ * CIO-DAS16/M1/16 is significantly different.
+ *
+ * I was _barely_ able to reach the full 1 MHz capability of this board, using
+ * a hard real-time interrupt (set the TRIG_RT flag in your struct comedi_cmd
+ * and use rtlinux or RTAI). The board can't do dma, so the bottleneck is
+ * pulling the data across the ISA bus. I timed the interrupt handler, and it
+ * took my computer ~470 microseconds to pull 512 samples from the board. So
+ * at 1 MHz sampling rate, expect your CPU to be spending almost all of its
+ * time in the interrupt handler.
+ *
+ * This board has some unusual restrictions for its channel/gain list. If the
+ * list has 2 or more channels in it, then two conditions must be satisfied:
+ * (1) - even/odd channels must appear at even/odd indices in the list
+ * (2) - the list must have an even number of entries.
+ *
+ * Configuration options:
+ * [0] - base io address
+ * [1] - irq (optional, but you probably want it)
+ *
+ * irq can be omitted, although the cmd interface will not work without it.
+ */
#include <linux/module.h>
#include <linux/slab.h>
@@ -60,52 +56,38 @@ irq can be omitted, although the cmd interface will not work without it.
#include "8255.h"
#include "comedi_8254.h"
-#define DAS16M1_SIZE2 8
-
-#define FIFO_SIZE 1024 /* 1024 sample fifo */
-
/*
- CIO-DAS16_M1.pdf
-
- "cio-das16/m1"
-
- 0 a/d bits 0-3, mux start 12 bit
- 1 a/d bits 4-11 unused
- 2 status control
- 3 di 4 bit do 4 bit
- 4 unused clear interrupt
- 5 interrupt, pacer
- 6 channel/gain queue address
- 7 channel/gain queue data
- 89ab 8254
- cdef 8254
- 400 8255
- 404-407 8254
-
-*/
-
-#define DAS16M1_AI 0 /* 16-bit wide register */
-#define AI_CHAN(x) ((x) & 0xf)
-#define DAS16M1_CS 2
-#define EXT_TRIG_BIT 0x1
-#define OVRUN 0x20
-#define IRQDATA 0x80
-#define DAS16M1_DIO 3
-#define DAS16M1_CLEAR_INTR 4
-#define DAS16M1_INTR_CONTROL 5
-#define EXT_PACER 0x2
-#define INT_PACER 0x3
-#define PACER_MASK 0x3
-#define INTE 0x80
-#define DAS16M1_QUEUE_ADDR 6
-#define DAS16M1_QUEUE_DATA 7
-#define Q_CHAN(x) ((x) & 0x7)
-#define Q_RANGE(x) (((x) & 0xf) << 4)
-#define UNIPOLAR 0x40
-#define DAS16M1_8254_FIRST 0x8
-#define DAS16M1_8254_SECOND 0xc
-#define DAS16M1_82C55 0x400
-#define DAS16M1_8254_THIRD 0x404
+ * Register map (dev->iobase)
+ */
+#define DAS16M1_AI_REG 0x00 /* 16-bit register */
+#define DAS16M1_AI_TO_CHAN(x) (((x) >> 0) & 0xf)
+#define DAS16M1_AI_TO_SAMPLE(x) (((x) >> 4) & 0xfff)
+#define DAS16M1_CS_REG 0x02
+#define DAS16M1_CS_EXT_TRIG BIT(0)
+#define DAS16M1_CS_OVRUN BIT(5)
+#define DAS16M1_CS_IRQDATA BIT(7)
+#define DAS16M1_DI_REG 0x03
+#define DAS16M1_DO_REG 0x03
+#define DAS16M1_CLR_INTR_REG 0x04
+#define DAS16M1_INTR_CTRL_REG 0x05
+#define DAS16M1_INTR_CTRL_PACER(x) (((x) & 0x3) << 0)
+#define DAS16M1_INTR_CTRL_PACER_EXT DAS16M1_INTR_CTRL_PACER(2)
+#define DAS16M1_INTR_CTRL_PACER_INT DAS16M1_INTR_CTRL_PACER(3)
+#define DAS16M1_INTR_CTRL_PACER_MASK DAS16M1_INTR_CTRL_PACER(3)
+#define DAS16M1_INTR_CTRL_IRQ(x) (((x) & 0x7) << 4)
+#define DAS16M1_INTR_CTRL_INTE BIT(7)
+#define DAS16M1_Q_ADDR_REG 0x06
+#define DAS16M1_Q_REG 0x07
+#define DAS16M1_Q_CHAN(x) (((x) & 0x7) << 0)
+#define DAS16M1_Q_RANGE(x) (((x) & 0xf) << 4)
+#define DAS16M1_8254_IOBASE1 0x08
+#define DAS16M1_8254_IOBASE2 0x0c
+#define DAS16M1_8255_IOBASE 0x400
+#define DAS16M1_8254_IOBASE3 0x404
+
+#define DAS16M1_SIZE2 0x08
+
+#define DAS16M1_AI_FIFO_SZ 1024 /* # samples */
static const struct comedi_lrange range_das16m1 = {
9, {
@@ -121,29 +103,46 @@ static const struct comedi_lrange range_das16m1 = {
}
};
-struct das16m1_private_struct {
+struct das16m1_private {
struct comedi_8254 *counter;
- unsigned int control_state;
- unsigned int adc_count; /* number of samples completed */
- /* initial value in lower half of hardware conversion counter,
- * needed to keep track of whether new count has been loaded into
- * counter yet (loaded by first sample conversion) */
+ unsigned int intr_ctrl;
+ unsigned int adc_count;
u16 initial_hw_count;
- unsigned short ai_buffer[FIFO_SIZE];
+ unsigned short ai_buffer[DAS16M1_AI_FIFO_SZ];
unsigned long extra_iobase;
};
-static inline unsigned short munge_sample(unsigned short data)
+static void das16m1_ai_set_queue(struct comedi_device *dev,
+ unsigned int *chanspec, unsigned int len)
{
- return (data >> 4) & 0xfff;
+ unsigned int i;
+
+ for (i = 0; i < len; i++) {
+ unsigned int chan = CR_CHAN(chanspec[i]);
+ unsigned int range = CR_RANGE(chanspec[i]);
+
+ outb(i, dev->iobase + DAS16M1_Q_ADDR_REG);
+ outb(DAS16M1_Q_CHAN(chan) | DAS16M1_Q_RANGE(range),
+ dev->iobase + DAS16M1_Q_REG);
+ }
}
-static void munge_sample_array(unsigned short *array, unsigned int num_elements)
+static void das16m1_ai_munge(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ void *data, unsigned int num_bytes,
+ unsigned int start_chan_index)
{
+ unsigned short *array = data;
+ unsigned int nsamples = comedi_bytes_to_samples(s, num_bytes);
unsigned int i;
- for (i = 0; i < num_elements; i++)
- array[i] = munge_sample(array[i]);
+ /*
+ * The fifo values have the channel number in the lower 4 bits and
+ * the sample in the upper 12 bits. This just shifts the values
+ * to remove the channel numbers.
+ */
+ for (i = 0; i < nsamples; i++)
+ array[i] = DAS16M1_AI_TO_SAMPLE(array[i]);
}
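The munge above assumes the fifo word layout from the new register map:
channel number in bits [3:0], 12-bit sample in bits [15:4]. A small userspace
sketch with an illustrative word (values are made up for the demo):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define DAS16M1_AI_TO_CHAN(x)	(((x) >> 0) & 0xf)
	#define DAS16M1_AI_TO_SAMPLE(x)	(((x) >> 4) & 0xfff)

	int main(void)
	{
		uint16_t word = (0x0abc << 4) | 0x5; /* sample 0xabc, chan 5 */

		assert(DAS16M1_AI_TO_CHAN(word) == 0x5);
		assert(DAS16M1_AI_TO_SAMPLE(word) == 0x0abc);
		printf("chan=%u sample=0x%03x\n",
		       DAS16M1_AI_TO_CHAN(word), DAS16M1_AI_TO_SAMPLE(word));
		return 0;
	}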
static int das16m1_ai_check_chanlist(struct comedi_device *dev,
@@ -174,8 +173,9 @@ static int das16m1_ai_check_chanlist(struct comedi_device *dev,
return 0;
}
-static int das16m1_cmd_test(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_cmd *cmd)
+static int das16m1_ai_cmdtest(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
{
int err = 0;
@@ -245,17 +245,13 @@ static int das16m1_cmd_test(struct comedi_device *dev,
return 0;
}
-static int das16m1_cmd_exec(struct comedi_device *dev,
- struct comedi_subdevice *s)
+static int das16m1_ai_cmd(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
- struct das16m1_private_struct *devpriv = dev->private;
+ struct das16m1_private *devpriv = dev->private;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
- unsigned int byte, i;
-
- /* disable interrupts and internal pacer */
- devpriv->control_state &= ~INTE & ~PACER_MASK;
- outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL);
+ unsigned int byte;
/* set software count */
devpriv->adc_count = 0;
@@ -274,48 +270,47 @@ static int das16m1_cmd_exec(struct comedi_device *dev,
*/
devpriv->initial_hw_count = comedi_8254_read(devpriv->counter, 1);
- /* setup channel/gain queue */
- for (i = 0; i < cmd->chanlist_len; i++) {
- outb(i, dev->iobase + DAS16M1_QUEUE_ADDR);
- byte =
- Q_CHAN(CR_CHAN(cmd->chanlist[i])) |
- Q_RANGE(CR_RANGE(cmd->chanlist[i]));
- outb(byte, dev->iobase + DAS16M1_QUEUE_DATA);
- }
+ das16m1_ai_set_queue(dev, cmd->chanlist, cmd->chanlist_len);
/* enable interrupts and set internal pacer counter mode and counts */
- devpriv->control_state &= ~PACER_MASK;
+ devpriv->intr_ctrl &= ~DAS16M1_INTR_CTRL_PACER_MASK;
if (cmd->convert_src == TRIG_TIMER) {
comedi_8254_update_divisors(dev->pacer);
comedi_8254_pacer_enable(dev->pacer, 1, 2, true);
- devpriv->control_state |= INT_PACER;
+ devpriv->intr_ctrl |= DAS16M1_INTR_CTRL_PACER_INT;
} else { /* TRIG_EXT */
- devpriv->control_state |= EXT_PACER;
+ devpriv->intr_ctrl |= DAS16M1_INTR_CTRL_PACER_EXT;
}
/* set control & status register */
byte = 0;
- /* if we are using external start trigger (also board dislikes having
- * both start and conversion triggers external simultaneously) */
+ /*
+ * If we are using an external start trigger (the board also dislikes
+ * having both start and conversion triggers external simultaneously).
+ */
if (cmd->start_src == TRIG_EXT && cmd->convert_src != TRIG_EXT)
- byte |= EXT_TRIG_BIT;
+ byte |= DAS16M1_CS_EXT_TRIG;
- outb(byte, dev->iobase + DAS16M1_CS);
- /* clear interrupt bit */
- outb(0, dev->iobase + DAS16M1_CLEAR_INTR);
+ outb(byte, dev->iobase + DAS16M1_CS_REG);
+
+ /* clear interrupt */
+ outb(0, dev->iobase + DAS16M1_CLR_INTR_REG);
- devpriv->control_state |= INTE;
- outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL);
+ devpriv->intr_ctrl |= DAS16M1_INTR_CTRL_INTE;
+ outb(devpriv->intr_ctrl, dev->iobase + DAS16M1_INTR_CTRL_REG);
return 0;
}
-static int das16m1_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
+static int das16m1_ai_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
- struct das16m1_private_struct *devpriv = dev->private;
+ struct das16m1_private *devpriv = dev->private;
- devpriv->control_state &= ~INTE & ~PACER_MASK;
- outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL);
+ /* disable interrupts and pacer */
+ devpriv->intr_ctrl &= ~(DAS16M1_INTR_CTRL_INTE |
+ DAS16M1_INTR_CTRL_PACER_MASK);
+ outb(devpriv->intr_ctrl, dev->iobase + DAS16M1_INTR_CTRL_REG);
return 0;
}
@@ -327,67 +322,58 @@ static int das16m1_ai_eoc(struct comedi_device *dev,
{
unsigned int status;
- status = inb(dev->iobase + DAS16M1_CS);
- if (status & IRQDATA)
+ status = inb(dev->iobase + DAS16M1_CS_REG);
+ if (status & DAS16M1_CS_IRQDATA)
return 0;
return -EBUSY;
}
-static int das16m1_ai_rinsn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int das16m1_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct das16m1_private_struct *devpriv = dev->private;
int ret;
- int n;
- int byte;
-
- /* disable interrupts and internal pacer */
- devpriv->control_state &= ~INTE & ~PACER_MASK;
- outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL);
-
- /* setup channel/gain queue */
- outb(0, dev->iobase + DAS16M1_QUEUE_ADDR);
- byte =
- Q_CHAN(CR_CHAN(insn->chanspec)) | Q_RANGE(CR_RANGE(insn->chanspec));
- outb(byte, dev->iobase + DAS16M1_QUEUE_DATA);
-
- for (n = 0; n < insn->n; n++) {
- /* clear IRQDATA bit */
- outb(0, dev->iobase + DAS16M1_CLEAR_INTR);
+ int i;
+
+ das16m1_ai_set_queue(dev, &insn->chanspec, 1);
+
+ for (i = 0; i < insn->n; i++) {
+ unsigned short val;
+
+ /* clear interrupt */
+ outb(0, dev->iobase + DAS16M1_CLR_INTR_REG);
/* trigger conversion */
- outb(0, dev->iobase);
+ outb(0, dev->iobase + DAS16M1_AI_REG);
ret = comedi_timeout(dev, s, insn, das16m1_ai_eoc, 0);
if (ret)
return ret;
- data[n] = munge_sample(inw(dev->iobase));
+ val = inw(dev->iobase + DAS16M1_AI_REG);
+ data[i] = DAS16M1_AI_TO_SAMPLE(val);
}
- return n;
+ return insn->n;
}
-static int das16m1_di_rbits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int das16m1_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- unsigned int bits;
-
- bits = inb(dev->iobase + DAS16M1_DIO) & 0xf;
- data[1] = bits;
- data[0] = 0;
+ data[1] = inb(dev->iobase + DAS16M1_DI_REG) & 0xf;
return insn->n;
}
-static int das16m1_do_wbits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int das16m1_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
if (comedi_dio_update_state(s, data))
- outb(s->state, dev->iobase + DAS16M1_DIO);
+ outb(s->state, dev->iobase + DAS16M1_DO_REG);
data[1] = s->state;
@@ -396,33 +382,33 @@ static int das16m1_do_wbits(struct comedi_device *dev,
static void das16m1_handler(struct comedi_device *dev, unsigned int status)
{
- struct das16m1_private_struct *devpriv = dev->private;
- struct comedi_subdevice *s;
- struct comedi_async *async;
- struct comedi_cmd *cmd;
+ struct das16m1_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->read_subdev;
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
u16 num_samples;
u16 hw_counter;
- s = dev->read_subdev;
- async = s->async;
- cmd = &async->cmd;
-
/* figure out how many samples are in fifo */
hw_counter = comedi_8254_read(devpriv->counter, 1);
- /* make sure hardware counter reading is not bogus due to initial value
- * not having been loaded yet */
+ /*
+ * Make sure hardware counter reading is not bogus due to initial
+ * value not having been loaded yet.
+ */
if (devpriv->adc_count == 0 &&
hw_counter == devpriv->initial_hw_count) {
num_samples = 0;
} else {
- /* The calculation of num_samples looks odd, but it uses the
+ /*
+ * The calculation of num_samples looks odd, but it uses the
* following facts. 16 bit hardware counter is initialized with
* value of zero (which really means 0x1000). The counter
* decrements by one on each conversion (when the counter
* decrements from zero it goes to 0xffff). num_samples is a
* 16 bit variable, so it will roll over in a similar fashion
* to the hardware counter. Work it out, and this is what you
- * get. */
+ * get.
+ */
num_samples = -hw_counter - devpriv->adc_count;
}
/* check if we only need some of the points */
@@ -431,10 +417,9 @@ static void das16m1_handler(struct comedi_device *dev, unsigned int status)
num_samples = cmd->stop_arg * cmd->chanlist_len;
}
/* make sure we don't try to get too many points if fifo has overrun */
- if (num_samples > FIFO_SIZE)
- num_samples = FIFO_SIZE;
+ if (num_samples > DAS16M1_AI_FIFO_SZ)
+ num_samples = DAS16M1_AI_FIFO_SZ;
insw(dev->iobase, devpriv->ai_buffer, num_samples);
- munge_sample_array(devpriv->ai_buffer, num_samples);
comedi_buf_write_samples(s, devpriv->ai_buffer, num_samples);
devpriv->adc_count += num_samples;
@@ -445,9 +430,11 @@ static void das16m1_handler(struct comedi_device *dev, unsigned int status)
}
}
- /* this probably won't catch overruns since the card doesn't generate
- * overrun interrupts, but we might as well try */
- if (status & OVRUN) {
+ /*
+ * This probably won't catch overruns since the card doesn't generate
+ * overrun interrupts, but we might as well try.
+ */
+ if (status & DAS16M1_CS_OVRUN) {
async->events |= COMEDI_CB_ERROR;
dev_err(dev->class_dev, "fifo overflow\n");
}
@@ -455,14 +442,15 @@ static void das16m1_handler(struct comedi_device *dev, unsigned int status)
comedi_handle_events(dev, s);
}
-static int das16m1_poll(struct comedi_device *dev, struct comedi_subdevice *s)
+static int das16m1_ai_poll(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
unsigned long flags;
unsigned int status;
/* prevent race with interrupt handler */
spin_lock_irqsave(&dev->spinlock, flags);
- status = inb(dev->iobase + DAS16M1_CS);
+ status = inb(dev->iobase + DAS16M1_CS_REG);
das16m1_handler(dev, status);
spin_unlock_irqrestore(&dev->spinlock, flags);
@@ -481,9 +469,9 @@ static irqreturn_t das16m1_interrupt(int irq, void *d)
/* prevent race with comedi_poll() */
spin_lock(&dev->spinlock);
- status = inb(dev->iobase + DAS16M1_CS);
+ status = inb(dev->iobase + DAS16M1_CS_REG);
- if ((status & (IRQDATA | OVRUN)) == 0) {
+ if ((status & (DAS16M1_CS_IRQDATA | DAS16M1_CS_OVRUN)) == 0) {
dev_err(dev->class_dev, "spurious interrupt\n");
spin_unlock(&dev->spinlock);
return IRQ_NONE;
@@ -492,7 +480,7 @@ static irqreturn_t das16m1_interrupt(int irq, void *d)
das16m1_handler(dev, status);
/* clear interrupt */
- outb(0, dev->iobase + DAS16M1_CLEAR_INTR);
+ outb(0, dev->iobase + DAS16M1_CLR_INTR_REG);
spin_unlock(&dev->spinlock);
return IRQ_HANDLED;
@@ -522,15 +510,10 @@ static int das16m1_irq_bits(unsigned int irq)
}
}
-/*
- * Options list:
- * 0 I/O base
- * 1 IRQ
- */
static int das16m1_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
{
- struct das16m1_private_struct *devpriv;
+ struct das16m1_private *devpriv;
struct comedi_subdevice *s;
int ret;
@@ -541,12 +524,12 @@ static int das16m1_attach(struct comedi_device *dev,
ret = comedi_request_region(dev, it->options[0], 0x10);
if (ret)
return ret;
- /* Request an additional region for the 8255 */
- ret = __comedi_request_region(dev, dev->iobase + DAS16M1_82C55,
+ /* Request an additional region for the 8255 and 3rd 8254 */
+ ret = __comedi_request_region(dev, dev->iobase + DAS16M1_8255_IOBASE,
DAS16M1_SIZE2);
if (ret)
return ret;
- devpriv->extra_iobase = dev->iobase + DAS16M1_82C55;
+ devpriv->extra_iobase = dev->iobase + DAS16M1_8255_IOBASE;
/* only irqs 2, 3, 4, 5, 6, 7, 10, 11, 12, 14, and 15 are valid */
if ((1 << it->options[1]) & 0xdcfc) {
@@ -556,12 +539,12 @@ static int das16m1_attach(struct comedi_device *dev,
dev->irq = it->options[1];
}
- dev->pacer = comedi_8254_init(dev->iobase + DAS16M1_8254_SECOND,
+ dev->pacer = comedi_8254_init(dev->iobase + DAS16M1_8254_IOBASE2,
I8254_OSC_BASE_10MHZ, I8254_IO8, 0);
if (!dev->pacer)
return -ENOMEM;
- devpriv->counter = comedi_8254_init(dev->iobase + DAS16M1_8254_FIRST,
+ devpriv->counter = comedi_8254_init(dev->iobase + DAS16M1_8254_IOBASE1,
0, I8254_IO8, 0);
if (!devpriv->counter)
return -ENOMEM;
@@ -570,61 +553,62 @@ static int das16m1_attach(struct comedi_device *dev,
if (ret)
return ret;
+ /* Analog Input subdevice */
s = &dev->subdevices[0];
- /* ai */
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_DIFF;
- s->n_chan = 8;
- s->maxdata = (1 << 12) - 1;
- s->range_table = &range_das16m1;
- s->insn_read = das16m1_ai_rinsn;
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_DIFF;
+ s->n_chan = 8;
+ s->maxdata = 0x0fff;
+ s->range_table = &range_das16m1;
+ s->insn_read = das16m1_ai_insn_read;
if (dev->irq) {
dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = 256;
- s->do_cmdtest = das16m1_cmd_test;
- s->do_cmd = das16m1_cmd_exec;
- s->cancel = das16m1_cancel;
- s->poll = das16m1_poll;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = 256;
+ s->do_cmdtest = das16m1_ai_cmdtest;
+ s->do_cmd = das16m1_ai_cmd;
+ s->cancel = das16m1_ai_cancel;
+ s->poll = das16m1_ai_poll;
+ s->munge = das16m1_ai_munge;
}
+ /* Digital Input subdevice */
s = &dev->subdevices[1];
- /* di */
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 4;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = das16m1_di_rbits;
-
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 4;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = das16m1_di_insn_bits;
+
+ /* Digital Output subdevice */
s = &dev->subdevices[2];
- /* do */
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 4;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = das16m1_do_wbits;
-
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 4;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = das16m1_do_insn_bits;
+
+ /* Digital I/O subdevice (8255) */
s = &dev->subdevices[3];
- /* 8255 */
- ret = subdev_8255_init(dev, s, NULL, DAS16M1_82C55);
+ ret = subdev_8255_init(dev, s, NULL, DAS16M1_8255_IOBASE);
if (ret)
return ret;
/* initialize digital output lines */
- outb(0, dev->iobase + DAS16M1_DIO);
+ outb(0, dev->iobase + DAS16M1_DO_REG);
/* set the interrupt level */
- devpriv->control_state = das16m1_irq_bits(dev->irq) << 4;
- outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL);
+ devpriv->intr_ctrl = DAS16M1_INTR_CTRL_IRQ(das16m1_irq_bits(dev->irq));
+ outb(devpriv->intr_ctrl, dev->iobase + DAS16M1_INTR_CTRL_REG);
return 0;
}
static void das16m1_detach(struct comedi_device *dev)
{
- struct das16m1_private_struct *devpriv = dev->private;
+ struct das16m1_private *devpriv = dev->private;
if (devpriv) {
if (devpriv->extra_iobase)
@@ -643,5 +627,5 @@ static struct comedi_driver das16m1_driver = {
module_comedi_driver(das16m1_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for CIO-DAS16/M1 ISA cards");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/das6402.c b/drivers/staging/comedi/drivers/das6402.c
index 1701294b7..0fdf5e021 100644
--- a/drivers/staging/comedi/drivers/das6402.c
+++ b/drivers/staging/comedi/drivers/das6402.c
@@ -50,48 +50,50 @@
#define DAS6402_AO_LSB_REG(x) (0x04 + ((x) * 2))
#define DAS6402_AO_MSB_REG(x) (0x05 + ((x) * 2))
#define DAS6402_STATUS_REG 0x08
-#define DAS6402_STATUS_FFNE (1 << 0)
-#define DAS6402_STATUS_FHALF (1 << 1)
-#define DAS6402_STATUS_FFULL (1 << 2)
-#define DAS6402_STATUS_XINT (1 << 3)
-#define DAS6402_STATUS_INT (1 << 4)
-#define DAS6402_STATUS_XTRIG (1 << 5)
-#define DAS6402_STATUS_INDGT (1 << 6)
-#define DAS6402_STATUS_10MHZ (1 << 7)
-#define DAS6402_STATUS_W_CLRINT (1 << 0)
-#define DAS6402_STATUS_W_CLRXTR (1 << 1)
-#define DAS6402_STATUS_W_CLRXIN (1 << 2)
-#define DAS6402_STATUS_W_EXTEND (1 << 4)
-#define DAS6402_STATUS_W_ARMED (1 << 5)
-#define DAS6402_STATUS_W_POSTMODE (1 << 6)
-#define DAS6402_STATUS_W_10MHZ (1 << 7)
+#define DAS6402_STATUS_FFNE BIT(0)
+#define DAS6402_STATUS_FHALF BIT(1)
+#define DAS6402_STATUS_FFULL BIT(2)
+#define DAS6402_STATUS_XINT BIT(3)
+#define DAS6402_STATUS_INT BIT(4)
+#define DAS6402_STATUS_XTRIG BIT(5)
+#define DAS6402_STATUS_INDGT BIT(6)
+#define DAS6402_STATUS_10MHZ BIT(7)
+#define DAS6402_STATUS_W_CLRINT BIT(0)
+#define DAS6402_STATUS_W_CLRXTR BIT(1)
+#define DAS6402_STATUS_W_CLRXIN BIT(2)
+#define DAS6402_STATUS_W_EXTEND BIT(4)
+#define DAS6402_STATUS_W_ARMED BIT(5)
+#define DAS6402_STATUS_W_POSTMODE BIT(6)
+#define DAS6402_STATUS_W_10MHZ BIT(7)
#define DAS6402_CTRL_REG 0x09
-#define DAS6402_CTRL_SOFT_TRIG (0 << 0)
-#define DAS6402_CTRL_EXT_FALL_TRIG (1 << 0)
-#define DAS6402_CTRL_EXT_RISE_TRIG (2 << 0)
-#define DAS6402_CTRL_PACER_TRIG (3 << 0)
-#define DAS6402_CTRL_BURSTEN (1 << 2)
-#define DAS6402_CTRL_XINTE (1 << 3)
+#define DAS6402_CTRL_TRIG(x) ((x) << 0)
+#define DAS6402_CTRL_SOFT_TRIG DAS6402_CTRL_TRIG(0)
+#define DAS6402_CTRL_EXT_FALL_TRIG DAS6402_CTRL_TRIG(1)
+#define DAS6402_CTRL_EXT_RISE_TRIG DAS6402_CTRL_TRIG(2)
+#define DAS6402_CTRL_PACER_TRIG DAS6402_CTRL_TRIG(3)
+#define DAS6402_CTRL_BURSTEN BIT(2)
+#define DAS6402_CTRL_XINTE BIT(3)
#define DAS6402_CTRL_IRQ(x) ((x) << 4)
-#define DAS6402_CTRL_INTE (1 << 7)
+#define DAS6402_CTRL_INTE BIT(7)
#define DAS6402_TRIG_REG 0x0a
-#define DAS6402_TRIG_TGEN (1 << 0)
-#define DAS6402_TRIG_TGSEL (1 << 1)
-#define DAS6402_TRIG_TGPOL (1 << 2)
-#define DAS6402_TRIG_PRETRIG (1 << 3)
+#define DAS6402_TRIG_TGEN BIT(0)
+#define DAS6402_TRIG_TGSEL BIT(1)
+#define DAS6402_TRIG_TGPOL BIT(2)
+#define DAS6402_TRIG_PRETRIG BIT(3)
#define DAS6402_AO_RANGE(_chan, _range) ((_range) << ((_chan) ? 6 : 4))
#define DAS6402_AO_RANGE_MASK(_chan) (3 << ((_chan) ? 6 : 4))
#define DAS6402_MODE_REG 0x0b
-#define DAS6402_MODE_RANGE(x) ((x) << 0)
-#define DAS6402_MODE_POLLED (0 << 2)
-#define DAS6402_MODE_FIFONEPTY (1 << 2)
-#define DAS6402_MODE_FIFOHFULL (2 << 2)
-#define DAS6402_MODE_EOB (3 << 2)
-#define DAS6402_MODE_ENHANCED (1 << 4)
-#define DAS6402_MODE_SE (1 << 5)
-#define DAS6402_MODE_UNI (1 << 6)
-#define DAS6402_MODE_DMA1 (0 << 7)
-#define DAS6402_MODE_DMA3 (1 << 7)
+#define DAS6402_MODE_RANGE(x) ((x) << 2)
+#define DAS6402_MODE_POLLED DAS6402_MODE_RANGE(0)
+#define DAS6402_MODE_FIFONEPTY DAS6402_MODE_RANGE(1)
+#define DAS6402_MODE_FIFOHFULL DAS6402_MODE_RANGE(2)
+#define DAS6402_MODE_EOB DAS6402_MODE_RANGE(3)
+#define DAS6402_MODE_ENHANCED BIT(4)
+#define DAS6402_MODE_SE BIT(5)
+#define DAS6402_MODE_UNI BIT(6)
+#define DAS6402_MODE_DMA(x) ((x) << 7)
+#define DAS6402_MODE_DMA1 DAS6402_MODE_DMA(0)
+#define DAS6402_MODE_DMA3 DAS6402_MODE_DMA(1)
#define DAS6402_TIMER_BASE 0x0c
static const struct comedi_lrange das6402_ai_ranges = {
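The single-bit define conversions in this and the neighbouring hunks rely on
the kernel's BIT() macro; multi-bit fields such as (3 << 0) are left alone or
wrapped in generator macros instead. A userspace sketch, reproducing BIT() as
the kernel defines it, to show it is a drop-in replacement:

	#include <assert.h>
	#include <stdio.h>

	#define BIT(nr)	(1UL << (nr))	/* as in include/linux/bitops.h */

	#define DAS6402_STATUS_FFNE	BIT(0)	/* was (1 << 0) */
	#define DAS6402_STATUS_10MHZ	BIT(7)	/* was (1 << 7) */

	int main(void)
	{
		assert(DAS6402_STATUS_FFNE == 0x01);
		assert(DAS6402_STATUS_10MHZ == 0x80);
		/* 1UL avoids the undefined behaviour (1 << 31) has on int */
		assert(BIT(31) == 0x80000000UL);
		printf("BIT(n) matches the old (1 << n) values\n");
		return 0;
	}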
diff --git a/drivers/staging/comedi/drivers/das800.c b/drivers/staging/comedi/drivers/das800.c
index b02f12201..fd4cb4911 100644
--- a/drivers/staging/comedi/drivers/das800.c
+++ b/drivers/staging/comedi/drivers/das800.c
@@ -1,56 +1,56 @@
/*
- comedi/drivers/das800.c
- Driver for Keitley das800 series boards and compatibles
- Copyright (C) 2000 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * comedi/drivers/das800.c
+ * Driver for Keithley das800 series boards and compatibles
+ * Copyright (C) 2000 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
-Driver: das800
-Description: Keithley Metrabyte DAS800 (& compatibles)
-Author: Frank Mori Hess <fmhess@users.sourceforge.net>
-Devices: [Keithley Metrabyte] DAS-800 (das-800), DAS-801 (das-801),
- DAS-802 (das-802),
- [Measurement Computing] CIO-DAS800 (cio-das800),
- CIO-DAS801 (cio-das801), CIO-DAS802 (cio-das802),
- CIO-DAS802/16 (cio-das802/16)
-Status: works, cio-das802/16 untested - email me if you have tested it
-
-Configuration options:
- [0] - I/O port base address
- [1] - IRQ (optional, required for timed or externally triggered conversions)
-
-Notes:
- IRQ can be omitted, although the cmd interface will not work without it.
-
- All entries in the channel/gain list must use the same gain and be
- consecutive channels counting upwards in channel number (these are
- hardware limitations.)
-
- I've never tested the gain setting stuff since I only have a
- DAS-800 board with fixed gain.
-
- The cio-das802/16 does not have a fifo-empty status bit! Therefore
- only fifo-half-full transfers are possible with this card.
-
-cmd triggers supported:
- start_src: TRIG_NOW | TRIG_EXT
- scan_begin_src: TRIG_FOLLOW
- scan_end_src: TRIG_COUNT
- convert_src: TRIG_TIMER | TRIG_EXT
- stop_src: TRIG_NONE | TRIG_COUNT
-*/
+ * Driver: das800
+ * Description: Keithley Metrabyte DAS800 (& compatibles)
+ * Author: Frank Mori Hess <fmhess@users.sourceforge.net>
+ * Devices: [Keithley Metrabyte] DAS-800 (das-800), DAS-801 (das-801),
+ * DAS-802 (das-802),
+ * [Measurement Computing] CIO-DAS800 (cio-das800),
+ * CIO-DAS801 (cio-das801), CIO-DAS802 (cio-das802),
+ * CIO-DAS802/16 (cio-das802/16)
+ * Status: works, cio-das802/16 untested - email me if you have tested it
+ *
+ * Configuration options:
+ * [0] - I/O port base address
+ * [1] - IRQ (optional, required for timed or externally triggered conversions)
+ *
+ * Notes:
+ * IRQ can be omitted, although the cmd interface will not work without it.
+ *
+ * All entries in the channel/gain list must use the same gain and be
+ * consecutive channels counting upwards in channel number (these are
+ * hardware limitations.)
+ *
+ * I've never tested the gain setting stuff since I only have a
+ * DAS-800 board with fixed gain.
+ *
+ * The cio-das802/16 does not have a fifo-empty status bit! Therefore
+ * only fifo-half-full transfers are possible with this card.
+ *
+ * cmd triggers supported:
+ * start_src: TRIG_NOW | TRIG_EXT
+ * scan_begin_src: TRIG_FOLLOW
+ * scan_end_src: TRIG_COUNT
+ * convert_src: TRIG_TIMER | TRIG_EXT
+ * stop_src: TRIG_NONE | TRIG_COUNT
+ */
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -218,7 +218,7 @@ struct das800_private {
};
static void das800_ind_write(struct comedi_device *dev,
- unsigned val, unsigned reg)
+ unsigned int val, unsigned int reg)
{
/*
* Select dev->iobase + 2 to be desired register
@@ -228,7 +228,7 @@ static void das800_ind_write(struct comedi_device *dev,
outb(val, dev->iobase + 2);
}
-static unsigned das800_ind_read(struct comedi_device *dev, unsigned reg)
+static unsigned int das800_ind_read(struct comedi_device *dev, unsigned int reg)
{
/*
* Select dev->iobase + 7 to be desired register
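das800_ind_write() and das800_ind_read() above use a select-then-access
idiom: a write to an index port chooses which internal register the shared
data port maps to. A userspace model with a simulated register file; the
port numbers and values here are illustrative, not the driver's actual map:

	#include <stdio.h>

	static unsigned char regs[8];	/* simulated internal registers */
	static unsigned int selected;

	static void select_reg(unsigned int reg) { selected = reg & 0x7; }
	static void write_data(unsigned char val) { regs[selected] = val; }
	static unsigned char read_data(void) { return regs[selected]; }

	int main(void)
	{
		select_reg(2);		/* das800_ind_write(): select... */
		write_data(0x5a);	/* ...then write the data port */
		select_reg(2);		/* das800_ind_read(): select... */
		printf("reg 2 = 0x%02x\n", read_data()); /* ...then read */
		return 0;
	}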
diff --git a/drivers/staging/comedi/drivers/dmm32at.c b/drivers/staging/comedi/drivers/dmm32at.c
index 958c0d4aa..b8606ded0 100644
--- a/drivers/staging/comedi/drivers/dmm32at.c
+++ b/drivers/staging/comedi/drivers/dmm32at.c
@@ -46,73 +46,75 @@
#define DMM32AT_AI_START_CONV_REG 0x00
#define DMM32AT_AI_LSB_REG 0x00
#define DMM32AT_AUX_DOUT_REG 0x01
-#define DMM32AT_AUX_DOUT2 (1 << 2) /* J3.42 - OUT2 (OUT2EN) */
-#define DMM32AT_AUX_DOUT1 (1 << 1) /* J3.43 */
-#define DMM32AT_AUX_DOUT0 (1 << 0) /* J3.44 - OUT0 (OUT0EN) */
+#define DMM32AT_AUX_DOUT2 BIT(2) /* J3.42 - OUT2 (OUT2EN) */
+#define DMM32AT_AUX_DOUT1 BIT(1) /* J3.43 */
+#define DMM32AT_AUX_DOUT0 BIT(0) /* J3.44 - OUT0 (OUT0EN) */
#define DMM32AT_AI_MSB_REG 0x01
#define DMM32AT_AI_LO_CHAN_REG 0x02
#define DMM32AT_AI_HI_CHAN_REG 0x03
#define DMM32AT_AUX_DI_REG 0x04
-#define DMM32AT_AUX_DI_DACBUSY (1 << 7)
-#define DMM32AT_AUX_DI_CALBUSY (1 << 6)
-#define DMM32AT_AUX_DI3 (1 << 3) /* J3.45 - ADCLK (CLKSEL) */
-#define DMM32AT_AUX_DI2 (1 << 2) /* J3.46 - GATE12 (GT12EN) */
-#define DMM32AT_AUX_DI1 (1 << 1) /* J3.47 - GATE0 (GT0EN) */
-#define DMM32AT_AUX_DI0 (1 << 0) /* J3.48 - CLK0 (SRC0) */
+#define DMM32AT_AUX_DI_DACBUSY BIT(7)
+#define DMM32AT_AUX_DI_CALBUSY BIT(6)
+#define DMM32AT_AUX_DI3 BIT(3) /* J3.45 - ADCLK (CLKSEL) */
+#define DMM32AT_AUX_DI2 BIT(2) /* J3.46 - GATE12 (GT12EN) */
+#define DMM32AT_AUX_DI1 BIT(1) /* J3.47 - GATE0 (GT0EN) */
+#define DMM32AT_AUX_DI0 BIT(0) /* J3.48 - CLK0 (SRC0) */
#define DMM32AT_AO_LSB_REG 0x04
#define DMM32AT_AO_MSB_REG 0x05
#define DMM32AT_AO_MSB_DACH(x) ((x) << 6)
#define DMM32AT_FIFO_DEPTH_REG 0x06
#define DMM32AT_FIFO_CTRL_REG 0x07
-#define DMM32AT_FIFO_CTRL_FIFOEN (1 << 3)
-#define DMM32AT_FIFO_CTRL_SCANEN (1 << 2)
-#define DMM32AT_FIFO_CTRL_FIFORST (1 << 1)
+#define DMM32AT_FIFO_CTRL_FIFOEN BIT(3)
+#define DMM32AT_FIFO_CTRL_SCANEN BIT(2)
+#define DMM32AT_FIFO_CTRL_FIFORST BIT(1)
#define DMM32AT_FIFO_STATUS_REG 0x07
-#define DMM32AT_FIFO_STATUS_EF (1 << 7)
-#define DMM32AT_FIFO_STATUS_HF (1 << 6)
-#define DMM32AT_FIFO_STATUS_FF (1 << 5)
-#define DMM32AT_FIFO_STATUS_OVF (1 << 4)
-#define DMM32AT_FIFO_STATUS_FIFOEN (1 << 3)
-#define DMM32AT_FIFO_STATUS_SCANEN (1 << 2)
+#define DMM32AT_FIFO_STATUS_EF BIT(7)
+#define DMM32AT_FIFO_STATUS_HF BIT(6)
+#define DMM32AT_FIFO_STATUS_FF BIT(5)
+#define DMM32AT_FIFO_STATUS_OVF BIT(4)
+#define DMM32AT_FIFO_STATUS_FIFOEN BIT(3)
+#define DMM32AT_FIFO_STATUS_SCANEN BIT(2)
#define DMM32AT_FIFO_STATUS_PAGE_MASK (3 << 0)
#define DMM32AT_CTRL_REG 0x08
-#define DMM32AT_CTRL_RESETA (1 << 5)
-#define DMM32AT_CTRL_RESETD (1 << 4)
-#define DMM32AT_CTRL_INTRST (1 << 3)
-#define DMM32AT_CTRL_PAGE_8254 (0 << 0)
-#define DMM32AT_CTRL_PAGE_8255 (1 << 0)
-#define DMM32AT_CTRL_PAGE_CALIB (3 << 0)
+#define DMM32AT_CTRL_RESETA BIT(5)
+#define DMM32AT_CTRL_RESETD BIT(4)
+#define DMM32AT_CTRL_INTRST BIT(3)
+#define DMM32AT_CTRL_PAGE(x) ((x) << 0)
+#define DMM32AT_CTRL_PAGE_8254 DMM32AT_CTRL_PAGE(0)
+#define DMM32AT_CTRL_PAGE_8255 DMM32AT_CTRL_PAGE(1)
+#define DMM32AT_CTRL_PAGE_CALIB DMM32AT_CTRL_PAGE(3)
#define DMM32AT_AI_STATUS_REG 0x08
-#define DMM32AT_AI_STATUS_STS (1 << 7)
-#define DMM32AT_AI_STATUS_SD1 (1 << 6)
-#define DMM32AT_AI_STATUS_SD0 (1 << 5)
+#define DMM32AT_AI_STATUS_STS BIT(7)
+#define DMM32AT_AI_STATUS_SD1 BIT(6)
+#define DMM32AT_AI_STATUS_SD0 BIT(5)
#define DMM32AT_AI_STATUS_ADCH_MASK (0x1f << 0)
#define DMM32AT_INTCLK_REG 0x09
-#define DMM32AT_INTCLK_ADINT (1 << 7)
-#define DMM32AT_INTCLK_DINT (1 << 6)
-#define DMM32AT_INTCLK_TINT (1 << 5)
-#define DMM32AT_INTCLK_CLKEN (1 << 1) /* 1=see below 0=software */
-#define DMM32AT_INTCLK_CLKSEL (1 << 0) /* 1=OUT2 0=EXTCLK */
+#define DMM32AT_INTCLK_ADINT BIT(7)
+#define DMM32AT_INTCLK_DINT BIT(6)
+#define DMM32AT_INTCLK_TINT BIT(5)
+#define DMM32AT_INTCLK_CLKEN BIT(1) /* 1=see below 0=software */
+#define DMM32AT_INTCLK_CLKSEL BIT(0) /* 1=OUT2 0=EXTCLK */
#define DMM32AT_CTRDIO_CFG_REG 0x0a
-#define DMM32AT_CTRDIO_CFG_FREQ12 (1 << 7) /* CLK12 1=100KHz 0=10MHz */
-#define DMM32AT_CTRDIO_CFG_FREQ0 (1 << 6) /* CLK0 1=10KHz 0=10MHz */
-#define DMM32AT_CTRDIO_CFG_OUT2EN (1 << 5) /* J3.42 1=OUT2 is DOUT2 */
-#define DMM32AT_CTRDIO_CFG_OUT0EN (1 << 4) /* J3,44 1=OUT0 is DOUT0 */
-#define DMM32AT_CTRDIO_CFG_GT0EN (1 << 2) /* J3.47 1=DIN1 is GATE0 */
-#define DMM32AT_CTRDIO_CFG_SRC0 (1 << 1) /* CLK0 is 0=FREQ0 1=J3.48 */
-#define DMM32AT_CTRDIO_CFG_GT12EN (1 << 0) /* J3.46 1=DIN2 is GATE12 */
+#define DMM32AT_CTRDIO_CFG_FREQ12 BIT(7) /* CLK12 1=100KHz 0=10MHz */
+#define DMM32AT_CTRDIO_CFG_FREQ0 BIT(6) /* CLK0 1=10KHz 0=10MHz */
+#define DMM32AT_CTRDIO_CFG_OUT2EN BIT(5) /* J3.42 1=OUT2 is DOUT2 */
+#define DMM32AT_CTRDIO_CFG_OUT0EN BIT(4) /* J3.44 1=OUT0 is DOUT0 */
+#define DMM32AT_CTRDIO_CFG_GT0EN BIT(2) /* J3.47 1=DIN1 is GATE0 */
+#define DMM32AT_CTRDIO_CFG_SRC0 BIT(1) /* CLK0 is 0=FREQ0 1=J3.48 */
+#define DMM32AT_CTRDIO_CFG_GT12EN BIT(0) /* J3.46 1=DIN2 is GATE12 */
#define DMM32AT_AI_CFG_REG 0x0b
-#define DMM32AT_AI_CFG_SCINT_20US (0 << 4)
-#define DMM32AT_AI_CFG_SCINT_15US (1 << 4)
-#define DMM32AT_AI_CFG_SCINT_10US (2 << 4)
-#define DMM32AT_AI_CFG_SCINT_5US (3 << 4)
-#define DMM32AT_AI_CFG_RANGE (1 << 3) /* 0=5V 1=10V */
-#define DMM32AT_AI_CFG_ADBU (1 << 2) /* 0=bipolar 1=unipolar */
+#define DMM32AT_AI_CFG_SCINT(x) ((x) << 4)
+#define DMM32AT_AI_CFG_SCINT_20US DMM32AT_AI_CFG_SCINT(0)
+#define DMM32AT_AI_CFG_SCINT_15US DMM32AT_AI_CFG_SCINT(1)
+#define DMM32AT_AI_CFG_SCINT_10US DMM32AT_AI_CFG_SCINT(2)
+#define DMM32AT_AI_CFG_SCINT_5US DMM32AT_AI_CFG_SCINT(3)
+#define DMM32AT_AI_CFG_RANGE BIT(3) /* 0=5V 1=10V */
+#define DMM32AT_AI_CFG_ADBU BIT(2) /* 0=bipolar 1=unipolar */
#define DMM32AT_AI_CFG_GAIN(x) ((x) << 0)
#define DMM32AT_AI_READBACK_REG 0x0b
-#define DMM32AT_AI_READBACK_WAIT (1 << 7) /* DMM32AT_AI_STATUS_STS */
-#define DMM32AT_AI_READBACK_RANGE (1 << 3)
-#define DMM32AT_AI_READBACK_ADBU (1 << 2)
+#define DMM32AT_AI_READBACK_WAIT BIT(7) /* DMM32AT_AI_STATUS_STS */
+#define DMM32AT_AI_READBACK_RANGE BIT(3)
+#define DMM32AT_AI_READBACK_ADBU BIT(2)
#define DMM32AT_AI_READBACK_GAIN_MASK (3 << 0)
#define DMM32AT_CLK1 0x0d
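Beyond BIT(), the dmm32at hunk introduces generator macros for multi-bit
fields, so one definition encodes every legal value of the field. A userspace
check that the generated encodings match the removed one-off defines:

	#include <assert.h>
	#include <stdio.h>

	#define DMM32AT_CTRL_PAGE(x)	((x) << 0)
	#define DMM32AT_AI_CFG_SCINT(x)	((x) << 4)

	int main(void)
	{
		assert(DMM32AT_CTRL_PAGE(1) == 0x01);	 /* old (1 << 0), 8255 page */
		assert(DMM32AT_CTRL_PAGE(3) == 0x03);	 /* old (3 << 0), calib page */
		assert(DMM32AT_AI_CFG_SCINT(2) == 0x20); /* old (2 << 4), 10 us */
		printf("field macros preserve the old encodings\n");
		return 0;
	}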
diff --git a/drivers/staging/comedi/drivers/dt2801.c b/drivers/staging/comedi/drivers/dt2801.c
index 6c7b4d27c..c2ce1eb87 100644
--- a/drivers/staging/comedi/drivers/dt2801.c
+++ b/drivers/staging/comedi/drivers/dt2801.c
@@ -4,30 +4,30 @@
*
*/
/*
-Driver: dt2801
-Description: Data Translation DT2801 series and DT01-EZ
-Author: ds
-Status: works
-Devices: [Data Translation] DT2801 (dt2801), DT2801-A, DT2801/5716A,
- DT2805, DT2805/5716A, DT2808, DT2818, DT2809, DT01-EZ
-
-This driver can autoprobe the type of board.
-
-Configuration options:
- [0] - I/O port base address
- [1] - unused
- [2] - A/D reference 0=differential, 1=single-ended
- [3] - A/D range
- 0 = [-10, 10]
- 1 = [0,10]
- [4] - D/A 0 range
- 0 = [-10, 10]
- 1 = [-5,5]
- 2 = [-2.5,2.5]
- 3 = [0,10]
- 4 = [0,5]
- [5] - D/A 1 range (same choices)
-*/
+ * Driver: dt2801
+ * Description: Data Translation DT2801 series and DT01-EZ
+ * Author: ds
+ * Status: works
+ * Devices: [Data Translation] DT2801 (dt2801), DT2801-A, DT2801/5716A,
+ * DT2805, DT2805/5716A, DT2808, DT2818, DT2809, DT01-EZ
+ *
+ * This driver can autoprobe the type of board.
+ *
+ * Configuration options:
+ * [0] - I/O port base address
+ * [1] - unused
+ * [2] - A/D reference 0=differential, 1=single-ended
+ * [3] - A/D range
+ * 0 = [-10, 10]
+ * 1 = [0,10]
+ * [4] - D/A 0 range
+ * 0 = [-10, 10]
+ * 1 = [-5,5]
+ * 2 = [-2.5,2.5]
+ * 3 = [0,10]
+ * 4 = [0,5]
+ * [5] - D/A 1 range (same choices)
+ */
#include <linux/module.h>
#include "../comedidev.h"
@@ -65,9 +65,10 @@ Configuration options:
#define DT_C_SET_AD 0xd
#define DT_C_READ_AD 0xe
-/* Command modifiers (only used with read/write), EXTTRIG can be
- used with some other commands.
-*/
+/*
+ * Command modifiers (only used with read/write), EXTTRIG can be
+ * used with some other commands.
+ */
#define DT_MOD_DMA BIT(4)
#define DT_MOD_CONT BIT(5)
#define DT_MOD_EXTCLK BIT(6)
@@ -135,9 +136,10 @@ struct dt2801_board {
int dabits;
};
-/* Typeid's for the different boards of the DT2801-series
- (taken from the test-software, that comes with the board)
- */
+/*
+ * Type IDs for the different boards of the DT2801 series
+ * (taken from the test software that comes with the board)
+ */
static const struct dt2801_board boardtypes[] = {
{
.name = "dt2801",
@@ -209,15 +211,18 @@ struct dt2801_private {
const struct comedi_lrange *dac_range_types[2];
};
-/* These are the low-level routines:
- writecommand: write a command to the board
- writedata: write data byte
- readdata: read data byte
+/*
+ * These are the low-level routines:
+ * writecommand: write a command to the board
+ * writedata: write data byte
+ * readdata: read data byte
*/
-/* Only checks DataOutReady-flag, not the Ready-flag as it is done
- in the examples of the manual. I don't see why this should be
- necessary. */
+/*
+ * This only checks the DataOutReady flag, not the Ready flag as is
+ * done in the manual's examples. I don't see why that should be
+ * necessary.
+ */
static int dt2801_readdata(struct comedi_device *dev, int *data)
{
int stat = 0;
@@ -517,14 +522,14 @@ static int dt2801_dio_insn_config(struct comedi_device *dev,
}
/*
- options:
- [0] - i/o base
- [1] - unused
- [2] - a/d 0=differential, 1=single-ended
- [3] - a/d range 0=[-10,10], 1=[0,10]
- [4] - dac0 range 0=[-10,10], 1=[-5,5], 2=[-2.5,2.5] 3=[0,10], 4=[0,5]
- [5] - dac1 range 0=[-10,10], 1=[-5,5], 2=[-2.5,2.5] 3=[0,10], 4=[0,5]
-*/
+ * options:
+ * [0] - i/o base
+ * [1] - unused
+ * [2] - a/d 0=differential, 1=single-ended
+ * [3] - a/d range 0=[-10,10], 1=[0,10]
+ * [4] - dac0 range 0=[-10,10], 1=[-5,5], 2=[-2.5,2.5] 3=[0,10], 4=[0,5]
+ * [5] - dac1 range 0=[-10,10], 1=[-5,5], 2=[-2.5,2.5] 3=[0,10], 4=[0,5]
+ */
static int dt2801_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct dt2801_board *board;
diff --git a/drivers/staging/comedi/drivers/dt2811.c b/drivers/staging/comedi/drivers/dt2811.c
index a80773291..8bbd93814 100644
--- a/drivers/staging/comedi/drivers/dt2811.c
+++ b/drivers/staging/comedi/drivers/dt2811.c
@@ -1,224 +1,469 @@
/*
- comedi/drivers/dt2811.c
- Hardware driver for Data Translation DT2811
-
- COMEDI - Linux Control and Measurement Device Interface
- History:
- Base Version - David A. Schleef <ds@schleef.org>
- December 1998 - Updated to work. David does not have a DT2811
- board any longer so this was suffering from bitrot.
- Updated performed by ...
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
+ * Comedi driver for Data Translation DT2811
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
+
/*
-Driver: dt2811
-Description: Data Translation DT2811
-Author: ds
-Devices: [Data Translation] DT2811-PGL (dt2811-pgl), DT2811-PGH (dt2811-pgh)
-Status: works
-
-Configuration options:
- [0] - I/O port base address
- [1] - IRQ, although this is currently unused
- [2] - A/D reference
- 0 = signle-ended
- 1 = differential
- 2 = pseudo-differential (common reference)
- [3] - A/D range
- 0 = [-5, 5]
- 1 = [-2.5, 2.5]
- 2 = [0, 5]
- [4] - D/A 0 range (same choices)
- [4] - D/A 1 range (same choices)
-*/
+ * Driver: dt2811
+ * Description: Data Translation DT2811
+ * Author: ds
+ * Devices: [Data Translation] DT2811-PGL (dt2811-pgl), DT2811-PGH (dt2811-pgh)
+ * Status: works
+ *
+ * Configuration options:
+ * [0] - I/O port base address
+ * [1] - IRQ (optional, needed for async command support)
+ * [2] - A/D reference (# of analog inputs)
+ * 0 = single-ended (16 channels)
+ * 1 = differential (8 channels)
+ * 2 = pseudo-differential (16 channels)
+ * [3] - A/D range (deprecated, see below)
+ * [4] - D/A 0 range (deprecated, see below)
+ * [5] - D/A 1 range (deprecated, see below)
+ *
+ * Notes:
+ * - A/D ranges are not programmable but the gain is. The AI subdevice has
+ * a range_table containing all the possible analog input range/gain
+ * options for the dt2811-pgh or dt2811-pgl. Use the range that matches
+ * your board configuration and the desired gain to correctly convert
+ * between data values and physical units and to set the correct output
+ * gain.
+ * - D/A ranges are not programmable. The AO subdevice has a range_table
+ * containing all the possible analog output ranges. Use the range
+ * that matches your board configuration to convert between data
+ * values and physical units.
+ */
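To make the range/gain note above concrete: once the correct range index is chosen, converting a raw 12-bit sample to volts is plain linear interpolation over that range's span. A hedged user-space sketch (the helper name and values here are illustrative; comedilib's comedi_to_phys() does this for real applications):

    #include <stdio.h>

    /* linear conversion of a 12-bit sample over a [min, max] range span */
    static double sample_to_volts(unsigned int data, double min, double max)
    {
        return min + (max - min) * data / 0xfff;
    }

    int main(void)
    {
        /* e.g. a bipolar -2.5V..+2.5V range: mid-scale reads ~0 V */
        printf("%.4f V\n", sample_to_volts(0x800, -2.5, 2.5));
        return 0;
    }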
#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
#include "../comedidev.h"
-static const struct comedi_lrange range_dt2811_pgh_ai_5_unipolar = {
- 4, {
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- UNI_RANGE(0.625)
- }
+/*
+ * Register I/O map
+ */
+#define DT2811_ADCSR_REG 0x00 /* r/w A/D Control/Status */
+#define DT2811_ADCSR_ADDONE BIT(7) /* r 1=A/D conv done */
+#define DT2811_ADCSR_ADERROR BIT(6) /* r 1=A/D error */
+#define DT2811_ADCSR_ADBUSY BIT(5) /* r 1=A/D busy */
+#define DT2811_ADCSR_CLRERROR BIT(4)
+#define DT2811_ADCSR_DMAENB BIT(3) /* r/w 1=dma ena */
+#define DT2811_ADCSR_INTENB BIT(2) /* r/w 1=interrupts ena */
+#define DT2811_ADCSR_ADMODE(x) (((x) & 0x3) << 0)
+
+#define DT2811_ADGCR_REG 0x01 /* r/w A/D Gain/Channel */
+#define DT2811_ADGCR_GAIN(x) (((x) & 0x3) << 6)
+#define DT2811_ADGCR_CHAN(x) (((x) & 0xf) << 0)
+
+#define DT2811_ADDATA_LO_REG 0x02 /* r A/D Data low byte */
+#define DT2811_ADDATA_HI_REG 0x03 /* r A/D Data high byte */
+
+#define DT2811_DADATA_LO_REG(x) (0x02 + ((x) * 2)) /* w D/A Data low */
+#define DT2811_DADATA_HI_REG(x) (0x03 + ((x) * 2)) /* w D/A Data high */
+
+#define DT2811_DI_REG 0x06 /* r Digital Input Port 0 */
+#define DT2811_DO_REG 0x06 /* w Digital Output Port 1 */
+
+#define DT2811_TMRCTR_REG 0x07 /* r/w Timer/Counter */
+#define DT2811_TMRCTR_MANTISSA(x) (((x) & 0x7) << 3)
+#define DT2811_TMRCTR_EXPONENT(x) (((x) & 0x7) << 0)
+
+#define DT2811_OSC_BASE 1666 /* 600 kHz = 1666.6667ns */
+
+/*
+ * Timer frequency control:
+ * DT2811_TMRCTR_MANTISSA DT2811_TMRCTR_EXPONENT
+ * val divisor frequency val multiply divisor/divide frequency by
+ * 0 1 600 kHz 0 1
+ * 1 10 60 kHz 1 10
+ * 2 2 300 kHz 2 100
+ * 3 3 200 kHz 3 1000
+ * 4 4 150 kHz 4 10000
+ * 5 5 120 kHz 5 100000
+ * 6 6 100 kHz 6 1000000
+ * 7 12 50 kHz 7 10000000
+ */
+static const unsigned int dt2811_clk_dividers[] = {
+ 1, 10, 2, 3, 4, 5, 6, 12
};
-static const struct comedi_lrange range_dt2811_pgh_ai_2_5_bipolar = {
- 4, {
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- BIP_RANGE(0.3125)
- }
+static const unsigned int dt2811_clk_multipliers[] = {
+ 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000
};
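As a sanity check on the divider scheme above: the programmed period is simply DT2811_OSC_BASE times the mantissa divisor times the exponent multiplier. A standalone sketch using the same table values (copied from above):

    #include <stdio.h>

    #define DT2811_OSC_BASE 1666ULL     /* 600 kHz, in ns (truncated) */

    static const unsigned int dividers[] = { 1, 10, 2, 3, 4, 5, 6, 12 };

    int main(void)
    {
        /* mantissa=1 (divide by 10), exponent=1 (multiply by 10) */
        unsigned long long ns = DT2811_OSC_BASE * dividers[1] * 10;

        printf("%llu ns (~6 kHz)\n", ns);   /* 166600 ns */
        return 0;
    }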
-static const struct comedi_lrange range_dt2811_pgh_ai_5_bipolar = {
- 4, {
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625)
+/*
+ * The Analog Input range is set using jumpers on the board.
+ *
+ * Input Range W9 W10
+ * -5V to +5V In Out
+ * -2.5V to +2.5V In In
+ * 0V to +5V Out In
+ *
+ * The gain may be set to 1, 2, 4, or 8 (on the dt2811-pgh) or to
+ * 1, 10, 100, or 500 (on the dt2811-pgl).
+ */
+static const struct comedi_lrange dt2811_pgh_ai_ranges = {
+ 12, {
+ BIP_RANGE(5), /* range 0: gain=1 */
+ BIP_RANGE(2.5), /* range 1: gain=2 */
+ BIP_RANGE(1.25), /* range 2: gain=4 */
+ BIP_RANGE(0.625), /* range 3: gain=8 */
+
+ BIP_RANGE(2.5), /* range 0+4: gain=1 */
+ BIP_RANGE(1.25), /* range 1+4: gain=2 */
+ BIP_RANGE(0.625), /* range 2+4: gain=4 */
+ BIP_RANGE(0.3125), /* range 3+4: gain=8 */
+
+ UNI_RANGE(5), /* range 0+8: gain=1 */
+ UNI_RANGE(2.5), /* range 1+8: gain=2 */
+ UNI_RANGE(1.25), /* range 2+8: gain=4 */
+ UNI_RANGE(0.625) /* range 3+8: gain=8 */
}
};
-static const struct comedi_lrange range_dt2811_pgl_ai_5_unipolar = {
- 4, {
- UNI_RANGE(5),
- UNI_RANGE(0.5),
- UNI_RANGE(0.05),
- UNI_RANGE(0.01)
+static const struct comedi_lrange dt2811_pgl_ai_ranges = {
+ 12, {
+ BIP_RANGE(5), /* range 0: gain=1 */
+ BIP_RANGE(0.5), /* range 1: gain=10 */
+ BIP_RANGE(0.05), /* range 2: gain=100 */
+ BIP_RANGE(0.01), /* range 3: gain=500 */
+
+ BIP_RANGE(2.5), /* range 0+4: gain=1 */
+ BIP_RANGE(0.25), /* range 1+4: gain=10 */
+ BIP_RANGE(0.025), /* range 2+4: gain=100 */
+ BIP_RANGE(0.005), /* range 3+4: gain=500 */
+
+ UNI_RANGE(5), /* range 0+8: gain=1 */
+ UNI_RANGE(0.5), /* range 1+8: gain=10 */
+ UNI_RANGE(0.05), /* range 2+8: gain=100 */
+ UNI_RANGE(0.01) /* range 3+8: gain=500 */
}
};
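The 12-entry tables above fold the jumpered input span and the programmable gain into one range index: DT2811_ADGCR_GAIN() masks the index with 0x3, so entries 0-3, 4-7, and 8-11 all program gain codes 0-3, and the jumper setting decides which third of the table is physically correct. A minimal standalone sketch of that mapping (the macro is copied from the register map above):

    #include <stdio.h>

    #define DT2811_ADGCR_GAIN(x)    (((x) & 0x3) << 6)

    int main(void)
    {
        unsigned int range;

        /* ranges 2, 6, and 10 all program gain code 2 (gain x4 on the pgh) */
        for (range = 2; range < 12; range += 4)
            printf("range %2u -> ADGCR gain bits 0x%02x\n",
                   range, DT2811_ADGCR_GAIN(range));
        return 0;
    }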
-static const struct comedi_lrange range_dt2811_pgl_ai_2_5_bipolar = {
- 4, {
+/*
+ * The Analog Output range is set per-channel using jumpers on the board.
+ *
+ * DAC0 Jumpers DAC1 Jumpers
+ * Output Range W5 W6 W7 W8 W1 W2 W3 W4
+ * -5V to +5V In Out In Out In Out In Out
+ * -2.5V to +2.5V In Out Out In In Out Out In
+ * 0 to +5V Out In Out In Out In Out In
+ */
+static const struct comedi_lrange dt2811_ao_ranges = {
+ 3, {
+ BIP_RANGE(5), /* default setting from factory */
BIP_RANGE(2.5),
- BIP_RANGE(0.25),
- BIP_RANGE(0.025),
- BIP_RANGE(0.005)
+ UNI_RANGE(5)
}
};
-static const struct comedi_lrange range_dt2811_pgl_ai_5_bipolar = {
- 4, {
- BIP_RANGE(5),
- BIP_RANGE(0.5),
- BIP_RANGE(0.05),
- BIP_RANGE(0.01)
- }
+struct dt2811_board {
+ const char *name;
+ unsigned int is_pgh:1;
};
-/*
+static const struct dt2811_board dt2811_boards[] = {
+ {
+ .name = "dt2811-pgh",
+ .is_pgh = 1,
+ }, {
+ .name = "dt2811-pgl",
+ },
+};
- 0x00 ADCSR R/W A/D Control/Status Register
- bit 7 - (R) 1 indicates A/D conversion done
- reading ADDAT clears bit
- (W) ignored
- bit 6 - (R) 1 indicates A/D error
- (W) ignored
- bit 5 - (R) 1 indicates A/D busy, cleared at end
- of conversion
- (W) ignored
- bit 4 - (R) 0
- (W)
- bit 3 - (R) 0
- bit 2 - (R/W) 1 indicates interrupts enabled
- bits 1,0 - (R/W) mode bits
- 00 single conversion on ADGCR load
- 01 continuous conversion, internal clock,
- (clock enabled on ADGCR load)
- 10 continuous conversion, internal clock,
- external trigger
- 11 continuous conversion, external clock,
- external trigger
-
- 0x01 ADGCR R/W A/D Gain/Channel Register
- bit 6,7 - (R/W) gain select
- 00 gain=1, both PGH, PGL models
- 01 gain=2 PGH, 10 PGL
- 10 gain=4 PGH, 100 PGL
- 11 gain=8 PGH, 500 PGL
- bit 4,5 - reserved
- bit 3-0 - (R/W) channel select
- channel number from 0-15
-
- 0x02,0x03 (R) ADDAT A/D Data Register
- (W) DADAT0 D/A Data Register 0
- 0x02 low byte
- 0x03 high byte
-
- 0x04,0x05 (W) DADAT0 D/A Data Register 1
-
- 0x06 (R) DIO0 Digital Input Port 0
- (W) DIO1 Digital Output Port 1
-
- 0x07 TMRCTR (R/W) Timer/Counter Register
- bits 6,7 - reserved
- bits 5-3 - Timer frequency control (mantissa)
- 543 divisor freqency (kHz)
- 000 1 600
- 001 10 60
- 010 2 300
- 011 3 200
- 100 4 150
- 101 5 120
- 110 6 100
- 111 12 50
- bits 2-0 - Timer frequency control (exponent)
- 210 multiply divisor/divide frequency by
- 000 1
- 001 10
- 010 100
- 011 1000
- 100 10000
- 101 100000
- 110 1000000
- 111 10000000
+struct dt2811_private {
+ unsigned int ai_divisor;
+};
- */
+static unsigned int dt2811_ai_read_sample(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ unsigned int val;
-#define TIMEOUT 10000
+ val = inb(dev->iobase + DT2811_ADDATA_LO_REG) |
+ (inb(dev->iobase + DT2811_ADDATA_HI_REG) << 8);
-#define DT2811_ADCSR 0
-#define DT2811_ADGCR 1
-#define DT2811_ADDATLO 2
-#define DT2811_ADDATHI 3
-#define DT2811_DADAT0LO 2
-#define DT2811_DADAT0HI 3
-#define DT2811_DADAT1LO 4
-#define DT2811_DADAT1HI 5
-#define DT2811_DIO 6
-#define DT2811_TMRCTR 7
+ return val & s->maxdata;
+}
-/*
- * flags
- */
+static irqreturn_t dt2811_interrupt(int irq, void *d)
+{
+ struct comedi_device *dev = d;
+ struct comedi_subdevice *s = dev->read_subdev;
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
+ unsigned int status;
-/* ADCSR */
+ if (!dev->attached)
+ return IRQ_NONE;
-#define DT2811_ADDONE 0x80
-#define DT2811_ADERROR 0x40
-#define DT2811_ADBUSY 0x20
-#define DT2811_CLRERROR 0x10
-#define DT2811_INTENB 0x04
-#define DT2811_ADMODE 0x03
+ status = inb(dev->iobase + DT2811_ADCSR_REG);
-struct dt2811_board {
- const char *name;
- const struct comedi_lrange *bip_5;
- const struct comedi_lrange *bip_2_5;
- const struct comedi_lrange *unip_5;
-};
+ if (status & DT2811_ADCSR_ADERROR) {
+ async->events |= COMEDI_CB_OVERFLOW;
-enum { card_2811_pgh, card_2811_pgl };
+ outb(status | DT2811_ADCSR_CLRERROR,
+ dev->iobase + DT2811_ADCSR_REG);
+ }
-struct dt2811_private {
- int ntrig;
- int curadchan;
- enum {
- adc_singleended, adc_diff, adc_pseudo_diff
- } adc_mux;
- enum {
- dac_bipolar_5, dac_bipolar_2_5, dac_unipolar_5
- } dac_range[2];
- const struct comedi_lrange *range_type_list[2];
-};
+ if (status & DT2811_ADCSR_ADDONE) {
+ unsigned short val;
-static const struct comedi_lrange *dac_range_types[] = {
- &range_bipolar5,
- &range_bipolar2_5,
- &range_unipolar5
-};
+ val = dt2811_ai_read_sample(dev, s);
+ comedi_buf_write_samples(s, &val, 1);
+ }
+
+ if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg)
+ async->events |= COMEDI_CB_EOA;
+
+ comedi_handle_events(dev, s);
+
+ return IRQ_HANDLED;
+}
+
+static int dt2811_ai_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ /*
+ * Mode 0
+ * Single conversion
+ *
+ * Loading a chanspec will trigger a conversion.
+ */
+ outb(DT2811_ADCSR_ADMODE(0), dev->iobase + DT2811_ADCSR_REG);
+
+ return 0;
+}
+
+static void dt2811_ai_set_chanspec(struct comedi_device *dev,
+ unsigned int chanspec)
+{
+ unsigned int chan = CR_CHAN(chanspec);
+ unsigned int range = CR_RANGE(chanspec);
+
+ outb(DT2811_ADGCR_CHAN(chan) | DT2811_ADGCR_GAIN(range),
+ dev->iobase + DT2811_ADGCR_REG);
+}
+
+static int dt2811_ai_cmd(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct dt2811_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int mode;
+
+ if (cmd->start_src == TRIG_NOW) {
+ /*
+ * Mode 1
+ * Continuous conversion, internal trigger and clock
+ *
+ * This resets the trigger flip-flop, disabling A/D strobes.
+ * The timer/counter register is loaded with the division
+ * ratio which will give the required sample rate.
+ *
+ * Loading the first chanspec sets the trigger flip-flop,
+ * enabling the timer/counter. A/D strobes are then generated
+ * at the rate set by the internal clock/divider.
+ */
+ mode = DT2811_ADCSR_ADMODE(1);
+ } else { /* TRIG_EXT */
+ if (cmd->convert_src == TRIG_TIMER) {
+ /*
+ * Mode 2
+ * Continuous conversion, external trigger
+ *
+ * Similar to Mode 1, with the exception that the
+ * trigger flip-flop must be set by a negative edge
+ * on the external trigger input.
+ */
+ mode = DT2811_ADCSR_ADMODE(2);
+ } else { /* TRIG_EXT */
+ /*
+ * Mode 3
+ * Continuous conversion, external trigger, clock
+ *
+ * Similar to Mode 2, with the exception that the
+ * conversion rate is set by the frequency on the
+ * external clock/divider.
+ */
+ mode = DT2811_ADCSR_ADMODE(3);
+ }
+ }
+ outb(mode | DT2811_ADCSR_INTENB, dev->iobase + DT2811_ADCSR_REG);
+
+ /* load timer */
+ outb(devpriv->ai_divisor, dev->iobase + DT2811_TMRCTR_REG);
+
+ /* load chanspec - enables timer */
+ dt2811_ai_set_chanspec(dev, cmd->chanlist[0]);
+
+ return 0;
+}
+
+static unsigned int dt2811_ns_to_timer(unsigned int *nanosec,
+ unsigned int flags)
+{
+ unsigned long long ns = *nanosec;
+ unsigned int ns_lo = COMEDI_MIN_SPEED;
+ unsigned int ns_hi = 0;
+ unsigned int divisor_hi = 0;
+ unsigned int divisor_lo = 0;
+ unsigned int _div;
+ unsigned int _mult;
+
+ /*
+ * Work through all the divider/multiplier values to find the two
+ * closest divisors to generate the requested nanosecond timing.
+ */
+ for (_div = 0; _div <= 7; _div++) {
+ for (_mult = 0; _mult <= 7; _mult++) {
+ unsigned int div = dt2811_clk_dividers[_div];
+ unsigned int mult = dt2811_clk_multipliers[_mult];
+ unsigned long long divider = div * mult;
+ unsigned int divisor = DT2811_TMRCTR_MANTISSA(_div) |
+ DT2811_TMRCTR_EXPONENT(_mult);
+
+ /*
+ * The timer can be configured to run at a slowest
+ * speed of 0.005 Hz (600 kHz / 120000000), which needs
+ * 37 bits to represent the nanosecond value. Limit the
+ * slowest timing to what comedi handles (32 bits).
+ */
+ ns = divider * DT2811_OSC_BASE;
+ if (ns > COMEDI_MIN_SPEED)
+ continue;
+
+ /* Check for fastest found timing */
+ if (ns <= *nanosec && ns > ns_hi) {
+ ns_hi = ns;
+ divisor_hi = divisor;
+ }
+ /* Check for slowest found timing */
+ if (ns >= *nanosec && ns < ns_lo) {
+ ns_lo = ns;
+ divisor_lo = divisor;
+ }
+ }
+ }
+
+ /*
+ * The slowest found timing will be invalid if the requested timing
+ * is faster than what can be generated by the timer. Fix it so that
+ * CMDF_ROUND_UP returns valid timing.
+ */
+ if (ns_lo == COMEDI_MIN_SPEED) {
+ ns_lo = ns_hi;
+ divisor_lo = divisor_hi;
+ }
+ /*
+ * The fastest found timing will be invalid if the requested timing
+ * is less than what can be generated by the timer. Fix it so that
+ * CMDF_ROUND_NEAREST and CMDF_ROUND_DOWN return valid timing.
+ */
+ if (ns_hi == 0) {
+ ns_hi = ns_lo;
+ divisor_hi = divisor_lo;
+ }
+
+ switch (flags & CMDF_ROUND_MASK) {
+ case CMDF_ROUND_NEAREST:
+ default:
+ if (ns_hi - *nanosec < *nanosec - ns_lo) {
+ *nanosec = ns_lo;
+ return divisor_lo;
+ }
+ *nanosec = ns_hi;
+ return divisor_hi;
+ case CMDF_ROUND_UP:
+ *nanosec = ns_lo;
+ return divisor_lo;
+ case CMDF_ROUND_DOWN:
+ *nanosec = ns_hi;
+ return divisor_hi;
+ }
+}
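The effect of the search above is easiest to see with a worked request. A standalone sketch that brute-forces the same mantissa/exponent space for a 100 us request and rounds to nearest (it ignores the driver's 32-bit clamp, which does not matter at this magnitude):

    #include <stdio.h>

    #define OSC_BASE 1666ULL    /* 600 kHz clock period in ns (truncated) */

    static const unsigned int divs[] = { 1, 10, 2, 3, 4, 5, 6, 12 };

    static unsigned long long absdiff(unsigned long long a,
                                      unsigned long long b)
    {
        return a > b ? a - b : b - a;
    }

    int main(void)
    {
        unsigned long long want = 100000, best = 0; /* request 100 us */
        int m, e;

        /* same divider space as dt2811_ns_to_timer() searches */
        for (m = 0; m < 8; m++) {
            unsigned long long mult = 1;

            for (e = 0; e < 8; e++, mult *= 10) {
                unsigned long long ns = OSC_BASE * divs[m] * mult;

                if (!best || absdiff(ns, want) < absdiff(best, want))
                    best = ns;
            }
        }
        printf("%llu ns\n", best);  /* 99960 ns = 600 kHz / 60 */
        return 0;
    }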
+
+static int dt2811_ai_cmdtest(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
+{
+ struct dt2811_private *devpriv = dev->private;
+ unsigned int arg;
+ int err = 0;
+
+ /* Step 1 : check if triggers are trivially valid */
+
+ err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_EXT);
+ err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_FOLLOW);
+ err |= comedi_check_trigger_src(&cmd->convert_src,
+ TRIG_TIMER | TRIG_EXT);
+ err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
+ err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
+
+ if (err)
+ return 1;
+
+ /* Step 2a : make sure trigger sources are unique */
+
+ err |= comedi_check_trigger_is_unique(cmd->start_src);
+ err |= comedi_check_trigger_is_unique(cmd->convert_src);
+ err |= comedi_check_trigger_is_unique(cmd->stop_src);
+
+ /* Step 2b : and mutually compatible */
+
+ if (cmd->convert_src == TRIG_EXT && cmd->start_src != TRIG_EXT)
+ err |= -EINVAL;
+
+ if (err)
+ return 2;
+
+ /* Step 3: check if arguments are trivially valid */
+
+ err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
+ err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
+ if (cmd->convert_src == TRIG_TIMER)
+ err |= comedi_check_trigger_arg_min(&cmd->convert_arg, 12500);
+ err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
+ cmd->chanlist_len);
+ if (cmd->stop_src == TRIG_COUNT)
+ err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
+ else /* TRIG_NONE */
+ err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
+
+ if (err)
+ return 3;
+
+ /* Step 4: fix up any arguments */
+
+ if (cmd->convert_src == TRIG_TIMER) {
+ arg = cmd->convert_arg;
+ devpriv->ai_divisor = dt2811_ns_to_timer(&arg, cmd->flags);
+ err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
+ } else { /* TRIG_EXT */
+ /* The convert_arg is used to set the divisor. */
+ devpriv->ai_divisor = cmd->convert_arg;
+ }
+
+ if (err)
+ return 4;
+
+ /* Step 5: check channel list if it exists */
+
+ return 0;
+}
static int dt2811_ai_eoc(struct comedi_device *dev,
struct comedi_subdevice *s,
@@ -227,32 +472,33 @@ static int dt2811_ai_eoc(struct comedi_device *dev,
{
unsigned int status;
- status = inb(dev->iobase + DT2811_ADCSR);
- if ((status & DT2811_ADBUSY) == 0)
+ status = inb(dev->iobase + DT2811_ADCSR_REG);
+ if ((status & DT2811_ADCSR_ADBUSY) == 0)
return 0;
return -EBUSY;
}
-static int dt2811_ai_insn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int dt2811_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int chan = CR_CHAN(insn->chanspec);
int ret;
int i;
+ /* We will already be in Mode 0 */
for (i = 0; i < insn->n; i++) {
- outb(chan, dev->iobase + DT2811_ADGCR);
+ /* load chanspec and trigger conversion */
+ dt2811_ai_set_chanspec(dev, insn->chanspec);
ret = comedi_timeout(dev, s, insn, dt2811_ai_eoc, 0);
if (ret)
return ret;
- data[i] = inb(dev->iobase + DT2811_ADDATLO);
- data[i] |= inb(dev->iobase + DT2811_ADDATHI) << 8;
- data[i] &= 0xfff;
+ data[i] = dt2811_ai_read_sample(dev, s);
}
- return i;
+ return insn->n;
}
static int dt2811_ao_insn_write(struct comedi_device *dev,
@@ -266,9 +512,9 @@ static int dt2811_ao_insn_write(struct comedi_device *dev,
for (i = 0; i < insn->n; i++) {
val = data[i];
- outb(val & 0xff, dev->iobase + DT2811_DADAT0LO + 2 * chan);
+ outb(val & 0xff, dev->iobase + DT2811_DADATA_LO_REG(chan));
outb((val >> 8) & 0xff,
- dev->iobase + DT2811_DADAT0HI + 2 * chan);
+ dev->iobase + DT2811_DADATA_HI_REG(chan));
}
s->readback[chan] = val;
@@ -277,9 +523,10 @@ static int dt2811_ao_insn_write(struct comedi_device *dev,
static int dt2811_di_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- data[1] = inb(dev->iobase + DT2811_DIO);
+ data[1] = inb(dev->iobase + DT2811_DI_REG);
return insn->n;
}
@@ -290,185 +537,118 @@ static int dt2811_do_insn_bits(struct comedi_device *dev,
unsigned int *data)
{
if (comedi_dio_update_state(s, data))
- outb(s->state, dev->iobase + DT2811_DIO);
+ outb(s->state, dev->iobase + DT2811_DO_REG);
data[1] = s->state;
return insn->n;
}
-/*
- options[0] Board base address
- options[1] IRQ
- options[2] Input configuration
- 0 == single-ended
- 1 == differential
- 2 == pseudo-differential
- options[3] Analog input range configuration
- 0 == bipolar 5 (-5V -- +5V)
- 1 == bipolar 2.5V (-2.5V -- +2.5V)
- 2 == unipolar 5V (0V -- +5V)
- options[4] Analog output 0 range configuration
- 0 == bipolar 5 (-5V -- +5V)
- 1 == bipolar 2.5V (-2.5V -- +2.5V)
- 2 == unipolar 5V (0V -- +5V)
- options[5] Analog output 1 range configuration
- 0 == bipolar 5 (-5V -- +5V)
- 1 == bipolar 2.5V (-2.5V -- +2.5V)
- 2 == unipolar 5V (0V -- +5V)
-*/
+static void dt2811_reset(struct comedi_device *dev)
+{
+ /* This is the initialization sequence from the users manual */
+ outb(DT2811_ADCSR_ADMODE(0), dev->iobase + DT2811_ADCSR_REG);
+ usleep_range(100, 1000);
+ inb(dev->iobase + DT2811_ADDATA_LO_REG);
+ inb(dev->iobase + DT2811_ADDATA_HI_REG);
+ outb(DT2811_ADCSR_ADMODE(0) | DT2811_ADCSR_CLRERROR,
+ dev->iobase + DT2811_ADCSR_REG);
+}
+
static int dt2811_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
- /* int i; */
const struct dt2811_board *board = dev->board_ptr;
struct dt2811_private *devpriv;
- int ret;
struct comedi_subdevice *s;
+ int ret;
+
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
+ if (!devpriv)
+ return -ENOMEM;
ret = comedi_request_region(dev, it->options[0], 0x8);
if (ret)
return ret;
-#if 0
- outb(0, dev->iobase + DT2811_ADCSR);
- udelay(100);
- i = inb(dev->iobase + DT2811_ADDATLO);
- i = inb(dev->iobase + DT2811_ADDATHI);
-#endif
+ dt2811_reset(dev);
+
+ /* IRQs 2, 3, 5, and 7 (mask 0xac) are valid for async command support */
+ if (it->options[1] <= 7 && (BIT(it->options[1]) & 0xac)) {
+ ret = request_irq(it->options[1], dt2811_interrupt, 0,
+ dev->board_name, dev);
+ if (ret == 0)
+ dev->irq = it->options[1];
+ }
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
return ret;
- devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
- if (!devpriv)
- return -ENOMEM;
-
- switch (it->options[2]) {
- case 0:
- devpriv->adc_mux = adc_singleended;
- break;
- case 1:
- devpriv->adc_mux = adc_diff;
- break;
- case 2:
- devpriv->adc_mux = adc_pseudo_diff;
- break;
- default:
- devpriv->adc_mux = adc_singleended;
- break;
- }
- switch (it->options[4]) {
- case 0:
- devpriv->dac_range[0] = dac_bipolar_5;
- break;
- case 1:
- devpriv->dac_range[0] = dac_bipolar_2_5;
- break;
- case 2:
- devpriv->dac_range[0] = dac_unipolar_5;
- break;
- default:
- devpriv->dac_range[0] = dac_bipolar_5;
- break;
- }
- switch (it->options[5]) {
- case 0:
- devpriv->dac_range[1] = dac_bipolar_5;
- break;
- case 1:
- devpriv->dac_range[1] = dac_bipolar_2_5;
- break;
- case 2:
- devpriv->dac_range[1] = dac_unipolar_5;
- break;
- default:
- devpriv->dac_range[1] = dac_bipolar_5;
- break;
- }
-
+ /* Analog Input subdevice */
s = &dev->subdevices[0];
- /* initialize the ADC subdevice */
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND;
- s->n_chan = devpriv->adc_mux == adc_diff ? 8 : 16;
- s->insn_read = dt2811_ai_insn;
- s->maxdata = 0xfff;
- switch (it->options[3]) {
- case 0:
- default:
- s->range_table = board->bip_5;
- break;
- case 1:
- s->range_table = board->bip_2_5;
- break;
- case 2:
- s->range_table = board->unip_5;
- break;
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE |
+ ((it->options[2] == 1) ? SDF_DIFF :
+ (it->options[2] == 2) ? SDF_COMMON : SDF_GROUND);
+ s->n_chan = (it->options[2] == 1) ? 8 : 16;
+ s->maxdata = 0x0fff;
+ s->range_table = board->is_pgh ? &dt2811_pgh_ai_ranges
+ : &dt2811_pgl_ai_ranges;
+ s->insn_read = dt2811_ai_insn_read;
+ if (dev->irq) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = 1;
+ s->do_cmdtest = dt2811_ai_cmdtest;
+ s->do_cmd = dt2811_ai_cmd;
+ s->cancel = dt2811_ai_cancel;
}
+ /* Analog Output subdevice */
s = &dev->subdevices[1];
- /* ao subdevice */
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 2;
- s->maxdata = 0xfff;
- s->range_table_list = devpriv->range_type_list;
- devpriv->range_type_list[0] = dac_range_types[devpriv->dac_range[0]];
- devpriv->range_type_list[1] = dac_range_types[devpriv->dac_range[1]];
- s->insn_write = dt2811_ao_insn_write;
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 2;
+ s->maxdata = 0x0fff;
+ s->range_table = &dt2811_ao_ranges;
+ s->insn_write = dt2811_ao_insn_write;
ret = comedi_alloc_subdev_readback(s);
if (ret)
return ret;
+ /* Digital Input subdevice */
s = &dev->subdevices[2];
- /* di subdevice */
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 8;
- s->insn_bits = dt2811_di_insn_bits;
- s->maxdata = 1;
- s->range_table = &range_digital;
-
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 8;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = dt2811_di_insn_bits;
+
+ /* Digital Output subdevice */
s = &dev->subdevices[3];
- /* do subdevice */
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 8;
- s->insn_bits = dt2811_do_insn_bits;
- s->maxdata = 1;
- s->state = 0;
- s->range_table = &range_digital;
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 8;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = dt2811_do_insn_bits;
return 0;
}
-static const struct dt2811_board boardtypes[] = {
- {
- .name = "dt2811-pgh",
- .bip_5 = &range_dt2811_pgh_ai_5_bipolar,
- .bip_2_5 = &range_dt2811_pgh_ai_2_5_bipolar,
- .unip_5 = &range_dt2811_pgh_ai_5_unipolar,
- }, {
- .name = "dt2811-pgl",
- .bip_5 = &range_dt2811_pgl_ai_5_bipolar,
- .bip_2_5 = &range_dt2811_pgl_ai_2_5_bipolar,
- .unip_5 = &range_dt2811_pgl_ai_5_unipolar,
- },
-};
-
static struct comedi_driver dt2811_driver = {
.driver_name = "dt2811",
.module = THIS_MODULE,
.attach = dt2811_attach,
.detach = comedi_legacy_detach,
- .board_name = &boardtypes[0].name,
- .num_names = ARRAY_SIZE(boardtypes),
+ .board_name = &dt2811_boards[0].name,
+ .num_names = ARRAY_SIZE(dt2811_boards),
.offset = sizeof(struct dt2811_board),
};
module_comedi_driver(dt2811_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for Data Translation DT2811 series boards");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/dt2814.c b/drivers/staging/comedi/drivers/dt2814.c
index 66705f9a0..2f903bedc 100644
--- a/drivers/staging/comedi/drivers/dt2814.c
+++ b/drivers/staging/comedi/drivers/dt2814.c
@@ -1,38 +1,38 @@
/*
- comedi/drivers/dt2814.c
- Hardware driver for Data Translation DT2814
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1998 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * comedi/drivers/dt2814.c
+ * Hardware driver for Data Translation DT2814
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1998 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
-Driver: dt2814
-Description: Data Translation DT2814
-Author: ds
-Status: complete
-Devices: [Data Translation] DT2814 (dt2814)
-
-Configuration options:
- [0] - I/O port base address
- [1] - IRQ
-
-This card has 16 analog inputs multiplexed onto a 12 bit ADC. There
-is a minimally useful onboard clock. The base frequency for the
-clock is selected by jumpers, and the clock divider can be selected
-via programmed I/O. Unfortunately, the clock divider can only be
-a power of 10, from 1 to 10^7, of which only 3 or 4 are useful. In
-addition, the clock does not seem to be very accurate.
-*/
+ * Driver: dt2814
+ * Description: Data Translation DT2814
+ * Author: ds
+ * Status: complete
+ * Devices: [Data Translation] DT2814 (dt2814)
+ *
+ * Configuration options:
+ * [0] - I/O port base address
+ * [1] - IRQ
+ *
+ * This card has 16 analog inputs multiplexed onto a 12 bit ADC. There
+ * is a minimally useful onboard clock. The base frequency for the
+ * clock is selected by jumpers, and the clock divider can be selected
+ * via programmed I/O. Unfortunately, the clock divider can only be
+ * a power of 10, from 1 to 10^7, of which only 3 or 4 are useful. In
+ * addition, the clock does not seem to be very accurate.
+ */
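Given the power-of-10 restriction described above, the achievable sample periods from any jumpered base form a bare decade ladder. A quick standalone illustration, assuming a 100 kHz base (the base frequency here is an assumption for the example):

    #include <stdio.h>

    int main(void)
    {
        unsigned long long period_ns = 10000;   /* assumed 100 kHz base */
        int i;

        /* DT2814 divider: powers of 10 from 10^0 through 10^7 */
        for (i = 0; i <= 7; i++, period_ns *= 10)
            printf("divider 10^%d -> %llu ns\n", i, period_ns);
        return 0;
    }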
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -215,8 +215,10 @@ static irqreturn_t dt2814_interrupt(int irq, void *d)
int i;
outb(0, dev->iobase + DT2814_CSR);
- /* note: turning off timed mode triggers another
- sample. */
+ /*
+ * note: turning off timed mode triggers another
+ * sample.
+ */
for (i = 0; i < DT2814_TIMEOUT; i++) {
if (inb(dev->iobase + DT2814_CSR) & DT2814_FINISH)
diff --git a/drivers/staging/comedi/drivers/dt2815.c b/drivers/staging/comedi/drivers/dt2815.c
index fb08569c1..0be77cc40 100644
--- a/drivers/staging/comedi/drivers/dt2815.c
+++ b/drivers/staging/comedi/drivers/dt2815.c
@@ -1,55 +1,55 @@
/*
- comedi/drivers/dt2815.c
- Hardware driver for Data Translation DT2815
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1999 Anders Blomdell <anders.blomdell@control.lth.se>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
+ * comedi/drivers/dt2815.c
+ * Hardware driver for Data Translation DT2815
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1999 Anders Blomdell <anders.blomdell@control.lth.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
/*
-Driver: dt2815
-Description: Data Translation DT2815
-Author: ds
-Status: mostly complete, untested
-Devices: [Data Translation] DT2815 (dt2815)
-
-I'm not sure anyone has ever tested this board. If you have information
-contrary, please update.
-
-Configuration options:
- [0] - I/O port base base address
- [1] - IRQ (unused)
- [2] - Voltage unipolar/bipolar configuration
- 0 == unipolar 5V (0V -- +5V)
- 1 == bipolar 5V (-5V -- +5V)
- [3] - Current offset configuration
- 0 == disabled (0mA -- +32mAV)
- 1 == enabled (+4mA -- +20mAV)
- [4] - Firmware program configuration
- 0 == program 1 (see manual table 5-4)
- 1 == program 2 (see manual table 5-4)
- 2 == program 3 (see manual table 5-4)
- 3 == program 4 (see manual table 5-4)
- [5] - Analog output 0 range configuration
- 0 == voltage
- 1 == current
- [6] - Analog output 1 range configuration (same options)
- [7] - Analog output 2 range configuration (same options)
- [8] - Analog output 3 range configuration (same options)
- [9] - Analog output 4 range configuration (same options)
- [10] - Analog output 5 range configuration (same options)
- [11] - Analog output 6 range configuration (same options)
- [12] - Analog output 7 range configuration (same options)
-*/
+ * Driver: dt2815
+ * Description: Data Translation DT2815
+ * Author: ds
+ * Status: mostly complete, untested
+ * Devices: [Data Translation] DT2815 (dt2815)
+ *
+ * I'm not sure anyone has ever tested this board. If you have
+ * information to the contrary, please update.
+ *
+ * Configuration options:
+ * [0] - I/O port base address
+ * [1] - IRQ (unused)
+ * [2] - Voltage unipolar/bipolar configuration
+ * 0 == unipolar 5V (0V -- +5V)
+ * 1 == bipolar 5V (-5V -- +5V)
+ * [3] - Current offset configuration
+ * 0 == disabled (0mA -- +32mA)
+ * 1 == enabled (+4mA -- +20mA)
+ * [4] - Firmware program configuration
+ * 0 == program 1 (see manual table 5-4)
+ * 1 == program 2 (see manual table 5-4)
+ * 2 == program 3 (see manual table 5-4)
+ * 3 == program 4 (see manual table 5-4)
+ * [5] - Analog output 0 range configuration
+ * 0 == voltage
+ * 1 == current
+ * [6] - Analog output 1 range configuration (same options)
+ * [7] - Analog output 2 range configuration (same options)
+ * [8] - Analog output 3 range configuration (same options)
+ * [9] - Analog output 4 range configuration (same options)
+ * [10] - Analog output 5 range configuration (same options)
+ * [11] - Analog output 6 range configuration (same options)
+ * [12] - Analog output 7 range configuration (same options)
+ */
#include <linux/module.h>
#include "../comedidev.h"
@@ -120,27 +120,27 @@ static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s,
}
/*
- options[0] Board base address
- options[1] IRQ (not applicable)
- options[2] Voltage unipolar/bipolar configuration
- 0 == unipolar 5V (0V -- +5V)
- 1 == bipolar 5V (-5V -- +5V)
- options[3] Current offset configuration
- 0 == disabled (0mA -- +32mAV)
- 1 == enabled (+4mA -- +20mAV)
- options[4] Firmware program configuration
- 0 == program 1 (see manual table 5-4)
- 1 == program 2 (see manual table 5-4)
- 2 == program 3 (see manual table 5-4)
- 3 == program 4 (see manual table 5-4)
- options[5] Analog output 0 range configuration
- 0 == voltage
- 1 == current
- options[6] Analog output 1 range configuration
- ...
- options[12] Analog output 7 range configuration
- 0 == voltage
- 1 == current
+ * options[0] Board base address
+ * options[1] IRQ (not applicable)
+ * options[2] Voltage unipolar/bipolar configuration
+ * 0 == unipolar 5V (0V -- +5V)
+ * 1 == bipolar 5V (-5V -- +5V)
+ * options[3] Current offset configuration
+ * 0 == disabled (0mA -- +32mA)
+ * 1 == enabled (+4mA -- +20mA)
+ * options[4] Firmware program configuration
+ * 0 == program 1 (see manual table 5-4)
+ * 1 == program 2 (see manual table 5-4)
+ * 2 == program 3 (see manual table 5-4)
+ * 3 == program 4 (see manual table 5-4)
+ * options[5] Analog output 0 range configuration
+ * 0 == voltage
+ * 1 == current
+ * options[6] Analog output 1 range configuration
+ * ...
+ * options[12] Analog output 7 range configuration
+ * 0 == voltage
+ * 1 == current
*/
static int dt2815_attach(struct comedi_device *dev, struct comedi_devconfig *it)
diff --git a/drivers/staging/comedi/drivers/dt2817.c b/drivers/staging/comedi/drivers/dt2817.c
index 5131deebf..39d2566e4 100644
--- a/drivers/staging/comedi/drivers/dt2817.c
+++ b/drivers/staging/comedi/drivers/dt2817.c
@@ -1,37 +1,37 @@
/*
- comedi/drivers/dt2817.c
- Hardware driver for Data Translation DT2817
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1998 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * comedi/drivers/dt2817.c
+ * Hardware driver for Data Translation DT2817
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1998 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
-Driver: dt2817
-Description: Data Translation DT2817
-Author: ds
-Status: complete
-Devices: [Data Translation] DT2817 (dt2817)
-
-A very simple digital I/O card. Four banks of 8 lines, each bank
-is configurable for input or output. One wonders why it takes a
-50 page manual to describe this thing.
-
-The driver (which, btw, is much less than 50 pages) has 1 subdevice
-with 32 channels, configurable in groups of 8.
-
-Configuration options:
- [0] - I/O port base base address
-*/
+ * Driver: dt2817
+ * Description: Data Translation DT2817
+ * Author: ds
+ * Status: complete
+ * Devices: [Data Translation] DT2817 (dt2817)
+ *
+ * A very simple digital I/O card. Four banks of 8 lines, each bank
+ * is configurable for input or output. One wonders why it takes a
+ * 50 page manual to describe this thing.
+ *
+ * The driver (which, btw, is much less than 50 pages) has 1 subdevice
+ * with 32 channels, configurable in groups of 8.
+ *
+ * Configuration options:
+ * [0] - I/O port base address
+ */
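As a usage illustration of the banked configuration described above, here is a hedged user-space sketch built on comedilib (the device path, subdevice number, and channel picks are assumptions; comedi_dio_config(), comedi_dio_write(), and comedi_dio_read() are the standard comedilib calls, and the program links with -lcomedi):

    #include <stdio.h>
    #include <comedilib.h>

    int main(void)
    {
        comedi_t *dev = comedi_open("/dev/comedi0"); /* illustrative path */
        unsigned int bit;

        if (!dev)
            return 1;
        /* bank 0 (channels 0-7) as output, bank 1 (8-15) as input */
        comedi_dio_config(dev, 0, 0, COMEDI_OUTPUT);
        comedi_dio_config(dev, 0, 8, COMEDI_INPUT);
        comedi_dio_write(dev, 0, 0, 1);     /* drive channel 0 high */
        comedi_dio_read(dev, 0, 8, &bit);   /* sample channel 8 */
        printf("channel 8 = %u\n", bit);
        comedi_close(dev);
        return 0;
    }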
#include <linux/module.h>
#include "../comedidev.h"
diff --git a/drivers/staging/comedi/drivers/gsc_hpdi.c b/drivers/staging/comedi/drivers/gsc_hpdi.c
index 63b5cbc44..af4b4175a 100644
--- a/drivers/staging/comedi/drivers/gsc_hpdi.c
+++ b/drivers/staging/comedi/drivers/gsc_hpdi.c
@@ -158,10 +158,7 @@ static void gsc_hpdi_drain_dma(struct comedi_device *dev, unsigned int channel)
unsigned int size;
unsigned int next;
- if (channel)
- next = readl(devpriv->plx9080_mmio + PLX_DMA1_PCI_ADDRESS_REG);
- else
- next = readl(devpriv->plx9080_mmio + PLX_DMA0_PCI_ADDRESS_REG);
+ next = readl(devpriv->plx9080_mmio + PLX_REG_DMAPADR(channel));
idx = devpriv->dma_desc_index;
start = le32_to_cpu(devpriv->dma_desc[idx].pci_start_addr);
@@ -201,8 +198,9 @@ static irqreturn_t gsc_hpdi_interrupt(int irq, void *d)
if (!dev->attached)
return IRQ_NONE;
- plx_status = readl(devpriv->plx9080_mmio + PLX_INTRCS_REG);
- if ((plx_status & (ICS_DMA0_A | ICS_DMA1_A | ICS_LIA)) == 0)
+ plx_status = readl(devpriv->plx9080_mmio + PLX_REG_INTCSR);
+ if ((plx_status &
+ (PLX_INTCSR_DMA0IA | PLX_INTCSR_DMA1IA | PLX_INTCSR_PLIA)) == 0)
return IRQ_NONE;
hpdi_intr_status = readl(dev->mmio + INTERRUPT_STATUS_REG);
@@ -213,32 +211,32 @@ static irqreturn_t gsc_hpdi_interrupt(int irq, void *d)
/* spin lock makes sure no one else changes plx dma control reg */
spin_lock_irqsave(&dev->spinlock, flags);
- dma0_status = readb(devpriv->plx9080_mmio + PLX_DMA0_CS_REG);
- if (plx_status & ICS_DMA0_A) {
+ dma0_status = readb(devpriv->plx9080_mmio + PLX_REG_DMACSR0);
+ if (plx_status & PLX_INTCSR_DMA0IA) {
/* dma chan 0 interrupt */
- writeb((dma0_status & PLX_DMA_EN_BIT) | PLX_CLEAR_DMA_INTR_BIT,
- devpriv->plx9080_mmio + PLX_DMA0_CS_REG);
+ writeb((dma0_status & PLX_DMACSR_ENABLE) | PLX_DMACSR_CLEARINTR,
+ devpriv->plx9080_mmio + PLX_REG_DMACSR0);
- if (dma0_status & PLX_DMA_EN_BIT)
+ if (dma0_status & PLX_DMACSR_ENABLE)
gsc_hpdi_drain_dma(dev, 0);
}
spin_unlock_irqrestore(&dev->spinlock, flags);
/* spin lock makes sure no one else changes plx dma control reg */
spin_lock_irqsave(&dev->spinlock, flags);
- dma1_status = readb(devpriv->plx9080_mmio + PLX_DMA1_CS_REG);
- if (plx_status & ICS_DMA1_A) {
+ dma1_status = readb(devpriv->plx9080_mmio + PLX_REG_DMACSR1);
+ if (plx_status & PLX_INTCSR_DMA1IA) {
/* XXX */ /* dma chan 1 interrupt */
- writeb((dma1_status & PLX_DMA_EN_BIT) | PLX_CLEAR_DMA_INTR_BIT,
- devpriv->plx9080_mmio + PLX_DMA1_CS_REG);
+ writeb((dma1_status & PLX_DMACSR_ENABLE) | PLX_DMACSR_CLEARINTR,
+ devpriv->plx9080_mmio + PLX_REG_DMACSR1);
}
spin_unlock_irqrestore(&dev->spinlock, flags);
/* clear possible plx9080 interrupt sources */
- if (plx_status & ICS_LDIA) {
+ if (plx_status & PLX_INTCSR_LDBIA) {
/* clear local doorbell interrupt */
- plx_bits = readl(devpriv->plx9080_mmio + PLX_DBR_OUT_REG);
- writel(plx_bits, devpriv->plx9080_mmio + PLX_DBR_OUT_REG);
+ plx_bits = readl(devpriv->plx9080_mmio + PLX_REG_L2PDBELL);
+ writel(plx_bits, devpriv->plx9080_mmio + PLX_REG_L2PDBELL);
}
if (hpdi_board_status & RX_OVERRUN_BIT) {
@@ -307,19 +305,19 @@ static int gsc_hpdi_cmd(struct comedi_device *dev,
* occasionally cause problems with transfer of first dma
* block. Initializing them to zero seems to fix the problem.
*/
- writel(0, devpriv->plx9080_mmio + PLX_DMA0_TRANSFER_SIZE_REG);
- writel(0, devpriv->plx9080_mmio + PLX_DMA0_PCI_ADDRESS_REG);
- writel(0, devpriv->plx9080_mmio + PLX_DMA0_LOCAL_ADDRESS_REG);
+ writel(0, devpriv->plx9080_mmio + PLX_REG_DMASIZ0);
+ writel(0, devpriv->plx9080_mmio + PLX_REG_DMAPADR0);
+ writel(0, devpriv->plx9080_mmio + PLX_REG_DMALADR0);
/* give location of first dma descriptor */
- bits = devpriv->dma_desc_phys_addr | PLX_DESC_IN_PCI_BIT |
- PLX_INTR_TERM_COUNT | PLX_XFER_LOCAL_TO_PCI;
- writel(bits, devpriv->plx9080_mmio + PLX_DMA0_DESCRIPTOR_REG);
+ bits = devpriv->dma_desc_phys_addr | PLX_DMADPR_DESCPCI |
+ PLX_DMADPR_TCINTR | PLX_DMADPR_XFERL2P;
+ writel(bits, devpriv->plx9080_mmio + PLX_REG_DMADPR0);
/* enable dma transfer */
spin_lock_irqsave(&dev->spinlock, flags);
- writeb(PLX_DMA_EN_BIT | PLX_DMA_START_BIT | PLX_CLEAR_DMA_INTR_BIT,
- devpriv->plx9080_mmio + PLX_DMA0_CS_REG);
+ writeb(PLX_DMACSR_ENABLE | PLX_DMACSR_START | PLX_DMACSR_CLEARINTR,
+ devpriv->plx9080_mmio + PLX_REG_DMACSR0);
spin_unlock_irqrestore(&dev->spinlock, flags);
if (cmd->stop_src == TRIG_COUNT)
@@ -424,8 +422,8 @@ static int gsc_hpdi_setup_dma_descriptors(struct comedi_device *dev,
{
struct hpdi_private *devpriv = dev->private;
dma_addr_t phys_addr = devpriv->dma_desc_phys_addr;
- u32 next_bits = PLX_DESC_IN_PCI_BIT | PLX_INTR_TERM_COUNT |
- PLX_XFER_LOCAL_TO_PCI;
+ u32 next_bits = PLX_DMADPR_DESCPCI | PLX_DMADPR_TCINTR |
+ PLX_DMADPR_XFERL2P;
unsigned int offset = 0;
unsigned int idx = 0;
unsigned int i;
@@ -536,9 +534,10 @@ static int gsc_hpdi_init(struct comedi_device *dev)
/* enable interrupts */
plx_intcsr_bits =
- ICS_AERR | ICS_PERR | ICS_PIE | ICS_PLIE | ICS_PAIE | ICS_LIE |
- ICS_DMA0_E;
- writel(plx_intcsr_bits, devpriv->plx9080_mmio + PLX_INTRCS_REG);
+ PLX_INTCSR_LSEABORTEN | PLX_INTCSR_LSEPARITYEN | PLX_INTCSR_PIEN |
+ PLX_INTCSR_PLIEN | PLX_INTCSR_PABORTIEN | PLX_INTCSR_LIOEN |
+ PLX_INTCSR_DMA0IEN;
+ writel(plx_intcsr_bits, devpriv->plx9080_mmio + PLX_REG_INTCSR);
return 0;
}
@@ -550,13 +549,13 @@ static void gsc_hpdi_init_plx9080(struct comedi_device *dev)
void __iomem *plx_iobase = devpriv->plx9080_mmio;
#ifdef __BIG_ENDIAN
- bits = BIGEND_DMA0 | BIGEND_DMA1;
+ bits = PLX_BIGEND_DMA0 | PLX_BIGEND_DMA1;
#else
bits = 0;
#endif
- writel(bits, devpriv->plx9080_mmio + PLX_BIGEND_REG);
+ writel(bits, devpriv->plx9080_mmio + PLX_REG_BIGEND);
- writel(0, devpriv->plx9080_mmio + PLX_INTRCS_REG);
+ writel(0, devpriv->plx9080_mmio + PLX_REG_INTCSR);
gsc_hpdi_abort_dma(dev, 0);
gsc_hpdi_abort_dma(dev, 1);
@@ -564,27 +563,27 @@ static void gsc_hpdi_init_plx9080(struct comedi_device *dev)
/* configure dma0 mode */
bits = 0;
/* enable ready input */
- bits |= PLX_DMA_EN_READYIN_BIT;
+ bits |= PLX_DMAMODE_READYIEN;
/* enable dma chaining */
- bits |= PLX_EN_CHAIN_BIT;
+ bits |= PLX_DMAMODE_CHAINEN;
/*
* enable interrupt on dma done
* (probably don't need this, since chain never finishes)
*/
- bits |= PLX_EN_DMA_DONE_INTR_BIT;
+ bits |= PLX_DMAMODE_DONEIEN;
/*
* don't increment local address during transfers
* (we are transferring from a fixed fifo register)
*/
- bits |= PLX_LOCAL_ADDR_CONST_BIT;
+ bits |= PLX_DMAMODE_LACONST;
/* route dma interrupt to pci bus */
- bits |= PLX_DMA_INTR_PCI_BIT;
+ bits |= PLX_DMAMODE_INTRPCI;
/* enable demand mode */
- bits |= PLX_DEMAND_MODE_BIT;
+ bits |= PLX_DMAMODE_DEMAND;
/* enable local burst mode */
- bits |= PLX_DMA_LOCAL_BURST_EN_BIT;
- bits |= PLX_LOCAL_BUS_32_WIDE_BITS;
- writel(bits, plx_iobase + PLX_DMA0_MODE_REG);
+ bits |= PLX_DMAMODE_BURSTEN;
+ bits |= PLX_DMAMODE_WIDTH32;
+ writel(bits, plx_iobase + PLX_REG_DMAMODE0);
}
static int gsc_hpdi_auto_attach(struct comedi_device *dev,
@@ -680,7 +679,7 @@ static void gsc_hpdi_detach(struct comedi_device *dev)
free_irq(dev->irq, dev);
if (devpriv) {
if (devpriv->plx9080_mmio) {
- writel(0, devpriv->plx9080_mmio + PLX_INTRCS_REG);
+ writel(0, devpriv->plx9080_mmio + PLX_REG_INTCSR);
iounmap(devpriv->plx9080_mmio);
}
if (dev->mmio)
diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
index f1793bd50..4bd7326cf 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.c
+++ b/drivers/staging/comedi/drivers/jr3_pci.c
@@ -1,20 +1,20 @@
/*
- comedi/drivers/jr3_pci.c
- hardware driver for JR3/PCI force sensor board
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2007 Anders Blomdell <anders.blomdell@control.lth.se>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * comedi/drivers/jr3_pci.c
+ * hardware driver for JR3/PCI force sensor board
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2007 Anders Blomdell <anders.blomdell@control.lth.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
* Driver: jr3_pci
* Description: JR3/PCI force sensor board
@@ -226,7 +226,7 @@ static unsigned int jr3_pci_ai_read_chan(struct comedi_device *dev,
if (chan < 56) {
unsigned int axis = chan % 8;
- unsigned filter = chan / 8;
+ unsigned int filter = chan / 8;
switch (axis) {
case 0:
@@ -685,7 +685,7 @@ static int jr3_pci_auto_attach(struct comedi_device *dev,
if (sizeof(struct jr3_channel) != 0xc00) {
dev_err(dev->class_dev,
"sizeof(struct jr3_channel) = %x [expected %x]\n",
- (unsigned)sizeof(struct jr3_channel), 0xc00);
+ (unsigned int)sizeof(struct jr3_channel), 0xc00);
return -EINVAL;
}
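The first jr3_pci hunk above also shows the AI channel layout in its context lines: the first 56 channels decompose into an axis (chan % 8) and a filter stage (chan / 8), i.e. eight axes by seven filter stages. A trivial standalone sketch of that decomposition (the channel number is illustrative):

    #include <stdio.h>

    int main(void)
    {
        unsigned int chan = 19; /* illustrative channel number */

        /* jr3_pci AI channels 0-55: eight axes x seven filter stages */
        printf("axis %u, filter %u\n", chan % 8, chan / 8); /* 3, 2 */
        return 0;
    }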
diff --git a/drivers/staging/comedi/drivers/me_daq.c b/drivers/staging/comedi/drivers/me_daq.c
index 5d877a2e6..0e2f9f346 100644
--- a/drivers/staging/comedi/drivers/me_daq.c
+++ b/drivers/staging/comedi/drivers/me_daq.c
@@ -150,7 +150,7 @@ struct me_private_data {
unsigned short dac_ctrl; /* Mirror of the DAC_CONTROL register */
};
-static inline void sleep(unsigned sec)
+static inline void sleep(unsigned int sec)
{
schedule_timeout_interruptible(sec * HZ);
}
diff --git a/drivers/staging/comedi/drivers/mpc624.c b/drivers/staging/comedi/drivers/mpc624.c
index 826e4399c..9bda76143 100644
--- a/drivers/staging/comedi/drivers/mpc624.c
+++ b/drivers/staging/comedi/drivers/mpc624.c
@@ -103,7 +103,7 @@ static const struct comedi_lrange range_mpc624_bipolar1 = {
/* BIP_RANGE(1.01) this is correct, */
/* but my MPC-624 actually seems to have a range of 2.02 */
BIP_RANGE(2.02)
- }
+ }
};
static const struct comedi_lrange range_mpc624_bipolar10 = {
@@ -112,7 +112,7 @@ static const struct comedi_lrange range_mpc624_bipolar10 = {
/* BIP_RANGE(10.1) this is correct, */
/* but my MPC-624 actually seems to have a range of 20.2 */
BIP_RANGE(20.2)
- }
+ }
};
static unsigned int mpc624_ai_get_sample(struct comedi_device *dev,
diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c
index 251117be1..07f38e385 100644
--- a/drivers/staging/comedi/drivers/ni_65xx.c
+++ b/drivers/staging/comedi/drivers/ni_65xx.c
@@ -151,10 +151,10 @@ enum ni_65xx_boardid {
struct ni_65xx_board {
const char *name;
- unsigned num_dio_ports;
- unsigned num_di_ports;
- unsigned num_do_ports;
- unsigned legacy_invert:1;
+ unsigned int num_dio_ports;
+ unsigned int num_di_ports;
+ unsigned int num_do_ports;
+ unsigned int legacy_invert:1;
};
static const struct ni_65xx_board ni_65xx_boards[] = {
@@ -360,7 +360,7 @@ static int ni_65xx_dio_insn_config(struct comedi_device *dev,
unsigned long base_port = (unsigned long)s->private;
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int chan_mask = NI_65XX_CHAN_TO_MASK(chan);
- unsigned port = base_port + NI_65XX_CHAN_TO_PORT(chan);
+ unsigned int port = base_port + NI_65XX_CHAN_TO_PORT(chan);
unsigned int interval;
unsigned int val;
@@ -428,14 +428,14 @@ static int ni_65xx_dio_insn_bits(struct comedi_device *dev,
unsigned long base_port = (unsigned long)s->private;
unsigned int base_chan = CR_CHAN(insn->chanspec);
int last_port_offset = NI_65XX_CHAN_TO_PORT(s->n_chan - 1);
- unsigned read_bits = 0;
+ unsigned int read_bits = 0;
int port_offset;
for (port_offset = NI_65XX_CHAN_TO_PORT(base_chan);
port_offset <= last_port_offset; port_offset++) {
- unsigned port = base_port + port_offset;
+ unsigned int port = base_port + port_offset;
int base_port_channel = NI_65XX_PORT_TO_CHAN(port_offset);
- unsigned port_mask, port_data, bits;
+ unsigned int port_mask, port_data, bits;
int bitshift = base_port_channel - base_chan;
if (bitshift >= 32)
@@ -640,7 +640,7 @@ static int ni_65xx_auto_attach(struct comedi_device *dev,
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
const struct ni_65xx_board *board = NULL;
struct comedi_subdevice *s;
- unsigned i;
+ unsigned int i;
int ret;
if (context < ARRAY_SIZE(ni_65xx_boards))
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index ed04dea91..c5ee05028 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -170,12 +170,12 @@ comedi_nonfree_firmware tarball available from http://www.comedi.org
#define DMA_Line_Control_Group1 76
#define DMA_Line_Control_Group2 108
/* channel zero is none */
-static inline unsigned primary_DMAChannel_bits(unsigned channel)
+static inline unsigned int primary_DMAChannel_bits(unsigned int channel)
{
return channel & 0x3;
}
-static inline unsigned secondary_DMAChannel_bits(unsigned channel)
+static inline unsigned int secondary_DMAChannel_bits(unsigned int channel)
{
return (channel << 2) & 0xc;
}
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 344aa343e..d8917392b 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -1064,12 +1064,12 @@ static void m_series_init_eeprom_buffer(struct comedi_device *dev)
struct mite *mite = devpriv->mite;
resource_size_t daq_phys_addr;
static const int Start_Cal_EEPROM = 0x400;
- static const unsigned window_size = 10;
+ static const unsigned int window_size = 10;
static const int serial_number_eeprom_offset = 0x4;
static const int serial_number_eeprom_length = 0x4;
- unsigned old_iodwbsr_bits;
- unsigned old_iodwbsr1_bits;
- unsigned old_iodwcr1_bits;
+ unsigned int old_iodwbsr_bits;
+ unsigned int old_iodwbsr1_bits;
+ unsigned int old_iodwcr1_bits;
int i;
/* IO Window 1 needs to be temporarily mapped to read the eeprom */
diff --git a/drivers/staging/comedi/drivers/pcmmio.c b/drivers/staging/comedi/drivers/pcmmio.c
index 10472e6dd..70ad497dd 100644
--- a/drivers/staging/comedi/drivers/pcmmio.c
+++ b/drivers/staging/comedi/drivers/pcmmio.c
@@ -84,25 +84,25 @@
#define PCMMIO_AI_LSB_REG 0x00
#define PCMMIO_AI_MSB_REG 0x01
#define PCMMIO_AI_CMD_REG 0x02
-#define PCMMIO_AI_CMD_SE (1 << 7)
-#define PCMMIO_AI_CMD_ODD_CHAN (1 << 6)
+#define PCMMIO_AI_CMD_SE BIT(7)
+#define PCMMIO_AI_CMD_ODD_CHAN BIT(6)
#define PCMMIO_AI_CMD_CHAN_SEL(x) (((x) & 0x3) << 4)
#define PCMMIO_AI_CMD_RANGE(x) (((x) & 0x3) << 2)
#define PCMMIO_RESOURCE_REG 0x02
#define PCMMIO_RESOURCE_IRQ(x) (((x) & 0xf) << 0)
#define PCMMIO_AI_STATUS_REG 0x03
-#define PCMMIO_AI_STATUS_DATA_READY (1 << 7)
-#define PCMMIO_AI_STATUS_DATA_DMA_PEND (1 << 6)
-#define PCMMIO_AI_STATUS_CMD_DMA_PEND (1 << 5)
-#define PCMMIO_AI_STATUS_IRQ_PEND (1 << 4)
-#define PCMMIO_AI_STATUS_DATA_DRQ_ENA (1 << 2)
-#define PCMMIO_AI_STATUS_REG_SEL (1 << 3)
-#define PCMMIO_AI_STATUS_CMD_DRQ_ENA (1 << 1)
-#define PCMMIO_AI_STATUS_IRQ_ENA (1 << 0)
+#define PCMMIO_AI_STATUS_DATA_READY BIT(7)
+#define PCMMIO_AI_STATUS_DATA_DMA_PEND BIT(6)
+#define PCMMIO_AI_STATUS_CMD_DMA_PEND BIT(5)
+#define PCMMIO_AI_STATUS_IRQ_PEND BIT(4)
+#define PCMMIO_AI_STATUS_DATA_DRQ_ENA BIT(2)
+#define PCMMIO_AI_STATUS_REG_SEL BIT(3)
+#define PCMMIO_AI_STATUS_CMD_DRQ_ENA BIT(1)
+#define PCMMIO_AI_STATUS_IRQ_ENA BIT(0)
#define PCMMIO_AI_RES_ENA_REG 0x03
#define PCMMIO_AI_RES_ENA_CMD_REG_ACCESS (0 << 3)
-#define PCMMIO_AI_RES_ENA_AI_RES_ACCESS (1 << 3)
-#define PCMMIO_AI_RES_ENA_DIO_RES_ACCESS (1 << 4)
+#define PCMMIO_AI_RES_ENA_AI_RES_ACCESS BIT(3)
+#define PCMMIO_AI_RES_ENA_DIO_RES_ACCESS BIT(4)
#define PCMMIO_AI_2ND_ADC_OFFSET 0x04
#define PCMMIO_AO_LSB_REG 0x08
@@ -125,14 +125,14 @@
#define PCMMIO_AO_CMD_CHAN_SEL(x) (((x) & 0x03) << 1)
#define PCMMIO_AO_CMD_CHAN_SEL_ALL (0x0f << 0)
#define PCMMIO_AO_STATUS_REG 0x0b
-#define PCMMIO_AO_STATUS_DATA_READY (1 << 7)
-#define PCMMIO_AO_STATUS_DATA_DMA_PEND (1 << 6)
-#define PCMMIO_AO_STATUS_CMD_DMA_PEND (1 << 5)
-#define PCMMIO_AO_STATUS_IRQ_PEND (1 << 4)
-#define PCMMIO_AO_STATUS_DATA_DRQ_ENA (1 << 2)
-#define PCMMIO_AO_STATUS_REG_SEL (1 << 3)
-#define PCMMIO_AO_STATUS_CMD_DRQ_ENA (1 << 1)
-#define PCMMIO_AO_STATUS_IRQ_ENA (1 << 0)
+#define PCMMIO_AO_STATUS_DATA_READY BIT(7)
+#define PCMMIO_AO_STATUS_DATA_DMA_PEND BIT(6)
+#define PCMMIO_AO_STATUS_CMD_DMA_PEND BIT(5)
+#define PCMMIO_AO_STATUS_IRQ_PEND BIT(4)
+#define PCMMIO_AO_STATUS_DATA_DRQ_ENA BIT(2)
+#define PCMMIO_AO_STATUS_REG_SEL BIT(3)
+#define PCMMIO_AO_STATUS_CMD_DRQ_ENA BIT(1)
+#define PCMMIO_AO_STATUS_IRQ_ENA BIT(0)
#define PCMMIO_AO_RESOURCE_ENA_REG 0x0b
#define PCMMIO_AO_2ND_DAC_OFFSET 0x04
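These (1 << n) to BIT(n) conversions are behavior-preserving, since BIT(n) expands to (1UL << (n)). As a hedged illustration of how such status masks are consumed (the helper below is hypothetical, not part of this patch), an end-of-conversion poll might look like:

static int pcmmio_ai_ready(unsigned long iobase)
{
	/* Sketch only: returns 0 once the AI sample is ready; the
	 * driver's own end-of-conversion test is similar in spirit.
	 */
	unsigned char status = inb(iobase + PCMMIO_AI_STATUS_REG);

	return (status & PCMMIO_AI_STATUS_DATA_READY) ? 0 : -EBUSY;
}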
diff --git a/drivers/staging/comedi/drivers/pcmuio.c b/drivers/staging/comedi/drivers/pcmuio.c
index 7ea813022..8ad64f262 100644
--- a/drivers/staging/comedi/drivers/pcmuio.c
+++ b/drivers/staging/comedi/drivers/pcmuio.c
@@ -307,7 +307,7 @@ static void pcmuio_stop_intr(struct comedi_device *dev,
static void pcmuio_handle_intr_subdev(struct comedi_device *dev,
struct comedi_subdevice *s,
- unsigned triggered)
+ unsigned int triggered)
{
struct pcmuio_private *devpriv = dev->private;
int asic = pcmuio_subdevice_to_asic(s);
diff --git a/drivers/staging/comedi/drivers/plx9080.h b/drivers/staging/comedi/drivers/plx9080.h
index 8d1aee00b..0e20cc5c9 100644
--- a/drivers/staging/comedi/drivers/plx9080.h
+++ b/drivers/staging/comedi/drivers/plx9080.h
@@ -3,15 +3,6 @@
*
* Copyright (C) 2002,2003 Frank Mori Hess <fmhess@users.sourceforge.net>
*
- * I modified this file from the plx9060.h header for the
- * wanXL device driver in the linux kernel,
- * for the register offsets and bit definitions. Made minor modifications,
- * added plx9080 registers and
- * stripped out stuff that was specifically for the wanXL driver.
- * Note: I've only made sure the definitions are correct as far
- * as I make use of them. There are still various plx9060-isms
- * left in this header file.
- *
********************************************************************
*
* Copyright (C) 1999 RG Studio s.c.
@@ -28,392 +19,611 @@
#ifndef __COMEDI_PLX9080_H
#define __COMEDI_PLX9080_H
-/* descriptor block used for chained dma transfers */
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+
+/**
+ * struct plx_dma_desc - DMA descriptor format for PLX PCI 9080
+ * @pci_start_addr: PCI Bus address for transfer (DMAPADR).
+ * @local_start_addr: Local Bus address for transfer (DMALADR).
+ * @transfer_size: Transfer size in bytes (max 8 MiB) (DMASIZ).
+ * @next: Address of next descriptor + flags (DMADPR).
+ *
+ * Describes the format of a scatter-gather DMA descriptor for the PLX
+ * PCI 9080. All members are raw, little-endian register values that
+ * will be transferred by the DMA engine from local or PCI memory into
+ * corresponding registers for the DMA channel.
+ *
+ * The DMA descriptors must be aligned on a 16-byte boundary. Bits 3:0
+ * of @next contain flags describing the address space of the next
+ * descriptor (local or PCI), an "end of chain" marker, an "interrupt on
+ * terminal count" bit, and a data transfer direction.
+ */
struct plx_dma_desc {
__le32 pci_start_addr;
__le32 local_start_addr;
- /* transfer_size is in bytes, only first 23 bits of register are used */
__le32 transfer_size;
- /*
- * address of next descriptor (quad word aligned), plus some
- * additional bits (see PLX_DMA0_DESCRIPTOR_REG)
- */
__le32 next;
};
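For illustration, a minimal sketch (not part of the patch) of how a driver might chain two of these descriptors, assuming both descriptors live in host memory obtained from dma_alloc_coherent(); the PLX_DMADPR_* flags placed in @next are defined further down in this header:

static void plx_link_two_descs(struct plx_dma_desc desc[2],
			       dma_addr_t desc_handle[2],
			       u32 pci_addr, u32 local_addr, u32 len)
{
	/* first descriptor: transfer "len" bytes, then follow "next",
	 * which points at the second descriptor in PCI (host) memory
	 */
	desc[0].pci_start_addr = cpu_to_le32(pci_addr);
	desc[0].local_start_addr = cpu_to_le32(local_addr);
	desc[0].transfer_size = cpu_to_le32(len);
	desc[0].next = cpu_to_le32((lower_32_bits(desc_handle[1]) &
				    PLX_DMADPR_NEXT_MASK) |
				   PLX_DMADPR_DESCPCI);

	/* second descriptor (addresses filled in the same way) ends
	 * the chain and raises an interrupt on terminal count
	 */
	desc[1].next = cpu_to_le32(PLX_DMADPR_DESCPCI |
				   PLX_DMADPR_CHAINEND |
				   PLX_DMADPR_TCINTR);
}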
-/**********************************************************************
-** Register Offsets and Bit Definitions
-**
-** Note: All offsets zero relative. IE. Some standard base address
-** must be added to the Register Number to properly access the register.
-**
-**********************************************************************/
-
-/* L, Local Addr Space 0 Range Register */
-#define PLX_LAS0RNG_REG 0x0000
-/* L, Local Addr Space 1 Range Register */
-#define PLX_LAS1RNG_REG 0x00f0
-#define LRNG_IO 0x00000001 /* Map to: 1=I/O, 0=Mem */
-#define LRNG_ANY32 0x00000000 /* Locate anywhere in 32 bit */
-#define LRNG_LT1MB 0x00000002 /* Locate in 1st meg */
-#define LRNG_ANY64 0x00000004 /* Locate anywhere in 64 bit */
-/* bits that specify range for memory io */
-#define LRNG_MEM_MASK 0xfffffff0
-/* bits that specify range for normal io */
-#define LRNG_IO_MASK 0xfffffffa
-/* L, Local Addr Space 0 Remap Register */
-#define PLX_LAS0MAP_REG 0x0004
-/* L, Local Addr Space 1 Remap Register */
-#define PLX_LAS1MAP_REG 0x00f4
-#define LMAP_EN 0x00000001 /* Enable slave decode */
-/* bits that specify decode for memory io */
-#define LMAP_MEM_MASK 0xfffffff0
-/* bits that specify decode bits for normal io */
-#define LMAP_IO_MASK 0xfffffffa
-
/*
- * Mode/Arbitration Register.
+ * Register Offsets and Bit Definitions
*/
-#define PLX_MARB_REG 0x8 /* L, Local Arbitration Register */
-#define PLX_DMAARB_REG 0xac
-enum marb_bits {
- MARB_LLT_MASK = 0x000000ff, /* Local Bus Latency Timer */
- MARB_LPT_MASK = 0x0000ff00, /* Local Bus Pause Timer */
- MARB_LTEN = 0x00010000, /* Latency Timer Enable */
- MARB_LPEN = 0x00020000, /* Pause Timer Enable */
- MARB_BREQ = 0x00040000, /* Local Bus BREQ Enable */
- MARB_DMA_PRIORITY_MASK = 0x00180000,
- /* local bus direct slave give up bus mode */
- MARB_LBDS_GIVE_UP_BUS_MODE = 0x00200000,
- /* direct slave LLOCKo# enable */
- MARB_DS_LLOCK_ENABLE = 0x00400000,
- MARB_PCI_REQUEST_MODE = 0x00800000,
- MARB_PCIV21_MODE = 0x01000000, /* pci specification v2.1 mode */
- MARB_PCI_READ_NO_WRITE_MODE = 0x02000000,
- MARB_PCI_READ_WITH_WRITE_FLUSH_MODE = 0x04000000,
- /* gate local bus latency timer with BREQ */
- MARB_GATE_TIMER_WITH_BREQ = 0x08000000,
- MARB_PCI_READ_NO_FLUSH_MODE = 0x10000000,
- MARB_USE_SUBSYSTEM_IDS = 0x20000000,
-};
-
-#define PLX_BIGEND_REG 0xc
-enum bigend_bits {
- /* use big endian ordering for configuration register accesses */
- BIGEND_CONFIG = 0x1,
- BIGEND_DIRECT_MASTER = 0x2,
- BIGEND_DIRECT_SLAVE_LOCAL0 = 0x4,
- BIGEND_ROM = 0x8,
- /*
- * use byte lane consisting of most significant bits instead of
- * least significant
- */
- BIGEND_BYTE_LANE = 0x10,
- BIGEND_DIRECT_SLAVE_LOCAL1 = 0x20,
- BIGEND_DMA1 = 0x40,
- BIGEND_DMA0 = 0x80,
-};
+/* Local Address Space 0 Range Register */
+#define PLX_REG_LAS0RR 0x0000
+/* Local Address Space 1 Range Register */
+#define PLX_REG_LAS1RR 0x00f0
+
+#define PLX_LASRR_IO BIT(0) /* Map to: 1=I/O, 0=Mem */
+#define PLX_LASRR_ANY32 (BIT(1) * 0) /* Locate anywhere in 32 bit */
+#define PLX_LASRR_LT1MB (BIT(1) * 1) /* Locate in 1st meg */
+#define PLX_LASRR_ANY64 (BIT(1) * 2) /* Locate anywhere in 64 bit */
+#define PLX_LASRR_MLOC_MASK GENMASK(2, 1) /* Memory location bits */
+#define PLX_LASRR_PREFETCH BIT(3) /* Memory is prefetchable */
+/* bits that specify range for memory space decode bits */
+#define PLX_LASRR_MEM_MASK GENMASK(31, 4)
+/* bits that specify range for i/o space decode bits */
+#define PLX_LASRR_IO_MASK GENMASK(31, 2)
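The MEM/IO mask split mirrors standard PCI BAR sizing: the writable decode bits of a range register hold ~(size - 1). A minimal sketch under that assumption (hypothetical helper, not part of this patch):

static u32 plx_las0_mem_size(u32 las0rr)
{
	/* Sketch only: recover the window size in bytes from a
	 * memory-space LAS0RR value (PLX_LASRR_IO clear), e.g.
	 * 0xfff00000 -> 0x100000 (1 MiB).
	 */
	return ~((u32)(las0rr & PLX_LASRR_MEM_MASK)) + 1;
}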
+
+/* Local Address Space 0 Local Base Address (Remap) Register */
+#define PLX_REG_LAS0BA 0x0004
+/* Local Address Space 1 Local Base Address (Remap) Register */
+#define PLX_REG_LAS1BA 0x00f4
+
+#define PLX_LASBA_EN BIT(0) /* Enable slave decode */
+/* bits that specify local base address for memory space */
+#define PLX_LASBA_MEM_MASK GENMASK(31, 4)
+/* bits that specify local base address for i/o space */
+#define PLX_LASBA_IO_MASK GENMASK(31, 2)
+
+/* Mode/Arbitration Register */
+#define PLX_REG_MARBR 0x0008
+/* DMA Arbitration Register (alias of MARBR). */
+#define PLX_REG_DMAARB 0x00ac
+
+/* Local Bus Latency Timer */
+#define PLX_MARBR_LT(x) (BIT(0) * ((x) & 0xff))
+#define PLX_MARBR_LT_MASK GENMASK(7, 0)
+#define PLX_MARBR_LT_SHIFT 0
+/* Local Bus Pause Timer */
+#define PLX_MARBR_PT(x) (BIT(8) * ((x) & 0xff))
+#define PLX_MARBR_PT_MASK GENMASK(15, 8)
+#define PLX_MARBR_PT_SHIFT 8
+/* Local Bus Latency Timer Enable */
+#define PLX_MARBR_LTEN BIT(16)
+/* Local Bus Pause Timer Enable */
+#define PLX_MARBR_PTEN BIT(17)
+/* Local Bus BREQ Enable */
+#define PLX_MARBR_BREQEN BIT(18)
+/* DMA Channel Priority */
+#define PLX_MARBR_PRIO_ROT (BIT(19) * 0) /* Rotational priority */
+#define PLX_MARBR_PRIO_DMA0 (BIT(19) * 1) /* DMA channel 0 has priority */
+#define PLX_MARBR_PRIO_DMA1 (BIT(19) * 2) /* DMA channel 1 has priority */
+#define PLX_MARBR_PRIO_MASK GENMASK(20, 19)
+/* Local Bus Direct Slave Give Up Bus Mode */
+#define PLX_MARBR_DSGUBM BIT(21)
+/* Direct Slave LLOCKo# Enable */
+#define PLX_MARBR_DSLLOCKOEN BIT(22)
+/* PCI Request Mode */
+#define PLX_MARBR_PCIREQM BIT(23)
+/* PCI Specification v2.1 Mode */
+#define PLX_MARBR_PCIV21M BIT(24)
+/* PCI Read No Write Mode */
+#define PLX_MARBR_PCIRNWM BIT(25)
+/* PCI Read with Write Flush Mode */
+#define PLX_MARBR_PCIRWFM BIT(26)
+/* Gate Local Bus Latency Timer with BREQ */
+#define PLX_MARBR_GLTBREQ BIT(27)
+/* PCI Read No Flush Mode */
+#define PLX_MARBR_PCIRNFM BIT(28)
/*
-** Note: The Expansion ROM stuff is only relevant to the PC environment.
-** This expansion ROM code is executed by the host CPU at boot time.
-** For this reason no bit definitions are provided here.
+ * Make reads from PCI Configuration register 0 return Subsystem ID and
+ * Subsystem Vendor ID instead of Device ID and Vendor ID
*/
-#define PLX_ROMRNG_REG 0x0010 /* L, Expn ROM Space Range Register */
-/* L, Local Addr Space Range Register */
-#define PLX_ROMMAP_REG 0x0014
-
-#define PLX_REGION0_REG 0x0018 /* L, Local Bus Region 0 Descriptor */
-#define RGN_WIDTH 0x00000002 /* Local bus width bits */
-#define RGN_8BITS 0x00000000 /* 08 bit Local Bus */
-#define RGN_16BITS 0x00000001 /* 16 bit Local Bus */
-#define RGN_32BITS 0x00000002 /* 32 bit Local Bus */
-#define RGN_MWS 0x0000003C /* Memory Access Wait States */
-#define RGN_0MWS 0x00000000
-#define RGN_1MWS 0x00000004
-#define RGN_2MWS 0x00000008
-#define RGN_3MWS 0x0000000C
-#define RGN_4MWS 0x00000010
-#define RGN_6MWS 0x00000018
-#define RGN_8MWS 0x00000020
-#define RGN_MRE 0x00000040 /* Memory Space Ready Input Enable */
-#define RGN_MBE 0x00000080 /* Memory Space Bterm Input Enable */
-#define RGN_READ_PREFETCH_DISABLE 0x00000100
-#define RGN_ROM_PREFETCH_DISABLE 0x00000200
-#define RGN_READ_PREFETCH_COUNT_ENABLE 0x00000400
-#define RGN_RWS 0x003C0000 /* Expn ROM Wait States */
-#define RGN_RRE 0x00400000 /* ROM Space Ready Input Enable */
-#define RGN_RBE 0x00800000 /* ROM Space Bterm Input Enable */
-#define RGN_MBEN 0x01000000 /* Memory Space Burst Enable */
-#define RGN_RBEN 0x04000000 /* ROM Space Burst Enable */
-#define RGN_THROT 0x08000000 /* De-assert TRDY when FIFO full */
-#define RGN_TRD 0xF0000000 /* Target Ready Delay /8 */
-
-#define PLX_REGION1_REG 0x00f8 /* L, Local Bus Region 1 Descriptor */
-
-#define PLX_DMRNG_REG 0x001C /* L, Direct Master Range Register */
-
-#define PLX_LBAPMEM_REG 0x0020 /* L, Lcl Base Addr for PCI mem space */
-
-#define PLX_LBAPIO_REG 0x0024 /* L, Lcl Base Addr for PCI I/O space */
-
-#define PLX_DMMAP_REG 0x0028 /* L, Direct Master Remap Register */
-#define DMM_MAE 0x00000001 /* Direct Mstr Memory Acc Enable */
-#define DMM_IAE 0x00000002 /* Direct Mstr I/O Acc Enable */
-#define DMM_LCK 0x00000004 /* LOCK Input Enable */
-#define DMM_PF4 0x00000008 /* Prefetch 4 Mode Enable */
-#define DMM_THROT 0x00000010 /* Assert IRDY when read FIFO full */
-#define DMM_PAF0 0x00000000 /* Programmable Almost fill level */
-#define DMM_PAF1 0x00000020 /* Programmable Almost fill level */
-#define DMM_PAF2 0x00000040 /* Programmable Almost fill level */
-#define DMM_PAF3 0x00000060 /* Programmable Almost fill level */
-#define DMM_PAF4 0x00000080 /* Programmable Almost fill level */
-#define DMM_PAF5 0x000000A0 /* Programmable Almost fill level */
-#define DMM_PAF6 0x000000C0 /* Programmable Almost fill level */
-#define DMM_PAF7 0x000000D0 /* Programmable Almost fill level */
-#define DMM_MAP 0xFFFF0000 /* Remap Address Bits */
-
-#define PLX_CAR_REG 0x002C /* L, Configuration Address Register */
-#define CAR_CT0 0x00000000 /* Config Type 0 */
-#define CAR_CT1 0x00000001 /* Config Type 1 */
-#define CAR_REG 0x000000FC /* Register Number Bits */
-#define CAR_FUN 0x00000700 /* Function Number Bits */
-#define CAR_DEV 0x0000F800 /* Device Number Bits */
-#define CAR_BUS 0x00FF0000 /* Bus Number Bits */
-#define CAR_CFG 0x80000000 /* Config Spc Access Enable */
-
-#define PLX_DBR_IN_REG 0x0060 /* L, PCI to Local Doorbell Register */
-
-#define PLX_DBR_OUT_REG 0x0064 /* L, Local to PCI Doorbell Register */
-
-#define PLX_INTRCS_REG 0x0068 /* L, Interrupt Control/Status Reg */
-#define ICS_AERR 0x00000001 /* Assert LSERR on ABORT */
-#define ICS_PERR 0x00000002 /* Assert LSERR on Parity Error */
-#define ICS_SERR 0x00000004 /* Generate PCI SERR# */
-#define ICS_MBIE 0x00000008 /* mailbox interrupt enable */
-#define ICS_PIE 0x00000100 /* PCI Interrupt Enable */
-#define ICS_PDIE 0x00000200 /* PCI Doorbell Interrupt Enable */
-#define ICS_PAIE 0x00000400 /* PCI Abort Interrupt Enable */
-#define ICS_PLIE 0x00000800 /* PCI Local Int Enable */
-#define ICS_RAE 0x00001000 /* Retry Abort Enable */
-#define ICS_PDIA 0x00002000 /* PCI Doorbell Interrupt Active */
-#define ICS_PAIA 0x00004000 /* PCI Abort Interrupt Active */
-#define ICS_LIA 0x00008000 /* Local Interrupt Active */
-#define ICS_LIE 0x00010000 /* Local Interrupt Enable */
-#define ICS_LDIE 0x00020000 /* Local Doorbell Int Enable */
-#define ICS_DMA0_E 0x00040000 /* DMA #0 Interrupt Enable */
-#define ICS_DMA1_E 0x00080000 /* DMA #1 Interrupt Enable */
-#define ICS_LDIA 0x00100000 /* Local Doorbell Int Active */
-#define ICS_DMA0_A 0x00200000 /* DMA #0 Interrupt Active */
-#define ICS_DMA1_A 0x00400000 /* DMA #1 Interrupt Active */
-#define ICS_BIA 0x00800000 /* BIST Interrupt Active */
-#define ICS_TA_DM 0x01000000 /* Target Abort - Direct Master */
-#define ICS_TA_DMA0 0x02000000 /* Target Abort - DMA #0 */
-#define ICS_TA_DMA1 0x04000000 /* Target Abort - DMA #1 */
-#define ICS_TA_RA 0x08000000 /* Target Abort - Retry Timeout */
-/* mailbox x is active */
-#define ICS_MBIA(x) (0x10000000 << ((x) & 0x3))
-
-#define PLX_CONTROL_REG 0x006C /* L, EEPROM Cntl & PCI Cmd Codes */
-#define CTL_RDMA 0x0000000E /* DMA Read Command */
-#define CTL_WDMA 0x00000070 /* DMA Write Command */
-#define CTL_RMEM 0x00000600 /* Memory Read Command */
-#define CTL_WMEM 0x00007000 /* Memory Write Command */
-#define CTL_USERO 0x00010000 /* USERO output pin control bit */
-#define CTL_USERI 0x00020000 /* USERI input pin bit */
-#define CTL_EE_CLK 0x01000000 /* EEPROM Clock line */
-#define CTL_EE_CS 0x02000000 /* EEPROM Chip Select */
-#define CTL_EE_W 0x04000000 /* EEPROM Write bit */
-#define CTL_EE_R 0x08000000 /* EEPROM Read bit */
-#define CTL_EECHK 0x10000000 /* EEPROM Present bit */
-#define CTL_EERLD 0x20000000 /* EEPROM Reload Register */
-#define CTL_RESET 0x40000000 /* !! Adapter Reset !! */
-#define CTL_READY 0x80000000 /* Local Init Done */
-
-#define PLX_ID_REG 0x70 /* hard-coded plx vendor and device ids */
-
-#define PLX_REVISION_REG 0x74 /* silicon revision */
-
-#define PLX_DMA0_MODE_REG 0x80 /* dma channel 0 mode register */
-#define PLX_DMA1_MODE_REG 0x94 /* dma channel 0 mode register */
-#define PLX_LOCAL_BUS_16_WIDE_BITS 0x1
-#define PLX_LOCAL_BUS_32_WIDE_BITS 0x3
-#define PLX_LOCAL_BUS_WIDTH_MASK 0x3
-#define PLX_DMA_EN_READYIN_BIT 0x40 /* enable ready in input */
-#define PLX_EN_BTERM_BIT 0x80 /* enable BTERM# input */
-#define PLX_DMA_LOCAL_BURST_EN_BIT 0x100 /* enable local burst mode */
-#define PLX_EN_CHAIN_BIT 0x200 /* enables chaining */
-/* enables interrupt on dma done */
-#define PLX_EN_DMA_DONE_INTR_BIT 0x400
-/* hold local address constant (don't increment) */
-#define PLX_LOCAL_ADDR_CONST_BIT 0x800
-/* enables demand-mode for dma transfer */
-#define PLX_DEMAND_MODE_BIT 0x1000
-#define PLX_EOT_ENABLE_BIT 0x4000
-#define PLX_STOP_MODE_BIT 0x8000
-/* routes dma interrupt to pci bus (instead of local bus) */
-#define PLX_DMA_INTR_PCI_BIT 0x20000
-
-/* pci address that dma transfers start at */
-#define PLX_DMA0_PCI_ADDRESS_REG 0x84
-#define PLX_DMA1_PCI_ADDRESS_REG 0x98
-
-/* local address that dma transfers start at */
-#define PLX_DMA0_LOCAL_ADDRESS_REG 0x88
-#define PLX_DMA1_LOCAL_ADDRESS_REG 0x9c
-
-/* number of bytes to transfer (first 23 bits) */
-#define PLX_DMA0_TRANSFER_SIZE_REG 0x8c
-#define PLX_DMA1_TRANSFER_SIZE_REG 0xa0
-
-#define PLX_DMA0_DESCRIPTOR_REG 0x90 /* descriptor pointer register */
-#define PLX_DMA1_DESCRIPTOR_REG 0xa4
-/* descriptor is located in pci space (not local space) */
-#define PLX_DESC_IN_PCI_BIT 0x1
-#define PLX_END_OF_CHAIN_BIT 0x2 /* end of chain bit */
-/* interrupt when this descriptor's transfer is finished */
-#define PLX_INTR_TERM_COUNT 0x4
-/* transfer from local to pci bus (not pci to local) */
-#define PLX_XFER_LOCAL_TO_PCI 0x8
-
-#define PLX_DMA0_CS_REG 0xa8 /* command status register */
-#define PLX_DMA1_CS_REG 0xa9
-#define PLX_DMA_EN_BIT 0x1 /* enable dma channel */
-#define PLX_DMA_START_BIT 0x2 /* start dma transfer */
-#define PLX_DMA_ABORT_BIT 0x4 /* abort dma transfer */
-#define PLX_CLEAR_DMA_INTR_BIT 0x8 /* clear dma interrupt */
-#define PLX_DMA_DONE_BIT 0x10 /* transfer done status bit */
-
-#define PLX_DMA0_THRESHOLD_REG 0xb0 /* command status register */
+#define PLX_MARBR_SUBSYSIDS BIT(29)
+
+/* Big/Little Endian Descriptor Register */
+#define PLX_REG_BIGEND 0x000c
+
+/* Configuration Register Big Endian Mode */
+#define PLX_BIGEND_CONFIG BIT(0)
+/* Direct Master Big Endian Mode */
+#define PLX_BIGEND_DM BIT(1)
+/* Direct Slave Address Space 0 Big Endian Mode */
+#define PLX_BIGEND_DSAS0 BIT(2)
+/* Direct Slave Expansion ROM Big Endian Mode */
+#define PLX_BIGEND_EROM BIT(3)
+/* Big Endian Byte Lane Mode - use most significant byte lanes */
+#define PLX_BIGEND_BEBLM BIT(4)
+/* Direct Slave Address Space 1 Big Endian Mode */
+#define PLX_BIGEND_DSAS1 BIT(5)
+/* DMA Channel 1 Big Endian Mode */
+#define PLX_BIGEND_DMA1 BIT(6)
+/* DMA Channel 0 Big Endian Mode */
+#define PLX_BIGEND_DMA0 BIT(7)
+/* DMA Channel N Big Endian Mode (N <= 1) */
+#define PLX_BIGEND_DMA(n) ((n) ? PLX_BIGEND_DMA1 : PLX_BIGEND_DMA0)
/*
- * Accesses near the end of memory can cause the PLX chip
- * to pre-fetch data off of end-of-ram. Limit the size of
- * memory so host-side accesses cannot occur.
+ * Note: The Expansion ROM stuff is only relevant to the PC environment.
+ * This expansion ROM code is executed by the host CPU at boot time.
+ * For this reason no bit definitions are provided here.
*/
-#define PLX_PREFETCH 32
+/* Expansion ROM Range Register */
+#define PLX_REG_EROMRR 0x0010
+/* Expansion ROM Local Base Address (Remap) Register */
+#define PLX_REG_EROMBA 0x0014
+
+/* Local Address Space 0/Expansion ROM Bus Region Descriptor Register */
+#define PLX_REG_LBRD0 0x0018
+/* Local Address Space 1 Bus Region Descriptor Register */
+#define PLX_REG_LBRD1 0x00f8
+
+/* Memory Space Local Bus Width */
+#define PLX_LBRD_MSWIDTH8 (BIT(0) * 0) /* 8 bits wide */
+#define PLX_LBRD_MSWIDTH16 (BIT(0) * 1) /* 16 bits wide */
+#define PLX_LBRD_MSWIDTH32 (BIT(0) * 2) /* 32 bits wide */
+#define PLX_LBRD_MSWIDTH32A (BIT(0) * 3) /* 32 bits wide */
+#define PLX_LBRD_MSWIDTH_MASK GENMASK(1, 0)
+#define PLX_LBRD_MSWIDTH_SHIFT 0
+/* Memory Space Internal Wait States */
+#define PLX_LBRD_MSIWS(x) (BIT(2) * ((x) & 0xf))
+#define PLX_LBRD_MSIWS_MASK GENMASK(5, 2)
+#define PLX_LBRD_MSIWS_SHIFT 2
+/* Memory Space Ready Input Enable */
+#define PLX_LBRD_MSREADYIEN BIT(6)
+/* Memory Space BTERM# Input Enable */
+#define PLX_LBRD_MSBTERMIEN BIT(7)
+/* Memory Space 0 Prefetch Disable (LBRD0 only) */
+#define PLX_LBRD0_MSPREDIS BIT(8)
+/* Memory Space 1 Burst Enable (LBRD1 only) */
+#define PLX_LBRD1_MSBURSTEN BIT(8)
+/* Expansion ROM Space Prefetch Disable (LBRD0 only) */
+#define PLX_LBRD0_EROMPREDIS BIT(9)
+/* Memory Space 1 Prefetch Disable (LBRD1 only) */
+#define PLX_LBRD1_MSPREDIS BIT(9)
+/* Read Prefetch Count Enable */
+#define PLX_LBRD_RPFCOUNTEN BIT(10)
+/* Prefetch Counter */
+#define PLX_LBRD_PFCOUNT(x) (BIT(11) * ((x) & 0xf))
+#define PLX_LBRD_PFCOUNT_MASK GENMASK(14, 11)
+#define PLX_LBRD_PFCOUNT_SHIFT 11
+/* Expansion ROM Space Local Bus Width (LBRD0 only) */
+#define PLX_LBRD0_EROMWIDTH8 (BIT(16) * 0) /* 8 bits wide */
+#define PLX_LBRD0_EROMWIDTH16 (BIT(16) * 1) /* 16 bits wide */
+#define PLX_LBRD0_EROMWIDTH32 (BIT(16) * 2) /* 32 bits wide */
+#define PLX_LBRD0_EROMWIDTH32A (BIT(16) * 3) /* 32 bits wide */
+#define PLX_LBRD0_EROMWIDTH_MASK GENMASK(17, 16)
+#define PLX_LBRD0_EROMWIDTH_SHIFT 16
+/* Expansion ROM Space Internal Wait States (LBRD0 only) */
+#define PLX_LBRD0_EROMIWS(x) (BIT(18) * ((x) & 0xf))
+#define PLX_LBRD0_EROMIWS_MASK GENMASK(21, 18)
+#define PLX_LBRD0_EROMIWS_SHIFT 18
+/* Expansion ROM Space Ready Input Enable (LBRD0 only) */
+#define PLX_LBRD0_EROMREADYIEN BIT(22)
+/* Expansion ROM Space BTERM# Input Enable (LBRD0 only) */
+#define PLX_LBRD0_EROMBTERMIEN BIT(23)
+/* Memory Space 0 Burst Enable (LBRD0 only) */
+#define PLX_LBRD0_MSBURSTEN BIT(24)
+/* Extra Long Load From Serial EEPROM (LBRD0 only) */
+#define PLX_LBRD0_EELONGLOAD BIT(25)
+/* Expansion ROM Space Burst Enable (LBRD0 only) */
+#define PLX_LBRD0_EROMBURSTEN BIT(26)
+/* Direct Slave PCI Write Mode - assert TRDY# when FIFO full (LBRD0 only) */
+#define PLX_LBRD0_DSWMTRDY BIT(27)
+/* PCI Target Retry Delay Clocks / 8 (LBRD0 only) */
+#define PLX_LBRD0_TRDELAY(x) (BIT(28) * ((x) & 0xF))
+#define PLX_LBRD0_TRDELAY_MASK GENMASK(31, 28)
+#define PLX_LBRD0_TRDELAY_SHIFT 28
+
+/* Local Range Register for Direct Master to PCI */
+#define PLX_REG_DMRR 0x001c
+
+/* Local Bus Base Address Register for Direct Master to PCI Memory */
+#define PLX_REG_DMLBAM 0x0020
+
+/* Local Base Address Register for Direct Master to PCI IO/CFG */
+#define PLX_REG_DMLBAI 0x0024
+
+/* PCI Base Address (Remap) Register for Direct Master to PCI Memory */
+#define PLX_REG_DMPBAM 0x0028
+
+/* Direct Master Memory Access Enable */
+#define PLX_DMPBAM_MEMACCEN BIT(0)
+/* Direct Master I/O Access Enable */
+#define PLX_DMPBAM_IOACCEN BIT(1)
+/* LLOCK# Input Enable */
+#define PLX_DMPBAM_LLOCKIEN BIT(2)
+/* Direct Master Read Prefetch Size Control (bits 12, 3) */
+#define PLX_DMPBAM_RPSIZECONT ((BIT(12) * 0) | (BIT(3) * 0))
+#define PLX_DMPBAM_RPSIZE4 ((BIT(12) * 0) | (BIT(3) * 1))
+#define PLX_DMPBAM_RPSIZE8 ((BIT(12) * 1) | (BIT(3) * 0))
+#define PLX_DMPBAM_RPSIZE16 ((BIT(12) * 1) | (BIT(3) * 1))
+#define PLX_DMPBAM_RPSIZE_MASK (BIT(12) | BIT(3))
+/* Direct Master PCI Read Mode - deassert IRDY when FIFO full */
+#define PLX_DMPBAM_RMIRDY BIT(4)
+/* Programmable Almost Full Level (bits 10, 8:5) */
+#define PLX_DMPBAM_PAFL(x) ((BIT(10) * !!((x) & 0x10)) | \
+ (BIT(5) * ((x) & 0xf)))
+#define PLX_DMPBAM_TO_PAFL(v) ((((BIT(10) & (v)) >> 1) | \
+ (GENMASK(8, 5) & (v))) >> 5)
+#define PLX_DMPBAM_PAFL_MASK (BIT(10) | GENMASK(8, 5))
+/* Write And Invalidate Mode */
+#define PLX_DMPBAM_WIM BIT(9)
+/* Direct Master Prefetch Limit */
+#define PLX_DMPBAM_PFLIMIT BIT(11)
+/* I/O Remap Select */
+#define PLX_DMPBAM_IOREMAPSEL BIT(13)
+/* Direct Master Write Delay */
+#define PLX_DMPBAM_WDELAYNONE (BIT(14) * 0)
+#define PLX_DMPBAM_WDELAY4 (BIT(14) * 1)
+#define PLX_DMPBAM_WDELAY8 (BIT(14) * 2)
+#define PLX_DMPBAM_WDELAY16 (BIT(14) * 3)
+#define PLX_DMPBAM_WDELAY_MASK GENMASK(15, 14)
+/* Remap of Local-to-PCI Space Into PCI Address Space */
+#define PLX_DMPBAM_REMAP_MASK GENMASK(31, 16)
+
+/* PCI Configuration Address Register for Direct Master to PCI IO/CFG */
+#define PLX_REG_DMCFGA 0x002c
+
+/* Configuration Type */
+#define PLX_DMCFGA_TYPE0 (BIT(0) * 0)
+#define PLX_DMCFGA_TYPE1 (BIT(0) * 1)
+#define PLX_DMCFGA_TYPE_MASK GENMASK(1, 0)
+/* Register Number */
+#define PLX_DMCFGA_REGNUM(x) (BIT(2) * ((x) & 0x3f))
+#define PLX_DMCFGA_REGNUM_MASK GENMASK(7, 2)
+#define PLX_DMCFGA_REGNUM_SHIFT 2
+/* Function Number */
+#define PLX_DMCFGA_FUNCNUM(x) (BIT(8) * ((x) & 0x7))
+#define PLX_DMCFGA_FUNCNUM_MASK GENMASK(10, 8)
+#define PLX_DMCFGA_FUNCNUM_SHIFT 8
+/* Device Number */
+#define PLX_DMCFGA_DEVNUM(x) (BIT(11) * ((x) & 0x1f))
+#define PLX_DMCFGA_DEVNUM_MASK GENMASK(15, 11)
+#define PLX_DMCFGA_DEVNUM_SHIFT 11
+/* Bus Number */
+#define PLX_DMCFGA_BUSNUM(x) (BIT(16) * ((x) & 0xff))
+#define PLX_DMCFGA_BUSNUM_MASK GENMASK(23, 16)
+#define PLX_DMCFGA_BUSNUM_SHIFT 16
+/* Configuration Enable */
+#define PLX_DMCFGA_CONFIGEN BIT(31)
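For illustration, composing a Type 0 configuration address from these fields might look like the hypothetical helper below ("reg" is assumed to be a byte offset, hence the shift down to a dword register number):

static u32 plx_cfg_addr(unsigned int devno, unsigned int fn,
			unsigned int reg)
{
	/* Sketch only: bus 0, Type 0 cycle; the enable bit makes the
	 * next direct-master access a configuration access.
	 */
	return PLX_DMCFGA_CONFIGEN | PLX_DMCFGA_TYPE0 |
	       PLX_DMCFGA_BUSNUM(0) | PLX_DMCFGA_DEVNUM(devno) |
	       PLX_DMCFGA_FUNCNUM(fn) | PLX_DMCFGA_REGNUM(reg >> 2);
}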
/*
- * The PCI Interface, via the PCI-9060 Chip, has up to eight (8) Mailbox
- * Registers. The PUTS (Power-Up Test Suite) handles the board-side
- * interface/interaction using the first 4 registers. Specifications for
- * the use of the full PUTS' command and status interface is contained
- * within a separate SBE PUTS Manual. The Host-Side Device Driver only
- * uses a subset of the full PUTS interface.
+ * Mailbox Register N (N <= 7)
+ *
+ * Note that if the I2O feature is enabled (QSR[0] is set), Mailbox Register 0
+ * is replaced by the Inbound Queue Port, and Mailbox Register 1 is replaced
+ * by the Outbound Queue Port. However, Mailbox Registers 0 and 1 remain
+ * accessible at alternative offsets when the I2O feature is enabled.
*/
+#define PLX_REG_MBOX(n) (0x0040 + (n) * 4)
+#define PLX_REG_MBOX0 PLX_REG_MBOX(0)
+#define PLX_REG_MBOX1 PLX_REG_MBOX(1)
+#define PLX_REG_MBOX2 PLX_REG_MBOX(2)
+#define PLX_REG_MBOX3 PLX_REG_MBOX(3)
+#define PLX_REG_MBOX4 PLX_REG_MBOX(4)
+#define PLX_REG_MBOX5 PLX_REG_MBOX(5)
+#define PLX_REG_MBOX6 PLX_REG_MBOX(6)
+#define PLX_REG_MBOX7 PLX_REG_MBOX(7)
+
+/* Alternative offsets for Mailbox Registers 0 and 1 (in case I2O is enabled) */
+#define PLX_REG_ALT_MBOX(n) ((n) < 2 ? 0x0078 + (n) * 4 : PLX_REG_MBOX(n))
+#define PLX_REG_ALT_MBOX0 PLX_REG_ALT_MBOX(0)
+#define PLX_REG_ALT_MBOX1 PLX_REG_ALT_MBOX(1)
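A hedged sketch of picking a mailbox offset that stays valid either way (the i2o_enabled flag is assumed to come from testing QSR bit 0; the helper is hypothetical, not from this patch):

static unsigned int plx_mbox_offset(unsigned int n, bool i2o_enabled)
{
	/* Sketch only: mailboxes 0 and 1 move to the alternative
	 * offsets when the I2O queue ports claim 0x40/0x44.
	 */
	if (i2o_enabled && n < 2)
		return PLX_REG_ALT_MBOX(n);
	return PLX_REG_MBOX(n);
}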
+
+/* PCI-to-Local Doorbell Register */
+#define PLX_REG_P2LDBELL 0x0060
+
+/* Local-to-PCI Doorbell Register */
+#define PLX_REG_L2PDBELL 0x0064
+
+/* Interrupt Control/Status Register */
+#define PLX_REG_INTCSR 0x0068
+
+/* Enable Local Bus LSERR# when PCI Bus Target Abort or Master Abort occurs */
+#define PLX_INTCSR_LSEABORTEN BIT(0)
+/* Enable Local Bus LSERR# when PCI parity error occurs */
+#define PLX_INTCSR_LSEPARITYEN BIT(1)
+/* Generate PCI Bus SERR# when set to 1 */
+#define PLX_INTCSR_GENSERR BIT(2)
+/* Mailbox Interrupt Enable (local bus interrupts on PCI write to MBOX0-3) */
+#define PLX_INTCSR_MBIEN BIT(3)
+/* PCI Interrupt Enable */
+#define PLX_INTCSR_PIEN BIT(8)
+/* PCI Doorbell Interrupt Enable */
+#define PLX_INTCSR_PDBIEN BIT(9)
+/* PCI Abort Interrupt Enable */
+#define PLX_INTCSR_PABORTIEN BIT(10)
+/* PCI Local Interrupt Enable */
+#define PLX_INTCSR_PLIEN BIT(11)
+/* Retry Abort Enable (for diagnostic purposes only) */
+#define PLX_INTCSR_RAEN BIT(12)
+/* PCI Doorbell Interrupt Active (read-only) */
+#define PLX_INTCSR_PDBIA BIT(13)
+/* PCI Abort Interrupt Active (read-only) */
+#define PLX_INTCSR_PABORTIA BIT(14)
+/* Local Interrupt (LINTi#) Active (read-only) */
+#define PLX_INTCSR_PLIA BIT(15)
+/* Local Interrupt Output (LINTo#) Enable */
+#define PLX_INTCSR_LIOEN BIT(16)
+/* Local Doorbell Interrupt Enable */
+#define PLX_INTCSR_LDBIEN BIT(17)
+/* DMA Channel 0 Interrupt Enable */
+#define PLX_INTCSR_DMA0IEN BIT(18)
+/* DMA Channel 1 Interrupt Enable */
+#define PLX_INTCSR_DMA1IEN BIT(19)
+/* DMA Channel N Interrupt Enable (N <= 1) */
+#define PLX_INTCSR_DMAIEN(n) ((n) ? PLX_INTCSR_DMA1IEN : PLX_INTCSR_DMA0IEN)
+/* Local Doorbell Interrupt Active (read-only) */
+#define PLX_INTCSR_LDBIA BIT(20)
+/* DMA Channel 0 Interrupt Active (read-only) */
+#define PLX_INTCSR_DMA0IA BIT(21)
+/* DMA Channel 1 Interrupt Active (read-only) */
+#define PLX_INTCSR_DMA1IA BIT(22)
+/* DMA Channel N Interrupt Active (N <= 1) (read-only) */
+#define PLX_INTCSR_DMAIA(n) ((n) ? PLX_INTCSR_DMA1IA : PLX_INTCSR_DMA0IA)
+/* BIST Interrupt Active (read-only) */
+#define PLX_INTCSR_BISTIA BIT(23)
+/* Direct Master Not Bus Master During Master Or Target Abort (read-only) */
+#define PLX_INTCSR_ABNOTDM BIT(24)
+/* DMA Channel 0 Not Bus Master During Master Or Target Abort (read-only) */
+#define PLX_INTCSR_ABNOTDMA0 BIT(25)
+/* DMA Channel 1 Not Bus Master During Master Or Target Abort (read-only) */
+#define PLX_INTCSR_ABNOTDMA1 BIT(26)
+/* DMA Channel N Not Bus Master During Master Or Target Abort (read-only) */
+#define PLX_INTCSR_ABNOTDMA(n) ((n) ? PLX_INTCSR_ABNOTDMA1 \
+ : PLX_INTCSR_ABNOTDMA0)
+/* Target Abort Not Generated After 256 Master Retries (read-only) */
+#define PLX_INTCSR_ABNOTRETRY BIT(27)
+/* PCI Wrote Mailbox 0 (enabled if bit 3 set) (read-only) */
+#define PLX_INTCSR_MB0IA BIT(28)
+/* PCI Wrote Mailbox 1 (enabled if bit 3 set) (read-only) */
+#define PLX_INTCSR_MB1IA BIT(29)
+/* PCI Wrote Mailbox 2 (enabled if bit 3 set) (read-only) */
+#define PLX_INTCSR_MB2IA BIT(30)
+/* PCI Wrote Mailbox 3 (enabled if bit 3 set) (read-only) */
+#define PLX_INTCSR_MB3IA BIT(31)
+/* PCI Wrote Mailbox N (N <= 3) (enabled if bit 3 set) (read-only) */
+#define PLX_INTCSR_MBIA(n) BIT(28 + (n))
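As a usage sketch (hypothetical helper, not from this patch), enabling the PCI interrupt plus both DMA channel interrupts is a read-modify-write of INTCSR; the rtd520 hunk later in this patch performs the same kind of write with PLX_INTCSR_PIEN | PLX_INTCSR_PLIEN:

static void plx_enable_dma_irqs(void __iomem *iobase)
{
	u32 intcsr = readl(iobase + PLX_REG_INTCSR);

	/* Sketch only: preserve the other bits, enable PCI + DMA 0/1 */
	intcsr |= PLX_INTCSR_PIEN | PLX_INTCSR_DMAIEN(0) |
		  PLX_INTCSR_DMAIEN(1);
	writel(intcsr, iobase + PLX_REG_INTCSR);
}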
-/*****************************************/
-/*** MAILBOX #(-1) - MEM ACCESS STS ***/
-/*****************************************/
-
-#define MBX_STS_VALID 0x57584744 /* 'WXGD' */
-#define MBX_STS_DILAV 0x44475857 /* swapped = 'DGXW' */
-
-/*****************************************/
-/*** MAILBOX #0 - PUTS STATUS ***/
-/*****************************************/
-
-#define MBX_STS_MASK 0x000000ff /* PUTS Status Register bits */
-#define MBX_STS_TMASK 0x0000000f /* register bits for TEST number */
-
-#define MBX_STS_PCIRESET 0x00000100 /* Host issued PCI reset request */
-#define MBX_STS_BUSY 0x00000080 /* PUTS is in progress */
-#define MBX_STS_ERROR 0x00000040 /* PUTS has failed */
/*
- * Undefined -> status in transition. We are in process of changing bits;
- * we SET Error bit before RESET of Busy bit
+ * Serial EEPROM Control, PCI Command Codes, User I/O Control,
+ * Init Control Register
*/
-#define MBX_STS_RESERVED 0x000000c0
-
-#define MBX_RESERVED_5 0x00000020 /* FYI: reserved/unused bit */
-#define MBX_RESERVED_4 0x00000010 /* FYI: reserved/unused bit */
-
-/******************************************/
-/*** MAILBOX #1 - PUTS COMMANDS ***/
-/******************************************/
-
+#define PLX_REG_CNTRL 0x006c
+
+/* PCI Read Command Code For DMA */
+#define PLX_CNTRL_CCRDMA(x) (BIT(0) * ((x) & 0xf))
+#define PLX_CNTRL_CCRDMA_MASK GENMASK(3, 0)
+#define PLX_CNTRL_CCRDMA_SHIFT 0
+#define PLX_CNTRL_CCRDMA_NORMAL PLX_CNTRL_CCRDMA(14) /* value after reset */
+/* PCI Write Command Code For DMA */
+#define PLX_CNTRL_CCWDMA(x) (BIT(4) * ((x) & 0xf))
+#define PLX_CNTRL_CCWDMA_MASK GENMASK(7, 4)
+#define PLX_CNTRL_CCWDMA_SHIFT 4
+#define PLX_CNTRL_CCWDMA_NORMAL PLX_CNTRL_CCWDMA(7) /* value after reset */
+/* PCI Memory Read Command Code For Direct Master */
+#define PLX_CNTRL_CCRDM(x) (BIT(8) * ((x) & 0xf))
+#define PLX_CNTRL_CCRDM_MASK GENMASK(11, 8)
+#define PLX_CNTRL_CCRDM_SHIFT 8
+#define PLX_CNTRL_CCRDM_NORMAL PLX_CNTRL_CCRDM(6) /* value after reset */
+/* PCI Memory Write Command Code For Direct Master */
+#define PLX_CNTRL_CCWDM(x) (BIT(12) * ((x) & 0xf))
+#define PLX_CNTRL_CCWDM_MASK GENMASK(15, 12)
+#define PLX_CNTRL_CCWDM_SHIFT 12
+#define PLX_CNTRL_CCWDM_NORMAL PLX_CNTRL_CCWDM(7) /* value after reset */
+/* General Purpose Output (USERO) */
+#define PLX_CNTRL_USERO BIT(16)
+/* General Purpose Input (USERI) (read-only) */
+#define PLX_CNTRL_USERI BIT(17)
+/* Serial EEPROM Clock Output (EESK) */
+#define PLX_CNTRL_EESK BIT(24)
+/* Serial EEPROM Chip Select Output (EECS) */
+#define PLX_CNTRL_EECS BIT(25)
+/* Serial EEPROM Data Write Bit (EEDI (sic)) */
+#define PLX_CNTRL_EEWB BIT(26)
+/* Serial EEPROM Data Read Bit (EEDO (sic)) (read-only) */
+#define PLX_CNTRL_EERB BIT(27)
+/* Serial EEPROM Present (read-only) */
+#define PLX_CNTRL_EEPRESENT BIT(28)
+/* Reload Configuration Registers from EEPROM */
+#define PLX_CNTRL_EERELOAD BIT(29)
+/* PCI Adapter Software Reset (asserts LRESETo#) */
+#define PLX_CNTRL_RESET BIT(30)
+/* Local Init Status (read-only) */
+#define PLX_CNTRL_INITDONE BIT(31)
/*
- * Any attempt to execute an unimplement command results in the PUTS
- * interface executing a NOOP and continuing as if the offending command
- * completed normally. Note: this supplies a simple method to interrogate
- * mailbox command processing functionality.
+ * Combined command code stuff for convenience.
*/
+#define PLX_CNTRL_CC_MASK \
+ (PLX_CNTRL_CCRDMA_MASK | PLX_CNTRL_CCWDMA_MASK | \
+ PLX_CNTRL_CCRDM_MASK | PLX_CNTRL_CCWDM_MASK)
+#define PLX_CNTRL_CC_NORMAL \
+ (PLX_CNTRL_CCRDMA_NORMAL | PLX_CNTRL_CCWDMA_NORMAL | \
+ PLX_CNTRL_CCRDM_NORMAL | PLX_CNTRL_CCWDM_NORMAL) /* val after reset */
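The EESK/EECS/EEWB/EERB bits allow the serial EEPROM to be bit-banged through CNTRL. A minimal sketch of clocking one bit in (hypothetical helper; real code would honor the EEPROM's setup/hold timing requirements):

static unsigned int plx_ee_clock_in_bit(void __iomem *iobase)
{
	u32 cntrl = readl(iobase + PLX_REG_CNTRL);

	/* Sketch only: chip select assumed already asserted */
	writel(cntrl | PLX_CNTRL_EESK, iobase + PLX_REG_CNTRL);
	udelay(1);
	writel(cntrl & ~PLX_CNTRL_EESK, iobase + PLX_REG_CNTRL);
	udelay(1);
	return !!(readl(iobase + PLX_REG_CNTRL) & PLX_CNTRL_EERB);
}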
+
+/* PCI Permanent Configuration ID Register (hard-coded PLX vendor and device) */
+#define PLX_REG_PCIHIDR 0x0070
+
+/* Hard-coded ID for PLX PCI 9080 */
+#define PLX_PCIHIDR_9080 0x908010b5
+
+/* PCI Permanent Revision ID Register (hard-coded silicon revision) (8-bit). */
+#define PLX_REG_PCIHREV 0x0074
+
+/* DMA Channel N Mode Register (N <= 1) */
+#define PLX_REG_DMAMODE(n) ((n) ? PLX_REG_DMAMODE1 : PLX_REG_DMAMODE0)
+#define PLX_REG_DMAMODE0 0x0080
+#define PLX_REG_DMAMODE1 0x0094
+
+/* Local Bus Width */
+#define PLX_DMAMODE_WIDTH8 (BIT(0) * 0) /* 8 bits wide */
+#define PLX_DMAMODE_WIDTH16 (BIT(0) * 1) /* 16 bits wide */
+#define PLX_DMAMODE_WIDTH32 (BIT(0) * 2) /* 32 bits wide */
+#define PLX_DMAMODE_WIDTH32A (BIT(0) * 3) /* 32 bits wide */
+#define PLX_DMAMODE_WIDTH_MASK GENMASK(1, 0)
+#define PLX_DMAMODE_WIDTH_SHIFT 0
+/* Internal Wait States */
+#define PLX_DMAMODE_IWS(x) (BIT(2) * ((x) & 0xf))
+#define PLX_DMAMODE_IWS_MASK GENMASK(5, 2)
+#define PLX_DMAMODE_IWS_SHIFT 2
+/* Ready Input Enable */
+#define PLX_DMAMODE_READYIEN BIT(6)
+/* BTERM# Input Enable */
+#define PLX_DMAMODE_BTERMIEN BIT(7)
+/* Local Burst Enable */
+#define PLX_DMAMODE_BURSTEN BIT(8)
+/* Chaining Enable */
+#define PLX_DMAMODE_CHAINEN BIT(9)
+/* Done Interrupt Enable */
+#define PLX_DMAMODE_DONEIEN BIT(10)
+/* Hold Local Address Constant */
+#define PLX_DMAMODE_LACONST BIT(11)
+/* Demand Mode */
+#define PLX_DMAMODE_DEMAND BIT(12)
+/* Write And Invalidate Mode */
+#define PLX_DMAMODE_WINVALIDATE BIT(13)
+/* DMA EOT Enable - enables EOT0# or EOT1# input pin */
+#define PLX_DMAMODE_EOTEN BIT(14)
+/* DMA Stop Data Transfer Mode - 0:BLAST; 1:EOT asserted or DREQ deasserted */
+#define PLX_DMAMODE_STOP BIT(15)
+/* DMA Clear Count Mode - count in descriptor cleared on completion */
+#define PLX_DMAMODE_CLRCOUNT BIT(16)
+/* DMA Channel Interrupt Select - 0:local bus interrupt; 1:PCI interrupt */
+#define PLX_DMAMODE_INTRPCI BIT(17)
+
+/* DMA Channel N PCI Address Register (N <= 1) */
+#define PLX_REG_DMAPADR(n) ((n) ? PLX_REG_DMAPADR1 : PLX_REG_DMAPADR0)
+#define PLX_REG_DMAPADR0 0x0084
+#define PLX_REG_DMAPADR1 0x0098
+
+/* DMA Channel N Local Address Register (N <= 1) */
+#define PLX_REG_DMALADR(n) ((n) ? PLX_REG_DMALADR1 : PLX_REG_DMALADR0)
+#define PLX_REG_DMALADR0 0x0088
+#define PLX_REG_DMALADR1 0x009c
+
+/* DMA Channel N Transfer Size (Bytes) Register (N <= 1) (first 23 bits) */
+#define PLX_REG_DMASIZ(n) ((n) ? PLX_REG_DMASIZ1 : PLX_REG_DMASIZ0)
+#define PLX_REG_DMASIZ0 0x008c
+#define PLX_REG_DMASIZ1 0x00a0
+
+/* DMA Channel N Descriptor Pointer Register (N <= 1) */
+#define PLX_REG_DMADPR(n) ((n) ? PLX_REG_DMADPR1 : PLX_REG_DMADPR0)
+#define PLX_REG_DMADPR0 0x0090
+#define PLX_REG_DMADPR1 0x00a4
+
+/* Descriptor Located In PCI Address Space (not local address space) */
+#define PLX_DMADPR_DESCPCI BIT(0)
+/* End Of Chain */
+#define PLX_DMADPR_CHAINEND BIT(1)
+/* Interrupt After Terminal Count */
+#define PLX_DMADPR_TCINTR BIT(2)
+/* Direction Of Transfer Local Bus To PCI (not PCI to local) */
+#define PLX_DMADPR_XFERL2P BIT(3)
+/* Next Descriptor Address Bits 31:4 (16 byte boundary) */
+#define PLX_DMADPR_NEXT_MASK GENMASK(31, 4)
+
+/* DMA Channel N Command/Status Register (N <= 1) (8-bit) */
+#define PLX_REG_DMACSR(n) ((n) ? PLX_REG_DMACSR1 : PLX_REG_DMACSR0)
+#define PLX_REG_DMACSR0 0x00a8
+#define PLX_REG_DMACSR1 0x00a9
+
+/* Channel Enable */
+#define PLX_DMACSR_ENABLE BIT(0)
+/* Channel Start - write 1 to start transfer (write-only) */
+#define PLX_DMACSR_START BIT(1)
+/* Channel Abort - write 1 to abort transfer (write-only) */
+#define PLX_DMACSR_ABORT BIT(2)
+/* Clear Interrupt - write 1 to clear DMA Channel Interrupt (write-only) */
+#define PLX_DMACSR_CLEARINTR BIT(3)
+/* Channel Done - transfer complete/inactive (read-only) */
+#define PLX_DMACSR_DONE BIT(4)
+
+/* DMA Threshold Register */
+#define PLX_REG_DMATHR 0x00b0
-#define MBX_CMD_MASK 0xffff0000 /* PUTS Command Register bits */
-
-#define MBX_CMD_ABORTJ 0x85000000 /* abort and jump */
-#define MBX_CMD_RESETP 0x86000000 /* reset and pause at start */
-#define MBX_CMD_PAUSE 0x87000000 /* pause immediately */
-#define MBX_CMD_PAUSEC 0x88000000 /* pause on completion */
-#define MBX_CMD_RESUME 0x89000000 /* resume operation */
-#define MBX_CMD_STEP 0x8a000000 /* single step tests */
-
-#define MBX_CMD_BSWAP 0x8c000000 /* identify byte swap scheme */
-#define MBX_CMD_BSWAP_0 0x8c000000 /* use scheme 0 */
-#define MBX_CMD_BSWAP_1 0x8c000001 /* use scheme 1 */
-
-/* setup host memory access window size */
-#define MBX_CMD_SETHMS 0x8d000000
-/* setup host memory access base address */
-#define MBX_CMD_SETHBA 0x8e000000
-/* perform memory setup and continue (IE. Done) */
-#define MBX_CMD_MGO 0x8f000000
-#define MBX_CMD_NOOP 0xFF000000 /* dummy, illegal command */
-
-/*****************************************/
-/*** MAILBOX #2 - MEMORY SIZE ***/
-/*****************************************/
-
-#define MBX_MEMSZ_MASK 0xffff0000 /* PUTS Memory Size Register bits */
-
-#define MBX_MEMSZ_128KB 0x00020000 /* 128 kilobyte board */
-#define MBX_MEMSZ_256KB 0x00040000 /* 256 kilobyte board */
-#define MBX_MEMSZ_512KB 0x00080000 /* 512 kilobyte board */
-#define MBX_MEMSZ_1MB 0x00100000 /* 1 megabyte board */
-#define MBX_MEMSZ_2MB 0x00200000 /* 2 megabyte board */
-#define MBX_MEMSZ_4MB 0x00400000 /* 4 megabyte board */
-#define MBX_MEMSZ_8MB 0x00800000 /* 8 megabyte board */
-#define MBX_MEMSZ_16MB 0x01000000 /* 16 megabyte board */
-
-/***************************************/
-/*** MAILBOX #2 - BOARD TYPE ***/
-/***************************************/
-
-#define MBX_BTYPE_MASK 0x0000ffff /* PUTS Board Type Register */
-/* PUTS Board Family Register */
-#define MBX_BTYPE_FAMILY_MASK 0x0000ff00
-#define MBX_BTYPE_SUBTYPE_MASK 0x000000ff /* PUTS Board Subtype */
-
-#define MBX_BTYPE_PLX9060 0x00000100 /* PLX family type */
-#define MBX_BTYPE_PLX9080 0x00000300 /* PLX wanXL100s family type */
-
-#define MBX_BTYPE_WANXL_4 0x00000104 /* wanXL400, 4-port */
-#define MBX_BTYPE_WANXL_2 0x00000102 /* wanXL200, 2-port */
-#define MBX_BTYPE_WANXL_1s 0x00000301 /* wanXL100s, 1-port */
-#define MBX_BTYPE_WANXL_1t 0x00000401 /* wanXL100T1, 1-port */
+/*
+ * DMA Threshold constraints:
+ * (C0PLAF + 1) + (C0PLAE + 1) <= 32
+ * (C0LPAF + 1) + (C0LPAE + 1) <= 32
+ * (C1PLAF + 1) + (C1PLAE + 1) <= 16
+ * (C1LPAF + 1) + (C1LPAE + 1) <= 16
+ */
-/*****************************************/
-/*** MAILBOX #3 - SHMQ MAILBOX ***/
-/*****************************************/
+/* DMA Channel 0 PCI-to-Local Almost Full (divided by 2, minus 1) */
+#define PLX_DMATHR_C0PLAF(x) (BIT(0) * ((x) & 0xf))
+#define PLX_DMATHR_C0PLAF_MASK GENMASK(3, 0)
+#define PLX_DMATHR_C0PLAF_SHIFT 0
+/* DMA Channel 0 Local-to-PCI Almost Empty (divided by 2, minus 1) */
+#define PLX_DMATHR_C0LPAE(x) (BIT(4) * ((x) & 0xf))
+#define PLX_DMATHR_C0LPAE_MASK GENMASK(7, 4)
+#define PLX_DMATHR_C0LPAE_SHIFT 4
+/* DMA Channel 0 Local-to-PCI Almost Full (divided by 2, minus 1) */
+#define PLX_DMATHR_C0LPAF(x) (BIT(8) * ((x) & 0xf))
+#define PLX_DMATHR_C0LPAF_MASK GENMASK(11, 8)
+#define PLX_DMATHR_C0LPAF_SHIFT 8
+/* DMA Channel 0 PCI-to-Local Almost Empty (divided by 2, minus 1) */
+#define PLX_DMATHR_C0PLAE(x) (BIT(12) * ((x) & 0xf))
+#define PLX_DMATHR_C0PLAE_MASK GENMASK(15, 12)
+#define PLX_DMATHR_C0PLAE_SHIFT 12
+/* DMA Channel 1 PCI-to-Local Almost Full (divided by 2, minus 1) */
+#define PLX_DMATHR_C1PLAF(x) (BIT(16) * ((x) & 0xf))
+#define PLX_DMATHR_C1PLAF_MASK GENMASK(19, 16)
+#define PLX_DMATHR_C1PLAF_SHIFT 16
+/* DMA Channel 1 Local-to-PCI Almost Empty (divided by 2, minus 1) */
+#define PLX_DMATHR_C1LPAE(x) (BIT(20) * ((x) & 0xf))
+#define PLX_DMATHR_C1LPAE_MASK GENMASK(23, 20)
+#define PLX_DMATHR_C1LPAE_SHIFT 20
+/* DMA Channel 1 Local-to-PCI Almost Full (divided by 2, minus 1) */
+#define PLX_DMATHR_C1LPAF(x) (BIT(24) * ((x) & 0xf))
+#define PLX_DMATHR_C1LPAF_MASK GENMASK(27, 24)
+#define PLX_DMATHR_C1LPAF_SHIFT 24
+/* DMA Channel 1 PCI-to-Local Almost Empty (divided by 2, minus 1) */
+#define PLX_DMATHR_C1PLAE(x) (BIT(28) * ((x) & 0xf))
+#define PLX_DMATHR_C1PLAE_MASK GENMASK(31, 28)
+#define PLX_DMATHR_C1PLAE_SHIFT 28
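To make the constraint above concrete: an encoded field value x corresponds to (x + 1) * 2 FIFO entries, so programming channel 0 with x = 7 in every field gives (7 + 1) + (7 + 1) = 16 <= 32 on both the PCI-to-local and local-to-PCI sides. A hypothetical sketch:

static u32 plx_dmathr_chan0_default(void)
{
	/* Sketch only: 16-entry almost-full/empty thresholds for
	 * channel 0, satisfying (C0PLAF + 1) + (C0PLAE + 1) <= 32
	 * and (C0LPAF + 1) + (C0LPAE + 1) <= 32.
	 */
	return PLX_DMATHR_C0PLAF(7) | PLX_DMATHR_C0LPAE(7) |
	       PLX_DMATHR_C0LPAF(7) | PLX_DMATHR_C0PLAE(7);
}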
-#define MBX_SMBX_MASK 0x000000ff /* PUTS SHMQ Mailbox bits */
+/*
+ * Messaging Queue Registers OPLFIS, OPLFIM, IQP, OQP, MQCR, QBAR, IFHPR,
+ * IFTPR, IPHPR, IPTPR, OFHPR, OFTPR, OPHPR, OPTPR, and QSR have been omitted.
+ * They are used by the I2O feature. (IQP and OQP occupy the usual offsets of
+ * the MBOX0 and MBOX1 registers if the I2O feature is enabled, but MBOX0 and
+ * MBOX1 are accessible via alternative offsets.)
+ */
-/***************************************/
-/*** GENERIC HOST-SIDE DRIVER ***/
-/***************************************/
+/* Queue Status/Control Register */
+#define PLX_REG_QSR 0x00e8
-#define MBX_ERR 0
-#define MBX_OK 1
+/* Value of QSR after reset - disables I2O feature completely. */
+#define PLX_QSR_VALUE_AFTER_RESET 0x00000050
-/* mailbox check routine - type of testing */
-#define MBXCHK_STS 0x00 /* check for PUTS status */
-#define MBXCHK_NOWAIT 0x01 /* dont care about PUTS status */
+/*
+ * Accesses near the end of memory can cause the PLX chip
+ * to pre-fetch data off of end-of-ram. Limit the size of
+ * memory so host-side accesses cannot occur.
+ */
-/* system allocates this many bytes for address mapping mailbox space */
-#define MBX_ADDR_SPACE_360 0x80 /* wanXL100s/200/400 */
-#define MBX_ADDR_MASK_360 (MBX_ADDR_SPACE_360 - 1)
+#define PLX_PREFETCH 32
+/**
+ * plx9080_abort_dma - Abort a PLX PCI 9080 DMA transfer
+ * @iobase: Remapped base address of configuration registers.
+ * @channel: DMA channel number (0 or 1).
+ *
+ * Aborts the DMA transfer on the channel, which must have been enabled
+ * and started beforehand.
+ *
+ * Return:
+ * %0 on success.
+ * -%ETIMEDOUT if timed out waiting for abort to complete.
+ */
static inline int plx9080_abort_dma(void __iomem *iobase, unsigned int channel)
{
void __iomem *dma_cs_addr;
@@ -421,29 +631,26 @@ static inline int plx9080_abort_dma(void __iomem *iobase, unsigned int channel)
const int timeout = 10000;
unsigned int i;
- if (channel)
- dma_cs_addr = iobase + PLX_DMA1_CS_REG;
- else
- dma_cs_addr = iobase + PLX_DMA0_CS_REG;
+ dma_cs_addr = iobase + PLX_REG_DMACSR(channel);
- /* abort dma transfer if necessary */
+ /* abort dma transfer if necessary */
dma_status = readb(dma_cs_addr);
- if ((dma_status & PLX_DMA_EN_BIT) == 0)
+ if ((dma_status & PLX_DMACSR_ENABLE) == 0)
return 0;
- /* wait to make sure done bit is zero */
- for (i = 0; (dma_status & PLX_DMA_DONE_BIT) && i < timeout; i++) {
+ /* wait to make sure done bit is zero */
+ for (i = 0; (dma_status & PLX_DMACSR_DONE) && i < timeout; i++) {
udelay(1);
dma_status = readb(dma_cs_addr);
}
if (i == timeout)
return -ETIMEDOUT;
- /* disable and abort channel */
- writeb(PLX_DMA_ABORT_BIT, dma_cs_addr);
- /* wait for dma done bit */
+ /* disable and abort channel */
+ writeb(PLX_DMACSR_ABORT, dma_cs_addr);
+ /* wait for dma done bit */
dma_status = readb(dma_cs_addr);
- for (i = 0; (dma_status & PLX_DMA_DONE_BIT) == 0 && i < timeout; i++) {
+ for (i = 0; (dma_status & PLX_DMACSR_DONE) == 0 && i < timeout; i++) {
udelay(1);
dma_status = readb(dma_cs_addr);
}
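A hedged caller sketch for the helper above (names are illustrative; comedi drivers keep the remapped PLX register window in their private data):

static void example_dma_shutdown(struct comedi_device *dev,
				 void __iomem *plx_iobase)
{
	/* Sketch only: abort channel 0 and warn if the abort timed
	 * out, per the kernel-doc Return values above
	 */
	if (plx9080_abort_dma(plx_iobase, 0))
		dev_warn(dev->class_dev, "DMA 0 abort timed out\n");
}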
diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
index e9e431391..802f51e46 100644
--- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c
+++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
@@ -643,7 +643,7 @@ static int daqp_ao_insn_write(struct comedi_device *dev,
outb(0, dev->iobase + DAQP_AUX_REG);
for (i = 0; i > insn->n; i++) {
- unsigned val = data[i];
+ unsigned int val = data[i];
int ret;
/* D/A transfer rate is about 8ms */
diff --git a/drivers/staging/comedi/drivers/rtd520.c b/drivers/staging/comedi/drivers/rtd520.c
index 9b6c56773..e00e9c626 100644
--- a/drivers/staging/comedi/drivers/rtd520.c
+++ b/drivers/staging/comedi/drivers/rtd520.c
@@ -362,7 +362,7 @@ struct rtd_private {
long ai_count; /* total transfer size (samples) */
int xfer_count; /* # to transfer data. 0->1/2FIFO */
int flags; /* flag event modes */
- unsigned fifosz;
+ unsigned int fifosz;
/* 8254 Timer/Counter gate and clock sources */
unsigned char timer_gate_src[3];
@@ -491,9 +491,9 @@ static void rtd_load_channelgain_list(struct comedi_device *dev,
static int rtd520_probe_fifo_depth(struct comedi_device *dev)
{
unsigned int chanspec = CR_PACK(0, 0, AREF_GROUND);
- unsigned i;
- static const unsigned limit = 0x2000;
- unsigned fifo_size = 0;
+ unsigned int i;
+ static const unsigned int limit = 0x2000;
+ unsigned int fifo_size = 0;
writel(0, dev->mmio + LAS0_ADC_FIFO_CLEAR);
rtd_load_channelgain_list(dev, 1, &chanspec);
@@ -501,7 +501,7 @@ static int rtd520_probe_fifo_depth(struct comedi_device *dev)
writel(0, dev->mmio + LAS0_ADC_CONVERSION);
/* convert samples */
for (i = 0; i < limit; ++i) {
- unsigned fifo_status;
+ unsigned int fifo_status;
/* trigger conversion */
writew(0, dev->mmio + LAS0_ADC);
usleep_range(1, 1000);
@@ -1175,7 +1175,7 @@ static void rtd_reset(struct comedi_device *dev)
writel(0, dev->mmio + LAS0_BOARD_RESET);
usleep_range(100, 1000); /* needed? */
- writel(0, devpriv->lcfg + PLX_INTRCS_REG);
+ writel(0, devpriv->lcfg + PLX_REG_INTCSR);
writew(0, dev->mmio + LAS0_IT);
writew(~0, dev->mmio + LAS0_CLEAR);
readw(dev->mmio + LAS0_CLEAR);
@@ -1316,7 +1316,8 @@ static int rtd_auto_attach(struct comedi_device *dev,
devpriv->fifosz = ret;
if (dev->irq)
- writel(ICS_PIE | ICS_PLIE, devpriv->lcfg + PLX_INTRCS_REG);
+ writel(PLX_INTCSR_PIEN | PLX_INTCSR_PLIEN,
+ devpriv->lcfg + PLX_REG_INTCSR);
return 0;
}
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index c5e08635e..4a87b4b52 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -708,7 +708,7 @@ static uint16_t s626_get_mode_a(struct comedi_device *dev,
uint16_t cra;
uint16_t crb;
uint16_t setup;
- unsigned cntsrc, clkmult, clkpol, encmode;
+ unsigned int cntsrc, clkmult, clkpol, encmode;
/* Fetch CRA and CRB register images. */
cra = s626_debi_read(dev, S626_LP_CRA(chan));
@@ -763,7 +763,7 @@ static uint16_t s626_get_mode_b(struct comedi_device *dev,
uint16_t cra;
uint16_t crb;
uint16_t setup;
- unsigned cntsrc, clkmult, clkpol, encmode;
+ unsigned int cntsrc, clkmult, clkpol, encmode;
/* Fetch CRA and CRB register images. */
cra = s626_debi_read(dev, S626_LP_CRA(chan));
@@ -838,7 +838,7 @@ static void s626_set_mode_a(struct comedi_device *dev,
struct s626_private *devpriv = dev->private;
uint16_t cra;
uint16_t crb;
- unsigned cntsrc, clkmult, clkpol;
+ unsigned int cntsrc, clkmult, clkpol;
/* Initialize CRA and CRB images. */
/* Preload trigger is passed through. */
@@ -916,7 +916,7 @@ static void s626_set_mode_b(struct comedi_device *dev,
struct s626_private *devpriv = dev->private;
uint16_t cra;
uint16_t crb;
- unsigned cntsrc, clkmult, clkpol;
+ unsigned int cntsrc, clkmult, clkpol;
/* Initialize CRA and CRB images. */
/* IndexSrc is passed through. */
diff --git a/drivers/staging/comedi/drivers/s626.h b/drivers/staging/comedi/drivers/s626.h
index b83424e75..6a00a64c6 100644
--- a/drivers/staging/comedi/drivers/s626.h
+++ b/drivers/staging/comedi/drivers/s626.h
@@ -29,8 +29,10 @@
#define S626_ENCODER_CHANNELS 6
#define S626_DIO_CHANNELS 48
#define S626_DIO_BANKS 3 /* Number of DIO groups. */
-#define S626_DIO_EXTCHANS 40 /* Number of extended-capability
- * DIO channels. */
+#define S626_DIO_EXTCHANS 40 /*
+ * Number of extended-capability
+ * DIO channels.
+ */
#define S626_NUM_TRIMDACS 12 /* Number of valid TrimDAC channels. */
@@ -48,21 +50,29 @@
#define S626_GSEL_BIPOLAR10V 0x00A0 /* S626_LP_GSEL setting 10V bipolar. */
/* Error codes that must be visible to this base class. */
-#define S626_ERR_ILLEGAL_PARM 0x00010000 /* Illegal function parameter
- * value was specified. */
+#define S626_ERR_ILLEGAL_PARM 0x00010000 /*
+ * Illegal function parameter
+ * value was specified.
+ */
#define S626_ERR_I2C 0x00020000 /* I2C error. */
-#define S626_ERR_COUNTERSETUP 0x00200000 /* Illegal setup specified for
- * counter channel. */
+#define S626_ERR_COUNTERSETUP 0x00200000 /*
+ * Illegal setup specified for
+ * counter channel.
+ */
#define S626_ERR_DEBI_TIMEOUT 0x00400000 /* DEBI transfer timed out. */
/*
* Organization (physical order) and size (in DWORDs) of logical DMA buffers
* contained by ANA_DMABUF.
*/
-#define S626_ADC_DMABUF_DWORDS 40 /* ADC DMA buffer must hold 16 samples,
- * plus pre/post garbage samples. */
-#define S626_DAC_WDMABUF_DWORDS 1 /* DAC output DMA buffer holds a single
- * sample. */
+#define S626_ADC_DMABUF_DWORDS 40 /*
+ * ADC DMA buffer must hold 16 samples,
+ * plus pre/post garbage samples.
+ */
+#define S626_DAC_WDMABUF_DWORDS 1 /*
+ * DAC output DMA buffer holds a single
+ * sample.
+ */
/* All remaining space in 4KB DMA buffer is available for the RPS1 program. */
@@ -95,60 +105,90 @@
#define S626_RPS_IRQ 0x60000000 /* IRQ */
#define S626_RPS_LOGICAL_OR 0x08000000 /* Logical OR conditionals. */
-#define S626_RPS_INVERT 0x04000000 /* Test for negated
- * semaphores. */
+#define S626_RPS_INVERT 0x04000000 /*
+ * Test for negated
+ * semaphores.
+ */
#define S626_RPS_DEBI 0x00000002 /* DEBI done */
-#define S626_RPS_SIG0 0x00200000 /* RPS semaphore 0
- * (used by ADC). */
-#define S626_RPS_SIG1 0x00400000 /* RPS semaphore 1
- * (used by DAC). */
-#define S626_RPS_SIG2 0x00800000 /* RPS semaphore 2
- * (not used). */
+#define S626_RPS_SIG0 0x00200000 /*
+ * RPS semaphore 0
+ * (used by ADC).
+ */
+#define S626_RPS_SIG1 0x00400000 /*
+ * RPS semaphore 1
+ * (used by DAC).
+ */
+#define S626_RPS_SIG2 0x00800000 /*
+ * RPS semaphore 2
+ * (not used).
+ */
#define S626_RPS_GPIO2 0x00080000 /* RPS GPIO2 */
#define S626_RPS_GPIO3 0x00100000 /* RPS GPIO3 */
-#define S626_RPS_SIGADC S626_RPS_SIG0 /* Trigger/status for
- * ADC's RPS program. */
-#define S626_RPS_SIGDAC S626_RPS_SIG1 /* Trigger/status for
- * DAC's RPS program. */
+#define S626_RPS_SIGADC S626_RPS_SIG0 /*
+ * Trigger/status for
+ * ADC's RPS program.
+ */
+#define S626_RPS_SIGDAC S626_RPS_SIG1 /*
+ * Trigger/status for
+ * DAC's RPS program.
+ */
/* RPS clock parameters. */
-#define S626_RPSCLK_SCALAR 8 /* This is apparent ratio of
- * PCI/RPS clks (undocumented!!). */
+#define S626_RPSCLK_SCALAR 8 /*
+ * This is apparent ratio of
+ * PCI/RPS clks (undocumented!!).
+ */
#define S626_RPSCLK_PER_US (33 / S626_RPSCLK_SCALAR)
- /* Number of RPS clocks in one
- * microsecond. */
+ /*
+ * Number of RPS clocks in one
+ * microsecond.
+ */
/* Event counter source addresses. */
#define S626_SBA_RPS_A0 0x27 /* Time of RPS0 busy, in PCI clocks. */
/* GPIO constants. */
-#define S626_GPIO_BASE 0x10004000 /* GPIO 0,2,3 = inputs,
- * GPIO3 = IRQ; GPIO1 = out. */
+#define S626_GPIO_BASE 0x10004000 /*
+ * GPIO 0,2,3 = inputs,
+ * GPIO3 = IRQ; GPIO1 = out.
+ */
#define S626_GPIO1_LO 0x00000000 /* GPIO1 set to LOW. */
#define S626_GPIO1_HI 0x00001000 /* GPIO1 set to HIGH. */
/* Primary Status Register (PSR) constants. */
#define S626_PSR_DEBI_E 0x00040000 /* DEBI event flag. */
#define S626_PSR_DEBI_S 0x00080000 /* DEBI status flag. */
-#define S626_PSR_A2_IN 0x00008000 /* Audio output DMA2 protection
- * address reached. */
-#define S626_PSR_AFOU 0x00000800 /* Audio FIFO under/overflow
- * detected. */
-#define S626_PSR_GPIO2 0x00000020 /* GPIO2 input pin: 0=AdcBusy,
- * 1=AdcIdle. */
-#define S626_PSR_EC0S 0x00000001 /* Event counter 0 threshold
- * reached. */
+#define S626_PSR_A2_IN 0x00008000 /*
+ * Audio output DMA2 protection
+ * address reached.
+ */
+#define S626_PSR_AFOU 0x00000800 /*
+ * Audio FIFO under/overflow
+ * detected.
+ */
+#define S626_PSR_GPIO2 0x00000020 /*
+ * GPIO2 input pin: 0=AdcBusy,
+ * 1=AdcIdle.
+ */
+#define S626_PSR_EC0S 0x00000001 /*
+ * Event counter 0 threshold
+ * reached.
+ */
/* Secondary Status Register (SSR) constants. */
-#define S626_SSR_AF2_OUT 0x00000200 /* Audio 2 output FIFO
- * under/overflow detected. */
+#define S626_SSR_AF2_OUT 0x00000200 /*
+ * Audio 2 output FIFO
+ * under/overflow detected.
+ */
/* Master Control Register 1 (MC1) constants. */
#define S626_MC1_SOFT_RESET 0x80000000 /* Invoke 7146 soft reset. */
-#define S626_MC1_SHUTDOWN 0x3FFF0000 /* Shut down all MC1-controlled
- * enables. */
+#define S626_MC1_SHUTDOWN 0x3FFF0000 /*
+ * Shut down all MC1-controlled
+ * enables.
+ */
#define S626_MC1_ERPS1 0x2000 /* Enab/disable RPS task 1. */
#define S626_MC1_ERPS0 0x1000 /* Enab/disable RPS task 0. */
@@ -177,15 +217,23 @@
#define S626_P_DEBIAD 0x0088 /* DEBI target address. */
#define S626_P_I2CCTRL 0x008C /* I2C control. */
#define S626_P_I2CSTAT 0x0090 /* I2C status. */
-#define S626_P_BASEA2_IN 0x00AC /* Audio input 2 base physical DMAbuf
- * address. */
-#define S626_P_PROTA2_IN 0x00B0 /* Audio input 2 physical DMAbuf
- * protection address. */
+#define S626_P_BASEA2_IN 0x00AC /*
+ * Audio input 2 base physical DMAbuf
+ * address.
+ */
+#define S626_P_PROTA2_IN 0x00B0 /*
+ * Audio input 2 physical DMAbuf
+ * protection address.
+ */
#define S626_P_PAGEA2_IN 0x00B4 /* Audio input 2 paging attributes. */
-#define S626_P_BASEA2_OUT 0x00B8 /* Audio output 2 base physical DMAbuf
- * address. */
-#define S626_P_PROTA2_OUT 0x00BC /* Audio output 2 physical DMAbuf
- * protection address. */
+#define S626_P_BASEA2_OUT 0x00B8 /*
+ * Audio output 2 base physical DMAbuf
+ * address.
+ */
+#define S626_P_PROTA2_OUT 0x00BC /*
+ * Audio output 2 physical DMAbuf
+ * protection address.
+ */
#define S626_P_PAGEA2_OUT 0x00C0 /* Audio output 2 paging attributes. */
#define S626_P_RPSPAGE0 0x00C4 /* RPS0 page. */
#define S626_P_RPSPAGE1 0x00C8 /* RPS1 page. */
@@ -205,8 +253,10 @@
#define S626_P_PSR 0x0110 /* Primary status. */
#define S626_P_SSR 0x0114 /* Secondary status. */
#define S626_P_EC1R 0x0118 /* Event counter set 1. */
-#define S626_P_ADP4 0x0138 /* Logical audio DMA pointer of audio
- * input FIFO A2_IN. */
+#define S626_P_ADP4 0x0138 /*
+ * Logical audio DMA pointer of audio
+ * input FIFO A2_IN.
+ */
#define S626_P_FB_BUFFER1 0x0144 /* Audio feedback buffer 1. */
#define S626_P_FB_BUFFER2 0x0148 /* Audio feedback buffer 2. */
#define S626_P_TSL1 0x0180 /* Audio time slot list 1. */
@@ -243,13 +293,19 @@
#define S626_LP_RDMISC2 0x0082 /* Read Misc2. */
/* Bit masks for MISC1 register that are the same for reads and writes. */
-#define S626_MISC1_WENABLE 0x8000 /* enab writes to MISC2 (except Clear
- * Watchdog bit). */
+#define S626_MISC1_WENABLE 0x8000 /*
+ * enab writes to MISC2 (except Clear
+ * Watchdog bit).
+ */
#define S626_MISC1_WDISABLE 0x0000 /* Disable writes to MISC2. */
-#define S626_MISC1_EDCAP 0x1000 /* Enable edge capture on DIO chans
- * specified by S626_LP_WRCAPSELx. */
-#define S626_MISC1_NOEDCAP 0x0000 /* Disable edge capture on specified
- * DIO chans. */
+#define S626_MISC1_EDCAP 0x1000 /*
+ * Enable edge capture on DIO chans
+ * specified by S626_LP_WRCAPSELx.
+ */
+#define S626_MISC1_NOEDCAP 0x0000 /*
+ * Disable edge capture on specified
+ * DIO chans.
+ */
/* Bit masks for MISC1 register reads. */
#define S626_RDMISC1_WDTIMEOUT 0x4000 /* Watchdog timer timed out. */
@@ -268,35 +324,49 @@
#define S626_A1_RUN 0x20000000 /* Run A1 based on TSL1. */
#define S626_A1_SWAP 0x00200000 /* Use big-endian for A1. */
#define S626_A2_SWAP 0x00100000 /* Use big-endian for A2. */
-#define S626_WS_MODES 0x00019999 /* WS0 = TSL1 trigger input,
- * WS1-WS4 = CS* outputs. */
-
-#if S626_PLATFORM == S626_INTEL /* Base ACON1 config: always run
- * A1 based on TSL1. */
+#define S626_WS_MODES 0x00019999 /*
+ * WS0 = TSL1 trigger input,
+ * WS1-WS4 = CS* outputs.
+ */
+
+#if S626_PLATFORM == S626_INTEL /*
+ * Base ACON1 config: always run
+ * A1 based on TSL1.
+ */
#define S626_ACON1_BASE (S626_WS_MODES | S626_A1_RUN)
#elif S626_PLATFORM == S626_MOTOROLA
#define S626_ACON1_BASE \
(S626_WS_MODES | S626_A1_RUN | S626_A1_SWAP | S626_A2_SWAP)
#endif
-#define S626_ACON1_ADCSTART S626_ACON1_BASE /* Start ADC: run A1
- * based on TSL1. */
+#define S626_ACON1_ADCSTART S626_ACON1_BASE /*
+ * Start ADC: run A1
+ * based on TSL1.
+ */
#define S626_ACON1_DACSTART (S626_ACON1_BASE | S626_A2_RUN)
/* Start transmit to DAC: run A2 based on TSL2. */
#define S626_ACON1_DACSTOP S626_ACON1_BASE /* Halt A2. */
/* Bit masks for ACON2 register. */
#define S626_A1_CLKSRC_BCLK1 0x00000000 /* A1 bit rate = BCLK1 (ADC). */
-#define S626_A2_CLKSRC_X1 0x00800000 /* A2 bit rate = ACLK/1
- * (DACs). */
-#define S626_A2_CLKSRC_X2 0x00C00000 /* A2 bit rate = ACLK/2
- * (DACs). */
-#define S626_A2_CLKSRC_X4 0x01400000 /* A2 bit rate = ACLK/4
- * (DACs). */
+#define S626_A2_CLKSRC_X1 0x00800000 /*
+ * A2 bit rate = ACLK/1
+ * (DACs).
+ */
+#define S626_A2_CLKSRC_X2 0x00C00000 /*
+ * A2 bit rate = ACLK/2
+ * (DACs).
+ */
+#define S626_A2_CLKSRC_X4 0x01400000 /*
+ * A2 bit rate = ACLK/4
+ * (DACs).
+ */
#define S626_INVERT_BCLK2 0x00100000 /* Invert BCLK2 (DACs). */
#define S626_BCLK2_OE 0x00040000 /* Enable BCLK2 (DACs). */
-#define S626_ACON2_XORMASK 0x000C0000 /* XOR mask for ACON2
- * active-low bits. */
+#define S626_ACON2_XORMASK 0x000C0000 /*
+ * XOR mask for ACON2
+ * active-low bits.
+ */
#define S626_ACON2_INIT (S626_ACON2_XORMASK ^ \
(S626_A1_CLKSRC_BCLK1 | S626_A2_CLKSRC_X2 | \
@@ -308,12 +378,18 @@
#define S626_WS3 0x10000000
#define S626_WS4 0x08000000
#define S626_RSD1 0x01000000 /* Shift A1 data in on SD1. */
-#define S626_SDW_A1 0x00800000 /* Store rcv'd char at next char
- * slot of DWORD1 buffer. */
-#define S626_SIB_A1 0x00400000 /* Store rcv'd char at next
- * char slot of FB1 buffer. */
-#define S626_SF_A1 0x00200000 /* Write unsigned long
- * buffer to input FIFO. */
+#define S626_SDW_A1 0x00800000 /*
+ * Store rcv'd char at next char
+ * slot of DWORD1 buffer.
+ */
+#define S626_SIB_A1 0x00400000 /*
+ * Store rcv'd char at next
+ * char slot of FB1 buffer.
+ */
+#define S626_SF_A1 0x00200000 /*
+ * Write unsigned long
+ * buffer to input FIFO.
+ */
/* Select parallel-to-serial converter's data source: */
#define S626_XFIFO_0 0x00000000 /* Data fifo byte 0. */
@@ -324,31 +400,45 @@
#define S626_XFB1 0x00000050 /* FB_BUFFER byte 1. */
#define S626_XFB2 0x00000060 /* FB_BUFFER byte 2. */
#define S626_XFB3 0x00000070 /* FB_BUFFER byte 3. */
-#define S626_SIB_A2 0x00000200 /* Store next dword from A2's
+#define S626_SIB_A2 0x00000200 /*
+ * Store next dword from A2's
* input shifter to FB2
- * buffer. */
-#define S626_SF_A2 0x00000100 /* Store next dword from A2's
+ * buffer.
+ */
+#define S626_SF_A2 0x00000100 /*
+ * Store next dword from A2's
* input shifter to its input
- * fifo. */
-#define S626_LF_A2 0x00000080 /* Load next dword from A2's
+ * fifo.
+ */
+#define S626_LF_A2 0x00000080 /*
+ * Load next dword from A2's
* output fifo into its
- * output dword buffer. */
+ * output dword buffer.
+ */
#define S626_XSD2 0x00000008 /* Shift data out on SD2. */
#define S626_RSD3 0x00001800 /* Shift data in on SD3. */
#define S626_RSD2 0x00001000 /* Shift data in on SD2. */
-#define S626_LOW_A2 0x00000002 /* Drive last SD low for 7 clks,
- * then tri-state. */
+#define S626_LOW_A2 0x00000002 /*
+ * Drive last SD low for 7 clks,
+ * then tri-state.
+ */
#define S626_EOS 0x00000001 /* End of superframe. */
/* I2C configuration constants. */
-#define S626_I2C_CLKSEL 0x0400 /* I2C bit rate =
- * PCIclk/480 = 68.75 KHz. */
-#define S626_I2C_BITRATE 68.75 /* I2C bus data bit rate
+#define S626_I2C_CLKSEL 0x0400 /*
+ * I2C bit rate =
+ * PCIclk/480 = 68.75 kHz.
+ */
+#define S626_I2C_BITRATE 68.75 /*
+ * I2C bus data bit rate
* (determined by
- * S626_I2C_CLKSEL) in KHz. */
-#define S626_I2C_WRTIME 15.0 /* Worst case time, in msec,
+ * S626_I2C_CLKSEL) in kHz.
+ */
+#define S626_I2C_WRTIME 15.0 /*
+ * Worst case time, in msec,
* for EEPROM internal write
- * op. */
+ * op.
+ */
/* I2C manifest constants. */
@@ -368,8 +458,10 @@
#define S626_I2C_B0(ATTR, VAL) (((ATTR) << 2) | ((VAL) << 8))
/* DEBI command constants. */
-#define S626_DEBI_CMD_SIZE16 (2 << 17) /* Transfer size is always
- * 2 bytes. */
+#define S626_DEBI_CMD_SIZE16 (2 << 17) /*
+ * Transfer size is always
+ * 2 bytes.
+ */
#define S626_DEBI_CMD_READ 0x00010000 /* Read operation. */
#define S626_DEBI_CMD_WRITE 0x00000000 /* Write operation. */
@@ -380,42 +472,58 @@
#define S626_DEBI_CMD_WRWORD (S626_DEBI_CMD_WRITE | S626_DEBI_CMD_SIZE16)
/* DEBI configuration constants. */
-#define S626_DEBI_CFG_XIRQ_EN 0x80000000 /* Enable external interrupt
- * on GPIO3. */
+#define S626_DEBI_CFG_XIRQ_EN 0x80000000 /*
+ * Enable external interrupt
+ * on GPIO3.
+ */
#define S626_DEBI_CFG_XRESUME 0x40000000 /* Resume block */
- /* Transfer when XIRQ
- * deasserted. */
+ /*
+ * Transfer when XIRQ
+ * deasserted.
+ */
#define S626_DEBI_CFG_TOQ 0x03C00000 /* Timeout (15 PCI cycles). */
#define S626_DEBI_CFG_FAST 0x10000000 /* Fast mode enable. */
/* 4-bit field that specifies DEBI timeout value in PCI clock cycles: */
-#define S626_DEBI_CFG_TOUT_BIT 22 /* Finish DEBI cycle after this many
- * clocks. */
+#define S626_DEBI_CFG_TOUT_BIT 22 /*
+ * Finish DEBI cycle after this many
+ * clocks.
+ */
/* 2-bit field that specifies Endian byte lane steering: */
-#define S626_DEBI_CFG_SWAP_NONE 0x00000000 /* Straight - don't swap any
- * bytes (Intel). */
+#define S626_DEBI_CFG_SWAP_NONE 0x00000000 /*
+ * Straight - don't swap any
+ * bytes (Intel).
+ */
#define S626_DEBI_CFG_SWAP_2 0x00100000 /* 2-byte swap (Motorola). */
#define S626_DEBI_CFG_SWAP_4 0x00200000 /* 4-byte swap. */
-#define S626_DEBI_CFG_SLAVE16 0x00080000 /* Slave is able to serve
- * 16-bit cycles. */
-#define S626_DEBI_CFG_INC 0x00040000 /* Enable address increment
- * for block transfers. */
+#define S626_DEBI_CFG_SLAVE16 0x00080000 /*
+ * Slave is able to serve
+ * 16-bit cycles.
+ */
+#define S626_DEBI_CFG_INC 0x00040000 /*
+ * Enable address increment
+ * for block transfers.
+ */
#define S626_DEBI_CFG_INTEL 0x00020000 /* Intel style local bus. */
#define S626_DEBI_CFG_TIMEROFF 0x00010000 /* Disable timer. */
#if S626_PLATFORM == S626_INTEL
-#define S626_DEBI_TOUT 7 /* Wait 7 PCI clocks (212 ns) before
- * polling RDY. */
+#define S626_DEBI_TOUT 7 /*
+ * Wait 7 PCI clocks (212 ns) before
+ * polling RDY.
+ */
/* Intel byte lane steering (pass through all byte lanes). */
#define S626_DEBI_SWAP S626_DEBI_CFG_SWAP_NONE
#elif S626_PLATFORM == S626_MOTOROLA
-#define S626_DEBI_TOUT 15 /* Wait 15 PCI clocks (454 ns) maximum
- * before timing out. */
+#define S626_DEBI_TOUT 15 /*
+ * Wait 15 PCI clocks (454 ns) maximum
+ * before timing out.
+ */
/* Motorola byte lane steering. */
#define S626_DEBI_SWAP S626_DEBI_CFG_SWAP_2
@@ -429,10 +537,14 @@
/* LoadSrc values: */
#define S626_LOADSRC_INDX 0 /* Preload core in response to Index. */
-#define S626_LOADSRC_OVER 1 /* Preload core in response to
- * Overflow. */
-#define S626_LOADSRCB_OVERA 2 /* Preload B core in response to
- * A Overflow. */
+#define S626_LOADSRC_OVER 1 /*
+ * Preload core in response to
+ * Overflow.
+ */
+#define S626_LOADSRCB_OVERA 2 /*
+ * Preload B core in response to
+ * A Overflow.
+ */
#define S626_LOADSRC_NONE 3 /* Never preload core. */
/* IntSrc values: */
@@ -469,10 +581,14 @@
#define S626_CNTSRC_SYSCLK_DOWN 3 /* System clock down */
/* ClkPol values: */
-#define S626_CLKPOL_POS 0 /* Counter/Extender clock is
- * active high. */
-#define S626_CLKPOL_NEG 1 /* Counter/Extender clock is
- * active low. */
+#define S626_CLKPOL_POS 0 /*
+ * Counter/Extender clock is
+ * active high.
+ */
+#define S626_CLKPOL_NEG 1 /*
+ * Counter/Extender clock is
+ * active low.
+ */
#define S626_CNTDIR_UP 0 /* Timer counts up. */
#define S626_CNTDIR_DOWN 1 /* Timer counts down. */
@@ -488,8 +604,10 @@
/* Sanity-check limits for parameters. */
-#define S626_NUM_COUNTERS 6 /* Maximum valid counter
- * logical channel number. */
+#define S626_NUM_COUNTERS 6 /*
+ * Maximum valid counter
+ * logical channel number.
+ */
#define S626_NUM_INTSOURCES 4
#define S626_NUM_LATCHSOURCES 4
#define S626_NUM_CLKMULTS 4
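Taken together, the s626.h hunks above are one mechanical cleanup: block comments that trailed a definition and wrapped onto a continuation line are rewritten in the multi-line comment style checkpatch prefers, where the closing delimiter gets its own line. A minimal before/after sketch of the convention (illustrative text only):

	/* Old style: the text starts on the opening line and
	 * the closing delimiter trails the last word. */

	/*
	 * Preferred style: the opening and closing delimiters
	 * sit on their own lines, body lines star-aligned.
	 */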
diff --git a/drivers/staging/comedi/drivers/serial2002.c b/drivers/staging/comedi/drivers/serial2002.c
index 7a1defcf2..0d33e520f 100644
--- a/drivers/staging/comedi/drivers/serial2002.c
+++ b/drivers/staging/comedi/drivers/serial2002.c
@@ -95,7 +95,7 @@ struct serial_data {
#define S2002_CFG_SIGN(x) (((x) >> 13) & 0x1)
#define S2002_CFG_BASE(x) (((x) >> 14) & 0xfffff)
-static long serial2002_tty_ioctl(struct file *f, unsigned op,
+static long serial2002_tty_ioctl(struct file *f, unsigned int op,
unsigned long param)
{
if (f->f_op->unlocked_ioctl)
@@ -379,7 +379,10 @@ static int serial2002_setup_subdevice(struct comedi_subdevice *s,
range_table_list[chan] =
(const struct comedi_lrange *)&range[j];
}
- maxdata_list[chan] = ((long long)1 << cfg[j].bits) - 1;
+ if (cfg[j].bits < 32)
+ maxdata_list[chan] = (1u << cfg[j].bits) - 1;
+ else
+ maxdata_list[chan] = 0xffffffff;
chan++;
}
}
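The serial2002 maxdata hunk is a correctness fix as much as a cleanup: once the computation uses plain 32-bit unsigned arithmetic, the bits == 32 case must be handled separately, because shifting a 32-bit value by 32 or more bits is undefined behavior in C (C11 6.5.7p3). A self-contained sketch of the guarded form (a hypothetical helper, not part of the driver):

	#include <stdint.h>

	/* Largest sample value for an n-bit channel, n <= 32. */
	static uint32_t max_sample_value(unsigned int bits)
	{
		if (bits < 32)
			return (1u << bits) - 1;	/* e.g. 12 bits -> 0xfff */
		return 0xffffffff;			/* full 32-bit range */
	}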
diff --git a/drivers/staging/emxx_udc/Kconfig b/drivers/staging/emxx_udc/Kconfig
index cc3402020..d7577096f 100644
--- a/drivers/staging/emxx_udc/Kconfig
+++ b/drivers/staging/emxx_udc/Kconfig
@@ -1,5 +1,5 @@
config USB_EMXX
- bool "EMXX USB Function Device Controller"
+ tristate "EMXX USB Function Device Controller"
depends on USB_GADGET && (ARCH_SHMOBILE || (ARM && COMPILE_TEST))
help
The Emma Mobile series of SoCs from Renesas Electronics and
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index 3bd91758b..3b56b2826 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -15,7 +15,7 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
@@ -39,9 +39,11 @@
#include "emxx_udc.h"
+#define DRIVER_DESC "EMXX UDC driver"
#define DMA_ADDR_INVALID (~(dma_addr_t)0)
static const char driver_name[] = "emxx_udc";
+static const char driver_desc[] = DRIVER_DESC;
/*===========================================================================*/
/* Prototype */
@@ -3296,6 +3298,28 @@ static void nbu2ss_drv_shutdown(struct platform_device *pdev)
}
/*-------------------------------------------------------------------------*/
+static int nbu2ss_drv_remove(struct platform_device *pdev)
+{
+ struct nbu2ss_udc *udc;
+ struct nbu2ss_ep *ep;
+ int i;
+
+ udc = &udc_controller;
+
+ for (i = 0; i < NUM_ENDPOINTS; i++) {
+ ep = &udc->ep[i];
+ if (ep->virt_buf)
+ dma_free_coherent(NULL, PAGE_SIZE,
+ (void *)ep->virt_buf, ep->phys_buf);
+ }
+
+ /* Interrupt Handler - Release */
+ free_irq(INT_VBUS, udc);
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
static int nbu2ss_drv_suspend(struct platform_device *pdev, pm_message_t state)
{
struct nbu2ss_udc *udc;
@@ -3347,12 +3371,16 @@ static int nbu2ss_drv_resume(struct platform_device *pdev)
static struct platform_driver udc_driver = {
.probe = nbu2ss_drv_probe,
.shutdown = nbu2ss_drv_shutdown,
+ .remove = nbu2ss_drv_remove,
.suspend = nbu2ss_drv_suspend,
.resume = nbu2ss_drv_resume,
.driver = {
- .name = driver_name,
- .suppress_bind_attrs = true,
+ .name = driver_name,
},
};
-builtin_platform_driver(udc_driver);
+module_platform_driver(udc_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Renesas Electronics Corporation");
+MODULE_LICENSE("GPL");
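The emxx_udc changes follow the usual recipe for promoting a built-in-only driver to a loadable module: the Kconfig symbol becomes tristate, <linux/init.h> gives way to <linux/module.h>, a .remove callback undoes what probe set up, builtin_platform_driver() becomes module_platform_driver(), and MODULE_* metadata is added (suppress_bind_attrs is dropped so the device can be unbound). A skeleton of the pattern, with hypothetical names:

	#include <linux/module.h>
	#include <linux/platform_device.h>

	static int demo_probe(struct platform_device *pdev)
	{
		return 0;	/* acquire resources here */
	}

	static int demo_remove(struct platform_device *pdev)
	{
		return 0;	/* release everything probe acquired */
	}

	static struct platform_driver demo_driver = {
		.probe	= demo_probe,
		.remove	= demo_remove,	/* needed once the module can unload */
		.driver	= {
			.name = "demo",
		},
	};
	module_platform_driver(demo_driver);	/* was builtin_platform_driver() */

	MODULE_DESCRIPTION("Skeleton of a builtin-to-module conversion");
	MODULE_LICENSE("GPL");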
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index 0c1a77caf..4c281df16 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -391,11 +391,11 @@ static void fbtft_update_display(struct fbtft_par *par, unsigned start_line,
if (unlikely(timeit)) {
ts_end = ktime_get();
- if (ktime_to_ns(par->update_time))
+ if (!ktime_to_ns(par->update_time))
par->update_time = ts_start;
- par->update_time = ts_start;
fps = ktime_us_delta(ts_start, par->update_time);
+ par->update_time = ts_start;
fps = fps ? 1000000 / fps : 0;
throughput = ktime_us_delta(ts_end, ts_start);
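The fbtft hunk fixes an order-of-operations bug: the old code unconditionally refreshed par->update_time before taking the delta, so the measured frame interval was always zero. The repaired flow, reduced to a hypothetical free-standing helper:

	#include <linux/ktime.h>

	/* Frames per second from the previous timestamp; the stored
	 * timestamp is refreshed only after the delta has been taken. */
	static unsigned long demo_fps(ktime_t *prev, ktime_t now)
	{
		s64 us = ktime_us_delta(now, *prev);

		*prev = now;
		return us ? 1000000 / us : 0;
	}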
diff --git a/drivers/staging/fsl-mc/bus/dpbp.c b/drivers/staging/fsl-mc/bus/dpbp.c
index c31fe1bca..fe271fbd6 100644
--- a/drivers/staging/fsl-mc/bus/dpbp.c
+++ b/drivers/staging/fsl-mc/bus/dpbp.c
@@ -1,4 +1,4 @@
-/* Copyright 2013-2014 Freescale Semiconductor Inc.
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -57,12 +57,14 @@ int dpbp_open(struct fsl_mc_io *mc_io,
u16 *token)
{
struct mc_command cmd = { 0 };
+ struct dpbp_cmd_open *cmd_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_OPEN,
cmd_flags, 0);
- cmd.params[0] |= mc_enc(0, 32, dpbp_id);
+ cmd_params = (struct dpbp_cmd_open *)cmd.params;
+ cmd_params->dpbp_id = cpu_to_le32(dpbp_id);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -70,7 +72,7 @@ int dpbp_open(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+ *token = mc_cmd_hdr_read_token(&cmd);
return err;
}
@@ -143,7 +145,7 @@ int dpbp_create(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+ *token = mc_cmd_hdr_read_token(&cmd);
return 0;
}
@@ -231,6 +233,7 @@ int dpbp_is_enabled(struct fsl_mc_io *mc_io,
int *en)
{
struct mc_command cmd = { 0 };
+ struct dpbp_rsp_is_enabled *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_IS_ENABLED, cmd_flags,
@@ -242,7 +245,8 @@ int dpbp_is_enabled(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *en = (int)mc_dec(cmd.params[0], 0, 1);
+ rsp_params = (struct dpbp_rsp_is_enabled *)cmd.params;
+ *en = rsp_params->enabled & DPBP_ENABLE;
return 0;
}
@@ -286,14 +290,16 @@ int dpbp_set_irq(struct fsl_mc_io *mc_io,
struct dpbp_irq_cfg *irq_cfg)
{
struct mc_command cmd = { 0 };
+ struct dpbp_cmd_set_irq *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 8, irq_index);
- cmd.params[0] |= mc_enc(32, 32, irq_cfg->val);
- cmd.params[1] |= mc_enc(0, 64, irq_cfg->addr);
- cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num);
+ cmd_params = (struct dpbp_cmd_set_irq *)cmd.params;
+ cmd_params->irq_index = irq_index;
+ cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
+ cmd_params->irq_addr = cpu_to_le64(irq_cfg->addr);
+ cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -319,12 +325,15 @@ int dpbp_get_irq(struct fsl_mc_io *mc_io,
struct dpbp_irq_cfg *irq_cfg)
{
struct mc_command cmd = { 0 };
+ struct dpbp_cmd_get_irq *cmd_params;
+ struct dpbp_rsp_get_irq *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ,
cmd_flags, token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dpbp_cmd_get_irq *)cmd.params;
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -332,10 +341,12 @@ int dpbp_get_irq(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32);
- irq_cfg->addr = (u64)mc_dec(cmd.params[1], 0, 64);
- irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32);
- *type = (int)mc_dec(cmd.params[2], 32, 32);
+ rsp_params = (struct dpbp_rsp_get_irq *)cmd.params;
+ irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
+ irq_cfg->addr = le64_to_cpu(rsp_params->irq_addr);
+ irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
+ *type = le32_to_cpu(rsp_params->type);
+
return 0;
}
@@ -361,12 +372,14 @@ int dpbp_set_irq_enable(struct fsl_mc_io *mc_io,
u8 en)
{
struct mc_command cmd = { 0 };
+ struct dpbp_cmd_set_irq_enable *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_ENABLE,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 8, en);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dpbp_cmd_set_irq_enable *)cmd.params;
+ cmd_params->enable = en & DPBP_ENABLE;
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -389,12 +402,15 @@ int dpbp_get_irq_enable(struct fsl_mc_io *mc_io,
u8 *en)
{
struct mc_command cmd = { 0 };
+ struct dpbp_cmd_get_irq_enable *cmd_params;
+ struct dpbp_rsp_get_irq_enable *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_ENABLE,
cmd_flags, token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dpbp_cmd_get_irq_enable *)cmd.params;
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -402,7 +418,8 @@ int dpbp_get_irq_enable(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *en = (u8)mc_dec(cmd.params[0], 0, 8);
+ rsp_params = (struct dpbp_rsp_get_irq_enable *)cmd.params;
+ *en = rsp_params->enabled & DPBP_ENABLE;
return 0;
}
@@ -429,12 +446,14 @@ int dpbp_set_irq_mask(struct fsl_mc_io *mc_io,
u32 mask)
{
struct mc_command cmd = { 0 };
+ struct dpbp_cmd_set_irq_mask *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_MASK,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, mask);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dpbp_cmd_set_irq_mask *)cmd.params;
+ cmd_params->mask = cpu_to_le32(mask);
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -460,12 +479,15 @@ int dpbp_get_irq_mask(struct fsl_mc_io *mc_io,
u32 *mask)
{
struct mc_command cmd = { 0 };
+ struct dpbp_cmd_get_irq_mask *cmd_params;
+ struct dpbp_rsp_get_irq_mask *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_MASK,
cmd_flags, token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dpbp_cmd_get_irq_mask *)cmd.params;
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -473,7 +495,9 @@ int dpbp_get_irq_mask(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *mask = (u32)mc_dec(cmd.params[0], 0, 32);
+ rsp_params = (struct dpbp_rsp_get_irq_mask *)cmd.params;
+ *mask = le32_to_cpu(rsp_params->mask);
+
return 0;
}
@@ -497,13 +521,16 @@ int dpbp_get_irq_status(struct fsl_mc_io *mc_io,
u32 *status)
{
struct mc_command cmd = { 0 };
+ struct dpbp_cmd_get_irq_status *cmd_params;
+ struct dpbp_rsp_get_irq_status *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_STATUS,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, *status);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dpbp_cmd_get_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(*status);
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -511,7 +538,9 @@ int dpbp_get_irq_status(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *status = (u32)mc_dec(cmd.params[0], 0, 32);
+ rsp_params = (struct dpbp_rsp_get_irq_status *)cmd.params;
+ *status = le32_to_cpu(rsp_params->status);
+
return 0;
}
@@ -535,12 +564,14 @@ int dpbp_clear_irq_status(struct fsl_mc_io *mc_io,
u32 status)
{
struct mc_command cmd = { 0 };
+ struct dpbp_cmd_clear_irq_status *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLEAR_IRQ_STATUS,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, status);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dpbp_cmd_clear_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(status);
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -562,6 +593,7 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io,
struct dpbp_attr *attr)
{
struct mc_command cmd = { 0 };
+ struct dpbp_rsp_get_attributes *rsp_params;
int err;
/* prepare command */
@@ -574,10 +606,12 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- attr->bpid = (u16)mc_dec(cmd.params[0], 16, 16);
- attr->id = (int)mc_dec(cmd.params[0], 32, 32);
- attr->version.major = (u16)mc_dec(cmd.params[1], 0, 16);
- attr->version.minor = (u16)mc_dec(cmd.params[1], 16, 16);
+ rsp_params = (struct dpbp_rsp_get_attributes *)cmd.params;
+ attr->bpid = le16_to_cpu(rsp_params->bpid);
+ attr->id = le32_to_cpu(rsp_params->id);
+ attr->version.major = le16_to_cpu(rsp_params->version_major);
+ attr->version.minor = le16_to_cpu(rsp_params->version_minor);
+
return 0;
}
EXPORT_SYMBOL(dpbp_get_attributes);
@@ -597,19 +631,19 @@ int dpbp_set_notifications(struct fsl_mc_io *mc_io,
struct dpbp_notification_cfg *cfg)
{
struct mc_command cmd = { 0 };
+ struct dpbp_cmd_set_notifications *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_NOTIFICATIONS,
- cmd_flags,
- token);
-
- cmd.params[0] |= mc_enc(0, 32, cfg->depletion_entry);
- cmd.params[0] |= mc_enc(32, 32, cfg->depletion_exit);
- cmd.params[1] |= mc_enc(0, 32, cfg->surplus_entry);
- cmd.params[1] |= mc_enc(32, 32, cfg->surplus_exit);
- cmd.params[2] |= mc_enc(0, 16, cfg->options);
- cmd.params[3] |= mc_enc(0, 64, cfg->message_ctx);
- cmd.params[4] |= mc_enc(0, 64, cfg->message_iova);
+ cmd_flags, token);
+ cmd_params = (struct dpbp_cmd_set_notifications *)cmd.params;
+ cmd_params->depletion_entry = cpu_to_le32(cfg->depletion_entry);
+ cmd_params->depletion_exit = cpu_to_le32(cfg->depletion_exit);
+ cmd_params->surplus_entry = cpu_to_le32(cfg->surplus_entry);
+ cmd_params->surplus_exit = cpu_to_le32(cfg->surplus_exit);
+ cmd_params->options = cpu_to_le16(cfg->options);
+ cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
+ cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -630,6 +664,7 @@ int dpbp_get_notifications(struct fsl_mc_io *mc_io,
struct dpbp_notification_cfg *cfg)
{
struct mc_command cmd = { 0 };
+ struct dpbp_rsp_get_notifications *rsp_params;
int err;
/* prepare command */
@@ -643,13 +678,14 @@ int dpbp_get_notifications(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- cfg->depletion_entry = (u32)mc_dec(cmd.params[0], 0, 32);
- cfg->depletion_exit = (u32)mc_dec(cmd.params[0], 32, 32);
- cfg->surplus_entry = (u32)mc_dec(cmd.params[1], 0, 32);
- cfg->surplus_exit = (u32)mc_dec(cmd.params[1], 32, 32);
- cfg->options = (u16)mc_dec(cmd.params[2], 0, 16);
- cfg->message_ctx = (u64)mc_dec(cmd.params[3], 0, 64);
- cfg->message_iova = (u64)mc_dec(cmd.params[4], 0, 64);
+ rsp_params = (struct dpbp_rsp_get_notifications *)cmd.params;
+ cfg->depletion_entry = le32_to_cpu(rsp_params->depletion_entry);
+ cfg->depletion_exit = le32_to_cpu(rsp_params->depletion_exit);
+ cfg->surplus_entry = le32_to_cpu(rsp_params->surplus_entry);
+ cfg->surplus_exit = le32_to_cpu(rsp_params->surplus_exit);
+ cfg->options = le16_to_cpu(rsp_params->options);
+ cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
+ cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
return 0;
}
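Everything in dpbp.c above, and the dpmcp, dpmng, and dprc files below, applies one conversion: the anonymous mc_enc()/mc_dec() bit-shift accessors are replaced by named command/response structs overlaid on cmd.params, with byte order made explicit through cpu_to_leNN()/leNN_to_cpu() at the hardware boundary. A condensed sketch of the encode side, with hypothetical names:

	struct demo_cmd_open {
		__le32 object_id;	/* wire layout of command word 0 */
	};

	static void demo_encode_open(struct mc_command *cmd, int id)
	{
		struct demo_cmd_open *cmd_params;

		/* was: cmd->params[0] |= mc_enc(0, 32, id); */
		cmd_params = (struct demo_cmd_open *)cmd->params;
		cmd_params->object_id = cpu_to_le32(id);
	}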
diff --git a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
index c9b52dd7b..d098a6d8f 100644
--- a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
@@ -1,4 +1,4 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -53,4 +53,88 @@
#define DPMCP_CMDID_GET_IRQ_MASK 0x015
#define DPMCP_CMDID_GET_IRQ_STATUS 0x016
+struct dpmcp_cmd_open {
+ __le32 dpmcp_id;
+};
+
+struct dpmcp_cmd_create {
+ __le32 portal_id;
+};
+
+struct dpmcp_cmd_set_irq {
+ /* cmd word 0 */
+ u8 irq_index;
+ u8 pad[3];
+ __le32 irq_val;
+ /* cmd word 1 */
+ __le64 irq_addr;
+ /* cmd word 2 */
+ __le32 irq_num;
+};
+
+struct dpmcp_cmd_get_irq {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dpmcp_rsp_get_irq {
+ /* cmd word 0 */
+ __le32 irq_val;
+ __le32 pad;
+ /* cmd word 1 */
+ __le64 irq_paddr;
+ /* cmd word 2 */
+ __le32 irq_num;
+ __le32 type;
+};
+
+#define DPMCP_ENABLE 0x1
+
+struct dpmcp_cmd_set_irq_enable {
+ u8 enable;
+ u8 pad[3];
+ u8 irq_index;
+};
+
+struct dpmcp_cmd_get_irq_enable {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dpmcp_rsp_get_irq_enable {
+ u8 enabled;
+};
+
+struct dpmcp_cmd_set_irq_mask {
+ __le32 mask;
+ u8 irq_index;
+};
+
+struct dpmcp_cmd_get_irq_mask {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dpmcp_rsp_get_irq_mask {
+ __le32 mask;
+};
+
+struct dpmcp_cmd_get_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dpmcp_rsp_get_irq_status {
+ __le32 status;
+};
+
+struct dpmcp_rsp_get_attributes {
+ /* response word 0 */
+ __le32 pad;
+ __le32 id;
+ /* response word 1 */
+ __le16 version_major;
+ __le16 version_minor;
+};
+
#endif /* _FSL_DPMCP_CMD_H */
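The structs above reproduce the wire offsets that the mc_enc() calls used to encode, but as plain struct layout; explicit pad members stand in for the unused bit ranges so each field lands at its documented offset. For instance, an 8-bit value at bit offset 32 of command word 0 maps as (hypothetical name):

	/* was: cmd.params[0] |= mc_enc(32, 8, irq_index); */
	struct demo_cmd_get_irq {
		__le32 pad;		/* bits  0..31 of command word 0, unused */
		u8 irq_index;		/* bits 32..39 of command word 0 */
	};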
diff --git a/drivers/staging/fsl-mc/bus/dpmcp.c b/drivers/staging/fsl-mc/bus/dpmcp.c
index fd6dd4e07..064401762 100644
--- a/drivers/staging/fsl-mc/bus/dpmcp.c
+++ b/drivers/staging/fsl-mc/bus/dpmcp.c
@@ -1,4 +1,4 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -57,12 +57,14 @@ int dpmcp_open(struct fsl_mc_io *mc_io,
u16 *token)
{
struct mc_command cmd = { 0 };
+ struct dpmcp_cmd_open *cmd_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPMCP_CMDID_OPEN,
cmd_flags, 0);
- cmd.params[0] |= mc_enc(0, 32, dpmcp_id);
+ cmd_params = (struct dpmcp_cmd_open *)cmd.params;
+ cmd_params->dpmcp_id = cpu_to_le32(dpmcp_id);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -70,7 +72,7 @@ int dpmcp_open(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+ *token = mc_cmd_hdr_read_token(&cmd);
return err;
}
@@ -127,12 +129,15 @@ int dpmcp_create(struct fsl_mc_io *mc_io,
u16 *token)
{
struct mc_command cmd = { 0 };
+ struct dpmcp_cmd_create *cmd_params;
+
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CREATE,
cmd_flags, 0);
- cmd.params[0] |= mc_enc(0, 32, cfg->portal_id);
+ cmd_params = (struct dpmcp_cmd_create *)cmd.params;
+ cmd_params->portal_id = cpu_to_le32(cfg->portal_id);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -140,7 +145,7 @@ int dpmcp_create(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+ *token = mc_cmd_hdr_read_token(&cmd);
return 0;
}
@@ -206,14 +211,16 @@ int dpmcp_set_irq(struct fsl_mc_io *mc_io,
struct dpmcp_irq_cfg *irq_cfg)
{
struct mc_command cmd = { 0 };
+ struct dpmcp_cmd_set_irq *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 8, irq_index);
- cmd.params[0] |= mc_enc(32, 32, irq_cfg->val);
- cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr);
- cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num);
+ cmd_params = (struct dpmcp_cmd_set_irq *)cmd.params;
+ cmd_params->irq_index = irq_index;
+ cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
+ cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
+ cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -239,12 +246,15 @@ int dpmcp_get_irq(struct fsl_mc_io *mc_io,
struct dpmcp_irq_cfg *irq_cfg)
{
struct mc_command cmd = { 0 };
+ struct dpmcp_cmd_get_irq *cmd_params;
+ struct dpmcp_rsp_get_irq *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ,
cmd_flags, token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dpmcp_cmd_get_irq *)cmd.params;
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -252,10 +262,11 @@ int dpmcp_get_irq(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32);
- irq_cfg->paddr = (u64)mc_dec(cmd.params[1], 0, 64);
- irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32);
- *type = (int)mc_dec(cmd.params[2], 32, 32);
+ rsp_params = (struct dpmcp_rsp_get_irq *)cmd.params;
+ irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
+ irq_cfg->paddr = le64_to_cpu(rsp_params->irq_paddr);
+ irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
+ *type = le32_to_cpu(rsp_params->type);
return 0;
}
@@ -281,12 +292,14 @@ int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io,
u8 en)
{
struct mc_command cmd = { 0 };
+ struct dpmcp_cmd_set_irq_enable *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_ENABLE,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 8, en);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dpmcp_cmd_set_irq_enable *)cmd.params;
+ cmd_params->enable = en & DPMCP_ENABLE;
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -309,12 +322,15 @@ int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io,
u8 *en)
{
struct mc_command cmd = { 0 };
+ struct dpmcp_cmd_get_irq_enable *cmd_params;
+ struct dpmcp_rsp_get_irq_enable *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_ENABLE,
cmd_flags, token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dpmcp_cmd_get_irq_enable *)cmd.params;
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -322,7 +338,8 @@ int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *en = (u8)mc_dec(cmd.params[0], 0, 8);
+ rsp_params = (struct dpmcp_rsp_get_irq_enable *)cmd.params;
+ *en = rsp_params->enabled & DPMCP_ENABLE;
return 0;
}
@@ -349,12 +366,15 @@ int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io,
u32 mask)
{
struct mc_command cmd = { 0 };
+ struct dpmcp_cmd_set_irq_mask *cmd_params;
+
/* prepare command */
cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_MASK,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, mask);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dpmcp_cmd_set_irq_mask *)cmd.params;
+ cmd_params->mask = cpu_to_le32(mask);
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -380,12 +400,16 @@ int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io,
u32 *mask)
{
struct mc_command cmd = { 0 };
+ struct dpmcp_cmd_get_irq_mask *cmd_params;
+ struct dpmcp_rsp_get_irq_mask *rsp_params;
+
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_MASK,
cmd_flags, token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dpmcp_cmd_get_irq_mask *)cmd.params;
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -393,7 +417,9 @@ int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *mask = (u32)mc_dec(cmd.params[0], 0, 32);
+ rsp_params = (struct dpmcp_rsp_get_irq_mask *)cmd.params;
+ *mask = le32_to_cpu(rsp_params->mask);
+
return 0;
}
@@ -417,12 +443,16 @@ int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
u32 *status)
{
struct mc_command cmd = { 0 };
+ struct dpmcp_cmd_get_irq_status *cmd_params;
+ struct dpmcp_rsp_get_irq_status *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_STATUS,
cmd_flags, token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dpmcp_cmd_get_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(*status);
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -430,7 +460,9 @@ int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *status = (u32)mc_dec(cmd.params[0], 0, 32);
+ rsp_params = (struct dpmcp_rsp_get_irq_status *)cmd.params;
+ *status = le32_to_cpu(rsp_params->status);
+
return 0;
}
@@ -450,6 +482,7 @@ int dpmcp_get_attributes(struct fsl_mc_io *mc_io,
struct dpmcp_attr *attr)
{
struct mc_command cmd = { 0 };
+ struct dpmcp_rsp_get_attributes *rsp_params;
int err;
/* prepare command */
@@ -462,8 +495,10 @@ int dpmcp_get_attributes(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- attr->id = (int)mc_dec(cmd.params[0], 32, 32);
- attr->version.major = (u16)mc_dec(cmd.params[1], 0, 16);
- attr->version.minor = (u16)mc_dec(cmd.params[1], 16, 16);
+ rsp_params = (struct dpmcp_rsp_get_attributes *)cmd.params;
+ attr->id = le32_to_cpu(rsp_params->id);
+ attr->version.major = le16_to_cpu(rsp_params->version_major);
+ attr->version.minor = le16_to_cpu(rsp_params->version_minor);
+
return 0;
}
diff --git a/drivers/staging/fsl-mc/bus/dpmng-cmd.h b/drivers/staging/fsl-mc/bus/dpmng-cmd.h
index ba8cfa963..779bf9c25 100644
--- a/drivers/staging/fsl-mc/bus/dpmng-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dpmng-cmd.h
@@ -1,4 +1,4 @@
-/* Copyright 2013-2014 Freescale Semiconductor Inc.
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -44,4 +44,14 @@
#define DPMNG_CMDID_GET_CONT_ID 0x830
#define DPMNG_CMDID_GET_VERSION 0x831
+struct dpmng_rsp_get_container_id {
+ __le32 container_id;
+};
+
+struct dpmng_rsp_get_version {
+ __le32 revision;
+ __le32 version_major;
+ __le32 version_minor;
+};
+
#endif /* __FSL_DPMNG_CMD_H */
diff --git a/drivers/staging/fsl-mc/bus/dpmng.c b/drivers/staging/fsl-mc/bus/dpmng.c
index f633fcd86..660bbe7ea 100644
--- a/drivers/staging/fsl-mc/bus/dpmng.c
+++ b/drivers/staging/fsl-mc/bus/dpmng.c
@@ -1,4 +1,4 @@
-/* Copyright 2013-2014 Freescale Semiconductor Inc.
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -48,6 +48,7 @@ int mc_get_version(struct fsl_mc_io *mc_io,
struct mc_version *mc_ver_info)
{
struct mc_command cmd = { 0 };
+ struct dpmng_rsp_get_version *rsp_params;
int err;
/* prepare command */
@@ -61,12 +62,14 @@ int mc_get_version(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- mc_ver_info->revision = mc_dec(cmd.params[0], 0, 32);
- mc_ver_info->major = mc_dec(cmd.params[0], 32, 32);
- mc_ver_info->minor = mc_dec(cmd.params[1], 0, 32);
+ rsp_params = (struct dpmng_rsp_get_version *)cmd.params;
+ mc_ver_info->revision = le32_to_cpu(rsp_params->revision);
+ mc_ver_info->major = le32_to_cpu(rsp_params->version_major);
+ mc_ver_info->minor = le32_to_cpu(rsp_params->version_minor);
return 0;
}
+EXPORT_SYMBOL(mc_get_version);
/**
* dpmng_get_container_id() - Get container ID associated with a given portal.
@@ -81,6 +84,7 @@ int dpmng_get_container_id(struct fsl_mc_io *mc_io,
int *container_id)
{
struct mc_command cmd = { 0 };
+ struct dpmng_rsp_get_container_id *rsp_params;
int err;
/* prepare command */
@@ -94,7 +98,8 @@ int dpmng_get_container_id(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *container_id = mc_dec(cmd.params[0], 0, 32);
+ rsp_params = (struct dpmng_rsp_get_container_id *)cmd.params;
+ *container_id = le32_to_cpu(rsp_params->container_id);
return 0;
}
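dpmng.c also gains EXPORT_SYMBOL(mc_get_version), so code built as a module can query the MC firmware version. A hypothetical caller (mc_io and dev assumed in scope), reading the fields exactly as they are decoded above:

	struct mc_version ver;
	int err = mc_get_version(mc_io, 0, &ver);	/* cmd_flags of 0 */

	if (!err)
		dev_info(dev, "MC firmware %u.%u (rev %u)\n",
			 ver.major, ver.minor, ver.revision);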
diff --git a/drivers/staging/fsl-mc/bus/dprc-cmd.h b/drivers/staging/fsl-mc/bus/dprc-cmd.h
index 9b854fa8e..bb127f4a3 100644
--- a/drivers/staging/fsl-mc/bus/dprc-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dprc-cmd.h
@@ -1,4 +1,4 @@
-/* Copyright 2013-2014 Freescale Semiconductor Inc.
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -84,4 +84,381 @@
#define DPRC_CMDID_GET_CONNECTION 0x16C
+struct dprc_cmd_open {
+ __le32 container_id;
+};
+
+struct dprc_cmd_create_container {
+ /* cmd word 0 */
+ __le32 options;
+ __le16 icid;
+ __le16 pad0;
+ /* cmd word 1 */
+ __le32 pad1;
+ __le32 portal_id;
+ /* cmd words 2-3 */
+ u8 label[16];
+};
+
+struct dprc_rsp_create_container {
+ /* response word 0 */
+ __le64 pad0;
+ /* response word 1 */
+ __le32 child_container_id;
+ __le32 pad1;
+ /* response word 2 */
+ __le64 child_portal_addr;
+};
+
+struct dprc_cmd_destroy_container {
+ __le32 child_container_id;
+};
+
+struct dprc_cmd_reset_container {
+ __le32 child_container_id;
+};
+
+struct dprc_cmd_set_irq {
+ /* cmd word 0 */
+ __le32 irq_val;
+ u8 irq_index;
+ u8 pad[3];
+ /* cmd word 1 */
+ __le64 irq_addr;
+ /* cmd word 2 */
+ __le32 irq_num;
+};
+
+struct dprc_cmd_get_irq {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dprc_rsp_get_irq {
+ /* response word 0 */
+ __le32 irq_val;
+ __le32 pad;
+ /* response word 1 */
+ __le64 irq_addr;
+ /* response word 2 */
+ __le32 irq_num;
+ __le32 type;
+};
+
+#define DPRC_ENABLE 0x1
+
+struct dprc_cmd_set_irq_enable {
+ u8 enable;
+ u8 pad[3];
+ u8 irq_index;
+};
+
+struct dprc_cmd_get_irq_enable {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dprc_rsp_get_irq_enable {
+ u8 enabled;
+};
+
+struct dprc_cmd_set_irq_mask {
+ __le32 mask;
+ u8 irq_index;
+};
+
+struct dprc_cmd_get_irq_mask {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dprc_rsp_get_irq_mask {
+ __le32 mask;
+};
+
+struct dprc_cmd_get_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dprc_rsp_get_irq_status {
+ __le32 status;
+};
+
+struct dprc_cmd_clear_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dprc_rsp_get_attributes {
+ /* response word 0 */
+ __le32 container_id;
+ __le16 icid;
+ __le16 pad;
+ /* response word 1 */
+ __le32 options;
+ __le32 portal_id;
+ /* response word 2 */
+ __le16 version_major;
+ __le16 version_minor;
+};
+
+struct dprc_cmd_set_res_quota {
+ /* cmd word 0 */
+ __le32 child_container_id;
+ __le16 quota;
+ __le16 pad;
+ /* cmd words 1-2 */
+ u8 type[16];
+};
+
+struct dprc_cmd_get_res_quota {
+ /* cmd word 0 */
+ __le32 child_container_id;
+ __le32 pad;
+ /* cmd word 1-2 */
+ u8 type[16];
+};
+
+struct dprc_rsp_get_res_quota {
+ __le32 pad;
+ __le16 quota;
+};
+
+struct dprc_cmd_assign {
+ /* cmd word 0 */
+ __le32 container_id;
+ __le32 options;
+ /* cmd word 1 */
+ __le32 num;
+ __le32 id_base_align;
+ /* cmd word 2-3 */
+ u8 type[16];
+};
+
+struct dprc_cmd_unassign {
+ /* cmd word 0 */
+ __le32 child_container_id;
+ __le32 options;
+ /* cmd word 1 */
+ __le32 num;
+ __le32 id_base_align;
+ /* cmd word 2-3 */
+ u8 type[16];
+};
+
+struct dprc_rsp_get_pool_count {
+ __le32 pool_count;
+};
+
+struct dprc_cmd_get_pool {
+ __le32 pool_index;
+};
+
+struct dprc_rsp_get_pool {
+ /* response word 0 */
+ __le64 pad;
+ /* response word 1-2 */
+ u8 type[16];
+};
+
+struct dprc_rsp_get_obj_count {
+ __le32 pad;
+ __le32 obj_count;
+};
+
+struct dprc_cmd_get_obj {
+ __le32 obj_index;
+};
+
+struct dprc_rsp_get_obj {
+ /* response word 0 */
+ __le32 pad0;
+ __le32 id;
+ /* response word 1 */
+ __le16 vendor;
+ u8 irq_count;
+ u8 region_count;
+ __le32 state;
+ /* response word 2 */
+ __le16 version_major;
+ __le16 version_minor;
+ __le16 flags;
+ __le16 pad1;
+ /* response word 3-4 */
+ u8 type[16];
+ /* response word 5-6 */
+ u8 label[16];
+};
+
+struct dprc_cmd_get_obj_desc {
+ /* cmd word 0 */
+ __le32 obj_id;
+ __le32 pad;
+ /* cmd word 1-2 */
+ u8 type[16];
+};
+
+struct dprc_rsp_get_obj_desc {
+ /* response word 0 */
+ __le32 pad0;
+ __le32 id;
+ /* response word 1 */
+ __le16 vendor;
+ u8 irq_count;
+ u8 region_count;
+ __le32 state;
+ /* response word 2 */
+ __le16 version_major;
+ __le16 version_minor;
+ __le16 flags;
+ __le16 pad1;
+ /* response word 3-4 */
+ u8 type[16];
+ /* response word 5-6 */
+ u8 label[16];
+};
+
+struct dprc_cmd_get_res_count {
+ /* cmd word 0 */
+ __le64 pad;
+ /* cmd word 1-2 */
+ u8 type[16];
+};
+
+struct dprc_rsp_get_res_count {
+ __le32 res_count;
+};
+
+struct dprc_cmd_get_res_ids {
+ /* cmd word 0 */
+ u8 pad0[5];
+ u8 iter_status;
+ __le16 pad1;
+ /* cmd word 1 */
+ __le32 base_id;
+ __le32 last_id;
+ /* cmd word 2-3 */
+ u8 type[16];
+};
+
+struct dprc_rsp_get_res_ids {
+ /* response word 0 */
+ u8 pad0[5];
+ u8 iter_status;
+ __le16 pad1;
+ /* response word 1 */
+ __le32 base_id;
+ __le32 last_id;
+};
+
+struct dprc_cmd_get_obj_region {
+ /* cmd word 0 */
+ __le32 obj_id;
+ __le16 pad0;
+ u8 region_index;
+ u8 pad1;
+ /* cmd word 1-2 */
+ __le64 pad2[2];
+ /* cmd word 3-4 */
+ u8 obj_type[16];
+};
+
+struct dprc_rsp_get_obj_region {
+ /* response word 0 */
+ __le64 pad;
+ /* response word 1 */
+ __le64 base_addr;
+ /* response word 2 */
+ __le32 size;
+};
+
+struct dprc_cmd_set_obj_label {
+ /* cmd word 0 */
+ __le32 obj_id;
+ __le32 pad;
+ /* cmd word 1-2 */
+ u8 label[16];
+ /* cmd word 3-4 */
+ u8 obj_type[16];
+};
+
+struct dprc_cmd_set_obj_irq {
+ /* cmd word 0 */
+ __le32 irq_val;
+ u8 irq_index;
+ u8 pad[3];
+ /* cmd word 1 */
+ __le64 irq_addr;
+ /* cmd word 2 */
+ __le32 irq_num;
+ __le32 obj_id;
+ /* cmd word 3-4 */
+ u8 obj_type[16];
+};
+
+struct dprc_cmd_get_obj_irq {
+ /* cmd word 0 */
+ __le32 obj_id;
+ u8 irq_index;
+ u8 pad[3];
+ /* cmd word 1-2 */
+ u8 obj_type[16];
+};
+
+struct dprc_rsp_get_obj_irq {
+ /* response word 0 */
+ __le32 irq_val;
+ __le32 pad;
+ /* response word 1 */
+ __le64 irq_addr;
+ /* response word 2 */
+ __le32 irq_num;
+ __le32 type;
+};
+
+struct dprc_cmd_connect {
+ /* cmd word 0 */
+ __le32 ep1_id;
+ __le32 ep1_interface_id;
+ /* cmd word 1 */
+ __le32 ep2_id;
+ __le32 ep2_interface_id;
+ /* cmd word 2-3 */
+ u8 ep1_type[16];
+ /* cmd word 4 */
+ __le32 max_rate;
+ __le32 committed_rate;
+ /* cmd word 5-6 */
+ u8 ep2_type[16];
+};
+
+struct dprc_cmd_disconnect {
+ /* cmd word 0 */
+ __le32 id;
+ __le32 interface_id;
+ /* cmd word 1-2 */
+ u8 type[16];
+};
+
+struct dprc_cmd_get_connection {
+ /* cmd word 0 */
+ __le32 ep1_id;
+ __le32 ep1_interface_id;
+ /* cmd word 1-2 */
+ u8 ep1_type[16];
+};
+
+struct dprc_rsp_get_connection {
+ /* response word 0-2 */
+ __le64 pad[3];
+ /* response word 3 */
+ __le32 ep2_id;
+ __le32 ep2_interface_id;
+ /* response word 4-5 */
+ u8 ep2_type[16];
+ /* response word 6 */
+ __le32 state;
+};
+
#endif /* _FSL_DPRC_CMD_H */
diff --git a/drivers/staging/fsl-mc/bus/dprc-driver.c b/drivers/staging/fsl-mc/bus/dprc-driver.c
index 7fc47173c..d2a71f14b 100644
--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
@@ -760,7 +760,12 @@ error_cleanup_msi_domain:
*/
static void dprc_teardown_irq(struct fsl_mc_device *mc_dev)
{
+ struct fsl_mc_device_irq *irq = mc_dev->irqs[0];
+
(void)disable_dprc_irq(mc_dev);
+
+ devm_free_irq(&mc_dev->dev, irq->msi_desc->irq, &mc_dev->dev);
+
fsl_mc_free_irqs(mc_dev);
}
@@ -791,21 +796,28 @@ static int dprc_remove(struct fsl_mc_device *mc_dev)
dprc_teardown_irq(mc_dev);
device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove);
+
+ if (dev_get_msi_domain(&mc_dev->dev)) {
+ fsl_mc_cleanup_irq_pool(mc_bus);
+ dev_set_msi_domain(&mc_dev->dev, NULL);
+ }
+
dprc_cleanup_all_resource_pools(mc_dev);
+
error = dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
if (error < 0)
dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", error);
- if (dev_get_msi_domain(&mc_dev->dev)) {
- fsl_mc_cleanup_irq_pool(mc_bus);
- dev_set_msi_domain(&mc_dev->dev, NULL);
+ if (!fsl_mc_is_root_dprc(&mc_dev->dev)) {
+ fsl_destroy_mc_io(mc_dev->mc_io);
+ mc_dev->mc_io = NULL;
}
dev_info(&mc_dev->dev, "DPRC device unbound from driver");
return 0;
}
-static const struct fsl_mc_device_match_id match_id_table[] = {
+static const struct fsl_mc_device_id match_id_table[] = {
{
.vendor = FSL_MC_VENDOR_FREESCALE,
.obj_type = "dprc"},
diff --git a/drivers/staging/fsl-mc/bus/dprc.c b/drivers/staging/fsl-mc/bus/dprc.c
index a2c47377c..c26054981 100644
--- a/drivers/staging/fsl-mc/bus/dprc.c
+++ b/drivers/staging/fsl-mc/bus/dprc.c
@@ -1,4 +1,4 @@
-/* Copyright 2013-2014 Freescale Semiconductor Inc.
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -51,12 +51,14 @@ int dprc_open(struct fsl_mc_io *mc_io,
u16 *token)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_open *cmd_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_OPEN, cmd_flags,
0);
- cmd.params[0] |= mc_enc(0, 32, container_id);
+ cmd_params = (struct dprc_cmd_open *)cmd.params;
+ cmd_params->container_id = cpu_to_le32(container_id);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -64,7 +66,7 @@ int dprc_open(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+ *token = mc_cmd_hdr_read_token(&cmd);
return 0;
}
@@ -115,28 +117,17 @@ int dprc_create_container(struct fsl_mc_io *mc_io,
u64 *child_portal_offset)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_create_container *cmd_params;
+ struct dprc_rsp_create_container *rsp_params;
int err;
/* prepare command */
- cmd.params[0] |= mc_enc(32, 16, cfg->icid);
- cmd.params[0] |= mc_enc(0, 32, cfg->options);
- cmd.params[1] |= mc_enc(32, 32, cfg->portal_id);
- cmd.params[2] |= mc_enc(0, 8, cfg->label[0]);
- cmd.params[2] |= mc_enc(8, 8, cfg->label[1]);
- cmd.params[2] |= mc_enc(16, 8, cfg->label[2]);
- cmd.params[2] |= mc_enc(24, 8, cfg->label[3]);
- cmd.params[2] |= mc_enc(32, 8, cfg->label[4]);
- cmd.params[2] |= mc_enc(40, 8, cfg->label[5]);
- cmd.params[2] |= mc_enc(48, 8, cfg->label[6]);
- cmd.params[2] |= mc_enc(56, 8, cfg->label[7]);
- cmd.params[3] |= mc_enc(0, 8, cfg->label[8]);
- cmd.params[3] |= mc_enc(8, 8, cfg->label[9]);
- cmd.params[3] |= mc_enc(16, 8, cfg->label[10]);
- cmd.params[3] |= mc_enc(24, 8, cfg->label[11]);
- cmd.params[3] |= mc_enc(32, 8, cfg->label[12]);
- cmd.params[3] |= mc_enc(40, 8, cfg->label[13]);
- cmd.params[3] |= mc_enc(48, 8, cfg->label[14]);
- cmd.params[3] |= mc_enc(56, 8, cfg->label[15]);
+ cmd_params = (struct dprc_cmd_create_container *)cmd.params;
+ cmd_params->options = cpu_to_le32(cfg->options);
+ cmd_params->icid = cpu_to_le16(cfg->icid);
+ cmd_params->portal_id = cpu_to_le32(cfg->portal_id);
+ strncpy(cmd_params->label, cfg->label, 16);
+ cmd_params->label[15] = '\0';
cmd.header = mc_encode_cmd_header(DPRC_CMDID_CREATE_CONT,
cmd_flags, token);
@@ -147,8 +138,9 @@ int dprc_create_container(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *child_container_id = mc_dec(cmd.params[1], 0, 32);
- *child_portal_offset = mc_dec(cmd.params[2], 0, 64);
+ rsp_params = (struct dprc_rsp_create_container *)cmd.params;
+ *child_container_id = le32_to_cpu(rsp_params->child_container_id);
+ *child_portal_offset = le64_to_cpu(rsp_params->child_portal_addr);
return 0;
}
@@ -181,11 +173,13 @@ int dprc_destroy_container(struct fsl_mc_io *mc_io,
int child_container_id)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_destroy_container *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_DESTROY_CONT,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, child_container_id);
+ cmd_params = (struct dprc_cmd_destroy_container *)cmd.params;
+ cmd_params->child_container_id = cpu_to_le32(child_container_id);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -219,11 +213,13 @@ int dprc_reset_container(struct fsl_mc_io *mc_io,
int child_container_id)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_reset_container *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_RESET_CONT,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, child_container_id);
+ cmd_params = (struct dprc_cmd_reset_container *)cmd.params;
+ cmd_params->child_container_id = cpu_to_le32(child_container_id);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -249,13 +245,16 @@ int dprc_get_irq(struct fsl_mc_io *mc_io,
struct dprc_irq_cfg *irq_cfg)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_get_irq *cmd_params;
+ struct dprc_rsp_get_irq *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ,
cmd_flags,
token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dprc_cmd_get_irq *)cmd.params;
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -263,10 +262,11 @@ int dprc_get_irq(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- irq_cfg->val = mc_dec(cmd.params[0], 0, 32);
- irq_cfg->paddr = mc_dec(cmd.params[1], 0, 64);
- irq_cfg->irq_num = mc_dec(cmd.params[2], 0, 32);
- *type = mc_dec(cmd.params[2], 32, 32);
+ rsp_params = (struct dprc_rsp_get_irq *)cmd.params;
+ irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
+ irq_cfg->paddr = le64_to_cpu(rsp_params->irq_addr);
+ irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
+ *type = le32_to_cpu(rsp_params->type);
return 0;
}
@@ -288,15 +288,17 @@ int dprc_set_irq(struct fsl_mc_io *mc_io,
struct dprc_irq_cfg *irq_cfg)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_set_irq *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ,
cmd_flags,
token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
- cmd.params[0] |= mc_enc(0, 32, irq_cfg->val);
- cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr);
- cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num);
+ cmd_params = (struct dprc_cmd_set_irq *)cmd.params;
+ cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
+ cmd_params->irq_index = irq_index;
+ cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
+ cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -319,12 +321,15 @@ int dprc_get_irq_enable(struct fsl_mc_io *mc_io,
u8 *en)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_get_irq_enable *cmd_params;
+ struct dprc_rsp_get_irq_enable *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_ENABLE,
cmd_flags, token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dprc_cmd_get_irq_enable *)cmd.params;
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -332,7 +337,8 @@ int dprc_get_irq_enable(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *en = mc_dec(cmd.params[0], 0, 8);
+ rsp_params = (struct dprc_rsp_get_irq_enable *)cmd.params;
+ *en = rsp_params->enabled & DPRC_ENABLE;
return 0;
}
@@ -359,12 +365,14 @@ int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
u8 en)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_set_irq_enable *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_ENABLE,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 8, en);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dprc_cmd_set_irq_enable *)cmd.params;
+ cmd_params->enable = en & DPRC_ENABLE;
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -390,12 +398,15 @@ int dprc_get_irq_mask(struct fsl_mc_io *mc_io,
u32 *mask)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_get_irq_mask *cmd_params;
+ struct dprc_rsp_get_irq_mask *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_MASK,
cmd_flags, token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dprc_cmd_get_irq_mask *)cmd.params;
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -403,7 +414,8 @@ int dprc_get_irq_mask(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *mask = mc_dec(cmd.params[0], 0, 32);
+ rsp_params = (struct dprc_rsp_get_irq_mask *)cmd.params;
+ *mask = le32_to_cpu(rsp_params->mask);
return 0;
}
@@ -431,12 +443,14 @@ int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
u32 mask)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_set_irq_mask *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_MASK,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, mask);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dprc_cmd_set_irq_mask *)cmd.params;
+ cmd_params->mask = cpu_to_le32(mask);
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -461,13 +475,16 @@ int dprc_get_irq_status(struct fsl_mc_io *mc_io,
u32 *status)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_get_irq_status *cmd_params;
+ struct dprc_rsp_get_irq_status *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_STATUS,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, *status);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dprc_cmd_get_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(*status);
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -475,7 +492,8 @@ int dprc_get_irq_status(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *status = mc_dec(cmd.params[0], 0, 32);
+ rsp_params = (struct dprc_rsp_get_irq_status *)cmd.params;
+ *status = le32_to_cpu(rsp_params->status);
return 0;
}
@@ -499,12 +517,14 @@ int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
u32 status)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_clear_irq_status *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLEAR_IRQ_STATUS,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, status);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
+ cmd_params = (struct dprc_cmd_clear_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(status);
+ cmd_params->irq_index = irq_index;
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -525,6 +545,7 @@ int dprc_get_attributes(struct fsl_mc_io *mc_io,
struct dprc_attributes *attr)
{
struct mc_command cmd = { 0 };
+ struct dprc_rsp_get_attributes *rsp_params;
int err;
/* prepare command */
@@ -538,12 +559,13 @@ int dprc_get_attributes(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- attr->container_id = mc_dec(cmd.params[0], 0, 32);
- attr->icid = mc_dec(cmd.params[0], 32, 16);
- attr->options = mc_dec(cmd.params[1], 0, 32);
- attr->portal_id = mc_dec(cmd.params[1], 32, 32);
- attr->version.major = mc_dec(cmd.params[2], 0, 16);
- attr->version.minor = mc_dec(cmd.params[2], 16, 16);
+ rsp_params = (struct dprc_rsp_get_attributes *)cmd.params;
+ attr->container_id = le32_to_cpu(rsp_params->container_id);
+ attr->icid = le16_to_cpu(rsp_params->icid);
+ attr->options = le32_to_cpu(rsp_params->options);
+ attr->portal_id = le32_to_cpu(rsp_params->portal_id);
+ attr->version.major = le16_to_cpu(rsp_params->version_major);
+ attr->version.minor = le16_to_cpu(rsp_params->version_minor);
return 0;
}
@@ -581,28 +603,16 @@ int dprc_set_res_quota(struct fsl_mc_io *mc_io,
u16 quota)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_set_res_quota *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_RES_QUOTA,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, child_container_id);
- cmd.params[0] |= mc_enc(32, 16, quota);
- cmd.params[1] |= mc_enc(0, 8, type[0]);
- cmd.params[1] |= mc_enc(8, 8, type[1]);
- cmd.params[1] |= mc_enc(16, 8, type[2]);
- cmd.params[1] |= mc_enc(24, 8, type[3]);
- cmd.params[1] |= mc_enc(32, 8, type[4]);
- cmd.params[1] |= mc_enc(40, 8, type[5]);
- cmd.params[1] |= mc_enc(48, 8, type[6]);
- cmd.params[1] |= mc_enc(56, 8, type[7]);
- cmd.params[2] |= mc_enc(0, 8, type[8]);
- cmd.params[2] |= mc_enc(8, 8, type[9]);
- cmd.params[2] |= mc_enc(16, 8, type[10]);
- cmd.params[2] |= mc_enc(24, 8, type[11]);
- cmd.params[2] |= mc_enc(32, 8, type[12]);
- cmd.params[2] |= mc_enc(40, 8, type[13]);
- cmd.params[2] |= mc_enc(48, 8, type[14]);
- cmd.params[2] |= mc_enc(56, 8, '\0');
+ cmd_params = (struct dprc_cmd_set_res_quota *)cmd.params;
+ cmd_params->child_container_id = cpu_to_le32(child_container_id);
+ cmd_params->quota = cpu_to_le16(quota);
+ strncpy(cmd_params->type, type, 16);
+ cmd_params->type[15] = '\0';
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -631,28 +641,17 @@ int dprc_get_res_quota(struct fsl_mc_io *mc_io,
u16 *quota)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_get_res_quota *cmd_params;
+ struct dprc_rsp_get_res_quota *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_QUOTA,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, child_container_id);
- cmd.params[1] |= mc_enc(0, 8, type[0]);
- cmd.params[1] |= mc_enc(8, 8, type[1]);
- cmd.params[1] |= mc_enc(16, 8, type[2]);
- cmd.params[1] |= mc_enc(24, 8, type[3]);
- cmd.params[1] |= mc_enc(32, 8, type[4]);
- cmd.params[1] |= mc_enc(40, 8, type[5]);
- cmd.params[1] |= mc_enc(48, 8, type[6]);
- cmd.params[1] |= mc_enc(56, 8, type[7]);
- cmd.params[2] |= mc_enc(0, 8, type[8]);
- cmd.params[2] |= mc_enc(8, 8, type[9]);
- cmd.params[2] |= mc_enc(16, 8, type[10]);
- cmd.params[2] |= mc_enc(24, 8, type[11]);
- cmd.params[2] |= mc_enc(32, 8, type[12]);
- cmd.params[2] |= mc_enc(40, 8, type[13]);
- cmd.params[2] |= mc_enc(48, 8, type[14]);
- cmd.params[2] |= mc_enc(56, 8, '\0');
+ cmd_params = (struct dprc_cmd_get_res_quota *)cmd.params;
+ cmd_params->child_container_id = cpu_to_le32(child_container_id);
+ strncpy(cmd_params->type, type, 16);
+ cmd_params->type[15] = '\0';
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -660,7 +659,8 @@ int dprc_get_res_quota(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *quota = mc_dec(cmd.params[0], 32, 16);
+ rsp_params = (struct dprc_rsp_get_res_quota *)cmd.params;
+ *quota = le16_to_cpu(rsp_params->quota);
return 0;
}
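
Note: every 16-byte object-type string in these commands is now copied the same way — strncpy() NUL-pads short names but leaves a full 16-character name unterminated, which is why each copy is followed by a forced type[15] = '\0'. A standalone sketch of the idiom (copy_obj_type() is illustrative, not a kernel helper):

#include <stdio.h>
#include <string.h>

static void copy_obj_type(char dst[16], const char *src)
{
        strncpy(dst, src, 16);  /* pads short names with NULs */
        dst[15] = '\0';         /* force termination for 16-char names */
}

int main(void)
{
        char buf[16];

        copy_obj_type(buf, "dpni");
        printf("%s\n", buf);                    /* "dpni" */
        copy_obj_type(buf, "exactly16chars!!"); /* would stay unterminated */
        printf("%s\n", buf);                    /* truncated to 15 chars, safe */
        return 0;
}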
@@ -704,30 +704,18 @@ int dprc_assign(struct fsl_mc_io *mc_io,
struct dprc_res_req *res_req)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_assign *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_ASSIGN,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, container_id);
- cmd.params[0] |= mc_enc(32, 32, res_req->options);
- cmd.params[1] |= mc_enc(0, 32, res_req->num);
- cmd.params[1] |= mc_enc(32, 32, res_req->id_base_align);
- cmd.params[2] |= mc_enc(0, 8, res_req->type[0]);
- cmd.params[2] |= mc_enc(8, 8, res_req->type[1]);
- cmd.params[2] |= mc_enc(16, 8, res_req->type[2]);
- cmd.params[2] |= mc_enc(24, 8, res_req->type[3]);
- cmd.params[2] |= mc_enc(32, 8, res_req->type[4]);
- cmd.params[2] |= mc_enc(40, 8, res_req->type[5]);
- cmd.params[2] |= mc_enc(48, 8, res_req->type[6]);
- cmd.params[2] |= mc_enc(56, 8, res_req->type[7]);
- cmd.params[3] |= mc_enc(0, 8, res_req->type[8]);
- cmd.params[3] |= mc_enc(8, 8, res_req->type[9]);
- cmd.params[3] |= mc_enc(16, 8, res_req->type[10]);
- cmd.params[3] |= mc_enc(24, 8, res_req->type[11]);
- cmd.params[3] |= mc_enc(32, 8, res_req->type[12]);
- cmd.params[3] |= mc_enc(40, 8, res_req->type[13]);
- cmd.params[3] |= mc_enc(48, 8, res_req->type[14]);
- cmd.params[3] |= mc_enc(56, 8, res_req->type[15]);
+ cmd_params = (struct dprc_cmd_assign *)cmd.params;
+ cmd_params->container_id = cpu_to_le32(container_id);
+ cmd_params->options = cpu_to_le32(res_req->options);
+ cmd_params->num = cpu_to_le32(res_req->num);
+ cmd_params->id_base_align = cpu_to_le32(res_req->id_base_align);
+ strncpy(cmd_params->type, res_req->type, 16);
+ cmd_params->type[15] = '\0';
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -755,31 +743,19 @@ int dprc_unassign(struct fsl_mc_io *mc_io,
struct dprc_res_req *res_req)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_unassign *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_UNASSIGN,
cmd_flags,
token);
- cmd.params[0] |= mc_enc(0, 32, child_container_id);
- cmd.params[0] |= mc_enc(32, 32, res_req->options);
- cmd.params[1] |= mc_enc(0, 32, res_req->num);
- cmd.params[1] |= mc_enc(32, 32, res_req->id_base_align);
- cmd.params[2] |= mc_enc(0, 8, res_req->type[0]);
- cmd.params[2] |= mc_enc(8, 8, res_req->type[1]);
- cmd.params[2] |= mc_enc(16, 8, res_req->type[2]);
- cmd.params[2] |= mc_enc(24, 8, res_req->type[3]);
- cmd.params[2] |= mc_enc(32, 8, res_req->type[4]);
- cmd.params[2] |= mc_enc(40, 8, res_req->type[5]);
- cmd.params[2] |= mc_enc(48, 8, res_req->type[6]);
- cmd.params[2] |= mc_enc(56, 8, res_req->type[7]);
- cmd.params[3] |= mc_enc(0, 8, res_req->type[8]);
- cmd.params[3] |= mc_enc(8, 8, res_req->type[9]);
- cmd.params[3] |= mc_enc(16, 8, res_req->type[10]);
- cmd.params[3] |= mc_enc(24, 8, res_req->type[11]);
- cmd.params[3] |= mc_enc(32, 8, res_req->type[12]);
- cmd.params[3] |= mc_enc(40, 8, res_req->type[13]);
- cmd.params[3] |= mc_enc(48, 8, res_req->type[14]);
- cmd.params[3] |= mc_enc(56, 8, res_req->type[15]);
+ cmd_params = (struct dprc_cmd_unassign *)cmd.params;
+ cmd_params->child_container_id = cpu_to_le32(child_container_id);
+ cmd_params->options = cpu_to_le32(res_req->options);
+ cmd_params->num = cpu_to_le32(res_req->num);
+ cmd_params->id_base_align = cpu_to_le32(res_req->id_base_align);
+ strncpy(cmd_params->type, res_req->type, 16);
+ cmd_params->type[15] = '\0';
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -800,6 +776,7 @@ int dprc_get_pool_count(struct fsl_mc_io *mc_io,
int *pool_count)
{
struct mc_command cmd = { 0 };
+ struct dprc_rsp_get_pool_count *rsp_params;
int err;
/* prepare command */
@@ -812,7 +789,8 @@ int dprc_get_pool_count(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *pool_count = mc_dec(cmd.params[0], 0, 32);
+ rsp_params = (struct dprc_rsp_get_pool_count *)cmd.params;
+ *pool_count = le32_to_cpu(rsp_params->pool_count);
return 0;
}
@@ -839,13 +817,16 @@ int dprc_get_pool(struct fsl_mc_io *mc_io,
char *type)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_get_pool *cmd_params;
+ struct dprc_rsp_get_pool *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_POOL,
cmd_flags,
token);
- cmd.params[0] |= mc_enc(0, 32, pool_index);
+ cmd_params = (struct dprc_cmd_get_pool *)cmd.params;
+ cmd_params->pool_index = cpu_to_le32(pool_index);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -853,21 +834,8 @@ int dprc_get_pool(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- type[0] = mc_dec(cmd.params[1], 0, 8);
- type[1] = mc_dec(cmd.params[1], 8, 8);
- type[2] = mc_dec(cmd.params[1], 16, 8);
- type[3] = mc_dec(cmd.params[1], 24, 8);
- type[4] = mc_dec(cmd.params[1], 32, 8);
- type[5] = mc_dec(cmd.params[1], 40, 8);
- type[6] = mc_dec(cmd.params[1], 48, 8);
- type[7] = mc_dec(cmd.params[1], 56, 8);
- type[8] = mc_dec(cmd.params[2], 0, 8);
- type[9] = mc_dec(cmd.params[2], 8, 8);
- type[10] = mc_dec(cmd.params[2], 16, 8);
- type[11] = mc_dec(cmd.params[2], 24, 8);
- type[12] = mc_dec(cmd.params[2], 32, 8);
- type[13] = mc_dec(cmd.params[2], 40, 8);
- type[14] = mc_dec(cmd.params[2], 48, 8);
+ rsp_params = (struct dprc_rsp_get_pool *)cmd.params;
+ strncpy(type, rsp_params->type, 16);
type[15] = '\0';
return 0;
@@ -888,6 +856,7 @@ int dprc_get_obj_count(struct fsl_mc_io *mc_io,
int *obj_count)
{
struct mc_command cmd = { 0 };
+ struct dprc_rsp_get_obj_count *rsp_params;
int err;
/* prepare command */
@@ -900,7 +869,8 @@ int dprc_get_obj_count(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *obj_count = mc_dec(cmd.params[0], 32, 32);
+ rsp_params = (struct dprc_rsp_get_obj_count *)cmd.params;
+ *obj_count = le32_to_cpu(rsp_params->obj_count);
return 0;
}
@@ -928,13 +898,16 @@ int dprc_get_obj(struct fsl_mc_io *mc_io,
struct dprc_obj_desc *obj_desc)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_get_obj *cmd_params;
+ struct dprc_rsp_get_obj *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ,
cmd_flags,
token);
- cmd.params[0] |= mc_enc(0, 32, obj_index);
+ cmd_params = (struct dprc_cmd_get_obj *)cmd.params;
+ cmd_params->obj_index = cpu_to_le32(obj_index);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -942,45 +915,18 @@ int dprc_get_obj(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- obj_desc->id = mc_dec(cmd.params[0], 32, 32);
- obj_desc->vendor = mc_dec(cmd.params[1], 0, 16);
- obj_desc->irq_count = mc_dec(cmd.params[1], 16, 8);
- obj_desc->region_count = mc_dec(cmd.params[1], 24, 8);
- obj_desc->state = mc_dec(cmd.params[1], 32, 32);
- obj_desc->ver_major = mc_dec(cmd.params[2], 0, 16);
- obj_desc->ver_minor = mc_dec(cmd.params[2], 16, 16);
- obj_desc->flags = mc_dec(cmd.params[2], 32, 16);
- obj_desc->type[0] = mc_dec(cmd.params[3], 0, 8);
- obj_desc->type[1] = mc_dec(cmd.params[3], 8, 8);
- obj_desc->type[2] = mc_dec(cmd.params[3], 16, 8);
- obj_desc->type[3] = mc_dec(cmd.params[3], 24, 8);
- obj_desc->type[4] = mc_dec(cmd.params[3], 32, 8);
- obj_desc->type[5] = mc_dec(cmd.params[3], 40, 8);
- obj_desc->type[6] = mc_dec(cmd.params[3], 48, 8);
- obj_desc->type[7] = mc_dec(cmd.params[3], 56, 8);
- obj_desc->type[8] = mc_dec(cmd.params[4], 0, 8);
- obj_desc->type[9] = mc_dec(cmd.params[4], 8, 8);
- obj_desc->type[10] = mc_dec(cmd.params[4], 16, 8);
- obj_desc->type[11] = mc_dec(cmd.params[4], 24, 8);
- obj_desc->type[12] = mc_dec(cmd.params[4], 32, 8);
- obj_desc->type[13] = mc_dec(cmd.params[4], 40, 8);
- obj_desc->type[14] = mc_dec(cmd.params[4], 48, 8);
+ rsp_params = (struct dprc_rsp_get_obj *)cmd.params;
+ obj_desc->id = le32_to_cpu(rsp_params->id);
+ obj_desc->vendor = le16_to_cpu(rsp_params->vendor);
+ obj_desc->irq_count = rsp_params->irq_count;
+ obj_desc->region_count = rsp_params->region_count;
+ obj_desc->state = le32_to_cpu(rsp_params->state);
+ obj_desc->ver_major = le16_to_cpu(rsp_params->version_major);
+ obj_desc->ver_minor = le16_to_cpu(rsp_params->version_minor);
+ obj_desc->flags = le16_to_cpu(rsp_params->flags);
+ strncpy(obj_desc->type, rsp_params->type, 16);
obj_desc->type[15] = '\0';
- obj_desc->label[0] = mc_dec(cmd.params[5], 0, 8);
- obj_desc->label[1] = mc_dec(cmd.params[5], 8, 8);
- obj_desc->label[2] = mc_dec(cmd.params[5], 16, 8);
- obj_desc->label[3] = mc_dec(cmd.params[5], 24, 8);
- obj_desc->label[4] = mc_dec(cmd.params[5], 32, 8);
- obj_desc->label[5] = mc_dec(cmd.params[5], 40, 8);
- obj_desc->label[6] = mc_dec(cmd.params[5], 48, 8);
- obj_desc->label[7] = mc_dec(cmd.params[5], 56, 8);
- obj_desc->label[8] = mc_dec(cmd.params[6], 0, 8);
- obj_desc->label[9] = mc_dec(cmd.params[6], 8, 8);
- obj_desc->label[10] = mc_dec(cmd.params[6], 16, 8);
- obj_desc->label[11] = mc_dec(cmd.params[6], 24, 8);
- obj_desc->label[12] = mc_dec(cmd.params[6], 32, 8);
- obj_desc->label[13] = mc_dec(cmd.params[6], 40, 8);
- obj_desc->label[14] = mc_dec(cmd.params[6], 48, 8);
+ strncpy(obj_desc->label, rsp_params->label, 16);
obj_desc->label[15] = '\0';
return 0;
}
@@ -1007,29 +953,18 @@ int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
struct dprc_obj_desc *obj_desc)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_get_obj_desc *cmd_params;
+ struct dprc_rsp_get_obj_desc *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_DESC,
cmd_flags,
token);
- cmd.params[0] |= mc_enc(0, 32, obj_id);
- cmd.params[1] |= mc_enc(0, 8, obj_type[0]);
- cmd.params[1] |= mc_enc(8, 8, obj_type[1]);
- cmd.params[1] |= mc_enc(16, 8, obj_type[2]);
- cmd.params[1] |= mc_enc(24, 8, obj_type[3]);
- cmd.params[1] |= mc_enc(32, 8, obj_type[4]);
- cmd.params[1] |= mc_enc(40, 8, obj_type[5]);
- cmd.params[1] |= mc_enc(48, 8, obj_type[6]);
- cmd.params[1] |= mc_enc(56, 8, obj_type[7]);
- cmd.params[2] |= mc_enc(0, 8, obj_type[8]);
- cmd.params[2] |= mc_enc(8, 8, obj_type[9]);
- cmd.params[2] |= mc_enc(16, 8, obj_type[10]);
- cmd.params[2] |= mc_enc(24, 8, obj_type[11]);
- cmd.params[2] |= mc_enc(32, 8, obj_type[12]);
- cmd.params[2] |= mc_enc(40, 8, obj_type[13]);
- cmd.params[2] |= mc_enc(48, 8, obj_type[14]);
- cmd.params[2] |= mc_enc(56, 8, obj_type[15]);
+ cmd_params = (struct dprc_cmd_get_obj_desc *)cmd.params;
+ cmd_params->obj_id = cpu_to_le32(obj_id);
+ strncpy(cmd_params->type, obj_type, 16);
+ cmd_params->type[15] = '\0';
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -1037,46 +972,19 @@ int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- obj_desc->id = (int)mc_dec(cmd.params[0], 32, 32);
- obj_desc->vendor = (u16)mc_dec(cmd.params[1], 0, 16);
- obj_desc->vendor = (u8)mc_dec(cmd.params[1], 16, 8);
- obj_desc->region_count = (u8)mc_dec(cmd.params[1], 24, 8);
- obj_desc->state = (u32)mc_dec(cmd.params[1], 32, 32);
- obj_desc->ver_major = (u16)mc_dec(cmd.params[2], 0, 16);
- obj_desc->ver_minor = (u16)mc_dec(cmd.params[2], 16, 16);
- obj_desc->flags = mc_dec(cmd.params[2], 32, 16);
- obj_desc->type[0] = (char)mc_dec(cmd.params[3], 0, 8);
- obj_desc->type[1] = (char)mc_dec(cmd.params[3], 8, 8);
- obj_desc->type[2] = (char)mc_dec(cmd.params[3], 16, 8);
- obj_desc->type[3] = (char)mc_dec(cmd.params[3], 24, 8);
- obj_desc->type[4] = (char)mc_dec(cmd.params[3], 32, 8);
- obj_desc->type[5] = (char)mc_dec(cmd.params[3], 40, 8);
- obj_desc->type[6] = (char)mc_dec(cmd.params[3], 48, 8);
- obj_desc->type[7] = (char)mc_dec(cmd.params[3], 56, 8);
- obj_desc->type[8] = (char)mc_dec(cmd.params[4], 0, 8);
- obj_desc->type[9] = (char)mc_dec(cmd.params[4], 8, 8);
- obj_desc->type[10] = (char)mc_dec(cmd.params[4], 16, 8);
- obj_desc->type[11] = (char)mc_dec(cmd.params[4], 24, 8);
- obj_desc->type[12] = (char)mc_dec(cmd.params[4], 32, 8);
- obj_desc->type[13] = (char)mc_dec(cmd.params[4], 40, 8);
- obj_desc->type[14] = (char)mc_dec(cmd.params[4], 48, 8);
- obj_desc->type[15] = (char)mc_dec(cmd.params[4], 56, 8);
- obj_desc->label[0] = (char)mc_dec(cmd.params[5], 0, 8);
- obj_desc->label[1] = (char)mc_dec(cmd.params[5], 8, 8);
- obj_desc->label[2] = (char)mc_dec(cmd.params[5], 16, 8);
- obj_desc->label[3] = (char)mc_dec(cmd.params[5], 24, 8);
- obj_desc->label[4] = (char)mc_dec(cmd.params[5], 32, 8);
- obj_desc->label[5] = (char)mc_dec(cmd.params[5], 40, 8);
- obj_desc->label[6] = (char)mc_dec(cmd.params[5], 48, 8);
- obj_desc->label[7] = (char)mc_dec(cmd.params[5], 56, 8);
- obj_desc->label[8] = (char)mc_dec(cmd.params[6], 0, 8);
- obj_desc->label[9] = (char)mc_dec(cmd.params[6], 8, 8);
- obj_desc->label[10] = (char)mc_dec(cmd.params[6], 16, 8);
- obj_desc->label[11] = (char)mc_dec(cmd.params[6], 24, 8);
- obj_desc->label[12] = (char)mc_dec(cmd.params[6], 32, 8);
- obj_desc->label[13] = (char)mc_dec(cmd.params[6], 40, 8);
- obj_desc->label[14] = (char)mc_dec(cmd.params[6], 48, 8);
- obj_desc->label[15] = (char)mc_dec(cmd.params[6], 56, 8);
+ rsp_params = (struct dprc_rsp_get_obj_desc *)cmd.params;
+ obj_desc->id = le32_to_cpu(rsp_params->id);
+ obj_desc->vendor = le16_to_cpu(rsp_params->vendor);
+ obj_desc->irq_count = rsp_params->irq_count;
+ obj_desc->region_count = rsp_params->region_count;
+ obj_desc->state = le32_to_cpu(rsp_params->state);
+ obj_desc->ver_major = le16_to_cpu(rsp_params->version_major);
+ obj_desc->ver_minor = le16_to_cpu(rsp_params->version_minor);
+ obj_desc->flags = le16_to_cpu(rsp_params->flags);
+ strncpy(obj_desc->type, rsp_params->type, 16);
+ obj_desc->type[15] = '\0';
+ strncpy(obj_desc->label, rsp_params->label, 16);
+ obj_desc->label[15] = '\0';
return 0;
}
@@ -1103,32 +1011,20 @@ int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
struct dprc_irq_cfg *irq_cfg)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_set_obj_irq *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_IRQ,
cmd_flags,
token);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
- cmd.params[0] |= mc_enc(0, 32, irq_cfg->val);
- cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr);
- cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num);
- cmd.params[2] |= mc_enc(32, 32, obj_id);
- cmd.params[3] |= mc_enc(0, 8, obj_type[0]);
- cmd.params[3] |= mc_enc(8, 8, obj_type[1]);
- cmd.params[3] |= mc_enc(16, 8, obj_type[2]);
- cmd.params[3] |= mc_enc(24, 8, obj_type[3]);
- cmd.params[3] |= mc_enc(32, 8, obj_type[4]);
- cmd.params[3] |= mc_enc(40, 8, obj_type[5]);
- cmd.params[3] |= mc_enc(48, 8, obj_type[6]);
- cmd.params[3] |= mc_enc(56, 8, obj_type[7]);
- cmd.params[4] |= mc_enc(0, 8, obj_type[8]);
- cmd.params[4] |= mc_enc(8, 8, obj_type[9]);
- cmd.params[4] |= mc_enc(16, 8, obj_type[10]);
- cmd.params[4] |= mc_enc(24, 8, obj_type[11]);
- cmd.params[4] |= mc_enc(32, 8, obj_type[12]);
- cmd.params[4] |= mc_enc(40, 8, obj_type[13]);
- cmd.params[4] |= mc_enc(48, 8, obj_type[14]);
- cmd.params[4] |= mc_enc(56, 8, obj_type[15]);
+ cmd_params = (struct dprc_cmd_set_obj_irq *)cmd.params;
+ cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
+ cmd_params->irq_index = irq_index;
+ cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
+ cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
+ cmd_params->obj_id = cpu_to_le32(obj_id);
+ strncpy(cmd_params->obj_type, obj_type, 16);
+ cmd_params->obj_type[15] = '\0';
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -1159,30 +1055,19 @@ int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
struct dprc_irq_cfg *irq_cfg)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_get_obj_irq *cmd_params;
+ struct dprc_rsp_get_obj_irq *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_IRQ,
cmd_flags,
token);
- cmd.params[0] |= mc_enc(0, 32, obj_id);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
- cmd.params[1] |= mc_enc(0, 8, obj_type[0]);
- cmd.params[1] |= mc_enc(8, 8, obj_type[1]);
- cmd.params[1] |= mc_enc(16, 8, obj_type[2]);
- cmd.params[1] |= mc_enc(24, 8, obj_type[3]);
- cmd.params[1] |= mc_enc(32, 8, obj_type[4]);
- cmd.params[1] |= mc_enc(40, 8, obj_type[5]);
- cmd.params[1] |= mc_enc(48, 8, obj_type[6]);
- cmd.params[1] |= mc_enc(56, 8, obj_type[7]);
- cmd.params[2] |= mc_enc(0, 8, obj_type[8]);
- cmd.params[2] |= mc_enc(8, 8, obj_type[9]);
- cmd.params[2] |= mc_enc(16, 8, obj_type[10]);
- cmd.params[2] |= mc_enc(24, 8, obj_type[11]);
- cmd.params[2] |= mc_enc(32, 8, obj_type[12]);
- cmd.params[2] |= mc_enc(40, 8, obj_type[13]);
- cmd.params[2] |= mc_enc(48, 8, obj_type[14]);
- cmd.params[2] |= mc_enc(56, 8, obj_type[15]);
+ cmd_params = (struct dprc_cmd_get_obj_irq *)cmd.params;
+ cmd_params->obj_id = cpu_to_le32(obj_id);
+ cmd_params->irq_index = irq_index;
+ strncpy(cmd_params->obj_type, obj_type, 16);
+ cmd_params->obj_type[15] = '\0';
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -1190,10 +1075,11 @@ int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32);
- irq_cfg->paddr = (u64)mc_dec(cmd.params[1], 0, 64);
- irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32);
- *type = (int)mc_dec(cmd.params[2], 32, 32);
+ rsp_params = (struct dprc_rsp_get_obj_irq *)cmd.params;
+ irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
+ irq_cfg->paddr = le64_to_cpu(rsp_params->irq_addr);
+ irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
+ *type = le32_to_cpu(rsp_params->type);
return 0;
}
@@ -1218,29 +1104,16 @@ int dprc_get_res_count(struct fsl_mc_io *mc_io,
int *res_count)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_get_res_count *cmd_params;
+ struct dprc_rsp_get_res_count *rsp_params;
int err;
- *res_count = 0;
-
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_COUNT,
cmd_flags, token);
- cmd.params[1] |= mc_enc(0, 8, type[0]);
- cmd.params[1] |= mc_enc(8, 8, type[1]);
- cmd.params[1] |= mc_enc(16, 8, type[2]);
- cmd.params[1] |= mc_enc(24, 8, type[3]);
- cmd.params[1] |= mc_enc(32, 8, type[4]);
- cmd.params[1] |= mc_enc(40, 8, type[5]);
- cmd.params[1] |= mc_enc(48, 8, type[6]);
- cmd.params[1] |= mc_enc(56, 8, type[7]);
- cmd.params[2] |= mc_enc(0, 8, type[8]);
- cmd.params[2] |= mc_enc(8, 8, type[9]);
- cmd.params[2] |= mc_enc(16, 8, type[10]);
- cmd.params[2] |= mc_enc(24, 8, type[11]);
- cmd.params[2] |= mc_enc(32, 8, type[12]);
- cmd.params[2] |= mc_enc(40, 8, type[13]);
- cmd.params[2] |= mc_enc(48, 8, type[14]);
- cmd.params[2] |= mc_enc(56, 8, '\0');
+ cmd_params = (struct dprc_cmd_get_res_count *)cmd.params;
+ strncpy(cmd_params->type, type, 16);
+ cmd_params->type[15] = '\0';
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -1248,7 +1121,8 @@ int dprc_get_res_count(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- *res_count = mc_dec(cmd.params[0], 0, 32);
+ rsp_params = (struct dprc_rsp_get_res_count *)cmd.params;
+ *res_count = le32_to_cpu(rsp_params->res_count);
return 0;
}
@@ -1271,30 +1145,19 @@ int dprc_get_res_ids(struct fsl_mc_io *mc_io,
struct dprc_res_ids_range_desc *range_desc)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_get_res_ids *cmd_params;
+ struct dprc_rsp_get_res_ids *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_IDS,
cmd_flags, token);
- cmd.params[0] |= mc_enc(42, 7, range_desc->iter_status);
- cmd.params[1] |= mc_enc(0, 32, range_desc->base_id);
- cmd.params[1] |= mc_enc(32, 32, range_desc->last_id);
- cmd.params[2] |= mc_enc(0, 8, type[0]);
- cmd.params[2] |= mc_enc(8, 8, type[1]);
- cmd.params[2] |= mc_enc(16, 8, type[2]);
- cmd.params[2] |= mc_enc(24, 8, type[3]);
- cmd.params[2] |= mc_enc(32, 8, type[4]);
- cmd.params[2] |= mc_enc(40, 8, type[5]);
- cmd.params[2] |= mc_enc(48, 8, type[6]);
- cmd.params[2] |= mc_enc(56, 8, type[7]);
- cmd.params[3] |= mc_enc(0, 8, type[8]);
- cmd.params[3] |= mc_enc(8, 8, type[9]);
- cmd.params[3] |= mc_enc(16, 8, type[10]);
- cmd.params[3] |= mc_enc(24, 8, type[11]);
- cmd.params[3] |= mc_enc(32, 8, type[12]);
- cmd.params[3] |= mc_enc(40, 8, type[13]);
- cmd.params[3] |= mc_enc(48, 8, type[14]);
- cmd.params[3] |= mc_enc(56, 8, '\0');
+ cmd_params = (struct dprc_cmd_get_res_ids *)cmd.params;
+ cmd_params->iter_status = range_desc->iter_status;
+ cmd_params->base_id = cpu_to_le32(range_desc->base_id);
+ cmd_params->last_id = cpu_to_le32(range_desc->last_id);
+ strncpy(cmd_params->type, type, 16);
+ cmd_params->type[15] = '\0';
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -1302,9 +1165,10 @@ int dprc_get_res_ids(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- range_desc->iter_status = mc_dec(cmd.params[0], 42, 7);
- range_desc->base_id = mc_dec(cmd.params[1], 0, 32);
- range_desc->last_id = mc_dec(cmd.params[1], 32, 32);
+ rsp_params = (struct dprc_rsp_get_res_ids *)cmd.params;
+ range_desc->iter_status = rsp_params->iter_status;
+ range_desc->base_id = le32_to_cpu(rsp_params->base_id);
+ range_desc->last_id = le32_to_cpu(rsp_params->last_id);
return 0;
}
@@ -1331,29 +1195,18 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io,
struct dprc_region_desc *region_desc)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_get_obj_region *cmd_params;
+ struct dprc_rsp_get_obj_region *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG,
cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, obj_id);
- cmd.params[0] |= mc_enc(48, 8, region_index);
- cmd.params[3] |= mc_enc(0, 8, obj_type[0]);
- cmd.params[3] |= mc_enc(8, 8, obj_type[1]);
- cmd.params[3] |= mc_enc(16, 8, obj_type[2]);
- cmd.params[3] |= mc_enc(24, 8, obj_type[3]);
- cmd.params[3] |= mc_enc(32, 8, obj_type[4]);
- cmd.params[3] |= mc_enc(40, 8, obj_type[5]);
- cmd.params[3] |= mc_enc(48, 8, obj_type[6]);
- cmd.params[3] |= mc_enc(56, 8, obj_type[7]);
- cmd.params[4] |= mc_enc(0, 8, obj_type[8]);
- cmd.params[4] |= mc_enc(8, 8, obj_type[9]);
- cmd.params[4] |= mc_enc(16, 8, obj_type[10]);
- cmd.params[4] |= mc_enc(24, 8, obj_type[11]);
- cmd.params[4] |= mc_enc(32, 8, obj_type[12]);
- cmd.params[4] |= mc_enc(40, 8, obj_type[13]);
- cmd.params[4] |= mc_enc(48, 8, obj_type[14]);
- cmd.params[4] |= mc_enc(56, 8, '\0');
+ cmd_params = (struct dprc_cmd_get_obj_region *)cmd.params;
+ cmd_params->obj_id = cpu_to_le32(obj_id);
+ cmd_params->region_index = region_index;
+ strncpy(cmd_params->obj_type, obj_type, 16);
+ cmd_params->obj_type[15] = '\0';
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -1361,8 +1214,9 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- region_desc->base_offset = mc_dec(cmd.params[1], 0, 64);
- region_desc->size = mc_dec(cmd.params[2], 0, 32);
+ rsp_params = (struct dprc_rsp_get_obj_region *)cmd.params;
+ region_desc->base_offset = le64_to_cpu(rsp_params->base_addr);
+ region_desc->size = le32_to_cpu(rsp_params->size);
return 0;
}
@@ -1387,45 +1241,18 @@ int dprc_set_obj_label(struct fsl_mc_io *mc_io,
char *label)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_set_obj_label *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_LABEL,
cmd_flags,
token);
-
- cmd.params[0] |= mc_enc(0, 32, obj_id);
- cmd.params[1] |= mc_enc(0, 8, label[0]);
- cmd.params[1] |= mc_enc(8, 8, label[1]);
- cmd.params[1] |= mc_enc(16, 8, label[2]);
- cmd.params[1] |= mc_enc(24, 8, label[3]);
- cmd.params[1] |= mc_enc(32, 8, label[4]);
- cmd.params[1] |= mc_enc(40, 8, label[5]);
- cmd.params[1] |= mc_enc(48, 8, label[6]);
- cmd.params[1] |= mc_enc(56, 8, label[7]);
- cmd.params[2] |= mc_enc(0, 8, label[8]);
- cmd.params[2] |= mc_enc(8, 8, label[9]);
- cmd.params[2] |= mc_enc(16, 8, label[10]);
- cmd.params[2] |= mc_enc(24, 8, label[11]);
- cmd.params[2] |= mc_enc(32, 8, label[12]);
- cmd.params[2] |= mc_enc(40, 8, label[13]);
- cmd.params[2] |= mc_enc(48, 8, label[14]);
- cmd.params[2] |= mc_enc(56, 8, label[15]);
- cmd.params[3] |= mc_enc(0, 8, obj_type[0]);
- cmd.params[3] |= mc_enc(8, 8, obj_type[1]);
- cmd.params[3] |= mc_enc(16, 8, obj_type[2]);
- cmd.params[3] |= mc_enc(24, 8, obj_type[3]);
- cmd.params[3] |= mc_enc(32, 8, obj_type[4]);
- cmd.params[3] |= mc_enc(40, 8, obj_type[5]);
- cmd.params[3] |= mc_enc(48, 8, obj_type[6]);
- cmd.params[3] |= mc_enc(56, 8, obj_type[7]);
- cmd.params[4] |= mc_enc(0, 8, obj_type[8]);
- cmd.params[4] |= mc_enc(8, 8, obj_type[9]);
- cmd.params[4] |= mc_enc(16, 8, obj_type[10]);
- cmd.params[4] |= mc_enc(24, 8, obj_type[11]);
- cmd.params[4] |= mc_enc(32, 8, obj_type[12]);
- cmd.params[4] |= mc_enc(40, 8, obj_type[13]);
- cmd.params[4] |= mc_enc(48, 8, obj_type[14]);
- cmd.params[4] |= mc_enc(56, 8, obj_type[15]);
+ cmd_params = (struct dprc_cmd_set_obj_label *)cmd.params;
+ cmd_params->obj_id = cpu_to_le32(obj_id);
+ strncpy(cmd_params->label, label, 16);
+ cmd_params->label[15] = '\0';
+ strncpy(cmd_params->obj_type, obj_type, 16);
+ cmd_params->obj_type[15] = '\0';
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -1453,49 +1280,23 @@ int dprc_connect(struct fsl_mc_io *mc_io,
const struct dprc_connection_cfg *cfg)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_connect *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_CONNECT,
cmd_flags,
token);
- cmd.params[0] |= mc_enc(0, 32, endpoint1->id);
- cmd.params[0] |= mc_enc(32, 32, endpoint1->if_id);
- cmd.params[1] |= mc_enc(0, 32, endpoint2->id);
- cmd.params[1] |= mc_enc(32, 32, endpoint2->if_id);
- cmd.params[2] |= mc_enc(0, 8, endpoint1->type[0]);
- cmd.params[2] |= mc_enc(8, 8, endpoint1->type[1]);
- cmd.params[2] |= mc_enc(16, 8, endpoint1->type[2]);
- cmd.params[2] |= mc_enc(24, 8, endpoint1->type[3]);
- cmd.params[2] |= mc_enc(32, 8, endpoint1->type[4]);
- cmd.params[2] |= mc_enc(40, 8, endpoint1->type[5]);
- cmd.params[2] |= mc_enc(48, 8, endpoint1->type[6]);
- cmd.params[2] |= mc_enc(56, 8, endpoint1->type[7]);
- cmd.params[3] |= mc_enc(0, 8, endpoint1->type[8]);
- cmd.params[3] |= mc_enc(8, 8, endpoint1->type[9]);
- cmd.params[3] |= mc_enc(16, 8, endpoint1->type[10]);
- cmd.params[3] |= mc_enc(24, 8, endpoint1->type[11]);
- cmd.params[3] |= mc_enc(32, 8, endpoint1->type[12]);
- cmd.params[3] |= mc_enc(40, 8, endpoint1->type[13]);
- cmd.params[3] |= mc_enc(48, 8, endpoint1->type[14]);
- cmd.params[3] |= mc_enc(56, 8, endpoint1->type[15]);
- cmd.params[4] |= mc_enc(0, 32, cfg->max_rate);
- cmd.params[4] |= mc_enc(32, 32, cfg->committed_rate);
- cmd.params[5] |= mc_enc(0, 8, endpoint2->type[0]);
- cmd.params[5] |= mc_enc(8, 8, endpoint2->type[1]);
- cmd.params[5] |= mc_enc(16, 8, endpoint2->type[2]);
- cmd.params[5] |= mc_enc(24, 8, endpoint2->type[3]);
- cmd.params[5] |= mc_enc(32, 8, endpoint2->type[4]);
- cmd.params[5] |= mc_enc(40, 8, endpoint2->type[5]);
- cmd.params[5] |= mc_enc(48, 8, endpoint2->type[6]);
- cmd.params[5] |= mc_enc(56, 8, endpoint2->type[7]);
- cmd.params[6] |= mc_enc(0, 8, endpoint2->type[8]);
- cmd.params[6] |= mc_enc(8, 8, endpoint2->type[9]);
- cmd.params[6] |= mc_enc(16, 8, endpoint2->type[10]);
- cmd.params[6] |= mc_enc(24, 8, endpoint2->type[11]);
- cmd.params[6] |= mc_enc(32, 8, endpoint2->type[12]);
- cmd.params[6] |= mc_enc(40, 8, endpoint2->type[13]);
- cmd.params[6] |= mc_enc(48, 8, endpoint2->type[14]);
- cmd.params[6] |= mc_enc(56, 8, endpoint2->type[15]);
+ cmd_params = (struct dprc_cmd_connect *)cmd.params;
+ cmd_params->ep1_id = cpu_to_le32(endpoint1->id);
+ cmd_params->ep1_interface_id = cpu_to_le32(endpoint1->if_id);
+ cmd_params->ep2_id = cpu_to_le32(endpoint2->id);
+ cmd_params->ep2_interface_id = cpu_to_le32(endpoint2->if_id);
+ strncpy(cmd_params->ep1_type, endpoint1->type, 16);
+ cmd_params->ep1_type[15] = '\0';
+ cmd_params->max_rate = cpu_to_le32(cfg->max_rate);
+ cmd_params->committed_rate = cpu_to_le32(cfg->committed_rate);
+ strncpy(cmd_params->ep2_type, endpoint2->type, 16);
+ cmd_params->ep2_type[15] = '\0';
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -1516,29 +1317,17 @@ int dprc_disconnect(struct fsl_mc_io *mc_io,
const struct dprc_endpoint *endpoint)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_disconnect *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_DISCONNECT,
cmd_flags,
token);
- cmd.params[0] |= mc_enc(0, 32, endpoint->id);
- cmd.params[0] |= mc_enc(32, 32, endpoint->if_id);
- cmd.params[1] |= mc_enc(0, 8, endpoint->type[0]);
- cmd.params[1] |= mc_enc(8, 8, endpoint->type[1]);
- cmd.params[1] |= mc_enc(16, 8, endpoint->type[2]);
- cmd.params[1] |= mc_enc(24, 8, endpoint->type[3]);
- cmd.params[1] |= mc_enc(32, 8, endpoint->type[4]);
- cmd.params[1] |= mc_enc(40, 8, endpoint->type[5]);
- cmd.params[1] |= mc_enc(48, 8, endpoint->type[6]);
- cmd.params[1] |= mc_enc(56, 8, endpoint->type[7]);
- cmd.params[2] |= mc_enc(0, 8, endpoint->type[8]);
- cmd.params[2] |= mc_enc(8, 8, endpoint->type[9]);
- cmd.params[2] |= mc_enc(16, 8, endpoint->type[10]);
- cmd.params[2] |= mc_enc(24, 8, endpoint->type[11]);
- cmd.params[2] |= mc_enc(32, 8, endpoint->type[12]);
- cmd.params[2] |= mc_enc(40, 8, endpoint->type[13]);
- cmd.params[2] |= mc_enc(48, 8, endpoint->type[14]);
- cmd.params[2] |= mc_enc(56, 8, endpoint->type[15]);
+ cmd_params = (struct dprc_cmd_disconnect *)cmd.params;
+ cmd_params->id = cpu_to_le32(endpoint->id);
+ cmd_params->interface_id = cpu_to_le32(endpoint->if_id);
+ strncpy(cmd_params->type, endpoint->type, 16);
+ cmd_params->type[15] = '\0';
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -1567,30 +1356,19 @@ int dprc_get_connection(struct fsl_mc_io *mc_io,
int *state)
{
struct mc_command cmd = { 0 };
+ struct dprc_cmd_get_connection *cmd_params;
+ struct dprc_rsp_get_connection *rsp_params;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONNECTION,
cmd_flags,
token);
- cmd.params[0] |= mc_enc(0, 32, endpoint1->id);
- cmd.params[0] |= mc_enc(32, 32, endpoint1->if_id);
- cmd.params[1] |= mc_enc(0, 8, endpoint1->type[0]);
- cmd.params[1] |= mc_enc(8, 8, endpoint1->type[1]);
- cmd.params[1] |= mc_enc(16, 8, endpoint1->type[2]);
- cmd.params[1] |= mc_enc(24, 8, endpoint1->type[3]);
- cmd.params[1] |= mc_enc(32, 8, endpoint1->type[4]);
- cmd.params[1] |= mc_enc(40, 8, endpoint1->type[5]);
- cmd.params[1] |= mc_enc(48, 8, endpoint1->type[6]);
- cmd.params[1] |= mc_enc(56, 8, endpoint1->type[7]);
- cmd.params[2] |= mc_enc(0, 8, endpoint1->type[8]);
- cmd.params[2] |= mc_enc(8, 8, endpoint1->type[9]);
- cmd.params[2] |= mc_enc(16, 8, endpoint1->type[10]);
- cmd.params[2] |= mc_enc(24, 8, endpoint1->type[11]);
- cmd.params[2] |= mc_enc(32, 8, endpoint1->type[12]);
- cmd.params[2] |= mc_enc(40, 8, endpoint1->type[13]);
- cmd.params[2] |= mc_enc(48, 8, endpoint1->type[14]);
- cmd.params[2] |= mc_enc(56, 8, endpoint1->type[15]);
+ cmd_params = (struct dprc_cmd_get_connection *)cmd.params;
+ cmd_params->ep1_id = cpu_to_le32(endpoint1->id);
+ cmd_params->ep1_interface_id = cpu_to_le32(endpoint1->if_id);
+ strncpy(cmd_params->ep1_type, endpoint1->type, 16);
+ cmd_params->ep1_type[15] = '\0';
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -1598,25 +1376,12 @@ int dprc_get_connection(struct fsl_mc_io *mc_io,
return err;
/* retrieve response parameters */
- endpoint2->id = mc_dec(cmd.params[3], 0, 32);
- endpoint2->if_id = mc_dec(cmd.params[3], 32, 32);
- endpoint2->type[0] = mc_dec(cmd.params[4], 0, 8);
- endpoint2->type[1] = mc_dec(cmd.params[4], 8, 8);
- endpoint2->type[2] = mc_dec(cmd.params[4], 16, 8);
- endpoint2->type[3] = mc_dec(cmd.params[4], 24, 8);
- endpoint2->type[4] = mc_dec(cmd.params[4], 32, 8);
- endpoint2->type[5] = mc_dec(cmd.params[4], 40, 8);
- endpoint2->type[6] = mc_dec(cmd.params[4], 48, 8);
- endpoint2->type[7] = mc_dec(cmd.params[4], 56, 8);
- endpoint2->type[8] = mc_dec(cmd.params[5], 0, 8);
- endpoint2->type[9] = mc_dec(cmd.params[5], 8, 8);
- endpoint2->type[10] = mc_dec(cmd.params[5], 16, 8);
- endpoint2->type[11] = mc_dec(cmd.params[5], 24, 8);
- endpoint2->type[12] = mc_dec(cmd.params[5], 32, 8);
- endpoint2->type[13] = mc_dec(cmd.params[5], 40, 8);
- endpoint2->type[14] = mc_dec(cmd.params[5], 48, 8);
- endpoint2->type[15] = mc_dec(cmd.params[5], 56, 8);
- *state = mc_dec(cmd.params[6], 0, 32);
+ rsp_params = (struct dprc_rsp_get_connection *)cmd.params;
+ endpoint2->id = le32_to_cpu(rsp_params->ep2_id);
+ endpoint2->if_id = le32_to_cpu(rsp_params->ep2_interface_id);
+ strncpy(endpoint2->type, rsp_params->ep2_type, 16);
+ endpoint2->type[15] = '\0';
+ *state = le32_to_cpu(rsp_params->state);
return 0;
}
diff --git a/drivers/staging/fsl-mc/bus/mc-allocator.c b/drivers/staging/fsl-mc/bus/mc-allocator.c
index fb08f22a7..e59d85060 100644
--- a/drivers/staging/fsl-mc/bus/mc-allocator.c
+++ b/drivers/staging/fsl-mc/bus/mc-allocator.c
@@ -717,7 +717,7 @@ static int fsl_mc_allocator_remove(struct fsl_mc_device *mc_dev)
return 0;
}
-static const struct fsl_mc_device_match_id match_id_table[] = {
+static const struct fsl_mc_device_id match_id_table[] = {
{
.vendor = FSL_MC_VENDOR_FREESCALE,
.obj_type = "dpbp",
diff --git a/drivers/staging/fsl-mc/bus/mc-bus.c b/drivers/staging/fsl-mc/bus/mc-bus.c
index 405364307..db3afdbdf 100644
--- a/drivers/staging/fsl-mc/bus/mc-bus.c
+++ b/drivers/staging/fsl-mc/bus/mc-bus.c
@@ -24,8 +24,6 @@
static struct kmem_cache *mc_dev_cache;
-static bool fsl_mc_is_root_dprc(struct device *dev);
-
/**
* fsl_mc_bus_match - device to driver matching callback
* @dev: the MC object device structure to match against
@@ -36,7 +34,7 @@ static bool fsl_mc_is_root_dprc(struct device *dev);
*/
static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
{
- const struct fsl_mc_device_match_id *id;
+ const struct fsl_mc_device_id *id;
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
bool found = false;
@@ -78,14 +76,45 @@ out:
*/
static int fsl_mc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
- pr_debug("%s invoked\n", __func__);
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ if (add_uevent_var(env, "MODALIAS=fsl-mc:v%08Xd%s",
+ mc_dev->obj_desc.vendor,
+ mc_dev->obj_desc.type))
+ return -ENOMEM;
+
return 0;
}
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+ return sprintf(buf, "fsl-mc:v%08Xd%s\n", mc_dev->obj_desc.vendor,
+ mc_dev->obj_desc.type);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *fsl_mc_dev_attrs[] = {
+ &dev_attr_modalias.attr,
+ NULL,
+};
+
+static const struct attribute_group fsl_mc_dev_group = {
+ .attrs = fsl_mc_dev_attrs,
+};
+
+static const struct attribute_group *fsl_mc_dev_groups[] = {
+ &fsl_mc_dev_group,
+ NULL,
+};
+
struct bus_type fsl_mc_bus_type = {
.name = "fsl-mc",
.match = fsl_mc_bus_match,
.uevent = fsl_mc_bus_uevent,
+ .dev_groups = fsl_mc_dev_groups,
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
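
Note: with the uevent hook and the modalias attribute above, every MC object device now advertises a MODALIAS of the form fsl-mc:v<vendor>d<obj_type>, which udev can hand to modprobe for module autoloading. A sketch of the string both paths emit, assuming FSL_MC_VENDOR_FREESCALE is 0x1957 (the Freescale PCI vendor ID):

#include <stdio.h>

int main(void)
{
        unsigned int vendor = 0x1957;   /* assumed FSL_MC_VENDOR_FREESCALE */
        const char *type = "dpbp";

        printf("fsl-mc:v%08Xd%s\n", vendor, type);
        /* prints: fsl-mc:v00001957ddpbp */
        return 0;
}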
@@ -216,19 +245,6 @@ static void fsl_mc_get_root_dprc(struct device *dev,
}
}
-/**
- * fsl_mc_is_root_dprc - function to check if a given device is a root dprc
- */
-static bool fsl_mc_is_root_dprc(struct device *dev)
-{
- struct device *root_dprc_dev;
-
- fsl_mc_get_root_dprc(dev, &root_dprc_dev);
- if (!root_dprc_dev)
- return false;
- return dev == root_dprc_dev;
-}
-
static int get_dprc_attr(struct fsl_mc_io *mc_io,
int container_id, struct dprc_attributes *attr)
{
@@ -393,6 +409,19 @@ error_cleanup_regions:
}
/**
+ * fsl_mc_is_root_dprc - function to check if a given device is a root dprc
+ */
+bool fsl_mc_is_root_dprc(struct device *dev)
+{
+ struct device *root_dprc_dev;
+
+ fsl_mc_get_root_dprc(dev, &root_dprc_dev);
+ if (!root_dprc_dev)
+ return false;
+ return dev == root_dprc_dev;
+}
+
+/**
* Add a newly discovered MC object device to be visible in Linux
*/
int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
@@ -550,10 +579,6 @@ void fsl_mc_device_remove(struct fsl_mc_device *mc_dev)
if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) {
mc_bus = to_fsl_mc_bus(mc_dev);
- if (mc_dev->mc_io) {
- fsl_destroy_mc_io(mc_dev->mc_io);
- mc_dev->mc_io = NULL;
- }
if (fsl_mc_is_root_dprc(&mc_dev->dev)) {
if (atomic_read(&root_dprc_count) > 0)
@@ -781,6 +806,10 @@ static int fsl_mc_bus_remove(struct platform_device *pdev)
return -EINVAL;
fsl_mc_device_remove(mc->root_mc_bus_dev);
+
+ fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io);
+ mc->root_mc_bus_dev->mc_io = NULL;
+
dev_info(&pdev->dev, "Root MC bus device removed");
return 0;
}
diff --git a/drivers/staging/fsl-mc/bus/mc-msi.c b/drivers/staging/fsl-mc/bus/mc-msi.c
index e202b2b88..c7be156ae 100644
--- a/drivers/staging/fsl-mc/bus/mc-msi.c
+++ b/drivers/staging/fsl-mc/bus/mc-msi.c
@@ -20,11 +20,26 @@
#include "../include/mc-sys.h"
#include "dprc-cmd.h"
+/*
+ * Generate a unique ID identifying the interrupt (only used within the MSI
+ * irqdomain). Combine the icid with the interrupt index.
+ */
+static irq_hw_number_t fsl_mc_domain_calc_hwirq(struct fsl_mc_device *dev,
+ struct msi_desc *desc)
+{
+ /*
+ * Use ICID * 10000 as the base hwirq value so the result is readable
+ * as a decimal value in /proc/interrupts.
+ */
+ return (irq_hw_number_t)(desc->fsl_mc.msi_index + (dev->icid * 10000));
+}
+
static void fsl_mc_msi_set_desc(msi_alloc_info_t *arg,
struct msi_desc *desc)
{
arg->desc = desc;
- arg->hwirq = (irq_hw_number_t)desc->fsl_mc.msi_index;
+ arg->hwirq = fsl_mc_domain_calc_hwirq(to_fsl_mc_device(desc->dev),
+ desc);
}
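
Note: a worked example of the hwirq scheme above — an object with icid 27 and MSI index 3 gets hwirq 270003, so both numbers can be read straight off /proc/interrupts in decimal. A standalone restatement:

#include <assert.h>
#include <stdint.h>

static uint64_t calc_hwirq_sketch(uint32_t icid, uint32_t msi_index)
{
        /* mirrors fsl_mc_domain_calc_hwirq(): index + icid * 10000 */
        return (uint64_t)msi_index + (uint64_t)icid * 10000;
}

int main(void)
{
        assert(calc_hwirq_sketch(27, 3) == 270003);
        return 0;
}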
static void fsl_mc_msi_update_dom_ops(struct msi_domain_info *info)
diff --git a/drivers/staging/fsl-mc/bus/mc-sys.c b/drivers/staging/fsl-mc/bus/mc-sys.c
index 810a611c1..0c185abe6 100644
--- a/drivers/staging/fsl-mc/bus/mc-sys.c
+++ b/drivers/staging/fsl-mc/bus/mc-sys.c
@@ -53,8 +53,20 @@
#define MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS 10
#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS 500
-#define MC_CMD_HDR_READ_CMDID(_hdr) \
- ((u16)mc_dec((_hdr), MC_CMD_HDR_CMDID_O, MC_CMD_HDR_CMDID_S))
+static enum mc_cmd_status mc_cmd_hdr_read_status(struct mc_command *cmd)
+{
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+
+ return (enum mc_cmd_status)hdr->status;
+}
+
+static u16 mc_cmd_hdr_read_cmdid(struct mc_command *cmd)
+{
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+ u16 cmd_id = le16_to_cpu(hdr->cmd_id);
+
+ return (cmd_id & MC_CMD_HDR_CMDID_MASK) >> MC_CMD_HDR_CMDID_SHIFT;
+}
/**
* Creates an MC I/O object
@@ -261,10 +273,11 @@ static inline void mc_write_command(struct mc_command __iomem *portal,
/* copy command parameters into the portal */
for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
- writeq(cmd->params[i], &portal->params[i]);
+ __raw_writeq(cmd->params[i], &portal->params[i]);
+ __iowmb();
/* submit the command by writing the header */
- writeq(cmd->header, &portal->header);
+ __raw_writeq(cmd->header, &portal->header);
}
/**
@@ -284,14 +297,17 @@ static inline enum mc_cmd_status mc_read_response(struct mc_command __iomem *
enum mc_cmd_status status;
/* Copy command response header from MC portal: */
- resp->header = readq(&portal->header);
- status = MC_CMD_HDR_READ_STATUS(resp->header);
+ __iormb();
+ resp->header = __raw_readq(&portal->header);
+ __iormb();
+ status = mc_cmd_hdr_read_status(resp);
if (status != MC_CMD_STATUS_OK)
return status;
/* Copy command response data from MC portal: */
for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
- resp->params[i] = readq(&portal->params[i]);
+ resp->params[i] = __raw_readq(&portal->params[i]);
+ __iormb();
return status;
}
@@ -331,10 +347,8 @@ static int mc_polling_wait_preemptible(struct fsl_mc_io *mc_io,
dev_dbg(mc_io->dev,
"MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
mc_io->portal_phys_addr,
- (unsigned int)
- MC_CMD_HDR_READ_TOKEN(cmd->header),
- (unsigned int)
- MC_CMD_HDR_READ_CMDID(cmd->header));
+ (unsigned int)mc_cmd_hdr_read_token(cmd),
+ (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
return -ETIMEDOUT;
}
@@ -373,10 +387,8 @@ static int mc_polling_wait_atomic(struct fsl_mc_io *mc_io,
dev_dbg(mc_io->dev,
"MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
mc_io->portal_phys_addr,
- (unsigned int)
- MC_CMD_HDR_READ_TOKEN(cmd->header),
- (unsigned int)
- MC_CMD_HDR_READ_CMDID(cmd->header));
+ (unsigned int)mc_cmd_hdr_read_token(cmd),
+ (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
return -ETIMEDOUT;
}
@@ -429,8 +441,8 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd)
dev_dbg(mc_io->dev,
"MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n",
mc_io->portal_phys_addr,
- (unsigned int)MC_CMD_HDR_READ_TOKEN(cmd->header),
- (unsigned int)MC_CMD_HDR_READ_CMDID(cmd->header),
+ (unsigned int)mc_cmd_hdr_read_token(cmd),
+ (unsigned int)mc_cmd_hdr_read_cmdid(cmd),
mc_status_to_string(status),
(unsigned int)status);
diff --git a/drivers/staging/fsl-mc/include/dpbp-cmd.h b/drivers/staging/fsl-mc/include/dpbp-cmd.h
index c57b454a2..4828ccd0c 100644
--- a/drivers/staging/fsl-mc/include/dpbp-cmd.h
+++ b/drivers/staging/fsl-mc/include/dpbp-cmd.h
@@ -1,4 +1,4 @@
-/* Copyright 2013-2014 Freescale Semiconductor Inc.
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -59,4 +59,127 @@
#define DPBP_CMDID_SET_NOTIFICATIONS 0x01b0
#define DPBP_CMDID_GET_NOTIFICATIONS 0x01b1
+
+struct dpbp_cmd_open {
+ __le32 dpbp_id;
+};
+
+#define DPBP_ENABLE 0x1
+
+struct dpbp_rsp_is_enabled {
+ u8 enabled;
+};
+
+struct dpbp_cmd_set_irq {
+ /* cmd word 0 */
+ u8 irq_index;
+ u8 pad[3];
+ __le32 irq_val;
+ /* cmd word 1 */
+ __le64 irq_addr;
+ /* cmd word 2 */
+ __le32 irq_num;
+};
+
+struct dpbp_cmd_get_irq {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dpbp_rsp_get_irq {
+ /* response word 0 */
+ __le32 irq_val;
+ __le32 pad;
+ /* response word 1 */
+ __le64 irq_addr;
+ /* response word 2 */
+ __le32 irq_num;
+ __le32 type;
+};
+
+struct dpbp_cmd_set_irq_enable {
+ u8 enable;
+ u8 pad[3];
+ u8 irq_index;
+};
+
+struct dpbp_cmd_get_irq_enable {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dpbp_rsp_get_irq_enable {
+ u8 enabled;
+};
+
+struct dpbp_cmd_set_irq_mask {
+ __le32 mask;
+ u8 irq_index;
+};
+
+struct dpbp_cmd_get_irq_mask {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dpbp_rsp_get_irq_mask {
+ __le32 mask;
+};
+
+struct dpbp_cmd_get_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dpbp_rsp_get_irq_status {
+ __le32 status;
+};
+
+struct dpbp_cmd_clear_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dpbp_rsp_get_attributes {
+ /* response word 0 */
+ __le16 pad;
+ __le16 bpid;
+ __le32 id;
+ /* response word 1 */
+ __le16 version_major;
+ __le16 version_minor;
+};
+
+struct dpbp_cmd_set_notifications {
+ /* cmd word 0 */
+ __le32 depletion_entry;
+ __le32 depletion_exit;
+ /* cmd word 1 */
+ __le32 surplus_entry;
+ __le32 surplus_exit;
+ /* cmd word 2 */
+ __le16 options;
+ __le16 pad[3];
+ /* cmd word 3 */
+ __le64 message_ctx;
+ /* cmd word 4 */
+ __le64 message_iova;
+};
+
+struct dpbp_rsp_get_notifications {
+ /* response word 0 */
+ __le32 depletion_entry;
+ __le32 depletion_exit;
+ /* response word 1 */
+ __le32 surplus_entry;
+ __le32 surplus_exit;
+ /* response word 2 */
+ __le16 options;
+ __le16 pad[3];
+ /* response word 3 */
+ __le64 message_ctx;
+ /* response word 4 */
+ __le64 message_iova;
+};
+
#endif /* _FSL_DPBP_CMD_H */
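
Note: the /* cmd word N */ markers in these structs are load-bearing — each struct is a byte-exact image of consecutive 64-bit portal words, and the pad fields exist only to hold later members at their wire offsets. A compile-time sketch of that invariant for dpbp_cmd_set_irq, with plain C types standing in for the kernel's __leN types (offsets hold under natural alignment):

#include <stddef.h>
#include <stdint.h>

struct dpbp_cmd_set_irq_sketch {
        /* cmd word 0 */
        uint8_t  irq_index;
        uint8_t  pad[3];
        uint32_t irq_val;
        /* cmd word 1 */
        uint64_t irq_addr;
        /* cmd word 2 */
        uint32_t irq_num;
};

/* pad[3] keeps irq_val in the upper half of word 0, and each
 * "cmd word" comment starts on an 8-byte boundary */
_Static_assert(offsetof(struct dpbp_cmd_set_irq_sketch, irq_val) == 4,
               "word 0 layout");
_Static_assert(offsetof(struct dpbp_cmd_set_irq_sketch, irq_addr) == 8,
               "word 1 layout");
_Static_assert(offsetof(struct dpbp_cmd_set_irq_sketch, irq_num) == 16,
               "word 2 layout");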
diff --git a/drivers/staging/fsl-mc/include/mc-cmd.h b/drivers/staging/fsl-mc/include/mc-cmd.h
index 65277e3de..5decb9890 100644
--- a/drivers/staging/fsl-mc/include/mc-cmd.h
+++ b/drivers/staging/fsl-mc/include/mc-cmd.h
@@ -34,18 +34,14 @@
#define MC_CMD_NUM_OF_PARAMS 7
-#define MAKE_UMASK64(_width) \
- ((u64)((_width) < 64 ? ((u64)1 << (_width)) - 1 : -1))
-
-static inline u64 mc_enc(int lsoffset, int width, u64 val)
-{
- return (u64)(((u64)val & MAKE_UMASK64(width)) << lsoffset);
-}
-
-static inline u64 mc_dec(u64 val, int lsoffset, int width)
-{
- return (u64)((val >> lsoffset) & MAKE_UMASK64(width));
-}
+struct mc_cmd_header {
+ u8 src_id;
+ u8 flags_hw;
+ u8 status;
+ u8 flags_sw;
+ __le16 token;
+ __le16 cmd_id;
+};
struct mc_command {
u64 header;
@@ -72,60 +68,41 @@ enum mc_cmd_status {
*/
/* High priority flag */
-#define MC_CMD_FLAG_PRI 0x00008000
+#define MC_CMD_FLAG_PRI 0x80
/* Command completion flag */
-#define MC_CMD_FLAG_INTR_DIS 0x01000000
-
-/*
- * TODO Remove following two defines after completion of flib 8.0.0
- * integration
- */
-#define MC_CMD_PRI_LOW 0 /*!< Low Priority command indication */
-#define MC_CMD_PRI_HIGH 1 /*!< High Priority command indication */
-
-#define MC_CMD_HDR_CMDID_O 52 /* Command ID field offset */
-#define MC_CMD_HDR_CMDID_S 12 /* Command ID field size */
-#define MC_CMD_HDR_TOKEN_O 38 /* Token field offset */
-#define MC_CMD_HDR_TOKEN_S 10 /* Token field size */
-#define MC_CMD_HDR_STATUS_O 16 /* Status field offset */
-#define MC_CMD_HDR_STATUS_S 8 /* Status field size*/
-#define MC_CMD_HDR_FLAGS_O 0 /* Flags field offset */
-#define MC_CMD_HDR_FLAGS_S 32 /* Flags field size*/
-#define MC_CMD_HDR_FLAGS_MASK 0xFF00FF00 /* Command flags mask */
-
-#define MC_CMD_HDR_READ_STATUS(_hdr) \
- ((enum mc_cmd_status)mc_dec((_hdr), \
- MC_CMD_HDR_STATUS_O, MC_CMD_HDR_STATUS_S))
-
-#define MC_CMD_HDR_READ_TOKEN(_hdr) \
- ((u16)mc_dec((_hdr), MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S))
-
-#define MC_CMD_HDR_READ_FLAGS(_hdr) \
- ((u32)mc_dec((_hdr), MC_CMD_HDR_FLAGS_O, MC_CMD_HDR_FLAGS_S))
+#define MC_CMD_FLAG_INTR_DIS 0x01
-#define MC_EXT_OP(_ext, _param, _offset, _width, _type, _arg) \
- ((_ext)[_param] |= mc_enc((_offset), (_width), _arg))
-
-#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \
- ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg))
-
-#define MC_RSP_OP(_cmd, _param, _offset, _width, _type, _arg) \
- (_arg = (_type)mc_dec(_cmd.params[_param], (_offset), (_width)))
+#define MC_CMD_HDR_CMDID_MASK 0xFFF0
+#define MC_CMD_HDR_CMDID_SHIFT 4
+#define MC_CMD_HDR_TOKEN_MASK 0xFFC0
+#define MC_CMD_HDR_TOKEN_SHIFT 6
static inline u64 mc_encode_cmd_header(u16 cmd_id,
u32 cmd_flags,
u16 token)
{
- u64 hdr;
+ u64 header = 0;
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&header;
+
+ hdr->cmd_id = cpu_to_le16((cmd_id << MC_CMD_HDR_CMDID_SHIFT) &
+ MC_CMD_HDR_CMDID_MASK);
+ hdr->token = cpu_to_le16((token << MC_CMD_HDR_TOKEN_SHIFT) &
+ MC_CMD_HDR_TOKEN_MASK);
+ hdr->status = MC_CMD_STATUS_READY;
+ if (cmd_flags & MC_CMD_FLAG_PRI)
+ hdr->flags_hw = MC_CMD_FLAG_PRI;
+ if (cmd_flags & MC_CMD_FLAG_INTR_DIS)
+ hdr->flags_sw = MC_CMD_FLAG_INTR_DIS;
+
+ return header;
+}
- hdr = mc_enc(MC_CMD_HDR_CMDID_O, MC_CMD_HDR_CMDID_S, cmd_id);
- hdr |= mc_enc(MC_CMD_HDR_FLAGS_O, MC_CMD_HDR_FLAGS_S,
- (cmd_flags & MC_CMD_HDR_FLAGS_MASK));
- hdr |= mc_enc(MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S, token);
- hdr |= mc_enc(MC_CMD_HDR_STATUS_O, MC_CMD_HDR_STATUS_S,
- MC_CMD_STATUS_READY);
+static inline u16 mc_cmd_hdr_read_token(struct mc_command *cmd)
+{
+ struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+ u16 token = le16_to_cpu(hdr->token);
- return hdr;
+ return (token & MC_CMD_HDR_TOKEN_MASK) >> MC_CMD_HDR_TOKEN_SHIFT;
}
#endif /* __FSL_MC_CMD_H */
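
Note: the reworked header packs the 12-bit command ID into the top of one 16-bit little-endian field (mask 0xFFF0, shift 4) and the 10-bit token into another (0xFFC0, shift 6). A standalone round-trip sketch matching mc_encode_cmd_header() and the read helpers (cpu_to_le16()/le16_to_cpu() omitted since the values never leave CPU byte order here):

#include <assert.h>
#include <stdint.h>

#define CMDID_MASK  0xFFF0
#define CMDID_SHIFT 4
#define TOKEN_MASK  0xFFC0
#define TOKEN_SHIFT 6

int main(void)
{
        uint16_t cmd_id = 0x005;        /* any 12-bit command id */
        uint16_t token = 0x2A;          /* any 10-bit token */
        uint16_t cmdid_field = (cmd_id << CMDID_SHIFT) & CMDID_MASK;
        uint16_t token_field = (token << TOKEN_SHIFT) & TOKEN_MASK;

        /* decode exactly as mc_cmd_hdr_read_cmdid()/_token() do */
        assert(((cmdid_field & CMDID_MASK) >> CMDID_SHIFT) == cmd_id);
        assert(((token_field & TOKEN_MASK) >> TOKEN_SHIFT) == token);
        return 0;
}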
diff --git a/drivers/staging/fsl-mc/include/mc.h b/drivers/staging/fsl-mc/include/mc.h
index ac7c1ce68..853cbf38a 100644
--- a/drivers/staging/fsl-mc/include/mc.h
+++ b/drivers/staging/fsl-mc/include/mc.h
@@ -39,7 +39,7 @@ struct fsl_mc_bus;
*/
struct fsl_mc_driver {
struct device_driver driver;
- const struct fsl_mc_device_match_id *match_id_table;
+ const struct fsl_mc_device_id *match_id_table;
int (*probe)(struct fsl_mc_device *dev);
int (*remove)(struct fsl_mc_device *dev);
void (*shutdown)(struct fsl_mc_device *dev);
@@ -51,23 +51,6 @@ struct fsl_mc_driver {
container_of(_drv, struct fsl_mc_driver, driver)
/**
- * struct fsl_mc_device_match_id - MC object device Id entry for driver matching
- * @vendor: vendor ID
- * @obj_type: MC object type
- * @ver_major: MC object version major number
- * @ver_minor: MC object version minor number
- *
- * Type of entries in the "device Id" table for MC object devices supported by
- * a MC object device driver. The last entry of the table has vendor set to 0x0
- */
-struct fsl_mc_device_match_id {
- u16 vendor;
- const char obj_type[16];
- u32 ver_major;
- u32 ver_minor;
-};
-
-/**
* enum fsl_mc_pool_type - Types of allocatable MC bus resources
*
* Entries in these enum are used as indices in the array of resource
@@ -224,6 +207,8 @@ int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev);
void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
+bool fsl_mc_is_root_dprc(struct device *dev);
+
extern struct bus_type fsl_mc_bus_type;
#endif /* _FSL_MC_H_ */
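
Note: the fsl_mc_device_match_id to fsl_mc_device_id rename (the definition leaves this header, presumably joining the other bus ID structs around mod_devicetable.h) lines the type up with the convention used for module device tables, which is what makes the MODALIAS autoloading added in mc-bus.c useful. A standalone sketch of a driver-side table after the rename, mirroring the dpbp entry in the mc-allocator.c hunk above:

#include <stdint.h>

/* local mirror of the renamed struct; fields as used in this patch */
struct fsl_mc_device_id_sketch {
        uint16_t vendor;
        char obj_type[16];
};

#define SKETCH_VENDOR_FREESCALE 0x1957  /* assumed value */

static const struct fsl_mc_device_id_sketch match_table[] = {
        { .vendor = SKETCH_VENDOR_FREESCALE, .obj_type = "dpbp" },
        { .vendor = 0x0 },      /* terminator: vendor 0, per the old docs */
};

int main(void)
{
        return match_table[0].vendor == 0 ? 1 : 0; /* keep the table used */
}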
diff --git a/drivers/staging/iio/accel/Kconfig b/drivers/staging/iio/accel/Kconfig
index f066aa30f..1c994b57c 100644
--- a/drivers/staging/iio/accel/Kconfig
+++ b/drivers/staging/iio/accel/Kconfig
@@ -51,20 +51,6 @@ config ADIS16240
To compile this driver as a module, say M here: the module will be
called adis16240.
-config LIS3L02DQ
- tristate "ST Microelectronics LIS3L02DQ Accelerometer Driver"
- depends on SPI
- select IIO_TRIGGER if IIO_BUFFER
- depends on !IIO_BUFFER || IIO_KFIFO_BUF
- depends on GPIOLIB || COMPILE_TEST
- help
- Say Y here to build SPI support for the ST microelectronics
- accelerometer. The driver supplies direct access via sysfs files
- and an event interface via a character device.
-
- To compile this driver as a module, say M here: the module will be
- called lis3l02dq.
-
config SCA3000
depends on IIO_BUFFER
depends on SPI
diff --git a/drivers/staging/iio/accel/Makefile b/drivers/staging/iio/accel/Makefile
index 415329c96..1810a434a 100644
--- a/drivers/staging/iio/accel/Makefile
+++ b/drivers/staging/iio/accel/Makefile
@@ -14,9 +14,5 @@ obj-$(CONFIG_ADIS16209) += adis16209.o
adis16240-y := adis16240_core.o
obj-$(CONFIG_ADIS16240) += adis16240.o
-lis3l02dq-y := lis3l02dq_core.o
-lis3l02dq-$(CONFIG_IIO_BUFFER) += lis3l02dq_ring.o
-obj-$(CONFIG_LIS3L02DQ) += lis3l02dq.o
-
sca3000-y := sca3000_core.o sca3000_ring.o
obj-$(CONFIG_SCA3000) += sca3000.o
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
index ec1218182..b5625f5d5 100644
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ b/drivers/staging/iio/accel/sca3000_core.c
@@ -774,7 +774,7 @@ static irqreturn_t sca3000_event_handler(int irq, void *private)
struct iio_dev *indio_dev = private;
struct sca3000_state *st = iio_priv(indio_dev);
int ret, val;
- s64 last_timestamp = iio_get_time_ns();
+ s64 last_timestamp = iio_get_time_ns(indio_dev);
/*
* Could lead if badly timed to an extra read of status reg,
@@ -1046,6 +1046,8 @@ static int sca3000_clean_setup(struct sca3000_state *st)
/* Disable ring buffer */
ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
+ if (ret < 0)
+ goto error_ret;
ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL,
(ret & SCA3000_OUT_CTRL_PROT_MASK)
| SCA3000_OUT_CTRL_BUF_X_EN
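
Note: the two added lines in the sca3000 hunk close an unchecked-read hole — sca3000_read_ctrl_reg() returns either the register value or a negative errno, and without the check the errno would be masked and OR-ed straight into the following write. The shape of the fix as a standalone sketch, with simulated register helpers:

#include <stdio.h>

#define PROT_MASK  0xf0 /* stand-in for SCA3000_OUT_CTRL_PROT_MASK */
#define EXTRA_BITS 0x03 /* stand-in for the BUF_*_EN bits */

static int read_reg(void)
{
        return -5;      /* simulate an I/O error (-EIO) */
}

static int setup(void)
{
        int ret = read_reg();

        if (ret < 0)    /* the added check: never reuse an errno as bits */
                return ret;
        printf("write 0x%x\n", (ret & PROT_MASK) | EXTRA_BITS);
        return 0;
}

int main(void)
{
        return setup() < 0 ? 1 : 0;
}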
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index a06b46cb8..2177f1dd2 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -705,7 +705,7 @@ static irqreturn_t ad7280_event_handler(int irq, void *private)
IIO_EV_DIR_RISING,
IIO_EV_TYPE_THRESH,
0, 0, 0),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
else if (((channels[i] >> 11) & 0xFFF) <=
st->cell_threshlow)
iio_push_event(indio_dev,
@@ -715,7 +715,7 @@ static irqreturn_t ad7280_event_handler(int irq, void *private)
IIO_EV_DIR_FALLING,
IIO_EV_TYPE_THRESH,
0, 0, 0),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
} else {
if (((channels[i] >> 11) & 0xFFF) >= st->aux_threshhigh)
iio_push_event(indio_dev,
@@ -724,7 +724,7 @@ static irqreturn_t ad7280_event_handler(int irq, void *private)
0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
else if (((channels[i] >> 11) & 0xFFF) <=
st->aux_threshlow)
iio_push_event(indio_dev,
@@ -733,7 +733,7 @@ static irqreturn_t ad7280_event_handler(int irq, void *private)
0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_FALLING),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
}
}
diff --git a/drivers/staging/iio/adc/ad7606_ring.c b/drivers/staging/iio/adc/ad7606_ring.c
index a6f8eb112..0572df9aa 100644
--- a/drivers/staging/iio/adc/ad7606_ring.c
+++ b/drivers/staging/iio/adc/ad7606_ring.c
@@ -77,7 +77,8 @@ static void ad7606_poll_bh_to_ring(struct work_struct *work_s)
goto done;
}
- iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+ iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_get_time_ns(indio_dev));
done:
gpio_set_value(st->pdata->gpio_convst, 0);
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c
index ac3735c7f..5e8115b01 100644
--- a/drivers/staging/iio/adc/ad7816.c
+++ b/drivers/staging/iio/adc/ad7816.c
@@ -253,7 +253,8 @@ static const struct attribute_group ad7816_attribute_group = {
static irqreturn_t ad7816_event_handler(int irq, void *private)
{
- iio_push_event(private, IIO_EVENT_CODE_AD7816_OTI, iio_get_time_ns());
+ iio_push_event(private, IIO_EVENT_CODE_AD7816_OTI,
+ iio_get_time_ns((struct iio_dev *)private));
return IRQ_HANDLED;
}
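
Note: the iio_get_time_ns() signature change repeated across these staging hunks tracks the 4.8 IIO core, where timestamping gained per-device clock selection; handlers now pass the iio_dev so the core can use the clock chosen for that device. A minimal kernel-context sketch of the updated handler shape (SKETCH_EVENT_CODE is hypothetical; real drivers build one with IIO_UNMOD_EVENT_CODE() or a chip-specific macro as above):

#include <linux/iio/iio.h>
#include <linux/iio/events.h>
#include <linux/interrupt.h>

#define SKETCH_EVENT_CODE IIO_UNMOD_EVENT_CODE(IIO_TEMP, 0, \
                                               IIO_EV_TYPE_THRESH, \
                                               IIO_EV_DIR_RISING)

static irqreturn_t sketch_event_handler(int irq, void *private)
{
        struct iio_dev *indio_dev = private;

        /* the timestamp helper now takes the device, not void */
        iio_push_event(indio_dev, SKETCH_EVENT_CODE,
                       iio_get_time_ns(indio_dev));
        return IRQ_HANDLED;
}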
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index a10e7d8e6..3faffe59c 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -1752,7 +1752,7 @@ static irqreturn_t adt7316_event_handler(int irq, void *private)
if ((chip->id & ID_FAMILY_MASK) != ID_ADT75XX)
stat1 &= 0x1F;
- time = iio_get_time_ns();
+ time = iio_get_time_ns(indio_dev);
if (stat1 & BIT(0))
iio_push_event(indio_dev,
IIO_UNMOD_EVENT_CODE(IIO_TEMP, 0,
@@ -1804,7 +1804,7 @@ static irqreturn_t adt7316_event_handler(int irq, void *private)
0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING),
- iio_get_time_ns());
+ iio_get_time_ns(indio_dev));
}
return IRQ_HANDLED;
diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
index f6b9a1032..5578a077f 100644
--- a/drivers/staging/iio/cdc/ad7150.c
+++ b/drivers/staging/iio/cdc/ad7150.c
@@ -493,7 +493,7 @@ static irqreturn_t ad7150_event_handler(int irq, void *private)
struct iio_dev *indio_dev = private;
struct ad7150_chip_info *chip = iio_priv(indio_dev);
u8 int_status;
- s64 timestamp = iio_get_time_ns();
+ s64 timestamp = iio_get_time_ns(indio_dev);
int ret;
ret = i2c_smbus_read_byte_data(chip->client, AD7150_STATUS);
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 170ac980a..24c348d2f 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -419,6 +419,7 @@ static ssize_t ad5933_store(struct device *dev,
mutex_lock(&indio_dev->mlock);
switch ((u32)this_attr->address) {
case AD5933_OUT_RANGE:
+ ret = -EINVAL;
for (i = 0; i < 4; i++)
if (val == st->range_avail[i]) {
st->ctrl_hb &= ~AD5933_CTRL_RANGE(0x3);
@@ -426,7 +427,6 @@ static ssize_t ad5933_store(struct device *dev,
ret = ad5933_cmd(st, 0);
break;
}
- ret = -EINVAL;
break;
case AD5933_IN_PGA_GAIN:
if (sysfs_streq(buf, "1")) {
diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c
index d553c8e18..ea15bc1c3 100644
--- a/drivers/staging/iio/light/tsl2x7x_core.c
+++ b/drivers/staging/iio/light/tsl2x7x_core.c
@@ -1554,7 +1554,7 @@ static irqreturn_t tsl2x7x_event_handler(int irq, void *private)
{
struct iio_dev *indio_dev = private;
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- s64 timestamp = iio_get_time_ns();
+ s64 timestamp = iio_get_time_ns(indio_dev);
int ret;
u8 value;
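All of the hunks above apply one mechanical change: iio_get_time_ns() now
takes the struct iio_dev, so event and buffer timestamps are taken with
whatever clock has been selected for that device. A minimal sketch of the
resulting handler shape (hypothetical foo_event_handler, illustrative only):

#include <linux/interrupt.h>
#include <linux/iio/iio.h>
#include <linux/iio/events.h>

static irqreturn_t foo_event_handler(int irq, void *private)
{
	struct iio_dev *indio_dev = private;

	/* the timestamp honors the clock configured for this device */
	iio_push_event(indio_dev,
		       IIO_UNMOD_EVENT_CODE(IIO_TEMP, 0,
					    IIO_EV_TYPE_THRESH,
					    IIO_EV_DIR_RISING),
		       iio_get_time_ns(indio_dev));
	return IRQ_HANDLED;
}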
diff --git a/drivers/staging/ks7010/Kconfig b/drivers/staging/ks7010/Kconfig
new file mode 100644
index 000000000..0b9217674
--- /dev/null
+++ b/drivers/staging/ks7010/Kconfig
@@ -0,0 +1,10 @@
+config KS7010
+ tristate "KeyStream KS7010 SDIO support"
+ depends on MMC && WIRELESS
+ select WIRELESS_EXT
+ select WEXT_PRIV
+ select FW_LOADER
+ help
+ This is a driver for KeyStream KS7010 based SDIO WIFI cards. It is
+	  found on at least the later Spectec SDW-821 (FCC-ID "S2Y-WLAN-11G-K"
+	  only, sadly not FCC-ID "S2Y-WLAN-11B-G") and Spectec SDW-823 microSD
+	  cards.
diff --git a/drivers/staging/ks7010/Makefile b/drivers/staging/ks7010/Makefile
new file mode 100644
index 000000000..69fcf8d65
--- /dev/null
+++ b/drivers/staging/ks7010/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_KS7010) += ks7010.o
+
+ccflags-y += -DKS_WLAN_DEBUG=0
+ks7010-y := michael_mic.o ks_hostif.o ks_wlan_net.o ks7010_sdio.o
diff --git a/drivers/staging/ks7010/TODO b/drivers/staging/ks7010/TODO
new file mode 100644
index 000000000..2938d35be
--- /dev/null
+++ b/drivers/staging/ks7010/TODO
@@ -0,0 +1,36 @@
+KS7010 Linux driver
+===================
+
+This driver is based on source code from the Ben Nanonote extra repository [1]
+which is based on the original v007 release from Renesas [2]. Some more
+background info about the chipset can be found here [3] and here [4]. Thank
+you to all who have already participated in cleaning up the driver so far!
+
+[1] http://projects.qi-hardware.com/index.php/p/openwrt-packages/source/tree/master/ks7010/src
+[2] http://downloads.qi-hardware.com/software/ks7010_sdio_v007.tar.bz2
+[3] http://en.qi-hardware.com/wiki/Ben_NanoNote_Wi-Fi
+[4] https://wikidevi.com/wiki/Renesas
+
+TODO
+----
+
+First a few words what not to do (at least not blindly):
+
+- don't be overly strict with the 80 char limit; break lines only where it
+  REALLY makes the code more readable
+- No '#if 0/1' removal unless the surrounding code is understood and removal is
+ really OK. There might be some hints hidden there.
+
+Now the TODOs:
+
+- fix codechecker warnings (checkpatch, sparse, smatch). But PLEASE make sure
+ that you are not only silencing the warning but really fixing code. You
+ should understand the change you submit.
+- fix the 'card removal' event when card is inserted when booting
+- check what other upstream wireless mechanisms can be used instead of the
+ custom ones here
+
+Please send any patches to:
+Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Wolfram Sang <wsa@the-dreams.de>
+Linux Driver Project Developer List <driverdev-devel@linuxdriverproject.org>
diff --git a/drivers/staging/ks7010/eap_packet.h b/drivers/staging/ks7010/eap_packet.h
new file mode 100644
index 000000000..16a392abd
--- /dev/null
+++ b/drivers/staging/ks7010/eap_packet.h
@@ -0,0 +1,129 @@
+#ifndef EAP_PACKET_H
+#define EAP_PACKET_H
+
+#define WBIT(n) (1 << (n))
+
+#ifndef ETH_ALEN
+#define ETH_ALEN 6
+#endif
+
+struct ether_hdr {
+ unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
+ unsigned char h_source[ETH_ALEN]; /* source ether addr */
+ unsigned char h_dest_snap;
+ unsigned char h_source_snap;
+ unsigned char h_command;
+ unsigned char h_vendor_id[3];
+ unsigned short h_proto; /* packet type ID field */
+#define ETHER_PROTOCOL_TYPE_EAP 0x888e
+#define ETHER_PROTOCOL_TYPE_IP 0x0800
+#define ETHER_PROTOCOL_TYPE_ARP 0x0806
+ /* followed by length octets of data */
+} __attribute__ ((packed));
+
+struct ieee802_1x_hdr {
+ unsigned char version;
+ unsigned char type;
+ unsigned short length;
+ /* followed by length octets of data */
+} __attribute__ ((packed));
+
+#define EAPOL_VERSION 2
+
+enum { IEEE802_1X_TYPE_EAP_PACKET = 0,
+ IEEE802_1X_TYPE_EAPOL_START = 1,
+ IEEE802_1X_TYPE_EAPOL_LOGOFF = 2,
+ IEEE802_1X_TYPE_EAPOL_KEY = 3,
+ IEEE802_1X_TYPE_EAPOL_ENCAPSULATED_ASF_ALERT = 4
+};
+
+enum { EAPOL_KEY_TYPE_RC4 = 1, EAPOL_KEY_TYPE_RSN = 2,
+ EAPOL_KEY_TYPE_WPA = 254
+};
+
+#define IEEE8021X_REPLAY_COUNTER_LEN 8
+#define IEEE8021X_KEY_SIGN_LEN 16
+#define IEEE8021X_KEY_IV_LEN 16
+
+#define IEEE8021X_KEY_INDEX_FLAG 0x80
+#define IEEE8021X_KEY_INDEX_MASK 0x03
+
+struct ieee802_1x_eapol_key {
+ unsigned char type;
+ unsigned short key_length;
+ /* does not repeat within the life of the keying material used to
+ * encrypt the Key field; 64-bit NTP timestamp MAY be used here */
+ unsigned char replay_counter[IEEE8021X_REPLAY_COUNTER_LEN];
+ unsigned char key_iv[IEEE8021X_KEY_IV_LEN]; /* cryptographically random number */
+ unsigned char key_index; /* key flag in the most significant bit:
+ * 0 = broadcast (default key),
+ * 1 = unicast (key mapping key); key index is in the
+ * 7 least significant bits */
+ /* HMAC-MD5 message integrity check computed with MS-MPPE-Send-Key as
+ * the key */
+ unsigned char key_signature[IEEE8021X_KEY_SIGN_LEN];
+
+ /* followed by key: if packet body length = 44 + key length, then the
+ * key field (of key_length bytes) contains the key in encrypted form;
+ * if packet body length = 44, key field is absent and key_length
+ * represents the number of least significant octets from
+ * MS-MPPE-Send-Key attribute to be used as the keying material;
+ * RC4 key used in encryption = Key-IV + MS-MPPE-Recv-Key */
+} __attribute__ ((packed));
+
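+/* Illustrative sketch (not in the original header): the 44-byte figure in
+ * the comment above is the fixed descriptor size (1 + 2 + 8 + 16 + 1 + 16
+ * bytes), so a parser can decide whether key material is carried inline.
+ * body_len is the length field taken from the preceding ieee802_1x_hdr.
+ */
+static inline int ieee802_1x_key_is_inline(unsigned short body_len,
+					   unsigned short key_length)
+{
+	return body_len == 44 + key_length;
+}
+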
+#define WPA_NONCE_LEN 32
+#define WPA_REPLAY_COUNTER_LEN 8
+
+struct wpa_eapol_key {
+ unsigned char type;
+ unsigned short key_info;
+ unsigned short key_length;
+ unsigned char replay_counter[WPA_REPLAY_COUNTER_LEN];
+ unsigned char key_nonce[WPA_NONCE_LEN];
+ unsigned char key_iv[16];
+ unsigned char key_rsc[8];
+ unsigned char key_id[8]; /* Reserved in IEEE 802.11i/RSN */
+ unsigned char key_mic[16];
+ unsigned short key_data_length;
+ /* followed by key_data_length bytes of key_data */
+} __attribute__ ((packed));
+
+#define WPA_KEY_INFO_TYPE_MASK (WBIT(0) | WBIT(1) | WBIT(2))
+#define WPA_KEY_INFO_TYPE_HMAC_MD5_RC4 WBIT(0)
+#define WPA_KEY_INFO_TYPE_HMAC_SHA1_AES WBIT(1)
+#define WPA_KEY_INFO_KEY_TYPE WBIT(3) /* 1 = Pairwise, 0 = Group key */
+/* bit4..5 is used in WPA, but is reserved in IEEE 802.11i/RSN */
+#define WPA_KEY_INFO_KEY_INDEX_MASK (WBIT(4) | WBIT(5))
+#define WPA_KEY_INFO_KEY_INDEX_SHIFT 4
+#define WPA_KEY_INFO_INSTALL WBIT(6) /* pairwise */
+#define WPA_KEY_INFO_TXRX WBIT(6) /* group */
+#define WPA_KEY_INFO_ACK WBIT(7)
+#define WPA_KEY_INFO_MIC WBIT(8)
+#define WPA_KEY_INFO_SECURE WBIT(9)
+#define WPA_KEY_INFO_ERROR WBIT(10)
+#define WPA_KEY_INFO_REQUEST WBIT(11)
+#define WPA_KEY_INFO_ENCR_KEY_DATA WBIT(12) /* IEEE 802.11i/RSN only */
+
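+/* Illustrative helper (not in the original header): the key_info flags
+ * above are consumed with plain mask/shift operations; e.g. the WPA key
+ * index occupies bits 4..5.
+ */
+static inline int wpa_key_info_index(unsigned short key_info)
+{
+	return (key_info & WPA_KEY_INFO_KEY_INDEX_MASK)
+			>> WPA_KEY_INFO_KEY_INDEX_SHIFT;
+}
+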
+#define WPA_CAPABILITY_PREAUTH WBIT(0)
+
+#define GENERIC_INFO_ELEM 0xdd
+#define RSN_INFO_ELEM 0x30
+
+enum {
+ REASON_UNSPECIFIED = 1,
+ REASON_DEAUTH_LEAVING = 3,
+ REASON_INVALID_IE = 13,
+ REASON_MICHAEL_MIC_FAILURE = 14,
+ REASON_4WAY_HANDSHAKE_TIMEOUT = 15,
+ REASON_GROUP_KEY_UPDATE_TIMEOUT = 16,
+ REASON_IE_IN_4WAY_DIFFERS = 17,
+ REASON_GROUP_CIPHER_NOT_VALID = 18,
+ REASON_PAIRWISE_CIPHER_NOT_VALID = 19,
+ REASON_AKMP_NOT_VALID = 20,
+ REASON_UNSUPPORTED_RSN_IE_VERSION = 21,
+ REASON_INVALID_RSN_IE_CAPAB = 22,
+ REASON_IEEE_802_1X_AUTH_FAILED = 23,
+ REASON_CIPHER_SUITE_REJECTED = 24
+};
+
+#endif /* EAP_PACKET_H */
diff --git a/drivers/staging/ks7010/ks7010_sdio.c b/drivers/staging/ks7010/ks7010_sdio.c
new file mode 100644
index 000000000..aa4d7fe28
--- /dev/null
+++ b/drivers/staging/ks7010/ks7010_sdio.c
@@ -0,0 +1,1236 @@
+/*
+ * Driver for KeyStream, KS7010 based SDIO cards.
+ *
+ * Copyright (C) 2006-2008 KeyStream Corp.
+ * Copyright (C) 2009 Renesas Technology Corp.
+ * Copyright (C) 2016 Sang Engineering, Wolfram Sang
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/firmware.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/workqueue.h>
+#include <linux/atomic.h>
+
+#include "ks_wlan.h"
+#include "ks_wlan_ioctl.h"
+#include "ks_hostif.h"
+#include "ks7010_sdio.h"
+
+#define KS7010_FUNC_NUM 1
+#define KS7010_IO_BLOCK_SIZE 512
+#define KS7010_MAX_CLOCK 25000000
+
+static const struct sdio_device_id ks7010_sdio_ids[] = {
+ {SDIO_DEVICE(SDIO_VENDOR_ID_KS_CODE_A, SDIO_DEVICE_ID_KS_7010)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_KS_CODE_B, SDIO_DEVICE_ID_KS_7010)},
+ { /* all zero */ }
+};
+MODULE_DEVICE_TABLE(sdio, ks7010_sdio_ids);
+
+/* macro */
+
+#define inc_txqhead(priv) \
+ ( priv->tx_dev.qhead = (priv->tx_dev.qhead + 1) % TX_DEVICE_BUFF_SIZE )
+#define inc_txqtail(priv) \
+ ( priv->tx_dev.qtail = (priv->tx_dev.qtail + 1) % TX_DEVICE_BUFF_SIZE )
+#define cnt_txqbody(priv) \
+ (((priv->tx_dev.qtail + TX_DEVICE_BUFF_SIZE) - (priv->tx_dev.qhead)) % TX_DEVICE_BUFF_SIZE )
+
+#define inc_rxqhead(priv) \
+ ( priv->rx_dev.qhead = (priv->rx_dev.qhead + 1) % RX_DEVICE_BUFF_SIZE )
+#define inc_rxqtail(priv) \
+ ( priv->rx_dev.qtail = (priv->rx_dev.qtail + 1) % RX_DEVICE_BUFF_SIZE )
+#define cnt_rxqbody(priv) \
+ (((priv->rx_dev.qtail + RX_DEVICE_BUFF_SIZE) - (priv->rx_dev.qhead)) % RX_DEVICE_BUFF_SIZE )
+
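+/*
+ * Illustrative only (not used by the driver): the cnt_*qbody() macros above
+ * compute ring occupancy as (qtail - qhead) mod SIZE; adding SIZE before
+ * subtracting keeps the intermediate value non-negative when the tail has
+ * wrapped, e.g. head=30, tail=2, size=32 -> ((2 + 32) - 30) % 32 = 4.
+ */
+static inline unsigned int ring_count(unsigned int head, unsigned int tail,
+				      unsigned int size)
+{
+	return ((tail + size) - head) % size;
+}
+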
+static int ks7010_sdio_read(struct ks_wlan_private *priv, unsigned int address,
+ unsigned char *buffer, int length)
+{
+ struct ks_sdio_card *card;
+ int rc;
+
+ card = priv->ks_wlan_hw.sdio_card;
+
+ if (length == 1) /* CMD52 */
+ *buffer = sdio_readb(card->func, address, &rc);
+ else /* CMD53 multi-block transfer */
+ rc = sdio_memcpy_fromio(card->func, buffer, address, length);
+
+ if (rc != 0)
+ DPRINTK(1, "sdio error=%d size=%d\n", rc, length);
+
+ return rc;
+}
+
+static int ks7010_sdio_write(struct ks_wlan_private *priv, unsigned int address,
+ unsigned char *buffer, int length)
+{
+ struct ks_sdio_card *card;
+ int rc;
+
+ card = priv->ks_wlan_hw.sdio_card;
+
+ if (length == 1) /* CMD52 */
+ sdio_writeb(card->func, *buffer, (unsigned int)address, &rc);
+ else /* CMD53 */
+ rc = sdio_memcpy_toio(card->func, (unsigned int)address, buffer,
+ length);
+
+ if (rc != 0)
+ DPRINTK(1, "sdio error=%d size=%d\n", rc, length);
+
+ return rc;
+}
+
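+/*
+ * Illustrative only (hypothetical helper): both accessors below dispatch on
+ * length, so a one-byte register access becomes an SDIO CMD52 byte
+ * transaction while anything larger goes out as a CMD53 block transfer.
+ * A typical single-register read therefore looks like this:
+ */
+static int example_read_int_pending(struct ks_wlan_private *priv,
+				    unsigned char *status)
+{
+	/* length 1 -> sdio_readb() (CMD52) on the interrupt-pending register */
+	return ks7010_sdio_read(priv, INT_PENDING, status, sizeof(*status));
+}
+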
+void ks_wlan_hw_sleep_doze_request(struct ks_wlan_private *priv)
+{
+ unsigned char rw_data;
+ int retval;
+
+ DPRINTK(4, "\n");
+
+ /* clear request */
+ atomic_set(&priv->sleepstatus.doze_request, 0);
+
+ if (atomic_read(&priv->sleepstatus.status) == 0) {
+ rw_data = GCR_B_DOZE;
+ retval =
+ ks7010_sdio_write(priv, GCR_B, &rw_data, sizeof(rw_data));
+ if (retval) {
+ DPRINTK(1, " error : GCR_B=%02X\n", rw_data);
+ goto out;
+ }
+ DPRINTK(4, "PMG SET!! : GCR_B=%02X\n", rw_data);
+ DPRINTK(3, "sleep_mode=SLP_SLEEP\n");
+ atomic_set(&priv->sleepstatus.status, 1);
+ priv->last_doze = jiffies;
+ } else {
+ DPRINTK(1, "sleep_mode=%d\n", priv->sleep_mode);
+ }
+
+ out:
+ priv->sleep_mode = atomic_read(&priv->sleepstatus.status);
+ return;
+}
+
+void ks_wlan_hw_sleep_wakeup_request(struct ks_wlan_private *priv)
+{
+ unsigned char rw_data;
+ int retval;
+
+ DPRINTK(4, "\n");
+
+ /* clear request */
+ atomic_set(&priv->sleepstatus.wakeup_request, 0);
+
+ if (atomic_read(&priv->sleepstatus.status) == 1) {
+ rw_data = WAKEUP_REQ;
+ retval =
+ ks7010_sdio_write(priv, WAKEUP, &rw_data, sizeof(rw_data));
+ if (retval) {
+ DPRINTK(1, " error : WAKEUP=%02X\n", rw_data);
+ goto out;
+ }
+ DPRINTK(4, "wake up : WAKEUP=%02X\n", rw_data);
+ atomic_set(&priv->sleepstatus.status, 0);
+ priv->last_wakeup = jiffies;
+ ++priv->wakeup_count;
+ } else {
+ DPRINTK(1, "sleep_mode=%d\n", priv->sleep_mode);
+ }
+
+ out:
+ priv->sleep_mode = atomic_read(&priv->sleepstatus.status);
+ return;
+}
+
+void ks_wlan_hw_wakeup_request(struct ks_wlan_private *priv)
+{
+ unsigned char rw_data;
+ int retval;
+
+ DPRINTK(4, "\n");
+ if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
+ rw_data = WAKEUP_REQ;
+ retval =
+ ks7010_sdio_write(priv, WAKEUP, &rw_data, sizeof(rw_data));
+ if (retval) {
+ DPRINTK(1, " error : WAKEUP=%02X\n", rw_data);
+ }
+ DPRINTK(4, "wake up : WAKEUP=%02X\n", rw_data);
+ priv->last_wakeup = jiffies;
+ ++priv->wakeup_count;
+ } else {
+ DPRINTK(1, "psstatus=%d\n",
+ atomic_read(&priv->psstatus.status));
+ }
+}
+
+int _ks_wlan_hw_power_save(struct ks_wlan_private *priv)
+{
+ int rc = 0;
+ unsigned char rw_data;
+ int retval;
+
+ if (priv->reg.powermgt == POWMGT_ACTIVE_MODE)
+ return rc;
+
+ if (priv->reg.operation_mode == MODE_INFRASTRUCTURE &&
+ (priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+
+ //DPRINTK(1,"psstatus.status=%d\n",atomic_read(&priv->psstatus.status));
+ if (priv->dev_state == DEVICE_STATE_SLEEP) {
+ switch (atomic_read(&priv->psstatus.status)) {
+ case PS_SNOOZE: /* 4 */
+ break;
+ default:
+ DPRINTK(5, "\npsstatus.status=%d\npsstatus.confirm_wait=%d\npsstatus.snooze_guard=%d\ncnt_txqbody=%d\n",
+ atomic_read(&priv->psstatus.status),
+ atomic_read(&priv->psstatus.confirm_wait),
+ atomic_read(&priv->psstatus.snooze_guard),
+ cnt_txqbody(priv));
+
+ if (!atomic_read(&priv->psstatus.confirm_wait)
+ && !atomic_read(&priv->psstatus.snooze_guard)
+ && !cnt_txqbody(priv)) {
+ retval =
+ ks7010_sdio_read(priv, INT_PENDING,
+ &rw_data,
+ sizeof(rw_data));
+ if (retval) {
+ DPRINTK(1,
+ " error : INT_PENDING=%02X\n",
+ rw_data);
+ queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
+ &priv->ks_wlan_hw.rw_wq, 1);
+ break;
+ }
+ if (!rw_data) {
+ rw_data = GCR_B_DOZE;
+ retval =
+ ks7010_sdio_write(priv,
+ GCR_B,
+ &rw_data,
+ sizeof(rw_data));
+ if (retval) {
+ DPRINTK(1,
+ " error : GCR_B=%02X\n",
+ rw_data);
+ queue_delayed_work
+ (priv->ks_wlan_hw.ks7010sdio_wq,
+ &priv->ks_wlan_hw.rw_wq, 1);
+ break;
+ }
+ DPRINTK(4,
+ "PMG SET!! : GCR_B=%02X\n",
+ rw_data);
+ atomic_set(&priv->psstatus.
+ status, PS_SNOOZE);
+ DPRINTK(3,
+ "psstatus.status=PS_SNOOZE\n");
+ } else {
+ queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
+ &priv->ks_wlan_hw.rw_wq, 1);
+ }
+ } else {
+ queue_delayed_work(priv->ks_wlan_hw.
+ ks7010sdio_wq,
+ &priv->ks_wlan_hw.rw_wq,
+ 0);
+ }
+ break;
+ }
+ }
+
+ }
+
+ return rc;
+}
+
+int ks_wlan_hw_power_save(struct ks_wlan_private *priv)
+{
+ queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
+ &priv->ks_wlan_hw.rw_wq, 1);
+ return 0;
+}
+
+static int enqueue_txdev(struct ks_wlan_private *priv, unsigned char *p,
+ unsigned long size,
+ void (*complete_handler) (void *arg1, void *arg2),
+ void *arg1, void *arg2)
+{
+ struct tx_device_buffer *sp;
+
+ if (priv->dev_state < DEVICE_STATE_BOOT) {
+ kfree(p);
+ if (complete_handler != NULL)
+ (*complete_handler) (arg1, arg2);
+ return 1;
+ }
+
+ if ((TX_DEVICE_BUFF_SIZE - 1) <= cnt_txqbody(priv)) {
+ /* in case of buffer overflow */
+ DPRINTK(1, "tx buffer overflow\n");
+ kfree(p);
+ if (complete_handler != NULL)
+ (*complete_handler) (arg1, arg2);
+ return 1;
+ }
+
+ sp = &priv->tx_dev.tx_dev_buff[priv->tx_dev.qtail];
+ sp->sendp = p;
+ sp->size = size;
+ sp->complete_handler = complete_handler;
+ sp->arg1 = arg1;
+ sp->arg2 = arg2;
+ inc_txqtail(priv);
+
+ return 0;
+}
+
+/* write data */
+static int write_to_device(struct ks_wlan_private *priv, unsigned char *buffer,
+ unsigned long size)
+{
+ int rc, retval;
+ unsigned char rw_data;
+ struct hostif_hdr *hdr;
+ hdr = (struct hostif_hdr *)buffer;
+ rc = 0;
+
+ DPRINTK(4, "size=%d\n", hdr->size);
+ if (hdr->event < HIF_DATA_REQ || HIF_REQ_MAX < hdr->event) {
+ DPRINTK(1, "unknown event=%04X\n", hdr->event);
+ return 0;
+ }
+
+ retval = ks7010_sdio_write(priv, DATA_WINDOW, buffer, size);
+ if (retval) {
+ DPRINTK(1, " write error : retval=%d\n", retval);
+ return -4;
+ }
+
+ rw_data = WRITE_STATUS_BUSY;
+ retval =
+ ks7010_sdio_write(priv, WRITE_STATUS, &rw_data, sizeof(rw_data));
+ if (retval) {
+ DPRINTK(1, " error : WRITE_STATUS=%02X\n", rw_data);
+ return -3;
+ }
+
+ return 0;
+}
+
+static void tx_device_task(void *dev)
+{
+ struct ks_wlan_private *priv = (struct ks_wlan_private *)dev;
+ struct tx_device_buffer *sp;
+ int rc = 0;
+
+ DPRINTK(4, "\n");
+ if (cnt_txqbody(priv) > 0
+ && atomic_read(&priv->psstatus.status) != PS_SNOOZE) {
+ sp = &priv->tx_dev.tx_dev_buff[priv->tx_dev.qhead];
+ if (priv->dev_state >= DEVICE_STATE_BOOT) {
+ rc = write_to_device(priv, sp->sendp, sp->size);
+ if (rc) {
+ DPRINTK(1, "write_to_device error !!(%d)\n",
+ rc);
+ queue_delayed_work(priv->ks_wlan_hw.
+ ks7010sdio_wq,
+ &priv->ks_wlan_hw.rw_wq, 1);
+ return;
+ }
+
+ }
+ kfree(sp->sendp); /* allocated memory free */
+ if (sp->complete_handler != NULL) /* TX Complete */
+ (*sp->complete_handler) (sp->arg1, sp->arg2);
+ inc_txqhead(priv);
+
+ if (cnt_txqbody(priv) > 0) {
+ queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
+ &priv->ks_wlan_hw.rw_wq, 0);
+ }
+ }
+ return;
+}
+
+int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p, unsigned long size,
+ void (*complete_handler) (void *arg1, void *arg2),
+ void *arg1, void *arg2)
+{
+ int result = 0;
+ struct hostif_hdr *hdr;
+ hdr = (struct hostif_hdr *)p;
+
+ if (hdr->event < HIF_DATA_REQ || HIF_REQ_MAX < hdr->event) {
+ DPRINTK(1, "unknown event=%04X\n", hdr->event);
+ return 0;
+ }
+
+ /* add event to hostt buffer */
+ priv->hostt.buff[priv->hostt.qtail] = hdr->event;
+ priv->hostt.qtail = (priv->hostt.qtail + 1) % SME_EVENT_BUFF_SIZE;
+
+ DPRINTK(4, "event=%04X\n", hdr->event);
+ spin_lock(&priv->tx_dev.tx_dev_lock);
+ result = enqueue_txdev(priv, p, size, complete_handler, arg1, arg2);
+ spin_unlock(&priv->tx_dev.tx_dev_lock);
+
+ if (cnt_txqbody(priv) > 0) {
+ queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
+ &priv->ks_wlan_hw.rw_wq, 0);
+ }
+ return result;
+}
+
+static void rx_event_task(unsigned long dev)
+{
+ struct ks_wlan_private *priv = (struct ks_wlan_private *)dev;
+ struct rx_device_buffer *rp;
+
+ DPRINTK(4, "\n");
+
+ if (cnt_rxqbody(priv) > 0 && priv->dev_state >= DEVICE_STATE_BOOT) {
+ rp = &priv->rx_dev.rx_dev_buff[priv->rx_dev.qhead];
+ hostif_receive(priv, rp->data, rp->size);
+ inc_rxqhead(priv);
+
+ if (cnt_rxqbody(priv) > 0) {
+ tasklet_schedule(&priv->ks_wlan_hw.rx_bh_task);
+ }
+ }
+
+ return;
+}
+
+static void ks_wlan_hw_rx(void *dev, uint16_t size)
+{
+ struct ks_wlan_private *priv = (struct ks_wlan_private *)dev;
+ int retval;
+ struct rx_device_buffer *rx_buffer;
+ struct hostif_hdr *hdr;
+ unsigned char read_status;
+ unsigned short event = 0;
+
+ DPRINTK(4, "\n");
+
+ /* receive data */
+ if (cnt_rxqbody(priv) >= (RX_DEVICE_BUFF_SIZE - 1)) {
+ /* in case of buffer overflow */
+		DPRINTK(1, "rx buffer overflow\n");
+ goto error_out;
+ }
+ rx_buffer = &priv->rx_dev.rx_dev_buff[priv->rx_dev.qtail];
+
+ retval =
+ ks7010_sdio_read(priv, DATA_WINDOW, &rx_buffer->data[0],
+ hif_align_size(size));
+ if (retval) {
+ goto error_out;
+ }
+
+ /* length check */
+ if (size > 2046 || size == 0) {
+#ifdef KS_WLAN_DEBUG
+ if (KS_WLAN_DEBUG > 5)
+ print_hex_dump_bytes("INVALID DATA dump: ",
+ DUMP_PREFIX_OFFSET,
+ rx_buffer->data, 32);
+#endif
+ /* rx_status update */
+ read_status = READ_STATUS_IDLE;
+ retval =
+ ks7010_sdio_write(priv, READ_STATUS, &read_status,
+ sizeof(read_status));
+ if (retval) {
+ DPRINTK(1, " error : READ_STATUS=%02X\n", read_status);
+ }
+ goto error_out;
+ }
+
+ hdr = (struct hostif_hdr *)&rx_buffer->data[0];
+ rx_buffer->size = le16_to_cpu(hdr->size) + sizeof(hdr->size);
+ event = hdr->event;
+ inc_rxqtail(priv);
+
+ /* read status update */
+ read_status = READ_STATUS_IDLE;
+ retval =
+ ks7010_sdio_write(priv, READ_STATUS, &read_status,
+ sizeof(read_status));
+ if (retval) {
+ DPRINTK(1, " error : READ_STATUS=%02X\n", read_status);
+ }
+ DPRINTK(4, "READ_STATUS=%02X\n", read_status);
+
+ if (atomic_read(&priv->psstatus.confirm_wait)) {
+ if (IS_HIF_CONF(event)) {
+ DPRINTK(4, "IS_HIF_CONF true !!\n");
+ atomic_dec(&priv->psstatus.confirm_wait);
+ }
+ }
+
+ /* rx_event_task((void *)priv); */
+ tasklet_schedule(&priv->ks_wlan_hw.rx_bh_task);
+
+ error_out:
+ return;
+}
+
+static void ks7010_rw_function(struct work_struct *work)
+{
+ struct hw_info_t *hw;
+ struct ks_wlan_private *priv;
+ unsigned char rw_data;
+ int retval;
+
+ hw = container_of(work, struct hw_info_t, rw_wq.work);
+ priv = container_of(hw, struct ks_wlan_private, ks_wlan_hw);
+
+ DPRINTK(4, "\n");
+
+	/* wait after DOZE */
+	if (time_after(priv->last_doze + ((30 * HZ) / 1000), jiffies)) {
+		DPRINTK(4, "wait after DOZE\n");
+ queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
+ &priv->ks_wlan_hw.rw_wq, 1);
+ return;
+ }
+
+	/* wait after WAKEUP */
+	while (time_after(priv->last_wakeup + ((30 * HZ) / 1000), jiffies)) {
+		DPRINTK(4, "wait after WAKEUP\n");
+/* queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,&priv->ks_wlan_hw.rw_wq,
+ (priv->last_wakeup + ((30*HZ)/1000) - jiffies));*/
+		printk(KERN_DEBUG "wake: %lu %lu\n",
+		       priv->last_wakeup + (30 * HZ) / 1000, jiffies);
+ msleep(30);
+ }
+
+ sdio_claim_host(priv->ks_wlan_hw.sdio_card->func);
+
+ /* power save wakeup */
+ if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
+ if (cnt_txqbody(priv) > 0) {
+ ks_wlan_hw_wakeup_request(priv);
+ queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
+ &priv->ks_wlan_hw.rw_wq, 1);
+ }
+ goto err_out;
+ }
+
+ /* sleep mode doze */
+ if (atomic_read(&priv->sleepstatus.doze_request) == 1) {
+ ks_wlan_hw_sleep_doze_request(priv);
+ goto err_out;
+ }
+ /* sleep mode wakeup */
+ if (atomic_read(&priv->sleepstatus.wakeup_request) == 1) {
+ ks_wlan_hw_sleep_wakeup_request(priv);
+ goto err_out;
+ }
+
+ /* read (WriteStatus/ReadDataSize FN1:00_0014) */
+ retval =
+ ks7010_sdio_read(priv, WSTATUS_RSIZE, &rw_data, sizeof(rw_data));
+ if (retval) {
+ DPRINTK(1, " error : WSTATUS_RSIZE=%02X psstatus=%d\n", rw_data,
+ atomic_read(&priv->psstatus.status));
+ goto err_out;
+ }
+ DPRINTK(4, "WSTATUS_RSIZE=%02X\n", rw_data);
+
+ if (rw_data & RSIZE_MASK) { /* Read schedule */
+ ks_wlan_hw_rx((void *)priv,
+ (uint16_t) (((rw_data & RSIZE_MASK) << 4)));
+ }
+ if ((rw_data & WSTATUS_MASK)) {
+ tx_device_task((void *)priv);
+ }
+ _ks_wlan_hw_power_save(priv);
+
+ err_out:
+ sdio_release_host(priv->ks_wlan_hw.sdio_card->func);
+
+ return;
+}
+
+static void ks_sdio_interrupt(struct sdio_func *func)
+{
+ int retval;
+ struct ks_sdio_card *card;
+ struct ks_wlan_private *priv;
+ unsigned char status, rsize, rw_data;
+
+ card = sdio_get_drvdata(func);
+ priv = card->priv;
+ DPRINTK(4, "\n");
+
+ if (priv->dev_state >= DEVICE_STATE_BOOT) {
+ retval =
+ ks7010_sdio_read(priv, INT_PENDING, &status,
+ sizeof(status));
+ if (retval) {
+ DPRINTK(1, "read INT_PENDING Failed!!(%d)\n", retval);
+ goto intr_out;
+ }
+		DPRINTK(4, "INT_PENDING=%02X\n", status);
+
+ /* schedule task for interrupt status */
+ /* bit7 -> Write General Communication B register */
+ /* read (General Communication B register) */
+ /* bit5 -> Write Status Idle */
+ /* bit2 -> Read Status Busy */
+ if (status & INT_GCR_B
+ || atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
+ retval =
+ ks7010_sdio_read(priv, GCR_B, &rw_data,
+ sizeof(rw_data));
+ if (retval) {
+ DPRINTK(1, " error : GCR_B=%02X\n", rw_data);
+ goto intr_out;
+ }
+ /* DPRINTK(1, "GCR_B=%02X\n", rw_data); */
+ if (rw_data == GCR_B_ACTIVE) {
+ if (atomic_read(&priv->psstatus.status) ==
+ PS_SNOOZE) {
+ atomic_set(&priv->psstatus.status,
+ PS_WAKEUP);
+ priv->wakeup_count = 0;
+ }
+ complete(&priv->psstatus.wakeup_wait);
+ }
+
+ }
+
+ do {
+ /* read (WriteStatus/ReadDataSize FN1:00_0014) */
+ retval =
+ ks7010_sdio_read(priv, WSTATUS_RSIZE, &rw_data,
+ sizeof(rw_data));
+ if (retval) {
+ DPRINTK(1, " error : WSTATUS_RSIZE=%02X\n",
+ rw_data);
+ goto intr_out;
+ }
+ DPRINTK(4, "WSTATUS_RSIZE=%02X\n", rw_data);
+ rsize = rw_data & RSIZE_MASK;
+ if (rsize) { /* Read schedule */
+ ks_wlan_hw_rx((void *)priv,
+ (uint16_t) (((rsize) << 4)));
+ }
+ if (rw_data & WSTATUS_MASK) {
+#if 0
+ if (status & INT_WRITE_STATUS
+ && !cnt_txqbody(priv)) {
+ /* dummy write for interrupt clear */
+ rw_data = 0;
+ retval =
+ ks7010_sdio_write(priv, DATA_WINDOW,
+ &rw_data,
+ sizeof(rw_data));
+ if (retval) {
+ DPRINTK(1,
+ "write DATA_WINDOW Failed!!(%d)\n",
+ retval);
+ }
+ status &= ~INT_WRITE_STATUS;
+ } else {
+#endif
+ if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
+ if (cnt_txqbody(priv)) {
+ ks_wlan_hw_wakeup_request(priv);
+ queue_delayed_work
+ (priv->ks_wlan_hw.
+ ks7010sdio_wq,
+ &priv->ks_wlan_hw.
+ rw_wq, 1);
+ return;
+ }
+ } else {
+ tx_device_task((void *)priv);
+ }
+#if 0
+ }
+#endif
+ }
+ } while (rsize);
+ }
+
+ intr_out:
+ queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
+ &priv->ks_wlan_hw.rw_wq, 0);
+ return;
+}
+
+static int trx_device_init(struct ks_wlan_private *priv)
+{
+ /* initialize values (tx) */
+ priv->tx_dev.qtail = priv->tx_dev.qhead = 0;
+
+ /* initialize values (rx) */
+ priv->rx_dev.qtail = priv->rx_dev.qhead = 0;
+
+ /* initialize spinLock (tx,rx) */
+ spin_lock_init(&priv->tx_dev.tx_dev_lock);
+ spin_lock_init(&priv->rx_dev.rx_dev_lock);
+
+ tasklet_init(&priv->ks_wlan_hw.rx_bh_task, rx_event_task,
+ (unsigned long)priv);
+
+ return 0;
+}
+
+static void trx_device_exit(struct ks_wlan_private *priv)
+{
+ struct tx_device_buffer *sp;
+
+ /* tx buffer clear */
+ while (cnt_txqbody(priv) > 0) {
+ sp = &priv->tx_dev.tx_dev_buff[priv->tx_dev.qhead];
+ kfree(sp->sendp); /* allocated memory free */
+ if (sp->complete_handler != NULL) /* TX Complete */
+ (*sp->complete_handler) (sp->arg1, sp->arg2);
+ inc_txqhead(priv);
+ }
+
+ tasklet_kill(&priv->ks_wlan_hw.rx_bh_task);
+
+ return;
+}
+
+static int ks7010_sdio_update_index(struct ks_wlan_private *priv, u32 index)
+{
+ int rc = 0;
+ int retval;
+ unsigned char *data_buf;
+
+ data_buf = kmalloc(sizeof(u32), GFP_KERNEL);
+ if (!data_buf) {
+ rc = 1;
+ goto error_out;
+ }
+
+ memcpy(data_buf, &index, sizeof(index));
+ retval = ks7010_sdio_write(priv, WRITE_INDEX, data_buf, sizeof(index));
+ if (retval) {
+ rc = 2;
+ goto error_out;
+ }
+
+ retval = ks7010_sdio_write(priv, READ_INDEX, data_buf, sizeof(index));
+ if (retval) {
+ rc = 3;
+ goto error_out;
+ }
+ error_out:
+	kfree(data_buf);
+ return rc;
+}
+
+#define ROM_BUFF_SIZE (64*1024)
+static int ks7010_sdio_data_compare(struct ks_wlan_private *priv, u32 address,
+ unsigned char *data, unsigned int size)
+{
+ int rc = 0;
+ int retval;
+ unsigned char *read_buf;
+ read_buf = kmalloc(ROM_BUFF_SIZE, GFP_KERNEL);
+ if (!read_buf) {
+ rc = 1;
+ goto error_out;
+ }
+ retval = ks7010_sdio_read(priv, address, read_buf, size);
+ if (retval) {
+ rc = 2;
+ goto error_out;
+ }
+ retval = memcmp(data, read_buf, size);
+
+ if (retval) {
+		DPRINTK(0, "data compare error (%d)\n", retval);
+ rc = 3;
+ goto error_out;
+ }
+ error_out:
+	kfree(read_buf);
+ return rc;
+}
+
+static int ks7010_upload_firmware(struct ks_wlan_private *priv,
+ struct ks_sdio_card *card)
+{
+ unsigned int size, offset, n = 0;
+ unsigned char *rom_buf;
+ unsigned char rw_data = 0;
+ int retval, rc = 0;
+ int length;
+ const struct firmware *fw_entry = NULL;
+
+ /* buffer allocate */
+ rom_buf = kmalloc(ROM_BUFF_SIZE, GFP_KERNEL);
+ if (!rom_buf) {
+ rc = 3;
+ goto error_out0;
+ }
+
+ sdio_claim_host(card->func);
+
+ /* Firmware running ? */
+ retval = ks7010_sdio_read(priv, GCR_A, &rw_data, sizeof(rw_data));
+ if (rw_data == GCR_A_RUN) {
+ DPRINTK(0, "MAC firmware running ...\n");
+ rc = 0;
+ goto error_out0;
+ }
+
+	retval = reject_firmware(&fw_entry, ROM_FILE,
+				 &priv->ks_wlan_hw.sdio_card->func->dev);
+	if (retval) {
+		rc = retval;	/* don't leak rom_buf or leave the host claimed */
+		goto error_out0;
+	}
+
+ length = fw_entry->size;
+
+ /* Load Program */
+ n = 0;
+ do {
+ if (length >= ROM_BUFF_SIZE) {
+ size = ROM_BUFF_SIZE;
+ length = length - ROM_BUFF_SIZE;
+ } else {
+ size = length;
+ length = 0;
+ }
+ DPRINTK(4, "size = %d\n", size);
+ if (size == 0)
+ break;
+ memcpy(rom_buf, fw_entry->data + n, size);
+ /* Update write index */
+ offset = n;
+ retval =
+ ks7010_sdio_update_index(priv,
+ KS7010_IRAM_ADDRESS + offset);
+ if (retval) {
+ rc = 6;
+ goto error_out1;
+ }
+
+ /* Write data */
+ retval = ks7010_sdio_write(priv, DATA_WINDOW, rom_buf, size);
+ if (retval) {
+ rc = 8;
+ goto error_out1;
+ }
+
+ /* compare */
+ retval =
+ ks7010_sdio_data_compare(priv, DATA_WINDOW, rom_buf, size);
+ if (retval) {
+ rc = 9;
+ goto error_out1;
+ }
+ n += size;
+
+ } while (size);
+
+ /* Remap request */
+ rw_data = GCR_A_REMAP;
+ retval = ks7010_sdio_write(priv, GCR_A, &rw_data, sizeof(rw_data));
+ if (retval) {
+ rc = 11;
+ goto error_out1;
+ }
+ DPRINTK(4, " REMAP Request : GCR_A=%02X\n", rw_data);
+
+ /* Firmware running check */
+ for (n = 0; n < 50; ++n) {
+ mdelay(10); /* wait_ms(10); */
+ retval =
+ ks7010_sdio_read(priv, GCR_A, &rw_data, sizeof(rw_data));
+ if (retval) {
+ rc = 11;
+ goto error_out1;
+ }
+ if (rw_data == GCR_A_RUN)
+ break;
+ }
+ DPRINTK(4, "firmware wakeup (%d)!!!!\n", n);
+	if (n >= 50) {
+ DPRINTK(1, "firmware can't start\n");
+ rc = 12;
+ goto error_out1;
+ }
+
+ rc = 0;
+
+ error_out1:
+ release_firmware(fw_entry);
+ error_out0:
+ sdio_release_host(card->func);
+	kfree(rom_buf);
+ return rc;
+}
+
+static void ks7010_card_init(struct ks_wlan_private *priv)
+{
+	DPRINTK(5, "\nks7010_card_init()\n");
+
+ /* init_waitqueue_head(&priv->confirm_wait); */
+ init_completion(&priv->confirm_wait);
+
+ DPRINTK(5, "init_completion()\n");
+
+ /* get mac address & firmware version */
+ hostif_sme_enqueue(priv, SME_START);
+
+	DPRINTK(5, "hostif_sme_enqueue()\n");
+
+ if (!wait_for_completion_interruptible_timeout
+ (&priv->confirm_wait, 5 * HZ)) {
+ DPRINTK(1, "wait time out!! SME_START\n");
+ }
+
+ if (priv->mac_address_valid && priv->version_size) {
+ priv->dev_state = DEVICE_STATE_PREINIT;
+ }
+
+ hostif_sme_enqueue(priv, SME_GET_EEPROM_CKSUM);
+
+ /* load initial wireless parameter */
+ hostif_sme_enqueue(priv, SME_STOP_REQUEST);
+
+ hostif_sme_enqueue(priv, SME_RTS_THRESHOLD_REQUEST);
+ hostif_sme_enqueue(priv, SME_FRAGMENTATION_THRESHOLD_REQUEST);
+
+ hostif_sme_enqueue(priv, SME_WEP_INDEX_REQUEST);
+ hostif_sme_enqueue(priv, SME_WEP_KEY1_REQUEST);
+ hostif_sme_enqueue(priv, SME_WEP_KEY2_REQUEST);
+ hostif_sme_enqueue(priv, SME_WEP_KEY3_REQUEST);
+ hostif_sme_enqueue(priv, SME_WEP_KEY4_REQUEST);
+
+ hostif_sme_enqueue(priv, SME_WEP_FLAG_REQUEST);
+ hostif_sme_enqueue(priv, SME_RSN_ENABLED_REQUEST);
+ hostif_sme_enqueue(priv, SME_MODE_SET_REQUEST);
+ hostif_sme_enqueue(priv, SME_START_REQUEST);
+
+ if (!wait_for_completion_interruptible_timeout
+ (&priv->confirm_wait, 5 * HZ)) {
+ DPRINTK(1, "wait time out!! wireless parameter set\n");
+ }
+
+ if (priv->dev_state >= DEVICE_STATE_PREINIT) {
+ DPRINTK(1, "DEVICE READY!!\n");
+ priv->dev_state = DEVICE_STATE_READY;
+ } else {
+ DPRINTK(1, "dev_state=%d\n", priv->dev_state);
+ }
+}
+
+static void ks7010_init_defaults(struct ks_wlan_private *priv)
+{
+ priv->reg.tx_rate = TX_RATE_AUTO;
+ priv->reg.preamble = LONG_PREAMBLE;
+ priv->reg.powermgt = POWMGT_ACTIVE_MODE;
+ priv->reg.scan_type = ACTIVE_SCAN;
+ priv->reg.beacon_lost_count = 20;
+ priv->reg.rts = 2347UL;
+ priv->reg.fragment = 2346UL;
+ priv->reg.phy_type = D_11BG_COMPATIBLE_MODE;
+ priv->reg.cts_mode = CTS_MODE_FALSE;
+ priv->reg.rate_set.body[11] = TX_RATE_54M;
+ priv->reg.rate_set.body[10] = TX_RATE_48M;
+ priv->reg.rate_set.body[9] = TX_RATE_36M;
+ priv->reg.rate_set.body[8] = TX_RATE_18M;
+ priv->reg.rate_set.body[7] = TX_RATE_9M;
+ priv->reg.rate_set.body[6] = TX_RATE_24M | BASIC_RATE;
+ priv->reg.rate_set.body[5] = TX_RATE_12M | BASIC_RATE;
+ priv->reg.rate_set.body[4] = TX_RATE_6M | BASIC_RATE;
+ priv->reg.rate_set.body[3] = TX_RATE_11M | BASIC_RATE;
+ priv->reg.rate_set.body[2] = TX_RATE_5M | BASIC_RATE;
+ priv->reg.rate_set.body[1] = TX_RATE_2M | BASIC_RATE;
+ priv->reg.rate_set.body[0] = TX_RATE_1M | BASIC_RATE;
+ priv->reg.tx_rate = TX_RATE_FULL_AUTO;
+ priv->reg.rate_set.size = 12;
+}
+
+static int ks7010_sdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *device)
+{
+ struct ks_wlan_private *priv;
+ struct ks_sdio_card *card;
+ struct net_device *netdev;
+ unsigned char rw_data;
+ int ret;
+
+ DPRINTK(5, "ks7010_sdio_probe()\n");
+
+ priv = NULL;
+ netdev = NULL;
+
+	/* initialize ks_sdio_card */
+ card = kzalloc(sizeof(struct ks_sdio_card), GFP_KERNEL);
+ if (!card)
+ return -ENOMEM;
+
+ card->func = func;
+ spin_lock_init(&card->lock);
+
+ /*** Initialize SDIO ***/
+ sdio_claim_host(func);
+
+ /* bus setting */
+ /* Issue config request to override clock rate */
+
+ /* function blocksize set */
+ ret = sdio_set_block_size(func, KS7010_IO_BLOCK_SIZE);
+ DPRINTK(5, "multi_block=%d sdio_set_block_size()=%d %d\n",
+ func->card->cccr.multi_block, func->cur_blksize, ret);
+
+ /* Allocate the slot current */
+
+ /* function enable */
+ ret = sdio_enable_func(func);
+ DPRINTK(5, "sdio_enable_func() %d\n", ret);
+ if (ret)
+ goto error_free_card;
+
+ /* interrupt disable */
+ sdio_writeb(func, 0, INT_ENABLE, &ret);
+ if (ret)
+ goto error_free_card;
+ sdio_writeb(func, 0xff, INT_PENDING, &ret);
+ if (ret)
+ goto error_disable_func;
+
+ /* setup interrupt handler */
+ ret = sdio_claim_irq(func, ks_sdio_interrupt);
+ if (ret)
+ goto error_disable_func;
+
+ sdio_release_host(func);
+
+ sdio_set_drvdata(func, card);
+
+	DPRINTK(5, "class = 0x%X, vendor = 0x%X, device = 0x%X\n",
+		func->class, func->vendor, func->device);
+
+ /* private memory allocate */
+ netdev = alloc_etherdev(sizeof(*priv));
+ if (netdev == NULL) {
+ printk(KERN_ERR "ks7010 : Unable to alloc new net device\n");
+ goto error_release_irq;
+ }
+ if (dev_alloc_name(netdev, "wlan%d") < 0) {
+ printk(KERN_ERR "ks7010 : Couldn't get name!\n");
+ goto error_free_netdev;
+ }
+
+ priv = netdev_priv(netdev);
+
+ card->priv = priv;
+ SET_NETDEV_DEV(netdev, &card->func->dev); /* for create sysfs symlinks */
+
+ /* private memory initialize */
+ priv->ks_wlan_hw.sdio_card = card;
+ init_completion(&priv->ks_wlan_hw.ks7010_sdio_wait);
+ priv->ks_wlan_hw.read_buf = NULL;
+ priv->ks_wlan_hw.read_buf = kmalloc(RX_DATA_SIZE, GFP_KERNEL);
+ if (!priv->ks_wlan_hw.read_buf) {
+ goto error_free_netdev;
+ }
+ priv->dev_state = DEVICE_STATE_PREBOOT;
+ priv->net_dev = netdev;
+ priv->firmware_version[0] = '\0';
+ priv->version_size = 0;
+ priv->last_doze = jiffies; /* set current jiffies */
+ priv->last_wakeup = jiffies;
+ memset(&priv->nstats, 0, sizeof(priv->nstats));
+ memset(&priv->wstats, 0, sizeof(priv->wstats));
+
+ /* sleep mode */
+ atomic_set(&priv->sleepstatus.doze_request, 0);
+	atomic_set(&priv->sleepstatus.wakeup_request, 0);
+
+ trx_device_init(priv);
+ hostif_init(priv);
+ ks_wlan_net_start(netdev);
+
+ ks7010_init_defaults(priv);
+
+ /* Upload firmware */
+ ret = ks7010_upload_firmware(priv, card); /* firmware load */
+ if (ret) {
+ printk(KERN_ERR
+	       "ks7010: firmware load failed !! return code = %d\n",
+ ret);
+ goto error_free_read_buf;
+ }
+
+ /* interrupt setting */
+ /* clear Interrupt status write (ARMtoSD_InterruptPending FN1:00_0024) */
+ rw_data = 0xff;
+ sdio_claim_host(func);
+ ret = ks7010_sdio_write(priv, INT_PENDING, &rw_data, sizeof(rw_data));
+ sdio_release_host(func);
+ if (ret) {
+ DPRINTK(1, " error : INT_PENDING=%02X\n", rw_data);
+ }
+ DPRINTK(4, " clear Interrupt : INT_PENDING=%02X\n", rw_data);
+
+ /* enable ks7010sdio interrupt (INT_GCR_B|INT_READ_STATUS|INT_WRITE_STATUS) */
+ rw_data = (INT_GCR_B | INT_READ_STATUS | INT_WRITE_STATUS);
+ sdio_claim_host(func);
+ ret = ks7010_sdio_write(priv, INT_ENABLE, &rw_data, sizeof(rw_data));
+ sdio_release_host(func);
+ if (ret) {
+ DPRINTK(1, " error : INT_ENABLE=%02X\n", rw_data);
+ }
+ DPRINTK(4, " enable Interrupt : INT_ENABLE=%02X\n", rw_data);
+ priv->dev_state = DEVICE_STATE_BOOT;
+
+ priv->ks_wlan_hw.ks7010sdio_wq = create_workqueue("ks7010sdio_wq");
+ if (!priv->ks_wlan_hw.ks7010sdio_wq) {
+ DPRINTK(1, "create_workqueue failed !!\n");
+ goto error_free_read_buf;
+ }
+
+ INIT_DELAYED_WORK(&priv->ks_wlan_hw.rw_wq, ks7010_rw_function);
+ ks7010_card_init(priv);
+
+ ret = register_netdev(priv->net_dev);
+ if (ret)
+ goto error_free_read_buf;
+
+ return 0;
+
+ error_free_read_buf:
+ kfree(priv->ks_wlan_hw.read_buf);
+ priv->ks_wlan_hw.read_buf = NULL;
+ error_free_netdev:
+ free_netdev(priv->net_dev);
+ card->priv = NULL;
+ error_release_irq:
+ sdio_claim_host(func);
+ sdio_release_irq(func);
+ error_disable_func:
+ sdio_disable_func(func);
+ error_free_card:
+ sdio_release_host(func);
+ sdio_set_drvdata(func, NULL);
+ kfree(card);
+
+ return -ENODEV;
+}
+
+static void ks7010_sdio_remove(struct sdio_func *func)
+{
+ int ret;
+ struct ks_sdio_card *card;
+ struct ks_wlan_private *priv;
+ struct net_device *netdev;
+ DPRINTK(1, "ks7010_sdio_remove()\n");
+
+ card = sdio_get_drvdata(func);
+
+ if (card == NULL)
+ return;
+
+ DPRINTK(1, "priv = card->priv\n");
+	priv = card->priv;
+	if (priv) {
+		netdev = priv->net_dev;	/* dereference only after the NULL check */
+		ks_wlan_net_stop(netdev);
+ DPRINTK(1, "ks_wlan_net_stop\n");
+
+ /* interrupt disable */
+ sdio_claim_host(func);
+ sdio_writeb(func, 0, INT_ENABLE, &ret);
+ sdio_writeb(func, 0xff, INT_PENDING, &ret);
+ sdio_release_host(func);
+ DPRINTK(1, "interrupt disable\n");
+
+ /* send stop request to MAC */
+ {
+ struct hostif_stop_request_t *pp;
+ pp = (struct hostif_stop_request_t *)
+ kzalloc(hif_align_size(sizeof(*pp)), GFP_KERNEL);
+ if (pp == NULL) {
+ DPRINTK(3, "allocate memory failed..\n");
+				return;	/* TODO: turn this into a goto */
+ }
+ pp->header.size =
+ cpu_to_le16((uint16_t)
+ (sizeof(*pp) -
+ sizeof(pp->header.size)));
+ pp->header.event = cpu_to_le16((uint16_t) HIF_STOP_REQ);
+
+ sdio_claim_host(func);
+ write_to_device(priv, (unsigned char *)pp,
+ hif_align_size(sizeof(*pp)));
+ sdio_release_host(func);
+ kfree(pp);
+ }
+ DPRINTK(1, "STOP Req\n");
+
+ if (priv->ks_wlan_hw.ks7010sdio_wq) {
+ flush_workqueue(priv->ks_wlan_hw.ks7010sdio_wq);
+ destroy_workqueue(priv->ks_wlan_hw.ks7010sdio_wq);
+ }
+ DPRINTK(1,
+ "destroy_workqueue(priv->ks_wlan_hw.ks7010sdio_wq);\n");
+
+ hostif_exit(priv);
+ DPRINTK(1, "hostif_exit\n");
+
+ unregister_netdev(netdev);
+
+ trx_device_exit(priv);
+		kfree(priv->ks_wlan_hw.read_buf);
+ free_netdev(priv->net_dev);
+ card->priv = NULL;
+ }
+
+ sdio_claim_host(func);
+ sdio_release_irq(func);
+ DPRINTK(1, "sdio_release_irq()\n");
+ sdio_disable_func(func);
+ DPRINTK(1, "sdio_disable_func()\n");
+ sdio_release_host(func);
+
+ sdio_set_drvdata(func, NULL);
+
+ kfree(card);
+ DPRINTK(1, "kfree()\n");
+
+ DPRINTK(5, " Bye !!\n");
+ return;
+}
+
+static struct sdio_driver ks7010_sdio_driver = {
+ .name = "ks7010_sdio",
+ .id_table = ks7010_sdio_ids,
+ .probe = ks7010_sdio_probe,
+ .remove = ks7010_sdio_remove,
+};
+
+module_driver(ks7010_sdio_driver, sdio_register_driver, sdio_unregister_driver);
+MODULE_AUTHOR("Sang Engineering, Qi-Hardware, KeyStream");
+MODULE_DESCRIPTION("Driver for KeyStream KS7010 based SDIO cards");
+MODULE_LICENSE("GPL v2");
+/*(DEBLOBBED)*/
diff --git a/drivers/staging/ks7010/ks7010_sdio.h b/drivers/staging/ks7010/ks7010_sdio.h
new file mode 100644
index 000000000..55ff06534
--- /dev/null
+++ b/drivers/staging/ks7010/ks7010_sdio.h
@@ -0,0 +1,147 @@
+/*
+ * Driver for KeyStream, KS7010 based SDIO cards.
+ *
+ * Copyright (C) 2006-2008 KeyStream Corp.
+ * Copyright (C) 2009 Renesas Technology Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _KS7010_SDIO_H
+#define _KS7010_SDIO_H
+
+#ifdef DEVICE_ALIGNMENT
+#undef DEVICE_ALIGNMENT
+#endif
+#define DEVICE_ALIGNMENT 32
+
+/* SDIO KeyStream vendor and device */
+#define SDIO_VENDOR_ID_KS_CODE_A 0x005b
+#define SDIO_VENDOR_ID_KS_CODE_B 0x0023
+/* Older sources suggest earlier versions were named 7910 or 79xx */
+#define SDIO_DEVICE_ID_KS_7010 0x7910
+
+/* Read Status Register */
+#define READ_STATUS 0x000000
+#define READ_STATUS_BUSY 0
+#define READ_STATUS_IDLE 1
+
+/* Read Index Register */
+#define READ_INDEX 0x000004
+
+/* Read Data Size Register */
+#define READ_DATA_SIZE 0x000008
+
+/* Write Status Register */
+#define WRITE_STATUS 0x00000C
+#define WRITE_STATUS_BUSY 0
+#define WRITE_STATUS_IDLE 1
+
+/* Write Index Register */
+#define WRITE_INDEX 0x000010
+
+/* Write Status/Read Data Size Register
+ * for network packet (less than 2048 bytes data)
+ */
+#define WSTATUS_RSIZE 0x000014
+#define WSTATUS_MASK 0x80 /* Write Status Register value */
+#define RSIZE_MASK 0x7F /* Read Data Size Register value [10:4] */
+
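+/* Illustrative only (not in the original header): WSTATUS_RSIZE packs the
+ * pending read size in 16-byte units (bits [10:4] of the byte count), so
+ * the read path recovers the byte count by shifting the masked value back
+ * up by four, as in ks_wlan_hw_rx(priv, (rw_data & RSIZE_MASK) << 4).
+ */
+static inline unsigned int rsize_to_bytes(unsigned char reg)
+{
+	return (unsigned int)(reg & RSIZE_MASK) << 4;
+}
+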
+/* ARM to SD interrupt Enable */
+#define INT_ENABLE 0x000020
+/* ARM to SD interrupt Pending */
+#define INT_PENDING 0x000024
+
+#define INT_GCR_B (1<<7)
+#define INT_GCR_A (1<<6)
+#define INT_WRITE_STATUS (1<<5)
+#define INT_WRITE_INDEX (1<<4)
+#define INT_WRITE_SIZE (1<<3)
+#define INT_READ_STATUS (1<<2)
+#define INT_READ_INDEX (1<<1)
+#define INT_READ_SIZE (1<<0)
+
+/* General Communication Register A */
+#define GCR_A 0x000028
+#define GCR_A_INIT 0
+#define GCR_A_REMAP 1
+#define GCR_A_RUN 2
+
+/* General Communication Register B */
+#define GCR_B 0x00002C
+#define GCR_B_ACTIVE 0
+#define GCR_B_DOZE 1
+
+/* Wakeup Register */
+/* #define WAKEUP 0x008104 */
+/* #define WAKEUP_REQ 0x00 */
+#define WAKEUP 0x008018
+#define WAKEUP_REQ 0x5a
+
+/* AHB Data Window 0x010000-0x01FFFF */
+#define DATA_WINDOW 0x010000
+#define WINDOW_SIZE 64*1024
+
+#define KS7010_IRAM_ADDRESS 0x06000000
+
+/*
+ * struct define
+ */
+struct hw_info_t {
+ struct ks_sdio_card *sdio_card;
+ struct completion ks7010_sdio_wait;
+ struct workqueue_struct *ks7010sdio_wq;
+ struct delayed_work rw_wq;
+ unsigned char *read_buf;
+ struct tasklet_struct rx_bh_task;
+};
+
+struct ks_sdio_packet {
+ struct ks_sdio_packet *next;
+ u16 nb;
+ u8 buffer[0] __attribute__ ((aligned(4)));
+};
+
+struct ks_sdio_card {
+ struct sdio_func *func;
+ struct ks_wlan_private *priv;
+ spinlock_t lock;
+};
+
+/* Tx Device struct */
+#define TX_DEVICE_BUFF_SIZE 1024
+
+struct tx_device_buffer {
+ unsigned char *sendp; /* pointer of send req data */
+ unsigned int size;
+ void (*complete_handler) (void *arg1, void *arg2);
+ void *arg1;
+ void *arg2;
+};
+
+struct tx_device {
+ struct tx_device_buffer tx_dev_buff[TX_DEVICE_BUFF_SIZE];
+ unsigned int qhead; /* tx buffer queue first pointer */
+ unsigned int qtail; /* tx buffer queue last pointer */
+ spinlock_t tx_dev_lock;
+};
+
+/* Rx Device struct */
+#define RX_DATA_SIZE (2 + 2 + 2347 + 1)
+#define RX_DEVICE_BUFF_SIZE 32
+
+struct rx_device_buffer {
+ unsigned char data[RX_DATA_SIZE];
+ unsigned int size;
+};
+
+struct rx_device {
+ struct rx_device_buffer rx_dev_buff[RX_DEVICE_BUFF_SIZE];
+ unsigned int qhead; /* rx buffer queue first pointer */
+ unsigned int qtail; /* rx buffer queue last pointer */
+ spinlock_t rx_dev_lock;
+};
+#define ROM_FILE "/*(DEBLOBBED)*/"
+
+#endif /* _KS7010_SDIO_H */
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
new file mode 100644
index 000000000..a8822fe2b
--- /dev/null
+++ b/drivers/staging/ks7010/ks_hostif.c
@@ -0,0 +1,2760 @@
+/*
+ * Driver for KeyStream wireless LAN cards.
+ *
+ * Copyright (C) 2005-2008 KeyStream Corp.
+ * Copyright (C) 2009 Renesas Technology Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "ks_wlan.h"
+#include "ks_hostif.h"
+#include "eap_packet.h"
+#include "michael_mic.h"
+
+#include <linux/if_ether.h>
+#include <linux/if_arp.h>
+
+/* Include Wireless Extension definition and check version */
+#include <net/iw_handler.h> /* New driver API */
+
+extern int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p,
+ unsigned long size,
+ void (*complete_handler) (void *arg1, void *arg2),
+ void *arg1, void *arg2);
+extern void send_packet_complete(void *, void *);
+
+extern void ks_wlan_hw_wakeup_request(struct ks_wlan_private *priv);
+extern int ks_wlan_hw_power_save(struct ks_wlan_private *priv);
+
+/* macro */
+#define inc_smeqhead(priv) \
+ ( priv->sme_i.qhead = (priv->sme_i.qhead + 1) % SME_EVENT_BUFF_SIZE )
+#define inc_smeqtail(priv) \
+ ( priv->sme_i.qtail = (priv->sme_i.qtail + 1) % SME_EVENT_BUFF_SIZE )
+#define cnt_smeqbody(priv) \
+ (((priv->sme_i.qtail + SME_EVENT_BUFF_SIZE) - (priv->sme_i.qhead)) % SME_EVENT_BUFF_SIZE )
+
+#define KS_WLAN_MEM_FLAG (GFP_ATOMIC)
+
+static
+inline u8 get_BYTE(struct ks_wlan_private *priv)
+{
+ u8 data;
+ data = *(priv->rxp)++;
+ /* length check in advance ! */
+ --(priv->rx_size);
+ return data;
+}
+
+static
+inline u16 get_WORD(struct ks_wlan_private *priv)
+{
+ u16 data;
+ data = (get_BYTE(priv) & 0xff);
+ data |= ((get_BYTE(priv) << 8) & 0xff00);
+ return data;
+}
+
+static
+inline u32 get_DWORD(struct ks_wlan_private *priv)
+{
+ u32 data;
+ data = (get_BYTE(priv) & 0xff);
+ data |= ((get_BYTE(priv) << 8) & 0x0000ff00);
+ data |= ((get_BYTE(priv) << 16) & 0x00ff0000);
+ data |= ((get_BYTE(priv) << 24) & 0xff000000);
+ return data;
+}
+
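+/*
+ * Illustrative only (assumes <asm/unaligned.h>): the byte-wise readers
+ * above assemble little-endian values while advancing priv->rxp; the same
+ * fixed-width read can be expressed with the standard unaligned accessors.
+ */
+static inline u16 get_WORD_unaligned(struct ks_wlan_private *priv)
+{
+	u16 data = get_unaligned_le16(priv->rxp);
+
+	priv->rxp += 2;
+	priv->rx_size -= 2;
+	return data;
+}
+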
+void ks_wlan_hw_wakeup_task(struct work_struct *work)
+{
+ struct ks_wlan_private *priv =
+ container_of(work, struct ks_wlan_private, ks_wlan_wakeup_task);
+ int ps_status = atomic_read(&priv->psstatus.status);
+
+ if (ps_status == PS_SNOOZE) {
+ ks_wlan_hw_wakeup_request(priv);
+ if (!wait_for_completion_interruptible_timeout(&priv->psstatus.wakeup_wait, HZ / 50)) { /* 20ms timeout */
+ DPRINTK(1, "wake up timeout !!!\n");
+ schedule_work(&priv->ks_wlan_wakeup_task);
+ return;
+ }
+ } else {
+ DPRINTK(1, "ps_status=%d\n", ps_status);
+ }
+
+ /* power save */
+ if (atomic_read(&priv->sme_task.count) > 0) {
+ DPRINTK(4, "sme task enable.\n");
+ tasklet_enable(&priv->sme_task);
+ }
+}
+
+static
+int ks_wlan_do_power_save(struct ks_wlan_private *priv)
+{
+ int rc = 0;
+
+ DPRINTK(4, "psstatus.status=%d\n", atomic_read(&priv->psstatus.status));
+
+ if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+ hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST);
+ } else {
+ priv->dev_state = DEVICE_STATE_READY;
+ }
+ return rc;
+}
+
+static
+int get_current_ap(struct ks_wlan_private *priv, struct link_ap_info_t *ap_info)
+{
+ struct local_ap_t *ap;
+ union iwreq_data wrqu;
+ struct net_device *netdev = priv->net_dev;
+ int rc = 0;
+
+ DPRINTK(3, "\n");
+ ap = &(priv->current_ap);
+
+ if ((priv->connect_status & CONNECT_STATUS_MASK) == DISCONNECT_STATUS) {
+ memset(ap, 0, sizeof(struct local_ap_t));
+ return 1;
+ }
+
+ /* bssid */
+ memcpy(&(ap->bssid[0]), &(ap_info->bssid[0]), ETH_ALEN);
+ /* essid */
+ memcpy(&(ap->ssid.body[0]), &(priv->reg.ssid.body[0]),
+ priv->reg.ssid.size);
+ ap->ssid.size = priv->reg.ssid.size;
+ /* rate_set */
+ memcpy(&(ap->rate_set.body[0]), &(ap_info->rate_set.body[0]),
+ ap_info->rate_set.size);
+ ap->rate_set.size = ap_info->rate_set.size;
+ if (ap_info->ext_rate_set.size) {
+ /* rate_set */
+ memcpy(&(ap->rate_set.body[ap->rate_set.size]),
+ &(ap_info->ext_rate_set.body[0]),
+ ap_info->ext_rate_set.size);
+ ap->rate_set.size += ap_info->ext_rate_set.size;
+ }
+ /* channel */
+ ap->channel = ap_info->ds_parameter.channel;
+ /* rssi */
+ ap->rssi = ap_info->rssi;
+ /* sq */
+ ap->sq = ap_info->sq;
+ /* noise */
+ ap->noise = ap_info->noise;
+ /* capability */
+ ap->capability = ap_info->capability;
+ /* rsn */
+ if ((ap_info->rsn_mode & RSN_MODE_WPA2)
+ && (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)) {
+ ap->rsn_ie.id = 0x30;
+ if (ap_info->rsn.size <= RSN_IE_BODY_MAX) {
+ ap->rsn_ie.size = ap_info->rsn.size;
+ memcpy(&(ap->rsn_ie.body[0]), &(ap_info->rsn.body[0]),
+ ap_info->rsn.size);
+ } else {
+ ap->rsn_ie.size = RSN_IE_BODY_MAX;
+ memcpy(&(ap->rsn_ie.body[0]), &(ap_info->rsn.body[0]),
+ RSN_IE_BODY_MAX);
+ }
+ } else if ((ap_info->rsn_mode & RSN_MODE_WPA)
+ && (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA)) {
+ ap->wpa_ie.id = 0xdd;
+ if (ap_info->rsn.size <= RSN_IE_BODY_MAX) {
+ ap->wpa_ie.size = ap_info->rsn.size;
+ memcpy(&(ap->wpa_ie.body[0]), &(ap_info->rsn.body[0]),
+ ap_info->rsn.size);
+ } else {
+ ap->wpa_ie.size = RSN_IE_BODY_MAX;
+ memcpy(&(ap->wpa_ie.body[0]), &(ap_info->rsn.body[0]),
+ RSN_IE_BODY_MAX);
+ }
+ } else {
+ ap->rsn_ie.id = 0;
+ ap->rsn_ie.size = 0;
+ ap->wpa_ie.id = 0;
+ ap->wpa_ie.size = 0;
+ }
+
+ wrqu.data.length = 0;
+ wrqu.data.flags = 0;
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+ if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+ memcpy(wrqu.ap_addr.sa_data,
+ &(priv->current_ap.bssid[0]), ETH_ALEN);
+ DPRINTK(3,
+ "IWEVENT: connect bssid=%02x:%02x:%02x:%02x:%02x:%02x\n",
+ (unsigned char)wrqu.ap_addr.sa_data[0],
+ (unsigned char)wrqu.ap_addr.sa_data[1],
+ (unsigned char)wrqu.ap_addr.sa_data[2],
+ (unsigned char)wrqu.ap_addr.sa_data[3],
+ (unsigned char)wrqu.ap_addr.sa_data[4],
+ (unsigned char)wrqu.ap_addr.sa_data[5]);
+ wireless_send_event(netdev, SIOCGIWAP, &wrqu, NULL);
+ }
+ DPRINTK(4, "\n Link AP\n");
+	DPRINTK(4, " bssid=%02X:%02X:%02X:%02X:%02X:%02X\n essid=%s\n"
+		" rate_set=%02X,%02X,%02X,%02X,%02X,%02X,%02X,%02X\n"
+		" channel=%d\n rssi=%d\n sq=%d\n capability=%04X\n",
+		ap->bssid[0], ap->bssid[1], ap->bssid[2], ap->bssid[3],
+		ap->bssid[4], ap->bssid[5], &(ap->ssid.body[0]),
+		ap->rate_set.body[0], ap->rate_set.body[1],
+		ap->rate_set.body[2], ap->rate_set.body[3],
+		ap->rate_set.body[4], ap->rate_set.body[5],
+		ap->rate_set.body[6], ap->rate_set.body[7],
+		ap->channel, ap->rssi, ap->sq, ap->capability);
+ DPRINTK(4, "\n Link AP\n rsn.mode=%d\n rsn.size=%d\n",
+ ap_info->rsn_mode, ap_info->rsn.size);
+ DPRINTK(4, "\n ext_rate_set_size=%d\n rate_set_size=%d\n",
+ ap_info->ext_rate_set.size, ap_info->rate_set.size);
+
+ return rc;
+}
+
+static
+int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
+ struct local_ap_t *ap)
+{
+ unsigned char *bp;
+ int bsize, offset;
+ int rc = 0;
+
+ DPRINTK(3, "\n");
+ memset(ap, 0, sizeof(struct local_ap_t));
+
+ /* bssid */
+ memcpy(&(ap->bssid[0]), &(ap_info->bssid[0]), ETH_ALEN);
+ /* rssi */
+ ap->rssi = ap_info->rssi;
+ /* sq */
+ ap->sq = ap_info->sq;
+ /* noise */
+ ap->noise = ap_info->noise;
+ /* capability */
+ ap->capability = ap_info->capability;
+ /* channel */
+ ap->channel = ap_info->ch_info;
+
+ bp = &(ap_info->body[0]);
+ bsize = ap_info->body_size;
+ offset = 0;
+
+ while (bsize > offset) {
+ /* DPRINTK(4, "Element ID=%d \n",*bp); */
+ switch (*bp) {
+ case 0: /* ssid */
+ if (*(bp + 1) <= SSID_MAX_SIZE) {
+ ap->ssid.size = *(bp + 1);
+ } else {
+ DPRINTK(1, "size over :: ssid size=%d \n",
+ *(bp + 1));
+ ap->ssid.size = SSID_MAX_SIZE;
+ }
+ memcpy(&(ap->ssid.body[0]), bp + 2, ap->ssid.size);
+ break;
+ case 1: /* rate */
+ case 50: /* ext rate */
+ if ((*(bp + 1) + ap->rate_set.size) <=
+ RATE_SET_MAX_SIZE) {
+ memcpy(&(ap->rate_set.body[ap->rate_set.size]),
+ bp + 2, *(bp + 1));
+ ap->rate_set.size += *(bp + 1);
+ } else {
+ DPRINTK(1, "size over :: rate size=%d \n",
+ (*(bp + 1) + ap->rate_set.size));
+ memcpy(&(ap->rate_set.body[ap->rate_set.size]),
+ bp + 2,
+ RATE_SET_MAX_SIZE - ap->rate_set.size);
+ ap->rate_set.size +=
+ (RATE_SET_MAX_SIZE - ap->rate_set.size);
+ }
+ break;
+ case 3: /* DS parameter */
+ break;
+ case 48: /* RSN(WPA2) */
+ ap->rsn_ie.id = *bp;
+ if (*(bp + 1) <= RSN_IE_BODY_MAX) {
+ ap->rsn_ie.size = *(bp + 1);
+ } else {
+ DPRINTK(1, "size over :: rsn size=%d \n",
+ *(bp + 1));
+ ap->rsn_ie.size = RSN_IE_BODY_MAX;
+ }
+ memcpy(&(ap->rsn_ie.body[0]), bp + 2, ap->rsn_ie.size);
+ break;
+ case 221: /* WPA */
+ if (!memcmp(bp + 2, "\x00\x50\xf2\x01", 4)) { /* WPA OUI check */
+ ap->wpa_ie.id = *bp;
+ if (*(bp + 1) <= RSN_IE_BODY_MAX) {
+ ap->wpa_ie.size = *(bp + 1);
+ } else {
+ DPRINTK(1,
+ "size over :: wpa size=%d \n",
+ *(bp + 1));
+ ap->wpa_ie.size = RSN_IE_BODY_MAX;
+ }
+ memcpy(&(ap->wpa_ie.body[0]), bp + 2,
+ ap->wpa_ie.size);
+ }
+ break;
+
+ case 2: /* FH parameter */
+ case 4: /* CF parameter */
+ case 5: /* TIM */
+ case 6: /* IBSS parameter */
+ case 7: /* Country */
+ case 42: /* ERP information */
+ case 47: /* Reserve ID 47 Broadcom AP */
+ break;
+ default:
+ DPRINTK(4, "unknown Element ID=%d \n", *bp);
+ break;
+ }
+ offset += 2; /* id & size field */
+ offset += *(bp + 1); /* +size offset */
+ bp += (*(bp + 1) + 2); /* pointer update */
+ }
+
+ return rc;
+}
+
+static
+void hostif_data_indication(struct ks_wlan_private *priv)
+{
+ unsigned int rx_ind_size; /* indicate data size */
+ struct sk_buff *skb;
+ unsigned short auth_type;
+ unsigned char temp[256];
+
+ unsigned char RecvMIC[8];
+ char buf[128];
+ struct ether_hdr *eth_hdr;
+ unsigned short eth_proto;
+ unsigned long now;
+ struct mic_failure_t *mic_failure;
+ struct ieee802_1x_hdr *aa1x_hdr;
+ struct wpa_eapol_key *eap_key;
+ struct michel_mic_t michel_mic;
+ union iwreq_data wrqu;
+
+ DPRINTK(3, "\n");
+
+ /* min length check */
+ if (priv->rx_size <= ETH_HLEN) {
+ DPRINTK(3, "rx_size = %d\n", priv->rx_size);
+ priv->nstats.rx_errors++;
+ return;
+ }
+
+ auth_type = get_WORD(priv); /* AuthType */
+ get_WORD(priv); /* Reserve Area */
+
+ eth_hdr = (struct ether_hdr *)(priv->rxp);
+ eth_proto = ntohs(eth_hdr->h_proto);
+ DPRINTK(3, "ether protocol = %04X\n", eth_proto);
+
+ /* source address check */
+ if (!memcmp(&priv->eth_addr[0], eth_hdr->h_source, ETH_ALEN)) {
+ DPRINTK(1, "invalid : source is own mac address !!\n");
+ DPRINTK(1,
+			"eth_hdr->h_source=%02X:%02X:%02X:%02X:%02X:%02X\n",
+ eth_hdr->h_source[0], eth_hdr->h_source[1],
+ eth_hdr->h_source[2], eth_hdr->h_source[3],
+ eth_hdr->h_source[4], eth_hdr->h_source[5]);
+ priv->nstats.rx_errors++;
+ return;
+ }
+
+ /* for WPA */
+ if (auth_type != TYPE_DATA && priv->wpa.rsn_enabled) {
+ if (memcmp(&eth_hdr->h_source[0], &priv->eth_addr[0], ETH_ALEN)) { /* source address check */
+ if (eth_hdr->h_dest_snap != eth_hdr->h_source_snap) {
+ DPRINTK(1, "invalid data format\n");
+ priv->nstats.rx_errors++;
+ return;
+ }
+ if (((auth_type == TYPE_PMK1
+ && priv->wpa.pairwise_suite ==
+ IW_AUTH_CIPHER_TKIP) || (auth_type == TYPE_GMK1
+ && priv->wpa.
+ group_suite ==
+ IW_AUTH_CIPHER_TKIP)
+ || (auth_type == TYPE_GMK2
+ && priv->wpa.group_suite ==
+ IW_AUTH_CIPHER_TKIP))
+ && priv->wpa.key[auth_type - 1].key_len) {
+ DPRINTK(4, "TKIP: protocol=%04X: size=%u\n",
+ eth_proto, priv->rx_size);
+ /* MIC save */
+ memcpy(&RecvMIC[0],
+ (priv->rxp) + ((priv->rx_size) - 8), 8);
+ priv->rx_size = priv->rx_size - 8;
+ if (auth_type > 0 && auth_type < 4) { /* auth_type check */
+ MichaelMICFunction(&michel_mic, (uint8_t *) priv->wpa.key[auth_type - 1].rx_mic_key, (uint8_t *) priv->rxp, (int)priv->rx_size, (uint8_t) 0, /* priority */
+ (uint8_t *)
+ michel_mic.Result);
+ }
+ if (memcmp(michel_mic.Result, RecvMIC, 8)) {
+ now = jiffies;
+ mic_failure = &priv->wpa.mic_failure;
+ /* MIC FAILURE */
+ if (mic_failure->last_failure_time &&
+ (now -
+ mic_failure->last_failure_time) /
+ HZ >= 60) {
+ mic_failure->failure = 0;
+ }
+ DPRINTK(4, "MIC FAILURE \n");
+ if (mic_failure->failure == 0) {
+ mic_failure->failure = 1;
+ mic_failure->counter = 0;
+ } else if (mic_failure->failure == 1) {
+ mic_failure->failure = 2;
+ mic_failure->counter =
+ (uint16_t) ((now -
+ mic_failure->
+ last_failure_time)
+ / HZ);
+ if (!mic_failure->counter) /* mic_failure counter value range 1-60 */
+ mic_failure->counter =
+ 1;
+ }
+ priv->wpa.mic_failure.
+ last_failure_time = now;
+ /* needed parameters: count, keyid, key type, TSC */
+ sprintf(buf,
+ "MLME-MICHAELMICFAILURE.indication(keyid=%d %scast addr="
+ "%02x:%02x:%02x:%02x:%02x:%02x)",
+ auth_type - 1,
+ eth_hdr->
+ h_dest[0] & 0x01 ? "broad" :
+ "uni", eth_hdr->h_source[0],
+ eth_hdr->h_source[1],
+ eth_hdr->h_source[2],
+ eth_hdr->h_source[3],
+ eth_hdr->h_source[4],
+ eth_hdr->h_source[5]);
+ memset(&wrqu, 0, sizeof(wrqu));
+ wrqu.data.length = strlen(buf);
+ DPRINTK(4,
+ "IWEVENT:MICHAELMICFAILURE\n");
+ wireless_send_event(priv->net_dev,
+ IWEVCUSTOM, &wrqu,
+ buf);
+ return;
+ }
+ }
+ }
+ }
+
+ if ((priv->connect_status & FORCE_DISCONNECT) ||
+ priv->wpa.mic_failure.failure == 2) {
+ return;
+ }
+
+ /* check 13th byte at rx data */
+ switch (*(priv->rxp + 12)) {
+ case 0xAA: /* SNAP */
+ rx_ind_size = priv->rx_size - 6;
+ skb = dev_alloc_skb(rx_ind_size);
+ DPRINTK(4, "SNAP, rx_ind_size = %d\n", rx_ind_size);
+
+ if (skb) {
+ memcpy(skb_put(skb, 12), priv->rxp, 12); /* 8802/FDDI MAC copy */
+ /* (SNAP+UI..) skip */
+ memcpy(skb_put(skb, rx_ind_size - 12), priv->rxp + 18, rx_ind_size - 12); /* copy after Type */
+
+ aa1x_hdr = (struct ieee802_1x_hdr *)(priv->rxp + 20);
+ if (aa1x_hdr->type == IEEE802_1X_TYPE_EAPOL_KEY
+ && priv->wpa.rsn_enabled) {
+ eap_key =
+ (struct wpa_eapol_key *)(aa1x_hdr + 1);
+ atomic_set(&priv->psstatus.snooze_guard, 1);
+ }
+
+ /* rx indication */
+ skb->dev = priv->net_dev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ priv->nstats.rx_packets++;
+ priv->nstats.rx_bytes += rx_ind_size;
+ skb->dev->last_rx = jiffies;
+ netif_rx(skb);
+	} else {
+		printk(KERN_WARNING
+		       "%s: Memory squeeze, dropping packet.\n",
+		       priv->net_dev->name);	/* skb is NULL here */
+		priv->nstats.rx_dropped++;
+	}
+ break;
+ case 0xF0: /* NETBEUI/NetBIOS */
+ rx_ind_size = (priv->rx_size + 2);
+ skb = dev_alloc_skb(rx_ind_size);
+ DPRINTK(3, "NETBEUI/NetBIOS rx_ind_size=%d\n", rx_ind_size);
+
+ if (skb) {
+ memcpy(skb_put(skb, 12), priv->rxp, 12); /* 8802/FDDI MAC copy */
+
+ temp[0] = (((rx_ind_size - 12) >> 8) & 0xff); /* NETBEUI size add */
+ temp[1] = ((rx_ind_size - 12) & 0xff);
+ memcpy(skb_put(skb, 2), temp, 2);
+
+ memcpy(skb_put(skb, rx_ind_size - 14), priv->rxp + 12, rx_ind_size - 14); /* copy after Type */
+
+ aa1x_hdr = (struct ieee802_1x_hdr *)(priv->rxp + 14);
+ if (aa1x_hdr->type == IEEE802_1X_TYPE_EAPOL_KEY
+ && priv->wpa.rsn_enabled) {
+ eap_key =
+ (struct wpa_eapol_key *)(aa1x_hdr + 1);
+ atomic_set(&priv->psstatus.snooze_guard, 1);
+ }
+
+ /* rx indication */
+ skb->dev = priv->net_dev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ priv->nstats.rx_packets++;
+ priv->nstats.rx_bytes += rx_ind_size;
+ skb->dev->last_rx = jiffies;
+ netif_rx(skb);
+	} else {
+		printk(KERN_WARNING
+		       "%s: Memory squeeze, dropping packet.\n",
+		       priv->net_dev->name);	/* skb is NULL here */
+		priv->nstats.rx_dropped++;
+	}
+ break;
+ default: /* other rx data */
+ DPRINTK(2, "invalid data format\n");
+ priv->nstats.rx_errors++;
+ }
+}
+
+static
+void hostif_mib_get_confirm(struct ks_wlan_private *priv)
+{
+ struct net_device *dev = priv->net_dev;
+ uint32_t mib_status;
+ uint32_t mib_attribute;
+ uint16_t mib_val_size;
+ uint16_t mib_val_type;
+
+ DPRINTK(3, "\n");
+
+ mib_status = get_DWORD(priv); /* MIB status */
+	mib_attribute = get_DWORD(priv);	/* MIB attribute */
+ mib_val_size = get_WORD(priv); /* MIB value size */
+ mib_val_type = get_WORD(priv); /* MIB value type */
+
+ if (mib_status != 0) {
+ /* in case of error */
+ DPRINTK(1, "attribute=%08X, status=%08X\n", mib_attribute,
+ mib_status);
+ return;
+ }
+
+ switch (mib_attribute) {
+ case DOT11_MAC_ADDRESS:
+ /* MAC address */
+ DPRINTK(3, " mib_attribute=DOT11_MAC_ADDRESS\n");
+ hostif_sme_enqueue(priv, SME_GET_MAC_ADDRESS);
+ memcpy(priv->eth_addr, priv->rxp, ETH_ALEN);
+ priv->mac_address_valid = 1;
+		memcpy(dev->dev_addr, priv->eth_addr, ETH_ALEN);
+		dev->dev_addr[6] = 0x00;
+		dev->dev_addr[7] = 0x00;
+ printk(KERN_INFO
+ "ks_wlan: MAC ADDRESS = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ priv->eth_addr[0], priv->eth_addr[1], priv->eth_addr[2],
+ priv->eth_addr[3], priv->eth_addr[4], priv->eth_addr[5]);
+ break;
+ case DOT11_PRODUCT_VERSION:
+ /* firmware version */
+ DPRINTK(3, " mib_attribute=DOT11_PRODUCT_VERSION\n");
+ priv->version_size = priv->rx_size;
+ memcpy(priv->firmware_version, priv->rxp, priv->rx_size);
+ priv->firmware_version[priv->rx_size] = '\0';
+ printk(KERN_INFO "ks_wlan: firmware ver. = %s\n",
+ priv->firmware_version);
+ hostif_sme_enqueue(priv, SME_GET_PRODUCT_VERSION);
+ /* wake_up_interruptible_all(&priv->confirm_wait); */
+ complete(&priv->confirm_wait);
+ break;
+ case LOCAL_GAIN:
+ memcpy(&priv->gain, priv->rxp, sizeof(priv->gain));
+ DPRINTK(3, "TxMode=%d, RxMode=%d, TxGain=%d, RxGain=%d\n",
+ priv->gain.TxMode, priv->gain.RxMode, priv->gain.TxGain,
+ priv->gain.RxGain);
+ break;
+ case LOCAL_EEPROM_SUM:
+ memcpy(&priv->eeprom_sum, priv->rxp, sizeof(priv->eeprom_sum));
+ DPRINTK(1, "eeprom_sum.type=%x, eeprom_sum.result=%x\n",
+ priv->eeprom_sum.type, priv->eeprom_sum.result);
+		if (priv->eeprom_sum.type == 0) {
+			priv->eeprom_checksum = EEPROM_CHECKSUM_NONE;
+		} else if (priv->eeprom_sum.type == 1) {
+			if (priv->eeprom_sum.result == 0) {
+				priv->eeprom_checksum = EEPROM_NG;
+				printk(KERN_INFO "LOCAL_EEPROM_SUM NG\n");
+			} else if (priv->eeprom_sum.result == 1) {
+				priv->eeprom_checksum = EEPROM_OK;
+			}
+		} else {
+			printk(KERN_ERR "LOCAL_EEPROM_SUM error!\n");
+		}
+ break;
+ default:
+ DPRINTK(1, "mib_attribute=%08x\n", (unsigned int)mib_attribute);
+ break;
+ }
+}
+
+static
+void hostif_mib_set_confirm(struct ks_wlan_private *priv)
+{
+ uint32_t mib_status; /* +04 MIB Status */
+ uint32_t mib_attribute; /* +08 MIB attribute */
+
+ DPRINTK(3, "\n");
+
+ mib_status = get_DWORD(priv); /* MIB Status */
+ mib_attribute = get_DWORD(priv); /* MIB attribute */
+
+ if (mib_status != 0) {
+ /* in case of error */
+ DPRINTK(1, "error :: attribute=%08X, status=%08X\n",
+ mib_attribute, mib_status);
+ }
+
+ switch (mib_attribute) {
+ case DOT11_RTS_THRESHOLD:
+ hostif_sme_enqueue(priv, SME_RTS_THRESHOLD_CONFIRM);
+ break;
+ case DOT11_FRAGMENTATION_THRESHOLD:
+ hostif_sme_enqueue(priv, SME_FRAGMENTATION_THRESHOLD_CONFIRM);
+ break;
+ case DOT11_WEP_DEFAULT_KEY_ID:
+ if (!priv->wpa.wpa_enabled)
+ hostif_sme_enqueue(priv, SME_WEP_INDEX_CONFIRM);
+ break;
+ case DOT11_WEP_DEFAULT_KEY_VALUE1:
+ DPRINTK(2, "DOT11_WEP_DEFAULT_KEY_VALUE1:mib_status=%d\n",
+ (int)mib_status);
+ if (priv->wpa.rsn_enabled)
+ hostif_sme_enqueue(priv, SME_SET_PMK_TSC);
+ else
+ hostif_sme_enqueue(priv, SME_WEP_KEY1_CONFIRM);
+ break;
+ case DOT11_WEP_DEFAULT_KEY_VALUE2:
+ DPRINTK(2, "DOT11_WEP_DEFAULT_KEY_VALUE2:mib_status=%d\n",
+ (int)mib_status);
+ if (priv->wpa.rsn_enabled)
+ hostif_sme_enqueue(priv, SME_SET_GMK1_TSC);
+ else
+ hostif_sme_enqueue(priv, SME_WEP_KEY2_CONFIRM);
+ break;
+ case DOT11_WEP_DEFAULT_KEY_VALUE3:
+ DPRINTK(2, "DOT11_WEP_DEFAULT_KEY_VALUE3:mib_status=%d\n",
+ (int)mib_status);
+ if (priv->wpa.rsn_enabled)
+ hostif_sme_enqueue(priv, SME_SET_GMK2_TSC);
+ else
+ hostif_sme_enqueue(priv, SME_WEP_KEY3_CONFIRM);
+ break;
+ case DOT11_WEP_DEFAULT_KEY_VALUE4:
+ DPRINTK(2, "DOT11_WEP_DEFAULT_KEY_VALUE4:mib_status=%d\n",
+ (int)mib_status);
+ if (!priv->wpa.rsn_enabled)
+ hostif_sme_enqueue(priv, SME_WEP_KEY4_CONFIRM);
+ break;
+ case DOT11_PRIVACY_INVOKED:
+ if (!priv->wpa.rsn_enabled)
+ hostif_sme_enqueue(priv, SME_WEP_FLAG_CONFIRM);
+ break;
+ case DOT11_RSN_ENABLED:
+ DPRINTK(2, "DOT11_RSN_ENABLED:mib_status=%d\n",
+ (int)mib_status);
+ hostif_sme_enqueue(priv, SME_RSN_ENABLED_CONFIRM);
+ break;
+ case LOCAL_RSN_MODE:
+ hostif_sme_enqueue(priv, SME_RSN_MODE_CONFIRM);
+ break;
+ case LOCAL_MULTICAST_ADDRESS:
+ hostif_sme_enqueue(priv, SME_MULTICAST_REQUEST);
+ break;
+ case LOCAL_MULTICAST_FILTER:
+ hostif_sme_enqueue(priv, SME_MULTICAST_CONFIRM);
+ break;
+ case LOCAL_CURRENTADDRESS:
+ priv->mac_address_valid = 1;
+ break;
+ case DOT11_RSN_CONFIG_MULTICAST_CIPHER:
+ DPRINTK(2, "DOT11_RSN_CONFIG_MULTICAST_CIPHER:mib_status=%d\n",
+ (int)mib_status);
+ hostif_sme_enqueue(priv, SME_RSN_MCAST_CONFIRM);
+ break;
+ case DOT11_RSN_CONFIG_UNICAST_CIPHER:
+ DPRINTK(2, "DOT11_RSN_CONFIG_UNICAST_CIPHER:mib_status=%d\n",
+ (int)mib_status);
+ hostif_sme_enqueue(priv, SME_RSN_UCAST_CONFIRM);
+ break;
+ case DOT11_RSN_CONFIG_AUTH_SUITE:
+ DPRINTK(2, "DOT11_RSN_CONFIG_AUTH_SUITE:mib_status=%d\n",
+ (int)mib_status);
+ hostif_sme_enqueue(priv, SME_RSN_AUTH_CONFIRM);
+ break;
+ case DOT11_PMK_TSC:
+ DPRINTK(2, "DOT11_PMK_TSC:mib_status=%d\n", (int)mib_status);
+ break;
+ case DOT11_GMK1_TSC:
+ DPRINTK(2, "DOT11_GMK1_TSC:mib_status=%d\n", (int)mib_status);
+ if (atomic_read(&priv->psstatus.snooze_guard)) {
+ atomic_set(&priv->psstatus.snooze_guard, 0);
+ }
+ break;
+ case DOT11_GMK2_TSC:
+ DPRINTK(2, "DOT11_GMK2_TSC:mib_status=%d\n", (int)mib_status);
+ if (atomic_read(&priv->psstatus.snooze_guard)) {
+ atomic_set(&priv->psstatus.snooze_guard, 0);
+ }
+ break;
+ case LOCAL_PMK:
+ DPRINTK(2, "LOCAL_PMK:mib_status=%d\n", (int)mib_status);
+ break;
+ case LOCAL_GAIN:
+ DPRINTK(2, "LOCAL_GAIN:mib_status=%d\n", (int)mib_status);
+ break;
+#ifdef WPS
+ case LOCAL_WPS_ENABLE:
+ DPRINTK(2, "LOCAL_WPS_ENABLE:mib_status=%d\n", (int)mib_status);
+ break;
+ case LOCAL_WPS_PROBE_REQ:
+ DPRINTK(2, "LOCAL_WPS_PROBE_REQ:mib_status=%d\n",
+ (int)mib_status);
+ break;
+#endif /* WPS */
+	case LOCAL_REGION:
+		DPRINTK(2, "LOCAL_REGION:mib_status=%d\n", (int)mib_status);
+		break;
+	default:
+ break;
+ }
+}
+
+static
+void hostif_power_mngmt_confirm(struct ks_wlan_private *priv)
+{
+ DPRINTK(3, "\n");
+
+ if (priv->reg.powermgt > POWMGT_ACTIVE_MODE &&
+ priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
+ atomic_set(&priv->psstatus.confirm_wait, 0);
+ priv->dev_state = DEVICE_STATE_SLEEP;
+ ks_wlan_hw_power_save(priv);
+ } else {
+ priv->dev_state = DEVICE_STATE_READY;
+ }
+}
+
+static
+void hostif_sleep_confirm(struct ks_wlan_private *priv)
+{
+ DPRINTK(3, "\n");
+
+ atomic_set(&priv->sleepstatus.doze_request, 1);
+ queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
+ &priv->ks_wlan_hw.rw_wq, 1);
+}
+
+static
+void hostif_start_confirm(struct ks_wlan_private *priv)
+{
+#ifdef WPS
+ union iwreq_data wrqu;
+ wrqu.data.length = 0;
+ wrqu.data.flags = 0;
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+ if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+ memset(wrqu.ap_addr.sa_data, '\0', ETH_ALEN);
+ DPRINTK(3, "IWEVENT: disconnect\n");
+ wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
+ }
+#endif
+ DPRINTK(3, " scan_ind_count=%d\n", priv->scan_ind_count);
+ hostif_sme_enqueue(priv, SME_START_CONFIRM);
+}
+
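+/*
+ * connect_status keeps the FORCE_DISCONNECT flag in its upper bits; the
+ * indication handlers below preserve that flag while switching the low
+ * bits between CONNECT_STATUS and DISCONNECT_STATUS.
+ */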
+static
+void hostif_connect_indication(struct ks_wlan_private *priv)
+{
+ unsigned short connect_code;
+ unsigned int tmp = 0;
+ unsigned int old_status = priv->connect_status;
+ struct net_device *netdev = priv->net_dev;
+ union iwreq_data wrqu0;
+ connect_code = get_WORD(priv);
+
+ switch (connect_code) {
+ case RESULT_CONNECT: /* connect */
+ DPRINTK(3, "connect :: scan_ind_count=%d\n",
+ priv->scan_ind_count);
+ if (!(priv->connect_status & FORCE_DISCONNECT))
+ netif_carrier_on(netdev);
+ tmp = FORCE_DISCONNECT & priv->connect_status;
+ priv->connect_status = tmp + CONNECT_STATUS;
+ break;
+ case RESULT_DISCONNECT: /* disconnect */
+ DPRINTK(3, "disconnect :: scan_ind_count=%d\n",
+ priv->scan_ind_count);
+ netif_carrier_off(netdev);
+ tmp = FORCE_DISCONNECT & priv->connect_status;
+ priv->connect_status = tmp + DISCONNECT_STATUS;
+ break;
+ default:
+ DPRINTK(1, "unknown connect_code=%d :: scan_ind_count=%d\n",
+ connect_code, priv->scan_ind_count);
+ netif_carrier_off(netdev);
+ tmp = FORCE_DISCONNECT & priv->connect_status;
+ priv->connect_status = tmp + DISCONNECT_STATUS;
+ break;
+ }
+
+ get_current_ap(priv, (struct link_ap_info_t *)priv->rxp);
+ if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS &&
+ (old_status & CONNECT_STATUS_MASK) == DISCONNECT_STATUS) {
+ /* for power save */
+ atomic_set(&priv->psstatus.snooze_guard, 0);
+ atomic_set(&priv->psstatus.confirm_wait, 0);
+ }
+ ks_wlan_do_power_save(priv);
+
+ wrqu0.data.length = 0;
+ wrqu0.data.flags = 0;
+ wrqu0.ap_addr.sa_family = ARPHRD_ETHER;
+ if ((priv->connect_status & CONNECT_STATUS_MASK) == DISCONNECT_STATUS &&
+ (old_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+ memset(wrqu0.ap_addr.sa_data, '\0', ETH_ALEN);
+ DPRINTK(3, "IWEVENT: disconnect\n");
+ DPRINTK(3, "disconnect :: scan_ind_count=%d\n",
+ priv->scan_ind_count);
+ wireless_send_event(netdev, SIOCGIWAP, &wrqu0, NULL);
+ }
+ priv->scan_ind_count = 0;
+}
+
+static
+void hostif_scan_indication(struct ks_wlan_private *priv)
+{
+ int i;
+ struct ap_info_t *ap_info;
+
+ DPRINTK(3, "scan_ind_count = %d\n", priv->scan_ind_count);
+ ap_info = (struct ap_info_t *)(priv->rxp);
+
+	if (priv->scan_ind_count != 0) {
+		for (i = 0; i < priv->aplist.size; i++) {	/* bssid check */
+			if (!memcmp(&ap_info->bssid[0],
+				    &priv->aplist.ap[i].bssid[0], ETH_ALEN)) {
+				if (ap_info->frame_type == FRAME_TYPE_PROBE_RESP)
+					get_ap_information(priv, ap_info,
+							   &priv->aplist.ap[i]);
+				return;
+			}
+		}
+	}
+ priv->scan_ind_count++;
+ if (priv->scan_ind_count < LOCAL_APLIST_MAX + 1) {
+ DPRINTK(4, " scan_ind_count=%d :: aplist.size=%d\n",
+ priv->scan_ind_count, priv->aplist.size);
+		get_ap_information(priv, (struct ap_info_t *)(priv->rxp),
+				   &priv->aplist.ap[priv->scan_ind_count - 1]);
+ priv->aplist.size = priv->scan_ind_count;
+ } else {
+ DPRINTK(4, " count over :: scan_ind_count=%d\n",
+ priv->scan_ind_count);
+ }
+}
+
+static
+void hostif_stop_confirm(struct ks_wlan_private *priv)
+{
+ unsigned int tmp = 0;
+ unsigned int old_status = priv->connect_status;
+ struct net_device *netdev = priv->net_dev;
+ union iwreq_data wrqu0;
+
+ DPRINTK(3, "\n");
+ if (priv->dev_state == DEVICE_STATE_SLEEP)
+ priv->dev_state = DEVICE_STATE_READY;
+
+ /* disconnect indication */
+ if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+ netif_carrier_off(netdev);
+ tmp = FORCE_DISCONNECT & priv->connect_status;
+ priv->connect_status = tmp | DISCONNECT_STATUS;
+		printk(KERN_INFO "IWEVENT: disconnect\n");
+
+ wrqu0.data.length = 0;
+ wrqu0.data.flags = 0;
+ wrqu0.ap_addr.sa_family = ARPHRD_ETHER;
+ if ((priv->connect_status & CONNECT_STATUS_MASK) ==
+ DISCONNECT_STATUS
+ && (old_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+ memset(wrqu0.ap_addr.sa_data, '\0', ETH_ALEN);
+			DPRINTK(3, "IWEVENT: disconnect\n");
+ DPRINTK(3, "disconnect :: scan_ind_count=%d\n",
+ priv->scan_ind_count);
+ wireless_send_event(netdev, SIOCGIWAP, &wrqu0, NULL);
+ }
+ priv->scan_ind_count = 0;
+ }
+
+ hostif_sme_enqueue(priv, SME_STOP_CONFIRM);
+}
+
+static
+void hostif_ps_adhoc_set_confirm(struct ks_wlan_private *priv)
+{
+ DPRINTK(3, "\n");
+ priv->infra_status = 0; /* infrastructure mode cancel */
+ hostif_sme_enqueue(priv, SME_MODE_SET_CONFIRM);
+}
+
+static
+void hostif_infrastructure_set_confirm(struct ks_wlan_private *priv)
+{
+ uint16_t result_code;
+ DPRINTK(3, "\n");
+ result_code = get_WORD(priv);
+ DPRINTK(3, "result code = %d\n", result_code);
+ priv->infra_status = 1; /* infrastructure mode set */
+ hostif_sme_enqueue(priv, SME_MODE_SET_CONFIRM);
+}
+
+static
+void hostif_adhoc_set_confirm(struct ks_wlan_private *priv)
+{
+ DPRINTK(3, "\n");
+ priv->infra_status = 1; /* infrastructure mode set */
+ hostif_sme_enqueue(priv, SME_MODE_SET_CONFIRM);
+}
+
+static
+void hostif_associate_indication(struct ks_wlan_private *priv)
+{
+ struct association_request_t *assoc_req;
+ struct association_response_t *assoc_resp;
+ unsigned char *pb;
+ union iwreq_data wrqu;
+ char buf[IW_CUSTOM_MAX];
+ char *pbuf = &buf[0];
+ int i;
+
+ static const char associnfo_leader0[] = "ASSOCINFO(ReqIEs=";
+ static const char associnfo_leader1[] = " RespIEs=";
+
+ DPRINTK(3, "\n");
+ assoc_req = (struct association_request_t *)(priv->rxp);
+ assoc_resp = (struct association_response_t *)(assoc_req + 1);
+ pb = (unsigned char *)(assoc_resp + 1);
+
+ memset(&wrqu, 0, sizeof(wrqu));
+ memcpy(pbuf, associnfo_leader0, sizeof(associnfo_leader0) - 1);
+ wrqu.data.length += sizeof(associnfo_leader0) - 1;
+ pbuf += sizeof(associnfo_leader0) - 1;
+
+ for (i = 0; i < assoc_req->reqIEs_size; i++)
+ pbuf += sprintf(pbuf, "%02x", *(pb + i));
+ wrqu.data.length += (assoc_req->reqIEs_size) * 2;
+
+ memcpy(pbuf, associnfo_leader1, sizeof(associnfo_leader1) - 1);
+ wrqu.data.length += sizeof(associnfo_leader1) - 1;
+ pbuf += sizeof(associnfo_leader1) - 1;
+
+ pb += assoc_req->reqIEs_size;
+ for (i = 0; i < assoc_resp->respIEs_size; i++)
+ pbuf += sprintf(pbuf, "%02x", *(pb + i));
+ wrqu.data.length += (assoc_resp->respIEs_size) * 2;
+
+ pbuf += sprintf(pbuf, ")");
+ wrqu.data.length += 1;
+
+ DPRINTK(3, "IWEVENT:ASSOCINFO\n");
+ wireless_send_event(priv->net_dev, IWEVCUSTOM, &wrqu, buf);
+}
+
+static
+void hostif_bss_scan_confirm(struct ks_wlan_private *priv)
+{
+ unsigned int result_code;
+ struct net_device *dev = priv->net_dev;
+ union iwreq_data wrqu;
+ result_code = get_DWORD(priv);
+ DPRINTK(2, "result=%d :: scan_ind_count=%d\n", result_code,
+ priv->scan_ind_count);
+
+ priv->sme_i.sme_flag &= ~SME_AP_SCAN;
+ hostif_sme_enqueue(priv, SME_BSS_SCAN_CONFIRM);
+
+ wrqu.data.length = 0;
+ wrqu.data.flags = 0;
+ DPRINTK(3, "IWEVENT: SCAN CONFIRM\n");
+ wireless_send_event(dev, SIOCGIWSCAN, &wrqu, NULL);
+ priv->scan_ind_count = 0;
+}
+
+static
+void hostif_phy_information_confirm(struct ks_wlan_private *priv)
+{
+ struct iw_statistics *wstats = &priv->wstats;
+ unsigned char rssi, signal, noise;
+ unsigned char LinkSpeed;
+ unsigned int TransmittedFrameCount, ReceivedFragmentCount;
+ unsigned int FailedCount, FCSErrorCount;
+
+ DPRINTK(3, "\n");
+ rssi = get_BYTE(priv);
+ signal = get_BYTE(priv);
+ noise = get_BYTE(priv);
+ LinkSpeed = get_BYTE(priv);
+ TransmittedFrameCount = get_DWORD(priv);
+ ReceivedFragmentCount = get_DWORD(priv);
+ FailedCount = get_DWORD(priv);
+ FCSErrorCount = get_DWORD(priv);
+
+ DPRINTK(4, "phyinfo confirm rssi=%d signal=%d\n", rssi, signal);
+ priv->current_rate = (LinkSpeed & RATE_MASK);
+ wstats->qual.qual = signal;
+ wstats->qual.level = 256 - rssi;
+ wstats->qual.noise = 0; /* invalid noise value */
+ wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
+
+	DPRINTK(3, "\n rssi=%u\n signal=%u\n LinkSpeed=%ux500Kbps\n"
+		" TransmittedFrameCount=%u\n ReceivedFragmentCount=%u\n"
+		" FailedCount=%u\n FCSErrorCount=%u\n",
+		rssi, signal, LinkSpeed, TransmittedFrameCount,
+		ReceivedFragmentCount, FailedCount, FCSErrorCount);
+
+ /* wake_up_interruptible_all(&priv->confirm_wait); */
+ complete(&priv->confirm_wait);
+}
+
+static
+void hostif_mic_failure_confirm(struct ks_wlan_private *priv)
+{
+ DPRINTK(3, "mic_failure=%u\n", priv->wpa.mic_failure.failure);
+ hostif_sme_enqueue(priv, SME_MIC_FAILURE_CONFIRM);
+}
+
+static
+void hostif_event_check(struct ks_wlan_private *priv)
+{
+ unsigned short event;
+
+ DPRINTK(4, "\n");
+ event = get_WORD(priv); /* get event */
+ switch (event) {
+ case HIF_DATA_IND:
+ hostif_data_indication(priv);
+ break;
+ case HIF_MIB_GET_CONF:
+ hostif_mib_get_confirm(priv);
+ break;
+ case HIF_MIB_SET_CONF:
+ hostif_mib_set_confirm(priv);
+ break;
+ case HIF_POWERMGT_CONF:
+ hostif_power_mngmt_confirm(priv);
+ break;
+ case HIF_SLEEP_CONF:
+ hostif_sleep_confirm(priv);
+ break;
+ case HIF_START_CONF:
+ hostif_start_confirm(priv);
+ break;
+ case HIF_CONNECT_IND:
+ hostif_connect_indication(priv);
+ break;
+ case HIF_STOP_CONF:
+ hostif_stop_confirm(priv);
+ break;
+ case HIF_PS_ADH_SET_CONF:
+ hostif_ps_adhoc_set_confirm(priv);
+ break;
+ case HIF_INFRA_SET_CONF:
+ case HIF_INFRA_SET2_CONF:
+ hostif_infrastructure_set_confirm(priv);
+ break;
+ case HIF_ADH_SET_CONF:
+ case HIF_ADH_SET2_CONF:
+ hostif_adhoc_set_confirm(priv);
+ break;
+ case HIF_ASSOC_INFO_IND:
+ hostif_associate_indication(priv);
+ break;
+ case HIF_MIC_FAILURE_CONF:
+ hostif_mic_failure_confirm(priv);
+ break;
+ case HIF_SCAN_CONF:
+ hostif_bss_scan_confirm(priv);
+ break;
+ case HIF_PHY_INFO_CONF:
+ case HIF_PHY_INFO_IND:
+ hostif_phy_information_confirm(priv);
+ break;
+ case HIF_SCAN_IND:
+ hostif_scan_indication(priv);
+ break;
+ case HIF_AP_SET_CONF:
+ default:
+		printk(KERN_WARNING "undefined event[%04X]\n", event);
+ /* wake_up_all(&priv->confirm_wait); */
+ complete(&priv->confirm_wait);
+ break;
+ }
+
+ /* add event to hostt buffer */
+ priv->hostt.buff[priv->hostt.qtail] = event;
+ priv->hostt.qtail = (priv->hostt.qtail + 1) % SME_EVENT_BUFF_SIZE;
+}
+
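+/*
+ * Round a size up to the next multiple of 4, e.g. CHECK_ALINE(5) == 8 and
+ * CHECK_ALINE(8) == 8; the host interface appears to require 32-bit
+ * aligned lengths.
+ */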
+#define CHECK_ALINE(size) ((size) % 4 ? ((size) + (4 - ((size) % 4))) : (size))
+
+int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *packet)
+{
+ unsigned int packet_len = 0;
+
+ unsigned char *buffer = NULL;
+ unsigned int length = 0;
+ struct hostif_data_request_t *pp;
+ unsigned char *p;
+ int result = 0;
+ unsigned short eth_proto;
+ struct ether_hdr *eth_hdr;
+ struct michel_mic_t michel_mic;
+ unsigned short keyinfo = 0;
+ struct ieee802_1x_hdr *aa1x_hdr;
+ struct wpa_eapol_key *eap_key;
+ struct ethhdr *eth;
+
+ packet_len = packet->len;
+ if (packet_len > ETH_FRAME_LEN) {
+		DPRINTK(1, "bad length packet_len=%d\n", packet_len);
+ dev_kfree_skb(packet);
+ return -1;
+ }
+
+ if (((priv->connect_status & CONNECT_STATUS_MASK) == DISCONNECT_STATUS)
+ || (priv->connect_status & FORCE_DISCONNECT)
+ || priv->wpa.mic_failure.stop) {
+ DPRINTK(3, " DISCONNECT\n");
+ if (netif_queue_stopped(priv->net_dev))
+ netif_wake_queue(priv->net_dev);
+ if (packet)
+ dev_kfree_skb(packet);
+
+ return 0;
+ }
+
+ /* for PowerSave */
+ if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) { /* power save wakeup */
+ if (!netif_queue_stopped(priv->net_dev))
+ netif_stop_queue(priv->net_dev);
+ }
+
+ DPRINTK(4, "skb_buff length=%d\n", packet_len);
+ pp = (struct hostif_data_request_t *)
+ kmalloc(hif_align_size(sizeof(*pp) + 6 + packet_len + 8),
+ KS_WLAN_MEM_FLAG);
+
+ if (pp == NULL) {
+ DPRINTK(3, "allocate memory failed..\n");
+ dev_kfree_skb(packet);
+ return -2;
+ }
+
+ p = (unsigned char *)pp->data;
+
+ buffer = packet->data;
+ length = packet->len;
+
+	/* packet check */
+	eth = (struct ethhdr *)packet->data;
+	if (memcmp(&priv->eth_addr[0], eth->h_source, ETH_ALEN)) {
+		DPRINTK(1, "invalid mac address !!\n");
+		DPRINTK(1, "ethernet->h_source=%02X:%02X:%02X:%02X:%02X:%02X\n",
+			eth->h_source[0], eth->h_source[1], eth->h_source[2],
+			eth->h_source[3], eth->h_source[4], eth->h_source[5]);
+		dev_kfree_skb(packet);
+		kfree(pp);	/* don't leak the request buffer allocated above */
+		return -3;
+	}
+
+ /* MAC address copy */
+ memcpy(p, buffer, 12); /* DST/SRC MAC address */
+ p += 12;
+ buffer += 12;
+ length -= 12;
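+	/*
+	 * A type/length value above 1500 is an EtherType (Ethernet II/DIX
+	 * frame), so an 802.2 LLC/SNAP header is inserted in front of it;
+	 * a value of 1500 or less is an 802.3 length field and is dropped.
+	 */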
+ /* EtherType/Length check */
+ if (*(buffer + 1) + (*buffer << 8) > 1500) {
+ /* ProtocolEAP = *(buffer+1) + (*buffer << 8); */
+ /* DPRINTK(2, "Send [SNAP]Type %x\n",ProtocolEAP); */
+ /* SAP/CTL/OUI(6 byte) add */
+ *p++ = 0xAA; /* DSAP */
+ *p++ = 0xAA; /* SSAP */
+ *p++ = 0x03; /* CTL */
+ *p++ = 0x00; /* OUI ("000000") */
+ *p++ = 0x00; /* OUI ("000000") */
+ *p++ = 0x00; /* OUI ("000000") */
+ packet_len += 6;
+ } else {
+		DPRINTK(4, "802.3 length field\n");
+ /* Length(2 byte) delete */
+ buffer += 2;
+ length -= 2;
+ packet_len -= 2;
+ }
+
+ /* pp->data copy */
+ memcpy(p, buffer, length);
+
+ p += length;
+
+ /* for WPA */
+ eth_hdr = (struct ether_hdr *)&pp->data[0];
+ eth_proto = ntohs(eth_hdr->h_proto);
+
+	/* for MIC FAILURE REPORT check */
+ if (eth_proto == ETHER_PROTOCOL_TYPE_EAP
+ && priv->wpa.mic_failure.failure > 0) {
+ aa1x_hdr = (struct ieee802_1x_hdr *)(eth_hdr + 1);
+ if (aa1x_hdr->type == IEEE802_1X_TYPE_EAPOL_KEY) {
+ eap_key = (struct wpa_eapol_key *)(aa1x_hdr + 1);
+ keyinfo = ntohs(eap_key->key_info);
+ }
+ }
+
+ if (priv->wpa.rsn_enabled && priv->wpa.key[0].key_len) {
+ if (eth_proto == ETHER_PROTOCOL_TYPE_EAP
+ && !(priv->wpa.key[1].key_len)
+ && !(priv->wpa.key[2].key_len)
+ && !(priv->wpa.key[3].key_len)) {
+ pp->auth_type = cpu_to_le16((uint16_t) TYPE_AUTH); /* no encryption */
+ } else {
+ if (priv->wpa.pairwise_suite == IW_AUTH_CIPHER_TKIP) {
+				MichaelMICFunction(&michel_mic,
+						   (uint8_t *)priv->wpa.key[0].tx_mic_key,
+						   (uint8_t *)&pp->data[0],
+						   (int)packet_len,
+						   (uint8_t)0,	/* priority */
+						   (uint8_t *)michel_mic.Result);
+ memcpy(p, michel_mic.Result, 8);
+ length += 8;
+ packet_len += 8;
+ p += 8;
+ pp->auth_type =
+ cpu_to_le16((uint16_t) TYPE_DATA);
+
+ } else if (priv->wpa.pairwise_suite ==
+ IW_AUTH_CIPHER_CCMP) {
+ pp->auth_type =
+ cpu_to_le16((uint16_t) TYPE_DATA);
+ }
+ }
+ } else {
+ if (eth_proto == ETHER_PROTOCOL_TYPE_EAP)
+ pp->auth_type = cpu_to_le16((uint16_t) TYPE_AUTH);
+ else
+ pp->auth_type = cpu_to_le16((uint16_t) TYPE_DATA);
+ }
+
+ /* header value set */
+ pp->header.size =
+ cpu_to_le16((uint16_t)
+ (sizeof(*pp) - sizeof(pp->header.size) + packet_len));
+ pp->header.event = cpu_to_le16((uint16_t) HIF_DATA_REQ);
+
+ /* tx request */
+ result =
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp) + packet_len),
+ (void *)send_packet_complete, (void *)priv,
+ (void *)packet);
+
+	/* MIC FAILURE REPORT check */
+ if (eth_proto == ETHER_PROTOCOL_TYPE_EAP
+ && priv->wpa.mic_failure.failure > 0) {
+ if (keyinfo & WPA_KEY_INFO_ERROR
+ && keyinfo & WPA_KEY_INFO_REQUEST) {
+ DPRINTK(3, " MIC ERROR Report SET : %04X\n", keyinfo);
+ hostif_sme_enqueue(priv, SME_MIC_FAILURE_REQUEST);
+ }
+ if (priv->wpa.mic_failure.failure == 2)
+ priv->wpa.mic_failure.stop = 1;
+ }
+
+ return result;
+}
+
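+/*
+ * While the device is past PS_ACTIVE_SET (power save negotiated or
+ * active), count each outstanding request so the power-save logic can
+ * wait for the matching confirms before letting the device doze.
+ */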
+#define ps_confirm_wait_inc(priv)					\
+	do {								\
+		if (atomic_read(&(priv)->psstatus.status) > PS_ACTIVE_SET) {	\
+			atomic_inc(&(priv)->psstatus.confirm_wait);	\
+			/* atomic_set(&(priv)->psstatus.status, PS_CONF_WAIT); */	\
+		}							\
+	} while (0)
+
+static
+void hostif_mib_get_request(struct ks_wlan_private *priv,
+ unsigned long mib_attribute)
+{
+ struct hostif_mib_get_request_t *pp;
+
+ DPRINTK(3, "\n");
+
+ /* make primitive */
+ pp = (struct hostif_mib_get_request_t *)
+ kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (pp == NULL) {
+ DPRINTK(3, "allocate memory failed..\n");
+ return;
+ }
+ pp->header.size =
+ cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+ pp->header.event = cpu_to_le16((uint16_t) HIF_MIB_GET_REQ);
+ pp->mib_attribute = cpu_to_le32((uint32_t) mib_attribute);
+
+ /* send to device request */
+ ps_confirm_wait_inc(priv);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+}
+
+static
+void hostif_mib_set_request(struct ks_wlan_private *priv,
+ unsigned long mib_attribute, unsigned short size,
+ unsigned short type, void *vp)
+{
+ struct hostif_mib_set_request_t *pp;
+
+ DPRINTK(3, "\n");
+
+ if (priv->dev_state < DEVICE_STATE_BOOT) {
+ DPRINTK(3, "DeviceRemove\n");
+ return;
+ }
+
+ /* make primitive */
+ pp = (struct hostif_mib_set_request_t *)
+ kmalloc(hif_align_size(sizeof(*pp) + size), KS_WLAN_MEM_FLAG);
+ if (pp == NULL) {
+ DPRINTK(3, "allocate memory failed..\n");
+ return;
+ }
+
+ pp->header.size =
+ cpu_to_le16((uint16_t)
+ (sizeof(*pp) - sizeof(pp->header.size) + size));
+ pp->header.event = cpu_to_le16((uint16_t) HIF_MIB_SET_REQ);
+ pp->mib_attribute = cpu_to_le32((uint32_t) mib_attribute);
+ pp->mib_value.size = cpu_to_le16((uint16_t) size);
+ pp->mib_value.type = cpu_to_le16((uint16_t) type);
+ memcpy(&pp->mib_value.body, vp, size);
+
+ /* send to device request */
+ ps_confirm_wait_inc(priv);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp) + size), NULL, NULL,
+ NULL);
+}
+
+static
+void hostif_start_request(struct ks_wlan_private *priv, unsigned char mode)
+{
+ struct hostif_start_request_t *pp;
+
+ DPRINTK(3, "\n");
+
+ /* make primitive */
+ pp = (struct hostif_start_request_t *)
+ kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (pp == NULL) {
+ DPRINTK(3, "allocate memory failed..\n");
+ return;
+ }
+ pp->header.size =
+ cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+ pp->header.event = cpu_to_le16((uint16_t) HIF_START_REQ);
+ pp->mode = cpu_to_le16((uint16_t) mode);
+
+ /* send to device request */
+ ps_confirm_wait_inc(priv);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+
+ priv->aplist.size = 0;
+ priv->scan_ind_count = 0;
+}
+
+static
+void hostif_ps_adhoc_set_request(struct ks_wlan_private *priv)
+{
+ struct hostif_ps_adhoc_set_request_t *pp;
+ uint16_t capability;
+
+ DPRINTK(3, "\n");
+
+ /* make primitive */
+ pp = (struct hostif_ps_adhoc_set_request_t *)
+ kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (pp == NULL) {
+ DPRINTK(3, "allocate memory failed..\n");
+ return;
+ }
+ memset(pp, 0, sizeof(*pp));
+ pp->header.size =
+ cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+ pp->header.event = cpu_to_le16((uint16_t) HIF_PS_ADH_SET_REQ);
+ pp->phy_type = cpu_to_le16((uint16_t) (priv->reg.phy_type));
+ pp->cts_mode = cpu_to_le16((uint16_t) (priv->reg.cts_mode));
+ pp->scan_type = cpu_to_le16((uint16_t) (priv->reg.scan_type));
+ pp->channel = cpu_to_le16((uint16_t) (priv->reg.channel));
+ pp->rate_set.size = priv->reg.rate_set.size;
+ memcpy(&pp->rate_set.body[0], &priv->reg.rate_set.body[0],
+ priv->reg.rate_set.size);
+
+ capability = 0x0000;
+ if (priv->reg.preamble == SHORT_PREAMBLE) {
+ /* short preamble */
+ capability |= BSS_CAP_SHORT_PREAMBLE;
+ }
+ capability &= ~(BSS_CAP_PBCC); /* pbcc not support */
+ if (priv->reg.phy_type != D_11B_ONLY_MODE) {
+ capability |= BSS_CAP_SHORT_SLOT_TIME; /* ShortSlotTime support */
+ capability &= ~(BSS_CAP_DSSS_OFDM); /* DSSS OFDM */
+ }
+ pp->capability = cpu_to_le16((uint16_t) capability);
+
+ /* send to device request */
+ ps_confirm_wait_inc(priv);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+}
+
+static
+void hostif_infrastructure_set_request(struct ks_wlan_private *priv)
+{
+ struct hostif_infrastructure_set_request_t *pp;
+ uint16_t capability;
+
+	DPRINTK(3, "ssid.size=%d\n", priv->reg.ssid.size);
+
+ /* make primitive */
+ pp = (struct hostif_infrastructure_set_request_t *)
+ kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (pp == NULL) {
+ DPRINTK(3, "allocate memory failed..\n");
+ return;
+ }
+ pp->header.size =
+ cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+ pp->header.event = cpu_to_le16((uint16_t) HIF_INFRA_SET_REQ);
+ pp->phy_type = cpu_to_le16((uint16_t) (priv->reg.phy_type));
+ pp->cts_mode = cpu_to_le16((uint16_t) (priv->reg.cts_mode));
+ pp->scan_type = cpu_to_le16((uint16_t) (priv->reg.scan_type));
+
+ pp->rate_set.size = priv->reg.rate_set.size;
+ memcpy(&pp->rate_set.body[0], &priv->reg.rate_set.body[0],
+ priv->reg.rate_set.size);
+ pp->ssid.size = priv->reg.ssid.size;
+ memcpy(&pp->ssid.body[0], &priv->reg.ssid.body[0], priv->reg.ssid.size);
+
+ capability = 0x0000;
+ if (priv->reg.preamble == SHORT_PREAMBLE) {
+ /* short preamble */
+ capability |= BSS_CAP_SHORT_PREAMBLE;
+ }
+ capability &= ~(BSS_CAP_PBCC); /* pbcc not support */
+ if (priv->reg.phy_type != D_11B_ONLY_MODE) {
+ capability |= BSS_CAP_SHORT_SLOT_TIME; /* ShortSlotTime support */
+ capability &= ~(BSS_CAP_DSSS_OFDM); /* DSSS OFDM not support */
+ }
+ pp->capability = cpu_to_le16((uint16_t) capability);
+ pp->beacon_lost_count =
+ cpu_to_le16((uint16_t) (priv->reg.beacon_lost_count));
+ pp->auth_type = cpu_to_le16((uint16_t) (priv->reg.authenticate_type));
+
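+	/*
+	 * The channel list is interleaved (1,8,2,9,...) rather than
+	 * sequential, presumably so consecutive dwells land on
+	 * non-overlapping channels.
+	 */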
+ pp->channel_list.body[0] = 1;
+ pp->channel_list.body[1] = 8;
+ pp->channel_list.body[2] = 2;
+ pp->channel_list.body[3] = 9;
+ pp->channel_list.body[4] = 3;
+ pp->channel_list.body[5] = 10;
+ pp->channel_list.body[6] = 4;
+ pp->channel_list.body[7] = 11;
+ pp->channel_list.body[8] = 5;
+ pp->channel_list.body[9] = 12;
+ pp->channel_list.body[10] = 6;
+ pp->channel_list.body[11] = 13;
+ pp->channel_list.body[12] = 7;
+ if (priv->reg.phy_type == D_11G_ONLY_MODE) {
+ pp->channel_list.size = 13;
+ } else {
+ pp->channel_list.body[13] = 14;
+ pp->channel_list.size = 14;
+ }
+
+ /* send to device request */
+ ps_confirm_wait_inc(priv);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+}
+
+void hostif_infrastructure_set2_request(struct ks_wlan_private *priv)
+{
+ struct hostif_infrastructure_set2_request_t *pp;
+ uint16_t capability;
+
+	DPRINTK(2, "ssid.size=%d\n", priv->reg.ssid.size);
+
+ /* make primitive */
+ pp = (struct hostif_infrastructure_set2_request_t *)
+ kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (pp == NULL) {
+ DPRINTK(3, "allocate memory failed..\n");
+ return;
+ }
+ pp->header.size =
+ cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+ pp->header.event = cpu_to_le16((uint16_t) HIF_INFRA_SET2_REQ);
+ pp->phy_type = cpu_to_le16((uint16_t) (priv->reg.phy_type));
+ pp->cts_mode = cpu_to_le16((uint16_t) (priv->reg.cts_mode));
+ pp->scan_type = cpu_to_le16((uint16_t) (priv->reg.scan_type));
+
+ pp->rate_set.size = priv->reg.rate_set.size;
+ memcpy(&pp->rate_set.body[0], &priv->reg.rate_set.body[0],
+ priv->reg.rate_set.size);
+ pp->ssid.size = priv->reg.ssid.size;
+ memcpy(&pp->ssid.body[0], &priv->reg.ssid.body[0], priv->reg.ssid.size);
+
+ capability = 0x0000;
+ if (priv->reg.preamble == SHORT_PREAMBLE) {
+ /* short preamble */
+ capability |= BSS_CAP_SHORT_PREAMBLE;
+ }
+ capability &= ~(BSS_CAP_PBCC); /* pbcc not support */
+ if (priv->reg.phy_type != D_11B_ONLY_MODE) {
+ capability |= BSS_CAP_SHORT_SLOT_TIME; /* ShortSlotTime support */
+ capability &= ~(BSS_CAP_DSSS_OFDM); /* DSSS OFDM not support */
+ }
+ pp->capability = cpu_to_le16((uint16_t) capability);
+ pp->beacon_lost_count =
+ cpu_to_le16((uint16_t) (priv->reg.beacon_lost_count));
+ pp->auth_type = cpu_to_le16((uint16_t) (priv->reg.authenticate_type));
+
+ pp->channel_list.body[0] = 1;
+ pp->channel_list.body[1] = 8;
+ pp->channel_list.body[2] = 2;
+ pp->channel_list.body[3] = 9;
+ pp->channel_list.body[4] = 3;
+ pp->channel_list.body[5] = 10;
+ pp->channel_list.body[6] = 4;
+ pp->channel_list.body[7] = 11;
+ pp->channel_list.body[8] = 5;
+ pp->channel_list.body[9] = 12;
+ pp->channel_list.body[10] = 6;
+ pp->channel_list.body[11] = 13;
+ pp->channel_list.body[12] = 7;
+ if (priv->reg.phy_type == D_11G_ONLY_MODE) {
+ pp->channel_list.size = 13;
+ } else {
+ pp->channel_list.body[13] = 14;
+ pp->channel_list.size = 14;
+ }
+
+ memcpy(pp->bssid, priv->reg.bssid, ETH_ALEN);
+
+ /* send to device request */
+ ps_confirm_wait_inc(priv);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+}
+
+static
+void hostif_adhoc_set_request(struct ks_wlan_private *priv)
+{
+ struct hostif_adhoc_set_request_t *pp;
+ uint16_t capability;
+
+ DPRINTK(3, "\n");
+
+ /* make primitive */
+ pp = (struct hostif_adhoc_set_request_t *)
+ kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (pp == NULL) {
+ DPRINTK(3, "allocate memory failed..\n");
+ return;
+ }
+ memset(pp, 0, sizeof(*pp));
+ pp->header.size =
+ cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+ pp->header.event = cpu_to_le16((uint16_t) HIF_ADH_SET_REQ);
+ pp->phy_type = cpu_to_le16((uint16_t) (priv->reg.phy_type));
+ pp->cts_mode = cpu_to_le16((uint16_t) (priv->reg.cts_mode));
+ pp->scan_type = cpu_to_le16((uint16_t) (priv->reg.scan_type));
+ pp->channel = cpu_to_le16((uint16_t) (priv->reg.channel));
+ pp->rate_set.size = priv->reg.rate_set.size;
+ memcpy(&pp->rate_set.body[0], &priv->reg.rate_set.body[0],
+ priv->reg.rate_set.size);
+ pp->ssid.size = priv->reg.ssid.size;
+ memcpy(&pp->ssid.body[0], &priv->reg.ssid.body[0], priv->reg.ssid.size);
+
+ capability = 0x0000;
+ if (priv->reg.preamble == SHORT_PREAMBLE) {
+ /* short preamble */
+ capability |= BSS_CAP_SHORT_PREAMBLE;
+ }
+ capability &= ~(BSS_CAP_PBCC); /* pbcc not support */
+ if (priv->reg.phy_type != D_11B_ONLY_MODE) {
+ capability |= BSS_CAP_SHORT_SLOT_TIME; /* ShortSlotTime support */
+ capability &= ~(BSS_CAP_DSSS_OFDM); /* DSSS OFDM not support */
+ }
+ pp->capability = cpu_to_le16((uint16_t) capability);
+
+ /* send to device request */
+ ps_confirm_wait_inc(priv);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+}
+
+static
+void hostif_adhoc_set2_request(struct ks_wlan_private *priv)
+{
+ struct hostif_adhoc_set2_request_t *pp;
+ uint16_t capability;
+
+ DPRINTK(3, "\n");
+
+ /* make primitive */
+ pp = (struct hostif_adhoc_set2_request_t *)
+ kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (pp == NULL) {
+ DPRINTK(3, "allocate memory failed..\n");
+ return;
+ }
+ memset(pp, 0, sizeof(*pp));
+ pp->header.size =
+ cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+ pp->header.event = cpu_to_le16((uint16_t) HIF_ADH_SET_REQ);
+ pp->phy_type = cpu_to_le16((uint16_t) (priv->reg.phy_type));
+ pp->cts_mode = cpu_to_le16((uint16_t) (priv->reg.cts_mode));
+ pp->scan_type = cpu_to_le16((uint16_t) (priv->reg.scan_type));
+ pp->rate_set.size = priv->reg.rate_set.size;
+ memcpy(&pp->rate_set.body[0], &priv->reg.rate_set.body[0],
+ priv->reg.rate_set.size);
+ pp->ssid.size = priv->reg.ssid.size;
+ memcpy(&pp->ssid.body[0], &priv->reg.ssid.body[0], priv->reg.ssid.size);
+
+ capability = 0x0000;
+ if (priv->reg.preamble == SHORT_PREAMBLE) {
+ /* short preamble */
+ capability |= BSS_CAP_SHORT_PREAMBLE;
+ }
+ capability &= ~(BSS_CAP_PBCC); /* pbcc not support */
+ if (priv->reg.phy_type != D_11B_ONLY_MODE) {
+ capability |= BSS_CAP_SHORT_SLOT_TIME; /* ShortSlotTime support */
+ capability &= ~(BSS_CAP_DSSS_OFDM); /* DSSS OFDM not support */
+ }
+ pp->capability = cpu_to_le16((uint16_t) capability);
+
+ pp->channel_list.body[0] = priv->reg.channel;
+ pp->channel_list.size = 1;
+ memcpy(pp->bssid, priv->reg.bssid, ETH_ALEN);
+
+ /* send to device request */
+ ps_confirm_wait_inc(priv);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+}
+
+static
+void hostif_stop_request(struct ks_wlan_private *priv)
+{
+ struct hostif_stop_request_t *pp;
+
+ DPRINTK(3, "\n");
+
+ /* make primitive */
+ pp = (struct hostif_stop_request_t *)
+ kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (pp == NULL) {
+ DPRINTK(3, "allocate memory failed..\n");
+ return;
+ }
+ pp->header.size =
+ cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+ pp->header.event = cpu_to_le16((uint16_t) HIF_STOP_REQ);
+
+ /* send to device request */
+ ps_confirm_wait_inc(priv);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+}
+
+static
+void hostif_phy_information_request(struct ks_wlan_private *priv)
+{
+ struct hostif_phy_information_request_t *pp;
+
+ DPRINTK(3, "\n");
+
+ /* make primitive */
+ pp = (struct hostif_phy_information_request_t *)
+ kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (pp == NULL) {
+ DPRINTK(3, "allocate memory failed..\n");
+ return;
+ }
+ pp->header.size =
+ cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+ pp->header.event = cpu_to_le16((uint16_t) HIF_PHY_INFO_REQ);
+ if (priv->reg.phy_info_timer) {
+ pp->type = cpu_to_le16((uint16_t) TIME_TYPE);
+ pp->time = cpu_to_le16((uint16_t) (priv->reg.phy_info_timer));
+ } else {
+ pp->type = cpu_to_le16((uint16_t) NORMAL_TYPE);
+ pp->time = cpu_to_le16((uint16_t) 0);
+ }
+
+ /* send to device request */
+ ps_confirm_wait_inc(priv);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+}
+
+static
+void hostif_power_mngmt_request(struct ks_wlan_private *priv,
+ unsigned long mode, unsigned long wake_up,
+ unsigned long receiveDTIMs)
+{
+ struct hostif_power_mngmt_request_t *pp;
+
+ DPRINTK(3, "mode=%lu wake_up=%lu receiveDTIMs=%lu\n", mode, wake_up,
+ receiveDTIMs);
+ /* make primitive */
+ pp = (struct hostif_power_mngmt_request_t *)
+ kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (pp == NULL) {
+ DPRINTK(3, "allocate memory failed..\n");
+ return;
+ }
+ pp->header.size =
+ cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+ pp->header.event = cpu_to_le16((uint16_t) HIF_POWERMGT_REQ);
+ pp->mode = cpu_to_le32((uint32_t) mode);
+ pp->wake_up = cpu_to_le32((uint32_t) wake_up);
+ pp->receiveDTIMs = cpu_to_le32((uint32_t) receiveDTIMs);
+
+ /* send to device request */
+ ps_confirm_wait_inc(priv);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+}
+
+static
+void hostif_sleep_request(struct ks_wlan_private *priv, unsigned long mode)
+{
+ struct hostif_sleep_request_t *pp;
+
+	DPRINTK(3, "mode=%lu\n", mode);
+
+ if (mode == SLP_SLEEP) {
+ /* make primitive */
+ pp = (struct hostif_sleep_request_t *)
+ kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (pp == NULL) {
+ DPRINTK(3, "allocate memory failed..\n");
+ return;
+ }
+ pp->header.size =
+ cpu_to_le16((uint16_t)
+ (sizeof(*pp) - sizeof(pp->header.size)));
+ pp->header.event = cpu_to_le16((uint16_t) HIF_SLEEP_REQ);
+
+ /* send to device request */
+ ps_confirm_wait_inc(priv);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL,
+ NULL);
+ } else if (mode == SLP_ACTIVE) {
+ atomic_set(&priv->sleepstatus.wakeup_request, 1);
+ queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
+ &priv->ks_wlan_hw.rw_wq, 1);
+ } else {
+		DPRINTK(3, "invalid mode %lu\n", mode);
+ return;
+ }
+}
+
+static
+void hostif_bss_scan_request(struct ks_wlan_private *priv,
+			     unsigned long scan_type, uint8_t *scan_ssid,
+			     uint8_t scan_ssid_len)
+{
+ struct hostif_bss_scan_request_t *pp;
+
+ DPRINTK(2, "\n");
+ /* make primitive */
+ pp = (struct hostif_bss_scan_request_t *)
+ kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (pp == NULL) {
+ DPRINTK(3, "allocate memory failed..\n");
+ return;
+ }
+ pp->header.size =
+ cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+ pp->header.event = cpu_to_le16((uint16_t) HIF_SCAN_REQ);
+	pp->scan_type = cpu_to_le16((uint16_t) scan_type);
+
+ pp->ch_time_min = cpu_to_le32((uint32_t) 110); /* default value */
+ pp->ch_time_max = cpu_to_le32((uint32_t) 130); /* default value */
+ pp->channel_list.body[0] = 1;
+ pp->channel_list.body[1] = 8;
+ pp->channel_list.body[2] = 2;
+ pp->channel_list.body[3] = 9;
+ pp->channel_list.body[4] = 3;
+ pp->channel_list.body[5] = 10;
+ pp->channel_list.body[6] = 4;
+ pp->channel_list.body[7] = 11;
+ pp->channel_list.body[8] = 5;
+ pp->channel_list.body[9] = 12;
+ pp->channel_list.body[10] = 6;
+ pp->channel_list.body[11] = 13;
+ pp->channel_list.body[12] = 7;
+ if (priv->reg.phy_type == D_11G_ONLY_MODE) {
+ pp->channel_list.size = 13;
+ } else {
+ pp->channel_list.body[13] = 14;
+ pp->channel_list.size = 14;
+ }
+ pp->ssid.size = 0;
+
+ /* specified SSID SCAN */
+ if (scan_ssid_len > 0 && scan_ssid_len <= 32) {
+ pp->ssid.size = scan_ssid_len;
+ memcpy(&pp->ssid.body[0], scan_ssid, scan_ssid_len);
+ }
+
+ /* send to device request */
+ ps_confirm_wait_inc(priv);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+
+ priv->aplist.size = 0;
+ priv->scan_ind_count = 0;
+}
+
+static
+void hostif_mic_failure_request(struct ks_wlan_private *priv,
+ unsigned short failure_count,
+ unsigned short timer)
+{
+ struct hostif_mic_failure_request_t *pp;
+
+ DPRINTK(3, "count=%d :: timer=%d\n", failure_count, timer);
+ /* make primitive */
+ pp = (struct hostif_mic_failure_request_t *)
+ kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+ if (pp == NULL) {
+ DPRINTK(3, "allocate memory failed..\n");
+ return;
+ }
+ pp->header.size =
+ cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+ pp->header.event = cpu_to_le16((uint16_t) HIF_MIC_FAILURE_REQ);
+ pp->failure_count = cpu_to_le16((uint16_t) failure_count);
+ pp->timer = cpu_to_le16((uint16_t) timer);
+
+ /* send to device request */
+ ps_confirm_wait_inc(priv);
+ ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+}
+
+/*
+ * Device I/O receive indicate: hand a received frame to the character-
+ * device read path.  Frames are stored in a DEVICE_STOCK_COUNT-entry
+ * ring indexed by rec_count, and sleepers on devread_wait are woken.
+ */
+static void devio_rec_ind(struct ks_wlan_private *priv, unsigned char *p,
+ unsigned int size)
+{
+ if (priv->device_open_status) {
+ spin_lock(&priv->dev_read_lock); /* request spin lock */
+ priv->dev_data[atomic_read(&priv->rec_count)] = p;
+ priv->dev_size[atomic_read(&priv->rec_count)] = size;
+
+ if (atomic_read(&priv->event_count) != DEVICE_STOCK_COUNT) {
+ /* rx event count inc */
+ atomic_inc(&priv->event_count);
+ }
+ atomic_inc(&priv->rec_count);
+ if (atomic_read(&priv->rec_count) == DEVICE_STOCK_COUNT)
+ atomic_set(&priv->rec_count, 0);
+
+ wake_up_interruptible_all(&priv->devread_wait);
+
+ /* release spin lock */
+ spin_unlock(&priv->dev_read_lock);
+ }
+}
+
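+/*
+ * Entry point for a frame from the hardware layer.  The first 16-bit
+ * word of the payload is the frame length; it must match the received
+ * size before the event is dispatched to the handlers above.
+ */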
+void hostif_receive(struct ks_wlan_private *priv, unsigned char *p,
+ unsigned int size)
+{
+ DPRINTK(4, "\n");
+
+ devio_rec_ind(priv, p, size);
+
+ priv->rxp = p;
+ priv->rx_size = size;
+
+ if (get_WORD(priv) == priv->rx_size) { /* length check !! */
+ hostif_event_check(priv); /* event check */
+ }
+}
+
+static
+void hostif_sme_set_wep(struct ks_wlan_private *priv, int type)
+{
+ uint32_t val;
+ switch (type) {
+ case SME_WEP_INDEX_REQUEST:
+ val = cpu_to_le32((uint32_t) (priv->reg.wep_index));
+ hostif_mib_set_request(priv, DOT11_WEP_DEFAULT_KEY_ID,
+ sizeof(val), MIB_VALUE_TYPE_INT, &val);
+ break;
+ case SME_WEP_KEY1_REQUEST:
+ if (!priv->wpa.wpa_enabled)
+ hostif_mib_set_request(priv,
+ DOT11_WEP_DEFAULT_KEY_VALUE1,
+ priv->reg.wep_key[0].size,
+ MIB_VALUE_TYPE_OSTRING,
+ &priv->reg.wep_key[0].val[0]);
+ break;
+ case SME_WEP_KEY2_REQUEST:
+ if (!priv->wpa.wpa_enabled)
+ hostif_mib_set_request(priv,
+ DOT11_WEP_DEFAULT_KEY_VALUE2,
+ priv->reg.wep_key[1].size,
+ MIB_VALUE_TYPE_OSTRING,
+ &priv->reg.wep_key[1].val[0]);
+ break;
+ case SME_WEP_KEY3_REQUEST:
+ if (!priv->wpa.wpa_enabled)
+ hostif_mib_set_request(priv,
+ DOT11_WEP_DEFAULT_KEY_VALUE3,
+ priv->reg.wep_key[2].size,
+ MIB_VALUE_TYPE_OSTRING,
+ &priv->reg.wep_key[2].val[0]);
+ break;
+ case SME_WEP_KEY4_REQUEST:
+ if (!priv->wpa.wpa_enabled)
+ hostif_mib_set_request(priv,
+ DOT11_WEP_DEFAULT_KEY_VALUE4,
+ priv->reg.wep_key[3].size,
+ MIB_VALUE_TYPE_OSTRING,
+ &priv->reg.wep_key[3].val[0]);
+ break;
+ case SME_WEP_FLAG_REQUEST:
+ val = cpu_to_le32((uint32_t) (priv->reg.privacy_invoked));
+ hostif_mib_set_request(priv, DOT11_PRIVACY_INVOKED,
+ sizeof(val), MIB_VALUE_TYPE_BOOL, &val);
+ break;
+ }
+
+ return;
+}
+
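+/*
+ * Wire formats for the RSN MIB objects: a suite list is a 16-bit
+ * little-endian count followed by up to four suite selectors of
+ * CIPHER_ID_LEN bytes each.
+ */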
+struct wpa_suite_t {
+ unsigned short size;
+ unsigned char suite[4][CIPHER_ID_LEN];
+} __attribute__ ((packed));
+
+struct rsn_mode_t {
+ uint32_t rsn_mode;
+ uint16_t rsn_capability;
+} __attribute__ ((packed));
+
+static
+void hostif_sme_set_rsn(struct ks_wlan_private *priv, int type)
+{
+ struct wpa_suite_t wpa_suite;
+ struct rsn_mode_t rsn_mode;
+ uint32_t val;
+
+ memset(&wpa_suite, 0, sizeof(wpa_suite));
+
+ switch (type) {
+ case SME_RSN_UCAST_REQUEST:
+ wpa_suite.size = cpu_to_le16((uint16_t) 1);
+ switch (priv->wpa.pairwise_suite) {
+ case IW_AUTH_CIPHER_NONE:
+ if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA2_NONE, CIPHER_ID_LEN);
+ else
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA_NONE, CIPHER_ID_LEN);
+ break;
+ case IW_AUTH_CIPHER_WEP40:
+ if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA2_WEP40, CIPHER_ID_LEN);
+ else
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA_WEP40, CIPHER_ID_LEN);
+ break;
+ case IW_AUTH_CIPHER_TKIP:
+ if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA2_TKIP, CIPHER_ID_LEN);
+ else
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA_TKIP, CIPHER_ID_LEN);
+ break;
+ case IW_AUTH_CIPHER_CCMP:
+ if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA2_CCMP, CIPHER_ID_LEN);
+ else
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA_CCMP, CIPHER_ID_LEN);
+ break;
+ case IW_AUTH_CIPHER_WEP104:
+ if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA2_WEP104, CIPHER_ID_LEN);
+ else
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA_WEP104, CIPHER_ID_LEN);
+ break;
+ }
+
+		/* size is stored little-endian; convert it back for the length */
+		hostif_mib_set_request(priv, DOT11_RSN_CONFIG_UNICAST_CIPHER,
+				       sizeof(wpa_suite.size) +
+				       CIPHER_ID_LEN * le16_to_cpu(wpa_suite.size),
+				       MIB_VALUE_TYPE_OSTRING, &wpa_suite);
+ break;
+ case SME_RSN_MCAST_REQUEST:
+ switch (priv->wpa.group_suite) {
+ case IW_AUTH_CIPHER_NONE:
+ if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA2_NONE, CIPHER_ID_LEN);
+ else
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA_NONE, CIPHER_ID_LEN);
+ break;
+ case IW_AUTH_CIPHER_WEP40:
+ if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA2_WEP40, CIPHER_ID_LEN);
+ else
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA_WEP40, CIPHER_ID_LEN);
+ break;
+ case IW_AUTH_CIPHER_TKIP:
+ if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA2_TKIP, CIPHER_ID_LEN);
+ else
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA_TKIP, CIPHER_ID_LEN);
+ break;
+ case IW_AUTH_CIPHER_CCMP:
+ if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA2_CCMP, CIPHER_ID_LEN);
+ else
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA_CCMP, CIPHER_ID_LEN);
+ break;
+ case IW_AUTH_CIPHER_WEP104:
+ if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA2_WEP104, CIPHER_ID_LEN);
+ else
+ memcpy(&wpa_suite.suite[0][0],
+ CIPHER_ID_WPA_WEP104, CIPHER_ID_LEN);
+ break;
+ }
+
+ hostif_mib_set_request(priv, DOT11_RSN_CONFIG_MULTICAST_CIPHER,
+ CIPHER_ID_LEN, MIB_VALUE_TYPE_OSTRING,
+ &wpa_suite.suite[0][0]);
+ break;
+ case SME_RSN_AUTH_REQUEST:
+ wpa_suite.size = cpu_to_le16((uint16_t) 1);
+ switch (priv->wpa.key_mgmt_suite) {
+ case IW_AUTH_KEY_MGMT_802_1X:
+ if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+ memcpy(&wpa_suite.suite[0][0],
+ KEY_MGMT_ID_WPA2_1X, KEY_MGMT_ID_LEN);
+ else
+ memcpy(&wpa_suite.suite[0][0],
+ KEY_MGMT_ID_WPA_1X, KEY_MGMT_ID_LEN);
+ break;
+ case IW_AUTH_KEY_MGMT_PSK:
+ if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+ memcpy(&wpa_suite.suite[0][0],
+ KEY_MGMT_ID_WPA2_PSK, KEY_MGMT_ID_LEN);
+ else
+ memcpy(&wpa_suite.suite[0][0],
+ KEY_MGMT_ID_WPA_PSK, KEY_MGMT_ID_LEN);
+ break;
+		case 0:	/* no key management (KEY_MGMT_ID_*_NONE) */
+ if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+ memcpy(&wpa_suite.suite[0][0],
+ KEY_MGMT_ID_WPA2_NONE, KEY_MGMT_ID_LEN);
+ else
+ memcpy(&wpa_suite.suite[0][0],
+ KEY_MGMT_ID_WPA_NONE, KEY_MGMT_ID_LEN);
+ break;
+		case 4:	/* WPA-NONE (KEY_MGMT_ID_*_WPANONE) */
+ if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+ memcpy(&wpa_suite.suite[0][0],
+ KEY_MGMT_ID_WPA2_WPANONE,
+ KEY_MGMT_ID_LEN);
+ else
+ memcpy(&wpa_suite.suite[0][0],
+ KEY_MGMT_ID_WPA_WPANONE,
+ KEY_MGMT_ID_LEN);
+ break;
+ }
+
+		/* size is stored little-endian; convert it back for the length */
+		hostif_mib_set_request(priv, DOT11_RSN_CONFIG_AUTH_SUITE,
+				       sizeof(wpa_suite.size) +
+				       KEY_MGMT_ID_LEN * le16_to_cpu(wpa_suite.size),
+				       MIB_VALUE_TYPE_OSTRING, &wpa_suite);
+ break;
+ case SME_RSN_ENABLED_REQUEST:
+ val = cpu_to_le32((uint32_t) (priv->wpa.rsn_enabled));
+ hostif_mib_set_request(priv, DOT11_RSN_ENABLED,
+ sizeof(val), MIB_VALUE_TYPE_BOOL, &val);
+ break;
+ case SME_RSN_MODE_REQUEST:
+ if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) {
+ rsn_mode.rsn_mode =
+ cpu_to_le32((uint32_t) RSN_MODE_WPA2);
+ rsn_mode.rsn_capability = cpu_to_le16((uint16_t) 0);
+ } else if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA) {
+ rsn_mode.rsn_mode =
+ cpu_to_le32((uint32_t) RSN_MODE_WPA);
+ rsn_mode.rsn_capability = cpu_to_le16((uint16_t) 0);
+ } else {
+ rsn_mode.rsn_mode =
+ cpu_to_le32((uint32_t) RSN_MODE_NONE);
+ rsn_mode.rsn_capability = cpu_to_le16((uint16_t) 0);
+ }
+ hostif_mib_set_request(priv, LOCAL_RSN_MODE, sizeof(rsn_mode),
+ MIB_VALUE_TYPE_OSTRING, &rsn_mode);
+ break;
+
+ }
+ return;
+}
+
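+/*
+ * Build the operational rate set (honouring phy_type and the BASIC_RATE
+ * flag) and issue the mode-specific connect request; the set2 variants
+ * are used when a specific BSSID has been configured.
+ */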
+static
+void hostif_sme_mode_setup(struct ks_wlan_private *priv)
+{
+ unsigned char rate_size;
+ unsigned char rate_octet[RATE_SET_MAX_SIZE];
+ int i = 0;
+
+	/* rate setting: if rate setting is auto, rebuild for the current phy_type (#94) */
+ if (priv->reg.tx_rate == TX_RATE_FULL_AUTO) {
+ if (priv->reg.phy_type == D_11B_ONLY_MODE) {
+ priv->reg.rate_set.body[3] = TX_RATE_11M;
+ priv->reg.rate_set.body[2] = TX_RATE_5M;
+ priv->reg.rate_set.body[1] = TX_RATE_2M | BASIC_RATE;
+ priv->reg.rate_set.body[0] = TX_RATE_1M | BASIC_RATE;
+ priv->reg.rate_set.size = 4;
+ } else { /* D_11G_ONLY_MODE or D_11BG_COMPATIBLE_MODE */
+ priv->reg.rate_set.body[11] = TX_RATE_54M;
+ priv->reg.rate_set.body[10] = TX_RATE_48M;
+ priv->reg.rate_set.body[9] = TX_RATE_36M;
+ priv->reg.rate_set.body[8] = TX_RATE_18M;
+ priv->reg.rate_set.body[7] = TX_RATE_9M;
+ priv->reg.rate_set.body[6] = TX_RATE_24M | BASIC_RATE;
+ priv->reg.rate_set.body[5] = TX_RATE_12M | BASIC_RATE;
+ priv->reg.rate_set.body[4] = TX_RATE_6M | BASIC_RATE;
+ priv->reg.rate_set.body[3] = TX_RATE_11M | BASIC_RATE;
+ priv->reg.rate_set.body[2] = TX_RATE_5M | BASIC_RATE;
+ priv->reg.rate_set.body[1] = TX_RATE_2M | BASIC_RATE;
+ priv->reg.rate_set.body[0] = TX_RATE_1M | BASIC_RATE;
+ priv->reg.rate_set.size = 12;
+ }
+ }
+
+ /* rate mask by phy setting */
+	if (priv->reg.phy_type == D_11B_ONLY_MODE) {
+		for (i = 0; i < priv->reg.rate_set.size; i++) {
+			if (!IS_11B_RATE(priv->reg.rate_set.body[i]))
+				break;
+			if ((priv->reg.rate_set.body[i] & RATE_MASK) >= TX_RATE_5M)
+				rate_octet[i] =
+				    priv->reg.rate_set.body[i] & RATE_MASK;
+			else
+				rate_octet[i] = priv->reg.rate_set.body[i];
+		}
+	} else {	/* D_11G_ONLY_MODE or D_11BG_COMPATIBLE_MODE */
+		for (i = 0; i < priv->reg.rate_set.size; i++) {
+			if (!IS_11BG_RATE(priv->reg.rate_set.body[i]))
+				break;
+			if (IS_OFDM_EXT_RATE(priv->reg.rate_set.body[i]))
+				rate_octet[i] =
+				    priv->reg.rate_set.body[i] & RATE_MASK;
+			else
+				rate_octet[i] = priv->reg.rate_set.body[i];
+		}
+	}
+ rate_size = i;
+ if (rate_size == 0) {
+ if (priv->reg.phy_type == D_11G_ONLY_MODE)
+ rate_octet[0] = TX_RATE_6M | BASIC_RATE;
+ else
+ rate_octet[0] = TX_RATE_2M | BASIC_RATE;
+ rate_size = 1;
+ }
+
+ /* rate set update */
+ priv->reg.rate_set.size = rate_size;
+ memcpy(&priv->reg.rate_set.body[0], &rate_octet[0], rate_size);
+
+ switch (priv->reg.operation_mode) {
+ case MODE_PSEUDO_ADHOC:
+ /* Pseudo Ad-Hoc mode */
+ hostif_ps_adhoc_set_request(priv);
+ break;
+ case MODE_INFRASTRUCTURE:
+ /* Infrastructure mode */
+ if (!is_valid_ether_addr((u8 *) priv->reg.bssid)) {
+ hostif_infrastructure_set_request(priv);
+ } else {
+ hostif_infrastructure_set2_request(priv);
+ DPRINTK(2,
+ "Infra bssid = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ priv->reg.bssid[0], priv->reg.bssid[1],
+ priv->reg.bssid[2], priv->reg.bssid[3],
+ priv->reg.bssid[4], priv->reg.bssid[5]);
+ }
+ break;
+ case MODE_ADHOC:
+ /* IEEE802.11 Ad-Hoc mode */
+ if (!is_valid_ether_addr((u8 *) priv->reg.bssid)) {
+ hostif_adhoc_set_request(priv);
+ } else {
+ hostif_adhoc_set2_request(priv);
+ DPRINTK(2,
+ "Adhoc bssid = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ priv->reg.bssid[0], priv->reg.bssid[1],
+ priv->reg.bssid[2], priv->reg.bssid[3],
+ priv->reg.bssid[4], priv->reg.bssid[5]);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return;
+}
+
+static
+void hostif_sme_multicast_set(struct ks_wlan_private *priv)
+{
+ struct net_device *dev = priv->net_dev;
+ int mc_count;
+ struct netdev_hw_addr *ha;
+ char set_address[NIC_MAX_MCAST_LIST * ETH_ALEN];
+	uint32_t filter_type;	/* sent as a 32-bit MIB bool */
+ int i = 0;
+
+ DPRINTK(3, "\n");
+
+ spin_lock(&priv->multicast_spin);
+
+ memset(set_address, 0, NIC_MAX_MCAST_LIST * ETH_ALEN);
+
+ if (dev->flags & IFF_PROMISC) {
+ filter_type = cpu_to_le32((uint32_t) MCAST_FILTER_PROMISC);
+ hostif_mib_set_request(priv, LOCAL_MULTICAST_FILTER,
+ sizeof(filter_type), MIB_VALUE_TYPE_BOOL,
+ &filter_type);
+ } else if ((netdev_mc_count(dev) > NIC_MAX_MCAST_LIST)
+ || (dev->flags & IFF_ALLMULTI)) {
+ filter_type = cpu_to_le32((uint32_t) MCAST_FILTER_MCASTALL);
+ hostif_mib_set_request(priv, LOCAL_MULTICAST_FILTER,
+ sizeof(filter_type), MIB_VALUE_TYPE_BOOL,
+ &filter_type);
+ } else {
+ if (priv->sme_i.sme_flag & SME_MULTICAST) {
+ mc_count = netdev_mc_count(dev);
+ netdev_for_each_mc_addr(ha, dev) {
+ memcpy(&set_address[i * ETH_ALEN], ha->addr,
+ ETH_ALEN);
+ i++;
+ }
+ priv->sme_i.sme_flag &= ~SME_MULTICAST;
+ hostif_mib_set_request(priv, LOCAL_MULTICAST_ADDRESS,
+ (ETH_ALEN * mc_count),
+ MIB_VALUE_TYPE_OSTRING,
+ &set_address[0]);
+ } else {
+ filter_type =
+ cpu_to_le32((uint32_t) MCAST_FILTER_MCAST);
+ priv->sme_i.sme_flag |= SME_MULTICAST;
+ hostif_mib_set_request(priv, LOCAL_MULTICAST_FILTER,
+ sizeof(filter_type),
+ MIB_VALUE_TYPE_BOOL,
+ &filter_type);
+ }
+ }
+
+ spin_unlock(&priv->multicast_spin);
+}
+
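+/*
+ * Map the configured power-management level onto a firmware request:
+ * SAVE1 sleeps without waking for DTIM multicasts, SAVE2 also receives
+ * DTIMs; both fall back to POWER_ACTIVE outside infrastructure mode.
+ */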
+static
+void hostif_sme_powermgt_set(struct ks_wlan_private *priv)
+{
+ unsigned long mode, wake_up, receiveDTIMs;
+
+ DPRINTK(3, "\n");
+ switch (priv->reg.powermgt) {
+ case POWMGT_ACTIVE_MODE:
+ mode = POWER_ACTIVE;
+ wake_up = 0;
+ receiveDTIMs = 0;
+ break;
+ case POWMGT_SAVE1_MODE:
+ if (priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
+ mode = POWER_SAVE;
+ wake_up = 0;
+ receiveDTIMs = 0;
+ } else {
+ mode = POWER_ACTIVE;
+ wake_up = 0;
+ receiveDTIMs = 0;
+ }
+ break;
+ case POWMGT_SAVE2_MODE:
+ if (priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
+ mode = POWER_SAVE;
+ wake_up = 0;
+ receiveDTIMs = 1;
+ } else {
+ mode = POWER_ACTIVE;
+ wake_up = 0;
+ receiveDTIMs = 0;
+ }
+ break;
+ default:
+ mode = POWER_ACTIVE;
+ wake_up = 0;
+ receiveDTIMs = 0;
+ break;
+ }
+ hostif_power_mngmt_request(priv, mode, wake_up, receiveDTIMs);
+
+ return;
+}
+
+static
+void hostif_sme_sleep_set(struct ks_wlan_private *priv)
+{
+ DPRINTK(3, "\n");
+	switch (priv->sleep_mode) {
+	case SLP_SLEEP:
+	case SLP_ACTIVE:
+		hostif_sleep_request(priv, priv->sleep_mode);
+		break;
+	default:
+		break;
+	}
+
+ return;
+}
+
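+/*
+ * Key slot usage mirrors the TSC MIBs below: key[0] carries the pairwise
+ * key (DOT11_PMK_TSC), key[1] and key[2] the group keys
+ * (DOT11_GMK1/GMK2_TSC).
+ */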
+static
+void hostif_sme_set_key(struct ks_wlan_private *priv, int type)
+{
+ uint32_t val;
+ switch (type) {
+ case SME_SET_FLAG:
+ val = cpu_to_le32((uint32_t) (priv->reg.privacy_invoked));
+ hostif_mib_set_request(priv, DOT11_PRIVACY_INVOKED,
+ sizeof(val), MIB_VALUE_TYPE_BOOL, &val);
+ break;
+ case SME_SET_TXKEY:
+ val = cpu_to_le32((uint32_t) (priv->wpa.txkey));
+ hostif_mib_set_request(priv, DOT11_WEP_DEFAULT_KEY_ID,
+ sizeof(val), MIB_VALUE_TYPE_INT, &val);
+ break;
+ case SME_SET_KEY1:
+ hostif_mib_set_request(priv, DOT11_WEP_DEFAULT_KEY_VALUE1,
+ priv->wpa.key[0].key_len,
+ MIB_VALUE_TYPE_OSTRING,
+ &priv->wpa.key[0].key_val[0]);
+ break;
+ case SME_SET_KEY2:
+ hostif_mib_set_request(priv, DOT11_WEP_DEFAULT_KEY_VALUE2,
+ priv->wpa.key[1].key_len,
+ MIB_VALUE_TYPE_OSTRING,
+ &priv->wpa.key[1].key_val[0]);
+ break;
+ case SME_SET_KEY3:
+ hostif_mib_set_request(priv, DOT11_WEP_DEFAULT_KEY_VALUE3,
+ priv->wpa.key[2].key_len,
+ MIB_VALUE_TYPE_OSTRING,
+ &priv->wpa.key[2].key_val[0]);
+ break;
+ case SME_SET_KEY4:
+ hostif_mib_set_request(priv, DOT11_WEP_DEFAULT_KEY_VALUE4,
+ priv->wpa.key[3].key_len,
+ MIB_VALUE_TYPE_OSTRING,
+ &priv->wpa.key[3].key_val[0]);
+ break;
+ case SME_SET_PMK_TSC:
+ hostif_mib_set_request(priv, DOT11_PMK_TSC,
+ WPA_RX_SEQ_LEN, MIB_VALUE_TYPE_OSTRING,
+ &priv->wpa.key[0].rx_seq[0]);
+ break;
+ case SME_SET_GMK1_TSC:
+ hostif_mib_set_request(priv, DOT11_GMK1_TSC,
+ WPA_RX_SEQ_LEN, MIB_VALUE_TYPE_OSTRING,
+ &priv->wpa.key[1].rx_seq[0]);
+ break;
+ case SME_SET_GMK2_TSC:
+ hostif_mib_set_request(priv, DOT11_GMK2_TSC,
+ WPA_RX_SEQ_LEN, MIB_VALUE_TYPE_OSTRING,
+ &priv->wpa.key[2].rx_seq[0]);
+ break;
+ }
+ return;
+}
+
+static
+void hostif_sme_set_pmksa(struct ks_wlan_private *priv)
+{
+ struct pmk_cache_t {
+ uint16_t size;
+ struct {
+ uint8_t bssid[ETH_ALEN];
+ uint8_t pmkid[IW_PMKID_LEN];
+ } __attribute__ ((packed)) list[PMK_LIST_MAX];
+ } __attribute__ ((packed)) pmkcache;
+ struct pmk_t *pmk;
+ struct list_head *ptr;
+ int i;
+
+ DPRINTK(4, "pmklist.size=%d\n", priv->pmklist.size);
+ i = 0;
+ list_for_each(ptr, &priv->pmklist.head) {
+ pmk = list_entry(ptr, struct pmk_t, list);
+ if (i < PMK_LIST_MAX) {
+ memcpy(pmkcache.list[i].bssid, pmk->bssid, ETH_ALEN);
+ memcpy(pmkcache.list[i].pmkid, pmk->pmkid,
+ IW_PMKID_LEN);
+ i++;
+ }
+ }
+ pmkcache.size = cpu_to_le16((uint16_t) (priv->pmklist.size));
+ hostif_mib_set_request(priv, LOCAL_PMK,
+ sizeof(priv->pmklist.size) + (ETH_ALEN +
+ IW_PMKID_LEN) *
+ (priv->pmklist.size), MIB_VALUE_TYPE_OSTRING,
+ &pmkcache);
+}
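+
+/*
+ * A worked size example for the LOCAL_PMK request above, assuming
+ * IW_PMKID_LEN is the usual 16 octets: each cached entry carries a
+ * 6-byte BSSID plus a 16-byte PMKID, so with pmklist.size == 2 the
+ * MIB payload is sizeof(uint16_t) + 2 * (6 + 16) = 46 octets.
+ */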
+
+/* execute sme */
+static
+void hostif_sme_execute(struct ks_wlan_private *priv, int event)
+{
+ uint32_t val;
+
+ DPRINTK(3, "event=%d\n", event);
+ switch (event) {
+ case SME_START:
+ if (priv->dev_state == DEVICE_STATE_BOOT) {
+ hostif_mib_get_request(priv, DOT11_MAC_ADDRESS);
+ }
+ break;
+ case SME_MULTICAST_REQUEST:
+ hostif_sme_multicast_set(priv);
+ break;
+ case SME_MACADDRESS_SET_REQUEST:
+ hostif_mib_set_request(priv, LOCAL_CURRENTADDRESS, ETH_ALEN,
+ MIB_VALUE_TYPE_OSTRING,
+ &priv->eth_addr[0]);
+ break;
+ case SME_BSS_SCAN_REQUEST:
+ hostif_bss_scan_request(priv, priv->reg.scan_type,
+ priv->scan_ssid, priv->scan_ssid_len);
+ break;
+ case SME_POW_MNGMT_REQUEST:
+ hostif_sme_powermgt_set(priv);
+ break;
+ case SME_PHY_INFO_REQUEST:
+ hostif_phy_information_request(priv);
+ break;
+ case SME_MIC_FAILURE_REQUEST:
+ if (priv->wpa.mic_failure.failure == 1) {
+ hostif_mic_failure_request(priv,
+ priv->wpa.mic_failure.
+ failure - 1, 0);
+ } else if (priv->wpa.mic_failure.failure == 2) {
+ hostif_mic_failure_request(priv,
+ priv->wpa.mic_failure.
+ failure - 1,
+ priv->wpa.mic_failure.
+ counter);
+ } else
+ DPRINTK(4,
+ "SME_MIC_FAILURE_REQUEST: failure count=%u error?\n",
+ priv->wpa.mic_failure.failure);
+ break;
+ case SME_MIC_FAILURE_CONFIRM:
+ if (priv->wpa.mic_failure.failure == 2) {
+ if (priv->wpa.mic_failure.stop)
+ priv->wpa.mic_failure.stop = 0;
+ priv->wpa.mic_failure.failure = 0;
+ hostif_start_request(priv, priv->reg.operation_mode);
+ }
+ break;
+ case SME_GET_MAC_ADDRESS:
+ if (priv->dev_state == DEVICE_STATE_BOOT) {
+ hostif_mib_get_request(priv, DOT11_PRODUCT_VERSION);
+ }
+ break;
+ case SME_GET_PRODUCT_VERSION:
+ if (priv->dev_state == DEVICE_STATE_BOOT) {
+ priv->dev_state = DEVICE_STATE_PREINIT;
+ }
+ break;
+ case SME_STOP_REQUEST:
+ hostif_stop_request(priv);
+ break;
+ case SME_RTS_THRESHOLD_REQUEST:
+ val = cpu_to_le32((uint32_t) (priv->reg.rts));
+ hostif_mib_set_request(priv, DOT11_RTS_THRESHOLD,
+ sizeof(val), MIB_VALUE_TYPE_INT, &val);
+ break;
+ case SME_FRAGMENTATION_THRESHOLD_REQUEST:
+ val = cpu_to_le32((uint32_t) (priv->reg.fragment));
+ hostif_mib_set_request(priv, DOT11_FRAGMENTATION_THRESHOLD,
+ sizeof(val), MIB_VALUE_TYPE_INT, &val);
+ break;
+ case SME_WEP_INDEX_REQUEST:
+ case SME_WEP_KEY1_REQUEST:
+ case SME_WEP_KEY2_REQUEST:
+ case SME_WEP_KEY3_REQUEST:
+ case SME_WEP_KEY4_REQUEST:
+ case SME_WEP_FLAG_REQUEST:
+ hostif_sme_set_wep(priv, event);
+ break;
+ case SME_RSN_UCAST_REQUEST:
+ case SME_RSN_MCAST_REQUEST:
+ case SME_RSN_AUTH_REQUEST:
+ case SME_RSN_ENABLED_REQUEST:
+ case SME_RSN_MODE_REQUEST:
+ hostif_sme_set_rsn(priv, event);
+ break;
+ case SME_SET_FLAG:
+ case SME_SET_TXKEY:
+ case SME_SET_KEY1:
+ case SME_SET_KEY2:
+ case SME_SET_KEY3:
+ case SME_SET_KEY4:
+ case SME_SET_PMK_TSC:
+ case SME_SET_GMK1_TSC:
+ case SME_SET_GMK2_TSC:
+ hostif_sme_set_key(priv, event);
+ break;
+ case SME_SET_PMKSA:
+ hostif_sme_set_pmksa(priv);
+ break;
+#ifdef WPS
+ case SME_WPS_ENABLE_REQUEST:
+ hostif_mib_set_request(priv, LOCAL_WPS_ENABLE,
+ sizeof(priv->wps.wps_enabled),
+ MIB_VALUE_TYPE_INT,
+ &priv->wps.wps_enabled);
+ break;
+ case SME_WPS_PROBE_REQUEST:
+ hostif_mib_set_request(priv, LOCAL_WPS_PROBE_REQ,
+ priv->wps.ielen,
+ MIB_VALUE_TYPE_OSTRING, priv->wps.ie);
+ break;
+#endif /* WPS */
+ case SME_MODE_SET_REQUEST:
+ hostif_sme_mode_setup(priv);
+ break;
+ case SME_SET_GAIN:
+ hostif_mib_set_request(priv, LOCAL_GAIN,
+ sizeof(priv->gain),
+ MIB_VALUE_TYPE_OSTRING, &priv->gain);
+ break;
+ case SME_GET_GAIN:
+ hostif_mib_get_request(priv, LOCAL_GAIN);
+ break;
+ case SME_GET_EEPROM_CKSUM:
+ priv->eeprom_checksum = EEPROM_FW_NOT_SUPPORT; /* initialize */
+ hostif_mib_get_request(priv, LOCAL_EEPROM_SUM);
+ break;
+ case SME_START_REQUEST:
+ hostif_start_request(priv, priv->reg.operation_mode);
+ break;
+ case SME_START_CONFIRM:
+ /* for power save */
+ atomic_set(&priv->psstatus.snooze_guard, 0);
+ atomic_set(&priv->psstatus.confirm_wait, 0);
+ if (priv->dev_state == DEVICE_STATE_PREINIT) {
+ priv->dev_state = DEVICE_STATE_INIT;
+ }
+ /* wake_up_interruptible_all(&priv->confirm_wait); */
+ complete(&priv->confirm_wait);
+ break;
+ case SME_SLEEP_REQUEST:
+ hostif_sme_sleep_set(priv);
+ break;
+ case SME_SET_REGION:
+ val = cpu_to_le32((uint32_t) (priv->region));
+ hostif_mib_set_request(priv, LOCAL_REGION,
+ sizeof(val), MIB_VALUE_TYPE_INT, &val);
+ break;
+ case SME_MULTICAST_CONFIRM:
+ case SME_BSS_SCAN_CONFIRM:
+ case SME_POW_MNGMT_CONFIRM:
+ case SME_PHY_INFO_CONFIRM:
+ case SME_STOP_CONFIRM:
+ case SME_RTS_THRESHOLD_CONFIRM:
+ case SME_FRAGMENTATION_THRESHOLD_CONFIRM:
+ case SME_WEP_INDEX_CONFIRM:
+ case SME_WEP_KEY1_CONFIRM:
+ case SME_WEP_KEY2_CONFIRM:
+ case SME_WEP_KEY3_CONFIRM:
+ case SME_WEP_KEY4_CONFIRM:
+ case SME_WEP_FLAG_CONFIRM:
+ case SME_RSN_UCAST_CONFIRM:
+ case SME_RSN_MCAST_CONFIRM:
+ case SME_RSN_AUTH_CONFIRM:
+ case SME_RSN_ENABLED_CONFIRM:
+ case SME_RSN_MODE_CONFIRM:
+ case SME_MODE_SET_CONFIRM:
+ break;
+ case SME_TERMINATE:
+ default:
+ break;
+ }
+}
+
+static
+void hostif_sme_task(unsigned long dev)
+{
+ struct ks_wlan_private *priv = (struct ks_wlan_private *)dev;
+
+ DPRINTK(3, "\n");
+
+ if (priv->dev_state >= DEVICE_STATE_BOOT) {
+ if (0 < cnt_smeqbody(priv)
+ && priv->dev_state >= DEVICE_STATE_BOOT) {
+ hostif_sme_execute(priv,
+ priv->sme_i.event_buff[priv->sme_i.
+ qhead]);
+ inc_smeqhead(priv);
+ if (0 < cnt_smeqbody(priv))
+ tasklet_schedule(&priv->sme_task);
+ }
+ }
+ return;
+}
+
+/* send to Station Management Entity module */
+void hostif_sme_enqueue(struct ks_wlan_private *priv, unsigned short event)
+{
+ DPRINTK(3, "\n");
+
+ /* enqueue sme event */
+ if (cnt_smeqbody(priv) < (SME_EVENT_BUFF_SIZE - 1)) {
+ priv->sme_i.event_buff[priv->sme_i.qtail] = event;
+ inc_smeqtail(priv);
+ //DPRINTK(3,"inc_smeqtail \n");
+#ifdef KS_WLAN_DEBUG
+ if (priv->sme_i.max_event_count < cnt_smeqbody(priv))
+ priv->sme_i.max_event_count = cnt_smeqbody(priv);
+#endif /* KS_WLAN_DEBUG */
+ } else {
+ /* in case of buffer overflow */
+ //DPRINTK(2,"sme queue buffer overflow\n");
+ printk("sme queue buffer overflow\n");
+ }
+
+ tasklet_schedule(&priv->sme_task);
+
+}
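+
+/*
+ * The SME event queue is a fixed-size ring buffer over
+ * sme_i.event_buff[], indexed by sme_i.qhead and sme_i.qtail. A minimal
+ * sketch of the helpers used above, assuming they wrap modulo
+ * SME_EVENT_BUFF_SIZE (their definitions live elsewhere in this file):
+ *
+ *   cnt_smeqbody(priv):
+ *       (qtail + SME_EVENT_BUFF_SIZE - qhead) % SME_EVENT_BUFF_SIZE
+ *   inc_smeqhead(priv):  qhead = (qhead + 1) % SME_EVENT_BUFF_SIZE
+ *   inc_smeqtail(priv):  qtail = (qtail + 1) % SME_EVENT_BUFF_SIZE
+ *
+ * One slot is left unused (the "< SME_EVENT_BUFF_SIZE - 1" check above),
+ * which keeps a full queue distinguishable from an empty one.
+ */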
+
+int hostif_init(struct ks_wlan_private *priv)
+{
+ int rc = 0;
+ int i;
+
+ DPRINTK(3, "\n");
+
+ priv->aplist.size = 0;
+ for (i = 0; i < LOCAL_APLIST_MAX; i++)
+ memset(&(priv->aplist.ap[i]), 0, sizeof(struct local_ap_t));
+ priv->infra_status = 0;
+ priv->current_rate = 4;
+ priv->connect_status = DISCONNECT_STATUS;
+
+ spin_lock_init(&priv->multicast_spin);
+
+ spin_lock_init(&priv->dev_read_lock);
+ init_waitqueue_head(&priv->devread_wait);
+ priv->dev_count = 0;
+ atomic_set(&priv->event_count, 0);
+ atomic_set(&priv->rec_count, 0);
+
+ /* for power save */
+ atomic_set(&priv->psstatus.status, PS_NONE);
+ atomic_set(&priv->psstatus.confirm_wait, 0);
+ atomic_set(&priv->psstatus.snooze_guard, 0);
+ /* init_waitqueue_head(&priv->psstatus.wakeup_wait); */
+ init_completion(&priv->psstatus.wakeup_wait);
+ //INIT_WORK(&priv->ks_wlan_wakeup_task, ks_wlan_hw_wakeup_task, (void *)priv);
+ INIT_WORK(&priv->ks_wlan_wakeup_task, ks_wlan_hw_wakeup_task);
+
+ /* WPA */
+ memset(&(priv->wpa), 0, sizeof(priv->wpa));
+ priv->wpa.rsn_enabled = 0;
+ priv->wpa.mic_failure.failure = 0;
+ priv->wpa.mic_failure.last_failure_time = 0;
+ priv->wpa.mic_failure.stop = 0;
+ memset(&(priv->pmklist), 0, sizeof(priv->pmklist));
+ INIT_LIST_HEAD(&priv->pmklist.head);
+ for (i = 0; i < PMK_LIST_MAX; i++)
+ INIT_LIST_HEAD(&priv->pmklist.pmk[i].list);
+
+ priv->sme_i.sme_status = SME_IDLE;
+ priv->sme_i.qhead = priv->sme_i.qtail = 0;
+#ifdef KS_WLAN_DEBUG
+ priv->sme_i.max_event_count = 0;
+#endif
+ spin_lock_init(&priv->sme_i.sme_spin);
+ priv->sme_i.sme_flag = 0;
+
+ tasklet_init(&priv->sme_task, hostif_sme_task, (unsigned long)priv);
+
+ return rc;
+}
+
+void hostif_exit(struct ks_wlan_private *priv)
+{
+ tasklet_kill(&priv->sme_task);
+ return;
+}
diff --git a/drivers/staging/ks7010/ks_hostif.h b/drivers/staging/ks7010/ks_hostif.h
new file mode 100644
index 000000000..dc806b5b4
--- /dev/null
+++ b/drivers/staging/ks7010/ks_hostif.h
@@ -0,0 +1,644 @@
+/*
+ * Driver for KeyStream wireless LAN
+ *
+ * Copyright (c) 2005-2008 KeyStream Corp.
+ * Copyright (C) 2009 Renesas Technology Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _KS_HOSTIF_H_
+#define _KS_HOSTIF_H_
+/*
+ * HOST-MAC I/F events
+ */
+#define HIF_DATA_REQ 0xE001
+#define HIF_DATA_IND 0xE801
+#define HIF_MIB_GET_REQ 0xE002
+#define HIF_MIB_GET_CONF 0xE802
+#define HIF_MIB_SET_REQ 0xE003
+#define HIF_MIB_SET_CONF 0xE803
+#define HIF_POWERMGT_REQ 0xE004
+#define HIF_POWERMGT_CONF 0xE804
+#define HIF_START_REQ 0xE005
+#define HIF_START_CONF 0xE805
+#define HIF_CONNECT_IND 0xE806
+#define HIF_STOP_REQ 0xE006
+#define HIF_STOP_CONF 0xE807
+#define HIF_PS_ADH_SET_REQ 0xE007
+#define HIF_PS_ADH_SET_CONF 0xE808
+#define HIF_INFRA_SET_REQ 0xE008
+#define HIF_INFRA_SET_CONF 0xE809
+#define HIF_ADH_SET_REQ 0xE009
+#define HIF_ADH_SET_CONF 0xE80A
+#define HIF_AP_SET_REQ 0xE00A
+#define HIF_AP_SET_CONF 0xE80B
+#define HIF_ASSOC_INFO_IND 0xE80C
+#define HIF_MIC_FAILURE_REQ 0xE00B
+#define HIF_MIC_FAILURE_CONF 0xE80D
+#define HIF_SCAN_REQ 0xE00C
+#define HIF_SCAN_CONF 0xE80E
+#define HIF_PHY_INFO_REQ 0xE00D
+#define HIF_PHY_INFO_CONF 0xE80F
+#define HIF_SLEEP_REQ 0xE00E
+#define HIF_SLEEP_CONF 0xE810
+#define HIF_PHY_INFO_IND 0xE811
+#define HIF_SCAN_IND 0xE812
+#define HIF_INFRA_SET2_REQ 0xE00F
+#define HIF_INFRA_SET2_CONF 0xE813
+#define HIF_ADH_SET2_REQ 0xE010
+#define HIF_ADH_SET2_CONF 0xE814
+
+#define HIF_REQ_MAX 0xE010
+
+/*
+ * HOST-MAC I/F data structure
+ * Byte alignment: little endian
+ */
+
+struct hostif_hdr {
+ uint16_t size;
+ uint16_t event;
+} __attribute__ ((packed));
+
+struct hostif_data_request_t {
+ struct hostif_hdr header;
+ uint16_t auth_type;
+#define TYPE_DATA 0x0000
+#define TYPE_AUTH 0x0001
+ uint16_t reserved;
+ uint8_t data[0];
+} __attribute__ ((packed));
+
+struct hostif_data_indication_t {
+ struct hostif_hdr header;
+ uint16_t auth_type;
+/* #define TYPE_DATA 0x0000 */
+#define TYPE_PMK1 0x0001
+#define TYPE_GMK1 0x0002
+#define TYPE_GMK2 0x0003
+ uint16_t reserved;
+ uint8_t data[0];
+} __attribute__ ((packed));
+
+#define CHANNEL_LIST_MAX_SIZE 14
+struct channel_list_t {
+ uint8_t size;
+ uint8_t body[CHANNEL_LIST_MAX_SIZE];
+ uint8_t pad;
+} __attribute__ ((packed));
+
+/* MIB Attribute */
+#define DOT11_MAC_ADDRESS 0x21010100 /* MAC Address (R) */
+#define DOT11_PRODUCT_VERSION 0x31024100 /* Firmware Version (R) */
+#define DOT11_RTS_THRESHOLD 0x21020100 /* RTS Threshold (R/W) */
+#define DOT11_FRAGMENTATION_THRESHOLD 0x21050100 /* Fragment Threshold (R/W) */
+#define DOT11_PRIVACY_INVOKED 0x15010100 /* WEP ON/OFF (W) */
+#define DOT11_WEP_DEFAULT_KEY_ID 0x15020100 /* WEP Index (W) */
+#define DOT11_WEP_DEFAULT_KEY_VALUE1 0x13020101 /* WEP Key#1(TKIP AES: PairwiseTemporalKey) (W) */
+#define DOT11_WEP_DEFAULT_KEY_VALUE2 0x13020102 /* WEP Key#2(TKIP AES: GroupKey1) (W) */
+#define DOT11_WEP_DEFAULT_KEY_VALUE3 0x13020103 /* WEP Key#3(TKIP AES: GroupKey2) (W) */
+#define DOT11_WEP_DEFAULT_KEY_VALUE4 0x13020104 /* WEP Key#4 (W) */
+#define DOT11_WEP_LIST 0x13020100 /* WEP LIST */
+#define DOT11_DESIRED_SSID 0x11090100 /* SSID */
+#define DOT11_CURRENT_CHANNEL 0x45010100 /* channel set */
+#define DOT11_OPERATION_RATE_SET 0x11110100 /* rate set */
+
+#define LOCAL_AP_SEARCH_INTEAVAL 0xF1010100 /* AP search interval (R/W) */
+#define LOCAL_CURRENTADDRESS 0xF1050100 /* MAC Address change (W) */
+#define LOCAL_MULTICAST_ADDRESS 0xF1060100 /* Multicast Address (W) */
+#define LOCAL_MULTICAST_FILTER 0xF1060200 /* Multicast Address Filter enable/disable (W) */
+#define LOCAL_SEARCHED_AP_LIST 0xF1030100 /* AP list (R) */
+#define LOCAL_LINK_AP_STATUS 0xF1040100 /* Link AP status (R) */
+#define LOCAL_PACKET_STATISTICS 0xF1020100 /* tx,rx packets statistics */
+#define LOCAL_AP_SCAN_LIST_TYPE_SET 0xF1030200 /* AP_SCAN_LIST_TYPE */
+
+#define DOT11_RSN_ENABLED 0x15070100 /* WPA enable/disable (W) */
+#define LOCAL_RSN_MODE 0x56010100 /* RSN mode WPA/WPA2 (W) */
+#define DOT11_RSN_CONFIG_MULTICAST_CIPHER 0x51040100 /* GroupKeyCipherSuite (W) */
+#define DOT11_RSN_CONFIG_UNICAST_CIPHER 0x52020100 /* PairwiseKeyCipherSuite (W) */
+#define DOT11_RSN_CONFIG_AUTH_SUITE 0x53020100 /* AuthenticationKeyManagementSuite (W) */
+#define DOT11_RSN_CONFIG_VERSION 0x51020100 /* RSN version (W) */
+#define LOCAL_RSN_CONFIG_ALL 0x5F010100 /* RSN CONFIG ALL (W) */
+#define DOT11_PMK_TSC 0x55010100 /* PMK_TSC (W) */
+#define DOT11_GMK1_TSC 0x55010101 /* GMK1_TSC (W) */
+#define DOT11_GMK2_TSC 0x55010102 /* GMK2_TSC (W) */
+#define DOT11_GMK3_TSC 0x55010103 /* GMK3_TSC */
+#define LOCAL_PMK 0x58010100 /* Pairwise Master Key cache (W) */
+
+#define LOCAL_REGION 0xF10A0100 /* Region setting */
+
+#ifdef WPS
+#define LOCAL_WPS_ENABLE 0xF10B0100 /* WiFi Protected Setup */
+#define LOCAL_WPS_PROBE_REQ 0xF10C0100 /* WPS Probe Request */
+#endif /* WPS */
+
+#define LOCAL_GAIN 0xF10D0100 /* Carrier sense threshold (for demo) */
+#define LOCAL_EEPROM_SUM 0xF10E0100 /* EEPROM checksum information */
+
+struct hostif_mib_get_request_t {
+ struct hostif_hdr header;
+ uint32_t mib_attribute;
+} __attribute__ ((packed));
+
+struct hostif_mib_value_t {
+ uint16_t size;
+ uint16_t type;
+#define MIB_VALUE_TYPE_NULL 0
+#define MIB_VALUE_TYPE_INT 1
+#define MIB_VALUE_TYPE_BOOL 2
+#define MIB_VALUE_TYPE_COUNT32 3
+#define MIB_VALUE_TYPE_OSTRING 4
+ uint8_t body[0];
+} __attribute__ ((packed));
+
+struct hostif_mib_get_confirm_t {
+ struct hostif_hdr header;
+ uint32_t mib_status;
+#define MIB_SUCCESS 0
+#define MIB_INVALID 1
+#define MIB_READ_ONLY 2
+#define MIB_WRITE_ONLY 3
+ uint32_t mib_attribute;
+ struct hostif_mib_value_t mib_value;
+} __attribute__ ((packed));
+
+struct hostif_mib_set_request_t {
+ struct hostif_hdr header;
+ uint32_t mib_attribute;
+ struct hostif_mib_value_t mib_value;
+} __attribute__ ((packed));
+
+struct hostif_mib_set_confirm_t {
+ struct hostif_hdr header;
+ uint32_t mib_status;
+ uint32_t mib_attribute;
+} __attribute__ ((packed));
+
+struct hostif_power_mngmt_request_t {
+ struct hostif_hdr header;
+ uint32_t mode;
+#define POWER_ACTIVE 1
+#define POWER_SAVE 2
+ uint32_t wake_up;
+#define SLEEP_FALSE 0
+#define SLEEP_TRUE 1 /* not used */
+ uint32_t receiveDTIMs;
+#define DTIM_FALSE 0
+#define DTIM_TRUE 1
+} __attribute__ ((packed));
+
+/* power management mode */
+enum {
+ POWMGT_ACTIVE_MODE = 0,
+ POWMGT_SAVE1_MODE,
+ POWMGT_SAVE2_MODE
+};
+
+#define RESULT_SUCCESS 0
+#define RESULT_INVALID_PARAMETERS 1
+#define RESULT_NOT_SUPPORTED 2
+/* #define RESULT_ALREADY_RUNNING 3 */
+#define RESULT_ALREADY_RUNNING 7
+
+struct hostif_power_mngmt_confirm_t {
+ struct hostif_hdr header;
+ uint16_t result_code;
+} __attribute__ ((packed));
+
+struct hostif_start_request_t {
+ struct hostif_hdr header;
+ uint16_t mode;
+#define MODE_PSEUDO_ADHOC 0
+#define MODE_INFRASTRUCTURE 1
+#define MODE_AP 2 /* not used */
+#define MODE_ADHOC 3
+} __attribute__ ((packed));
+
+struct hostif_start_confirm_t {
+ struct hostif_hdr header;
+ uint16_t result_code;
+} __attribute__ ((packed));
+
+#define SSID_MAX_SIZE 32
+struct ssid_t {
+ uint8_t size;
+ uint8_t body[SSID_MAX_SIZE];
+ uint8_t ssid_pad;
+} __attribute__ ((packed));
+
+#define RATE_SET_MAX_SIZE 16
+struct rate_set8_t {
+ uint8_t size;
+ uint8_t body[8];
+ uint8_t rate_pad;
+} __attribute__ ((packed));
+
+struct FhParms_t {
+ uint16_t dwellTime;
+ uint8_t hopSet;
+ uint8_t hopPattern;
+ uint8_t hopIndex;
+} __attribute__ ((packed));
+
+struct DsParms_t {
+ uint8_t channel;
+} __attribute__ ((packed));
+
+struct CfParms_t {
+ uint8_t count;
+ uint8_t period;
+ uint16_t maxDuration;
+ uint16_t durRemaining;
+} __attribute__ ((packed));
+
+struct IbssParms_t {
+ uint16_t atimWindow;
+} __attribute__ ((packed));
+
+struct rsn_t {
+ uint8_t size;
+#define RSN_BODY_SIZE 64
+ uint8_t body[RSN_BODY_SIZE];
+} __attribute__ ((packed));
+
+struct ErpParams_t {
+ uint8_t erp_info;
+} __attribute__ ((packed));
+
+struct rate_set16_t {
+ uint8_t size;
+ uint8_t body[16];
+ uint8_t rate_pad;
+} __attribute__ ((packed));
+
+struct ap_info_t {
+ uint8_t bssid[6]; /* +00 */
+ uint8_t rssi; /* +06 */
+ uint8_t sq; /* +07 */
+ uint8_t noise; /* +08 */
+ uint8_t pad0; /* +09 */
+ uint16_t beacon_period; /* +10 */
+ uint16_t capability; /* +12 */
+#define BSS_CAP_ESS (1<<0)
+#define BSS_CAP_IBSS (1<<1)
+#define BSS_CAP_CF_POLABLE (1<<2)
+#define BSS_CAP_CF_POLL_REQ (1<<3)
+#define BSS_CAP_PRIVACY (1<<4)
+#define BSS_CAP_SHORT_PREAMBLE (1<<5)
+#define BSS_CAP_PBCC (1<<6)
+#define BSS_CAP_CHANNEL_AGILITY (1<<7)
+#define BSS_CAP_SHORT_SLOT_TIME (1<<10)
+#define BSS_CAP_DSSS_OFDM (1<<13)
+ uint8_t frame_type; /* +14 */
+ uint8_t ch_info; /* +15 */
+#define FRAME_TYPE_BEACON 0x80
+#define FRAME_TYPE_PROBE_RESP 0x50
+ uint16_t body_size; /* +16 */
+ uint8_t body[1024]; /* +18 */
+ /* +1032 */
+} __attribute__ ((packed));
+
+struct link_ap_info_t {
+ uint8_t bssid[6]; /* +00 */
+ uint8_t rssi; /* +06 */
+ uint8_t sq; /* +07 */
+ uint8_t noise; /* +08 */
+ uint8_t pad0; /* +09 */
+ uint16_t beacon_period; /* +10 */
+ uint16_t capability; /* +12 */
+ struct rate_set8_t rate_set; /* +14 */
+ struct FhParms_t fh_parameter; /* +24 */
+ struct DsParms_t ds_parameter; /* +29 */
+ struct CfParms_t cf_parameter; /* +30 */
+ struct IbssParms_t ibss_parameter; /* +36 */
+ struct ErpParams_t erp_parameter; /* +38 */
+ uint8_t pad1; /* +39 */
+ struct rate_set8_t ext_rate_set; /* +40 */
+ uint8_t DTIM_period; /* +50 */
+ uint8_t rsn_mode; /* +51 */
+#define RSN_MODE_NONE 0
+#define RSN_MODE_WPA 1
+#define RSN_MODE_WPA2 2
+ struct {
+ uint8_t size; /* +52 */
+ uint8_t body[128]; /* +53 */
+ } __attribute__ ((packed)) rsn;
+} __attribute__ ((packed));
+
+struct hostif_connect_indication_t {
+ struct hostif_hdr header;
+ uint16_t connect_code;
+#define RESULT_CONNECT 0
+#define RESULT_DISCONNECT 1
+ struct link_ap_info_t link_ap_info;
+} __attribute__ ((packed));
+
+struct hostif_stop_request_t {
+ struct hostif_hdr header;
+} __attribute__ ((packed));
+
+struct hostif_stop_confirm_t {
+ struct hostif_hdr header;
+ uint16_t result_code;
+} __attribute__ ((packed));
+
+struct hostif_ps_adhoc_set_request_t {
+ struct hostif_hdr header;
+ uint16_t phy_type;
+#define D_11B_ONLY_MODE 0
+#define D_11G_ONLY_MODE 1
+#define D_11BG_COMPATIBLE_MODE 2
+#define D_11A_ONLY_MODE 3
+ uint16_t cts_mode;
+#define CTS_MODE_FALSE 0
+#define CTS_MODE_TRUE 1
+ uint16_t channel;
+ struct rate_set16_t rate_set;
+ uint16_t capability; /* bit5: preamble, bit6: PBCC (not supported, always 0),
+ * bit10: ShortSlotTime, bit13: DSSS-OFDM (not supported, always 0) */
+ uint16_t scan_type;
+} __attribute__ ((packed));
+
+struct hostif_ps_adhoc_set_confirm_t {
+ struct hostif_hdr header;
+ uint16_t result_code;
+} __attribute__ ((packed));
+
+struct hostif_infrastructure_set_request_t {
+ struct hostif_hdr header;
+ uint16_t phy_type;
+ uint16_t cts_mode;
+ struct rate_set16_t rate_set;
+ struct ssid_t ssid;
+ uint16_t capability; /* bit5: preamble, bit6: PBCC (not supported, always 0),
+ * bit10: ShortSlotTime, bit13: DSSS-OFDM (not supported, always 0) */
+ uint16_t beacon_lost_count;
+ uint16_t auth_type;
+#define AUTH_TYPE_OPEN_SYSTEM 0
+#define AUTH_TYPE_SHARED_KEY 1
+ struct channel_list_t channel_list;
+ uint16_t scan_type;
+} __attribute__ ((packed));
+
+struct hostif_infrastructure_set2_request_t {
+ struct hostif_hdr header;
+ uint16_t phy_type;
+ uint16_t cts_mode;
+ struct rate_set16_t rate_set;
+ struct ssid_t ssid;
+ uint16_t capability; /* bit5: preamble, bit6: PBCC (not supported, always 0),
+ * bit10: ShortSlotTime, bit13: DSSS-OFDM (not supported, always 0) */
+ uint16_t beacon_lost_count;
+ uint16_t auth_type;
+#define AUTH_TYPE_OPEN_SYSTEM 0
+#define AUTH_TYPE_SHARED_KEY 1
+ struct channel_list_t channel_list;
+ uint16_t scan_type;
+ uint8_t bssid[ETH_ALEN];
+} __attribute__ ((packed));
+
+struct hostif_infrastructure_set_confirm_t {
+ struct hostif_hdr header;
+ uint16_t result_code;
+} __attribute__ ((packed));
+
+struct hostif_adhoc_set_request_t {
+ struct hostif_hdr header;
+ uint16_t phy_type;
+ uint16_t cts_mode;
+ uint16_t channel;
+ struct rate_set16_t rate_set;
+ struct ssid_t ssid;
+ uint16_t capability; /* bit5: preamble, bit6: PBCC (not supported, always 0),
+ * bit10: ShortSlotTime, bit13: DSSS-OFDM (not supported, always 0) */
+ uint16_t scan_type;
+} __attribute__ ((packed));
+
+struct hostif_adhoc_set2_request_t {
+ struct hostif_hdr header;
+ uint16_t phy_type;
+ uint16_t cts_mode;
+ uint16_t reserved;
+ struct rate_set16_t rate_set;
+ struct ssid_t ssid;
+ uint16_t capability; /* bit5: preamble, bit6: PBCC (not supported, always 0),
+ * bit10: ShortSlotTime, bit13: DSSS-OFDM (not supported, always 0) */
+ uint16_t scan_type;
+ struct channel_list_t channel_list;
+ uint8_t bssid[ETH_ALEN];
+} __attribute__ ((packed));
+
+struct hostif_adhoc_set_confirm_t {
+ struct hostif_hdr header;
+ uint16_t result_code;
+} __attribute__ ((packed));
+
+struct last_associate_t {
+ uint8_t type;
+ uint8_t status;
+} __attribute__ ((packed));
+
+struct association_request_t {
+ uint8_t type;
+#define FRAME_TYPE_ASSOC_REQ 0x00
+#define FRAME_TYPE_REASSOC_REQ 0x20
+ uint8_t pad;
+ uint16_t capability;
+ uint16_t listen_interval;
+ uint8_t ap_address[6];
+ uint16_t reqIEs_size;
+} __attribute__ ((packed));
+
+struct association_response_t {
+ uint8_t type;
+#define FRAME_TYPE_ASSOC_RESP 0x10
+#define FRAME_TYPE_REASSOC_RESP 0x30
+ uint8_t pad;
+ uint16_t capability;
+ uint16_t status;
+ uint16_t association_id;
+ uint16_t respIEs_size;
+} __attribute__ ((packed));
+
+struct hostif_associate_indication_t {
+ struct hostif_hdr header;
+ struct association_request_t assoc_req;
+ struct association_response_t assoc_resp;
+ /* followed by (reqIEs_size + respIEs_size) octets of data */
+ /* reqIEs data *//* respIEs data */
+} __attribute__ ((packed));
+
+struct hostif_bss_scan_request_t {
+ struct hostif_hdr header;
+ uint8_t scan_type;
+#define ACTIVE_SCAN 0
+#define PASSIVE_SCAN 1
+ uint8_t pad[3];
+ uint32_t ch_time_min;
+ uint32_t ch_time_max;
+ struct channel_list_t channel_list;
+ struct ssid_t ssid;
+} __attribute__ ((packed));
+
+struct hostif_bss_scan_confirm_t {
+ struct hostif_hdr header;
+ uint16_t result_code;
+ uint16_t reserved;
+} __attribute__ ((packed));
+
+struct hostif_phy_information_request_t {
+ struct hostif_hdr header;
+ uint16_t type;
+#define NORMAL_TYPE 0
+#define TIME_TYPE 1
+ uint16_t time; /* unit 100ms */
+} __attribute__ ((packed));
+
+struct hostif_phy_information_confirm_t {
+ struct hostif_hdr header;
+ uint8_t rssi;
+ uint8_t sq;
+ uint8_t noise;
+ uint8_t link_speed;
+ uint32_t tx_frame;
+ uint32_t rx_frame;
+ uint32_t tx_error;
+ uint32_t rx_error;
+} __attribute__ ((packed));
+
+/* sleep mode */
+#define SLP_ACTIVE 0
+#define SLP_SLEEP 1
+struct hostif_sleep_request_t {
+ struct hostif_hdr header;
+} __attribute__ ((packed));
+
+struct hostif_sleep_confirm_t {
+ struct hostif_hdr header;
+ uint16_t result_code;
+} __attribute__ ((packed));
+
+struct hostif_mic_failure_request_t {
+ struct hostif_hdr header;
+ uint16_t failure_count;
+ uint16_t timer;
+} __attribute__ ((packed));
+
+struct hostif_mic_failure_confirm_t {
+ struct hostif_hdr header;
+ uint16_t result_code;
+} __attribute__ ((packed));
+
+#define BASIC_RATE 0x80
+#define RATE_MASK 0x7F
+
+#define TX_RATE_AUTO 0xff
+#define TX_RATE_1M_FIXED 0
+#define TX_RATE_2M_FIXED 1
+#define TX_RATE_1_2M_AUTO 2
+#define TX_RATE_5M_FIXED 3
+#define TX_RATE_11M_FIXED 4
+
+#define TX_RATE_FULL_AUTO 0
+#define TX_RATE_11_AUTO 1
+#define TX_RATE_11B_AUTO 2
+#define TX_RATE_11BG_AUTO 3
+#define TX_RATE_MANUAL_AUTO 4
+#define TX_RATE_FIXED 5
+
+/* 11b rate */
+#define TX_RATE_1M (uint8_t)(10/5) /* 11b 11g basic rate */
+#define TX_RATE_2M (uint8_t)(20/5) /* 11b 11g basic rate */
+#define TX_RATE_5M (uint8_t)(55/5) /* 11g basic rate */
+#define TX_RATE_11M (uint8_t)(110/5) /* 11g basic rate */
+
+/* 11g rate */
+#define TX_RATE_6M (uint8_t)(60/5) /* 11g basic rate */
+#define TX_RATE_12M (uint8_t)(120/5) /* 11g basic rate */
+#define TX_RATE_24M (uint8_t)(240/5) /* 11g basic rate */
+#define TX_RATE_9M (uint8_t)(90/5)
+#define TX_RATE_18M (uint8_t)(180/5)
+#define TX_RATE_36M (uint8_t)(360/5)
+#define TX_RATE_48M (uint8_t)(480/5)
+#define TX_RATE_54M (uint8_t)(540/5)
+
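+/*
+ * Rates above are encoded in 500 kbit/s units, e.g. TX_RATE_11M ==
+ * 110/5 == 22, and ORing in BASIC_RATE (0x80) marks an entry as a basic
+ * rate, so 22 | 0x80 advertises 11 Mbit/s as basic. RATE_MASK strips
+ * that bit again before the IS_*_RATE() classification checks below.
+ */
+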
+#define IS_11B_RATE(A) (((A&RATE_MASK)==TX_RATE_1M)||((A&RATE_MASK)==TX_RATE_2M)||\
+ ((A&RATE_MASK)==TX_RATE_5M)||((A&RATE_MASK)==TX_RATE_11M))
+
+#define IS_OFDM_RATE(A) (((A&RATE_MASK)==TX_RATE_6M)||((A&RATE_MASK)==TX_RATE_12M)||\
+ ((A&RATE_MASK)==TX_RATE_24M)||((A&RATE_MASK)==TX_RATE_9M)||\
+ ((A&RATE_MASK)==TX_RATE_18M)||((A&RATE_MASK)==TX_RATE_36M)||\
+ ((A&RATE_MASK)==TX_RATE_48M)||((A&RATE_MASK)==TX_RATE_54M))
+
+#define IS_11BG_RATE(A) (IS_11B_RATE(A)||IS_OFDM_RATE(A))
+
+#define IS_OFDM_EXT_RATE(A) (((A&RATE_MASK)==TX_RATE_9M)||((A&RATE_MASK)==TX_RATE_18M)||\
+ ((A&RATE_MASK)==TX_RATE_36M)||((A&RATE_MASK)==TX_RATE_48M)||\
+ ((A&RATE_MASK)==TX_RATE_54M))
+
+enum {
+ CONNECT_STATUS = 0,
+ DISCONNECT_STATUS
+};
+
+/* preamble type */
+enum {
+ LONG_PREAMBLE = 0,
+ SHORT_PREAMBLE
+};
+
+/* multicast filter */
+#define MCAST_FILTER_MCAST 0
+#define MCAST_FILTER_MCASTALL 1
+#define MCAST_FILTER_PROMISC 2
+
+#define NIC_MAX_MCAST_LIST 32
+
+/* macro function */
+#define HIF_EVENT_MASK 0xE800
+#define IS_HIF_IND(_EVENT) ((_EVENT&HIF_EVENT_MASK)==0xE800 && \
+ ((_EVENT&~HIF_EVENT_MASK)==0x0001 || \
+ (_EVENT&~HIF_EVENT_MASK)==0x0006 || \
+ (_EVENT&~HIF_EVENT_MASK)==0x000C || \
+ (_EVENT&~HIF_EVENT_MASK)==0x0011 || \
+ (_EVENT&~HIF_EVENT_MASK)==0x0012))
+
+#define IS_HIF_CONF(_EVENT) ((_EVENT&HIF_EVENT_MASK)==0xE800 && \
+ (_EVENT&~HIF_EVENT_MASK)>0x0000 && \
+ (_EVENT&~HIF_EVENT_MASK)<0x0012 && \
+ !IS_HIF_IND(_EVENT) )
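+
+/*
+ * A worked example of the classification above: HIF_SCAN_CONF (0xE80E)
+ * masks to 0x000E, which lies in 0x0001..0x0011 and is not in the
+ * indication list, so IS_HIF_CONF() is true; HIF_SCAN_IND (0xE812)
+ * masks to 0x0012 and matches IS_HIF_IND() instead.
+ */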
+
+#ifdef __KERNEL__
+
+#include "ks_wlan.h"
+
+/* function prototype */
+extern int hostif_data_request(struct ks_wlan_private *priv,
+ struct sk_buff *packet);
+extern void hostif_receive(struct ks_wlan_private *priv, unsigned char *p,
+ unsigned int size);
+extern void hostif_sme_enqueue(struct ks_wlan_private *priv, uint16_t event);
+extern int hostif_init(struct ks_wlan_private *priv);
+extern void hostif_exit(struct ks_wlan_private *priv);
+
+static
+inline int hif_align_size(int size)
+{
+#ifdef KS_ATOM
+ if (size < 1024)
+ size = 1024;
+#endif
+#ifdef DEVICE_ALIGNMENT
+ return (size % DEVICE_ALIGNMENT) ? size + DEVICE_ALIGNMENT -
+ (size % DEVICE_ALIGNMENT) : size;
+#else
+ return size;
+#endif
+}
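+
+/*
+ * A worked example for hif_align_size(), assuming DEVICE_ALIGNMENT is
+ * defined as 32 and KS_ATOM is unset: hif_align_size(100) rounds up to
+ * the next multiple of the alignment, 100 + 32 - (100 % 32) == 128,
+ * while hif_align_size(96) is already aligned and returns 96.
+ */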
+
+#endif /* __KERNEL__ */
+
+#endif /* _KS_HOSTIF_H_ */
diff --git a/drivers/staging/ks7010/ks_wlan.h b/drivers/staging/ks7010/ks_wlan.h
new file mode 100644
index 000000000..f05dc0122
--- /dev/null
+++ b/drivers/staging/ks7010/ks_wlan.h
@@ -0,0 +1,505 @@
+/*
+ * Driver for KeyStream IEEE802.11 b/g wireless LAN cards.
+ *
+ * Copyright (C) 2006-2008 KeyStream Corp.
+ * Copyright (C) 2009 Renesas Technology Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _KS_WLAN_H
+#define _KS_WLAN_H
+
+#define WPS
+
+#include <linux/version.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/spinlock.h> /* spinlock_t */
+#include <linux/sched.h> /* wait_queue_head_t */
+#include <linux/types.h> /* pid_t */
+#include <linux/netdevice.h> /* struct net_device_stats, struct sk_buff */
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <asm/atomic.h> /* atomic_t */
+#include <linux/timer.h> /* struct timer_list */
+#include <linux/string.h>
+#include <linux/completion.h> /* struct completion */
+#include <linux/workqueue.h>
+
+#include <asm/io.h>
+
+#include "ks7010_sdio.h"
+
+#ifdef KS_WLAN_DEBUG
+#define DPRINTK(n, fmt, args...) \
+ do { if (KS_WLAN_DEBUG > (n)) \
+ printk(KERN_NOTICE "%s: "fmt, __FUNCTION__, ## args); } while (0)
+#else
+#define DPRINTK(n, fmt, args...) do { } while (0)
+#endif
+
+struct ks_wlan_parameter {
+ uint8_t operation_mode; /* Operation Mode */
+ uint8_t channel; /* Channel */
+ uint8_t tx_rate; /* Transmit Rate */
+ struct {
+ uint8_t size;
+ uint8_t body[16];
+ } rate_set;
+ uint8_t bssid[ETH_ALEN]; /* BSSID */
+ struct {
+ uint8_t size;
+ uint8_t body[32 + 1];
+ } ssid; /* SSID */
+ uint8_t preamble; /* Preamble */
+ uint8_t powermgt; /* PowerManagementMode */
+ uint32_t scan_type; /* AP List Scan Type */
+#define BEACON_LOST_COUNT_MIN 0
+#define BEACON_LOST_COUNT_MAX 65535
+ uint32_t beacon_lost_count; /* Beacon Lost Count */
+ uint32_t rts; /* RTS Threshold */
+ uint32_t fragment; /* Fragmentation Threshold */
+ uint32_t privacy_invoked;
+ uint32_t wep_index;
+ struct {
+ uint8_t size;
+ uint8_t val[13 * 2 + 1];
+ } wep_key[4];
+ uint16_t authenticate_type;
+ uint16_t phy_type; /* 11b/11g/11bg mode type */
+ uint16_t cts_mode; /* for 11g/11bg mode cts mode */
+ uint16_t phy_info_timer; /* phy information timer */
+};
+
+enum {
+ DEVICE_STATE_OFF = 0, /* this means hw_unavailable is != 0 */
+ DEVICE_STATE_PREBOOT, /* we are in a pre-boot state (empty RAM) */
+ DEVICE_STATE_BOOT, /* boot state (fw upload, run fw) */
+ DEVICE_STATE_PREINIT, /* pre-init state */
+ DEVICE_STATE_INIT, /* init state (restore MIB backup to device) */
+ DEVICE_STATE_READY, /* driver&device are in operational state */
+ DEVICE_STATE_SLEEP /* device in sleep mode */
+};
+
+/* SME flag */
+#define SME_MODE_SET (1<<0)
+#define SME_RTS (1<<1)
+#define SME_FRAG (1<<2)
+#define SME_WEP_FLAG (1<<3)
+#define SME_WEP_INDEX (1<<4)
+#define SME_WEP_VAL1 (1<<5)
+#define SME_WEP_VAL2 (1<<6)
+#define SME_WEP_VAL3 (1<<7)
+#define SME_WEP_VAL4 (1<<8)
+#define SME_WEP_VAL_MASK (SME_WEP_VAL1|SME_WEP_VAL2|SME_WEP_VAL3|SME_WEP_VAL4)
+#define SME_RSN (1<<9)
+#define SME_RSN_MULTICAST (1<<10)
+#define SME_RSN_UNICAST (1<<11)
+#define SME_RSN_AUTH (1<<12)
+
+#define SME_AP_SCAN (1<<13)
+#define SME_MULTICAST (1<<14)
+
+/* SME Event */
+enum {
+ SME_START,
+
+ SME_MULTICAST_REQUEST,
+ SME_MACADDRESS_SET_REQUEST,
+ SME_BSS_SCAN_REQUEST,
+ SME_SET_FLAG,
+ SME_SET_TXKEY,
+ SME_SET_KEY1,
+ SME_SET_KEY2,
+ SME_SET_KEY3,
+ SME_SET_KEY4,
+ SME_SET_PMK_TSC,
+ SME_SET_GMK1_TSC,
+ SME_SET_GMK2_TSC,
+ SME_SET_GMK3_TSC,
+ SME_SET_PMKSA,
+ SME_POW_MNGMT_REQUEST,
+ SME_PHY_INFO_REQUEST,
+ SME_MIC_FAILURE_REQUEST,
+ SME_GET_MAC_ADDRESS,
+ SME_GET_PRODUCT_VERSION,
+ SME_STOP_REQUEST,
+ SME_RTS_THRESHOLD_REQUEST,
+ SME_FRAGMENTATION_THRESHOLD_REQUEST,
+ SME_WEP_INDEX_REQUEST,
+ SME_WEP_KEY1_REQUEST,
+ SME_WEP_KEY2_REQUEST,
+ SME_WEP_KEY3_REQUEST,
+ SME_WEP_KEY4_REQUEST,
+ SME_WEP_FLAG_REQUEST,
+ SME_RSN_UCAST_REQUEST,
+ SME_RSN_MCAST_REQUEST,
+ SME_RSN_AUTH_REQUEST,
+ SME_RSN_ENABLED_REQUEST,
+ SME_RSN_MODE_REQUEST,
+#ifdef WPS
+ SME_WPS_ENABLE_REQUEST,
+ SME_WPS_PROBE_REQUEST,
+#endif
+ SME_SET_GAIN,
+ SME_GET_GAIN,
+ SME_SLEEP_REQUEST,
+ SME_SET_REGION,
+ SME_MODE_SET_REQUEST,
+ SME_START_REQUEST,
+ SME_GET_EEPROM_CKSUM,
+
+ SME_MIC_FAILURE_CONFIRM,
+ SME_START_CONFIRM,
+
+ SME_MULTICAST_CONFIRM,
+ SME_BSS_SCAN_CONFIRM,
+ SME_GET_CURRENT_AP,
+ SME_POW_MNGMT_CONFIRM,
+ SME_PHY_INFO_CONFIRM,
+ SME_STOP_CONFIRM,
+ SME_RTS_THRESHOLD_CONFIRM,
+ SME_FRAGMENTATION_THRESHOLD_CONFIRM,
+ SME_WEP_INDEX_CONFIRM,
+ SME_WEP_KEY1_CONFIRM,
+ SME_WEP_KEY2_CONFIRM,
+ SME_WEP_KEY3_CONFIRM,
+ SME_WEP_KEY4_CONFIRM,
+ SME_WEP_FLAG_CONFIRM,
+ SME_RSN_UCAST_CONFIRM,
+ SME_RSN_MCAST_CONFIRM,
+ SME_RSN_AUTH_CONFIRM,
+ SME_RSN_ENABLED_CONFIRM,
+ SME_RSN_MODE_CONFIRM,
+ SME_MODE_SET_CONFIRM,
+ SME_SLEEP_CONFIRM,
+
+ SME_RSN_SET_CONFIRM,
+ SME_WEP_SET_CONFIRM,
+ SME_TERMINATE,
+
+ SME_EVENT_SIZE /* end */
+};
+
+/* SME Status */
+enum {
+ SME_IDLE,
+ SME_SETUP,
+ SME_DISCONNECT,
+ SME_CONNECT
+};
+
+#define SME_EVENT_BUFF_SIZE 128
+
+struct sme_info {
+ int sme_status;
+ int event_buff[SME_EVENT_BUFF_SIZE];
+ unsigned int qhead;
+ unsigned int qtail;
+#ifdef KS_WLAN_DEBUG
+ /* for debug */
+ unsigned int max_event_count;
+#endif
+ spinlock_t sme_spin;
+ unsigned long sme_flag;
+};
+
+struct hostt_t {
+ int buff[SME_EVENT_BUFF_SIZE];
+ unsigned int qhead;
+ unsigned int qtail;
+};
+
+#define RSN_IE_BODY_MAX 64
+struct rsn_ie_t {
+ uint8_t id; /* 0xdd = WPA or 0x30 = RSN */
+ uint8_t size; /* max 255 */
+ uint8_t body[RSN_IE_BODY_MAX];
+} __attribute__ ((packed));
+
+#ifdef WPS
+#define WPS_IE_BODY_MAX 255
+struct wps_ie_t {
+ uint8_t id; /* 221 'dd <len> 00 50 F2 04' */
+ uint8_t size; /* max 255 */
+ uint8_t body[WPS_IE_BODY_MAX];
+} __attribute__ ((packed));
+#endif /* WPS */
+
+struct local_ap_t {
+ uint8_t bssid[6];
+ uint8_t rssi;
+ uint8_t sq;
+ struct {
+ uint8_t size;
+ uint8_t body[32];
+ uint8_t ssid_pad;
+ } ssid;
+ struct {
+ uint8_t size;
+ uint8_t body[16];
+ uint8_t rate_pad;
+ } rate_set;
+ uint16_t capability;
+ uint8_t channel;
+ uint8_t noise;
+ struct rsn_ie_t wpa_ie;
+ struct rsn_ie_t rsn_ie;
+#ifdef WPS
+ struct wps_ie_t wps_ie;
+#endif /* WPS */
+};
+
+#define LOCAL_APLIST_MAX 31
+#define LOCAL_CURRENT_AP LOCAL_APLIST_MAX
+struct local_aplist_t {
+ int size;
+ struct local_ap_t ap[LOCAL_APLIST_MAX + 1];
+};
+
+struct local_gain_t {
+ uint8_t TxMode;
+ uint8_t RxMode;
+ uint8_t TxGain;
+ uint8_t RxGain;
+};
+
+struct local_eeprom_sum_t {
+ uint8_t type;
+ uint8_t result;
+};
+
+enum {
+ EEPROM_OK,
+ EEPROM_CHECKSUM_NONE,
+ EEPROM_FW_NOT_SUPPORT,
+ EEPROM_NG,
+};
+
+/* Power Save Status */
+enum {
+ PS_NONE,
+ PS_ACTIVE_SET,
+ PS_SAVE_SET,
+ PS_CONF_WAIT,
+ PS_SNOOZE,
+ PS_WAKEUP
+};
+
+struct power_save_status_t {
+ atomic_t status; /* initial value 0 */
+ struct completion wakeup_wait;
+ atomic_t confirm_wait;
+ atomic_t snooze_guard;
+};
+
+struct sleep_status_t {
+ atomic_t status; /* initial value 0 */
+ atomic_t doze_request;
+ atomic_t wakeup_request;
+};
+
+/* WPA */
+struct scan_ext_t {
+ unsigned int flag;
+ char ssid[IW_ESSID_MAX_SIZE + 1];
+};
+
+enum {
+ CIPHER_NONE,
+ CIPHER_WEP40,
+ CIPHER_TKIP,
+ CIPHER_CCMP,
+ CIPHER_WEP104
+};
+
+#define CIPHER_ID_WPA_NONE "\x00\x50\xf2\x00"
+#define CIPHER_ID_WPA_WEP40 "\x00\x50\xf2\x01"
+#define CIPHER_ID_WPA_TKIP "\x00\x50\xf2\x02"
+#define CIPHER_ID_WPA_CCMP "\x00\x50\xf2\x04"
+#define CIPHER_ID_WPA_WEP104 "\x00\x50\xf2\x05"
+
+#define CIPHER_ID_WPA2_NONE "\x00\x0f\xac\x00"
+#define CIPHER_ID_WPA2_WEP40 "\x00\x0f\xac\x01"
+#define CIPHER_ID_WPA2_TKIP "\x00\x0f\xac\x02"
+#define CIPHER_ID_WPA2_CCMP "\x00\x0f\xac\x04"
+#define CIPHER_ID_WPA2_WEP104 "\x00\x0f\xac\x05"
+
+#define CIPHER_ID_LEN 4
+
+enum {
+ KEY_MGMT_802_1X,
+ KEY_MGMT_PSK,
+ KEY_MGMT_WPANONE,
+};
+
+#define KEY_MGMT_ID_WPA_NONE "\x00\x50\xf2\x00"
+#define KEY_MGMT_ID_WPA_1X "\x00\x50\xf2\x01"
+#define KEY_MGMT_ID_WPA_PSK "\x00\x50\xf2\x02"
+#define KEY_MGMT_ID_WPA_WPANONE "\x00\x50\xf2\xff"
+
+#define KEY_MGMT_ID_WPA2_NONE "\x00\x0f\xac\x00"
+#define KEY_MGMT_ID_WPA2_1X "\x00\x0f\xac\x01"
+#define KEY_MGMT_ID_WPA2_PSK "\x00\x0f\xac\x02"
+#define KEY_MGMT_ID_WPA2_WPANONE "\x00\x0f\xac\xff"
+
+#define KEY_MGMT_ID_LEN 4
+
+#define MIC_KEY_SIZE 8
+
+struct wpa_key_t {
+ uint32_t ext_flags; /* IW_ENCODE_EXT_xxx */
+ uint8_t tx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
+ uint8_t rx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
+ struct sockaddr addr; /* ff:ff:ff:ff:ff:ff for broadcast/multicast
+ * (group) keys or unicast address for
+ * individual keys */
+ uint16_t alg;
+ uint16_t key_len; /* WEP: 5 or 13, TKIP: 32, CCMP: 16 */
+ uint8_t key_val[IW_ENCODING_TOKEN_MAX];
+ uint8_t tx_mic_key[MIC_KEY_SIZE];
+ uint8_t rx_mic_key[MIC_KEY_SIZE];
+};
+#define WPA_KEY_INDEX_MAX 4
+#define WPA_RX_SEQ_LEN 6
+
+struct mic_failure_t {
+ uint16_t failure; /* MIC Failure counter 0 or 1 or 2 */
+ uint16_t counter; /* 1sec counter 0-60 */
+ uint32_t last_failure_time;
+ int stop; /* stop flag */
+};
+
+struct wpa_status_t {
+ int wpa_enabled;
+ unsigned int rsn_enabled;
+ int version;
+ int pairwise_suite; /* unicast cipher */
+ int group_suite; /* multicast cipher */
+ int key_mgmt_suite; /* authentication key management suite */
+ int auth_alg;
+ int txkey;
+ struct wpa_key_t key[WPA_KEY_INDEX_MAX];
+ struct scan_ext_t scan_ext;
+ struct mic_failure_t mic_failure;
+};
+
+#include <linux/list.h>
+#define PMK_LIST_MAX 8
+struct pmk_list_t {
+ uint16_t size;
+ struct list_head head;
+ struct pmk_t {
+ struct list_head list;
+ uint8_t bssid[ETH_ALEN];
+ uint8_t pmkid[IW_PMKID_LEN];
+ } pmk[PMK_LIST_MAX];
+};
+
+#ifdef WPS
+struct wps_status_t {
+ int wps_enabled;
+ int ielen;
+ uint8_t ie[255];
+};
+#endif /* WPS */
+
+struct ks_wlan_private {
+
+ struct hw_info_t ks_wlan_hw; /* hardware information */
+
+ struct net_device *net_dev;
+ int reg_net; /* register_netdev */
+ struct net_device_stats nstats;
+ struct iw_statistics wstats;
+
+ struct completion confirm_wait;
+
+ /* trx device & sme */
+ struct tx_device tx_dev;
+ struct rx_device rx_dev;
+ struct sme_info sme_i;
+ u8 *rxp;
+ unsigned int rx_size;
+ struct tasklet_struct sme_task;
+ struct work_struct ks_wlan_wakeup_task;
+ int scan_ind_count;
+
+ unsigned char eth_addr[ETH_ALEN];
+
+ struct local_aplist_t aplist;
+ struct local_ap_t current_ap;
+ struct power_save_status_t psstatus;
+ struct sleep_status_t sleepstatus;
+ struct wpa_status_t wpa;
+ struct pmk_list_t pmklist;
+ /* wireless parameter */
+ struct ks_wlan_parameter reg;
+ uint8_t current_rate;
+
+ char nick[IW_ESSID_MAX_SIZE + 1];
+
+ spinlock_t multicast_spin;
+
+ spinlock_t dev_read_lock;
+ wait_queue_head_t devread_wait;
+
+ unsigned int need_commit; /* for ioctl */
+
+ /* DeviceIoControl */
+ int device_open_status;
+ atomic_t event_count;
+ atomic_t rec_count;
+ int dev_count;
+#define DEVICE_STOCK_COUNT 20
+ unsigned char *dev_data[DEVICE_STOCK_COUNT];
+ int dev_size[DEVICE_STOCK_COUNT];
+
+ /* ioctl : IOCTL_FIRMWARE_VERSION */
+ unsigned char firmware_version[128 + 1];
+ int version_size;
+
+ int mac_address_valid; /* Mac Address Status */
+
+ int dev_state;
+
+ struct sk_buff *skb;
+ unsigned int cur_rx; /* Index into the Rx buffer of next Rx pkt. */
+ /* spinlock_t lock; */
+#define FORCE_DISCONNECT 0x80000000
+#define CONNECT_STATUS_MASK 0x7FFFFFFF
+ uint32_t connect_status; /* connect status */
+ int infra_status; /* Infrastructure status */
+
+ uint8_t data_buff[0x1000];
+
+ uint8_t scan_ssid_len;
+ uint8_t scan_ssid[IW_ESSID_MAX_SIZE + 1];
+ struct local_gain_t gain;
+#ifdef WPS
+ struct net_device *l2_dev;
+ int l2_fd;
+ struct wps_status_t wps;
+#endif /* WPS */
+ uint8_t sleep_mode;
+
+ uint8_t region;
+ struct local_eeprom_sum_t eeprom_sum;
+ uint8_t eeprom_checksum;
+
+ struct hostt_t hostt;
+
+ unsigned long last_doze;
+ unsigned long last_wakeup;
+
+ uint wakeup_count; /* for detect wakeup loop */
+};
+
+extern int ks_wlan_net_start(struct net_device *dev);
+extern int ks_wlan_net_stop(struct net_device *dev);
+
+#endif /* _KS_WLAN_H */
diff --git a/drivers/staging/ks7010/ks_wlan_ioctl.h b/drivers/staging/ks7010/ks_wlan_ioctl.h
new file mode 100644
index 000000000..49369e497
--- /dev/null
+++ b/drivers/staging/ks7010/ks_wlan_ioctl.h
@@ -0,0 +1,67 @@
+/*
+ * Driver for KeyStream 11b/g wireless LAN
+ *
+ * Copyright (c) 2005-2008 KeyStream Corp.
+ * Copyright (C) 2009 Renesas Technology Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _KS_WLAN_IOCTL_H
+#define _KS_WLAN_IOCTL_H
+
+#include <linux/wireless.h>
+/* The low-order bit identifies a SET (0) or a GET (1) ioctl. */
+
+/* SIOCIWFIRSTPRIV+0 */
+/* former KS_WLAN_GET_DRIVER_VERSION SIOCIWFIRSTPRIV+1 */
+/* SIOCIWFIRSTPRIV+2 */
+#define KS_WLAN_GET_FIRM_VERSION SIOCIWFIRSTPRIV+3
+#ifdef WPS
+#define KS_WLAN_SET_WPS_ENABLE SIOCIWFIRSTPRIV+4
+#define KS_WLAN_GET_WPS_ENABLE SIOCIWFIRSTPRIV+5
+#define KS_WLAN_SET_WPS_PROBE_REQ SIOCIWFIRSTPRIV+6
+#endif
+#define KS_WLAN_GET_EEPROM_CKSUM SIOCIWFIRSTPRIV+7
+#define KS_WLAN_SET_PREAMBLE SIOCIWFIRSTPRIV+8
+#define KS_WLAN_GET_PREAMBLE SIOCIWFIRSTPRIV+9
+#define KS_WLAN_SET_POWER_SAVE SIOCIWFIRSTPRIV+10
+#define KS_WLAN_GET_POWER_SAVE SIOCIWFIRSTPRIV+11
+#define KS_WLAN_SET_SCAN_TYPE SIOCIWFIRSTPRIV+12
+#define KS_WLAN_GET_SCAN_TYPE SIOCIWFIRSTPRIV+13
+#define KS_WLAN_SET_RX_GAIN SIOCIWFIRSTPRIV+14
+#define KS_WLAN_GET_RX_GAIN SIOCIWFIRSTPRIV+15
+#define KS_WLAN_HOSTT SIOCIWFIRSTPRIV+16 /* unused */
+//#define KS_WLAN_SET_REGION SIOCIWFIRSTPRIV+17
+#define KS_WLAN_SET_BEACON_LOST SIOCIWFIRSTPRIV+18
+#define KS_WLAN_GET_BEACON_LOST SIOCIWFIRSTPRIV+19
+
+#define KS_WLAN_SET_TX_GAIN SIOCIWFIRSTPRIV+20
+#define KS_WLAN_GET_TX_GAIN SIOCIWFIRSTPRIV+21
+
+/* for KS7010 */
+#define KS_WLAN_SET_PHY_TYPE SIOCIWFIRSTPRIV+22
+#define KS_WLAN_GET_PHY_TYPE SIOCIWFIRSTPRIV+23
+#define KS_WLAN_SET_CTS_MODE SIOCIWFIRSTPRIV+24
+#define KS_WLAN_GET_CTS_MODE SIOCIWFIRSTPRIV+25
+/* SIOCIWFIRSTPRIV+26 */
+/* SIOCIWFIRSTPRIV+27 */
+#define KS_WLAN_SET_SLEEP_MODE SIOCIWFIRSTPRIV+28 /* sleep mode */
+#define KS_WLAN_GET_SLEEP_MODE SIOCIWFIRSTPRIV+29 /* sleep mode */
+/* SIOCIWFIRSTPRIV+30 */
+/* SIOCIWFIRSTPRIV+31 */
+
+#ifdef __KERNEL__
+
+#include "ks_wlan.h"
+#include <linux/netdevice.h>
+
+extern int ks_wlan_read_config_file(struct ks_wlan_private *priv);
+extern int ks_wlan_setup_parameter(struct ks_wlan_private *priv,
+ unsigned int commit_flag);
+
+#endif /* __KERNEL__ */
+
+#endif /* _KS_WLAN_IOCTL_H */
diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c
new file mode 100644
index 000000000..1e21eb1c4
--- /dev/null
+++ b/drivers/staging/ks7010/ks_wlan_net.c
@@ -0,0 +1,3528 @@
+/*
+ * Driver for KeyStream 11b/g wireless LAN
+ *
+ * Copyright (C) 2005-2008 KeyStream Corp.
+ * Copyright (C) 2009 Renesas Technology Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/rtnetlink.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/mii.h>
+#include <linux/pci.h>
+#include <linux/ctype.h>
+#include <linux/timer.h>
+#include <asm/atomic.h>
+#include <linux/io.h>
+#include <asm/uaccess.h>
+
+static int wep_on_off;
+#define WEP_OFF 0
+#define WEP_ON_64BIT 1
+#define WEP_ON_128BIT 2
+
+#include "ks_wlan.h"
+#include "ks_hostif.h"
+#include "ks_wlan_ioctl.h"
+
+/* Include Wireless Extension definition and check version */
+#include <linux/wireless.h>
+#define WIRELESS_SPY /* enable iwspy support */
+#include <net/iw_handler.h> /* New driver API */
+
+/* Frequency list (map channels to frequencies) */
+static const long frequency_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
+ 2447, 2452, 2457, 2462, 2467, 2472, 2484
+};
+
+/* A few details needed for WEP (Wired Equivalent Privacy) */
+#define MAX_KEY_SIZE 13 /* 104-bit key ("128-bit" WEP, 13 octets) */
+#define MIN_KEY_SIZE 5 /* 40 bits RC4 - WEP */
+typedef struct wep_key_t {
+ u16 len;
+ u8 key[16]; /* 40-bit and 104-bit keys */
+} wep_key_t;
+
+/* Backward compatibility */
+#ifndef IW_ENCODE_NOKEY
+#define IW_ENCODE_NOKEY 0x0800 /* Key is write only, so not present */
+#define IW_ENCODE_MODE (IW_ENCODE_DISABLED | IW_ENCODE_RESTRICTED | IW_ENCODE_OPEN)
+#endif /* IW_ENCODE_NOKEY */
+
+/* List of Wireless Handlers (new API) */
+static const struct iw_handler_def ks_wlan_handler_def;
+
+#define KSC_OPNOTSUPP /* Operation Not Supported */
+
+/*
+ * function prototypes
+ */
+extern int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p,
+ unsigned long size,
+ void (*complete_handler) (void *arg1, void *arg2),
+ void *arg1, void *arg2);
+static int ks_wlan_open(struct net_device *dev);
+static void ks_wlan_tx_timeout(struct net_device *dev);
+static int ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static int ks_wlan_close(struct net_device *dev);
+static void ks_wlan_set_multicast_list(struct net_device *dev);
+static struct net_device_stats *ks_wlan_get_stats(struct net_device *dev);
+static int ks_wlan_set_mac_address(struct net_device *dev, void *addr);
+static int ks_wlan_netdev_ioctl(struct net_device *dev, struct ifreq *rq,
+ int cmd);
+
+static atomic_t update_phyinfo;
+static struct timer_list update_phyinfo_timer;
+static
+int ks_wlan_update_phy_information(struct ks_wlan_private *priv)
+{
+ struct iw_statistics *wstats = &priv->wstats;
+
+ DPRINTK(4, "in_interrupt = %ld\n", in_interrupt());
+
+ if (priv->dev_state < DEVICE_STATE_READY) {
+ return -1; /* initialization not finished */
+ }
+ if (atomic_read(&update_phyinfo))
+ return 1;
+
+ /* The status */
+ wstats->status = priv->reg.operation_mode; /* Operation mode */
+
+ /* Signal quality and co. But where is the noise level ??? */
+ hostif_sme_enqueue(priv, SME_PHY_INFO_REQUEST);
+
+ /* interruptible_sleep_on_timeout(&priv->confirm_wait, HZ/2); */
+ if (!wait_for_completion_interruptible_timeout
+ (&priv->confirm_wait, HZ / 2)) {
+ DPRINTK(1, "wait time out!!\n");
+ }
+
+ atomic_inc(&update_phyinfo);
+ update_phyinfo_timer.expires = jiffies + HZ; /* 1sec */
+ add_timer(&update_phyinfo_timer);
+
+ return 0;
+}
+
+static
+void ks_wlan_update_phyinfo_timeout(unsigned long ptr)
+{
+ DPRINTK(4, "in_interrupt = %ld\n", in_interrupt());
+ atomic_set(&update_phyinfo, 0);
+}
+
+int ks_wlan_setup_parameter(struct ks_wlan_private *priv,
+ unsigned int commit_flag)
+{
+ DPRINTK(2, "\n");
+
+ hostif_sme_enqueue(priv, SME_STOP_REQUEST);
+
+ if (commit_flag & SME_RTS)
+ hostif_sme_enqueue(priv, SME_RTS_THRESHOLD_REQUEST);
+ if (commit_flag & SME_FRAG)
+ hostif_sme_enqueue(priv, SME_FRAGMENTATION_THRESHOLD_REQUEST);
+
+ if (commit_flag & SME_WEP_INDEX)
+ hostif_sme_enqueue(priv, SME_WEP_INDEX_REQUEST);
+ if (commit_flag & SME_WEP_VAL1)
+ hostif_sme_enqueue(priv, SME_WEP_KEY1_REQUEST);
+ if (commit_flag & SME_WEP_VAL2)
+ hostif_sme_enqueue(priv, SME_WEP_KEY2_REQUEST);
+ if (commit_flag & SME_WEP_VAL3)
+ hostif_sme_enqueue(priv, SME_WEP_KEY3_REQUEST);
+ if (commit_flag & SME_WEP_VAL4)
+ hostif_sme_enqueue(priv, SME_WEP_KEY4_REQUEST);
+ if (commit_flag & SME_WEP_FLAG)
+ hostif_sme_enqueue(priv, SME_WEP_FLAG_REQUEST);
+
+ if (commit_flag & SME_RSN) {
+ hostif_sme_enqueue(priv, SME_RSN_ENABLED_REQUEST);
+ hostif_sme_enqueue(priv, SME_RSN_MODE_REQUEST);
+ }
+ if (commit_flag & SME_RSN_MULTICAST)
+ hostif_sme_enqueue(priv, SME_RSN_MCAST_REQUEST);
+ if (commit_flag & SME_RSN_UNICAST)
+ hostif_sme_enqueue(priv, SME_RSN_UCAST_REQUEST);
+ if (commit_flag & SME_RSN_AUTH)
+ hostif_sme_enqueue(priv, SME_RSN_AUTH_REQUEST);
+
+ hostif_sme_enqueue(priv, SME_MODE_SET_REQUEST);
+
+ hostif_sme_enqueue(priv, SME_START_REQUEST);
+
+ return 0;
+}
+
+/*
+ * Initial Wireless Extension code for Ks_Wlannet driver by :
+ * Jean Tourrilhes <jt@hpl.hp.com> - HPL - 17 November 00
+ * Conversion to new driver API by :
+ * Jean Tourrilhes <jt@hpl.hp.com> - HPL - 26 March 02
+ * Javier also did a good amount of work here, adding some new extensions
+ * and fixing my code. Let's just say that without him this code just
+ * would not work at all... - Jean II
+ */
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get protocol name */
+static int ks_wlan_get_name(struct net_device *dev,
+ struct iw_request_info *info, char *cwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (priv->dev_state < DEVICE_STATE_READY) {
+ strcpy(cwrq, "NOT READY!");
+ } else if (priv->reg.phy_type == D_11B_ONLY_MODE) {
+ strcpy(cwrq, "IEEE 802.11b");
+ } else if (priv->reg.phy_type == D_11G_ONLY_MODE) {
+ strcpy(cwrq, "IEEE 802.11g");
+ } else {
+ strcpy(cwrq, "IEEE 802.11b/g");
+ }
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set frequency */
+static int ks_wlan_set_freq(struct net_device *dev,
+ struct iw_request_info *info, struct iw_freq *fwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ int rc = -EINPROGRESS; /* Call commit handler */
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+
+ /* for SLEEP MODE */
+ /* If setting by frequency, convert to a channel */
+ if ((fwrq->e == 1) &&
+ (fwrq->m >= (int)2.412e8) && (fwrq->m <= (int)2.487e8)) {
+ int f = fwrq->m / 100000;
+ int c = 0;
+ while ((c < 14) && (f != frequency_list[c]))
+ c++;
+ /* Hack to fall through... */
+ fwrq->e = 0;
+ fwrq->m = c + 1;
+ }
+ /* Setting by channel number */
+ if ((fwrq->m > 1000) || (fwrq->e > 0))
+ rc = -EOPNOTSUPP;
+ else {
+ int channel = fwrq->m;
+ /* We should do a better check than that,
+ * based on the card capability !!! */
+ if ((channel < 1) || (channel > 14)) {
+ printk(KERN_DEBUG
+ "%s: New channel value of %d is invalid!\n",
+ dev->name, fwrq->m);
+ rc = -EINVAL;
+ } else {
+ /* Yes ! We can set it !!! */
+ priv->reg.channel = (u8) (channel);
+ priv->need_commit |= SME_MODE_SET;
+ }
+ }
+
+ return rc;
+}
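+
+/*
+ * A worked example of the conversion above: iwconfig reports channel 1
+ * as 2.412 GHz, i.e. fwrq->m == 241200000 with fwrq->e == 1 (the value
+ * being fwrq->m * 10 Hz). Then f == 241200000 / 100000 == 2412, which
+ * matches frequency_list[0], so the request is rewritten to
+ * fwrq->m == 1, fwrq->e == 0 and handled as a channel number.
+ */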
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get frequency */
+static int ks_wlan_get_freq(struct net_device *dev,
+ struct iw_request_info *info, struct iw_freq *fwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ int f;
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+ f = (int)priv->current_ap.channel;
+ } else {
+ f = (int)priv->reg.channel;
+ }
+ fwrq->m = frequency_list[f - 1] * 100000;
+ fwrq->e = 1;
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set ESSID */
+static int ks_wlan_set_essid(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ size_t len;
+
+ DPRINTK(2, " %d\n", dwrq->flags);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+
+ /* for SLEEP MODE */
+ /* Check if we asked for `any' */
+ if (dwrq->flags == 0) {
+ /* Just send an empty SSID list */
+ memset(priv->reg.ssid.body, 0, sizeof(priv->reg.ssid.body));
+ priv->reg.ssid.size = 0;
+ } else {
+#if 1
+ len = dwrq->length;
+ /* iwconfig uses NUL termination in SSID... */
+ if (len > 0 && extra[len - 1] == '\0')
+ len--;
+
+ /* Check the size of the string */
+ if (len > IW_ESSID_MAX_SIZE) {
+ return -EINVAL;
+ }
+#else
+ /* Check the size of the string */
+ if (dwrq->length > IW_ESSID_MAX_SIZE + 1) {
+ return -E2BIG;
+ }
+#endif
+
+ /* Set the SSID */
+ memset(priv->reg.ssid.body, 0, sizeof(priv->reg.ssid.body));
+
+#if 1
+ memcpy(priv->reg.ssid.body, extra, len);
+ priv->reg.ssid.size = len;
+#else
+ memcpy(priv->reg.ssid.body, extra, dwrq->length);
+ priv->reg.ssid.size = dwrq->length;
+#endif
+ }
+ /* Write it to the card */
+ priv->need_commit |= SME_MODE_SET;
+
+// return -EINPROGRESS; /* Call commit handler */
+ ks_wlan_setup_parameter(priv, priv->need_commit);
+ priv->need_commit = 0;
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get ESSID */
+static int ks_wlan_get_essid(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+
+ /* for SLEEP MODE */
+ /* Note : if dwrq->flags != 0, we should
+ * get the relevant SSID from the SSID list... */
+ if (priv->reg.ssid.size) {
+ /* Get the current SSID */
+ memcpy(extra, priv->reg.ssid.body, priv->reg.ssid.size);
+#if 0
+ extra[priv->reg.ssid.size] = '\0';
+#endif
+ /* If none, we may want to get the one that was set */
+
+ /* Push it out ! */
+#if 1
+ dwrq->length = priv->reg.ssid.size;
+#else
+ dwrq->length = priv->reg.ssid.size + 1;
+#endif
+ dwrq->flags = 1; /* active */
+ } else {
+#if 1
+ dwrq->length = 0;
+#else
+ extra[0] = '\0';
+ dwrq->length = 1;
+#endif
+ dwrq->flags = 0; /* ANY */
+ }
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set AP address */
+static int ks_wlan_set_wap(struct net_device *dev, struct iw_request_info *info,
+ struct sockaddr *ap_addr, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ DPRINTK(2, "\n");
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (priv->reg.operation_mode == MODE_ADHOC ||
+ priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
+ memcpy(priv->reg.bssid, (u8 *) & ap_addr->sa_data, ETH_ALEN);
+
+ if (is_valid_ether_addr((u8 *) priv->reg.bssid)) {
+ priv->need_commit |= SME_MODE_SET;
+ }
+ } else {
+ memset(priv->reg.bssid, 0x0, ETH_ALEN);
+ return -EOPNOTSUPP;
+ }
+
+ DPRINTK(2, "bssid = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ priv->reg.bssid[0], priv->reg.bssid[1], priv->reg.bssid[2],
+ priv->reg.bssid[3], priv->reg.bssid[4], priv->reg.bssid[5]);
+
+ /* Write it to the card */
+ if (priv->need_commit) {
+ priv->need_commit |= SME_MODE_SET;
+ return -EINPROGRESS; /* Call commit handler */
+ }
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get AP address */
+static int ks_wlan_get_wap(struct net_device *dev, struct iw_request_info *info,
+ struct sockaddr *awrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+ memcpy(awrq->sa_data, &(priv->current_ap.bssid[0]), ETH_ALEN);
+ } else {
+ memset(awrq->sa_data, 0, ETH_ALEN);
+ }
+
+ awrq->sa_family = ARPHRD_ETHER;
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set Nickname */
+static int ks_wlan_set_nick(struct net_device *dev,
+ struct iw_request_info *info, struct iw_point *dwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+
+ /* for SLEEP MODE */
+ /* Check the size of the string */
+ if (dwrq->length > 16 + 1) {
+ return -E2BIG;
+ }
+ memset(priv->nick, 0, sizeof(priv->nick));
+ memcpy(priv->nick, extra, dwrq->length);
+
+ return -EINPROGRESS; /* Call commit handler */
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get Nickname */
+static int ks_wlan_get_nick(struct net_device *dev,
+ struct iw_request_info *info, struct iw_point *dwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ strncpy(extra, priv->nick, 16);
+ extra[16] = '\0';
+ dwrq->length = strlen(extra) + 1;
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set Bit-Rate */
+static int ks_wlan_set_rate(struct net_device *dev,
+ struct iw_request_info *info, struct iw_param *vwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ int i = 0;
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (priv->reg.phy_type == D_11B_ONLY_MODE) {
+ if (vwrq->fixed == 1) {
+ switch (vwrq->value) {
+ case 11000000:
+ case 5500000:
+ priv->reg.rate_set.body[0] =
+ (uint8_t) (vwrq->value / 500000);
+ break;
+ case 2000000:
+ case 1000000:
+ priv->reg.rate_set.body[0] =
+ ((uint8_t) (vwrq->value / 500000)) |
+ BASIC_RATE;
+ break;
+ default:
+ return -EINVAL;
+ }
+ priv->reg.tx_rate = TX_RATE_FIXED;
+ priv->reg.rate_set.size = 1;
+ } else { /* vwrq->fixed == 0 */
+ if (vwrq->value > 0) {
+ switch (vwrq->value) {
+ case 11000000:
+ priv->reg.rate_set.body[3] =
+ TX_RATE_11M;
+ i++;
+ case 5500000:
+ priv->reg.rate_set.body[2] = TX_RATE_5M;
+ i++;
+ case 2000000:
+ priv->reg.rate_set.body[1] =
+ TX_RATE_2M | BASIC_RATE;
+ i++;
+ case 1000000:
+ priv->reg.rate_set.body[0] =
+ TX_RATE_1M | BASIC_RATE;
+ i++;
+ break;
+ default:
+ return -EINVAL;
+ }
+ priv->reg.tx_rate = TX_RATE_MANUAL_AUTO;
+ priv->reg.rate_set.size = i;
+ } else {
+ priv->reg.rate_set.body[3] = TX_RATE_11M;
+ priv->reg.rate_set.body[2] = TX_RATE_5M;
+ priv->reg.rate_set.body[1] =
+ TX_RATE_2M | BASIC_RATE;
+ priv->reg.rate_set.body[0] =
+ TX_RATE_1M | BASIC_RATE;
+ priv->reg.tx_rate = TX_RATE_FULL_AUTO;
+ priv->reg.rate_set.size = 4;
+ }
+ }
+ } else { /* D_11B_ONLY_MODE or D_11BG_COMPATIBLE_MODE */
+ if (vwrq->fixed == 1) {
+ switch (vwrq->value) {
+ case 54000000:
+ case 48000000:
+ case 36000000:
+ case 18000000:
+ case 9000000:
+ priv->reg.rate_set.body[0] =
+ (uint8_t) (vwrq->value / 500000);
+ break;
+ case 24000000:
+ case 12000000:
+ case 11000000:
+ case 6000000:
+ case 5500000:
+ case 2000000:
+ case 1000000:
+ priv->reg.rate_set.body[0] =
+ ((uint8_t) (vwrq->value / 500000)) |
+ BASIC_RATE;
+ break;
+ default:
+ return -EINVAL;
+ }
+ priv->reg.tx_rate = TX_RATE_FIXED;
+ priv->reg.rate_set.size = 1;
+ } else { /* vwrq->fixed == 0 */
+ if (vwrq->value > 0) {
+ switch (vwrq->value) {
+ case 54000000:
+ priv->reg.rate_set.body[11] =
+ TX_RATE_54M;
+ i++;
+ case 48000000:
+ priv->reg.rate_set.body[10] =
+ TX_RATE_48M;
+ i++;
+ case 36000000:
+ priv->reg.rate_set.body[9] =
+ TX_RATE_36M;
+ i++;
+ case 24000000:
+ case 18000000:
+ case 12000000:
+ case 11000000:
+ case 9000000:
+ case 6000000:
+ if (vwrq->value == 24000000) {
+ priv->reg.rate_set.body[8] =
+ TX_RATE_18M;
+ i++;
+ priv->reg.rate_set.body[7] =
+ TX_RATE_9M;
+ i++;
+ priv->reg.rate_set.body[6] =
+ TX_RATE_24M | BASIC_RATE;
+ i++;
+ priv->reg.rate_set.body[5] =
+ TX_RATE_12M | BASIC_RATE;
+ i++;
+ priv->reg.rate_set.body[4] =
+ TX_RATE_6M | BASIC_RATE;
+ i++;
+ priv->reg.rate_set.body[3] =
+ TX_RATE_11M | BASIC_RATE;
+ i++;
+ } else if (vwrq->value == 18000000) {
+ priv->reg.rate_set.body[7] =
+ TX_RATE_18M;
+ i++;
+ priv->reg.rate_set.body[6] =
+ TX_RATE_9M;
+ i++;
+ priv->reg.rate_set.body[5] =
+ TX_RATE_12M | BASIC_RATE;
+ i++;
+ priv->reg.rate_set.body[4] =
+ TX_RATE_6M | BASIC_RATE;
+ i++;
+ priv->reg.rate_set.body[3] =
+ TX_RATE_11M | BASIC_RATE;
+ i++;
+ } else if (vwrq->value == 12000000) {
+ priv->reg.rate_set.body[6] =
+ TX_RATE_9M;
+ i++;
+ priv->reg.rate_set.body[5] =
+ TX_RATE_12M | BASIC_RATE;
+ i++;
+ priv->reg.rate_set.body[4] =
+ TX_RATE_6M | BASIC_RATE;
+ i++;
+ priv->reg.rate_set.body[3] =
+ TX_RATE_11M | BASIC_RATE;
+ i++;
+ } else if (vwrq->value == 11000000) {
+ priv->reg.rate_set.body[5] =
+ TX_RATE_9M;
+ i++;
+ priv->reg.rate_set.body[4] =
+ TX_RATE_6M | BASIC_RATE;
+ i++;
+ priv->reg.rate_set.body[3] =
+ TX_RATE_11M | BASIC_RATE;
+ i++;
+ } else if (vwrq->value == 9000000) {
+ priv->reg.rate_set.body[4] =
+ TX_RATE_9M;
+ i++;
+ priv->reg.rate_set.body[3] =
+ TX_RATE_6M | BASIC_RATE;
+ i++;
+ } else { /* vwrq->value == 6000000 */
+ priv->reg.rate_set.body[3] =
+ TX_RATE_6M | BASIC_RATE;
+ i++;
+ }
+ case 5500000:
+ priv->reg.rate_set.body[2] =
+ TX_RATE_5M | BASIC_RATE;
+ i++;
+ case 2000000:
+ priv->reg.rate_set.body[1] =
+ TX_RATE_2M | BASIC_RATE;
+ i++;
+ case 1000000:
+ priv->reg.rate_set.body[0] =
+ TX_RATE_1M | BASIC_RATE;
+ i++;
+ break;
+ default:
+ return -EINVAL;
+ }
+ priv->reg.tx_rate = TX_RATE_MANUAL_AUTO;
+ priv->reg.rate_set.size = i;
+ } else {
+ priv->reg.rate_set.body[11] = TX_RATE_54M;
+ priv->reg.rate_set.body[10] = TX_RATE_48M;
+ priv->reg.rate_set.body[9] = TX_RATE_36M;
+ priv->reg.rate_set.body[8] = TX_RATE_18M;
+ priv->reg.rate_set.body[7] = TX_RATE_9M;
+ priv->reg.rate_set.body[6] =
+ TX_RATE_24M | BASIC_RATE;
+ priv->reg.rate_set.body[5] =
+ TX_RATE_12M | BASIC_RATE;
+ priv->reg.rate_set.body[4] =
+ TX_RATE_6M | BASIC_RATE;
+ priv->reg.rate_set.body[3] =
+ TX_RATE_11M | BASIC_RATE;
+ priv->reg.rate_set.body[2] =
+ TX_RATE_5M | BASIC_RATE;
+ priv->reg.rate_set.body[1] =
+ TX_RATE_2M | BASIC_RATE;
+ priv->reg.rate_set.body[0] =
+ TX_RATE_1M | BASIC_RATE;
+ priv->reg.tx_rate = TX_RATE_FULL_AUTO;
+ priv->reg.rate_set.size = 12;
+ }
+ }
+ }
+
+ priv->need_commit |= SME_MODE_SET;
+
+ return -EINPROGRESS; /* Call commit handler */
+}
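+
+/*
+ * Rate encoding sketch: WEXT passes bit rates in bit/s while the firmware
+ * rate set uses the 802.11 convention of 500 kbit/s units, with the top
+ * bit marking a basic rate (assuming BASIC_RATE is 0x80, as in ks_wlan.h):
+ *
+ *	11000000 / 500000 = 22 -> 0x16                 (11 Mbit/s)
+ *	 1000000 / 500000 =  2 -> 0x02 | 0x80 = 0x82   (1 Mbit/s, basic)
+ *
+ * In the auto-rate branches the case labels fall through on purpose, so
+ * that requesting e.g. 5.5 Mbit/s also enables 2 and 1 Mbit/s, with i
+ * counting how many rate_set entries were filled in.
+ */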
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get Bit-Rate */
+static int ks_wlan_get_rate(struct net_device *dev,
+ struct iw_request_info *info, struct iw_param *vwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ DPRINTK(2, "in_interrupt = %ld update_phyinfo = %d\n",
+ in_interrupt(), atomic_read(&update_phyinfo));
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (!atomic_read(&update_phyinfo)) {
+ ks_wlan_update_phy_information(priv);
+ }
+ vwrq->value = ((priv->current_rate) & RATE_MASK) * 500000;
+ if (priv->reg.tx_rate == TX_RATE_FIXED)
+ vwrq->fixed = 1;
+ else
+ vwrq->fixed = 0;
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set RTS threshold */
+static int ks_wlan_set_rts(struct net_device *dev, struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ int rthr = vwrq->value;
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (vwrq->disabled)
+ rthr = 2347;
+ if ((rthr < 0) || (rthr > 2347)) {
+ return -EINVAL;
+ }
+ priv->reg.rts = rthr;
+ priv->need_commit |= SME_RTS;
+
+ return -EINPROGRESS; /* Call commit handler */
+}
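+
+/*
+ * RTS semantics follow the usual WEXT convention: 2347, one above the
+ * maximum 802.11 MPDU size of 2346 octets, means RTS/CTS is never used.
+ * That is why vwrq->disabled maps to 2347 above and why ks_wlan_get_rts
+ * reports disabled for any threshold >= 2347.
+ */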
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get RTS threshold */
+static int ks_wlan_get_rts(struct net_device *dev, struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ vwrq->value = priv->reg.rts;
+ vwrq->disabled = (vwrq->value >= 2347);
+ vwrq->fixed = 1;
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set Fragmentation threshold */
+static int ks_wlan_set_frag(struct net_device *dev,
+ struct iw_request_info *info, struct iw_param *vwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ int fthr = vwrq->value;
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (vwrq->disabled)
+ fthr = 2346;
+ if ((fthr < 256) || (fthr > 2346)) {
+ return -EINVAL;
+ }
+ fthr &= ~0x1; /* Get an even value - is it really needed ??? */
+ priv->reg.fragment = fthr;
+ priv->need_commit |= SME_FRAG;
+
+ return -EINPROGRESS; /* Call commit handler */
+}
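+
+/*
+ * The mask above rounds an odd threshold down to the next even value,
+ * e.g. fthr = 2345 becomes 2344 (2345 & ~0x1).  Early 802.11
+ * implementations required an even fragmentation threshold; the inline
+ * comment already questions whether the mask is still needed.
+ */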
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get Fragmentation threshold */
+static int ks_wlan_get_frag(struct net_device *dev,
+ struct iw_request_info *info, struct iw_param *vwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ vwrq->value = priv->reg.fragment;
+ vwrq->disabled = (vwrq->value >= 2346);
+ vwrq->fixed = 1;
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set Mode of Operation */
+static int ks_wlan_set_mode(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ DPRINTK(2, "mode=%d\n", *uwrq);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ switch (*uwrq) {
+ case IW_MODE_ADHOC:
+ priv->reg.operation_mode = MODE_ADHOC;
+ priv->need_commit |= SME_MODE_SET;
+ break;
+ case IW_MODE_INFRA:
+ priv->reg.operation_mode = MODE_INFRASTRUCTURE;
+ priv->need_commit |= SME_MODE_SET;
+ break;
+ case IW_MODE_AUTO:
+ case IW_MODE_MASTER:
+ case IW_MODE_REPEAT:
+ case IW_MODE_SECOND:
+ case IW_MODE_MONITOR:
+ default:
+ return -EINVAL;
+ }
+
+ return -EINPROGRESS; /* Call commit handler */
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get Mode of Operation */
+static int ks_wlan_get_mode(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+
+ /* for SLEEP MODE */
+ /* If not managed, assume it's ad-hoc */
+ switch (priv->reg.operation_mode) {
+ case MODE_INFRASTRUCTURE:
+ *uwrq = IW_MODE_INFRA;
+ break;
+ case MODE_ADHOC:
+ *uwrq = IW_MODE_ADHOC;
+ break;
+ default:
+ *uwrq = IW_MODE_ADHOC;
+ }
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set Encryption Key */
+static int ks_wlan_set_encode(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ wep_key_t key;
+ int index = (dwrq->flags & IW_ENCODE_INDEX);
+ int current_index = priv->reg.wep_index;
+ int i;
+
+ DPRINTK(2, "flags=%04X\n", dwrq->flags);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+
+ /* for SLEEP MODE */
+ /* index check */
+ if ((index < 0) || (index > 4))
+ return -EINVAL;
+ else if (index == 0)
+ index = current_index;
+ else
+ index--;
+
+ /* Is WEP supported ? */
+ /* Basic checking: do we have a key to set ? */
+ if (dwrq->length > 0) {
+ if (dwrq->length > MAX_KEY_SIZE) { /* Check the size of the key */
+ return -EINVAL;
+ }
+ if (dwrq->length > MIN_KEY_SIZE) { /* Set the length */
+ key.len = MAX_KEY_SIZE;
+ priv->reg.privacy_invoked = 0x01;
+ priv->need_commit |= SME_WEP_FLAG;
+ wep_on_off = WEP_ON_128BIT;
+		} else {	/* 0 < dwrq->length <= MIN_KEY_SIZE */
+			key.len = MIN_KEY_SIZE;
+			priv->reg.privacy_invoked = 0x01;
+			priv->need_commit |= SME_WEP_FLAG;
+			wep_on_off = WEP_ON_64BIT;
+		}
+ /* Check if the key is not marked as invalid */
+ if (!(dwrq->flags & IW_ENCODE_NOKEY)) {
+ /* Cleanup */
+ memset(key.key, 0, MAX_KEY_SIZE);
+			/* Copy the key into the driver; the wireless core
+			 * has already copied the user buffer into extra. */
+			memcpy(key.key, extra, dwrq->length);
+ /* Send the key to the card */
+ priv->reg.wep_key[index].size = key.len;
+ for (i = 0; i < (priv->reg.wep_key[index].size); i++) {
+ priv->reg.wep_key[index].val[i] = key.key[i];
+ }
+ priv->need_commit |= (SME_WEP_VAL1 << index);
+ priv->reg.wep_index = index;
+ priv->need_commit |= SME_WEP_INDEX;
+ }
+ } else {
+ if (dwrq->flags & IW_ENCODE_DISABLED) {
+ priv->reg.wep_key[0].size = 0;
+ priv->reg.wep_key[1].size = 0;
+ priv->reg.wep_key[2].size = 0;
+ priv->reg.wep_key[3].size = 0;
+ priv->reg.privacy_invoked = 0x00;
+ if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY) {
+ priv->need_commit |= SME_MODE_SET;
+ }
+ priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM;
+ wep_on_off = WEP_OFF;
+ priv->need_commit |= SME_WEP_FLAG;
+ } else {
+ /* Do we want to just set the transmit key index ? */
+ if ((index >= 0) && (index < 4)) {
+ /* set_wep_key(priv, index, 0, 0, 1); xxx */
+ if (priv->reg.wep_key[index].size) {
+ priv->reg.wep_index = index;
+ priv->need_commit |= SME_WEP_INDEX;
+ } else
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* Commit the changes if needed */
+ if (dwrq->flags & IW_ENCODE_MODE)
+ priv->need_commit |= SME_WEP_FLAG;
+
+ if (dwrq->flags & IW_ENCODE_OPEN) {
+ if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY) {
+ priv->need_commit |= SME_MODE_SET;
+ }
+ priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM;
+ } else if (dwrq->flags & IW_ENCODE_RESTRICTED) {
+ if (priv->reg.authenticate_type == AUTH_TYPE_OPEN_SYSTEM) {
+ priv->need_commit |= SME_MODE_SET;
+ }
+ priv->reg.authenticate_type = AUTH_TYPE_SHARED_KEY;
+ }
+// return -EINPROGRESS; /* Call commit handler */
+ if (priv->need_commit) {
+ ks_wlan_setup_parameter(priv, priv->need_commit);
+ priv->need_commit = 0;
+ }
+ return 0;
+}
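+
+/*
+ * WEP sizing sketch (assuming MIN_KEY_SIZE is 5 and MAX_KEY_SIZE is 13 as
+ * defined in ks_wlan.h): a key of 1..5 octets selects WEP-64 (40-bit
+ * secret, zero-padded by the memset above) and 6..13 octets selects
+ * WEP-128 (104-bit secret).  The low bits of dwrq->flags carry the key
+ * index: (flags & IW_ENCODE_INDEX) == 0 means "use the current default
+ * key", while 1..4 select key slot 0..3 after the index-- above.
+ */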
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get Encryption Key */
+static int ks_wlan_get_encode(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ char zeros[16];
+ int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ dwrq->flags = IW_ENCODE_DISABLED;
+
+ /* Check encryption mode */
+ switch (priv->reg.authenticate_type) {
+ case AUTH_TYPE_OPEN_SYSTEM:
+ dwrq->flags = IW_ENCODE_OPEN;
+ break;
+ case AUTH_TYPE_SHARED_KEY:
+ dwrq->flags = IW_ENCODE_RESTRICTED;
+ break;
+ }
+
+ memset(zeros, 0, sizeof(zeros));
+
+ /* Which key do we want ? -1 -> tx index */
+ if ((index < 0) || (index >= 4))
+ index = priv->reg.wep_index;
+ if (priv->reg.privacy_invoked) {
+ dwrq->flags &= ~IW_ENCODE_DISABLED;
+ /* dwrq->flags |= IW_ENCODE_NOKEY; */
+ }
+ dwrq->flags |= index + 1;
+ DPRINTK(2, "encoding flag = 0x%04X\n", dwrq->flags);
+ /* Copy the key to the user buffer */
+ if ((index >= 0) && (index < 4))
+ dwrq->length = priv->reg.wep_key[index].size;
+ if (dwrq->length > 16) {
+ dwrq->length = 0;
+ }
+	if (dwrq->length) {
+		if ((index >= 0) && (index < 4))
+			memcpy(extra, priv->reg.wep_key[index].val,
+			       dwrq->length);
+	} else {
+		memcpy(extra, zeros, dwrq->length);
+	}
+ return 0;
+}
+
+#ifndef KSC_OPNOTSUPP
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set Tx-Power */
+static int ks_wlan_set_txpow(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+ return -EOPNOTSUPP; /* Not Support */
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get Tx-Power */
+static int ks_wlan_get_txpow(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+	struct ks_wlan_private *priv =
+	    (struct ks_wlan_private *)netdev_priv(dev);
+
+	if (priv->sleep_mode == SLP_SLEEP) {
+		return -EPERM;
+	}
+
+ /* for SLEEP MODE */
+ /* Not Support */
+ vwrq->value = 0;
+ vwrq->disabled = (vwrq->value == 0);
+ vwrq->fixed = 1;
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set Retry limits */
+static int ks_wlan_set_retry(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+ return -EOPNOTSUPP; /* Not Support */
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get Retry limits */
+static int ks_wlan_get_retry(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+	struct ks_wlan_private *priv =
+	    (struct ks_wlan_private *)netdev_priv(dev);
+
+	if (priv->sleep_mode == SLP_SLEEP) {
+		return -EPERM;
+	}
+
+ /* for SLEEP MODE */
+ /* Not Support */
+ vwrq->value = 0;
+ vwrq->disabled = (vwrq->value == 0);
+ vwrq->fixed = 1;
+ return 0;
+}
+#endif /* KSC_OPNOTSUPP */
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get range info */
+static int ks_wlan_get_range(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ struct iw_range *range = (struct iw_range *)extra;
+ int i, k;
+
+ DPRINTK(2, "\n");
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ dwrq->length = sizeof(struct iw_range);
+ memset(range, 0, sizeof(*range));
+ range->min_nwid = 0x0000;
+ range->max_nwid = 0x0000;
+ range->num_channels = 14;
+ /* Should be based on cap_rid.country to give only
+	 * what the current card supports */
+ k = 0;
+ for (i = 0; i < 13; i++) { /* channel 1 -- 13 */
+ range->freq[k].i = i + 1; /* List index */
+ range->freq[k].m = frequency_list[i] * 100000;
+ range->freq[k++].e = 1; /* Values in table in MHz -> * 10^5 * 10 */
+ }
+ range->num_frequency = k;
+ if (priv->reg.phy_type == D_11B_ONLY_MODE || priv->reg.phy_type == D_11BG_COMPATIBLE_MODE) { /* channel 14 */
+ range->freq[13].i = 14; /* List index */
+ range->freq[13].m = frequency_list[13] * 100000;
+ range->freq[13].e = 1; /* Values in table in MHz -> * 10^5 * 10 */
+ range->num_frequency = 14;
+ }
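+	/*
+	 * iw_freq is a mantissa/exponent pair: frequency = m * 10^e Hz.
+	 * Channel 1 is 2412 MHz and frequency_list[] stores MHz values,
+	 * so m = 2412 * 100000 = 241200000 with e = 1 yields 2.412 GHz.
+	 */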
+
+ /* Hum... Should put the right values there */
+ range->max_qual.qual = 100;
+ range->max_qual.level = 256 - 128; /* 0 dBm? */
+ range->max_qual.noise = 256 - 128;
+ range->sensitivity = 1;
+
+ if (priv->reg.phy_type == D_11B_ONLY_MODE) {
+ range->bitrate[0] = 1e6;
+ range->bitrate[1] = 2e6;
+ range->bitrate[2] = 5.5e6;
+ range->bitrate[3] = 11e6;
+ range->num_bitrates = 4;
+ } else { /* D_11G_ONLY_MODE or D_11BG_COMPATIBLE_MODE */
+ range->bitrate[0] = 1e6;
+ range->bitrate[1] = 2e6;
+ range->bitrate[2] = 5.5e6;
+ range->bitrate[3] = 11e6;
+
+ range->bitrate[4] = 6e6;
+ range->bitrate[5] = 9e6;
+ range->bitrate[6] = 12e6;
+ if (IW_MAX_BITRATES < 9) {
+ range->bitrate[7] = 54e6;
+ range->num_bitrates = 8;
+ } else {
+ range->bitrate[7] = 18e6;
+ range->bitrate[8] = 24e6;
+ range->bitrate[9] = 36e6;
+ range->bitrate[10] = 48e6;
+ range->bitrate[11] = 54e6;
+
+ range->num_bitrates = 12;
+ }
+ }
+
+	/* Set an indication of the max TCP throughput
+	 * in bit/s that we can expect using this interface.
+	 * May be of use for QoS stuff... Jean II */
+	if (range->num_bitrates > 2)
+		range->throughput = 5000 * 1000;
+	else
+		range->throughput = 1500 * 1000;
+
+ range->min_rts = 0;
+ range->max_rts = 2347;
+ range->min_frag = 256;
+ range->max_frag = 2346;
+
+ range->encoding_size[0] = 5; /* WEP: RC4 40 bits */
+ range->encoding_size[1] = 13; /* WEP: RC4 ~128 bits */
+ range->num_encoding_sizes = 2;
+ range->max_encoding_tokens = 4;
+
+	/* power management is not supported */
+ range->pmp_flags = IW_POWER_ON;
+ range->pmt_flags = IW_POWER_ON;
+ range->pm_capa = 0;
+
+	/* Transmit Power - values are in dBm (or mW) */
+ range->txpower[0] = -256;
+ range->num_txpower = 1;
+ range->txpower_capa = IW_TXPOW_DBM;
+ /* range->txpower_capa = IW_TXPOW_MWATT; */
+
+ range->we_version_source = 21;
+ range->we_version_compiled = WIRELESS_EXT;
+
+ range->retry_capa = IW_RETRY_ON;
+ range->retry_flags = IW_RETRY_ON;
+ range->r_time_flags = IW_RETRY_ON;
+
+ /* Experimental measurements - boundary 11/5.5 Mb/s */
+ /* Note : with or without the (local->rssi), results
+ * are somewhat different. - Jean II */
+ range->avg_qual.qual = 50;
+ range->avg_qual.level = 186; /* -70 dBm */
+ range->avg_qual.noise = 0;
+
+ /* Event capability (kernel + driver) */
+ range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
+ IW_EVENT_CAPA_MASK(SIOCGIWAP) |
+ IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
+ range->event_capa[1] = IW_EVENT_CAPA_K_1;
+ range->event_capa[4] = (IW_EVENT_CAPA_MASK(IWEVCUSTOM) |
+ IW_EVENT_CAPA_MASK(IWEVMICHAELMICFAILURE));
+
+ /* encode extension (WPA) capability */
+ range->enc_capa = (IW_ENC_CAPA_WPA |
+ IW_ENC_CAPA_WPA2 |
+ IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP);
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set Power Management */
+static int ks_wlan_set_power(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ short enabled;
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+	enabled = vwrq->disabled ? 0 : 1;
+	if (!enabled) {
+		priv->reg.powermgt = POWMGT_ACTIVE_MODE;
+	} else {
+		/* SIOCSIWPOWER can only select SAVE1 here; SAVE2 is
+		 * reachable through the private set_powermgt handler.
+		 * Power save requires infrastructure mode. */
+		if (priv->reg.operation_mode == MODE_INFRASTRUCTURE)
+			priv->reg.powermgt = POWMGT_SAVE1_MODE;
+		else
+			return -EINVAL;
+	}
+
+ hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST);
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get Power Management */
+static int ks_wlan_get_power(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (priv->reg.powermgt > 0)
+ vwrq->disabled = 0;
+ else
+ vwrq->disabled = 1;
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get wireless statistics */
+static int ks_wlan_get_iwstats(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_quality *vwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ vwrq->qual = 0; /* not supported */
+ vwrq->level = priv->wstats.qual.level;
+ vwrq->noise = 0; /* not supported */
+ vwrq->updated = 0;
+
+ return 0;
+}
+
+#ifndef KSC_OPNOTSUPP
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set Sensitivity */
+static int ks_wlan_set_sens(struct net_device *dev,
+ struct iw_request_info *info, struct iw_param *vwrq,
+ char *extra)
+{
+ return -EOPNOTSUPP; /* Not Support */
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get Sensitivity */
+static int ks_wlan_get_sens(struct net_device *dev,
+ struct iw_request_info *info, struct iw_param *vwrq,
+ char *extra)
+{
+ /* Not Support */
+ vwrq->value = 0;
+ vwrq->disabled = (vwrq->value == 0);
+ vwrq->fixed = 1;
+ return 0;
+}
+#endif /* KSC_OPNOTSUPP */
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get AP List */
+/* Note : this is deprecated in favor of IWSCAN */
+static int ks_wlan_get_aplist(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ struct sockaddr *address = (struct sockaddr *)extra;
+ struct iw_quality qual[LOCAL_APLIST_MAX];
+
+ int i;
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ for (i = 0; i < priv->aplist.size; i++) {
+ memcpy(address[i].sa_data, &(priv->aplist.ap[i].bssid[0]),
+ ETH_ALEN);
+ address[i].sa_family = ARPHRD_ETHER;
+ qual[i].level = 256 - priv->aplist.ap[i].rssi;
+ qual[i].qual = priv->aplist.ap[i].sq;
+ qual[i].noise = 0; /* invalid noise value */
+ qual[i].updated = 7;
+ }
+ if (i) {
+ dwrq->flags = 1; /* Should be define'd */
+ memcpy(extra + sizeof(struct sockaddr) * i,
+ &qual, sizeof(struct iw_quality) * i);
+ }
+ dwrq->length = i;
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : Initiate Scan */
+static int ks_wlan_set_scan(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ struct iw_scan_req *req = NULL;
+ DPRINTK(2, "\n");
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+
+ /* for SLEEP MODE */
+ /* specified SSID SCAN */
+	if (wrqu->data.length == sizeof(struct iw_scan_req)
+	    && wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+		req = (struct iw_scan_req *)extra;
+		/* reject over-long requests instead of overflowing
+		 * priv->scan_ssid */
+		if (req->essid_len > sizeof(priv->scan_ssid))
+			return -EINVAL;
+		priv->scan_ssid_len = req->essid_len;
+		memcpy(priv->scan_ssid, req->essid, priv->scan_ssid_len);
+	} else {
+		priv->scan_ssid_len = 0;
+	}
+
+ priv->sme_i.sme_flag |= SME_AP_SCAN;
+ hostif_sme_enqueue(priv, SME_BSS_SCAN_REQUEST);
+
+ /* At this point, just return to the user. */
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Translate scan data returned from the card to a card independent
+ * format that the Wireless Tools will understand - Jean II
+ */
+static inline char *ks_wlan_translate_scan(struct net_device *dev,
+ struct iw_request_info *info,
+ char *current_ev, char *end_buf,
+ struct local_ap_t *ap)
+{
+ /* struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv; */
+ struct iw_event iwe; /* Temporary buffer */
+ u16 capabilities;
+ char *current_val; /* For rates */
+ int i;
+ static const char rsn_leader[] = "rsn_ie=";
+ static const char wpa_leader[] = "wpa_ie=";
+ char buf0[RSN_IE_BODY_MAX * 2 + 30];
+ char buf1[RSN_IE_BODY_MAX * 2 + 30];
+ char *pbuf;
+ /* First entry *MUST* be the AP MAC address */
+ iwe.cmd = SIOCGIWAP;
+ iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+ memcpy(iwe.u.ap_addr.sa_data, ap->bssid, ETH_ALEN);
+ current_ev =
+ iwe_stream_add_event(info, current_ev, end_buf, &iwe,
+ IW_EV_ADDR_LEN);
+
+ /* Other entries will be displayed in the order we give them */
+
+ /* Add the ESSID */
+ iwe.u.data.length = ap->ssid.size;
+ if (iwe.u.data.length > 32)
+ iwe.u.data.length = 32;
+ iwe.cmd = SIOCGIWESSID;
+ iwe.u.data.flags = 1;
+ current_ev =
+ iwe_stream_add_point(info, current_ev, end_buf, &iwe,
+ &(ap->ssid.body[0]));
+
+ /* Add mode */
+ iwe.cmd = SIOCGIWMODE;
+ capabilities = le16_to_cpu(ap->capability);
+ if (capabilities & (BSS_CAP_ESS | BSS_CAP_IBSS)) {
+ if (capabilities & BSS_CAP_ESS)
+ iwe.u.mode = IW_MODE_INFRA;
+ else
+ iwe.u.mode = IW_MODE_ADHOC;
+ current_ev =
+ iwe_stream_add_event(info, current_ev, end_buf, &iwe,
+ IW_EV_UINT_LEN);
+ }
+
+ /* Add frequency */
+ iwe.cmd = SIOCGIWFREQ;
+ iwe.u.freq.m = ap->channel;
+ iwe.u.freq.m = frequency_list[iwe.u.freq.m - 1] * 100000;
+ iwe.u.freq.e = 1;
+ current_ev =
+ iwe_stream_add_event(info, current_ev, end_buf, &iwe,
+ IW_EV_FREQ_LEN);
+
+ /* Add quality statistics */
+ iwe.cmd = IWEVQUAL;
+ iwe.u.qual.level = 256 - ap->rssi;
+ iwe.u.qual.qual = ap->sq;
+ iwe.u.qual.noise = 0; /* invalid noise value */
+ current_ev =
+ iwe_stream_add_event(info, current_ev, end_buf, &iwe,
+ IW_EV_QUAL_LEN);
+
+ /* Add encryption capability */
+ iwe.cmd = SIOCGIWENCODE;
+ if (capabilities & BSS_CAP_PRIVACY)
+ iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+ else
+ iwe.u.data.flags = IW_ENCODE_DISABLED;
+ iwe.u.data.length = 0;
+ current_ev =
+ iwe_stream_add_point(info, current_ev, end_buf, &iwe,
+ &(ap->ssid.body[0]));
+
+	/* Rate : stuffing multiple values in a single event requires a bit
+	 * more magic - Jean II */
+ current_val = current_ev + IW_EV_LCP_LEN;
+
+ iwe.cmd = SIOCGIWRATE;
+ /* Those two flags are ignored... */
+ iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+
+ /* Max 16 values */
+ for (i = 0; i < 16; i++) {
+ /* NULL terminated */
+ if (i >= ap->rate_set.size)
+ break;
+ /* Bit rate given in 500 kb/s units (+ 0x80) */
+ iwe.u.bitrate.value = ((ap->rate_set.body[i] & 0x7f) * 500000);
+ /* Add new value to event */
+ current_val =
+ iwe_stream_add_value(info, current_ev, current_val, end_buf,
+ &iwe, IW_EV_PARAM_LEN);
+ }
+ /* Check if we added any event */
+ if ((current_val - current_ev) > IW_EV_LCP_LEN)
+ current_ev = current_val;
+
+#define GENERIC_INFO_ELEM_ID 0xdd
+#define RSN_INFO_ELEM_ID 0x30
+ if (ap->rsn_ie.id == RSN_INFO_ELEM_ID && ap->rsn_ie.size != 0) {
+ pbuf = &buf0[0];
+ memset(&iwe, 0, sizeof(iwe));
+ iwe.cmd = IWEVCUSTOM;
+ memcpy(buf0, rsn_leader, sizeof(rsn_leader) - 1);
+ iwe.u.data.length += sizeof(rsn_leader) - 1;
+ pbuf += sizeof(rsn_leader) - 1;
+
+ pbuf += sprintf(pbuf, "%02x", ap->rsn_ie.id);
+ pbuf += sprintf(pbuf, "%02x", ap->rsn_ie.size);
+ iwe.u.data.length += 4;
+
+ for (i = 0; i < ap->rsn_ie.size; i++)
+ pbuf += sprintf(pbuf, "%02x", ap->rsn_ie.body[i]);
+ iwe.u.data.length += (ap->rsn_ie.size) * 2;
+
+ DPRINTK(4, "ap->rsn.size=%d\n", ap->rsn_ie.size);
+
+ current_ev =
+ iwe_stream_add_point(info, current_ev, end_buf, &iwe,
+ &buf0[0]);
+ }
+ if (ap->wpa_ie.id == GENERIC_INFO_ELEM_ID && ap->wpa_ie.size != 0) {
+ pbuf = &buf1[0];
+ memset(&iwe, 0, sizeof(iwe));
+ iwe.cmd = IWEVCUSTOM;
+ memcpy(buf1, wpa_leader, sizeof(wpa_leader) - 1);
+ iwe.u.data.length += sizeof(wpa_leader) - 1;
+ pbuf += sizeof(wpa_leader) - 1;
+
+ pbuf += sprintf(pbuf, "%02x", ap->wpa_ie.id);
+ pbuf += sprintf(pbuf, "%02x", ap->wpa_ie.size);
+ iwe.u.data.length += 4;
+
+ for (i = 0; i < ap->wpa_ie.size; i++)
+ pbuf += sprintf(pbuf, "%02x", ap->wpa_ie.body[i]);
+ iwe.u.data.length += (ap->wpa_ie.size) * 2;
+
+	DPRINTK(4, "ap->wpa_ie.size=%d\n", ap->wpa_ie.size);
+ DPRINTK(4, "iwe.u.data.length=%d\n", iwe.u.data.length);
+
+ current_ev =
+ iwe_stream_add_point(info, current_ev, end_buf, &iwe,
+ &buf1[0]);
+ }
+
+ /* The other data in the scan result are not really
+ * interesting, so for now drop it - Jean II */
+ return current_ev;
+}
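+
+/*
+ * Each iwe_stream_add_* helper above appends one event to the user buffer
+ * and returns the advanced write pointer (left unchanged when the event
+ * does not fit).  ks_wlan_get_scan below checks the remaining room before
+ * each entry and turns an overflow into -E2BIG so that user space can
+ * retry with a larger buffer.
+ */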
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : Read Scan Results */
+static int ks_wlan_get_scan(struct net_device *dev,
+ struct iw_request_info *info, struct iw_point *dwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ int i;
+ char *current_ev = extra;
+ DPRINTK(2, "\n");
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (priv->sme_i.sme_flag & SME_AP_SCAN) {
+ DPRINTK(2, "flag AP_SCAN\n");
+ return -EAGAIN;
+ }
+
+ if (priv->aplist.size == 0) {
+ /* Client error, no scan results...
+		 * The caller needs to restart the scan. */
+ DPRINTK(2, "aplist 0\n");
+ return -ENODATA;
+ }
+#if 0
+ /* current connect ap */
+ if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+ if ((extra + dwrq->length) - current_ev <= IW_EV_ADDR_LEN) {
+ dwrq->length = 0;
+ return -E2BIG;
+ }
+ current_ev = ks_wlan_translate_scan(dev, current_ev,
+// extra + IW_SCAN_MAX_DATA,
+ extra + dwrq->length,
+ &(priv->current_ap));
+ }
+#endif
+ /* Read and parse all entries */
+ for (i = 0; i < priv->aplist.size; i++) {
+ if ((extra + dwrq->length) - current_ev <= IW_EV_ADDR_LEN) {
+ dwrq->length = 0;
+ return -E2BIG;
+ }
+ /* Translate to WE format this entry */
+ current_ev = ks_wlan_translate_scan(dev, info, current_ev,
+// extra + IW_SCAN_MAX_DATA,
+ extra + dwrq->length,
+ &(priv->aplist.ap[i]));
+ }
+ /* Length of data */
+ dwrq->length = (current_ev - extra);
+ dwrq->flags = 0;
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Commit handler : called after a bunch of SET operations */
+static int ks_wlan_config_commit(struct net_device *dev,
+ struct iw_request_info *info, void *zwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (!priv->need_commit)
+ return 0;
+
+ ks_wlan_setup_parameter(priv, priv->need_commit);
+ priv->need_commit = 0;
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless handler : set association ie params */
+static int ks_wlan_set_genie(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ DPRINTK(2, "\n");
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ return 0;
+// return -EOPNOTSUPP;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless handler : set authentication mode params */
+static int ks_wlan_set_auth_mode(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ int index = (vwrq->flags & IW_AUTH_INDEX);
+ int value = vwrq->value;
+
+ DPRINTK(2, "index=%d:value=%08X\n", index, value);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ switch (index) {
+ case IW_AUTH_WPA_VERSION: /* 0 */
+ switch (value) {
+ case IW_AUTH_WPA_VERSION_DISABLED:
+ priv->wpa.version = value;
+ if (priv->wpa.rsn_enabled) {
+ priv->wpa.rsn_enabled = 0;
+ }
+ priv->need_commit |= SME_RSN;
+ break;
+ case IW_AUTH_WPA_VERSION_WPA:
+ case IW_AUTH_WPA_VERSION_WPA2:
+ priv->wpa.version = value;
+ if (!(priv->wpa.rsn_enabled)) {
+ priv->wpa.rsn_enabled = 1;
+ }
+ priv->need_commit |= SME_RSN;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case IW_AUTH_CIPHER_PAIRWISE: /* 1 */
+ switch (value) {
+ case IW_AUTH_CIPHER_NONE:
+ if (priv->reg.privacy_invoked) {
+ priv->reg.privacy_invoked = 0x00;
+ priv->need_commit |= SME_WEP_FLAG;
+ }
+ break;
+ case IW_AUTH_CIPHER_WEP40:
+ case IW_AUTH_CIPHER_TKIP:
+ case IW_AUTH_CIPHER_CCMP:
+ case IW_AUTH_CIPHER_WEP104:
+ if (!priv->reg.privacy_invoked) {
+ priv->reg.privacy_invoked = 0x01;
+ priv->need_commit |= SME_WEP_FLAG;
+ }
+ priv->wpa.pairwise_suite = value;
+ priv->need_commit |= SME_RSN_UNICAST;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case IW_AUTH_CIPHER_GROUP: /* 2 */
+ switch (value) {
+ case IW_AUTH_CIPHER_NONE:
+ if (priv->reg.privacy_invoked) {
+ priv->reg.privacy_invoked = 0x00;
+ priv->need_commit |= SME_WEP_FLAG;
+ }
+ break;
+ case IW_AUTH_CIPHER_WEP40:
+ case IW_AUTH_CIPHER_TKIP:
+ case IW_AUTH_CIPHER_CCMP:
+ case IW_AUTH_CIPHER_WEP104:
+ if (!priv->reg.privacy_invoked) {
+ priv->reg.privacy_invoked = 0x01;
+ priv->need_commit |= SME_WEP_FLAG;
+ }
+ priv->wpa.group_suite = value;
+ priv->need_commit |= SME_RSN_MULTICAST;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case IW_AUTH_KEY_MGMT: /* 3 */
+ switch (value) {
+ case IW_AUTH_KEY_MGMT_802_1X:
+ case IW_AUTH_KEY_MGMT_PSK:
+ case 0: /* NONE or 802_1X_NO_WPA */
+ case 4: /* WPA_NONE */
+ priv->wpa.key_mgmt_suite = value;
+ priv->need_commit |= SME_RSN_AUTH;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case IW_AUTH_80211_AUTH_ALG: /* 6 */
+ switch (value) {
+ case IW_AUTH_ALG_OPEN_SYSTEM:
+ priv->wpa.auth_alg = value;
+ priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM;
+ break;
+ case IW_AUTH_ALG_SHARED_KEY:
+ priv->wpa.auth_alg = value;
+ priv->reg.authenticate_type = AUTH_TYPE_SHARED_KEY;
+ break;
+ case IW_AUTH_ALG_LEAP:
+ default:
+ return -EOPNOTSUPP;
+ }
+ priv->need_commit |= SME_MODE_SET;
+ break;
+ case IW_AUTH_WPA_ENABLED: /* 7 */
+ priv->wpa.wpa_enabled = value;
+ break;
+ case IW_AUTH_PRIVACY_INVOKED: /* 10 */
+ if ((value && !priv->reg.privacy_invoked) ||
+ (!value && priv->reg.privacy_invoked)) {
+ priv->reg.privacy_invoked = value ? 0x01 : 0x00;
+ priv->need_commit |= SME_WEP_FLAG;
+ }
+ break;
+ case IW_AUTH_RX_UNENCRYPTED_EAPOL: /* 4 */
+ case IW_AUTH_TKIP_COUNTERMEASURES: /* 5 */
+ case IW_AUTH_DROP_UNENCRYPTED: /* 8 */
+ case IW_AUTH_ROAMING_CONTROL: /* 9 */
+ default:
+ break;
+ }
+
+ /* return -EINPROGRESS; */
+ if (priv->need_commit) {
+ ks_wlan_setup_parameter(priv, priv->need_commit);
+ priv->need_commit = 0;
+ }
+ return 0;
+}
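+
+/*
+ * SIOCSIWAUTH multiplexes several parameters through a single request:
+ * the low bits of vwrq->flags (IW_AUTH_INDEX) select the parameter and
+ * vwrq->value carries its setting, which is why the switch above keys on
+ * index and the numeric comments mirror the IW_AUTH_* parameter numbers.
+ */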
+
+/*------------------------------------------------------------------*/
+/* Wireless handler : get authentication mode params */
+static int ks_wlan_get_auth_mode(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *vwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ int index = (vwrq->flags & IW_AUTH_INDEX);
+ DPRINTK(2, "index=%d\n", index);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+
+ /* for SLEEP MODE */
+ /* WPA (not used ?? wpa_supplicant) */
+ switch (index) {
+ case IW_AUTH_WPA_VERSION:
+ vwrq->value = priv->wpa.version;
+ break;
+ case IW_AUTH_CIPHER_PAIRWISE:
+ vwrq->value = priv->wpa.pairwise_suite;
+ break;
+ case IW_AUTH_CIPHER_GROUP:
+ vwrq->value = priv->wpa.group_suite;
+ break;
+ case IW_AUTH_KEY_MGMT:
+ vwrq->value = priv->wpa.key_mgmt_suite;
+ break;
+ case IW_AUTH_80211_AUTH_ALG:
+ vwrq->value = priv->wpa.auth_alg;
+ break;
+ case IW_AUTH_WPA_ENABLED:
+ vwrq->value = priv->wpa.rsn_enabled;
+ break;
+ case IW_AUTH_RX_UNENCRYPTED_EAPOL: /* OK??? */
+ case IW_AUTH_TKIP_COUNTERMEASURES:
+ case IW_AUTH_DROP_UNENCRYPTED:
+ default:
+ /* return -EOPNOTSUPP; */
+ break;
+ }
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set encoding token & mode (WPA)*/
+static int ks_wlan_set_encode_ext(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ struct iw_encode_ext *enc;
+ int index = dwrq->flags & IW_ENCODE_INDEX;
+ unsigned int commit = 0;
+
+	enc = (struct iw_encode_ext *)extra;
+	if (!enc)
+		return -EINVAL;
+
+	DPRINTK(2, "flags=%04X:: ext_flags=%08X\n", dwrq->flags,
+		enc->ext_flags);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (index < 1 || index > 4)
+ return -EINVAL;
+ else
+ index--;
+
+ if (dwrq->flags & IW_ENCODE_DISABLED) {
+ priv->wpa.key[index].key_len = 0;
+ }
+
+ if (enc) {
+ priv->wpa.key[index].ext_flags = enc->ext_flags;
+ if (enc->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+ priv->wpa.txkey = index;
+ commit |= SME_WEP_INDEX;
+ } else if (enc->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
+ memcpy(&priv->wpa.key[index].rx_seq[0],
+ enc->rx_seq, IW_ENCODE_SEQ_MAX_SIZE);
+ }
+
+ memcpy(&priv->wpa.key[index].addr.sa_data[0],
+ &enc->addr.sa_data[0], ETH_ALEN);
+
+ switch (enc->alg) {
+ case IW_ENCODE_ALG_NONE:
+ if (priv->reg.privacy_invoked) {
+ priv->reg.privacy_invoked = 0x00;
+ commit |= SME_WEP_FLAG;
+ }
+ priv->wpa.key[index].key_len = 0;
+
+ break;
+ case IW_ENCODE_ALG_WEP:
+ case IW_ENCODE_ALG_CCMP:
+ if (!priv->reg.privacy_invoked) {
+ priv->reg.privacy_invoked = 0x01;
+ commit |= SME_WEP_FLAG;
+ }
+ if (enc->key_len) {
+ memcpy(&priv->wpa.key[index].key_val[0],
+ &enc->key[0], enc->key_len);
+ priv->wpa.key[index].key_len = enc->key_len;
+ commit |= (SME_WEP_VAL1 << index);
+ }
+ break;
+ case IW_ENCODE_ALG_TKIP:
+ if (!priv->reg.privacy_invoked) {
+ priv->reg.privacy_invoked = 0x01;
+ commit |= SME_WEP_FLAG;
+ }
+ if (enc->key_len == 32) {
+ memcpy(&priv->wpa.key[index].key_val[0],
+ &enc->key[0], enc->key_len - 16);
+ priv->wpa.key[index].key_len =
+ enc->key_len - 16;
+ if (priv->wpa.key_mgmt_suite == 4) { /* WPA_NONE */
+ memcpy(&priv->wpa.key[index].
+ tx_mic_key[0], &enc->key[16], 8);
+ memcpy(&priv->wpa.key[index].
+ rx_mic_key[0], &enc->key[16], 8);
+ } else {
+ memcpy(&priv->wpa.key[index].
+ tx_mic_key[0], &enc->key[16], 8);
+ memcpy(&priv->wpa.key[index].
+ rx_mic_key[0], &enc->key[24], 8);
+ }
+ commit |= (SME_WEP_VAL1 << index);
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ priv->wpa.key[index].alg = enc->alg;
+ } else
+ return -EINVAL;
+
+ if (commit) {
+ if (commit & SME_WEP_INDEX)
+ hostif_sme_enqueue(priv, SME_SET_TXKEY);
+ if (commit & SME_WEP_VAL_MASK)
+ hostif_sme_enqueue(priv, SME_SET_KEY1 + index);
+ if (commit & SME_WEP_FLAG)
+ hostif_sme_enqueue(priv, SME_WEP_FLAG_REQUEST);
+ }
+
+ return 0;
+}
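+
+/*
+ * TKIP key layout handled above, for reference: wpa_supplicant hands in a
+ * 32-octet blob of which octets 0-15 are the temporal key, 16-23 the
+ * transmit MIC key and 24-31 the receive MIC key.  For WPA-NONE
+ * (key_mgmt_suite == 4, ad-hoc) both peers derive the same key, so the
+ * TX MIC key at octets 16-23 is used for both directions.
+ */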
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get encoding token & mode (WPA)*/
+static int ks_wlan_get_encode_ext(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+
+ /* for SLEEP MODE */
+	/* Key material is write-only from user space: nothing is reported
+	 * back here (wpa_supplicant does not rely on this handler). */
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : PMKSA cache operation (WPA2) */
+static int ks_wlan_set_pmksa(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ struct iw_pmksa *pmksa;
+ int i;
+ struct pmk_t *pmk;
+ struct list_head *ptr;
+
+ DPRINTK(2, "\n");
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (!extra) {
+ return -EINVAL;
+ }
+ pmksa = (struct iw_pmksa *)extra;
+ DPRINTK(2, "cmd=%d\n", pmksa->cmd);
+
+ switch (pmksa->cmd) {
+ case IW_PMKSA_ADD:
+ if (list_empty(&priv->pmklist.head)) { /* new list */
+ for (i = 0; i < PMK_LIST_MAX; i++) {
+ pmk = &priv->pmklist.pmk[i];
+ if (!memcmp
+ ("\x00\x00\x00\x00\x00\x00", pmk->bssid,
+ ETH_ALEN))
+ break;
+ }
+ memcpy(pmk->bssid, pmksa->bssid.sa_data, ETH_ALEN);
+ memcpy(pmk->pmkid, pmksa->pmkid, IW_PMKID_LEN);
+ list_add(&pmk->list, &priv->pmklist.head);
+ priv->pmklist.size++;
+ } else { /* search cache data */
+ list_for_each(ptr, &priv->pmklist.head) {
+ pmk = list_entry(ptr, struct pmk_t, list);
+				if (!memcmp(pmksa->bssid.sa_data, pmk->bssid, ETH_ALEN)) {	/* match: move the entry to the list head */
+ memcpy(pmk->pmkid, pmksa->pmkid,
+ IW_PMKID_LEN);
+ list_move(&pmk->list,
+ &priv->pmklist.head);
+ break;
+ }
+ }
+			if (ptr == &priv->pmklist.head) {	/* no matching entry found */
+ if (PMK_LIST_MAX > priv->pmklist.size) { /* new cache data */
+ for (i = 0; i < PMK_LIST_MAX; i++) {
+ pmk = &priv->pmklist.pmk[i];
+ if (!memcmp
+ ("\x00\x00\x00\x00\x00\x00",
+ pmk->bssid, ETH_ALEN))
+ break;
+ }
+ memcpy(pmk->bssid, pmksa->bssid.sa_data,
+ ETH_ALEN);
+ memcpy(pmk->pmkid, pmksa->pmkid,
+ IW_PMKID_LEN);
+ list_add(&pmk->list,
+ &priv->pmklist.head);
+ priv->pmklist.size++;
+ } else { /* overwrite old cache data */
+ pmk =
+ list_entry(priv->pmklist.head.prev,
+ struct pmk_t, list);
+ memcpy(pmk->bssid, pmksa->bssid.sa_data,
+ ETH_ALEN);
+ memcpy(pmk->pmkid, pmksa->pmkid,
+ IW_PMKID_LEN);
+ list_move(&pmk->list,
+ &priv->pmklist.head);
+ }
+ }
+ }
+ break;
+ case IW_PMKSA_REMOVE:
+ if (list_empty(&priv->pmklist.head)) { /* list empty */
+ return -EINVAL;
+ } else { /* search cache data */
+ list_for_each(ptr, &priv->pmklist.head) {
+ pmk = list_entry(ptr, struct pmk_t, list);
+				if (!memcmp(pmksa->bssid.sa_data, pmk->bssid, ETH_ALEN)) {	/* match: remove the entry */
+ memset(pmk->bssid, 0, ETH_ALEN);
+ memset(pmk->pmkid, 0, IW_PMKID_LEN);
+ list_del_init(&pmk->list);
+ break;
+ }
+ }
+			if (ptr == &priv->pmklist.head) {	/* no matching entry found */
+ return 0;
+ }
+ }
+ break;
+ case IW_PMKSA_FLUSH:
+ memset(&(priv->pmklist), 0, sizeof(priv->pmklist));
+ INIT_LIST_HEAD(&priv->pmklist.head);
+ for (i = 0; i < PMK_LIST_MAX; i++)
+ INIT_LIST_HEAD(&priv->pmklist.pmk[i].list);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ hostif_sme_enqueue(priv, SME_SET_PMKSA);
+ return 0;
+}
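+
+/*
+ * The PMKSA list above behaves like a small LRU cache: the most recently
+ * added or refreshed PMKID sits at the list head, and once PMK_LIST_MAX
+ * entries exist a new BSSID overwrites the entry at head.prev, i.e. the
+ * least recently used one.
+ */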
+
+static struct iw_statistics *ks_get_wireless_stats(struct net_device *dev)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ struct iw_statistics *wstats = &priv->wstats;
+
+ if (!atomic_read(&update_phyinfo)) {
+ if (priv->dev_state < DEVICE_STATE_READY)
+ return NULL; /* not finished initialize */
+ else
+ return wstats;
+ }
+
+ /* Packets discarded in the wireless adapter due to wireless
+ * specific problems */
+ wstats->discard.nwid = 0; /* Rx invalid nwid */
+ wstats->discard.code = 0; /* Rx invalid crypt */
+ wstats->discard.fragment = 0; /* Rx invalid frag */
+ wstats->discard.retries = 0; /* Tx excessive retries */
+ wstats->discard.misc = 0; /* Invalid misc */
+ wstats->miss.beacon = 0; /* Missed beacon */
+
+ return wstats;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : set stop request */
+static int ks_wlan_set_stop_request(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ DPRINTK(2, "\n");
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (!(*uwrq))
+ return -EINVAL;
+
+ hostif_sme_enqueue(priv, SME_STOP_REQUEST);
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set MLME */
+#include <linux/ieee80211.h>
+static int ks_wlan_set_mlme(struct net_device *dev,
+ struct iw_request_info *info, struct iw_point *dwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ struct iw_mlme *mlme = (struct iw_mlme *)extra;
+ __u32 mode;
+
+ DPRINTK(2, ":%d :%d\n", mlme->cmd, mlme->reason_code);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ switch (mlme->cmd) {
+ case IW_MLME_DEAUTH:
+ if (mlme->reason_code == WLAN_REASON_MIC_FAILURE) {
+ return 0;
+ }
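+		/* fall through - a deauth for any other reason is handled
+		 * like a disassociation */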
+ case IW_MLME_DISASSOC:
+ mode = 1;
+ return ks_wlan_set_stop_request(dev, NULL, &mode, NULL);
+ default:
+ return -EOPNOTSUPP; /* Not Support */
+ }
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get firmware version */
+static int ks_wlan_get_firmware_version(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ strcpy(extra, &(priv->firmware_version[0]));
+ dwrq->length = priv->version_size + 1;
+ return 0;
+}
+
+#if 0
+/*------------------------------------------------------------------*/
+/* Private handler : set force disconnect status */
+static int ks_wlan_set_detach(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (*uwrq == CONNECT_STATUS) { /* 0 */
+ priv->connect_status &= ~FORCE_DISCONNECT;
+ if ((priv->connect_status & CONNECT_STATUS_MASK) ==
+ CONNECT_STATUS)
+ netif_carrier_on(dev);
+ } else if (*uwrq == DISCONNECT_STATUS) { /* 1 */
+ priv->connect_status |= FORCE_DISCONNECT;
+ netif_carrier_off(dev);
+ } else
+ return -EINVAL;
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get force disconnect status */
+static int ks_wlan_get_detach(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ *uwrq = ((priv->connect_status & FORCE_DISCONNECT) ? 1 : 0);
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get connect status */
+static int ks_wlan_get_connect(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ *uwrq = (priv->connect_status & CONNECT_STATUS_MASK);
+ return 0;
+}
+#endif
+
+/*------------------------------------------------------------------*/
+/* Private handler : set preamble */
+static int ks_wlan_set_preamble(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (*uwrq == LONG_PREAMBLE) { /* 0 */
+ priv->reg.preamble = LONG_PREAMBLE;
+ } else if (*uwrq == SHORT_PREAMBLE) { /* 1 */
+ priv->reg.preamble = SHORT_PREAMBLE;
+ } else
+ return -EINVAL;
+
+ priv->need_commit |= SME_MODE_SET;
+ return -EINPROGRESS; /* Call commit handler */
+
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get preamble */
+static int ks_wlan_get_preamble(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ *uwrq = priv->reg.preamble;
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : set power save mode */
+static int ks_wlan_set_powermgt(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (*uwrq == POWMGT_ACTIVE_MODE) { /* 0 */
+ priv->reg.powermgt = POWMGT_ACTIVE_MODE;
+ } else if (*uwrq == POWMGT_SAVE1_MODE) { /* 1 */
+ if (priv->reg.operation_mode == MODE_INFRASTRUCTURE)
+ priv->reg.powermgt = POWMGT_SAVE1_MODE;
+ else
+ return -EINVAL;
+ } else if (*uwrq == POWMGT_SAVE2_MODE) { /* 2 */
+ if (priv->reg.operation_mode == MODE_INFRASTRUCTURE)
+ priv->reg.powermgt = POWMGT_SAVE2_MODE;
+ else
+ return -EINVAL;
+ } else
+ return -EINVAL;
+
+ hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST);
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get power save mode */
+static int ks_wlan_get_powermgt(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ *uwrq = priv->reg.powermgt;
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : set scan type */
+static int ks_wlan_set_scan_type(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (*uwrq == ACTIVE_SCAN) { /* 0 */
+ priv->reg.scan_type = ACTIVE_SCAN;
+ } else if (*uwrq == PASSIVE_SCAN) { /* 1 */
+ priv->reg.scan_type = PASSIVE_SCAN;
+ } else
+ return -EINVAL;
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get scan type */
+static int ks_wlan_get_scan_type(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ *uwrq = priv->reg.scan_type;
+ return 0;
+}
+
+#if 0
+/*------------------------------------------------------------------*/
+/* Private handler : write raw data to device */
+static int ks_wlan_data_write(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
+ unsigned char *wbuff = NULL;
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ wbuff = (unsigned char *)kmalloc(dwrq->length, GFP_ATOMIC);
+ if (!wbuff)
+ return -EFAULT;
+ memcpy(wbuff, extra, dwrq->length);
+
+ /* write to device */
+ ks_wlan_hw_tx(priv, wbuff, dwrq->length, NULL, NULL, NULL);
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : read raw data form device */
+static int ks_wlan_data_read(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
+ unsigned short read_length;
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (!atomic_read(&priv->event_count)) {
+ if (priv->dev_state < DEVICE_STATE_BOOT) { /* Remove device */
+ read_length = 4;
+ memset(extra, 0xff, read_length);
+ dwrq->length = read_length;
+ return 0;
+ }
+ read_length = 0;
+ memset(extra, 0, 1);
+ dwrq->length = 0;
+ return 0;
+ }
+
+ if (atomic_read(&priv->event_count) > 0)
+ atomic_dec(&priv->event_count);
+
+ spin_lock(&priv->dev_read_lock); /* request spin lock */
+
+ /* Copy length max size 0x07ff */
+ if (priv->dev_size[priv->dev_count] > 2047)
+ read_length = 2047;
+ else
+ read_length = priv->dev_size[priv->dev_count];
+
+ /* Copy data */
+ memcpy(extra, &(priv->dev_data[priv->dev_count][0]), read_length);
+
+ spin_unlock(&priv->dev_read_lock); /* release spin lock */
+
+ /* Initialize */
+ priv->dev_data[priv->dev_count] = 0;
+ priv->dev_size[priv->dev_count] = 0;
+
+ priv->dev_count++;
+ if (priv->dev_count == DEVICE_STOCK_COUNT)
+ priv->dev_count = 0;
+
+ /* Set read size */
+ dwrq->length = read_length;
+
+ return 0;
+}
+#endif
+
+#if 0
+/*------------------------------------------------------------------*/
+/* Private handler : get wep string */
+#define WEP_ASCII_BUFF_SIZE (17+64*4+1)
+static int ks_wlan_get_wep_ascii(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
+ int i, j, len = 0;
+ char tmp[WEP_ASCII_BUFF_SIZE];
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ strcpy(tmp, " WEP keys ASCII \n");
+ len += strlen(" WEP keys ASCII \n");
+
+ for (i = 0; i < 4; i++) {
+ strcpy(tmp + len, "\t[");
+ len += strlen("\t[");
+ tmp[len] = '1' + i;
+ len++;
+ strcpy(tmp + len, "] ");
+ len += strlen("] ");
+ if (priv->reg.wep_key[i].size) {
+ strcpy(tmp + len,
+ (priv->reg.wep_key[i].size <
+ 6 ? "(40bits) [" : "(104bits) ["));
+ len +=
+ strlen((priv->reg.wep_key[i].size <
+ 6 ? "(40bits) [" : "(104bits) ["));
+ for (j = 0; j < priv->reg.wep_key[i].size; j++, len++)
+ tmp[len] =
+ (isprint(priv->reg.wep_key[i].val[j]) ?
+ priv->reg.wep_key[i].val[j] : ' ');
+
+ strcpy(tmp + len, "]\n");
+ len += strlen("]\n");
+ } else {
+ strcpy(tmp + len, "off\n");
+ len += strlen("off\n");
+ }
+ }
+
+ memcpy(extra, tmp, len);
+ dwrq->length = len + 1;
+ return 0;
+}
+#endif
+
+/*------------------------------------------------------------------*/
+/* Private handler : set beacon lost count */
+static int ks_wlan_set_beacon_lost(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (*uwrq >= BEACON_LOST_COUNT_MIN && *uwrq <= BEACON_LOST_COUNT_MAX) {
+ priv->reg.beacon_lost_count = *uwrq;
+ } else
+ return -EINVAL;
+
+ if (priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
+ priv->need_commit |= SME_MODE_SET;
+ return -EINPROGRESS; /* Call commit handler */
+ } else
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get beacon lost count */
+static int ks_wlan_get_beacon_lost(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ *uwrq = priv->reg.beacon_lost_count;
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : set phy type */
+static int ks_wlan_set_phy_type(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (*uwrq == D_11B_ONLY_MODE) { /* 0 */
+ priv->reg.phy_type = D_11B_ONLY_MODE;
+ } else if (*uwrq == D_11G_ONLY_MODE) { /* 1 */
+ priv->reg.phy_type = D_11G_ONLY_MODE;
+ } else if (*uwrq == D_11BG_COMPATIBLE_MODE) { /* 2 */
+ priv->reg.phy_type = D_11BG_COMPATIBLE_MODE;
+ } else
+ return -EINVAL;
+
+ priv->need_commit |= SME_MODE_SET;
+ return -EINPROGRESS; /* Call commit handler */
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get phy type */
+static int ks_wlan_get_phy_type(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ *uwrq = priv->reg.phy_type;
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : set cts mode */
+static int ks_wlan_set_cts_mode(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (*uwrq == CTS_MODE_FALSE) { /* 0 */
+ priv->reg.cts_mode = CTS_MODE_FALSE;
+ } else if (*uwrq == CTS_MODE_TRUE) { /* 1 */
+ if (priv->reg.phy_type == D_11G_ONLY_MODE ||
+ priv->reg.phy_type == D_11BG_COMPATIBLE_MODE)
+ priv->reg.cts_mode = CTS_MODE_TRUE;
+ else
+ priv->reg.cts_mode = CTS_MODE_FALSE;
+ } else
+ return -EINVAL;
+
+ priv->need_commit |= SME_MODE_SET;
+ return -EINPROGRESS; /* Call commit handler */
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get cts mode */
+static int ks_wlan_get_cts_mode(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ *uwrq = priv->reg.cts_mode;
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : set sleep mode */
+static int ks_wlan_set_sleep_mode(struct net_device *dev,
+ struct iw_request_info *info,
+ __u32 * uwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ DPRINTK(2, "\n");
+
+ if (*uwrq == SLP_SLEEP) {
+ priv->sleep_mode = *uwrq;
+ printk("SET_SLEEP_MODE %d\n", priv->sleep_mode);
+
+ hostif_sme_enqueue(priv, SME_STOP_REQUEST);
+ hostif_sme_enqueue(priv, SME_SLEEP_REQUEST);
+
+ } else if (*uwrq == SLP_ACTIVE) {
+ priv->sleep_mode = *uwrq;
+ printk("SET_SLEEP_MODE %d\n", priv->sleep_mode);
+ hostif_sme_enqueue(priv, SME_SLEEP_REQUEST);
+ } else {
+ printk("SET_SLEEP_MODE %d errror\n", *uwrq);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get sleep mode */
+static int ks_wlan_get_sleep_mode(struct net_device *dev,
+ struct iw_request_info *info,
+ __u32 * uwrq, char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ DPRINTK(2, "GET_SLEEP_MODE %d\n", priv->sleep_mode);
+ *uwrq = priv->sleep_mode;
+
+ return 0;
+}
+
+#if 0
+/*------------------------------------------------------------------*/
+/* Private handler : set phy information timer */
+static int ks_wlan_set_phy_information_timer(struct net_device *dev,
+ struct iw_request_info *info,
+ __u32 * uwrq, char *extra)
+{
+ struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (*uwrq >= 0 && *uwrq <= 0xFFFF) /* 0-65535 */
+ priv->reg.phy_info_timer = (uint16_t) * uwrq;
+ else
+ return -EINVAL;
+
+ hostif_sme_enqueue(priv, SME_PHY_INFO_REQUEST);
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get phy information timer */
+static int ks_wlan_get_phy_information_timer(struct net_device *dev,
+ struct iw_request_info *info,
+ __u32 * uwrq, char *extra)
+{
+ struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ *uwrq = priv->reg.phy_info_timer;
+ return 0;
+}
+#endif
+
+#ifdef WPS
+/*------------------------------------------------------------------*/
+/* Private handler : set WPS enable */
+static int ks_wlan_set_wps_enable(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ DPRINTK(2, "\n");
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (*uwrq == 0 || *uwrq == 1)
+ priv->wps.wps_enabled = *uwrq;
+ else
+ return -EINVAL;
+
+ hostif_sme_enqueue(priv, SME_WPS_ENABLE_REQUEST);
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get WPS enable */
+static int ks_wlan_get_wps_enable(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+ DPRINTK(2, "\n");
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ *uwrq = priv->wps.wps_enabled;
+ printk("return=%d\n", *uwrq);
+
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : set WPS probe req */
+static int ks_wlan_set_wps_probe_req(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra)
+{
+ uint8_t *p = extra;
+ unsigned char len;
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ DPRINTK(2, "\n");
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ DPRINTK(2, "dwrq->length=%d\n", dwrq->length);
+
+ /* length check */
+ if (p[1] + 2 != dwrq->length || dwrq->length > 256) {
+ return -EINVAL;
+ }
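+	/*
+	 * Layout built below: wps.ie[0] carries the element length (IE header
+	 * plus body) and the IE itself follows, copied verbatim. Note that
+	 * 'len' is an unsigned char, so the 256-byte maximum the check above
+	 * admits would wrap it to zero.
+	 */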
+
+ priv->wps.ielen = p[1] + 2 + 1; /* IE header + IE + sizeof(len) */
+ len = p[1] + 2; /* IE header + IE */
+
+ memcpy(priv->wps.ie, &len, sizeof(len));
+ p = memcpy(priv->wps.ie + 1, p, len);
+
+ DPRINTK(2, "%d(%#x): %02X %02X %02X %02X ... %02X %02X %02X\n",
+ priv->wps.ielen, priv->wps.ielen, p[0], p[1], p[2], p[3],
+ p[priv->wps.ielen - 3], p[priv->wps.ielen - 2],
+ p[priv->wps.ielen - 1]);
+
+ hostif_sme_enqueue(priv, SME_WPS_PROBE_REQUEST);
+
+ return 0;
+}
+
+#if 0
+/*------------------------------------------------------------------*/
+/* Private handler : get WPS probe req */
+static int ks_wlan_get_wps_probe_req(struct net_device *dev,
+ struct iw_request_info *info,
+ __u32 * uwrq, char *extra)
+{
+ struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
+ DPRINTK(2, "\n");
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ return 0;
+}
+#endif
+#endif /* WPS */
+
+/*------------------------------------------------------------------*/
+/* Private handler : set tx gain control value */
+static int ks_wlan_set_tx_gain(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (*uwrq >= 0 && *uwrq <= 0xFF) /* 0-255 */
+ priv->gain.TxGain = (uint8_t) * uwrq;
+ else
+ return -EINVAL;
+
+ if (priv->gain.TxGain < 0xFF)
+ priv->gain.TxMode = 1;
+ else
+ priv->gain.TxMode = 0;
+
+ hostif_sme_enqueue(priv, SME_SET_GAIN);
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get tx gain control value */
+static int ks_wlan_get_tx_gain(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ *uwrq = priv->gain.TxGain;
+ hostif_sme_enqueue(priv, SME_GET_GAIN);
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : set rx gain control value */
+static int ks_wlan_set_rx_gain(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (*uwrq >= 0 && *uwrq <= 0xFF) /* 0-255 */
+ priv->gain.RxGain = (uint8_t) * uwrq;
+ else
+ return -EINVAL;
+
+ if (priv->gain.RxGain < 0xFF)
+ priv->gain.RxMode = 1;
+ else
+ priv->gain.RxMode = 0;
+
+ hostif_sme_enqueue(priv, SME_SET_GAIN);
+ return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get rx gain control value */
+static int ks_wlan_get_rx_gain(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ *uwrq = priv->gain.RxGain;
+ hostif_sme_enqueue(priv, SME_GET_GAIN);
+ return 0;
+}
+
+#if 0
+/*------------------------------------------------------------------*/
+/* Private handler : set region value */
+static int ks_wlan_set_region(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
+
+ if (priv->sleep_mode == SLP_SLEEP) {
+ return -EPERM;
+ }
+ /* for SLEEP MODE */
+ if (*uwrq >= 0x9 && *uwrq <= 0xF) /* 0x9-0xf */
+ priv->region = (uint8_t) * uwrq;
+ else
+ return -EINVAL;
+
+ hostif_sme_enqueue(priv, SME_SET_REGION);
+ return 0;
+}
+#endif
+
+/*------------------------------------------------------------------*/
+/* Private handler : get eeprom checksum result */
+static int ks_wlan_get_eeprom_cksum(struct net_device *dev,
+ struct iw_request_info *info, __u32 * uwrq,
+ char *extra)
+{
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
+ *uwrq = priv->eeprom_checksum;
+ return 0;
+}
+
+static void print_hif_event(int event)
+{
+ switch (event) {
+ case HIF_DATA_REQ:
+ printk("HIF_DATA_REQ\n");
+ break;
+ case HIF_DATA_IND:
+ printk("HIF_DATA_IND\n");
+ break;
+ case HIF_MIB_GET_REQ:
+ printk("HIF_MIB_GET_REQ\n");
+ break;
+ case HIF_MIB_GET_CONF:
+ printk("HIF_MIB_GET_CONF\n");
+ break;
+ case HIF_MIB_SET_REQ:
+ printk("HIF_MIB_SET_REQ\n");
+ break;
+ case HIF_MIB_SET_CONF:
+ printk("HIF_MIB_SET_CONF\n");
+ break;
+ case HIF_POWERMGT_REQ:
+ printk("HIF_POWERMGT_REQ\n");
+ break;
+ case HIF_POWERMGT_CONF:
+ printk("HIF_POWERMGT_CONF\n");
+ break;
+ case HIF_START_REQ:
+ printk("HIF_START_REQ\n");
+ break;
+ case HIF_START_CONF:
+ printk("HIF_START_CONF\n");
+ break;
+ case HIF_CONNECT_IND:
+ printk("HIF_CONNECT_IND\n");
+ break;
+ case HIF_STOP_REQ:
+ printk("HIF_STOP_REQ\n");
+ break;
+ case HIF_STOP_CONF:
+ printk("HIF_STOP_CONF\n");
+ break;
+ case HIF_PS_ADH_SET_REQ:
+ printk("HIF_PS_ADH_SET_REQ\n");
+ break;
+ case HIF_PS_ADH_SET_CONF:
+ printk("HIF_PS_ADH_SET_CONF\n");
+ break;
+ case HIF_INFRA_SET_REQ:
+ printk("HIF_INFRA_SET_REQ\n");
+ break;
+ case HIF_INFRA_SET_CONF:
+ printk("HIF_INFRA_SET_CONF\n");
+ break;
+ case HIF_ADH_SET_REQ:
+ printk("HIF_ADH_SET_REQ\n");
+ break;
+ case HIF_ADH_SET_CONF:
+ printk("HIF_ADH_SET_CONF\n");
+ break;
+ case HIF_AP_SET_REQ:
+ printk("HIF_AP_SET_REQ\n");
+ break;
+ case HIF_AP_SET_CONF:
+ printk("HIF_AP_SET_CONF\n");
+ break;
+ case HIF_ASSOC_INFO_IND:
+ printk("HIF_ASSOC_INFO_IND\n");
+ break;
+ case HIF_MIC_FAILURE_REQ:
+ printk("HIF_MIC_FAILURE_REQ\n");
+ break;
+ case HIF_MIC_FAILURE_CONF:
+ printk("HIF_MIC_FAILURE_CONF\n");
+ break;
+ case HIF_SCAN_REQ:
+ printk("HIF_SCAN_REQ\n");
+ break;
+ case HIF_SCAN_CONF:
+ printk("HIF_SCAN_CONF\n");
+ break;
+ case HIF_PHY_INFO_REQ:
+ printk("HIF_PHY_INFO_REQ\n");
+ break;
+ case HIF_PHY_INFO_CONF:
+ printk("HIF_PHY_INFO_CONF\n");
+ break;
+ case HIF_SLEEP_REQ:
+ printk("HIF_SLEEP_REQ\n");
+ break;
+ case HIF_SLEEP_CONF:
+ printk("HIF_SLEEP_CONF\n");
+ break;
+ case HIF_PHY_INFO_IND:
+ printk("HIF_PHY_INFO_IND\n");
+ break;
+ case HIF_SCAN_IND:
+ printk("HIF_SCAN_IND\n");
+ break;
+ case HIF_INFRA_SET2_REQ:
+ printk("HIF_INFRA_SET2_REQ\n");
+ break;
+ case HIF_INFRA_SET2_CONF:
+ printk("HIF_INFRA_SET2_CONF\n");
+ break;
+ case HIF_ADH_SET2_REQ:
+ printk("HIF_ADH_SET2_REQ\n");
+ break;
+ case HIF_ADH_SET2_CONF:
+ printk("HIF_ADH_SET2_CONF\n");
+ }
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get host command history */
+static int ks_wlan_hostt(struct net_device *dev, struct iw_request_info *info,
+ __u32 * uwrq, char *extra)
+{
+ int i, event;
+ struct ks_wlan_private *priv =
+ (struct ks_wlan_private *)netdev_priv(dev);
+
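+	/*
+	 * Dump the 64 most recent entries of the circular event history,
+	 * oldest first. This assumes the ring has wrapped at least once:
+	 * while qtail - 1 - i is still negative, C's '%' stays negative
+	 * and the index would fall outside the buffer.
+	 */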
+ for (i = 63; i >= 0; i--) {
+ event =
+ priv->hostt.buff[(priv->hostt.qtail - 1 - i) %
+ SME_EVENT_BUFF_SIZE];
+ print_hif_event(event);
+ }
+ return 0;
+}
+
+/* Structures to export the Wireless Handlers */
+
+static const struct iw_priv_args ks_wlan_private_args[] = {
+/*{ cmd, set_args, get_args, name[16] } */
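+/* The name column is the token userspace hands to iwpriv, e.g. (an
+ * illustrative invocation) "iwpriv <ifname> SetBeaconLost 30". */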
+ {KS_WLAN_GET_FIRM_VERSION, IW_PRIV_TYPE_NONE,
+ IW_PRIV_TYPE_CHAR | (128 + 1), "GetFirmwareVer"},
+#ifdef WPS
+ {KS_WLAN_SET_WPS_ENABLE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ IW_PRIV_TYPE_NONE, "SetWPSEnable"},
+ {KS_WLAN_GET_WPS_ENABLE, IW_PRIV_TYPE_NONE,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetW"},
+ {KS_WLAN_SET_WPS_PROBE_REQ, IW_PRIV_TYPE_BYTE | 2047, IW_PRIV_TYPE_NONE,
+ "SetWPSProbeReq"},
+#endif /* WPS */
+ {KS_WLAN_SET_PREAMBLE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ IW_PRIV_TYPE_NONE, "SetPreamble"},
+ {KS_WLAN_GET_PREAMBLE, IW_PRIV_TYPE_NONE,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetPreamble"},
+ {KS_WLAN_SET_POWER_SAVE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ IW_PRIV_TYPE_NONE, "SetPowerSave"},
+ {KS_WLAN_GET_POWER_SAVE, IW_PRIV_TYPE_NONE,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetPowerSave"},
+ {KS_WLAN_SET_SCAN_TYPE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ IW_PRIV_TYPE_NONE, "SetScanType"},
+ {KS_WLAN_GET_SCAN_TYPE, IW_PRIV_TYPE_NONE,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetScanType"},
+ {KS_WLAN_SET_RX_GAIN, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ IW_PRIV_TYPE_NONE, "SetRxGain"},
+ {KS_WLAN_GET_RX_GAIN, IW_PRIV_TYPE_NONE,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetRxGain"},
+ {KS_WLAN_HOSTT, IW_PRIV_TYPE_NONE, IW_PRIV_TYPE_CHAR | (128 + 1),
+ "hostt"},
+ {KS_WLAN_SET_BEACON_LOST, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ IW_PRIV_TYPE_NONE, "SetBeaconLost"},
+ {KS_WLAN_GET_BEACON_LOST, IW_PRIV_TYPE_NONE,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetBeaconLost"},
+ {KS_WLAN_SET_SLEEP_MODE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ IW_PRIV_TYPE_NONE, "SetSleepMode"},
+ {KS_WLAN_GET_SLEEP_MODE, IW_PRIV_TYPE_NONE,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetSleepMode"},
+ {KS_WLAN_SET_TX_GAIN, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ IW_PRIV_TYPE_NONE, "SetTxGain"},
+ {KS_WLAN_GET_TX_GAIN, IW_PRIV_TYPE_NONE,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetTxGain"},
+ {KS_WLAN_SET_PHY_TYPE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ IW_PRIV_TYPE_NONE, "SetPhyType"},
+ {KS_WLAN_GET_PHY_TYPE, IW_PRIV_TYPE_NONE,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetPhyType"},
+ {KS_WLAN_SET_CTS_MODE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ IW_PRIV_TYPE_NONE, "SetCtsMode"},
+ {KS_WLAN_GET_CTS_MODE, IW_PRIV_TYPE_NONE,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetCtsMode"},
+ {KS_WLAN_GET_EEPROM_CKSUM, IW_PRIV_TYPE_NONE,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetChecksum"},
+};
+
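+/*
+ * Standard WE handlers are dispatched by array index, which must track
+ * the SIOCIW* ioctl numbering; unsupported slots therefore stay NULL.
+ */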
+static const iw_handler ks_wlan_handler[] = {
+ (iw_handler) ks_wlan_config_commit, /* SIOCSIWCOMMIT */
+ (iw_handler) ks_wlan_get_name, /* SIOCGIWNAME */
+ (iw_handler) NULL, /* SIOCSIWNWID */
+ (iw_handler) NULL, /* SIOCGIWNWID */
+ (iw_handler) ks_wlan_set_freq, /* SIOCSIWFREQ */
+ (iw_handler) ks_wlan_get_freq, /* SIOCGIWFREQ */
+ (iw_handler) ks_wlan_set_mode, /* SIOCSIWMODE */
+ (iw_handler) ks_wlan_get_mode, /* SIOCGIWMODE */
+#ifndef KSC_OPNOTSUPP
+ (iw_handler) ks_wlan_set_sens, /* SIOCSIWSENS */
+ (iw_handler) ks_wlan_get_sens, /* SIOCGIWSENS */
+#else /* KSC_OPNOTSUPP */
+ (iw_handler) NULL, /* SIOCSIWSENS */
+ (iw_handler) NULL, /* SIOCGIWSENS */
+#endif /* KSC_OPNOTSUPP */
+ (iw_handler) NULL, /* SIOCSIWRANGE */
+ (iw_handler) ks_wlan_get_range, /* SIOCGIWRANGE */
+ (iw_handler) NULL, /* SIOCSIWPRIV */
+ (iw_handler) NULL, /* SIOCGIWPRIV */
+ (iw_handler) NULL, /* SIOCSIWSTATS */
+ (iw_handler) ks_wlan_get_iwstats, /* SIOCGIWSTATS */
+ (iw_handler) NULL, /* SIOCSIWSPY */
+ (iw_handler) NULL, /* SIOCGIWSPY */
+ (iw_handler) NULL, /* SIOCSIWTHRSPY */
+ (iw_handler) NULL, /* SIOCGIWTHRSPY */
+ (iw_handler) ks_wlan_set_wap, /* SIOCSIWAP */
+ (iw_handler) ks_wlan_get_wap, /* SIOCGIWAP */
+// (iw_handler) NULL, /* SIOCSIWMLME */
+ (iw_handler) ks_wlan_set_mlme, /* SIOCSIWMLME */
+ (iw_handler) ks_wlan_get_aplist, /* SIOCGIWAPLIST */
+ (iw_handler) ks_wlan_set_scan, /* SIOCSIWSCAN */
+ (iw_handler) ks_wlan_get_scan, /* SIOCGIWSCAN */
+ (iw_handler) ks_wlan_set_essid, /* SIOCSIWESSID */
+ (iw_handler) ks_wlan_get_essid, /* SIOCGIWESSID */
+ (iw_handler) ks_wlan_set_nick, /* SIOCSIWNICKN */
+ (iw_handler) ks_wlan_get_nick, /* SIOCGIWNICKN */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) ks_wlan_set_rate, /* SIOCSIWRATE */
+ (iw_handler) ks_wlan_get_rate, /* SIOCGIWRATE */
+ (iw_handler) ks_wlan_set_rts, /* SIOCSIWRTS */
+ (iw_handler) ks_wlan_get_rts, /* SIOCGIWRTS */
+ (iw_handler) ks_wlan_set_frag, /* SIOCSIWFRAG */
+ (iw_handler) ks_wlan_get_frag, /* SIOCGIWFRAG */
+#ifndef KSC_OPNOTSUPP
+ (iw_handler) ks_wlan_set_txpow, /* SIOCSIWTXPOW */
+ (iw_handler) ks_wlan_get_txpow, /* SIOCGIWTXPOW */
+ (iw_handler) ks_wlan_set_retry, /* SIOCSIWRETRY */
+ (iw_handler) ks_wlan_get_retry, /* SIOCGIWRETRY */
+#else /* KSC_OPNOTSUPP */
+ (iw_handler) NULL, /* SIOCSIWTXPOW */
+ (iw_handler) NULL, /* SIOCGIWTXPOW */
+ (iw_handler) NULL, /* SIOCSIWRETRY */
+ (iw_handler) NULL, /* SIOCGIWRETRY */
+#endif /* KSC_OPNOTSUPP */
+ (iw_handler) ks_wlan_set_encode, /* SIOCSIWENCODE */
+ (iw_handler) ks_wlan_get_encode, /* SIOCGIWENCODE */
+ (iw_handler) ks_wlan_set_power, /* SIOCSIWPOWER */
+ (iw_handler) ks_wlan_get_power, /* SIOCGIWPOWER */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) NULL, /* -- hole -- */
+// (iw_handler) NULL, /* SIOCSIWGENIE */
+ (iw_handler) ks_wlan_set_genie, /* SIOCSIWGENIE */
+ (iw_handler) NULL, /* SIOCGIWGENIE */
+ (iw_handler) ks_wlan_set_auth_mode, /* SIOCSIWAUTH */
+ (iw_handler) ks_wlan_get_auth_mode, /* SIOCGIWAUTH */
+ (iw_handler) ks_wlan_set_encode_ext, /* SIOCSIWENCODEEXT */
+ (iw_handler) ks_wlan_get_encode_ext, /* SIOCGIWENCODEEXT */
+ (iw_handler) ks_wlan_set_pmksa, /* SIOCSIWPMKSA */
+ (iw_handler) NULL, /* -- hole -- */
+};
+
+/* private_handler */
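+/* Entry n is dispatched as SIOCIWFIRSTPRIV + n; the slot numbers in the
+ * comments correspond to the cmd values in ks_wlan_private_args above. */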
+static const iw_handler ks_wlan_private_handler[] = {
+ (iw_handler) NULL, /* 0 */
+ (iw_handler) NULL, /* 1, used to be: KS_WLAN_GET_DRIVER_VERSION */
+ (iw_handler) NULL, /* 2 */
+ (iw_handler) ks_wlan_get_firmware_version, /* 3 KS_WLAN_GET_FIRM_VERSION */
+#ifdef WPS
+ (iw_handler) ks_wlan_set_wps_enable, /* 4 KS_WLAN_SET_WPS_ENABLE */
+ (iw_handler) ks_wlan_get_wps_enable, /* 5 KS_WLAN_GET_WPS_ENABLE */
+ (iw_handler) ks_wlan_set_wps_probe_req, /* 6 KS_WLAN_SET_WPS_PROBE_REQ */
+#else
+ (iw_handler) NULL, /* 4 */
+ (iw_handler) NULL, /* 5 */
+ (iw_handler) NULL, /* 6 */
+#endif /* WPS */
+
+ (iw_handler) ks_wlan_get_eeprom_cksum, /* 7 KS_WLAN_GET_CONNECT */
+ (iw_handler) ks_wlan_set_preamble, /* 8 KS_WLAN_SET_PREAMBLE */
+ (iw_handler) ks_wlan_get_preamble, /* 9 KS_WLAN_GET_PREAMBLE */
+ (iw_handler) ks_wlan_set_powermgt, /* 10 KS_WLAN_SET_POWER_SAVE */
+ (iw_handler) ks_wlan_get_powermgt, /* 11 KS_WLAN_GET_POWER_SAVE */
+ (iw_handler) ks_wlan_set_scan_type, /* 12 KS_WLAN_SET_SCAN_TYPE */
+ (iw_handler) ks_wlan_get_scan_type, /* 13 KS_WLAN_GET_SCAN_TYPE */
+ (iw_handler) ks_wlan_set_rx_gain, /* 14 KS_WLAN_SET_RX_GAIN */
+ (iw_handler) ks_wlan_get_rx_gain, /* 15 KS_WLAN_GET_RX_GAIN */
+ (iw_handler) ks_wlan_hostt, /* 16 KS_WLAN_HOSTT */
+ (iw_handler) NULL, /* 17 */
+	(iw_handler) ks_wlan_set_beacon_lost,	/* 18 KS_WLAN_SET_BEACON_LOST */
+	(iw_handler) ks_wlan_get_beacon_lost,	/* 19 KS_WLAN_GET_BEACON_LOST */
+ (iw_handler) ks_wlan_set_tx_gain, /* 20 KS_WLAN_SET_TX_GAIN */
+ (iw_handler) ks_wlan_get_tx_gain, /* 21 KS_WLAN_GET_TX_GAIN */
+ (iw_handler) ks_wlan_set_phy_type, /* 22 KS_WLAN_SET_PHY_TYPE */
+ (iw_handler) ks_wlan_get_phy_type, /* 23 KS_WLAN_GET_PHY_TYPE */
+ (iw_handler) ks_wlan_set_cts_mode, /* 24 KS_WLAN_SET_CTS_MODE */
+ (iw_handler) ks_wlan_get_cts_mode, /* 25 KS_WLAN_GET_CTS_MODE */
+ (iw_handler) NULL, /* 26 */
+ (iw_handler) NULL, /* 27 */
+ (iw_handler) ks_wlan_set_sleep_mode, /* 28 KS_WLAN_SET_SLEEP_MODE */
+ (iw_handler) ks_wlan_get_sleep_mode, /* 29 KS_WLAN_GET_SLEEP_MODE */
+ (iw_handler) NULL, /* 30 */
+ (iw_handler) NULL, /* 31 */
+};
+
+static const struct iw_handler_def ks_wlan_handler_def = {
+	.num_standard = ARRAY_SIZE(ks_wlan_handler),
+	.num_private = ARRAY_SIZE(ks_wlan_private_handler),
+	.num_private_args = ARRAY_SIZE(ks_wlan_private_args),
+ .standard = (iw_handler *) ks_wlan_handler,
+ .private = (iw_handler *) ks_wlan_private_handler,
+ .private_args = (struct iw_priv_args *)ks_wlan_private_args,
+ .get_wireless_stats = ks_get_wireless_stats,
+};
+
+static int ks_wlan_netdev_ioctl(struct net_device *dev, struct ifreq *rq,
+ int cmd)
+{
+ int rc = 0;
+ struct iwreq *wrq = (struct iwreq *)rq;
+ switch (cmd) {
+ case SIOCIWFIRSTPRIV + 20: /* KS_WLAN_SET_STOP_REQ */
+ rc = ks_wlan_set_stop_request(dev, NULL, &(wrq->u.mode), NULL);
+ break;
+ // All other calls are currently unsupported
+ default:
+ rc = -EOPNOTSUPP;
+ }
+
+ DPRINTK(5, "return=%d\n", rc);
+ return rc;
+}
+
+static
+struct net_device_stats *ks_wlan_get_stats(struct net_device *dev)
+{
+ struct ks_wlan_private *priv = netdev_priv(dev);
+
+ if (priv->dev_state < DEVICE_STATE_READY) {
+		return NULL;	/* initialization not finished */
+ }
+
+ return &priv->nstats;
+}
+
+static
+int ks_wlan_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct ks_wlan_private *priv = netdev_priv(dev);
+ struct sockaddr *mac_addr = (struct sockaddr *)addr;
+ if (netif_running(dev))
+ return -EBUSY;
+ memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
+ memcpy(priv->eth_addr, mac_addr->sa_data, ETH_ALEN);
+
+ priv->mac_address_valid = 0;
+ hostif_sme_enqueue(priv, SME_MACADDRESS_SET_REQUEST);
+ printk(KERN_INFO
+ "ks_wlan: MAC ADDRESS = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ priv->eth_addr[0], priv->eth_addr[1], priv->eth_addr[2],
+ priv->eth_addr[3], priv->eth_addr[4], priv->eth_addr[5]);
+ return 0;
+}
+
+static
+void ks_wlan_tx_timeout(struct net_device *dev)
+{
+ struct ks_wlan_private *priv = netdev_priv(dev);
+
+ DPRINTK(1, "head(%d) tail(%d)!!\n", priv->tx_dev.qhead,
+ priv->tx_dev.qtail);
+ if (!netif_queue_stopped(dev)) {
+ netif_stop_queue(dev);
+ }
+ priv->nstats.tx_errors++;
+ netif_wake_queue(dev);
+}
+
+static
+int ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ks_wlan_private *priv = netdev_priv(dev);
+ int rc = 0;
+
+ DPRINTK(3, "in_interrupt()=%ld\n", in_interrupt());
+
+ if (skb == NULL) {
+ printk(KERN_ERR "ks_wlan: skb == NULL!!!\n");
+ return 0;
+ }
+ if (priv->dev_state < DEVICE_STATE_READY) {
+ dev_kfree_skb(skb);
+		return 0;	/* initialization not finished */
+ }
+
+ if (netif_running(dev))
+ netif_stop_queue(dev);
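+	/* The queue stays stopped until send_packet_complete() below (or the
+	 * tx_timeout handler) wakes it, so transmissions are effectively
+	 * serialized against completions. */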
+
+ rc = hostif_data_request(priv, skb);
+ netif_trans_update(dev);
+
+ DPRINTK(4, "rc=%d\n", rc);
+ if (rc) {
+ rc = 0;
+ }
+
+ return rc;
+}
+
+void send_packet_complete(void *arg1, void *arg2)
+{
+ struct ks_wlan_private *priv = (struct ks_wlan_private *)arg1;
+ struct sk_buff *packet = (struct sk_buff *)arg2;
+
+ DPRINTK(3, "\n");
+
+ priv->nstats.tx_bytes += packet->len;
+ priv->nstats.tx_packets++;
+
+ if (netif_queue_stopped(priv->net_dev))
+ netif_wake_queue(priv->net_dev);
+
+	/* packet was already dereferenced above, so it cannot be NULL here */
+	dev_kfree_skb(packet);
+}
+
+/*
+ * Set or clear the multicast filter for this adapter.
+ * This routine is not state sensitive and need not be SMP locked.
+ */
+static
+void ks_wlan_set_multicast_list(struct net_device *dev)
+{
+ struct ks_wlan_private *priv = netdev_priv(dev);
+
+ DPRINTK(4, "\n");
+ if (priv->dev_state < DEVICE_STATE_READY) {
+		return;	/* initialization not finished */
+ }
+ hostif_sme_enqueue(priv, SME_MULTICAST_REQUEST);
+}
+
+static
+int ks_wlan_open(struct net_device *dev)
+{
+ struct ks_wlan_private *priv = netdev_priv(dev);
+
+ priv->cur_rx = 0;
+
+ if (!priv->mac_address_valid) {
+ printk(KERN_ERR "ks_wlan : %s Not READY !!\n", dev->name);
+ return -EBUSY;
+ } else
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static
+int ks_wlan_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+
+ DPRINTK(4, "%s: Shutting down ethercard, status was 0x%4.4x.\n",
+ dev->name, 0x00);
+
+ return 0;
+}
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (3*HZ)
+static const unsigned char dummy_addr[] =
+ { 0x00, 0x0b, 0xe3, 0x00, 0x00, 0x00 };
+
+static const struct net_device_ops ks_wlan_netdev_ops = {
+ .ndo_start_xmit = ks_wlan_start_xmit,
+ .ndo_open = ks_wlan_open,
+ .ndo_stop = ks_wlan_close,
+ .ndo_do_ioctl = ks_wlan_netdev_ioctl,
+ .ndo_set_mac_address = ks_wlan_set_mac_address,
+ .ndo_get_stats = ks_wlan_get_stats,
+ .ndo_tx_timeout = ks_wlan_tx_timeout,
+ .ndo_set_rx_mode = ks_wlan_set_multicast_list,
+};
+
+int ks_wlan_net_start(struct net_device *dev)
+{
+ struct ks_wlan_private *priv;
+ /* int rc; */
+
+ priv = netdev_priv(dev);
+ priv->mac_address_valid = 0;
+ priv->need_commit = 0;
+
+ priv->device_open_status = 1;
+
+ /* phy information update timer */
+ atomic_set(&update_phyinfo, 0);
+ init_timer(&update_phyinfo_timer);
+ update_phyinfo_timer.function = ks_wlan_update_phyinfo_timeout;
+ update_phyinfo_timer.data = (unsigned long)priv;
+
+ /* dummy address set */
+ memcpy(priv->eth_addr, dummy_addr, ETH_ALEN);
+ dev->dev_addr[0] = priv->eth_addr[0];
+ dev->dev_addr[1] = priv->eth_addr[1];
+ dev->dev_addr[2] = priv->eth_addr[2];
+ dev->dev_addr[3] = priv->eth_addr[3];
+ dev->dev_addr[4] = priv->eth_addr[4];
+ dev->dev_addr[5] = priv->eth_addr[5];
+ dev->dev_addr[6] = 0x00;
+ dev->dev_addr[7] = 0x00;
+
+ /* The ks_wlan-specific entries in the device structure. */
+ dev->netdev_ops = &ks_wlan_netdev_ops;
+ dev->wireless_handlers = (struct iw_handler_def *)&ks_wlan_handler_def;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ netif_carrier_off(dev);
+
+ return 0;
+}
+
+int ks_wlan_net_stop(struct net_device *dev)
+{
+ struct ks_wlan_private *priv = netdev_priv(dev);
+
+ int ret = 0;
+ priv->device_open_status = 0;
+ del_timer_sync(&update_phyinfo_timer);
+
+ if (netif_running(dev))
+ netif_stop_queue(dev);
+
+ return ret;
+}
+
+int ks_wlan_reset(struct net_device *dev)
+{
+ return 0;
+}
diff --git a/drivers/staging/ks7010/michael_mic.c b/drivers/staging/ks7010/michael_mic.c
new file mode 100644
index 000000000..e14c109b3
--- /dev/null
+++ b/drivers/staging/ks7010/michael_mic.c
@@ -0,0 +1,139 @@
+/*
+ * Driver for KeyStream wireless LAN
+ *
+ * Copyright (C) 2005-2008 KeyStream Corp.
+ * Copyright (C) 2009 Renesas Technology Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include "michael_mic.h"
+
+// Rotation functions on 32 bit values
+#define ROL32( A, n ) ( ((A) << (n)) | ( ((A)>>(32-(n))) & ( (1UL << (n)) - 1 ) ) )
+#define ROR32( A, n ) ROL32( (A), 32-(n) )
+// Convert from Byte[] to UInt32 in a portable way
+#define getUInt32( A, B ) ((uint32_t)(A[B+0] << 0) + (A[B+1] << 8) + (A[B+2] << 16) + (A[B+3] << 24))
+
+// Convert from UInt32 to Byte[] in a portable way
+#define putUInt32( A, B, C ) A[B+0] = (uint8_t) (C & 0xff); \
+ A[B+1] = (uint8_t) ((C>>8) & 0xff); \
+ A[B+2] = (uint8_t) ((C>>16) & 0xff); \
+ A[B+3] = (uint8_t) ((C>>24) & 0xff)
+
+// Reset the state to the empty message.
+#define MichaelClear( A ) A->L = A->K0; \
+ A->R = A->K1; \
+ A->nBytesInM = 0;
+
+static
+void MichaelInitializeFunction(struct michel_mic_t *Mic, uint8_t * key)
+{
+ // Set the key
+ Mic->K0 = getUInt32(key, 0);
+ Mic->K1 = getUInt32(key, 4);
+
+	MichaelClear(Mic);
+}
+
+#define MichaelBlockFunction(L, R) \
+do{ \
+ R ^= ROL32( L, 17 ); \
+ L += R; \
+ R ^= ((L & 0xff00ff00) >> 8) | ((L & 0x00ff00ff) << 8); \
+ L += R; \
+ R ^= ROL32( L, 3 ); \
+ L += R; \
+ R ^= ROR32( L, 2 ); \
+ L += R; \
+}while(0)
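+/*
+ * This is the Michael block function b(L,R) from IEEE 802.11i: an XOR of
+ * a 17-bit rotate, a swap of L's adjacent bytes (XSWAP), a 3-bit rotate
+ * and a right rotate by 2, each step followed by a 32-bit add of R into L.
+ */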
+
+static
+void MichaelAppend(struct michel_mic_t *Mic, uint8_t * src, int nBytes)
+{
+ int addlen;
+ if (Mic->nBytesInM) {
+ addlen = 4 - Mic->nBytesInM;
+ if (addlen > nBytes)
+ addlen = nBytes;
+ memcpy(&Mic->M[Mic->nBytesInM], src, addlen);
+ Mic->nBytesInM += addlen;
+ src += addlen;
+ nBytes -= addlen;
+
+ if (Mic->nBytesInM < 4)
+ return;
+
+ Mic->L ^= getUInt32(Mic->M, 0);
+ MichaelBlockFunction(Mic->L, Mic->R);
+ Mic->nBytesInM = 0;
+ }
+
+ while (nBytes >= 4) {
+ Mic->L ^= getUInt32(src, 0);
+ MichaelBlockFunction(Mic->L, Mic->R);
+ src += 4;
+ nBytes -= 4;
+ }
+
+ if (nBytes > 0) {
+ Mic->nBytesInM = nBytes;
+ memcpy(Mic->M, src, nBytes);
+ }
+}
+
+static
+void MichaelGetMIC(struct michel_mic_t *Mic, uint8_t * dst)
+{
+ uint8_t *data = Mic->M;
+ switch (Mic->nBytesInM) {
+ case 0:
+ Mic->L ^= 0x5a;
+ break;
+ case 1:
+ Mic->L ^= data[0] | 0x5a00;
+ break;
+ case 2:
+ Mic->L ^= data[0] | (data[1] << 8) | 0x5a0000;
+ break;
+ case 3:
+ Mic->L ^= data[0] | (data[1] << 8) | (data[2] << 16) |
+ 0x5a000000;
+ break;
+ }
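+	/* The switch above folds the 0x5a padding byte into the final partial
+	 * word; the second block invocation below stands in for the trailing
+	 * all-zero padding word. */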
+ MichaelBlockFunction(Mic->L, Mic->R);
+ MichaelBlockFunction(Mic->L, Mic->R);
+	// The padding and the two block rounds above leave the MIC in L and R.
+ putUInt32(dst, 0, Mic->L);
+ putUInt32(dst, 4, Mic->R);
+
+ // Reset to the empty message.
+ MichaelClear(Mic);
+}
+
+void MichaelMICFunction(struct michel_mic_t *Mic, uint8_t * Key,
+ uint8_t * Data, int Len, uint8_t priority,
+ uint8_t * Result)
+{
+ uint8_t pad_data[4] = { priority, 0, 0, 0 };
+ // Compute the MIC value
+ /*
+ * IEEE802.11i page 47
+ * Figure 43g TKIP MIC processing format
+ * +--+--+--------+--+----+--+--+--+--+--+--+--+--+
+ * |6 |6 |1 |3 |M |1 |1 |1 |1 |1 |1 |1 |1 | Octet
+ * +--+--+--------+--+----+--+--+--+--+--+--+--+--+
+ * |DA|SA|Priority|0 |Data|M0|M1|M2|M3|M4|M5|M6|M7|
+ * +--+--+--------+--+----+--+--+--+--+--+--+--+--+
+ */
+ MichaelInitializeFunction(Mic, Key);
+ MichaelAppend(Mic, (uint8_t *) Data, 12); /* |DA|SA| */
+ MichaelAppend(Mic, pad_data, 4); /* |Priority|0|0|0| */
+ MichaelAppend(Mic, (uint8_t *) (Data + 12), Len - 12); /* |Data| */
+ MichaelGetMIC(Mic, Result);
+}
diff --git a/drivers/staging/ks7010/michael_mic.h b/drivers/staging/ks7010/michael_mic.h
new file mode 100644
index 000000000..c7e4eb280
--- /dev/null
+++ b/drivers/staging/ks7010/michael_mic.h
@@ -0,0 +1,26 @@
+/*
+ * Driver for KeyStream wireless LAN
+ *
+ * Copyright (C) 2005-2008 KeyStream Corp.
+ * Copyright (C) 2009 Renesas Technology Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Michael MIC definitions */
+struct michel_mic_t {
+ uint32_t K0; // Key
+ uint32_t K1; // Key
+ uint32_t L; // Current state
+ uint32_t R; // Current state
+ uint8_t M[4]; // Message accumulator (single word)
+ int nBytesInM; // # bytes in M
+ uint8_t Result[8];
+};
+
+extern
+void MichaelMICFunction(struct michel_mic_t *Mic, uint8_t * Key,
+ uint8_t * Data, int Len, uint8_t priority,
+ uint8_t * Result);
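+
+/* Sketch of a typical call (illustrative variable names): given a frame
+ * laid out as DA|SA|payload, the 8-byte MIC lands in 'result':
+ *	MichaelMICFunction(&mic, key, frame, frame_len, priority, result);
+ */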
diff --git a/drivers/staging/lustre/include/linux/libcfs/curproc.h b/drivers/staging/lustre/include/linux/libcfs/curproc.h
index 1edfca58c..be0675d8f 100644
--- a/drivers/staging/lustre/include/linux/libcfs/curproc.h
+++ b/drivers/staging/lustre/include/linux/libcfs/curproc.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
index 4141afb10..3f6447c65 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
index 455c54d0d..25adab19f 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
index 2e008bffc..d3f9a6020 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
@@ -16,10 +16,6 @@
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see http://www.gnu.org/licenses
*
- * Please contact Oracle Corporation, Inc., 500 Oracle Parkway, Redwood Shores,
- * CA 94065 USA or visit www.oracle.com if you need additional information or
- * have any questions.
- *
* GPL HEADER END
*/
/*
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
index 119986bc7..6949a1846 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
index 4b9102bd9..cce6b58e3 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
index ac4e8cfe6..8c75d5075 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index 2fd2a9690..4daa3823f 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
index e02cde5ae..0ee60ff33 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_time.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_time.h
index 2c7ec2d28..008da4497 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_time.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_time.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
index f9b20c5ac..a7e1340e6 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
index a268ef7aa..e8695e4a3 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
index 7656b09b8..b646acd1f 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-dlc.h b/drivers/staging/lustre/include/linux/lnet/lib-dlc.h
index 6ce9accb9..dfff17088 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-dlc.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-dlc.h
@@ -35,7 +35,7 @@
#define MAX_NUM_SHOW_ENTRIES 32
#define LNET_MAX_STR_LEN 128
#define LNET_MAX_SHOW_NUM_CPT 128
-#define LNET_UNDEFINED_HOPS ((__u32) -1)
+#define LNET_UNDEFINED_HOPS ((__u32)(-1))
struct lnet_ioctl_config_lnd_cmn_tunables {
__u32 lct_version;
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-types.h b/drivers/staging/lustre/include/linux/lnet/lib-types.h
index 24c4a08e6..7967b013c 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-types.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-types.h
@@ -38,6 +38,7 @@
#include <linux/kthread.h>
#include <linux/uio.h>
#include <linux/types.h>
+#include <linux/completion.h>
#include "types.h"
#include "lnetctl.h"
@@ -610,7 +611,7 @@ typedef struct {
/* rcd ready for free */
struct list_head ln_rcd_zombie;
/* serialise startup/shutdown */
- struct semaphore ln_rc_signal;
+ struct completion ln_rc_signal;
struct mutex ln_api_mutex;
struct mutex ln_lnd_mutex;
diff --git a/drivers/staging/lustre/include/linux/lnet/types.h b/drivers/staging/lustre/include/linux/lnet/types.h
index 1c679cb72..e098b6c08 100644
--- a/drivers/staging/lustre/include/linux/lnet/types.h
+++ b/drivers/staging/lustre/include/linux/lnet/types.h
@@ -68,9 +68,9 @@ typedef __u64 lnet_nid_t;
typedef __u32 lnet_pid_t;
/** wildcard NID that matches any end-point address */
-#define LNET_NID_ANY ((lnet_nid_t) -1)
+#define LNET_NID_ANY ((lnet_nid_t)(-1))
/** wildcard PID that matches any lnet_pid_t */
-#define LNET_PID_ANY ((lnet_pid_t) -1)
+#define LNET_PID_ANY ((lnet_pid_t)(-1))
#define LNET_PID_RESERVED 0xf0000000 /* reserved bits in PID */
#define LNET_PID_USERFLAG 0x80000000 /* set in userspace peers */
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index 6c59f2ff2..4f5978b37 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -44,7 +40,7 @@
static lnd_t the_o2iblnd;
-kib_data_t kiblnd_data;
+struct kib_data kiblnd_data;
static __u32 kiblnd_cksum(void *ptr, int nob)
{
@@ -98,40 +94,40 @@ static char *kiblnd_msgtype2str(int type)
static int kiblnd_msgtype2size(int type)
{
- const int hdr_size = offsetof(kib_msg_t, ibm_u);
+ const int hdr_size = offsetof(struct kib_msg, ibm_u);
switch (type) {
case IBLND_MSG_CONNREQ:
case IBLND_MSG_CONNACK:
- return hdr_size + sizeof(kib_connparams_t);
+ return hdr_size + sizeof(struct kib_connparams);
case IBLND_MSG_NOOP:
return hdr_size;
case IBLND_MSG_IMMEDIATE:
- return offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0]);
+ return offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[0]);
case IBLND_MSG_PUT_REQ:
- return hdr_size + sizeof(kib_putreq_msg_t);
+ return hdr_size + sizeof(struct kib_putreq_msg);
case IBLND_MSG_PUT_ACK:
- return hdr_size + sizeof(kib_putack_msg_t);
+ return hdr_size + sizeof(struct kib_putack_msg);
case IBLND_MSG_GET_REQ:
- return hdr_size + sizeof(kib_get_msg_t);
+ return hdr_size + sizeof(struct kib_get_msg);
case IBLND_MSG_PUT_NAK:
case IBLND_MSG_PUT_DONE:
case IBLND_MSG_GET_DONE:
- return hdr_size + sizeof(kib_completion_msg_t);
+ return hdr_size + sizeof(struct kib_completion_msg);
default:
return -1;
}
}
-static int kiblnd_unpack_rd(kib_msg_t *msg, int flip)
+static int kiblnd_unpack_rd(struct kib_msg *msg, int flip)
{
- kib_rdma_desc_t *rd;
+ struct kib_rdma_desc *rd;
int nob;
int n;
int i;
@@ -156,7 +152,7 @@ static int kiblnd_unpack_rd(kib_msg_t *msg, int flip)
return 1;
}
- nob = offsetof(kib_msg_t, ibm_u) +
+ nob = offsetof(struct kib_msg, ibm_u) +
kiblnd_rd_msg_size(rd, msg->ibm_type, n);
if (msg->ibm_nob < nob) {
@@ -176,10 +172,10 @@ static int kiblnd_unpack_rd(kib_msg_t *msg, int flip)
return 0;
}
-void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version,
+void kiblnd_pack_msg(lnet_ni_t *ni, struct kib_msg *msg, int version,
int credits, lnet_nid_t dstnid, __u64 dststamp)
{
- kib_net_t *net = ni->ni_data;
+ struct kib_net *net = ni->ni_data;
/*
* CAVEAT EMPTOR! all message fields not set here should have been
@@ -202,9 +198,9 @@ void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version,
}
}
-int kiblnd_unpack_msg(kib_msg_t *msg, int nob)
+int kiblnd_unpack_msg(struct kib_msg *msg, int nob)
{
- const int hdr_size = offsetof(kib_msg_t, ibm_u);
+ const int hdr_size = offsetof(struct kib_msg, ibm_u);
__u32 msg_cksum;
__u16 version;
int msg_nob;
@@ -315,10 +311,10 @@ int kiblnd_unpack_msg(kib_msg_t *msg, int nob)
return 0;
}
-int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
+int kiblnd_create_peer(lnet_ni_t *ni, struct kib_peer **peerp, lnet_nid_t nid)
{
- kib_peer_t *peer;
- kib_net_t *net = ni->ni_data;
+ struct kib_peer *peer;
+ struct kib_net *net = ni->ni_data;
int cpt = lnet_cpt_of_nid(nid);
unsigned long flags;
@@ -357,9 +353,9 @@ int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
return 0;
}
-void kiblnd_destroy_peer(kib_peer_t *peer)
+void kiblnd_destroy_peer(struct kib_peer *peer)
{
- kib_net_t *net = peer->ibp_ni->ni_data;
+ struct kib_net *net = peer->ibp_ni->ni_data;
LASSERT(net);
LASSERT(!atomic_read(&peer->ibp_refcount));
@@ -378,7 +374,7 @@ void kiblnd_destroy_peer(kib_peer_t *peer)
atomic_dec(&net->ibn_npeers);
}
-kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid)
+struct kib_peer *kiblnd_find_peer_locked(lnet_nid_t nid)
{
/*
* the caller is responsible for accounting the additional reference
@@ -386,10 +382,10 @@ kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid)
*/
struct list_head *peer_list = kiblnd_nid2peerlist(nid);
struct list_head *tmp;
- kib_peer_t *peer;
+ struct kib_peer *peer;
list_for_each(tmp, peer_list) {
- peer = list_entry(tmp, kib_peer_t, ibp_list);
+ peer = list_entry(tmp, struct kib_peer, ibp_list);
LASSERT(!kiblnd_peer_idle(peer));
if (peer->ibp_nid != nid)
@@ -404,7 +400,7 @@ kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid)
return NULL;
}
-void kiblnd_unlink_peer_locked(kib_peer_t *peer)
+void kiblnd_unlink_peer_locked(struct kib_peer *peer)
{
LASSERT(list_empty(&peer->ibp_conns));
@@ -417,7 +413,7 @@ void kiblnd_unlink_peer_locked(kib_peer_t *peer)
static int kiblnd_get_peer_info(lnet_ni_t *ni, int index,
lnet_nid_t *nidp, int *count)
{
- kib_peer_t *peer;
+ struct kib_peer *peer;
struct list_head *ptmp;
int i;
unsigned long flags;
@@ -426,7 +422,7 @@ static int kiblnd_get_peer_info(lnet_ni_t *ni, int index,
for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
- peer = list_entry(ptmp, kib_peer_t, ibp_list);
+ peer = list_entry(ptmp, struct kib_peer, ibp_list);
LASSERT(!kiblnd_peer_idle(peer));
if (peer->ibp_ni != ni)
@@ -448,17 +444,17 @@ static int kiblnd_get_peer_info(lnet_ni_t *ni, int index,
return -ENOENT;
}
-static void kiblnd_del_peer_locked(kib_peer_t *peer)
+static void kiblnd_del_peer_locked(struct kib_peer *peer)
{
struct list_head *ctmp;
struct list_head *cnxt;
- kib_conn_t *conn;
+ struct kib_conn *conn;
if (list_empty(&peer->ibp_conns)) {
kiblnd_unlink_peer_locked(peer);
} else {
list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
- conn = list_entry(ctmp, kib_conn_t, ibc_list);
+ conn = list_entry(ctmp, struct kib_conn, ibc_list);
kiblnd_close_conn_locked(conn, 0);
}
@@ -475,7 +471,7 @@ static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
LIST_HEAD(zombies);
struct list_head *ptmp;
struct list_head *pnxt;
- kib_peer_t *peer;
+ struct kib_peer *peer;
int lo;
int hi;
int i;
@@ -494,7 +490,7 @@ static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
for (i = lo; i <= hi; i++) {
list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
- peer = list_entry(ptmp, kib_peer_t, ibp_list);
+ peer = list_entry(ptmp, struct kib_peer, ibp_list);
LASSERT(!kiblnd_peer_idle(peer));
if (peer->ibp_ni != ni)
@@ -522,11 +518,11 @@ static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
return rc;
}
-static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
+static struct kib_conn *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
{
- kib_peer_t *peer;
+ struct kib_peer *peer;
struct list_head *ptmp;
- kib_conn_t *conn;
+ struct kib_conn *conn;
struct list_head *ctmp;
int i;
unsigned long flags;
@@ -535,7 +531,7 @@ static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
- peer = list_entry(ptmp, kib_peer_t, ibp_list);
+ peer = list_entry(ptmp, struct kib_peer, ibp_list);
LASSERT(!kiblnd_peer_idle(peer));
if (peer->ibp_ni != ni)
@@ -545,7 +541,7 @@ static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
if (index-- > 0)
continue;
- conn = list_entry(ctmp, kib_conn_t,
+ conn = list_entry(ctmp, struct kib_conn,
ibc_list);
kiblnd_conn_addref(conn);
read_unlock_irqrestore(
@@ -594,7 +590,7 @@ static void kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid)
cmid->route.path_rec->mtu = mtu;
}
-static int kiblnd_get_completion_vector(kib_conn_t *conn, int cpt)
+static int kiblnd_get_completion_vector(struct kib_conn *conn, int cpt)
{
cpumask_t *mask;
int vectors;
@@ -621,7 +617,7 @@ static int kiblnd_get_completion_vector(kib_conn_t *conn, int cpt)
return 1;
}
-kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
+struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cmid,
int state, int version)
{
/*
@@ -634,12 +630,12 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
* its ref on 'cmid').
*/
rwlock_t *glock = &kiblnd_data.kib_global_lock;
- kib_net_t *net = peer->ibp_ni->ni_data;
- kib_dev_t *dev;
+ struct kib_net *net = peer->ibp_ni->ni_data;
+ struct kib_dev *dev;
struct ib_qp_init_attr *init_qp_attr;
struct kib_sched_info *sched;
struct ib_cq_init_attr cq_attr = {};
- kib_conn_t *conn;
+ struct kib_conn *conn;
struct ib_cq *cq;
unsigned long flags;
int cpt;
@@ -723,7 +719,7 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
write_unlock_irqrestore(glock, flags);
LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt,
- IBLND_RX_MSGS(conn) * sizeof(kib_rx_t));
+ IBLND_RX_MSGS(conn) * sizeof(struct kib_rx));
if (!conn->ibc_rxs) {
CERROR("Cannot allocate RX buffers\n");
goto failed_2;
@@ -833,10 +829,10 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
return NULL;
}
-void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn)
+void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn)
{
struct rdma_cm_id *cmid = conn->ibc_cmid;
- kib_peer_t *peer = conn->ibc_peer;
+ struct kib_peer *peer = conn->ibc_peer;
int rc;
LASSERT(!in_interrupt());
@@ -879,7 +875,7 @@ void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn)
if (conn->ibc_rxs) {
LIBCFS_FREE(conn->ibc_rxs,
- IBLND_RX_MSGS(conn) * sizeof(kib_rx_t));
+ IBLND_RX_MSGS(conn) * sizeof(struct kib_rx));
}
if (conn->ibc_connvars)
@@ -890,7 +886,7 @@ void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn)
/* See CAVEAT EMPTOR above in kiblnd_create_conn */
if (conn->ibc_state != IBLND_CONN_INIT) {
- kib_net_t *net = peer->ibp_ni->ni_data;
+ struct kib_net *net = peer->ibp_ni->ni_data;
kiblnd_peer_decref(peer);
rdma_destroy_id(cmid);
@@ -900,15 +896,15 @@ void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn)
LIBCFS_FREE(conn, sizeof(*conn));
}
-int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why)
+int kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why)
{
- kib_conn_t *conn;
+ struct kib_conn *conn;
struct list_head *ctmp;
struct list_head *cnxt;
int count = 0;
list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
- conn = list_entry(ctmp, kib_conn_t, ibc_list);
+ conn = list_entry(ctmp, struct kib_conn, ibc_list);
CDEBUG(D_NET, "Closing conn -> %s, version: %x, reason: %d\n",
libcfs_nid2str(peer->ibp_nid),
@@ -921,16 +917,16 @@ int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why)
return count;
}
-int kiblnd_close_stale_conns_locked(kib_peer_t *peer,
+int kiblnd_close_stale_conns_locked(struct kib_peer *peer,
int version, __u64 incarnation)
{
- kib_conn_t *conn;
+ struct kib_conn *conn;
struct list_head *ctmp;
struct list_head *cnxt;
int count = 0;
list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
- conn = list_entry(ctmp, kib_conn_t, ibc_list);
+ conn = list_entry(ctmp, struct kib_conn, ibc_list);
if (conn->ibc_version == version &&
conn->ibc_incarnation == incarnation)
@@ -951,7 +947,7 @@ int kiblnd_close_stale_conns_locked(kib_peer_t *peer,
static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid)
{
- kib_peer_t *peer;
+ struct kib_peer *peer;
struct list_head *ptmp;
struct list_head *pnxt;
int lo;
@@ -972,7 +968,7 @@ static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid)
for (i = lo; i <= hi; i++) {
list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
- peer = list_entry(ptmp, kib_peer_t, ibp_list);
+ peer = list_entry(ptmp, struct kib_peer, ibp_list);
LASSERT(!kiblnd_peer_idle(peer));
if (peer->ibp_ni != ni)
@@ -1016,7 +1012,7 @@ static int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
break;
}
case IOC_LIBCFS_GET_CONN: {
- kib_conn_t *conn;
+ struct kib_conn *conn;
rc = 0;
conn = kiblnd_get_conn_by_idx(ni, data->ioc_count);
@@ -1052,7 +1048,7 @@ static void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
unsigned long last_alive = 0;
unsigned long now = cfs_time_current();
rwlock_t *glock = &kiblnd_data.kib_global_lock;
- kib_peer_t *peer;
+ struct kib_peer *peer;
unsigned long flags;
read_lock_irqsave(glock, flags);
@@ -1078,7 +1074,7 @@ static void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
last_alive ? cfs_duration_sec(now - last_alive) : -1);
}
-static void kiblnd_free_pages(kib_pages_t *p)
+static void kiblnd_free_pages(struct kib_pages *p)
{
int npages = p->ibp_npages;
int i;
@@ -1088,22 +1084,22 @@ static void kiblnd_free_pages(kib_pages_t *p)
__free_page(p->ibp_pages[i]);
}
- LIBCFS_FREE(p, offsetof(kib_pages_t, ibp_pages[npages]));
+ LIBCFS_FREE(p, offsetof(struct kib_pages, ibp_pages[npages]));
}
-int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages)
+int kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages)
{
- kib_pages_t *p;
+ struct kib_pages *p;
int i;
LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt,
- offsetof(kib_pages_t, ibp_pages[npages]));
+ offsetof(struct kib_pages, ibp_pages[npages]));
if (!p) {
CERROR("Can't allocate descriptor for %d pages\n", npages);
return -ENOMEM;
}
- memset(p, 0, offsetof(kib_pages_t, ibp_pages[npages]));
+ memset(p, 0, offsetof(struct kib_pages, ibp_pages[npages]));
p->ibp_npages = npages;
for (i = 0; i < npages; i++) {
@@ -1121,9 +1117,9 @@ int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages)
return 0;
}
-void kiblnd_unmap_rx_descs(kib_conn_t *conn)
+void kiblnd_unmap_rx_descs(struct kib_conn *conn)
{
- kib_rx_t *rx;
+ struct kib_rx *rx;
int i;
LASSERT(conn->ibc_rxs);
@@ -1145,9 +1141,9 @@ void kiblnd_unmap_rx_descs(kib_conn_t *conn)
conn->ibc_rx_pages = NULL;
}
-void kiblnd_map_rx_descs(kib_conn_t *conn)
+void kiblnd_map_rx_descs(struct kib_conn *conn)
{
- kib_rx_t *rx;
+ struct kib_rx *rx;
struct page *pg;
int pg_off;
int ipg;
@@ -1158,7 +1154,7 @@ void kiblnd_map_rx_descs(kib_conn_t *conn)
rx = &conn->ibc_rxs[i];
rx->rx_conn = conn;
- rx->rx_msg = (kib_msg_t *)(((char *)page_address(pg)) + pg_off);
+ rx->rx_msg = (struct kib_msg *)(((char *)page_address(pg)) + pg_off);
rx->rx_msgaddr = kiblnd_dma_map_single(conn->ibc_hdev->ibh_ibdev,
rx->rx_msg,
@@ -1183,10 +1179,10 @@ void kiblnd_map_rx_descs(kib_conn_t *conn)
}
}
-static void kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo)
+static void kiblnd_unmap_tx_pool(struct kib_tx_pool *tpo)
{
- kib_hca_dev_t *hdev = tpo->tpo_hdev;
- kib_tx_t *tx;
+ struct kib_hca_dev *hdev = tpo->tpo_hdev;
+ struct kib_tx *tx;
int i;
LASSERT(!tpo->tpo_pool.po_allocated);
@@ -1206,9 +1202,9 @@ static void kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo)
tpo->tpo_hdev = NULL;
}
-static kib_hca_dev_t *kiblnd_current_hdev(kib_dev_t *dev)
+static struct kib_hca_dev *kiblnd_current_hdev(struct kib_dev *dev)
{
- kib_hca_dev_t *hdev;
+ struct kib_hca_dev *hdev;
unsigned long flags;
int i = 0;
@@ -1232,14 +1228,14 @@ static kib_hca_dev_t *kiblnd_current_hdev(kib_dev_t *dev)
return hdev;
}
-static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
+static void kiblnd_map_tx_pool(struct kib_tx_pool *tpo)
{
- kib_pages_t *txpgs = tpo->tpo_tx_pages;
- kib_pool_t *pool = &tpo->tpo_pool;
- kib_net_t *net = pool->po_owner->ps_net;
- kib_dev_t *dev;
+ struct kib_pages *txpgs = tpo->tpo_tx_pages;
+ struct kib_pool *pool = &tpo->tpo_pool;
+ struct kib_net *net = pool->po_owner->ps_net;
+ struct kib_dev *dev;
struct page *page;
- kib_tx_t *tx;
+ struct kib_tx *tx;
int page_offset;
int ipage;
int i;
@@ -1260,7 +1256,7 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
page = txpgs->ibp_pages[ipage];
tx = &tpo->tpo_tx_descs[i];
- tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) +
+ tx->tx_msg = (struct kib_msg *)(((char *)page_address(page)) +
page_offset);
tx->tx_msgaddr = kiblnd_dma_map_single(
@@ -1283,11 +1279,11 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
}
}
-struct ib_mr *kiblnd_find_rd_dma_mr(struct lnet_ni *ni, kib_rdma_desc_t *rd,
+struct ib_mr *kiblnd_find_rd_dma_mr(struct lnet_ni *ni, struct kib_rdma_desc *rd,
int negotiated_nfrags)
{
- kib_net_t *net = ni->ni_data;
- kib_hca_dev_t *hdev = net->ibn_dev->ibd_hdev;
+ struct kib_net *net = ni->ni_data;
+ struct kib_hca_dev *hdev = net->ibn_dev->ibd_hdev;
struct lnet_ioctl_config_o2iblnd_tunables *tunables;
__u16 nfrags;
int mod;
@@ -1304,7 +1300,7 @@ struct ib_mr *kiblnd_find_rd_dma_mr(struct lnet_ni *ni, kib_rdma_desc_t *rd,
return hdev->ibh_mrs;
}
-static void kiblnd_destroy_fmr_pool(kib_fmr_pool_t *fpo)
+static void kiblnd_destroy_fmr_pool(struct kib_fmr_pool *fpo)
{
LASSERT(!fpo->fpo_map_count);
@@ -1335,7 +1331,7 @@ static void kiblnd_destroy_fmr_pool(kib_fmr_pool_t *fpo)
static void kiblnd_destroy_fmr_pool_list(struct list_head *head)
{
- kib_fmr_pool_t *fpo, *tmp;
+ struct kib_fmr_pool *fpo, *tmp;
list_for_each_entry_safe(fpo, tmp, head, fpo_list) {
list_del(&fpo->fpo_list);
@@ -1361,7 +1357,7 @@ kiblnd_fmr_flush_trigger(struct lnet_ioctl_config_o2iblnd_tunables *tunables,
return max(IBLND_FMR_POOL_FLUSH, size);
}
-static int kiblnd_alloc_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t *fpo)
+static int kiblnd_alloc_fmr_pool(struct kib_fmr_poolset *fps, struct kib_fmr_pool *fpo)
{
struct ib_fmr_pool_param param = {
.max_pages_per_fmr = LNET_MAX_PAYLOAD / PAGE_SIZE,
@@ -1388,7 +1384,7 @@ static int kiblnd_alloc_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t *fpo)
return rc;
}
-static int kiblnd_alloc_freg_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t *fpo)
+static int kiblnd_alloc_freg_pool(struct kib_fmr_poolset *fps, struct kib_fmr_pool *fpo)
{
struct kib_fast_reg_descriptor *frd, *tmp;
int i, rc;
@@ -1438,12 +1434,12 @@ out:
return rc;
}
-static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
- kib_fmr_pool_t **pp_fpo)
+static int kiblnd_create_fmr_pool(struct kib_fmr_poolset *fps,
+ struct kib_fmr_pool **pp_fpo)
{
- kib_dev_t *dev = fps->fps_net->ibn_dev;
+ struct kib_dev *dev = fps->fps_net->ibn_dev;
struct ib_device_attr *dev_attr;
- kib_fmr_pool_t *fpo;
+ struct kib_fmr_pool *fpo;
int rc;
LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
@@ -1488,7 +1484,7 @@ out_fpo:
return rc;
}
-static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps,
+static void kiblnd_fail_fmr_poolset(struct kib_fmr_poolset *fps,
struct list_head *zombies)
{
if (!fps->fps_net) /* initialized? */
@@ -1497,8 +1493,8 @@ static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps,
spin_lock(&fps->fps_lock);
while (!list_empty(&fps->fps_pool_list)) {
- kib_fmr_pool_t *fpo = list_entry(fps->fps_pool_list.next,
- kib_fmr_pool_t, fpo_list);
+ struct kib_fmr_pool *fpo = list_entry(fps->fps_pool_list.next,
+ struct kib_fmr_pool, fpo_list);
fpo->fpo_failed = 1;
list_del(&fpo->fpo_list);
if (!fpo->fpo_map_count)
@@ -1510,7 +1506,7 @@ static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps,
spin_unlock(&fps->fps_lock);
}
-static void kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps)
+static void kiblnd_fini_fmr_poolset(struct kib_fmr_poolset *fps)
{
if (fps->fps_net) { /* initialized? */
kiblnd_destroy_fmr_pool_list(&fps->fps_failed_pool_list);
@@ -1519,11 +1515,11 @@ static void kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps)
}
static int
-kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, int ncpts,
- kib_net_t *net,
+kiblnd_init_fmr_poolset(struct kib_fmr_poolset *fps, int cpt, int ncpts,
+ struct kib_net *net,
struct lnet_ioctl_config_o2iblnd_tunables *tunables)
{
- kib_fmr_pool_t *fpo;
+ struct kib_fmr_pool *fpo;
int rc;
memset(fps, 0, sizeof(*fps));
@@ -1546,7 +1542,7 @@ kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, int ncpts,
return rc;
}
-static int kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now)
+static int kiblnd_fmr_pool_is_idle(struct kib_fmr_pool *fpo, unsigned long now)
{
if (fpo->fpo_map_count) /* still in use */
return 0;
@@ -1556,10 +1552,10 @@ static int kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now)
}
static int
-kiblnd_map_tx_pages(kib_tx_t *tx, kib_rdma_desc_t *rd)
+kiblnd_map_tx_pages(struct kib_tx *tx, struct kib_rdma_desc *rd)
{
__u64 *pages = tx->tx_pages;
- kib_hca_dev_t *hdev;
+ struct kib_hca_dev *hdev;
int npages;
int size;
int i;
@@ -1577,13 +1573,13 @@ kiblnd_map_tx_pages(kib_tx_t *tx, kib_rdma_desc_t *rd)
return npages;
}
-void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
+void kiblnd_fmr_pool_unmap(struct kib_fmr *fmr, int status)
{
LIST_HEAD(zombies);
- kib_fmr_pool_t *fpo = fmr->fmr_pool;
- kib_fmr_poolset_t *fps;
+ struct kib_fmr_pool *fpo = fmr->fmr_pool;
+ struct kib_fmr_poolset *fps;
unsigned long now = cfs_time_current();
- kib_fmr_pool_t *tmp;
+ struct kib_fmr_pool *tmp;
int rc;
if (!fpo)
@@ -1633,14 +1629,14 @@ void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
kiblnd_destroy_fmr_pool_list(&zombies);
}
-int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, kib_tx_t *tx,
- kib_rdma_desc_t *rd, __u32 nob, __u64 iov,
- kib_fmr_t *fmr)
+int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx,
+ struct kib_rdma_desc *rd, __u32 nob, __u64 iov,
+ struct kib_fmr *fmr)
{
__u64 *pages = tx->tx_pages;
bool is_rx = (rd != tx->tx_rd);
bool tx_pages_mapped = 0;
- kib_fmr_pool_t *fpo;
+ struct kib_fmr_pool *fpo;
int npages = 0;
__u64 version;
int rc;
@@ -1780,7 +1776,7 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, kib_tx_t *tx,
goto again;
}
-static void kiblnd_fini_pool(kib_pool_t *pool)
+static void kiblnd_fini_pool(struct kib_pool *pool)
{
LASSERT(list_empty(&pool->po_free_list));
LASSERT(!pool->po_allocated);
@@ -1788,7 +1784,7 @@ static void kiblnd_fini_pool(kib_pool_t *pool)
CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name);
}
-static void kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size)
+static void kiblnd_init_pool(struct kib_poolset *ps, struct kib_pool *pool, int size)
{
CDEBUG(D_NET, "Initialize %s pool\n", ps->ps_name);
@@ -1801,10 +1797,10 @@ static void kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size)
static void kiblnd_destroy_pool_list(struct list_head *head)
{
- kib_pool_t *pool;
+ struct kib_pool *pool;
while (!list_empty(head)) {
- pool = list_entry(head->next, kib_pool_t, po_list);
+ pool = list_entry(head->next, struct kib_pool, po_list);
list_del(&pool->po_list);
LASSERT(pool->po_owner);
@@ -1812,15 +1808,15 @@ static void kiblnd_destroy_pool_list(struct list_head *head)
}
}
-static void kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies)
+static void kiblnd_fail_poolset(struct kib_poolset *ps, struct list_head *zombies)
{
if (!ps->ps_net) /* initialized? */
return;
spin_lock(&ps->ps_lock);
while (!list_empty(&ps->ps_pool_list)) {
- kib_pool_t *po = list_entry(ps->ps_pool_list.next,
- kib_pool_t, po_list);
+ struct kib_pool *po = list_entry(ps->ps_pool_list.next,
+ struct kib_pool, po_list);
po->po_failed = 1;
list_del(&po->po_list);
if (!po->po_allocated)
@@ -1831,7 +1827,7 @@ static void kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies)
spin_unlock(&ps->ps_lock);
}
-static void kiblnd_fini_poolset(kib_poolset_t *ps)
+static void kiblnd_fini_poolset(struct kib_poolset *ps)
{
if (ps->ps_net) { /* initialized? */
kiblnd_destroy_pool_list(&ps->ps_failed_pool_list);
@@ -1839,14 +1835,14 @@ static void kiblnd_fini_poolset(kib_poolset_t *ps)
}
}
-static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt,
- kib_net_t *net, char *name, int size,
+static int kiblnd_init_poolset(struct kib_poolset *ps, int cpt,
+ struct kib_net *net, char *name, int size,
kib_ps_pool_create_t po_create,
kib_ps_pool_destroy_t po_destroy,
kib_ps_node_init_t nd_init,
kib_ps_node_fini_t nd_fini)
{
- kib_pool_t *pool;
+ struct kib_pool *pool;
int rc;
memset(ps, 0, sizeof(*ps));
@@ -1874,7 +1870,7 @@ static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt,
return rc;
}
-static int kiblnd_pool_is_idle(kib_pool_t *pool, unsigned long now)
+static int kiblnd_pool_is_idle(struct kib_pool *pool, unsigned long now)
{
if (pool->po_allocated) /* still in use */
return 0;
@@ -1883,11 +1879,11 @@ static int kiblnd_pool_is_idle(kib_pool_t *pool, unsigned long now)
return cfs_time_aftereq(now, pool->po_deadline);
}
-void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node)
+void kiblnd_pool_free_node(struct kib_pool *pool, struct list_head *node)
{
LIST_HEAD(zombies);
- kib_poolset_t *ps = pool->po_owner;
- kib_pool_t *tmp;
+ struct kib_poolset *ps = pool->po_owner;
+ struct kib_pool *tmp;
unsigned long now = cfs_time_current();
spin_lock(&ps->ps_lock);
@@ -1913,10 +1909,10 @@ void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node)
kiblnd_destroy_pool_list(&zombies);
}
-struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps)
+struct list_head *kiblnd_pool_alloc_node(struct kib_poolset *ps)
{
struct list_head *node;
- kib_pool_t *pool;
+ struct kib_pool *pool;
unsigned int interval = 1;
unsigned long time_before;
unsigned int trips = 0;
@@ -1986,9 +1982,9 @@ struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps)
goto again;
}
-static void kiblnd_destroy_tx_pool(kib_pool_t *pool)
+static void kiblnd_destroy_tx_pool(struct kib_pool *pool)
{
- kib_tx_pool_t *tpo = container_of(pool, kib_tx_pool_t, tpo_pool);
+ struct kib_tx_pool *tpo = container_of(pool, struct kib_tx_pool, tpo_pool);
int i;
LASSERT(!pool->po_allocated);
@@ -2002,7 +1998,7 @@ static void kiblnd_destroy_tx_pool(kib_pool_t *pool)
goto out;
for (i = 0; i < pool->po_size; i++) {
- kib_tx_t *tx = &tpo->tpo_tx_descs[i];
+ struct kib_tx *tx = &tpo->tpo_tx_descs[i];
list_del(&tx->tx_list);
if (tx->tx_pages)
@@ -2011,8 +2007,8 @@ static void kiblnd_destroy_tx_pool(kib_pool_t *pool)
sizeof(*tx->tx_pages));
if (tx->tx_frags)
LIBCFS_FREE(tx->tx_frags,
- IBLND_MAX_RDMA_FRAGS *
- sizeof(*tx->tx_frags));
+ (1 + IBLND_MAX_RDMA_FRAGS) *
+ sizeof(*tx->tx_frags));
if (tx->tx_wrq)
LIBCFS_FREE(tx->tx_wrq,
(1 + IBLND_MAX_RDMA_FRAGS) *
@@ -2023,12 +2019,12 @@ static void kiblnd_destroy_tx_pool(kib_pool_t *pool)
sizeof(*tx->tx_sge));
if (tx->tx_rd)
LIBCFS_FREE(tx->tx_rd,
- offsetof(kib_rdma_desc_t,
+ offsetof(struct kib_rdma_desc,
rd_frags[IBLND_MAX_RDMA_FRAGS]));
}
LIBCFS_FREE(tpo->tpo_tx_descs,
- pool->po_size * sizeof(kib_tx_t));
+ pool->po_size * sizeof(struct kib_tx));
out:
kiblnd_fini_pool(pool);
LIBCFS_FREE(tpo, sizeof(*tpo));
@@ -2041,13 +2037,13 @@ static int kiblnd_tx_pool_size(int ncpts)
return max(IBLND_TX_POOL, ntx);
}
-static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size,
- kib_pool_t **pp_po)
+static int kiblnd_create_tx_pool(struct kib_poolset *ps, int size,
+ struct kib_pool **pp_po)
{
int i;
int npg;
- kib_pool_t *pool;
- kib_tx_pool_t *tpo;
+ struct kib_pool *pool;
+ struct kib_tx_pool *tpo;
LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), ps->ps_cpt, sizeof(*tpo));
if (!tpo) {
@@ -2068,17 +2064,17 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size,
}
LIBCFS_CPT_ALLOC(tpo->tpo_tx_descs, lnet_cpt_table(), ps->ps_cpt,
- size * sizeof(kib_tx_t));
+ size * sizeof(struct kib_tx));
if (!tpo->tpo_tx_descs) {
CERROR("Can't allocate %d tx descriptors\n", size);
ps->ps_pool_destroy(pool);
return -ENOMEM;
}
- memset(tpo->tpo_tx_descs, 0, size * sizeof(kib_tx_t));
+ memset(tpo->tpo_tx_descs, 0, size * sizeof(struct kib_tx));
for (i = 0; i < size; i++) {
- kib_tx_t *tx = &tpo->tpo_tx_descs[i];
+ struct kib_tx *tx = &tpo->tpo_tx_descs[i];
tx->tx_pool = tpo;
if (ps->ps_net->ibn_fmr_ps) {
@@ -2090,11 +2086,12 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size,
}
LIBCFS_CPT_ALLOC(tx->tx_frags, lnet_cpt_table(), ps->ps_cpt,
- IBLND_MAX_RDMA_FRAGS * sizeof(*tx->tx_frags));
+ (1 + IBLND_MAX_RDMA_FRAGS) *
+ sizeof(*tx->tx_frags));
if (!tx->tx_frags)
break;
- sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS);
+ sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS + 1);
LIBCFS_CPT_ALLOC(tx->tx_wrq, lnet_cpt_table(), ps->ps_cpt,
(1 + IBLND_MAX_RDMA_FRAGS) *
@@ -2109,7 +2106,7 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size,
break;
LIBCFS_CPT_ALLOC(tx->tx_rd, lnet_cpt_table(), ps->ps_cpt,
- offsetof(kib_rdma_desc_t,
+ offsetof(struct kib_rdma_desc,
rd_frags[IBLND_MAX_RDMA_FRAGS]));
if (!tx->tx_rd)
break;
@@ -2125,22 +2122,23 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size,
return -ENOMEM;
}
-static void kiblnd_tx_init(kib_pool_t *pool, struct list_head *node)
+static void kiblnd_tx_init(struct kib_pool *pool, struct list_head *node)
{
- kib_tx_poolset_t *tps = container_of(pool->po_owner, kib_tx_poolset_t,
- tps_poolset);
- kib_tx_t *tx = list_entry(node, kib_tx_t, tx_list);
+ struct kib_tx_poolset *tps = container_of(pool->po_owner,
+ struct kib_tx_poolset,
+ tps_poolset);
+ struct kib_tx *tx = list_entry(node, struct kib_tx, tx_list);
tx->tx_cookie = tps->tps_next_tx_cookie++;
}
-static void kiblnd_net_fini_pools(kib_net_t *net)
+static void kiblnd_net_fini_pools(struct kib_net *net)
{
int i;
cfs_cpt_for_each(i, lnet_cpt_table()) {
- kib_tx_poolset_t *tps;
- kib_fmr_poolset_t *fps;
+ struct kib_tx_poolset *tps;
+ struct kib_fmr_poolset *fps;
if (net->ibn_tx_ps) {
tps = net->ibn_tx_ps[i];
@@ -2164,7 +2162,7 @@ static void kiblnd_net_fini_pools(kib_net_t *net)
}
}
-static int kiblnd_net_init_pools(kib_net_t *net, lnet_ni_t *ni, __u32 *cpts,
+static int kiblnd_net_init_pools(struct kib_net *net, lnet_ni_t *ni, __u32 *cpts,
int ncpts)
{
struct lnet_ioctl_config_o2iblnd_tunables *tunables;
@@ -2206,7 +2204,7 @@ static int kiblnd_net_init_pools(kib_net_t *net, lnet_ni_t *ni, __u32 *cpts,
* number of CPTs that exist, i.e. net->ibn_fmr_ps[cpt].
*/
net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(kib_fmr_poolset_t));
+ sizeof(struct kib_fmr_poolset));
if (!net->ibn_fmr_ps) {
CERROR("Failed to allocate FMR pool array\n");
rc = -ENOMEM;
@@ -2234,7 +2232,7 @@ static int kiblnd_net_init_pools(kib_net_t *net, lnet_ni_t *ni, __u32 *cpts,
* number of CPTs that exist, i.e. net->ibn_tx_ps[cpt].
*/
net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(kib_tx_poolset_t));
+ sizeof(struct kib_tx_poolset));
if (!net->ibn_tx_ps) {
CERROR("Failed to allocate tx pool array\n");
rc = -ENOMEM;
@@ -2263,7 +2261,7 @@ static int kiblnd_net_init_pools(kib_net_t *net, lnet_ni_t *ni, __u32 *cpts,
return rc;
}
-static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev)
+static int kiblnd_hdev_get_attr(struct kib_hca_dev *hdev)
{
/*
* It's safe to assume an HCA can handle a page size
@@ -2283,7 +2281,7 @@ static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev)
return -EINVAL;
}
-static void kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev)
+static void kiblnd_hdev_cleanup_mrs(struct kib_hca_dev *hdev)
{
if (!hdev->ibh_mrs)
return;
@@ -2293,7 +2291,7 @@ static void kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev)
hdev->ibh_mrs = NULL;
}
-void kiblnd_hdev_destroy(kib_hca_dev_t *hdev)
+void kiblnd_hdev_destroy(struct kib_hca_dev *hdev)
{
kiblnd_hdev_cleanup_mrs(hdev);
@@ -2306,7 +2304,7 @@ void kiblnd_hdev_destroy(kib_hca_dev_t *hdev)
LIBCFS_FREE(hdev, sizeof(*hdev));
}
-static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
+static int kiblnd_hdev_setup_mrs(struct kib_hca_dev *hdev)
{
struct ib_mr *mr;
int rc;
@@ -2335,7 +2333,7 @@ static int kiblnd_dummy_callback(struct rdma_cm_id *cmid,
return 0;
}
-static int kiblnd_dev_need_failover(kib_dev_t *dev)
+static int kiblnd_dev_need_failover(struct kib_dev *dev)
{
struct rdma_cm_id *cmid;
struct sockaddr_in srcaddr;
@@ -2389,15 +2387,15 @@ static int kiblnd_dev_need_failover(kib_dev_t *dev)
return rc;
}
-int kiblnd_dev_failover(kib_dev_t *dev)
+int kiblnd_dev_failover(struct kib_dev *dev)
{
LIST_HEAD(zombie_tpo);
LIST_HEAD(zombie_ppo);
LIST_HEAD(zombie_fpo);
struct rdma_cm_id *cmid = NULL;
- kib_hca_dev_t *hdev = NULL;
+ struct kib_hca_dev *hdev = NULL;
struct ib_pd *pd;
- kib_net_t *net;
+ struct kib_net *net;
struct sockaddr_in addr;
unsigned long flags;
int rc = 0;
@@ -2522,7 +2520,7 @@ int kiblnd_dev_failover(kib_dev_t *dev)
return rc;
}
-void kiblnd_destroy_dev(kib_dev_t *dev)
+void kiblnd_destroy_dev(struct kib_dev *dev)
{
LASSERT(!dev->ibd_nnets);
LASSERT(list_empty(&dev->ibd_nets));
@@ -2536,10 +2534,10 @@ void kiblnd_destroy_dev(kib_dev_t *dev)
LIBCFS_FREE(dev, sizeof(*dev));
}
-static kib_dev_t *kiblnd_create_dev(char *ifname)
+static struct kib_dev *kiblnd_create_dev(char *ifname)
{
struct net_device *netdev;
- kib_dev_t *dev;
+ struct kib_dev *dev;
__u32 netmask;
__u32 ip;
int up;
@@ -2654,7 +2652,7 @@ static void kiblnd_base_shutdown(void)
static void kiblnd_shutdown(lnet_ni_t *ni)
{
- kib_net_t *net = ni->ni_data;
+ struct kib_net *net = ni->ni_data;
rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
int i;
unsigned long flags;
@@ -2851,7 +2849,7 @@ static int kiblnd_start_schedulers(struct kib_sched_info *sched)
return rc;
}
-static int kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts,
+static int kiblnd_dev_start_threads(struct kib_dev *dev, int newdev, __u32 *cpts,
int ncpts)
{
int cpt;
@@ -2877,10 +2875,10 @@ static int kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts,
return 0;
}
-static kib_dev_t *kiblnd_dev_search(char *ifname)
+static struct kib_dev *kiblnd_dev_search(char *ifname)
{
- kib_dev_t *alias = NULL;
- kib_dev_t *dev;
+ struct kib_dev *alias = NULL;
+ struct kib_dev *dev;
char *colon;
char *colon2;
@@ -2912,8 +2910,8 @@ static kib_dev_t *kiblnd_dev_search(char *ifname)
static int kiblnd_startup(lnet_ni_t *ni)
{
char *ifname;
- kib_dev_t *ibdev = NULL;
- kib_net_t *net;
+ struct kib_dev *ibdev = NULL;
+ struct kib_net *net;
struct timespec64 tv;
unsigned long flags;
int rc;
@@ -3020,11 +3018,11 @@ static void __exit ko2iblnd_exit(void)
static int __init ko2iblnd_init(void)
{
- CLASSERT(sizeof(kib_msg_t) <= IBLND_MSG_SIZE);
- CLASSERT(offsetof(kib_msg_t,
+ CLASSERT(sizeof(struct kib_msg) <= IBLND_MSG_SIZE);
+ CLASSERT(offsetof(struct kib_msg,
ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
<= IBLND_MSG_SIZE);
- CLASSERT(offsetof(kib_msg_t,
+ CLASSERT(offsetof(struct kib_msg,
ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
<= IBLND_MSG_SIZE);
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
index b22984fd9..078a0c3e8 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -78,12 +74,12 @@
#define IBLND_N_SCHED 2
#define IBLND_N_SCHED_HIGH 4
-typedef struct {
+struct kib_tunables {
int *kib_dev_failover; /* HCA failover */
unsigned int *kib_service; /* IB service number */
int *kib_min_reconnect_interval; /* first failed connection retry... */
int *kib_max_reconnect_interval; /* exponentially increasing to this */
- int *kib_cksum; /* checksum kib_msg_t? */
+ int *kib_cksum; /* checksum struct kib_msg? */
int *kib_timeout; /* comms timeout (seconds) */
int *kib_keepalive; /* keepalive timeout (seconds) */
int *kib_ntx; /* # tx descs */
@@ -94,22 +90,22 @@ typedef struct {
int *kib_require_priv_port; /* accept only privileged ports */
int *kib_use_priv_port; /* use privileged port for active connect */
int *kib_nscheds; /* # threads on each CPT */
-} kib_tunables_t;
+};
-extern kib_tunables_t kiblnd_tunables;
+extern struct kib_tunables kiblnd_tunables;
#define IBLND_MSG_QUEUE_SIZE_V1 8 /* V1 only : # messages/RDMAs in-flight */
#define IBLND_CREDIT_HIGHWATER_V1 7 /* V1 only : when to eagerly return credits */
#define IBLND_CREDITS_DEFAULT 8 /* default # of peer credits */
-#define IBLND_CREDITS_MAX ((typeof(((kib_msg_t *) 0)->ibm_credits)) - 1) /* Max # of peer credits */
+#define IBLND_CREDITS_MAX ((typeof(((struct kib_msg *)0)->ibm_credits)) - 1) /* Max # of peer credits */
/* when to eagerly return credits */
#define IBLND_CREDITS_HIGHWATER(t, v) ((v) == IBLND_MSG_VERSION_1 ? \
IBLND_CREDIT_HIGHWATER_V1 : \
t->lnd_peercredits_hiw)
-#define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(&init_net, \
+#define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(current->nsproxy->net_ns, \
cb, dev, \
ps, qpt)
@@ -150,7 +146,7 @@ struct kib_hca_dev;
#define KIB_IFNAME_SIZE 256
#endif
-typedef struct {
+struct kib_dev {
struct list_head ibd_list; /* chain on kib_devs */
struct list_head ibd_fail_list; /* chain on kib_failed_devs */
__u32 ibd_ifip; /* IPoIB interface IP */
@@ -165,9 +161,9 @@ typedef struct {
unsigned int ibd_can_failover; /* IPoIB interface is a bonding master */
struct list_head ibd_nets;
struct kib_hca_dev *ibd_hdev;
-} kib_dev_t;
+};
-typedef struct kib_hca_dev {
+struct kib_hca_dev {
struct rdma_cm_id *ibh_cmid; /* listener cmid */
struct ib_device *ibh_ibdev; /* IB device */
int ibh_page_shift; /* page shift of current HCA */
@@ -177,19 +173,19 @@ typedef struct kib_hca_dev {
__u64 ibh_mr_size; /* size of MR */
struct ib_mr *ibh_mrs; /* global MR */
struct ib_pd *ibh_pd; /* PD */
- kib_dev_t *ibh_dev; /* owner */
+ struct kib_dev *ibh_dev; /* owner */
atomic_t ibh_ref; /* refcount */
-} kib_hca_dev_t;
+};
/** # of seconds to keep pool alive */
#define IBLND_POOL_DEADLINE 300
/** # of seconds to retry if allocation failed */
#define IBLND_POOL_RETRY 1
-typedef struct {
+struct kib_pages {
int ibp_npages; /* # pages */
struct page *ibp_pages[0]; /* page array */
-} kib_pages_t;
+};
struct kib_pool;
struct kib_poolset;
@@ -204,7 +200,7 @@ struct kib_net;
#define IBLND_POOL_NAME_LEN 32
-typedef struct kib_poolset {
+struct kib_poolset {
spinlock_t ps_lock; /* serialize */
struct kib_net *ps_net; /* network it belongs to */
char ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */
@@ -220,31 +216,31 @@ typedef struct kib_poolset {
kib_ps_pool_destroy_t ps_pool_destroy; /* destroy a pool */
kib_ps_node_init_t ps_node_init; /* initialize new allocated node */
kib_ps_node_fini_t ps_node_fini; /* finalize node */
-} kib_poolset_t;
+};
-typedef struct kib_pool {
+struct kib_pool {
struct list_head po_list; /* chain on pool list */
struct list_head po_free_list; /* pre-allocated node */
- kib_poolset_t *po_owner; /* pool_set of this pool */
+ struct kib_poolset *po_owner; /* pool_set of this pool */
unsigned long po_deadline; /* deadline of this pool */
int po_allocated; /* # of elements in use */
int po_failed; /* pool is created on failed HCA */
int po_size; /* # of pre-allocated elements */
-} kib_pool_t;
+};
-typedef struct {
- kib_poolset_t tps_poolset; /* pool-set */
+struct kib_tx_poolset {
+ struct kib_poolset tps_poolset; /* pool-set */
__u64 tps_next_tx_cookie; /* cookie of TX */
-} kib_tx_poolset_t;
+};
-typedef struct {
- kib_pool_t tpo_pool; /* pool */
- struct kib_hca_dev *tpo_hdev; /* device for this pool */
- struct kib_tx *tpo_tx_descs; /* all the tx descriptors */
- kib_pages_t *tpo_tx_pages; /* premapped tx msg pages */
-} kib_tx_pool_t;
+struct kib_tx_pool {
+ struct kib_pool tpo_pool; /* pool */
+ struct kib_hca_dev *tpo_hdev; /* device for this pool */
+ struct kib_tx *tpo_tx_descs; /* all the tx descriptors */
+ struct kib_pages *tpo_tx_pages; /* premapped tx msg pages */
+};
-typedef struct {
+struct kib_fmr_poolset {
spinlock_t fps_lock; /* serialize */
struct kib_net *fps_net; /* IB network */
struct list_head fps_pool_list; /* FMR pool list */
@@ -257,7 +253,7 @@ typedef struct {
int fps_increasing; /* is allocating new pool */
unsigned long fps_next_retry; /* time stamp for retry if */
/* failed to allocate */
-} kib_fmr_poolset_t;
+};
struct kib_fast_reg_descriptor { /* For fast registration */
struct list_head frd_list;
@@ -267,10 +263,10 @@ struct kib_fast_reg_descriptor { /* For fast registration */
bool frd_valid;
};
-typedef struct {
- struct list_head fpo_list; /* chain on pool list */
- struct kib_hca_dev *fpo_hdev; /* device for this pool */
- kib_fmr_poolset_t *fpo_owner; /* owner of this pool */
+struct kib_fmr_pool {
+ struct list_head fpo_list; /* chain on pool list */
+ struct kib_hca_dev *fpo_hdev; /* device for this pool */
+ struct kib_fmr_poolset *fpo_owner; /* owner of this pool */
union {
struct {
struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
@@ -284,17 +280,17 @@ typedef struct {
int fpo_failed; /* fmr pool is failed */
int fpo_map_count; /* # of mapped FMR */
int fpo_is_fmr;
-} kib_fmr_pool_t;
+};
-typedef struct {
- kib_fmr_pool_t *fmr_pool; /* pool of FMR */
+struct kib_fmr {
+ struct kib_fmr_pool *fmr_pool; /* pool of FMR */
struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */
struct kib_fast_reg_descriptor *fmr_frd;
u32 fmr_key;
-} kib_fmr_t;
+};
-typedef struct kib_net {
- struct list_head ibn_list; /* chain on kib_dev_t::ibd_nets */
+struct kib_net {
+ struct list_head ibn_list; /* chain on struct kib_dev::ibd_nets */
__u64 ibn_incarnation;/* my epoch */
int ibn_init; /* initialisation state */
int ibn_shutdown; /* shutting down? */
@@ -302,11 +298,11 @@ typedef struct kib_net {
atomic_t ibn_npeers; /* # peers extant */
atomic_t ibn_nconns; /* # connections extant */
- kib_tx_poolset_t **ibn_tx_ps; /* tx pool-set */
- kib_fmr_poolset_t **ibn_fmr_ps; /* fmr pool-set */
+ struct kib_tx_poolset **ibn_tx_ps; /* tx pool-set */
+ struct kib_fmr_poolset **ibn_fmr_ps; /* fmr pool-set */
- kib_dev_t *ibn_dev; /* underlying IB device */
-} kib_net_t;
+ struct kib_dev *ibn_dev; /* underlying IB device */
+};
#define KIB_THREAD_SHIFT 16
#define KIB_THREAD_ID(cpt, tid) ((cpt) << KIB_THREAD_SHIFT | (tid))
@@ -322,7 +318,7 @@ struct kib_sched_info {
int ibs_cpt; /* CPT id */
};
-typedef struct {
+struct kib_data {
int kib_init; /* initialisation state */
int kib_shutdown; /* shut down? */
struct list_head kib_devs; /* IB devices extant */
@@ -349,7 +345,7 @@ typedef struct {
spinlock_t kib_connd_lock; /* serialise */
struct ib_qp_attr kib_error_qpa; /* QP->ERROR */
struct kib_sched_info **kib_scheds; /* percpt data for schedulers */
-} kib_data_t;
+};
#define IBLND_INIT_NOTHING 0
#define IBLND_INIT_DATA 1
@@ -360,51 +356,51 @@ typedef struct {
* These are sent in sender's byte order (i.e. receiver flips).
*/
-typedef struct kib_connparams {
+struct kib_connparams {
__u16 ibcp_queue_depth;
__u16 ibcp_max_frags;
__u32 ibcp_max_msg_size;
-} WIRE_ATTR kib_connparams_t;
+} WIRE_ATTR;
-typedef struct {
+struct kib_immediate_msg {
lnet_hdr_t ibim_hdr; /* portals header */
char ibim_payload[0]; /* piggy-backed payload */
-} WIRE_ATTR kib_immediate_msg_t;
+} WIRE_ATTR;
-typedef struct {
+struct kib_rdma_frag {
__u32 rf_nob; /* # bytes this frag */
__u64 rf_addr; /* CAVEAT EMPTOR: misaligned!! */
-} WIRE_ATTR kib_rdma_frag_t;
+} WIRE_ATTR;
-typedef struct {
+struct kib_rdma_desc {
__u32 rd_key; /* local/remote key */
__u32 rd_nfrags; /* # fragments */
- kib_rdma_frag_t rd_frags[0]; /* buffer frags */
-} WIRE_ATTR kib_rdma_desc_t;
+ struct kib_rdma_frag rd_frags[0]; /* buffer frags */
+} WIRE_ATTR;
-typedef struct {
+struct kib_putreq_msg {
lnet_hdr_t ibprm_hdr; /* portals header */
__u64 ibprm_cookie; /* opaque completion cookie */
-} WIRE_ATTR kib_putreq_msg_t;
+} WIRE_ATTR;
-typedef struct {
+struct kib_putack_msg {
__u64 ibpam_src_cookie; /* reflected completion cookie */
__u64 ibpam_dst_cookie; /* opaque completion cookie */
- kib_rdma_desc_t ibpam_rd; /* sender's sink buffer */
-} WIRE_ATTR kib_putack_msg_t;
+ struct kib_rdma_desc ibpam_rd; /* sender's sink buffer */
+} WIRE_ATTR;
-typedef struct {
+struct kib_get_msg {
lnet_hdr_t ibgm_hdr; /* portals header */
__u64 ibgm_cookie; /* opaque completion cookie */
- kib_rdma_desc_t ibgm_rd; /* rdma descriptor */
-} WIRE_ATTR kib_get_msg_t;
+ struct kib_rdma_desc ibgm_rd; /* rdma descriptor */
+} WIRE_ATTR;
-typedef struct {
+struct kib_completion_msg {
__u64 ibcm_cookie; /* opaque completion cookie */
__s32 ibcm_status; /* < 0 failure: >= 0 length */
-} WIRE_ATTR kib_completion_msg_t;
+} WIRE_ATTR;
-typedef struct {
+struct kib_msg {
/* First 2 fields fixed FOR ALL TIME */
__u32 ibm_magic; /* I'm an ibnal message */
__u16 ibm_version; /* this is my version number */
@@ -419,14 +415,14 @@ typedef struct {
__u64 ibm_dststamp; /* destination's incarnation */
union {
- kib_connparams_t connparams;
- kib_immediate_msg_t immediate;
- kib_putreq_msg_t putreq;
- kib_putack_msg_t putack;
- kib_get_msg_t get;
- kib_completion_msg_t completion;
+ struct kib_connparams connparams;
+ struct kib_immediate_msg immediate;
+ struct kib_putreq_msg putreq;
+ struct kib_putack_msg putack;
+ struct kib_get_msg get;
+ struct kib_completion_msg completion;
} WIRE_ATTR ibm_u;
-} WIRE_ATTR kib_msg_t;
+} WIRE_ATTR;
#define IBLND_MSG_MAGIC LNET_PROTO_IB_MAGIC /* unique magic */
@@ -445,14 +441,14 @@ typedef struct {
#define IBLND_MSG_GET_REQ 0xd6 /* getreq (sink->src) */
#define IBLND_MSG_GET_DONE 0xd7 /* completion (src->sink: all OK) */
-typedef struct {
+struct kib_rej {
__u32 ibr_magic; /* sender's magic */
__u16 ibr_version; /* sender's version */
__u8 ibr_why; /* reject reason */
__u8 ibr_padding; /* padding */
__u64 ibr_incarnation; /* incarnation of peer */
- kib_connparams_t ibr_cp; /* connection parameters */
-} WIRE_ATTR kib_rej_t;
+ struct kib_connparams ibr_cp; /* connection parameters */
+} WIRE_ATTR;
/* connection rejection reasons */
#define IBLND_REJECT_CONN_RACE 1 /* You lost connection race */
@@ -467,28 +463,26 @@ typedef struct {
/***********************************************************************/
-typedef struct kib_rx /* receive message */
-{
+struct kib_rx { /* receive message */
struct list_head rx_list; /* queue for attention */
struct kib_conn *rx_conn; /* owning conn */
int rx_nob; /* # bytes received (-1 while posted) */
enum ib_wc_status rx_status; /* completion status */
- kib_msg_t *rx_msg; /* message buffer (host vaddr) */
+ struct kib_msg *rx_msg; /* message buffer (host vaddr) */
__u64 rx_msgaddr; /* message buffer (I/O addr) */
DECLARE_PCI_UNMAP_ADDR(rx_msgunmap); /* for dma_unmap_single() */
struct ib_recv_wr rx_wrq; /* receive work item... */
struct ib_sge rx_sge; /* ...and its memory */
-} kib_rx_t;
+};
#define IBLND_POSTRX_DONT_POST 0 /* don't post */
#define IBLND_POSTRX_NO_CREDIT 1 /* post: no credits */
#define IBLND_POSTRX_PEER_CREDIT 2 /* post: give peer back 1 credit */
#define IBLND_POSTRX_RSRVD_CREDIT 3 /* post: give self back 1 reserved credit */
-typedef struct kib_tx /* transmit message */
-{
+struct kib_tx { /* transmit message */
struct list_head tx_list; /* queue on idle_txs ibc_tx_queue etc. */
- kib_tx_pool_t *tx_pool; /* pool I'm from */
+ struct kib_tx_pool *tx_pool; /* pool I'm from */
struct kib_conn *tx_conn; /* owning conn */
short tx_sending; /* # tx callbacks outstanding */
short tx_queued; /* queued for sending */
@@ -497,28 +491,28 @@ typedef struct kib_tx /* transmit message */
unsigned long tx_deadline; /* completion deadline */
__u64 tx_cookie; /* completion cookie */
lnet_msg_t *tx_lntmsg[2]; /* lnet msgs to finalize on completion */
- kib_msg_t *tx_msg; /* message buffer (host vaddr) */
+ struct kib_msg *tx_msg; /* message buffer (host vaddr) */
__u64 tx_msgaddr; /* message buffer (I/O addr) */
DECLARE_PCI_UNMAP_ADDR(tx_msgunmap); /* for dma_unmap_single() */
int tx_nwrq; /* # send work items */
struct ib_rdma_wr *tx_wrq; /* send work items... */
struct ib_sge *tx_sge; /* ...and their memory */
- kib_rdma_desc_t *tx_rd; /* rdma descriptor */
+ struct kib_rdma_desc *tx_rd; /* rdma descriptor */
int tx_nfrags; /* # entries in... */
struct scatterlist *tx_frags; /* dma_map_sg descriptor */
__u64 *tx_pages; /* rdma phys page addrs */
- kib_fmr_t fmr; /* FMR */
+ struct kib_fmr fmr; /* FMR */
int tx_dmadir; /* dma direction */
-} kib_tx_t;
+};
-typedef struct kib_connvars {
- kib_msg_t cv_msg; /* connection-in-progress variables */
-} kib_connvars_t;
+struct kib_connvars {
+ struct kib_msg cv_msg; /* connection-in-progress variables */
+};
-typedef struct kib_conn {
+struct kib_conn {
struct kib_sched_info *ibc_sched; /* scheduler information */
struct kib_peer *ibc_peer; /* owning peer */
- kib_hca_dev_t *ibc_hdev; /* HCA bound on */
+ struct kib_hca_dev *ibc_hdev; /* HCA bound on */
struct list_head ibc_list; /* stash on peer's conn list */
struct list_head ibc_sched_list; /* schedule for attention */
__u16 ibc_version; /* version of connection */
@@ -553,14 +547,14 @@ typedef struct kib_conn {
/* reserve an ACK/DONE msg */
struct list_head ibc_active_txs; /* active tx awaiting completion */
spinlock_t ibc_lock; /* serialise */
- kib_rx_t *ibc_rxs; /* the rx descs */
- kib_pages_t *ibc_rx_pages; /* premapped rx msg pages */
+ struct kib_rx *ibc_rxs; /* the rx descs */
+ struct kib_pages *ibc_rx_pages; /* premapped rx msg pages */
struct rdma_cm_id *ibc_cmid; /* CM id */
struct ib_cq *ibc_cq; /* completion queue */
- kib_connvars_t *ibc_connvars; /* in-progress connection state */
-} kib_conn_t;
+ struct kib_connvars *ibc_connvars; /* in-progress connection state */
+};
#define IBLND_CONN_INIT 0 /* being initialised */
#define IBLND_CONN_ACTIVE_CONNECT 1 /* active sending req */
@@ -569,7 +563,7 @@ typedef struct kib_conn {
#define IBLND_CONN_CLOSING 4 /* being closed */
#define IBLND_CONN_DISCONNECTED 5 /* disconnected */
-typedef struct kib_peer {
+struct kib_peer {
struct list_head ibp_list; /* stash on global peer list */
lnet_nid_t ibp_nid; /* who's on the other end(s) */
lnet_ni_t *ibp_ni; /* LNet interface */
@@ -596,11 +590,11 @@ typedef struct kib_peer {
__u16 ibp_max_frags;
/* max_peer_credits */
__u16 ibp_queue_depth;
-} kib_peer_t;
+};
-extern kib_data_t kiblnd_data;
+extern struct kib_data kiblnd_data;
-void kiblnd_hdev_destroy(kib_hca_dev_t *hdev);
+void kiblnd_hdev_destroy(struct kib_hca_dev *hdev);
int kiblnd_msg_queue_size(int version, struct lnet_ni *ni);
@@ -645,14 +639,14 @@ kiblnd_concurrent_sends(int version, struct lnet_ni *ni)
}
static inline void
-kiblnd_hdev_addref_locked(kib_hca_dev_t *hdev)
+kiblnd_hdev_addref_locked(struct kib_hca_dev *hdev)
{
LASSERT(atomic_read(&hdev->ibh_ref) > 0);
atomic_inc(&hdev->ibh_ref);
}
static inline void
-kiblnd_hdev_decref(kib_hca_dev_t *hdev)
+kiblnd_hdev_decref(struct kib_hca_dev *hdev)
{
LASSERT(atomic_read(&hdev->ibh_ref) > 0);
if (atomic_dec_and_test(&hdev->ibh_ref))
@@ -660,7 +654,7 @@ kiblnd_hdev_decref(kib_hca_dev_t *hdev)
}
static inline int
-kiblnd_dev_can_failover(kib_dev_t *dev)
+kiblnd_dev_can_failover(struct kib_dev *dev)
{
if (!list_empty(&dev->ibd_fail_list)) /* already scheduled */
return 0;
@@ -716,7 +710,7 @@ do { \
} while (0)
static inline bool
-kiblnd_peer_connecting(kib_peer_t *peer)
+kiblnd_peer_connecting(struct kib_peer *peer)
{
return peer->ibp_connecting ||
peer->ibp_reconnecting ||
@@ -724,7 +718,7 @@ kiblnd_peer_connecting(kib_peer_t *peer)
}
static inline bool
-kiblnd_peer_idle(kib_peer_t *peer)
+kiblnd_peer_idle(struct kib_peer *peer)
{
return !kiblnd_peer_connecting(peer) && list_empty(&peer->ibp_conns);
}
@@ -739,23 +733,23 @@ kiblnd_nid2peerlist(lnet_nid_t nid)
}
static inline int
-kiblnd_peer_active(kib_peer_t *peer)
+kiblnd_peer_active(struct kib_peer *peer)
{
/* Am I in the peer hash table? */
return !list_empty(&peer->ibp_list);
}
-static inline kib_conn_t *
-kiblnd_get_conn_locked(kib_peer_t *peer)
+static inline struct kib_conn *
+kiblnd_get_conn_locked(struct kib_peer *peer)
{
LASSERT(!list_empty(&peer->ibp_conns));
/* just return the first connection */
- return list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list);
+ return list_entry(peer->ibp_conns.next, struct kib_conn, ibc_list);
}
static inline int
-kiblnd_send_keepalive(kib_conn_t *conn)
+kiblnd_send_keepalive(struct kib_conn *conn)
{
return (*kiblnd_tunables.kib_keepalive > 0) &&
cfs_time_after(jiffies, conn->ibc_last_send +
@@ -764,7 +758,7 @@ kiblnd_send_keepalive(kib_conn_t *conn)
}
static inline int
-kiblnd_need_noop(kib_conn_t *conn)
+kiblnd_need_noop(struct kib_conn *conn)
{
struct lnet_ioctl_config_o2iblnd_tunables *tunables;
lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
@@ -800,14 +794,14 @@ kiblnd_need_noop(kib_conn_t *conn)
}
static inline void
-kiblnd_abort_receives(kib_conn_t *conn)
+kiblnd_abort_receives(struct kib_conn *conn)
{
ib_modify_qp(conn->ibc_cmid->qp,
&kiblnd_data.kib_error_qpa, IB_QP_STATE);
}
static inline const char *
-kiblnd_queue2str(kib_conn_t *conn, struct list_head *q)
+kiblnd_queue2str(struct kib_conn *conn, struct list_head *q)
{
if (q == &conn->ibc_tx_queue)
return "tx_queue";
@@ -858,21 +852,21 @@ kiblnd_wreqid2type(__u64 wreqid)
}
static inline void
-kiblnd_set_conn_state(kib_conn_t *conn, int state)
+kiblnd_set_conn_state(struct kib_conn *conn, int state)
{
conn->ibc_state = state;
mb();
}
static inline void
-kiblnd_init_msg(kib_msg_t *msg, int type, int body_nob)
+kiblnd_init_msg(struct kib_msg *msg, int type, int body_nob)
{
msg->ibm_type = type;
- msg->ibm_nob = offsetof(kib_msg_t, ibm_u) + body_nob;
+ msg->ibm_nob = offsetof(struct kib_msg, ibm_u) + body_nob;
}
static inline int
-kiblnd_rd_size(kib_rdma_desc_t *rd)
+kiblnd_rd_size(struct kib_rdma_desc *rd)
{
int i;
int size;
@@ -884,25 +878,25 @@ kiblnd_rd_size(kib_rdma_desc_t *rd)
}
static inline __u64
-kiblnd_rd_frag_addr(kib_rdma_desc_t *rd, int index)
+kiblnd_rd_frag_addr(struct kib_rdma_desc *rd, int index)
{
return rd->rd_frags[index].rf_addr;
}
static inline __u32
-kiblnd_rd_frag_size(kib_rdma_desc_t *rd, int index)
+kiblnd_rd_frag_size(struct kib_rdma_desc *rd, int index)
{
return rd->rd_frags[index].rf_nob;
}
static inline __u32
-kiblnd_rd_frag_key(kib_rdma_desc_t *rd, int index)
+kiblnd_rd_frag_key(struct kib_rdma_desc *rd, int index)
{
return rd->rd_key;
}
static inline int
-kiblnd_rd_consume_frag(kib_rdma_desc_t *rd, int index, __u32 nob)
+kiblnd_rd_consume_frag(struct kib_rdma_desc *rd, int index, __u32 nob)
{
if (nob < rd->rd_frags[index].rf_nob) {
rd->rd_frags[index].rf_addr += nob;
@@ -915,14 +909,14 @@ kiblnd_rd_consume_frag(kib_rdma_desc_t *rd, int index, __u32 nob)
}
static inline int
-kiblnd_rd_msg_size(kib_rdma_desc_t *rd, int msgtype, int n)
+kiblnd_rd_msg_size(struct kib_rdma_desc *rd, int msgtype, int n)
{
LASSERT(msgtype == IBLND_MSG_GET_REQ ||
msgtype == IBLND_MSG_PUT_ACK);
return msgtype == IBLND_MSG_GET_REQ ?
- offsetof(kib_get_msg_t, ibgm_rd.rd_frags[n]) :
- offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[n]);
+ offsetof(struct kib_get_msg, ibgm_rd.rd_frags[n]) :
+ offsetof(struct kib_putack_msg, ibpam_rd.rd_frags[n]);
}
static inline __u64
@@ -981,17 +975,17 @@ static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
#define KIBLND_CONN_PARAM(e) ((e)->param.conn.private_data)
#define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len)
-struct ib_mr *kiblnd_find_rd_dma_mr(struct lnet_ni *ni, kib_rdma_desc_t *rd,
+struct ib_mr *kiblnd_find_rd_dma_mr(struct lnet_ni *ni, struct kib_rdma_desc *rd,
int negotiated_nfrags);
-void kiblnd_map_rx_descs(kib_conn_t *conn);
-void kiblnd_unmap_rx_descs(kib_conn_t *conn);
-void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node);
-struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps);
+void kiblnd_map_rx_descs(struct kib_conn *conn);
+void kiblnd_unmap_rx_descs(struct kib_conn *conn);
+void kiblnd_pool_free_node(struct kib_pool *pool, struct list_head *node);
+struct list_head *kiblnd_pool_alloc_node(struct kib_poolset *ps);
-int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, kib_tx_t *tx,
- kib_rdma_desc_t *rd, __u32 nob, __u64 iov,
- kib_fmr_t *fmr);
-void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status);
+int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx,
+ struct kib_rdma_desc *rd, __u32 nob, __u64 iov,
+ struct kib_fmr *fmr);
+void kiblnd_fmr_pool_unmap(struct kib_fmr *fmr, int status);
int kiblnd_tunables_setup(struct lnet_ni *ni);
void kiblnd_tunables_init(void);
@@ -1001,30 +995,31 @@ int kiblnd_scheduler(void *arg);
int kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name);
int kiblnd_failover_thread(void *arg);
-int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages);
+int kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages);
int kiblnd_cm_callback(struct rdma_cm_id *cmid,
struct rdma_cm_event *event);
int kiblnd_translate_mtu(int value);
-int kiblnd_dev_failover(kib_dev_t *dev);
-int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid);
-void kiblnd_destroy_peer(kib_peer_t *peer);
-bool kiblnd_reconnect_peer(kib_peer_t *peer);
-void kiblnd_destroy_dev(kib_dev_t *dev);
-void kiblnd_unlink_peer_locked(kib_peer_t *peer);
-kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid);
-int kiblnd_close_stale_conns_locked(kib_peer_t *peer,
+int kiblnd_dev_failover(struct kib_dev *dev);
+int kiblnd_create_peer(lnet_ni_t *ni, struct kib_peer **peerp, lnet_nid_t nid);
+void kiblnd_destroy_peer(struct kib_peer *peer);
+bool kiblnd_reconnect_peer(struct kib_peer *peer);
+void kiblnd_destroy_dev(struct kib_dev *dev);
+void kiblnd_unlink_peer_locked(struct kib_peer *peer);
+struct kib_peer *kiblnd_find_peer_locked(lnet_nid_t nid);
+int kiblnd_close_stale_conns_locked(struct kib_peer *peer,
int version, __u64 incarnation);
-int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why);
+int kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why);
-kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
- int state, int version);
-void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn);
-void kiblnd_close_conn(kib_conn_t *conn, int error);
-void kiblnd_close_conn_locked(kib_conn_t *conn, int error);
+struct kib_conn *kiblnd_create_conn(struct kib_peer *peer,
+ struct rdma_cm_id *cmid,
+ int state, int version);
+void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn);
+void kiblnd_close_conn(struct kib_conn *conn, int error);
+void kiblnd_close_conn_locked(struct kib_conn *conn, int error);
-void kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid);
+void kiblnd_launch_tx(lnet_ni_t *ni, struct kib_tx *tx, lnet_nid_t nid);
void kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist,
int status);
@@ -1032,10 +1027,10 @@ void kiblnd_qp_event(struct ib_event *event, void *arg);
void kiblnd_cq_event(struct ib_event *event, void *arg);
void kiblnd_cq_completion(struct ib_cq *cq, void *arg);
-void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version,
+void kiblnd_pack_msg(lnet_ni_t *ni, struct kib_msg *msg, int version,
int credits, lnet_nid_t dstnid, __u64 dststamp);
-int kiblnd_unpack_msg(kib_msg_t *msg, int nob);
-int kiblnd_post_rx(kib_rx_t *rx, int credit);
+int kiblnd_unpack_msg(struct kib_msg *msg, int nob);
+int kiblnd_post_rx(struct kib_rx *rx, int credit);
int kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
int kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 845e49a52..596a697b9 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -40,22 +36,22 @@
#include "o2iblnd.h"
-static void kiblnd_peer_alive(kib_peer_t *peer);
-static void kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error);
-static void kiblnd_check_sends(kib_conn_t *conn);
-static void kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx,
+static void kiblnd_peer_alive(struct kib_peer *peer);
+static void kiblnd_peer_connect_failed(struct kib_peer *peer, int active, int error);
+static void kiblnd_check_sends(struct kib_conn *conn);
+static void kiblnd_init_tx_msg(lnet_ni_t *ni, struct kib_tx *tx,
int type, int body_nob);
-static int kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
- int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie);
-static void kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn);
-static void kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn);
-static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx);
+static int kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type,
+ int resid, struct kib_rdma_desc *dstrd, __u64 dstcookie);
+static void kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn);
+static void kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn);
+static void kiblnd_unmap_tx(lnet_ni_t *ni, struct kib_tx *tx);
static void
-kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx)
+kiblnd_tx_done(lnet_ni_t *ni, struct kib_tx *tx)
{
lnet_msg_t *lntmsg[2];
- kib_net_t *net = ni->ni_data;
+ struct kib_net *net = ni->ni_data;
int rc;
int i;
@@ -97,10 +93,10 @@ kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx)
void
kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int status)
{
- kib_tx_t *tx;
+ struct kib_tx *tx;
while (!list_empty(txlist)) {
- tx = list_entry(txlist->next, kib_tx_t, tx_list);
+ tx = list_entry(txlist->next, struct kib_tx, tx_list);
list_del(&tx->tx_list);
/* complete now */
@@ -110,19 +106,19 @@ kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int status)
}
}
-static kib_tx_t *
+static struct kib_tx *
kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target)
{
- kib_net_t *net = (kib_net_t *)ni->ni_data;
+ struct kib_net *net = (struct kib_net *)ni->ni_data;
struct list_head *node;
- kib_tx_t *tx;
- kib_tx_poolset_t *tps;
+ struct kib_tx *tx;
+ struct kib_tx_poolset *tps;
tps = net->ibn_tx_ps[lnet_cpt_of_nid(target)];
node = kiblnd_pool_alloc_node(&tps->tps_poolset);
if (!node)
return NULL;
- tx = list_entry(node, kib_tx_t, tx_list);
+ tx = list_entry(node, struct kib_tx, tx_list);
LASSERT(!tx->tx_nwrq);
LASSERT(!tx->tx_queued);
@@ -138,9 +134,9 @@ kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target)
}
static void
-kiblnd_drop_rx(kib_rx_t *rx)
+kiblnd_drop_rx(struct kib_rx *rx)
{
- kib_conn_t *conn = rx->rx_conn;
+ struct kib_conn *conn = rx->rx_conn;
struct kib_sched_info *sched = conn->ibc_sched;
unsigned long flags;
@@ -153,10 +149,10 @@ kiblnd_drop_rx(kib_rx_t *rx)
}
int
-kiblnd_post_rx(kib_rx_t *rx, int credit)
+kiblnd_post_rx(struct kib_rx *rx, int credit)
{
- kib_conn_t *conn = rx->rx_conn;
- kib_net_t *net = conn->ibc_peer->ibp_ni->ni_data;
+ struct kib_conn *conn = rx->rx_conn;
+ struct kib_net *net = conn->ibc_peer->ibp_ni->ni_data;
struct ib_recv_wr *bad_wrq = NULL;
struct ib_mr *mr = conn->ibc_hdev->ibh_mrs;
int rc;
@@ -223,13 +219,13 @@ out:
return rc;
}
-static kib_tx_t *
-kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie)
+static struct kib_tx *
+kiblnd_find_waiting_tx_locked(struct kib_conn *conn, int txtype, __u64 cookie)
{
struct list_head *tmp;
list_for_each(tmp, &conn->ibc_active_txs) {
- kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list);
+ struct kib_tx *tx = list_entry(tmp, struct kib_tx, tx_list);
LASSERT(!tx->tx_queued);
LASSERT(tx->tx_sending || tx->tx_waiting);
@@ -249,9 +245,9 @@ kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie)
}
static void
-kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
+kiblnd_handle_completion(struct kib_conn *conn, int txtype, int status, __u64 cookie)
{
- kib_tx_t *tx;
+ struct kib_tx *tx;
lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
int idle;
@@ -287,10 +283,10 @@ kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
}
static void
-kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie)
+kiblnd_send_completion(struct kib_conn *conn, int type, int status, __u64 cookie)
{
lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
- kib_tx_t *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
+ struct kib_tx *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
if (!tx) {
CERROR("Can't get tx for completion %x for %s\n",
@@ -300,19 +296,19 @@ kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie)
tx->tx_msg->ibm_u.completion.ibcm_status = status;
tx->tx_msg->ibm_u.completion.ibcm_cookie = cookie;
- kiblnd_init_tx_msg(ni, tx, type, sizeof(kib_completion_msg_t));
+ kiblnd_init_tx_msg(ni, tx, type, sizeof(struct kib_completion_msg));
kiblnd_queue_tx(tx, conn);
}
static void
-kiblnd_handle_rx(kib_rx_t *rx)
+kiblnd_handle_rx(struct kib_rx *rx)
{
- kib_msg_t *msg = rx->rx_msg;
- kib_conn_t *conn = rx->rx_conn;
+ struct kib_msg *msg = rx->rx_msg;
+ struct kib_conn *conn = rx->rx_conn;
lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
int credits = msg->ibm_credits;
- kib_tx_t *tx;
+ struct kib_tx *tx;
int rc = 0;
int rc2;
int post_credit;
@@ -467,12 +463,12 @@ kiblnd_handle_rx(kib_rx_t *rx)
}
static void
-kiblnd_rx_complete(kib_rx_t *rx, int status, int nob)
+kiblnd_rx_complete(struct kib_rx *rx, int status, int nob)
{
- kib_msg_t *msg = rx->rx_msg;
- kib_conn_t *conn = rx->rx_conn;
+ struct kib_msg *msg = rx->rx_msg;
+ struct kib_conn *conn = rx->rx_conn;
lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
- kib_net_t *net = ni->ni_data;
+ struct kib_net *net = ni->ni_data;
int rc;
int err = -EIO;
@@ -561,10 +557,10 @@ kiblnd_kvaddr_to_page(unsigned long vaddr)
}
static int
-kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, __u32 nob)
+kiblnd_fmr_map_tx(struct kib_net *net, struct kib_tx *tx, struct kib_rdma_desc *rd, __u32 nob)
{
- kib_hca_dev_t *hdev;
- kib_fmr_poolset_t *fps;
+ struct kib_hca_dev *hdev;
+ struct kib_fmr_poolset *fps;
int cpt;
int rc;
@@ -593,9 +589,9 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, __u32 nob)
return 0;
}
-static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
+static void kiblnd_unmap_tx(lnet_ni_t *ni, struct kib_tx *tx)
{
- kib_net_t *net = ni->ni_data;
+ struct kib_net *net = ni->ni_data;
LASSERT(net);
@@ -609,11 +605,11 @@ static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
}
}
-static int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
+static int kiblnd_map_tx(lnet_ni_t *ni, struct kib_tx *tx, struct kib_rdma_desc *rd,
int nfrags)
{
- kib_net_t *net = ni->ni_data;
- kib_hca_dev_t *hdev = net->ibn_dev->ibd_hdev;
+ struct kib_net *net = ni->ni_data;
+ struct kib_hca_dev *hdev = net->ibn_dev->ibd_hdev;
struct ib_mr *mr = NULL;
__u32 nob;
int i;
@@ -651,10 +647,10 @@ static int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
}
static int
-kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
+kiblnd_setup_rd_iov(lnet_ni_t *ni, struct kib_tx *tx, struct kib_rdma_desc *rd,
unsigned int niov, struct kvec *iov, int offset, int nob)
{
- kib_net_t *net = ni->ni_data;
+ struct kib_net *net = ni->ni_data;
struct page *page;
struct scatterlist *sg;
unsigned long vaddr;
@@ -689,6 +685,10 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
sg_set_page(sg, page, fragnob, page_offset);
sg = sg_next(sg);
+ if (!sg) {
+ CERROR("lacking enough sg entries to map tx\n");
+ return -EFAULT;
+ }
if (offset + fragnob < iov->iov_len) {
offset += fragnob;
@@ -704,10 +704,10 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
}
static int
-kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
+kiblnd_setup_rd_kiov(lnet_ni_t *ni, struct kib_tx *tx, struct kib_rdma_desc *rd,
int nkiov, lnet_kiov_t *kiov, int offset, int nob)
{
- kib_net_t *net = ni->ni_data;
+ struct kib_net *net = ni->ni_data;
struct scatterlist *sg;
int fragnob;
@@ -733,6 +733,10 @@ kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
sg_set_page(sg, kiov->kiov_page, fragnob,
kiov->kiov_offset + offset);
sg = sg_next(sg);
+ if (!sg) {
+ CERROR("lacking enough sg entries to map tx\n");
+ return -EFAULT;
+ }
offset = 0;
kiov++;
@@ -744,11 +748,11 @@ kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
}
static int
-kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
+kiblnd_post_tx_locked(struct kib_conn *conn, struct kib_tx *tx, int credit)
__must_hold(&conn->ibc_lock)
{
- kib_msg_t *msg = tx->tx_msg;
- kib_peer_t *peer = conn->ibc_peer;
+ struct kib_msg *msg = tx->tx_msg;
+ struct kib_peer *peer = conn->ibc_peer;
struct lnet_ni *ni = peer->ibp_ni;
int ver = conn->ibc_version;
int rc;
@@ -901,11 +905,11 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
}
static void
-kiblnd_check_sends(kib_conn_t *conn)
+kiblnd_check_sends(struct kib_conn *conn)
{
int ver = conn->ibc_version;
lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
- kib_tx_t *tx;
+ struct kib_tx *tx;
/* Don't send anything until after the connection is established */
if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
@@ -924,7 +928,7 @@ kiblnd_check_sends(kib_conn_t *conn)
while (conn->ibc_reserved_credits > 0 &&
!list_empty(&conn->ibc_tx_queue_rsrvd)) {
tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
- kib_tx_t, tx_list);
+ struct kib_tx, tx_list);
list_del(&tx->tx_list);
list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
conn->ibc_reserved_credits--;
@@ -948,16 +952,16 @@ kiblnd_check_sends(kib_conn_t *conn)
if (!list_empty(&conn->ibc_tx_queue_nocred)) {
credit = 0;
tx = list_entry(conn->ibc_tx_queue_nocred.next,
- kib_tx_t, tx_list);
+ struct kib_tx, tx_list);
} else if (!list_empty(&conn->ibc_tx_noops)) {
LASSERT(!IBLND_OOB_CAPABLE(ver));
credit = 1;
tx = list_entry(conn->ibc_tx_noops.next,
- kib_tx_t, tx_list);
+ struct kib_tx, tx_list);
} else if (!list_empty(&conn->ibc_tx_queue)) {
credit = 1;
tx = list_entry(conn->ibc_tx_queue.next,
- kib_tx_t, tx_list);
+ struct kib_tx, tx_list);
} else {
break;
}
@@ -970,10 +974,10 @@ kiblnd_check_sends(kib_conn_t *conn)
}
static void
-kiblnd_tx_complete(kib_tx_t *tx, int status)
+kiblnd_tx_complete(struct kib_tx *tx, int status)
{
int failed = (status != IB_WC_SUCCESS);
- kib_conn_t *conn = tx->tx_conn;
+ struct kib_conn *conn = tx->tx_conn;
int idle;
LASSERT(tx->tx_sending > 0);
@@ -1025,12 +1029,12 @@ kiblnd_tx_complete(kib_tx_t *tx, int status)
}
static void
-kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
+kiblnd_init_tx_msg(lnet_ni_t *ni, struct kib_tx *tx, int type, int body_nob)
{
- kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
+ struct kib_hca_dev *hdev = tx->tx_pool->tpo_hdev;
struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq];
struct ib_rdma_wr *wrq = &tx->tx_wrq[tx->tx_nwrq];
- int nob = offsetof(kib_msg_t, ibm_u) + body_nob;
+ int nob = offsetof(struct kib_msg, ibm_u) + body_nob;
struct ib_mr *mr = hdev->ibh_mrs;
LASSERT(tx->tx_nwrq >= 0);
@@ -1057,11 +1061,11 @@ kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
}
static int
-kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
- int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie)
+kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type,
+ int resid, struct kib_rdma_desc *dstrd, __u64 dstcookie)
{
- kib_msg_t *ibmsg = tx->tx_msg;
- kib_rdma_desc_t *srcrd = tx->tx_rd;
+ struct kib_msg *ibmsg = tx->tx_msg;
+ struct kib_rdma_desc *srcrd = tx->tx_rd;
struct ib_sge *sge = &tx->tx_sge[0];
struct ib_rdma_wr *wrq, *next;
int rc = resid;
@@ -1099,7 +1103,7 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
wrknob = min(min(kiblnd_rd_frag_size(srcrd, srcidx),
kiblnd_rd_frag_size(dstrd, dstidx)),
- (__u32) resid);
+ (__u32)resid);
sge = &tx->tx_sge[tx->tx_nwrq];
sge->addr = kiblnd_rd_frag_addr(srcrd, srcidx);
@@ -1135,13 +1139,13 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
ibmsg->ibm_u.completion.ibcm_status = rc;
ibmsg->ibm_u.completion.ibcm_cookie = dstcookie;
kiblnd_init_tx_msg(conn->ibc_peer->ibp_ni, tx,
- type, sizeof(kib_completion_msg_t));
+ type, sizeof(struct kib_completion_msg));
return rc;
}
static void
-kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn)
+kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn)
{
struct list_head *q;
@@ -1196,7 +1200,7 @@ kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn)
}
static void
-kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn)
+kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn)
{
spin_lock(&conn->ibc_lock);
kiblnd_queue_tx_locked(tx, conn);
@@ -1243,11 +1247,11 @@ static int kiblnd_resolve_addr(struct rdma_cm_id *cmid,
}
static void
-kiblnd_connect_peer(kib_peer_t *peer)
+kiblnd_connect_peer(struct kib_peer *peer)
{
struct rdma_cm_id *cmid;
- kib_dev_t *dev;
- kib_net_t *net = peer->ibp_ni->ni_data;
+ struct kib_dev *dev;
+ struct kib_net *net = peer->ibp_ni->ni_data;
struct sockaddr_in srcaddr;
struct sockaddr_in dstaddr;
int rc;
@@ -1311,7 +1315,7 @@ kiblnd_connect_peer(kib_peer_t *peer)
}
bool
-kiblnd_reconnect_peer(kib_peer_t *peer)
+kiblnd_reconnect_peer(struct kib_peer *peer)
{
rwlock_t *glock = &kiblnd_data.kib_global_lock;
char *reason = NULL;
@@ -1361,11 +1365,11 @@ no_reconnect:
}
void
-kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
+kiblnd_launch_tx(lnet_ni_t *ni, struct kib_tx *tx, lnet_nid_t nid)
{
- kib_peer_t *peer;
- kib_peer_t *peer2;
- kib_conn_t *conn;
+ struct kib_peer *peer;
+ struct kib_peer *peer2;
+ struct kib_conn *conn;
rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
unsigned long flags;
int rc;
@@ -1468,7 +1472,7 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
peer->ibp_connecting = 1;
/* always called with a ref on ni, which prevents ni from being shut down */
- LASSERT(!((kib_net_t *)ni->ni_data)->ibn_shutdown);
+ LASSERT(!((struct kib_net *)ni->ni_data)->ibn_shutdown);
if (tx)
list_add_tail(&tx->tx_list, &peer->ibp_tx_queue);
@@ -1495,9 +1499,9 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
unsigned int payload_offset = lntmsg->msg_offset;
unsigned int payload_nob = lntmsg->msg_len;
- kib_msg_t *ibmsg;
- kib_rdma_desc_t *rd;
- kib_tx_t *tx;
+ struct kib_msg *ibmsg;
+ struct kib_rdma_desc *rd;
+ struct kib_tx *tx;
int nob;
int rc;
@@ -1528,7 +1532,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
break; /* send IMMEDIATE */
/* is the REPLY message too small for RDMA? */
- nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]);
+ nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]);
if (nob <= IBLND_MSG_SIZE)
break; /* send IMMEDIATE */
@@ -1558,7 +1562,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
return -EIO;
}
- nob = offsetof(kib_get_msg_t, ibgm_rd.rd_frags[rd->rd_nfrags]);
+ nob = offsetof(struct kib_get_msg, ibgm_rd.rd_frags[rd->rd_nfrags]);
ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie;
ibmsg->ibm_u.get.ibgm_hdr = *hdr;
@@ -1580,7 +1584,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
case LNET_MSG_REPLY:
case LNET_MSG_PUT:
/* Is the payload small enough not to need RDMA? */
- nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob]);
+ nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob]);
if (nob <= IBLND_MSG_SIZE)
break; /* send IMMEDIATE */
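
The recurring offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[n]) expression computes the on-wire size of an IMMEDIATE message carrying n payload bytes, since ibim_payload is the trailing array of the immediate variant inside the ibm_u union; anything that fits in IBLND_MSG_SIZE is sent inline, everything larger is set up for RDMA. A small user-space analogue of the sizing trick, with stand-in type and constant names (the kernel's subscripted offsetof form is equivalent to the addition used here):

#include <stddef.h>
#include <stdio.h>

#define MSG_SIZE 4096                   /* stand-in for IBLND_MSG_SIZE */

struct imm_msg {                        /* stand-in for the immediate variant */
        unsigned int hdr[8];            /* fixed header fields */
        char payload[];                 /* trailing payload, sized at send time */
};

int main(void)
{
        size_t payload_nob = 1000;      /* hypothetical payload size */
        /* Same arithmetic as offsetof(..., ibim_payload[payload_nob]). */
        size_t nob = offsetof(struct imm_msg, payload) + payload_nob;

        puts(nob <= MSG_SIZE ? "send IMMEDIATE" : "set up RDMA");
        return 0;
}
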
@@ -1610,7 +1614,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
ibmsg = tx->tx_msg;
ibmsg->ibm_u.putreq.ibprm_hdr = *hdr;
ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie;
- kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ, sizeof(kib_putreq_msg_t));
+ kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ, sizeof(struct kib_putreq_msg));
tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */
tx->tx_waiting = 1; /* waiting for PUT_{ACK,NAK} */
@@ -1620,7 +1624,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
/* send IMMEDIATE */
- LASSERT(offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob])
+ LASSERT(offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob])
<= IBLND_MSG_SIZE);
tx = kiblnd_get_idle_tx(ni, target.nid);
@@ -1635,16 +1639,16 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
if (payload_kiov)
lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg,
- offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
+ offsetof(struct kib_msg, ibm_u.immediate.ibim_payload),
payload_niov, payload_kiov,
payload_offset, payload_nob);
else
lnet_copy_iov2flat(IBLND_MSG_SIZE, ibmsg,
- offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
+ offsetof(struct kib_msg, ibm_u.immediate.ibim_payload),
payload_niov, payload_iov,
payload_offset, payload_nob);
- nob = offsetof(kib_immediate_msg_t, ibim_payload[payload_nob]);
+ nob = offsetof(struct kib_immediate_msg, ibim_payload[payload_nob]);
kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */
@@ -1653,7 +1657,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
}
static void
-kiblnd_reply(lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg)
+kiblnd_reply(lnet_ni_t *ni, struct kib_rx *rx, lnet_msg_t *lntmsg)
{
lnet_process_id_t target = lntmsg->msg_target;
unsigned int niov = lntmsg->msg_niov;
@@ -1661,7 +1665,7 @@ kiblnd_reply(lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg)
lnet_kiov_t *kiov = lntmsg->msg_kiov;
unsigned int offset = lntmsg->msg_offset;
unsigned int nob = lntmsg->msg_len;
- kib_tx_t *tx;
+ struct kib_tx *tx;
int rc;
tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid);
@@ -1718,10 +1722,10 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen)
{
- kib_rx_t *rx = private;
- kib_msg_t *rxmsg = rx->rx_msg;
- kib_conn_t *conn = rx->rx_conn;
- kib_tx_t *tx;
+ struct kib_rx *rx = private;
+ struct kib_msg *rxmsg = rx->rx_msg;
+ struct kib_conn *conn = rx->rx_conn;
+ struct kib_tx *tx;
int nob;
int post_credit = IBLND_POSTRX_PEER_CREDIT;
int rc = 0;
@@ -1736,7 +1740,7 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
LBUG();
case IBLND_MSG_IMMEDIATE:
- nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[rlen]);
+ nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[rlen]);
if (nob > rx->rx_nob) {
CERROR("Immediate message from %s too big: %d(%d)\n",
libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid),
@@ -1748,19 +1752,19 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
if (kiov)
lnet_copy_flat2kiov(niov, kiov, offset,
IBLND_MSG_SIZE, rxmsg,
- offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
+ offsetof(struct kib_msg, ibm_u.immediate.ibim_payload),
mlen);
else
lnet_copy_flat2iov(niov, iov, offset,
IBLND_MSG_SIZE, rxmsg,
- offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
+ offsetof(struct kib_msg, ibm_u.immediate.ibim_payload),
mlen);
lnet_finalize(ni, lntmsg, 0);
break;
case IBLND_MSG_PUT_REQ: {
- kib_msg_t *txmsg;
- kib_rdma_desc_t *rd;
+ struct kib_msg *txmsg;
+ struct kib_rdma_desc *rd;
if (!mlen) {
lnet_finalize(ni, lntmsg, 0);
@@ -1796,7 +1800,7 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
break;
}
- nob = offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[rd->rd_nfrags]);
+ nob = offsetof(struct kib_putack_msg, ibpam_rd.rd_frags[rd->rd_nfrags]);
txmsg->ibm_u.putack.ibpam_src_cookie = rxmsg->ibm_u.putreq.ibprm_cookie;
txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie;
@@ -1847,7 +1851,7 @@ kiblnd_thread_fini(void)
}
static void
-kiblnd_peer_alive(kib_peer_t *peer)
+kiblnd_peer_alive(struct kib_peer *peer)
{
/* This is racy, but everyone's only writing cfs_time_current() */
peer->ibp_last_alive = cfs_time_current();
@@ -1855,7 +1859,7 @@ kiblnd_peer_alive(kib_peer_t *peer)
}
static void
-kiblnd_peer_notify(kib_peer_t *peer)
+kiblnd_peer_notify(struct kib_peer *peer)
{
int error = 0;
unsigned long last_alive = 0;
@@ -1878,7 +1882,7 @@ kiblnd_peer_notify(kib_peer_t *peer)
}
void
-kiblnd_close_conn_locked(kib_conn_t *conn, int error)
+kiblnd_close_conn_locked(struct kib_conn *conn, int error)
{
/*
* This just does the immediate housekeeping. 'error' is zero for a
@@ -1888,8 +1892,8 @@ kiblnd_close_conn_locked(kib_conn_t *conn, int error)
* already dealing with it (either to set it up or tear it down).
* Caller holds kib_global_lock exclusively in irq context
*/
- kib_peer_t *peer = conn->ibc_peer;
- kib_dev_t *dev;
+ struct kib_peer *peer = conn->ibc_peer;
+ struct kib_dev *dev;
unsigned long flags;
LASSERT(error || conn->ibc_state >= IBLND_CONN_ESTABLISHED);
@@ -1918,7 +1922,7 @@ kiblnd_close_conn_locked(kib_conn_t *conn, int error)
list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
}
- dev = ((kib_net_t *)peer->ibp_ni->ni_data)->ibn_dev;
+ dev = ((struct kib_net *)peer->ibp_ni->ni_data)->ibn_dev;
list_del(&conn->ibc_list);
/* connd (see below) takes over ibc_list's ref */
@@ -1948,7 +1952,7 @@ kiblnd_close_conn_locked(kib_conn_t *conn, int error)
}
void
-kiblnd_close_conn(kib_conn_t *conn, int error)
+kiblnd_close_conn(struct kib_conn *conn, int error)
{
unsigned long flags;
@@ -1960,11 +1964,11 @@ kiblnd_close_conn(kib_conn_t *conn, int error)
}
static void
-kiblnd_handle_early_rxs(kib_conn_t *conn)
+kiblnd_handle_early_rxs(struct kib_conn *conn)
{
unsigned long flags;
- kib_rx_t *rx;
- kib_rx_t *tmp;
+ struct kib_rx *rx;
+ struct kib_rx *tmp;
LASSERT(!in_interrupt());
LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
@@ -1982,17 +1986,17 @@ kiblnd_handle_early_rxs(kib_conn_t *conn)
}
static void
-kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs)
+kiblnd_abort_txs(struct kib_conn *conn, struct list_head *txs)
{
LIST_HEAD(zombies);
struct list_head *tmp;
struct list_head *nxt;
- kib_tx_t *tx;
+ struct kib_tx *tx;
spin_lock(&conn->ibc_lock);
list_for_each_safe(tmp, nxt, txs) {
- tx = list_entry(tmp, kib_tx_t, tx_list);
+ tx = list_entry(tmp, struct kib_tx, tx_list);
if (txs == &conn->ibc_active_txs) {
LASSERT(!tx->tx_queued);
@@ -2017,7 +2021,7 @@ kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs)
}
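
Every list_entry() conversion in this patch is purely textual: list_entry(ptr, type, member) expands to container_of(), recovering the enclosing structure from the address of its embedded list_head, so struct kib_tx and the old kib_tx_t name the same layout. A self-contained illustration of the idiom, using a hypothetical struct and a simplified container_of (the kernel's adds a type check):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

struct demo_tx {                        /* hypothetical stand-in for struct kib_tx */
        int cookie;
        struct list_head tx_list;       /* embedded queue linkage */
};

int main(void)
{
        struct demo_tx tx = { .cookie = 42 };
        struct list_head *pos = &tx.tx_list;

        /* Recover the enclosing struct from the embedded list node. */
        printf("%d\n", list_entry(pos, struct demo_tx, tx_list)->cookie);
        return 0;
}
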
static void
-kiblnd_finalise_conn(kib_conn_t *conn)
+kiblnd_finalise_conn(struct kib_conn *conn)
{
LASSERT(!in_interrupt());
LASSERT(conn->ibc_state > IBLND_CONN_INIT);
@@ -2045,7 +2049,7 @@ kiblnd_finalise_conn(kib_conn_t *conn)
}
static void
-kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error)
+kiblnd_peer_connect_failed(struct kib_peer *peer, int active, int error)
{
LIST_HEAD(zombies);
unsigned long flags;
@@ -2099,11 +2103,11 @@ kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error)
}
static void
-kiblnd_connreq_done(kib_conn_t *conn, int status)
+kiblnd_connreq_done(struct kib_conn *conn, int status)
{
- kib_peer_t *peer = conn->ibc_peer;
- kib_tx_t *tx;
- kib_tx_t *tmp;
+ struct kib_peer *peer = conn->ibc_peer;
+ struct kib_tx *tx;
+ struct kib_tx *tmp;
struct list_head txs;
unsigned long flags;
int active;
@@ -2209,7 +2213,7 @@ kiblnd_connreq_done(kib_conn_t *conn, int status)
}
static void
-kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej)
+kiblnd_reject(struct rdma_cm_id *cmid, struct kib_rej *rej)
{
int rc;
@@ -2223,17 +2227,17 @@ static int
kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
{
rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
- kib_msg_t *reqmsg = priv;
- kib_msg_t *ackmsg;
- kib_dev_t *ibdev;
- kib_peer_t *peer;
- kib_peer_t *peer2;
- kib_conn_t *conn;
+ struct kib_msg *reqmsg = priv;
+ struct kib_msg *ackmsg;
+ struct kib_dev *ibdev;
+ struct kib_peer *peer;
+ struct kib_peer *peer2;
+ struct kib_conn *conn;
lnet_ni_t *ni = NULL;
- kib_net_t *net = NULL;
+ struct kib_net *net = NULL;
lnet_nid_t nid;
struct rdma_conn_param cp;
- kib_rej_t rej;
+ struct kib_rej rej;
int version = IBLND_MSG_VERSION;
unsigned long flags;
int rc;
@@ -2242,7 +2246,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
LASSERT(!in_interrupt());
/* cmid inherits 'context' from the corresponding listener id */
- ibdev = (kib_dev_t *)cmid->context;
+ ibdev = (struct kib_dev *)cmid->context;
LASSERT(ibdev);
memset(&rej, 0, sizeof(rej));
@@ -2260,7 +2264,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
goto failed;
}
- if (priv_nob < offsetof(kib_msg_t, ibm_type)) {
+ if (priv_nob < offsetof(struct kib_msg, ibm_type)) {
CERROR("Short connection request\n");
goto failed;
}
@@ -2295,7 +2299,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
ni = lnet_net2ni(LNET_NIDNET(reqmsg->ibm_dstnid));
if (ni) {
- net = (kib_net_t *)ni->ni_data;
+ net = (struct kib_net *)ni->ni_data;
rej.ibr_incarnation = net->ibn_incarnation;
}
@@ -2534,11 +2538,11 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
}
static void
-kiblnd_check_reconnect(kib_conn_t *conn, int version,
- __u64 incarnation, int why, kib_connparams_t *cp)
+kiblnd_check_reconnect(struct kib_conn *conn, int version,
+ __u64 incarnation, int why, struct kib_connparams *cp)
{
rwlock_t *glock = &kiblnd_data.kib_global_lock;
- kib_peer_t *peer = conn->ibc_peer;
+ struct kib_peer *peer = conn->ibc_peer;
char *reason;
int msg_size = IBLND_MSG_SIZE;
int frag_num = -1;
@@ -2647,9 +2651,9 @@ out:
}
static void
-kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
+kiblnd_rejected(struct kib_conn *conn, int reason, void *priv, int priv_nob)
{
- kib_peer_t *peer = conn->ibc_peer;
+ struct kib_peer *peer = conn->ibc_peer;
LASSERT(!in_interrupt());
LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
@@ -2667,9 +2671,9 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
break;
case IB_CM_REJ_CONSUMER_DEFINED:
- if (priv_nob >= offsetof(kib_rej_t, ibr_padding)) {
- kib_rej_t *rej = priv;
- kib_connparams_t *cp = NULL;
+ if (priv_nob >= offsetof(struct kib_rej, ibr_padding)) {
+ struct kib_rej *rej = priv;
+ struct kib_connparams *cp = NULL;
int flip = 0;
__u64 incarnation = -1;
@@ -2692,7 +2696,7 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
flip = 1;
}
- if (priv_nob >= sizeof(kib_rej_t) &&
+ if (priv_nob >= sizeof(struct kib_rej) &&
rej->ibr_version > IBLND_MSG_VERSION_1) {
/*
* priv_nob is always 148 in current version
@@ -2775,12 +2779,12 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
}
static void
-kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob)
+kiblnd_check_connreply(struct kib_conn *conn, void *priv, int priv_nob)
{
- kib_peer_t *peer = conn->ibc_peer;
+ struct kib_peer *peer = conn->ibc_peer;
lnet_ni_t *ni = peer->ibp_ni;
- kib_net_t *net = ni->ni_data;
- kib_msg_t *msg = priv;
+ struct kib_net *net = ni->ni_data;
+ struct kib_msg *msg = priv;
int ver = conn->ibc_version;
int rc = kiblnd_unpack_msg(msg, priv_nob);
unsigned long flags;
@@ -2877,9 +2881,9 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob)
static int
kiblnd_active_connect(struct rdma_cm_id *cmid)
{
- kib_peer_t *peer = (kib_peer_t *)cmid->context;
- kib_conn_t *conn;
- kib_msg_t *msg;
+ struct kib_peer *peer = (struct kib_peer *)cmid->context;
+ struct kib_conn *conn;
+ struct kib_msg *msg;
struct rdma_conn_param cp;
int version;
__u64 incarnation;
@@ -2944,8 +2948,8 @@ kiblnd_active_connect(struct rdma_cm_id *cmid)
int
kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
{
- kib_peer_t *peer;
- kib_conn_t *conn;
+ struct kib_peer *peer;
+ struct kib_conn *conn;
int rc;
switch (event->event) {
@@ -2963,7 +2967,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
return rc;
case RDMA_CM_EVENT_ADDR_ERROR:
- peer = (kib_peer_t *)cmid->context;
+ peer = (struct kib_peer *)cmid->context;
CNETERR("%s: ADDR ERROR %d\n",
libcfs_nid2str(peer->ibp_nid), event->status);
kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
@@ -2971,7 +2975,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
return -EHOSTUNREACH; /* rc destroys cmid */
case RDMA_CM_EVENT_ADDR_RESOLVED:
- peer = (kib_peer_t *)cmid->context;
+ peer = (struct kib_peer *)cmid->context;
CDEBUG(D_NET, "%s Addr resolved: %d\n",
libcfs_nid2str(peer->ibp_nid), event->status);
@@ -2994,7 +2998,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
return rc; /* rc destroys cmid */
case RDMA_CM_EVENT_ROUTE_ERROR:
- peer = (kib_peer_t *)cmid->context;
+ peer = (struct kib_peer *)cmid->context;
CNETERR("%s: ROUTE ERROR %d\n",
libcfs_nid2str(peer->ibp_nid), event->status);
kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
@@ -3002,7 +3006,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
return -EHOSTUNREACH; /* rc destroys cmid */
case RDMA_CM_EVENT_ROUTE_RESOLVED:
- peer = (kib_peer_t *)cmid->context;
+ peer = (struct kib_peer *)cmid->context;
CDEBUG(D_NET, "%s Route resolved: %d\n",
libcfs_nid2str(peer->ibp_nid), event->status);
@@ -3016,7 +3020,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
return event->status; /* rc destroys cmid */
case RDMA_CM_EVENT_UNREACHABLE:
- conn = (kib_conn_t *)cmid->context;
+ conn = (struct kib_conn *)cmid->context;
LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
CNETERR("%s: UNREACHABLE %d\n",
@@ -3026,7 +3030,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
return 0;
case RDMA_CM_EVENT_CONNECT_ERROR:
- conn = (kib_conn_t *)cmid->context;
+ conn = (struct kib_conn *)cmid->context;
LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
CNETERR("%s: CONNECT ERROR %d\n",
@@ -3036,7 +3040,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
return 0;
case RDMA_CM_EVENT_REJECTED:
- conn = (kib_conn_t *)cmid->context;
+ conn = (struct kib_conn *)cmid->context;
switch (conn->ibc_state) {
default:
LBUG();
@@ -3058,7 +3062,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
return 0;
case RDMA_CM_EVENT_ESTABLISHED:
- conn = (kib_conn_t *)cmid->context;
+ conn = (struct kib_conn *)cmid->context;
switch (conn->ibc_state) {
default:
LBUG();
@@ -3084,7 +3088,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
CDEBUG(D_NET, "Ignore TIMEWAIT_EXIT event\n");
return 0;
case RDMA_CM_EVENT_DISCONNECTED:
- conn = (kib_conn_t *)cmid->context;
+ conn = (struct kib_conn *)cmid->context;
if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
CERROR("%s DISCONNECTED\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid));
@@ -3113,13 +3117,13 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
}
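
The casts of cmid->context in the switch above change with the connection's life cycle: the listening cmid carries the struct kib_dev it was created for (CONNECT_REQUEST), an actively connecting cmid carries the struct kib_peer through address and route resolution, and once a connection object exists the context points at the struct kib_conn. The handler therefore infers the safe cast purely from the event type. A toy restatement of that convention as read from the switch (this is how this driver uses the field, not a general RDMA-CM guarantee):

#include <stdio.h>

enum cm_phase { PH_LISTEN, PH_RESOLVE, PH_CONN };

static const char *ctx_type(enum cm_phase ph)
{
        switch (ph) {
        case PH_LISTEN:  return "struct kib_dev";  /* CONNECT_REQUEST        */
        case PH_RESOLVE: return "struct kib_peer"; /* ADDR_* / ROUTE_* events */
        case PH_CONN:    return "struct kib_conn"; /* UNREACHABLE..DISCONNECTED */
        }
        return "?";
}

int main(void)
{
        printf("%s\n", ctx_type(PH_RESOLVE));
        return 0;
}
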
static int
-kiblnd_check_txs_locked(kib_conn_t *conn, struct list_head *txs)
+kiblnd_check_txs_locked(struct kib_conn *conn, struct list_head *txs)
{
- kib_tx_t *tx;
+ struct kib_tx *tx;
struct list_head *ttmp;
list_for_each(ttmp, txs) {
- tx = list_entry(ttmp, kib_tx_t, tx_list);
+ tx = list_entry(ttmp, struct kib_tx, tx_list);
if (txs != &conn->ibc_active_txs) {
LASSERT(tx->tx_queued);
@@ -3140,7 +3144,7 @@ kiblnd_check_txs_locked(kib_conn_t *conn, struct list_head *txs)
}
static int
-kiblnd_conn_timed_out_locked(kib_conn_t *conn)
+kiblnd_conn_timed_out_locked(struct kib_conn *conn)
{
return kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue) ||
kiblnd_check_txs_locked(conn, &conn->ibc_tx_noops) ||
@@ -3156,10 +3160,10 @@ kiblnd_check_conns(int idx)
LIST_HEAD(checksends);
struct list_head *peers = &kiblnd_data.kib_peers[idx];
struct list_head *ptmp;
- kib_peer_t *peer;
- kib_conn_t *conn;
- kib_conn_t *temp;
- kib_conn_t *tmp;
+ struct kib_peer *peer;
+ struct kib_conn *conn;
+ struct kib_conn *temp;
+ struct kib_conn *tmp;
struct list_head *ctmp;
unsigned long flags;
@@ -3171,13 +3175,13 @@ kiblnd_check_conns(int idx)
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
list_for_each(ptmp, peers) {
- peer = list_entry(ptmp, kib_peer_t, ibp_list);
+ peer = list_entry(ptmp, struct kib_peer, ibp_list);
list_for_each(ctmp, &peer->ibp_conns) {
int timedout;
int sendnoop;
- conn = list_entry(ctmp, kib_conn_t, ibc_list);
+ conn = list_entry(ctmp, struct kib_conn, ibc_list);
LASSERT(conn->ibc_state == IBLND_CONN_ESTABLISHED);
@@ -3235,7 +3239,7 @@ kiblnd_check_conns(int idx)
}
static void
-kiblnd_disconnect_conn(kib_conn_t *conn)
+kiblnd_disconnect_conn(struct kib_conn *conn)
{
LASSERT(!in_interrupt());
LASSERT(current == kiblnd_data.kib_connd);
@@ -3264,7 +3268,7 @@ kiblnd_connd(void *arg)
spinlock_t *lock = &kiblnd_data.kib_connd_lock;

wait_queue_t wait;
unsigned long flags;
- kib_conn_t *conn;
+ struct kib_conn *conn;
int timeout;
int i;
int dropped_lock;
@@ -3284,10 +3288,10 @@ kiblnd_connd(void *arg)
dropped_lock = 0;
if (!list_empty(&kiblnd_data.kib_connd_zombies)) {
- kib_peer_t *peer = NULL;
+ struct kib_peer *peer = NULL;
conn = list_entry(kiblnd_data.kib_connd_zombies.next,
- kib_conn_t, ibc_list);
+ struct kib_conn, ibc_list);
list_del(&conn->ibc_list);
if (conn->ibc_reconnect) {
peer = conn->ibc_peer;
@@ -3314,7 +3318,7 @@ kiblnd_connd(void *arg)
if (!list_empty(&kiblnd_data.kib_connd_conns)) {
conn = list_entry(kiblnd_data.kib_connd_conns.next,
- kib_conn_t, ibc_list);
+ struct kib_conn, ibc_list);
list_del(&conn->ibc_list);
spin_unlock_irqrestore(lock, flags);
@@ -3338,7 +3342,7 @@ kiblnd_connd(void *arg)
break;
conn = list_entry(kiblnd_data.kib_reconn_list.next,
- kib_conn_t, ibc_list);
+ struct kib_conn, ibc_list);
list_del(&conn->ibc_list);
spin_unlock_irqrestore(lock, flags);
@@ -3409,7 +3413,7 @@ kiblnd_connd(void *arg)
void
kiblnd_qp_event(struct ib_event *event, void *arg)
{
- kib_conn_t *conn = arg;
+ struct kib_conn *conn = arg;
switch (event->event) {
case IB_EVENT_COMM_EST:
@@ -3471,7 +3475,7 @@ kiblnd_cq_completion(struct ib_cq *cq, void *arg)
* occurred. But in this case, !ibc_nrx && !ibc_nsends_posted
* and this CQ is about to be destroyed so I NOOP.
*/
- kib_conn_t *conn = arg;
+ struct kib_conn *conn = arg;
struct kib_sched_info *sched = conn->ibc_sched;
unsigned long flags;
@@ -3498,7 +3502,7 @@ kiblnd_cq_completion(struct ib_cq *cq, void *arg)
void
kiblnd_cq_event(struct ib_event *event, void *arg)
{
- kib_conn_t *conn = arg;
+ struct kib_conn *conn = arg;
CERROR("%s: async CQ event type %d\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
@@ -3509,7 +3513,7 @@ kiblnd_scheduler(void *arg)
{
long id = (long)arg;
struct kib_sched_info *sched;
- kib_conn_t *conn;
+ struct kib_conn *conn;
wait_queue_t wait;
unsigned long flags;
struct ib_wc wc;
@@ -3544,7 +3548,7 @@ kiblnd_scheduler(void *arg)
did_something = 0;
if (!list_empty(&sched->ibs_conns)) {
- conn = list_entry(sched->ibs_conns.next, kib_conn_t,
+ conn = list_entry(sched->ibs_conns.next, struct kib_conn,
ibc_sched_list);
/* take over kib_sched_conns' ref on conn... */
LASSERT(conn->ibc_scheduled);
@@ -3644,7 +3648,7 @@ int
kiblnd_failover_thread(void *arg)
{
rwlock_t *glock = &kiblnd_data.kib_global_lock;
- kib_dev_t *dev;
+ struct kib_dev *dev;
wait_queue_t wait;
unsigned long flags;
int rc;
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
index f8fdd4ae3..44e960f60 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -145,7 +141,7 @@ static int use_privileged_port = 1;
module_param(use_privileged_port, int, 0644);
MODULE_PARM_DESC(use_privileged_port, "use privileged port when initiating connection");
-kib_tunables_t kiblnd_tunables = {
+struct kib_tunables kiblnd_tunables = {
.kib_dev_failover = &dev_failover,
.kib_service = &service,
.kib_cksum = &cksum,
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index 406c0e7a5..07ec54094 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -44,14 +40,14 @@
#include "socklnd.h"
static lnd_t the_ksocklnd;
-ksock_nal_data_t ksocknal_data;
+struct ksock_nal_data ksocknal_data;
-static ksock_interface_t *
+static struct ksock_interface *
ksocknal_ip2iface(lnet_ni_t *ni, __u32 ip)
{
- ksock_net_t *net = ni->ni_data;
+ struct ksock_net *net = ni->ni_data;
int i;
- ksock_interface_t *iface;
+ struct ksock_interface *iface;
for (i = 0; i < net->ksnn_ninterfaces; i++) {
LASSERT(i < LNET_MAX_INTERFACES);
@@ -64,10 +60,10 @@ ksocknal_ip2iface(lnet_ni_t *ni, __u32 ip)
return NULL;
}
-static ksock_route_t *
+static struct ksock_route *
ksocknal_create_route(__u32 ipaddr, int port)
{
- ksock_route_t *route;
+ struct ksock_route *route;
LIBCFS_ALLOC(route, sizeof(*route));
if (!route)
@@ -89,7 +85,7 @@ ksocknal_create_route(__u32 ipaddr, int port)
}
void
-ksocknal_destroy_route(ksock_route_t *route)
+ksocknal_destroy_route(struct ksock_route *route)
{
LASSERT(!atomic_read(&route->ksnr_refcount));
@@ -100,11 +96,11 @@ ksocknal_destroy_route(ksock_route_t *route)
}
static int
-ksocknal_create_peer(ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
+ksocknal_create_peer(struct ksock_peer **peerp, lnet_ni_t *ni, lnet_process_id_t id)
{
int cpt = lnet_cpt_of_nid(id.nid);
- ksock_net_t *net = ni->ni_data;
- ksock_peer_t *peer;
+ struct ksock_net *net = ni->ni_data;
+ struct ksock_peer *peer;
LASSERT(id.nid != LNET_NID_ANY);
LASSERT(id.pid != LNET_PID_ANY);
@@ -148,9 +144,9 @@ ksocknal_create_peer(ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
}
void
-ksocknal_destroy_peer(ksock_peer_t *peer)
+ksocknal_destroy_peer(struct ksock_peer *peer)
{
- ksock_net_t *net = peer->ksnp_ni->ni_data;
+ struct ksock_net *net = peer->ksnp_ni->ni_data;
CDEBUG(D_NET, "peer %s %p deleted\n",
libcfs_id2str(peer->ksnp_id), peer);
@@ -175,15 +171,15 @@ ksocknal_destroy_peer(ksock_peer_t *peer)
spin_unlock_bh(&net->ksnn_lock);
}
-ksock_peer_t *
+struct ksock_peer *
ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id)
{
struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
struct list_head *tmp;
- ksock_peer_t *peer;
+ struct ksock_peer *peer;
list_for_each(tmp, peer_list) {
- peer = list_entry(tmp, ksock_peer_t, ksnp_list);
+ peer = list_entry(tmp, struct ksock_peer, ksnp_list);
LASSERT(!peer->ksnp_closing);
@@ -202,10 +198,10 @@ ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id)
return NULL;
}
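
ksocknal_nid2peerlist() used above selects one bucket of the ksnd_peers open hash table (the NID reduced modulo the table size), and lookup is then the linear bucket scan shown, so the list_entry conversion is the only change here. A toy version of that shape, with hypothetical names and bucket count:

#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define PEER_HASH_SIZE 251                      /* hypothetical bucket count */

struct peer {
        unsigned long long nid;                 /* lookup key */
        struct list_head   list;                /* chains peers in one bucket */
};

static struct list_head peer_hash[PEER_HASH_SIZE];

/* Hash a NID to its bucket; scanning that bucket mirrors the loop above. */
static struct list_head *nid2peerlist(unsigned long long nid)
{
        return &peer_hash[nid % PEER_HASH_SIZE];
}
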
-ksock_peer_t *
+struct ksock_peer *
ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id)
{
- ksock_peer_t *peer;
+ struct ksock_peer *peer;
read_lock(&ksocknal_data.ksnd_global_lock);
peer = ksocknal_find_peer_locked(ni, id);
@@ -217,11 +213,11 @@ ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id)
}
static void
-ksocknal_unlink_peer_locked(ksock_peer_t *peer)
+ksocknal_unlink_peer_locked(struct ksock_peer *peer)
{
int i;
__u32 ip;
- ksock_interface_t *iface;
+ struct ksock_interface *iface;
for (i = 0; i < peer->ksnp_n_passive_ips; i++) {
LASSERT(i < LNET_MAX_INTERFACES);
@@ -253,9 +249,9 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index,
lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip,
int *port, int *conn_count, int *share_count)
{
- ksock_peer_t *peer;
+ struct ksock_peer *peer;
struct list_head *ptmp;
- ksock_route_t *route;
+ struct ksock_route *route;
struct list_head *rtmp;
int i;
int j;
@@ -265,7 +261,7 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index,
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
- peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
+ peer = list_entry(ptmp, struct ksock_peer, ksnp_list);
if (peer->ksnp_ni != ni)
continue;
@@ -303,7 +299,7 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index,
if (index-- > 0)
continue;
- route = list_entry(rtmp, ksock_route_t,
+ route = list_entry(rtmp, struct ksock_route,
ksnr_list);
*id = peer->ksnp_id;
@@ -323,11 +319,11 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index,
}
static void
-ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
+ksocknal_associate_route_conn_locked(struct ksock_route *route, struct ksock_conn *conn)
{
- ksock_peer_t *peer = route->ksnr_peer;
+ struct ksock_peer *peer = route->ksnr_peer;
int type = conn->ksnc_type;
- ksock_interface_t *iface;
+ struct ksock_interface *iface;
conn->ksnc_route = route;
ksocknal_route_addref(route);
@@ -369,11 +365,11 @@ ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
}
static void
-ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route)
+ksocknal_add_route_locked(struct ksock_peer *peer, struct ksock_route *route)
{
struct list_head *tmp;
- ksock_conn_t *conn;
- ksock_route_t *route2;
+ struct ksock_conn *conn;
+ struct ksock_route *route2;
LASSERT(!peer->ksnp_closing);
LASSERT(!route->ksnr_peer);
@@ -383,7 +379,7 @@ ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route)
/* LASSERT(unique) */
list_for_each(tmp, &peer->ksnp_routes) {
- route2 = list_entry(tmp, ksock_route_t, ksnr_list);
+ route2 = list_entry(tmp, struct ksock_route, ksnr_list);
if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
CERROR("Duplicate route %s %pI4h\n",
@@ -399,7 +395,7 @@ ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route)
list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
list_for_each(tmp, &peer->ksnp_conns) {
- conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(tmp, struct ksock_conn, ksnc_list);
if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
continue;
@@ -410,11 +406,11 @@ ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route)
}
static void
-ksocknal_del_route_locked(ksock_route_t *route)
+ksocknal_del_route_locked(struct ksock_route *route)
{
- ksock_peer_t *peer = route->ksnr_peer;
- ksock_interface_t *iface;
- ksock_conn_t *conn;
+ struct ksock_peer *peer = route->ksnr_peer;
+ struct ksock_interface *iface;
+ struct ksock_conn *conn;
struct list_head *ctmp;
struct list_head *cnxt;
@@ -422,7 +418,7 @@ ksocknal_del_route_locked(ksock_route_t *route)
/* Close associated conns */
list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) {
- conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
if (conn->ksnc_route != route)
continue;
@@ -455,10 +451,10 @@ int
ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
{
struct list_head *tmp;
- ksock_peer_t *peer;
- ksock_peer_t *peer2;
- ksock_route_t *route;
- ksock_route_t *route2;
+ struct ksock_peer *peer;
+ struct ksock_peer *peer2;
+ struct ksock_route *route;
+ struct ksock_route *route2;
int rc;
if (id.nid == LNET_NID_ANY ||
@@ -479,7 +475,7 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
write_lock_bh(&ksocknal_data.ksnd_global_lock);
/* always called with a ref on ni, so shutdown can't have started */
- LASSERT(!((ksock_net_t *) ni->ni_data)->ksnn_shutdown);
+ LASSERT(!((struct ksock_net *)ni->ni_data)->ksnn_shutdown);
peer2 = ksocknal_find_peer_locked(ni, id);
if (peer2) {
@@ -493,7 +489,7 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
route2 = NULL;
list_for_each(tmp, &peer->ksnp_routes) {
- route2 = list_entry(tmp, ksock_route_t, ksnr_list);
+ route2 = list_entry(tmp, struct ksock_route, ksnr_list);
if (route2->ksnr_ipaddr == ipaddr)
break;
@@ -514,10 +510,10 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
}
static void
-ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip)
+ksocknal_del_peer_locked(struct ksock_peer *peer, __u32 ip)
{
- ksock_conn_t *conn;
- ksock_route_t *route;
+ struct ksock_conn *conn;
+ struct ksock_route *route;
struct list_head *tmp;
struct list_head *nxt;
int nshared;
@@ -528,7 +524,7 @@ ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip)
ksocknal_peer_addref(peer);
list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ route = list_entry(tmp, struct ksock_route, ksnr_list);
/* no match */
if (!(!ip || route->ksnr_ipaddr == ip))
@@ -541,7 +537,7 @@ ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip)
nshared = 0;
list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ route = list_entry(tmp, struct ksock_route, ksnr_list);
nshared += route->ksnr_share_count;
}
@@ -551,7 +547,7 @@ ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip)
* left
*/
list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ route = list_entry(tmp, struct ksock_route, ksnr_list);
/* we should only be removing auto-entries */
LASSERT(!route->ksnr_share_count);
@@ -559,7 +555,7 @@ ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip)
}
list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
- conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(tmp, struct ksock_conn, ksnc_list);
ksocknal_close_conn_locked(conn, 0);
}
@@ -575,7 +571,7 @@ ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
LIST_HEAD(zombies);
struct list_head *ptmp;
struct list_head *pnxt;
- ksock_peer_t *peer;
+ struct ksock_peer *peer;
int lo;
int hi;
int i;
@@ -593,7 +589,7 @@ ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
for (i = lo; i <= hi; i++) {
list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
- peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
+ peer = list_entry(ptmp, struct ksock_peer, ksnp_list);
if (peer->ksnp_ni != ni)
continue;
@@ -628,12 +624,12 @@ ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
return rc;
}
-static ksock_conn_t *
+static struct ksock_conn *
ksocknal_get_conn_by_idx(lnet_ni_t *ni, int index)
{
- ksock_peer_t *peer;
+ struct ksock_peer *peer;
struct list_head *ptmp;
- ksock_conn_t *conn;
+ struct ksock_conn *conn;
struct list_head *ctmp;
int i;
@@ -641,7 +637,7 @@ ksocknal_get_conn_by_idx(lnet_ni_t *ni, int index)
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
- peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
+ peer = list_entry(ptmp, struct ksock_peer, ksnp_list);
LASSERT(!peer->ksnp_closing);
@@ -652,7 +648,7 @@ ksocknal_get_conn_by_idx(lnet_ni_t *ni, int index)
if (index-- > 0)
continue;
- conn = list_entry(ctmp, ksock_conn_t,
+ conn = list_entry(ctmp, struct ksock_conn,
ksnc_list);
ksocknal_conn_addref(conn);
read_unlock(&ksocknal_data.ksnd_global_lock);
@@ -665,11 +661,11 @@ ksocknal_get_conn_by_idx(lnet_ni_t *ni, int index)
return NULL;
}
-static ksock_sched_t *
+static struct ksock_sched *
ksocknal_choose_scheduler_locked(unsigned int cpt)
{
struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt];
- ksock_sched_t *sched;
+ struct ksock_sched *sched;
int i;
LASSERT(info->ksi_nthreads > 0);
@@ -691,7 +687,7 @@ ksocknal_choose_scheduler_locked(unsigned int cpt)
static int
ksocknal_local_ipvec(lnet_ni_t *ni, __u32 *ipaddrs)
{
- ksock_net_t *net = ni->ni_data;
+ struct ksock_net *net = ni->ni_data;
int i;
int nip;
@@ -719,7 +715,7 @@ ksocknal_local_ipvec(lnet_ni_t *ni, __u32 *ipaddrs)
}
static int
-ksocknal_match_peerip(ksock_interface_t *iface, __u32 *ips, int nips)
+ksocknal_match_peerip(struct ksock_interface *iface, __u32 *ips, int nips)
{
int best_netmatch = 0;
int best_xor = 0;
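
Only the declarations of ksocknal_match_peerip() appear in this hunk; the body is elided. Reading the surrounding driver, the function scores each candidate peer IP against one local interface: a candidate on the same subnet (address XOR masked by the netmask is zero) always beats an off-subnet one, and ties break toward the smaller raw XOR distance. A hedged restatement of that policy with hypothetical parameter names:

#include <stdint.h>

/* Return the index of the peer IP that best matches a local interface:
 * same-subnet candidates first, then smallest XOR distance (a sketch of
 * the selection policy, not the driver's exact body). */
static int best_peer_ip(uint32_t if_ip, uint32_t netmask,
                        const uint32_t *ips, int nips)
{
        int best = -1, best_net = 0;
        uint32_t best_xor = 0;
        int i;

        for (i = 0; i < nips; i++) {
                uint32_t x = ips[i] ^ if_ip;
                int net = !(x & netmask);       /* 1 if on the same subnet */

                if (best < 0 || net > best_net ||
                    (net == best_net && x < best_xor)) {
                        best = i;
                        best_net = net;
                        best_xor = x;
                }
        }
        return best;    /* -1 only if nips == 0 */
}
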
@@ -751,12 +747,12 @@ ksocknal_match_peerip(ksock_interface_t *iface, __u32 *ips, int nips)
}
static int
-ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
+ksocknal_select_ips(struct ksock_peer *peer, __u32 *peerips, int n_peerips)
{
rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
- ksock_net_t *net = peer->ksnp_ni->ni_data;
- ksock_interface_t *iface;
- ksock_interface_t *best_iface;
+ struct ksock_net *net = peer->ksnp_ni->ni_data;
+ struct ksock_interface *iface;
+ struct ksock_interface *best_iface;
int n_ips;
int i;
int j;
@@ -862,17 +858,17 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
}
static void
-ksocknal_create_routes(ksock_peer_t *peer, int port,
+ksocknal_create_routes(struct ksock_peer *peer, int port,
__u32 *peer_ipaddrs, int npeer_ipaddrs)
{
- ksock_route_t *newroute = NULL;
+ struct ksock_route *newroute = NULL;
rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
lnet_ni_t *ni = peer->ksnp_ni;
- ksock_net_t *net = ni->ni_data;
+ struct ksock_net *net = ni->ni_data;
struct list_head *rtmp;
- ksock_route_t *route;
- ksock_interface_t *iface;
- ksock_interface_t *best_iface;
+ struct ksock_route *route;
+ struct ksock_interface *iface;
+ struct ksock_interface *best_iface;
int best_netmatch;
int this_netmatch;
int best_nroutes;
@@ -919,7 +915,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
/* Already got a route? */
route = NULL;
list_for_each(rtmp, &peer->ksnp_routes) {
- route = list_entry(rtmp, ksock_route_t, ksnr_list);
+ route = list_entry(rtmp, struct ksock_route, ksnr_list);
if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
break;
@@ -941,7 +937,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
/* Using this interface already? */
list_for_each(rtmp, &peer->ksnp_routes) {
- route = list_entry(rtmp, ksock_route_t,
+ route = list_entry(rtmp, struct ksock_route,
ksnr_list);
if (route->ksnr_myipaddr == iface->ksni_ipaddr)
@@ -985,7 +981,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
int
ksocknal_accept(lnet_ni_t *ni, struct socket *sock)
{
- ksock_connreq_t *cr;
+ struct ksock_connreq *cr;
int rc;
__u32 peer_ip;
int peer_port;
@@ -1014,9 +1010,9 @@ ksocknal_accept(lnet_ni_t *ni, struct socket *sock)
}
static int
-ksocknal_connecting(ksock_peer_t *peer, __u32 ipaddr)
+ksocknal_connecting(struct ksock_peer *peer, __u32 ipaddr)
{
- ksock_route_t *route;
+ struct ksock_route *route;
list_for_each_entry(route, &peer->ksnp_routes, ksnr_list) {
if (route->ksnr_ipaddr == ipaddr)
@@ -1026,7 +1022,7 @@ ksocknal_connecting(ksock_peer_t *peer, __u32 ipaddr)
}
int
-ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
+ksocknal_create_conn(lnet_ni_t *ni, struct ksock_route *route,
struct socket *sock, int type)
{
rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
@@ -1034,15 +1030,15 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
lnet_process_id_t peerid;
struct list_head *tmp;
__u64 incarnation;
- ksock_conn_t *conn;
- ksock_conn_t *conn2;
- ksock_peer_t *peer = NULL;
- ksock_peer_t *peer2;
- ksock_sched_t *sched;
+ struct ksock_conn *conn;
+ struct ksock_conn *conn2;
+ struct ksock_peer *peer = NULL;
+ struct ksock_peer *peer2;
+ struct ksock_sched *sched;
ksock_hello_msg_t *hello;
int cpt;
- ksock_tx_t *tx;
- ksock_tx_t *txtmp;
+ struct ksock_tx *tx;
+ struct ksock_tx *txtmp;
int rc;
int active;
char *warn = NULL;
@@ -1150,7 +1146,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
write_lock_bh(global_lock);
/* called with a ref on ni, so shutdown can't have started */
- LASSERT(!((ksock_net_t *) ni->ni_data)->ksnn_shutdown);
+ LASSERT(!((struct ksock_net *)ni->ni_data)->ksnn_shutdown);
peer2 = ksocknal_find_peer_locked(ni, peerid);
if (!peer2) {
@@ -1233,7 +1229,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
*/
if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
list_for_each(tmp, &peer->ksnp_conns) {
- conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
+ conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
@@ -1273,7 +1269,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
* continually create duplicate routes.
*/
list_for_each(tmp, &peer->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ route = list_entry(tmp, struct ksock_route, ksnr_list);
if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
continue;
@@ -1432,16 +1428,16 @@ failed_0:
}
void
-ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
+ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
{
/*
* This just does the immediate housekeeping, and queues the
* connection for the reaper to terminate.
* Caller holds ksnd_global_lock exclusively in irq context
*/
- ksock_peer_t *peer = conn->ksnc_peer;
- ksock_route_t *route;
- ksock_conn_t *conn2;
+ struct ksock_peer *peer = conn->ksnc_peer;
+ struct ksock_route *route;
+ struct ksock_conn *conn2;
struct list_head *tmp;
LASSERT(!peer->ksnp_error);
@@ -1459,7 +1455,7 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
conn2 = NULL;
list_for_each(tmp, &peer->ksnp_conns) {
- conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
+ conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
if (conn2->ksnc_route == route &&
conn2->ksnc_type == conn->ksnc_type)
@@ -1484,7 +1480,7 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
/* No more connections to this peer */
if (!list_empty(&peer->ksnp_tx_queue)) {
- ksock_tx_t *tx;
+ struct ksock_tx *tx;
LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
@@ -1524,7 +1520,7 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
}
void
-ksocknal_peer_failed(ksock_peer_t *peer)
+ksocknal_peer_failed(struct ksock_peer *peer)
{
int notify = 0;
unsigned long last_alive = 0;
@@ -1552,12 +1548,12 @@ ksocknal_peer_failed(ksock_peer_t *peer)
}
void
-ksocknal_finalize_zcreq(ksock_conn_t *conn)
+ksocknal_finalize_zcreq(struct ksock_conn *conn)
{
- ksock_peer_t *peer = conn->ksnc_peer;
- ksock_tx_t *tx;
- ksock_tx_t *temp;
- ksock_tx_t *tmp;
+ struct ksock_peer *peer = conn->ksnc_peer;
+ struct ksock_tx *tx;
+ struct ksock_tx *temp;
+ struct ksock_tx *tmp;
LIST_HEAD(zlist);
/*
@@ -1589,7 +1585,7 @@ ksocknal_finalize_zcreq(ksock_conn_t *conn)
}
void
-ksocknal_terminate_conn(ksock_conn_t *conn)
+ksocknal_terminate_conn(struct ksock_conn *conn)
{
/*
* This gets called by the reaper (guaranteed thread context) to
@@ -1597,8 +1593,8 @@ ksocknal_terminate_conn(ksock_conn_t *conn)
* ksnc_refcount will eventually hit zero, and then the reaper will
* destroy it.
*/
- ksock_peer_t *peer = conn->ksnc_peer;
- ksock_sched_t *sched = conn->ksnc_scheduler;
+ struct ksock_peer *peer = conn->ksnc_peer;
+ struct ksock_sched *sched = conn->ksnc_scheduler;
int failed = 0;
LASSERT(conn->ksnc_closing);
@@ -1656,7 +1652,7 @@ ksocknal_terminate_conn(ksock_conn_t *conn)
}
void
-ksocknal_queue_zombie_conn(ksock_conn_t *conn)
+ksocknal_queue_zombie_conn(struct ksock_conn *conn)
{
/* Queue the conn for the reaper to destroy */
@@ -1670,7 +1666,7 @@ ksocknal_queue_zombie_conn(ksock_conn_t *conn)
}
void
-ksocknal_destroy_conn(ksock_conn_t *conn)
+ksocknal_destroy_conn(struct ksock_conn *conn)
{
unsigned long last_rcv;
@@ -1730,15 +1726,15 @@ ksocknal_destroy_conn(ksock_conn_t *conn)
}
int
-ksocknal_close_peer_conns_locked(ksock_peer_t *peer, __u32 ipaddr, int why)
+ksocknal_close_peer_conns_locked(struct ksock_peer *peer, __u32 ipaddr, int why)
{
- ksock_conn_t *conn;
+ struct ksock_conn *conn;
struct list_head *ctmp;
struct list_head *cnxt;
int count = 0;
list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) {
- conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
if (!ipaddr || conn->ksnc_ipaddr == ipaddr) {
count++;
@@ -1750,9 +1746,9 @@ ksocknal_close_peer_conns_locked(ksock_peer_t *peer, __u32 ipaddr, int why)
}
int
-ksocknal_close_conn_and_siblings(ksock_conn_t *conn, int why)
+ksocknal_close_conn_and_siblings(struct ksock_conn *conn, int why)
{
- ksock_peer_t *peer = conn->ksnc_peer;
+ struct ksock_peer *peer = conn->ksnc_peer;
__u32 ipaddr = conn->ksnc_ipaddr;
int count;
@@ -1768,7 +1764,7 @@ ksocknal_close_conn_and_siblings(ksock_conn_t *conn, int why)
int
ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr)
{
- ksock_peer_t *peer;
+ struct ksock_peer *peer;
struct list_head *ptmp;
struct list_head *pnxt;
int lo;
@@ -1789,7 +1785,7 @@ ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr)
for (i = lo; i <= hi; i++) {
list_for_each_safe(ptmp, pnxt,
&ksocknal_data.ksnd_peers[i]) {
- peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
+ peer = list_entry(ptmp, struct ksock_peer, ksnp_list);
if (!((id.nid == LNET_NID_ANY || id.nid == peer->ksnp_id.nid) &&
(id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid)))
@@ -1844,7 +1840,7 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
int connect = 1;
unsigned long last_alive = 0;
unsigned long now = cfs_time_current();
- ksock_peer_t *peer = NULL;
+ struct ksock_peer *peer = NULL;
rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
lnet_process_id_t id = {
.nid = nid,
@@ -1856,11 +1852,11 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
peer = ksocknal_find_peer_locked(ni, id);
if (peer) {
struct list_head *tmp;
- ksock_conn_t *conn;
+ struct ksock_conn *conn;
int bufnob;
list_for_each(tmp, &peer->ksnp_conns) {
- conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(tmp, struct ksock_conn, ksnc_list);
bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
if (bufnob < conn->ksnc_tx_bufnob) {
@@ -1902,12 +1898,12 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
}
static void
-ksocknal_push_peer(ksock_peer_t *peer)
+ksocknal_push_peer(struct ksock_peer *peer)
{
int index;
int i;
struct list_head *tmp;
- ksock_conn_t *conn;
+ struct ksock_conn *conn;
for (index = 0; ; index++) {
read_lock(&ksocknal_data.ksnd_global_lock);
@@ -1917,7 +1913,7 @@ ksocknal_push_peer(ksock_peer_t *peer)
list_for_each(tmp, &peer->ksnp_conns) {
if (i++ == index) {
- conn = list_entry(tmp, ksock_conn_t,
+ conn = list_entry(tmp, struct ksock_conn,
ksnc_list);
ksocknal_conn_addref(conn);
break;
@@ -1954,7 +1950,7 @@ static int ksocknal_push(lnet_ni_t *ni, lnet_process_id_t id)
int peer_off; /* searching offset in peer hash table */
for (peer_off = 0; ; peer_off++) {
- ksock_peer_t *peer;
+ struct ksock_peer *peer;
int i = 0;
read_lock(&ksocknal_data.ksnd_global_lock);
@@ -1986,15 +1982,15 @@ static int ksocknal_push(lnet_ni_t *ni, lnet_process_id_t id)
static int
ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
{
- ksock_net_t *net = ni->ni_data;
- ksock_interface_t *iface;
+ struct ksock_net *net = ni->ni_data;
+ struct ksock_interface *iface;
int rc;
int i;
int j;
struct list_head *ptmp;
- ksock_peer_t *peer;
+ struct ksock_peer *peer;
struct list_head *rtmp;
- ksock_route_t *route;
+ struct ksock_route *route;
if (!ipaddress || !netmask)
return -EINVAL;
@@ -2017,7 +2013,7 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
- peer = list_entry(ptmp, ksock_peer_t,
+ peer = list_entry(ptmp, struct ksock_peer,
ksnp_list);
for (j = 0; j < peer->ksnp_n_passive_ips; j++)
@@ -2025,7 +2021,7 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
iface->ksni_npeers++;
list_for_each(rtmp, &peer->ksnp_routes) {
- route = list_entry(rtmp, ksock_route_t,
+ route = list_entry(rtmp, struct ksock_route,
ksnr_list);
if (route->ksnr_myipaddr == ipaddress)
@@ -2044,12 +2040,12 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
}
static void
-ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
+ksocknal_peer_del_interface_locked(struct ksock_peer *peer, __u32 ipaddr)
{
struct list_head *tmp;
struct list_head *nxt;
- ksock_route_t *route;
- ksock_conn_t *conn;
+ struct ksock_route *route;
+ struct ksock_conn *conn;
int i;
int j;
@@ -2063,7 +2059,7 @@ ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
}
list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ route = list_entry(tmp, struct ksock_route, ksnr_list);
if (route->ksnr_myipaddr != ipaddr)
continue;
@@ -2077,7 +2073,7 @@ ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
}
list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
- conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(tmp, struct ksock_conn, ksnc_list);
if (conn->ksnc_myipaddr == ipaddr)
ksocknal_close_conn_locked(conn, 0);
@@ -2087,11 +2083,11 @@ ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
static int
ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
{
- ksock_net_t *net = ni->ni_data;
+ struct ksock_net *net = ni->ni_data;
int rc = -ENOENT;
struct list_head *tmp;
struct list_head *nxt;
- ksock_peer_t *peer;
+ struct ksock_peer *peer;
__u32 this_ip;
int i;
int j;
@@ -2115,7 +2111,7 @@ ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
list_for_each_safe(tmp, nxt,
&ksocknal_data.ksnd_peers[j]) {
- peer = list_entry(tmp, ksock_peer_t, ksnp_list);
+ peer = list_entry(tmp, struct ksock_peer, ksnp_list);
if (peer->ksnp_ni != ni)
continue;
@@ -2139,8 +2135,8 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
switch (cmd) {
case IOC_LIBCFS_GET_INTERFACE: {
- ksock_net_t *net = ni->ni_data;
- ksock_interface_t *iface;
+ struct ksock_net *net = ni->ni_data;
+ struct ksock_interface *iface;
read_lock(&ksocknal_data.ksnd_global_lock);
@@ -2209,7 +2205,7 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
int txmem;
int rxmem;
int nagle;
- ksock_conn_t *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
+ struct ksock_conn *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
if (!conn)
return -ENOENT;
@@ -2284,8 +2280,8 @@ ksocknal_free_buffers(void)
if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
struct list_head zlist;
- ksock_tx_t *tx;
- ksock_tx_t *temp;
+ struct ksock_tx *tx;
+ struct ksock_tx *temp;
list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
@@ -2304,7 +2300,7 @@ static void
ksocknal_base_shutdown(void)
{
struct ksock_sched_info *info;
- ksock_sched_t *sched;
+ struct ksock_sched *sched;
int i;
int j;
@@ -2446,7 +2442,7 @@ ksocknal_base_startup(void)
goto failed;
cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
- ksock_sched_t *sched;
+ struct ksock_sched *sched;
int nthrs;
nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
@@ -2534,7 +2530,7 @@ ksocknal_base_startup(void)
static void
ksocknal_debug_peerhash(lnet_ni_t *ni)
{
- ksock_peer_t *peer = NULL;
+ struct ksock_peer *peer = NULL;
struct list_head *tmp;
int i;
@@ -2542,7 +2538,7 @@ ksocknal_debug_peerhash(lnet_ni_t *ni)
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) {
- peer = list_entry(tmp, ksock_peer_t, ksnp_list);
+ peer = list_entry(tmp, struct ksock_peer, ksnp_list);
if (peer->ksnp_ni == ni)
break;
@@ -2552,8 +2548,8 @@ ksocknal_debug_peerhash(lnet_ni_t *ni)
}
if (peer) {
- ksock_route_t *route;
- ksock_conn_t *conn;
+ struct ksock_route *route;
+ struct ksock_conn *conn;
CWARN("Active peer on shutdown: %s, ref %d, scnt %d, closing %d, accepting %d, err %d, zcookie %llu, txq %d, zc_req %d\n",
libcfs_id2str(peer->ksnp_id),
@@ -2565,7 +2561,7 @@ ksocknal_debug_peerhash(lnet_ni_t *ni)
!list_empty(&peer->ksnp_zc_req_list));
list_for_each(tmp, &peer->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ route = list_entry(tmp, struct ksock_route, ksnr_list);
CWARN("Route: ref %d, schd %d, conn %d, cnted %d, del %d\n",
atomic_read(&route->ksnr_refcount),
route->ksnr_scheduled, route->ksnr_connecting,
@@ -2573,7 +2569,7 @@ ksocknal_debug_peerhash(lnet_ni_t *ni)
}
list_for_each(tmp, &peer->ksnp_conns) {
- conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(tmp, struct ksock_conn, ksnc_list);
CWARN("Conn: ref %d, sref %d, t %d, c %d\n",
atomic_read(&conn->ksnc_conn_refcount),
atomic_read(&conn->ksnc_sock_refcount),
@@ -2587,7 +2583,7 @@ ksocknal_debug_peerhash(lnet_ni_t *ni)
void
ksocknal_shutdown(lnet_ni_t *ni)
{
- ksock_net_t *net = ni->ni_data;
+ struct ksock_net *net = ni->ni_data;
int i;
lnet_process_id_t anyid = {0};
@@ -2637,7 +2633,7 @@ ksocknal_shutdown(lnet_ni_t *ni)
}
static int
-ksocknal_enumerate_interfaces(ksock_net_t *net)
+ksocknal_enumerate_interfaces(struct ksock_net *net)
{
char **names;
int i;
@@ -2694,7 +2690,7 @@ ksocknal_enumerate_interfaces(ksock_net_t *net)
}
static int
-ksocknal_search_new_ipif(ksock_net_t *net)
+ksocknal_search_new_ipif(struct ksock_net *net)
{
int new_ipif = 0;
int i;
@@ -2703,7 +2699,7 @@ ksocknal_search_new_ipif(ksock_net_t *net)
char *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
char *colon = strchr(ifnam, ':');
int found = 0;
- ksock_net_t *tmp;
+ struct ksock_net *tmp;
int j;
if (colon) /* ignore alias device */
@@ -2760,7 +2756,7 @@ ksocknal_start_schedulers(struct ksock_sched_info *info)
for (i = 0; i < nthrs; i++) {
long id;
char name[20];
- ksock_sched_t *sched;
+ struct ksock_sched *sched;
id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
@@ -2782,7 +2778,7 @@ ksocknal_start_schedulers(struct ksock_sched_info *info)
}
static int
-ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts)
+ksocknal_net_start_threads(struct ksock_net *net, __u32 *cpts, int ncpts)
{
int newif = ksocknal_search_new_ipif(net);
int rc;
@@ -2810,7 +2806,7 @@ ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts)
int
ksocknal_startup(lnet_ni_t *ni)
{
- ksock_net_t *net;
+ struct ksock_net *net;
int rc;
int i;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
index a60d72f94..a56632b4e 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
@@ -77,8 +77,7 @@
struct ksock_sched_info;
-typedef struct /* per scheduler state */
-{
+struct ksock_sched { /* per scheduler state */
spinlock_t kss_lock; /* serialise */
struct list_head kss_rx_conns; /* conn waiting to be read */
struct list_head kss_tx_conns; /* conn waiting to be written */
@@ -89,13 +88,13 @@ typedef struct /* per scheduler state */
struct ksock_sched_info *kss_info; /* owner of it */
struct page *kss_rx_scratch_pgs[LNET_MAX_IOV];
struct kvec kss_scratch_iov[LNET_MAX_IOV];
-} ksock_sched_t;
+};
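
This hunk shows the mechanical recipe every conversion in the patch follows: the anonymous typedef struct { ... } foo_t; becomes a named struct foo { ... };, and every use site spells out the struct keyword, which is what the kernel coding style asks for (typedefs hide that an object is an aggregate). The transformation in isolation, with the member list trimmed to one field:

/* Before: anonymous struct hidden behind a typedef. */
typedef struct {
        int kss_nconns;
} ksock_sched_t;

/* After: a named struct, referred to as 'struct ksock_sched' everywhere. */
struct ksock_sched {
        int kss_nconns;
};
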
struct ksock_sched_info {
int ksi_nthreads_max; /* max allowed threads */
int ksi_nthreads; /* number of threads */
int ksi_cpt; /* CPT id */
- ksock_sched_t *ksi_scheds; /* array of schedulers */
+ struct ksock_sched *ksi_scheds; /* array of schedulers */
};
#define KSOCK_CPT_SHIFT 16
@@ -103,16 +102,15 @@ struct ksock_sched_info {
#define KSOCK_THREAD_CPT(id) ((id) >> KSOCK_CPT_SHIFT)
#define KSOCK_THREAD_SID(id) ((id) & ((1UL << KSOCK_CPT_SHIFT) - 1))
-typedef struct /* in-use interface */
-{
+struct ksock_interface { /* in-use interface */
__u32 ksni_ipaddr; /* interface's IP address */
__u32 ksni_netmask; /* interface's network mask */
int ksni_nroutes; /* # routes using (active) */
int ksni_npeers; /* # peers using (passive) */
char ksni_name[IFNAMSIZ]; /* interface name */
-} ksock_interface_t;
+};
-typedef struct {
+struct ksock_tunables {
int *ksnd_timeout; /* "stuck" socket timeout
* (seconds) */
int *ksnd_nscheds; /* # scheduler threads in each
@@ -155,24 +153,24 @@ typedef struct {
* Chelsio TOE) */
int *ksnd_zc_recv_min_nfrags; /* minimum # of fragments to
* enable ZC receive */
-} ksock_tunables_t;
+};
-typedef struct {
+struct ksock_net {
__u64 ksnn_incarnation; /* my epoch */
spinlock_t ksnn_lock; /* serialise */
struct list_head ksnn_list; /* chain on global list */
int ksnn_npeers; /* # peers */
int ksnn_shutdown; /* shutting down? */
int ksnn_ninterfaces; /* IP interfaces */
- ksock_interface_t ksnn_interfaces[LNET_MAX_INTERFACES];
-} ksock_net_t;
+ struct ksock_interface ksnn_interfaces[LNET_MAX_INTERFACES];
+};
/** connd timeout */
#define SOCKNAL_CONND_TIMEOUT 120
/** reserved thread for accepting & creating new connd */
#define SOCKNAL_CONND_RESV 1
-typedef struct {
+struct ksock_nal_data {
int ksnd_init; /* initialisation state
*/
int ksnd_nnets; /* # networks set up */
@@ -229,7 +227,7 @@ typedef struct {
spinlock_t ksnd_tx_lock; /* serialise, g_lock
* unsafe */
-} ksock_nal_data_t;
+};
#define SOCKNAL_INIT_NOTHING 0
#define SOCKNAL_INIT_DATA 1
@@ -250,8 +248,7 @@ struct ksock_peer; /* forward ref */
struct ksock_route; /* forward ref */
struct ksock_proto; /* forward ref */
-typedef struct /* transmit packet */
-{
+struct ksock_tx { /* transmit packet */
struct list_head tx_list; /* queue on conn for transmission etc
*/
struct list_head tx_zc_list; /* queue on peer for ZC request */
@@ -281,20 +278,20 @@ typedef struct /* transmit packet */
struct kvec iov[1]; /* virt hdr + payload */
} virt;
} tx_frags;
-} ksock_tx_t;
+};
-#define KSOCK_NOOP_TX_SIZE (offsetof(ksock_tx_t, tx_frags.paged.kiov[0]))
+#define KSOCK_NOOP_TX_SIZE (offsetof(struct ksock_tx, tx_frags.paged.kiov[0]))
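KSOCK_NOOP_TX_SIZE shows the variable-length-struct idiom used throughout socklnd: struct ksock_tx ends in a one-element fragment array, and callers size the allocation with offsetof() up to the fragment count they actually need (a NOOP message carries none, hence offsetof(..., kiov[0])). A self-contained sketch of the idiom, with illustrative names:

#include <stddef.h>
#include <stdlib.h>
#include <string.h>

/* Scaled-down analog of struct ksock_tx: a fixed header followed by a
 * fragment array declared with one element but sized at allocation time. */
struct tx {
	int tx_niov;
	struct { void *base; size_t len; } iov[1];  /* really tx_niov long */
};

static struct tx *tx_alloc(int niov)
{
	/* offsetof() up to iov[niov] gives exactly the bytes needed;
	 * KSOCK_NOOP_TX_SIZE plays the same trick with a count of zero. */
	struct tx *tx = malloc(offsetof(struct tx, iov[niov]));

	if (tx) {
		memset(tx, 0, offsetof(struct tx, iov[0])); /* zero header */
		tx->tx_niov = niov;
	}
	return tx;
}

int main(void)
{
	struct tx *t = tx_alloc(4);

	free(t);
	return 0;
}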
-/* network zero copy callback descriptor embedded in ksock_tx_t */
+/* network zero copy callback descriptor embedded in struct ksock_tx */
/*
* space for the rx frag descriptors; we either read a single contiguous
* header, or up to LNET_MAX_IOV frags of payload of either type.
*/
-typedef union {
+union ksock_rxiovspace {
struct kvec iov[LNET_MAX_IOV];
lnet_kiov_t kiov[LNET_MAX_IOV];
-} ksock_rxiovspace_t;
+};
#define SOCKNAL_RX_KSM_HEADER 1 /* reading ksock message header */
#define SOCKNAL_RX_LNET_HEADER 2 /* reading lnet message header */
@@ -303,7 +300,7 @@ typedef union {
#define SOCKNAL_RX_LNET_PAYLOAD 5 /* reading lnet payload (to deliver here) */
#define SOCKNAL_RX_SLOP 6 /* skipping body */
-typedef struct ksock_conn {
+struct ksock_conn {
struct ksock_peer *ksnc_peer; /* owning peer */
struct ksock_route *ksnc_route; /* owning route */
struct list_head ksnc_list; /* stash on peer's conn list */
@@ -314,8 +311,8 @@ typedef struct ksock_conn {
* write_space() callback */
atomic_t ksnc_conn_refcount;/* conn refcount */
atomic_t ksnc_sock_refcount;/* sock refcount */
- ksock_sched_t *ksnc_scheduler; /* who schedules this connection
- */
+ struct ksock_sched *ksnc_scheduler; /* who schedules this connection
+ */
__u32 ksnc_myipaddr; /* my IP */
__u32 ksnc_ipaddr; /* peer's IP */
int ksnc_port; /* peer's port */
@@ -341,7 +338,7 @@ typedef struct ksock_conn {
struct kvec *ksnc_rx_iov; /* the iovec frags */
int ksnc_rx_nkiov; /* # page frags */
lnet_kiov_t *ksnc_rx_kiov; /* the page frags */
- ksock_rxiovspace_t ksnc_rx_iov_space; /* space for frag descriptors */
+ union ksock_rxiovspace ksnc_rx_iov_space; /* space for frag descriptors */
__u32 ksnc_rx_csum; /* partial checksum for incoming
* data */
void *ksnc_cookie; /* rx lnet_finalize passthru arg
@@ -357,7 +354,7 @@ typedef struct ksock_conn {
struct list_head ksnc_tx_list; /* where I enq waiting for output
* space */
struct list_head ksnc_tx_queue; /* packets waiting to be sent */
- ksock_tx_t *ksnc_tx_carrier; /* next TX that can carry a LNet
+ struct ksock_tx *ksnc_tx_carrier; /* next TX that can carry a LNet
* message or ZC-ACK */
unsigned long ksnc_tx_deadline; /* when (in jiffies) tx times out
*/
@@ -367,9 +364,9 @@ typedef struct ksock_conn {
int ksnc_tx_scheduled; /* being progressed */
unsigned long ksnc_tx_last_post; /* time stamp of the last posted
* TX */
-} ksock_conn_t;
+};
-typedef struct ksock_route {
+struct ksock_route {
struct list_head ksnr_list; /* chain on peer route list */
struct list_head ksnr_connd_list; /* chain on ksnr_connd_routes */
struct ksock_peer *ksnr_peer; /* owning peer */
@@ -389,11 +386,11 @@ typedef struct ksock_route {
unsigned int ksnr_share_count; /* created explicitly? */
int ksnr_conn_count; /* # conns established by this
* route */
-} ksock_route_t;
+};
#define SOCKNAL_KEEPALIVE_PING 1 /* cookie for keepalive ping */
-typedef struct ksock_peer {
+struct ksock_peer {
struct list_head ksnp_list; /* stash on global peer list */
unsigned long ksnp_last_alive; /* when (in jiffies) I was last
* alive */
@@ -420,49 +417,49 @@ typedef struct ksock_peer {
/* preferred local interfaces */
__u32 ksnp_passive_ips[LNET_MAX_INTERFACES];
-} ksock_peer_t;
+};
-typedef struct ksock_connreq {
+struct ksock_connreq {
struct list_head ksncr_list; /* stash on ksnd_connd_connreqs */
lnet_ni_t *ksncr_ni; /* chosen NI */
struct socket *ksncr_sock; /* accepted socket */
-} ksock_connreq_t;
+};
-extern ksock_nal_data_t ksocknal_data;
-extern ksock_tunables_t ksocknal_tunables;
+extern struct ksock_nal_data ksocknal_data;
+extern struct ksock_tunables ksocknal_tunables;
#define SOCKNAL_MATCH_NO 0 /* TX can't match type of connection */
#define SOCKNAL_MATCH_YES 1 /* TX matches type of connection */
#define SOCKNAL_MATCH_MAY 2 /* TX can be sent on the connection, but not
* preferred */
-typedef struct ksock_proto {
+struct ksock_proto {
/* version number of protocol */
int pro_version;
/* handshake function */
- int (*pro_send_hello)(ksock_conn_t *, ksock_hello_msg_t *);
+ int (*pro_send_hello)(struct ksock_conn *, ksock_hello_msg_t *);
/* handshake function */
- int (*pro_recv_hello)(ksock_conn_t *, ksock_hello_msg_t *, int);
+ int (*pro_recv_hello)(struct ksock_conn *, ksock_hello_msg_t *, int);
/* message pack */
- void (*pro_pack)(ksock_tx_t *);
+ void (*pro_pack)(struct ksock_tx *);
/* message unpack */
void (*pro_unpack)(ksock_msg_t *);
/* queue tx on the connection */
- ksock_tx_t *(*pro_queue_tx_msg)(ksock_conn_t *, ksock_tx_t *);
+ struct ksock_tx *(*pro_queue_tx_msg)(struct ksock_conn *, struct ksock_tx *);
/* queue ZC ack on the connection */
- int (*pro_queue_tx_zcack)(ksock_conn_t *, ksock_tx_t *, __u64);
+ int (*pro_queue_tx_zcack)(struct ksock_conn *, struct ksock_tx *, __u64);
/* handle ZC request */
- int (*pro_handle_zcreq)(ksock_conn_t *, __u64, int);
+ int (*pro_handle_zcreq)(struct ksock_conn *, __u64, int);
/* handle ZC ACK */
- int (*pro_handle_zcack)(ksock_conn_t *, __u64, __u64);
+ int (*pro_handle_zcack)(struct ksock_conn *, __u64, __u64);
/*
* msg type matches the connection type:
@@ -471,12 +468,12 @@ typedef struct ksock_proto {
* return MATCH_YES : matching type
* return MATCH_MAY : can be backup
*/
- int (*pro_match_tx)(ksock_conn_t *, ksock_tx_t *, int);
-} ksock_proto_t;
+ int (*pro_match_tx)(struct ksock_conn *, struct ksock_tx *, int);
+};
-extern ksock_proto_t ksocknal_protocol_v1x;
-extern ksock_proto_t ksocknal_protocol_v2x;
-extern ksock_proto_t ksocknal_protocol_v3x;
+extern struct ksock_proto ksocknal_protocol_v1x;
+extern struct ksock_proto ksocknal_protocol_v2x;
+extern struct ksock_proto ksocknal_protocol_v3x;
#define KSOCK_PROTO_V1_MAJOR LNET_PROTO_TCP_VERSION_MAJOR
#define KSOCK_PROTO_V1_MINOR LNET_PROTO_TCP_VERSION_MINOR
@@ -517,17 +514,17 @@ ksocknal_nid2peerlist(lnet_nid_t nid)
}
static inline void
-ksocknal_conn_addref(ksock_conn_t *conn)
+ksocknal_conn_addref(struct ksock_conn *conn)
{
LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);
atomic_inc(&conn->ksnc_conn_refcount);
}
-void ksocknal_queue_zombie_conn(ksock_conn_t *conn);
-void ksocknal_finalize_zcreq(ksock_conn_t *conn);
+void ksocknal_queue_zombie_conn(struct ksock_conn *conn);
+void ksocknal_finalize_zcreq(struct ksock_conn *conn);
static inline void
-ksocknal_conn_decref(ksock_conn_t *conn)
+ksocknal_conn_decref(struct ksock_conn *conn)
{
LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);
if (atomic_dec_and_test(&conn->ksnc_conn_refcount))
@@ -535,7 +532,7 @@ ksocknal_conn_decref(ksock_conn_t *conn)
}
static inline int
-ksocknal_connsock_addref(ksock_conn_t *conn)
+ksocknal_connsock_addref(struct ksock_conn *conn)
{
int rc = -ESHUTDOWN;
@@ -551,7 +548,7 @@ ksocknal_connsock_addref(ksock_conn_t *conn)
}
static inline void
-ksocknal_connsock_decref(ksock_conn_t *conn)
+ksocknal_connsock_decref(struct ksock_conn *conn)
{
LASSERT(atomic_read(&conn->ksnc_sock_refcount) > 0);
if (atomic_dec_and_test(&conn->ksnc_sock_refcount)) {
@@ -563,17 +560,17 @@ ksocknal_connsock_decref(ksock_conn_t *conn)
}
static inline void
-ksocknal_tx_addref(ksock_tx_t *tx)
+ksocknal_tx_addref(struct ksock_tx *tx)
{
LASSERT(atomic_read(&tx->tx_refcount) > 0);
atomic_inc(&tx->tx_refcount);
}
-void ksocknal_tx_prep(ksock_conn_t *, ksock_tx_t *tx);
-void ksocknal_tx_done(lnet_ni_t *ni, ksock_tx_t *tx);
+void ksocknal_tx_prep(struct ksock_conn *, struct ksock_tx *tx);
+void ksocknal_tx_done(lnet_ni_t *ni, struct ksock_tx *tx);
static inline void
-ksocknal_tx_decref(ksock_tx_t *tx)
+ksocknal_tx_decref(struct ksock_tx *tx)
{
LASSERT(atomic_read(&tx->tx_refcount) > 0);
if (atomic_dec_and_test(&tx->tx_refcount))
@@ -581,16 +578,16 @@ ksocknal_tx_decref(ksock_tx_t *tx)
}
static inline void
-ksocknal_route_addref(ksock_route_t *route)
+ksocknal_route_addref(struct ksock_route *route)
{
LASSERT(atomic_read(&route->ksnr_refcount) > 0);
atomic_inc(&route->ksnr_refcount);
}
-void ksocknal_destroy_route(ksock_route_t *route);
+void ksocknal_destroy_route(struct ksock_route *route);
static inline void
-ksocknal_route_decref(ksock_route_t *route)
+ksocknal_route_decref(struct ksock_route *route)
{
LASSERT(atomic_read(&route->ksnr_refcount) > 0);
if (atomic_dec_and_test(&route->ksnr_refcount))
@@ -598,16 +595,16 @@ ksocknal_route_decref(ksock_route_t *route)
}
static inline void
-ksocknal_peer_addref(ksock_peer_t *peer)
+ksocknal_peer_addref(struct ksock_peer *peer)
{
LASSERT(atomic_read(&peer->ksnp_refcount) > 0);
atomic_inc(&peer->ksnp_refcount);
}
-void ksocknal_destroy_peer(ksock_peer_t *peer);
+void ksocknal_destroy_peer(struct ksock_peer *peer);
static inline void
-ksocknal_peer_decref(ksock_peer_t *peer)
+ksocknal_peer_decref(struct ksock_peer *peer)
{
LASSERT(atomic_read(&peer->ksnp_refcount) > 0);
if (atomic_dec_and_test(&peer->ksnp_refcount))
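The addref/decref inlines above all enforce one invariant: a new reference may only be taken while an existing one is held (hence the LASSERT that the count is positive), and whichever caller drops the count to zero destroys the object. A userspace sketch of the same pattern with C11 atomics, names illustrative:

#include <assert.h>
#include <stdatomic.h>
#include <stdlib.h>

struct peer {
	atomic_int refcount;    /* plays the role of ksnp_refcount */
};

static void peer_addref(struct peer *p)
{
	assert(atomic_load(&p->refcount) > 0);   /* as the LASSERT does */
	atomic_fetch_add(&p->refcount, 1);
}

static void peer_decref(struct peer *p)
{
	assert(atomic_load(&p->refcount) > 0);
	/* fetch_sub returns the old value; 1 means we dropped the last
	 * reference, mirroring atomic_dec_and_test(). */
	if (atomic_fetch_sub(&p->refcount, 1) == 1)
		free(p);                         /* ksocknal_destroy_peer() */
}

int main(void)
{
	struct peer *p = malloc(sizeof(*p));

	atomic_init(&p->refcount, 1);
	peer_addref(p);
	peer_decref(p);
	peer_decref(p);   /* count hits zero, object is freed */
	return 0;
}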
@@ -625,71 +622,71 @@ int ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
int ksocknal_accept(lnet_ni_t *ni, struct socket *sock);
int ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip, int port);
-ksock_peer_t *ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id);
-ksock_peer_t *ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id);
-void ksocknal_peer_failed(ksock_peer_t *peer);
-int ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
+struct ksock_peer *ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id);
+struct ksock_peer *ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id);
+void ksocknal_peer_failed(struct ksock_peer *peer);
+int ksocknal_create_conn(lnet_ni_t *ni, struct ksock_route *route,
struct socket *sock, int type);
-void ksocknal_close_conn_locked(ksock_conn_t *conn, int why);
-void ksocknal_terminate_conn(ksock_conn_t *conn);
-void ksocknal_destroy_conn(ksock_conn_t *conn);
-int ksocknal_close_peer_conns_locked(ksock_peer_t *peer,
+void ksocknal_close_conn_locked(struct ksock_conn *conn, int why);
+void ksocknal_terminate_conn(struct ksock_conn *conn);
+void ksocknal_destroy_conn(struct ksock_conn *conn);
+int ksocknal_close_peer_conns_locked(struct ksock_peer *peer,
__u32 ipaddr, int why);
-int ksocknal_close_conn_and_siblings(ksock_conn_t *conn, int why);
+int ksocknal_close_conn_and_siblings(struct ksock_conn *conn, int why);
int ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr);
-ksock_conn_t *ksocknal_find_conn_locked(ksock_peer_t *peer,
- ksock_tx_t *tx, int nonblk);
+struct ksock_conn *ksocknal_find_conn_locked(struct ksock_peer *peer,
+ struct ksock_tx *tx, int nonblk);
-int ksocknal_launch_packet(lnet_ni_t *ni, ksock_tx_t *tx,
+int ksocknal_launch_packet(lnet_ni_t *ni, struct ksock_tx *tx,
lnet_process_id_t id);
-ksock_tx_t *ksocknal_alloc_tx(int type, int size);
-void ksocknal_free_tx(ksock_tx_t *tx);
-ksock_tx_t *ksocknal_alloc_tx_noop(__u64 cookie, int nonblk);
-void ksocknal_next_tx_carrier(ksock_conn_t *conn);
-void ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn);
+struct ksock_tx *ksocknal_alloc_tx(int type, int size);
+void ksocknal_free_tx(struct ksock_tx *tx);
+struct ksock_tx *ksocknal_alloc_tx_noop(__u64 cookie, int nonblk);
+void ksocknal_next_tx_carrier(struct ksock_conn *conn);
+void ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn);
void ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error);
void ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive);
void ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when);
int ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name);
void ksocknal_thread_fini(void);
-void ksocknal_launch_all_connections_locked(ksock_peer_t *peer);
-ksock_route_t *ksocknal_find_connectable_route_locked(ksock_peer_t *peer);
-ksock_route_t *ksocknal_find_connecting_route_locked(ksock_peer_t *peer);
-int ksocknal_new_packet(ksock_conn_t *conn, int skip);
+void ksocknal_launch_all_connections_locked(struct ksock_peer *peer);
+struct ksock_route *ksocknal_find_connectable_route_locked(struct ksock_peer *peer);
+struct ksock_route *ksocknal_find_connecting_route_locked(struct ksock_peer *peer);
+int ksocknal_new_packet(struct ksock_conn *conn, int skip);
int ksocknal_scheduler(void *arg);
int ksocknal_connd(void *arg);
int ksocknal_reaper(void *arg);
-int ksocknal_send_hello(lnet_ni_t *ni, ksock_conn_t *conn,
+int ksocknal_send_hello(lnet_ni_t *ni, struct ksock_conn *conn,
lnet_nid_t peer_nid, ksock_hello_msg_t *hello);
-int ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
+int ksocknal_recv_hello(lnet_ni_t *ni, struct ksock_conn *conn,
ksock_hello_msg_t *hello, lnet_process_id_t *id,
__u64 *incarnation);
-void ksocknal_read_callback(ksock_conn_t *conn);
-void ksocknal_write_callback(ksock_conn_t *conn);
-
-int ksocknal_lib_zc_capable(ksock_conn_t *conn);
-void ksocknal_lib_save_callback(struct socket *sock, ksock_conn_t *conn);
-void ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn);
-void ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn);
-void ksocknal_lib_push_conn(ksock_conn_t *conn);
-int ksocknal_lib_get_conn_addrs(ksock_conn_t *conn);
+void ksocknal_read_callback(struct ksock_conn *conn);
+void ksocknal_write_callback(struct ksock_conn *conn);
+
+int ksocknal_lib_zc_capable(struct ksock_conn *conn);
+void ksocknal_lib_save_callback(struct socket *sock, struct ksock_conn *conn);
+void ksocknal_lib_set_callback(struct socket *sock, struct ksock_conn *conn);
+void ksocknal_lib_reset_callback(struct socket *sock, struct ksock_conn *conn);
+void ksocknal_lib_push_conn(struct ksock_conn *conn);
+int ksocknal_lib_get_conn_addrs(struct ksock_conn *conn);
int ksocknal_lib_setup_sock(struct socket *so);
-int ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx);
-int ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx);
-void ksocknal_lib_eager_ack(ksock_conn_t *conn);
-int ksocknal_lib_recv_iov(ksock_conn_t *conn);
-int ksocknal_lib_recv_kiov(ksock_conn_t *conn);
-int ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem,
+int ksocknal_lib_send_iov(struct ksock_conn *conn, struct ksock_tx *tx);
+int ksocknal_lib_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx);
+void ksocknal_lib_eager_ack(struct ksock_conn *conn);
+int ksocknal_lib_recv_iov(struct ksock_conn *conn);
+int ksocknal_lib_recv_kiov(struct ksock_conn *conn);
+int ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem,
int *rxmem, int *nagle);
-void ksocknal_read_callback(ksock_conn_t *conn);
-void ksocknal_write_callback(ksock_conn_t *conn);
+void ksocknal_read_callback(struct ksock_conn *conn);
+void ksocknal_write_callback(struct ksock_conn *conn);
int ksocknal_tunables_init(void);
-void ksocknal_lib_csum_tx(ksock_tx_t *tx);
+void ksocknal_lib_csum_tx(struct ksock_tx *tx);
-int ksocknal_lib_memory_pressure(ksock_conn_t *conn);
+int ksocknal_lib_memory_pressure(struct ksock_conn *conn);
int ksocknal_lib_bind_thread_to_cpu(int id);
#endif /* _SOCKLND_SOCKLND_H_ */
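Most call-site churn in this patch is in list_entry() arguments, because list_entry() is container_of() underneath and therefore needs the struct type spelled out at every use. A self-contained sketch of what that macro does:

#include <stddef.h>
#include <stdio.h>

/* Minimal expansion of list_entry(): recover the enclosing struct from
 * a pointer to its embedded list node. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct list_node { struct list_node *next; };

struct conn {
	int              id;
	struct list_node link;   /* embedded, like ksnc_list */
};

int main(void)
{
	struct conn c = { .id = 42 };
	struct list_node *n = &c.link;

	/* list_entry(n, struct conn, link) in kernel terms */
	struct conn *back = container_of(n, struct conn, link);

	printf("recovered id=%d\n", back->id);
	return 0;
}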
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index 976fd7892..303576d81 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -23,10 +23,10 @@
#include "socklnd.h"
-ksock_tx_t *
+struct ksock_tx *
ksocknal_alloc_tx(int type, int size)
{
- ksock_tx_t *tx = NULL;
+ struct ksock_tx *tx = NULL;
if (type == KSOCK_MSG_NOOP) {
LASSERT(size == KSOCK_NOOP_TX_SIZE);
@@ -36,7 +36,7 @@ ksocknal_alloc_tx(int type, int size)
if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
tx = list_entry(ksocknal_data.ksnd_idle_noop_txs. \
- next, ksock_tx_t, tx_list);
+ next, struct ksock_tx, tx_list);
LASSERT(tx->tx_desc_size == size);
list_del(&tx->tx_list);
}
@@ -61,10 +61,10 @@ ksocknal_alloc_tx(int type, int size)
return tx;
}
-ksock_tx_t *
+struct ksock_tx *
ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
{
- ksock_tx_t *tx;
+ struct ksock_tx *tx;
tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE);
if (!tx) {
@@ -87,7 +87,7 @@ ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
}
void
-ksocknal_free_tx(ksock_tx_t *tx)
+ksocknal_free_tx(struct ksock_tx *tx)
{
atomic_dec(&ksocknal_data.ksnd_nactive_txs);
@@ -104,7 +104,7 @@ ksocknal_free_tx(ksock_tx_t *tx)
}
static int
-ksocknal_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_send_iov(struct ksock_conn *conn, struct ksock_tx *tx)
{
struct kvec *iov = tx->tx_iov;
int nob;
@@ -126,7 +126,7 @@ ksocknal_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
do {
LASSERT(tx->tx_niov > 0);
- if (nob < (int) iov->iov_len) {
+ if (nob < (int)iov->iov_len) {
iov->iov_base = (void *)((char *)iov->iov_base + nob);
iov->iov_len -= nob;
return rc;
@@ -141,7 +141,7 @@ ksocknal_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
}
static int
-ksocknal_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx)
{
lnet_kiov_t *kiov = tx->tx_kiov;
int nob;
@@ -179,7 +179,7 @@ ksocknal_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
}
static int
-ksocknal_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
{
int rc;
int bufnob;
@@ -247,7 +247,7 @@ ksocknal_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
}
static int
-ksocknal_recv_iov(ksock_conn_t *conn)
+ksocknal_recv_iov(struct ksock_conn *conn)
{
struct kvec *iov = conn->ksnc_rx_iov;
int nob;
@@ -294,7 +294,7 @@ ksocknal_recv_iov(ksock_conn_t *conn)
}
static int
-ksocknal_recv_kiov(ksock_conn_t *conn)
+ksocknal_recv_kiov(struct ksock_conn *conn)
{
lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
int nob;
@@ -326,7 +326,7 @@ ksocknal_recv_kiov(ksock_conn_t *conn)
do {
LASSERT(conn->ksnc_rx_nkiov > 0);
- if (nob < (int) kiov->kiov_len) {
+ if (nob < (int)kiov->kiov_len) {
kiov->kiov_offset += nob;
kiov->kiov_len -= nob;
return -EAGAIN;
@@ -341,7 +341,7 @@ ksocknal_recv_kiov(ksock_conn_t *conn)
}
static int
-ksocknal_receive(ksock_conn_t *conn)
+ksocknal_receive(struct ksock_conn *conn)
{
/*
* Return 1 on success, 0 on EOF, < 0 on error.
@@ -391,7 +391,7 @@ ksocknal_receive(ksock_conn_t *conn)
}
void
-ksocknal_tx_done(lnet_ni_t *ni, ksock_tx_t *tx)
+ksocknal_tx_done(lnet_ni_t *ni, struct ksock_tx *tx)
{
lnet_msg_t *lnetmsg = tx->tx_lnetmsg;
int rc = (!tx->tx_resid && !tx->tx_zc_aborted) ? 0 : -EIO;
@@ -412,10 +412,10 @@ ksocknal_tx_done(lnet_ni_t *ni, ksock_tx_t *tx)
void
ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error)
{
- ksock_tx_t *tx;
+ struct ksock_tx *tx;
while (!list_empty(txlist)) {
- tx = list_entry(txlist->next, ksock_tx_t, tx_list);
+ tx = list_entry(txlist->next, struct ksock_tx, tx_list);
if (error && tx->tx_lnetmsg) {
CNETERR("Deleting packet type %d len %d %s->%s\n",
@@ -435,10 +435,10 @@ ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error)
}
static void
-ksocknal_check_zc_req(ksock_tx_t *tx)
+ksocknal_check_zc_req(struct ksock_tx *tx)
{
- ksock_conn_t *conn = tx->tx_conn;
- ksock_peer_t *peer = conn->ksnc_peer;
+ struct ksock_conn *conn = tx->tx_conn;
+ struct ksock_peer *peer = conn->ksnc_peer;
/*
* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
@@ -482,9 +482,9 @@ ksocknal_check_zc_req(ksock_tx_t *tx)
}
static void
-ksocknal_uncheck_zc_req(ksock_tx_t *tx)
+ksocknal_uncheck_zc_req(struct ksock_tx *tx)
{
- ksock_peer_t *peer = tx->tx_conn->ksnc_peer;
+ struct ksock_peer *peer = tx->tx_conn->ksnc_peer;
LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
LASSERT(tx->tx_zc_capable);
@@ -508,7 +508,7 @@ ksocknal_uncheck_zc_req(ksock_tx_t *tx)
}
static int
-ksocknal_process_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_process_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
{
int rc;
@@ -583,7 +583,7 @@ ksocknal_process_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
}
static void
-ksocknal_launch_connection_locked(ksock_route_t *route)
+ksocknal_launch_connection_locked(struct ksock_route *route)
{
/* called holding write lock on ksnd_global_lock */
@@ -604,9 +604,9 @@ ksocknal_launch_connection_locked(ksock_route_t *route)
}
void
-ksocknal_launch_all_connections_locked(ksock_peer_t *peer)
+ksocknal_launch_all_connections_locked(struct ksock_peer *peer)
{
- ksock_route_t *route;
+ struct ksock_route *route;
/* called holding write lock on ksnd_global_lock */
for (;;) {
@@ -619,18 +619,18 @@ ksocknal_launch_all_connections_locked(ksock_peer_t *peer)
}
}
-ksock_conn_t *
-ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
+struct ksock_conn *
+ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx, int nonblk)
{
struct list_head *tmp;
- ksock_conn_t *conn;
- ksock_conn_t *typed = NULL;
- ksock_conn_t *fallback = NULL;
+ struct ksock_conn *conn;
+ struct ksock_conn *typed = NULL;
+ struct ksock_conn *fallback = NULL;
int tnob = 0;
int fnob = 0;
list_for_each(tmp, &peer->ksnp_conns) {
- ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list);
+ struct ksock_conn *c = list_entry(tmp, struct ksock_conn, ksnc_list);
int nob = atomic_read(&c->ksnc_tx_nob) +
c->ksnc_sock->sk->sk_wmem_queued;
int rc;
@@ -677,7 +677,7 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
}
void
-ksocknal_tx_prep(ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_tx_prep(struct ksock_conn *conn, struct ksock_tx *tx)
{
conn->ksnc_proto->pro_pack(tx);
@@ -687,11 +687,11 @@ ksocknal_tx_prep(ksock_conn_t *conn, ksock_tx_t *tx)
}
void
-ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn)
+ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn)
{
- ksock_sched_t *sched = conn->ksnc_scheduler;
+ struct ksock_sched *sched = conn->ksnc_scheduler;
ksock_msg_t *msg = &tx->tx_msg;
- ksock_tx_t *ztx = NULL;
+ struct ksock_tx *ztx = NULL;
int bufnob = 0;
/*
@@ -784,15 +784,15 @@ ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn)
spin_unlock_bh(&sched->kss_lock);
}
-ksock_route_t *
-ksocknal_find_connectable_route_locked(ksock_peer_t *peer)
+struct ksock_route *
+ksocknal_find_connectable_route_locked(struct ksock_peer *peer)
{
unsigned long now = cfs_time_current();
struct list_head *tmp;
- ksock_route_t *route;
+ struct ksock_route *route;
list_for_each(tmp, &peer->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ route = list_entry(tmp, struct ksock_route, ksnr_list);
LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
@@ -820,14 +820,14 @@ ksocknal_find_connectable_route_locked(ksock_peer_t *peer)
return NULL;
}
-ksock_route_t *
-ksocknal_find_connecting_route_locked(ksock_peer_t *peer)
+struct ksock_route *
+ksocknal_find_connecting_route_locked(struct ksock_peer *peer)
{
struct list_head *tmp;
- ksock_route_t *route;
+ struct ksock_route *route;
list_for_each(tmp, &peer->ksnp_routes) {
- route = list_entry(tmp, ksock_route_t, ksnr_list);
+ route = list_entry(tmp, struct ksock_route, ksnr_list);
LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
@@ -839,10 +839,10 @@ ksocknal_find_connecting_route_locked(ksock_peer_t *peer)
}
int
-ksocknal_launch_packet(lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
+ksocknal_launch_packet(lnet_ni_t *ni, struct ksock_tx *tx, lnet_process_id_t id)
{
- ksock_peer_t *peer;
- ksock_conn_t *conn;
+ struct ksock_peer *peer;
+ struct ksock_conn *conn;
rwlock_t *g_lock;
int retry;
int rc;
@@ -942,7 +942,7 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
unsigned int payload_offset = lntmsg->msg_offset;
unsigned int payload_nob = lntmsg->msg_len;
- ksock_tx_t *tx;
+ struct ksock_tx *tx;
int desc_size;
int rc;
@@ -960,10 +960,10 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
LASSERT(!in_interrupt());
if (payload_iov)
- desc_size = offsetof(ksock_tx_t,
+ desc_size = offsetof(struct ksock_tx,
tx_frags.virt.iov[1 + payload_niov]);
else
- desc_size = offsetof(ksock_tx_t,
+ desc_size = offsetof(struct ksock_tx,
tx_frags.paged.kiov[payload_niov]);
if (lntmsg->msg_vmflush)
@@ -1037,7 +1037,7 @@ ksocknal_thread_fini(void)
}
int
-ksocknal_new_packet(ksock_conn_t *conn, int nob_to_skip)
+ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
{
static char ksocknal_slop_buffer[4096];
@@ -1120,7 +1120,7 @@ ksocknal_new_packet(ksock_conn_t *conn, int nob_to_skip)
}
static int
-ksocknal_process_receive(ksock_conn_t *conn)
+ksocknal_process_receive(struct ksock_conn *conn)
{
lnet_hdr_t *lhdr;
lnet_process_id_t *id;
@@ -1328,8 +1328,8 @@ ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen)
{
- ksock_conn_t *conn = private;
- ksock_sched_t *sched = conn->ksnc_scheduler;
+ struct ksock_conn *conn = private;
+ struct ksock_sched *sched = conn->ksnc_scheduler;
LASSERT(mlen <= rlen);
LASSERT(niov <= LNET_MAX_IOV);
@@ -1382,7 +1382,7 @@ ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
}
static inline int
-ksocknal_sched_cansleep(ksock_sched_t *sched)
+ksocknal_sched_cansleep(struct ksock_sched *sched)
{
int rc;
@@ -1399,9 +1399,9 @@ ksocknal_sched_cansleep(ksock_sched_t *sched)
int ksocknal_scheduler(void *arg)
{
struct ksock_sched_info *info;
- ksock_sched_t *sched;
- ksock_conn_t *conn;
- ksock_tx_t *tx;
+ struct ksock_sched *sched;
+ struct ksock_conn *conn;
+ struct ksock_tx *tx;
int rc;
int nloops = 0;
long id = (long)arg;
@@ -1426,7 +1426,7 @@ int ksocknal_scheduler(void *arg)
if (!list_empty(&sched->kss_rx_conns)) {
conn = list_entry(sched->kss_rx_conns.next,
- ksock_conn_t, ksnc_rx_list);
+ struct ksock_conn, ksnc_rx_list);
list_del(&conn->ksnc_rx_list);
LASSERT(conn->ksnc_rx_scheduled);
@@ -1481,7 +1481,7 @@ int ksocknal_scheduler(void *arg)
}
conn = list_entry(sched->kss_tx_conns.next,
- ksock_conn_t, ksnc_tx_list);
+ struct ksock_conn, ksnc_tx_list);
list_del(&conn->ksnc_tx_list);
LASSERT(conn->ksnc_tx_scheduled);
@@ -1489,7 +1489,7 @@ int ksocknal_scheduler(void *arg)
LASSERT(!list_empty(&conn->ksnc_tx_queue));
tx = list_entry(conn->ksnc_tx_queue.next,
- ksock_tx_t, tx_list);
+ struct ksock_tx, tx_list);
if (conn->ksnc_tx_carrier == tx)
ksocknal_next_tx_carrier(conn);
@@ -1575,9 +1575,9 @@ int ksocknal_scheduler(void *arg)
* Add connection to kss_rx_conns of scheduler
* and wakeup the scheduler.
*/
-void ksocknal_read_callback(ksock_conn_t *conn)
+void ksocknal_read_callback(struct ksock_conn *conn)
{
- ksock_sched_t *sched;
+ struct ksock_sched *sched;
sched = conn->ksnc_scheduler;
@@ -1600,9 +1600,9 @@ void ksocknal_read_callback(ksock_conn_t *conn)
* Add connection to kss_tx_conns of scheduler
* and wakeup the scheduler.
*/
-void ksocknal_write_callback(ksock_conn_t *conn)
+void ksocknal_write_callback(struct ksock_conn *conn)
{
- ksock_sched_t *sched;
+ struct ksock_sched *sched;
sched = conn->ksnc_scheduler;
@@ -1623,7 +1623,7 @@ void ksocknal_write_callback(ksock_conn_t *conn)
spin_unlock_bh(&sched->kss_lock);
}
-static ksock_proto_t *
+static struct ksock_proto *
ksocknal_parse_proto_version(ksock_hello_msg_t *hello)
{
__u32 version = 0;
@@ -1666,11 +1666,11 @@ ksocknal_parse_proto_version(ksock_hello_msg_t *hello)
}
int
-ksocknal_send_hello(lnet_ni_t *ni, ksock_conn_t *conn,
+ksocknal_send_hello(lnet_ni_t *ni, struct ksock_conn *conn,
lnet_nid_t peer_nid, ksock_hello_msg_t *hello)
{
/* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
- ksock_net_t *net = (ksock_net_t *)ni->ni_data;
+ struct ksock_net *net = (struct ksock_net *)ni->ni_data;
LASSERT(hello->kshm_nips <= LNET_MAX_INTERFACES);
@@ -1704,7 +1704,7 @@ ksocknal_invert_type(int type)
}
int
-ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
+ksocknal_recv_hello(lnet_ni_t *ni, struct ksock_conn *conn,
ksock_hello_msg_t *hello, lnet_process_id_t *peerid,
__u64 *incarnation)
{
@@ -1718,7 +1718,7 @@ ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
int timeout;
int proto_match;
int rc;
- ksock_proto_t *proto;
+ struct ksock_proto *proto;
lnet_process_id_t recv_id;
/* socket type set on active connections - not set on passive */
@@ -1847,10 +1847,10 @@ ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
}
static int
-ksocknal_connect(ksock_route_t *route)
+ksocknal_connect(struct ksock_route *route)
{
LIST_HEAD(zombies);
- ksock_peer_t *peer = route->ksnr_peer;
+ struct ksock_peer *peer = route->ksnr_peer;
int type;
int wanted;
struct socket *sock;
@@ -1989,7 +1989,7 @@ ksocknal_connect(ksock_route_t *route)
if (!list_empty(&peer->ksnp_tx_queue) &&
!peer->ksnp_accepting &&
!ksocknal_find_connecting_route_locked(peer)) {
- ksock_conn_t *conn;
+ struct ksock_conn *conn;
/*
* ksnp_tx_queue is queued on a conn on successful
@@ -1997,7 +1997,7 @@ ksocknal_connect(ksock_route_t *route)
*/
if (!list_empty(&peer->ksnp_conns)) {
conn = list_entry(peer->ksnp_conns.next,
- ksock_conn_t, ksnc_list);
+ struct ksock_conn, ksnc_list);
LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
}
@@ -2131,10 +2131,10 @@ ksocknal_connd_check_stop(time64_t sec, long *timeout)
* Go through connd_routes queue looking for a route that we can process
* right now, @timeout_p can be updated if we need to come back later
*/
-static ksock_route_t *
+static struct ksock_route *
ksocknal_connd_get_route_locked(signed long *timeout_p)
{
- ksock_route_t *route;
+ struct ksock_route *route;
unsigned long now;
now = cfs_time_current();
@@ -2158,7 +2158,7 @@ int
ksocknal_connd(void *arg)
{
spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
- ksock_connreq_t *cr;
+ struct ksock_connreq *cr;
wait_queue_t wait;
int nloops = 0;
int cons_retry = 0;
@@ -2174,7 +2174,7 @@ ksocknal_connd(void *arg)
ksocknal_data.ksnd_connd_running++;
while (!ksocknal_data.ksnd_shuttingdown) {
- ksock_route_t *route = NULL;
+ struct ksock_route *route = NULL;
time64_t sec = ktime_get_real_seconds();
long timeout = MAX_SCHEDULE_TIMEOUT;
int dropped_lock = 0;
@@ -2192,8 +2192,8 @@ ksocknal_connd(void *arg)
if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
/* Connection accepted by the listener */
- cr = list_entry(ksocknal_data.ksnd_connd_connreqs. \
- next, ksock_connreq_t, ksncr_list);
+ cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
+ struct ksock_connreq, ksncr_list);
list_del(&cr->ksncr_list);
spin_unlock_bh(connd_lock);
@@ -2267,17 +2267,17 @@ ksocknal_connd(void *arg)
return 0;
}
-static ksock_conn_t *
-ksocknal_find_timed_out_conn(ksock_peer_t *peer)
+static struct ksock_conn *
+ksocknal_find_timed_out_conn(struct ksock_peer *peer)
{
/* We're called with a shared lock on ksnd_global_lock */
- ksock_conn_t *conn;
+ struct ksock_conn *conn;
struct list_head *ctmp;
list_for_each(ctmp, &peer->ksnp_conns) {
int error;
- conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
/* Don't need the {get,put}connsock dance to deref ksnc_sock */
LASSERT(!conn->ksnc_closing);
@@ -2351,10 +2351,10 @@ ksocknal_find_timed_out_conn(ksock_peer_t *peer)
}
static inline void
-ksocknal_flush_stale_txs(ksock_peer_t *peer)
+ksocknal_flush_stale_txs(struct ksock_peer *peer)
{
- ksock_tx_t *tx;
- ksock_tx_t *tmp;
+ struct ksock_tx *tx;
+ struct ksock_tx *tmp;
LIST_HEAD(stale_txs);
write_lock_bh(&ksocknal_data.ksnd_global_lock);
@@ -2374,12 +2374,12 @@ ksocknal_flush_stale_txs(ksock_peer_t *peer)
}
static int
-ksocknal_send_keepalive_locked(ksock_peer_t *peer)
+ksocknal_send_keepalive_locked(struct ksock_peer *peer)
__must_hold(&ksocknal_data.ksnd_global_lock)
{
- ksock_sched_t *sched;
- ksock_conn_t *conn;
- ksock_tx_t *tx;
+ struct ksock_sched *sched;
+ struct ksock_conn *conn;
+ struct ksock_tx *tx;
if (list_empty(&peer->ksnp_conns)) /* last_alive will be updated by create_conn */
return 0;
@@ -2440,9 +2440,9 @@ static void
ksocknal_check_peer_timeouts(int idx)
{
struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
- ksock_peer_t *peer;
- ksock_conn_t *conn;
- ksock_tx_t *tx;
+ struct ksock_peer *peer;
+ struct ksock_conn *conn;
+ struct ksock_tx *tx;
again:
/*
@@ -2483,8 +2483,8 @@ ksocknal_check_peer_timeouts(int idx)
* holding only shared lock
*/
if (!list_empty(&peer->ksnp_tx_queue)) {
- ksock_tx_t *tx = list_entry(peer->ksnp_tx_queue.next,
- ksock_tx_t, tx_list);
+ struct ksock_tx *tx = list_entry(peer->ksnp_tx_queue.next,
+ struct ksock_tx, tx_list);
if (cfs_time_aftereq(cfs_time_current(),
tx->tx_deadline)) {
@@ -2518,7 +2518,7 @@ ksocknal_check_peer_timeouts(int idx)
}
tx = list_entry(peer->ksnp_zc_req_list.next,
- ksock_tx_t, tx_zc_list);
+ struct ksock_tx, tx_zc_list);
deadline = tx->tx_deadline;
resid = tx->tx_resid;
conn = tx->tx_conn;
@@ -2544,8 +2544,8 @@ int
ksocknal_reaper(void *arg)
{
wait_queue_t wait;
- ksock_conn_t *conn;
- ksock_sched_t *sched;
+ struct ksock_conn *conn;
+ struct ksock_sched *sched;
struct list_head enomem_conns;
int nenomem_conns;
long timeout;
@@ -2563,7 +2563,7 @@ ksocknal_reaper(void *arg)
while (!ksocknal_data.ksnd_shuttingdown) {
if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) {
conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next,
- ksock_conn_t, ksnc_list);
+ struct ksock_conn, ksnc_list);
list_del(&conn->ksnc_list);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
@@ -2577,7 +2577,7 @@ ksocknal_reaper(void *arg)
if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) {
conn = list_entry(ksocknal_data.ksnd_zombie_conns.next,
- ksock_conn_t, ksnc_list);
+ struct ksock_conn, ksnc_list);
list_del(&conn->ksnc_list);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
@@ -2599,7 +2599,7 @@ ksocknal_reaper(void *arg)
/* reschedule all the connections that stalled with ENOMEM... */
nenomem_conns = 0;
while (!list_empty(&enomem_conns)) {
- conn = list_entry(enomem_conns.next, ksock_conn_t,
+ conn = list_entry(enomem_conns.next, struct ksock_conn,
ksnc_tx_list);
list_del(&conn->ksnc_tx_list);
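The reaper loop above uses a standard drain shape: detach one entry while holding the lock, drop the lock for the (possibly slow) per-connection work, then retake it before looking at the list again, so producers are never blocked behind teardown. A userspace sketch of the shape, with illustrative names:

#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; };

static struct node *deathrow;                  /* ~ ksnd_deathrow_conns */
static pthread_mutex_t reaper_lock = PTHREAD_MUTEX_INITIALIZER;

static void reap_one(struct node *n) { free(n); }

static void reaper_drain(void)
{
	pthread_mutex_lock(&reaper_lock);
	while (deathrow) {
		struct node *n = deathrow;     /* list_entry(...next...) */

		deathrow = n->next;            /* list_del() */
		pthread_mutex_unlock(&reaper_lock);

		reap_one(n);                   /* work done unlocked */

		pthread_mutex_lock(&reaper_lock);
	}
	pthread_mutex_unlock(&reaper_lock);
}

int main(void)
{
	deathrow = malloc(sizeof(*deathrow));
	deathrow->next = NULL;
	reaper_drain();
	return 0;
}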
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
index 964b4e338..6a17757fc 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -37,7 +33,7 @@
#include "socklnd.h"
int
-ksocknal_lib_get_conn_addrs(ksock_conn_t *conn)
+ksocknal_lib_get_conn_addrs(struct ksock_conn *conn)
{
int rc = lnet_sock_getaddr(conn->ksnc_sock, 1, &conn->ksnc_ipaddr,
&conn->ksnc_port);
@@ -60,7 +56,7 @@ ksocknal_lib_get_conn_addrs(ksock_conn_t *conn)
}
int
-ksocknal_lib_zc_capable(ksock_conn_t *conn)
+ksocknal_lib_zc_capable(struct ksock_conn *conn)
{
int caps = conn->ksnc_sock->sk->sk_route_caps;
@@ -75,7 +71,7 @@ ksocknal_lib_zc_capable(ksock_conn_t *conn)
}
int
-ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_lib_send_iov(struct ksock_conn *conn, struct ksock_tx *tx)
{
struct socket *sock = conn->ksnc_sock;
int nob;
@@ -118,7 +114,7 @@ ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
}
int
-ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_lib_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx)
{
struct socket *sock = conn->ksnc_sock;
lnet_kiov_t *kiov = tx->tx_kiov;
@@ -187,7 +183,7 @@ ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
}
void
-ksocknal_lib_eager_ack(ksock_conn_t *conn)
+ksocknal_lib_eager_ack(struct ksock_conn *conn)
{
int opt = 1;
struct socket *sock = conn->ksnc_sock;
@@ -203,7 +199,7 @@ ksocknal_lib_eager_ack(ksock_conn_t *conn)
}
int
-ksocknal_lib_recv_iov(ksock_conn_t *conn)
+ksocknal_lib_recv_iov(struct ksock_conn *conn)
{
#if SOCKNAL_SINGLE_FRAG_RX
struct kvec scratch;
@@ -309,7 +305,7 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
}
int
-ksocknal_lib_recv_kiov(ksock_conn_t *conn)
+ksocknal_lib_recv_kiov(struct ksock_conn *conn)
{
#if SOCKNAL_SINGLE_FRAG_RX || !SOCKNAL_RISK_KMAP_DEADLOCK
struct kvec scratch;
@@ -393,7 +389,7 @@ ksocknal_lib_recv_kiov(ksock_conn_t *conn)
}
void
-ksocknal_lib_csum_tx(ksock_tx_t *tx)
+ksocknal_lib_csum_tx(struct ksock_tx *tx)
{
int i;
__u32 csum;
@@ -432,7 +428,7 @@ ksocknal_lib_csum_tx(ksock_tx_t *tx)
}
int
-ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem, int *rxmem, int *nagle)
+ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem, int *rxmem, int *nagle)
{
struct socket *sock = conn->ksnc_sock;
int len;
@@ -562,7 +558,7 @@ ksocknal_lib_setup_sock(struct socket *sock)
}
void
-ksocknal_lib_push_conn(ksock_conn_t *conn)
+ksocknal_lib_push_conn(struct ksock_conn *conn)
{
struct sock *sk;
struct tcp_sock *tp;
@@ -599,7 +595,7 @@ ksocknal_lib_push_conn(ksock_conn_t *conn)
static void
ksocknal_data_ready(struct sock *sk)
{
- ksock_conn_t *conn;
+ struct ksock_conn *conn;
/* interleave correctly with closing sockets... */
LASSERT(!in_irq());
@@ -619,7 +615,7 @@ ksocknal_data_ready(struct sock *sk)
static void
ksocknal_write_space(struct sock *sk)
{
- ksock_conn_t *conn;
+ struct ksock_conn *conn;
int wspace;
int min_wpace;
@@ -663,14 +659,14 @@ ksocknal_write_space(struct sock *sk)
}
void
-ksocknal_lib_save_callback(struct socket *sock, ksock_conn_t *conn)
+ksocknal_lib_save_callback(struct socket *sock, struct ksock_conn *conn)
{
conn->ksnc_saved_data_ready = sock->sk->sk_data_ready;
conn->ksnc_saved_write_space = sock->sk->sk_write_space;
}
void
-ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn)
+ksocknal_lib_set_callback(struct socket *sock, struct ksock_conn *conn)
{
sock->sk->sk_user_data = conn;
sock->sk->sk_data_ready = ksocknal_data_ready;
@@ -678,7 +674,7 @@ ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn)
}
void
-ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn)
+ksocknal_lib_reset_callback(struct socket *sock, struct ksock_conn *conn)
{
/*
* Remove conn's network callbacks.
@@ -697,10 +693,10 @@ ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn)
}
int
-ksocknal_lib_memory_pressure(ksock_conn_t *conn)
+ksocknal_lib_memory_pressure(struct ksock_conn *conn)
{
int rc = 0;
- ksock_sched_t *sched;
+ struct ksock_sched *sched;
sched = conn->ksnc_scheduler;
spin_lock_bh(&sched->kss_lock);
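socklnd_lib.c's callback handling is a save/override/restore dance on sk_data_ready and sk_write_space: the socket's original callbacks are stashed in the conn at setup and put back at teardown. A plain-function-pointer sketch of the same dance, struct and function names illustrative:

#include <stdio.h>

struct sock_like {
	void (*data_ready)(struct sock_like *);
	void *user_data;
};

struct conn_like {
	void (*saved_data_ready)(struct sock_like *);
};

static void my_data_ready(struct sock_like *sk)
{
	printf("conn %p has data\n", sk->user_data);
}

static void save_and_set(struct sock_like *sk, struct conn_like *c)
{
	c->saved_data_ready = sk->data_ready;   /* save the original */
	sk->user_data = c;
	sk->data_ready = my_data_ready;         /* install ours */
}

static void reset(struct sock_like *sk, struct conn_like *c)
{
	sk->data_ready = c->saved_data_ready;   /* restore on teardown */
	sk->user_data = NULL;
}

int main(void)
{
	struct sock_like sk = { 0 };
	struct conn_like c;

	save_and_set(&sk, &c);
	sk.data_ready(&sk);   /* simulated data-ready callback */
	reset(&sk, &c);
	return 0;
}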
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
index 6329cbe66..fc7eec83a 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
@@ -139,7 +139,7 @@ module_param(protocol, int, 0644);
MODULE_PARM_DESC(protocol, "protocol version");
#endif
-ksock_tunables_t ksocknal_tunables;
+struct ksock_tunables ksocknal_tunables;
int ksocknal_tunables_init(void)
{
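The entries in struct ksock_tunables are all pointers to module parameters like the "protocol" one above; ksocknal_tunables_init() just wires them up. A minimal kernel-style sketch of one such tunable (variable name and default value illustrative, not copied from the file):

#include <linux/module.h>

static int sock_timeout = 50;
module_param(sock_timeout, int, 0644);         /* root-writable in sysfs */
MODULE_PARM_DESC(sock_timeout, "dead socket timeout (seconds)");

/* ksocknal_tunables_init() would then wire it up, roughly:
 *	ksocknal_tunables.ksnd_timeout = &sock_timeout;
 */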
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
index 32cc31e4c..82e174f6d 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
@@ -38,8 +38,8 @@
* pro_match_tx() : Called holding glock
*/
-static ksock_tx_t *
-ksocknal_queue_tx_msg_v1(ksock_conn_t *conn, ksock_tx_t *tx_msg)
+static struct ksock_tx *
+ksocknal_queue_tx_msg_v1(struct ksock_conn *conn, struct ksock_tx *tx_msg)
{
/* V1.x, just enqueue it */
list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
@@ -47,9 +47,9 @@ ksocknal_queue_tx_msg_v1(ksock_conn_t *conn, ksock_tx_t *tx_msg)
}
void
-ksocknal_next_tx_carrier(ksock_conn_t *conn)
+ksocknal_next_tx_carrier(struct ksock_conn *conn)
{
- ksock_tx_t *tx = conn->ksnc_tx_carrier;
+ struct ksock_tx *tx = conn->ksnc_tx_carrier;
/* Called holding BH lock: conn->ksnc_scheduler->kss_lock */
LASSERT(!list_empty(&conn->ksnc_tx_queue));
@@ -66,10 +66,10 @@ ksocknal_next_tx_carrier(ksock_conn_t *conn)
}
static int
-ksocknal_queue_tx_zcack_v2(ksock_conn_t *conn,
- ksock_tx_t *tx_ack, __u64 cookie)
+ksocknal_queue_tx_zcack_v2(struct ksock_conn *conn,
+ struct ksock_tx *tx_ack, __u64 cookie)
{
- ksock_tx_t *tx = conn->ksnc_tx_carrier;
+ struct ksock_tx *tx = conn->ksnc_tx_carrier;
LASSERT(!tx_ack ||
tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP);
@@ -112,10 +112,10 @@ ksocknal_queue_tx_zcack_v2(ksock_conn_t *conn,
return 1;
}
-static ksock_tx_t *
-ksocknal_queue_tx_msg_v2(ksock_conn_t *conn, ksock_tx_t *tx_msg)
+static struct ksock_tx *
+ksocknal_queue_tx_msg_v2(struct ksock_conn *conn, struct ksock_tx *tx_msg)
{
- ksock_tx_t *tx = conn->ksnc_tx_carrier;
+ struct ksock_tx *tx = conn->ksnc_tx_carrier;
/*
* Enqueue tx_msg:
@@ -149,10 +149,10 @@ ksocknal_queue_tx_msg_v2(ksock_conn_t *conn, ksock_tx_t *tx_msg)
}
static int
-ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
- ksock_tx_t *tx_ack, __u64 cookie)
+ksocknal_queue_tx_zcack_v3(struct ksock_conn *conn,
+ struct ksock_tx *tx_ack, __u64 cookie)
{
- ksock_tx_t *tx;
+ struct ksock_tx *tx;
if (conn->ksnc_type != SOCKLND_CONN_ACK)
return ksocknal_queue_tx_zcack_v2(conn, tx_ack, cookie);
@@ -267,7 +267,7 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
}
static int
-ksocknal_match_tx(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk)
+ksocknal_match_tx(struct ksock_conn *conn, struct ksock_tx *tx, int nonblk)
{
int nob;
@@ -311,7 +311,7 @@ ksocknal_match_tx(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk)
}
static int
-ksocknal_match_tx_v3(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk)
+ksocknal_match_tx_v3(struct ksock_conn *conn, struct ksock_tx *tx, int nonblk)
{
int nob;
@@ -355,18 +355,18 @@ ksocknal_match_tx_v3(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk)
/* (Sink) handle incoming ZC request from sender */
static int
-ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote)
+ksocknal_handle_zcreq(struct ksock_conn *c, __u64 cookie, int remote)
{
- ksock_peer_t *peer = c->ksnc_peer;
- ksock_conn_t *conn;
- ksock_tx_t *tx;
+ struct ksock_peer *peer = c->ksnc_peer;
+ struct ksock_conn *conn;
+ struct ksock_tx *tx;
int rc;
read_lock(&ksocknal_data.ksnd_global_lock);
conn = ksocknal_find_conn_locked(peer, NULL, !!remote);
if (conn) {
- ksock_sched_t *sched = conn->ksnc_scheduler;
+ struct ksock_sched *sched = conn->ksnc_scheduler;
LASSERT(conn->ksnc_proto->pro_queue_tx_zcack);
@@ -399,12 +399,12 @@ ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote)
/* (Sender) handle ZC_ACK from sink */
static int
-ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2)
+ksocknal_handle_zcack(struct ksock_conn *conn, __u64 cookie1, __u64 cookie2)
{
- ksock_peer_t *peer = conn->ksnc_peer;
- ksock_tx_t *tx;
- ksock_tx_t *temp;
- ksock_tx_t *tmp;
+ struct ksock_peer *peer = conn->ksnc_peer;
+ struct ksock_tx *tx;
+ struct ksock_tx *temp;
+ struct ksock_tx *tmp;
LIST_HEAD(zlist);
int count;
@@ -446,7 +446,7 @@ ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2)
}
static int
-ksocknal_send_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello)
+ksocknal_send_hello_v1(struct ksock_conn *conn, ksock_hello_msg_t *hello)
{
struct socket *sock = conn->ksnc_sock;
lnet_hdr_t *hdr;
@@ -503,7 +503,7 @@ ksocknal_send_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello)
if (!hello->kshm_nips)
goto out;
- for (i = 0; i < (int) hello->kshm_nips; i++)
+ for (i = 0; i < (int)hello->kshm_nips; i++)
hello->kshm_ips[i] = __cpu_to_le32(hello->kshm_ips[i]);
rc = lnet_sock_write(sock, hello->kshm_ips,
@@ -521,7 +521,7 @@ out:
}
static int
-ksocknal_send_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello)
+ksocknal_send_hello_v2(struct ksock_conn *conn, ksock_hello_msg_t *hello)
{
struct socket *sock = conn->ksnc_sock;
int rc;
@@ -563,7 +563,7 @@ ksocknal_send_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello)
}
static int
-ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello,
+ksocknal_recv_hello_v1(struct ksock_conn *conn, ksock_hello_msg_t *hello,
int timeout)
{
struct socket *sock = conn->ksnc_sock;
@@ -622,7 +622,7 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello,
goto out;
}
- for (i = 0; i < (int) hello->kshm_nips; i++) {
+ for (i = 0; i < (int)hello->kshm_nips; i++) {
hello->kshm_ips[i] = __le32_to_cpu(hello->kshm_ips[i]);
if (!hello->kshm_ips[i]) {
@@ -639,7 +639,7 @@ out:
}
static int
-ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout)
+ksocknal_recv_hello_v2(struct ksock_conn *conn, ksock_hello_msg_t *hello, int timeout)
{
struct socket *sock = conn->ksnc_sock;
int rc;
@@ -690,7 +690,7 @@ ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout
return rc;
}
- for (i = 0; i < (int) hello->kshm_nips; i++) {
+ for (i = 0; i < (int)hello->kshm_nips; i++) {
if (conn->ksnc_flip)
__swab32s(&hello->kshm_ips[i]);
@@ -705,7 +705,7 @@ ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout
}
static void
-ksocknal_pack_msg_v1(ksock_tx_t *tx)
+ksocknal_pack_msg_v1(struct ksock_tx *tx)
{
/* V1.x has no KSOCK_MSG_NOOP */
LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
@@ -719,7 +719,7 @@ ksocknal_pack_msg_v1(ksock_tx_t *tx)
}
static void
-ksocknal_pack_msg_v2(ksock_tx_t *tx)
+ksocknal_pack_msg_v2(struct ksock_tx *tx)
{
tx->tx_iov[0].iov_base = &tx->tx_msg;
@@ -755,7 +755,7 @@ ksocknal_unpack_msg_v2(ksock_msg_t *msg)
return; /* Do nothing */
}
-ksock_proto_t ksocknal_protocol_v1x = {
+struct ksock_proto ksocknal_protocol_v1x = {
.pro_version = KSOCK_PROTO_V1,
.pro_send_hello = ksocknal_send_hello_v1,
.pro_recv_hello = ksocknal_recv_hello_v1,
@@ -768,7 +768,7 @@ ksock_proto_t ksocknal_protocol_v1x = {
.pro_match_tx = ksocknal_match_tx
};
-ksock_proto_t ksocknal_protocol_v2x = {
+struct ksock_proto ksocknal_protocol_v2x = {
.pro_version = KSOCK_PROTO_V2,
.pro_send_hello = ksocknal_send_hello_v2,
.pro_recv_hello = ksocknal_recv_hello_v2,
@@ -781,7 +781,7 @@ ksock_proto_t ksocknal_protocol_v2x = {
.pro_match_tx = ksocknal_match_tx
};
-ksock_proto_t ksocknal_protocol_v3x = {
+struct ksock_proto ksocknal_protocol_v3x = {
.pro_version = KSOCK_PROTO_V3,
.pro_send_hello = ksocknal_send_hello_v2,
.pro_recv_hello = ksocknal_recv_hello_v2,
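The three ksock_proto tables are a classic ops-table dispatch: the version negotiated at hello time selects the table, after which the rest of the code calls through conn->ksnc_proto without version checks. Note that v3x reuses the v2 hello handlers while overriding queueing and matching. A small sketch of the pattern, names illustrative:

#include <stdio.h>

struct proto {
	int  version;
	void (*pack)(const char *msg);
};

static void pack_v1(const char *msg) { printf("v1: %s\n", msg); }
static void pack_v2(const char *msg) { printf("v2: %s\n", msg); }

static const struct proto proto_v1 = { .version = 1, .pack = pack_v1 };
static const struct proto proto_v2 = { .version = 2, .pack = pack_v2 };
/* v3 reuses a v2 handler, as ksocknal_protocol_v3x reuses
 * ksocknal_send_hello_v2/ksocknal_recv_hello_v2 above. */
static const struct proto proto_v3 = { .version = 3, .pack = pack_v2 };

int main(void)
{
	const struct proto *p = &proto_v3;  /* chosen at handshake time */

	p->pack("hello");
	return 0;
}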
diff --git a/drivers/staging/lustre/lnet/libcfs/debug.c b/drivers/staging/lustre/lnet/libcfs/debug.c
index 8c260c3d5..42b15a769 100644
--- a/drivers/staging/lustre/lnet/libcfs/debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/debug.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -366,12 +362,12 @@ void libcfs_debug_dumplog(void)
* get to schedule()
*/
init_waitqueue_entry(&wait, current);
- set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&debug_ctlwq, &wait);
dumper = kthread_run(libcfs_debug_dumplog_thread,
(void *)(long)current_pid(),
"libcfs_debug_dumper");
+ set_current_state(TASK_INTERRUPTIBLE);
if (IS_ERR(dumper))
pr_err("LustreError: cannot start log dump thread: %ld\n",
PTR_ERR(dumper));
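The reordering above follows the standard wait-queue rule: set_current_state() must come after any call that can itself sleep, because an inner sleep resets the task to TASK_RUNNING, and it should sit immediately before schedule(); being on the wait queue beforehand is what keeps the dumper's wakeup from being lost. A sketch of the resulting shape (kernel-style, not the file's exact code):

init_waitqueue_entry(&wait, current);
add_wait_queue(&wq, &wait);            /* on the queue before the thread runs */

task = kthread_run(worker_fn, arg, "worker");   /* may sleep internally */

set_current_state(TASK_INTERRUPTIBLE);          /* arm *after* it */
if (!IS_ERR(task))
	schedule();                             /* until the worker wakes us */
__set_current_state(TASK_RUNNING);
remove_wait_queue(&wq, &wait);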
diff --git a/drivers/staging/lustre/lnet/libcfs/fail.c b/drivers/staging/lustre/lnet/libcfs/fail.c
index 086e690bd..9288ee08d 100644
--- a/drivers/staging/lustre/lnet/libcfs/fail.c
+++ b/drivers/staging/lustre/lnet/libcfs/fail.c
@@ -16,10 +16,6 @@
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see http://www.gnu.org/licenses
*
- * Please contact Oracle Corporation, Inc., 500 Oracle Parkway, Redwood Shores,
- * CA 94065 USA or visit www.oracle.com if you need additional information or
- * have any questions.
- *
* GPL HEADER END
*/
/*
diff --git a/drivers/staging/lustre/lnet/libcfs/hash.c b/drivers/staging/lustre/lnet/libcfs/hash.c
index cc45ed82b..23283b6e0 100644
--- a/drivers/staging/lustre/lnet/libcfs/hash.c
+++ b/drivers/staging/lustre/lnet/libcfs/hash.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_string.c b/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
index 50ac1536d..fc697cdfc 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
index 84f9b7b47..5c0116ade 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
@@ -99,6 +99,7 @@ static int cfs_crypto_hash_alloc(enum cfs_crypto_hash_alg hash_alg,
(*type)->cht_size);
if (err != 0) {
+ ahash_request_free(*req);
crypto_free_ahash(tfm);
return err;
}
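The one-line fix above plugs a leak: the error path freed the transform but leaked the request allocated just before it. Cleanups must run in reverse order of acquisition; a sketch of the goto-unwind idiom that rule usually takes in kernel code (the failing step here is a stand-in, not the file's actual call):

static int hash_alloc_sketch(void)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	int err;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto free_tfm;
	}

	err = do_setkey_step();        /* hypothetical failing step */
	if (err)
		goto free_req;         /* the leak fixed above: free req too */

	return 0;

free_req:
	ahash_request_free(req);
free_tfm:
	crypto_free_ahash(tfm);
	return err;
}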
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c
index 13d31e8a9..3e22cad18 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
index 638e4b33d..435b784c5 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c
index 86f32ffc5..a6a76a681 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c
@@ -11,7 +11,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
*/
/*
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
index d89f71ee4..38308f8b6 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
index bbe19a684..291d286ea 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
index 91c2ae8f9..8b551d270 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/libcfs/module.c b/drivers/staging/lustre/lnet/libcfs/module.c
index f2d041118..86b4d25ca 100644
--- a/drivers/staging/lustre/lnet/libcfs/module.c
+++ b/drivers/staging/lustre/lnet/libcfs/module.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/libcfs/prng.c b/drivers/staging/lustre/lnet/libcfs/prng.c
index c75ae9a68..a9bdb284f 100644
--- a/drivers/staging/lustre/lnet/libcfs/prng.c
+++ b/drivers/staging/lustre/lnet/libcfs/prng.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c
index 7739b9469..1c7efdfaf 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.h b/drivers/staging/lustre/lnet/libcfs/tracefile.h
index ac84e7f4c..d878676bc 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.h
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/libcfs/workitem.c b/drivers/staging/lustre/lnet/libcfs/workitem.c
index 92236ae59..e98c818a1 100644
--- a/drivers/staging/lustre/lnet/libcfs/workitem.c
+++ b/drivers/staging/lustre/lnet/libcfs/workitem.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/lnet/acceptor.c b/drivers/staging/lustre/lnet/lnet/acceptor.c
index 1452bb3ad..8c50c99d8 100644
--- a/drivers/staging/lustre/lnet/lnet/acceptor.c
+++ b/drivers/staging/lustre/lnet/lnet/acceptor.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index fe0dbe746..346db892f 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -1677,7 +1673,7 @@ lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_data *config)
if (!ni || !config)
return;
- net_config = (struct lnet_ioctl_net_config *) config->cfg_bulk;
+ net_config = (struct lnet_ioctl_net_config *)config->cfg_bulk;
if (!net_config)
return;
diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c
index 480cc9c6c..a72afdf68 100644
--- a/drivers/staging/lustre/lnet/lnet/config.c
+++ b/drivers/staging/lustre/lnet/lnet/config.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c
index adbcadbab..d05c6cc79 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-eq.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/lnet/lib-md.c b/drivers/staging/lustre/lnet/lnet/lib-md.c
index 75d31217b..1834bf7a2 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-md.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-md.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/lnet/lib-me.c b/drivers/staging/lustre/lnet/lnet/lib-me.c
index e671aed37..b430046dc 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-me.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-me.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index c5d5bedb3..e6d3b801d 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/lnet/lib-msg.c b/drivers/staging/lustre/lnet/lnet/lib-msg.c
index f879d7f28..910e106e2 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-msg.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-msg.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/lnet/lo.c b/drivers/staging/lustre/lnet/lnet/lo.c
index 468eda611..08402712a 100644
--- a/drivers/staging/lustre/lnet/lnet/lo.c
+++ b/drivers/staging/lustre/lnet/lnet/lo.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/lnet/module.c b/drivers/staging/lustre/lnet/lnet/module.c
index 246b5c141..4ffbd3e44 100644
--- a/drivers/staging/lustre/lnet/lnet/module.c
+++ b/drivers/staging/lustre/lnet/lnet/module.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -200,7 +196,7 @@ static int __init lnet_init(void)
* Have to schedule a separate thread to avoid deadlocking
* in modload
*/
- (void) kthread_run(lnet_configure, NULL, "lnet_initd");
+ (void)kthread_run(lnet_configure, NULL, "lnet_initd");
}
return 0;
diff --git a/drivers/staging/lustre/lnet/lnet/net_fault.c b/drivers/staging/lustre/lnet/lnet/net_fault.c
index 7d76f28d3..e4aceb71c 100644
--- a/drivers/staging/lustre/lnet/lnet/net_fault.c
+++ b/drivers/staging/lustre/lnet/lnet/net_fault.c
@@ -760,9 +760,7 @@ lnet_delay_rule_add(struct lnet_fault_attr *attr)
wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_running);
}
- init_timer(&rule->dl_timer);
- rule->dl_timer.function = delay_timer_cb;
- rule->dl_timer.data = (unsigned long)rule;
+ setup_timer(&rule->dl_timer, delay_timer_cb, (unsigned long)rule);
spin_lock_init(&rule->dl_lock);
INIT_LIST_HEAD(&rule->dl_msg_list);
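
For reference, setup_timer() in this kernel generation folds the init_timer()/.function/.data triple into a single call; the callback still receives the unsigned long cookie. A minimal sketch of the pattern, assuming a hypothetical struct demo_rule (the names are illustrative, not the patch's):

#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_rule {				/* hypothetical owner of a timer */
	struct timer_list d_timer;
};

static void demo_timer_cb(unsigned long data)
{
	struct demo_rule *rule = (struct demo_rule *)data;

	/* expiry work; re-arm one second out, as a delay rule might */
	mod_timer(&rule->d_timer, jiffies + HZ);
}

static void demo_arm(struct demo_rule *rule)
{
	/* replaces init_timer() plus the .function and .data assignments */
	setup_timer(&rule->d_timer, demo_timer_cb, (unsigned long)rule);
	mod_timer(&rule->d_timer, jiffies + HZ);
}
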
diff --git a/drivers/staging/lustre/lnet/lnet/nidstrings.c b/drivers/staging/lustre/lnet/lnet/nidstrings.c
index ebf468fbc..a6d7a6159 100644
--- a/drivers/staging/lustre/lnet/lnet/nidstrings.c
+++ b/drivers/staging/lustre/lnet/lnet/nidstrings.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/lnet/peer.c b/drivers/staging/lustre/lnet/lnet/peer.c
index b026feebc..e8061916c 100644
--- a/drivers/staging/lustre/lnet/lnet/peer.c
+++ b/drivers/staging/lustre/lnet/lnet/peer.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
index b01dc424c..063543233 100644
--- a/drivers/staging/lustre/lnet/lnet/router.c
+++ b/drivers/staging/lustre/lnet/lnet/router.c
@@ -18,6 +18,7 @@
*/
#define DEBUG_SUBSYSTEM S_LNET
+#include <linux/completion.h>
#include "../../include/linux/lnet/lib-lnet.h"
#define LNET_NRB_TINY_MIN 512 /* min value for each CPT */
@@ -1065,7 +1066,7 @@ lnet_router_checker_start(void)
return -EINVAL;
}
- sema_init(&the_lnet.ln_rc_signal, 0);
+ init_completion(&the_lnet.ln_rc_signal);
rc = LNetEQAlloc(0, lnet_router_checker_event, &the_lnet.ln_rc_eqh);
if (rc) {
@@ -1079,7 +1080,7 @@ lnet_router_checker_start(void)
rc = PTR_ERR(task);
CERROR("Can't start router checker thread: %d\n", rc);
/* block until event callback signals exit */
- down(&the_lnet.ln_rc_signal);
+ wait_for_completion(&the_lnet.ln_rc_signal);
rc = LNetEQFree(the_lnet.ln_rc_eqh);
LASSERT(!rc);
the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
@@ -1112,7 +1113,7 @@ lnet_router_checker_stop(void)
wake_up(&the_lnet.ln_rc_waitq);
/* block until event callback signals exit */
- down(&the_lnet.ln_rc_signal);
+ wait_for_completion(&the_lnet.ln_rc_signal);
LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);
rc = LNetEQFree(the_lnet.ln_rc_eqh);
@@ -1295,7 +1296,7 @@ rescan:
lnet_prune_rc_data(1); /* wait for UNLINK */
the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
- up(&the_lnet.ln_rc_signal);
+ complete(&the_lnet.ln_rc_signal);
/* The unlink event callback will signal final completion */
return 0;
}
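
The router.c change above is a semaphore-to-completion conversion: ln_rc_signal was only ever used as a one-shot signal (sema_init(…, 0), down() to wait, up() to signal), which is exactly what struct completion expresses. A minimal sketch of the pattern with hypothetical names:

#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/err.h>

static struct completion demo_done;	/* hypothetical; plays ln_rc_signal's role */

static int demo_worker(void *arg)
{
	/* ... startup or shutdown work ... */
	complete(&demo_done);		/* was: up(&sem) */
	return 0;
}

static int demo_start(void)
{
	struct task_struct *task;

	init_completion(&demo_done);	/* was: sema_init(&sem, 0) */
	task = kthread_run(demo_worker, NULL, "demo_worker");
	if (IS_ERR(task))
		return PTR_ERR(task);
	wait_for_completion(&demo_done); /* was: down(&sem); blocks until complete() */
	return 0;
}
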
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
index a63d86c4c..13d0454e7 100644
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c
index 408c614b6..b786f8b4a 100644
--- a/drivers/staging/lustre/lnet/selftest/conctl.c
+++ b/drivers/staging/lustre/lnet/selftest/conctl.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index 6f6875811..1be3cad72 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.h b/drivers/staging/lustre/lnet/selftest/conrpc.h
index 90c3385a3..7ec6fc969 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.h
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
index a03e52d29..4c33621f0 100644
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ b/drivers/staging/lustre/lnet/selftest/console.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/selftest/console.h b/drivers/staging/lustre/lnet/selftest/console.h
index becd22e41..78b147732 100644
--- a/drivers/staging/lustre/lnet/selftest/console.h
+++ b/drivers/staging/lustre/lnet/selftest/console.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index 30e4f71f1..c2f121f44 100644
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/selftest/module.c b/drivers/staging/lustre/lnet/selftest/module.c
index cc046b1d4..71485f992 100644
--- a/drivers/staging/lustre/lnet/selftest/module.c
+++ b/drivers/staging/lustre/lnet/selftest/module.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
index ad26fe9dd..9331ca4e3 100644
--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index 3c45a7cfa..3b26d6eb4 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.h b/drivers/staging/lustre/lnet/selftest/rpc.h
index c9b904cad..4ab2ee264 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.h
+++ b/drivers/staging/lustre/lnet/selftest/rpc.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
index 4eac1c9e6..d033ac03d 100644
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -15,12 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- * copy of GPLv2].
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/selftest/timer.c b/drivers/staging/lustre/lnet/selftest/timer.c
index b6c4aae00..dcd22580b 100644
--- a/drivers/staging/lustre/lnet/selftest/timer.c
+++ b/drivers/staging/lustre/lnet/selftest/timer.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lnet/selftest/timer.h b/drivers/staging/lustre/lnet/selftest/timer.h
index f1fbebd8a..441d6d6b4 100644
--- a/drivers/staging/lustre/lnet/selftest/timer.h
+++ b/drivers/staging/lustre/lnet/selftest/timer.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/Kconfig b/drivers/staging/lustre/lustre/Kconfig
index 8ac7cd4d6..9f5d75f16 100644
--- a/drivers/staging/lustre/lustre/Kconfig
+++ b/drivers/staging/lustre/lustre/Kconfig
@@ -54,9 +54,3 @@ config LUSTRE_TRANSLATE_ERRNOS
bool
depends on LUSTRE_FS && !X86
default y
-
-config LUSTRE_LLITE_LLOOP
- tristate "Lustre virtual block device"
- depends on LUSTRE_FS && BLOCK
- depends on !PPC_64K_PAGES && !ARM64_64K_PAGES && !MICROBLAZE_64K_PAGES && !PAGE_SIZE_64KB && !IA64_PAGE_SIZE_64KB && !PARISC_PAGE_SIZE_64KB
- default m
diff --git a/drivers/staging/lustre/lustre/fid/fid_internal.h b/drivers/staging/lustre/lustre/fid/fid_internal.h
index b79a81397..5c53773ec 100644
--- a/drivers/staging/lustre/lustre/fid/fid_internal.h
+++ b/drivers/staging/lustre/lustre/fid/fid_internal.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/fid/fid_lib.c b/drivers/staging/lustre/lustre/fid/fid_lib.c
index dd65159eb..99ae7eb67 100644
--- a/drivers/staging/lustre/lustre/fid/fid_lib.c
+++ b/drivers/staging/lustre/lustre/fid/fid_lib.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c
index 3a4df6264..454744d25 100644
--- a/drivers/staging/lustre/lustre/fid/fid_request.c
+++ b/drivers/staging/lustre/lustre/fid/fid_request.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -98,8 +94,10 @@ static int seq_client_rpc(struct lu_client_seq *seq,
* request here, otherwise if MDT0 is failed(umounted),
* it can not release the export of MDT0
*/
- if (seq->lcs_type == LUSTRE_SEQ_DATA)
- req->rq_no_delay = req->rq_no_resend = 1;
+ if (seq->lcs_type == LUSTRE_SEQ_DATA) {
+ req->rq_no_delay = 1;
+ req->rq_no_resend = 1;
+ }
debug_mask = D_CONSOLE;
} else {
if (seq->lcs_type == LUSTRE_SEQ_METADATA) {
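
The fid_request.c hunk only unchains the double assignment into two statements, the kernel's preferred style once the branch needs braces anyway, and easier to read when the targets are single-bit flags, as these appear to be. The shape of the change, with cond standing in for the real test:

	/* before: chained assignment, no braces */
	if (cond)
		req->rq_no_delay = req->rq_no_resend = 1;

	/* after: one assignment per statement */
	if (cond) {
		req->rq_no_delay = 1;
		req->rq_no_resend = 1;
	}
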
diff --git a/drivers/staging/lustre/lustre/fid/lproc_fid.c b/drivers/staging/lustre/lustre/fid/lproc_fid.c
index 1f0e78686..81b7ca9ea 100644
--- a/drivers/staging/lustre/lustre/fid/lproc_fid.c
+++ b/drivers/staging/lustre/lustre/fid/lproc_fid.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c
index 5a04e99d9..0100a935f 100644
--- a/drivers/staging/lustre/lustre/fld/fld_cache.c
+++ b/drivers/staging/lustre/lustre/fld/fld_cache.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/fld/fld_internal.h b/drivers/staging/lustre/lustre/fld/fld_internal.h
index 75d6a4863..f0efe5b9f 100644
--- a/drivers/staging/lustre/lustre/fld/fld_internal.h
+++ b/drivers/staging/lustre/lustre/fld/fld_internal.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/fld/fld_request.c b/drivers/staging/lustre/lustre/fld/fld_request.c
index 304c0ec26..e59d626a1 100644
--- a/drivers/staging/lustre/lustre/fld/fld_request.c
+++ b/drivers/staging/lustre/lustre/fld/fld_request.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/fld/lproc_fld.c b/drivers/staging/lustre/lustre/fld/lproc_fld.c
index ca898befe..61ac42079 100644
--- a/drivers/staging/lustre/lustre/fld/lproc_fld.c
+++ b/drivers/staging/lustre/lustre/fld/lproc_fld.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h
index d4c33dd11..3cd4a2577 100644
--- a/drivers/staging/lustre/lustre/include/cl_object.h
+++ b/drivers/staging/lustre/lustre/include/cl_object.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -2326,7 +2322,8 @@ void cl_lock_descr_print(const struct lu_env *env, void *cookie,
*/
struct cl_client_cache {
/**
- * # of users (OSCs)
+ * # of client cache refcount
+ * # of users (OSCs) + 2 (held by llite and lov)
*/
atomic_t ccc_users;
/**
@@ -2361,6 +2358,13 @@ struct cl_client_cache {
};
+/**
+ * cl_cache functions
+ */
+struct cl_client_cache *cl_cache_init(unsigned long lru_page_max);
+void cl_cache_incref(struct cl_client_cache *cache);
+void cl_cache_decref(struct cl_client_cache *cache);
+
/** @} cl_page */
/** \defgroup cl_lock cl_lock
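
The cl_object.h hunk turns the client cache into an explicitly refcounted object and exports its lifecycle. A hedged sketch of the presumed usage, inferred from the declarations and the ccc_users comment above; the call sites are illustrative, not the patch's actual llite/lov code:

static int demo_cache_setup(unsigned long max_pages)
{
	struct cl_client_cache *cache;

	cache = cl_cache_init(max_pages);	/* presumably returns holding a ref */
	if (!cache)
		return -ENOMEM;

	cl_cache_incref(cache);	/* each sharing layer takes its own reference */
	/* ... hand cache to the other layer ... */

	cl_cache_decref(cache);	/* per-user release */
	cl_cache_decref(cache);	/* last decref presumably frees the cache */
	return 0;
}
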
diff --git a/drivers/staging/lustre/lustre/include/interval_tree.h b/drivers/staging/lustre/lustre/include/interval_tree.h
index f6df3f33e..4a15228b5 100644
--- a/drivers/staging/lustre/lustre/include/interval_tree.h
+++ b/drivers/staging/lustre/lustre/include/interval_tree.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
index 79d8f9307..1eb64ec4b 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_lite.h b/drivers/staging/lustre/lustre/include/linux/lustre_lite.h
index 3420cfd12..d18e8a76b 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_lite.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_lite.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
index c6c7f5463..5842cb18b 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_user.h b/drivers/staging/lustre/lustre/include/linux/lustre_user.h
index 9cc2849f3..e967950e8 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_user.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/lprocfs_status.h b/drivers/staging/lustre/lustre/include/lprocfs_status.h
index 4146c9c39..d68e60e7f 100644
--- a/drivers/staging/lustre/lustre/include/lprocfs_status.h
+++ b/drivers/staging/lustre/lustre/include/lprocfs_status.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h
index 281651218..6e25c1bb6 100644
--- a/drivers/staging/lustre/lustre/include/lu_object.h
+++ b/drivers/staging/lustre/lustre/include/lu_object.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -783,7 +779,7 @@ do { \
if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
lu_object_print(env, &msgdata, lu_cdebug_printer, object);\
- CDEBUG(mask, format, ## __VA_ARGS__); \
+ CDEBUG(mask, format "\n", ## __VA_ARGS__); \
} \
} while (0)
diff --git a/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h b/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h
index 07d45de69..c2340d643 100644
--- a/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h
+++ b/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
index 9c53c1792..051864c23 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -386,7 +382,7 @@ static inline __u64 fid_ver_oid(const struct lu_fid *fid)
* used for other purposes and not risk collisions with existing inodes.
*
* Different FID Format
- * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs#NEW.0
+ * http://wiki.old.lustre.org/index.php/Architecture_-_Interoperability_fids_zfs
*/
enum fid_seq {
FID_SEQ_OST_MDT0 = 0,
@@ -704,7 +700,7 @@ static inline int fid_set_id(struct lu_fid *fid, __u64 oid)
* be passed through unchanged. Only legacy OST objects in "group 0"
* will be mapped into the IDIF namespace so that they can fit into the
* struct lu_fid fields without loss. For reference see:
- * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs
+ * http://wiki.old.lustre.org/index.php/Architecture_-_Interoperability_fids_zfs
*/
static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid,
__u32 ost_idx)
@@ -1241,8 +1237,16 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
*/
#define OBD_CONNECT_ATTRFID 0x4000ULL /*Server can GetAttr By Fid*/
#define OBD_CONNECT_NODEVOH 0x8000ULL /*No open hndl on specl nodes*/
-#define OBD_CONNECT_RMT_CLIENT 0x10000ULL /*Remote client */
-#define OBD_CONNECT_RMT_CLIENT_FORCE 0x20000ULL /*Remote client by force */
+#define OBD_CONNECT_RMT_CLIENT 0x10000ULL /* Remote client, never used
+ * in production. Removed in
+ * 2.9. Keep this flag to
+ * avoid reuse.
+ */
+#define OBD_CONNECT_RMT_CLIENT_FORCE 0x20000ULL /* Remote client by force,
+ * never used in production.
+ * Removed in 2.9. Keep this
+ * flag to avoid reuse
+ */
#define OBD_CONNECT_BRW_SIZE 0x40000ULL /*Max bytes per rpc */
#define OBD_CONNECT_QUOTA64 0x80000ULL /*Not used since 2.4 */
#define OBD_CONNECT_MDS_CAPA 0x100000ULL /*MDS capability */
@@ -1703,7 +1707,7 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic)
#define OBD_MD_FLXATTRLS (0x0000002000000000ULL) /* xattr list */
#define OBD_MD_FLXATTRRM (0x0000004000000000ULL) /* xattr remove */
#define OBD_MD_FLACL (0x0000008000000000ULL) /* ACL */
-#define OBD_MD_FLRMTPERM (0x0000010000000000ULL) /* remote permission */
+/* OBD_MD_FLRMTPERM (0x0000010000000000ULL) remote perm, obsolete */
#define OBD_MD_FLMDSCAPA (0x0000020000000000ULL) /* MDS capability */
#define OBD_MD_FLOSSCAPA (0x0000040000000000ULL) /* OSS capability */
#define OBD_MD_FLCKSPLIT (0x0000080000000000ULL) /* Check split on server */
@@ -1715,10 +1719,10 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic)
*/
#define OBD_MD_FLOBJCOUNT (0x0000400000000000ULL) /* for multiple destroy */
-#define OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) /* lfs lsetfacl case */
-#define OBD_MD_FLRMTLGETFACL (0x0002000000000000ULL) /* lfs lgetfacl case */
-#define OBD_MD_FLRMTRSETFACL (0x0004000000000000ULL) /* lfs rsetfacl case */
-#define OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) /* lfs rgetfacl case */
+/* OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) lfs lsetfacl, obsolete */
+/* OBD_MD_FLRMTLGETFACL (0x0002000000000000ULL) lfs lgetfacl, obsolete */
+/* OBD_MD_FLRMTRSETFACL (0x0004000000000000ULL) lfs rsetfacl, obsolete */
+/* OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) lfs rgetfacl, obsolete */
#define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */
#define OBD_MD_FLRELEASED (0x0020000000000000ULL) /* file released */
@@ -2159,26 +2163,8 @@ enum {
CFS_SETUID_PERM = 0x01,
CFS_SETGID_PERM = 0x02,
CFS_SETGRP_PERM = 0x04,
- CFS_RMTACL_PERM = 0x08,
- CFS_RMTOWN_PERM = 0x10
};
-/* inode access permission for remote user, the inode info are omitted,
- * for client knows them.
- */
-struct mdt_remote_perm {
- __u32 rp_uid;
- __u32 rp_gid;
- __u32 rp_fsuid;
- __u32 rp_fsuid_h;
- __u32 rp_fsgid;
- __u32 rp_fsgid_h;
- __u32 rp_access_perm; /* MAY_READ/WRITE/EXEC */
- __u32 rp_padding;
-};
-
-void lustre_swab_mdt_remote_perm(struct mdt_remote_perm *p);
-
struct mdt_rec_setattr {
__u32 sa_opcode;
__u32 sa_cap;
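
A note on the pattern above: the OBD_CONNECT_RMT_CLIENT* and OBD_MD_FLRMT* values are wire-protocol bits, so retiring them means commenting them out while leaving their positions reserved; reusing a bit would silently change its meaning for peers that still set it. The same idiom in miniature, with hypothetical names:

#define DEMO_CONNECT_FOO	0x1ULL	/* still in use */
/* DEMO_CONNECT_BAR	0x2ULL	   retired; bit reserved, do not reuse */
#define DEMO_CONNECT_BAZ	0x4ULL	/* new flags skip the retired bit */
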
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
index 59ba48ac3..ef6f38ff3 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -215,7 +211,7 @@ struct ost_id {
#define IOC_OBD_STATFS _IOWR('f', 164, struct obd_statfs *)
#define IOC_LOV_GETINFO _IOWR('f', 165, struct lov_user_mds_data *)
#define LL_IOC_FLUSHCTX _IOW('f', 166, long)
-#define LL_IOC_RMTACL _IOW('f', 167, long)
+/* LL_IOC_RMTACL 167 obsolete */
#define LL_IOC_GETOBDCOUNT _IOR('f', 168, long)
#define LL_IOC_LLOOP_ATTACH _IOWR('f', 169, long)
#define LL_IOC_LLOOP_DETACH _IOWR('f', 170, long)
@@ -542,19 +538,6 @@ struct identity_downcall_data {
__u32 idd_groups[0];
};
-/* for non-mapped uid/gid */
-#define NOBODY_UID 99
-#define NOBODY_GID 99
-
-#define INVALID_ID (-1)
-
-enum {
- RMT_LSETFACL = 1,
- RMT_LGETFACL = 2,
- RMT_RSETFACL = 3,
- RMT_RGETFACL = 4
-};
-
/* lustre volatile file support
* file name header: .^L^S^T^R:volatile"
*/
diff --git a/drivers/staging/lustre/lustre/include/lustre_acl.h b/drivers/staging/lustre/lustre/include/lustre_acl.h
index aa4cfa7b7..fecabe139 100644
--- a/drivers/staging/lustre/lustre/include/lustre_acl.h
+++ b/drivers/staging/lustre/lustre/include/lustre_acl.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/lustre_cfg.h b/drivers/staging/lustre/lustre/include/lustre_cfg.h
index e229e91f7..95a0be13c 100644
--- a/drivers/staging/lustre/lustre/include/lustre_cfg.h
+++ b/drivers/staging/lustre/lustre/include/lustre_cfg.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/lustre_debug.h b/drivers/staging/lustre/lustre/include/lustre_debug.h
index 8a089413c..93c1bdaf7 100644
--- a/drivers/staging/lustre/lustre/include/lustre_debug.h
+++ b/drivers/staging/lustre/lustre/include/lustre_debug.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/lustre_disk.h b/drivers/staging/lustre/lustre/include/lustre_disk.h
index b36821ffb..888645874 100644
--- a/drivers/staging/lustre/lustre/include/lustre_disk.h
+++ b/drivers/staging/lustre/lustre/include/lustre_disk.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
index 9cade144f..60051a5cf 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -1077,7 +1073,7 @@ void ldlm_lock2handle(const struct ldlm_lock *lock,
struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *, __u64 flags);
void ldlm_cancel_callback(struct ldlm_lock *);
int ldlm_lock_remove_from_lru(struct ldlm_lock *);
-int ldlm_lock_set_data(struct lustre_handle *, void *);
+int ldlm_lock_set_data(const struct lustre_handle *lockh, void *data);
/**
* Obtain a lock reference by its handle.
@@ -1166,10 +1162,10 @@ do { \
struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
void ldlm_lock_put(struct ldlm_lock *lock);
void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc);
-void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode);
-int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode);
-void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode);
-void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode);
+void ldlm_lock_addref(const struct lustre_handle *lockh, __u32 mode);
+int ldlm_lock_addref_try(const struct lustre_handle *lockh, __u32 mode);
+void ldlm_lock_decref(const struct lustre_handle *lockh, __u32 mode);
+void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_fail_match_locked(struct ldlm_lock *lock);
void ldlm_lock_allow_match(struct ldlm_lock *lock);
void ldlm_lock_allow_match_locked(struct ldlm_lock *lock);
@@ -1178,10 +1174,10 @@ enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
enum ldlm_type type, ldlm_policy_data_t *,
enum ldlm_mode mode, struct lustre_handle *,
int unref);
-enum ldlm_mode ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
+enum ldlm_mode ldlm_revalidate_lock_handle(const struct lustre_handle *lockh,
__u64 *bits);
void ldlm_lock_cancel(struct ldlm_lock *lock);
-void ldlm_lock_dump_handle(int level, struct lustre_handle *);
+void ldlm_lock_dump_handle(int level, const struct lustre_handle *);
void ldlm_unlink_lock_skiplist(struct ldlm_lock *req);
/* resource.c */
@@ -1255,9 +1251,9 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
enum ldlm_type type, __u8 with_policy,
enum ldlm_mode mode,
__u64 *flags, void *lvb, __u32 lvb_len,
- struct lustre_handle *lockh, int rc);
+ const struct lustre_handle *lockh, int rc);
int ldlm_cli_update_pool(struct ptlrpc_request *req);
-int ldlm_cli_cancel(struct lustre_handle *lockh,
+int ldlm_cli_cancel(const struct lustre_handle *lockh,
enum ldlm_cancel_flags cancel_flags);
int ldlm_cli_cancel_unused(struct ldlm_namespace *, const struct ldlm_res_id *,
enum ldlm_cancel_flags flags, void *opaque);
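
The lustre_dlm.h prototypes above gain const on every lustre_handle parameter that is only dereferenced, never mutated. Beyond documenting intent, this lets read-only helpers accept handles from const contexts without casts. A minimal sketch (demo_drop is hypothetical; LCK_PR is one of the ldlm lock modes):

static void demo_drop(const struct lustre_handle *lockh)
{
	/* compiles cleanly only because ldlm_lock_decref() now takes const */
	ldlm_lock_decref(lockh, LCK_PR);
}
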
diff --git a/drivers/staging/lustre/lustre/include/lustre_eacl.h b/drivers/staging/lustre/lustre/include/lustre_eacl.h
index 0b66593a9..d1039e1ff 100644
--- a/drivers/staging/lustre/lustre/include/lustre_eacl.h
+++ b/drivers/staging/lustre/lustre/include/lustre_eacl.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -70,17 +66,6 @@ typedef struct {
#define CFS_ACL_XATTR_COUNT(size, prefix) \
(((size) - sizeof(prefix ## _header)) / sizeof(prefix ## _entry))
-extern ext_acl_xattr_header *
-lustre_posix_acl_xattr_2ext(posix_acl_xattr_header *header, int size);
-extern int
-lustre_posix_acl_xattr_filter(posix_acl_xattr_header *header, size_t size,
- posix_acl_xattr_header **out);
-extern void
-lustre_ext_acl_xattr_free(ext_acl_xattr_header *header);
-extern ext_acl_xattr_header *
-lustre_acl_xattr_merge2ext(posix_acl_xattr_header *posix_header, int size,
- ext_acl_xattr_header *ext_header);
-
#endif /* CONFIG_FS_POSIX_ACL */
/** @} eacl */
diff --git a/drivers/staging/lustre/lustre/include/lustre_export.h b/drivers/staging/lustre/lustre/include/lustre_export.h
index 3014d27e6..6e7cc4689 100644
--- a/drivers/staging/lustre/lustre/include/lustre_export.h
+++ b/drivers/staging/lustre/lustre/include/lustre_export.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -180,19 +176,6 @@ static inline int exp_connect_lru_resize(struct obd_export *exp)
return !!(exp_connect_flags(exp) & OBD_CONNECT_LRU_RESIZE);
}
-static inline int exp_connect_rmtclient(struct obd_export *exp)
-{
- return !!(exp_connect_flags(exp) & OBD_CONNECT_RMT_CLIENT);
-}
-
-static inline int client_is_remote(struct obd_export *exp)
-{
- struct obd_import *imp = class_exp2cliimp(exp);
-
- return !!(imp->imp_connect_data.ocd_connect_flags &
- OBD_CONNECT_RMT_CLIENT);
-}
-
static inline int exp_connect_vbr(struct obd_export *exp)
{
return !!(exp_connect_flags(exp) & OBD_CONNECT_VBR);
diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h
index 12e8b585c..743671a54 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fid.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fid.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -45,7 +41,7 @@
*
* @{
*
- * http://wiki.lustre.org/index.php/Architecture_-_Interoperability_fids_zfs
+ * http://wiki.old.lustre.org/index.php/Architecture_-_Interoperability_fids_zfs
* describes the FID namespace and interoperability requirements for FIDs.
* The important parts of that document are included here for reference.
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_fld.h b/drivers/staging/lustre/lustre/include/lustre_fld.h
index 4cf2b0e61..932410d3e 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fld.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fld.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/lustre_ha.h b/drivers/staging/lustre/lustre/include/lustre_ha.h
index 5488a698d..cde7ed702 100644
--- a/drivers/staging/lustre/lustre/include/lustre_ha.h
+++ b/drivers/staging/lustre/lustre/include/lustre_ha.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/lustre_handles.h b/drivers/staging/lustre/lustre/include/lustre_handles.h
index 27f169d2e..1a63a6b9e 100644
--- a/drivers/staging/lustre/lustre/include/lustre_handles.h
+++ b/drivers/staging/lustre/lustre/include/lustre_handles.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/lustre_import.h b/drivers/staging/lustre/lustre/include/lustre_import.h
index 8325c82b3..4445be7a5 100644
--- a/drivers/staging/lustre/lustre/include/lustre_import.h
+++ b/drivers/staging/lustre/lustre/include/lustre_import.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/lustre_intent.h b/drivers/staging/lustre/lustre/include/lustre_intent.h
index c491d52d8..ed2b6c674 100644
--- a/drivers/staging/lustre/lustre/include/lustre_intent.h
+++ b/drivers/staging/lustre/lustre/include/lustre_intent.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -38,7 +34,11 @@
#define LUSTRE_INTENT_H
/* intent IT_XXX are defined in lustre/include/obd.h */
-struct lustre_intent_data {
+
+struct lookup_intent {
+ int it_op;
+ int it_create_mode;
+ __u64 it_flags;
int it_disposition;
int it_status;
__u64 it_lock_handle;
@@ -46,17 +46,23 @@ struct lustre_intent_data {
int it_lock_mode;
int it_remote_lock_mode;
__u64 it_remote_lock_handle;
- void *it_data;
+ struct ptlrpc_request *it_request;
unsigned int it_lock_set:1;
};
-struct lookup_intent {
- int it_op;
- int it_create_mode;
- __u64 it_flags;
- union {
- struct lustre_intent_data lustre;
- } d;
-};
+static inline int it_disposition(struct lookup_intent *it, int flag)
+{
+ return it->it_disposition & flag;
+}
+
+static inline void it_set_disposition(struct lookup_intent *it, int flag)
+{
+ it->it_disposition |= flag;
+}
+
+static inline void it_clear_disposition(struct lookup_intent *it, int flag)
+{
+ it->it_disposition &= ~flag;
+}
#endif
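
The flattened lookup_intent absorbs the old lustre_intent_data fields directly, and the disposition helpers become static inlines here instead of exports from mdc/mdc_locks.c (see the lustre_mdc.h hunk below). A minimal usage sketch, assuming the DISP_ENQ_OPEN_REF flag from obd.h (referenced later in the dcache.c hunk):

/* Hypothetical caller of the new inline helpers. */
static void intent_disposition_example(struct lookup_intent *it)
{
	it_set_disposition(it, DISP_ENQ_OPEN_REF);	/* set the bit */
	if (it_disposition(it, DISP_ENQ_OPEN_REF))	/* test the bit */
		it_clear_disposition(it, DISP_ENQ_OPEN_REF);
}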
diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h
index 00b976766..06958f217 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lib.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lib.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/lustre_lite.h b/drivers/staging/lustre/lustre/include/lustre_lite.h
index fcc5ebbce..b16897702 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lite.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lite.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/lustre_log.h b/drivers/staging/lustre/lustre/include/lustre_log.h
index 49618e186..b96e02317 100644
--- a/drivers/staging/lustre/lustre/include/lustre_log.h
+++ b/drivers/staging/lustre/lustre/include/lustre_log.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h
index f267ff8a6..fa62b95d3 100644
--- a/drivers/staging/lustre/lustre/include/lustre_mdc.h
+++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -189,9 +185,6 @@ struct mdc_cache_waiter {
};
/* mdc/mdc_locks.c */
-int it_disposition(struct lookup_intent *it, int flag);
-void it_clear_disposition(struct lookup_intent *it, int flag);
-void it_set_disposition(struct lookup_intent *it, int flag);
int it_open_error(int phase, struct lookup_intent *it);
static inline bool cl_is_lov_delay_create(unsigned int flags)
diff --git a/drivers/staging/lustre/lustre/include/lustre_mds.h b/drivers/staging/lustre/lustre/include/lustre_mds.h
index 95d27ddec..4104bd9bd 100644
--- a/drivers/staging/lustre/lustre/include/lustre_mds.h
+++ b/drivers/staging/lustre/lustre/include/lustre_mds.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index a7973d5de..d5debd615 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -270,6 +266,11 @@
/* Macro to hide a typecast. */
#define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args)
+struct ptlrpc_replay_async_args {
+ int praa_old_state;
+ int praa_old_status;
+};
+
/**
* Structure to single define portal connection.
*/
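
The new ptlrpc_replay_async_args gives a named type to replay state parked in a request's async-args area via the typecast-hiding macro above. A sketch of the intended pairing, assuming rq_async_args is sized to hold it (which the macro's design implies):

static void replay_args_example(struct ptlrpc_request *req)
{
	struct ptlrpc_replay_async_args *aa;

	aa = ptlrpc_req_async_args(req);
	aa->praa_old_state = req->rq_send_state;	/* saved for replay */
	aa->praa_old_status = req->rq_status;
}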
@@ -479,8 +480,9 @@ enum rq_phase {
RQ_PHASE_BULK = 0xebc0de02,
RQ_PHASE_INTERPRET = 0xebc0de03,
RQ_PHASE_COMPLETE = 0xebc0de04,
- RQ_PHASE_UNREGISTERING = 0xebc0de05,
- RQ_PHASE_UNDEFINED = 0xebc0de06
+ RQ_PHASE_UNREG_RPC = 0xebc0de05,
+ RQ_PHASE_UNREG_BULK = 0xebc0de06,
+ RQ_PHASE_UNDEFINED = 0xebc0de07
};
/** Type of request interpreter call-back */
@@ -1247,22 +1249,103 @@ struct ptlrpc_hpreq_ops {
void (*hpreq_fini)(struct ptlrpc_request *);
};
-/**
- * Represents remote procedure call.
- *
- * This is a staple structure used by everybody wanting to send a request
- * in Lustre.
- */
-struct ptlrpc_request {
- /* Request type: one of PTL_RPC_MSG_* */
- int rq_type;
- /** Result of request processing */
- int rq_status;
+struct ptlrpc_cli_req {
+ /** For bulk requests on client only: bulk descriptor */
+ struct ptlrpc_bulk_desc *cr_bulk;
+ /** optional time limit for send attempts */
+ long cr_delay_limit;
+ /** time request was first queued */
+ time_t cr_queued_time;
+ /** request sent timeval */
+ struct timespec64 cr_sent_tv;
+ /** time for request really sent out */
+ time_t cr_sent_out;
+ /** when req reply unlink must finish. */
+ time_t cr_reply_deadline;
+ /** when req bulk unlink must finish. */
+ time_t cr_bulk_deadline;
+ /** when req unlink must finish. */
+ time_t cr_req_deadline;
+ /** Portal to which this request would be sent */
+ short cr_req_ptl;
+ /** Portal where to wait for reply and where reply would be sent */
+ short cr_rep_ptl;
+ /** request resending number */
+ unsigned int cr_resend_nr;
+ /** What was import generation when this request was sent */
+ int cr_imp_gen;
+ enum lustre_imp_state cr_send_state;
+ /** Per-request waitq introduced by bug 21938 for recovery waiting */
+ wait_queue_head_t cr_set_waitq;
+ /** Link item for request set lists */
+ struct list_head cr_set_chain;
+ /** link to waited ctx */
+ struct list_head cr_ctx_chain;
+
+ /** client's half ctx */
+ struct ptlrpc_cli_ctx *cr_cli_ctx;
+ /** Link back to the request set */
+ struct ptlrpc_request_set *cr_set;
+ /** outgoing request MD handle */
+ lnet_handle_md_t cr_req_md_h;
+ /** request-out callback parameter */
+ struct ptlrpc_cb_id cr_req_cbid;
+ /** incoming reply MD handle */
+ lnet_handle_md_t cr_reply_md_h;
+ wait_queue_head_t cr_reply_waitq;
+ /** reply callback parameter */
+ struct ptlrpc_cb_id cr_reply_cbid;
+ /** Async completion handler, called when reply is received */
+ ptlrpc_interpterer_t cr_reply_interp;
+ /** Async completion context */
+ union ptlrpc_async_args cr_async_args;
+ /** Opaque data for replay and commit callbacks. */
+ void *cr_cb_data;
/**
- * Linkage item through which this request is included into
- * sending/delayed lists on client and into rqbd list on server
+ * Commit callback, called when request is committed and about to be
+ * freed.
*/
- struct list_head rq_list;
+ void (*cr_commit_cb)(struct ptlrpc_request *);
+ /** Replay callback, called after request is replayed at recovery */
+ void (*cr_replay_cb)(struct ptlrpc_request *);
+};
+
+/** client request member aliases */
+/* NB: these aliases should NOT be used by any new code; instead they should
+ * be removed step by step to avoid potential abuse
+ */
+#define rq_bulk rq_cli.cr_bulk
+#define rq_delay_limit rq_cli.cr_delay_limit
+#define rq_queued_time rq_cli.cr_queued_time
+#define rq_sent_tv rq_cli.cr_sent_tv
+#define rq_real_sent rq_cli.cr_sent_out
+#define rq_reply_deadline rq_cli.cr_reply_deadline
+#define rq_bulk_deadline rq_cli.cr_bulk_deadline
+#define rq_req_deadline rq_cli.cr_req_deadline
+#define rq_nr_resend rq_cli.cr_resend_nr
+#define rq_request_portal rq_cli.cr_req_ptl
+#define rq_reply_portal rq_cli.cr_rep_ptl
+#define rq_import_generation rq_cli.cr_imp_gen
+#define rq_send_state rq_cli.cr_send_state
+#define rq_set_chain rq_cli.cr_set_chain
+#define rq_ctx_chain rq_cli.cr_ctx_chain
+#define rq_set rq_cli.cr_set
+#define rq_set_waitq rq_cli.cr_set_waitq
+#define rq_cli_ctx rq_cli.cr_cli_ctx
+#define rq_req_md_h rq_cli.cr_req_md_h
+#define rq_req_cbid rq_cli.cr_req_cbid
+#define rq_reply_md_h rq_cli.cr_reply_md_h
+#define rq_reply_waitq rq_cli.cr_reply_waitq
+#define rq_reply_cbid rq_cli.cr_reply_cbid
+#define rq_interpret_reply rq_cli.cr_reply_interp
+#define rq_async_args rq_cli.cr_async_args
+#define rq_cb_data rq_cli.cr_cb_data
+#define rq_commit_cb rq_cli.cr_commit_cb
+#define rq_replay_cb rq_cli.cr_replay_cb
+
+struct ptlrpc_srv_req {
+ /** initial thread servicing this request */
+ struct ptlrpc_thread *sr_svc_thread;
/**
* Server side list of incoming unserved requests sorted by arrival
* time. Traversed from time to time to notice about to expire
@@ -1270,32 +1353,86 @@ struct ptlrpc_request {
* know server is alive and well, just very busy to service their
* requests in time
*/
- struct list_head rq_timed_list;
- /** server-side history, used for debugging purposes. */
- struct list_head rq_history_list;
+ struct list_head sr_timed_list;
/** server-side per-export list */
- struct list_head rq_exp_list;
- /** server-side hp handlers */
- struct ptlrpc_hpreq_ops *rq_ops;
-
- /** initial thread servicing this request */
- struct ptlrpc_thread *rq_svc_thread;
-
+ struct list_head sr_exp_list;
+ /** server-side history, used for debugging purposes. */
+ struct list_head sr_hist_list;
/** history sequence # */
- __u64 rq_history_seq;
+ __u64 sr_hist_seq;
+ /** the index of service's srv_at_array into which request is linked */
+ time_t sr_at_index;
+ /** authed uid */
+ uid_t sr_auth_uid;
+ /** authed uid mapped to */
+ uid_t sr_auth_mapped_uid;
+ /** which part of Lustre generated this RPC */
+ enum lustre_sec_part sr_sp_from;
+ /** request session context */
+ struct lu_context sr_ses;
/** \addtogroup nrs
* @{
*/
/** stub for NRS request */
- struct ptlrpc_nrs_request rq_nrq;
+ struct ptlrpc_nrs_request sr_nrq;
/** @} nrs */
- /** the index of service's srv_at_array into which request is linked */
- u32 rq_at_index;
+ /** request arrival time */
+ struct timespec64 sr_arrival_time;
+ /** server's half ctx */
+ struct ptlrpc_svc_ctx *sr_svc_ctx;
+ /** (server side), pointed directly into req buffer */
+ struct ptlrpc_user_desc *sr_user_desc;
+ /** separated reply state */
+ struct ptlrpc_reply_state *sr_reply_state;
+ /** server-side hp handlers */
+ struct ptlrpc_hpreq_ops *sr_ops;
+ /** incoming request buffer */
+ struct ptlrpc_request_buffer_desc *sr_rqbd;
+};
+
+/** server request member aliases */
+/* NB: these aliases should NOT be used by any new code; instead they should
+ * be removed step by step to avoid potential abuse
+ */
+#define rq_svc_thread rq_srv.sr_svc_thread
+#define rq_timed_list rq_srv.sr_timed_list
+#define rq_exp_list rq_srv.sr_exp_list
+#define rq_history_list rq_srv.sr_hist_list
+#define rq_history_seq rq_srv.sr_hist_seq
+#define rq_at_index rq_srv.sr_at_index
+#define rq_auth_uid rq_srv.sr_auth_uid
+#define rq_auth_mapped_uid rq_srv.sr_auth_mapped_uid
+#define rq_sp_from rq_srv.sr_sp_from
+#define rq_session rq_srv.sr_ses
+#define rq_nrq rq_srv.sr_nrq
+#define rq_arrival_time rq_srv.sr_arrival_time
+#define rq_reply_state rq_srv.sr_reply_state
+#define rq_svc_ctx rq_srv.sr_svc_ctx
+#define rq_user_desc rq_srv.sr_user_desc
+#define rq_ops rq_srv.sr_ops
+#define rq_rqbd rq_srv.sr_rqbd
+
+/**
+ * Represents remote procedure call.
+ *
+ * This is a staple structure used by everybody wanting to send a request
+ * in Lustre.
+ */
+struct ptlrpc_request {
+ /* Request type: one of PTL_RPC_MSG_* */
+ int rq_type;
+ /** Result of request processing */
+ int rq_status;
+ /**
+ * Linkage item through which this request is included into
+ * sending/delayed lists on client and into rqbd list on server
+ */
+ struct list_head rq_list;
/** Lock to protect request flags and some other important bits, like
* rq_list
*/
spinlock_t rq_lock;
- /** client-side flags are serialized by rq_lock */
+ /** client-side flags are serialized by rq_lock @{ */
unsigned int rq_intr:1, rq_replied:1, rq_err:1,
rq_timedout:1, rq_resend:1, rq_restart:1,
/**
@@ -1311,18 +1448,15 @@ struct ptlrpc_request {
rq_no_resend:1, rq_waiting:1, rq_receiving_reply:1,
rq_no_delay:1, rq_net_err:1, rq_wait_ctx:1,
rq_early:1,
- rq_req_unlink:1, rq_reply_unlink:1,
+ rq_req_unlinked:1, /* unlinked request buffer from lnet */
+ rq_reply_unlinked:1, /* unlinked reply buffer from lnet */
rq_memalloc:1, /* req originated from "kswapd" */
- /* server-side flags */
- rq_packed_final:1, /* packed final reply */
- rq_hp:1, /* high priority RPC */
- rq_at_linked:1, /* link into service's srv_at_array */
- rq_reply_truncate:1,
rq_committed:1,
- /* whether the "rq_set" is a valid one */
+ rq_reply_truncated:1,
+ /** whether the "rq_set" is a valid one */
rq_invalid_rqset:1,
rq_generation_set:1,
- /* do not resend request on -EINPROGRESS */
+ /** do not resend request on -EINPROGRESS */
rq_no_retry_einprogress:1,
/* allow the req to be sent if the import is in recovery
* status
@@ -1330,20 +1464,24 @@ struct ptlrpc_request {
rq_allow_replay:1,
/* bulk request, sent to server, but uncommitted */
rq_unstable:1;
+ /** @} */
- unsigned int rq_nr_resend;
-
- enum rq_phase rq_phase; /* one of RQ_PHASE_* */
- enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
- atomic_t rq_refcount; /* client-side refcount for SENT race,
- * server-side refcount for multiple replies
- */
-
- /** Portal to which this request would be sent */
- short rq_request_portal; /* XXX FIXME bug 249 */
- /** Portal where to wait for reply and where reply would be sent */
- short rq_reply_portal; /* XXX FIXME bug 249 */
+ /** server-side flags @{ */
+ unsigned int
+ rq_hp:1, /**< high priority RPC */
+ rq_at_linked:1, /**< link into service's srv_at_array */
+ rq_packed_final:1; /**< packed final reply */
+ /** @} */
+ /** one of RQ_PHASE_* */
+ enum rq_phase rq_phase;
+ /** one of RQ_PHASE_* to be used next */
+ enum rq_phase rq_next_phase;
+ /**
+ * client-side refcount for SENT race, server-side refcount
+ * for multiple replies
+ */
+ atomic_t rq_refcount;
/**
* client-side:
* !rq_truncate : # reply bytes actually received,
@@ -1354,6 +1492,8 @@ struct ptlrpc_request {
int rq_reqlen;
/** Reply length */
int rq_replen;
+ /** Pool if request is from preallocated list */
+ struct ptlrpc_request_pool *rq_pool;
/** Request message - what client sent */
struct lustre_msg *rq_reqmsg;
/** Reply message - server response */
@@ -1366,19 +1506,20 @@ struct ptlrpc_request {
* List item to for replay list. Not yet committed requests get linked
* there.
* Also see \a rq_replay comment above.
+ * It is also the link chain on obd_export::exp_req_replay_queue.
*/
struct list_head rq_replay_list;
-
+ /** non-shared members for client & server request */
+ union {
+ struct ptlrpc_cli_req rq_cli;
+ struct ptlrpc_srv_req rq_srv;
+ };
/**
* security and encryption data
* @{
*/
- struct ptlrpc_cli_ctx *rq_cli_ctx; /**< client's half ctx */
- struct ptlrpc_svc_ctx *rq_svc_ctx; /**< server's half ctx */
- struct list_head rq_ctx_chain; /**< link to waited ctx */
-
- struct sptlrpc_flavor rq_flvr; /**< for client & server */
- enum lustre_sec_part rq_sp_from;
+ /** description of flavors for client & server */
+ struct sptlrpc_flavor rq_flvr;
/* client/server security flags */
unsigned int
@@ -1388,7 +1529,6 @@ struct ptlrpc_request {
rq_bulk_write:1, /* request bulk write */
/* server authentication flags */
rq_auth_gss:1, /* authenticated by gss */
- rq_auth_remote:1, /* authed as remote user */
rq_auth_usr_root:1, /* authed as root */
rq_auth_usr_mdt:1, /* authed as mdt */
rq_auth_usr_ost:1, /* authed as ost */
@@ -1397,19 +1537,15 @@ struct ptlrpc_request {
rq_pack_bulk:1,
/* doesn't expect reply FIXME */
rq_no_reply:1,
- rq_pill_init:1; /* pill initialized */
-
- uid_t rq_auth_uid; /* authed uid */
- uid_t rq_auth_mapped_uid; /* authed uid mapped to */
-
- /* (server side), pointed directly into req buffer */
- struct ptlrpc_user_desc *rq_user_desc;
-
- /* various buffer pointers */
- struct lustre_msg *rq_reqbuf; /* req wrapper */
- char *rq_repbuf; /* rep buffer */
- struct lustre_msg *rq_repdata; /* rep wrapper msg */
- struct lustre_msg *rq_clrbuf; /* only in priv mode */
+ rq_pill_init:1, /* pill initialized */
+ rq_srv_req:1; /* server request */
+
+ /** various buffer pointers */
+ struct lustre_msg *rq_reqbuf; /**< req wrapper */
+ char *rq_repbuf; /**< rep buffer */
+ struct lustre_msg *rq_repdata; /**< rep wrapper msg */
+ /** only in priv mode */
+ struct lustre_msg *rq_clrbuf;
int rq_reqbuf_len; /* req wrapper buf len */
int rq_reqdata_len; /* req wrapper msg len */
int rq_repbuf_len; /* rep buffer len */
@@ -1426,97 +1562,28 @@ struct ptlrpc_request {
__u32 rq_req_swab_mask;
__u32 rq_rep_swab_mask;
- /** What was import generation when this request was sent */
- int rq_import_generation;
- enum lustre_imp_state rq_send_state;
-
/** how many early replies (for stats) */
int rq_early_count;
- /** client+server request */
- lnet_handle_md_t rq_req_md_h;
- struct ptlrpc_cb_id rq_req_cbid;
- /** optional time limit for send attempts */
- long rq_delay_limit;
- /** time request was first queued */
- unsigned long rq_queued_time;
-
- /* server-side... */
- /** request arrival time */
- struct timespec64 rq_arrival_time;
- /** separated reply state */
- struct ptlrpc_reply_state *rq_reply_state;
- /** incoming request buffer */
- struct ptlrpc_request_buffer_desc *rq_rqbd;
-
- /** client-only incoming reply */
- lnet_handle_md_t rq_reply_md_h;
- wait_queue_head_t rq_reply_waitq;
- struct ptlrpc_cb_id rq_reply_cbid;
-
+ /** Server-side, export on which request was received */
+ struct obd_export *rq_export;
+ /** import where request is being sent */
+ struct obd_import *rq_import;
/** our LNet NID */
lnet_nid_t rq_self;
/** Peer description (the other side) */
lnet_process_id_t rq_peer;
- /** Server-side, export on which request was received */
- struct obd_export *rq_export;
- /** Client side, import where request is being sent */
- struct obd_import *rq_import;
-
- /** Replay callback, called after request is replayed at recovery */
- void (*rq_replay_cb)(struct ptlrpc_request *);
/**
- * Commit callback, called when request is committed and about to be
- * freed.
+ * service time estimate (secs)
+ * If the request is not served by this time, it is marked as timed out.
*/
- void (*rq_commit_cb)(struct ptlrpc_request *);
- /** Opaq data for replay and commit callbacks. */
- void *rq_cb_data;
-
- /** For bulk requests on client only: bulk descriptor */
- struct ptlrpc_bulk_desc *rq_bulk;
-
- /** client outgoing req */
+ int rq_timeout;
/**
* when request/reply sent (secs), or time when request should be sent
*/
time64_t rq_sent;
- /** time for request really sent out */
- time64_t rq_real_sent;
-
- /** when request must finish. volatile
- * so that servers' early reply updates to the deadline aren't
- * kept in per-cpu cache
- */
- volatile time64_t rq_deadline;
- /** when req reply unlink must finish. */
- time64_t rq_reply_deadline;
- /** when req bulk unlink must finish. */
- time64_t rq_bulk_deadline;
- /**
- * service time estimate (secs)
- * If the requestsis not served by this time, it is marked as timed out.
- */
- int rq_timeout;
-
- /** Multi-rpc bits */
- /** Per-request waitq introduced by bug 21938 for recovery waiting */
- wait_queue_head_t rq_set_waitq;
- /** Link item for request set lists */
- struct list_head rq_set_chain;
- /** Link back to the request set */
- struct ptlrpc_request_set *rq_set;
- /** Async completion handler, called when reply is received */
- ptlrpc_interpterer_t rq_interpret_reply;
- /** Async completion context */
- union ptlrpc_async_args rq_async_args;
-
- /** Pool if request is from preallocated list */
- struct ptlrpc_request_pool *rq_pool;
-
- struct lu_context rq_session;
- struct lu_context rq_recov_session;
-
+ /** when request must finish. */
+ time64_t rq_deadline;
/** request format description */
struct req_capsule rq_pill;
};
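
Because rq_cli and rq_srv share storage in the anonymous union, the rq_* alias macros above keep legacy call sites compiling unchanged; both spellings in this sketch address the same list head:

static void request_alias_example(struct ptlrpc_request *req)
{
	/* New-style access through the client half of the union ... */
	INIT_LIST_HEAD(&req->rq_cli.cr_set_chain);

	/* ... and the legacy alias, #defined above to the same member. */
	LASSERT(list_empty(&req->rq_set_chain));
}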
@@ -1629,8 +1696,10 @@ ptlrpc_phase2str(enum rq_phase phase)
return "Interpret";
case RQ_PHASE_COMPLETE:
return "Complete";
- case RQ_PHASE_UNREGISTERING:
- return "Unregistering";
+ case RQ_PHASE_UNREG_RPC:
+ return "UnregRPC";
+ case RQ_PHASE_UNREG_BULK:
+ return "UnregBULK";
default:
return "?Phase?";
}
@@ -1657,7 +1726,7 @@ ptlrpc_rqphase2str(struct ptlrpc_request *req)
#define DEBUG_REQ_FLAGS(req) \
ptlrpc_rqphase2str(req), \
FLAG(req->rq_intr, "I"), FLAG(req->rq_replied, "R"), \
- FLAG(req->rq_err, "E"), \
+ FLAG(req->rq_err, "E"), FLAG(req->rq_net_err, "e"), \
FLAG(req->rq_timedout, "X") /* eXpired */, FLAG(req->rq_resend, "S"), \
FLAG(req->rq_restart, "T"), FLAG(req->rq_replay, "P"), \
FLAG(req->rq_no_resend, "N"), \
@@ -1665,7 +1734,7 @@ ptlrpc_rqphase2str(struct ptlrpc_request *req)
FLAG(req->rq_wait_ctx, "C"), FLAG(req->rq_hp, "H"), \
FLAG(req->rq_committed, "M")
-#define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s%s%s"
+#define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s%s%s%s"
void _debug_req(struct ptlrpc_request *req,
struct libcfs_debug_msg_data *data, const char *fmt, ...)
@@ -2316,8 +2385,7 @@ static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req)
desc = req->rq_bulk;
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
- req->rq_bulk_deadline > ktime_get_real_seconds())
+ if (req->rq_bulk_deadline > ktime_get_real_seconds())
return 1;
if (!desc)
@@ -2664,13 +2732,20 @@ ptlrpc_rqphase_move(struct ptlrpc_request *req, enum rq_phase new_phase)
if (req->rq_phase == new_phase)
return;
- if (new_phase == RQ_PHASE_UNREGISTERING) {
+ if (new_phase == RQ_PHASE_UNREG_RPC ||
+ new_phase == RQ_PHASE_UNREG_BULK) {
+ /* No embedded unregistering phases */
+ if (req->rq_phase == RQ_PHASE_UNREG_RPC ||
+ req->rq_phase == RQ_PHASE_UNREG_BULK)
+ return;
+
req->rq_next_phase = req->rq_phase;
if (req->rq_import)
atomic_inc(&req->rq_import->imp_unregistering);
}
- if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
+ if (req->rq_phase == RQ_PHASE_UNREG_RPC ||
+ req->rq_phase == RQ_PHASE_UNREG_BULK) {
if (req->rq_import)
atomic_dec(&req->rq_import->imp_unregistering);
}
@@ -2687,9 +2762,6 @@ ptlrpc_rqphase_move(struct ptlrpc_request *req, enum rq_phase new_phase)
static inline int
ptlrpc_client_early(struct ptlrpc_request *req)
{
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
- req->rq_reply_deadline > ktime_get_real_seconds())
- return 0;
return req->rq_early;
}
@@ -2699,8 +2771,7 @@ ptlrpc_client_early(struct ptlrpc_request *req)
static inline int
ptlrpc_client_replied(struct ptlrpc_request *req)
{
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
- req->rq_reply_deadline > ktime_get_real_seconds())
+ if (req->rq_reply_deadline > ktime_get_real_seconds())
return 0;
return req->rq_replied;
}
@@ -2709,8 +2780,7 @@ ptlrpc_client_replied(struct ptlrpc_request *req)
static inline int
ptlrpc_client_recv(struct ptlrpc_request *req)
{
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
- req->rq_reply_deadline > ktime_get_real_seconds())
+ if (req->rq_reply_deadline > ktime_get_real_seconds())
return 1;
return req->rq_receiving_reply;
}
@@ -2721,13 +2791,16 @@ ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req)
int rc;
spin_lock(&req->rq_lock);
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
- req->rq_reply_deadline > ktime_get_real_seconds()) {
+ if (req->rq_reply_deadline > ktime_get_real_seconds()) {
+ spin_unlock(&req->rq_lock);
+ return 1;
+ }
+ if (req->rq_req_deadline > ktime_get_real_seconds()) {
spin_unlock(&req->rq_lock);
return 1;
}
- rc = req->rq_receiving_reply;
- rc = rc || req->rq_req_unlink || req->rq_reply_unlink;
+ rc = !req->rq_req_unlinked || !req->rq_reply_unlinked ||
+ req->rq_receiving_reply;
spin_unlock(&req->rq_lock);
return rc;
}
diff --git a/drivers/staging/lustre/lustre/include/lustre_param.h b/drivers/staging/lustre/lustre/include/lustre_param.h
index a42cf90c1..82aadd32c 100644
--- a/drivers/staging/lustre/lustre/include/lustre_param.h
+++ b/drivers/staging/lustre/lustre/include/lustre_param.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/lustre_req_layout.h b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
index 0aac4391e..544a43c86 100644
--- a/drivers/staging/lustre/lustre/include/lustre_req_layout.h
+++ b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -164,7 +160,7 @@ extern struct req_format RQF_MDS_IS_SUBDIR;
extern struct req_format RQF_MDS_DONE_WRITING;
extern struct req_format RQF_MDS_REINT;
extern struct req_format RQF_MDS_REINT_CREATE;
-extern struct req_format RQF_MDS_REINT_CREATE_RMT_ACL;
+extern struct req_format RQF_MDS_REINT_CREATE_ACL;
extern struct req_format RQF_MDS_REINT_CREATE_SLAVE;
extern struct req_format RQF_MDS_REINT_CREATE_SYM;
extern struct req_format RQF_MDS_REINT_OPEN;
diff --git a/drivers/staging/lustre/lustre/include/lustre_sec.h b/drivers/staging/lustre/lustre/include/lustre_sec.h
index 01b4e6726..90c183424 100644
--- a/drivers/staging/lustre/lustre/include/lustre_sec.h
+++ b/drivers/staging/lustre/lustre/include/lustre_sec.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -221,13 +217,13 @@ enum sptlrpc_bulk_service {
#define SPTLRPC_FLVR_DEFAULT SPTLRPC_FLVR_NULL
-#define SPTLRPC_FLVR_INVALID ((__u32) 0xFFFFFFFF)
-#define SPTLRPC_FLVR_ANY ((__u32) 0xFFF00000)
+#define SPTLRPC_FLVR_INVALID ((__u32)0xFFFFFFFF)
+#define SPTLRPC_FLVR_ANY ((__u32)0xFFF00000)
/**
* extract the useful part from wire flavor
*/
-#define WIRE_FLVR(wflvr) (((__u32) (wflvr)) & 0x000FFFFF)
+#define WIRE_FLVR(wflvr) (((__u32)(wflvr)) & 0x000FFFFF)
/** @} flavor */
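
The macro cleanup is whitespace-only; WIRE_FLVR() still keeps the low 20 bits of a wire flavor. A worked example with a hypothetical value:

__u32 wflvr = 0xFFF12345;	/* hypothetical wire flavor */
__u32 flvr = WIRE_FLVR(wflvr);	/* == 0x00012345 (bits 0..19) */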
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index 2d926e0ee..a1bc2c478 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -232,6 +228,12 @@ enum {
#define MDC_MAX_RIF_DEFAULT 8
#define MDC_MAX_RIF_MAX 512
+enum obd_cl_sem_lock_class {
+ OBD_CLI_SEM_NORMAL,
+ OBD_CLI_SEM_MGC,
+ OBD_CLI_SEM_MDCOSC,
+};
+
struct mdc_rpc_lock;
struct obd_import;
struct client_obd {
@@ -419,7 +421,7 @@ struct lov_obd {
enum lustre_sec_part lov_sp_me;
/* Cached LRU and unstable data from upper layer */
- void *lov_cache;
+ struct cl_client_cache *lov_cache;
struct rw_semaphore lov_notify_lock;
@@ -1119,9 +1121,6 @@ struct md_ops {
ldlm_policy_data_t *, enum ldlm_mode,
enum ldlm_cancel_flags flags, void *opaque);
- int (*get_remote_perm)(struct obd_export *, const struct lu_fid *,
- __u32, struct ptlrpc_request **);
-
int (*intent_getattr_async)(struct obd_export *,
struct md_enqueue_info *,
struct ldlm_enqueue_info *);
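
The new obd_cl_sem_lock_class values read as lockdep subclass annotations for the client_obd semaphore, so nested acquisitions (an MGC device taken under another client device, say) do not raise false deadlock warnings. A sketch under that assumption, using the stock lockdep-aware API:

static void cl_sem_example(struct client_obd *cli)
{
	down_read_nested(&cli->cl_sem, OBD_CLI_SEM_MGC);
	/* ... inspect import state under the semaphore ... */
	up_read(&cli->cl_sem);
}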
diff --git a/drivers/staging/lustre/lustre/include/obd_cksum.h b/drivers/staging/lustre/lustre/include/obd_cksum.h
index f6c18df90..a8a81e662 100644
--- a/drivers/staging/lustre/lustre/include/obd_cksum.h
+++ b/drivers/staging/lustre/lustre/include/obd_cksum.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h
index 32863bcb3..6482a9370 100644
--- a/drivers/staging/lustre/lustre/include/obd_class.h
+++ b/drivers/staging/lustre/lustre/include/obd_class.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -1654,16 +1650,6 @@ static inline int md_init_ea_size(struct obd_export *exp, int easize,
cookiesize, def_cookiesize);
}
-static inline int md_get_remote_perm(struct obd_export *exp,
- const struct lu_fid *fid, __u32 suppgid,
- struct ptlrpc_request **request)
-{
- EXP_CHECK_MD_OP(exp, get_remote_perm);
- EXP_MD_COUNTER_INCREMENT(exp, get_remote_perm);
- return MDP(exp->exp_obd, get_remote_perm)(exp, fid, suppgid,
- request);
-}
-
static inline int md_intent_getattr_async(struct obd_export *exp,
struct md_enqueue_info *minfo,
struct ldlm_enqueue_info *einfo)
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
index 60034d39b..845e64a56 100644
--- a/drivers/staging/lustre/lustre/include/obd_support.h
+++ b/drivers/staging/lustre/lustre/include/obd_support.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -368,6 +364,9 @@ extern char obd_jobid_var[];
#define OBD_FAIL_PTLRPC_CLIENT_BULK_CB2 0x515
#define OBD_FAIL_PTLRPC_DELAY_IMP_FULL 0x516
#define OBD_FAIL_PTLRPC_CANCEL_RESEND 0x517
+#define OBD_FAIL_PTLRPC_DROP_BULK 0x51a
+#define OBD_FAIL_PTLRPC_LONG_REQ_UNLINK 0x51b
+#define OBD_FAIL_PTLRPC_LONG_BOTH_UNLINK 0x51c
#define OBD_FAIL_OBD_PING_NET 0x600
#define OBD_FAIL_OBD_LOG_CANCEL_NET 0x601
diff --git a/drivers/staging/lustre/lustre/ldlm/interval_tree.c b/drivers/staging/lustre/lustre/ldlm/interval_tree.c
index 323060626..f4a70ebdd 100644
--- a/drivers/staging/lustre/lustre/ldlm/interval_tree.c
+++ b/drivers/staging/lustre/lustre/ldlm/interval_tree.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/ldlm/l_lock.c b/drivers/staging/lustre/lustre/ldlm/l_lock.c
index 621323f6e..ea8840cb9 100644
--- a/drivers/staging/lustre/lustre/ldlm/l_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/l_lock.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
index cf1f17836..f5023d9b7 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
index 349bfcc9b..d6b61bc39 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -259,14 +255,13 @@ reprocess:
* overflow and underflow.
*/
if ((new->l_policy_data.l_flock.start >
- (lock->l_policy_data.l_flock.end + 1))
- && (lock->l_policy_data.l_flock.end !=
- OBD_OBJECT_EOF))
+ (lock->l_policy_data.l_flock.end + 1)) &&
+ (lock->l_policy_data.l_flock.end != OBD_OBJECT_EOF))
continue;
if ((new->l_policy_data.l_flock.end <
- (lock->l_policy_data.l_flock.start - 1))
- && (lock->l_policy_data.l_flock.start != 0))
+ (lock->l_policy_data.l_flock.start - 1)) &&
+ (lock->l_policy_data.l_flock.start != 0))
break;
if (new->l_policy_data.l_flock.start <
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c b/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c
index b1bed1e17..79f4e6fa1 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
index 32f227f37..e4cf65d2d 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
index b4ffbe2fc..7c832aae7 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -345,7 +341,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
* Set cl_chksum* to CRC32 for now to avoid returning screwed info
* through procfs.
*/
- cli->cl_cksum_type = cli->cl_supp_cksum_types = OBD_CKSUM_CRC32;
+ cli->cl_cksum_type = OBD_CKSUM_CRC32;
+ cli->cl_supp_cksum_types = OBD_CKSUM_CRC32;
atomic_set(&cli->cl_resends, OSC_DEFAULT_RESENDS);
/* This value may be reduced at connect time in
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index bff94ea12..a5993f745 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -662,7 +658,7 @@ static void ldlm_add_ast_work_item(struct ldlm_lock *lock,
* r/w reference type is determined by \a mode
* Calls ldlm_lock_addref_internal.
*/
-void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
+void ldlm_lock_addref(const struct lustre_handle *lockh, __u32 mode)
{
struct ldlm_lock *lock;
@@ -704,7 +700,7 @@ void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
*
* \retval -EAGAIN lock is being canceled.
*/
-int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode)
+int ldlm_lock_addref_try(const struct lustre_handle *lockh, __u32 mode)
{
struct ldlm_lock *lock;
int result;
@@ -836,7 +832,7 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
/**
* Decrease reader/writer refcount for LDLM lock with handle \a lockh
*/
-void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
+void ldlm_lock_decref(const struct lustre_handle *lockh, __u32 mode)
{
struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
@@ -853,7 +849,7 @@ EXPORT_SYMBOL(ldlm_lock_decref);
*
* Typical usage is for GROUP locks which we cannot allow to be cached.
*/
-void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
+void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh, __u32 mode)
{
struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
@@ -1322,7 +1318,7 @@ enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
}
EXPORT_SYMBOL(ldlm_lock_match);
-enum ldlm_mode ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
+enum ldlm_mode ldlm_revalidate_lock_handle(const struct lustre_handle *lockh,
__u64 *bits)
{
struct ldlm_lock *lock;
@@ -1444,7 +1440,7 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
memcpy(data, lvb, size);
break;
default:
- LDLM_ERROR(lock, "Unknown LVB type: %d\n", lock->l_lvb_type);
+ LDLM_ERROR(lock, "Unknown LVB type: %d", lock->l_lvb_type);
dump_stack();
return -EINVAL;
}
@@ -1853,7 +1849,7 @@ EXPORT_SYMBOL(ldlm_lock_cancel);
/**
* Set opaque data into the lock that only makes sense to upper layer.
*/
-int ldlm_lock_set_data(struct lustre_handle *lockh, void *data)
+int ldlm_lock_set_data(const struct lustre_handle *lockh, void *data)
{
struct ldlm_lock *lock = ldlm_handle2lock(lockh);
int rc = -EINVAL;
@@ -1879,7 +1875,7 @@ struct export_cl_data {
*
* Used when printing all locks on a resource for debug purposes.
*/
-void ldlm_lock_dump_handle(int level, struct lustre_handle *lockh)
+void ldlm_lock_dump_handle(int level, const struct lustre_handle *lockh)
{
struct ldlm_lock *lock;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
index ab739f079..821939ff2 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -503,7 +499,7 @@ static int ldlm_handle_setinfo(struct ptlrpc_request *req)
static inline void ldlm_callback_errmsg(struct ptlrpc_request *req,
const char *msg, int rc,
- struct lustre_handle *handle)
+ const struct lustre_handle *handle)
{
DEBUG_REQ((req->rq_no_reply || rc) ? D_WARNING : D_DLMTRACE, req,
"%s: [nid %s] [rc %d] [lock %#llx]",
@@ -641,7 +637,8 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
*/
if ((ldlm_is_canceling(lock) && ldlm_is_bl_done(lock)) ||
ldlm_is_failed(lock)) {
- LDLM_DEBUG(lock, "callback on lock %#llx - lock disappeared\n",
+ LDLM_DEBUG(lock,
+ "callback on lock %#llx - lock disappeared",
dlm_req->lock_handle[0].cookie);
unlock_res_and_lock(lock);
LDLM_LOCK_RELEASE(lock);
@@ -1011,9 +1008,11 @@ static int ldlm_setup(void)
blp->blp_min_threads = LDLM_NTHRS_INIT;
blp->blp_max_threads = LDLM_NTHRS_MAX;
} else {
- blp->blp_min_threads = blp->blp_max_threads =
- min_t(int, LDLM_NTHRS_MAX, max_t(int, LDLM_NTHRS_INIT,
- ldlm_num_threads));
+ blp->blp_min_threads = min_t(int, LDLM_NTHRS_MAX,
+ max_t(int, LDLM_NTHRS_INIT,
+ ldlm_num_threads));
+
+ blp->blp_max_threads = blp->blp_min_threads;
}
for (i = 0; i < blp->blp_min_threads; i++) {
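
Splitting the chained assignment also exposes the intent: the thread count is ldlm_num_threads clamped to [LDLM_NTHRS_INIT, LDLM_NTHRS_MAX]. An equivalent formulation (not what the patch uses) would be:

blp->blp_min_threads = clamp_t(int, ldlm_num_threads,
			       LDLM_NTHRS_INIT, LDLM_NTHRS_MAX);
blp->blp_max_threads = blp->blp_min_threads;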
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c b/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c
index 0c1965dda..0aed39c46 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index b913ba9cf..657ed4012 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index 107314e28..af487f993 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -340,7 +336,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
enum ldlm_type type, __u8 with_policy,
enum ldlm_mode mode,
__u64 *flags, void *lvb, __u32 lvb_len,
- struct lustre_handle *lockh, int rc)
+ const struct lustre_handle *lockh, int rc)
{
struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
int is_replay = *flags & LDLM_FL_REPLAY;
@@ -715,7 +711,7 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
lock->l_req_extent = policy->l_extent;
}
- LDLM_DEBUG(lock, "client-side enqueue START, flags %llx\n",
+ LDLM_DEBUG(lock, "client-side enqueue START, flags %llx",
*flags);
}
@@ -1027,7 +1023,7 @@ EXPORT_SYMBOL(ldlm_cli_update_pool);
*
* Lock must not have any readers or writers by this time.
*/
-int ldlm_cli_cancel(struct lustre_handle *lockh,
+int ldlm_cli_cancel(const struct lustre_handle *lockh,
enum ldlm_cancel_flags cancel_flags)
{
struct obd_export *exp;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
index e99c89c34..51a28d96a 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -1279,7 +1275,7 @@ void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
{
check_res_locked(res);
- LDLM_DEBUG(lock, "About to add this lock:\n");
+ LDLM_DEBUG(lock, "About to add this lock:");
if (ldlm_is_destroyed(lock)) {
CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
diff --git a/drivers/staging/lustre/lustre/llite/Makefile b/drivers/staging/lustre/lustre/llite/Makefile
index 2ce10ff01..2cbb1b80b 100644
--- a/drivers/staging/lustre/lustre/llite/Makefile
+++ b/drivers/staging/lustre/lustre/llite/Makefile
@@ -1,11 +1,7 @@
obj-$(CONFIG_LUSTRE_FS) += lustre.o
-obj-$(CONFIG_LUSTRE_LLITE_LLOOP) += llite_lloop.o
lustre-y := dcache.o dir.o file.o llite_close.o llite_lib.o llite_nfs.o \
rw.o namei.o symlink.o llite_mmap.o \
- xattr.o xattr_cache.o remote_perm.o llite_rmtacl.o \
- rw26.o super25.o statahead.o \
+ xattr.o xattr_cache.o rw26.o super25.o statahead.o \
glimpse.o lcommon_cl.o lcommon_misc.o \
vvp_dev.o vvp_page.o vvp_lock.o vvp_io.o vvp_object.o vvp_req.o \
lproc_llite.o
-
-llite_lloop-y := lloop.o
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
index 1b6f82a1a..463b1a360 100644
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -82,7 +78,7 @@ static void ll_release(struct dentry *de)
* INVALID) so d_lookup() matches it, but we have no lock on it (so
* lock_match() fails) and we spin around real_lookup().
*/
-static int ll_dcompare(const struct dentry *parent, const struct dentry *dentry,
+static int ll_dcompare(const struct dentry *dentry,
unsigned int len, const char *str,
const struct qstr *name)
{
@@ -206,27 +202,27 @@ int ll_d_init(struct dentry *de)
void ll_intent_drop_lock(struct lookup_intent *it)
{
- if (it->it_op && it->d.lustre.it_lock_mode) {
+ if (it->it_op && it->it_lock_mode) {
struct lustre_handle handle;
- handle.cookie = it->d.lustre.it_lock_handle;
+ handle.cookie = it->it_lock_handle;
CDEBUG(D_DLMTRACE, "releasing lock with cookie %#llx from it %p\n",
handle.cookie, it);
- ldlm_lock_decref(&handle, it->d.lustre.it_lock_mode);
+ ldlm_lock_decref(&handle, it->it_lock_mode);
/* bug 494: intent_release may be called multiple times, from
* this thread and we don't want to double-decref this lock
*/
- it->d.lustre.it_lock_mode = 0;
- if (it->d.lustre.it_remote_lock_mode != 0) {
- handle.cookie = it->d.lustre.it_remote_lock_handle;
+ it->it_lock_mode = 0;
+ if (it->it_remote_lock_mode != 0) {
+ handle.cookie = it->it_remote_lock_handle;
CDEBUG(D_DLMTRACE, "releasing remote lock with cookie%#llx from it %p\n",
handle.cookie, it);
ldlm_lock_decref(&handle,
- it->d.lustre.it_remote_lock_mode);
- it->d.lustre.it_remote_lock_mode = 0;
+ it->it_remote_lock_mode);
+ it->it_remote_lock_mode = 0;
}
}
}
@@ -237,13 +233,13 @@ void ll_intent_release(struct lookup_intent *it)
ll_intent_drop_lock(it);
/* We are still holding extra reference on a request, need to free it */
if (it_disposition(it, DISP_ENQ_OPEN_REF))
- ptlrpc_req_finished(it->d.lustre.it_data); /* ll_file_open */
+ ptlrpc_req_finished(it->it_request); /* ll_file_open */
if (it_disposition(it, DISP_ENQ_CREATE_REF)) /* create rec */
- ptlrpc_req_finished(it->d.lustre.it_data);
+ ptlrpc_req_finished(it->it_request);
- it->d.lustre.it_disposition = 0;
- it->d.lustre.it_data = NULL;
+ it->it_disposition = 0;
+ it->it_request = NULL;
}
void ll_invalidate_aliases(struct inode *inode)
@@ -253,7 +249,7 @@ void ll_invalidate_aliases(struct inode *inode)
CDEBUG(D_INODE, "marking dentries for ino "DFID"(%p) invalid\n",
PFID(ll_inode2fid(inode)), inode);
- ll_lock_dcache(inode);
+ spin_lock(&inode->i_lock);
hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
CDEBUG(D_DENTRY, "dentry in drop %pd (%p) parent %p inode %p flags %d\n",
dentry, dentry, dentry->d_parent,
@@ -261,7 +257,7 @@ void ll_invalidate_aliases(struct inode *inode)
d_lustre_invalidate(dentry, 0);
}
- ll_unlock_dcache(inode);
+ spin_unlock(&inode->i_lock);
}
int ll_revalidate_it_finish(struct ptlrpc_request *request,
@@ -283,7 +279,7 @@ int ll_revalidate_it_finish(struct ptlrpc_request *request,
void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode)
{
- if (it->d.lustre.it_lock_mode && inode) {
+ if (it->it_lock_mode && inode) {
struct ll_sb_info *sbi = ll_i2sbi(inode);
CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID"(%p)\n",
@@ -306,6 +302,17 @@ static int ll_revalidate_dentry(struct dentry *dentry,
{
struct inode *dir = d_inode(dentry->d_parent);
+ /* If this is intermediate component path lookup and we were able to get
+ * to this dentry, then its lock has not been revoked and the
+ * path component is valid.
+ */
+ if (lookup_flags & LOOKUP_PARENT)
+ return 1;
+
+ /* Symlink - always valid as long as the dentry was found */
+ if (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode))
+ return 1;
+
/*
* if open&create is set, talk to MDS to make sure file is created if
* necessary, because we can't do this in ->open() later since that's
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 4b00d1ac8..5b381779c 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -366,7 +362,7 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
ll_finish_md_op_data(op_data);
- request = (struct ptlrpc_request *)it.d.lustre.it_data;
+ request = (struct ptlrpc_request *)it.it_request;
if (request)
ptlrpc_req_finished(request);
if (rc < 0) {
@@ -378,7 +374,7 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
CDEBUG(D_INODE, "setting lr_lvb_inode to inode "DFID"(%p)\n",
PFID(ll_inode2fid(dir)), dir);
md_set_lock_data(ll_i2sbi(dir)->ll_md_exp,
- &it.d.lustre.it_lock_handle, dir, NULL);
+ &it.it_lock_handle, dir, NULL);
} else {
/* for cross-ref object, l_ast_data of the lock may not be set,
* we reset it here
@@ -1076,17 +1072,11 @@ static int copy_and_ioctl(int cmd, struct obd_export *exp,
void *copy;
int rc;
- copy = kzalloc(size, GFP_NOFS);
- if (!copy)
- return -ENOMEM;
-
- if (copy_from_user(copy, data, size)) {
- rc = -EFAULT;
- goto out;
- }
+ copy = memdup_user(data, size);
+ if (IS_ERR(copy))
+ return PTR_ERR(copy);
rc = obd_iocontrol(cmd, exp, size, copy, NULL);
-out:
kfree(copy);
return rc;
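
memdup_user() folds the allocate-and-copy pair into a single call that reports failure as an ERR_PTR, which is what lets the out: label go away. The general shape of the idiom:

static int ioctl_copy_example(void __user *data, size_t size)
{
	void *copy = memdup_user(data, size);

	if (IS_ERR(copy))
		return PTR_ERR(copy);	/* -ENOMEM or -EFAULT */
	/* ... hand copy to obd_iocontrol() or similar ... */
	kfree(copy);
	return 0;
}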
@@ -1107,8 +1097,7 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
case Q_QUOTAOFF:
case Q_SETQUOTA:
case Q_SETINFO:
- if (!capable(CFS_CAP_SYS_ADMIN) ||
- sbi->ll_flags & LL_SBI_RMT_CLIENT)
+ if (!capable(CFS_CAP_SYS_ADMIN))
return -EPERM;
break;
case Q_GETQUOTA:
@@ -1116,8 +1105,7 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
!uid_eq(current_euid(), make_kuid(&init_user_ns, id))) ||
(type == GRPQUOTA &&
!in_egroup_p(make_kgid(&init_user_ns, id)))) &&
- (!capable(CFS_CAP_SYS_ADMIN) ||
- sbi->ll_flags & LL_SBI_RMT_CLIENT))
+ !capable(CFS_CAP_SYS_ADMIN))
return -EPERM;
break;
case Q_GETINFO:
@@ -1128,9 +1116,6 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
}
if (valid != QC_GENERAL) {
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
- return -EOPNOTSUPP;
-
if (cmd == Q_GETINFO)
qctl->qc_cmd = Q_GETOINFO;
else if (cmd == Q_GETQUOTA)
@@ -1538,7 +1523,9 @@ skip_lmm:
st.st_atime = body->atime;
st.st_mtime = body->mtime;
st.st_ctime = body->ctime;
- st.st_ino = inode->i_ino;
+ st.st_ino = cl_fid_build_ino(&body->fid1,
+ sbi->ll_flags &
+ LL_SBI_32BIT_API);
lmdp = (struct lov_user_mds_data __user *)arg;
if (copy_to_user(&lmdp->lmd_st, &st, sizeof(st))) {
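Rather than exposing the local inode number, the hunk above derives st_ino from the FID returned by the MDS, honoring the LL_SBI_32BIT_API mount flag. A hedged sketch of the call, assuming fid and sbi come from the surrounding context:

        ino_t ino;

        /* Squeeze the FID-derived number into 32 bits when mounted with
         * the 32-bit API flag, so legacy 32-bit userspace never sees a
         * truncated inode number.
         */
        ino = cl_fid_build_ino(fid, sbi->ll_flags & LL_SBI_32BIT_API);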
@@ -1631,8 +1618,7 @@ free_lmm:
struct obd_quotactl *oqctl;
int error = 0;
- if (!capable(CFS_CAP_SYS_ADMIN) ||
- sbi->ll_flags & LL_SBI_RMT_CLIENT)
+ if (!capable(CFS_CAP_SYS_ADMIN))
return -EPERM;
oqctl = kzalloc(sizeof(*oqctl), GFP_NOFS);
@@ -1655,8 +1641,7 @@ free_lmm:
case OBD_IOC_POLL_QUOTACHECK: {
struct if_quotacheck *check;
- if (!capable(CFS_CAP_SYS_ADMIN) ||
- sbi->ll_flags & LL_SBI_RMT_CLIENT)
+ if (!capable(CFS_CAP_SYS_ADMIN))
return -EPERM;
check = kzalloc(sizeof(*check), GFP_NOFS);
@@ -1713,20 +1698,6 @@ out_quotactl:
return ll_get_obd_name(inode, cmd, arg);
case LL_IOC_FLUSHCTX:
return ll_flush_ctx(inode);
-#ifdef CONFIG_FS_POSIX_ACL
- case LL_IOC_RMTACL: {
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) {
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
-
- rc = rct_add(&sbi->ll_rct, current_pid(), arg);
- if (!rc)
- fd->fd_flags |= LL_FILE_RMTACL;
- return rc;
- } else {
- return 0;
- }
- }
-#endif
case LL_IOC_GETOBDCOUNT: {
int count, vallen;
struct obd_export *exp;
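A recurring pattern in this file (and below in file.c and llite_lib.c) is the removal of remote-client mode: every "!capable(CFS_CAP_SYS_ADMIN) || sbi->ll_flags & LL_SBI_RMT_CLIENT" test collapses to a plain capability check, and the LL_IOC_RMTACL machinery disappears outright. The surviving admin gate is just:

        if (!capable(CFS_CAP_SYS_ADMIN))
                return -EPERM;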
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index f47f2acaf..57281b9e3 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -348,18 +344,6 @@ int ll_file_release(struct inode *inode, struct file *file)
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
PFID(ll_inode2fid(inode)), inode);
-#ifdef CONFIG_FS_POSIX_ACL
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) {
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
-
- if (unlikely(fd->fd_flags & LL_FILE_RMTACL)) {
- fd->fd_flags &= ~LL_FILE_RMTACL;
- rct_del(&sbi->ll_rct, current_pid());
- et_search_free(&sbi->ll_et, current_pid());
- }
- }
-#endif
-
if (!is_root_inode(inode))
ll_stats_ops_tally(sbi, LPROC_LL_RELEASE, 1);
fd = LUSTRE_FPRIVATE(file);
@@ -415,7 +399,19 @@ static int ll_intent_file_open(struct dentry *dentry, void *lmm,
* parameters. No need for the open lock
*/
if (!lmm && lmmsize == 0) {
- itp->it_flags |= MDS_OPEN_LOCK;
+ struct ll_dentry_data *ldd = ll_d2d(dentry);
+ /*
+ * If we came via ll_iget_for_nfs, then we need to request the
+ * open lock (MDS_OPEN_LOCK) so that the open cache is enabled
+ * for this dentry.
+ *
+ * NB: when ldd is NULL, we must have come via the normal
+ * lookup path only, since ll_iget_for_nfs always calls
+ * ll_d_init().
+ */
+ if (ldd && ldd->lld_nfs_dentry) {
+ ldd->lld_nfs_dentry = 0;
+ itp->it_flags |= MDS_OPEN_LOCK;
+ }
if (itp->it_flags & FMODE_WRITE)
opc = LUSTRE_OPC_CREATE;
}
@@ -453,7 +449,7 @@ static int ll_intent_file_open(struct dentry *dentry, void *lmm,
}
rc = ll_prep_inode(&inode, req, NULL, itp);
- if (!rc && itp->d.lustre.it_lock_mode)
+ if (!rc && itp->it_lock_mode)
ll_set_lock_data(sbi->ll_md_exp, inode, itp, NULL);
out:
@@ -480,13 +476,12 @@ void ll_ioepoch_open(struct ll_inode_info *lli, __u64 ioepoch)
static int ll_och_fill(struct obd_export *md_exp, struct lookup_intent *it,
struct obd_client_handle *och)
{
- struct ptlrpc_request *req = it->d.lustre.it_data;
struct mdt_body *body;
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ body = req_capsule_server_get(&it->it_request->rq_pill, &RMF_MDT_BODY);
och->och_fh = body->handle;
och->och_fid = body->fid1;
- och->och_lease_handle.cookie = it->d.lustre.it_lock_handle;
+ och->och_lease_handle.cookie = it->it_lock_handle;
och->och_magic = OBD_CLIENT_HANDLE_MAGIC;
och->och_flags = it->it_flags;
@@ -504,7 +499,6 @@ static int ll_local_open(struct file *file, struct lookup_intent *it,
LASSERT(fd);
if (och) {
- struct ptlrpc_request *req = it->d.lustre.it_data;
struct mdt_body *body;
int rc;
@@ -512,13 +506,19 @@ static int ll_local_open(struct file *file, struct lookup_intent *it,
if (rc != 0)
return rc;
- body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+ body = req_capsule_server_get(&it->it_request->rq_pill,
+ &RMF_MDT_BODY);
ll_ioepoch_open(lli, body->ioepoch);
}
LUSTRE_FPRIVATE(file) = fd;
ll_readahead_init(inode, &fd->fd_ras);
fd->fd_omode = it->it_flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC);
+
+ /* ll_cl_context initialize */
+ rwlock_init(&fd->fd_lock);
+ INIT_LIST_HEAD(&fd->fd_lccs);
+
return 0;
}
@@ -574,7 +574,7 @@ int ll_file_open(struct inode *inode, struct file *file)
return 0;
}
- if (!it || !it->d.lustre.it_disposition) {
+ if (!it || !it->it_disposition) {
/* Convert f_flags into access mode. We cannot use file->f_mode,
* because everything but the O_ACCMODE mask was stripped from
* there
@@ -644,7 +644,7 @@ restart:
}
} else {
LASSERT(*och_usecount == 0);
- if (!it->d.lustre.it_disposition) {
+ if (!it->it_disposition) {
/* We cannot just request a lock handle now; new ELC code
* means that one of the other OPEN locks for this file
* could be cancelled, and since blocking ast handler
@@ -681,7 +681,7 @@ restart:
LASSERTF(it_disposition(it, DISP_ENQ_OPEN_REF),
"inode %p: disposition %x, status %d\n", inode,
- it_disposition(it, ~0), it->d.lustre.it_status);
+ it_disposition(it, ~0), it->it_status);
rc = ll_local_open(file, it, fd, *och_p);
if (rc)
@@ -724,7 +724,7 @@ out_openerr:
}
if (it && it_disposition(it, DISP_ENQ_OPEN_REF)) {
- ptlrpc_req_finished(it->d.lustre.it_data);
+ ptlrpc_req_finished(it->it_request);
it_clear_disposition(it, DISP_ENQ_OPEN_REF);
}
@@ -865,12 +865,12 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode,
/* already get lease, handle lease lock */
ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL);
- if (it.d.lustre.it_lock_mode == 0 ||
- it.d.lustre.it_lock_bits != MDS_INODELOCK_OPEN) {
+ if (it.it_lock_mode == 0 ||
+ it.it_lock_bits != MDS_INODELOCK_OPEN) {
/* open lock must return for lease */
CERROR(DFID "lease granted but no open lock, %d/%llu.\n",
- PFID(ll_inode2fid(inode)), it.d.lustre.it_lock_mode,
- it.d.lustre.it_lock_bits);
+ PFID(ll_inode2fid(inode)), it.it_lock_mode,
+ it.it_lock_bits);
rc = -EPROTO;
goto out_close;
}
@@ -880,10 +880,10 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode,
out_close:
/* Cancel open lock */
- if (it.d.lustre.it_lock_mode != 0) {
+ if (it.it_lock_mode != 0) {
ldlm_lock_decref_and_cancel(&och->och_lease_handle,
- it.d.lustre.it_lock_mode);
- it.d.lustre.it_lock_mode = 0;
+ it.it_lock_mode);
+ it.it_lock_mode = 0;
och->och_lease_handle.cookie = 0ULL;
}
rc2 = ll_close_inode_openhandle(sbi->ll_md_exp, inode, och, NULL);
@@ -1178,7 +1178,9 @@ restart:
CERROR("Unknown IO type - %u\n", vio->vui_io_subtype);
LBUG();
}
+ ll_cl_add(file, env, io);
result = cl_io_loop(env, io);
+ ll_cl_remove(file, env);
if (args->via_io_subtype == IO_NORMAL)
up_read(&lli->lli_trunc_sem);
if (write_mutex_locked)
@@ -1397,7 +1399,7 @@ int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
rc = ll_intent_file_open(dentry, lum, lum_size, &oit);
if (rc)
goto out_unlock;
- rc = oit.d.lustre.it_status;
+ rc = oit.it_status;
if (rc < 0)
goto out_req_free;
@@ -1410,7 +1412,7 @@ out_unlock:
out:
return rc;
out_req_free:
- ptlrpc_req_finished((struct ptlrpc_request *) oit.d.lustre.it_data);
+ ptlrpc_req_finished((struct ptlrpc_request *)oit.it_request);
goto out;
}
@@ -1698,7 +1700,7 @@ int ll_release_openhandle(struct inode *inode, struct lookup_intent *it)
out:
/* this one is in place of ll_file_open */
if (it_disposition(it, DISP_ENQ_OPEN_REF)) {
- ptlrpc_req_finished(it->d.lustre.it_data);
+ ptlrpc_req_finished(it->it_request);
it_clear_disposition(it, DISP_ENQ_OPEN_REF);
}
return rc;
@@ -2972,8 +2974,11 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
* here to preserve get_cwd functionality on 2.6.
* Bug 10503
*/
- if (!d_inode(dentry)->i_nlink)
+ if (!d_inode(dentry)->i_nlink) {
+ spin_lock(&inode->i_lock);
d_lustre_invalidate(dentry, 0);
+ spin_unlock(&inode->i_lock);
+ }
ll_lookup_finish_locks(&oit, inode);
} else if (!ll_have_md_lock(d_inode(dentry), &ibits, LCK_MINMODE)) {
@@ -3124,6 +3129,9 @@ struct posix_acl *ll_get_acl(struct inode *inode, int type)
spin_lock(&lli->lli_lock);
/* VFS' acl_permission_check->check_acl will release the refcount */
acl = posix_acl_dup(lli->lli_posix_acl);
+#ifdef CONFIG_FS_POSIX_ACL
+ forget_cached_acl(inode, type);
+#endif
spin_unlock(&lli->lli_lock);
return acl;
@@ -3150,9 +3158,6 @@ int ll_inode_permission(struct inode *inode, int mask)
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), inode mode %x mask %o\n",
PFID(ll_inode2fid(inode)), inode, inode->i_mode, mask);
- if (ll_i2sbi(inode)->ll_flags & LL_SBI_RMT_CLIENT)
- return lustre_check_remote_perm(inode, mask);
-
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_INODE_PERM, 1);
rc = generic_permission(inode, mask);
@@ -3601,13 +3606,13 @@ again:
rc = md_enqueue(sbi->ll_md_exp, &einfo, &it, op_data, &lockh,
NULL, 0, NULL, 0);
- ptlrpc_req_finished(it.d.lustre.it_data);
- it.d.lustre.it_data = NULL;
+ ptlrpc_req_finished(it.it_request);
+ it.it_request = NULL;
ll_finish_md_op_data(op_data);
- mode = it.d.lustre.it_lock_mode;
- it.d.lustre.it_lock_mode = 0;
+ mode = it.it_lock_mode;
+ it.it_lock_mode = 0;
ll_intent_drop_lock(&it);
if (rc == 0) {
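Most of the churn in file.c is mechanical: the lookup intent's nested "d.lustre" union member is flattened into the struct itself, and the untyped it_data pointer becomes a typed it_request. A hedged sketch of the shape implied by the hunks above (field list abridged; the real definition lives in the lustre headers):

        struct lookup_intent {                  /* abridged sketch */
                int                      it_op;
                int                      it_status;
                int                      it_disposition;
                int                      it_lock_mode;
                int                      it_lock_set;
                __u64                    it_lock_bits;
                __u64                    it_lock_handle;
                __u64                    it_remote_lock_handle;
                int                      it_remote_lock_mode;
                struct ptlrpc_request   *it_request;    /* was void *it_data */
        };

Every "it->d.lustre.it_*" access in the old code becomes "it->it_*", which also lets ll_och_fill() and ll_local_open() drop their local req variables.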
diff --git a/drivers/staging/lustre/lustre/llite/glimpse.c b/drivers/staging/lustre/lustre/llite/glimpse.c
index d8ea75424..92004a05f 100644
--- a/drivers/staging/lustre/lustre/llite/glimpse.c
+++ b/drivers/staging/lustre/lustre/llite/glimpse.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/llite/lcommon_cl.c b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
index 6c00715b4..396e4e4f0 100644
--- a/drivers/staging/lustre/lustre/llite/lcommon_cl.c
+++ b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/llite/lcommon_misc.c b/drivers/staging/lustre/lustre/llite/lcommon_misc.c
index 12f3e71f4..f6be105ee 100644
--- a/drivers/staging/lustre/lustre/llite/lcommon_misc.c
+++ b/drivers/staging/lustre/lustre/llite/lcommon_misc.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -100,7 +96,8 @@ int cl_ocd_update(struct obd_device *host,
__u64 flags;
int result;
- if (!strcmp(watched->obd_type->typ_name, LUSTRE_OSC_NAME)) {
+ if (!strcmp(watched->obd_type->typ_name, LUSTRE_OSC_NAME) &&
+ watched->obd_set_up && !watched->obd_stopping) {
cli = &watched->u.cli;
lco = owner;
flags = cli->cl_import->imp_connect_data.ocd_connect_flags;
@@ -115,9 +112,10 @@ int cl_ocd_update(struct obd_device *host,
mutex_unlock(&lco->lco_lock);
result = 0;
} else {
- CERROR("unexpected notification from %s %s!\n",
+ CERROR("unexpected notification from %s %s (setup:%d,stopping:%d)!\n",
watched->obd_type->typ_name,
- watched->obd_name);
+ watched->obd_name, watched->obd_set_up,
+ watched->obd_stopping);
result = -EINVAL;
}
return result;
diff --git a/drivers/staging/lustre/lustre/llite/llite_close.c b/drivers/staging/lustre/lustre/llite/llite_close.c
index 2df551d3a..2326b40a0 100644
--- a/drivers/staging/lustre/lustre/llite/llite_close.c
+++ b/drivers/staging/lustre/lustre/llite/llite_close.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 3f2f30b65..4d6d589a1 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -68,6 +64,7 @@ struct ll_dentry_data {
struct lookup_intent *lld_it;
unsigned int lld_sa_generation;
unsigned int lld_invalid:1;
+ unsigned int lld_nfs_dentry:1;
struct rcu_head lld_rcu_head;
};
@@ -76,9 +73,6 @@ struct ll_dentry_data {
#define LLI_INODE_MAGIC 0x111d0de5
#define LLI_INODE_DEAD 0xdeadd00d
-/* remote client permission cache */
-#define REMOTE_PERM_HASHSIZE 16
-
struct ll_getname_data {
struct dir_context ctx;
char *lgd_name; /* points to a buffer with NAME_MAX+1 size */
@@ -86,19 +80,6 @@ struct ll_getname_data {
int lgd_found; /* inode matched? */
};
-/* llite setxid/access permission for user on remote client */
-struct ll_remote_perm {
- struct hlist_node lrp_list;
- uid_t lrp_uid;
- gid_t lrp_gid;
- uid_t lrp_fsuid;
- gid_t lrp_fsgid;
- int lrp_access_perm; /* MAY_READ/WRITE/EXEC, this
- * is access permission with
- * lrp_fsuid/lrp_fsgid.
- */
-};
-
struct ll_grouplock {
struct lu_env *lg_env;
struct cl_io *lg_io;
@@ -133,9 +114,6 @@ struct ll_inode_info {
spinlock_t lli_lock;
struct posix_acl *lli_posix_acl;
- struct hlist_head *lli_remote_perms;
- struct mutex lli_rmtperm_mutex;
-
/* identifying fields for both metadata and data stacks. */
struct lu_fid lli_fid;
/* Parent fid for accessing default stripe data on parent directory
@@ -145,8 +123,6 @@ struct ll_inode_info {
struct list_head lli_close_list;
- unsigned long lli_rmtperm_time;
-
/* handle is to be sent to MDS later on done_writing and setattr.
* Open handle data are needed for the recovery to reconstruct
* the inode state on the MDS. XXX: recovery is not ready yet.
@@ -411,7 +387,7 @@ enum stats_track_type {
#define LL_SBI_FLOCK 0x04
#define LL_SBI_USER_XATTR 0x08 /* support user xattr */
#define LL_SBI_ACL 0x10 /* support ACL */
-#define LL_SBI_RMT_CLIENT 0x40 /* remote client */
+/* LL_SBI_RMT_CLIENT 0x40 remote client */
#define LL_SBI_MDS_CAPA 0x80 /* support mds capa, obsolete */
#define LL_SBI_OSS_CAPA 0x100 /* support oss capa, obsolete */
#define LL_SBI_LOCALFLOCK 0x200 /* Local flocks support by kernel */
@@ -433,7 +409,7 @@ enum stats_track_type {
"xattr", \
"acl", \
"???", \
- "rmt_client", \
+ "???", \
"mds_capa", \
"oss_capa", \
"flock", \
@@ -449,26 +425,6 @@ enum stats_track_type {
"xattr", \
}
-#define RCE_HASHES 32
-
-struct rmtacl_ctl_entry {
- struct list_head rce_list;
- pid_t rce_key; /* hash key */
- int rce_ops; /* acl operation type */
-};
-
-struct rmtacl_ctl_table {
- spinlock_t rct_lock;
- struct list_head rct_entries[RCE_HASHES];
-};
-
-#define EE_HASHES 32
-
-struct eacl_table {
- spinlock_t et_lock;
- struct list_head et_entries[EE_HASHES];
-};
-
struct ll_sb_info {
/* this protects pglist and ra_info. It isn't safe to
* grab from interrupt contexts
@@ -497,7 +453,7 @@ struct ll_sb_info {
* any page which is sent to a server as part of a bulk request,
* but is uncommitted to stable storage.
*/
- struct cl_client_cache ll_cache;
+ struct cl_client_cache *ll_cache;
struct lprocfs_stats *ll_ra_stats;
@@ -533,8 +489,6 @@ struct ll_sb_info {
dev_t ll_sdev_orig; /* save s_dev before assign for
* clustered nfs
*/
- struct rmtacl_ctl_table ll_rct;
- struct eacl_table ll_et;
__kernel_fsid_t ll_fsid;
struct kobject ll_kobj; /* sysfs object */
struct super_block *ll_sb; /* struct super_block (for sysfs code)*/
@@ -640,6 +594,8 @@ struct ll_file_data {
* false: unknown failure, should report.
*/
bool fd_write_failed;
+ rwlock_t fd_lock; /* protect lcc list */
+ struct list_head fd_lccs; /* list of ll_cl_context */
};
struct lov_stripe_md;
@@ -715,8 +671,9 @@ void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras);
int ll_readahead(const struct lu_env *env, struct cl_io *io,
struct cl_page_list *queue, struct ll_readahead_state *ras,
bool hit);
-struct ll_cl_context *ll_cl_init(struct file *file, struct page *vmpage);
-void ll_cl_fini(struct ll_cl_context *lcc);
+struct ll_cl_context *ll_cl_find(struct file *file);
+void ll_cl_add(struct file *file, const struct lu_env *env, struct cl_io *io);
+void ll_cl_remove(struct file *file, const struct lu_env *env);
extern const struct address_space_operations ll_aops;
@@ -858,11 +815,11 @@ struct vvp_io_args {
};
struct ll_cl_context {
+ struct list_head lcc_list;
void *lcc_cookie;
+ const struct lu_env *lcc_env;
struct cl_io *lcc_io;
struct cl_page *lcc_page;
- struct lu_env *lcc_env;
- int lcc_refcheck;
};
struct ll_thread_info {
@@ -983,14 +940,6 @@ ssize_t ll_getxattr(struct dentry *dentry, struct inode *inode,
ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size);
int ll_removexattr(struct dentry *dentry, const char *name);
-/* llite/remote_perm.c */
-extern struct kmem_cache *ll_remote_perm_cachep;
-extern struct kmem_cache *ll_rmtperm_hash_cachep;
-
-void free_rmtperm_hash(struct hlist_head *hash);
-int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm);
-int lustre_check_remote_perm(struct inode *inode, int mask);
-
/**
* Common IO arguments for various VFS I/O interfaces.
*/
@@ -1004,40 +953,7 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len);
void ll_ra_stats_inc(struct inode *inode, enum ra_stat which);
-/* llite/llite_rmtacl.c */
-#ifdef CONFIG_FS_POSIX_ACL
-struct eacl_entry {
- struct list_head ee_list;
- pid_t ee_key; /* hash key */
- struct lu_fid ee_fid;
- int ee_type; /* ACL type for ACCESS or DEFAULT */
- ext_acl_xattr_header *ee_acl;
-};
-
-u64 rce_ops2valid(int ops);
-struct rmtacl_ctl_entry *rct_search(struct rmtacl_ctl_table *rct, pid_t key);
-int rct_add(struct rmtacl_ctl_table *rct, pid_t key, int ops);
-int rct_del(struct rmtacl_ctl_table *rct, pid_t key);
-void rct_init(struct rmtacl_ctl_table *rct);
-void rct_fini(struct rmtacl_ctl_table *rct);
-
-void ee_free(struct eacl_entry *ee);
-int ee_add(struct eacl_table *et, pid_t key, struct lu_fid *fid, int type,
- ext_acl_xattr_header *header);
-struct eacl_entry *et_search_del(struct eacl_table *et, pid_t key,
- struct lu_fid *fid, int type);
-void et_search_free(struct eacl_table *et, pid_t key);
-void et_init(struct eacl_table *et);
-void et_fini(struct eacl_table *et);
-#else
-static inline u64 rce_ops2valid(int ops)
-{
- return 0;
-}
-#endif
-
/* statahead.c */
-
#define LL_SA_RPC_MIN 2
#define LL_SA_RPC_DEF 32
#define LL_SA_RPC_MAX 8192
@@ -1281,7 +1197,7 @@ static inline int ll_file_nolock(const struct file *file)
static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
struct lookup_intent *it, __u64 *bits)
{
- if (!it->d.lustre.it_lock_set) {
+ if (!it->it_lock_set) {
struct lustre_handle handle;
/* If this inode is a remote object, it will get two
@@ -1292,36 +1208,26 @@ static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
* LOOKUP and PERM locks, so revoking either lock will
* cause the dcache to be cleared
*/
- if (it->d.lustre.it_remote_lock_mode) {
- handle.cookie = it->d.lustre.it_remote_lock_handle;
+ if (it->it_remote_lock_mode) {
+ handle.cookie = it->it_remote_lock_handle;
CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID"%p for remote lock %#llx\n",
PFID(ll_inode2fid(inode)), inode,
handle.cookie);
md_set_lock_data(exp, &handle.cookie, inode, NULL);
}
- handle.cookie = it->d.lustre.it_lock_handle;
+ handle.cookie = it->it_lock_handle;
CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID"%p for lock %#llx\n",
PFID(ll_inode2fid(inode)), inode, handle.cookie);
md_set_lock_data(exp, &handle.cookie, inode,
- &it->d.lustre.it_lock_bits);
- it->d.lustre.it_lock_set = 1;
+ &it->it_lock_bits);
+ it->it_lock_set = 1;
}
if (bits)
- *bits = it->d.lustre.it_lock_bits;
-}
-
-static inline void ll_lock_dcache(struct inode *inode)
-{
- spin_lock(&inode->i_lock);
-}
-
-static inline void ll_unlock_dcache(struct inode *inode)
-{
- spin_unlock(&inode->i_lock);
+ *bits = it->it_lock_bits;
}
static inline int d_lustre_invalid(const struct dentry *dentry)
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index 96c7e9fc6..546063e72 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -87,15 +83,11 @@ static struct ll_sb_info *ll_init_sbi(struct super_block *sb)
pages = si.totalram - si.totalhigh;
lru_page_max = pages / 2;
- /* initialize ll_cache data */
- atomic_set(&sbi->ll_cache.ccc_users, 0);
- sbi->ll_cache.ccc_lru_max = lru_page_max;
- atomic_set(&sbi->ll_cache.ccc_lru_left, lru_page_max);
- spin_lock_init(&sbi->ll_cache.ccc_lru_lock);
- INIT_LIST_HEAD(&sbi->ll_cache.ccc_lru);
-
- atomic_set(&sbi->ll_cache.ccc_unstable_nr, 0);
- init_waitqueue_head(&sbi->ll_cache.ccc_unstable_waitq);
+ sbi->ll_cache = cl_cache_init(lru_page_max);
+ if (!sbi->ll_cache) {
+ kfree(sbi);
+ return NULL;
+ }
sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
SBI_DEFAULT_READAHEAD_MAX);
@@ -135,6 +127,11 @@ static void ll_free_sbi(struct super_block *sb)
{
struct ll_sb_info *sbi = ll_s2sbi(sb);
+ if (sbi->ll_cache) {
+ cl_cache_decref(sbi->ll_cache);
+ sbi->ll_cache = NULL;
+ }
+
kfree(sbi);
}
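The client page cache moves from being embedded in ll_sb_info to a refcounted, separately allocated object, so it can outlive the superblock while OSCs still hold references (which is also why the obd_zombie_barrier() calls go away later in this file). Judging from the initialization the old code did inline, a constructor like cl_cache_init() is expected to look roughly like this (a sketch under that assumption, not the actual implementation):

        struct cl_client_cache *sketch_cl_cache_init(unsigned long lru_page_max)
        {
                struct cl_client_cache *cache;

                cache = kzalloc(sizeof(*cache), GFP_KERNEL);
                if (!cache)
                        return NULL;

                /* One reference for the caller; cl_cache_decref() drops it. */
                atomic_set(&cache->ccc_users, 1);
                cache->ccc_lru_max = lru_page_max;
                atomic_set(&cache->ccc_lru_left, lru_page_max);
                spin_lock_init(&cache->ccc_lru_lock);
                INIT_LIST_HEAD(&cache->ccc_lru);
                atomic_set(&cache->ccc_unstable_nr, 0);
                init_waitqueue_head(&cache->ccc_unstable_waitq);

                return cache;
        }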
@@ -175,8 +172,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
OBD_CONNECT_VERSION | OBD_CONNECT_BRW_SIZE |
OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
OBD_CONNECT_AT | OBD_CONNECT_LOV_V3 |
- OBD_CONNECT_RMT_CLIENT | OBD_CONNECT_VBR |
- OBD_CONNECT_FULL20 | OBD_CONNECT_64BITHASH|
+ OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
+ OBD_CONNECT_64BITHASH |
OBD_CONNECT_EINPROGRESS |
OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
OBD_CONNECT_LAYOUTLOCK |
@@ -217,8 +214,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
/* real client */
data->ocd_connect_flags |= OBD_CONNECT_REAL;
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
- data->ocd_connect_flags |= OBD_CONNECT_RMT_CLIENT_FORCE;
data->ocd_brw_size = MD_MAX_BRW_SIZE;
@@ -311,18 +306,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
sbi->ll_flags &= ~LL_SBI_ACL;
}
- if (data->ocd_connect_flags & OBD_CONNECT_RMT_CLIENT) {
- if (!(sbi->ll_flags & LL_SBI_RMT_CLIENT)) {
- sbi->ll_flags |= LL_SBI_RMT_CLIENT;
- LCONSOLE_INFO("client is set as remote by default.\n");
- }
- } else {
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
- sbi->ll_flags &= ~LL_SBI_RMT_CLIENT;
- LCONSOLE_INFO("client claims to be remote, but server rejected, forced to be local.\n");
- }
- }
-
if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
sbi->ll_flags |= LL_SBI_64BIT_HASH;
@@ -356,10 +339,9 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
OBD_CONNECT_CANCELSET | OBD_CONNECT_FID |
OBD_CONNECT_SRVLOCK | OBD_CONNECT_TRUNCLOCK|
- OBD_CONNECT_AT | OBD_CONNECT_RMT_CLIENT |
- OBD_CONNECT_OSS_CAPA | OBD_CONNECT_VBR|
- OBD_CONNECT_FULL20 | OBD_CONNECT_64BITHASH |
- OBD_CONNECT_MAXBYTES |
+ OBD_CONNECT_AT | OBD_CONNECT_OSS_CAPA |
+ OBD_CONNECT_VBR | OBD_CONNECT_FULL20 |
+ OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES |
OBD_CONNECT_EINPROGRESS |
OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS;
@@ -382,8 +364,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
}
data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
- data->ocd_connect_flags |= OBD_CONNECT_RMT_CLIENT_FORCE;
CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d\n",
data->ocd_connect_flags,
@@ -446,9 +426,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
* XXX: move this to after cbd setup?
*/
valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMODEASIZE;
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
- valid |= OBD_MD_FLRMTPERM;
- else if (sbi->ll_flags & LL_SBI_ACL)
+ if (sbi->ll_flags & LL_SBI_ACL)
valid |= OBD_MD_FLACL;
op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
@@ -504,13 +482,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
goto out_root;
}
-#ifdef CONFIG_FS_POSIX_ACL
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
- rct_init(&sbi->ll_rct);
- et_init(&sbi->ll_et);
- }
-#endif
-
checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
KEY_CHECKSUM, sizeof(checksum), &checksum,
@@ -518,8 +489,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
cl_sb_init(sb);
err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CACHE_SET),
- KEY_CACHE_SET, sizeof(sbi->ll_cache),
- &sbi->ll_cache, NULL);
+ KEY_CACHE_SET, sizeof(*sbi->ll_cache),
+ sbi->ll_cache, NULL);
sb->s_root = d_make_root(root);
if (!sb->s_root) {
@@ -564,8 +535,6 @@ out_lock_cn_cb:
out_dt:
obd_disconnect(sbi->ll_dt_exp);
sbi->ll_dt_exp = NULL;
- /* Make sure all OScs are gone, since cl_cache is accessing sbi. */
- obd_zombie_barrier();
out_md_fid:
obd_fid_fini(sbi->ll_md_exp->exp_obd);
out_md:
@@ -608,13 +577,6 @@ static void client_common_put_super(struct super_block *sb)
{
struct ll_sb_info *sbi = ll_s2sbi(sb);
-#ifdef CONFIG_FS_POSIX_ACL
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
- et_fini(&sbi->ll_et);
- rct_fini(&sbi->ll_rct);
- }
-#endif
-
ll_close_thread_shutdown(sbi->ll_lcq);
cl_sb_fini(sb);
@@ -622,10 +584,6 @@ static void client_common_put_super(struct super_block *sb)
obd_fid_fini(sbi->ll_dt_exp->exp_obd);
obd_disconnect(sbi->ll_dt_exp);
sbi->ll_dt_exp = NULL;
- /* wait till all OSCs are gone, since cl_cache is accessing sbi.
- * see LU-2543.
- */
- obd_zombie_barrier();
ldebugfs_unregister_mountpoint(sbi);
@@ -704,11 +662,6 @@ static int ll_options(char *options, int *flags)
*flags &= ~tmp;
goto next;
}
- tmp = ll_set_opt("remote_client", s1, LL_SBI_RMT_CLIENT);
- if (tmp) {
- *flags |= tmp;
- goto next;
- }
tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH);
if (tmp) {
*flags |= tmp;
@@ -792,12 +745,9 @@ void ll_lli_init(struct ll_inode_info *lli)
lli->lli_maxbytes = MAX_LFS_FILESIZE;
spin_lock_init(&lli->lli_lock);
lli->lli_posix_acl = NULL;
- lli->lli_remote_perms = NULL;
- mutex_init(&lli->lli_rmtperm_mutex);
/* Do not set lli_fid, it has been initialized already. */
fid_zero(&lli->lli_pfid);
INIT_LIST_HEAD(&lli->lli_close_list);
- lli->lli_rmtperm_time = 0;
lli->lli_pending_och = NULL;
lli->lli_mds_read_och = NULL;
lli->lli_mds_write_och = NULL;
@@ -864,7 +814,8 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
try_module_get(THIS_MODULE);
/* client additional sb info */
- lsi->lsi_llsbi = sbi = ll_init_sbi(sb);
+ sbi = ll_init_sbi(sb);
+ lsi->lsi_llsbi = sbi;
if (!sbi) {
module_put(THIS_MODULE);
kfree(cfg);
@@ -965,12 +916,12 @@ void ll_put_super(struct super_block *sb)
if (!force) {
struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
- rc = l_wait_event(sbi->ll_cache.ccc_unstable_waitq,
- !atomic_read(&sbi->ll_cache.ccc_unstable_nr),
+ rc = l_wait_event(sbi->ll_cache->ccc_unstable_waitq,
+ !atomic_read(&sbi->ll_cache->ccc_unstable_nr),
&lwi);
}
- ccc_count = atomic_read(&sbi->ll_cache.ccc_unstable_nr);
+ ccc_count = atomic_read(&sbi->ll_cache->ccc_unstable_nr);
if (!force && rc != -EINTR)
LASSERTF(!ccc_count, "count: %i\n", ccc_count);
@@ -1078,17 +1029,9 @@ void ll_clear_inode(struct inode *inode)
ll_xattr_cache_destroy(inode);
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
- LASSERT(!lli->lli_posix_acl);
- if (lli->lli_remote_perms) {
- free_rmtperm_hash(lli->lli_remote_perms);
- lli->lli_remote_perms = NULL;
- }
- }
#ifdef CONFIG_FS_POSIX_ACL
- else if (lli->lli_posix_acl) {
+ if (lli->lli_posix_acl) {
LASSERT(atomic_read(&lli->lli_posix_acl->a_refcount) == 1);
- LASSERT(!lli->lli_remote_perms);
posix_acl_release(lli->lli_posix_acl);
lli->lli_posix_acl = NULL;
}
@@ -1540,12 +1483,8 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
lli->lli_maxbytes = MAX_LFS_FILESIZE;
}
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
- if (body->valid & OBD_MD_FLRMTPERM)
- ll_update_remote_perm(inode, md->remote_perm);
- }
#ifdef CONFIG_FS_POSIX_ACL
- else if (body->valid & OBD_MD_FLACL) {
+ if (body->valid & OBD_MD_FLACL) {
spin_lock(&lli->lli_lock);
if (lli->lli_posix_acl)
posix_acl_release(lli->lli_posix_acl);
@@ -1979,7 +1918,13 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
* At this point the server returns to the client the same fid the
* client generated for the create, so using ->fid1 is okay here.
*/
- LASSERT(fid_is_sane(&md.body->fid1));
+ if (!fid_is_sane(&md.body->fid1)) {
+ CERROR("%s: Fid is insane " DFID "\n",
+ ll_get_fsname(sb, NULL, 0),
+ PFID(&md.body->fid1));
+ rc = -EINVAL;
+ goto out;
+ }
*inode = ll_iget(sb, cl_fid_build_ino(&md.body->fid1,
sbi->ll_flags & LL_SBI_32BIT_API),
@@ -2006,11 +1951,11 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
* 3. proc2: refresh layout and layout lock granted
* 4. proc1: to apply a stale layout
*/
- if (it && it->d.lustre.it_lock_mode != 0) {
+ if (it && it->it_lock_mode != 0) {
struct lustre_handle lockh;
struct ldlm_lock *lock;
- lockh.cookie = it->d.lustre.it_lock_handle;
+ lockh.cookie = it->it_lock_handle;
lock = ldlm_handle2lock(&lockh);
LASSERT(lock);
if (ldlm_has_layout(lock)) {
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index 88ef1cac9..66ee5db5f 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -200,18 +196,11 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));
- /* we grab lli_trunc_sem to exclude truncate case.
- * Otherwise, we could add dirty pages into osc cache
- * while truncate is on-going.
- */
inode = vvp_object_inode(io->ci_obj);
lli = ll_i2info(inode);
- down_read(&lli->lli_trunc_sem);
result = cl_io_loop(env, io);
- up_read(&lli->lli_trunc_sem);
-
cfs_restore_sigs(set);
if (result == 0) {
@@ -315,8 +304,13 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
vio->u.fault.ft_flags = 0;
vio->u.fault.ft_flags_valid = false;
+ /* May call ll_readpage() */
+ ll_cl_add(vma->vm_file, env, io);
+
result = cl_io_loop(env, io);
+ ll_cl_remove(vma->vm_file, env);
+
/* ft_flags are only valid if we reached
* the call to filemap_fault
*/
diff --git a/drivers/staging/lustre/lustre/llite/llite_nfs.c b/drivers/staging/lustre/lustre/llite/llite_nfs.c
index c1eef6198..65972c892 100644
--- a/drivers/staging/lustre/lustre/llite/llite_nfs.c
+++ b/drivers/staging/lustre/lustre/llite/llite_nfs.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -172,6 +168,24 @@ ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *paren
/* N.B. d_obtain_alias() drops inode ref on error */
result = d_obtain_alias(inode);
+ if (!IS_ERR(result)) {
+ int rc;
+
+ rc = ll_d_init(result);
+ if (rc < 0) {
+ dput(result);
+ result = ERR_PTR(rc);
+ } else {
+ struct ll_dentry_data *ldd = ll_d2d(result);
+
+ /*
+ * Need to signal to ll_intent_file_open() that
+ * we came from NFS and so the open cache needs
+ * to be enabled for this dentry
+ */
+ ldd->lld_nfs_dentry = 1;
+ }
+ }
return result;
}
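This is the producer side of the handshake consumed in file.c above: ll_iget_for_nfs() now guarantees the dentry has ll_dentry_data attached (via ll_d_init()) and marks it with the one-shot lld_nfs_dentry bit, which ll_intent_file_open() later exchanges for an MDS_OPEN_LOCK request so the open cache works across NFS re-export. The consumer side, condensed from the file.c hunk:

        struct ll_dentry_data *ldd = ll_d2d(dentry);

        if (ldd && ldd->lld_nfs_dentry) {
                ldd->lld_nfs_dentry = 0;        /* one-shot flag */
                itp->it_flags |= MDS_OPEN_LOCK;
        }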
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index 55d62eb11..e86bf3c53 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -180,11 +176,7 @@ LUSTRE_RO_ATTR(filesfree);
static ssize_t client_type_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
-
- return sprintf(buf, "%s client\n",
- sbi->ll_flags & LL_SBI_RMT_CLIENT ? "remote" : "local");
+ return sprintf(buf, "local client\n");
}
LUSTRE_RO_ATTR(client_type);
@@ -364,7 +356,7 @@ static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
{
struct super_block *sb = m->private;
struct ll_sb_info *sbi = ll_s2sbi(sb);
- struct cl_client_cache *cache = &sbi->ll_cache;
+ struct cl_client_cache *cache = sbi->ll_cache;
int shift = 20 - PAGE_SHIFT;
int max_cached_mb;
int unused_mb;
@@ -391,7 +383,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
{
struct super_block *sb = ((struct seq_file *)file->private_data)->private;
struct ll_sb_info *sbi = ll_s2sbi(sb);
- struct cl_client_cache *cache = &sbi->ll_cache;
+ struct cl_client_cache *cache = sbi->ll_cache;
struct lu_env *env;
int refcheck;
int mult, rc, pages_number;
@@ -830,7 +822,7 @@ static ssize_t unstable_stats_show(struct kobject *kobj,
{
struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
ll_kobj);
- struct cl_client_cache *cache = &sbi->ll_cache;
+ struct cl_client_cache *cache = sbi->ll_cache;
int pages, mb;
pages = atomic_read(&cache->ccc_unstable_nr);
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 86e40ce39..2c4dc6973 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -144,7 +140,7 @@ static void ll_invalidate_negative_children(struct inode *dir)
{
struct dentry *dentry, *tmp_subdir;
- ll_lock_dcache(dir);
+ spin_lock(&dir->i_lock);
hlist_for_each_entry(dentry, &dir->i_dentry, d_u.d_alias) {
spin_lock(&dentry->d_lock);
if (!list_empty(&dentry->d_subdirs)) {
@@ -159,7 +155,7 @@ static void ll_invalidate_negative_children(struct inode *dir)
}
spin_unlock(&dentry->d_lock);
}
- ll_unlock_dcache(dir);
+ spin_unlock(&dir->i_lock);
}
int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
@@ -318,9 +314,10 @@ static struct dentry *ll_find_alias(struct inode *inode, struct dentry *dentry)
if (hlist_empty(&inode->i_dentry))
return NULL;
- discon_alias = invalid_alias = NULL;
+ discon_alias = NULL;
+ invalid_alias = NULL;
- ll_lock_dcache(inode);
+ spin_lock(&inode->i_lock);
hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
LASSERT(alias != dentry);
@@ -345,7 +342,7 @@ static struct dentry *ll_find_alias(struct inode *inode, struct dentry *dentry)
dget_dlock(alias);
spin_unlock(&alias->d_lock);
}
- ll_unlock_dcache(inode);
+ spin_unlock(&inode->i_lock);
return alias;
}
@@ -397,7 +394,7 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request,
* when I return
*/
CDEBUG(D_DENTRY, "it %p it_disposition %x\n", it,
- it->d.lustre.it_disposition);
+ it->it_disposition);
if (!it_disposition(it, DISP_LOOKUP_NEG)) {
rc = ll_prep_inode(&inode, request, (*de)->d_sb, it);
if (rc)
@@ -435,7 +432,7 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request,
/* Check that parent has UPDATE lock. */
struct lookup_intent parent_it = {
.it_op = IT_GETATTR,
- .d.lustre.it_lock_handle = 0 };
+ .it_lock_handle = 0 };
if (md_revalidate_lock(ll_i2mdexp(parent), &parent_it,
&ll_i2info(parent)->lli_fid, NULL)) {
@@ -630,13 +627,10 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
if (d_really_is_positive(dentry) && it_disposition(it, DISP_OPEN_OPEN)) {
/* Open dentry. */
if (S_ISFIFO(d_inode(dentry)->i_mode)) {
- /* We cannot call open here as it would
- * deadlock.
+ /* We cannot call open here as it might
+ * deadlock. This case is unreachable in
+ * practice because of OBD_CONNECT_NODEVOH.
*/
- if (it_disposition(it, DISP_ENQ_OPEN_REF))
- ptlrpc_req_finished(
- (struct ptlrpc_request *)
- it->d.lustre.it_data);
rc = finish_no_open(file, de);
} else {
file->private_data = it;
@@ -667,10 +661,10 @@ static struct inode *ll_create_node(struct inode *dir, struct lookup_intent *it)
struct ll_sb_info *sbi = ll_i2sbi(dir);
int rc;
- LASSERT(it && it->d.lustre.it_disposition);
+ LASSERT(it && it->it_disposition);
LASSERT(it_disposition(it, DISP_ENQ_CREATE_REF));
- request = it->d.lustre.it_data;
+ request = it->it_request;
it_clear_disposition(it, DISP_ENQ_CREATE_REF);
rc = ll_prep_inode(&inode, request, dir->i_sb, it);
if (rc) {
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index 336397773..87393c4bd 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -59,84 +55,6 @@
#include "llite_internal.h"
#include "../include/linux/lustre_compat25.h"
-/**
- * Finalizes cl-data before exiting typical address_space operation. Dual to
- * ll_cl_init().
- */
-void ll_cl_fini(struct ll_cl_context *lcc)
-{
- struct lu_env *env = lcc->lcc_env;
- struct cl_io *io = lcc->lcc_io;
- struct cl_page *page = lcc->lcc_page;
-
- LASSERT(lcc->lcc_cookie == current);
- LASSERT(env);
-
- if (page) {
- lu_ref_del(&page->cp_reference, "cl_io", io);
- cl_page_put(env, page);
- }
-
- cl_env_put(env, &lcc->lcc_refcheck);
-}
-
-/**
- * Initializes common cl-data at the typical address_space operation entry
- * point.
- */
-struct ll_cl_context *ll_cl_init(struct file *file, struct page *vmpage)
-{
- struct ll_cl_context *lcc;
- struct lu_env *env;
- struct cl_io *io;
- struct cl_object *clob;
- struct vvp_io *vio;
-
- int refcheck;
- int result = 0;
-
- clob = ll_i2info(file_inode(file))->lli_clob;
- LASSERT(clob);
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return ERR_CAST(env);
-
- lcc = &ll_env_info(env)->lti_io_ctx;
- memset(lcc, 0, sizeof(*lcc));
- lcc->lcc_env = env;
- lcc->lcc_refcheck = refcheck;
- lcc->lcc_cookie = current;
-
- vio = vvp_env_io(env);
- io = vio->vui_cl.cis_io;
- lcc->lcc_io = io;
- if (!io)
- result = -EIO;
-
- if (result == 0 && vmpage) {
- struct cl_page *page;
-
- LASSERT(io->ci_state == CIS_IO_GOING);
- LASSERT(vio->vui_fd == LUSTRE_FPRIVATE(file));
- page = cl_page_find(env, clob, vmpage->index, vmpage,
- CPT_CACHEABLE);
- if (!IS_ERR(page)) {
- lcc->lcc_page = page;
- lu_ref_add(&page->cp_reference, "cl_io", io);
- result = 0;
- } else {
- result = PTR_ERR(page);
- }
- }
- if (result) {
- ll_cl_fini(lcc);
- lcc = ERR_PTR(result);
- }
-
- return lcc;
-}
-
static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which);
/**
@@ -1112,17 +1030,70 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
return result;
}
+struct ll_cl_context *ll_cl_find(struct file *file)
+{
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_cl_context *lcc;
+ struct ll_cl_context *found = NULL;
+
+ read_lock(&fd->fd_lock);
+ list_for_each_entry(lcc, &fd->fd_lccs, lcc_list) {
+ if (lcc->lcc_cookie == current) {
+ found = lcc;
+ break;
+ }
+ }
+ read_unlock(&fd->fd_lock);
+
+ return found;
+}
+
+void ll_cl_add(struct file *file, const struct lu_env *env, struct cl_io *io)
+{
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_cl_context *lcc = &ll_env_info(env)->lti_io_ctx;
+
+ memset(lcc, 0, sizeof(*lcc));
+ INIT_LIST_HEAD(&lcc->lcc_list);
+ lcc->lcc_cookie = current;
+ lcc->lcc_env = env;
+ lcc->lcc_io = io;
+
+ write_lock(&fd->fd_lock);
+ list_add(&lcc->lcc_list, &fd->fd_lccs);
+ write_unlock(&fd->fd_lock);
+}
+
+void ll_cl_remove(struct file *file, const struct lu_env *env)
+{
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ll_cl_context *lcc = &ll_env_info(env)->lti_io_ctx;
+
+ write_lock(&fd->fd_lock);
+ list_del_init(&lcc->lcc_list);
+ write_unlock(&fd->fd_lock);
+}
+
int ll_readpage(struct file *file, struct page *vmpage)
{
+ struct cl_object *clob = ll_i2info(file_inode(file))->lli_clob;
struct ll_cl_context *lcc;
+ const struct lu_env *env;
+ struct cl_io *io;
+ struct cl_page *page;
int result;
- lcc = ll_cl_init(file, vmpage);
- if (!IS_ERR(lcc)) {
- struct lu_env *env = lcc->lcc_env;
- struct cl_io *io = lcc->lcc_io;
- struct cl_page *page = lcc->lcc_page;
+ lcc = ll_cl_find(file);
+ if (!lcc) {
+ unlock_page(vmpage);
+ return -EIO;
+ }
+ env = lcc->lcc_env;
+ io = lcc->lcc_io;
+ LASSERT(io->ci_state == CIS_IO_GOING);
+ page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE);
+ if (!IS_ERR(page)) {
LASSERT(page->cp_type == CPT_CACHEABLE);
if (likely(!PageUptodate(vmpage))) {
cl_page_assume(env, io, page);
@@ -1132,10 +1103,10 @@ int ll_readpage(struct file *file, struct page *vmpage)
unlock_page(vmpage);
result = 0;
}
- ll_cl_fini(lcc);
+ cl_page_put(env, page);
} else {
unlock_page(vmpage);
- result = PTR_ERR(lcc);
+ result = PTR_ERR(page);
}
return result;
}
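ll_cl_find()/ll_cl_add()/ll_cl_remove() above replace the old ll_cl_init()/ll_cl_fini() pair: instead of building a fresh context inside every address_space operation, the outer I/O path publishes its (env, io) pair on a per-file-descriptor list keyed by "current", and ->readpage() / ->write_begin() simply look it up. The list is read-mostly, hence the rwlock (fd_lock) rather than a spinlock. A minimal usage sketch mirroring the ll_file_io_generic() and ll_fault0() call sites in this patch (example_io is a hypothetical name):

        static int example_io(struct file *file, const struct lu_env *env,
                              struct cl_io *io)
        {
                int rc;

                ll_cl_add(file, env, io);       /* visible to ll_cl_find() */
                rc = cl_io_loop(env, io);       /* may enter ->readpage() */
                ll_cl_remove(file, env);

                return rc;
        }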
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index c12a048fc..d98c7acc0 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -489,7 +485,7 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
struct page **pagep, void **fsdata)
{
struct ll_cl_context *lcc;
- struct lu_env *env;
+ const struct lu_env *env;
struct cl_io *io;
struct cl_page *page;
struct cl_object *clob = ll_i2info(mapping->host)->lli_clob;
@@ -501,9 +497,9 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
CDEBUG(D_VFSTRACE, "Writing %lu of %d to %d bytes\n", index, from, len);
- lcc = ll_cl_init(file, NULL);
- if (IS_ERR(lcc)) {
- result = PTR_ERR(lcc);
+ lcc = ll_cl_find(file);
+ if (!lcc) {
+ result = -EIO;
goto out;
}
@@ -579,8 +575,6 @@ out:
unlock_page(vmpage);
put_page(vmpage);
}
- if (!IS_ERR(lcc))
- ll_cl_fini(lcc);
} else {
*pagep = vmpage;
*fsdata = lcc;
@@ -593,7 +587,7 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
struct page *vmpage, void *fsdata)
{
struct ll_cl_context *lcc = fsdata;
- struct lu_env *env;
+ const struct lu_env *env;
struct cl_io *io;
struct vvp_io *vio;
struct cl_page *page;
@@ -631,6 +625,10 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
} else {
cl_page_disown(env, io, page);
+ lcc->lcc_page = NULL;
+ lu_ref_del(&page->cp_reference, "cl_io", io);
+ cl_page_put(env, page);
+
/* page list is not contiguous now, commit it now */
unplug = true;
}
@@ -639,7 +637,6 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
file->f_flags & O_SYNC || IS_SYNC(file_inode(file)))
result = vvp_io_write_commit(env, io);
- ll_cl_fini(lcc);
return result >= 0 ? copied : result;
}
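With ll_cl_fini() gone, the write path's page lifetime is split across the two address_space hooks: ll_write_begin() finds the context with ll_cl_find(), takes the cl_page reference (cl_page_find() plus lu_ref_add()), and hands it to the VFS through *fsdata; ll_write_end() receives the same lcc back as fsdata and, in the disown branch above, drops the lu_ref and the page reference itself. Schematically, from the VFS side (4.8 a_ops signatures; a sketch, assuming mapping, file, pos and len from context):

        const struct address_space_operations *aops = mapping->a_ops;
        struct page *page;
        void *fsdata;
        int rc;

        rc = aops->write_begin(file, mapping, pos, len, 0, &page, &fsdata);
        if (rc == 0) {
                unsigned int copied = len;      /* assume full copy succeeded */

                rc = aops->write_end(file, mapping, pos, len, copied,
                                     page, fsdata);
        }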
diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c
index 6322f8866..c1cb6b19e 100644
--- a/drivers/staging/lustre/lustre/llite/statahead.c
+++ b/drivers/staging/lustre/lustre/llite/statahead.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -174,7 +170,8 @@ static inline int is_omitted_entry(struct ll_statahead_info *sai, __u64 index)
* Insert it into the sai_entries tail at initialization time.
*/
static struct ll_sa_entry *
-ll_sa_entry_alloc(struct ll_statahead_info *sai, __u64 index,
+ll_sa_entry_alloc(struct dentry *parent,
+ struct ll_statahead_info *sai, __u64 index,
const char *name, int len)
{
struct ll_inode_info *lli;
@@ -221,7 +218,8 @@ ll_sa_entry_alloc(struct ll_statahead_info *sai, __u64 index,
dname = (char *)entry + sizeof(struct ll_sa_entry);
memcpy(dname, name, len);
dname[len] = 0;
- entry->se_qstr.hash = full_name_hash(name, len);
+
+ entry->se_qstr.hash = full_name_hash(parent, name, len);
entry->se_qstr.len = len;
entry->se_qstr.name = dname;
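The new parent argument reflects a 4.8 VFS change rather than anything Lustre-specific: full_name_hash() (and partial_name_hash()) gained a salt parameter, and in-tree callers pass the parent dentry so that equal names hash differently under different directories. Generic usage, under that assumption:

        struct qstr this;

        this.name = dname;
        this.len = len;
        /* The parent dentry acts as the per-directory salt. */
        this.hash = full_name_hash(parent, dname, len);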
@@ -650,7 +648,7 @@ static void ll_post_statahead(struct ll_statahead_info *sai)
}
}
- it->d.lustre.it_lock_handle = entry->se_handle;
+ it->it_lock_handle = entry->se_handle;
rc = md_revalidate_lock(ll_i2mdexp(dir), it, ll_inode2fid(dir), NULL);
if (rc != 1) {
rc = -EAGAIN;
@@ -704,7 +702,7 @@ static int ll_statahead_interpret(struct ptlrpc_request *req,
* process enqueues a lock on the child with the parent lock held, e.g.
* unlink.
*/
- handle = it->d.lustre.it_lock_handle;
+ handle = it->it_lock_handle;
ll_intent_drop_lock(it);
}
@@ -783,7 +781,7 @@ static int sa_args_init(struct inode *dir, struct inode *child,
struct ll_sa_entry *entry, struct md_enqueue_info **pmi,
struct ldlm_enqueue_info **pei)
{
- struct qstr *qstr = &entry->se_qstr;
+ const struct qstr *qstr = &entry->se_qstr;
struct ll_inode_info *lli = ll_i2info(dir);
struct md_enqueue_info *minfo;
struct ldlm_enqueue_info *einfo;
@@ -854,7 +852,7 @@ static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
{
struct inode *inode = d_inode(dentry);
struct lookup_intent it = { .it_op = IT_GETATTR,
- .d.lustre.it_lock_handle = 0 };
+ .it_lock_handle = 0 };
struct md_enqueue_info *minfo;
struct ldlm_enqueue_info *einfo;
int rc;
@@ -869,7 +867,7 @@ static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),
NULL);
if (rc == 1) {
- entry->se_handle = it.d.lustre.it_lock_handle;
+ entry->se_handle = it.it_lock_handle;
ll_intent_release(&it);
return 1;
}
@@ -902,7 +900,7 @@ static void ll_statahead_one(struct dentry *parent, const char *entry_name,
int rc;
int rc1;
- entry = ll_sa_entry_alloc(sai, sai->sai_index, entry_name,
+ entry = ll_sa_entry_alloc(parent, sai, sai->sai_index, entry_name,
entry_name_len);
if (IS_ERR(entry))
return;
@@ -1342,7 +1340,7 @@ enum {
static int is_first_dirent(struct inode *dir, struct dentry *dentry)
{
struct ll_dir_chain chain;
- struct qstr *target = &dentry->d_name;
+ const struct qstr *target = &dentry->d_name;
struct page *page;
__u64 pos = 0;
int dot_de;
@@ -1573,7 +1571,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
if (entry->se_stat == SA_ENTRY_SUCC && entry->se_inode) {
struct inode *inode = entry->se_inode;
struct lookup_intent it = { .it_op = IT_GETATTR,
- .d.lustre.it_lock_handle =
+ .it_lock_handle =
entry->se_handle };
__u64 bits;
diff --git a/drivers/staging/lustre/lustre/llite/super25.c b/drivers/staging/lustre/lustre/llite/super25.c
index 415750b0b..3dd7e0eb0 100644
--- a/drivers/staging/lustre/lustre/llite/super25.c
+++ b/drivers/staging/lustre/lustre/llite/super25.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -118,19 +114,6 @@ static int __init lustre_init(void)
if (!ll_file_data_slab)
goto out_cache;
- ll_remote_perm_cachep = kmem_cache_create("ll_remote_perm_cache",
- sizeof(struct ll_remote_perm),
- 0, 0, NULL);
- if (!ll_remote_perm_cachep)
- goto out_cache;
-
- ll_rmtperm_hash_cachep = kmem_cache_create("ll_rmtperm_hash_cache",
- REMOTE_PERM_HASHSIZE *
- sizeof(struct list_head),
- 0, 0, NULL);
- if (!ll_rmtperm_hash_cachep)
- goto out_cache;
-
llite_root = debugfs_create_dir("llite", debugfs_lustre_root);
if (IS_ERR_OR_NULL(llite_root)) {
rc = llite_root ? PTR_ERR(llite_root) : -ENOMEM;
@@ -194,8 +177,6 @@ out_debugfs:
out_cache:
kmem_cache_destroy(ll_inode_cachep);
kmem_cache_destroy(ll_file_data_slab);
- kmem_cache_destroy(ll_remote_perm_cachep);
- kmem_cache_destroy(ll_rmtperm_hash_cachep);
return rc;
}
@@ -213,10 +194,6 @@ static void __exit lustre_exit(void)
vvp_global_fini();
kmem_cache_destroy(ll_inode_cachep);
- kmem_cache_destroy(ll_rmtperm_hash_cachep);
-
- kmem_cache_destroy(ll_remote_perm_cachep);
-
kmem_cache_destroy(ll_file_data_slab);
}
diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c
index 3fc736ccf..8c8bdfe1a 100644
--- a/drivers/staging/lustre/lustre/llite/symlink.c
+++ b/drivers/staging/lustre/lustre/llite/symlink.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/llite/vvp_dev.c b/drivers/staging/lustre/lustre/llite/vvp_dev.c
index 47101de1c..e623216e9 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_dev.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_dev.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -150,8 +146,8 @@ struct lu_context_key vvp_session_key = {
.lct_fini = vvp_session_key_fini
};
-void *vvp_thread_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
+static void *vvp_thread_key_init(const struct lu_context *ctx,
+ struct lu_context_key *key)
{
struct vvp_thread_info *vti;
@@ -161,8 +157,8 @@ void *vvp_thread_key_init(const struct lu_context *ctx,
return vti;
}
-void vvp_thread_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
+static void vvp_thread_key_fini(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data)
{
struct vvp_thread_info *vti = data;
@@ -564,7 +560,7 @@ static int vvp_pgcache_show(struct seq_file *f, void *v)
env = cl_env_get(&refcheck);
if (!IS_ERR(env)) {
- pos = *(loff_t *) v;
+ pos = *(loff_t *)v;
vvp_pgcache_id_unpack(pos, &id);
sbi = f->private;
clob = vvp_pgcache_obj(env, &sbi->ll_cl->cd_lu_dev, &id);
diff --git a/drivers/staging/lustre/lustre/llite/vvp_internal.h b/drivers/staging/lustre/lustre/llite/vvp_internal.h
index 27b9b0a01..79fc42846 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_internal.h
+++ b/drivers/staging/lustre/lustre/llite/vvp_internal.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 5bf9592ae..94916dcc6 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -47,8 +43,8 @@
#include "llite_internal.h"
#include "vvp_internal.h"
-struct vvp_io *cl2vvp_io(const struct lu_env *env,
- const struct cl_io_slice *slice)
+static struct vvp_io *cl2vvp_io(const struct lu_env *env,
+ const struct cl_io_slice *slice)
{
struct vvp_io *vio;
@@ -954,7 +950,8 @@ static int vvp_io_write_start(const struct lu_env *env,
* out-of-order writes.
*/
ll_merge_attr(env, inode);
- pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
+ pos = i_size_read(inode);
+ io->u.ci_wr.wr.crw_pos = pos;
vio->vui_iocb->ki_pos = pos;
} else {
LASSERT(vio->vui_iocb->ki_pos == pos);
@@ -1259,7 +1256,7 @@ static int vvp_io_read_page(const struct lu_env *env,
return 0;
}
-void vvp_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
+static void vvp_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
{
CLOBINVRNT(env, ios->cis_io->ci_obj,
vvp_object_invariant(ios->cis_io->ci_obj));
diff --git a/drivers/staging/lustre/lustre/llite/vvp_lock.c b/drivers/staging/lustre/lustre/llite/vvp_lock.c
index f5bd6c22e..64be0c9df 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_lock.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_lock.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/llite/vvp_object.c b/drivers/staging/lustre/lustre/llite/vvp_object.c
index 18c9df7eb..2c520b0bf 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_object.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_object.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c
index 6cd2af7a9..2e566d90b 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_page.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_page.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/llite/vvp_req.c b/drivers/staging/lustre/lustre/llite/vvp_req.c
index fb886291a..9fe9d6c0a 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_req.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_req.c
@@ -60,10 +60,10 @@ static inline struct vvp_req *cl2vvp_req(const struct cl_req_slice *slice)
* - o_ioepoch,
*
*/
-void vvp_req_attr_set(const struct lu_env *env,
- const struct cl_req_slice *slice,
- const struct cl_object *obj,
- struct cl_req_attr *attr, u64 flags)
+static void vvp_req_attr_set(const struct lu_env *env,
+ const struct cl_req_slice *slice,
+ const struct cl_object *obj,
+ struct cl_req_attr *attr, u64 flags)
{
struct inode *inode;
struct obdo *oa;
@@ -87,8 +87,8 @@ void vvp_req_attr_set(const struct lu_env *env,
JOBSTATS_JOBID_SIZE);
}
-void vvp_req_completion(const struct lu_env *env,
- const struct cl_req_slice *slice, int ioret)
+static void vvp_req_completion(const struct lu_env *env,
+ const struct cl_req_slice *slice, int ioret)
{
struct vvp_req *vrq;
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index 608014b0d..98303cf85 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -111,11 +107,6 @@ int ll_setxattr_common(struct inode *inode, const char *name,
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ptlrpc_request *req = NULL;
int xattr_type, rc;
-#ifdef CONFIG_FS_POSIX_ACL
- struct rmtacl_ctl_entry *rce = NULL;
- posix_acl_xattr_header *new_value = NULL;
- ext_acl_xattr_header *acl = NULL;
-#endif
const char *pv = value;
xattr_type = get_xattr_type(name);
@@ -143,62 +134,9 @@ int ll_setxattr_common(struct inode *inode, const char *name,
strcmp(name, "security.selinux") == 0)
return -EOPNOTSUPP;
-#ifdef CONFIG_FS_POSIX_ACL
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
- (xattr_type == XATTR_ACL_ACCESS_T ||
- xattr_type == XATTR_ACL_DEFAULT_T)) {
- rce = rct_search(&sbi->ll_rct, current_pid());
- if (!rce ||
- (rce->rce_ops != RMT_LSETFACL &&
- rce->rce_ops != RMT_RSETFACL))
- return -EOPNOTSUPP;
-
- if (rce->rce_ops == RMT_LSETFACL) {
- struct eacl_entry *ee;
-
- ee = et_search_del(&sbi->ll_et, current_pid(),
- ll_inode2fid(inode), xattr_type);
- if (valid & OBD_MD_FLXATTR) {
- acl = lustre_acl_xattr_merge2ext(
- (posix_acl_xattr_header *)value,
- size, ee->ee_acl);
- if (IS_ERR(acl)) {
- ee_free(ee);
- return PTR_ERR(acl);
- }
- size = CFS_ACL_XATTR_SIZE(\
- le32_to_cpu(acl->a_count), \
- ext_acl_xattr);
- pv = (const char *)acl;
- }
- ee_free(ee);
- } else if (rce->rce_ops == RMT_RSETFACL) {
- rc = lustre_posix_acl_xattr_filter(
- (posix_acl_xattr_header *)value,
- size, &new_value);
- if (unlikely(rc < 0))
- return rc;
- size = rc;
-
- pv = (const char *)new_value;
- } else {
- return -EOPNOTSUPP;
- }
-
- valid |= rce_ops2valid(rce->rce_ops);
- }
-#endif
rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode),
valid, name, pv, size, 0, flags,
ll_i2suppgid(inode), &req);
-#ifdef CONFIG_FS_POSIX_ACL
- /*
- * Release the posix ACL space.
- */
- kfree(new_value);
- if (acl)
- lustre_ext_acl_xattr_free(acl);
-#endif
if (rc) {
if (rc == -EOPNOTSUPP && xattr_type == XATTR_USER_T) {
LCONSOLE_INFO("Disabling user_xattr feature because it is not supported on the server\n");
@@ -288,7 +226,6 @@ int ll_getxattr_common(struct inode *inode, const char *name,
struct mdt_body *body;
int xattr_type, rc;
void *xdata;
- struct rmtacl_ctl_entry *rce = NULL;
struct ll_inode_info *lli = ll_i2info(inode);
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
@@ -319,24 +256,11 @@ int ll_getxattr_common(struct inode *inode, const char *name,
return -EOPNOTSUPP;
#ifdef CONFIG_FS_POSIX_ACL
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
- (xattr_type == XATTR_ACL_ACCESS_T ||
- xattr_type == XATTR_ACL_DEFAULT_T)) {
- rce = rct_search(&sbi->ll_rct, current_pid());
- if (!rce ||
- (rce->rce_ops != RMT_LSETFACL &&
- rce->rce_ops != RMT_LGETFACL &&
- rce->rce_ops != RMT_RSETFACL &&
- rce->rce_ops != RMT_RGETFACL))
- return -EOPNOTSUPP;
- }
-
/* posix acl is under protection of LOOKUP lock. when calling to this,
* we just have path resolution to the target inode, so we have great
* chance that cached ACL is uptodate.
*/
- if (xattr_type == XATTR_ACL_ACCESS_T &&
- !(sbi->ll_flags & LL_SBI_RMT_CLIENT)) {
+ if (xattr_type == XATTR_ACL_ACCESS_T) {
struct posix_acl *acl;
spin_lock(&lli->lli_lock);
@@ -378,9 +302,7 @@ do_getxattr:
} else {
getxattr_nocache:
rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode),
- valid | (rce ? rce_ops2valid(rce->rce_ops) : 0),
- name, NULL, 0, size, 0, &req);
-
+ valid, name, NULL, 0, size, 0, &req);
if (rc < 0)
goto out_xattr;
@@ -417,25 +339,6 @@ getxattr_nocache:
rc = body->eadatasize;
}
-#ifdef CONFIG_FS_POSIX_ACL
- if (rce && rce->rce_ops == RMT_LSETFACL) {
- ext_acl_xattr_header *acl;
-
- acl = lustre_posix_acl_xattr_2ext(buffer, rc);
- if (IS_ERR(acl)) {
- rc = PTR_ERR(acl);
- goto out;
- }
-
- rc = ee_add(&sbi->ll_et, current_pid(), ll_inode2fid(inode),
- xattr_type, acl);
- if (unlikely(rc < 0)) {
- lustre_ext_acl_xattr_free(acl);
- goto out;
- }
- }
-#endif
-
out_xattr:
if (rc == -EOPNOTSUPP && xattr_type == XATTR_USER_T) {
LCONSOLE_INFO(
diff --git a/drivers/staging/lustre/lustre/llite/xattr_cache.c b/drivers/staging/lustre/lustre/llite/xattr_cache.c
index d7e17abbe..8089da814 100644
--- a/drivers/staging/lustre/lustre/llite/xattr_cache.c
+++ b/drivers/staging/lustre/lustre/llite/xattr_cache.c
@@ -288,8 +288,8 @@ static int ll_xattr_find_get_lock(struct inode *inode,
LCK_PR);
if (mode != 0) {
/* fake oit in mdc_revalidate_lock() manner */
- oit->d.lustre.it_lock_handle = lockh.cookie;
- oit->d.lustre.it_lock_mode = mode;
+ oit->it_lock_handle = lockh.cookie;
+ oit->it_lock_mode = mode;
goto out;
}
}
@@ -315,7 +315,7 @@ static int ll_xattr_find_get_lock(struct inode *inode,
return rc;
}
- *req = (struct ptlrpc_request *)oit->d.lustre.it_data;
+ *req = oit->it_request;
out:
down_write(&lli->lli_xattrs_list_rwsem);
mutex_unlock(&lli->lli_xattrs_enq_lock);
@@ -362,10 +362,10 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
goto out_maybe_drop;
}
- if (oit->d.lustre.it_status < 0) {
+ if (oit->it_status < 0) {
CDEBUG(D_CACHE, "getxattr intent returned %d for fid "DFID"\n",
- oit->d.lustre.it_status, PFID(ll_inode2fid(inode)));
- rc = oit->d.lustre.it_status;
+ oit->it_status, PFID(ll_inode2fid(inode)));
+ rc = oit->it_status;
/* xattr data is so large that we don't want to cache it */
if (rc == -ERANGE)
rc = -EAGAIN;
@@ -448,8 +448,8 @@ out_destroy:
up_write(&lli->lli_xattrs_list_rwsem);
ldlm_lock_decref_and_cancel((struct lustre_handle *)
- &oit->d.lustre.it_lock_handle,
- oit->d.lustre.it_lock_mode);
+ &oit->it_lock_handle,
+ oit->it_lock_mode);
goto out_no_unlock;
}
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_fld.c b/drivers/staging/lustre/lustre/lmv/lmv_fld.c
index 378691b2a..a3d170aa6 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_fld.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_fld.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_intent.c b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
index e0958eaed..2f58fdab8 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_intent.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -84,11 +80,11 @@ static int lmv_intent_remote(struct obd_export *exp, void *lmm,
/*
* We got LOOKUP lock, but we really need attrs.
*/
- pmode = it->d.lustre.it_lock_mode;
+ pmode = it->it_lock_mode;
if (pmode) {
- plock.cookie = it->d.lustre.it_lock_handle;
- it->d.lustre.it_lock_mode = 0;
- it->d.lustre.it_data = NULL;
+ plock.cookie = it->it_lock_handle;
+ it->it_lock_mode = 0;
+ it->it_request = NULL;
}
LASSERT(fid_is_sane(&body->fid1));
@@ -134,14 +130,14 @@ static int lmv_intent_remote(struct obd_export *exp, void *lmm,
* maintain dcache consistency. Thus drop UPDATE|PERM lock here
* and put LOOKUP in request.
*/
- if (it->d.lustre.it_lock_mode != 0) {
- it->d.lustre.it_remote_lock_handle =
- it->d.lustre.it_lock_handle;
- it->d.lustre.it_remote_lock_mode = it->d.lustre.it_lock_mode;
+ if (it->it_lock_mode != 0) {
+ it->it_remote_lock_handle =
+ it->it_lock_handle;
+ it->it_remote_lock_mode = it->it_lock_mode;
}
- it->d.lustre.it_lock_handle = plock.cookie;
- it->d.lustre.it_lock_mode = pmode;
+ it->it_lock_handle = plock.cookie;
+ it->it_lock_mode = pmode;
out_free_op_data:
kfree(op_data);
@@ -201,9 +197,9 @@ static int lmv_intent_open(struct obd_export *exp, struct md_op_data *op_data,
* Nothing is found, do not access body->fid1 as it is zero and thus
* pointless.
*/
- if ((it->d.lustre.it_disposition & DISP_LOOKUP_NEG) &&
- !(it->d.lustre.it_disposition & DISP_OPEN_CREATE) &&
- !(it->d.lustre.it_disposition & DISP_OPEN_OPEN))
+ if ((it->it_disposition & DISP_LOOKUP_NEG) &&
+ !(it->it_disposition & DISP_OPEN_CREATE) &&
+ !(it->it_disposition & DISP_OPEN_OPEN))
return rc;
body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY);
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_internal.h b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
index 7007e4c48..0beafc49b 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_internal.h
+++ b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index 9e31f6b03..0e1588a43 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -1683,7 +1679,7 @@ lmv_enqueue_remote(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
struct lustre_handle *lockh, void *lmm, int lmmsize,
__u64 extra_lock_flags)
{
- struct ptlrpc_request *req = it->d.lustre.it_data;
+ struct ptlrpc_request *req = it->it_request;
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
struct lustre_handle plock;
@@ -1705,11 +1701,11 @@ lmv_enqueue_remote(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
/*
* We got LOOKUP lock, but we really need attrs.
*/
- pmode = it->d.lustre.it_lock_mode;
+ pmode = it->it_lock_mode;
LASSERT(pmode != 0);
memcpy(&plock, lockh, sizeof(plock));
- it->d.lustre.it_lock_mode = 0;
- it->d.lustre.it_data = NULL;
+ it->it_lock_mode = 0;
+ it->it_request = NULL;
fid1 = body->fid1;
ptlrpc_req_finished(req);
@@ -2611,27 +2607,6 @@ static int lmv_clear_open_replay_data(struct obd_export *exp,
return md_clear_open_replay_data(tgt->ltd_exp, och);
}
-static int lmv_get_remote_perm(struct obd_export *exp,
- const struct lu_fid *fid,
- __u32 suppgid, struct ptlrpc_request **request)
-{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_tgt_desc *tgt;
- int rc;
-
- rc = lmv_check_connect(obd);
- if (rc)
- return rc;
-
- tgt = lmv_find_target(lmv, fid);
- if (IS_ERR(tgt))
- return PTR_ERR(tgt);
-
- rc = md_get_remote_perm(tgt->ltd_exp, fid, suppgid, request);
- return rc;
-}
-
static int lmv_intent_getattr_async(struct obd_export *exp,
struct md_enqueue_info *minfo,
struct ldlm_enqueue_info *einfo)
@@ -2686,7 +2661,7 @@ static int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt = lmv->tgts[0];
int rc = 0, i;
- __u64 curspace, curinodes;
+ __u64 curspace = 0, curinodes = 0;
if (!tgt || !tgt->ltd_exp || !tgt->ltd_active ||
!lmv->desc.ld_tgt_count) {
@@ -2699,7 +2674,6 @@ static int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
return rc;
}
- curspace = curinodes = 0;
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
int err;
@@ -2796,7 +2770,6 @@ static struct md_ops lmv_md_ops = {
.free_lustre_md = lmv_free_lustre_md,
.set_open_replay_data = lmv_set_open_replay_data,
.clear_open_replay_data = lmv_clear_open_replay_data,
- .get_remote_perm = lmv_get_remote_perm,
.intent_getattr_async = lmv_intent_getattr_async,
.revalidate_lock = lmv_revalidate_lock
};
diff --git a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
index b39e364a2..c29c361eb 100644
--- a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
+++ b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
index ac9744e88..9740568d9 100644
--- a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lov_dev.c b/drivers/staging/lustre/lustre/lov/lov_dev.c
index dae8e89bc..b1f260d43 100644
--- a/drivers/staging/lustre/lustre/lov/lov_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lov_dev.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lov_ea.c b/drivers/staging/lustre/lustre/lov/lov_ea.c
index 460f0fa5e..5053dead1 100644
--- a/drivers/staging/lustre/lustre/lov/lov_ea.c
+++ b/drivers/staging/lustre/lustre/lov/lov_ea.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lov_internal.h b/drivers/staging/lustre/lustre/lov/lov_internal.h
index eef9afac8..12bd511e8 100644
--- a/drivers/staging/lustre/lustre/lov/lov_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_internal.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c
index 86cb3f8f9..84032a510 100644
--- a/drivers/staging/lustre/lustre/lov/lov_io.c
+++ b/drivers/staging/lustre/lustre/lov/lov_io.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lov_lock.c b/drivers/staging/lustre/lustre/lov/lov_lock.c
index 1b203d18c..f3a0583f2 100644
--- a/drivers/staging/lustre/lustre/lov/lov_lock.c
+++ b/drivers/staging/lustre/lustre/lov/lov_lock.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lov_merge.c b/drivers/staging/lustre/lustre/lov/lov_merge.c
index 56ef41d17..b9c90865f 100644
--- a/drivers/staging/lustre/lustre/lov/lov_merge.c
+++ b/drivers/staging/lustre/lustre/lov/lov_merge.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c
index e15ef2ece..9b92d5522 100644
--- a/drivers/staging/lustre/lustre/lov/lov_obd.c
+++ b/drivers/staging/lustre/lustre/lov/lov_obd.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -896,6 +892,12 @@ static int lov_cleanup(struct obd_device *obd)
kfree(lov->lov_tgts);
lov->lov_tgt_size = 0;
}
+
+ if (lov->lov_cache) {
+ cl_cache_decref(lov->lov_cache);
+ lov->lov_cache = NULL;
+ }
+
return 0;
}
@@ -1772,7 +1774,8 @@ static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key,
fm_start = fiemap->fm_start;
fm_length = fiemap->fm_length;
/* Calculate start stripe, last stripe and length of mapping */
- actual_start_stripe = start_stripe = lov_stripe_number(lsm, fm_start);
+ start_stripe = lov_stripe_number(lsm, fm_start);
+ actual_start_stripe = start_stripe;
fm_end = (fm_length == ~0ULL ? fm_key->oa.o_size :
fm_start + fm_length - 1);
/* If fm_length != ~0ULL but fm_start+fm_length-1 exceeds file size */
@@ -2095,11 +2098,9 @@ static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp,
u32 count;
int i, rc = 0, err;
struct lov_tgt_desc *tgt;
- unsigned incr, check_uuid,
- do_inactive, no_set;
- unsigned next_id = 0, mds_con = 0;
+ unsigned int incr = 0, check_uuid = 0, do_inactive = 0, no_set = 0;
+ unsigned int next_id = 0, mds_con = 0;
- incr = check_uuid = do_inactive = no_set = 0;
if (!set) {
no_set = 1;
set = ptlrpc_prep_set();
@@ -2126,6 +2127,7 @@ static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp,
LASSERT(!lov->lov_cache);
lov->lov_cache = val;
do_inactive = 1;
+ cl_cache_incref(lov->lov_cache);
}
for (i = 0; i < count; i++, val = (char *)val + incr) {
diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c
index 561d493b2..f9621b0fd 100644
--- a/drivers/staging/lustre/lustre/lov/lov_object.c
+++ b/drivers/staging/lustre/lustre/lov/lov_object.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -185,8 +181,8 @@ static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
}
LU_OBJECT_DEBUG(mask, env, &stripe->co_lu,
- "stripe %d is already owned.\n", idx);
- LU_OBJECT_DEBUG(mask, env, old_obj, "owned.\n");
+ "stripe %d is already owned.", idx);
+ LU_OBJECT_DEBUG(mask, env, old_obj, "owned.");
LU_OBJECT_HEADER(mask, env, lov2lu(lov), "try to own.\n");
cl_object_put(env, stripe);
}
diff --git a/drivers/staging/lustre/lustre/lov/lov_offset.c b/drivers/staging/lustre/lustre/lov/lov_offset.c
index 9302f06c3..ecca74fbf 100644
--- a/drivers/staging/lustre/lustre/lov/lov_offset.c
+++ b/drivers/staging/lustre/lustre/lov/lov_offset.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -74,7 +70,7 @@ pgoff_t lov_stripe_pgoff(struct lov_stripe_md *lsm, pgoff_t stripe_index,
{
loff_t offset;
- offset = lov_stripe_size(lsm, stripe_index << PAGE_SHIFT, stripe);
+ offset = lov_stripe_size(lsm, (stripe_index << PAGE_SHIFT) + 1, stripe);
return offset >> PAGE_SHIFT;
}
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
index 0215ea54d..869ef41b1 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lov_page.c b/drivers/staging/lustre/lustre/lov/lov_page.c
index 0306f00c3..c17026f14 100644
--- a/drivers/staging/lustre/lustre/lov/lov_page.c
+++ b/drivers/staging/lustre/lustre/lov/lov_page.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c b/drivers/staging/lustre/lustre/lov/lov_pool.c
index 690292ece..4c2d21729 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pool.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pool.c
@@ -14,12 +14,8 @@
* in the LICENSE file that accompanied this code).
*
* You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see [sun.com URL with a
- * copy of GPLv2].
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c
index 1be4b921c..4099b51f8 100644
--- a/drivers/staging/lustre/lustre/lov/lov_request.c
+++ b/drivers/staging/lustre/lustre/lov/lov_request.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_dev.c b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
index 35f6b1d66..b519a1940 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_io.c b/drivers/staging/lustre/lustre/lov/lovsub_io.c
index 783ec687a..6a9820218 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_io.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_io.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_lock.c b/drivers/staging/lustre/lustre/lov/lovsub_lock.c
index e92edfb61..38f9b735c 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_lock.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_lock.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_object.c b/drivers/staging/lustre/lustre/lov/lovsub_object.c
index bcaae1e5b..fb2f2660b 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_object.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_object.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_page.c b/drivers/staging/lustre/lustre/lov/lovsub_page.c
index 9badedcce..b2e68c3e8 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_page.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_page.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/lov/lproc_lov.c b/drivers/staging/lustre/lustre/lov/lproc_lov.c
index 0dcb6b6a7..eb6d30d34 100644
--- a/drivers/staging/lustre/lustre/lov/lproc_lov.c
+++ b/drivers/staging/lustre/lustre/lov/lproc_lov.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
index 5c7a15dd7..98d15fb24 100644
--- a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
+++ b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_internal.h b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
index c5519aeb0..58f2841ca 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_internal.h
+++ b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
index 856c54e03..143bd7628 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -471,6 +467,18 @@ void mdc_close_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
mdc_setattr_pack_rec(rec, op_data);
+ /*
+ * The client will zero out local timestamps when losing the IBITS lock
+ * so any new RPC timestamps will update the client inode's timestamps.
+ * There was a defect on the server side which allowed the atime to be
+ * overwritten by a zeroed-out atime packed into the close RPC.
+ *
+ * Proactively clear the MDS_ATTR_ATIME flag in the RPC in this case
+ * to avoid zeroing the atime on old unpatched servers. See LU-8041.
+ */
+ if (rec->sa_atime == 0)
+ rec->sa_valid &= ~MDS_ATTR_ATIME;
+
mdc_ioepoch_pack(epoch, op_data);
mdc_hsm_release_pack(req, op_data);
}
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_locks.c b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
index 3b1bc9111..f48b58423 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_locks.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -54,61 +50,43 @@ struct mdc_getattr_args {
struct ldlm_enqueue_info *ga_einfo;
};
-int it_disposition(struct lookup_intent *it, int flag)
-{
- return it->d.lustre.it_disposition & flag;
-}
-EXPORT_SYMBOL(it_disposition);
-
-void it_set_disposition(struct lookup_intent *it, int flag)
-{
- it->d.lustre.it_disposition |= flag;
-}
-EXPORT_SYMBOL(it_set_disposition);
-
-void it_clear_disposition(struct lookup_intent *it, int flag)
-{
- it->d.lustre.it_disposition &= ~flag;
-}
-EXPORT_SYMBOL(it_clear_disposition);
-
int it_open_error(int phase, struct lookup_intent *it)
{
if (it_disposition(it, DISP_OPEN_LEASE)) {
if (phase >= DISP_OPEN_LEASE)
- return it->d.lustre.it_status;
+ return it->it_status;
else
return 0;
}
if (it_disposition(it, DISP_OPEN_OPEN)) {
if (phase >= DISP_OPEN_OPEN)
- return it->d.lustre.it_status;
+ return it->it_status;
else
return 0;
}
if (it_disposition(it, DISP_OPEN_CREATE)) {
if (phase >= DISP_OPEN_CREATE)
- return it->d.lustre.it_status;
+ return it->it_status;
else
return 0;
}
if (it_disposition(it, DISP_LOOKUP_EXECD)) {
if (phase >= DISP_LOOKUP_EXECD)
- return it->d.lustre.it_status;
+ return it->it_status;
else
return 0;
}
if (it_disposition(it, DISP_IT_EXECD)) {
if (phase >= DISP_IT_EXECD)
- return it->d.lustre.it_status;
+ return it->it_status;
else
return 0;
}
- CERROR("it disp: %X, status: %d\n", it->d.lustre.it_disposition,
- it->d.lustre.it_status);
+ CERROR("it disp: %X, status: %d\n", it->it_disposition,
+ it->it_status);
LBUG();
return 0;
}
@@ -347,10 +325,6 @@ static struct ptlrpc_request *mdc_intent_open_pack(struct obd_export *exp,
mdc_open_pack(req, op_data, it->it_create_mode, 0, it->it_flags, lmm,
lmmsize);
- /* for remote client, fetch remote perm for current user */
- if (client_is_remote(exp))
- req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
- sizeof(struct mdt_remote_perm));
ptlrpc_request_set_replen(req);
return req;
}
@@ -444,9 +418,7 @@ static struct ptlrpc_request *mdc_intent_getattr_pack(struct obd_export *exp,
struct obd_device *obddev = class_exp2obd(exp);
u64 valid = OBD_MD_FLGETATTR | OBD_MD_FLEASIZE |
OBD_MD_FLMODEASIZE | OBD_MD_FLDIREA |
- OBD_MD_MEA |
- (client_is_remote(exp) ?
- OBD_MD_FLRMTPERM : OBD_MD_FLACL);
+ OBD_MD_MEA | OBD_MD_FLACL;
struct ldlm_intent *lit;
int rc;
int easize;
@@ -478,9 +450,6 @@ static struct ptlrpc_request *mdc_intent_getattr_pack(struct obd_export *exp,
mdc_getattr_pack(req, valid, it->it_flags, op_data, easize);
req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, easize);
- if (client_is_remote(exp))
- req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
- sizeof(struct mdt_remote_perm));
ptlrpc_request_set_replen(req);
return req;
}
@@ -555,7 +524,6 @@ static int mdc_finish_enqueue(struct obd_export *exp,
struct req_capsule *pill = &req->rq_pill;
struct ldlm_request *lockreq;
struct ldlm_reply *lockrep;
- struct lustre_intent_data *intent = &it->d.lustre;
struct ldlm_lock *lock;
void *lvb_data = NULL;
int lvb_len = 0;
@@ -589,17 +557,17 @@ static int mdc_finish_enqueue(struct obd_export *exp,
lockrep = req_capsule_server_get(pill, &RMF_DLM_REP);
- intent->it_disposition = (int)lockrep->lock_policy_res1;
- intent->it_status = (int)lockrep->lock_policy_res2;
- intent->it_lock_mode = einfo->ei_mode;
- intent->it_lock_handle = lockh->cookie;
- intent->it_data = req;
+ it->it_disposition = (int)lockrep->lock_policy_res1;
+ it->it_status = (int)lockrep->lock_policy_res2;
+ it->it_lock_mode = einfo->ei_mode;
+ it->it_lock_handle = lockh->cookie;
+ it->it_request = req;
/* Technically speaking rq_transno must already be zero if
* it_status is in error, so the check is a bit redundant
*/
- if ((!req->rq_transno || intent->it_status < 0) && req->rq_replay)
- mdc_clear_replay_flag(req, intent->it_status);
+ if ((!req->rq_transno || it->it_status < 0) && req->rq_replay)
+ mdc_clear_replay_flag(req, it->it_status);
/* If we're doing an IT_OPEN which did not result in an actual
* successful open, then we need to remove the bit which saves
@@ -610,11 +578,11 @@ static int mdc_finish_enqueue(struct obd_export *exp,
* (bug 3440)
*/
if (it->it_op & IT_OPEN && req->rq_replay &&
- (!it_disposition(it, DISP_OPEN_OPEN) || intent->it_status != 0))
- mdc_clear_replay_flag(req, intent->it_status);
+ (!it_disposition(it, DISP_OPEN_OPEN) || it->it_status != 0))
+ mdc_clear_replay_flag(req, it->it_status);
DEBUG_REQ(D_RPCTRACE, req, "op: %d disposition: %x, status: %d",
- it->it_op, intent->it_disposition, intent->it_status);
+ it->it_op, it->it_disposition, it->it_status);
/* We know what to expect, so we do any byte flipping required here */
if (it->it_op & (IT_OPEN | IT_UNLINK | IT_LOOKUP | IT_GETATTR)) {
@@ -687,16 +655,6 @@ static int mdc_finish_enqueue(struct obd_export *exp,
memcpy(lmm, eadata, body->eadatasize);
}
}
-
- if (body->valid & OBD_MD_FLRMTPERM) {
- struct mdt_remote_perm *perm;
-
- LASSERT(client_is_remote(exp));
- perm = req_capsule_server_swab_get(pill, &RMF_ACL,
- lustre_swab_mdt_remote_perm);
- if (!perm)
- return -EPROTO;
- }
} else if (it->it_op & IT_LAYOUT) {
/* maybe the lock was granted right away and layout
* is packed into RMF_DLM_LVB of req
@@ -715,7 +673,7 @@ static int mdc_finish_enqueue(struct obd_export *exp,
if (lock && ldlm_has_layout(lock) && lvb_data) {
void *lmm;
- LDLM_DEBUG(lock, "layout lock returned by: %s, lvb_len: %d\n",
+ LDLM_DEBUG(lock, "layout lock returned by: %s, lvb_len: %d",
ldlm_it2str(it->it_op), lvb_len);
lmm = libcfs_kvzalloc(lvb_len, GFP_NOFS);
@@ -923,9 +881,9 @@ resend:
}
ptlrpc_req_finished(req);
- it->d.lustre.it_lock_handle = 0;
- it->d.lustre.it_lock_mode = 0;
- it->d.lustre.it_data = NULL;
+ it->it_lock_handle = 0;
+ it->it_lock_mode = 0;
+ it->it_request = NULL;
}
return rc;
@@ -949,8 +907,8 @@ static int mdc_finish_intent_lock(struct obd_export *exp,
/* The server failed before it even started executing the
* intent, i.e. because it couldn't unpack the request.
*/
- LASSERT(it->d.lustre.it_status != 0);
- return it->d.lustre.it_status;
+ LASSERT(it->it_status != 0);
+ return it->it_status;
}
rc = it_open_error(DISP_IT_EXECD, it);
if (rc)
@@ -1033,15 +991,15 @@ static int mdc_finish_intent_lock(struct obd_export *exp,
LDLM_IBITS, &policy, LCK_NL,
&old_lock, 0)) {
ldlm_lock_decref_and_cancel(lockh,
- it->d.lustre.it_lock_mode);
+ it->it_lock_mode);
memcpy(lockh, &old_lock, sizeof(old_lock));
- it->d.lustre.it_lock_handle = lockh->cookie;
+ it->it_lock_handle = lockh->cookie;
}
}
CDEBUG(D_DENTRY,
"D_IT dentry %.*s intent: %s status %d disp %x rc %d\n",
op_data->op_namelen, op_data->op_name, ldlm_it2str(it->it_op),
- it->d.lustre.it_status, it->d.lustre.it_disposition, rc);
+ it->it_status, it->it_disposition, rc);
return rc;
}
@@ -1057,8 +1015,8 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
ldlm_policy_data_t policy;
enum ldlm_mode mode;
- if (it->d.lustre.it_lock_handle) {
- lockh.cookie = it->d.lustre.it_lock_handle;
+ if (it->it_lock_handle) {
+ lockh.cookie = it->it_lock_handle;
mode = ldlm_revalidate_lock_handle(&lockh, bits);
} else {
fid_build_reg_res_name(fid, &res_id);
@@ -1099,11 +1057,11 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
}
if (mode) {
- it->d.lustre.it_lock_handle = lockh.cookie;
- it->d.lustre.it_lock_mode = mode;
+ it->it_lock_handle = lockh.cookie;
+ it->it_lock_mode = mode;
} else {
- it->d.lustre.it_lock_handle = 0;
- it->d.lustre.it_lock_mode = 0;
+ it->it_lock_handle = 0;
+ it->it_lock_mode = 0;
}
return !!mode;
@@ -1125,15 +1083,15 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
* ll_create/ll_open gets called.
*
* The server will return to us, in it_disposition, an indication of
- * exactly what d.lustre.it_status refers to.
+ * exactly what it_status refers to.
*
- * If DISP_OPEN_OPEN is set, then d.lustre.it_status refers to the open() call,
+ * If DISP_OPEN_OPEN is set, then it_status refers to the open() call,
* otherwise if DISP_OPEN_CREATE is set, then it status is the
* creation failure mode. In either case, one of DISP_LOOKUP_NEG or
* DISP_LOOKUP_POS will be set, indicating whether the child lookup
* was successful.
*
- * Else, if DISP_LOOKUP_EXECD then d.lustre.it_status is the rc of the
+ * Else, if DISP_LOOKUP_EXECD then it_status is the rc of the
* child lookup.
*/
int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
@@ -1166,7 +1124,7 @@ int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
* be called in revalidate_it if we already have a lock, let's
* verify that.
*/
- it->d.lustre.it_lock_handle = 0;
+ it->it_lock_handle = 0;
rc = mdc_revalidate_lock(exp, it, &op_data->op_fid2, NULL);
/* Only return failure if it was not GETATTR by cfid
* (from inode_revalidate)
@@ -1188,7 +1146,7 @@ int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
if (rc < 0)
return rc;
- *reqp = it->d.lustre.it_data;
+ *reqp = it->it_request;
rc = mdc_finish_intent_lock(exp, *reqp, op_data, it, &lockh);
return rc;
}
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_reint.c b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
index 4ef3db147..5dba2c813 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_reint.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -234,7 +230,7 @@ rebuild:
MDS_INODELOCK_UPDATE);
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_MDS_REINT_CREATE_RMT_ACL);
+ &RQF_MDS_REINT_CREATE_ACL);
if (!req) {
ldlm_lock_list_put(&cancels, l_bl_ast, count);
return -ENOMEM;
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index 86b744536..542801f04 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -150,16 +146,6 @@ static int mdc_getattr_common(struct obd_export *exp,
return -EPROTO;
}
- if (body->valid & OBD_MD_FLRMTPERM) {
- struct mdt_remote_perm *perm;
-
- LASSERT(client_is_remote(exp));
- perm = req_capsule_server_swab_get(pill, &RMF_ACL,
- lustre_swab_mdt_remote_perm);
- if (!perm)
- return -EPROTO;
- }
-
return 0;
}
@@ -190,11 +176,6 @@ static int mdc_getattr(struct obd_export *exp, struct md_op_data *op_data,
req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
op_data->op_mode);
- if (op_data->op_valid & OBD_MD_FLRMTPERM) {
- LASSERT(client_is_remote(exp));
- req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
- sizeof(struct mdt_remote_perm));
- }
ptlrpc_request_set_replen(req);
rc = mdc_getattr_common(exp, req);
@@ -434,7 +415,7 @@ static int mdc_unpack_acl(struct ptlrpc_request *req, struct lustre_md *md)
return rc;
}
- rc = posix_acl_valid(acl);
+ rc = posix_acl_valid(&init_user_ns, acl);
if (rc) {
CERROR("validate acl: %d\n", rc);
posix_acl_release(acl);
@@ -539,16 +520,7 @@ static int mdc_get_lustre_md(struct obd_export *exp,
}
rc = 0;
- if (md->body->valid & OBD_MD_FLRMTPERM) {
- /* remote permission */
- LASSERT(client_is_remote(exp));
- md->remote_perm = req_capsule_server_swab_get(pill, &RMF_ACL,
- lustre_swab_mdt_remote_perm);
- if (!md->remote_perm) {
- rc = -EPROTO;
- goto out;
- }
- } else if (md->body->valid & OBD_MD_FLACL) {
+ if (md->body->valid & OBD_MD_FLACL) {
/* for ACL, it's possible that FLACL is set but aclsize is zero.
* only when aclsize != 0 there's an actual segment for ACL
* in reply buffer.
@@ -665,7 +637,7 @@ int mdc_set_open_replay_data(struct obd_export *exp,
struct md_open_data *mod;
struct mdt_rec_create *rec;
struct mdt_body *body;
- struct ptlrpc_request *open_req = it->d.lustre.it_data;
+ struct ptlrpc_request *open_req = it->it_request;
struct obd_import *imp = open_req->rq_import;
if (!open_req->rq_replay)
@@ -1168,7 +1140,7 @@ static int mdc_ioc_hsm_progress(struct obd_export *exp,
goto out;
}
- mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, -1, 0);
+ mdc_pack_body(req, NULL, 0, 0, -1, 0);
/* Copy hsm_progress struct */
req_hpk = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_PROGRESS);
@@ -1202,7 +1174,7 @@ static int mdc_ioc_hsm_ct_register(struct obd_import *imp, __u32 archives)
goto out;
}
- mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, -1, 0);
+ mdc_pack_body(req, NULL, 0, 0, -1, 0);
/* Copy hsm_progress struct */
archive_mask = req_capsule_client_get(&req->rq_pill,
@@ -1241,7 +1213,7 @@ static int mdc_ioc_hsm_current_action(struct obd_export *exp,
return rc;
}
- mdc_pack_body(req, &op_data->op_fid1, OBD_MD_FLRMTPERM, 0,
+ mdc_pack_body(req, &op_data->op_fid1, 0, 0,
op_data->op_suppgids[0], 0);
ptlrpc_request_set_replen(req);
@@ -1277,7 +1249,7 @@ static int mdc_ioc_hsm_ct_unregister(struct obd_import *imp)
goto out;
}
- mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, -1, 0);
+ mdc_pack_body(req, NULL, 0, 0, -1, 0);
ptlrpc_request_set_replen(req);
@@ -1306,7 +1278,7 @@ static int mdc_ioc_hsm_state_get(struct obd_export *exp,
return rc;
}
- mdc_pack_body(req, &op_data->op_fid1, OBD_MD_FLRMTPERM, 0,
+ mdc_pack_body(req, &op_data->op_fid1, 0, 0,
op_data->op_suppgids[0], 0);
ptlrpc_request_set_replen(req);
@@ -1347,7 +1319,7 @@ static int mdc_ioc_hsm_state_set(struct obd_export *exp,
return rc;
}
- mdc_pack_body(req, &op_data->op_fid1, OBD_MD_FLRMTPERM, 0,
+ mdc_pack_body(req, &op_data->op_fid1, 0, 0,
op_data->op_suppgids[0], 0);
/* Copy states */
@@ -1394,7 +1366,7 @@ static int mdc_ioc_hsm_request(struct obd_export *exp,
return rc;
}
- mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, -1, 0);
+ mdc_pack_body(req, NULL, 0, 0, -1, 0);
/* Copy hsm_request struct */
req_hr = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_REQUEST);
@@ -1807,7 +1779,7 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
case IOC_OBD_STATFS: {
struct obd_statfs stat_buf = {0};
- if (*((__u32 *) data->ioc_inlbuf2) != 0) {
+ if (*((__u32 *)data->ioc_inlbuf2) != 0) {
rc = -ENODEV;
goto out;
}
@@ -2001,7 +1973,7 @@ static int mdc_hsm_copytool_send(int len, void *val)
if (len < sizeof(*lh) + sizeof(*hal)) {
CERROR("Short HSM message %d < %d\n", len,
- (int) (sizeof(*lh) + sizeof(*hal)));
+ (int)(sizeof(*lh) + sizeof(*hal)));
return -EPROTO;
}
if (lh->kuc_magic == __swab16(KUC_MAGIC)) {
@@ -2432,41 +2404,6 @@ static int mdc_process_config(struct obd_device *obd, u32 len, void *buf)
return rc;
}
-/* get remote permission for current user on fid */
-static int mdc_get_remote_perm(struct obd_export *exp, const struct lu_fid *fid,
- __u32 suppgid, struct ptlrpc_request **request)
-{
- struct ptlrpc_request *req;
- int rc;
-
- LASSERT(client_is_remote(exp));
-
- *request = NULL;
- req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR);
- if (!req)
- return -ENOMEM;
-
- rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR);
- if (rc) {
- ptlrpc_request_free(req);
- return rc;
- }
-
- mdc_pack_body(req, fid, OBD_MD_FLRMTPERM, 0, suppgid, 0);
-
- req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
- sizeof(struct mdt_remote_perm));
-
- ptlrpc_request_set_replen(req);
-
- rc = ptlrpc_queue_wait(req);
- if (rc)
- ptlrpc_req_finished(req);
- else
- *request = req;
- return rc;
-}
-
static struct obd_ops mdc_obd_ops = {
.owner = THIS_MODULE,
.setup = mdc_setup,
@@ -2518,7 +2455,6 @@ static struct md_ops mdc_md_ops = {
.free_lustre_md = mdc_free_lustre_md,
.set_open_replay_data = mdc_set_open_replay_data,
.clear_open_replay_data = mdc_clear_open_replay_data,
- .get_remote_perm = mdc_get_remote_perm,
.intent_getattr_async = mdc_intent_getattr_async,
.revalidate_lock = mdc_revalidate_lock
};
diff --git a/drivers/staging/lustre/lustre/mgc/lproc_mgc.c b/drivers/staging/lustre/lustre/mgc/lproc_mgc.c
index 8d5bc5a75..0735220b2 100644
--- a/drivers/staging/lustre/lustre/mgc/lproc_mgc.c
+++ b/drivers/staging/lustre/lustre/mgc/lproc_mgc.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_internal.h b/drivers/staging/lustre/lustre/mgc/mgc_internal.h
index 82fb8f46e..f146f7521 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_internal.h
+++ b/drivers/staging/lustre/lustre/mgc/mgc_internal.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index 2311a437c..9d0bd4745 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -500,7 +496,9 @@ static void do_requeue(struct config_llog_data *cld)
* export which is being disconnected. Take the client
* semaphore to make the check non-racy.
*/
- down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
+ down_read_nested(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem,
+ OBD_CLI_SEM_MGC);
+
if (cld->cld_mgcexp->exp_obd->u.cli.cl_conn_count != 0) {
int rc;
@@ -1034,7 +1032,7 @@ static int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp,
rc = sptlrpc_parse_flavor(val, &flvr);
if (rc) {
CERROR("invalid sptlrpc flavor %s to MGS\n",
- (char *) val);
+ (char *)val);
return rc;
}
@@ -1050,7 +1048,7 @@ static int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp,
sptlrpc_flavor2name(&cli->cl_flvr_mgc,
str, sizeof(str));
LCONSOLE_ERROR("asking sptlrpc flavor %s to MGS but currently %s is in use\n",
- (char *) val, str);
+ (char *)val, str);
rc = -EPERM;
}
return rc;
diff --git a/drivers/staging/lustre/lustre/obdclass/Makefile b/drivers/staging/lustre/lustre/obdclass/Makefile
index c404eb386..df7e47f35 100644
--- a/drivers/staging/lustre/lustre/obdclass/Makefile
+++ b/drivers/staging/lustre/lustre/obdclass/Makefile
@@ -5,5 +5,4 @@ obdclass-y := linux/linux-module.o linux/linux-obdo.o linux/linux-sysctl.o \
genops.o uuid.o lprocfs_status.o lprocfs_counters.o \
lustre_handles.o lustre_peer.o statfs_pack.o \
obdo.o obd_config.o obd_mount.o lu_object.o lu_ref.o \
- cl_object.o cl_page.o cl_lock.o cl_io.o \
- acl.o kernelcomm.o
+ cl_object.o cl_page.o cl_lock.o cl_io.o kernelcomm.o
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_internal.h b/drivers/staging/lustre/lustre/obdclass/cl_internal.h
index 7eb0ad7b3..e866754a4 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_internal.h
+++ b/drivers/staging/lustre/lustre/obdclass/cl_internal.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_io.c b/drivers/staging/lustre/lustre/obdclass/cl_io.c
index 583fb5f33..e72f1fc00 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_io.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_io.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_lock.c b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
index 26a576b63..9d7b5939b 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_lock.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_object.c b/drivers/staging/lustre/lustre/obdclass/cl_object.c
index 5940f3031..91a5806d0 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_object.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -577,7 +573,7 @@ static inline struct cl_env *cl_env_fetch(void)
{
struct cl_env *cle;
- cle = cfs_hash_lookup(cl_env_hash, (void *) (long) current->pid);
+ cle = cfs_hash_lookup(cl_env_hash, (void *)(long)current->pid);
LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
return cle;
}
@@ -588,7 +584,7 @@ static inline void cl_env_attach(struct cl_env *cle)
int rc;
LASSERT(!cle->ce_owner);
- cle->ce_owner = (void *) (long) current->pid;
+ cle->ce_owner = (void *)(long)current->pid;
rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
&cle->ce_node);
LASSERT(rc == 0);
@@ -599,7 +595,7 @@ static inline void cl_env_do_detach(struct cl_env *cle)
{
void *cookie;
- LASSERT(cle->ce_owner == (void *) (long) current->pid);
+ LASSERT(cle->ce_owner == (void *)(long)current->pid);
cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
&cle->ce_node);
LASSERT(cookie == cle);
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c
index b754f516e..db2dc6b39 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_page.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -1076,3 +1072,49 @@ void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
slice->cpl_page = page;
}
EXPORT_SYMBOL(cl_page_slice_add);
+
+/**
+ * Allocate and initialize cl_cache, called by ll_init_sbi().
+ */
+struct cl_client_cache *cl_cache_init(unsigned long lru_page_max)
+{
+ struct cl_client_cache *cache = NULL;
+
+ cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+ if (!cache)
+ return NULL;
+
+ /* Initialize cache data */
+ atomic_set(&cache->ccc_users, 1);
+ cache->ccc_lru_max = lru_page_max;
+ atomic_set(&cache->ccc_lru_left, lru_page_max);
+ spin_lock_init(&cache->ccc_lru_lock);
+ INIT_LIST_HEAD(&cache->ccc_lru);
+
+ atomic_set(&cache->ccc_unstable_nr, 0);
+ init_waitqueue_head(&cache->ccc_unstable_waitq);
+
+ return cache;
+}
+EXPORT_SYMBOL(cl_cache_init);
+
+/**
+ * Increase cl_cache refcount
+ */
+void cl_cache_incref(struct cl_client_cache *cache)
+{
+ atomic_inc(&cache->ccc_users);
+}
+EXPORT_SYMBOL(cl_cache_incref);
+
+/**
+ * Decrease the cl_cache refcount and free the cache once it drops
+ * to zero. Since llite, lov and osc each hold a cl_cache reference,
+ * the final free cannot race with a concurrent user. (LU-6173)
+ */
+void cl_cache_decref(struct cl_client_cache *cache)
+{
+ if (atomic_dec_and_test(&cache->ccc_users))
+ kfree(cache);
+}
+EXPORT_SYMBOL(cl_cache_decref);
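
The three exports above give struct cl_client_cache a conventional refcounted lifecycle: cl_cache_init() returns an object with one reference owned by the caller, each layer that stores the pointer takes its own with cl_cache_incref(), and the last cl_cache_decref() frees it. A hedged usage sketch; the my_sb/my_cli containers are illustrative, not the real llite/osc structures.

	/* cl_cache_init/incref/decref are declared via cl_object.h. */
	struct my_sb  { struct cl_client_cache *ll_cache; };
	struct my_cli { struct cl_client_cache *cl_cache; };

	static int cache_setup(struct my_sb *sbi, struct my_cli *cli,
			       unsigned long lru_page_max)
	{
		sbi->ll_cache = cl_cache_init(lru_page_max);	/* ref #1 */
		if (!sbi->ll_cache)
			return -ENOMEM;

		cli->cl_cache = sbi->ll_cache;
		cl_cache_incref(cli->cl_cache);			/* ref #2 */
		return 0;
	}

	static void cache_teardown(struct my_sb *sbi, struct my_cli *cli)
	{
		/* Drop in any order; kfree() runs on the final decref. */
		cl_cache_decref(cli->cl_cache);
		cl_cache_decref(sbi->ll_cache);
	}
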
diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c
index f48816af8..d9d2a1952 100644
--- a/drivers/staging/lustre/lustre/obdclass/class_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/debug.c b/drivers/staging/lustre/lustre/obdclass/debug.c
index e4edfb2c0..8acf67239 100644
--- a/drivers/staging/lustre/lustre/obdclass/debug.c
+++ b/drivers/staging/lustre/lustre/obdclass/debug.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c
index d95f11d62..99c2da632 100644
--- a/drivers/staging/lustre/lustre/obdclass/genops.c
+++ b/drivers/staging/lustre/lustre/obdclass/genops.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/kernelcomm.c b/drivers/staging/lustre/lustre/obdclass/kernelcomm.c
index 8405eccda..a0f65c470 100644
--- a/drivers/staging/lustre/lustre/obdclass/kernelcomm.c
+++ b/drivers/staging/lustre/lustre/obdclass/kernelcomm.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
index 2cd452246..33342bfcc 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
index b41b65e2f..c6cc6a766 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
index e6bf414a4..8f70dd268 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/llog.c b/drivers/staging/lustre/lustre/obdclass/llog.c
index 79194d8cb..1784ca063 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -123,8 +119,10 @@ static int llog_read_header(const struct lu_env *env,
handle->lgh_last_idx = 0; /* header is record with index 0 */
llh->llh_count = 1; /* for the header record */
llh->llh_hdr.lrh_type = LLOG_HDR_MAGIC;
- llh->llh_hdr.lrh_len = llh->llh_tail.lrt_len = LLOG_CHUNK_SIZE;
- llh->llh_hdr.lrh_index = llh->llh_tail.lrt_index = 0;
+ llh->llh_hdr.lrh_len = LLOG_CHUNK_SIZE;
+ llh->llh_tail.lrt_len = LLOG_CHUNK_SIZE;
+ llh->llh_hdr.lrh_index = 0;
+ llh->llh_tail.lrt_index = 0;
llh->llh_timestamp = ktime_get_real_seconds();
if (uuid)
memcpy(&llh->llh_tgtuuid, uuid,
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_cat.c b/drivers/staging/lustre/lustre/obdclass/llog_cat.c
index c27d4ec1d..a82a29502 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_cat.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_cat.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_internal.h b/drivers/staging/lustre/lustre/obdclass/llog_internal.h
index 7fb48dda3..f7949525d 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_internal.h
+++ b/drivers/staging/lustre/lustre/obdclass/llog_internal.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_obd.c b/drivers/staging/lustre/lustre/obdclass/llog_obd.c
index 826623f52..6ace7e097 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_obd.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_swab.c b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
index 967ba2e1b..f7b9b1903 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_swab.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
index 5a1eae1de..279b625f1 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index e04385760..9b03059f3 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_ref.c b/drivers/staging/lustre/lustre/obdclass/lu_ref.c
index 993697b66..e9f6040d1 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_ref.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_ref.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_handles.c b/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
index 403ceea06..082f530c5 100644
--- a/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
+++ b/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
index b1abe023b..5974a9bf7 100644
--- a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
+++ b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c
index cb1d65c3d..0eab12365 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_config.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_config.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -1021,8 +1017,8 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars,
/* Search proc entries */
while (lvars[j].name) {
var = &lvars[j];
- if (!class_match_param(key, var->name, NULL)
- && keylen == strlen(var->name)) {
+ if (!class_match_param(key, var->name, NULL) &&
+ keylen == strlen(var->name)) {
matched++;
rc = -EROFS;
if (var->fops && var->fops->write) {
@@ -1077,7 +1073,7 @@ int class_config_llog_handler(const struct lu_env *env,
{
struct config_llog_instance *clli = data;
int cfg_len = rec->lrh_len;
- char *cfg_buf = (char *) (rec + 1);
+ char *cfg_buf = (char *)(rec + 1);
int rc = 0;
switch (rec->lrh_type) {
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
index e0c90adc7..aa84a50e9 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -192,7 +188,7 @@ static int lustre_start_simple(char *obdname, char *type, char *uuid,
return rc;
}
-DEFINE_MUTEX(mgc_start_lock);
+static DEFINE_MUTEX(mgc_start_lock);
/** Set up a mgc obd to process startup logs
*
diff --git a/drivers/staging/lustre/lustre/obdclass/obdo.c b/drivers/staging/lustre/lustre/obdclass/obdo.c
index 748e33f01..8583a4a8c 100644
--- a/drivers/staging/lustre/lustre/obdclass/obdo.c
+++ b/drivers/staging/lustre/lustre/obdclass/obdo.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/statfs_pack.c b/drivers/staging/lustre/lustre/obdclass/statfs_pack.c
index fb4e3ae84..4bad1fa27 100644
--- a/drivers/staging/lustre/lustre/obdclass/statfs_pack.c
+++ b/drivers/staging/lustre/lustre/obdclass/statfs_pack.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdclass/uuid.c b/drivers/staging/lustre/lustre/obdclass/uuid.c
index b0b0157a6..abd9b1ae7 100644
--- a/drivers/staging/lustre/lustre/obdclass/uuid.c
+++ b/drivers/staging/lustre/lustre/obdclass/uuid.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 91ef06f17..5b29c4a44 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c
index 33a113213..7e83d395b 100644
--- a/drivers/staging/lustre/lustre/osc/lproc_osc.c
+++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index 5a14bea96..d01113580 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -127,9 +123,9 @@ static const char *oes_strings[] = {
/* ----- part 4 ----- */ \
## __VA_ARGS__); \
if (lvl == D_ERROR && __ext->oe_dlmlock) \
- LDLM_ERROR(__ext->oe_dlmlock, "extent: %p\n", __ext); \
+ LDLM_ERROR(__ext->oe_dlmlock, "extent: %p", __ext); \
else \
- LDLM_DEBUG(__ext->oe_dlmlock, "extent: %p\n", __ext); \
+ LDLM_DEBUG(__ext->oe_dlmlock, "extent: %p", __ext); \
} while (0)
#undef EASSERTF
@@ -1868,7 +1864,8 @@ void osc_dec_unstable_pages(struct ptlrpc_request *req)
LASSERT(page_count >= 0);
for (i = 0; i < page_count; i++)
- dec_zone_page_state(desc->bd_iov[i].kiov_page, NR_UNSTABLE_NFS);
+ dec_node_page_state(desc->bd_iov[i].kiov_page,
+ NR_UNSTABLE_NFS);
atomic_sub(page_count, &cli->cl_cache->ccc_unstable_nr);
LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
@@ -1902,7 +1899,8 @@ void osc_inc_unstable_pages(struct ptlrpc_request *req)
LASSERT(page_count >= 0);
for (i = 0; i < page_count; i++)
- inc_zone_page_state(desc->bd_iov[i].kiov_page, NR_UNSTABLE_NFS);
+ inc_node_page_state(desc->bd_iov[i].kiov_page,
+ NR_UNSTABLE_NFS);
LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
atomic_add(page_count, &cli->cl_cache->ccc_unstable_nr);
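
These two hunks follow the 4.8 mm rework that moved NR_UNSTABLE_NFS from per-zone to per-node vmstat counters, so the zone helpers no longer exist for this item. The replacement pattern, as a sketch:

	#include <linux/mm.h>
	#include <linux/vmstat.h>

	/* Account a page as unstable writeback via the node counters that
	 * replaced the zone counters for NR_UNSTABLE_NFS in 4.8. */
	static void mark_unstable(struct page *page, bool set)
	{
		if (set)
			inc_node_page_state(page, NR_UNSTABLE_NFS);
		else
			dec_node_page_state(page, NR_UNSTABLE_NFS);
	}
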
@@ -2371,7 +2369,7 @@ int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
oap->oap_obj_off = offset;
LASSERT(!(offset & ~PAGE_MASK));
- if (!client_is_remote(exp) && capable(CFS_CAP_SYS_RESOURCE))
+ if (capable(CFS_CAP_SYS_RESOURCE))
oap->oap_brw_flags = OBD_BRW_NOQUOTA;
INIT_LIST_HEAD(&oap->oap_pending_item);
@@ -2410,8 +2408,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
/* Set the OBD_BRW_SRVLOCK before the page is queued. */
brw_flags |= ops->ops_srvlock ? OBD_BRW_SRVLOCK : 0;
- if (!client_is_remote(osc_export(osc)) &&
- capable(CFS_CAP_SYS_RESOURCE)) {
+ if (capable(CFS_CAP_SYS_RESOURCE)) {
brw_flags |= OBD_BRW_NOQUOTA;
cmd |= OBD_BRW_NOQUOTA;
}
@@ -2773,7 +2770,8 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
ext->oe_sync = 1;
ext->oe_urgent = 1;
ext->oe_start = start;
- ext->oe_end = ext->oe_max_end = end;
+ ext->oe_end = end;
+ ext->oe_max_end = end;
ext->oe_obj = obj;
ext->oe_srvlock = !!(brw_flags & OBD_BRW_SRVLOCK);
ext->oe_nr_pages = page_count;
@@ -3308,7 +3306,8 @@ int osc_lock_discard_pages(const struct lu_env *env, struct osc_object *osc,
goto out;
cb = mode == CLM_READ ? check_and_discard_cb : discard_cb;
- info->oti_fn_index = info->oti_next_index = start;
+ info->oti_fn_index = start;
+ info->oti_next_index = start;
do {
res = osc_page_gang_lookup(env, io, osc,
info->oti_next_index, end, cb, osc);
diff --git a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
index ae19d396b..c8c3f1ca7 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -66,7 +62,7 @@ struct osc_io {
/** super class */
struct cl_io_slice oi_cl;
/** true if this io is lockless. */
- int oi_lockless;
+ unsigned int oi_lockless;
/** how many LRU pages are reserved for this IO */
int oi_lru_reserved;
@@ -356,11 +352,6 @@ struct osc_page {
*/
unsigned ops_transfer_pinned:1,
/**
- * True for a `temporary page' created by read-ahead code, probably
- * outside of any DLM lock.
- */
- ops_temp:1,
- /**
* in LRU?
*/
ops_in_lru:1,
diff --git a/drivers/staging/lustre/lustre/osc/osc_dev.c b/drivers/staging/lustre/lustre/osc/osc_dev.c
index d4fe507f1..83d30c135 100644
--- a/drivers/staging/lustre/lustre/osc/osc_dev.c
+++ b/drivers/staging/lustre/lustre/osc/osc_dev.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/osc/osc_internal.h b/drivers/staging/lustre/lustre/osc/osc_internal.h
index 7fad82781..7a27f0961 100644
--- a/drivers/staging/lustre/lustre/osc/osc_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_internal.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/osc/osc_io.c b/drivers/staging/lustre/lustre/osc/osc_io.c
index d534b0e0e..6e3dcd389 100644
--- a/drivers/staging/lustre/lustre/osc/osc_io.c
+++ b/drivers/staging/lustre/lustre/osc/osc_io.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -221,7 +217,8 @@ static void osc_page_touch_at(const struct lu_env *env,
kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms,
loi->loi_lvb.lvb_size);
- attr->cat_mtime = attr->cat_ctime = LTIME_S(CURRENT_TIME);
+ attr->cat_ctime = LTIME_S(CURRENT_TIME);
+ attr->cat_mtime = attr->cat_ctime;
valid = CAT_MTIME | CAT_CTIME;
if (kms > loi->loi_kms) {
attr->cat_kms = kms;
@@ -458,7 +455,8 @@ static int osc_io_setattr_start(const struct lu_env *env,
unsigned int cl_valid = 0;
if (ia_valid & ATTR_SIZE) {
- attr->cat_size = attr->cat_kms = size;
+ attr->cat_size = size;
+ attr->cat_kms = size;
cl_valid = CAT_SIZE | CAT_KMS;
}
if (ia_valid & ATTR_MTIME_SET) {
@@ -526,7 +524,8 @@ static void osc_io_setattr_end(const struct lu_env *env,
if (cbargs->opc_rpc_sent) {
wait_for_completion(&cbargs->opc_sync);
- result = io->ci_result = cbargs->opc_rc;
+ result = cbargs->opc_rc;
+ io->ci_result = cbargs->opc_rc;
}
if (result == 0) {
if (oio->oi_lockless) {
@@ -575,7 +574,8 @@ static int osc_io_write_start(const struct lu_env *env,
OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_SETTIME, 1);
cl_object_attr_lock(obj);
- attr->cat_mtime = attr->cat_ctime = ktime_get_real_seconds();
+ attr->cat_ctime = ktime_get_real_seconds();
+ attr->cat_mtime = attr->cat_ctime;
rc = cl_object_attr_set(env, obj, attr, CAT_MTIME | CAT_CTIME);
cl_object_attr_unlock(obj);
diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c
index 16f9cd9d3..717d3ffb6 100644
--- a/drivers/staging/lustre/lustre/osc/osc_lock.c
+++ b/drivers/staging/lustre/lustre/osc/osc_lock.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -638,11 +634,10 @@ static int weigh_cb(const struct lu_env *env, struct cl_io *io,
if (cl_page_is_vmlocked(env, page) ||
PageDirty(page->cp_vmpage) || PageWriteback(page->cp_vmpage)
- ) {
- (*(unsigned long *)cbdata)++;
+ )
return CLP_GANG_ABORT;
- }
+ *(pgoff_t *)cbdata = osc_index(ops) + 1;
return CLP_GANG_OKAY;
}
@@ -652,7 +647,7 @@ static unsigned long osc_lock_weight(const struct lu_env *env,
{
struct cl_io *io = &osc_env_info(env)->oti_io;
struct cl_object *obj = cl_object_top(&oscobj->oo_cl);
- unsigned long npages = 0;
+ pgoff_t page_index;
int result;
io->ci_obj = obj;
@@ -661,11 +656,12 @@ static unsigned long osc_lock_weight(const struct lu_env *env,
if (result != 0)
return result;
+ page_index = cl_index(obj, extent->start);
do {
result = osc_page_gang_lookup(env, io, oscobj,
- cl_index(obj, extent->start),
+ page_index,
cl_index(obj, extent->end),
- weigh_cb, (void *)&npages);
+ weigh_cb, (void *)&page_index);
if (result == CLP_GANG_ABORT)
break;
if (result == CLP_GANG_RESCHED)
@@ -673,7 +669,7 @@ static unsigned long osc_lock_weight(const struct lu_env *env,
} while (result != CLP_GANG_OKAY);
cl_io_fini(env, io);
- return npages;
+ return result == CLP_GANG_ABORT ? 1 : 0;
}
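
weigh_cb() no longer counts pages: any page that is vmlocked, dirty, or under writeback aborts the gang lookup, and osc_lock_weight() collapses the outcome to a 0/1 weight, with cbdata now carrying the resume index instead of a counter. A condensed sketch of the new logic; the enum mirrors the CLP_GANG_* values used above.

	enum { CLP_GANG_OKAY, CLP_GANG_ABORT };	/* stand-in values */

	static int weigh_one(bool page_busy, unsigned long index,
			     unsigned long *resume)
	{
		if (page_busy)
			return CLP_GANG_ABORT;	/* lock is "heavy" */
		*resume = index + 1;		/* restart point after resched */
		return CLP_GANG_OKAY;
	}

	/* Caller: the lock weight is now binary. */
	static unsigned long weight_from(int result)
	{
		return result == CLP_GANG_ABORT ? 1 : 0;
	}
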
/**
@@ -703,7 +699,7 @@ unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
obj = dlmlock->l_ast_data;
- if (obj) {
+ if (!obj) {
weight = 1;
goto out;
}
@@ -1120,7 +1116,8 @@ static void osc_lock_set_writer(const struct lu_env *env,
}
} else {
LASSERT(cl_io_is_mkwrite(io));
- io_start = io_end = io->u.ci_fault.ft_index;
+ io_start = io->u.ci_fault.ft_index;
+ io_end = io->u.ci_fault.ft_index;
}
if (descr->cld_mode >= CLM_WRITE &&
@@ -1171,7 +1168,7 @@ int osc_lock_init(const struct lu_env *env,
osc_lock_set_writer(env, io, obj, oscl);
- LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx\n",
+ LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx",
lock, oscl, oscl->ols_flags);
return 0;
diff --git a/drivers/staging/lustre/lustre/osc/osc_object.c b/drivers/staging/lustre/lustre/osc/osc_object.c
index 738ab10ab..d211d1905 100644
--- a/drivers/staging/lustre/lustre/osc/osc_object.c
+++ b/drivers/staging/lustre/lustre/osc/osc_object.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index c29c2eabe..355f496a2 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -52,13 +48,6 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
* @{
*/
-static int osc_page_protected(const struct lu_env *env,
- const struct osc_page *opg,
- enum cl_lock_mode mode, int unref)
-{
- return 1;
-}
-
/*****************************************************************************
*
* Page operations.
@@ -110,8 +99,6 @@ int osc_page_cache_add(const struct lu_env *env,
struct osc_page *opg = cl2osc_page(slice);
int result;
- LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 0));
-
osc_page_transfer_get(opg, "transfer\0cache");
result = osc_queue_async_io(env, io, opg);
if (result != 0)
@@ -214,8 +201,6 @@ static void osc_page_delete(const struct lu_env *env,
struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
int rc;
- LINVRNT(opg->ops_temp || osc_page_protected(env, opg, CLM_READ, 1));
-
CDEBUG(D_TRACE, "%p\n", opg);
osc_page_transfer_put(env, opg);
rc = osc_teardown_async_page(env, obj, opg);
@@ -254,8 +239,6 @@ static void osc_page_clip(const struct lu_env *env,
struct osc_page *opg = cl2osc_page(slice);
struct osc_async_page *oap = &opg->ops_oap;
- LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));
-
opg->ops_from = from;
opg->ops_to = to;
spin_lock(&oap->oap_lock);
@@ -269,8 +252,6 @@ static int osc_page_cancel(const struct lu_env *env,
struct osc_page *opg = cl2osc_page(slice);
int rc = 0;
- LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));
-
/* Check if the transferring against this page
* is completed, or not even queued.
*/
@@ -320,10 +301,6 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
cl_page_slice_add(page, &opg->ops_cl, obj, index,
&osc_page_ops);
}
- /*
- * Cannot assert osc_page_protected() here as read-ahead
- * creates temporary pages outside of a lock.
- */
/* ops_inflight and ops_lru are the same field, but it doesn't
* hurt to initialize it twice :-)
*/
@@ -380,10 +357,6 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
enum cl_req_type crt, int brw_flags)
{
struct osc_async_page *oap = &opg->ops_oap;
- struct osc_object *obj = oap->oap_obj;
-
- LINVRNT(osc_page_protected(env, opg,
- crt == CRT_WRITE ? CLM_WRITE : CLM_READ, 1));
LASSERTF(oap->oap_magic == OAP_MAGIC, "Bad oap magic: oap %p, magic 0x%x\n",
oap, oap->oap_magic);
@@ -398,8 +371,7 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
if (osc_over_unstable_soft_limit(oap->oap_cli))
oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
- if (!client_is_remote(osc_export(obj)) &&
- capable(CFS_CAP_SYS_RESOURCE)) {
+ if (capable(CFS_CAP_SYS_RESOURCE)) {
oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
oap->oap_cmd |= OBD_BRW_NOQUOTA;
}
@@ -440,7 +412,7 @@ static int osc_cache_too_much(struct client_obd *cli)
int pages = atomic_read(&cli->cl_lru_in_list);
unsigned long budget;
- budget = cache->ccc_lru_max / atomic_read(&cache->ccc_users);
+ budget = cache->ccc_lru_max / (atomic_read(&cache->ccc_users) - 2);
/* if it's going to run out LRU slots, we should free some, but not
* too much to maintain fairness among OSCs.
@@ -740,7 +712,7 @@ int osc_lru_reclaim(struct client_obd *cli)
cache->ccc_lru_shrinkers++;
list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
- max_scans = atomic_read(&cache->ccc_users);
+ max_scans = atomic_read(&cache->ccc_users) - 2;
while (--max_scans > 0 && !list_empty(&cache->ccc_lru)) {
cli = list_entry(cache->ccc_lru.next, struct client_obd,
cl_lru_osc);
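
Both budget computations above now divide by ccc_users - 2, presumably because after this series the cache's user count also includes the llite and lov references taken via cl_cache_incref(), leaving only the OSC references competing for LRU slots. The arithmetic as a sketch (the "- 2" interpretation is an assumption, not spelled out in the patch):

	static unsigned long lru_budget(unsigned long lru_max, int ccc_users)
	{
		int nr_osc = ccc_users - 2;	/* exclude llite + lov refs */

		return nr_osc > 0 ? lru_max / nr_osc : lru_max;
	}
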
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index 47417f88f..536b868ff 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -474,7 +470,8 @@ static int osc_real_create(struct obd_export *exp, struct obdo *oa,
DEBUG_REQ(D_HA, req,
"delorphan from OST integration");
/* Don't resend the delorphan req */
- req->rq_no_resend = req->rq_no_delay = 1;
+ req->rq_no_resend = 1;
+ req->rq_no_delay = 1;
}
rc = ptlrpc_queue_wait(req);
@@ -2249,7 +2246,7 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
struct lustre_handle lockh = { 0 };
struct ptlrpc_request *req = NULL;
int intent = *flags & LDLM_FL_HAS_INTENT;
- __u64 match_lvb = agl ? 0 : LDLM_FL_LVB_READY;
+ __u64 match_flags = *flags;
enum ldlm_mode mode;
int rc;
@@ -2284,7 +2281,11 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
mode = einfo->ei_mode;
if (einfo->ei_mode == LCK_PR)
mode |= LCK_PW;
- mode = ldlm_lock_match(obd->obd_namespace, *flags | match_lvb, res_id,
+ if (agl == 0)
+ match_flags |= LDLM_FL_LVB_READY;
+ if (intent != 0)
+ match_flags |= LDLM_FL_BLOCK_GRANTED;
+ mode = ldlm_lock_match(obd->obd_namespace, match_flags, res_id,
einfo->ei_type, policy, mode, &lockh, 0);
if (mode) {
struct ldlm_lock *matched;
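
The match flags for ldlm_lock_match() are now assembled stepwise instead of being folded into the call: an AGL (asynchronous glimpse) lookup skips LDLM_FL_LVB_READY since it does not need the lock value block yet, and an intent enqueue additionally matches blocked-granted locks. A sketch of the assembly, assuming the LDLM_FL_* flags behave as their names suggest:

	static __u64 build_match_flags(__u64 flags, int agl, int intent)
	{
		__u64 match_flags = flags;

		if (agl == 0)		/* normal lookup: LVB must be valid */
			match_flags |= LDLM_FL_LVB_READY;
		if (intent != 0)	/* intent: blocked-granted also match */
			match_flags |= LDLM_FL_BLOCK_GRANTED;
		return match_flags;
	}
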
@@ -2775,7 +2776,8 @@ static int osc_get_info(const struct lu_env *env, struct obd_export *exp,
tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
memcpy(tmp, key, keylen);
- req->rq_no_delay = req->rq_no_resend = 1;
+ req->rq_no_delay = 1;
+ req->rq_no_resend = 1;
ptlrpc_request_set_replen(req);
rc = ptlrpc_queue_wait(req);
if (rc)
@@ -2915,7 +2917,7 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
LASSERT(!cli->cl_cache); /* only once */
cli->cl_cache = val;
- atomic_inc(&cli->cl_cache->ccc_users);
+ cl_cache_incref(cli->cl_cache);
cli->cl_lru_left = &cli->cl_cache->ccc_lru_left;
/* add this osc into entity list */
@@ -3295,7 +3297,7 @@ static int osc_cleanup(struct obd_device *obd)
list_del_init(&cli->cl_lru_osc);
spin_unlock(&cli->cl_cache->ccc_lru_lock);
cli->cl_lru_left = NULL;
- atomic_dec(&cli->cl_cache->ccc_users);
+ cl_cache_decref(cli->cl_cache);
cli->cl_cache = NULL;
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index 4b7912a2c..d4463d7c8 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -587,14 +583,19 @@ static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
spin_unlock(&pool->prp_lock);
}
-static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
- __u32 version, int opcode,
- int count, __u32 *lengths, char **bufs,
- struct ptlrpc_cli_ctx *ctx)
+int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
+ __u32 version, int opcode, char **bufs,
+ struct ptlrpc_cli_ctx *ctx)
{
- struct obd_import *imp = request->rq_import;
+ int count;
+ struct obd_import *imp;
+ __u32 *lengths;
int rc;
+ count = req_capsule_filled_sizes(&request->rq_pill, RCL_CLIENT);
+ imp = request->rq_import;
+ lengths = request->rq_pill.rc_area[RCL_CLIENT];
+
if (unlikely(ctx)) {
request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx);
} else {
@@ -602,20 +603,16 @@ static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
if (rc)
goto out_free;
}
-
sptlrpc_req_set_flavor(request, opcode);
rc = lustre_pack_request(request, imp->imp_msg_magic, count,
lengths, bufs);
- if (rc) {
- LASSERT(!request->rq_pool);
+ if (rc)
goto out_ctx;
- }
lustre_msg_add_version(request->rq_reqmsg, version);
request->rq_send_state = LUSTRE_IMP_FULL;
request->rq_type = PTL_RPC_MSG_REQUEST;
- request->rq_export = NULL;
request->rq_req_cbid.cbid_fn = request_out_callback;
request->rq_req_cbid.cbid_arg = request;
@@ -624,6 +621,8 @@ static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
request->rq_reply_cbid.cbid_arg = request;
request->rq_reply_deadline = 0;
+ request->rq_bulk_deadline = 0;
+ request->rq_req_deadline = 0;
request->rq_phase = RQ_PHASE_NEW;
request->rq_next_phase = RQ_PHASE_UNDEFINED;
@@ -632,40 +631,49 @@ static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
ptlrpc_at_set_req_timeout(request);
- spin_lock_init(&request->rq_lock);
- INIT_LIST_HEAD(&request->rq_list);
- INIT_LIST_HEAD(&request->rq_timed_list);
- INIT_LIST_HEAD(&request->rq_replay_list);
- INIT_LIST_HEAD(&request->rq_ctx_chain);
- INIT_LIST_HEAD(&request->rq_set_chain);
- INIT_LIST_HEAD(&request->rq_history_list);
- INIT_LIST_HEAD(&request->rq_exp_list);
- init_waitqueue_head(&request->rq_reply_waitq);
- init_waitqueue_head(&request->rq_set_waitq);
request->rq_xid = ptlrpc_next_xid();
- atomic_set(&request->rq_refcount, 1);
-
lustre_msg_set_opc(request->rq_reqmsg, opcode);
+ /* Let's setup deadline for req/reply/bulk unlink for opcode. */
+ if (cfs_fail_val == opcode) {
+ time_t *fail_t = NULL, *fail2_t = NULL;
+
+ if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) {
+ fail_t = &request->rq_bulk_deadline;
+ } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
+ fail_t = &request->rq_reply_deadline;
+ } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK)) {
+ fail_t = &request->rq_req_deadline;
+ } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BOTH_UNLINK)) {
+ fail_t = &request->rq_reply_deadline;
+ fail2_t = &request->rq_bulk_deadline;
+ }
+
+ if (fail_t) {
+ *fail_t = ktime_get_real_seconds() + LONG_UNLINK;
+
+ if (fail2_t)
+ *fail2_t = ktime_get_real_seconds() +
+ LONG_UNLINK;
+
+ /* The RPC is infected; let the test change the
+ * fail_loc.
+ */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(2));
+ set_current_state(TASK_RUNNING);
+ }
+ }
+
return 0;
+
out_ctx:
+ LASSERT(!request->rq_pool);
sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
out_free:
class_import_put(imp);
return rc;
}
-
-int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
- __u32 version, int opcode, char **bufs,
- struct ptlrpc_cli_ctx *ctx)
-{
- int count;
-
- count = req_capsule_filled_sizes(&request->rq_pill, RCL_CLIENT);
- return __ptlrpc_request_bufs_pack(request, version, opcode, count,
- request->rq_pill.rc_area[RCL_CLIENT],
- bufs, ctx);
-}
EXPORT_SYMBOL(ptlrpc_request_bufs_pack);
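
Two things happen in this refactor: __ptlrpc_request_bufs_pack() is folded into the exported function, since the request capsule already knows the buffer count and lengths, and the packing path gains fault injection. When the test harness sets cfs_fail_val to an opcode, the matching OBD_FAIL_PTLRPC_LONG_*_UNLINK check arms an artificial unlink deadline and stalls two seconds so the test can flip fail_loc. A stripped-down sketch of the arming pattern; struct my_req and its field names are stand-ins, while cfs_fail_val/CFS_FAIL_CHECK come from libcfs:

	struct my_req { time_t reply_deadline, bulk_deadline; };

	static void arm_unlink_fault(struct my_req *req, int opcode)
	{
		time_t *fail_t = NULL;

		if (cfs_fail_val != opcode)
			return;			/* only the targeted opcode */

		if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK))
			fail_t = &req->reply_deadline;
		else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK))
			fail_t = &req->bulk_deadline;

		if (fail_t) {
			*fail_t = ktime_get_real_seconds() + LONG_UNLINK;
			/* give the test a window to change fail_loc */
			schedule_timeout_uninterruptible(cfs_time_seconds(2));
		}
	}
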
/**
@@ -722,7 +730,9 @@ struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp,
request = ptlrpc_prep_req_from_pool(pool);
if (request) {
- LASSERTF((unsigned long)imp > 0x1000, "%p\n", imp);
+ ptlrpc_cli_req_init(request);
+
+ LASSERTF((unsigned long)imp > 0x1000, "%p", imp);
LASSERT(imp != LP_POISON);
LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p\n",
imp->imp_client);
@@ -1163,9 +1173,9 @@ static int after_reply(struct ptlrpc_request *req)
LASSERT(obd);
/* repbuf must be unlinked */
- LASSERT(!req->rq_receiving_reply && !req->rq_reply_unlink);
+ LASSERT(!req->rq_receiving_reply && req->rq_reply_unlinked);
- if (req->rq_reply_truncate) {
+ if (req->rq_reply_truncated) {
if (ptlrpc_no_resend(req)) {
DEBUG_REQ(D_ERROR, req, "reply buffer overflow, expected: %d, actual size: %d",
req->rq_nob_received, req->rq_repbuf_len);
@@ -1239,8 +1249,9 @@ static int after_reply(struct ptlrpc_request *req)
}
ktime_get_real_ts64(&work_start);
- timediff = (work_start.tv_sec - req->rq_arrival_time.tv_sec) * USEC_PER_SEC +
- (work_start.tv_nsec - req->rq_arrival_time.tv_nsec) / NSEC_PER_USEC;
+ timediff = (work_start.tv_sec - req->rq_sent_tv.tv_sec) * USEC_PER_SEC +
+ (work_start.tv_nsec - req->rq_sent_tv.tv_nsec) /
+ NSEC_PER_USEC;
if (obd->obd_svc_stats) {
lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
timediff);
@@ -1503,16 +1514,28 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
if (!(req->rq_phase == RQ_PHASE_RPC ||
req->rq_phase == RQ_PHASE_BULK ||
req->rq_phase == RQ_PHASE_INTERPRET ||
- req->rq_phase == RQ_PHASE_UNREGISTERING ||
+ req->rq_phase == RQ_PHASE_UNREG_RPC ||
+ req->rq_phase == RQ_PHASE_UNREG_BULK ||
req->rq_phase == RQ_PHASE_COMPLETE)) {
DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
LBUG();
}
- if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
+ if (req->rq_phase == RQ_PHASE_UNREG_RPC ||
+ req->rq_phase == RQ_PHASE_UNREG_BULK) {
LASSERT(req->rq_next_phase != req->rq_phase);
LASSERT(req->rq_next_phase != RQ_PHASE_UNDEFINED);
+ if (req->rq_req_deadline &&
+ !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK))
+ req->rq_req_deadline = 0;
+ if (req->rq_reply_deadline &&
+ !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK))
+ req->rq_reply_deadline = 0;
+ if (req->rq_bulk_deadline &&
+ !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK))
+ req->rq_bulk_deadline = 0;
+
/*
* Skip processing until reply is unlinked. We
* can't return to pool before that and we can't
@@ -1520,7 +1543,10 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
* sure that all rdma transfers finished and will
* not corrupt any data.
*/
- if (ptlrpc_client_recv_or_unlink(req) ||
+ if (req->rq_phase == RQ_PHASE_UNREG_RPC &&
+ ptlrpc_client_recv_or_unlink(req))
+ continue;
+ if (req->rq_phase == RQ_PHASE_UNREG_BULK &&
ptlrpc_client_bulk_active(req))
continue;
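
Splitting RQ_PHASE_UNREGISTERING into UNREG_RPC and UNREG_BULK lets the state machine wait on exactly the resource still pending (reply-buffer unlink versus bulk unlink) rather than one combined predicate. The per-phase wait condition, sketched with stand-in enum values:

	enum { RQ_PHASE_UNREG_RPC, RQ_PHASE_UNREG_BULK };	/* stand-ins */

	static bool still_unregistering(int phase, bool recv_or_unlink,
					bool bulk_active)
	{
		if (phase == RQ_PHASE_UNREG_RPC)
			return recv_or_unlink;	/* reply MD not unlinked */
		if (phase == RQ_PHASE_UNREG_BULK)
			return bulk_active;	/* bulk MD still in flight */
		return false;
	}
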
@@ -1998,7 +2024,7 @@ void ptlrpc_interrupted_set(void *data)
list_entry(tmp, struct ptlrpc_request, rq_set_chain);
if (req->rq_phase != RQ_PHASE_RPC &&
- req->rq_phase != RQ_PHASE_UNREGISTERING)
+ req->rq_phase != RQ_PHASE_UNREG_RPC)
continue;
ptlrpc_mark_interrupted(req);
@@ -2195,11 +2221,11 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
{
if (!request)
return;
+ LASSERT(!request->rq_srv_req);
+ LASSERT(!request->rq_export);
LASSERTF(!request->rq_receiving_reply, "req %p\n", request);
- LASSERTF(!request->rq_rqbd, "req %p\n", request);/* client-side */
LASSERTF(list_empty(&request->rq_list), "req %p\n", request);
LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request);
- LASSERTF(list_empty(&request->rq_exp_list), "req %p\n", request);
LASSERTF(!request->rq_replay, "req %p\n", request);
req_capsule_fini(&request->rq_pill);
@@ -2225,10 +2251,7 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
if (request->rq_repbuf)
sptlrpc_cli_free_repbuf(request);
- if (request->rq_export) {
- class_export_put(request->rq_export);
- request->rq_export = NULL;
- }
+
if (request->rq_import) {
class_import_put(request->rq_import);
request->rq_import = NULL;
@@ -2313,8 +2336,9 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
/* Let's setup deadline for reply unlink. */
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
- async && request->rq_reply_deadline == 0)
- request->rq_reply_deadline = ktime_get_real_seconds()+LONG_UNLINK;
+ async && request->rq_reply_deadline == 0 && cfs_fail_val == 0)
+ request->rq_reply_deadline =
+ ktime_get_real_seconds() + LONG_UNLINK;
/* Nothing left to do. */
if (!ptlrpc_client_recv_or_unlink(request))
@@ -2327,7 +2351,7 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
return 1;
/* Move to "Unregistering" phase as reply was not unlinked yet. */
- ptlrpc_rqphase_move(request, RQ_PHASE_UNREGISTERING);
+ ptlrpc_rqphase_move(request, RQ_PHASE_UNREG_RPC);
/* Do not wait for unlink to finish. */
if (async)
@@ -2359,9 +2383,10 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
LASSERT(rc == -ETIMEDOUT);
DEBUG_REQ(D_WARNING, request,
- "Unexpectedly long timeout rvcng=%d unlnk=%d/%d",
+ "Unexpectedly long timeout receiving_reply=%d req_ulinked=%d reply_unlinked=%d",
request->rq_receiving_reply,
- request->rq_req_unlink, request->rq_reply_unlink);
+ request->rq_req_unlinked,
+ request->rq_reply_unlinked);
}
return 0;
}
@@ -2618,11 +2643,6 @@ int ptlrpc_queue_wait(struct ptlrpc_request *req)
}
EXPORT_SYMBOL(ptlrpc_queue_wait);
-struct ptlrpc_replay_async_args {
- int praa_old_state;
- int praa_old_status;
-};
-
/**
* Callback used for replayed requests reply processing.
* In case of successful reply calls registered request replay callback.
@@ -2961,7 +2981,6 @@ static void ptlrpcd_add_work_req(struct ptlrpc_request *req)
req->rq_timeout = obd_timeout;
req->rq_sent = ktime_get_real_seconds();
req->rq_deadline = req->rq_sent + req->rq_timeout;
- req->rq_reply_deadline = req->rq_deadline;
req->rq_phase = RQ_PHASE_INTERPRET;
req->rq_next_phase = RQ_PHASE_COMPLETE;
req->rq_xid = ptlrpc_next_xid();
@@ -3017,27 +3036,17 @@ void *ptlrpcd_alloc_work(struct obd_import *imp,
return ERR_PTR(-ENOMEM);
}
+ ptlrpc_cli_req_init(req);
+
req->rq_send_state = LUSTRE_IMP_FULL;
req->rq_type = PTL_RPC_MSG_REQUEST;
req->rq_import = class_import_get(imp);
- req->rq_export = NULL;
req->rq_interpret_reply = work_interpreter;
/* don't want reply */
- req->rq_receiving_reply = 0;
- req->rq_req_unlink = req->rq_reply_unlink = 0;
- req->rq_no_delay = req->rq_no_resend = 1;
+ req->rq_no_delay = 1;
+ req->rq_no_resend = 1;
req->rq_pill.rc_fmt = (void *)&worker_format;
- spin_lock_init(&req->rq_lock);
- INIT_LIST_HEAD(&req->rq_list);
- INIT_LIST_HEAD(&req->rq_replay_list);
- INIT_LIST_HEAD(&req->rq_set_chain);
- INIT_LIST_HEAD(&req->rq_history_list);
- INIT_LIST_HEAD(&req->rq_exp_list);
- init_waitqueue_head(&req->rq_reply_waitq);
- init_waitqueue_head(&req->rq_set_waitq);
- atomic_set(&req->rq_refcount, 1);
-
CLASSERT(sizeof(*args) <= sizeof(req->rq_async_args));
args = ptlrpc_req_async_args(req);
args->cb = cb;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/connection.c b/drivers/staging/lustre/lustre/ptlrpc/connection.c
index a14daff3f..177a379da 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/connection.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/connection.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c
index fdcde9bbd..b1ce72511 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/events.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/events.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -55,27 +51,33 @@ void request_out_callback(lnet_event_t *ev)
{
struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
struct ptlrpc_request *req = cbid->cbid_arg;
+ bool wakeup = false;
- LASSERT(ev->type == LNET_EVENT_SEND ||
- ev->type == LNET_EVENT_UNLINK);
+ LASSERT(ev->type == LNET_EVENT_SEND || ev->type == LNET_EVENT_UNLINK);
LASSERT(ev->unlinked);
DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);
sptlrpc_request_out_callback(req);
+
spin_lock(&req->rq_lock);
req->rq_real_sent = ktime_get_real_seconds();
- if (ev->unlinked)
- req->rq_req_unlink = 0;
+ req->rq_req_unlinked = 1;
+ /* reply_in_callback happened before request_out_callback? */
+ if (req->rq_reply_unlinked)
+ wakeup = true;
if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) {
/* Failed send: make it seem like the reply timed out, just
* like failing sends in client.c does currently...
*/
-
req->rq_net_err = 1;
- ptlrpc_client_wake_req(req);
+ wakeup = true;
}
+
+ if (wakeup)
+ ptlrpc_client_wake_req(req);
+
spin_unlock(&req->rq_lock);
ptlrpc_req_finished(req);
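
request_out_callback() now records its own completion in rq_req_unlinked and wakes the waiter only when the reply side has already finished (or on a send error), avoiding a wakeup that races with reply_in_callback() landing first. The same two-event rendezvous works for any pair of asynchronous completions; a sketch:

	#include <linux/spinlock.h>
	#include <linux/wait.h>

	struct rendezvous {
		spinlock_t		lock;
		bool			a_done, b_done;
		wait_queue_head_t	waitq;
	};

	/* Called from event A's callback; B's callback mirrors it. */
	static void event_a_done(struct rendezvous *r)
	{
		bool wakeup = false;

		spin_lock(&r->lock);
		r->a_done = true;
		if (r->b_done)		/* other side finished already? */
			wakeup = true;
		spin_unlock(&r->lock);

		if (wakeup)
			wake_up(&r->waitq);
	}
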
@@ -104,7 +106,7 @@ void reply_in_callback(lnet_event_t *ev)
req->rq_receiving_reply = 0;
req->rq_early = 0;
if (ev->unlinked)
- req->rq_reply_unlink = 0;
+ req->rq_reply_unlinked = 1;
if (ev->status)
goto out_wake;
@@ -118,7 +120,7 @@ void reply_in_callback(lnet_event_t *ev)
if (ev->mlength < ev->rlength) {
CDEBUG(D_RPCTRACE, "truncate req %p rpc %d - %d+%d\n", req,
req->rq_replen, ev->rlength, ev->offset);
- req->rq_reply_truncate = 1;
+ req->rq_reply_truncated = 1;
req->rq_replied = 1;
req->rq_status = -EOVERFLOW;
req->rq_nob_received = ev->rlength + ev->offset;
@@ -135,7 +137,8 @@ void reply_in_callback(lnet_event_t *ev)
req->rq_early_count++; /* number received, client side */
- if (req->rq_replied) /* already got the real reply */
+ /* already got the real reply or buffers are already unlinked */
+ if (req->rq_replied || req->rq_reply_unlinked == 1)
goto out_wake;
req->rq_early = 1;
@@ -328,6 +331,7 @@ void request_in_callback(lnet_event_t *ev)
}
}
+ ptlrpc_srv_req_init(req);
/* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL,
* flags are reset and scalars are zero. We only set the message
* size to non-zero if this was a successful receive.
@@ -341,10 +345,6 @@ void request_in_callback(lnet_event_t *ev)
req->rq_self = ev->target.nid;
req->rq_rqbd = rqbd;
req->rq_phase = RQ_PHASE_NEW;
- spin_lock_init(&req->rq_lock);
- INIT_LIST_HEAD(&req->rq_timed_list);
- INIT_LIST_HEAD(&req->rq_exp_list);
- atomic_set(&req->rq_refcount, 1);
if (ev->type == LNET_EVENT_PUT)
CDEBUG(D_INFO, "incoming req@%p x%llu msgsize %u\n",
req, req->rq_xid, ev->mlength);
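The events.c hunks above rename rq_req_unlink and rq_reply_unlink to rq_req_unlinked and rq_reply_unlinked, inverting their sense: each flag now records that the corresponding LNet callback has already fired and its buffer is gone. A request can only be torn down once both callbacks have reported in, which is what the new wakeup logic in request_out_callback checks. A minimal sketch of that invariant (the helper name is hypothetical, not part of the patch):

	static inline bool ptlrpc_req_buffers_unlinked(struct ptlrpc_request *req)
	{
		/* request_out_callback sets rq_req_unlinked, reply_in_callback
		 * sets rq_reply_unlinked; both must fire before teardown */
		return req->rq_req_unlinked && req->rq_reply_unlinked;
	}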
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index a4f7544f4..3292e6ea0 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -360,9 +356,8 @@ void ptlrpc_invalidate_import(struct obd_import *imp)
"still on delayed list");
}
- CERROR("%s: RPCs in \"%s\" phase found (%d). Network is sluggish? Waiting them to error out.\n",
+ CERROR("%s: Unregistering RPCs found (%d). Network is sluggish? Waiting them to error out.\n",
cli_tgt,
- ptlrpc_phase2str(RQ_PHASE_UNREGISTERING),
atomic_read(&imp->
imp_unregistering));
}
@@ -698,7 +693,8 @@ int ptlrpc_connect_import(struct obd_import *imp)
lustre_msg_add_op_flags(request->rq_reqmsg, MSG_CONNECT_NEXT_VER);
- request->rq_no_resend = request->rq_no_delay = 1;
+ request->rq_no_resend = 1;
+ request->rq_no_delay = 1;
request->rq_send_state = LUSTRE_IMP_CONNECTING;
/* Allow a slightly larger reply for future growth compatibility */
req_capsule_set_size(&request->rq_pill, &RMF_CONNECT_DATA, RCL_SERVER,
diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c
index c0ecd1625..ab5d85174 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/layout.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/layout.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -198,7 +194,7 @@ static const struct req_msg_field *mds_reint_create_slave_client[] = {
&RMF_DLM_REQ
};
-static const struct req_msg_field *mds_reint_create_rmt_acl_client[] = {
+static const struct req_msg_field *mds_reint_create_acl_client[] = {
&RMF_PTLRPC_BODY,
&RMF_REC_REINT,
&RMF_CAPA1,
@@ -679,7 +675,7 @@ static struct req_format *req_formats[] = {
&RQF_MDS_DONE_WRITING,
&RQF_MDS_REINT,
&RQF_MDS_REINT_CREATE,
- &RQF_MDS_REINT_CREATE_RMT_ACL,
+ &RQF_MDS_REINT_CREATE_ACL,
&RQF_MDS_REINT_CREATE_SLAVE,
&RQF_MDS_REINT_CREATE_SYM,
&RQF_MDS_REINT_OPEN,
@@ -1242,10 +1238,10 @@ struct req_format RQF_MDS_REINT_CREATE =
mds_reint_create_client, mdt_body_capa);
EXPORT_SYMBOL(RQF_MDS_REINT_CREATE);
-struct req_format RQF_MDS_REINT_CREATE_RMT_ACL =
- DEFINE_REQ_FMT0("MDS_REINT_CREATE_RMT_ACL",
- mds_reint_create_rmt_acl_client, mdt_body_capa);
-EXPORT_SYMBOL(RQF_MDS_REINT_CREATE_RMT_ACL);
+struct req_format RQF_MDS_REINT_CREATE_ACL =
+ DEFINE_REQ_FMT0("MDS_REINT_CREATE_ACL",
+ mds_reint_create_acl_client, mdt_body_capa);
+EXPORT_SYMBOL(RQF_MDS_REINT_CREATE_ACL);
struct req_format RQF_MDS_REINT_CREATE_SLAVE =
DEFINE_REQ_FMT0("MDS_REINT_CREATE_EA",
diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
index a23ac5f9a..0f55c01fe 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_net.c b/drivers/staging/lustre/lustre/ptlrpc/llog_net.c
index fbccb6221..bccdace7e 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/llog_net.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/llog_net.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
index 64c0f1e17..bc93b7574 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -872,7 +868,8 @@ ptlrpc_lprocfs_svc_req_history_next(struct seq_file *s,
if (i > srhi->srhi_idx) { /* reset iterator for a new CPT */
srhi->srhi_req = NULL;
- seq = srhi->srhi_seq = 0;
+ seq = 0;
+ srhi->srhi_seq = 0;
} else { /* the next sequence */
seq = srhi->srhi_seq + (1 << svc->srv_cpt_bits);
}
@@ -1161,7 +1158,6 @@ void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes)
lprocfs_counter_add(svc_stats, idx, bytes);
}
-
EXPORT_SYMBOL(ptlrpc_lprocfs_brw);
void ptlrpc_lprocfs_unregister_service(struct ptlrpc_service *svc)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
index 10b8fe82a..11ec82545 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -251,7 +247,7 @@ int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
/* Let's set up a deadline for bulk unlink. */
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
- async && req->rq_bulk_deadline == 0)
+ async && req->rq_bulk_deadline == 0 && cfs_fail_val == 0)
req->rq_bulk_deadline = ktime_get_real_seconds() + LONG_UNLINK;
if (ptlrpc_client_bulk_active(req) == 0) /* completed or */
@@ -270,7 +266,7 @@ int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
return 1; /* never registered */
/* Move to "Unregistering" phase as bulk was not unlinked yet. */
- ptlrpc_rqphase_move(req, RQ_PHASE_UNREGISTERING);
+ ptlrpc_rqphase_move(req, RQ_PHASE_UNREG_BULK);
/* Do not wait for unlink to finish. */
if (async)
@@ -581,19 +577,18 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
}
spin_lock(&request->rq_lock);
- /* If the MD attach succeeds, there _will_ be a reply_in callback */
- request->rq_receiving_reply = !noreply;
- request->rq_req_unlink = 1;
/* We are responsible for unlinking the reply buffer */
- request->rq_reply_unlink = !noreply;
+ request->rq_reply_unlinked = noreply;
+ request->rq_receiving_reply = !noreply;
/* Clear any flags that may be present from previous sends. */
+ request->rq_req_unlinked = 0;
request->rq_replied = 0;
request->rq_err = 0;
request->rq_timedout = 0;
request->rq_net_err = 0;
request->rq_resend = 0;
request->rq_restart = 0;
- request->rq_reply_truncate = 0;
+ request->rq_reply_truncated = 0;
spin_unlock(&request->rq_lock);
if (!noreply) {
@@ -608,7 +603,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
reply_md.user_ptr = &request->rq_reply_cbid;
reply_md.eq_handle = ptlrpc_eq_h;
- /* We must see the unlink callback to unset rq_reply_unlink,
+ /* We must see the unlink callback to set rq_reply_unlinked,
* so we can't auto-unlink
*/
rc = LNetMDAttach(reply_me_h, reply_md, LNET_RETAIN,
@@ -637,7 +632,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);
- ktime_get_real_ts64(&request->rq_arrival_time);
+ ktime_get_real_ts64(&request->rq_sent_tv);
request->rq_sent = ktime_get_real_seconds();
/* We give the server rq_timeout secs to process the req, and
* add the network latency for our local timeout.
@@ -655,9 +650,10 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
connection,
request->rq_request_portal,
request->rq_xid, 0);
- if (rc == 0)
+ if (likely(rc == 0))
goto out;
+ request->rq_req_unlinked = 1;
ptlrpc_req_finished(request);
if (noreply)
goto out;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs.c b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
index c444f5168..d88faf61e 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/nrs.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
@@ -769,7 +769,7 @@ static int nrs_policy_register(struct ptlrpc_nrs *nrs,
spin_unlock(&nrs->nrs_lock);
if (rc != 0)
- (void) nrs_policy_unregister(nrs, policy->pol_desc->pd_name);
+ (void)nrs_policy_unregister(nrs, policy->pol_desc->pd_name);
return rc;
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
index 811acf6fc..b514f18fa 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -1806,19 +1802,6 @@ void lustre_swab_obd_quotactl(struct obd_quotactl *q)
}
EXPORT_SYMBOL(lustre_swab_obd_quotactl);
-void lustre_swab_mdt_remote_perm(struct mdt_remote_perm *p)
-{
- __swab32s(&p->rp_uid);
- __swab32s(&p->rp_gid);
- __swab32s(&p->rp_fsuid);
- __swab32s(&p->rp_fsuid_h);
- __swab32s(&p->rp_fsgid);
- __swab32s(&p->rp_fsgid_h);
- __swab32s(&p->rp_access_perm);
- __swab32s(&p->rp_padding);
-};
-EXPORT_SYMBOL(lustre_swab_mdt_remote_perm);
-
void lustre_swab_fid2path(struct getinfo_fid2path *gf)
{
lustre_swab_lu_fid(&gf->gf_fid);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pers.c b/drivers/staging/lustre/lustre/ptlrpc/pers.c
index ec3af109a..6c820e944 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pers.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pers.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pinger.c b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
index 8a869315c..c0529d808 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pinger.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -57,7 +53,8 @@ ptlrpc_prep_ping(struct obd_import *imp)
LUSTRE_OBD_VERSION, OBD_PING);
if (req) {
ptlrpc_request_set_replen(req);
- req->rq_no_resend = req->rq_no_delay = 1;
+ req->rq_no_resend = 1;
+ req->rq_no_delay = 1;
}
return req;
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
index 6ca26c98d..a9831fab8 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -292,4 +288,47 @@ static inline void ptlrpc_reqset_put(struct ptlrpc_request_set *set)
if (atomic_dec_and_test(&set->set_refcount))
kfree(set);
}
+
+/** initialise ptlrpc common fields */
+static inline void ptlrpc_req_comm_init(struct ptlrpc_request *req)
+{
+ spin_lock_init(&req->rq_lock);
+ atomic_set(&req->rq_refcount, 1);
+ INIT_LIST_HEAD(&req->rq_list);
+ INIT_LIST_HEAD(&req->rq_replay_list);
+}
+
+/** initialise client side ptlrpc request */
+static inline void ptlrpc_cli_req_init(struct ptlrpc_request *req)
+{
+ struct ptlrpc_cli_req *cr = &req->rq_cli;
+
+ ptlrpc_req_comm_init(req);
+
+ req->rq_receiving_reply = 0;
+ req->rq_req_unlinked = 1;
+ req->rq_reply_unlinked = 1;
+
+ INIT_LIST_HEAD(&cr->cr_set_chain);
+ INIT_LIST_HEAD(&cr->cr_ctx_chain);
+ init_waitqueue_head(&cr->cr_reply_waitq);
+ init_waitqueue_head(&cr->cr_set_waitq);
+}
+
+/** initialise server side ptlrpc request */
+static inline void ptlrpc_srv_req_init(struct ptlrpc_request *req)
+{
+ struct ptlrpc_srv_req *sr = &req->rq_srv;
+
+ ptlrpc_req_comm_init(req);
+ req->rq_srv_req = 1;
+ INIT_LIST_HEAD(&sr->sr_exp_list);
+ INIT_LIST_HEAD(&sr->sr_timed_list);
+ INIT_LIST_HEAD(&sr->sr_hist_list);
+}
+
#endif /* PTLRPC_INTERNAL_H */
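The three helpers above centralize the per-request initialisation that was previously open-coded at each allocation site (see the deletions in client.c, events.c and sec.c elsewhere in this patch). A minimal sketch of the client-side call pattern, using a plain kzalloc purely for illustration (the real allocation path is unchanged by this patch):

	static struct ptlrpc_request *example_new_client_req(void)
	{
		struct ptlrpc_request *req;

		req = kzalloc(sizeof(*req), GFP_NOFS);
		if (!req)
			return NULL;

		/* initialises rq_lock, rq_refcount, list heads and waitqueues */
		ptlrpc_cli_req_init(req);
		return req;
	}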
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
index a8ec0e9d7..a70d5843f 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
index 76a355a9d..0a374b6c2 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -161,9 +157,9 @@ static int ptlrpcd_users;
void ptlrpcd_wake(struct ptlrpc_request *req)
{
- struct ptlrpc_request_set *rq_set = req->rq_set;
+ struct ptlrpc_request_set *set = req->rq_set;
- wake_up(&rq_set->set_waitq);
+ wake_up(&set->set_waitq);
}
EXPORT_SYMBOL(ptlrpcd_wake);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/recover.c b/drivers/staging/lustre/lustre/ptlrpc/recover.c
index 30d9a164e..718b3a8d6 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/recover.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/recover.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c
index 187fd1d68..dbd819fa6 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -867,11 +863,9 @@ int sptlrpc_import_check_ctx(struct obd_import *imp)
if (!req)
return -ENOMEM;
- spin_lock_init(&req->rq_lock);
+ ptlrpc_cli_req_init(req);
atomic_set(&req->rq_refcount, 10000);
- INIT_LIST_HEAD(&req->rq_ctx_chain);
- init_waitqueue_head(&req->rq_reply_waitq);
- init_waitqueue_head(&req->rq_set_waitq);
+
req->rq_import = imp;
req->rq_flvr = sec->ps_flvr;
req->rq_cli_ctx = ctx;
@@ -1051,6 +1045,8 @@ int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
if (!early_req)
return -ENOMEM;
+ ptlrpc_cli_req_init(early_req);
+
early_size = req->rq_nob_received;
early_bufsz = size_roundup_power2(early_size);
early_buf = libcfs_kvzalloc(early_bufsz, GFP_NOFS);
@@ -1099,12 +1095,11 @@ int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
memcpy(early_buf, req->rq_repbuf, early_size);
spin_unlock(&req->rq_lock);
- spin_lock_init(&early_req->rq_lock);
early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
early_req->rq_flvr = req->rq_flvr;
early_req->rq_repbuf = early_buf;
early_req->rq_repbuf_len = early_bufsz;
- early_req->rq_repdata = (struct lustre_msg *) early_buf;
+ early_req->rq_repdata = (struct lustre_msg *)early_buf;
early_req->rq_repdata_len = early_size;
early_req->rq_early = 1;
early_req->rq_reqmsg = req->rq_reqmsg;
@@ -1556,7 +1551,7 @@ void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
/* move from segment + 1 to end segment */
LASSERT(msg->lm_magic == LUSTRE_MSG_MAGIC_V2);
oldmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
- movesize = oldmsg_size - ((unsigned long) src - (unsigned long) msg);
+ movesize = oldmsg_size - ((unsigned long)src - (unsigned long)msg);
LASSERT(movesize >= 0);
if (movesize)
@@ -2196,6 +2191,9 @@ int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
pud = lustre_msg_buf(msg, offset, 0);
+ if (!pud)
+ return -EINVAL;
+
pud->pud_uid = from_kuid(&init_user_ns, current_uid());
pud->pud_gid = from_kgid(&init_user_ns, current_gid());
pud->pud_fsuid = from_kuid(&init_user_ns, current_fsuid());
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index 02e6cda4c..5f4d79718 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -273,7 +269,7 @@ static unsigned long enc_pools_shrink_scan(struct shrinker *s,
static inline
int npages_to_npools(unsigned long npages)
{
- return (int) ((npages + PAGES_PER_POOL - 1) / PAGES_PER_POOL);
+ return (int)((npages + PAGES_PER_POOL - 1) / PAGES_PER_POOL);
}
/*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
index a51b18bbf..c14035479 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -648,7 +644,7 @@ static int logname2fsname(const char *logname, char *buf, int buflen)
return -EINVAL;
}
- len = min((int) (ptr - logname), buflen - 1);
+ len = min((int)(ptr - logname), buflen - 1);
memcpy(buf, logname, len);
buf[len] = '\0';
@@ -819,7 +815,7 @@ void sptlrpc_conf_client_adapt(struct obd_device *obd)
CDEBUG(D_SEC, "obd %s\n", obd->u.cli.cl_target_uuid.uuid);
/* serialize with connect/disconnect import */
- down_read(&obd->u.cli.cl_sem);
+ down_read_nested(&obd->u.cli.cl_sem, OBD_CLI_SEM_MDCOSC);
imp = obd->u.cli.cl_import;
if (imp) {
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c b/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
index 9082da06b..9b9801ece 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c b/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c
index e610a8ddd..07273f577 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_null.c b/drivers/staging/lustre/lustre/ptlrpc/sec_null.c
index 40e5349de..70a61e12b 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_null.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_null.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -60,7 +56,7 @@ static struct ptlrpc_svc_ctx null_svc_ctx;
static inline
void null_encode_sec_part(struct lustre_msg *msg, enum lustre_sec_part sp)
{
- msg->lm_secflvr |= (((__u32) sp) & 0xFF) << 24;
+ msg->lm_secflvr |= (((__u32)sp) & 0xFF) << 24;
}
static inline
@@ -265,7 +261,8 @@ int null_enlarge_reqbuf(struct ptlrpc_sec *sec,
memcpy(newbuf, req->rq_reqbuf, req->rq_reqlen);
kvfree(req->rq_reqbuf);
- req->rq_reqbuf = req->rq_reqmsg = newbuf;
+ req->rq_reqbuf = newbuf;
+ req->rq_reqmsg = newbuf;
req->rq_reqbuf_len = alloc_size;
if (req->rq_import)
@@ -329,7 +326,7 @@ int null_alloc_rs(struct ptlrpc_request *req, int msgsize)
rs->rs_svc_ctx = req->rq_svc_ctx;
atomic_inc(&req->rq_svc_ctx->sc_refcount);
- rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
+ rs->rs_repbuf = (struct lustre_msg *)(rs + 1);
rs->rs_repbuf_len = rs_size - sizeof(*rs);
rs->rs_msg = rs->rs_repbuf;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
index 37c9f4c45..5c4590b0c 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -298,7 +294,7 @@ int plain_cli_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
bsd = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
- token = (struct plain_bulk_token *) bsd->bsd_data;
+ token = (struct plain_bulk_token *)bsd->bsd_data;
bsd->bsd_version = 0;
bsd->bsd_flags = 0;
@@ -343,7 +339,7 @@ int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
LASSERT(req->rq_repdata->lm_bufcount == PLAIN_PACK_SEGMENTS);
bsdv = lustre_msg_buf(req->rq_repdata, PLAIN_PACK_BULK_OFF, 0);
- tokenv = (struct plain_bulk_token *) bsdv->bsd_data;
+ tokenv = (struct plain_bulk_token *)bsdv->bsd_data;
if (req->rq_bulk_write) {
if (bsdv->bsd_flags & BSD_FL_ERR)
@@ -574,8 +570,12 @@ int plain_alloc_reqbuf(struct ptlrpc_sec *sec,
lustre_init_msg_v2(req->rq_reqbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0);
- if (req->rq_pack_udesc)
- sptlrpc_pack_user_desc(req->rq_reqbuf, PLAIN_PACK_USER_OFF);
+ if (req->rq_pack_udesc) {
+ int rc = sptlrpc_pack_user_desc(req->rq_reqbuf,
+ PLAIN_PACK_USER_OFF);
+ if (rc < 0)
+ return rc;
+ }
return 0;
}
@@ -811,7 +811,7 @@ int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
rs->rs_svc_ctx = req->rq_svc_ctx;
atomic_inc(&req->rq_svc_ctx->sc_refcount);
- rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
+ rs->rs_repbuf = (struct lustre_msg *)(rs + 1);
rs->rs_repbuf_len = rs_size - sizeof(*rs);
lustre_init_msg_v2(rs->rs_repbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
@@ -891,7 +891,7 @@ int plain_svc_unwrap_bulk(struct ptlrpc_request *req,
LASSERT(req->rq_pack_bulk);
bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
- tokenr = (struct plain_bulk_token *) bsdr->bsd_data;
+ tokenr = (struct plain_bulk_token *)bsdr->bsd_data;
bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);
bsdv->bsd_version = 0;
@@ -926,7 +926,7 @@ int plain_svc_wrap_bulk(struct ptlrpc_request *req,
bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);
- tokenv = (struct plain_bulk_token *) bsdv->bsd_data;
+ tokenv = (struct plain_bulk_token *)bsdv->bsd_data;
bsdv->bsd_version = 0;
bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index 17c7b9749..4788c4940 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
diff --git a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
index aacc81083..6cc2b2edf 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
@@ -15,11 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
@@ -1269,8 +1265,6 @@ void lustre_assert_wire_constants(void)
OBD_MD_FLXATTRRM);
LASSERTF(OBD_MD_FLACL == (0x0000008000000000ULL), "found 0x%.16llxULL\n",
OBD_MD_FLACL);
- LASSERTF(OBD_MD_FLRMTPERM == (0x0000010000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLRMTPERM);
LASSERTF(OBD_MD_FLMDSCAPA == (0x0000020000000000ULL), "found 0x%.16llxULL\n",
OBD_MD_FLMDSCAPA);
LASSERTF(OBD_MD_FLOSSCAPA == (0x0000040000000000ULL), "found 0x%.16llxULL\n",
@@ -1281,14 +1275,6 @@ void lustre_assert_wire_constants(void)
OBD_MD_FLCROSSREF);
LASSERTF(OBD_MD_FLGETATTRLOCK == (0x0000200000000000ULL), "found 0x%.16llxULL\n",
OBD_MD_FLGETATTRLOCK);
- LASSERTF(OBD_MD_FLRMTLSETFACL == (0x0001000000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLRMTLSETFACL);
- LASSERTF(OBD_MD_FLRMTLGETFACL == (0x0002000000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLRMTLGETFACL);
- LASSERTF(OBD_MD_FLRMTRSETFACL == (0x0004000000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLRMTRSETFACL);
- LASSERTF(OBD_MD_FLRMTRGETFACL == (0x0008000000000000ULL), "found 0x%.16llxULL\n",
- OBD_MD_FLRMTRGETFACL);
LASSERTF(OBD_MD_FLDATAVERSION == (0x0010000000000000ULL), "found 0x%.16llxULL\n",
OBD_MD_FLDATAVERSION);
CLASSERT(OBD_FL_INLINEDATA == 0x00000001);
@@ -1895,44 +1881,6 @@ void lustre_assert_wire_constants(void)
LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->padding) == 4, "found %lld\n",
(long long)(int)sizeof(((struct mdt_ioepoch *)0)->padding));
- /* Checks for struct mdt_remote_perm */
- LASSERTF((int)sizeof(struct mdt_remote_perm) == 32, "found %lld\n",
- (long long)(int)sizeof(struct mdt_remote_perm));
- LASSERTF((int)offsetof(struct mdt_remote_perm, rp_uid) == 0, "found %lld\n",
- (long long)(int)offsetof(struct mdt_remote_perm, rp_uid));
- LASSERTF((int)sizeof(((struct mdt_remote_perm *)0)->rp_uid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_remote_perm *)0)->rp_uid));
- LASSERTF((int)offsetof(struct mdt_remote_perm, rp_gid) == 4, "found %lld\n",
- (long long)(int)offsetof(struct mdt_remote_perm, rp_gid));
- LASSERTF((int)sizeof(((struct mdt_remote_perm *)0)->rp_gid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_remote_perm *)0)->rp_gid));
- LASSERTF((int)offsetof(struct mdt_remote_perm, rp_fsuid) == 8, "found %lld\n",
- (long long)(int)offsetof(struct mdt_remote_perm, rp_fsuid));
- LASSERTF((int)sizeof(((struct mdt_remote_perm *)0)->rp_fsuid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_remote_perm *)0)->rp_fsuid));
- LASSERTF((int)offsetof(struct mdt_remote_perm, rp_fsgid) == 16, "found %lld\n",
- (long long)(int)offsetof(struct mdt_remote_perm, rp_fsgid));
- LASSERTF((int)sizeof(((struct mdt_remote_perm *)0)->rp_fsgid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_remote_perm *)0)->rp_fsgid));
- LASSERTF((int)offsetof(struct mdt_remote_perm, rp_access_perm) == 24, "found %lld\n",
- (long long)(int)offsetof(struct mdt_remote_perm, rp_access_perm));
- LASSERTF((int)sizeof(((struct mdt_remote_perm *)0)->rp_access_perm) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_remote_perm *)0)->rp_access_perm));
- LASSERTF((int)offsetof(struct mdt_remote_perm, rp_padding) == 28, "found %lld\n",
- (long long)(int)offsetof(struct mdt_remote_perm, rp_padding));
- LASSERTF((int)sizeof(((struct mdt_remote_perm *)0)->rp_padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct mdt_remote_perm *)0)->rp_padding));
- LASSERTF(CFS_SETUID_PERM == 0x00000001UL, "found 0x%.8xUL\n",
- (unsigned)CFS_SETUID_PERM);
- LASSERTF(CFS_SETGID_PERM == 0x00000002UL, "found 0x%.8xUL\n",
- (unsigned)CFS_SETGID_PERM);
- LASSERTF(CFS_SETGRP_PERM == 0x00000004UL, "found 0x%.8xUL\n",
- (unsigned)CFS_SETGRP_PERM);
- LASSERTF(CFS_RMTACL_PERM == 0x00000008UL, "found 0x%.8xUL\n",
- (unsigned)CFS_RMTACL_PERM);
- LASSERTF(CFS_RMTOWN_PERM == 0x00000010UL, "found 0x%.8xUL\n",
- (unsigned)CFS_RMTOWN_PERM);
-
/* Checks for struct mdt_rec_setattr */
LASSERTF((int)sizeof(struct mdt_rec_setattr) == 136, "found %lld\n",
(long long)(int)sizeof(struct mdt_rec_setattr));
diff --git a/drivers/staging/lustre/sysfs-fs-lustre b/drivers/staging/lustre/sysfs-fs-lustre
index 873e2cf31..20206ba96 100644
--- a/drivers/staging/lustre/sysfs-fs-lustre
+++ b/drivers/staging/lustre/sysfs-fs-lustre
@@ -294,6 +294,14 @@ Description:
Controls extended attributes client-side cache.
1 to enable, 0 to disable.
+What: /sys/fs/lustre/llite/<fsname>-<uuid>/unstable_stats
+Date: Apr 2016
+Contact: "Oleg Drokin" <oleg.drokin@intel.com>
+Description:
+ Shows the number of pages that were sent and acknowledged by
+ the server but not yet committed, and therefore still pinned
+ in client memory even though they are no longer dirty.
+
What: /sys/fs/lustre/ldlm/cancel_unused_locks_before_replay
Date: May 2015
Contact: "Oleg Drokin" <oleg.drokin@intel.com>
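Like the other llite attributes, unstable_stats is a plain read-only sysfs file, so it can be sampled with ordinary file I/O. A minimal userspace sketch; the <fsname>-<uuid> instance name used in the path below is a made-up example:

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical instance directory; substitute the real <fsname>-<uuid> */
		FILE *f = fopen("/sys/fs/lustre/llite/lustre-ffff8800cbc01000/unstable_stats", "r");
		char buf[256];

		if (!f)
			return 1;
		while (fgets(buf, sizeof(buf), f))
			fputs(buf, stdout);
		fclose(f);
		return 0;
	}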
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index de7e9f52e..7292f2395 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -16,29 +16,25 @@ menuconfig STAGING_MEDIA
If in doubt, say N here.
-if STAGING_MEDIA
+if STAGING_MEDIA && MEDIA_SUPPORT
# Please keep them in alphabetic order
source "drivers/staging/media/bcm2048/Kconfig"
+source "drivers/staging/media/cec/Kconfig"
+
source "drivers/staging/media/cxd2099/Kconfig"
source "drivers/staging/media/davinci_vpfe/Kconfig"
-source "drivers/staging/media/mn88472/Kconfig"
-
-source "drivers/staging/media/mx2/Kconfig"
-
-source "drivers/staging/media/mx3/Kconfig"
-
-source "drivers/staging/media/omap1/Kconfig"
-
source "drivers/staging/media/omap4iss/Kconfig"
-source "drivers/staging/media/timb/Kconfig"
+source "drivers/staging/media/pulse8-cec/Kconfig"
source "drivers/staging/media/tw686x-kh/Kconfig"
+source "drivers/staging/media/s5p-cec/Kconfig"
+
# Keep LIRC at the end, as it has sub-menus
source "drivers/staging/media/lirc/Kconfig"
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 60a35b3a4..87ce8ad1e 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -1,11 +1,9 @@
obj-$(CONFIG_I2C_BCM2048) += bcm2048/
+obj-$(CONFIG_MEDIA_CEC) += cec/
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_CEC) += s5p-cec/
obj-$(CONFIG_DVB_CXD2099) += cxd2099/
obj-$(CONFIG_LIRC_STAGING) += lirc/
obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci_vpfe/
-obj-$(CONFIG_VIDEO_MX2) += mx2/
-obj-$(CONFIG_VIDEO_MX3) += mx3/
-obj-$(CONFIG_VIDEO_OMAP1) += omap1/
obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
-obj-$(CONFIG_DVB_MN88472) += mn88472/
-obj-$(CONFIG_VIDEO_TIMBERDALE) += timb/
+obj-$(CONFIG_USB_PULSE8_CEC) += pulse8-cec/
obj-$(CONFIG_VIDEO_TW686X_KH) += tw686x-kh/
diff --git a/drivers/staging/media/cec/Kconfig b/drivers/staging/media/cec/Kconfig
new file mode 100644
index 000000000..21457a1f6
--- /dev/null
+++ b/drivers/staging/media/cec/Kconfig
@@ -0,0 +1,15 @@
+config MEDIA_CEC
+ bool "CEC API (EXPERIMENTAL)"
+ depends on MEDIA_SUPPORT
+ select MEDIA_CEC_EDID
+ ---help---
+ Enable the CEC API.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cec.
+
+config MEDIA_CEC_DEBUG
+ bool "CEC debugfs interface (EXPERIMENTAL)"
+ depends on MEDIA_CEC && DEBUG_FS
+ ---help---
+ Turns on the DebugFS interface for CEC devices.
diff --git a/drivers/staging/media/cec/Makefile b/drivers/staging/media/cec/Makefile
new file mode 100644
index 000000000..bd7f3c593
--- /dev/null
+++ b/drivers/staging/media/cec/Makefile
@@ -0,0 +1,5 @@
+cec-objs := cec-core.o cec-adap.o cec-api.o
+
+ifeq ($(CONFIG_MEDIA_CEC),y)
+ obj-$(CONFIG_MEDIA_SUPPORT) += cec.o
+endif
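Note the kbuild pattern in this new Makefile: cec.o is added under obj-$(CONFIG_MEDIA_SUPPORT) but only when CONFIG_MEDIA_CEC=y, so although MEDIA_CEC itself is a bool, the object tracks MEDIA_SUPPORT and is built as the cec module when MEDIA_SUPPORT=m, matching the "module will be called cec" wording in the Kconfig help above.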
diff --git a/drivers/staging/media/cec/TODO b/drivers/staging/media/cec/TODO
new file mode 100644
index 000000000..13224694a
--- /dev/null
+++ b/drivers/staging/media/cec/TODO
@@ -0,0 +1,32 @@
+The reason why cec.c is still in staging is that I would like
+to have a bit more confidence in the uABI. The kABI is fine,
+no problem there, but I would like to let the public API mature
+a bit.
+
+Once I'm confident that I didn't miss anything then the cec.c source
+can move to drivers/media and the linux/cec.h and linux/cec-funcs.h
+headers can move to uapi/linux and be added to uapi/linux/Kbuild to make
+them public.
+
+Hopefully this will happen later in 2016.
+
+Other TODOs:
+
+- There are two possible replies to CEC_MSG_INITIATE_ARC. How to handle that?
+- Add a flag to inhibit passing CEC RC messages to the rc subsystem.
+ Applications should be able to choose this when calling S_LOG_ADDRS.
+- If the reply field of cec_msg is set then when the reply arrives it
+ is only sent to the filehandle that transmitted the original message
+ and not to any followers. Should this behavior change, or perhaps
+ be controlled through a cec_msg flag?
+- Should CEC_LOG_ADDR_TYPE_SPECIFIC be replaced by TYPE_2ND_TV and TYPE_PROCESSOR?
+ And also TYPE_SWITCH and TYPE_CDC_ONLY in addition to the TYPE_UNREGISTERED?
+ This should give the framework more information about the device type
+ since SPECIFIC and UNREGISTERED give no useful information.
+- Once this is out of staging this should no longer be a separate
+ config option; instead, it should be selected by drivers that want it.
+- Revisit the IS_REACHABLE(RC_CORE): perhaps the RC_CORE support should
+ be enabled through a separate config option in drivers/media/Kconfig
+ or rc/Kconfig?
+
+Hans Verkuil <hans.verkuil@cisco.com>
diff --git a/drivers/staging/media/cec/cec-adap.c b/drivers/staging/media/cec/cec-adap.c
new file mode 100644
index 000000000..946986f3a
--- /dev/null
+++ b/drivers/staging/media/cec/cec-adap.c
@@ -0,0 +1,1664 @@
+/*
+ * cec-adap.c - HDMI Consumer Electronics Control framework - CEC adapter
+ *
+ * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kmod.h>
+#include <linux/ktime.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "cec-priv.h"
+
+static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx);
+static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx);
+
+/*
+ * 400 ms is the time it takes for one 16 byte message to be
+ * transferred and 5 is the maximum number of retries. Add
+ * another 100 ms as a margin. So if the transmit doesn't
+ * finish before that time something is really wrong and we
+ * have to time out.
+ *
+ * This is a sign that something is really wrong and a warning
+ * will be issued.
+ */
+#define CEC_XFER_TIMEOUT_MS (5 * 400 + 100)
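+/* That is 5 * 400 + 100 = 2100 ms before a stuck transmit is abandoned. */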
+
+#define call_op(adap, op, arg...) \
+ (adap->ops->op ? adap->ops->op(adap, ## arg) : 0)
+
+#define call_void_op(adap, op, arg...) \
+ do { \
+ if (adap->ops->op) \
+ adap->ops->op(adap, ## arg); \
+ } while (0)
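+
+/*
+ * For example, call_op(adap, adap_enable, true) expands to
+ * (adap->ops->adap_enable ? adap->ops->adap_enable(adap, true) : 0),
+ * so a driver may leave optional callbacks unimplemented and the
+ * framework treats the call as a successful no-op.
+ */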
+
+static int cec_log_addr2idx(const struct cec_adapter *adap, u8 log_addr)
+{
+ int i;
+
+ for (i = 0; i < adap->log_addrs.num_log_addrs; i++)
+ if (adap->log_addrs.log_addr[i] == log_addr)
+ return i;
+ return -1;
+}
+
+static unsigned int cec_log_addr2dev(const struct cec_adapter *adap, u8 log_addr)
+{
+ int i = cec_log_addr2idx(adap, log_addr);
+
+ return adap->log_addrs.primary_device_type[i < 0 ? 0 : i];
+}
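+
+/*
+ * For example, cec_log_addr2dev(adap, CEC_LOG_ADDR_PLAYBACK_1) returns the
+ * primary device type registered for that logical address, falling back to
+ * entry 0 if this adapter never claimed the address.
+ */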
+
+/*
+ * Queue a new event for this filehandle. If ts == 0, then set it
+ * to the current time.
+ *
+ * The two events that are currently defined do not need to keep track
+ * of intermediate events, so no actual queue of events is needed,
+ * instead just store the latest state and the total number of lost
+ * messages.
+ *
+ * Should new events be added in the future that require intermediate
+ * results to be queued as well, then a proper queue data structure is
+ * required. But until then, just keep it simple.
+ */
+void cec_queue_event_fh(struct cec_fh *fh,
+ const struct cec_event *new_ev, u64 ts)
+{
+ struct cec_event *ev = &fh->events[new_ev->event - 1];
+
+ if (ts == 0)
+ ts = ktime_get_ns();
+
+ mutex_lock(&fh->lock);
+ if (new_ev->event == CEC_EVENT_LOST_MSGS &&
+ fh->pending_events & (1 << new_ev->event)) {
+ /*
+ * If there is already a lost_msgs event, then just
+ * update the lost_msgs count. This effectively
+ * merges the old and new events into one.
+ */
+ ev->lost_msgs.lost_msgs += new_ev->lost_msgs.lost_msgs;
+ goto unlock;
+ }
+
+ /*
+ * Intermediate states are not interesting, so just
+ * overwrite any older event.
+ */
+ *ev = *new_ev;
+ ev->ts = ts;
+ fh->pending_events |= 1 << new_ev->event;
+
+unlock:
+ mutex_unlock(&fh->lock);
+ wake_up_interruptible(&fh->wait);
+}
+
+/* Queue a new event for all open filehandles. */
+static void cec_queue_event(struct cec_adapter *adap,
+ const struct cec_event *ev)
+{
+ u64 ts = ktime_get_ns();
+ struct cec_fh *fh;
+
+ mutex_lock(&adap->devnode.lock);
+ list_for_each_entry(fh, &adap->devnode.fhs, list)
+ cec_queue_event_fh(fh, ev, ts);
+ mutex_unlock(&adap->devnode.lock);
+}
+
+/*
+ * Queue a new message for this filehandle. If there is no more room
+ * in the queue, then send the LOST_MSGS event instead.
+ */
+static void cec_queue_msg_fh(struct cec_fh *fh, const struct cec_msg *msg)
+{
+ static const struct cec_event ev_lost_msg = {
+ .ts = 0,
+ .event = CEC_EVENT_LOST_MSGS,
+ .flags = 0,
+ {
+ .lost_msgs.lost_msgs = 1,
+ },
+ };
+ struct cec_msg_entry *entry;
+
+ mutex_lock(&fh->lock);
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ goto lost_msgs;
+
+ entry->msg = *msg;
+ /* Add new msg at the end of the queue */
+ list_add_tail(&entry->list, &fh->msgs);
+
+ /*
+ * If the queue already holds CEC_MAX_MSG_RX_QUEUE_SZ messages,
+ * drop this newest one and send a lost message event instead.
+ */
+ if (fh->queued_msgs == CEC_MAX_MSG_RX_QUEUE_SZ) {
+ list_del(&entry->list);
+ goto lost_msgs;
+ }
+ fh->queued_msgs++;
+ mutex_unlock(&fh->lock);
+ wake_up_interruptible(&fh->wait);
+ return;
+
+lost_msgs:
+ mutex_unlock(&fh->lock);
+ cec_queue_event_fh(fh, &ev_lost_msg, 0);
+}
+
+/*
+ * Queue the message for those filehandles that are in monitor mode.
+ * If valid_la is true (this message is for us or was sent by us),
+ * then pass it on to any monitoring filehandle. If this message
+ * isn't for us or from us, then only give it to filehandles that
+ * are in MONITOR_ALL mode.
+ *
+ * This can only happen if the CEC_CAP_MONITOR_ALL capability is
+ * set and the CEC adapter was placed in 'monitor all' mode.
+ */
+static void cec_queue_msg_monitor(struct cec_adapter *adap,
+ const struct cec_msg *msg,
+ bool valid_la)
+{
+ struct cec_fh *fh;
+ u32 monitor_mode = valid_la ? CEC_MODE_MONITOR :
+ CEC_MODE_MONITOR_ALL;
+
+ mutex_lock(&adap->devnode.lock);
+ list_for_each_entry(fh, &adap->devnode.fhs, list) {
+ if (fh->mode_follower >= monitor_mode)
+ cec_queue_msg_fh(fh, msg);
+ }
+ mutex_unlock(&adap->devnode.lock);
+}
+
+/*
+ * Queue the message for follower filehandles.
+ */
+static void cec_queue_msg_followers(struct cec_adapter *adap,
+ const struct cec_msg *msg)
+{
+ struct cec_fh *fh;
+
+ mutex_lock(&adap->devnode.lock);
+ list_for_each_entry(fh, &adap->devnode.fhs, list) {
+ if (fh->mode_follower == CEC_MODE_FOLLOWER)
+ cec_queue_msg_fh(fh, msg);
+ }
+ mutex_unlock(&adap->devnode.lock);
+}
+
+/* Notify userspace of an adapter state change. */
+static void cec_post_state_event(struct cec_adapter *adap)
+{
+ struct cec_event ev = {
+ .event = CEC_EVENT_STATE_CHANGE,
+ };
+
+ ev.state_change.phys_addr = adap->phys_addr;
+ ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
+ cec_queue_event(adap, &ev);
+}
+
+/*
+ * A CEC transmit (and a possible wait for reply) completed.
+ * If this was in blocking mode, then complete it, otherwise
+ * queue the message for userspace to dequeue later.
+ *
+ * This function is called with adap->lock held.
+ */
+static void cec_data_completed(struct cec_data *data)
+{
+ /*
+ * Delete this transmit from the filehandle's xfer_list since
+ * we're done with it.
+ *
+ * Note that if the filehandle is closed before this transmit
+ * finishes, then the release() function will set data->fh to NULL.
+ * Without that we would be referring to a closed filehandle.
+ */
+ if (data->fh)
+ list_del(&data->xfer_list);
+
+ if (data->blocking) {
+ /*
+ * Someone is blocking so mark the message as completed
+ * and call complete.
+ */
+ data->completed = true;
+ complete(&data->c);
+ } else {
+ /*
+ * No blocking, so just queue the message if needed and
+ * free the memory.
+ */
+ if (data->fh)
+ cec_queue_msg_fh(data->fh, &data->msg);
+ kfree(data);
+ }
+}
+
+/*
+ * A pending CEC transmit needs to be cancelled, either because the CEC
+ * adapter is disabled or the transmit takes an impossibly long time to
+ * finish.
+ *
+ * This function is called with adap->lock held.
+ */
+static void cec_data_cancel(struct cec_data *data)
+{
+ /*
+ * It's either the current transmit, or it is a pending
+ * transmit. Take the appropriate action to clear it.
+ */
+ if (data->adap->transmitting == data) {
+ data->adap->transmitting = NULL;
+ } else {
+ list_del_init(&data->list);
+ if (!(data->msg.tx_status & CEC_TX_STATUS_OK))
+ data->adap->transmit_queue_sz--;
+ }
+
+ /* Mark it as an error */
+ data->msg.tx_ts = ktime_get_ns();
+ data->msg.tx_status = CEC_TX_STATUS_ERROR |
+ CEC_TX_STATUS_MAX_RETRIES;
+ data->attempts = 0;
+ data->msg.tx_error_cnt = 1;
+ /* Queue transmitted message for monitoring purposes */
+ cec_queue_msg_monitor(data->adap, &data->msg, 1);
+
+ cec_data_completed(data);
+}
+
+/*
+ * Main CEC state machine
+ *
+ * Wait until the thread should be stopped, or we are not transmitting and
+ * a new transmit message is queued up, in which case we start transmitting
+ * that message. When the adapter has finished transmitting the message it
+ * will call cec_transmit_done().
+ *
+ * If the adapter is disabled, then remove all queued messages instead.
+ *
+ * If the current transmit times out, then cancel that transmit.
+ */
+int cec_thread_func(void *_adap)
+{
+ struct cec_adapter *adap = _adap;
+
+ for (;;) {
+ unsigned int signal_free_time;
+ struct cec_data *data;
+ bool timeout = false;
+ u8 attempts;
+
+ if (adap->transmitting) {
+ int err;
+
+ /*
+ * We are transmitting a message, so add a timeout
+ * to prevent the state machine from getting stuck waiting
+ * for this message to finish, and add a check to
+ * see if the adapter is disabled in which case the
+ * transmit should be canceled.
+ */
+ err = wait_event_interruptible_timeout(adap->kthread_waitq,
+ kthread_should_stop() ||
+ (!adap->is_configured && !adap->is_configuring) ||
+ (!adap->transmitting &&
+ !list_empty(&adap->transmit_queue)),
+ msecs_to_jiffies(CEC_XFER_TIMEOUT_MS));
+ timeout = err == 0;
+ } else {
+ /* Otherwise we just wait for something to happen. */
+ wait_event_interruptible(adap->kthread_waitq,
+ kthread_should_stop() ||
+ (!adap->transmitting &&
+ !list_empty(&adap->transmit_queue)));
+ }
+
+ mutex_lock(&adap->lock);
+
+ if ((!adap->is_configured && !adap->is_configuring) ||
+ kthread_should_stop()) {
+ /*
+ * If the adapter is disabled, or we're asked to stop,
+ * then cancel any pending transmits.
+ */
+ while (!list_empty(&adap->transmit_queue)) {
+ data = list_first_entry(&adap->transmit_queue,
+ struct cec_data, list);
+ cec_data_cancel(data);
+ }
+ if (adap->transmitting)
+ cec_data_cancel(adap->transmitting);
+
+ /*
+ * Cancel the pending timeout work. We have to unlock
+ * the mutex when flushing the work since
+ * cec_wait_timeout() will take it. This is OK since
+ * no new entries can be added to wait_queue as long
+ * as adap->transmitting is NULL, which it is due to
+ * the cec_data_cancel() above.
+ */
+ while (!list_empty(&adap->wait_queue)) {
+ data = list_first_entry(&adap->wait_queue,
+ struct cec_data, list);
+
+ if (!cancel_delayed_work(&data->work)) {
+ mutex_unlock(&adap->lock);
+ flush_scheduled_work();
+ mutex_lock(&adap->lock);
+ }
+ cec_data_cancel(data);
+ }
+ goto unlock;
+ }
+
+ if (adap->transmitting && timeout) {
+ /*
+ * If we time out, then log that. This really shouldn't
+ * happen and is an indication of a faulty CEC adapter
+ * driver, or that the CEC bus is in some weird state.
+ */
+ dprintk(0, "message %*ph timed out!\n",
+ adap->transmitting->msg.len,
+ adap->transmitting->msg.msg);
+ /* Just give up on this. */
+ cec_data_cancel(adap->transmitting);
+ goto unlock;
+ }
+
+ /*
+ * If we are still transmitting, or there is nothing new to
+ * transmit, then just continue waiting.
+ */
+ if (adap->transmitting || list_empty(&adap->transmit_queue))
+ goto unlock;
+
+ /* Get a new message to transmit */
+ data = list_first_entry(&adap->transmit_queue,
+ struct cec_data, list);
+ list_del_init(&data->list);
+ adap->transmit_queue_sz--;
+ /* Make this the current transmitting message */
+ adap->transmitting = data;
+
+ /*
+ * Suggested number of attempts as per the CEC 2.0 spec:
+ * 4 attempts is the default, except for 'secondary poll
+ * messages', i.e. poll messages not sent during the adapter
+ * configuration phase when it allocates logical addresses.
+ */
+ if (data->msg.len == 1 && adap->is_configured)
+ attempts = 2;
+ else
+ attempts = 4;
+
+ /* Set the suggested signal free time */
+ if (data->attempts) {
+ /* should be >= 3 data bit periods for a retry */
+ signal_free_time = CEC_SIGNAL_FREE_TIME_RETRY;
+ } else if (data->new_initiator) {
+ /* should be >= 5 data bit periods for new initiator */
+ signal_free_time = CEC_SIGNAL_FREE_TIME_NEW_INITIATOR;
+ } else {
+ /*
+ * should be >= 7 data bit periods for sending another
+ * frame immediately after another.
+ */
+ signal_free_time = CEC_SIGNAL_FREE_TIME_NEXT_XFER;
+ }
+ if (data->attempts == 0)
+ data->attempts = attempts;
+
+ /* Tell the adapter to transmit, cancel on error */
+ if (adap->ops->adap_transmit(adap, data->attempts,
+ signal_free_time, &data->msg))
+ cec_data_cancel(data);
+
+unlock:
+ mutex_unlock(&adap->lock);
+
+ if (kthread_should_stop())
+ break;
+ }
+ return 0;
+}
+
+/*
+ * Called by the CEC adapter if a transmit finished.
+ */
+void cec_transmit_done(struct cec_adapter *adap, u8 status, u8 arb_lost_cnt,
+ u8 nack_cnt, u8 low_drive_cnt, u8 error_cnt)
+{
+ struct cec_data *data;
+ struct cec_msg *msg;
+ u64 ts = ktime_get_ns();
+
+ dprintk(2, "cec_transmit_done %02x\n", status);
+ mutex_lock(&adap->lock);
+ data = adap->transmitting;
+ if (!data) {
+ /*
+ * This can happen if a transmit was issued and the cable is
+ * unplugged while the transmit is ongoing. Ignore this
+ * transmit in that case.
+ */
+ dprintk(1, "cec_transmit_done without an ongoing transmit!\n");
+ goto unlock;
+ }
+
+ msg = &data->msg;
+
+ /* Drivers must fill in the status! */
+ WARN_ON(status == 0);
+ msg->tx_ts = ts;
+ msg->tx_status |= status;
+ msg->tx_arb_lost_cnt += arb_lost_cnt;
+ msg->tx_nack_cnt += nack_cnt;
+ msg->tx_low_drive_cnt += low_drive_cnt;
+ msg->tx_error_cnt += error_cnt;
+
+ /* Mark that we're done with this transmit */
+ adap->transmitting = NULL;
+
+ /*
+ * If there are still retry attempts left and there was an error and
+ * the hardware didn't signal that it retried itself (by setting
+ * CEC_TX_STATUS_MAX_RETRIES), then we will retry ourselves.
+ */
+ if (data->attempts > 1 &&
+ !(status & (CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_OK))) {
+ /* Retry this message */
+ data->attempts--;
+ /* Add the message in front of the transmit queue */
+ list_add(&data->list, &adap->transmit_queue);
+ adap->transmit_queue_sz++;
+ goto wake_thread;
+ }
+
+ data->attempts = 0;
+
+ /* Always set CEC_TX_STATUS_MAX_RETRIES on error */
+ if (!(status & CEC_TX_STATUS_OK))
+ msg->tx_status |= CEC_TX_STATUS_MAX_RETRIES;
+
+ /* Queue transmitted message for monitoring purposes */
+ cec_queue_msg_monitor(adap, msg, 1);
+
+ if ((status & CEC_TX_STATUS_OK) && adap->is_configured &&
+ msg->timeout) {
+ /*
+ * Queue the message into the wait queue if we want to wait
+ * for a reply.
+ */
+ list_add_tail(&data->list, &adap->wait_queue);
+ schedule_delayed_work(&data->work,
+ msecs_to_jiffies(msg->timeout));
+ } else {
+ /* Otherwise we're done */
+ cec_data_completed(data);
+ }
+
+wake_thread:
+ /*
+ * Wake up the main thread to see if another message is ready
+ * for transmitting or to retry the current message.
+ */
+ wake_up_interruptible(&adap->kthread_waitq);
+unlock:
+ mutex_unlock(&adap->lock);
+}
+EXPORT_SYMBOL_GPL(cec_transmit_done);
+
+/*
+ * Called when waiting for a reply times out.
+ */
+static void cec_wait_timeout(struct work_struct *work)
+{
+ struct cec_data *data = container_of(work, struct cec_data, work.work);
+ struct cec_adapter *adap = data->adap;
+
+ mutex_lock(&adap->lock);
+ /*
+ * Sanity check in case the timeout and the arrival of the message
+ * happened at the same time.
+ */
+ if (list_empty(&data->list))
+ goto unlock;
+
+ /* Mark the message as timed out */
+ list_del_init(&data->list);
+ data->msg.rx_ts = ktime_get_ns();
+ data->msg.rx_status = CEC_RX_STATUS_TIMEOUT;
+ cec_data_completed(data);
+unlock:
+ mutex_unlock(&adap->lock);
+}
+
+/*
+ * Transmit a message. The fh argument may be NULL if the transmit is not
+ * associated with a specific filehandle.
+ *
+ * This function is called with adap->lock held.
+ */
+int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
+ struct cec_fh *fh, bool block)
+{
+ struct cec_data *data;
+ u8 last_initiator = 0xff;
+ unsigned int timeout;
+ int res = 0;
+
+ msg->rx_ts = 0;
+ msg->tx_ts = 0;
+ msg->rx_status = 0;
+ msg->tx_status = 0;
+ msg->tx_arb_lost_cnt = 0;
+ msg->tx_nack_cnt = 0;
+ msg->tx_low_drive_cnt = 0;
+ msg->tx_error_cnt = 0;
+ msg->flags = 0;
+ msg->sequence = ++adap->sequence;
+ if (!msg->sequence)
+ msg->sequence = ++adap->sequence;
+
+ if (msg->reply && msg->timeout == 0) {
+ /* Make sure the timeout isn't 0. */
+ msg->timeout = 1000;
+ }
+
+ /* Sanity checks */
+ if (msg->len == 0 || msg->len > CEC_MAX_MSG_SIZE) {
+ dprintk(1, "cec_transmit_msg: invalid length %d\n", msg->len);
+ return -EINVAL;
+ }
+ if (msg->timeout && msg->len == 1) {
+ dprintk(1, "cec_transmit_msg: can't reply for poll msg\n");
+ return -EINVAL;
+ }
+ memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len);
+ if (msg->len == 1) {
+ if (cec_msg_initiator(msg) != 0xf ||
+ cec_msg_destination(msg) == 0xf) {
+ dprintk(1, "cec_transmit_msg: invalid poll message\n");
+ return -EINVAL;
+ }
+ if (cec_has_log_addr(adap, cec_msg_destination(msg))) {
+ /*
+ * If the destination is a logical address our adapter
+ * has already claimed, then just NACK this.
+ * It depends on the hardware whether it will ACK a
+ * POLL sent to itself (some do), so it is easiest to
+ * handle it here so the behavior stays consistent.
+ */
+ msg->tx_ts = ktime_get_ns();
+ msg->tx_status = CEC_TX_STATUS_NACK |
+ CEC_TX_STATUS_MAX_RETRIES;
+ msg->tx_nack_cnt = 1;
+ return 0;
+ }
+ }
+ if (msg->len > 1 && !cec_msg_is_broadcast(msg) &&
+ cec_has_log_addr(adap, cec_msg_destination(msg))) {
+ dprintk(1, "cec_transmit_msg: destination is the adapter itself\n");
+ return -EINVAL;
+ }
+ if (cec_msg_initiator(msg) != 0xf &&
+ !cec_has_log_addr(adap, cec_msg_initiator(msg))) {
+ dprintk(1, "cec_transmit_msg: initiator has unknown logical address %d\n",
+ cec_msg_initiator(msg));
+ return -EINVAL;
+ }
+ if (!adap->is_configured && !adap->is_configuring)
+ return -ENONET;
+
+ if (adap->transmit_queue_sz >= CEC_MAX_MSG_TX_QUEUE_SZ)
+ return -EBUSY;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
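+ /* A CDC message carries the physical address of the initiator */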
+ if (msg->len > 1 && msg->msg[1] == CEC_MSG_CDC_MESSAGE) {
+ msg->msg[2] = adap->phys_addr >> 8;
+ msg->msg[3] = adap->phys_addr & 0xff;
+ }
+
+ if (msg->timeout)
+ dprintk(2, "cec_transmit_msg: %*ph (wait for 0x%02x%s)\n",
+ msg->len, msg->msg, msg->reply, !block ? ", nb" : "");
+ else
+ dprintk(2, "cec_transmit_msg: %*ph%s\n",
+ msg->len, msg->msg, !block ? " (nb)" : "");
+
+ data->msg = *msg;
+ data->fh = fh;
+ data->adap = adap;
+ data->blocking = block;
+
+ /*
+ * Determine if this message follows a message from the same
+ * initiator. Needed to determine the free signal time later on.
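+ * A new initiator is allowed a different signal free time than one
+ * that just transmitted, so the core tracks whether the initiator
+ * changed.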
+ */
+ if (msg->len > 1) {
+ if (!(list_empty(&adap->transmit_queue))) {
+ const struct cec_data *last;
+
+ last = list_last_entry(&adap->transmit_queue,
+ const struct cec_data, list);
+ last_initiator = cec_msg_initiator(&last->msg);
+ } else if (adap->transmitting) {
+ last_initiator =
+ cec_msg_initiator(&adap->transmitting->msg);
+ }
+ }
+ data->new_initiator = last_initiator != cec_msg_initiator(msg);
+ init_completion(&data->c);
+ INIT_DELAYED_WORK(&data->work, cec_wait_timeout);
+
+ if (fh)
+ list_add_tail(&data->xfer_list, &fh->xfer_list);
+ list_add_tail(&data->list, &adap->transmit_queue);
+ adap->transmit_queue_sz++;
+ if (!adap->transmitting)
+ wake_up_interruptible(&adap->kthread_waitq);
+
+ /* All done if we don't need to block waiting for completion */
+ if (!block)
+ return 0;
+
+ /*
+ * If we don't get a completion before this time something is really
+ * wrong and we time out.
+ */
+ timeout = CEC_XFER_TIMEOUT_MS;
+ /* Add the requested timeout if we have to wait for a reply as well */
+ if (msg->timeout)
+ timeout += msg->timeout;
+
+ /*
+ * Release the lock and wait, retake the lock afterwards.
+ */
+ mutex_unlock(&adap->lock);
+ res = wait_for_completion_killable_timeout(&data->c,
+ msecs_to_jiffies(timeout));
+ mutex_lock(&adap->lock);
+
+ if (data->completed) {
+ /* The transmit completed (possibly with an error) */
+ *msg = data->msg;
+ kfree(data);
+ return 0;
+ }
+ /*
+ * The wait for completion timed out or was interrupted, so mark this
+ * as non-blocking and disconnect from the filehandle since it is
+ * still 'in flight'. When it finally completes it will just drop the
+ * result silently.
+ */
+ data->blocking = false;
+ if (data->fh)
+ list_del(&data->xfer_list);
+ data->fh = NULL;
+
+ if (res == 0) { /* timed out */
+ /* Check if the reply or the transmit failed */
+ if (msg->timeout && (msg->tx_status & CEC_TX_STATUS_OK))
+ msg->rx_status = CEC_RX_STATUS_TIMEOUT;
+ else
+ msg->tx_status = CEC_TX_STATUS_MAX_RETRIES;
+ }
+ return res > 0 ? 0 : res;
+}
+
+/* Helper function to be used by drivers and this framework. */
+int cec_transmit_msg(struct cec_adapter *adap, struct cec_msg *msg,
+ bool block)
+{
+ int ret;
+
+ mutex_lock(&adap->lock);
+ ret = cec_transmit_msg_fh(adap, msg, NULL, block);
+ mutex_unlock(&adap->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cec_transmit_msg);
+
+/*
+ * I don't like forward references but without this the low-level
+ * cec_received_msg() function would come after a bunch of high-level
+ * CEC protocol handling functions. That was very confusing.
+ */
+static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
+ bool is_reply);
+
+/* Called by the CEC adapter if a message is received */
+void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg)
+{
+ struct cec_data *data;
+ u8 msg_init = cec_msg_initiator(msg);
+ u8 msg_dest = cec_msg_destination(msg);
+ bool is_reply = false;
+ bool valid_la = true;
+
+ if (WARN_ON(!msg->len || msg->len > CEC_MAX_MSG_SIZE))
+ return;
+
+ msg->rx_ts = ktime_get_ns();
+ msg->rx_status = CEC_RX_STATUS_OK;
+ msg->sequence = msg->reply = msg->timeout = 0;
+ msg->tx_status = 0;
+ msg->tx_ts = 0;
+ msg->flags = 0;
+ memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len);
+
+ mutex_lock(&adap->lock);
+ dprintk(2, "cec_received_msg: %*ph\n", msg->len, msg->msg);
+
+ /* Check if this message was for us (directed or broadcast). */
+ if (!cec_msg_is_broadcast(msg))
+ valid_la = cec_has_log_addr(adap, msg_dest);
+
+ /* It's a valid message and not a poll or CDC message */
+ if (valid_la && msg->len > 1 && msg->msg[1] != CEC_MSG_CDC_MESSAGE) {
+ u8 cmd = msg->msg[1];
+ bool abort = cmd == CEC_MSG_FEATURE_ABORT;
+
+ /* The aborted command is in msg[2] */
+ if (abort)
+ cmd = msg->msg[2];
+
+ /*
+ * Walk over all transmitted messages that are waiting for a
+ * reply.
+ */
+ list_for_each_entry(data, &adap->wait_queue, list) {
+ struct cec_msg *dst = &data->msg;
+
+ /* Does the command match? */
+ if ((abort && cmd != dst->msg[1]) ||
+ (!abort && cmd != dst->reply))
+ continue;
+
+ /* Does the addressing match? */
+ if (msg_init != cec_msg_destination(dst) &&
+ !cec_msg_is_broadcast(dst))
+ continue;
+
+ /* We got a reply */
+ memcpy(dst->msg, msg->msg, msg->len);
+ dst->len = msg->len;
+ dst->rx_ts = msg->rx_ts;
+ dst->rx_status = msg->rx_status;
+ if (abort)
+ dst->rx_status |= CEC_RX_STATUS_FEATURE_ABORT;
+ /* Remove it from the wait_queue */
+ list_del_init(&data->list);
+
+ /* Cancel the pending timeout work */
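+ /*
+ * cec_wait_timeout() takes adap->lock itself, so drop the lock
+ * before flushing to avoid deadlocking with a handler that is
+ * already running.
+ */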
+ if (!cancel_delayed_work(&data->work)) {
+ mutex_unlock(&adap->lock);
+ flush_scheduled_work();
+ mutex_lock(&adap->lock);
+ }
+ /*
+ * Mark this as a reply, provided someone is still
+ * waiting for the answer.
+ */
+ if (data->fh)
+ is_reply = true;
+ cec_data_completed(data);
+ break;
+ }
+ }
+ mutex_unlock(&adap->lock);
+
+ /* Pass the message on to any monitoring filehandles */
+ cec_queue_msg_monitor(adap, msg, valid_la);
+
+ /* We're done if it is not for us or a poll message */
+ if (!valid_la || msg->len <= 1)
+ return;
+
+ if (adap->log_addrs.log_addr_mask == 0)
+ return;
+
+ /*
+ * Process the message on the protocol level. If is_reply is true,
+ * then cec_receive_notify() won't pass on the reply to the listener(s)
+ * since that was already done by cec_data_completed() above.
+ */
+ cec_receive_notify(adap, msg, is_reply);
+}
+EXPORT_SYMBOL_GPL(cec_received_msg);
+
+/* Logical Address Handling */
+
+/*
+ * Attempt to claim a specific logical address.
+ *
+ * This function is called with adap->lock held.
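+ *
+ * Returns 1 if the address was claimed, 0 if it is already in use and
+ * a negative error code on failure.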
+ */
+static int cec_config_log_addr(struct cec_adapter *adap,
+ unsigned int idx,
+ unsigned int log_addr)
+{
+ struct cec_log_addrs *las = &adap->log_addrs;
+ struct cec_msg msg = { };
+ int err;
+
+ if (cec_has_log_addr(adap, log_addr))
+ return 0;
+
+ /* Send poll message */
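+ /*
+ * The initiator is the Unregistered address (0xf) since no logical
+ * address has been claimed yet; the destination is the address we are
+ * trying to claim. If nobody ACKs this poll, the address is free.
+ */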
+ msg.len = 1;
+ msg.msg[0] = 0xf0 | log_addr;
+ err = cec_transmit_msg_fh(adap, &msg, NULL, true);
+
+ /*
+ * While we were polling, the physical address was reset and
+ * the adapter was unconfigured, so bail out.
+ */
+ if (!adap->is_configuring)
+ return -EINTR;
+
+ if (err)
+ return err;
+
+ if (msg.tx_status & CEC_TX_STATUS_OK)
+ return 0;
+
+ /*
+ * Message not acknowledged, so this logical
+ * address is free to use.
+ */
+ err = adap->ops->adap_log_addr(adap, log_addr);
+ if (err)
+ return err;
+
+ las->log_addr[idx] = log_addr;
+ las->log_addr_mask |= 1 << log_addr;
+ adap->phys_addrs[log_addr] = adap->phys_addr;
+
+ dprintk(2, "claimed addr %d (%d)\n", log_addr,
+ las->primary_device_type[idx]);
+ return 1;
+}
+
+/*
+ * Unconfigure the adapter: clear all logical addresses and send
+ * the state changed event.
+ *
+ * This function is called with adap->lock held.
+ */
+static void cec_adap_unconfigure(struct cec_adapter *adap)
+{
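+ /* Passing CEC_LOG_ADDR_INVALID tells the hardware to drop all LAs */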
+ WARN_ON(adap->ops->adap_log_addr(adap, CEC_LOG_ADDR_INVALID));
+ adap->log_addrs.log_addr_mask = 0;
+ adap->is_configuring = false;
+ adap->is_configured = false;
+ memset(adap->phys_addrs, 0xff, sizeof(adap->phys_addrs));
+ wake_up_interruptible(&adap->kthread_waitq);
+ cec_post_state_event(adap);
+}
+
+/*
+ * Attempt to claim the required logical addresses.
+ */
+static int cec_config_thread_func(void *arg)
+{
+ /* The various LAs for each type of device */
+ static const u8 tv_log_addrs[] = {
+ CEC_LOG_ADDR_TV, CEC_LOG_ADDR_SPECIFIC,
+ CEC_LOG_ADDR_INVALID
+ };
+ static const u8 record_log_addrs[] = {
+ CEC_LOG_ADDR_RECORD_1, CEC_LOG_ADDR_RECORD_2,
+ CEC_LOG_ADDR_RECORD_3,
+ CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2,
+ CEC_LOG_ADDR_INVALID
+ };
+ static const u8 tuner_log_addrs[] = {
+ CEC_LOG_ADDR_TUNER_1, CEC_LOG_ADDR_TUNER_2,
+ CEC_LOG_ADDR_TUNER_3, CEC_LOG_ADDR_TUNER_4,
+ CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2,
+ CEC_LOG_ADDR_INVALID
+ };
+ static const u8 playback_log_addrs[] = {
+ CEC_LOG_ADDR_PLAYBACK_1, CEC_LOG_ADDR_PLAYBACK_2,
+ CEC_LOG_ADDR_PLAYBACK_3,
+ CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2,
+ CEC_LOG_ADDR_INVALID
+ };
+ static const u8 audiosystem_log_addrs[] = {
+ CEC_LOG_ADDR_AUDIOSYSTEM,
+ CEC_LOG_ADDR_INVALID
+ };
+ static const u8 specific_use_log_addrs[] = {
+ CEC_LOG_ADDR_SPECIFIC,
+ CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2,
+ CEC_LOG_ADDR_INVALID
+ };
+ static const u8 *type2addrs[6] = {
+ [CEC_LOG_ADDR_TYPE_TV] = tv_log_addrs,
+ [CEC_LOG_ADDR_TYPE_RECORD] = record_log_addrs,
+ [CEC_LOG_ADDR_TYPE_TUNER] = tuner_log_addrs,
+ [CEC_LOG_ADDR_TYPE_PLAYBACK] = playback_log_addrs,
+ [CEC_LOG_ADDR_TYPE_AUDIOSYSTEM] = audiosystem_log_addrs,
+ [CEC_LOG_ADDR_TYPE_SPECIFIC] = specific_use_log_addrs,
+ };
+ static const u16 type2mask[] = {
+ [CEC_LOG_ADDR_TYPE_TV] = CEC_LOG_ADDR_MASK_TV,
+ [CEC_LOG_ADDR_TYPE_RECORD] = CEC_LOG_ADDR_MASK_RECORD,
+ [CEC_LOG_ADDR_TYPE_TUNER] = CEC_LOG_ADDR_MASK_TUNER,
+ [CEC_LOG_ADDR_TYPE_PLAYBACK] = CEC_LOG_ADDR_MASK_PLAYBACK,
+ [CEC_LOG_ADDR_TYPE_AUDIOSYSTEM] = CEC_LOG_ADDR_MASK_AUDIOSYSTEM,
+ [CEC_LOG_ADDR_TYPE_SPECIFIC] = CEC_LOG_ADDR_MASK_SPECIFIC,
+ };
+ struct cec_adapter *adap = arg;
+ struct cec_log_addrs *las = &adap->log_addrs;
+ int err;
+ int i, j;
+
+ mutex_lock(&adap->lock);
+ dprintk(1, "physical address: %x.%x.%x.%x, claim %d logical addresses\n",
+ cec_phys_addr_exp(adap->phys_addr), las->num_log_addrs);
+ las->log_addr_mask = 0;
+
+ if (las->log_addr_type[0] == CEC_LOG_ADDR_TYPE_UNREGISTERED)
+ goto configured;
+
+ for (i = 0; i < las->num_log_addrs; i++) {
+ unsigned int type = las->log_addr_type[i];
+ const u8 *la_list;
+ u8 last_la;
+
+ /*
+ * The TV functionality can only map to physical address 0.
+ * For any other address, try the Specific functionality
+ * instead as per the spec.
+ */
+ if (adap->phys_addr && type == CEC_LOG_ADDR_TYPE_TV)
+ type = CEC_LOG_ADDR_TYPE_SPECIFIC;
+
+ la_list = type2addrs[type];
+ last_la = las->log_addr[i];
+ las->log_addr[i] = CEC_LOG_ADDR_INVALID;
+ if (last_la == CEC_LOG_ADDR_INVALID ||
+ last_la == CEC_LOG_ADDR_UNREGISTERED ||
+ !((1 << last_la) & type2mask[type]))
+ last_la = la_list[0];
+
+ err = cec_config_log_addr(adap, i, last_la);
+ if (err > 0) /* Reused last LA */
+ continue;
+
+ if (err < 0)
+ goto unconfigure;
+
+ for (j = 0; la_list[j] != CEC_LOG_ADDR_INVALID; j++) {
+ /* Tried this one already, skip it */
+ if (la_list[j] == last_la)
+ continue;
+ /* The backup addresses are CEC 2.0 specific */
+ if ((la_list[j] == CEC_LOG_ADDR_BACKUP_1 ||
+ la_list[j] == CEC_LOG_ADDR_BACKUP_2) &&
+ las->cec_version < CEC_OP_CEC_VERSION_2_0)
+ continue;
+
+ err = cec_config_log_addr(adap, i, la_list[j]);
+ if (err == 0) /* LA is in use */
+ continue;
+ if (err < 0)
+ goto unconfigure;
+ /* Done, claimed an LA */
+ break;
+ }
+
+ if (la_list[j] == CEC_LOG_ADDR_INVALID)
+ dprintk(1, "could not claim LA %d\n", i);
+ }
+
+ if (adap->log_addrs.log_addr_mask == 0 &&
+ !(las->flags & CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK))
+ goto unconfigure;
+
+configured:
+ if (adap->log_addrs.log_addr_mask == 0) {
+ /* Fall back to unregistered */
+ las->log_addr[0] = CEC_LOG_ADDR_UNREGISTERED;
+ las->log_addr_mask = 1 << las->log_addr[0];
+ for (i = 1; i < las->num_log_addrs; i++)
+ las->log_addr[i] = CEC_LOG_ADDR_INVALID;
+ }
+ adap->is_configured = true;
+ adap->is_configuring = false;
+ cec_post_state_event(adap);
+ mutex_unlock(&adap->lock);
+
+ for (i = 0; i < las->num_log_addrs; i++) {
+ if (las->log_addr[i] == CEC_LOG_ADDR_INVALID)
+ continue;
+
+ /*
+ * Report Features must come first according
+ * to CEC 2.0
+ */
+ if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED)
+ cec_report_features(adap, i);
+ cec_report_phys_addr(adap, i);
+ }
+ for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
+ las->log_addr[i] = CEC_LOG_ADDR_INVALID;
+ mutex_lock(&adap->lock);
+ adap->kthread_config = NULL;
+ mutex_unlock(&adap->lock);
+ complete(&adap->config_completion);
+ return 0;
+
+unconfigure:
+ for (i = 0; i < las->num_log_addrs; i++)
+ las->log_addr[i] = CEC_LOG_ADDR_INVALID;
+ cec_adap_unconfigure(adap);
+ adap->kthread_config = NULL;
+ mutex_unlock(&adap->lock);
+ complete(&adap->config_completion);
+ return 0;
+}
+
+/*
+ * Called from either __cec_s_phys_addr or __cec_s_log_addrs to claim the
+ * logical addresses.
+ *
+ * This function is called with adap->lock held.
+ */
+static void cec_claim_log_addrs(struct cec_adapter *adap, bool block)
+{
+ if (WARN_ON(adap->is_configuring || adap->is_configured))
+ return;
+
+ init_completion(&adap->config_completion);
+
+ /* Ready to kick off the thread */
+ adap->is_configuring = true;
+ adap->kthread_config = kthread_run(cec_config_thread_func, adap,
+ "ceccfg-%s", adap->name);
+ if (IS_ERR(adap->kthread_config)) {
+ adap->kthread_config = NULL;
+ adap->is_configuring = false;
+ } else if (block) {
+ mutex_unlock(&adap->lock);
+ wait_for_completion(&adap->config_completion);
+ mutex_lock(&adap->lock);
+ }
+}
+
+/*
+ * Set a new physical address and send an event notifying userspace of this.
+ *
+ * This function is called with adap->lock held.
+ */
+void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
+{
+ if (phys_addr == adap->phys_addr || adap->devnode.unregistered)
+ return;
+
+ if (phys_addr == CEC_PHYS_ADDR_INVALID ||
+ adap->phys_addr != CEC_PHYS_ADDR_INVALID) {
+ adap->phys_addr = CEC_PHYS_ADDR_INVALID;
+ cec_post_state_event(adap);
+ cec_adap_unconfigure(adap);
+ /* Disabling monitor all mode should always succeed */
+ if (adap->monitor_all_cnt)
+ WARN_ON(call_op(adap, adap_monitor_all_enable, false));
+ WARN_ON(adap->ops->adap_enable(adap, false));
+ if (phys_addr == CEC_PHYS_ADDR_INVALID)
+ return;
+ }
+
+ if (adap->ops->adap_enable(adap, true))
+ return;
+
+ if (adap->monitor_all_cnt &&
+ call_op(adap, adap_monitor_all_enable, true)) {
+ WARN_ON(adap->ops->adap_enable(adap, false));
+ return;
+ }
+ adap->phys_addr = phys_addr;
+ cec_post_state_event(adap);
+ if (adap->log_addrs.num_log_addrs)
+ cec_claim_log_addrs(adap, block);
+}
+
+void cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
+{
+ if (IS_ERR_OR_NULL(adap))
+ return;
+
+ if (WARN_ON(adap->capabilities & CEC_CAP_PHYS_ADDR))
+ return;
+ mutex_lock(&adap->lock);
+ __cec_s_phys_addr(adap, phys_addr, block);
+ mutex_unlock(&adap->lock);
+}
+EXPORT_SYMBOL_GPL(cec_s_phys_addr);
+
+/*
+ * Called from either the ioctl or a driver to set the logical addresses.
+ *
+ * This function is called with adap->lock held.
+ */
+int __cec_s_log_addrs(struct cec_adapter *adap,
+ struct cec_log_addrs *log_addrs, bool block)
+{
+ u16 type_mask = 0;
+ int i, j;
+
+ if (adap->devnode.unregistered)
+ return -ENODEV;
+
+ if (!log_addrs || log_addrs->num_log_addrs == 0) {
+ adap->log_addrs.num_log_addrs = 0;
+ cec_adap_unconfigure(adap);
+ return 0;
+ }
+
+ /* Ensure the osd name is 0-terminated */
+ log_addrs->osd_name[sizeof(log_addrs->osd_name) - 1] = '\0';
+
+ /* Sanity checks */
+ if (log_addrs->num_log_addrs > adap->available_log_addrs) {
+ dprintk(1, "num_log_addrs > %d\n", adap->available_log_addrs);
+ return -EINVAL;
+ }
+
+ /*
+ * Vendor ID is a 24 bit number, so check if the value is
+ * within the correct range.
+ */
+ if (log_addrs->vendor_id != CEC_VENDOR_ID_NONE &&
+ (log_addrs->vendor_id & 0xff000000) != 0)
+ return -EINVAL;
+
+ if (log_addrs->cec_version != CEC_OP_CEC_VERSION_1_4 &&
+ log_addrs->cec_version != CEC_OP_CEC_VERSION_2_0)
+ return -EINVAL;
+
+ if (log_addrs->num_log_addrs > 1)
+ for (i = 0; i < log_addrs->num_log_addrs; i++)
+ if (log_addrs->log_addr_type[i] ==
+ CEC_LOG_ADDR_TYPE_UNREGISTERED) {
+ dprintk(1, "num_log_addrs > 1 can't be combined with unregistered LA\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < log_addrs->num_log_addrs; i++) {
+ const u8 feature_sz = ARRAY_SIZE(log_addrs->features[0]);
+ u8 *features = log_addrs->features[i];
+ bool op_is_dev_features = false;
+
+ log_addrs->log_addr[i] = CEC_LOG_ADDR_INVALID;
+ if (type_mask & (1 << log_addrs->log_addr_type[i])) {
+ dprintk(1, "duplicate logical address type\n");
+ return -EINVAL;
+ }
+ type_mask |= 1 << log_addrs->log_addr_type[i];
+ if ((type_mask & (1 << CEC_LOG_ADDR_TYPE_RECORD)) &&
+ (type_mask & (1 << CEC_LOG_ADDR_TYPE_PLAYBACK))) {
+ /* Record already contains the playback functionality */
+ dprintk(1, "invalid record + playback combination\n");
+ return -EINVAL;
+ }
+ if (log_addrs->primary_device_type[i] >
+ CEC_OP_PRIM_DEVTYPE_PROCESSOR) {
+ dprintk(1, "unknown primary device type\n");
+ return -EINVAL;
+ }
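+ /* Primary device type 2 is reserved in the CEC specification */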
+ if (log_addrs->primary_device_type[i] == 2) {
+ dprintk(1, "invalid primary device type\n");
+ return -EINVAL;
+ }
+ if (log_addrs->log_addr_type[i] > CEC_LOG_ADDR_TYPE_UNREGISTERED) {
+ dprintk(1, "unknown logical address type\n");
+ return -EINVAL;
+ }
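+ /*
+ * The features array holds the RC profile bytes followed by the
+ * device feature bytes; each block ends with a byte that has the
+ * extension bit (0x80) cleared.
+ */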
+ for (j = 0; j < feature_sz; j++) {
+ if ((features[j] & 0x80) == 0) {
+ if (op_is_dev_features)
+ break;
+ op_is_dev_features = true;
+ }
+ }
+ if (!op_is_dev_features || j == feature_sz) {
+ dprintk(1, "malformed features\n");
+ return -EINVAL;
+ }
+ /* Zero unused part of the feature array */
+ memset(features + j + 1, 0, feature_sz - j - 1);
+ }
+
+ if (log_addrs->cec_version >= CEC_OP_CEC_VERSION_2_0) {
+ if (log_addrs->num_log_addrs > 2) {
+ dprintk(1, "CEC 2.0 allows no more than 2 logical addresses\n");
+ return -EINVAL;
+ }
+ if (log_addrs->num_log_addrs == 2) {
+ if (!(type_mask & ((1 << CEC_LOG_ADDR_TYPE_AUDIOSYSTEM) |
+ (1 << CEC_LOG_ADDR_TYPE_TV)))) {
+ dprintk(1, "Two LAs is only allowed for audiosystem and TV\n");
+ return -EINVAL;
+ }
+ if (!(type_mask & ((1 << CEC_LOG_ADDR_TYPE_PLAYBACK) |
+ (1 << CEC_LOG_ADDR_TYPE_RECORD)))) {
+ dprintk(1, "An audiosystem/TV can only be combined with record or playback\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* Zero unused LAs */
+ for (i = log_addrs->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++) {
+ log_addrs->primary_device_type[i] = 0;
+ log_addrs->log_addr_type[i] = 0;
+ log_addrs->all_device_types[i] = 0;
+ memset(log_addrs->features[i], 0,
+ sizeof(log_addrs->features[i]));
+ }
+
+ log_addrs->log_addr_mask = adap->log_addrs.log_addr_mask;
+ adap->log_addrs = *log_addrs;
+ if (adap->phys_addr != CEC_PHYS_ADDR_INVALID)
+ cec_claim_log_addrs(adap, block);
+ return 0;
+}
+
+int cec_s_log_addrs(struct cec_adapter *adap,
+ struct cec_log_addrs *log_addrs, bool block)
+{
+ int err;
+
+ if (WARN_ON(adap->capabilities & CEC_CAP_LOG_ADDRS))
+ return -EINVAL;
+ mutex_lock(&adap->lock);
+ err = __cec_s_log_addrs(adap, log_addrs, block);
+ mutex_unlock(&adap->lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(cec_s_log_addrs);
+
+/* High-level core CEC message handling */
+
+/* Transmit the Report Features message */
+static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx)
+{
+ struct cec_msg msg = { };
+ const struct cec_log_addrs *las = &adap->log_addrs;
+ const u8 *features = las->features[la_idx];
+ bool op_is_dev_features = false;
+ unsigned int idx;
+
+ /* This is 2.0 and up only */
+ if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
+ return 0;
+
+ /* Report Features */
+ msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
+ msg.len = 4;
+ msg.msg[1] = CEC_MSG_REPORT_FEATURES;
+ msg.msg[2] = adap->log_addrs.cec_version;
+ msg.msg[3] = las->all_device_types[la_idx];
+
+ /* Write RC Profiles first, then Device Features */
+ for (idx = 0; idx < ARRAY_SIZE(las->features[0]); idx++) {
+ msg.msg[msg.len++] = features[idx];
+ if ((features[idx] & CEC_OP_FEAT_EXT) == 0) {
+ if (op_is_dev_features)
+ break;
+ op_is_dev_features = true;
+ }
+ }
+ return cec_transmit_msg(adap, &msg, false);
+}
+
+/* Transmit the Report Physical Address message */
+static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx)
+{
+ const struct cec_log_addrs *las = &adap->log_addrs;
+ struct cec_msg msg = { };
+
+ /* Report Physical Address */
+ msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
+ cec_msg_report_physical_addr(&msg, adap->phys_addr,
+ las->primary_device_type[la_idx]);
+ dprintk(2, "config: la %d pa %x.%x.%x.%x\n",
+ las->log_addr[la_idx],
+ cec_phys_addr_exp(adap->phys_addr));
+ return cec_transmit_msg(adap, &msg, false);
+}
+
+/* Transmit the Feature Abort message */
+static int cec_feature_abort_reason(struct cec_adapter *adap,
+ struct cec_msg *msg, u8 reason)
+{
+ struct cec_msg tx_msg = { };
+
+ /*
+ * Don't reply with CEC_MSG_FEATURE_ABORT to a CEC_MSG_FEATURE_ABORT
+ * message!
+ */
+ if (msg->msg[1] == CEC_MSG_FEATURE_ABORT)
+ return 0;
+ cec_msg_set_reply_to(&tx_msg, msg);
+ cec_msg_feature_abort(&tx_msg, msg->msg[1], reason);
+ return cec_transmit_msg(adap, &tx_msg, false);
+}
+
+static int cec_feature_abort(struct cec_adapter *adap, struct cec_msg *msg)
+{
+ return cec_feature_abort_reason(adap, msg,
+ CEC_OP_ABORT_UNRECOGNIZED_OP);
+}
+
+static int cec_feature_refused(struct cec_adapter *adap, struct cec_msg *msg)
+{
+ return cec_feature_abort_reason(adap, msg,
+ CEC_OP_ABORT_REFUSED);
+}
+
+/*
+ * Called when a CEC message is received. This function will do any
+ * necessary core processing. The is_reply bool is true if this message
+ * is a reply to an earlier transmit.
+ *
+ * The message is either a broadcast message or a valid directed message.
+ */
+static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
+ bool is_reply)
+{
+ bool is_broadcast = cec_msg_is_broadcast(msg);
+ u8 dest_laddr = cec_msg_destination(msg);
+ u8 init_laddr = cec_msg_initiator(msg);
+ u8 devtype = cec_log_addr2dev(adap, dest_laddr);
+ int la_idx = cec_log_addr2idx(adap, dest_laddr);
+ bool from_unregistered = init_laddr == 0xf;
+ struct cec_msg tx_cec_msg = { };
+
+ dprintk(1, "cec_receive_notify: %*ph\n", msg->len, msg->msg);
+
+ if (adap->ops->received) {
+ /* Allow drivers to process the message first */
+ if (adap->ops->received(adap, msg) != -ENOMSG)
+ return 0;
+ }
+
+ /*
+ * CEC_MSG_REPORT_PHYSICAL_ADDR, CEC_MSG_USER_CONTROL_PRESSED and
+ * CEC_MSG_USER_CONTROL_RELEASED messages always have to be
+ * handled by the CEC core, even if the passthrough mode is on.
+ * The others are just ignored if passthrough mode is on.
+ */
+ switch (msg->msg[1]) {
+ case CEC_MSG_GET_CEC_VERSION:
+ case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
+ case CEC_MSG_ABORT:
+ case CEC_MSG_GIVE_DEVICE_POWER_STATUS:
+ case CEC_MSG_GIVE_PHYSICAL_ADDR:
+ case CEC_MSG_GIVE_OSD_NAME:
+ case CEC_MSG_GIVE_FEATURES:
+ /*
+ * Skip processing these messages if the passthrough mode
+ * is on.
+ */
+ if (adap->passthrough)
+ goto skip_processing;
+ /* Ignore if addressing is wrong */
+ if (is_broadcast || from_unregistered)
+ return 0;
+ break;
+
+ case CEC_MSG_USER_CONTROL_PRESSED:
+ case CEC_MSG_USER_CONTROL_RELEASED:
+ /* Wrong addressing mode: don't process */
+ if (is_broadcast || from_unregistered)
+ goto skip_processing;
+ break;
+
+ case CEC_MSG_REPORT_PHYSICAL_ADDR:
+ /*
+ * This message is always processed, regardless of the
+ * passthrough setting.
+ *
+ * Exception: don't process if wrong addressing mode.
+ */
+ if (!is_broadcast)
+ goto skip_processing;
+ break;
+
+ default:
+ break;
+ }
+
+ cec_msg_set_reply_to(&tx_cec_msg, msg);
+
+ switch (msg->msg[1]) {
+ /* The following messages are processed but still passed through */
+ case CEC_MSG_REPORT_PHYSICAL_ADDR: {
+ u16 pa = (msg->msg[2] << 8) | msg->msg[3];
+
+ if (!from_unregistered)
+ adap->phys_addrs[init_laddr] = pa;
+ dprintk(1, "Reported physical address %x.%x.%x.%x for logical address %d\n",
+ cec_phys_addr_exp(pa), init_laddr);
+ break;
+ }
+
+ case CEC_MSG_USER_CONTROL_PRESSED:
+ if (!(adap->capabilities & CEC_CAP_RC))
+ break;
+
+#if IS_REACHABLE(CONFIG_RC_CORE)
+ switch (msg->msg[2]) {
+ /*
+ * Play function, this message can have variable length
+ * depending on the specific play function that is used.
+ */
+ case 0x60:
+ if (msg->len == 2)
+ rc_keydown(adap->rc, RC_TYPE_CEC,
+ msg->msg[2], 0);
+ else
+ rc_keydown(adap->rc, RC_TYPE_CEC,
+ msg->msg[2] << 8 | msg->msg[3], 0);
+ break;
+ /*
+ * Other function messages that are not handled.
+ * Currently the RC framework does not allow supplying an
+ * additional parameter with a keypress. These "keys" contain
+ * other information such as a channel number, an input number,
+ * etc.
+ * For the time being these messages are not processed by the
+ * framework and are simply forwarded to user space.
+ */
+ case 0x56: case 0x57:
+ case 0x67: case 0x68: case 0x69: case 0x6a:
+ break;
+ default:
+ rc_keydown(adap->rc, RC_TYPE_CEC, msg->msg[2], 0);
+ break;
+ }
+#endif
+ break;
+
+ case CEC_MSG_USER_CONTROL_RELEASED:
+ if (!(adap->capabilities & CEC_CAP_RC))
+ break;
+#if IS_REACHABLE(CONFIG_RC_CORE)
+ rc_keyup(adap->rc);
+#endif
+ break;
+
+ /*
+ * The remaining messages are only processed if the passthrough mode
+ * is off.
+ */
+ case CEC_MSG_GET_CEC_VERSION:
+ cec_msg_cec_version(&tx_cec_msg, adap->log_addrs.cec_version);
+ return cec_transmit_msg(adap, &tx_cec_msg, false);
+
+ case CEC_MSG_GIVE_PHYSICAL_ADDR:
+ /* Do nothing for CEC switches using addr 15 */
+ if (devtype == CEC_OP_PRIM_DEVTYPE_SWITCH && dest_laddr == 15)
+ return 0;
+ cec_msg_report_physical_addr(&tx_cec_msg, adap->phys_addr, devtype);
+ return cec_transmit_msg(adap, &tx_cec_msg, false);
+
+ case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
+ if (adap->log_addrs.vendor_id == CEC_VENDOR_ID_NONE)
+ return cec_feature_abort(adap, msg);
+ cec_msg_device_vendor_id(&tx_cec_msg, adap->log_addrs.vendor_id);
+ return cec_transmit_msg(adap, &tx_cec_msg, false);
+
+ case CEC_MSG_ABORT:
+ /* Do nothing for CEC switches */
+ if (devtype == CEC_OP_PRIM_DEVTYPE_SWITCH)
+ return 0;
+ return cec_feature_refused(adap, msg);
+
+ case CEC_MSG_GIVE_OSD_NAME: {
+ if (adap->log_addrs.osd_name[0] == 0)
+ return cec_feature_abort(adap, msg);
+ cec_msg_set_osd_name(&tx_cec_msg, adap->log_addrs.osd_name);
+ return cec_transmit_msg(adap, &tx_cec_msg, false);
+ }
+
+ case CEC_MSG_GIVE_FEATURES:
+ if (adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0)
+ return cec_report_features(adap, la_idx);
+ return 0;
+
+ default:
+ /*
+ * Unprocessed messages are aborted if userspace isn't doing
+ * any processing either.
+ */
+ if (!is_broadcast && !is_reply && !adap->follower_cnt &&
+ !adap->cec_follower && msg->msg[1] != CEC_MSG_FEATURE_ABORT)
+ return cec_feature_abort(adap, msg);
+ break;
+ }
+
+skip_processing:
+ /* If this was a reply, then we're done */
+ if (is_reply)
+ return 0;
+
+ /*
+ * Send to the exclusive follower if there is one, otherwise send
+ * to all followers.
+ */
+ if (adap->cec_follower)
+ cec_queue_msg_fh(adap->cec_follower, msg);
+ else
+ cec_queue_msg_followers(adap, msg);
+ return 0;
+}
+
+/*
+ * Helper functions to keep track of the 'monitor all' use count.
+ *
+ * These functions are called with adap->lock held.
+ */
+int cec_monitor_all_cnt_inc(struct cec_adapter *adap)
+{
+ int ret = 0;
+
+ if (adap->monitor_all_cnt == 0)
+ ret = call_op(adap, adap_monitor_all_enable, true);
+ if (ret == 0)
+ adap->monitor_all_cnt++;
+ return ret;
+}
+
+void cec_monitor_all_cnt_dec(struct cec_adapter *adap)
+{
+ adap->monitor_all_cnt--;
+ if (adap->monitor_all_cnt == 0)
+ WARN_ON(call_op(adap, adap_monitor_all_enable, false));
+}
+
+#ifdef CONFIG_MEDIA_CEC_DEBUG
+/*
+ * Log the current state of the CEC adapter.
+ * Very useful for debugging.
+ */
+int cec_adap_status(struct seq_file *file, void *priv)
+{
+ struct cec_adapter *adap = dev_get_drvdata(file->private);
+ struct cec_data *data;
+
+ mutex_lock(&adap->lock);
+ seq_printf(file, "configured: %d\n", adap->is_configured);
+ seq_printf(file, "configuring: %d\n", adap->is_configuring);
+ seq_printf(file, "phys_addr: %x.%x.%x.%x\n",
+ cec_phys_addr_exp(adap->phys_addr));
+ seq_printf(file, "number of LAs: %d\n", adap->log_addrs.num_log_addrs);
+ seq_printf(file, "LA mask: 0x%04x\n", adap->log_addrs.log_addr_mask);
+ if (adap->cec_follower)
+ seq_printf(file, "has CEC follower%s\n",
+ adap->passthrough ? " (in passthrough mode)" : "");
+ if (adap->cec_initiator)
+ seq_puts(file, "has CEC initiator\n");
+ if (adap->monitor_all_cnt)
+ seq_printf(file, "file handles in Monitor All mode: %u\n",
+ adap->monitor_all_cnt);
+ data = adap->transmitting;
+ if (data)
+ seq_printf(file, "transmitting message: %*ph (reply: %02x, timeout: %ums)\n",
+ data->msg.len, data->msg.msg, data->msg.reply,
+ data->msg.timeout);
+ seq_printf(file, "pending transmits: %u\n", adap->transmit_queue_sz);
+ list_for_each_entry(data, &adap->transmit_queue, list) {
+ seq_printf(file, "queued tx message: %*ph (reply: %02x, timeout: %ums)\n",
+ data->msg.len, data->msg.msg, data->msg.reply,
+ data->msg.timeout);
+ }
+ list_for_each_entry(data, &adap->wait_queue, list) {
+ seq_printf(file, "message waiting for reply: %*ph (reply: %02x, timeout: %ums)\n",
+ data->msg.len, data->msg.msg, data->msg.reply,
+ data->msg.timeout);
+ }
+
+ call_void_op(adap, adap_status, file);
+ mutex_unlock(&adap->lock);
+ return 0;
+}
+#endif
diff --git a/drivers/staging/media/cec/cec-api.c b/drivers/staging/media/cec/cec-api.c
new file mode 100644
index 000000000..e274e2f22
--- /dev/null
+++ b/drivers/staging/media/cec/cec-api.c
@@ -0,0 +1,579 @@
+/*
+ * cec-api.c - HDMI Consumer Electronics Control framework - API
+ *
+ * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kmod.h>
+#include <linux/ktime.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/version.h>
+
+#include "cec-priv.h"
+
+static inline struct cec_devnode *cec_devnode_data(struct file *filp)
+{
+ struct cec_fh *fh = filp->private_data;
+
+ return &fh->adap->devnode;
+}
+
+/* CEC file operations */
+
+static unsigned int cec_poll(struct file *filp,
+ struct poll_table_struct *poll)
+{
+ struct cec_devnode *devnode = cec_devnode_data(filp);
+ struct cec_fh *fh = filp->private_data;
+ struct cec_adapter *adap = fh->adap;
+ unsigned int res = 0;
+
+ if (!devnode->registered)
+ return POLLERR | POLLHUP;
+ mutex_lock(&adap->lock);
+ if (adap->is_configured &&
+ adap->transmit_queue_sz < CEC_MAX_MSG_TX_QUEUE_SZ)
+ res |= POLLOUT | POLLWRNORM;
+ if (fh->queued_msgs)
+ res |= POLLIN | POLLRDNORM;
+ if (fh->pending_events)
+ res |= POLLPRI;
+ poll_wait(filp, &fh->wait, poll);
+ mutex_unlock(&adap->lock);
+ return res;
+}
+
+static bool cec_is_busy(const struct cec_adapter *adap,
+ const struct cec_fh *fh)
+{
+ bool valid_initiator = adap->cec_initiator && adap->cec_initiator == fh;
+ bool valid_follower = adap->cec_follower && adap->cec_follower == fh;
+
+ /*
+ * Exclusive initiators and followers can always access the CEC adapter
+ */
+ if (valid_initiator || valid_follower)
+ return false;
+ /*
+ * All others can only access the CEC adapter if there is no
+ * exclusive initiator and they are in INITIATOR mode.
+ */
+ return adap->cec_initiator ||
+ fh->mode_initiator == CEC_MODE_NO_INITIATOR;
+}
+
+static long cec_adap_g_caps(struct cec_adapter *adap,
+ struct cec_caps __user *parg)
+{
+ struct cec_caps caps = {};
+
+ strlcpy(caps.driver, adap->devnode.parent->driver->name,
+ sizeof(caps.driver));
+ strlcpy(caps.name, adap->name, sizeof(caps.name));
+ caps.available_log_addrs = adap->available_log_addrs;
+ caps.capabilities = adap->capabilities;
+ caps.version = LINUX_VERSION_CODE;
+ if (copy_to_user(parg, &caps, sizeof(caps)))
+ return -EFAULT;
+ return 0;
+}
+
+static long cec_adap_g_phys_addr(struct cec_adapter *adap,
+ __u16 __user *parg)
+{
+ u16 phys_addr;
+
+ mutex_lock(&adap->lock);
+ phys_addr = adap->phys_addr;
+ mutex_unlock(&adap->lock);
+ if (copy_to_user(parg, &phys_addr, sizeof(phys_addr)))
+ return -EFAULT;
+ return 0;
+}
+
+static long cec_adap_s_phys_addr(struct cec_adapter *adap, struct cec_fh *fh,
+ bool block, __u16 __user *parg)
+{
+ u16 phys_addr;
+ long err;
+
+ if (!(adap->capabilities & CEC_CAP_PHYS_ADDR))
+ return -ENOTTY;
+ if (copy_from_user(&phys_addr, parg, sizeof(phys_addr)))
+ return -EFAULT;
+
+ err = cec_phys_addr_validate(phys_addr, NULL, NULL);
+ if (err)
+ return err;
+ mutex_lock(&adap->lock);
+ if (cec_is_busy(adap, fh))
+ err = -EBUSY;
+ else
+ __cec_s_phys_addr(adap, phys_addr, block);
+ mutex_unlock(&adap->lock);
+ return err;
+}
+
+static long cec_adap_g_log_addrs(struct cec_adapter *adap,
+ struct cec_log_addrs __user *parg)
+{
+ struct cec_log_addrs log_addrs;
+
+ mutex_lock(&adap->lock);
+ log_addrs = adap->log_addrs;
+ if (!adap->is_configured)
+ memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID,
+ sizeof(log_addrs.log_addr));
+ mutex_unlock(&adap->lock);
+
+ if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
+ return -EFAULT;
+ return 0;
+}
+
+static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
+ bool block, struct cec_log_addrs __user *parg)
+{
+ struct cec_log_addrs log_addrs;
+ long err = -EBUSY;
+
+ if (!(adap->capabilities & CEC_CAP_LOG_ADDRS))
+ return -ENOTTY;
+ if (copy_from_user(&log_addrs, parg, sizeof(log_addrs)))
+ return -EFAULT;
+ log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK;
+ mutex_lock(&adap->lock);
+ if (!adap->is_configuring &&
+ (!log_addrs.num_log_addrs || !adap->is_configured) &&
+ !cec_is_busy(adap, fh)) {
+ err = __cec_s_log_addrs(adap, &log_addrs, block);
+ if (!err)
+ log_addrs = adap->log_addrs;
+ }
+ mutex_unlock(&adap->lock);
+ if (err)
+ return err;
+ if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
+ return -EFAULT;
+ return 0;
+}
+
+static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh,
+ bool block, struct cec_msg __user *parg)
+{
+ struct cec_msg msg = {};
+ long err = 0;
+
+ if (!(adap->capabilities & CEC_CAP_TRANSMIT))
+ return -ENOTTY;
+ if (copy_from_user(&msg, parg, sizeof(msg)))
+ return -EFAULT;
+ mutex_lock(&adap->lock);
+ if (!adap->is_configured)
+ err = -ENONET;
+ else if (cec_is_busy(adap, fh))
+ err = -EBUSY;
+ else
+ err = cec_transmit_msg_fh(adap, &msg, fh, block);
+ mutex_unlock(&adap->lock);
+ if (err)
+ return err;
+ if (copy_to_user(parg, &msg, sizeof(msg)))
+ return -EFAULT;
+ return 0;
+}
+
+/* Called by CEC_RECEIVE: wait for a message to arrive */
+static int cec_receive_msg(struct cec_fh *fh, struct cec_msg *msg, bool block)
+{
+ u32 timeout = msg->timeout;
+ int res;
+
+ do {
+ mutex_lock(&fh->lock);
+ /* Are there received messages queued up? */
+ if (fh->queued_msgs) {
+ /* Yes, return the first one */
+ struct cec_msg_entry *entry =
+ list_first_entry(&fh->msgs,
+ struct cec_msg_entry, list);
+
+ list_del(&entry->list);
+ *msg = entry->msg;
+ kfree(entry);
+ fh->queued_msgs--;
+ mutex_unlock(&fh->lock);
+ /* restore original timeout value */
+ msg->timeout = timeout;
+ return 0;
+ }
+
+ /* No, return EAGAIN in non-blocking mode or wait */
+ mutex_unlock(&fh->lock);
+
+ /* Return when in non-blocking mode */
+ if (!block)
+ return -EAGAIN;
+
+ if (msg->timeout) {
+ /* The user specified a timeout */
+ res = wait_event_interruptible_timeout(fh->wait,
+ fh->queued_msgs,
+ msecs_to_jiffies(msg->timeout));
+ if (res == 0)
+ res = -ETIMEDOUT;
+ else if (res > 0)
+ res = 0;
+ } else {
+ /* Wait indefinitely */
+ res = wait_event_interruptible(fh->wait,
+ fh->queued_msgs);
+ }
+ /* Exit on error, otherwise loop to get the new message */
+ } while (!res);
+ return res;
+}
+
+static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh,
+ bool block, struct cec_msg __user *parg)
+{
+ struct cec_msg msg = {};
+ long err = 0;
+
+ if (copy_from_user(&msg, parg, sizeof(msg)))
+ return -EFAULT;
+ mutex_lock(&adap->lock);
+ if (!adap->is_configured && fh->mode_follower < CEC_MODE_MONITOR)
+ err = -ENONET;
+ mutex_unlock(&adap->lock);
+ if (err)
+ return err;
+
+ err = cec_receive_msg(fh, &msg, block);
+ if (err)
+ return err;
+ if (copy_to_user(parg, &msg, sizeof(msg)))
+ return -EFAULT;
+ return 0;
+}
+
+static long cec_dqevent(struct cec_adapter *adap, struct cec_fh *fh,
+ bool block, struct cec_event __user *parg)
+{
+ struct cec_event *ev = NULL;
+ u64 ts = ~0ULL;
+ unsigned int i;
+ long err = 0;
+
+ mutex_lock(&fh->lock);
+ while (!fh->pending_events && block) {
+ mutex_unlock(&fh->lock);
+ err = wait_event_interruptible(fh->wait, fh->pending_events);
+ if (err)
+ return err;
+ mutex_lock(&fh->lock);
+ }
+
+ /* Find the oldest event */
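+ /* Event numbers start at 1, so the flag for events[i] is bit i + 1 */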
+ for (i = 0; i < CEC_NUM_EVENTS; i++) {
+ if (fh->pending_events & (1 << (i + 1)) &&
+ fh->events[i].ts <= ts) {
+ ev = &fh->events[i];
+ ts = ev->ts;
+ }
+ }
+ if (!ev) {
+ err = -EAGAIN;
+ goto unlock;
+ }
+
+ if (copy_to_user(parg, ev, sizeof(*ev))) {
+ err = -EFAULT;
+ goto unlock;
+ }
+
+ fh->pending_events &= ~(1 << ev->event);
+
+unlock:
+ mutex_unlock(&fh->lock);
+ return err;
+}
+
+static long cec_g_mode(struct cec_adapter *adap, struct cec_fh *fh,
+ u32 __user *parg)
+{
+ u32 mode = fh->mode_initiator | fh->mode_follower;
+
+ if (copy_to_user(parg, &mode, sizeof(mode)))
+ return -EFAULT;
+ return 0;
+}
+
+static long cec_s_mode(struct cec_adapter *adap, struct cec_fh *fh,
+ u32 __user *parg)
+{
+ u32 mode;
+ u8 mode_initiator;
+ u8 mode_follower;
+ long err = 0;
+
+ if (copy_from_user(&mode, parg, sizeof(mode)))
+ return -EFAULT;
+ if (mode & ~(CEC_MODE_INITIATOR_MSK | CEC_MODE_FOLLOWER_MSK))
+ return -EINVAL;
+
+ mode_initiator = mode & CEC_MODE_INITIATOR_MSK;
+ mode_follower = mode & CEC_MODE_FOLLOWER_MSK;
+
+ if (mode_initiator > CEC_MODE_EXCL_INITIATOR ||
+ mode_follower > CEC_MODE_MONITOR_ALL)
+ return -EINVAL;
+
+ if (mode_follower == CEC_MODE_MONITOR_ALL &&
+ !(adap->capabilities & CEC_CAP_MONITOR_ALL))
+ return -EINVAL;
+
+ /* Follower modes should always be able to send CEC messages */
+ if ((mode_initiator == CEC_MODE_NO_INITIATOR ||
+ !(adap->capabilities & CEC_CAP_TRANSMIT)) &&
+ mode_follower >= CEC_MODE_FOLLOWER &&
+ mode_follower <= CEC_MODE_EXCL_FOLLOWER_PASSTHRU)
+ return -EINVAL;
+
+ /* Monitor modes require CEC_MODE_NO_INITIATOR */
+ if (mode_initiator && mode_follower >= CEC_MODE_MONITOR)
+ return -EINVAL;
+
+ /* Monitor modes require CAP_NET_ADMIN */
+ if (mode_follower >= CEC_MODE_MONITOR && !capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ mutex_lock(&adap->lock);
+ /*
+ * You can't become exclusive follower if someone else already
+ * has that job.
+ */
+ if ((mode_follower == CEC_MODE_EXCL_FOLLOWER ||
+ mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) &&
+ adap->cec_follower && adap->cec_follower != fh)
+ err = -EBUSY;
+ /*
+ * You can't become exclusive initiator if someone else already
+ * has that job.
+ */
+ if (mode_initiator == CEC_MODE_EXCL_INITIATOR &&
+ adap->cec_initiator && adap->cec_initiator != fh)
+ err = -EBUSY;
+
+ if (!err) {
+ bool old_mon_all = fh->mode_follower == CEC_MODE_MONITOR_ALL;
+ bool new_mon_all = mode_follower == CEC_MODE_MONITOR_ALL;
+
+ if (old_mon_all != new_mon_all) {
+ if (new_mon_all)
+ err = cec_monitor_all_cnt_inc(adap);
+ else
+ cec_monitor_all_cnt_dec(adap);
+ }
+ }
+
+ if (err) {
+ mutex_unlock(&adap->lock);
+ return err;
+ }
+
+ if (fh->mode_follower == CEC_MODE_FOLLOWER)
+ adap->follower_cnt--;
+ if (mode_follower == CEC_MODE_FOLLOWER)
+ adap->follower_cnt++;
+ if (mode_follower == CEC_MODE_EXCL_FOLLOWER ||
+ mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
+ adap->passthrough =
+ mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU;
+ adap->cec_follower = fh;
+ } else if (adap->cec_follower == fh) {
+ adap->passthrough = false;
+ adap->cec_follower = NULL;
+ }
+ if (mode_initiator == CEC_MODE_EXCL_INITIATOR)
+ adap->cec_initiator = fh;
+ else if (adap->cec_initiator == fh)
+ adap->cec_initiator = NULL;
+ fh->mode_initiator = mode_initiator;
+ fh->mode_follower = mode_follower;
+ mutex_unlock(&adap->lock);
+ return 0;
+}
+
+static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct cec_devnode *devnode = cec_devnode_data(filp);
+ struct cec_fh *fh = filp->private_data;
+ struct cec_adapter *adap = fh->adap;
+ bool block = !(filp->f_flags & O_NONBLOCK);
+ void __user *parg = (void __user *)arg;
+
+ if (!devnode->registered)
+ return -ENODEV;
+
+ switch (cmd) {
+ case CEC_ADAP_G_CAPS:
+ return cec_adap_g_caps(adap, parg);
+
+ case CEC_ADAP_G_PHYS_ADDR:
+ return cec_adap_g_phys_addr(adap, parg);
+
+ case CEC_ADAP_S_PHYS_ADDR:
+ return cec_adap_s_phys_addr(adap, fh, block, parg);
+
+ case CEC_ADAP_G_LOG_ADDRS:
+ return cec_adap_g_log_addrs(adap, parg);
+
+ case CEC_ADAP_S_LOG_ADDRS:
+ return cec_adap_s_log_addrs(adap, fh, block, parg);
+
+ case CEC_TRANSMIT:
+ return cec_transmit(adap, fh, block, parg);
+
+ case CEC_RECEIVE:
+ return cec_receive(adap, fh, block, parg);
+
+ case CEC_DQEVENT:
+ return cec_dqevent(adap, fh, block, parg);
+
+ case CEC_G_MODE:
+ return cec_g_mode(adap, fh, parg);
+
+ case CEC_S_MODE:
+ return cec_s_mode(adap, fh, parg);
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+static int cec_open(struct inode *inode, struct file *filp)
+{
+ struct cec_devnode *devnode =
+ container_of(inode->i_cdev, struct cec_devnode, cdev);
+ struct cec_adapter *adap = to_cec_adapter(devnode);
+ struct cec_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+ /*
+ * Initial events that are automatically sent when the cec device is
+ * opened.
+ */
+ struct cec_event ev_state = {
+ .event = CEC_EVENT_STATE_CHANGE,
+ .flags = CEC_EVENT_FL_INITIAL_STATE,
+ };
+ int err;
+
+ if (!fh)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&fh->msgs);
+ INIT_LIST_HEAD(&fh->xfer_list);
+ mutex_init(&fh->lock);
+ init_waitqueue_head(&fh->wait);
+
+ fh->mode_initiator = CEC_MODE_INITIATOR;
+ fh->adap = adap;
+
+ err = cec_get_device(devnode);
+ if (err) {
+ kfree(fh);
+ return err;
+ }
+
+ filp->private_data = fh;
+
+ mutex_lock(&devnode->lock);
+ /* Queue up initial state events */
+ ev_state.state_change.phys_addr = adap->phys_addr;
+ ev_state.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
+ cec_queue_event_fh(fh, &ev_state, 0);
+
+ list_add(&fh->list, &devnode->fhs);
+ mutex_unlock(&devnode->lock);
+
+ return 0;
+}
+
+/* Override for the release function */
+static int cec_release(struct inode *inode, struct file *filp)
+{
+ struct cec_devnode *devnode = cec_devnode_data(filp);
+ struct cec_adapter *adap = to_cec_adapter(devnode);
+ struct cec_fh *fh = filp->private_data;
+
+ mutex_lock(&adap->lock);
+ if (adap->cec_initiator == fh)
+ adap->cec_initiator = NULL;
+ if (adap->cec_follower == fh) {
+ adap->cec_follower = NULL;
+ adap->passthrough = false;
+ }
+ if (fh->mode_follower == CEC_MODE_FOLLOWER)
+ adap->follower_cnt--;
+ if (fh->mode_follower == CEC_MODE_MONITOR_ALL)
+ cec_monitor_all_cnt_dec(adap);
+ mutex_unlock(&adap->lock);
+
+ mutex_lock(&devnode->lock);
+ list_del(&fh->list);
+ mutex_unlock(&devnode->lock);
+
+ /* Unhook pending transmits from this filehandle. */
+ mutex_lock(&adap->lock);
+ while (!list_empty(&fh->xfer_list)) {
+ struct cec_data *data =
+ list_first_entry(&fh->xfer_list, struct cec_data, xfer_list);
+
+ data->blocking = false;
+ data->fh = NULL;
+ list_del(&data->xfer_list);
+ }
+ mutex_unlock(&adap->lock);
+ while (!list_empty(&fh->msgs)) {
+ struct cec_msg_entry *entry =
+ list_first_entry(&fh->msgs, struct cec_msg_entry, list);
+
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ kfree(fh);
+
+ cec_put_device(devnode);
+ filp->private_data = NULL;
+ return 0;
+}
+
+const struct file_operations cec_devnode_fops = {
+ .owner = THIS_MODULE,
+ .open = cec_open,
+ .unlocked_ioctl = cec_ioctl,
+ .release = cec_release,
+ .poll = cec_poll,
+ .llseek = no_llseek,
+};
diff --git a/drivers/staging/media/cec/cec-core.c b/drivers/staging/media/cec/cec-core.c
new file mode 100644
index 000000000..3b1e4d2b1
--- /dev/null
+++ b/drivers/staging/media/cec/cec-core.c
@@ -0,0 +1,412 @@
+/*
+ * cec-core.c - HDMI Consumer Electronics Control framework - Core
+ *
+ * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kmod.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "cec-priv.h"
+
+#define CEC_NUM_DEVICES 256
+#define CEC_NAME "cec"
+
+int cec_debug;
+module_param_named(debug, cec_debug, int, 0644);
+MODULE_PARM_DESC(debug, "debug level (0-2)");
+
+static dev_t cec_dev_t;
+
+/* Active devices */
+static DEFINE_MUTEX(cec_devnode_lock);
+static DECLARE_BITMAP(cec_devnode_nums, CEC_NUM_DEVICES);
+
+static struct dentry *top_cec_dir;
+
+/* dev to cec_devnode */
+#define to_cec_devnode(cd) container_of(cd, struct cec_devnode, dev)
+
+int cec_get_device(struct cec_devnode *devnode)
+{
+ /*
+ * Check if the cec device is available. This needs to be done with
+ * the devnode->lock held to prevent an open/unregister race:
+ * without the lock, the device could be unregistered and freed between
+ * the devnode->registered check and get_device() calls, leading to
+ * a crash.
+ */
+ mutex_lock(&devnode->lock);
+ /*
+ * return ENXIO if the cec device has been removed
+ * already or if it is not registered anymore.
+ */
+ if (!devnode->registered) {
+ mutex_unlock(&devnode->lock);
+ return -ENXIO;
+ }
+ /* and increase the device refcount */
+ get_device(&devnode->dev);
+ mutex_unlock(&devnode->lock);
+ return 0;
+}
+
+void cec_put_device(struct cec_devnode *devnode)
+{
+ put_device(&devnode->dev);
+}
+
+/* Called when the last user of the cec device exits. */
+static void cec_devnode_release(struct device *cd)
+{
+ struct cec_devnode *devnode = to_cec_devnode(cd);
+
+ mutex_lock(&cec_devnode_lock);
+ /* Mark device node number as free */
+ clear_bit(devnode->minor, cec_devnode_nums);
+ mutex_unlock(&cec_devnode_lock);
+
+ cec_delete_adapter(to_cec_adapter(devnode));
+}
+
+static struct bus_type cec_bus_type = {
+ .name = CEC_NAME,
+};
+
+/*
+ * Register a cec device node
+ *
+ * The registration code assigns a minor number and registers the new device node
+ * with the kernel. An error is returned if no free minor number can be found,
+ * or if the registration of the device node fails.
+ *
+ * Zero is returned on success.
+ *
+ * Note that if the cec_devnode_register call fails, the release() callback of
+ * the cec_devnode structure is *not* called, so the caller is responsible for
+ * freeing any data.
+ */
+static int __must_check cec_devnode_register(struct cec_devnode *devnode,
+ struct module *owner)
+{
+ int minor;
+ int ret;
+
+ /* Initialization */
+ INIT_LIST_HEAD(&devnode->fhs);
+ mutex_init(&devnode->lock);
+
+ /* Part 1: Find a free minor number */
+ mutex_lock(&cec_devnode_lock);
+ minor = find_next_zero_bit(cec_devnode_nums, CEC_NUM_DEVICES, 0);
+ if (minor == CEC_NUM_DEVICES) {
+ mutex_unlock(&cec_devnode_lock);
+ pr_err("could not get a free minor\n");
+ return -ENFILE;
+ }
+
+ set_bit(minor, cec_devnode_nums);
+ mutex_unlock(&cec_devnode_lock);
+
+ devnode->minor = minor;
+ devnode->dev.bus = &cec_bus_type;
+ devnode->dev.devt = MKDEV(MAJOR(cec_dev_t), minor);
+ devnode->dev.release = cec_devnode_release;
+ devnode->dev.parent = devnode->parent;
+ dev_set_name(&devnode->dev, "cec%d", devnode->minor);
+ device_initialize(&devnode->dev);
+
+ /* Part 2: Initialize and register the character device */
+ cdev_init(&devnode->cdev, &cec_devnode_fops);
+ devnode->cdev.kobj.parent = &devnode->dev.kobj;
+ devnode->cdev.owner = owner;
+
+ ret = cdev_add(&devnode->cdev, devnode->dev.devt, 1);
+ if (ret < 0) {
+ pr_err("%s: cdev_add failed\n", __func__);
+ goto clr_bit;
+ }
+
+ ret = device_add(&devnode->dev);
+ if (ret)
+ goto cdev_del;
+
+ devnode->registered = true;
+ return 0;
+
+cdev_del:
+ cdev_del(&devnode->cdev);
+clr_bit:
+ mutex_lock(&cec_devnode_lock);
+ clear_bit(devnode->minor, cec_devnode_nums);
+ mutex_unlock(&cec_devnode_lock);
+ return ret;
+}
+
+/*
+ * Unregister a cec device node
+ *
+ * This unregisters the passed device. Future open calls will be met with
+ * errors.
+ *
+ * This function can safely be called if the device node has never been
+ * registered or has already been unregistered.
+ */
+static void cec_devnode_unregister(struct cec_devnode *devnode)
+{
+ struct cec_fh *fh;
+
+ mutex_lock(&devnode->lock);
+
+ /* Check if devnode was never registered or already unregistered */
+ if (!devnode->registered || devnode->unregistered) {
+ mutex_unlock(&devnode->lock);
+ return;
+ }
+
+ list_for_each_entry(fh, &devnode->fhs, list)
+ wake_up_interruptible(&fh->wait);
+
+ devnode->registered = false;
+ devnode->unregistered = true;
+ mutex_unlock(&devnode->lock);
+
+ device_del(&devnode->dev);
+ cdev_del(&devnode->cdev);
+ put_device(&devnode->dev);
+}
+
+struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
+ void *priv, const char *name, u32 caps,
+ u8 available_las, struct device *parent)
+{
+ struct cec_adapter *adap;
+ int res;
+
+ if (WARN_ON(!parent))
+ return ERR_PTR(-EINVAL);
+ if (WARN_ON(!caps))
+ return ERR_PTR(-EINVAL);
+ if (WARN_ON(!ops))
+ return ERR_PTR(-EINVAL);
+ if (WARN_ON(!available_las || available_las > CEC_MAX_LOG_ADDRS))
+ return ERR_PTR(-EINVAL);
+ adap = kzalloc(sizeof(*adap), GFP_KERNEL);
+ if (!adap)
+ return ERR_PTR(-ENOMEM);
+ adap->owner = parent->driver->owner;
+ adap->devnode.parent = parent;
+ strlcpy(adap->name, name, sizeof(adap->name));
+ adap->phys_addr = CEC_PHYS_ADDR_INVALID;
+ adap->log_addrs.cec_version = CEC_OP_CEC_VERSION_2_0;
+ adap->log_addrs.vendor_id = CEC_VENDOR_ID_NONE;
+ adap->capabilities = caps;
+ adap->available_log_addrs = available_las;
+ adap->sequence = 0;
+ adap->ops = ops;
+ adap->priv = priv;
+ memset(adap->phys_addrs, 0xff, sizeof(adap->phys_addrs));
+ mutex_init(&adap->lock);
+ INIT_LIST_HEAD(&adap->transmit_queue);
+ INIT_LIST_HEAD(&adap->wait_queue);
+ init_waitqueue_head(&adap->kthread_waitq);
+
+ adap->kthread = kthread_run(cec_thread_func, adap, "cec-%s", name);
+ if (IS_ERR(adap->kthread)) {
+ pr_err("cec-%s: kernel_thread() failed\n", name);
+ res = PTR_ERR(adap->kthread);
+ kfree(adap);
+ return ERR_PTR(res);
+ }
+
+ if (!(caps & CEC_CAP_RC))
+ return adap;
+
+#if IS_REACHABLE(CONFIG_RC_CORE)
+ /* Prepare the RC input device */
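+ /* Note: the RC device is only registered in cec_register_adapter() */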
+ adap->rc = rc_allocate_device();
+ if (!adap->rc) {
+ pr_err("cec-%s: failed to allocate memory for rc_dev\n",
+ name);
+ kthread_stop(adap->kthread);
+ kfree(adap);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ snprintf(adap->input_name, sizeof(adap->input_name),
+ "RC for %s", name);
+ snprintf(adap->input_phys, sizeof(adap->input_phys),
+ "%s/input0", name);
+
+ adap->rc->input_name = adap->input_name;
+ adap->rc->input_phys = adap->input_phys;
+ adap->rc->input_id.bustype = BUS_CEC;
+ adap->rc->input_id.vendor = 0;
+ adap->rc->input_id.product = 0;
+ adap->rc->input_id.version = 1;
+ adap->rc->dev.parent = parent;
+ adap->rc->driver_type = RC_DRIVER_SCANCODE;
+ adap->rc->driver_name = CEC_NAME;
+ adap->rc->allowed_protocols = RC_BIT_CEC;
+ adap->rc->priv = adap;
+ adap->rc->map_name = RC_MAP_CEC;
+ adap->rc->timeout = MS_TO_NS(100);
+#else
+ adap->capabilities &= ~CEC_CAP_RC;
+#endif
+ return adap;
+}
+EXPORT_SYMBOL_GPL(cec_allocate_adapter);
+
+int cec_register_adapter(struct cec_adapter *adap)
+{
+ int res;
+
+ if (IS_ERR_OR_NULL(adap))
+ return 0;
+
+#if IS_REACHABLE(CONFIG_RC_CORE)
+ if (adap->capabilities & CEC_CAP_RC) {
+ res = rc_register_device(adap->rc);
+
+ if (res) {
+ pr_err("cec-%s: failed to prepare input device\n",
+ adap->name);
+ rc_free_device(adap->rc);
+ adap->rc = NULL;
+ return res;
+ }
+ }
+#endif
+
+ res = cec_devnode_register(&adap->devnode, adap->owner);
+ if (res) {
+#if IS_REACHABLE(CONFIG_RC_CORE)
+ /* Note: rc_unregister also calls rc_free */
+ rc_unregister_device(adap->rc);
+ adap->rc = NULL;
+#endif
+ return res;
+ }
+
+ dev_set_drvdata(&adap->devnode.dev, adap);
+#ifdef CONFIG_MEDIA_CEC_DEBUG
+ if (!top_cec_dir)
+ return 0;
+
+ adap->cec_dir = debugfs_create_dir(dev_name(&adap->devnode.dev), top_cec_dir);
+ if (IS_ERR_OR_NULL(adap->cec_dir)) {
+ pr_warn("cec-%s: Failed to create debugfs dir\n", adap->name);
+ return 0;
+ }
+ adap->status_file = debugfs_create_devm_seqfile(&adap->devnode.dev,
+ "status", adap->cec_dir, cec_adap_status);
+ if (IS_ERR_OR_NULL(adap->status_file)) {
+ pr_warn("cec-%s: Failed to create status file\n", adap->name);
+ debugfs_remove_recursive(adap->cec_dir);
+ adap->cec_dir = NULL;
+ }
+#endif
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cec_register_adapter);
+
+void cec_unregister_adapter(struct cec_adapter *adap)
+{
+ if (IS_ERR_OR_NULL(adap))
+ return;
+
+#if IS_REACHABLE(CONFIG_RC_CORE)
+ /* Note: rc_unregister also calls rc_free */
+ rc_unregister_device(adap->rc);
+ adap->rc = NULL;
+#endif
+ debugfs_remove_recursive(adap->cec_dir);
+ cec_devnode_unregister(&adap->devnode);
+}
+EXPORT_SYMBOL_GPL(cec_unregister_adapter);
+
+void cec_delete_adapter(struct cec_adapter *adap)
+{
+ if (IS_ERR_OR_NULL(adap))
+ return;
+ mutex_lock(&adap->lock);
+ __cec_s_phys_addr(adap, CEC_PHYS_ADDR_INVALID, false);
+ mutex_unlock(&adap->lock);
+ kthread_stop(adap->kthread);
+ if (adap->kthread_config)
+ kthread_stop(adap->kthread_config);
+#if IS_REACHABLE(CONFIG_RC_CORE)
+ if (adap->rc)
+ rc_free_device(adap->rc);
+#endif
+ kfree(adap);
+}
+EXPORT_SYMBOL_GPL(cec_delete_adapter);
+
+/*
+ * Initialise the CEC devnode framework for Linux
+ */
+static int __init cec_devnode_init(void)
+{
+ int ret;
+
+ pr_info("Linux cec interface: v0.10\n");
+ ret = alloc_chrdev_region(&cec_dev_t, 0, CEC_NUM_DEVICES,
+ CEC_NAME);
+ if (ret < 0) {
+ pr_warn("cec: unable to allocate major\n");
+ return ret;
+ }
+
+#ifdef CONFIG_MEDIA_CEC_DEBUG
+ top_cec_dir = debugfs_create_dir("cec", NULL);
+ if (IS_ERR_OR_NULL(top_cec_dir)) {
+ pr_warn("cec: Failed to create debugfs cec dir\n");
+ top_cec_dir = NULL;
+ }
+#endif
+
+ ret = bus_register(&cec_bus_type);
+ if (ret < 0) {
+ unregister_chrdev_region(cec_dev_t, CEC_NUM_DEVICES);
+ pr_warn("cec: bus_register failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void __exit cec_devnode_exit(void)
+{
+ debugfs_remove_recursive(top_cec_dir);
+ bus_unregister(&cec_bus_type);
+ unregister_chrdev_region(cec_dev_t, CEC_NUM_DEVICES);
+}
+
+subsys_initcall(cec_devnode_init);
+module_exit(cec_devnode_exit);
+
+MODULE_AUTHOR("Hans Verkuil <hans.verkuil@cisco.com>");
+MODULE_DESCRIPTION("Device node registration for cec drivers");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/cec/cec-priv.h b/drivers/staging/media/cec/cec-priv.h
new file mode 100644
index 000000000..70767a790
--- /dev/null
+++ b/drivers/staging/media/cec/cec-priv.h
@@ -0,0 +1,56 @@
+/*
+ * cec-priv.h - HDMI Consumer Electronics Control internal header
+ *
+ * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _CEC_PRIV_H
+#define _CEC_PRIV_H
+
+#include <linux/cec-funcs.h>
+#include <media/cec.h>
+
+#define dprintk(lvl, fmt, arg...) \
+ do { \
+ if (lvl <= cec_debug) \
+ pr_info("cec-%s: " fmt, adap->name, ## arg); \
+ } while (0)
+
+/* devnode to cec_adapter */
+#define to_cec_adapter(node) container_of(node, struct cec_adapter, devnode)
+
+/* cec-core.c */
+extern int cec_debug;
+int cec_get_device(struct cec_devnode *devnode);
+void cec_put_device(struct cec_devnode *devnode);
+
+/* cec-adap.c */
+int cec_monitor_all_cnt_inc(struct cec_adapter *adap);
+void cec_monitor_all_cnt_dec(struct cec_adapter *adap);
+int cec_adap_status(struct seq_file *file, void *priv);
+int cec_thread_func(void *_adap);
+void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block);
+int __cec_s_log_addrs(struct cec_adapter *adap,
+ struct cec_log_addrs *log_addrs, bool block);
+int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
+ struct cec_fh *fh, bool block);
+void cec_queue_event_fh(struct cec_fh *fh,
+ const struct cec_event *new_ev, u64 ts);
+
+/* cec-api.c */
+extern const struct file_operations cec_devnode_fops;
+
+#endif
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index ea3ddec75..3319fb8f7 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -542,7 +542,6 @@ static int vpfe_release(struct file *file)
video->io_usrs = 0;
/* Free buffers allocated */
vb2_queue_release(&video->buffer_queue);
- vb2_dma_contig_cleanup_ctx(video->alloc_ctx);
}
/* Decrement device users counter */
video->usrs--;
@@ -1092,7 +1091,7 @@ vpfe_g_dv_timings(struct file *file, void *fh,
* @nbuffers: ptr to number of buffers requested by application
* @nplanes:: contains number of distinct video planes needed to hold a frame
* @sizes[]: contains the size (in bytes) of each plane.
- * @alloc_ctxs: ptr to allocation context
+ * @alloc_devs: ptr to allocation context
*
* This callback function is called when reqbuf() is called to adjust
* the buffer nbuffers and buffer size
@@ -1100,7 +1099,7 @@ vpfe_g_dv_timings(struct file *file, void *fh,
static int
vpfe_buffer_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct vpfe_fh *fh = vb2_get_drv_priv(vq);
struct vpfe_video_device *video = fh->video;
@@ -1115,7 +1114,6 @@ vpfe_buffer_queue_setup(struct vb2_queue *vq,
*nplanes = 1;
sizes[0] = size;
- alloc_ctxs[0] = video->alloc_ctx;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
"nbuffers=%d, size=%lu\n", *nbuffers, size);
return 0;
@@ -1350,12 +1348,6 @@ static int vpfe_reqbufs(struct file *file, void *priv,
video->memory = req_buf->memory;
/* Initialize videobuf2 queue as per the buffer type */
- video->alloc_ctx = vb2_dma_contig_init_ctx(vpfe_dev->pdev);
- if (IS_ERR(video->alloc_ctx)) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Failed to get the context\n");
- return PTR_ERR(video->alloc_ctx);
- }
-
q = &video->buffer_queue;
q->type = req_buf->type;
q->io_modes = VB2_MMAP | VB2_USERPTR;
@@ -1365,11 +1357,11 @@ static int vpfe_reqbufs(struct file *file, void *priv,
q->mem_ops = &vb2_dma_contig_memops;
q->buf_struct_size = sizeof(struct vpfe_cap_buffer);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->dev = vpfe_dev->pdev;
ret = vb2_queue_init(q);
if (ret) {
v4l2_err(&vpfe_dev->v4l2_dev, "vb2_queue_init() failed\n");
- vb2_dma_contig_cleanup_ctx(vpfe_dev->pdev);
return ret;
}
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.h b/drivers/staging/media/davinci_vpfe/vpfe_video.h
index 653334d53..aaec4403d 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.h
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.h
@@ -123,8 +123,6 @@ struct vpfe_video_device {
/* Used to store pixel format */
struct v4l2_format fmt;
struct vb2_queue buffer_queue;
- /* allocator-specific contexts for each plane */
- struct vb2_alloc_ctx *alloc_ctx;
/* Queue of filled frames */
struct list_head dma_queue;
spinlock_t irqlock;
diff --git a/drivers/staging/media/lirc/lirc_parallel.c b/drivers/staging/media/lirc/lirc_parallel.c
index 68ede6c56..3906ac6e6 100644
--- a/drivers/staging/media/lirc/lirc_parallel.c
+++ b/drivers/staging/media/lirc/lirc_parallel.c
@@ -305,9 +305,9 @@ static void lirc_lirc_irq_handler(void *blah)
/* enable interrupt */
/*
- enable_irq(irq);
- out(LIRC_PORT_IRQ, in(LIRC_PORT_IRQ)|LP_PINTEN);
- */
+ * enable_irq(irq);
+ * out(LIRC_PORT_IRQ, in(LIRC_PORT_IRQ)|LP_PINTEN);
+ */
}
/*** file operations ***/
@@ -620,7 +620,7 @@ static void kf(void *handle)
lirc_off();
/* this is a bit annoying when you actually print...*/
/*
- printk(KERN_INFO "%s: reclaimed port\n", LIRC_DRIVER_NAME);
+ * printk(KERN_INFO "%s: reclaimed port\n", LIRC_DRIVER_NAME);
*/
}
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index cf8da2355..90b7ff567 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -298,7 +298,7 @@ iss_video_check_format(struct iss_video *video, struct iss_video_fh *vfh)
static int iss_video_queue_setup(struct vb2_queue *vq,
unsigned int *count, unsigned int *num_planes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct iss_video_fh *vfh = vb2_get_drv_priv(vq);
struct iss_video *video = vfh->video;
@@ -310,8 +310,6 @@ static int iss_video_queue_setup(struct vb2_queue *vq,
if (sizes[0] == 0)
return -EINVAL;
- alloc_ctxs[0] = video->alloc_ctx;
-
*count = min(*count, video->capture_mem / PAGE_ALIGN(sizes[0]));
return 0;
@@ -1017,13 +1015,6 @@ static int iss_video_open(struct file *file)
goto done;
}
- video->alloc_ctx = vb2_dma_contig_init_ctx(video->iss->dev);
- if (IS_ERR(video->alloc_ctx)) {
- ret = PTR_ERR(video->alloc_ctx);
- omap4iss_put(video->iss);
- goto done;
- }
-
q = &handle->queue;
q->type = video->type;
@@ -1033,6 +1024,7 @@ static int iss_video_open(struct file *file)
q->mem_ops = &vb2_dma_contig_memops;
q->buf_struct_size = sizeof(struct iss_buffer);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->dev = video->iss->dev;
ret = vb2_queue_init(q);
if (ret) {
diff --git a/drivers/staging/media/omap4iss/iss_video.h b/drivers/staging/media/omap4iss/iss_video.h
index c8bd2958a..d7e05d045 100644
--- a/drivers/staging/media/omap4iss/iss_video.h
+++ b/drivers/staging/media/omap4iss/iss_video.h
@@ -170,7 +170,6 @@ struct iss_video {
spinlock_t qlock; /* protects dmaqueue and error */
struct list_head dmaqueue;
enum iss_video_dmaqueue_flags dmaqueue_flags;
- struct vb2_alloc_ctx *alloc_ctx;
const struct iss_video_operations *ops;
};
diff --git a/drivers/staging/media/pulse8-cec/Kconfig b/drivers/staging/media/pulse8-cec/Kconfig
new file mode 100644
index 000000000..c6aa2d1c9
--- /dev/null
+++ b/drivers/staging/media/pulse8-cec/Kconfig
@@ -0,0 +1,10 @@
+config USB_PULSE8_CEC
+ tristate "Pulse Eight HDMI CEC"
+ depends on USB_ACM && MEDIA_CEC
+ select SERIO
+ select SERIO_SERPORT
+ ---help---
+	  This is a CEC driver for the Pulse Eight HDMI CEC device.
+
+ To compile this driver as a module, choose M here: the
+ module will be called pulse8-cec.
diff --git a/drivers/staging/media/pulse8-cec/Makefile b/drivers/staging/media/pulse8-cec/Makefile
new file mode 100644
index 000000000..9800690bc
--- /dev/null
+++ b/drivers/staging/media/pulse8-cec/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_USB_PULSE8_CEC) += pulse8-cec.o
diff --git a/drivers/staging/media/pulse8-cec/TODO b/drivers/staging/media/pulse8-cec/TODO
new file mode 100644
index 000000000..fa6660245
--- /dev/null
+++ b/drivers/staging/media/pulse8-cec/TODO
@@ -0,0 +1,52 @@
+This driver needs to mature a bit more and go through another round
+of code cleanups.
+
+Otherwise it looks to be in good shape. And of course the fact
+that the CEC framework is in staging at the moment also prevents
+this driver from being mainlined.
+
+Some notes:
+
+1) Regarding the "autonomous" mode of the Pulse-Eight: currently this
+is disabled, but the idea is that it allows basic functionality
+when the PC is off, and it can wake up the PC through USB.
+
+To prevent the device from going into autonomous mode the driver would
+have to send MSGCODE_SET_CONTROLLED 1 and then send a ping every
+30 seconds (in practice once every 15 seconds would be good). When
+powering off or going to standby, send MSGCODE_SET_CONTROLLED 0 to
+turn autonomous mode back on.
+
+This still needs to be implemented in the driver (see the sketch
+below). Autonomous mode was added in firmware v2.
+
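+As a rough sketch, assuming a new struct delayed_work member (ping_work)
+is added to struct pulse8; the handler name and PING_PERIOD below are
+illustrative only:
+
+	#define PING_PERIOD	(15 * HZ)
+
+	static void pulse8_ping_work_handler(struct work_struct *work)
+	{
+		struct pulse8 *pulse8 =
+			container_of(work, struct pulse8, ping_work.work);
+		u8 cmd = MSGCODE_PING;
+
+		/* ping so the dongle stays in controlled (non-autonomous) mode */
+		pulse8_send_and_wait(pulse8, &cmd, 1,
+				     MSGCODE_COMMAND_ACCEPTED, 0);
+		schedule_delayed_work(&pulse8->ping_work, PING_PERIOD);
+	}
+
+The work would be set up with INIT_DELAYED_WORK() and a first
+schedule_delayed_work() in connect(), and stopped with
+cancel_delayed_work_sync() in disconnect().
+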
+2) Writing to the EEPROM can only be done once every 10 seconds.
+
+3) To use this driver you also need to patch the inputattach utility;
+this patch will be submitted once this driver is moved out of staging.
+
+diff -urN linuxconsoletools-1.4.9/utils/inputattach.c linuxconsoletools-1.4.9.new/utils/inputattach.c
+--- linuxconsoletools-1.4.9/utils/inputattach.c 2016-01-09 16:27:02.000000000 +0100
++++ linuxconsoletools-1.4.9.new/utils/inputattach.c 2016-03-20 11:35:31.707788967 +0100
+@@ -861,6 +861,9 @@
+ { "--wacom_iv", "-wacom_iv", "Wacom protocol IV tablet",
+ B9600, CS8 | CRTSCTS,
+ SERIO_WACOM_IV, 0x00, 0x00, 0, wacom_iv_init },
++{ "--pulse8-cec", "-pulse8-cec", "Pulse Eight HDMI CEC dongle",
++ B9600, CS8,
++ SERIO_PULSE8_CEC, 0x00, 0x00, 0, NULL },
+ { NULL, NULL, NULL, 0, 0, 0, 0, 0, 0, NULL }
+ };
+
+diff -urN linuxconsoletools-1.4.9/utils/serio-ids.h linuxconsoletools-1.4.9.new/utils/serio-ids.h
+--- linuxconsoletools-1.4.9/utils/serio-ids.h 2015-04-26 18:29:42.000000000 +0200
++++ linuxconsoletools-1.4.9.new/utils/serio-ids.h 2016-03-20 11:41:00.153558539 +0100
+@@ -131,5 +131,8 @@
+ #ifndef SERIO_EASYPEN
+ # define SERIO_EASYPEN 0x3f
+ #endif
++#ifndef SERIO_PULSE8_CEC
++# define SERIO_PULSE8_CEC 0x40
++#endif
+
+ #endif
diff --git a/drivers/staging/media/pulse8-cec/pulse8-cec.c b/drivers/staging/media/pulse8-cec/pulse8-cec.c
new file mode 100644
index 000000000..ed8bd95ad
--- /dev/null
+++ b/drivers/staging/media/pulse8-cec/pulse8-cec.c
@@ -0,0 +1,505 @@
+/*
+ * Pulse Eight HDMI CEC driver
+ *
+ * Copyright 2016 Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version. See the file COPYING in the main directory of
+ * this archive for more details.
+ */
+
+#include <linux/completion.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/serio.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+
+#include <media/cec.h>
+
+MODULE_AUTHOR("Hans Verkuil <hverkuil@xs4all.nl>");
+MODULE_DESCRIPTION("Pulse Eight HDMI CEC driver");
+MODULE_LICENSE("GPL");
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "debug level (0-1)");
+
+enum pulse8_msgcodes {
+ MSGCODE_NOTHING = 0,
+ MSGCODE_PING,
+ MSGCODE_TIMEOUT_ERROR,
+ MSGCODE_HIGH_ERROR,
+ MSGCODE_LOW_ERROR,
+ MSGCODE_FRAME_START,
+ MSGCODE_FRAME_DATA,
+ MSGCODE_RECEIVE_FAILED,
+ MSGCODE_COMMAND_ACCEPTED, /* 0x08 */
+ MSGCODE_COMMAND_REJECTED,
+ MSGCODE_SET_ACK_MASK,
+ MSGCODE_TRANSMIT,
+ MSGCODE_TRANSMIT_EOM,
+ MSGCODE_TRANSMIT_IDLETIME,
+ MSGCODE_TRANSMIT_ACK_POLARITY,
+ MSGCODE_TRANSMIT_LINE_TIMEOUT,
+ MSGCODE_TRANSMIT_SUCCEEDED, /* 0x10 */
+ MSGCODE_TRANSMIT_FAILED_LINE,
+ MSGCODE_TRANSMIT_FAILED_ACK,
+ MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA,
+ MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE,
+ MSGCODE_FIRMWARE_VERSION,
+ MSGCODE_START_BOOTLOADER,
+ MSGCODE_GET_BUILDDATE,
+ MSGCODE_SET_CONTROLLED, /* 0x18 */
+ MSGCODE_GET_AUTO_ENABLED,
+ MSGCODE_SET_AUTO_ENABLED,
+ MSGCODE_GET_DEFAULT_LOGICAL_ADDRESS,
+ MSGCODE_SET_DEFAULT_LOGICAL_ADDRESS,
+ MSGCODE_GET_LOGICAL_ADDRESS_MASK,
+ MSGCODE_SET_LOGICAL_ADDRESS_MASK,
+ MSGCODE_GET_PHYSICAL_ADDRESS,
+ MSGCODE_SET_PHYSICAL_ADDRESS, /* 0x20 */
+ MSGCODE_GET_DEVICE_TYPE,
+ MSGCODE_SET_DEVICE_TYPE,
+ MSGCODE_GET_HDMI_VERSION,
+ MSGCODE_SET_HDMI_VERSION,
+ MSGCODE_GET_OSD_NAME,
+ MSGCODE_SET_OSD_NAME,
+ MSGCODE_WRITE_EEPROM,
+ MSGCODE_GET_ADAPTER_TYPE, /* 0x28 */
+ MSGCODE_SET_ACTIVE_SOURCE,
+
+ MSGCODE_FRAME_EOM = 0x80,
+ MSGCODE_FRAME_ACK = 0x40,
+};
+
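+/*
+ * MSGCODE_FRAME_EOM and MSGCODE_FRAME_ACK are flag bits that the dongle
+ * ORs into the message code byte; masking with 0x3f recovers the plain
+ * message code.
+ */
+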
+#define MSGSTART 0xff
+#define MSGEND 0xfe
+#define MSGESC 0xfd
+#define MSGOFFSET 3
+
+#define DATA_SIZE 256
+
+struct pulse8 {
+ struct device *dev;
+ struct serio *serio;
+ struct cec_adapter *adap;
+ struct completion cmd_done;
+ struct work_struct work;
+ struct cec_msg rx_msg;
+ u8 data[DATA_SIZE];
+ unsigned int len;
+ u8 buf[DATA_SIZE];
+ unsigned int idx;
+ bool escape;
+ bool started;
+};
+
+static void pulse8_irq_work_handler(struct work_struct *work)
+{
+ struct pulse8 *pulse8 =
+ container_of(work, struct pulse8, work);
+
+ switch (pulse8->data[0] & 0x3f) {
+ case MSGCODE_FRAME_DATA:
+ cec_received_msg(pulse8->adap, &pulse8->rx_msg);
+ break;
+ case MSGCODE_TRANSMIT_SUCCEEDED:
+ cec_transmit_done(pulse8->adap, CEC_TX_STATUS_OK,
+ 0, 0, 0, 0);
+ break;
+ case MSGCODE_TRANSMIT_FAILED_ACK:
+ cec_transmit_done(pulse8->adap, CEC_TX_STATUS_NACK,
+ 0, 1, 0, 0);
+ break;
+ case MSGCODE_TRANSMIT_FAILED_LINE:
+ case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA:
+ case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE:
+ cec_transmit_done(pulse8->adap, CEC_TX_STATUS_ERROR,
+ 0, 0, 0, 1);
+ break;
+ }
+}
+
+static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data,
+ unsigned int flags)
+{
+ struct pulse8 *pulse8 = serio_get_drvdata(serio);
+
+ if (!pulse8->started && data != MSGSTART)
+ return IRQ_HANDLED;
+ if (data == MSGESC) {
+ pulse8->escape = true;
+ return IRQ_HANDLED;
+ }
+ if (pulse8->escape) {
+ data += MSGOFFSET;
+ pulse8->escape = false;
+ } else if (data == MSGEND) {
+ struct cec_msg *msg = &pulse8->rx_msg;
+
+ if (debug)
+ dev_info(pulse8->dev, "received: %*ph\n",
+ pulse8->idx, pulse8->buf);
+ pulse8->data[0] = pulse8->buf[0];
+ switch (pulse8->buf[0] & 0x3f) {
+ case MSGCODE_FRAME_START:
+ msg->len = 1;
+ msg->msg[0] = pulse8->buf[1];
+ break;
+ case MSGCODE_FRAME_DATA:
+ if (msg->len == CEC_MAX_MSG_SIZE)
+ break;
+ msg->msg[msg->len++] = pulse8->buf[1];
+ if (pulse8->buf[0] & MSGCODE_FRAME_EOM)
+ schedule_work(&pulse8->work);
+ break;
+ case MSGCODE_TRANSMIT_SUCCEEDED:
+ case MSGCODE_TRANSMIT_FAILED_LINE:
+ case MSGCODE_TRANSMIT_FAILED_ACK:
+ case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA:
+ case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE:
+ schedule_work(&pulse8->work);
+ break;
+ case MSGCODE_HIGH_ERROR:
+ case MSGCODE_LOW_ERROR:
+ case MSGCODE_RECEIVE_FAILED:
+ case MSGCODE_TIMEOUT_ERROR:
+ break;
+ case MSGCODE_COMMAND_ACCEPTED:
+ case MSGCODE_COMMAND_REJECTED:
+ default:
+ if (pulse8->idx == 0)
+ break;
+ memcpy(pulse8->data, pulse8->buf, pulse8->idx);
+ pulse8->len = pulse8->idx;
+ complete(&pulse8->cmd_done);
+ break;
+ }
+ pulse8->idx = 0;
+ pulse8->started = false;
+ return IRQ_HANDLED;
+ } else if (data == MSGSTART) {
+ pulse8->idx = 0;
+ pulse8->started = true;
+ return IRQ_HANDLED;
+ }
+
+ if (pulse8->idx >= DATA_SIZE) {
+ dev_dbg(pulse8->dev,
+ "throwing away %d bytes of garbage\n", pulse8->idx);
+ pulse8->idx = 0;
+ }
+ pulse8->buf[pulse8->idx++] = data;
+ return IRQ_HANDLED;
+}
+
+static void pulse8_disconnect(struct serio *serio)
+{
+ struct pulse8 *pulse8 = serio_get_drvdata(serio);
+
+ cec_unregister_adapter(pulse8->adap);
+ dev_info(&serio->dev, "disconnected\n");
+ serio_close(serio);
+ serio_set_drvdata(serio, NULL);
+ kfree(pulse8);
+}
+
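+/*
+ * Wire format: a command is framed as MSGSTART, payload, MSGEND. Any
+ * payload byte >= MSGESC is escaped as MSGESC followed by the byte minus
+ * MSGOFFSET (so 0xff goes out as 0xfd 0xfc); pulse8_interrupt() adds
+ * MSGOFFSET back when unescaping.
+ */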
+static int pulse8_send(struct serio *serio, const u8 *command, u8 cmd_len)
+{
+ int err = 0;
+
+ err = serio_write(serio, MSGSTART);
+ if (err)
+ return err;
+ for (; !err && cmd_len; command++, cmd_len--) {
+ if (*command >= MSGESC) {
+ err = serio_write(serio, MSGESC);
+ if (!err)
+ err = serio_write(serio, *command - MSGOFFSET);
+ } else {
+ err = serio_write(serio, *command);
+ }
+ }
+ if (!err)
+		err = serio_write(serio, MSGEND);
+
+ return err;
+}
+
+static int pulse8_send_and_wait(struct pulse8 *pulse8,
+ const u8 *cmd, u8 cmd_len, u8 response, u8 size)
+{
+ int err;
+
+ /*dev_info(pulse8->dev, "transmit: %*ph\n", cmd_len, cmd);*/
+ init_completion(&pulse8->cmd_done);
+
+ err = pulse8_send(pulse8->serio, cmd, cmd_len);
+ if (err)
+ return err;
+
+ if (!wait_for_completion_timeout(&pulse8->cmd_done, HZ))
+ return -ETIMEDOUT;
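+	/*
+	 * A rejected command usually means the dongle fell back to
+	 * autonomous mode; re-enter controlled mode and retry once.
+	 */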
+ if ((pulse8->data[0] & 0x3f) == MSGCODE_COMMAND_REJECTED &&
+ cmd[0] != MSGCODE_SET_CONTROLLED &&
+ cmd[0] != MSGCODE_SET_AUTO_ENABLED &&
+ cmd[0] != MSGCODE_GET_BUILDDATE) {
+ u8 cmd_sc[2];
+
+ cmd_sc[0] = MSGCODE_SET_CONTROLLED;
+ cmd_sc[1] = 1;
+ err = pulse8_send_and_wait(pulse8, cmd_sc, 2,
+ MSGCODE_COMMAND_ACCEPTED, 1);
+ if (err)
+ return err;
+ init_completion(&pulse8->cmd_done);
+
+ err = pulse8_send(pulse8->serio, cmd, cmd_len);
+ if (err)
+ return err;
+
+ if (!wait_for_completion_timeout(&pulse8->cmd_done, HZ))
+ return -ETIMEDOUT;
+ }
+ if (response &&
+ ((pulse8->data[0] & 0x3f) != response || pulse8->len < size + 1)) {
+ dev_info(pulse8->dev, "transmit: failed %02x\n",
+ pulse8->data[0] & 0x3f);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio)
+{
+ u8 *data = pulse8->data + 1;
+ unsigned int count = 0;
+ unsigned int vers = 0;
+ u8 cmd[2];
+ int err;
+
+ cmd[0] = MSGCODE_PING;
+ err = pulse8_send_and_wait(pulse8, cmd, 1,
+ MSGCODE_COMMAND_ACCEPTED, 0);
+ cmd[0] = MSGCODE_FIRMWARE_VERSION;
+ if (!err)
+ err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 2);
+ if (err)
+ return err;
+
+ vers = (data[0] << 8) | data[1];
+
+ dev_info(pulse8->dev, "Firmware version %04x\n", vers);
+ if (vers < 2)
+ return 0;
+
+ cmd[0] = MSGCODE_GET_BUILDDATE;
+ if (!err)
+ err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 4);
+ if (!err) {
+ time_t date = (data[0] << 24) | (data[1] << 16) |
+ (data[2] << 8) | data[3];
+ struct tm tm;
+
+ time_to_tm(date, 0, &tm);
+
+ dev_info(pulse8->dev, "Firmware build date %04ld.%02d.%02d %02d:%02d:%02d\n",
+ tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec);
+ }
+
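+	/*
+	 * Disable the dongle's autonomous mode and persist that in the
+	 * EEPROM, retrying a few times since EEPROM writes are slow and
+	 * rate-limited.
+	 */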
+ do {
+ if (count)
+ msleep(500);
+ cmd[0] = MSGCODE_SET_AUTO_ENABLED;
+ cmd[1] = 0;
+ err = pulse8_send_and_wait(pulse8, cmd, 2,
+ MSGCODE_COMMAND_ACCEPTED, 1);
+ if (err && count == 0) {
+ dev_info(pulse8->dev, "No Auto Enabled supported\n");
+ return 0;
+ }
+
+ cmd[0] = MSGCODE_GET_AUTO_ENABLED;
+ if (!err)
+ err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
+ if (!err && !data[0]) {
+ cmd[0] = MSGCODE_WRITE_EEPROM;
+ err = pulse8_send_and_wait(pulse8, cmd, 1,
+ MSGCODE_COMMAND_ACCEPTED, 1);
+ cmd[0] = MSGCODE_GET_AUTO_ENABLED;
+ if (!err)
+ err = pulse8_send_and_wait(pulse8, cmd, 1,
+ cmd[0], 1);
+ }
+ } while (!err && data[0] && count++ < 5);
+
+ if (!err && data[0])
+ err = -EIO;
+
+ return err;
+}
+
+static int pulse8_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct pulse8 *pulse8 = adap->priv;
+ u8 cmd[16];
+ int err;
+
+ cmd[0] = MSGCODE_SET_CONTROLLED;
+ cmd[1] = enable;
+ err = pulse8_send_and_wait(pulse8, cmd, 2,
+ MSGCODE_COMMAND_ACCEPTED, 1);
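+	/* when disabling, ignore errors: the dongle may already be gone */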
+ return enable ? err : 0;
+}
+
+static int pulse8_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
+{
+ struct pulse8 *pulse8 = adap->priv;
+ u16 mask = 0;
+ u8 cmd[3];
+ int err;
+
+ if (log_addr != CEC_LOG_ADDR_INVALID)
+ mask = 1 << log_addr;
+ cmd[0] = MSGCODE_SET_ACK_MASK;
+ cmd[1] = mask >> 8;
+ cmd[2] = mask & 0xff;
+ err = pulse8_send_and_wait(pulse8, cmd, 3,
+ MSGCODE_COMMAND_ACCEPTED, 0);
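+	/* clearing the ack mask (unconfiguring) is always reported as success */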
+ if (mask == 0)
+ return 0;
+ return err;
+}
+
+static int pulse8_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct pulse8 *pulse8 = adap->priv;
+ u8 cmd[2];
+ unsigned int i;
+ int err;
+
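+	/*
+	 * The dongle is fed the CEC message one byte per serial command:
+	 * MSGCODE_TRANSMIT for each byte and MSGCODE_TRANSMIT_EOM for the
+	 * final one, after setting the idle time and ack polarity.
+	 */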
+ cmd[0] = MSGCODE_TRANSMIT_IDLETIME;
+ cmd[1] = signal_free_time;
+ err = pulse8_send_and_wait(pulse8, cmd, 2,
+ MSGCODE_COMMAND_ACCEPTED, 1);
+ cmd[0] = MSGCODE_TRANSMIT_ACK_POLARITY;
+ cmd[1] = cec_msg_is_broadcast(msg);
+ if (!err)
+ err = pulse8_send_and_wait(pulse8, cmd, 2,
+ MSGCODE_COMMAND_ACCEPTED, 1);
+ cmd[0] = msg->len == 1 ? MSGCODE_TRANSMIT_EOM : MSGCODE_TRANSMIT;
+ cmd[1] = msg->msg[0];
+ if (!err)
+ err = pulse8_send_and_wait(pulse8, cmd, 2,
+ MSGCODE_COMMAND_ACCEPTED, 1);
+ if (!err && msg->len > 1) {
+ cmd[0] = msg->len == 2 ? MSGCODE_TRANSMIT_EOM :
+ MSGCODE_TRANSMIT;
+ cmd[1] = msg->msg[1];
+ err = pulse8_send_and_wait(pulse8, cmd, 2,
+ MSGCODE_COMMAND_ACCEPTED, 1);
+ for (i = 0; !err && i + 2 < msg->len; i++) {
+ cmd[0] = (i + 2 == msg->len - 1) ?
+ MSGCODE_TRANSMIT_EOM : MSGCODE_TRANSMIT;
+ cmd[1] = msg->msg[i + 2];
+ err = pulse8_send_and_wait(pulse8, cmd, 2,
+ MSGCODE_COMMAND_ACCEPTED, 1);
+ }
+ }
+
+ return err;
+}
+
+static int pulse8_received(struct cec_adapter *adap, struct cec_msg *msg)
+{
+ return -ENOMSG;
+}
+
+static const struct cec_adap_ops pulse8_cec_adap_ops = {
+ .adap_enable = pulse8_cec_adap_enable,
+ .adap_log_addr = pulse8_cec_adap_log_addr,
+ .adap_transmit = pulse8_cec_adap_transmit,
+ .received = pulse8_received,
+};
+
+static int pulse8_connect(struct serio *serio, struct serio_driver *drv)
+{
+ u32 caps = CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS | CEC_CAP_PHYS_ADDR |
+ CEC_CAP_PASSTHROUGH | CEC_CAP_RC | CEC_CAP_MONITOR_ALL;
+ struct pulse8 *pulse8;
+ int err = -ENOMEM;
+
+ pulse8 = kzalloc(sizeof(*pulse8), GFP_KERNEL);
+
+ if (!pulse8)
+ return -ENOMEM;
+
+ pulse8->serio = serio;
+ pulse8->adap = cec_allocate_adapter(&pulse8_cec_adap_ops, pulse8,
+ "HDMI CEC", caps, 1, &serio->dev);
+ err = PTR_ERR_OR_ZERO(pulse8->adap);
+ if (err < 0)
+ goto free_device;
+
+ pulse8->dev = &serio->dev;
+ serio_set_drvdata(serio, pulse8);
+ INIT_WORK(&pulse8->work, pulse8_irq_work_handler);
+
+ err = serio_open(serio, drv);
+ if (err)
+ goto delete_adap;
+
+ err = pulse8_setup(pulse8, serio);
+ if (err)
+ goto close_serio;
+
+ err = cec_register_adapter(pulse8->adap);
+ if (err < 0)
+ goto close_serio;
+
+ pulse8->dev = &pulse8->adap->devnode.dev;
+ return 0;
+
+close_serio:
+ serio_close(serio);
+delete_adap:
+ cec_delete_adapter(pulse8->adap);
+ serio_set_drvdata(serio, NULL);
+free_device:
+ kfree(pulse8);
+ return err;
+}
+
+static struct serio_device_id pulse8_serio_ids[] = {
+ {
+ .type = SERIO_RS232,
+ .proto = SERIO_PULSE8_CEC,
+ .id = SERIO_ANY,
+ .extra = SERIO_ANY,
+ },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(serio, pulse8_serio_ids);
+
+static struct serio_driver pulse8_drv = {
+ .driver = {
+ .name = "pulse8-cec",
+ },
+ .description = "Pulse Eight HDMI CEC driver",
+ .id_table = pulse8_serio_ids,
+ .interrupt = pulse8_interrupt,
+ .connect = pulse8_connect,
+ .disconnect = pulse8_disconnect,
+};
+
+module_serio_driver(pulse8_drv);
diff --git a/drivers/staging/media/s5p-cec/Kconfig b/drivers/staging/media/s5p-cec/Kconfig
new file mode 100644
index 000000000..0315fd7ad
--- /dev/null
+++ b/drivers/staging/media/s5p-cec/Kconfig
@@ -0,0 +1,9 @@
+config VIDEO_SAMSUNG_S5P_CEC
+ tristate "Samsung S5P CEC driver"
+ depends on VIDEO_DEV && MEDIA_CEC && (PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST)
+ ---help---
+	  This is a driver for the Samsung S5P HDMI CEC interface. It uses
+	  the generic CEC framework interface.
+	  The CEC bus is present in the HDMI connector and enables
+	  communication between compatible devices.
+
diff --git a/drivers/staging/media/s5p-cec/Makefile b/drivers/staging/media/s5p-cec/Makefile
new file mode 100644
index 000000000..0e2cf4578
--- /dev/null
+++ b/drivers/staging/media/s5p-cec/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_CEC) += s5p-cec.o
+s5p-cec-y += s5p_cec.o exynos_hdmi_cecctrl.o
diff --git a/drivers/staging/media/s5p-cec/TODO b/drivers/staging/media/s5p-cec/TODO
new file mode 100644
index 000000000..f51d5268a
--- /dev/null
+++ b/drivers/staging/media/s5p-cec/TODO
@@ -0,0 +1,7 @@
+This driver depends on the CEC framework, which is currently in
+staging, so this driver is in staging as well.
+
+In addition, this driver requires that userspace sets the physical
+address. However, this should be passed on from the corresponding
+Samsung HDMI driver. It is very annoying if userspace has to do this,
+and unlike USB CEC adapters this should be handled automatically.
diff --git a/drivers/staging/media/s5p-cec/exynos_hdmi_cec.h b/drivers/staging/media/s5p-cec/exynos_hdmi_cec.h
new file mode 100644
index 000000000..3e4fc7b05
--- /dev/null
+++ b/drivers/staging/media/s5p-cec/exynos_hdmi_cec.h
@@ -0,0 +1,38 @@
+/* drivers/media/platform/s5p-cec/exynos_hdmi_cec.h
+ *
+ * Copyright (c) 2010, 2014 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * Header file for interface of Samsung Exynos hdmi cec hardware
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _EXYNOS_HDMI_CEC_H_
+#define _EXYNOS_HDMI_CEC_H_ __FILE__
+
+#include <linux/regmap.h>
+#include <linux/miscdevice.h>
+#include "s5p_cec.h"
+
+void s5p_cec_set_divider(struct s5p_cec_dev *cec);
+void s5p_cec_enable_rx(struct s5p_cec_dev *cec);
+void s5p_cec_mask_rx_interrupts(struct s5p_cec_dev *cec);
+void s5p_cec_unmask_rx_interrupts(struct s5p_cec_dev *cec);
+void s5p_cec_mask_tx_interrupts(struct s5p_cec_dev *cec);
+void s5p_cec_unmask_tx_interrupts(struct s5p_cec_dev *cec);
+void s5p_cec_reset(struct s5p_cec_dev *cec);
+void s5p_cec_tx_reset(struct s5p_cec_dev *cec);
+void s5p_cec_rx_reset(struct s5p_cec_dev *cec);
+void s5p_cec_threshold(struct s5p_cec_dev *cec);
+void s5p_cec_copy_packet(struct s5p_cec_dev *cec, char *data,
+ size_t count, u8 retries);
+void s5p_cec_set_addr(struct s5p_cec_dev *cec, u32 addr);
+u32 s5p_cec_get_status(struct s5p_cec_dev *cec);
+void s5p_clr_pending_tx(struct s5p_cec_dev *cec);
+void s5p_clr_pending_rx(struct s5p_cec_dev *cec);
+void s5p_cec_get_rx_buf(struct s5p_cec_dev *cec, u32 size, u8 *buffer);
+
+#endif /* _EXYNOS_HDMI_CEC_H_ */
diff --git a/drivers/staging/media/s5p-cec/exynos_hdmi_cecctrl.c b/drivers/staging/media/s5p-cec/exynos_hdmi_cecctrl.c
new file mode 100644
index 000000000..ce95e0fcd
--- /dev/null
+++ b/drivers/staging/media/s5p-cec/exynos_hdmi_cecctrl.c
@@ -0,0 +1,209 @@
+/* drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c
+ *
+ * Copyright (c) 2009, 2014 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * cec ftn file for Samsung TVOUT driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/device.h>
+
+#include "exynos_hdmi_cec.h"
+#include "regs-cec.h"
+
+#define S5P_HDMI_FIN 24000000
+#define CEC_DIV_RATIO 320000
+
+#define CEC_MESSAGE_BROADCAST_MASK 0x0F
+#define CEC_MESSAGE_BROADCAST 0x0F
+#define CEC_FILTER_THRESHOLD 0x15
+
+void s5p_cec_set_divider(struct s5p_cec_dev *cec)
+{
+ u32 div_ratio, div_val;
+ unsigned int reg;
+
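+	/*
+	 * Program the HDMI PHY to divide its 24 MHz input clock down to
+	 * CEC_DIV_RATIO (320 kHz).
+	 */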
+ div_ratio = S5P_HDMI_FIN / CEC_DIV_RATIO - 1;
+
+ if (regmap_read(cec->pmu, EXYNOS_HDMI_PHY_CONTROL, &reg)) {
+ dev_err(cec->dev, "failed to read phy control\n");
+ return;
+ }
+
+ reg = (reg & ~(0x3FF << 16)) | (div_ratio << 16);
+
+ if (regmap_write(cec->pmu, EXYNOS_HDMI_PHY_CONTROL, reg)) {
+ dev_err(cec->dev, "failed to write phy control\n");
+ return;
+ }
+
+	/* 50 us tick: integer form of CEC_DIV_RATIO * 0.00005 - 1 (= 15) */
+	div_val = CEC_DIV_RATIO / 20000 - 1;
+
+ writeb(0x0, cec->reg + S5P_CEC_DIVISOR_3);
+ writeb(0x0, cec->reg + S5P_CEC_DIVISOR_2);
+ writeb(0x0, cec->reg + S5P_CEC_DIVISOR_1);
+ writeb(div_val, cec->reg + S5P_CEC_DIVISOR_0);
+}
+
+void s5p_cec_enable_rx(struct s5p_cec_dev *cec)
+{
+ u8 reg;
+
+ reg = readb(cec->reg + S5P_CEC_RX_CTRL);
+ reg |= S5P_CEC_RX_CTRL_ENABLE;
+ writeb(reg, cec->reg + S5P_CEC_RX_CTRL);
+}
+
+void s5p_cec_mask_rx_interrupts(struct s5p_cec_dev *cec)
+{
+ u8 reg;
+
+ reg = readb(cec->reg + S5P_CEC_IRQ_MASK);
+ reg |= S5P_CEC_IRQ_RX_DONE;
+ reg |= S5P_CEC_IRQ_RX_ERROR;
+ writeb(reg, cec->reg + S5P_CEC_IRQ_MASK);
+}
+
+void s5p_cec_unmask_rx_interrupts(struct s5p_cec_dev *cec)
+{
+ u8 reg;
+
+ reg = readb(cec->reg + S5P_CEC_IRQ_MASK);
+ reg &= ~S5P_CEC_IRQ_RX_DONE;
+ reg &= ~S5P_CEC_IRQ_RX_ERROR;
+ writeb(reg, cec->reg + S5P_CEC_IRQ_MASK);
+}
+
+void s5p_cec_mask_tx_interrupts(struct s5p_cec_dev *cec)
+{
+ u8 reg;
+
+ reg = readb(cec->reg + S5P_CEC_IRQ_MASK);
+ reg |= S5P_CEC_IRQ_TX_DONE;
+ reg |= S5P_CEC_IRQ_TX_ERROR;
+ writeb(reg, cec->reg + S5P_CEC_IRQ_MASK);
+}
+
+void s5p_cec_unmask_tx_interrupts(struct s5p_cec_dev *cec)
+{
+ u8 reg;
+
+ reg = readb(cec->reg + S5P_CEC_IRQ_MASK);
+ reg &= ~S5P_CEC_IRQ_TX_DONE;
+ reg &= ~S5P_CEC_IRQ_TX_ERROR;
+ writeb(reg, cec->reg + S5P_CEC_IRQ_MASK);
+}
+
+void s5p_cec_reset(struct s5p_cec_dev *cec)
+{
+ u8 reg;
+
+ writeb(S5P_CEC_RX_CTRL_RESET, cec->reg + S5P_CEC_RX_CTRL);
+ writeb(S5P_CEC_TX_CTRL_RESET, cec->reg + S5P_CEC_TX_CTRL);
+
+ reg = readb(cec->reg + 0xc4);
+ reg &= ~0x1;
+ writeb(reg, cec->reg + 0xc4);
+}
+
+void s5p_cec_tx_reset(struct s5p_cec_dev *cec)
+{
+ writeb(S5P_CEC_TX_CTRL_RESET, cec->reg + S5P_CEC_TX_CTRL);
+}
+
+void s5p_cec_rx_reset(struct s5p_cec_dev *cec)
+{
+ u8 reg;
+
+ writeb(S5P_CEC_RX_CTRL_RESET, cec->reg + S5P_CEC_RX_CTRL);
+
+ reg = readb(cec->reg + 0xc4);
+ reg &= ~0x1;
+ writeb(reg, cec->reg + 0xc4);
+}
+
+void s5p_cec_threshold(struct s5p_cec_dev *cec)
+{
+ writeb(CEC_FILTER_THRESHOLD, cec->reg + S5P_CEC_RX_FILTER_TH);
+ writeb(0, cec->reg + S5P_CEC_RX_FILTER_CTRL);
+}
+
+void s5p_cec_copy_packet(struct s5p_cec_dev *cec, char *data,
+ size_t count, u8 retries)
+{
+ int i = 0;
+ u8 reg;
+
+ while (i < count) {
+ writeb(data[i], cec->reg + (S5P_CEC_TX_BUFF0 + (i * 4)));
+ i++;
+ }
+
+ writeb(count, cec->reg + S5P_CEC_TX_BYTES);
+ reg = readb(cec->reg + S5P_CEC_TX_CTRL);
+ reg |= S5P_CEC_TX_CTRL_START;
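+	/* bits 6:4 of TX_CTRL hold the retransmission count */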
+ reg &= ~0x70;
+ reg |= retries << 4;
+
+ if ((data[0] & CEC_MESSAGE_BROADCAST_MASK) == CEC_MESSAGE_BROADCAST) {
+ dev_dbg(cec->dev, "Broadcast");
+ reg |= S5P_CEC_TX_CTRL_BCAST;
+ } else {
+ dev_dbg(cec->dev, "No Broadcast");
+ reg &= ~S5P_CEC_TX_CTRL_BCAST;
+ }
+
+ writeb(reg, cec->reg + S5P_CEC_TX_CTRL);
+ dev_dbg(cec->dev, "cec-tx: cec count (%zu): %*ph", count,
+ (int)count, data);
+}
+
+void s5p_cec_set_addr(struct s5p_cec_dev *cec, u32 addr)
+{
+ writeb(addr & 0x0F, cec->reg + S5P_CEC_LOGIC_ADDR);
+}
+
+u32 s5p_cec_get_status(struct s5p_cec_dev *cec)
+{
+ u32 status = 0;
+
+ status = readb(cec->reg + S5P_CEC_STATUS_0);
+ status |= readb(cec->reg + S5P_CEC_STATUS_1) << 8;
+ status |= readb(cec->reg + S5P_CEC_STATUS_2) << 16;
+ status |= readb(cec->reg + S5P_CEC_STATUS_3) << 24;
+
+ dev_dbg(cec->dev, "status = 0x%x!\n", status);
+
+ return status;
+}
+
+void s5p_clr_pending_tx(struct s5p_cec_dev *cec)
+{
+ writeb(S5P_CEC_IRQ_TX_DONE | S5P_CEC_IRQ_TX_ERROR,
+ cec->reg + S5P_CEC_IRQ_CLEAR);
+}
+
+void s5p_clr_pending_rx(struct s5p_cec_dev *cec)
+{
+ writeb(S5P_CEC_IRQ_RX_DONE | S5P_CEC_IRQ_RX_ERROR,
+ cec->reg + S5P_CEC_IRQ_CLEAR);
+}
+
+void s5p_cec_get_rx_buf(struct s5p_cec_dev *cec, u32 size, u8 *buffer)
+{
+ u32 i = 0;
+ char debug[40];
+
+ while (i < size) {
+ buffer[i] = readb(cec->reg + S5P_CEC_RX_BUFF0 + (i * 4));
+ sprintf(debug + i * 2, "%02x ", buffer[i]);
+ i++;
+ }
+ dev_dbg(cec->dev, "cec-rx: cec size(%d): %s", size, debug);
+}
diff --git a/drivers/staging/media/s5p-cec/regs-cec.h b/drivers/staging/media/s5p-cec/regs-cec.h
new file mode 100644
index 000000000..b2e7e1299
--- /dev/null
+++ b/drivers/staging/media/s5p-cec/regs-cec.h
@@ -0,0 +1,96 @@
+/* drivers/media/platform/s5p-cec/regs-cec.h
+ *
+ * Copyright (c) 2010 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * register header file for Samsung TVOUT driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __EXYNOS_REGS__H
+#define __EXYNOS_REGS__H
+
+/*
+ * Register part
+ */
+#define S5P_CEC_STATUS_0 (0x0000)
+#define S5P_CEC_STATUS_1 (0x0004)
+#define S5P_CEC_STATUS_2 (0x0008)
+#define S5P_CEC_STATUS_3 (0x000C)
+#define S5P_CEC_IRQ_MASK (0x0010)
+#define S5P_CEC_IRQ_CLEAR (0x0014)
+#define S5P_CEC_LOGIC_ADDR (0x0020)
+#define S5P_CEC_DIVISOR_0 (0x0030)
+#define S5P_CEC_DIVISOR_1 (0x0034)
+#define S5P_CEC_DIVISOR_2 (0x0038)
+#define S5P_CEC_DIVISOR_3 (0x003C)
+
+#define S5P_CEC_TX_CTRL (0x0040)
+#define S5P_CEC_TX_BYTES (0x0044)
+#define S5P_CEC_TX_STAT0 (0x0060)
+#define S5P_CEC_TX_STAT1 (0x0064)
+#define S5P_CEC_TX_BUFF0 (0x0080)
+#define S5P_CEC_TX_BUFF1 (0x0084)
+#define S5P_CEC_TX_BUFF2 (0x0088)
+#define S5P_CEC_TX_BUFF3 (0x008C)
+#define S5P_CEC_TX_BUFF4 (0x0090)
+#define S5P_CEC_TX_BUFF5 (0x0094)
+#define S5P_CEC_TX_BUFF6 (0x0098)
+#define S5P_CEC_TX_BUFF7 (0x009C)
+#define S5P_CEC_TX_BUFF8 (0x00A0)
+#define S5P_CEC_TX_BUFF9 (0x00A4)
+#define S5P_CEC_TX_BUFF10 (0x00A8)
+#define S5P_CEC_TX_BUFF11 (0x00AC)
+#define S5P_CEC_TX_BUFF12 (0x00B0)
+#define S5P_CEC_TX_BUFF13 (0x00B4)
+#define S5P_CEC_TX_BUFF14 (0x00B8)
+#define S5P_CEC_TX_BUFF15 (0x00BC)
+
+#define S5P_CEC_RX_CTRL (0x00C0)
+#define S5P_CEC_RX_STAT0 (0x00E0)
+#define S5P_CEC_RX_STAT1 (0x00E4)
+#define S5P_CEC_RX_BUFF0 (0x0100)
+#define S5P_CEC_RX_BUFF1 (0x0104)
+#define S5P_CEC_RX_BUFF2 (0x0108)
+#define S5P_CEC_RX_BUFF3 (0x010C)
+#define S5P_CEC_RX_BUFF4 (0x0110)
+#define S5P_CEC_RX_BUFF5 (0x0114)
+#define S5P_CEC_RX_BUFF6 (0x0118)
+#define S5P_CEC_RX_BUFF7 (0x011C)
+#define S5P_CEC_RX_BUFF8 (0x0120)
+#define S5P_CEC_RX_BUFF9 (0x0124)
+#define S5P_CEC_RX_BUFF10 (0x0128)
+#define S5P_CEC_RX_BUFF11 (0x012C)
+#define S5P_CEC_RX_BUFF12 (0x0130)
+#define S5P_CEC_RX_BUFF13 (0x0134)
+#define S5P_CEC_RX_BUFF14 (0x0138)
+#define S5P_CEC_RX_BUFF15 (0x013C)
+
+#define S5P_CEC_RX_FILTER_CTRL (0x0180)
+#define S5P_CEC_RX_FILTER_TH (0x0184)
+
+/*
+ * Bit definition part
+ */
+#define S5P_CEC_IRQ_TX_DONE (1<<0)
+#define S5P_CEC_IRQ_TX_ERROR (1<<1)
+#define S5P_CEC_IRQ_RX_DONE (1<<4)
+#define S5P_CEC_IRQ_RX_ERROR (1<<5)
+
+#define S5P_CEC_TX_CTRL_START (1<<0)
+#define S5P_CEC_TX_CTRL_BCAST (1<<1)
+#define S5P_CEC_TX_CTRL_RETRY (0x04<<4)
+#define S5P_CEC_TX_CTRL_RESET (1<<7)
+
+#define S5P_CEC_RX_CTRL_ENABLE (1<<0)
+#define S5P_CEC_RX_CTRL_RESET (1<<7)
+
+#define S5P_CEC_LOGIC_ADDR_MASK (0xF)
+
+/* PMU Registers for PHY */
+#define EXYNOS_HDMI_PHY_CONTROL 0x700
+
+#endif /* __EXYNOS_REGS__H */
diff --git a/drivers/staging/media/s5p-cec/s5p_cec.c b/drivers/staging/media/s5p-cec/s5p_cec.c
new file mode 100644
index 000000000..78333273c
--- /dev/null
+++ b/drivers/staging/media/s5p-cec/s5p_cec.c
@@ -0,0 +1,294 @@
+/* drivers/media/platform/s5p-cec/s5p_cec.c
+ *
+ * Samsung S5P CEC driver
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This driver is based on the "cec interface driver for exynos soc" by
+ * SangPil Moon.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/timer.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <media/cec.h>
+
+#include "exynos_hdmi_cec.h"
+#include "regs-cec.h"
+#include "s5p_cec.h"
+
+#define CEC_NAME "s5p-cec"
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "debug level (0-2)");
+
+static int s5p_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct s5p_cec_dev *cec = adap->priv;
+
+ if (enable) {
+ pm_runtime_get_sync(cec->dev);
+
+ s5p_cec_reset(cec);
+
+ s5p_cec_set_divider(cec);
+ s5p_cec_threshold(cec);
+
+ s5p_cec_unmask_tx_interrupts(cec);
+ s5p_cec_unmask_rx_interrupts(cec);
+ s5p_cec_enable_rx(cec);
+ } else {
+ s5p_cec_mask_tx_interrupts(cec);
+ s5p_cec_mask_rx_interrupts(cec);
+ pm_runtime_disable(cec->dev);
+ }
+
+ return 0;
+}
+
+static int s5p_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
+{
+ struct s5p_cec_dev *cec = adap->priv;
+
+ s5p_cec_set_addr(cec, addr);
+ return 0;
+}
+
+static int s5p_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct s5p_cec_dev *cec = adap->priv;
+
+ /*
+ * Unclear if 0 retries are allowed by the hardware, so have 1 as
+ * the minimum.
+ */
+ s5p_cec_copy_packet(cec, msg->msg, msg->len, max(1, attempts - 1));
+ return 0;
+}
+
+static irqreturn_t s5p_cec_irq_handler(int irq, void *priv)
+{
+ struct s5p_cec_dev *cec = priv;
+ u32 status = 0;
+
+ status = s5p_cec_get_status(cec);
+
+ dev_dbg(cec->dev, "irq received\n");
+
+ if (status & CEC_STATUS_TX_DONE) {
+ if (status & CEC_STATUS_TX_ERROR) {
+ dev_dbg(cec->dev, "CEC_STATUS_TX_ERROR set\n");
+ cec->tx = STATE_ERROR;
+ } else {
+ dev_dbg(cec->dev, "CEC_STATUS_TX_DONE\n");
+ cec->tx = STATE_DONE;
+ }
+ s5p_clr_pending_tx(cec);
+ }
+
+ if (status & CEC_STATUS_RX_DONE) {
+ if (status & CEC_STATUS_RX_ERROR) {
+ dev_dbg(cec->dev, "CEC_STATUS_RX_ERROR set\n");
+ s5p_cec_rx_reset(cec);
+ s5p_cec_enable_rx(cec);
+ } else {
+ dev_dbg(cec->dev, "CEC_STATUS_RX_DONE set\n");
+ if (cec->rx != STATE_IDLE)
+ dev_dbg(cec->dev, "Buffer overrun (worker did not process previous message)\n");
+ cec->rx = STATE_BUSY;
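+			/* bits 31:24 of status hold the RX byte count (CEC_STATUS_RX_BYTES) */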
+ cec->msg.len = status >> 24;
+ cec->msg.rx_status = CEC_RX_STATUS_OK;
+ s5p_cec_get_rx_buf(cec, cec->msg.len,
+ cec->msg.msg);
+ cec->rx = STATE_DONE;
+ s5p_cec_enable_rx(cec);
+ }
+ /* Clear interrupt pending bit */
+ s5p_clr_pending_rx(cec);
+ }
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t s5p_cec_irq_handler_thread(int irq, void *priv)
+{
+ struct s5p_cec_dev *cec = priv;
+
+ dev_dbg(cec->dev, "irq processing thread\n");
+ switch (cec->tx) {
+ case STATE_DONE:
+ cec_transmit_done(cec->adap, CEC_TX_STATUS_OK, 0, 0, 0, 0);
+ cec->tx = STATE_IDLE;
+ break;
+ case STATE_ERROR:
+ cec_transmit_done(cec->adap,
+ CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_ERROR,
+ 0, 0, 0, 1);
+ cec->tx = STATE_IDLE;
+ break;
+ case STATE_BUSY:
+ dev_err(cec->dev, "state set to busy, this should not occur here\n");
+ break;
+ default:
+ break;
+ }
+
+ switch (cec->rx) {
+ case STATE_DONE:
+ cec_received_msg(cec->adap, &cec->msg);
+ cec->rx = STATE_IDLE;
+ break;
+ default:
+ break;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static const struct cec_adap_ops s5p_cec_adap_ops = {
+ .adap_enable = s5p_cec_adap_enable,
+ .adap_log_addr = s5p_cec_adap_log_addr,
+ .adap_transmit = s5p_cec_adap_transmit,
+};
+
+static int s5p_cec_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct s5p_cec_dev *cec;
+ int ret;
+
+ cec = devm_kzalloc(&pdev->dev, sizeof(*cec), GFP_KERNEL);
+	if (!cec)
+ return -ENOMEM;
+
+ cec->dev = dev;
+
+ cec->irq = platform_get_irq(pdev, 0);
+ if (cec->irq < 0)
+ return cec->irq;
+
+ ret = devm_request_threaded_irq(dev, cec->irq, s5p_cec_irq_handler,
+ s5p_cec_irq_handler_thread, 0, pdev->name, cec);
+ if (ret)
+ return ret;
+
+ cec->clk = devm_clk_get(dev, "hdmicec");
+ if (IS_ERR(cec->clk))
+ return PTR_ERR(cec->clk);
+
+ cec->pmu = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "samsung,syscon-phandle");
+ if (IS_ERR(cec->pmu))
+ return -EPROBE_DEFER;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cec->reg = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cec->reg))
+ return PTR_ERR(cec->reg);
+
+ cec->adap = cec_allocate_adapter(&s5p_cec_adap_ops, cec,
+ CEC_NAME,
+ CEC_CAP_PHYS_ADDR | CEC_CAP_LOG_ADDRS | CEC_CAP_TRANSMIT |
+ CEC_CAP_PASSTHROUGH | CEC_CAP_RC,
+ 1, &pdev->dev);
+ ret = PTR_ERR_OR_ZERO(cec->adap);
+ if (ret)
+ return ret;
+ ret = cec_register_adapter(cec->adap);
+ if (ret) {
+ cec_delete_adapter(cec->adap);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, cec);
+ pm_runtime_enable(dev);
+
+	dev_dbg(dev, "successfully probed\n");
+ return 0;
+}
+
+static int s5p_cec_remove(struct platform_device *pdev)
+{
+ struct s5p_cec_dev *cec = platform_get_drvdata(pdev);
+
+ cec_unregister_adapter(cec->adap);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static int s5p_cec_runtime_suspend(struct device *dev)
+{
+ struct s5p_cec_dev *cec = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(cec->clk);
+ return 0;
+}
+
+static int s5p_cec_runtime_resume(struct device *dev)
+{
+ struct s5p_cec_dev *cec = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(cec->clk);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static int __maybe_unused s5p_cec_suspend(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+ return s5p_cec_runtime_suspend(dev);
+}
+
+static int __maybe_unused s5p_cec_resume(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+ return s5p_cec_runtime_resume(dev);
+}
+
+static const struct dev_pm_ops s5p_cec_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(s5p_cec_suspend, s5p_cec_resume)
+ SET_RUNTIME_PM_OPS(s5p_cec_runtime_suspend, s5p_cec_runtime_resume,
+ NULL)
+};
+
+static const struct of_device_id s5p_cec_match[] = {
+ {
+ .compatible = "samsung,s5p-cec",
+ },
+ {},
+};
+
+static struct platform_driver s5p_cec_pdrv = {
+ .probe = s5p_cec_probe,
+ .remove = s5p_cec_remove,
+ .driver = {
+ .name = CEC_NAME,
+ .of_match_table = s5p_cec_match,
+ .pm = &s5p_cec_pm_ops,
+ },
+};
+
+module_platform_driver(s5p_cec_pdrv);
+
+MODULE_AUTHOR("Kamil Debski <kamil@wypas.org>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Samsung S5P CEC driver");
diff --git a/drivers/staging/media/s5p-cec/s5p_cec.h b/drivers/staging/media/s5p-cec/s5p_cec.h
new file mode 100644
index 000000000..03732c13d
--- /dev/null
+++ b/drivers/staging/media/s5p-cec/s5p_cec.h
@@ -0,0 +1,76 @@
+/* drivers/media/platform/s5p-cec/s5p_cec.h
+ *
+ * Samsung S5P HDMI CEC driver
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _S5P_CEC_H_
+#define _S5P_CEC_H_ __FILE__
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/timer.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <media/cec.h>
+
+#include "exynos_hdmi_cec.h"
+#include "regs-cec.h"
+
+#define CEC_NAME "s5p-cec"
+
+#define CEC_STATUS_TX_RUNNING (1 << 0)
+#define CEC_STATUS_TX_TRANSFERRING (1 << 1)
+#define CEC_STATUS_TX_DONE (1 << 2)
+#define CEC_STATUS_TX_ERROR (1 << 3)
+#define CEC_STATUS_TX_BYTES (0xFF << 8)
+#define CEC_STATUS_RX_RUNNING (1 << 16)
+#define CEC_STATUS_RX_RECEIVING (1 << 17)
+#define CEC_STATUS_RX_DONE (1 << 18)
+#define CEC_STATUS_RX_ERROR (1 << 19)
+#define CEC_STATUS_RX_BCAST (1 << 20)
+#define CEC_STATUS_RX_BYTES (0xFF << 24)
+
+#define CEC_WORKER_TX_DONE (1 << 0)
+#define CEC_WORKER_RX_MSG (1 << 1)
+
+/* CEC Rx buffer size */
+#define CEC_RX_BUFF_SIZE 16
+/* CEC Tx buffer size */
+#define CEC_TX_BUFF_SIZE 16
+
+enum cec_state {
+ STATE_IDLE,
+ STATE_BUSY,
+ STATE_DONE,
+ STATE_ERROR
+};
+
+struct s5p_cec_dev {
+ struct cec_adapter *adap;
+ struct clk *clk;
+ struct device *dev;
+ struct mutex lock;
+ struct regmap *pmu;
+ int irq;
+ void __iomem *reg;
+
+ enum cec_state rx;
+ enum cec_state tx;
+ struct cec_msg msg;
+};
+
+#endif /* _S5P_CEC_H_ */
diff --git a/drivers/staging/media/tw686x-kh/tw686x-kh-video.c b/drivers/staging/media/tw686x-kh/tw686x-kh-video.c
index 6ecb504a7..9bf32aec2 100644
--- a/drivers/staging/media/tw686x-kh/tw686x-kh-video.c
+++ b/drivers/staging/media/tw686x-kh/tw686x-kh-video.c
@@ -130,12 +130,11 @@ static void tw686x_get_format(struct tw686x_video_channel *vc,
static int tw686x_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
unsigned int *nplanes, unsigned int sizes[],
- void *alloc_ctxs[])
+ struct device *alloc_devs[])
{
struct tw686x_video_channel *vc = vb2_get_drv_priv(vq);
unsigned int size = vc->width * vc->height * vc->format->depth / 8;
- alloc_ctxs[0] = vc->alloc_ctx;
if (*nbuffers < 2)
*nbuffers = 2;
@@ -645,7 +644,6 @@ void tw686x_kh_video_free(struct tw686x_dev *dev)
v4l2_ctrl_handler_free(&vc->ctrl_handler);
if (vc->device)
video_unregister_device(vc->device);
- vb2_dma_sg_cleanup_ctx(vc->alloc_ctx);
for (n = 0; n < 2; n++) {
struct dma_desc *descs = &vc->sg_tables[n];
@@ -750,13 +748,6 @@ int tw686x_kh_video_init(struct tw686x_dev *dev)
goto error;
}
- vc->alloc_ctx = vb2_dma_sg_init_ctx(&dev->pci_dev->dev);
- if (IS_ERR(vc->alloc_ctx)) {
- pr_warn("Unable to initialize DMA scatter-gather context\n");
- err = PTR_ERR(vc->alloc_ctx);
- goto error;
- }
-
vc->vidq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
vc->vidq.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
vc->vidq.drv_priv = vc;
@@ -766,6 +757,7 @@ int tw686x_kh_video_init(struct tw686x_dev *dev)
vc->vidq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
vc->vidq.min_buffers_needed = 2;
vc->vidq.lock = &vc->vb_mutex;
+ vc->vidq.dev = &dev->pci_dev->dev;
vc->vidq.gfp_flags = GFP_DMA32;
err = vb2_queue_init(&vc->vidq);
diff --git a/drivers/staging/media/tw686x-kh/tw686x-kh.h b/drivers/staging/media/tw686x-kh/tw686x-kh.h
index dc257967d..6284a90d6 100644
--- a/drivers/staging/media/tw686x-kh/tw686x-kh.h
+++ b/drivers/staging/media/tw686x-kh/tw686x-kh.h
@@ -56,7 +56,6 @@ struct tw686x_video_channel {
struct video_device *device;
struct dma_desc sg_tables[2];
struct tw686x_vb2_buf *curr_bufs[2];
- void *alloc_ctx;
struct vdma_desc *sg_descs[2];
struct v4l2_ctrl_handler ctrl_handler;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index 68931e5ec..09e9499b7 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -1799,8 +1799,8 @@ struct ieee80211_device {
short scanning;
short proto_started;
- struct semaphore wx_sem;
- struct semaphore scan_sem;
+ struct mutex wx_mutex;
+ struct mutex scan_mutex;
spinlock_t mgmt_tx_lock;
spinlock_t beacon_lock;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index d70559576..49db1b75c 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -427,7 +427,7 @@ void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee)
short ch = 0;
u8 channel_map[MAX_CHANNEL_NUMBER+1];
memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER+1);
- down(&ieee->scan_sem);
+ mutex_lock(&ieee->scan_mutex);
while(1)
{
@@ -475,13 +475,13 @@ void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee)
out:
if(ieee->state < IEEE80211_LINKED){
ieee->actscanning = false;
- up(&ieee->scan_sem);
+ mutex_unlock(&ieee->scan_mutex);
}
else{
ieee->sync_scan_hurryup = 0;
if(IS_DOT11D_ENABLE(ieee))
DOT11D_ScanComplete(ieee);
- up(&ieee->scan_sem);
+ mutex_unlock(&ieee->scan_mutex);
}
}
EXPORT_SYMBOL(ieee80211_softmac_scan_syncro);
@@ -495,7 +495,7 @@ static void ieee80211_softmac_scan_wq(struct work_struct *work)
memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER+1);
if(!ieee->ieee_up)
return;
- down(&ieee->scan_sem);
+ mutex_lock(&ieee->scan_mutex);
do{
ieee->current_network.channel =
(ieee->current_network.channel + 1) % MAX_CHANNEL_NUMBER;
@@ -517,7 +517,7 @@ static void ieee80211_softmac_scan_wq(struct work_struct *work)
schedule_delayed_work(&ieee->softmac_scan_wq, IEEE80211_SOFTMAC_SCAN_TIME);
- up(&ieee->scan_sem);
+ mutex_unlock(&ieee->scan_mutex);
return;
out:
if(IS_DOT11D_ENABLE(ieee))
@@ -525,7 +525,7 @@ out:
ieee->actscanning = false;
watchdog = 0;
ieee->scanning = 0;
- up(&ieee->scan_sem);
+ mutex_unlock(&ieee->scan_mutex);
}
@@ -579,7 +579,7 @@ static void ieee80211_softmac_stop_scan(struct ieee80211_device *ieee)
//ieee->sync_scan_hurryup = 1;
- down(&ieee->scan_sem);
+ mutex_lock(&ieee->scan_mutex);
// spin_lock_irqsave(&ieee->lock, flags);
if (ieee->scanning == 1) {
@@ -589,7 +589,7 @@ static void ieee80211_softmac_stop_scan(struct ieee80211_device *ieee)
}
// spin_unlock_irqrestore(&ieee->lock, flags);
- up(&ieee->scan_sem);
+ mutex_unlock(&ieee->scan_mutex);
}
void ieee80211_stop_scan(struct ieee80211_device *ieee)
@@ -621,7 +621,7 @@ static void ieee80211_start_scan(struct ieee80211_device *ieee)
}
-/* called with wx_sem held */
+/* called with wx_mutex held */
void ieee80211_start_scan_syncro(struct ieee80211_device *ieee)
{
if (IS_DOT11D_ENABLE(ieee) )
@@ -1389,7 +1389,7 @@ static void ieee80211_associate_procedure_wq(struct work_struct *work)
{
struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, associate_procedure_wq);
ieee->sync_scan_hurryup = 1;
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
if (ieee->data_hard_stop)
ieee->data_hard_stop(ieee->dev);
@@ -1402,7 +1402,7 @@ static void ieee80211_associate_procedure_wq(struct work_struct *work)
ieee->associate_seq = 1;
ieee80211_associate_step1(ieee);
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
}
inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee80211_network *net)
@@ -2331,7 +2331,7 @@ static void ieee80211_start_ibss_wq(struct work_struct *work)
struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, start_ibss_wq);
/* iwconfig mode ad-hoc will schedule this and return
* on the other hand this will block further iwconfig SET
- * operations because of the wx_sem hold.
+ * operations because of the wx_mutex hold.
* Anyway some most set operations set a flag to speed-up
* (abort) this wq (when syncro scanning) before sleeping
* on the semaphore
@@ -2340,7 +2340,7 @@ static void ieee80211_start_ibss_wq(struct work_struct *work)
printk("==========oh driver down return\n");
return;
}
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
if (ieee->current_network.ssid_len == 0) {
strcpy(ieee->current_network.ssid, IEEE80211_DEFAULT_TX_ESSID);
@@ -2431,7 +2431,7 @@ static void ieee80211_start_ibss_wq(struct work_struct *work)
ieee->data_hard_resume(ieee->dev);
netif_carrier_on(ieee->dev);
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
}
inline void ieee80211_start_ibss(struct ieee80211_device *ieee)
@@ -2439,7 +2439,7 @@ inline void ieee80211_start_ibss(struct ieee80211_device *ieee)
schedule_delayed_work(&ieee->start_ibss_wq, 150);
}
-/* this is called only in user context, with wx_sem held */
+/* this is called only in user context, with wx_mutex held */
void ieee80211_start_bss(struct ieee80211_device *ieee)
{
unsigned long flags;
@@ -2505,7 +2505,7 @@ static void ieee80211_associate_retry_wq(struct work_struct *work)
struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, associate_retry_wq);
unsigned long flags;
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
if(!ieee->proto_started)
goto exit;
@@ -2537,7 +2537,7 @@ static void ieee80211_associate_retry_wq(struct work_struct *work)
spin_unlock_irqrestore(&ieee->lock, flags);
exit:
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
}
struct sk_buff *ieee80211_get_beacon_(struct ieee80211_device *ieee)
@@ -2583,9 +2583,9 @@ EXPORT_SYMBOL(ieee80211_get_beacon);
void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee)
{
ieee->sync_scan_hurryup = 1;
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
ieee80211_stop_protocol(ieee);
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
}
EXPORT_SYMBOL(ieee80211_softmac_stop_protocol);
@@ -2609,9 +2609,9 @@ void ieee80211_stop_protocol(struct ieee80211_device *ieee)
void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee)
{
ieee->sync_scan_hurryup = 0;
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
ieee80211_start_protocol(ieee);
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
}
EXPORT_SYMBOL(ieee80211_softmac_start_protocol);
@@ -2728,8 +2728,8 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
INIT_WORK(&ieee->wx_sync_scan_wq, ieee80211_wx_sync_scan_wq);
- sema_init(&ieee->wx_sem, 1);
- sema_init(&ieee->scan_sem, 1);
+ mutex_init(&ieee->wx_mutex);
+ mutex_init(&ieee->scan_mutex);
spin_lock_init(&ieee->mgmt_tx_lock);
spin_lock_init(&ieee->beacon_lock);
@@ -2742,14 +2742,14 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
void ieee80211_softmac_free(struct ieee80211_device *ieee)
{
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
kfree(ieee->pDot11dInfo);
ieee->pDot11dInfo = NULL;
del_timer_sync(&ieee->associate_timer);
cancel_delayed_work(&ieee->associate_retry_wq);
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
}
/********************************************************
@@ -3138,7 +3138,7 @@ int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct iw_poin
struct ieee_param *param;
int ret=0;
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
//IEEE_DEBUG_INFO("wpa_supplicant: len=%d\n", p->length);
if (p->length < sizeof(struct ieee_param) || !p->pointer) {
@@ -3183,7 +3183,7 @@ int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct iw_poin
kfree(param);
out:
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
return ret;
}
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
index aad288a1f..21bd0dc40 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
@@ -34,7 +34,7 @@ int ieee80211_wx_set_freq(struct ieee80211_device *ieee, struct iw_request_info
int ret;
struct iw_freq *fwrq = &wrqu->freq;
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
if (ieee->iw_mode == IW_MODE_INFRA) {
ret = -EOPNOTSUPP;
@@ -79,7 +79,7 @@ int ieee80211_wx_set_freq(struct ieee80211_device *ieee, struct iw_request_info
ret = 0;
out:
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
return ret;
}
EXPORT_SYMBOL(ieee80211_wx_set_freq);
@@ -145,7 +145,7 @@ int ieee80211_wx_set_wap(struct ieee80211_device *ieee,
ieee->sync_scan_hurryup = 1;
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
/* use ifconfig hw ether */
if (ieee->iw_mode == IW_MODE_MASTER) {
ret = -1;
@@ -173,7 +173,7 @@ int ieee80211_wx_set_wap(struct ieee80211_device *ieee,
if (ifup)
ieee80211_start_protocol(ieee);
out:
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
return ret;
}
EXPORT_SYMBOL(ieee80211_wx_set_wap);
@@ -274,7 +274,7 @@ int ieee80211_wx_set_mode(struct ieee80211_device *ieee, struct iw_request_info
ieee->sync_scan_hurryup = 1;
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
if (wrqu->mode == ieee->iw_mode)
goto out;
@@ -293,7 +293,7 @@ int ieee80211_wx_set_mode(struct ieee80211_device *ieee, struct iw_request_info
}
out:
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
return 0;
}
EXPORT_SYMBOL(ieee80211_wx_set_mode);
@@ -353,7 +353,7 @@ void ieee80211_wx_sync_scan_wq(struct work_struct *work)
ieee80211_start_send_beacons(ieee);
netif_carrier_on(ieee->dev);
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
}
@@ -362,7 +362,7 @@ int ieee80211_wx_set_scan(struct ieee80211_device *ieee, struct iw_request_info
{
int ret = 0;
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
if (ieee->iw_mode == IW_MODE_MONITOR || !(ieee->proto_started)) {
ret = -1;
@@ -376,7 +376,7 @@ int ieee80211_wx_set_scan(struct ieee80211_device *ieee, struct iw_request_info
}
out:
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
return ret;
}
EXPORT_SYMBOL(ieee80211_wx_set_scan);
@@ -391,7 +391,7 @@ int ieee80211_wx_set_essid(struct ieee80211_device *ieee,
unsigned long flags;
ieee->sync_scan_hurryup = 1;
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
proto_started = ieee->proto_started;
@@ -430,7 +430,7 @@ int ieee80211_wx_set_essid(struct ieee80211_device *ieee,
if (proto_started)
ieee80211_start_protocol(ieee);
out:
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
return ret;
}
EXPORT_SYMBOL(ieee80211_wx_set_essid);
@@ -453,7 +453,7 @@ int ieee80211_wx_set_rawtx(struct ieee80211_device *ieee,
int enable = (parms[0] > 0);
short prev = ieee->raw_tx;
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
if (enable)
ieee->raw_tx = 1;
@@ -475,7 +475,7 @@ int ieee80211_wx_set_rawtx(struct ieee80211_device *ieee,
netif_carrier_off(ieee->dev);
}
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
return 0;
}
@@ -514,7 +514,7 @@ int ieee80211_wx_set_power(struct ieee80211_device *ieee,
{
int ret = 0;
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
if (wrqu->power.disabled) {
ieee->ps = IEEE80211_PS_DISABLED;
@@ -553,7 +553,7 @@ int ieee80211_wx_set_power(struct ieee80211_device *ieee,
}
exit:
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
return ret;
}
@@ -564,7 +564,7 @@ int ieee80211_wx_get_power(struct ieee80211_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
if (ieee->ps == IEEE80211_PS_DISABLED) {
wrqu->power.disabled = 1;
@@ -592,7 +592,7 @@ int ieee80211_wx_get_power(struct ieee80211_device *ieee,
wrqu->power.flags |= IW_POWER_UNICAST_R;
exit:
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
return 0;
}
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
index 208be5fc5..563d7fed6 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
@@ -253,7 +253,7 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
int i = 0;
int err = 0;
IEEE80211_DEBUG_WX("Getting scan\n");
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
spin_lock_irqsave(&ieee->lock, flags);
list_for_each_entry(network, &ieee->network_list, list) {
@@ -262,7 +262,7 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
{
err = -E2BIG;
break;
- }
+ }
if (ieee->scan_age == 0 ||
time_after(network->last_scanned + ieee->scan_age, jiffies))
ev = rtl819x_translate_scan(ieee, ev, stop, network, info);
@@ -277,7 +277,7 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
}
spin_unlock_irqrestore(&ieee->lock, flags);
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
wrqu->data.length = ev - extra;
wrqu->data.flags = 0;
diff --git a/drivers/staging/rtl8192u/r8180_93cx6.c b/drivers/staging/rtl8192u/r8180_93cx6.c
index 97d9b3f49..f35defc36 100644
--- a/drivers/staging/rtl8192u/r8180_93cx6.c
+++ b/drivers/staging/rtl8192u/r8180_93cx6.c
@@ -23,8 +23,11 @@
static void eprom_cs(struct net_device *dev, short bit)
{
u8 cmdreg;
+ int err;
- read_nic_byte_E(dev, EPROM_CMD, &cmdreg);
+ err = read_nic_byte_E(dev, EPROM_CMD, &cmdreg);
+ if (err)
+ return;
if (bit)
/* enable EPROM */
write_nic_byte_E(dev, EPROM_CMD, cmdreg | EPROM_CS_BIT);
@@ -40,8 +43,11 @@ static void eprom_cs(struct net_device *dev, short bit)
static void eprom_ck_cycle(struct net_device *dev)
{
u8 cmdreg;
+ int err;
- read_nic_byte_E(dev, EPROM_CMD, &cmdreg);
+ err = read_nic_byte_E(dev, EPROM_CMD, &cmdreg);
+ if (err)
+ return;
write_nic_byte_E(dev, EPROM_CMD, cmdreg | EPROM_CK_BIT);
force_pci_posting(dev);
udelay(EPROM_DELAY);
@@ -56,8 +62,11 @@ static void eprom_ck_cycle(struct net_device *dev)
static void eprom_w(struct net_device *dev, short bit)
{
u8 cmdreg;
+ int err;
- read_nic_byte_E(dev, EPROM_CMD, &cmdreg);
+ err = read_nic_byte_E(dev, EPROM_CMD, &cmdreg);
+ if (err)
+ return;
if (bit)
write_nic_byte_E(dev, EPROM_CMD, cmdreg | EPROM_W_BIT);
else
@@ -71,8 +80,12 @@ static void eprom_w(struct net_device *dev, short bit)
static short eprom_r(struct net_device *dev)
{
u8 bit;
+ int err;
+
+ err = read_nic_byte_E(dev, EPROM_CMD, &bit);
+ if (err)
+ return err;
- read_nic_byte_E(dev, EPROM_CMD, &bit);
udelay(EPROM_DELAY);
if (bit & EPROM_R_BIT)
@@ -93,7 +106,7 @@ static void eprom_send_bits_string(struct net_device *dev, short b[], int len)
}
-u32 eprom_read(struct net_device *dev, u32 addr)
+int eprom_read(struct net_device *dev, u32 addr)
{
struct r8192_priv *priv = ieee80211_priv(dev);
short read_cmd[] = {1, 1, 0};
@@ -101,6 +114,7 @@ u32 eprom_read(struct net_device *dev, u32 addr)
int i;
int addr_len;
u32 ret;
+ int err;
ret = 0;
/* enable EPROM programming */
@@ -144,7 +158,11 @@ u32 eprom_read(struct net_device *dev, u32 addr)
* and reading data. (eeprom outs a dummy 0)
*/
eprom_ck_cycle(dev);
- ret |= (eprom_r(dev)<<(15-i));
+ err = eprom_r(dev);
+ if (err < 0)
+ return err;
+
+ ret |= err<<(15-i);
}
eprom_cs(dev, 0);
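
The change running through this file converts eprom_read() from returning a bare u32 to returning an int that multiplexes a 16-bit EEPROM word (non-negative) with a negative errno, and makes the formerly fire-and-forget bit-bang helpers bail out as soon as read_nic_byte_E() fails. A hedged sketch of the resulting caller contract; read_eeprom_id() is a hypothetical consumer, not part of the patch:

	/* sketch: unpacking an int return that carries either
	 * a 16-bit payload or a negative error code
	 */
	static int read_eeprom_id(struct net_device *dev, u16 *id)
	{
		int ret = eprom_read(dev, 0);

		if (ret < 0)
			return ret;	/* propagate -ENOMEM, USB error, ... */
		*id = (u16)ret;
		return 0;
	}
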
diff --git a/drivers/staging/rtl8192u/r8180_93cx6.h b/drivers/staging/rtl8192u/r8180_93cx6.h
index b840348eb..9cf7f587c 100644
--- a/drivers/staging/rtl8192u/r8180_93cx6.h
+++ b/drivers/staging/rtl8192u/r8180_93cx6.h
@@ -40,4 +40,4 @@
#define EPROM_TXPW1 0x3d
-u32 eprom_read(struct net_device *dev, u32 addr); /* reads a 16 bits word */
+int eprom_read(struct net_device *dev, u32 addr); /* reads a 16 bits word */
diff --git a/drivers/staging/rtl8192u/r8192U.h b/drivers/staging/rtl8192u/r8192U.h
index ee1c72267..821afc0dd 100644
--- a/drivers/staging/rtl8192u/r8192U.h
+++ b/drivers/staging/rtl8192u/r8192U.h
@@ -879,8 +879,7 @@ typedef struct r8192_priv {
/* If 1, allow bad crc frame, reception in monitor mode */
short crcmon;
- struct semaphore wx_sem;
- struct semaphore rf_sem; /* Used to lock rf write operation */
+ struct mutex wx_mutex;
u8 rf_type; /* 0: 1T2R, 1: 2T4R */
RT_RF_TYPE_819xU rf_chip;
@@ -1129,10 +1128,10 @@ int read_nic_byte(struct net_device *dev, int x, u8 *data);
int read_nic_byte_E(struct net_device *dev, int x, u8 *data);
int read_nic_dword(struct net_device *dev, int x, u32 *data);
int read_nic_word(struct net_device *dev, int x, u16 *data);
-void write_nic_byte(struct net_device *dev, int x, u8 y);
-void write_nic_byte_E(struct net_device *dev, int x, u8 y);
-void write_nic_word(struct net_device *dev, int x, u16 y);
-void write_nic_dword(struct net_device *dev, int x, u32 y);
+int write_nic_byte(struct net_device *dev, int x, u8 y);
+int write_nic_byte_E(struct net_device *dev, int x, u8 y);
+int write_nic_word(struct net_device *dev, int x, u16 y);
+int write_nic_dword(struct net_device *dev, int x, u32 y);
void force_pci_posting(struct net_device *dev);
void rtl8192_rtx_disable(struct net_device *);
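
With the write_nic_* prototypes switched from void to int, call sites can now detect failed USB control transfers instead of silently continuing. The patch itself leaves most callers unchanged; the sketch below only illustrates what checking the new return value would look like (the wrapper function is hypothetical):

	static int example_select_eeprom(struct net_device *dev)
	{
		int err;

		err = write_nic_byte_E(dev, EPROM_CMD, EPROM_CS_BIT);
		if (err)	/* 0 on success, -ENOMEM or usb_control_msg() status */
			return err;
		return 0;
	}
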
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 8c1d73719..dd0970fac 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -253,7 +253,7 @@ u32 read_cam(struct net_device *dev, u8 addr)
return data;
}
-void write_nic_byte_E(struct net_device *dev, int indx, u8 data)
+int write_nic_byte_E(struct net_device *dev, int indx, u8 data)
{
int status;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
@@ -261,7 +261,7 @@ void write_nic_byte_E(struct net_device *dev, int indx, u8 data)
u8 *usbdata = kzalloc(sizeof(data), GFP_KERNEL);
if (!usbdata)
- return;
+ return -ENOMEM;
*usbdata = data;
status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
@@ -269,9 +269,12 @@ void write_nic_byte_E(struct net_device *dev, int indx, u8 data)
indx | 0xfe00, 0, usbdata, 1, HZ / 2);
kfree(usbdata);
- if (status < 0)
+ if (status < 0) {
netdev_err(dev, "write_nic_byte_E TimeOut! status: %d\n",
status);
+ return status;
+ }
+ return 0;
}
int read_nic_byte_E(struct net_device *dev, int indx, u8 *data)
@@ -299,7 +302,7 @@ int read_nic_byte_E(struct net_device *dev, int indx, u8 *data)
}
/* as 92U has extend page from 4 to 16, so modify functions below. */
-void write_nic_byte(struct net_device *dev, int indx, u8 data)
+int write_nic_byte(struct net_device *dev, int indx, u8 data)
{
int status;
@@ -308,7 +311,7 @@ void write_nic_byte(struct net_device *dev, int indx, u8 data)
u8 *usbdata = kzalloc(sizeof(data), GFP_KERNEL);
if (!usbdata)
- return;
+ return -ENOMEM;
*usbdata = data;
status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
@@ -317,12 +320,16 @@ void write_nic_byte(struct net_device *dev, int indx, u8 data)
usbdata, 1, HZ / 2);
kfree(usbdata);
- if (status < 0)
+ if (status < 0) {
netdev_err(dev, "write_nic_byte TimeOut! status: %d\n", status);
+ return status;
+ }
+
+ return 0;
}
-void write_nic_word(struct net_device *dev, int indx, u16 data)
+int write_nic_word(struct net_device *dev, int indx, u16 data)
{
int status;
@@ -331,7 +338,7 @@ void write_nic_word(struct net_device *dev, int indx, u16 data)
u16 *usbdata = kzalloc(sizeof(data), GFP_KERNEL);
if (!usbdata)
- return;
+ return -ENOMEM;
*usbdata = data;
status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
@@ -340,12 +347,16 @@ void write_nic_word(struct net_device *dev, int indx, u16 data)
usbdata, 2, HZ / 2);
kfree(usbdata);
- if (status < 0)
+ if (status < 0) {
netdev_err(dev, "write_nic_word TimeOut! status: %d\n", status);
+ return status;
+ }
+
+ return 0;
}
-void write_nic_dword(struct net_device *dev, int indx, u32 data)
+int write_nic_dword(struct net_device *dev, int indx, u32 data)
{
int status;
@@ -354,7 +365,7 @@ void write_nic_dword(struct net_device *dev, int indx, u32 data)
u32 *usbdata = kzalloc(sizeof(data), GFP_KERNEL);
if (!usbdata)
- return;
+ return -ENOMEM;
*usbdata = data;
status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
@@ -364,9 +375,13 @@ void write_nic_dword(struct net_device *dev, int indx, u32 data)
kfree(usbdata);
- if (status < 0)
+ if (status < 0) {
netdev_err(dev, "write_nic_dword TimeOut! status: %d\n",
status);
+ return status;
+ }
+
+ return 0;
}
@@ -2361,8 +2376,7 @@ static void rtl8192_init_priv_lock(struct r8192_priv *priv)
{
spin_lock_init(&priv->tx_lock);
spin_lock_init(&priv->irq_lock);
- sema_init(&priv->wx_sem, 1);
- sema_init(&priv->rf_sem, 1);
+ mutex_init(&priv->wx_mutex);
mutex_init(&priv->mutex);
}
@@ -2421,7 +2435,7 @@ static inline u16 endian_swap(u16 *data)
return *data;
}
-static void rtl8192_read_eeprom_info(struct net_device *dev)
+static int rtl8192_read_eeprom_info(struct net_device *dev)
{
u16 wEPROM_ID = 0;
u8 bMac_Tmp_Addr[6] = {0x00, 0xe0, 0x4c, 0x00, 0x00, 0x02};
@@ -2429,9 +2443,13 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
struct r8192_priv *priv = ieee80211_priv(dev);
u16 tmpValue = 0;
int i;
+ int ret;
RT_TRACE(COMP_EPROM, "===========>%s()\n", __func__);
- wEPROM_ID = eprom_read(dev, 0); /* first read EEPROM ID out; */
+ ret = eprom_read(dev, 0); /* first read EEPROM ID out; */
+ if (ret < 0)
+ return ret;
+ wEPROM_ID = (u16)ret;
RT_TRACE(COMP_EPROM, "EEPROM ID is 0x%x\n", wEPROM_ID);
if (wEPROM_ID != RTL8190_EEPROM_ID)
@@ -2443,13 +2461,25 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
if (bLoad_From_EEPOM) {
- tmpValue = eprom_read(dev, EEPROM_VID >> 1);
+ ret = eprom_read(dev, EEPROM_VID >> 1);
+ if (ret < 0)
+ return ret;
+ tmpValue = (u16)ret;
priv->eeprom_vid = endian_swap(&tmpValue);
- priv->eeprom_pid = eprom_read(dev, EEPROM_PID >> 1);
- tmpValue = eprom_read(dev, EEPROM_ChannelPlan >> 1);
+ ret = eprom_read(dev, EEPROM_PID >> 1);
+ if (ret < 0)
+ return ret;
+ priv->eeprom_pid = (u16)ret;
+ ret = eprom_read(dev, EEPROM_ChannelPlan >> 1);
+ if (ret < 0)
+ return ret;
+ tmpValue = (u16)ret;
priv->eeprom_ChannelPlan = (tmpValue & 0xff00) >> 8;
priv->btxpowerdata_readfromEEPORM = true;
- priv->eeprom_CustomerID =
- eprom_read(dev, (EEPROM_Customer_ID >> 1)) >> 8;
+ ret = eprom_read(dev, (EEPROM_Customer_ID >> 1)) >> 8;
+ if (ret < 0)
+ return ret;
+ priv->eeprom_CustomerID = (u16)ret;
} else {
priv->eeprom_vid = 0;
priv->eeprom_pid = 0;
@@ -2467,10 +2497,10 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
int i;
for (i = 0; i < 6; i += 2) {
- u16 tmp = 0;
-
- tmp = eprom_read(dev, (u16)((EEPROM_NODE_ADDRESS_BYTE_0 + i) >> 1));
- *(u16 *)(&dev->dev_addr[i]) = tmp;
+ ret = eprom_read(dev, (u16)((EEPROM_NODE_ADDRESS_BYTE_0 + i) >> 1));
+ if (ret < 0)
+ return ret;
+ *(u16 *)(&dev->dev_addr[i]) = (u16)ret;
}
} else {
memcpy(dev->dev_addr, bMac_Tmp_Addr, 6);
@@ -2482,52 +2512,72 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
if (priv->card_8192_version == (u8)VERSION_819xU_A) {
/* read Tx power gain offset of legacy OFDM to HT rate */
- if (bLoad_From_EEPOM)
- priv->EEPROMTxPowerDiff = (eprom_read(dev, (EEPROM_TxPowerDiff >> 1)) & 0xff00) >> 8;
- else
+ if (bLoad_From_EEPOM) {
+ ret = eprom_read(dev, (EEPROM_TxPowerDiff >> 1));
+ if (ret < 0)
+ return ret;
+ priv->EEPROMTxPowerDiff = ((u16)ret & 0xff00) >> 8;
+ } else
priv->EEPROMTxPowerDiff = EEPROM_Default_TxPower;
RT_TRACE(COMP_EPROM, "TxPowerDiff:%d\n", priv->EEPROMTxPowerDiff);
/* read ThermalMeter from EEPROM */
- if (bLoad_From_EEPOM)
- priv->EEPROMThermalMeter = (u8)(eprom_read(dev, (EEPROM_ThermalMeter >> 1)) & 0x00ff);
- else
+ if (bLoad_From_EEPOM) {
+ ret = eprom_read(dev, (EEPROM_ThermalMeter >> 1));
+ if (ret < 0)
+ return ret;
+ priv->EEPROMThermalMeter = (u8)((u16)ret & 0x00ff);
+ } else
priv->EEPROMThermalMeter = EEPROM_Default_ThermalMeter;
RT_TRACE(COMP_EPROM, "ThermalMeter:%d\n", priv->EEPROMThermalMeter);
/* for tx power track */
priv->TSSI_13dBm = priv->EEPROMThermalMeter * 100;
/* read antenna tx power offset of B/C/D to A from EEPROM */
- if (bLoad_From_EEPOM)
- priv->EEPROMPwDiff = (eprom_read(dev, (EEPROM_PwDiff >> 1)) & 0x0f00) >> 8;
- else
+ if (bLoad_From_EEPOM) {
+ ret = eprom_read(dev, (EEPROM_PwDiff >> 1));
+ if (ret < 0)
+ return ret;
+ priv->EEPROMPwDiff = ((u16)ret & 0x0f00) >> 8;
+ } else
priv->EEPROMPwDiff = EEPROM_Default_PwDiff;
RT_TRACE(COMP_EPROM, "TxPwDiff:%d\n", priv->EEPROMPwDiff);
/* Read CrystalCap from EEPROM */
- if (bLoad_From_EEPOM)
- priv->EEPROMCrystalCap = (eprom_read(dev, (EEPROM_CrystalCap >> 1)) & 0x0f);
- else
+ if (bLoad_From_EEPOM) {
+ ret = eprom_read(dev, (EEPROM_CrystalCap >> 1));
+ if (ret < 0)
+ return ret;
+ priv->EEPROMCrystalCap = (u16)ret & 0x0f;
+ } else
priv->EEPROMCrystalCap = EEPROM_Default_CrystalCap;
RT_TRACE(COMP_EPROM, "CrystalCap = %d\n", priv->EEPROMCrystalCap);
/* get per-channel Tx power level */
- if (bLoad_From_EEPOM)
- priv->EEPROM_Def_Ver = (eprom_read(dev, (EEPROM_TxPwIndex_Ver >> 1)) & 0xff00) >> 8;
- else
+ if (bLoad_From_EEPOM) {
+ ret = eprom_read(dev, (EEPROM_TxPwIndex_Ver >> 1));
+ if (ret < 0)
+ return ret;
+ priv->EEPROM_Def_Ver = ((u16)ret & 0xff00) >> 8;
+ } else
priv->EEPROM_Def_Ver = 1;
RT_TRACE(COMP_EPROM, "EEPROM_DEF_VER:%d\n", priv->EEPROM_Def_Ver);
if (priv->EEPROM_Def_Ver == 0) { /* old eeprom definition */
int i;
- if (bLoad_From_EEPOM)
- priv->EEPROMTxPowerLevelCCK = (eprom_read(dev, (EEPROM_TxPwIndex_CCK >> 1)) & 0xff) >> 8;
- else
+ if (bLoad_From_EEPOM) {
+ ret = eprom_read(dev, (EEPROM_TxPwIndex_CCK >> 1));
+ if (ret < 0)
+ return ret;
+ priv->EEPROMTxPowerLevelCCK = ((u16)ret & 0xff) >> 8;
+ } else
priv->EEPROMTxPowerLevelCCK = 0x10;
RT_TRACE(COMP_EPROM, "CCK Tx Power Levl: 0x%02x\n", priv->EEPROMTxPowerLevelCCK);
for (i = 0; i < 3; i++) {
if (bLoad_From_EEPOM) {
- tmpValue = eprom_read(dev, (EEPROM_TxPwIndex_OFDM_24G + i) >> 1);
+ ret = eprom_read(dev, (EEPROM_TxPwIndex_OFDM_24G + i) >> 1);
+ if (ret < 0)
+ return ret;
if (((EEPROM_TxPwIndex_OFDM_24G + i) % 2) == 0)
- tmpValue = tmpValue & 0x00ff;
+ tmpValue = (u16)ret & 0x00ff;
else
- tmpValue = (tmpValue & 0xff00) >> 8;
+ tmpValue = ((u16)ret & 0xff00) >> 8;
} else {
tmpValue = 0x10;
}
@@ -2536,17 +2586,21 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
}
} else if (priv->EEPROM_Def_Ver == 1) {
if (bLoad_From_EEPOM) {
- tmpValue = eprom_read(dev,
- EEPROM_TxPwIndex_CCK_V1 >> 1);
- tmpValue = (tmpValue & 0xff00) >> 8;
+ ret = eprom_read(dev, EEPROM_TxPwIndex_CCK_V1 >> 1);
+ if (ret < 0)
+ return ret;
+ tmpValue = ((u16)ret & 0xff00) >> 8;
} else {
tmpValue = 0x10;
}
priv->EEPROMTxPowerLevelCCK_V1[0] = (u8)tmpValue;
- if (bLoad_From_EEPOM)
- tmpValue = eprom_read(dev, (EEPROM_TxPwIndex_CCK_V1 + 2) >> 1);
- else
+ if (bLoad_From_EEPOM) {
+ ret = eprom_read(dev, (EEPROM_TxPwIndex_CCK_V1 + 2) >> 1);
+ if (ret < 0)
+ return ret;
+ tmpValue = (u16)ret;
+ } else
tmpValue = 0x1010;
*((u16 *)(&priv->EEPROMTxPowerLevelCCK_V1[1])) = tmpValue;
if (bLoad_From_EEPOM)
@@ -2644,6 +2698,8 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
init_rate_adaptive(dev);
RT_TRACE(COMP_EPROM, "<===========%s()\n", __func__);
+
+ return 0;
}
static short rtl8192_get_channel_map(struct net_device *dev)
@@ -2664,6 +2720,7 @@ static short rtl8192_get_channel_map(struct net_device *dev)
static short rtl8192_init(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
+ int err;
memset(&(priv->stats), 0, sizeof(struct Stats));
memset(priv->txqueue_to_outpipemap, 0, 9);
@@ -2685,7 +2742,14 @@ static short rtl8192_init(struct net_device *dev)
rtl8192_init_priv_lock(priv);
rtl8192_init_priv_task(dev);
rtl8192_get_eeprom_size(dev);
- rtl8192_read_eeprom_info(dev);
+ err = rtl8192_read_eeprom_info(dev);
+ if (err) {
+ DMESG("Reading EEPROM info failed");
+ kfree(priv->pFirmware);
+ priv->pFirmware = NULL;
+ free_ieee80211(dev);
+ return err;
+ }
rtl8192_get_channel_map(dev);
init_hal_dm(dev);
setup_timer(&priv->watch_dog_timer, watch_dog_timer_callback,
@@ -3303,12 +3367,12 @@ RESET_START:
/* Set the variable for reset. */
priv->ResetProgress = RESET_TYPE_SILENT;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
if (priv->up == 0) {
RT_TRACE(COMP_ERR,
"%s():the driver is not up! return\n",
__func__);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return;
}
priv->up = 0;
@@ -3323,19 +3387,19 @@ RESET_START:
ieee->sync_scan_hurryup = 1;
if (ieee->state == IEEE80211_LINKED) {
- down(&ieee->wx_sem);
+ mutex_lock(&ieee->wx_mutex);
netdev_dbg(dev, "ieee->state is IEEE80211_LINKED\n");
ieee80211_stop_send_beacons(priv->ieee80211);
del_timer_sync(&ieee->associate_timer);
cancel_delayed_work(&ieee->associate_retry_wq);
ieee80211_stop_scan(ieee);
netif_carrier_off(dev);
- up(&ieee->wx_sem);
+ mutex_unlock(&ieee->wx_mutex);
} else {
netdev_dbg(dev, "ieee->state is NOT LINKED\n");
ieee80211_softmac_stop_protocol(priv->ieee80211);
}
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
RT_TRACE(COMP_RESET,
"%s():<==========down process is finished\n",
__func__);
@@ -3533,9 +3597,9 @@ static int rtl8192_open(struct net_device *dev)
struct r8192_priv *priv = ieee80211_priv(dev);
int ret;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = rtl8192_up(dev);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -3556,11 +3620,11 @@ static int rtl8192_close(struct net_device *dev)
struct r8192_priv *priv = ieee80211_priv(dev);
int ret;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = rtl8192_down(dev);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -3632,11 +3696,11 @@ static void rtl8192_restart(struct work_struct *work)
reset_wq);
struct net_device *dev = priv->ieee80211->dev;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
rtl8192_commit(dev);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
}
static void r8192_set_multicast(struct net_device *dev)
@@ -3659,12 +3723,12 @@ static int r8192_set_mac_adr(struct net_device *dev, void *mac)
struct r8192_priv *priv = ieee80211_priv(dev);
struct sockaddr *addr = mac;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ether_addr_copy(dev->dev_addr, addr->sa_data);
schedule_work(&priv->reset_wq);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return 0;
}
@@ -3681,7 +3745,7 @@ static int rtl8192_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
struct iw_point *p = &wrq->u.data;
struct ieee_param *ipw = NULL;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
if (p->length < sizeof(struct ieee_param) || !p->pointer) {
@@ -3774,7 +3838,7 @@ static int rtl8192_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
kfree(ipw);
ipw = NULL;
out:
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c
index 837704de3..d2f2f2460 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.c
+++ b/drivers/staging/rtl8192u/r8192U_wx.c
@@ -67,11 +67,11 @@ static int r8192_wx_set_rate(struct net_device *dev,
int ret;
struct r8192_priv *priv = ieee80211_priv(dev);
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = ieee80211_wx_set_rate(priv->ieee80211, info, wrqu, extra);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -83,11 +83,11 @@ static int r8192_wx_set_rts(struct net_device *dev,
int ret;
struct r8192_priv *priv = ieee80211_priv(dev);
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = ieee80211_wx_set_rts(priv->ieee80211, info, wrqu, extra);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -108,11 +108,11 @@ static int r8192_wx_set_power(struct net_device *dev,
int ret;
struct r8192_priv *priv = ieee80211_priv(dev);
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = ieee80211_wx_set_power(priv->ieee80211, info, wrqu, extra);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -132,11 +132,11 @@ static int r8192_wx_force_reset(struct net_device *dev,
{
struct r8192_priv *priv = ieee80211_priv(dev);
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
netdev_dbg(dev, "%s(): force reset ! extra is %d\n", __func__, *extra);
priv->force_reset = *extra;
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return 0;
}
@@ -148,11 +148,11 @@ static int r8192_wx_set_rawtx(struct net_device *dev,
struct r8192_priv *priv = ieee80211_priv(dev);
int ret;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = ieee80211_wx_set_rawtx(priv->ieee80211, info, wrqu, extra);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
@@ -166,7 +166,7 @@ static int r8192_wx_set_crcmon(struct net_device *dev,
int *parms = (int *)extra;
int enable = (parms[0] > 0);
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
if (enable)
priv->crcmon = 1;
@@ -176,7 +176,7 @@ static int r8192_wx_set_crcmon(struct net_device *dev,
DMESG("bad CRC in monitor mode are %s",
priv->crcmon ? "accepted" : "rejected");
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return 0;
}
@@ -187,13 +187,13 @@ static int r8192_wx_set_mode(struct net_device *dev, struct iw_request_info *a,
struct r8192_priv *priv = ieee80211_priv(dev);
int ret;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = ieee80211_wx_set_mode(priv->ieee80211, a, wrqu, b);
rtl8192_set_rxconf(dev);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -338,7 +338,7 @@ static int r8192_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
}
}
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
if (priv->ieee80211->state != IEEE80211_LINKED) {
priv->ieee80211->scanning = 0;
ieee80211_softmac_scan_syncro(priv->ieee80211);
@@ -346,7 +346,7 @@ static int r8192_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
} else {
ret = ieee80211_wx_set_scan(priv->ieee80211, a, wrqu, b);
}
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -361,11 +361,11 @@ static int r8192_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
if (!priv->up)
return -ENETDOWN;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = ieee80211_wx_get_scan(priv->ieee80211, a, wrqu, b);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -377,11 +377,11 @@ static int r8192_wx_set_essid(struct net_device *dev,
struct r8192_priv *priv = ieee80211_priv(dev);
int ret;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = ieee80211_wx_set_essid(priv->ieee80211, a, wrqu, b);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -393,11 +393,11 @@ static int r8192_wx_get_essid(struct net_device *dev,
int ret;
struct r8192_priv *priv = ieee80211_priv(dev);
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = ieee80211_wx_get_essid(priv->ieee80211, a, wrqu, b);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -408,11 +408,11 @@ static int r8192_wx_set_freq(struct net_device *dev, struct iw_request_info *a,
int ret;
struct r8192_priv *priv = ieee80211_priv(dev);
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = ieee80211_wx_set_freq(priv->ieee80211, a, wrqu, b);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -468,11 +468,11 @@ static int r8192_wx_set_wap(struct net_device *dev,
int ret;
struct r8192_priv *priv = ieee80211_priv(dev);
/* struct sockaddr *temp = (struct sockaddr *)awrq; */
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = ieee80211_wx_set_wap(priv->ieee80211, info, awrq, extra);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
@@ -515,12 +515,12 @@ static int r8192_wx_set_enc(struct net_device *dev,
if (!priv->up)
return -ENETDOWN;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
RT_TRACE(COMP_SEC, "Setting SW wep key");
ret = ieee80211_wx_set_encode(priv->ieee80211, info, wrqu, key);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
@@ -619,7 +619,7 @@ static int r8192_wx_set_retry(struct net_device *dev,
struct r8192_priv *priv = ieee80211_priv(dev);
int err = 0;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
if (wrqu->retry.flags & IW_RETRY_LIFETIME ||
wrqu->retry.disabled){
@@ -652,7 +652,7 @@ static int r8192_wx_set_retry(struct net_device *dev,
rtl8192_commit(dev);
exit:
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return err;
}
@@ -701,7 +701,7 @@ static int r8192_wx_set_sens(struct net_device *dev,
struct r8192_priv *priv = ieee80211_priv(dev);
short err = 0;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
if (priv->rf_set_sens == NULL) {
err = -1; /* we have not this support for this radio */
goto exit;
@@ -712,7 +712,7 @@ static int r8192_wx_set_sens(struct net_device *dev,
err = -EINVAL;
exit:
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return err;
}
@@ -727,7 +727,7 @@ static int r8192_wx_set_enc_ext(struct net_device *dev,
struct ieee80211_device *ieee = priv->ieee80211;
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = ieee80211_wx_set_encode_ext(priv->ieee80211, info, wrqu, extra);
{
@@ -790,7 +790,7 @@ static int r8192_wx_set_enc_ext(struct net_device *dev,
end_hw_sec:
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -801,9 +801,9 @@ static int r8192_wx_set_auth(struct net_device *dev,
int ret = 0;
struct r8192_priv *priv = ieee80211_priv(dev);
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = ieee80211_wx_set_auth(priv->ieee80211, info, &(data->param), extra);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -815,10 +815,10 @@ static int r8192_wx_set_mlme(struct net_device *dev,
int ret = 0;
struct r8192_priv *priv = ieee80211_priv(dev);
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = ieee80211_wx_set_mlme(priv->ieee80211, info, wrqu, extra);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
}
@@ -829,9 +829,9 @@ static int r8192_wx_set_gen_ie(struct net_device *dev,
int ret = 0;
struct r8192_priv *priv = ieee80211_priv(dev);
- down(&priv->wx_sem);
+ mutex_lock(&priv->wx_mutex);
ret = ieee80211_wx_set_gen_ie(priv->ieee80211, extra, data->data.length);
- up(&priv->wx_sem);
+ mutex_unlock(&priv->wx_mutex);
return ret;
diff --git a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
index 0da559d92..d0ba37789 100644
--- a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
@@ -1256,10 +1256,15 @@ void rtw_cfg80211_indicate_scan_done(struct rtw_wdev_priv *pwdev_priv,
DBG_8723A("%s with scan req\n", __func__);
if (pwdev_priv->scan_request->wiphy !=
- pwdev_priv->rtw_wdev->wiphy)
+ pwdev_priv->rtw_wdev->wiphy) {
DBG_8723A("error wiphy compare\n");
- else
- cfg80211_scan_done(pwdev_priv->scan_request, aborted);
+ } else {
+ struct cfg80211_scan_info info = {
+ .aborted = aborted,
+ };
+
+ cfg80211_scan_done(pwdev_priv->scan_request, &info);
+ }
pwdev_priv->scan_request = NULL;
} else {
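
This hunk tracks a cfg80211 API change in 4.8: cfg80211_scan_done() now takes a struct cfg80211_scan_info rather than a bare bool, so the aborted flag is wrapped before the call. The new call shape, as used above:

	struct cfg80211_scan_info info = {
		.aborted = aborted,	/* true if the scan did not finish */
	};

	/* was: cfg80211_scan_done(request, aborted); */
	cfg80211_scan_done(request, &info);
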
diff --git a/drivers/staging/unisys/visorbus/iovmcall_gnuc.h b/drivers/staging/unisys/visorbus/iovmcall_gnuc.h
index b08b6ecc8..98ea7f381 100644
--- a/drivers/staging/unisys/visorbus/iovmcall_gnuc.h
+++ b/drivers/staging/unisys/visorbus/iovmcall_gnuc.h
@@ -22,7 +22,7 @@ __unisys_vmcall_gnuc(unsigned long tuple, unsigned long reg_ebx,
cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
if (!(cpuid_ecx & 0x80000000))
- return -1;
+ return -EPERM;
__asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
"a"(tuple), "b"(reg_ebx), "c"(reg_ecx));
@@ -40,7 +40,7 @@ __unisys_extended_vmcall_gnuc(unsigned long long tuple,
cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
if (!(cpuid_ecx & 0x80000000))
- return -1;
+ return -EPERM;
__asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
"a"(tuple), "b"(reg_ebx), "c"(reg_ecx), "d"(reg_edx));
diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c
index 3a147dbbd..d32b8980a 100644
--- a/drivers/staging/unisys/visorbus/visorbus_main.c
+++ b/drivers/staging/unisys/visorbus/visorbus_main.c
@@ -876,10 +876,10 @@ write_vbus_chp_info(struct visorchannel *chan,
int off = sizeof(struct channel_header) + hdr_info->chp_info_offset;
if (hdr_info->chp_info_offset == 0)
- return -1;
+ return -EFAULT;
if (visorchannel_write(chan, off, info, sizeof(*info)) < 0)
- return -1;
+ return -EFAULT;
return 0;
}
@@ -895,10 +895,10 @@ write_vbus_bus_info(struct visorchannel *chan,
int off = sizeof(struct channel_header) + hdr_info->bus_info_offset;
if (hdr_info->bus_info_offset == 0)
- return -1;
+ return -EFAULT;
if (visorchannel_write(chan, off, info, sizeof(*info)) < 0)
- return -1;
+ return -EFAULT;
return 0;
}
@@ -915,10 +915,10 @@ write_vbus_dev_info(struct visorchannel *chan,
(hdr_info->device_info_struct_bytes * devix);
if (hdr_info->dev_info_offset == 0)
- return -1;
+ return -EFAULT;
if (visorchannel_write(chan, off, info, sizeof(*info)) < 0)
- return -1;
+ return -EFAULT;
return 0;
}
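
These visorbus hunks belong to a sweep across the unisys drivers replacing magic -1 returns with specific errno values: -EPERM in iovmcall_gnuc.h when CPUID leaf 1's ECX bit 31 (the hypervisor-present bit) is clear, -EFAULT here for channel writes, and -EINVAL in visorchipset.c and visorinput.c below. With distinct codes, a caller can react per cause; a hedged sketch (the dev pointer and message text are illustrative):

	err = write_vbus_chp_info(chan, hdr_info, info);
	if (err == -EPERM)
		dev_err(dev, "not running under a hypervisor\n");
	else if (err == -EFAULT)
		dev_err(dev, "channel header has no chp_info area\n");
	else if (err)
		dev_err(dev, "chp_info write failed: %d\n", err);
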
diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c
index 5ba5936e2..d248c946a 100644
--- a/drivers/staging/unisys/visorbus/visorchipset.c
+++ b/drivers/staging/unisys/visorbus/visorchipset.c
@@ -1613,7 +1613,7 @@ parahotplug_request_complete(int id, u16 active)
}
spin_unlock(&parahotplug_request_list_lock);
- return -1;
+ return -EINVAL;
}
/*
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
index 6a4570d10..120ba2097 100644
--- a/drivers/staging/unisys/visorhba/visorhba_main.c
+++ b/drivers/staging/unisys/visorhba/visorhba_main.c
@@ -16,6 +16,8 @@
#include <linux/debugfs.h>
#include <linux/skbuff.h>
#include <linux/kthread.h>
+#include <linux/idr.h>
+#include <linux/seq_file.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
@@ -33,7 +35,6 @@
#define MAX_BUF 8192
#define MAX_PENDING_REQUESTS (MIN_NUMSIGNALS * 2)
#define VISORHBA_ERROR_COUNT 30
-#define VISORHBA_OPEN_MAX 1
static int visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
void (*visorhba_cmnd_done)
@@ -50,14 +51,7 @@ static int visorhba_pause(struct visor_device *dev,
static int visorhba_resume(struct visor_device *dev,
visorbus_state_complete_func complete_func);
-static ssize_t info_debugfs_read(struct file *file, char __user *buf,
- size_t len, loff_t *offset);
-static int set_no_disk_inquiry_result(unsigned char *buf,
- size_t len, bool is_lun0);
static struct dentry *visorhba_debugfs_dir;
-static const struct file_operations debugfs_info_fops = {
- .read = info_debugfs_read,
-};
/* GUIDS for HBA channel type supported by this driver */
static struct visor_channeltype_descriptor visorhba_channel_types[] = {
@@ -99,14 +93,6 @@ struct scsipending {
char cmdtype; /* Type of pointer that is being stored */
};
-/* Work Data for dar_work_queue */
-struct diskaddremove {
- u8 add; /* 0-remove, 1-add */
- struct Scsi_Host *shost; /* Scsi Host for this visorhba instance */
- u32 channel, id, lun; /* Disk Path */
- struct diskaddremove *next;
-};
-
/* Each scsi_host has a host_data area that contains this struct. */
struct visorhba_devdata {
struct Scsi_Host *scsihost;
@@ -133,14 +119,21 @@ struct visorhba_devdata {
int devnum;
struct task_struct *thread;
int thread_wait_ms;
+
+ /*
+ * allows us to pass int handles back-and-forth between us and
+ * iovm, instead of raw pointers
+ */
+ struct idr idr;
+
+ struct dentry *debugfs_dir;
+ struct dentry *debugfs_info;
};
struct visorhba_devices_open {
struct visorhba_devdata *devdata;
};
-static struct visorhba_devices_open visorhbas_open[VISORHBA_OPEN_MAX];
-
#define for_each_vdisk_match(iter, list, match) \
for (iter = &list->head; iter->next; iter = iter->next) \
if ((iter->channel == match->channel) && \
@@ -191,7 +184,7 @@ static void visor_thread_stop(struct task_struct *task)
* Partition so that it can be handled when it completes. If new is
* NULL it is assumed the entry refers only to the cmdrsp.
* Returns insert_location where entry was added,
- * SCSI_MLQUEUE_DEVICE_BUSY if it can't
+ * -EBUSY if it can't
*/
static int add_scsipending_entry(struct visorhba_devdata *devdata,
char cmdtype, void *new)
@@ -206,7 +199,7 @@ static int add_scsipending_entry(struct visorhba_devdata *devdata,
insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
if (insert_location == (int)devdata->nextinsert) {
spin_unlock_irqrestore(&devdata->privlock, flags);
- return -1;
+ return -EBUSY;
}
}
@@ -269,6 +262,62 @@ static struct uiscmdrsp *get_scsipending_cmdrsp(struct visorhba_devdata *ddata,
}
/**
+ * simple_idr_get - associate a provided pointer with an int value
+ * 1 <= value <= INT_MAX, and return this int value;
+ * the pointer value can be obtained later by passing
+ * this int value to idr_find()
+ * @idrtable: the data object maintaining the pointer<-->int mappings
+ * @p: the pointer value to be remembered
+ * @lock: a spinlock used when exclusive access to idrtable is needed
+ */
+static unsigned int simple_idr_get(struct idr *idrtable, void *p,
+ spinlock_t *lock)
+{
+ int id;
+ unsigned long flags;
+
+ idr_preload(GFP_KERNEL);
+ spin_lock_irqsave(lock, flags);
+ id = idr_alloc(idrtable, p, 1, INT_MAX, GFP_NOWAIT);
+ spin_unlock_irqrestore(lock, flags);
+ idr_preload_end();
+ if (id < 0)
+ return 0; /* failure */
+ return (unsigned int)(id); /* idr_alloc() guarantees > 0 */
+}
+
+/**
+ * setup_scsitaskmgmt_handles - stash the necessary handles so that the
+ * completion processing logic for a taskmgmt
+ * cmd will be able to find who to wake up
+ * and where to stash the result
+ */
+static void setup_scsitaskmgmt_handles(struct idr *idrtable, spinlock_t *lock,
+ struct uiscmdrsp *cmdrsp,
+ wait_queue_head_t *event, int *result)
+{
+ /* specify the event that has to be triggered when this */
+ /* cmd is complete */
+ cmdrsp->scsitaskmgmt.notify_handle =
+ simple_idr_get(idrtable, event, lock);
+ cmdrsp->scsitaskmgmt.notifyresult_handle =
+ simple_idr_get(idrtable, result, lock);
+}
+
+/**
+ * cleanup_scsitaskmgmt_handles - forget handles created by
+ * setup_scsitaskmgmt_handles()
+ */
+static void cleanup_scsitaskmgmt_handles(struct idr *idrtable,
+ struct uiscmdrsp *cmdrsp)
+{
+ if (cmdrsp->scsitaskmgmt.notify_handle)
+ idr_remove(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
+ if (cmdrsp->scsitaskmgmt.notifyresult_handle)
+ idr_remove(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
+}
+
+/**
* forward_taskmgmt_command - send taskmegmt command to the Service
* Partition
* @tasktype: Type of taskmgmt command
@@ -303,10 +352,8 @@ static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
/* issue TASK_MGMT_ABORT_TASK */
cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
- /* specify the event that has to be triggered when this */
- /* cmd is complete */
- cmdrsp->scsitaskmgmt.notify_handle = (u64)&notifyevent;
- cmdrsp->scsitaskmgmt.notifyresult_handle = (u64)&notifyresult;
+ setup_scsitaskmgmt_handles(&devdata->idr, &devdata->privlock, cmdrsp,
+ &notifyevent, &notifyresult);
/* save destination */
cmdrsp->scsitaskmgmt.tasktype = tasktype;
@@ -315,6 +362,8 @@ static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
cmdrsp->scsitaskmgmt.handle = scsicmd_id;
+ dev_dbg(&scsidev->sdev_gendev,
+ "visorhba: initiating type=%d taskmgmt command\n", tasktype);
if (!visorchannel_signalinsert(devdata->dev->visorchannel,
IOCHAN_TO_IOPART,
cmdrsp))
@@ -327,17 +376,23 @@ static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
msecs_to_jiffies(45000)))
goto err_del_scsipending_ent;
+ dev_dbg(&scsidev->sdev_gendev,
+ "visorhba: taskmgmt type=%d success; result=0x%x\n",
+ tasktype, notifyresult);
if (tasktype == TASK_MGMT_ABORT_TASK)
scsicmd->result = DID_ABORT << 16;
else
scsicmd->result = DID_RESET << 16;
scsicmd->scsi_done(scsicmd);
-
+ cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
return SUCCESS;
err_del_scsipending_ent:
+ dev_dbg(&scsidev->sdev_gendev,
+ "visorhba: taskmgmt type=%d not executed\n", tasktype);
del_scsipending_ent(devdata, scsicmd_id);
+ cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
return FAILED;
}
@@ -606,64 +661,76 @@ static struct scsi_host_template visorhba_driver_template = {
};
/**
- * info_debugfs_read - debugfs interface to dump visorhba states
- * @file: Debug file
- * @buf: buffer to send back to user
- * @len: len that can be written to buf
- * @offset: offset into buf
+ * info_debugfs_show - debugfs interface to dump visorhba states
*
- * Dumps information about the visorhba driver and devices
- * TODO: Make this per vhba
- * Returns bytes_read
+ * This presents a file in the debugfs tree named:
+ * /visorhba/vbus<x>:dev<y>/info
*/
-static ssize_t info_debugfs_read(struct file *file, char __user *buf,
- size_t len, loff_t *offset)
+static int info_debugfs_show(struct seq_file *seq, void *v)
{
- ssize_t bytes_read = 0;
- int str_pos = 0;
- u64 phys_flags_addr;
- int i;
- struct visorhba_devdata *devdata;
- char *vbuf;
+ struct visorhba_devdata *devdata = seq->private;
+
+ seq_printf(seq, "max_buff_len = %u\n", devdata->max_buff_len);
+ seq_printf(seq, "interrupts_rcvd = %llu\n", devdata->interrupts_rcvd);
+ seq_printf(seq, "interrupts_disabled = %llu\n",
+ devdata->interrupts_disabled);
+ seq_printf(seq, "interrupts_notme = %llu\n",
+ devdata->interrupts_notme);
+ seq_printf(seq, "flags_addr = %p\n", devdata->flags_addr);
+ if (devdata->flags_addr) {
+ u64 phys_flags_addr =
+ virt_to_phys((__force void *)devdata->flags_addr);
+ seq_printf(seq, "phys_flags_addr = 0x%016llx\n",
+ phys_flags_addr);
+ seq_printf(seq, "FeatureFlags = %llu\n",
+ (__le64)readq(devdata->flags_addr));
+ }
+ seq_printf(seq, "acquire_failed_cnt = %llu\n",
+ devdata->acquire_failed_cnt);
- if (len > MAX_BUF)
- len = MAX_BUF;
- vbuf = kzalloc(len, GFP_KERNEL);
- if (!vbuf)
- return -ENOMEM;
+ return 0;
+}
+
+static int info_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, info_debugfs_show, inode->i_private);
+}
- for (i = 0; i < VISORHBA_OPEN_MAX; i++) {
- if (!visorhbas_open[i].devdata)
- continue;
-
- devdata = visorhbas_open[i].devdata;
-
- str_pos += scnprintf(vbuf + str_pos,
- len - str_pos, "max_buff_len:%u\n",
- devdata->max_buff_len);
-
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- "\ninterrupts_rcvd = %llu, interrupts_disabled = %llu\n",
- devdata->interrupts_rcvd,
- devdata->interrupts_disabled);
- str_pos += scnprintf(vbuf + str_pos,
- len - str_pos, "\ninterrupts_notme = %llu,\n",
- devdata->interrupts_notme);
- phys_flags_addr = virt_to_phys((__force void *)
- devdata->flags_addr);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos,
- "flags_addr = %p, phys_flags_addr=0x%016llx, FeatureFlags=%llu\n",
- devdata->flags_addr, phys_flags_addr,
- (__le64)readq(devdata->flags_addr));
- str_pos += scnprintf(vbuf + str_pos,
- len - str_pos, "acquire_failed_cnt:%llu\n",
- devdata->acquire_failed_cnt);
- str_pos += scnprintf(vbuf + str_pos, len - str_pos, "\n");
+static const struct file_operations info_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = info_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/**
+ * complete_taskmgmt_command - complete task management
+ * @cmdrsp: Response from the IOVM
+ *
+ * Service Partition returned the result of the task management
+ * command. Wake up anyone waiting for it.
+ * Returns void
+ */
+static inline void complete_taskmgmt_command
+(struct idr *idrtable, struct uiscmdrsp *cmdrsp, int result)
+{
+ wait_queue_head_t *wq =
+ idr_find(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
+ int *scsi_result_ptr =
+ idr_find(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
+
+ if (unlikely(!(wq && scsi_result_ptr))) {
+ pr_err("visorhba: no completion context; cmd will time out\n");
+ return;
}
- bytes_read = simple_read_from_buffer(buf, len, offset, vbuf, str_pos);
- kfree(vbuf);
- return bytes_read;
+ /* copy the result of the taskmgmt and
+ * wake up the error handler that is waiting for this
+ */
+ pr_debug("visorhba: notifying initiator with result=0x%x\n", result);
+ *scsi_result_ptr = result;
+ wake_up_all(wq);
}
/**
@@ -701,17 +768,8 @@ static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
break;
case CMD_SCSITASKMGMT_TYPE:
cmdrsp = pendingdel->sent;
- cmdrsp->scsitaskmgmt.notifyresult_handle
- = TASK_MGMT_FAILED;
- wake_up_all((wait_queue_head_t *)
- cmdrsp->scsitaskmgmt.notify_handle);
- break;
- case CMD_VDISKMGMT_TYPE:
- cmdrsp = pendingdel->sent;
- cmdrsp->vdiskmgmt.notifyresult_handle
- = VDISK_MGMT_FAILED;
- wake_up_all((wait_queue_head_t *)
- cmdrsp->vdiskmgmt.notify_handle);
+ complete_taskmgmt_command(&devdata->idr, cmdrsp,
+ TASK_MGMT_FAILED);
break;
default:
break;
@@ -878,89 +936,6 @@ complete_scsi_command(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
scsicmd->scsi_done(scsicmd);
}
-/* DELETE VDISK TASK MGMT COMMANDS */
-static inline void complete_vdiskmgmt_command(struct uiscmdrsp *cmdrsp)
-{
- /* copy the result of the taskmgmt and
- * wake up the error handler that is waiting for this
- */
- cmdrsp->vdiskmgmt.notifyresult_handle = cmdrsp->vdiskmgmt.result;
- wake_up_all((wait_queue_head_t *)cmdrsp->vdiskmgmt.notify_handle);
-}
-
-/**
- * complete_taskmgmt_command - complete task management
- * @cmdrsp: Response from the IOVM
- *
- * Service Partition returned the result of the task management
- * command. Wake up anyone waiting for it.
- * Returns void
- */
-static inline void complete_taskmgmt_command(struct uiscmdrsp *cmdrsp)
-{
- /* copy the result of the taskgmgt and
- * wake up the error handler that is waiting for this
- */
- cmdrsp->vdiskmgmt.notifyresult_handle = cmdrsp->vdiskmgmt.result;
- wake_up_all((wait_queue_head_t *)cmdrsp->scsitaskmgmt.notify_handle);
-}
-
-static struct work_struct dar_work_queue;
-static struct diskaddremove *dar_work_queue_head;
-static spinlock_t dar_work_queue_lock; /* Lock to protet dar_work_queue_head */
-static unsigned short dar_work_queue_sched;
-
-/**
- * queue_disk_add_remove - IOSP has sent us a add/remove request
- * @dar: disk add/remove request
- *
- * Queue the work needed to add/remove a disk.
- * Returns void
- */
-static inline void queue_disk_add_remove(struct diskaddremove *dar)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dar_work_queue_lock, flags);
- if (!dar_work_queue_head) {
- dar_work_queue_head = dar;
- dar->next = NULL;
- } else {
- dar->next = dar_work_queue_head;
- dar_work_queue_head = dar;
- }
- if (!dar_work_queue_sched) {
- schedule_work(&dar_work_queue);
- dar_work_queue_sched = 1;
- }
- spin_unlock_irqrestore(&dar_work_queue_lock, flags);
-}
-
-/**
- * process_disk_notify - IOSP has sent a process disk notify event
- * @shost: Scsi hot
- * @cmdrsp: Response from the IOSP
- *
- * Queue it to the work queue.
- * Return void.
- */
-static void process_disk_notify(struct Scsi_Host *shost,
- struct uiscmdrsp *cmdrsp)
-{
- struct diskaddremove *dar;
-
- dar = kzalloc(sizeof(*dar), GFP_ATOMIC);
- if (!dar)
- return;
-
- dar->add = cmdrsp->disknotify.add;
- dar->shost = shost;
- dar->channel = cmdrsp->disknotify.channel;
- dar->id = cmdrsp->disknotify.id;
- dar->lun = cmdrsp->disknotify.lun;
- queue_disk_add_remove(dar);
-}
-
/**
* drain_queue - pull responses out of iochannel
* @cmdrsp: Response from the IOSP
@@ -973,7 +948,6 @@ static void
drain_queue(struct uiscmdrsp *cmdrsp, struct visorhba_devdata *devdata)
{
struct scsi_cmnd *scsicmd;
- struct Scsi_Host *shost = devdata->scsihost;
while (1) {
if (!visorchannel_signalremove(devdata->dev->visorchannel,
@@ -995,21 +969,12 @@ drain_queue(struct uiscmdrsp *cmdrsp, struct visorhba_devdata *devdata)
if (!del_scsipending_ent(devdata,
cmdrsp->scsitaskmgmt.handle))
break;
- complete_taskmgmt_command(cmdrsp);
- } else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE) {
- /* The vHba pointer has no meaning in a
- * guest partition. Let's be safe and set it
- * to NULL now. Do not use it here!
- */
- cmdrsp->disknotify.v_hba = NULL;
- process_disk_notify(shost, cmdrsp);
- } else if (cmdrsp->cmdtype == CMD_VDISKMGMT_TYPE) {
- if (!del_scsipending_ent(devdata,
- cmdrsp->vdiskmgmt.handle))
- break;
- complete_vdiskmgmt_command(cmdrsp);
- }
- /* cmdrsp is now available for resuse */
+ complete_taskmgmt_command(&devdata->idr, cmdrsp,
+ cmdrsp->scsitaskmgmt.result);
+ } else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE)
+ dev_err_once(&devdata->dev->device,
+ "ignoring unsupported NOTIFYGUEST\n");
+ /* cmdrsp is now available for re-use */
}
}
@@ -1107,7 +1072,7 @@ static int visorhba_probe(struct visor_device *dev)
struct Scsi_Host *scsihost;
struct vhba_config_max max;
struct visorhba_devdata *devdata = NULL;
- int i, err, channel_offset;
+ int err, channel_offset;
u64 features;
scsihost = scsi_host_alloc(&visorhba_driver_template,
@@ -1122,9 +1087,9 @@ static int visorhba_probe(struct visor_device *dev)
if (err < 0)
goto err_scsi_host_put;
- scsihost->max_id = (unsigned)max.max_id;
- scsihost->max_lun = (unsigned)max.max_lun;
- scsihost->cmd_per_lun = (unsigned)max.cmd_per_lun;
+ scsihost->max_id = (unsigned int)max.max_id;
+ scsihost->max_lun = (unsigned int)max.max_lun;
+ scsihost->cmd_per_lun = (unsigned int)max.cmd_per_lun;
scsihost->max_sectors =
(unsigned short)(max.max_io_size >> 9);
scsihost->sg_tablesize =
@@ -1136,16 +1101,24 @@ static int visorhba_probe(struct visor_device *dev)
goto err_scsi_host_put;
devdata = (struct visorhba_devdata *)scsihost->hostdata;
- for (i = 0; i < VISORHBA_OPEN_MAX; i++) {
- if (!visorhbas_open[i].devdata) {
- visorhbas_open[i].devdata = devdata;
- break;
- }
- }
-
devdata->dev = dev;
dev_set_drvdata(&dev->device, devdata);
+ devdata->debugfs_dir = debugfs_create_dir(dev_name(&dev->device),
+ visorhba_debugfs_dir);
+ if (!devdata->debugfs_dir) {
+ err = -ENOMEM;
+ goto err_scsi_remove_host;
+ }
+ devdata->debugfs_info =
+ debugfs_create_file("info", S_IRUSR | S_IRGRP,
+ devdata->debugfs_dir, devdata,
+ &info_debugfs_fops);
+ if (!devdata->debugfs_info) {
+ err = -ENOMEM;
+ goto err_debugfs_dir;
+ }
+
init_waitqueue_head(&devdata->rsp_queue);
spin_lock_init(&devdata->privlock);
devdata->serverdown = false;
@@ -1156,11 +1129,13 @@ static int visorhba_probe(struct visor_device *dev)
channel_header.features);
err = visorbus_read_channel(dev, channel_offset, &features, 8);
if (err)
- goto err_scsi_remove_host;
+ goto err_debugfs_info;
features |= ULTRA_IO_CHANNEL_IS_POLLING;
err = visorbus_write_channel(dev, channel_offset, &features, 8);
if (err)
- goto err_scsi_remove_host;
+ goto err_debugfs_info;
+
+ idr_init(&devdata->idr);
devdata->thread_wait_ms = 2;
devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
@@ -1170,6 +1145,12 @@ static int visorhba_probe(struct visor_device *dev)
return 0;
+err_debugfs_info:
+ debugfs_remove(devdata->debugfs_info);
+
+err_debugfs_dir:
+ debugfs_remove_recursive(devdata->debugfs_dir);
+
err_scsi_remove_host:
scsi_remove_host(scsihost);
@@ -1198,7 +1179,11 @@ static void visorhba_remove(struct visor_device *dev)
scsi_remove_host(scsihost);
scsi_host_put(scsihost);
+ idr_destroy(&devdata->idr);
+
dev_set_drvdata(&dev->device, NULL);
+ debugfs_remove(devdata->debugfs_info);
+ debugfs_remove_recursive(devdata->debugfs_dir);
}
/**
@@ -1209,26 +1194,17 @@ static void visorhba_remove(struct visor_device *dev)
*/
static int visorhba_init(void)
{
- struct dentry *ret;
int rc = -ENOMEM;
visorhba_debugfs_dir = debugfs_create_dir("visorhba", NULL);
if (!visorhba_debugfs_dir)
return -ENOMEM;
- ret = debugfs_create_file("info", S_IRUSR, visorhba_debugfs_dir, NULL,
- &debugfs_info_fops);
-
- if (!ret) {
- rc = -EIO;
- goto cleanup_debugfs;
- }
-
rc = visorbus_register_visor_driver(&visorhba_driver);
if (rc)
goto cleanup_debugfs;
- return rc;
+ return 0;
cleanup_debugfs:
debugfs_remove_recursive(visorhba_debugfs_dir);
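
The visorhba rework above stops casting raw kernel pointers into the notify_handle fields that cross the channel to the service partition; pointers are instead registered in a per-device idr and only small integer handles are exchanged. The round trip, reduced to the calls added by this patch (locking and error paths elided):

	/* issue side: map pointers to handles before sending the command */
	cmdrsp->scsitaskmgmt.notify_handle =
		simple_idr_get(&devdata->idr, &notifyevent, &devdata->privlock);

	/* completion side: translate a handle back into its pointer */
	wait_queue_head_t *wq =
		idr_find(&devdata->idr, cmdrsp->scsitaskmgmt.notify_handle);

	/* teardown: forget the mapping once the command is finished */
	idr_remove(&devdata->idr, cmdrsp->scsitaskmgmt.notify_handle);
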
diff --git a/drivers/staging/unisys/visorinput/visorinput.c b/drivers/staging/unisys/visorinput/visorinput.c
index 12a357078..d67cd7632 100644
--- a/drivers/staging/unisys/visorinput/visorinput.c
+++ b/drivers/staging/unisys/visorinput/visorinput.c
@@ -506,7 +506,7 @@ calc_button(int x)
case 3:
return BTN_RIGHT;
default:
- return -1;
+ return -EINVAL;
}
}
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index fd7c9a6cb..a28388d3d 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -1000,25 +1000,28 @@ visornic_set_multi(struct net_device *netdev)
struct uiscmdrsp *cmdrsp;
struct visornic_devdata *devdata = netdev_priv(netdev);
- /* any filtering changes */
- if (devdata->old_flags != netdev->flags) {
- if ((netdev->flags & IFF_PROMISC) !=
- (devdata->old_flags & IFF_PROMISC)) {
- cmdrsp = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
- if (!cmdrsp)
- return;
- cmdrsp->cmdtype = CMD_NET_TYPE;
- cmdrsp->net.type = NET_RCV_PROMISC;
- cmdrsp->net.enbdis.context = netdev;
- cmdrsp->net.enbdis.enable =
- netdev->flags & IFF_PROMISC;
- visorchannel_signalinsert(devdata->dev->visorchannel,
- IOCHAN_TO_IOPART,
- cmdrsp);
- kfree(cmdrsp);
- }
- devdata->old_flags = netdev->flags;
- }
+ if (devdata->old_flags == netdev->flags)
+ return;
+
+ if ((netdev->flags & IFF_PROMISC) ==
+ (devdata->old_flags & IFF_PROMISC))
+ goto out_save_flags;
+
+ cmdrsp = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
+ if (!cmdrsp)
+ return;
+ cmdrsp->cmdtype = CMD_NET_TYPE;
+ cmdrsp->net.type = NET_RCV_PROMISC;
+ cmdrsp->net.enbdis.context = netdev;
+ cmdrsp->net.enbdis.enable =
+ netdev->flags & IFF_PROMISC;
+ visorchannel_signalinsert(devdata->dev->visorchannel,
+ IOCHAN_TO_IOPART,
+ cmdrsp);
+ kfree(cmdrsp);
+
+out_save_flags:
+ devdata->old_flags = netdev->flags;
}
/**
@@ -1134,7 +1137,7 @@ repost_return(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
*
* Got a receive packet back from the IO Part, handle it and send
* it up the stack.
- * Returns void
+ * Returns 1 iff an skb was received, otherwise 0
*/
static int
visornic_rx(struct uiscmdrsp *cmdrsp)
@@ -1145,7 +1148,6 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
int cc, currsize, off;
struct ethhdr *eth;
unsigned long flags;
- int rx_count = 0;
/* post new rcv buf to the other end using the cmdrsp we have at hand
* post it without holding lock - but we'll use the signal lock to
@@ -1177,7 +1179,7 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
*/
spin_unlock_irqrestore(&devdata->priv_lock, flags);
repost_return(cmdrsp, devdata, skb, netdev);
- return rx_count;
+ return 0;
}
spin_unlock_irqrestore(&devdata->priv_lock, flags);
@@ -1196,7 +1198,7 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
dev_err(&devdata->netdev->dev,
"repost_return failed");
- return rx_count;
+ return 0;
}
/* length rcvd is greater than firstfrag in this skb rcv buf */
skb->tail += RCVPOST_BUF_SIZE; /* amount in skb->data */
@@ -1212,7 +1214,7 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
dev_err(&devdata->netdev->dev,
"repost_return failed");
- return rx_count;
+ return 0;
}
skb->tail += skb->len;
skb->data_len = 0; /* nothing rcvd in frag_list */
@@ -1231,7 +1233,7 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
if (cmdrsp->net.rcv.rcvbuf[0] != skb) {
if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
dev_err(&devdata->netdev->dev, "repost_return failed");
- return rx_count;
+ return 0;
}
if (cmdrsp->net.rcv.numrcvbufs > 1) {
@@ -1313,10 +1315,9 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
/* drop packet - don't forward it up to OS */
devdata->n_rcv_packets_not_accepted++;
repost_return(cmdrsp, devdata, skb, netdev);
- return rx_count;
+ return 0;
} while (0);
- rx_count++;
netif_receive_skb(skb);
/* netif_rx returns various values, but "in practice most drivers
* ignore the return value
@@ -1329,7 +1330,7 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
* new rcv buffer.
*/
repost_return(cmdrsp, devdata, skb, netdev);
- return rx_count;
+ return 1;
}
/**
@@ -1339,13 +1340,11 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
*
* Setup initial values for the visornic based on channel and default
* values.
- * Returns a pointer to the devdata if successful, else NULL
+ * Returns a pointer to the devdata structure
*/
static struct visornic_devdata *
devdata_initialize(struct visornic_devdata *devdata, struct visor_device *dev)
{
- if (!devdata)
- return NULL;
devdata->dev = dev;
devdata->incarnation_id = get_jiffies_64();
return devdata;
@@ -1793,7 +1792,7 @@ static int visornic_probe(struct visor_device *dev)
sizeof(struct sk_buff *), GFP_KERNEL);
if (!devdata->rcvbuf) {
err = -ENOMEM;
- goto cleanup_rcvbuf;
+ goto cleanup_netdev;
}
/* set the net_xmit outstanding threshold */
@@ -1814,12 +1813,12 @@ static int visornic_probe(struct visor_device *dev)
devdata->cmdrsp_rcv = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
if (!devdata->cmdrsp_rcv) {
err = -ENOMEM;
- goto cleanup_cmdrsp_rcv;
+ goto cleanup_rcvbuf;
}
devdata->xmit_cmdrsp = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
if (!devdata->xmit_cmdrsp) {
err = -ENOMEM;
- goto cleanup_xmit_cmdrsp;
+ goto cleanup_cmdrsp_rcv;
}
INIT_WORK(&devdata->timeout_reset, visornic_timeout_reset);
devdata->server_down = false;
@@ -2088,8 +2087,10 @@ static int visornic_init(void)
goto cleanup_debugfs;
err = visorbus_register_visor_driver(&visornic_driver);
- if (!err)
- return 0;
+ if (err)
+ goto cleanup_debugfs;
+
+ return 0;
cleanup_debugfs:
debugfs_remove_recursive(visornic_debugfs_dir);
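
The visornic_set_multi() rewrite above replaces nested conditionals with guard clauses: return at once when the flags did not change, jump to a shared label when only non-promiscuous bits moved. The same shape in miniature, with hypothetical names standing in for the driver's:

	static void example_update_flags(struct example_state *st, unsigned int flags)
	{
		if (st->old_flags == flags)
			return;				/* nothing changed */
		if (!((flags ^ st->old_flags) & EX_PROMISC))
			goto out_save;			/* promisc bit did not move */
		example_notify_hw(flags & EX_PROMISC);	/* toggle promiscuous mode */
	out_save:
		st->old_flags = flags;
	}
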
diff --git a/drivers/staging/wilc1000/Makefile b/drivers/staging/wilc1000/Makefile
index cff27c598..2680fefa6 100644
--- a/drivers/staging/wilc1000/Makefile
+++ b/drivers/staging/wilc1000/Makefile
@@ -6,7 +6,6 @@ ccflags-y += -DFIRMWARE_1002="\"/*(DEBLOBBED)*/\"" \
ccflags-y += -I$(src)/ -DWILC_ASIC_A0 -DWILC_DEBUGFS
wilc1000-objs := wilc_wfi_cfgoperations.o linux_wlan.o linux_mon.o \
- wilc_msgqueue.o \
coreconfigurator.o host_interface.o \
wilc_wlan_cfg.o wilc_debugfs.o \
wilc_wlan.o
diff --git a/drivers/staging/wilc1000/TODO b/drivers/staging/wilc1000/TODO
index 95199d80a..ec93b2ee0 100644
--- a/drivers/staging/wilc1000/TODO
+++ b/drivers/staging/wilc1000/TODO
@@ -4,6 +4,11 @@ TODO:
- remove custom debug and tracing functions
- rework comments and function headers(also coding style)
- replace all semaphores with mutexes or completions
+- Move handling for each individual members of 'union message_body' out
+ into a separate 'struct work_struct' and completely remove the multiplexer
+ that is currently part of host_if_work(), allowing movement of the
+ implementation of each message handler into the callsite of the function
+ that currently queues the 'host_if_msg'.
- make spi and sdio components coexist in one build
- turn compile-time platform configuration (BEAGLE_BOARD,
PANDA_BOARD, PLAT_WMS8304, PLAT_RKXXXX, CUSTOMER_PLATFORM, ...)
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 953584248..78f524fcd 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -3,11 +3,14 @@
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
#include "host_interface.h"
+#include <linux/spinlock.h>
+#include <linux/errno.h>
#include "coreconfigurator.h"
#include "wilc_wlan.h"
#include "wilc_wlan_if.h"
-#include "wilc_msgqueue.h"
#include <linux/etherdevice.h>
#include "wilc_wfi_netdevice.h"
@@ -181,7 +184,6 @@ union message_body {
struct drv_handler drv;
struct set_multicast multicast_info;
struct op_mode mode;
- struct set_mac_addr set_mac_info;
struct get_mac_addr get_mac_info;
struct ba_session_info session_info;
struct remain_ch remain_on_ch;
@@ -195,6 +197,7 @@ struct host_if_msg {
u16 id;
union message_body body;
struct wilc_vif *vif;
+ struct work_struct work;
};
struct join_bss_param {
@@ -229,8 +232,7 @@ struct join_bss_param {
static struct host_if_drv *terminated_handle;
bool wilc_optaining_ip;
static u8 P2P_LISTEN_STATE;
-static struct task_struct *hif_thread_handler;
-static struct message_queue hif_msg_q;
+static struct workqueue_struct *hif_workqueue;
static struct completion hif_thread_comp;
static struct completion hif_driver_comp;
static struct completion hif_wait_response;
@@ -264,6 +266,27 @@ static struct wilc_vif *join_req_vif;
static void *host_int_ParseJoinBssParam(struct network_info *ptstrNetworkInfo);
static int host_int_get_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx);
static s32 Handle_ScanDone(struct wilc_vif *vif, enum scan_event enuEvent);
+static void host_if_work(struct work_struct *work);
+
+/*!
+ * @author syounan
+ * @date 1 Sep 2010
+ * @note copied from FLO glue implementation
+ * @version 1.0
+ */
+static int wilc_enqueue_cmd(struct host_if_msg *msg)
+{
+ struct host_if_msg *new_msg;
+
+ new_msg = kmemdup(msg, sizeof(*new_msg), GFP_ATOMIC);
+ if (!new_msg)
+ return -ENOMEM;
+
+ INIT_WORK(&new_msg->work, host_if_work);
+ queue_work(hif_workqueue, &new_msg->work);
+ return 0;
+}
+
/* The u8IfIdx ranges from 0 to NUM_CONCURRENT_IFC - 1, but index 0 is used
 * for a special purpose in the wilc device, so we add 1 so the index starts from 1.
@@ -417,10 +440,10 @@ static void handle_get_mac_address(struct wilc_vif *vif,
complete(&hif_wait_response);
}
-static s32 handle_cfg_param(struct wilc_vif *vif,
- struct cfg_param_attr *cfg_param_attr)
+static void handle_cfg_param(struct wilc_vif *vif,
+ struct cfg_param_attr *cfg_param_attr)
{
- s32 result = 0;
+ int ret = 0;
struct wid wid_list[32];
struct host_if_drv *hif_drv = vif->hif_drv;
int i = 0;
@@ -428,15 +451,16 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
mutex_lock(&hif_drv->cfg_values_lock);
if (cfg_param_attr->flag & BSS_TYPE) {
- if (cfg_param_attr->bss_type < 6) {
+ u8 bss_type = cfg_param_attr->bss_type;
+
+ if (bss_type < 6) {
wid_list[i].id = WID_BSS_TYPE;
- wid_list[i].val = (s8 *)&cfg_param_attr->bss_type;
+ wid_list[i].val = (s8 *)&bss_type;
wid_list[i].type = WID_CHAR;
wid_list[i].size = sizeof(char);
- hif_drv->cfg_values.bss_type = (u8)cfg_param_attr->bss_type;
+ hif_drv->cfg_values.bss_type = bss_type;
} else {
netdev_err(vif->ndev, "check value 6 over\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -452,7 +476,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.auth_type = (u8)cfg_param_attr->auth_type;
} else {
netdev_err(vif->ndev, "Impossible value\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -467,7 +490,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.auth_timeout = cfg_param_attr->auth_timeout;
} else {
netdev_err(vif->ndev, "Range(1 ~ 65535) over\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -481,7 +503,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.power_mgmt_mode = (u8)cfg_param_attr->power_mgmt_mode;
} else {
netdev_err(vif->ndev, "Invalid power mode\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -496,7 +517,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.short_retry_limit = cfg_param_attr->short_retry_limit;
} else {
netdev_err(vif->ndev, "Range(1~256) over\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -511,7 +531,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.long_retry_limit = cfg_param_attr->long_retry_limit;
} else {
netdev_err(vif->ndev, "Range(1~256) over\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -526,7 +545,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.frag_threshold = cfg_param_attr->frag_threshold;
} else {
netdev_err(vif->ndev, "Threshold Range fail\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -541,7 +559,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.rts_threshold = cfg_param_attr->rts_threshold;
} else {
netdev_err(vif->ndev, "Threshold Range fail\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -555,7 +572,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.preamble_type = cfg_param_attr->preamble_type;
} else {
netdev_err(vif->ndev, "Preamle Range(0~2) over\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -569,7 +585,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.short_slot_allowed = (u8)cfg_param_attr->short_slot_allowed;
} else {
netdev_err(vif->ndev, "Short slot(2) over\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -583,7 +598,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.txop_prot_disabled = (u8)cfg_param_attr->txop_prot_disabled;
} else {
netdev_err(vif->ndev, "TXOP prot disable\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -598,7 +612,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.beacon_interval = cfg_param_attr->beacon_interval;
} else {
netdev_err(vif->ndev, "Beacon interval(1~65535)fail\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -613,7 +626,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.dtim_period = cfg_param_attr->dtim_period;
} else {
netdev_err(vif->ndev, "DTIM range(1~255) fail\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -627,7 +639,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.site_survey_enabled = (u8)cfg_param_attr->site_survey_enabled;
} else {
netdev_err(vif->ndev, "Site survey disable\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -642,7 +653,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.site_survey_scan_time = cfg_param_attr->site_survey_scan_time;
} else {
netdev_err(vif->ndev, "Site scan time(1~65535) over\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -657,7 +667,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.active_scan_time = cfg_param_attr->active_scan_time;
} else {
netdev_err(vif->ndev, "Active time(1~65535) over\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -672,7 +681,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.passive_scan_time = cfg_param_attr->passive_scan_time;
} else {
netdev_err(vif->ndev, "Passive time(1~65535) over\n");
- result = -EINVAL;
goto unlock;
}
i++;
@@ -694,21 +702,19 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
hif_drv->cfg_values.curr_tx_rate = (u8)curr_tx_rate;
} else {
netdev_err(vif->ndev, "out of TX rate\n");
- result = -EINVAL;
goto unlock;
}
i++;
}
- result = wilc_send_config_pkt(vif, SET_CFG, wid_list,
- i, wilc_get_vif_idx(vif));
+ ret = wilc_send_config_pkt(vif, SET_CFG, wid_list,
+ i, wilc_get_vif_idx(vif));
- if (result)
+ if (ret)
netdev_err(vif->ndev, "Error in setting CFG params\n");
unlock:
mutex_unlock(&hif_drv->cfg_values_lock);
- return result;
}
static s32 handle_scan(struct wilc_vif *vif, struct scan_attr *scan_info)
@@ -1231,17 +1237,14 @@ static s32 Handle_RcvdNtwrkInfo(struct wilc_vif *vif,
}
for (i = 0; i < hif_drv->usr_scan_req.rcvd_ch_cnt; i++) {
- if ((hif_drv->usr_scan_req.net_info[i].bssid) &&
- (pstrNetworkInfo->bssid)) {
- if (memcmp(hif_drv->usr_scan_req.net_info[i].bssid,
- pstrNetworkInfo->bssid, 6) == 0) {
- if (pstrNetworkInfo->rssi <= hif_drv->usr_scan_req.net_info[i].rssi) {
- goto done;
- } else {
- hif_drv->usr_scan_req.net_info[i].rssi = pstrNetworkInfo->rssi;
- bNewNtwrkFound = false;
- break;
- }
+ if (memcmp(hif_drv->usr_scan_req.net_info[i].bssid,
+ pstrNetworkInfo->bssid, 6) == 0) {
+ if (pstrNetworkInfo->rssi <= hif_drv->usr_scan_req.net_info[i].rssi) {
+ goto done;
+ } else {
+ hif_drv->usr_scan_req.net_info[i].rssi = pstrNetworkInfo->rssi;
+ bNewNtwrkFound = false;
+ break;
}
}
}
@@ -1250,20 +1253,17 @@ static s32 Handle_RcvdNtwrkInfo(struct wilc_vif *vif,
if (hif_drv->usr_scan_req.rcvd_ch_cnt < MAX_NUM_SCANNED_NETWORKS) {
hif_drv->usr_scan_req.net_info[hif_drv->usr_scan_req.rcvd_ch_cnt].rssi = pstrNetworkInfo->rssi;
- if (hif_drv->usr_scan_req.net_info[hif_drv->usr_scan_req.rcvd_ch_cnt].bssid &&
- pstrNetworkInfo->bssid) {
- memcpy(hif_drv->usr_scan_req.net_info[hif_drv->usr_scan_req.rcvd_ch_cnt].bssid,
- pstrNetworkInfo->bssid, 6);
+ memcpy(hif_drv->usr_scan_req.net_info[hif_drv->usr_scan_req.rcvd_ch_cnt].bssid,
+ pstrNetworkInfo->bssid, 6);
- hif_drv->usr_scan_req.rcvd_ch_cnt++;
+ hif_drv->usr_scan_req.rcvd_ch_cnt++;
- pstrNetworkInfo->new_network = true;
- pJoinParams = host_int_ParseJoinBssParam(pstrNetworkInfo);
+ pstrNetworkInfo->new_network = true;
+ pJoinParams = host_int_ParseJoinBssParam(pstrNetworkInfo);
- hif_drv->usr_scan_req.scan_result(SCAN_EVENT_NETWORK_FOUND, pstrNetworkInfo,
- hif_drv->usr_scan_req.arg,
- pJoinParams);
- }
+ hif_drv->usr_scan_req.scan_result(SCAN_EVENT_NETWORK_FOUND, pstrNetworkInfo,
+ hif_drv->usr_scan_req.arg,
+ pJoinParams);
}
} else {
pstrNetworkInfo->new_network = false;
@@ -2364,7 +2364,7 @@ static void ListenTimerCB(unsigned long arg)
msg.vif = vif;
msg.body.remain_on_ch.id = vif->hif_drv->remain_on_ch.id;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "wilc_mq_send fail\n");
}
@@ -2464,187 +2464,171 @@ static void handle_get_tx_pwr(struct wilc_vif *vif, u8 *tx_pwr)
complete(&hif_wait_response);
}
-static int hostIFthread(void *pvArg)
+static void host_if_work(struct work_struct *work)
{
- u32 u32Ret;
- struct host_if_msg msg;
- struct wilc *wilc = pvArg;
- struct wilc_vif *vif;
-
- memset(&msg, 0, sizeof(struct host_if_msg));
-
- while (1) {
- wilc_mq_recv(&hif_msg_q, &msg, sizeof(struct host_if_msg), &u32Ret);
- vif = msg.vif;
- if (msg.id == HOST_IF_MSG_EXIT)
- break;
-
- if ((!wilc_initialized)) {
- usleep_range(200 * 1000, 200 * 1000);
- wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
- continue;
- }
-
- if (msg.id == HOST_IF_MSG_CONNECT &&
- vif->hif_drv->usr_scan_req.scan_result) {
- wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
- usleep_range(2 * 1000, 2 * 1000);
- continue;
- }
+ struct host_if_msg *msg;
+ struct wilc *wilc;
- switch (msg.id) {
- case HOST_IF_MSG_SCAN:
- handle_scan(msg.vif, &msg.body.scan_info);
- break;
+ msg = container_of(work, struct host_if_msg, work);
+ wilc = msg->vif->wilc;
- case HOST_IF_MSG_CONNECT:
- Handle_Connect(msg.vif, &msg.body.con_info);
- break;
+ if (msg->id == HOST_IF_MSG_CONNECT &&
+ msg->vif->hif_drv->usr_scan_req.scan_result) {
+ wilc_enqueue_cmd(msg);
+ usleep_range(2 * 1000, 2 * 1000);
+ goto free_msg;
+ }
+ switch (msg->id) {
+ case HOST_IF_MSG_SCAN:
+ handle_scan(msg->vif, &msg->body.scan_info);
+ break;
- case HOST_IF_MSG_RCVD_NTWRK_INFO:
- Handle_RcvdNtwrkInfo(msg.vif, &msg.body.net_info);
- break;
+ case HOST_IF_MSG_CONNECT:
+ Handle_Connect(msg->vif, &msg->body.con_info);
+ break;
- case HOST_IF_MSG_RCVD_GNRL_ASYNC_INFO:
- Handle_RcvdGnrlAsyncInfo(vif,
- &msg.body.async_info);
- break;
+ case HOST_IF_MSG_RCVD_NTWRK_INFO:
+ Handle_RcvdNtwrkInfo(msg->vif, &msg->body.net_info);
+ break;
- case HOST_IF_MSG_KEY:
- Handle_Key(msg.vif, &msg.body.key_info);
- break;
+ case HOST_IF_MSG_RCVD_GNRL_ASYNC_INFO:
+ Handle_RcvdGnrlAsyncInfo(msg->vif,
+ &msg->body.async_info);
+ break;
- case HOST_IF_MSG_CFG_PARAMS:
- handle_cfg_param(msg.vif, &msg.body.cfg_info);
- break;
+ case HOST_IF_MSG_KEY:
+ Handle_Key(msg->vif, &msg->body.key_info);
+ break;
- case HOST_IF_MSG_SET_CHANNEL:
- handle_set_channel(msg.vif, &msg.body.channel_info);
- break;
+ case HOST_IF_MSG_CFG_PARAMS:
+ handle_cfg_param(msg->vif, &msg->body.cfg_info);
+ break;
- case HOST_IF_MSG_DISCONNECT:
- Handle_Disconnect(msg.vif);
- break;
+ case HOST_IF_MSG_SET_CHANNEL:
+ handle_set_channel(msg->vif, &msg->body.channel_info);
+ break;
- case HOST_IF_MSG_RCVD_SCAN_COMPLETE:
- del_timer(&vif->hif_drv->scan_timer);
+ case HOST_IF_MSG_DISCONNECT:
+ Handle_Disconnect(msg->vif);
+ break;
- if (!wilc_wlan_get_num_conn_ifcs(wilc))
- wilc_chip_sleep_manually(wilc);
+ case HOST_IF_MSG_RCVD_SCAN_COMPLETE:
+ del_timer(&msg->vif->hif_drv->scan_timer);
- Handle_ScanDone(msg.vif, SCAN_EVENT_DONE);
+ if (!wilc_wlan_get_num_conn_ifcs(wilc))
+ wilc_chip_sleep_manually(wilc);
- if (vif->hif_drv->remain_on_ch_pending)
- Handle_RemainOnChan(msg.vif,
- &msg.body.remain_on_ch);
+ Handle_ScanDone(msg->vif, SCAN_EVENT_DONE);
- break;
+ if (msg->vif->hif_drv->remain_on_ch_pending)
+ Handle_RemainOnChan(msg->vif,
+ &msg->body.remain_on_ch);
- case HOST_IF_MSG_GET_RSSI:
- Handle_GetRssi(msg.vif);
- break;
+ break;
- case HOST_IF_MSG_GET_STATISTICS:
- Handle_GetStatistics(msg.vif,
- (struct rf_info *)msg.body.data);
- break;
+ case HOST_IF_MSG_GET_RSSI:
+ Handle_GetRssi(msg->vif);
+ break;
- case HOST_IF_MSG_ADD_BEACON:
- Handle_AddBeacon(msg.vif, &msg.body.beacon_info);
- break;
+ case HOST_IF_MSG_GET_STATISTICS:
+ Handle_GetStatistics(msg->vif,
+ (struct rf_info *)msg->body.data);
+ break;
- case HOST_IF_MSG_DEL_BEACON:
- Handle_DelBeacon(msg.vif);
- break;
+ case HOST_IF_MSG_ADD_BEACON:
+ Handle_AddBeacon(msg->vif, &msg->body.beacon_info);
+ break;
- case HOST_IF_MSG_ADD_STATION:
- Handle_AddStation(msg.vif, &msg.body.add_sta_info);
- break;
+ case HOST_IF_MSG_DEL_BEACON:
+ Handle_DelBeacon(msg->vif);
+ break;
- case HOST_IF_MSG_DEL_STATION:
- Handle_DelStation(msg.vif, &msg.body.del_sta_info);
- break;
+ case HOST_IF_MSG_ADD_STATION:
+ Handle_AddStation(msg->vif, &msg->body.add_sta_info);
+ break;
- case HOST_IF_MSG_EDIT_STATION:
- Handle_EditStation(msg.vif, &msg.body.edit_sta_info);
- break;
+ case HOST_IF_MSG_DEL_STATION:
+ Handle_DelStation(msg->vif, &msg->body.del_sta_info);
+ break;
- case HOST_IF_MSG_GET_INACTIVETIME:
- Handle_Get_InActiveTime(msg.vif, &msg.body.mac_info);
- break;
+ case HOST_IF_MSG_EDIT_STATION:
+ Handle_EditStation(msg->vif, &msg->body.edit_sta_info);
+ break;
- case HOST_IF_MSG_SCAN_TIMER_FIRED:
+ case HOST_IF_MSG_GET_INACTIVETIME:
+ Handle_Get_InActiveTime(msg->vif, &msg->body.mac_info);
+ break;
- Handle_ScanDone(msg.vif, SCAN_EVENT_ABORTED);
- break;
+ case HOST_IF_MSG_SCAN_TIMER_FIRED:
+ Handle_ScanDone(msg->vif, SCAN_EVENT_ABORTED);
+ break;
- case HOST_IF_MSG_CONNECT_TIMER_FIRED:
- Handle_ConnectTimeout(msg.vif);
- break;
+ case HOST_IF_MSG_CONNECT_TIMER_FIRED:
+ Handle_ConnectTimeout(msg->vif);
+ break;
- case HOST_IF_MSG_POWER_MGMT:
- Handle_PowerManagement(msg.vif,
- &msg.body.pwr_mgmt_info);
- break;
+ case HOST_IF_MSG_POWER_MGMT:
+ Handle_PowerManagement(msg->vif,
+ &msg->body.pwr_mgmt_info);
+ break;
- case HOST_IF_MSG_SET_WFIDRV_HANDLER:
- handle_set_wfi_drv_handler(msg.vif, &msg.body.drv);
- break;
+ case HOST_IF_MSG_SET_WFIDRV_HANDLER:
+ handle_set_wfi_drv_handler(msg->vif, &msg->body.drv);
+ break;
- case HOST_IF_MSG_SET_OPERATION_MODE:
- handle_set_operation_mode(msg.vif, &msg.body.mode);
- break;
+ case HOST_IF_MSG_SET_OPERATION_MODE:
+ handle_set_operation_mode(msg->vif, &msg->body.mode);
+ break;
- case HOST_IF_MSG_SET_IPADDRESS:
- handle_set_ip_address(vif,
- msg.body.ip_info.ip_addr,
- msg.body.ip_info.idx);
- break;
+ case HOST_IF_MSG_SET_IPADDRESS:
+ handle_set_ip_address(msg->vif,
+ msg->body.ip_info.ip_addr,
+ msg->body.ip_info.idx);
+ break;
- case HOST_IF_MSG_GET_IPADDRESS:
- handle_get_ip_address(vif, msg.body.ip_info.idx);
- break;
+ case HOST_IF_MSG_GET_IPADDRESS:
+ handle_get_ip_address(msg->vif, msg->body.ip_info.idx);
+ break;
- case HOST_IF_MSG_GET_MAC_ADDRESS:
- handle_get_mac_address(msg.vif,
- &msg.body.get_mac_info);
- break;
+ case HOST_IF_MSG_GET_MAC_ADDRESS:
+ handle_get_mac_address(msg->vif,
+ &msg->body.get_mac_info);
+ break;
- case HOST_IF_MSG_REMAIN_ON_CHAN:
- Handle_RemainOnChan(msg.vif, &msg.body.remain_on_ch);
- break;
+ case HOST_IF_MSG_REMAIN_ON_CHAN:
+ Handle_RemainOnChan(msg->vif, &msg->body.remain_on_ch);
+ break;
- case HOST_IF_MSG_REGISTER_FRAME:
- Handle_RegisterFrame(msg.vif, &msg.body.reg_frame);
- break;
+ case HOST_IF_MSG_REGISTER_FRAME:
+ Handle_RegisterFrame(msg->vif, &msg->body.reg_frame);
+ break;
- case HOST_IF_MSG_LISTEN_TIMER_FIRED:
- Handle_ListenStateExpired(msg.vif, &msg.body.remain_on_ch);
- break;
+ case HOST_IF_MSG_LISTEN_TIMER_FIRED:
+ Handle_ListenStateExpired(msg->vif, &msg->body.remain_on_ch);
+ break;
- case HOST_IF_MSG_SET_MULTICAST_FILTER:
- Handle_SetMulticastFilter(msg.vif, &msg.body.multicast_info);
- break;
+ case HOST_IF_MSG_SET_MULTICAST_FILTER:
+ Handle_SetMulticastFilter(msg->vif, &msg->body.multicast_info);
+ break;
- case HOST_IF_MSG_DEL_ALL_STA:
- Handle_DelAllSta(msg.vif, &msg.body.del_all_sta_info);
- break;
+ case HOST_IF_MSG_DEL_ALL_STA:
+ Handle_DelAllSta(msg->vif, &msg->body.del_all_sta_info);
+ break;
- case HOST_IF_MSG_SET_TX_POWER:
- handle_set_tx_pwr(msg.vif, msg.body.tx_power.tx_pwr);
- break;
+ case HOST_IF_MSG_SET_TX_POWER:
+ handle_set_tx_pwr(msg->vif, msg->body.tx_power.tx_pwr);
+ break;
- case HOST_IF_MSG_GET_TX_POWER:
- handle_get_tx_pwr(msg.vif, &msg.body.tx_power.tx_pwr);
- break;
- default:
- netdev_err(vif->ndev, "[Host Interface] undefined\n");
- break;
- }
+ case HOST_IF_MSG_GET_TX_POWER:
+ handle_get_tx_pwr(msg->vif, &msg->body.tx_power.tx_pwr);
+ break;
+ default:
+ netdev_err(msg->vif->ndev, "[Host Interface] undefined\n");
+ break;
}
-
+free_msg:
+ kfree(msg);
complete(&hif_thread_comp);
- return 0;
}
static void TimerCB_Scan(unsigned long arg)
@@ -2656,7 +2640,7 @@ static void TimerCB_Scan(unsigned long arg)
msg.vif = vif;
msg.id = HOST_IF_MSG_SCAN_TIMER_FIRED;
- wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ wilc_enqueue_cmd(&msg);
}
static void TimerCB_Connect(unsigned long arg)
@@ -2668,7 +2652,7 @@ static void TimerCB_Connect(unsigned long arg)
msg.vif = vif;
msg.id = HOST_IF_MSG_CONNECT_TIMER_FIRED;
- wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ wilc_enqueue_cmd(&msg);
}
s32 wilc_remove_key(struct host_if_drv *hif_drv, const u8 *pu8StaAddress)
@@ -2703,7 +2687,7 @@ int wilc_remove_wep_key(struct wilc_vif *vif, u8 index)
msg.vif = vif;
msg.body.key_info.attr.wep.index = index;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "Request to remove WEP key\n");
else
@@ -2732,7 +2716,7 @@ int wilc_set_wep_default_keyid(struct wilc_vif *vif, u8 index)
msg.vif = vif;
msg.body.key_info.attr.wep.index = index;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "Default key index\n");
else
@@ -2766,7 +2750,7 @@ int wilc_add_wep_key_bss_sta(struct wilc_vif *vif, const u8 *key, u8 len,
msg.body.key_info.attr.wep.key_len = len;
msg.body.key_info.attr.wep.index = index;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "STA - WEP Key\n");
wait_for_completion(&hif_drv->comp_test_key_block);
@@ -2801,7 +2785,7 @@ int wilc_add_wep_key_bss_ap(struct wilc_vif *vif, const u8 *key, u8 len,
msg.body.key_info.attr.wep.mode = mode;
msg.body.key_info.attr.wep.auth_type = auth_type;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "AP - WEP Key\n");
@@ -2857,7 +2841,7 @@ int wilc_add_ptk(struct wilc_vif *vif, const u8 *ptk, u8 ptk_key_len,
msg.body.key_info.attr.wpa.mode = cipher_mode;
msg.vif = vif;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "PTK Key\n");
@@ -2926,7 +2910,7 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
msg.body.key_info.attr.wpa.key_len = key_len;
msg.body.key_info.attr.wpa.seq_len = key_rsc_len;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "RX GTK\n");
else
@@ -2956,7 +2940,7 @@ int wilc_set_pmkid_info(struct wilc_vif *vif,
&pmkid->pmkidlist[i].pmkid, PMKID_LEN);
}
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "PMKID Info\n");
@@ -2974,7 +2958,7 @@ int wilc_get_mac_address(struct wilc_vif *vif, u8 *mac_addr)
msg.body.get_mac_info.mac_addr = mac_addr;
msg.vif = vif;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result) {
netdev_err(vif->ndev, "Failed to send get mac address\n");
return -EFAULT;
@@ -3038,7 +3022,7 @@ int wilc_set_join_req(struct wilc_vif *vif, u8 *bssid, const u8 *ssid,
if (hif_drv->hif_state < HOST_IF_CONNECTING)
hif_drv->hif_state = HOST_IF_CONNECTING;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result) {
netdev_err(vif->ndev, "send message: Set join request\n");
return -EFAULT;
@@ -3067,7 +3051,7 @@ int wilc_disconnect(struct wilc_vif *vif, u16 reason_code)
msg.id = HOST_IF_MSG_DISCONNECT;
msg.vif = vif;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "Failed to send message: disconnect\n");
else
@@ -3111,7 +3095,7 @@ int wilc_set_mac_chnl_num(struct wilc_vif *vif, u8 channel)
msg.body.channel_info.set_ch = channel;
msg.vif = vif;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result) {
netdev_err(vif->ndev, "wilc mq send fail\n");
return -EINVAL;
@@ -3131,7 +3115,7 @@ int wilc_set_wfi_drv_handler(struct wilc_vif *vif, int index, u8 mac_idx)
msg.body.drv.mac_idx = mac_idx;
msg.vif = vif;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result) {
netdev_err(vif->ndev, "wilc mq send fail\n");
result = -EINVAL;
@@ -3150,7 +3134,7 @@ int wilc_set_operation_mode(struct wilc_vif *vif, u32 mode)
msg.body.mode.mode = mode;
msg.vif = vif;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result) {
netdev_err(vif->ndev, "wilc mq send fail\n");
result = -EINVAL;
@@ -3177,7 +3161,7 @@ s32 wilc_get_inactive_time(struct wilc_vif *vif, const u8 *mac,
msg.id = HOST_IF_MSG_GET_INACTIVETIME;
msg.vif = vif;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "Failed to send get host ch param\n");
else
@@ -3198,7 +3182,7 @@ int wilc_get_rssi(struct wilc_vif *vif, s8 *rssi_level)
msg.id = HOST_IF_MSG_GET_RSSI;
msg.vif = vif;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result) {
netdev_err(vif->ndev, "Failed to send get host ch param\n");
return -EFAULT;
@@ -3226,7 +3210,7 @@ int wilc_get_statistics(struct wilc_vif *vif, struct rf_info *stats)
msg.body.data = (char *)stats;
msg.vif = vif;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result) {
netdev_err(vif->ndev, "Failed to send get host channel\n");
return -EFAULT;
@@ -3279,7 +3263,7 @@ int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type,
if (!scan_info->ies)
return -ENOMEM;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result) {
netdev_err(vif->ndev, "Error in sending message queue\n");
return -EINVAL;
@@ -3309,7 +3293,7 @@ int wilc_hif_set_cfg(struct wilc_vif *vif,
msg.body.cfg_info = *cfg_param;
msg.vif = vif;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
return result;
}
@@ -3371,21 +3355,17 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
init_completion(&hif_drv->comp_inactive_time);
if (clients_count == 0) {
- result = wilc_mq_create(&hif_msg_q);
-
if (result < 0) {
netdev_err(vif->ndev, "Failed to creat MQ\n");
goto _fail_;
}
-
- hif_thread_handler = kthread_run(hostIFthread, wilc,
- "WILC_kthread");
-
- if (IS_ERR(hif_thread_handler)) {
- netdev_err(vif->ndev, "Failed to creat Thread\n");
- result = -EFAULT;
- goto _fail_mq_;
+ hif_workqueue = create_singlethread_workqueue("WILC_wq");
+ if (!hif_workqueue) {
+ netdev_err(vif->ndev, "Failed to create workqueue\n");
+ result = -ENOMEM;
+ goto _fail_;
}
+
setup_timer(&periodic_rssi, GetPeriodicRSSI,
(unsigned long)vif);
mod_timer(&periodic_rssi, jiffies + msecs_to_jiffies(5000));
@@ -3411,10 +3391,7 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
clients_count++;
- return result;
-
-_fail_mq_:
- wilc_mq_destroy(&hif_msg_q);
+ destroy_workqueue(hif_workqueue);
_fail_:
return result;
}
@@ -3458,13 +3435,13 @@ int wilc_deinit(struct wilc_vif *vif)
msg.id = HOST_IF_MSG_EXIT;
msg.vif = vif;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result != 0)
netdev_err(vif->ndev, "deinit : Error(%d)\n", result);
else
wait_for_completion(&hif_thread_comp);
- wilc_mq_destroy(&hif_msg_q);
+ destroy_workqueue(hif_workqueue);
}
kfree(hif_drv);
@@ -3504,7 +3481,7 @@ void wilc_network_info_received(struct wilc *wilc, u8 *pu8Buffer,
msg.body.net_info.buffer = kmalloc(u32Length, GFP_KERNEL);
memcpy(msg.body.net_info.buffer, pu8Buffer, u32Length);
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "message parameters (%d)\n", result);
}
@@ -3549,7 +3526,7 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *pu8Buffer,
msg.body.async_info.buffer = kmalloc(u32Length, GFP_KERNEL);
memcpy(msg.body.async_info.buffer, pu8Buffer, u32Length);
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "synchronous info (%d)\n", result);
@@ -3580,7 +3557,7 @@ void wilc_scan_complete_received(struct wilc *wilc, u8 *pu8Buffer,
msg.id = HOST_IF_MSG_RCVD_SCAN_COMPLETE;
msg.vif = vif;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "complete param (%d)\n", result);
}
@@ -3606,7 +3583,7 @@ int wilc_remain_on_channel(struct wilc_vif *vif, u32 session_id,
msg.body.remain_on_ch.id = session_id;
msg.vif = vif;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "wilc mq send fail\n");
@@ -3631,7 +3608,7 @@ int wilc_listen_state_expired(struct wilc_vif *vif, u32 session_id)
msg.vif = vif;
msg.body.remain_on_ch.id = session_id;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "wilc mq send fail\n");
@@ -3662,7 +3639,7 @@ int wilc_frame_register(struct wilc_vif *vif, u16 frame_type, bool reg)
msg.body.reg_frame.reg = reg;
msg.vif = vif;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "wilc mq send fail\n");
@@ -3700,7 +3677,7 @@ int wilc_add_beacon(struct wilc_vif *vif, u32 interval, u32 dtim_period,
beacon_info->tail = NULL;
}
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "wilc mq send fail\n");
@@ -3722,7 +3699,7 @@ int wilc_del_beacon(struct wilc_vif *vif)
msg.id = HOST_IF_MSG_DEL_BEACON;
msg.vif = vif;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "wilc_mq_send fail\n");
@@ -3749,7 +3726,7 @@ int wilc_add_station(struct wilc_vif *vif, struct add_sta_param *sta_param)
return -ENOMEM;
}
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "wilc_mq_send fail\n");
return result;
@@ -3771,7 +3748,7 @@ int wilc_del_station(struct wilc_vif *vif, const u8 *mac_addr)
else
memcpy(del_sta_info->mac_addr, mac_addr, ETH_ALEN);
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "wilc_mq_send fail\n");
return result;
@@ -3801,7 +3778,7 @@ int wilc_del_allstation(struct wilc_vif *vif, u8 mac_addr[][ETH_ALEN])
return result;
del_all_sta_info->assoc_sta = assoc_sta;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "wilc_mq_send fail\n");
@@ -3832,7 +3809,7 @@ int wilc_edit_station(struct wilc_vif *vif,
return -ENOMEM;
}
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "wilc_mq_send fail\n");
@@ -3856,7 +3833,7 @@ int wilc_set_power_mgmt(struct wilc_vif *vif, bool enabled, u32 timeout)
pwr_mgmt_info->enabled = enabled;
pwr_mgmt_info->timeout = timeout;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "wilc_mq_send fail\n");
return result;
@@ -3877,7 +3854,7 @@ int wilc_setup_multicast_filter(struct wilc_vif *vif, bool enabled,
multicast_filter_param->enabled = enabled;
multicast_filter_param->cnt = count;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "wilc_mq_send fail\n");
return result;
@@ -4050,7 +4027,7 @@ int wilc_setup_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
msg.vif = vif;
msg.body.ip_info.idx = idx;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "wilc_mq_send fail\n");
@@ -4070,7 +4047,7 @@ static int host_int_get_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
msg.vif = vif;
msg.body.ip_info.idx = idx;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ result = wilc_enqueue_cmd(&msg);
if (result)
netdev_err(vif->ndev, "wilc_mq_send fail\n");
@@ -4088,7 +4065,7 @@ int wilc_set_tx_power(struct wilc_vif *vif, u8 tx_power)
msg.body.tx_power.tx_pwr = tx_power;
msg.vif = vif;
- ret = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ ret = wilc_enqueue_cmd(&msg);
if (ret)
netdev_err(vif->ndev, "wilc_mq_send fail\n");
@@ -4105,7 +4082,7 @@ int wilc_get_tx_power(struct wilc_vif *vif, u8 *tx_power)
msg.id = HOST_IF_MSG_GET_TX_POWER;
msg.vif = vif;
- ret = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ ret = wilc_enqueue_cmd(&msg);
if (ret)
netdev_err(vif->ndev, "Failed to get TX PWR\n");
diff --git a/drivers/staging/wilc1000/host_interface.h b/drivers/staging/wilc1000/host_interface.h
index 8d2dd0db0..ddfea29df 100644
--- a/drivers/staging/wilc1000/host_interface.h
+++ b/drivers/staging/wilc1000/host_interface.h
@@ -224,10 +224,6 @@ struct op_mode {
u32 mode;
};
-struct set_mac_addr {
- u8 mac_addr[ETH_ALEN];
-};
-
struct get_mac_addr {
u8 *mac_addr;
};
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index 9a7fa90dc..ad6684916 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -20,7 +20,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
-
+#include <linux/mutex.h>
#include <linux/semaphore.h>
#include <linux/completion.h>
@@ -31,8 +31,6 @@ static struct notifier_block g_dev_notifier = {
.notifier_call = dev_state_ev_handler
};
-static struct semaphore close_exit_sync;
-
static int wlan_deinit_locks(struct net_device *dev);
static void wlan_deinitialize_threads(struct net_device *dev);
@@ -241,7 +239,7 @@ void wilc_mac_indicate(struct wilc *wilc, int flag)
(unsigned char *)&status, 4);
if (wilc->mac_status == WILC_MAC_STATUS_INIT) {
wilc->mac_status = status;
- up(&wilc->sync_event);
+ complete(&wilc->sync_event);
} else {
wilc->mac_status = status;
}
@@ -316,7 +314,7 @@ static int linux_wlan_txq_task(void *vp)
complete(&wl->txq_thread_started);
while (1) {
- down(&wl->txq_event);
+ wait_for_completion(&wl->txq_event);
if (wl->close) {
complete(&wl->txq_thread_started);
@@ -362,7 +360,7 @@ int wilc_wlan_get_firmware(struct net_device *dev)
goto _fail_;
if (reject_firmware(&wilc_firmware, firmware, wilc->dev) != 0) {
- netdev_err(dev, "%s - firmare not available\n", firmware);
+ netdev_err(dev, "%s - firmware not available\n", firmware);
ret = -1;
goto _fail_;
}
@@ -386,9 +384,9 @@ static int linux_wlan_start_firmware(struct net_device *dev)
if (ret < 0)
return ret;
- ret = wilc_lock_timeout(wilc, &wilc->sync_event, 5000);
- if (ret)
- return ret;
+ if (!wait_for_completion_timeout(&wilc->sync_event,
+ msecs_to_jiffies(5000)))
+ return -ETIME;
return 0;
}
@@ -650,7 +648,7 @@ void wilc1000_wlan_deinit(struct net_device *dev)
mutex_unlock(&wl->hif_cs);
}
if (&wl->txq_event)
- up(&wl->txq_event);
+ complete(&wl->txq_event);
wlan_deinitialize_threads(dev);
deinit_irq(dev);
@@ -679,12 +677,12 @@ static int wlan_init_locks(struct net_device *dev)
mutex_init(&wl->rxq_cs);
spin_lock_init(&wl->txq_spinlock);
- sema_init(&wl->txq_add_to_head_cs, 1);
+ mutex_init(&wl->txq_add_to_head_cs);
- sema_init(&wl->txq_event, 0);
+ init_completion(&wl->txq_event);
- sema_init(&wl->cfg_event, 0);
- sema_init(&wl->sync_event, 0);
+ init_completion(&wl->cfg_event);
+ init_completion(&wl->sync_event);
init_completion(&wl->txq_thread_started);
return 0;
@@ -717,10 +715,10 @@ static int wlan_initialize_threads(struct net_device *dev)
wilc->txq_thread = kthread_run(linux_wlan_txq_task, (void *)dev,
"K_TXQ_TASK");
- if (!wilc->txq_thread) {
+ if (IS_ERR(wilc->txq_thread)) {
netdev_err(dev, "couldn't create TXQ thread\n");
wilc->close = 0;
- return -ENOBUFS;
+ return PTR_ERR(wilc->txq_thread);
}
wait_for_completion(&wilc->txq_thread_started);
@@ -738,7 +736,7 @@ static void wlan_deinitialize_threads(struct net_device *dev)
wl->close = 1;
if (&wl->txq_event)
- up(&wl->txq_event);
+ complete(&wl->txq_event);
if (wl->txq_thread) {
kthread_stop(wl->txq_thread);
@@ -1088,7 +1086,6 @@ int wilc_mac_close(struct net_device *ndev)
WILC_WFI_deinit_mon_interface();
}
- up(&close_exit_sync);
vif->mac_opened = 0;
return 0;
@@ -1232,8 +1229,6 @@ void wilc_netdev_cleanup(struct wilc *wilc)
}
if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev)) {
- wilc_lock_timeout(wilc, &close_exit_sync, 5 * 1000);
-
for (i = 0; i < NUM_CONCURRENT_IFC; i++)
if (wilc->vif[i]->ndev)
if (vif[i]->mac_opened)
@@ -1258,8 +1253,6 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
struct net_device *ndev;
struct wilc *wl;
- sema_init(&close_exit_sync, 0);
-
wl = kzalloc(sizeof(*wl), GFP_KERNEL);
if (!wl)
return -ENOMEM;
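The semaphore-to-completion swaps in this file all share one shape; a self-contained sketch of the idiom (assuming <linux/completion.h>; in the driver the init happens once at setup):

	static struct completion sync_event;	/* replaces a semaphore initialized to 0 */

	static void setup(void)
	{
		init_completion(&sync_event);	/* was sema_init(&sem, 0) */
	}

	static void waiter(void)
	{
		wait_for_completion(&sync_event);	/* was down(&sem) */
	}

	static void signaler(void)
	{
		complete(&sync_event);		/* was up(&sem); wakes one waiter */
	}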
diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/wilc_sdio.c
index a839a7967..39b73fb27 100644
--- a/drivers/staging/wilc1000/wilc_sdio.c
+++ b/drivers/staging/wilc1000/wilc_sdio.c
@@ -1006,7 +1006,7 @@ static int sdio_sync_ext(struct wilc *wilc, int nint)
u32 reg;
if (nint > MAX_NUM_INT) {
- dev_err(&func->dev, "Too many interupts (%d)...\n", nint);
+ dev_err(&func->dev, "Too many interrupts (%d)...\n", nint);
return 0;
}
if (nint > MAX_NUN_INT_THRPT_ENH2) {
diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/wilc_spi.c
index 4268e2f29..22cf4b785 100644
--- a/drivers/staging/wilc1000/wilc_spi.c
+++ b/drivers/staging/wilc1000/wilc_spi.c
@@ -1082,7 +1082,7 @@ static int wilc_spi_sync_ext(struct wilc *wilc, int nint)
int ret, i;
if (nint > MAX_NUM_INT) {
- dev_err(&spi->dev, "Too many interupts (%d)...\n", nint);
+ dev_err(&spi->dev, "Too many interrupts (%d)...\n", nint);
return 0;
}
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index 51aff4ff7..2c2e8aca8 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -454,7 +454,11 @@ static void CfgScanResult(enum scan_event scan_event,
mutex_lock(&priv->scan_req_lock);
if (priv->pstrScanReq) {
- cfg80211_scan_done(priv->pstrScanReq, false);
+ struct cfg80211_scan_info info = {
+ .aborted = false,
+ };
+
+ cfg80211_scan_done(priv->pstrScanReq, &info);
priv->u32RcvdChCount = 0;
priv->bCfgScanning = false;
priv->pstrScanReq = NULL;
@@ -464,10 +468,14 @@ static void CfgScanResult(enum scan_event scan_event,
mutex_lock(&priv->scan_req_lock);
if (priv->pstrScanReq) {
+ struct cfg80211_scan_info info = {
+ .aborted = false,
+ };
+
update_scan_time();
refresh_scan(priv, 1, false);
- cfg80211_scan_done(priv->pstrScanReq, false);
+ cfg80211_scan_done(priv->pstrScanReq, &info);
priv->bCfgScanning = false;
priv->pstrScanReq = NULL;
}
@@ -625,8 +633,7 @@ static int scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
for (i = 0; i < request->n_ssids; i++) {
- if (request->ssids[i].ssid &&
- request->ssids[i].ssid_len != 0) {
+ if (request->ssids[i].ssid_len != 0) {
strHiddenNetwork.net_info[i].ssid = kmalloc(request->ssids[i].ssid_len, GFP_KERNEL);
memcpy(strHiddenNetwork.net_info[i].ssid, request->ssids[i].ssid, request->ssids[i].ssid_len);
strHiddenNetwork.net_info[i].ssid_len = request->ssids[i].ssid_len;
@@ -1184,7 +1191,7 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
struct wilc_priv *priv;
struct wilc_vif *vif;
u32 i = 0;
- u32 associatedsta = 0;
+ u32 associatedsta = ~0;
u32 inactive_time = 0;
priv = wiphy_priv(wiphy);
vif = netdev_priv(dev);
@@ -1197,7 +1204,7 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
}
}
- if (associatedsta == -1) {
+ if (associatedsta == ~0) {
netdev_err(dev, "sta required is not associated\n");
return -ENOENT;
}
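These hunks track a cfg80211 API change in 4.8: cfg80211_scan_done() now takes a struct cfg80211_scan_info rather than a bare bool. The new call shape:

	struct cfg80211_scan_info info = {
		.aborted = false,	/* set true when the scan did not finish */
	};

	cfg80211_scan_done(scan_request, &info);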
diff --git a/drivers/staging/wilc1000/wilc_wfi_netdevice.h b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
index 3a561df6d..5cc6a82d8 100644
--- a/drivers/staging/wilc1000/wilc_wfi_netdevice.h
+++ b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
@@ -42,6 +42,8 @@
#include "host_interface.h"
#include "wilc_wlan.h"
#include <linux/wireless.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
#define FLOW_CONTROL_LOWER_THRESHOLD 128
#define FLOW_CONTROL_UPPER_THRESHOLD 256
@@ -170,15 +172,15 @@ struct wilc {
struct wilc_vif *vif[NUM_CONCURRENT_IFC];
u8 open_ifcs;
- struct semaphore txq_add_to_head_cs;
+ struct mutex txq_add_to_head_cs;
spinlock_t txq_spinlock;
struct mutex rxq_cs;
struct mutex hif_cs;
- struct semaphore cfg_event;
- struct semaphore sync_event;
- struct semaphore txq_event;
+ struct completion cfg_event;
+ struct completion sync_event;
+ struct completion txq_event;
struct completion txq_thread_started;
struct task_struct *txq_thread;
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c
index 11e16d56a..19a580939 100644
--- a/drivers/staging/wilc1000/wilc_wlan.c
+++ b/drivers/staging/wilc1000/wilc_wlan.c
@@ -1,3 +1,4 @@
+#include <linux/completion.h>
#include "wilc_wlan_if.h"
#include "wilc_wlan.h"
#include "wilc_wfi_netdevice.h"
@@ -89,7 +90,7 @@ static void wilc_wlan_txq_add_to_tail(struct net_device *dev,
spin_unlock_irqrestore(&wilc->txq_spinlock, flags);
- up(&wilc->txq_event);
+ complete(&wilc->txq_event);
}
static int wilc_wlan_txq_add_to_head(struct wilc_vif *vif,
@@ -98,9 +99,7 @@ static int wilc_wlan_txq_add_to_head(struct wilc_vif *vif,
unsigned long flags;
struct wilc *wilc = vif->wilc;
- if (wilc_lock_timeout(wilc, &wilc->txq_add_to_head_cs,
- CFG_PKTS_TIMEOUT))
- return -1;
+ mutex_lock(&wilc->txq_add_to_head_cs);
spin_lock_irqsave(&wilc->txq_spinlock, flags);
@@ -118,8 +117,8 @@ static int wilc_wlan_txq_add_to_head(struct wilc_vif *vif,
wilc->txq_entries += 1;
spin_unlock_irqrestore(&wilc->txq_spinlock, flags);
- up(&wilc->txq_add_to_head_cs);
- up(&wilc->txq_event);
+ mutex_unlock(&wilc->txq_add_to_head_cs);
+ complete(&wilc->txq_event);
return 0;
}
@@ -287,7 +286,8 @@ static int wilc_wlan_txq_filter_dup_tcp_ack(struct net_device *dev)
spin_unlock_irqrestore(&wilc->txq_spinlock, wilc->txq_spinlock_flags);
while (dropped > 0) {
- wilc_lock_timeout(wilc, &wilc->txq_event, 1);
+ wait_for_completion_timeout(&wilc->txq_event,
+ msecs_to_jiffies(1));
dropped--;
}
@@ -310,7 +310,7 @@ static int wilc_wlan_txq_add_cfg_pkt(struct wilc_vif *vif, u8 *buffer,
netdev_dbg(vif->ndev, "Adding config packet ...\n");
if (wilc->quit) {
netdev_dbg(vif->ndev, "Return due to clear function\n");
- up(&wilc->cfg_event);
+ complete(&wilc->cfg_event);
return 0;
}
@@ -571,8 +571,7 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
if (wilc->quit)
break;
- wilc_lock_timeout(wilc, &wilc->txq_add_to_head_cs,
- CFG_PKTS_TIMEOUT);
+ mutex_lock(&wilc->txq_add_to_head_cs);
wilc_wlan_txq_filter_dup_tcp_ack(dev);
tqe = wilc_wlan_txq_get_first(wilc);
i = 0;
@@ -753,7 +752,7 @@ _end_:
if (ret != 1)
break;
} while (0);
- up(&wilc->txq_add_to_head_cs);
+ mutex_unlock(&wilc->txq_add_to_head_cs);
wilc->txq_exit = 1;
*txq_count = wilc->txq_entries;
@@ -770,7 +769,7 @@ static void wilc_wlan_handle_rxq(struct wilc *wilc)
do {
if (wilc->quit) {
- up(&wilc->cfg_event);
+ complete(&wilc->cfg_event);
break;
}
rqe = wilc_wlan_rxq_remove(wilc);
@@ -821,7 +820,7 @@ static void wilc_wlan_handle_rxq(struct wilc *wilc)
wilc_wlan_cfg_indicate_rx(wilc, &buffer[pkt_offset + offset], pkt_len, &rsp);
if (rsp.type == WILC_CFG_RSP) {
if (wilc->cfg_seq_no == rsp.seq_no)
- up(&wilc->cfg_event);
+ complete(&wilc->cfg_event);
} else if (rsp.type == WILC_CFG_RSP_STATUS) {
wilc_mac_indicate(wilc, WILC_MAC_INDICATE_STATUS);
@@ -1229,11 +1228,12 @@ int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u16 wid, u8 *buffer,
if (wilc_wlan_cfg_commit(vif, WILC_CFG_SET, drv_handler))
ret_size = 0;
- if (wilc_lock_timeout(wilc, &wilc->cfg_event,
- CFG_PKTS_TIMEOUT)) {
+ if (!wait_for_completion_timeout(&wilc->cfg_event,
+ msecs_to_jiffies(CFG_PKTS_TIMEOUT))) {
netdev_dbg(vif->ndev, "Set Timed Out\n");
ret_size = 0;
}
+
wilc->cfg_frame_in_use = 0;
wilc->cfg_frame_offset = 0;
wilc->cfg_seq_no += 1;
@@ -1266,8 +1266,8 @@ int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u16 wid, int commit,
if (wilc_wlan_cfg_commit(vif, WILC_CFG_QUERY, drv_handler))
ret_size = 0;
- if (wilc_lock_timeout(wilc, &wilc->cfg_event,
- CFG_PKTS_TIMEOUT)) {
+ if (!wait_for_completion_timeout(&wilc->cfg_event,
+ msecs_to_jiffies(CFG_PKTS_TIMEOUT))) {
netdev_dbg(vif->ndev, "Get Timed Out\n");
ret_size = 0;
}
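The inverted conditions above follow wait_for_completion_timeout()'s return convention: 0 means the timeout elapsed, any non-zero value is the number of jiffies left. A small sketch:

	unsigned long left;

	left = wait_for_completion_timeout(&done, msecs_to_jiffies(1000));
	if (!left)
		pr_warn("timed out\n");	/* complete() was never called in time */
	else
		pr_info("%lu jiffies to spare\n", left);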
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index a6e6fb9f4..f46dfe6b2 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -338,6 +338,8 @@ static int prism2_scan(struct wiphy *wiphy,
struct p80211msg_dot11req_scan msg1;
struct p80211msg_dot11req_scan_results msg2;
struct cfg80211_bss *bss;
+ struct cfg80211_scan_info info = {};
+
int result;
int err = 0;
int numbss = 0;
@@ -440,7 +442,8 @@ static int prism2_scan(struct wiphy *wiphy,
err = prism2_result2err(msg2.resultcode.data);
exit:
- cfg80211_scan_done(request, err ? 1 : 0);
+ info.aborted = !!(err);
+ cfg80211_scan_done(request, &info);
priv->scan_request = NULL;
return err;
}
diff --git a/drivers/target/iscsi/cxgbit/Kconfig b/drivers/target/iscsi/cxgbit/Kconfig
index c9b6a3c75..bc6c1d5df 100644
--- a/drivers/target/iscsi/cxgbit/Kconfig
+++ b/drivers/target/iscsi/cxgbit/Kconfig
@@ -1,7 +1,7 @@
config ISCSI_TARGET_CXGB4
tristate "Chelsio iSCSI target offload driver"
depends on ISCSI_TARGET && CHELSIO_T4 && INET
- select CHELSIO_T4_UWIRE
+ select CHELSIO_LIB
---help---
To compile this driver as module, choose M here: the module
will be called cxgbit.
diff --git a/drivers/target/iscsi/cxgbit/Makefile b/drivers/target/iscsi/cxgbit/Makefile
index bd56c073d..4893ec29b 100644
--- a/drivers/target/iscsi/cxgbit/Makefile
+++ b/drivers/target/iscsi/cxgbit/Makefile
@@ -1,4 +1,5 @@
ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4
+ccflags-y += -Idrivers/net/ethernet/chelsio/libcxgb
ccflags-y += -Idrivers/target/iscsi
obj-$(CONFIG_ISCSI_TARGET_CXGB4) += cxgbit.o
diff --git a/drivers/target/iscsi/cxgbit/cxgbit.h b/drivers/target/iscsi/cxgbit/cxgbit.h
index 625c7f6de..90388698c 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit.h
+++ b/drivers/target/iscsi/cxgbit/cxgbit.h
@@ -37,7 +37,7 @@
#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "l2t.h"
-#include "cxgb4_ppm.h"
+#include "libcxgb_ppm.h"
#include "cxgbit_lro.h"
extern struct mutex cdev_list_lock;
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c
index 60dccd02b..27dd11aff 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_main.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c
@@ -26,6 +26,8 @@ void _cxgbit_free_cdev(struct kref *kref)
struct cxgbit_device *cdev;
cdev = container_of(kref, struct cxgbit_device, kref);
+
+ cxgbi_ppm_release(cdev2ppm(cdev));
kfree(cdev);
}
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 79291869b..d545993df 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -522,7 +522,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
*/
if (cmd->data_length > FD_MAX_BYTES) {
pr_err("FILEIO: Not able to process I/O of %u bytes due to"
- "FD_MAX_BYTES: %u iovec count limitiation\n",
+ "FD_MAX_BYTES: %u iovec count limitation\n",
cmd->data_length, FD_MAX_BYTES);
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 2077bc286..372d74431 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -311,7 +311,8 @@ static void iblock_bio_done(struct bio *bio)
}
static struct bio *
-iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
+iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, int op,
+ int op_flags)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
struct bio *bio;
@@ -333,18 +334,19 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
bio->bi_private = cmd;
bio->bi_end_io = &iblock_bio_done;
bio->bi_iter.bi_sector = lba;
+ bio_set_op_attrs(bio, op, op_flags);
return bio;
}
-static void iblock_submit_bios(struct bio_list *list, int rw)
+static void iblock_submit_bios(struct bio_list *list)
{
struct blk_plug plug;
struct bio *bio;
blk_start_plug(&plug);
while ((bio = bio_list_pop(list)))
- submit_bio(rw, bio);
+ submit_bio(bio);
blk_finish_plug(&plug);
}
@@ -386,9 +388,10 @@ iblock_execute_sync_cache(struct se_cmd *cmd)
bio = bio_alloc(GFP_KERNEL, 0);
bio->bi_end_io = iblock_end_io_flush;
bio->bi_bdev = ib_dev->ibd_bd;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
if (!immed)
bio->bi_private = cmd;
- submit_bio(WRITE_FLUSH, bio);
+ submit_bio(bio);
return 0;
}
@@ -477,7 +480,7 @@ iblock_execute_write_same(struct se_cmd *cmd)
goto fail;
cmd->priv = ibr;
- bio = iblock_get_bio(cmd, block_lba, 1);
+ bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE, 0);
if (!bio)
goto fail_free_ibr;
@@ -490,7 +493,8 @@ iblock_execute_write_same(struct se_cmd *cmd)
while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
!= sg->length) {
- bio = iblock_get_bio(cmd, block_lba, 1);
+ bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE,
+ 0);
if (!bio)
goto fail_put_bios;
@@ -503,7 +507,7 @@ iblock_execute_write_same(struct se_cmd *cmd)
sectors -= 1;
}
- iblock_submit_bios(&list, WRITE);
+ iblock_submit_bios(&list);
return 0;
fail_put_bios:
@@ -676,8 +680,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
struct scatterlist *sg;
u32 sg_num = sgl_nents;
unsigned bio_cnt;
- int rw = 0;
- int i;
+ int i, op, op_flags = 0;
if (data_direction == DMA_TO_DEVICE) {
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
@@ -686,18 +689,15 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
* Force writethrough using WRITE_FUA if a volatile write cache
* is not enabled, or if initiator set the Force Unit Access bit.
*/
+ op = REQ_OP_WRITE;
if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
if (cmd->se_cmd_flags & SCF_FUA)
- rw = WRITE_FUA;
+ op_flags = WRITE_FUA;
else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
- rw = WRITE_FUA;
- else
- rw = WRITE;
- } else {
- rw = WRITE;
+ op_flags = WRITE_FUA;
}
} else {
- rw = READ;
+ op = REQ_OP_READ;
}
ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
@@ -711,7 +711,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
return 0;
}
- bio = iblock_get_bio(cmd, block_lba, sgl_nents);
+ bio = iblock_get_bio(cmd, block_lba, sgl_nents, op, op_flags);
if (!bio)
goto fail_free_ibr;
@@ -731,11 +731,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
!= sg->length) {
if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
- iblock_submit_bios(&list, rw);
+ iblock_submit_bios(&list);
bio_cnt = 0;
}
- bio = iblock_get_bio(cmd, block_lba, sg_num);
+ bio = iblock_get_bio(cmd, block_lba, sg_num, op,
+ op_flags);
if (!bio)
goto fail_put_bios;
@@ -755,7 +756,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
goto fail_put_bios;
}
- iblock_submit_bios(&list, rw);
+ iblock_submit_bios(&list);
iblock_complete_cmd(cmd);
return 0;
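The iblock hunks follow the 4.8 block-layer change that moves the operation out of submit_bio(rw, bio) and into the bio itself. A sketch of the new sequence (illustrative; allocation error handling elided):

	struct bio *bio = bio_alloc(GFP_KERNEL, nr_vecs);

	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = lba;
	/* the op (REQ_OP_READ/REQ_OP_WRITE) and flags (e.g. WRITE_FUA)
	 * now travel inside the bio; submit_bio() takes only the bio
	 */
	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FUA);
	submit_bio(bio);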
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index de18790eb..9125d9358 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -876,19 +876,19 @@ static inline struct bio *pscsi_get_bio(int nr_vecs)
static sense_reason_t
pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
- enum dma_data_direction data_direction, struct bio **hbio)
+ struct request *req)
{
struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
- struct bio *bio = NULL, *tbio = NULL;
+ struct bio *bio = NULL;
struct page *page;
struct scatterlist *sg;
u32 data_len = cmd->data_length, i, len, bytes, off;
int nr_pages = (cmd->data_length + sgl[0].offset +
PAGE_SIZE - 1) >> PAGE_SHIFT;
int nr_vecs = 0, rc;
- int rw = (data_direction == DMA_TO_DEVICE);
+ int rw = (cmd->data_direction == DMA_TO_DEVICE);
- *hbio = NULL;
+ BUG_ON(!cmd->data_length);
pr_debug("PSCSI: nr_pages: %d\n", nr_pages);
@@ -922,21 +922,11 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
goto fail;
if (rw)
- bio->bi_rw |= REQ_WRITE;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
pr_debug("PSCSI: Allocated bio: %p,"
" dir: %s nr_vecs: %d\n", bio,
(rw) ? "rw" : "r", nr_vecs);
- /*
- * Set *hbio pointer to handle the case:
- * nr_pages > BIO_MAX_PAGES, where additional
- * bios need to be added to complete a given
- * command.
- */
- if (!*hbio)
- *hbio = tbio = bio;
- else
- tbio = tbio->bi_next = bio;
}
pr_debug("PSCSI: Calling bio_add_pc_page() i: %d"
@@ -955,11 +945,16 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
pr_debug("PSCSI: Reached bio->bi_vcnt max:"
" %d i: %d bio: %p, allocating another"
" bio\n", bio->bi_vcnt, i, bio);
+
+ rc = blk_rq_append_bio(req, bio);
+ if (rc) {
+ pr_err("pSCSI: failed to append bio\n");
+ goto fail;
+ }
+
/*
* Clear the pointer so that another bio will
- * be allocated with pscsi_get_bio() above, the
- * current bio has already been set *tbio and
- * bio->bi_next.
+ * be allocated with pscsi_get_bio() above.
*/
bio = NULL;
}
@@ -968,13 +963,16 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
}
}
+ if (bio) {
+ rc = blk_rq_append_bio(req, bio);
+ if (rc) {
+ pr_err("pSCSI: failed to append bio\n");
+ goto fail;
+ }
+ }
+
return 0;
fail:
- while (*hbio) {
- bio = *hbio;
- *hbio = (*hbio)->bi_next;
- bio_endio(bio);
- }
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
@@ -992,11 +990,9 @@ pscsi_execute_cmd(struct se_cmd *cmd)
{
struct scatterlist *sgl = cmd->t_data_sg;
u32 sgl_nents = cmd->t_data_nents;
- enum dma_data_direction data_direction = cmd->data_direction;
struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
struct pscsi_plugin_task *pt;
struct request *req;
- struct bio *hbio;
sense_reason_t ret;
/*
@@ -1012,31 +1008,21 @@ pscsi_execute_cmd(struct se_cmd *cmd)
memcpy(pt->pscsi_cdb, cmd->t_task_cdb,
scsi_command_size(cmd->t_task_cdb));
- if (!sgl) {
- req = blk_get_request(pdv->pdv_sd->request_queue,
- (data_direction == DMA_TO_DEVICE),
- GFP_KERNEL);
- if (IS_ERR(req)) {
- pr_err("PSCSI: blk_get_request() failed\n");
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- goto fail;
- }
+ req = blk_get_request(pdv->pdv_sd->request_queue,
+ (cmd->data_direction == DMA_TO_DEVICE),
+ GFP_KERNEL);
+ if (IS_ERR(req)) {
+ pr_err("PSCSI: blk_get_request() failed\n");
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto fail;
+ }
- blk_rq_set_block_pc(req);
- } else {
- BUG_ON(!cmd->data_length);
+ blk_rq_set_block_pc(req);
- ret = pscsi_map_sg(cmd, sgl, sgl_nents, data_direction, &hbio);
+ if (sgl) {
+ ret = pscsi_map_sg(cmd, sgl, sgl_nents, req);
if (ret)
- goto fail;
-
- req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
- GFP_KERNEL);
- if (IS_ERR(req)) {
- pr_err("pSCSI: blk_make_request() failed\n");
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- goto fail_free_bio;
- }
+ goto fail_put_request;
}
req->end_io = pscsi_req_done;
@@ -1057,13 +1043,8 @@ pscsi_execute_cmd(struct se_cmd *cmd)
return 0;
-fail_free_bio:
- while (hbio) {
- struct bio *bio = hbio;
- hbio = hbio->bi_next;
- bio_endio(bio);
- }
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+fail_put_request:
+ blk_put_request(req);
fail:
kfree(pt);
return ret;
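The pscsi rework drops the hand-rolled *hbio/tbio chain and instead appends each bio to the request as it is built, so the error path can simply drop the request. A condensed sketch (build_next_bio() is a hypothetical stand-in for the page-mapping loop):

	req = blk_get_request(q, WRITE, GFP_KERNEL);
	if (IS_ERR(req))
		return PTR_ERR(req);
	blk_rq_set_block_pc(req);

	while ((bio = build_next_bio(cmd))) {	/* hypothetical helper */
		if (blk_rq_append_bio(req, bio)) {
			blk_put_request(req);
			return -EIO;
		}
	}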
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 42c2a44b8..6094a6bed 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -754,7 +754,15 @@ EXPORT_SYMBOL(target_complete_cmd);
void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
{
- if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
+ if (scsi_status != SAM_STAT_GOOD) {
+ return;
+ }
+
+ /*
+ * Calculate new residual count based upon length of SCSI data
+ * transferred.
+ */
+ if (length < cmd->data_length) {
if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
cmd->residual_count += cmd->data_length - length;
} else {
@@ -763,6 +771,12 @@ void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int len
}
cmd->data_length = length;
+ } else if (length > cmd->data_length) {
+ cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
+ cmd->residual_count = length - cmd->data_length;
+ } else {
+ cmd->se_cmd_flags &= ~(SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT);
+ cmd->residual_count = 0;
}
target_complete_cmd(cmd, scsi_status);
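Worked example of the residual accounting added above, with cmd->data_length = 4096: a device that transfers only 512 bytes sets SCF_UNDERFLOW_BIT (or adds to an existing underflow), leaves residual_count = 3584, and trims data_length to 512; a device reporting 8192 sets SCF_OVERFLOW_BIT with residual_count = 4096; an exact 4096 clears both bits and zeroes residual_count.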
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index f5186a744..6ffbb603d 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -91,6 +91,7 @@ static void ft_tport_delete(struct ft_tport *tport)
ft_sess_delete_all(tport);
lport = tport->lport;
+ lport->service_params &= ~FCP_SPPF_TARG_FCN;
BUG_ON(tport != lport->prov[FC_TYPE_FCP]);
RCU_INIT_POINTER(lport->prov[FC_TYPE_FCP], NULL);
@@ -110,6 +111,7 @@ void ft_lport_add(struct fc_lport *lport, void *arg)
{
mutex_lock(&ft_lport_lock);
ft_tport_get(lport);
+ lport->service_params |= FCP_SPPF_TARG_FCN;
mutex_unlock(&ft_lport_lock);
}
diff --git a/drivers/thermal/clock_cooling.c b/drivers/thermal/clock_cooling.c
index 1b4ff0f4c..ed5dd0e88 100644
--- a/drivers/thermal/clock_cooling.c
+++ b/drivers/thermal/clock_cooling.c
@@ -426,6 +426,7 @@ clock_cooling_register(struct device *dev, const char *clock_name)
if (!ccdev)
return ERR_PTR(-ENOMEM);
+ mutex_init(&ccdev->lock);
ccdev->dev = dev;
ccdev->clk = devm_clk_get(dev, clock_name);
if (IS_ERR(ccdev->clk))
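The one-line clock_cooling fix initializes a lock that was previously taken without ever being set up; a zeroed allocation is not a valid mutex. The required ordering, as a sketch (the allocator call is illustrative):

	ccdev = kzalloc(sizeof(*ccdev), GFP_KERNEL);
	if (!ccdev)
		return ERR_PTR(-ENOMEM);
	mutex_init(&ccdev->lock);	/* must precede any mutex_lock(&ccdev->lock) */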
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 5b4b47ed9..a32b41783 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -740,12 +740,22 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev,
}
/* Bind cpufreq callbacks to thermal cooling device ops */
+
static struct thermal_cooling_device_ops cpufreq_cooling_ops = {
.get_max_state = cpufreq_get_max_state,
.get_cur_state = cpufreq_get_cur_state,
.set_cur_state = cpufreq_set_cur_state,
};
+static struct thermal_cooling_device_ops cpufreq_power_cooling_ops = {
+ .get_max_state = cpufreq_get_max_state,
+ .get_cur_state = cpufreq_get_cur_state,
+ .set_cur_state = cpufreq_set_cur_state,
+ .get_requested_power = cpufreq_get_requested_power,
+ .state2power = cpufreq_state2power,
+ .power2state = cpufreq_power2state,
+};
+
/* Notifier for cpufreq policy change */
static struct notifier_block thermal_cpufreq_notifier_block = {
.notifier_call = cpufreq_thermal_notifier,
@@ -787,22 +797,35 @@ __cpufreq_cooling_register(struct device_node *np,
const struct cpumask *clip_cpus, u32 capacitance,
get_static_t plat_static_func)
{
+ struct cpufreq_policy *policy;
struct thermal_cooling_device *cool_dev;
struct cpufreq_cooling_device *cpufreq_dev;
char dev_name[THERMAL_NAME_LENGTH];
struct cpufreq_frequency_table *pos, *table;
+ struct cpumask temp_mask;
unsigned int freq, i, num_cpus;
int ret;
+ struct thermal_cooling_device_ops *cooling_ops;
+
+ cpumask_and(&temp_mask, clip_cpus, cpu_online_mask);
+ policy = cpufreq_cpu_get(cpumask_first(&temp_mask));
+ if (!policy) {
+ pr_debug("%s: CPUFreq policy not found\n", __func__);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
- table = cpufreq_frequency_get_table(cpumask_first(clip_cpus));
+ table = policy->freq_table;
if (!table) {
pr_debug("%s: CPUFreq table not found\n", __func__);
- return ERR_PTR(-EPROBE_DEFER);
+ cool_dev = ERR_PTR(-ENODEV);
+ goto put_policy;
}
cpufreq_dev = kzalloc(sizeof(*cpufreq_dev), GFP_KERNEL);
- if (!cpufreq_dev)
- return ERR_PTR(-ENOMEM);
+ if (!cpufreq_dev) {
+ cool_dev = ERR_PTR(-ENOMEM);
+ goto put_policy;
+ }
num_cpus = cpumask_weight(clip_cpus);
cpufreq_dev->time_in_idle = kcalloc(num_cpus,
@@ -838,10 +861,6 @@ __cpufreq_cooling_register(struct device_node *np,
cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);
if (capacitance) {
- cpufreq_cooling_ops.get_requested_power =
- cpufreq_get_requested_power;
- cpufreq_cooling_ops.state2power = cpufreq_state2power;
- cpufreq_cooling_ops.power2state = cpufreq_power2state;
cpufreq_dev->plat_get_static_power = plat_static_func;
ret = build_dyn_power_table(cpufreq_dev, capacitance);
@@ -849,6 +868,10 @@ __cpufreq_cooling_register(struct device_node *np,
cool_dev = ERR_PTR(ret);
goto free_table;
}
+
+ cooling_ops = &cpufreq_power_cooling_ops;
+ } else {
+ cooling_ops = &cpufreq_cooling_ops;
}
ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
@@ -873,7 +896,7 @@ __cpufreq_cooling_register(struct device_node *np,
cpufreq_dev->id);
cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
- &cpufreq_cooling_ops);
+ cooling_ops);
if (IS_ERR(cool_dev))
goto remove_idr;
@@ -892,7 +915,7 @@ __cpufreq_cooling_register(struct device_node *np,
CPUFREQ_POLICY_NOTIFIER);
mutex_unlock(&cooling_cpufreq_lock);
- return cool_dev;
+ goto put_policy;
remove_idr:
release_idr(&cpufreq_idr, cpufreq_dev->id);
@@ -906,6 +929,8 @@ free_time_in_idle:
kfree(cpufreq_dev->time_in_idle);
free_cdev:
kfree(cpufreq_dev);
+put_policy:
+ cpufreq_cpu_put(policy);
return cool_dev;
}
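
Two structural changes here: the power-aware callbacks move out of a runtime patch-up of the shared cpufreq_cooling_ops into a second, immutable ops table selected per registration, and every exit path (success included) now funnels through put_policy so the reference taken by cpufreq_cpu_get() is always dropped. A standalone sketch of the two-table selection; the struct and names are illustrative, not the driver's:

    #include <stdio.h>

    struct cooling_ops {
        const char *name;
        int (*get_power)(void);     /* NULL when power accounting is off */
    };

    static int get_power(void) { return 42; }

    /* Two fixed tables: mutating one shared table at registration time
     * would leak the power callbacks into devices registered without
     * capacitance, so each device just points at the right table. */
    static const struct cooling_ops plain_ops = { "plain", NULL };
    static const struct cooling_ops power_ops = { "power", get_power };

    static const struct cooling_ops *register_cooling(unsigned int capacitance)
    {
        return capacitance ? &power_ops : &plain_ops;
    }

    int main(void)
    {
        const struct cooling_ops *ops = register_cooling(100);

        printf("%s ops%s\n", ops->name,
               ops->get_power ? " (with power accounting)" : "");
        return 0;
    }
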
diff --git a/drivers/thermal/fair_share.c b/drivers/thermal/fair_share.c
index 34fe36504..68bd1b569 100644
--- a/drivers/thermal/fair_share.c
+++ b/drivers/thermal/fair_share.c
@@ -116,7 +116,9 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
instance->target = get_target_state(tz, cdev, percentage,
cur_trip_level);
+ mutex_lock(&instance->cdev->lock);
instance->cdev->updated = false;
+ mutex_unlock(&instance->cdev->lock);
thermal_cdev_update(cdev);
}
return 0;
diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c
index fc52016d4..bb118a152 100644
--- a/drivers/thermal/gov_bang_bang.c
+++ b/drivers/thermal/gov_bang_bang.c
@@ -71,7 +71,9 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
dev_dbg(&instance->cdev->device, "target=%d\n",
(int)instance->target);
+ mutex_lock(&instance->cdev->lock);
instance->cdev->updated = false; /* cdev needs update */
+ mutex_unlock(&instance->cdev->lock);
}
mutex_unlock(&tz->lock);
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index c5547bd71..e473548b5 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -471,8 +471,6 @@ MODULE_DEVICE_TABLE(of, of_imx_thermal_match);
static int imx_thermal_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id =
- of_match_device(of_imx_thermal_match, &pdev->dev);
struct imx_thermal_data *data;
struct regmap *map;
int measure_freq;
@@ -490,7 +488,7 @@ static int imx_thermal_probe(struct platform_device *pdev)
}
data->tempmon = map;
- data->socdata = of_id->data;
+ data->socdata = of_device_get_match_data(&pdev->dev);
/* make sure the IRQ flag is clear before enabling irq on i.MX6SX */
if (data->socdata->version == TEMPMON_IMX6SX) {
diff --git a/drivers/thermal/int340x_thermal/int3406_thermal.c b/drivers/thermal/int340x_thermal/int3406_thermal.c
index a578cd257..1891f34ab 100644
--- a/drivers/thermal/int340x_thermal/int3406_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3406_thermal.c
@@ -225,7 +225,6 @@ static struct platform_driver int3406_thermal_driver = {
.remove = int3406_thermal_remove,
.driver = {
.name = "int3406 thermal",
- .owner = THIS_MODULE,
.acpi_match_table = int3406_thermal_match,
},
};
diff --git a/drivers/thermal/intel_pch_thermal.c b/drivers/thermal/intel_pch_thermal.c
index 6a6ec1c95..9b4815e81 100644
--- a/drivers/thermal/intel_pch_thermal.c
+++ b/drivers/thermal/intel_pch_thermal.c
@@ -21,6 +21,7 @@
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/thermal.h>
+#include <linux/pm.h>
/* Intel PCH thermal Device IDs */
#define PCH_THERMAL_DID_WPT 0x9CA4 /* Wildcat Point */
@@ -65,6 +66,7 @@ struct pch_thermal_device {
unsigned long crt_temp;
int hot_trip_id;
unsigned long hot_temp;
+ bool bios_enabled;
};
static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips)
@@ -75,8 +77,10 @@ static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips)
*nr_trips = 0;
/* Check if BIOS has already enabled thermal sensor */
- if (WPT_TSS_TSDSS & readb(ptd->hw_base + WPT_TSS))
+ if (WPT_TSS_TSDSS & readb(ptd->hw_base + WPT_TSS)) {
+ ptd->bios_enabled = true;
goto read_trips;
+ }
tsel = readb(ptd->hw_base + WPT_TSEL);
/*
@@ -130,9 +134,39 @@ static int pch_wpt_get_temp(struct pch_thermal_device *ptd, int *temp)
return 0;
}
+static int pch_wpt_suspend(struct pch_thermal_device *ptd)
+{
+ u8 tsel;
+
+ if (ptd->bios_enabled)
+ return 0;
+
+ tsel = readb(ptd->hw_base + WPT_TSEL);
+
+ writeb(tsel & 0xFE, ptd->hw_base + WPT_TSEL);
+
+ return 0;
+}
+
+static int pch_wpt_resume(struct pch_thermal_device *ptd)
+{
+ u8 tsel;
+
+ if (ptd->bios_enabled)
+ return 0;
+
+ tsel = readb(ptd->hw_base + WPT_TSEL);
+
+ writeb(tsel | WPT_TSEL_ETS, ptd->hw_base + WPT_TSEL);
+
+ return 0;
+}
+
struct pch_dev_ops {
int (*hw_init)(struct pch_thermal_device *ptd, int *nr_trips);
int (*get_temp)(struct pch_thermal_device *ptd, int *temp);
+ int (*suspend)(struct pch_thermal_device *ptd);
+ int (*resume)(struct pch_thermal_device *ptd);
};
@@ -140,6 +174,8 @@ struct pch_dev_ops {
static const struct pch_dev_ops pch_dev_ops_wpt = {
.hw_init = pch_wpt_init,
.get_temp = pch_wpt_get_temp,
+ .suspend = pch_wpt_suspend,
+ .resume = pch_wpt_resume,
};
static int pch_thermal_get_temp(struct thermal_zone_device *tzd, int *temp)
@@ -269,6 +305,22 @@ static void intel_pch_thermal_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+static int intel_pch_thermal_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct pch_thermal_device *ptd = pci_get_drvdata(pdev);
+
+ return ptd->ops->suspend(ptd);
+}
+
+static int intel_pch_thermal_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct pch_thermal_device *ptd = pci_get_drvdata(pdev);
+
+ return ptd->ops->resume(ptd);
+}
+
static struct pci_device_id intel_pch_thermal_id[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WPT) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL) },
@@ -276,11 +328,17 @@ static struct pci_device_id intel_pch_thermal_id[] = {
};
MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id);
+static const struct dev_pm_ops intel_pch_pm_ops = {
+ .suspend = intel_pch_thermal_suspend,
+ .resume = intel_pch_thermal_resume,
+};
+
static struct pci_driver intel_pch_thermal_driver = {
.name = "intel_pch_thermal",
.id_table = intel_pch_thermal_id,
.probe = intel_pch_thermal_probe,
.remove = intel_pch_thermal_remove,
+ .driver.pm = &intel_pch_pm_ops,
};
module_pci_driver(intel_pch_thermal_driver);
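
struct pci_driver carries no .pm field of its own, so the dev_pm_ops table is hooked through the embedded struct device_driver, exactly as above. A kernel-style sketch under that assumption (it builds only inside a kernel tree, and probe/id_table are omitted); SIMPLE_DEV_PM_OPS wires the same pair of callbacks into both the suspend and hibernate entry points in one line:

    #include <linux/pci.h>
    #include <linux/pm.h>

    static int demo_suspend(struct device *dev)
    {
        /* quiesce the hardware, as pch_wpt_suspend() does above */
        return 0;
    }

    static int demo_resume(struct device *dev)
    {
        /* undo whatever suspend turned off */
        return 0;
    }

    /* Covers .suspend/.resume and the hibernate callbacks in one macro. */
    static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

    static struct pci_driver demo_driver = {
        .name = "demo",
        /* no .pm member in pci_driver; attach via the embedded device_driver */
        .driver.pm = &demo_pm_ops,
    };
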
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 015ce2eb6..0e4dc0afc 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -388,7 +388,7 @@ static int clamp_thread(void *arg)
int sleeptime;
unsigned long target_jiffies;
unsigned int guard;
- unsigned int compensation = 0;
+ unsigned int compensated_ratio;
int interval; /* jiffies to sleep for each attempt */
unsigned int duration_jiffies = msecs_to_jiffies(duration);
unsigned int window_size_now;
@@ -409,8 +409,11 @@ static int clamp_thread(void *arg)
* c-states, thus we need to compensate the injected idle ratio
* to achieve the actual target reported by the HW.
*/
- compensation = get_compensation(target_ratio);
- interval = duration_jiffies*100/(target_ratio+compensation);
+ compensated_ratio = target_ratio +
+ get_compensation(target_ratio);
+ if (compensated_ratio <= 0)
+ compensated_ratio = 1;
+ interval = duration_jiffies * 100 / compensated_ratio;
/* align idle time */
target_jiffies = roundup(jiffies, interval);
@@ -647,8 +650,8 @@ static int powerclamp_set_cur_state(struct thermal_cooling_device *cdev,
goto exit_set;
} else if (set_target_ratio > 0 && new_target_ratio == 0) {
pr_info("Stop forced idle injection\n");
- set_target_ratio = 0;
end_power_clamp();
+ set_target_ratio = 0;
} else /* adjust currently running */ {
set_target_ratio = new_target_ratio;
/* make new set_target_ratio visible to other cpus */
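
The renamed variable makes the unit explicit: the compensation is folded into the ratio before the division, and because the sum is unsigned, the `<= 0` clamp really guards against a zero divisor. A standalone model of the interval arithmetic (the duration and compensation figures are invented):

    #include <stdio.h>

    /* Stand-in for get_compensation(): pretend the HW under-delivers by 5%. */
    static unsigned int get_compensation(unsigned int target_ratio)
    {
        return target_ratio / 20;
    }

    int main(void)
    {
        unsigned int duration_jiffies = 6;   /* e.g. 6 ms at HZ=1000 */
        unsigned int target_ratio = 40;      /* percent idle requested */
        unsigned int compensated_ratio;
        unsigned int interval;

        compensated_ratio = target_ratio + get_compensation(target_ratio);
        if (compensated_ratio == 0)          /* unsigned: <= 0 means == 0 */
            compensated_ratio = 1;           /* avoid division by zero */

        interval = duration_jiffies * 100 / compensated_ratio;
        printf("inject %u jiffies idle every %u jiffies (ratio %u%%)\n",
               duration_jiffies, interval, compensated_ratio);
        return 0;
    }
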
diff --git a/drivers/thermal/intel_soc_dts_thermal.c b/drivers/thermal/intel_soc_dts_thermal.c
index 4ebb31a35..b2bbaa1c6 100644
--- a/drivers/thermal/intel_soc_dts_thermal.c
+++ b/drivers/thermal/intel_soc_dts_thermal.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
#include "intel_soc_dts_iosf.h"
#define CRITICAL_OFFSET_FROM_TJ_MAX 5000
@@ -42,7 +43,8 @@ static irqreturn_t soc_irq_thread_fn(int irq, void *dev_data)
}
static const struct x86_cpu_id soc_thermal_ids[] = {
- { X86_VENDOR_INTEL, X86_FAMILY_ANY, 0x37, 0, BYT_SOC_DTS_APIC_IRQ},
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1, 0,
+ BYT_SOC_DTS_APIC_IRQ},
{}
};
MODULE_DEVICE_TABLE(x86cpu, soc_thermal_ids);
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index 2f1a863a8..b4d3116cf 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -529,7 +529,9 @@ static void allow_maximum_power(struct thermal_zone_device *tz)
continue;
instance->target = 0;
+ mutex_lock(&instance->cdev->lock);
instance->cdev->updated = false;
+ mutex_unlock(&instance->cdev->lock);
thermal_cdev_update(instance->cdev);
}
}
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 71a339271..5f817923f 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -504,6 +504,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
if (IS_ERR(priv->zone)) {
dev_err(dev, "can't register thermal zone\n");
ret = PTR_ERR(priv->zone);
+ priv->zone = NULL;
goto error_unregister;
}
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index ea9366ad3..bcef2e7c4 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -175,7 +175,9 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
update_passive_instance(tz, trip_type, -1);
instance->initialized = true;
+ mutex_lock(&instance->cdev->lock);
instance->cdev->updated = false; /* cdev needs update */
+ mutex_unlock(&instance->cdev->lock);
}
mutex_unlock(&tz->lock);
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 5133cd1e1..e2fc6161d 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1093,7 +1093,9 @@ int power_actor_set_power(struct thermal_cooling_device *cdev,
return ret;
instance->target = state;
+ mutex_lock(&cdev->lock);
cdev->updated = false;
+ mutex_unlock(&cdev->lock);
thermal_cdev_update(cdev);
return 0;
@@ -1623,11 +1625,13 @@ void thermal_cdev_update(struct thermal_cooling_device *cdev)
struct thermal_instance *instance;
unsigned long target = 0;
+ mutex_lock(&cdev->lock);
	/* cooling device is updated */
- if (cdev->updated)
+ if (cdev->updated) {
+ mutex_unlock(&cdev->lock);
return;
+ }
- mutex_lock(&cdev->lock);
/* Make sure cdev enters the deepest cooling state */
list_for_each_entry(instance, &cdev->thermal_instances, cdev_node) {
dev_dbg(&cdev->device, "zone%d->target=%lu\n",
@@ -1637,9 +1641,9 @@ void thermal_cdev_update(struct thermal_cooling_device *cdev)
if (instance->target > target)
target = instance->target;
}
- mutex_unlock(&cdev->lock);
cdev->ops->set_cur_state(cdev, target);
cdev->updated = true;
+ mutex_unlock(&cdev->lock);
trace_cdev_update(cdev, target);
dev_dbg(&cdev->device, "set to state %lu\n", target);
}
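
This is the core of the series touching fair_share.c, gov_bang_bang.c, power_allocator.c and step_wise.c above: every write of cdev->updated now happens under cdev->lock, and thermal_cdev_update() keeps the lock from the updated check through set_cur_state() to the flag store. Previously a governor could clear the flag after the unlocked check and before the `cdev->updated = true` store, and that request was silently lost. A userspace pthread model of the closed race, with the names simplified:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static bool updated;
    static unsigned long cur_state, target;

    /* Governor side: request a new state and mark the device stale. */
    static void request_state(unsigned long state)
    {
        pthread_mutex_lock(&lock);
        target = state;
        updated = false;
        pthread_mutex_unlock(&lock);
    }

    /* Device side: the check, the apply and the flag store sit inside
     * one critical section, so a request_state() racing with us either
     * lands before the check or forces the next update to run. */
    static void cdev_update(void)
    {
        pthread_mutex_lock(&lock);
        if (updated) {
            pthread_mutex_unlock(&lock);
            return;
        }
        cur_state = target;     /* stands in for ops->set_cur_state() */
        updated = true;
        pthread_mutex_unlock(&lock);
    }

    int main(void)    /* build with: cc -pthread */
    {
        request_state(3);
        cdev_update();
        printf("cur_state=%lu updated=%d\n", cur_state, updated);
        return 0;
    }
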
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index 06fd2ed9e..c41c77429 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -232,6 +232,7 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
return result;
}
+EXPORT_SYMBOL_GPL(thermal_add_hwmon_sysfs);
void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
{
@@ -270,3 +271,4 @@ void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
hwmon_device_unregister(hwmon->device);
kfree(hwmon);
}
+EXPORT_SYMBOL_GPL(thermal_remove_hwmon_sysfs);
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 9c15344b6..a8c20413d 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -651,6 +651,12 @@ static struct pci_device_id nhi_ids[] = {
{
.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
.vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI,
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_INTEL,
.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI,
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
},
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 1e116f53d..9840fdecb 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -372,7 +372,9 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route)
if (sw->config.device_id != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE &&
sw->config.device_id != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C &&
- sw->config.device_id != PCI_DEVICE_ID_INTEL_PORT_RIDGE)
+ sw->config.device_id != PCI_DEVICE_ID_INTEL_PORT_RIDGE &&
+ sw->config.device_id != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE &&
+ sw->config.device_id != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE)
tb_sw_warn(sw, "unsupported switch device id %#x\n",
sw->config.device_id);
diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
index c67a0baab..c3e1604fe 100644
--- a/drivers/tty/cyclades.c
+++ b/drivers/tty/cyclades.c
@@ -93,8 +93,6 @@ static void cy_send_xchar(struct tty_struct *tty, char ch);
#define SERIAL_XMIT_SIZE (min(PAGE_SIZE, 4096))
#endif
-#define STD_COM_FLAGS (0)
-
/* firmware stuff */
#define ZL_MAX_BLOCKS 16
#define DRIVER_VERSION 0x02010203
@@ -2288,7 +2286,6 @@ static int cy_get_serial_info(struct cyclades_port *info,
.closing_wait = info->port.closing_wait,
.baud_base = info->baud,
.custom_divisor = info->custom_divisor,
- .hub6 = 0, /*!!! */
};
return copy_to_user(retinfo, &tmp, sizeof(*retinfo)) ? -EFAULT : 0;
}
@@ -3084,7 +3081,6 @@ static int cy_init_card(struct cyclades_card *cinfo)
info->port.closing_wait = CLOSING_WAIT_DELAY;
info->port.close_delay = 5 * HZ / 10;
- info->port.flags = STD_COM_FLAGS;
init_completion(&info->shutdown_wait);
if (cy_is_Z(cinfo)) {
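
The deleted `.hub6 = 0` (and the STD_COM_FLAGS constant that was only ever zero) lean on C99 designated-initializer semantics: any member not named in the initializer list is implicitly zero-initialized, so spelling zero out by hand is dead weight. The mxser.c and ipwireless hunks below make the same cleanup. A two-line demonstration:

    #include <stdio.h>

    struct serial_info {
        int line;
        int baud_base;
        int hub6;       /* never named below */
    };

    int main(void)
    {
        /* Unnamed members are zeroed, per C99 6.7.8p21. */
        struct serial_info tmp = { .line = 2, .baud_base = 115200 };

        printf("hub6 = %d\n", tmp.hub6);    /* prints 0 */
        return 0;
    }
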
diff --git a/drivers/tty/hvc/hvc_console.h b/drivers/tty/hvc/hvc_console.h
index 913101980..798c48d0d 100644
--- a/drivers/tty/hvc/hvc_console.h
+++ b/drivers/tty/hvc/hvc_console.h
@@ -60,6 +60,7 @@ struct hvc_struct {
struct winsize ws;
struct work_struct tty_resize;
struct list_head next;
+ unsigned long flags;
};
/* implemented by a low level driver */
diff --git a/drivers/tty/hvc/hvc_irq.c b/drivers/tty/hvc/hvc_irq.c
index c9adb0559..bc7a96874 100644
--- a/drivers/tty/hvc/hvc_irq.c
+++ b/drivers/tty/hvc/hvc_irq.c
@@ -14,6 +14,11 @@ static irqreturn_t hvc_handle_interrupt(int irq, void *dev_instance)
/* if hvc_poll requests a repoll, then kick the hvcd thread */
if (hvc_poll(dev_instance))
hvc_kick();
+
+ /*
+ * We're safe to always return IRQ_HANDLED as the hvcd thread will
+ * iterate through each hvc_struct.
+ */
return IRQ_HANDLED;
}
@@ -28,8 +33,8 @@ int notifier_add_irq(struct hvc_struct *hp, int irq)
hp->irq_requested = 0;
return 0;
}
- rc = request_irq(irq, hvc_handle_interrupt, 0,
- "hvc_console", hp);
+ rc = request_irq(irq, hvc_handle_interrupt, hp->flags,
+ "hvc_console", hp);
if (!rc)
hp->irq_requested = 1;
return rc;
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index 47b54c6ae..510799311 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -214,7 +214,13 @@ static int hvc_opal_probe(struct platform_device *dev)
dev->dev.of_node->full_name,
boot ? " (boot console)" : "");
- irq = opal_event_request(ilog2(OPAL_EVENT_CONSOLE_INPUT));
+ irq = irq_of_parse_and_map(dev->dev.of_node, 0);
+ if (!irq) {
+ pr_info("hvc%d: No interrupts property, using OPAL event\n",
+ termno);
+ irq = opal_event_request(ilog2(OPAL_EVENT_CONSOLE_INPUT));
+ }
+
if (!irq) {
pr_err("hvc_opal: Unable to map interrupt for device %s\n",
dev->dev.of_node->full_name);
@@ -224,6 +230,9 @@ static int hvc_opal_probe(struct platform_device *dev)
hp = hvc_alloc(termno, irq, ops, MAX_VIO_PUT_CHARS);
if (IS_ERR(hp))
return PTR_ERR(hp);
+
+ /* hvc consoles on powernv may need to share a single irq */
+ hp->flags = IRQF_SHARED;
dev_set_drvdata(&dev->dev, hp);
return 0;
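
Combined with the hvc_irq.c hunk above, the probe now prefers a device-tree interrupts property and only falls back to the OPAL event number when none exists, then marks the line shareable through hp->flags so notifier_add_irq() passes IRQF_SHARED to request_irq(). A condensed kernel-style sketch of the fallback order (kernel tree only; the fallback branch is a placeholder here, not the OPAL call):

    #include <linux/of_irq.h>

    static unsigned int demo_console_irq(struct device_node *np)
    {
        unsigned int irq;

        /* Prefer a real interrupts property from the device tree... */
        irq = irq_of_parse_and_map(np, 0);
        if (irq)
            return irq;

        /* ...and fall back to a firmware event source otherwise;
         * hvc_opal uses opal_event_request() at this point. */
        return 0;
    }
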
diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
index 345cebb07..2685d59d2 100644
--- a/drivers/tty/ipwireless/tty.c
+++ b/drivers/tty/ipwireless/tty.c
@@ -252,20 +252,11 @@ static int ipwireless_get_serial_info(struct ipw_tty *tty,
{
struct serial_struct tmp;
- if (!retinfo)
- return (-EFAULT);
-
memset(&tmp, 0, sizeof(tmp));
tmp.type = PORT_UNKNOWN;
tmp.line = tty->index;
- tmp.port = 0;
- tmp.irq = 0;
- tmp.flags = 0;
tmp.baud_base = 115200;
- tmp.close_delay = 0;
- tmp.closing_wait = 0;
- tmp.custom_divisor = 0;
- tmp.hub6 = 0;
+
if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
return -EFAULT;
diff --git a/drivers/tty/metag_da.c b/drivers/tty/metag_da.c
index 932526228..25ccef2fe 100644
--- a/drivers/tty/metag_da.c
+++ b/drivers/tty/metag_da.c
@@ -323,12 +323,12 @@ static void dashtty_timer(unsigned long ignored)
if (channel >= 0)
fetch_data(channel);
- mod_timer_pinned(&poll_timer, jiffies + DA_TTY_POLL);
+ mod_timer(&poll_timer, jiffies + DA_TTY_POLL);
}
static void add_poll_timer(struct timer_list *poll_timer)
{
- setup_timer(poll_timer, dashtty_timer, 0);
+ setup_pinned_timer(poll_timer, dashtty_timer, 0);
poll_timer->expires = jiffies + DA_TTY_POLL;
/*
diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
index a119176a1..234123b0c 100644
--- a/drivers/tty/mips_ejtag_fdc.c
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -689,7 +689,7 @@ static void mips_ejtag_fdc_tty_timer(unsigned long opaque)
mips_ejtag_fdc_handle(priv);
if (!priv->removing)
- mod_timer_pinned(&priv->poll_timer, jiffies + FDC_TTY_POLL);
+ mod_timer(&priv->poll_timer, jiffies + FDC_TTY_POLL);
}
/* TTY Port operations */
@@ -1002,7 +1002,7 @@ static int mips_ejtag_fdc_tty_probe(struct mips_cdmm_device *dev)
raw_spin_unlock_irq(&priv->lock);
} else {
/* If we didn't get a usable IRQ, poll instead */
- setup_timer(&priv->poll_timer, mips_ejtag_fdc_tty_timer,
+ setup_pinned_timer(&priv->poll_timer, mips_ejtag_fdc_tty_timer,
(unsigned long)priv);
priv->poll_timer.expires = jiffies + FDC_TTY_POLL;
/*
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 98d2bd167..69294ae15 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -1219,7 +1219,6 @@ static int mxser_get_serial_info(struct tty_struct *tty,
.close_delay = info->port.close_delay,
.closing_wait = info->port.closing_wait,
.custom_divisor = info->custom_divisor,
- .hub6 = 0
};
if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
return -EFAULT;
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index 215a99237..1a16feac9 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -53,11 +53,9 @@ struct old_serial_port {
unsigned int port;
unsigned int irq;
upf_t flags;
- unsigned char hub6;
unsigned char io_type;
unsigned char __iomem *iomem_base;
unsigned short iomem_reg_shift;
- unsigned long irqflags;
};
struct serial8250_config {
@@ -131,6 +129,16 @@ void serial8250_rpm_put(struct uart_8250_port *p);
int serial8250_em485_init(struct uart_8250_port *p);
void serial8250_em485_destroy(struct uart_8250_port *p);
+static inline void serial8250_out_MCR(struct uart_8250_port *up, int value)
+{
+ serial_out(up, UART_MCR, value);
+}
+
+static inline int serial8250_in_MCR(struct uart_8250_port *up)
+{
+ return serial_in(up, UART_MCR);
+}
+
#if defined(__alpha__) && !defined(CONFIG_PCI)
/*
* Digital did something really horribly wrong with the OUT1 and OUT2
@@ -237,9 +245,3 @@ static inline int serial_index(struct uart_port *port)
{
return port->minor - 64;
}
-
-#if 0
-#define DEBUG_INTR(fmt...) printk(fmt)
-#else
-#define DEBUG_INTR(fmt...) do { } while (0)
-#endif
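
The new serial8250_in_MCR()/serial8250_out_MCR() helpers funnel every modem-control access through a single pair of functions; the long mechanical conversion in 8250_port.c below then buys one place to add quirk handling (for parts that shadow or restrict MCR) without touching dozens of call sites. A standalone model of the choke-point idea, with a fake register file in place of UART I/O:

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical register file standing in for UART I/O space. */
    static uint8_t regs[8];

    static uint8_t reg_in(int offset)            { return regs[offset]; }
    static void    reg_out(int offset, uint8_t v) { regs[offset] = v; }

    #define UART_MCR 4  /* modem control register offset, as in the 8250 */

    /* Single choke point for MCR access: callers never touch UART_MCR
     * directly, so a later quirk only has to patch these two helpers. */
    static uint8_t mcr_in(void)        { return reg_in(UART_MCR); }
    static void    mcr_out(uint8_t v)  { reg_out(UART_MCR, v); }

    int main(void)
    {
        mcr_out(mcr_in() | 0x02);   /* set RTS without disturbing other bits */
        printf("MCR = %#x\n", mcr_in());
        return 0;
    }
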
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 0fbd7c033..dcf43f664 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -114,7 +114,7 @@ static irqreturn_t serial8250_interrupt(int irq, void *dev_id)
struct list_head *l, *end = NULL;
int pass_counter = 0, handled = 0;
- DEBUG_INTR("serial8250_interrupt(%d)...", irq);
+ pr_debug("%s(%d): start\n", __func__, irq);
spin_lock(&i->lock);
@@ -144,7 +144,7 @@ static irqreturn_t serial8250_interrupt(int irq, void *dev_id)
spin_unlock(&i->lock);
- DEBUG_INTR("end.\n");
+ pr_debug("%s(%d): end\n", __func__, irq);
return IRQ_RETVAL(handled);
}
@@ -546,10 +546,10 @@ static void __init serial8250_isa_init_ports(void)
port->iobase = old_serial_port[i].port;
port->irq = irq_canonicalize(old_serial_port[i].irq);
- port->irqflags = old_serial_port[i].irqflags;
+ port->irqflags = 0;
port->uartclk = old_serial_port[i].baud_base * 16;
port->flags = old_serial_port[i].flags;
- port->hub6 = old_serial_port[i].hub6;
+ port->hub6 = 0;
port->membase = old_serial_port[i].iomem_base;
port->iotype = old_serial_port[i].io_type;
port->regshift = old_serial_port[i].iomem_reg_shift;
@@ -675,7 +675,7 @@ static struct console univ8250_console = {
.device = uart_console_device,
.setup = univ8250_console_setup,
.match = univ8250_console_match,
- .flags = CON_PRINTBUFFER | CON_ANYTIME,
+ .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_CONSDEV,
.index = -1,
.data = &serial8250_reg,
};
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index 7f33d1c8d..3590d0120 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -145,6 +145,7 @@ void serial8250_rx_dma_flush(struct uart_8250_port *p)
dmaengine_terminate_all(dma->rxchan);
}
}
+EXPORT_SYMBOL_GPL(serial8250_rx_dma_flush);
int serial8250_request_dma(struct uart_8250_port *p)
{
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index 8d08ff5c4..85a12f032 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -150,6 +150,7 @@ EARLYCON_DECLARE(uart, early_serial8250_setup);
OF_EARLYCON_DECLARE(ns16550, "ns16550", early_serial8250_setup);
OF_EARLYCON_DECLARE(ns16550a, "ns16550a", early_serial8250_setup);
OF_EARLYCON_DECLARE(uart, "nvidia,tegra20-uart", early_serial8250_setup);
+OF_EARLYCON_DECLARE(uart, "snps,dw-apb-uart", early_serial8250_setup);
#ifdef CONFIG_SERIAL_8250_OMAP
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index 870981dd9..0facc789f 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -13,6 +13,7 @@
#include <linux/pnp.h>
#include <linux/kernel.h>
#include <linux/serial_core.h>
+#include <linux/irq.h>
#include "8250.h"
#define ADDR_PORT 0
@@ -30,6 +31,12 @@
#define IO_ADDR2 0x60
#define LDN 0x7
+#define FINTEK_IRQ_MODE 0x70
+#define IRQ_SHARE BIT(4)
+#define IRQ_MODE_MASK (BIT(6) | BIT(5))
+#define IRQ_LEVEL_LOW 0
+#define IRQ_EDGE_HIGH BIT(5)
+
#define RS485 0xF0
#define RTS_INVERT BIT(5)
#define RS485_URA BIT(4)
@@ -176,10 +183,37 @@ static int find_base_port(struct fintek_8250 *pdata, u16 io_address)
return -ENODEV;
}
+static int fintek_8250_set_irq_mode(struct fintek_8250 *pdata, bool level_mode)
+{
+ int status;
+ u8 tmp;
+
+ status = fintek_8250_enter_key(pdata->base_port, pdata->key);
+ if (status)
+ return status;
+
+ outb(LDN, pdata->base_port + ADDR_PORT);
+ outb(pdata->index, pdata->base_port + DATA_PORT);
+
+ outb(FINTEK_IRQ_MODE, pdata->base_port + ADDR_PORT);
+ tmp = inb(pdata->base_port + DATA_PORT);
+
+ tmp &= ~IRQ_MODE_MASK;
+ tmp |= IRQ_SHARE;
+ if (!level_mode)
+ tmp |= IRQ_EDGE_HIGH;
+
+ outb(tmp, pdata->base_port + DATA_PORT);
+ fintek_8250_exit_key(pdata->base_port);
+ return 0;
+}
+
int fintek_8250_probe(struct uart_8250_port *uart)
{
struct fintek_8250 *pdata;
struct fintek_8250 probe_data;
+ struct irq_data *irq_data = irq_get_irq_data(uart->port.irq);
+ bool level_mode = irqd_is_level_type(irq_data);
if (find_base_port(&probe_data, uart->port.iobase))
return -ENODEV;
@@ -192,5 +226,5 @@ int fintek_8250_probe(struct uart_8250_port *uart)
uart->port.rs485_config = fintek_8250_rs485_config;
uart->port.private_data = pdata;
- return 0;
+ return fintek_8250_set_irq_mode(pdata, level_mode);
}
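
fintek_8250_set_irq_mode() is a textbook SuperIO sequence: unlock the chip with its key, select the logical device through the ADDR/DATA port pair, then read-modify-write the IRQ configuration register. A standalone model of index/data addressing; the register map below is made up for the sketch:

    #include <stdio.h>
    #include <stdint.h>

    /* Fake SuperIO: one index latch in front of a register file. */
    static uint8_t regfile[256];
    static uint8_t index_latch;

    static void outb_addr(uint8_t idx)  { index_latch = idx; }          /* ADDR_PORT */
    static void outb_data(uint8_t val)  { regfile[index_latch] = val; } /* DATA_PORT */
    static uint8_t inb_data(void)       { return regfile[index_latch]; }

    #define IRQ_MODE_REG  0x70
    #define IRQ_SHARE     (1 << 4)
    #define IRQ_MODE_MASK ((1 << 6) | (1 << 5))
    #define IRQ_EDGE_HIGH (1 << 5)

    /* Same read-modify-write shape as fintek_8250_set_irq_mode(). */
    static void set_irq_mode(int level_mode)
    {
        uint8_t tmp;

        outb_addr(IRQ_MODE_REG);
        tmp = inb_data();

        tmp &= ~IRQ_MODE_MASK;
        tmp |= IRQ_SHARE;
        if (!level_mode)
            tmp |= IRQ_EDGE_HIGH;

        outb_data(tmp);
    }

    int main(void)
    {
        set_irq_mode(0);
        outb_addr(IRQ_MODE_REG);
        printf("mode reg = %#x\n", inb_data());
        return 0;
    }
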
diff --git a/drivers/tty/serial/8250/8250_ingenic.c b/drivers/tty/serial/8250/8250_ingenic.c
index b0677f610..4d9dc10e2 100644
--- a/drivers/tty/serial/8250/8250_ingenic.c
+++ b/drivers/tty/serial/8250/8250_ingenic.c
@@ -48,7 +48,6 @@ static const struct of_device_id of_match[];
#define UART_MCR_MDCE BIT(7)
#define UART_MCR_FCM BIT(6)
-#if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE)
static struct earlycon_device *early_device;
static uint8_t __init early_in(struct uart_port *port, int offset)
@@ -141,7 +140,6 @@ OF_EARLYCON_DECLARE(jz4775_uart, "ingenic,jz4775-uart",
EARLYCON_DECLARE(jz4780_uart, ingenic_early_console_setup);
OF_EARLYCON_DECLARE(jz4780_uart, "ingenic,jz4780-uart",
ingenic_early_console_setup);
-#endif /* CONFIG_SERIAL_EARLYCON */
static void ingenic_uart_serial_out(struct uart_port *p, int offset, int value)
{
diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c
index 0f50a3f5e..20c5db2f4 100644
--- a/drivers/tty/serial/8250/8250_mid.c
+++ b/drivers/tty/serial/8250/8250_mid.c
@@ -96,13 +96,27 @@ static int tng_setup(struct mid8250 *mid, struct uart_port *p)
static int dnv_handle_irq(struct uart_port *p)
{
struct mid8250 *mid = p->private_data;
+ struct uart_8250_port *up = up_to_u8250p(p);
unsigned int fisr = serial_port_in(p, INTEL_MID_UART_DNV_FISR);
+ u32 status;
int ret = IRQ_NONE;
-
- if (fisr & BIT(2))
- ret |= hsu_dma_irq(&mid->dma_chip, 1);
- if (fisr & BIT(1))
- ret |= hsu_dma_irq(&mid->dma_chip, 0);
+ int err;
+
+ if (fisr & BIT(2)) {
+ err = hsu_dma_get_status(&mid->dma_chip, 1, &status);
+ if (err > 0) {
+ serial8250_rx_dma_flush(up);
+ ret |= IRQ_HANDLED;
+ } else if (err == 0)
+ ret |= hsu_dma_do_irq(&mid->dma_chip, 1, status);
+ }
+ if (fisr & BIT(1)) {
+ err = hsu_dma_get_status(&mid->dma_chip, 0, &status);
+ if (err > 0)
+ ret |= IRQ_HANDLED;
+ else if (err == 0)
+ ret |= hsu_dma_do_irq(&mid->dma_chip, 0, status);
+ }
if (fisr & BIT(0))
ret |= serial8250_handle_irq(p, serial_port_in(p, UART_IIR));
return ret;
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index 3489fbcb7..3611ec9bb 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -301,7 +301,7 @@ static struct platform_driver mtk8250_platform_driver = {
};
module_platform_driver(mtk8250_platform_driver);
-#if defined(CONFIG_SERIAL_8250_CONSOLE) && !defined(MODULE)
+#ifdef CONFIG_SERIAL_8250_CONSOLE
static int __init early_mtk8250_setup(struct earlycon_device *device,
const char *options)
{
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 2c44c792d..61ad6c3b2 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -280,7 +280,7 @@ static void omap8250_restore_regs(struct uart_8250_port *up)
serial_out(up, UART_EFR, UART_EFR_ECB);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
- serial_out(up, UART_MCR, UART_MCR_TCRTLR);
+ serial8250_out_MCR(up, UART_MCR_TCRTLR);
serial_out(up, UART_FCR, up->fcr);
omap8250_update_scr(up, priv);
@@ -296,7 +296,7 @@ static void omap8250_restore_regs(struct uart_8250_port *up)
serial_out(up, UART_LCR, 0);
/* drop TCR + TLR access, we setup XON/XOFF later */
- serial_out(up, UART_MCR, up->mcr);
+ serial8250_out_MCR(up, up->mcr);
serial_out(up, UART_IER, up->ier);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index e67a46301..bc51b32b2 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1136,11 +1136,11 @@ static int pci_quatech_rqopr(struct uart_8250_port *port)
static void pci_quatech_wqopr(struct uart_8250_port *port, u8 qopr)
{
unsigned long base = port->port.iobase;
- u8 LCR, val;
+ u8 LCR;
LCR = inb(base + UART_LCR);
outb(0xBF, base + UART_LCR);
- val = inb(base + UART_SCR);
+ inb(base + UART_SCR);
outb(qopr, base + UART_SCR);
outb(LCR, base + UART_LCR);
}
@@ -1865,6 +1865,16 @@ pci_wch_ch353_setup(struct serial_private *priv,
}
static int
+pci_wch_ch355_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_8250_port *port, int idx)
+{
+ port->port.flags |= UPF_FIXED_TYPE;
+ port->port.type = PORT_16550A;
+ return pci_default_setup(priv, board, port, idx);
+}
+
+static int
pci_wch_ch38x_setup(struct serial_private *priv,
const struct pciserial_board *board,
struct uart_8250_port *port, int idx)
@@ -1915,6 +1925,7 @@ pci_wch_ch38x_setup(struct serial_private *priv,
#define PCI_DEVICE_ID_WCH_CH353_2S1PF 0x5046
#define PCI_DEVICE_ID_WCH_CH353_1S1P 0x5053
#define PCI_DEVICE_ID_WCH_CH353_2S1P 0x7053
+#define PCI_DEVICE_ID_WCH_CH355_4S 0x7173
#define PCI_VENDOR_ID_AGESTAR 0x5372
#define PCI_DEVICE_ID_AGESTAR_9375 0x6872
#define PCI_VENDOR_ID_ASIX 0x9710
@@ -2655,6 +2666,14 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.subdevice = PCI_ANY_ID,
.setup = pci_wch_ch353_setup,
},
+ /* WCH CH355 4S card (16550 clone) */
+ {
+ .vendor = PCI_VENDOR_ID_WCH,
+ .device = PCI_DEVICE_ID_WCH_CH355_4S,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_wch_ch355_setup,
+ },
/* WCH CH382 2S card (16850 clone) */
{
.vendor = PCIE_VENDOR_ID_WCH,
@@ -3849,6 +3868,7 @@ static const struct pci_device_id blacklist[] = {
/* multi-io cards handled by parport_serial */
{ PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */
{ PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */
+ { PCI_DEVICE(0x4348, 0x7173), }, /* WCH CH355 4S */
{ PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */
{ PCI_DEVICE(0x1c00, 0x3470), }, /* WCH CH384 4S */
@@ -5706,6 +5726,10 @@ static struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID,
0, 0, pbn_b0_bt_2_115200 },
+ { PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH355_4S,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0, pbn_b0_bt_4_115200 },
+
{ PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH382_2S,
PCI_ANY_ID, PCI_ANY_ID,
0, 0, pbn_wch382_2 },
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index d4036038a..bdfa659b9 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -527,13 +527,13 @@ static void serial8250_clear_fifos(struct uart_8250_port *p)
static inline void serial8250_em485_rts_after_send(struct uart_8250_port *p)
{
- unsigned char mcr = serial_in(p, UART_MCR);
+ unsigned char mcr = serial8250_in_MCR(p);
if (p->port.rs485.flags & SER_RS485_RTS_AFTER_SEND)
mcr |= UART_MCR_RTS;
else
mcr &= ~UART_MCR_RTS;
- serial_out(p, UART_MCR, mcr);
+ serial8250_out_MCR(p, mcr);
}
static void serial8250_em485_handle_start_tx(unsigned long arg);
@@ -785,10 +785,10 @@ static int size_fifo(struct uart_8250_port *up)
old_lcr = serial_in(up, UART_LCR);
serial_out(up, UART_LCR, 0);
old_fcr = serial_in(up, UART_FCR);
- old_mcr = serial_in(up, UART_MCR);
+ old_mcr = serial8250_in_MCR(up);
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
- serial_out(up, UART_MCR, UART_MCR_LOOP);
+ serial8250_out_MCR(up, UART_MCR_LOOP);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
old_dl = serial_dl_read(up);
serial_dl_write(up, 0x0001);
@@ -800,7 +800,7 @@ static int size_fifo(struct uart_8250_port *up)
(count < 256); count++)
serial_in(up, UART_RX);
serial_out(up, UART_FCR, old_fcr);
- serial_out(up, UART_MCR, old_mcr);
+ serial8250_out_MCR(up, old_mcr);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
serial_dl_write(up, old_dl);
serial_out(up, UART_LCR, old_lcr);
@@ -1040,17 +1040,17 @@ static void autoconfig_16550a(struct uart_8250_port *up)
* it's changed. If so, set baud_base in EXCR2 to 921600. -- dwmw2
*/
serial_out(up, UART_LCR, 0);
- status1 = serial_in(up, UART_MCR);
+ status1 = serial8250_in_MCR(up);
serial_out(up, UART_LCR, 0xE0);
status2 = serial_in(up, 0x02); /* EXCR1 */
if (!((status2 ^ status1) & UART_MCR_LOOP)) {
serial_out(up, UART_LCR, 0);
- serial_out(up, UART_MCR, status1 ^ UART_MCR_LOOP);
+ serial8250_out_MCR(up, status1 ^ UART_MCR_LOOP);
serial_out(up, UART_LCR, 0xE0);
status2 = serial_in(up, 0x02); /* EXCR1 */
serial_out(up, UART_LCR, 0);
- serial_out(up, UART_MCR, status1);
+ serial8250_out_MCR(up, status1);
if ((status2 ^ status1) & UART_MCR_LOOP) {
unsigned short quot;
@@ -1224,7 +1224,7 @@ static void autoconfig(struct uart_8250_port *up)
}
}
- save_mcr = serial_in(up, UART_MCR);
+ save_mcr = serial8250_in_MCR(up);
save_lcr = serial_in(up, UART_LCR);
/*
@@ -1237,9 +1237,9 @@ static void autoconfig(struct uart_8250_port *up)
* that conflicts with COM 1-4 --- we hope!
*/
if (!(port->flags & UPF_SKIP_TEST)) {
- serial_out(up, UART_MCR, UART_MCR_LOOP | 0x0A);
+ serial8250_out_MCR(up, UART_MCR_LOOP | 0x0A);
status1 = serial_in(up, UART_MSR) & 0xF0;
- serial_out(up, UART_MCR, save_mcr);
+ serial8250_out_MCR(up, save_mcr);
if (status1 != 0x90) {
spin_unlock_irqrestore(&port->lock, flags);
DEBUG_AUTOCONF("LOOP test failed (%02x) ",
@@ -1305,7 +1305,7 @@ static void autoconfig(struct uart_8250_port *up)
if (port->type == PORT_RSA)
serial_out(up, UART_RSA_FRR, 0);
#endif
- serial_out(up, UART_MCR, save_mcr);
+ serial8250_out_MCR(up, save_mcr);
serial8250_clear_fifos(up);
serial_in(up, UART_RX);
if (up->capabilities & UART_CAP_UUE)
@@ -1353,19 +1353,18 @@ static void autoconfig_irq(struct uart_8250_port *up)
/* forget possible initially masked and pending IRQ */
probe_irq_off(probe_irq_on());
- save_mcr = serial_in(up, UART_MCR);
+ save_mcr = serial8250_in_MCR(up);
save_ier = serial_in(up, UART_IER);
- serial_out(up, UART_MCR, UART_MCR_OUT1 | UART_MCR_OUT2);
+ serial8250_out_MCR(up, UART_MCR_OUT1 | UART_MCR_OUT2);
irqs = probe_irq_on();
- serial_out(up, UART_MCR, 0);
+ serial8250_out_MCR(up, 0);
udelay(10);
if (port->flags & UPF_FOURPORT) {
- serial_out(up, UART_MCR,
- UART_MCR_DTR | UART_MCR_RTS);
+ serial8250_out_MCR(up, UART_MCR_DTR | UART_MCR_RTS);
} else {
- serial_out(up, UART_MCR,
- UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2);
+ serial8250_out_MCR(up,
+ UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2);
}
serial_out(up, UART_IER, 0x0f); /* enable all intrs */
serial_in(up, UART_LSR);
@@ -1376,7 +1375,7 @@ static void autoconfig_irq(struct uart_8250_port *up)
udelay(20);
irq = probe_irq_off(irqs);
- serial_out(up, UART_MCR, save_mcr);
+ serial8250_out_MCR(up, save_mcr);
serial_out(up, UART_IER, save_ier);
if (port->flags & UPF_FOURPORT)
@@ -1549,14 +1548,14 @@ static inline void start_tx_rs485(struct uart_port *port)
del_timer(&em485->stop_tx_timer);
em485->active_timer = NULL;
- mcr = serial_in(up, UART_MCR);
+ mcr = serial8250_in_MCR(up);
if (!!(up->port.rs485.flags & SER_RS485_RTS_ON_SEND) !=
!!(mcr & UART_MCR_RTS)) {
if (up->port.rs485.flags & SER_RS485_RTS_ON_SEND)
mcr |= UART_MCR_RTS;
else
mcr &= ~UART_MCR_RTS;
- serial_out(up, UART_MCR, mcr);
+ serial8250_out_MCR(up, mcr);
if (up->port.rs485.delay_rts_before_send > 0) {
em485->active_timer = &em485->start_tx_timer;
@@ -1686,7 +1685,7 @@ static void serial8250_read_char(struct uart_8250_port *up, unsigned char lsr)
lsr &= port->read_status_mask;
if (lsr & UART_LSR_BI) {
- DEBUG_INTR("handling break....");
+ pr_debug("%s: handling break\n", __func__);
flag = TTY_BREAK;
} else if (lsr & UART_LSR_PE)
flag = TTY_PARITY;
@@ -1757,7 +1756,7 @@ void serial8250_tx_chars(struct uart_8250_port *up)
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
- DEBUG_INTR("THRE...");
+ pr_debug("%s: THRE\n", __func__);
/*
* With RPM enabled, we have to wait until the FIFO is empty before the
@@ -1823,7 +1822,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
status = serial_port_in(port, UART_LSR);
- DEBUG_INTR("status = %x...", status);
+ pr_debug("%s: status = %x\n", __func__, status);
if (status & (UART_LSR_DR | UART_LSR_BI)) {
if (!up->dma || handle_rx_dma(up, iir))
@@ -1861,7 +1860,6 @@ static int serial8250_default_handle_irq(struct uart_port *port)
*/
static int exar_handle_irq(struct uart_port *port)
{
- unsigned char int0, int1, int2, int3;
unsigned int iir = serial_port_in(port, UART_IIR);
int ret;
@@ -1869,10 +1867,10 @@ static int exar_handle_irq(struct uart_port *port)
if ((port->type == PORT_XR17V35X) ||
(port->type == PORT_XR17D15X)) {
- int0 = serial_port_in(port, 0x80);
- int1 = serial_port_in(port, 0x81);
- int2 = serial_port_in(port, 0x82);
- int3 = serial_port_in(port, 0x83);
+ serial_port_in(port, 0x80);
+ serial_port_in(port, 0x81);
+ serial_port_in(port, 0x82);
+ serial_port_in(port, 0x83);
}
return ret;
@@ -1944,7 +1942,7 @@ void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl)
mcr = (mcr & up->mcr_mask) | up->mcr_force | up->mcr;
- serial_port_out(port, UART_MCR, mcr);
+ serial8250_out_MCR(up, mcr);
}
EXPORT_SYMBOL_GPL(serial8250_do_set_mctrl);
@@ -1994,8 +1992,6 @@ static void wait_for_xmitr(struct uart_8250_port *up, int bits)
/* Wait up to 1s for flow control if necessary */
if (up->port.flags & UPF_CONS_FLOW) {
- unsigned int tmout;
-
for (tmout = 1000000; tmout; tmout--) {
unsigned int msr = serial_in(up, UART_MSR);
up->msr_saved_flags |= msr & MSR_SAVE_FLAGS;
@@ -3093,7 +3089,7 @@ static void serial8250_console_restore(struct uart_8250_port *up)
serial8250_set_divisor(port, baud, quot, frac);
serial_port_out(port, UART_LCR, up->lcr);
- serial_port_out(port, UART_MCR, UART_MCR_DTR | UART_MCR_RTS);
+ serial8250_out_MCR(up, UART_MCR_DTR | UART_MCR_RTS);
}
/*
diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c
index efd1f9c04..b8d9c8c9d 100644
--- a/drivers/tty/serial/8250/8250_uniphier.c
+++ b/drivers/tty/serial/8250/8250_uniphier.c
@@ -35,7 +35,7 @@ struct uniphier8250_priv {
spinlock_t atomic_write_lock;
};
-#if defined(CONFIG_SERIAL_8250_CONSOLE) && !defined(MODULE)
+#ifdef CONFIG_SERIAL_8250_CONSOLE
static int __init uniphier_early_console_setup(struct earlycon_device *device,
const char *options)
{
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index e46761d20..7c6f7afca 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -387,7 +387,8 @@ config SERIAL_8250_MT6577
config SERIAL_8250_UNIPHIER
tristate "Support for UniPhier on-chip UART"
- depends on SERIAL_8250 && ARCH_UNIPHIER
+ depends on SERIAL_8250
+ depends on ARCH_UNIPHIER || COMPILE_TEST
help
If you have a UniPhier based board and want to use the on-chip
serial ports, say Y to this option. If unsure, say N.
@@ -395,7 +396,7 @@ config SERIAL_8250_UNIPHIER
config SERIAL_8250_INGENIC
tristate "Support for Ingenic SoC serial ports"
depends on SERIAL_8250
- depends on (OF_FLATTREE && SERIAL_8250_CONSOLE) || !SERIAL_EARLYCON
+ depends on OF_FLATTREE
depends on MIPS || COMPILE_TEST
help
If you have a system using an Ingenic SoC and wish to make use of
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 7e3a58c8b..518db24a5 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -736,6 +736,7 @@ config SERIAL_SH_SCI
tristate "SuperH SCI(F) serial port support"
depends on SUPERH || ARCH_RENESAS || H8300 || COMPILE_TEST
select SERIAL_CORE
+ select SERIAL_MCTRL_GPIO if GPIOLIB
config SERIAL_SH_SCI_NR_UARTS
int "Maximum number of SCI(F) serial ports"
@@ -1477,7 +1478,7 @@ config SERIAL_MPS2_UART_CONSOLE
config SERIAL_MPS2_UART
bool "MPS2 UART port"
- depends on ARM || COMPILE_TEST
+ depends on ARCH_MPS2 || COMPILE_TEST
select SERIAL_CORE
help
This driver support the UART ports on ARM MPS2.
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 1b7331e40..8a9e21338 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2553,11 +2553,17 @@ static int sbsa_uart_probe(struct platform_device *pdev)
if (!uap)
return -ENOMEM;
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot obtain irq\n");
+ return ret;
+ }
+ uap->port.irq = ret;
+
uap->reg_offset = vendor_sbsa.reg_offset;
uap->vendor = &vendor_sbsa;
uap->fifosize = 32;
uap->port.iotype = vendor_sbsa.access_32b ? UPIO_MEM32 : UPIO_MEM;
- uap->port.irq = platform_get_irq(pdev, 0);
uap->port.ops = &sbsa_uart_pops;
uap->fixed_baud = baudrate;
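
This hunk, like the fsl_lpuart.c one below, exists because uart_port.irq is unsigned: storing platform_get_irq()'s return value directly turns a negative errno, -EPROBE_DEFER included, into an enormous bogus IRQ number and the error is never seen. A standalone demonstration of the truncation hazard (517 is the kernel's EPROBE_DEFER value):

    #include <stdio.h>

    /* Pretend the IRQ provider is not ready yet. */
    static int fake_platform_get_irq(void) { return -517; /* -EPROBE_DEFER */ }

    int main(void)
    {
        /* bug: the error silently becomes 4294966779 */
        unsigned int irq = fake_platform_get_irq();
        printf("stored irq = %u\n", irq);

        /* fix: test the signed return before the unsigned store */
        int ret = fake_platform_get_irq();
        if (ret < 0) {
            printf("probe fails with %d\n", ret);
            return 1;
        }
        irq = ret;
        return 0;
    }
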
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index 1519d2ca7..73137f4aa 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -54,7 +54,7 @@ struct ar933x_uart_port {
static inline bool ar933x_uart_console_enabled(void)
{
- return config_enabled(CONFIG_SERIAL_AR933X_CONSOLE);
+ return IS_ENABLED(CONFIG_SERIAL_AR933X_CONSOLE);
}
static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up,
@@ -636,7 +636,7 @@ static int ar933x_uart_probe(struct platform_device *pdev)
int ret;
np = pdev->dev.of_node;
- if (config_enabled(CONFIG_OF) && np) {
+ if (IS_ENABLED(CONFIG_OF) && np) {
id = of_alias_get_id(np, "serial");
if (id < 0) {
dev_err(&pdev->dev, "unable to get alias id, err=%d\n",
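
config_enabled() was an internal helper slated for removal; IS_ENABLED() is the public spelling, and it additionally reports true when the option is built as a module (CONFIG_FOO_MODULE). Both work inside ordinary C expressions, unlike #ifdef, so dead branches still get type-checked before the compiler discards them. A standalone re-implementation of the underlying preprocessor trick (compile with gcc; CONFIG_FOO here is a stand-in, not a real Kconfig symbol):

    #include <stdio.h>

    /* Core of the kernel's IS_ENABLED() machinery: if the symbol is
     * defined as 1, the placeholder macro smuggles in an extra comma
     * and the trailing "1" becomes the second argument; otherwise the
     * junk token stays in first position and we pick up the final 0. */
    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(__ignored, val, ...) val
    #define __is_defined(x) ___is_defined(x)
    #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)

    #define CONFIG_FOO 1    /* comment this out to see the 0 branch */

    int main(void)
    {
        if (__is_defined(CONFIG_FOO))
            puts("CONFIG_FOO enabled");
        else
            puts("CONFIG_FOO disabled");
        return 0;
    }
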
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index f9c798cba..2eaa18dde 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -108,6 +108,12 @@ struct atmel_uart_char {
u16 ch;
};
+/*
+ * Be careful, the real size of the ring buffer is
+ * sizeof(atmel_uart_char) * ATMEL_SERIAL_RINGSIZE. This means the ring buffer
+ * can hold up to 1024 characters in PIO mode and up to 4096 characters in
+ * DMA mode.
+ */
#define ATMEL_SERIAL_RINGSIZE 1024
/*
@@ -145,10 +151,10 @@ struct atmel_uart_port {
dma_cookie_t cookie_rx;
struct scatterlist sg_tx;
struct scatterlist sg_rx;
- struct tasklet_struct tasklet;
- unsigned int irq_status;
+ struct tasklet_struct tasklet_rx;
+ struct tasklet_struct tasklet_tx;
+ atomic_t tasklet_shutdown;
unsigned int irq_status_prev;
- unsigned int status_change;
unsigned int tx_len;
struct circ_buf rx_ring;
@@ -281,6 +287,13 @@ static bool atmel_use_fifo(struct uart_port *port)
return atmel_port->fifo_size;
}
+static void atmel_tasklet_schedule(struct atmel_uart_port *atmel_port,
+ struct tasklet_struct *t)
+{
+ if (!atomic_read(&atmel_port->tasklet_shutdown))
+ tasklet_schedule(t);
+}
+
static unsigned int atmel_get_lines_status(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
@@ -712,7 +725,7 @@ static void atmel_rx_chars(struct uart_port *port)
status = atmel_uart_readl(port, ATMEL_US_CSR);
}
- tasklet_schedule(&atmel_port->tasklet);
+ atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
}
/*
@@ -783,7 +796,7 @@ static void atmel_complete_tx_dma(void *arg)
* remaining data from the beginning of xmit->buf to xmit->head.
*/
if (!uart_circ_empty(xmit))
- tasklet_schedule(&atmel_port->tasklet);
+ atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -968,7 +981,7 @@ static void atmel_complete_rx_dma(void *arg)
struct uart_port *port = arg;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
- tasklet_schedule(&atmel_port->tasklet);
+ atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
}
static void atmel_release_rx_dma(struct uart_port *port)
@@ -1008,7 +1021,7 @@ static void atmel_rx_from_dma(struct uart_port *port)
if (dmastat == DMA_ERROR) {
dev_dbg(port->dev, "Get residue error, restart tasklet\n");
atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
- tasklet_schedule(&atmel_port->tasklet);
+ atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
return;
}
@@ -1162,8 +1175,11 @@ static void atmel_uart_timer_callback(unsigned long data)
struct uart_port *port = (void *)data;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
- tasklet_schedule(&atmel_port->tasklet);
- mod_timer(&atmel_port->uart_timer, jiffies + uart_poll_timeout(port));
+ if (!atomic_read(&atmel_port->tasklet_shutdown)) {
+ tasklet_schedule(&atmel_port->tasklet_rx);
+ mod_timer(&atmel_port->uart_timer,
+ jiffies + uart_poll_timeout(port));
+ }
}
/*
@@ -1185,7 +1201,8 @@ atmel_handle_receive(struct uart_port *port, unsigned int pending)
if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) {
atmel_uart_writel(port, ATMEL_US_IDR,
(ATMEL_US_ENDRX | ATMEL_US_TIMEOUT));
- tasklet_schedule(&atmel_port->tasklet);
+ atmel_tasklet_schedule(atmel_port,
+ &atmel_port->tasklet_rx);
}
if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE |
@@ -1197,7 +1214,8 @@ atmel_handle_receive(struct uart_port *port, unsigned int pending)
if (pending & ATMEL_US_TIMEOUT) {
atmel_uart_writel(port, ATMEL_US_IDR,
ATMEL_US_TIMEOUT);
- tasklet_schedule(&atmel_port->tasklet);
+ atmel_tasklet_schedule(atmel_port,
+ &atmel_port->tasklet_rx);
}
}
@@ -1227,7 +1245,7 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
/* Either PDC or interrupt transmission */
atmel_uart_writel(port, ATMEL_US_IDR,
atmel_port->tx_done_mask);
- tasklet_schedule(&atmel_port->tasklet);
+ atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
}
}
@@ -1239,14 +1257,27 @@ atmel_handle_status(struct uart_port *port, unsigned int pending,
unsigned int status)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ unsigned int status_change;
if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC
| ATMEL_US_CTSIC)) {
- atmel_port->irq_status = status;
- atmel_port->status_change = atmel_port->irq_status ^
- atmel_port->irq_status_prev;
+ status_change = status ^ atmel_port->irq_status_prev;
atmel_port->irq_status_prev = status;
- tasklet_schedule(&atmel_port->tasklet);
+
+ if (status_change & (ATMEL_US_RI | ATMEL_US_DSR
+ | ATMEL_US_DCD | ATMEL_US_CTS)) {
+ /* TODO: All reads to CSR will clear these interrupts! */
+ if (status_change & ATMEL_US_RI)
+ port->icount.rng++;
+ if (status_change & ATMEL_US_DSR)
+ port->icount.dsr++;
+ if (status_change & ATMEL_US_DCD)
+ uart_handle_dcd_change(port, !(status & ATMEL_US_DCD));
+ if (status_change & ATMEL_US_CTS)
+ uart_handle_cts_change(port, !(status & ATMEL_US_CTS));
+
+ wake_up_interruptible(&port->state->port.delta_msr_wait);
+ }
}
}
@@ -1573,37 +1604,25 @@ static int atmel_prepare_rx_pdc(struct uart_port *port)
/*
* tasklet handling tty stuff outside the interrupt handler.
*/
-static void atmel_tasklet_func(unsigned long data)
+static void atmel_tasklet_rx_func(unsigned long data)
{
struct uart_port *port = (struct uart_port *)data;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
- unsigned int status = atmel_port->irq_status;
- unsigned int status_change = atmel_port->status_change;
/* The interrupt handler does not take the lock */
spin_lock(&port->lock);
-
- atmel_port->schedule_tx(port);
-
- if (status_change & (ATMEL_US_RI | ATMEL_US_DSR
- | ATMEL_US_DCD | ATMEL_US_CTS)) {
- /* TODO: All reads to CSR will clear these interrupts! */
- if (status_change & ATMEL_US_RI)
- port->icount.rng++;
- if (status_change & ATMEL_US_DSR)
- port->icount.dsr++;
- if (status_change & ATMEL_US_DCD)
- uart_handle_dcd_change(port, !(status & ATMEL_US_DCD));
- if (status_change & ATMEL_US_CTS)
- uart_handle_cts_change(port, !(status & ATMEL_US_CTS));
-
- wake_up_interruptible(&port->state->port.delta_msr_wait);
-
- atmel_port->status_change = 0;
- }
-
atmel_port->schedule_rx(port);
+ spin_unlock(&port->lock);
+}
+static void atmel_tasklet_tx_func(unsigned long data)
+{
+ struct uart_port *port = (struct uart_port *)data;
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
+ /* The interrupt handler does not take the lock */
+ spin_lock(&port->lock);
+ atmel_port->schedule_tx(port);
spin_unlock(&port->lock);
}
@@ -1787,7 +1806,11 @@ static int atmel_startup(struct uart_port *port)
return retval;
}
- tasklet_enable(&atmel_port->tasklet);
+ atomic_set(&atmel_port->tasklet_shutdown, 0);
+ tasklet_init(&atmel_port->tasklet_rx, atmel_tasklet_rx_func,
+ (unsigned long)port);
+ tasklet_init(&atmel_port->tasklet_tx, atmel_tasklet_tx_func,
+ (unsigned long)port);
/*
* Initialize DMA (if necessary)
@@ -1835,7 +1858,6 @@ static int atmel_startup(struct uart_port *port)
/* Save current CSR for comparison in atmel_tasklet_func() */
atmel_port->irq_status_prev = atmel_get_lines_status(port);
- atmel_port->irq_status = atmel_port->irq_status_prev;
/*
* Finally, enable the serial port
@@ -1907,29 +1929,36 @@ static void atmel_shutdown(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ /* Disable interrupts at device level */
+ atmel_uart_writel(port, ATMEL_US_IDR, -1);
+
+ /* Prevent spurious interrupts from scheduling the tasklet */
+ atomic_inc(&atmel_port->tasklet_shutdown);
+
/*
* Prevent any tasklets being scheduled during
* cleanup
*/
del_timer_sync(&atmel_port->uart_timer);
+	/* Make sure that no interrupt is in flight */
+ synchronize_irq(port->irq);
+
/*
* Clear out any scheduled tasklets before
* we destroy the buffers
*/
- tasklet_disable(&atmel_port->tasklet);
- tasklet_kill(&atmel_port->tasklet);
+ tasklet_kill(&atmel_port->tasklet_rx);
+ tasklet_kill(&atmel_port->tasklet_tx);
/*
* Ensure everything is stopped and
- * disable all interrupts, port and break condition.
+ * disable port and break condition.
*/
atmel_stop_rx(port);
atmel_stop_tx(port);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
- atmel_uart_writel(port, ATMEL_US_IDR, -1);
-
/*
* Shut-down the DMA.
@@ -2313,10 +2342,6 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
port->irq = pdev->resource[1].start;
port->rs485_config = atmel_config_rs485;
- tasklet_init(&atmel_port->tasklet, atmel_tasklet_func,
- (unsigned long)port);
- tasklet_disable(&atmel_port->tasklet);
-
memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring));
if (pdata && pdata->regs) {
@@ -2701,6 +2726,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
atmel_port->uart.line = ret;
atmel_serial_probe_fifos(atmel_port, pdev);
+ atomic_set(&atmel_port->tasklet_shutdown, 0);
spin_lock_init(&atmel_port->lock_suspended);
ret = atmel_init_port(atmel_port, pdev);
@@ -2797,7 +2823,8 @@ static int atmel_serial_remove(struct platform_device *pdev)
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
int ret = 0;
- tasklet_kill(&atmel_port->tasklet);
+ tasklet_kill(&atmel_port->tasklet_rx);
+ tasklet_kill(&atmel_port->tasklet_tx);
device_init_wakeup(&pdev->dev, 0);
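
The tasklet rework replaces tasklet_disable()/tasklet_enable() with an explicit lifetime: the tasklets are initialized at startup, and shutdown first raises the atomic tasklet_shutdown flag, masks device interrupts, waits with synchronize_irq(), and only then runs tasklet_kill(), so a late interrupt can no longer re-schedule a dying tasklet. A standalone C11 model of the guard; the flag-then-drain ordering is the point:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int tasklet_shutdown;
    static int scheduled;

    /* Gate every schedule request on the shutdown flag, as
     * atmel_tasklet_schedule() does above. */
    static void tasklet_schedule_guarded(void)
    {
        if (!atomic_load(&tasklet_shutdown))
            scheduled = 1;
    }

    /* Teardown order mirrors atmel_shutdown(): raise the flag first so
     * late interrupts cannot re-schedule, then drain what is pending. */
    static void shutdown(void)
    {
        atomic_store(&tasklet_shutdown, 1);
        /* ...mask device interrupts, synchronize_irq()... */
        scheduled = 0;          /* stands in for tasklet_kill() */
    }

    int main(void)
    {
        tasklet_schedule_guarded();
        printf("before shutdown: scheduled=%d\n", scheduled);

        shutdown();
        tasklet_schedule_guarded();     /* refused: flag already set */
        printf("after shutdown:  scheduled=%d\n", scheduled);
        return 0;
    }
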
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index c28e5c24d..5108fab95 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -813,8 +813,12 @@ static int bcm_uart_probe(struct platform_device *pdev)
struct clk *clk;
int ret;
- if (pdev->dev.of_node)
- pdev->id = of_alias_get_id(pdev->dev.of_node, "uart");
+ if (pdev->dev.of_node) {
+ pdev->id = of_alias_get_id(pdev->dev.of_node, "serial");
+
+ if (pdev->id < 0)
+ pdev->id = of_alias_get_id(pdev->dev.of_node, "uart");
+ }
if (pdev->id < 0 || pdev->id >= BCM63XX_NR_UARTS)
return -EINVAL;
diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c
index 5beafd2d2..ac1328629 100644
--- a/drivers/tty/serial/clps711x.c
+++ b/drivers/tty/serial/clps711x.c
@@ -539,7 +539,7 @@ static int uart_clps711x_remove(struct platform_device *pdev)
}
static const struct of_device_id __maybe_unused clps711x_uart_dt_ids[] = {
- { .compatible = "cirrus,clps711x-uart", },
+ { .compatible = "cirrus,ep7209-uart", },
{ }
};
MODULE_DEVICE_TABLE(of, clps711x_uart_dt_ids);
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 3d7900337..7f95f782a 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -1830,7 +1830,13 @@ static int lpuart_probe(struct platform_device *pdev)
sport->port.dev = &pdev->dev;
sport->port.type = PORT_LPUART;
sport->port.iotype = UPIO_MEM;
- sport->port.irq = platform_get_irq(pdev, 0);
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot obtain irq\n");
+ return ret;
+ }
+ sport->port.irq = ret;
+
if (sport->lpuart32)
sport->port.ops = &lpuart32_pops;
else
diff --git a/drivers/tty/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c
index 68765f7c2..218b7118e 100644
--- a/drivers/tty/serial/m32r_sio.c
+++ b/drivers/tty/serial/m32r_sio.c
@@ -30,7 +30,6 @@
#define SUPPORT_SYSRQ
#endif
-#include <linux/module.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/ioport.h>
@@ -51,9 +50,6 @@
#define PASS_LIMIT 256
-/* Standard COM flags */
-#define STD_COM_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST)
-
static const struct {
unsigned int port;
unsigned int irq;
@@ -892,7 +888,7 @@ static void __init m32r_sio_init_ports(void)
up->port.iobase = old_serial_port[i].port;
up->port.irq = irq_canonicalize(old_serial_port[i].irq);
up->port.uartclk = BAUD_RATE * 16;
- up->port.flags = STD_COM_FLAGS;
+ up->port.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST;
up->port.membase = 0;
up->port.iotype = 0;
up->port.regshift = 0;
@@ -1060,19 +1056,4 @@ static int __init m32r_sio_init(void)
return ret;
}
-
-static void __exit m32r_sio_exit(void)
-{
- int i;
-
- for (i = 0; i < UART_NR; i++)
- uart_remove_one_port(&m32r_sio_reg, &m32r_sio_ports[i].port);
-
- uart_unregister_driver(&m32r_sio_reg);
-}
-
-module_init(m32r_sio_init);
-module_exit(m32r_sio_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Generic M32R SIO serial driver");
+device_initcall(m32r_sio_init);
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 3f6e0ab72..9360801df 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1,7 +1,7 @@
/*
* Maxim (Dallas) MAX3107/8/9, MAX14830 serial driver
*
- * Copyright (C) 2012-2014 Alexander Shiyan <shc_work@mail.ru>
+ * Copyright (C) 2012-2016 Alexander Shiyan <shc_work@mail.ru>
*
* Based on max3100.c, by Christian Pellegrin <chripell@evolware.org>
* Based on max3110.c, by Feng Tang <feng.tang@intel.com>
@@ -32,6 +32,7 @@
#define MAX310X_NAME "max310x"
#define MAX310X_MAJOR 204
#define MAX310X_MINOR 209
+#define MAX310X_UART_NRMAX 16
/* MAX310X register definitions */
#define MAX310X_RHR_REG (0x00) /* RX FIFO */
@@ -155,10 +156,6 @@
#define MAX310X_LCR_FORCEPARITY_BIT (1 << 5) /* 9-bit multidrop parity */
#define MAX310X_LCR_TXBREAK_BIT (1 << 6) /* TX break enable */
#define MAX310X_LCR_RTS_BIT (1 << 7) /* RTS pin control */
-#define MAX310X_LCR_WORD_LEN_5 (0x00)
-#define MAX310X_LCR_WORD_LEN_6 (0x01)
-#define MAX310X_LCR_WORD_LEN_7 (0x02)
-#define MAX310X_LCR_WORD_LEN_8 (0x03)
/* IRDA register bits */
#define MAX310X_IRDA_IRDAEN_BIT (1 << 0) /* IRDA mode enable */
@@ -262,10 +259,10 @@ struct max310x_one {
struct uart_port port;
struct work_struct tx_work;
struct work_struct md_work;
+ struct work_struct rs_work;
};
struct max310x_port {
- struct uart_driver uart;
struct max310x_devtype *devtype;
struct regmap *regmap;
struct mutex mutex;
@@ -276,6 +273,17 @@ struct max310x_port {
struct max310x_one p[0];
};
+static struct uart_driver max310x_uart = {
+ .owner = THIS_MODULE,
+ .driver_name = MAX310X_NAME,
+ .dev_name = "ttyMAX",
+ .major = MAX310X_MAJOR,
+ .minor = MAX310X_MINOR,
+ .nr = MAX310X_UART_NRMAX,
+};
+
+static DECLARE_BITMAP(max310x_lines, MAX310X_UART_NRMAX);
+
static u8 max310x_port_read(struct uart_port *port, u8 reg)
{
struct max310x_port *s = dev_get_drvdata(port->dev);
@@ -594,9 +602,7 @@ static void max310x_handle_rx(struct uart_port *port, unsigned int rxlen)
unsigned int sts, ch, flag;
if (unlikely(rxlen >= port->fifosize)) {
- dev_warn_ratelimited(port->dev,
- "Port %i: Possible RX FIFO overrun\n",
- port->line);
+ dev_warn_ratelimited(port->dev, "Possible RX FIFO overrun\n");
port->icount.buf_overrun++;
/* Ensure sanity of RX level */
rxlen = port->fifosize;
@@ -715,13 +721,13 @@ static irqreturn_t max310x_ist(int irq, void *dev_id)
{
struct max310x_port *s = (struct max310x_port *)dev_id;
- if (s->uart.nr > 1) {
+ if (s->devtype->nr > 1) {
do {
unsigned int val = ~0;
WARN_ON_ONCE(regmap_read(s->regmap,
MAX310X_GLOBALIRQ_REG, &val));
- val = ((1 << s->uart.nr) - 1) & ~val;
+ val = ((1 << s->devtype->nr) - 1) & ~val;
if (!val)
break;
max310x_port_irq(s, fls(val) - 1);
@@ -796,7 +802,7 @@ static void max310x_set_termios(struct uart_port *port,
struct ktermios *termios,
struct ktermios *old)
{
- unsigned int lcr, flow = 0;
+ unsigned int lcr = 0, flow = 0;
int baud;
/* Mask termios capabilities we don't support */
@@ -805,17 +811,16 @@ static void max310x_set_termios(struct uart_port *port,
/* Word size */
switch (termios->c_cflag & CSIZE) {
case CS5:
- lcr = MAX310X_LCR_WORD_LEN_5;
break;
case CS6:
- lcr = MAX310X_LCR_WORD_LEN_6;
+ lcr = MAX310X_LCR_LENGTH0_BIT;
break;
case CS7:
- lcr = MAX310X_LCR_WORD_LEN_7;
+ lcr = MAX310X_LCR_LENGTH1_BIT;
break;
case CS8:
default:
- lcr = MAX310X_LCR_WORD_LEN_8;
+ lcr = MAX310X_LCR_LENGTH1_BIT | MAX310X_LCR_LENGTH0_BIT;
break;
}
@@ -877,36 +882,45 @@ static void max310x_set_termios(struct uart_port *port,
uart_update_timeout(port, termios->c_cflag, baud);
}
-static int max310x_rs485_config(struct uart_port *port,
- struct serial_rs485 *rs485)
+static void max310x_rs_proc(struct work_struct *ws)
{
+ struct max310x_one *one = container_of(ws, struct max310x_one, rs_work);
unsigned int val;
- if (rs485->delay_rts_before_send > 0x0f ||
- rs485->delay_rts_after_send > 0x0f)
- return -ERANGE;
+ val = (one->port.rs485.delay_rts_before_send << 4) |
+ one->port.rs485.delay_rts_after_send;
+ max310x_port_write(&one->port, MAX310X_HDPIXDELAY_REG, val);
- val = (rs485->delay_rts_before_send << 4) |
- rs485->delay_rts_after_send;
- max310x_port_write(port, MAX310X_HDPIXDELAY_REG, val);
- if (rs485->flags & SER_RS485_ENABLED) {
- max310x_port_update(port, MAX310X_MODE1_REG,
+ if (one->port.rs485.flags & SER_RS485_ENABLED) {
+ max310x_port_update(&one->port, MAX310X_MODE1_REG,
MAX310X_MODE1_TRNSCVCTRL_BIT,
MAX310X_MODE1_TRNSCVCTRL_BIT);
- max310x_port_update(port, MAX310X_MODE2_REG,
+ max310x_port_update(&one->port, MAX310X_MODE2_REG,
MAX310X_MODE2_ECHOSUPR_BIT,
MAX310X_MODE2_ECHOSUPR_BIT);
} else {
- max310x_port_update(port, MAX310X_MODE1_REG,
+ max310x_port_update(&one->port, MAX310X_MODE1_REG,
MAX310X_MODE1_TRNSCVCTRL_BIT, 0);
- max310x_port_update(port, MAX310X_MODE2_REG,
+ max310x_port_update(&one->port, MAX310X_MODE2_REG,
MAX310X_MODE2_ECHOSUPR_BIT, 0);
}
+}
+
+static int max310x_rs485_config(struct uart_port *port,
+ struct serial_rs485 *rs485)
+{
+ struct max310x_one *one = container_of(port, struct max310x_one, port);
+
+ if ((rs485->delay_rts_before_send > 0x0f) ||
+ (rs485->delay_rts_after_send > 0x0f))
+ return -ERANGE;
rs485->flags &= SER_RS485_RTS_ON_SEND | SER_RS485_ENABLED;
memset(rs485->padding, 0, sizeof(rs485->padding));
port->rs485 = *rs485;
+ schedule_work(&one->rs_work);
+
return 0;
}
@@ -1009,8 +1023,8 @@ static int __maybe_unused max310x_suspend(struct device *dev)
struct max310x_port *s = dev_get_drvdata(dev);
int i;
- for (i = 0; i < s->uart.nr; i++) {
- uart_suspend_port(&s->uart, &s->p[i].port);
+ for (i = 0; i < s->devtype->nr; i++) {
+ uart_suspend_port(&max310x_uart, &s->p[i].port);
s->devtype->power(&s->p[i].port, 0);
}
@@ -1022,9 +1036,9 @@ static int __maybe_unused max310x_resume(struct device *dev)
struct max310x_port *s = dev_get_drvdata(dev);
int i;
- for (i = 0; i < s->uart.nr; i++) {
+ for (i = 0; i < s->devtype->nr; i++) {
s->devtype->power(&s->p[i].port, 1);
- uart_resume_port(&s->uart, &s->p[i].port);
+ uart_resume_port(&max310x_uart, &s->p[i].port);
}
return 0;
@@ -1159,18 +1173,6 @@ static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
uartclk = max310x_set_ref_clk(s, freq, xtal);
dev_dbg(dev, "Reference clock set to %i Hz\n", uartclk);
- /* Register UART driver */
- s->uart.owner = THIS_MODULE;
- s->uart.dev_name = "ttyMAX";
- s->uart.major = MAX310X_MAJOR;
- s->uart.minor = MAX310X_MINOR;
- s->uart.nr = devtype->nr;
- ret = uart_register_driver(&s->uart);
- if (ret) {
- dev_err(dev, "Registering UART driver failed\n");
- goto out_clk;
- }
-
#ifdef CONFIG_GPIOLIB
/* Setup GPIO controller */
s->gpio.owner = THIS_MODULE;
@@ -1183,16 +1185,24 @@ static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
s->gpio.base = -1;
s->gpio.ngpio = devtype->nr * 4;
s->gpio.can_sleep = 1;
- ret = gpiochip_add_data(&s->gpio, s);
+ ret = devm_gpiochip_add_data(dev, &s->gpio, s);
if (ret)
- goto out_uart;
+ goto out_clk;
#endif
mutex_init(&s->mutex);
for (i = 0; i < devtype->nr; i++) {
+ unsigned int line;
+
+ line = find_first_zero_bit(max310x_lines, MAX310X_UART_NRMAX);
+ if (line == MAX310X_UART_NRMAX) {
+ ret = -ERANGE;
+ goto out_uart;
+ }
+
/* Initialize port data */
- s->p[i].port.line = i;
+ s->p[i].port.line = line;
s->p[i].port.dev = dev;
s->p[i].port.irq = irq;
s->p[i].port.type = PORT_MAX310X;
@@ -1214,10 +1224,19 @@ static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
MAX310X_MODE1_IRQSEL_BIT);
/* Initialize queue for start TX */
INIT_WORK(&s->p[i].tx_work, max310x_wq_proc);
- /* Initialize queue for changing mode */
+ /* Initialize queue for changing LOOPBACK mode */
INIT_WORK(&s->p[i].md_work, max310x_md_proc);
+ /* Initialize queue for changing RS485 mode */
+ INIT_WORK(&s->p[i].rs_work, max310x_rs_proc);
+
/* Register port */
- uart_add_one_port(&s->uart, &s->p[i].port);
+ ret = uart_add_one_port(&max310x_uart, &s->p[i].port);
+ if (ret) {
+ s->p[i].port.dev = NULL;
+ goto out_uart;
+ }
+ set_bit(line, max310x_lines);
+
/* Go to suspend mode */
devtype->power(&s->p[i].port, 0);
}
@@ -1230,14 +1249,15 @@ static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
dev_err(dev, "Unable to reguest IRQ %i\n", irq);
- mutex_destroy(&s->mutex);
-
-#ifdef CONFIG_GPIOLIB
- gpiochip_remove(&s->gpio);
-
out_uart:
-#endif
- uart_unregister_driver(&s->uart);
+ for (i = 0; i < devtype->nr; i++) {
+ if (s->p[i].port.dev) {
+ uart_remove_one_port(&max310x_uart, &s->p[i].port);
+ clear_bit(s->p[i].port.line, max310x_lines);
+ }
+ }
+
+ mutex_destroy(&s->mutex);
out_clk:
clk_disable_unprepare(s->clk);
@@ -1250,19 +1270,16 @@ static int max310x_remove(struct device *dev)
struct max310x_port *s = dev_get_drvdata(dev);
int i;
-#ifdef CONFIG_GPIOLIB
- gpiochip_remove(&s->gpio);
-#endif
-
- for (i = 0; i < s->uart.nr; i++) {
+ for (i = 0; i < s->devtype->nr; i++) {
cancel_work_sync(&s->p[i].tx_work);
cancel_work_sync(&s->p[i].md_work);
- uart_remove_one_port(&s->uart, &s->p[i].port);
+ cancel_work_sync(&s->p[i].rs_work);
+ uart_remove_one_port(&max310x_uart, &s->p[i].port);
+ clear_bit(s->p[i].port.line, max310x_lines);
s->devtype->power(&s->p[i].port, 0);
}
mutex_destroy(&s->mutex);
- uart_unregister_driver(&s->uart);
clk_disable_unprepare(s->clk);
return 0;
@@ -1335,7 +1352,7 @@ static const struct spi_device_id max310x_id_table[] = {
};
MODULE_DEVICE_TABLE(spi, max310x_id_table);
-static struct spi_driver max310x_uart_driver = {
+static struct spi_driver max310x_spi_driver = {
.driver = {
.name = MAX310X_NAME,
.of_match_table = of_match_ptr(max310x_dt_ids),
@@ -1345,9 +1362,36 @@ static struct spi_driver max310x_uart_driver = {
.remove = max310x_spi_remove,
.id_table = max310x_id_table,
};
-module_spi_driver(max310x_uart_driver);
#endif
+static int __init max310x_uart_init(void)
+{
+ int ret;
+
+ bitmap_zero(max310x_lines, MAX310X_UART_NRMAX);
+
+ ret = uart_register_driver(&max310x_uart);
+ if (ret)
+ return ret;
+
+#ifdef CONFIG_SPI_MASTER
+ spi_register_driver(&max310x_spi_driver);
+#endif
+
+ return 0;
+}
+module_init(max310x_uart_init);
+
+static void __exit max310x_uart_exit(void)
+{
+#ifdef CONFIG_SPI_MASTER
+ spi_unregister_driver(&max310x_spi_driver);
+#endif
+
+ uart_unregister_driver(&max310x_uart);
+}
+module_exit(max310x_uart_exit);
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
MODULE_DESCRIPTION("MAX310X serial driver");
diff --git a/drivers/tty/serial/mps2-uart.c b/drivers/tty/serial/mps2-uart.c
index da9e27d3c..492ec4b37 100644
--- a/drivers/tty/serial/mps2-uart.c
+++ b/drivers/tty/serial/mps2-uart.c
@@ -1,4 +1,6 @@
/*
+ * MPS2 UART driver
+ *
* Copyright (C) 2015 ARM Limited
*
* Author: Vladimir Murzin <vladimir.murzin@arm.com>
@@ -17,7 +19,6 @@
#include <linux/console.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -569,30 +570,20 @@ static int mps2_serial_probe(struct platform_device *pdev)
return 0;
}
-static int mps2_serial_remove(struct platform_device *pdev)
-{
- struct mps2_uart_port *mps_port = platform_get_drvdata(pdev);
-
- uart_remove_one_port(&mps2_uart_driver, &mps_port->port);
-
- return 0;
-}
-
#ifdef CONFIG_OF
static const struct of_device_id mps2_match[] = {
{ .compatible = "arm,mps2-uart", },
{},
};
-MODULE_DEVICE_TABLE(of, mps2_match);
#endif
static struct platform_driver mps2_serial_driver = {
.probe = mps2_serial_probe,
- .remove = mps2_serial_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(mps2_match),
+ .suppress_bind_attrs = true,
},
};
@@ -610,16 +601,4 @@ static int __init mps2_uart_init(void)
return ret;
}
-module_init(mps2_uart_init);
-
-static void __exit mps2_uart_exit(void)
-{
- platform_driver_unregister(&mps2_serial_driver);
- uart_unregister_driver(&mps2_uart_driver);
-}
-module_exit(mps2_uart_exit);
-
-MODULE_AUTHOR("Vladimir Murzin <vladimir.murzin@arm.com>");
-MODULE_DESCRIPTION("MPS2 UART driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:" DRIVER_NAME);
+arch_initcall(mps2_uart_init);
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 7d62610d9..7312e7e01 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -19,33 +19,147 @@
# define SUPPORT_SYSRQ
#endif
+#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
-#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/ioport.h>
-#include <linux/irq.h>
+#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
-#include <linux/serial.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_device.h>
-
-#include "msm_serial.h"
-
-#define UARTDM_BURST_SIZE 16 /* in bytes */
-#define UARTDM_TX_AIGN(x) ((x) & ~0x3) /* valid for > 1p3 */
-#define UARTDM_TX_MAX 256 /* in bytes, valid for <= 1p3 */
-#define UARTDM_RX_SIZE (UART_XMIT_SIZE / 4)
+#include <linux/wait.h>
+
+#define UART_MR1 0x0000
+
+#define UART_MR1_AUTO_RFR_LEVEL0 0x3F
+#define UART_MR1_AUTO_RFR_LEVEL1 0x3FF00
+#define UART_DM_MR1_AUTO_RFR_LEVEL1 0xFFFFFF00
+#define UART_MR1_RX_RDY_CTL BIT(7)
+#define UART_MR1_CTS_CTL BIT(6)
+
+#define UART_MR2 0x0004
+#define UART_MR2_ERROR_MODE BIT(6)
+#define UART_MR2_BITS_PER_CHAR 0x30
+#define UART_MR2_BITS_PER_CHAR_5 (0x0 << 4)
+#define UART_MR2_BITS_PER_CHAR_6 (0x1 << 4)
+#define UART_MR2_BITS_PER_CHAR_7 (0x2 << 4)
+#define UART_MR2_BITS_PER_CHAR_8 (0x3 << 4)
+#define UART_MR2_STOP_BIT_LEN_ONE (0x1 << 2)
+#define UART_MR2_STOP_BIT_LEN_TWO (0x3 << 2)
+#define UART_MR2_PARITY_MODE_NONE 0x0
+#define UART_MR2_PARITY_MODE_ODD 0x1
+#define UART_MR2_PARITY_MODE_EVEN 0x2
+#define UART_MR2_PARITY_MODE_SPACE 0x3
+#define UART_MR2_PARITY_MODE 0x3
+
+#define UART_CSR 0x0008
+
+#define UART_TF 0x000C
+#define UARTDM_TF 0x0070
+
+#define UART_CR 0x0010
+#define UART_CR_CMD_NULL (0 << 4)
+#define UART_CR_CMD_RESET_RX (1 << 4)
+#define UART_CR_CMD_RESET_TX (2 << 4)
+#define UART_CR_CMD_RESET_ERR (3 << 4)
+#define UART_CR_CMD_RESET_BREAK_INT (4 << 4)
+#define UART_CR_CMD_START_BREAK (5 << 4)
+#define UART_CR_CMD_STOP_BREAK (6 << 4)
+#define UART_CR_CMD_RESET_CTS (7 << 4)
+#define UART_CR_CMD_RESET_STALE_INT (8 << 4)
+#define UART_CR_CMD_PACKET_MODE (9 << 4)
+#define UART_CR_CMD_MODE_RESET (12 << 4)
+#define UART_CR_CMD_SET_RFR (13 << 4)
+#define UART_CR_CMD_RESET_RFR (14 << 4)
+#define UART_CR_CMD_PROTECTION_EN (16 << 4)
+#define UART_CR_CMD_STALE_EVENT_DISABLE (6 << 8)
+#define UART_CR_CMD_STALE_EVENT_ENABLE (80 << 4)
+#define UART_CR_CMD_FORCE_STALE (4 << 8)
+#define UART_CR_CMD_RESET_TX_READY (3 << 8)
+#define UART_CR_TX_DISABLE BIT(3)
+#define UART_CR_TX_ENABLE BIT(2)
+#define UART_CR_RX_DISABLE BIT(1)
+#define UART_CR_RX_ENABLE BIT(0)
+#define UART_CR_CMD_RESET_RXBREAK_START ((1 << 11) | (2 << 4))
+
+#define UART_IMR 0x0014
+#define UART_IMR_TXLEV BIT(0)
+#define UART_IMR_RXSTALE BIT(3)
+#define UART_IMR_RXLEV BIT(4)
+#define UART_IMR_DELTA_CTS BIT(5)
+#define UART_IMR_CURRENT_CTS BIT(6)
+#define UART_IMR_RXBREAK_START BIT(10)
+
+#define UART_IPR_RXSTALE_LAST 0x20
+#define UART_IPR_STALE_LSB 0x1F
+#define UART_IPR_STALE_TIMEOUT_MSB 0x3FF80
+#define UART_DM_IPR_STALE_TIMEOUT_MSB 0xFFFFFF80
+
+#define UART_IPR 0x0018
+#define UART_TFWR 0x001C
+#define UART_RFWR 0x0020
+#define UART_HCR 0x0024
+
+#define UART_MREG 0x0028
+#define UART_NREG 0x002C
+#define UART_DREG 0x0030
+#define UART_MNDREG 0x0034
+#define UART_IRDA 0x0038
+#define UART_MISR_MODE 0x0040
+#define UART_MISR_RESET 0x0044
+#define UART_MISR_EXPORT 0x0048
+#define UART_MISR_VAL 0x004C
+#define UART_TEST_CTRL 0x0050
+
+#define UART_SR 0x0008
+#define UART_SR_HUNT_CHAR BIT(7)
+#define UART_SR_RX_BREAK BIT(6)
+#define UART_SR_PAR_FRAME_ERR BIT(5)
+#define UART_SR_OVERRUN BIT(4)
+#define UART_SR_TX_EMPTY BIT(3)
+#define UART_SR_TX_READY BIT(2)
+#define UART_SR_RX_FULL BIT(1)
+#define UART_SR_RX_READY BIT(0)
+
+#define UART_RF 0x000C
+#define UARTDM_RF 0x0070
+#define UART_MISR 0x0010
+#define UART_ISR 0x0014
+#define UART_ISR_TX_READY BIT(7)
+
+#define UARTDM_RXFS 0x50
+#define UARTDM_RXFS_BUF_SHIFT 0x7
+#define UARTDM_RXFS_BUF_MASK 0x7
+
+#define UARTDM_DMEN 0x3C
+#define UARTDM_DMEN_RX_SC_ENABLE BIT(5)
+#define UARTDM_DMEN_TX_SC_ENABLE BIT(4)
+
+#define UARTDM_DMEN_TX_BAM_ENABLE BIT(2) /* UARTDM_1P4 */
+#define UARTDM_DMEN_TX_DM_ENABLE BIT(0) /* < UARTDM_1P4 */
+
+#define UARTDM_DMEN_RX_BAM_ENABLE BIT(3) /* UARTDM_1P4 */
+#define UARTDM_DMEN_RX_DM_ENABLE BIT(1) /* < UARTDM_1P4 */
+
+#define UARTDM_DMRX 0x34
+#define UARTDM_NCF_TX 0x40
+#define UARTDM_RX_TOTAL_SNAP 0x38
+
+#define UARTDM_BURST_SIZE 16 /* in bytes */
+#define UARTDM_TX_AIGN(x) ((x) & ~0x3) /* valid for > 1p3 */
+#define UARTDM_TX_MAX 256 /* in bytes, valid for <= 1p3 */
+#define UARTDM_RX_SIZE (UART_XMIT_SIZE / 4)
enum {
UARTDM_1P1 = 1,
@@ -78,10 +192,65 @@ struct msm_port {
struct msm_dma rx_dma;
};
+#define UART_TO_MSM(uart_port) container_of(uart_port, struct msm_port, uart)
+
+static
+void msm_write(struct uart_port *port, unsigned int val, unsigned int off)
+{
+ writel_relaxed(val, port->membase + off);
+}
+
+static
+unsigned int msm_read(struct uart_port *port, unsigned int off)
+{
+ return readl_relaxed(port->membase + off);
+}
+
+/*
+ * Setup the MND registers to use the TCXO clock.
+ */
+static void msm_serial_set_mnd_regs_tcxo(struct uart_port *port)
+{
+ msm_write(port, 0x06, UART_MREG);
+ msm_write(port, 0xF1, UART_NREG);
+ msm_write(port, 0x0F, UART_DREG);
+ msm_write(port, 0x1A, UART_MNDREG);
+ port->uartclk = 1843200;
+}
+
+/*
+ * Setup the MND registers to use the TCXO clock divided by 4.
+ */
+static void msm_serial_set_mnd_regs_tcxoby4(struct uart_port *port)
+{
+ msm_write(port, 0x18, UART_MREG);
+ msm_write(port, 0xF6, UART_NREG);
+ msm_write(port, 0x0F, UART_DREG);
+ msm_write(port, 0x0A, UART_MNDREG);
+ port->uartclk = 1843200;
+}
+
+static void msm_serial_set_mnd_regs(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ /*
+ * These registers don't exist, so we change the clk input rate
+ * on uartdm hardware instead.
+ */
+ if (msm_port->is_uartdm)
+ return;
+
+ if (port->uartclk == 19200000)
+ msm_serial_set_mnd_regs_tcxo(port);
+ else if (port->uartclk == 4800000)
+ msm_serial_set_mnd_regs_tcxoby4(port);
+}
+
static void msm_handle_tx(struct uart_port *port);
static void msm_start_rx_dma(struct msm_port *msm_port);
-void msm_stop_dma(struct uart_port *port, struct msm_dma *dma)
+static void msm_stop_dma(struct uart_port *port, struct msm_dma *dma)
{
struct device *dev = port->dev;
unsigned int mapped;
@@ -388,10 +557,6 @@ static void msm_complete_rx_dma(void *args)
val &= ~dma->enable_bit;
msm_write(port, val, UARTDM_DMEN);
- /* Restore interrupts */
- msm_port->imr |= UART_IMR_RXLEV | UART_IMR_RXSTALE;
- msm_write(port, msm_port->imr, UART_IMR);
-
if (msm_read(port, UART_SR) & UART_SR_OVERRUN) {
port->icount.overrun++;
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c
index 62a43bf56..7f8e99bbc 100644
--- a/drivers/tty/serial/pic32_uart.c
+++ b/drivers/tty/serial/pic32_uart.c
@@ -445,7 +445,6 @@ static int pic32_uart_startup(struct uart_port *port)
sport->idx);
if (!sport->irq_rx_name) {
dev_err(port->dev, "%s: kasprintf err!", __func__);
- kfree(sport->irq_fault_name);
ret = -ENOMEM;
goto out_f;
}
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index e156e39d6..b24b0556f 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -1720,7 +1720,7 @@ static int __init pmz_init_port(struct uart_pmac_port *uap)
r_ports = platform_get_resource(uap->pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(uap->pdev, 0);
- if (!r_ports || !irq)
+ if (!r_ports || irq <= 0)
return -ENODEV;
uap->port.mapbase = r_ports->start;
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index 41eab75ba..cd9d9e878 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -27,7 +27,6 @@
#define SUPPORT_SYSRQ
#endif
-#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
@@ -829,7 +828,6 @@ static const struct of_device_id serial_pxa_dt_ids[] = {
{ .compatible = "mrvl,mmp-uart", },
{}
};
-MODULE_DEVICE_TABLE(of, serial_pxa_dt_ids);
static int serial_pxa_probe_dt(struct platform_device *pdev,
struct uart_pxa_port *sport)
@@ -914,28 +912,15 @@ static int serial_pxa_probe(struct platform_device *dev)
return ret;
}
-static int serial_pxa_remove(struct platform_device *dev)
-{
- struct uart_pxa_port *sport = platform_get_drvdata(dev);
-
- uart_remove_one_port(&serial_pxa_reg, &sport->port);
-
- clk_unprepare(sport->clk);
- clk_put(sport->clk);
- kfree(sport);
-
- return 0;
-}
-
static struct platform_driver serial_pxa_driver = {
.probe = serial_pxa_probe,
- .remove = serial_pxa_remove,
.driver = {
.name = "pxa2xx-uart",
#ifdef CONFIG_PM
.pm = &serial_pxa_pm_ops,
#endif
+ .suppress_bind_attrs = true,
.of_match_table = serial_pxa_dt_ids,
},
};
@@ -954,15 +939,4 @@ static int __init serial_pxa_init(void)
return ret;
}
-
-static void __exit serial_pxa_exit(void)
-{
- platform_driver_unregister(&serial_pxa_driver);
- uart_unregister_driver(&serial_pxa_reg);
-}
-
-module_init(serial_pxa_init);
-module_exit(serial_pxa_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:pxa2xx-uart");
+device_initcall(serial_pxa_init);
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index f0bd2ec0d..ae2095a66 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -169,8 +169,7 @@ static void s3c24xx_serial_stop_tx(struct uart_port *port)
return;
if (s3c24xx_serial_has_interrupt_mask(port))
- __set_bit(S3C64XX_UINTM_TXD,
- portaddrl(port, S3C64XX_UINTM));
+ s3c24xx_set_bit(port, S3C64XX_UINTM_TXD, S3C64XX_UINTM);
else
disable_irq_nosync(ourport->tx_irq);
@@ -235,8 +234,7 @@ static void enable_tx_dma(struct s3c24xx_uart_port *ourport)
/* Mask Tx interrupt */
if (s3c24xx_serial_has_interrupt_mask(port))
- __set_bit(S3C64XX_UINTM_TXD,
- portaddrl(port, S3C64XX_UINTM));
+ s3c24xx_set_bit(port, S3C64XX_UINTM_TXD, S3C64XX_UINTM);
else
disable_irq_nosync(ourport->tx_irq);
@@ -269,8 +267,8 @@ static void enable_tx_pio(struct s3c24xx_uart_port *ourport)
/* Unmask Tx interrupt */
if (s3c24xx_serial_has_interrupt_mask(port))
- __clear_bit(S3C64XX_UINTM_TXD,
- portaddrl(port, S3C64XX_UINTM));
+ s3c24xx_clear_bit(port, S3C64XX_UINTM_TXD,
+ S3C64XX_UINTM);
else
enable_irq(ourport->tx_irq);
@@ -397,8 +395,8 @@ static void s3c24xx_serial_stop_rx(struct uart_port *port)
if (rx_enabled(port)) {
dbg("s3c24xx_serial_stop_rx: port=%p\n", port);
if (s3c24xx_serial_has_interrupt_mask(port))
- __set_bit(S3C64XX_UINTM_RXD,
- portaddrl(port, S3C64XX_UINTM));
+ s3c24xx_set_bit(port, S3C64XX_UINTM_RXD,
+ S3C64XX_UINTM);
else
disable_irq_nosync(ourport->rx_irq);
rx_enabled(port) = 0;
@@ -1069,7 +1067,7 @@ static int s3c64xx_serial_startup(struct uart_port *port)
spin_unlock_irqrestore(&port->lock, flags);
/* Enable Rx Interrupt */
- __clear_bit(S3C64XX_UINTM_RXD, portaddrl(port, S3C64XX_UINTM));
+ s3c24xx_clear_bit(port, S3C64XX_UINTM_RXD, S3C64XX_UINTM);
dbg("s3c64xx_serial_startup ok\n");
return ret;
@@ -1844,8 +1842,6 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
ourport->min_dma_size = max_t(int, ourport->port.fifosize,
dma_get_cache_alignment());
- probe_index++;
-
dbg("%s: initialising port %p...\n", __func__, ourport);
ret = s3c24xx_serial_init_port(ourport, pdev);
@@ -1875,6 +1871,8 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
if (ret < 0)
dev_err(&pdev->dev, "failed to add cpufreq notifier\n");
+ probe_index++;
+
return 0;
}
diff --git a/drivers/tty/serial/samsung.h b/drivers/tty/serial/samsung.h
index fc5deaa4f..2ae4fcee1 100644
--- a/drivers/tty/serial/samsung.h
+++ b/drivers/tty/serial/samsung.h
@@ -117,10 +117,38 @@ struct s3c24xx_uart_port {
#define portaddrl(port, reg) \
((unsigned long *)(unsigned long)((port)->membase + (reg)))
-#define rd_regb(port, reg) (__raw_readb(portaddr(port, reg)))
-#define rd_regl(port, reg) (__raw_readl(portaddr(port, reg)))
-
-#define wr_regb(port, reg, val) __raw_writeb(val, portaddr(port, reg))
-#define wr_regl(port, reg, val) __raw_writel(val, portaddr(port, reg))
+#define rd_regb(port, reg) (readb_relaxed(portaddr(port, reg)))
+#define rd_regl(port, reg) (readl_relaxed(portaddr(port, reg)))
+
+#define wr_regb(port, reg, val) writeb_relaxed(val, portaddr(port, reg))
+#define wr_regl(port, reg, val) writel_relaxed(val, portaddr(port, reg))
+
+/* Byte-order aware bit setting/clearing functions. */
+
+static inline void s3c24xx_set_bit(struct uart_port *port, int idx,
+ unsigned int reg)
+{
+ unsigned long flags;
+ u32 val;
+
+ local_irq_save(flags);
+ val = rd_regl(port, reg);
+ val |= (1 << idx);
+ wr_regl(port, reg, val);
+ local_irq_restore(flags);
+}
+
+static inline void s3c24xx_clear_bit(struct uart_port *port, int idx,
+ unsigned int reg)
+{
+ unsigned long flags;
+ u32 val;
+
+ local_irq_save(flags);
+ val = rd_regl(port, reg);
+ val &= ~(1 << idx);
+ wr_regl(port, reg, val);
+ local_irq_restore(flags);
+}
#endif
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index 1dba6719d..731ac35ac 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -1317,7 +1317,12 @@ static int tegra_uart_probe(struct platform_device *pdev)
}
u->iotype = UPIO_MEM32;
- u->irq = platform_get_irq(pdev, 0);
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Couldn't get IRQ\n");
+ return ret;
+ }
+ u->irq = ret;
u->regshift = 2;
ret = uart_add_one_port(&tegra_uart_driver, u);
if (ret < 0) {
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index a333c59cb..9fc15335c 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -887,7 +887,7 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
/*
* Free and release old regions
*/
- if (old_type != PORT_UNKNOWN)
+ if (old_type != PORT_UNKNOWN && uport->ops->release_port)
uport->ops->release_port(uport);
uport->iobase = new_port;
@@ -900,7 +900,7 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
/*
* Claim and map the new regions
*/
- if (uport->type != PORT_UNKNOWN) {
+ if (uport->type != PORT_UNKNOWN && uport->ops->request_port) {
retval = uport->ops->request_port(uport);
} else {
/* Always success - Jean II */
@@ -1125,7 +1125,7 @@ static int uart_do_autoconfig(struct tty_struct *tty,struct uart_state *state)
* If we already have a port type configured,
* we must release its resources.
*/
- if (uport->type != PORT_UNKNOWN)
+ if (uport->type != PORT_UNKNOWN && uport->ops->release_port)
uport->ops->release_port(uport);
flags = UART_CONFIG_TYPE;
@@ -2897,7 +2897,7 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
/*
* Free the port IO and memory resources, if any.
*/
- if (uport->type != PORT_UNKNOWN)
+ if (uport->type != PORT_UNKNOWN && uport->ops->release_port)
uport->ops->release_port(uport);
kfree(uport->tty_groups);
diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c
index e8dd5097d..d2da6aa7f 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.c
+++ b/drivers/tty/serial/serial_mctrl_gpio.c
@@ -52,6 +52,9 @@ void mctrl_gpio_set(struct mctrl_gpios *gpios, unsigned int mctrl)
int value_array[UART_GPIO_MAX];
unsigned int count = 0;
+ if (gpios == NULL)
+ return;
+
for (i = 0; i < UART_GPIO_MAX; i++)
if (gpios->gpio[i] && mctrl_gpios_desc[i].dir_out) {
desc_array[count] = gpios->gpio[i];
@@ -73,6 +76,9 @@ unsigned int mctrl_gpio_get(struct mctrl_gpios *gpios, unsigned int *mctrl)
{
enum mctrl_gpio_idx i;
+ if (gpios == NULL)
+ return *mctrl;
+
for (i = 0; i < UART_GPIO_MAX; i++) {
if (gpios->gpio[i] && !mctrl_gpios_desc[i].dir_out) {
if (gpiod_get_value(gpios->gpio[i]))
@@ -86,6 +92,27 @@ unsigned int mctrl_gpio_get(struct mctrl_gpios *gpios, unsigned int *mctrl)
}
EXPORT_SYMBOL_GPL(mctrl_gpio_get);
+unsigned int
+mctrl_gpio_get_outputs(struct mctrl_gpios *gpios, unsigned int *mctrl)
+{
+ enum mctrl_gpio_idx i;
+
+ if (gpios == NULL)
+ return *mctrl;
+
+ for (i = 0; i < UART_GPIO_MAX; i++) {
+ if (gpios->gpio[i] && mctrl_gpios_desc[i].dir_out) {
+ if (gpiod_get_value(gpios->gpio[i]))
+ *mctrl |= mctrl_gpios_desc[i].mctrl;
+ else
+ *mctrl &= ~mctrl_gpios_desc[i].mctrl;
+ }
+ }
+
+ return *mctrl;
+}
+EXPORT_SYMBOL_GPL(mctrl_gpio_get_outputs);
+
struct mctrl_gpios *mctrl_gpio_init_noauto(struct device *dev, unsigned int idx)
{
struct mctrl_gpios *gpios;
@@ -203,6 +230,9 @@ void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios)
{
enum mctrl_gpio_idx i;
+ if (gpios == NULL)
+ return;
+
for (i = 0; i < UART_GPIO_MAX; i++) {
if (gpios->irq[i])
devm_free_irq(gpios->port->dev, gpios->irq[i], gpios);
@@ -218,6 +248,9 @@ void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios)
{
enum mctrl_gpio_idx i;
+ if (gpios == NULL)
+ return;
+
/* .enable_ms may be called multiple times */
if (gpios->mctrl_on)
return;
@@ -240,6 +273,9 @@ void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios)
{
enum mctrl_gpio_idx i;
+ if (gpios == NULL)
+ return;
+
if (!gpios->mctrl_on)
return;
diff --git a/drivers/tty/serial/serial_mctrl_gpio.h b/drivers/tty/serial/serial_mctrl_gpio.h
index 332a33ab0..fa000bcff 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.h
+++ b/drivers/tty/serial/serial_mctrl_gpio.h
@@ -48,12 +48,19 @@ struct mctrl_gpios;
void mctrl_gpio_set(struct mctrl_gpios *gpios, unsigned int mctrl);
/*
- * Get state of the modem control output lines from GPIOs.
+ * Get state of the modem control input lines from GPIOs.
* The mctrl flags are updated and returned.
*/
unsigned int mctrl_gpio_get(struct mctrl_gpios *gpios, unsigned int *mctrl);
/*
+ * Get state of the modem control output lines from GPIOs.
+ * The mctrl flags are updated and returned.
+ */
+unsigned int
+mctrl_gpio_get_outputs(struct mctrl_gpios *gpios, unsigned int *mctrl);
+
+/*
* Returns the associated struct gpio_desc to the modem line gidx
*/
struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios,
@@ -107,6 +114,12 @@ unsigned int mctrl_gpio_get(struct mctrl_gpios *gpios, unsigned int *mctrl)
return *mctrl;
}
+static inline unsigned int
+mctrl_gpio_get_outputs(struct mctrl_gpios *gpios, unsigned int *mctrl)
+{
+ return *mctrl;
+}
+
static inline
struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios,
enum mctrl_gpio_idx gidx)
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 0130feb06..d86eee38a 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -57,6 +57,7 @@
#include <asm/sh_bios.h>
#endif
+#include "serial_mctrl_gpio.h"
#include "sh-sci.h"
/* Offsets into the sci_port->irqs array */
@@ -111,6 +112,7 @@ struct sci_port {
unsigned int error_clear;
unsigned int sampling_rate_mask;
resource_size_t reg_size;
+ struct mctrl_gpios *gpios;
/* Break timer */
struct timer_list break_timer;
@@ -139,6 +141,8 @@ struct sci_port {
struct timer_list rx_timer;
unsigned int rx_timeout;
#endif
+
+ bool autorts;
};
#define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS
@@ -701,7 +705,6 @@ static void sci_poll_put_char(struct uart_port *port, unsigned char c)
static void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
struct sci_port *s = to_sci_port(port);
- const struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + SCSPTR;
/*
* Use port-specific handler if provided.
@@ -711,21 +714,28 @@ static void sci_init_pins(struct uart_port *port, unsigned int cflag)
return;
}
- /*
- * For the generic path SCSPTR is necessary. Bail out if that's
- * unavailable, too.
- */
- if (!reg->size)
- return;
-
- if ((s->cfg->capabilities & SCIx_HAVE_RTSCTS) &&
- ((!(cflag & CRTSCTS)))) {
- unsigned short status;
+ if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
+ u16 ctrl = serial_port_in(port, SCPCR);
+
+ /* Enable RXD and TXD pin functions */
+ ctrl &= ~(SCPCR_RXDC | SCPCR_TXDC);
+ if (to_sci_port(port)->cfg->capabilities & SCIx_HAVE_RTSCTS) {
+ /* RTS# is output, driven 1 */
+ ctrl |= SCPCR_RTSC;
+ serial_port_out(port, SCPDR,
+ serial_port_in(port, SCPDR) | SCPDR_RTSD);
+ /* Enable CTS# pin function */
+ ctrl &= ~SCPCR_CTSC;
+ }
+ serial_port_out(port, SCPCR, ctrl);
+ } else if (sci_getreg(port, SCSPTR)->size) {
+ u16 status = serial_port_in(port, SCSPTR);
- status = serial_port_in(port, SCSPTR);
- status &= ~SCSPTR_CTSIO;
- status |= SCSPTR_RTSIO;
- serial_port_out(port, SCSPTR, status); /* Set RTS = 1 */
+ /* RTS# is output, driven 1 */
+ status |= SCSPTR_RTSIO | SCSPTR_RTSDT;
+ /* CTS# and SCK are inputs */
+ status &= ~(SCSPTR_CTSIO | SCSPTR_SCKIO);
+ serial_port_out(port, SCSPTR, status);
}
}
@@ -1803,6 +1813,46 @@ static unsigned int sci_tx_empty(struct uart_port *port)
return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
}
+static void sci_set_rts(struct uart_port *port, bool state)
+{
+ if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
+ u16 data = serial_port_in(port, SCPDR);
+
+ /* Active low */
+ if (state)
+ data &= ~SCPDR_RTSD;
+ else
+ data |= SCPDR_RTSD;
+ serial_port_out(port, SCPDR, data);
+
+ /* RTS# is output */
+ serial_port_out(port, SCPCR,
+ serial_port_in(port, SCPCR) | SCPCR_RTSC);
+ } else if (sci_getreg(port, SCSPTR)->size) {
+ u16 ctrl = serial_port_in(port, SCSPTR);
+
+ /* Active low */
+ if (state)
+ ctrl &= ~SCSPTR_RTSDT;
+ else
+ ctrl |= SCSPTR_RTSDT;
+ serial_port_out(port, SCSPTR, ctrl);
+ }
+}
+
+static bool sci_get_cts(struct uart_port *port)
+{
+ if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
+ /* Active low */
+ return !(serial_port_in(port, SCPDR) & SCPDR_CTSD);
+ } else if (sci_getreg(port, SCSPTR)->size) {
+ /* Active low */
+ return !(serial_port_in(port, SCSPTR) & SCSPTR_CTSDT);
+ }
+
+ return true;
+}
+
/*
* Modem control is a bit of a mixed bag for SCI(F) ports. Generally
* CTS/RTS is supported in hardware by at least one port and controlled
@@ -1817,6 +1867,8 @@ static unsigned int sci_tx_empty(struct uart_port *port)
*/
static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
+ struct sci_port *s = to_sci_port(port);
+
if (mctrl & TIOCM_LOOP) {
const struct plat_sci_reg *reg;
@@ -1829,25 +1881,72 @@ static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
serial_port_in(port, SCFCR) |
SCFCR_LOOP);
}
+
+ mctrl_gpio_set(s->gpios, mctrl);
+
+ if (!(s->cfg->capabilities & SCIx_HAVE_RTSCTS))
+ return;
+
+ if (!(mctrl & TIOCM_RTS)) {
+ /* Disable Auto RTS */
+ serial_port_out(port, SCFCR,
+ serial_port_in(port, SCFCR) & ~SCFCR_MCE);
+
+ /* Clear RTS */
+ sci_set_rts(port, 0);
+ } else if (s->autorts) {
+ if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
+ /* Enable RTS# pin function */
+ serial_port_out(port, SCPCR,
+ serial_port_in(port, SCPCR) & ~SCPCR_RTSC);
+ }
+
+ /* Enable Auto RTS */
+ serial_port_out(port, SCFCR,
+ serial_port_in(port, SCFCR) | SCFCR_MCE);
+ } else {
+ /* Set RTS */
+ sci_set_rts(port, 1);
+ }
}
static unsigned int sci_get_mctrl(struct uart_port *port)
{
+ struct sci_port *s = to_sci_port(port);
+ struct mctrl_gpios *gpios = s->gpios;
+ unsigned int mctrl = 0;
+
+ mctrl_gpio_get(gpios, &mctrl);
+
/*
* CTS/RTS is handled in hardware when supported, while nothing
- * else is wired up. Keep it simple and simply assert DSR/CAR.
+ * else is wired up.
*/
- return TIOCM_DSR | TIOCM_CAR;
+ if (s->autorts) {
+ if (sci_get_cts(port))
+ mctrl |= TIOCM_CTS;
+ } else if (IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(gpios, UART_GPIO_CTS))) {
+ mctrl |= TIOCM_CTS;
+ }
+ if (IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(gpios, UART_GPIO_DSR)))
+ mctrl |= TIOCM_DSR;
+ if (IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(gpios, UART_GPIO_DCD)))
+ mctrl |= TIOCM_CAR;
+
+ return mctrl;
+}
+
+static void sci_enable_ms(struct uart_port *port)
+{
+ mctrl_gpio_enable_ms(to_sci_port(port)->gpios);
}
static void sci_break_ctl(struct uart_port *port, int break_state)
{
- struct sci_port *s = to_sci_port(port);
- const struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + SCSPTR;
unsigned short scscr, scsptr;
/* check whether the port has SCSPTR */
- if (!reg->size) {
+ if (!sci_getreg(port, SCSPTR)->size) {
/*
* Not supported by hardware. Most parts couple break and rx
* interrupts together, with break detection always enabled.
@@ -1873,7 +1972,6 @@ static void sci_break_ctl(struct uart_port *port, int break_state)
static int sci_startup(struct uart_port *port)
{
struct sci_port *s = to_sci_port(port);
- unsigned long flags;
int ret;
dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
@@ -1884,11 +1982,6 @@ static int sci_startup(struct uart_port *port)
sci_request_dma(port);
- spin_lock_irqsave(&port->lock, flags);
- sci_start_tx(port);
- sci_start_rx(port);
- spin_unlock_irqrestore(&port->lock, flags);
-
return 0;
}
@@ -1896,12 +1989,19 @@ static void sci_shutdown(struct uart_port *port)
{
struct sci_port *s = to_sci_port(port);
unsigned long flags;
+ u16 scr;
dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
+ s->autorts = false;
+ mctrl_gpio_disable_ms(to_sci_port(port)->gpios);
+
spin_lock_irqsave(&port->lock, flags);
sci_stop_rx(port);
sci_stop_tx(port);
+ /* Stop RX and TX, disable related interrupts, keep clock source */
+ scr = serial_port_in(port, SCSCR);
+ serial_port_out(port, SCSCR, scr & (SCSCR_CKE1 | SCSCR_CKE0));
spin_unlock_irqrestore(&port->lock, flags);
#ifdef CONFIG_SERIAL_SH_SCI_DMA
@@ -2056,6 +2156,15 @@ static void sci_reset(struct uart_port *port)
reg = sci_getreg(port, SCFCR);
if (reg->size)
serial_port_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
+
+ sci_clear_SCxSR(port,
+ SCxSR_RDxF_CLEAR(port) & SCxSR_ERROR_CLEAR(port) &
+ SCxSR_BREAK_CLEAR(port));
+ if (sci_getreg(port, SCLSR)->size) {
+ status = serial_port_in(port, SCLSR);
+ status &= ~(SCLSR_TO | SCLSR_ORER);
+ serial_port_out(port, SCLSR, status);
+ }
}
static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
@@ -2218,15 +2327,18 @@ done:
sci_init_pins(port, termios->c_cflag);
+ port->status &= ~UPSTAT_AUTOCTS;
+ s->autorts = false;
reg = sci_getreg(port, SCFCR);
if (reg->size) {
unsigned short ctrl = serial_port_in(port, SCFCR);
- if (s->cfg->capabilities & SCIx_HAVE_RTSCTS) {
- if (termios->c_cflag & CRTSCTS)
- ctrl |= SCFCR_MCE;
- else
- ctrl &= ~SCFCR_MCE;
+ if ((port->flags & UPF_HARD_FLOW) &&
+ (termios->c_cflag & CRTSCTS)) {
+ /* There is no CTS interrupt to restart the hardware */
+ port->status |= UPSTAT_AUTOCTS;
+ /* MCE is enabled when RTS is raised */
+ s->autorts = true;
}
/*
@@ -2300,6 +2412,9 @@ done:
sci_start_rx(port);
sci_port_disable(s);
+
+ if (UART_ENABLE_MS(port, termios->c_cflag))
+ sci_enable_ms(port);
}
static void sci_pm(struct uart_port *port, unsigned int state,
@@ -2425,6 +2540,7 @@ static struct uart_ops sci_uart_ops = {
.start_tx = sci_start_tx,
.stop_tx = sci_stop_tx,
.stop_rx = sci_stop_rx,
+ .enable_ms = sci_enable_ms,
.break_ctl = sci_break_ctl,
.startup = sci_startup,
.shutdown = sci_shutdown,
@@ -2890,6 +3006,9 @@ sci_parse_dt(struct platform_device *pdev, unsigned int *dev_id)
p->regtype = SCI_OF_REGTYPE(match->data);
p->scscr = SCSCR_RE | SCSCR_TE;
+ if (of_find_property(np, "uart-has-rtscts", NULL))
+ p->capabilities |= SCIx_HAVE_RTSCTS;
+
return p;
}
@@ -2912,6 +3031,21 @@ static int sci_probe_single(struct platform_device *dev,
if (ret)
return ret;
+ sciport->gpios = mctrl_gpio_init(&sciport->port, 0);
+ if (IS_ERR(sciport->gpios) && PTR_ERR(sciport->gpios) != -ENOSYS)
+ return PTR_ERR(sciport->gpios);
+
+ if (p->capabilities & SCIx_HAVE_RTSCTS) {
+ if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(sciport->gpios,
+ UART_GPIO_CTS)) ||
+ !IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(sciport->gpios,
+ UART_GPIO_RTS))) {
+ dev_err(&dev->dev, "Conflicting RTS/CTS config\n");
+ return -EINVAL;
+ }
+ sciport->port.flags |= UPF_HARD_FLOW;
+ }
+
ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
if (ret) {
sci_cleanup_single(sciport);
diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h
index 7a4fa185b..ffa6d688c 100644
--- a/drivers/tty/serial/sh-sci.h
+++ b/drivers/tty/serial/sh-sci.h
@@ -105,13 +105,16 @@ enum {
#define SCFCR_LOOP BIT(0) /* Loopback Test */
/* SCLSR (Line Status Register) on (H)SCIF */
+#define SCLSR_TO BIT(2) /* Timeout */
#define SCLSR_ORER BIT(0) /* Overrun Error */
/* SCSPTR (Serial Port Register), optional */
-#define SCSPTR_RTSIO BIT(7) /* Serial Port RTS Pin Input/Output */
-#define SCSPTR_RTSDT BIT(6) /* Serial Port RTS Pin Data */
-#define SCSPTR_CTSIO BIT(5) /* Serial Port CTS Pin Input/Output */
-#define SCSPTR_CTSDT BIT(4) /* Serial Port CTS Pin Data */
+#define SCSPTR_RTSIO BIT(7) /* Serial Port RTS# Pin Input/Output */
+#define SCSPTR_RTSDT BIT(6) /* Serial Port RTS# Pin Data */
+#define SCSPTR_CTSIO BIT(5) /* Serial Port CTS# Pin Input/Output */
+#define SCSPTR_CTSDT BIT(4) /* Serial Port CTS# Pin Data */
+#define SCSPTR_SCKIO BIT(3) /* Serial Port Clock Pin Input/Output */
+#define SCSPTR_SCKDT BIT(2) /* Serial Port Clock Pin Data */
#define SCSPTR_SPB2IO BIT(1) /* Serial Port Break Input/Output */
#define SCSPTR_SPB2DT BIT(0) /* Serial Port Break Data */
@@ -119,12 +122,18 @@ enum {
#define HSCIF_SRE BIT(15) /* Sampling Rate Register Enable */
/* SCPCR (Serial Port Control Register), SCIFA/SCIFB only */
-#define SCPCR_RTSC BIT(4) /* Serial Port RTS Pin / Output Pin */
-#define SCPCR_CTSC BIT(3) /* Serial Port CTS Pin / Input Pin */
+#define SCPCR_RTSC BIT(4) /* Serial Port RTS# Pin / Output Pin */
+#define SCPCR_CTSC BIT(3) /* Serial Port CTS# Pin / Input Pin */
+#define SCPCR_SCKC BIT(2) /* Serial Port SCK Pin / Output Pin */
+#define SCPCR_RXDC BIT(1) /* Serial Port RXD Pin / Input Pin */
+#define SCPCR_TXDC BIT(0) /* Serial Port TXD Pin / Output Pin */
/* SCPDR (Serial Port Data Register), SCIFA/SCIFB only */
-#define SCPDR_RTSD BIT(4) /* Serial Port RTS Output Pin Data */
-#define SCPDR_CTSD BIT(3) /* Serial Port CTS Input Pin Data */
+#define SCPDR_RTSD BIT(4) /* Serial Port RTS# Output Pin Data */
+#define SCPDR_CTSD BIT(3) /* Serial Port CTS# Input Pin Data */
+#define SCPDR_SCKD BIT(2) /* Serial Port SCK Output Pin Data */
+#define SCPDR_RXDD BIT(1) /* Serial Port RXD Input Pin Data */
+#define SCPDR_TXDD BIT(0) /* Serial Port TXD Output Pin Data */
/*
* BRG Clock Select Register (Some SCIF and HSCIF)
diff --git a/drivers/tty/serial/sirfsoc_uart.h b/drivers/tty/serial/sirfsoc_uart.h
index c3a885b4d..43756bd91 100644
--- a/drivers/tty/serial/sirfsoc_uart.h
+++ b/drivers/tty/serial/sirfsoc_uart.h
@@ -106,7 +106,7 @@ struct sirfsoc_uart_register {
enum sirfsoc_uart_type uart_type;
};
-u32 uart_usp_ff_full_mask(struct uart_port *port)
+static u32 uart_usp_ff_full_mask(struct uart_port *port)
{
u32 full_bit;
@@ -114,7 +114,7 @@ u32 uart_usp_ff_full_mask(struct uart_port *port)
return (1 << full_bit);
}
-u32 uart_usp_ff_empty_mask(struct uart_port *port)
+static u32 uart_usp_ff_empty_mask(struct uart_port *port)
{
u32 empty_bit;
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index ca0d3802f..4e603d060 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -490,12 +490,6 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig
locked = spin_trylock_irqsave(&port->lock, flags);
else
spin_lock_irqsave(&port->lock, flags);
- if (port->sysrq) {
- locked = 0;
- } else if (oops_in_progress) {
- locked = spin_trylock(&port->lock);
- } else
- spin_lock(&port->lock);
for (i = 0; i < n; i++) {
if (*s == '\n')
diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
index b384060e3..23cfc5e16 100644
--- a/drivers/tty/serial/vt8500_serial.c
+++ b/drivers/tty/serial/vt8500_serial.c
@@ -21,7 +21,6 @@
#include <linux/hrtimer.h>
#include <linux/delay.h>
-#include <linux/module.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
@@ -730,22 +729,12 @@ static int vt8500_serial_probe(struct platform_device *pdev)
return 0;
}
-static int vt8500_serial_remove(struct platform_device *pdev)
-{
- struct vt8500_port *vt8500_port = platform_get_drvdata(pdev);
-
- clk_disable_unprepare(vt8500_port->clk);
- uart_remove_one_port(&vt8500_uart_driver, &vt8500_port->uart);
-
- return 0;
-}
-
static struct platform_driver vt8500_platform_driver = {
.probe = vt8500_serial_probe,
- .remove = vt8500_serial_remove,
.driver = {
.name = "vt8500_serial",
.of_match_table = wmt_dt_ids,
+ .suppress_bind_attrs = true,
},
};
@@ -764,19 +753,4 @@ static int __init vt8500_serial_init(void)
return ret;
}
-
-static void __exit vt8500_serial_exit(void)
-{
-#ifdef CONFIG_SERIAL_VT8500_CONSOLE
- unregister_console(&vt8500_console);
-#endif
- platform_driver_unregister(&vt8500_platform_driver);
- uart_unregister_driver(&vt8500_uart_driver);
-}
-
-module_init(vt8500_serial_init);
-module_exit(vt8500_serial_exit);
-
-MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com>");
-MODULE_DESCRIPTION("Driver for vt8500 serial device");
-MODULE_LICENSE("GPL v2");
+device_initcall(vt8500_serial_init);
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index cd46e64c4..9ca1a4d1b 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -976,6 +976,23 @@ static void cdns_uart_poll_put_char(struct uart_port *port, unsigned char c)
}
#endif
+static void cdns_uart_pm(struct uart_port *port, unsigned int state,
+ unsigned int oldstate)
+{
+ struct cdns_uart *cdns_uart = port->private_data;
+
+ switch (state) {
+ case UART_PM_STATE_OFF:
+ clk_disable(cdns_uart->uartclk);
+ clk_disable(cdns_uart->pclk);
+ break;
+ default:
+ clk_enable(cdns_uart->pclk);
+ clk_enable(cdns_uart->uartclk);
+ break;
+ }
+}
+
static struct uart_ops cdns_uart_ops = {
.set_mctrl = cdns_uart_set_mctrl,
.get_mctrl = cdns_uart_get_mctrl,
@@ -987,6 +1004,7 @@ static struct uart_ops cdns_uart_ops = {
.set_termios = cdns_uart_set_termios,
.startup = cdns_uart_startup,
.shutdown = cdns_uart_shutdown,
+ .pm = cdns_uart_pm,
.type = cdns_uart_type,
.verify_port = cdns_uart_verify_port,
.request_port = cdns_uart_request_port,
@@ -1350,12 +1368,12 @@ static int cdns_uart_probe(struct platform_device *pdev)
return PTR_ERR(cdns_uart_data->uartclk);
}
- rc = clk_prepare_enable(cdns_uart_data->pclk);
+ rc = clk_prepare(cdns_uart_data->pclk);
if (rc) {
dev_err(&pdev->dev, "Unable to enable pclk clock.\n");
return rc;
}
- rc = clk_prepare_enable(cdns_uart_data->uartclk);
+ rc = clk_prepare(cdns_uart_data->uartclk);
if (rc) {
dev_err(&pdev->dev, "Unable to enable device clock.\n");
goto err_out_clk_dis_pclk;
@@ -1422,9 +1440,9 @@ err_out_notif_unreg:
&cdns_uart_data->clk_rate_change_nb);
#endif
err_out_clk_disable:
- clk_disable_unprepare(cdns_uart_data->uartclk);
+ clk_unprepare(cdns_uart_data->uartclk);
err_out_clk_dis_pclk:
- clk_disable_unprepare(cdns_uart_data->pclk);
+ clk_unprepare(cdns_uart_data->pclk);
return rc;
}
@@ -1448,8 +1466,8 @@ static int cdns_uart_remove(struct platform_device *pdev)
#endif
rc = uart_remove_one_port(&cdns_uart_uart_driver, port);
port->mapbase = 0;
- clk_disable_unprepare(cdns_uart_data->uartclk);
- clk_disable_unprepare(cdns_uart_data->pclk);
+ clk_unprepare(cdns_uart_data->uartclk);
+ clk_unprepare(cdns_uart_data->pclk);
return rc;
}
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index e5139402e..52bbd27e9 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -363,6 +363,7 @@ static void moom_callback(struct work_struct *ignored)
struct oom_control oc = {
.zonelist = node_zonelist(first_memory_node, gfp_mask),
.nodemask = NULL,
+ .memcg = NULL,
.gfp_mask = gfp_mask,
.order = -1,
};
diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
index c8c91f047..9d7ab7b66 100644
--- a/drivers/tty/vt/consolemap.c
+++ b/drivers/tty/vt/consolemap.c
@@ -499,9 +499,8 @@ con_insert_unipair(struct uni_pagedir *p, u_short unicode, u_short fontpos)
return 0;
}
-/* ui is a leftover from using a hashtable, but might be used again
- Caller must hold the lock */
-static int con_do_clear_unimap(struct vc_data *vc, struct unimapinit *ui)
+/* Caller must hold the lock */
+static int con_do_clear_unimap(struct vc_data *vc)
{
struct uni_pagedir *p, *q;
@@ -524,11 +523,11 @@ static int con_do_clear_unimap(struct vc_data *vc, struct unimapinit *ui)
return 0;
}
-int con_clear_unimap(struct vc_data *vc, struct unimapinit *ui)
+int con_clear_unimap(struct vc_data *vc)
{
int ret;
console_lock();
- ret = con_do_clear_unimap(vc, ui);
+ ret = con_do_clear_unimap(vc);
console_unlock();
return ret;
}
@@ -556,7 +555,7 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
int j, k;
u16 **p1, *p2, l;
- err1 = con_do_clear_unimap(vc, NULL);
+ err1 = con_do_clear_unimap(vc);
if (err1) {
console_unlock();
return err1;
@@ -677,7 +676,7 @@ int con_set_default_unimap(struct vc_data *vc)
/* The default font is always 256 characters */
- err = con_do_clear_unimap(vc, NULL);
+ err = con_do_clear_unimap(vc);
if (err)
return err;
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index 1e93a37e2..0f8caae42 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -567,7 +567,7 @@ static void fn_scroll_forw(struct vc_data *vc)
static void fn_scroll_back(struct vc_data *vc)
{
- scrollback(vc, 0);
+ scrollback(vc);
}
static void fn_show_mem(struct vc_data *vc)
@@ -1733,16 +1733,10 @@ int vt_do_diacrit(unsigned int cmd, void __user *udp, int perm)
return -EINVAL;
if (ct) {
- buf = kmalloc(ct * sizeof(struct kbdiacruc),
- GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- if (copy_from_user(buf, a->kbdiacruc,
- ct * sizeof(struct kbdiacruc))) {
- kfree(buf);
- return -EFAULT;
- }
+ buf = memdup_user(a->kbdiacruc,
+ ct * sizeof(struct kbdiacruc));
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
}
spin_lock_irqsave(&kbd_event_lock, flags);
if (ct)
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 5b0fe97c4..2705ca960 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -277,13 +277,15 @@ static void notify_update(struct vc_data *vc)
* Low-Level Functions
*/
-#define IS_FG(vc) ((vc)->vc_num == fg_console)
+static inline bool con_is_fg(const struct vc_data *vc)
+{
+ return vc->vc_num == fg_console;
+}
-#ifdef VT_BUF_VRAM_ONLY
-#define DO_UPDATE(vc) 0
-#else
-#define DO_UPDATE(vc) (CON_IS_VISIBLE(vc) && !console_blanked)
-#endif
+static inline bool con_should_update(const struct vc_data *vc)
+{
+ return con_is_visible(vc) && !console_blanked;
+}
static inline unsigned short *screenpos(struct vc_data *vc, int offset, int viewed)
{
@@ -321,7 +323,7 @@ static void scrup(struct vc_data *vc, unsigned int t, unsigned int b, int nr)
nr = b - t - 1;
if (b > vc->vc_rows || t >= b || nr < 1)
return;
- if (CON_IS_VISIBLE(vc) && vc->vc_sw->con_scroll(vc, t, b, SM_UP, nr))
+ if (con_is_visible(vc) && vc->vc_sw->con_scroll(vc, t, b, SM_UP, nr))
return;
d = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t);
s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * (t + nr));
@@ -339,7 +341,7 @@ static void scrdown(struct vc_data *vc, unsigned int t, unsigned int b, int nr)
nr = b - t - 1;
if (b > vc->vc_rows || t >= b || nr < 1)
return;
- if (CON_IS_VISIBLE(vc) && vc->vc_sw->con_scroll(vc, t, b, SM_DOWN, nr))
+ if (con_is_visible(vc) && vc->vc_sw->con_scroll(vc, t, b, SM_DOWN, nr))
return;
s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t);
step = vc->vc_cols * nr;
@@ -349,7 +351,6 @@ static void scrdown(struct vc_data *vc, unsigned int t, unsigned int b, int nr)
static void do_update_region(struct vc_data *vc, unsigned long start, int count)
{
-#ifndef VT_BUF_VRAM_ONLY
unsigned int xx, yy, offset;
u16 *p;
@@ -390,14 +391,13 @@ static void do_update_region(struct vc_data *vc, unsigned long start, int count)
start = vc->vc_sw->con_getxy(vc, start, NULL, NULL);
}
}
-#endif
}
void update_region(struct vc_data *vc, unsigned long start, int count)
{
WARN_CONSOLE_UNLOCKED();
- if (DO_UPDATE(vc)) {
+ if (con_should_update(vc)) {
hide_cursor(vc);
do_update_region(vc, start, count);
set_cursor(vc);
@@ -413,7 +413,6 @@ static u8 build_attr(struct vc_data *vc, u8 _color, u8 _intensity, u8 _blink,
return vc->vc_sw->con_build_attr(vc, _color, _intensity,
_blink, _underline, _reverse, _italic);
-#ifndef VT_BUF_VRAM_ONLY
/*
* ++roman: I completely changed the attribute format for monochrome
* mode (!can_do_color). The formerly used MDA (monochrome display
@@ -448,9 +447,6 @@ static u8 build_attr(struct vc_data *vc, u8 _color, u8 _intensity, u8 _blink,
a <<= 1;
return a;
}
-#else
- return 0;
-#endif
}
static void update_attr(struct vc_data *vc)
@@ -470,10 +466,9 @@ void invert_screen(struct vc_data *vc, int offset, int count, int viewed)
count /= 2;
p = screenpos(vc, offset, viewed);
- if (vc->vc_sw->con_invert_region)
+ if (vc->vc_sw->con_invert_region) {
vc->vc_sw->con_invert_region(vc, p, count);
-#ifndef VT_BUF_VRAM_ONLY
- else {
+ } else {
u16 *q = p;
int cnt = count;
u16 a;
@@ -501,8 +496,8 @@ void invert_screen(struct vc_data *vc, int offset, int count, int viewed)
}
}
}
-#endif
- if (DO_UPDATE(vc))
+
+ if (con_should_update(vc))
do_update_region(vc, (unsigned long) p, count);
notify_update(vc);
}
@@ -519,7 +514,7 @@ void complement_pos(struct vc_data *vc, int offset)
if (old_offset != -1 && old_offset >= 0 &&
old_offset < vc->vc_screenbuf_size) {
scr_writew(old, screenpos(vc, old_offset, 1));
- if (DO_UPDATE(vc))
+ if (con_should_update(vc))
vc->vc_sw->con_putc(vc, old, oldy, oldx);
notify_update(vc);
}
@@ -534,7 +529,7 @@ void complement_pos(struct vc_data *vc, int offset)
old = scr_readw(p);
new = old ^ vc->vc_complement_mask;
scr_writew(new, p);
- if (DO_UPDATE(vc)) {
+ if (con_should_update(vc)) {
oldx = (offset >> 1) % vc->vc_cols;
oldy = (offset >> 1) / vc->vc_cols;
vc->vc_sw->con_putc(vc, new, oldy, oldx);
@@ -550,7 +545,7 @@ static void insert_char(struct vc_data *vc, unsigned int nr)
scr_memmovew(p + nr, p, (vc->vc_cols - vc->vc_x - nr) * 2);
scr_memsetw(p, vc->vc_video_erase_char, nr * 2);
vc->vc_need_wrap = 0;
- if (DO_UPDATE(vc))
+ if (con_should_update(vc))
do_update_region(vc, (unsigned long) p,
vc->vc_cols - vc->vc_x);
}
@@ -563,7 +558,7 @@ static void delete_char(struct vc_data *vc, unsigned int nr)
scr_memsetw(p + vc->vc_cols - vc->vc_x - nr, vc->vc_video_erase_char,
nr * 2);
vc->vc_need_wrap = 0;
- if (DO_UPDATE(vc))
+ if (con_should_update(vc))
do_update_region(vc, (unsigned long) p,
vc->vc_cols - vc->vc_x);
}
@@ -583,7 +578,7 @@ static void add_softcursor(struct vc_data *vc)
if ((type & 0x20) && ((softcursor_original & 0x7000) == (i & 0x7000))) i ^= 0x7000;
if ((type & 0x40) && ((i & 0x700) == ((i & 0x7000) >> 4))) i ^= 0x0700;
scr_writew(i, (u16 *) vc->vc_pos);
- if (DO_UPDATE(vc))
+ if (con_should_update(vc))
vc->vc_sw->con_putc(vc, i, vc->vc_y, vc->vc_x);
}
@@ -591,7 +586,7 @@ static void hide_softcursor(struct vc_data *vc)
{
if (softcursor_original != -1) {
scr_writew(softcursor_original, (u16 *)vc->vc_pos);
- if (DO_UPDATE(vc))
+ if (con_should_update(vc))
vc->vc_sw->con_putc(vc, softcursor_original,
vc->vc_y, vc->vc_x);
softcursor_original = -1;
@@ -608,8 +603,7 @@ static void hide_cursor(struct vc_data *vc)
static void set_cursor(struct vc_data *vc)
{
- if (!IS_FG(vc) || console_blanked ||
- vc->vc_mode == KD_GRAPHICS)
+ if (!con_is_fg(vc) || console_blanked || vc->vc_mode == KD_GRAPHICS)
return;
if (vc->vc_deccm) {
if (vc == sel_cons)
@@ -625,7 +619,7 @@ static void set_origin(struct vc_data *vc)
{
WARN_CONSOLE_UNLOCKED();
- if (!CON_IS_VISIBLE(vc) ||
+ if (!con_is_visible(vc) ||
!vc->vc_sw->con_set_origin ||
!vc->vc_sw->con_set_origin(vc))
vc->vc_origin = (unsigned long)vc->vc_screenbuf;
@@ -673,12 +667,12 @@ void redraw_screen(struct vc_data *vc, int is_switch)
struct vc_data *old_vc = vc_cons[fg_console].d;
if (old_vc == vc)
return;
- if (!CON_IS_VISIBLE(vc))
+ if (!con_is_visible(vc))
redraw = 1;
*vc->vc_display_fg = vc;
fg_console = vc->vc_num;
hide_cursor(old_vc);
- if (!CON_IS_VISIBLE(old_vc)) {
+ if (!con_is_visible(old_vc)) {
save_screen(old_vc);
set_origin(old_vc);
}
@@ -954,7 +948,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
tty_do_resize(tty, &ws);
}
- if (CON_IS_VISIBLE(vc))
+ if (con_is_visible(vc))
update_screen(vc);
vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num);
return err;
@@ -1103,11 +1097,9 @@ static void gotoxay(struct vc_data *vc, int new_x, int new_y)
gotoxy(vc, new_x, vc->vc_decom ? (vc->vc_top + new_y) : new_y);
}
-void scrollback(struct vc_data *vc, int lines)
+void scrollback(struct vc_data *vc)
{
- if (!lines)
- lines = vc->vc_rows / 2;
- scrolldelta(-lines);
+ scrolldelta(-(vc->vc_rows / 2));
}
void scrollfront(struct vc_data *vc, int lines)
@@ -1186,7 +1178,7 @@ static void csi_J(struct vc_data *vc, int vpar)
scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char,
vc->vc_screenbuf_size >> 1);
set_origin(vc);
- if (CON_IS_VISIBLE(vc))
+ if (con_is_visible(vc))
update_screen(vc);
/* fall through */
case 2: /* erase whole display */
@@ -1197,7 +1189,7 @@ static void csi_J(struct vc_data *vc, int vpar)
return;
}
scr_memsetw(start, vc->vc_video_erase_char, 2 * count);
- if (DO_UPDATE(vc))
+ if (con_should_update(vc))
do_update_region(vc, (unsigned long) start, count);
vc->vc_need_wrap = 0;
}
@@ -1225,7 +1217,7 @@ static void csi_K(struct vc_data *vc, int vpar)
}
scr_memsetw(start, vc->vc_video_erase_char, 2 * count);
vc->vc_need_wrap = 0;
- if (DO_UPDATE(vc))
+ if (con_should_update(vc))
do_update_region(vc, (unsigned long) start, count);
}
@@ -1238,7 +1230,7 @@ static void csi_X(struct vc_data *vc, int vpar) /* erase the following vpar posi
count = (vpar > vc->vc_cols - vc->vc_x) ? (vc->vc_cols - vc->vc_x) : vpar;
scr_memsetw((unsigned short *)vc->vc_pos, vc->vc_video_erase_char, 2 * count);
- if (DO_UPDATE(vc))
+ if (con_should_update(vc))
vc->vc_sw->con_clear(vc, vc->vc_y, vc->vc_x, 1, count);
vc->vc_need_wrap = 0;
}
@@ -1255,48 +1247,87 @@ static void default_attr(struct vc_data *vc)
struct rgb { u8 r; u8 g; u8 b; };
-static struct rgb rgb_from_256(int i)
+static void rgb_from_256(int i, struct rgb *c)
{
- struct rgb c;
if (i < 8) { /* Standard colours. */
- c.r = i&1 ? 0xaa : 0x00;
- c.g = i&2 ? 0xaa : 0x00;
- c.b = i&4 ? 0xaa : 0x00;
+ c->r = i&1 ? 0xaa : 0x00;
+ c->g = i&2 ? 0xaa : 0x00;
+ c->b = i&4 ? 0xaa : 0x00;
} else if (i < 16) {
- c.r = i&1 ? 0xff : 0x55;
- c.g = i&2 ? 0xff : 0x55;
- c.b = i&4 ? 0xff : 0x55;
+ c->r = i&1 ? 0xff : 0x55;
+ c->g = i&2 ? 0xff : 0x55;
+ c->b = i&4 ? 0xff : 0x55;
} else if (i < 232) { /* 6x6x6 colour cube. */
- c.r = (i - 16) / 36 * 85 / 2;
- c.g = (i - 16) / 6 % 6 * 85 / 2;
- c.b = (i - 16) % 6 * 85 / 2;
+ c->r = (i - 16) / 36 * 85 / 2;
+ c->g = (i - 16) / 6 % 6 * 85 / 2;
+ c->b = (i - 16) % 6 * 85 / 2;
} else /* Grayscale ramp. */
- c.r = c.g = c.b = i * 10 - 2312;
- return c;
+ c->r = c->g = c->b = i * 10 - 2312;
}
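
As a worked example of the integer arithmetic above (palette indices per the usual xterm 256-colour numbering):

/* Index 196 lies in the 6x6x6 cube:
 *   r = (196 - 16) / 36 * 85 / 2    = 5 * 85 / 2 = 212
 *   g = (196 - 16) / 6 % 6 * 85 / 2 = 0
 *   b = (196 - 16) % 6 * 85 / 2     = 0
 * so rgb_from_256(196, &c) leaves c = { 212, 0, 0 } (a bright red).
 */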
-static void rgb_foreground(struct vc_data *vc, struct rgb c)
+static void rgb_foreground(struct vc_data *vc, const struct rgb *c)
{
- u8 hue, max = c.r;
- if (c.g > max)
- max = c.g;
- if (c.b > max)
- max = c.b;
- hue = (c.r > max/2 ? 4 : 0)
- | (c.g > max/2 ? 2 : 0)
- | (c.b > max/2 ? 1 : 0);
- if (hue == 7 && max <= 0x55)
- hue = 0, vc->vc_intensity = 2;
+ u8 hue = 0, max = max3(c->r, c->g, c->b);
+
+ if (c->r > max / 2)
+ hue |= 4;
+ if (c->g > max / 2)
+ hue |= 2;
+ if (c->b > max / 2)
+ hue |= 1;
+
+ if (hue == 7 && max <= 0x55) {
+ hue = 0;
+ vc->vc_intensity = 2;
+ } else if (max > 0xaa)
+ vc->vc_intensity = 2;
else
- vc->vc_intensity = (max > 0xaa) + 1;
+ vc->vc_intensity = 1;
+
vc->vc_color = (vc->vc_color & 0xf0) | hue;
}
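
The hue == 7 special case above is easiest to see with dark grey:

/* c = { 0x55, 0x55, 0x55 }: max = 0x55 and every channel exceeds max / 2,
 * so hue starts out as 7 (white); because max <= 0x55, the special case
 * remaps it to hue 0 with intensity 2, i.e. "bright black", the closest
 * of the 16 console colours.
 */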
-static void rgb_background(struct vc_data *vc, struct rgb c)
+static void rgb_background(struct vc_data *vc, const struct rgb *c)
{
/* For backgrounds, err on the dark side. */
vc->vc_color = (vc->vc_color & 0x0f)
- | (c.r&0x80) >> 1 | (c.g&0x80) >> 2 | (c.b&0x80) >> 3;
+ | (c->r&0x80) >> 1 | (c->g&0x80) >> 2 | (c->b&0x80) >> 3;
+}
+
+/*
+ * ITU T.416 Higher colour modes. They break the usual properties of SGR codes
+ * and thus need to be detected and ignored by hand. Strictly speaking, that
+ * standard also wants : rather than ; as separators, contrary to ECMA-48, but
+ * no one produces such codes and almost no one accepts them.
+ *
+ * Subcommands 3 (CMY) and 4 (CMYK) are so insane there's no point in
+ * supporting them.
+ */
+static int vc_t416_color(struct vc_data *vc, int i,
+ void(*set_color)(struct vc_data *vc, const struct rgb *c))
+{
+ struct rgb c;
+
+ i++;
+ if (i > vc->vc_npar)
+ return i;
+
+ if (vc->vc_par[i] == 5 && i < vc->vc_npar) {
+ /* 256 colours -- ubiquitous */
+ i++;
+ rgb_from_256(vc->vc_par[i], &c);
+ } else if (vc->vc_par[i] == 2 && i <= vc->vc_npar + 3) {
+ /* 24 bit -- extremely rare */
+ c.r = vc->vc_par[i + 1];
+ c.g = vc->vc_par[i + 2];
+ c.b = vc->vc_par[i + 3];
+ i += 3;
+ } else
+ return i;
+
+ set_color(vc, &c);
+
+ return i;
}
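
For reference, a sketch of how SGR parameters line up with vc_par[] when the new helper runs (the escape sequences are illustrative):

/* "\033[38;5;196m"       -> vc_par = { 38, 5, 196 },         vc_npar = 2
 * "\033[48;2;0;128;255m" -> vc_par = { 48, 2, 0, 128, 255 }, vc_npar = 4
 * vc_t416_color() is entered with i indexing the 38/48 slot and returns
 * the index of the last parameter it consumed, so the caller's loop
 * resumes after the colour arguments.
 */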
/* console_lock is held */
@@ -1306,135 +1337,91 @@ static void csi_m(struct vc_data *vc)
for (i = 0; i <= vc->vc_npar; i++)
switch (vc->vc_par[i]) {
- case 0: /* all attributes off */
- default_attr(vc);
- break;
- case 1:
- vc->vc_intensity = 2;
- break;
- case 2:
- vc->vc_intensity = 0;
- break;
- case 3:
- vc->vc_italic = 1;
- break;
- case 4:
- vc->vc_underline = 1;
- break;
- case 5:
- vc->vc_blink = 1;
- break;
- case 7:
- vc->vc_reverse = 1;
- break;
- case 10: /* ANSI X3.64-1979 (SCO-ish?)
- * Select primary font, don't display
- * control chars if defined, don't set
- * bit 8 on output.
- */
- vc->vc_translate = set_translate(vc->vc_charset == 0
- ? vc->vc_G0_charset
- : vc->vc_G1_charset, vc);
- vc->vc_disp_ctrl = 0;
- vc->vc_toggle_meta = 0;
- break;
- case 11: /* ANSI X3.64-1979 (SCO-ish?)
- * Select first alternate font, lets
- * chars < 32 be displayed as ROM chars.
- */
- vc->vc_translate = set_translate(IBMPC_MAP, vc);
- vc->vc_disp_ctrl = 1;
- vc->vc_toggle_meta = 0;
- break;
- case 12: /* ANSI X3.64-1979 (SCO-ish?)
- * Select second alternate font, toggle
- * high bit before displaying as ROM char.
- */
- vc->vc_translate = set_translate(IBMPC_MAP, vc);
- vc->vc_disp_ctrl = 1;
- vc->vc_toggle_meta = 1;
- break;
- case 21:
- case 22:
- vc->vc_intensity = 1;
- break;
- case 23:
- vc->vc_italic = 0;
- break;
- case 24:
- vc->vc_underline = 0;
- break;
- case 25:
- vc->vc_blink = 0;
- break;
- case 27:
- vc->vc_reverse = 0;
- break;
- case 38: /* ITU T.416
- * Higher colour modes.
- * They break the usual properties of SGR codes
- * and thus need to be detected and ignored by
- * hand. Strictly speaking, that standard also
- * wants : rather than ; as separators, contrary
- * to ECMA-48, but no one produces such codes
- * and almost no one accepts them.
- */
- i++;
- if (i > vc->vc_npar)
- break;
- if (vc->vc_par[i] == 5 && /* 256 colours */
- i < vc->vc_npar) { /* ubiquitous */
- i++;
- rgb_foreground(vc,
- rgb_from_256(vc->vc_par[i]));
- } else if (vc->vc_par[i] == 2 && /* 24 bit */
- i <= vc->vc_npar + 3) {/* extremely rare */
- struct rgb c = {
- .r = vc->vc_par[i + 1],
- .g = vc->vc_par[i + 2],
- .b = vc->vc_par[i + 3],
- };
- rgb_foreground(vc, c);
- i += 3;
- }
- /* Subcommands 3 (CMY) and 4 (CMYK) are so insane
- * there's no point in supporting them.
- */
- break;
- case 48:
- i++;
- if (i > vc->vc_npar)
- break;
- if (vc->vc_par[i] == 5 && /* 256 colours */
- i < vc->vc_npar) {
- i++;
- rgb_background(vc,
- rgb_from_256(vc->vc_par[i]));
- } else if (vc->vc_par[i] == 2 && /* 24 bit */
- i <= vc->vc_npar + 3) {
- struct rgb c = {
- .r = vc->vc_par[i + 1],
- .g = vc->vc_par[i + 2],
- .b = vc->vc_par[i + 3],
- };
- rgb_background(vc, c);
- i += 3;
- }
- break;
- case 39:
- vc->vc_color = (vc->vc_def_color & 0x0f) | (vc->vc_color & 0xf0);
- break;
- case 49:
- vc->vc_color = (vc->vc_def_color & 0xf0) | (vc->vc_color & 0x0f);
- break;
- default:
- if (vc->vc_par[i] >= 30 && vc->vc_par[i] <= 37)
- vc->vc_color = color_table[vc->vc_par[i] - 30]
- | (vc->vc_color & 0xf0);
- else if (vc->vc_par[i] >= 40 && vc->vc_par[i] <= 47)
- vc->vc_color = (color_table[vc->vc_par[i] - 40] << 4)
- | (vc->vc_color & 0x0f);
- break;
+ case 0: /* all attributes off */
+ default_attr(vc);
+ break;
+ case 1:
+ vc->vc_intensity = 2;
+ break;
+ case 2:
+ vc->vc_intensity = 0;
+ break;
+ case 3:
+ vc->vc_italic = 1;
+ break;
+ case 4:
+ vc->vc_underline = 1;
+ break;
+ case 5:
+ vc->vc_blink = 1;
+ break;
+ case 7:
+ vc->vc_reverse = 1;
+ break;
+ case 10: /* ANSI X3.64-1979 (SCO-ish?)
+ * Select primary font, don't display control chars if
+ * defined, don't set bit 8 on output.
+ */
+ vc->vc_translate = set_translate(vc->vc_charset == 0
+ ? vc->vc_G0_charset
+ : vc->vc_G1_charset, vc);
+ vc->vc_disp_ctrl = 0;
+ vc->vc_toggle_meta = 0;
+ break;
+ case 11: /* ANSI X3.64-1979 (SCO-ish?)
+ * Select first alternate font, lets chars < 32 be
+ * displayed as ROM chars.
+ */
+ vc->vc_translate = set_translate(IBMPC_MAP, vc);
+ vc->vc_disp_ctrl = 1;
+ vc->vc_toggle_meta = 0;
+ break;
+ case 12: /* ANSI X3.64-1979 (SCO-ish?)
+ * Select second alternate font, toggle high bit
+ * before displaying as ROM char.
+ */
+ vc->vc_translate = set_translate(IBMPC_MAP, vc);
+ vc->vc_disp_ctrl = 1;
+ vc->vc_toggle_meta = 1;
+ break;
+ case 21:
+ case 22:
+ vc->vc_intensity = 1;
+ break;
+ case 23:
+ vc->vc_italic = 0;
+ break;
+ case 24:
+ vc->vc_underline = 0;
+ break;
+ case 25:
+ vc->vc_blink = 0;
+ break;
+ case 27:
+ vc->vc_reverse = 0;
+ break;
+ case 38:
+ i = vc_t416_color(vc, i, rgb_foreground);
+ break;
+ case 48:
+ i = vc_t416_color(vc, i, rgb_background);
+ break;
+ case 39:
+ vc->vc_color = (vc->vc_def_color & 0x0f) |
+ (vc->vc_color & 0xf0);
+ break;
+ case 49:
+ vc->vc_color = (vc->vc_def_color & 0xf0) |
+ (vc->vc_color & 0x0f);
+ break;
+ default:
+ if (vc->vc_par[i] >= 30 && vc->vc_par[i] <= 37)
+ vc->vc_color = color_table[vc->vc_par[i] - 30]
+ | (vc->vc_color & 0xf0);
+ else if (vc->vc_par[i] >= 40 && vc->vc_par[i] <= 47)
+ vc->vc_color = (color_table[vc->vc_par[i] - 40] << 4)
+ | (vc->vc_color & 0x0f);
+ break;
}
update_attr(vc);
}
@@ -1496,7 +1483,6 @@ static void set_mode(struct vc_data *vc, int on_off)
clr_kbd(vc, decckm);
break;
case 3: /* 80/132 mode switch unimplemented */
- vc->vc_deccolm = on_off;
#if 0
vc_resize(deccolm ? 132 : 80, vc->vc_rows);
/* this alone does not suffice; some user mode
@@ -2178,18 +2164,20 @@ static int is_double_width(uint32_t ucs)
return bisearch(ucs, double_width, ARRAY_SIZE(double_width) - 1);
}
+static void con_flush(struct vc_data *vc, unsigned long draw_from,
+ unsigned long draw_to, int *draw_x)
+{
+ if (*draw_x < 0)
+ return;
+
+ vc->vc_sw->con_putcs(vc, (u16 *)draw_from,
+ (u16 *)draw_to - (u16 *)draw_from, vc->vc_y, *draw_x);
+ *draw_x = -1;
+}
+
/* acquires console_lock */
static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
-#ifdef VT_BUF_VRAM_ONLY
-#define FLUSH do { } while(0);
-#else
-#define FLUSH if (draw_x >= 0) { \
- vc->vc_sw->con_putcs(vc, (u16 *)draw_from, (u16 *)draw_to - (u16 *)draw_from, vc->vc_y, draw_x); \
- draw_x = -1; \
- }
-#endif
-
int c, tc, ok, n = 0, draw_x = -1;
unsigned int currcons;
unsigned long draw_from = 0, draw_to = 0;
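
Each former FLUSH expansion becomes an ordinary call; a minimal sketch of the resulting pattern:

/*   con_flush(vc, draw_from, draw_to, &draw_x);
 *
 * This is a no-op while draw_x < 0 (nothing buffered); otherwise it pushes
 * the pending [draw_from, draw_to) character span to the driver at row
 * vc->vc_y, column draw_x, then resets draw_x to -1.
 */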
@@ -2226,7 +2214,7 @@ static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int co
charmask = himask ? 0x1ff : 0xff;
/* undraw cursor first */
- if (IS_FG(vc))
+ if (con_is_fg(vc))
hide_cursor(vc);
param.vc = vc;
@@ -2381,12 +2369,13 @@ rescan_last_byte:
} else {
vc_attr = ((vc->vc_attr) & 0x88) | (((vc->vc_attr) & 0x70) >> 4) | (((vc->vc_attr) & 0x07) << 4);
}
- FLUSH
+ con_flush(vc, draw_from, draw_to, &draw_x);
}
while (1) {
if (vc->vc_need_wrap || vc->vc_decim)
- FLUSH
+ con_flush(vc, draw_from, draw_to,
+ &draw_x);
if (vc->vc_need_wrap) {
cr(vc);
lf(vc);
@@ -2397,7 +2386,7 @@ rescan_last_byte:
((vc_attr << 8) & ~himask) + ((tc & 0x100) ? himask : 0) + (tc & 0xff) :
(vc_attr << 8) + tc,
(u16 *) vc->vc_pos);
- if (DO_UPDATE(vc) && draw_x < 0) {
+ if (con_should_update(vc) && draw_x < 0) {
draw_x = vc->vc_x;
draw_from = vc->vc_pos;
}
@@ -2416,9 +2405,8 @@ rescan_last_byte:
}
notify_write(vc, c);
- if (inverse) {
- FLUSH
- }
+ if (inverse)
+ con_flush(vc, draw_from, draw_to, &draw_x);
if (rescan) {
rescan = 0;
@@ -2429,15 +2417,14 @@ rescan_last_byte:
}
continue;
}
- FLUSH
+ con_flush(vc, draw_from, draw_to, &draw_x);
do_con_trol(tty, vc, orig);
}
- FLUSH
+ con_flush(vc, draw_from, draw_to, &draw_x);
console_conditional_schedule();
console_unlock();
notify_update(vc);
return n;
-#undef FLUSH
}
/*
@@ -2471,7 +2458,7 @@ static void console_callback(struct work_struct *ignored)
if (scrollback_delta) {
struct vc_data *vc = vc_cons[fg_console].d;
clear_selection();
- if (vc->vc_mode == KD_TEXT)
+ if (vc->vc_mode == KD_TEXT && vc->vc_sw->con_scrolldelta)
vc->vc_sw->con_scrolldelta(vc, scrollback_delta);
scrollback_delta = 0;
}
@@ -2583,7 +2570,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
goto quit;
/* undraw cursor first */
- if (IS_FG(vc))
+ if (con_is_fg(vc))
hide_cursor(vc);
start = (ushort *)vc->vc_pos;
@@ -2594,7 +2581,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
c = *b++;
if (c == 10 || c == 13 || c == 8 || vc->vc_need_wrap) {
if (cnt > 0) {
- if (CON_IS_VISIBLE(vc))
+ if (con_is_visible(vc))
vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x);
vc->vc_x += cnt;
if (vc->vc_need_wrap)
@@ -2626,7 +2613,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
myx++;
}
if (cnt > 0) {
- if (CON_IS_VISIBLE(vc))
+ if (con_is_visible(vc))
vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x);
vc->vc_x += cnt;
if (vc->vc_x == vc->vc_cols) {
@@ -3173,7 +3160,7 @@ static int do_bind_con_driver(const struct consw *csw, int first, int last,
j = i;
- if (CON_IS_VISIBLE(vc)) {
+ if (con_is_visible(vc)) {
k = i;
save_screen(vc);
}
@@ -3981,7 +3968,7 @@ static void set_palette(struct vc_data *vc)
{
WARN_CONSOLE_UNLOCKED();
- if (vc->vc_mode != KD_GRAPHICS)
+ if (vc->vc_mode != KD_GRAPHICS && vc->vc_sw->con_set_palette)
vc->vc_sw->con_set_palette(vc, color_table);
}
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 97d5a7455..f62c59881 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -1006,16 +1006,10 @@ int vt_ioctl(struct tty_struct *tty,
break;
case PIO_UNIMAPCLR:
- { struct unimapinit ui;
if (!perm)
return -EPERM;
- ret = copy_from_user(&ui, up, sizeof(struct unimapinit));
- if (ret)
- ret = -EFAULT;
- else
- con_clear_unimap(vc, &ui);
+ con_clear_unimap(vc);
break;
- }
case PIO_UNIMAP:
case GIO_UNIMAP:
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index 3644a3500..5e5b9eb7e 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -4,8 +4,9 @@ config USB_CHIPIDEA
select EXTCON
help
Say Y here if your system has a dual role high speed USB
- controller based on ChipIdea silicon IP. Currently, only the
- peripheral mode is supported.
+ controller based on ChipIdea silicon IP. It supports:
+ Dual-role switch (ID, OTG FSM, sysfs), Host-only, and
+ Peripheral-only.
When compiled dynamically, the module will be called ci-hdrc.ko.
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 591e52009..0f3f62e81 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -946,7 +946,7 @@ static int wait_serial_change(struct acm *acm, unsigned long arg)
DECLARE_WAITQUEUE(wait, current);
struct async_icount old, new;
- if (arg & (TIOCM_DSR | TIOCM_RI | TIOCM_CD ))
+ if (arg & (TIOCM_DSR | TIOCM_RI | TIOCM_CD))
return -EINVAL;
do {
spin_lock_irq(&acm->read_lock);
@@ -1146,7 +1146,7 @@ static int acm_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_cdc_union_desc *union_header = NULL;
- struct usb_cdc_country_functional_desc *cfd = NULL;
+ struct usb_cdc_call_mgmt_descriptor *cmgmd = NULL;
unsigned char *buffer = intf->altsetting->extra;
int buflen = intf->altsetting->extralen;
struct usb_interface *control_interface;
@@ -1155,18 +1155,16 @@ static int acm_probe(struct usb_interface *intf,
struct usb_endpoint_descriptor *epread = NULL;
struct usb_endpoint_descriptor *epwrite = NULL;
struct usb_device *usb_dev = interface_to_usbdev(intf);
+ struct usb_cdc_parsed_header h;
struct acm *acm;
int minor;
int ctrlsize, readsize;
u8 *buf;
- u8 ac_management_function = 0;
- u8 call_management_function = 0;
- int call_interface_num = -1;
- int data_interface_num = -1;
+ int call_intf_num = -1;
+ int data_intf_num = -1;
unsigned long quirks;
int num_rx_buf;
int i;
- unsigned int elength = 0;
int combined_interfaces = 0;
struct device *tty_dev;
int rv = -ENOMEM;
@@ -1196,8 +1194,6 @@ static int acm_probe(struct usb_interface *intf,
}
if (!buflen) {
- if (!intf->cur_altsetting || !intf->cur_altsetting->endpoint)
- return -EINVAL;
if (intf->cur_altsetting->endpoint &&
intf->cur_altsetting->endpoint->extralen &&
intf->cur_altsetting->endpoint->extra) {
@@ -1212,74 +1208,24 @@ static int acm_probe(struct usb_interface *intf,
}
}
- while (buflen > 0) {
- elength = buffer[0];
- if (!elength) {
- dev_err(&intf->dev, "skipping garbage byte\n");
- elength = 1;
- goto next_desc;
- }
- if (buffer[1] != USB_DT_CS_INTERFACE) {
- dev_err(&intf->dev, "skipping garbage\n");
- goto next_desc;
- }
-
- switch (buffer[2]) {
- case USB_CDC_UNION_TYPE: /* we've found it */
- if (elength < sizeof(struct usb_cdc_union_desc))
- goto next_desc;
- if (union_header) {
- dev_err(&intf->dev, "More than one "
- "union descriptor, skipping ...\n");
- goto next_desc;
- }
- union_header = (struct usb_cdc_union_desc *)buffer;
- break;
- case USB_CDC_COUNTRY_TYPE: /* export through sysfs*/
- if (elength < sizeof(struct usb_cdc_country_functional_desc))
- goto next_desc;
- cfd = (struct usb_cdc_country_functional_desc *)buffer;
- break;
- case USB_CDC_HEADER_TYPE: /* maybe check version */
- break; /* for now we ignore it */
- case USB_CDC_ACM_TYPE:
- if (elength < 4)
- goto next_desc;
- ac_management_function = buffer[3];
- break;
- case USB_CDC_CALL_MANAGEMENT_TYPE:
- if (elength < 5)
- goto next_desc;
- call_management_function = buffer[3];
- call_interface_num = buffer[4];
- break;
- default:
- /*
- * there are LOTS more CDC descriptors that
- * could legitimately be found here.
- */
- dev_dbg(&intf->dev, "Ignoring descriptor: "
- "type %02x, length %ud\n",
- buffer[2], elength);
- break;
- }
-next_desc:
- buflen -= elength;
- buffer += elength;
- }
+ cdc_parse_cdc_header(&h, intf, buffer, buflen);
+ union_header = h.usb_cdc_union_desc;
+ cmgmd = h.usb_cdc_call_mgmt_descriptor;
+ if (cmgmd)
+ call_intf_num = cmgmd->bDataInterface;
if (!union_header) {
- if (call_interface_num > 0) {
+ if (call_intf_num > 0) {
dev_dbg(&intf->dev, "No union descriptor, using call management descriptor\n");
/* quirks for Droids MuIn LCD */
- if (quirks & NO_DATA_INTERFACE)
+ if (quirks & NO_DATA_INTERFACE) {
data_interface = usb_ifnum_to_if(usb_dev, 0);
- else
- data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = call_interface_num));
+ } else {
+ data_intf_num = call_intf_num;
+ data_interface = usb_ifnum_to_if(usb_dev, data_intf_num);
+ }
control_interface = intf;
} else {
- if (!intf->cur_altsetting)
- return -ENODEV;
if (intf->cur_altsetting->desc.bNumEndpoints != 3) {
dev_dbg(&intf->dev,"No union descriptor, giving up\n");
return -ENODEV;
@@ -1291,8 +1237,9 @@ next_desc:
}
}
} else {
+ data_intf_num = union_header->bSlaveInterface0;
control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0);
- data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = union_header->bSlaveInterface0));
+ data_interface = usb_ifnum_to_if(usb_dev, data_intf_num);
}
if (!control_interface || !data_interface) {
@@ -1300,7 +1247,7 @@ next_desc:
return -ENODEV;
}
- if (data_interface_num != call_interface_num)
+ if (data_intf_num != call_intf_num)
dev_dbg(&intf->dev, "Separate call control interface. That is not fully supported.\n");
if (control_interface == data_interface) {
@@ -1309,22 +1256,15 @@ next_desc:
combined_interfaces = 1;
/* a popular other OS doesn't use it */
quirks |= NO_CAP_LINE;
- if (!data_interface->cur_altsetting)
- return -EINVAL;
if (data_interface->cur_altsetting->desc.bNumEndpoints != 3) {
dev_err(&intf->dev, "This needs exactly 3 endpoints\n");
return -EINVAL;
}
look_for_collapsed_interface:
- if (!data_interface->cur_altsetting)
- return -EINVAL;
for (i = 0; i < 3; i++) {
struct usb_endpoint_descriptor *ep;
ep = &data_interface->cur_altsetting->endpoint[i].desc;
- if (!ep)
- return -ENODEV;
-
if (usb_endpoint_is_int_in(ep))
epctrl = ep;
else if (usb_endpoint_is_bulk_out(ep))
@@ -1343,12 +1283,8 @@ look_for_collapsed_interface:
skip_normal_probe:
/*workaround for switched interfaces */
- if (!data_interface->cur_altsetting)
- return -EINVAL;
if (data_interface->cur_altsetting->desc.bInterfaceClass
!= CDC_DATA_INTERFACE_TYPE) {
- if (!control_interface->cur_altsetting)
- return -EINVAL;
if (control_interface->cur_altsetting->desc.bInterfaceClass
== CDC_DATA_INTERFACE_TYPE) {
dev_dbg(&intf->dev,
@@ -1371,7 +1307,6 @@ skip_normal_probe:
if (data_interface->cur_altsetting->desc.bNumEndpoints < 2 ||
- !control_interface->cur_altsetting ||
control_interface->cur_altsetting->desc.bNumEndpoints == 0)
return -EINVAL;
@@ -1379,8 +1314,6 @@ skip_normal_probe:
epread = &data_interface->cur_altsetting->endpoint[0].desc;
epwrite = &data_interface->cur_altsetting->endpoint[1].desc;
- if (!epctrl || !epread || !epwrite)
- return -ENODEV;
/* workaround for switched endpoints */
if (!usb_endpoint_dir_in(epread)) {
@@ -1397,11 +1330,8 @@ made_compressed_probe:
goto alloc_fail;
minor = acm_alloc_minor(acm);
- if (minor < 0) {
- dev_err(&intf->dev, "no more free acm devices\n");
- kfree(acm);
- return -ENODEV;
- }
+ if (minor < 0)
+ goto alloc_fail1;
ctrlsize = usb_endpoint_maxp(epctrl);
readsize = usb_endpoint_maxp(epread) *
@@ -1412,7 +1342,8 @@ made_compressed_probe:
acm->data = data_interface;
acm->minor = minor;
acm->dev = usb_dev;
- acm->ctrl_caps = ac_management_function;
+ if (h.usb_cdc_acm_descriptor)
+ acm->ctrl_caps = h.usb_cdc_acm_descriptor->bmCapabilities;
if (quirks & NO_CAP_LINE)
acm->ctrl_caps &= ~USB_CDC_CAP_LINE;
acm->ctrlsize = ctrlsize;
@@ -1505,7 +1436,10 @@ made_compressed_probe:
if (i < 0)
goto alloc_fail7;
- if (cfd) { /* export the country data */
+ if (h.usb_cdc_country_functional_desc) { /* export the country data */
+ struct usb_cdc_country_functional_desc * cfd =
+ h.usb_cdc_country_functional_desc;
+
acm->country_codes = kmalloc(cfd->bLength - 4, GFP_KERNEL);
if (!acm->country_codes)
goto skip_countries;
@@ -1589,6 +1523,7 @@ alloc_fail4:
usb_free_coherent(usb_dev, ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
alloc_fail2:
acm_release_minor(acm);
+alloc_fail1:
kfree(acm);
alloc_fail:
return rv;
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 61ea87917..337948c42 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -875,38 +875,18 @@ static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id)
int rv = -EINVAL;
struct usb_host_interface *iface;
struct usb_endpoint_descriptor *ep;
- struct usb_cdc_dmm_desc *dmhd;
+ struct usb_cdc_parsed_header hdr;
u8 *buffer = intf->altsetting->extra;
int buflen = intf->altsetting->extralen;
u16 maxcom = WDM_DEFAULT_BUFSIZE;
if (!buffer)
goto err;
- while (buflen > 2) {
- if (buffer[1] != USB_DT_CS_INTERFACE) {
- dev_err(&intf->dev, "skipping garbage\n");
- goto next_desc;
- }
- switch (buffer[2]) {
- case USB_CDC_HEADER_TYPE:
- break;
- case USB_CDC_DMM_TYPE:
- dmhd = (struct usb_cdc_dmm_desc *)buffer;
- maxcom = le16_to_cpu(dmhd->wMaxCommand);
- dev_dbg(&intf->dev,
- "Finding maximum buffer length: %d", maxcom);
- break;
- default:
- dev_err(&intf->dev,
- "Ignoring extra header, type %d, length %d\n",
- buffer[2], buffer[0]);
- break;
- }
-next_desc:
- buflen -= buffer[0];
- buffer += buffer[0];
- }
+ cdc_parse_cdc_header(&hdr, intf, buffer, buflen);
+
+ if (hdr.usb_cdc_dmm_desc)
+ maxcom = le16_to_cpu(hdr.usb_cdc_dmm_desc->wMaxCommand);
iface = intf->cur_altsetting;
if (iface->desc.bNumEndpoints != 1)
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 917a55c44..ffe9f8875 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -141,6 +141,7 @@ static void usbtmc_delete(struct kref *kref)
struct usbtmc_device_data *data = to_usbtmc_data(kref);
usb_put_dev(data->usb_dev);
+ kfree(data);
}
static int usbtmc_open(struct inode *inode, struct file *filp)
@@ -1379,7 +1380,7 @@ static int usbtmc_probe(struct usb_interface *intf,
dev_dbg(&intf->dev, "%s called\n", __func__);
- data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL);
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
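
The switch from devm_kzalloc() to plain kmalloc() pairs with the kfree() added to usbtmc_delete(); a sketch of why the kref release has to own the memory:

/*   data = kmalloc(sizeof(*data), GFP_KERNEL);
 *   kref_init(&data->kref);
 *   ...
 *   kref_put(&data->kref, usbtmc_delete);   // last reference frees data
 *
 * With devm allocation the buffer would be freed at driver unbind, while
 * an open file handle could still hold a reference and touch freed memory.
 */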
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
index e3d01619d..5ef8da6e6 100644
--- a/drivers/usb/common/common.c
+++ b/drivers/usb/common/common.c
@@ -131,15 +131,17 @@ EXPORT_SYMBOL_GPL(usb_get_dr_mode);
* of_usb_get_dr_mode_by_phy - Get dual role mode for the controller device
* which is associated with the given phy device_node
* @np: Pointer to the given phy device_node
+ * @arg0: phandle args[0] for phys with #phy-cells >= 1, or -1 for
+ * phys which do not have #phy-cells
*
* In dts a usb controller associates with phy devices. The function gets
* the string from property 'dr_mode' of the controller associated with the
* given phy device node, and returns the corresponding enum usb_dr_mode.
*/
-enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *phy_np)
+enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0)
{
struct device_node *controller = NULL;
- struct device_node *phy;
+ struct of_phandle_args args;
const char *dr_mode;
int index;
int err;
@@ -148,12 +150,24 @@ enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *phy_np)
controller = of_find_node_with_property(controller, "phys");
index = 0;
do {
- phy = of_parse_phandle(controller, "phys", index);
- of_node_put(phy);
- if (phy == phy_np)
+ if (arg0 == -1) {
+ args.np = of_parse_phandle(controller, "phys",
+ index);
+ args.args_count = 0;
+ } else {
+ err = of_parse_phandle_with_args(controller,
+ "phys", "#phy-cells",
+ index, &args);
+ if (err)
+ break;
+ }
+
+ of_node_put(args.np);
+ if (args.np == np && (args.args_count == 0 ||
+ args.args[0] == arg0))
goto finish;
index++;
- } while (phy);
+ } while (args.np);
} while (controller);
finish:
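
A caller-side sketch for the new signature (phy_np and the phandle forms are illustrative):

/*   // phy referenced with one cell, e.g. phys = <&usbphy 0>;
 *   mode = of_usb_get_dr_mode_by_phy(phy_np, 0);
 *
 *   // phy without #phy-cells, e.g. phys = <&usbphy>;
 *   mode = of_usb_get_dr_mode_by_phy(phy_np, -1);
 */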
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 50b6baa50..09c8d9ca6 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -2589,7 +2589,9 @@ static unsigned int usbdev_poll(struct file *file,
if (file->f_mode & FMODE_WRITE && !list_empty(&ps->async_completed))
mask |= POLLOUT | POLLWRNORM;
if (!connected(ps))
- mask |= POLLERR | POLLHUP;
+ mask |= POLLHUP;
+ if (list_empty(&ps->list))
+ mask |= POLLERR;
return mask;
}
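
A summary of the userspace-visible poll semantics after this change (my reading of the code, not wording from the patch):

/* disconnected           -> POLLHUP: the fd can still reap completed URBs
 * ps->list already empty -> POLLERR as well: nothing left to wait for,
 *                           so poll() must not block forever
 * Previously any disconnect reported POLLERR | POLLHUP together.
 */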
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index ea681f157..0406a59f0 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -12,6 +12,7 @@
#include <linux/nls.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
+#include <linux/usb/cdc.h>
#include <linux/usb/quirks.h>
#include <linux/usb/hcd.h> /* for usbcore internals */
#include <asm/byteorder.h>
@@ -2023,3 +2024,155 @@ int usb_driver_set_configuration(struct usb_device *udev, int config)
return 0;
}
EXPORT_SYMBOL_GPL(usb_driver_set_configuration);
+
+/**
+ * cdc_parse_cdc_header - parse the extra headers present in CDC devices
+ * @hdr: the place to put the results of the parsing
+ * @intf: the interface for which parsing is requested
+ * @buffer: pointer to the extra headers to be parsed
+ * @buflen: length of the extra headers
+ *
+ * This evaluates the extra headers present in CDC devices which
+ * bind the interfaces for data and control and provide details
+ * about the capabilities of the device.
+ *
+ * Return: number of descriptors parsed or -EINVAL
+ * if the header is contradictory beyond salvage
+ */
+
+int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
+ struct usb_interface *intf,
+ u8 *buffer,
+ int buflen)
+{
+ /* duplicates are ignored */
+ struct usb_cdc_union_desc *union_header = NULL;
+
+ /* duplicates are not tolerated */
+ struct usb_cdc_header_desc *header = NULL;
+ struct usb_cdc_ether_desc *ether = NULL;
+ struct usb_cdc_mdlm_detail_desc *detail = NULL;
+ struct usb_cdc_mdlm_desc *desc = NULL;
+
+ unsigned int elength;
+ int cnt = 0;
+
+ memset(hdr, 0x00, sizeof(struct usb_cdc_parsed_header));
+ hdr->phonet_magic_present = false;
+ while (buflen > 0) {
+ elength = buffer[0];
+ if (!elength) {
+ dev_err(&intf->dev, "skipping garbage byte\n");
+ elength = 1;
+ goto next_desc;
+ }
+ if (buffer[1] != USB_DT_CS_INTERFACE) {
+ dev_err(&intf->dev, "skipping garbage\n");
+ goto next_desc;
+ }
+
+ switch (buffer[2]) {
+ case USB_CDC_UNION_TYPE: /* we've found it */
+ if (elength < sizeof(struct usb_cdc_union_desc))
+ goto next_desc;
+ if (union_header) {
+ dev_err(&intf->dev, "More than one union descriptor, skipping ...\n");
+ goto next_desc;
+ }
+ union_header = (struct usb_cdc_union_desc *)buffer;
+ break;
+ case USB_CDC_COUNTRY_TYPE:
+ if (elength < sizeof(struct usb_cdc_country_functional_desc))
+ goto next_desc;
+ hdr->usb_cdc_country_functional_desc =
+ (struct usb_cdc_country_functional_desc *)buffer;
+ break;
+ case USB_CDC_HEADER_TYPE:
+ if (elength != sizeof(struct usb_cdc_header_desc))
+ goto next_desc;
+ if (header)
+ return -EINVAL;
+ header = (struct usb_cdc_header_desc *)buffer;
+ break;
+ case USB_CDC_ACM_TYPE:
+ if (elength < sizeof(struct usb_cdc_acm_descriptor))
+ goto next_desc;
+ hdr->usb_cdc_acm_descriptor =
+ (struct usb_cdc_acm_descriptor *)buffer;
+ break;
+ case USB_CDC_ETHERNET_TYPE:
+ if (elength != sizeof(struct usb_cdc_ether_desc))
+ goto next_desc;
+ if (ether)
+ return -EINVAL;
+ ether = (struct usb_cdc_ether_desc *)buffer;
+ break;
+ case USB_CDC_CALL_MANAGEMENT_TYPE:
+ if (elength < sizeof(struct usb_cdc_call_mgmt_descriptor))
+ goto next_desc;
+ hdr->usb_cdc_call_mgmt_descriptor =
+ (struct usb_cdc_call_mgmt_descriptor *)buffer;
+ break;
+ case USB_CDC_DMM_TYPE:
+ if (elength < sizeof(struct usb_cdc_dmm_desc))
+ goto next_desc;
+ hdr->usb_cdc_dmm_desc =
+ (struct usb_cdc_dmm_desc *)buffer;
+ break;
+ case USB_CDC_MDLM_TYPE:
+ if (elength < sizeof(struct usb_cdc_mdlm_desc *))
+ goto next_desc;
+ if (desc)
+ return -EINVAL;
+ desc = (struct usb_cdc_mdlm_desc *)buffer;
+ break;
+ case USB_CDC_MDLM_DETAIL_TYPE:
+ if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *))
+ goto next_desc;
+ if (detail)
+ return -EINVAL;
+ detail = (struct usb_cdc_mdlm_detail_desc *)buffer;
+ break;
+ case USB_CDC_NCM_TYPE:
+ if (elength < sizeof(struct usb_cdc_ncm_desc))
+ goto next_desc;
+ hdr->usb_cdc_ncm_desc = (struct usb_cdc_ncm_desc *)buffer;
+ break;
+ case USB_CDC_MBIM_TYPE:
+ if (elength < sizeof(struct usb_cdc_mbim_desc))
+ goto next_desc;
+
+ hdr->usb_cdc_mbim_desc = (struct usb_cdc_mbim_desc *)buffer;
+ break;
+ case USB_CDC_MBIM_EXTENDED_TYPE:
+ if (elength < sizeof(struct usb_cdc_mbim_extended_desc))
+ break;
+ hdr->usb_cdc_mbim_extended_desc =
+ (struct usb_cdc_mbim_extended_desc *)buffer;
+ break;
+ case CDC_PHONET_MAGIC_NUMBER:
+ hdr->phonet_magic_present = true;
+ break;
+ default:
+ /*
+ * there are LOTS more CDC descriptors that
+ * could legitimately be found here.
+ */
+ dev_dbg(&intf->dev, "Ignoring descriptor: type %02x, length %ud\n",
+ buffer[2], elength);
+ goto next_desc;
+ }
+ cnt++;
+next_desc:
+ buflen -= elength;
+ buffer += elength;
+ }
+ hdr->usb_cdc_union_desc = union_header;
+ hdr->usb_cdc_header_desc = header;
+ hdr->usb_cdc_mdlm_detail_desc = detail;
+ hdr->usb_cdc_mdlm_desc = desc;
+ hdr->usb_cdc_ether_desc = ether;
+ return cnt;
+}
+
+EXPORT_SYMBOL(cdc_parse_cdc_header);
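
A minimal caller sketch, mirroring the cdc-acm and cdc-wdm conversions earlier in this patch (slave_intf is illustrative):

struct usb_cdc_parsed_header h;
int n;

n = cdc_parse_cdc_header(&h, intf, intf->altsetting->extra,
			 intf->altsetting->extralen);
if (n < 0)
	return n;	/* duplicate header/ether/MDLM descriptors */
if (h.usb_cdc_union_desc)
	slave_intf = h.usb_cdc_union_desc->bSlaveInterface0;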
diff --git a/drivers/usb/dwc2/Kconfig b/drivers/usb/dwc2/Kconfig
index c1f29caa8..e838701d6 100644
--- a/drivers/usb/dwc2/Kconfig
+++ b/drivers/usb/dwc2/Kconfig
@@ -55,6 +55,7 @@ endchoice
config USB_DWC2_PCI
tristate "DWC2 PCI"
depends on PCI
+ depends on USB_GADGET || !USB_GADGET
default n
select NOP_USB_XCEIV
help
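
The added dependency is the standard Kconfig idiom for "may link against gadget symbols"; evaluated as a tristate expression it yields:

/* USB_GADGET = y -> USB_DWC2_PCI may be y or m
 * USB_GADGET = m -> USB_DWC2_PCI is limited to m (a built-in driver cannot
 *                   call into a modular gadget core)
 * USB_GADGET = n -> the !USB_GADGET arm keeps the option selectable
 */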
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index dec0b21fc..d64551243 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -166,7 +166,7 @@ struct dwc2_hsotg_req;
* means that it is sending data to the Host.
* @index: The index for the endpoint registers.
* @mc: Multi Count - number of transactions per microframe
- * @interval - Interval for periodic endpoints
+ * @interval - Interval for periodic endpoints, in frames or microframes.
* @name: The name array passed to the USB core.
* @halted: Set if the endpoint has been halted.
* @periodic: Set if this is a periodic ep, such as Interrupt
@@ -177,6 +177,8 @@ struct dwc2_hsotg_req;
* @fifo_load: The amount of data loaded into the FIFO (periodic IN)
* @last_load: The offset of data for the last start of request.
* @size_loaded: The last loaded size for DxEPTSIZE for periodic IN
+ * @target_frame: Targeted frame number for setting up the next ISOC transfer
+ * @frame_overrun: Indicates SOF number overrun in DSTS
*
* This is the driver's state for each registered endpoint, allowing it
* to keep track of transactions that need doing. Each endpoint has a
@@ -213,7 +215,9 @@ struct dwc2_hsotg_ep {
unsigned int periodic:1;
unsigned int isochronous:1;
unsigned int send_zlp:1;
- unsigned int has_correct_parity:1;
+ unsigned int target_frame;
+#define TARGET_FRAME_INITIAL 0xFFFFFFFF
+ bool frame_overrun;
char name[10];
};
@@ -864,6 +868,7 @@ struct dwc2_hsotg {
void *priv;
int irq;
struct clk *clk;
+ struct reset_control *reset;
unsigned int queuing_high_bandwidth:1;
unsigned int srp_success:1;
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 26cf09d0f..af46adfae 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -97,6 +97,25 @@ static inline bool using_dma(struct dwc2_hsotg *hsotg)
}
/**
+ * dwc2_gadget_incr_frame_num - Increments the targeted frame number.
+ * @hs_ep: The endpoint
+ *
+ * This function will also check if the frame number overruns DSTS_SOFFN_LIMIT.
+ * If an overrun occurs it will wrap the value and set the frame_overrun flag.
+ */
+static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
+{
+ hs_ep->target_frame += hs_ep->interval;
+ if (hs_ep->target_frame > DSTS_SOFFN_LIMIT) {
+ hs_ep->frame_overrun = 1;
+ hs_ep->target_frame &= DSTS_SOFFN_LIMIT;
+ } else {
+ hs_ep->frame_overrun = 0;
+ }
+}
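
A quick numeric check of the wrap behaviour (assuming DSTS_SOFFN_LIMIT is the 14-bit mask 0x3FFF):

/* target_frame = 0x3FF0, interval = 0x20:
 *   0x3FF0 + 0x20 = 0x4010 > 0x3FFF  -> overrun
 *   0x4010 & 0x3FFF = 0x0010         -> wrapped target, frame_overrun = 1
 */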
+
+/**
* dwc2_hsotg_en_gsint - enable one or more of the general interrupt
* @hsotg: The device state
* @ints: A bitmask of the interrupts to enable
@@ -504,6 +523,23 @@ static unsigned get_ep_limit(struct dwc2_hsotg_ep *hs_ep)
}
/**
+* dwc2_hsotg_read_frameno - read current frame number
+* @hsotg: The device instance
+*
+* Return the current frame number
+*/
+static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
+{
+ u32 dsts;
+
+ dsts = dwc2_readl(hsotg->regs + DSTS);
+ dsts &= DSTS_SOFFN_MASK;
+ dsts >>= DSTS_SOFFN_SHIFT;
+
+ return dsts;
+}
+
+/**
* dwc2_hsotg_start_req - start a USB request from an endpoint's queue
* @hsotg: The controller state.
* @hs_ep: The endpoint to process a request for
@@ -631,8 +667,17 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
__func__, &ureq->dma, dma_reg);
}
+ if (hs_ep->isochronous && hs_ep->interval == 1) {
+ hs_ep->target_frame = dwc2_hsotg_read_frameno(hsotg);
+ dwc2_gadget_incr_frame_num(hs_ep);
+
+ if (hs_ep->target_frame & 0x1)
+ ctrl |= DXEPCTL_SETODDFR;
+ else
+ ctrl |= DXEPCTL_SETEVENFR;
+ }
+
ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */
- ctrl |= DXEPCTL_USBACTEP;
dev_dbg(hsotg->dev, "ep0 state:%d\n", hsotg->ep0_state);
@@ -659,14 +704,6 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
}
/*
- * clear the INTknTXFEmpMsk when we start request, more as a aide
- * to debugging to see what is going on.
- */
- if (dir_in)
- dwc2_writel(DIEPMSK_INTKNTXFEMPMSK,
- hsotg->regs + DIEPINT(index));
-
- /*
* Note, trying to clear the NAK here causes problems with transmit
* on the S3C6400 ending up with the TXFIFO becoming full.
*/
@@ -773,6 +810,30 @@ static void dwc2_hsotg_handle_unaligned_buf_complete(struct dwc2_hsotg *hsotg,
hs_req->saved_req_buf = NULL;
}
+/**
+ * dwc2_gadget_target_frame_elapsed - Checks target frame
+ * @hs_ep: The driver endpoint to check
+ *
+ * Returns true if the targeted frame has elapsed; the corresponding
+ * transfer then needs to be dropped.
+ */
+static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep)
+{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ u32 target_frame = hs_ep->target_frame;
+ u32 current_frame = dwc2_hsotg_read_frameno(hsotg);
+ bool frame_overrun = hs_ep->frame_overrun;
+
+ if (!frame_overrun && current_frame >= target_frame)
+ return true;
+
+ if (frame_overrun && current_frame >= target_frame &&
+ ((current_frame - target_frame) < DSTS_SOFFN_LIMIT / 2))
+ return true;
+
+ return false;
+}
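
With the same 0x3FFF limit, the two-clause test behaves as follows for a wrapped target:

/* target_frame = 0x0010, frame_overrun = 1:
 *   current = 0x3FF5: 0x3FE5 >= LIMIT / 2 -> not elapsed (the SOFFN counter
 *                     itself has not wrapped yet)
 *   current = 0x0012: 0x0002 <  LIMIT / 2 -> elapsed, drop the transfer
 */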
+
static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
gfp_t gfp_flags)
{
@@ -812,9 +873,18 @@ static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
first = list_empty(&hs_ep->queue);
list_add_tail(&hs_req->queue, &hs_ep->queue);
- if (first)
- dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
+ if (first) {
+ if (!hs_ep->isochronous) {
+ dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
+ return 0;
+ }
+
+ while (dwc2_gadget_target_frame_elapsed(hs_ep))
+ dwc2_gadget_incr_frame_num(hs_ep);
+ if (hs_ep->target_frame != TARGET_FRAME_INITIAL)
+ dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
+ }
return 0;
}
@@ -1035,6 +1105,42 @@ static struct dwc2_hsotg_req *get_ep_head(struct dwc2_hsotg_ep *hs_ep)
}
/**
+ * dwc2_gadget_start_next_request - Starts next request from ep queue
+ * @hs_ep: Endpoint structure
+ *
+ * If the queue is empty and the EP is ISOC-OUT, this unmasks OUTTKNEPDIS
+ * (which is masked in its handler) so that resynchronization can take
+ * place.
+ */
+static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep)
+{
+ u32 mask;
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ int dir_in = hs_ep->dir_in;
+ struct dwc2_hsotg_req *hs_req;
+ u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK;
+
+ if (!list_empty(&hs_ep->queue)) {
+ hs_req = get_ep_head(hs_ep);
+ dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, false);
+ return;
+ }
+ if (!hs_ep->isochronous)
+ return;
+
+ if (dir_in) {
+ dev_dbg(hsotg->dev, "%s: No more ISOC-IN requests\n",
+ __func__);
+ } else {
+ dev_dbg(hsotg->dev, "%s: No more ISOC-OUT requests\n",
+ __func__);
+ mask = dwc2_readl(hsotg->regs + epmsk_reg);
+ mask |= DOEPMSK_OUTTKNEPDISMSK;
+ dwc2_writel(mask, hsotg->regs + epmsk_reg);
+ }
+}
+
+/**
* dwc2_hsotg_process_req_feature - process request {SET,CLEAR}_FEATURE
* @hsotg: The device state
* @ctrl: USB control request
@@ -1044,7 +1150,6 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
{
struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
struct dwc2_hsotg_req *hs_req;
- bool restart;
bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
struct dwc2_hsotg_ep *ep;
int ret;
@@ -1127,12 +1232,7 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
/* If we have pending request, then start it */
if (!ep->req) {
- restart = !list_empty(&ep->queue);
- if (restart) {
- hs_req = get_ep_head(ep);
- dwc2_hsotg_start_req(hsotg, ep,
- hs_req, false);
- }
+ dwc2_gadget_start_next_request(ep);
}
}
@@ -1373,7 +1473,6 @@ static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
struct dwc2_hsotg_req *hs_req,
int result)
{
- bool restart;
if (!hs_req) {
dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__);
@@ -1417,11 +1516,7 @@ static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
*/
if (!hs_ep->req && result >= 0) {
- restart = !list_empty(&hs_ep->queue);
- if (restart) {
- hs_req = get_ep_head(hs_ep);
- dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, false);
- }
+ dwc2_gadget_start_next_request(hs_ep);
}
}
@@ -1597,32 +1692,16 @@ static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
* adjust the ISOC parity here.
*/
if (!using_dma(hsotg)) {
- hs_ep->has_correct_parity = 1;
if (hs_ep->isochronous && hs_ep->interval == 1)
dwc2_hsotg_change_ep_iso_parity(hsotg, DOEPCTL(epnum));
+ else if (hs_ep->isochronous && hs_ep->interval > 1)
+ dwc2_gadget_incr_frame_num(hs_ep);
}
dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
}
/**
- * dwc2_hsotg_read_frameno - read current frame number
- * @hsotg: The device instance
- *
- * Return the current frame number
- */
-static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
-{
- u32 dsts;
-
- dsts = dwc2_readl(hsotg->regs + DSTS);
- dsts &= DSTS_SOFFN_MASK;
- dsts >>= DSTS_SOFFN_SHIFT;
-
- return dsts;
-}
-
-/**
* dwc2_hsotg_handle_rx - RX FIFO has data
* @hsotg: The device instance
*
@@ -1937,6 +2016,190 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
}
/**
+ * dwc2_gadget_read_ep_interrupts - reads interrupts for given ep
+ * @hsotg: The device state.
+ * @idx: Index of ep.
+ * @dir_in: Endpoint direction 1-in 0-out.
+ *
+ * Reads the interrupts for the endpoint with the given index and
+ * direction, masking epint_reg with the corresponding mask.
+ */
+static u32 dwc2_gadget_read_ep_interrupts(struct dwc2_hsotg *hsotg,
+ unsigned int idx, int dir_in)
+{
+ u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK;
+ u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
+ u32 ints;
+ u32 mask;
+ u32 diepempmsk;
+
+ mask = dwc2_readl(hsotg->regs + epmsk_reg);
+ diepempmsk = dwc2_readl(hsotg->regs + DIEPEMPMSK);
+ mask |= ((diepempmsk >> idx) & 0x1) ? DIEPMSK_TXFIFOEMPTY : 0;
+ mask |= DXEPINT_SETUP_RCVD;
+
+ ints = dwc2_readl(hsotg->regs + epint_reg);
+ ints &= mask;
+ return ints;
+}
+
+/**
+ * dwc2_gadget_handle_ep_disabled - handle DXEPINT_EPDISBLD
+ * @hs_ep: The endpoint on which interrupt is asserted.
+ *
+ * This interrupt indicates that the endpoint has been disabled per the
+ * application's request.
+ *
+ * For IN endpoints this flushes the txfifo; for BULK endpoints it clears
+ * the NAK via DCTL_CGNPINNAK, and for ISOC endpoints it completes the
+ * current request.
+ *
+ * For ISOC-OUT endpoints it completes expired requests and, if a request
+ * remains queued, starts it.
+ */
+static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
+{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ struct dwc2_hsotg_req *hs_req;
+ unsigned char idx = hs_ep->index;
+ int dir_in = hs_ep->dir_in;
+ u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
+ int dctl = dwc2_readl(hsotg->regs + DCTL);
+
+ dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);
+
+ if (dir_in) {
+ int epctl = dwc2_readl(hsotg->regs + epctl_reg);
+
+ dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
+
+ if (hs_ep->isochronous) {
+ dwc2_hsotg_complete_in(hsotg, hs_ep);
+ return;
+ }
+
+ if ((epctl & DXEPCTL_STALL) && (epctl & DXEPCTL_EPTYPE_BULK)) {
+ int dctl = dwc2_readl(hsotg->regs + DCTL);
+
+ dctl |= DCTL_CGNPINNAK;
+ dwc2_writel(dctl, hsotg->regs + DCTL);
+ }
+ return;
+ }
+
+ if (dctl & DCTL_GOUTNAKSTS) {
+ dctl |= DCTL_CGOUTNAK;
+ dwc2_writel(dctl, hsotg->regs + DCTL);
+ }
+
+ if (!hs_ep->isochronous)
+ return;
+
+ if (list_empty(&hs_ep->queue)) {
+ dev_dbg(hsotg->dev, "%s: complete_ep 0x%p, ep->queue empty!\n",
+ __func__, hs_ep);
+ return;
+ }
+
+ do {
+ hs_req = get_ep_head(hs_ep);
+ if (hs_req)
+ dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req,
+ -ENODATA);
+ dwc2_gadget_incr_frame_num(hs_ep);
+ } while (dwc2_gadget_target_frame_elapsed(hs_ep));
+
+ dwc2_gadget_start_next_request(hs_ep);
+}
+
+/**
+ * dwc2_gadget_handle_out_token_ep_disabled - handle DXEPINT_OUTTKNEPDIS
+ * @hs_ep: The endpoint on which interrupt is asserted.
+ *
+ * This is the starting point for an ISOC-OUT transfer; synchronization is
+ * done with the first OUT token received from the host while the
+ * corresponding EP is disabled.
+ *
+ * The device does not know the initial frame in which the OUT token will
+ * arrive, so the HW generates OUTTKNEPDIS - an OUT token was received while
+ * the EP was disabled. On this interrupt the SW starts calculating the
+ * frame for the next transfer.
+ */
+static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
+{
+ struct dwc2_hsotg *hsotg = ep->parent;
+ int dir_in = ep->dir_in;
+ u32 doepmsk;
+
+ if (dir_in || !ep->isochronous)
+ return;
+
+ dwc2_hsotg_complete_request(hsotg, ep, get_ep_head(ep), -ENODATA);
+
+ if (ep->interval > 1 &&
+ ep->target_frame == TARGET_FRAME_INITIAL) {
+ u32 dsts;
+ u32 ctrl;
+
+ dsts = dwc2_readl(hsotg->regs + DSTS);
+ ep->target_frame = dwc2_hsotg_read_frameno(hsotg);
+ dwc2_gadget_incr_frame_num(ep);
+
+ ctrl = dwc2_readl(hsotg->regs + DOEPCTL(ep->index));
+ if (ep->target_frame & 0x1)
+ ctrl |= DXEPCTL_SETODDFR;
+ else
+ ctrl |= DXEPCTL_SETEVENFR;
+
+ dwc2_writel(ctrl, hsotg->regs + DOEPCTL(ep->index));
+ }
+
+ dwc2_gadget_start_next_request(ep);
+ doepmsk = dwc2_readl(hsotg->regs + DOEPMSK);
+ doepmsk &= ~DOEPMSK_OUTTKNEPDISMSK;
+ dwc2_writel(doepmsk, hsotg->regs + DOEPMSK);
+}
+
+/**
+* dwc2_gadget_handle_nak - handle NAK interrupt
+* @hs_ep: The endpoint on which interrupt is asserted.
+*
+* This is the starting point for an ISOC-IN transfer; synchronization is
+* done with the first IN token received from the host while the
+* corresponding EP is disabled.
+*
+* The device does not know when the first IN token will arrive from the
+* host. On its arrival the HW generates two interrupts: 'IN token received
+* while FIFO empty' and 'NAK'. For ISOC-IN, the NAK interrupt means a token
+* arrived and a ZLP was sent in response because there was no data in the
+* FIFO. The SW uses this interrupt to obtain the frame in which the token
+* arrived and then, based on the interval, calculates the frame for the
+* next transfer.
+*/
+static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
+{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ int dir_in = hs_ep->dir_in;
+
+ if (!dir_in || !hs_ep->isochronous)
+ return;
+
+ if (hs_ep->target_frame == TARGET_FRAME_INITIAL) {
+ hs_ep->target_frame = dwc2_hsotg_read_frameno(hsotg);
+ if (hs_ep->interval > 1) {
+ u32 ctrl = dwc2_readl(hsotg->regs +
+ DIEPCTL(hs_ep->index));
+ if (hs_ep->target_frame & 0x1)
+ ctrl |= DXEPCTL_SETODDFR;
+ else
+ ctrl |= DXEPCTL_SETEVENFR;
+
+ dwc2_writel(ctrl, hsotg->regs + DIEPCTL(hs_ep->index));
+ }
+
+ dwc2_hsotg_complete_request(hsotg, hs_ep,
+ get_ep_head(hs_ep), 0);
+ }
+
+ dwc2_gadget_incr_frame_num(hs_ep);
+}
+
+/**
* dwc2_hsotg_epint - handle an in/out endpoint interrupt
* @hsotg: The driver state
* @idx: The index for the endpoint (0..15)
@@ -1954,7 +2217,7 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
u32 ints;
u32 ctrl;
- ints = dwc2_readl(hsotg->regs + epint_reg);
+ ints = dwc2_gadget_read_ep_interrupts(hsotg, idx, dir_in);
ctrl = dwc2_readl(hsotg->regs + epctl_reg);
/* Clear endpoint interrupts */
@@ -1973,11 +2236,10 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
if (idx == 0 && (ints & (DXEPINT_SETUP | DXEPINT_SETUP_RCVD)))
ints &= ~DXEPINT_XFERCOMPL;
- if (ints & DXEPINT_XFERCOMPL) {
- hs_ep->has_correct_parity = 1;
- if (hs_ep->isochronous && hs_ep->interval == 1)
- dwc2_hsotg_change_ep_iso_parity(hsotg, epctl_reg);
+ if (ints & DXEPINT_STSPHSERCVD)
+ dev_dbg(hsotg->dev, "%s: StsPhseRcvd asserted\n", __func__);
+ if (ints & DXEPINT_XFERCOMPL) {
dev_dbg(hsotg->dev,
"%s: XferCompl: DxEPCTL=0x%08x, DXEPTSIZ=%08x\n",
__func__, dwc2_readl(hsotg->regs + epctl_reg),
@@ -1988,7 +2250,12 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
* at completing IN requests here
*/
if (dir_in) {
+ if (hs_ep->isochronous && hs_ep->interval > 1)
+ dwc2_gadget_incr_frame_num(hs_ep);
+
dwc2_hsotg_complete_in(hsotg, hs_ep);
+ if (ints & DXEPINT_NAKINTRPT)
+ ints &= ~DXEPINT_NAKINTRPT;
if (idx == 0 && !hs_ep->req)
dwc2_hsotg_enqueue_setup(hsotg);
@@ -1997,28 +2264,21 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
* We're using DMA, we need to fire an OutDone here
* as we ignore the RXFIFO.
*/
+ if (hs_ep->isochronous && hs_ep->interval > 1)
+ dwc2_gadget_incr_frame_num(hs_ep);
dwc2_hsotg_handle_outdone(hsotg, idx);
}
}
- if (ints & DXEPINT_EPDISBLD) {
- dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);
+ if (ints & DXEPINT_EPDISBLD)
+ dwc2_gadget_handle_ep_disabled(hs_ep);
- if (dir_in) {
- int epctl = dwc2_readl(hsotg->regs + epctl_reg);
+ if (ints & DXEPINT_OUTTKNEPDIS)
+ dwc2_gadget_handle_out_token_ep_disabled(hs_ep);
- dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
-
- if ((epctl & DXEPCTL_STALL) &&
- (epctl & DXEPCTL_EPTYPE_BULK)) {
- int dctl = dwc2_readl(hsotg->regs + DCTL);
-
- dctl |= DCTL_CGNPINNAK;
- dwc2_writel(dctl, hsotg->regs + DCTL);
- }
- }
- }
+ if (ints & DXEPINT_NAKINTRPT)
+ dwc2_gadget_handle_nak(hs_ep);
if (ints & DXEPINT_AHBERR)
dev_dbg(hsotg->dev, "%s: AHBErr\n", __func__);
@@ -2046,20 +2306,20 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
if (dir_in && !hs_ep->isochronous) {
/* not sure if this is important, but we'll clear it anyway */
- if (ints & DIEPMSK_INTKNTXFEMPMSK) {
+ if (ints & DXEPINT_INTKNTXFEMP) {
dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n",
__func__, idx);
}
/* this probably means something bad is happening */
- if (ints & DIEPMSK_INTKNEPMISMSK) {
+ if (ints & DXEPINT_INTKNEPMIS) {
dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n",
__func__, idx);
}
/* FIFO has space or is empty (see GAHBCFG) */
if (hsotg->dedicated_fifos &&
- ints & DIEPMSK_TXFIFOEMPTY) {
+ ints & DXEPINT_TXFEMP) {
dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n",
__func__, idx);
if (!using_dma(hsotg))
@@ -2322,18 +2582,16 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
dwc2_writel(((hsotg->dedicated_fifos && !using_dma(hsotg)) ?
DIEPMSK_TXFIFOEMPTY | DIEPMSK_INTKNTXFEMPMSK : 0) |
DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK |
- DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK |
- DIEPMSK_INTKNEPMISMSK,
+ DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK,
hsotg->regs + DIEPMSK);
/*
* don't need XferCompl, we get that from RXFIFO in slave mode. In
* DMA mode we may need this.
*/
- dwc2_writel((using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK |
- DIEPMSK_TIMEOUTMSK) : 0) |
+ dwc2_writel((using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK) : 0) |
DOEPMSK_EPDISBLDMSK | DOEPMSK_AHBERRMSK |
- DOEPMSK_SETUPMSK,
+ DOEPMSK_SETUPMSK | DOEPMSK_STSPHSERCVDMSK,
hsotg->regs + DOEPMSK);
dwc2_writel(0, hsotg->regs + DAINTMSK);
@@ -2414,6 +2672,85 @@ void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg)
}
/**
+ * dwc2_gadget_handle_incomplete_isoc_in - handle incomplete ISO IN Interrupt.
+ * @hsotg: The device state.
+ *
+ * This interrupt indicates one of the following conditions occurred while
+ * transmitting an ISOC transaction.
+ * - Corrupted IN Token for ISOC EP.
+ * - Packet not complete in FIFO.
+ *
+ * The following actions will be taken:
+ * - Determine the EP
+ * - Disable the EP; when the 'Endpoint Disabled' interrupt arrives, flush
+ *   the FIFO
+ */
+static void dwc2_gadget_handle_incomplete_isoc_in(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hsotg_ep *hs_ep;
+ u32 epctrl;
+ u32 idx;
+
+ dev_dbg(hsotg->dev, "Incomplete isoc in interrupt received:\n");
+
+ for (idx = 1; idx <= hsotg->num_of_eps; idx++) {
+ hs_ep = hsotg->eps_in[idx];
+ epctrl = dwc2_readl(hsotg->regs + DIEPCTL(idx));
+ if ((epctrl & DXEPCTL_EPENA) && hs_ep->isochronous &&
+ dwc2_gadget_target_frame_elapsed(hs_ep)) {
+ epctrl |= DXEPCTL_SNAK;
+ epctrl |= DXEPCTL_EPDIS;
+ dwc2_writel(epctrl, hsotg->regs + DIEPCTL(idx));
+ }
+ }
+
+ /* Clear interrupt */
+ dwc2_writel(GINTSTS_INCOMPL_SOIN, hsotg->regs + GINTSTS);
+}
+
+/**
+ * dwc2_gadget_handle_incomplete_isoc_out - handle incomplete ISO OUT Interrupt
+ * @hsotg: The device state.
+ *
+ * This interrupt indicates one of the following conditions occurred while
+ * receiving an ISOC transaction.
+ * - Corrupted OUT Token for ISOC EP.
+ * - Packet not complete in FIFO.
+ *
+ * The following actions will be taken:
+ * - Determine the EP
+ * - Set DCTL_SGOUTNAK and unmask GOUTNAKEFF if target frame elapsed.
+ */
+static void dwc2_gadget_handle_incomplete_isoc_out(struct dwc2_hsotg *hsotg)
+{
+ u32 gintsts;
+ u32 gintmsk;
+ u32 epctrl;
+ struct dwc2_hsotg_ep *hs_ep;
+ int idx;
+
+ dev_dbg(hsotg->dev, "%s: GINTSTS_INCOMPL_SOOUT\n", __func__);
+
+ for (idx = 1; idx <= hsotg->num_of_eps; idx++) {
+ hs_ep = hsotg->eps_out[idx];
+ epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx));
+ if ((epctrl & DXEPCTL_EPENA) && hs_ep->isochronous &&
+ dwc2_gadget_target_frame_elapsed(hs_ep)) {
+ /* Unmask GOUTNAKEFF interrupt */
+ gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
+ gintmsk |= GINTSTS_GOUTNAKEFF;
+ dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
+
+ gintsts = dwc2_readl(hsotg->regs + GINTSTS);
+ if (!(gintsts & GINTSTS_GOUTNAKEFF))
+ __orr32(hsotg->regs + DCTL, DCTL_SGOUTNAK);
+ }
+ }
+
+ /* Clear interrupt */
+ dwc2_writel(GINTSTS_INCOMPL_SOOUT, hsotg->regs + GINTSTS);
+}
+
+/**
* dwc2_hsotg_irq - handle device interrupt
* @irq: The IRQ number triggered
* @pw: The pw value when registered the handler.
@@ -2545,11 +2882,29 @@ irq_retry:
*/
if (gintsts & GINTSTS_GOUTNAKEFF) {
- dev_info(hsotg->dev, "GOUTNakEff triggered\n");
+ u8 idx;
+ u32 epctrl;
+ u32 gintmsk;
+ struct dwc2_hsotg_ep *hs_ep;
- __orr32(hsotg->regs + DCTL, DCTL_CGOUTNAK);
+ /* Mask this interrupt */
+ gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
+ gintmsk &= ~GINTSTS_GOUTNAKEFF;
+ dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
- dwc2_hsotg_dump(hsotg);
+ dev_dbg(hsotg->dev, "GOUTNakEff triggered\n");
+ for (idx = 1; idx <= hsotg->num_of_eps; idx++) {
+ hs_ep = hsotg->eps_out[idx];
+ epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx));
+
+ if ((epctrl & DXEPCTL_EPENA) && hs_ep->isochronous) {
+ epctrl |= DXEPCTL_SNAK;
+ epctrl |= DXEPCTL_EPDIS;
+ dwc2_writel(epctrl, hsotg->regs + DOEPCTL(idx));
+ }
+ }
+
+ /* This interrupt bit is cleared in DXEPINT_EPDISBLD handler */
}
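
Taken together with the two handlers added earlier, ISOC-OUT shutdown now proceeds in three steps (a summary of the code above, not text from the patch):

/* 1. GINTSTS_INCOMPL_SOOUT: set DCTL_SGOUTNAK, unmask GOUTNAKEFF.
 * 2. GINTSTS_GOUTNAKEFF:    mask itself, then set DXEPCTL_SNAK | DXEPCTL_EPDIS
 *                           on every enabled ISOC-OUT endpoint.
 * 3. DXEPINT_EPDISBLD:      clear the global NAK (DCTL_CGOUTNAK), complete
 *                           expired requests, start the next transfer.
 */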
if (gintsts & GINTSTS_GINNAKEFF) {
@@ -2560,39 +2915,11 @@ irq_retry:
dwc2_hsotg_dump(hsotg);
}
- if (gintsts & GINTSTS_INCOMPL_SOIN) {
- u32 idx, epctl_reg;
- struct dwc2_hsotg_ep *hs_ep;
-
- dev_dbg(hsotg->dev, "%s: GINTSTS_INCOMPL_SOIN\n", __func__);
- for (idx = 1; idx < hsotg->num_of_eps; idx++) {
- hs_ep = hsotg->eps_in[idx];
-
- if (!hs_ep->isochronous || hs_ep->has_correct_parity)
- continue;
+ if (gintsts & GINTSTS_INCOMPL_SOIN)
+ dwc2_gadget_handle_incomplete_isoc_in(hsotg);
- epctl_reg = DIEPCTL(idx);
- dwc2_hsotg_change_ep_iso_parity(hsotg, epctl_reg);
- }
- dwc2_writel(GINTSTS_INCOMPL_SOIN, hsotg->regs + GINTSTS);
- }
-
- if (gintsts & GINTSTS_INCOMPL_SOOUT) {
- u32 idx, epctl_reg;
- struct dwc2_hsotg_ep *hs_ep;
-
- dev_dbg(hsotg->dev, "%s: GINTSTS_INCOMPL_SOOUT\n", __func__);
- for (idx = 1; idx < hsotg->num_of_eps; idx++) {
- hs_ep = hsotg->eps_out[idx];
-
- if (!hs_ep->isochronous || hs_ep->has_correct_parity)
- continue;
-
- epctl_reg = DOEPCTL(idx);
- dwc2_hsotg_change_ep_iso_parity(hsotg, epctl_reg);
- }
- dwc2_writel(GINTSTS_INCOMPL_SOOUT, hsotg->regs + GINTSTS);
- }
+ if (gintsts & GINTSTS_INCOMPL_SOOUT)
+ dwc2_gadget_handle_incomplete_isoc_out(hsotg);
/*
* if we've had fifo events, we should try and go around the
@@ -2624,6 +2951,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
u32 epctrl_reg;
u32 epctrl;
u32 mps;
+ u32 mask;
unsigned int dir_in;
unsigned int i, val, size;
int ret = 0;
@@ -2666,15 +2994,6 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
*/
epctrl |= DXEPCTL_USBACTEP;
- /*
- * set the NAK status on the endpoint, otherwise we might try and
- * do something with data that we've yet got a request to process
- * since the RXFIFO will take data for an endpoint even if the
- * size register hasn't been set.
- */
-
- epctrl |= DXEPCTL_SNAK;
-
/* update the endpoint state */
dwc2_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps, dir_in);
@@ -2683,18 +3002,24 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
hs_ep->periodic = 0;
hs_ep->halted = 0;
hs_ep->interval = desc->bInterval;
- hs_ep->has_correct_parity = 0;
-
- if (hs_ep->interval > 1 && hs_ep->mc > 1)
- dev_err(hsotg->dev, "MC > 1 when interval is not 1\n");
switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
case USB_ENDPOINT_XFER_ISOC:
epctrl |= DXEPCTL_EPTYPE_ISO;
epctrl |= DXEPCTL_SETEVENFR;
hs_ep->isochronous = 1;
- if (dir_in)
+ hs_ep->interval = 1 << (desc->bInterval - 1);
+ hs_ep->target_frame = TARGET_FRAME_INITIAL;
+ if (dir_in) {
hs_ep->periodic = 1;
+ mask = dwc2_readl(hsotg->regs + DIEPMSK);
+ mask |= DIEPMSK_NAKMSK;
+ dwc2_writel(mask, hsotg->regs + DIEPMSK);
+ } else {
+ mask = dwc2_readl(hsotg->regs + DOEPMSK);
+ mask |= DOEPMSK_OUTTKNEPDISMSK;
+ dwc2_writel(mask, hsotg->regs + DOEPMSK);
+ }
break;
case USB_ENDPOINT_XFER_BULK:
@@ -2705,6 +3030,9 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
if (dir_in)
hs_ep->periodic = 1;
+ if (hsotg->gadget.speed == USB_SPEED_HIGH)
+ hs_ep->interval = 1 << (desc->bInterval - 1);
+
epctrl |= DXEPCTL_EPTYPE_INTERRUPT;
break;
@@ -2758,7 +3086,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
}
/* for non control endpoints, set PID to D0 */
- if (index)
+ if (index && !hs_ep->isochronous)
epctrl |= DXEPCTL_SETD0PID;
dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
@@ -2875,10 +3203,8 @@ static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
dev_warn(hsotg->dev,
"%s: timeout DIEPINT.NAKEFF\n", __func__);
} else {
- /* Clear any pending nak effect interrupt */
- dwc2_writel(GINTSTS_GOUTNAKEFF, hsotg->regs + GINTSTS);
-
- __orr32(hsotg->regs + DCTL, DCTL_SGOUTNAK);
+ if (!(dwc2_readl(hsotg->regs + GINTSTS) & GINTSTS_GOUTNAKEFF))
+ __orr32(hsotg->regs + DCTL, DCTL_SGOUTNAK);
/* Wait for global nak to take effect */
if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index b5c7793a2..137543532 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -367,7 +367,8 @@ static void pmap_unschedule(unsigned long *map, int bits_per_period,
* @fmt: The format for printf.
* @...: The args for printf.
*/
-static void cat_printf(char **buf, size_t *size, const char *fmt, ...)
+static __printf(3, 4)
+void cat_printf(char **buf, size_t *size, const char *fmt, ...)
{
va_list args;
int i;
diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h
index 281b57b36..efc3bcde2 100644
--- a/drivers/usb/dwc2/hw.h
+++ b/drivers/usb/dwc2/hw.h
@@ -459,6 +459,9 @@
#define DSTS_SUSPSTS (1 << 0)
#define DIEPMSK HSOTG_REG(0x810)
+#define DIEPMSK_NAKMSK (1 << 13)
+#define DIEPMSK_BNAININTRMSK (1 << 9)
+#define DIEPMSK_TXFIFOUNDRNMSK (1 << 8)
#define DIEPMSK_TXFIFOEMPTY (1 << 7)
#define DIEPMSK_INEPNAKEFFMSK (1 << 6)
#define DIEPMSK_INTKNEPMISMSK (1 << 5)
@@ -470,6 +473,7 @@
#define DOEPMSK HSOTG_REG(0x814)
#define DOEPMSK_BACK2BACKSETUP (1 << 6)
+#define DOEPMSK_STSPHSERCVDMSK (1 << 5)
#define DOEPMSK_OUTTKNEPDISMSK (1 << 4)
#define DOEPMSK_SETUPMSK (1 << 3)
#define DOEPMSK_AHBERRMSK (1 << 2)
@@ -486,6 +490,7 @@
#define DTKNQR2 HSOTG_REG(0x824)
#define DTKNQR3 HSOTG_REG(0x830)
#define DTKNQR4 HSOTG_REG(0x834)
+#define DIEPEMPMSK HSOTG_REG(0x834)
#define DVBUSDIS HSOTG_REG(0x828)
#define DVBUSPULSE HSOTG_REG(0x82C)
@@ -544,9 +549,18 @@
#define DIEPINT(_a) HSOTG_REG(0x908 + ((_a) * 0x20))
#define DOEPINT(_a) HSOTG_REG(0xB08 + ((_a) * 0x20))
#define DXEPINT_SETUP_RCVD (1 << 15)
+#define DXEPINT_NYETINTRPT (1 << 14)
+#define DXEPINT_NAKINTRPT (1 << 13)
+#define DXEPINT_BBLEERRINTRPT (1 << 12)
+#define DXEPINT_PKTDRPSTS (1 << 11)
+#define DXEPINT_BNAINTR (1 << 9)
+#define DXEPINT_TXFIFOUNDRN (1 << 8)
+#define DXEPINT_OUTPKTERR (1 << 8)
+#define DXEPINT_TXFEMP (1 << 7)
#define DXEPINT_INEPNAKEFF (1 << 6)
#define DXEPINT_BACK2BACKSETUP (1 << 6)
#define DXEPINT_INTKNEPMIS (1 << 5)
+#define DXEPINT_STSPHSERCVD (1 << 5)
#define DXEPINT_INTKNTXFEMP (1 << 4)
#define DXEPINT_OUTTKNEPDIS (1 << 4)
#define DXEPINT_TIMEOUT (1 << 3)
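
Several of the new DXEPINT_* names intentionally alias one bit position: the DIEPINTn/DOEPINTn registers assign different meanings per direction, so bit 8 is TxFIFOUndrn on IN endpoints but OutPktErr on OUT, and bits 4, 5 and 6 are similarly overloaded. A decoder therefore has to choose the name by endpoint direction, e.g.:

	/* hypothetical helper, not part of the driver */
	static const char *dxepint_bit8_name(bool dir_in)
	{
		return dir_in ? "TxFIFOUndrn" : "OutPktErr";
	}
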
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index fc6f5251d..530959a8a 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -45,6 +45,7 @@
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_data/s3c-hsotg.h>
+#include <linux/reset.h>
#include <linux/usb/of.h>
@@ -337,6 +338,24 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
{
int i, ret;
+ hsotg->reset = devm_reset_control_get_optional(hsotg->dev, "dwc2");
+ if (IS_ERR(hsotg->reset)) {
+ ret = PTR_ERR(hsotg->reset);
+ switch (ret) {
+ case -ENOENT:
+ case -ENOTSUPP:
+ hsotg->reset = NULL;
+ break;
+ default:
+ dev_err(hsotg->dev, "error getting reset control %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (hsotg->reset)
+ reset_control_deassert(hsotg->reset);
+
/* Set default UTMI width */
hsotg->phyif = GUSBCFG_PHYIF16;
@@ -434,6 +453,9 @@ static int dwc2_driver_remove(struct platform_device *dev)
if (hsotg->ll_hw_enabled)
dwc2_lowlevel_hw_disable(hsotg);
+ if (hsotg->reset)
+ reset_control_assert(hsotg->reset);
+
return 0;
}
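
devm_reset_control_get_optional() returns an ERR_PTR, and the hunk above treats -ENOENT/-ENOTSUPP as "no reset line described for this device", continuing with a NULL control, while any other error (including -EPROBE_DEFER) fails the probe. Condensed to a sketch that reuses the same "dwc2" reset name:

	static int get_optional_reset(struct device *dev,
				      struct reset_control **rst)
	{
		*rst = devm_reset_control_get_optional(dev, "dwc2");
		if (IS_ERR(*rst)) {
			int err = PTR_ERR(*rst);

			if (err == -ENOENT || err == -ENOTSUPP) {
				*rst = NULL;	/* run without a reset line */
				return 0;
			}
			return err;	/* real failure, or probe deferral */
		}

		reset_control_deassert(*rst);	/* release block from reset */
		return 0;
	}

The matching reset_control_assert() in dwc2_driver_remove() puts the block back into reset on unbind.
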
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index a590cd225..35d092456 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -41,14 +41,13 @@
#include <linux/usb/of.h>
#include <linux/usb/otg.h>
-#include "platform_data.h"
#include "core.h"
#include "gadget.h"
#include "io.h"
#include "debug.h"
-/* -------------------------------------------------------------------------- */
+#define DWC3_DEFAULT_AUTOSUSPEND_DELAY 5000 /* ms */
void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
{
@@ -149,9 +148,8 @@ static int dwc3_soft_reset(struct dwc3 *dwc)
/*
* dwc3_frame_length_adjustment - Adjusts frame length if required
- * @dwc: Pointer to our controller context structure
- * @fladj: Value of GFLADJ_30MHZ to adjust frame length
*/
-static void dwc3_frame_length_adjustment(struct dwc3 *dwc, u32 fladj)
+static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
{
u32 reg;
u32 dft;
@@ -159,15 +157,15 @@ static void dwc3_frame_length_adjustment(struct dwc3 *dwc, u32 fladj)
if (dwc->revision < DWC3_REVISION_250A)
return;
- if (fladj == 0)
+ if (dwc->fladj == 0)
return;
reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
dft = reg & DWC3_GFLADJ_30MHZ_MASK;
- if (!dev_WARN_ONCE(dwc->dev, dft == fladj,
+ if (!dev_WARN_ONCE(dwc->dev, dft == dwc->fladj,
"request value same as default, ignoring\n")) {
reg &= ~DWC3_GFLADJ_30MHZ_MASK;
- reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | fladj;
+ reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj;
dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
}
}
@@ -507,6 +505,21 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
return 0;
}
+static void dwc3_core_exit(struct dwc3 *dwc)
+{
+ dwc3_event_buffers_cleanup(dwc);
+
+ usb_phy_shutdown(dwc->usb2_phy);
+ usb_phy_shutdown(dwc->usb3_phy);
+ phy_exit(dwc->usb2_generic_phy);
+ phy_exit(dwc->usb3_generic_phy);
+
+ usb_phy_set_suspend(dwc->usb2_phy, 1);
+ usb_phy_set_suspend(dwc->usb3_phy, 1);
+ phy_power_off(dwc->usb2_generic_phy);
+ phy_power_off(dwc->usb3_generic_phy);
+}
+
/**
* dwc3_core_init - Low-level initialization of DWC3 Core
* @dwc: Pointer to our controller context structure
@@ -556,6 +569,10 @@ static int dwc3_core_init(struct dwc3 *dwc)
if (ret)
goto err0;
+ ret = dwc3_phy_setup(dwc);
+ if (ret)
+ goto err0;
+
reg = dwc3_readl(dwc->regs, DWC3_GCTL);
reg &= ~DWC3_GCTL_SCALEDOWN_MASK;
@@ -622,22 +639,45 @@ static int dwc3_core_init(struct dwc3 *dwc)
if (dwc->revision < DWC3_REVISION_190A)
reg |= DWC3_GCTL_U2RSTECN;
- dwc3_core_num_eps(dwc);
-
dwc3_writel(dwc->regs, DWC3_GCTL, reg);
- ret = dwc3_alloc_scratch_buffers(dwc);
- if (ret)
- goto err1;
+ dwc3_core_num_eps(dwc);
ret = dwc3_setup_scratch_buffers(dwc);
if (ret)
+ goto err1;
+
+ /* Adjust Frame Length */
+ dwc3_frame_length_adjustment(dwc);
+
+ usb_phy_set_suspend(dwc->usb2_phy, 0);
+ usb_phy_set_suspend(dwc->usb3_phy, 0);
+ ret = phy_power_on(dwc->usb2_generic_phy);
+ if (ret < 0)
goto err2;
+ ret = phy_power_on(dwc->usb3_generic_phy);
+ if (ret < 0)
+ goto err3;
+
+ ret = dwc3_event_buffers_setup(dwc);
+ if (ret) {
+ dev_err(dwc->dev, "failed to setup event buffers\n");
+ goto err4;
+ }
+
return 0;
+err4:
+ phy_power_off(dwc->usb2_generic_phy);
+
+err3:
+ phy_power_off(dwc->usb3_generic_phy);
+
err2:
- dwc3_free_scratch_buffers(dwc);
+ usb_phy_set_suspend(dwc->usb2_phy, 1);
+ usb_phy_set_suspend(dwc->usb3_phy, 1);
+ dwc3_core_exit(dwc);
err1:
usb_phy_shutdown(dwc->usb2_phy);
@@ -649,15 +689,6 @@ err0:
return ret;
}
-static void dwc3_core_exit(struct dwc3 *dwc)
-{
- dwc3_free_scratch_buffers(dwc);
- usb_phy_shutdown(dwc->usb2_phy);
- usb_phy_shutdown(dwc->usb3_phy);
- phy_exit(dwc->usb2_generic_phy);
- phy_exit(dwc->usb3_generic_phy);
-}
-
static int dwc3_core_get_phy(struct dwc3 *dwc)
{
struct device *dev = dwc->dev;
@@ -735,7 +766,8 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
ret = dwc3_gadget_init(dwc);
if (ret) {
- dev_err(dev, "failed to initialize gadget\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to initialize gadget\n");
return ret;
}
break;
@@ -743,7 +775,8 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
ret = dwc3_host_init(dwc);
if (ret) {
- dev_err(dev, "failed to initialize host\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to initialize host\n");
return ret;
}
break;
@@ -751,13 +784,15 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_OTG);
ret = dwc3_host_init(dwc);
if (ret) {
- dev_err(dev, "failed to initialize host\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to initialize host\n");
return ret;
}
ret = dwc3_gadget_init(dwc);
if (ret) {
- dev_err(dev, "failed to initialize gadget\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to initialize gadget\n");
return ret;
}
break;
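
The repeated ret != -EPROBE_DEFER checks keep deferred probes from spamming the log; the driver core retries them silently later, so only real failures deserve a dev_err(). The pattern condenses to a helper along these lines (hypothetical here; much later kernels grew dev_err_probe() for exactly this):

	static int report_init_error(struct device *dev, int ret,
				     const char *what)
	{
		if (ret && ret != -EPROBE_DEFER)
			dev_err(dev, "failed to initialize %s\n", what);

		return ret;
	}
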
@@ -793,13 +828,11 @@ static void dwc3_core_exit_mode(struct dwc3 *dwc)
static int dwc3_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct dwc3_platform_data *pdata = dev_get_platdata(dev);
struct resource *res;
struct dwc3 *dwc;
u8 lpm_nyet_threshold;
u8 tx_de_emphasis;
u8 hird_threshold;
- u32 fladj = 0;
int ret;
@@ -814,16 +847,6 @@ static int dwc3_probe(struct platform_device *pdev)
dwc->mem = mem;
dwc->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(dev, "missing IRQ\n");
- return -ENODEV;
- }
- dwc->xhci_resources[1].start = res->start;
- dwc->xhci_resources[1].end = res->end;
- dwc->xhci_resources[1].flags = res->flags;
- dwc->xhci_resources[1].name = res->name;
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "missing memory resource\n");
@@ -909,40 +932,7 @@ static int dwc3_probe(struct platform_device *pdev)
device_property_read_string(dev, "snps,hsphy_interface",
&dwc->hsphy_interface);
device_property_read_u32(dev, "snps,quirk-frame-length-adjustment",
- &fladj);
-
- if (pdata) {
- dwc->maximum_speed = pdata->maximum_speed;
- dwc->has_lpm_erratum = pdata->has_lpm_erratum;
- if (pdata->lpm_nyet_threshold)
- lpm_nyet_threshold = pdata->lpm_nyet_threshold;
- dwc->is_utmi_l1_suspend = pdata->is_utmi_l1_suspend;
- if (pdata->hird_threshold)
- hird_threshold = pdata->hird_threshold;
-
- dwc->usb3_lpm_capable = pdata->usb3_lpm_capable;
- dwc->dr_mode = pdata->dr_mode;
-
- dwc->disable_scramble_quirk = pdata->disable_scramble_quirk;
- dwc->u2exit_lfps_quirk = pdata->u2exit_lfps_quirk;
- dwc->u2ss_inp3_quirk = pdata->u2ss_inp3_quirk;
- dwc->req_p1p2p3_quirk = pdata->req_p1p2p3_quirk;
- dwc->del_p1p2p3_quirk = pdata->del_p1p2p3_quirk;
- dwc->del_phy_power_chg_quirk = pdata->del_phy_power_chg_quirk;
- dwc->lfps_filter_quirk = pdata->lfps_filter_quirk;
- dwc->rx_detect_poll_quirk = pdata->rx_detect_poll_quirk;
- dwc->dis_u3_susphy_quirk = pdata->dis_u3_susphy_quirk;
- dwc->dis_u2_susphy_quirk = pdata->dis_u2_susphy_quirk;
- dwc->dis_enblslpm_quirk = pdata->dis_enblslpm_quirk;
- dwc->dis_rxdet_inp3_quirk = pdata->dis_rxdet_inp3_quirk;
-
- dwc->tx_de_emphasis_quirk = pdata->tx_de_emphasis_quirk;
- if (pdata->tx_de_emphasis)
- tx_de_emphasis = pdata->tx_de_emphasis;
-
- dwc->hsphy_interface = pdata->hsphy_interface;
- fladj = pdata->fladj_value;
- }
+ &dwc->fladj);
dwc->lpm_nyet_threshold = lpm_nyet_threshold;
dwc->tx_de_emphasis = tx_de_emphasis;
@@ -953,10 +943,6 @@ static int dwc3_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dwc);
dwc3_cache_hwparams(dwc);
- ret = dwc3_phy_setup(dwc);
- if (ret)
- goto err0;
-
ret = dwc3_core_get_phy(dwc);
if (ret)
goto err0;
@@ -969,29 +955,43 @@ static int dwc3_probe(struct platform_device *pdev)
dma_set_coherent_mask(dev, dev->parent->coherent_dma_mask);
}
+ pm_runtime_set_active(dev);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY);
pm_runtime_enable(dev);
- pm_runtime_get_sync(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ goto err1;
+
pm_runtime_forbid(dev);
ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
if (ret) {
dev_err(dwc->dev, "failed to allocate event buffers\n");
ret = -ENOMEM;
- goto err1;
+ goto err2;
}
- if (IS_ENABLED(CONFIG_USB_DWC3_HOST))
+ if (IS_ENABLED(CONFIG_USB_DWC3_HOST) &&
+ (dwc->dr_mode == USB_DR_MODE_OTG ||
+ dwc->dr_mode == USB_DR_MODE_UNKNOWN))
dwc->dr_mode = USB_DR_MODE_HOST;
- else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
+ else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET) &&
+ (dwc->dr_mode == USB_DR_MODE_OTG ||
+ dwc->dr_mode == USB_DR_MODE_UNKNOWN))
dwc->dr_mode = USB_DR_MODE_PERIPHERAL;
if (dwc->dr_mode == USB_DR_MODE_UNKNOWN)
dwc->dr_mode = USB_DR_MODE_OTG;
+ ret = dwc3_alloc_scratch_buffers(dwc);
+ if (ret)
+ goto err3;
+
ret = dwc3_core_init(dwc);
if (ret) {
dev_err(dev, "failed to initialize core\n");
- goto err1;
+ goto err4;
}
/* Check the maximum_speed parameter */
@@ -1021,31 +1021,12 @@ static int dwc3_probe(struct platform_device *pdev)
break;
}
- /* Adjust Frame Length */
- dwc3_frame_length_adjustment(dwc, fladj);
-
- usb_phy_set_suspend(dwc->usb2_phy, 0);
- usb_phy_set_suspend(dwc->usb3_phy, 0);
- ret = phy_power_on(dwc->usb2_generic_phy);
- if (ret < 0)
- goto err2;
-
- ret = phy_power_on(dwc->usb3_generic_phy);
- if (ret < 0)
- goto err3;
-
- ret = dwc3_event_buffers_setup(dwc);
- if (ret) {
- dev_err(dwc->dev, "failed to setup event buffers\n");
- goto err4;
- }
-
ret = dwc3_core_init_mode(dwc);
if (ret)
goto err5;
dwc3_debugfs_init(dwc);
- pm_runtime_allow(dev);
+ pm_runtime_put(dev);
return 0;
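
Probe now follows the canonical runtime-PM bring-up: mark the device active before enabling runtime PM (the hardware is already powered), switch to autosuspend with the 5 second delay defined earlier, take a synchronous reference for the rest of initialization, forbid runtime suspend until userspace allows it, and drop the reference once setup succeeds. Stripped to its skeleton:

	#include <linux/pm_runtime.h>

	static int rpm_probe_prologue(struct device *dev)
	{
		pm_runtime_set_active(dev);	/* hw is already up */
		pm_runtime_use_autosuspend(dev);
		pm_runtime_set_autosuspend_delay(dev, 5000);	/* ms */
		pm_runtime_enable(dev);

		return pm_runtime_get_sync(dev);	/* hold during init */
	}

	static void rpm_probe_epilogue(struct device *dev)
	{
		pm_runtime_put(dev);	/* may autosuspend from here on */
	}
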
@@ -1053,19 +1034,18 @@ err5:
dwc3_event_buffers_cleanup(dwc);
err4:
- phy_power_off(dwc->usb3_generic_phy);
+ dwc3_free_scratch_buffers(dwc);
err3:
- phy_power_off(dwc->usb2_generic_phy);
+ dwc3_free_event_buffers(dwc);
+ dwc3_ulpi_exit(dwc);
err2:
- usb_phy_set_suspend(dwc->usb2_phy, 1);
- usb_phy_set_suspend(dwc->usb3_phy, 1);
- dwc3_core_exit(dwc);
+ pm_runtime_allow(&pdev->dev);
err1:
- dwc3_free_event_buffers(dwc);
- dwc3_ulpi_exit(dwc);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
err0:
/*
@@ -1083,6 +1063,7 @@ static int dwc3_remove(struct platform_device *pdev)
struct dwc3 *dwc = platform_get_drvdata(pdev);
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pm_runtime_get_sync(&pdev->dev);
/*
* restore res->start back to its original value so that, in case the
* probe is deferred, we don't end up getting error in request the
@@ -1092,133 +1073,193 @@ static int dwc3_remove(struct platform_device *pdev)
dwc3_debugfs_exit(dwc);
dwc3_core_exit_mode(dwc);
- dwc3_event_buffers_cleanup(dwc);
- dwc3_free_event_buffers(dwc);
-
- usb_phy_set_suspend(dwc->usb2_phy, 1);
- usb_phy_set_suspend(dwc->usb3_phy, 1);
- phy_power_off(dwc->usb2_generic_phy);
- phy_power_off(dwc->usb3_generic_phy);
dwc3_core_exit(dwc);
dwc3_ulpi_exit(dwc);
pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_allow(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ dwc3_free_event_buffers(dwc);
+ dwc3_free_scratch_buffers(dwc);
+
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int dwc3_suspend(struct device *dev)
+#ifdef CONFIG_PM
+static int dwc3_suspend_common(struct dwc3 *dwc)
{
- struct dwc3 *dwc = dev_get_drvdata(dev);
unsigned long flags;
- spin_lock_irqsave(&dwc->lock, flags);
-
switch (dwc->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
case USB_DR_MODE_OTG:
+ spin_lock_irqsave(&dwc->lock, flags);
dwc3_gadget_suspend(dwc);
- /* FALLTHROUGH */
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ break;
case USB_DR_MODE_HOST:
default:
- dwc3_event_buffers_cleanup(dwc);
+ /* do nothing */
break;
}
- dwc->gctl = dwc3_readl(dwc->regs, DWC3_GCTL);
- spin_unlock_irqrestore(&dwc->lock, flags);
+ dwc3_core_exit(dwc);
- usb_phy_shutdown(dwc->usb3_phy);
- usb_phy_shutdown(dwc->usb2_phy);
- phy_exit(dwc->usb2_generic_phy);
- phy_exit(dwc->usb3_generic_phy);
+ return 0;
+}
- usb_phy_set_suspend(dwc->usb2_phy, 1);
- usb_phy_set_suspend(dwc->usb3_phy, 1);
- WARN_ON(phy_power_off(dwc->usb2_generic_phy) < 0);
- WARN_ON(phy_power_off(dwc->usb3_generic_phy) < 0);
+static int dwc3_resume_common(struct dwc3 *dwc)
+{
+ unsigned long flags;
+ int ret;
- pinctrl_pm_select_sleep_state(dev);
+ ret = dwc3_core_init(dwc);
+ if (ret)
+ return ret;
+
+ switch (dwc->dr_mode) {
+ case USB_DR_MODE_PERIPHERAL:
+ case USB_DR_MODE_OTG:
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc3_gadget_resume(dwc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ /* FALLTHROUGH */
+ case USB_DR_MODE_HOST:
+ default:
+ /* do nothing */
+ break;
+ }
return 0;
}
-static int dwc3_resume(struct device *dev)
+static int dwc3_runtime_checks(struct dwc3 *dwc)
{
- struct dwc3 *dwc = dev_get_drvdata(dev);
- unsigned long flags;
+ switch (dwc->dr_mode) {
+ case USB_DR_MODE_PERIPHERAL:
+ case USB_DR_MODE_OTG:
+ if (dwc->connected)
+ return -EBUSY;
+ break;
+ case USB_DR_MODE_HOST:
+ default:
+ /* do nothing */
+ break;
+ }
+
+ return 0;
+}
+
+static int dwc3_runtime_suspend(struct device *dev)
+{
+ struct dwc3 *dwc = dev_get_drvdata(dev);
int ret;
- pinctrl_pm_select_default_state(dev);
+ if (dwc3_runtime_checks(dwc))
+ return -EBUSY;
- usb_phy_set_suspend(dwc->usb2_phy, 0);
- usb_phy_set_suspend(dwc->usb3_phy, 0);
- ret = phy_power_on(dwc->usb2_generic_phy);
- if (ret < 0)
+ ret = dwc3_suspend_common(dwc);
+ if (ret)
return ret;
- ret = phy_power_on(dwc->usb3_generic_phy);
- if (ret < 0)
- goto err_usb2phy_power;
+ device_init_wakeup(dev, true);
- usb_phy_init(dwc->usb3_phy);
- usb_phy_init(dwc->usb2_phy);
- ret = phy_init(dwc->usb2_generic_phy);
- if (ret < 0)
- goto err_usb3phy_power;
+ return 0;
+}
- ret = phy_init(dwc->usb3_generic_phy);
- if (ret < 0)
- goto err_usb2phy_init;
+static int dwc3_runtime_resume(struct device *dev)
+{
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+ int ret;
- spin_lock_irqsave(&dwc->lock, flags);
+ device_init_wakeup(dev, false);
- dwc3_event_buffers_setup(dwc);
- dwc3_writel(dwc->regs, DWC3_GCTL, dwc->gctl);
+ ret = dwc3_resume_common(dwc);
+ if (ret)
+ return ret;
switch (dwc->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
case USB_DR_MODE_OTG:
- dwc3_gadget_resume(dwc);
- /* FALLTHROUGH */
+ dwc3_gadget_process_pending_events(dwc);
+ break;
case USB_DR_MODE_HOST:
default:
/* do nothing */
break;
}
- spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put(dev);
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
+ return 0;
+}
+
+static int dwc3_runtime_idle(struct device *dev)
+{
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+
+ switch (dwc->dr_mode) {
+ case USB_DR_MODE_PERIPHERAL:
+ case USB_DR_MODE_OTG:
+ if (dwc3_runtime_checks(dwc))
+ return -EBUSY;
+ break;
+ case USB_DR_MODE_HOST:
+ default:
+ /* do nothing */
+ break;
+ }
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_autosuspend(dev);
return 0;
+}
+#endif /* CONFIG_PM */
-err_usb2phy_init:
- phy_exit(dwc->usb2_generic_phy);
+#ifdef CONFIG_PM_SLEEP
+static int dwc3_suspend(struct device *dev)
+{
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+ int ret;
-err_usb3phy_power:
- phy_power_off(dwc->usb3_generic_phy);
+ ret = dwc3_suspend_common(dwc);
+ if (ret)
+ return ret;
-err_usb2phy_power:
- phy_power_off(dwc->usb2_generic_phy);
+ pinctrl_pm_select_sleep_state(dev);
- return ret;
+ return 0;
}
+static int dwc3_resume(struct device *dev)
+{
+ struct dwc3 *dwc = dev_get_drvdata(dev);
+ int ret;
+
+ pinctrl_pm_select_default_state(dev);
+
+ ret = dwc3_resume_common(dwc);
+ if (ret)
+ return ret;
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
static const struct dev_pm_ops dwc3_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume)
+ SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume,
+ dwc3_runtime_idle)
};
-#define DWC3_PM_OPS &(dwc3_dev_pm_ops)
-#else
-#define DWC3_PM_OPS NULL
-#endif
-
#ifdef CONFIG_OF
static const struct of_device_id of_dwc3_match[] = {
{
@@ -1250,7 +1291,7 @@ static struct platform_driver dwc3_driver = {
.name = "dwc3",
.of_match_table = of_match_ptr(of_dwc3_match),
.acpi_match_table = ACPI_PTR(dwc3_acpi_match),
- .pm = DWC3_PM_OPS,
+ .pm = &dwc3_dev_pm_ops,
},
};
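
Dropping the DWC3_PM_OPS indirection is safe because SET_SYSTEM_SLEEP_PM_OPS() and SET_RUNTIME_PM_OPS() expand to empty initializers when CONFIG_PM_SLEEP/CONFIG_PM are disabled, so the ops table may be defined and referenced unconditionally. Simplified, the sleep-side macro in include/linux/pm.h behaves like:

	#ifdef CONFIG_PM_SLEEP
	#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
		.suspend = suspend_fn, \
		.resume = resume_fn,	/* plus freeze/thaw/poweroff/restore */
	#else
	#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
	#endif
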
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 654050684..45d6de510 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -86,6 +86,7 @@
#define DWC3_GCTL 0xc110
#define DWC3_GEVTEN 0xc114
#define DWC3_GSTS 0xc118
+#define DWC3_GUCTL1 0xc11c
#define DWC3_GSNPSID 0xc120
#define DWC3_GGPIO 0xc124
#define DWC3_GUID 0xc128
@@ -138,10 +139,12 @@
#define DWC3_DGCMDPAR 0xc710
#define DWC3_DGCMD 0xc714
#define DWC3_DALEPENA 0xc720
-#define DWC3_DEPCMDPAR2(n) (0xc800 + (n * 0x10))
-#define DWC3_DEPCMDPAR1(n) (0xc804 + (n * 0x10))
-#define DWC3_DEPCMDPAR0(n) (0xc808 + (n * 0x10))
-#define DWC3_DEPCMD(n) (0xc80c + (n * 0x10))
+
+#define DWC3_DEP_BASE(n) (0xc800 + (n * 0x10))
+#define DWC3_DEPCMDPAR2 0x00
+#define DWC3_DEPCMDPAR1 0x04
+#define DWC3_DEPCMDPAR0 0x08
+#define DWC3_DEPCMD 0x0c
/* OTG Registers */
#define DWC3_OCFG 0xcc00
@@ -231,6 +234,14 @@
#define DWC3_GEVNTSIZ_INTMASK (1 << 31)
#define DWC3_GEVNTSIZ_SIZE(n) ((n) & 0xffff)
+/* Global HWPARAMS0 Register */
+#define DWC3_GHWPARAMS0_USB3_MODE(n) ((n) & 0x3)
+#define DWC3_GHWPARAMS0_MBUS_TYPE(n) (((n) >> 3) & 0x7)
+#define DWC3_GHWPARAMS0_SBUS_TYPE(n) (((n) >> 6) & 0x3)
+#define DWC3_GHWPARAMS0_MDWIDTH(n) (((n) >> 8) & 0xff)
+#define DWC3_GHWPARAMS0_SDWIDTH(n) (((n) >> 16) & 0xff)
+#define DWC3_GHWPARAMS0_AWIDTH(n) (((n) >> 24) & 0xff)
+
/* Global HWPARAMS1 Register */
#define DWC3_GHWPARAMS1_EN_PWROPT(n) (((n) & (3 << 24)) >> 24)
#define DWC3_GHWPARAMS1_EN_PWROPT_NO 0
@@ -260,6 +271,10 @@
/* Global HWPARAMS6 Register */
#define DWC3_GHWPARAMS6_EN_FPGA (1 << 7)
+/* Global HWPARAMS7 Register */
+#define DWC3_GHWPARAMS7_RAM1_DEPTH(n) ((n) & 0xffff)
+#define DWC3_GHWPARAMS7_RAM2_DEPTH(n) (((n) >> 16) & 0xffff)
+
/* Global Frame Length Adjustment Register */
#define DWC3_GFLADJ_30MHZ_SDBND_SEL (1 << 7)
#define DWC3_GFLADJ_30MHZ_MASK 0x3f
@@ -468,6 +483,8 @@ struct dwc3_event_buffer {
* @endpoint: usb endpoint
* @pending_list: list of pending requests for this endpoint
* @started_list: list of started requests on this endpoint
+ * @lock: spinlock for endpoint request queue traversal
+ * @regs: pointer to first endpoint register
* @trb_pool: array of transaction buffers
* @trb_pool_dma: dma address of @trb_pool
* @trb_enqueue: enqueue 'pointer' into TRB array
@@ -480,6 +497,8 @@ struct dwc3_event_buffer {
* @type: set to bmAttributes & USB_ENDPOINT_XFERTYPE_MASK
* @resource_index: Resource transfer index
* @interval: the interval on which the ISOC transfer is started
+ * @allocated_requests: number of requests allocated
+ * @queued_requests: number of requests queued for transfer
* @name: a human readable name e.g. ep1out-bulk
* @direction: true for TX, false for RX
* @stream_capable: true when streams are enabled
@@ -489,6 +508,9 @@ struct dwc3_ep {
struct list_head pending_list;
struct list_head started_list;
+ spinlock_t lock;
+ void __iomem *regs;
+
struct dwc3_trb *trb_pool;
dma_addr_t trb_pool_dma;
const struct usb_ss_ep_comp_descriptor *comp_desc;
@@ -521,6 +543,8 @@ struct dwc3_ep {
u8 number;
u8 type;
u8 resource_index;
+ u32 allocated_requests;
+ u32 queued_requests;
u32 interval;
char name[20];
@@ -712,6 +736,8 @@ struct dwc3_scratchpad_array {
* @gadget_driver: pointer to the gadget driver
* @regs: base address for our registers
* @regs_size: address space size
+ * @fladj: frame length adjustment
+ * @irq_gadget: peripheral controller's IRQ number
* @nr_scratch: number of scratch buffers
* @u1u2: only used on revisions <1.83a for workaround
* @maximum_speed: maximum speed requested (mainly for testing purposes)
@@ -744,6 +770,7 @@ struct dwc3_scratchpad_array {
* @lpm_nyet_threshold: LPM NYET response threshold
* @hird_threshold: HIRD threshold
* @hsphy_interface: "utmi" or "ulpi"
+ * @connected: true when we're connected to a host, false otherwise
* @delayed_status: true when gadget driver asks for delayed status
* @ep0_bounced: true when we used bounce buffer
* @ep0_expect_in: true when we expect a DATA IN transfer
@@ -754,6 +781,7 @@ struct dwc3_scratchpad_array {
* 0 - utmi_sleep_n
* 1 - utmi_l1_suspend_n
* @is_fpga: true when we are using the FPGA board
+ * @pending_events: true when we have pending IRQs to be handled
* @pullups_connected: true when Run/Stop bit is set
* @setup_packet_pending: true when there's a Setup Packet in FIFO. Workaround
* @start_config_issued: true when StartConfig command has been issued
@@ -818,10 +846,8 @@ struct dwc3 {
enum usb_dr_mode dr_mode;
- /* used for suspend/resume */
- u32 dcfg;
- u32 gctl;
-
+ u32 fladj;
+ u32 irq_gadget;
u32 nr_scratch;
u32 u1u2;
u32 maximum_speed;
@@ -860,7 +886,7 @@ struct dwc3 {
* just so dwc31 revisions are always larger than dwc3.
*/
#define DWC3_REVISION_IS_DWC31 0x80000000
-#define DWC3_USB31_REVISION_110A (0x3131302a | DWC3_REVISION_IS_USB31)
+#define DWC3_USB31_REVISION_110A (0x3131302a | DWC3_REVISION_IS_DWC31)
enum dwc3_ep0_next ep0_next_event;
enum dwc3_ep0_state ep0state;
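
The one-liner above repairs an identifier that never existed: the tag macro defined a few lines up is DWC3_REVISION_IS_DWC31, not DWC3_REVISION_IS_USB31. ORing bit 31 into every DWC_usb31 revision constant keeps the two revision ranges disjoint, so plain numeric comparisons work across both cores:

	static bool is_dwc31(u32 revision)
	{
		/* bit 31 lifts DWC_usb31 revisions above the DWC3 range */
		return revision & DWC3_REVISION_IS_DWC31;
	}
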
@@ -890,6 +916,7 @@ struct dwc3 {
const char *hsphy_interface;
+ unsigned connected:1;
unsigned delayed_status:1;
unsigned ep0_bounced:1;
unsigned ep0_expect_in:1;
@@ -897,6 +924,7 @@ struct dwc3 {
unsigned has_lpm_erratum:1;
unsigned is_utmi_l1_suspend:1;
unsigned is_fpga:1;
+ unsigned pending_events:1;
unsigned pullups_connected:1;
unsigned setup_packet_pending:1;
unsigned three_stage_setup:1;
@@ -1094,8 +1122,8 @@ void dwc3_gadget_exit(struct dwc3 *dwc);
int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode);
int dwc3_gadget_get_link_state(struct dwc3 *dwc);
int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state);
-int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
- unsigned cmd, struct dwc3_gadget_ep_cmd_params *params);
+int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
+ struct dwc3_gadget_ep_cmd_params *params);
int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param);
#else
static inline int dwc3_gadget_init(struct dwc3 *dwc)
@@ -1110,8 +1138,8 @@ static inline int dwc3_gadget_set_link_state(struct dwc3 *dwc,
enum dwc3_link_state state)
{ return 0; }
-static inline int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
- unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
+static inline int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
+ struct dwc3_gadget_ep_cmd_params *params)
{ return 0; }
static inline int dwc3_send_gadget_generic_command(struct dwc3 *dwc,
int cmd, u32 param)
@@ -1122,6 +1150,7 @@ static inline int dwc3_send_gadget_generic_command(struct dwc3 *dwc,
#if !IS_ENABLED(CONFIG_USB_DWC3_HOST)
int dwc3_gadget_suspend(struct dwc3 *dwc);
int dwc3_gadget_resume(struct dwc3 *dwc);
+void dwc3_gadget_process_pending_events(struct dwc3 *dwc);
#else
static inline int dwc3_gadget_suspend(struct dwc3 *dwc)
{
@@ -1132,6 +1161,10 @@ static inline int dwc3_gadget_resume(struct dwc3 *dwc)
{
return 0;
}
+
+static inline void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
+{
+}
#endif /* !IS_ENABLED(CONFIG_USB_DWC3_HOST) */
#if IS_ENABLED(CONFIG_USB_DWC3_ULPI)
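
dwc3_gadget_process_pending_events() gets the standard treatment for config-dependent calls: a real prototype when the gadget side is built, an empty static inline otherwise, so call sites need no #ifdef. The general shape (names hypothetical):

	#if IS_ENABLED(CONFIG_FEATURE_FOO)
	void feature_foo_handle(struct device *dev);
	#else
	static inline void feature_foo_handle(struct device *dev) { }
	#endif
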
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index 71e318025..33ab2a203 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -128,56 +128,112 @@ dwc3_gadget_link_string(enum dwc3_link_state link_state)
* dwc3_gadget_event_string - returns event name
* @event: the event code
*/
-static inline const char *dwc3_gadget_event_string(u8 event)
+static inline const char *
+dwc3_gadget_event_string(const struct dwc3_event_devt *event)
{
- switch (event) {
+ static char str[256];
+ enum dwc3_link_state state = event->event_info & DWC3_LINK_STATE_MASK;
+
+ switch (event->type) {
case DWC3_DEVICE_EVENT_DISCONNECT:
- return "Disconnect";
+ sprintf(str, "Disconnect: [%s]",
+ dwc3_gadget_link_string(state));
+ break;
case DWC3_DEVICE_EVENT_RESET:
- return "Reset";
+ sprintf(str, "Reset [%s]", dwc3_gadget_link_string(state));
+ break;
case DWC3_DEVICE_EVENT_CONNECT_DONE:
- return "Connection Done";
+ sprintf(str, "Connection Done [%s]",
+ dwc3_gadget_link_string(state));
+ break;
case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
- return "Link Status Change";
+ sprintf(str, "Link Change [%s]",
+ dwc3_gadget_link_string(state));
+ break;
case DWC3_DEVICE_EVENT_WAKEUP:
- return "WakeUp";
+ sprintf(str, "WakeUp [%s]", dwc3_gadget_link_string(state));
+ break;
case DWC3_DEVICE_EVENT_EOPF:
- return "End-Of-Frame";
+ sprintf(str, "End-Of-Frame [%s]",
+ dwc3_gadget_link_string(state));
+ break;
case DWC3_DEVICE_EVENT_SOF:
- return "Start-Of-Frame";
+ sprintf(str, "Start-Of-Frame [%s]",
+ dwc3_gadget_link_string(state));
+ break;
case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
- return "Erratic Error";
+ sprintf(str, "Erratic Error [%s]",
+ dwc3_gadget_link_string(state));
+ break;
case DWC3_DEVICE_EVENT_CMD_CMPL:
- return "Command Complete";
+ sprintf(str, "Command Complete [%s]",
+ dwc3_gadget_link_string(state));
+ break;
case DWC3_DEVICE_EVENT_OVERFLOW:
- return "Overflow";
+ sprintf(str, "Overflow [%s]", dwc3_gadget_link_string(state));
+ break;
+ default:
+ sprintf(str, "UNKNOWN");
}
- return "UNKNOWN";
+ return str;
}
/**
* dwc3_ep_event_string - returns event name
* @event: the event code
*/
-static inline const char *dwc3_ep_event_string(u8 event)
+static inline const char *
+dwc3_ep_event_string(const struct dwc3_event_depevt *event)
{
- switch (event) {
+ u8 epnum = event->endpoint_number;
+ static char str[256];
+ int status;
+ int ret;
+
+ ret = sprintf(str, "ep%d%s: ", epnum >> 1,
+ (epnum & 1) ? "in" : "out");
+ if (ret < 0)
+ return "UNKNOWN";
+
+ switch (event->endpoint_event) {
case DWC3_DEPEVT_XFERCOMPLETE:
- return "Transfer Complete";
+ strcat(str, "Transfer Complete");
+ break;
case DWC3_DEPEVT_XFERINPROGRESS:
- return "Transfer In-Progress";
+ strcat(str, "Transfer In-Progress");
+ break;
case DWC3_DEPEVT_XFERNOTREADY:
- return "Transfer Not Ready";
+ strcat(str, "Transfer Not Ready");
+ status = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE;
+ strcat(str, status ? " (Active)" : " (Not Active)");
+ break;
case DWC3_DEPEVT_RXTXFIFOEVT:
- return "FIFO";
+ strcat(str, "FIFO");
+ break;
case DWC3_DEPEVT_STREAMEVT:
- return "Stream";
+ status = event->status;
+
+ switch (status) {
+ case DEPEVT_STREAMEVT_FOUND:
+ sprintf(str + ret, " Stream %d Found",
+ event->parameters);
+ break;
+ case DEPEVT_STREAMEVT_NOTFOUND:
+ default:
+ strcat(str, " Stream Not Found");
+ break;
+ }
+
+ break;
case DWC3_DEPEVT_EPCMDCMPLT:
- return "Endpoint Command Complete";
+ strcat(str, "Endpoint Command Complete");
+ break;
+ default:
+ sprintf(str, "UNKNOWN");
}
- return "UNKNOWN";
+ return str;
}
/**
@@ -214,6 +270,46 @@ static inline const char *dwc3_gadget_event_type_string(u8 event)
}
}
+static inline const char *dwc3_decode_event(u32 event)
+{
+ const union dwc3_event evt = (union dwc3_event) event;
+
+ if (evt.type.is_devspec)
+ return dwc3_gadget_event_string(&evt.devt);
+ else
+ return dwc3_ep_event_string(&evt.depevt);
+}
+
+static inline const char *dwc3_ep_cmd_status_string(int status)
+{
+ switch (status) {
+ case -ETIMEDOUT:
+ return "Timed Out";
+ case 0:
+ return "Successful";
+ case DEPEVT_TRANSFER_NO_RESOURCE:
+ return "No Resource";
+ case DEPEVT_TRANSFER_BUS_EXPIRY:
+ return "Bus Expiry";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static inline const char *dwc3_gadget_generic_cmd_status_string(int status)
+{
+ switch (status) {
+ case -ETIMEDOUT:
+ return "Timed Out";
+ case 0:
+ return "Successful";
+ case 1:
+ return "Error";
+ default:
+ return "UNKNOWN";
+ }
+}
+
void dwc3_trace(void (*trace)(struct va_format *), const char *fmt, ...);
#ifdef CONFIG_DEBUG_FS
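
One caveat in the expanded helpers above: they format into a function-local static char str[256] so the API can keep returning const char *, which makes them non-reentrant; a second caller (another CPU, or two expansions in one trace line) overwrites the first result. For the tracing paths they serve, that trade-off is accepted. In miniature:

	static const char *fmt_event(u32 ev)
	{
		static char str[256];	/* shared by every caller */

		sprintf(str, "event %08x", ev);
		return str;	/* valid only until the next call */
	}
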
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index b1dd3c6d7..31926dda4 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -36,9 +36,32 @@
#define dump_register(nm) \
{ \
.name = __stringify(nm), \
- .offset = DWC3_ ##nm - DWC3_GLOBALS_REGS_START, \
+ .offset = DWC3_ ##nm, \
}
+#define dump_ep_register_set(n) \
+ { \
+ .name = "DEPCMDPAR2("__stringify(n)")", \
+ .offset = DWC3_DEP_BASE(n) + \
+ DWC3_DEPCMDPAR2, \
+ }, \
+ { \
+ .name = "DEPCMDPAR1("__stringify(n)")", \
+ .offset = DWC3_DEP_BASE(n) + \
+ DWC3_DEPCMDPAR1, \
+ }, \
+ { \
+ .name = "DEPCMDPAR0("__stringify(n)")", \
+ .offset = DWC3_DEP_BASE(n) + \
+ DWC3_DEPCMDPAR0, \
+ }, \
+ { \
+ .name = "DEPCMD("__stringify(n)")", \
+ .offset = DWC3_DEP_BASE(n) + \
+ DWC3_DEPCMD, \
+ }
+
+
static const struct debugfs_reg32 dwc3_regs[] = {
dump_register(GSBUSCFG0),
dump_register(GSBUSCFG1),
@@ -47,6 +70,7 @@ static const struct debugfs_reg32 dwc3_regs[] = {
dump_register(GCTL),
dump_register(GEVTEN),
dump_register(GSTS),
+ dump_register(GUCTL1),
dump_register(GSNPSID),
dump_register(GGPIO),
dump_register(GUID),
@@ -218,137 +242,38 @@ static const struct debugfs_reg32 dwc3_regs[] = {
dump_register(DGCMD),
dump_register(DALEPENA),
- dump_register(DEPCMDPAR2(0)),
- dump_register(DEPCMDPAR2(1)),
- dump_register(DEPCMDPAR2(2)),
- dump_register(DEPCMDPAR2(3)),
- dump_register(DEPCMDPAR2(4)),
- dump_register(DEPCMDPAR2(5)),
- dump_register(DEPCMDPAR2(6)),
- dump_register(DEPCMDPAR2(7)),
- dump_register(DEPCMDPAR2(8)),
- dump_register(DEPCMDPAR2(9)),
- dump_register(DEPCMDPAR2(10)),
- dump_register(DEPCMDPAR2(11)),
- dump_register(DEPCMDPAR2(12)),
- dump_register(DEPCMDPAR2(13)),
- dump_register(DEPCMDPAR2(14)),
- dump_register(DEPCMDPAR2(15)),
- dump_register(DEPCMDPAR2(16)),
- dump_register(DEPCMDPAR2(17)),
- dump_register(DEPCMDPAR2(18)),
- dump_register(DEPCMDPAR2(19)),
- dump_register(DEPCMDPAR2(20)),
- dump_register(DEPCMDPAR2(21)),
- dump_register(DEPCMDPAR2(22)),
- dump_register(DEPCMDPAR2(23)),
- dump_register(DEPCMDPAR2(24)),
- dump_register(DEPCMDPAR2(25)),
- dump_register(DEPCMDPAR2(26)),
- dump_register(DEPCMDPAR2(27)),
- dump_register(DEPCMDPAR2(28)),
- dump_register(DEPCMDPAR2(29)),
- dump_register(DEPCMDPAR2(30)),
- dump_register(DEPCMDPAR2(31)),
-
- dump_register(DEPCMDPAR1(0)),
- dump_register(DEPCMDPAR1(1)),
- dump_register(DEPCMDPAR1(2)),
- dump_register(DEPCMDPAR1(3)),
- dump_register(DEPCMDPAR1(4)),
- dump_register(DEPCMDPAR1(5)),
- dump_register(DEPCMDPAR1(6)),
- dump_register(DEPCMDPAR1(7)),
- dump_register(DEPCMDPAR1(8)),
- dump_register(DEPCMDPAR1(9)),
- dump_register(DEPCMDPAR1(10)),
- dump_register(DEPCMDPAR1(11)),
- dump_register(DEPCMDPAR1(12)),
- dump_register(DEPCMDPAR1(13)),
- dump_register(DEPCMDPAR1(14)),
- dump_register(DEPCMDPAR1(15)),
- dump_register(DEPCMDPAR1(16)),
- dump_register(DEPCMDPAR1(17)),
- dump_register(DEPCMDPAR1(18)),
- dump_register(DEPCMDPAR1(19)),
- dump_register(DEPCMDPAR1(20)),
- dump_register(DEPCMDPAR1(21)),
- dump_register(DEPCMDPAR1(22)),
- dump_register(DEPCMDPAR1(23)),
- dump_register(DEPCMDPAR1(24)),
- dump_register(DEPCMDPAR1(25)),
- dump_register(DEPCMDPAR1(26)),
- dump_register(DEPCMDPAR1(27)),
- dump_register(DEPCMDPAR1(28)),
- dump_register(DEPCMDPAR1(29)),
- dump_register(DEPCMDPAR1(30)),
- dump_register(DEPCMDPAR1(31)),
-
- dump_register(DEPCMDPAR0(0)),
- dump_register(DEPCMDPAR0(1)),
- dump_register(DEPCMDPAR0(2)),
- dump_register(DEPCMDPAR0(3)),
- dump_register(DEPCMDPAR0(4)),
- dump_register(DEPCMDPAR0(5)),
- dump_register(DEPCMDPAR0(6)),
- dump_register(DEPCMDPAR0(7)),
- dump_register(DEPCMDPAR0(8)),
- dump_register(DEPCMDPAR0(9)),
- dump_register(DEPCMDPAR0(10)),
- dump_register(DEPCMDPAR0(11)),
- dump_register(DEPCMDPAR0(12)),
- dump_register(DEPCMDPAR0(13)),
- dump_register(DEPCMDPAR0(14)),
- dump_register(DEPCMDPAR0(15)),
- dump_register(DEPCMDPAR0(16)),
- dump_register(DEPCMDPAR0(17)),
- dump_register(DEPCMDPAR0(18)),
- dump_register(DEPCMDPAR0(19)),
- dump_register(DEPCMDPAR0(20)),
- dump_register(DEPCMDPAR0(21)),
- dump_register(DEPCMDPAR0(22)),
- dump_register(DEPCMDPAR0(23)),
- dump_register(DEPCMDPAR0(24)),
- dump_register(DEPCMDPAR0(25)),
- dump_register(DEPCMDPAR0(26)),
- dump_register(DEPCMDPAR0(27)),
- dump_register(DEPCMDPAR0(28)),
- dump_register(DEPCMDPAR0(29)),
- dump_register(DEPCMDPAR0(30)),
- dump_register(DEPCMDPAR0(31)),
-
- dump_register(DEPCMD(0)),
- dump_register(DEPCMD(1)),
- dump_register(DEPCMD(2)),
- dump_register(DEPCMD(3)),
- dump_register(DEPCMD(4)),
- dump_register(DEPCMD(5)),
- dump_register(DEPCMD(6)),
- dump_register(DEPCMD(7)),
- dump_register(DEPCMD(8)),
- dump_register(DEPCMD(9)),
- dump_register(DEPCMD(10)),
- dump_register(DEPCMD(11)),
- dump_register(DEPCMD(12)),
- dump_register(DEPCMD(13)),
- dump_register(DEPCMD(14)),
- dump_register(DEPCMD(15)),
- dump_register(DEPCMD(16)),
- dump_register(DEPCMD(17)),
- dump_register(DEPCMD(18)),
- dump_register(DEPCMD(19)),
- dump_register(DEPCMD(20)),
- dump_register(DEPCMD(21)),
- dump_register(DEPCMD(22)),
- dump_register(DEPCMD(23)),
- dump_register(DEPCMD(24)),
- dump_register(DEPCMD(25)),
- dump_register(DEPCMD(26)),
- dump_register(DEPCMD(27)),
- dump_register(DEPCMD(28)),
- dump_register(DEPCMD(29)),
- dump_register(DEPCMD(30)),
- dump_register(DEPCMD(31)),
+ dump_ep_register_set(0),
+ dump_ep_register_set(1),
+ dump_ep_register_set(2),
+ dump_ep_register_set(3),
+ dump_ep_register_set(4),
+ dump_ep_register_set(5),
+ dump_ep_register_set(6),
+ dump_ep_register_set(7),
+ dump_ep_register_set(8),
+ dump_ep_register_set(9),
+ dump_ep_register_set(10),
+ dump_ep_register_set(11),
+ dump_ep_register_set(12),
+ dump_ep_register_set(13),
+ dump_ep_register_set(14),
+ dump_ep_register_set(15),
+ dump_ep_register_set(16),
+ dump_ep_register_set(17),
+ dump_ep_register_set(18),
+ dump_ep_register_set(19),
+ dump_ep_register_set(20),
+ dump_ep_register_set(21),
+ dump_ep_register_set(22),
+ dump_ep_register_set(23),
+ dump_ep_register_set(24),
+ dump_ep_register_set(25),
+ dump_ep_register_set(26),
+ dump_ep_register_set(27),
+ dump_ep_register_set(28),
+ dump_ep_register_set(29),
+ dump_ep_register_set(30),
+ dump_ep_register_set(31),
dump_register(OCFG),
dump_register(OCTL),
@@ -939,7 +864,7 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
dwc->regset->regs = dwc3_regs;
dwc->regset->nregs = ARRAY_SIZE(dwc3_regs);
- dwc->regset->base = dwc->regs;
+ dwc->regset->base = dwc->regs - DWC3_GLOBALS_REGS_START;
file = debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset);
if (!file)
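
With dump_register() now recording absolute offsets (0xc110 for GCTL, and so on), the regset base is rewound by DWC3_GLOBALS_REGS_START so base + offset still resolves to the right MMIO address. The arithmetic, assuming DWC3_GLOBALS_REGS_START is 0xc100 as defined in core.h:

	/*
	 *   mapping:      dwc->regs          == iobase + 0xc100
	 *   table entry:  .offset            == 0xc110   (DWC3_GCTL)
	 *   dump reads:   base + offset
	 *              == (dwc->regs - 0xc100) + 0xc110
	 *              == dwc->regs + 0x10    (the GCTL register)
	 */
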
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index 974335377..e56d59b19 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -61,6 +61,7 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
if (!simple->clks)
return -ENOMEM;
+ platform_set_drvdata(pdev, simple);
simple->dev = dev;
for (i = 0; i < simple->num_clocks; i++) {
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index af264493b..29e80cc9b 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -165,7 +165,7 @@ static void dwc3_omap_write_utmi_ctrl(struct dwc3_omap *omap, u32 value)
static u32 dwc3_omap_read_irq0_status(struct dwc3_omap *omap)
{
- return dwc3_omap_readl(omap->base, USBOTGSS_IRQSTATUS_0 -
+ return dwc3_omap_readl(omap->base, USBOTGSS_IRQSTATUS_RAW_0 -
omap->irq0_offset);
}
@@ -178,7 +178,7 @@ static void dwc3_omap_write_irq0_status(struct dwc3_omap *omap, u32 value)
static u32 dwc3_omap_read_irqmisc_status(struct dwc3_omap *omap)
{
- return dwc3_omap_readl(omap->base, USBOTGSS_IRQSTATUS_MISC +
+ return dwc3_omap_readl(omap->base, USBOTGSS_IRQSTATUS_RAW_MISC +
omap->irqmisc_offset);
}
@@ -231,35 +231,30 @@ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
}
val = dwc3_omap_read_utmi_ctrl(omap);
- val &= ~(USBOTGSS_UTMI_OTG_CTRL_IDDIG
- | USBOTGSS_UTMI_OTG_CTRL_VBUSVALID
- | USBOTGSS_UTMI_OTG_CTRL_SESSEND);
- val |= USBOTGSS_UTMI_OTG_CTRL_SESSVALID
- | USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT;
+ val &= ~USBOTGSS_UTMI_OTG_CTRL_IDDIG;
dwc3_omap_write_utmi_ctrl(omap, val);
break;
case OMAP_DWC3_VBUS_VALID:
val = dwc3_omap_read_utmi_ctrl(omap);
val &= ~USBOTGSS_UTMI_OTG_CTRL_SESSEND;
- val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG
- | USBOTGSS_UTMI_OTG_CTRL_VBUSVALID
- | USBOTGSS_UTMI_OTG_CTRL_SESSVALID
- | USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT;
+ val |= USBOTGSS_UTMI_OTG_CTRL_VBUSVALID
+ | USBOTGSS_UTMI_OTG_CTRL_SESSVALID;
dwc3_omap_write_utmi_ctrl(omap, val);
break;
case OMAP_DWC3_ID_FLOAT:
if (omap->vbus_reg)
regulator_disable(omap->vbus_reg);
+ val = dwc3_omap_read_utmi_ctrl(omap);
+ val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG;
+ dwc3_omap_write_utmi_ctrl(omap, val);
case OMAP_DWC3_VBUS_OFF:
val = dwc3_omap_read_utmi_ctrl(omap);
val &= ~(USBOTGSS_UTMI_OTG_CTRL_SESSVALID
- | USBOTGSS_UTMI_OTG_CTRL_VBUSVALID
- | USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT);
- val |= USBOTGSS_UTMI_OTG_CTRL_SESSEND
- | USBOTGSS_UTMI_OTG_CTRL_IDDIG;
+ | USBOTGSS_UTMI_OTG_CTRL_VBUSVALID);
+ val |= USBOTGSS_UTMI_OTG_CTRL_SESSEND;
dwc3_omap_write_utmi_ctrl(omap, val);
break;
@@ -268,19 +263,38 @@ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
}
}
+static void dwc3_omap_enable_irqs(struct dwc3_omap *omap);
+static void dwc3_omap_disable_irqs(struct dwc3_omap *omap);
+
static irqreturn_t dwc3_omap_interrupt(int irq, void *_omap)
{
struct dwc3_omap *omap = _omap;
+
+ if (dwc3_omap_read_irqmisc_status(omap) ||
+ dwc3_omap_read_irq0_status(omap)) {
+ /* mask irqs */
+ dwc3_omap_disable_irqs(omap);
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t dwc3_omap_interrupt_thread(int irq, void *_omap)
+{
+ struct dwc3_omap *omap = _omap;
u32 reg;
+ /* clear irq status flags */
reg = dwc3_omap_read_irqmisc_status(omap);
-
dwc3_omap_write_irqmisc_status(omap, reg);
reg = dwc3_omap_read_irq0_status(omap);
-
dwc3_omap_write_irq0_status(omap, reg);
+ /* unmask irqs */
+ dwc3_omap_enable_irqs(omap);
+
return IRQ_HANDLED;
}
@@ -497,8 +511,9 @@ static int dwc3_omap_probe(struct platform_device *pdev)
/* check the DMA Status */
reg = dwc3_omap_readl(omap->base, USBOTGSS_SYSCONFIG);
- ret = devm_request_irq(dev, omap->irq, dwc3_omap_interrupt, 0,
- "dwc3-omap", omap);
+ ret = devm_request_threaded_irq(dev, omap->irq, dwc3_omap_interrupt,
+ dwc3_omap_interrupt_thread, IRQF_SHARED,
+ "dwc3-omap", omap);
if (ret) {
dev_err(dev, "failed to request IRQ #%d --> %d\n",
omap->irq, ret);
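
The OMAP glue now splits its interrupt handling: the hard-IRQ half only checks whether this controller raised anything, masks further interrupts, and wakes the thread; the threaded half clears the status and unmasks again. Returning IRQ_NONE when neither status register is set is what makes IRQF_SHARED work on a shared line. A skeletal version with hypothetical helpers:

	struct my_ctrl;	/* stand-in for struct dwc3_omap */

	static bool my_irq_pending(struct my_ctrl *c);
	static void my_mask_irqs(struct my_ctrl *c);
	static void my_unmask_irqs(struct my_ctrl *c);
	static void my_handle_events(struct my_ctrl *c);

	static irqreturn_t hard_handler(int irq, void *data)
	{
		struct my_ctrl *c = data;

		if (!my_irq_pending(c))
			return IRQ_NONE;	/* shared line: not ours */

		my_mask_irqs(c);	/* quiesce until the thread runs */
		return IRQ_WAKE_THREAD;
	}

	static irqreturn_t thread_handler(int irq, void *data)
	{
		struct my_ctrl *c = data;

		my_handle_events(c);	/* may sleep here */
		my_unmask_irqs(c);
		return IRQ_HANDLED;
	}
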
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 2fd50578b..6df0f5dad 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -20,11 +20,11 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
#include <linux/acpi.h>
-
-#include "platform_data.h"
+#include <linux/delay.h>
#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd
#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI 0xabce
@@ -52,62 +52,70 @@ static int dwc3_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc3)
{
if (pdev->vendor == PCI_VENDOR_ID_AMD &&
pdev->device == PCI_DEVICE_ID_AMD_NL_USB) {
- struct dwc3_platform_data pdata;
-
- memset(&pdata, 0, sizeof(pdata));
-
- pdata.has_lpm_erratum = true;
- pdata.lpm_nyet_threshold = 0xf;
-
- pdata.u2exit_lfps_quirk = true;
- pdata.u2ss_inp3_quirk = true;
- pdata.req_p1p2p3_quirk = true;
- pdata.del_p1p2p3_quirk = true;
- pdata.del_phy_power_chg_quirk = true;
- pdata.lfps_filter_quirk = true;
- pdata.rx_detect_poll_quirk = true;
-
- pdata.tx_de_emphasis_quirk = true;
- pdata.tx_de_emphasis = 1;
-
- /*
- * FIXME these quirks should be removed when AMD NL
- * taps out
- */
- pdata.disable_scramble_quirk = true;
- pdata.dis_u3_susphy_quirk = true;
- pdata.dis_u2_susphy_quirk = true;
-
- return platform_device_add_data(dwc3, &pdata, sizeof(pdata));
+ struct property_entry properties[] = {
+ PROPERTY_ENTRY_BOOL("snps,has-lpm-erratum"),
+ PROPERTY_ENTRY_U8("snps,lpm-nyet-threshold", 0xf),
+ PROPERTY_ENTRY_BOOL("snps,u2exit_lfps_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,u2ss_inp3_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,req_p1p2p3_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,del_p1p2p3_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,del_phy_power_chg_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,lfps_filter_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,rx_detect_poll_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,tx_de_emphasis_quirk"),
+ PROPERTY_ENTRY_U8("snps,tx_de_emphasis", 1),
+ /*
+ * FIXME these quirks should be removed when AMD NL
+ * tapes out
+ */
+ PROPERTY_ENTRY_BOOL("snps,disable_scramble_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
+ { },
+ };
+
+ return platform_device_add_properties(dwc3, properties);
}
- if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
- pdev->device == PCI_DEVICE_ID_INTEL_BYT) {
- struct gpio_desc *gpio;
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
+ int ret;
+
+ struct property_entry properties[] = {
+ PROPERTY_ENTRY_STRING("dr-mode", "peripheral"),
+ { }
+ };
- acpi_dev_add_driver_gpios(ACPI_COMPANION(&pdev->dev),
- acpi_dwc3_byt_gpios);
+ ret = platform_device_add_properties(dwc3, properties);
+ if (ret < 0)
+ return ret;
- /*
- * These GPIOs will turn on the USB2 PHY. Note that we have to
- * put the gpio descriptors again here because the phy driver
- * might want to grab them, too.
- */
- gpio = gpiod_get_optional(&pdev->dev, "cs", GPIOD_OUT_LOW);
- if (IS_ERR(gpio))
- return PTR_ERR(gpio);
+ if (pdev->device == PCI_DEVICE_ID_INTEL_BYT) {
+ struct gpio_desc *gpio;
- gpiod_set_value_cansleep(gpio, 1);
- gpiod_put(gpio);
+ acpi_dev_add_driver_gpios(ACPI_COMPANION(&pdev->dev),
+ acpi_dwc3_byt_gpios);
- gpio = gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(gpio))
- return PTR_ERR(gpio);
+ /*
+ * These GPIOs will turn on the USB2 PHY. Note that we have to
+ * put the gpio descriptors again here because the phy driver
+ * might want to grab them, too.
+ */
+ gpio = gpiod_get_optional(&pdev->dev, "cs", GPIOD_OUT_LOW);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
- if (gpio) {
gpiod_set_value_cansleep(gpio, 1);
gpiod_put(gpio);
- usleep_range(10000, 11000);
+
+ gpio = gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ if (gpio) {
+ gpiod_set_value_cansleep(gpio, 1);
+ gpiod_put(gpio);
+ usleep_range(10000, 11000);
+ }
}
}
@@ -115,15 +123,14 @@ static int dwc3_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc3)
(pdev->device == PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 ||
pdev->device == PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI ||
pdev->device == PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31)) {
-
- struct dwc3_platform_data pdata;
-
- memset(&pdata, 0, sizeof(pdata));
- pdata.usb3_lpm_capable = true;
- pdata.has_lpm_erratum = true;
- pdata.dis_enblslpm_quirk = true;
-
- return platform_device_add_data(dwc3, &pdata, sizeof(pdata));
+ struct property_entry properties[] = {
+ PROPERTY_ENTRY_BOOL("snps,usb3_lpm_capable"),
+ PROPERTY_ENTRY_BOOL("snps,has-lpm-erratum"),
+ PROPERTY_ENTRY_BOOL("snps,dis_enblslpm_quirk"),
+ { },
+ };
+
+ return platform_device_add_properties(dwc3, properties);
}
return 0;
@@ -181,7 +188,11 @@ static int dwc3_pci_probe(struct pci_dev *pci,
goto err;
}
+ device_init_wakeup(dev, true);
+ device_set_run_wake(dev, true);
pci_set_drvdata(pci, dwc3);
+ pm_runtime_put(dev);
+
return 0;
err:
platform_device_put(dwc3);
@@ -190,6 +201,8 @@ err:
static void dwc3_pci_remove(struct pci_dev *pci)
{
+ device_init_wakeup(&pci->dev, false);
+ pm_runtime_get(&pci->dev);
acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pci->dev));
platform_device_unregister(pci_get_drvdata(pci));
}
@@ -221,11 +234,52 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
};
MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
+#ifdef CONFIG_PM
+static int dwc3_pci_runtime_suspend(struct device *dev)
+{
+ if (device_run_wake(dev))
+ return 0;
+
+ return -EBUSY;
+}
+
+static int dwc3_pci_runtime_resume(struct device *dev)
+{
+ struct platform_device *dwc3 = dev_get_drvdata(dev);
+
+ return pm_runtime_get(&dwc3->dev);
+}
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM_SLEEP
+static int dwc3_pci_pm_dummy(struct device *dev)
+{
+ /*
+	 * There's nothing to do here. No, seriously. Everything is taken
+	 * care of either by the PCI subsystem or by dwc3/core.c, so we have nothing
+ * missing here.
+ *
+ * So you'd think we didn't need this at all, but PCI subsystem will
+ * bail out if we don't have a valid callback :-s
+ */
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static struct dev_pm_ops dwc3_pci_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dwc3_pci_pm_dummy, dwc3_pci_pm_dummy)
+ SET_RUNTIME_PM_OPS(dwc3_pci_runtime_suspend, dwc3_pci_runtime_resume,
+ NULL)
+};
+
static struct pci_driver dwc3_pci_driver = {
.name = "dwc3-pci",
.id_table = dwc3_pci_id_table,
.probe = dwc3_pci_probe,
.remove = dwc3_pci_remove,
+ .driver = {
+ .pm = &dwc3_pci_dev_pm_ops,
+ }
};
MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 51b52a79d..fe79d771d 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -98,8 +98,7 @@ static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
trace_dwc3_prepare_trb(dep, trb);
- ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
- DWC3_DEPCMD_STARTTRANSFER, &params);
+ ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_STARTTRANSFER, &params);
if (ret < 0) {
dwc3_trace(trace_dwc3_ep0, "%s STARTTRANSFER failed",
dep->name);
@@ -107,9 +106,7 @@ static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
}
dep->flags |= DWC3_EP_BUSY;
- dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
- dep->number);
-
+ dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
dwc->ep0_next_event = DWC3_EP0_COMPLETE;
return 0;
@@ -499,7 +496,7 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
case USB_RECIP_ENDPOINT:
switch (wValue) {
case USB_ENDPOINT_HALT:
- dep = dwc3_wIndex_to_dep(dwc, wIndex);
+ dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
if (!dep)
return -EINVAL;
if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
@@ -622,8 +619,8 @@ static void dwc3_ep0_set_sel_cmpl(struct usb_ep *ep, struct usb_request *req)
struct timing {
u8 u1sel;
u8 u1pel;
- u16 u2sel;
- u16 u2pel;
+ __le16 u2sel;
+ __le16 u2pel;
} __packed timing;
int ret;
@@ -980,7 +977,7 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
ret = usb_gadget_map_request(&dwc->gadget, &req->request,
dep->number);
if (ret) {
- dwc3_trace(trace_dwc3_ep0, "failed to map request\n");
+ dwc3_trace(trace_dwc3_ep0, "failed to map request");
return;
}
@@ -1008,7 +1005,7 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
ret = usb_gadget_map_request(&dwc->gadget, &req->request,
dep->number);
if (ret) {
- dwc3_trace(trace_dwc3_ep0, "failed to map request\n");
+ dwc3_trace(trace_dwc3_ep0, "failed to map request");
return;
}
@@ -1058,7 +1055,7 @@ static void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep)
cmd |= DWC3_DEPCMD_CMDIOC;
cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
memset(&params, 0, sizeof(params));
- ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
+ ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
WARN_ON_ONCE(ret);
dep->resource_index = 0;
}
@@ -1112,11 +1109,8 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
void dwc3_ep0_interrupt(struct dwc3 *dwc,
const struct dwc3_event_depevt *event)
{
- u8 epnum = event->endpoint_number;
-
- dwc3_trace(trace_dwc3_ep0, "%s while ep%d%s in state '%s'",
- dwc3_ep_event_string(event->endpoint_event),
- epnum >> 1, (epnum & 1) ? "in" : "out",
+ dwc3_trace(trace_dwc3_ep0, "%s: state '%s'",
+ dwc3_ep_event_string(event),
dwc3_ep0_state_string(dwc->ep0state));
switch (event->endpoint_event) {
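
The u2sel/u2pel fields become __le16 because the six-byte SET_SEL payload is little-endian on the wire; typing them this way lets sparse flag any access that forgets the byte swap on big-endian hosts. A sketch of a correct read-out (not the driver's actual code):

	struct sel_timing {
		u8	u1sel;
		u8	u1pel;
		__le16	u2sel;
		__le16	u2pel;
	} __packed;

	static void parse_timing(const struct sel_timing *t,
				 u16 *u2sel, u16 *u2pel)
	{
		*u2sel = le16_to_cpu(t->u2sel);	/* explicit wire order */
		*u2pel = le16_to_cpu(t->u2pel);
	}
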
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 05a5300aa..685446189 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -145,21 +145,29 @@ int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
return -ETIMEDOUT;
}
-static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
+/**
+ * dwc3_ep_inc_trb() - Increment a TRB index.
+ * @index: Pointer to the TRB index to increment.
+ *
+ * The index should never point to the link TRB. After incrementing,
+ * if it points to the link TRB, wrap around to the beginning. The
+ * link TRB is always at the last TRB entry.
+ */
+static void dwc3_ep_inc_trb(u8 *index)
{
- dep->trb_enqueue++;
- dep->trb_enqueue %= DWC3_TRB_NUM;
+ (*index)++;
+ if (*index == (DWC3_TRB_NUM - 1))
+ *index = 0;
}
-static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
+static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
{
- dep->trb_dequeue++;
- dep->trb_dequeue %= DWC3_TRB_NUM;
+ dwc3_ep_inc_trb(&dep->trb_enqueue);
}
-static int dwc3_ep_is_last_trb(unsigned int index)
+static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
{
- return index == DWC3_TRB_NUM - 1;
+ dwc3_ep_inc_trb(&dep->trb_dequeue);
}
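
The shared increment above replaces the old modulo arithmetic plus the link-TRB special case in the giveback path: software indices advance through slots 0..DWC3_TRB_NUM-2 and wrap straight past slot DWC3_TRB_NUM-1, which permanently holds the link TRB that chains the ring for the hardware. Worked out for a hypothetical 8-slot ring:

	static void inc_trb(u8 *index)
	{
		(*index)++;		/* 0, 1, ..., 6, then back to 0 */
		if (*index == 8 - 1)	/* slot 7 holds the link TRB */
			*index = 0;
	}
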
void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
@@ -172,13 +180,6 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
i = 0;
do {
dwc3_ep_inc_deq(dep);
- /*
- * Skip LINK TRB. We can't use req->trb and check for
- * DWC3_TRBCTL_LINK_TRB because it points the TRB we
- * just completed (not the LINK TRB).
- */
- if (dwc3_ep_is_last_trb(dep->trb_dequeue))
- dwc3_ep_inc_deq(dep);
} while(++i < req->request.num_mapped_sgs);
req->started = false;
}
@@ -199,57 +200,54 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
spin_unlock(&dwc->lock);
usb_gadget_giveback_request(&dep->endpoint, &req->request);
spin_lock(&dwc->lock);
+
+ if (dep->number > 1)
+ pm_runtime_put(dwc->dev);
}
int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
{
u32 timeout = 500;
+ int status = 0;
+ int ret = 0;
u32 reg;
- trace_dwc3_gadget_generic_cmd(cmd, param);
-
dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
do {
reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
if (!(reg & DWC3_DGCMD_CMDACT)) {
- dwc3_trace(trace_dwc3_gadget,
- "Command Complete --> %d",
- DWC3_DGCMD_STATUS(reg));
- if (DWC3_DGCMD_STATUS(reg))
- return -EINVAL;
- return 0;
+ status = DWC3_DGCMD_STATUS(reg);
+ if (status)
+ ret = -EINVAL;
+ break;
}
+ } while (timeout--);
- /*
- * We can't sleep here, because it's also called from
- * interrupt context.
- */
- timeout--;
- if (!timeout) {
- dwc3_trace(trace_dwc3_gadget,
- "Command Timed Out");
- return -ETIMEDOUT;
- }
- udelay(1);
- } while (1);
+ if (!timeout) {
+ ret = -ETIMEDOUT;
+ status = -ETIMEDOUT;
+ }
+
+ trace_dwc3_gadget_generic_cmd(cmd, param, status);
+
+ return ret;
}
static int __dwc3_gadget_wakeup(struct dwc3 *dwc);
-int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
- unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
+int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
+ struct dwc3_gadget_ep_cmd_params *params)
{
- struct dwc3_ep *dep = dwc->eps[ep];
+ struct dwc3 *dwc = dep->dwc;
u32 timeout = 500;
u32 reg;
+ int cmd_status = 0;
int susphy = false;
int ret = -EINVAL;
- trace_dwc3_gadget_ep_cmd(dep, cmd, params);
-
/*
* Synopsys Databook 2.60a states, on section 6.3.2.5.[1-8], that if
* we're issuing an endpoint command, we must check if
@@ -281,26 +279,21 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
}
}
- dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
- dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
- dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);
+ dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0);
+ dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
+ dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);
- dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
+ dwc3_writel(dep->regs, DWC3_DEPCMD, cmd | DWC3_DEPCMD_CMDACT);
do {
- reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
+ reg = dwc3_readl(dep->regs, DWC3_DEPCMD);
if (!(reg & DWC3_DEPCMD_CMDACT)) {
- int cmd_status = DWC3_DEPCMD_STATUS(reg);
-
- dwc3_trace(trace_dwc3_gadget,
- "Command Complete --> %d",
- cmd_status);
+ cmd_status = DWC3_DEPCMD_STATUS(reg);
switch (cmd_status) {
case 0:
ret = 0;
break;
case DEPEVT_TRANSFER_NO_RESOURCE:
- dwc3_trace(trace_dwc3_gadget, "%s: no resource available");
ret = -EINVAL;
break;
case DEPEVT_TRANSFER_BUS_EXPIRY:
@@ -315,7 +308,6 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
* give a hint to the gadget driver that this is
* the case by returning -EAGAIN.
*/
- dwc3_trace(trace_dwc3_gadget, "%s: bus expiry");
ret = -EAGAIN;
break;
default:
@@ -324,21 +316,14 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
break;
}
+ } while (--timeout);
- /*
- * We can't sleep here, because it is also called from
- * interrupt context.
- */
- timeout--;
- if (!timeout) {
- dwc3_trace(trace_dwc3_gadget,
- "Command Timed Out");
- ret = -ETIMEDOUT;
- break;
- }
+ if (timeout == 0) {
+ ret = -ETIMEDOUT;
+ cmd_status = -ETIMEDOUT;
+ }
- udelay(1);
- } while (1);
+ trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);
if (unlikely(susphy)) {
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
@@ -363,12 +348,13 @@ static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
* IN transfers due to a mishandled error condition. Synopsys
* STAR 9000614252.
*/
- if (dep->direction && (dwc->revision >= DWC3_REVISION_260A))
+ if (dep->direction && (dwc->revision >= DWC3_REVISION_260A) &&
+ (dwc->gadget.speed >= USB_SPEED_SUPER))
cmd |= DWC3_DEPCMD_CLEARPENDIN;
memset(&params, 0, sizeof(params));
- return dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
+ return dwc3_send_gadget_ep_cmd(dep, cmd, &params);
}
static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
@@ -456,7 +442,7 @@ static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
memset(&params, 0x00, sizeof(params));
cmd = DWC3_DEPCMD_DEPSTARTCFG;
- ret = dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
+ ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
if (ret)
return ret;
@@ -477,10 +463,14 @@ static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
const struct usb_endpoint_descriptor *desc,
const struct usb_ss_ep_comp_descriptor *comp_desc,
- bool ignore, bool restore)
+ bool modify, bool restore)
{
struct dwc3_gadget_ep_cmd_params params;
+ if (dev_WARN_ONCE(dwc->dev, modify && restore,
+ "Can't modify and restore\n"))
+ return -EINVAL;
+
memset(&params, 0x00, sizeof(params));
params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
@@ -489,30 +479,22 @@ static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
/* Burst size is only needed in SuperSpeed mode */
if (dwc->gadget.speed >= USB_SPEED_SUPER) {
u32 burst = dep->endpoint.maxburst;
- u32 nump;
- u32 reg;
-
- /* update NumP */
- reg = dwc3_readl(dwc->regs, DWC3_DCFG);
- nump = DWC3_DCFG_NUMP(reg);
- nump = max(nump, burst);
- reg &= ~DWC3_DCFG_NUMP_MASK;
- reg |= nump << DWC3_DCFG_NUMP_SHIFT;
- dwc3_writel(dwc->regs, DWC3_DCFG, reg);
-
params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1);
}
- if (ignore)
- params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;
-
- if (restore) {
+ if (modify) {
+ params.param0 |= DWC3_DEPCFG_ACTION_MODIFY;
+ } else if (restore) {
params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
params.param2 |= dep->saved_state;
+ } else {
+ params.param0 |= DWC3_DEPCFG_ACTION_INIT;
}
- params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
- | DWC3_DEPCFG_XFER_NOT_READY_EN;
+ params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN;
+
+ if (dep->number <= 1 || usb_endpoint_xfer_isoc(desc))
+ params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN;
if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
@@ -543,8 +525,7 @@ static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
dep->interval = 1 << (desc->bInterval - 1);
}
- return dwc3_send_gadget_ep_cmd(dwc, dep->number,
- DWC3_DEPCMD_SETEPCONFIG, &params);
+ return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
}
static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
@@ -555,8 +536,8 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
- return dwc3_send_gadget_ep_cmd(dwc, dep->number,
- DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
+ return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
+ &params);
}
/**
@@ -569,7 +550,7 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
const struct usb_endpoint_descriptor *desc,
const struct usb_ss_ep_comp_descriptor *comp_desc,
- bool ignore, bool restore)
+ bool modify, bool restore)
{
struct dwc3 *dwc = dep->dwc;
u32 reg;
@@ -583,7 +564,7 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
return ret;
}
- ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore,
+ ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, modify,
restore);
if (ret)
return ret;
@@ -602,38 +583,24 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
if (usb_endpoint_xfer_control(desc))
- goto out;
+ return 0;
+
+ /* Initialize the TRB ring */
+ dep->trb_dequeue = 0;
+ dep->trb_enqueue = 0;
+ memset(dep->trb_pool, 0,
+ sizeof(struct dwc3_trb) * DWC3_TRB_NUM);
/* Link TRB. The HWO bit is never reset */
trb_st_hw = &dep->trb_pool[0];
trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
- memset(trb_link, 0, sizeof(*trb_link));
-
trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
}
-out:
- switch (usb_endpoint_type(desc)) {
- case USB_ENDPOINT_XFER_CONTROL:
- /* don't change name */
- break;
- case USB_ENDPOINT_XFER_ISOC:
- strlcat(dep->name, "-isoc", sizeof(dep->name));
- break;
- case USB_ENDPOINT_XFER_BULK:
- strlcat(dep->name, "-bulk", sizeof(dep->name));
- break;
- case USB_ENDPOINT_XFER_INT:
- strlcat(dep->name, "-int", sizeof(dep->name));
- break;
- default:
- dev_err(dwc->dev, "invalid endpoint transfer type\n");
- }
-
return 0;
}
@@ -642,15 +609,13 @@ static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
struct dwc3_request *req;
- if (!list_empty(&dep->started_list)) {
- dwc3_stop_active_transfer(dwc, dep->number, true);
+ dwc3_stop_active_transfer(dwc, dep->number, true);
- /* - giveback all requests to gadget driver */
- while (!list_empty(&dep->started_list)) {
- req = next_request(&dep->started_list);
+ /* - giveback all requests to gadget driver */
+ while (!list_empty(&dep->started_list)) {
+ req = next_request(&dep->started_list);
- dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
- }
+ dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
}
while (!list_empty(&dep->pending_list)) {
@@ -691,10 +656,6 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
dep->type = 0;
dep->flags = 0;
- snprintf(dep->name, sizeof(dep->name), "ep%d%s",
- dep->number >> 1,
- (dep->number & 1) ? "in" : "out");
-
return 0;
}
@@ -786,6 +747,8 @@ static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
req->epnum = dep->number;
req->dep = dep;
+ dep->allocated_requests++;
+
trace_dwc3_alloc_request(req);
return &req->request;
@@ -795,7 +758,9 @@ static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
struct usb_request *request)
{
struct dwc3_request *req = to_dwc3_request(request);
+ struct dwc3_ep *dep = to_dwc3_ep(ep);
+ dep->allocated_requests--;
trace_dwc3_free_request(req);
kfree(req);
}
@@ -827,9 +792,6 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
}
dwc3_ep_inc_enq(dep);
- /* Skip the LINK-TRB */
- if (dwc3_ep_is_last_trb(dep->trb_enqueue))
- dwc3_ep_inc_enq(dep);
trb->size = DWC3_TRB_SIZE_LENGTH(length);
trb->bpl = lower_32_bits(dma);
@@ -868,7 +830,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
if (!req->request.no_interrupt && !chain)
trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI;
- if (last)
+ if (last && !usb_endpoint_xfer_isoc(dep->endpoint.desc))
trb->ctrl |= DWC3_TRB_CTRL_LST;
if (chain)
@@ -879,137 +841,169 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
trb->ctrl |= DWC3_TRB_CTRL_HWO;
+ dep->queued_requests++;
+
trace_dwc3_prepare_trb(dep, trb);
}
-/*
- * dwc3_prepare_trbs - setup TRBs from requests
- * @dep: endpoint for which requests are being prepared
- * @starting: true if the endpoint is idle and no requests are queued.
+/**
+ * dwc3_ep_prev_trb() - Returns the previous TRB in the ring
+ * @dep: The endpoint with the TRB ring
+ * @index: The index of the current TRB in the ring
*
- * The function goes through the requests list and sets up TRBs for the
- * transfers. The function returns once there are no more TRBs available or
- * it runs out of requests.
+ * Returns the TRB prior to the one pointed to by the index. If the
+ * index is 0, we will wrap backwards, skip the link TRB, and return
+ * the one just before that.
*/
-static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
+static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
{
- struct dwc3_request *req, *n;
- u32 trbs_left;
- unsigned int last_one = 0;
+ if (!index)
+ index = DWC3_TRB_NUM - 2;
+ else
+ index--;
- BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
+ return &dep->trb_pool[index];
+}
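
As a stand-alone illustration of the wrap-around described in the kernel-doc above (a sketch assuming only a pool of DWC3_TRB_NUM slots with the link TRB parked in the last one), wrapping back from index 0 lands on the last transfer-capable slot:

/* Illustrative sketch only; TRB_NUM stands in for DWC3_TRB_NUM. */
#define TRB_NUM 256

static unsigned int prev_trb_index(unsigned int index)
{
	/* Slot TRB_NUM - 1 holds the link TRB, so a wrap from 0 skips it. */
	return index ? index - 1 : TRB_NUM - 2;
}
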
- trbs_left = dep->trb_dequeue - dep->trb_enqueue;
+static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
+{
+ struct dwc3_trb *tmp;
+ u8 trbs_left;
/*
- * If enqueue & dequeue are equal than it is either full or empty. If we
- * are starting to process requests then we are empty. Otherwise we are
- * full and don't do anything
+ * If enqueue & dequeue are equal then it is either full or empty.
+ *
+ * One way to know for sure is if the TRB right before us has HWO bit
+ * set or not. If it has, then we're definitely full and can't fit any
+ * more transfers in our ring.
*/
- if (!trbs_left) {
- if (!starting)
- return;
+ if (dep->trb_enqueue == dep->trb_dequeue) {
+ tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
+ if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
+ return 0;
- trbs_left = DWC3_TRB_NUM;
+ return DWC3_TRB_NUM - 1;
}
- /* The last TRB is a link TRB, not used for xfer */
- if (trbs_left <= 1)
- return;
+ trbs_left = dep->trb_dequeue - dep->trb_enqueue;
+ trbs_left &= (DWC3_TRB_NUM - 1);
- list_for_each_entry_safe(req, n, &dep->pending_list, list) {
- unsigned length;
- dma_addr_t dma;
- last_one = false;
-
- if (req->request.num_mapped_sgs > 0) {
- struct usb_request *request = &req->request;
- struct scatterlist *sg = request->sg;
- struct scatterlist *s;
- int i;
-
- for_each_sg(sg, s, request->num_mapped_sgs, i) {
- unsigned chain = true;
-
- length = sg_dma_len(s);
- dma = sg_dma_address(s);
-
- if (i == (request->num_mapped_sgs - 1) ||
- sg_is_last(s)) {
- if (list_empty(&dep->pending_list))
- last_one = true;
- chain = false;
- }
+ if (dep->trb_dequeue < dep->trb_enqueue)
+ trbs_left--;
- trbs_left--;
- if (!trbs_left)
- last_one = true;
+ return trbs_left;
+}
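
To make the occupancy rule above concrete, here is a worked sketch assuming DWC3_TRB_NUM is 256, its value in this driver. When enqueue equals dequeue, the HWO bit of the previous TRB disambiguates full from empty; otherwise, with enqueue == 10 and dequeue == 5, the free count is ((5 - 10) & 255) - 1 = 250, the extra decrement accounting for the link TRB that sits inside the wrapped free span:

/* Stand-alone sketch of the ring occupancy rule; not driver code. */
#define TRB_NUM 256U

static unsigned int ring_trbs_left(unsigned int enq, unsigned int deq,
				   int prev_trb_hwo)
{
	if (enq == deq)
		/* Ambiguous case: the previous TRB's HWO bit decides. */
		return prev_trb_hwo ? 0 : TRB_NUM - 1;

	/* Wrapped distance, minus one slot for the link TRB when wrapping. */
	return ((deq - enq) & (TRB_NUM - 1)) - (deq < enq ? 1 : 0);
}
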
- if (last_one)
- chain = false;
+static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
+ struct dwc3_request *req, unsigned int trbs_left,
+ unsigned int more_coming)
+{
+ struct usb_request *request = &req->request;
+ struct scatterlist *sg = request->sg;
+ struct scatterlist *s;
+ unsigned int last = false;
+ unsigned int length;
+ dma_addr_t dma;
+ int i;
- dwc3_prepare_one_trb(dep, req, dma, length,
- last_one, chain, i);
+ for_each_sg(sg, s, request->num_mapped_sgs, i) {
+ unsigned chain = true;
- if (last_one)
- break;
- }
+ length = sg_dma_len(s);
+ dma = sg_dma_address(s);
- if (last_one)
- break;
- } else {
- dma = req->request.dma;
- length = req->request.length;
- trbs_left--;
+ if (sg_is_last(s)) {
+ if (usb_endpoint_xfer_int(dep->endpoint.desc) ||
+ !more_coming)
+ last = true;
- if (!trbs_left)
- last_one = 1;
+ chain = false;
+ }
- /* Is this the last request? */
- if (list_is_last(&req->list, &dep->pending_list))
- last_one = 1;
+ if (!trbs_left--)
+ last = true;
- dwc3_prepare_one_trb(dep, req, dma, length,
- last_one, false, 0);
+ if (last)
+ chain = false;
- if (last_one)
- break;
- }
+ dwc3_prepare_one_trb(dep, req, dma, length,
+ last, chain, i);
+
+ if (last)
+ break;
}
}
-static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
- int start_new)
+static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
+ struct dwc3_request *req, unsigned int trbs_left,
+ unsigned int more_coming)
+{
+ unsigned int last = false;
+ unsigned int length;
+ dma_addr_t dma;
+
+ dma = req->request.dma;
+ length = req->request.length;
+
+ if (!trbs_left)
+ last = true;
+
+ /* Is this the last request? */
+ if (usb_endpoint_xfer_int(dep->endpoint.desc) || !more_coming)
+ last = true;
+
+ dwc3_prepare_one_trb(dep, req, dma, length,
+ last, false, 0);
+}
+
+/*
+ * dwc3_prepare_trbs - setup TRBs from requests
+ * @dep: endpoint for which requests are being prepared
+ *
+ * The function goes through the requests list and sets up TRBs for the
+ * transfers. The function returns once there are no more TRBs available or
+ * it runs out of requests.
+ */
+static void dwc3_prepare_trbs(struct dwc3_ep *dep)
+{
+ struct dwc3_request *req, *n;
+ unsigned int more_coming;
+ u32 trbs_left;
+
+ BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
+
+ trbs_left = dwc3_calc_trbs_left(dep);
+ if (!trbs_left)
+ return;
+
+ more_coming = dep->allocated_requests - dep->queued_requests;
+
+ list_for_each_entry_safe(req, n, &dep->pending_list, list) {
+ if (req->request.num_mapped_sgs > 0)
+ dwc3_prepare_one_trb_sg(dep, req, trbs_left--,
+ more_coming);
+ else
+ dwc3_prepare_one_trb_linear(dep, req, trbs_left--,
+ more_coming);
+
+ if (!trbs_left)
+ return;
+ }
+}
+
+static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
{
struct dwc3_gadget_ep_cmd_params params;
struct dwc3_request *req;
struct dwc3 *dwc = dep->dwc;
+ int starting;
int ret;
u32 cmd;
- if (start_new && (dep->flags & DWC3_EP_BUSY)) {
- dwc3_trace(trace_dwc3_gadget, "%s: endpoint busy", dep->name);
- return -EBUSY;
- }
-
- /*
- * If we are getting here after a short-out-packet we don't enqueue any
- * new requests as we try to set the IOC bit only on the last request.
- */
- if (start_new) {
- if (list_empty(&dep->started_list))
- dwc3_prepare_trbs(dep, start_new);
-
- /* req points to the first request which will be sent */
- req = next_request(&dep->started_list);
- } else {
- dwc3_prepare_trbs(dep, start_new);
+ starting = !(dep->flags & DWC3_EP_BUSY);
- /*
- * req points to the first request where HWO changed from 0 to 1
- */
- req = next_request(&dep->started_list);
- }
+ dwc3_prepare_trbs(dep);
+ req = next_request(&dep->started_list);
if (!req) {
dep->flags |= DWC3_EP_PENDING_REQUEST;
return 0;
@@ -1017,16 +1011,17 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
memset(&params, 0, sizeof(params));
- if (start_new) {
+ if (starting) {
params.param0 = upper_32_bits(req->trb_dma);
params.param1 = lower_32_bits(req->trb_dma);
- cmd = DWC3_DEPCMD_STARTTRANSFER;
+ cmd = DWC3_DEPCMD_STARTTRANSFER |
+ DWC3_DEPCMD_PARAM(cmd_param);
} else {
- cmd = DWC3_DEPCMD_UPDATETRANSFER;
+ cmd = DWC3_DEPCMD_UPDATETRANSFER |
+ DWC3_DEPCMD_PARAM(dep->resource_index);
}
- cmd |= DWC3_DEPCMD_PARAM(cmd_param);
- ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
+ ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
if (ret < 0) {
/*
* FIXME we need to iterate over the list of requests
@@ -1041,9 +1036,8 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
dep->flags |= DWC3_EP_BUSY;
- if (start_new) {
- dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
- dep->number);
+ if (starting) {
+ dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
WARN_ON_ONCE(!dep->resource_index);
}
@@ -1066,7 +1060,7 @@ static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
/* 4 micro frames in the future */
uf = cur_uf + dep->interval * 4;
- __dwc3_gadget_kick_transfer(dep, uf, 1);
+ __dwc3_gadget_kick_transfer(dep, uf);
}
static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
@@ -1087,18 +1081,20 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
if (!dep->endpoint.desc) {
dwc3_trace(trace_dwc3_gadget,
- "trying to queue request %p to disabled %s\n",
+ "trying to queue request %p to disabled %s",
&req->request, dep->endpoint.name);
return -ESHUTDOWN;
}
if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
&req->request, req->dep->name)) {
- dwc3_trace(trace_dwc3_gadget, "request %p belongs to '%s'\n",
+ dwc3_trace(trace_dwc3_gadget, "request %p belongs to '%s'",
&req->request, req->dep->name);
return -EINVAL;
}
+ pm_runtime_get(dwc->dev);
+
req->request.actual = 0;
req->request.status = -EINPROGRESS;
req->direction = dep->direction;
@@ -1133,9 +1129,8 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
* little bit faster.
*/
if (!usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
- !usb_endpoint_xfer_int(dep->endpoint.desc) &&
- !(dep->flags & DWC3_EP_BUSY)) {
- ret = __dwc3_gadget_kick_transfer(dep, 0, true);
+ !usb_endpoint_xfer_int(dep->endpoint.desc)) {
+ ret = __dwc3_gadget_kick_transfer(dep, 0);
goto out;
}
@@ -1165,7 +1160,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
return 0;
}
- ret = __dwc3_gadget_kick_transfer(dep, 0, true);
+ ret = __dwc3_gadget_kick_transfer(dep, 0);
if (!ret)
dep->flags &= ~DWC3_EP_PENDING_REQUEST;
@@ -1181,8 +1176,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
(dep->flags & DWC3_EP_BUSY) &&
!(dep->flags & DWC3_EP_MISSED_ISOC)) {
WARN_ON_ONCE(!dep->resource_index);
- ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index,
- false);
+ ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index);
goto out;
}
@@ -1192,12 +1186,12 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
* handled.
*/
if (dep->stream_capable)
- ret = __dwc3_gadget_kick_transfer(dep, 0, true);
+ ret = __dwc3_gadget_kick_transfer(dep, 0);
out:
if (ret && ret != -EBUSY)
dwc3_trace(trace_dwc3_gadget,
- "%s: failed to kick transfers\n",
+ "%s: failed to kick transfers",
dep->name);
if (ret == -EBUSY)
ret = 0;
@@ -1217,7 +1211,7 @@ static int __dwc3_gadget_ep_queue_zlp(struct dwc3 *dwc, struct dwc3_ep *dep)
struct usb_request *request;
struct usb_ep *ep = &dep->endpoint;
- dwc3_trace(trace_dwc3_gadget, "queueing ZLP\n");
+ dwc3_trace(trace_dwc3_gadget, "queueing ZLP");
request = dwc3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
if (!request)
return -ENOMEM;
@@ -1321,23 +1315,36 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
memset(&params, 0x00, sizeof(params));
if (value) {
- if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) ||
- (!list_empty(&dep->started_list) ||
- !list_empty(&dep->pending_list)))) {
+ struct dwc3_trb *trb;
+
+ unsigned transfer_in_flight;
+ unsigned started;
+
+ if (dep->number > 1)
+ trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
+ else
+ trb = &dwc->ep0_trb[dep->trb_enqueue];
+
+ transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO;
+ started = !list_empty(&dep->started_list);
+
+ if (!protocol && ((dep->direction && transfer_in_flight) ||
+ (!dep->direction && started))) {
dwc3_trace(trace_dwc3_gadget,
"%s: pending request, cannot halt",
dep->name);
return -EAGAIN;
}
- ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
- DWC3_DEPCMD_SETSTALL, &params);
+ ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL,
+ &params);
if (ret)
dev_err(dwc->dev, "failed to set STALL on %s\n",
dep->name);
else
dep->flags |= DWC3_EP_STALL;
} else {
ret = dwc3_send_clear_stall_ep_cmd(dep);
if (ret)
dev_err(dwc->dev, "failed to clear STALL on %s\n",
@@ -1427,7 +1434,7 @@ static int dwc3_gadget_get_frame(struct usb_gadget *g)
static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
{
- unsigned long timeout;
+ int retries;
int ret;
u32 reg;
@@ -1446,8 +1453,8 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
speed = reg & DWC3_DSTS_CONNECTSPD;
if ((speed == DWC3_DSTS_SUPERSPEED) ||
(speed == DWC3_DSTS_SUPERSPEED_PLUS)) {
- dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed\n");
- return -EINVAL;
+ dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed");
+ return 0;
}
link_state = DWC3_DSTS_USBLNKST(reg);
@@ -1458,7 +1465,7 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
break;
default:
dwc3_trace(trace_dwc3_gadget,
- "can't wakeup from '%s'\n",
+ "can't wakeup from '%s'",
dwc3_gadget_link_string(link_state));
return -EINVAL;
}
@@ -1478,9 +1485,9 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
}
/* poll until Link State changes to ON */
- timeout = jiffies + msecs_to_jiffies(100);
+ retries = 20000;
- while (!time_after(jiffies, timeout)) {
+ while (retries--) {
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
/* in HS, means ON */
@@ -1527,6 +1534,9 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
u32 reg;
u32 timeout = 500;
+ if (pm_runtime_suspended(dwc->dev))
+ return 0;
+
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
if (is_on) {
if (dwc->revision <= DWC3_REVISION_187A) {
@@ -1555,18 +1565,11 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
do {
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
- if (is_on) {
- if (!(reg & DWC3_DSTS_DEVCTRLHLT))
- break;
- } else {
- if (reg & DWC3_DSTS_DEVCTRLHLT)
- break;
- }
- timeout--;
- if (!timeout)
- return -ETIMEDOUT;
- udelay(1);
- } while (1);
+ reg &= DWC3_DSTS_DEVCTRLHLT;
+ } while (--timeout && !(!is_on ^ !reg));
+
+ if (!timeout)
+ return -ETIMEDOUT;
dwc3_trace(trace_dwc3_gadget, "gadget %s data soft-%s",
dwc->gadget_driver
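
The rewritten exit condition is terse: `!is_on` and `!reg` collapse both values to booleans, the XOR is true exactly when they differ, and the leading `!` keeps the loop spinning while they still agree, i.e. until DWC3_DSTS_DEVCTRLHLT reflects the requested run/stop state. A minimal stand-alone sketch of the predicate (names are hypothetical):

#include <stdio.h>

/* Keep polling while the halted bit does not yet match the request. */
static int keep_polling(int is_on, unsigned int halted_bit)
{
	return !(!is_on ^ !halted_bit);
}

int main(void)
{
	printf("%d\n", keep_polling(1, 1));	/* starting, still halted: 1 */
	printf("%d\n", keep_polling(1, 0));	/* starting, now running: 0 */
	printf("%d\n", keep_polling(0, 0));	/* stopping, still running: 1 */
	printf("%d\n", keep_polling(0, 1));	/* stopping, now halted: 0 */
	return 0;
}
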
@@ -1618,36 +1621,52 @@ static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
-static int dwc3_gadget_start(struct usb_gadget *g,
- struct usb_gadget_driver *driver)
+/**
+ * dwc3_gadget_setup_nump - Calculate and initialize NUMP field of DCFG
+ * @dwc: pointer to our context structure
+ *
+ * The following looks complex but is actually very simple. In order to
+ * calculate the number of packets we can burst at once on OUT transfers,
+ * we use the RxFIFO size.
+ *
+ * To calculate RxFIFO size we need two numbers:
+ * MDWIDTH = size, in bits, of the internal memory bus
+ * RAM2_DEPTH = depth, in MDWIDTH, of internal RAM2 (where RxFIFO sits)
+ *
+ * Given these two numbers, the formula is simple:
+ *
+ * RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16;
+ *
+ * 24 bytes is for 3x SETUP packets
+ * 16 bytes is a clock domain crossing tolerance
+ *
+ * Given RxFIFO Size, NUMP = RxFIFOSize / 1024;
+ */
+static void dwc3_gadget_setup_nump(struct dwc3 *dwc)
{
- struct dwc3 *dwc = gadget_to_dwc(g);
- struct dwc3_ep *dep;
- unsigned long flags;
- int ret = 0;
- int irq;
- u32 reg;
+ u32 ram2_depth;
+ u32 mdwidth;
+ u32 nump;
+ u32 reg;
- irq = platform_get_irq(to_platform_device(dwc->dev), 0);
- ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
- IRQF_SHARED, "dwc3", dwc->ev_buf);
- if (ret) {
- dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
- irq, ret);
- goto err0;
- }
+ ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7);
+ mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0);
- spin_lock_irqsave(&dwc->lock, flags);
+ nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024;
+ nump = min_t(u32, nump, 16);
- if (dwc->gadget_driver) {
- dev_err(dwc->dev, "%s is already bound to %s\n",
- dwc->gadget.name,
- dwc->gadget_driver->driver.name);
- ret = -EBUSY;
- goto err1;
- }
+ /* update NumP */
+ reg = dwc3_readl(dwc->regs, DWC3_DCFG);
+ reg &= ~DWC3_DCFG_NUMP_MASK;
+ reg |= nump << DWC3_DCFG_NUMP_SHIFT;
+ dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+}
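
Plugging representative numbers into the formula (both inputs are hypothetical here; the real driver reads them from GHWPARAMS0 and GHWPARAMS7): with MDWIDTH = 64 bits and RAM2_DEPTH = 2048 words, RxFIFO = 2048 * 64 / 8 - 24 - 16 = 16344 bytes, giving NUMP = 16344 / 1024 = 15, already under the cap of 16:

#include <stdio.h>

/* Worked example of the RxFIFO/NUMP arithmetic; inputs are hypothetical. */
int main(void)
{
	unsigned int mdwidth = 64;	/* internal memory bus width, in bits */
	unsigned int ram2_depth = 2048;	/* RAM2 depth, in MDWIDTH words */
	unsigned int rxfifo, nump;

	rxfifo = (ram2_depth * mdwidth / 8) - 24 - 16;	/* in bytes */
	nump = rxfifo / 1024;
	if (nump > 16)
		nump = 16;	/* mirrors the min_t(u32, nump, 16) cap above */

	printf("RxFIFO = %u bytes, NUMP = %u\n", rxfifo, nump);
	return 0;
}
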
- dwc->gadget_driver = driver;
+static int __dwc3_gadget_start(struct dwc3 *dwc)
+{
+ struct dwc3_ep *dep;
+ int ret = 0;
+ u32 reg;
reg = dwc3_readl(dwc->regs, DWC3_DCFG);
reg &= ~(DWC3_DCFG_SPEED_MASK);
@@ -1670,16 +1689,16 @@ static int dwc3_gadget_start(struct usb_gadget *g,
} else {
switch (dwc->maximum_speed) {
case USB_SPEED_LOW:
- reg |= DWC3_DSTS_LOWSPEED;
+ reg |= DWC3_DCFG_LOWSPEED;
break;
case USB_SPEED_FULL:
- reg |= DWC3_DSTS_FULLSPEED1;
+ reg |= DWC3_DCFG_FULLSPEED1;
break;
case USB_SPEED_HIGH:
- reg |= DWC3_DSTS_HIGHSPEED;
+ reg |= DWC3_DCFG_HIGHSPEED;
break;
case USB_SPEED_SUPER_PLUS:
- reg |= DWC3_DSTS_SUPERSPEED_PLUS;
+ reg |= DWC3_DCFG_SUPERSPEED_PLUS;
break;
default:
dev_err(dwc->dev, "invalid dwc->maximum_speed (%d)\n",
@@ -1703,6 +1722,8 @@ static int dwc3_gadget_start(struct usb_gadget *g,
reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL;
dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
+ dwc3_gadget_setup_nump(dwc);
+
/* Start with SuperSpeed Default */
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
@@ -1711,7 +1732,7 @@ static int dwc3_gadget_start(struct usb_gadget *g,
false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
- goto err2;
+ goto err0;
}
dep = dwc->eps[1];
@@ -1719,7 +1740,7 @@ static int dwc3_gadget_start(struct usb_gadget *g,
false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
- goto err3;
+ goto err1;
}
/* begin to receive SETUP packets */
@@ -1728,43 +1749,79 @@ static int dwc3_gadget_start(struct usb_gadget *g,
dwc3_gadget_enable_irq(dwc);
- spin_unlock_irqrestore(&dwc->lock, flags);
-
return 0;
-err3:
- __dwc3_gadget_ep_disable(dwc->eps[0]);
-
-err2:
- dwc->gadget_driver = NULL;
-
err1:
- spin_unlock_irqrestore(&dwc->lock, flags);
-
- free_irq(irq, dwc->ev_buf);
+ __dwc3_gadget_ep_disable(dwc->eps[0]);
err0:
return ret;
}
-static int dwc3_gadget_stop(struct usb_gadget *g)
+static int dwc3_gadget_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
{
struct dwc3 *dwc = gadget_to_dwc(g);
unsigned long flags;
+ int ret = 0;
int irq;
+ irq = dwc->irq_gadget;
+ ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
+ IRQF_SHARED, "dwc3", dwc->ev_buf);
+ if (ret) {
+ dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
+ irq, ret);
+ goto err0;
+ }
+
spin_lock_irqsave(&dwc->lock, flags);
+ if (dwc->gadget_driver) {
+ dev_err(dwc->dev, "%s is already bound to %s\n",
+ dwc->gadget.name,
+ dwc->gadget_driver->driver.name);
+ ret = -EBUSY;
+ goto err1;
+ }
+
+ dwc->gadget_driver = driver;
+
+ if (pm_runtime_active(dwc->dev))
+ __dwc3_gadget_start(dwc);
+
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return 0;
+
+err1:
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ free_irq(irq, dwc);
+
+err0:
+ return ret;
+}
+
+static void __dwc3_gadget_stop(struct dwc3 *dwc)
+{
+ if (pm_runtime_suspended(dwc->dev))
+ return;
dwc3_gadget_disable_irq(dwc);
__dwc3_gadget_ep_disable(dwc->eps[0]);
__dwc3_gadget_ep_disable(dwc->eps[1]);
+}
- dwc->gadget_driver = NULL;
+static int dwc3_gadget_stop(struct usb_gadget *g)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+ unsigned long flags;
+ spin_lock_irqsave(&dwc->lock, flags);
+ __dwc3_gadget_stop(dwc);
+ dwc->gadget_driver = NULL;
spin_unlock_irqrestore(&dwc->lock, flags);
- irq = platform_get_irq(to_platform_device(dwc->dev), 0);
- free_irq(irq, dwc->ev_buf);
+ free_irq(dwc->irq_gadget, dwc->ev_buf);
return 0;
}
@@ -1787,7 +1844,7 @@ static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
u8 i;
for (i = 0; i < num; i++) {
- u8 epnum = (i << 1) | (!!direction);
+ u8 epnum = (i << 1) | (direction ? 1 : 0);
dep = kzalloc(sizeof(*dep), GFP_KERNEL);
if (!dep)
@@ -1796,12 +1853,14 @@ static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
dep->dwc = dwc;
dep->number = epnum;
dep->direction = !!direction;
+ dep->regs = dwc->regs + DWC3_DEP_BASE(epnum);
dwc->eps[epnum] = dep;
snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
(epnum & 1) ? "in" : "out");
dep->endpoint.name = dep->name;
+ spin_lock_init(&dep->lock);
dwc3_trace(trace_dwc3_gadget, "initializing %s", dep->name);
@@ -1904,6 +1963,7 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
unsigned int s_pkt = 0;
unsigned int trb_status;
+ dep->queued_requests--;
trace_dwc3_complete_trb(dep, trb);
/*
@@ -1920,16 +1980,7 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
- /*
- * We continue despite the error. There is not much we
- * can do. If we don't clean it up we loop forever. If
- * we skip the TRB then it gets overwritten after a
- * while since we use them in a ring buffer. A BUG()
- * would help. Lets hope that if this occurs, someone
- * fixes the root cause instead of looking away :)
- */
- dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
- dep->name, trb);
+ return 1;
count = trb->size & DWC3_TRB_SIZE_MASK;
@@ -1938,7 +1989,7 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
dwc3_trace(trace_dwc3_gadget,
- "%s: incomplete IN transfer\n",
+ "%s: incomplete IN transfer",
dep->name);
/*
* If missed isoc occurred and there is
@@ -2028,6 +2079,14 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
break;
} while (1);
+ /*
+ * Our endpoint might get disabled by another thread during
+ * dwc3_gadget_giveback(). If that happens, we just return 1 early
+ * so the DWC3_EP_BUSY flag gets cleared.
+ */
+ if (!dep->endpoint.desc)
+ return 1;
+
if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
list_empty(&dep->started_list)) {
if (list_empty(&dep->pending_list)) {
@@ -2065,7 +2124,7 @@ static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
status = -ECONNRESET;
clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
- if (clean_busy && (is_xfer_complete ||
+ if (clean_busy && (!dep->endpoint.desc || is_xfer_complete ||
usb_endpoint_xfer_isoc(dep->endpoint.desc)))
dep->flags &= ~DWC3_EP_BUSY;
@@ -2094,10 +2153,18 @@ static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
dwc->u1u2 = 0;
}
+ /*
+ * Our endpoint might get disabled by another thread during
+ * dwc3_gadget_giveback(). If that happens, we just return early,
+ * before dereferencing the endpoint descriptor again.
+ */
+ if (!dep->endpoint.desc)
+ return;
+
if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
int ret;
- ret = __dwc3_gadget_kick_transfer(dep, 0, is_xfer_complete);
+ ret = __dwc3_gadget_kick_transfer(dep, 0);
if (!ret || ret == -EBUSY)
return;
}
@@ -2125,7 +2192,7 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
dwc3_trace(trace_dwc3_gadget,
- "%s is an Isochronous endpoint\n",
+ "%s is an Isochronous endpoint",
dep->name);
return;
}
@@ -2148,12 +2215,12 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
dep->name, active ? "Transfer Active"
: "Transfer Not Active");
- ret = __dwc3_gadget_kick_transfer(dep, 0, !active);
+ ret = __dwc3_gadget_kick_transfer(dep, 0);
if (!ret || ret == -EBUSY)
return;
dwc3_trace(trace_dwc3_gadget,
- "%s: failed to kick transfers\n",
+ "%s: failed to kick transfers",
dep->name);
}
@@ -2176,11 +2243,11 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
/* FALLTHROUGH */
default:
dwc3_trace(trace_dwc3_gadget,
- "unable to find suitable stream\n");
+ "unable to find suitable stream");
}
break;
case DWC3_DEPEVT_RXTXFIFOEVT:
- dwc3_trace(trace_dwc3_gadget, "%s FIFO Overrun\n", dep->name);
+ dwc3_trace(trace_dwc3_gadget, "%s FIFO Overrun", dep->name);
break;
case DWC3_DEPEVT_EPCMDCMPLT:
dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete");
@@ -2263,7 +2330,7 @@ static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
cmd |= DWC3_DEPCMD_CMDIOC;
cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
memset(&params, 0, sizeof(params));
- ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
+ ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
WARN_ON_ONCE(ret);
dep->resource_index = 0;
dep->flags &= ~DWC3_EP_BUSY;
@@ -2326,12 +2393,16 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
dwc->gadget.speed = USB_SPEED_UNKNOWN;
dwc->setup_packet_pending = false;
usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
+
+ dwc->connected = false;
}
static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
u32 reg;
+ dwc->connected = true;
+
/*
* WORKAROUND: DWC3 revisions <1.88a have an issue which
* would cause a missing Disconnect Event if there's a
@@ -2419,12 +2490,12 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
dwc3_update_ram_clk_sel(dwc, speed);
switch (speed) {
- case DWC3_DCFG_SUPERSPEED_PLUS:
+ case DWC3_DSTS_SUPERSPEED_PLUS:
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
dwc->gadget.ep0->maxpacket = 512;
dwc->gadget.speed = USB_SPEED_SUPER_PLUS;
break;
- case DWC3_DCFG_SUPERSPEED:
+ case DWC3_DSTS_SUPERSPEED:
/*
* WORKAROUND: DWC3 revisions <1.90a have an issue which
* would cause a missing USB3 Reset event.
@@ -2445,18 +2516,18 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
dwc->gadget.ep0->maxpacket = 512;
dwc->gadget.speed = USB_SPEED_SUPER;
break;
- case DWC3_DCFG_HIGHSPEED:
+ case DWC3_DSTS_HIGHSPEED:
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
dwc->gadget.ep0->maxpacket = 64;
dwc->gadget.speed = USB_SPEED_HIGH;
break;
- case DWC3_DCFG_FULLSPEED2:
- case DWC3_DCFG_FULLSPEED1:
+ case DWC3_DSTS_FULLSPEED2:
+ case DWC3_DSTS_FULLSPEED1:
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
dwc->gadget.ep0->maxpacket = 64;
dwc->gadget.speed = USB_SPEED_FULL;
break;
- case DWC3_DCFG_LOWSPEED:
+ case DWC3_DSTS_LOWSPEED:
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
dwc->gadget.ep0->maxpacket = 8;
dwc->gadget.speed = USB_SPEED_LOW;
@@ -2466,8 +2537,8 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
/* Enable USB2 LPM Capability */
if ((dwc->revision > DWC3_REVISION_194A) &&
- (speed != DWC3_DCFG_SUPERSPEED) &&
- (speed != DWC3_DCFG_SUPERSPEED_PLUS)) {
+ (speed != DWC3_DSTS_SUPERSPEED) &&
+ (speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
reg = dwc3_readl(dwc->regs, DWC3_DCFG);
reg |= DWC3_DCFG_LPM_CAP;
dwc3_writel(dwc->regs, DWC3_DCFG, reg);
@@ -2636,6 +2707,17 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
dwc->link_state = next;
}
+static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
+ unsigned int evtinfo)
+{
+ enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
+
+ if (dwc->link_state != next && next == DWC3_LINK_STATE_U3)
+ dwc3_suspend_gadget(dwc);
+
+ dwc->link_state = next;
+}
+
static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
unsigned int evtinfo)
{
@@ -2687,7 +2769,20 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
break;
case DWC3_DEVICE_EVENT_EOPF:
- dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
+ /* The EOPF event became the suspend event in version 2.30a and above */
+ if (dwc->revision < DWC3_REVISION_230A) {
+ dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
+ } else {
+ dwc3_trace(trace_dwc3_gadget, "U3/L1-L2 Suspend Event");
+
+ /*
+ * Ignore the suspend event until the gadget enters the
+ * USB_STATE_CONFIGURED state.
+ */
+ if (dwc->gadget.state >= USB_STATE_CONFIGURED)
+ dwc3_gadget_suspend_interrupt(dwc,
+ event->event_info);
+ }
break;
case DWC3_DEVICE_EVENT_SOF:
dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame");
@@ -2793,6 +2888,13 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
u32 count;
u32 reg;
+ if (pm_runtime_suspended(dwc->dev)) {
+ pm_runtime_get(dwc->dev);
+ disable_irq_nosync(dwc->irq_gadget);
+ dwc->pending_events = true;
+ return IRQ_HANDLED;
+ }
+
count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
count &= DWC3_GEVNTCOUNT_MASK;
if (!count)
@@ -2824,7 +2926,33 @@ static irqreturn_t dwc3_interrupt(int irq, void *_evt)
*/
int dwc3_gadget_init(struct dwc3 *dwc)
{
- int ret;
+ int ret, irq;
+ struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
+
+ irq = platform_get_irq_byname(dwc3_pdev, "peripheral");
+ if (irq == -EPROBE_DEFER)
+ return irq;
+
+ if (irq <= 0) {
+ irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
+ if (irq == -EPROBE_DEFER)
+ return irq;
+
+ if (irq <= 0) {
+ irq = platform_get_irq(dwc3_pdev, 0);
+ if (irq <= 0) {
+ if (irq != -EPROBE_DEFER) {
+ dev_err(dwc->dev,
+ "missing peripheral IRQ\n");
+ }
+ if (!irq)
+ irq = -EINVAL;
+ return irq;
+ }
+ }
+ }
+
+ dwc->irq_gadget = irq;
dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
&dwc->ctrl_req_addr, GFP_KERNEL);
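
The IRQ lookup above ("peripheral" first, then the legacy "dwc_usb3" name, then the first unnamed resource) is repeated almost line for line in host.c further down. A hypothetical helper condensing the shared fallback, sketched here rather than taken from the driver, might look like:

#include <linux/errno.h>
#include <linux/platform_device.h>

/* Hypothetical: resolve a dwc3 IRQ by preferred name with legacy fallbacks. */
static int dwc3_get_irq(struct platform_device *pdev, const char *name)
{
	int irq;

	irq = platform_get_irq_byname(pdev, name);
	if (irq > 0 || irq == -EPROBE_DEFER)
		return irq;

	irq = platform_get_irq_byname(pdev, "dwc_usb3");
	if (irq > 0 || irq == -EPROBE_DEFER)
		return irq;

	irq = platform_get_irq(pdev, 0);
	if (irq > 0 || irq == -EPROBE_DEFER)
		return irq;

	return irq ? irq : -EINVAL;	/* never hand back 0 as a valid IRQ */
}

The gadget side would then reduce to irq = dwc3_get_irq(dwc3_pdev, "peripheral"); the host side also needs the matching struct resource for the xHCI child, which is presumably why the open-coded branches survive there.
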
@@ -2887,7 +3015,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
*/
if (dwc->revision < DWC3_REVISION_220A)
dwc3_trace(trace_dwc3_gadget,
- "Changing max_speed on rev %08x\n",
+ "Changing max_speed on rev %08x",
dwc->revision);
dwc->gadget.max_speed = dwc->maximum_speed;
@@ -2961,61 +3089,50 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
int dwc3_gadget_suspend(struct dwc3 *dwc)
{
+ int ret;
+
if (!dwc->gadget_driver)
return 0;
- if (dwc->pullups_connected) {
- dwc3_gadget_disable_irq(dwc);
- dwc3_gadget_run_stop(dwc, true, true);
- }
-
- __dwc3_gadget_ep_disable(dwc->eps[0]);
- __dwc3_gadget_ep_disable(dwc->eps[1]);
+ ret = dwc3_gadget_run_stop(dwc, false, false);
+ if (ret < 0)
+ return ret;
- dwc->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG);
+ dwc3_disconnect_gadget(dwc);
+ __dwc3_gadget_stop(dwc);
return 0;
}
int dwc3_gadget_resume(struct dwc3 *dwc)
{
- struct dwc3_ep *dep;
int ret;
if (!dwc->gadget_driver)
return 0;
- /* Start with SuperSpeed Default */
- dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
-
- dep = dwc->eps[0];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
- false);
- if (ret)
+ ret = __dwc3_gadget_start(dwc);
+ if (ret < 0)
goto err0;
- dep = dwc->eps[1];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
- false);
- if (ret)
+ ret = dwc3_gadget_run_stop(dwc, true, false);
+ if (ret < 0)
goto err1;
- /* begin to receive SETUP packets */
- dwc->ep0state = EP0_SETUP_PHASE;
- dwc3_ep0_out_start(dwc);
-
- dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg);
-
- if (dwc->pullups_connected) {
- dwc3_gadget_enable_irq(dwc);
- dwc3_gadget_run_stop(dwc, true, false);
- }
-
return 0;
err1:
- __dwc3_gadget_ep_disable(dwc->eps[0]);
+ __dwc3_gadget_stop(dwc);
err0:
return ret;
}
+
+void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
+{
+ if (dwc->pending_events) {
+ dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);
+ dwc->pending_events = false;
+ enable_irq(dwc->irq_gadget);
+ }
+}
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index f21c0fccb..e4a1d974a 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -95,11 +95,11 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol);
*
* Caller should take care of locking
*/
-static inline u32 dwc3_gadget_ep_get_transfer_index(struct dwc3 *dwc, u8 number)
+static inline u32 dwc3_gadget_ep_get_transfer_index(struct dwc3_ep *dep)
{
u32 res_id;
- res_id = dwc3_readl(dwc->regs, DWC3_DEPCMD(number));
+ res_id = dwc3_readl(dep->regs, DWC3_DEPCMD);
return DWC3_DEPCMD_GET_RSC_IDX(res_id);
}
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index c679f6378..f6533c68f 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -16,15 +16,55 @@
*/
#include <linux/platform_device.h>
-#include <linux/usb/xhci_pdriver.h>
#include "core.h"
int dwc3_host_init(struct dwc3 *dwc)
{
+ struct property_entry props[2];
struct platform_device *xhci;
- struct usb_xhci_pdata pdata;
- int ret;
+ int ret, irq;
+ struct resource *res;
+ struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
+
+ irq = platform_get_irq_byname(dwc3_pdev, "host");
+ if (irq == -EPROBE_DEFER)
+ return irq;
+
+ if (irq <= 0) {
+ irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
+ if (irq == -EPROBE_DEFER)
+ return irq;
+
+ if (irq <= 0) {
+ irq = platform_get_irq(dwc3_pdev, 0);
+ if (irq <= 0) {
+ if (irq != -EPROBE_DEFER) {
+ dev_err(dwc->dev,
+ "missing host IRQ\n");
+ }
+ if (!irq)
+ irq = -EINVAL;
+ return irq;
+ } else {
+ res = platform_get_resource(dwc3_pdev,
+ IORESOURCE_IRQ, 0);
+ }
+ } else {
+ res = platform_get_resource_byname(dwc3_pdev,
+ IORESOURCE_IRQ,
+ "dwc_usb3");
+ }
+
+ } else {
+ res = platform_get_resource_byname(dwc3_pdev, IORESOURCE_IRQ,
+ "host");
+ }
+
+ dwc->xhci_resources[1].start = irq;
+ dwc->xhci_resources[1].end = irq;
+ dwc->xhci_resources[1].flags = res->flags;
+ dwc->xhci_resources[1].name = res->name;
xhci = platform_device_alloc("xhci-hcd", PLATFORM_DEVID_AUTO);
if (!xhci) {
@@ -47,14 +87,15 @@ int dwc3_host_init(struct dwc3 *dwc)
goto err1;
}
- memset(&pdata, 0, sizeof(pdata));
-
- pdata.usb3_lpm_capable = dwc->usb3_lpm_capable;
+ memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
- ret = platform_device_add_data(xhci, &pdata, sizeof(pdata));
- if (ret) {
- dev_err(dwc->dev, "couldn't add platform data to xHCI device\n");
- goto err1;
+ if (dwc->usb3_lpm_capable) {
+ props[0].name = "usb3-lpm-capable";
+ ret = platform_device_add_properties(xhci, props);
+ if (ret) {
+ dev_err(dwc->dev, "failed to add properties to xHCI\n");
+ goto err1;
+ }
}
phy_create_lookup(dwc->usb2_generic_phy, "usb2-phy",
diff --git a/drivers/usb/dwc3/io.h b/drivers/usb/dwc3/io.h
index 6a79c8e66..a06f9a8fe 100644
--- a/drivers/usb/dwc3/io.h
+++ b/drivers/usb/dwc3/io.h
@@ -26,7 +26,6 @@
static inline u32 dwc3_readl(void __iomem *base, u32 offset)
{
- u32 offs = offset - DWC3_GLOBALS_REGS_START;
u32 value;
/*
@@ -34,7 +33,7 @@ static inline u32 dwc3_readl(void __iomem *base, u32 offset)
* space, see dwc3_probe in core.c.
* However, the offsets are given starting from xHCI address space.
*/
- value = readl(base + offs);
+ value = readl(base + offset - DWC3_GLOBALS_REGS_START);
/*
* When tracing we want to make it easy to find the correct address on
@@ -49,14 +48,12 @@ static inline u32 dwc3_readl(void __iomem *base, u32 offset)
static inline void dwc3_writel(void __iomem *base, u32 offset, u32 value)
{
- u32 offs = offset - DWC3_GLOBALS_REGS_START;
-
/*
* We requested the mem region starting from the Globals address
* space, see dwc3_probe in core.c.
* However, the offsets are given starting from xHCI address space.
*/
- writel(value, base + offs);
+ writel(value, base + offset - DWC3_GLOBALS_REGS_START);
/*
* When tracing we want to make it easy to find the correct address on
diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h
index 3ac7252f4..d24cefd19 100644
--- a/drivers/usb/dwc3/trace.h
+++ b/drivers/usb/dwc3/trace.h
@@ -71,7 +71,8 @@ DECLARE_EVENT_CLASS(dwc3_log_event,
TP_fast_assign(
__entry->event = event;
),
- TP_printk("event %08x", __entry->event)
+ TP_printk("event (%08x): %s", __entry->event,
+ dwc3_decode_event(__entry->event))
);
DEFINE_EVENT(dwc3_log_event, dwc3_event,
@@ -85,21 +86,21 @@ DECLARE_EVENT_CLASS(dwc3_log_ctrl,
TP_STRUCT__entry(
__field(__u8, bRequestType)
__field(__u8, bRequest)
- __field(__le16, wValue)
- __field(__le16, wIndex)
- __field(__le16, wLength)
+ __field(__u16, wValue)
+ __field(__u16, wIndex)
+ __field(__u16, wLength)
),
TP_fast_assign(
__entry->bRequestType = ctrl->bRequestType;
__entry->bRequest = ctrl->bRequest;
- __entry->wValue = ctrl->wValue;
- __entry->wIndex = ctrl->wIndex;
- __entry->wLength = ctrl->wLength;
+ __entry->wValue = le16_to_cpu(ctrl->wValue);
+ __entry->wIndex = le16_to_cpu(ctrl->wIndex);
+ __entry->wLength = le16_to_cpu(ctrl->wLength);
),
TP_printk("bRequestType %02x bRequest %02x wValue %04x wIndex %04x wLength %d",
__entry->bRequestType, __entry->bRequest,
- le16_to_cpu(__entry->wValue), le16_to_cpu(__entry->wIndex),
- le16_to_cpu(__entry->wLength)
+ __entry->wValue, __entry->wIndex,
+ __entry->wLength
)
);
@@ -166,37 +167,41 @@ DEFINE_EVENT(dwc3_log_request, dwc3_gadget_giveback,
);
DECLARE_EVENT_CLASS(dwc3_log_generic_cmd,
- TP_PROTO(unsigned int cmd, u32 param),
- TP_ARGS(cmd, param),
+ TP_PROTO(unsigned int cmd, u32 param, int status),
+ TP_ARGS(cmd, param, status),
TP_STRUCT__entry(
__field(unsigned int, cmd)
__field(u32, param)
+ __field(int, status)
),
TP_fast_assign(
__entry->cmd = cmd;
__entry->param = param;
+ __entry->status = status;
),
- TP_printk("cmd '%s' [%d] param %08x",
+ TP_printk("cmd '%s' [%d] param %08x --> status: %s",
dwc3_gadget_generic_cmd_string(__entry->cmd),
- __entry->cmd, __entry->param
+ __entry->cmd, __entry->param,
+ dwc3_gadget_generic_cmd_status_string(__entry->status)
)
);
DEFINE_EVENT(dwc3_log_generic_cmd, dwc3_gadget_generic_cmd,
- TP_PROTO(unsigned int cmd, u32 param),
- TP_ARGS(cmd, param)
+ TP_PROTO(unsigned int cmd, u32 param, int status),
+ TP_ARGS(cmd, param, status)
);
DECLARE_EVENT_CLASS(dwc3_log_gadget_ep_cmd,
TP_PROTO(struct dwc3_ep *dep, unsigned int cmd,
- struct dwc3_gadget_ep_cmd_params *params),
- TP_ARGS(dep, cmd, params),
+ struct dwc3_gadget_ep_cmd_params *params, int cmd_status),
+ TP_ARGS(dep, cmd, params, cmd_status),
TP_STRUCT__entry(
__dynamic_array(char, name, DWC3_MSG_MAX)
__field(unsigned int, cmd)
__field(u32, param0)
__field(u32, param1)
__field(u32, param2)
+ __field(int, cmd_status)
),
TP_fast_assign(
snprintf(__get_str(name), DWC3_MSG_MAX, "%s", dep->name);
@@ -204,18 +209,20 @@ DECLARE_EVENT_CLASS(dwc3_log_gadget_ep_cmd,
__entry->param0 = params->param0;
__entry->param1 = params->param1;
__entry->param2 = params->param2;
+ __entry->cmd_status = cmd_status;
),
- TP_printk("%s: cmd '%s' [%d] params %08x %08x %08x",
+ TP_printk("%s: cmd '%s' [%d] params %08x %08x %08x --> status: %s",
__get_str(name), dwc3_gadget_ep_cmd_string(__entry->cmd),
__entry->cmd, __entry->param0,
- __entry->param1, __entry->param2
+ __entry->param1, __entry->param2,
+ dwc3_ep_cmd_status_string(__entry->cmd_status)
)
);
DEFINE_EVENT(dwc3_log_gadget_ep_cmd, dwc3_gadget_ep_cmd,
TP_PROTO(struct dwc3_ep *dep, unsigned int cmd,
- struct dwc3_gadget_ep_cmd_params *params),
- TP_ARGS(dep, cmd, params)
+ struct dwc3_gadget_ep_cmd_params *params, int cmd_status),
+ TP_ARGS(dep, cmd, params, cmd_status)
);
DECLARE_EVENT_CLASS(dwc3_log_trb,
@@ -224,6 +231,8 @@ DECLARE_EVENT_CLASS(dwc3_log_trb,
TP_STRUCT__entry(
__dynamic_array(char, name, DWC3_MSG_MAX)
__field(struct dwc3_trb *, trb)
+ __field(u32, allocated)
+ __field(u32, queued)
__field(u32, bpl)
__field(u32, bph)
__field(u32, size)
@@ -232,14 +241,53 @@ DECLARE_EVENT_CLASS(dwc3_log_trb,
TP_fast_assign(
snprintf(__get_str(name), DWC3_MSG_MAX, "%s", dep->name);
__entry->trb = trb;
+ __entry->allocated = dep->allocated_requests;
+ __entry->queued = dep->queued_requests;
__entry->bpl = trb->bpl;
__entry->bph = trb->bph;
__entry->size = trb->size;
__entry->ctrl = trb->ctrl;
),
- TP_printk("%s: trb %p bph %08x bpl %08x size %08x ctrl %08x",
- __get_str(name), __entry->trb, __entry->bph, __entry->bpl,
- __entry->size, __entry->ctrl
+ TP_printk("%s: %d/%d trb %p buf %08x%08x size %d ctrl %08x (%c%c%c%c:%c%c:%s)",
+ __get_str(name), __entry->queued, __entry->allocated,
+ __entry->trb, __entry->bph, __entry->bpl,
+ __entry->size, __entry->ctrl,
+ __entry->ctrl & DWC3_TRB_CTRL_HWO ? 'H' : 'h',
+ __entry->ctrl & DWC3_TRB_CTRL_LST ? 'L' : 'l',
+ __entry->ctrl & DWC3_TRB_CTRL_CHN ? 'C' : 'c',
+ __entry->ctrl & DWC3_TRB_CTRL_CSP ? 'S' : 's',
+ __entry->ctrl & DWC3_TRB_CTRL_ISP_IMI ? 'S' : 's',
+ __entry->ctrl & DWC3_TRB_CTRL_IOC ? 'C' : 'c',
+ ({char *s;
+ switch (__entry->ctrl & 0x3f0) {
+ case DWC3_TRBCTL_NORMAL:
+ s = "normal";
+ break;
+ case DWC3_TRBCTL_CONTROL_SETUP:
+ s = "setup";
+ break;
+ case DWC3_TRBCTL_CONTROL_STATUS2:
+ s = "status2";
+ break;
+ case DWC3_TRBCTL_CONTROL_STATUS3:
+ s = "status3";
+ break;
+ case DWC3_TRBCTL_CONTROL_DATA:
+ s = "data";
+ break;
+ case DWC3_TRBCTL_ISOCHRONOUS_FIRST:
+ s = "isoc-first";
+ break;
+ case DWC3_TRBCTL_ISOCHRONOUS:
+ s = "isoc";
+ break;
+ case DWC3_TRBCTL_LINK_TRB:
+ s = "link";
+ break;
+ default:
+ s = "UNKNOWN";
+ break;
+ } s; })
)
);
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
index 8cfc3191b..12731e67d 100644
--- a/drivers/usb/early/ehci-dbgp.c
+++ b/drivers/usb/early/ehci-dbgp.c
@@ -13,7 +13,7 @@
#include <linux/console.h>
#include <linux/errno.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/pci_regs.h>
#include <linux/pci_ids.h>
#include <linux/usb/ch9.h>
@@ -1093,5 +1093,5 @@ static int __init kgdbdbgp_start_thread(void)
return 0;
}
-module_init(kgdbdbgp_start_thread);
+device_initcall(kgdbdbgp_start_thread);
#endif /* CONFIG_KGDB */
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 2057add43..3c3f31cee 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -114,7 +114,7 @@ config USB_GADGET_VBUS_DRAW
config USB_GADGET_STORAGE_NUM_BUFFERS
int "Number of storage pipeline buffers"
- range 2 32
+ range 2 256
default 2
help
Usually 2 buffers are enough to establish a good buffering
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index eb648485a..5ebe6af79 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1913,6 +1913,8 @@ unknown:
break;
case USB_RECIP_ENDPOINT:
+ if (!cdev->config)
+ break;
endp = ((w_index & 0x80) >> 3) | (w_index & 0x0f);
list_for_each_entry(f, &cdev->config->functions, list) {
if (test_bit(endp, f->endpoints))
@@ -2124,14 +2126,14 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev,
cdev->os_desc_req = usb_ep_alloc_request(ep0, GFP_KERNEL);
if (!cdev->os_desc_req) {
- ret = PTR_ERR(cdev->os_desc_req);
+ ret = -ENOMEM;
goto end;
}
/* OS feature descriptor length <= 4kB */
cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL);
if (!cdev->os_desc_req->buf) {
- ret = PTR_ERR(cdev->os_desc_req->buf);
+ ret = -ENOMEM;
kfree(cdev->os_desc_req);
goto end;
}
diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
index e6c0542a0..17a6077b8 100644
--- a/drivers/usb/gadget/config.c
+++ b/drivers/usb/gadget/config.c
@@ -93,7 +93,7 @@ int usb_gadget_config_buf(
*cp = *config;
/* then interface/endpoint/class/vendor/... */
- len = usb_descriptor_fillbuf(USB_DT_CONFIG_SIZE + (u8*)buf,
+ len = usb_descriptor_fillbuf(USB_DT_CONFIG_SIZE + (u8 *)buf,
length - USB_DT_CONFIG_SIZE, desc);
if (len < 0)
return len;
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 70cf3477f..f9237fe2b 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1490,7 +1490,9 @@ void unregister_gadget_item(struct config_item *item)
{
struct gadget_info *gi = to_gadget_info(item);
+ mutex_lock(&gi->lock);
unregister_gadget(gi);
+ mutex_unlock(&gi->lock);
}
EXPORT_SYMBOL_GPL(unregister_gadget_item);
diff --git a/drivers/usb/gadget/function/f_eem.c b/drivers/usb/gadget/function/f_eem.c
index d58bfc32b..007ec6e4a 100644
--- a/drivers/usb/gadget/function/f_eem.c
+++ b/drivers/usb/gadget/function/f_eem.c
@@ -341,11 +341,15 @@ static struct sk_buff *eem_wrap(struct gether *port, struct sk_buff *skb)
{
struct sk_buff *skb2 = NULL;
struct usb_ep *in = port->in_ep;
- int padlen = 0;
- u16 len = skb->len;
+ int headroom, tailroom, padlen = 0;
+ u16 len;
- int headroom = skb_headroom(skb);
- int tailroom = skb_tailroom(skb);
+ if (!skb)
+ return NULL;
+
+ len = skb->len;
+ headroom = skb_headroom(skb);
+ tailroom = skb_tailroom(skb);
/* When (len + EEM_HLEN + ETH_FCS_LEN) % in->maxpacket is 0,
* stick two bytes of zero-length EEM packet on the end.
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index cc33d2667..5c8429f23 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -130,6 +130,12 @@ struct ffs_epfile {
struct dentry *dentry;
+ /*
+ * Buffer for holding data from partial reads which may happen since
+ * we’re rounding user read requests to a multiple of a max packet size.
+ */
+ struct ffs_buffer *read_buffer; /* P: epfile->mutex */
+
char name[5];
unsigned char in; /* P: ffs->eps_lock */
@@ -138,6 +144,12 @@ struct ffs_epfile {
unsigned char _pad;
};
+struct ffs_buffer {
+ size_t length;
+ char *data;
+ char storage[];
+};
+
/* ffs_io_data structure ***************************************************/
struct ffs_io_data {
@@ -640,6 +652,49 @@ static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
}
}
+static ssize_t ffs_copy_to_iter(void *data, int data_len, struct iov_iter *iter)
+{
+ ssize_t ret = copy_to_iter(data, data_len, iter);
+ if (likely(ret == data_len))
+ return ret;
+
+ if (unlikely(iov_iter_count(iter)))
+ return -EFAULT;
+
+ /*
+ * Dear user space developer!
+ *
+ * TL;DR: To stop getting the error message below in your kernel log,
+ * change user space code using functionfs to align read buffers to a
+ * max packet size.
+ *
+ * Some UDCs (e.g. dwc3) require request sizes to be a multiple of a max
+ * packet size. When an unaligned buffer is passed to functionfs, it
+ * internally uses a larger, aligned buffer so that such UDCs are happy.
+ *
+ * Unfortunately, this means that the host may send more data than was
+ * requested in the read(2) system call. f_fs doesn’t know what to do
+ * with that excess data so it simply drops it.
+ *
+ * Had the buffer been aligned in the first place, no such problem would
+ * have happened.
+ *
+ * Data may be dropped only in AIO reads. Synchronous reads are handled
+ * by splitting a request into multiple parts. This splitting may still
+ * be a problem though, so it’s likely best to align the buffer
+ * regardless of whether the I/O is AIO or synchronous.
+ *
+ * This only affects OUT endpoints, i.e. reading data with read(2),
+ * aio_read(2), etc. system calls. Writing data to an IN endpoint is not
+ * affected.
+ */
+ pr_err("functionfs read size %d > requested size %zd, dropping excess data. "
+ "Align read buffer size to max packet size to avoid the problem.\n",
+ data_len, ret);
+
+ return ret;
+}
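
On the user space side, the fix the comment above asks for is to round the read length up to a multiple of the endpoint's wMaxPacketSize before calling read(2). A minimal sketch, where both the endpoint path and the 512-byte packet size are assumptions for illustration:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define MAX_PACKET 512UL	/* hypothetical: HS bulk maxpacket */

int main(void)
{
	size_t want = 1000;	/* bytes the application actually needs */
	size_t len = (want + MAX_PACKET - 1) / MAX_PACKET * MAX_PACKET;
	int fd = open("/dev/usb-ffs/app/ep1", O_RDONLY);	/* hypothetical path */
	char *buf;
	ssize_t n;

	if (fd < 0)
		return 1;

	buf = malloc(len);
	if (!buf) {
		close(fd);
		return 1;
	}

	/* len is a multiple of MAX_PACKET, so f_fs never has to drop data. */
	n = read(fd, buf, len);
	if (n >= 0)
		printf("read %zd bytes\n", n);

	free(buf);
	close(fd);
	return 0;
}
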
+
static void ffs_user_copy_worker(struct work_struct *work)
{
struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
@@ -650,9 +705,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
if (io_data->read && ret > 0) {
use_mm(io_data->mm);
- ret = copy_to_iter(io_data->buf, ret, &io_data->data);
- if (ret != io_data->req->actual && iov_iter_count(&io_data->data))
- ret = -EFAULT;
+ ret = ffs_copy_to_iter(io_data->buf, ret, &io_data->data);
unuse_mm(io_data->mm);
}
@@ -680,6 +733,58 @@ static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
schedule_work(&io_data->work);
}
+/* Assumes epfile->mutex is held. */
+static ssize_t __ffs_epfile_read_buffered(struct ffs_epfile *epfile,
+ struct iov_iter *iter)
+{
+ struct ffs_buffer *buf = epfile->read_buffer;
+ ssize_t ret;
+ if (!buf)
+ return 0;
+
+ ret = copy_to_iter(buf->data, buf->length, iter);
+ if (buf->length == ret) {
+ kfree(buf);
+ epfile->read_buffer = NULL;
+ } else if (unlikely(iov_iter_count(iter))) {
+ ret = -EFAULT;
+ } else {
+ buf->length -= ret;
+ buf->data += ret;
+ }
+ return ret;
+}
+
+/* Assumes epfile->mutex is held. */
+static ssize_t __ffs_epfile_read_data(struct ffs_epfile *epfile,
+ void *data, int data_len,
+ struct iov_iter *iter)
+{
+ struct ffs_buffer *buf;
+
+ ssize_t ret = copy_to_iter(data, data_len, iter);
+ if (likely(data_len == ret))
+ return ret;
+
+ if (unlikely(iov_iter_count(iter)))
+ return -EFAULT;
+
+ /* See ffs_copy_to_iter for more context. */
+ pr_warn("functionfs read size %d > requested size %zd, splitting request into multiple reads.",
+ data_len, ret);
+
+ data_len -= ret;
+ buf = kmalloc(sizeof(*buf) + data_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ buf->length = data_len;
+ buf->data = buf->storage;
+ memcpy(buf->storage, data + ret, data_len);
+ epfile->read_buffer = buf;
+
+ return ret;
+}
+
static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
{
struct ffs_epfile *epfile = file->private_data;
@@ -709,21 +814,40 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
if (halt && epfile->isoc)
return -EINVAL;
+ /* We will be using request and read_buffer */
+ ret = ffs_mutex_lock(&epfile->mutex, file->f_flags & O_NONBLOCK);
+ if (unlikely(ret))
+ goto error;
+
/* Allocate & copy */
if (!halt) {
+ struct usb_gadget *gadget;
+
+ /*
+ * Do we have buffered data from a previous partial read? Check
+ * the synchronous case only, because we have no facility to
+ * ‘wake up’ a pending asynchronous read and push buffered data
+ * to it, which we would need to make things behave
+ * consistently.
+ */
+ if (!io_data->aio && io_data->read) {
+ ret = __ffs_epfile_read_buffered(epfile, &io_data->data);
+ if (ret)
+ goto error_mutex;
+ }
+
/*
* if we _do_ wait above, the epfile->ffs->gadget might be NULL
* before the waiting completes, so do not assign to 'gadget'
* earlier
*/
- struct usb_gadget *gadget = epfile->ffs->gadget;
- size_t copied;
+ gadget = epfile->ffs->gadget;
spin_lock_irq(&epfile->ffs->eps_lock);
/* In the meantime, endpoint got disabled or changed. */
if (epfile->ep != ep) {
- spin_unlock_irq(&epfile->ffs->eps_lock);
- return -ESHUTDOWN;
+ ret = -ESHUTDOWN;
+ goto error_lock;
}
data_len = iov_iter_count(&io_data->data);
/*
@@ -735,22 +859,17 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
spin_unlock_irq(&epfile->ffs->eps_lock);
data = kmalloc(data_len, GFP_KERNEL);
- if (unlikely(!data))
- return -ENOMEM;
- if (!io_data->read) {
- copied = copy_from_iter(data, data_len, &io_data->data);
- if (copied != data_len) {
- ret = -EFAULT;
- goto error;
- }
+ if (unlikely(!data)) {
+ ret = -ENOMEM;
+ goto error_mutex;
+ }
+ if (!io_data->read &&
+ copy_from_iter(data, data_len, &io_data->data) != data_len) {
+ ret = -EFAULT;
+ goto error_mutex;
}
}
- /* We will be using request */
- ret = ffs_mutex_lock(&epfile->mutex, file->f_flags & O_NONBLOCK);
- if (unlikely(ret))
- goto error;
-
spin_lock_irq(&epfile->ffs->eps_lock);
if (epfile->ep != ep) {
@@ -803,18 +922,13 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
interrupted = ep->status < 0;
}
- /*
- * XXX We may end up silently droping data here. Since data_len
- * (i.e. req->length) may be bigger than len (after being
- * rounded up to maxpacketsize), we may end up with more data
- * then user space has space for.
- */
- ret = interrupted ? -EINTR : ep->status;
- if (io_data->read && ret > 0) {
- ret = copy_to_iter(data, ret, &io_data->data);
- if (!ret)
- ret = -EFAULT;
- }
+ if (interrupted)
+ ret = -EINTR;
+ else if (io_data->read && ep->status > 0)
+ ret = __ffs_epfile_read_data(epfile, data, ep->status,
+ &io_data->data);
+ else
+ ret = ep->status;
goto error_mutex;
} else if (!(req = usb_ep_alloc_request(ep->ep, GFP_KERNEL))) {
ret = -ENOMEM;
@@ -980,6 +1094,8 @@ ffs_epfile_release(struct inode *inode, struct file *file)
ENTER();
+ kfree(epfile->read_buffer);
+ epfile->read_buffer = NULL;
ffs_data_closed(epfile->ffs);
return 0;
@@ -1605,19 +1721,24 @@ static void ffs_func_eps_disable(struct ffs_function *func)
unsigned count = func->ffs->eps_count;
unsigned long flags;
- spin_lock_irqsave(&func->ffs->eps_lock, flags);
do {
+ if (epfile)
+ mutex_lock(&epfile->mutex);
+ spin_lock_irqsave(&func->ffs->eps_lock, flags);
/* pending requests get nuked */
if (likely(ep->ep))
usb_ep_disable(ep->ep);
++ep;
+ spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
if (epfile) {
epfile->ep = NULL;
+ kfree(epfile->read_buffer);
+ epfile->read_buffer = NULL;
+ mutex_unlock(&epfile->mutex);
++epfile;
}
} while (--count);
- spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
}
static int ffs_func_eps_enable(struct ffs_function *func)
@@ -2227,8 +2348,8 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
{
u32 str_count, needed_count, lang_count;
struct usb_gadget_strings **stringtabs, *t;
- struct usb_string *strings, *s;
const char *data = _data;
+ struct usb_string *s;
ENTER();
@@ -2286,7 +2407,6 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
stringtabs = vla_ptr(vlabuf, d, stringtabs);
t = vla_ptr(vlabuf, d, stringtab);
s = vla_ptr(vlabuf, d, strings);
- strings = s;
}
/* For each language */
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 5c6d4d7ca..2505117e8 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -2655,18 +2655,6 @@ void fsg_common_put(struct fsg_common *common)
}
EXPORT_SYMBOL_GPL(fsg_common_put);
-/* check if fsg_num_buffers is within a valid range */
-static inline int fsg_num_buffers_validate(unsigned int fsg_num_buffers)
-{
-#define FSG_MAX_NUM_BUFFERS 32
-
- if (fsg_num_buffers >= 2 && fsg_num_buffers <= FSG_MAX_NUM_BUFFERS)
- return 0;
- pr_err("fsg_num_buffers %u is out of range (%d to %d)\n",
- fsg_num_buffers, 2, FSG_MAX_NUM_BUFFERS);
- return -EINVAL;
-}
-
static struct fsg_common *fsg_common_setup(struct fsg_common *common)
{
if (!common) {
@@ -2709,11 +2697,7 @@ static void _fsg_common_free_buffers(struct fsg_buffhd *buffhds, unsigned n)
int fsg_common_set_num_buffers(struct fsg_common *common, unsigned int n)
{
struct fsg_buffhd *bh, *buffhds;
- int i, rc;
-
- rc = fsg_num_buffers_validate(n);
- if (rc != 0)
- return rc;
+ int i;
buffhds = kcalloc(n, sizeof(*buffhds), GFP_KERNEL);
if (!buffhds)
@@ -3401,10 +3385,6 @@ static ssize_t fsg_opts_num_buffers_store(struct config_item *item,
if (ret)
goto end;
- ret = fsg_num_buffers_validate(num);
- if (ret)
- goto end;
-
fsg_common_set_num_buffers(opts->common, num);
ret = len;
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index c8005823b..16562e461 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -374,6 +374,9 @@ static struct sk_buff *rndis_add_header(struct gether *port,
{
struct sk_buff *skb2;
+ if (!skb)
+ return NULL;
+
skb2 = skb_realloc_headroom(skb, sizeof(struct rndis_packet_msg_type));
rndis_add_hdr(skb2);
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index 943c21aaf..ab6ac1b74 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -680,6 +680,12 @@ static int rndis_reset_response(struct rndis_params *params,
{
rndis_reset_cmplt_type *resp;
rndis_resp_t *r;
+ u8 *xbuf;
+ u32 length;
+
+ /* drain the response queue */
+ while ((xbuf = rndis_get_next_response(params, &length)))
+ rndis_free_response(params, xbuf);
r = rndis_add_response(params, sizeof(rndis_reset_cmplt_type));
if (!r)
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index a3f7e7c55..5f562c1ec 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -556,7 +556,8 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
/* Multi frame CDC protocols may store the frame for
* later which is not a dropped frame.
*/
- if (dev->port_usb->supports_multi_frame)
+ if (dev->port_usb &&
+ dev->port_usb->supports_multi_frame)
goto multiframe;
goto drop;
}
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 99c486264..a91fe6476 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -375,10 +375,15 @@ __acquires(&port->port_lock)
*/
{
struct list_head *pool = &port->write_pool;
- struct usb_ep *in = port->port_usb->in;
+ struct usb_ep *in;
int status = 0;
bool do_tty_wake = false;
+ if (!port->port_usb)
+ return status;
+
+ in = port->port_usb->in;
+
while (!port->write_busy && !list_empty(pool)) {
struct usb_request *req;
int len;
@@ -907,7 +912,6 @@ static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
struct gs_port *port = tty->driver_data;
unsigned long flags;
- int status;
pr_vdebug("gs_write: ttyGS%d (%p) writing %d bytes\n",
port->port_num, tty, count);
@@ -917,7 +921,7 @@ static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
count = gs_buf_put(&port->port_write_buf, buf, count);
/* treat count == 0 as flush_chars() */
if (port->port_usb)
- status = gs_start_tx(port);
+ gs_start_tx(port);
spin_unlock_irqrestore(&port->port_lock, flags);
return count;
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index 66753ba7a..31125a4a2 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -2023,7 +2023,7 @@ static int uvcg_streaming_class_allow_link(struct config_item *src,
if (!data) {
kfree(*class_array);
*class_array = NULL;
- ret = PTR_ERR(data);
+ ret = -ENOMEM;
goto unlock;
}
cl_arr = *class_array;
diff --git a/drivers/usb/gadget/function/uvc_queue.c b/drivers/usb/gadget/function/uvc_queue.c
index 912694f3d..6377e9fee 100644
--- a/drivers/usb/gadget/function/uvc_queue.c
+++ b/drivers/usb/gadget/function/uvc_queue.c
@@ -43,7 +43,7 @@
static int uvc_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], void *alloc_ctxs[])
+ unsigned int sizes[], struct device *alloc_devs[])
{
struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
struct uvc_video *video = container_of(queue, struct uvc_video, queue);
diff --git a/drivers/usb/gadget/legacy/g_ffs.c b/drivers/usb/gadget/legacy/g_ffs.c
index f85639ef8..6da7316f8 100644
--- a/drivers/usb/gadget/legacy/g_ffs.c
+++ b/drivers/usb/gadget/legacy/g_ffs.c
@@ -265,7 +265,7 @@ static void *functionfs_acquire_dev(struct ffs_dev *dev)
{
if (!try_module_get(THIS_MODULE))
return ERR_PTR(-ENOENT);
-
+
return NULL;
}
@@ -275,7 +275,7 @@ static void functionfs_release_dev(struct ffs_dev *dev)
}
/*
- * The caller of this function takes ffs_lock
+ * The caller of this function takes ffs_lock
*/
static int functionfs_ready_callback(struct ffs_data *ffs)
{
@@ -294,12 +294,12 @@ static int functionfs_ready_callback(struct ffs_data *ffs)
++missing_funcs;
gfs_registered = false;
}
-
+
return ret;
}
/*
- * The caller of this function takes ffs_lock
+ * The caller of this function takes ffs_lock
*/
static void functionfs_closed_callback(struct ffs_data *ffs)
{
@@ -347,17 +347,14 @@ static int gfs_bind(struct usb_composite_dev *cdev)
#ifdef CONFIG_USB_FUNCTIONFS_RNDIS
{
- struct f_rndis_opts *rndis_opts;
-
fi_rndis = usb_get_function_instance("rndis");
if (IS_ERR(fi_rndis)) {
ret = PTR_ERR(fi_rndis);
goto error;
}
- rndis_opts = container_of(fi_rndis, struct f_rndis_opts,
- func_inst);
#ifndef CONFIG_USB_FUNCTIONFS_ETH
- net = rndis_opts->net;
+ net = container_of(fi_rndis, struct f_rndis_opts,
+ func_inst)->net;
#endif
}
#endif
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index be6479830..16104b5eb 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -606,7 +606,7 @@ ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
}
if (is_sync_kiocb(iocb)) {
value = ep_io(epdata, buf, len);
- if (value >= 0 && copy_to_iter(buf, value, to))
+ if (value >= 0 && (copy_to_iter(buf, value, to) != value))
value = -EFAULT;
} else {
struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 7c289416f..658b8da60 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -312,7 +312,7 @@ config USB_NET2272_DMA
If unsure, say "N" here. The driver works fine in PIO mode.
config USB_NET2280
- tristate "NetChip 228x / PLX USB338x"
+ tristate "NetChip NET228x / PLX USB3x8x"
depends on PCI
help
NetChip 2280 / 2282 is a PCI based USB peripheral controller which
@@ -322,6 +322,8 @@ config USB_NET2280
(for control transfers) and several endpoints with dedicated
functions.
+	  PLX 2380 is a PCIe version of the NetChip 2280.
+
PLX 3380 / 3382 is a PCIe based USB peripheral controller which
supports full, high speed USB 2.0 and super speed USB 3.0
data transfers.
diff --git a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile
index dfee53446..98e74ed9f 100644
--- a/drivers/usb/gadget/udc/Makefile
+++ b/drivers/usb/gadget/udc/Makefile
@@ -1,3 +1,8 @@
+# define_trace.h needs to know how to find our header
+CFLAGS_trace.o := -I$(src)
+
+udc-core-y := core.o trace.o
+
#
# USB peripheral controller drivers
#
diff --git a/drivers/usb/gadget/udc/amd5536udc.c b/drivers/usb/gadget/udc/amd5536udc.c
index 39d70b4a8..ea03ca7ae 100644
--- a/drivers/usb/gadget/udc/amd5536udc.c
+++ b/drivers/usb/gadget/udc/amd5536udc.c
@@ -2340,7 +2340,6 @@ static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
struct udc_ep *ep;
struct udc_request *req;
struct udc_data_dma *td;
- unsigned dma_done;
unsigned len;
ep = &dev->ep[ep_ix];
@@ -2385,13 +2384,8 @@ static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
*/
if (use_dma_ppb_du) {
td = udc_get_last_dma_desc(req);
- if (td) {
- dma_done =
- AMD_GETBITS(td->status,
- UDC_DMA_IN_STS_BS);
- /* don't care DMA done */
+ if (td)
req->req.actual = req->req.length;
- }
} else {
/* assume all bytes transferred */
req->req.actual = req->req.length;
@@ -3417,4 +3411,3 @@ module_pci_driver(udc_pci_driver);
MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
MODULE_AUTHOR("Thomas Dahlmann");
MODULE_LICENSE("GPL");
-
diff --git a/drivers/usb/gadget/udc/bdc/bdc_cmd.c b/drivers/usb/gadget/udc/bdc/bdc_cmd.c
index 6a4155c4b..4d5e9188b 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_cmd.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_cmd.c
@@ -57,7 +57,6 @@ static int bdc_submit_cmd(struct bdc *bdc, u32 cmd_sc,
u32 param0, u32 param1, u32 param2)
{
u32 temp, cmd_status;
- int reset_bdc = 0;
int ret;
temp = bdc_readl(bdc->regs, BDC_CMDSC);
@@ -94,7 +93,6 @@ static int bdc_submit_cmd(struct bdc *bdc, u32 cmd_sc,
case BDC_CMDS_INTL:
dev_err(bdc->dev, "BDC Internal error\n");
- reset_bdc = 1;
ret = -ECONNRESET;
break;
@@ -102,7 +100,6 @@ static int bdc_submit_cmd(struct bdc *bdc, u32 cmd_sc,
dev_err(bdc->dev,
"command timedout waited for %dusec\n",
BDC_CMD_TIMEOUT);
- reset_bdc = 1;
ret = -ECONNRESET;
break;
default:
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
index d6199507f..ccaa74ab6 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
@@ -81,7 +81,7 @@ static void ep_bd_list_free(struct bdc_ep *ep, u32 num_tabs)
continue;
}
if (!bd_table->start_bd) {
- dev_dbg(bdc->dev, "bd dma pool not allocted\n");
+ dev_dbg(bdc->dev, "bd dma pool not allocated\n");
continue;
}
@@ -702,11 +702,9 @@ static int ep0_queue(struct bdc_ep *ep, struct bdc_req *req)
/* Queue data stage */
static int ep0_queue_data_stage(struct bdc *bdc)
{
- struct usb_request *ep0_usb_req;
struct bdc_ep *ep;
dev_dbg(bdc->dev, "%s\n", __func__);
- ep0_usb_req = &bdc->ep0_req.usb_req;
ep = bdc->bdc_ep_array[1];
bdc->ep0_req.ep = ep;
bdc->ep0_req.usb_req.complete = NULL;
@@ -1393,10 +1391,8 @@ static int ep0_set_sel(struct bdc *bdc,
{
struct bdc_ep *ep;
u16 wLength;
- u16 wValue;
dev_dbg(bdc->dev, "%s\n", __func__);
- wValue = le16_to_cpu(setup_pkt->wValue);
wLength = le16_to_cpu(setup_pkt->wLength);
if (unlikely(wLength != 6)) {
dev_err(bdc->dev, "%s Wrong wLength:%d\n", __func__, wLength);
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
new file mode 100644
index 000000000..40c04bb25
--- /dev/null
+++ b/drivers/usb/gadget/udc/core.c
@@ -0,0 +1,1526 @@
+/**
+ * udc.c - Core UDC Framework
+ *
+ * Copyright (C) 2010 Texas Instruments
+ * Author: Felipe Balbi <balbi@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb.h>
+
+#include "trace.h"
+
+/**
+ * struct usb_udc - describes one usb device controller
+ * @driver: the gadget driver pointer. For use by the class code
+ * @dev: the child device to the actual controller
+ * @gadget: the gadget. For use by the class code
+ * @list: for use by the udc class driver
+ * @vbus: for udcs who care about vbus status, this value is real vbus status;
+ *	for udcs who do not care about vbus status, this value is always true
+ *
+ * This represents the internal data structure which is used by the UDC-class
+ * to hold information about udc driver and gadget together.
+ */
+struct usb_udc {
+ struct usb_gadget_driver *driver;
+ struct usb_gadget *gadget;
+ struct device dev;
+ struct list_head list;
+ bool vbus;
+};
+
+static struct class *udc_class;
+static LIST_HEAD(udc_list);
+static LIST_HEAD(gadget_driver_pending_list);
+static DEFINE_MUTEX(udc_lock);
+
+static int udc_bind_to_driver(struct usb_udc *udc,
+ struct usb_gadget_driver *driver);
+
+/* ------------------------------------------------------------------------- */
+
+/**
+ * usb_ep_set_maxpacket_limit - set maximum packet size limit for endpoint
+ * @ep:the endpoint being configured
+ * @maxpacket_limit:value of maximum packet size limit
+ *
+ * This function should be used only in UDC drivers to initialize endpoint
+ * (usually in probe function).
+ */
+void usb_ep_set_maxpacket_limit(struct usb_ep *ep,
+ unsigned maxpacket_limit)
+{
+ ep->maxpacket_limit = maxpacket_limit;
+ ep->maxpacket = maxpacket_limit;
+
+ trace_usb_ep_set_maxpacket_limit(ep, 0);
+}
+EXPORT_SYMBOL_GPL(usb_ep_set_maxpacket_limit);
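+
+/*
+ * Sketch (my_ep is a hypothetical driver-private endpoint structure with an
+ * embedded struct usb_ep): a UDC driver calls this while initializing its
+ * endpoint list in probe():
+ *
+ *	usb_ep_set_maxpacket_limit(&my_ep->ep, 512);
+ */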
+
+/**
+ * usb_ep_enable - configure endpoint, making it usable
+ * @ep:the endpoint being configured. may not be the endpoint named "ep0".
+ * drivers discover endpoints through the ep_list of a usb_gadget.
+ *
+ * When configurations are set, or when interface settings change, the driver
+ * will enable or disable the relevant endpoints. while it is enabled, an
+ * endpoint may be used for i/o until the driver receives a disconnect() from
+ * the host or until the endpoint is disabled.
+ *
+ * the ep0 implementation (which calls this routine) must ensure that the
+ * hardware capabilities of each endpoint match the descriptor provided
+ * for it. for example, an endpoint named "ep2in-bulk" would be usable
+ * for interrupt transfers as well as bulk, but it likely couldn't be used
+ * for iso transfers or for endpoint 14. some endpoints are fully
+ * configurable, with more generic names like "ep-a". (remember that for
+ * USB, "in" means "towards the USB master".)
+ *
+ * returns zero, or a negative error code.
+ */
+int usb_ep_enable(struct usb_ep *ep)
+{
+ int ret = 0;
+
+ if (ep->enabled)
+ goto out;
+
+	ret = ep->ops->enable(ep, ep->desc);
+	if (ret)
+		goto out;
+
+ ep->enabled = true;
+
+out:
+ trace_usb_ep_enable(ep, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_ep_enable);
+
+/**
+ * usb_ep_disable - endpoint is no longer usable
+ * @ep:the endpoint being unconfigured. may not be the endpoint named "ep0".
+ *
+ * no other task may be using this endpoint when this is called.
+ * any pending and uncompleted requests will complete with status
+ * indicating disconnect (-ESHUTDOWN) before this call returns.
+ * gadget drivers must call usb_ep_enable() again before queueing
+ * requests to the endpoint.
+ *
+ * returns zero, or a negative error code.
+ */
+int usb_ep_disable(struct usb_ep *ep)
+{
+ int ret = 0;
+
+ if (!ep->enabled)
+ goto out;
+
+	ret = ep->ops->disable(ep);
+	if (ret)
+		goto out;
+
+ ep->enabled = false;
+
+out:
+ trace_usb_ep_disable(ep, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_ep_disable);
+
+/**
+ * usb_ep_alloc_request - allocate a request object to use with this endpoint
+ * @ep:the endpoint to be used with with the request
+ * @gfp_flags:GFP_* flags to use
+ *
+ * Request objects must be allocated with this call, since they normally
+ * need controller-specific setup and may even need endpoint-specific
+ * resources such as allocation of DMA descriptors.
+ * Requests may be submitted with usb_ep_queue(), and receive a single
+ * completion callback. Free requests with usb_ep_free_request(), when
+ * they are no longer needed.
+ *
+ * Returns the request, or null if one could not be allocated.
+ */
+struct usb_request *usb_ep_alloc_request(struct usb_ep *ep,
+ gfp_t gfp_flags)
+{
+ struct usb_request *req = NULL;
+
+ req = ep->ops->alloc_request(ep, gfp_flags);
+
+ trace_usb_ep_alloc_request(ep, req, req ? 0 : -ENOMEM);
+
+ return req;
+}
+EXPORT_SYMBOL_GPL(usb_ep_alloc_request);
+
+/**
+ * usb_ep_free_request - frees a request object
+ * @ep:the endpoint associated with the request
+ * @req:the request being freed
+ *
+ * Reverses the effect of usb_ep_alloc_request().
+ * Caller guarantees the request is not queued, and that it will
+ * no longer be requeued (or otherwise used).
+ */
+void usb_ep_free_request(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ ep->ops->free_request(ep, req);
+ trace_usb_ep_free_request(ep, req, 0);
+}
+EXPORT_SYMBOL_GPL(usb_ep_free_request);
+
+/**
+ * usb_ep_queue - queues (submits) an I/O request to an endpoint.
+ * @ep:the endpoint associated with the request
+ * @req:the request being submitted
+ * @gfp_flags: GFP_* flags to use in case the lower level driver couldn't
+ * pre-allocate all necessary memory with the request.
+ *
+ * This tells the device controller to perform the specified request through
+ * that endpoint (reading or writing a buffer). When the request completes,
+ * including being canceled by usb_ep_dequeue(), the request's completion
+ * routine is called to return the request to the driver. Any endpoint
+ * (except control endpoints like ep0) may have more than one transfer
+ * request queued; they complete in FIFO order. Once a gadget driver
+ * submits a request, that request may not be examined or modified until it
+ * is given back to that driver through the completion callback.
+ *
+ * Each request is turned into one or more packets. The controller driver
+ * never merges adjacent requests into the same packet. OUT transfers
+ * will sometimes use data that's already buffered in the hardware.
+ * Drivers can rely on the fact that the first byte of the request's buffer
+ * always corresponds to the first byte of some USB packet, for both
+ * IN and OUT transfers.
+ *
+ * Bulk endpoints can queue any amount of data; the transfer is packetized
+ * automatically. The last packet will be short if the request doesn't fill it
+ * out completely. Zero length packets (ZLPs) should be avoided in portable
+ * protocols since not all usb hardware can successfully handle zero length
+ * packets. (ZLPs may be explicitly written, and may be implicitly written if
+ * the request 'zero' flag is set.) Bulk endpoints may also be used
+ * for interrupt transfers; but the reverse is not true, and some endpoints
+ * won't support every interrupt transfer. (Such as 768 byte packets.)
+ *
+ * Interrupt-only endpoints are less functional than bulk endpoints, for
+ * example by not supporting queueing or not handling buffers that are
+ * larger than the endpoint's maxpacket size. They may also treat data
+ * toggle differently.
+ *
+ * Control endpoints ... after getting a setup() callback, the driver queues
+ * one response (even if it would be zero length). That enables the
+ * status ack, after transferring data as specified in the response. Setup
+ * functions may return negative error codes to generate protocol stalls.
+ * (Note that some USB device controllers disallow protocol stall responses
+ * in some cases.) When control responses are deferred (the response is
+ * written after the setup callback returns), then usb_ep_set_halt() may be
+ * used on ep0 to trigger protocol stalls. Depending on the controller,
+ * it may not be possible to trigger a status-stage protocol stall when the
+ * data stage is over, that is, from within the response's completion
+ * routine.
+ *
+ * For periodic endpoints, like interrupt or isochronous ones, the usb host
+ * arranges to poll once per interval, and the gadget driver usually will
+ * have queued some data to transfer at that time.
+ *
+ * Returns zero, or a negative error code. Endpoints that are not enabled
+ * report errors; errors will also be
+ * reported when the usb peripheral is disconnected.
+ */
+int usb_ep_queue(struct usb_ep *ep,
+ struct usb_request *req, gfp_t gfp_flags)
+{
+ int ret = 0;
+
+ if (WARN_ON_ONCE(!ep->enabled && ep->address)) {
+ ret = -ESHUTDOWN;
+ goto out;
+ }
+
+ ret = ep->ops->queue(ep, req, gfp_flags);
+
+out:
+ trace_usb_ep_queue(ep, req, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_ep_queue);
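+
+/*
+ * A minimal usage sketch of the request API above. Illustrative only:
+ * my_ep, my_buf, my_len and my_complete() are placeholder names, not part
+ * of this framework.
+ *
+ *	static void my_complete(struct usb_ep *ep, struct usb_request *req)
+ *	{
+ *		if (req->status)
+ *			pr_debug("request ended: %d\n", req->status);
+ *		usb_ep_free_request(ep, req);
+ *	}
+ *
+ *	req = usb_ep_alloc_request(my_ep, GFP_ATOMIC);
+ *	if (req) {
+ *		req->buf = my_buf;
+ *		req->length = my_len;
+ *		req->complete = my_complete;
+ *		if (usb_ep_queue(my_ep, req, GFP_ATOMIC))
+ *			usb_ep_free_request(my_ep, req);
+ *	}
+ */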
+
+/**
+ * usb_ep_dequeue - dequeues (cancels, unlinks) an I/O request from an endpoint
+ * @ep:the endpoint associated with the request
+ * @req:the request being canceled
+ *
+ * If the request is still active on the endpoint, it is dequeued and its
+ * completion routine is called (with status -ECONNRESET); else a negative
+ * error code is returned. This is guaranteed to happen before the call to
+ * usb_ep_dequeue() returns.
+ *
+ * Note that some hardware can't clear out write fifos (to unlink the request
+ * at the head of the queue) except as part of disconnecting from usb. Such
+ * restrictions prevent drivers from supporting configuration changes,
+ * even to configuration zero (a "chapter 9" requirement).
+ */
+int usb_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
+{
+ int ret;
+
+ ret = ep->ops->dequeue(ep, req);
+ trace_usb_ep_dequeue(ep, req, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_ep_dequeue);
+
+/**
+ * usb_ep_set_halt - sets the endpoint halt feature.
+ * @ep: the non-isochronous endpoint being stalled
+ *
+ * Use this to stall an endpoint, perhaps as an error report.
+ * Except for control endpoints,
+ * the endpoint stays halted (will not stream any data) until the host
+ * clears this feature; drivers may need to empty the endpoint's request
+ * queue first, to make sure no inappropriate transfers happen.
+ *
+ * Note that while an endpoint CLEAR_FEATURE will be invisible to the
+ * gadget driver, a SET_INTERFACE will not be. To reset endpoints for the
+ * current altsetting, see usb_ep_clear_halt(). When switching altsettings,
+ * it's simplest to use usb_ep_enable() or usb_ep_disable() for the endpoints.
+ *
+ * Returns zero, or a negative error code. On success, this call sets
+ * underlying hardware state that blocks data transfers.
+ * Attempts to halt IN endpoints will fail (returning -EAGAIN) if any
+ * transfer requests are still queued, or if the controller hardware
+ * (usually a FIFO) still holds bytes that the host hasn't collected.
+ */
+int usb_ep_set_halt(struct usb_ep *ep)
+{
+ int ret;
+
+ ret = ep->ops->set_halt(ep, 1);
+ trace_usb_ep_set_halt(ep, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_ep_set_halt);
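+
+/*
+ * Illustrative sketch (my_out_ep, verify_command(), my_buf and my_len are
+ * hypothetical): a function driver that receives a malformed command on a
+ * bulk OUT endpoint may report the error by halting the endpoint until the
+ * host clears the feature:
+ *
+ *	if (verify_command(my_buf, my_len) < 0)
+ *		usb_ep_set_halt(my_out_ep);
+ */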
+
+/**
+ * usb_ep_clear_halt - clears endpoint halt, and resets toggle
+ * @ep:the bulk or interrupt endpoint being reset
+ *
+ * Use this when responding to the standard usb "set interface" request,
+ * for endpoints that aren't reconfigured, after clearing any other state
+ * in the endpoint's i/o queue.
+ *
+ * Returns zero, or a negative error code. On success, this call clears
+ * the underlying hardware state reflecting endpoint halt and data toggle.
+ * Note that some hardware can't support this request (like pxa2xx_udc),
+ * and accordingly can't correctly implement interface altsettings.
+ */
+int usb_ep_clear_halt(struct usb_ep *ep)
+{
+ int ret;
+
+ ret = ep->ops->set_halt(ep, 0);
+ trace_usb_ep_clear_halt(ep, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_ep_clear_halt);
+
+/**
+ * usb_ep_set_wedge - sets the halt feature and ignores clear requests
+ * @ep: the endpoint being wedged
+ *
+ * Use this to stall an endpoint and ignore CLEAR_FEATURE(HALT_ENDPOINT)
+ * requests. If the gadget driver clears the halt status, it will
+ * automatically unwedge the endpoint.
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_ep_set_wedge(struct usb_ep *ep)
+{
+ int ret;
+
+ if (ep->ops->set_wedge)
+ ret = ep->ops->set_wedge(ep);
+ else
+ ret = ep->ops->set_halt(ep, 1);
+
+ trace_usb_ep_set_wedge(ep, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_ep_set_wedge);
+
+/**
+ * usb_ep_fifo_status - returns number of bytes in fifo, or error
+ * @ep: the endpoint whose fifo status is being checked.
+ *
+ * FIFO endpoints may have "unclaimed data" in them in certain cases,
+ * such as after aborted transfers. Hosts may not have collected all
+ * the IN data written by the gadget driver (and reported by a request
+ * completion). The gadget driver may not have collected all the data
+ * written OUT to it by the host. Drivers that need precise handling for
+ * fault reporting or recovery may need to use this call.
+ *
+ * This returns the number of such bytes in the fifo, or a negative
+ * errno if the endpoint doesn't use a FIFO or doesn't support such
+ * precise handling.
+ */
+int usb_ep_fifo_status(struct usb_ep *ep)
+{
+ int ret;
+
+ if (ep->ops->fifo_status)
+ ret = ep->ops->fifo_status(ep);
+ else
+ ret = -EOPNOTSUPP;
+
+ trace_usb_ep_fifo_status(ep, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_ep_fifo_status);
+
+/**
+ * usb_ep_fifo_flush - flushes contents of a fifo
+ * @ep: the endpoint whose fifo is being flushed.
+ *
+ * This call may be used to flush the "unclaimed data" that may exist in
+ * an endpoint fifo after abnormal transaction terminations. The call
+ * must never be used except when endpoint is not being used for any
+ * protocol translation.
+ */
+void usb_ep_fifo_flush(struct usb_ep *ep)
+{
+ if (ep->ops->fifo_flush)
+ ep->ops->fifo_flush(ep);
+
+ trace_usb_ep_fifo_flush(ep, 0);
+}
+EXPORT_SYMBOL_GPL(usb_ep_fifo_flush);
+
+/* ------------------------------------------------------------------------- */
+
+/**
+ * usb_gadget_frame_number - returns the current frame number
+ * @gadget: controller that reports the frame number
+ *
+ * Returns the usb frame number, normally eleven bits from a SOF packet,
+ * or negative errno if this device doesn't support this capability.
+ */
+int usb_gadget_frame_number(struct usb_gadget *gadget)
+{
+ int ret;
+
+ ret = gadget->ops->get_frame(gadget);
+
+ trace_usb_gadget_frame_number(gadget, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_frame_number);
+
+/**
+ * usb_gadget_wakeup - tries to wake up the host connected to this gadget
+ * @gadget: controller used to wake up the host
+ *
+ * Returns zero on success, else negative error code if the hardware
+ * doesn't support such attempts, or its support has not been enabled
+ * by the usb host. Drivers must return device descriptors that report
+ * their ability to support this, or hosts won't enable it.
+ *
+ * This may also try to use SRP to wake the host and start enumeration,
+ * even if OTG isn't otherwise in use. OTG devices may also start
+ * remote wakeup even when hosts don't explicitly enable it.
+ */
+int usb_gadget_wakeup(struct usb_gadget *gadget)
+{
+ int ret = 0;
+
+ if (!gadget->ops->wakeup) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ ret = gadget->ops->wakeup(gadget);
+
+out:
+ trace_usb_gadget_wakeup(gadget, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_wakeup);
+
+/**
+ * usb_gadget_set_selfpowered - sets the device selfpowered feature.
+ * @gadget:the device being declared as self-powered
+ *
+ * this affects the device status reported by the hardware driver
+ * to reflect that it now has a local power supply.
+ *
+ * returns zero on success, else negative errno.
+ */
+int usb_gadget_set_selfpowered(struct usb_gadget *gadget)
+{
+ int ret = 0;
+
+ if (!gadget->ops->set_selfpowered) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ ret = gadget->ops->set_selfpowered(gadget, 1);
+
+out:
+ trace_usb_gadget_set_selfpowered(gadget, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_set_selfpowered);
+
+/**
+ * usb_gadget_clear_selfpowered - clear the device selfpowered feature.
+ * @gadget:the device being declared as bus-powered
+ *
+ * this affects the device status reported by the hardware driver.
+ * some hardware may not support bus-powered operation, in which
+ * case this feature's value can never change.
+ *
+ * returns zero on success, else negative errno.
+ */
+int usb_gadget_clear_selfpowered(struct usb_gadget *gadget)
+{
+ int ret = 0;
+
+ if (!gadget->ops->set_selfpowered) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ ret = gadget->ops->set_selfpowered(gadget, 0);
+
+out:
+ trace_usb_gadget_clear_selfpowered(gadget, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_clear_selfpowered);
+
+/**
+ * usb_gadget_vbus_connect - Notify controller that VBUS is powered
+ * @gadget:The device which now has VBUS power.
+ * Context: can sleep
+ *
+ * This call is used by a driver for an external transceiver (or GPIO)
+ * that detects a VBUS power session starting. Common responses include
+ * resuming the controller, activating the D+ (or D-) pullup to let the
+ * host detect that a USB device is attached, and starting to draw power
+ * (8mA or possibly more, especially after SET_CONFIGURATION).
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_gadget_vbus_connect(struct usb_gadget *gadget)
+{
+ int ret = 0;
+
+ if (!gadget->ops->vbus_session) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ ret = gadget->ops->vbus_session(gadget, 1);
+
+out:
+ trace_usb_gadget_vbus_connect(gadget, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_vbus_connect);
+
+/**
+ * usb_gadget_vbus_draw - constrain controller's VBUS power usage
+ * @gadget:The device whose VBUS usage is being described
+ * @mA:How much current to draw, in milliAmperes. This should be twice
+ * the value listed in the configuration descriptor bMaxPower field.
+ *
+ * This call is used by gadget drivers during SET_CONFIGURATION calls,
+ * reporting how much power the device may consume. For example, this
+ * could affect how quickly batteries are recharged.
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
+{
+ int ret = 0;
+
+ if (!gadget->ops->vbus_draw) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ ret = gadget->ops->vbus_draw(gadget, mA);
+ if (!ret)
+ gadget->mA = mA;
+
+out:
+ trace_usb_gadget_vbus_draw(gadget, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_vbus_draw);
+
+/**
+ * usb_gadget_vbus_disconnect - notify controller about VBUS session end
+ * @gadget:the device whose VBUS supply is being described
+ * Context: can sleep
+ *
+ * This call is used by a driver for an external transceiver (or GPIO)
+ * that detects a VBUS power session ending. Common responses include
+ * reversing everything done in usb_gadget_vbus_connect().
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_gadget_vbus_disconnect(struct usb_gadget *gadget)
+{
+ int ret = 0;
+
+ if (!gadget->ops->vbus_session) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ ret = gadget->ops->vbus_session(gadget, 0);
+
+out:
+ trace_usb_gadget_vbus_disconnect(gadget, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_vbus_disconnect);
+
+/**
+ * usb_gadget_connect - software-controlled connect to USB host
+ * @gadget:the peripheral being connected
+ *
+ * Enables the D+ (or potentially D-) pullup. The host will start
+ * enumerating this gadget when the pullup is active and a VBUS session
+ * is active (the link is powered). This pullup is always enabled unless
+ * usb_gadget_disconnect() has been used to disable it.
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_gadget_connect(struct usb_gadget *gadget)
+{
+ int ret = 0;
+
+ if (!gadget->ops->pullup) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (gadget->deactivated) {
+ /*
+ * If gadget is deactivated we only save new state.
+ * Gadget will be connected automatically after activation.
+ */
+ gadget->connected = true;
+ goto out;
+ }
+
+ ret = gadget->ops->pullup(gadget, 1);
+ if (!ret)
+ gadget->connected = 1;
+
+out:
+ trace_usb_gadget_connect(gadget, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_connect);
+
+/**
+ * usb_gadget_disconnect - software-controlled disconnect from USB host
+ * @gadget:the peripheral being disconnected
+ *
+ * Disables the D+ (or potentially D-) pullup, which the host may see
+ * as a disconnect (when a VBUS session is active). Not all systems
+ * support software pullup controls.
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_gadget_disconnect(struct usb_gadget *gadget)
+{
+ int ret = 0;
+
+ if (!gadget->ops->pullup) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (gadget->deactivated) {
+ /*
+ * If gadget is deactivated we only save new state.
+ * Gadget will stay disconnected after activation.
+ */
+ gadget->connected = false;
+ goto out;
+ }
+
+ ret = gadget->ops->pullup(gadget, 0);
+ if (!ret)
+ gadget->connected = 0;
+
+out:
+ trace_usb_gadget_disconnect(gadget, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_disconnect);
+
+/**
+ * usb_gadget_deactivate - deactivate function which is not ready to work
+ * @gadget: the peripheral being deactivated
+ *
+ * This routine may be used during the gadget driver bind() call to prevent
+ * the peripheral from ever being visible to the USB host, unless later
+ * usb_gadget_activate() is called. For example, user mode components may
+ * need to be activated before the system can talk to hosts.
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_gadget_deactivate(struct usb_gadget *gadget)
+{
+ int ret = 0;
+
+ if (gadget->deactivated)
+ goto out;
+
+ if (gadget->connected) {
+ ret = usb_gadget_disconnect(gadget);
+ if (ret)
+ goto out;
+
+ /*
+ * If gadget was being connected before deactivation, we want
+ * to reconnect it in usb_gadget_activate().
+ */
+ gadget->connected = true;
+ }
+ gadget->deactivated = true;
+
+out:
+ trace_usb_gadget_deactivate(gadget, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_deactivate);
+
+/**
+ * usb_gadget_activate - activate function which is not ready to work
+ * @gadget: the peripheral being activated
+ *
+ * This routine activates gadget which was previously deactivated with
+ * usb_gadget_deactivate() call. It calls usb_gadget_connect() if needed.
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_gadget_activate(struct usb_gadget *gadget)
+{
+ int ret = 0;
+
+ if (!gadget->deactivated)
+ goto out;
+
+ gadget->deactivated = false;
+
+ /*
+ * If gadget has been connected before deactivation, or became connected
+ * while it was being deactivated, we call usb_gadget_connect().
+ */
+ if (gadget->connected)
+ ret = usb_gadget_connect(gadget);
+
+out:
+ trace_usb_gadget_activate(gadget, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_activate);
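+
+/*
+ * A sketch of the intended pairing, with placeholder names: a gadget driver
+ * whose userspace backend is not ready at bind() time can hide the device
+ * until the backend attaches.
+ *
+ *	static int my_bind(struct usb_gadget *gadget,
+ *			   struct usb_gadget_driver *driver)
+ *	{
+ *		...
+ *		return usb_gadget_deactivate(gadget);
+ *	}
+ *
+ *	static void my_backend_ready(struct usb_gadget *gadget)
+ *	{
+ *		usb_gadget_activate(gadget);
+ *	}
+ */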
+
+/* ------------------------------------------------------------------------- */
+
+#ifdef CONFIG_HAS_DMA
+
+int usb_gadget_map_request_by_dev(struct device *dev,
+ struct usb_request *req, int is_in)
+{
+ if (req->length == 0)
+ return 0;
+
+ if (req->num_sgs) {
+ int mapped;
+
+ mapped = dma_map_sg(dev, req->sg, req->num_sgs,
+ is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (mapped == 0) {
+ dev_err(dev, "failed to map SGs\n");
+ return -EFAULT;
+ }
+
+ req->num_mapped_sgs = mapped;
+ } else {
+ req->dma = dma_map_single(dev, req->buf, req->length,
+ is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(dev, req->dma)) {
+ dev_err(dev, "failed to map buffer\n");
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_map_request_by_dev);
+
+int usb_gadget_map_request(struct usb_gadget *gadget,
+ struct usb_request *req, int is_in)
+{
+ return usb_gadget_map_request_by_dev(gadget->dev.parent, req, is_in);
+}
+EXPORT_SYMBOL_GPL(usb_gadget_map_request);
+
+void usb_gadget_unmap_request_by_dev(struct device *dev,
+ struct usb_request *req, int is_in)
+{
+ if (req->length == 0)
+ return;
+
+ if (req->num_mapped_sgs) {
+ dma_unmap_sg(dev, req->sg, req->num_sgs,
+ is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ req->num_mapped_sgs = 0;
+ } else {
+ dma_unmap_single(dev, req->dma, req->length,
+ is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ }
+}
+EXPORT_SYMBOL_GPL(usb_gadget_unmap_request_by_dev);
+
+void usb_gadget_unmap_request(struct usb_gadget *gadget,
+ struct usb_request *req, int is_in)
+{
+ usb_gadget_unmap_request_by_dev(gadget->dev.parent, req, is_in);
+}
+EXPORT_SYMBOL_GPL(usb_gadget_unmap_request);
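+
+/*
+ * Typical use in a UDC driver (sketch; my_start_dma() is hypothetical and
+ * ep is a driver-private endpoint with an embedded struct usb_ep): map the
+ * request before handing it to the DMA engine, and unmap it on the
+ * completion path before giving the request back:
+ *
+ *	ret = usb_gadget_map_request(gadget, req, ep_is_in);
+ *	if (ret)
+ *		return ret;
+ *	my_start_dma(ep, req->dma, req->length);
+ *	...
+ *	usb_gadget_unmap_request(gadget, req, ep_is_in);
+ *	usb_gadget_giveback_request(&ep->ep, req);
+ */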
+
+#endif /* CONFIG_HAS_DMA */
+
+/* ------------------------------------------------------------------------- */
+
+/**
+ * usb_gadget_giveback_request - give the request back to the gadget layer
+ * Context: in_interrupt()
+ *
+ * This is called by device controller drivers in order to return the
+ * completed request back to the gadget layer.
+ */
+void usb_gadget_giveback_request(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ if (likely(req->status == 0))
+ usb_led_activity(USB_LED_EVENT_GADGET);
+
+ trace_usb_gadget_giveback_request(ep, req, 0);
+
+ req->complete(ep, req);
+}
+EXPORT_SYMBOL_GPL(usb_gadget_giveback_request);
+
+/* ------------------------------------------------------------------------- */
+
+/**
+ * gadget_find_ep_by_name - returns the ep whose name matches the string
+ * passed in the second parameter, or NULL if no such endpoint is found
+ * @g: the gadget whose endpoints are searched
+ * @name: name of the endpoint to look for
+ */
+struct usb_ep *gadget_find_ep_by_name(struct usb_gadget *g, const char *name)
+{
+ struct usb_ep *ep;
+
+ gadget_for_each_ep(ep, g) {
+ if (!strcmp(ep->name, name))
+ return ep;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(gadget_find_ep_by_name);
+
+/* ------------------------------------------------------------------------- */
+
+int usb_gadget_ep_match_desc(struct usb_gadget *gadget,
+ struct usb_ep *ep, struct usb_endpoint_descriptor *desc,
+ struct usb_ss_ep_comp_descriptor *ep_comp)
+{
+ u8 type;
+ u16 max;
+ int num_req_streams = 0;
+
+ /* endpoint already claimed? */
+ if (ep->claimed)
+ return 0;
+
+ type = usb_endpoint_type(desc);
+ max = 0x7ff & usb_endpoint_maxp(desc);
+
+ if (usb_endpoint_dir_in(desc) && !ep->caps.dir_in)
+ return 0;
+ if (usb_endpoint_dir_out(desc) && !ep->caps.dir_out)
+ return 0;
+
+ if (max > ep->maxpacket_limit)
+ return 0;
+
+ /* "high bandwidth" works only at high speed */
+ if (!gadget_is_dualspeed(gadget) && usb_endpoint_maxp(desc) & (3<<11))
+ return 0;
+
+ switch (type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ /* only support ep0 for portable CONTROL traffic */
+ return 0;
+ case USB_ENDPOINT_XFER_ISOC:
+ if (!ep->caps.type_iso)
+ return 0;
+ /* ISO: limit 1023 bytes full speed, 1024 high/super speed */
+ if (!gadget_is_dualspeed(gadget) && max > 1023)
+ return 0;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ if (!ep->caps.type_bulk)
+ return 0;
+ if (ep_comp && gadget_is_superspeed(gadget)) {
+ /* Get the number of required streams from the
+ * EP companion descriptor and see if the EP
+ * matches it
+ */
+ num_req_streams = ep_comp->bmAttributes & 0x1f;
+ if (num_req_streams > ep->max_streams)
+ return 0;
+ }
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ /* Bulk endpoints handle interrupt transfers,
+ * except the toggle-quirky iso-synch kind
+ */
+ if (!ep->caps.type_int && !ep->caps.type_bulk)
+ return 0;
+ /* INT: limit 64 bytes full speed, 1024 high/super speed */
+ if (!gadget_is_dualspeed(gadget) && max > 64)
+ return 0;
+ break;
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_ep_match_desc);
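+
+/*
+ * Illustrative caller (sketch): an endpoint autoconfiguration helper can use
+ * this predicate to pick the first hardware endpoint compatible with a
+ * given descriptor:
+ *
+ *	gadget_for_each_ep(ep, gadget) {
+ *		if (usb_gadget_ep_match_desc(gadget, ep, desc, ep_comp))
+ *			return ep;
+ *	}
+ *	return NULL;
+ */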
+
+/* ------------------------------------------------------------------------- */
+
+static void usb_gadget_state_work(struct work_struct *work)
+{
+ struct usb_gadget *gadget = work_to_gadget(work);
+ struct usb_udc *udc = gadget->udc;
+
+ if (udc)
+ sysfs_notify(&udc->dev.kobj, NULL, "state");
+}
+
+void usb_gadget_set_state(struct usb_gadget *gadget,
+ enum usb_device_state state)
+{
+ gadget->state = state;
+ schedule_work(&gadget->work);
+}
+EXPORT_SYMBOL_GPL(usb_gadget_set_state);
+
+/* ------------------------------------------------------------------------- */
+
+static void usb_udc_connect_control(struct usb_udc *udc)
+{
+ if (udc->vbus)
+ usb_gadget_connect(udc->gadget);
+ else
+ usb_gadget_disconnect(udc->gadget);
+}
+
+/**
+ * usb_udc_vbus_handler - updates the udc core vbus status, and tries to
+ * connect or disconnect the gadget
+ * @gadget: The gadget on which the vbus status change occurs
+ * @status: The vbus status
+ *
+ * The udc driver calls this when it wants to connect or disconnect the
+ * gadget according to the vbus status.
+ */
+void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status)
+{
+ struct usb_udc *udc = gadget->udc;
+
+ if (udc) {
+ udc->vbus = status;
+ usb_udc_connect_control(udc);
+ }
+}
+EXPORT_SYMBOL_GPL(usb_udc_vbus_handler);
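+
+/*
+ * Expected call site (sketch; struct my_udc and my_vbus_is_present() are
+ * hypothetical): a UDC driver with VBUS sensing reports session changes
+ * from its interrupt handler:
+ *
+ *	static irqreturn_t my_vbus_irq(int irq, void *data)
+ *	{
+ *		struct my_udc *udc = data;
+ *
+ *		usb_udc_vbus_handler(&udc->gadget, my_vbus_is_present(udc));
+ *		return IRQ_HANDLED;
+ *	}
+ */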
+
+/**
+ * usb_gadget_udc_reset - notifies the udc core that a bus reset occurred
+ * @gadget: The gadget on which the bus reset occurred
+ * @driver: The gadget driver we want to notify
+ *
+ * If the udc driver has a bus reset handler, it needs to call this when a
+ * bus reset occurs. It notifies the gadget driver of the reset and updates
+ * the gadget state accordingly.
+ */
+void usb_gadget_udc_reset(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ driver->reset(gadget);
+ usb_gadget_set_state(gadget, USB_STATE_DEFAULT);
+}
+EXPORT_SYMBOL_GPL(usb_gadget_udc_reset);
+
+/**
+ * usb_gadget_udc_start - tells usb device controller to start up
+ * @udc: The UDC to be started
+ *
+ * This call is issued by the UDC Class driver when it's about
+ * to register a gadget driver to the device controller, before
+ * calling gadget driver's bind() method.
+ *
+ * It allows the controller to be powered off until strictly
+ * necessary to have it powered on.
+ *
+ * Returns zero on success, else negative errno.
+ */
+static inline int usb_gadget_udc_start(struct usb_udc *udc)
+{
+ return udc->gadget->ops->udc_start(udc->gadget, udc->driver);
+}
+
+/**
+ * usb_gadget_udc_stop - tells usb device controller we don't need it anymore
+ * @udc: The UDC whose activity we want to stop
+ *
+ * This call is issued by the UDC Class driver after calling
+ * gadget driver's unbind() method.
+ *
+ * The details are implementation specific, but it can go as
+ * far as powering off UDC completely and disable its data
+ * line pullups.
+ */
+static inline void usb_gadget_udc_stop(struct usb_udc *udc)
+{
+ udc->gadget->ops->udc_stop(udc->gadget);
+}
+
+/**
+ * usb_udc_release - release the usb_udc struct
+ * @dev: the dev member within usb_udc
+ *
+ * This is called by the driver core in order to free memory once the last
+ * reference is released.
+ */
+static void usb_udc_release(struct device *dev)
+{
+ struct usb_udc *udc;
+
+ udc = container_of(dev, struct usb_udc, dev);
+ dev_dbg(dev, "releasing '%s'\n", dev_name(dev));
+ kfree(udc);
+}
+
+static const struct attribute_group *usb_udc_attr_groups[];
+
+static void usb_udc_nop_release(struct device *dev)
+{
+ dev_vdbg(dev, "%s\n", __func__);
+}
+
+/**
+ * usb_add_gadget_udc_release - adds a new gadget to the udc class driver list
+ * @parent: the parent device to this udc. Usually the controller driver's
+ * device.
+ * @gadget: the gadget to be added to the list.
+ * @release: a gadget release function.
+ *
+ * Returns zero on success, negative errno otherwise.
+ */
+int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
+ void (*release)(struct device *dev))
+{
+ struct usb_udc *udc;
+ struct usb_gadget_driver *driver;
+ int ret = -ENOMEM;
+
+ udc = kzalloc(sizeof(*udc), GFP_KERNEL);
+ if (!udc)
+ goto err1;
+
+ dev_set_name(&gadget->dev, "gadget");
+ INIT_WORK(&gadget->work, usb_gadget_state_work);
+ gadget->dev.parent = parent;
+
+ if (release)
+ gadget->dev.release = release;
+ else
+ gadget->dev.release = usb_udc_nop_release;
+
+ ret = device_register(&gadget->dev);
+ if (ret)
+ goto err2;
+
+ device_initialize(&udc->dev);
+ udc->dev.release = usb_udc_release;
+ udc->dev.class = udc_class;
+ udc->dev.groups = usb_udc_attr_groups;
+ udc->dev.parent = parent;
+ ret = dev_set_name(&udc->dev, "%s", kobject_name(&parent->kobj));
+ if (ret)
+ goto err3;
+
+ udc->gadget = gadget;
+ gadget->udc = udc;
+
+ mutex_lock(&udc_lock);
+ list_add_tail(&udc->list, &udc_list);
+
+ ret = device_add(&udc->dev);
+ if (ret)
+ goto err4;
+
+ usb_gadget_set_state(gadget, USB_STATE_NOTATTACHED);
+ udc->vbus = true;
+
+ /* pick up one of pending gadget drivers */
+ list_for_each_entry(driver, &gadget_driver_pending_list, pending) {
+ if (!driver->udc_name || strcmp(driver->udc_name,
+ dev_name(&udc->dev)) == 0) {
+ ret = udc_bind_to_driver(udc, driver);
+ if (ret != -EPROBE_DEFER)
+ list_del(&driver->pending);
+ if (ret)
+ goto err5;
+ break;
+ }
+ }
+
+ mutex_unlock(&udc_lock);
+
+ return 0;
+
+err5:
+ device_del(&udc->dev);
+
+err4:
+ list_del(&udc->list);
+ mutex_unlock(&udc_lock);
+
+err3:
+ put_device(&udc->dev);
+ device_del(&gadget->dev);
+
+err2:
+ put_device(&gadget->dev);
+ kfree(udc);
+
+err1:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_add_gadget_udc_release);
+
+/**
+ * usb_get_gadget_udc_name - get the name of the first UDC controller
+ *
+ * This function returns the name of the first UDC controller in the system.
+ * Please note that this interface is useful only for legacy drivers which
+ * assume that there is only one UDC controller in the system and need to
+ * get its name before initialization. There is no guarantee that the UDC
+ * of the returned name will still be available when the gadget driver
+ * registers itself.
+ *
+ * Returns a pointer to a string with the UDC controller name on success,
+ * NULL otherwise. The caller should kfree() the returned string.
+ */
+char *usb_get_gadget_udc_name(void)
+{
+ struct usb_udc *udc;
+ char *name = NULL;
+
+ /* For now we take the first available UDC */
+ mutex_lock(&udc_lock);
+ list_for_each_entry(udc, &udc_list, list) {
+ if (!udc->driver) {
+ name = kstrdup(udc->gadget->name, GFP_KERNEL);
+ break;
+ }
+ }
+ mutex_unlock(&udc_lock);
+ return name;
+}
+EXPORT_SYMBOL_GPL(usb_get_gadget_udc_name);
+
+/**
+ * usb_add_gadget_udc - adds a new gadget to the udc class driver list
+ * @parent: the parent device to this udc. Usually the controller
+ * driver's device.
+ * @gadget: the gadget to be added to the list
+ *
+ * Returns zero on success, negative errno otherwise.
+ */
+int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget)
+{
+ return usb_add_gadget_udc_release(parent, gadget, NULL);
+}
+EXPORT_SYMBOL_GPL(usb_add_gadget_udc);
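+
+/*
+ * Registration sketch (struct my_udc and my_udc_ops are hypothetical): a UDC
+ * platform driver typically fills in gadget->ops, gadget->name and its
+ * endpoint list, then registers from probe():
+ *
+ *	static int my_udc_probe(struct platform_device *pdev)
+ *	{
+ *		struct my_udc *u;
+ *
+ *		u = devm_kzalloc(&pdev->dev, sizeof(*u), GFP_KERNEL);
+ *		if (!u)
+ *			return -ENOMEM;
+ *		u->gadget.ops = &my_udc_ops;
+ *		u->gadget.name = "my_udc";
+ *		return usb_add_gadget_udc(&pdev->dev, &u->gadget);
+ *	}
+ *
+ * The matching remove() path calls usb_del_gadget_udc(&u->gadget).
+ */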
+
+static void usb_gadget_remove_driver(struct usb_udc *udc)
+{
+ dev_dbg(&udc->dev, "unregistering UDC driver [%s]\n",
+ udc->driver->function);
+
+ kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
+
+ usb_gadget_disconnect(udc->gadget);
+ udc->driver->disconnect(udc->gadget);
+ udc->driver->unbind(udc->gadget);
+ usb_gadget_udc_stop(udc);
+
+ udc->driver = NULL;
+ udc->dev.driver = NULL;
+ udc->gadget->dev.driver = NULL;
+}
+
+/**
+ * usb_del_gadget_udc - deletes @udc from udc_list
+ * @gadget: the gadget to be removed.
+ *
+ * This will call usb_gadget_unregister_driver() if
+ * the @udc is still busy.
+ */
+void usb_del_gadget_udc(struct usb_gadget *gadget)
+{
+ struct usb_udc *udc = gadget->udc;
+
+ if (!udc)
+ return;
+
+ dev_vdbg(gadget->dev.parent, "unregistering gadget\n");
+
+ mutex_lock(&udc_lock);
+ list_del(&udc->list);
+
+ if (udc->driver) {
+ struct usb_gadget_driver *driver = udc->driver;
+
+ usb_gadget_remove_driver(udc);
+ list_add(&driver->pending, &gadget_driver_pending_list);
+ }
+ mutex_unlock(&udc_lock);
+
+ kobject_uevent(&udc->dev.kobj, KOBJ_REMOVE);
+ flush_work(&gadget->work);
+ device_unregister(&udc->dev);
+ device_unregister(&gadget->dev);
+}
+EXPORT_SYMBOL_GPL(usb_del_gadget_udc);
+
+/* ------------------------------------------------------------------------- */
+
+static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *driver)
+{
+ int ret;
+
+ dev_dbg(&udc->dev, "registering UDC driver [%s]\n",
+ driver->function);
+
+ udc->driver = driver;
+ udc->dev.driver = &driver->driver;
+ udc->gadget->dev.driver = &driver->driver;
+
+ ret = driver->bind(udc->gadget, driver);
+ if (ret)
+ goto err1;
+ ret = usb_gadget_udc_start(udc);
+ if (ret) {
+ driver->unbind(udc->gadget);
+ goto err1;
+ }
+ usb_udc_connect_control(udc);
+
+ kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
+ return 0;
+err1:
+ if (ret != -EISNAM)
+ dev_err(&udc->dev, "failed to start %s: %d\n",
+ udc->driver->function, ret);
+ udc->driver = NULL;
+ udc->dev.driver = NULL;
+ udc->gadget->dev.driver = NULL;
+ return ret;
+}
+
+int usb_gadget_probe_driver(struct usb_gadget_driver *driver)
+{
+ struct usb_udc *udc = NULL;
+ int ret = -ENODEV;
+
+ if (!driver || !driver->bind || !driver->setup)
+ return -EINVAL;
+
+ mutex_lock(&udc_lock);
+ if (driver->udc_name) {
+ list_for_each_entry(udc, &udc_list, list) {
+ ret = strcmp(driver->udc_name, dev_name(&udc->dev));
+ if (!ret)
+ break;
+ }
+ if (!ret && !udc->driver)
+ goto found;
+ } else {
+ list_for_each_entry(udc, &udc_list, list) {
+ /* For now we take the first one */
+ if (!udc->driver)
+ goto found;
+ }
+ }
+
+ if (!driver->match_existing_only) {
+ list_add_tail(&driver->pending, &gadget_driver_pending_list);
+ pr_info("udc-core: couldn't find an available UDC - added [%s] to list of pending drivers\n",
+ driver->function);
+ ret = 0;
+ }
+
+ mutex_unlock(&udc_lock);
+ return ret;
+found:
+ ret = udc_bind_to_driver(udc, driver);
+ mutex_unlock(&udc_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_probe_driver);
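+
+/*
+ * Gadget-driver side (sketch; my_* names are placeholders): a gadget driver
+ * registers itself once and, if no UDC is available yet, simply lands on the
+ * pending list above until one is added:
+ *
+ *	static struct usb_gadget_driver my_driver = {
+ *		.function	= "my_function",
+ *		.max_speed	= USB_SPEED_HIGH,
+ *		.bind		= my_bind,
+ *		.unbind		= my_unbind,
+ *		.setup		= my_setup,
+ *	};
+ *
+ *	ret = usb_gadget_probe_driver(&my_driver);
+ */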
+
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+ struct usb_udc *udc = NULL;
+ int ret = -ENODEV;
+
+ if (!driver || !driver->unbind)
+ return -EINVAL;
+
+ mutex_lock(&udc_lock);
+ list_for_each_entry(udc, &udc_list, list)
+ if (udc->driver == driver) {
+ usb_gadget_remove_driver(udc);
+ usb_gadget_set_state(udc->gadget,
+ USB_STATE_NOTATTACHED);
+ ret = 0;
+ break;
+ }
+
+ if (ret) {
+ list_del(&driver->pending);
+ ret = 0;
+ }
+ mutex_unlock(&udc_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_unregister_driver);
+
+/* ------------------------------------------------------------------------- */
+
+static ssize_t usb_udc_srp_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t n)
+{
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
+
+ if (sysfs_streq(buf, "1"))
+ usb_gadget_wakeup(udc->gadget);
+
+ return n;
+}
+static DEVICE_ATTR(srp, S_IWUSR, NULL, usb_udc_srp_store);
+
+static ssize_t usb_udc_softconn_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t n)
+{
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
+
+ if (!udc->driver) {
+ dev_err(dev, "soft-connect without a gadget driver\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (sysfs_streq(buf, "connect")) {
+ usb_gadget_udc_start(udc);
+ usb_gadget_connect(udc->gadget);
+ } else if (sysfs_streq(buf, "disconnect")) {
+ usb_gadget_disconnect(udc->gadget);
+ udc->driver->disconnect(udc->gadget);
+ usb_gadget_udc_stop(udc);
+ } else {
+ dev_err(dev, "unsupported command '%s'\n", buf);
+ return -EINVAL;
+ }
+
+ return n;
+}
+static DEVICE_ATTR(soft_connect, S_IWUSR, NULL, usb_udc_softconn_store);
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
+ struct usb_gadget *gadget = udc->gadget;
+
+ return sprintf(buf, "%s\n", usb_state_string(gadget->state));
+}
+static DEVICE_ATTR_RO(state);
+
+#define USB_UDC_SPEED_ATTR(name, param) \
+ssize_t name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev); \
+ return snprintf(buf, PAGE_SIZE, "%s\n", \
+ usb_speed_string(udc->gadget->param)); \
+} \
+static DEVICE_ATTR_RO(name)
+
+static USB_UDC_SPEED_ATTR(current_speed, speed);
+static USB_UDC_SPEED_ATTR(maximum_speed, max_speed);
+
+#define USB_UDC_ATTR(name) \
+ssize_t name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev); \
+ struct usb_gadget *gadget = udc->gadget; \
+ \
+ return snprintf(buf, PAGE_SIZE, "%d\n", gadget->name); \
+} \
+static DEVICE_ATTR_RO(name)
+
+static USB_UDC_ATTR(is_otg);
+static USB_UDC_ATTR(is_a_peripheral);
+static USB_UDC_ATTR(b_hnp_enable);
+static USB_UDC_ATTR(a_hnp_support);
+static USB_UDC_ATTR(a_alt_hnp_support);
+static USB_UDC_ATTR(is_selfpowered);
+
+static struct attribute *usb_udc_attrs[] = {
+ &dev_attr_srp.attr,
+ &dev_attr_soft_connect.attr,
+ &dev_attr_state.attr,
+ &dev_attr_current_speed.attr,
+ &dev_attr_maximum_speed.attr,
+
+ &dev_attr_is_otg.attr,
+ &dev_attr_is_a_peripheral.attr,
+ &dev_attr_b_hnp_enable.attr,
+ &dev_attr_a_hnp_support.attr,
+ &dev_attr_a_alt_hnp_support.attr,
+ &dev_attr_is_selfpowered.attr,
+ NULL,
+};
+
+static const struct attribute_group usb_udc_attr_group = {
+ .attrs = usb_udc_attrs,
+};
+
+static const struct attribute_group *usb_udc_attr_groups[] = {
+ &usb_udc_attr_group,
+ NULL,
+};
+
+static int usb_udc_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
+ int ret;
+
+ ret = add_uevent_var(env, "USB_UDC_NAME=%s", udc->gadget->name);
+ if (ret) {
+ dev_err(dev, "failed to add uevent USB_UDC_NAME\n");
+ return ret;
+ }
+
+ if (udc->driver) {
+ ret = add_uevent_var(env, "USB_UDC_DRIVER=%s",
+ udc->driver->function);
+ if (ret) {
+ dev_err(dev, "failed to add uevent USB_UDC_DRIVER\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int __init usb_udc_init(void)
+{
+ udc_class = class_create(THIS_MODULE, "udc");
+ if (IS_ERR(udc_class)) {
+ pr_err("failed to create udc class --> %ld\n",
+ PTR_ERR(udc_class));
+ return PTR_ERR(udc_class);
+ }
+
+ udc_class->dev_uevent = usb_udc_uevent;
+ return 0;
+}
+subsys_initcall(usb_udc_init);
+
+static void __exit usb_udc_exit(void)
+{
+ class_destroy(udc_class);
+}
+module_exit(usb_udc_exit);
+
+MODULE_DESCRIPTION("UDC Framework");
+MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index dde44450d..77d07904f 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -647,12 +647,10 @@ static int dummy_disable(struct usb_ep *_ep)
static struct usb_request *dummy_alloc_request(struct usb_ep *_ep,
gfp_t mem_flags)
{
- struct dummy_ep *ep;
struct dummy_request *req;
if (!_ep)
return NULL;
- ep = usb_ep_to_dummy_ep(_ep);
req = kzalloc(sizeof(*req), mem_flags);
if (!req)
@@ -2444,9 +2442,6 @@ static int dummy_start(struct usb_hcd *hcd)
static void dummy_stop(struct usb_hcd *hcd)
{
- struct dummy *dum;
-
- dum = hcd_to_dummy_hcd(hcd)->dum;
device_remove_file(dummy_dev(hcd_to_dummy_hcd(hcd)), &dev_attr_urbs);
dev_info(dummy_dev(hcd_to_dummy_hcd(hcd)), "stopped\n");
}
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
index cf8819a5c..8bb011ea7 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -1878,11 +1878,8 @@ static int qe_get_frame(struct usb_gadget *gadget)
tmp = in_be16(&udc->usb_param->frame_n);
if (tmp & 0x8000)
- tmp = tmp & 0x07ff;
- else
- tmp = -EINVAL;
-
- return (int)tmp;
+ return tmp & 0x07ff;
+ return -EINVAL;
}
static int fsl_qe_start(struct usb_gadget *gadget,
diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c
index b1cfa96cc..6e977dc22 100644
--- a/drivers/usb/gadget/udc/m66592-udc.c
+++ b/drivers/usb/gadget/udc/m66592-udc.c
@@ -1199,8 +1199,6 @@ static irqreturn_t m66592_irq(int irq, void *_m66592)
struct m66592 *m66592 = _m66592;
u16 intsts0;
u16 intenb0;
- u16 brdysts, nrdysts, bempsts;
- u16 brdyenb, nrdyenb, bempenb;
u16 savepipe;
u16 mask0;
@@ -1224,12 +1222,10 @@ static irqreturn_t m66592_irq(int irq, void *_m66592)
mask0 = intsts0 & intenb0;
if (mask0) {
- brdysts = m66592_read(m66592, M66592_BRDYSTS);
- nrdysts = m66592_read(m66592, M66592_NRDYSTS);
- bempsts = m66592_read(m66592, M66592_BEMPSTS);
- brdyenb = m66592_read(m66592, M66592_BRDYENB);
- nrdyenb = m66592_read(m66592, M66592_NRDYENB);
- bempenb = m66592_read(m66592, M66592_BEMPENB);
+ u16 brdysts = m66592_read(m66592, M66592_BRDYSTS);
+ u16 bempsts = m66592_read(m66592, M66592_BEMPSTS);
+ u16 brdyenb = m66592_read(m66592, M66592_BRDYENB);
+ u16 bempenb = m66592_read(m66592, M66592_BEMPENB);
if (mask0 & M66592_VBINT) {
m66592_write(m66592, 0xffff & ~M66592_VBINT,
@@ -1408,28 +1404,20 @@ static int m66592_dequeue(struct usb_ep *_ep, struct usb_request *_req)
static int m66592_set_halt(struct usb_ep *_ep, int value)
{
- struct m66592_ep *ep;
- struct m66592_request *req;
+ struct m66592_ep *ep = container_of(_ep, struct m66592_ep, ep);
unsigned long flags;
int ret = 0;
- ep = container_of(_ep, struct m66592_ep, ep);
- req = list_entry(ep->queue.next, struct m66592_request, queue);
-
spin_lock_irqsave(&ep->m66592->lock, flags);
if (!list_empty(&ep->queue)) {
ret = -EAGAIN;
- goto out;
- }
- if (value) {
+ } else if (value) {
ep->busy = 1;
pipe_stall(ep->m66592, ep->pipenum);
} else {
ep->busy = 0;
pipe_stop(ep->m66592, ep->pipenum);
}
-
-out:
spin_unlock_irqrestore(&ep->m66592->lock, flags);
return ret;
}
diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c
index dafe74eb9..b9e19a591 100644
--- a/drivers/usb/gadget/udc/mv_u3d_core.c
+++ b/drivers/usb/gadget/udc/mv_u3d_core.c
@@ -119,18 +119,14 @@ static int mv_u3d_process_ep_req(struct mv_u3d *u3d, int index,
struct mv_u3d_req *curr_req)
{
struct mv_u3d_trb *curr_trb;
- dma_addr_t cur_deq_lo;
- struct mv_u3d_ep_context *curr_ep_context;
- int trb_complete, actual, remaining_length = 0;
+ int actual, remaining_length = 0;
int direction, ep_num;
int retval = 0;
u32 tmp, status, length;
- curr_ep_context = &u3d->ep_context[index];
direction = index % 2;
ep_num = index / 2;
- trb_complete = 0;
actual = curr_req->req.length;
while (!list_empty(&curr_req->trb_list)) {
@@ -143,15 +139,10 @@ static int mv_u3d_process_ep_req(struct mv_u3d *u3d, int index,
}
curr_trb->trb_hw->ctrl.own = 0;
- if (direction == MV_U3D_EP_DIR_OUT) {
+ if (direction == MV_U3D_EP_DIR_OUT)
tmp = ioread32(&u3d->vuc_regs->rxst[ep_num].statuslo);
- cur_deq_lo =
- ioread32(&u3d->vuc_regs->rxst[ep_num].curdeqlo);
- } else {
+ else
tmp = ioread32(&u3d->vuc_regs->txst[ep_num].statuslo);
- cur_deq_lo =
- ioread32(&u3d->vuc_regs->txst[ep_num].curdeqlo);
- }
status = tmp >> MV_U3D_XFERSTATUS_COMPLETE_SHIFT;
length = tmp & MV_U3D_XFERSTATUS_TRB_LENGTH_MASK;
@@ -527,7 +518,6 @@ static int mv_u3d_ep_enable(struct usb_ep *_ep,
{
struct mv_u3d *u3d;
struct mv_u3d_ep *ep;
- struct mv_u3d_ep_context *ep_context;
u16 max = 0;
unsigned maxburst = 0;
u32 epxcr, direction;
@@ -548,9 +538,6 @@ static int mv_u3d_ep_enable(struct usb_ep *_ep,
_ep->maxburst = 1;
maxburst = _ep->maxburst;
- /* Get the endpoint context address */
- ep_context = (struct mv_u3d_ep_context *)ep->ep_context;
-
/* Set the max burst size */
switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
case USB_ENDPOINT_XFER_BULK:
@@ -633,7 +620,6 @@ static int mv_u3d_ep_disable(struct usb_ep *_ep)
{
struct mv_u3d *u3d;
struct mv_u3d_ep *ep;
- struct mv_u3d_ep_context *ep_context;
u32 epxcr, direction;
unsigned long flags;
@@ -646,9 +632,6 @@ static int mv_u3d_ep_disable(struct usb_ep *_ep)
u3d = ep->u3d;
- /* Get the endpoint context address */
- ep_context = ep->ep_context;
-
direction = mv_u3d_ep_dir(ep);
/* nuke all pending requests (does flush) */
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index 81b6229c7..ce73b3552 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -129,7 +129,7 @@ static int process_ep_req(struct mv_udc *udc, int index,
{
struct mv_dtd *curr_dtd;
struct mv_dqh *curr_dqh;
- int td_complete, actual, remaining_length;
+ int actual, remaining_length;
int i, direction;
int retval = 0;
u32 errors;
@@ -139,7 +139,6 @@ static int process_ep_req(struct mv_udc *udc, int index,
direction = index % 2;
curr_dtd = curr_req->head;
- td_complete = 0;
actual = curr_req->req.length;
for (i = 0; i < curr_req->dtd_count; i++) {
@@ -412,11 +411,8 @@ static int req_to_dtd(struct mv_req *req)
unsigned count;
int is_last, is_first = 1;
struct mv_dtd *dtd, *last_dtd = NULL;
- struct mv_udc *udc;
dma_addr_t dma;
- udc = req->ep->udc;
-
do {
dtd = build_dtd(req, &count, &dma, &is_last);
if (dtd == NULL)
@@ -567,7 +563,7 @@ static int mv_ep_disable(struct usb_ep *_ep)
struct mv_udc *udc;
struct mv_ep *ep;
struct mv_dqh *dqh;
- u32 bit_pos, epctrlx, direction;
+ u32 epctrlx, direction;
unsigned long flags;
ep = container_of(_ep, struct mv_ep, ep);
@@ -582,7 +578,6 @@ static int mv_ep_disable(struct usb_ep *_ep)
spin_lock_irqsave(&udc->lock, flags);
direction = ep_dir(ep);
- bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
/* Reset the max packet length and the interrupt on Setup */
dqh->max_packet_length = 0;
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 18f5ebd44..7c6113432 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -329,12 +329,10 @@ static int net2272_disable(struct usb_ep *_ep)
static struct usb_request *
net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
- struct net2272_ep *ep;
struct net2272_request *req;
if (!_ep)
return NULL;
- ep = container_of(_ep, struct net2272_ep, ep);
req = kzalloc(sizeof(*req), gfp_flags);
if (!req)
@@ -348,10 +346,8 @@ net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
static void
net2272_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
- struct net2272_ep *ep;
struct net2272_request *req;
- ep = container_of(_ep, struct net2272_ep, ep);
if (!_ep || !_req)
return;
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index c894b94b2..614ab951a 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -211,7 +211,7 @@ net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
goto print_err;
}
- if (dev->quirks & PLX_SUPERSPEED) {
+ if (dev->quirks & PLX_PCIE) {
if ((desc->bEndpointAddress & 0x0f) >= 0x0c) {
ret = -EDOM;
goto print_err;
@@ -245,7 +245,7 @@ net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
/* set type, direction, address; reset fifo counters */
writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
- if ((dev->quirks & PLX_SUPERSPEED) && dev->enhanced_mode) {
+ if ((dev->quirks & PLX_PCIE) && dev->enhanced_mode) {
tmp = readl(&ep->cfg->ep_cfg);
/* If USB ep number doesn't match hardware ep number */
if ((tmp & 0xf) != usb_endpoint_num(desc)) {
@@ -316,7 +316,7 @@ net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
BIT(CLEAR_NAK_OUT_PACKETS_MODE), &ep->regs->ep_rsp);
}
- if (dev->quirks & PLX_SUPERSPEED)
+ if (dev->quirks & PLX_PCIE)
ep_clear_seqnum(ep);
writel(tmp, &ep->cfg->ep_cfg);
@@ -527,7 +527,7 @@ static int net2280_disable(struct usb_ep *_ep)
spin_lock_irqsave(&ep->dev->lock, flags);
nuke(ep);
- if (ep->dev->quirks & PLX_SUPERSPEED)
+ if (ep->dev->quirks & PLX_PCIE)
ep_reset_338x(ep->dev->regs, ep);
else
ep_reset_228x(ep->dev->regs, ep);
@@ -862,7 +862,7 @@ static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma)
writel(readl(&dma->dmastat), &dma->dmastat);
writel(td_dma, &dma->dmadesc);
- if (ep->dev->quirks & PLX_SUPERSPEED)
+ if (ep->dev->quirks & PLX_PCIE)
dmactl |= BIT(DMA_REQUEST_OUTSTANDING);
writel(dmactl, &dma->dmactl);
@@ -1046,7 +1046,7 @@ net2280_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
/* kickstart this i/o queue? */
if (list_empty(&ep->queue) && !ep->stopped &&
- !((dev->quirks & PLX_SUPERSPEED) && ep->dma &&
+ !((dev->quirks & PLX_PCIE) && ep->dma &&
(readl(&ep->regs->ep_rsp) & BIT(CLEAR_ENDPOINT_HALT)))) {
/* use DMA if the endpoint supports it, else pio */
@@ -1169,7 +1169,7 @@ static void scan_dma_completions(struct net2280_ep *ep)
break;
} else if (!ep->is_in &&
(req->req.length % ep->ep.maxpacket) &&
- !(ep->dev->quirks & PLX_SUPERSPEED)) {
+ !(ep->dev->quirks & PLX_PCIE)) {
tmp = readl(&ep->regs->ep_stat);
/* AVOID TROUBLE HERE by not issuing short reads from
@@ -1367,7 +1367,7 @@ net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
ep->wedged = 1;
} else {
clear_halt(ep);
- if (ep->dev->quirks & PLX_SUPERSPEED &&
+ if (ep->dev->quirks & PLX_PCIE &&
!list_empty(&ep->queue) && ep->td_dma)
restart_dma(ep);
ep->wedged = 0;
@@ -2394,7 +2394,7 @@ static int net2280_start(struct usb_gadget *_gadget,
*/
net2280_led_active(dev, 1);
- if ((dev->quirks & PLX_SUPERSPEED) && !dev->bug7734_patched)
+ if ((dev->quirks & PLX_PCIE) && !dev->bug7734_patched)
defect7374_enable_data_eps_zero(dev);
ep0_start(dev);
@@ -3063,7 +3063,7 @@ static void handle_stat0_irqs(struct net2280 *dev, u32 stat)
}
ep->stopped = 0;
dev->protocol_stall = 0;
- if (!(dev->quirks & PLX_SUPERSPEED)) {
+ if (!(dev->quirks & PLX_PCIE)) {
if (ep->dev->quirks & PLX_2280)
tmp = BIT(FIFO_OVERFLOW) |
BIT(FIFO_UNDERFLOW);
@@ -3090,7 +3090,7 @@ static void handle_stat0_irqs(struct net2280 *dev, u32 stat)
cpu_to_le32s(&u.raw[0]);
cpu_to_le32s(&u.raw[1]);
- if ((dev->quirks & PLX_SUPERSPEED) && !dev->bug7734_patched)
+ if ((dev->quirks & PLX_PCIE) && !dev->bug7734_patched)
defect7374_workaround(dev, u.r);
tmp = 0;
@@ -3173,7 +3173,7 @@ static void handle_stat0_irqs(struct net2280 *dev, u32 stat)
} else {
ep_vdbg(dev, "%s clear halt\n", e->ep.name);
clear_halt(e);
- if ((ep->dev->quirks & PLX_SUPERSPEED) &&
+ if ((ep->dev->quirks & PLX_PCIE) &&
!list_empty(&e->queue) && e->td_dma)
restart_dma(e);
}
@@ -3195,7 +3195,7 @@ static void handle_stat0_irqs(struct net2280 *dev, u32 stat)
if (e->ep.name == ep0name)
goto do_stall;
set_halt(e);
- if ((dev->quirks & PLX_SUPERSPEED) && e->dma)
+ if ((dev->quirks & PLX_PCIE) && e->dma)
abort_dma(e);
allow_status(ep);
ep_vdbg(dev, "%s set halt\n", ep->ep.name);
@@ -3234,7 +3234,7 @@ do_stall:
#undef w_length
next_endpoints:
- if ((dev->quirks & PLX_SUPERSPEED) && dev->enhanced_mode) {
+ if ((dev->quirks & PLX_PCIE) && dev->enhanced_mode) {
u32 mask = (BIT(ENDPOINT_0_INTERRUPT) |
USB3380_IRQSTAT0_EP_INTR_MASK_IN |
USB3380_IRQSTAT0_EP_INTR_MASK_OUT);
@@ -3399,7 +3399,7 @@ __acquires(dev->lock)
writel(tmp, &dma->dmastat);
/* dma sync */
- if (dev->quirks & PLX_SUPERSPEED) {
+ if (dev->quirks & PLX_PCIE) {
u32 r_dmacount = readl(&dma->dmacount);
if (!ep->is_in && (r_dmacount & 0x00FFFFFF) &&
(tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT)))
@@ -3468,7 +3468,7 @@ static irqreturn_t net2280_irq(int irq, void *_dev)
/* control requests and PIO */
handle_stat0_irqs(dev, readl(&dev->regs->irqstat0));
- if (dev->quirks & PLX_SUPERSPEED) {
+ if (dev->quirks & PLX_PCIE) {
/* re-enable interrupt to trigger any possible new interrupt */
u32 pciirqenb1 = readl(&dev->regs->pciirqenb1);
writel(pciirqenb1 & 0x7FFFFFFF, &dev->regs->pciirqenb1);
@@ -3513,7 +3513,7 @@ static void net2280_remove(struct pci_dev *pdev)
}
if (dev->got_irq)
free_irq(pdev->irq, dev);
- if (dev->quirks & PLX_SUPERSPEED)
+ if (dev->quirks & PLX_PCIE)
pci_disable_msi(pdev);
if (dev->regs)
iounmap(dev->regs);
@@ -3593,7 +3593,7 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200);
dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300);
- if (dev->quirks & PLX_SUPERSPEED) {
+ if (dev->quirks & PLX_PCIE) {
u32 fsmvalue;
u32 usbstat;
dev->usb_ext = (struct usb338x_usb_ext_regs __iomem *)
@@ -3637,7 +3637,7 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto done;
}
- if (dev->quirks & PLX_SUPERSPEED)
+ if (dev->quirks & PLX_PCIE)
if (pci_enable_msi(pdev))
ep_err(dev, "Failed to enable MSI mode\n");
@@ -3755,10 +3755,19 @@ static const struct pci_device_id pci_ids[] = { {
.class = PCI_CLASS_SERIAL_USB_DEVICE,
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_PLX,
+ .device = 0x2380,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = PLX_PCIE,
+ },
+ {
+ .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
+ .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_PLX,
.device = 0x3380,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
- .driver_data = PLX_SUPERSPEED,
+ .driver_data = PLX_PCIE | PLX_SUPERSPEED,
},
{
.class = PCI_CLASS_SERIAL_USB_DEVICE,
@@ -3767,7 +3776,7 @@ static const struct pci_device_id pci_ids[] = { {
.device = 0x3382,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
- .driver_data = PLX_SUPERSPEED,
+ .driver_data = PLX_PCIE | PLX_SUPERSPEED,
},
{ /* end: all zeroes */ }
};
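
The ID table now separates the two flag meanings: the new 0x2380 entry carries PLX_PCIE alone (a PCIe part without the USB3 paths, judging by its driver_data), while the 0x3380/0x3382 entries carry PLX_PCIE | PLX_SUPERSPEED, and the bulk of the checks above were switched from PLX_SUPERSPEED to PLX_PCIE because they really guard the PCIe register layout. Sketch of how driver_data reaches those checks; the real assignment lives in net2280_probe():

	#include <linux/pci.h>

	static void quirks_sketch(const struct pci_device_id *id)
	{
		unsigned long quirks = id->driver_data;

		if (quirks & PLX_PCIE)		/* 2380/3380/3382 */
			pr_info("PCIe register layout\n");
		if (quirks & PLX_SUPERSPEED)	/* 3380/3382 only */
			pr_info("USB3-capable paths enabled\n");
	}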
diff --git a/drivers/usb/gadget/udc/net2280.h b/drivers/usb/gadget/udc/net2280.h
index 0d32052bf..2736a9575 100644
--- a/drivers/usb/gadget/udc/net2280.h
+++ b/drivers/usb/gadget/udc/net2280.h
@@ -47,6 +47,7 @@ set_idx_reg(struct net2280_regs __iomem *regs, u32 index, u32 value)
#define PLX_LEGACY BIT(0)
#define PLX_2280 BIT(1)
#define PLX_SUPERSPEED BIT(2)
+#define PLX_PCIE BIT(3)
#define REG_DIAG 0x0
#define RETRY_COUNTER 16
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
index 717514298..a97da645c 100644
--- a/drivers/usb/gadget/udc/pch_udc.c
+++ b/drivers/usb/gadget/udc/pch_udc.c
@@ -1984,9 +1984,8 @@ static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
if (ep->num == PCH_UDC_EP0)
ep->dev->stall = 1;
pch_udc_ep_set_stall(ep);
- pch_udc_enable_ep_interrupts(ep->dev,
- PCH_UDC_EPINT(ep->in,
- ep->num));
+ pch_udc_enable_ep_interrupts(
+ ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
} else {
pch_udc_ep_clear_stall(ep);
}
@@ -2451,16 +2450,11 @@ static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
*/
static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
{
- struct pch_udc_ep *ep;
- struct pch_udc_request *req;
-
- ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
- if (!list_empty(&ep->queue)) {
- req = list_entry(ep->queue.next, struct pch_udc_request, queue);
- pch_udc_enable_ep_interrupts(ep->dev,
- PCH_UDC_EPINT(ep->in, ep->num));
- pch_udc_ep_clear_nak(ep);
- }
+ struct pch_udc_ep *ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
+ if (list_empty(&ep->queue))
+ return;
+ pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
+ pch_udc_ep_clear_nak(ep);
}
/**
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index 001a3b74a..ad140aa00 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -1825,13 +1825,10 @@ fail:
* Disables all udc endpoints (even control endpoint), report disconnect to
* the gadget user.
*/
-static void stop_activity(struct pxa_udc *udc, struct usb_gadget_driver *driver)
+static void stop_activity(struct pxa_udc *udc)
{
int i;
- /* don't disconnect drivers more than once */
- if (udc->gadget.speed == USB_SPEED_UNKNOWN)
- driver = NULL;
udc->gadget.speed = USB_SPEED_UNKNOWN;
for (i = 0; i < NR_USB_ENDPOINTS; i++)
@@ -1848,7 +1845,7 @@ static int pxa27x_udc_stop(struct usb_gadget *g)
{
struct pxa_udc *udc = to_pxa(g);
- stop_activity(udc, NULL);
+ stop_activity(udc);
udc_disable(udc);
udc->driver = NULL;
@@ -2296,7 +2293,7 @@ static void irq_udc_reset(struct pxa_udc *udc)
if ((udccr & UDCCR_UDA) == 0) {
dev_dbg(udc->dev, "USB reset start\n");
- stop_activity(udc, udc->driver);
+ stop_activity(udc);
}
udc->gadget.speed = USB_SPEED_FULL;
memset(&udc->stats, 0, sizeof udc->stats);
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index 8b300e6da..f2c886209 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -1464,8 +1464,6 @@ static irqreturn_t r8a66597_irq(int irq, void *_r8a66597)
struct r8a66597 *r8a66597 = _r8a66597;
u16 intsts0;
u16 intenb0;
- u16 brdysts, nrdysts, bempsts;
- u16 brdyenb, nrdyenb, bempenb;
u16 savepipe;
u16 mask0;
@@ -1481,12 +1479,10 @@ static irqreturn_t r8a66597_irq(int irq, void *_r8a66597)
mask0 = intsts0 & intenb0;
if (mask0) {
- brdysts = r8a66597_read(r8a66597, BRDYSTS);
- nrdysts = r8a66597_read(r8a66597, NRDYSTS);
- bempsts = r8a66597_read(r8a66597, BEMPSTS);
- brdyenb = r8a66597_read(r8a66597, BRDYENB);
- nrdyenb = r8a66597_read(r8a66597, NRDYENB);
- bempenb = r8a66597_read(r8a66597, BEMPENB);
+ u16 brdysts = r8a66597_read(r8a66597, BRDYSTS);
+ u16 bempsts = r8a66597_read(r8a66597, BEMPSTS);
+ u16 brdyenb = r8a66597_read(r8a66597, BRDYENB);
+ u16 bempenb = r8a66597_read(r8a66597, BEMPENB);
if (mask0 & VBINT) {
r8a66597_write(r8a66597, 0xffff & ~VBINT,
@@ -1658,20 +1654,14 @@ static int r8a66597_dequeue(struct usb_ep *_ep, struct usb_request *_req)
static int r8a66597_set_halt(struct usb_ep *_ep, int value)
{
- struct r8a66597_ep *ep;
- struct r8a66597_request *req;
+ struct r8a66597_ep *ep = container_of(_ep, struct r8a66597_ep, ep);
unsigned long flags;
int ret = 0;
- ep = container_of(_ep, struct r8a66597_ep, ep);
- req = get_request_from_ep(ep);
-
spin_lock_irqsave(&ep->r8a66597->lock, flags);
if (!list_empty(&ep->queue)) {
ret = -EAGAIN;
- goto out;
- }
- if (value) {
+ } else if (value) {
ep->busy = 1;
pipe_stall(ep->r8a66597, ep->pipenum);
} else {
@@ -1679,8 +1669,6 @@ static int r8a66597_set_halt(struct usb_ep *_ep, int value)
ep->wedge = 0;
pipe_stop(ep->r8a66597, ep->pipenum);
}
-
-out:
spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
return ret;
}
diff --git a/drivers/usb/gadget/udc/trace.c b/drivers/usb/gadget/udc/trace.c
new file mode 100644
index 000000000..8c551ab91
--- /dev/null
+++ b/drivers/usb/gadget/udc/trace.c
@@ -0,0 +1,18 @@
+/**
+ * trace.c - USB Gadget Framework Trace Support
+ *
+ * Copyright (C) 2016 Intel Corporation
+ * Author: Felipe Balbi <felipe.balbi@linux.intel.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/usb/gadget/udc/trace.h b/drivers/usb/gadget/udc/trace.h
new file mode 100644
index 000000000..da29874b5
--- /dev/null
+++ b/drivers/usb/gadget/udc/trace.h
@@ -0,0 +1,298 @@
+/**
+ * trace.h - USB Gadget Framework Trace Support
+ *
+ * Copyright (C) 2016 Intel Corporation
+ * Author: Felipe Balbi <felipe.balbi@linux.intel.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gadget
+
+#if !defined(__UDC_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __UDC_TRACE_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <asm/byteorder.h>
+#include <linux/usb/gadget.h>
+
+DECLARE_EVENT_CLASS(udc_log_gadget,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret),
+ TP_STRUCT__entry(
+ __field(enum usb_device_speed, speed)
+ __field(enum usb_device_speed, max_speed)
+ __field(enum usb_device_state, state)
+ __field(unsigned, mA)
+ __field(unsigned, sg_supported)
+ __field(unsigned, is_otg)
+ __field(unsigned, is_a_peripheral)
+ __field(unsigned, b_hnp_enable)
+ __field(unsigned, a_hnp_support)
+ __field(unsigned, hnp_polling_support)
+ __field(unsigned, host_request_flag)
+ __field(unsigned, quirk_ep_out_aligned_size)
+ __field(unsigned, quirk_altset_not_supp)
+ __field(unsigned, quirk_stall_not_supp)
+ __field(unsigned, quirk_zlp_not_supp)
+ __field(unsigned, is_selfpowered)
+ __field(unsigned, deactivated)
+ __field(unsigned, connected)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->speed = g->speed;
+ __entry->max_speed = g->max_speed;
+ __entry->state = g->state;
+ __entry->mA = g->mA;
+ __entry->sg_supported = g->sg_supported;
+ __entry->is_otg = g->is_otg;
+ __entry->is_a_peripheral = g->is_a_peripheral;
+ __entry->b_hnp_enable = g->b_hnp_enable;
+ __entry->a_hnp_support = g->a_hnp_support;
+ __entry->hnp_polling_support = g->hnp_polling_support;
+ __entry->host_request_flag = g->host_request_flag;
+ __entry->quirk_ep_out_aligned_size = g->quirk_ep_out_aligned_size;
+ __entry->quirk_altset_not_supp = g->quirk_altset_not_supp;
+ __entry->quirk_stall_not_supp = g->quirk_stall_not_supp;
+ __entry->quirk_zlp_not_supp = g->quirk_zlp_not_supp;
+ __entry->is_selfpowered = g->is_selfpowered;
+ __entry->deactivated = g->deactivated;
+ __entry->connected = g->connected;
+ __entry->ret = ret;
+ ),
+ TP_printk("speed %d/%d state %d %dmA [%s%s%s%s%s%s%s%s%s%s%s%s%s%s] --> %d",
+ __entry->speed, __entry->max_speed, __entry->state, __entry->mA,
+ __entry->sg_supported ? "sg:" : "",
+ __entry->is_otg ? "OTG:" : "",
+ __entry->is_a_peripheral ? "a_peripheral:" : "",
+ __entry->b_hnp_enable ? "b_hnp:" : "",
+ __entry->a_hnp_support ? "a_hnp:" : "",
+ __entry->hnp_polling_support ? "hnp_poll:" : "",
+ __entry->host_request_flag ? "hostreq:" : "",
+ __entry->quirk_ep_out_aligned_size ? "out_aligned:" : "",
+ __entry->quirk_altset_not_supp ? "no_altset:" : "",
+ __entry->quirk_stall_not_supp ? "no_stall:" : "",
+ __entry->quirk_zlp_not_supp ? "no_zlp:" : "",
+ __entry->is_selfpowered ? "self-powered:" : "bus-powered:",
+ __entry->deactivated ? "deactivated:" : "activated:",
+ __entry->connected ? "connected" : "disconnected",
+ __entry->ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_frame_number,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_wakeup,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_set_selfpowered,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_clear_selfpowered,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_vbus_connect,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_vbus_draw,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_vbus_disconnect,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_connect,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_disconnect,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_deactivate,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_activate,
+ TP_PROTO(struct usb_gadget *g, int ret),
+ TP_ARGS(g, ret)
+);
+
+DECLARE_EVENT_CLASS(udc_log_ep,
+ TP_PROTO(struct usb_ep *ep, int ret),
+ TP_ARGS(ep, ret),
+ TP_STRUCT__entry(
+ __dynamic_array(char, name, UDC_TRACE_STR_MAX)
+ __field(unsigned, maxpacket)
+ __field(unsigned, maxpacket_limit)
+ __field(unsigned, max_streams)
+ __field(unsigned, mult)
+ __field(unsigned, maxburst)
+ __field(u8, address)
+ __field(bool, claimed)
+ __field(bool, enabled)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ snprintf(__get_str(name), UDC_TRACE_STR_MAX, "%s", ep->name);
+ __entry->maxpacket = ep->maxpacket;
+ __entry->maxpacket_limit = ep->maxpacket_limit;
+ __entry->max_streams = ep->max_streams;
+ __entry->mult = ep->mult;
+ __entry->maxburst = ep->maxburst;
+ __entry->address = ep->address;
+ __entry->claimed = ep->claimed;
+ __entry->enabled = ep->enabled;
+ __entry->ret = ret;
+ ),
+ TP_printk("%s: mps %d/%d streams %d mult %d burst %d addr %02x %s%s --> %d",
+ __get_str(name), __entry->maxpacket, __entry->maxpacket_limit,
+ __entry->max_streams, __entry->mult, __entry->maxburst,
+ __entry->address, __entry->claimed ? "claimed:" : "released:",
+ __entry->enabled ? "enabled" : "disabled", __entry->ret)
+);
+
+DEFINE_EVENT(udc_log_ep, usb_ep_set_maxpacket_limit,
+ TP_PROTO(struct usb_ep *ep, int ret),
+ TP_ARGS(ep, ret)
+);
+
+DEFINE_EVENT(udc_log_ep, usb_ep_enable,
+ TP_PROTO(struct usb_ep *ep, int ret),
+ TP_ARGS(ep, ret)
+);
+
+DEFINE_EVENT(udc_log_ep, usb_ep_disable,
+ TP_PROTO(struct usb_ep *ep, int ret),
+ TP_ARGS(ep, ret)
+);
+
+DEFINE_EVENT(udc_log_ep, usb_ep_set_halt,
+ TP_PROTO(struct usb_ep *ep, int ret),
+ TP_ARGS(ep, ret)
+);
+
+DEFINE_EVENT(udc_log_ep, usb_ep_clear_halt,
+ TP_PROTO(struct usb_ep *ep, int ret),
+ TP_ARGS(ep, ret)
+);
+
+DEFINE_EVENT(udc_log_ep, usb_ep_set_wedge,
+ TP_PROTO(struct usb_ep *ep, int ret),
+ TP_ARGS(ep, ret)
+);
+
+DEFINE_EVENT(udc_log_ep, usb_ep_fifo_status,
+ TP_PROTO(struct usb_ep *ep, int ret),
+ TP_ARGS(ep, ret)
+);
+
+DEFINE_EVENT(udc_log_ep, usb_ep_fifo_flush,
+ TP_PROTO(struct usb_ep *ep, int ret),
+ TP_ARGS(ep, ret)
+);
+
+DECLARE_EVENT_CLASS(udc_log_req,
+ TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret),
+ TP_ARGS(ep, req, ret),
+ TP_STRUCT__entry(
+ __dynamic_array(char, name, UDC_TRACE_STR_MAX)
+ __field(unsigned, length)
+ __field(unsigned, actual)
+ __field(unsigned, num_sgs)
+ __field(unsigned, num_mapped_sgs)
+ __field(unsigned, stream_id)
+ __field(unsigned, no_interrupt)
+ __field(unsigned, zero)
+ __field(unsigned, short_not_ok)
+ __field(int, status)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ snprintf(__get_str(name), UDC_TRACE_STR_MAX, "%s", ep->name);
+ __entry->length = req->length;
+ __entry->actual = req->actual;
+ __entry->num_sgs = req->num_sgs;
+ __entry->num_mapped_sgs = req->num_mapped_sgs;
+ __entry->stream_id = req->stream_id;
+ __entry->no_interrupt = req->no_interrupt;
+ __entry->zero = req->zero;
+ __entry->short_not_ok = req->short_not_ok;
+ __entry->status = req->status;
+ __entry->ret = ret;
+ ),
+ TP_printk("%s: length %d/%d sgs %d/%d stream %d %s%s%s status %d --> %d",
+ __get_str(name), __entry->actual, __entry->length,
+ __entry->num_mapped_sgs, __entry->num_sgs, __entry->stream_id,
+ __entry->zero ? "Z" : "z",
+ __entry->short_not_ok ? "S" : "s",
+ __entry->no_interrupt ? "i" : "I",
+ __entry->status, __entry->ret
+ )
+);
+
+DEFINE_EVENT(udc_log_req, usb_ep_alloc_request,
+ TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret),
+ TP_ARGS(ep, req, ret)
+);
+
+DEFINE_EVENT(udc_log_req, usb_ep_free_request,
+ TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret),
+ TP_ARGS(ep, req, ret)
+);
+
+DEFINE_EVENT(udc_log_req, usb_ep_queue,
+ TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret),
+ TP_ARGS(ep, req, ret)
+);
+
+DEFINE_EVENT(udc_log_req, usb_ep_dequeue,
+ TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret),
+ TP_ARGS(ep, req, ret)
+);
+
+DEFINE_EVENT(udc_log_req, usb_gadget_giveback_request,
+ TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret),
+ TP_ARGS(ep, req, ret)
+);
+
+#endif /* __UDC_TRACE_H */
+
+/* this part must stay outside the include guard so define_trace.h can re-read it */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
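
Each DEFINE_EVENT(class, name, ...) above generates a trace_<name>() helper taking the class's TP_PROTO arguments. A caller in the UDC core would look roughly like this; a sketch close to, but not quoted from, the 4.8 usb_ep_enable():

	int usb_ep_enable(struct usb_ep *ep)
	{
		int ret = 0;

		if (ep->enabled)
			goto out;

		ret = ep->ops->enable(ep, ep->desc);
		if (!ret)
			ep->enabled = true;
	out:
		trace_usb_ep_enable(ep, ret);	/* records ep state plus ret */
		return ret;
	}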
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index 1cbb0ac6b..f8bf290f1 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -2055,7 +2055,6 @@ static int xudc_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct resource *res;
struct xusb_udc *udc;
- struct xusb_ep *ep0;
int irq;
int ret;
u32 ier;
@@ -2119,8 +2118,6 @@ static int xudc_probe(struct platform_device *pdev)
xudc_eps_init(udc);
- ep0 = &udc->ep[0];
-
/* Set device address to 0.*/
udc->write_fn(udc->addr, XUSB_ADDRESS_OFFSET, 0);
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index d8f567480..2e710a4cc 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -180,7 +180,7 @@ config USB_EHCI_MXC
config USB_EHCI_HCD_OMAP
tristate "EHCI support for OMAP3 and later chips"
depends on ARCH_OMAP
- select NOP_USB_XCEIV
+ depends on NOP_USB_XCEIV
default y
---help---
Enables support for the on-chip EHCI controller on
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index 1757ebb47..6816b8c37 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -39,11 +39,12 @@
#define DRIVER_DESC "EHCI generic platform driver"
#define EHCI_MAX_CLKS 3
+#define EHCI_MAX_RSTS 3
#define hcd_to_ehci_priv(h) ((struct ehci_platform_priv *)hcd_to_ehci(h)->priv)
struct ehci_platform_priv {
struct clk *clks[EHCI_MAX_CLKS];
- struct reset_control *rst;
+ struct reset_control *rsts[EHCI_MAX_RSTS];
struct phy **phys;
int num_phys;
bool reset_on_resume;
@@ -149,7 +150,7 @@ static int ehci_platform_probe(struct platform_device *dev)
struct usb_ehci_pdata *pdata = dev_get_platdata(&dev->dev);
struct ehci_platform_priv *priv;
struct ehci_hcd *ehci;
- int err, irq, phy_num, clk = 0;
+ int err, irq, phy_num, clk = 0, rst;
if (usb_disabled())
return -ENODEV;
@@ -234,16 +235,20 @@ static int ehci_platform_probe(struct platform_device *dev)
}
}
- priv->rst = devm_reset_control_get_optional(&dev->dev, NULL);
- if (IS_ERR(priv->rst)) {
- err = PTR_ERR(priv->rst);
- if (err == -EPROBE_DEFER)
- goto err_put_clks;
- priv->rst = NULL;
- } else {
- err = reset_control_deassert(priv->rst);
+ for (rst = 0; rst < EHCI_MAX_RSTS; rst++) {
+ priv->rsts[rst] = devm_reset_control_get_shared_by_index(
+ &dev->dev, rst);
+ if (IS_ERR(priv->rsts[rst])) {
+ err = PTR_ERR(priv->rsts[rst]);
+ if (err == -EPROBE_DEFER)
+ goto err_reset;
+ priv->rsts[rst] = NULL;
+ break;
+ }
+
+ err = reset_control_deassert(priv->rsts[rst]);
if (err)
- goto err_put_clks;
+ goto err_reset;
}
if (pdata->big_endian_desc)
@@ -300,8 +305,8 @@ err_power:
if (pdata->power_off)
pdata->power_off(dev);
err_reset:
- if (priv->rst)
- reset_control_assert(priv->rst);
+ while (--rst >= 0)
+ reset_control_assert(priv->rsts[rst]);
err_put_clks:
while (--clk >= 0)
clk_put(priv->clks[clk]);
@@ -319,15 +324,15 @@ static int ehci_platform_remove(struct platform_device *dev)
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct usb_ehci_pdata *pdata = dev_get_platdata(&dev->dev);
struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
- int clk;
+ int clk, rst;
usb_remove_hcd(hcd);
if (pdata->power_off)
pdata->power_off(dev);
- if (priv->rst)
- reset_control_assert(priv->rst);
+ for (rst = 0; rst < EHCI_MAX_RSTS && priv->rsts[rst]; rst++)
+ reset_control_assert(priv->rsts[rst]);
for (clk = 0; clk < EHCI_MAX_CLKS && priv->clks[clk]; clk++)
clk_put(priv->clks[clk]);
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
index c369c29e4..2f7690092 100644
--- a/drivers/usb/host/max3421-hcd.c
+++ b/drivers/usb/host/max3421-hcd.c
@@ -1675,7 +1675,7 @@ max3421_gpout_set_value(struct usb_hcd *hcd, u8 pin_number, u8 value)
if (pin_number > 7)
return;
- mask = 1u << pin_number;
+ mask = 1u << (pin_number % 4);
idx = pin_number / 4;
if (value)
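
The one-line max3421 change fixes a real addressing bug: the eight GPOUT pins are split across two IOPINS registers, four pins each, so shifting by the absolute pin number produced a mask outside the selected register for pins 4-7. The corrected mapping, stated on its own (the per-register layout is inferred from the driver's idx/mask arithmetic):

	/* each IOPINS register carries four GPOUT bits */
	static void gpout_pin_to_reg_bit(u8 pin, u8 *reg_idx, u8 *mask)
	{
		*reg_idx = pin / 4;		/* register 0: pins 0-3, register 1: pins 4-7 */
		*mask = 1u << (pin % 4);	/* bit within that register */
	}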
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 0449235d4..1700908b8 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -500,7 +500,6 @@ static int ohci_init (struct ohci_hcd *ohci)
setup_timer(&ohci->io_watchdog, io_watchdog_func,
(unsigned long) ohci);
- set_timer_slack(&ohci->io_watchdog, msecs_to_jiffies(20));
ohci->hcca = dma_alloc_coherent (hcd->self.controller,
sizeof(*ohci->hcca), &ohci->hcca_dma, GFP_KERNEL);
diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
index ae1c988da..898b74086 100644
--- a/drivers/usb/host/ohci-platform.c
+++ b/drivers/usb/host/ohci-platform.c
@@ -33,11 +33,12 @@
#define DRIVER_DESC "OHCI generic platform driver"
#define OHCI_MAX_CLKS 3
+#define OHCI_MAX_RESETS 2
#define hcd_to_ohci_priv(h) ((struct ohci_platform_priv *)hcd_to_ohci(h)->priv)
struct ohci_platform_priv {
struct clk *clks[OHCI_MAX_CLKS];
- struct reset_control *rst;
+ struct reset_control *resets[OHCI_MAX_RESETS];
struct phy **phys;
int num_phys;
};
@@ -117,7 +118,7 @@ static int ohci_platform_probe(struct platform_device *dev)
struct usb_ohci_pdata *pdata = dev_get_platdata(&dev->dev);
struct ohci_platform_priv *priv;
struct ohci_hcd *ohci;
- int err, irq, phy_num, clk = 0;
+ int err, irq, phy_num, clk = 0, rst = 0;
if (usb_disabled())
return -ENODEV;
@@ -195,19 +196,21 @@ static int ohci_platform_probe(struct platform_device *dev)
break;
}
}
-
- }
-
- priv->rst = devm_reset_control_get_optional(&dev->dev, NULL);
- if (IS_ERR(priv->rst)) {
- err = PTR_ERR(priv->rst);
- if (err == -EPROBE_DEFER)
- goto err_put_clks;
- priv->rst = NULL;
- } else {
- err = reset_control_deassert(priv->rst);
- if (err)
- goto err_put_clks;
+ for (rst = 0; rst < OHCI_MAX_RESETS; rst++) {
+ priv->resets[rst] =
+ devm_reset_control_get_shared_by_index(
+ &dev->dev, rst);
+ if (IS_ERR(priv->resets[rst])) {
+ err = PTR_ERR(priv->resets[rst]);
+ if (err == -EPROBE_DEFER)
+ goto err_reset;
+ priv->resets[rst] = NULL;
+ break;
+ }
+ err = reset_control_deassert(priv->resets[rst]);
+ if (err)
+ goto err_reset;
+ }
}
if (pdata->big_endian_desc)
@@ -265,8 +268,8 @@ err_power:
if (pdata->power_off)
pdata->power_off(dev);
err_reset:
- if (priv->rst)
- reset_control_assert(priv->rst);
+ while (--rst >= 0)
+ reset_control_assert(priv->resets[rst]);
err_put_clks:
while (--clk >= 0)
clk_put(priv->clks[clk]);
@@ -284,15 +287,15 @@ static int ohci_platform_remove(struct platform_device *dev)
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct usb_ohci_pdata *pdata = dev_get_platdata(&dev->dev);
struct ohci_platform_priv *priv = hcd_to_ohci_priv(hcd);
- int clk;
+ int clk, rst;
usb_remove_hcd(hcd);
if (pdata->power_off)
pdata->power_off(dev);
- if (priv->rst)
- reset_control_assert(priv->rst);
+ for (rst = 0; rst < OHCI_MAX_RESETS && priv->resets[rst]; rst++)
+ reset_control_assert(priv->resets[rst]);
for (clk = 0; clk < OHCI_MAX_CLKS && priv->clks[clk]; clk++)
clk_put(priv->clks[clk]);
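
Both platform HCDs now walk reset lines by index instead of taking one optional reset, and use the shared variants so several controllers can reference the same line. The error convention inside the loop: -EPROBE_DEFER aborts the probe, while any other error is read as "no reset at this index, stop looking". Generic form of the loop; MAX_RSTS and the locals are placeholders:

	struct reset_control *rsts[MAX_RSTS];
	int i, err;

	for (i = 0; i < MAX_RSTS; i++) {
		rsts[i] = devm_reset_control_get_shared_by_index(dev, i);
		if (IS_ERR(rsts[i])) {
			err = PTR_ERR(rsts[i]);
			if (err == -EPROBE_DEFER)
				goto err_reset;	/* provider not ready yet */
			rsts[i] = NULL;		/* treat as "no more resets" */
			break;
		}
		err = reset_control_deassert(rsts[i]);
		if (err)
			goto err_reset;
	}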
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index bad0d1f9a..6afe32381 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -37,7 +37,9 @@
* "All components of all Command and Transfer TRBs shall be initialized to '0'"
*/
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
- unsigned int cycle_state, gfp_t flags)
+ unsigned int cycle_state,
+ unsigned int max_packet,
+ gfp_t flags)
{
struct xhci_segment *seg;
dma_addr_t dma;
@@ -53,6 +55,14 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
return NULL;
}
+ if (max_packet) {
+ seg->bounce_buf = kzalloc(max_packet, flags | GFP_DMA);
+ if (!seg->bounce_buf) {
+ dma_pool_free(xhci->segment_pool, seg->trbs, dma);
+ kfree(seg);
+ return NULL;
+ }
+ }
/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
if (cycle_state == 0) {
for (i = 0; i < TRBS_PER_SEGMENT; i++)
@@ -70,6 +80,7 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
seg->trbs = NULL;
}
+ kfree(seg->bounce_buf);
kfree(seg);
}
@@ -317,11 +328,11 @@ static void xhci_initialize_ring_info(struct xhci_ring *ring,
static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
struct xhci_segment **first, struct xhci_segment **last,
unsigned int num_segs, unsigned int cycle_state,
- enum xhci_ring_type type, gfp_t flags)
+ enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
{
struct xhci_segment *prev;
- prev = xhci_segment_alloc(xhci, cycle_state, flags);
+ prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
if (!prev)
return -ENOMEM;
num_segs--;
@@ -330,7 +341,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
while (num_segs > 0) {
struct xhci_segment *next;
- next = xhci_segment_alloc(xhci, cycle_state, flags);
+ next = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
if (!next) {
prev = *first;
while (prev) {
@@ -360,7 +371,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
*/
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
unsigned int num_segs, unsigned int cycle_state,
- enum xhci_ring_type type, gfp_t flags)
+ enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
{
struct xhci_ring *ring;
int ret;
@@ -370,13 +381,15 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
return NULL;
ring->num_segs = num_segs;
+ ring->bounce_buf_len = max_packet;
INIT_LIST_HEAD(&ring->td_list);
ring->type = type;
if (num_segs == 0)
return ring;
ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
- &ring->last_seg, num_segs, cycle_state, type, flags);
+ &ring->last_seg, num_segs, cycle_state, type,
+ max_packet, flags);
if (ret)
goto fail;
@@ -470,7 +483,8 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
ring->num_segs : num_segs_needed;
ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
- num_segs, ring->cycle_state, ring->type, flags);
+ num_segs, ring->cycle_state, ring->type,
+ ring->bounce_buf_len, flags);
if (ret)
return -ENOMEM;
@@ -652,7 +666,8 @@ struct xhci_ring *xhci_stream_id_to_ring(
*/
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs,
- unsigned int num_streams, gfp_t mem_flags)
+ unsigned int num_streams,
+ unsigned int max_packet, gfp_t mem_flags)
{
struct xhci_stream_info *stream_info;
u32 cur_stream;
@@ -704,9 +719,11 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
* and add their segment DMA addresses to the radix tree.
* Stream 0 is reserved.
*/
+
for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
stream_info->stream_rings[cur_stream] =
- xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, mem_flags);
+ xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet,
+ mem_flags);
cur_ring = stream_info->stream_rings[cur_stream];
if (!cur_ring)
goto cleanup_rings;
@@ -1003,7 +1020,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
}
/* Allocate endpoint 0 ring */
- dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, flags);
+ dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
if (!dev->eps[0].ring)
goto fail;
@@ -1434,22 +1451,6 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
return -EINVAL;
ring_type = usb_endpoint_type(&ep->desc);
- /* Set up the endpoint ring */
- virt_dev->eps[ep_index].new_ring =
- xhci_ring_alloc(xhci, 2, 1, ring_type, mem_flags);
- if (!virt_dev->eps[ep_index].new_ring) {
- /* Attempt to use the ring cache */
- if (virt_dev->num_rings_cached == 0)
- return -ENOMEM;
- virt_dev->num_rings_cached--;
- virt_dev->eps[ep_index].new_ring =
- virt_dev->ring_cache[virt_dev->num_rings_cached];
- virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
- xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
- 1, ring_type);
- }
- virt_dev->eps[ep_index].skip = false;
- ep_ring = virt_dev->eps[ep_index].new_ring;
/*
* Get values to fill the endpoint context, mostly from ep descriptor.
@@ -1479,6 +1480,23 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
if ((xhci->hci_version > 0x100) && HCC2_LEC(xhci->hcc_params2))
mult = 0;
+ /* Set up the endpoint ring */
+ virt_dev->eps[ep_index].new_ring =
+ xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
+ if (!virt_dev->eps[ep_index].new_ring) {
+ /* Attempt to use the ring cache */
+ if (virt_dev->num_rings_cached == 0)
+ return -ENOMEM;
+ virt_dev->num_rings_cached--;
+ virt_dev->eps[ep_index].new_ring =
+ virt_dev->ring_cache[virt_dev->num_rings_cached];
+ virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
+ xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
+ 1, ring_type);
+ }
+ virt_dev->eps[ep_index].skip = false;
+ ep_ring = virt_dev->eps[ep_index].new_ring;
+
/* Fill the endpoint context */
ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
EP_INTERVAL(interval) |
@@ -2409,7 +2427,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
goto fail;
/* Set up the command ring to have one segments for now. */
- xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
+ xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags);
if (!xhci->cmd_ring)
goto fail;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
@@ -2454,7 +2472,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
*/
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
- flags);
+ 0, flags);
if (!xhci->event_ring)
goto fail;
if (xhci_check_trb_in_td_math(xhci) < 0)
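
Every xhci_ring_alloc() caller now passes a max_packet: zero for the command ring, event ring and default control ring (no bounce buffer needed), and the endpoint's packet size for transfer rings. For an endpoint ring the value comes from the descriptor, roughly as below; GET_MAX_PACKET is the existing xhci.h helper that masks the packet-size bits of wMaxPacketSize:

	/* bits 10:0 of wMaxPacketSize; the high bits encode the isoc mult */
	unsigned int max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));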
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 69f7fabdb..d7b0f97ab 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -388,7 +388,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
* need to have the registers polled during D3, so avoid D3cold.
*/
if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
- pdev->no_d3cold = true;
+ pci_d3cold_disable(pdev);
if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
xhci_pme_quirk(hcd);
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 1f3f981fe..ed56bf9ed 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -18,7 +18,6 @@
#include <linux/platform_device.h>
#include <linux/usb/phy.h>
#include <linux/slab.h>
-#include <linux/usb/xhci_pdriver.h>
#include <linux/acpi.h>
#include "xhci.h"
@@ -138,8 +137,6 @@ MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
static int xhci_plat_probe(struct platform_device *pdev)
{
- struct device_node *node = pdev->dev.of_node;
- struct usb_xhci_pdata *pdata = dev_get_platdata(&pdev->dev);
const struct of_device_id *match;
const struct hc_driver *driver;
struct xhci_hcd *xhci;
@@ -202,7 +199,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
}
xhci = hcd_to_xhci(hcd);
- match = of_match_node(usb_xhci_of_match, node);
+ match = of_match_node(usb_xhci_of_match, pdev->dev.of_node);
if (match) {
const struct xhci_plat_priv *priv_match = match->data;
struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
@@ -223,8 +220,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
goto disable_clk;
}
- if ((node && of_property_read_bool(node, "usb3-lpm-capable")) ||
- (pdata && pdata->usb3_lpm_capable))
+ if (device_property_read_bool(&pdev->dev, "usb3-lpm-capable"))
xhci->quirks |= XHCI_LPM_SUPPORT;
if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
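
device_property_read_bool() resolves a named property through whichever firmware interface described the device, devicetree or ACPI _DSD, which is why the separate of_property_read_bool() and platform-data checks collapse into one call and the xhci_pdriver.h platform data can go away. The same idiom for any boolean property ("vendor,fast-mode" is a made-up name):

	if (device_property_read_bool(&pdev->dev, "vendor,fast-mode"))
		priv->fast_mode = true;	/* works for DT and ACPI alike */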
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index e262cccbc..797137e26 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -66,6 +66,7 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
+#include <linux/dma-mapping.h>
#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-mtk.h"
@@ -88,36 +89,25 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
return seg->dma + (segment_offset * sizeof(*trb));
}
-/* Does this link TRB point to the first segment in a ring,
- * or was the previous TRB the last TRB on the last segment in the ERST?
- */
-static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
- struct xhci_segment *seg, union xhci_trb *trb)
+static bool trb_is_link(union xhci_trb *trb)
{
- if (ring == xhci->event_ring)
- return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
- (seg->next == xhci->event_ring->first_seg);
- else
- return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
+ return TRB_TYPE_LINK_LE32(trb->link.control);
}
-/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
- * segment? I.e. would the updated event TRB pointer step off the end of the
- * event seg?
- */
-static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
- struct xhci_segment *seg, union xhci_trb *trb)
+static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
{
- if (ring == xhci->event_ring)
- return trb == &seg->trbs[TRBS_PER_SEGMENT];
- else
- return TRB_TYPE_LINK_LE32(trb->link.control);
+ return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
}
-static int enqueue_is_link_trb(struct xhci_ring *ring)
+static bool last_trb_on_ring(struct xhci_ring *ring,
+ struct xhci_segment *seg, union xhci_trb *trb)
{
- struct xhci_link_trb *link = &ring->enqueue->link;
- return TRB_TYPE_LINK_LE32(link->control);
+ return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
+}
+
+static bool link_trb_toggles_cycle(union xhci_trb *trb)
+{
+ return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}
/* Updates trb to point to the next TRB in the ring, and updates seg if the next
@@ -129,7 +119,7 @@ static void next_trb(struct xhci_hcd *xhci,
struct xhci_segment **seg,
union xhci_trb **trb)
{
- if (last_trb(xhci, ring, *seg, *trb)) {
+ if (trb_is_link(*trb)) {
*seg = (*seg)->next;
*trb = ((*seg)->trbs);
} else {
@@ -145,32 +135,29 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
ring->deq_updates++;
- /*
- * If this is not event ring, and the dequeue pointer
- * is not on a link TRB, there is one more usable TRB
- */
- if (ring->type != TYPE_EVENT &&
- !last_trb(xhci, ring, ring->deq_seg, ring->dequeue))
- ring->num_trbs_free++;
-
- do {
- /*
- * Update the dequeue pointer further if that was a link TRB or
- * we're at the end of an event ring segment (which doesn't have
- * link TRBS)
- */
- if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) {
- if (ring->type == TYPE_EVENT &&
- last_trb_on_last_seg(xhci, ring,
- ring->deq_seg, ring->dequeue)) {
- ring->cycle_state ^= 1;
- }
- ring->deq_seg = ring->deq_seg->next;
- ring->dequeue = ring->deq_seg->trbs;
- } else {
+ /* event ring doesn't have link trbs, check for last trb */
+ if (ring->type == TYPE_EVENT) {
+ if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
ring->dequeue++;
+ return;
}
- } while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue));
+ if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
+ ring->cycle_state ^= 1;
+ ring->deq_seg = ring->deq_seg->next;
+ ring->dequeue = ring->deq_seg->trbs;
+ return;
+ }
+
+ /* All other rings have link trbs */
+ if (!trb_is_link(ring->dequeue)) {
+ ring->dequeue++;
+ ring->num_trbs_free++;
+ }
+ while (trb_is_link(ring->dequeue)) {
+ ring->deq_seg = ring->deq_seg->next;
+ ring->dequeue = ring->deq_seg->trbs;
+ }
+ return;
}
/*
@@ -198,50 +185,42 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
/* If this is not event ring, there is one less usable TRB */
- if (ring->type != TYPE_EVENT &&
- !last_trb(xhci, ring, ring->enq_seg, ring->enqueue))
+ if (!trb_is_link(ring->enqueue))
ring->num_trbs_free--;
next = ++(ring->enqueue);
ring->enq_updates++;
- /* Update the dequeue pointer further if that was a link TRB or we're at
- * the end of an event ring segment (which doesn't have link TRBS)
- */
- while (last_trb(xhci, ring, ring->enq_seg, next)) {
- if (ring->type != TYPE_EVENT) {
- /*
- * If the caller doesn't plan on enqueueing more
- * TDs before ringing the doorbell, then we
- * don't want to give the link TRB to the
- * hardware just yet. We'll give the link TRB
- * back in prepare_ring() just before we enqueue
- * the TD at the top of the ring.
- */
- if (!chain && !more_trbs_coming)
- break;
+ /* Update the dequeue pointer further if that was a link TRB */
+ while (trb_is_link(next)) {
- /* If we're not dealing with 0.95 hardware or
- * isoc rings on AMD 0.96 host,
- * carry over the chain bit of the previous TRB
- * (which may mean the chain bit is cleared).
- */
- if (!(ring->type == TYPE_ISOC &&
- (xhci->quirks & XHCI_AMD_0x96_HOST))
- && !xhci_link_trb_quirk(xhci)) {
- next->link.control &=
- cpu_to_le32(~TRB_CHAIN);
- next->link.control |=
- cpu_to_le32(chain);
- }
- /* Give this link TRB to the hardware */
- wmb();
- next->link.control ^= cpu_to_le32(TRB_CYCLE);
+ /*
+ * If the caller doesn't plan on enqueueing more TDs before
+ * ringing the doorbell, then we don't want to give the link TRB
+ * to the hardware just yet. We'll give the link TRB back in
+ * prepare_ring() just before we enqueue the TD at the top of
+ * the ring.
+ */
+ if (!chain && !more_trbs_coming)
+ break;
- /* Toggle the cycle bit after the last ring segment. */
- if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
- ring->cycle_state ^= 1;
- }
+ /* If we're not dealing with 0.95 hardware or isoc rings on
+ * AMD 0.96 host, carry over the chain bit of the previous TRB
+ * (which may mean the chain bit is cleared).
+ */
+ if (!(ring->type == TYPE_ISOC &&
+ (xhci->quirks & XHCI_AMD_0x96_HOST)) &&
+ !xhci_link_trb_quirk(xhci)) {
+ next->link.control &= cpu_to_le32(~TRB_CHAIN);
+ next->link.control |= cpu_to_le32(chain);
}
+ /* Give this link TRB to the hardware */
+ wmb();
+ next->link.control ^= cpu_to_le32(TRB_CYCLE);
+
+ /* Toggle the cycle bit after the last ring segment. */
+ if (link_trb_toggles_cycle(next))
+ ring->cycle_state ^= 1;
+
ring->enq_seg = ring->enq_seg->next;
ring->enqueue = ring->enq_seg->trbs;
next = ring->enqueue;
@@ -626,6 +605,31 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
}
}
+void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ struct xhci_td *td)
+{
+ struct device *dev = xhci_to_hcd(xhci)->self.controller;
+ struct xhci_segment *seg = td->bounce_seg;
+ struct urb *urb = td->urb;
+
+ if (!seg || !urb)
+ return;
+
+ if (usb_urb_dir_out(urb)) {
+ dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
+ DMA_TO_DEVICE);
+ return;
+ }
+
+ /* for in transfers we need to copy the data from bounce to sg */
+ sg_pcopy_from_buffer(urb->sg, urb->num_mapped_sgs, seg->bounce_buf,
+ seg->bounce_len, seg->bounce_offs);
+ dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
+ DMA_FROM_DEVICE);
+ seg->bounce_len = 0;
+ seg->bounce_offs = 0;
+}
+
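
The unmap helper is direction-dependent: for OUT transfers the payload was already copied into the bounce buffer before it was mapped, so unmapping is enough; for IN transfers the received bytes must first be copied from the bounce buffer back into the URB's scatterlist. The sg helper doing that copy, restated (signature as in lib/scatterlist.c):

	/* copy buflen bytes from buf into the sg data, starting skip bytes in */
	size_t copied = sg_pcopy_from_buffer(urb->sg, urb->num_mapped_sgs,
					     seg->bounce_buf, seg->bounce_len,
					     seg->bounce_offs);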
/*
* When we get a command completion for a Stop Endpoint Command, we need to
* unlink any cancelled TDs from the ring. There are two ways to do that:
@@ -745,6 +749,9 @@ remove_finished_td:
/* Doesn't matter what we pass for status, since the core will
* just overwrite it (because the URB has been unlinked).
*/
+ ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
+ if (ep_ring && cur_td->bounce_seg)
+ xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td);
xhci_giveback_urb_in_irq(xhci, cur_td, 0);
/* Stop processing the cancelled list if the watchdog timer is
@@ -767,6 +774,9 @@ static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
list_del_init(&cur_td->td_list);
if (!list_empty(&cur_td->cancelled_td_list))
list_del_init(&cur_td->cancelled_td_list);
+
+ if (cur_td->bounce_seg)
+ xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);
xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
}
}
@@ -921,7 +931,7 @@ static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
* the dequeue pointer one segment further, or we'll jump off
* the segment into la-la-land.
*/
- if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) {
+ if (trb_is_link(ep_ring->dequeue)) {
ep_ring->deq_seg = ep_ring->deq_seg->next;
ep_ring->dequeue = ep_ring->deq_seg->trbs;
}
@@ -930,8 +940,7 @@ static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
/* We have more usable TRBs */
ep_ring->num_trbs_free++;
ep_ring->dequeue++;
- if (last_trb(xhci, ep_ring, ep_ring->deq_seg,
- ep_ring->dequeue)) {
+ if (trb_is_link(ep_ring->dequeue)) {
if (ep_ring->dequeue ==
dev->eps[ep_index].queued_deq_ptr)
break;
@@ -1870,6 +1879,10 @@ td_cleanup:
urb = td->urb;
urb_priv = urb->hcpriv;
+ /* if a bounce buffer was used to align this td then unmap it */
+ if (td->bounce_seg)
+ xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);
+
/* Do one last check of the actual transfer length.
* If the host controller said we transferred more data than the buffer
* length, urb->actual_length will be a very big number (since it's
@@ -2870,36 +2883,29 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
}
}
- if (enqueue_is_link_trb(ep_ring)) {
- struct xhci_ring *ring = ep_ring;
- union xhci_trb *next;
-
- next = ring->enqueue;
+ while (trb_is_link(ep_ring->enqueue)) {
+ /* If we're not dealing with 0.95 hardware or isoc rings
+ * on AMD 0.96 host, clear the chain bit.
+ */
+ if (!xhci_link_trb_quirk(xhci) &&
+ !(ep_ring->type == TYPE_ISOC &&
+ (xhci->quirks & XHCI_AMD_0x96_HOST)))
+ ep_ring->enqueue->link.control &=
+ cpu_to_le32(~TRB_CHAIN);
+ else
+ ep_ring->enqueue->link.control |=
+ cpu_to_le32(TRB_CHAIN);
- while (last_trb(xhci, ring, ring->enq_seg, next)) {
- /* If we're not dealing with 0.95 hardware or isoc rings
- * on AMD 0.96 host, clear the chain bit.
- */
- if (!xhci_link_trb_quirk(xhci) &&
- !(ring->type == TYPE_ISOC &&
- (xhci->quirks & XHCI_AMD_0x96_HOST)))
- next->link.control &= cpu_to_le32(~TRB_CHAIN);
- else
- next->link.control |= cpu_to_le32(TRB_CHAIN);
+ wmb();
+ ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
- wmb();
- next->link.control ^= cpu_to_le32(TRB_CYCLE);
+ /* Toggle the cycle bit after the last ring segment. */
+ if (link_trb_toggles_cycle(ep_ring->enqueue))
+ ep_ring->cycle_state ^= 1;
- /* Toggle the cycle bit after the last ring segment. */
- if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
- ring->cycle_state ^= 1;
- }
- ring->enq_seg = ring->enq_seg->next;
- ring->enqueue = ring->enq_seg->trbs;
- next = ring->enqueue;
- }
+ ep_ring->enq_seg = ep_ring->enq_seg->next;
+ ep_ring->enqueue = ep_ring->enq_seg->trbs;
}
-
return 0;
}
@@ -3097,7 +3103,7 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
*/
static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
int trb_buff_len, unsigned int td_total_len,
- struct urb *urb, unsigned int num_trbs_left)
+ struct urb *urb, bool more_trbs_coming)
{
u32 maxp, total_packet_count;
@@ -3106,7 +3112,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
return ((td_total_len - transferred) >> 10);
/* One TRB with a zero-length data packet. */
- if (num_trbs_left == 0 || (transferred == 0 && trb_buff_len == 0) ||
+ if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
trb_buff_len == td_total_len)
return 0;
@@ -3121,37 +3127,103 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
return (total_packet_count - ((transferred + trb_buff_len) / maxp));
}
+
+static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
+ u32 *trb_buff_len, struct xhci_segment *seg)
+{
+ struct device *dev = xhci_to_hcd(xhci)->self.controller;
+ unsigned int unalign;
+ unsigned int max_pkt;
+ u32 new_buff_len;
+
+ max_pkt = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
+ unalign = (enqd_len + *trb_buff_len) % max_pkt;
+
+ /* we got lucky, last normal TRB data on segment is packet aligned */
+ if (unalign == 0)
+ return 0;
+
+ xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n",
+ unalign, *trb_buff_len);
+
+ /* is the last normal TRB alignable by splitting it? */
+ if (*trb_buff_len > unalign) {
+ *trb_buff_len -= unalign;
+ xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len);
+ return 0;
+ }
+
+ /*
+ * We want enqd_len + trb_buff_len to sum to a value that is
+ * divisible by the endpoint's wMaxPacketSize. IOW:
+ * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
+ */
+ new_buff_len = max_pkt - (enqd_len % max_pkt);
+
+ if (new_buff_len > (urb->transfer_buffer_length - enqd_len))
+ new_buff_len = (urb->transfer_buffer_length - enqd_len);
+
+ /* create a bounce buffer of at most max_pkt bytes, pointed to by the last trb */
+ if (usb_urb_dir_out(urb)) {
+ sg_pcopy_to_buffer(urb->sg, urb->num_mapped_sgs,
+ seg->bounce_buf, new_buff_len, enqd_len);
+ seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
+ max_pkt, DMA_TO_DEVICE);
+ } else {
+ seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
+ max_pkt, DMA_FROM_DEVICE);
+ }
+
+ if (dma_mapping_error(dev, seg->bounce_dma)) {
+ /* try without aligning. Some host controllers survive */
+ xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n");
+ return 0;
+ }
+ *trb_buff_len = new_buff_len;
+ seg->bounce_len = new_buff_len;
+ seg->bounce_offs = enqd_len;
+
+ xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len);
+
+ return 1;
+}
+
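
The arithmetic in xhci_align_td() sizes the bounce buffer so that the bytes enqueued so far plus the bounce TRB end exactly on a wMaxPacketSize boundary. A worked example with illustrative values:

	/* max_pkt = 1024, enqd_len = 3000:
	 *   new_buff_len = 1024 - (3000 % 1024) = 1024 - 952 = 72
	 *   3000 + 72 = 3072 = 3 * 1024, so no packet straddles the
	 *   segment boundary. */
	static u32 bounce_len_for(u32 enqd_len, u32 max_pkt)
	{
		return max_pkt - (enqd_len % max_pkt);
	}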
/* This is very similar to what ehci-q.c qtd_fill() does */
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
{
- struct xhci_ring *ep_ring;
+ struct xhci_ring *ring;
struct urb_priv *urb_priv;
struct xhci_td *td;
struct xhci_generic_trb *start_trb;
struct scatterlist *sg = NULL;
- bool more_trbs_coming;
- bool zero_length_needed;
- unsigned int num_trbs, last_trb_num, i;
+ bool more_trbs_coming = true;
+ bool need_zero_pkt = false;
+ bool first_trb = true;
+ unsigned int num_trbs;
unsigned int start_cycle, num_sgs = 0;
- unsigned int running_total, block_len, trb_buff_len;
- unsigned int full_len;
- int ret;
+ unsigned int enqd_len, block_len, trb_buff_len, full_len;
+ int sent_len, ret;
u32 field, length_field, remainder;
- u64 addr;
+ u64 addr, send_addr;
- ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
- if (!ep_ring)
+ ring = xhci_urb_to_transfer_ring(xhci, urb);
+ if (!ring)
return -EINVAL;
+ full_len = urb->transfer_buffer_length;
/* If we have scatter/gather list, we use it. */
if (urb->num_sgs) {
num_sgs = urb->num_mapped_sgs;
sg = urb->sg;
+ addr = (u64) sg_dma_address(sg);
+ block_len = sg_dma_len(sg);
num_trbs = count_sg_trbs_needed(urb);
- } else
+ } else {
num_trbs = count_trbs_needed(urb);
-
+ addr = (u64) urb->transfer_dma;
+ block_len = full_len;
+ }
ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
num_trbs, urb, 0, mem_flags);
@@ -3160,20 +3232,9 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
urb_priv = urb->hcpriv;
- last_trb_num = num_trbs - 1;
-
/* Deal with URB_ZERO_PACKET - need one more td/trb */
- zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
- urb_priv->length == 2;
- if (zero_length_needed) {
- num_trbs++;
- xhci_dbg(xhci, "Creating zero length td.\n");
- ret = prepare_transfer(xhci, xhci->devs[slot_id],
- ep_index, urb->stream_id,
- 1, urb, 1, mem_flags);
- if (unlikely(ret < 0))
- return ret;
- }
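+	/*
+	 * Illustration (hypothetical transfer): a 1024-byte bulk OUT URB
+	 * on an endpoint with wMaxPacketSize 512 ends exactly on a packet
+	 * boundary, so with URB_ZERO_PACKET set a trailing zero-length TD
+	 * is queued after the data TRBs below.
+	 */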
+ if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->length > 1)
+ need_zero_pkt = true;
td = urb_priv->td[0];
@@ -3182,102 +3243,98 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
* until we've finished creating all the other TRBs. The ring's cycle
* state may change as we enqueue the other TRBs, so save it too.
*/
- start_trb = &ep_ring->enqueue->generic;
- start_cycle = ep_ring->cycle_state;
-
- full_len = urb->transfer_buffer_length;
- running_total = 0;
- block_len = 0;
+ start_trb = &ring->enqueue->generic;
+ start_cycle = ring->cycle_state;
+ send_addr = addr;
/* Queue the TRBs, even if they are zero-length */
- for (i = 0; i < num_trbs; i++) {
+ for (enqd_len = 0; first_trb || enqd_len < full_len;
+ enqd_len += trb_buff_len) {
field = TRB_TYPE(TRB_NORMAL);
- if (block_len == 0) {
- /* A new contiguous block. */
- if (sg) {
- addr = (u64) sg_dma_address(sg);
- block_len = sg_dma_len(sg);
- } else {
- addr = (u64) urb->transfer_dma;
- block_len = full_len;
- }
- /* TRB buffer should not cross 64KB boundaries */
- trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
- trb_buff_len = min_t(unsigned int,
- trb_buff_len,
- block_len);
- } else {
- /* Further through the contiguous block. */
- trb_buff_len = block_len;
- if (trb_buff_len > TRB_MAX_BUFF_SIZE)
- trb_buff_len = TRB_MAX_BUFF_SIZE;
- }
+ /* TRB buffer should not cross 64KB boundaries */
+ trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
+ trb_buff_len = min_t(unsigned int, trb_buff_len, block_len);
- if (running_total + trb_buff_len > full_len)
- trb_buff_len = full_len - running_total;
+ if (enqd_len + trb_buff_len > full_len)
+ trb_buff_len = full_len - enqd_len;
/* Don't change the cycle bit of the first TRB until later */
- if (i == 0) {
+ if (first_trb) {
+ first_trb = false;
if (start_cycle == 0)
field |= TRB_CYCLE;
} else
- field |= ep_ring->cycle_state;
+ field |= ring->cycle_state;
/* Chain all the TRBs together; clear the chain bit in the last
* TRB to indicate it's the last TRB in the chain.
*/
- if (i < last_trb_num) {
+ if (enqd_len + trb_buff_len < full_len) {
field |= TRB_CHAIN;
- } else {
- field |= TRB_IOC;
- if (i == last_trb_num)
- td->last_trb = ep_ring->enqueue;
- else if (zero_length_needed) {
- trb_buff_len = 0;
- urb_priv->td[1]->last_trb = ep_ring->enqueue;
+ if (trb_is_link(ring->enqueue + 1)) {
+ if (xhci_align_td(xhci, urb, enqd_len,
+ &trb_buff_len,
+ ring->enq_seg)) {
+ send_addr = ring->enq_seg->bounce_dma;
+ /* assuming TD won't span 2 segs */
+ td->bounce_seg = ring->enq_seg;
+ }
}
}
+ if (enqd_len + trb_buff_len >= full_len) {
+ field &= ~TRB_CHAIN;
+ field |= TRB_IOC;
+ more_trbs_coming = false;
+ td->last_trb = ring->enqueue;
+ }
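+		/*
+		 * Illustration: a TD split across three TRBs carries
+		 * TRB_CHAIN on the first two; the final TRB clears the
+		 * chain bit and sets TRB_IOC so the controller raises a
+		 * single transfer event for the whole TD.
+		 */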
/* Only set interrupt on short packet for IN endpoints */
if (usb_urb_dir_in(urb))
field |= TRB_ISP;
/* Set the TRB length, TD size, and interrupter fields. */
- remainder = xhci_td_remainder(xhci, running_total,
- trb_buff_len, full_len,
- urb, num_trbs - i - 1);
+ remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len,
+ full_len, urb, more_trbs_coming);
length_field = TRB_LEN(trb_buff_len) |
TRB_TD_SIZE(remainder) |
TRB_INTR_TARGET(0);
- if (i < num_trbs - 1)
- more_trbs_coming = true;
- else
- more_trbs_coming = false;
- queue_trb(xhci, ep_ring, more_trbs_coming,
- lower_32_bits(addr),
- upper_32_bits(addr),
+ queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt,
+ lower_32_bits(send_addr),
+ upper_32_bits(send_addr),
length_field,
field);
- running_total += trb_buff_len;
addr += trb_buff_len;
- block_len -= trb_buff_len;
-
- if (sg) {
- if (block_len == 0) {
- /* New sg entry */
- --num_sgs;
- if (num_sgs == 0)
- break;
+ sent_len = trb_buff_len;
+
+ while (sg && sent_len >= block_len) {
+ /* New sg entry */
+ --num_sgs;
+ sent_len -= block_len;
+ if (num_sgs != 0) {
sg = sg_next(sg);
+ block_len = sg_dma_len(sg);
+ addr = (u64) sg_dma_address(sg);
+ addr += sent_len;
}
}
+ block_len -= sent_len;
+ send_addr = addr;
+ }
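+	/*
+	 * Example of the scatterlist walk above (hypothetical lengths):
+	 * when the bounce-buffer path coalesces the 100-byte tail of one
+	 * sg entry with 50 bytes of the next (sent_len = 150), the loop
+	 * drops the exhausted entry and resumes 50 bytes into its
+	 * successor; otherwise sent_len never exceeds the current block.
+	 */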
+
+ if (need_zero_pkt) {
+ ret = prepare_transfer(xhci, xhci->devs[slot_id],
+ ep_index, urb->stream_id,
+ 1, urb, 1, mem_flags);
+ urb_priv->td[1]->last_trb = ring->enqueue;
+ field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
+ queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
}
- check_trb_math(urb, running_total);
+ check_trb_math(urb, enqd_len);
giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
start_cycle, start_trb);
return 0;
@@ -3671,7 +3728,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* Set the TRB length, TD size, & interrupter fields. */
remainder = xhci_td_remainder(xhci, running_total,
trb_buff_len, td_len,
- urb, trbs_per_td - j - 1);
+ urb, more_trbs_coming);
length_field = TRB_LEN(trb_buff_len) |
TRB_INTR_TARGET(0);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index f2f9518c5..01d96c9b3 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -490,8 +490,6 @@ static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
xhci->comp_mode_recovery_timer.expires = jiffies +
msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
- set_timer_slack(&xhci->comp_mode_recovery_timer,
- msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
add_timer(&xhci->comp_mode_recovery_timer);
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Compliance mode recovery timer initialized");
@@ -3139,6 +3137,7 @@ int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
struct xhci_input_control_ctx *ctrl_ctx;
unsigned int ep_index;
unsigned int num_stream_ctxs;
+ unsigned int max_packet;
unsigned long flags;
u32 changed_ep_bitmask = 0;
@@ -3212,9 +3211,11 @@ int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
for (i = 0; i < num_eps; i++) {
ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+ max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&eps[i]->desc));
vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
num_stream_ctxs,
- num_streams, mem_flags);
+ num_streams,
+ max_packet, mem_flags);
if (!vdev->eps[ep_index].stream_info)
goto cleanup;
/* Set maxPstreams in endpoint context and update deq ptr to
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index b0b8d0f87..b2c1dc5dc 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1347,6 +1347,11 @@ struct xhci_segment {
/* private to HCD */
struct xhci_segment *next;
dma_addr_t dma;
+	/* Max packet sized bounce buffer for td-fragment alignment */
+ dma_addr_t bounce_dma;
+ void *bounce_buf;
+ unsigned int bounce_offs;
+ unsigned int bounce_len;
};
struct xhci_td {
@@ -1356,6 +1361,7 @@ struct xhci_td {
struct xhci_segment *start_seg;
union xhci_trb *first_trb;
union xhci_trb *last_trb;
+ struct xhci_segment *bounce_seg;
/* actual_length of the URB has already been set */
bool urb_length_set;
};
@@ -1405,6 +1411,7 @@ struct xhci_ring {
unsigned int num_segs;
unsigned int num_trbs_free;
unsigned int num_trbs_free_temp;
+ unsigned int bounce_buf_len;
enum xhci_ring_type type;
bool last_td_was_short;
struct radix_tree_root *trb_address_map;
@@ -1807,7 +1814,8 @@ void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
unsigned int ep_index);
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs,
- unsigned int num_streams, gfp_t flags);
+ unsigned int num_streams,
+ unsigned int max_packet, gfp_t flags);
void xhci_free_stream_info(struct xhci_hcd *xhci,
struct xhci_stream_info *stream_info);
void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
diff --git a/drivers/usb/image/microtek.h b/drivers/usb/image/microtek.h
index ccce318f2..7e32ae787 100644
--- a/drivers/usb/image/microtek.h
+++ b/drivers/usb/image/microtek.h
@@ -13,11 +13,11 @@ typedef void (*mts_scsi_cmnd_callback)(struct scsi_cmnd *);
struct mts_transfer_context
{
- struct mts_desc* instance;
+ struct mts_desc *instance;
mts_scsi_cmnd_callback final_callback;
struct scsi_cmnd *srb;
- void* data;
+ void *data;
unsigned data_length;
int data_pipe;
int fragment;
@@ -38,7 +38,7 @@ struct mts_desc {
u8 ep_response;
u8 ep_image;
- struct Scsi_Host * host;
+ struct Scsi_Host *host;
struct urb *urb;
struct mts_transfer_context context;
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index e9e5ae521..eb8f8d37c 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -79,15 +79,6 @@ config USB_LCD
To compile this driver as a module, choose M here: the
module will be called usblcd.
-config USB_LED
- tristate "USB LED driver support"
- help
- Say Y here if you want to connect an USBLED device to your
- computer's USB port.
-
- To compile this driver as a module, choose M here: the
- module will be called usbled.
-
config USB_CYPRESS_CY7C63
tristate "Cypress CY7C63xxx USB driver support"
help
@@ -260,11 +251,12 @@ config USB_CHAOSKEY
tristate "ChaosKey random number generator driver support"
depends on HW_RANDOM
help
- Say Y here if you want to connect an AltusMetrum ChaosKey to
- your computer's USB port. The ChaosKey is a hardware random
- number generator which hooks into the kernel entropy pool to
- ensure a large supply of entropy for /dev/random and
- /dev/urandom and also provides direct access via /dev/chaoskeyX
+ Say Y here if you want to connect an AltusMetrum ChaosKey or
+ Araneus Alea I to your computer's USB port. These devices
+ are hardware random number generators which hook into the
+ kernel entropy pool to ensure a large supply of entropy for
+	  /dev/random and /dev/urandom and also provide direct access
+ via /dev/chaoskeyX
To compile this driver as a module, choose M here: the
module will be called chaoskey.
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index 2769cf635..3d79faaad 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_USB_IOWARRIOR) += iowarrior.o
obj-$(CONFIG_USB_ISIGHTFW) += isight_firmware.o
obj-$(CONFIG_USB_LCD) += usblcd.o
obj-$(CONFIG_USB_LD) += ldusb.o
-obj-$(CONFIG_USB_LED) += usbled.o
obj-$(CONFIG_USB_LEGOTOWER) += legousbtower.o
obj-$(CONFIG_USB_RIO500) += rio500.o
obj-$(CONFIG_USB_TEST) += usbtest.o
diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
index 76350e4ee..6ddd08a32 100644
--- a/drivers/usb/misc/chaoskey.c
+++ b/drivers/usb/misc/chaoskey.c
@@ -55,9 +55,13 @@ MODULE_LICENSE("GPL");
#define CHAOSKEY_VENDOR_ID 0x1d50 /* OpenMoko */
#define CHAOSKEY_PRODUCT_ID 0x60c6 /* ChaosKey */
+#define ALEA_VENDOR_ID 0x12d8 /* Araneus */
+#define ALEA_PRODUCT_ID 0x0001 /* Alea I */
+
#define CHAOSKEY_BUF_LEN 64 /* max size of USB full speed packet */
-#define NAK_TIMEOUT (HZ) /* stall/wait timeout for device */
+#define NAK_TIMEOUT (HZ) /* normal stall/wait timeout */
+#define ALEA_FIRST_TIMEOUT (HZ*3) /* first stall/wait timeout for Alea */
#ifdef CONFIG_USB_DYNAMIC_MINORS
#define USB_CHAOSKEY_MINOR_BASE 0
@@ -69,6 +73,7 @@ MODULE_LICENSE("GPL");
static const struct usb_device_id chaoskey_table[] = {
{ USB_DEVICE(CHAOSKEY_VENDOR_ID, CHAOSKEY_PRODUCT_ID) },
+ { USB_DEVICE(ALEA_VENDOR_ID, ALEA_PRODUCT_ID) },
{ },
};
MODULE_DEVICE_TABLE(usb, chaoskey_table);
@@ -84,6 +89,7 @@ struct chaoskey {
int open; /* open count */
bool present; /* device not disconnected */
bool reading; /* ongoing IO */
+ bool reads_started; /* track first read for Alea */
int size; /* size of buf */
int valid; /* bytes of buf read */
int used; /* bytes of buf consumed */
@@ -188,6 +194,9 @@ static int chaoskey_probe(struct usb_interface *interface,
dev->in_ep = in_ep;
+ if (udev->descriptor.idVendor != ALEA_VENDOR_ID)
+ dev->reads_started = 1;
+
dev->size = size;
dev->present = 1;
@@ -357,6 +366,7 @@ static int _chaoskey_fill(struct chaoskey *dev)
{
DEFINE_WAIT(wait);
int result;
+ bool started;
usb_dbg(dev->interface, "fill");
@@ -389,10 +399,17 @@ static int _chaoskey_fill(struct chaoskey *dev)
goto out;
}
+ /* The first read on the Alea takes a little under 2 seconds.
+ * Reads after the first read take only a few microseconds
+ * though. Presumably the entropy-generating circuit needs
+ * time to ramp up. So, we wait longer on the first read.
+ */
+ started = dev->reads_started;
+ dev->reads_started = true;
result = wait_event_interruptible_timeout(
dev->wait_q,
!dev->reading,
- NAK_TIMEOUT);
+		(started ? NAK_TIMEOUT : ALEA_FIRST_TIMEOUT));
if (result < 0)
goto out;
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index 52c27cab7..9b5b3b228 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -665,7 +665,7 @@ static ssize_t ftdi_elan_read(struct file *file, char __user *buffer,
{
char data[30 *3 + 4];
char *d = data;
- int m = (sizeof(data) - 1) / 3;
+ int m = (sizeof(data) - 1) / 3 - 1;
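+	/* Presumably the fix for an off-by-one: data[] holds 30*3 + 4 = 94
+	 * bytes and (94 - 1) / 3 = 31 three-byte " %02x" entries fill it
+	 * exactly, so any extra trailing marker would overrun; reserving
+	 * one entry (m = 30) leaves headroom.
+	 */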
int bytes_read = 0;
int retry_on_empty = 10;
int retry_on_timeout = 5;
@@ -1684,7 +1684,7 @@ wait:if (ftdi->disconnected > 0) {
int i = 0;
char data[30 *3 + 4];
char *d = data;
- int m = (sizeof(data) - 1) / 3;
+ int m = (sizeof(data) - 1) / 3 - 1;
int l = 0;
struct u132_target *target = &ftdi->target[ed];
struct u132_command *command = &ftdi->command[
@@ -1876,7 +1876,7 @@ more:{
if (packet_bytes > 2) {
char diag[30 *3 + 4];
char *d = diag;
- int m = (sizeof(diag) - 1) / 3;
+ int m = (sizeof(diag) - 1) / 3 - 1;
char *b = ftdi->bulk_in_buffer;
int bytes_read = 0;
diag[0] = 0;
@@ -2053,7 +2053,7 @@ static int ftdi_elan_synchronize(struct usb_ftdi *ftdi)
if (packet_bytes > 2) {
char diag[30 *3 + 4];
char *d = diag;
- int m = (sizeof(diag) - 1) / 3;
+ int m = (sizeof(diag) - 1) / 3 - 1;
char *b = ftdi->bulk_in_buffer;
int bytes_read = 0;
unsigned char c = 0;
@@ -2155,7 +2155,7 @@ more:{
if (packet_bytes > 2) {
char diag[30 *3 + 4];
char *d = diag;
- int m = (sizeof(diag) - 1) / 3;
+ int m = (sizeof(diag) - 1) / 3 - 1;
char *b = ftdi->bulk_in_buffer;
int bytes_read = 0;
diag[0] = 0;
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index 7771be3ac..4dd531ac5 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -898,24 +898,6 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
dev->interrupt_in_interval = interrupt_in_interval ? interrupt_in_interval : dev->interrupt_in_endpoint->bInterval;
dev->interrupt_out_interval = interrupt_out_interval ? interrupt_out_interval : dev->interrupt_out_endpoint->bInterval;
- /* we can register the device now, as it is ready */
- usb_set_intfdata (interface, dev);
-
- retval = usb_register_dev (interface, &tower_class);
-
- if (retval) {
- /* something prevented us from registering this driver */
- dev_err(idev, "Not able to get a minor for this device.\n");
- usb_set_intfdata (interface, NULL);
- goto error;
- }
- dev->minor = interface->minor;
-
- /* let the user know what node this device is now attached to */
- dev_info(&interface->dev, "LEGO USB Tower #%d now attached to major "
- "%d minor %d\n", (dev->minor - LEGO_USB_TOWER_MINOR_BASE),
- USB_MAJOR, dev->minor);
-
/* get the firmware version and log it */
result = usb_control_msg (udev,
usb_rcvctrlpipe(udev, 0),
@@ -936,6 +918,23 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
get_version_reply.minor,
le16_to_cpu(get_version_reply.build_no));
+ /* we can register the device now, as it is ready */
+ usb_set_intfdata (interface, dev);
+
+ retval = usb_register_dev (interface, &tower_class);
+
+ if (retval) {
+ /* something prevented us from registering this driver */
+ dev_err(idev, "Not able to get a minor for this device.\n");
+ usb_set_intfdata (interface, NULL);
+ goto error;
+ }
+ dev->minor = interface->minor;
+
+ /* let the user know what node this device is now attached to */
+ dev_info(&interface->dev, "LEGO USB Tower #%d now attached to major "
+ "%d minor %d\n", (dev->minor - LEGO_USB_TOWER_MINOR_BASE),
+ USB_MAJOR, dev->minor);
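+	/*
+	 * Ordering note: registering the device only after the firmware
+	 * query above presumably closes the window in which userspace
+	 * could open the new minor before the tower was fully set up.
+	 */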
exit:
return retval;
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 15666ad7c..02abfcdfb 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -1285,18 +1285,22 @@ int sisusb_readb(struct sisusb_usb_data *sisusb, u32 adr, u8 *data)
}
int sisusb_copy_memory(struct sisusb_usb_data *sisusb, char *src,
- u32 dest, int length, size_t *bytes_written)
+ u32 dest, int length)
{
+ size_t dummy;
+
return sisusb_write_mem_bulk(sisusb, dest, src, length,
- NULL, 0, bytes_written);
+ NULL, 0, &dummy);
}
#ifdef SISUSBENDIANTEST
-int sisusb_read_memory(struct sisusb_usb_data *sisusb, char *dest,
- u32 src, int length, size_t *bytes_written)
+static int sisusb_read_memory(struct sisusb_usb_data *sisusb, char *dest,
+ u32 src, int length)
{
+ size_t dummy;
+
return sisusb_read_mem_bulk(sisusb, src, dest, length,
- NULL, bytes_written);
+ NULL, &dummy);
}
#endif
#endif
@@ -1306,16 +1310,14 @@ static void sisusb_testreadwrite(struct sisusb_usb_data *sisusb)
{
static char srcbuffer[] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };
char destbuffer[10];
- size_t dummy;
int i, j;
- sisusb_copy_memory(sisusb, srcbuffer, sisusb->vrambase, 7, &dummy);
+ sisusb_copy_memory(sisusb, srcbuffer, sisusb->vrambase, 7);
for (i = 1; i <= 7; i++) {
dev_dbg(&sisusb->sisusb_dev->dev,
"sisusb: rwtest %d bytes\n", i);
- sisusb_read_memory(sisusb, destbuffer, sisusb->vrambase,
- i, &dummy);
+ sisusb_read_memory(sisusb, destbuffer, sisusb->vrambase, i);
for (j = 0; j < i; j++) {
dev_dbg(&sisusb->sisusb_dev->dev,
"rwtest read[%d] = %x\n",
@@ -2276,7 +2278,6 @@ int sisusb_reset_text_mode(struct sisusb_usb_data *sisusb, int init)
const struct font_desc *myfont;
u8 *tempbuf;
u16 *tempbufb;
- size_t written;
static const char bootstring[] =
"SiSUSB VGA text console, (C) 2005 Thomas Winischhofer.";
static const char bootlogo[] = "(o_ //\\ V_/_";
@@ -2343,18 +2344,15 @@ int sisusb_reset_text_mode(struct sisusb_usb_data *sisusb, int init)
*(tempbufb++) = 0x0700 | bootstring[i++];
ret |= sisusb_copy_memory(sisusb, tempbuf,
- sisusb->vrambase, 8192, &written);
+ sisusb->vrambase, 8192);
vfree(tempbuf);
}
} else if (sisusb->scrbuf) {
-
ret |= sisusb_copy_memory(sisusb, (char *)sisusb->scrbuf,
- sisusb->vrambase, sisusb->scrbuf_size,
- &written);
-
+ sisusb->vrambase, sisusb->scrbuf_size);
}
if (sisusb->sisusb_cursor_size_from >= 0 &&
diff --git a/drivers/usb/misc/sisusbvga/sisusb_con.c b/drivers/usb/misc/sisusbvga/sisusb_con.c
index afa853209..460cebf32 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_con.c
+++ b/drivers/usb/misc/sisusbvga/sisusb_con.c
@@ -370,7 +370,6 @@ static void
sisusbcon_putc(struct vc_data *c, int ch, int y, int x)
{
struct sisusb_usb_data *sisusb;
- ssize_t written;
sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num);
if (!sisusb)
@@ -384,7 +383,7 @@ sisusbcon_putc(struct vc_data *c, int ch, int y, int x)
sisusb_copy_memory(sisusb, (char *)SISUSB_VADDR(x, y),
- (long)SISUSB_HADDR(x, y), 2, &written);
+ (long)SISUSB_HADDR(x, y), 2);
mutex_unlock(&sisusb->lock);
}
@@ -395,7 +394,6 @@ sisusbcon_putcs(struct vc_data *c, const unsigned short *s,
int count, int y, int x)
{
struct sisusb_usb_data *sisusb;
- ssize_t written;
u16 *dest;
int i;
@@ -420,7 +418,7 @@ sisusbcon_putcs(struct vc_data *c, const unsigned short *s,
}
sisusb_copy_memory(sisusb, (char *)SISUSB_VADDR(x, y),
- (long)SISUSB_HADDR(x, y), count * 2, &written);
+ (long)SISUSB_HADDR(x, y), count * 2);
mutex_unlock(&sisusb->lock);
}
@@ -431,7 +429,6 @@ sisusbcon_clear(struct vc_data *c, int y, int x, int height, int width)
{
struct sisusb_usb_data *sisusb;
u16 eattr = c->vc_video_erase_char;
- ssize_t written;
int i, length, cols;
u16 *dest;
@@ -475,41 +472,7 @@ sisusbcon_clear(struct vc_data *c, int y, int x, int height, int width)
sisusb_copy_memory(sisusb, (unsigned char *)SISUSB_VADDR(x, y),
- (long)SISUSB_HADDR(x, y), length, &written);
-
- mutex_unlock(&sisusb->lock);
-}
-
-/* Interface routine */
-static void
-sisusbcon_bmove(struct vc_data *c, int sy, int sx,
- int dy, int dx, int height, int width)
-{
- struct sisusb_usb_data *sisusb;
- ssize_t written;
- int cols, length;
-
- if (width <= 0 || height <= 0)
- return;
-
- sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num);
- if (!sisusb)
- return;
-
- /* sisusb->lock is down */
-
- cols = sisusb->sisusb_num_columns;
-
- if (sisusb_is_inactive(c, sisusb)) {
- mutex_unlock(&sisusb->lock);
- return;
- }
-
- length = ((height * cols) - dx - (cols - width - dx)) * 2;
-
-
- sisusb_copy_memory(sisusb, (unsigned char *)SISUSB_VADDR(dx, dy),
- (long)SISUSB_HADDR(dx, dy), length, &written);
+ (long)SISUSB_HADDR(x, y), length);
mutex_unlock(&sisusb->lock);
}
@@ -519,7 +482,6 @@ static int
sisusbcon_switch(struct vc_data *c)
{
struct sisusb_usb_data *sisusb;
- ssize_t written;
int length;
/* Returnvalue 0 means we have fully restored screen,
@@ -559,7 +521,7 @@ sisusbcon_switch(struct vc_data *c)
sisusb_copy_memory(sisusb, (unsigned char *)c->vc_origin,
(long)SISUSB_HADDR(0, 0),
- length, &written);
+ length);
mutex_unlock(&sisusb->lock);
@@ -600,7 +562,7 @@ sisusbcon_save_screen(struct vc_data *c)
}
/* interface routine */
-static int
+static void
sisusbcon_set_palette(struct vc_data *c, const unsigned char *table)
{
struct sisusb_usb_data *sisusb;
@@ -608,18 +570,18 @@ sisusbcon_set_palette(struct vc_data *c, const unsigned char *table)
/* Return value not used by vt */
- if (!CON_IS_VISIBLE(c))
- return -EINVAL;
+ if (!con_is_visible(c))
+ return;
sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num);
if (!sisusb)
- return -EINVAL;
+ return;
/* sisusb->lock is down */
if (sisusb_is_inactive(c, sisusb)) {
mutex_unlock(&sisusb->lock);
- return -EINVAL;
+ return;
}
for (i = j = 0; i < 16; i++) {
@@ -634,8 +596,6 @@ sisusbcon_set_palette(struct vc_data *c, const unsigned char *table)
}
mutex_unlock(&sisusb->lock);
-
- return 0;
}
/* interface routine */
@@ -644,7 +604,6 @@ sisusbcon_blank(struct vc_data *c, int blank, int mode_switch)
{
struct sisusb_usb_data *sisusb;
u8 sr1, cr17, pmreg, cr63;
- ssize_t written;
int ret = 0;
sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num);
@@ -672,7 +631,7 @@ sisusbcon_blank(struct vc_data *c, int blank, int mode_switch)
(unsigned char *)c->vc_origin,
(u32)(sisusb->vrambase +
(c->vc_origin - sisusb->scrbuf)),
- c->vc_screenbuf_size, &written);
+ c->vc_screenbuf_size);
sisusb->con_blanked = 1;
ret = 1;
break;
@@ -723,24 +682,22 @@ sisusbcon_blank(struct vc_data *c, int blank, int mode_switch)
}
/* interface routine */
-static int
+static void
sisusbcon_scrolldelta(struct vc_data *c, int lines)
{
struct sisusb_usb_data *sisusb;
int margin = c->vc_size_row * 4;
int ul, we, p, st;
- /* The return value does not seem to be used */
-
sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num);
if (!sisusb)
- return 0;
+ return;
/* sisusb->lock is down */
if (sisusb_is_inactive(c, sisusb)) {
mutex_unlock(&sisusb->lock);
- return 0;
+ return;
}
if (!lines) /* Turn scrollback off */
@@ -780,8 +737,6 @@ sisusbcon_scrolldelta(struct vc_data *c, int lines)
sisusbcon_set_start_address(sisusb, c);
mutex_unlock(&sisusb->lock);
-
- return 1;
}
/* Interface routine */
@@ -860,7 +815,6 @@ sisusbcon_scroll_area(struct vc_data *c, struct sisusb_usb_data *sisusb,
int cols = sisusb->sisusb_num_columns;
int length = ((b - t) * cols) * 2;
u16 eattr = c->vc_video_erase_char;
- ssize_t written;
/* sisusb->lock is down */
@@ -890,7 +844,7 @@ sisusbcon_scroll_area(struct vc_data *c, struct sisusb_usb_data *sisusb,
}
sisusb_copy_memory(sisusb, (char *)SISUSB_VADDR(0, t),
- (long)SISUSB_HADDR(0, t), length, &written);
+ (long)SISUSB_HADDR(0, t), length);
mutex_unlock(&sisusb->lock);
@@ -903,7 +857,6 @@ sisusbcon_scroll(struct vc_data *c, int t, int b, int dir, int lines)
{
struct sisusb_usb_data *sisusb;
u16 eattr = c->vc_video_erase_char;
- ssize_t written;
int copyall = 0;
unsigned long oldorigin;
unsigned int delta = lines * c->vc_size_row;
@@ -996,18 +949,18 @@ sisusbcon_scroll(struct vc_data *c, int t, int b, int dir, int lines)
sisusb_copy_memory(sisusb,
(char *)c->vc_origin,
(u32)(sisusb->vrambase + originoffset),
- c->vc_screenbuf_size, &written);
+ c->vc_screenbuf_size);
else if (dir == SM_UP)
sisusb_copy_memory(sisusb,
(char *)c->vc_origin + c->vc_screenbuf_size - delta,
(u32)sisusb->vrambase + originoffset +
c->vc_screenbuf_size - delta,
- delta, &written);
+ delta);
else
sisusb_copy_memory(sisusb,
(char *)c->vc_origin,
(u32)(sisusb->vrambase + originoffset),
- delta, &written);
+ delta);
c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size;
c->vc_visible_origin = c->vc_origin;
@@ -1273,7 +1226,7 @@ sisusbcon_do_font_op(struct sisusb_usb_data *sisusb, int set, int slot,
struct vc_data *vc = vc_cons[i].d;
if (vc && vc->vc_sw == &sisusb_con) {
- if (CON_IS_VISIBLE(vc)) {
+ if (con_is_visible(vc)) {
vc->vc_sw->con_cursor(vc, CM_DRAW);
}
vc->vc_font.height = fh;
@@ -1385,7 +1338,6 @@ static const struct consw sisusb_con = {
.con_putcs = sisusbcon_putcs,
.con_cursor = sisusbcon_cursor,
.con_scroll = sisusbcon_scroll,
- .con_bmove = sisusbcon_bmove,
.con_switch = sisusbcon_switch,
.con_blank = sisusbcon_blank,
.con_font_set = sisusbcon_font_set,
@@ -1433,15 +1385,12 @@ static const struct consw sisusb_dummy_con = {
.con_putcs = SISUSBCONDUMMY,
.con_cursor = SISUSBCONDUMMY,
.con_scroll = SISUSBCONDUMMY,
- .con_bmove = SISUSBCONDUMMY,
.con_switch = SISUSBCONDUMMY,
.con_blank = SISUSBCONDUMMY,
.con_font_set = SISUSBCONDUMMY,
.con_font_get = SISUSBCONDUMMY,
.con_font_default = SISUSBCONDUMMY,
.con_font_copy = SISUSBCONDUMMY,
- .con_set_palette = SISUSBCONDUMMY,
- .con_scrolldelta = SISUSBCONDUMMY,
};
int
diff --git a/drivers/usb/misc/sisusbvga/sisusb_init.h b/drivers/usb/misc/sisusbvga/sisusb_init.h
index c46ce42d4..e79a616f0 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_init.h
+++ b/drivers/usb/misc/sisusbvga/sisusb_init.h
@@ -828,7 +828,7 @@ void sisusb_delete(struct kref *kref);
int sisusb_writeb(struct sisusb_usb_data *sisusb, u32 adr, u8 data);
int sisusb_readb(struct sisusb_usb_data *sisusb, u32 adr, u8 * data);
int sisusb_copy_memory(struct sisusb_usb_data *sisusb, char *src,
- u32 dest, int length, size_t * bytes_written);
+ u32 dest, int length);
int sisusb_reset_text_mode(struct sisusb_usb_data *sisusb, int init);
int sisusbcon_do_font_op(struct sisusb_usb_data *sisusb, int set, int slot,
u8 * arg, int cmapsz, int ch512, int dorecalc,
diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c
index b45cb77c0..8e7737d7a 100644
--- a/drivers/usb/misc/usb3503.c
+++ b/drivers/usb/misc/usb3503.c
@@ -330,6 +330,17 @@ static int usb3503_i2c_probe(struct i2c_client *i2c,
return usb3503_probe(hub);
}
+static int usb3503_i2c_remove(struct i2c_client *i2c)
+{
+ struct usb3503 *hub;
+
+ hub = i2c_get_clientdata(i2c);
+ if (hub->clk)
+ clk_disable_unprepare(hub->clk);
+
+ return 0;
+}
+
static int usb3503_platform_probe(struct platform_device *pdev)
{
struct usb3503 *hub;
@@ -338,10 +349,22 @@ static int usb3503_platform_probe(struct platform_device *pdev)
if (!hub)
return -ENOMEM;
hub->dev = &pdev->dev;
+ platform_set_drvdata(pdev, hub);
return usb3503_probe(hub);
}
+static int usb3503_platform_remove(struct platform_device *pdev)
+{
+ struct usb3503 *hub;
+
+ hub = platform_get_drvdata(pdev);
+ if (hub->clk)
+ clk_disable_unprepare(hub->clk);
+
+ return 0;
+}
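+/*
+ * Both remove callbacks balance the clock that usb3503_probe()
+ * presumably enabled via clk_prepare_enable(); without them the hub
+ * clock would stay on after driver unbind.
+ */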
+
#ifdef CONFIG_PM_SLEEP
static int usb3503_i2c_suspend(struct device *dev)
{
@@ -395,6 +418,7 @@ static struct i2c_driver usb3503_i2c_driver = {
.of_match_table = of_match_ptr(usb3503_of_match),
},
.probe = usb3503_i2c_probe,
+ .remove = usb3503_i2c_remove,
.id_table = usb3503_id,
};
@@ -404,6 +428,7 @@ static struct platform_driver usb3503_platform_driver = {
.of_match_table = of_match_ptr(usb3503_of_match),
},
.probe = usb3503_platform_probe,
+ .remove = usb3503_platform_remove,
};
static int __init usb3503_init(void)
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 886526b5f..73cfa13fc 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -87,7 +87,7 @@ config USB_MUSB_DA8XX
config USB_MUSB_TUSB6010
tristate "TUSB6010"
depends on HAS_IOMEM
- depends on ARCH_OMAP2PLUS || COMPILE_TEST
+ depends on (ARCH_OMAP2PLUS || COMPILE_TEST) && !BLACKFIN
depends on NOP_USB_XCEIV = USB_MUSB_HDRC # both built-in or both modules
config USB_MUSB_OMAP2PLUS
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
index f95befe18..689d42aba 100644
--- a/drivers/usb/musb/Makefile
+++ b/drivers/usb/musb/Makefile
@@ -2,9 +2,12 @@
# for USB OTG silicon based on Mentor Graphics INVENTRA designs
#
+# define_trace.h needs to know how to find our header
+CFLAGS_musb_trace.o := -I$(src)
+
obj-$(CONFIG_USB_MUSB_HDRC) += musb_hdrc.o
-musb_hdrc-y := musb_core.o
+musb_hdrc-y := musb_core.o musb_trace.o
musb_hdrc-$(CONFIG_USB_MUSB_HOST)$(CONFIG_USB_MUSB_DUAL_ROLE) += musb_virthub.o musb_host.o
musb_hdrc-$(CONFIG_USB_MUSB_GADGET)$(CONFIG_USB_MUSB_DUAL_ROLE) += musb_gadget_ep0.o musb_gadget.o
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index cc134109b..1ae48e64e 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -14,6 +14,7 @@
#include "musb_core.h"
#include "musb_debug.h"
#include "cppi_dma.h"
+#include "davinci.h"
/* CPPI DMA status 7-mar-2006:
@@ -232,7 +233,7 @@ static void cppi_controller_stop(struct cppi *controller)
musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG,
DAVINCI_DMA_ALL_CHANNELS_ENABLE);
- dev_dbg(musb->controller, "Tearing down RX and TX Channels\n");
+ musb_dbg(musb, "Tearing down RX and TX Channels");
for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
/* FIXME restructure of txdma to use bds like rxdma */
controller->tx[i].last_processed = NULL;
@@ -297,13 +298,13 @@ cppi_channel_allocate(struct dma_controller *c,
*/
if (transmit) {
if (index >= ARRAY_SIZE(controller->tx)) {
- dev_dbg(musb->controller, "no %cX%d CPPI channel\n", 'T', index);
+ musb_dbg(musb, "no %cX%d CPPI channel", 'T', index);
return NULL;
}
cppi_ch = controller->tx + index;
} else {
if (index >= ARRAY_SIZE(controller->rx)) {
- dev_dbg(musb->controller, "no %cX%d CPPI channel\n", 'R', index);
+ musb_dbg(musb, "no %cX%d CPPI channel", 'R', index);
return NULL;
}
cppi_ch = controller->rx + index;
@@ -314,13 +315,13 @@ cppi_channel_allocate(struct dma_controller *c,
* with the other DMA engine too
*/
if (cppi_ch->hw_ep)
- dev_dbg(musb->controller, "re-allocating DMA%d %cX channel %p\n",
+ musb_dbg(musb, "re-allocating DMA%d %cX channel %p",
index, transmit ? 'T' : 'R', cppi_ch);
cppi_ch->hw_ep = ep;
cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
cppi_ch->channel.max_len = 0x7fffffff;
- dev_dbg(musb->controller, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
+ musb_dbg(musb, "Allocate CPPI%d %cX", index, transmit ? 'T' : 'R');
return &cppi_ch->channel;
}
@@ -335,8 +336,8 @@ static void cppi_channel_release(struct dma_channel *channel)
c = container_of(channel, struct cppi_channel, channel);
tibase = c->controller->tibase;
if (!c->hw_ep)
- dev_dbg(c->controller->musb->controller,
- "releasing idle DMA channel %p\n", c);
+ musb_dbg(c->controller->musb,
+ "releasing idle DMA channel %p", c);
else if (!c->transmit)
core_rxirq_enable(tibase, c->index + 1);
@@ -354,11 +355,10 @@ cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
musb_ep_select(base, c->index + 1);
- dev_dbg(c->controller->musb->controller,
+ musb_dbg(c->controller->musb,
"RX DMA%d%s: %d left, csr %04x, "
"%08x H%08x S%08x C%08x, "
- "B%08x L%08x %08x .. %08x"
- "\n",
+ "B%08x L%08x %08x .. %08x",
c->index, tag,
musb_readl(c->controller->tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index),
@@ -385,11 +385,10 @@ cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
musb_ep_select(base, c->index + 1);
- dev_dbg(c->controller->musb->controller,
+ musb_dbg(c->controller->musb,
"TX DMA%d%s: csr %04x, "
"H%08x S%08x C%08x %08x, "
- "F%08x L%08x .. %08x"
- "\n",
+ "F%08x L%08x .. %08x",
c->index, tag,
musb_readw(c->hw_ep->regs, MUSB_TXCSR),
@@ -590,7 +589,7 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
length = min(n_bds * maxpacket, length);
}
- dev_dbg(musb->controller, "TX DMA%d, pktSz %d %s bds %d dma 0x%llx len %u\n",
+ musb_dbg(musb, "TX DMA%d, pktSz %d %s bds %d dma 0x%llx len %u",
tx->index,
maxpacket,
rndis ? "rndis" : "transparent",
@@ -647,7 +646,7 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
bd->hw_options |= CPPI_ZERO_SET;
}
- dev_dbg(musb->controller, "TXBD %p: nxt %08x buf %08x len %04x opt %08x\n",
+ musb_dbg(musb, "TXBD %p: nxt %08x buf %08x len %04x opt %08x",
bd, bd->hw_next, bd->hw_bufp,
bd->hw_off_len, bd->hw_options);
@@ -813,8 +812,8 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
length = min(n_bds * maxpacket, length);
- dev_dbg(musb->controller, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
- "dma 0x%llx len %u %u/%u\n",
+ musb_dbg(musb, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
+ "dma 0x%llx len %u %u/%u",
rx->index, maxpacket,
onepacket
? (is_rndis ? "rndis" : "onepacket")
@@ -924,7 +923,7 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
& 0xffff;
if (i < (2 + n_bds)) {
- dev_dbg(musb->controller, "bufcnt%d underrun - %d (for %d)\n",
+ musb_dbg(musb, "bufcnt%d underrun - %d (for %d)",
rx->index, i, n_bds);
musb_writel(tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
@@ -973,7 +972,7 @@ static int cppi_channel_program(struct dma_channel *ch,
/* WARN_ON(1); */
break;
case MUSB_DMA_STATUS_UNKNOWN:
- dev_dbg(musb->controller, "%cX DMA%d not allocated!\n",
+ musb_dbg(musb, "%cX DMA%d not allocated!",
cppi_ch->transmit ? 'T' : 'R',
cppi_ch->index);
/* FALLTHROUGH */
@@ -1029,8 +1028,8 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
if (!completed && (bd->hw_options & CPPI_OWN_SET))
break;
- dev_dbg(musb->controller, "C/RXBD %llx: nxt %08x buf %08x "
- "off.len %08x opt.len %08x (%d)\n",
+ musb_dbg(musb, "C/RXBD %llx: nxt %08x buf %08x "
+ "off.len %08x opt.len %08x (%d)",
(unsigned long long)bd->dma, bd->hw_next, bd->hw_bufp,
bd->hw_off_len, bd->hw_options,
rx->channel.actual_len);
@@ -1051,7 +1050,7 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
* CPPI ignores those BDs even though OWN is still set.
*/
completed = true;
- dev_dbg(musb->controller, "rx short %d/%d (%d)\n",
+ musb_dbg(musb, "rx short %d/%d (%d)",
len, bd->buflen,
rx->channel.actual_len);
}
@@ -1101,7 +1100,7 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
musb_ep_select(cppi->mregs, rx->index + 1);
csr = musb_readw(regs, MUSB_RXCSR);
if (csr & MUSB_RXCSR_DMAENAB) {
- dev_dbg(musb->controller, "list%d %p/%p, last %llx%s, csr %04x\n",
+ musb_dbg(musb, "list%d %p/%p, last %llx%s, csr %04x",
rx->index,
rx->head, rx->tail,
rx->last_processed
@@ -1164,7 +1163,7 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
return IRQ_NONE;
}
- dev_dbg(musb->controller, "CPPI IRQ Tx%x Rx%x\n", tx, rx);
+ musb_dbg(musb, "CPPI IRQ Tx%x Rx%x", tx, rx);
/* process TX channels */
for (index = 0; tx; tx = tx >> 1, index++) {
@@ -1192,7 +1191,7 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
* that needs to be acknowledged.
*/
if (NULL == bd) {
- dev_dbg(musb->controller, "null BD\n");
+ musb_dbg(musb, "null BD");
musb_writel(&tx_ram->tx_complete, 0, 0);
continue;
}
@@ -1207,7 +1206,7 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
if (bd->hw_options & CPPI_OWN_SET)
break;
- dev_dbg(musb->controller, "C/TXBD %p n %x b %x off %x opt %x\n",
+ musb_dbg(musb, "C/TXBD %p n %x b %x off %x opt %x",
bd, bd->hw_next, bd->hw_bufp,
bd->hw_off_len, bd->hw_options);
diff --git a/drivers/usb/musb/cppi_dma.h b/drivers/usb/musb/cppi_dma.h
index 59bf949e5..7fdfb71a8 100644
--- a/drivers/usb/musb/cppi_dma.h
+++ b/drivers/usb/musb/cppi_dma.h
@@ -7,17 +7,10 @@
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/dmapool.h>
+#include <linux/dmaengine.h>
-#include "musb_dma.h"
#include "musb_core.h"
-
-
-/* FIXME fully isolate CPPI from DaVinci ... the "CPPI generic" registers
- * would seem to be shared with the TUSB6020 (over VLYNQ).
- */
-
-#include "davinci.h"
-
+#include "musb_dma.h"
/* CPPI RX/TX state RAM */
@@ -131,4 +124,24 @@ struct cppi {
/* CPPI IRQ handler */
extern irqreturn_t cppi_interrupt(int, void *);
+struct cppi41_dma_channel {
+ struct dma_channel channel;
+ struct cppi41_dma_controller *controller;
+ struct musb_hw_ep *hw_ep;
+ struct dma_chan *dc;
+ dma_cookie_t cookie;
+ u8 port_num;
+ u8 is_tx;
+ u8 is_allocated;
+ u8 usb_toggle;
+
+ dma_addr_t buf_addr;
+ u32 total_len;
+ u32 prog_len;
+ u32 transferred;
+ u32 packet_sz;
+ struct list_head tx_check;
+ int tx_zlp;
+};
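+/*
+ * Exposed in this header (rather than musb_cppi41.c) presumably so the
+ * new musb_trace events can dereference the channel fields directly.
+ */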
+
#endif /* end of ifndef _CPPI_DMA_H_ */
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index f824336de..74fc3069c 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -102,6 +102,7 @@
#include <linux/usb.h>
#include "musb_core.h"
+#include "musb_trace.h"
#define TA_WAIT_BCON(m) max_t(int, (m)->a_wait_bcon, OTG_TIME_A_WAIT_BCON)
@@ -258,31 +259,43 @@ static u32 musb_default_busctl_offset(u8 epnum, u16 offset)
static u8 musb_default_readb(const void __iomem *addr, unsigned offset)
{
- return __raw_readb(addr + offset);
+ u8 data = __raw_readb(addr + offset);
+
+ trace_musb_readb(__builtin_return_address(0), addr, offset, data);
+ return data;
}
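+/*
+ * With these wrappers every register access is visible as a trace
+ * event; e.g. (tracefs path assumed from the standard layout)
+ * echo 1 > /sys/kernel/debug/tracing/events/musb/musb_readb/enable
+ * logs caller, base address, offset and data for each 8-bit read.
+ */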
static void musb_default_writeb(void __iomem *addr, unsigned offset, u8 data)
{
+ trace_musb_writeb(__builtin_return_address(0), addr, offset, data);
__raw_writeb(data, addr + offset);
}
static u16 musb_default_readw(const void __iomem *addr, unsigned offset)
{
- return __raw_readw(addr + offset);
+ u16 data = __raw_readw(addr + offset);
+
+ trace_musb_readw(__builtin_return_address(0), addr, offset, data);
+ return data;
}
static void musb_default_writew(void __iomem *addr, unsigned offset, u16 data)
{
+ trace_musb_writew(__builtin_return_address(0), addr, offset, data);
__raw_writew(data, addr + offset);
}
static u32 musb_default_readl(const void __iomem *addr, unsigned offset)
{
- return __raw_readl(addr + offset);
+ u32 data = __raw_readl(addr + offset);
+
+ trace_musb_readl(__builtin_return_address(0), addr, offset, data);
+ return data;
}
static void musb_default_writel(void __iomem *addr, unsigned offset, u32 data)
{
+ trace_musb_writel(__builtin_return_address(0), addr, offset, data);
__raw_writel(data, addr + offset);
}
@@ -461,20 +474,21 @@ static void musb_otg_timer_func(unsigned long data)
spin_lock_irqsave(&musb->lock, flags);
switch (musb->xceiv->otg->state) {
case OTG_STATE_B_WAIT_ACON:
- dev_dbg(musb->controller, "HNP: b_wait_acon timeout; back to b_peripheral\n");
+ musb_dbg(musb,
+ "HNP: b_wait_acon timeout; back to b_peripheral");
musb_g_disconnect(musb);
musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
musb->is_active = 0;
break;
case OTG_STATE_A_SUSPEND:
case OTG_STATE_A_WAIT_BCON:
- dev_dbg(musb->controller, "HNP: %s timeout\n",
+ musb_dbg(musb, "HNP: %s timeout",
usb_otg_state_string(musb->xceiv->otg->state));
musb_platform_set_vbus(musb, 0);
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
break;
default:
- dev_dbg(musb->controller, "HNP: Unhandled mode %s\n",
+ musb_dbg(musb, "HNP: Unhandled mode %s",
usb_otg_state_string(musb->xceiv->otg->state));
}
spin_unlock_irqrestore(&musb->lock, flags);
@@ -489,17 +503,17 @@ void musb_hnp_stop(struct musb *musb)
void __iomem *mbase = musb->mregs;
u8 reg;
- dev_dbg(musb->controller, "HNP: stop from %s\n",
+ musb_dbg(musb, "HNP: stop from %s",
usb_otg_state_string(musb->xceiv->otg->state));
switch (musb->xceiv->otg->state) {
case OTG_STATE_A_PERIPHERAL:
musb_g_disconnect(musb);
- dev_dbg(musb->controller, "HNP: back to %s\n",
+ musb_dbg(musb, "HNP: back to %s",
usb_otg_state_string(musb->xceiv->otg->state));
break;
case OTG_STATE_B_HOST:
- dev_dbg(musb->controller, "HNP: Disabling HR\n");
+ musb_dbg(musb, "HNP: Disabling HR");
if (hcd)
hcd->self.is_b_host = 0;
musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
@@ -510,7 +524,7 @@ void musb_hnp_stop(struct musb *musb)
/* REVISIT: Start SESSION_REQUEST here? */
break;
default:
- dev_dbg(musb->controller, "HNP: Stopping in unknown state %s\n",
+ musb_dbg(musb, "HNP: Stopping in unknown state %s",
usb_otg_state_string(musb->xceiv->otg->state));
}
@@ -541,8 +555,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
{
irqreturn_t handled = IRQ_NONE;
- dev_dbg(musb->controller, "<== DevCtl=%02x, int_usb=0x%x\n", devctl,
- int_usb);
+ musb_dbg(musb, "<== DevCtl=%02x, int_usb=0x%x", devctl, int_usb);
/* in host mode, the peripheral may issue remote wakeup.
* in peripheral mode, the host may resume the link.
@@ -550,7 +563,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
*/
if (int_usb & MUSB_INTR_RESUME) {
handled = IRQ_HANDLED;
- dev_dbg(musb->controller, "RESUME (%s)\n",
+ musb_dbg(musb, "RESUME (%s)",
usb_otg_state_string(musb->xceiv->otg->state));
if (devctl & MUSB_DEVCTL_HM) {
@@ -619,11 +632,11 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS
&& (devctl & MUSB_DEVCTL_BDEVICE)) {
- dev_dbg(musb->controller, "SessReq while on B state\n");
+ musb_dbg(musb, "SessReq while on B state");
return IRQ_HANDLED;
}
- dev_dbg(musb->controller, "SESSION_REQUEST (%s)\n",
+ musb_dbg(musb, "SESSION_REQUEST (%s)",
usb_otg_state_string(musb->xceiv->otg->state));
/* IRQ arrives from ID pin sense or (later, if VBUS power
@@ -714,7 +727,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
}
if (int_usb & MUSB_INTR_SUSPEND) {
- dev_dbg(musb->controller, "SUSPEND (%s) devctl %02x\n",
+ musb_dbg(musb, "SUSPEND (%s) devctl %02x",
usb_otg_state_string(musb->xceiv->otg->state), devctl);
handled = IRQ_HANDLED;
@@ -743,7 +756,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
musb->is_active = musb->g.b_hnp_enable;
if (musb->is_active) {
musb->xceiv->otg->state = OTG_STATE_B_WAIT_ACON;
- dev_dbg(musb->controller, "HNP: Setting timer for b_ase0_brst\n");
+ musb_dbg(musb, "HNP: Setting timer for b_ase0_brst");
mod_timer(&musb->otg_timer, jiffies
+ msecs_to_jiffies(
OTG_TIME_B_ASE0_BRST));
@@ -760,7 +773,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
break;
case OTG_STATE_B_HOST:
/* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
- dev_dbg(musb->controller, "REVISIT: SUSPEND as B_HOST\n");
+ musb_dbg(musb, "REVISIT: SUSPEND as B_HOST");
break;
default:
/* "should not happen" */
@@ -797,14 +810,14 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
switch (musb->xceiv->otg->state) {
case OTG_STATE_B_PERIPHERAL:
if (int_usb & MUSB_INTR_SUSPEND) {
- dev_dbg(musb->controller, "HNP: SUSPEND+CONNECT, now b_host\n");
+ musb_dbg(musb, "HNP: SUSPEND+CONNECT, now b_host");
int_usb &= ~MUSB_INTR_SUSPEND;
goto b_host;
} else
- dev_dbg(musb->controller, "CONNECT as b_peripheral???\n");
+ musb_dbg(musb, "CONNECT as b_peripheral???");
break;
case OTG_STATE_B_WAIT_ACON:
- dev_dbg(musb->controller, "HNP: CONNECT, now b_host\n");
+ musb_dbg(musb, "HNP: CONNECT, now b_host");
b_host:
musb->xceiv->otg->state = OTG_STATE_B_HOST;
if (musb->hcd)
@@ -823,12 +836,12 @@ b_host:
musb_host_poke_root_hub(musb);
- dev_dbg(musb->controller, "CONNECT (%s) devctl %02x\n",
+ musb_dbg(musb, "CONNECT (%s) devctl %02x",
usb_otg_state_string(musb->xceiv->otg->state), devctl);
}
if (int_usb & MUSB_INTR_DISCONNECT) {
- dev_dbg(musb->controller, "DISCONNECT (%s) as %s, devctl %02x\n",
+ musb_dbg(musb, "DISCONNECT (%s) as %s, devctl %02x",
usb_otg_state_string(musb->xceiv->otg->state),
MUSB_MODE(musb), devctl);
handled = IRQ_HANDLED;
@@ -891,7 +904,7 @@ b_host:
if (is_host_active(musb))
musb_recover_from_babble(musb);
} else {
- dev_dbg(musb->controller, "BUS RESET as %s\n",
+ musb_dbg(musb, "BUS RESET as %s",
usb_otg_state_string(musb->xceiv->otg->state));
switch (musb->xceiv->otg->state) {
case OTG_STATE_A_SUSPEND:
@@ -899,7 +912,7 @@ b_host:
/* FALLTHROUGH */
case OTG_STATE_A_WAIT_BCON: /* OPT TD.4.7-900ms */
/* never use invalid T(a_wait_bcon) */
- dev_dbg(musb->controller, "HNP: in %s, %d msec timeout\n",
+ musb_dbg(musb, "HNP: in %s, %d msec timeout",
usb_otg_state_string(musb->xceiv->otg->state),
TA_WAIT_BCON(musb));
mod_timer(&musb->otg_timer, jiffies
@@ -910,7 +923,7 @@ b_host:
musb_g_reset(musb);
break;
case OTG_STATE_B_WAIT_ACON:
- dev_dbg(musb->controller, "HNP: RESET (%s), to b_peripheral\n",
+ musb_dbg(musb, "HNP: RESET (%s), to b_peripheral",
usb_otg_state_string(musb->xceiv->otg->state));
musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
musb_g_reset(musb);
@@ -922,7 +935,7 @@ b_host:
musb_g_reset(musb);
break;
default:
- dev_dbg(musb->controller, "Unhandled BUS RESET as %s\n",
+ musb_dbg(musb, "Unhandled BUS RESET as %s",
usb_otg_state_string(musb->xceiv->otg->state));
}
}
@@ -1030,7 +1043,7 @@ void musb_start(struct musb *musb)
u8 devctl = musb_readb(regs, MUSB_DEVCTL);
u8 power;
- dev_dbg(musb->controller, "<== devctl %02x\n", devctl);
+ musb_dbg(musb, "<== devctl %02x", devctl);
musb_enable_interrupts(musb);
musb_writeb(regs, MUSB_TESTMODE, 0);
@@ -1078,7 +1091,7 @@ void musb_stop(struct musb *musb)
/* stop IRQs, timers, ... */
musb_platform_disable(musb);
musb_generic_disable(musb);
- dev_dbg(musb->controller, "HDRC disabled\n");
+ musb_dbg(musb, "HDRC disabled");
/* FIXME
* - mark host and/or peripheral drivers unusable/inactive
@@ -1391,7 +1404,7 @@ static int ep_config_from_hw(struct musb *musb)
void __iomem *mbase = musb->mregs;
int ret = 0;
- dev_dbg(musb->controller, "<== static silicon ep config\n");
+ musb_dbg(musb, "<== static silicon ep config");
/* FIXME pick up ep0 maxpacket size */
@@ -1532,8 +1545,7 @@ static int musb_core_init(u16 musb_type, struct musb *musb)
hw_ep->tx_reinit = 1;
if (hw_ep->max_packet_sz_tx) {
- dev_dbg(musb->controller,
- "%s: hw_ep %d%s, %smax %d\n",
+ musb_dbg(musb, "%s: hw_ep %d%s, %smax %d",
musb_driver_name, i,
hw_ep->is_shared_fifo ? "shared" : "tx",
hw_ep->tx_double_buffered
@@ -1541,8 +1553,7 @@ static int musb_core_init(u16 musb_type, struct musb *musb)
hw_ep->max_packet_sz_tx);
}
if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) {
- dev_dbg(musb->controller,
- "%s: hw_ep %d%s, %smax %d\n",
+ musb_dbg(musb, "%s: hw_ep %d%s, %smax %d",
musb_driver_name, i,
"rx",
hw_ep->rx_double_buffered
@@ -1550,7 +1561,7 @@ static int musb_core_init(u16 musb_type, struct musb *musb)
hw_ep->max_packet_sz_rx);
}
if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx))
- dev_dbg(musb->controller, "hw_ep %d not configured\n", i);
+ musb_dbg(musb, "hw_ep %d not configured", i);
}
return 0;
@@ -1577,9 +1588,7 @@ irqreturn_t musb_interrupt(struct musb *musb)
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
- dev_dbg(musb->controller, "** IRQ %s usb%04x tx%04x rx%04x\n",
- is_host_active(musb) ? "host" : "peripheral",
- musb->int_usb, musb->int_tx, musb->int_rx);
+ trace_musb_isr(musb);
/**
* According to Mentor Graphics' documentation, flowchart on page 98,
@@ -1976,7 +1985,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
* Fail when the board needs a feature that's not enabled.
*/
if (!plat) {
- dev_dbg(dev, "no platform_data?\n");
+ dev_err(dev, "no platform_data?\n");
status = -ENODEV;
goto fail0;
}
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index e499b862a..d4d7c56b4 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -5,7 +5,9 @@
#include <linux/platform_device.h>
#include <linux/of.h>
+#include "cppi_dma.h"
#include "musb_core.h"
+#include "musb_trace.h"
#define RNDIS_REG(x) (0x80 + ((x - 1) * 4))
@@ -22,26 +24,6 @@
#define USB_CTRL_AUTOREQ 0xd0
#define USB_TDOWN 0xd8
-struct cppi41_dma_channel {
- struct dma_channel channel;
- struct cppi41_dma_controller *controller;
- struct musb_hw_ep *hw_ep;
- struct dma_chan *dc;
- dma_cookie_t cookie;
- u8 port_num;
- u8 is_tx;
- u8 is_allocated;
- u8 usb_toggle;
-
- dma_addr_t buf_addr;
- u32 total_len;
- u32 prog_len;
- u32 transferred;
- u32 packet_sz;
- struct list_head tx_check;
- int tx_zlp;
-};
-
#define MUSB_DMA_NUM_CHANNELS 15
struct cppi41_dma_controller {
@@ -96,8 +78,8 @@ static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
if (!toggle && toggle == cppi41_channel->usb_toggle) {
csr |= MUSB_RXCSR_H_DATATOGGLE | MUSB_RXCSR_H_WR_DATATOGGLE;
musb_writew(cppi41_channel->hw_ep->regs, MUSB_RXCSR, csr);
- dev_dbg(cppi41_channel->controller->musb->controller,
- "Restoring DATA1 toggle.\n");
+ musb_dbg(cppi41_channel->controller->musb,
+ "Restoring DATA1 toggle.");
}
cppi41_channel->usb_toggle = toggle;
@@ -145,6 +127,8 @@ static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
csr = MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY;
musb_writew(epio, MUSB_TXCSR, csr);
}
+
+ trace_musb_cppi41_done(cppi41_channel);
musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
} else {
/* next iteration, reload */
@@ -173,6 +157,7 @@ static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
dma_desc->callback = cppi41_dma_callback;
dma_desc->callback_param = &cppi41_channel->channel;
cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
+ trace_musb_cppi41_cont(cppi41_channel);
dma_async_issue_pending(dc);
if (!cppi41_channel->is_tx) {
@@ -240,10 +225,7 @@ static void cppi41_dma_callback(void *private_data)
transferred = cppi41_channel->prog_len - txstate.residue;
cppi41_channel->transferred += transferred;
- dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d bytes=%d/%d\n",
- hw_ep->epnum, cppi41_channel->transferred,
- cppi41_channel->total_len);
-
+ trace_musb_cppi41_gb(cppi41_channel);
update_rx_toggle(cppi41_channel);
if (cppi41_channel->transferred == cppi41_channel->total_len ||
@@ -374,12 +356,6 @@ static bool cppi41_configure_channel(struct dma_channel *channel,
struct musb *musb = cppi41_channel->controller->musb;
unsigned use_gen_rndis = 0;
- dev_dbg(musb->controller,
- "configure ep%d/%x packet_sz=%d, mode=%d, dma_addr=0x%llx, len=%d is_tx=%d\n",
- cppi41_channel->port_num, RNDIS_REG(cppi41_channel->port_num),
- packet_sz, mode, (unsigned long long) dma_addr,
- len, cppi41_channel->is_tx);
-
cppi41_channel->buf_addr = dma_addr;
cppi41_channel->total_len = len;
cppi41_channel->transferred = 0;
@@ -431,6 +407,8 @@ static bool cppi41_configure_channel(struct dma_channel *channel,
cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
cppi41_channel->channel.rx_packet_done = false;
+ trace_musb_cppi41_config(cppi41_channel);
+
save_rx_toggle(cppi41_channel);
dma_async_issue_pending(dc);
return true;
@@ -461,6 +439,7 @@ static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
cppi41_channel->hw_ep = hw_ep;
cppi41_channel->is_allocated = 1;
+ trace_musb_cppi41_alloc(cppi41_channel);
return &cppi41_channel->channel;
}
@@ -468,6 +447,7 @@ static void cppi41_dma_channel_release(struct dma_channel *channel)
{
struct cppi41_dma_channel *cppi41_channel = channel->private_data;
+ trace_musb_cppi41_free(cppi41_channel);
if (cppi41_channel->is_allocated) {
cppi41_channel->is_allocated = 0;
channel->status = MUSB_DMA_STATUS_FREE;
@@ -537,8 +517,7 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel)
u16 csr;
is_tx = cppi41_channel->is_tx;
- dev_dbg(musb->controller, "abort channel=%d, is_tx=%d\n",
- cppi41_channel->port_num, is_tx);
+ trace_musb_cppi41_abort(cppi41_channel);
if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
return 0;
diff --git a/drivers/usb/musb/musb_debug.h b/drivers/usb/musb/musb_debug.h
index 27ba8f799..9a78877a8 100644
--- a/drivers/usb/musb/musb_debug.h
+++ b/drivers/usb/musb/musb_debug.h
@@ -42,6 +42,8 @@
#define INFO(fmt, args...) yprintk(KERN_INFO, fmt, ## args)
#define ERR(fmt, args...) yprintk(KERN_ERR, fmt, ## args)
+void musb_dbg(struct musb *musb, const char *fmt, ...);
+
#ifdef CONFIG_DEBUG_FS
int musb_init_debugfs(struct musb *musb);
void musb_exit_debugfs(struct musb *musb);
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index eeb7d9ecf..253717963 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -52,30 +52,6 @@
static const struct of_device_id musb_dsps_of_match[];
/**
- * avoid using musb_readx()/musb_writex() as glue layer should not be
- * dependent on musb core layer symbols.
- */
-static inline u8 dsps_readb(const void __iomem *addr, unsigned offset)
-{
- return __raw_readb(addr + offset);
-}
-
-static inline u32 dsps_readl(const void __iomem *addr, unsigned offset)
-{
- return __raw_readl(addr + offset);
-}
-
-static inline void dsps_writeb(void __iomem *addr, unsigned offset, u8 data)
-{
- __raw_writeb(data, addr + offset);
-}
-
-static inline void dsps_writel(void __iomem *addr, unsigned offset, u32 data)
-{
- __raw_writel(data, addr + offset);
-}
-
-/**
* DSPS musb wrapper register offset.
* FIXME: This should be expanded to have all the wrapper registers from TI DSPS
* musb ips.
@@ -223,8 +199,8 @@ static void dsps_musb_enable(struct musb *musb)
((musb->epmask & wrp->rxep_mask) << wrp->rxep_shift);
coremask = (wrp->usb_bitmap & ~MUSB_INTR_SOF);
- dsps_writel(reg_base, wrp->epintr_set, epmask);
- dsps_writel(reg_base, wrp->coreintr_set, coremask);
+ musb_writel(reg_base, wrp->epintr_set, epmask);
+ musb_writel(reg_base, wrp->coreintr_set, coremask);
/* start polling for ID change in dual-role idle mode */
if (musb->xceiv->otg->state == OTG_STATE_B_IDLE &&
musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
@@ -244,10 +220,10 @@ static void dsps_musb_disable(struct musb *musb)
const struct dsps_musb_wrapper *wrp = glue->wrp;
void __iomem *reg_base = musb->ctrl_base;
- dsps_writel(reg_base, wrp->coreintr_clear, wrp->usb_bitmap);
- dsps_writel(reg_base, wrp->epintr_clear,
+ musb_writel(reg_base, wrp->coreintr_clear, wrp->usb_bitmap);
+ musb_writel(reg_base, wrp->epintr_clear,
wrp->txep_bitmap | wrp->rxep_bitmap);
- dsps_writeb(musb->mregs, MUSB_DEVCTL, 0);
+ musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
}
static void otg_timer(unsigned long _musb)
@@ -265,14 +241,14 @@ static void otg_timer(unsigned long _musb)
* We poll because DSPS IP's won't expose several OTG-critical
* status change events (from the transceiver) otherwise.
*/
- devctl = dsps_readb(mregs, MUSB_DEVCTL);
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl,
usb_otg_state_string(musb->xceiv->otg->state));
spin_lock_irqsave(&musb->lock, flags);
switch (musb->xceiv->otg->state) {
case OTG_STATE_A_WAIT_BCON:
- dsps_writeb(musb->mregs, MUSB_DEVCTL, 0);
+ musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
skip_session = 1;
/* fall */
@@ -286,13 +262,13 @@ static void otg_timer(unsigned long _musb)
MUSB_HST_MODE(musb);
}
if (!(devctl & MUSB_DEVCTL_SESSION) && !skip_session)
- dsps_writeb(mregs, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
+ musb_writeb(mregs, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
mod_timer(&glue->timer, jiffies +
msecs_to_jiffies(wrp->poll_timeout));
break;
case OTG_STATE_A_WAIT_VFALL:
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
- dsps_writel(musb->ctrl_base, wrp->coreintr_set,
+ musb_writel(musb->ctrl_base, wrp->coreintr_set,
MUSB_INTR_VBUSERROR << wrp->usb_shift);
break;
default:
@@ -315,29 +291,29 @@ static irqreturn_t dsps_interrupt(int irq, void *hci)
spin_lock_irqsave(&musb->lock, flags);
/* Get endpoint interrupts */
- epintr = dsps_readl(reg_base, wrp->epintr_status);
+ epintr = musb_readl(reg_base, wrp->epintr_status);
musb->int_rx = (epintr & wrp->rxep_bitmap) >> wrp->rxep_shift;
musb->int_tx = (epintr & wrp->txep_bitmap) >> wrp->txep_shift;
if (epintr)
- dsps_writel(reg_base, wrp->epintr_status, epintr);
+ musb_writel(reg_base, wrp->epintr_status, epintr);
/* Get usb core interrupts */
- usbintr = dsps_readl(reg_base, wrp->coreintr_status);
+ usbintr = musb_readl(reg_base, wrp->coreintr_status);
if (!usbintr && !epintr)
goto out;
musb->int_usb = (usbintr & wrp->usb_bitmap) >> wrp->usb_shift;
if (usbintr)
- dsps_writel(reg_base, wrp->coreintr_status, usbintr);
+ musb_writel(reg_base, wrp->coreintr_status, usbintr);
dev_dbg(musb->controller, "usbintr (%x) epintr(%x)\n",
usbintr, epintr);
if (usbintr & ((1 << wrp->drvvbus) << wrp->usb_shift)) {
- int drvvbus = dsps_readl(reg_base, wrp->status);
+ int drvvbus = musb_readl(reg_base, wrp->status);
void __iomem *mregs = musb->mregs;
- u8 devctl = dsps_readb(mregs, MUSB_DEVCTL);
+ u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
int err;
err = musb->int_usb & MUSB_INTR_VBUSERROR;
@@ -442,7 +418,7 @@ static int dsps_musb_init(struct musb *musb)
musb->phy = devm_phy_get(dev->parent, "usb2-phy");
/* Returns zero if e.g. not clocked */
- rev = dsps_readl(reg_base, wrp->revision);
+ rev = musb_readl(reg_base, wrp->revision);
if (!rev)
return -ENODEV;
@@ -463,14 +439,14 @@ static int dsps_musb_init(struct musb *musb)
setup_timer(&glue->timer, otg_timer, (unsigned long) musb);
/* Reset the musb */
- dsps_writel(reg_base, wrp->control, (1 << wrp->reset));
+ musb_writel(reg_base, wrp->control, (1 << wrp->reset));
musb->isr = dsps_interrupt;
/* reset the otgdisable bit, needed for host mode to work */
- val = dsps_readl(reg_base, wrp->phy_utmi);
+ val = musb_readl(reg_base, wrp->phy_utmi);
val &= ~(1 << wrp->otg_disable);
- dsps_writel(musb->ctrl_base, wrp->phy_utmi, val);
+ musb_writel(musb->ctrl_base, wrp->phy_utmi, val);
/*
* Check whether the dsps version has babble control enabled.
@@ -478,11 +454,11 @@ static int dsps_musb_init(struct musb *musb)
* If MUSB_BABBLE_CTL returns 0x4 then we have the babble control
* logic enabled.
*/
- val = dsps_readb(musb->mregs, MUSB_BABBLE_CTL);
+ val = musb_readb(musb->mregs, MUSB_BABBLE_CTL);
if (val & MUSB_BABBLE_RCV_DISABLE) {
glue->sw_babble_enabled = true;
val |= MUSB_BABBLE_SW_SESSION_CTRL;
- dsps_writeb(musb->mregs, MUSB_BABBLE_CTL, val);
+ musb_writeb(musb->mregs, MUSB_BABBLE_CTL, val);
}
return dsps_musb_dbg_init(musb, glue);
@@ -510,7 +486,7 @@ static int dsps_musb_set_mode(struct musb *musb, u8 mode)
void __iomem *ctrl_base = musb->ctrl_base;
u32 reg;
- reg = dsps_readl(ctrl_base, wrp->mode);
+ reg = musb_readl(ctrl_base, wrp->mode);
switch (mode) {
case MUSB_HOST:
@@ -523,8 +499,8 @@ static int dsps_musb_set_mode(struct musb *musb, u8 mode)
*/
reg |= (1 << wrp->iddig_mux);
- dsps_writel(ctrl_base, wrp->mode, reg);
- dsps_writel(ctrl_base, wrp->phy_utmi, 0x02);
+ musb_writel(ctrl_base, wrp->mode, reg);
+ musb_writel(ctrl_base, wrp->phy_utmi, 0x02);
break;
case MUSB_PERIPHERAL:
reg |= (1 << wrp->iddig);
@@ -536,10 +512,10 @@ static int dsps_musb_set_mode(struct musb *musb, u8 mode)
*/
reg |= (1 << wrp->iddig_mux);
- dsps_writel(ctrl_base, wrp->mode, reg);
+ musb_writel(ctrl_base, wrp->mode, reg);
break;
case MUSB_OTG:
- dsps_writel(ctrl_base, wrp->phy_utmi, 0x02);
+ musb_writel(ctrl_base, wrp->phy_utmi, 0x02);
break;
default:
dev_err(glue->dev, "unsupported mode %d\n", mode);
@@ -554,7 +530,7 @@ static bool dsps_sw_babble_control(struct musb *musb)
u8 babble_ctl;
bool session_restart = false;
- babble_ctl = dsps_readb(musb->mregs, MUSB_BABBLE_CTL);
+ babble_ctl = musb_readb(musb->mregs, MUSB_BABBLE_CTL);
dev_dbg(musb->controller, "babble: MUSB_BABBLE_CTL value %x\n",
babble_ctl);
/*
@@ -571,14 +547,14 @@ static bool dsps_sw_babble_control(struct musb *musb)
* babble is due to noise, then set transmit idle (d7 bit)
* to resume normal operation
*/
- babble_ctl = dsps_readb(musb->mregs, MUSB_BABBLE_CTL);
+ babble_ctl = musb_readb(musb->mregs, MUSB_BABBLE_CTL);
babble_ctl |= MUSB_BABBLE_FORCE_TXIDLE;
- dsps_writeb(musb->mregs, MUSB_BABBLE_CTL, babble_ctl);
+ musb_writeb(musb->mregs, MUSB_BABBLE_CTL, babble_ctl);
/* wait till line monitor flag cleared */
dev_dbg(musb->controller, "Set TXIDLE, wait J to clear\n");
do {
- babble_ctl = dsps_readb(musb->mregs, MUSB_BABBLE_CTL);
+ babble_ctl = musb_readb(musb->mregs, MUSB_BABBLE_CTL);
udelay(1);
} while ((babble_ctl & MUSB_BABBLE_STUCK_J) && timeout--);
@@ -896,13 +872,13 @@ static int dsps_suspend(struct device *dev)
return 0;
mbase = musb->ctrl_base;
- glue->context.control = dsps_readl(mbase, wrp->control);
- glue->context.epintr = dsps_readl(mbase, wrp->epintr_set);
- glue->context.coreintr = dsps_readl(mbase, wrp->coreintr_set);
- glue->context.phy_utmi = dsps_readl(mbase, wrp->phy_utmi);
- glue->context.mode = dsps_readl(mbase, wrp->mode);
- glue->context.tx_mode = dsps_readl(mbase, wrp->tx_mode);
- glue->context.rx_mode = dsps_readl(mbase, wrp->rx_mode);
+ glue->context.control = musb_readl(mbase, wrp->control);
+ glue->context.epintr = musb_readl(mbase, wrp->epintr_set);
+ glue->context.coreintr = musb_readl(mbase, wrp->coreintr_set);
+ glue->context.phy_utmi = musb_readl(mbase, wrp->phy_utmi);
+ glue->context.mode = musb_readl(mbase, wrp->mode);
+ glue->context.tx_mode = musb_readl(mbase, wrp->tx_mode);
+ glue->context.rx_mode = musb_readl(mbase, wrp->rx_mode);
return 0;
}
@@ -918,13 +894,13 @@ static int dsps_resume(struct device *dev)
return 0;
mbase = musb->ctrl_base;
- dsps_writel(mbase, wrp->control, glue->context.control);
- dsps_writel(mbase, wrp->epintr_set, glue->context.epintr);
- dsps_writel(mbase, wrp->coreintr_set, glue->context.coreintr);
- dsps_writel(mbase, wrp->phy_utmi, glue->context.phy_utmi);
- dsps_writel(mbase, wrp->mode, glue->context.mode);
- dsps_writel(mbase, wrp->tx_mode, glue->context.tx_mode);
- dsps_writel(mbase, wrp->rx_mode, glue->context.rx_mode);
+ musb_writel(mbase, wrp->control, glue->context.control);
+ musb_writel(mbase, wrp->epintr_set, glue->context.epintr);
+ musb_writel(mbase, wrp->coreintr_set, glue->context.coreintr);
+ musb_writel(mbase, wrp->phy_utmi, glue->context.phy_utmi);
+ musb_writel(mbase, wrp->mode, glue->context.mode);
+ musb_writel(mbase, wrp->tx_mode, glue->context.tx_mode);
+ musb_writel(mbase, wrp->rx_mode, glue->context.rx_mode);
if (musb->xceiv->otg->state == OTG_STATE_B_IDLE &&
musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
mod_timer(&glue->timer, jiffies +
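
With the private dsps_read*()/dsps_write*() helpers removed, every register
access in this glue layer now funnels through the core musb_read*()/
musb_write*() wrappers, and therefore through the register tracepoints added
later in this patch. The musb_core.c side of the series is not shown in this
excerpt; a rough sketch of what the wrappers do (names and details are
illustrative):

static u32 musb_default_readl(const void __iomem *addr, unsigned offset)
{
	u32 data = __raw_readl(addr + offset);

	/* record caller, base address, offset and value in the trace buffer */
	trace_musb_readl(__builtin_return_address(0), addr, offset, data);
	return data;
}

static void musb_default_writel(void __iomem *addr, unsigned offset, u32 data)
{
	trace_musb_writel(__builtin_return_address(0), addr, offset, data);
	__raw_writel(data, addr + offset);
}
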
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index af2a3a7ad..6d1e975e9 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -44,6 +44,7 @@
#include <linux/slab.h>
#include "musb_core.h"
+#include "musb_trace.h"
/* ----------------------------------------------------------------------- */
@@ -167,15 +168,7 @@ __acquires(ep->musb->lock)
if (!dma_mapping_error(&musb->g.dev, request->dma))
unmap_dma_buffer(req, musb);
- if (request->status == 0)
- dev_dbg(musb->controller, "%s done request %p, %d/%d\n",
- ep->end_point.name, request,
- req->request.actual, req->request.length);
- else
- dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n",
- ep->end_point.name, request,
- req->request.actual, req->request.length,
- request->status);
+ trace_musb_req_gb(req);
usb_gadget_giveback_request(&req->ep->end_point, &req->request);
spin_lock(&musb->lock);
ep->busy = busy;
@@ -217,8 +210,7 @@ static void nuke(struct musb_ep *ep, const int status)
}
value = c->channel_abort(ep->dma);
- dev_dbg(musb->controller, "%s: abort DMA --> %d\n",
- ep->name, value);
+ musb_dbg(musb, "%s: abort DMA --> %d", ep->name, value);
c->channel_release(ep->dma);
ep->dma = NULL;
}
@@ -266,14 +258,14 @@ static void txstate(struct musb *musb, struct musb_request *req)
/* Check if EP is disabled */
if (!musb_ep->desc) {
- dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
+ musb_dbg(musb, "ep:%s disabled - ignore request",
musb_ep->end_point.name);
return;
}
/* we shouldn't get here while DMA is active ... but we do ... */
if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
- dev_dbg(musb->controller, "dma pending...\n");
+ musb_dbg(musb, "dma pending...");
return;
}
@@ -285,18 +277,18 @@ static void txstate(struct musb *musb, struct musb_request *req)
(int)(request->length - request->actual));
if (csr & MUSB_TXCSR_TXPKTRDY) {
- dev_dbg(musb->controller, "%s old packet still ready , txcsr %03x\n",
+ musb_dbg(musb, "%s old packet still ready , txcsr %03x",
musb_ep->end_point.name, csr);
return;
}
if (csr & MUSB_TXCSR_P_SENDSTALL) {
- dev_dbg(musb->controller, "%s stalling, txcsr %03x\n",
+ musb_dbg(musb, "%s stalling, txcsr %03x",
musb_ep->end_point.name, csr);
return;
}
- dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
+ musb_dbg(musb, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x",
epnum, musb_ep->packet_sz, fifo_count,
csr);
@@ -424,7 +416,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
}
/* host may already have the data when this message shows... */
- dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
+ musb_dbg(musb, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d",
musb_ep->end_point.name, use_dma ? "dma" : "pio",
request->actual, request->length,
musb_readw(epio, MUSB_TXCSR),
@@ -450,8 +442,9 @@ void musb_g_tx(struct musb *musb, u8 epnum)
req = next_request(musb_ep);
request = &req->request;
+ trace_musb_req_tx(req);
csr = musb_readw(epio, MUSB_TXCSR);
- dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);
+ musb_dbg(musb, "<== %s, txcsr %04x", musb_ep->end_point.name, csr);
dma = is_dma_capable() ? musb_ep->dma : NULL;
@@ -480,7 +473,7 @@ void musb_g_tx(struct musb *musb, u8 epnum)
* SHOULD NOT HAPPEN... has with CPPI though, after
* changing SENDSTALL (and other cases); harmless?
*/
- dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name);
+ musb_dbg(musb, "%s dma still busy?", musb_ep->end_point.name);
return;
}
@@ -497,7 +490,7 @@ void musb_g_tx(struct musb *musb, u8 epnum)
/* Ensure writebuffer is empty. */
csr = musb_readw(epio, MUSB_TXCSR);
request->actual += musb_ep->dma->actual_len;
- dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
+ musb_dbg(musb, "TXCSR%d %04x, DMA off, len %zu, req %p",
epnum, csr, musb_ep->dma->actual_len, request);
}
@@ -524,7 +517,6 @@ void musb_g_tx(struct musb *musb, u8 epnum)
if (csr & MUSB_TXCSR_TXPKTRDY)
return;
- dev_dbg(musb->controller, "sending zero pkt\n");
musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
| MUSB_TXCSR_TXPKTRDY);
request->zero = 0;
@@ -543,7 +535,7 @@ void musb_g_tx(struct musb *musb, u8 epnum)
musb_ep_select(mbase, epnum);
req = musb_ep->desc ? next_request(musb_ep) : NULL;
if (!req) {
- dev_dbg(musb->controller, "%s idle now\n",
+ musb_dbg(musb, "%s idle now",
musb_ep->end_point.name);
return;
}
@@ -579,19 +571,19 @@ static void rxstate(struct musb *musb, struct musb_request *req)
/* Check if EP is disabled */
if (!musb_ep->desc) {
- dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
+ musb_dbg(musb, "ep:%s disabled - ignore request",
musb_ep->end_point.name);
return;
}
/* We shouldn't get here while DMA is active, but we do... */
if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
- dev_dbg(musb->controller, "DMA pending...\n");
+ musb_dbg(musb, "DMA pending...");
return;
}
if (csr & MUSB_RXCSR_P_SENDSTALL) {
- dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n",
+ musb_dbg(musb, "%s stalling, RXCSR %04x",
musb_ep->end_point.name, csr);
return;
}
@@ -766,7 +758,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
}
len = request->length - request->actual;
- dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
+ musb_dbg(musb, "%s OUT/RX pio fifo %d/%d, maxpacket %d",
musb_ep->end_point.name,
fifo_count, len,
musb_ep->packet_sz);
@@ -849,12 +841,13 @@ void musb_g_rx(struct musb *musb, u8 epnum)
if (!req)
return;
+ trace_musb_req_rx(req);
request = &req->request;
csr = musb_readw(epio, MUSB_RXCSR);
dma = is_dma_capable() ? musb_ep->dma : NULL;
- dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
+ musb_dbg(musb, "<== %s, rxcsr %04x%s %p", musb_ep->end_point.name,
csr, dma ? " (dma)" : "", request);
if (csr & MUSB_RXCSR_P_SENTSTALL) {
@@ -869,18 +862,18 @@ void musb_g_rx(struct musb *musb, u8 epnum)
csr &= ~MUSB_RXCSR_P_OVERRUN;
musb_writew(epio, MUSB_RXCSR, csr);
- dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request);
+ musb_dbg(musb, "%s iso overrun on %p", musb_ep->name, request);
if (request->status == -EINPROGRESS)
request->status = -EOVERFLOW;
}
if (csr & MUSB_RXCSR_INCOMPRX) {
/* REVISIT not necessarily an error */
- dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name);
+ musb_dbg(musb, "%s, incomprx", musb_ep->end_point.name);
}
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
/* "should not happen"; likely RXPKTRDY pending for DMA */
- dev_dbg(musb->controller, "%s busy, csr %04x\n",
+ musb_dbg(musb, "%s busy, csr %04x",
musb_ep->end_point.name, csr);
return;
}
@@ -894,11 +887,6 @@ void musb_g_rx(struct musb *musb, u8 epnum)
request->actual += musb_ep->dma->actual_len;
- dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
- epnum, csr,
- musb_readw(epio, MUSB_RXCSR),
- musb_ep->dma->actual_len, request);
-
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
defined(CONFIG_USB_UX500_DMA)
/* Autoclear doesn't clear RxPktRdy for short packets */
@@ -996,7 +984,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
ok = musb->hb_iso_rx;
if (!ok) {
- dev_dbg(musb->controller, "no support for high bandwidth ISO\n");
+ musb_dbg(musb, "no support for high bandwidth ISO");
goto fail;
}
musb_ep->hb_mult = (tmp >> 11) & 3;
@@ -1019,7 +1007,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
goto fail;
if (tmp > hw_ep->max_packet_sz_tx) {
- dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
+ musb_dbg(musb, "packet size beyond hardware FIFO size");
goto fail;
}
@@ -1062,7 +1050,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
goto fail;
if (tmp > hw_ep->max_packet_sz_rx) {
- dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
+ musb_dbg(musb, "packet size beyond hardware FIFO size");
goto fail;
}
@@ -1174,7 +1162,7 @@ static int musb_gadget_disable(struct usb_ep *ep)
spin_unlock_irqrestore(&(musb->lock), flags);
- dev_dbg(musb->controller, "%s\n", musb_ep->end_point.name);
+ musb_dbg(musb, "%s", musb_ep->end_point.name);
return status;
}
@@ -1186,19 +1174,17 @@ static int musb_gadget_disable(struct usb_ep *ep)
struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
struct musb_ep *musb_ep = to_musb_ep(ep);
- struct musb *musb = musb_ep->musb;
struct musb_request *request = NULL;
request = kzalloc(sizeof *request, gfp_flags);
- if (!request) {
- dev_dbg(musb->controller, "not enough memory\n");
+ if (!request)
return NULL;
- }
request->request.dma = DMA_ADDR_INVALID;
request->epnum = musb_ep->current_epnum;
request->ep = musb_ep;
+ trace_musb_req_alloc(request);
return &request->request;
}
@@ -1208,7 +1194,10 @@ struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
*/
void musb_free_request(struct usb_ep *ep, struct usb_request *req)
{
- kfree(to_musb_request(req));
+ struct musb_request *request = to_musb_request(req);
+
+ trace_musb_req_free(request);
+ kfree(request);
}
static LIST_HEAD(buffers);
@@ -1225,10 +1214,7 @@ struct free_record {
*/
void musb_ep_restart(struct musb *musb, struct musb_request *req)
{
- dev_dbg(musb->controller, "<== %s request %p len %u on hw_ep%d\n",
- req->tx ? "TX/IN" : "RX/OUT",
- &req->request, req->request.length, req->epnum);
-
+ trace_musb_req_start(req);
musb_ep_select(musb->mregs, req->epnum);
if (req->tx)
txstate(musb, req);
@@ -1259,7 +1245,7 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
if (request->ep != musb_ep)
return -EINVAL;
- dev_dbg(musb->controller, "<== to %s request=%p\n", ep->name, req);
+ trace_musb_req_enq(request);
/* request is mine now... */
request->request.actual = 0;
@@ -1273,7 +1259,7 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
/* don't queue if the ep is down */
if (!musb_ep->desc) {
- dev_dbg(musb->controller, "req %p queued to %s while ep %s\n",
+ musb_dbg(musb, "req %p queued to %s while ep %s",
req, ep->name, "disabled");
status = -ESHUTDOWN;
unmap_dma_buffer(request, musb);
@@ -1301,9 +1287,11 @@ static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
int status = 0;
struct musb *musb = musb_ep->musb;
- if (!ep || !request || to_musb_request(request)->ep != musb_ep)
+ if (!ep || !request || req->ep != musb_ep)
return -EINVAL;
+ trace_musb_req_deq(req);
+
spin_lock_irqsave(&musb->lock, flags);
list_for_each_entry(r, &musb_ep->req_list, list) {
@@ -1311,7 +1299,8 @@ static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
break;
}
if (r != req) {
- dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name);
+ dev_err(musb->controller, "request %p not queued to %s\n",
+ request, ep->name);
status = -EINVAL;
goto done;
}
@@ -1377,7 +1366,7 @@ static int musb_gadget_set_halt(struct usb_ep *ep, int value)
request = next_request(musb_ep);
if (value) {
if (request) {
- dev_dbg(musb->controller, "request in progress, cannot halt %s\n",
+ musb_dbg(musb, "request in progress, cannot halt %s",
ep->name);
status = -EAGAIN;
goto done;
@@ -1386,7 +1375,8 @@ static int musb_gadget_set_halt(struct usb_ep *ep, int value)
if (musb_ep->is_in) {
csr = musb_readw(epio, MUSB_TXCSR);
if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
- dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n", ep->name);
+ musb_dbg(musb, "FIFO busy, cannot halt %s",
+ ep->name);
status = -EAGAIN;
goto done;
}
@@ -1395,7 +1385,7 @@ static int musb_gadget_set_halt(struct usb_ep *ep, int value)
musb_ep->wedged = 0;
/* set/clear the stall and toggle bits */
- dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ? "set" : "clear");
+ musb_dbg(musb, "%s: %s stall", ep->name, value ? "set" : "clear");
if (musb_ep->is_in) {
csr = musb_readw(epio, MUSB_TXCSR);
csr |= MUSB_TXCSR_P_WZC_BITS
@@ -1422,7 +1412,7 @@ static int musb_gadget_set_halt(struct usb_ep *ep, int value)
/* maybe start the first request in the queue */
if (!musb_ep->busy && !value && request) {
- dev_dbg(musb->controller, "restarting the request\n");
+ musb_dbg(musb, "restarting the request");
musb_ep_restart(musb, request);
}
@@ -1558,7 +1548,7 @@ static int musb_gadget_wakeup(struct usb_gadget *gadget)
case OTG_STATE_B_IDLE:
/* Start SRP ... OTG not required. */
devctl = musb_readb(mregs, MUSB_DEVCTL);
- dev_dbg(musb->controller, "Sending SRP: devctl: %02x\n", devctl);
+ musb_dbg(musb, "Sending SRP: devctl: %02x", devctl);
devctl |= MUSB_DEVCTL_SESSION;
musb_writeb(mregs, MUSB_DEVCTL, devctl);
devctl = musb_readb(mregs, MUSB_DEVCTL);
@@ -1586,7 +1576,7 @@ static int musb_gadget_wakeup(struct usb_gadget *gadget)
status = 0;
goto done;
default:
- dev_dbg(musb->controller, "Unhandled wake: %s\n",
+ musb_dbg(musb, "Unhandled wake: %s",
usb_otg_state_string(musb->xceiv->otg->state));
goto done;
}
@@ -1596,7 +1586,7 @@ static int musb_gadget_wakeup(struct usb_gadget *gadget)
power = musb_readb(mregs, MUSB_POWER);
power |= MUSB_POWER_RESUME;
musb_writeb(mregs, MUSB_POWER, power);
- dev_dbg(musb->controller, "issue wakeup\n");
+ musb_dbg(musb, "issue wakeup");
/* FIXME do this next chunk in a timer callback, no udelay */
mdelay(2);
@@ -1628,7 +1618,7 @@ static void musb_pullup(struct musb *musb, int is_on)
/* FIXME if on, HdrcStart; if off, HdrcStop */
- dev_dbg(musb->controller, "gadget D+ pullup %s\n",
+ musb_dbg(musb, "gadget D+ pullup %s",
is_on ? "on" : "off");
musb_writeb(musb->mregs, MUSB_POWER, power);
}
@@ -1636,7 +1626,7 @@ static void musb_pullup(struct musb *musb, int is_on)
#if 0
static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
{
- dev_dbg(musb->controller, "<= %s =>\n", __func__);
+ musb_dbg(musb, "<= %s =>\n", __func__);
/*
* FIXME iff driver's softconnect flag is set (as it is during probe,
@@ -2011,7 +2001,7 @@ void musb_g_suspend(struct musb *musb)
u8 devctl;
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
- dev_dbg(musb->controller, "devctl %02x\n", devctl);
+ musb_dbg(musb, "musb_g_suspend: devctl %02x", devctl);
switch (musb->xceiv->otg->state) {
case OTG_STATE_B_IDLE:
@@ -2030,7 +2020,7 @@ void musb_g_suspend(struct musb *musb)
/* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
* A_PERIPHERAL may need care too
*/
- WARNING("unhandled SUSPEND transition (%s)\n",
+ WARNING("unhandled SUSPEND transition (%s)",
usb_otg_state_string(musb->xceiv->otg->state));
}
}
@@ -2047,7 +2037,7 @@ void musb_g_disconnect(struct musb *musb)
void __iomem *mregs = musb->mregs;
u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
- dev_dbg(musb->controller, "devctl %02x\n", devctl);
+ musb_dbg(musb, "musb_g_disconnect: devctl %02x", devctl);
/* clear HR */
musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);
@@ -2064,7 +2054,7 @@ void musb_g_disconnect(struct musb *musb)
switch (musb->xceiv->otg->state) {
default:
- dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n",
+ musb_dbg(musb, "Unhandled disconnect %s, setting a_idle",
usb_otg_state_string(musb->xceiv->otg->state));
musb->xceiv->otg->state = OTG_STATE_A_IDLE;
MUSB_HST_MODE(musb);
@@ -2094,7 +2084,7 @@ __acquires(musb->lock)
u8 devctl = musb_readb(mbase, MUSB_DEVCTL);
u8 power;
- dev_dbg(musb->controller, "<== %s driver '%s'\n",
+ musb_dbg(musb, "<== %s driver '%s'",
(devctl & MUSB_DEVCTL_BDEVICE)
? "B-Device" : "A-Device",
musb->gadget_driver
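
Two conversion rules run through the whole gadget file: trace events replace
the multi-line dev_dbg() dumps wherever structured request state is available,
and every surviving format string drops its trailing "\n", since trace output
is already line-oriented. The giveback, alloc and free paths also downcast
struct usb_request to struct musb_request before tracing; the
to_musb_request() helper lives in musb_gadget.h (not shown in this excerpt)
and is the usual container_of() pattern, roughly:

static inline struct musb_request *to_musb_request(struct usb_request *req)
{
	return req ? container_of(req, struct musb_request, request) : NULL;
}
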
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 10d30afe4..844a309fe 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -206,7 +206,7 @@ static inline void musb_try_b_hnp_enable(struct musb *musb)
void __iomem *mbase = musb->mregs;
u8 devctl;
- dev_dbg(musb->controller, "HNP: Setting HR\n");
+ musb_dbg(musb, "HNP: Setting HR");
devctl = musb_readb(mbase, MUSB_DEVCTL);
musb_writeb(mbase, MUSB_DEVCTL, devctl | MUSB_DEVCTL_HR);
}
@@ -303,7 +303,7 @@ __acquires(musb->lock)
/* Maybe start the first request in the queue */
request = next_request(musb_ep);
if (!musb_ep->busy && request) {
- dev_dbg(musb->controller, "restarting the request\n");
+ musb_dbg(musb, "restarting the request");
musb_ep_restart(musb, request);
}
@@ -550,7 +550,7 @@ static void ep0_txstate(struct musb *musb)
if (!req) {
/* WARN_ON(1); */
- dev_dbg(musb->controller, "odd; csr0 %04x\n", musb_readw(regs, MUSB_CSR0));
+ musb_dbg(musb, "odd; csr0 %04x", musb_readw(regs, MUSB_CSR0));
return;
}
@@ -607,7 +607,7 @@ musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req)
/* NOTE: earlier 2.6 versions changed setup packets to host
* order, but now USB packets always stay in USB byte order.
*/
- dev_dbg(musb->controller, "SETUP req%02x.%02x v%04x i%04x l%d\n",
+ musb_dbg(musb, "SETUP req%02x.%02x v%04x i%04x l%d",
req->bRequestType,
req->bRequest,
le16_to_cpu(req->wValue),
@@ -675,7 +675,7 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
csr = musb_readw(regs, MUSB_CSR0);
len = musb_readb(regs, MUSB_COUNT0);
- dev_dbg(musb->controller, "csr %04x, count %d, ep0stage %s\n",
+ musb_dbg(musb, "csr %04x, count %d, ep0stage %s",
csr, len, decode_ep0stage(musb->ep0_state));
if (csr & MUSB_CSR0_P_DATAEND) {
@@ -752,7 +752,7 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
/* enter test mode if needed (exit by reset) */
else if (musb->test_mode) {
- dev_dbg(musb->controller, "entering TESTMODE\n");
+ musb_dbg(musb, "entering TESTMODE");
if (MUSB_TEST_PACKET == musb->test_mode_nr)
musb_load_testpacket(musb);
@@ -864,7 +864,7 @@ setup:
break;
}
- dev_dbg(musb->controller, "handled %d, csr %04x, ep0stage %s\n",
+ musb_dbg(musb, "handled %d, csr %04x, ep0stage %s",
handled, csr,
decode_ep0stage(musb->ep0_state));
@@ -881,7 +881,7 @@ setup:
if (handled < 0) {
musb_ep_select(mbase, 0);
stall:
- dev_dbg(musb->controller, "stall (%d)\n", handled);
+ musb_dbg(musb, "stall (%d)", handled);
musb->ackpend |= MUSB_CSR0_P_SENDSTALL;
musb->ep0_state = MUSB_EP0_STAGE_IDLE;
finish:
@@ -961,7 +961,7 @@ musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags)
status = 0;
break;
default:
- dev_dbg(musb->controller, "ep0 request queued in state %d\n",
+ musb_dbg(musb, "ep0 request queued in state %d",
musb->ep0_state);
status = -EINVAL;
goto cleanup;
@@ -970,7 +970,7 @@ musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags)
/* add request to the list */
list_add_tail(&req->list, &ep->req_list);
- dev_dbg(musb->controller, "queue to %s (%s), length=%d\n",
+ musb_dbg(musb, "queue to %s (%s), length=%d",
ep->name, ep->is_in ? "IN/TX" : "OUT/RX",
req->request.length);
@@ -1063,7 +1063,7 @@ static int musb_g_ep0_halt(struct usb_ep *e, int value)
musb->ackpend = 0;
break;
default:
- dev_dbg(musb->controller, "ep0 can't halt in state %d\n", musb->ep0_state);
+ musb_dbg(musb, "ep0 can't halt in state %d", musb->ep0_state);
status = -EINVAL;
}
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index d227a71d8..53bc4ceef 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -44,6 +44,7 @@
#include "musb_core.h"
#include "musb_host.h"
+#include "musb_trace.h"
/* MUSB HOST status 22-mar-2006
*
@@ -131,7 +132,7 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
* I found using a usb-ethernet device and running iperf
* (client on AM335x) has very high chance to trigger it.
*
- * Better to turn on dev_dbg() in musb_cleanup_urb() with
+ * Better to turn on musb_dbg() in musb_cleanup_urb() with
* CPPI enabled to see the issue when aborting the tx channel.
*/
if (dev_WARN_ONCE(musb->controller, retries-- < 1,
@@ -225,8 +226,6 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
void *buf = urb->transfer_buffer;
u32 offset = 0;
struct musb_hw_ep *hw_ep = qh->hw_ep;
- unsigned pipe = urb->pipe;
- u8 address = usb_pipedevice(pipe);
int epnum = hw_ep->epnum;
/* initialize software qh state */
@@ -254,16 +253,7 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
len = urb->transfer_buffer_length - urb->actual_length;
}
- dev_dbg(musb->controller, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
- qh, urb, address, qh->epnum,
- is_in ? "in" : "out",
- ({char *s; switch (qh->type) {
- case USB_ENDPOINT_XFER_CONTROL: s = ""; break;
- case USB_ENDPOINT_XFER_BULK: s = "-bulk"; break;
- case USB_ENDPOINT_XFER_ISOC: s = "-iso"; break;
- default: s = "-intr"; break;
- } s; }),
- epnum, buf + offset, len);
+ trace_musb_urb_start(musb, urb);
/* Configure endpoint */
musb_ep_set_qh(hw_ep, is_in, qh);
@@ -277,7 +267,7 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
switch (qh->type) {
case USB_ENDPOINT_XFER_ISOC:
case USB_ENDPOINT_XFER_INT:
- dev_dbg(musb->controller, "check whether there's still time for periodic Tx\n");
+ musb_dbg(musb, "check whether there's still time for periodic Tx");
frame = musb_readw(mbase, MUSB_FRAME);
/* FIXME this doesn't implement that scheduling policy ...
* or handle framecounter wrapping
@@ -291,7 +281,7 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
} else {
qh->frame = urb->start_frame;
/* enable SOF interrupt so we can count down */
- dev_dbg(musb->controller, "SOF for %d\n", epnum);
+ musb_dbg(musb, "SOF for %d", epnum);
#if 1 /* ifndef CONFIG_ARCH_DAVINCI */
musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
@@ -299,7 +289,7 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
break;
default:
start:
- dev_dbg(musb->controller, "Start TX%d %s\n", epnum,
+ musb_dbg(musb, "Start TX%d %s", epnum,
hw_ep->tx_channel ? "dma" : "pio");
if (!hw_ep->tx_channel)
@@ -314,14 +304,7 @@ static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
- dev_dbg(musb->controller,
- "complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
- urb, urb->complete, status,
- usb_pipedevice(urb->pipe),
- usb_pipeendpoint(urb->pipe),
- usb_pipein(urb->pipe) ? "in" : "out",
- urb->actual_length, urb->transfer_buffer_length
- );
+ trace_musb_urb_gb(musb, urb);
usb_hcd_unlink_urb_from_ep(musb->hcd, urb);
spin_unlock(&musb->lock);
@@ -441,7 +424,7 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
* for RX, until we have a test case to understand the behavior of TX.
*/
if ((!status || !is_in) && qh && qh->is_ready) {
- dev_dbg(musb->controller, "... next ep%d %cX urb %p\n",
+ musb_dbg(musb, "... next ep%d %cX urb %p",
hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
musb_start_urb(musb, is_in, qh);
}
@@ -486,7 +469,7 @@ musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
/* musb_ep_select(mbase, epnum); */
rx_count = musb_readw(epio, MUSB_RXCOUNT);
- dev_dbg(musb->controller, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
+ musb_dbg(musb, "RX%d count %d, buffer %p len %d/%d", epnum, rx_count,
urb->transfer_buffer, qh->offset,
urb->transfer_buffer_length);
@@ -508,7 +491,7 @@ musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
status = -EOVERFLOW;
urb->error_count++;
}
- dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
+ musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
do_flush = 1;
} else
length = rx_count;
@@ -526,7 +509,7 @@ musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
if (rx_count > length) {
if (urb->status == -EINPROGRESS)
urb->status = -EOVERFLOW;
- dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
+ musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
do_flush = 1;
} else
length = rx_count;
@@ -750,8 +733,8 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
u8 use_dma = 1;
u16 csr;
- dev_dbg(musb->controller, "%s hw%d urb %p spd%d dev%d ep%d%s "
- "h_addr%02x h_port%02x bytes %d\n",
+ musb_dbg(musb, "%s hw%d urb %p spd%d dev%d ep%d%s "
+ "h_addr%02x h_port%02x bytes %d",
is_out ? "-->" : "<--",
epnum, urb, urb->dev->speed,
qh->addr_reg, qh->epnum, is_out ? "out" : "in",
@@ -969,7 +952,7 @@ finish:
}
csr |= MUSB_RXCSR_H_REQPKT;
- dev_dbg(musb->controller, "RXCSR%d := %04x\n", epnum, csr);
+ musb_dbg(musb, "RXCSR%d := %04x", epnum, csr);
musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
}
@@ -1085,15 +1068,15 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
request = (struct usb_ctrlrequest *) urb->setup_packet;
if (!request->wLength) {
- dev_dbg(musb->controller, "start no-DATA\n");
+ musb_dbg(musb, "start no-DATA");
break;
} else if (request->bRequestType & USB_DIR_IN) {
- dev_dbg(musb->controller, "start IN-DATA\n");
+ musb_dbg(musb, "start IN-DATA");
musb->ep0_stage = MUSB_EP0_IN;
more = true;
break;
} else {
- dev_dbg(musb->controller, "start OUT-DATA\n");
+ musb_dbg(musb, "start OUT-DATA");
musb->ep0_stage = MUSB_EP0_OUT;
more = true;
}
@@ -1105,7 +1088,7 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
if (fifo_count) {
fifo_dest = (u8 *) (urb->transfer_buffer
+ urb->actual_length);
- dev_dbg(musb->controller, "Sending %d byte%s to ep0 fifo %p\n",
+ musb_dbg(musb, "Sending %d byte%s to ep0 fifo %p",
fifo_count,
(fifo_count == 1) ? "" : "s",
fifo_dest);
@@ -1150,7 +1133,7 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
? musb_readb(epio, MUSB_COUNT0)
: 0;
- dev_dbg(musb->controller, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
+ musb_dbg(musb, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d",
csr, qh, len, urb, musb->ep0_stage);
/* if we just did status stage, we are done */
@@ -1161,15 +1144,15 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
/* prepare status */
if (csr & MUSB_CSR0_H_RXSTALL) {
- dev_dbg(musb->controller, "STALLING ENDPOINT\n");
+ musb_dbg(musb, "STALLING ENDPOINT");
status = -EPIPE;
} else if (csr & MUSB_CSR0_H_ERROR) {
- dev_dbg(musb->controller, "no response, csr0 %04x\n", csr);
+ musb_dbg(musb, "no response, csr0 %04x", csr);
status = -EPROTO;
} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
- dev_dbg(musb->controller, "control NAK timeout\n");
+ musb_dbg(musb, "control NAK timeout");
/* NOTE: this code path would be a good place to PAUSE a
* control transfer, if another one is queued, so that
@@ -1184,7 +1167,7 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
}
if (status) {
- dev_dbg(musb->controller, "aborting\n");
+ musb_dbg(musb, "aborting");
retval = IRQ_HANDLED;
if (urb)
urb->status = status;
@@ -1237,7 +1220,7 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
/* flag status stage */
musb->ep0_stage = MUSB_EP0_STATUS;
- dev_dbg(musb->controller, "ep0 STATUS, csr %04x\n", csr);
+ musb_dbg(musb, "ep0 STATUS, csr %04x", csr);
}
musb_writew(epio, MUSB_CSR0, csr);
@@ -1291,38 +1274,37 @@ void musb_host_tx(struct musb *musb, u8 epnum)
/* with CPPI, DMA sometimes triggers "extra" irqs */
if (!urb) {
- dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
+ musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
return;
}
pipe = urb->pipe;
dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
- dev_dbg(musb->controller, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
+ trace_musb_urb_tx(musb, urb);
+ musb_dbg(musb, "OUT/TX%d end, csr %04x%s", epnum, tx_csr,
dma ? ", dma" : "");
/* check for errors */
if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
/* dma was disabled, fifo flushed */
- dev_dbg(musb->controller, "TX end %d stall\n", epnum);
+ musb_dbg(musb, "TX end %d stall", epnum);
/* stall; record URB status */
status = -EPIPE;
} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
/* (NON-ISO) dma was disabled, fifo flushed */
- dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum);
+ musb_dbg(musb, "TX 3strikes on ep=%d", epnum);
status = -ETIMEDOUT;
} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
&& !list_is_singular(&musb->out_bulk)) {
- dev_dbg(musb->controller,
- "NAK timeout on TX%d ep\n", epnum);
+ musb_dbg(musb, "NAK timeout on TX%d ep", epnum);
musb_bulk_nak_timeout(musb, hw_ep, 0);
} else {
- dev_dbg(musb->controller,
- "TX end=%d device not responding\n", epnum);
+ musb_dbg(musb, "TX ep%d device not responding", epnum);
/* NOTE: this code path would be a good place to PAUSE a
* transfer, if there's some other (nonperiodic) tx urb
* that could use this fifo. (dma complicates it...)
@@ -1368,7 +1350,7 @@ done:
/* second cppi case */
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
- dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
+ musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
return;
}
@@ -1427,8 +1409,9 @@ done:
* FIFO mode too...
*/
if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
- dev_dbg(musb->controller, "DMA complete but packet still in FIFO, "
- "CSR %04x\n", tx_csr);
+ musb_dbg(musb,
+ "DMA complete but FIFO not empty, CSR %04x",
+ tx_csr);
return;
}
}
@@ -1494,7 +1477,7 @@ done:
return;
}
} else if (tx_csr & MUSB_TXCSR_DMAENAB) {
- dev_dbg(musb->controller, "not complete, but DMA enabled?\n");
+ musb_dbg(musb, "not complete, but DMA enabled?");
return;
}
@@ -1723,7 +1706,7 @@ static int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
d_status = -EOVERFLOW;
urb->error_count++;
}
- dev_dbg(musb->controller, "** OVERFLOW %d into %d\n",
+ musb_dbg(musb, "** OVERFLOW %d into %d",
rx_count, d->length);
length = d->length;
@@ -1847,28 +1830,26 @@ void musb_host_rx(struct musb *musb, u8 epnum)
* usbtest #11 (unlinks) triggers it regularly, sometimes
* with fifo full. (Only with DMA??)
*/
- dev_dbg(musb->controller, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
- musb_readw(epio, MUSB_RXCOUNT));
+ musb_dbg(musb, "BOGUS RX%d ready, csr %04x, count %d",
+ epnum, val, musb_readw(epio, MUSB_RXCOUNT));
musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
return;
}
pipe = urb->pipe;
- dev_dbg(musb->controller, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
- epnum, rx_csr, urb->actual_length,
- dma ? dma->actual_len : 0);
+ trace_musb_urb_rx(musb, urb);
/* check for errors, concurrent stall & unlink is not really
* handled yet! */
if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
- dev_dbg(musb->controller, "RX end %d STALL\n", epnum);
+ musb_dbg(musb, "RX end %d STALL", epnum);
/* stall; record URB status */
status = -EPIPE;
} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
- dev_dbg(musb->controller, "end %d RX proto error\n", epnum);
+ musb_dbg(musb, "end %d RX proto error", epnum);
status = -EPROTO;
musb_writeb(epio, MUSB_RXINTERVAL, 0);
@@ -1879,7 +1860,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
} else if (rx_csr & MUSB_RXCSR_DATAERROR) {
if (USB_ENDPOINT_XFER_ISOC != qh->type) {
- dev_dbg(musb->controller, "RX end %d NAK timeout\n", epnum);
+ musb_dbg(musb, "RX end %d NAK timeout", epnum);
/* NOTE: NAKing is *NOT* an error, so we want to
* continue. Except ... if there's a request for
@@ -1902,12 +1883,12 @@ void musb_host_rx(struct musb *musb, u8 epnum)
goto finish;
} else {
- dev_dbg(musb->controller, "RX end %d ISO data error\n", epnum);
+ musb_dbg(musb, "RX end %d ISO data error", epnum);
/* packet error reported later */
iso_err = true;
}
} else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
- dev_dbg(musb->controller, "end %d high bandwidth incomplete ISO packet RX\n",
+ musb_dbg(musb, "end %d high bandwidth incomplete ISO packet RX",
epnum);
status = -EPROTO;
}
@@ -1952,7 +1933,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
done = true;
}
- dev_dbg(musb->controller, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
+ musb_dbg(musb, "RXCSR%d %04x, reqpkt, len %zu%s", epnum, rx_csr,
xfer_len, dma ? ", dma" : "");
rx_csr &= ~MUSB_RXCSR_H_REQPKT;
@@ -1973,8 +1954,8 @@ void musb_host_rx(struct musb *musb, u8 epnum)
if (musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
musb_dma_cppi41(musb)) {
done = musb_rx_dma_inventra_cppi41(c, hw_ep, qh, urb, xfer_len);
- dev_dbg(hw_ep->musb->controller,
- "ep %d dma %s, rxcsr %04x, rxcount %d\n",
+ musb_dbg(hw_ep->musb,
+ "ep %d dma %s, rxcsr %04x, rxcount %d",
epnum, done ? "off" : "reset",
musb_readw(epio, MUSB_RXCSR),
musb_readw(epio, MUSB_RXCOUNT));
@@ -2001,8 +1982,8 @@ void musb_host_rx(struct musb *musb, u8 epnum)
/* we are expecting IN packets */
if ((musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
musb_dma_cppi41(musb)) && dma) {
- dev_dbg(hw_ep->musb->controller,
- "RX%d count %d, buffer 0x%llx len %d/%d\n",
+ musb_dbg(hw_ep->musb,
+ "RX%d count %d, buffer 0x%llx len %d/%d",
epnum, musb_readw(epio, MUSB_RXCOUNT),
(unsigned long long) urb->transfer_dma
+ urb->actual_length,
@@ -2054,7 +2035,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
done = musb_host_packet_rx(musb, urb,
epnum, iso_err);
}
- dev_dbg(musb->controller, "read %spacket\n", done ? "last " : "");
+ musb_dbg(musb, "read %spacket", done ? "last " : "");
}
}
@@ -2178,7 +2159,7 @@ static int musb_schedule(
idle = 1;
qh->mux = 0;
hw_ep = musb->endpoints + best_end;
- dev_dbg(musb->controller, "qh %p periodic slot %d\n", qh, best_end);
+ musb_dbg(musb, "qh %p periodic slot %d", qh, best_end);
success:
if (head) {
idle = list_empty(head);
@@ -2210,6 +2191,8 @@ static int musb_urb_enqueue(
if (!is_host_active(musb) || !musb->is_active)
return -ENODEV;
+ trace_musb_urb_enq(musb, urb);
+
spin_lock_irqsave(&musb->lock, flags);
ret = usb_hcd_link_urb_to_ep(hcd, urb);
qh = ret ? NULL : hep->hcpriv;
@@ -2400,8 +2383,7 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
dma = is_in ? ep->rx_channel : ep->tx_channel;
if (dma) {
status = ep->musb->dma_controller->channel_abort(dma);
- dev_dbg(musb->controller,
- "abort %cX%d DMA for urb %p --> %d\n",
+ musb_dbg(musb, "abort %cX%d DMA for urb %p --> %d",
is_in ? 'R' : 'T', ep->epnum,
urb, status);
urb->actual_length += dma->actual_len;
@@ -2447,10 +2429,7 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
int is_in = usb_pipein(urb->pipe);
int ret;
- dev_dbg(musb->controller, "urb=%p, dev%d ep%d%s\n", urb,
- usb_pipedevice(urb->pipe),
- usb_pipeendpoint(urb->pipe),
- is_in ? "in" : "out");
+ trace_musb_urb_deq(musb, urb);
spin_lock_irqsave(&musb->lock, flags);
ret = usb_hcd_check_unlink_urb(hcd, urb, status);
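
On the host side one event class covers six URB call sites, so widening trace
coverage later stays a two-line change. Sketch of adding one more event to the
musb_urb class declared in musb_trace.h below (hypothetical event name, shown
for illustration only):

DEFINE_EVENT(musb_urb, musb_urb_example,
	TP_PROTO(struct musb *musb, struct urb *urb),
	TP_ARGS(musb, urb)
);
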
diff --git a/drivers/usb/musb/musb_trace.c b/drivers/usb/musb/musb_trace.c
new file mode 100644
index 000000000..70973d901
--- /dev/null
+++ b/drivers/usb/musb/musb_trace.c
@@ -0,0 +1,33 @@
+/*
+ * musb_trace.c - MUSB Controller Trace Support
+ *
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Author: Bin Liu <b-liu@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define CREATE_TRACE_POINTS
+#include "musb_trace.h"
+
+void musb_dbg(struct musb *musb, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ trace_musb_log(musb, &vaf);
+
+ va_end(args);
+}
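
musb_trace.c is deliberately tiny. Defining CREATE_TRACE_POINTS before the
include makes this the single translation unit in which the TRACE_EVENT() and
DEFINE_EVENT() macros of musb_trace.h expand into real event definitions:

#define CREATE_TRACE_POINTS	/* only here, in musb_trace.c */
#include "musb_trace.h"
/* every other user (musb_host.c, musb_gadget.c, ... above) includes the
 * header without the define and gets only the trace_musb_*() inline stubs */

Note also that musb_dbg() merely packages its va_list into a struct va_format;
the vsnprintf() that renders the message runs in the musb_log event's
fast-assign path, i.e. only when the event is actually enabled.
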
diff --git a/drivers/usb/musb/musb_trace.h b/drivers/usb/musb/musb_trace.h
new file mode 100644
index 000000000..f031c9e74
--- /dev/null
+++ b/drivers/usb/musb/musb_trace.h
@@ -0,0 +1,371 @@
+/*
+ * musb_trace.h - MUSB Controller Trace Support
+ *
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Author: Bin Liu <b-liu@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM musb
+
+#if !defined(__MUSB_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MUSB_TRACE_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <linux/usb.h>
+#include "musb_core.h"
+#ifdef CONFIG_USB_TI_CPPI41_DMA
+#include "cppi_dma.h"
+#endif
+
+#define MUSB_MSG_MAX 500
+
+TRACE_EVENT(musb_log,
+ TP_PROTO(struct musb *musb, struct va_format *vaf),
+ TP_ARGS(musb, vaf),
+ TP_STRUCT__entry(
+ __string(name, dev_name(musb->controller))
+ __dynamic_array(char, msg, MUSB_MSG_MAX)
+ ),
+ TP_fast_assign(
+ __assign_str(name, dev_name(musb->controller));
+ vsnprintf(__get_str(msg), MUSB_MSG_MAX, vaf->fmt, *vaf->va);
+ ),
+ TP_printk("%s: %s", __get_str(name), __get_str(msg))
+);
+
+DECLARE_EVENT_CLASS(musb_regb,
+ TP_PROTO(void *caller, const void *addr, unsigned int offset, u8 data),
+ TP_ARGS(caller, addr, offset, data),
+ TP_STRUCT__entry(
+ __field(void *, caller)
+ __field(const void *, addr)
+ __field(unsigned int, offset)
+ __field(u8, data)
+ ),
+ TP_fast_assign(
+ __entry->caller = caller;
+ __entry->addr = addr;
+ __entry->offset = offset;
+ __entry->data = data;
+ ),
+ TP_printk("%pS: %p + %04x: %02x",
+ __entry->caller, __entry->addr, __entry->offset, __entry->data)
+);
+
+DEFINE_EVENT(musb_regb, musb_readb,
+ TP_PROTO(void *caller, const void *addr, unsigned int offset, u8 data),
+ TP_ARGS(caller, addr, offset, data)
+);
+
+DEFINE_EVENT(musb_regb, musb_writeb,
+ TP_PROTO(void *caller, const void *addr, unsigned int offset, u8 data),
+ TP_ARGS(caller, addr, offset, data)
+);
+
+DECLARE_EVENT_CLASS(musb_regw,
+ TP_PROTO(void *caller, const void *addr, unsigned int offset, u16 data),
+ TP_ARGS(caller, addr, offset, data),
+ TP_STRUCT__entry(
+ __field(void *, caller)
+ __field(const void *, addr)
+ __field(unsigned int, offset)
+ __field(u16, data)
+ ),
+ TP_fast_assign(
+ __entry->caller = caller;
+ __entry->addr = addr;
+ __entry->offset = offset;
+ __entry->data = data;
+ ),
+ TP_printk("%pS: %p + %04x: %04x",
+ __entry->caller, __entry->addr, __entry->offset, __entry->data)
+);
+
+DEFINE_EVENT(musb_regw, musb_readw,
+ TP_PROTO(void *caller, const void *addr, unsigned int offset, u16 data),
+ TP_ARGS(caller, addr, offset, data)
+);
+
+DEFINE_EVENT(musb_regw, musb_writew,
+ TP_PROTO(void *caller, const void *addr, unsigned int offset, u16 data),
+ TP_ARGS(caller, addr, offset, data)
+);
+
+DECLARE_EVENT_CLASS(musb_regl,
+ TP_PROTO(void *caller, const void *addr, unsigned int offset, u32 data),
+ TP_ARGS(caller, addr, offset, data),
+ TP_STRUCT__entry(
+ __field(void *, caller)
+ __field(const void *, addr)
+ __field(unsigned int, offset)
+ __field(u32, data)
+ ),
+ TP_fast_assign(
+ __entry->caller = caller;
+ __entry->addr = addr;
+ __entry->offset = offset;
+ __entry->data = data;
+ ),
+ TP_printk("%pS: %p + %04x: %08x",
+ __entry->caller, __entry->addr, __entry->offset, __entry->data)
+);
+
+DEFINE_EVENT(musb_regl, musb_readl,
+ TP_PROTO(void *caller, const void *addr, unsigned int offset, u32 data),
+ TP_ARGS(caller, addr, offset, data)
+);
+
+DEFINE_EVENT(musb_regl, musb_writel,
+ TP_PROTO(void *caller, const void *addr, unsigned int offset, u32 data),
+ TP_ARGS(caller, addr, offset, data)
+);
+
+TRACE_EVENT(musb_isr,
+ TP_PROTO(struct musb *musb),
+ TP_ARGS(musb),
+ TP_STRUCT__entry(
+ __string(name, dev_name(musb->controller))
+ __field(u8, int_usb)
+ __field(u16, int_tx)
+ __field(u16, int_rx)
+ ),
+ TP_fast_assign(
+ __assign_str(name, dev_name(musb->controller));
+ __entry->int_usb = musb->int_usb;
+ __entry->int_tx = musb->int_tx;
+ __entry->int_rx = musb->int_rx;
+ ),
+ TP_printk("%s: usb %02x, tx %04x, rx %04x",
+ __get_str(name), __entry->int_usb,
+ __entry->int_tx, __entry->int_rx
+ )
+);
+
+DECLARE_EVENT_CLASS(musb_urb,
+ TP_PROTO(struct musb *musb, struct urb *urb),
+ TP_ARGS(musb, urb),
+ TP_STRUCT__entry(
+ __string(name, dev_name(musb->controller))
+ __field(struct urb *, urb)
+ __field(unsigned int, pipe)
+ __field(int, status)
+ __field(unsigned int, flag)
+ __field(u32, buf_len)
+ __field(u32, actual_len)
+ ),
+ TP_fast_assign(
+ __assign_str(name, dev_name(musb->controller));
+ __entry->urb = urb;
+ __entry->pipe = urb->pipe;
+ __entry->status = urb->status;
+ __entry->flag = urb->transfer_flags;
+ __entry->buf_len = urb->transfer_buffer_length;
+ __entry->actual_len = urb->actual_length;
+ ),
+ TP_printk("%s: %p, dev%d ep%d%s, flag 0x%x, len %d/%d, status %d",
+ __get_str(name), __entry->urb,
+ usb_pipedevice(__entry->pipe),
+ usb_pipeendpoint(__entry->pipe),
+ usb_pipein(__entry->pipe) ? "in" : "out",
+ __entry->flag,
+ __entry->actual_len, __entry->buf_len,
+ __entry->status
+ )
+);
+
+DEFINE_EVENT(musb_urb, musb_urb_start,
+ TP_PROTO(struct musb *musb, struct urb *urb),
+ TP_ARGS(musb, urb)
+);
+
+DEFINE_EVENT(musb_urb, musb_urb_gb,
+ TP_PROTO(struct musb *musb, struct urb *urb),
+ TP_ARGS(musb, urb)
+);
+
+DEFINE_EVENT(musb_urb, musb_urb_rx,
+ TP_PROTO(struct musb *musb, struct urb *urb),
+ TP_ARGS(musb, urb)
+);
+
+DEFINE_EVENT(musb_urb, musb_urb_tx,
+ TP_PROTO(struct musb *musb, struct urb *urb),
+ TP_ARGS(musb, urb)
+);
+
+DEFINE_EVENT(musb_urb, musb_urb_enq,
+ TP_PROTO(struct musb *musb, struct urb *urb),
+ TP_ARGS(musb, urb)
+);
+
+DEFINE_EVENT(musb_urb, musb_urb_deq,
+ TP_PROTO(struct musb *musb, struct urb *urb),
+ TP_ARGS(musb, urb)
+);
+
+DECLARE_EVENT_CLASS(musb_req,
+ TP_PROTO(struct musb_request *req),
+ TP_ARGS(req),
+ TP_STRUCT__entry(
+ __field(struct usb_request *, req)
+ __field(u8, is_tx)
+ __field(u8, epnum)
+ __field(int, status)
+ __field(unsigned int, buf_len)
+ __field(unsigned int, actual_len)
+ __field(unsigned int, zero)
+ __field(unsigned int, short_not_ok)
+ __field(unsigned int, no_interrupt)
+ ),
+ TP_fast_assign(
+ __entry->req = &req->request;
+ __entry->is_tx = req->tx;
+ __entry->epnum = req->epnum;
+ __entry->status = req->request.status;
+ __entry->buf_len = req->request.length;
+ __entry->actual_len = req->request.actual;
+ __entry->zero = req->request.zero;
+ __entry->short_not_ok = req->request.short_not_ok;
+ __entry->no_interrupt = req->request.no_interrupt;
+ ),
+ TP_printk("%p, ep%d %s, %s%s%s, len %d/%d, status %d",
+ __entry->req, __entry->epnum,
+ __entry->is_tx ? "tx/IN" : "rx/OUT",
+ __entry->zero ? "Z" : "z",
+ __entry->short_not_ok ? "S" : "s",
+ __entry->no_interrupt ? "I" : "i",
+ __entry->actual_len, __entry->buf_len,
+ __entry->status
+ )
+);
+
+DEFINE_EVENT(musb_req, musb_req_gb,
+ TP_PROTO(struct musb_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(musb_req, musb_req_tx,
+ TP_PROTO(struct musb_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(musb_req, musb_req_rx,
+ TP_PROTO(struct musb_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(musb_req, musb_req_alloc,
+ TP_PROTO(struct musb_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(musb_req, musb_req_free,
+ TP_PROTO(struct musb_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(musb_req, musb_req_start,
+ TP_PROTO(struct musb_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(musb_req, musb_req_enq,
+ TP_PROTO(struct musb_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(musb_req, musb_req_deq,
+ TP_PROTO(struct musb_request *req),
+ TP_ARGS(req)
+);
+
+#ifdef CONFIG_USB_TI_CPPI41_DMA
+DECLARE_EVENT_CLASS(musb_cppi41,
+ TP_PROTO(struct cppi41_dma_channel *ch),
+ TP_ARGS(ch),
+ TP_STRUCT__entry(
+ __field(struct cppi41_dma_channel *, ch)
+ __string(name, dev_name(ch->hw_ep->musb->controller))
+ __field(u8, hwep)
+ __field(u8, port)
+ __field(u8, is_tx)
+ __field(u32, len)
+ __field(u32, prog_len)
+ __field(u32, xferred)
+ ),
+ TP_fast_assign(
+ __entry->ch = ch;
+ __assign_str(name, dev_name(ch->hw_ep->musb->controller));
+ __entry->hwep = ch->hw_ep->epnum;
+ __entry->port = ch->port_num;
+ __entry->is_tx = ch->is_tx;
+ __entry->len = ch->total_len;
+ __entry->prog_len = ch->prog_len;
+ __entry->xferred = ch->transferred;
+ ),
+ TP_printk("%s: %p, hwep%d ch%d%s, prog_len %d, len %d/%d",
+ __get_str(name), __entry->ch, __entry->hwep,
+ __entry->port, __entry->is_tx ? "tx" : "rx",
+ __entry->prog_len, __entry->xferred, __entry->len
+ )
+);
+
+DEFINE_EVENT(musb_cppi41, musb_cppi41_done,
+ TP_PROTO(struct cppi41_dma_channel *ch),
+ TP_ARGS(ch)
+);
+
+DEFINE_EVENT(musb_cppi41, musb_cppi41_gb,
+ TP_PROTO(struct cppi41_dma_channel *ch),
+ TP_ARGS(ch)
+);
+
+DEFINE_EVENT(musb_cppi41, musb_cppi41_config,
+ TP_PROTO(struct cppi41_dma_channel *ch),
+ TP_ARGS(ch)
+);
+
+DEFINE_EVENT(musb_cppi41, musb_cppi41_cont,
+ TP_PROTO(struct cppi41_dma_channel *ch),
+ TP_ARGS(ch)
+);
+
+DEFINE_EVENT(musb_cppi41, musb_cppi41_alloc,
+ TP_PROTO(struct cppi41_dma_channel *ch),
+ TP_ARGS(ch)
+);
+
+DEFINE_EVENT(musb_cppi41, musb_cppi41_abort,
+ TP_PROTO(struct cppi41_dma_channel *ch),
+ TP_ARGS(ch)
+);
+
+DEFINE_EVENT(musb_cppi41, musb_cppi41_free,
+ TP_PROTO(struct cppi41_dma_channel *ch),
+ TP_ARGS(ch)
+);
+#endif /* CONFIG_USB_TI_CPPI41_DMA */
+
+#endif /* __MUSB_TRACE_H */
+
+/* this part has to be here */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE musb_trace
+
+#include <trace/define_trace.h>
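
The footer above is what makes the re-read trick work: <trace/define_trace.h>
includes this header a second time (TRACE_HEADER_MULTI_READ makes that legal),
and because musb_trace.h lives in the driver directory rather than under
include/trace/events/, TRACE_INCLUDE_PATH and TRACE_INCLUDE_FILE must name it
explicitly. In-tree this also relies on the musb Makefile passing -I$(src)
when building musb_trace.o; that hunk is not shown in this excerpt. Once
built, the events appear under events/musb/ in tracefs, named after
TRACE_SYSTEM. Callers simply use the generated stubs, e.g. (hypothetical
helper, for illustration):

#include "musb_trace.h"

static void example_irq_tail(struct musb *musb)
{
	/* compiled to a patched-out no-op unless events/musb/musb_isr is
	 * enabled in tracefs */
	trace_musb_isr(musb);
}
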
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index 92d5f7186..fe08e776f 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -55,8 +55,7 @@ void musb_host_finish_resume(struct work_struct *work)
power = musb_readb(musb->mregs, MUSB_POWER);
power &= ~MUSB_POWER_RESUME;
- dev_dbg(musb->controller, "root port resume stopped, power %02x\n",
- power);
+ musb_dbg(musb, "root port resume stopped, power %02x", power);
musb_writeb(musb->mregs, MUSB_POWER, power);
/*
@@ -104,7 +103,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
break;
}
- dev_dbg(musb->controller, "Root port suspended, power %02x\n", power);
+ musb_dbg(musb, "Root port suspended, power %02x", power);
musb->port1_status |= USB_PORT_STAT_SUSPEND;
switch (musb->xceiv->otg->state) {
@@ -123,7 +122,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
musb_platform_try_idle(musb, 0);
break;
default:
- dev_dbg(musb->controller, "bogus rh suspend? %s\n",
+ musb_dbg(musb, "bogus rh suspend? %s",
usb_otg_state_string(musb->xceiv->otg->state));
}
} else if (power & MUSB_POWER_SUSPENDM) {
@@ -131,7 +130,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
power |= MUSB_POWER_RESUME;
musb_writeb(mbase, MUSB_POWER, power);
- dev_dbg(musb->controller, "Root port resuming, power %02x\n", power);
+ musb_dbg(musb, "Root port resuming, power %02x", power);
/* later, GetPortStatus will stop RESUME signaling */
musb->port1_status |= MUSB_PORT_STAT_RESUME;
@@ -146,7 +145,7 @@ void musb_port_reset(struct musb *musb, bool do_reset)
void __iomem *mbase = musb->mregs;
if (musb->xceiv->otg->state == OTG_STATE_B_IDLE) {
- dev_dbg(musb->controller, "HNP: Returning from HNP; no hub reset from b_idle\n");
+ musb_dbg(musb, "HNP: Returning from HNP; no hub reset from b_idle");
musb->port1_status &= ~USB_PORT_STAT_RESET;
return;
}
@@ -194,7 +193,7 @@ void musb_port_reset(struct musb *musb, bool do_reset)
schedule_delayed_work(&musb->deassert_reset_work,
msecs_to_jiffies(50));
} else {
- dev_dbg(musb->controller, "root port reset stopped\n");
+ musb_dbg(musb, "root port reset stopped");
musb_platform_pre_root_reset_end(musb);
musb_writeb(mbase, MUSB_POWER,
power & ~MUSB_POWER_RESET);
@@ -202,7 +201,7 @@ void musb_port_reset(struct musb *musb, bool do_reset)
power = musb_readb(mbase, MUSB_POWER);
if (power & MUSB_POWER_HSMODE) {
- dev_dbg(musb->controller, "high-speed device connected\n");
+ musb_dbg(musb, "high-speed device connected");
musb->port1_status |= USB_PORT_STAT_HIGH_SPEED;
}
@@ -242,7 +241,7 @@ void musb_root_disconnect(struct musb *musb)
musb->xceiv->otg->state = OTG_STATE_B_IDLE;
break;
default:
- dev_dbg(musb->controller, "host disconnect (%s)\n",
+ musb_dbg(musb, "host disconnect (%s)",
usb_otg_state_string(musb->xceiv->otg->state));
}
}
@@ -291,6 +290,7 @@ int musb_hub_control(
u32 temp;
int retval = 0;
unsigned long flags;
+ bool start_musb = false;
spin_lock_irqsave(&musb->lock, flags);
@@ -337,7 +337,7 @@ int musb_hub_control(
default:
goto error;
}
- dev_dbg(musb->controller, "clear feature %d\n", wValue);
+ musb_dbg(musb, "clear feature %d", wValue);
musb->port1_status &= ~(1 << wValue);
break;
case GetHubDescriptor:
@@ -372,8 +372,7 @@ int musb_hub_control(
(__le32 *) buf);
/* port change status is more interesting */
- dev_dbg(musb->controller, "port status %08x\n",
- musb->port1_status);
+ musb_dbg(musb, "port status %08x", musb->port1_status);
break;
case SetPortFeature:
if ((wIndex & 0xff) != 1)
@@ -392,7 +391,7 @@ int musb_hub_control(
* logic relating to VBUS power-up.
*/
if (!hcd->self.is_b_host && musb_has_gadget(musb))
- musb_start(musb);
+ start_musb = true;
break;
case USB_PORT_FEAT_RESET:
musb_port_reset(musb, true);
@@ -443,7 +442,7 @@ int musb_hub_control(
default:
goto error;
}
- dev_dbg(musb->controller, "set feature %d\n", wValue);
+ musb_dbg(musb, "set feature %d", wValue);
musb->port1_status |= 1 << wValue;
break;
@@ -453,5 +452,9 @@ error:
retval = -EPIPE;
}
spin_unlock_irqrestore(&musb->lock, flags);
+
+ if (start_musb)
+ musb_start(musb);
+
return retval;
}
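
The start_musb flag introduced above is the usual defer-past-the-lock pattern:
the decision to start the controller is recorded inside the critical section,
but musb_start() itself, which calls down into platform glue, only runs once
musb->lock has been dropped. Reduced to a sketch:

bool start_musb = false;

spin_lock_irqsave(&musb->lock, flags);
/* ... SetPortFeature(USB_PORT_FEAT_POWER) handling ... */
if (!hcd->self.is_b_host && musb_has_gadget(musb))
	start_musb = true;		/* decide now, act later */
spin_unlock_irqrestore(&musb->lock, flags);

if (start_musb)				/* safe: lock no longer held */
	musb_start(musb);
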
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 8abfe4ec6..3620073da 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -117,7 +117,7 @@ static void configure_channel(struct dma_channel *channel,
u8 bchannel = musb_channel->idx;
u16 csr = 0;
- dev_dbg(musb->controller, "%p, pkt_sz %d, addr %pad, len %d, mode %d\n",
+ musb_dbg(musb, "%p, pkt_sz %d, addr %pad, len %d, mode %d",
channel, packet_sz, &dma_addr, len, mode);
if (mode) {
@@ -152,7 +152,7 @@ static int dma_channel_program(struct dma_channel *channel,
struct musb_dma_controller *controller = musb_channel->controller;
struct musb *musb = controller->private_data;
- dev_dbg(musb->controller, "ep%d-%s pkt_sz %d, dma_addr %pad length %d, mode %d\n",
+ musb_dbg(musb, "ep%d-%s pkt_sz %d, dma_addr %pad length %d, mode %d",
musb_channel->epnum,
musb_channel->transmit ? "Tx" : "Rx",
packet_sz, &dma_addr, len, mode);
@@ -266,7 +266,7 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
#endif
if (!int_hsdma) {
- dev_dbg(musb->controller, "spurious DMA irq\n");
+ musb_dbg(musb, "spurious DMA irq");
for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) {
musb_channel = (struct musb_dma_channel *)
@@ -280,7 +280,7 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
}
}
- dev_dbg(musb->controller, "int_hsdma = 0x%x\n", int_hsdma);
+ musb_dbg(musb, "int_hsdma = 0x%x", int_hsdma);
if (!int_hsdma)
goto done;
@@ -307,7 +307,7 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
channel->actual_len = addr
- musb_channel->start_addr;
- dev_dbg(musb->controller, "ch %p, 0x%x -> 0x%x (%zu / %d) %s\n",
+ musb_dbg(musb, "ch %p, 0x%x -> 0x%x (%zu / %d) %s",
channel, musb_channel->start_addr,
addr, channel->actual_len,
musb_channel->len,
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index 76500515d..c6ee16660 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -256,12 +256,10 @@ static int sunxi_musb_init(struct musb *musb)
writeb(SUNXI_MUSB_VEND0_PIO_MODE, musb->mregs + SUNXI_MUSB_VEND0);
/* Register notifier before calling phy_init() */
- if (musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE) {
- ret = extcon_register_notifier(glue->extcon, EXTCON_USB_HOST,
- &glue->host_nb);
- if (ret)
- goto error_reset_assert;
- }
+ ret = extcon_register_notifier(glue->extcon, EXTCON_USB_HOST,
+ &glue->host_nb);
+ if (ret)
+ goto error_reset_assert;
ret = phy_init(glue->phy);
if (ret)
@@ -275,9 +273,8 @@ static int sunxi_musb_init(struct musb *musb)
return 0;
error_unregister_notifier:
- if (musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
- extcon_unregister_notifier(glue->extcon, EXTCON_USB_HOST,
- &glue->host_nb);
+ extcon_unregister_notifier(glue->extcon, EXTCON_USB_HOST,
+ &glue->host_nb);
error_reset_assert:
if (test_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags))
reset_control_assert(glue->rst);
@@ -301,9 +298,8 @@ static int sunxi_musb_exit(struct musb *musb)
phy_exit(glue->phy);
- if (musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
- extcon_unregister_notifier(glue->extcon, EXTCON_USB_HOST,
- &glue->host_nb);
+ extcon_unregister_notifier(glue->extcon, EXTCON_USB_HOST,
+ &glue->host_nb);
if (test_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags))
reset_control_assert(glue->rst);
@@ -315,25 +311,6 @@ static int sunxi_musb_exit(struct musb *musb)
return 0;
}
-static int sunxi_set_mode(struct musb *musb, u8 mode)
-{
- struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
- int ret;
-
- if (mode == MUSB_HOST) {
- ret = phy_power_on(glue->phy);
- if (ret)
- return ret;
-
- set_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
- /* Stop musb work from turning vbus off again */
- set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
- musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
- }
-
- return 0;
-}
-
static void sunxi_musb_enable(struct musb *musb)
{
struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
@@ -354,13 +331,13 @@ static void sunxi_musb_disable(struct musb *musb)
clear_bit(SUNXI_MUSB_FL_ENABLED, &glue->flags);
}
-struct dma_controller *sunxi_musb_dma_controller_create(struct musb *musb,
- void __iomem *base)
+static struct dma_controller *
+sunxi_musb_dma_controller_create(struct musb *musb, void __iomem *base)
{
return NULL;
}
-void sunxi_musb_dma_controller_destroy(struct dma_controller *c)
+static void sunxi_musb_dma_controller_destroy(struct dma_controller *c)
{
}
@@ -582,7 +559,6 @@ static const struct musb_platform_ops sunxi_musb_ops = {
.exit = sunxi_musb_exit,
.enable = sunxi_musb_enable,
.disable = sunxi_musb_disable,
- .set_mode = sunxi_set_mode,
.fifo_offset = sunxi_musb_fifo_offset,
.ep_offset = sunxi_musb_ep_offset,
.busctl_offset = sunxi_musb_busctl_offset,
@@ -638,10 +614,6 @@ static int sunxi_musb_probe(struct platform_device *pdev)
return -EINVAL;
}
- glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
- if (!glue)
- return -ENOMEM;
-
memset(&pdata, 0, sizeof(pdata));
switch (usb_get_dr_mode(&pdev->dev)) {
#if defined CONFIG_USB_MUSB_DUAL_ROLE || defined CONFIG_USB_MUSB_HOST
@@ -649,15 +621,13 @@ static int sunxi_musb_probe(struct platform_device *pdev)
pdata.mode = MUSB_PORT_MODE_HOST;
break;
#endif
+#if defined CONFIG_USB_MUSB_DUAL_ROLE || defined CONFIG_USB_MUSB_GADGET
+ case USB_DR_MODE_PERIPHERAL:
+ pdata.mode = MUSB_PORT_MODE_GADGET;
+ break;
+#endif
#ifdef CONFIG_USB_MUSB_DUAL_ROLE
case USB_DR_MODE_OTG:
- glue->extcon = extcon_get_edev_by_phandle(&pdev->dev, 0);
- if (IS_ERR(glue->extcon)) {
- if (PTR_ERR(glue->extcon) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dev_err(&pdev->dev, "Invalid or missing extcon\n");
- return PTR_ERR(glue->extcon);
- }
pdata.mode = MUSB_PORT_MODE_DUAL_ROLE;
break;
#endif
@@ -668,6 +638,10 @@ static int sunxi_musb_probe(struct platform_device *pdev)
pdata.platform_ops = &sunxi_musb_ops;
pdata.config = &sunxi_musb_hdrc_config;
+ glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
+ if (!glue)
+ return -ENOMEM;
+
glue->dev = &pdev->dev;
INIT_WORK(&glue->work, sunxi_musb_work);
glue->host_nb.notifier_call = sunxi_musb_host_notifier;
@@ -701,6 +675,14 @@ static int sunxi_musb_probe(struct platform_device *pdev)
}
}
+ glue->extcon = extcon_get_edev_by_phandle(&pdev->dev, 0);
+ if (IS_ERR(glue->extcon)) {
+ if (PTR_ERR(glue->extcon) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_err(&pdev->dev, "Invalid or missing extcon\n");
+ return PTR_ERR(glue->extcon);
+ }
+
glue->phy = devm_phy_get(&pdev->dev, "usb");
if (IS_ERR(glue->phy)) {
if (PTR_ERR(glue->phy) == -EPROBE_DEFER)
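
The sunxi probe reordering above also shows the standard deferred-probe idiom: extcon_get_edev_by_phandle() is now called for every mode, and -EPROBE_DEFER is propagated so the driver core retries once the extcon provider binds. A condensed sketch of that idiom, assuming an extcon phandle at index 0:

#include <linux/err.h>
#include <linux/extcon.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct extcon_dev *edev;

	edev = extcon_get_edev_by_phandle(&pdev->dev, 0);
	if (IS_ERR(edev)) {
		/* provider not bound yet: let the core retry this probe */
		if (PTR_ERR(edev) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_err(&pdev->dev, "invalid or missing extcon\n");
		return PTR_ERR(edev);
	}
	/* ... register notifiers and continue initialization ... */
	return 0;
}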
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index c6904742e..b9c409a18 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -21,6 +21,7 @@ config AB8500_USB
config FSL_USB2_OTG
bool "Freescale USB OTG Transceiver Driver"
depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM
+ depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
select USB_PHY
help
Enable this to support Freescale USB OTG transceiver.
@@ -29,6 +30,7 @@ config ISP1301_OMAP
tristate "Philips ISP1301 with OMAP OTG"
depends on I2C && ARCH_OMAP_OTG
depends on USB
+ depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
select USB_PHY
help
If you say yes here you get support for the Philips ISP1301
@@ -43,7 +45,7 @@ config ISP1301_OMAP
config KEYSTONE_USB_PHY
tristate "Keystone USB PHY Driver"
depends on ARCH_KEYSTONE || COMPILE_TEST
- select NOP_USB_XCEIV
+ depends on NOP_USB_XCEIV
help
Enable this to support Keystone USB phy. This driver provides
interface to interact with USB 2.0 and USB 3.0 PHY that is part
@@ -51,6 +53,7 @@ config KEYSTONE_USB_PHY
config NOP_USB_XCEIV
tristate "NOP USB Transceiver Driver"
+ depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, NOP can't be built-in
select USB_PHY
help
This driver is to be used by all the usb transceiver which are either
@@ -63,9 +66,9 @@ config AM335X_CONTROL_USB
config AM335X_PHY_USB
tristate "AM335x USB PHY Driver"
depends on ARM || COMPILE_TEST
+ depends on NOP_USB_XCEIV
select USB_PHY
select AM335X_CONTROL_USB
- select NOP_USB_XCEIV
select USB_COMMON
help
 This driver provides PHY support for the PHY that is part of the
@@ -92,6 +95,7 @@ config TWL6030_USB
config USB_GPIO_VBUS
tristate "GPIO based peripheral-only VBUS sensing 'transceiver'"
depends on GPIOLIB || COMPILE_TEST
+ depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
select USB_PHY
help
Provides simple GPIO VBUS sensing for controllers with an
@@ -112,6 +116,7 @@ config OMAP_OTG
config TAHVO_USB
tristate "Tahvo USB transceiver driver"
depends on MFD_RETU && EXTCON
+ depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
select USB_PHY
help
Enable this to support USB transceiver on Tahvo. This is used
@@ -140,6 +145,7 @@ config USB_ISP1301
config USB_MSM_OTG
tristate "Qualcomm on-chip USB OTG controller support"
depends on (USB || USB_GADGET) && (ARCH_QCOM || COMPILE_TEST)
+ depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
depends on RESET_CONTROLLER
depends on EXTCON
select USB_PHY
@@ -169,6 +175,7 @@ config USB_QCOM_8X16_PHY
config USB_MV_OTG
tristate "Marvell USB OTG support"
depends on USB_EHCI_MV && USB_MV_UDC && PM && USB_OTG
+ depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
select USB_PHY
help
 Say Y here if you want to build Marvell USB OTG transceiver
diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c
index a262a4343..7e5aece76 100644
--- a/drivers/usb/phy/phy-am335x.c
+++ b/drivers/usb/phy/phy-am335x.c
@@ -54,7 +54,7 @@ static int am335x_phy_probe(struct platform_device *pdev)
return am_phy->id;
}
- am_phy->dr_mode = of_usb_get_dr_mode_by_phy(pdev->dev.of_node);
+ am_phy->dr_mode = of_usb_get_dr_mode_by_phy(pdev->dev.of_node, -1);
ret = usb_phy_gen_create_phy(dev, &am_phy->usb_phy_gen, NULL);
if (ret)
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index 980c9dee0..427efb5ee 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -144,14 +144,18 @@ static irqreturn_t nop_gpio_vbus_thread(int irq, void *data)
int usb_gen_phy_init(struct usb_phy *phy)
{
struct usb_phy_generic *nop = dev_get_drvdata(phy->dev);
+ int ret;
if (!IS_ERR(nop->vcc)) {
if (regulator_enable(nop->vcc))
dev_err(phy->dev, "Failed to enable power\n");
}
- if (!IS_ERR(nop->clk))
- clk_prepare_enable(nop->clk);
+ if (!IS_ERR(nop->clk)) {
+ ret = clk_prepare_enable(nop->clk);
+ if (ret)
+ return ret;
+ }
nop_reset(nop);
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 72b387d59..8a3475972 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/extcon.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
@@ -35,6 +36,8 @@
#include <linux/of_device.h>
#include <linux/reboot.h>
#include <linux/reset.h>
+#include <linux/types.h>
+#include <linux/usb/otg.h>
#include <linux/usb.h>
#include <linux/usb/otg.h>
@@ -42,10 +45,183 @@
#include <linux/usb/ulpi.h>
#include <linux/usb/gadget.h>
#include <linux/usb/hcd.h>
-#include <linux/usb/msm_hsusb.h>
#include <linux/usb/msm_hsusb_hw.h>
#include <linux/regulator/consumer.h>
+/**
+ * OTG control
+ *
+ * OTG_NO_CONTROL Id/VBUS notifications not required. Useful in
+ * host-only configurations.
+ * OTG_PHY_CONTROL Id/VBUS notifications come from the USB PHY.
+ * OTG_PMIC_CONTROL Id/VBUS notifications come from PMIC hardware.
+ * OTG_USER_CONTROL Id/VBUS notifications come from the user via sysfs.
+ *
+ */
+enum otg_control_type {
+ OTG_NO_CONTROL = 0,
+ OTG_PHY_CONTROL,
+ OTG_PMIC_CONTROL,
+ OTG_USER_CONTROL,
+};
+
+/**
+ * PHY used in
+ *
+ * INVALID_PHY Unsupported PHY
+ * CI_45NM_INTEGRATED_PHY Chipidea 45nm integrated PHY
+ * SNPS_28NM_INTEGRATED_PHY Synopsys 28nm integrated PHY
+ *
+ */
+enum msm_usb_phy_type {
+ INVALID_PHY = 0,
+ CI_45NM_INTEGRATED_PHY,
+ SNPS_28NM_INTEGRATED_PHY,
+};
+
+#define IDEV_CHG_MAX 1500
+#define IUNIT 100
+
+/**
+ * Different states involved in USB charger detection.
+ *
+ * USB_CHG_STATE_UNDEFINED USB charger is not connected or detection
+ * process is not yet started.
+ * USB_CHG_STATE_WAIT_FOR_DCD Waiting for Data pins contact.
+ * USB_CHG_STATE_DCD_DONE Data pin contact is detected.
+ * USB_CHG_STATE_PRIMARY_DONE Primary detection is completed (Detects
+ * between SDP and DCP/CDP).
+ * USB_CHG_STATE_SECONDARY_DONE Secondary detection is completed (Detects
+ * between DCP and CDP).
+ * USB_CHG_STATE_DETECTED USB charger type is determined.
+ *
+ */
+enum usb_chg_state {
+ USB_CHG_STATE_UNDEFINED = 0,
+ USB_CHG_STATE_WAIT_FOR_DCD,
+ USB_CHG_STATE_DCD_DONE,
+ USB_CHG_STATE_PRIMARY_DONE,
+ USB_CHG_STATE_SECONDARY_DONE,
+ USB_CHG_STATE_DETECTED,
+};
+
+/**
+ * USB charger types
+ *
+ * USB_INVALID_CHARGER Invalid USB charger.
+ * USB_SDP_CHARGER Standard downstream port. Refers to a downstream port
+ * on USB2.0 compliant host/hub.
+ * USB_DCP_CHARGER Dedicated charger port (AC charger/ Wall charger).
+ * USB_CDP_CHARGER Charging downstream port. Enumeration can happen and
+ * IDEV_CHG_MAX can be drawn irrespective of USB state.
+ *
+ */
+enum usb_chg_type {
+ USB_INVALID_CHARGER = 0,
+ USB_SDP_CHARGER,
+ USB_DCP_CHARGER,
+ USB_CDP_CHARGER,
+};
+
+/**
+ * struct msm_otg_platform_data - platform device data
+ * for msm_otg driver.
+ * @phy_init_seq: PHY configuration sequence values. Value of -1 is reserved as
+ * "do not overwrite default vaule at this address".
+ * @phy_init_sz: PHY configuration sequence size.
+ * @vbus_power: VBUS power on/off routine.
+ * @power_budget: VBUS power budget in mA (0 will be treated as 500mA).
+ * @mode: Supported mode (OTG/peripheral/host).
+ * @otg_control: OTG switch controlled by user/Id pin
+ */
+struct msm_otg_platform_data {
+ int *phy_init_seq;
+ int phy_init_sz;
+ void (*vbus_power)(bool on);
+ unsigned power_budget;
+ enum usb_dr_mode mode;
+ enum otg_control_type otg_control;
+ enum msm_usb_phy_type phy_type;
+ void (*setup_gpio)(enum usb_otg_state state);
+};
+
+/**
+ * struct msm_usb_cable - structure for external connector cable
+ * state tracking
+ * @nb: holds the event notification callback
+ * @extcon: used for notifier registration
+ */
+struct msm_usb_cable {
+ struct notifier_block nb;
+ struct extcon_dev *extcon;
+};
+
+/**
+ * struct msm_otg: OTG driver data. Shared by HCD and DCD.
+ * @otg: USB OTG Transceiver structure.
+ * @pdata: otg device platform data.
+ * @irq: IRQ number assigned for HSUSB controller.
+ * @clk: clock struct of usb_hs_clk.
+ * @pclk: clock struct of usb_hs_pclk.
+ * @core_clk: clock struct of usb_hs_core_clk.
+ * @regs: ioremapped register base address.
+ * @inputs: OTG state machine inputs(Id, SessValid etc).
+ * @sm_work: OTG state machine work.
+ * @in_lpm: indicates low power mode (LPM) state.
+ * @async_int: Async interrupt arrived.
+ * @cur_power: The amount of mA available from downstream port.
+ * @chg_work: Charger detection work.
+ * @chg_state: The state of charger detection process.
+ * @chg_type: The type of charger attached.
+ * @dcd_retries: The retry count used to track Data contact
+ * detection process.
+ * @manual_pullup: true if VBUS is not routed to USB controller/phy
+ * and controller driver therefore enables pull-up explicitly before
+ * starting controller using usbcmd run/stop bit.
+ * @vbus: VBUS signal state tracking, using the extcon framework
+ * @id: ID signal state tracking, using the extcon framework
+ * @switch_gpio: Descriptor for GPIO used to control external Dual
+ * SPDT USB Switch.
+ * @reboot: Used to inform the driver to route USB D+/D- line to Device
+ * connector
+ */
+struct msm_otg {
+ struct usb_phy phy;
+ struct msm_otg_platform_data *pdata;
+ int irq;
+ struct clk *clk;
+ struct clk *pclk;
+ struct clk *core_clk;
+ void __iomem *regs;
+#define ID 0
+#define B_SESS_VLD 1
+ unsigned long inputs;
+ struct work_struct sm_work;
+ atomic_t in_lpm;
+ int async_int;
+ unsigned cur_power;
+ int phy_number;
+ struct delayed_work chg_work;
+ enum usb_chg_state chg_state;
+ enum usb_chg_type chg_type;
+ u8 dcd_retries;
+ struct regulator *v3p3;
+ struct regulator *v1p8;
+ struct regulator *vddcx;
+
+ struct reset_control *phy_rst;
+ struct reset_control *link_rst;
+ int vdd_levels[3];
+
+ bool manual_pullup;
+
+ struct msm_usb_cable vbus;
+ struct msm_usb_cable id;
+
+ struct gpio_desc *switch_gpio;
+ struct notifier_block reboot;
+};
+
#define MSM_USB_BASE (motg->regs)
#define DRIVER_NAME "msm_otg"
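
The charger-detection enums moved in above describe a strictly linear flow: wait for data-pin contact (DCD), run primary detection to separate an SDP from a DCP/CDP, then secondary detection to separate a DCP from a CDP. A schematic advance function mirroring enum usb_chg_state, not the driver's actual state machine; data_pins_made_contact() is a hypothetical stand-in for real DCD sampling:

static enum usb_chg_state chg_advance(enum usb_chg_state state)
{
	switch (state) {
	case USB_CHG_STATE_UNDEFINED:
		return USB_CHG_STATE_WAIT_FOR_DCD;	/* start DCD */
	case USB_CHG_STATE_WAIT_FOR_DCD:
		return data_pins_made_contact() ?
			USB_CHG_STATE_DCD_DONE : state;
	case USB_CHG_STATE_DCD_DONE:
		/* primary detection: SDP vs DCP/CDP */
		return USB_CHG_STATE_PRIMARY_DONE;
	case USB_CHG_STATE_PRIMARY_DONE:
		/* secondary detection: DCP vs CDP */
		return USB_CHG_STATE_SECONDARY_DONE;
	case USB_CHG_STATE_SECONDARY_DONE:
		return USB_CHG_STATE_DETECTED;
	default:
		return state;
	}
}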
diff --git a/drivers/usb/phy/phy-omap-otg.c b/drivers/usb/phy/phy-omap-otg.c
index c4bf2de6d..6523af4f8 100644
--- a/drivers/usb/phy/phy-omap-otg.c
+++ b/drivers/usb/phy/phy-omap-otg.c
@@ -140,6 +140,8 @@ static int omap_otg_probe(struct platform_device *pdev)
(rev >> 4) & 0xf, rev & 0xf, config->extcon, otg_dev->id,
otg_dev->vbus);
+ platform_set_drvdata(pdev, otg_dev);
+
return 0;
}
@@ -148,7 +150,7 @@ static int omap_otg_remove(struct platform_device *pdev)
struct otg_device *otg_dev = platform_get_drvdata(pdev);
struct extcon_dev *edev = otg_dev->extcon;
- extcon_unregister_notifier(edev, EXTCON_USB_HOST,&otg_dev->id_nb);
+ extcon_unregister_notifier(edev, EXTCON_USB_HOST, &otg_dev->id_nb);
extcon_unregister_notifier(edev, EXTCON_USB, &otg_dev->vbus_nb);
return 0;
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 8c81aac09..ac67bab91 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -698,7 +698,7 @@ probe_end_fifo_exit:
probe_end_pipe_exit:
usbhs_pipe_remove(priv);
- dev_info(&pdev->dev, "probe failed\n");
+ dev_info(&pdev->dev, "probe failed (%d)\n", ret);
return ret;
}
diff --git a/drivers/usb/renesas_usbhs/rcar3.c b/drivers/usb/renesas_usbhs/rcar3.c
index 38b01f2ae..1d70add92 100644
--- a/drivers/usb/renesas_usbhs/rcar3.c
+++ b/drivers/usb/renesas_usbhs/rcar3.c
@@ -23,7 +23,7 @@
#define UGCTRL2_RESERVED_3 0x00000001 /* bit[3:0] should be B'0001 */
#define UGCTRL2_USB0SEL_OTG 0x00000030
-void usbhs_write32(struct usbhs_priv *priv, u32 reg, u32 data)
+static void usbhs_write32(struct usbhs_priv *priv, u32 reg, u32 data)
{
iowrite32(data, priv->base + reg);
}
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 96a70789b..54a4de0ef 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -118,6 +118,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
+ { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
{ USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
@@ -496,12 +497,10 @@ static int cp210x_write_reg_block(struct usb_serial_port *port, u8 req,
void *dmabuf;
int result;
- dmabuf = kmalloc(bufsize, GFP_KERNEL);
+ dmabuf = kmemdup(buf, bufsize, GFP_KERNEL);
if (!dmabuf)
return -ENOMEM;
- memcpy(dmabuf, buf, bufsize);
-
result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
req, REQTYPE_HOST_TO_INTERFACE, 0,
port_priv->bInterfaceNumber, dmabuf, bufsize,
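
The cp210x change replaces an open-coded allocate-and-copy with kmemdup(), which has identical semantics in a single call:

/* Before: two steps. */
dmabuf = kmalloc(bufsize, GFP_KERNEL);
if (!dmabuf)
	return -ENOMEM;
memcpy(dmabuf, buf, bufsize);

/* After: one step, same result. */
dmabuf = kmemdup(buf, bufsize, GFP_KERNEL);
if (!dmabuf)
	return -ENOMEM;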
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index ae8c0365a..944de657a 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -350,6 +350,7 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb)
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
unsigned long flags;
+ int status = urb->status;
int i;
for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i) {
@@ -360,22 +361,22 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb)
dev_dbg(&port->dev, "%s - urb %d, len %d\n", __func__, i,
urb->actual_length);
- switch (urb->status) {
+ switch (status) {
case 0:
break;
case -ENOENT:
case -ECONNRESET:
case -ESHUTDOWN:
dev_dbg(&port->dev, "%s - urb stopped: %d\n",
- __func__, urb->status);
+ __func__, status);
return;
case -EPIPE:
dev_err(&port->dev, "%s - urb stopped: %d\n",
- __func__, urb->status);
+ __func__, status);
return;
default:
dev_dbg(&port->dev, "%s - nonzero urb status: %d\n",
- __func__, urb->status);
+ __func__, status);
goto resubmit;
}
@@ -399,6 +400,7 @@ void usb_serial_generic_write_bulk_callback(struct urb *urb)
{
unsigned long flags;
struct usb_serial_port *port = urb->context;
+ int status = urb->status;
int i;
for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i) {
@@ -410,22 +412,22 @@ void usb_serial_generic_write_bulk_callback(struct urb *urb)
set_bit(i, &port->write_urbs_free);
spin_unlock_irqrestore(&port->lock, flags);
- switch (urb->status) {
+ switch (status) {
case 0:
break;
case -ENOENT:
case -ECONNRESET:
case -ESHUTDOWN:
dev_dbg(&port->dev, "%s - urb stopped: %d\n",
- __func__, urb->status);
+ __func__, status);
return;
case -EPIPE:
dev_err_console(port, "%s - urb stopped: %d\n",
- __func__, urb->status);
+ __func__, status);
return;
default:
dev_err_console(port, "%s - nonzero urb status: %d\n",
- __func__, urb->status);
+ __func__, status);
goto resubmit;
}
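
The generic-serial change snapshots urb->status into a local at the top of each completion handler, so the whole handler reasons about one consistent value even when the URB is resubmitted later in the function. The shape of the pattern, as a sketch:

static void example_complete(struct urb *urb)
{
	int status = urb->status;	/* read once, use everywhere */

	switch (status) {
	case 0:
		break;			/* success */
	case -ENOENT:
	case -ECONNRESET:
	case -ESHUTDOWN:
		return;			/* urb was stopped */
	default:
		break;			/* log, then resubmit below */
	}
	/* any resubmission path uses the snapshot, not urb->status */
}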
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 510d14bd0..1cfc7b2a0 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1,5 +1,4 @@
-/* vi: ts=8 sw=8
- *
+/*
* TI 3410/5052 USB Serial Driver
*
* Copyright (C) 2004 Texas Instruments
@@ -35,9 +34,238 @@
#include <linux/usb.h>
#include <linux/usb/serial.h>
-#include "ti_usb_3410_5052.h"
-
-/* Defines */
+/* Configuration ids */
+#define TI_BOOT_CONFIG 1
+#define TI_ACTIVE_CONFIG 2
+
+/* Vendor and product ids */
+#define TI_VENDOR_ID 0x0451
+#define IBM_VENDOR_ID 0x04b3
+#define TI_3410_PRODUCT_ID 0x3410
+#define IBM_4543_PRODUCT_ID 0x4543
+#define IBM_454B_PRODUCT_ID 0x454b
+#define IBM_454C_PRODUCT_ID 0x454c
+#define TI_3410_EZ430_ID 0xF430 /* TI ez430 development tool */
+#define TI_5052_BOOT_PRODUCT_ID 0x5052 /* no EEPROM, no firmware */
+#define TI_5152_BOOT_PRODUCT_ID 0x5152 /* no EEPROM, no firmware */
+#define TI_5052_EEPROM_PRODUCT_ID 0x505A /* EEPROM, no firmware */
+#define TI_5052_FIRMWARE_PRODUCT_ID 0x505F /* firmware is running */
+#define FRI2_PRODUCT_ID 0x5053 /* Fish River Island II */
+
+/* Multi-Tech vendor and product ids */
+#define MTS_VENDOR_ID 0x06E0
+#define MTS_GSM_NO_FW_PRODUCT_ID 0xF108
+#define MTS_CDMA_NO_FW_PRODUCT_ID 0xF109
+#define MTS_CDMA_PRODUCT_ID 0xF110
+#define MTS_GSM_PRODUCT_ID 0xF111
+#define MTS_EDGE_PRODUCT_ID 0xF112
+#define MTS_MT9234MU_PRODUCT_ID 0xF114
+#define MTS_MT9234ZBA_PRODUCT_ID 0xF115
+#define MTS_MT9234ZBAOLD_PRODUCT_ID 0x0319
+
+/* Abbott Diabetics vendor and product ids */
+#define ABBOTT_VENDOR_ID 0x1a61
+#define ABBOTT_STEREO_PLUG_ID 0x3410
+#define ABBOTT_PRODUCT_ID ABBOTT_STEREO_PLUG_ID
+#define ABBOTT_STRIP_PORT_ID 0x3420
+
+/* Honeywell vendor and product IDs */
+#define HONEYWELL_VENDOR_ID 0x10ac
+#define HONEYWELL_HGI80_PRODUCT_ID 0x0102 /* Honeywell HGI80 */
+
+/* Moxa UPORT 11x0 vendor and product IDs */
+#define MXU1_VENDOR_ID 0x110a
+#define MXU1_1110_PRODUCT_ID 0x1110
+#define MXU1_1130_PRODUCT_ID 0x1130
+#define MXU1_1150_PRODUCT_ID 0x1150
+#define MXU1_1151_PRODUCT_ID 0x1151
+#define MXU1_1131_PRODUCT_ID 0x1131
+
+/* Commands */
+#define TI_GET_VERSION 0x01
+#define TI_GET_PORT_STATUS 0x02
+#define TI_GET_PORT_DEV_INFO 0x03
+#define TI_GET_CONFIG 0x04
+#define TI_SET_CONFIG 0x05
+#define TI_OPEN_PORT 0x06
+#define TI_CLOSE_PORT 0x07
+#define TI_START_PORT 0x08
+#define TI_STOP_PORT 0x09
+#define TI_TEST_PORT 0x0A
+#define TI_PURGE_PORT 0x0B
+#define TI_RESET_EXT_DEVICE 0x0C
+#define TI_WRITE_DATA 0x80
+#define TI_READ_DATA 0x81
+#define TI_REQ_TYPE_CLASS 0x82
+
+/* Module identifiers */
+#define TI_I2C_PORT 0x01
+#define TI_IEEE1284_PORT 0x02
+#define TI_UART1_PORT 0x03
+#define TI_UART2_PORT 0x04
+#define TI_RAM_PORT 0x05
+
+/* Modem status */
+#define TI_MSR_DELTA_CTS 0x01
+#define TI_MSR_DELTA_DSR 0x02
+#define TI_MSR_DELTA_RI 0x04
+#define TI_MSR_DELTA_CD 0x08
+#define TI_MSR_CTS 0x10
+#define TI_MSR_DSR 0x20
+#define TI_MSR_RI 0x40
+#define TI_MSR_CD 0x80
+#define TI_MSR_DELTA_MASK 0x0F
+#define TI_MSR_MASK 0xF0
+
+/* Line status */
+#define TI_LSR_OVERRUN_ERROR 0x01
+#define TI_LSR_PARITY_ERROR 0x02
+#define TI_LSR_FRAMING_ERROR 0x04
+#define TI_LSR_BREAK 0x08
+#define TI_LSR_ERROR 0x0F
+#define TI_LSR_RX_FULL 0x10
+#define TI_LSR_TX_EMPTY 0x20
+
+/* Line control */
+#define TI_LCR_BREAK 0x40
+
+/* Modem control */
+#define TI_MCR_LOOP 0x04
+#define TI_MCR_DTR 0x10
+#define TI_MCR_RTS 0x20
+
+/* Mask settings */
+#define TI_UART_ENABLE_RTS_IN 0x0001
+#define TI_UART_DISABLE_RTS 0x0002
+#define TI_UART_ENABLE_PARITY_CHECKING 0x0008
+#define TI_UART_ENABLE_DSR_OUT 0x0010
+#define TI_UART_ENABLE_CTS_OUT 0x0020
+#define TI_UART_ENABLE_X_OUT 0x0040
+#define TI_UART_ENABLE_XA_OUT 0x0080
+#define TI_UART_ENABLE_X_IN 0x0100
+#define TI_UART_ENABLE_DTR_IN 0x0800
+#define TI_UART_DISABLE_DTR 0x1000
+#define TI_UART_ENABLE_MS_INTS 0x2000
+#define TI_UART_ENABLE_AUTO_START_DMA 0x4000
+
+/* Parity */
+#define TI_UART_NO_PARITY 0x00
+#define TI_UART_ODD_PARITY 0x01
+#define TI_UART_EVEN_PARITY 0x02
+#define TI_UART_MARK_PARITY 0x03
+#define TI_UART_SPACE_PARITY 0x04
+
+/* Stop bits */
+#define TI_UART_1_STOP_BITS 0x00
+#define TI_UART_1_5_STOP_BITS 0x01
+#define TI_UART_2_STOP_BITS 0x02
+
+/* Bits per character */
+#define TI_UART_5_DATA_BITS 0x00
+#define TI_UART_6_DATA_BITS 0x01
+#define TI_UART_7_DATA_BITS 0x02
+#define TI_UART_8_DATA_BITS 0x03
+
+/* 232/485 modes */
+#define TI_UART_232 0x00
+#define TI_UART_485_RECEIVER_DISABLED 0x01
+#define TI_UART_485_RECEIVER_ENABLED 0x02
+
+/* Pipe transfer mode and timeout */
+#define TI_PIPE_MODE_CONTINUOUS 0x01
+#define TI_PIPE_MODE_MASK 0x03
+#define TI_PIPE_TIMEOUT_MASK 0x7C
+#define TI_PIPE_TIMEOUT_ENABLE 0x80
+
+/* Config struct */
+struct ti_uart_config {
+ __u16 wBaudRate;
+ __u16 wFlags;
+ __u8 bDataBits;
+ __u8 bParity;
+ __u8 bStopBits;
+ char cXon;
+ char cXoff;
+ __u8 bUartMode;
+} __packed;
+
+/* Get port status */
+struct ti_port_status {
+ __u8 bCmdCode;
+ __u8 bModuleId;
+ __u8 bErrorCode;
+ __u8 bMSR;
+ __u8 bLSR;
+} __packed;
+
+/* Purge modes */
+#define TI_PURGE_OUTPUT 0x00
+#define TI_PURGE_INPUT 0x80
+
+/* Read/Write data */
+#define TI_RW_DATA_ADDR_SFR 0x10
+#define TI_RW_DATA_ADDR_IDATA 0x20
+#define TI_RW_DATA_ADDR_XDATA 0x30
+#define TI_RW_DATA_ADDR_CODE 0x40
+#define TI_RW_DATA_ADDR_GPIO 0x50
+#define TI_RW_DATA_ADDR_I2C 0x60
+#define TI_RW_DATA_ADDR_FLASH 0x70
+#define TI_RW_DATA_ADDR_DSP 0x80
+
+#define TI_RW_DATA_UNSPECIFIED 0x00
+#define TI_RW_DATA_BYTE 0x01
+#define TI_RW_DATA_WORD 0x02
+#define TI_RW_DATA_DOUBLE_WORD 0x04
+
+struct ti_write_data_bytes {
+ __u8 bAddrType;
+ __u8 bDataType;
+ __u8 bDataCounter;
+ __be16 wBaseAddrHi;
+ __be16 wBaseAddrLo;
+ __u8 bData[0];
+} __packed;
+
+struct ti_read_data_request {
+ __u8 bAddrType;
+ __u8 bDataType;
+ __u8 bDataCounter;
+ __be16 wBaseAddrHi;
+ __be16 wBaseAddrLo;
+} __packed;
+
+struct ti_read_data_bytes {
+ __u8 bCmdCode;
+ __u8 bModuleId;
+ __u8 bErrorCode;
+ __u8 bData[0];
+} __packed;
+
+/* Interrupt struct */
+struct ti_interrupt {
+ __u8 bICode;
+ __u8 bIInfo;
+} __packed;
+
+/* Interrupt codes */
+#define TI_CODE_HARDWARE_ERROR 0xFF
+#define TI_CODE_DATA_ERROR 0x03
+#define TI_CODE_MODEM_STATUS 0x04
+
+/* Download firmware max packet size */
+#define TI_DOWNLOAD_MAX_PACKET_SIZE 64
+
+/* Firmware image header */
+struct ti_firmware_header {
+ __le16 wLength;
+ __u8 bCheckSum;
+} __packed;
+
+/* UART addresses */
+#define TI_UART1_BASE_ADDR 0xFFA0 /* UART 1 base address */
+#define TI_UART2_BASE_ADDR 0xFFB0 /* UART 2 base address */
+#define TI_UART_OFFSET_LCR 0x0002 /* UART LCR register offset */
+#define TI_UART_OFFSET_MCR 0x0004 /* UART MCR register offset */
#define TI_DRIVER_AUTHOR "Al Borchers <alborchers@steinerpoint.com>"
#define TI_DRIVER_DESC "TI USB 3410/5052 Serial Driver"
@@ -58,9 +286,6 @@
#define TI_EXTRA_VID_PID_COUNT 5
-
-/* Structures */
-
struct ti_port {
int tp_is_open;
__u8 tp_msr;
@@ -84,9 +309,6 @@ struct ti_device {
int td_urb_error;
};
-
-/* Function Declarations */
-
static int ti_startup(struct usb_serial *serial);
static void ti_release(struct usb_serial *serial);
static int ti_port_probe(struct usb_serial_port *port);
@@ -136,13 +358,8 @@ static int ti_write_byte(struct usb_serial_port *port, struct ti_device *tdev,
static int ti_download_firmware(struct ti_device *tdev);
-
-/* Data */
-
-/* module parameters */
static int closing_wait = TI_DEFAULT_CLOSING_WAIT;
-/* supported devices */
static const struct usb_device_id ti_id_table_3410[] = {
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
@@ -174,7 +391,7 @@ static const struct usb_device_id ti_id_table_5052[] = {
{ USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) },
- { } /* terminator */
+ { }
};
static const struct usb_device_id ti_id_table_combined[] = {
@@ -275,8 +492,6 @@ static struct usb_serial_driver * const serial_drivers[] = {
&ti_1port_device, &ti_2port_device, NULL
};
-/* Module */
-
MODULE_AUTHOR(TI_DRIVER_AUTHOR);
MODULE_DESCRIPTION(TI_DRIVER_DESC);
MODULE_LICENSE("GPL");
@@ -291,8 +506,6 @@ MODULE_DEVICE_TABLE(usb, ti_id_table_combined);
module_usb_serial_driver(serial_drivers, ti_id_table_combined);
-/* Functions */
-
static int ti_startup(struct usb_serial *serial)
{
struct ti_device *tdev;
@@ -308,7 +521,6 @@ static int ti_startup(struct usb_serial *serial)
dev->descriptor.bNumConfigurations,
dev->actconfig->desc.bConfigurationValue);
- /* create device structure */
tdev = kzalloc(sizeof(struct ti_device), GFP_KERNEL);
if (!tdev)
return -ENOMEM;
@@ -424,7 +636,7 @@ static int ti_open(struct tty_struct *tty, struct usb_serial_port *port)
struct urb *urb;
int port_number;
int status;
- __u16 open_settings = (__u8)(TI_PIPE_MODE_CONTINOUS |
+ __u16 open_settings = (__u8)(TI_PIPE_MODE_CONTINUOUS |
TI_PIPE_TIMEOUT_ENABLE |
(TI_TRANSFER_TIMEOUT << 2));
@@ -943,6 +1155,15 @@ static void ti_break(struct tty_struct *tty, int break_state)
dev_dbg(&port->dev, "%s - error setting break, %d\n", __func__, status);
}
+static int ti_get_port_from_code(unsigned char code)
+{
+ return (code >> 4) - 3;
+}
+
+static int ti_get_func_from_code(unsigned char code)
+{
+ return code & 0x0f;
+}
static void ti_interrupt_callback(struct urb *urb)
{
@@ -984,8 +1205,8 @@ static void ti_interrupt_callback(struct urb *urb)
goto exit;
}
- port_number = TI_GET_PORT_FROM_CODE(data[0]);
- function = TI_GET_FUNC_FROM_CODE(data[0]);
+ port_number = ti_get_port_from_code(data[0]);
+ function = ti_get_func_from_code(data[0]);
dev_dbg(dev, "%s - port_number %d, function %d, data 0x%02X\n",
__func__, port_number, function, data[1]);
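
The new ti_get_port_from_code()/ti_get_func_from_code() helpers make the interrupt-code layout explicit: the upper nibble encodes the port number offset by 3, the lower nibble the function. For instance, decoding a code byte of 0x34:

unsigned char code = 0x34;
int port = ti_get_port_from_code(code);	/* (0x3) - 3 == 0: first port */
int func = ti_get_func_from_code(code);	/* 0x4 == TI_CODE_MODEM_STATUS */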
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index ef2d8cde6..8c5f01151 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -1070,17 +1070,17 @@ int usb_stor_probe2(struct us_data *us)
result = usb_stor_acquire_resources(us);
if (result)
goto BadDevice;
+ usb_autopm_get_interface_no_resume(us->pusb_intf);
snprintf(us->scsi_name, sizeof(us->scsi_name), "usb-storage %s",
dev_name(&us->pusb_intf->dev));
result = scsi_add_host(us_to_host(us), dev);
if (result) {
dev_warn(dev,
"Unable to add the scsi host\n");
- goto BadDevice;
+ goto HostAddErr;
}
/* Submit the delayed_work for SCSI-device scanning */
- usb_autopm_get_interface_no_resume(us->pusb_intf);
set_bit(US_FLIDX_SCAN_PENDING, &us->dflags);
if (delay_use > 0)
@@ -1090,6 +1090,8 @@ int usb_stor_probe2(struct us_data *us)
return 0;
/* We come here if there are any problems */
+HostAddErr:
+ usb_autopm_put_interface_no_suspend(us->pusb_intf);
BadDevice:
usb_stor_dbg(us, "storage_probe() failed\n");
release_everything(us);
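
The usb-storage fix takes the autopm reference before scsi_add_host() and pairs it with a put on the new HostAddErr label, keeping the interface's PM usage count balanced on every exit path. The general shape of the pattern, with register_with_subsystem() as a hypothetical placeholder:

static int example_probe2(struct usb_interface *intf)
{
	int ret;

	usb_autopm_get_interface_no_resume(intf);   /* take PM reference */

	ret = register_with_subsystem();            /* hypothetical step */
	if (ret)
		goto err_put;
	return 0;

err_put:
	usb_autopm_put_interface_no_suspend(intf);  /* release on failure */
	return ret;
}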
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
index c7508cbce..9f490375a 100644
--- a/drivers/usb/usbip/usbip_common.h
+++ b/drivers/usb/usbip/usbip_common.h
@@ -245,7 +245,7 @@ enum usbip_side {
#define USBIP_EH_RESET (1 << 2)
#define USBIP_EH_UNUSABLE (1 << 3)
-#define SDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_RESET | USBIP_EH_BYE)
+#define SDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_BYE)
#define SDEV_EVENT_DOWN (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
#define SDEV_EVENT_ERROR_TCP (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
#define SDEV_EVENT_ERROR_SUBMIT (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
diff --git a/drivers/usb/usbip/vudc_rx.c b/drivers/usb/usbip/vudc_rx.c
index 344bd9473..e429b59f6 100644
--- a/drivers/usb/usbip/vudc_rx.c
+++ b/drivers/usb/usbip/vudc_rx.c
@@ -142,7 +142,7 @@ static int v_recv_cmd_submit(struct vudc *udc,
urb_p->urb->status = -EINPROGRESS;
/* FIXME: more pipe setup to please usbip_common */
- urb_p->urb->pipe &= ~(11 << 30);
+ urb_p->urb->pipe &= ~(3 << 30);
switch (urb_p->ep->type) {
case USB_ENDPOINT_XFER_BULK:
urb_p->urb->pipe |= (PIPE_BULK << 30);
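
The vudc fix corrects the mask used to clear the pipe-type field: the type occupies bits 31:30 of urb->pipe, so the two-bit mask is 3 (binary 11), not decimal 11. On common 32-bit arithmetic the extra bits of 11 << 30 happened to fall off, so the old mask worked by accident, but it was wrong as written:

/* The pipe type is a two-bit field in bits 31:30 of urb->pipe. */
urb->pipe &= ~(3U << 30);		/* clear the type field */
urb->pipe |= (PIPE_BULK << 30);		/* then select a type, e.g. bulk */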
diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c
index 99397fa1e..0f98f2c74 100644
--- a/drivers/usb/usbip/vudc_sysfs.c
+++ b/drivers/usb/usbip/vudc_sysfs.c
@@ -40,7 +40,7 @@ int get_gadget_descs(struct vudc *udc)
struct usb_ctrlrequest req;
int ret;
- if (!udc || !udc->driver || !udc->pullup)
+ if (!udc->driver || !udc->pullup)
return -EINVAL;
req.bRequestType = USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE;
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 188b1ff03..d624a5277 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -110,6 +110,74 @@ static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
}
+static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
+{
+ struct resource *res;
+ int bar;
+ struct vfio_pci_dummy_resource *dummy_res;
+
+ INIT_LIST_HEAD(&vdev->dummy_resources_list);
+
+ for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
+ res = vdev->pdev->resource + bar;
+
+ if (!IS_ENABLED(CONFIG_VFIO_PCI_MMAP))
+ goto no_mmap;
+
+ if (!(res->flags & IORESOURCE_MEM))
+ goto no_mmap;
+
+ /*
+ * The PCI core shouldn't set up a resource with a
+ * type but zero size. But there may be bugs that
+ * cause us to do that.
+ */
+ if (!resource_size(res))
+ goto no_mmap;
+
+ if (resource_size(res) >= PAGE_SIZE) {
+ vdev->bar_mmap_supported[bar] = true;
+ continue;
+ }
+
+ if (!(res->start & ~PAGE_MASK)) {
+ /*
+ * Add a dummy resource to reserve the remainder
+ * of the exclusive page in case that hot-add
+ * device's bar is assigned into it.
+ */
+ dummy_res = kzalloc(sizeof(*dummy_res), GFP_KERNEL);
+ if (dummy_res == NULL)
+ goto no_mmap;
+
+ dummy_res->resource.name = "vfio sub-page reserved";
+ dummy_res->resource.start = res->end + 1;
+ dummy_res->resource.end = res->start + PAGE_SIZE - 1;
+ dummy_res->resource.flags = res->flags;
+ if (request_resource(res->parent,
+ &dummy_res->resource)) {
+ kfree(dummy_res);
+ goto no_mmap;
+ }
+ dummy_res->index = bar;
+ list_add(&dummy_res->res_next,
+ &vdev->dummy_resources_list);
+ vdev->bar_mmap_supported[bar] = true;
+ continue;
+ }
+ /*
+ * Here we don't handle the case when the BAR is not page
+ * aligned because we can't expect the BAR will be
+ * assigned into the same location in a page in guest
+ * when we passthrough the BAR. And it's hard to access
+ * this BAR in userspace because we have no way to get
+ * the BAR's location in a page.
+ */
+no_mmap:
+ vdev->bar_mmap_supported[bar] = false;
+ }
+}
+
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
static void vfio_pci_disable(struct vfio_pci_device *vdev);
@@ -218,12 +286,15 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
}
}
+ vfio_pci_probe_mmaps(vdev);
+
return 0;
}
static void vfio_pci_disable(struct vfio_pci_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
+ struct vfio_pci_dummy_resource *dummy_res, *tmp;
int i, bar;
/* Stop the device from further DMA */
@@ -252,6 +323,13 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
vdev->barmap[bar] = NULL;
}
+ list_for_each_entry_safe(dummy_res, tmp,
+ &vdev->dummy_resources_list, res_next) {
+ list_del(&dummy_res->res_next);
+ release_resource(&dummy_res->resource);
+ kfree(dummy_res);
+ }
+
vdev->needs_reset = true;
/*
@@ -623,9 +701,7 @@ static long vfio_pci_ioctl(void *device_data,
info.flags = VFIO_REGION_INFO_FLAG_READ |
VFIO_REGION_INFO_FLAG_WRITE;
- if (IS_ENABLED(CONFIG_VFIO_PCI_MMAP) &&
- pci_resource_flags(pdev, info.index) &
- IORESOURCE_MEM && info.size >= PAGE_SIZE) {
+ if (vdev->bar_mmap_supported[info.index]) {
info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
if (info.index == vdev->msix_bar) {
ret = msix_sparse_mmap_cap(vdev, &caps);
@@ -1049,16 +1125,16 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
return -EINVAL;
if (index >= VFIO_PCI_ROM_REGION_INDEX)
return -EINVAL;
- if (!(pci_resource_flags(pdev, index) & IORESOURCE_MEM))
+ if (!vdev->bar_mmap_supported[index])
return -EINVAL;
- phys_len = pci_resource_len(pdev, index);
+ phys_len = PAGE_ALIGN(pci_resource_len(pdev, index));
req_len = vma->vm_end - vma->vm_start;
pgoff = vma->vm_pgoff &
((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
req_start = pgoff << PAGE_SHIFT;
- if (phys_len < PAGE_SIZE || req_start + req_len > phys_len)
+ if (req_start + req_len > phys_len)
return -EINVAL;
if (index == vdev->msix_bar) {
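
vfio_pci_probe_mmaps() allows mmap of a sub-page BAR only when the BAR is page-aligned, then claims the remainder of that page with a dummy resource so a hot-added device cannot be assigned into it. A worked example of the reservation math, assuming 4 KiB pages:

/* A 256-byte BAR at a page-aligned address (PAGE_SIZE == 4096):     */
resource_size_t start = 0xfe001000;	/* res->start, page aligned   */
resource_size_t end   = 0xfe0010ff;	/* res->end (size 0x100)      */

/* The dummy resource spans the rest of the page:                    */
resource_size_t dummy_start = end + 1;			/* 0xfe001100 */
resource_size_t dummy_end   = start + PAGE_SIZE - 1;	/* 0xfe001fff */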
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index 016c14a1b..2128de86c 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -57,9 +57,16 @@ struct vfio_pci_region {
u32 flags;
};
+struct vfio_pci_dummy_resource {
+ struct resource resource;
+ int index;
+ struct list_head res_next;
+};
+
struct vfio_pci_device {
struct pci_dev *pdev;
void __iomem *barmap[PCI_STD_RESOURCE_END + 1];
+ bool bar_mmap_supported[PCI_STD_RESOURCE_END + 1];
u8 *pci_config_map;
u8 *vconfig;
struct perm_bits *msi_perm;
@@ -88,6 +95,7 @@ struct vfio_pci_device {
int refcnt;
struct eventfd_ctx *err_trigger;
struct eventfd_ctx *req_trigger;
+ struct list_head dummy_resources_list;
};
#define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
diff --git a/drivers/vfio/platform/vfio_amba.c b/drivers/vfio/platform/vfio_amba.c
index a66479bd0..31372fbf6 100644
--- a/drivers/vfio/platform/vfio_amba.c
+++ b/drivers/vfio/platform/vfio_amba.c
@@ -68,6 +68,7 @@ static int vfio_amba_probe(struct amba_device *adev, const struct amba_id *id)
vdev->get_resource = get_amba_resource;
vdev->get_irq = get_amba_irq;
vdev->parent_module = THIS_MODULE;
+ vdev->reset_required = false;
ret = vfio_platform_probe_common(vdev, &adev->dev);
if (ret) {
diff --git a/drivers/vfio/platform/vfio_platform.c b/drivers/vfio/platform/vfio_platform.c
index b1cc3a768..6561751a1 100644
--- a/drivers/vfio/platform/vfio_platform.c
+++ b/drivers/vfio/platform/vfio_platform.c
@@ -23,6 +23,10 @@
#define DRIVER_AUTHOR "Antonios Motakis <a.motakis@virtualopensystems.com>"
#define DRIVER_DESC "VFIO for platform devices - User Level meta-driver"
+static bool reset_required = true;
+module_param(reset_required, bool, 0444);
+MODULE_PARM_DESC(reset_required, "override reset requirement (default: 1)");
+
/* probing devices from the linux platform bus */
static struct resource *get_platform_resource(struct vfio_platform_device *vdev,
@@ -66,6 +70,7 @@ static int vfio_platform_probe(struct platform_device *pdev)
vdev->get_resource = get_platform_resource;
vdev->get_irq = get_platform_irq;
vdev->parent_module = THIS_MODULE;
+ vdev->reset_required = reset_required;
ret = vfio_platform_probe_common(vdev, &pdev->dev);
if (ret)
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
index e65b142d3..1cf2d462b 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -13,6 +13,7 @@
*/
#include <linux/device.h>
+#include <linux/acpi.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -27,6 +28,8 @@
#define DRIVER_AUTHOR "Antonios Motakis <a.motakis@virtualopensystems.com>"
#define DRIVER_DESC "VFIO platform base module"
+#define VFIO_PLATFORM_IS_ACPI(vdev) ((vdev)->acpihid != NULL)
+
static LIST_HEAD(reset_list);
static DEFINE_MUTEX(driver_lock);
@@ -41,7 +44,7 @@ static vfio_platform_reset_fn_t vfio_platform_lookup_reset(const char *compat,
if (!strcmp(iter->compat, compat) &&
try_module_get(iter->owner)) {
*module = iter->owner;
- reset_fn = iter->reset;
+ reset_fn = iter->of_reset;
break;
}
}
@@ -49,20 +52,91 @@ static vfio_platform_reset_fn_t vfio_platform_lookup_reset(const char *compat,
return reset_fn;
}
-static void vfio_platform_get_reset(struct vfio_platform_device *vdev)
+static int vfio_platform_acpi_probe(struct vfio_platform_device *vdev,
+ struct device *dev)
{
- vdev->reset = vfio_platform_lookup_reset(vdev->compat,
- &vdev->reset_module);
- if (!vdev->reset) {
+ struct acpi_device *adev;
+
+ if (acpi_disabled)
+ return -ENOENT;
+
+ adev = ACPI_COMPANION(dev);
+ if (!adev) {
+ pr_err("VFIO: ACPI companion device not found for %s\n",
+ vdev->name);
+ return -ENODEV;
+ }
+
+#ifdef CONFIG_ACPI
+ vdev->acpihid = acpi_device_hid(adev);
+#endif
+ return WARN_ON(!vdev->acpihid) ? -EINVAL : 0;
+}
+
+int vfio_platform_acpi_call_reset(struct vfio_platform_device *vdev,
+ const char **extra_dbg)
+{
+#ifdef CONFIG_ACPI
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct device *dev = vdev->device;
+ acpi_handle handle = ACPI_HANDLE(dev);
+ acpi_status acpi_ret;
+
+ acpi_ret = acpi_evaluate_object(handle, "_RST", NULL, &buffer);
+ if (ACPI_FAILURE(acpi_ret)) {
+ if (extra_dbg)
+ *extra_dbg = acpi_format_exception(acpi_ret);
+ return -EINVAL;
+ }
+
+ return 0;
+#else
+ return -ENOENT;
+#endif
+}
+
+bool vfio_platform_acpi_has_reset(struct vfio_platform_device *vdev)
+{
+#ifdef CONFIG_ACPI
+ struct device *dev = vdev->device;
+ acpi_handle handle = ACPI_HANDLE(dev);
+
+ return acpi_has_method(handle, "_RST");
+#else
+ return false;
+#endif
+}
+
+static bool vfio_platform_has_reset(struct vfio_platform_device *vdev)
+{
+ if (VFIO_PLATFORM_IS_ACPI(vdev))
+ return vfio_platform_acpi_has_reset(vdev);
+
+ return vdev->of_reset ? true : false;
+}
+
+static int vfio_platform_get_reset(struct vfio_platform_device *vdev)
+{
+ if (VFIO_PLATFORM_IS_ACPI(vdev))
+ return vfio_platform_acpi_has_reset(vdev) ? 0 : -ENOENT;
+
+ vdev->of_reset = vfio_platform_lookup_reset(vdev->compat,
+ &vdev->reset_module);
+ if (!vdev->of_reset) {
request_module("vfio-reset:%s", vdev->compat);
- vdev->reset = vfio_platform_lookup_reset(vdev->compat,
- &vdev->reset_module);
+ vdev->of_reset = vfio_platform_lookup_reset(vdev->compat,
+ &vdev->reset_module);
}
+
+ return vdev->of_reset ? 0 : -ENOENT;
}
static void vfio_platform_put_reset(struct vfio_platform_device *vdev)
{
- if (vdev->reset)
+ if (VFIO_PLATFORM_IS_ACPI(vdev))
+ return;
+
+ if (vdev->of_reset)
module_put(vdev->reset_module);
}
@@ -134,6 +208,21 @@ static void vfio_platform_regions_cleanup(struct vfio_platform_device *vdev)
kfree(vdev->regions);
}
+static int vfio_platform_call_reset(struct vfio_platform_device *vdev,
+ const char **extra_dbg)
+{
+ if (VFIO_PLATFORM_IS_ACPI(vdev)) {
+ dev_info(vdev->device, "reset\n");
+ return vfio_platform_acpi_call_reset(vdev, extra_dbg);
+ } else if (vdev->of_reset) {
+ dev_info(vdev->device, "reset\n");
+ return vdev->of_reset(vdev);
+ }
+
+ dev_warn(vdev->device, "no reset function found!\n");
+ return -EINVAL;
+}
+
static void vfio_platform_release(void *device_data)
{
struct vfio_platform_device *vdev = device_data;
@@ -141,11 +230,14 @@ static void vfio_platform_release(void *device_data)
mutex_lock(&driver_lock);
if (!(--vdev->refcnt)) {
- if (vdev->reset) {
- dev_info(vdev->device, "reset\n");
- vdev->reset(vdev);
- } else {
- dev_warn(vdev->device, "no reset function found!\n");
+ const char *extra_dbg = NULL;
+ int ret;
+
+ ret = vfio_platform_call_reset(vdev, &extra_dbg);
+ if (ret && vdev->reset_required) {
+ dev_warn(vdev->device, "reset driver is required and reset call failed in release (%d) %s\n",
+ ret, extra_dbg ? extra_dbg : "");
+ WARN_ON(1);
}
vfio_platform_regions_cleanup(vdev);
vfio_platform_irq_cleanup(vdev);
@@ -167,6 +259,8 @@ static int vfio_platform_open(void *device_data)
mutex_lock(&driver_lock);
if (!vdev->refcnt) {
+ const char *extra_dbg = NULL;
+
ret = vfio_platform_regions_init(vdev);
if (ret)
goto err_reg;
@@ -175,11 +269,11 @@ static int vfio_platform_open(void *device_data)
if (ret)
goto err_irq;
- if (vdev->reset) {
- dev_info(vdev->device, "reset\n");
- vdev->reset(vdev);
- } else {
- dev_warn(vdev->device, "no reset function found!\n");
+ ret = vfio_platform_call_reset(vdev, &extra_dbg);
+ if (ret && vdev->reset_required) {
+ dev_warn(vdev->device, "reset driver is required and reset call failed in open (%d) %s\n",
+ ret, extra_dbg ? extra_dbg : "");
+ goto err_rst;
}
}
@@ -188,6 +282,8 @@ static int vfio_platform_open(void *device_data)
mutex_unlock(&driver_lock);
return 0;
+err_rst:
+ vfio_platform_irq_cleanup(vdev);
err_irq:
vfio_platform_regions_cleanup(vdev);
err_reg:
@@ -213,7 +309,7 @@ static long vfio_platform_ioctl(void *device_data,
if (info.argsz < minsz)
return -EINVAL;
- if (vdev->reset)
+ if (vfio_platform_has_reset(vdev))
vdev->flags |= VFIO_DEVICE_FLAGS_RESET;
info.flags = vdev->flags;
info.num_regions = vdev->num_regions;
@@ -312,10 +408,7 @@ static long vfio_platform_ioctl(void *device_data,
return ret;
} else if (cmd == VFIO_DEVICE_RESET) {
- if (vdev->reset)
- return vdev->reset(vdev);
- else
- return -EINVAL;
+ return vfio_platform_call_reset(vdev, NULL);
}
return -ENOTTY;
@@ -544,6 +637,37 @@ static const struct vfio_device_ops vfio_platform_ops = {
.mmap = vfio_platform_mmap,
};
+int vfio_platform_of_probe(struct vfio_platform_device *vdev,
+ struct device *dev)
+{
+ int ret;
+
+ ret = device_property_read_string(dev, "compatible",
+ &vdev->compat);
+ if (ret)
+ pr_err("VFIO: cannot retrieve compat for %s\n",
+ vdev->name);
+
+ return ret;
+}
+
+/*
+ * There can be two kernel build combinations. One build where
+ * ACPI is not selected in Kconfig and another one with the ACPI Kconfig.
+ *
+ * In the first case, vfio_platform_acpi_probe() returns early because
+ * acpi_disabled is 1, so DT users never see any ACPI messages.
+ *
+ * In the second case, both DT and ACPI support are compiled in, and the
+ * system may boot with either firmware type.
+ *
+ * If the firmware is DT type, then acpi_disabled is 1. The ACPI probe routine
+ * terminates immediately without any messages.
+ *
+ * If the firmware is ACPI type, then acpi_disabled is 0, the remaining
+ * checks apply, and the system cannot be assumed to be DT.
+ */
int vfio_platform_probe_common(struct vfio_platform_device *vdev,
struct device *dev)
{
@@ -553,15 +677,23 @@ int vfio_platform_probe_common(struct vfio_platform_device *vdev,
if (!vdev)
return -EINVAL;
- ret = device_property_read_string(dev, "compatible", &vdev->compat);
- if (ret) {
- pr_err("VFIO: cannot retrieve compat for %s\n", vdev->name);
- return -EINVAL;
- }
+ ret = vfio_platform_acpi_probe(vdev, dev);
+ if (ret)
+ ret = vfio_platform_of_probe(vdev, dev);
+
+ if (ret)
+ return ret;
vdev->device = dev;
- group = iommu_group_get(dev);
+ ret = vfio_platform_get_reset(vdev);
+ if (ret && vdev->reset_required) {
+ pr_err("vfio: no reset function found for device %s\n",
+ vdev->name);
+ return ret;
+ }
+
+ group = vfio_iommu_group_get(dev);
if (!group) {
pr_err("VFIO: No IOMMU group for device %s\n", vdev->name);
return -EINVAL;
@@ -569,12 +701,10 @@ int vfio_platform_probe_common(struct vfio_platform_device *vdev,
ret = vfio_add_group_dev(dev, &vfio_platform_ops, vdev);
if (ret) {
- iommu_group_put(group);
+ vfio_iommu_group_put(group, dev);
return ret;
}
- vfio_platform_get_reset(vdev);
-
mutex_init(&vdev->igate);
return 0;
@@ -589,7 +719,7 @@ struct vfio_platform_device *vfio_platform_remove_common(struct device *dev)
if (vdev) {
vfio_platform_put_reset(vdev);
- iommu_group_put(dev->iommu_group);
+ vfio_iommu_group_put(dev->iommu_group, dev);
}
return vdev;
@@ -611,7 +741,7 @@ void vfio_platform_unregister_reset(const char *compat,
mutex_lock(&driver_lock);
list_for_each_entry_safe(iter, temp, &reset_list, link) {
- if (!strcmp(iter->compat, compat) && (iter->reset == fn)) {
+ if (!strcmp(iter->compat, compat) && (iter->of_reset == fn)) {
list_del(&iter->link);
break;
}
diff --git a/drivers/vfio/platform/vfio_platform_private.h b/drivers/vfio/platform/vfio_platform_private.h
index 42816dd28..85ffe5d9d 100644
--- a/drivers/vfio/platform/vfio_platform_private.h
+++ b/drivers/vfio/platform/vfio_platform_private.h
@@ -58,6 +58,7 @@ struct vfio_platform_device {
struct mutex igate;
struct module *parent_module;
const char *compat;
+ const char *acpihid;
struct module *reset_module;
struct device *device;
@@ -71,7 +72,9 @@ struct vfio_platform_device {
struct resource*
(*get_resource)(struct vfio_platform_device *vdev, int i);
int (*get_irq)(struct vfio_platform_device *vdev, int i);
- int (*reset)(struct vfio_platform_device *vdev);
+ int (*of_reset)(struct vfio_platform_device *vdev);
+
+ bool reset_required;
};
typedef int (*vfio_platform_reset_fn_t)(struct vfio_platform_device *vdev);
@@ -80,7 +83,7 @@ struct vfio_platform_reset_node {
struct list_head link;
char *compat;
struct module *owner;
- vfio_platform_reset_fn_t reset;
+ vfio_platform_reset_fn_t of_reset;
};
extern int vfio_platform_probe_common(struct vfio_platform_device *vdev,
@@ -103,7 +106,7 @@ extern void vfio_platform_unregister_reset(const char *compat,
static struct vfio_platform_reset_node __reset ## _node = { \
.owner = THIS_MODULE, \
.compat = __compat, \
- .reset = __reset, \
+ .of_reset = __reset, \
}; \
__vfio_platform_register_reset(&__reset ## _node)
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 6fd6fa546..d1d70e0b0 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -1711,8 +1711,8 @@ EXPORT_SYMBOL_GPL(vfio_group_get_external_user);
void vfio_group_put_external_user(struct vfio_group *group)
{
- vfio_group_put(group);
vfio_group_try_dissolve_container(group);
+ vfio_group_put(group);
}
EXPORT_SYMBOL_GPL(vfio_group_put_external_user);
diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
index 533eaf04f..40764ecad 100644
--- a/drivers/vhost/Kconfig
+++ b/drivers/vhost/Kconfig
@@ -2,7 +2,6 @@ config VHOST_NET
tristate "Host kernel accelerator for virtio net"
depends on NET && EVENTFD && (TUN || !TUN) && (MACVTAP || !MACVTAP)
select VHOST
- select VHOST_RING
---help---
This kernel module can be loaded in host kernel to accelerate
guest networking with virtio_net. Not to be confused with virtio_net
@@ -15,17 +14,24 @@ config VHOST_SCSI
tristate "VHOST_SCSI TCM fabric driver"
depends on TARGET_CORE && EVENTFD && m
select VHOST
- select VHOST_RING
default n
---help---
Say M here to enable the vhost_scsi TCM fabric module
for use with virtio-scsi guests
-config VHOST_RING
- tristate
+config VHOST_VSOCK
+ tristate "vhost virtio-vsock driver"
+ depends on VSOCKETS && EVENTFD
+ select VIRTIO_VSOCKETS_COMMON
+ select VHOST
+ default n
---help---
- This option is selected by any driver which needs to access
- the host side of a virtio ring.
+ This kernel module can be loaded in the host kernel to provide AF_VSOCK
+ sockets for communicating with guests. The guests must have the
+ virtio_transport.ko driver loaded to use the virtio-vsock device.
+
+ To compile this driver as a module, choose M here: the module will be called
+ vhost_vsock.
config VHOST
tristate
diff --git a/drivers/vhost/Kconfig.vringh b/drivers/vhost/Kconfig.vringh
new file mode 100644
index 000000000..6a4490c09
--- /dev/null
+++ b/drivers/vhost/Kconfig.vringh
@@ -0,0 +1,5 @@
+config VHOST_RING
+ tristate
+ ---help---
+ This option is selected by any driver which needs to access
+ the host side of a virtio ring.
diff --git a/drivers/vhost/Makefile b/drivers/vhost/Makefile
index e0441c34d..6b012b986 100644
--- a/drivers/vhost/Makefile
+++ b/drivers/vhost/Makefile
@@ -4,5 +4,9 @@ vhost_net-y := net.o
obj-$(CONFIG_VHOST_SCSI) += vhost_scsi.o
vhost_scsi-y := scsi.o
+obj-$(CONFIG_VHOST_VSOCK) += vhost_vsock.o
+vhost_vsock-y := vsock.o
+
obj-$(CONFIG_VHOST_RING) += vringh.o
+
obj-$(CONFIG_VHOST) += vhost.o
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index f744eeb3e..5dc128a8d 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -61,7 +61,8 @@ MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
enum {
VHOST_NET_FEATURES = VHOST_FEATURES |
(1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
- (1ULL << VIRTIO_NET_F_MRG_RXBUF)
+ (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
+ (1ULL << VIRTIO_F_IOMMU_PLATFORM)
};
enum {
@@ -301,6 +302,32 @@ static bool vhost_can_busy_poll(struct vhost_dev *dev,
!vhost_has_work(dev);
}
+static void vhost_net_disable_vq(struct vhost_net *n,
+ struct vhost_virtqueue *vq)
+{
+ struct vhost_net_virtqueue *nvq =
+ container_of(vq, struct vhost_net_virtqueue, vq);
+ struct vhost_poll *poll = n->poll + (nvq - n->vqs);
+ if (!vq->private_data)
+ return;
+ vhost_poll_stop(poll);
+}
+
+static int vhost_net_enable_vq(struct vhost_net *n,
+ struct vhost_virtqueue *vq)
+{
+ struct vhost_net_virtqueue *nvq =
+ container_of(vq, struct vhost_net_virtqueue, vq);
+ struct vhost_poll *poll = n->poll + (nvq - n->vqs);
+ struct socket *sock;
+
+ sock = vq->private_data;
+ if (!sock)
+ return 0;
+
+ return vhost_poll_start(poll, sock->file);
+}
+
static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
struct vhost_virtqueue *vq,
struct iovec iov[], unsigned int iov_size,
@@ -308,7 +335,7 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
{
unsigned long uninitialized_var(endtime);
int r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
- out_num, in_num, NULL, NULL);
+ out_num, in_num, NULL, NULL);
if (r == vq->num && vq->busyloop_timeout) {
preempt_disable();
@@ -318,7 +345,7 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
cpu_relax_lowlatency();
preempt_enable();
r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
- out_num, in_num, NULL, NULL);
+ out_num, in_num, NULL, NULL);
}
return r;
@@ -351,6 +378,9 @@ static void handle_tx(struct vhost_net *net)
if (!sock)
goto out;
+ if (!vq_iotlb_prefetch(vq))
+ goto out;
+
vhost_disable_notify(&net->dev, vq);
hdr_size = nvq->vhost_hlen;
@@ -455,10 +485,14 @@ out:
static int peek_head_len(struct sock *sk)
{
+ struct socket *sock = sk->sk_socket;
struct sk_buff *head;
int len = 0;
unsigned long flags;
+ if (sock->ops->peek_len)
+ return sock->ops->peek_len(sock);
+
spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
head = skb_peek(&sk->sk_receive_queue);
if (likely(head)) {
@@ -471,6 +505,16 @@ static int peek_head_len(struct sock *sk)
return len;
}
+static int sk_has_rx_data(struct sock *sk)
+{
+ struct socket *sock = sk->sk_socket;
+
+ if (sock->ops->peek_len)
+ return sock->ops->peek_len(sock);
+
+ return skb_queue_empty(&sk->sk_receive_queue);
+}
+
static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
{
struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
@@ -487,7 +531,7 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
endtime = busy_clock() + vq->busyloop_timeout;
while (vhost_can_busy_poll(&net->dev, endtime) &&
- skb_queue_empty(&sk->sk_receive_queue) &&
+ !sk_has_rx_data(sk) &&
vhost_vq_avail_empty(&net->dev, vq))
cpu_relax_lowlatency();
@@ -612,7 +656,12 @@ static void handle_rx(struct vhost_net *net)
sock = vq->private_data;
if (!sock)
goto out;
+
+ if (!vq_iotlb_prefetch(vq))
+ goto out;
+
vhost_disable_notify(&net->dev, vq);
+ vhost_net_disable_vq(net, vq);
vhost_hlen = nvq->vhost_hlen;
sock_hlen = nvq->sock_hlen;
@@ -629,7 +678,7 @@ static void handle_rx(struct vhost_net *net)
likely(mergeable) ? UIO_MAXIOV : 1);
/* On error, stop handling until the next kick. */
if (unlikely(headcount < 0))
- break;
+ goto out;
/* On overrun, truncate and discard */
if (unlikely(headcount > UIO_MAXIOV)) {
iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
@@ -648,7 +697,7 @@ static void handle_rx(struct vhost_net *net)
}
/* Nothing new? Wait for eventfd to tell us
* they refilled. */
- break;
+ goto out;
}
/* We don't need to be notified again. */
iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
@@ -676,7 +725,7 @@ static void handle_rx(struct vhost_net *net)
&fixup) != sizeof(hdr)) {
vq_err(vq, "Unable to write vnet_hdr "
"at addr %p\n", vq->iov->iov_base);
- break;
+ goto out;
}
} else {
/* Header came from socket; we'll need to patch
@@ -692,7 +741,7 @@ static void handle_rx(struct vhost_net *net)
&fixup) != sizeof num_buffers) {
vq_err(vq, "Failed num_buffers write");
vhost_discard_vq_desc(vq, headcount);
- break;
+ goto out;
}
vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
headcount);
@@ -701,9 +750,10 @@ static void handle_rx(struct vhost_net *net)
total_len += vhost_len;
if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
vhost_poll_queue(&vq->poll);
- break;
+ goto out;
}
}
+ vhost_net_enable_vq(net, vq);
out:
mutex_unlock(&vq->mutex);
}
@@ -782,32 +832,6 @@ static int vhost_net_open(struct inode *inode, struct file *f)
return 0;
}
-static void vhost_net_disable_vq(struct vhost_net *n,
- struct vhost_virtqueue *vq)
-{
- struct vhost_net_virtqueue *nvq =
- container_of(vq, struct vhost_net_virtqueue, vq);
- struct vhost_poll *poll = n->poll + (nvq - n->vqs);
- if (!vq->private_data)
- return;
- vhost_poll_stop(poll);
-}
-
-static int vhost_net_enable_vq(struct vhost_net *n,
- struct vhost_virtqueue *vq)
-{
- struct vhost_net_virtqueue *nvq =
- container_of(vq, struct vhost_net_virtqueue, vq);
- struct vhost_poll *poll = n->poll + (nvq - n->vqs);
- struct socket *sock;
-
- sock = vq->private_data;
- if (!sock)
- return 0;
-
- return vhost_poll_start(poll, sock->file);
-}
-
static struct socket *vhost_net_stop_vq(struct vhost_net *n,
struct vhost_virtqueue *vq)
{
@@ -1036,20 +1060,20 @@ static long vhost_net_reset_owner(struct vhost_net *n)
struct socket *tx_sock = NULL;
struct socket *rx_sock = NULL;
long err;
- struct vhost_memory *memory;
+ struct vhost_umem *umem;
mutex_lock(&n->dev.mutex);
err = vhost_dev_check_owner(&n->dev);
if (err)
goto done;
- memory = vhost_dev_reset_owner_prepare();
- if (!memory) {
+ umem = vhost_dev_reset_owner_prepare();
+ if (!umem) {
err = -ENOMEM;
goto done;
}
vhost_net_stop(n, &tx_sock, &rx_sock);
vhost_net_flush(n);
- vhost_dev_reset_owner(&n->dev, memory);
+ vhost_dev_reset_owner(&n->dev, umem);
vhost_net_vq_reset(n);
done:
mutex_unlock(&n->dev.mutex);
@@ -1080,10 +1104,14 @@ static int vhost_net_set_features(struct vhost_net *n, u64 features)
}
mutex_lock(&n->dev.mutex);
if ((features & (1 << VHOST_F_LOG_ALL)) &&
- !vhost_log_access_ok(&n->dev)) {
- mutex_unlock(&n->dev.mutex);
- return -EFAULT;
+ !vhost_log_access_ok(&n->dev))
+ goto out_unlock;
+
+ if ((features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))) {
+ if (vhost_init_device_iotlb(&n->dev, true))
+ goto out_unlock;
}
+
for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
mutex_lock(&n->vqs[i].vq.mutex);
n->vqs[i].vq.acked_features = features;
@@ -1093,6 +1121,10 @@ static int vhost_net_set_features(struct vhost_net *n, u64 features)
}
mutex_unlock(&n->dev.mutex);
return 0;
+
+out_unlock:
+ mutex_unlock(&n->dev.mutex);
+ return -EFAULT;
}
static long vhost_net_set_owner(struct vhost_net *n)
@@ -1166,9 +1198,40 @@ static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
}
#endif
+static ssize_t vhost_net_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct file *file = iocb->ki_filp;
+ struct vhost_net *n = file->private_data;
+ struct vhost_dev *dev = &n->dev;
+ int noblock = file->f_flags & O_NONBLOCK;
+
+ return vhost_chr_read_iter(dev, to, noblock);
+}
+
+static ssize_t vhost_net_chr_write_iter(struct kiocb *iocb,
+ struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct vhost_net *n = file->private_data;
+ struct vhost_dev *dev = &n->dev;
+
+ return vhost_chr_write_iter(dev, from);
+}
+
+static unsigned int vhost_net_chr_poll(struct file *file, poll_table *wait)
+{
+ struct vhost_net *n = file->private_data;
+ struct vhost_dev *dev = &n->dev;
+
+ return vhost_chr_poll(file, dev, wait);
+}
+
static const struct file_operations vhost_net_fops = {
.owner = THIS_MODULE,
.release = vhost_net_release,
+ .read_iter = vhost_net_chr_read_iter,
+ .write_iter = vhost_net_chr_write_iter,
+ .poll = vhost_net_chr_poll,
.unlocked_ioctl = vhost_net_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = vhost_net_compat_ioctl,
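
With read_iter/write_iter/poll wired into vhost_net_fops, the vhost-net
fd doubles as a message channel for device-IOTLB traffic: once userspace
acks VIRTIO_F_IOMMU_PLATFORM (see vhost_net_set_features() above), the
kernel queues VHOST_IOTLB_MISS messages for it to read, and it replies
by writing VHOST_IOTLB_UPDATE or VHOST_IOTLB_INVALIDATE messages that
land in vhost_process_iotlb_msg(). A hedged userspace sketch, assuming
the struct vhost_msg uapi this series adds to <linux/vhost.h>;
translate() is a hypothetical helper resolving an IOVA to a host
virtual address and span:

	#include <linux/vhost.h>
	#include <poll.h>
	#include <unistd.h>

	extern __u64 translate(__u64 iova, __u64 *size);	/* hypothetical */

	static int serve_one_iotlb_miss(int vhost_fd)
	{
		struct vhost_msg msg;
		struct pollfd pfd = { .fd = vhost_fd, .events = POLLIN };

		if (poll(&pfd, 1, -1) <= 0)	/* wait for a queued miss */
			return -1;
		if (read(vhost_fd, &msg, sizeof(msg)) != sizeof(msg))
			return -1;
		if (msg.type != VHOST_IOTLB_MSG ||
		    msg.iotlb.type != VHOST_IOTLB_MISS)
			return -1;

		/* Answer with a mapping that covers the faulting IOVA. */
		msg.iotlb.type = VHOST_IOTLB_UPDATE;
		msg.iotlb.uaddr = translate(msg.iotlb.iova, &msg.iotlb.size);
		msg.iotlb.perm = VHOST_ACCESS_RW;
		return write(vhost_fd, &msg, sizeof(msg)) == sizeof(msg) ? 0 : -1;
	}
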
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 388eec4e1..97fb2f8fa 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -220,20 +220,20 @@ static long vhost_test_reset_owner(struct vhost_test *n)
{
void *priv = NULL;
long err;
- struct vhost_memory *memory;
+ struct vhost_umem *umem;
mutex_lock(&n->dev.mutex);
err = vhost_dev_check_owner(&n->dev);
if (err)
goto done;
- memory = vhost_dev_reset_owner_prepare();
- if (!memory) {
+ umem = vhost_dev_reset_owner_prepare();
+ if (!umem) {
err = -ENOMEM;
goto done;
}
vhost_test_stop(n, &priv);
vhost_test_flush(n);
- vhost_dev_reset_owner(&n->dev, memory);
+ vhost_dev_reset_owner(&n->dev, umem);
done:
mutex_unlock(&n->dev.mutex);
return err;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 669fef1e2..c6f2d89c0 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -27,6 +27,7 @@
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sort.h>
+#include <linux/interval_tree_generic.h>
#include "vhost.h"
@@ -34,6 +35,10 @@ static ushort max_mem_regions = 64;
module_param(max_mem_regions, ushort, 0444);
MODULE_PARM_DESC(max_mem_regions,
"Maximum number of memory regions in memory map. (default: 64)");
+static int max_iotlb_entries = 2048;
+module_param(max_iotlb_entries, int, 0444);
+MODULE_PARM_DESC(max_iotlb_entries,
+ "Maximum number of iotlb entries. (default: 2048)");
enum {
VHOST_MEMORY_F_LOG = 0x1,
@@ -42,6 +47,10 @@ enum {
#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
+INTERVAL_TREE_DEFINE(struct vhost_umem_node,
+ rb, __u64, __subtree_last,
+ START, LAST, , vhost_umem_interval_tree);
+
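
INTERVAL_TREE_DEFINE() expands into a set of augmented-rbtree helpers
named after its last argument, reading node bounds through the
START()/LAST() accessors defined in vhost.h below. In outline, the
generated API the rest of this patch relies on is:

	void vhost_umem_interval_tree_insert(struct vhost_umem_node *node,
					     struct rb_root *root);
	void vhost_umem_interval_tree_remove(struct vhost_umem_node *node,
					     struct rb_root *root);
	/* First node overlapping [start, last], or NULL if none. */
	struct vhost_umem_node *
	vhost_umem_interval_tree_iter_first(struct rb_root *root,
					    __u64 start, __u64 last);
	/* Next node overlapping [start, last] after a previous hit. */
	struct vhost_umem_node *
	vhost_umem_interval_tree_iter_next(struct vhost_umem_node *node,
					   __u64 start, __u64 last);
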
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
@@ -131,6 +140,19 @@ static void vhost_reset_is_le(struct vhost_virtqueue *vq)
vq->is_le = virtio_legacy_is_little_endian();
}
+struct vhost_flush_struct {
+ struct vhost_work work;
+ struct completion wait_event;
+};
+
+static void vhost_flush_work(struct vhost_work *work)
+{
+ struct vhost_flush_struct *s;
+
+ s = container_of(work, struct vhost_flush_struct, work);
+ complete(&s->wait_event);
+}
+
static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
poll_table *pt)
{
@@ -155,11 +177,9 @@ static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
- INIT_LIST_HEAD(&work->node);
+ clear_bit(VHOST_WORK_QUEUED, &work->flags);
work->fn = fn;
init_waitqueue_head(&work->done);
- work->flushing = 0;
- work->queue_seq = work->done_seq = 0;
}
EXPORT_SYMBOL_GPL(vhost_work_init);
@@ -211,31 +231,17 @@ void vhost_poll_stop(struct vhost_poll *poll)
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);
-static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
- unsigned seq)
-{
- int left;
-
- spin_lock_irq(&dev->work_lock);
- left = seq - work->done_seq;
- spin_unlock_irq(&dev->work_lock);
- return left <= 0;
-}
-
void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
- unsigned seq;
- int flushing;
+ struct vhost_flush_struct flush;
- spin_lock_irq(&dev->work_lock);
- seq = work->queue_seq;
- work->flushing++;
- spin_unlock_irq(&dev->work_lock);
- wait_event(work->done, vhost_work_seq_done(dev, work, seq));
- spin_lock_irq(&dev->work_lock);
- flushing = --work->flushing;
- spin_unlock_irq(&dev->work_lock);
- BUG_ON(flushing < 0);
+ if (dev->worker) {
+ init_completion(&flush.wait_event);
+ vhost_work_init(&flush.work, vhost_flush_work);
+
+ vhost_work_queue(dev, &flush.work);
+ wait_for_completion(&flush.wait_event);
+ }
}
EXPORT_SYMBOL_GPL(vhost_work_flush);
@@ -249,16 +255,16 @@ EXPORT_SYMBOL_GPL(vhost_poll_flush);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
- unsigned long flags;
+ if (!dev->worker)
+ return;
- spin_lock_irqsave(&dev->work_lock, flags);
- if (list_empty(&work->node)) {
- list_add_tail(&work->node, &dev->work_list);
- work->queue_seq++;
- spin_unlock_irqrestore(&dev->work_lock, flags);
+ if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
+ /* We can only add the work to the list after we're
+ * sure it was not in the list.
+ */
+ smp_mb();
+ llist_add(&work->node, &dev->work_list);
wake_up_process(dev->worker);
- } else {
- spin_unlock_irqrestore(&dev->work_lock, flags);
}
}
EXPORT_SYMBOL_GPL(vhost_work_queue);
@@ -266,7 +272,7 @@ EXPORT_SYMBOL_GPL(vhost_work_queue);
/* A lockless hint for busy polling code to exit the loop */
bool vhost_has_work(struct vhost_dev *dev)
{
- return !list_empty(&dev->work_list);
+ return !llist_empty(&dev->work_list);
}
EXPORT_SYMBOL_GPL(vhost_has_work);
@@ -300,17 +306,18 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->call_ctx = NULL;
vq->call = NULL;
vq->log_ctx = NULL;
- vq->memory = NULL;
vhost_reset_is_le(vq);
vhost_disable_cross_endian(vq);
vq->busyloop_timeout = 0;
+ vq->umem = NULL;
+ vq->iotlb = NULL;
}
static int vhost_worker(void *data)
{
struct vhost_dev *dev = data;
- struct vhost_work *work = NULL;
- unsigned uninitialized_var(seq);
+ struct vhost_work *work, *work_next;
+ struct llist_node *node;
mm_segment_t oldfs = get_fs();
set_fs(USER_DS);
@@ -320,35 +327,25 @@ static int vhost_worker(void *data)
/* mb paired w/ kthread_stop */
set_current_state(TASK_INTERRUPTIBLE);
- spin_lock_irq(&dev->work_lock);
- if (work) {
- work->done_seq = seq;
- if (work->flushing)
- wake_up_all(&work->done);
- }
-
if (kthread_should_stop()) {
- spin_unlock_irq(&dev->work_lock);
__set_current_state(TASK_RUNNING);
break;
}
- if (!list_empty(&dev->work_list)) {
- work = list_first_entry(&dev->work_list,
- struct vhost_work, node);
- list_del_init(&work->node);
- seq = work->queue_seq;
- } else
- work = NULL;
- spin_unlock_irq(&dev->work_lock);
- if (work) {
+ node = llist_del_all(&dev->work_list);
+ if (!node)
+ schedule();
+
+ node = llist_reverse_order(node);
+ /* make sure flag is seen after deletion */
+ smp_wmb();
+ llist_for_each_entry_safe(work, work_next, node, node) {
+ clear_bit(VHOST_WORK_QUEUED, &work->flags);
__set_current_state(TASK_RUNNING);
work->fn(work);
if (need_resched())
schedule();
- } else
- schedule();
-
+ }
}
unuse_mm(dev->mm);
set_fs(oldfs);
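
Together, vhost_work_queue() and the worker loop above form a lock-free
multi-producer/single-consumer queue: producers publish with llist_add()
(which builds a LIFO chain), and the single worker drains everything at
once with llist_del_all(), restoring submission order with
llist_reverse_order(). A condensed sketch of the pattern outside vhost
(illustrative names):

	struct job {
		struct llist_node node;
		unsigned long flags;	/* bit 0: queued */
		void (*fn)(struct job *);
	};

	static void job_submit(struct llist_head *q, struct job *j,
			       struct task_struct *worker)
	{
		if (!test_and_set_bit(0, &j->flags)) {
			smp_mb();	/* order flag set vs. list publish */
			llist_add(&j->node, q);
			wake_up_process(worker);
		}
	}

	static void job_drain(struct llist_head *q)
	{
		struct llist_node *batch = llist_del_all(q); /* newest first */
		struct job *j, *next;

		batch = llist_reverse_order(batch);	/* restore FIFO */
		llist_for_each_entry_safe(j, next, batch, node) {
			clear_bit(0, &j->flags);	/* re-queueable now */
			j->fn(j);
		}
	}
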
@@ -407,11 +404,16 @@ void vhost_dev_init(struct vhost_dev *dev,
mutex_init(&dev->mutex);
dev->log_ctx = NULL;
dev->log_file = NULL;
- dev->memory = NULL;
+ dev->umem = NULL;
+ dev->iotlb = NULL;
dev->mm = NULL;
- spin_lock_init(&dev->work_lock);
- INIT_LIST_HEAD(&dev->work_list);
dev->worker = NULL;
+ init_llist_head(&dev->work_list);
+ init_waitqueue_head(&dev->wait);
+ INIT_LIST_HEAD(&dev->read_list);
+ INIT_LIST_HEAD(&dev->pending_list);
+ spin_lock_init(&dev->iotlb_lock);
+
for (i = 0; i < dev->nvqs; ++i) {
vq = dev->vqs[i];
@@ -512,27 +514,36 @@ err_mm:
}
EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
-struct vhost_memory *vhost_dev_reset_owner_prepare(void)
+static void *vhost_kvzalloc(unsigned long size)
{
- return kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
+ void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+
+ if (!n)
+ n = vzalloc(size);
+ return n;
+}
+
+struct vhost_umem *vhost_dev_reset_owner_prepare(void)
+{
+ return vhost_kvzalloc(sizeof(struct vhost_umem));
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
/* Caller should have device mutex */
-void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_memory *memory)
+void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_umem *umem)
{
int i;
vhost_dev_cleanup(dev, true);
/* Restore memory to default empty mapping. */
- memory->nregions = 0;
- dev->memory = memory;
+ INIT_LIST_HEAD(&umem->umem_list);
+ dev->umem = umem;
/* We don't need VQ locks below since vhost_dev_cleanup makes sure
* VQs aren't running.
*/
for (i = 0; i < dev->nvqs; ++i)
- dev->vqs[i]->memory = memory;
+ dev->vqs[i]->umem = umem;
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
@@ -549,6 +560,47 @@ void vhost_dev_stop(struct vhost_dev *dev)
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);
+static void vhost_umem_free(struct vhost_umem *umem,
+ struct vhost_umem_node *node)
+{
+ vhost_umem_interval_tree_remove(node, &umem->umem_tree);
+ list_del(&node->link);
+ kfree(node);
+ umem->numem--;
+}
+
+static void vhost_umem_clean(struct vhost_umem *umem)
+{
+ struct vhost_umem_node *node, *tmp;
+
+ if (!umem)
+ return;
+
+ list_for_each_entry_safe(node, tmp, &umem->umem_list, link)
+ vhost_umem_free(umem, node);
+
+ kvfree(umem);
+}
+
+static void vhost_clear_msg(struct vhost_dev *dev)
+{
+ struct vhost_msg_node *node, *n;
+
+ spin_lock(&dev->iotlb_lock);
+
+ list_for_each_entry_safe(node, n, &dev->read_list, node) {
+ list_del(&node->node);
+ kfree(node);
+ }
+
+ list_for_each_entry_safe(node, n, &dev->pending_list, node) {
+ list_del(&node->node);
+ kfree(node);
+ }
+
+ spin_unlock(&dev->iotlb_lock);
+}
+
/* Caller should have device mutex if and only if locked is set */
void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
{
@@ -575,9 +627,13 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
fput(dev->log_file);
dev->log_file = NULL;
/* No one will access memory at this point */
- kvfree(dev->memory);
- dev->memory = NULL;
- WARN_ON(!list_empty(&dev->work_list));
+ vhost_umem_clean(dev->umem);
+ dev->umem = NULL;
+ vhost_umem_clean(dev->iotlb);
+ dev->iotlb = NULL;
+ vhost_clear_msg(dev);
+ wake_up_interruptible_poll(&dev->wait, POLLIN | POLLRDNORM);
+ WARN_ON(!llist_empty(&dev->work_list));
if (dev->worker) {
kthread_stop(dev->worker);
dev->worker = NULL;
@@ -601,26 +657,34 @@ static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
(sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}
+static bool vhost_overflow(u64 uaddr, u64 size)
+{
+ /* Make sure 64 bit math will not overflow. */
+ return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size;
+}
+
/* Caller should have vq mutex and device mutex. */
-static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
+static int vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem,
int log_all)
{
- int i;
+ struct vhost_umem_node *node;
- if (!mem)
+ if (!umem)
return 0;
- for (i = 0; i < mem->nregions; ++i) {
- struct vhost_memory_region *m = mem->regions + i;
- unsigned long a = m->userspace_addr;
- if (m->memory_size > ULONG_MAX)
+ list_for_each_entry(node, &umem->umem_list, link) {
+ unsigned long a = node->userspace_addr;
+
+ if (vhost_overflow(node->userspace_addr, node->size))
return 0;
- else if (!access_ok(VERIFY_WRITE, (void __user *)a,
- m->memory_size))
+
+ if (!access_ok(VERIFY_WRITE, (void __user *)a,
+ node->size))
return 0;
else if (log_all && !log_access_ok(log_base,
- m->guest_phys_addr,
- m->memory_size))
+ node->start,
+ node->size))
return 0;
}
return 1;
@@ -628,7 +692,7 @@ static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
-static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
+static int memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
int log_all)
{
int i;
@@ -641,7 +705,8 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
/* If ring is inactive, will check when it's enabled. */
if (d->vqs[i]->private_data)
- ok = vq_memory_access_ok(d->vqs[i]->log_base, mem, log);
+ ok = vq_memory_access_ok(d->vqs[i]->log_base,
+ umem, log);
else
ok = 1;
mutex_unlock(&d->vqs[i]->mutex);
@@ -651,12 +716,385 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
return 1;
}
+static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
+ struct iovec iov[], int iov_size, int access);
+
+static int vhost_copy_to_user(struct vhost_virtqueue *vq, void *to,
+ const void *from, unsigned size)
+{
+ int ret;
+
+ if (!vq->iotlb)
+ return __copy_to_user(to, from, size);
+ else {
+ /* This function should be called after iotlb
+ * prefetch, which guarantees the whole vq memory
+ * is accessible through the iotlb, so -EAGAIN should
+ * not happen here.
+ */
+ /* TODO: add a faster path */
+ struct iov_iter t;
+ ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
+ ARRAY_SIZE(vq->iotlb_iov),
+ VHOST_ACCESS_WO);
+ if (ret < 0)
+ goto out;
+ iov_iter_init(&t, WRITE, vq->iotlb_iov, ret, size);
+ ret = copy_to_iter(from, size, &t);
+ if (ret == size)
+ ret = 0;
+ }
+out:
+ return ret;
+}
+
+static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
+ void *from, unsigned size)
+{
+ int ret;
+
+ if (!vq->iotlb)
+ return __copy_from_user(to, from, size);
+ else {
+ /* This function should be called after iotlb
+ * prefetch, which guarantees the vq memory is
+ * accessible through the iotlb, so -EAGAIN should
+ * not happen here.
+ */
+ /* TODO: add a faster path */
+ struct iov_iter f;
+ ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
+ ARRAY_SIZE(vq->iotlb_iov),
+ VHOST_ACCESS_RO);
+ if (ret < 0) {
+ vq_err(vq, "IOTLB translation failure: uaddr "
+ "%p size 0x%llx\n", from,
+ (unsigned long long) size);
+ goto out;
+ }
+ iov_iter_init(&f, READ, vq->iotlb_iov, ret, size);
+ ret = copy_from_iter(to, size, &f);
+ if (ret == size)
+ ret = 0;
+ }
+
+out:
+ return ret;
+}
+
+static void __user *__vhost_get_user(struct vhost_virtqueue *vq,
+ void *addr, unsigned size)
+{
+ int ret;
+
+ /* This function should be called after iotlb
+ * prefetch, which guarantees the vq memory is
+ * accessible through the iotlb, so -EAGAIN should
+ * not happen here.
+ */
+ /* TODO: add a faster path */
+ ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
+ ARRAY_SIZE(vq->iotlb_iov),
+ VHOST_ACCESS_RO);
+ if (ret < 0) {
+ vq_err(vq, "IOTLB translation failure: uaddr "
+ "%p size 0x%llx\n", addr,
+ (unsigned long long) size);
+ return NULL;
+ }
+
+ if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
+ vq_err(vq, "Non atomic userspace memory access: uaddr "
+ "%p size 0x%llx\n", addr,
+ (unsigned long long) size);
+ return NULL;
+ }
+
+ return vq->iotlb_iov[0].iov_base;
+}
+
+#define vhost_put_user(vq, x, ptr) \
+({ \
+ int ret = -EFAULT; \
+ if (!vq->iotlb) { \
+ ret = __put_user(x, ptr); \
+ } else { \
+ __typeof__(ptr) to = \
+ (__typeof__(ptr)) __vhost_get_user(vq, ptr, sizeof(*ptr)); \
+ if (to != NULL) \
+ ret = __put_user(x, to); \
+ else \
+ ret = -EFAULT; \
+ } \
+ ret; \
+})
+
+#define vhost_get_user(vq, x, ptr) \
+({ \
+ int ret; \
+ if (!vq->iotlb) { \
+ ret = __get_user(x, ptr); \
+ } else { \
+ __typeof__(ptr) from = \
+ (__typeof__(ptr)) __vhost_get_user(vq, ptr, sizeof(*ptr)); \
+ if (from != NULL) \
+ ret = __get_user(x, from); \
+ else \
+ ret = -EFAULT; \
+ } \
+ ret; \
+})
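
The vhost_put_user()/vhost_get_user() pair keeps the legacy
__put_user()/__get_user() fast path whenever no IOTLB is attached, and
otherwise translates the ring pointer through __vhost_get_user() first.
That enables the mechanical call-site conversion visible throughout the
rest of this file, e.g. in vhost_get_vq_desc():

	/* before: direct guest-ring access */
	if (unlikely(__get_user(avail_idx, &vq->avail->idx)))
		return -EFAULT;

	/* after: IOTLB-aware, same semantics when vq->iotlb == NULL */
	if (unlikely(vhost_get_user(vq, avail_idx, &vq->avail->idx)))
		return -EFAULT;
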
+
+static void vhost_dev_lock_vqs(struct vhost_dev *d)
+{
+ int i = 0;
+ for (i = 0; i < d->nvqs; ++i)
+ mutex_lock(&d->vqs[i]->mutex);
+}
+
+static void vhost_dev_unlock_vqs(struct vhost_dev *d)
+{
+ int i = 0;
+ for (i = 0; i < d->nvqs; ++i)
+ mutex_unlock(&d->vqs[i]->mutex);
+}
+
+static int vhost_new_umem_range(struct vhost_umem *umem,
+ u64 start, u64 size, u64 end,
+ u64 userspace_addr, int perm)
+{
+ struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC);
+
+ if (!node)
+ return -ENOMEM;
+
+ if (umem->numem == max_iotlb_entries) {
+ tmp = list_first_entry(&umem->umem_list, typeof(*tmp), link);
+ vhost_umem_free(umem, tmp);
+ }
+
+ node->start = start;
+ node->size = size;
+ node->last = end;
+ node->userspace_addr = userspace_addr;
+ node->perm = perm;
+ INIT_LIST_HEAD(&node->link);
+ list_add_tail(&node->link, &umem->umem_list);
+ vhost_umem_interval_tree_insert(node, &umem->umem_tree);
+ umem->numem++;
+
+ return 0;
+}
+
+static void vhost_del_umem_range(struct vhost_umem *umem,
+ u64 start, u64 end)
+{
+ struct vhost_umem_node *node;
+
+ while ((node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
+ start, end)))
+ vhost_umem_free(umem, node);
+}
+
+static void vhost_iotlb_notify_vq(struct vhost_dev *d,
+ struct vhost_iotlb_msg *msg)
+{
+ struct vhost_msg_node *node, *n;
+
+ spin_lock(&d->iotlb_lock);
+
+ list_for_each_entry_safe(node, n, &d->pending_list, node) {
+ struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
+ if (msg->iova <= vq_msg->iova &&
+ msg->iova + msg->size - 1 >= vq_msg->iova &&
+ vq_msg->type == VHOST_IOTLB_MISS) {
+ vhost_poll_queue(&node->vq->poll);
+ list_del(&node->node);
+ kfree(node);
+ }
+ }
+
+ spin_unlock(&d->iotlb_lock);
+}
+
+static int umem_access_ok(u64 uaddr, u64 size, int access)
+{
+ unsigned long a = uaddr;
+
+ /* Make sure 64 bit math will not overflow. */
+ if (vhost_overflow(uaddr, size))
+ return -EFAULT;
+
+ if ((access & VHOST_ACCESS_RO) &&
+ !access_ok(VERIFY_READ, (void __user *)a, size))
+ return -EFAULT;
+ if ((access & VHOST_ACCESS_WO) &&
+ !access_ok(VERIFY_WRITE, (void __user *)a, size))
+ return -EFAULT;
+ return 0;
+}
+
+int vhost_process_iotlb_msg(struct vhost_dev *dev,
+ struct vhost_iotlb_msg *msg)
+{
+ int ret = 0;
+
+ vhost_dev_lock_vqs(dev);
+ switch (msg->type) {
+ case VHOST_IOTLB_UPDATE:
+ if (!dev->iotlb) {
+ ret = -EFAULT;
+ break;
+ }
+ if (umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
+ ret = -EFAULT;
+ break;
+ }
+ if (vhost_new_umem_range(dev->iotlb, msg->iova, msg->size,
+ msg->iova + msg->size - 1,
+ msg->uaddr, msg->perm)) {
+ ret = -ENOMEM;
+ break;
+ }
+ vhost_iotlb_notify_vq(dev, msg);
+ break;
+ case VHOST_IOTLB_INVALIDATE:
+ vhost_del_umem_range(dev->iotlb, msg->iova,
+ msg->iova + msg->size - 1);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ vhost_dev_unlock_vqs(dev);
+ return ret;
+}
+ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
+ struct iov_iter *from)
+{
+ struct vhost_msg_node node;
+ unsigned size = sizeof(struct vhost_msg);
+ size_t ret;
+ int err;
+
+ if (iov_iter_count(from) < size)
+ return 0;
+ ret = copy_from_iter(&node.msg, size, from);
+ if (ret != size)
+ goto done;
+
+ switch (node.msg.type) {
+ case VHOST_IOTLB_MSG:
+ err = vhost_process_iotlb_msg(dev, &node.msg.iotlb);
+ if (err)
+ ret = err;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+done:
+ return ret;
+}
+EXPORT_SYMBOL(vhost_chr_write_iter);
+
+unsigned int vhost_chr_poll(struct file *file, struct vhost_dev *dev,
+ poll_table *wait)
+{
+ unsigned int mask = 0;
+
+ poll_wait(file, &dev->wait, wait);
+
+ if (!list_empty(&dev->read_list))
+ mask |= POLLIN | POLLRDNORM;
+
+ return mask;
+}
+EXPORT_SYMBOL(vhost_chr_poll);
+
+ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
+ int noblock)
+{
+ DEFINE_WAIT(wait);
+ struct vhost_msg_node *node;
+ ssize_t ret = 0;
+ unsigned size = sizeof(struct vhost_msg);
+
+ if (iov_iter_count(to) < size)
+ return 0;
+
+ while (1) {
+ if (!noblock)
+ prepare_to_wait(&dev->wait, &wait,
+ TASK_INTERRUPTIBLE);
+
+ node = vhost_dequeue_msg(dev, &dev->read_list);
+ if (node)
+ break;
+ if (noblock) {
+ ret = -EAGAIN;
+ break;
+ }
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ if (!dev->iotlb) {
+ ret = -EBADFD;
+ break;
+ }
+
+ schedule();
+ }
+
+ if (!noblock)
+ finish_wait(&dev->wait, &wait);
+
+ if (node) {
+ ret = copy_to_iter(&node->msg, size, to);
+
+ if (ret != size || node->msg.type != VHOST_IOTLB_MISS) {
+ kfree(node);
+ return ret;
+ }
+
+ vhost_enqueue_msg(dev, &dev->pending_list, node);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vhost_chr_read_iter);
+
+static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
+{
+ struct vhost_dev *dev = vq->dev;
+ struct vhost_msg_node *node;
+ struct vhost_iotlb_msg *msg;
+
+ node = vhost_new_msg(vq, VHOST_IOTLB_MISS);
+ if (!node)
+ return -ENOMEM;
+
+ msg = &node->msg.iotlb;
+ msg->type = VHOST_IOTLB_MISS;
+ msg->iova = iova;
+ msg->perm = access;
+
+ vhost_enqueue_msg(dev, &dev->read_list, node);
+
+ return 0;
+}
+
static int vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
struct vring_desc __user *desc,
struct vring_avail __user *avail,
struct vring_used __user *used)
{
size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+
return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
access_ok(VERIFY_READ, avail,
sizeof *avail + num * sizeof *avail->ring + s) &&
@@ -664,11 +1102,59 @@ static int vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
sizeof *used + num * sizeof *used->ring + s);
}
+static int iotlb_access_ok(struct vhost_virtqueue *vq,
+ int access, u64 addr, u64 len)
+{
+ const struct vhost_umem_node *node;
+ struct vhost_umem *umem = vq->iotlb;
+ u64 s = 0, size;
+
+ while (len > s) {
+ node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
+ addr,
+ addr + len - 1);
+ if (node == NULL || node->start > addr) {
+ vhost_iotlb_miss(vq, addr, access);
+ return false;
+ } else if (!(node->perm & access)) {
+ /* Report the possible access violation by
+ * requesting another translation from userspace.
+ */
+ return false;
+ }
+
+ size = node->size - addr + node->start;
+ s += size;
+ addr += size;
+ }
+
+ return true;
+}
+
+int vq_iotlb_prefetch(struct vhost_virtqueue *vq)
+{
+ size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+ unsigned int num = vq->num;
+
+ if (!vq->iotlb)
+ return 1;
+
+ return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc,
+ num * sizeof *vq->desc) &&
+ iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->avail,
+ sizeof *vq->avail +
+ num * sizeof *vq->avail->ring + s) &&
+ iotlb_access_ok(vq, VHOST_ACCESS_WO, (u64)(uintptr_t)vq->used,
+ sizeof *vq->used +
+ num * sizeof *vq->used->ring + s);
+}
+EXPORT_SYMBOL_GPL(vq_iotlb_prefetch);
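
vq_iotlb_prefetch() is the contract that lets the copy helpers above
assume translations are present: a handler calls it once, under the vq
mutex, before touching the ring, and backs off when it fails (the miss
is already queued, and vhost_iotlb_notify_vq() will re-poll the vq once
userspace supplies the mapping). A hypothetical handler following the
same pattern as handle_tx()/handle_rx() in net.c:

	static void handle_kick(struct vhost_virtqueue *vq)
	{
		mutex_lock(&vq->mutex);
		if (!vq->private_data)
			goto out;
		if (!vq_iotlb_prefetch(vq))
			goto out;	/* re-polled on IOTLB update */

		/* ... vhost_get_vq_desc()/vhost_add_used() work ... */
	out:
		mutex_unlock(&vq->mutex);
	}
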
+
/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
int vhost_log_access_ok(struct vhost_dev *dev)
{
- return memory_access_ok(dev, dev->memory, 1);
+ return memory_access_ok(dev, dev->umem, 1);
}
EXPORT_SYMBOL_GPL(vhost_log_access_ok);
@@ -679,7 +1165,7 @@ static int vq_log_access_ok(struct vhost_virtqueue *vq,
{
size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
- return vq_memory_access_ok(log_base, vq->memory,
+ return vq_memory_access_ok(log_base, vq->umem,
vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
(!vq->log_used || log_access_ok(log_base, vq->log_addr,
sizeof *vq->used +
@@ -690,33 +1176,36 @@ static int vq_log_access_ok(struct vhost_virtqueue *vq,
/* Caller should have vq mutex and device mutex */
int vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
+ if (vq->iotlb) {
+ /* When a device IOTLB is in use, access is instead
+ * validated during prefetching.
+ */
+ return 1;
+ }
return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used) &&
vq_log_access_ok(vq, vq->log_base);
}
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
-static int vhost_memory_reg_sort_cmp(const void *p1, const void *p2)
+static struct vhost_umem *vhost_umem_alloc(void)
{
- const struct vhost_memory_region *r1 = p1, *r2 = p2;
- if (r1->guest_phys_addr < r2->guest_phys_addr)
- return 1;
- if (r1->guest_phys_addr > r2->guest_phys_addr)
- return -1;
- return 0;
-}
+ struct vhost_umem *umem = vhost_kvzalloc(sizeof(*umem));
-static void *vhost_kvzalloc(unsigned long size)
-{
- void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+ if (!umem)
+ return NULL;
- if (!n)
- n = vzalloc(size);
- return n;
+ umem->umem_tree = RB_ROOT;
+ umem->numem = 0;
+ INIT_LIST_HEAD(&umem->umem_list);
+
+ return umem;
}
static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
- struct vhost_memory mem, *newmem, *oldmem;
+ struct vhost_memory mem, *newmem;
+ struct vhost_memory_region *region;
+ struct vhost_umem *newumem, *oldumem;
unsigned long size = offsetof(struct vhost_memory, regions);
int i;
@@ -736,24 +1225,47 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
kvfree(newmem);
return -EFAULT;
}
- sort(newmem->regions, newmem->nregions, sizeof(*newmem->regions),
- vhost_memory_reg_sort_cmp, NULL);
- if (!memory_access_ok(d, newmem, 0)) {
+ newumem = vhost_umem_alloc();
+ if (!newumem) {
kvfree(newmem);
- return -EFAULT;
+ return -ENOMEM;
}
- oldmem = d->memory;
- d->memory = newmem;
+
+ for (region = newmem->regions;
+ region < newmem->regions + mem.nregions;
+ region++) {
+ if (vhost_new_umem_range(newumem,
+ region->guest_phys_addr,
+ region->memory_size,
+ region->guest_phys_addr +
+ region->memory_size - 1,
+ region->userspace_addr,
+ VHOST_ACCESS_RW))
+ goto err;
+ }
+
+ if (!memory_access_ok(d, newumem, 0))
+ goto err;
+
+ oldumem = d->umem;
+ d->umem = newumem;
/* All memory accesses are done under some VQ mutex. */
for (i = 0; i < d->nvqs; ++i) {
mutex_lock(&d->vqs[i]->mutex);
- d->vqs[i]->memory = newmem;
+ d->vqs[i]->umem = newumem;
mutex_unlock(&d->vqs[i]->mutex);
}
- kvfree(oldmem);
+
+ kvfree(newmem);
+ vhost_umem_clean(oldumem);
return 0;
+
+err:
+ vhost_umem_clean(newumem);
+ kvfree(newmem);
+ return -EFAULT;
}
long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
@@ -974,6 +1486,30 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
}
EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
+int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
+{
+ struct vhost_umem *niotlb, *oiotlb;
+ int i;
+
+ niotlb = vhost_umem_alloc();
+ if (!niotlb)
+ return -ENOMEM;
+
+ oiotlb = d->iotlb;
+ d->iotlb = niotlb;
+
+ for (i = 0; i < d->nvqs; ++i) {
+ mutex_lock(&d->vqs[i]->mutex);
+ d->vqs[i]->iotlb = niotlb;
+ mutex_unlock(&d->vqs[i]->mutex);
+ }
+
+ vhost_umem_clean(oiotlb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);
+
/* Caller must have device mutex */
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
@@ -1056,28 +1592,6 @@ done:
}
EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
-static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
- __u64 addr, __u32 len)
-{
- const struct vhost_memory_region *reg;
- int start = 0, end = mem->nregions;
-
- while (start < end) {
- int slot = start + (end - start) / 2;
- reg = mem->regions + slot;
- if (addr >= reg->guest_phys_addr)
- end = slot;
- else
- start = slot + 1;
- }
-
- reg = mem->regions + start;
- if (addr >= reg->guest_phys_addr &&
- reg->guest_phys_addr + reg->memory_size > addr)
- return reg;
- return NULL;
-}
-
/* TODO: This is really inefficient. We need something like get_user()
* (instruction directly accesses the data, with an exception table entry
* returning -EFAULT). See Documentation/x86/exception-tables.txt.
@@ -1156,7 +1670,8 @@ EXPORT_SYMBOL_GPL(vhost_log_write);
static int vhost_update_used_flags(struct vhost_virtqueue *vq)
{
void __user *used;
- if (__put_user(cpu_to_vhost16(vq, vq->used_flags), &vq->used->flags) < 0)
+ if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
+ &vq->used->flags) < 0)
return -EFAULT;
if (unlikely(vq->log_used)) {
/* Make sure the flag is seen before log. */
@@ -1174,7 +1689,8 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
{
- if (__put_user(cpu_to_vhost16(vq, vq->avail_idx), vhost_avail_event(vq)))
+ if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
+ vhost_avail_event(vq)))
return -EFAULT;
if (unlikely(vq->log_used)) {
void __user *used;
@@ -1208,15 +1724,20 @@ int vhost_vq_init_access(struct vhost_virtqueue *vq)
if (r)
goto err;
vq->signalled_used_valid = false;
- if (!access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx)) {
+ if (!vq->iotlb &&
+ !access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx)) {
r = -EFAULT;
goto err;
}
- r = __get_user(last_used_idx, &vq->used->idx);
- if (r)
+ r = vhost_get_user(vq, last_used_idx, &vq->used->idx);
+ if (r) {
+ vq_err(vq, "Can't access used idx at %p\n",
+ &vq->used->idx);
goto err;
+ }
vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
return 0;
+
err:
vq->is_le = is_le;
return r;
@@ -1224,36 +1745,48 @@ err:
EXPORT_SYMBOL_GPL(vhost_vq_init_access);
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
- struct iovec iov[], int iov_size)
+ struct iovec iov[], int iov_size, int access)
{
- const struct vhost_memory_region *reg;
- struct vhost_memory *mem;
+ const struct vhost_umem_node *node;
+ struct vhost_dev *dev = vq->dev;
+ struct vhost_umem *umem = dev->iotlb ? dev->iotlb : dev->umem;
struct iovec *_iov;
u64 s = 0;
int ret = 0;
- mem = vq->memory;
while ((u64)len > s) {
u64 size;
if (unlikely(ret >= iov_size)) {
ret = -ENOBUFS;
break;
}
- reg = find_region(mem, addr, len);
- if (unlikely(!reg)) {
- ret = -EFAULT;
+
+ node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
+ addr, addr + len - 1);
+ if (node == NULL || node->start > addr) {
+ if (umem != dev->iotlb) {
+ ret = -EFAULT;
+ break;
+ }
+ ret = -EAGAIN;
+ break;
+ } else if (!(node->perm & access)) {
+ ret = -EPERM;
break;
}
+
_iov = iov + ret;
- size = reg->memory_size - addr + reg->guest_phys_addr;
+ size = node->size - addr + node->start;
_iov->iov_len = min((u64)len - s, size);
_iov->iov_base = (void __user *)(unsigned long)
- (reg->userspace_addr + addr - reg->guest_phys_addr);
+ (node->userspace_addr + addr - node->start);
s += size;
addr += size;
++ret;
}
+ if (ret == -EAGAIN)
+ vhost_iotlb_miss(vq, addr, access);
return ret;
}
@@ -1288,7 +1821,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
unsigned int i = 0, count, found = 0;
u32 len = vhost32_to_cpu(vq, indirect->len);
struct iov_iter from;
- int ret;
+ int ret, access;
/* Sanity check */
if (unlikely(len % sizeof desc)) {
@@ -1300,9 +1833,10 @@ static int get_indirect(struct vhost_virtqueue *vq,
}
ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
- UIO_MAXIOV);
+ UIO_MAXIOV, VHOST_ACCESS_RO);
if (unlikely(ret < 0)) {
- vq_err(vq, "Translation failure %d in indirect.\n", ret);
+ if (ret != -EAGAIN)
+ vq_err(vq, "Translation failure %d in indirect.\n", ret);
return ret;
}
iov_iter_init(&from, READ, vq->indirect, ret, len);
@@ -1340,16 +1874,22 @@ static int get_indirect(struct vhost_virtqueue *vq,
return -EINVAL;
}
+ if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
+ access = VHOST_ACCESS_WO;
+ else
+ access = VHOST_ACCESS_RO;
+
ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
vhost32_to_cpu(vq, desc.len), iov + iov_count,
- iov_size - iov_count);
+ iov_size - iov_count, access);
if (unlikely(ret < 0)) {
- vq_err(vq, "Translation failure %d indirect idx %d\n",
- ret, i);
+ if (ret != -EAGAIN)
+ vq_err(vq, "Translation failure %d indirect idx %d\n",
+ ret, i);
return ret;
}
/* If this is an input descriptor, increment that count. */
- if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) {
+ if (access == VHOST_ACCESS_WO) {
*in_num += ret;
if (unlikely(log)) {
log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
@@ -1388,11 +1928,11 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
u16 last_avail_idx;
__virtio16 avail_idx;
__virtio16 ring_head;
- int ret;
+ int ret, access;
/* Check it isn't doing very strange things with descriptor numbers. */
last_avail_idx = vq->last_avail_idx;
- if (unlikely(__get_user(avail_idx, &vq->avail->idx))) {
+ if (unlikely(vhost_get_user(vq, avail_idx, &vq->avail->idx))) {
vq_err(vq, "Failed to access avail idx at %p\n",
&vq->avail->idx);
return -EFAULT;
@@ -1414,8 +1954,8 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
/* Grab the next descriptor number they're advertising, and increment
* the index we've seen. */
- if (unlikely(__get_user(ring_head,
- &vq->avail->ring[last_avail_idx & (vq->num - 1)]))) {
+ if (unlikely(vhost_get_user(vq, ring_head,
+ &vq->avail->ring[last_avail_idx & (vq->num - 1)]))) {
vq_err(vq, "Failed to read head: idx %d address %p\n",
last_avail_idx,
&vq->avail->ring[last_avail_idx % vq->num]);
@@ -1450,7 +1990,8 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
i, vq->num, head);
return -EINVAL;
}
- ret = __copy_from_user(&desc, vq->desc + i, sizeof desc);
+ ret = vhost_copy_from_user(vq, &desc, vq->desc + i,
+ sizeof desc);
if (unlikely(ret)) {
vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
i, vq->desc + i);
@@ -1461,22 +2002,28 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
out_num, in_num,
log, log_num, &desc);
if (unlikely(ret < 0)) {
- vq_err(vq, "Failure detected "
- "in indirect descriptor at idx %d\n", i);
+ if (ret != -EAGAIN)
+ vq_err(vq, "Failure detected "
+ "in indirect descriptor at idx %d\n", i);
return ret;
}
continue;
}
+ if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
+ access = VHOST_ACCESS_WO;
+ else
+ access = VHOST_ACCESS_RO;
ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
vhost32_to_cpu(vq, desc.len), iov + iov_count,
- iov_size - iov_count);
+ iov_size - iov_count, access);
if (unlikely(ret < 0)) {
- vq_err(vq, "Translation failure %d descriptor idx %d\n",
- ret, i);
+ if (ret != -EAGAIN)
+ vq_err(vq, "Translation failure %d descriptor idx %d\n",
+ ret, i);
return ret;
}
- if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) {
+ if (access == VHOST_ACCESS_WO) {
/* If this is an input descriptor,
* increment that count. */
*in_num += ret;
@@ -1538,15 +2085,15 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
start = vq->last_used_idx & (vq->num - 1);
used = vq->used->ring + start;
if (count == 1) {
- if (__put_user(heads[0].id, &used->id)) {
+ if (vhost_put_user(vq, heads[0].id, &used->id)) {
vq_err(vq, "Failed to write used id");
return -EFAULT;
}
- if (__put_user(heads[0].len, &used->len)) {
+ if (vhost_put_user(vq, heads[0].len, &used->len)) {
vq_err(vq, "Failed to write used len");
return -EFAULT;
}
- } else if (__copy_to_user(used, heads, count * sizeof *used)) {
+ } else if (vhost_copy_to_user(vq, used, heads, count * sizeof *used)) {
vq_err(vq, "Failed to write used");
return -EFAULT;
}
@@ -1590,7 +2137,8 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
/* Make sure buffer is written before we update index. */
smp_wmb();
- if (__put_user(cpu_to_vhost16(vq, vq->last_used_idx), &vq->used->idx)) {
+ if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
+ &vq->used->idx)) {
vq_err(vq, "Failed to increment used idx");
return -EFAULT;
}
@@ -1622,7 +2170,7 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
__virtio16 flags;
- if (__get_user(flags, &vq->avail->flags)) {
+ if (vhost_get_user(vq, flags, &vq->avail->flags)) {
vq_err(vq, "Failed to get flags");
return true;
}
@@ -1636,7 +2184,7 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
if (unlikely(!v))
return true;
- if (__get_user(event, vhost_used_event(vq))) {
+ if (vhost_get_user(vq, event, vhost_used_event(vq))) {
vq_err(vq, "Failed to get used event idx");
return true;
}
@@ -1678,7 +2226,7 @@ bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
__virtio16 avail_idx;
int r;
- r = __get_user(avail_idx, &vq->avail->idx);
+ r = vhost_get_user(vq, avail_idx, &vq->avail->idx);
if (r)
return false;
@@ -1713,7 +2261,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
/* They could have slipped one in as we were doing that: make
* sure it's written, then check again. */
smp_mb();
- r = __get_user(avail_idx, &vq->avail->idx);
+ r = vhost_get_user(vq, avail_idx, &vq->avail->idx);
if (r) {
vq_err(vq, "Failed to check avail idx at %p: %d\n",
&vq->avail->idx, r);
@@ -1741,6 +2289,47 @@ void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
}
EXPORT_SYMBOL_GPL(vhost_disable_notify);
+/* Create a new message. */
+struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
+{
+ struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);
+ if (!node)
+ return NULL;
+ node->vq = vq;
+ node->msg.type = type;
+ return node;
+}
+EXPORT_SYMBOL_GPL(vhost_new_msg);
+
+void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
+ struct vhost_msg_node *node)
+{
+ spin_lock(&dev->iotlb_lock);
+ list_add_tail(&node->node, head);
+ spin_unlock(&dev->iotlb_lock);
+
+ wake_up_interruptible_poll(&dev->wait, POLLIN | POLLRDNORM);
+}
+EXPORT_SYMBOL_GPL(vhost_enqueue_msg);
+
+struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
+ struct list_head *head)
+{
+ struct vhost_msg_node *node = NULL;
+
+ spin_lock(&dev->iotlb_lock);
+ if (!list_empty(head)) {
+ node = list_first_entry(head, struct vhost_msg_node,
+ node);
+ list_del(&node->node);
+ }
+ spin_unlock(&dev->iotlb_lock);
+
+ return node;
+}
+EXPORT_SYMBOL_GPL(vhost_dequeue_msg);
+
static int __init vhost_init(void)
{
return 0;
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index d36d8beb3..78f3c5fc0 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -15,13 +15,15 @@
struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);
+#define VHOST_WORK_QUEUED 1
struct vhost_work {
- struct list_head node;
+ struct llist_node node;
vhost_work_fn_t fn;
wait_queue_head_t done;
int flushing;
unsigned queue_seq;
unsigned done_seq;
+ unsigned long flags;
};
/* Poll a file (eventfd or socket) */
@@ -53,6 +55,27 @@ struct vhost_log {
u64 len;
};
+#define START(node) ((node)->start)
+#define LAST(node) ((node)->last)
+
+struct vhost_umem_node {
+ struct rb_node rb;
+ struct list_head link;
+ __u64 start;
+ __u64 last;
+ __u64 size;
+ __u64 userspace_addr;
+ __u32 perm;
+ __u32 flags_padding;
+ __u64 __subtree_last;
+};
+
+struct vhost_umem {
+ struct rb_root umem_tree;
+ struct list_head umem_list;
+ int numem;
+};
+
/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
struct vhost_dev *dev;
@@ -98,10 +121,12 @@ struct vhost_virtqueue {
u64 log_addr;
struct iovec iov[UIO_MAXIOV];
+ struct iovec iotlb_iov[64];
struct iovec *indirect;
struct vring_used_elem *heads;
/* Protected by virtqueue mutex. */
- struct vhost_memory *memory;
+ struct vhost_umem *umem;
+ struct vhost_umem *iotlb;
void *private_data;
u64 acked_features;
/* Log write descriptors */
@@ -118,25 +143,35 @@ struct vhost_virtqueue {
u32 busyloop_timeout;
};
+struct vhost_msg_node {
+ struct vhost_msg msg;
+ struct vhost_virtqueue *vq;
+ struct list_head node;
+};
+
struct vhost_dev {
- struct vhost_memory *memory;
struct mm_struct *mm;
struct mutex mutex;
struct vhost_virtqueue **vqs;
int nvqs;
struct file *log_file;
struct eventfd_ctx *log_ctx;
- spinlock_t work_lock;
- struct list_head work_list;
+ struct llist_head work_list;
struct task_struct *worker;
+ struct vhost_umem *umem;
+ struct vhost_umem *iotlb;
+ spinlock_t iotlb_lock;
+ struct list_head read_list;
+ struct list_head pending_list;
+ wait_queue_head_t wait;
};
void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
-struct vhost_memory *vhost_dev_reset_owner_prepare(void);
-void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_memory *);
+struct vhost_umem *vhost_dev_reset_owner_prepare(void);
+void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_umem *);
void vhost_dev_cleanup(struct vhost_dev *, bool locked);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
@@ -165,6 +200,21 @@ bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
unsigned int log_num, u64 len);
+int vq_iotlb_prefetch(struct vhost_virtqueue *vq);
+
+struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
+void vhost_enqueue_msg(struct vhost_dev *dev,
+ struct list_head *head,
+ struct vhost_msg_node *node);
+struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
+ struct list_head *head);
+unsigned int vhost_chr_poll(struct file *file, struct vhost_dev *dev,
+ poll_table *wait);
+ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
+ int noblock);
+ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
+ struct iov_iter *from);
+int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled);
#define vq_err(vq, fmt, ...) do { \
pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
new file mode 100644
index 000000000..e3b30ea9e
--- /dev/null
+++ b/drivers/vhost/vsock.c
@@ -0,0 +1,723 @@
+/*
+ * vhost transport for vsock
+ *
+ * Copyright (C) 2013-2015 Red Hat, Inc.
+ * Author: Asias He <asias@redhat.com>
+ * Stefan Hajnoczi <stefanha@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ */
+#include <linux/miscdevice.h>
+#include <linux/atomic.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/vmalloc.h>
+#include <net/sock.h>
+#include <linux/virtio_vsock.h>
+#include <linux/vhost.h>
+
+#include <net/af_vsock.h>
+#include "vhost.h"
+
+#define VHOST_VSOCK_DEFAULT_HOST_CID 2
+
+enum {
+ VHOST_VSOCK_FEATURES = VHOST_FEATURES,
+};
+
+/* Used to track all the vhost_vsock instances on the system. */
+static DEFINE_SPINLOCK(vhost_vsock_lock);
+static LIST_HEAD(vhost_vsock_list);
+
+struct vhost_vsock {
+ struct vhost_dev dev;
+ struct vhost_virtqueue vqs[2];
+
+ /* Link to global vhost_vsock_list, protected by vhost_vsock_lock */
+ struct list_head list;
+
+ struct vhost_work send_pkt_work;
+ spinlock_t send_pkt_list_lock;
+ struct list_head send_pkt_list; /* host->guest pending packets */
+
+ atomic_t queued_replies;
+
+ u32 guest_cid;
+};
+
+static u32 vhost_transport_get_local_cid(void)
+{
+ return VHOST_VSOCK_DEFAULT_HOST_CID;
+}
+
+static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
+{
+ struct vhost_vsock *vsock;
+
+ spin_lock_bh(&vhost_vsock_lock);
+ list_for_each_entry(vsock, &vhost_vsock_list, list) {
+ u32 other_cid = vsock->guest_cid;
+
+ /* Skip instances that have no CID yet */
+ if (other_cid == 0)
+ continue;
+
+ if (other_cid == guest_cid) {
+ spin_unlock_bh(&vhost_vsock_lock);
+ return vsock;
+ }
+ }
+ spin_unlock_bh(&vhost_vsock_lock);
+
+ return NULL;
+}
+
+static void
+vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
+ struct vhost_virtqueue *vq)
+{
+ struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
+ bool added = false;
+ bool restart_tx = false;
+
+ mutex_lock(&vq->mutex);
+
+ if (!vq->private_data)
+ goto out;
+
+ /* Avoid further vmexits; we're already processing the virtqueue */
+ vhost_disable_notify(&vsock->dev, vq);
+
+ for (;;) {
+ struct virtio_vsock_pkt *pkt;
+ struct iov_iter iov_iter;
+ unsigned out, in;
+ size_t nbytes;
+ size_t len;
+ int head;
+
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+ if (list_empty(&vsock->send_pkt_list)) {
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+ vhost_enable_notify(&vsock->dev, vq);
+ break;
+ }
+
+ pkt = list_first_entry(&vsock->send_pkt_list,
+ struct virtio_vsock_pkt, list);
+ list_del_init(&pkt->list);
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+ head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
+ &out, &in, NULL, NULL);
+ if (head < 0) {
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+ list_add(&pkt->list, &vsock->send_pkt_list);
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+ break;
+ }
+
+ if (head == vq->num) {
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+ list_add(&pkt->list, &vsock->send_pkt_list);
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+ /* We cannot finish yet if more buffers snuck in while
+ * re-enabling notify.
+ */
+ if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
+ vhost_disable_notify(&vsock->dev, vq);
+ continue;
+ }
+ break;
+ }
+
+ if (out) {
+ virtio_transport_free_pkt(pkt);
+ vq_err(vq, "Expected 0 output buffers, got %u\n", out);
+ break;
+ }
+
+ len = iov_length(&vq->iov[out], in);
+ iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len);
+
+ nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
+ if (nbytes != sizeof(pkt->hdr)) {
+ virtio_transport_free_pkt(pkt);
+ vq_err(vq, "Faulted on copying pkt hdr\n");
+ break;
+ }
+
+ nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter);
+ if (nbytes != pkt->len) {
+ virtio_transport_free_pkt(pkt);
+ vq_err(vq, "Faulted on copying pkt buf\n");
+ break;
+ }
+
+ vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
+ added = true;
+
+ if (pkt->reply) {
+ int val;
+
+ val = atomic_dec_return(&vsock->queued_replies);
+
+ /* Do we have resources to resume tx processing? */
+ if (val + 1 == tx_vq->num)
+ restart_tx = true;
+ }
+
+ virtio_transport_free_pkt(pkt);
+ }
+ if (added)
+ vhost_signal(&vsock->dev, vq);
+
+out:
+ mutex_unlock(&vq->mutex);
+
+ if (restart_tx)
+ vhost_poll_queue(&tx_vq->poll);
+}
+
+static void vhost_transport_send_pkt_work(struct vhost_work *work)
+{
+ struct vhost_virtqueue *vq;
+ struct vhost_vsock *vsock;
+
+ vsock = container_of(work, struct vhost_vsock, send_pkt_work);
+ vq = &vsock->vqs[VSOCK_VQ_RX];
+
+ vhost_transport_do_send_pkt(vsock, vq);
+}
+
+static int
+vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
+{
+ struct vhost_vsock *vsock;
+ struct vhost_virtqueue *vq;
+ int len = pkt->len;
+
+ /* Find the vhost_vsock according to guest context id */
+ vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
+ if (!vsock) {
+ virtio_transport_free_pkt(pkt);
+ return -ENODEV;
+ }
+
+ vq = &vsock->vqs[VSOCK_VQ_RX];
+
+ if (pkt->reply)
+ atomic_inc(&vsock->queued_replies);
+
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+ list_add_tail(&pkt->list, &vsock->send_pkt_list);
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+ vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
+ return len;
+}
+
+static struct virtio_vsock_pkt *
+vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
+ unsigned int out, unsigned int in)
+{
+ struct virtio_vsock_pkt *pkt;
+ struct iov_iter iov_iter;
+ size_t nbytes;
+ size_t len;
+
+ if (in != 0) {
+ vq_err(vq, "Expected 0 input buffers, got %u\n", in);
+ return NULL;
+ }
+
+ pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
+ if (!pkt)
+ return NULL;
+
+ len = iov_length(vq->iov, out);
+ iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);
+
+ nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
+ if (nbytes != sizeof(pkt->hdr)) {
+ vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
+ sizeof(pkt->hdr), nbytes);
+ kfree(pkt);
+ return NULL;
+ }
+
+ if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
+ pkt->len = le32_to_cpu(pkt->hdr.len);
+
+ /* No payload */
+ if (!pkt->len)
+ return pkt;
+
+ /* The pkt is too big */
+ if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
+ kfree(pkt);
+ return NULL;
+ }
+
+ pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
+ if (!pkt->buf) {
+ kfree(pkt);
+ return NULL;
+ }
+
+ nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
+ if (nbytes != pkt->len) {
+ vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
+ pkt->len, nbytes);
+ virtio_transport_free_pkt(pkt);
+ return NULL;
+ }
+
+ return pkt;
+}
+
+/* Is there space left for replies to rx packets? */
+static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
+{
+ struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
+ int val;
+
+ smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
+ val = atomic_read(&vsock->queued_replies);
+
+ return val < vq->num;
+}
+
+static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
+{
+ struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+ poll.work);
+ struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
+ dev);
+ struct virtio_vsock_pkt *pkt;
+ int head;
+ unsigned int out, in;
+ bool added = false;
+
+ mutex_lock(&vq->mutex);
+
+ if (!vq->private_data)
+ goto out;
+
+ vhost_disable_notify(&vsock->dev, vq);
+ for (;;) {
+ u32 len;
+
+ if (!vhost_vsock_more_replies(vsock)) {
+ /* Stop tx until the device processes already
+ * pending replies. Leave tx virtqueue
+ * callbacks disabled.
+ */
+ goto no_more_replies;
+ }
+
+ head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
+ &out, &in, NULL, NULL);
+ if (head < 0)
+ break;
+
+ if (head == vq->num) {
+ if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
+ vhost_disable_notify(&vsock->dev, vq);
+ continue;
+ }
+ break;
+ }
+
+ pkt = vhost_vsock_alloc_pkt(vq, out, in);
+ if (!pkt) {
+ vq_err(vq, "Faulted on pkt\n");
+ continue;
+ }
+
+ len = pkt->len;
+
+ /* Only accept correctly addressed packets */
+ if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
+ virtio_transport_recv_pkt(pkt);
+ else
+ virtio_transport_free_pkt(pkt);
+
+ vhost_add_used(vq, head, sizeof(pkt->hdr) + len);
+ added = true;
+ }
+
+no_more_replies:
+ if (added)
+ vhost_signal(&vsock->dev, vq);
+
+out:
+ mutex_unlock(&vq->mutex);
+}
+
+static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
+{
+ struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+ poll.work);
+ struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
+ dev);
+
+ vhost_transport_do_send_pkt(vsock, vq);
+}
+
+static int vhost_vsock_start(struct vhost_vsock *vsock)
+{
+ size_t i;
+ int ret;
+
+ mutex_lock(&vsock->dev.mutex);
+
+ ret = vhost_dev_check_owner(&vsock->dev);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
+ struct vhost_virtqueue *vq = &vsock->vqs[i];
+
+ mutex_lock(&vq->mutex);
+
+ if (!vhost_vq_access_ok(vq)) {
+ ret = -EFAULT;
+ mutex_unlock(&vq->mutex);
+ goto err_vq;
+ }
+
+ if (!vq->private_data) {
+ vq->private_data = vsock;
+ vhost_vq_init_access(vq);
+ }
+
+ mutex_unlock(&vq->mutex);
+ }
+
+ mutex_unlock(&vsock->dev.mutex);
+ return 0;
+
+err_vq:
+ for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
+ struct vhost_virtqueue *vq = &vsock->vqs[i];
+
+ mutex_lock(&vq->mutex);
+ vq->private_data = NULL;
+ mutex_unlock(&vq->mutex);
+ }
+err:
+ mutex_unlock(&vsock->dev.mutex);
+ return ret;
+}
+
+static int vhost_vsock_stop(struct vhost_vsock *vsock)
+{
+ size_t i;
+ int ret;
+
+ mutex_lock(&vsock->dev.mutex);
+
+ ret = vhost_dev_check_owner(&vsock->dev);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
+ struct vhost_virtqueue *vq = &vsock->vqs[i];
+
+ mutex_lock(&vq->mutex);
+ vq->private_data = NULL;
+ mutex_unlock(&vq->mutex);
+ }
+
+err:
+ mutex_unlock(&vsock->dev.mutex);
+ return ret;
+}
+
+static void vhost_vsock_free(struct vhost_vsock *vsock)
+{
+ kvfree(vsock);
+}
+
+static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
+{
+ struct vhost_virtqueue **vqs;
+ struct vhost_vsock *vsock;
+ int ret;
+
+ /* This struct is large and allocation could fail; fall back to vmalloc
+ * if there is no other way.
+ */
+ vsock = kzalloc(sizeof(*vsock), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+ if (!vsock) {
+ vsock = vmalloc(sizeof(*vsock));
+ if (!vsock)
+ return -ENOMEM;
+ }
+
+ vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
+ if (!vqs) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ atomic_set(&vsock->queued_replies, 0);
+
+ vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
+ vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
+ vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
+ vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;
+
+ vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));
+
+ file->private_data = vsock;
+ spin_lock_init(&vsock->send_pkt_list_lock);
+ INIT_LIST_HEAD(&vsock->send_pkt_list);
+ vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
+
+ spin_lock_bh(&vhost_vsock_lock);
+ list_add_tail(&vsock->list, &vhost_vsock_list);
+ spin_unlock_bh(&vhost_vsock_lock);
+ return 0;
+
+out:
+ vhost_vsock_free(vsock);
+ return ret;
+}
+
+static void vhost_vsock_flush(struct vhost_vsock *vsock)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
+ if (vsock->vqs[i].handle_kick)
+ vhost_poll_flush(&vsock->vqs[i].poll);
+ vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
+}
+
+static void vhost_vsock_reset_orphans(struct sock *sk)
+{
+ struct vsock_sock *vsk = vsock_sk(sk);
+
+ /* vmci_transport.c doesn't take sk_lock here either. At least we're
+ * under vsock_table_lock so the sock cannot disappear while we're
+ * executing.
+ */
+
+ if (!vhost_vsock_get(vsk->local_addr.svm_cid)) {
+ sock_set_flag(sk, SOCK_DONE);
+ vsk->peer_shutdown = SHUTDOWN_MASK;
+ sk->sk_state = SS_UNCONNECTED;
+ sk->sk_err = ECONNRESET;
+ sk->sk_error_report(sk);
+ }
+}
+
+static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
+{
+ struct vhost_vsock *vsock = file->private_data;
+
+ spin_lock_bh(&vhost_vsock_lock);
+ list_del(&vsock->list);
+ spin_unlock_bh(&vhost_vsock_lock);
+
+ /* Iterating over all connections for all CIDs to find orphans is
+ * inefficient. Room for improvement here. */
+ vsock_for_each_connected_socket(vhost_vsock_reset_orphans);
+
+ vhost_vsock_stop(vsock);
+ vhost_vsock_flush(vsock);
+ vhost_dev_stop(&vsock->dev);
+
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+ while (!list_empty(&vsock->send_pkt_list)) {
+ struct virtio_vsock_pkt *pkt;
+
+ pkt = list_first_entry(&vsock->send_pkt_list,
+ struct virtio_vsock_pkt, list);
+ list_del_init(&pkt->list);
+ virtio_transport_free_pkt(pkt);
+ }
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+ vhost_dev_cleanup(&vsock->dev, false);
+ kfree(vsock->dev.vqs);
+ vhost_vsock_free(vsock);
+ return 0;
+}
+
+static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
+{
+ struct vhost_vsock *other;
+
+ /* Refuse reserved CIDs */
+ if (guest_cid <= VMADDR_CID_HOST ||
+ guest_cid == U32_MAX)
+ return -EINVAL;
+
+ /* 64-bit CIDs are not yet supported */
+ if (guest_cid > U32_MAX)
+ return -EINVAL;
+
+ /* Refuse if CID is already in use */
+ other = vhost_vsock_get(guest_cid);
+ if (other && other != vsock)
+ return -EADDRINUSE;
+
+ spin_lock_bh(&vhost_vsock_lock);
+ vsock->guest_cid = guest_cid;
+ spin_unlock_bh(&vhost_vsock_lock);
+
+ return 0;
+}
+
+static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
+{
+ struct vhost_virtqueue *vq;
+ int i;
+
+ if (features & ~VHOST_VSOCK_FEATURES)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&vsock->dev.mutex);
+ if ((features & (1 << VHOST_F_LOG_ALL)) &&
+ !vhost_log_access_ok(&vsock->dev)) {
+ mutex_unlock(&vsock->dev.mutex);
+ return -EFAULT;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
+ vq = &vsock->vqs[i];
+ mutex_lock(&vq->mutex);
+ vq->acked_features = features;
+ mutex_unlock(&vq->mutex);
+ }
+ mutex_unlock(&vsock->dev.mutex);
+ return 0;
+}
+
+static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
+ unsigned long arg)
+{
+ struct vhost_vsock *vsock = f->private_data;
+ void __user *argp = (void __user *)arg;
+ u64 guest_cid;
+ u64 features;
+ int start;
+ int r;
+
+ switch (ioctl) {
+ case VHOST_VSOCK_SET_GUEST_CID:
+ if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
+ return -EFAULT;
+ return vhost_vsock_set_cid(vsock, guest_cid);
+ case VHOST_VSOCK_SET_RUNNING:
+ if (copy_from_user(&start, argp, sizeof(start)))
+ return -EFAULT;
+ if (start)
+ return vhost_vsock_start(vsock);
+ else
+ return vhost_vsock_stop(vsock);
+ case VHOST_GET_FEATURES:
+ features = VHOST_VSOCK_FEATURES;
+ if (copy_to_user(argp, &features, sizeof(features)))
+ return -EFAULT;
+ return 0;
+ case VHOST_SET_FEATURES:
+ if (copy_from_user(&features, argp, sizeof(features)))
+ return -EFAULT;
+ return vhost_vsock_set_features(vsock, features);
+ default:
+ mutex_lock(&vsock->dev.mutex);
+ r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
+ if (r == -ENOIOCTLCMD)
+ r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
+ else
+ vhost_vsock_flush(vsock);
+ mutex_unlock(&vsock->dev.mutex);
+ return r;
+ }
+}
+
+static const struct file_operations vhost_vsock_fops = {
+ .owner = THIS_MODULE,
+ .open = vhost_vsock_dev_open,
+ .release = vhost_vsock_dev_release,
+ .llseek = noop_llseek,
+ .unlocked_ioctl = vhost_vsock_dev_ioctl,
+};
+
+static struct miscdevice vhost_vsock_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "vhost-vsock",
+ .fops = &vhost_vsock_fops,
+};
+
+static struct virtio_transport vhost_transport = {
+ .transport = {
+ .get_local_cid = vhost_transport_get_local_cid,
+
+ .init = virtio_transport_do_socket_init,
+ .destruct = virtio_transport_destruct,
+ .release = virtio_transport_release,
+ .connect = virtio_transport_connect,
+ .shutdown = virtio_transport_shutdown,
+
+ .dgram_enqueue = virtio_transport_dgram_enqueue,
+ .dgram_dequeue = virtio_transport_dgram_dequeue,
+ .dgram_bind = virtio_transport_dgram_bind,
+ .dgram_allow = virtio_transport_dgram_allow,
+
+ .stream_enqueue = virtio_transport_stream_enqueue,
+ .stream_dequeue = virtio_transport_stream_dequeue,
+ .stream_has_data = virtio_transport_stream_has_data,
+ .stream_has_space = virtio_transport_stream_has_space,
+ .stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
+ .stream_is_active = virtio_transport_stream_is_active,
+ .stream_allow = virtio_transport_stream_allow,
+
+ .notify_poll_in = virtio_transport_notify_poll_in,
+ .notify_poll_out = virtio_transport_notify_poll_out,
+ .notify_recv_init = virtio_transport_notify_recv_init,
+ .notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
+ .notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
+ .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
+ .notify_send_init = virtio_transport_notify_send_init,
+ .notify_send_pre_block = virtio_transport_notify_send_pre_block,
+ .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
+ .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
+
+ .set_buffer_size = virtio_transport_set_buffer_size,
+ .set_min_buffer_size = virtio_transport_set_min_buffer_size,
+ .set_max_buffer_size = virtio_transport_set_max_buffer_size,
+ .get_buffer_size = virtio_transport_get_buffer_size,
+ .get_min_buffer_size = virtio_transport_get_min_buffer_size,
+ .get_max_buffer_size = virtio_transport_get_max_buffer_size,
+ },
+
+ .send_pkt = vhost_transport_send_pkt,
+};
+
+static int __init vhost_vsock_init(void)
+{
+ int ret;
+
+ ret = vsock_core_init(&vhost_transport.transport);
+ if (ret < 0)
+ return ret;
+ return misc_register(&vhost_vsock_misc);
+}
+
+static void __exit vhost_vsock_exit(void)
+{
+ misc_deregister(&vhost_vsock_misc);
+ vsock_core_exit();
+}
+
+module_init(vhost_vsock_init);
+module_exit(vhost_vsock_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Asias He");
+MODULE_DESCRIPTION("vhost transport for vsock");
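
The character device added above is driven entirely through ioctls. A minimal userspace sketch (illustrative, not part of this patch) that assigns guest CID 3 and starts the virtqueue workers, assuming the VHOST_VSOCK_* definitions are exported through <linux/vhost.h> in this release; a real VMM such as QEMU would also have to set up the memory table and vrings with the generic VHOST_SET_* ioctls before setting RUNNING:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vhost.h>

int main(void)
{
	uint64_t guest_cid = 3;	/* > VMADDR_CID_HOST, < U32_MAX, not in use */
	int running = 1;
	int fd = open("/dev/vhost-vsock", O_RDWR);

	if (fd < 0) {
		perror("open /dev/vhost-vsock");
		return 1;
	}
	/* handled by vhost_vsock_set_cid() above */
	if (ioctl(fd, VHOST_VSOCK_SET_GUEST_CID, &guest_cid) < 0)
		perror("VHOST_VSOCK_SET_GUEST_CID");
	/* start == 1 ends up in vhost_vsock_start() */
	if (ioctl(fd, VHOST_VSOCK_SET_RUNNING, &running) < 0)
		perror("VHOST_VSOCK_SET_RUNNING");
	close(fd);
	return 0;
}
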
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index e5b14f526..939f05783 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/backlight.h>
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_data/lp855x.h>
@@ -74,6 +75,7 @@ struct lp855x {
struct lp855x_platform_data *pdata;
struct pwm_device *pwm;
struct regulator *supply; /* regulator for VDD input */
+ struct regulator *enable; /* regulator for EN/VDDIO input */
};
static int lp855x_write_byte(struct lp855x *lp, u8 reg, u8 data)
@@ -433,6 +435,19 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
lp->supply = NULL;
}
+ lp->enable = devm_regulator_get_optional(lp->dev, "enable");
+ if (IS_ERR(lp->enable)) {
+ ret = PTR_ERR(lp->enable);
+ if (ret == -ENODEV) {
+ lp->enable = NULL;
+ } else {
+ if (ret != -EPROBE_DEFER)
+ dev_err(lp->dev, "error getting enable regulator: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
if (lp->supply) {
ret = regulator_enable(lp->supply);
if (ret < 0) {
@@ -441,6 +456,20 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
}
}
+ if (lp->enable) {
+ ret = regulator_enable(lp->enable);
+ if (ret < 0) {
+ dev_err(lp->dev, "failed to enable vddio: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * LP8555 datasheet says t_RESPONSE (time between VDDIO and
+ * I2C) is 1ms.
+ */
+ usleep_range(1000, 2000);
+ }
+
i2c_set_clientdata(cl, lp);
ret = lp855x_configure(lp);
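
The hunk above relies on the optional-regulator contract: devm_regulator_get_optional() returns ERR_PTR(-ENODEV) when no "enable" supply is described, which the driver treats as absence rather than failure, while any other error (notably -EPROBE_DEFER) must be propagated. A sketch of that idiom as a stand-alone helper (hypothetical, not part of the patch):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

/* Hypothetical helper: returns the supply, NULL when simply absent, and
 * reports anything else (including -EPROBE_DEFER) through @err. */
static struct regulator *get_optional_supply(struct device *dev,
					     const char *id, int *err)
{
	struct regulator *reg = devm_regulator_get_optional(dev, id);

	if (!IS_ERR(reg)) {
		*err = 0;
		return reg;
	}
	*err = PTR_ERR(reg);
	if (*err == -ENODEV) {
		*err = 0;	/* supply not described: not an error */
		return NULL;
	}
	return NULL;		/* real failure or probe deferral */
}
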
diff --git a/drivers/video/console/dummycon.c b/drivers/video/console/dummycon.c
index 0efc52f11..9269d5685 100644
--- a/drivers/video/console/dummycon.c
+++ b/drivers/video/console/dummycon.c
@@ -64,14 +64,11 @@ const struct consw dummy_con = {
.con_putcs = DUMMY,
.con_cursor = DUMMY,
.con_scroll = DUMMY,
- .con_bmove = DUMMY,
.con_switch = DUMMY,
.con_blank = DUMMY,
.con_font_set = DUMMY,
.con_font_get = DUMMY,
.con_font_default = DUMMY,
.con_font_copy = DUMMY,
- .con_set_palette = DUMMY,
- .con_scrolldelta = DUMMY,
};
EXPORT_SYMBOL_GPL(dummy_con);
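
dummycon can drop these stubs because, in the console rework this hunk belongs to, con_bmove is removed from struct consw entirely and con_set_palette and con_scrolldelta become optional hooks. A sketch of the guard the vt core is then expected to apply before calling a driver (assumed shape, not quoted from vt.c):

#include <linux/console.h>
#include <linux/console_struct.h>

/* Sketch (assumption): with optional hooks, the core only calls into a
 * driver's palette handler when one is provided. */
static void set_palette_guarded(struct vc_data *vc,
				const unsigned char *table)
{
	if (vc->vc_sw->con_set_palette)		/* hook may be NULL now */
		vc->vc_sw->con_set_palette(vc, table);
}
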
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index afd3301ac..b87f5cfda 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -170,8 +170,7 @@ static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx,
int height, int width);
static int fbcon_switch(struct vc_data *vc);
static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch);
-static int fbcon_set_palette(struct vc_data *vc, const unsigned char *table);
-static int fbcon_scrolldelta(struct vc_data *vc, int lines);
+static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table);
/*
* Internal routines
@@ -381,7 +380,7 @@ static void fb_flashcursor(struct work_struct *work)
if (ops && ops->currcon != -1)
vc = vc_cons[ops->currcon].d;
- if (!vc || !CON_IS_VISIBLE(vc) ||
+ if (!vc || !con_is_visible(vc) ||
registered_fb[con2fb_map[vc->vc_num]] != info ||
vc->vc_deccm != 1) {
console_unlock();
@@ -619,7 +618,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
erase,
vc->vc_size_row * logo_lines);
- if (CON_IS_VISIBLE(vc) && vc->vc_mode == KD_TEXT) {
+ if (con_is_visible(vc) && vc->vc_mode == KD_TEXT) {
fbcon_clear_margins(vc, 0);
update_screen(vc);
}
@@ -1113,7 +1112,7 @@ static void fbcon_init(struct vc_data *vc, int init)
*
* We need to do it in fbcon_init() to prevent screen corruption.
*/
- if (CON_IS_VISIBLE(vc) && vc->vc_mode == KD_TEXT) {
+ if (con_is_visible(vc) && vc->vc_mode == KD_TEXT) {
if (info->fbops->fb_set_par &&
!(ops->flags & FBCON_FLAGS_INIT)) {
ret = info->fbops->fb_set_par(info);
@@ -1193,7 +1192,7 @@ static void fbcon_deinit(struct vc_data *vc)
if (!ops)
goto finished;
- if (CON_IS_VISIBLE(vc))
+ if (con_is_visible(vc))
fbcon_del_cursor_timer(info);
ops->flags &= ~FBCON_FLAGS_INIT;
@@ -1398,7 +1397,7 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
rows /= vc->vc_font.height;
vc_resize(vc, cols, rows);
- if (CON_IS_VISIBLE(vc)) {
+ if (con_is_visible(vc)) {
update_screen(vc);
if (softback_buf)
fbcon_update_softback(vc);
@@ -2146,7 +2145,7 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
return -EINVAL;
DPRINTK("resize now %ix%i\n", var.xres, var.yres);
- if (CON_IS_VISIBLE(vc)) {
+ if (con_is_visible(vc)) {
var.activate = FB_ACTIVATE_NOW |
FB_ACTIVATE_FORCE;
fb_set_var(info, &var);
@@ -2449,7 +2448,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
int cnt;
char *old_data = NULL;
- if (CON_IS_VISIBLE(vc) && softback_lines)
+ if (con_is_visible(vc) && softback_lines)
fbcon_set_origin(vc);
resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
@@ -2530,9 +2529,9 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
cols /= w;
rows /= h;
vc_resize(vc, cols, rows);
- if (CON_IS_VISIBLE(vc) && softback_buf)
+ if (con_is_visible(vc) && softback_buf)
fbcon_update_softback(vc);
- } else if (CON_IS_VISIBLE(vc)
+ } else if (con_is_visible(vc)
&& vc->vc_mode == KD_TEXT) {
fbcon_clear_margins(vc, 0);
update_screen(vc);
@@ -2652,17 +2651,17 @@ static struct fb_cmap palette_cmap = {
0, 16, palette_red, palette_green, palette_blue, NULL
};
-static int fbcon_set_palette(struct vc_data *vc, const unsigned char *table)
+static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table)
{
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
int i, j, k, depth;
u8 val;
if (fbcon_is_inactive(vc, info))
- return -EINVAL;
+ return;
- if (!CON_IS_VISIBLE(vc))
- return 0;
+ if (!con_is_visible(vc))
+ return;
depth = fb_get_color_depth(&info->var, &info->fix);
if (depth > 3) {
@@ -2684,7 +2683,7 @@ static int fbcon_set_palette(struct vc_data *vc, const unsigned char *table)
} else
fb_copy_cmap(fb_default_cmap(1 << depth), &palette_cmap);
- return fb_set_cmap(&palette_cmap, info);
+ fb_set_cmap(&palette_cmap, info);
}
static u16 *fbcon_screen_pos(struct vc_data *vc, int offset)
@@ -2765,7 +2764,7 @@ static void fbcon_invert_region(struct vc_data *vc, u16 * p, int cnt)
}
}
-static int fbcon_scrolldelta(struct vc_data *vc, int lines)
+static void fbcon_scrolldelta(struct vc_data *vc, int lines)
{
struct fb_info *info = registered_fb[con2fb_map[fg_console]];
struct fbcon_ops *ops = info->fbcon_par;
@@ -2774,9 +2773,9 @@ static int fbcon_scrolldelta(struct vc_data *vc, int lines)
if (softback_top) {
if (vc->vc_num != fg_console)
- return 0;
+ return;
if (vc->vc_mode != KD_TEXT || !lines)
- return 0;
+ return;
if (logo_shown >= 0) {
struct vc_data *conp2 = vc_cons[logo_shown].d;
@@ -2809,11 +2808,11 @@ static int fbcon_scrolldelta(struct vc_data *vc, int lines)
fbcon_cursor(vc, CM_ERASE | CM_SOFTBACK);
fbcon_redraw_softback(vc, disp, lines);
fbcon_cursor(vc, CM_DRAW | CM_SOFTBACK);
- return 0;
+ return;
}
if (!scrollback_phys_max)
- return -ENOSYS;
+ return;
scrollback_old = scrollback_current;
scrollback_current -= lines;
@@ -2822,10 +2821,10 @@ static int fbcon_scrolldelta(struct vc_data *vc, int lines)
else if (scrollback_current > scrollback_max)
scrollback_current = scrollback_max;
if (scrollback_current == scrollback_old)
- return 0;
+ return;
if (fbcon_is_inactive(vc, info))
- return 0;
+ return;
fbcon_cursor(vc, CM_ERASE);
@@ -2852,7 +2851,6 @@ static int fbcon_scrolldelta(struct vc_data *vc, int lines)
if (!scrollback_current)
fbcon_cursor(vc, CM_DRAW);
- return 0;
}
static int fbcon_set_origin(struct vc_data *vc)
@@ -2904,7 +2902,7 @@ static void fbcon_modechanged(struct fb_info *info)
p = &fb_display[vc->vc_num];
set_blitting_type(vc, info);
- if (CON_IS_VISIBLE(vc)) {
+ if (con_is_visible(vc)) {
var_to_display(p, &info->var, info);
cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
@@ -2943,7 +2941,7 @@ static void fbcon_set_all_vcs(struct fb_info *info)
registered_fb[con2fb_map[i]] != info)
continue;
- if (CON_IS_VISIBLE(vc)) {
+ if (con_is_visible(vc)) {
fg = i;
continue;
}
@@ -3182,7 +3180,7 @@ static void fbcon_fb_blanked(struct fb_info *info, int blank)
registered_fb[con2fb_map[ops->currcon]] != info)
return;
- if (CON_IS_VISIBLE(vc)) {
+ if (con_is_visible(vc)) {
if (blank)
do_blank_screen(0);
else
@@ -3336,7 +3334,6 @@ static const struct consw fb_con = {
.con_putcs = fbcon_putcs,
.con_cursor = fbcon_cursor,
.con_scroll = fbcon_scroll,
- .con_bmove = fbcon_bmove,
.con_switch = fbcon_switch,
.con_blank = fbcon_blank,
.con_font_set = fbcon_set_font,
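
Every CON_IS_VISIBLE() site in this file is converted to the new con_is_visible() helper. Its assumed definition, mirroring the macro it replaces (a console is visible when it is the foreground console of its display):

#include <linux/console_struct.h>
#include <linux/types.h>

/* Assumed inline replacement for the old CON_IS_VISIBLE(vc) macro */
static inline bool con_is_visible(const struct vc_data *vc)
{
	return *vc->vc_display_fg == vc;
}
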
diff --git a/drivers/video/console/mdacon.c b/drivers/video/console/mdacon.c
index 8edc06253..bacbb044d 100644
--- a/drivers/video/console/mdacon.c
+++ b/drivers/video/console/mdacon.c
@@ -444,48 +444,11 @@ static void mdacon_clear(struct vc_data *c, int y, int x,
}
}
-static void mdacon_bmove(struct vc_data *c, int sy, int sx,
- int dy, int dx, int height, int width)
-{
- u16 *src, *dest;
-
- if (width <= 0 || height <= 0)
- return;
-
- if (sx==0 && dx==0 && width==mda_num_columns) {
- scr_memmovew(MDA_ADDR(0,dy), MDA_ADDR(0,sy), height*width*2);
-
- } else if (dy < sy || (dy == sy && dx < sx)) {
- src = MDA_ADDR(sx, sy);
- dest = MDA_ADDR(dx, dy);
-
- for (; height > 0; height--) {
- scr_memmovew(dest, src, width*2);
- src += mda_num_columns;
- dest += mda_num_columns;
- }
- } else {
- src = MDA_ADDR(sx, sy+height-1);
- dest = MDA_ADDR(dx, dy+height-1);
-
- for (; height > 0; height--) {
- scr_memmovew(dest, src, width*2);
- src -= mda_num_columns;
- dest -= mda_num_columns;
- }
- }
-}
-
static int mdacon_switch(struct vc_data *c)
{
return 1; /* redrawing needed */
}
-static int mdacon_set_palette(struct vc_data *c, const unsigned char *table)
-{
- return -EINVAL;
-}
-
static int mdacon_blank(struct vc_data *c, int blank, int mode_switch)
{
if (mda_type == TYPE_MDA) {
@@ -505,11 +468,6 @@ static int mdacon_blank(struct vc_data *c, int blank, int mode_switch)
}
}
-static int mdacon_scrolldelta(struct vc_data *c, int lines)
-{
- return 0;
-}
-
static void mdacon_cursor(struct vc_data *c, int mode)
{
if (mode == CM_ERASE) {
@@ -574,11 +532,8 @@ static const struct consw mda_con = {
.con_putcs = mdacon_putcs,
.con_cursor = mdacon_cursor,
.con_scroll = mdacon_scroll,
- .con_bmove = mdacon_bmove,
.con_switch = mdacon_switch,
.con_blank = mdacon_blank,
- .con_set_palette = mdacon_set_palette,
- .con_scrolldelta = mdacon_scrolldelta,
.con_build_attr = mdacon_build_attr,
.con_invert_region = mdacon_invert_region,
};
diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c
index 0553dfe68..e3b9521e4 100644
--- a/drivers/video/console/newport_con.c
+++ b/drivers/video/console/newport_con.c
@@ -574,17 +574,6 @@ static int newport_font_set(struct vc_data *vc, struct console_font *font, unsig
return newport_set_font(vc->vc_num, font);
}
-static int newport_set_palette(struct vc_data *vc, const unsigned char *table)
-{
- return -EINVAL;
-}
-
-static int newport_scrolldelta(struct vc_data *vc, int lines)
-{
- /* there is (nearly) no off-screen memory, so we can't scroll back */
- return 0;
-}
-
static int newport_scroll(struct vc_data *vc, int t, int b, int dir,
int lines)
{
@@ -684,34 +673,6 @@ static int newport_scroll(struct vc_data *vc, int t, int b, int dir,
return 1;
}
-static void newport_bmove(struct vc_data *vc, int sy, int sx, int dy,
- int dx, int h, int w)
-{
- short xs, ys, xe, ye, xoffs, yoffs;
-
- xs = sx << 3;
- xe = ((sx + w) << 3) - 1;
- /*
- * as bmove is only used to move stuff around in the same line
- * (h == 1), we don't care about wrap arounds caused by topscan != 0
- */
- ys = ((sy << 4) + topscan) & 0x3ff;
- ye = (((sy + h) << 4) - 1 + topscan) & 0x3ff;
- xoffs = (dx - sx) << 3;
- yoffs = (dy - sy) << 4;
- if (xoffs > 0) {
- /* move to the right, exchange starting points */
- swap(xe, xs);
- }
- newport_wait(npregs);
- npregs->set.drawmode0 = (NPORT_DMODE0_S2S | NPORT_DMODE0_BLOCK |
- NPORT_DMODE0_DOSETUP | NPORT_DMODE0_STOPX
- | NPORT_DMODE0_STOPY);
- npregs->set.xystarti = (xs << 16) | ys;
- npregs->set.xyendi = (xe << 16) | ye;
- npregs->go.xymove = (xoffs << 16) | yoffs;
-}
-
static int newport_dummy(struct vc_data *c)
{
return 0;
@@ -729,13 +690,10 @@ const struct consw newport_con = {
.con_putcs = newport_putcs,
.con_cursor = newport_cursor,
.con_scroll = newport_scroll,
- .con_bmove = newport_bmove,
.con_switch = newport_switch,
.con_blank = newport_blank,
.con_font_set = newport_font_set,
.con_font_default = newport_font_default,
- .con_set_palette = newport_set_palette,
- .con_scrolldelta = newport_scrolldelta,
.con_set_origin = DUMMY,
.con_save_screen = DUMMY
};
diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c
index e440c2d9f..3a10ac195 100644
--- a/drivers/video/console/sticon.c
+++ b/drivers/video/console/sticon.c
@@ -79,11 +79,6 @@ static const char *sticon_startup(void)
return "STI console";
}
-static int sticon_set_palette(struct vc_data *c, const unsigned char *table)
-{
- return -EINVAL;
-}
-
static void sticon_putc(struct vc_data *conp, int c, int ypos, int xpos)
{
int redraw_cursor = 0;
@@ -182,22 +177,6 @@ static int sticon_scroll(struct vc_data *conp, int t, int b, int dir, int count)
return 0;
}
-static void sticon_bmove(struct vc_data *conp, int sy, int sx,
- int dy, int dx, int height, int width)
-{
- if (!width || !height)
- return;
-#if 0
- if (((sy <= p->cursor_y) && (p->cursor_y < sy+height) &&
- (sx <= p->cursor_x) && (p->cursor_x < sx+width)) ||
- ((dy <= p->cursor_y) && (p->cursor_y < dy+height) &&
- (dx <= p->cursor_x) && (p->cursor_x < dx+width)))
- sticon_cursor(p, CM_ERASE /*|CM_SOFTBACK*/);
-#endif
-
- sti_bmove(sticon_sti, sy, sx, dy, dx, height, width);
-}
-
static void sticon_init(struct vc_data *c, int init)
{
struct sti_struct *sti = sticon_sti;
@@ -256,11 +235,6 @@ static int sticon_blank(struct vc_data *c, int blank, int mode_switch)
return 1;
}
-static int sticon_scrolldelta(struct vc_data *conp, int lines)
-{
- return 0;
-}
-
static u16 *sticon_screen_pos(struct vc_data *conp, int offset)
{
int line;
@@ -355,11 +329,8 @@ static const struct consw sti_con = {
.con_putcs = sticon_putcs,
.con_cursor = sticon_cursor,
.con_scroll = sticon_scroll,
- .con_bmove = sticon_bmove,
.con_switch = sticon_switch,
.con_blank = sticon_blank,
- .con_set_palette = sticon_set_palette,
- .con_scrolldelta = sticon_scrolldelta,
.con_set_origin = sticon_set_origin,
.con_save_screen = sticon_save_screen,
.con_build_attr = sticon_build_attr,
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 8bf911002..11576611a 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -80,7 +80,7 @@ static void vgacon_deinit(struct vc_data *c);
static void vgacon_cursor(struct vc_data *c, int mode);
static int vgacon_switch(struct vc_data *c);
static int vgacon_blank(struct vc_data *c, int blank, int mode_switch);
-static int vgacon_scrolldelta(struct vc_data *c, int lines);
+static void vgacon_scrolldelta(struct vc_data *c, int lines);
static int vgacon_set_origin(struct vc_data *c);
static void vgacon_save_screen(struct vc_data *c);
static int vgacon_scroll(struct vc_data *c, int t, int b, int dir,
@@ -248,18 +248,18 @@ static void vgacon_restore_screen(struct vc_data *c)
}
}
-static int vgacon_scrolldelta(struct vc_data *c, int lines)
+static void vgacon_scrolldelta(struct vc_data *c, int lines)
{
int start, end, count, soff;
if (!lines) {
c->vc_visible_origin = c->vc_origin;
vga_set_mem_top(c);
- return 1;
+ return;
}
if (!vgacon_scrollback)
- return 1;
+ return;
if (!vgacon_scrollback_save) {
vgacon_cursor(c, CM_ERASE);
@@ -320,8 +320,6 @@ static int vgacon_scrolldelta(struct vc_data *c, int lines)
scr_memcpyw(d, s, diff * c->vc_size_row);
} else
vgacon_cursor(c, CM_MOVE);
-
- return 1;
}
#else
#define vgacon_scrollback_startup(...) do { } while (0)
@@ -334,7 +332,7 @@ static void vgacon_restore_screen(struct vc_data *c)
vgacon_scrolldelta(c, 0);
}
-static int vgacon_scrolldelta(struct vc_data *c, int lines)
+static void vgacon_scrolldelta(struct vc_data *c, int lines)
{
if (!lines) /* Turn scrollback off */
c->vc_visible_origin = c->vc_origin;
@@ -362,7 +360,6 @@ static int vgacon_scrolldelta(struct vc_data *c, int lines)
c->vc_visible_origin = vga_vram_base + (p + ul) % we;
}
vga_set_mem_top(c);
- return 1;
}
#endif /* CONFIG_VGACON_SOFT_SCROLLBACK */
@@ -592,7 +589,7 @@ static void vgacon_init(struct vc_data *c, int init)
static void vgacon_deinit(struct vc_data *c)
{
/* When closing the active console, reset video origin */
- if (CON_IS_VISIBLE(c)) {
+ if (con_is_visible(c)) {
c->vc_visible_origin = vga_vram_base;
vga_set_mem_top(c);
}
@@ -859,16 +856,13 @@ static void vga_set_palette(struct vc_data *vc, const unsigned char *table)
}
}
-static int vgacon_set_palette(struct vc_data *vc, const unsigned char *table)
+static void vgacon_set_palette(struct vc_data *vc, const unsigned char *table)
{
#ifdef CAN_LOAD_PALETTE
if (vga_video_type != VIDEO_TYPE_VGAC || vga_palette_blanked
- || !CON_IS_VISIBLE(vc))
- return -EINVAL;
+ || !con_is_visible(vc))
+ return;
vga_set_palette(vc, table);
- return 0;
-#else
- return -EINVAL;
#endif
}
@@ -1254,7 +1248,7 @@ static int vgacon_adjust_height(struct vc_data *vc, unsigned fontheight)
struct vc_data *c = vc_cons[i].d;
if (c && c->vc_sw == &vga_con) {
- if (CON_IS_VISIBLE(c)) {
+ if (con_is_visible(c)) {
/* void size to cause regs to be rewritten */
cursor_size_lastfrom = 0;
cursor_size_lastto = 0;
@@ -1318,7 +1312,7 @@ static int vgacon_resize(struct vc_data *c, unsigned int width,
return success */
return (user) ? 0 : -EINVAL;
- if (CON_IS_VISIBLE(c) && !vga_is_gfx) /* who knows */
+ if (con_is_visible(c) && !vga_is_gfx) /* who knows */
vgacon_doresize(c, width, height);
return 0;
}
@@ -1427,7 +1421,6 @@ const struct consw vga_con = {
.con_putcs = DUMMY,
.con_cursor = vgacon_cursor,
.con_scroll = vgacon_scroll,
- .con_bmove = DUMMY,
.con_switch = vgacon_switch,
.con_blank = vgacon_blank,
.con_font_set = vgacon_font_set,
diff --git a/drivers/video/fbdev/bfin_adv7393fb.c b/drivers/video/fbdev/bfin_adv7393fb.c
index 8fe41caac..e2d7d039c 100644
--- a/drivers/video/fbdev/bfin_adv7393fb.c
+++ b/drivers/video/fbdev/bfin_adv7393fb.c
@@ -10,6 +10,8 @@
* TODO: Code Cleanup
*/
+#define DRIVER_NAME "bfin-adv7393"
+
#define pr_fmt(fmt) DRIVER_NAME ": " fmt
#include <linux/module.h>
diff --git a/drivers/video/fbdev/bfin_adv7393fb.h b/drivers/video/fbdev/bfin_adv7393fb.h
index cd591b515..afd0380e1 100644
--- a/drivers/video/fbdev/bfin_adv7393fb.h
+++ b/drivers/video/fbdev/bfin_adv7393fb.h
@@ -59,8 +59,6 @@ enum {
BLANK_OFF,
};
-#define DRIVER_NAME "bfin-adv7393"
-
struct adv7393fb_modes {
const s8 name[25]; /* Full name */
u16 xres; /* Active Horizontal Pixels */
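
Moving the DRIVER_NAME definition from this header into the .c file (previous hunk) works because pr_fmt() is expanded at each pr_*() call site and must already be defined when <linux/printk.h> is first pulled in, which supplies a default otherwise. A minimal sketch of the ordering requirement (illustrative):

#define DRIVER_NAME "bfin-adv7393"
#define pr_fmt(fmt) DRIVER_NAME ": " fmt	/* must precede kernel includes */

#include <linux/printk.h>

static void report_probe(void)
{
	pr_info("probed\n");	/* logs "bfin-adv7393: probed" */
}
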
diff --git a/drivers/video/fbdev/clps711x-fb.c b/drivers/video/fbdev/clps711x-fb.c
index 649b32f78..ff561073e 100644
--- a/drivers/video/fbdev/clps711x-fb.c
+++ b/drivers/video/fbdev/clps711x-fb.c
@@ -273,7 +273,7 @@ static int clps711x_fb_probe(struct platform_device *pdev)
}
cfb->syscon =
- syscon_regmap_lookup_by_compatible("cirrus,clps711x-syscon1");
+ syscon_regmap_lookup_by_compatible("cirrus,ep7209-syscon1");
if (IS_ERR(cfb->syscon)) {
ret = PTR_ERR(cfb->syscon);
goto out_fb_release;
@@ -376,7 +376,7 @@ static int clps711x_fb_remove(struct platform_device *pdev)
}
static const struct of_device_id clps711x_fb_dt_ids[] = {
- { .compatible = "cirrus,clps711x-fb", },
+ { .compatible = "cirrus,ep7209-fb", },
{ }
};
MODULE_DEVICE_TABLE(of, clps711x_fb_dt_ids);
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index 47c3191ec..62c0cf796 100644
--- a/drivers/video/fbdev/core/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -1496,7 +1496,6 @@ int fb_parse_edid(unsigned char *edid, struct fb_var_screeninfo *var)
}
void fb_edid_to_monspecs(unsigned char *edid, struct fb_monspecs *specs)
{
- specs = NULL;
}
void fb_edid_add_monspecs(unsigned char *edid, struct fb_monspecs *specs)
{
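
The deleted "specs = NULL;" in this stub was a no-op: C passes pointers by value, so assigning to the parameter only changes the callee's local copy. A minimal illustration:

struct fb_monspecs;	/* opaque here */

static void clear_local_copy(struct fb_monspecs *specs)
{
	specs = (void *)0;	/* invisible to the caller */
	(void)specs;
}

static void clear_callers_pointer(struct fb_monspecs **specs)
{
	*specs = (void *)0;	/* actually clears the caller's pointer */
}
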
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c
index 8511c648a..9d78411a3 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c
@@ -14,7 +14,7 @@
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include <video/omap-panel-data.h>
struct panel_drv_data {
@@ -25,7 +25,6 @@ struct panel_drv_data {
struct omap_video_timings timings;
- enum omap_dss_venc_type connector_type;
bool invert_polarity;
};
@@ -45,10 +44,6 @@ static const struct omap_video_timings tvc_pal_timings = {
static const struct of_device_id tvc_of_match[];
-struct tvc_of_data {
- enum omap_dss_venc_type connector_type;
-};
-
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
static int tvc_connect(struct omap_dss_device *dssdev)
@@ -99,7 +94,7 @@ static int tvc_enable(struct omap_dss_device *dssdev)
in->ops.atv->set_timings(in, &ddata->timings);
if (!ddata->dev->of_node) {
- in->ops.atv->set_type(in, ddata->connector_type);
+ in->ops.atv->set_type(in, OMAP_DSS_VENC_TYPE_COMPOSITE);
in->ops.atv->invert_vid_out_polarity(in,
ddata->invert_polarity);
@@ -207,7 +202,6 @@ static int tvc_probe_pdata(struct platform_device *pdev)
ddata->in = in;
- ddata->connector_type = pdata->connector_type;
ddata->invert_polarity = pdata->invert_polarity;
dssdev = &ddata->dssdev;
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
index d811e6dca..06e1db345 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
@@ -16,8 +16,7 @@
#include <drm/drm_edid.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include <video/omapfb_dss.h>
static const struct omap_video_timings dvic_default_timings = {
.x_res = 640,
@@ -236,46 +235,6 @@ static struct omap_dss_driver dvic_driver = {
.detect = dvic_detect,
};
-static int dvic_probe_pdata(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct connector_dvi_platform_data *pdata;
- struct omap_dss_device *in, *dssdev;
- int i2c_bus_num;
-
- pdata = dev_get_platdata(&pdev->dev);
- i2c_bus_num = pdata->i2c_bus_num;
-
- if (i2c_bus_num != -1) {
- struct i2c_adapter *adapter;
-
- adapter = i2c_get_adapter(i2c_bus_num);
- if (!adapter) {
- dev_err(&pdev->dev,
- "Failed to get I2C adapter, bus %d\n",
- i2c_bus_num);
- return -EPROBE_DEFER;
- }
-
- ddata->i2c_adapter = adapter;
- }
-
- in = omap_dss_find_output(pdata->source);
- if (in == NULL) {
- i2c_put_adapter(ddata->i2c_adapter);
-
- dev_err(&pdev->dev, "Failed to find video source\n");
- return -EPROBE_DEFER;
- }
-
- ddata->in = in;
-
- dssdev = &ddata->dssdev;
- dssdev->name = pdata->name;
-
- return 0;
-}
-
static int dvic_probe_of(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
@@ -313,23 +272,18 @@ static int dvic_probe(struct platform_device *pdev)
struct omap_dss_device *dssdev;
int r;
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
return -ENOMEM;
platform_set_drvdata(pdev, ddata);
- if (dev_get_platdata(&pdev->dev)) {
- r = dvic_probe_pdata(pdev);
- if (r)
- return r;
- } else if (pdev->dev.of_node) {
- r = dvic_probe_of(pdev);
- if (r)
- return r;
- } else {
- return -ENODEV;
- }
+ r = dvic_probe_of(pdev);
+ if (r)
+ return r;
ddata->timings = dvic_default_timings;
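
This is the first of many omapfb display drivers below (hdmic, tfp410, dsicm, lb035q02, nec-8048, sharp-ls, td028ttec1, tpo-td043) converted the same way: the *_probe_pdata() path is deleted and probe becomes DT-only. The resulting skeleton, sketched with a hypothetical example_probe_of() and a reduced private struct:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct omap_dss_device;

struct panel_drv_data {
	struct omap_dss_device *in;	/* video source parsed from DT */
};

static int example_probe_of(struct platform_device *pdev)
{
	/* hypothetical: parse the DT endpoint and fill driver state */
	return 0;
}

static int example_probe(struct platform_device *pdev)
{
	struct panel_drv_data *ddata;

	if (!pdev->dev.of_node)
		return -ENODEV;	/* legacy platform data is no longer accepted */

	ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
	if (!ddata)
		return -ENOMEM;
	platform_set_drvdata(pdev, ddata);

	return example_probe_of(pdev);
}
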
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c
index 6ee4129bc..58d5803ed 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c
@@ -17,8 +17,7 @@
#include <drm/drm_edid.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include <video/omapfb_dss.h>
static const struct omap_video_timings hdmic_default_timings = {
.x_res = 640,
@@ -206,30 +205,6 @@ static struct omap_dss_driver hdmic_driver = {
.set_hdmi_infoframe = hdmic_set_infoframe,
};
-static int hdmic_probe_pdata(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct connector_hdmi_platform_data *pdata;
- struct omap_dss_device *in, *dssdev;
-
- pdata = dev_get_platdata(&pdev->dev);
-
- ddata->hpd_gpio = -ENODEV;
-
- in = omap_dss_find_output(pdata->source);
- if (in == NULL) {
- dev_err(&pdev->dev, "Failed to find video source\n");
- return -EPROBE_DEFER;
- }
-
- ddata->in = in;
-
- dssdev = &ddata->dssdev;
- dssdev->name = pdata->name;
-
- return 0;
-}
-
static int hdmic_probe_of(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
@@ -261,6 +236,9 @@ static int hdmic_probe(struct platform_device *pdev)
struct omap_dss_device *dssdev;
int r;
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
return -ENOMEM;
@@ -268,17 +246,9 @@ static int hdmic_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
ddata->dev = &pdev->dev;
- if (dev_get_platdata(&pdev->dev)) {
- r = hdmic_probe_pdata(pdev);
- if (r)
- return r;
- } else if (pdev->dev.of_node) {
- r = hdmic_probe_of(pdev);
- if (r)
- return r;
- } else {
- return -ENODEV;
- }
+ r = hdmic_probe_of(pdev);
+ if (r)
+ return r;
if (gpio_is_valid(ddata->hpd_gpio)) {
r = devm_gpio_request_one(&pdev->dev, ddata->hpd_gpio,
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c b/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c
index 8c246c213..a9a67167c 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c
@@ -20,7 +20,7 @@
#include <linux/slab.h>
#include <linux/of_gpio.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
struct panel_drv_data {
struct omap_dss_device dssdev;
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c
index d9048b3df..8c0953d06 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c
@@ -15,8 +15,7 @@
#include <linux/slab.h>
#include <linux/of_gpio.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include <video/omapfb_dss.h>
struct panel_drv_data {
struct omap_dss_device dssdev;
@@ -166,32 +165,6 @@ static const struct omapdss_dvi_ops tfp410_dvi_ops = {
.get_timings = tfp410_get_timings,
};
-static int tfp410_probe_pdata(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct encoder_tfp410_platform_data *pdata;
- struct omap_dss_device *dssdev, *in;
-
- pdata = dev_get_platdata(&pdev->dev);
-
- ddata->pd_gpio = pdata->power_down_gpio;
-
- ddata->data_lines = pdata->data_lines;
-
- in = omap_dss_find_output(pdata->source);
- if (in == NULL) {
- dev_err(&pdev->dev, "Failed to find video source\n");
- return -ENODEV;
- }
-
- ddata->in = in;
-
- dssdev = &ddata->dssdev;
- dssdev->name = pdata->name;
-
- return 0;
-}
-
static int tfp410_probe_of(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
@@ -225,23 +198,18 @@ static int tfp410_probe(struct platform_device *pdev)
struct omap_dss_device *dssdev;
int r;
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
return -ENOMEM;
platform_set_drvdata(pdev, ddata);
- if (dev_get_platdata(&pdev->dev)) {
- r = tfp410_probe_pdata(pdev);
- if (r)
- return r;
- } else if (pdev->dev.of_node) {
- r = tfp410_probe_of(pdev);
- if (r)
- return r;
- } else {
- return -ENODEV;
- }
+ r = tfp410_probe_of(pdev);
+ if (r)
+ return r;
if (gpio_is_valid(ddata->pd_gpio)) {
r = devm_gpio_request_one(&pdev->dev, ddata->pd_gpio,
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c
index 677e2545f..80dc47347 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c
@@ -16,8 +16,7 @@
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include <video/omapfb_dss.h>
struct panel_drv_data {
struct omap_dss_device dssdev;
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c
index e780fd4f8..ace3d818a 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c
@@ -16,7 +16,7 @@
#include <linux/of.h>
#include <linux/of_gpio.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include <video/omap-panel-data.h>
#include <video/of_display_timing.h>
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
index 3414c2609..b58012b82 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
@@ -25,8 +25,7 @@
#include <linux/of_device.h>
#include <linux/of_gpio.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include <video/omapfb_dss.h>
#include <video/mipi_display.h>
/* DSI Virtual channel. Hardcoded for now. */
@@ -1127,40 +1126,6 @@ static struct omap_dss_driver dsicm_ops = {
.memory_read = dsicm_memory_read,
};
-static int dsicm_probe_pdata(struct platform_device *pdev)
-{
- const struct panel_dsicm_platform_data *pdata;
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev, *in;
-
- pdata = dev_get_platdata(&pdev->dev);
-
- in = omap_dss_find_output(pdata->source);
- if (in == NULL) {
- dev_err(&pdev->dev, "failed to find video source\n");
- return -EPROBE_DEFER;
- }
- ddata->in = in;
-
- ddata->reset_gpio = pdata->reset_gpio;
-
- if (pdata->use_ext_te)
- ddata->ext_te_gpio = pdata->ext_te_gpio;
- else
- ddata->ext_te_gpio = -1;
-
- ddata->ulps_timeout = pdata->ulps_timeout;
-
- ddata->use_dsi_backlight = pdata->use_dsi_backlight;
-
- ddata->pin_config = pdata->pin_config;
-
- dssdev = &ddata->dssdev;
- dssdev->name = pdata->name;
-
- return 0;
-}
-
static int dsicm_probe_of(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
@@ -1207,6 +1172,9 @@ static int dsicm_probe(struct platform_device *pdev)
dev_dbg(dev, "probe\n");
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
return -ENOMEM;
@@ -1214,17 +1182,9 @@ static int dsicm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
ddata->pdev = pdev;
- if (dev_get_platdata(dev)) {
- r = dsicm_probe_pdata(pdev);
- if (r)
- return r;
- } else if (pdev->dev.of_node) {
- r = dsicm_probe_of(pdev);
- if (r)
- return r;
- } else {
- return -ENODEV;
- }
+ r = dsicm_probe_of(pdev);
+ if (r)
+ return r;
ddata->timings.x_res = 864;
ddata->timings.y_res = 480;
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c
index 18eb60e9c..f14691ce8 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c
@@ -16,8 +16,7 @@
#include <linux/mutex.h>
#include <linux/gpio.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include <video/omapfb_dss.h>
static struct omap_video_timings lb035q02_timings = {
.x_res = 320,
@@ -240,44 +239,6 @@ static struct omap_dss_driver lb035q02_ops = {
.get_resolution = omapdss_default_get_resolution,
};
-static int lb035q02_probe_pdata(struct spi_device *spi)
-{
- const struct panel_lb035q02_platform_data *pdata;
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct omap_dss_device *dssdev, *in;
- int r;
-
- pdata = dev_get_platdata(&spi->dev);
-
- in = omap_dss_find_output(pdata->source);
- if (in == NULL) {
- dev_err(&spi->dev, "failed to find video source '%s'\n",
- pdata->source);
- return -EPROBE_DEFER;
- }
-
- ddata->in = in;
-
- ddata->data_lines = pdata->data_lines;
-
- dssdev = &ddata->dssdev;
- dssdev->name = pdata->name;
-
- r = devm_gpio_request_one(&spi->dev, pdata->enable_gpio,
- GPIOF_OUT_INIT_LOW, "panel enable");
- if (r)
- goto err_gpio;
-
- ddata->enable_gpio = gpio_to_desc(pdata->enable_gpio);
-
- ddata->backlight_gpio = pdata->backlight_gpio;
-
- return 0;
-err_gpio:
- omap_dss_put_device(ddata->in);
- return r;
-}
-
static int lb035q02_probe_of(struct spi_device *spi)
{
struct device_node *node = spi->dev.of_node;
@@ -312,6 +273,9 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
struct omap_dss_device *dssdev;
int r;
+ if (!spi->dev.of_node)
+ return -ENODEV;
+
ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
if (ddata == NULL)
return -ENOMEM;
@@ -320,17 +284,9 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
ddata->spi = spi;
- if (dev_get_platdata(&spi->dev)) {
- r = lb035q02_probe_pdata(spi);
- if (r)
- return r;
- } else if (spi->dev.of_node) {
- r = lb035q02_probe_of(spi);
- if (r)
- return r;
- } else {
- return -ENODEV;
- }
+ r = lb035q02_probe_of(spi);
+ if (r)
+ return r;
if (gpio_is_valid(ddata->backlight_gpio)) {
r = devm_gpio_request_one(&spi->dev, ddata->backlight_gpio,
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c
index 8a928c9a2..a2cbadd3e 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c
@@ -18,8 +18,7 @@
#include <linux/gpio.h>
#include <linux/of_gpio.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include <video/omapfb_dss.h>
struct panel_drv_data {
struct omap_dss_device dssdev;
@@ -233,33 +232,6 @@ static struct omap_dss_driver nec_8048_ops = {
};
-static int nec_8048_probe_pdata(struct spi_device *spi)
-{
- const struct panel_nec_nl8048hl11_platform_data *pdata;
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct omap_dss_device *dssdev, *in;
-
- pdata = dev_get_platdata(&spi->dev);
-
- ddata->qvga_gpio = pdata->qvga_gpio;
- ddata->res_gpio = pdata->res_gpio;
-
- in = omap_dss_find_output(pdata->source);
- if (in == NULL) {
- dev_err(&spi->dev, "failed to find video source '%s'\n",
- pdata->source);
- return -EPROBE_DEFER;
- }
- ddata->in = in;
-
- ddata->data_lines = pdata->data_lines;
-
- dssdev = &ddata->dssdev;
- dssdev->name = pdata->name;
-
- return 0;
-}
-
static int nec_8048_probe_of(struct spi_device *spi)
{
struct device_node *node = spi->dev.of_node;
@@ -296,6 +268,9 @@ static int nec_8048_probe(struct spi_device *spi)
dev_dbg(&spi->dev, "%s\n", __func__);
+ if (!spi->dev.of_node)
+ return -ENODEV;
+
spi->mode = SPI_MODE_0;
spi->bits_per_word = 32;
@@ -315,17 +290,9 @@ static int nec_8048_probe(struct spi_device *spi)
ddata->spi = spi;
- if (dev_get_platdata(&spi->dev)) {
- r = nec_8048_probe_pdata(spi);
- if (r)
- return r;
- } else if (spi->dev.of_node) {
- r = nec_8048_probe_of(spi);
- if (r)
- return r;
- } else {
- return -ENODEV;
- }
+ r = nec_8048_probe_of(spi);
+ if (r)
+ return r;
if (gpio_is_valid(ddata->qvga_gpio)) {
r = devm_gpio_request_one(&spi->dev, ddata->qvga_gpio,
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
index 1954ec913..a8be18a87 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
@@ -17,8 +17,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include <video/omapfb_dss.h>
struct panel_drv_data {
struct omap_dss_device dssdev;
@@ -197,69 +196,6 @@ static struct omap_dss_driver sharp_ls_ops = {
.get_resolution = omapdss_default_get_resolution,
};
-static int sharp_ls_get_gpio(struct device *dev, int gpio, unsigned long flags,
- char *desc, struct gpio_desc **gpiod)
-{
- int r;
-
- r = devm_gpio_request_one(dev, gpio, flags, desc);
- if (r) {
- *gpiod = NULL;
- return r == -ENOENT ? 0 : r;
- }
-
- *gpiod = gpio_to_desc(gpio);
-
- return 0;
-}
-
-static int sharp_ls_probe_pdata(struct platform_device *pdev)
-{
- const struct panel_sharp_ls037v7dw01_platform_data *pdata;
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev, *in;
- int r;
-
- pdata = dev_get_platdata(&pdev->dev);
-
- in = omap_dss_find_output(pdata->source);
- if (in == NULL) {
- dev_err(&pdev->dev, "failed to find video source '%s'\n",
- pdata->source);
- return -EPROBE_DEFER;
- }
-
- ddata->in = in;
-
- ddata->data_lines = pdata->data_lines;
-
- dssdev = &ddata->dssdev;
- dssdev->name = pdata->name;
-
- r = sharp_ls_get_gpio(&pdev->dev, pdata->mo_gpio, GPIOF_OUT_INIT_LOW,
- "lcd MO", &ddata->mo_gpio);
- if (r)
- return r;
- r = sharp_ls_get_gpio(&pdev->dev, pdata->lr_gpio, GPIOF_OUT_INIT_HIGH,
- "lcd LR", &ddata->lr_gpio);
- if (r)
- return r;
- r = sharp_ls_get_gpio(&pdev->dev, pdata->ud_gpio, GPIOF_OUT_INIT_HIGH,
- "lcd UD", &ddata->ud_gpio);
- if (r)
- return r;
- r = sharp_ls_get_gpio(&pdev->dev, pdata->resb_gpio, GPIOF_OUT_INIT_LOW,
- "lcd RESB", &ddata->resb_gpio);
- if (r)
- return r;
- r = sharp_ls_get_gpio(&pdev->dev, pdata->ini_gpio, GPIOF_OUT_INIT_LOW,
- "lcd INI", &ddata->ini_gpio);
- if (r)
- return r;
-
- return 0;
-}
-
static int sharp_ls_get_gpio_of(struct device *dev, int index, int val,
const char *desc, struct gpio_desc **gpiod)
{
@@ -330,23 +266,18 @@ static int sharp_ls_probe(struct platform_device *pdev)
struct omap_dss_device *dssdev;
int r;
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
if (ddata == NULL)
return -ENOMEM;
platform_set_drvdata(pdev, ddata);
- if (dev_get_platdata(&pdev->dev)) {
- r = sharp_ls_probe_pdata(pdev);
- if (r)
- return r;
- } else if (pdev->dev.of_node) {
- r = sharp_ls_probe_of(pdev);
- if (r)
- return r;
- } else {
- return -ENODEV;
- }
+ r = sharp_ls_probe_of(pdev);
+ if (r)
+ return r;
ddata->videomode = sharp_ls_timings;
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
index 31efcca80..468560a6d 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
@@ -33,7 +33,7 @@
#include <linux/of.h>
#include <linux/of_gpio.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include <video/omap-panel-data.h>
#define MIPID_CMD_READ_DISP_ID 0x04
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c
index 4d657f3ab..b529a8c2b 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c
@@ -28,8 +28,7 @@
#include <linux/delay.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include <video/omapfb_dss.h>
struct panel_drv_data {
struct omap_dss_device dssdev;
@@ -365,31 +364,6 @@ static struct omap_dss_driver td028ttec1_ops = {
.check_timings = td028ttec1_panel_check_timings,
};
-static int td028ttec1_panel_probe_pdata(struct spi_device *spi)
-{
- const struct panel_tpo_td028ttec1_platform_data *pdata;
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct omap_dss_device *dssdev, *in;
-
- pdata = dev_get_platdata(&spi->dev);
-
- in = omap_dss_find_output(pdata->source);
- if (in == NULL) {
- dev_err(&spi->dev, "failed to find video source '%s'\n",
- pdata->source);
- return -EPROBE_DEFER;
- }
-
- ddata->in = in;
-
- ddata->data_lines = pdata->data_lines;
-
- dssdev = &ddata->dssdev;
- dssdev->name = pdata->name;
-
- return 0;
-}
-
static int td028ttec1_probe_of(struct spi_device *spi)
{
struct device_node *node = spi->dev.of_node;
@@ -415,6 +389,9 @@ static int td028ttec1_panel_probe(struct spi_device *spi)
dev_dbg(&spi->dev, "%s\n", __func__);
+ if (!spi->dev.of_node)
+ return -ENODEV;
+
spi->bits_per_word = 9;
spi->mode = SPI_MODE_3;
@@ -432,17 +409,9 @@ static int td028ttec1_panel_probe(struct spi_device *spi)
ddata->spi_dev = spi;
- if (dev_get_platdata(&spi->dev)) {
- r = td028ttec1_panel_probe_pdata(spi);
- if (r)
- return r;
- } else if (spi->dev.of_node) {
- r = td028ttec1_probe_of(spi);
- if (r)
- return r;
- } else {
- return -ENODEV;
- }
+ r = td028ttec1_probe_of(spi);
+ if (r)
+ return r;
ddata->videomode = td028ttec1_panel_timings;
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
index 68e3b68a2..51e628b85 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
@@ -19,8 +19,7 @@
#include <linux/slab.h>
#include <linux/of_gpio.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include <video/omapfb_dss.h>
#define TPO_R02_MODE(x) ((x) & 7)
#define TPO_R02_MODE_800x480 7
@@ -465,32 +464,6 @@ static struct omap_dss_driver tpo_td043_ops = {
};
-static int tpo_td043_probe_pdata(struct spi_device *spi)
-{
- const struct panel_tpo_td043mtea1_platform_data *pdata;
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct omap_dss_device *dssdev, *in;
-
- pdata = dev_get_platdata(&spi->dev);
-
- ddata->nreset_gpio = pdata->nreset_gpio;
-
- in = omap_dss_find_output(pdata->source);
- if (in == NULL) {
- dev_err(&spi->dev, "failed to find video source '%s'\n",
- pdata->source);
- return -EPROBE_DEFER;
- }
- ddata->in = in;
-
- ddata->data_lines = pdata->data_lines;
-
- dssdev = &ddata->dssdev;
- dssdev->name = pdata->name;
-
- return 0;
-}
-
static int tpo_td043_probe_of(struct spi_device *spi)
{
struct device_node *node = spi->dev.of_node;
@@ -524,6 +497,9 @@ static int tpo_td043_probe(struct spi_device *spi)
dev_dbg(&spi->dev, "%s\n", __func__);
+ if (!spi->dev.of_node)
+ return -ENODEV;
+
spi->bits_per_word = 16;
spi->mode = SPI_MODE_0;
@@ -541,17 +517,9 @@ static int tpo_td043_probe(struct spi_device *spi)
ddata->spi = spi;
- if (dev_get_platdata(&spi->dev)) {
- r = tpo_td043_probe_pdata(spi);
- if (r)
- return r;
- } else if (spi->dev.of_node) {
- r = tpo_td043_probe_of(spi);
- if (r)
- return r;
- } else {
- return -ENODEV;
- }
+ r = tpo_td043_probe_of(spi);
+ if (r)
+ return r;
ddata->mode = TPO_R02_MODE_800x480;
memcpy(ddata->gamma, tpo_td043_def_gamma, sizeof(ddata->gamma));
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/apply.c b/drivers/video/fbdev/omap2/omapfb/dss/apply.c
index 663ccc3bf..2481f4871 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/apply.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/apply.c
@@ -23,7 +23,7 @@
#include <linux/spinlock.h>
#include <linux/jiffies.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/core.c b/drivers/video/fbdev/omap2/omapfb/dss/core.c
index 5a87179b7..29de48275 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/core.c
@@ -35,7 +35,7 @@
#include <linux/suspend.h>
#include <linux/slab.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "dss_features.h"
@@ -208,8 +208,6 @@ static int __init omap_dss_probe(struct platform_device *pdev)
core.default_display_name = def_disp_name;
else if (pdata->default_display_name)
core.default_display_name = pdata->default_display_name;
- else if (pdata->default_device)
- core.default_display_name = pdata->default_device->name;
register_pm_notifier(&omap_dss_pm_notif_block);
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
index 6607db37a..3691bde4c 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
@@ -26,7 +26,7 @@
#include <linux/interrupt.h>
#include <linux/seq_file.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
index 5491e304f..7a75dfda9 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
@@ -41,7 +41,7 @@
#include <linux/of.h>
#include <linux/component.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c
index 038c15b04..59c9a5c47 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c
@@ -18,7 +18,7 @@
*/
#include <linux/kernel.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dispc.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
index 75b528602..b3fdbfd0b 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
@@ -25,7 +25,7 @@
#include <linux/platform_device.h>
#include <linux/sysfs.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
static ssize_t display_name_show(struct omap_dss_device *dssdev, char *buf)
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/display.c b/drivers/video/fbdev/omap2/omapfb/dss/display.c
index ef5b90279..dd5468695 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/display.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/display.c
@@ -28,7 +28,7 @@
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dpi.c b/drivers/video/fbdev/omap2/omapfb/dss/dpi.c
index 7953e6a52..da09806b9 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dpi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dpi.c
@@ -34,7 +34,7 @@
#include <linux/clk.h>
#include <linux/component.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
index d63e59807..9e4800a4e 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
@@ -42,7 +42,7 @@
#include <linux/of_platform.h>
#include <linux/component.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include <video/mipi_display.h>
#include "dss.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
index bf407b6ba..d356a252a 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
@@ -18,7 +18,7 @@
#include <linux/of.h>
#include <linux/seq_file.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.c b/drivers/video/fbdev/omap2/omapfb/dss/dss.c
index 0078c4d1f..47d7f69ad 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.c
@@ -41,7 +41,7 @@
#include <linux/suspend.h>
#include <linux/component.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.h b/drivers/video/fbdev/omap2/omapfb/dss/dss.h
index 0184a8461..a3cc0ca8f 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss.h
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.h
@@ -73,6 +73,17 @@
#define FLD_MOD(orig, val, start, end) \
(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
+enum omap_dss_clk_source {
+ OMAP_DSS_CLK_SRC_FCK = 0, /* OMAP2/3: DSS1_ALWON_FCLK
+ * OMAP4: DSS_FCLK */
+ OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC, /* OMAP3: DSI1_PLL_FCLK
+ * OMAP4: PLL1_CLK1 */
+ OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI, /* OMAP3: DSI2_PLL_FCLK
+ * OMAP4: PLL1_CLK2 */
+ OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC, /* OMAP4: PLL2_CLK1 */
+ OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI, /* OMAP4: PLL2_CLK2 */
+};
+
enum dss_io_pad_mode {
DSS_IO_PAD_MODE_RESET,
DSS_IO_PAD_MODE_RFBI,
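
The omap_dss_clk_source enum is duplicated into this omapfb-private header so the fbdev DSS code can compile against <video/omapfb_dss.h> alone, independent of the DRM omapdss headers. A sketch of the kind of helper the DSS code keys off it (illustrative; the real name table lives elsewhere in dss.c):

static const char *clk_src_name(enum omap_dss_clk_source src)
{
	switch (src) {
	case OMAP_DSS_CLK_SRC_FCK:
		return "FCK";
	case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
		return "DSI PLL HSDIV DISPC";
	case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI:
		return "DSI PLL HSDIV DSI";
	case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
		return "DSI2 PLL HSDIV DISPC";
	case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI:
		return "DSI2 PLL HSDIV DSI";
	}
	return "unknown";
}
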
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c b/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c
index c886a2927..8fc843b56 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c
@@ -23,7 +23,7 @@
#include <linux/err.h>
#include <linux/slab.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h b/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h
index 53616b02b..f6de87e07 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h
@@ -23,7 +23,8 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/hdmi.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
+#include <sound/omap-hdmi-audio.h>
#include "dss.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
index 2e71aec83..926a6f20d 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
@@ -33,7 +33,7 @@
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/component.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include <sound/omap-hdmi-audio.h>
#include "hdmi4_core.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
index aade6d996..0ee829a16 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
@@ -38,7 +38,7 @@
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/component.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include <sound/omap-hdmi-audio.h>
#include "hdmi5_core.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c
index 1b8fcc6c4..189a5ad12 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c
@@ -4,7 +4,7 @@
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/of.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "hdmi.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c
index 1f5d19c11..9a13c35fd 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c
@@ -13,7 +13,7 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "hdmi.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c
index 06e23a7c4..eac3665ab 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c
@@ -17,7 +17,7 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "hdmi.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c
index 7c544bc56..705373e4c 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c
@@ -14,7 +14,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "hdmi.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
index a7414fb12..9e2a67fdf 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
@@ -26,7 +26,7 @@
#include <linux/platform_device.h>
#include <linux/jiffies.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/manager.c b/drivers/video/fbdev/omap2/omapfb/dss/manager.c
index 08a67f4f6..69f86d2cc 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/manager.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/manager.c
@@ -28,7 +28,7 @@
#include <linux/platform_device.h>
#include <linux/jiffies.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "dss_features.h"
@@ -69,7 +69,6 @@ int dss_init_overlay_managers(void)
break;
}
- mgr->caps = 0;
mgr->supported_displays =
dss_feat_get_supported_displays(mgr->id);
mgr->supported_outputs =
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/output.c b/drivers/video/fbdev/omap2/omapfb/dss/output.c
index 16072159b..bed9a9782 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/output.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/output.c
@@ -21,7 +21,7 @@
#include <linux/slab.h>
#include <linux/of.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
index 4cc5ddebf..f1f6c0aea 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
@@ -26,7 +26,7 @@
#include <linux/kobject.h>
#include <linux/platform_device.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/overlay.c b/drivers/video/fbdev/omap2/omapfb/dss/overlay.c
index 2f7cee985..d6c5d75d2 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/overlay.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/overlay.c
@@ -30,7 +30,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/pll.c b/drivers/video/fbdev/omap2/omapfb/dss/pll.c
index f974ddcd3..0564c5606 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/pll.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/pll.c
@@ -22,7 +22,7 @@
#include <linux/regulator/consumer.h>
#include <linux/sched.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c b/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c
index aea6a1d0f..562b0c4ae 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c
@@ -38,7 +38,7 @@
#include <linux/pm_runtime.h>
#include <linux/component.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
struct rfbi_reg { u16 idx; };
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/sdi.c b/drivers/video/fbdev/omap2/omapfb/dss/sdi.c
index d747cc6b5..c4be732a4 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/sdi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/sdi.c
@@ -29,7 +29,7 @@
#include <linux/of.h>
#include <linux/component.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
static struct {
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/venc.c b/drivers/video/fbdev/omap2/omapfb/dss/venc.c
index 26e0ee30a..392464da1 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/venc.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/venc.c
@@ -37,7 +37,7 @@
#include <linux/of.h>
#include <linux/component.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c b/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c
index b1ec59e42..a890540f2 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c
@@ -17,7 +17,7 @@
#include <linux/platform_device.h>
#include <linux/sched.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include "dss.h"
#include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
index 9ddfdd63b..ef6927307 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
@@ -30,7 +30,7 @@
#include <linux/export.h>
#include <linux/sizes.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include <video/omapvrfb.h>
#include "omapfb.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index d3af01c94..1d7c012f0 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -30,7 +30,7 @@
#include <linux/platform_device.h>
#include <linux/omapfb.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include <video/omapvrfb.h>
#include "omapfb.h"
@@ -1332,7 +1332,7 @@ static void omapfb_free_fbmem(struct fb_info *fbi)
}
dma_free_attrs(fbdev->dev, rg->size, rg->token, rg->dma_handle,
- &rg->attrs);
+ rg->attrs);
rg->token = NULL;
rg->vaddr = NULL;
@@ -1370,7 +1370,7 @@ static int omapfb_alloc_fbmem(struct fb_info *fbi, unsigned long size,
struct omapfb2_device *fbdev = ofbi->fbdev;
struct omapfb2_mem_region *rg;
void *token;
- DEFINE_DMA_ATTRS(attrs);
+ unsigned long attrs;
dma_addr_t dma_handle;
int r;
@@ -1386,15 +1386,15 @@ static int omapfb_alloc_fbmem(struct fb_info *fbi, unsigned long size,
size = PAGE_ALIGN(size);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ attrs = DMA_ATTR_WRITE_COMBINE;
if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB)
- dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+ attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
DBG("allocating %lu bytes for fb %d\n", size, ofbi->id);
token = dma_alloc_attrs(fbdev->dev, size, &dma_handle,
- GFP_KERNEL, &attrs);
+ GFP_KERNEL, attrs);
if (token == NULL) {
dev_err(fbdev->dev, "failed to allocate framebuffer\n");
@@ -1408,7 +1408,7 @@ static int omapfb_alloc_fbmem(struct fb_info *fbi, unsigned long size,
r = omap_vrfb_request_ctx(&rg->vrfb);
if (r) {
dma_free_attrs(fbdev->dev, size, token, dma_handle,
- &attrs);
+ attrs);
dev_err(fbdev->dev, "vrfb create ctx failed\n");
return r;
}
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c b/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
index 18fa9e1d0..8087a009c 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
@@ -29,7 +29,7 @@
#include <linux/mm.h>
#include <linux/omapfb.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#include <video/omapvrfb.h>
#include "omapfb.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb.h b/drivers/video/fbdev/omap2/omapfb/omapfb.h
index 623cd872a..555487d6d 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb.h
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb.h
@@ -28,10 +28,9 @@
#endif
#include <linux/rwsem.h>
-#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
#ifdef DEBUG
extern bool omapfb_debug;
@@ -51,7 +50,7 @@ extern bool omapfb_debug;
struct omapfb2_mem_region {
int id;
- struct dma_attrs attrs;
+ unsigned long attrs;
void *token;
dma_addr_t dma_handle;
u32 paddr;
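The omapfb hunks above follow the kernel-wide DMA-attribute conversion: the old struct dma_attrs plus dma_set_attr() pair is replaced by a plain unsigned long bitmask that is OR-ed together and passed to dma_alloc_attrs()/dma_free_attrs() by value. A minimal sketch of the new calling convention, assuming a generic "dev" and a hypothetical "no_kernel_mapping_wanted" flag (neither is part of the patch itself):

	/* Allocate a write-combined buffer with the 4.8-style
	 * unsigned-long attribute mask; no dma_set_attr() needed. */
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;
	dma_addr_t handle;
	void *buf;

	if (no_kernel_mapping_wanted)	/* hypothetical caller flag */
		attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	buf = dma_alloc_attrs(dev, PAGE_ALIGN(size), &handle,
			      GFP_KERNEL, attrs);
	if (buf)
		dma_free_attrs(dev, PAGE_ALIGN(size), buf, handle, attrs);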
diff --git a/drivers/video/logo/logo.c b/drivers/video/logo/logo.c
index 10fbfd8ab..b6bc4a0bd 100644
--- a/drivers/video/logo/logo.c
+++ b/drivers/video/logo/logo.c
@@ -36,11 +36,11 @@ static int __init fb_logo_late_init(void)
late_initcall(fb_logo_late_init);
-/* logo's are marked __initdata. Use __init_refok to tell
+/* logo's are marked __initdata. Use __ref to tell
* modpost that it is intended that this function uses data
* marked __initdata.
*/
-const struct linux_logo * __init_refok fb_find_logo(int depth)
+const struct linux_logo * __ref fb_find_logo(int depth)
{
const struct linux_logo *logo = NULL;
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index f6ea8f4ba..4e7003db1 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -30,6 +30,7 @@
#include <linux/oom.h>
#include <linux/wait.h>
#include <linux/mm.h>
+#include <linux/mount.h>
/*
* Balloon device works in 4K page units. So each page is pointed to by
@@ -45,6 +46,10 @@ static int oom_pages = OOM_VBALLOON_DEFAULT_PAGES;
module_param(oom_pages, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(oom_pages, "pages to free on OOM");
+#ifdef CONFIG_BALLOON_COMPACTION
+static struct vfsmount *balloon_mnt;
+#endif
+
struct virtio_balloon {
struct virtio_device *vdev;
struct virtqueue *inflate_vq, *deflate_vq, *stats_vq;
@@ -492,6 +497,24 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
return MIGRATEPAGE_SUCCESS;
}
+
+static struct dentry *balloon_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+{
+ static const struct dentry_operations ops = {
+ .d_dname = simple_dname,
+ };
+
+ return mount_pseudo(fs_type, "balloon-kvm:", NULL, &ops,
+ BALLOON_KVM_MAGIC);
+}
+
+static struct file_system_type balloon_fs = {
+ .name = "balloon-kvm",
+ .mount = balloon_mount,
+ .kill_sb = kill_anon_super,
+};
+
#endif /* CONFIG_BALLOON_COMPACTION */
static int virtballoon_probe(struct virtio_device *vdev)
@@ -521,9 +544,6 @@ static int virtballoon_probe(struct virtio_device *vdev)
vb->vdev = vdev;
balloon_devinfo_init(&vb->vb_dev_info);
-#ifdef CONFIG_BALLOON_COMPACTION
- vb->vb_dev_info.migratepage = virtballoon_migratepage;
-#endif
err = init_vqs(vb);
if (err)
@@ -533,13 +553,33 @@ static int virtballoon_probe(struct virtio_device *vdev)
vb->nb.priority = VIRTBALLOON_OOM_NOTIFY_PRIORITY;
err = register_oom_notifier(&vb->nb);
if (err < 0)
- goto out_oom_notify;
+ goto out_del_vqs;
+
+#ifdef CONFIG_BALLOON_COMPACTION
+ balloon_mnt = kern_mount(&balloon_fs);
+ if (IS_ERR(balloon_mnt)) {
+ err = PTR_ERR(balloon_mnt);
+ unregister_oom_notifier(&vb->nb);
+ goto out_del_vqs;
+ }
+
+ vb->vb_dev_info.migratepage = virtballoon_migratepage;
+ vb->vb_dev_info.inode = alloc_anon_inode(balloon_mnt->mnt_sb);
+ if (IS_ERR(vb->vb_dev_info.inode)) {
+ err = PTR_ERR(vb->vb_dev_info.inode);
+ kern_unmount(balloon_mnt);
+ unregister_oom_notifier(&vb->nb);
+ vb->vb_dev_info.inode = NULL;
+ goto out_del_vqs;
+ }
+ vb->vb_dev_info.inode->i_mapping->a_ops = &balloon_aops;
+#endif
virtio_device_ready(vdev);
return 0;
-out_oom_notify:
+out_del_vqs:
vdev->config->del_vqs(vdev);
out_free_vb:
kfree(vb);
@@ -573,6 +613,8 @@ static void virtballoon_remove(struct virtio_device *vdev)
cancel_work_sync(&vb->update_balloon_stats_work);
remove_common(vb);
+ if (vb->vb_dev_info.inode)
+ iput(vb->vb_dev_info.inode);
kfree(vb);
}
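virtio_balloon gains a private pseudo-filesystem so that ballooned pages have an inode whose address_space carries the balloon_aops migration callbacks; the probe error paths now funnel through a single out_del_vqs label and unwind the OOM notifier and mount by hand. A compressed sketch of the setup sequence, using only names from the patch:

	/* Under CONFIG_BALLOON_COMPACTION: mount the anonymous fs and
	 * attach an inode so the compaction code can migrate pages. */
	balloon_mnt = kern_mount(&balloon_fs);
	if (IS_ERR(balloon_mnt))
		return PTR_ERR(balloon_mnt);

	vb->vb_dev_info.inode = alloc_anon_inode(balloon_mnt->mnt_sb);
	if (IS_ERR(vb->vb_dev_info.inode)) {
		kern_unmount(balloon_mnt);
		return PTR_ERR(vb->vb_dev_info.inode);
	}
	vb->vb_dev_info.inode->i_mapping->a_ops = &balloon_aops;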
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 2ebf30e57..ed9c9eeed 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -117,7 +117,10 @@ struct vring_virtqueue {
#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
/*
- * The interaction between virtio and a possible IOMMU is a mess.
+ * Modern virtio devices have feature bits to specify whether they need a
+ * quirk and bypass the IOMMU. If not there, just use the DMA API.
+ *
+ * If there, the interaction between virtio and DMA API is messy.
*
* On most systems with virtio, physical addresses match bus addresses,
* and it doesn't particularly matter whether we use the DMA API.
@@ -133,10 +136,18 @@ struct vring_virtqueue {
*
* For the time being, we preserve historic behavior and bypass the DMA
* API.
+ *
+ * TODO: install a per-device DMA ops structure that does the right thing
+ * taking into account all the above quirks, and use the DMA API
+ * unconditionally on data path.
*/
static bool vring_use_dma_api(struct virtio_device *vdev)
{
+ if (!virtio_has_iommu_quirk(vdev))
+ return true;
+
+ /* Otherwise, we are left to guess. */
/*
* In theory, it's possible to have a buggy QEMU-supplied
* emulated Q35 IOMMU and Xen enabled at the same time. On
@@ -156,7 +167,7 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
* making all of the arch DMA ops work on the vring device itself
* is a mess. For now, we use the parent device for DMA ops.
*/
-struct device *vring_dma_dev(const struct vring_virtqueue *vq)
+static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
return vq->vq.vdev->dev.parent;
}
@@ -417,6 +428,7 @@ unmap_release:
if (indirect)
kfree(desc);
+ END_USE(vq);
return -EIO;
}
@@ -1101,6 +1113,8 @@ void vring_transport_features(struct virtio_device *vdev)
break;
case VIRTIO_F_VERSION_1:
break;
+ case VIRTIO_F_IOMMU_PLATFORM:
+ break;
default:
/* We don't understand this bit. */
__virtio_clear_bit(vdev, i);
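The vring_use_dma_api() change keys off the new VIRTIO_F_IOMMU_PLATFORM feature: a device that does not claim the legacy "bypass the IOMMU" quirk must be driven through the DMA API. A simplified sketch of the decision (the real function keeps the Xen special case shown above, omitted here):

	static bool use_dma_api(struct virtio_device *vdev)
	{
		/* VIRTIO_F_IOMMU_PLATFORM negotiated: addresses are
		 * bus addresses, so the DMA API is mandatory. */
		if (!virtio_has_iommu_quirk(vdev))
			return true;

		/* Legacy device: keep historic behavior and bypass. */
		return false;
	}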
diff --git a/drivers/w1/slaves/w1_ds2406.c b/drivers/w1/slaves/w1_ds2406.c
index d488961a8..51f2f66d6 100644
--- a/drivers/w1/slaves/w1_ds2406.c
+++ b/drivers/w1/slaves/w1_ds2406.c
@@ -153,16 +153,4 @@ static struct w1_family w1_family_12 = {
.fid = W1_FAMILY_DS2406,
.fops = &w1_f12_fops,
};
-
-static int __init w1_f12_init(void)
-{
- return w1_register_family(&w1_family_12);
-}
-
-static void __exit w1_f12_exit(void)
-{
- w1_unregister_family(&w1_family_12);
-}
-
-module_init(w1_f12_init);
-module_exit(w1_f12_exit);
+module_w1_family(w1_family_12);
diff --git a/drivers/w1/slaves/w1_ds2408.c b/drivers/w1/slaves/w1_ds2408.c
index 7dfa0e116..aec5958e6 100644
--- a/drivers/w1/slaves/w1_ds2408.c
+++ b/drivers/w1/slaves/w1_ds2408.c
@@ -351,16 +351,4 @@ static struct w1_family w1_family_29 = {
.fid = W1_FAMILY_DS2408,
.fops = &w1_f29_fops,
};
-
-static int __init w1_f29_init(void)
-{
- return w1_register_family(&w1_family_29);
-}
-
-static void __exit w1_f29_exit(void)
-{
- w1_unregister_family(&w1_family_29);
-}
-
-module_init(w1_f29_init);
-module_exit(w1_f29_exit);
+module_w1_family(w1_family_29);
diff --git a/drivers/w1/slaves/w1_ds2413.c b/drivers/w1/slaves/w1_ds2413.c
index ee28fc1ff..f2e1c5153 100644
--- a/drivers/w1/slaves/w1_ds2413.c
+++ b/drivers/w1/slaves/w1_ds2413.c
@@ -135,16 +135,4 @@ static struct w1_family w1_family_3a = {
.fid = W1_FAMILY_DS2413,
.fops = &w1_f3a_fops,
};
-
-static int __init w1_f3a_init(void)
-{
- return w1_register_family(&w1_family_3a);
-}
-
-static void __exit w1_f3a_exit(void)
-{
- w1_unregister_family(&w1_family_3a);
-}
-
-module_init(w1_f3a_init);
-module_exit(w1_f3a_exit);
+module_w1_family(w1_family_3a);
diff --git a/drivers/w1/slaves/w1_ds2423.c b/drivers/w1/slaves/w1_ds2423.c
index 7e41b7d91..4ab54fd9d 100644
--- a/drivers/w1/slaves/w1_ds2423.c
+++ b/drivers/w1/slaves/w1_ds2423.c
@@ -138,19 +138,7 @@ static struct w1_family w1_family_1d = {
.fid = W1_COUNTER_DS2423,
.fops = &w1_f1d_fops,
};
-
-static int __init w1_f1d_init(void)
-{
- return w1_register_family(&w1_family_1d);
-}
-
-static void __exit w1_f1d_exit(void)
-{
- w1_unregister_family(&w1_family_1d);
-}
-
-module_init(w1_f1d_init);
-module_exit(w1_f1d_exit);
+module_w1_family(w1_family_1d);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mika Laitio <lamikr@pilppa.org>");
diff --git a/drivers/w1/slaves/w1_ds2431.c b/drivers/w1/slaves/w1_ds2431.c
index 9c4ff9d28..80572cb63 100644
--- a/drivers/w1/slaves/w1_ds2431.c
+++ b/drivers/w1/slaves/w1_ds2431.c
@@ -288,19 +288,7 @@ static struct w1_family w1_family_2d = {
.fid = W1_EEPROM_DS2431,
.fops = &w1_f2d_fops,
};
-
-static int __init w1_f2d_init(void)
-{
- return w1_register_family(&w1_family_2d);
-}
-
-static void __exit w1_f2d_fini(void)
-{
- w1_unregister_family(&w1_family_2d);
-}
-
-module_init(w1_f2d_init);
-module_exit(w1_f2d_fini);
+module_w1_family(w1_family_2d);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Bernhard Weirich <bernhard.weirich@riedel.net>");
diff --git a/drivers/w1/slaves/w1_ds2433.c b/drivers/w1/slaves/w1_ds2433.c
index 72319a968..6cf378c89 100644
--- a/drivers/w1/slaves/w1_ds2433.c
+++ b/drivers/w1/slaves/w1_ds2433.c
@@ -305,16 +305,4 @@ static struct w1_family w1_family_23 = {
.fid = W1_EEPROM_DS2433,
.fops = &w1_f23_fops,
};
-
-static int __init w1_f23_init(void)
-{
- return w1_register_family(&w1_family_23);
-}
-
-static void __exit w1_f23_fini(void)
-{
- w1_unregister_family(&w1_family_23);
-}
-
-module_init(w1_f23_init);
-module_exit(w1_f23_fini);
+module_w1_family(w1_family_23);
diff --git a/drivers/w1/slaves/w1_ds2760.c b/drivers/w1/slaves/w1_ds2760.c
index d9079d48d..ffa37f773 100644
--- a/drivers/w1/slaves/w1_ds2760.c
+++ b/drivers/w1/slaves/w1_ds2760.c
@@ -121,25 +121,14 @@ static const struct attribute_group *w1_ds2760_groups[] = {
NULL,
};
-static DEFINE_IDA(bat_ida);
-
static int w1_ds2760_add_slave(struct w1_slave *sl)
{
int ret;
- int id;
struct platform_device *pdev;
- id = ida_simple_get(&bat_ida, 0, 0, GFP_KERNEL);
- if (id < 0) {
- ret = id;
- goto noid;
- }
-
- pdev = platform_device_alloc("ds2760-battery", id);
- if (!pdev) {
- ret = -ENOMEM;
- goto pdev_alloc_failed;
- }
+ pdev = platform_device_alloc("ds2760-battery", PLATFORM_DEVID_AUTO);
+ if (!pdev)
+ return -ENOMEM;
pdev->dev.parent = &sl->dev;
ret = platform_device_add(pdev);
@@ -148,24 +137,19 @@ static int w1_ds2760_add_slave(struct w1_slave *sl)
dev_set_drvdata(&sl->dev, pdev);
- goto success;
+ return 0;
pdev_add_failed:
platform_device_put(pdev);
-pdev_alloc_failed:
- ida_simple_remove(&bat_ida, id);
-noid:
-success:
+
return ret;
}
static void w1_ds2760_remove_slave(struct w1_slave *sl)
{
struct platform_device *pdev = dev_get_drvdata(&sl->dev);
- int id = pdev->id;
platform_device_unregister(pdev);
- ida_simple_remove(&bat_ida, id);
}
static struct w1_family_ops w1_ds2760_fops = {
@@ -178,28 +162,13 @@ static struct w1_family w1_ds2760_family = {
.fid = W1_FAMILY_DS2760,
.fops = &w1_ds2760_fops,
};
-
-static int __init w1_ds2760_init(void)
-{
- pr_info("1-Wire driver for the DS2760 battery monitor chip - (c) 2004-2005, Szabolcs Gyurko\n");
- ida_init(&bat_ida);
- return w1_register_family(&w1_ds2760_family);
-}
-
-static void __exit w1_ds2760_exit(void)
-{
- w1_unregister_family(&w1_ds2760_family);
- ida_destroy(&bat_ida);
-}
+module_w1_family(w1_ds2760_family);
EXPORT_SYMBOL(w1_ds2760_read);
EXPORT_SYMBOL(w1_ds2760_write);
EXPORT_SYMBOL(w1_ds2760_store_eeprom);
EXPORT_SYMBOL(w1_ds2760_recall_eeprom);
-module_init(w1_ds2760_init);
-module_exit(w1_ds2760_exit);
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Szabolcs Gyurko <szabolcs.gyurko@tlt.hu>");
MODULE_DESCRIPTION("1-wire Driver Dallas 2760 battery monitor chip");
diff --git a/drivers/w1/slaves/w1_ds2780.c b/drivers/w1/slaves/w1_ds2780.c
index 50e85f792..f5c2aa429 100644
--- a/drivers/w1/slaves/w1_ds2780.c
+++ b/drivers/w1/slaves/w1_ds2780.c
@@ -113,25 +113,14 @@ static const struct attribute_group *w1_ds2780_groups[] = {
NULL,
};
-static DEFINE_IDA(bat_ida);
-
static int w1_ds2780_add_slave(struct w1_slave *sl)
{
int ret;
- int id;
struct platform_device *pdev;
- id = ida_simple_get(&bat_ida, 0, 0, GFP_KERNEL);
- if (id < 0) {
- ret = id;
- goto noid;
- }
-
- pdev = platform_device_alloc("ds2780-battery", id);
- if (!pdev) {
- ret = -ENOMEM;
- goto pdev_alloc_failed;
- }
+ pdev = platform_device_alloc("ds2780-battery", PLATFORM_DEVID_AUTO);
+ if (!pdev)
+ return -ENOMEM;
pdev->dev.parent = &sl->dev;
ret = platform_device_add(pdev);
@@ -144,19 +133,15 @@ static int w1_ds2780_add_slave(struct w1_slave *sl)
pdev_add_failed:
platform_device_put(pdev);
-pdev_alloc_failed:
- ida_simple_remove(&bat_ida, id);
-noid:
+
return ret;
}
static void w1_ds2780_remove_slave(struct w1_slave *sl)
{
struct platform_device *pdev = dev_get_drvdata(&sl->dev);
- int id = pdev->id;
platform_device_unregister(pdev);
- ida_simple_remove(&bat_ida, id);
}
static struct w1_family_ops w1_ds2780_fops = {
@@ -169,21 +154,7 @@ static struct w1_family w1_ds2780_family = {
.fid = W1_FAMILY_DS2780,
.fops = &w1_ds2780_fops,
};
-
-static int __init w1_ds2780_init(void)
-{
- ida_init(&bat_ida);
- return w1_register_family(&w1_ds2780_family);
-}
-
-static void __exit w1_ds2780_exit(void)
-{
- w1_unregister_family(&w1_ds2780_family);
- ida_destroy(&bat_ida);
-}
-
-module_init(w1_ds2780_init);
-module_exit(w1_ds2780_exit);
+module_w1_family(w1_ds2780_family);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Clifton Barnes <cabarnes@indesign-llc.com>");
diff --git a/drivers/w1/slaves/w1_ds2781.c b/drivers/w1/slaves/w1_ds2781.c
index 1eb98fb16..9c03e014c 100644
--- a/drivers/w1/slaves/w1_ds2781.c
+++ b/drivers/w1/slaves/w1_ds2781.c
@@ -17,7 +17,6 @@
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
-#include <linux/idr.h>
#include "../w1.h"
#include "../w1_int.h"
@@ -111,25 +110,14 @@ static const struct attribute_group *w1_ds2781_groups[] = {
NULL,
};
-static DEFINE_IDA(bat_ida);
-
static int w1_ds2781_add_slave(struct w1_slave *sl)
{
int ret;
- int id;
struct platform_device *pdev;
- id = ida_simple_get(&bat_ida, 0, 0, GFP_KERNEL);
- if (id < 0) {
- ret = id;
- goto noid;
- }
-
- pdev = platform_device_alloc("ds2781-battery", id);
- if (!pdev) {
- ret = -ENOMEM;
- goto pdev_alloc_failed;
- }
+ pdev = platform_device_alloc("ds2781-battery", PLATFORM_DEVID_AUTO);
+ if (!pdev)
+ return -ENOMEM;
pdev->dev.parent = &sl->dev;
ret = platform_device_add(pdev);
@@ -142,19 +130,15 @@ static int w1_ds2781_add_slave(struct w1_slave *sl)
pdev_add_failed:
platform_device_put(pdev);
-pdev_alloc_failed:
- ida_simple_remove(&bat_ida, id);
-noid:
+
return ret;
}
static void w1_ds2781_remove_slave(struct w1_slave *sl)
{
struct platform_device *pdev = dev_get_drvdata(&sl->dev);
- int id = pdev->id;
platform_device_unregister(pdev);
- ida_simple_remove(&bat_ida, id);
}
static struct w1_family_ops w1_ds2781_fops = {
@@ -167,21 +151,7 @@ static struct w1_family w1_ds2781_family = {
.fid = W1_FAMILY_DS2781,
.fops = &w1_ds2781_fops,
};
-
-static int __init w1_ds2781_init(void)
-{
- ida_init(&bat_ida);
- return w1_register_family(&w1_ds2781_family);
-}
-
-static void __exit w1_ds2781_exit(void)
-{
- w1_unregister_family(&w1_ds2781_family);
- ida_destroy(&bat_ida);
-}
-
-module_init(w1_ds2781_init);
-module_exit(w1_ds2781_exit);
+module_w1_family(w1_ds2781_family);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Renata Sayakhova <renata@oktetlabs.ru>");
diff --git a/drivers/w1/slaves/w1_ds28e04.c b/drivers/w1/slaves/w1_ds28e04.c
index 365d6dff2..5e348d38e 100644
--- a/drivers/w1/slaves/w1_ds28e04.c
+++ b/drivers/w1/slaves/w1_ds28e04.c
@@ -427,16 +427,4 @@ static struct w1_family w1_family_1C = {
.fid = W1_FAMILY_DS28E04,
.fops = &w1_f1C_fops,
};
-
-static int __init w1_f1C_init(void)
-{
- return w1_register_family(&w1_family_1C);
-}
-
-static void __exit w1_f1C_fini(void)
-{
- w1_unregister_family(&w1_family_1C);
-}
-
-module_init(w1_f1C_init);
-module_exit(w1_f1C_fini);
+module_w1_family(w1_family_1C);
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h
index ed5dcb80a..10a7a0767 100644
--- a/drivers/w1/w1_family.h
+++ b/drivers/w1/w1_family.h
@@ -88,4 +88,16 @@ struct w1_family * w1_family_registered(u8);
void w1_unregister_family(struct w1_family *);
int w1_register_family(struct w1_family *);
+/**
+ * module_w1_family() - Helper macro for registering a 1-Wire family
+ * @__w1_family: w1_family struct
+ *
+ * Helper macro for 1-Wire families which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit().
+ */
+#define module_w1_family(__w1_family) \
+ module_driver(__w1_family, w1_register_family, \
+ w1_unregister_family)
+
#endif /* __W1_FAMILY_H */
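module_w1_family() is a thin wrapper around the generic module_driver() helper. For a family struct "fam", the macro expands to roughly the following (illustrative expansion, mirroring include/linux/device.h):

	static int __init fam_init(void)
	{
		return w1_register_family(&fam);
	}
	module_init(fam_init);

	static void __exit fam_exit(void)
	{
		w1_unregister_family(&fam);
	}
	module_exit(fam_exit);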
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index b4b3e2564..1bffe006c 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -48,7 +48,6 @@ config WATCHDOG_NOWAYOUT
config WATCHDOG_SYSFS
bool "Read different watchdog information through sysfs"
- default n
help
Say Y here if you want to enable watchdog device status read through
sysfs attributes.
@@ -516,6 +515,15 @@ config MAX63XX_WATCHDOG
help
Support for memory mapped max63{69,70,71,72,73,74} watchdog timer.
+config MAX77620_WATCHDOG
+ tristate "Maxim Max77620 Watchdog Timer"
+ depends on MFD_MAX77620
+ help
+ This is the driver for the Max77620 watchdog timer.
+ Say 'Y' here to enable the watchdog timer support for
+ MAX77620 chips. To compile this driver as a module,
+ choose M here: the module will be called max77620_wdt.
+
config IMX2_WDT
tristate "IMX2+ Watchdog"
depends on ARCH_MXC || ARCH_LAYERSCAPE
@@ -609,6 +617,16 @@ config QCOM_WDT
To compile this driver as a module, choose M here: the
module will be called qcom_wdt.
+config MESON_GXBB_WATCHDOG
+ tristate "Amlogic Meson GXBB SoCs watchdog support"
+ depends on ARCH_MESON
+ select WATCHDOG_CORE
+ help
+ Say Y here to include support for the watchdog timer
+ in Amlogic Meson GXBB SoCs.
+ To compile this driver as a module, choose M here: the
+ module will be called meson_gxbb_wdt.
+
config MESON_WATCHDOG
tristate "Amlogic Meson SoCs watchdog support"
depends on ARCH_MESON
@@ -669,6 +687,19 @@ config RENESAS_WDT
This driver adds watchdog support for the integrated watchdogs in the
Renesas R-Car and other SH-Mobile SoCs (usually named RWDT or SWDT).
+config ASPEED_WATCHDOG
+ tristate "Aspeed 2400 watchdog support"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ select WATCHDOG_CORE
+ help
+ Say Y here to include support for the watchdog timer
+ in Aspeed BMC SoCs.
+
+ This driver is required to reboot the SoC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called aspeed_wdt.
+
# AVR32 Architecture
config AT32AP700X_WDT
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index a46e7c138..c22ad3ea3 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -67,6 +67,7 @@ obj-$(CONFIG_ST_LPC_WATCHDOG) += st_lpc_wdt.o
obj-$(CONFIG_QCOM_WDT) += qcom-wdt.o
obj-$(CONFIG_BCM_KONA_WDT) += bcm_kona_wdt.o
obj-$(CONFIG_TEGRA_WATCHDOG) += tegra_wdt.o
+obj-$(CONFIG_MESON_GXBB_WATCHDOG) += meson_gxbb_wdt.o
obj-$(CONFIG_MESON_WATCHDOG) += meson_wdt.o
obj-$(CONFIG_MEDIATEK_WATCHDOG) += mtk_wdt.o
obj-$(CONFIG_DIGICOLOR_WATCHDOG) += digicolor_wdt.o
@@ -74,6 +75,7 @@ obj-$(CONFIG_LPC18XX_WATCHDOG) += lpc18xx_wdt.o
obj-$(CONFIG_BCM7038_WDT) += bcm7038_wdt.o
obj-$(CONFIG_ATLAS7_WATCHDOG) += atlas7_wdt.o
obj-$(CONFIG_RENESAS_WDT) += renesas_wdt.o
+obj-$(CONFIG_ASPEED_WATCHDOG) += aspeed_wdt.o
# AVR32 Architecture
obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
@@ -203,6 +205,7 @@ obj-$(CONFIG_TANGOX_WATCHDOG) += tangox_wdt.o
obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o
obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o
obj-$(CONFIG_MAX63XX_WATCHDOG) += max63xx_wdt.o
+obj-$(CONFIG_MAX77620_WATCHDOG) += max77620_wdt.o
obj-$(CONFIG_ZIIRAVE_WATCHDOG) += ziirave_wdt.o
obj-$(CONFIG_SOFT_WATCHDOG) += softdog.o
obj-$(CONFIG_MENF21BMC_WATCHDOG) += menf21bmc_wdt.o
diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
new file mode 100644
index 000000000..f5ad8023c
--- /dev/null
+++ b/drivers/watchdog/aspeed_wdt.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2016 IBM Corporation
+ *
+ * Joel Stanley <joel@jms.id.au>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+
+struct aspeed_wdt {
+ struct watchdog_device wdd;
+ void __iomem *base;
+ u32 ctrl;
+};
+
+static const struct of_device_id aspeed_wdt_of_table[] = {
+ { .compatible = "aspeed,ast2400-wdt" },
+ { .compatible = "aspeed,ast2500-wdt" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
+
+#define WDT_STATUS 0x00
+#define WDT_RELOAD_VALUE 0x04
+#define WDT_RESTART 0x08
+#define WDT_CTRL 0x0C
+#define WDT_CTRL_RESET_MODE_SOC (0x00 << 5)
+#define WDT_CTRL_RESET_MODE_FULL_CHIP (0x01 << 5)
+#define WDT_CTRL_1MHZ_CLK BIT(4)
+#define WDT_CTRL_WDT_EXT BIT(3)
+#define WDT_CTRL_WDT_INTR BIT(2)
+#define WDT_CTRL_RESET_SYSTEM BIT(1)
+#define WDT_CTRL_ENABLE BIT(0)
+
+#define WDT_RESTART_MAGIC 0x4755
+
+/* 32 bits at 1MHz, in milliseconds */
+#define WDT_MAX_TIMEOUT_MS 4294967
+#define WDT_DEFAULT_TIMEOUT 30
+#define WDT_RATE_1MHZ 1000000
+
+static struct aspeed_wdt *to_aspeed_wdt(struct watchdog_device *wdd)
+{
+ return container_of(wdd, struct aspeed_wdt, wdd);
+}
+
+static void aspeed_wdt_enable(struct aspeed_wdt *wdt, int count)
+{
+ wdt->ctrl |= WDT_CTRL_ENABLE;
+
+ writel(0, wdt->base + WDT_CTRL);
+ writel(count, wdt->base + WDT_RELOAD_VALUE);
+ writel(WDT_RESTART_MAGIC, wdt->base + WDT_RESTART);
+ writel(wdt->ctrl, wdt->base + WDT_CTRL);
+}
+
+static int aspeed_wdt_start(struct watchdog_device *wdd)
+{
+ struct aspeed_wdt *wdt = to_aspeed_wdt(wdd);
+
+ aspeed_wdt_enable(wdt, wdd->timeout * WDT_RATE_1MHZ);
+
+ return 0;
+}
+
+static int aspeed_wdt_stop(struct watchdog_device *wdd)
+{
+ struct aspeed_wdt *wdt = to_aspeed_wdt(wdd);
+
+ wdt->ctrl &= ~WDT_CTRL_ENABLE;
+ writel(wdt->ctrl, wdt->base + WDT_CTRL);
+
+ return 0;
+}
+
+static int aspeed_wdt_ping(struct watchdog_device *wdd)
+{
+ struct aspeed_wdt *wdt = to_aspeed_wdt(wdd);
+
+ writel(WDT_RESTART_MAGIC, wdt->base + WDT_RESTART);
+
+ return 0;
+}
+
+static int aspeed_wdt_set_timeout(struct watchdog_device *wdd,
+ unsigned int timeout)
+{
+ struct aspeed_wdt *wdt = to_aspeed_wdt(wdd);
+ u32 actual;
+
+ wdd->timeout = timeout;
+
+ actual = min(timeout, wdd->max_hw_heartbeat_ms / 1000);
+
+ writel(actual * WDT_RATE_1MHZ, wdt->base + WDT_RELOAD_VALUE);
+ writel(WDT_RESTART_MAGIC, wdt->base + WDT_RESTART);
+
+ return 0;
+}
+
+static int aspeed_wdt_restart(struct watchdog_device *wdd,
+ unsigned long action, void *data)
+{
+ struct aspeed_wdt *wdt = to_aspeed_wdt(wdd);
+
+ aspeed_wdt_enable(wdt, 128 * WDT_RATE_1MHZ / 1000);
+
+ mdelay(1000);
+
+ return 0;
+}
+
+static const struct watchdog_ops aspeed_wdt_ops = {
+ .start = aspeed_wdt_start,
+ .stop = aspeed_wdt_stop,
+ .ping = aspeed_wdt_ping,
+ .set_timeout = aspeed_wdt_set_timeout,
+ .restart = aspeed_wdt_restart,
+ .owner = THIS_MODULE,
+};
+
+static const struct watchdog_info aspeed_wdt_info = {
+ .options = WDIOF_KEEPALIVEPING
+ | WDIOF_MAGICCLOSE
+ | WDIOF_SETTIMEOUT,
+ .identity = KBUILD_MODNAME,
+};
+
+static int aspeed_wdt_remove(struct platform_device *pdev)
+{
+ struct aspeed_wdt *wdt = platform_get_drvdata(pdev);
+
+ watchdog_unregister_device(&wdt->wdd);
+
+ return 0;
+}
+
+static int aspeed_wdt_probe(struct platform_device *pdev)
+{
+ struct aspeed_wdt *wdt;
+ struct resource *res;
+ int ret;
+
+ wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ if (!wdt)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ wdt->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(wdt->base))
+ return PTR_ERR(wdt->base);
+
+ /*
+ * The ast2400 wdt can run at PCLK, or 1MHz. The ast2500 only
+ * runs at 1MHz. We chose to always run at 1MHz, as there's no
+ * good reason to have a faster watchdog counter.
+ */
+ wdt->wdd.info = &aspeed_wdt_info;
+ wdt->wdd.ops = &aspeed_wdt_ops;
+ wdt->wdd.max_hw_heartbeat_ms = WDT_MAX_TIMEOUT_MS;
+ wdt->wdd.parent = &pdev->dev;
+
+ wdt->wdd.timeout = WDT_DEFAULT_TIMEOUT;
+ watchdog_init_timeout(&wdt->wdd, 0, &pdev->dev);
+
+ /*
+ * Control reset on a per-device basis to ensure the
+ * host is not affected by a BMC reboot, so only reset
+ * the SOC and not the full chip
+ */
+ wdt->ctrl = WDT_CTRL_RESET_MODE_SOC |
+ WDT_CTRL_1MHZ_CLK |
+ WDT_CTRL_RESET_SYSTEM;
+
+ if (readl(wdt->base + WDT_CTRL) & WDT_CTRL_ENABLE) {
+ aspeed_wdt_start(&wdt->wdd);
+ set_bit(WDOG_HW_RUNNING, &wdt->wdd.status);
+ }
+
+ ret = watchdog_register_device(&wdt->wdd);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, wdt);
+
+ return 0;
+}
+
+static struct platform_driver aspeed_watchdog_driver = {
+ .probe = aspeed_wdt_probe,
+ .remove = aspeed_wdt_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = of_match_ptr(aspeed_wdt_of_table),
+ },
+};
+module_platform_driver(aspeed_watchdog_driver);
+
+MODULE_DESCRIPTION("Aspeed Watchdog Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index 2e6164c4a..4dddd8298 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -82,12 +82,6 @@ static int bcm2835_wdt_stop(struct watchdog_device *wdog)
return 0;
}
-static int bcm2835_wdt_set_timeout(struct watchdog_device *wdog, unsigned int t)
-{
- wdog->timeout = t;
- return 0;
-}
-
static unsigned int bcm2835_wdt_get_timeleft(struct watchdog_device *wdog)
{
struct bcm2835_wdt *wdt = watchdog_get_drvdata(wdog);
@@ -96,15 +90,14 @@ static unsigned int bcm2835_wdt_get_timeleft(struct watchdog_device *wdog)
return WDOG_TICKS_TO_SECS(ret & PM_WDOG_TIME_SET);
}
-static struct watchdog_ops bcm2835_wdt_ops = {
+static const struct watchdog_ops bcm2835_wdt_ops = {
.owner = THIS_MODULE,
.start = bcm2835_wdt_start,
.stop = bcm2835_wdt_stop,
- .set_timeout = bcm2835_wdt_set_timeout,
.get_timeleft = bcm2835_wdt_get_timeleft,
};
-static struct watchdog_info bcm2835_wdt_info = {
+static const struct watchdog_info bcm2835_wdt_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE |
WDIOF_KEEPALIVEPING,
.identity = "Broadcom BCM2835 Watchdog timer",
diff --git a/drivers/watchdog/da9063_wdt.c b/drivers/watchdog/da9063_wdt.c
index a100f6488..5d6b4e5f7 100644
--- a/drivers/watchdog/da9063_wdt.c
+++ b/drivers/watchdog/da9063_wdt.c
@@ -34,6 +34,7 @@ static const unsigned int wdt_timeout[] = { 0, 2, 4, 8, 16, 32, 65, 131 };
#define DA9063_WDT_MIN_TIMEOUT wdt_timeout[DA9063_TWDSCALE_MIN]
#define DA9063_WDT_MAX_TIMEOUT wdt_timeout[DA9063_TWDSCALE_MAX]
#define DA9063_WDG_TIMEOUT wdt_timeout[3]
+#define DA9063_RESET_PROTECTION_MS 256
struct da9063_watchdog {
struct da9063 *da9063;
@@ -171,6 +172,7 @@ static int da9063_wdt_probe(struct platform_device *pdev)
wdt->wdtdev.ops = &da9063_watchdog_ops;
wdt->wdtdev.min_timeout = DA9063_WDT_MIN_TIMEOUT;
wdt->wdtdev.max_timeout = DA9063_WDT_MAX_TIMEOUT;
+ wdt->wdtdev.min_hw_heartbeat_ms = DA9063_RESET_PROTECTION_MS;
wdt->wdtdev.timeout = DA9063_WDG_TIMEOUT;
wdt->wdtdev.parent = &pdev->dev;
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index d4ba262da..1b7e91690 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -45,9 +45,11 @@
#define SIO_REG_DEVREV 0x22 /* Device revision */
#define SIO_REG_MANID 0x23 /* Fintek ID (2 bytes) */
#define SIO_REG_ROM_ADDR_SEL 0x27 /* ROM address select */
+#define SIO_F81866_REG_PORT_SEL 0x27 /* F81866 Multi-Function Register */
#define SIO_REG_MFUNCT1 0x29 /* Multi function select 1 */
#define SIO_REG_MFUNCT2 0x2a /* Multi function select 2 */
#define SIO_REG_MFUNCT3 0x2b /* Multi function select 3 */
+#define SIO_F81866_REG_GPIO1 0x2c /* F81866 GPIO1 Enable Register */
#define SIO_REG_ENABLE 0x30 /* Logical device enable */
#define SIO_REG_ADDR 0x60 /* Logical device address (2 bytes) */
@@ -60,6 +62,7 @@
#define SIO_F71882_ID 0x0541 /* Chipset ID */
#define SIO_F71889_ID 0x0723 /* Chipset ID */
#define SIO_F81865_ID 0x0704 /* Chipset ID */
+#define SIO_F81866_ID 0x1010 /* Chipset ID */
#define F71808FG_REG_WDO_CONF 0xf0
#define F71808FG_REG_WDT_CONF 0xf5
@@ -116,7 +119,8 @@ module_param(start_withtimeout, uint, 0);
MODULE_PARM_DESC(start_withtimeout, "Start watchdog timer on module load with"
" given initial timeout. Zero (default) disables this feature.");
-enum chips { f71808fg, f71858fg, f71862fg, f71869, f71882fg, f71889fg, f81865 };
+enum chips { f71808fg, f71858fg, f71862fg, f71869, f71882fg, f71889fg, f81865,
+ f81866};
static const char *f71808e_names[] = {
"f71808fg",
@@ -126,6 +130,7 @@ static const char *f71808e_names[] = {
"f71882fg",
"f71889fg",
"f81865",
+ "f81866",
};
/* Super-I/O Function prototypes */
@@ -370,6 +375,22 @@ static int watchdog_start(void)
superio_clear_bit(watchdog.sioaddr, SIO_REG_MFUNCT3, 5);
break;
+ case f81866:
+ /* Set pin 70 to WDTRST# */
+ superio_clear_bit(watchdog.sioaddr, SIO_F81866_REG_PORT_SEL,
+ BIT(3) | BIT(0));
+ superio_set_bit(watchdog.sioaddr, SIO_F81866_REG_PORT_SEL,
+ BIT(2));
+ /*
+ * GPIO1 Control Register when 27h BIT3:2 = 01 & BIT0 = 0.
+ * The PIN 70(GPIO15/WDTRST) is controlled by 2Ch:
+ * BIT5: 0 -> WDTRST#
+ * 1 -> GPIO15
+ */
+ superio_clear_bit(watchdog.sioaddr, SIO_F81866_REG_GPIO1,
+ BIT(5));
+ break;
+
default:
/*
* 'default' label to shut up the compiler and catch
@@ -382,7 +403,7 @@ static int watchdog_start(void)
superio_select(watchdog.sioaddr, SIO_F71808FG_LD_WDT);
superio_set_bit(watchdog.sioaddr, SIO_REG_ENABLE, 0);
- if (watchdog.type == f81865)
+ if (watchdog.type == f81865 || watchdog.type == f81866)
superio_set_bit(watchdog.sioaddr, F81865_REG_WDO_CONF,
F81865_FLAG_WDOUT_EN);
else
@@ -788,6 +809,9 @@ static int __init f71808e_find(int sioaddr)
case SIO_F81865_ID:
watchdog.type = f81865;
break;
+ case SIO_F81866_ID:
+ watchdog.type = f81866;
+ break;
default:
pr_info("Unrecognized Fintek device: %04x\n",
(unsigned int)devid);
diff --git a/drivers/watchdog/gpio_wdt.c b/drivers/watchdog/gpio_wdt.c
index ba066e4a7..93457cabc 100644
--- a/drivers/watchdog/gpio_wdt.c
+++ b/drivers/watchdog/gpio_wdt.c
@@ -151,6 +151,8 @@ static int gpio_wdt_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ platform_set_drvdata(pdev, priv);
+
priv->gpio = of_get_gpio_flags(pdev->dev.of_node, 0, &flags);
if (!gpio_is_valid(priv->gpio))
return priv->gpio;
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 0acc6c5f7..54cab189a 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -150,6 +150,7 @@ static inline u32 no_reboot_bit(void)
u32 enable_bit;
switch (iTCO_wdt_private.iTCO_version) {
+ case 5:
case 3:
enable_bit = 0x00000010;
break;
@@ -512,6 +513,7 @@ static int iTCO_wdt_probe(struct platform_device *dev)
/* Clear out the (probably old) status */
switch (iTCO_wdt_private.iTCO_version) {
+ case 5:
case 4:
outw(0x0008, TCO1_STS); /* Clear the Time Out Status bit */
outw(0x0002, TCO2_STS); /* Clear SECOND_TO_STS bit */
diff --git a/drivers/watchdog/max77620_wdt.c b/drivers/watchdog/max77620_wdt.c
new file mode 100644
index 000000000..48b84df2a
--- /dev/null
+++ b/drivers/watchdog/max77620_wdt.c
@@ -0,0 +1,227 @@
+/*
+ * Maxim MAX77620 Watchdog Driver
+ *
+ * Copyright (C) 2016 NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/max77620.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/watchdog.h>
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+
+struct max77620_wdt {
+ struct device *dev;
+ struct regmap *rmap;
+ struct watchdog_device wdt_dev;
+};
+
+static int max77620_wdt_start(struct watchdog_device *wdt_dev)
+{
+ struct max77620_wdt *wdt = watchdog_get_drvdata(wdt_dev);
+
+ return regmap_update_bits(wdt->rmap, MAX77620_REG_CNFGGLBL2,
+ MAX77620_WDTEN, MAX77620_WDTEN);
+}
+
+static int max77620_wdt_stop(struct watchdog_device *wdt_dev)
+{
+ struct max77620_wdt *wdt = watchdog_get_drvdata(wdt_dev);
+
+ return regmap_update_bits(wdt->rmap, MAX77620_REG_CNFGGLBL2,
+ MAX77620_WDTEN, 0);
+}
+
+static int max77620_wdt_ping(struct watchdog_device *wdt_dev)
+{
+ struct max77620_wdt *wdt = watchdog_get_drvdata(wdt_dev);
+
+ return regmap_update_bits(wdt->rmap, MAX77620_REG_CNFGGLBL3,
+ MAX77620_WDTC_MASK, 0x1);
+}
+
+static int max77620_wdt_set_timeout(struct watchdog_device *wdt_dev,
+ unsigned int timeout)
+{
+ struct max77620_wdt *wdt = watchdog_get_drvdata(wdt_dev);
+ unsigned int wdt_timeout;
+ u8 regval;
+ int ret;
+
+ switch (timeout) {
+ case 0 ... 2:
+ regval = MAX77620_TWD_2s;
+ wdt_timeout = 2;
+ break;
+
+ case 3 ... 16:
+ regval = MAX77620_TWD_16s;
+ wdt_timeout = 16;
+ break;
+
+ case 17 ... 64:
+ regval = MAX77620_TWD_64s;
+ wdt_timeout = 64;
+ break;
+
+ default:
+ regval = MAX77620_TWD_128s;
+ wdt_timeout = 128;
+ break;
+ }
+
+ ret = regmap_update_bits(wdt->rmap, MAX77620_REG_CNFGGLBL3,
+ MAX77620_WDTC_MASK, 0x1);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(wdt->rmap, MAX77620_REG_CNFGGLBL2,
+ MAX77620_TWD_MASK, regval);
+ if (ret < 0)
+ return ret;
+
+ wdt_dev->timeout = wdt_timeout;
+
+ return 0;
+}
+
+static const struct watchdog_info max77620_wdt_info = {
+ .identity = "max77620-watchdog",
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+};
+
+static const struct watchdog_ops max77620_wdt_ops = {
+ .start = max77620_wdt_start,
+ .stop = max77620_wdt_stop,
+ .ping = max77620_wdt_ping,
+ .set_timeout = max77620_wdt_set_timeout,
+};
+
+static int max77620_wdt_probe(struct platform_device *pdev)
+{
+ struct max77620_wdt *wdt;
+ struct watchdog_device *wdt_dev;
+ unsigned int regval;
+ int ret;
+
+ wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ if (!wdt)
+ return -ENOMEM;
+
+ wdt->dev = &pdev->dev;
+ wdt->rmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!wdt->rmap) {
+ dev_err(wdt->dev, "Failed to get parent regmap\n");
+ return -ENODEV;
+ }
+
+ wdt_dev = &wdt->wdt_dev;
+ wdt_dev->info = &max77620_wdt_info;
+ wdt_dev->ops = &max77620_wdt_ops;
+ wdt_dev->min_timeout = 2;
+ wdt_dev->max_timeout = 128;
+ wdt_dev->max_hw_heartbeat_ms = 128 * 1000;
+
+ platform_set_drvdata(pdev, wdt);
+
+ /* Enable WD_RST_WK - WDT expire results in a restart */
+ ret = regmap_update_bits(wdt->rmap, MAX77620_REG_ONOFFCNFG2,
+ MAX77620_ONOFFCNFG2_WD_RST_WK,
+ MAX77620_ONOFFCNFG2_WD_RST_WK);
+ if (ret < 0) {
+ dev_err(wdt->dev, "Failed to set WD_RST_WK: %d\n", ret);
+ return ret;
+ }
+
+ /* Set WDT clear in OFF and sleep mode */
+ ret = regmap_update_bits(wdt->rmap, MAX77620_REG_CNFGGLBL2,
+ MAX77620_WDTOFFC | MAX77620_WDTSLPC,
+ MAX77620_WDTOFFC | MAX77620_WDTSLPC);
+ if (ret < 0) {
+ dev_err(wdt->dev, "Failed to set WDT OFF mode: %d\n", ret);
+ return ret;
+ }
+
+ /* Check if WDT running and if yes then set flags properly */
+ ret = regmap_read(wdt->rmap, MAX77620_REG_CNFGGLBL2, &regval);
+ if (ret < 0) {
+ dev_err(wdt->dev, "Failed to read WDT CFG register: %d\n", ret);
+ return ret;
+ }
+
+ switch (regval & MAX77620_TWD_MASK) {
+ case MAX77620_TWD_2s:
+ wdt_dev->timeout = 2;
+ break;
+ case MAX77620_TWD_16s:
+ wdt_dev->timeout = 16;
+ break;
+ case MAX77620_TWD_64s:
+ wdt_dev->timeout = 64;
+ break;
+ default:
+ wdt_dev->timeout = 128;
+ break;
+ }
+
+ if (regval & MAX77620_WDTEN)
+ set_bit(WDOG_HW_RUNNING, &wdt_dev->status);
+
+ watchdog_set_nowayout(wdt_dev, nowayout);
+ watchdog_set_drvdata(wdt_dev, wdt);
+
+ ret = watchdog_register_device(wdt_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "watchdog registration failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int max77620_wdt_remove(struct platform_device *pdev)
+{
+ struct max77620_wdt *wdt = platform_get_drvdata(pdev);
+
+ max77620_wdt_stop(&wdt->wdt_dev);
+ watchdog_unregister_device(&wdt->wdt_dev);
+
+ return 0;
+}
+
+static struct platform_device_id max77620_wdt_devtype[] = {
+ { .name = "max77620-watchdog", },
+ { },
+};
+
+static struct platform_driver max77620_wdt_driver = {
+ .driver = {
+ .name = "max77620-watchdog",
+ },
+ .probe = max77620_wdt_probe,
+ .remove = max77620_wdt_remove,
+ .id_table = max77620_wdt_devtype,
+};
+
+module_platform_driver(max77620_wdt_driver);
+
+MODULE_DESCRIPTION("Max77620 watchdog timer driver");
+
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+ "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/watchdog/meson_gxbb_wdt.c b/drivers/watchdog/meson_gxbb_wdt.c
new file mode 100644
index 000000000..44d180a2c
--- /dev/null
+++ b/drivers/watchdog/meson_gxbb_wdt.c
@@ -0,0 +1,270 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/watchdog.h>
+
+#define DEFAULT_TIMEOUT 30 /* seconds */
+
+#define GXBB_WDT_CTRL_REG 0x0
+#define GXBB_WDT_TCNT_REG 0x8
+#define GXBB_WDT_RSET_REG 0xc
+
+#define GXBB_WDT_CTRL_CLKDIV_EN BIT(25)
+#define GXBB_WDT_CTRL_CLK_EN BIT(24)
+#define GXBB_WDT_CTRL_EE_RESET BIT(21)
+#define GXBB_WDT_CTRL_EN BIT(18)
+#define GXBB_WDT_CTRL_DIV_MASK (BIT(18) - 1)
+
+#define GXBB_WDT_TCNT_SETUP_MASK (BIT(16) - 1)
+#define GXBB_WDT_TCNT_CNT_SHIFT 16
+
+struct meson_gxbb_wdt {
+ void __iomem *reg_base;
+ struct watchdog_device wdt_dev;
+ struct clk *clk;
+};
+
+static int meson_gxbb_wdt_start(struct watchdog_device *wdt_dev)
+{
+ struct meson_gxbb_wdt *data = watchdog_get_drvdata(wdt_dev);
+
+ writel(readl(data->reg_base + GXBB_WDT_CTRL_REG) | GXBB_WDT_CTRL_EN,
+ data->reg_base + GXBB_WDT_CTRL_REG);
+
+ return 0;
+}
+
+static int meson_gxbb_wdt_stop(struct watchdog_device *wdt_dev)
+{
+ struct meson_gxbb_wdt *data = watchdog_get_drvdata(wdt_dev);
+
+ writel(readl(data->reg_base + GXBB_WDT_CTRL_REG) & ~GXBB_WDT_CTRL_EN,
+ data->reg_base + GXBB_WDT_CTRL_REG);
+
+ return 0;
+}
+
+static int meson_gxbb_wdt_ping(struct watchdog_device *wdt_dev)
+{
+ struct meson_gxbb_wdt *data = watchdog_get_drvdata(wdt_dev);
+
+ writel(0, data->reg_base + GXBB_WDT_RSET_REG);
+
+ return 0;
+}
+
+static int meson_gxbb_wdt_set_timeout(struct watchdog_device *wdt_dev,
+ unsigned int timeout)
+{
+ struct meson_gxbb_wdt *data = watchdog_get_drvdata(wdt_dev);
+ unsigned long tcnt = timeout * 1000;
+
+ if (tcnt > GXBB_WDT_TCNT_SETUP_MASK)
+ tcnt = GXBB_WDT_TCNT_SETUP_MASK;
+
+ wdt_dev->timeout = timeout;
+
+ meson_gxbb_wdt_ping(wdt_dev);
+
+ writel(tcnt, data->reg_base + GXBB_WDT_TCNT_REG);
+
+ return 0;
+}
+
+static unsigned int meson_gxbb_wdt_get_timeleft(struct watchdog_device *wdt_dev)
+{
+ struct meson_gxbb_wdt *data = watchdog_get_drvdata(wdt_dev);
+ unsigned long reg;
+
+ reg = readl(data->reg_base + GXBB_WDT_TCNT_REG);
+
+ return ((reg >> GXBB_WDT_TCNT_CNT_SHIFT) -
+ (reg & GXBB_WDT_TCNT_SETUP_MASK)) / 1000;
+}
+
+static const struct watchdog_ops meson_gxbb_wdt_ops = {
+ .start = meson_gxbb_wdt_start,
+ .stop = meson_gxbb_wdt_stop,
+ .ping = meson_gxbb_wdt_ping,
+ .set_timeout = meson_gxbb_wdt_set_timeout,
+ .get_timeleft = meson_gxbb_wdt_get_timeleft,
+};
+
+static const struct watchdog_info meson_gxbb_wdt_info = {
+ .identity = "Meson GXBB Watchdog",
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+};
+
+static int __maybe_unused meson_gxbb_wdt_resume(struct device *dev)
+{
+ struct meson_gxbb_wdt *data = dev_get_drvdata(dev);
+
+ if (watchdog_active(&data->wdt_dev))
+ meson_gxbb_wdt_start(&data->wdt_dev);
+
+ return 0;
+}
+
+static int __maybe_unused meson_gxbb_wdt_suspend(struct device *dev)
+{
+ struct meson_gxbb_wdt *data = dev_get_drvdata(dev);
+
+ if (watchdog_active(&data->wdt_dev))
+ meson_gxbb_wdt_stop(&data->wdt_dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops meson_gxbb_wdt_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(meson_gxbb_wdt_suspend, meson_gxbb_wdt_resume)
+};
+
+static const struct of_device_id meson_gxbb_wdt_dt_ids[] = {
+ { .compatible = "amlogic,meson-gxbb-wdt", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, meson_gxbb_wdt_dt_ids);
+
+static int meson_gxbb_wdt_probe(struct platform_device *pdev)
+{
+ struct meson_gxbb_wdt *data;
+ struct resource *res;
+ int ret;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->reg_base))
+ return PTR_ERR(data->reg_base);
+
+ data->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(data->clk))
+ return PTR_ERR(data->clk);
+
+ clk_prepare_enable(data->clk);
+
+ platform_set_drvdata(pdev, data);
+
+ data->wdt_dev.parent = &pdev->dev;
+ data->wdt_dev.info = &meson_gxbb_wdt_info;
+ data->wdt_dev.ops = &meson_gxbb_wdt_ops;
+ data->wdt_dev.max_hw_heartbeat_ms = GXBB_WDT_TCNT_SETUP_MASK;
+ data->wdt_dev.min_timeout = 1;
+ data->wdt_dev.timeout = DEFAULT_TIMEOUT;
+ watchdog_set_drvdata(&data->wdt_dev, data);
+
+ /* Setup with 1ms timebase */
+ writel(((clk_get_rate(data->clk) / 1000) & GXBB_WDT_CTRL_DIV_MASK) |
+ GXBB_WDT_CTRL_EE_RESET |
+ GXBB_WDT_CTRL_CLK_EN |
+ GXBB_WDT_CTRL_CLKDIV_EN,
+ data->reg_base + GXBB_WDT_CTRL_REG);
+
+ meson_gxbb_wdt_set_timeout(&data->wdt_dev, data->wdt_dev.timeout);
+
+ ret = watchdog_register_device(&data->wdt_dev);
+ if (ret) {
+ clk_disable_unprepare(data->clk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int meson_gxbb_wdt_remove(struct platform_device *pdev)
+{
+ struct meson_gxbb_wdt *data = platform_get_drvdata(pdev);
+
+ watchdog_unregister_device(&data->wdt_dev);
+
+ clk_disable_unprepare(data->clk);
+
+ return 0;
+}
+
+static void meson_gxbb_wdt_shutdown(struct platform_device *pdev)
+{
+ struct meson_gxbb_wdt *data = platform_get_drvdata(pdev);
+
+ meson_gxbb_wdt_stop(&data->wdt_dev);
+}
+
+static struct platform_driver meson_gxbb_wdt_driver = {
+ .probe = meson_gxbb_wdt_probe,
+ .remove = meson_gxbb_wdt_remove,
+ .shutdown = meson_gxbb_wdt_shutdown,
+ .driver = {
+ .name = "meson-gxbb-wdt",
+ .pm = &meson_gxbb_wdt_pm_ops,
+ .of_match_table = meson_gxbb_wdt_dt_ids,
+ },
+};
+
+module_platform_driver(meson_gxbb_wdt_driver);
+
+MODULE_ALIAS("platform:meson-gxbb-wdt");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_DESCRIPTION("Amlogic Meson GXBB Watchdog timer driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c
index bd917bb75..a0fabf6f9 100644
--- a/drivers/watchdog/nv_tco.c
+++ b/drivers/watchdog/nv_tco.c
@@ -294,6 +294,8 @@ static const struct pci_device_id tco_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS,
PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP78S_SMBUS,
+ PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0, }, /* End of list */
diff --git a/drivers/watchdog/pcwd.c b/drivers/watchdog/pcwd.c
index e936f15dc..3ad5206d7 100644
--- a/drivers/watchdog/pcwd.c
+++ b/drivers/watchdog/pcwd.c
@@ -992,19 +992,7 @@ static struct isa_driver pcwd_isa_driver = {
},
};
-static int __init pcwd_init_module(void)
-{
- return isa_register_driver(&pcwd_isa_driver, PCWD_ISA_NR_CARDS);
-}
-
-static void __exit pcwd_cleanup_module(void)
-{
- isa_unregister_driver(&pcwd_isa_driver);
- pr_info("Watchdog Module Unloaded\n");
-}
-
-module_init(pcwd_init_module);
-module_exit(pcwd_cleanup_module);
+module_isa_driver(pcwd_isa_driver, PCWD_ISA_NR_CARDS);
MODULE_AUTHOR("Ken Hollis <kenji@bitgate.com>, "
"Wim Van Sebroeck <wim@iguana.be>");
diff --git a/drivers/watchdog/pic32-dmt.c b/drivers/watchdog/pic32-dmt.c
index 962f58c03..c797305f8 100644
--- a/drivers/watchdog/pic32-dmt.c
+++ b/drivers/watchdog/pic32-dmt.c
@@ -176,8 +176,8 @@ static int pic32_dmt_probe(struct platform_device *pdev)
struct watchdog_device *wdd = &pic32_dmt_wdd;
dmt = devm_kzalloc(&pdev->dev, sizeof(*dmt), GFP_KERNEL);
- if (IS_ERR(dmt))
- return PTR_ERR(dmt);
+ if (!dmt)
+ return -ENOMEM;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dmt->regs = devm_ioremap_resource(&pdev->dev, mem);
@@ -245,7 +245,6 @@ static struct platform_driver pic32_dmt_driver = {
.remove = pic32_dmt_remove,
.driver = {
.name = "pic32-dmt",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(pic32_dmt_of_ids),
}
};
diff --git a/drivers/watchdog/pic32-wdt.c b/drivers/watchdog/pic32-wdt.c
index 6047aa89a..e2761068d 100644
--- a/drivers/watchdog/pic32-wdt.c
+++ b/drivers/watchdog/pic32-wdt.c
@@ -174,8 +174,8 @@ static int pic32_wdt_drv_probe(struct platform_device *pdev)
struct resource *mem;
wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
- if (IS_ERR(wdt))
- return PTR_ERR(wdt);
+ if (!wdt)
+ return -ENOMEM;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
wdt->regs = devm_ioremap_resource(&pdev->dev, mem);
@@ -183,8 +183,8 @@ static int pic32_wdt_drv_probe(struct platform_device *pdev)
return PTR_ERR(wdt->regs);
wdt->rst_base = devm_ioremap(&pdev->dev, PIC32_BASE_RESET, 0x10);
- if (IS_ERR(wdt->rst_base))
- return PTR_ERR(wdt->rst_base);
+ if (!wdt->rst_base)
+ return -ENOMEM;
wdt->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(wdt->clk)) {
@@ -251,7 +251,6 @@ static struct platform_driver pic32_wdt_driver = {
.remove = pic32_wdt_drv_remove,
.driver = {
.name = "pic32-wdt",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(pic32_wdt_dt_ids),
}
};
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
index a043fa4f6..5796b5d1b 100644
--- a/drivers/watchdog/qcom-wdt.c
+++ b/drivers/watchdog/qcom-wdt.c
@@ -18,19 +18,45 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
+#include <linux/of_device.h>
+
+enum wdt_reg {
+ WDT_RST,
+ WDT_EN,
+ WDT_STS,
+ WDT_BARK_TIME,
+ WDT_BITE_TIME,
+};
-#define WDT_RST 0x38
-#define WDT_EN 0x40
-#define WDT_STS 0x44
-#define WDT_BITE_TIME 0x5C
+static const u32 reg_offset_data_apcs_tmr[] = {
+ [WDT_RST] = 0x38,
+ [WDT_EN] = 0x40,
+ [WDT_STS] = 0x44,
+ [WDT_BARK_TIME] = 0x4C,
+ [WDT_BITE_TIME] = 0x5C,
+};
+
+static const u32 reg_offset_data_kpss[] = {
+ [WDT_RST] = 0x4,
+ [WDT_EN] = 0x8,
+ [WDT_STS] = 0xC,
+ [WDT_BARK_TIME] = 0x10,
+ [WDT_BITE_TIME] = 0x14,
+};
struct qcom_wdt {
struct watchdog_device wdd;
struct clk *clk;
unsigned long rate;
void __iomem *base;
+ const u32 *layout;
};
+static void __iomem *wdt_addr(struct qcom_wdt *wdt, enum wdt_reg reg)
+{
+ return wdt->base + wdt->layout[reg];
+}
+
static inline
struct qcom_wdt *to_qcom_wdt(struct watchdog_device *wdd)
{
@@ -41,10 +67,11 @@ static int qcom_wdt_start(struct watchdog_device *wdd)
{
struct qcom_wdt *wdt = to_qcom_wdt(wdd);
- writel(0, wdt->base + WDT_EN);
- writel(1, wdt->base + WDT_RST);
- writel(wdd->timeout * wdt->rate, wdt->base + WDT_BITE_TIME);
- writel(1, wdt->base + WDT_EN);
+ writel(0, wdt_addr(wdt, WDT_EN));
+ writel(1, wdt_addr(wdt, WDT_RST));
+ writel(wdd->timeout * wdt->rate, wdt_addr(wdt, WDT_BARK_TIME));
+ writel(wdd->timeout * wdt->rate, wdt_addr(wdt, WDT_BITE_TIME));
+ writel(1, wdt_addr(wdt, WDT_EN));
return 0;
}
@@ -52,7 +79,7 @@ static int qcom_wdt_stop(struct watchdog_device *wdd)
{
struct qcom_wdt *wdt = to_qcom_wdt(wdd);
- writel(0, wdt->base + WDT_EN);
+ writel(0, wdt_addr(wdt, WDT_EN));
return 0;
}
@@ -60,7 +87,7 @@ static int qcom_wdt_ping(struct watchdog_device *wdd)
{
struct qcom_wdt *wdt = to_qcom_wdt(wdd);
- writel(1, wdt->base + WDT_RST);
+ writel(1, wdt_addr(wdt, WDT_RST));
return 0;
}
@@ -83,10 +110,11 @@ static int qcom_wdt_restart(struct watchdog_device *wdd, unsigned long action,
*/
timeout = 128 * wdt->rate / 1000;
- writel(0, wdt->base + WDT_EN);
- writel(1, wdt->base + WDT_RST);
- writel(timeout, wdt->base + WDT_BITE_TIME);
- writel(1, wdt->base + WDT_EN);
+ writel(0, wdt_addr(wdt, WDT_EN));
+ writel(1, wdt_addr(wdt, WDT_RST));
+ writel(timeout, wdt_addr(wdt, WDT_BARK_TIME));
+ writel(timeout, wdt_addr(wdt, WDT_BITE_TIME));
+ writel(1, wdt_addr(wdt, WDT_EN));
/*
* Actually make sure the above sequence hits hardware before sleeping.
@@ -119,9 +147,16 @@ static int qcom_wdt_probe(struct platform_device *pdev)
struct qcom_wdt *wdt;
struct resource *res;
struct device_node *np = pdev->dev.of_node;
+ const u32 *regs;
u32 percpu_offset;
int ret;
+ regs = of_device_get_match_data(&pdev->dev);
+ if (!regs) {
+ dev_err(&pdev->dev, "Unsupported QCOM WDT module\n");
+ return -ENODEV;
+ }
+
wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
return -ENOMEM;
@@ -172,6 +207,7 @@ static int qcom_wdt_probe(struct platform_device *pdev)
wdt->wdd.min_timeout = 1;
wdt->wdd.max_timeout = 0x10000000U / wdt->rate;
wdt->wdd.parent = &pdev->dev;
+ wdt->layout = regs;
if (readl(wdt->base + WDT_STS) & 1)
wdt->wdd.bootstatus = WDIOF_CARDRESET;
@@ -208,8 +244,9 @@ static int qcom_wdt_remove(struct platform_device *pdev)
}
static const struct of_device_id qcom_wdt_of_table[] = {
- { .compatible = "qcom,kpss-timer" },
- { .compatible = "qcom,scss-timer" },
+ { .compatible = "qcom,kpss-timer", .data = reg_offset_data_apcs_tmr },
+ { .compatible = "qcom,scss-timer", .data = reg_offset_data_apcs_tmr },
+ { .compatible = "qcom,kpss-wdt", .data = reg_offset_data_kpss },
{ },
};
MODULE_DEVICE_TABLE(of, qcom_wdt_of_table);
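
The qcom-wdt rework is the standard pattern for one driver serving
several register layouts: each compatible string carries its offset
table in the .data member of its of_device_id entry, and probe fetches
it with of_device_get_match_data(). A condensed sketch using the tables
from the diff:

    #include <linux/of_device.h>

    /* Returns the .data pointer of the matched of_device_id entry,
     * i.e. reg_offset_data_apcs_tmr or reg_offset_data_kpss here,
     * or NULL if the device matched an entry without data. */
    const u32 *regs = of_device_get_match_data(&pdev->dev);

    if (!regs)
            return -ENODEV;
    wdt->layout = regs;
    writel(1, wdt->base + wdt->layout[WDT_EN]);
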
diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
index ad383f6f1..ce0c38bd0 100644
--- a/drivers/watchdog/sbsa_gwdt.c
+++ b/drivers/watchdog/sbsa_gwdt.c
@@ -180,15 +180,6 @@ static int sbsa_gwdt_keepalive(struct watchdog_device *wdd)
return 0;
}
-static unsigned int sbsa_gwdt_status(struct watchdog_device *wdd)
-{
- struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd);
- u32 status = readl(gwdt->control_base + SBSA_GWDT_WCS);
-
- /* is the watchdog timer running? */
- return (status & SBSA_GWDT_WCS_EN) << WDOG_ACTIVE;
-}
-
static int sbsa_gwdt_start(struct watchdog_device *wdd)
{
struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd);
@@ -228,7 +219,6 @@ static struct watchdog_ops sbsa_gwdt_ops = {
.owner = THIS_MODULE,
.start = sbsa_gwdt_start,
.stop = sbsa_gwdt_stop,
- .status = sbsa_gwdt_status,
.ping = sbsa_gwdt_keepalive,
.set_timeout = sbsa_gwdt_set_timeout,
.get_timeleft = sbsa_gwdt_get_timeleft,
@@ -273,7 +263,7 @@ static int sbsa_gwdt_probe(struct platform_device *pdev)
wdd->info = &sbsa_gwdt_info;
wdd->ops = &sbsa_gwdt_ops;
wdd->min_timeout = 1;
- wdd->max_timeout = U32_MAX / gwdt->clk;
+ wdd->max_hw_heartbeat_ms = U32_MAX / gwdt->clk * 1000;
wdd->timeout = DEFAULT_TIMEOUT;
watchdog_set_drvdata(wdd, gwdt);
watchdog_set_nowayout(wdd, nowayout);
@@ -283,6 +273,8 @@ static int sbsa_gwdt_probe(struct platform_device *pdev)
dev_warn(dev, "System reset by WDT.\n");
wdd->bootstatus |= WDIOF_CARDRESET;
}
+ if (status & SBSA_GWDT_WCS_EN)
+ set_bit(WDOG_HW_RUNNING, &wdd->status);
if (action) {
irq = platform_get_irq(pdev, 0);
@@ -310,7 +302,7 @@ static int sbsa_gwdt_probe(struct platform_device *pdev)
* the timeout is (WOR * 2), so the maximum timeout should be doubled.
*/
if (!action)
- wdd->max_timeout *= 2;
+ wdd->max_hw_heartbeat_ms *= 2;
watchdog_init_timeout(wdd, timeout, dev);
/*
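
Two framework semantics carry this sbsa_gwdt conversion. First,
max_hw_heartbeat_ms replaces max_timeout: instead of capping what
userspace may request, it tells the core how long the hardware can
count, and the core splits longer user timeouts into intermediate pings
from a kernel worker. Second, WDOG_HW_RUNNING reports that firmware left
the watchdog ticking, so the core feeds it until userspace opens the
device. A sketch of the probe-side contract:

    /* Hardware limit only; user timeouts may exceed this. */
    wdd->max_hw_heartbeat_ms = U32_MAX / gwdt->clk * 1000;

    /* Dog already running at boot: the core keeps it alive. */
    if (status & SBSA_GWDT_WCS_EN)
            set_bit(WDOG_HW_RUNNING, &wdd->status);
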
diff --git a/drivers/watchdog/sirfsoc_wdt.c b/drivers/watchdog/sirfsoc_wdt.c
index d0578ab2e..3050a0031 100644
--- a/drivers/watchdog/sirfsoc_wdt.c
+++ b/drivers/watchdog/sirfsoc_wdt.c
@@ -39,13 +39,18 @@ MODULE_PARM_DESC(timeout, "Default watchdog timeout (in seconds)");
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+static void __iomem *sirfsoc_wdt_base(struct watchdog_device *wdd)
+{
+ return (void __iomem __force *)watchdog_get_drvdata(wdd);
+}
+
static unsigned int sirfsoc_wdt_gettimeleft(struct watchdog_device *wdd)
{
u32 counter, match;
void __iomem *wdt_base;
int time_left;
- wdt_base = watchdog_get_drvdata(wdd);
+ wdt_base = sirfsoc_wdt_base(wdd);
counter = readl(wdt_base + SIRFSOC_TIMER_COUNTER_LO);
match = readl(wdt_base +
SIRFSOC_TIMER_MATCH_0 + (SIRFSOC_TIMER_WDT_INDEX << 2));
@@ -61,7 +66,7 @@ static int sirfsoc_wdt_updatetimeout(struct watchdog_device *wdd)
void __iomem *wdt_base;
timeout_ticks = wdd->timeout * CLOCK_FREQ;
- wdt_base = watchdog_get_drvdata(wdd);
+ wdt_base = sirfsoc_wdt_base(wdd);
/* Enable the latch before reading the LATCH_LO register */
writel(1, wdt_base + SIRFSOC_TIMER_LATCH);
@@ -79,7 +84,7 @@ static int sirfsoc_wdt_updatetimeout(struct watchdog_device *wdd)
static int sirfsoc_wdt_enable(struct watchdog_device *wdd)
{
- void __iomem *wdt_base = watchdog_get_drvdata(wdd);
+ void __iomem *wdt_base = sirfsoc_wdt_base(wdd);
sirfsoc_wdt_updatetimeout(wdd);
/*
@@ -96,7 +101,7 @@ static int sirfsoc_wdt_enable(struct watchdog_device *wdd)
static int sirfsoc_wdt_disable(struct watchdog_device *wdd)
{
- void __iomem *wdt_base = watchdog_get_drvdata(wdd);
+ void __iomem *wdt_base = sirfsoc_wdt_base(wdd);
writel(0, wdt_base + SIRFSOC_TIMER_WATCHDOG_EN);
writel(readl(wdt_base + SIRFSOC_TIMER_INT_EN)
@@ -150,7 +155,7 @@ static int sirfsoc_wdt_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- watchdog_set_drvdata(&sirfsoc_wdd, base);
+ watchdog_set_drvdata(&sirfsoc_wdd, (__force void *)base);
watchdog_init_timeout(&sirfsoc_wdd, timeout, &pdev->dev);
watchdog_set_nowayout(&sirfsoc_wdd, nowayout);
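
The __force casts in the sirfsoc changes exist purely for sparse:
watchdog_{set,get}_drvdata() traffic in plain void *, while the mapping
is tagged __iomem, and __force marks the address-space conversion as
intentional instead of silently dropping the annotation. The round trip:

    /* store: strip the __iomem tag for the void * slot */
    watchdog_set_drvdata(&sirfsoc_wdd, (__force void *)base);

    /* load: restore the tag before readl()/writel() */
    static void __iomem *sirfsoc_wdt_base(struct watchdog_device *wdd)
    {
            return (void __iomem __force *)watchdog_get_drvdata(wdd);
    }
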
diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c
index 99a06f9e3..b067edf24 100644
--- a/drivers/watchdog/softdog.c
+++ b/drivers/watchdog/softdog.c
@@ -17,36 +17,19 @@
*
* Software only watchdog driver. Unlike its big brother the WDT501P
* driver this won't always recover a failed machine.
- *
- * 03/96: Angelo Haritsis <ah@doc.ic.ac.uk> :
- * Modularised.
- * Added soft_margin; use upon insmod to change the timer delay.
- * NB: uses same minor as wdt (WATCHDOG_MINOR); we could use separate
- * minors.
- *
- * 19980911 Alan Cox
- * Made SMP safe for 2.3.x
- *
- * 20011127 Joel Becker (jlbec@evilplan.org>
- * Added soft_noboot; Allows testing the softdog trigger without
- * requiring a recompile.
- * Added WDIOC_GETTIMEOUT and WDIOC_SETTIMOUT.
- *
- * 20020530 Joel Becker <joel.becker@oracle.com>
- * Added Matt Domsch's nowayout module option.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/types.h>
+#include <linux/reboot.h>
#include <linux/timer.h>
+#include <linux/types.h>
#include <linux/watchdog.h>
-#include <linux/reboot.h>
-#include <linux/init.h>
-#include <linux/jiffies.h>
-#include <linux/kernel.h>
#define TIMER_MARGIN 60 /* Default is 60 seconds */
static unsigned int soft_margin = TIMER_MARGIN; /* in seconds */
@@ -71,25 +54,12 @@ module_param(soft_panic, int, 0);
MODULE_PARM_DESC(soft_panic,
"Softdog action, set to 1 to panic, 0 to reboot (default=0)");
-/*
- * Our timer
- */
-
-static void watchdog_fire(unsigned long);
-
-static struct timer_list watchdog_ticktock =
- TIMER_INITIALIZER(watchdog_fire, 0, 0);
-
-/*
- * If the timer expires..
- */
-
-static void watchdog_fire(unsigned long data)
+static void softdog_fire(unsigned long data)
{
module_put(THIS_MODULE);
- if (soft_noboot)
+ if (soft_noboot) {
pr_crit("Triggered - Reboot ignored\n");
- else if (soft_panic) {
+ } else if (soft_panic) {
pr_crit("Initiating panic\n");
panic("Software Watchdog Timer expired");
} else {
@@ -99,35 +69,24 @@ static void watchdog_fire(unsigned long data)
}
}
-/*
- * Softdog operations
- */
+static struct timer_list softdog_ticktock =
+ TIMER_INITIALIZER(softdog_fire, 0, 0);
static int softdog_ping(struct watchdog_device *w)
{
- if (!mod_timer(&watchdog_ticktock, jiffies+(w->timeout*HZ)))
+ if (!mod_timer(&softdog_ticktock, jiffies + (w->timeout * HZ)))
__module_get(THIS_MODULE);
return 0;
}
static int softdog_stop(struct watchdog_device *w)
{
- if (del_timer(&watchdog_ticktock))
+ if (del_timer(&softdog_ticktock))
module_put(THIS_MODULE);
return 0;
}
-static int softdog_set_timeout(struct watchdog_device *w, unsigned int t)
-{
- w->timeout = t;
- return 0;
-}
-
-/*
- * Kernel Interfaces
- */
-
static struct watchdog_info softdog_info = {
.identity = "Software Watchdog",
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
@@ -137,29 +96,21 @@ static struct watchdog_ops softdog_ops = {
.owner = THIS_MODULE,
.start = softdog_ping,
.stop = softdog_stop,
- .set_timeout = softdog_set_timeout,
};
static struct watchdog_device softdog_dev = {
.info = &softdog_info,
.ops = &softdog_ops,
.min_timeout = 1,
- .max_timeout = 0xFFFF
+ .max_timeout = 65535,
+ .timeout = TIMER_MARGIN,
};
-static int __init watchdog_init(void)
+static int __init softdog_init(void)
{
int ret;
- /* Check that the soft_margin value is within it's range;
- if not reset to the default */
- if (soft_margin < 1 || soft_margin > 65535) {
- pr_info("soft_margin must be 0 < soft_margin < 65536, using %d\n",
- TIMER_MARGIN);
- return -EINVAL;
- }
- softdog_dev.timeout = soft_margin;
-
+ watchdog_init_timeout(&softdog_dev, soft_margin, NULL);
watchdog_set_nowayout(&softdog_dev, nowayout);
watchdog_stop_on_reboot(&softdog_dev);
@@ -167,19 +118,18 @@ static int __init watchdog_init(void)
if (ret)
return ret;
- pr_info("Software Watchdog Timer: 0.08 initialized. soft_noboot=%d soft_margin=%d sec soft_panic=%d (nowayout=%d)\n",
- soft_noboot, soft_margin, soft_panic, nowayout);
+ pr_info("initialized. soft_noboot=%d soft_margin=%d sec soft_panic=%d (nowayout=%d)\n",
+ soft_noboot, softdog_dev.timeout, soft_panic, nowayout);
return 0;
}
+module_init(softdog_init);
-static void __exit watchdog_exit(void)
+static void __exit softdog_exit(void)
{
watchdog_unregister_device(&softdog_dev);
}
-
-module_init(watchdog_init);
-module_exit(watchdog_exit);
+module_exit(softdog_exit);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("Software Watchdog Device Driver");
diff --git a/drivers/watchdog/tangox_wdt.c b/drivers/watchdog/tangox_wdt.c
index cfbed7e05..202c4b9cc 100644
--- a/drivers/watchdog/tangox_wdt.c
+++ b/drivers/watchdog/tangox_wdt.c
@@ -149,7 +149,7 @@ static int tangox_wdt_probe(struct platform_device *pdev)
dev->wdt.ops = &tangox_wdt_ops;
dev->wdt.timeout = DEFAULT_TIMEOUT;
dev->wdt.min_timeout = 1;
- dev->wdt.max_timeout = (U32_MAX - 1) / dev->clk_rate;
+ dev->wdt.max_hw_heartbeat_ms = (U32_MAX - 1) / dev->clk_rate;
watchdog_init_timeout(&dev->wdt, timeout, &pdev->dev);
watchdog_set_nowayout(&dev->wdt, nowayout);
@@ -170,7 +170,7 @@ static int tangox_wdt_probe(struct platform_device *pdev)
* already running.
*/
if (readl(dev->base + WD_COUNTER)) {
- set_bit(WDOG_ACTIVE, &dev->wdt.status);
+ set_bit(WDOG_HW_RUNNING, &dev->wdt.status);
tangox_wdt_start(&dev->wdt);
}
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index 7c3ba58ae..6abb83cd7 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -88,7 +88,7 @@ static void watchdog_check_min_max_timeout(struct watchdog_device *wdd)
* Check that we have valid min and max timeout values, if
* not reset them both to 0 (=not used or unknown)
*/
- if (wdd->min_timeout > wdd->max_timeout) {
+ if (!wdd->max_hw_heartbeat_ms && wdd->min_timeout > wdd->max_timeout) {
pr_info("Invalid min and max timeout values, resetting to 0!\n");
wdd->min_timeout = 0;
wdd->max_timeout = 0;
@@ -329,6 +329,43 @@ void watchdog_unregister_device(struct watchdog_device *wdd)
EXPORT_SYMBOL_GPL(watchdog_unregister_device);
+static void devm_watchdog_unregister_device(struct device *dev, void *res)
+{
+ watchdog_unregister_device(*(struct watchdog_device **)res);
+}
+
+/**
+ * devm_watchdog_register_device() - resource managed watchdog_register_device()
+ * @dev: device that is registering this watchdog device
+ * @wdd: watchdog device
+ *
+ * Managed watchdog_register_device(). For watchdog device registered by this
+ * function, watchdog_unregister_device() is automatically called on driver
+ * detach. See watchdog_register_device() for more information.
+ */
+int devm_watchdog_register_device(struct device *dev,
+ struct watchdog_device *wdd)
+{
+ struct watchdog_device **rcwdd;
+ int ret;
+
+ rcwdd = devres_alloc(devm_watchdog_unregister_device, sizeof(*wdd),
+ GFP_KERNEL);
+ if (!rcwdd)
+ return -ENOMEM;
+
+ ret = watchdog_register_device(wdd);
+ if (!ret) {
+ *rcwdd = wdd;
+ devres_add(dev, rcwdd);
+ } else {
+ devres_free(rcwdd);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_watchdog_register_device);
+
static int __init watchdog_deferred_registration(void)
{
mutex_lock(&wtd_deferred_reg_mutex);
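
A driver using the new managed helper can drop the watchdog teardown
from its .remove callback entirely. A minimal sketch of the call site
(foo_wdt naming is illustrative, not an in-tree driver):

    static int foo_wdt_probe(struct platform_device *pdev)
    {
            struct foo_wdt *priv;   /* hypothetical driver state */
            int ret;

            priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
            if (!priv)
                    return -ENOMEM;

            /* ... clocks, ioremap, wdd ops/timeout setup ... */

            ret = devm_watchdog_register_device(&pdev->dev, &priv->wdd);
            if (ret)
                    return ret;

            return 0;       /* unregistration happens on detach */
    }
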
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 3595cffa2..040bf8382 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -69,6 +69,7 @@ struct watchdog_core_data {
unsigned long status; /* Internal status bits */
#define _WDOG_DEV_OPEN 0 /* Opened ? */
#define _WDOG_ALLOW_RELEASE 1 /* Did we receive the magic char ? */
+#define _WDOG_KEEPALIVE 2 /* Did we receive a keepalive ? */
};
/* the dev_t structure to store the dynamically allocated watchdog devices */
@@ -92,9 +93,13 @@ static inline bool watchdog_need_worker(struct watchdog_device *wdd)
* thus is aware that the framework supports generating heartbeat
* requests.
* - Userspace requests a longer timeout than the hardware can handle.
+ *
+ * Alternatively, if userspace has not opened the watchdog
+ * device, we take care of feeding the watchdog if it is
+ * running.
*/
- return hm && ((watchdog_active(wdd) && t > hm) ||
- (t && !watchdog_active(wdd) && watchdog_hw_running(wdd)));
+ return (hm && watchdog_active(wdd) && t > hm) ||
+ (t && !watchdog_active(wdd) && watchdog_hw_running(wdd));
}
static long watchdog_next_keepalive(struct watchdog_device *wdd)
@@ -107,7 +112,7 @@ static long watchdog_next_keepalive(struct watchdog_device *wdd)
unsigned int hw_heartbeat_ms;
virt_timeout = wd_data->last_keepalive + msecs_to_jiffies(timeout_ms);
- hw_heartbeat_ms = min(timeout_ms, wdd->max_hw_heartbeat_ms);
+ hw_heartbeat_ms = min_not_zero(timeout_ms, wdd->max_hw_heartbeat_ms);
keepalive_interval = msecs_to_jiffies(hw_heartbeat_ms / 2);
if (!watchdog_active(wdd))
@@ -180,6 +185,8 @@ static int watchdog_ping(struct watchdog_device *wdd)
if (!watchdog_active(wdd) && !watchdog_hw_running(wdd))
return 0;
+ set_bit(_WDOG_KEEPALIVE, &wd_data->status);
+
wd_data->last_keepalive = jiffies;
return __watchdog_ping(wdd);
}
@@ -219,6 +226,8 @@ static int watchdog_start(struct watchdog_device *wdd)
if (watchdog_active(wdd))
return 0;
+ set_bit(_WDOG_KEEPALIVE, &wd_data->status);
+
started_at = jiffies;
if (watchdog_hw_running(wdd) && wdd->ops->ping)
err = wdd->ops->ping(wdd);
@@ -258,10 +267,12 @@ static int watchdog_stop(struct watchdog_device *wdd)
return -EBUSY;
}
- if (wdd->ops->stop)
+ if (wdd->ops->stop) {
+ clear_bit(WDOG_HW_RUNNING, &wdd->status);
err = wdd->ops->stop(wdd);
- else
+ } else {
set_bit(WDOG_HW_RUNNING, &wdd->status);
+ }
if (err == 0) {
clear_bit(WDOG_ACTIVE, &wdd->status);
@@ -282,10 +293,27 @@ static int watchdog_stop(struct watchdog_device *wdd)
static unsigned int watchdog_get_status(struct watchdog_device *wdd)
{
- if (!wdd->ops->status)
- return 0;
+ struct watchdog_core_data *wd_data = wdd->wd_data;
+ unsigned int status;
+
+ if (wdd->ops->status)
+ status = wdd->ops->status(wdd);
+ else
+ status = wdd->bootstatus & (WDIOF_CARDRESET |
+ WDIOF_OVERHEAT |
+ WDIOF_FANFAULT |
+ WDIOF_EXTERN1 |
+ WDIOF_EXTERN2 |
+ WDIOF_POWERUNDER |
+ WDIOF_POWEROVER);
- return wdd->ops->status(wdd);
+ if (test_bit(_WDOG_ALLOW_RELEASE, &wd_data->status))
+ status |= WDIOF_MAGICCLOSE;
+
+ if (test_and_clear_bit(_WDOG_KEEPALIVE, &wd_data->status))
+ status |= WDIOF_KEEPALIVEPING;
+
+ return status;
}
/*
@@ -361,7 +389,7 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
status = watchdog_get_status(wdd);
mutex_unlock(&wd_data->lock);
- return sprintf(buf, "%u\n", status);
+ return sprintf(buf, "0x%x\n", status);
}
static DEVICE_ATTR_RO(status);
@@ -429,9 +457,7 @@ static umode_t wdt_is_visible(struct kobject *kobj, struct attribute *attr,
struct watchdog_device *wdd = dev_get_drvdata(dev);
umode_t mode = attr->mode;
- if (attr == &dev_attr_status.attr && !wdd->ops->status)
- mode = 0;
- else if (attr == &dev_attr_timeleft.attr && !wdd->ops->get_timeleft)
+ if (attr == &dev_attr_timeleft.attr && !wdd->ops->get_timeleft)
mode = 0;
return mode;
@@ -948,17 +974,22 @@ int __init watchdog_dev_init(void)
err = class_register(&watchdog_class);
if (err < 0) {
pr_err("couldn't register class\n");
- return err;
+ goto err_register;
}
err = alloc_chrdev_region(&watchdog_devt, 0, MAX_DOGS, "watchdog");
if (err < 0) {
pr_err("watchdog: unable to allocate char dev region\n");
- class_unregister(&watchdog_class);
- return err;
+ goto err_alloc;
}
return 0;
+
+err_alloc:
+ class_unregister(&watchdog_class);
+err_register:
+ destroy_workqueue(watchdog_wq);
+ return err;
}
/*
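
After these watchdog_dev changes, every device reports a status word:
drivers with a .status callback keep providing it, and the core
synthesises one for the rest from bootstatus plus its own bookkeeping
(_WDOG_ALLOW_RELEASE maps to WDIOF_MAGICCLOSE; _WDOG_KEEPALIVE is a
read-and-clear WDIOF_KEEPALIVEPING). The sysfs file now prints hex so
the WDIOF_* mask is legible; decoding sketch:

    #include <linux/watchdog.h>     /* WDIOF_* flag values */

    unsigned int status = 0x8020;   /* example sysfs reading */

    if (status & WDIOF_CARDRESET)           /* 0x0020 */
            pr_info("last reset was the watchdog\n");
    if (status & WDIOF_KEEPALIVEPING)       /* 0x8000 */
            pr_info("pinged since the last status read\n");
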
diff --git a/drivers/watchdog/ziirave_wdt.c b/drivers/watchdog/ziirave_wdt.c
index cbe373de3..fa1efef3c 100644
--- a/drivers/watchdog/ziirave_wdt.c
+++ b/drivers/watchdog/ziirave_wdt.c
@@ -339,7 +339,7 @@ static int ziirave_wdt_remove(struct i2c_client *client)
}
static struct i2c_device_id ziirave_wdt_id[] = {
- { "ziirave-wdt", 0 },
+ { "rave-wdt", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ziirave_wdt_id);
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 979a83172..f15bb3b78 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -275,7 +275,7 @@ config XEN_HAVE_PVMMU
config XEN_EFI
def_bool y
- depends on X86_64 && EFI
+ depends on (ARM || ARM64 || X86_64) && EFI
config XEN_AUTO_XLATE
def_bool y
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 030e91b38..8feab810a 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -10,6 +10,7 @@ CFLAGS_features.o := $(nostackp)
CFLAGS_efi.o += -fshort-wchar
LDFLAGS += $(call ld-option, --no-wchar-size-warning)
+dom0-$(CONFIG_ARM64) += arm-device.o
dom0-$(CONFIG_PCI) += pci.o
dom0-$(CONFIG_USB_SUPPORT) += dbgp.o
dom0-$(CONFIG_XEN_ACPI) += acpi.o $(xen-pad-y)
diff --git a/drivers/xen/arm-device.c b/drivers/xen/arm-device.c
new file mode 100644
index 000000000..778acf80a
--- /dev/null
+++ b/drivers/xen/arm-device.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2015, Linaro Limited, Shannon Zhao
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include <xen/xen.h>
+#include <xen/page.h>
+#include <xen/interface/memory.h>
+#include <asm/xen/hypervisor.h>
+#include <asm/xen/hypercall.h>
+
+static int xen_unmap_device_mmio(const struct resource *resources,
+ unsigned int count)
+{
+ unsigned int i, j, nr;
+ int rc = 0;
+ const struct resource *r;
+ struct xen_remove_from_physmap xrp;
+
+ for (i = 0; i < count; i++) {
+ r = &resources[i];
+ nr = DIV_ROUND_UP(resource_size(r), XEN_PAGE_SIZE);
+ if ((resource_type(r) != IORESOURCE_MEM) || (nr == 0))
+ continue;
+
+ for (j = 0; j < nr; j++) {
+ xrp.domid = DOMID_SELF;
+ xrp.gpfn = XEN_PFN_DOWN(r->start) + j;
+ rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap,
+ &xrp);
+ if (rc)
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+static int xen_map_device_mmio(const struct resource *resources,
+ unsigned int count)
+{
+ unsigned int i, j, nr;
+ int rc = 0;
+ const struct resource *r;
+ xen_pfn_t *gpfns;
+ xen_ulong_t *idxs;
+ int *errs;
+ struct xen_add_to_physmap_range xatp;
+
+ for (i = 0; i < count; i++) {
+ r = &resources[i];
+ nr = DIV_ROUND_UP(resource_size(r), XEN_PAGE_SIZE);
+ if ((resource_type(r) != IORESOURCE_MEM) || (nr == 0))
+ continue;
+
+ gpfns = kzalloc(sizeof(xen_pfn_t) * nr, GFP_KERNEL);
+ idxs = kzalloc(sizeof(xen_ulong_t) * nr, GFP_KERNEL);
+ errs = kzalloc(sizeof(int) * nr, GFP_KERNEL);
+ if (!gpfns || !idxs || !errs) {
+ kfree(gpfns);
+ kfree(idxs);
+ kfree(errs);
+ rc = -ENOMEM;
+ goto unmap;
+ }
+
+ for (j = 0; j < nr; j++) {
+ /*
+ * The regions are always mapped 1:1 to DOM0 and this is
+ * fine because the memory map for DOM0 is the same as
+ * the host (except for the RAM).
+ */
+ gpfns[j] = XEN_PFN_DOWN(r->start) + j;
+ idxs[j] = XEN_PFN_DOWN(r->start) + j;
+ }
+
+ xatp.domid = DOMID_SELF;
+ xatp.size = nr;
+ xatp.space = XENMAPSPACE_dev_mmio;
+
+ set_xen_guest_handle(xatp.gpfns, gpfns);
+ set_xen_guest_handle(xatp.idxs, idxs);
+ set_xen_guest_handle(xatp.errs, errs);
+
+ rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
+ kfree(gpfns);
+ kfree(idxs);
+ kfree(errs);
+ if (rc)
+ goto unmap;
+ }
+
+ return rc;
+
+unmap:
+ xen_unmap_device_mmio(resources, i);
+ return rc;
+}
+
+static int xen_platform_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct platform_device *pdev = to_platform_device(data);
+ int r = 0;
+
+ if (pdev->num_resources == 0 || pdev->resource == NULL)
+ return NOTIFY_OK;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ r = xen_map_device_mmio(pdev->resource, pdev->num_resources);
+ break;
+ case BUS_NOTIFY_DEL_DEVICE:
+ r = xen_unmap_device_mmio(pdev->resource, pdev->num_resources);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+ if (r)
+ dev_err(&pdev->dev, "Platform: Failed to %s device %s MMIO!\n",
+ action == BUS_NOTIFY_ADD_DEVICE ? "map" :
+ (action == BUS_NOTIFY_DEL_DEVICE ? "unmap" : "?"),
+ pdev->name);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block platform_device_nb = {
+ .notifier_call = xen_platform_notifier,
+};
+
+static int __init register_xen_platform_notifier(void)
+{
+ if (!xen_initial_domain() || acpi_disabled)
+ return 0;
+
+ return bus_register_notifier(&platform_bus_type, &platform_device_nb);
+}
+
+arch_initcall(register_xen_platform_notifier);
+
+#ifdef CONFIG_ARM_AMBA
+#include <linux/amba/bus.h>
+
+static int xen_amba_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct amba_device *adev = to_amba_device(data);
+ int r = 0;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ r = xen_map_device_mmio(&adev->res, 1);
+ break;
+ case BUS_NOTIFY_DEL_DEVICE:
+ r = xen_unmap_device_mmio(&adev->res, 1);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+ if (r)
+ dev_err(&adev->dev, "AMBA: Failed to %s device %s MMIO!\n",
+ action == BUS_NOTIFY_ADD_DEVICE ? "map" :
+ (action == BUS_NOTIFY_DEL_DEVICE ? "unmap" : "?"),
+ adev->dev.init_name);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block amba_device_nb = {
+ .notifier_call = xen_amba_notifier,
+};
+
+static int __init register_xen_amba_notifier(void)
+{
+ if (!xen_initial_domain() || acpi_disabled)
+ return 0;
+
+ return bus_register_notifier(&amba_bustype, &amba_device_nb);
+}
+
+arch_initcall(register_xen_amba_notifier);
+#endif
diff --git a/drivers/xen/efi.c b/drivers/xen/efi.c
index e9d213544..22f71ffd3 100644
--- a/drivers/xen/efi.c
+++ b/drivers/xen/efi.c
@@ -38,7 +38,7 @@
#define efi_data(op) (op.u.efi_runtime_call)
-static efi_status_t xen_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
+efi_status_t xen_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
struct xen_platform_op op = INIT_EFI_OP(get_time);
@@ -59,8 +59,9 @@ static efi_status_t xen_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
return efi_data(op).status;
}
+EXPORT_SYMBOL_GPL(xen_efi_get_time);
-static efi_status_t xen_efi_set_time(efi_time_t *tm)
+efi_status_t xen_efi_set_time(efi_time_t *tm)
{
struct xen_platform_op op = INIT_EFI_OP(set_time);
@@ -72,10 +73,10 @@ static efi_status_t xen_efi_set_time(efi_time_t *tm)
return efi_data(op).status;
}
+EXPORT_SYMBOL_GPL(xen_efi_set_time);
-static efi_status_t xen_efi_get_wakeup_time(efi_bool_t *enabled,
- efi_bool_t *pending,
- efi_time_t *tm)
+efi_status_t xen_efi_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
+ efi_time_t *tm)
{
struct xen_platform_op op = INIT_EFI_OP(get_wakeup_time);
@@ -95,8 +96,9 @@ static efi_status_t xen_efi_get_wakeup_time(efi_bool_t *enabled,
return efi_data(op).status;
}
+EXPORT_SYMBOL_GPL(xen_efi_get_wakeup_time);
-static efi_status_t xen_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
+efi_status_t xen_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
{
struct xen_platform_op op = INIT_EFI_OP(set_wakeup_time);
@@ -113,12 +115,11 @@ static efi_status_t xen_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
return efi_data(op).status;
}
+EXPORT_SYMBOL_GPL(xen_efi_set_wakeup_time);
-static efi_status_t xen_efi_get_variable(efi_char16_t *name,
- efi_guid_t *vendor,
- u32 *attr,
- unsigned long *data_size,
- void *data)
+efi_status_t xen_efi_get_variable(efi_char16_t *name, efi_guid_t *vendor,
+ u32 *attr, unsigned long *data_size,
+ void *data)
{
struct xen_platform_op op = INIT_EFI_OP(get_variable);
@@ -138,10 +139,11 @@ static efi_status_t xen_efi_get_variable(efi_char16_t *name,
return efi_data(op).status;
}
+EXPORT_SYMBOL_GPL(xen_efi_get_variable);
-static efi_status_t xen_efi_get_next_variable(unsigned long *name_size,
- efi_char16_t *name,
- efi_guid_t *vendor)
+efi_status_t xen_efi_get_next_variable(unsigned long *name_size,
+ efi_char16_t *name,
+ efi_guid_t *vendor)
{
struct xen_platform_op op = INIT_EFI_OP(get_next_variable_name);
@@ -161,12 +163,11 @@ static efi_status_t xen_efi_get_next_variable(unsigned long *name_size,
return efi_data(op).status;
}
+EXPORT_SYMBOL_GPL(xen_efi_get_next_variable);
-static efi_status_t xen_efi_set_variable(efi_char16_t *name,
- efi_guid_t *vendor,
- u32 attr,
- unsigned long data_size,
- void *data)
+efi_status_t xen_efi_set_variable(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size,
+ void *data)
{
struct xen_platform_op op = INIT_EFI_OP(set_variable);
@@ -183,11 +184,11 @@ static efi_status_t xen_efi_set_variable(efi_char16_t *name,
return efi_data(op).status;
}
+EXPORT_SYMBOL_GPL(xen_efi_set_variable);
-static efi_status_t xen_efi_query_variable_info(u32 attr,
- u64 *storage_space,
- u64 *remaining_space,
- u64 *max_variable_size)
+efi_status_t xen_efi_query_variable_info(u32 attr, u64 *storage_space,
+ u64 *remaining_space,
+ u64 *max_variable_size)
{
struct xen_platform_op op = INIT_EFI_OP(query_variable_info);
@@ -205,8 +206,9 @@ static efi_status_t xen_efi_query_variable_info(u32 attr,
return efi_data(op).status;
}
+EXPORT_SYMBOL_GPL(xen_efi_query_variable_info);
-static efi_status_t xen_efi_get_next_high_mono_count(u32 *count)
+efi_status_t xen_efi_get_next_high_mono_count(u32 *count)
{
struct xen_platform_op op = INIT_EFI_OP(get_next_high_monotonic_count);
@@ -217,10 +219,10 @@ static efi_status_t xen_efi_get_next_high_mono_count(u32 *count)
return efi_data(op).status;
}
+EXPORT_SYMBOL_GPL(xen_efi_get_next_high_mono_count);
-static efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules,
- unsigned long count,
- unsigned long sg_list)
+efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules,
+ unsigned long count, unsigned long sg_list)
{
struct xen_platform_op op = INIT_EFI_OP(update_capsule);
@@ -237,11 +239,11 @@ static efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules,
return efi_data(op).status;
}
+EXPORT_SYMBOL_GPL(xen_efi_update_capsule);
-static efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules,
- unsigned long count,
- u64 *max_size,
- int *reset_type)
+efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules,
+ unsigned long count, u64 *max_size,
+ int *reset_type)
{
struct xen_platform_op op = INIT_EFI_OP(query_capsule_capabilities);
@@ -260,111 +262,4 @@ static efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules,
return efi_data(op).status;
}
-
-static efi_char16_t vendor[100] __initdata;
-
-static efi_system_table_t efi_systab_xen __initdata = {
- .hdr = {
- .signature = EFI_SYSTEM_TABLE_SIGNATURE,
- .revision = 0, /* Initialized later. */
- .headersize = 0, /* Ignored by Linux Kernel. */
- .crc32 = 0, /* Ignored by Linux Kernel. */
- .reserved = 0
- },
- .fw_vendor = EFI_INVALID_TABLE_ADDR, /* Initialized later. */
- .fw_revision = 0, /* Initialized later. */
- .con_in_handle = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
- .con_in = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
- .con_out_handle = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
- .con_out = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
- .stderr_handle = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
- .stderr = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */
- .runtime = (efi_runtime_services_t *)EFI_INVALID_TABLE_ADDR,
- /* Not used under Xen. */
- .boottime = (efi_boot_services_t *)EFI_INVALID_TABLE_ADDR,
- /* Not used under Xen. */
- .nr_tables = 0, /* Initialized later. */
- .tables = EFI_INVALID_TABLE_ADDR /* Initialized later. */
-};
-
-static const struct efi efi_xen __initconst = {
- .systab = NULL, /* Initialized later. */
- .runtime_version = 0, /* Initialized later. */
- .mps = EFI_INVALID_TABLE_ADDR,
- .acpi = EFI_INVALID_TABLE_ADDR,
- .acpi20 = EFI_INVALID_TABLE_ADDR,
- .smbios = EFI_INVALID_TABLE_ADDR,
- .smbios3 = EFI_INVALID_TABLE_ADDR,
- .sal_systab = EFI_INVALID_TABLE_ADDR,
- .boot_info = EFI_INVALID_TABLE_ADDR,
- .hcdp = EFI_INVALID_TABLE_ADDR,
- .uga = EFI_INVALID_TABLE_ADDR,
- .uv_systab = EFI_INVALID_TABLE_ADDR,
- .fw_vendor = EFI_INVALID_TABLE_ADDR,
- .runtime = EFI_INVALID_TABLE_ADDR,
- .config_table = EFI_INVALID_TABLE_ADDR,
- .get_time = xen_efi_get_time,
- .set_time = xen_efi_set_time,
- .get_wakeup_time = xen_efi_get_wakeup_time,
- .set_wakeup_time = xen_efi_set_wakeup_time,
- .get_variable = xen_efi_get_variable,
- .get_next_variable = xen_efi_get_next_variable,
- .set_variable = xen_efi_set_variable,
- .query_variable_info = xen_efi_query_variable_info,
- .update_capsule = xen_efi_update_capsule,
- .query_capsule_caps = xen_efi_query_capsule_caps,
- .get_next_high_mono_count = xen_efi_get_next_high_mono_count,
- .reset_system = NULL, /* Functionality provided by Xen. */
- .set_virtual_address_map = NULL, /* Not used under Xen. */
- .flags = 0 /* Initialized later. */
-};
-
-efi_system_table_t __init *xen_efi_probe(void)
-{
- struct xen_platform_op op = {
- .cmd = XENPF_firmware_info,
- .u.firmware_info = {
- .type = XEN_FW_EFI_INFO,
- .index = XEN_FW_EFI_CONFIG_TABLE
- }
- };
- union xenpf_efi_info *info = &op.u.firmware_info.u.efi_info;
-
- if (!xen_initial_domain() || HYPERVISOR_platform_op(&op) < 0)
- return NULL;
-
- /* Here we know that Xen runs on EFI platform. */
-
- efi = efi_xen;
-
- efi_systab_xen.tables = info->cfg.addr;
- efi_systab_xen.nr_tables = info->cfg.nent;
-
- op.cmd = XENPF_firmware_info;
- op.u.firmware_info.type = XEN_FW_EFI_INFO;
- op.u.firmware_info.index = XEN_FW_EFI_VENDOR;
- info->vendor.bufsz = sizeof(vendor);
- set_xen_guest_handle(info->vendor.name, vendor);
-
- if (HYPERVISOR_platform_op(&op) == 0) {
- efi_systab_xen.fw_vendor = __pa_symbol(vendor);
- efi_systab_xen.fw_revision = info->vendor.revision;
- } else
- efi_systab_xen.fw_vendor = __pa_symbol(L"UNKNOWN");
-
- op.cmd = XENPF_firmware_info;
- op.u.firmware_info.type = XEN_FW_EFI_INFO;
- op.u.firmware_info.index = XEN_FW_EFI_VERSION;
-
- if (HYPERVISOR_platform_op(&op) == 0)
- efi_systab_xen.hdr.revision = info->version;
-
- op.cmd = XENPF_firmware_info;
- op.u.firmware_info.type = XEN_FW_EFI_INFO;
- op.u.firmware_info.index = XEN_FW_EFI_RT_VERSION;
-
- if (HYPERVISOR_platform_op(&op) == 0)
- efi.runtime_version = info->version;
-
- return &efi_systab_xen;
-}
+EXPORT_SYMBOL_GPL(xen_efi_query_capsule_caps);
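
With the probe/table code gone from this file and the accessors
exported, each architecture now owns the wiring of the EFI function
table. Roughly what the arch-side consumer does (a sketch of
xen_efi_runtime_setup(), not the verbatim arch code):

    efi.get_time            = xen_efi_get_time;
    efi.set_time            = xen_efi_set_time;
    efi.get_wakeup_time     = xen_efi_get_wakeup_time;
    efi.set_wakeup_time     = xen_efi_set_wakeup_time;
    efi.get_variable        = xen_efi_get_variable;
    efi.get_next_variable   = xen_efi_get_next_variable;
    efi.set_variable        = xen_efi_set_variable;
    efi.query_variable_info = xen_efi_query_variable_info;
    efi.update_capsule      = xen_efi_update_capsule;
    efi.query_capsule_caps  = xen_efi_query_capsule_caps;
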
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 71d49a95f..d5dbdb9d2 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -895,7 +895,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
handle_percpu_irq, "ipi");
- bind_ipi.vcpu = cpu;
+ bind_ipi.vcpu = xen_vcpu_nr(cpu);
if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
&bind_ipi) != 0)
BUG();
@@ -991,7 +991,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
handle_edge_irq, "virq");
bind_virq.virq = virq;
- bind_virq.vcpu = cpu;
+ bind_virq.vcpu = xen_vcpu_nr(cpu);
ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
&bind_virq);
if (ret == 0)
@@ -1211,7 +1211,8 @@ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
#ifdef CONFIG_X86
if (unlikely(vector == XEN_NMI_VECTOR)) {
- int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, cpu, NULL);
+ int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, xen_vcpu_nr(cpu),
+ NULL);
if (rc < 0)
printk(KERN_WARNING "Sending nmi to CPU%d failed (rc:%d)\n", cpu, rc);
return;
@@ -1318,7 +1319,7 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
/* Send future instances of this interrupt to other vcpu. */
bind_vcpu.port = evtchn;
- bind_vcpu.vcpu = tcpu;
+ bind_vcpu.vcpu = xen_vcpu_nr(tcpu);
/*
* Mask the event while changing the VCPU binding to prevent
@@ -1458,7 +1459,7 @@ static void restore_cpu_virqs(unsigned int cpu)
/* Get a new binding from Xen. */
bind_virq.virq = virq;
- bind_virq.vcpu = cpu;
+ bind_virq.vcpu = xen_vcpu_nr(cpu);
if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
&bind_virq) != 0)
BUG();
@@ -1482,7 +1483,7 @@ static void restore_cpu_ipis(unsigned int cpu)
BUG_ON(ipi_from_irq(irq) != ipi);
/* Get a new binding from Xen. */
- bind_ipi.vcpu = cpu;
+ bind_ipi.vcpu = xen_vcpu_nr(cpu);
if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
&bind_ipi) != 0)
BUG();
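
All event-channel hypercalls above now translate through xen_vcpu_nr()
because the Xen vCPU number is no longer assumed equal to the Linux CPU
number. The mapping itself is a trivial per-CPU lookup (as in
include/xen/xen-ops.h in this release):

    DECLARE_PER_CPU(uint32_t, xen_vcpu_id);

    static inline uint32_t xen_vcpu_nr(int cpu)
    {
            return per_cpu(xen_vcpu_id, cpu);
    }
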
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index 9289a1771..266c2c733 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -113,7 +113,7 @@ static int init_control_block(int cpu,
init_control.control_gfn = virt_to_gfn(control_block);
init_control.offset = 0;
- init_control.vcpu = cpu;
+ init_control.vcpu = xen_vcpu_nr(cpu);
return HYPERVISOR_event_channel_op(EVTCHNOP_init_control, &init_control);
}
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index f4edd6df3..e8c7f09d0 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -55,6 +55,7 @@
#include <xen/xen.h>
#include <xen/events.h>
#include <xen/evtchn.h>
+#include <xen/xen-ops.h>
#include <asm/xen/hypervisor.h>
struct per_user_data {
@@ -73,8 +74,12 @@ struct per_user_data {
wait_queue_head_t evtchn_wait;
struct fasync_struct *evtchn_async_queue;
const char *name;
+
+ domid_t restrict_domid;
};
+#define UNRESTRICTED_DOMID ((domid_t)-1)
+
struct user_evtchn {
struct rb_node node;
struct per_user_data *user;
@@ -443,12 +448,16 @@ static long evtchn_ioctl(struct file *file,
struct ioctl_evtchn_bind_virq bind;
struct evtchn_bind_virq bind_virq;
+ rc = -EACCES;
+ if (u->restrict_domid != UNRESTRICTED_DOMID)
+ break;
+
rc = -EFAULT;
if (copy_from_user(&bind, uarg, sizeof(bind)))
break;
bind_virq.virq = bind.virq;
- bind_virq.vcpu = 0;
+ bind_virq.vcpu = xen_vcpu_nr(0);
rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
&bind_virq);
if (rc != 0)
@@ -468,6 +477,11 @@ static long evtchn_ioctl(struct file *file,
if (copy_from_user(&bind, uarg, sizeof(bind)))
break;
+ rc = -EACCES;
+ if (u->restrict_domid != UNRESTRICTED_DOMID &&
+ u->restrict_domid != bind.remote_domain)
+ break;
+
bind_interdomain.remote_dom = bind.remote_domain;
bind_interdomain.remote_port = bind.remote_port;
rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
@@ -485,6 +499,10 @@ static long evtchn_ioctl(struct file *file,
struct ioctl_evtchn_bind_unbound_port bind;
struct evtchn_alloc_unbound alloc_unbound;
+ rc = -EACCES;
+ if (u->restrict_domid != UNRESTRICTED_DOMID)
+ break;
+
rc = -EFAULT;
if (copy_from_user(&bind, uarg, sizeof(bind)))
break;
@@ -553,6 +571,27 @@ static long evtchn_ioctl(struct file *file,
break;
}
+ case IOCTL_EVTCHN_RESTRICT_DOMID: {
+ struct ioctl_evtchn_restrict_domid ierd;
+
+ rc = -EACCES;
+ if (u->restrict_domid != UNRESTRICTED_DOMID)
+ break;
+
+ rc = -EFAULT;
+ if (copy_from_user(&ierd, uarg, sizeof(ierd)))
+ break;
+
+ rc = -EINVAL;
+ if (ierd.domid == 0 || ierd.domid >= DOMID_FIRST_RESERVED)
+ break;
+
+ u->restrict_domid = ierd.domid;
+ rc = 0;
+
+ break;
+ }
+
default:
rc = -ENOSYS;
break;
@@ -601,6 +640,8 @@ static int evtchn_open(struct inode *inode, struct file *filp)
mutex_init(&u->ring_cons_mutex);
spin_lock_init(&u->ring_prod_lock);
+ u->restrict_domid = UNRESTRICTED_DOMID;
+
filp->private_data = u;
return nonseekable_open(inode, filp);
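
IOCTL_EVTCHN_RESTRICT_DOMID is a one-way ratchet: once a handle is
restricted, interdomain binds are limited to the named domain, and
VIRQ/unbound-port binds fail with -EACCES. Userspace sketch, assuming
the matching uapi header from this series is installed:

    #include <sys/ioctl.h>
    #include <xen/evtchn.h>

    static int restrict_handle(int fd, domid_t domid)
    {
            struct ioctl_evtchn_restrict_domid ierd = {
                    .domid = domid, /* not 0, not >= DOMID_FIRST_RESERVED */
            };

            /* EACCES if this handle was already restricted. */
            return ioctl(fd, IOCTL_EVTCHN_RESTRICT_DOMID, &ierd);
    }
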
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index 4547a91bc..7a47c4c9f 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -504,7 +504,7 @@ static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma)
struct gntalloc_file_private_data *priv = filp->private_data;
struct gntalloc_vma_private_data *vm_priv;
struct gntalloc_gref *gref;
- int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ int count = vma_pages(vma);
int rv, i;
if (!(vma->vm_flags & VM_SHARED)) {
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 67939578c..bb952121e 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -982,7 +982,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
{
struct gntdev_priv *priv = flip->private_data;
int index = vma->vm_pgoff;
- int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ int count = vma_pages(vma);
struct grant_map *map;
int i, err = -EINVAL;
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index df2e6f783..702040fe2 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -582,7 +582,7 @@ static long privcmd_ioctl(struct file *file,
static void privcmd_close(struct vm_area_struct *vma)
{
struct page **pages = vma->vm_private_data;
- int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ int numpgs = vma_pages(vma);
int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
int rc;
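
The three vma_pages() conversions (gntalloc, gntdev, privcmd) are pure
cleanups; the helper from include/linux/mm.h is exactly the open-coded
shift it replaces:

    static inline unsigned long vma_pages(struct vm_area_struct *vma)
    {
            return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
    }

privcmd keeps its separate XEN_PAGE_SHIFT computation for numgfns
because Xen's page granularity can be smaller than the kernel's (4K
grants on a 64K-page ARM64 kernel), so the two counts may differ.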
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 7399782c0..87e6035c9 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -294,7 +294,7 @@ error:
void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *ret;
int order = get_order(size);
@@ -346,7 +346,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);
void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
- dma_addr_t dev_addr, struct dma_attrs *attrs)
+ dma_addr_t dev_addr, unsigned long attrs)
{
int order = get_order(size);
phys_addr_t phys;
@@ -378,7 +378,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);
dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
phys_addr_t map, phys = page_to_phys(page) + offset;
dma_addr_t dev_addr = xen_phys_to_bus(phys);
@@ -434,7 +434,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
*/
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
phys_addr_t paddr = xen_bus_to_phys(dev_addr);
@@ -462,7 +462,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
}
@@ -538,7 +538,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device);
int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
@@ -599,7 +599,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs);
void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
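
These signature changes track the 4.8 tree-wide conversion of DMA
attributes from struct dma_attrs * to a plain unsigned long bitmask of
DMA_ATTR_* flags. Callers now OR flags together instead of building an
attribute object; for example:

    #include <linux/dma-mapping.h>

    /* Before: declare struct dma_attrs, dma_set_attr(), pass &attrs.
     * Now: pass the mask directly (0 means no special attributes). */
    buf = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
                          DMA_ATTR_WRITE_COMBINE);
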
diff --git a/drivers/xen/time.c b/drivers/xen/time.c
index 71078425c..ac5f23fca 100644
--- a/drivers/xen/time.c
+++ b/drivers/xen/time.c
@@ -6,6 +6,7 @@
#include <linux/math64.h>
#include <linux/gfp.h>
+#include <asm/paravirt.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
@@ -46,27 +47,31 @@ static u64 get64(const u64 *p)
return ret;
}
-/*
- * Runstate accounting
- */
-void xen_get_runstate_snapshot(struct vcpu_runstate_info *res)
+static void xen_get_runstate_snapshot_cpu(struct vcpu_runstate_info *res,
+ unsigned int cpu)
{
u64 state_time;
struct vcpu_runstate_info *state;
BUG_ON(preemptible());
- state = this_cpu_ptr(&xen_runstate);
+ state = per_cpu_ptr(&xen_runstate, cpu);
- /*
- * The runstate info is always updated by the hypervisor on
- * the current CPU, so there's no need to use anything
- * stronger than a compiler barrier when fetching it.
- */
do {
state_time = get64(&state->state_entry_time);
+ rmb(); /* Hypervisor might update data. */
*res = READ_ONCE(*state);
- } while (get64(&state->state_entry_time) != state_time);
+ rmb(); /* Hypervisor might update data. */
+ } while (get64(&state->state_entry_time) != state_time ||
+ (state_time & XEN_RUNSTATE_UPDATE));
+}
+
+/*
+ * Runstate accounting
+ */
+void xen_get_runstate_snapshot(struct vcpu_runstate_info *res)
+{
+ xen_get_runstate_snapshot_cpu(res, smp_processor_id());
}
/* return true when a vcpu could run but has no real cpu to run on */
@@ -75,6 +80,14 @@ bool xen_vcpu_stolen(int vcpu)
return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
}
+u64 xen_steal_clock(int cpu)
+{
+ struct vcpu_runstate_info state;
+
+ xen_get_runstate_snapshot_cpu(&state, cpu);
+ return state.time[RUNSTATE_runnable] + state.time[RUNSTATE_offline];
+}
+
void xen_setup_runstate_info(int cpu)
{
struct vcpu_register_runstate_memory_area area;
@@ -82,7 +95,20 @@ void xen_setup_runstate_info(int cpu)
area.addr.v = &per_cpu(xen_runstate, cpu);
if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
- cpu, &area))
+ xen_vcpu_nr(cpu), &area))
BUG();
}
+void __init xen_time_setup_guest(void)
+{
+ bool xen_runstate_remote;
+
+ xen_runstate_remote = !HYPERVISOR_vm_assist(VMASST_CMD_enable,
+ VMASST_TYPE_runstate_update_flag);
+
+ pv_time_ops.steal_clock = xen_steal_clock;
+
+ static_key_slow_inc(&paravirt_steal_enabled);
+ if (xen_runstate_remote)
+ static_key_slow_inc(&paravirt_steal_rq_enabled);
+}
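
xen_steal_clock() plugs Xen into the generic paravirt steal-time
accounting: time a vCPU spent runnable-but-not-running or offline is
reported as stolen, and paravirt_steal_rq_enabled is switched on only
when the hypervisor honours VMASST_TYPE_runstate_update_flag, which
keeps remote (run-queue) runstate reads consistent. Simplified sketch
of the scheduler-side consumer (after kernel/sched/cputime.c):

    if (static_key_false(&paravirt_steal_enabled)) {
            u64 steal;

            steal = paravirt_steal_clock(smp_processor_id());
            steal -= this_rq()->prev_steal_time;
            /* ... convert to cputime, account it as stolen,
             * advance prev_steal_time ... */
    }
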
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
index 6a25533da..9e9286d08 100644
--- a/drivers/xen/xen-pciback/conf_space.c
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -148,7 +148,7 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
const struct config_field_entry *cfg_entry;
const struct config_field *field;
- int req_start, req_end, field_start, field_end;
+ int field_start, field_end;
/* if read fails for any reason, return 0
* (as if device didn't respond) */
u32 value = 0, tmp_val;
@@ -178,12 +178,10 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
field = cfg_entry->field;
- req_start = offset;
- req_end = offset + size;
field_start = OFFSET(cfg_entry);
field_end = OFFSET(cfg_entry) + field->size;
- if (req_end > field_start && field_end > req_start) {
+ if (offset + size > field_start && field_end > offset) {
err = conf_space_read(dev, cfg_entry, field_start,
&tmp_val);
if (err)
@@ -191,7 +189,7 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
value = merge_value(value, tmp_val,
get_mask(field->size),
- field_start - req_start);
+ field_start - offset);
}
}
@@ -211,7 +209,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
const struct config_field_entry *cfg_entry;
const struct config_field *field;
u32 tmp_val;
- int req_start, req_end, field_start, field_end;
+ int field_start, field_end;
if (unlikely(verbose_request))
printk(KERN_DEBUG
@@ -224,21 +222,17 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
field = cfg_entry->field;
- req_start = offset;
- req_end = offset + size;
field_start = OFFSET(cfg_entry);
field_end = OFFSET(cfg_entry) + field->size;
- if (req_end > field_start && field_end > req_start) {
- tmp_val = 0;
-
- err = xen_pcibk_config_read(dev, field_start,
- field->size, &tmp_val);
+ if (offset + size > field_start && field_end > offset) {
+ err = conf_space_read(dev, cfg_entry, field_start,
+ &tmp_val);
if (err)
break;
tmp_val = merge_value(tmp_val, value, get_mask(size),
- req_start - field_start);
+ offset - field_start);
err = conf_space_write(dev, cfg_entry, field_start,
tmp_val);
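
The rewritten condition is the usual half-open interval overlap test,
with req_start/req_end folded away: [offset, offset + size) intersects
[field_start, field_end) exactly when both ends cross. As a helper:

    /* Example: a 4-byte access at offset 6 against a field at
     * [8, 10) overlaps, since 6 + 4 > 8 and 10 > 6. */
    static bool cfg_overlaps(int offset, int size,
                             int field_start, int field_end)
    {
            return offset + size > field_start && field_end > offset;
    }

The write path also stops recursing through xen_pcibk_config_read() to
fetch the current value and reads the one overlapping field directly
via conf_space_read(), which avoids re-walking the whole field list.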
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
index 9ead1c2ff..5fbfd9cfb 100644
--- a/drivers/xen/xen-pciback/conf_space_header.c
+++ b/drivers/xen/xen-pciback/conf_space_header.c
@@ -209,58 +209,35 @@ static int bar_read(struct pci_dev *dev, int offset, u32 * value, void *data)
return 0;
}
-static inline void read_dev_bar(struct pci_dev *dev,
- struct pci_bar_info *bar_info, int offset,
- u32 len_mask)
+static void *bar_init(struct pci_dev *dev, int offset)
{
- int pos;
- struct resource *res = dev->resource;
+ unsigned int pos;
+ const struct resource *res = dev->resource;
+ struct pci_bar_info *bar = kzalloc(sizeof(*bar), GFP_KERNEL);
+
+ if (!bar)
+ return ERR_PTR(-ENOMEM);
if (offset == PCI_ROM_ADDRESS || offset == PCI_ROM_ADDRESS1)
pos = PCI_ROM_RESOURCE;
else {
pos = (offset - PCI_BASE_ADDRESS_0) / 4;
- if (pos && ((res[pos - 1].flags & (PCI_BASE_ADDRESS_SPACE |
- PCI_BASE_ADDRESS_MEM_TYPE_MASK)) ==
- (PCI_BASE_ADDRESS_SPACE_MEMORY |
- PCI_BASE_ADDRESS_MEM_TYPE_64))) {
- bar_info->val = res[pos - 1].start >> 32;
- bar_info->len_val = -resource_size(&res[pos - 1]) >> 32;
- return;
+ if (pos && (res[pos - 1].flags & IORESOURCE_MEM_64)) {
+ bar->val = res[pos - 1].start >> 32;
+ bar->len_val = -resource_size(&res[pos - 1]) >> 32;
+ return bar;
}
}
if (!res[pos].flags ||
(res[pos].flags & (IORESOURCE_DISABLED | IORESOURCE_UNSET |
IORESOURCE_BUSY)))
- return;
-
- bar_info->val = res[pos].start |
- (res[pos].flags & PCI_REGION_FLAG_MASK);
- bar_info->len_val = -resource_size(&res[pos]) |
- (res[pos].flags & PCI_REGION_FLAG_MASK);
-}
+ return bar;
-static void *bar_init(struct pci_dev *dev, int offset)
-{
- struct pci_bar_info *bar = kzalloc(sizeof(*bar), GFP_KERNEL);
-
- if (!bar)
- return ERR_PTR(-ENOMEM);
-
- read_dev_bar(dev, bar, offset, ~0);
-
- return bar;
-}
-
-static void *rom_init(struct pci_dev *dev, int offset)
-{
- struct pci_bar_info *bar = kzalloc(sizeof(*bar), GFP_KERNEL);
-
- if (!bar)
- return ERR_PTR(-ENOMEM);
-
- read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE);
+ bar->val = res[pos].start |
+ (res[pos].flags & PCI_REGION_FLAG_MASK);
+ bar->len_val = -resource_size(&res[pos]) |
+ (res[pos].flags & PCI_REGION_FLAG_MASK);
return bar;
}
@@ -383,7 +360,7 @@ static const struct config_field header_common[] = {
{ \
.offset = reg_offset, \
.size = 4, \
- .init = rom_init, \
+ .init = bar_init, \
.reset = bar_reset, \
.release = bar_release, \
.u.dw.read = bar_read, \
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
index 4d529f3e4..7af369b6a 100644
--- a/drivers/xen/xen-pciback/pciback.h
+++ b/drivers/xen/xen-pciback/pciback.h
@@ -55,7 +55,6 @@ struct xen_pcibk_dev_data {
/* Used by XenBus and xen_pcibk_ops.c */
extern wait_queue_head_t xen_pcibk_aer_wait_queue;
-extern struct workqueue_struct *xen_pcibk_wq;
/* Used by pcistub.c and conf_space_quirks.c */
extern struct list_head xen_pcibk_quirks;
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index 2f19dd755..f8c77751f 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -310,7 +310,7 @@ void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev)
* already processing a request */
if (test_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)
&& !test_and_set_bit(_PDEVF_op_active, &pdev->flags)) {
- queue_work(xen_pcibk_wq, &pdev->op_work);
+ schedule_work(&pdev->op_work);
}
/*_XEN_PCIB_active should have been cleared by pcifront. And also make
sure xen_pcibk is waiting for ack by checking _PCIB_op_pending*/
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index c252eb3f0..5ce878c51 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -17,7 +17,6 @@
#include "pciback.h"
#define INVALID_EVTCHN_IRQ (-1)
-struct workqueue_struct *xen_pcibk_wq;
static bool __read_mostly passthrough;
module_param(passthrough, bool, S_IRUGO);
@@ -76,8 +75,7 @@ static void xen_pcibk_disconnect(struct xen_pcibk_device *pdev)
/* If the driver domain started an op, make sure we complete it
* before releasing the shared memory */
- /* Note, the workqueue does not use spinlocks at all.*/
- flush_workqueue(xen_pcibk_wq);
+ flush_work(&pdev->op_work);
if (pdev->sh_info != NULL) {
xenbus_unmap_ring_vfree(pdev->xdev, pdev->sh_info);
@@ -733,11 +731,6 @@ const struct xen_pcibk_backend *__read_mostly xen_pcibk_backend;
int __init xen_pcibk_xenbus_register(void)
{
- xen_pcibk_wq = create_workqueue("xen_pciback_workqueue");
- if (!xen_pcibk_wq) {
- pr_err("%s: create xen_pciback_workqueue failed\n", __func__);
- return -EFAULT;
- }
xen_pcibk_backend = &xen_pcibk_vpci_backend;
if (passthrough)
xen_pcibk_backend = &xen_pcibk_passthrough_backend;
@@ -747,6 +740,5 @@ int __init xen_pcibk_xenbus_register(void)
void __exit xen_pcibk_xenbus_unregister(void)
{
- destroy_workqueue(xen_pcibk_wq);
xenbus_unregister_driver(&xen_pcibk_driver);
}
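
Dropping the private workqueue works because the disconnect path only
needs to wait for one specific item, which flush_work() does on the
system workqueue, and it removes the create_workqueue() failure path
the old registration code had to handle:

    /* Waits for pdev->op_work alone, not every queued work item. */
    flush_work(&pdev->op_work);
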
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index 53a085fca..666207132 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -195,7 +195,7 @@ static void selfballoon_process(struct work_struct *work)
MB2PAGES(selfballoon_reserved_mb);
#ifdef CONFIG_FRONTSWAP
/* allow space for frontswap pages to be repatriated */
- if (frontswap_selfshrinking && frontswap_enabled)
+ if (frontswap_selfshrinking)
goal_pages += frontswap_curr_pages();
#endif
if (cur_pages > goal_pages)
@@ -230,7 +230,7 @@ static void selfballoon_process(struct work_struct *work)
reset_timer = true;
}
#ifdef CONFIG_FRONTSWAP
- if (frontswap_selfshrinking && frontswap_enabled) {
+ if (frontswap_selfshrinking) {
frontswap_selfshrink();
reset_timer = true;
}
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index bcb53bdc4..611a23119 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -31,7 +31,6 @@
#include "xenbus_probe.h"
-static struct workqueue_struct *xenbus_frontend_wq;
/* device/<type>/<id> => <type>-<id> */
static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
@@ -109,13 +108,7 @@ static int xenbus_frontend_dev_resume(struct device *dev)
if (xen_store_domain_type == XS_LOCAL) {
struct xenbus_device *xdev = to_xenbus_device(dev);
- if (!xenbus_frontend_wq) {
- pr_err("%s: no workqueue to process delayed resume\n",
- xdev->nodename);
- return -EFAULT;
- }
-
- queue_work(xenbus_frontend_wq, &xdev->work);
+ schedule_work(&xdev->work);
return 0;
}
@@ -485,12 +478,6 @@ static int __init xenbus_probe_frontend_init(void)
register_xenstore_notifier(&xenstore_notifier);
- if (xen_store_domain_type == XS_LOCAL) {
- xenbus_frontend_wq = create_workqueue("xenbus_frontend");
- if (!xenbus_frontend_wq)
- pr_warn("create xenbus frontend workqueue failed, S3 resume is likely to fail\n");
- }
-
return 0;
}
subsys_initcall(xenbus_probe_frontend_init);
diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c
index 5063c5e79..23f1387b3 100644
--- a/drivers/xen/xlate_mmu.c
+++ b/drivers/xen/xlate_mmu.c
@@ -29,6 +29,8 @@
*/
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
@@ -37,6 +39,7 @@
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
+#include <xen/balloon.h>
typedef void (*xen_gfn_fn_t)(unsigned long gfn, void *data);
@@ -185,3 +188,77 @@ int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
return 0;
}
EXPORT_SYMBOL_GPL(xen_xlate_unmap_gfn_range);
+
+struct map_balloon_pages {
+ xen_pfn_t *pfns;
+ unsigned int idx;
+};
+
+static void setup_balloon_gfn(unsigned long gfn, void *data)
+{
+ struct map_balloon_pages *info = data;
+
+ info->pfns[info->idx++] = gfn;
+}
+
+/**
+ * xen_xlate_map_ballooned_pages - map a new set of ballooned pages
+ * @gfns: returns the array of corresponding GFNs
+ * @virt: returns the virtual address of the mapped region
+ * @nr_grant_frames: number of GFNs
+ * @return 0 on success, error otherwise
+ *
+ * This allocates a set of ballooned pages and maps them into the
+ * kernel's address space.
+ */
+int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
+ unsigned long nr_grant_frames)
+{
+ struct page **pages;
+ xen_pfn_t *pfns;
+ void *vaddr;
+ struct map_balloon_pages data;
+ int rc;
+ unsigned long nr_pages;
+
+ BUG_ON(nr_grant_frames == 0);
+ nr_pages = DIV_ROUND_UP(nr_grant_frames, XEN_PFN_PER_PAGE);
+ pages = kcalloc(nr_pages, sizeof(pages[0]), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ pfns = kcalloc(nr_grant_frames, sizeof(pfns[0]), GFP_KERNEL);
+ if (!pfns) {
+ kfree(pages);
+ return -ENOMEM;
+ }
+ rc = alloc_xenballooned_pages(nr_pages, pages);
+ if (rc) {
+ pr_warn("%s Couldn't balloon alloc %ld pages rc:%d\n", __func__,
+ nr_pages, rc);
+ kfree(pages);
+ kfree(pfns);
+ return rc;
+ }
+
+ data.pfns = pfns;
+ data.idx = 0;
+ xen_for_each_gfn(pages, nr_grant_frames, setup_balloon_gfn, &data);
+
+ vaddr = vmap(pages, nr_pages, 0, PAGE_KERNEL);
+ if (!vaddr) {
+ pr_warn("%s Couldn't map %ld pages rc:%d\n", __func__,
+ nr_pages, rc);
+ free_xenballooned_pages(nr_pages, pages);
+ kfree(pages);
+ kfree(pfns);
+ return -ENOMEM;
+ }
+ kfree(pages);
+
+ *gfns = pfns;
+ *virt = vaddr;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xen_xlate_map_ballooned_pages);
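
A typical consumer of the new helper is grant-table setup on
auto-translated guests, along these lines (a sketch; surrounding error
handling trimmed):

    xen_pfn_t *gfns;
    void *vaddr;
    int rc;

    /* Allocates ballooned backing pages, maps them contiguously
     * into the kernel, and reports one GFN per Xen-sized frame. */
    rc = xen_xlate_map_ballooned_pages(&gfns, &vaddr, nr_grant_frames);
    if (rc)
            return rc;

    /* gfns[i] is what gets handed to the hypervisor; vaddr is the
     * kernel view of the whole region. */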